-rw-r--r--Makefile354
-rw-r--r--contrib/altq/rtems/freebsd/altq/altq.h204
-rw-r--r--contrib/altq/rtems/freebsd/altq/altq_cbq.c1187
-rw-r--r--contrib/altq/rtems/freebsd/altq/altq_cbq.h221
-rw-r--r--contrib/altq/rtems/freebsd/altq/altq_cdnr.c1393
-rw-r--r--contrib/altq/rtems/freebsd/altq/altq_cdnr.h335
-rw-r--r--contrib/altq/rtems/freebsd/altq/altq_classq.h206
-rw-r--r--contrib/altq/rtems/freebsd/altq/altq_hfsc.c2279
-rw-r--r--contrib/altq/rtems/freebsd/altq/altq_hfsc.h320
-rw-r--r--contrib/altq/rtems/freebsd/altq/altq_priq.c1055
-rw-r--r--contrib/altq/rtems/freebsd/altq/altq_priq.h170
-rw-r--r--contrib/altq/rtems/freebsd/altq/altq_red.c1503
-rw-r--r--contrib/altq/rtems/freebsd/altq/altq_red.h198
-rw-r--r--contrib/altq/rtems/freebsd/altq/altq_rio.c855
-rw-r--r--contrib/altq/rtems/freebsd/altq/altq_rio.h144
-rw-r--r--contrib/altq/rtems/freebsd/altq/altq_rmclass.c1843
-rw-r--r--contrib/altq/rtems/freebsd/altq/altq_rmclass.h266
-rw-r--r--contrib/altq/rtems/freebsd/altq/altq_rmclass_debug.h112
-rw-r--r--contrib/altq/rtems/freebsd/altq/altq_subr.c2032
-rw-r--r--contrib/altq/rtems/freebsd/altq/altq_var.h265
-rw-r--r--contrib/altq/rtems/freebsd/altq/altqconf.h29
-rw-r--r--contrib/altq/rtems/freebsd/altq/if_altq.h191
-rw-r--r--contrib/pf/rtems/freebsd/net/if_pflog.c438
-rw-r--r--contrib/pf/rtems/freebsd/net/if_pflog.h103
-rw-r--r--contrib/pf/rtems/freebsd/net/if_pfsync.c2331
-rw-r--r--contrib/pf/rtems/freebsd/net/if_pfsync.h375
-rw-r--r--contrib/pf/rtems/freebsd/net/pf.c7771
-rw-r--r--contrib/pf/rtems/freebsd/net/pf_if.c950
-rw-r--r--contrib/pf/rtems/freebsd/net/pf_ioctl.c3896
-rw-r--r--contrib/pf/rtems/freebsd/net/pf_mtag.h82
-rw-r--r--contrib/pf/rtems/freebsd/net/pf_norm.c2062
-rw-r--r--contrib/pf/rtems/freebsd/net/pf_osfp.c640
-rw-r--r--contrib/pf/rtems/freebsd/net/pf_ruleset.c433
-rw-r--r--contrib/pf/rtems/freebsd/net/pf_subr.c170
-rw-r--r--contrib/pf/rtems/freebsd/net/pf_table.c2363
-rw-r--r--contrib/pf/rtems/freebsd/net/pfvar.h1866
-rw-r--r--contrib/pf/rtems/freebsd/netinet/in4_cksum.c122
-rw-r--r--rtems/freebsd/bsd.h43
-rw-r--r--rtems/freebsd/bsm/audit.h328
-rw-r--r--rtems/freebsd/bsm/audit_kevents.h799
-rw-r--r--rtems/freebsd/cam/ata/ata_all.h127
-rw-r--r--rtems/freebsd/cam/cam.c438
-rw-r--r--rtems/freebsd/cam/cam.h263
-rw-r--r--rtems/freebsd/cam/cam_ccb.h1193
-rw-r--r--rtems/freebsd/cam/cam_debug.h87
-rw-r--r--rtems/freebsd/cam/cam_periph.h204
-rw-r--r--rtems/freebsd/cam/cam_queue.h1
-rw-r--r--rtems/freebsd/cam/cam_sim.h201
-rw-r--r--rtems/freebsd/cam/cam_xpt.h138
-rw-r--r--rtems/freebsd/cam/cam_xpt_sim.h57
-rw-r--r--rtems/freebsd/cam/scsi/scsi_all.c4305
-rw-r--r--rtems/freebsd/cam/scsi/scsi_all.h1454
-rw-r--r--rtems/freebsd/cam/scsi/scsi_da.h463
-rw-r--r--rtems/freebsd/crypto/blowfish/bf_ecb.c88
-rw-r--r--rtems/freebsd/crypto/blowfish/bf_enc.c163
-rw-r--r--rtems/freebsd/crypto/blowfish/bf_locl.h224
-rw-r--r--rtems/freebsd/crypto/blowfish/bf_pi.h328
-rw-r--r--rtems/freebsd/crypto/blowfish/bf_skey.c125
-rw-r--r--rtems/freebsd/crypto/blowfish/blowfish.h93
-rw-r--r--rtems/freebsd/crypto/camellia/camellia-api.c60
-rw-r--r--rtems/freebsd/crypto/camellia/camellia.c1334
-rw-r--r--rtems/freebsd/crypto/camellia/camellia.h69
-rw-r--r--rtems/freebsd/crypto/des/des.h117
-rw-r--r--rtems/freebsd/crypto/des/des_ecb.c141
-rw-r--r--rtems/freebsd/crypto/des/des_enc.c299
-rw-r--r--rtems/freebsd/crypto/des/des_locl.h364
-rw-r--r--rtems/freebsd/crypto/des/des_setkey.c238
-rw-r--r--rtems/freebsd/crypto/des/podd.h67
-rw-r--r--rtems/freebsd/crypto/des/sk.h196
-rw-r--r--rtems/freebsd/crypto/des/spr.h207
-rw-r--r--rtems/freebsd/crypto/rc4/rc4.c130
-rw-r--r--rtems/freebsd/crypto/rc4/rc4.h54
-rw-r--r--rtems/freebsd/crypto/rijndael/rijndael-alg-fst.c1225
-rw-r--r--rtems/freebsd/crypto/rijndael/rijndael-api-fst.c443
-rw-r--r--rtems/freebsd/crypto/rijndael/rijndael-api-fst.h73
-rw-r--r--rtems/freebsd/crypto/rijndael/rijndael-api.c61
-rw-r--r--rtems/freebsd/crypto/rijndael/rijndael.h55
-rw-r--r--rtems/freebsd/crypto/rijndael/rijndael_local.h7
-rw-r--r--rtems/freebsd/crypto/sha1.c278
-rw-r--r--rtems/freebsd/crypto/sha1.h72
-rw-r--r--rtems/freebsd/crypto/sha2/sha2.c1054
-rw-r--r--rtems/freebsd/crypto/sha2/sha2.h141
-rw-r--r--rtems/freebsd/ddb/db_sym.h1
-rw-r--r--rtems/freebsd/ddb/ddb.h1
-rw-r--r--rtems/freebsd/dev/mii/icsphy.c277
-rw-r--r--rtems/freebsd/dev/mii/icsphyreg.h129
-rw-r--r--rtems/freebsd/dev/mii/mii.c576
-rw-r--r--rtems/freebsd/dev/mii/mii.h212
-rw-r--r--rtems/freebsd/dev/mii/mii_physubr.c667
-rw-r--r--rtems/freebsd/dev/mii/miivar.h260
-rw-r--r--rtems/freebsd/dev/pci/pcireg.h1
-rw-r--r--rtems/freebsd/dev/pci/pcivar.h1
-rw-r--r--rtems/freebsd/dev/usb/controller/ehci.c3939
-rw-r--r--rtems/freebsd/dev/usb/controller/ehci.h486
-rw-r--r--rtems/freebsd/dev/usb/controller/ehcireg.h171
-rw-r--r--rtems/freebsd/dev/usb/controller/ohci.c2764
-rw-r--r--rtems/freebsd/dev/usb/controller/ohci.h276
-rw-r--r--rtems/freebsd/dev/usb/controller/ohci_lpc3250.c361
-rw-r--r--rtems/freebsd/dev/usb/controller/ohcireg.h131
-rw-r--r--rtems/freebsd/dev/usb/controller/usb_controller.c606
-rw-r--r--rtems/freebsd/dev/usb/quirk/usb_quirk.c807
-rw-r--r--rtems/freebsd/dev/usb/quirk/usb_quirk.h108
-rw-r--r--rtems/freebsd/dev/usb/storage/umass.c3119
-rw-r--r--rtems/freebsd/dev/usb/ufm_ioctl.h39
-rw-r--r--rtems/freebsd/dev/usb/usb.h755
-rw-r--r--rtems/freebsd/dev/usb/usb_bus.h114
-rw-r--r--rtems/freebsd/dev/usb/usb_busdma.c1071
-rw-r--r--rtems/freebsd/dev/usb/usb_busdma.h161
-rw-r--r--rtems/freebsd/dev/usb/usb_cdc.h295
-rw-r--r--rtems/freebsd/dev/usb/usb_controller.h225
-rw-r--r--rtems/freebsd/dev/usb/usb_core.c62
-rw-r--r--rtems/freebsd/dev/usb/usb_core.h183
-rw-r--r--rtems/freebsd/dev/usb/usb_debug.c179
-rw-r--r--rtems/freebsd/dev/usb/usb_debug.h62
-rw-r--r--rtems/freebsd/dev/usb/usb_dev.c2309
-rw-r--r--rtems/freebsd/dev/usb/usb_dev.h154
-rw-r--r--rtems/freebsd/dev/usb/usb_device.c2693
-rw-r--r--rtems/freebsd/dev/usb/usb_device.h227
-rw-r--r--rtems/freebsd/dev/usb/usb_dynamic.c151
-rw-r--r--rtems/freebsd/dev/usb/usb_dynamic.h61
-rw-r--r--rtems/freebsd/dev/usb/usb_endian.h119
-rw-r--r--rtems/freebsd/dev/usb/usb_error.c93
-rw-r--r--rtems/freebsd/dev/usb/usb_freebsd.h69
-rw-r--r--rtems/freebsd/dev/usb/usb_generic.c2239
-rw-r--r--rtems/freebsd/dev/usb/usb_generic.h33
-rw-r--r--rtems/freebsd/dev/usb/usb_handle_request.c807
-rw-r--r--rtems/freebsd/dev/usb/usb_hid.c820
-rw-r--r--rtems/freebsd/dev/usb/usb_hub.c2474
-rw-r--r--rtems/freebsd/dev/usb/usb_hub.h83
-rw-r--r--rtems/freebsd/dev/usb/usb_ioctl.h272
-rw-r--r--rtems/freebsd/dev/usb/usb_lookup.c156
-rw-r--r--rtems/freebsd/dev/usb/usb_mbuf.c101
-rw-r--r--rtems/freebsd/dev/usb/usb_mbuf.h90
-rw-r--r--rtems/freebsd/dev/usb/usb_msctest.c641
-rw-r--r--rtems/freebsd/dev/usb/usb_msctest.h44
-rw-r--r--rtems/freebsd/dev/usb/usb_parse.c291
-rw-r--r--rtems/freebsd/dev/usb/usb_process.c497
-rw-r--r--rtems/freebsd/dev/usb/usb_process.h84
-rw-r--r--rtems/freebsd/dev/usb/usb_request.c2031
-rw-r--r--rtems/freebsd/dev/usb/usb_request.h89
-rw-r--r--rtems/freebsd/dev/usb/usb_transfer.c3305
-rw-r--r--rtems/freebsd/dev/usb/usb_transfer.h140
-rw-r--r--rtems/freebsd/dev/usb/usb_util.c251
-rw-r--r--rtems/freebsd/dev/usb/usb_util.h35
-rw-r--r--rtems/freebsd/dev/usb/usbdi.h562
-rw-r--r--rtems/freebsd/dev/usb/usbdi_util.h91
-rw-r--r--rtems/freebsd/dev/usb/usbhid.h244
-rw-r--r--rtems/freebsd/fs/devfs/devfs_int.h90
-rw-r--r--rtems/freebsd/geom/geom_disk.h1
-rw-r--r--rtems/freebsd/kern/init_main.c877
-rw-r--r--rtems/freebsd/kern/kern_mbuf.c706
-rw-r--r--rtems/freebsd/kern/kern_module.c551
-rw-r--r--rtems/freebsd/kern/kern_sysctl.c1579
-rw-r--r--rtems/freebsd/kern/subr_bus.c4523
-rw-r--r--rtems/freebsd/kern/subr_kobj.c363
-rw-r--r--rtems/freebsd/kern/uipc_mbuf.c2123
-rw-r--r--rtems/freebsd/kern/uipc_mbuf2.c455
-rw-r--r--rtems/freebsd/kern/uipc_socket.c3575
-rw-r--r--rtems/freebsd/local/bus_if.c273
-rw-r--r--rtems/freebsd/local/bus_if.h786
-rw-r--r--rtems/freebsd/local/cryptodev_if.c54
-rw-r--r--rtems/freebsd/local/cryptodev_if.h69
-rw-r--r--rtems/freebsd/local/device_if.c106
-rw-r--r--rtems/freebsd/local/device_if.h340
-rw-r--r--rtems/freebsd/local/miibus_if.c61
-rw-r--r--rtems/freebsd/local/miibus_if.h77
-rw-r--r--rtems/freebsd/local/miidevs.h376
-rw-r--r--rtems/freebsd/local/opt_altq.h0
-rw-r--r--rtems/freebsd/local/opt_atalk.h0
-rw-r--r--rtems/freebsd/local/opt_bootp.h0
-rw-r--r--rtems/freebsd/local/opt_bpf.h0
-rw-r--r--rtems/freebsd/local/opt_bus.h0
-rw-r--r--rtems/freebsd/local/opt_cam.h0
-rw-r--r--rtems/freebsd/local/opt_carp.h0
-rw-r--r--rtems/freebsd/local/opt_compat.h0
-rw-r--r--rtems/freebsd/local/opt_config.h0
-rw-r--r--rtems/freebsd/local/opt_cpu.h0
-rw-r--r--rtems/freebsd/local/opt_ddb.h0
-rw-r--r--rtems/freebsd/local/opt_device_polling.h0
-rw-r--r--rtems/freebsd/local/opt_ef.h0
-rw-r--r--rtems/freebsd/local/opt_enc.h1
-rw-r--r--rtems/freebsd/local/opt_hwpmc_hooks.h0
-rw-r--r--rtems/freebsd/local/opt_inet.h1
-rw-r--r--rtems/freebsd/local/opt_inet6.h0
-rw-r--r--rtems/freebsd/local/opt_init_path.h0
-rw-r--r--rtems/freebsd/local/opt_ipdivert.h0
-rw-r--r--rtems/freebsd/local/opt_ipdn.h0
-rw-r--r--rtems/freebsd/local/opt_ipfw.h0
-rw-r--r--rtems/freebsd/local/opt_ipsec.h0
-rw-r--r--rtems/freebsd/local/opt_ipstealth.h0
-rw-r--r--rtems/freebsd/local/opt_ipx.h0
-rw-r--r--rtems/freebsd/local/opt_kdb.h0
-rw-r--r--rtems/freebsd/local/opt_kdtrace.h0
-rw-r--r--rtems/freebsd/local/opt_ktrace.h0
-rw-r--r--rtems/freebsd/local/opt_mbuf_profiling.h0
-rw-r--r--rtems/freebsd/local/opt_mbuf_stress_test.h0
-rw-r--r--rtems/freebsd/local/opt_mpath.h0
-rw-r--r--rtems/freebsd/local/opt_mrouting.h0
-rw-r--r--rtems/freebsd/local/opt_natm.h0
-rw-r--r--rtems/freebsd/local/opt_netgraph.h0
-rw-r--r--rtems/freebsd/local/opt_param.h0
-rw-r--r--rtems/freebsd/local/opt_pf.h0
-rw-r--r--rtems/freebsd/local/opt_posix.h0
-rw-r--r--rtems/freebsd/local/opt_printf.h0
-rw-r--r--rtems/freebsd/local/opt_route.h0
-rw-r--r--rtems/freebsd/local/opt_scsi.h0
-rw-r--r--rtems/freebsd/local/opt_sctp.h0
-rw-r--r--rtems/freebsd/local/opt_tcpdebug.h0
-rw-r--r--rtems/freebsd/local/opt_tdma.h0
-rw-r--r--rtems/freebsd/local/opt_usb.h19
-rw-r--r--rtems/freebsd/local/opt_vlan.h0
-rw-r--r--rtems/freebsd/local/opt_wlan.h0
-rw-r--r--rtems/freebsd/local/opt_zero.h0
-rw-r--r--rtems/freebsd/local/pmap.h29
-rw-r--r--rtems/freebsd/local/usb_if.c29
-rw-r--r--rtems/freebsd/local/usb_if.h31
-rw-r--r--rtems/freebsd/local/usbdevs.h3433
-rw-r--r--rtems/freebsd/local/usbdevs_data.h15530
-rw-r--r--rtems/freebsd/local/vnode_if.h1546
-rw-r--r--rtems/freebsd/local/vnode_if_newproto.h66
-rw-r--r--rtems/freebsd/local/vnode_if_typedef.h170
-rw-r--r--rtems/freebsd/machine/_align.h33
-rw-r--r--rtems/freebsd/machine/_bus.h1
-rw-r--r--rtems/freebsd/machine/_limits.h30
-rw-r--r--rtems/freebsd/machine/_stdint.h30
-rw-r--r--rtems/freebsd/machine/_types.h30
-rw-r--r--rtems/freebsd/machine/atomic.h367
-rw-r--r--rtems/freebsd/machine/bus.h781
-rw-r--r--rtems/freebsd/machine/bus_dma.h32
-rw-r--r--rtems/freebsd/machine/clock.h35
-rw-r--r--rtems/freebsd/machine/cpu.h1
-rw-r--r--rtems/freebsd/machine/cpufunc.h30
-rw-r--r--rtems/freebsd/machine/elf.h1
-rw-r--r--rtems/freebsd/machine/endian.h48
-rw-r--r--rtems/freebsd/machine/in_cksum.h77
-rw-r--r--rtems/freebsd/machine/mutex.h30
-rw-r--r--rtems/freebsd/machine/param.h42
-rw-r--r--rtems/freebsd/machine/pcpu.h41
-rw-r--r--rtems/freebsd/machine/proc.h38
-rw-r--r--rtems/freebsd/machine/resource.h30
-rw-r--r--rtems/freebsd/machine/rtems-bsd-cache.h37
-rw-r--r--rtems/freebsd/machine/rtems-bsd-config.h255
-rw-r--r--rtems/freebsd/machine/rtems-bsd-select.h76
-rw-r--r--rtems/freebsd/machine/rtems-bsd-symbols.h561
-rw-r--r--rtems/freebsd/machine/rtems-bsd-sysinit.h67
-rw-r--r--rtems/freebsd/machine/runq.h41
-rw-r--r--rtems/freebsd/machine/sf_buf.h1
-rw-r--r--rtems/freebsd/machine/signal.h30
-rw-r--r--rtems/freebsd/machine/stdarg.h30
-rw-r--r--rtems/freebsd/machine/ucontext.h32
-rw-r--r--rtems/freebsd/net/bpf.c2398
-rw-r--r--rtems/freebsd/net/bpf.h974
-rw-r--r--rtems/freebsd/net/bpf_buffer.c212
-rw-r--r--rtems/freebsd/net/bpf_buffer.h50
-rw-r--r--rtems/freebsd/net/bpf_filter.c582
-rw-r--r--rtems/freebsd/net/bpf_jitter.c143
-rw-r--r--rtems/freebsd/net/bpf_jitter.h84
-rw-r--r--rtems/freebsd/net/bpf_zerocopy.h56
-rw-r--r--rtems/freebsd/net/bpfdesc.h149
-rw-r--r--rtems/freebsd/net/bridgestp.c2250
-rw-r--r--rtems/freebsd/net/bridgestp.h396
-rw-r--r--rtems/freebsd/net/ethernet.h405
-rw-r--r--rtems/freebsd/net/fddi.h105
-rw-r--r--rtems/freebsd/net/firewire.h142
-rw-r--r--rtems/freebsd/net/flowtable.h82
-rw-r--r--rtems/freebsd/net/ieee8023ad_lacp.c1947
-rw-r--r--rtems/freebsd/net/ieee8023ad_lacp.h333
-rw-r--r--rtems/freebsd/net/if.c3431
-rw-r--r--rtems/freebsd/net/if.h470
-rw-r--r--rtems/freebsd/net/if_arc.h143
-rw-r--r--rtems/freebsd/net/if_arcsubr.c886
-rw-r--r--rtems/freebsd/net/if_arp.h138
-rw-r--r--rtems/freebsd/net/if_atm.h337
-rw-r--r--rtems/freebsd/net/if_atmsubr.c504
-rw-r--r--rtems/freebsd/net/if_bridge.c3458
-rw-r--r--rtems/freebsd/net/if_bridgevar.h328
-rw-r--r--rtems/freebsd/net/if_clone.c617
-rw-r--r--rtems/freebsd/net/if_clone.h116
-rw-r--r--rtems/freebsd/net/if_dead.c116
-rw-r--r--rtems/freebsd/net/if_disc.c247
-rw-r--r--rtems/freebsd/net/if_dl.h82
-rw-r--r--rtems/freebsd/net/if_edsc.c356
-rw-r--r--rtems/freebsd/net/if_ef.c610
-rw-r--r--rtems/freebsd/net/if_enc.c375
-rw-r--r--rtems/freebsd/net/if_enc.h35
-rw-r--r--rtems/freebsd/net/if_epair.c955
-rw-r--r--rtems/freebsd/net/if_ethersubr.c1364
-rw-r--r--rtems/freebsd/net/if_faith.c353
-rw-r--r--rtems/freebsd/net/if_fddisubr.c800
-rw-r--r--rtems/freebsd/net/if_fwsubr.c853
-rw-r--r--rtems/freebsd/net/if_gif.c1025
-rw-r--r--rtems/freebsd/net/if_gif.h130
-rw-r--r--rtems/freebsd/net/if_gre.c909
-rw-r--r--rtems/freebsd/net/if_gre.h194
-rw-r--r--rtems/freebsd/net/if_iso88025subr.c831
-rw-r--r--rtems/freebsd/net/if_lagg.c1808
-rw-r--r--rtems/freebsd/net/if_lagg.h247
-rw-r--r--rtems/freebsd/net/if_llatbl.c528
-rw-r--r--rtems/freebsd/net/if_llatbl.h208
-rw-r--r--rtems/freebsd/net/if_llc.h161
-rw-r--r--rtems/freebsd/net/if_loop.c451
-rw-r--r--rtems/freebsd/net/if_media.c566
-rw-r--r--rtems/freebsd/net/if_media.h692
-rw-r--r--rtems/freebsd/net/if_mib.c171
-rw-r--r--rtems/freebsd/net/if_mib.h171
-rw-r--r--rtems/freebsd/net/if_sppp.h234
-rw-r--r--rtems/freebsd/net/if_spppfr.c636
-rw-r--r--rtems/freebsd/net/if_spppsubr.c5492
-rw-r--r--rtems/freebsd/net/if_stf.c850
-rw-r--r--rtems/freebsd/net/if_stf.h38
-rw-r--r--rtems/freebsd/net/if_tap.c1086
-rw-r--r--rtems/freebsd/net/if_tap.h74
-rw-r--r--rtems/freebsd/net/if_tapvar.h69
-rw-r--r--rtems/freebsd/net/if_tun.c1059
-rw-r--r--rtems/freebsd/net/if_tun.h48
-rw-r--r--rtems/freebsd/net/if_types.h254
-rw-r--r--rtems/freebsd/net/if_var.h904
-rw-r--r--rtems/freebsd/net/if_vlan.c1538
-rw-r--r--rtems/freebsd/net/if_vlan_var.h137
-rw-r--r--rtems/freebsd/net/iso88025.h172
-rw-r--r--rtems/freebsd/net/netisr.c1158
-rw-r--r--rtems/freebsd/net/netisr.h156
-rw-r--r--rtems/freebsd/net/pfil.c331
-rw-r--r--rtems/freebsd/net/pfil.h117
-rw-r--r--rtems/freebsd/net/pfkeyv2.h432
-rw-r--r--rtems/freebsd/net/ppp_defs.h158
-rw-r--r--rtems/freebsd/net/radix.c1205
-rw-r--r--rtems/freebsd/net/radix.h176
-rw-r--r--rtems/freebsd/net/radix_mpath.c357
-rw-r--r--rtems/freebsd/net/radix_mpath.h63
-rw-r--r--rtems/freebsd/net/raw_cb.c119
-rw-r--r--rtems/freebsd/net/raw_cb.h84
-rw-r--r--rtems/freebsd/net/raw_usrreq.c266
-rw-r--r--rtems/freebsd/net/route.c1601
-rw-r--r--rtems/freebsd/net/route.h446
-rw-r--r--rtems/freebsd/net/rtsock.c1702
-rw-r--r--rtems/freebsd/net/slcompress.c609
-rw-r--r--rtems/freebsd/net/slcompress.h158
-rw-r--r--rtems/freebsd/net/vnet.h437
-rw-r--r--rtems/freebsd/net/zlib.c5409
-rw-r--r--rtems/freebsd/net/zlib.h1018
-rw-r--r--rtems/freebsd/net80211/_ieee80211.h396
-rw-r--r--rtems/freebsd/net80211/ieee80211.c1638
-rw-r--r--rtems/freebsd/net80211/ieee80211.h1087
-rw-r--r--rtems/freebsd/net80211/ieee80211_acl.c341
-rw-r--r--rtems/freebsd/net80211/ieee80211_action.c281
-rw-r--r--rtems/freebsd/net80211/ieee80211_action.h52
-rw-r--r--rtems/freebsd/net80211/ieee80211_adhoc.c929
-rw-r--r--rtems/freebsd/net80211/ieee80211_adhoc.h35
-rw-r--r--rtems/freebsd/net80211/ieee80211_ageq.c239
-rw-r--r--rtems/freebsd/net80211/ieee80211_ageq.h54
-rw-r--r--rtems/freebsd/net80211/ieee80211_amrr.c319
-rw-r--r--rtems/freebsd/net80211/ieee80211_amrr.h61
-rw-r--r--rtems/freebsd/net80211/ieee80211_crypto.c663
-rw-r--r--rtems/freebsd/net80211/ieee80211_crypto.h245
-rw-r--r--rtems/freebsd/net80211/ieee80211_crypto_ccmp.c636
-rw-r--r--rtems/freebsd/net80211/ieee80211_crypto_none.c146
-rw-r--r--rtems/freebsd/net80211/ieee80211_crypto_tkip.c1000
-rw-r--r--rtems/freebsd/net80211/ieee80211_crypto_wep.c482
-rw-r--r--rtems/freebsd/net80211/ieee80211_ddb.c881
-rw-r--r--rtems/freebsd/net80211/ieee80211_dfs.c379
-rw-r--r--rtems/freebsd/net80211/ieee80211_dfs.h57
-rw-r--r--rtems/freebsd/net80211/ieee80211_freebsd.c831
-rw-r--r--rtems/freebsd/net80211/ieee80211_freebsd.h550
-rw-r--r--rtems/freebsd/net80211/ieee80211_hostap.c2307
-rw-r--r--rtems/freebsd/net80211/ieee80211_hostap.h35
-rw-r--r--rtems/freebsd/net80211/ieee80211_ht.c2523
-rw-r--r--rtems/freebsd/net80211/ieee80211_ht.h202
-rw-r--r--rtems/freebsd/net80211/ieee80211_hwmp.c1440
-rw-r--r--rtems/freebsd/net80211/ieee80211_input.c852
-rw-r--r--rtems/freebsd/net80211/ieee80211_input.h160
-rw-r--r--rtems/freebsd/net80211/ieee80211_ioctl.c3349
-rw-r--r--rtems/freebsd/net80211/ieee80211_ioctl.h849
-rw-r--r--rtems/freebsd/net80211/ieee80211_mesh.c2755
-rw-r--r--rtems/freebsd/net80211/ieee80211_mesh.h503
-rw-r--r--rtems/freebsd/net80211/ieee80211_monitor.c140
-rw-r--r--rtems/freebsd/net80211/ieee80211_monitor.h35
-rw-r--r--rtems/freebsd/net80211/ieee80211_node.c2641
-rw-r--r--rtems/freebsd/net80211/ieee80211_node.h456
-rw-r--r--rtems/freebsd/net80211/ieee80211_output.c3043
-rw-r--r--rtems/freebsd/net80211/ieee80211_phy.c467
-rw-r--r--rtems/freebsd/net80211/ieee80211_phy.h155
-rw-r--r--rtems/freebsd/net80211/ieee80211_power.c529
-rw-r--r--rtems/freebsd/net80211/ieee80211_power.h79
-rw-r--r--rtems/freebsd/net80211/ieee80211_proto.c1888
-rw-r--r--rtems/freebsd/net80211/ieee80211_proto.h387
-rw-r--r--rtems/freebsd/net80211/ieee80211_radiotap.c357
-rw-r--r--rtems/freebsd/net80211/ieee80211_radiotap.h234
-rw-r--r--rtems/freebsd/net80211/ieee80211_ratectl.c94
-rw-r--r--rtems/freebsd/net80211/ieee80211_ratectl.h117
-rw-r--r--rtems/freebsd/net80211/ieee80211_ratectl_none.c116
-rw-r--r--rtems/freebsd/net80211/ieee80211_regdomain.c450
-rw-r--r--rtems/freebsd/net80211/ieee80211_regdomain.h282
-rw-r--r--rtems/freebsd/net80211/ieee80211_rssadapt.c351
-rw-r--r--rtems/freebsd/net80211/ieee80211_rssadapt.h71
-rw-r--r--rtems/freebsd/net80211/ieee80211_scan.c1240
-rw-r--r--rtems/freebsd/net80211/ieee80211_scan.h301
-rw-r--r--rtems/freebsd/net80211/ieee80211_scan_sta.c1928
-rw-r--r--rtems/freebsd/net80211/ieee80211_sta.c1748
-rw-r--r--rtems/freebsd/net80211/ieee80211_sta.h36
-rw-r--r--rtems/freebsd/net80211/ieee80211_superg.c902
-rw-r--r--rtems/freebsd/net80211/ieee80211_superg.h129
-rw-r--r--rtems/freebsd/net80211/ieee80211_tdma.c822
-rw-r--r--rtems/freebsd/net80211/ieee80211_tdma.h102
-rw-r--r--rtems/freebsd/net80211/ieee80211_var.h916
-rw-r--r--rtems/freebsd/net80211/ieee80211_wds.c789
-rw-r--r--rtems/freebsd/net80211/ieee80211_wds.h39
-rw-r--r--rtems/freebsd/net80211/ieee80211_xauth.c78
-rw-r--r--rtems/freebsd/netgraph/ng_ipfw.h1
-rw-r--r--rtems/freebsd/netinet/accf_data.c68
-rw-r--r--rtems/freebsd/netinet/accf_dns.c134
-rw-r--r--rtems/freebsd/netinet/accf_http.c351
-rw-r--r--rtems/freebsd/netinet/icmp6.h741
-rw-r--r--rtems/freebsd/netinet/icmp_var.h108
-rw-r--r--rtems/freebsd/netinet/if_atm.c366
-rw-r--r--rtems/freebsd/netinet/if_atm.h47
-rw-r--r--rtems/freebsd/netinet/if_ether.c859
-rw-r--r--rtems/freebsd/netinet/if_ether.h122
-rw-r--r--rtems/freebsd/netinet/igmp.c3655
-rw-r--r--rtems/freebsd/netinet/igmp.h148
-rw-r--r--rtems/freebsd/netinet/igmp_var.h225
-rw-r--r--rtems/freebsd/netinet/in.c1601
-rw-r--r--rtems/freebsd/netinet/in.h794
-rw-r--r--rtems/freebsd/netinet/in_cksum.c148
-rw-r--r--rtems/freebsd/netinet/in_gif.c469
-rw-r--r--rtems/freebsd/netinet/in_gif.h45
-rw-r--r--rtems/freebsd/netinet/in_mcast.c2902
-rw-r--r--rtems/freebsd/netinet/in_pcb.c1958
-rw-r--r--rtems/freebsd/netinet/in_pcb.h525
-rw-r--r--rtems/freebsd/netinet/in_proto.c400
-rw-r--r--rtems/freebsd/netinet/in_rmx.c516
-rw-r--r--rtems/freebsd/netinet/in_systm.h58
-rw-r--r--rtems/freebsd/netinet/in_var.h475
-rw-r--r--rtems/freebsd/netinet/ip.h196
-rw-r--r--rtems/freebsd/netinet/ip6.h352
-rw-r--r--rtems/freebsd/netinet/ip_carp.c2427
-rw-r--r--rtems/freebsd/netinet/ip_carp.h191
-rw-r--r--rtems/freebsd/netinet/ip_divert.c818
-rw-r--r--rtems/freebsd/netinet/ip_divert.h55
-rw-r--r--rtems/freebsd/netinet/ip_dummynet.h263
-rw-r--r--rtems/freebsd/netinet/ip_ecn.c194
-rw-r--r--rtems/freebsd/netinet/ip_ecn.h53
-rw-r--r--rtems/freebsd/netinet/ip_encap.c465
-rw-r--r--rtems/freebsd/netinet/ip_encap.h64
-rw-r--r--rtems/freebsd/netinet/ip_fastfwd.c619
-rw-r--r--rtems/freebsd/netinet/ip_fw.h579
-rw-r--r--rtems/freebsd/netinet/ip_gre.c336
-rw-r--r--rtems/freebsd/netinet/ip_gre.h43
-rw-r--r--rtems/freebsd/netinet/ip_icmp.c986
-rw-r--r--rtems/freebsd/netinet/ip_icmp.h214
-rw-r--r--rtems/freebsd/netinet/ip_id.c211
-rw-r--r--rtems/freebsd/netinet/ip_input.c1794
-rw-r--r--rtems/freebsd/netinet/ip_ipsec.c424
-rw-r--r--rtems/freebsd/netinet/ip_ipsec.h41
-rw-r--r--rtems/freebsd/netinet/ip_mroute.c2952
-rw-r--r--rtems/freebsd/netinet/ip_mroute.h359
-rw-r--r--rtems/freebsd/netinet/ip_options.c747
-rw-r--r--rtems/freebsd/netinet/ip_options.h60
-rw-r--r--rtems/freebsd/netinet/ip_output.c1284
-rw-r--r--rtems/freebsd/netinet/ip_var.h315
-rw-r--r--rtems/freebsd/netinet/ipfw/dn_heap.c552
-rw-r--r--rtems/freebsd/netinet/ipfw/dn_heap.h191
-rw-r--r--rtems/freebsd/netinet/ipfw/dn_sched.h189
-rw-r--r--rtems/freebsd/netinet/ipfw/dn_sched_fifo.c122
-rw-r--r--rtems/freebsd/netinet/ipfw/dn_sched_prio.c231
-rw-r--r--rtems/freebsd/netinet/ipfw/dn_sched_qfq.c866
-rw-r--r--rtems/freebsd/netinet/ipfw/dn_sched_rr.c309
-rw-r--r--rtems/freebsd/netinet/ipfw/dn_sched_wf2q.c375
-rw-r--r--rtems/freebsd/netinet/ipfw/ip_dn_glue.c847
-rw-r--r--rtems/freebsd/netinet/ipfw/ip_dn_io.c796
-rw-r--r--rtems/freebsd/netinet/ipfw/ip_dn_private.h402
-rw-r--r--rtems/freebsd/netinet/ipfw/ip_dummynet.c2297
-rw-r--r--rtems/freebsd/netinet/ipfw/ip_fw2.c2495
-rw-r--r--rtems/freebsd/netinet/ipfw/ip_fw_log.c451
-rw-r--r--rtems/freebsd/netinet/ipfw/ip_fw_nat.c606
-rw-r--r--rtems/freebsd/netinet/ipfw/ip_fw_pfil.c417
-rw-r--r--rtems/freebsd/netinet/ipfw/ip_fw_private.h301
-rw-r--r--rtems/freebsd/netinet/ipfw/ip_fw_sockopt.c1345
-rw-r--r--rtems/freebsd/netinet/ipfw/ip_fw_table.c288
-rw-r--r--rtems/freebsd/netinet/libalias/alias.c1793
-rw-r--r--rtems/freebsd/netinet/libalias/alias.h232
-rw-r--r--rtems/freebsd/netinet/libalias/alias_cuseeme.c230
-rw-r--r--rtems/freebsd/netinet/libalias/alias_db.c2940
-rw-r--r--rtems/freebsd/netinet/libalias/alias_dummy.c155
-rw-r--r--rtems/freebsd/netinet/libalias/alias_ftp.c696
-rw-r--r--rtems/freebsd/netinet/libalias/alias_irc.c490
-rw-r--r--rtems/freebsd/netinet/libalias/alias_local.h397
-rw-r--r--rtems/freebsd/netinet/libalias/alias_mod.c292
-rw-r--r--rtems/freebsd/netinet/libalias/alias_mod.h163
-rw-r--r--rtems/freebsd/netinet/libalias/alias_nbt.c855
-rw-r--r--rtems/freebsd/netinet/libalias/alias_pptp.c525
-rw-r--r--rtems/freebsd/netinet/libalias/alias_proxy.c870
-rw-r--r--rtems/freebsd/netinet/libalias/alias_sctp.c2700
-rw-r--r--rtems/freebsd/netinet/libalias/alias_sctp.h201
-rw-r--r--rtems/freebsd/netinet/libalias/alias_skinny.c449
-rw-r--r--rtems/freebsd/netinet/libalias/alias_smedia.c551
-rw-r--r--rtems/freebsd/netinet/libalias/alias_util.c178
-rw-r--r--rtems/freebsd/netinet/pim.h119
-rw-r--r--rtems/freebsd/netinet/pim_var.h84
-rw-r--r--rtems/freebsd/netinet/raw_ip.c1116
-rw-r--r--rtems/freebsd/netinet/sctp.h549
-rw-r--r--rtems/freebsd/netinet/sctp_asconf.c3397
-rw-r--r--rtems/freebsd/netinet/sctp_asconf.h96
-rw-r--r--rtems/freebsd/netinet/sctp_auth.c2128
-rw-r--r--rtems/freebsd/netinet/sctp_auth.h235
-rw-r--r--rtems/freebsd/netinet/sctp_bsd_addr.c562
-rw-r--r--rtems/freebsd/netinet/sctp_bsd_addr.h63
-rw-r--r--rtems/freebsd/netinet/sctp_cc_functions.c1565
-rw-r--r--rtems/freebsd/netinet/sctp_cc_functions.h116
-rw-r--r--rtems/freebsd/netinet/sctp_constants.h1051
-rw-r--r--rtems/freebsd/netinet/sctp_crc32.c148
-rw-r--r--rtems/freebsd/netinet/sctp_crc32.h47
-rw-r--r--rtems/freebsd/netinet/sctp_header.h624
-rw-r--r--rtems/freebsd/netinet/sctp_indata.c5800
-rw-r--r--rtems/freebsd/netinet/sctp_indata.h129
-rw-r--r--rtems/freebsd/netinet/sctp_input.c5965
-rw-r--r--rtems/freebsd/netinet/sctp_input.h57
-rw-r--r--rtems/freebsd/netinet/sctp_lock_bsd.h430
-rw-r--r--rtems/freebsd/netinet/sctp_os.h72
-rw-r--r--rtems/freebsd/netinet/sctp_os_bsd.h503
-rw-r--r--rtems/freebsd/netinet/sctp_output.c13537
-rw-r--r--rtems/freebsd/netinet/sctp_output.h229
-rw-r--r--rtems/freebsd/netinet/sctp_pcb.c6810
-rw-r--r--rtems/freebsd/netinet/sctp_pcb.h632
-rw-r--r--rtems/freebsd/netinet/sctp_peeloff.c240
-rw-r--r--rtems/freebsd/netinet/sctp_peeloff.h52
-rw-r--r--rtems/freebsd/netinet/sctp_structs.h1094
-rw-r--r--rtems/freebsd/netinet/sctp_sysctl.c1108
-rw-r--r--rtems/freebsd/netinet/sctp_sysctl.h532
-rw-r--r--rtems/freebsd/netinet/sctp_timer.c1804
-rw-r--r--rtems/freebsd/netinet/sctp_timer.h101
-rw-r--r--rtems/freebsd/netinet/sctp_uio.h1166
-rw-r--r--rtems/freebsd/netinet/sctp_usrreq.c4918
-rw-r--r--rtems/freebsd/netinet/sctp_var.h336
-rw-r--r--rtems/freebsd/netinet/sctputil.c6977
-rw-r--r--rtems/freebsd/netinet/sctputil.h392
-rw-r--r--rtems/freebsd/netinet/tcp.h226
-rw-r--r--rtems/freebsd/netinet/tcp_debug.c226
-rw-r--r--rtems/freebsd/netinet/tcp_debug.h80
-rw-r--r--rtems/freebsd/netinet/tcp_fsm.h112
-rw-r--r--rtems/freebsd/netinet/tcp_hostcache.h82
-rw-r--r--rtems/freebsd/netinet/tcp_input.c3453
-rw-r--r--rtems/freebsd/netinet/tcp_lro.c389
-rw-r--r--rtems/freebsd/netinet/tcp_lro.h85
-rw-r--r--rtems/freebsd/netinet/tcp_offload.c147
-rw-r--r--rtems/freebsd/netinet/tcp_offload.h354
-rw-r--r--rtems/freebsd/netinet/tcp_output.c1485
-rw-r--r--rtems/freebsd/netinet/tcp_reass.c335
-rw-r--r--rtems/freebsd/netinet/tcp_sack.c687
-rw-r--r--rtems/freebsd/netinet/tcp_seq.h68
-rw-r--r--rtems/freebsd/netinet/tcp_subr.c2315
-rw-r--r--rtems/freebsd/netinet/tcp_syncache.c1812
-rw-r--r--rtems/freebsd/netinet/tcp_syncache.h127
-rw-r--r--rtems/freebsd/netinet/tcp_timer.c660
-rw-r--r--rtems/freebsd/netinet/tcp_timer.h183
-rw-r--r--rtems/freebsd/netinet/tcp_timewait.c618
-rw-r--r--rtems/freebsd/netinet/tcp_usrreq.c1886
-rw-r--r--rtems/freebsd/netinet/tcp_var.h687
-rw-r--r--rtems/freebsd/netinet/tcpip.h59
-rw-r--r--rtems/freebsd/netinet/toedev.h162
-rw-r--r--rtems/freebsd/netinet/udp.h67
-rw-r--r--rtems/freebsd/netinet/udp_usrreq.c1633
-rw-r--r--rtems/freebsd/netinet/udp_var.h161
-rw-r--r--rtems/freebsd/netinet6/dest6.c125
-rw-r--r--rtems/freebsd/netinet6/frag6.c781
-rw-r--r--rtems/freebsd/netinet6/icmp6.c2857
-rw-r--r--rtems/freebsd/netinet6/icmp6.h4
-rw-r--r--rtems/freebsd/netinet6/in6.c2671
-rw-r--r--rtems/freebsd/netinet6/in6.h708
-rw-r--r--rtems/freebsd/netinet6/in6_cksum.c303
-rw-r--r--rtems/freebsd/netinet6/in6_gif.c466
-rw-r--r--rtems/freebsd/netinet6/in6_gif.h45
-rw-r--r--rtems/freebsd/netinet6/in6_ifattach.c971
-rw-r--r--rtems/freebsd/netinet6/in6_ifattach.h45
-rw-r--r--rtems/freebsd/netinet6/in6_mcast.c2840
-rw-r--r--rtems/freebsd/netinet6/in6_pcb.c936
-rw-r--r--rtems/freebsd/netinet6/in6_pcb.h109
-rw-r--r--rtems/freebsd/netinet6/in6_proto.c597
-rw-r--r--rtems/freebsd/netinet6/in6_rmx.c449
-rw-r--r--rtems/freebsd/netinet6/in6_src.c1204
-rw-r--r--rtems/freebsd/netinet6/in6_var.h786
-rw-r--r--rtems/freebsd/netinet6/ip6.h4
-rw-r--r--rtems/freebsd/netinet6/ip6_ecn.h41
-rw-r--r--rtems/freebsd/netinet6/ip6_forward.c626
-rw-r--r--rtems/freebsd/netinet6/ip6_id.c269
-rw-r--r--rtems/freebsd/netinet6/ip6_input.c1759
-rw-r--r--rtems/freebsd/netinet6/ip6_ipsec.c386
-rw-r--r--rtems/freebsd/netinet6/ip6_ipsec.h43
-rw-r--r--rtems/freebsd/netinet6/ip6_mroute.c2065
-rw-r--r--rtems/freebsd/netinet6/ip6_mroute.h271
-rw-r--r--rtems/freebsd/netinet6/ip6_output.c2928
-rw-r--r--rtems/freebsd/netinet6/ip6_var.h444
-rw-r--r--rtems/freebsd/netinet6/ip6protosw.h148
-rw-r--r--rtems/freebsd/netinet6/mld6.c3311
-rw-r--r--rtems/freebsd/netinet6/mld6.h112
-rw-r--r--rtems/freebsd/netinet6/mld6_var.h164
-rw-r--r--rtems/freebsd/netinet6/nd6.c2249
-rw-r--r--rtems/freebsd/netinet6/nd6.h455
-rw-r--r--rtems/freebsd/netinet6/nd6_nbr.c1514
-rw-r--r--rtems/freebsd/netinet6/nd6_rtr.c2162
-rw-r--r--rtems/freebsd/netinet6/pim6.h69
-rw-r--r--rtems/freebsd/netinet6/pim6_var.h68
-rw-r--r--rtems/freebsd/netinet6/raw_ip6.c905
-rw-r--r--rtems/freebsd/netinet6/raw_ip6.h55
-rw-r--r--rtems/freebsd/netinet6/route6.c111
-rw-r--r--rtems/freebsd/netinet6/scope6.c498
-rw-r--r--rtems/freebsd/netinet6/scope6_var.h60
-rw-r--r--rtems/freebsd/netinet6/sctp6_usrreq.c1319
-rw-r--r--rtems/freebsd/netinet6/sctp6_var.h61
-rw-r--r--rtems/freebsd/netinet6/tcp6_var.h83
-rw-r--r--rtems/freebsd/netinet6/udp6_usrreq.c1112
-rw-r--r--rtems/freebsd/netinet6/udp6_var.h75
-rw-r--r--rtems/freebsd/netipsec/ah.h56
-rw-r--r--rtems/freebsd/netipsec/ah_var.h82
-rw-r--r--rtems/freebsd/netipsec/esp.h69
-rw-r--r--rtems/freebsd/netipsec/esp_var.h81
-rw-r--r--rtems/freebsd/netipsec/ipcomp.h55
-rw-r--r--rtems/freebsd/netipsec/ipcomp_var.h74
-rw-r--r--rtems/freebsd/netipsec/ipip_var.h68
-rw-r--r--rtems/freebsd/netipsec/ipsec.c1749
-rw-r--r--rtems/freebsd/netipsec/ipsec.h453
-rw-r--r--rtems/freebsd/netipsec/ipsec6.h78
-rw-r--r--rtems/freebsd/netipsec/ipsec_input.c891
-rw-r--r--rtems/freebsd/netipsec/ipsec_mbuf.c329
-rw-r--r--rtems/freebsd/netipsec/ipsec_output.c892
-rw-r--r--rtems/freebsd/netipsec/key.c8086
-rw-r--r--rtems/freebsd/netipsec/key.h127
-rw-r--r--rtems/freebsd/netipsec/key_debug.c771
-rw-r--r--rtems/freebsd/netipsec/key_debug.h89
-rw-r--r--rtems/freebsd/netipsec/key_var.h74
-rw-r--r--rtems/freebsd/netipsec/keydb.h227
-rw-r--r--rtems/freebsd/netipsec/keysock.c584
-rw-r--r--rtems/freebsd/netipsec/keysock.h83
-rw-r--r--rtems/freebsd/netipsec/xform.h129
-rw-r--r--rtems/freebsd/netipsec/xform_ah.c1219
-rw-r--r--rtems/freebsd/netipsec/xform_esp.c1005
-rw-r--r--rtems/freebsd/netipsec/xform_ipcomp.c625
-rw-r--r--rtems/freebsd/netipsec/xform_ipip.c708
-rw-r--r--rtems/freebsd/netipsec/xform_tcp.c173
-rw-r--r--rtems/freebsd/opencrypto/cast.c246
-rw-r--r--rtems/freebsd/opencrypto/cast.h23
-rw-r--r--rtems/freebsd/opencrypto/castsb.h545
-rw-r--r--rtems/freebsd/opencrypto/criov.c200
-rw-r--r--rtems/freebsd/opencrypto/crypto.c1578
-rw-r--r--rtems/freebsd/opencrypto/cryptodev.c1178
-rw-r--r--rtems/freebsd/opencrypto/cryptodev.h432
-rw-r--r--rtems/freebsd/opencrypto/cryptosoft.c1156
-rw-r--r--rtems/freebsd/opencrypto/cryptosoft.h67
-rw-r--r--rtems/freebsd/opencrypto/deflate.c267
-rw-r--r--rtems/freebsd/opencrypto/deflate.h60
-rw-r--r--rtems/freebsd/opencrypto/rmd160.c369
-rw-r--r--rtems/freebsd/opencrypto/rmd160.h41
-rw-r--r--rtems/freebsd/opencrypto/skipjack.c262
-rw-r--r--rtems/freebsd/opencrypto/skipjack.h19
-rw-r--r--rtems/freebsd/opencrypto/xform.c815
-rw-r--r--rtems/freebsd/opencrypto/xform.h104
-rw-r--r--rtems/freebsd/rtems/rtems-bsd-assert.c39
-rw-r--r--rtems/freebsd/rtems/rtems-bsd-autoconf.c51
-rw-r--r--rtems/freebsd/rtems/rtems-bsd-bus-dma.c455
-rw-r--r--rtems/freebsd/rtems/rtems-bsd-callout.c122
-rw-r--r--rtems/freebsd/rtems/rtems-bsd-cam.c495
-rw-r--r--rtems/freebsd/rtems/rtems-bsd-condvar.c167
-rw-r--r--rtems/freebsd/rtems/rtems-bsd-delay.c45
-rw-r--r--rtems/freebsd/rtems/rtems-bsd-generic.c209
-rw-r--r--rtems/freebsd/rtems/rtems-bsd-init-with-irq.c46
-rw-r--r--rtems/freebsd/rtems/rtems-bsd-init.c65
-rw-r--r--rtems/freebsd/rtems/rtems-bsd-jail.c92
-rw-r--r--rtems/freebsd/rtems/rtems-bsd-lock.c45
-rw-r--r--rtems/freebsd/rtems/rtems-bsd-malloc.c77
-rw-r--r--rtems/freebsd/rtems/rtems-bsd-mutex.c314
-rw-r--r--rtems/freebsd/rtems/rtems-bsd-nexus.c71
-rw-r--r--rtems/freebsd/rtems/rtems-bsd-panic.c70
-rw-r--r--rtems/freebsd/rtems/rtems-bsd-prot.c142
-rw-r--r--rtems/freebsd/rtems/rtems-bsd-resource.c173
-rw-r--r--rtems/freebsd/rtems/rtems-bsd-rwlock.c340
-rw-r--r--rtems/freebsd/rtems/rtems-bsd-shell.c181
-rw-r--r--rtems/freebsd/rtems/rtems-bsd-signal.c33
-rw-r--r--rtems/freebsd/rtems/rtems-bsd-support.c75
-rw-r--r--rtems/freebsd/rtems/rtems-bsd-sx.c335
-rw-r--r--rtems/freebsd/rtems/rtems-bsd-synch.c274
-rw-r--r--rtems/freebsd/rtems/rtems-bsd-syscalls.c1487
-rw-r--r--rtems/freebsd/rtems/rtems-bsd-sysctl.c64
-rw-r--r--rtems/freebsd/rtems/rtems-bsd-sysctlbyname.c43
-rw-r--r--rtems/freebsd/rtems/rtems-bsd-sysctlnametomib.c67
-rw-r--r--rtems/freebsd/rtems/rtems-bsd-thread.c208
-rw-r--r--rtems/freebsd/rtems/rtems-bsd-uma.c2796
-rw-r--r--rtems/freebsd/security/audit/audit.h1
-rw-r--r--rtems/freebsd/security/mac/mac_framework.h441
-rw-r--r--rtems/freebsd/sys/_bus_dma.h63
-rw-r--r--rtems/freebsd/sys/_iovec.h48
-rw-r--r--rtems/freebsd/sys/_lock.h47
-rw-r--r--rtems/freebsd/sys/_lockmgr.h48
-rw-r--r--rtems/freebsd/sys/_mutex.h44
-rw-r--r--rtems/freebsd/sys/_null.h45
-rw-r--r--rtems/freebsd/sys/_pthreadtypes.h1
-rw-r--r--rtems/freebsd/sys/_rmlock.h62
-rw-r--r--rtems/freebsd/sys/_rwlock.h43
-rw-r--r--rtems/freebsd/sys/_semaphore.h73
-rw-r--r--rtems/freebsd/sys/_sigset.h59
-rw-r--r--rtems/freebsd/sys/_sx.h44
-rw-r--r--rtems/freebsd/sys/_task.h50
-rw-r--r--rtems/freebsd/sys/_timeval.h1
-rw-r--r--rtems/freebsd/sys/_types.h105
-rw-r--r--rtems/freebsd/sys/acl.h407
-rw-r--r--rtems/freebsd/sys/ata.h560
-rw-r--r--rtems/freebsd/sys/bio.h1
-rw-r--r--rtems/freebsd/sys/bitstring.h146
-rw-r--r--rtems/freebsd/sys/buf_ring.h277
-rw-r--r--rtems/freebsd/sys/bufobj.h131
-rw-r--r--rtems/freebsd/sys/bus.h748
-rw-r--r--rtems/freebsd/sys/bus_dma.h277
-rw-r--r--rtems/freebsd/sys/callout.h114
-rw-r--r--rtems/freebsd/sys/cdefs.h582
-rw-r--r--rtems/freebsd/sys/condvar.h87
-rw-r--r--rtems/freebsd/sys/conf.h345
-rw-r--r--rtems/freebsd/sys/copyright.h1
-rw-r--r--rtems/freebsd/sys/cpu.h173
-rw-r--r--rtems/freebsd/sys/cpuset.h1
-rw-r--r--rtems/freebsd/sys/ctype.h57
-rw-r--r--rtems/freebsd/sys/domain.h106
-rw-r--r--rtems/freebsd/sys/endian.h200
-rw-r--r--rtems/freebsd/sys/errno.h192
-rw-r--r--rtems/freebsd/sys/event.h279
-rw-r--r--rtems/freebsd/sys/eventhandler.h242
-rw-r--r--rtems/freebsd/sys/exec.h1
-rw-r--r--rtems/freebsd/sys/fail.h1
-rw-r--r--rtems/freebsd/sys/fcntl.h296
-rw-r--r--rtems/freebsd/sys/file.h307
-rw-r--r--rtems/freebsd/sys/filedesc.h145
-rw-r--r--rtems/freebsd/sys/filio.h64
-rw-r--r--rtems/freebsd/sys/fnv_hash.h68
-rw-r--r--rtems/freebsd/sys/hash.h121
-rw-r--r--rtems/freebsd/sys/interrupt.h186
-rw-r--r--rtems/freebsd/sys/ioccom.h80
-rw-r--r--rtems/freebsd/sys/jail.h385
-rw-r--r--rtems/freebsd/sys/kernel.h424
-rw-r--r--rtems/freebsd/sys/kobj.h257
-rw-r--r--rtems/freebsd/sys/kthread.h79
-rw-r--r--rtems/freebsd/sys/ktr.h282
-rw-r--r--rtems/freebsd/sys/libkern.h195
-rw-r--r--rtems/freebsd/sys/limits.h1
-rw-r--r--rtems/freebsd/sys/linker.h349
-rw-r--r--rtems/freebsd/sys/linker_set.h114
-rw-r--r--rtems/freebsd/sys/lock.h319
-rw-r--r--rtems/freebsd/sys/lock_profile.h75
-rw-r--r--rtems/freebsd/sys/lockmgr.h195
-rw-r--r--rtems/freebsd/sys/lockstat.h220
-rw-r--r--rtems/freebsd/sys/mac.h111
-rw-r--r--rtems/freebsd/sys/malloc.h189
-rw-r--r--rtems/freebsd/sys/mbuf.h1032
-rw-r--r--rtems/freebsd/sys/md5.h53
-rw-r--r--rtems/freebsd/sys/module.h218
-rw-r--r--rtems/freebsd/sys/mount.h798
-rw-r--r--rtems/freebsd/sys/mutex.h463
-rw-r--r--rtems/freebsd/sys/namei.h1
-rw-r--r--rtems/freebsd/sys/osd.h101
-rw-r--r--rtems/freebsd/sys/param.h320
-rw-r--r--rtems/freebsd/sys/pcpu.h228
-rw-r--r--rtems/freebsd/sys/poll.h104
-rw-r--r--rtems/freebsd/sys/priority.h130
-rw-r--r--rtems/freebsd/sys/priv.h517
-rw-r--r--rtems/freebsd/sys/proc.h915
-rw-r--r--rtems/freebsd/sys/protosw.h339
-rw-r--r--rtems/freebsd/sys/queue.h636
-rw-r--r--rtems/freebsd/sys/random.h66
-rw-r--r--rtems/freebsd/sys/reboot.h66
-rw-r--r--rtems/freebsd/sys/refcount.h68
-rw-r--r--rtems/freebsd/sys/resource.h176
-rw-r--r--rtems/freebsd/sys/resourcevar.h143
-rw-r--r--rtems/freebsd/sys/rman.h155
-rw-r--r--rtems/freebsd/sys/rmlock.h121
-rw-r--r--rtems/freebsd/sys/rtprio.h92
-rw-r--r--rtems/freebsd/sys/runq.h75
-rw-r--r--rtems/freebsd/sys/rwlock.h251
-rw-r--r--rtems/freebsd/sys/sbuf.h88
-rw-r--r--rtems/freebsd/sys/sched.h1
-rw-r--r--rtems/freebsd/sys/sdt.h232
-rw-r--r--rtems/freebsd/sys/select.h1
-rw-r--r--rtems/freebsd/sys/selinfo.h64
-rw-r--r--rtems/freebsd/sys/sf_buf.h56
-rw-r--r--rtems/freebsd/sys/sigio.h67
-rw-r--r--rtems/freebsd/sys/signal.h439
-rw-r--r--rtems/freebsd/sys/signalvar.h372
-rw-r--r--rtems/freebsd/sys/smp.h183
-rw-r--r--rtems/freebsd/sys/sockbuf.h223
-rw-r--r--rtems/freebsd/sys/socket.h691
-rw-r--r--rtems/freebsd/sys/socketvar.h393
-rw-r--r--rtems/freebsd/sys/sockio.h128
-rw-r--r--rtems/freebsd/sys/sockopt.h72
-rw-r--r--rtems/freebsd/sys/sockstate.h83
-rw-r--r--rtems/freebsd/sys/stat.h1
-rw-r--r--rtems/freebsd/sys/stddef.h42
-rw-r--r--rtems/freebsd/sys/stdint.h106
-rw-r--r--rtems/freebsd/sys/sx.h307
-rw-r--r--rtems/freebsd/sys/syscallsubr.h1
-rw-r--r--rtems/freebsd/sys/sysctl.h762
-rw-r--r--rtems/freebsd/sys/sysent.h1
-rw-r--r--rtems/freebsd/sys/syslimits.h1
-rw-r--r--rtems/freebsd/sys/syslog.h203
-rw-r--r--rtems/freebsd/sys/sysproto.h1
-rw-r--r--rtems/freebsd/sys/systm.h418
-rw-r--r--rtems/freebsd/sys/taskqueue.h161
-rw-r--r--rtems/freebsd/sys/time.h351
-rw-r--r--rtems/freebsd/sys/timespec.h1
-rw-r--r--rtems/freebsd/sys/tree.h765
-rw-r--r--rtems/freebsd/sys/ttycom.h146
-rw-r--r--rtems/freebsd/sys/types.h360
-rw-r--r--rtems/freebsd/sys/ucontext.h99
-rw-r--r--rtems/freebsd/sys/ucred.h110
-rw-r--r--rtems/freebsd/sys/uio.h117
-rw-r--r--rtems/freebsd/sys/unistd.h188
-rw-r--r--rtems/freebsd/time.h1
-rw-r--r--rtems/freebsd/vm/uma.h636
-rw-r--r--rtems/freebsd/vm/uma_dbg.h55
-rw-r--r--rtems/freebsd/vm/uma_int.h440
815 files changed, 489700 insertions, 0 deletions
diff --git a/Makefile b/Makefile
new file mode 100644
index 00000000..55298b75
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,354 @@
+RTEMS_MAKEFILE_PATH = /home/kirspelk/sandbox/opti_touch/b_rtems_arm/arm-rtems/c/lpc3250/make
+INSTALL_BASE = /home/kirspelk/sandbox/opti_touch/b_rtems_arm/arm-rtems/lpc3250/lib
+
+include $(RTEMS_MAKEFILE_PATH)/Makefile.inc
+include $(RTEMS_CUSTOM)
+include $(PROJECT_ROOT)/make/leaf.cfg
+include $(PROJECT_ROOT)/make/target.cfg
+
+CFLAGS += -ffreestanding -I . -I contrib/altq -I contrib/pf -B $(INSTALL_BASE) -w -std=gnu99
+
+C_FILES = \
+ rtems/freebsd/net/bridgestp.c \
+ rtems/freebsd/net/ieee8023ad_lacp.c \
+ rtems/freebsd/net/if_atmsubr.c \
+ rtems/freebsd/net/if.c \
+ rtems/freebsd/net/if_clone.c \
+ rtems/freebsd/net/if_dead.c \
+ rtems/freebsd/net/if_disc.c \
+ rtems/freebsd/net/if_edsc.c \
+ rtems/freebsd/net/if_ef.c \
+ rtems/freebsd/net/if_enc.c \
+ rtems/freebsd/net/if_epair.c \
+ rtems/freebsd/net/if_faith.c \
+ rtems/freebsd/net/if_fddisubr.c \
+ rtems/freebsd/net/if_fwsubr.c \
+ rtems/freebsd/net/if_gif.c \
+ rtems/freebsd/net/if_gre.c \
+ rtems/freebsd/net/if_iso88025subr.c \
+ rtems/freebsd/net/if_lagg.c \
+ rtems/freebsd/net/if_llatbl.c \
+ rtems/freebsd/net/if_loop.c \
+ rtems/freebsd/net/if_media.c \
+ rtems/freebsd/net/if_mib.c \
+ rtems/freebsd/net/if_spppfr.c \
+ rtems/freebsd/net/if_spppsubr.c \
+ rtems/freebsd/net/if_stf.c \
+ rtems/freebsd/net/if_tap.c \
+ rtems/freebsd/net/if_tun.c \
+ rtems/freebsd/net/if_vlan.c \
+ rtems/freebsd/net/pfil.c \
+ rtems/freebsd/net/radix.c \
+ rtems/freebsd/net/radix_mpath.c \
+ rtems/freebsd/net/raw_cb.c \
+ rtems/freebsd/net/raw_usrreq.c \
+ rtems/freebsd/net/route.c \
+ rtems/freebsd/net/rtsock.c \
+ rtems/freebsd/net/slcompress.c \
+ rtems/freebsd/net/zlib.c \
+ rtems/freebsd/net/bpf_buffer.c \
+ rtems/freebsd/net/bpf.c \
+ rtems/freebsd/net/bpf_filter.c \
+ rtems/freebsd/net/bpf_jitter.c \
+ rtems/freebsd/net/if_arcsubr.c \
+ rtems/freebsd/net/if_bridge.c \
+ rtems/freebsd/net/if_ethersubr.c \
+ rtems/freebsd/net/netisr.c \
+ rtems/freebsd/netinet/accf_data.c \
+ rtems/freebsd/netinet/accf_dns.c \
+ rtems/freebsd/netinet/accf_http.c \
+ rtems/freebsd/netinet/if_atm.c \
+ rtems/freebsd/netinet/if_ether.c \
+ rtems/freebsd/netinet/igmp.c \
+ rtems/freebsd/netinet/in.c \
+ rtems/freebsd/netinet/in_cksum.c \
+ rtems/freebsd/netinet/in_gif.c \
+ rtems/freebsd/netinet/in_mcast.c \
+ rtems/freebsd/netinet/in_pcb.c \
+ rtems/freebsd/netinet/in_proto.c \
+ rtems/freebsd/netinet/in_rmx.c \
+ rtems/freebsd/netinet/ip_carp.c \
+ rtems/freebsd/netinet/ip_divert.c \
+ rtems/freebsd/netinet/ip_ecn.c \
+ rtems/freebsd/netinet/ip_encap.c \
+ rtems/freebsd/netinet/ip_fastfwd.c \
+ rtems/freebsd/netinet/ip_gre.c \
+ rtems/freebsd/netinet/ip_icmp.c \
+ rtems/freebsd/netinet/ip_id.c \
+ rtems/freebsd/netinet/ip_input.c \
+ rtems/freebsd/netinet/ip_ipsec.c \
+ rtems/freebsd/netinet/ip_mroute.c \
+ rtems/freebsd/netinet/ip_options.c \
+ rtems/freebsd/netinet/ip_output.c \
+ rtems/freebsd/netinet/raw_ip.c \
+ rtems/freebsd/netinet/sctp_asconf.c \
+ rtems/freebsd/netinet/sctp_auth.c \
+ rtems/freebsd/netinet/sctp_bsd_addr.c \
+ rtems/freebsd/netinet/sctp_cc_functions.c \
+ rtems/freebsd/netinet/sctp_crc32.c \
+ rtems/freebsd/netinet/sctp_indata.c \
+ rtems/freebsd/netinet/sctp_input.c \
+ rtems/freebsd/netinet/sctp_output.c \
+ rtems/freebsd/netinet/sctp_pcb.c \
+ rtems/freebsd/netinet/sctp_peeloff.c \
+ rtems/freebsd/netinet/sctp_sysctl.c \
+ rtems/freebsd/netinet/sctp_timer.c \
+ rtems/freebsd/netinet/sctp_usrreq.c \
+ rtems/freebsd/netinet/sctputil.c \
+ rtems/freebsd/netinet/tcp_debug.c \
+ rtems/freebsd/netinet/tcp_input.c \
+ rtems/freebsd/netinet/tcp_lro.c \
+ rtems/freebsd/netinet/tcp_offload.c \
+ rtems/freebsd/netinet/tcp_output.c \
+ rtems/freebsd/netinet/tcp_reass.c \
+ rtems/freebsd/netinet/tcp_sack.c \
+ rtems/freebsd/netinet/tcp_subr.c \
+ rtems/freebsd/netinet/tcp_syncache.c \
+ rtems/freebsd/netinet/tcp_timer.c \
+ rtems/freebsd/netinet/tcp_timewait.c \
+ rtems/freebsd/netinet/tcp_usrreq.c \
+ rtems/freebsd/netinet/udp_usrreq.c \
+ rtems/freebsd/netinet/ipfw/dn_sched_fifo.c \
+ rtems/freebsd/netinet/ipfw/dn_sched_rr.c \
+ rtems/freebsd/netinet/ipfw/ip_fw_log.c \
+ rtems/freebsd/netinet/ipfw/dn_sched_qfq.c \
+ rtems/freebsd/netinet/ipfw/dn_sched_prio.c \
+ rtems/freebsd/netinet/ipfw/ip_dn_glue.c \
+ rtems/freebsd/netinet/ipfw/ip_fw2.c \
+ rtems/freebsd/netinet/ipfw/dn_heap.c \
+ rtems/freebsd/netinet/ipfw/ip_dummynet.c \
+ rtems/freebsd/netinet/ipfw/ip_fw_sockopt.c \
+ rtems/freebsd/netinet/ipfw/dn_sched_wf2q.c \
+ rtems/freebsd/netinet/ipfw/ip_fw_nat.c \
+ rtems/freebsd/netinet/ipfw/ip_fw_pfil.c \
+ rtems/freebsd/netinet/ipfw/ip_dn_io.c \
+ rtems/freebsd/netinet/ipfw/ip_fw_table.c \
+ rtems/freebsd/netinet/libalias/alias_dummy.c \
+ rtems/freebsd/netinet/libalias/alias_pptp.c \
+ rtems/freebsd/netinet/libalias/alias_smedia.c \
+ rtems/freebsd/netinet/libalias/alias_mod.c \
+ rtems/freebsd/netinet/libalias/alias_cuseeme.c \
+ rtems/freebsd/netinet/libalias/alias_nbt.c \
+ rtems/freebsd/netinet/libalias/alias_irc.c \
+ rtems/freebsd/netinet/libalias/alias_util.c \
+ rtems/freebsd/netinet/libalias/alias_db.c \
+ rtems/freebsd/netinet/libalias/alias_ftp.c \
+ rtems/freebsd/netinet/libalias/alias_proxy.c \
+ rtems/freebsd/netinet/libalias/alias.c \
+ rtems/freebsd/netinet/libalias/alias_skinny.c \
+ rtems/freebsd/netinet/libalias/alias_sctp.c \
+ rtems/freebsd/netinet6/dest6.c \
+ rtems/freebsd/netinet6/frag6.c \
+ rtems/freebsd/netinet6/icmp6.c \
+ rtems/freebsd/netinet6/in6.c \
+ rtems/freebsd/netinet6/in6_cksum.c \
+ rtems/freebsd/netinet6/in6_gif.c \
+ rtems/freebsd/netinet6/in6_ifattach.c \
+ rtems/freebsd/netinet6/in6_mcast.c \
+ rtems/freebsd/netinet6/in6_pcb.c \
+ rtems/freebsd/netinet6/in6_proto.c \
+ rtems/freebsd/netinet6/in6_rmx.c \
+ rtems/freebsd/netinet6/in6_src.c \
+ rtems/freebsd/netinet6/ip6_forward.c \
+ rtems/freebsd/netinet6/ip6_id.c \
+ rtems/freebsd/netinet6/ip6_input.c \
+ rtems/freebsd/netinet6/ip6_ipsec.c \
+ rtems/freebsd/netinet6/ip6_mroute.c \
+ rtems/freebsd/netinet6/ip6_output.c \
+ rtems/freebsd/netinet6/mld6.c \
+ rtems/freebsd/netinet6/nd6.c \
+ rtems/freebsd/netinet6/nd6_nbr.c \
+ rtems/freebsd/netinet6/nd6_rtr.c \
+ rtems/freebsd/netinet6/raw_ip6.c \
+ rtems/freebsd/netinet6/route6.c \
+ rtems/freebsd/netinet6/scope6.c \
+ rtems/freebsd/netinet6/sctp6_usrreq.c \
+ rtems/freebsd/netinet6/udp6_usrreq.c \
+ rtems/freebsd/netipsec/ipsec_input.c \
+ rtems/freebsd/netipsec/ipsec_mbuf.c \
+ rtems/freebsd/netipsec/ipsec_output.c \
+ rtems/freebsd/netipsec/key.c \
+ rtems/freebsd/netipsec/key_debug.c \
+ rtems/freebsd/netipsec/keysock.c \
+ rtems/freebsd/netipsec/xform_ah.c \
+ rtems/freebsd/netipsec/xform_esp.c \
+ rtems/freebsd/netipsec/xform_ipcomp.c \
+ rtems/freebsd/netipsec/xform_ipip.c \
+ rtems/freebsd/netipsec/xform_tcp.c \
+ rtems/freebsd/net80211/ieee80211_acl.c \
+ rtems/freebsd/net80211/ieee80211_action.c \
+ rtems/freebsd/net80211/ieee80211_adhoc.c \
+ rtems/freebsd/net80211/ieee80211_ageq.c \
+ rtems/freebsd/net80211/ieee80211_amrr.c \
+ rtems/freebsd/net80211/ieee80211.c \
+ rtems/freebsd/net80211/ieee80211_crypto.c \
+ rtems/freebsd/net80211/ieee80211_crypto_ccmp.c \
+ rtems/freebsd/net80211/ieee80211_crypto_none.c \
+ rtems/freebsd/net80211/ieee80211_crypto_tkip.c \
+ rtems/freebsd/net80211/ieee80211_crypto_wep.c \
+ rtems/freebsd/net80211/ieee80211_ddb.c \
+ rtems/freebsd/net80211/ieee80211_dfs.c \
+ rtems/freebsd/net80211/ieee80211_freebsd.c \
+ rtems/freebsd/net80211/ieee80211_hostap.c \
+ rtems/freebsd/net80211/ieee80211_ht.c \
+ rtems/freebsd/net80211/ieee80211_hwmp.c \
+ rtems/freebsd/net80211/ieee80211_input.c \
+ rtems/freebsd/net80211/ieee80211_ioctl.c \
+ rtems/freebsd/net80211/ieee80211_mesh.c \
+ rtems/freebsd/net80211/ieee80211_monitor.c \
+ rtems/freebsd/net80211/ieee80211_node.c \
+ rtems/freebsd/net80211/ieee80211_output.c \
+ rtems/freebsd/net80211/ieee80211_phy.c \
+ rtems/freebsd/net80211/ieee80211_power.c \
+ rtems/freebsd/net80211/ieee80211_proto.c \
+ rtems/freebsd/net80211/ieee80211_radiotap.c \
+ rtems/freebsd/net80211/ieee80211_ratectl.c \
+ rtems/freebsd/net80211/ieee80211_ratectl_none.c \
+ rtems/freebsd/net80211/ieee80211_regdomain.c \
+ rtems/freebsd/net80211/ieee80211_rssadapt.c \
+ rtems/freebsd/net80211/ieee80211_scan.c \
+ rtems/freebsd/net80211/ieee80211_scan_sta.c \
+ rtems/freebsd/net80211/ieee80211_sta.c \
+ rtems/freebsd/net80211/ieee80211_superg.c \
+ rtems/freebsd/net80211/ieee80211_tdma.c \
+ rtems/freebsd/net80211/ieee80211_wds.c \
+ rtems/freebsd/net80211/ieee80211_xauth.c \
+ rtems/freebsd/opencrypto/crypto.c \
+ rtems/freebsd/opencrypto/deflate.c \
+ rtems/freebsd/opencrypto/cryptosoft.c \
+ rtems/freebsd/opencrypto/criov.c \
+ rtems/freebsd/opencrypto/rmd160.c \
+ rtems/freebsd/opencrypto/xform.c \
+ rtems/freebsd/opencrypto/skipjack.c \
+ rtems/freebsd/opencrypto/cast.c \
+ rtems/freebsd/opencrypto/cryptodev.c \
+ rtems/freebsd/crypto/sha1.c \
+ rtems/freebsd/crypto/sha2/sha2.c \
+ rtems/freebsd/crypto/rijndael/rijndael-alg-fst.c \
+ rtems/freebsd/crypto/rijndael/rijndael-api.c \
+ rtems/freebsd/crypto/rijndael/rijndael-api-fst.c \
+ rtems/freebsd/crypto/des/des_setkey.c \
+ rtems/freebsd/crypto/des/des_enc.c \
+ rtems/freebsd/crypto/des/des_ecb.c \
+ rtems/freebsd/crypto/blowfish/bf_enc.c \
+ rtems/freebsd/crypto/blowfish/bf_skey.c \
+ rtems/freebsd/crypto/blowfish/bf_ecb.c \
+ rtems/freebsd/crypto/rc4/rc4.c \
+ rtems/freebsd/crypto/camellia/camellia-api.c \
+ rtems/freebsd/crypto/camellia/camellia.c \
+ contrib/altq/rtems/freebsd/altq/altq_rmclass.c \
+ contrib/altq/rtems/freebsd/altq/altq_rio.c \
+ contrib/altq/rtems/freebsd/altq/altq_subr.c \
+ contrib/altq/rtems/freebsd/altq/altq_cdnr.c \
+ contrib/altq/rtems/freebsd/altq/altq_priq.c \
+ contrib/altq/rtems/freebsd/altq/altq_cbq.c \
+ contrib/altq/rtems/freebsd/altq/altq_hfsc.c \
+ contrib/altq/rtems/freebsd/altq/altq_red.c \
+ contrib/pf/rtems/freebsd/netinet/in4_cksum.c \
+ contrib/pf/rtems/freebsd/net/pf.c \
+ contrib/pf/rtems/freebsd/net/if_pflog.c \
+ contrib/pf/rtems/freebsd/net/pf_subr.c \
+ contrib/pf/rtems/freebsd/net/pf_ioctl.c \
+ contrib/pf/rtems/freebsd/net/pf_table.c \
+ contrib/pf/rtems/freebsd/net/pf_if.c \
+ contrib/pf/rtems/freebsd/net/pf_osfp.c \
+ contrib/pf/rtems/freebsd/net/pf_norm.c \
+ contrib/pf/rtems/freebsd/net/pf_ruleset.c \
+ contrib/pf/rtems/freebsd/net/if_pfsync.c \
+ rtems/freebsd/dev/mii/mii.c \
+ rtems/freebsd/dev/mii/mii_physubr.c \
+ rtems/freebsd/dev/mii/icsphy.c \
+ rtems/freebsd/rtems/rtems-bsd-cam.c \
+ rtems/freebsd/rtems/rtems-bsd-nexus.c \
+ rtems/freebsd/rtems/rtems-bsd-autoconf.c \
+ rtems/freebsd/rtems/rtems-bsd-delay.c \
+ rtems/freebsd/rtems/rtems-bsd-mutex.c \
+ rtems/freebsd/rtems/rtems-bsd-thread.c \
+ rtems/freebsd/rtems/rtems-bsd-condvar.c \
+ rtems/freebsd/rtems/rtems-bsd-lock.c \
+ rtems/freebsd/rtems/rtems-bsd-sx.c \
+ rtems/freebsd/rtems/rtems-bsd-rwlock.c \
+ rtems/freebsd/rtems/rtems-bsd-generic.c \
+ rtems/freebsd/rtems/rtems-bsd-panic.c \
+ rtems/freebsd/rtems/rtems-bsd-synch.c \
+ rtems/freebsd/rtems/rtems-bsd-signal.c \
+ rtems/freebsd/rtems/rtems-bsd-callout.c \
+ rtems/freebsd/rtems/rtems-bsd-init.c \
+ rtems/freebsd/rtems/rtems-bsd-init-with-irq.c \
+ rtems/freebsd/rtems/rtems-bsd-assert.c \
+ rtems/freebsd/rtems/rtems-bsd-prot.c \
+ rtems/freebsd/rtems/rtems-bsd-resource.c \
+ rtems/freebsd/rtems/rtems-bsd-jail.c \
+ rtems/freebsd/rtems/rtems-bsd-shell.c \
+ rtems/freebsd/rtems/rtems-bsd-syscalls.c \
+ rtems/freebsd/rtems/rtems-bsd-malloc.c \
+ rtems/freebsd/rtems/rtems-bsd-support.c \
+ rtems/freebsd/rtems/rtems-bsd-bus-dma.c \
+ rtems/freebsd/rtems/rtems-bsd-sysctl.c \
+ rtems/freebsd/rtems/rtems-bsd-sysctlbyname.c \
+ rtems/freebsd/rtems/rtems-bsd-sysctlnametomib.c \
+ rtems/freebsd/rtems/rtems-bsd-uma.c \
+ rtems/freebsd/local/usb_if.c \
+ rtems/freebsd/local/bus_if.c \
+ rtems/freebsd/local/device_if.c \
+ rtems/freebsd/local/cryptodev_if.c \
+ rtems/freebsd/local/miibus_if.c \
+ rtems/freebsd/kern/init_main.c \
+ rtems/freebsd/kern/kern_mbuf.c \
+ rtems/freebsd/kern/kern_module.c \
+ rtems/freebsd/kern/kern_sysctl.c \
+ rtems/freebsd/kern/subr_bus.c \
+ rtems/freebsd/kern/subr_kobj.c \
+ rtems/freebsd/kern/uipc_mbuf.c \
+ rtems/freebsd/kern/uipc_mbuf2.c \
+ rtems/freebsd/kern/uipc_socket.c \
+ rtems/freebsd/dev/usb/usb_busdma.c \
+ rtems/freebsd/dev/usb/usb_core.c \
+ rtems/freebsd/dev/usb/usb_debug.c \
+ rtems/freebsd/dev/usb/usb_dev.c \
+ rtems/freebsd/dev/usb/usb_device.c \
+ rtems/freebsd/dev/usb/usb_dynamic.c \
+ rtems/freebsd/dev/usb/usb_error.c \
+ rtems/freebsd/dev/usb/usb_generic.c \
+ rtems/freebsd/dev/usb/usb_handle_request.c \
+ rtems/freebsd/dev/usb/usb_hid.c \
+ rtems/freebsd/dev/usb/usb_hub.c \
+ rtems/freebsd/dev/usb/usb_lookup.c \
+ rtems/freebsd/dev/usb/usb_mbuf.c \
+ rtems/freebsd/dev/usb/usb_msctest.c \
+ rtems/freebsd/dev/usb/usb_parse.c \
+ rtems/freebsd/dev/usb/usb_process.c \
+ rtems/freebsd/dev/usb/usb_request.c \
+ rtems/freebsd/dev/usb/usb_transfer.c \
+ rtems/freebsd/dev/usb/usb_util.c \
+ rtems/freebsd/dev/usb/quirk/usb_quirk.c \
+ rtems/freebsd/dev/usb/controller/ohci.c \
+ rtems/freebsd/dev/usb/controller/ehci.c \
+ rtems/freebsd/dev/usb/controller/usb_controller.c \
+ rtems/freebsd/dev/usb/controller/ohci_lpc3250.c \
+ rtems/freebsd/cam/cam.c \
+ rtems/freebsd/cam/scsi/scsi_all.c \
+ rtems/freebsd/dev/usb/storage/umass.c
+C_O_FILES = $(C_FILES:%.c=%.o)
+C_DEP_FILES = $(C_FILES:%.c=%.dep)
+
+LIB = libbsd.a
+
+all: lib_usb
+
+$(LIB): $(C_O_FILES)
+ $(AR) rcu $@ $^
+
+lib_usb:
+ export PATH=$(PATH):/usr/local/gcc-4.5.2/bin ; \
+	$(MAKE) $(LIB)
+
+install: $(LIB)
+ install -c -m 644 $(LIB) $(INSTALL_BASE)
+ for i in `find . -name '*.h'` ; do install -c -m 644 -D "$$i" "$(INSTALL_BASE)/include/$$i" ; done
+
+clean:
+ rm -f -r $(PROJECT_INCLUDE)/rtems/freebsd
+ rm -f $(LIB) $(C_O_FILES) $(C_DEP_FILES)
+
+-include $(C_DEP_FILES)
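
The Makefile above only builds libbsd.a and installs it with its headers under $(INSTALL_BASE). As a minimal sketch (not part of this commit), an application-side Makefile fragment consuming the installed library could look as follows; the `app` target and object list are purely illustrative:

# Hypothetical application fragment; INSTALL_BASE as defined in the Makefile above.
CFLAGS  += -I$(INSTALL_BASE)/include
LDFLAGS += -L$(INSTALL_BASE)
LDLIBS  += -lbsd

app: app.o
	$(CC) $(CFLAGS) $(LDFLAGS) -o $@ $^ $(LDLIBS)
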
diff --git a/contrib/altq/rtems/freebsd/altq/altq.h b/contrib/altq/rtems/freebsd/altq/altq.h
new file mode 100644
index 00000000..78ec2d8c
--- /dev/null
+++ b/contrib/altq/rtems/freebsd/altq/altq.h
@@ -0,0 +1,204 @@
+/* $FreeBSD$ */
+/* $KAME: altq.h,v 1.10 2003/07/10 12:07:47 kjc Exp $ */
+
+/*
+ * Copyright (C) 1998-2003
+ * Sony Computer Science Laboratories Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY SONY CSL AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL SONY CSL OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+#ifndef _ALTQ_ALTQ_HH_
+#define _ALTQ_ALTQ_HH_
+
+#if 0
+/*
+ * allow altq-3 (altqd(8) and /dev/altq) to coexist with the new pf-based altq.
+ * altq3 is mainly for research experiments. pf-based altq is for daily use.
+ */
+#define ALTQ3_COMPAT /* for compatibility with altq-3 */
+#define ALTQ3_CLFIER_COMPAT /* for compatibility with altq-3 classifier */
+#endif
+
+#ifdef ALTQ3_COMPAT
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/ioccom.h>
+#include <rtems/freebsd/sys/queue.h>
+#include <rtems/freebsd/netinet/in.h>
+
+#ifndef IFNAMSIZ
+#define IFNAMSIZ 16
+#endif
+#endif /* ALTQ3_COMPAT */
+
+/* altq discipline type */
+#define ALTQT_NONE 0 /* reserved */
+#define ALTQT_CBQ 1 /* cbq */
+#define ALTQT_WFQ 2 /* wfq */
+#define ALTQT_AFMAP 3 /* afmap */
+#define ALTQT_FIFOQ 4 /* fifoq */
+#define ALTQT_RED 5 /* red */
+#define ALTQT_RIO 6 /* rio */
+#define ALTQT_LOCALQ 7 /* local use */
+#define ALTQT_HFSC 8 /* hfsc */
+#define ALTQT_CDNR 9 /* traffic conditioner */
+#define ALTQT_BLUE 10 /* blue */
+#define ALTQT_PRIQ 11 /* priority queue */
+#define ALTQT_JOBS 12 /* JoBS */
+#define ALTQT_MAX 13 /* should be max discipline type + 1 */
+
+#ifdef ALTQ3_COMPAT
+struct altqreq {
+ char ifname[IFNAMSIZ]; /* if name, e.g. "en0" */
+ u_long arg; /* request-specific argument */
+};
+#endif
+
+/* simple token bucket meter profile */
+struct tb_profile {
+ u_int rate; /* rate in bit-per-sec */
+ u_int depth; /* depth in bytes */
+};
+
+#ifdef ALTQ3_COMPAT
+struct tbrreq {
+ char ifname[IFNAMSIZ]; /* if name, e.g. "en0" */
+ struct tb_profile tb_prof; /* token bucket profile */
+};
+
+#ifdef ALTQ3_CLFIER_COMPAT
+/*
+ * common network flow info structure
+ */
+struct flowinfo {
+ u_char fi_len; /* total length */
+ u_char fi_family; /* address family */
+ u_int8_t fi_data[46]; /* actually longer; address family
+ specific flow info. */
+};
+
+/*
+ * flow info structure for internet protocol family.
+ * (currently this is the only protocol family supported)
+ */
+struct flowinfo_in {
+ u_char fi_len; /* sizeof(struct flowinfo_in) */
+ u_char fi_family; /* AF_INET */
+ u_int8_t fi_proto; /* IPPROTO_XXX */
+ u_int8_t fi_tos; /* type-of-service */
+ struct in_addr fi_dst; /* dest address */
+ struct in_addr fi_src; /* src address */
+ u_int16_t fi_dport; /* dest port */
+ u_int16_t fi_sport; /* src port */
+ u_int32_t fi_gpi; /* generalized port id for ipsec */
+ u_int8_t _pad[28]; /* make the size equal to
+ flowinfo_in6 */
+};
+
+#ifdef SIN6_LEN
+struct flowinfo_in6 {
+ u_char fi6_len; /* sizeof(struct flowinfo_in6) */
+ u_char fi6_family; /* AF_INET6 */
+ u_int8_t fi6_proto; /* IPPROTO_XXX */
+ u_int8_t fi6_tclass; /* traffic class */
+ u_int32_t fi6_flowlabel; /* ipv6 flowlabel */
+ u_int16_t fi6_dport; /* dest port */
+ u_int16_t fi6_sport; /* src port */
+ u_int32_t fi6_gpi; /* generalized port id */
+ struct in6_addr fi6_dst; /* dest address */
+ struct in6_addr fi6_src; /* src address */
+};
+#endif /* SIN6_LEN */
+
+/*
+ * flow filters for AF_INET and AF_INET6
+ */
+struct flow_filter {
+ int ff_ruleno;
+ struct flowinfo_in ff_flow;
+ struct {
+ struct in_addr mask_dst;
+ struct in_addr mask_src;
+ u_int8_t mask_tos;
+ u_int8_t _pad[3];
+ } ff_mask;
+ u_int8_t _pad2[24]; /* make the size equal to flow_filter6 */
+};
+
+#ifdef SIN6_LEN
+struct flow_filter6 {
+ int ff_ruleno;
+ struct flowinfo_in6 ff_flow6;
+ struct {
+ struct in6_addr mask6_dst;
+ struct in6_addr mask6_src;
+ u_int8_t mask6_tclass;
+ u_int8_t _pad[3];
+ } ff_mask6;
+};
+#endif /* SIN6_LEN */
+#endif /* ALTQ3_CLFIER_COMPAT */
+#endif /* ALTQ3_COMPAT */
+
+/*
+ * generic packet counter
+ */
+struct pktcntr {
+ u_int64_t packets;
+ u_int64_t bytes;
+};
+
+#define PKTCNTR_ADD(cntr, len) \
+ do { (cntr)->packets++; (cntr)->bytes += len; } while (/*CONSTCOND*/ 0)
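+
+/*
+ * usage sketch (a hypothetical discipline accounting a transmitted
+ * packet; one call per packet, length in bytes):
+ *
+ *	PKTCNTR_ADD(&cl->stats_.xmit_cnt, m_pktlen(m));
+ */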
+
+#ifdef ALTQ3_COMPAT
+/*
+ * altq related ioctls
+ */
+#define ALTQGTYPE _IOWR('q', 0, struct altqreq) /* get queue type */
+#if 0
+/*
+ * these ioctls are currently discipline-specific but could be shared
+ * in the future.
+ */
+#define ALTQATTACH _IOW('q', 1, struct altqreq) /* attach discipline */
+#define ALTQDETACH _IOW('q', 2, struct altqreq) /* detach discipline */
+#define ALTQENABLE _IOW('q', 3, struct altqreq) /* enable discipline */
+#define ALTQDISABLE _IOW('q', 4, struct altqreq) /* disable discipline*/
+#define ALTQCLEAR _IOW('q', 5, struct altqreq) /* (re)initialize */
+#define ALTQCONFIG _IOWR('q', 6, struct altqreq) /* set config params */
+#define ALTQADDCLASS _IOWR('q', 7, struct altqreq) /* add a class */
+#define ALTQMODCLASS _IOWR('q', 8, struct altqreq) /* modify a class */
+#define ALTQDELCLASS _IOWR('q', 9, struct altqreq) /* delete a class */
+#define ALTQADDFILTER _IOWR('q', 10, struct altqreq) /* add a filter */
+#define ALTQDELFILTER _IOWR('q', 11, struct altqreq) /* delete a filter */
+#define ALTQGETSTATS _IOWR('q', 12, struct altqreq) /* get statistics */
+#define ALTQGETCNTR _IOWR('q', 13, struct altqreq) /* get a pkt counter */
+#endif /* 0 */
+#define ALTQTBRSET _IOW('q', 14, struct tbrreq) /* set tb regulator */
+#define ALTQTBRGET _IOWR('q', 15, struct tbrreq) /* get tb regulator */
+#endif /* ALTQ3_COMPAT */
+
+#ifdef _KERNEL
+#include <rtems/freebsd/altq/altq_var.h>
+#endif
+
+#endif /* _ALTQ_ALTQ_HH_ */
diff --git a/contrib/altq/rtems/freebsd/altq/altq_cbq.c b/contrib/altq/rtems/freebsd/altq/altq_cbq.c
new file mode 100644
index 00000000..27454d47
--- /dev/null
+++ b/contrib/altq/rtems/freebsd/altq/altq_cbq.c
@@ -0,0 +1,1187 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/* $FreeBSD$ */
+/* $KAME: altq_cbq.c,v 1.19 2003/09/17 14:23:25 kjc Exp $ */
+
+/*
+ * Copyright (c) Sun Microsystems, Inc. 1993-1998 All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the SMCC Technology
+ * Development Group at Sun Microsystems, Inc.
+ *
+ * 4. The name of Sun Microsystems, Inc may not be used to endorse or
+ * promote products derived from this software without specific prior
+ * written permission.
+ *
+ * SUN MICROSYSTEMS DOES NOT CLAIM MERCHANTABILITY OF THIS SOFTWARE OR THE
+ * SUITABILITY OF THIS SOFTWARE FOR ANY PARTICULAR PURPOSE. The software is
+ * provided "as is" without express or implied warranty of any kind.
+ *
+ * These notices must be retained in any copies of any part of this software.
+ */
+
+#if defined(__FreeBSD__) || defined(__NetBSD__)
+#include <rtems/freebsd/local/opt_altq.h>
+#if (__FreeBSD__ != 2)
+#include <rtems/freebsd/local/opt_inet.h>
+#ifdef __FreeBSD__
+#include <rtems/freebsd/local/opt_inet6.h>
+#endif
+#endif
+#endif /* __FreeBSD__ || __NetBSD__ */
+#ifdef ALTQ_CBQ /* cbq is enabled by ALTQ_CBQ option in opt_altq.h */
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/proc.h>
+#include <rtems/freebsd/sys/errno.h>
+#include <rtems/freebsd/sys/time.h>
+#ifdef ALTQ3_COMPAT
+#include <rtems/freebsd/sys/uio.h>
+#include <rtems/freebsd/sys/kernel.h>
+#endif
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/netinet/in.h>
+
+#include <rtems/freebsd/net/pfvar.h>
+#include <rtems/freebsd/altq/altq.h>
+#include <rtems/freebsd/altq/altq_cbq.h>
+#ifdef ALTQ3_COMPAT
+#include <rtems/freebsd/altq/altq_conf.h>
+#endif
+
+#ifdef ALTQ3_COMPAT
+/*
+ * Local Data structures.
+ */
+static cbq_state_t *cbq_list = NULL;
+#endif
+
+/*
+ * Forward Declarations.
+ */
+static int cbq_class_destroy(cbq_state_t *, struct rm_class *);
+static struct rm_class *clh_to_clp(cbq_state_t *, u_int32_t);
+static int cbq_clear_interface(cbq_state_t *);
+static int cbq_request(struct ifaltq *, int, void *);
+static int cbq_enqueue(struct ifaltq *, struct mbuf *,
+ struct altq_pktattr *);
+static struct mbuf *cbq_dequeue(struct ifaltq *, int);
+static void cbqrestart(struct ifaltq *);
+static void get_class_stats(class_stats_t *, struct rm_class *);
+static void cbq_purge(cbq_state_t *);
+#ifdef ALTQ3_COMPAT
+static int cbq_add_class(struct cbq_add_class *);
+static int cbq_delete_class(struct cbq_delete_class *);
+static int cbq_modify_class(struct cbq_modify_class *);
+static int cbq_class_create(cbq_state_t *, struct cbq_add_class *,
+ struct rm_class *, struct rm_class *);
+static int cbq_clear_hierarchy(struct cbq_interface *);
+static int cbq_set_enable(struct cbq_interface *, int);
+static int cbq_ifattach(struct cbq_interface *);
+static int cbq_ifdetach(struct cbq_interface *);
+static int cbq_getstats(struct cbq_getstats *);
+
+static int cbq_add_filter(struct cbq_add_filter *);
+static int cbq_delete_filter(struct cbq_delete_filter *);
+#endif /* ALTQ3_COMPAT */
+
+/*
+ * int
+ * cbq_class_destroy(cbq_state_t *, struct rm_class *) - This
+ * function destroys a given traffic class. Before destroying
+ * the class, all traffic for that class is released.
+ */
+static int
+cbq_class_destroy(cbq_state_t *cbqp, struct rm_class *cl)
+{
+ int i;
+
+ /* delete the class */
+ rmc_delete_class(&cbqp->ifnp, cl);
+
+ /*
+ * free the class handle
+ */
+ for (i = 0; i < CBQ_MAX_CLASSES; i++)
+ if (cbqp->cbq_class_tbl[i] == cl)
+ cbqp->cbq_class_tbl[i] = NULL;
+
+ if (cl == cbqp->ifnp.root_)
+ cbqp->ifnp.root_ = NULL;
+ if (cl == cbqp->ifnp.default_)
+ cbqp->ifnp.default_ = NULL;
+#ifdef ALTQ3_COMPAT
+ if (cl == cbqp->ifnp.ctl_)
+ cbqp->ifnp.ctl_ = NULL;
+#endif
+ return (0);
+}
+
+/* convert class handle to class pointer */
+static struct rm_class *
+clh_to_clp(cbq_state_t *cbqp, u_int32_t chandle)
+{
+ int i;
+ struct rm_class *cl;
+
+ if (chandle == 0)
+ return (NULL);
+ /*
+	 * first, optimistically try the slot matching the lower bits of
+	 * the handle; if that misses, fall back to a linear table search.
+ */
+ i = chandle % CBQ_MAX_CLASSES;
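+	/* e.g. with CBQ_MAX_CLASSES of 256, handle 0x103 maps to slot 3 */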
+ if ((cl = cbqp->cbq_class_tbl[i]) != NULL &&
+ cl->stats_.handle == chandle)
+ return (cl);
+ for (i = 0; i < CBQ_MAX_CLASSES; i++)
+ if ((cl = cbqp->cbq_class_tbl[i]) != NULL &&
+ cl->stats_.handle == chandle)
+ return (cl);
+ return (NULL);
+}
+
+static int
+cbq_clear_interface(cbq_state_t *cbqp)
+{
+ int again, i;
+ struct rm_class *cl;
+
+#ifdef ALTQ3_CLFIER_COMPAT
+ /* free the filters for this interface */
+ acc_discard_filters(&cbqp->cbq_classifier, NULL, 1);
+#endif
+
+ /* clear out the classes now */
+ do {
+ again = 0;
+ for (i = 0; i < CBQ_MAX_CLASSES; i++) {
+ if ((cl = cbqp->cbq_class_tbl[i]) != NULL) {
+ if (is_a_parent_class(cl))
+ again++;
+ else {
+ cbq_class_destroy(cbqp, cl);
+ cbqp->cbq_class_tbl[i] = NULL;
+ if (cl == cbqp->ifnp.root_)
+ cbqp->ifnp.root_ = NULL;
+ if (cl == cbqp->ifnp.default_)
+ cbqp->ifnp.default_ = NULL;
+#ifdef ALTQ3_COMPAT
+ if (cl == cbqp->ifnp.ctl_)
+ cbqp->ifnp.ctl_ = NULL;
+#endif
+ }
+ }
+ }
+ } while (again);
+
+ return (0);
+}
+
+static int
+cbq_request(struct ifaltq *ifq, int req, void *arg)
+{
+ cbq_state_t *cbqp = (cbq_state_t *)ifq->altq_disc;
+
+ IFQ_LOCK_ASSERT(ifq);
+
+ switch (req) {
+ case ALTRQ_PURGE:
+ cbq_purge(cbqp);
+ break;
+ }
+ return (0);
+}
+
+/* copy the stats info in rm_class to class_stats_t */
+static void
+get_class_stats(class_stats_t *statsp, struct rm_class *cl)
+{
+ statsp->xmit_cnt = cl->stats_.xmit_cnt;
+ statsp->drop_cnt = cl->stats_.drop_cnt;
+ statsp->over = cl->stats_.over;
+ statsp->borrows = cl->stats_.borrows;
+ statsp->overactions = cl->stats_.overactions;
+ statsp->delays = cl->stats_.delays;
+
+ statsp->depth = cl->depth_;
+ statsp->priority = cl->pri_;
+ statsp->maxidle = cl->maxidle_;
+ statsp->minidle = cl->minidle_;
+ statsp->offtime = cl->offtime_;
+ statsp->qmax = qlimit(cl->q_);
+ statsp->ns_per_byte = cl->ns_per_byte_;
+ statsp->wrr_allot = cl->w_allotment_;
+ statsp->qcnt = qlen(cl->q_);
+ statsp->avgidle = cl->avgidle_;
+
+ statsp->qtype = qtype(cl->q_);
+#ifdef ALTQ_RED
+ if (q_is_red(cl->q_))
+ red_getstats(cl->red_, &statsp->red[0]);
+#endif
+#ifdef ALTQ_RIO
+ if (q_is_rio(cl->q_))
+ rio_getstats((rio_t *)cl->red_, &statsp->red[0]);
+#endif
+}
+
+int
+cbq_pfattach(struct pf_altq *a)
+{
+ struct ifnet *ifp;
+ int s, error;
+
+ if ((ifp = ifunit(a->ifname)) == NULL || a->altq_disc == NULL)
+ return (EINVAL);
+#ifdef __NetBSD__
+ s = splnet();
+#else
+ s = splimp();
+#endif
+ error = altq_attach(&ifp->if_snd, ALTQT_CBQ, a->altq_disc,
+ cbq_enqueue, cbq_dequeue, cbq_request, NULL, NULL);
+ splx(s);
+ return (error);
+}
+
+int
+cbq_add_altq(struct pf_altq *a)
+{
+ cbq_state_t *cbqp;
+ struct ifnet *ifp;
+
+ if ((ifp = ifunit(a->ifname)) == NULL)
+ return (EINVAL);
+ if (!ALTQ_IS_READY(&ifp->if_snd))
+ return (ENODEV);
+
+ /* allocate and initialize cbq_state_t */
+ cbqp = malloc(sizeof(cbq_state_t), M_DEVBUF, M_WAITOK);
+ if (cbqp == NULL)
+ return (ENOMEM);
+ bzero(cbqp, sizeof(cbq_state_t));
+ CALLOUT_INIT(&cbqp->cbq_callout);
+ cbqp->cbq_qlen = 0;
+ cbqp->ifnp.ifq_ = &ifp->if_snd; /* keep the ifq */
+
+ /* keep the state in pf_altq */
+ a->altq_disc = cbqp;
+
+ return (0);
+}
+
+int
+cbq_remove_altq(struct pf_altq *a)
+{
+ cbq_state_t *cbqp;
+
+ if ((cbqp = a->altq_disc) == NULL)
+ return (EINVAL);
+ a->altq_disc = NULL;
+
+ cbq_clear_interface(cbqp);
+
+ if (cbqp->ifnp.default_)
+ cbq_class_destroy(cbqp, cbqp->ifnp.default_);
+ if (cbqp->ifnp.root_)
+ cbq_class_destroy(cbqp, cbqp->ifnp.root_);
+
+ /* deallocate cbq_state_t */
+ free(cbqp, M_DEVBUF);
+
+ return (0);
+}
+
+int
+cbq_add_queue(struct pf_altq *a)
+{
+ struct rm_class *borrow, *parent;
+ cbq_state_t *cbqp;
+ struct rm_class *cl;
+ struct cbq_opts *opts;
+ int i;
+
+ if ((cbqp = a->altq_disc) == NULL)
+ return (EINVAL);
+ if (a->qid == 0)
+ return (EINVAL);
+
+ /*
+ * find a free slot in the class table. if the slot matching
+ * the lower bits of qid is free, use this slot. otherwise,
+ * use the first free slot.
+ */
+ i = a->qid % CBQ_MAX_CLASSES;
+ if (cbqp->cbq_class_tbl[i] != NULL) {
+ for (i = 0; i < CBQ_MAX_CLASSES; i++)
+ if (cbqp->cbq_class_tbl[i] == NULL)
+ break;
+ if (i == CBQ_MAX_CLASSES)
+ return (EINVAL);
+ }
+
+ opts = &a->pq_u.cbq_opts;
+ /* check parameters */
+ if (a->priority >= CBQ_MAXPRI)
+ return (EINVAL);
+
+ /* Get pointers to parent and borrow classes. */
+ parent = clh_to_clp(cbqp, a->parent_qid);
+ if (opts->flags & CBQCLF_BORROW)
+ borrow = parent;
+ else
+ borrow = NULL;
+
+ /*
+	 * A class must borrow from its parent or it cannot
+	 * borrow at all.  Hence, borrow may be NULL.
+ */
+ if (parent == NULL && (opts->flags & CBQCLF_ROOTCLASS) == 0) {
+ printf("cbq_add_queue: no parent class!\n");
+ return (EINVAL);
+ }
+
+ if ((borrow != parent) && (borrow != NULL)) {
+		printf("cbq_add_queue: borrow class != parent\n");
+ return (EINVAL);
+ }
+
+ /*
+ * check parameters
+ */
+ switch (opts->flags & CBQCLF_CLASSMASK) {
+ case CBQCLF_ROOTCLASS:
+ if (parent != NULL)
+ return (EINVAL);
+ if (cbqp->ifnp.root_)
+ return (EINVAL);
+ break;
+ case CBQCLF_DEFCLASS:
+ if (cbqp->ifnp.default_)
+ return (EINVAL);
+ break;
+ case 0:
+ if (a->qid == 0)
+ return (EINVAL);
+ break;
+ default:
+		/* more than one class flag set, or an unsupported class */
+ return (EINVAL);
+ }
+
+ /*
+ * create a class. if this is a root class, initialize the
+ * interface.
+ */
+ if ((opts->flags & CBQCLF_CLASSMASK) == CBQCLF_ROOTCLASS) {
+ rmc_init(cbqp->ifnp.ifq_, &cbqp->ifnp, opts->ns_per_byte,
+ cbqrestart, a->qlimit, RM_MAXQUEUED,
+ opts->maxidle, opts->minidle, opts->offtime,
+ opts->flags);
+ cl = cbqp->ifnp.root_;
+ } else {
+ cl = rmc_newclass(a->priority,
+ &cbqp->ifnp, opts->ns_per_byte,
+ rmc_delay_action, a->qlimit, parent, borrow,
+ opts->maxidle, opts->minidle, opts->offtime,
+ opts->pktsize, opts->flags);
+ }
+ if (cl == NULL)
+ return (ENOMEM);
+
+ /* return handle to user space. */
+ cl->stats_.handle = a->qid;
+ cl->stats_.depth = cl->depth_;
+
+ /* save the allocated class */
+ cbqp->cbq_class_tbl[i] = cl;
+
+ if ((opts->flags & CBQCLF_CLASSMASK) == CBQCLF_DEFCLASS)
+ cbqp->ifnp.default_ = cl;
+
+ return (0);
+}
+
+int
+cbq_remove_queue(struct pf_altq *a)
+{
+ struct rm_class *cl;
+ cbq_state_t *cbqp;
+ int i;
+
+ if ((cbqp = a->altq_disc) == NULL)
+ return (EINVAL);
+
+ if ((cl = clh_to_clp(cbqp, a->qid)) == NULL)
+ return (EINVAL);
+
+ /* if we are a parent class, then return an error. */
+ if (is_a_parent_class(cl))
+ return (EINVAL);
+
+ /* delete the class */
+ rmc_delete_class(&cbqp->ifnp, cl);
+
+ /*
+ * free the class handle
+ */
+ for (i = 0; i < CBQ_MAX_CLASSES; i++)
+ if (cbqp->cbq_class_tbl[i] == cl) {
+ cbqp->cbq_class_tbl[i] = NULL;
+ if (cl == cbqp->ifnp.root_)
+ cbqp->ifnp.root_ = NULL;
+ if (cl == cbqp->ifnp.default_)
+ cbqp->ifnp.default_ = NULL;
+ break;
+ }
+
+ return (0);
+}
+
+int
+cbq_getqstats(struct pf_altq *a, void *ubuf, int *nbytes)
+{
+ cbq_state_t *cbqp;
+ struct rm_class *cl;
+ class_stats_t stats;
+ int error = 0;
+
+ if ((cbqp = altq_lookup(a->ifname, ALTQT_CBQ)) == NULL)
+ return (EBADF);
+
+ if ((cl = clh_to_clp(cbqp, a->qid)) == NULL)
+ return (EINVAL);
+
+ if (*nbytes < sizeof(stats))
+ return (EINVAL);
+
+ get_class_stats(&stats, cl);
+
+ if ((error = copyout((caddr_t)&stats, ubuf, sizeof(stats))) != 0)
+ return (error);
+ *nbytes = sizeof(stats);
+ return (0);
+}
+
+/*
+ * int
+ * cbq_enqueue(struct ifaltq *ifq, struct mbuf *m, struct altq_pktattr *pattr)
+ * - Queue data packets.
+ *
+ * cbq_enqueue is set to ifp->if_altqenqueue and called by an upper
+ * layer (e.g. ether_output). cbq_enqueue queues the given packet
+ * to the cbq, then invokes the driver's start routine.
+ *
+ * Assumptions: called in splimp
+ * Returns: 0 if the queueing is successful.
+ * ENOBUFS if a packet dropping occurred as a result of
+ * the queueing.
+ */
+
+static int
+cbq_enqueue(struct ifaltq *ifq, struct mbuf *m, struct altq_pktattr *pktattr)
+{
+ cbq_state_t *cbqp = (cbq_state_t *)ifq->altq_disc;
+ struct rm_class *cl;
+ struct pf_mtag *t;
+ int len;
+
+ IFQ_LOCK_ASSERT(ifq);
+
+ /* grab class set by classifier */
+ if ((m->m_flags & M_PKTHDR) == 0) {
+ /* should not happen */
+#if defined(__NetBSD__) || defined(__OpenBSD__)\
+ || (defined(__FreeBSD__) && __FreeBSD_version >= 501113)
+ printf("altq: packet for %s does not have pkthdr\n",
+ ifq->altq_ifp->if_xname);
+#else
+ printf("altq: packet for %s%d does not have pkthdr\n",
+ ifq->altq_ifp->if_name, ifq->altq_ifp->if_unit);
+#endif
+ m_freem(m);
+ return (ENOBUFS);
+ }
+ cl = NULL;
+ if ((t = pf_find_mtag(m)) != NULL)
+ cl = clh_to_clp(cbqp, t->qid);
+#ifdef ALTQ3_COMPAT
+ else if ((ifq->altq_flags & ALTQF_CLASSIFY) && pktattr != NULL)
+ cl = pktattr->pattr_class;
+#endif
+ if (cl == NULL) {
+ cl = cbqp->ifnp.default_;
+ if (cl == NULL) {
+ m_freem(m);
+ return (ENOBUFS);
+ }
+ }
+#ifdef ALTQ3_COMPAT
+ if (pktattr != NULL)
+ cl->pktattr_ = pktattr; /* save proto hdr used by ECN */
+ else
+#endif
+ cl->pktattr_ = NULL;
+ len = m_pktlen(m);
+ if (rmc_queue_packet(cl, m) != 0) {
+ /* drop occurred. some mbuf was freed in rmc_queue_packet. */
+ PKTCNTR_ADD(&cl->stats_.drop_cnt, len);
+ return (ENOBUFS);
+ }
+
+ /* successfully queued. */
+ ++cbqp->cbq_qlen;
+ IFQ_INC_LEN(ifq);
+ return (0);
+}
+
+static struct mbuf *
+cbq_dequeue(struct ifaltq *ifq, int op)
+{
+ cbq_state_t *cbqp = (cbq_state_t *)ifq->altq_disc;
+ struct mbuf *m;
+
+ IFQ_LOCK_ASSERT(ifq);
+
+ m = rmc_dequeue_next(&cbqp->ifnp, op);
+
+ if (m && op == ALTDQ_REMOVE) {
+ --cbqp->cbq_qlen; /* decrement # of packets in cbq */
+ IFQ_DEC_LEN(ifq);
+
+ /* Update the class. */
+ rmc_update_class_util(&cbqp->ifnp);
+ }
+ return (m);
+}
+
+/*
+ * void
+ *	cbqrestart(struct ifaltq *) - Restart sending of data.
+ * called from rmc_restart in splimp via timeout after waking up
+ * a suspended class.
+ * Returns: NONE
+ */
+
+static void
+cbqrestart(struct ifaltq *ifq)
+{
+ cbq_state_t *cbqp;
+ struct ifnet *ifp;
+
+ IFQ_LOCK_ASSERT(ifq);
+
+ if (!ALTQ_IS_ENABLED(ifq))
+ /* cbq must have been detached */
+ return;
+
+ if ((cbqp = (cbq_state_t *)ifq->altq_disc) == NULL)
+ /* should not happen */
+ return;
+
+ ifp = ifq->altq_ifp;
+ if (ifp->if_start &&
+ cbqp->cbq_qlen > 0 && (ifp->if_drv_flags & IFF_DRV_OACTIVE) == 0) {
+ IFQ_UNLOCK(ifq);
+ (*ifp->if_start)(ifp);
+ IFQ_LOCK(ifq);
+ }
+}
+
+static void
+cbq_purge(cbq_state_t *cbqp)
+{
+ struct rm_class *cl;
+ int i;
+
+ for (i = 0; i < CBQ_MAX_CLASSES; i++)
+ if ((cl = cbqp->cbq_class_tbl[i]) != NULL)
+ rmc_dropall(cl);
+ if (ALTQ_IS_ENABLED(cbqp->ifnp.ifq_))
+ cbqp->ifnp.ifq_->ifq_len = 0;
+}
+#ifdef ALTQ3_COMPAT
+
+static int
+cbq_add_class(acp)
+ struct cbq_add_class *acp;
+{
+ char *ifacename;
+ struct rm_class *borrow, *parent;
+ cbq_state_t *cbqp;
+
+ ifacename = acp->cbq_iface.cbq_ifacename;
+ if ((cbqp = altq_lookup(ifacename, ALTQT_CBQ)) == NULL)
+ return (EBADF);
+
+ /* check parameters */
+ if (acp->cbq_class.priority >= CBQ_MAXPRI ||
+ acp->cbq_class.maxq > CBQ_MAXQSIZE)
+ return (EINVAL);
+
+ /* Get pointers to parent and borrow classes. */
+ parent = clh_to_clp(cbqp, acp->cbq_class.parent_class_handle);
+ borrow = clh_to_clp(cbqp, acp->cbq_class.borrow_class_handle);
+
+ /*
+	 * A class must borrow from its parent or it cannot
+	 * borrow at all.  Hence, borrow may be NULL.
+ */
+ if (parent == NULL && (acp->cbq_class.flags & CBQCLF_ROOTCLASS) == 0) {
+ printf("cbq_add_class: no parent class!\n");
+ return (EINVAL);
+ }
+
+ if ((borrow != parent) && (borrow != NULL)) {
+ printf("cbq_add_class: borrow class != parent\n");
+ return (EINVAL);
+ }
+
+ return cbq_class_create(cbqp, acp, parent, borrow);
+}
+
+static int
+cbq_delete_class(dcp)
+ struct cbq_delete_class *dcp;
+{
+ char *ifacename;
+ struct rm_class *cl;
+ cbq_state_t *cbqp;
+
+ ifacename = dcp->cbq_iface.cbq_ifacename;
+ if ((cbqp = altq_lookup(ifacename, ALTQT_CBQ)) == NULL)
+ return (EBADF);
+
+ if ((cl = clh_to_clp(cbqp, dcp->cbq_class_handle)) == NULL)
+ return (EINVAL);
+
+ /* if we are a parent class, then return an error. */
+ if (is_a_parent_class(cl))
+ return (EINVAL);
+
+ /* if a filter has a reference to this class delete the filter */
+ acc_discard_filters(&cbqp->cbq_classifier, cl, 0);
+
+ return cbq_class_destroy(cbqp, cl);
+}
+
+static int
+cbq_modify_class(acp)
+ struct cbq_modify_class *acp;
+{
+ char *ifacename;
+ struct rm_class *cl;
+ cbq_state_t *cbqp;
+
+ ifacename = acp->cbq_iface.cbq_ifacename;
+ if ((cbqp = altq_lookup(ifacename, ALTQT_CBQ)) == NULL)
+ return (EBADF);
+
+ /* Get pointer to this class */
+ if ((cl = clh_to_clp(cbqp, acp->cbq_class_handle)) == NULL)
+ return (EINVAL);
+
+ if (rmc_modclass(cl, acp->cbq_class.nano_sec_per_byte,
+ acp->cbq_class.maxq, acp->cbq_class.maxidle,
+ acp->cbq_class.minidle, acp->cbq_class.offtime,
+ acp->cbq_class.pktsize) < 0)
+ return (EINVAL);
+ return (0);
+}
+
+/*
+ * int
+ * cbq_class_create(cbq_state_t *cbqp, struct cbq_add_class *acp,
+ *	struct rm_class *parent, struct rm_class *borrow)
+ *
+ * This function creates a new traffic class in the CBQ class hierarchy
+ * from the given parameters.  The created class is either the root, the
+ * default, or a new dynamic class.  If CBQ is not yet initialized, the
+ * root class will be created.
+ */
+static int
+cbq_class_create(cbqp, acp, parent, borrow)
+ cbq_state_t *cbqp;
+ struct cbq_add_class *acp;
+ struct rm_class *parent, *borrow;
+{
+ struct rm_class *cl;
+ cbq_class_spec_t *spec = &acp->cbq_class;
+ u_int32_t chandle;
+ int i;
+
+ /*
+ * allocate class handle
+ */
+ for (i = 1; i < CBQ_MAX_CLASSES; i++)
+ if (cbqp->cbq_class_tbl[i] == NULL)
+ break;
+ if (i == CBQ_MAX_CLASSES)
+ return (EINVAL);
+ chandle = i; /* use the slot number as class handle */
+
+ /*
+ * create a class. if this is a root class, initialize the
+ * interface.
+ */
+ if ((spec->flags & CBQCLF_CLASSMASK) == CBQCLF_ROOTCLASS) {
+ rmc_init(cbqp->ifnp.ifq_, &cbqp->ifnp, spec->nano_sec_per_byte,
+ cbqrestart, spec->maxq, RM_MAXQUEUED,
+ spec->maxidle, spec->minidle, spec->offtime,
+ spec->flags);
+ cl = cbqp->ifnp.root_;
+ } else {
+ cl = rmc_newclass(spec->priority,
+ &cbqp->ifnp, spec->nano_sec_per_byte,
+ rmc_delay_action, spec->maxq, parent, borrow,
+ spec->maxidle, spec->minidle, spec->offtime,
+ spec->pktsize, spec->flags);
+ }
+ if (cl == NULL)
+ return (ENOMEM);
+
+ /* return handle to user space. */
+ acp->cbq_class_handle = chandle;
+
+ cl->stats_.handle = chandle;
+ cl->stats_.depth = cl->depth_;
+
+ /* save the allocated class */
+ cbqp->cbq_class_tbl[i] = cl;
+
+ if ((spec->flags & CBQCLF_CLASSMASK) == CBQCLF_DEFCLASS)
+ cbqp->ifnp.default_ = cl;
+ if ((spec->flags & CBQCLF_CLASSMASK) == CBQCLF_CTLCLASS)
+ cbqp->ifnp.ctl_ = cl;
+
+ return (0);
+}
+
+static int
+cbq_add_filter(afp)
+ struct cbq_add_filter *afp;
+{
+ char *ifacename;
+ cbq_state_t *cbqp;
+ struct rm_class *cl;
+
+ ifacename = afp->cbq_iface.cbq_ifacename;
+ if ((cbqp = altq_lookup(ifacename, ALTQT_CBQ)) == NULL)
+ return (EBADF);
+
+ /* Get the pointer to class. */
+ if ((cl = clh_to_clp(cbqp, afp->cbq_class_handle)) == NULL)
+ return (EINVAL);
+
+ return acc_add_filter(&cbqp->cbq_classifier, &afp->cbq_filter,
+ cl, &afp->cbq_filter_handle);
+}
+
+static int
+cbq_delete_filter(dfp)
+ struct cbq_delete_filter *dfp;
+{
+ char *ifacename;
+ cbq_state_t *cbqp;
+
+ ifacename = dfp->cbq_iface.cbq_ifacename;
+ if ((cbqp = altq_lookup(ifacename, ALTQT_CBQ)) == NULL)
+ return (EBADF);
+
+ return acc_delete_filter(&cbqp->cbq_classifier,
+ dfp->cbq_filter_handle);
+}
+
+/*
+ * cbq_clear_hierarchy deletes all classes and their filters on the
+ * given interface.
+ */
+static int
+cbq_clear_hierarchy(ifacep)
+ struct cbq_interface *ifacep;
+{
+ char *ifacename;
+ cbq_state_t *cbqp;
+
+ ifacename = ifacep->cbq_ifacename;
+ if ((cbqp = altq_lookup(ifacename, ALTQT_CBQ)) == NULL)
+ return (EBADF);
+
+ return cbq_clear_interface(cbqp);
+}
+
+/*
+ * static int
+ * cbq_set_enable(struct cbq_interface *, int) - this function processes the
+ * ioctl request to enable class based queueing. It searches the list
+ * of interfaces for the specified interface and then enables CBQ on
+ * that interface.
+ *
+ * Returns: 0, for no error.
+ *		EBADF, if the specified interface is not found.
+ */
+
+static int
+cbq_set_enable(ep, enable)
+ struct cbq_interface *ep;
+ int enable;
+{
+ int error = 0;
+ cbq_state_t *cbqp;
+ char *ifacename;
+
+ ifacename = ep->cbq_ifacename;
+ if ((cbqp = altq_lookup(ifacename, ALTQT_CBQ)) == NULL)
+ return (EBADF);
+
+ switch (enable) {
+ case ENABLE:
+ if (cbqp->ifnp.root_ == NULL || cbqp->ifnp.default_ == NULL ||
+ cbqp->ifnp.ctl_ == NULL) {
+ if (cbqp->ifnp.root_ == NULL)
+ printf("No Root Class for %s\n", ifacename);
+ if (cbqp->ifnp.default_ == NULL)
+ printf("No Default Class for %s\n", ifacename);
+ if (cbqp->ifnp.ctl_ == NULL)
+ printf("No Control Class for %s\n", ifacename);
+ error = EINVAL;
+ } else if ((error = altq_enable(cbqp->ifnp.ifq_)) == 0) {
+ cbqp->cbq_qlen = 0;
+ }
+ break;
+
+ case DISABLE:
+ error = altq_disable(cbqp->ifnp.ifq_);
+ break;
+ }
+ return (error);
+}
+
+static int
+cbq_getstats(gsp)
+ struct cbq_getstats *gsp;
+{
+ char *ifacename;
+ int i, n, nclasses;
+ cbq_state_t *cbqp;
+ struct rm_class *cl;
+ class_stats_t stats, *usp;
+ int error = 0;
+
+ ifacename = gsp->iface.cbq_ifacename;
+ nclasses = gsp->nclasses;
+ usp = gsp->stats;
+
+ if ((cbqp = altq_lookup(ifacename, ALTQT_CBQ)) == NULL)
+ return (EBADF);
+ if (nclasses <= 0)
+ return (EINVAL);
+
+ for (n = 0, i = 0; n < nclasses && i < CBQ_MAX_CLASSES; n++, i++) {
+ while ((cl = cbqp->cbq_class_tbl[i]) == NULL)
+ if (++i >= CBQ_MAX_CLASSES)
+ goto out;
+
+ get_class_stats(&stats, cl);
+ stats.handle = cl->stats_.handle;
+
+ if ((error = copyout((caddr_t)&stats, (caddr_t)usp++,
+ sizeof(stats))) != 0)
+ return (error);
+ }
+
+ out:
+ gsp->nclasses = n;
+ return (error);
+}
+
+static int
+cbq_ifattach(ifacep)
+ struct cbq_interface *ifacep;
+{
+ int error = 0;
+ char *ifacename;
+ cbq_state_t *new_cbqp;
+ struct ifnet *ifp;
+
+ ifacename = ifacep->cbq_ifacename;
+ if ((ifp = ifunit(ifacename)) == NULL)
+ return (ENXIO);
+ if (!ALTQ_IS_READY(&ifp->if_snd))
+ return (ENXIO);
+
+ /* allocate and initialize cbq_state_t */
+ new_cbqp = malloc(sizeof(cbq_state_t), M_DEVBUF, M_WAITOK);
+ if (new_cbqp == NULL)
+ return (ENOMEM);
+ bzero(new_cbqp, sizeof(cbq_state_t));
+ CALLOUT_INIT(&new_cbqp->cbq_callout);
+
+ new_cbqp->cbq_qlen = 0;
+ new_cbqp->ifnp.ifq_ = &ifp->if_snd; /* keep the ifq */
+
+ /*
+ * set CBQ to this ifnet structure.
+ */
+ error = altq_attach(&ifp->if_snd, ALTQT_CBQ, new_cbqp,
+ cbq_enqueue, cbq_dequeue, cbq_request,
+ &new_cbqp->cbq_classifier, acc_classify);
+ if (error) {
+ free(new_cbqp, M_DEVBUF);
+ return (error);
+ }
+
+ /* prepend to the list of cbq_state_t's. */
+ new_cbqp->cbq_next = cbq_list;
+ cbq_list = new_cbqp;
+
+ return (0);
+}
+
+static int
+cbq_ifdetach(ifacep)
+ struct cbq_interface *ifacep;
+{
+ char *ifacename;
+ cbq_state_t *cbqp;
+
+ ifacename = ifacep->cbq_ifacename;
+ if ((cbqp = altq_lookup(ifacename, ALTQT_CBQ)) == NULL)
+ return (EBADF);
+
+ (void)cbq_set_enable(ifacep, DISABLE);
+
+ cbq_clear_interface(cbqp);
+
+ /* remove CBQ from the ifnet structure. */
+ (void)altq_detach(cbqp->ifnp.ifq_);
+
+ /* remove from the list of cbq_state_t's. */
+ if (cbq_list == cbqp)
+ cbq_list = cbqp->cbq_next;
+ else {
+ cbq_state_t *cp;
+
+ for (cp = cbq_list; cp != NULL; cp = cp->cbq_next)
+ if (cp->cbq_next == cbqp) {
+ cp->cbq_next = cbqp->cbq_next;
+ break;
+ }
+ ASSERT(cp != NULL);
+ }
+
+ /* deallocate cbq_state_t */
+ free(cbqp, M_DEVBUF);
+
+ return (0);
+}
+
+/*
+ * cbq device interface
+ */
+
+altqdev_decl(cbq);
+
+int
+cbqopen(dev, flag, fmt, p)
+ dev_t dev;
+ int flag, fmt;
+#if (__FreeBSD_version > 500000)
+ struct thread *p;
+#else
+ struct proc *p;
+#endif
+{
+ return (0);
+}
+
+int
+cbqclose(dev, flag, fmt, p)
+ dev_t dev;
+ int flag, fmt;
+#if (__FreeBSD_version > 500000)
+ struct thread *p;
+#else
+ struct proc *p;
+#endif
+{
+ struct ifnet *ifp;
+ struct cbq_interface iface;
+ int err, error = 0;
+
+ while (cbq_list) {
+ ifp = cbq_list->ifnp.ifq_->altq_ifp;
+#if defined(__NetBSD__) || defined(__OpenBSD__)\
+ || (defined(__FreeBSD__) && __FreeBSD_version >= 501113)
+ sprintf(iface.cbq_ifacename, "%s", ifp->if_xname);
+#else
+ sprintf(iface.cbq_ifacename,
+ "%s%d", ifp->if_name, ifp->if_unit);
+#endif
+ err = cbq_ifdetach(&iface);
+ if (err != 0 && error == 0)
+ error = err;
+ }
+
+ return (error);
+}
+
+int
+cbqioctl(dev, cmd, addr, flag, p)
+ dev_t dev;
+ ioctlcmd_t cmd;
+ caddr_t addr;
+ int flag;
+#if (__FreeBSD_version > 500000)
+ struct thread *p;
+#else
+ struct proc *p;
+#endif
+{
+ int error = 0;
+
+ /* check cmd for superuser only */
+ switch (cmd) {
+ case CBQ_GETSTATS:
+		/* currently the only command that an ordinary user can call */
+ break;
+ default:
+#if (__FreeBSD_version > 700000)
+ error = priv_check(p, PRIV_ALTQ_MANAGE);
+#elif (__FreeBSD_version > 400000)
+ error = suser(p);
+#else
+ error = suser(p->p_ucred, &p->p_acflag);
+#endif
+ if (error)
+ return (error);
+ break;
+ }
+
+ switch (cmd) {
+
+ case CBQ_ENABLE:
+ error = cbq_set_enable((struct cbq_interface *)addr, ENABLE);
+ break;
+
+ case CBQ_DISABLE:
+ error = cbq_set_enable((struct cbq_interface *)addr, DISABLE);
+ break;
+
+ case CBQ_ADD_FILTER:
+ error = cbq_add_filter((struct cbq_add_filter *)addr);
+ break;
+
+ case CBQ_DEL_FILTER:
+ error = cbq_delete_filter((struct cbq_delete_filter *)addr);
+ break;
+
+ case CBQ_ADD_CLASS:
+ error = cbq_add_class((struct cbq_add_class *)addr);
+ break;
+
+ case CBQ_DEL_CLASS:
+ error = cbq_delete_class((struct cbq_delete_class *)addr);
+ break;
+
+ case CBQ_MODIFY_CLASS:
+ error = cbq_modify_class((struct cbq_modify_class *)addr);
+ break;
+
+ case CBQ_CLEAR_HIERARCHY:
+ error = cbq_clear_hierarchy((struct cbq_interface *)addr);
+ break;
+
+ case CBQ_IF_ATTACH:
+ error = cbq_ifattach((struct cbq_interface *)addr);
+ break;
+
+ case CBQ_IF_DETACH:
+ error = cbq_ifdetach((struct cbq_interface *)addr);
+ break;
+
+ case CBQ_GETSTATS:
+ error = cbq_getstats((struct cbq_getstats *)addr);
+ break;
+
+ default:
+ error = EINVAL;
+ break;
+ }
+
+ return error;
+}
+
+#if 0
+/* for debug */
+static void cbq_class_dump(int);
+
+static void cbq_class_dump(i)
+ int i;
+{
+ struct rm_class *cl;
+ rm_class_stats_t *s;
+ struct _class_queue_ *q;
+
+ if (cbq_list == NULL) {
+ printf("cbq_class_dump: no cbq_state found\n");
+ return;
+ }
+ cl = cbq_list->cbq_class_tbl[i];
+
+ printf("class %d cl=%p\n", i, cl);
+ if (cl != NULL) {
+ s = &cl->stats_;
+ q = cl->q_;
+
+ printf("pri=%d, depth=%d, maxrate=%d, allotment=%d\n",
+ cl->pri_, cl->depth_, cl->maxrate_, cl->allotment_);
+ printf("w_allotment=%d, bytes_alloc=%d, avgidle=%d, maxidle=%d\n",
+ cl->w_allotment_, cl->bytes_alloc_, cl->avgidle_,
+ cl->maxidle_);
+ printf("minidle=%d, offtime=%d, sleeping=%d, leaf=%d\n",
+ cl->minidle_, cl->offtime_, cl->sleeping_, cl->leaf_);
+ printf("handle=%d, depth=%d, packets=%d, bytes=%d\n",
+ s->handle, s->depth,
+ (int)s->xmit_cnt.packets, (int)s->xmit_cnt.bytes);
+		printf("over=%d, borrows=%d, drops=%d, overactions=%d, delays=%d\n",
+ s->over, s->borrows, (int)s->drop_cnt.packets,
+ s->overactions, s->delays);
+ printf("tail=%p, head=%p, qlen=%d, qlim=%d, qthresh=%d,qtype=%d\n",
+ q->tail_, q->head_, q->qlen_, q->qlim_,
+ q->qthresh_, q->qtype_);
+ }
+}
+#endif /* 0 */
+
+#ifdef KLD_MODULE
+
+static struct altqsw cbq_sw =
+ {"cbq", cbqopen, cbqclose, cbqioctl};
+
+ALTQ_MODULE(altq_cbq, ALTQT_CBQ, &cbq_sw);
+MODULE_DEPEND(altq_cbq, altq_red, 1, 1, 1);
+MODULE_DEPEND(altq_cbq, altq_rio, 1, 1, 1);
+
+#endif /* KLD_MODULE */
+#endif /* ALTQ3_COMPAT */
+
+#endif /* ALTQ_CBQ */
diff --git a/contrib/altq/rtems/freebsd/altq/altq_cbq.h b/contrib/altq/rtems/freebsd/altq/altq_cbq.h
new file mode 100644
index 00000000..ecc730c7
--- /dev/null
+++ b/contrib/altq/rtems/freebsd/altq/altq_cbq.h
@@ -0,0 +1,221 @@
+/* $KAME: altq_cbq.h,v 1.12 2003/10/03 05:05:15 kjc Exp $ */
+
+/*
+ * Copyright (c) Sun Microsystems, Inc. 1993-1998 All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the SMCC Technology
+ * Development Group at Sun Microsystems, Inc.
+ *
+ * 4. The name of Sun Microsystems, Inc may not be used to endorse or
+ * promote products derived from this software without specific prior
+ * written permission.
+ *
+ * SUN MICROSYSTEMS DOES NOT CLAIM MERCHANTABILITY OF THIS SOFTWARE OR THE
+ * SUITABILITY OF THIS SOFTWARE FOR ANY PARTICULAR PURPOSE. The software is
+ * provided "as is" without express or implied warranty of any kind.
+ *
+ * These notices must be retained in any copies of any part of this software.
+ */
+
+#ifndef _ALTQ_ALTQ_CBQ_HH_
+#define _ALTQ_ALTQ_CBQ_HH_
+
+#include <rtems/freebsd/altq/altq.h>
+#include <rtems/freebsd/altq/altq_rmclass.h>
+#include <rtems/freebsd/altq/altq_red.h>
+#include <rtems/freebsd/altq/altq_rio.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define NULL_CLASS_HANDLE 0
+
+/* class flags should be same as class flags in rm_class.h */
+#define CBQCLF_RED 0x0001 /* use RED */
+#define CBQCLF_ECN 0x0002 /* use RED/ECN */
+#define CBQCLF_RIO 0x0004 /* use RIO */
+#define CBQCLF_FLOWVALVE 0x0008 /* use flowvalve (aka penalty-box) */
+#define CBQCLF_CLEARDSCP 0x0010 /* clear diffserv codepoint */
+#define CBQCLF_BORROW 0x0020 /* borrow from parent */
+
+/* class flags only for root class */
+#define CBQCLF_WRR 0x0100 /* weighted-round robin */
+#define CBQCLF_EFFICIENT 0x0200 /* work-conserving */
+
+/* class flags for special classes */
+#define CBQCLF_ROOTCLASS 0x1000 /* root class */
+#define CBQCLF_DEFCLASS 0x2000 /* default class */
+#ifdef ALTQ3_COMPAT
+#define CBQCLF_CTLCLASS 0x4000 /* control class */
+#endif
+#define CBQCLF_CLASSMASK 0xf000 /* class mask */
+
+#define CBQ_MAXQSIZE 200
+#define CBQ_MAXPRI RM_MAXPRIO
+
+typedef struct _cbq_class_stats_ {
+ u_int32_t handle;
+ u_int depth;
+
+ struct pktcntr xmit_cnt; /* packets sent in this class */
+ struct pktcntr drop_cnt; /* dropped packets */
+ u_int over; /* # times went over limit */
+ u_int borrows; /* # times tried to borrow */
+ u_int overactions; /* # times invoked overlimit action */
+ u_int delays; /* # times invoked delay actions */
+
+ /* other static class parameters useful for debugging */
+ int priority;
+ int maxidle;
+ int minidle;
+ int offtime;
+ int qmax;
+ int ns_per_byte;
+ int wrr_allot;
+
+ int qcnt; /* # packets in queue */
+ int avgidle;
+
+ /* red and rio related info */
+ int qtype;
+ struct redstats red[3];
+} class_stats_t;
+
+#ifdef ALTQ3_COMPAT
+/*
+ * Define structures associated with IOCTLS for cbq.
+ */
+
+/*
+ * Define the CBQ interface structure. This must be included in all
+ * IOCTL's such that the CBQ driver may find the appropriate CBQ module
+ * associated with the network interface to be affected.
+ */
+struct cbq_interface {
+ char cbq_ifacename[IFNAMSIZ];
+};
+
+typedef struct cbq_class_spec {
+ u_int priority;
+ u_int nano_sec_per_byte;
+ u_int maxq;
+ u_int maxidle;
+ int minidle;
+ u_int offtime;
+ u_int32_t parent_class_handle;
+ u_int32_t borrow_class_handle;
+
+ u_int pktsize;
+ int flags;
+} cbq_class_spec_t;
+
+struct cbq_add_class {
+ struct cbq_interface cbq_iface;
+
+ cbq_class_spec_t cbq_class;
+ u_int32_t cbq_class_handle;
+};
+
+struct cbq_delete_class {
+ struct cbq_interface cbq_iface;
+ u_int32_t cbq_class_handle;
+};
+
+struct cbq_modify_class {
+ struct cbq_interface cbq_iface;
+
+ cbq_class_spec_t cbq_class;
+ u_int32_t cbq_class_handle;
+};
+
+struct cbq_add_filter {
+ struct cbq_interface cbq_iface;
+ u_int32_t cbq_class_handle;
+ struct flow_filter cbq_filter;
+
+ u_long cbq_filter_handle;
+};
+
+struct cbq_delete_filter {
+ struct cbq_interface cbq_iface;
+ u_long cbq_filter_handle;
+};
+
+/* the number of classes is returned in the nclasses field */
+struct cbq_getstats {
+ struct cbq_interface iface;
+ int nclasses;
+ class_stats_t *stats;
+};
+
+/*
+ * Define IOCTLs for CBQ.
+ */
+#define CBQ_IF_ATTACH _IOW('Q', 1, struct cbq_interface)
+#define CBQ_IF_DETACH _IOW('Q', 2, struct cbq_interface)
+#define CBQ_ENABLE _IOW('Q', 3, struct cbq_interface)
+#define CBQ_DISABLE _IOW('Q', 4, struct cbq_interface)
+#define CBQ_CLEAR_HIERARCHY _IOW('Q', 5, struct cbq_interface)
+#define CBQ_ADD_CLASS _IOWR('Q', 7, struct cbq_add_class)
+#define CBQ_DEL_CLASS _IOW('Q', 8, struct cbq_delete_class)
+#define CBQ_MODIFY_CLASS _IOWR('Q', 9, struct cbq_modify_class)
+#define CBQ_ADD_FILTER _IOWR('Q', 10, struct cbq_add_filter)
+#define CBQ_DEL_FILTER _IOW('Q', 11, struct cbq_delete_filter)
+#define CBQ_GETSTATS _IOWR('Q', 12, struct cbq_getstats)
+#endif /* ALTQ3_COMPAT */
+
+#ifdef _KERNEL
+/*
+ * Define macros only good for kernel drivers and modules.
+ */
+#define CBQ_WATCHDOG (hz / 20)
+#define CBQ_TIMEOUT 10
+#define CBQ_LS_TIMEOUT (20 * hz / 1000)
+
+#define CBQ_MAX_CLASSES 256
+
+#ifdef ALTQ3_COMPAT
+#define CBQ_MAX_FILTERS 256
+
+#define DISABLE 0x00
+#define ENABLE 0x01
+#endif /* ALTQ3_COMPAT */
+
+/*
+ * Define State structures.
+ */
+typedef struct cbqstate {
+#ifdef ALTQ3_COMPAT
+ struct cbqstate *cbq_next;
+#endif
+ int cbq_qlen; /* # of packets in cbq */
+ struct rm_class *cbq_class_tbl[CBQ_MAX_CLASSES];
+
+ struct rm_ifdat ifnp;
+ struct callout cbq_callout; /* for timeouts */
+#ifdef ALTQ3_CLFIER_COMPAT
+ struct acc_classifier cbq_classifier;
+#endif
+} cbq_state_t;
+
+#endif /* _KERNEL */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* !_ALTQ_ALTQ_CBQ_HH_ */
diff --git a/contrib/altq/rtems/freebsd/altq/altq_cdnr.c b/contrib/altq/rtems/freebsd/altq/altq_cdnr.c
new file mode 100644
index 00000000..636d4b79
--- /dev/null
+++ b/contrib/altq/rtems/freebsd/altq/altq_cdnr.c
@@ -0,0 +1,1393 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/* $FreeBSD$ */
+/* $KAME: altq_cdnr.c,v 1.14 2003/09/05 22:40:36 itojun Exp $ */
+
+/*
+ * Copyright (C) 1999-2002
+ * Sony Computer Science Laboratories Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY SONY CSL AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL SONY CSL OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#if defined(__FreeBSD__) || defined(__NetBSD__)
+#include <rtems/freebsd/local/opt_altq.h>
+#if (__FreeBSD__ != 2)
+#include <rtems/freebsd/local/opt_inet.h>
+#ifdef __FreeBSD__
+#include <rtems/freebsd/local/opt_inet6.h>
+#endif
+#endif
+#endif /* __FreeBSD__ || __NetBSD__ */
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/sockio.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/proc.h>
+#include <rtems/freebsd/sys/errno.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/queue.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/if_types.h>
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/in_systm.h>
+#include <rtems/freebsd/netinet/ip.h>
+#ifdef INET6
+#include <rtems/freebsd/netinet/ip6.h>
+#endif
+
+#include <rtems/freebsd/altq/altq.h>
+#ifdef ALTQ3_COMPAT
+#include <rtems/freebsd/altq/altq_conf.h>
+#endif
+#include <rtems/freebsd/altq/altq_cdnr.h>
+
+#ifdef ALTQ3_COMPAT
+/*
+ * diffserv traffic conditioning module
+ */
+
+int altq_cdnr_enabled = 0;
+
+/* traffic conditioner is enabled by ALTQ_CDNR option in opt_altq.h */
+#ifdef ALTQ_CDNR
+
+/* tcb_list keeps all top-level cdnr's allocated. */
+static LIST_HEAD(, top_cdnr) tcb_list;
+
+static int altq_cdnr_input(struct mbuf *, int);
+static struct top_cdnr *tcb_lookup(char *ifname);
+static struct cdnr_block *cdnr_handle2cb(u_long);
+static u_long cdnr_cb2handle(struct cdnr_block *);
+static void *cdnr_cballoc(struct top_cdnr *, int,
+ struct tc_action *(*)(struct cdnr_block *, struct cdnr_pktinfo *));
+static void cdnr_cbdestroy(void *);
+static int tca_verify_action(struct tc_action *);
+static void tca_import_action(struct tc_action *, struct tc_action *);
+static void tca_invalidate_action(struct tc_action *);
+
+static int generic_element_destroy(struct cdnr_block *);
+static struct top_cdnr *top_create(struct ifaltq *);
+static int top_destroy(struct top_cdnr *);
+static struct cdnr_block *element_create(struct top_cdnr *, struct tc_action *);
+static int element_destroy(struct cdnr_block *);
+static void tb_import_profile(struct tbe *, struct tb_profile *);
+static struct tbmeter *tbm_create(struct top_cdnr *, struct tb_profile *,
+ struct tc_action *, struct tc_action *);
+static int tbm_destroy(struct tbmeter *);
+static struct tc_action *tbm_input(struct cdnr_block *, struct cdnr_pktinfo *);
+static struct trtcm *trtcm_create(struct top_cdnr *,
+ struct tb_profile *, struct tb_profile *,
+ struct tc_action *, struct tc_action *, struct tc_action *,
+ int);
+static int trtcm_destroy(struct trtcm *);
+static struct tc_action *trtcm_input(struct cdnr_block *, struct cdnr_pktinfo *);
+static struct tswtcm *tswtcm_create(struct top_cdnr *,
+ u_int32_t, u_int32_t, u_int32_t,
+ struct tc_action *, struct tc_action *, struct tc_action *);
+static int tswtcm_destroy(struct tswtcm *);
+static struct tc_action *tswtcm_input(struct cdnr_block *, struct cdnr_pktinfo *);
+
+static int cdnrcmd_if_attach(char *);
+static int cdnrcmd_if_detach(char *);
+static int cdnrcmd_add_element(struct cdnr_add_element *);
+static int cdnrcmd_delete_element(struct cdnr_delete_element *);
+static int cdnrcmd_add_filter(struct cdnr_add_filter *);
+static int cdnrcmd_delete_filter(struct cdnr_delete_filter *);
+static int cdnrcmd_add_tbm(struct cdnr_add_tbmeter *);
+static int cdnrcmd_modify_tbm(struct cdnr_modify_tbmeter *);
+static int cdnrcmd_tbm_stats(struct cdnr_tbmeter_stats *);
+static int cdnrcmd_add_trtcm(struct cdnr_add_trtcm *);
+static int cdnrcmd_modify_trtcm(struct cdnr_modify_trtcm *);
+static int cdnrcmd_tcm_stats(struct cdnr_tcm_stats *);
+static int cdnrcmd_add_tswtcm(struct cdnr_add_tswtcm *);
+static int cdnrcmd_modify_tswtcm(struct cdnr_modify_tswtcm *);
+static int cdnrcmd_get_stats(struct cdnr_get_stats *);
+
+altqdev_decl(cdnr);
+
+/*
+ * top level input function called from ip_input.
+ * should be called before converting header fields to host-byte-order.
+ */
+int
+altq_cdnr_input(m, af)
+ struct mbuf *m;
+ int af; /* address family */
+{
+ struct ifnet *ifp;
+ struct ip *ip;
+ struct top_cdnr *top;
+ struct tc_action *tca;
+ struct cdnr_block *cb;
+ struct cdnr_pktinfo pktinfo;
+
+ ifp = m->m_pkthdr.rcvif;
+ if (!ALTQ_IS_CNDTNING(&ifp->if_snd))
+ /* traffic conditioner is not enabled on this interface */
+ return (1);
+
+ top = ifp->if_snd.altq_cdnr;
+
+ ip = mtod(m, struct ip *);
+#ifdef INET6
+ if (af == AF_INET6) {
+ u_int32_t flowlabel;
+
+ flowlabel = ((struct ip6_hdr *)ip)->ip6_flow;
+ pktinfo.pkt_dscp = (ntohl(flowlabel) >> 20) & DSCP_MASK;
+ } else
+#endif
+ pktinfo.pkt_dscp = ip->ip_tos & DSCP_MASK;
+ pktinfo.pkt_len = m_pktlen(m);
+
+ tca = NULL;
+
+ cb = acc_classify(&top->tc_classifier, m, af);
+ if (cb != NULL)
+ tca = &cb->cb_action;
+
+ if (tca == NULL)
+ tca = &top->tc_block.cb_action;
+
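+	/*
+	 * walk the chain of conditioner actions until a terminal code
+	 * (pass, drop, return or mark) is reached.
+	 */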
+ while (1) {
+ PKTCNTR_ADD(&top->tc_cnts[tca->tca_code], pktinfo.pkt_len);
+
+ switch (tca->tca_code) {
+ case TCACODE_PASS:
+ return (1);
+ case TCACODE_DROP:
+ m_freem(m);
+ return (0);
+ case TCACODE_RETURN:
+ return (0);
+ case TCACODE_MARK:
+#ifdef INET6
+ if (af == AF_INET6) {
+ struct ip6_hdr *ip6 = (struct ip6_hdr *)ip;
+ u_int32_t flowlabel;
+
+ flowlabel = ntohl(ip6->ip6_flow);
+ flowlabel = (tca->tca_dscp << 20) |
+ (flowlabel & ~(DSCP_MASK << 20));
+ ip6->ip6_flow = htonl(flowlabel);
+ } else
+#endif
+ ip->ip_tos = tca->tca_dscp |
+ (ip->ip_tos & DSCP_CUMASK);
+ return (1);
+ case TCACODE_NEXT:
+ cb = tca->tca_next;
+ tca = (*cb->cb_input)(cb, &pktinfo);
+ break;
+ case TCACODE_NONE:
+ default:
+ return (1);
+ }
+ }
+}
+
+static struct top_cdnr *
+tcb_lookup(ifname)
+ char *ifname;
+{
+ struct top_cdnr *top;
+ struct ifnet *ifp;
+
+ if ((ifp = ifunit(ifname)) != NULL)
+ LIST_FOREACH(top, &tcb_list, tc_next)
+ if (top->tc_ifq->altq_ifp == ifp)
+ return (top);
+ return (NULL);
+}
+
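+/*
+ * a conditioner handle is simply the kernel address of its cdnr block
+ * (see cdnr_cballoc); the alignment test and the stored cb_handle
+ * round-trip below reject stale or forged handles from user space.
+ */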
+static struct cdnr_block *
+cdnr_handle2cb(handle)
+ u_long handle;
+{
+ struct cdnr_block *cb;
+
+ cb = (struct cdnr_block *)handle;
+ if (handle != ALIGN(cb))
+ return (NULL);
+
+ if (cb == NULL || cb->cb_handle != handle)
+ return (NULL);
+ return (cb);
+}
+
+static u_long
+cdnr_cb2handle(cb)
+ struct cdnr_block *cb;
+{
+ return (cb->cb_handle);
+}
+
+static void *
+cdnr_cballoc(top, type, input_func)
+ struct top_cdnr *top;
+ int type;
+ struct tc_action *(*input_func)(struct cdnr_block *,
+ struct cdnr_pktinfo *);
+{
+ struct cdnr_block *cb;
+ int size;
+
+ switch (type) {
+ case TCETYPE_TOP:
+ size = sizeof(struct top_cdnr);
+ break;
+ case TCETYPE_ELEMENT:
+ size = sizeof(struct cdnr_block);
+ break;
+ case TCETYPE_TBMETER:
+ size = sizeof(struct tbmeter);
+ break;
+ case TCETYPE_TRTCM:
+ size = sizeof(struct trtcm);
+ break;
+ case TCETYPE_TSWTCM:
+ size = sizeof(struct tswtcm);
+ break;
+ default:
+ return (NULL);
+ }
+
+ cb = malloc(size, M_DEVBUF, M_WAITOK);
+ if (cb == NULL)
+ return (NULL);
+ bzero(cb, size);
+
+ cb->cb_len = size;
+ cb->cb_type = type;
+ cb->cb_ref = 0;
+ cb->cb_handle = (u_long)cb;
+ if (top == NULL)
+ cb->cb_top = (struct top_cdnr *)cb;
+ else
+ cb->cb_top = top;
+
+ if (input_func != NULL) {
+ /*
+		 * if this cdnr has an input function, point its
+		 * tc_action back at itself so the chain walk calls it.
+ */
+ cb->cb_action.tca_code = TCACODE_NEXT;
+ cb->cb_action.tca_next = cb;
+ cb->cb_input = input_func;
+ } else
+ cb->cb_action.tca_code = TCACODE_NONE;
+
+ /* if this isn't top, register the element to the top level cdnr */
+ if (top != NULL)
+ LIST_INSERT_HEAD(&top->tc_elements, cb, cb_next);
+
+ return ((void *)cb);
+}
+
+static void
+cdnr_cbdestroy(cblock)
+ void *cblock;
+{
+ struct cdnr_block *cb = cblock;
+
+ /* delete filters belonging to this cdnr */
+ acc_discard_filters(&cb->cb_top->tc_classifier, cb, 0);
+
+ /* remove from the top level cdnr */
+ if (cb->cb_top != cblock)
+ LIST_REMOVE(cb, cb_next);
+
+ free(cb, M_DEVBUF);
+}
+
+/*
+ * conditioner common destroy routine
+ */
+static int
+generic_element_destroy(cb)
+ struct cdnr_block *cb;
+{
+ int error = 0;
+
+ switch (cb->cb_type) {
+ case TCETYPE_TOP:
+ error = top_destroy((struct top_cdnr *)cb);
+ break;
+ case TCETYPE_ELEMENT:
+ error = element_destroy(cb);
+ break;
+ case TCETYPE_TBMETER:
+ error = tbm_destroy((struct tbmeter *)cb);
+ break;
+ case TCETYPE_TRTCM:
+ error = trtcm_destroy((struct trtcm *)cb);
+ break;
+ case TCETYPE_TSWTCM:
+ error = tswtcm_destroy((struct tswtcm *)cb);
+ break;
+ default:
+ error = EINVAL;
+ }
+ return (error);
+}
+
+static int
+tca_verify_action(utca)
+ struct tc_action *utca;
+{
+ switch (utca->tca_code) {
+ case TCACODE_PASS:
+ case TCACODE_DROP:
+ case TCACODE_MARK:
+ /* these are ok */
+ break;
+
+ case TCACODE_HANDLE:
+ /* verify handle value */
+ if (cdnr_handle2cb(utca->tca_handle) == NULL)
+ return (-1);
+ break;
+
+ case TCACODE_NONE:
+ case TCACODE_RETURN:
+ case TCACODE_NEXT:
+ default:
+ /* should not be passed from a user */
+ return (-1);
+ }
+ return (0);
+}
+
+static void
+tca_import_action(ktca, utca)
+ struct tc_action *ktca, *utca;
+{
+ struct cdnr_block *cb;
+
+ *ktca = *utca;
+ if (ktca->tca_code == TCACODE_HANDLE) {
+ cb = cdnr_handle2cb(ktca->tca_handle);
+ if (cb == NULL) {
+ ktca->tca_code = TCACODE_NONE;
+ return;
+ }
+ ktca->tca_code = TCACODE_NEXT;
+ ktca->tca_next = cb;
+ cb->cb_ref++;
+ } else if (ktca->tca_code == TCACODE_MARK) {
+ ktca->tca_dscp &= DSCP_MASK;
+ }
+ return;
+}
+
+static void
+tca_invalidate_action(tca)
+ struct tc_action *tca;
+{
+ struct cdnr_block *cb;
+
+ if (tca->tca_code == TCACODE_NEXT) {
+ cb = tca->tca_next;
+ if (cb == NULL)
+ return;
+ cb->cb_ref--;
+ }
+ tca->tca_code = TCACODE_NONE;
+}
+
+/*
+ * top level traffic conditioner
+ */
+static struct top_cdnr *
+top_create(ifq)
+ struct ifaltq *ifq;
+{
+ struct top_cdnr *top;
+
+ if ((top = cdnr_cballoc(NULL, TCETYPE_TOP, NULL)) == NULL)
+ return (NULL);
+
+ top->tc_ifq = ifq;
+ /* set default action for the top level conditioner */
+ top->tc_block.cb_action.tca_code = TCACODE_PASS;
+
+ LIST_INSERT_HEAD(&tcb_list, top, tc_next);
+
+ ifq->altq_cdnr = top;
+
+ return (top);
+}
+
+static int
+top_destroy(top)
+ struct top_cdnr *top;
+{
+ struct cdnr_block *cb;
+
+ if (ALTQ_IS_CNDTNING(top->tc_ifq))
+ ALTQ_CLEAR_CNDTNING(top->tc_ifq);
+ top->tc_ifq->altq_cdnr = NULL;
+
+ /*
+ * destroy all the conditioner elements belonging to this interface
+ */
+ while ((cb = LIST_FIRST(&top->tc_elements)) != NULL) {
+ while (cb != NULL && cb->cb_ref > 0)
+ cb = LIST_NEXT(cb, cb_next);
+ if (cb != NULL)
+ generic_element_destroy(cb);
+ }
+
+ LIST_REMOVE(top, tc_next);
+
+ cdnr_cbdestroy(top);
+
+ /* if there is no active conditioner, remove the input hook */
+ if (altq_input != NULL) {
+ LIST_FOREACH(top, &tcb_list, tc_next)
+ if (ALTQ_IS_CNDTNING(top->tc_ifq))
+ break;
+ if (top == NULL)
+ altq_input = NULL;
+ }
+
+ return (0);
+}
+
+/*
+ * simple tc elements without an input function (e.g., droppers and markers).
+ */
+static struct cdnr_block *
+element_create(top, action)
+ struct top_cdnr *top;
+ struct tc_action *action;
+{
+ struct cdnr_block *cb;
+
+ if (tca_verify_action(action) < 0)
+ return (NULL);
+
+ if ((cb = cdnr_cballoc(top, TCETYPE_ELEMENT, NULL)) == NULL)
+ return (NULL);
+
+ tca_import_action(&cb->cb_action, action);
+
+ return (cb);
+}
+
+static int
+element_destroy(cb)
+ struct cdnr_block *cb;
+{
+ if (cb->cb_ref > 0)
+ return (EBUSY);
+
+ tca_invalidate_action(&cb->cb_action);
+
+ cdnr_cbdestroy(cb);
+ return (0);
+}
+
+/*
+ * internal representation of token bucket parameters
+ * rate: byte_per_unittime << 32
+ * (((bits_per_sec) / 8) << 32) / machclk_freq
+ * depth: byte << 32
+ *
+ */
+#define TB_SHIFT 32
+#define TB_SCALE(x) ((u_int64_t)(x) << TB_SHIFT)
+#define TB_UNSCALE(x) ((x) >> TB_SHIFT)
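+
+/*
+ * worked example (assuming a machclk_freq of 1GHz): a 10Mbit/s profile
+ * gives rate = TB_SCALE(10000000 / 8) / 10^9, about 5.37e6, i.e. 0.00125
+ * bytes per clock tick carried with 32 bits of fraction; filluptime is
+ * then simply depth / rate clock ticks.
+ */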
+
+static void
+tb_import_profile(tb, profile)
+ struct tbe *tb;
+ struct tb_profile *profile;
+{
+ tb->rate = TB_SCALE(profile->rate / 8) / machclk_freq;
+ tb->depth = TB_SCALE(profile->depth);
+ if (tb->rate > 0)
+ tb->filluptime = tb->depth / tb->rate;
+ else
+ tb->filluptime = 0xffffffffffffffffLL;
+ tb->token = tb->depth;
+ tb->last = read_machclk();
+}
+
+/*
+ * simple token bucket meter
+ */
+static struct tbmeter *
+tbm_create(top, profile, in_action, out_action)
+ struct top_cdnr *top;
+ struct tb_profile *profile;
+ struct tc_action *in_action, *out_action;
+{
+ struct tbmeter *tbm = NULL;
+
+ if (tca_verify_action(in_action) < 0
+ || tca_verify_action(out_action) < 0)
+ return (NULL);
+
+ if ((tbm = cdnr_cballoc(top, TCETYPE_TBMETER,
+ tbm_input)) == NULL)
+ return (NULL);
+
+ tb_import_profile(&tbm->tb, profile);
+
+ tca_import_action(&tbm->in_action, in_action);
+ tca_import_action(&tbm->out_action, out_action);
+
+ return (tbm);
+}
+
+static int
+tbm_destroy(tbm)
+ struct tbmeter *tbm;
+{
+ if (tbm->cdnrblk.cb_ref > 0)
+ return (EBUSY);
+
+ tca_invalidate_action(&tbm->in_action);
+ tca_invalidate_action(&tbm->out_action);
+
+ cdnr_cbdestroy(tbm);
+ return (0);
+}
+
+static struct tc_action *
+tbm_input(cb, pktinfo)
+ struct cdnr_block *cb;
+ struct cdnr_pktinfo *pktinfo;
+{
+ struct tbmeter *tbm = (struct tbmeter *)cb;
+ u_int64_t len;
+ u_int64_t interval, now;
+
+ len = TB_SCALE(pktinfo->pkt_len);
+
+ if (tbm->tb.token < len) {
+ now = read_machclk();
+ interval = now - tbm->tb.last;
+ if (interval >= tbm->tb.filluptime)
+ tbm->tb.token = tbm->tb.depth;
+ else {
+ tbm->tb.token += interval * tbm->tb.rate;
+ if (tbm->tb.token > tbm->tb.depth)
+ tbm->tb.token = tbm->tb.depth;
+ }
+ tbm->tb.last = now;
+ }
+
+ if (tbm->tb.token < len) {
+ PKTCNTR_ADD(&tbm->out_cnt, pktinfo->pkt_len);
+ return (&tbm->out_action);
+ }
+
+ tbm->tb.token -= len;
+ PKTCNTR_ADD(&tbm->in_cnt, pktinfo->pkt_len);
+ return (&tbm->in_action);
+}
+
+/*
+ * two rate three color marker
+ * as described in draft-heinanen-diffserv-trtcm-01.txt
+ */
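+/*
+ * decision sketch for trtcm_input below: a packet is red if it arrives
+ * precolored red (color-aware mode only) or the peak bucket lacks
+ * tokens; yellow if precolored yellow or the committed bucket lacks
+ * tokens; green otherwise.  Only the buckets a packet conforms to are
+ * debited.
+ */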
+static struct trtcm *
+trtcm_create(top, cmtd_profile, peak_profile,
+ green_action, yellow_action, red_action, coloraware)
+ struct top_cdnr *top;
+ struct tb_profile *cmtd_profile, *peak_profile;
+ struct tc_action *green_action, *yellow_action, *red_action;
+ int coloraware;
+{
+ struct trtcm *tcm = NULL;
+
+ if (tca_verify_action(green_action) < 0
+ || tca_verify_action(yellow_action) < 0
+ || tca_verify_action(red_action) < 0)
+ return (NULL);
+
+ if ((tcm = cdnr_cballoc(top, TCETYPE_TRTCM,
+ trtcm_input)) == NULL)
+ return (NULL);
+
+ tb_import_profile(&tcm->cmtd_tb, cmtd_profile);
+ tb_import_profile(&tcm->peak_tb, peak_profile);
+
+ tca_import_action(&tcm->green_action, green_action);
+ tca_import_action(&tcm->yellow_action, yellow_action);
+ tca_import_action(&tcm->red_action, red_action);
+
+ /* set dscps to use */
+ if (tcm->green_action.tca_code == TCACODE_MARK)
+ tcm->green_dscp = tcm->green_action.tca_dscp & DSCP_MASK;
+ else
+ tcm->green_dscp = DSCP_AF11;
+ if (tcm->yellow_action.tca_code == TCACODE_MARK)
+ tcm->yellow_dscp = tcm->yellow_action.tca_dscp & DSCP_MASK;
+ else
+ tcm->yellow_dscp = DSCP_AF12;
+ if (tcm->red_action.tca_code == TCACODE_MARK)
+ tcm->red_dscp = tcm->red_action.tca_dscp & DSCP_MASK;
+ else
+ tcm->red_dscp = DSCP_AF13;
+
+ tcm->coloraware = coloraware;
+
+ return (tcm);
+}
+
+static int
+trtcm_destroy(tcm)
+ struct trtcm *tcm;
+{
+ if (tcm->cdnrblk.cb_ref > 0)
+ return (EBUSY);
+
+ tca_invalidate_action(&tcm->green_action);
+ tca_invalidate_action(&tcm->yellow_action);
+ tca_invalidate_action(&tcm->red_action);
+
+ cdnr_cbdestroy(tcm);
+ return (0);
+}
+
+static struct tc_action *
+trtcm_input(cb, pktinfo)
+ struct cdnr_block *cb;
+ struct cdnr_pktinfo *pktinfo;
+{
+ struct trtcm *tcm = (struct trtcm *)cb;
+ u_int64_t len;
+ u_int64_t interval, now;
+ u_int8_t color;
+
+ len = TB_SCALE(pktinfo->pkt_len);
+ if (tcm->coloraware) {
+ color = pktinfo->pkt_dscp;
+ if (color != tcm->yellow_dscp && color != tcm->red_dscp)
+ color = tcm->green_dscp;
+ } else {
+ /* if color-blind, precolor it as green */
+ color = tcm->green_dscp;
+ }
+
+ now = read_machclk();
+ if (tcm->cmtd_tb.token < len) {
+ interval = now - tcm->cmtd_tb.last;
+ if (interval >= tcm->cmtd_tb.filluptime)
+ tcm->cmtd_tb.token = tcm->cmtd_tb.depth;
+ else {
+ tcm->cmtd_tb.token += interval * tcm->cmtd_tb.rate;
+ if (tcm->cmtd_tb.token > tcm->cmtd_tb.depth)
+ tcm->cmtd_tb.token = tcm->cmtd_tb.depth;
+ }
+ tcm->cmtd_tb.last = now;
+ }
+ if (tcm->peak_tb.token < len) {
+ interval = now - tcm->peak_tb.last;
+ if (interval >= tcm->peak_tb.filluptime)
+ tcm->peak_tb.token = tcm->peak_tb.depth;
+ else {
+ tcm->peak_tb.token += interval * tcm->peak_tb.rate;
+ if (tcm->peak_tb.token > tcm->peak_tb.depth)
+ tcm->peak_tb.token = tcm->peak_tb.depth;
+ }
+ tcm->peak_tb.last = now;
+ }
+
+ if (color == tcm->red_dscp || tcm->peak_tb.token < len) {
+ pktinfo->pkt_dscp = tcm->red_dscp;
+ PKTCNTR_ADD(&tcm->red_cnt, pktinfo->pkt_len);
+ return (&tcm->red_action);
+ }
+
+ if (color == tcm->yellow_dscp || tcm->cmtd_tb.token < len) {
+ pktinfo->pkt_dscp = tcm->yellow_dscp;
+ tcm->peak_tb.token -= len;
+ PKTCNTR_ADD(&tcm->yellow_cnt, pktinfo->pkt_len);
+ return (&tcm->yellow_action);
+ }
+
+ pktinfo->pkt_dscp = tcm->green_dscp;
+ tcm->cmtd_tb.token -= len;
+ tcm->peak_tb.token -= len;
+ PKTCNTR_ADD(&tcm->green_cnt, pktinfo->pkt_len);
+ return (&tcm->green_action);
+}
+
+/*
+ * time sliding window three color marker
+ * as described in draft-fang-diffserv-tc-tswtcm-00.txt
+ */
+static struct tswtcm *
+tswtcm_create(top, cmtd_rate, peak_rate, avg_interval,
+ green_action, yellow_action, red_action)
+ struct top_cdnr *top;
+ u_int32_t cmtd_rate, peak_rate, avg_interval;
+ struct tc_action *green_action, *yellow_action, *red_action;
+{
+ struct tswtcm *tsw;
+
+ if (tca_verify_action(green_action) < 0
+ || tca_verify_action(yellow_action) < 0
+ || tca_verify_action(red_action) < 0)
+ return (NULL);
+
+ if ((tsw = cdnr_cballoc(top, TCETYPE_TSWTCM,
+ tswtcm_input)) == NULL)
+ return (NULL);
+
+ tca_import_action(&tsw->green_action, green_action);
+ tca_import_action(&tsw->yellow_action, yellow_action);
+ tca_import_action(&tsw->red_action, red_action);
+
+ /* set dscps to use */
+ if (tsw->green_action.tca_code == TCACODE_MARK)
+ tsw->green_dscp = tsw->green_action.tca_dscp & DSCP_MASK;
+ else
+ tsw->green_dscp = DSCP_AF11;
+ if (tsw->yellow_action.tca_code == TCACODE_MARK)
+ tsw->yellow_dscp = tsw->yellow_action.tca_dscp & DSCP_MASK;
+ else
+ tsw->yellow_dscp = DSCP_AF12;
+ if (tsw->red_action.tca_code == TCACODE_MARK)
+ tsw->red_dscp = tsw->red_action.tca_dscp & DSCP_MASK;
+ else
+ tsw->red_dscp = DSCP_AF13;
+
+ /* convert rates from bits/sec to bytes/sec */
+ tsw->cmtd_rate = cmtd_rate / 8;
+ tsw->peak_rate = peak_rate / 8;
+ tsw->avg_rate = 0;
+
+ /* timewin is converted from msec to machine clock unit */
+ tsw->timewin = (u_int64_t)machclk_freq * avg_interval / 1000;
+
+ return (tsw);
+}
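+
+/*
+ * Unit-conversion example for the constructor above (hypothetical
+ * figures): with machclk_freq = 1000000 (a 1 MHz machine clock) and
+ * avg_interval = 500 msec, timewin becomes 1000000 * 500 / 1000 =
+ * 500000 clock ticks, and a cmtd_rate of 8000000 bits/sec is stored
+ * as 1000000 bytes/sec.
+ */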
+
+static int
+tswtcm_destroy(tsw)
+ struct tswtcm *tsw;
+{
+ if (tsw->cdnrblk.cb_ref > 0)
+ return (EBUSY);
+
+ tca_invalidate_action(&tsw->green_action);
+ tca_invalidate_action(&tsw->yellow_action);
+ tca_invalidate_action(&tsw->red_action);
+
+ cdnr_cbdestroy(tsw);
+ return (0);
+}
+
+static struct tc_action *
+tswtcm_input(cb, pktinfo)
+ struct cdnr_block *cb;
+ struct cdnr_pktinfo *pktinfo;
+{
+ struct tswtcm *tsw = (struct tswtcm *)cb;
+ int len;
+ u_int32_t avg_rate;
+ u_int64_t interval, now, tmp;
+
+ /*
+ * rate estimator
+ */
+ len = pktinfo->pkt_len;
+ now = read_machclk();
+
+ interval = now - tsw->t_front;
+ /*
+ * calculate average rate:
+ * avg = (avg * timewin + pkt_len)/(timewin + interval)
+ * pkt_len needs to be multiplied by machclk_freq in order to
+ * get (bytes/sec).
+ * note: when avg_rate (bytes/sec) and timewin (machclk unit) are
+ * less than 32 bits, the following 64-bit operation has enough
+ * precision.
+ */
+ tmp = ((u_int64_t)tsw->avg_rate * tsw->timewin
+ + (u_int64_t)len * machclk_freq) / (tsw->timewin + interval);
+ tsw->avg_rate = avg_rate = (u_int32_t)tmp;
+ tsw->t_front = now;
+
+ /*
+ * marker
+ */
+ if (avg_rate > tsw->cmtd_rate) {
+ u_int32_t randval = arc4random() % avg_rate;
+
+ if (avg_rate > tsw->peak_rate) {
+ if (randval < avg_rate - tsw->peak_rate) {
+ /* mark red */
+ pktinfo->pkt_dscp = tsw->red_dscp;
+ PKTCNTR_ADD(&tsw->red_cnt, len);
+ return (&tsw->red_action);
+ } else if (randval < avg_rate - tsw->cmtd_rate)
+ goto mark_yellow;
+ } else {
+ /* peak_rate >= avg_rate > cmtd_rate */
+ if (randval < avg_rate - tsw->cmtd_rate) {
+ mark_yellow:
+ pktinfo->pkt_dscp = tsw->yellow_dscp;
+ PKTCNTR_ADD(&tsw->yellow_cnt, len);
+ return (&tsw->yellow_action);
+ }
+ }
+ }
+
+ /* mark green */
+ pktinfo->pkt_dscp = tsw->green_dscp;
+ PKTCNTR_ADD(&tsw->green_cnt, len);
+ return (&tsw->green_action);
+}
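+
+/*
+ * Marking probabilities implied by the code above (example figures
+ * only): with cmtd_rate = 1000, peak_rate = 2000 and a measured
+ * avg_rate of 4000 bytes/sec, randval is uniform over [0, 4000), so
+ * a packet is marked red with probability (4000-2000)/4000 = 1/2,
+ * yellow with probability (2000-1000)/4000 = 1/4, and green with the
+ * remaining 1/4, draining the excess above each contracted rate.
+ */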
+
+/*
+ * ioctl requests
+ */
+static int
+cdnrcmd_if_attach(ifname)
+ char *ifname;
+{
+ struct ifnet *ifp;
+ struct top_cdnr *top;
+
+ if ((ifp = ifunit(ifname)) == NULL)
+ return (EBADF);
+
+ if (ifp->if_snd.altq_cdnr != NULL)
+ return (EBUSY);
+
+ if ((top = top_create(&ifp->if_snd)) == NULL)
+ return (ENOMEM);
+ return (0);
+}
+
+static int
+cdnrcmd_if_detach(ifname)
+ char *ifname;
+{
+ struct top_cdnr *top;
+
+ if ((top = tcb_lookup(ifname)) == NULL)
+ return (EBADF);
+
+ return top_destroy(top);
+}
+
+static int
+cdnrcmd_add_element(ap)
+ struct cdnr_add_element *ap;
+{
+ struct top_cdnr *top;
+ struct cdnr_block *cb;
+
+ if ((top = tcb_lookup(ap->iface.cdnr_ifname)) == NULL)
+ return (EBADF);
+
+ cb = element_create(top, &ap->action);
+ if (cb == NULL)
+ return (EINVAL);
+ /* return a class handle to the user */
+ ap->cdnr_handle = cdnr_cb2handle(cb);
+ return (0);
+}
+
+static int
+cdnrcmd_delete_element(ap)
+ struct cdnr_delete_element *ap;
+{
+ struct top_cdnr *top;
+ struct cdnr_block *cb;
+
+ if ((top = tcb_lookup(ap->iface.cdnr_ifname)) == NULL)
+ return (EBADF);
+
+ if ((cb = cdnr_handle2cb(ap->cdnr_handle)) == NULL)
+ return (EINVAL);
+
+ if (cb->cb_type != TCETYPE_ELEMENT)
+ return generic_element_destroy(cb);
+
+ return element_destroy(cb);
+}
+
+static int
+cdnrcmd_add_filter(ap)
+ struct cdnr_add_filter *ap;
+{
+ struct top_cdnr *top;
+ struct cdnr_block *cb;
+
+ if ((top = tcb_lookup(ap->iface.cdnr_ifname)) == NULL)
+ return (EBADF);
+
+ if ((cb = cdnr_handle2cb(ap->cdnr_handle)) == NULL)
+ return (EINVAL);
+
+ return acc_add_filter(&top->tc_classifier, &ap->filter,
+ cb, &ap->filter_handle);
+}
+
+static int
+cdnrcmd_delete_filter(ap)
+ struct cdnr_delete_filter *ap;
+{
+ struct top_cdnr *top;
+
+ if ((top = tcb_lookup(ap->iface.cdnr_ifname)) == NULL)
+ return (EBADF);
+
+ return acc_delete_filter(&top->tc_classifier, ap->filter_handle);
+}
+
+static int
+cdnrcmd_add_tbm(ap)
+ struct cdnr_add_tbmeter *ap;
+{
+ struct top_cdnr *top;
+ struct tbmeter *tbm;
+
+ if ((top = tcb_lookup(ap->iface.cdnr_ifname)) == NULL)
+ return (EBADF);
+
+ tbm = tbm_create(top, &ap->profile, &ap->in_action, &ap->out_action);
+ if (tbm == NULL)
+ return (EINVAL);
+ /* return a class handle to the user */
+ ap->cdnr_handle = cdnr_cb2handle(&tbm->cdnrblk);
+ return (0);
+}
+
+static int
+cdnrcmd_modify_tbm(ap)
+ struct cdnr_modify_tbmeter *ap;
+{
+ struct tbmeter *tbm;
+
+ if ((tbm = (struct tbmeter *)cdnr_handle2cb(ap->cdnr_handle)) == NULL)
+ return (EINVAL);
+
+ tb_import_profile(&tbm->tb, &ap->profile);
+
+ return (0);
+}
+
+static int
+cdnrcmd_tbm_stats(ap)
+ struct cdnr_tbmeter_stats *ap;
+{
+ struct tbmeter *tbm;
+
+ if ((tbm = (struct tbmeter *)cdnr_handle2cb(ap->cdnr_handle)) == NULL)
+ return (EINVAL);
+
+ ap->in_cnt = tbm->in_cnt;
+ ap->out_cnt = tbm->out_cnt;
+
+ return (0);
+}
+
+static int
+cdnrcmd_add_trtcm(ap)
+ struct cdnr_add_trtcm *ap;
+{
+ struct top_cdnr *top;
+ struct trtcm *tcm;
+
+ if ((top = tcb_lookup(ap->iface.cdnr_ifname)) == NULL)
+ return (EBADF);
+
+ tcm = trtcm_create(top, &ap->cmtd_profile, &ap->peak_profile,
+ &ap->green_action, &ap->yellow_action,
+ &ap->red_action, ap->coloraware);
+ if (tcm == NULL)
+ return (EINVAL);
+
+ /* return a class handle to the user */
+ ap->cdnr_handle = cdnr_cb2handle(&tcm->cdnrblk);
+ return (0);
+}
+
+static int
+cdnrcmd_modify_trtcm(ap)
+ struct cdnr_modify_trtcm *ap;
+{
+ struct trtcm *tcm;
+
+ if ((tcm = (struct trtcm *)cdnr_handle2cb(ap->cdnr_handle)) == NULL)
+ return (EINVAL);
+
+ tb_import_profile(&tcm->cmtd_tb, &ap->cmtd_profile);
+ tb_import_profile(&tcm->peak_tb, &ap->peak_profile);
+
+ return (0);
+}
+
+static int
+cdnrcmd_tcm_stats(ap)
+ struct cdnr_tcm_stats *ap;
+{
+ struct cdnr_block *cb;
+
+ if ((cb = cdnr_handle2cb(ap->cdnr_handle)) == NULL)
+ return (EINVAL);
+
+ if (cb->cb_type == TCETYPE_TRTCM) {
+ struct trtcm *tcm = (struct trtcm *)cb;
+
+ ap->green_cnt = tcm->green_cnt;
+ ap->yellow_cnt = tcm->yellow_cnt;
+ ap->red_cnt = tcm->red_cnt;
+ } else if (cb->cb_type == TCETYPE_TSWTCM) {
+ struct tswtcm *tsw = (struct tswtcm *)cb;
+
+ ap->green_cnt = tsw->green_cnt;
+ ap->yellow_cnt = tsw->yellow_cnt;
+ ap->red_cnt = tsw->red_cnt;
+ } else
+ return (EINVAL);
+
+ return (0);
+}
+
+static int
+cdnrcmd_add_tswtcm(ap)
+ struct cdnr_add_tswtcm *ap;
+{
+ struct top_cdnr *top;
+ struct tswtcm *tsw;
+
+ if ((top = tcb_lookup(ap->iface.cdnr_ifname)) == NULL)
+ return (EBADF);
+
+ if (ap->cmtd_rate > ap->peak_rate)
+ return (EINVAL);
+
+ tsw = tswtcm_create(top, ap->cmtd_rate, ap->peak_rate,
+ ap->avg_interval, &ap->green_action,
+ &ap->yellow_action, &ap->red_action);
+ if (tsw == NULL)
+ return (EINVAL);
+
+ /* return a class handle to the user */
+ ap->cdnr_handle = cdnr_cb2handle(&tsw->cdnrblk);
+ return (0);
+}
+
+static int
+cdnrcmd_modify_tswtcm(ap)
+ struct cdnr_modify_tswtcm *ap;
+{
+ struct tswtcm *tsw;
+
+ if ((tsw = (struct tswtcm *)cdnr_handle2cb(ap->cdnr_handle)) == NULL)
+ return (EINVAL);
+
+ if (ap->cmtd_rate > ap->peak_rate)
+ return (EINVAL);
+
+ /* convert rates from bits/sec to bytes/sec */
+ tsw->cmtd_rate = ap->cmtd_rate / 8;
+ tsw->peak_rate = ap->peak_rate / 8;
+ tsw->avg_rate = 0;
+
+ /* timewin is converted from msec to machine clock unit */
+ tsw->timewin = (u_int64_t)machclk_freq * ap->avg_interval / 1000;
+
+ return (0);
+}
+
+static int
+cdnrcmd_get_stats(ap)
+ struct cdnr_get_stats *ap;
+{
+ struct top_cdnr *top;
+ struct cdnr_block *cb;
+ struct tbmeter *tbm;
+ struct trtcm *tcm;
+ struct tswtcm *tsw;
+ struct tce_stats tce, *usp;
+ int error, n, nskip, nelements;
+
+ if ((top = tcb_lookup(ap->iface.cdnr_ifname)) == NULL)
+ return (EBADF);
+
+ /* copy action stats */
+ bcopy(top->tc_cnts, ap->cnts, sizeof(ap->cnts));
+
+ /* stats for each element */
+ nelements = ap->nelements;
+ usp = ap->tce_stats;
+ if (nelements <= 0 || usp == NULL)
+ return (0);
+
+ nskip = ap->nskip;
+ n = 0;
+ LIST_FOREACH(cb, &top->tc_elements, cb_next) {
+ if (nskip > 0) {
+ nskip--;
+ continue;
+ }
+
+ bzero(&tce, sizeof(tce));
+ tce.tce_handle = cb->cb_handle;
+ tce.tce_type = cb->cb_type;
+ switch (cb->cb_type) {
+ case TCETYPE_TBMETER:
+ tbm = (struct tbmeter *)cb;
+ tce.tce_cnts[0] = tbm->in_cnt;
+ tce.tce_cnts[1] = tbm->out_cnt;
+ break;
+ case TCETYPE_TRTCM:
+ tcm = (struct trtcm *)cb;
+ tce.tce_cnts[0] = tcm->green_cnt;
+ tce.tce_cnts[1] = tcm->yellow_cnt;
+ tce.tce_cnts[2] = tcm->red_cnt;
+ break;
+ case TCETYPE_TSWTCM:
+ tsw = (struct tswtcm *)cb;
+ tce.tce_cnts[0] = tsw->green_cnt;
+ tce.tce_cnts[1] = tsw->yellow_cnt;
+ tce.tce_cnts[2] = tsw->red_cnt;
+ break;
+ default:
+ continue;
+ }
+
+ if ((error = copyout((caddr_t)&tce, (caddr_t)usp++,
+ sizeof(tce))) != 0)
+ return (error);
+
+ if (++n == nelements)
+ break;
+ }
+ ap->nelements = n;
+
+ return (0);
+}
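+
+/*
+ * A hypothetical user-space sketch of paging through the element
+ * stats with this command (descriptor setup and error checks
+ * omitted; the interface name is only an example):
+ *
+ *	struct cdnr_get_stats req;
+ *	struct tce_stats tces[8];
+ *
+ *	bzero(&req, sizeof(req));
+ *	strlcpy(req.iface.cdnr_ifname, "fxp0", IFNAMSIZ);
+ *	req.nskip = 0;		   start at the first element
+ *	req.nelements = 8;	   room for 8 entries
+ *	req.tce_stats = tces;
+ *	ioctl(fd, CDNR_GETSTATS, &req);
+ *
+ * On return, req.nelements holds the number of entries copied out;
+ * resubmitting with a larger nskip pages through the rest.
+ */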
+
+/*
+ * conditioner device interface
+ */
+int
+cdnropen(dev, flag, fmt, p)
+ dev_t dev;
+ int flag, fmt;
+#if (__FreeBSD_version > 500000)
+ struct thread *p;
+#else
+ struct proc *p;
+#endif
+{
+ if (machclk_freq == 0)
+ init_machclk();
+
+ if (machclk_freq == 0) {
+ printf("cdnr: no cpu clock available!\n");
+ return (ENXIO);
+ }
+
+ /* everything will be done when the queueing scheme is attached. */
+ return 0;
+}
+
+int
+cdnrclose(dev, flag, fmt, p)
+ dev_t dev;
+ int flag, fmt;
+#if (__FreeBSD_version > 500000)
+ struct thread *p;
+#else
+ struct proc *p;
+#endif
+{
+ struct top_cdnr *top;
+ int err, error = 0;
+
+ while ((top = LIST_FIRST(&tcb_list)) != NULL) {
+ /* destroy all */
+ err = top_destroy(top);
+ if (err != 0 && error == 0)
+ error = err;
+ }
+ altq_input = NULL;
+
+ return (error);
+}
+
+int
+cdnrioctl(dev, cmd, addr, flag, p)
+ dev_t dev;
+ ioctlcmd_t cmd;
+ caddr_t addr;
+ int flag;
+#if (__FreeBSD_version > 500000)
+ struct thread *p;
+#else
+ struct proc *p;
+#endif
+{
+ struct top_cdnr *top;
+ struct cdnr_interface *ifacep;
+ int s, error = 0;
+
+ /* check super-user privilege */
+ switch (cmd) {
+ case CDNR_GETSTATS:
+ break;
+ default:
+#if (__FreeBSD_version > 700000)
+ if ((error = priv_check(p, PRIV_ALTQ_MANAGE)) != 0)
+#elif (__FreeBSD_version > 400000)
+ if ((error = suser(p)) != 0)
+#else
+ if ((error = suser(p->p_ucred, &p->p_acflag)) != 0)
+#endif
+ return (error);
+ break;
+ }
+
+#ifdef __NetBSD__
+ s = splnet();
+#else
+ s = splimp();
+#endif
+ switch (cmd) {
+
+ case CDNR_IF_ATTACH:
+ ifacep = (struct cdnr_interface *)addr;
+ error = cdnrcmd_if_attach(ifacep->cdnr_ifname);
+ break;
+
+ case CDNR_IF_DETACH:
+ ifacep = (struct cdnr_interface *)addr;
+ error = cdnrcmd_if_detach(ifacep->cdnr_ifname);
+ break;
+
+ case CDNR_ENABLE:
+ case CDNR_DISABLE:
+ ifacep = (struct cdnr_interface *)addr;
+ if ((top = tcb_lookup(ifacep->cdnr_ifname)) == NULL) {
+ error = EBADF;
+ break;
+ }
+
+ switch (cmd) {
+
+ case CDNR_ENABLE:
+ ALTQ_SET_CNDTNING(top->tc_ifq);
+ if (altq_input == NULL)
+ altq_input = altq_cdnr_input;
+ break;
+
+ case CDNR_DISABLE:
+ ALTQ_CLEAR_CNDTNING(top->tc_ifq);
+ LIST_FOREACH(top, &tcb_list, tc_next)
+ if (ALTQ_IS_CNDTNING(top->tc_ifq))
+ break;
+ if (top == NULL)
+ altq_input = NULL;
+ break;
+ }
+ break;
+
+ case CDNR_ADD_ELEM:
+ error = cdnrcmd_add_element((struct cdnr_add_element *)addr);
+ break;
+
+ case CDNR_DEL_ELEM:
+ error = cdnrcmd_delete_element((struct cdnr_delete_element *)addr);
+ break;
+
+ case CDNR_ADD_TBM:
+ error = cdnrcmd_add_tbm((struct cdnr_add_tbmeter *)addr);
+ break;
+
+ case CDNR_MOD_TBM:
+ error = cdnrcmd_modify_tbm((struct cdnr_modify_tbmeter *)addr);
+ break;
+
+ case CDNR_TBM_STATS:
+ error = cdnrcmd_tbm_stats((struct cdnr_tbmeter_stats *)addr);
+ break;
+
+ case CDNR_ADD_TCM:
+ error = cdnrcmd_add_trtcm((struct cdnr_add_trtcm *)addr);
+ break;
+
+ case CDNR_MOD_TCM:
+ error = cdnrcmd_modify_trtcm((struct cdnr_modify_trtcm *)addr);
+ break;
+
+ case CDNR_TCM_STATS:
+ error = cdnrcmd_tcm_stats((struct cdnr_tcm_stats *)addr);
+ break;
+
+ case CDNR_ADD_FILTER:
+ error = cdnrcmd_add_filter((struct cdnr_add_filter *)addr);
+ break;
+
+ case CDNR_DEL_FILTER:
+ error = cdnrcmd_delete_filter((struct cdnr_delete_filter *)addr);
+ break;
+
+ case CDNR_GETSTATS:
+ error = cdnrcmd_get_stats((struct cdnr_get_stats *)addr);
+ break;
+
+ case CDNR_ADD_TSW:
+ error = cdnrcmd_add_tswtcm((struct cdnr_add_tswtcm *)addr);
+ break;
+
+ case CDNR_MOD_TSW:
+ error = cdnrcmd_modify_tswtcm((struct cdnr_modify_tswtcm *)addr);
+ break;
+
+ default:
+ error = EINVAL;
+ break;
+ }
+ splx(s);
+
+ return error;
+}
+
+#ifdef KLD_MODULE
+
+static struct altqsw cdnr_sw =
+ {"cdnr", cdnropen, cdnrclose, cdnrioctl};
+
+ALTQ_MODULE(altq_cdnr, ALTQT_CDNR, &cdnr_sw);
+
+#endif /* KLD_MODULE */
+
+#endif /* ALTQ3_COMPAT */
+#endif /* ALTQ_CDNR */
diff --git a/contrib/altq/rtems/freebsd/altq/altq_cdnr.h b/contrib/altq/rtems/freebsd/altq/altq_cdnr.h
new file mode 100644
index 00000000..002e3c38
--- /dev/null
+++ b/contrib/altq/rtems/freebsd/altq/altq_cdnr.h
@@ -0,0 +1,335 @@
+/* $KAME: altq_cdnr.h,v 1.9 2003/07/10 12:07:48 kjc Exp $ */
+
+/*
+ * Copyright (C) 1999-2002
+ * Sony Computer Science Laboratories Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY SONY CSL AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL SONY CSL OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _ALTQ_ALTQ_CDNR_HH_
+#define _ALTQ_ALTQ_CDNR_HH_
+
+#include <rtems/freebsd/altq/altq.h>
+
+/*
+ * traffic conditioner element types
+ */
+#define TCETYPE_NONE 0
+#define TCETYPE_TOP 1 /* top level conditioner */
+#define TCETYPE_ELEMENT 2 /* a simple tc element */
+#define TCETYPE_TBMETER 3 /* token bucket meter */
+#define TCETYPE_TRTCM 4 /* (two-rate) three color marker */
+#define TCETYPE_TSWTCM 5 /* time sliding window 3-color marker */
+
+/*
+ * traffic conditioner action
+ */
+struct cdnr_block;
+
+struct tc_action {
+ int tca_code; /* e.g., TCACODE_PASS */
+ /* tca_code dependent variable */
+ union {
+ u_long un_value; /* template */
+ u_int8_t un_dscp; /* diffserv code point */
+ u_long un_handle; /* tc action handle */
+ struct cdnr_block *un_next; /* next tc element block */
+ } tca_un;
+};
+#define tca_value tca_un.un_value
+#define tca_dscp tca_un.un_dscp
+#define tca_handle tca_un.un_handle
+#define tca_next tca_un.un_next
+
+#define TCACODE_NONE 0 /* action is not set */
+#define TCACODE_PASS 1 /* pass this packet */
+#define TCACODE_DROP 2 /* discard this packet */
+#define TCACODE_RETURN 3 /* do not process this packet */
+#define TCACODE_MARK 4 /* mark dscp */
+#define TCACODE_HANDLE 5 /* take action specified by handle */
+#define TCACODE_NEXT 6 /* take action in the next tc element */
+#define TCACODE_MAX 6
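+
+/*
+ * A minimal sketch of filling in an action (DSCP_AF11, defined
+ * below, is just an example code point): a marking action sets
+ * tca_code to TCACODE_MARK and the dscp to write, while TCACODE_NEXT
+ * chains to another element through tca_next.
+ *
+ *	struct tc_action a;
+ *
+ *	a.tca_code = TCACODE_MARK;
+ *	a.tca_dscp = DSCP_AF11;
+ */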
+
+#define CDNR_NULL_HANDLE 0
+
+struct cdnr_interface {
+ char cdnr_ifname[IFNAMSIZ]; /* interface name (e.g., fxp0) */
+};
+
+/* simple element operations */
+struct cdnr_add_element {
+ struct cdnr_interface iface;
+ struct tc_action action;
+
+ u_long cdnr_handle; /* return value */
+};
+
+struct cdnr_delete_element {
+ struct cdnr_interface iface;
+ u_long cdnr_handle;
+};
+
+/* token-bucket meter operations */
+struct cdnr_add_tbmeter {
+ struct cdnr_interface iface;
+ struct tb_profile profile;
+ struct tc_action in_action;
+ struct tc_action out_action;
+
+ u_long cdnr_handle; /* return value */
+};
+
+struct cdnr_modify_tbmeter {
+ struct cdnr_interface iface;
+ u_long cdnr_handle;
+ struct tb_profile profile;
+};
+
+struct cdnr_tbmeter_stats {
+ struct cdnr_interface iface;
+ u_long cdnr_handle;
+ struct pktcntr in_cnt;
+ struct pktcntr out_cnt;
+};
+
+/* two-rate three-color marker operations */
+struct cdnr_add_trtcm {
+ struct cdnr_interface iface;
+ struct tb_profile cmtd_profile; /* profile for committed tb */
+ struct tb_profile peak_profile; /* profile for peak tb */
+ struct tc_action green_action; /* action for green packets */
+ struct tc_action yellow_action; /* action for yellow packets */
+ struct tc_action red_action; /* action for red packets */
+ int coloraware; /* color-aware/color-blind */
+
+ u_long cdnr_handle; /* return value */
+};
+
+struct cdnr_modify_trtcm {
+ struct cdnr_interface iface;
+ u_long cdnr_handle;
+ struct tb_profile cmtd_profile; /* profile for committed tb */
+ struct tb_profile peak_profile; /* profile for peak tb */
+ int coloraware; /* color-aware/color-blind */
+};
+
+struct cdnr_tcm_stats {
+ struct cdnr_interface iface;
+ u_long cdnr_handle;
+ struct pktcntr green_cnt;
+ struct pktcntr yellow_cnt;
+ struct pktcntr red_cnt;
+};
+
+/* time sliding window three-color marker operations */
+struct cdnr_add_tswtcm {
+ struct cdnr_interface iface;
+ u_int32_t cmtd_rate; /* committed rate (bits/sec) */
+ u_int32_t peak_rate; /* peak rate (bits/sec) */
+ u_int32_t avg_interval; /* averaging interval (msec) */
+ struct tc_action green_action; /* action for green packets */
+ struct tc_action yellow_action; /* action for yellow packets */
+ struct tc_action red_action; /* action for red packets */
+
+ u_long cdnr_handle; /* return value */
+};
+
+struct cdnr_modify_tswtcm {
+ struct cdnr_interface iface;
+ u_long cdnr_handle;
+ u_int32_t cmtd_rate; /* committed rate (bits/sec) */
+ u_int32_t peak_rate; /* peak rate (bits/sec) */
+ u_int32_t avg_interval; /* averaging interval (msec) */
+};
+
+struct cdnr_add_filter {
+ struct cdnr_interface iface;
+ u_long cdnr_handle;
+#ifdef ALTQ3_CLFIER_COMPAT
+ struct flow_filter filter;
+#endif
+ u_long filter_handle; /* return value */
+};
+
+struct cdnr_delete_filter {
+ struct cdnr_interface iface;
+ u_long filter_handle;
+};
+
+struct tce_stats {
+ u_long tce_handle; /* tc element handle */
+ int tce_type; /* e.g., TCETYPE_ELEMENT */
+ struct pktcntr tce_cnts[3]; /* tcm returns 3 counters */
+};
+
+struct cdnr_get_stats {
+ struct cdnr_interface iface;
+ struct pktcntr cnts[TCACODE_MAX+1];
+
+ /* element stats */
+ int nskip; /* skip # of elements */
+ int nelements; /* # of element stats (in: max, out: returned) */
+ struct tce_stats *tce_stats; /* pointer to stats array */
+};
+
+#define CDNR_IF_ATTACH _IOW('Q', 1, struct cdnr_interface)
+#define CDNR_IF_DETACH _IOW('Q', 2, struct cdnr_interface)
+#define CDNR_ENABLE _IOW('Q', 3, struct cdnr_interface)
+#define CDNR_DISABLE _IOW('Q', 4, struct cdnr_interface)
+#define CDNR_ADD_FILTER _IOWR('Q', 10, struct cdnr_add_filter)
+#define CDNR_DEL_FILTER _IOW('Q', 11, struct cdnr_delete_filter)
+#define CDNR_GETSTATS _IOWR('Q', 12, struct cdnr_get_stats)
+#define CDNR_ADD_ELEM _IOWR('Q', 30, struct cdnr_add_element)
+#define CDNR_DEL_ELEM _IOW('Q', 31, struct cdnr_delete_element)
+#define CDNR_ADD_TBM _IOWR('Q', 32, struct cdnr_add_tbmeter)
+#define CDNR_MOD_TBM _IOW('Q', 33, struct cdnr_modify_tbmeter)
+#define CDNR_TBM_STATS _IOWR('Q', 34, struct cdnr_tbmeter_stats)
+#define CDNR_ADD_TCM _IOWR('Q', 35, struct cdnr_add_trtcm)
+#define CDNR_MOD_TCM _IOWR('Q', 36, struct cdnr_modify_trtcm)
+#define CDNR_TCM_STATS _IOWR('Q', 37, struct cdnr_tcm_stats)
+#define CDNR_ADD_TSW _IOWR('Q', 38, struct cdnr_add_tswtcm)
+#define CDNR_MOD_TSW _IOWR('Q', 39, struct cdnr_modify_tswtcm)
+
+#ifndef DSCP_EF
+/* diffserv code points */
+#define DSCP_MASK 0xfc
+#define DSCP_CUMASK 0x03
+#define DSCP_EF 0xb8
+#define DSCP_AF11 0x28
+#define DSCP_AF12 0x30
+#define DSCP_AF13 0x38
+#define DSCP_AF21 0x48
+#define DSCP_AF22 0x50
+#define DSCP_AF23 0x58
+#define DSCP_AF31 0x68
+#define DSCP_AF32 0x70
+#define DSCP_AF33 0x78
+#define DSCP_AF41 0x88
+#define DSCP_AF42 0x90
+#define DSCP_AF43 0x98
+#define AF_CLASSMASK 0xe0
+#define AF_DROPPRECMASK 0x18
+#endif
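+
+/*
+ * The AF code points decompose under the two masks above; e.g.,
+ * DSCP_AF12 (0x30) carries class bits 0x30 & AF_CLASSMASK == 0x20
+ * (the AF1x class) and drop-precedence bits 0x30 & AF_DROPPRECMASK
+ * == 0x10 (medium drop precedence).
+ */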
+
+#ifdef _KERNEL
+
+/*
+ * packet information passed to the input function of tc elements
+ */
+struct cdnr_pktinfo {
+ int pkt_len; /* packet length */
+ u_int8_t pkt_dscp; /* diffserv code point */
+};
+
+/*
+ * traffic conditioner control block common to all types of tc elements
+ */
+struct cdnr_block {
+ LIST_ENTRY(cdnr_block) cb_next;
+ int cb_len; /* size of this tc element */
+ int cb_type; /* cdnr block type */
+ int cb_ref; /* reference count of this element */
+ u_long cb_handle; /* handle of this tc element */
+ struct top_cdnr *cb_top; /* back pointer to top */
+ struct tc_action cb_action; /* top level action for this tcb */
+ struct tc_action *(*cb_input)(struct cdnr_block *,
+ struct cdnr_pktinfo *);
+};
+
+/*
+ * top level traffic conditioner structure for an interface
+ */
+struct top_cdnr {
+ struct cdnr_block tc_block;
+
+ LIST_ENTRY(top_cdnr) tc_next;
+ struct ifaltq *tc_ifq;
+
+ LIST_HEAD(, cdnr_block) tc_elements;
+#ifdef ALTQ3_CLFIER_COMPAT
+ struct acc_classifier tc_classifier;
+#endif
+ struct pktcntr tc_cnts[TCACODE_MAX+1];
+};
+
+/* token bucket element */
+struct tbe {
+ u_int64_t rate;
+ u_int64_t depth;
+
+ u_int64_t token;
+ u_int64_t filluptime;
+ u_int64_t last;
+};
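+
+/*
+ * All fields are in TB_SCALE'd units: token grows by interval * rate
+ * between arrivals and is clipped at depth, and once the idle
+ * interval reaches filluptime (presumably depth / rate) the meters
+ * above simply declare the bucket full.  E.g., a bucket with
+ * depth = 1500 and rate = 1 token per machine clock tick refills
+ * completely 1500 ticks after its last refill.
+ */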
+
+/* token bucket meter structure */
+struct tbmeter {
+ struct cdnr_block cdnrblk; /* conditioner block */
+ struct tbe tb; /* token bucket */
+ struct tc_action in_action; /* actions for IN/OUT */
+ struct tc_action out_action; /* actions for IN/OUT */
+ struct pktcntr in_cnt; /* statistics for IN/OUT */
+ struct pktcntr out_cnt; /* statistics for IN/OUT */
+};
+
+/* two-rate three-color marker structure */
+struct trtcm {
+ struct cdnr_block cdnrblk; /* conditioner block */
+ struct tbe cmtd_tb; /* committed tb profile */
+ struct tbe peak_tb; /* peak tb profile */
+ struct tc_action green_action;
+ struct tc_action yellow_action;
+ struct tc_action red_action;
+ int coloraware;
+ u_int8_t green_dscp;
+ u_int8_t yellow_dscp;
+ u_int8_t red_dscp;
+ struct pktcntr green_cnt;
+ struct pktcntr yellow_cnt;
+ struct pktcntr red_cnt;
+};
+
+/* time sliding window three-color marker structure */
+struct tswtcm {
+ struct cdnr_block cdnrblk; /* conditioner block */
+
+ u_int32_t avg_rate; /* average rate (bytes/sec) */
+ u_int64_t t_front; /* timestamp of last update */
+
+ u_int64_t timewin; /* average interval */
+ u_int32_t cmtd_rate; /* committed target rate */
+ u_int32_t peak_rate; /* peak target rate */
+ struct tc_action green_action;
+ struct tc_action yellow_action;
+ struct tc_action red_action;
+ u_int8_t green_dscp;
+ u_int8_t yellow_dscp;
+ u_int8_t red_dscp;
+ struct pktcntr green_cnt;
+ struct pktcntr yellow_cnt;
+ struct pktcntr red_cnt;
+};
+
+#endif /* _KERNEL */
+
+#endif /* _ALTQ_ALTQ_CDNR_HH_ */
diff --git a/contrib/altq/rtems/freebsd/altq/altq_classq.h b/contrib/altq/rtems/freebsd/altq/altq_classq.h
new file mode 100644
index 00000000..c3cfea37
--- /dev/null
+++ b/contrib/altq/rtems/freebsd/altq/altq_classq.h
@@ -0,0 +1,206 @@
+/* $KAME: altq_classq.h,v 1.6 2003/01/07 07:33:38 kjc Exp $ */
+
+/*
+ * Copyright (c) 1991-1997 Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the Network Research
+ * Group at Lawrence Berkeley Laboratory.
+ * 4. Neither the name of the University nor of the Laboratory may be used
+ * to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+/*
+ * class queue definitions extracted from rm_class.h.
+ */
+#ifndef _ALTQ_ALTQ_CLASSQ_HH_
+#define _ALTQ_ALTQ_CLASSQ_HH_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * Packet queue types: drop-head, RED, RIO, or drop-tail.
+ */
+#define Q_DROPHEAD 0x00
+#define Q_RED 0x01
+#define Q_RIO 0x02
+#define Q_DROPTAIL 0x03
+
+#ifdef _KERNEL
+
+/*
+ * Packet Queue structures and macros to manipulate them.
+ */
+struct _class_queue_ {
+ struct mbuf *tail_; /* Tail of packet queue */
+ int qlen_; /* Queue length (in number of packets) */
+ int qlim_; /* Queue limit (in number of packets) */
+ int qtype_; /* Queue type */
+};
+
+typedef struct _class_queue_ class_queue_t;
+
+#define qtype(q) (q)->qtype_ /* Get queue type */
+#define qlimit(q) (q)->qlim_ /* Max packets to be queued */
+#define qlen(q) (q)->qlen_ /* Current queue length. */
+#define qtail(q) (q)->tail_ /* Tail of the queue */
+#define qhead(q) ((q)->tail_ ? (q)->tail_->m_nextpkt : NULL)
+
+#define qempty(q) ((q)->qlen_ == 0) /* Is the queue empty? */
+#define q_is_red(q) ((q)->qtype_ == Q_RED) /* Is the queue a red queue */
+#define q_is_rio(q) ((q)->qtype_ == Q_RIO) /* Is the queue a rio queue */
+#define q_is_red_or_rio(q) ((q)->qtype_ == Q_RED || (q)->qtype_ == Q_RIO)
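+
+/*
+ * The queue is a circular singly-linked list threaded through
+ * m_nextpkt with only the tail stored: after enqueueing a, b, c in
+ * that order, tail_ == c and c->m_nextpkt == a, which is how qhead()
+ * reaches the head from the tail in constant time.
+ */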
+
+#if !defined(__GNUC__) || defined(ALTQ_DEBUG)
+
+extern void _addq(class_queue_t *, struct mbuf *);
+extern struct mbuf *_getq(class_queue_t *);
+extern struct mbuf *_getq_tail(class_queue_t *);
+extern struct mbuf *_getq_random(class_queue_t *);
+extern void _removeq(class_queue_t *, struct mbuf *);
+extern void _flushq(class_queue_t *);
+
+#else /* __GNUC__ && !ALTQ_DEBUG */
+/*
+ * inlined versions
+ */
+static __inline void
+_addq(class_queue_t *q, struct mbuf *m)
+{
+ struct mbuf *m0;
+
+ if ((m0 = qtail(q)) != NULL)
+ m->m_nextpkt = m0->m_nextpkt;
+ else
+ m0 = m;
+ m0->m_nextpkt = m;
+ qtail(q) = m;
+ qlen(q)++;
+}
+
+static __inline struct mbuf *
+_getq(class_queue_t *q)
+{
+ struct mbuf *m, *m0;
+
+ if ((m = qtail(q)) == NULL)
+ return (NULL);
+ if ((m0 = m->m_nextpkt) != m)
+ m->m_nextpkt = m0->m_nextpkt;
+ else
+ qtail(q) = NULL;
+ qlen(q)--;
+ m0->m_nextpkt = NULL;
+ return (m0);
+}
+
+/* drop a packet at the tail of the queue */
+static __inline struct mbuf *
+_getq_tail(class_queue_t *q)
+{
+ struct mbuf *m, *m0, *prev;
+
+ if ((m = m0 = qtail(q)) == NULL)
+ return NULL;
+ do {
+ prev = m0;
+ m0 = m0->m_nextpkt;
+ } while (m0 != m);
+ prev->m_nextpkt = m->m_nextpkt;
+ if (prev == m)
+ qtail(q) = NULL;
+ else
+ qtail(q) = prev;
+ qlen(q)--;
+ m->m_nextpkt = NULL;
+ return (m);
+}
+
+/* randomly select a packet in the queue */
+static __inline struct mbuf *
+_getq_random(class_queue_t *q)
+{
+ struct mbuf *m;
+ int i, n;
+
+ if ((m = qtail(q)) == NULL)
+ return NULL;
+ if (m->m_nextpkt == m)
+ qtail(q) = NULL;
+ else {
+ struct mbuf *prev = NULL;
+
+ n = random() % qlen(q) + 1;
+ for (i = 0; i < n; i++) {
+ prev = m;
+ m = m->m_nextpkt;
+ }
+ prev->m_nextpkt = m->m_nextpkt;
+ if (m == qtail(q))
+ qtail(q) = prev;
+ }
+ qlen(q)--;
+ m->m_nextpkt = NULL;
+ return (m);
+}
+
+static __inline void
+_removeq(class_queue_t *q, struct mbuf *m)
+{
+ struct mbuf *m0, *prev;
+
+ m0 = qtail(q);
+ do {
+ prev = m0;
+ m0 = m0->m_nextpkt;
+ } while (m0 != m);
+ prev->m_nextpkt = m->m_nextpkt;
+ if (prev == m)
+ qtail(q) = NULL;
+ else if (qtail(q) == m)
+ qtail(q) = prev;
+ qlen(q)--;
+}
+
+static __inline void
+_flushq(class_queue_t *q)
+{
+ struct mbuf *m;
+
+ while ((m = _getq(q)) != NULL)
+ m_freem(m);
+}
+
+#endif /* __GNUC__ && !ALTQ_DEBUG */
+
+#endif /* _KERNEL */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _ALTQ_ALTQ_CLASSQ_HH_ */
diff --git a/contrib/altq/rtems/freebsd/altq/altq_hfsc.c b/contrib/altq/rtems/freebsd/altq/altq_hfsc.c
new file mode 100644
index 00000000..6a97b3eb
--- /dev/null
+++ b/contrib/altq/rtems/freebsd/altq/altq_hfsc.c
@@ -0,0 +1,2279 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/* $FreeBSD$ */
+/* $KAME: altq_hfsc.c,v 1.24 2003/12/05 05:40:46 kjc Exp $ */
+
+/*
+ * Copyright (c) 1997-1999 Carnegie Mellon University. All Rights Reserved.
+ *
+ * Permission to use, copy, modify, and distribute this software and
+ * its documentation is hereby granted (including for commercial or
+ * for-profit use), provided that both the copyright notice and this
+ * permission notice appear in all copies of the software, derivative
+ * works, or modified versions, and any portions thereof.
+ *
+ * THIS SOFTWARE IS EXPERIMENTAL AND IS KNOWN TO HAVE BUGS, SOME OF
+ * WHICH MAY HAVE SERIOUS CONSEQUENCES. CARNEGIE MELLON PROVIDES THIS
+ * SOFTWARE IN ITS ``AS IS'' CONDITION, AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+ * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ *
+ * Carnegie Mellon encourages (but does not require) users of this
+ * software to return any improvements or extensions that they make,
+ * and to grant Carnegie Mellon the rights to redistribute these
+ * changes without encumbrance.
+ */
+/*
+ * H-FSC is described in Proceedings of SIGCOMM'97,
+ * "A Hierarchical Fair Service Curve Algorithm for Link-Sharing,
+ * Real-Time and Priority Service"
+ * by Ion Stoica, Hui Zhang, and T. S. Eugene Ng.
+ *
+ * Oleg Cherevko <olwi@aq.ml.com.ua> added the upperlimit for link-sharing.
+ * when a class has an upperlimit, the fit-time is computed from the
+ * upperlimit service curve. the link-sharing scheduler does not schedule
+ * a class whose fit-time exceeds the current time.
+ */
+
+#if defined(__FreeBSD__) || defined(__NetBSD__)
+#include <rtems/freebsd/local/opt_altq.h>
+#if (__FreeBSD__ != 2)
+#include <rtems/freebsd/local/opt_inet.h>
+#ifdef __FreeBSD__
+#include <rtems/freebsd/local/opt_inet6.h>
+#endif
+#endif
+#endif /* __FreeBSD__ || __NetBSD__ */
+
+#ifdef ALTQ_HFSC /* hfsc is enabled by ALTQ_HFSC option in opt_altq.h */
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/errno.h>
+#include <rtems/freebsd/sys/queue.h>
+#if 1 /* ALTQ3_COMPAT */
+#include <rtems/freebsd/sys/sockio.h>
+#include <rtems/freebsd/sys/proc.h>
+#include <rtems/freebsd/sys/kernel.h>
+#endif /* ALTQ3_COMPAT */
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/netinet/in.h>
+
+#include <rtems/freebsd/net/pfvar.h>
+#include <rtems/freebsd/altq/altq.h>
+#include <rtems/freebsd/altq/altq_hfsc.h>
+#ifdef ALTQ3_COMPAT
+#include <rtems/freebsd/altq/altq_conf.h>
+#endif
+
+/*
+ * function prototypes
+ */
+static int hfsc_clear_interface(struct hfsc_if *);
+static int hfsc_request(struct ifaltq *, int, void *);
+static void hfsc_purge(struct hfsc_if *);
+static struct hfsc_class *hfsc_class_create(struct hfsc_if *,
+ struct service_curve *, struct service_curve *, struct service_curve *,
+ struct hfsc_class *, int, int, int);
+static int hfsc_class_destroy(struct hfsc_class *);
+static struct hfsc_class *hfsc_nextclass(struct hfsc_class *);
+static int hfsc_enqueue(struct ifaltq *, struct mbuf *,
+ struct altq_pktattr *);
+static struct mbuf *hfsc_dequeue(struct ifaltq *, int);
+
+static int hfsc_addq(struct hfsc_class *, struct mbuf *);
+static struct mbuf *hfsc_getq(struct hfsc_class *);
+static struct mbuf *hfsc_pollq(struct hfsc_class *);
+static void hfsc_purgeq(struct hfsc_class *);
+
+static void update_cfmin(struct hfsc_class *);
+static void set_active(struct hfsc_class *, int);
+static void set_passive(struct hfsc_class *);
+
+static void init_ed(struct hfsc_class *, int);
+static void update_ed(struct hfsc_class *, int);
+static void update_d(struct hfsc_class *, int);
+static void init_vf(struct hfsc_class *, int);
+static void update_vf(struct hfsc_class *, int, u_int64_t);
+static ellist_t *ellist_alloc(void);
+static void ellist_destroy(ellist_t *);
+static void ellist_insert(struct hfsc_class *);
+static void ellist_remove(struct hfsc_class *);
+static void ellist_update(struct hfsc_class *);
+struct hfsc_class *ellist_get_mindl(ellist_t *, u_int64_t);
+static actlist_t *actlist_alloc(void);
+static void actlist_destroy(actlist_t *);
+static void actlist_insert(struct hfsc_class *);
+static void actlist_remove(struct hfsc_class *);
+static void actlist_update(struct hfsc_class *);
+
+static struct hfsc_class *actlist_firstfit(struct hfsc_class *,
+ u_int64_t);
+
+static __inline u_int64_t seg_x2y(u_int64_t, u_int64_t);
+static __inline u_int64_t seg_y2x(u_int64_t, u_int64_t);
+static __inline u_int64_t m2sm(u_int);
+static __inline u_int64_t m2ism(u_int);
+static __inline u_int64_t d2dx(u_int);
+static u_int sm2m(u_int64_t);
+static u_int dx2d(u_int64_t);
+
+static void sc2isc(struct service_curve *, struct internal_sc *);
+static void rtsc_init(struct runtime_sc *, struct internal_sc *,
+ u_int64_t, u_int64_t);
+static u_int64_t rtsc_y2x(struct runtime_sc *, u_int64_t);
+static u_int64_t rtsc_x2y(struct runtime_sc *, u_int64_t);
+static void rtsc_min(struct runtime_sc *, struct internal_sc *,
+ u_int64_t, u_int64_t);
+
+static void get_class_stats(struct hfsc_classstats *,
+ struct hfsc_class *);
+static struct hfsc_class *clh_to_clp(struct hfsc_if *, u_int32_t);
+
+
+#ifdef ALTQ3_COMPAT
+static struct hfsc_if *hfsc_attach(struct ifaltq *, u_int);
+static int hfsc_detach(struct hfsc_if *);
+static int hfsc_class_modify(struct hfsc_class *, struct service_curve *,
+ struct service_curve *, struct service_curve *);
+
+static int hfsccmd_if_attach(struct hfsc_attach *);
+static int hfsccmd_if_detach(struct hfsc_interface *);
+static int hfsccmd_add_class(struct hfsc_add_class *);
+static int hfsccmd_delete_class(struct hfsc_delete_class *);
+static int hfsccmd_modify_class(struct hfsc_modify_class *);
+static int hfsccmd_add_filter(struct hfsc_add_filter *);
+static int hfsccmd_delete_filter(struct hfsc_delete_filter *);
+static int hfsccmd_class_stats(struct hfsc_class_stats *);
+
+altqdev_decl(hfsc);
+#endif /* ALTQ3_COMPAT */
+
+/*
+ * macros
+ */
+#define is_a_parent_class(cl) ((cl)->cl_children != NULL)
+
+#define HT_INFINITY 0xffffffffffffffffLL /* infinite time value */
+
+#ifdef ALTQ3_COMPAT
+/* hif_list keeps all hfsc_if's allocated. */
+static struct hfsc_if *hif_list = NULL;
+#endif /* ALTQ3_COMPAT */
+
+int
+hfsc_pfattach(struct pf_altq *a)
+{
+ struct ifnet *ifp;
+ int s, error;
+
+ if ((ifp = ifunit(a->ifname)) == NULL || a->altq_disc == NULL)
+ return (EINVAL);
+#ifdef __NetBSD__
+ s = splnet();
+#else
+ s = splimp();
+#endif
+ error = altq_attach(&ifp->if_snd, ALTQT_HFSC, a->altq_disc,
+ hfsc_enqueue, hfsc_dequeue, hfsc_request, NULL, NULL);
+ splx(s);
+ return (error);
+}
+
+int
+hfsc_add_altq(struct pf_altq *a)
+{
+ struct hfsc_if *hif;
+ struct ifnet *ifp;
+
+ if ((ifp = ifunit(a->ifname)) == NULL)
+ return (EINVAL);
+ if (!ALTQ_IS_READY(&ifp->if_snd))
+ return (ENODEV);
+
+ hif = malloc(sizeof(struct hfsc_if), M_DEVBUF, M_WAITOK);
+ if (hif == NULL)
+ return (ENOMEM);
+ bzero(hif, sizeof(struct hfsc_if));
+
+ hif->hif_eligible = ellist_alloc();
+ if (hif->hif_eligible == NULL) {
+ free(hif, M_DEVBUF);
+ return (ENOMEM);
+ }
+
+ hif->hif_ifq = &ifp->if_snd;
+
+ /* keep the state in pf_altq */
+ a->altq_disc = hif;
+
+ return (0);
+}
+
+int
+hfsc_remove_altq(struct pf_altq *a)
+{
+ struct hfsc_if *hif;
+
+ if ((hif = a->altq_disc) == NULL)
+ return (EINVAL);
+ a->altq_disc = NULL;
+
+ (void)hfsc_clear_interface(hif);
+ (void)hfsc_class_destroy(hif->hif_rootclass);
+
+ ellist_destroy(hif->hif_eligible);
+
+ free(hif, M_DEVBUF);
+
+ return (0);
+}
+
+int
+hfsc_add_queue(struct pf_altq *a)
+{
+ struct hfsc_if *hif;
+ struct hfsc_class *cl, *parent;
+ struct hfsc_opts *opts;
+ struct service_curve rtsc, lssc, ulsc;
+
+ if ((hif = a->altq_disc) == NULL)
+ return (EINVAL);
+
+ opts = &a->pq_u.hfsc_opts;
+
+ if (a->parent_qid == HFSC_NULLCLASS_HANDLE &&
+ hif->hif_rootclass == NULL)
+ parent = NULL;
+ else if ((parent = clh_to_clp(hif, a->parent_qid)) == NULL)
+ return (EINVAL);
+
+ if (a->qid == 0)
+ return (EINVAL);
+
+ if (clh_to_clp(hif, a->qid) != NULL)
+ return (EBUSY);
+
+ rtsc.m1 = opts->rtsc_m1;
+ rtsc.d = opts->rtsc_d;
+ rtsc.m2 = opts->rtsc_m2;
+ lssc.m1 = opts->lssc_m1;
+ lssc.d = opts->lssc_d;
+ lssc.m2 = opts->lssc_m2;
+ ulsc.m1 = opts->ulsc_m1;
+ ulsc.d = opts->ulsc_d;
+ ulsc.m2 = opts->ulsc_m2;
+
+ cl = hfsc_class_create(hif, &rtsc, &lssc, &ulsc,
+ parent, a->qlimit, opts->flags, a->qid);
+ if (cl == NULL)
+ return (ENOMEM);
+
+ return (0);
+}
+
+int
+hfsc_remove_queue(struct pf_altq *a)
+{
+ struct hfsc_if *hif;
+ struct hfsc_class *cl;
+
+ if ((hif = a->altq_disc) == NULL)
+ return (EINVAL);
+
+ if ((cl = clh_to_clp(hif, a->qid)) == NULL)
+ return (EINVAL);
+
+ return (hfsc_class_destroy(cl));
+}
+
+int
+hfsc_getqstats(struct pf_altq *a, void *ubuf, int *nbytes)
+{
+ struct hfsc_if *hif;
+ struct hfsc_class *cl;
+ struct hfsc_classstats stats;
+ int error = 0;
+
+ if ((hif = altq_lookup(a->ifname, ALTQT_HFSC)) == NULL)
+ return (EBADF);
+
+ if ((cl = clh_to_clp(hif, a->qid)) == NULL)
+ return (EINVAL);
+
+ if (*nbytes < sizeof(stats))
+ return (EINVAL);
+
+ get_class_stats(&stats, cl);
+
+ if ((error = copyout((caddr_t)&stats, ubuf, sizeof(stats))) != 0)
+ return (error);
+ *nbytes = sizeof(stats);
+ return (0);
+}
+
+/*
+ * bring the interface back to the initial state by discarding
+ * all the filters and classes except the root class.
+ */
+static int
+hfsc_clear_interface(struct hfsc_if *hif)
+{
+ struct hfsc_class *cl;
+
+#ifdef ALTQ3_COMPAT
+ /* free the filters for this interface */
+ acc_discard_filters(&hif->hif_classifier, NULL, 1);
+#endif
+
+ /* clear out the classes */
+ while (hif->hif_rootclass != NULL &&
+ (cl = hif->hif_rootclass->cl_children) != NULL) {
+ /*
+ * remove the first leaf class found in the hierarchy
+ * then start over
+ */
+ for (; cl != NULL; cl = hfsc_nextclass(cl)) {
+ if (!is_a_parent_class(cl)) {
+ (void)hfsc_class_destroy(cl);
+ break;
+ }
+ }
+ }
+
+ return (0);
+}
+
+static int
+hfsc_request(struct ifaltq *ifq, int req, void *arg)
+{
+ struct hfsc_if *hif = (struct hfsc_if *)ifq->altq_disc;
+
+ IFQ_LOCK_ASSERT(ifq);
+
+ switch (req) {
+ case ALTRQ_PURGE:
+ hfsc_purge(hif);
+ break;
+ }
+ return (0);
+}
+
+/* discard all the queued packets on the interface */
+static void
+hfsc_purge(struct hfsc_if *hif)
+{
+ struct hfsc_class *cl;
+
+ for (cl = hif->hif_rootclass; cl != NULL; cl = hfsc_nextclass(cl))
+ if (!qempty(cl->cl_q))
+ hfsc_purgeq(cl);
+ if (ALTQ_IS_ENABLED(hif->hif_ifq))
+ hif->hif_ifq->ifq_len = 0;
+}
+
+struct hfsc_class *
+hfsc_class_create(struct hfsc_if *hif, struct service_curve *rsc,
+ struct service_curve *fsc, struct service_curve *usc,
+ struct hfsc_class *parent, int qlimit, int flags, int qid)
+{
+ struct hfsc_class *cl, *p;
+ int i, s;
+
+ if (hif->hif_classes >= HFSC_MAX_CLASSES)
+ return (NULL);
+
+#ifndef ALTQ_RED
+ if (flags & HFCF_RED) {
+#ifdef ALTQ_DEBUG
+ printf("hfsc_class_create: RED not configured for HFSC!\n");
+#endif
+ return (NULL);
+ }
+#endif
+
+ cl = malloc(sizeof(struct hfsc_class), M_DEVBUF, M_WAITOK);
+ if (cl == NULL)
+ return (NULL);
+ bzero(cl, sizeof(struct hfsc_class));
+
+ cl->cl_q = malloc(sizeof(class_queue_t), M_DEVBUF, M_WAITOK);
+ if (cl->cl_q == NULL)
+ goto err_ret;
+ bzero(cl->cl_q, sizeof(class_queue_t));
+
+ cl->cl_actc = actlist_alloc();
+ if (cl->cl_actc == NULL)
+ goto err_ret;
+
+ if (qlimit == 0)
+ qlimit = 50; /* use default */
+ qlimit(cl->cl_q) = qlimit;
+ qtype(cl->cl_q) = Q_DROPTAIL;
+ qlen(cl->cl_q) = 0;
+ cl->cl_flags = flags;
+#ifdef ALTQ_RED
+ if (flags & (HFCF_RED|HFCF_RIO)) {
+ int red_flags, red_pkttime;
+ u_int m2;
+
+ m2 = 0;
+ if (rsc != NULL && rsc->m2 > m2)
+ m2 = rsc->m2;
+ if (fsc != NULL && fsc->m2 > m2)
+ m2 = fsc->m2;
+ if (usc != NULL && usc->m2 > m2)
+ m2 = usc->m2;
+
+ red_flags = 0;
+ if (flags & HFCF_ECN)
+ red_flags |= REDF_ECN;
+#ifdef ALTQ_RIO
+ if (flags & HFCF_CLEARDSCP)
+ red_flags |= RIOF_CLEARDSCP;
+#endif
+ if (m2 < 8)
+ red_pkttime = 1000 * 1000 * 1000; /* 1 sec */
+ else
+ red_pkttime = (int64_t)hif->hif_ifq->altq_ifp->if_mtu
+ * 1000 * 1000 * 1000 / (m2 / 8);
+ if (flags & HFCF_RED) {
+ cl->cl_red = red_alloc(0, 0,
+ qlimit(cl->cl_q) * 10/100,
+ qlimit(cl->cl_q) * 30/100,
+ red_flags, red_pkttime);
+ if (cl->cl_red != NULL)
+ qtype(cl->cl_q) = Q_RED;
+ }
+#ifdef ALTQ_RIO
+ else {
+ cl->cl_red = (red_t *)rio_alloc(0, NULL,
+ red_flags, red_pkttime);
+ if (cl->cl_red != NULL)
+ qtype(cl->cl_q) = Q_RIO;
+ }
+#endif
+ }
+#endif /* ALTQ_RED */
+
+ if (rsc != NULL && (rsc->m1 != 0 || rsc->m2 != 0)) {
+ cl->cl_rsc = malloc(sizeof(struct internal_sc),
+ M_DEVBUF, M_WAITOK);
+ if (cl->cl_rsc == NULL)
+ goto err_ret;
+ sc2isc(rsc, cl->cl_rsc);
+ rtsc_init(&cl->cl_deadline, cl->cl_rsc, 0, 0);
+ rtsc_init(&cl->cl_eligible, cl->cl_rsc, 0, 0);
+ }
+ if (fsc != NULL && (fsc->m1 != 0 || fsc->m2 != 0)) {
+ cl->cl_fsc = malloc(sizeof(struct internal_sc),
+ M_DEVBUF, M_WAITOK);
+ if (cl->cl_fsc == NULL)
+ goto err_ret;
+ sc2isc(fsc, cl->cl_fsc);
+ rtsc_init(&cl->cl_virtual, cl->cl_fsc, 0, 0);
+ }
+ if (usc != NULL && (usc->m1 != 0 || usc->m2 != 0)) {
+ cl->cl_usc = malloc(sizeof(struct internal_sc),
+ M_DEVBUF, M_WAITOK);
+ if (cl->cl_usc == NULL)
+ goto err_ret;
+ sc2isc(usc, cl->cl_usc);
+ rtsc_init(&cl->cl_ulimit, cl->cl_usc, 0, 0);
+ }
+
+ cl->cl_id = hif->hif_classid++;
+ cl->cl_handle = qid;
+ cl->cl_hif = hif;
+ cl->cl_parent = parent;
+
+#ifdef __NetBSD__
+ s = splnet();
+#else
+ s = splimp();
+#endif
+ IFQ_LOCK(hif->hif_ifq);
+ hif->hif_classes++;
+
+ /*
+ * find a free slot in the class table. if the slot matching
+ * the lower bits of qid is free, use this slot. otherwise,
+ * use the first free slot.
+ */
+ i = qid % HFSC_MAX_CLASSES;
+ if (hif->hif_class_tbl[i] == NULL)
+ hif->hif_class_tbl[i] = cl;
+ else {
+ for (i = 0; i < HFSC_MAX_CLASSES; i++)
+ if (hif->hif_class_tbl[i] == NULL) {
+ hif->hif_class_tbl[i] = cl;
+ break;
+ }
+ if (i == HFSC_MAX_CLASSES) {
+ IFQ_UNLOCK(hif->hif_ifq);
+ splx(s);
+ goto err_ret;
+ }
+ }
+
+ if (flags & HFCF_DEFAULTCLASS)
+ hif->hif_defaultclass = cl;
+
+ if (parent == NULL) {
+ /* this is root class */
+ hif->hif_rootclass = cl;
+ } else {
+ /* add this class to the children list of the parent */
+ if ((p = parent->cl_children) == NULL)
+ parent->cl_children = cl;
+ else {
+ while (p->cl_siblings != NULL)
+ p = p->cl_siblings;
+ p->cl_siblings = cl;
+ }
+ }
+ IFQ_UNLOCK(hif->hif_ifq);
+ splx(s);
+
+ return (cl);
+
+ err_ret:
+ if (cl->cl_actc != NULL)
+ actlist_destroy(cl->cl_actc);
+ if (cl->cl_red != NULL) {
+#ifdef ALTQ_RIO
+ if (q_is_rio(cl->cl_q))
+ rio_destroy((rio_t *)cl->cl_red);
+#endif
+#ifdef ALTQ_RED
+ if (q_is_red(cl->cl_q))
+ red_destroy(cl->cl_red);
+#endif
+ }
+ if (cl->cl_fsc != NULL)
+ free(cl->cl_fsc, M_DEVBUF);
+ if (cl->cl_rsc != NULL)
+ free(cl->cl_rsc, M_DEVBUF);
+ if (cl->cl_usc != NULL)
+ free(cl->cl_usc, M_DEVBUF);
+ if (cl->cl_q != NULL)
+ free(cl->cl_q, M_DEVBUF);
+ free(cl, M_DEVBUF);
+ return (NULL);
+}
+
+static int
+hfsc_class_destroy(struct hfsc_class *cl)
+{
+ int i, s;
+
+ if (cl == NULL)
+ return (0);
+
+ if (is_a_parent_class(cl))
+ return (EBUSY);
+
+#ifdef __NetBSD__
+ s = splnet();
+#else
+ s = splimp();
+#endif
+ IFQ_LOCK(cl->cl_hif->hif_ifq);
+
+#ifdef ALTQ3_COMPAT
+ /* delete filters referencing to this class */
+ acc_discard_filters(&cl->cl_hif->hif_classifier, cl, 0);
+#endif /* ALTQ3_COMPAT */
+
+ if (!qempty(cl->cl_q))
+ hfsc_purgeq(cl);
+
+ if (cl->cl_parent == NULL) {
+ /* this is root class */
+ } else {
+ struct hfsc_class *p = cl->cl_parent->cl_children;
+
+ if (p == cl)
+ cl->cl_parent->cl_children = cl->cl_siblings;
+ else do {
+ if (p->cl_siblings == cl) {
+ p->cl_siblings = cl->cl_siblings;
+ break;
+ }
+ } while ((p = p->cl_siblings) != NULL);
+ ASSERT(p != NULL);
+ }
+
+ for (i = 0; i < HFSC_MAX_CLASSES; i++)
+ if (cl->cl_hif->hif_class_tbl[i] == cl) {
+ cl->cl_hif->hif_class_tbl[i] = NULL;
+ break;
+ }
+
+ cl->cl_hif->hif_classes--;
+ IFQ_UNLOCK(cl->cl_hif->hif_ifq);
+ splx(s);
+
+ actlist_destroy(cl->cl_actc);
+
+ if (cl->cl_red != NULL) {
+#ifdef ALTQ_RIO
+ if (q_is_rio(cl->cl_q))
+ rio_destroy((rio_t *)cl->cl_red);
+#endif
+#ifdef ALTQ_RED
+ if (q_is_red(cl->cl_q))
+ red_destroy(cl->cl_red);
+#endif
+ }
+
+ IFQ_LOCK(cl->cl_hif->hif_ifq);
+ if (cl == cl->cl_hif->hif_rootclass)
+ cl->cl_hif->hif_rootclass = NULL;
+ if (cl == cl->cl_hif->hif_defaultclass)
+ cl->cl_hif->hif_defaultclass = NULL;
+ IFQ_UNLOCK(cl->cl_hif->hif_ifq);
+
+ if (cl->cl_usc != NULL)
+ free(cl->cl_usc, M_DEVBUF);
+ if (cl->cl_fsc != NULL)
+ free(cl->cl_fsc, M_DEVBUF);
+ if (cl->cl_rsc != NULL)
+ free(cl->cl_rsc, M_DEVBUF);
+ free(cl->cl_q, M_DEVBUF);
+ free(cl, M_DEVBUF);
+
+ return (0);
+}
+
+/*
+ * hfsc_nextclass returns the next class in the tree.
+ * usage:
+ * for (cl = hif->hif_rootclass; cl != NULL; cl = hfsc_nextclass(cl))
+ * do_something;
+ */
+static struct hfsc_class *
+hfsc_nextclass(struct hfsc_class *cl)
+{
+ if (cl->cl_children != NULL)
+ cl = cl->cl_children;
+ else if (cl->cl_siblings != NULL)
+ cl = cl->cl_siblings;
+ else {
+ while ((cl = cl->cl_parent) != NULL)
+ if (cl->cl_siblings) {
+ cl = cl->cl_siblings;
+ break;
+ }
+ }
+
+ return (cl);
+}
+
+/*
+ * hfsc_enqueue is an enqueue function to be registered to
+ * (*altq_enqueue) in struct ifaltq.
+ */
+static int
+hfsc_enqueue(struct ifaltq *ifq, struct mbuf *m, struct altq_pktattr *pktattr)
+{
+ struct hfsc_if *hif = (struct hfsc_if *)ifq->altq_disc;
+ struct hfsc_class *cl;
+ struct pf_mtag *t;
+ int len;
+
+ IFQ_LOCK_ASSERT(ifq);
+
+ /* grab class set by classifier */
+ if ((m->m_flags & M_PKTHDR) == 0) {
+ /* should not happen */
+#if defined(__NetBSD__) || defined(__OpenBSD__)\
+ || (defined(__FreeBSD__) && __FreeBSD_version >= 501113)
+ printf("altq: packet for %s does not have pkthdr\n",
+ ifq->altq_ifp->if_xname);
+#else
+ printf("altq: packet for %s%d does not have pkthdr\n",
+ ifq->altq_ifp->if_name, ifq->altq_ifp->if_unit);
+#endif
+ m_freem(m);
+ return (ENOBUFS);
+ }
+ cl = NULL;
+ if ((t = pf_find_mtag(m)) != NULL)
+ cl = clh_to_clp(hif, t->qid);
+#ifdef ALTQ3_COMPAT
+ else if ((ifq->altq_flags & ALTQF_CLASSIFY) && pktattr != NULL)
+ cl = pktattr->pattr_class;
+#endif
+ if (cl == NULL || is_a_parent_class(cl)) {
+ cl = hif->hif_defaultclass;
+ if (cl == NULL) {
+ m_freem(m);
+ return (ENOBUFS);
+ }
+ }
+#ifdef ALTQ3_COMPAT
+ if (pktattr != NULL)
+ cl->cl_pktattr = pktattr; /* save proto hdr used by ECN */
+ else
+#endif
+ cl->cl_pktattr = NULL;
+ len = m_pktlen(m);
+ if (hfsc_addq(cl, m) != 0) {
+ /* drop occurred. mbuf was freed in hfsc_addq. */
+ PKTCNTR_ADD(&cl->cl_stats.drop_cnt, len);
+ return (ENOBUFS);
+ }
+ IFQ_INC_LEN(ifq);
+ cl->cl_hif->hif_packets++;
+
+ /* successfully queued. */
+ if (qlen(cl->cl_q) == 1)
+ set_active(cl, m_pktlen(m));
+
+ return (0);
+}
+
+/*
+ * hfsc_dequeue is a dequeue function to be registered to
+ * (*altq_dequeue) in struct ifaltq.
+ *
+ * note: ALTDQ_POLL returns the next packet without removing the packet
+ * from the queue. ALTDQ_REMOVE is a normal dequeue operation.
+ * ALTDQ_REMOVE must return the same packet if called immediately
+ * after ALTDQ_POLL.
+ */
+static struct mbuf *
+hfsc_dequeue(struct ifaltq *ifq, int op)
+{
+ struct hfsc_if *hif = (struct hfsc_if *)ifq->altq_disc;
+ struct hfsc_class *cl;
+ struct mbuf *m;
+ int len, next_len;
+ int realtime = 0;
+ u_int64_t cur_time;
+
+ IFQ_LOCK_ASSERT(ifq);
+
+ if (hif->hif_packets == 0)
+ /* no packet in the tree */
+ return (NULL);
+
+ cur_time = read_machclk();
+
+ if (op == ALTDQ_REMOVE && hif->hif_pollcache != NULL) {
+
+ cl = hif->hif_pollcache;
+ hif->hif_pollcache = NULL;
+ /* check if the class was scheduled by real-time criteria */
+ if (cl->cl_rsc != NULL)
+ realtime = (cl->cl_e <= cur_time);
+ } else {
+ /*
+ * if there are eligible classes, use real-time criteria.
+ * find the class with the minimum deadline among
+ * the eligible classes.
+ */
+ if ((cl = ellist_get_mindl(hif->hif_eligible, cur_time))
+ != NULL) {
+ realtime = 1;
+ } else {
+#ifdef ALTQ_DEBUG
+ int fits = 0;
+#endif
+ /*
+ * use link-sharing criteria
+ * get the class with the minimum vt in the hierarchy
+ */
+ cl = hif->hif_rootclass;
+ while (is_a_parent_class(cl)) {
+
+ cl = actlist_firstfit(cl, cur_time);
+ if (cl == NULL) {
+#ifdef ALTQ_DEBUG
+ if (fits > 0)
+ printf("%d fit but none found\n",fits);
+#endif
+ return (NULL);
+ }
+ /*
+ * update parent's cl_cvtmin.
+ * don't update if the new vt is smaller.
+ */
+ if (cl->cl_parent->cl_cvtmin < cl->cl_vt)
+ cl->cl_parent->cl_cvtmin = cl->cl_vt;
+#ifdef ALTQ_DEBUG
+ fits++;
+#endif
+ }
+ }
+
+ if (op == ALTDQ_POLL) {
+ hif->hif_pollcache = cl;
+ m = hfsc_pollq(cl);
+ return (m);
+ }
+ }
+
+ m = hfsc_getq(cl);
+ if (m == NULL)
+ panic("hfsc_dequeue:");
+ len = m_pktlen(m);
+ cl->cl_hif->hif_packets--;
+ IFQ_DEC_LEN(ifq);
+ PKTCNTR_ADD(&cl->cl_stats.xmit_cnt, len);
+
+ update_vf(cl, len, cur_time);
+ if (realtime)
+ cl->cl_cumul += len;
+
+ if (!qempty(cl->cl_q)) {
+ if (cl->cl_rsc != NULL) {
+ /* update ed */
+ next_len = m_pktlen(qhead(cl->cl_q));
+
+ if (realtime)
+ update_ed(cl, next_len);
+ else
+ update_d(cl, next_len);
+ }
+ } else {
+ /* the class becomes passive */
+ set_passive(cl);
+ }
+
+ return (m);
+}
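+
+/*
+ * The pollcache above is what makes the ALTDQ_POLL/ALTDQ_REMOVE
+ * contract cheap: a poll picks the class, stashes it in
+ * hif_pollcache and peeks at its queue head, and the immediately
+ * following remove dequeues from the cached class instead of running
+ * the scheduling decision again.
+ */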
+
+static int
+hfsc_addq(struct hfsc_class *cl, struct mbuf *m)
+{
+
+#ifdef ALTQ_RIO
+ if (q_is_rio(cl->cl_q))
+ return rio_addq((rio_t *)cl->cl_red, cl->cl_q,
+ m, cl->cl_pktattr);
+#endif
+#ifdef ALTQ_RED
+ if (q_is_red(cl->cl_q))
+ return red_addq(cl->cl_red, cl->cl_q, m, cl->cl_pktattr);
+#endif
+ if (qlen(cl->cl_q) >= qlimit(cl->cl_q)) {
+ m_freem(m);
+ return (-1);
+ }
+
+ if (cl->cl_flags & HFCF_CLEARDSCP)
+ write_dsfield(m, cl->cl_pktattr, 0);
+
+ _addq(cl->cl_q, m);
+
+ return (0);
+}
+
+static struct mbuf *
+hfsc_getq(struct hfsc_class *cl)
+{
+#ifdef ALTQ_RIO
+ if (q_is_rio(cl->cl_q))
+ return rio_getq((rio_t *)cl->cl_red, cl->cl_q);
+#endif
+#ifdef ALTQ_RED
+ if (q_is_red(cl->cl_q))
+ return red_getq(cl->cl_red, cl->cl_q);
+#endif
+ return _getq(cl->cl_q);
+}
+
+static struct mbuf *
+hfsc_pollq(struct hfsc_class *cl)
+{
+ return qhead(cl->cl_q);
+}
+
+static void
+hfsc_purgeq(struct hfsc_class *cl)
+{
+ struct mbuf *m;
+
+ if (qempty(cl->cl_q))
+ return;
+
+ while ((m = _getq(cl->cl_q)) != NULL) {
+ PKTCNTR_ADD(&cl->cl_stats.drop_cnt, m_pktlen(m));
+ m_freem(m);
+ cl->cl_hif->hif_packets--;
+ IFQ_DEC_LEN(cl->cl_hif->hif_ifq);
+ }
+ ASSERT(qlen(cl->cl_q) == 0);
+
+ update_vf(cl, 0, 0); /* remove cl from the actlist */
+ set_passive(cl);
+}
+
+static void
+set_active(struct hfsc_class *cl, int len)
+{
+ if (cl->cl_rsc != NULL)
+ init_ed(cl, len);
+ if (cl->cl_fsc != NULL)
+ init_vf(cl, len);
+
+ cl->cl_stats.period++;
+}
+
+static void
+set_passive(struct hfsc_class *cl)
+{
+ if (cl->cl_rsc != NULL)
+ ellist_remove(cl);
+
+ /*
+ * actlist is now handled in update_vf() so that update_vf(cl, 0, 0)
+ * needs to be called explicitly to remove a class from actlist
+ */
+}
+
+static void
+init_ed(struct hfsc_class *cl, int next_len)
+{
+ u_int64_t cur_time;
+
+ cur_time = read_machclk();
+
+ /* update the deadline curve */
+ rtsc_min(&cl->cl_deadline, cl->cl_rsc, cur_time, cl->cl_cumul);
+
+ /*
+ * update the eligible curve.
+ * for concave, it is equal to the deadline curve.
+ * for convex, it is a linear curve with slope m2.
+ */
+ cl->cl_eligible = cl->cl_deadline;
+ if (cl->cl_rsc->sm1 <= cl->cl_rsc->sm2) {
+ cl->cl_eligible.dx = 0;
+ cl->cl_eligible.dy = 0;
+ }
+
+ /* compute e and d */
+ cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
+ cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);
+
+ ellist_insert(cl);
+}
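+
+/*
+ * Concave/convex example for the test above (made-up figures): a
+ * real-time curve with m1 = 10 Mbps over the initial offset d and
+ * m2 = 5 Mbps afterwards is concave (sm1 > sm2), so the eligible
+ * curve tracks the deadline curve; with the slopes swapped the curve
+ * is convex, the (dx, dy) knee is zeroed out, and eligibility
+ * follows the straight m2 line.
+ */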
+
+static void
+update_ed(struct hfsc_class *cl, int next_len)
+{
+ cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
+ cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);
+
+ ellist_update(cl);
+}
+
+static void
+update_d(struct hfsc_class *cl, int next_len)
+{
+ cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);
+}
+
+static void
+init_vf(struct hfsc_class *cl, int len)
+{
+ struct hfsc_class *max_cl, *p;
+ u_int64_t vt, f, cur_time;
+ int go_active;
+
+ cur_time = 0;
+ go_active = 1;
+ for ( ; cl->cl_parent != NULL; cl = cl->cl_parent) {
+
+ if (go_active && cl->cl_nactive++ == 0)
+ go_active = 1;
+ else
+ go_active = 0;
+
+ if (go_active) {
+ max_cl = actlist_last(cl->cl_parent->cl_actc);
+ if (max_cl != NULL) {
+ /*
+ * set vt to the average of the min and max
+ * classes. if the parent's period didn't
+ * change, don't decrease vt of the class.
+ */
+ vt = max_cl->cl_vt;
+ if (cl->cl_parent->cl_cvtmin != 0)
+ vt = (cl->cl_parent->cl_cvtmin + vt)/2;
+
+ if (cl->cl_parent->cl_vtperiod !=
+ cl->cl_parentperiod || vt > cl->cl_vt)
+ cl->cl_vt = vt;
+ } else {
+ /*
+ * first child for a new parent backlog period.
+ * add parent's cvtmax to vtoff of children
+ * to make a new vt (vtoff + vt) larger than
+ * the vt in the last period for all children.
+ */
+ vt = cl->cl_parent->cl_cvtmax;
+ for (p = cl->cl_parent->cl_children; p != NULL;
+ p = p->cl_siblings)
+ p->cl_vtoff += vt;
+ cl->cl_vt = 0;
+ cl->cl_parent->cl_cvtmax = 0;
+ cl->cl_parent->cl_cvtmin = 0;
+ }
+ cl->cl_initvt = cl->cl_vt;
+
+ /* update the virtual curve */
+ vt = cl->cl_vt + cl->cl_vtoff;
+ rtsc_min(&cl->cl_virtual, cl->cl_fsc, vt, cl->cl_total);
+ if (cl->cl_virtual.x == vt) {
+ cl->cl_virtual.x -= cl->cl_vtoff;
+ cl->cl_vtoff = 0;
+ }
+ cl->cl_vtadj = 0;
+
+ cl->cl_vtperiod++; /* increment vt period */
+ cl->cl_parentperiod = cl->cl_parent->cl_vtperiod;
+ if (cl->cl_parent->cl_nactive == 0)
+ cl->cl_parentperiod++;
+ cl->cl_f = 0;
+
+ actlist_insert(cl);
+
+ if (cl->cl_usc != NULL) {
+ /* class has upper limit curve */
+ if (cur_time == 0)
+ cur_time = read_machclk();
+
+ /* update the ulimit curve */
+ rtsc_min(&cl->cl_ulimit, cl->cl_usc, cur_time,
+ cl->cl_total);
+ /* compute myf */
+ cl->cl_myf = rtsc_y2x(&cl->cl_ulimit,
+ cl->cl_total);
+ cl->cl_myfadj = 0;
+ }
+ }
+
+ if (cl->cl_myf > cl->cl_cfmin)
+ f = cl->cl_myf;
+ else
+ f = cl->cl_cfmin;
+ if (f != cl->cl_f) {
+ cl->cl_f = f;
+ update_cfmin(cl->cl_parent);
+ }
+ }
+}
+
+static void
+update_vf(struct hfsc_class *cl, int len, u_int64_t cur_time)
+{
+ u_int64_t f, myf_bound, delta;
+ int go_passive;
+
+ go_passive = qempty(cl->cl_q);
+
+ for (; cl->cl_parent != NULL; cl = cl->cl_parent) {
+
+ cl->cl_total += len;
+
+ if (cl->cl_fsc == NULL || cl->cl_nactive == 0)
+ continue;
+
+ if (go_passive && --cl->cl_nactive == 0)
+ go_passive = 1;
+ else
+ go_passive = 0;
+
+ if (go_passive) {
+ /* no more active child, going passive */
+
+ /* update cvtmax of the parent class */
+ if (cl->cl_vt > cl->cl_parent->cl_cvtmax)
+ cl->cl_parent->cl_cvtmax = cl->cl_vt;
+
+ /* remove this class from the vt list */
+ actlist_remove(cl);
+
+ update_cfmin(cl->cl_parent);
+
+ continue;
+ }
+
+ /*
+ * update vt and f
+ */
+ cl->cl_vt = rtsc_y2x(&cl->cl_virtual, cl->cl_total)
+ - cl->cl_vtoff + cl->cl_vtadj;
+
+ /*
+ * if vt of the class is smaller than cvtmin,
+ * the class was skipped in the past due to non-fit.
+ * if so, we need to adjust vtadj.
+ */
+ if (cl->cl_vt < cl->cl_parent->cl_cvtmin) {
+ cl->cl_vtadj += cl->cl_parent->cl_cvtmin - cl->cl_vt;
+ cl->cl_vt = cl->cl_parent->cl_cvtmin;
+ }
+
+ /* update the vt list */
+ actlist_update(cl);
+
+ if (cl->cl_usc != NULL) {
+ cl->cl_myf = cl->cl_myfadj
+ + rtsc_y2x(&cl->cl_ulimit, cl->cl_total);
+
+ /*
+ * if myf lags behind by more than one clock tick
+ * from the current time, adjust myfadj to prevent
+ * a rate-limited class from going greedy.
+ * in a steady state under rate-limiting, myf
+ * fluctuates within one clock tick.
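+			 * (illustrative example, added: with machclk_per_tick
+			 * = 10^6 and cl_myf lagging cur_time by 5*10^6, both
+			 * cl_myfadj and cl_myf are advanced by that delta, so
+			 * the class fits immediately instead of catching up.)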
+ */
+ myf_bound = cur_time - machclk_per_tick;
+ if (cl->cl_myf < myf_bound) {
+ delta = cur_time - cl->cl_myf;
+ cl->cl_myfadj += delta;
+ cl->cl_myf += delta;
+ }
+ }
+
+ /* cl_f is max(cl_myf, cl_cfmin) */
+ if (cl->cl_myf > cl->cl_cfmin)
+ f = cl->cl_myf;
+ else
+ f = cl->cl_cfmin;
+ if (f != cl->cl_f) {
+ cl->cl_f = f;
+ update_cfmin(cl->cl_parent);
+ }
+ }
+}
+
+static void
+update_cfmin(struct hfsc_class *cl)
+{
+ struct hfsc_class *p;
+ u_int64_t cfmin;
+
+ if (TAILQ_EMPTY(cl->cl_actc)) {
+ cl->cl_cfmin = 0;
+ return;
+ }
+ cfmin = HT_INFINITY;
+ TAILQ_FOREACH(p, cl->cl_actc, cl_actlist) {
+ if (p->cl_f == 0) {
+ cl->cl_cfmin = 0;
+ return;
+ }
+ if (p->cl_f < cfmin)
+ cfmin = p->cl_f;
+ }
+ cl->cl_cfmin = cfmin;
+}
+
+/*
+ * TAILQ based ellist and actlist implementation
+ * (ion wanted to make a calendar queue based implementation)
+ */
+/*
+ * eligible list holds backlogged classes being sorted by their eligible times.
+ * there is one eligible list per interface.
+ */
+
+static ellist_t *
+ellist_alloc(void)
+{
+ ellist_t *head;
+
+ head = malloc(sizeof(ellist_t), M_DEVBUF, M_WAITOK);
+ TAILQ_INIT(head);
+ return (head);
+}
+
+static void
+ellist_destroy(ellist_t *head)
+{
+ free(head, M_DEVBUF);
+}
+
+static void
+ellist_insert(struct hfsc_class *cl)
+{
+ struct hfsc_if *hif = cl->cl_hif;
+ struct hfsc_class *p;
+
+ /* check the last entry first */
+ if ((p = TAILQ_LAST(hif->hif_eligible, _eligible)) == NULL ||
+ p->cl_e <= cl->cl_e) {
+ TAILQ_INSERT_TAIL(hif->hif_eligible, cl, cl_ellist);
+ return;
+ }
+
+ TAILQ_FOREACH(p, hif->hif_eligible, cl_ellist) {
+ if (cl->cl_e < p->cl_e) {
+ TAILQ_INSERT_BEFORE(p, cl, cl_ellist);
+ return;
+ }
+ }
+ ASSERT(0); /* should not reach here */
+}
+
+static void
+ellist_remove(struct hfsc_class *cl)
+{
+ struct hfsc_if *hif = cl->cl_hif;
+
+ TAILQ_REMOVE(hif->hif_eligible, cl, cl_ellist);
+}
+
+static void
+ellist_update(struct hfsc_class *cl)
+{
+ struct hfsc_if *hif = cl->cl_hif;
+ struct hfsc_class *p, *last;
+
+ /*
+ * the eligible time of a class increases monotonically.
+ * if the next entry has a larger eligible time, nothing to do.
+ */
+ p = TAILQ_NEXT(cl, cl_ellist);
+ if (p == NULL || cl->cl_e <= p->cl_e)
+ return;
+
+ /* check the last entry */
+ last = TAILQ_LAST(hif->hif_eligible, _eligible);
+ ASSERT(last != NULL);
+ if (last->cl_e <= cl->cl_e) {
+ TAILQ_REMOVE(hif->hif_eligible, cl, cl_ellist);
+ TAILQ_INSERT_TAIL(hif->hif_eligible, cl, cl_ellist);
+ return;
+ }
+
+ /*
+ * the new position must be between the next entry
+ * and the last entry
+ */
+ while ((p = TAILQ_NEXT(p, cl_ellist)) != NULL) {
+ if (cl->cl_e < p->cl_e) {
+ TAILQ_REMOVE(hif->hif_eligible, cl, cl_ellist);
+ TAILQ_INSERT_BEFORE(p, cl, cl_ellist);
+ return;
+ }
+ }
+ ASSERT(0); /* should not reach here */
+}
+
+/* find the class with the minimum deadline among the eligible classes */
+struct hfsc_class *
+ellist_get_mindl(ellist_t *head, u_int64_t cur_time)
+{
+ struct hfsc_class *p, *cl = NULL;
+
+ TAILQ_FOREACH(p, head, cl_ellist) {
+ if (p->cl_e > cur_time)
+ break;
+ if (cl == NULL || p->cl_d < cl->cl_d)
+ cl = p;
+ }
+ return (cl);
+}
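+
+/*
+ * illustrative note (not in the original sources): with classes
+ * A (e=10, d=30) and B (e=20, d=25) on the list and cur_time = 20,
+ * both are eligible and B is returned because its deadline is
+ * smaller, even though A became eligible first.
+ */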
+
+/*
+ * active children list holds backlogged child classes being sorted
+ * by their virtual time.
+ * each intermediate class has one active children list.
+ */
+static actlist_t *
+actlist_alloc(void)
+{
+ actlist_t *head;
+
+ head = malloc(sizeof(actlist_t), M_DEVBUF, M_WAITOK);
+ TAILQ_INIT(head);
+ return (head);
+}
+
+static void
+actlist_destroy(actlist_t *head)
+{
+ free(head, M_DEVBUF);
+}
+
+static void
+actlist_insert(struct hfsc_class *cl)
+{
+ struct hfsc_class *p;
+
+ /* check the last entry first */
+ if ((p = TAILQ_LAST(cl->cl_parent->cl_actc, _active)) == NULL
+ || p->cl_vt <= cl->cl_vt) {
+ TAILQ_INSERT_TAIL(cl->cl_parent->cl_actc, cl, cl_actlist);
+ return;
+ }
+
+ TAILQ_FOREACH(p, cl->cl_parent->cl_actc, cl_actlist) {
+ if (cl->cl_vt < p->cl_vt) {
+ TAILQ_INSERT_BEFORE(p, cl, cl_actlist);
+ return;
+ }
+ }
+ ASSERT(0); /* should not reach here */
+}
+
+static void
+actlist_remove(struct hfsc_class *cl)
+{
+ TAILQ_REMOVE(cl->cl_parent->cl_actc, cl, cl_actlist);
+}
+
+static void
+actlist_update(struct hfsc_class *cl)
+{
+ struct hfsc_class *p, *last;
+
+ /*
+ * the virtual time of a class increases monotonically during its
+ * backlogged period.
+ * if the next entry has a larger virtual time, nothing to do.
+ */
+ p = TAILQ_NEXT(cl, cl_actlist);
+ if (p == NULL || cl->cl_vt < p->cl_vt)
+ return;
+
+ /* check the last entry */
+ last = TAILQ_LAST(cl->cl_parent->cl_actc, _active);
+ ASSERT(last != NULL);
+ if (last->cl_vt <= cl->cl_vt) {
+ TAILQ_REMOVE(cl->cl_parent->cl_actc, cl, cl_actlist);
+ TAILQ_INSERT_TAIL(cl->cl_parent->cl_actc, cl, cl_actlist);
+ return;
+ }
+
+ /*
+ * the new position must be between the next entry
+ * and the last entry
+ */
+ while ((p = TAILQ_NEXT(p, cl_actlist)) != NULL) {
+ if (cl->cl_vt < p->cl_vt) {
+ TAILQ_REMOVE(cl->cl_parent->cl_actc, cl, cl_actlist);
+ TAILQ_INSERT_BEFORE(p, cl, cl_actlist);
+ return;
+ }
+ }
+ ASSERT(0); /* should not reach here */
+}
+
+static struct hfsc_class *
+actlist_firstfit(struct hfsc_class *cl, u_int64_t cur_time)
+{
+ struct hfsc_class *p;
+
+ TAILQ_FOREACH(p, cl->cl_actc, cl_actlist) {
+ if (p->cl_f <= cur_time)
+ return (p);
+ }
+ return (NULL);
+}
+
+/*
+ * service curve support functions
+ *
+ * external service curve parameters
+ * m: bits/sec
+ * d: msec
+ * internal service curve parameters
+ * sm: (bytes/tsc_interval) << SM_SHIFT
+ * ism: (tsc_count/byte) << ISM_SHIFT
+ * dx: tsc_count
+ *
+ * SM_SHIFT and ISM_SHIFT are scaled in order to keep effective digits.
+ * we should be able to handle 100K-1Gbps linkspeed with 200MHz-1GHz CPU
+ * speed. SM_SHIFT and ISM_SHIFT are selected to have at least 3 effective
+ * digits in decimal using the following table.
+ *
+ * bits/sec 100Kbps 1Mbps 10Mbps 100Mbps 1Gbps
+ * ----------+-------------------------------------------------------
+ * bytes/nsec 12.5e-6 125e-6 1250e-6 12500e-6 125000e-6
+ * sm(500MHz) 25.0e-6 250e-6 2500e-6 25000e-6 250000e-6
+ * sm(200MHz) 62.5e-6 625e-6 6250e-6 62500e-6 625000e-6
+ *
+ * nsec/byte 80000 8000 800 80 8
+ * ism(500MHz) 40000 4000 400 40 4
+ * ism(200MHz) 16000 1600 160 16 1.6
+ */
+#define SM_SHIFT 24
+#define ISM_SHIFT 10
+
+#define SM_MASK ((1LL << SM_SHIFT) - 1)
+#define ISM_MASK ((1LL << ISM_SHIFT) - 1)
+
+static __inline u_int64_t
+seg_x2y(u_int64_t x, u_int64_t sm)
+{
+ u_int64_t y;
+
+ /*
+ * compute
+ * y = x * sm >> SM_SHIFT
+ * but divide it for the upper and lower bits to avoid overflow
+ */
+ y = (x >> SM_SHIFT) * sm + (((x & SM_MASK) * sm) >> SM_SHIFT);
+ return (y);
+}
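+
+/*
+ * overflow sketch (added for clarity): with x = 2^40 and sm = 2^30,
+ * a naive x * sm would need 70 bits.  the split keeps every
+ * intermediate within 64 bits: (x >> SM_SHIFT) * sm = 2^16 * 2^30 =
+ * 2^46, and ((x & SM_MASK) * sm) >> SM_SHIFT is at most about
+ * (2^24 * 2^30) >> 24 = 2^30; their sum equals (x * sm) >> SM_SHIFT
+ * up to truncation of the low SM_SHIFT bits.
+ */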
+
+static __inline u_int64_t
+seg_y2x(u_int64_t y, u_int64_t ism)
+{
+ u_int64_t x;
+
+ if (y == 0)
+ x = 0;
+ else if (ism == HT_INFINITY)
+ x = HT_INFINITY;
+ else {
+ x = (y >> ISM_SHIFT) * ism
+ + (((y & ISM_MASK) * ism) >> ISM_SHIFT);
+ }
+ return (x);
+}
+
+static __inline u_int64_t
+m2sm(u_int m)
+{
+ u_int64_t sm;
+
+ sm = ((u_int64_t)m << SM_SHIFT) / 8 / machclk_freq;
+ return (sm);
+}
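+
+/*
+ * worked example (illustrative, assuming machclk_freq = 500MHz):
+ * for m = 10Mbps the link moves 10^7 / 8 = 1.25e6 bytes/sec, i.e.
+ * 1.25e6 / 5e8 = 0.0025 bytes per clock count (the 2500e-6 entry in
+ * the table above); scaled by 2^SM_SHIFT, m2sm() returns about
+ * 0.0025 * 2^24 ~= 41943.
+ */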
+
+static __inline u_int64_t
+m2ism(u_int m)
+{
+ u_int64_t ism;
+
+ if (m == 0)
+ ism = HT_INFINITY;
+ else
+ ism = ((u_int64_t)machclk_freq << ISM_SHIFT) * 8 / m;
+ return (ism);
+}
+
+static __inline u_int64_t
+d2dx(u_int d)
+{
+ u_int64_t dx;
+
+ dx = ((u_int64_t)d * machclk_freq) / 1000;
+ return (dx);
+}
+
+static u_int
+sm2m(u_int64_t sm)
+{
+ u_int64_t m;
+
+ m = (sm * 8 * machclk_freq) >> SM_SHIFT;
+ return ((u_int)m);
+}
+
+static u_int
+dx2d(u_int64_t dx)
+{
+ u_int64_t d;
+
+ d = dx * 1000 / machclk_freq;
+ return ((u_int)d);
+}
+
+static void
+sc2isc(struct service_curve *sc, struct internal_sc *isc)
+{
+ isc->sm1 = m2sm(sc->m1);
+ isc->ism1 = m2ism(sc->m1);
+ isc->dx = d2dx(sc->d);
+ isc->dy = seg_x2y(isc->dx, isc->sm1);
+ isc->sm2 = m2sm(sc->m2);
+ isc->ism2 = m2ism(sc->m2);
+}
+
+/*
+ * initialize the runtime service curve with the given internal
+ * service curve starting at (x, y).
+ */
+static void
+rtsc_init(struct runtime_sc *rtsc, struct internal_sc * isc, u_int64_t x,
+ u_int64_t y)
+{
+ rtsc->x = x;
+ rtsc->y = y;
+ rtsc->sm1 = isc->sm1;
+ rtsc->ism1 = isc->ism1;
+ rtsc->dx = isc->dx;
+ rtsc->dy = isc->dy;
+ rtsc->sm2 = isc->sm2;
+ rtsc->ism2 = isc->ism2;
+}
+
+/*
+ * calculate the y-projection of the runtime service curve by the
+ * given x-projection value
+ */
+static u_int64_t
+rtsc_y2x(struct runtime_sc *rtsc, u_int64_t y)
+{
+ u_int64_t x;
+
+ if (y < rtsc->y)
+ x = rtsc->x;
+ else if (y <= rtsc->y + rtsc->dy) {
+ /* x belongs to the 1st segment */
+ if (rtsc->dy == 0)
+ x = rtsc->x + rtsc->dx;
+ else
+ x = rtsc->x + seg_y2x(y - rtsc->y, rtsc->ism1);
+ } else {
+ /* x belongs to the 2nd segment */
+ x = rtsc->x + rtsc->dx
+ + seg_y2x(y - rtsc->y - rtsc->dy, rtsc->ism2);
+ }
+ return (x);
+}
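+
+/*
+ * example (illustrative): for a curve with dy = 1000 bytes over
+ * dx = 10^6 clocks, a request of y - rtsc->y = 1500 bytes falls past
+ * the first segment, so the result is
+ * rtsc->x + dx + seg_y2x(500, ism2).
+ */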
+
+static u_int64_t
+rtsc_x2y(struct runtime_sc *rtsc, u_int64_t x)
+{
+ u_int64_t y;
+
+ if (x <= rtsc->x)
+ y = rtsc->y;
+ else if (x <= rtsc->x + rtsc->dx)
+ /* y belongs to the 1st segment */
+ y = rtsc->y + seg_x2y(x - rtsc->x, rtsc->sm1);
+ else
+ /* y belongs to the 2nd segment */
+ y = rtsc->y + rtsc->dy
+ + seg_x2y(x - rtsc->x - rtsc->dx, rtsc->sm2);
+ return (y);
+}
+
+/*
+ * update the runtime service curve by taking the minimum of the current
+ * runtime service curve and the service curve starting at (x, y).
+ */
+static void
+rtsc_min(struct runtime_sc *rtsc, struct internal_sc *isc, u_int64_t x,
+ u_int64_t y)
+{
+ u_int64_t y1, y2, dx, dy;
+
+ if (isc->sm1 <= isc->sm2) {
+ /* service curve is convex */
+ y1 = rtsc_x2y(rtsc, x);
+ if (y1 < y)
+ /* the current rtsc is smaller */
+ return;
+ rtsc->x = x;
+ rtsc->y = y;
+ return;
+ }
+
+ /*
+ * service curve is concave
+ * compute the two y values of the current rtsc
+ * y1: at x
+ * y2: at (x + dx)
+ */
+ y1 = rtsc_x2y(rtsc, x);
+ if (y1 <= y) {
+ /* rtsc is below isc, no change to rtsc */
+ return;
+ }
+
+ y2 = rtsc_x2y(rtsc, x + isc->dx);
+ if (y2 >= y + isc->dy) {
+ /* rtsc is above isc, replace rtsc by isc */
+ rtsc->x = x;
+ rtsc->y = y;
+ rtsc->dx = isc->dx;
+ rtsc->dy = isc->dy;
+ return;
+ }
+
+ /*
+ * the two curves intersect
+ * compute the offsets (dx, dy) using the reverse
+ * function of seg_x2y()
+ * seg_x2y(dx, sm1) == seg_x2y(dx, sm2) + (y1 - y)
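+	 * (derivation, added for clarity: expanding both sides gives
+	 * (dx * sm1) >> SM_SHIFT == ((dx * sm2) >> SM_SHIFT) + (y1 - y),
+	 * hence dx * (sm1 - sm2) == (y1 - y) << SM_SHIFT, which is the
+	 * division below.)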
+ */
+ dx = ((y1 - y) << SM_SHIFT) / (isc->sm1 - isc->sm2);
+ /*
+ * check if (x, y1) belongs to the 1st segment of rtsc.
+ * if so, add the offset.
+ */
+ if (rtsc->x + rtsc->dx > x)
+ dx += rtsc->x + rtsc->dx - x;
+ dy = seg_x2y(dx, isc->sm1);
+
+ rtsc->x = x;
+ rtsc->y = y;
+ rtsc->dx = dx;
+ rtsc->dy = dy;
+ return;
+}
+
+static void
+get_class_stats(struct hfsc_classstats *sp, struct hfsc_class *cl)
+{
+ sp->class_id = cl->cl_id;
+ sp->class_handle = cl->cl_handle;
+
+ if (cl->cl_rsc != NULL) {
+ sp->rsc.m1 = sm2m(cl->cl_rsc->sm1);
+ sp->rsc.d = dx2d(cl->cl_rsc->dx);
+ sp->rsc.m2 = sm2m(cl->cl_rsc->sm2);
+ } else {
+ sp->rsc.m1 = 0;
+ sp->rsc.d = 0;
+ sp->rsc.m2 = 0;
+ }
+ if (cl->cl_fsc != NULL) {
+ sp->fsc.m1 = sm2m(cl->cl_fsc->sm1);
+ sp->fsc.d = dx2d(cl->cl_fsc->dx);
+ sp->fsc.m2 = sm2m(cl->cl_fsc->sm2);
+ } else {
+ sp->fsc.m1 = 0;
+ sp->fsc.d = 0;
+ sp->fsc.m2 = 0;
+ }
+ if (cl->cl_usc != NULL) {
+ sp->usc.m1 = sm2m(cl->cl_usc->sm1);
+ sp->usc.d = dx2d(cl->cl_usc->dx);
+ sp->usc.m2 = sm2m(cl->cl_usc->sm2);
+ } else {
+ sp->usc.m1 = 0;
+ sp->usc.d = 0;
+ sp->usc.m2 = 0;
+ }
+
+ sp->total = cl->cl_total;
+ sp->cumul = cl->cl_cumul;
+
+ sp->d = cl->cl_d;
+ sp->e = cl->cl_e;
+ sp->vt = cl->cl_vt;
+ sp->f = cl->cl_f;
+
+ sp->initvt = cl->cl_initvt;
+ sp->vtperiod = cl->cl_vtperiod;
+ sp->parentperiod = cl->cl_parentperiod;
+ sp->nactive = cl->cl_nactive;
+ sp->vtoff = cl->cl_vtoff;
+ sp->cvtmax = cl->cl_cvtmax;
+ sp->myf = cl->cl_myf;
+ sp->cfmin = cl->cl_cfmin;
+ sp->cvtmin = cl->cl_cvtmin;
+ sp->myfadj = cl->cl_myfadj;
+ sp->vtadj = cl->cl_vtadj;
+
+ sp->cur_time = read_machclk();
+ sp->machclk_freq = machclk_freq;
+
+ sp->qlength = qlen(cl->cl_q);
+ sp->qlimit = qlimit(cl->cl_q);
+ sp->xmit_cnt = cl->cl_stats.xmit_cnt;
+ sp->drop_cnt = cl->cl_stats.drop_cnt;
+ sp->period = cl->cl_stats.period;
+
+ sp->qtype = qtype(cl->cl_q);
+#ifdef ALTQ_RED
+ if (q_is_red(cl->cl_q))
+ red_getstats(cl->cl_red, &sp->red[0]);
+#endif
+#ifdef ALTQ_RIO
+ if (q_is_rio(cl->cl_q))
+ rio_getstats((rio_t *)cl->cl_red, &sp->red[0]);
+#endif
+}
+
+/* convert a class handle to the corresponding class pointer */
+static struct hfsc_class *
+clh_to_clp(struct hfsc_if *hif, u_int32_t chandle)
+{
+ int i;
+ struct hfsc_class *cl;
+
+ if (chandle == 0)
+ return (NULL);
+ /*
+ * first, try optimistically the slot matching the lower bits of
+ * the handle. if it fails, do the linear table search.
+ */
+ i = chandle % HFSC_MAX_CLASSES;
+ if ((cl = hif->hif_class_tbl[i]) != NULL && cl->cl_handle == chandle)
+ return (cl);
+ for (i = 0; i < HFSC_MAX_CLASSES; i++)
+ if ((cl = hif->hif_class_tbl[i]) != NULL &&
+ cl->cl_handle == chandle)
+ return (cl);
+ return (NULL);
+}
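+
+/*
+ * note (illustrative): with HFSC_MAX_CLASSES = 64, handle 70 hashes
+ * to slot 6; if another class occupies that slot, the linear scan
+ * still finds the right entry, so the fast path is only an
+ * optimization.
+ */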
+
+#ifdef ALTQ3_COMPAT
+static struct hfsc_if *
+hfsc_attach(ifq, bandwidth)
+ struct ifaltq *ifq;
+ u_int bandwidth;
+{
+ struct hfsc_if *hif;
+
+ hif = malloc(sizeof(struct hfsc_if), M_DEVBUF, M_WAITOK);
+ if (hif == NULL)
+ return (NULL);
+ bzero(hif, sizeof(struct hfsc_if));
+
+ hif->hif_eligible = ellist_alloc();
+ if (hif->hif_eligible == NULL) {
+ free(hif, M_DEVBUF);
+ return NULL;
+ }
+
+ hif->hif_ifq = ifq;
+
+ /* add this state to the hfsc list */
+ hif->hif_next = hif_list;
+ hif_list = hif;
+
+ return (hif);
+}
+
+static int
+hfsc_detach(hif)
+ struct hfsc_if *hif;
+{
+ (void)hfsc_clear_interface(hif);
+ (void)hfsc_class_destroy(hif->hif_rootclass);
+
+ /* remove this interface from the hif list */
+ if (hif_list == hif)
+ hif_list = hif->hif_next;
+ else {
+ struct hfsc_if *h;
+
+ for (h = hif_list; h != NULL; h = h->hif_next)
+ if (h->hif_next == hif) {
+ h->hif_next = hif->hif_next;
+ break;
+ }
+ ASSERT(h != NULL);
+ }
+
+ ellist_destroy(hif->hif_eligible);
+
+ free(hif, M_DEVBUF);
+
+ return (0);
+}
+
+static int
+hfsc_class_modify(cl, rsc, fsc, usc)
+ struct hfsc_class *cl;
+ struct service_curve *rsc, *fsc, *usc;
+{
+ struct internal_sc *rsc_tmp, *fsc_tmp, *usc_tmp;
+ u_int64_t cur_time;
+ int s;
+
+ rsc_tmp = fsc_tmp = usc_tmp = NULL;
+ if (rsc != NULL && (rsc->m1 != 0 || rsc->m2 != 0) &&
+ cl->cl_rsc == NULL) {
+ rsc_tmp = malloc(sizeof(struct internal_sc),
+ M_DEVBUF, M_WAITOK);
+ if (rsc_tmp == NULL)
+ return (ENOMEM);
+ }
+ if (fsc != NULL && (fsc->m1 != 0 || fsc->m2 != 0) &&
+ cl->cl_fsc == NULL) {
+ fsc_tmp = malloc(sizeof(struct internal_sc),
+ M_DEVBUF, M_WAITOK);
+ if (fsc_tmp == NULL) {
+			free(rsc_tmp, M_DEVBUF);
+ return (ENOMEM);
+ }
+ }
+ if (usc != NULL && (usc->m1 != 0 || usc->m2 != 0) &&
+ cl->cl_usc == NULL) {
+ usc_tmp = malloc(sizeof(struct internal_sc),
+ M_DEVBUF, M_WAITOK);
+ if (usc_tmp == NULL) {
+			free(rsc_tmp, M_DEVBUF);
+			free(fsc_tmp, M_DEVBUF);
+ return (ENOMEM);
+ }
+ }
+
+ cur_time = read_machclk();
+#ifdef __NetBSD__
+ s = splnet();
+#else
+ s = splimp();
+#endif
+ IFQ_LOCK(cl->cl_hif->hif_ifq);
+
+ if (rsc != NULL) {
+ if (rsc->m1 == 0 && rsc->m2 == 0) {
+ if (cl->cl_rsc != NULL) {
+ if (!qempty(cl->cl_q))
+ hfsc_purgeq(cl);
+ free(cl->cl_rsc, M_DEVBUF);
+ cl->cl_rsc = NULL;
+ }
+ } else {
+ if (cl->cl_rsc == NULL)
+ cl->cl_rsc = rsc_tmp;
+ sc2isc(rsc, cl->cl_rsc);
+ rtsc_init(&cl->cl_deadline, cl->cl_rsc, cur_time,
+ cl->cl_cumul);
+ cl->cl_eligible = cl->cl_deadline;
+ if (cl->cl_rsc->sm1 <= cl->cl_rsc->sm2) {
+ cl->cl_eligible.dx = 0;
+ cl->cl_eligible.dy = 0;
+ }
+ }
+ }
+
+ if (fsc != NULL) {
+ if (fsc->m1 == 0 && fsc->m2 == 0) {
+ if (cl->cl_fsc != NULL) {
+ if (!qempty(cl->cl_q))
+ hfsc_purgeq(cl);
+ free(cl->cl_fsc, M_DEVBUF);
+ cl->cl_fsc = NULL;
+ }
+ } else {
+ if (cl->cl_fsc == NULL)
+ cl->cl_fsc = fsc_tmp;
+ sc2isc(fsc, cl->cl_fsc);
+ rtsc_init(&cl->cl_virtual, cl->cl_fsc, cl->cl_vt,
+ cl->cl_total);
+ }
+ }
+
+ if (usc != NULL) {
+ if (usc->m1 == 0 && usc->m2 == 0) {
+ if (cl->cl_usc != NULL) {
+ free(cl->cl_usc, M_DEVBUF);
+ cl->cl_usc = NULL;
+ cl->cl_myf = 0;
+ }
+ } else {
+ if (cl->cl_usc == NULL)
+ cl->cl_usc = usc_tmp;
+ sc2isc(usc, cl->cl_usc);
+ rtsc_init(&cl->cl_ulimit, cl->cl_usc, cur_time,
+ cl->cl_total);
+ }
+ }
+
+ if (!qempty(cl->cl_q)) {
+ if (cl->cl_rsc != NULL)
+ update_ed(cl, m_pktlen(qhead(cl->cl_q)));
+ if (cl->cl_fsc != NULL)
+ update_vf(cl, 0, cur_time);
+ /* is this enough? */
+ }
+
+ IFQ_UNLOCK(cl->cl_hif->hif_ifq);
+ splx(s);
+
+ return (0);
+}
+
+/*
+ * hfsc device interface
+ */
+int
+hfscopen(dev, flag, fmt, p)
+ dev_t dev;
+ int flag, fmt;
+#if (__FreeBSD_version > 500000)
+ struct thread *p;
+#else
+ struct proc *p;
+#endif
+{
+ if (machclk_freq == 0)
+ init_machclk();
+
+ if (machclk_freq == 0) {
+ printf("hfsc: no cpu clock available!\n");
+ return (ENXIO);
+ }
+
+ /* everything will be done when the queueing scheme is attached. */
+ return 0;
+}
+
+int
+hfscclose(dev, flag, fmt, p)
+ dev_t dev;
+ int flag, fmt;
+#if (__FreeBSD_version > 500000)
+ struct thread *p;
+#else
+ struct proc *p;
+#endif
+{
+ struct hfsc_if *hif;
+ int err, error = 0;
+
+ while ((hif = hif_list) != NULL) {
+ /* destroy all */
+ if (ALTQ_IS_ENABLED(hif->hif_ifq))
+ altq_disable(hif->hif_ifq);
+
+ err = altq_detach(hif->hif_ifq);
+ if (err == 0)
+ err = hfsc_detach(hif);
+ if (err != 0 && error == 0)
+ error = err;
+ }
+
+ return error;
+}
+
+int
+hfscioctl(dev, cmd, addr, flag, p)
+ dev_t dev;
+ ioctlcmd_t cmd;
+ caddr_t addr;
+ int flag;
+#if (__FreeBSD_version > 500000)
+ struct thread *p;
+#else
+ struct proc *p;
+#endif
+{
+ struct hfsc_if *hif;
+ struct hfsc_interface *ifacep;
+ int error = 0;
+
+ /* check super-user privilege */
+ switch (cmd) {
+ case HFSC_GETSTATS:
+ break;
+ default:
+#if (__FreeBSD_version > 700000)
+ if ((error = priv_check(p, PRIV_ALTQ_MANAGE)) != 0)
+ return (error);
+#elif (__FreeBSD_version > 400000)
+ if ((error = suser(p)) != 0)
+ return (error);
+#else
+ if ((error = suser(p->p_ucred, &p->p_acflag)) != 0)
+ return (error);
+#endif
+ break;
+ }
+
+ switch (cmd) {
+
+ case HFSC_IF_ATTACH:
+ error = hfsccmd_if_attach((struct hfsc_attach *)addr);
+ break;
+
+ case HFSC_IF_DETACH:
+ error = hfsccmd_if_detach((struct hfsc_interface *)addr);
+ break;
+
+ case HFSC_ENABLE:
+ case HFSC_DISABLE:
+ case HFSC_CLEAR_HIERARCHY:
+ ifacep = (struct hfsc_interface *)addr;
+ if ((hif = altq_lookup(ifacep->hfsc_ifname,
+ ALTQT_HFSC)) == NULL) {
+ error = EBADF;
+ break;
+ }
+
+ switch (cmd) {
+
+ case HFSC_ENABLE:
+ if (hif->hif_defaultclass == NULL) {
+#ifdef ALTQ_DEBUG
+ printf("hfsc: no default class\n");
+#endif
+ error = EINVAL;
+ break;
+ }
+ error = altq_enable(hif->hif_ifq);
+ break;
+
+ case HFSC_DISABLE:
+ error = altq_disable(hif->hif_ifq);
+ break;
+
+ case HFSC_CLEAR_HIERARCHY:
+ hfsc_clear_interface(hif);
+ break;
+ }
+ break;
+
+ case HFSC_ADD_CLASS:
+ error = hfsccmd_add_class((struct hfsc_add_class *)addr);
+ break;
+
+ case HFSC_DEL_CLASS:
+ error = hfsccmd_delete_class((struct hfsc_delete_class *)addr);
+ break;
+
+ case HFSC_MOD_CLASS:
+ error = hfsccmd_modify_class((struct hfsc_modify_class *)addr);
+ break;
+
+ case HFSC_ADD_FILTER:
+ error = hfsccmd_add_filter((struct hfsc_add_filter *)addr);
+ break;
+
+ case HFSC_DEL_FILTER:
+ error = hfsccmd_delete_filter((struct hfsc_delete_filter *)addr);
+ break;
+
+ case HFSC_GETSTATS:
+ error = hfsccmd_class_stats((struct hfsc_class_stats *)addr);
+ break;
+
+ default:
+ error = EINVAL;
+ break;
+ }
+ return error;
+}
+
+static int
+hfsccmd_if_attach(ap)
+ struct hfsc_attach *ap;
+{
+ struct hfsc_if *hif;
+ struct ifnet *ifp;
+ int error;
+
+ if ((ifp = ifunit(ap->iface.hfsc_ifname)) == NULL)
+ return (ENXIO);
+
+ if ((hif = hfsc_attach(&ifp->if_snd, ap->bandwidth)) == NULL)
+ return (ENOMEM);
+
+ /*
+ * set HFSC to this ifnet structure.
+ */
+ if ((error = altq_attach(&ifp->if_snd, ALTQT_HFSC, hif,
+ hfsc_enqueue, hfsc_dequeue, hfsc_request,
+ &hif->hif_classifier, acc_classify)) != 0)
+ (void)hfsc_detach(hif);
+
+ return (error);
+}
+
+static int
+hfsccmd_if_detach(ap)
+ struct hfsc_interface *ap;
+{
+ struct hfsc_if *hif;
+ int error;
+
+ if ((hif = altq_lookup(ap->hfsc_ifname, ALTQT_HFSC)) == NULL)
+ return (EBADF);
+
+ if (ALTQ_IS_ENABLED(hif->hif_ifq))
+ altq_disable(hif->hif_ifq);
+
+ if ((error = altq_detach(hif->hif_ifq)))
+ return (error);
+
+ return hfsc_detach(hif);
+}
+
+static int
+hfsccmd_add_class(ap)
+ struct hfsc_add_class *ap;
+{
+ struct hfsc_if *hif;
+ struct hfsc_class *cl, *parent;
+ int i;
+
+ if ((hif = altq_lookup(ap->iface.hfsc_ifname, ALTQT_HFSC)) == NULL)
+ return (EBADF);
+
+ if (ap->parent_handle == HFSC_NULLCLASS_HANDLE &&
+ hif->hif_rootclass == NULL)
+ parent = NULL;
+ else if ((parent = clh_to_clp(hif, ap->parent_handle)) == NULL)
+ return (EINVAL);
+
+ /* assign a class handle (use a free slot number for now) */
+ for (i = 1; i < HFSC_MAX_CLASSES; i++)
+ if (hif->hif_class_tbl[i] == NULL)
+ break;
+ if (i == HFSC_MAX_CLASSES)
+ return (EBUSY);
+
+ if ((cl = hfsc_class_create(hif, &ap->service_curve, NULL, NULL,
+ parent, ap->qlimit, ap->flags, i)) == NULL)
+ return (ENOMEM);
+
+ /* return a class handle to the user */
+ ap->class_handle = i;
+
+ return (0);
+}
+
+static int
+hfsccmd_delete_class(ap)
+ struct hfsc_delete_class *ap;
+{
+ struct hfsc_if *hif;
+ struct hfsc_class *cl;
+
+ if ((hif = altq_lookup(ap->iface.hfsc_ifname, ALTQT_HFSC)) == NULL)
+ return (EBADF);
+
+ if ((cl = clh_to_clp(hif, ap->class_handle)) == NULL)
+ return (EINVAL);
+
+ return hfsc_class_destroy(cl);
+}
+
+static int
+hfsccmd_modify_class(ap)
+ struct hfsc_modify_class *ap;
+{
+ struct hfsc_if *hif;
+ struct hfsc_class *cl;
+ struct service_curve *rsc = NULL;
+ struct service_curve *fsc = NULL;
+ struct service_curve *usc = NULL;
+
+ if ((hif = altq_lookup(ap->iface.hfsc_ifname, ALTQT_HFSC)) == NULL)
+ return (EBADF);
+
+ if ((cl = clh_to_clp(hif, ap->class_handle)) == NULL)
+ return (EINVAL);
+
+ if (ap->sctype & HFSC_REALTIMESC)
+ rsc = &ap->service_curve;
+ if (ap->sctype & HFSC_LINKSHARINGSC)
+ fsc = &ap->service_curve;
+ if (ap->sctype & HFSC_UPPERLIMITSC)
+ usc = &ap->service_curve;
+
+ return hfsc_class_modify(cl, rsc, fsc, usc);
+}
+
+static int
+hfsccmd_add_filter(ap)
+ struct hfsc_add_filter *ap;
+{
+ struct hfsc_if *hif;
+ struct hfsc_class *cl;
+
+ if ((hif = altq_lookup(ap->iface.hfsc_ifname, ALTQT_HFSC)) == NULL)
+ return (EBADF);
+
+ if ((cl = clh_to_clp(hif, ap->class_handle)) == NULL)
+ return (EINVAL);
+
+ if (is_a_parent_class(cl)) {
+#ifdef ALTQ_DEBUG
+ printf("hfsccmd_add_filter: not a leaf class!\n");
+#endif
+ return (EINVAL);
+ }
+
+ return acc_add_filter(&hif->hif_classifier, &ap->filter,
+ cl, &ap->filter_handle);
+}
+
+static int
+hfsccmd_delete_filter(ap)
+ struct hfsc_delete_filter *ap;
+{
+ struct hfsc_if *hif;
+
+ if ((hif = altq_lookup(ap->iface.hfsc_ifname, ALTQT_HFSC)) == NULL)
+ return (EBADF);
+
+ return acc_delete_filter(&hif->hif_classifier,
+ ap->filter_handle);
+}
+
+static int
+hfsccmd_class_stats(ap)
+ struct hfsc_class_stats *ap;
+{
+ struct hfsc_if *hif;
+ struct hfsc_class *cl;
+ struct hfsc_classstats stats, *usp;
+ int n, nclasses, error;
+
+ if ((hif = altq_lookup(ap->iface.hfsc_ifname, ALTQT_HFSC)) == NULL)
+ return (EBADF);
+
+ ap->cur_time = read_machclk();
+ ap->machclk_freq = machclk_freq;
+ ap->hif_classes = hif->hif_classes;
+ ap->hif_packets = hif->hif_packets;
+
+ /* skip the first N classes in the tree */
+ nclasses = ap->nskip;
+ for (cl = hif->hif_rootclass, n = 0; cl != NULL && n < nclasses;
+ cl = hfsc_nextclass(cl), n++)
+ ;
+ if (n != nclasses)
+ return (EINVAL);
+
+ /* then, read the next N classes in the tree */
+ nclasses = ap->nclasses;
+ usp = ap->stats;
+ for (n = 0; cl != NULL && n < nclasses; cl = hfsc_nextclass(cl), n++) {
+
+ get_class_stats(&stats, cl);
+
+ if ((error = copyout((caddr_t)&stats, (caddr_t)usp++,
+ sizeof(stats))) != 0)
+ return (error);
+ }
+
+ ap->nclasses = n;
+
+ return (0);
+}
+
+#ifdef KLD_MODULE
+
+static struct altqsw hfsc_sw =
+ {"hfsc", hfscopen, hfscclose, hfscioctl};
+
+ALTQ_MODULE(altq_hfsc, ALTQT_HFSC, &hfsc_sw);
+MODULE_DEPEND(altq_hfsc, altq_red, 1, 1, 1);
+MODULE_DEPEND(altq_hfsc, altq_rio, 1, 1, 1);
+
+#endif /* KLD_MODULE */
+#endif /* ALTQ3_COMPAT */
+
+#endif /* ALTQ_HFSC */
diff --git a/contrib/altq/rtems/freebsd/altq/altq_hfsc.h b/contrib/altq/rtems/freebsd/altq/altq_hfsc.h
new file mode 100644
index 00000000..29ce60bf
--- /dev/null
+++ b/contrib/altq/rtems/freebsd/altq/altq_hfsc.h
@@ -0,0 +1,320 @@
+/* $KAME: altq_hfsc.h,v 1.12 2003/12/05 05:40:46 kjc Exp $ */
+
+/*
+ * Copyright (c) 1997-1999 Carnegie Mellon University. All Rights Reserved.
+ *
+ * Permission to use, copy, modify, and distribute this software and
+ * its documentation is hereby granted (including for commercial or
+ * for-profit use), provided that both the copyright notice and this
+ * permission notice appear in all copies of the software, derivative
+ * works, or modified versions, and any portions thereof.
+ *
+ * THIS SOFTWARE IS EXPERIMENTAL AND IS KNOWN TO HAVE BUGS, SOME OF
+ * WHICH MAY HAVE SERIOUS CONSEQUENCES. CARNEGIE MELLON PROVIDES THIS
+ * SOFTWARE IN ITS ``AS IS'' CONDITION, AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+ * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ *
+ * Carnegie Mellon encourages (but does not require) users of this
+ * software to return any improvements or extensions that they make,
+ * and to grant Carnegie Mellon the rights to redistribute these
+ * changes without encumbrance.
+ */
+#ifndef _ALTQ_ALTQ_HFSC_HH_
+#define _ALTQ_ALTQ_HFSC_HH_
+
+#include <rtems/freebsd/altq/altq.h>
+#include <rtems/freebsd/altq/altq_classq.h>
+#include <rtems/freebsd/altq/altq_red.h>
+#include <rtems/freebsd/altq/altq_rio.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct service_curve {
+ u_int m1; /* slope of the first segment in bits/sec */
+ u_int d; /* the x-projection of the first segment in msec */
+ u_int m2; /* slope of the second segment in bits/sec */
+};
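+
+/*
+ * example (illustrative values, not from the original header):
+ * { .m1 = 2000000, .d = 50, .m2 = 1000000 } describes a concave
+ * curve that serves 2Mbps for the first 50msec of a backlogged
+ * period and 1Mbps thereafter, trading long-term rate for lower
+ * initial latency.
+ */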
+
+/* special class handles */
+#define HFSC_NULLCLASS_HANDLE 0
+#define HFSC_MAX_CLASSES 64
+
+/* hfsc class flags */
+#define HFCF_RED 0x0001 /* use RED */
+#define HFCF_ECN 0x0002 /* use RED/ECN */
+#define HFCF_RIO 0x0004 /* use RIO */
+#define HFCF_CLEARDSCP 0x0010 /* clear diffserv codepoint */
+#define HFCF_DEFAULTCLASS 0x1000 /* default class */
+
+/* service curve types */
+#define HFSC_REALTIMESC 1
+#define HFSC_LINKSHARINGSC 2
+#define HFSC_UPPERLIMITSC 4
+#define HFSC_DEFAULTSC (HFSC_REALTIMESC|HFSC_LINKSHARINGSC)
+
+struct hfsc_classstats {
+ u_int class_id;
+ u_int32_t class_handle;
+ struct service_curve rsc;
+ struct service_curve fsc;
+ struct service_curve usc; /* upper limit service curve */
+
+ u_int64_t total; /* total work in bytes */
+ u_int64_t cumul; /* cumulative work in bytes
+ done by real-time criteria */
+ u_int64_t d; /* deadline */
+ u_int64_t e; /* eligible time */
+ u_int64_t vt; /* virtual time */
+ u_int64_t f; /* fit time for upper-limit */
+
+ /* info helpful for debugging */
+ u_int64_t initvt; /* init virtual time */
+ u_int64_t vtoff; /* cl_vt_ipoff */
+ u_int64_t cvtmax; /* cl_maxvt */
+ u_int64_t myf; /* cl_myf */
+ u_int64_t cfmin; /* cl_mincf */
+ u_int64_t cvtmin; /* cl_mincvt */
+ u_int64_t myfadj; /* cl_myfadj */
+ u_int64_t vtadj; /* cl_vtadj */
+ u_int64_t cur_time;
+ u_int32_t machclk_freq;
+
+ u_int qlength;
+ u_int qlimit;
+ struct pktcntr xmit_cnt;
+ struct pktcntr drop_cnt;
+ u_int period;
+
+ u_int vtperiod; /* vt period sequence no */
+ u_int parentperiod; /* parent's vt period seqno */
+ int nactive; /* number of active children */
+
+ /* red and rio related info */
+ int qtype;
+ struct redstats red[3];
+};
+
+#ifdef ALTQ3_COMPAT
+struct hfsc_interface {
+ char hfsc_ifname[IFNAMSIZ]; /* interface name (e.g., fxp0) */
+};
+
+struct hfsc_attach {
+ struct hfsc_interface iface;
+ u_int bandwidth; /* link bandwidth in bits/sec */
+};
+
+struct hfsc_add_class {
+ struct hfsc_interface iface;
+ u_int32_t parent_handle;
+ struct service_curve service_curve;
+ int qlimit;
+ int flags;
+
+ u_int32_t class_handle; /* return value */
+};
+
+struct hfsc_delete_class {
+ struct hfsc_interface iface;
+ u_int32_t class_handle;
+};
+
+struct hfsc_modify_class {
+ struct hfsc_interface iface;
+ u_int32_t class_handle;
+ struct service_curve service_curve;
+ int sctype;
+};
+
+struct hfsc_add_filter {
+ struct hfsc_interface iface;
+ u_int32_t class_handle;
+ struct flow_filter filter;
+
+ u_long filter_handle; /* return value */
+};
+
+struct hfsc_delete_filter {
+ struct hfsc_interface iface;
+ u_long filter_handle;
+};
+
+struct hfsc_class_stats {
+ struct hfsc_interface iface;
+ int nskip; /* skip # of classes */
+ int nclasses; /* # of class stats (WR) */
+ u_int64_t cur_time; /* current time */
+ u_int32_t machclk_freq; /* machine clock frequency */
+ u_int hif_classes; /* # of classes in the tree */
+ u_int hif_packets; /* # of packets in the tree */
+ struct hfsc_classstats *stats; /* pointer to stats array */
+};
+
+#define HFSC_IF_ATTACH _IOW('Q', 1, struct hfsc_attach)
+#define HFSC_IF_DETACH _IOW('Q', 2, struct hfsc_interface)
+#define HFSC_ENABLE _IOW('Q', 3, struct hfsc_interface)
+#define HFSC_DISABLE _IOW('Q', 4, struct hfsc_interface)
+#define HFSC_CLEAR_HIERARCHY _IOW('Q', 5, struct hfsc_interface)
+#define HFSC_ADD_CLASS _IOWR('Q', 7, struct hfsc_add_class)
+#define HFSC_DEL_CLASS _IOW('Q', 8, struct hfsc_delete_class)
+#define HFSC_MOD_CLASS _IOW('Q', 9, struct hfsc_modify_class)
+#define HFSC_ADD_FILTER _IOWR('Q', 10, struct hfsc_add_filter)
+#define HFSC_DEL_FILTER _IOW('Q', 11, struct hfsc_delete_filter)
+#define HFSC_GETSTATS _IOWR('Q', 12, struct hfsc_class_stats)
+#endif /* ALTQ3_COMPAT */
+
+#ifdef _KERNEL
+/*
+ * kernel internal service curve representation
+ * coordinates are given by 64 bit unsigned integers.
+ * x-axis: unit is clock count. for the intel x86 architecture,
+ * the raw Pentium TSC (Timestamp Counter) value is used.
+ * virtual time is also calculated in this time scale.
+ * y-axis: unit is byte.
+ *
+ * the service curve parameters are converted to the internal
+ * representation.
+ * the slope values are scaled to avoid overflow.
+ * the inverse slope values as well as the y-projection of the 1st
+ * segment are kept in order to avoid 64-bit divide operations
+ * that are expensive on 32-bit architectures.
+ *
+ * note: the Intel Pentium TSC does not wrap around for several thousand years.
+ * x-axis doesn't wrap around for 1089 years with 1GHz clock.
+ * y-axis doesn't wrap around for 4358 years with 1Gbps bandwidth.
+ */
+
+/* kernel internal representation of a service curve */
+struct internal_sc {
+ u_int64_t sm1; /* scaled slope of the 1st segment */
+ u_int64_t ism1; /* scaled inverse-slope of the 1st segment */
+ u_int64_t dx; /* the x-projection of the 1st segment */
+ u_int64_t dy; /* the y-projection of the 1st segment */
+ u_int64_t sm2; /* scaled slope of the 2nd segment */
+ u_int64_t ism2; /* scaled inverse-slope of the 2nd segment */
+};
+
+/* runtime service curve */
+struct runtime_sc {
+ u_int64_t x; /* current starting position on x-axis */
+	u_int64_t	y;	/* current starting position on y-axis */
+ u_int64_t sm1; /* scaled slope of the 1st segment */
+ u_int64_t ism1; /* scaled inverse-slope of the 1st segment */
+ u_int64_t dx; /* the x-projection of the 1st segment */
+ u_int64_t dy; /* the y-projection of the 1st segment */
+ u_int64_t sm2; /* scaled slope of the 2nd segment */
+ u_int64_t ism2; /* scaled inverse-slope of the 2nd segment */
+};
+
+/* for TAILQ based ellist and actlist implementation */
+struct hfsc_class;
+typedef TAILQ_HEAD(_eligible, hfsc_class) ellist_t;
+typedef TAILQ_ENTRY(hfsc_class) elentry_t;
+typedef TAILQ_HEAD(_active, hfsc_class) actlist_t;
+typedef TAILQ_ENTRY(hfsc_class) actentry_t;
+#define ellist_first(s) TAILQ_FIRST(s)
+#define actlist_first(s) TAILQ_FIRST(s)
+#define actlist_last(s) TAILQ_LAST(s, _active)
+
+struct hfsc_class {
+ u_int cl_id; /* class id (just for debug) */
+ u_int32_t cl_handle; /* class handle */
+ struct hfsc_if *cl_hif; /* back pointer to struct hfsc_if */
+ int cl_flags; /* misc flags */
+
+ struct hfsc_class *cl_parent; /* parent class */
+ struct hfsc_class *cl_siblings; /* sibling classes */
+ struct hfsc_class *cl_children; /* child classes */
+
+ class_queue_t *cl_q; /* class queue structure */
+ struct red *cl_red; /* RED state */
+ struct altq_pktattr *cl_pktattr; /* saved header used by ECN */
+
+ u_int64_t cl_total; /* total work in bytes */
+ u_int64_t cl_cumul; /* cumulative work in bytes
+ done by real-time criteria */
+ u_int64_t cl_d; /* deadline */
+ u_int64_t cl_e; /* eligible time */
+ u_int64_t cl_vt; /* virtual time */
+ u_int64_t cl_f; /* time when this class will fit for
+ link-sharing, max(myf, cfmin) */
+ u_int64_t cl_myf; /* my fit-time (as calculated from this
+ class's own upperlimit curve) */
+ u_int64_t cl_myfadj; /* my fit-time adjustment
+ (to cancel history dependence) */
+ u_int64_t cl_cfmin; /* earliest children's fit-time (used
+ with cl_myf to obtain cl_f) */
+ u_int64_t cl_cvtmin; /* minimal virtual time among the
+ children fit for link-sharing
+ (monotonic within a period) */
+ u_int64_t cl_vtadj; /* intra-period cumulative vt
+ adjustment */
+ u_int64_t cl_vtoff; /* inter-period cumulative vt offset */
+ u_int64_t cl_cvtmax; /* max child's vt in the last period */
+
+ u_int64_t cl_initvt; /* init virtual time (for debugging) */
+
+ struct internal_sc *cl_rsc; /* internal real-time service curve */
+ struct internal_sc *cl_fsc; /* internal fair service curve */
+ struct internal_sc *cl_usc; /* internal upperlimit service curve */
+ struct runtime_sc cl_deadline; /* deadline curve */
+ struct runtime_sc cl_eligible; /* eligible curve */
+ struct runtime_sc cl_virtual; /* virtual curve */
+ struct runtime_sc cl_ulimit; /* upperlimit curve */
+
+ u_int cl_vtperiod; /* vt period sequence no */
+ u_int cl_parentperiod; /* parent's vt period seqno */
+ int cl_nactive; /* number of active children */
+ actlist_t *cl_actc; /* active children list */
+
+ actentry_t cl_actlist; /* active children list entry */
+ elentry_t cl_ellist; /* eligible list entry */
+
+ struct {
+ struct pktcntr xmit_cnt;
+ struct pktcntr drop_cnt;
+ u_int period;
+ } cl_stats;
+};
+
+/*
+ * hfsc interface state
+ */
+struct hfsc_if {
+ struct hfsc_if *hif_next; /* interface state list */
+ struct ifaltq *hif_ifq; /* backpointer to ifaltq */
+ struct hfsc_class *hif_rootclass; /* root class */
+ struct hfsc_class *hif_defaultclass; /* default class */
+ struct hfsc_class *hif_class_tbl[HFSC_MAX_CLASSES];
+ struct hfsc_class *hif_pollcache; /* cache for poll operation */
+
+ u_int hif_classes; /* # of classes in the tree */
+ u_int hif_packets; /* # of packets in the tree */
+ u_int hif_classid; /* class id sequence number */
+
+ ellist_t *hif_eligible; /* eligible list */
+
+#ifdef ALTQ3_CLFIER_COMPAT
+ struct acc_classifier hif_classifier;
+#endif
+};
+
+#endif /* _KERNEL */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _ALTQ_ALTQ_HFSC_HH_ */
diff --git a/contrib/altq/rtems/freebsd/altq/altq_priq.c b/contrib/altq/rtems/freebsd/altq/altq_priq.c
new file mode 100644
index 00000000..de7f5f0a
--- /dev/null
+++ b/contrib/altq/rtems/freebsd/altq/altq_priq.c
@@ -0,0 +1,1055 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/* $FreeBSD$ */
+/* $KAME: altq_priq.c,v 1.11 2003/09/17 14:23:25 kjc Exp $ */
+/*
+ * Copyright (C) 2000-2003
+ * Sony Computer Science Laboratories Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY SONY CSL AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL SONY CSL OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+/*
+ * priority queue
+ */
+
+#if defined(__FreeBSD__) || defined(__NetBSD__)
+#include <rtems/freebsd/local/opt_altq.h>
+#if (__FreeBSD__ != 2)
+#include <rtems/freebsd/local/opt_inet.h>
+#ifdef __FreeBSD__
+#include <rtems/freebsd/local/opt_inet6.h>
+#endif
+#endif
+#endif /* __FreeBSD__ || __NetBSD__ */
+
+#ifdef ALTQ_PRIQ /* priq is enabled by ALTQ_PRIQ option in opt_altq.h */
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/sockio.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/proc.h>
+#include <rtems/freebsd/sys/errno.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/queue.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/netinet/in.h>
+
+#include <rtems/freebsd/net/pfvar.h>
+#include <rtems/freebsd/altq/altq.h>
+#ifdef ALTQ3_COMPAT
+#include <rtems/freebsd/altq/altq_conf.h>
+#endif
+#include <rtems/freebsd/altq/altq_priq.h>
+
+/*
+ * function prototypes
+ */
+#ifdef ALTQ3_COMPAT
+static struct priq_if *priq_attach(struct ifaltq *, u_int);
+static int priq_detach(struct priq_if *);
+#endif
+static int priq_clear_interface(struct priq_if *);
+static int priq_request(struct ifaltq *, int, void *);
+static void priq_purge(struct priq_if *);
+static struct priq_class *priq_class_create(struct priq_if *, int, int, int,
+ int);
+static int priq_class_destroy(struct priq_class *);
+static int priq_enqueue(struct ifaltq *, struct mbuf *, struct altq_pktattr *);
+static struct mbuf *priq_dequeue(struct ifaltq *, int);
+
+static int priq_addq(struct priq_class *, struct mbuf *);
+static struct mbuf *priq_getq(struct priq_class *);
+static struct mbuf *priq_pollq(struct priq_class *);
+static void priq_purgeq(struct priq_class *);
+
+#ifdef ALTQ3_COMPAT
+static int priqcmd_if_attach(struct priq_interface *);
+static int priqcmd_if_detach(struct priq_interface *);
+static int priqcmd_add_class(struct priq_add_class *);
+static int priqcmd_delete_class(struct priq_delete_class *);
+static int priqcmd_modify_class(struct priq_modify_class *);
+static int priqcmd_add_filter(struct priq_add_filter *);
+static int priqcmd_delete_filter(struct priq_delete_filter *);
+static int priqcmd_class_stats(struct priq_class_stats *);
+#endif /* ALTQ3_COMPAT */
+
+static void get_class_stats(struct priq_classstats *, struct priq_class *);
+static struct priq_class *clh_to_clp(struct priq_if *, u_int32_t);
+
+#ifdef ALTQ3_COMPAT
+altqdev_decl(priq);
+
+/* pif_list keeps all priq_if's allocated. */
+static struct priq_if *pif_list = NULL;
+#endif /* ALTQ3_COMPAT */
+
+int
+priq_pfattach(struct pf_altq *a)
+{
+ struct ifnet *ifp;
+ int s, error;
+
+ if ((ifp = ifunit(a->ifname)) == NULL || a->altq_disc == NULL)
+ return (EINVAL);
+#ifdef __NetBSD__
+ s = splnet();
+#else
+ s = splimp();
+#endif
+ error = altq_attach(&ifp->if_snd, ALTQT_PRIQ, a->altq_disc,
+ priq_enqueue, priq_dequeue, priq_request, NULL, NULL);
+ splx(s);
+ return (error);
+}
+
+int
+priq_add_altq(struct pf_altq *a)
+{
+ struct priq_if *pif;
+ struct ifnet *ifp;
+
+ if ((ifp = ifunit(a->ifname)) == NULL)
+ return (EINVAL);
+ if (!ALTQ_IS_READY(&ifp->if_snd))
+ return (ENODEV);
+
+ pif = malloc(sizeof(struct priq_if),
+ M_DEVBUF, M_WAITOK);
+ if (pif == NULL)
+ return (ENOMEM);
+ bzero(pif, sizeof(struct priq_if));
+ pif->pif_bandwidth = a->ifbandwidth;
+ pif->pif_maxpri = -1;
+ pif->pif_ifq = &ifp->if_snd;
+
+ /* keep the state in pf_altq */
+ a->altq_disc = pif;
+
+ return (0);
+}
+
+int
+priq_remove_altq(struct pf_altq *a)
+{
+ struct priq_if *pif;
+
+ if ((pif = a->altq_disc) == NULL)
+ return (EINVAL);
+ a->altq_disc = NULL;
+
+ (void)priq_clear_interface(pif);
+
+ free(pif, M_DEVBUF);
+ return (0);
+}
+
+int
+priq_add_queue(struct pf_altq *a)
+{
+ struct priq_if *pif;
+ struct priq_class *cl;
+
+ if ((pif = a->altq_disc) == NULL)
+ return (EINVAL);
+
+ /* check parameters */
+ if (a->priority >= PRIQ_MAXPRI)
+ return (EINVAL);
+ if (a->qid == 0)
+ return (EINVAL);
+ if (pif->pif_classes[a->priority] != NULL)
+ return (EBUSY);
+ if (clh_to_clp(pif, a->qid) != NULL)
+ return (EBUSY);
+
+ cl = priq_class_create(pif, a->priority, a->qlimit,
+ a->pq_u.priq_opts.flags, a->qid);
+ if (cl == NULL)
+ return (ENOMEM);
+
+ return (0);
+}
+
+int
+priq_remove_queue(struct pf_altq *a)
+{
+ struct priq_if *pif;
+ struct priq_class *cl;
+
+ if ((pif = a->altq_disc) == NULL)
+ return (EINVAL);
+
+ if ((cl = clh_to_clp(pif, a->qid)) == NULL)
+ return (EINVAL);
+
+ return (priq_class_destroy(cl));
+}
+
+int
+priq_getqstats(struct pf_altq *a, void *ubuf, int *nbytes)
+{
+ struct priq_if *pif;
+ struct priq_class *cl;
+ struct priq_classstats stats;
+ int error = 0;
+
+ if ((pif = altq_lookup(a->ifname, ALTQT_PRIQ)) == NULL)
+ return (EBADF);
+
+ if ((cl = clh_to_clp(pif, a->qid)) == NULL)
+ return (EINVAL);
+
+ if (*nbytes < sizeof(stats))
+ return (EINVAL);
+
+ get_class_stats(&stats, cl);
+
+ if ((error = copyout((caddr_t)&stats, ubuf, sizeof(stats))) != 0)
+ return (error);
+ *nbytes = sizeof(stats);
+ return (0);
+}
+
+/*
+ * bring the interface back to the initial state by discarding
+ * all the filters and classes.
+ */
+static int
+priq_clear_interface(struct priq_if *pif)
+{
+ struct priq_class *cl;
+ int pri;
+
+#ifdef ALTQ3_CLFIER_COMPAT
+ /* free the filters for this interface */
+ acc_discard_filters(&pif->pif_classifier, NULL, 1);
+#endif
+
+ /* clear out the classes */
+ for (pri = 0; pri <= pif->pif_maxpri; pri++)
+ if ((cl = pif->pif_classes[pri]) != NULL)
+ priq_class_destroy(cl);
+
+ return (0);
+}
+
+static int
+priq_request(struct ifaltq *ifq, int req, void *arg)
+{
+ struct priq_if *pif = (struct priq_if *)ifq->altq_disc;
+
+ IFQ_LOCK_ASSERT(ifq);
+
+ switch (req) {
+ case ALTRQ_PURGE:
+ priq_purge(pif);
+ break;
+ }
+ return (0);
+}
+
+/* discard all the queued packets on the interface */
+static void
+priq_purge(struct priq_if *pif)
+{
+ struct priq_class *cl;
+ int pri;
+
+ for (pri = 0; pri <= pif->pif_maxpri; pri++) {
+ if ((cl = pif->pif_classes[pri]) != NULL && !qempty(cl->cl_q))
+ priq_purgeq(cl);
+ }
+ if (ALTQ_IS_ENABLED(pif->pif_ifq))
+ pif->pif_ifq->ifq_len = 0;
+}
+
+static struct priq_class *
+priq_class_create(struct priq_if *pif, int pri, int qlimit, int flags, int qid)
+{
+ struct priq_class *cl;
+ int s;
+
+#ifndef ALTQ_RED
+ if (flags & PRCF_RED) {
+#ifdef ALTQ_DEBUG
+ printf("priq_class_create: RED not configured for PRIQ!\n");
+#endif
+ return (NULL);
+ }
+#endif
+
+ if ((cl = pif->pif_classes[pri]) != NULL) {
+ /* modify the class instead of creating a new one */
+#ifdef __NetBSD__
+ s = splnet();
+#else
+ s = splimp();
+#endif
+ IFQ_LOCK(cl->cl_pif->pif_ifq);
+ if (!qempty(cl->cl_q))
+ priq_purgeq(cl);
+ IFQ_UNLOCK(cl->cl_pif->pif_ifq);
+ splx(s);
+#ifdef ALTQ_RIO
+ if (q_is_rio(cl->cl_q))
+ rio_destroy((rio_t *)cl->cl_red);
+#endif
+#ifdef ALTQ_RED
+ if (q_is_red(cl->cl_q))
+ red_destroy(cl->cl_red);
+#endif
+ } else {
+ cl = malloc(sizeof(struct priq_class),
+ M_DEVBUF, M_WAITOK);
+ if (cl == NULL)
+ return (NULL);
+ bzero(cl, sizeof(struct priq_class));
+
+ cl->cl_q = malloc(sizeof(class_queue_t),
+ M_DEVBUF, M_WAITOK);
+ if (cl->cl_q == NULL)
+ goto err_ret;
+ bzero(cl->cl_q, sizeof(class_queue_t));
+ }
+
+ pif->pif_classes[pri] = cl;
+ if (flags & PRCF_DEFAULTCLASS)
+ pif->pif_default = cl;
+ if (qlimit == 0)
+ qlimit = 50; /* use default */
+ qlimit(cl->cl_q) = qlimit;
+ qtype(cl->cl_q) = Q_DROPTAIL;
+ qlen(cl->cl_q) = 0;
+ cl->cl_flags = flags;
+ cl->cl_pri = pri;
+ if (pri > pif->pif_maxpri)
+ pif->pif_maxpri = pri;
+ cl->cl_pif = pif;
+ cl->cl_handle = qid;
+
+#ifdef ALTQ_RED
+ if (flags & (PRCF_RED|PRCF_RIO)) {
+ int red_flags, red_pkttime;
+
+ red_flags = 0;
+ if (flags & PRCF_ECN)
+ red_flags |= REDF_ECN;
+#ifdef ALTQ_RIO
+ if (flags & PRCF_CLEARDSCP)
+ red_flags |= RIOF_CLEARDSCP;
+#endif
+ if (pif->pif_bandwidth < 8)
+ red_pkttime = 1000 * 1000 * 1000; /* 1 sec */
+ else
+ red_pkttime = (int64_t)pif->pif_ifq->altq_ifp->if_mtu
+ * 1000 * 1000 * 1000 / (pif->pif_bandwidth / 8);
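+		/*
+		 * (added note: red_pkttime approximates the per-MTU
+		 * transmission time in nanoseconds; e.g. a 1500-byte MTU
+		 * at 10Mbps gives 1500 * 10^9 / (10^7 / 8) = 1.2e6 nsec.)
+		 */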
+#ifdef ALTQ_RIO
+ if (flags & PRCF_RIO) {
+ cl->cl_red = (red_t *)rio_alloc(0, NULL,
+ red_flags, red_pkttime);
+ if (cl->cl_red != NULL)
+ qtype(cl->cl_q) = Q_RIO;
+ } else
+#endif
+ if (flags & PRCF_RED) {
+ cl->cl_red = red_alloc(0, 0,
+ qlimit(cl->cl_q) * 10/100,
+ qlimit(cl->cl_q) * 30/100,
+ red_flags, red_pkttime);
+ if (cl->cl_red != NULL)
+ qtype(cl->cl_q) = Q_RED;
+ }
+ }
+#endif /* ALTQ_RED */
+
+ return (cl);
+
+ err_ret:
+ if (cl->cl_red != NULL) {
+#ifdef ALTQ_RIO
+ if (q_is_rio(cl->cl_q))
+ rio_destroy((rio_t *)cl->cl_red);
+#endif
+#ifdef ALTQ_RED
+ if (q_is_red(cl->cl_q))
+ red_destroy(cl->cl_red);
+#endif
+ }
+ if (cl->cl_q != NULL)
+ free(cl->cl_q, M_DEVBUF);
+ free(cl, M_DEVBUF);
+ return (NULL);
+}
+
+static int
+priq_class_destroy(struct priq_class *cl)
+{
+ struct priq_if *pif;
+ int s, pri;
+
+#ifdef __NetBSD__
+ s = splnet();
+#else
+ s = splimp();
+#endif
+ IFQ_LOCK(cl->cl_pif->pif_ifq);
+
+#ifdef ALTQ3_CLFIER_COMPAT
+ /* delete filters referencing to this class */
+ acc_discard_filters(&cl->cl_pif->pif_classifier, cl, 0);
+#endif
+
+ if (!qempty(cl->cl_q))
+ priq_purgeq(cl);
+
+ pif = cl->cl_pif;
+ pif->pif_classes[cl->cl_pri] = NULL;
+ if (pif->pif_maxpri == cl->cl_pri) {
+ for (pri = cl->cl_pri; pri >= 0; pri--)
+ if (pif->pif_classes[pri] != NULL) {
+ pif->pif_maxpri = pri;
+ break;
+ }
+ if (pri < 0)
+ pif->pif_maxpri = -1;
+ }
+ IFQ_UNLOCK(cl->cl_pif->pif_ifq);
+ splx(s);
+
+ if (cl->cl_red != NULL) {
+#ifdef ALTQ_RIO
+ if (q_is_rio(cl->cl_q))
+ rio_destroy((rio_t *)cl->cl_red);
+#endif
+#ifdef ALTQ_RED
+ if (q_is_red(cl->cl_q))
+ red_destroy(cl->cl_red);
+#endif
+ }
+ free(cl->cl_q, M_DEVBUF);
+ free(cl, M_DEVBUF);
+ return (0);
+}
+
+/*
+ * priq_enqueue is an enqueue function to be registered to
+ * (*altq_enqueue) in struct ifaltq.
+ */
+static int
+priq_enqueue(struct ifaltq *ifq, struct mbuf *m, struct altq_pktattr *pktattr)
+{
+ struct priq_if *pif = (struct priq_if *)ifq->altq_disc;
+ struct priq_class *cl;
+ struct pf_mtag *t;
+ int len;
+
+ IFQ_LOCK_ASSERT(ifq);
+
+ /* grab class set by classifier */
+ if ((m->m_flags & M_PKTHDR) == 0) {
+ /* should not happen */
+#if defined(__NetBSD__) || defined(__OpenBSD__)\
+ || (defined(__FreeBSD__) && __FreeBSD_version >= 501113)
+ printf("altq: packet for %s does not have pkthdr\n",
+ ifq->altq_ifp->if_xname);
+#else
+ printf("altq: packet for %s%d does not have pkthdr\n",
+ ifq->altq_ifp->if_name, ifq->altq_ifp->if_unit);
+#endif
+ m_freem(m);
+ return (ENOBUFS);
+ }
+ cl = NULL;
+ if ((t = pf_find_mtag(m)) != NULL)
+ cl = clh_to_clp(pif, t->qid);
+#ifdef ALTQ3_COMPAT
+ else if ((ifq->altq_flags & ALTQF_CLASSIFY) && pktattr != NULL)
+ cl = pktattr->pattr_class;
+#endif
+ if (cl == NULL) {
+ cl = pif->pif_default;
+ if (cl == NULL) {
+ m_freem(m);
+ return (ENOBUFS);
+ }
+ }
+#ifdef ALTQ3_COMPAT
+ if (pktattr != NULL)
+ cl->cl_pktattr = pktattr; /* save proto hdr used by ECN */
+ else
+#endif
+ cl->cl_pktattr = NULL;
+ len = m_pktlen(m);
+ if (priq_addq(cl, m) != 0) {
+ /* drop occurred. mbuf was freed in priq_addq. */
+ PKTCNTR_ADD(&cl->cl_dropcnt, len);
+ return (ENOBUFS);
+ }
+ IFQ_INC_LEN(ifq);
+
+ /* successfully queued. */
+ return (0);
+}
+
+/*
+ * priq_dequeue is a dequeue function to be registered to
+ * (*altq_dequeue) in struct ifaltq.
+ *
+ * note: ALTDQ_POLL returns the next packet without removing the packet
+ * from the queue. ALTDQ_REMOVE is a normal dequeue operation.
+ * ALTDQ_REMOVE must return the same packet if called immediately
+ * after ALTDQ_POLL.
+ */
+static struct mbuf *
+priq_dequeue(struct ifaltq *ifq, int op)
+{
+ struct priq_if *pif = (struct priq_if *)ifq->altq_disc;
+ struct priq_class *cl;
+ struct mbuf *m;
+ int pri;
+
+ IFQ_LOCK_ASSERT(ifq);
+
+ if (IFQ_IS_EMPTY(ifq))
+ /* no packet in the queue */
+ return (NULL);
+
+ for (pri = pif->pif_maxpri; pri >= 0; pri--) {
+ if ((cl = pif->pif_classes[pri]) != NULL &&
+ !qempty(cl->cl_q)) {
+ if (op == ALTDQ_POLL)
+ return (priq_pollq(cl));
+
+ m = priq_getq(cl);
+ if (m != NULL) {
+ IFQ_DEC_LEN(ifq);
+ if (qempty(cl->cl_q))
+ cl->cl_period++;
+ PKTCNTR_ADD(&cl->cl_xmitcnt, m_pktlen(m));
+ }
+ return (m);
+ }
+ }
+ return (NULL);
+}
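+
+/*
+ * usage sketch (illustrative; driver_has_room() is hypothetical):
+ *
+ *	m = priq_dequeue(ifq, ALTDQ_POLL);
+ *	if (m != NULL && driver_has_room(m))
+ *		m = priq_dequeue(ifq, ALTDQ_REMOVE);
+ *
+ * the REMOVE call is guaranteed to return the same mbuf as the
+ * preceding POLL.
+ */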
+
+static int
+priq_addq(struct priq_class *cl, struct mbuf *m)
+{
+
+#ifdef ALTQ_RIO
+ if (q_is_rio(cl->cl_q))
+ return rio_addq((rio_t *)cl->cl_red, cl->cl_q, m,
+ cl->cl_pktattr);
+#endif
+#ifdef ALTQ_RED
+ if (q_is_red(cl->cl_q))
+ return red_addq(cl->cl_red, cl->cl_q, m, cl->cl_pktattr);
+#endif
+ if (qlen(cl->cl_q) >= qlimit(cl->cl_q)) {
+ m_freem(m);
+ return (-1);
+ }
+
+ if (cl->cl_flags & PRCF_CLEARDSCP)
+ write_dsfield(m, cl->cl_pktattr, 0);
+
+ _addq(cl->cl_q, m);
+
+ return (0);
+}
+
+static struct mbuf *
+priq_getq(struct priq_class *cl)
+{
+#ifdef ALTQ_RIO
+ if (q_is_rio(cl->cl_q))
+ return rio_getq((rio_t *)cl->cl_red, cl->cl_q);
+#endif
+#ifdef ALTQ_RED
+ if (q_is_red(cl->cl_q))
+ return red_getq(cl->cl_red, cl->cl_q);
+#endif
+ return _getq(cl->cl_q);
+}
+
+static struct mbuf *
+priq_pollq(struct priq_class *cl)
+{
+ return qhead(cl->cl_q);
+}
+
+static void
+priq_purgeq(struct priq_class *cl)
+{
+ struct mbuf *m;
+
+ if (qempty(cl->cl_q))
+ return;
+
+ while ((m = _getq(cl->cl_q)) != NULL) {
+ PKTCNTR_ADD(&cl->cl_dropcnt, m_pktlen(m));
+ m_freem(m);
+ }
+ ASSERT(qlen(cl->cl_q) == 0);
+}
+
+static void
+get_class_stats(struct priq_classstats *sp, struct priq_class *cl)
+{
+ sp->class_handle = cl->cl_handle;
+ sp->qlength = qlen(cl->cl_q);
+ sp->qlimit = qlimit(cl->cl_q);
+ sp->period = cl->cl_period;
+ sp->xmitcnt = cl->cl_xmitcnt;
+ sp->dropcnt = cl->cl_dropcnt;
+
+ sp->qtype = qtype(cl->cl_q);
+#ifdef ALTQ_RED
+ if (q_is_red(cl->cl_q))
+ red_getstats(cl->cl_red, &sp->red[0]);
+#endif
+#ifdef ALTQ_RIO
+ if (q_is_rio(cl->cl_q))
+ rio_getstats((rio_t *)cl->cl_red, &sp->red[0]);
+#endif
+}
+
+/* convert a class handle to the corresponding class pointer */
+static struct priq_class *
+clh_to_clp(struct priq_if *pif, u_int32_t chandle)
+{
+ struct priq_class *cl;
+ int idx;
+
+ if (chandle == 0)
+ return (NULL);
+
+ for (idx = pif->pif_maxpri; idx >= 0; idx--)
+ if ((cl = pif->pif_classes[idx]) != NULL &&
+ cl->cl_handle == chandle)
+ return (cl);
+
+ return (NULL);
+}
+
+#ifdef ALTQ3_COMPAT
+
+static struct priq_if *
+priq_attach(ifq, bandwidth)
+ struct ifaltq *ifq;
+ u_int bandwidth;
+{
+ struct priq_if *pif;
+
+ pif = malloc(sizeof(struct priq_if),
+ M_DEVBUF, M_WAITOK);
+ if (pif == NULL)
+ return (NULL);
+ bzero(pif, sizeof(struct priq_if));
+ pif->pif_bandwidth = bandwidth;
+ pif->pif_maxpri = -1;
+ pif->pif_ifq = ifq;
+
+ /* add this state to the priq list */
+ pif->pif_next = pif_list;
+ pif_list = pif;
+
+ return (pif);
+}
+
+static int
+priq_detach(pif)
+ struct priq_if *pif;
+{
+ (void)priq_clear_interface(pif);
+
+ /* remove this interface from the pif list */
+ if (pif_list == pif)
+ pif_list = pif->pif_next;
+ else {
+ struct priq_if *p;
+
+ for (p = pif_list; p != NULL; p = p->pif_next)
+ if (p->pif_next == pif) {
+ p->pif_next = pif->pif_next;
+ break;
+ }
+ ASSERT(p != NULL);
+ }
+
+ free(pif, M_DEVBUF);
+ return (0);
+}
+
+/*
+ * priq device interface
+ */
+int
+priqopen(dev, flag, fmt, p)
+ dev_t dev;
+ int flag, fmt;
+#if (__FreeBSD_version > 500000)
+ struct thread *p;
+#else
+ struct proc *p;
+#endif
+{
+ /* everything will be done when the queueing scheme is attached. */
+ return 0;
+}
+
+int
+priqclose(dev, flag, fmt, p)
+ dev_t dev;
+ int flag, fmt;
+#if (__FreeBSD_version > 500000)
+ struct thread *p;
+#else
+ struct proc *p;
+#endif
+{
+ struct priq_if *pif;
+ int err, error = 0;
+
+ while ((pif = pif_list) != NULL) {
+ /* destroy all */
+ if (ALTQ_IS_ENABLED(pif->pif_ifq))
+ altq_disable(pif->pif_ifq);
+
+ err = altq_detach(pif->pif_ifq);
+ if (err == 0)
+ err = priq_detach(pif);
+ if (err != 0 && error == 0)
+ error = err;
+ }
+
+ return error;
+}
+
+int
+priqioctl(dev, cmd, addr, flag, p)
+ dev_t dev;
+ ioctlcmd_t cmd;
+ caddr_t addr;
+ int flag;
+#if (__FreeBSD_version > 500000)
+ struct thread *p;
+#else
+ struct proc *p;
+#endif
+{
+ struct priq_if *pif;
+ struct priq_interface *ifacep;
+ int error = 0;
+
+ /* check super-user privilege */
+ switch (cmd) {
+ case PRIQ_GETSTATS:
+ break;
+ default:
+#if (__FreeBSD_version > 700000)
+ if ((error = priv_check(p, PRIV_ALTQ_MANAGE)) != 0)
+ return (error);
+#elif (__FreeBSD_version > 400000)
+ if ((error = suser(p)) != 0)
+ return (error);
+#else
+ if ((error = suser(p->p_ucred, &p->p_acflag)) != 0)
+ return (error);
+#endif
+ break;
+ }
+
+ switch (cmd) {
+
+ case PRIQ_IF_ATTACH:
+ error = priqcmd_if_attach((struct priq_interface *)addr);
+ break;
+
+ case PRIQ_IF_DETACH:
+ error = priqcmd_if_detach((struct priq_interface *)addr);
+ break;
+
+ case PRIQ_ENABLE:
+ case PRIQ_DISABLE:
+ case PRIQ_CLEAR:
+ ifacep = (struct priq_interface *)addr;
+ if ((pif = altq_lookup(ifacep->ifname,
+ ALTQT_PRIQ)) == NULL) {
+ error = EBADF;
+ break;
+ }
+
+ switch (cmd) {
+ case PRIQ_ENABLE:
+ if (pif->pif_default == NULL) {
+#ifdef ALTQ_DEBUG
+ printf("priq: no default class\n");
+#endif
+ error = EINVAL;
+ break;
+ }
+ error = altq_enable(pif->pif_ifq);
+ break;
+
+ case PRIQ_DISABLE:
+ error = altq_disable(pif->pif_ifq);
+ break;
+
+ case PRIQ_CLEAR:
+ priq_clear_interface(pif);
+ break;
+ }
+ break;
+
+ case PRIQ_ADD_CLASS:
+ error = priqcmd_add_class((struct priq_add_class *)addr);
+ break;
+
+ case PRIQ_DEL_CLASS:
+ error = priqcmd_delete_class((struct priq_delete_class *)addr);
+ break;
+
+ case PRIQ_MOD_CLASS:
+ error = priqcmd_modify_class((struct priq_modify_class *)addr);
+ break;
+
+ case PRIQ_ADD_FILTER:
+ error = priqcmd_add_filter((struct priq_add_filter *)addr);
+ break;
+
+ case PRIQ_DEL_FILTER:
+ error = priqcmd_delete_filter((struct priq_delete_filter *)addr);
+ break;
+
+ case PRIQ_GETSTATS:
+ error = priqcmd_class_stats((struct priq_class_stats *)addr);
+ break;
+
+ default:
+ error = EINVAL;
+ break;
+ }
+ return error;
+}
+
+static int
+priqcmd_if_attach(ap)
+ struct priq_interface *ap;
+{
+ struct priq_if *pif;
+ struct ifnet *ifp;
+ int error;
+
+ if ((ifp = ifunit(ap->ifname)) == NULL)
+ return (ENXIO);
+
+ if ((pif = priq_attach(&ifp->if_snd, ap->arg)) == NULL)
+ return (ENOMEM);
+
+ /*
+ * set PRIQ to this ifnet structure.
+ */
+ if ((error = altq_attach(&ifp->if_snd, ALTQT_PRIQ, pif,
+ priq_enqueue, priq_dequeue, priq_request,
+ &pif->pif_classifier, acc_classify)) != 0)
+ (void)priq_detach(pif);
+
+ return (error);
+}
+
+static int
+priqcmd_if_detach(ap)
+ struct priq_interface *ap;
+{
+ struct priq_if *pif;
+ int error;
+
+ if ((pif = altq_lookup(ap->ifname, ALTQT_PRIQ)) == NULL)
+ return (EBADF);
+
+ if (ALTQ_IS_ENABLED(pif->pif_ifq))
+ altq_disable(pif->pif_ifq);
+
+ if ((error = altq_detach(pif->pif_ifq)))
+ return (error);
+
+ return priq_detach(pif);
+}
+
+static int
+priqcmd_add_class(ap)
+ struct priq_add_class *ap;
+{
+ struct priq_if *pif;
+ struct priq_class *cl;
+ int qid;
+
+ if ((pif = altq_lookup(ap->iface.ifname, ALTQT_PRIQ)) == NULL)
+ return (EBADF);
+
+ if (ap->pri < 0 || ap->pri >= PRIQ_MAXPRI)
+ return (EINVAL);
+ if (pif->pif_classes[ap->pri] != NULL)
+ return (EBUSY);
+
+ qid = ap->pri + 1;
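+	/* handles are pri + 1, presumably so that 0 can stay reserved
+	 * as PRIQ_NULLCLASS_HANDLE (see altq_priq.h) */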
+ if ((cl = priq_class_create(pif, ap->pri,
+ ap->qlimit, ap->flags, qid)) == NULL)
+ return (ENOMEM);
+
+ /* return a class handle to the user */
+ ap->class_handle = cl->cl_handle;
+
+ return (0);
+}
+
+static int
+priqcmd_delete_class(ap)
+ struct priq_delete_class *ap;
+{
+ struct priq_if *pif;
+ struct priq_class *cl;
+
+ if ((pif = altq_lookup(ap->iface.ifname, ALTQT_PRIQ)) == NULL)
+ return (EBADF);
+
+ if ((cl = clh_to_clp(pif, ap->class_handle)) == NULL)
+ return (EINVAL);
+
+ return priq_class_destroy(cl);
+}
+
+static int
+priqcmd_modify_class(ap)
+ struct priq_modify_class *ap;
+{
+ struct priq_if *pif;
+ struct priq_class *cl;
+
+ if ((pif = altq_lookup(ap->iface.ifname, ALTQT_PRIQ)) == NULL)
+ return (EBADF);
+
+ if (ap->pri < 0 || ap->pri >= PRIQ_MAXPRI)
+ return (EINVAL);
+
+ if ((cl = clh_to_clp(pif, ap->class_handle)) == NULL)
+ return (EINVAL);
+
+ /*
+ * if priority is changed, move the class to the new priority
+ */
+ if (pif->pif_classes[ap->pri] != cl) {
+ if (pif->pif_classes[ap->pri] != NULL)
+ return (EEXIST);
+ pif->pif_classes[cl->cl_pri] = NULL;
+ pif->pif_classes[ap->pri] = cl;
+ cl->cl_pri = ap->pri;
+ }
+
+ /* call priq_class_create to change class parameters */
+ if ((cl = priq_class_create(pif, ap->pri,
+ ap->qlimit, ap->flags, ap->class_handle)) == NULL)
+ return (ENOMEM);
+ return 0;
+}
+
+static int
+priqcmd_add_filter(ap)
+ struct priq_add_filter *ap;
+{
+ struct priq_if *pif;
+ struct priq_class *cl;
+
+ if ((pif = altq_lookup(ap->iface.ifname, ALTQT_PRIQ)) == NULL)
+ return (EBADF);
+
+ if ((cl = clh_to_clp(pif, ap->class_handle)) == NULL)
+ return (EINVAL);
+
+ return acc_add_filter(&pif->pif_classifier, &ap->filter,
+ cl, &ap->filter_handle);
+}
+
+static int
+priqcmd_delete_filter(ap)
+ struct priq_delete_filter *ap;
+{
+ struct priq_if *pif;
+
+ if ((pif = altq_lookup(ap->iface.ifname, ALTQT_PRIQ)) == NULL)
+ return (EBADF);
+
+ return acc_delete_filter(&pif->pif_classifier,
+ ap->filter_handle);
+}
+
+static int
+priqcmd_class_stats(ap)
+ struct priq_class_stats *ap;
+{
+ struct priq_if *pif;
+ struct priq_class *cl;
+ struct priq_classstats stats, *usp;
+ int pri, error;
+
+ if ((pif = altq_lookup(ap->iface.ifname, ALTQT_PRIQ)) == NULL)
+ return (EBADF);
+
+ ap->maxpri = pif->pif_maxpri;
+
+	/* then, copy out the stats of each priority class in turn */
+ usp = ap->stats;
+ for (pri = 0; pri <= pif->pif_maxpri; pri++) {
+ cl = pif->pif_classes[pri];
+ if (cl != NULL)
+ get_class_stats(&stats, cl);
+ else
+ bzero(&stats, sizeof(stats));
+ if ((error = copyout((caddr_t)&stats, (caddr_t)usp++,
+ sizeof(stats))) != 0)
+ return (error);
+ }
+ return (0);
+}
+
+#ifdef KLD_MODULE
+
+static struct altqsw priq_sw =
+ {"priq", priqopen, priqclose, priqioctl};
+
+ALTQ_MODULE(altq_priq, ALTQT_PRIQ, &priq_sw);
+MODULE_DEPEND(altq_priq, altq_red, 1, 1, 1);
+MODULE_DEPEND(altq_priq, altq_rio, 1, 1, 1);
+
+#endif /* KLD_MODULE */
+
+#endif /* ALTQ3_COMPAT */
+#endif /* ALTQ_PRIQ */
diff --git a/contrib/altq/rtems/freebsd/altq/altq_priq.h b/contrib/altq/rtems/freebsd/altq/altq_priq.h
new file mode 100644
index 00000000..8f456c91
--- /dev/null
+++ b/contrib/altq/rtems/freebsd/altq/altq_priq.h
@@ -0,0 +1,170 @@
+/* $KAME: altq_priq.h,v 1.7 2003/10/03 05:05:15 kjc Exp $ */
+/*
+ * Copyright (C) 2000-2003
+ * Sony Computer Science Laboratories Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY SONY CSL AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL SONY CSL OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _ALTQ_ALTQ_PRIQ_HH_
+#define _ALTQ_ALTQ_PRIQ_HH_
+
+#include <rtems/freebsd/altq/altq.h>
+#include <rtems/freebsd/altq/altq_classq.h>
+#include <rtems/freebsd/altq/altq_red.h>
+#include <rtems/freebsd/altq/altq_rio.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define PRIQ_MAXPRI 16 /* upper limit of the number of priorities */
+
+#ifdef ALTQ3_COMPAT
+struct priq_interface {
+ char ifname[IFNAMSIZ]; /* interface name (e.g., fxp0) */
+ u_long arg; /* request-specific argument */
+};
+
+struct priq_add_class {
+ struct priq_interface iface;
+ int pri; /* priority (0 is the lowest) */
+ int qlimit; /* queue size limit */
+ int flags; /* misc flags (see below) */
+
+ u_int32_t class_handle; /* return value */
+};
+#endif /* ALTQ3_COMPAT */
+
+/* priq class flags */
+#define PRCF_RED 0x0001 /* use RED */
+#define PRCF_ECN 0x0002 /* use RED/ECN */
+#define PRCF_RIO 0x0004 /* use RIO */
+#define PRCF_CLEARDSCP 0x0010 /* clear diffserv codepoint */
+#define PRCF_DEFAULTCLASS 0x1000 /* default class */
+
+/* special class handles */
+#define PRIQ_NULLCLASS_HANDLE 0
+
+#ifdef ALTQ3_COMPAT
+struct priq_delete_class {
+ struct priq_interface iface;
+ u_int32_t class_handle;
+};
+
+struct priq_modify_class {
+ struct priq_interface iface;
+ u_int32_t class_handle;
+ int pri;
+ int qlimit;
+ int flags;
+};
+
+struct priq_add_filter {
+ struct priq_interface iface;
+ u_int32_t class_handle;
+ struct flow_filter filter;
+
+ u_long filter_handle; /* return value */
+};
+
+struct priq_delete_filter {
+ struct priq_interface iface;
+ u_long filter_handle;
+};
+#endif /* ALTQ3_COMPAT */
+
+struct priq_classstats {
+ u_int32_t class_handle;
+
+ u_int qlength;
+ u_int qlimit;
+ u_int period;
+ struct pktcntr xmitcnt; /* transmitted packet counter */
+ struct pktcntr dropcnt; /* dropped packet counter */
+
+ /* red and rio related info */
+ int qtype;
+ struct redstats red[3]; /* rio has 3 red stats */
+};
+
+#ifdef ALTQ3_COMPAT
+struct priq_class_stats {
+ struct priq_interface iface;
+ int maxpri; /* in/out */
+
+ struct priq_classstats *stats; /* pointer to stats array */
+};
+
+#define PRIQ_IF_ATTACH _IOW('Q', 1, struct priq_interface)
+#define PRIQ_IF_DETACH _IOW('Q', 2, struct priq_interface)
+#define PRIQ_ENABLE _IOW('Q', 3, struct priq_interface)
+#define PRIQ_DISABLE _IOW('Q', 4, struct priq_interface)
+#define PRIQ_CLEAR _IOW('Q', 5, struct priq_interface)
+#define PRIQ_ADD_CLASS _IOWR('Q', 7, struct priq_add_class)
+#define PRIQ_DEL_CLASS _IOW('Q', 8, struct priq_delete_class)
+#define PRIQ_MOD_CLASS _IOW('Q', 9, struct priq_modify_class)
+#define PRIQ_ADD_FILTER _IOWR('Q', 10, struct priq_add_filter)
+#define PRIQ_DEL_FILTER _IOW('Q', 11, struct priq_delete_filter)
+#define PRIQ_GETSTATS _IOWR('Q', 12, struct priq_class_stats)
+
+#endif /* ALTQ3_COMPAT */
+
+#ifdef _KERNEL
+
+struct priq_class {
+ u_int32_t cl_handle; /* class handle */
+ class_queue_t *cl_q; /* class queue structure */
+ struct red *cl_red; /* RED state */
+ int cl_pri; /* priority */
+ int cl_flags; /* class flags */
+ struct priq_if *cl_pif; /* back pointer to pif */
+ struct altq_pktattr *cl_pktattr; /* saved header used by ECN */
+
+ /* statistics */
+ u_int cl_period; /* backlog period */
+ struct pktcntr cl_xmitcnt; /* transmitted packet counter */
+ struct pktcntr cl_dropcnt; /* dropped packet counter */
+};
+
+/*
+ * priq interface state
+ */
+struct priq_if {
+ struct priq_if *pif_next; /* interface state list */
+ struct ifaltq *pif_ifq; /* backpointer to ifaltq */
+ u_int pif_bandwidth; /* link bandwidth in bps */
+ int pif_maxpri; /* max priority in use */
+ struct priq_class *pif_default; /* default class */
+ struct priq_class *pif_classes[PRIQ_MAXPRI]; /* classes */
+#ifdef ALTQ3_CLFIER_COMPAT
+ struct acc_classifier pif_classifier; /* classifier */
+#endif
+};
+
+#endif /* _KERNEL */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _ALTQ_ALTQ_PRIQ_HH_ */
diff --git a/contrib/altq/rtems/freebsd/altq/altq_red.c b/contrib/altq/rtems/freebsd/altq/altq_red.c
new file mode 100644
index 00000000..0b76e3d7
--- /dev/null
+++ b/contrib/altq/rtems/freebsd/altq/altq_red.c
@@ -0,0 +1,1503 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/* $FreeBSD$ */
+/* $KAME: altq_red.c,v 1.18 2003/09/05 22:40:36 itojun Exp $ */
+
+/*
+ * Copyright (C) 1997-2003
+ * Sony Computer Science Laboratories Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY SONY CSL AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL SONY CSL OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+/*
+ * Copyright (c) 1990-1994 Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the Computer Systems
+ * Engineering Group at Lawrence Berkeley Laboratory.
+ * 4. Neither the name of the University nor of the Laboratory may be used
+ * to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#if defined(__FreeBSD__) || defined(__NetBSD__)
+#include <rtems/freebsd/local/opt_altq.h>
+#if (__FreeBSD__ != 2)
+#include <rtems/freebsd/local/opt_inet.h>
+#ifdef __FreeBSD__
+#include <rtems/freebsd/local/opt_inet6.h>
+#endif
+#endif
+#endif /* __FreeBSD__ || __NetBSD__ */
+#ifdef ALTQ_RED /* red is enabled by ALTQ_RED option in opt_altq.h */
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/errno.h>
+#if 1 /* ALTQ3_COMPAT */
+#include <rtems/freebsd/sys/sockio.h>
+#include <rtems/freebsd/sys/proc.h>
+#include <rtems/freebsd/sys/kernel.h>
+#ifdef ALTQ_FLOWVALVE
+#include <rtems/freebsd/sys/queue.h>
+#include <rtems/freebsd/sys/time.h>
+#endif
+#endif /* ALTQ3_COMPAT */
+
+#include <rtems/freebsd/net/if.h>
+
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/in_systm.h>
+#include <rtems/freebsd/netinet/ip.h>
+#ifdef INET6
+#include <rtems/freebsd/netinet/ip6.h>
+#endif
+
+#include <rtems/freebsd/net/pfvar.h>
+#include <rtems/freebsd/altq/altq.h>
+#include <rtems/freebsd/altq/altq_red.h>
+#ifdef ALTQ3_COMPAT
+#include <rtems/freebsd/altq/altq_conf.h>
+#ifdef ALTQ_FLOWVALVE
+#include <rtems/freebsd/altq/altq_flowvalve.h>
+#endif
+#endif
+
+/*
+ * ALTQ/RED (Random Early Detection) implementation using 32-bit
+ * fixed-point calculation.
+ *
+ * written by kjc using the ns code as a reference.
+ * you can learn more about red and ns from Sally's home page at
+ * http://www-nrg.ee.lbl.gov/floyd/
+ *
+ * most of the red parameter values are fixed in this implementation
+ * to prevent fixed-point overflow/underflow.
+ * if you change the parameters, watch out for overflow/underflow!
+ *
+ * the parameters used are recommended values by Sally.
+ * the corresponding ns config looks like:
+ * q_weight=0.00195
+ * minthresh=5 maxthresh=15 queue-size=60
+ * linterm=30
+ * dropmech=drop-tail
+ * bytes=false (can't be handled by 32-bit fixed-point)
+ * doubleq=false dqthresh=false
+ * wait=true
+ */
+/*
+ * alternative red parameters for a slow link.
+ *
+ * assume the queue length grows from zero to L and then stays at L;
+ * it takes N packets for q_avg to reach 63% of L.
+ * when q_weight is 0.002, N is about 500 packets.
+ * on a slow link like dial-up, 500 packets take more than 1 minute!
+ * when q_weight is 0.008, N is about 127 packets.
+ * when q_weight is 0.016, N is about 63 packets.
+ * bursts of 50 packets are allowed for 0.002, bursts of 25 packets
+ * are allowed for 0.016.
+ * see Sally's paper for more details.
+ */
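+/*
+ * illustrative arithmetic for the figures above: with weight w the
+ * average after n back-to-back packets of queue length L is
+ *	avg = L * (1 - (1 - w)^n)
+ * and since (1 - 1/k)^k is roughly 1/e, avg reaches 63% of L after
+ * about k packets when w = 1/k: ~512 packets for 0.00195, ~128 for
+ * 0.0078125 and ~64 for 0.015625.
+ */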
+/* normal red parameters */
+#define W_WEIGHT 512 /* inverse of weight of EWMA (511/512) */
+ /* q_weight = 0.00195 */
+
+/* red parameters for a slow link */
+#define W_WEIGHT_1 128 /* inverse of weight of EWMA (127/128) */
+ /* q_weight = 0.0078125 */
+
+/* red parameters for a very slow link (e.g., dialup) */
+#define W_WEIGHT_2 64 /* inverse of weight of EWMA (63/64) */
+ /* q_weight = 0.015625 */
+
+/* fixed-point uses 12-bit decimal places */
+#define FP_SHIFT 12 /* fixed-point shift */
+
+/* red parameters for drop probability */
+#define INV_P_MAX 10 /* inverse of max drop probability */
+#define TH_MIN 5 /* min threshold */
+#define TH_MAX 15 /* max threshold */
+
+#define RED_LIMIT 60 /* default max queue length */
+#define RED_STATS /* collect statistics */
+
+/*
+ * our default policy for forced-drop is drop-tail.
+ * (in altq-1.1.2 or earlier, the default was random-drop.
+ * but it makes more sense to punish the cause of the surge.)
+ * to switch to the random-drop policy, define "RED_RANDOM_DROP".
+ */
+
+#ifdef ALTQ3_COMPAT
+#ifdef ALTQ_FLOWVALVE
+/*
+ * flow-valve is an extension to protect red from unresponsive flows
+ * and to promote end-to-end congestion control.
+ * flow-valve observes the average drop rates of the flows that have
+ * experienced packet drops in the recent past.
+ * when the average drop rate exceeds the threshold, the flow is
+ * blocked by the flow-valve. the trapped flow should back off
+ * exponentially to escape from the flow-valve.
+ */
+#ifdef RED_RANDOM_DROP
+#error "random-drop can't be used with flow-valve!"
+#endif
+#endif /* ALTQ_FLOWVALVE */
+
+/* red_list keeps all red_queue_t's allocated. */
+static red_queue_t *red_list = NULL;
+
+#endif /* ALTQ3_COMPAT */
+
+/* default red parameter values */
+static int default_th_min = TH_MIN;
+static int default_th_max = TH_MAX;
+static int default_inv_pmax = INV_P_MAX;
+
+#ifdef ALTQ3_COMPAT
+/* internal function prototypes */
+static int red_enqueue(struct ifaltq *, struct mbuf *, struct altq_pktattr *);
+static struct mbuf *red_dequeue(struct ifaltq *, int);
+static int red_request(struct ifaltq *, int, void *);
+static void red_purgeq(red_queue_t *);
+static int red_detach(red_queue_t *);
+#ifdef ALTQ_FLOWVALVE
+static __inline struct fve *flowlist_lookup(struct flowvalve *,
+ struct altq_pktattr *, struct timeval *);
+static __inline struct fve *flowlist_reclaim(struct flowvalve *,
+ struct altq_pktattr *);
+static __inline void flowlist_move_to_head(struct flowvalve *, struct fve *);
+static __inline int fv_p2f(struct flowvalve *, int);
+#if 0 /* XXX: make the compiler happy (fv_alloc unused) */
+static struct flowvalve *fv_alloc(struct red *);
+#endif
+static void fv_destroy(struct flowvalve *);
+static int fv_checkflow(struct flowvalve *, struct altq_pktattr *,
+ struct fve **);
+static void fv_dropbyred(struct flowvalve *fv, struct altq_pktattr *,
+ struct fve *);
+#endif
+#endif /* ALTQ3_COMPAT */
+
+/*
+ * red support routines
+ */
+red_t *
+red_alloc(int weight, int inv_pmax, int th_min, int th_max, int flags,
+ int pkttime)
+{
+ red_t *rp;
+ int w, i;
+ int npkts_per_sec;
+
+ rp = malloc(sizeof(red_t), M_DEVBUF, M_WAITOK);
+ if (rp == NULL)
+ return (NULL);
+ bzero(rp, sizeof(red_t));
+
+ rp->red_avg = 0;
+ rp->red_idle = 1;
+
+ if (weight == 0)
+ rp->red_weight = W_WEIGHT;
+ else
+ rp->red_weight = weight;
+ if (inv_pmax == 0)
+ rp->red_inv_pmax = default_inv_pmax;
+ else
+ rp->red_inv_pmax = inv_pmax;
+ if (th_min == 0)
+ rp->red_thmin = default_th_min;
+ else
+ rp->red_thmin = th_min;
+ if (th_max == 0)
+ rp->red_thmax = default_th_max;
+ else
+ rp->red_thmax = th_max;
+
+ rp->red_flags = flags;
+
+ if (pkttime == 0)
+ /* default packet time: 1000 bytes / 10Mbps * 8 * 1000000 */
+ rp->red_pkttime = 800;
+ else
+ rp->red_pkttime = pkttime;
+
+ if (weight == 0) {
+ /* when the link is very slow, adjust red parameters */
+ npkts_per_sec = 1000000 / rp->red_pkttime;
+ if (npkts_per_sec < 50) {
+ /* up to about 400Kbps */
+ rp->red_weight = W_WEIGHT_2;
+ } else if (npkts_per_sec < 300) {
+ /* up to about 2.4Mbps */
+ rp->red_weight = W_WEIGHT_1;
+ }
+ }
+
+ /* calculate wshift. weight must be power of 2 */
+ w = rp->red_weight;
+ for (i = 0; w > 1; i++)
+ w = w >> 1;
+ rp->red_wshift = i;
+ w = 1 << rp->red_wshift;
+ if (w != rp->red_weight) {
+ printf("invalid weight value %d for red! use %d\n",
+ rp->red_weight, w);
+ rp->red_weight = w;
+ }
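+	/*
+	 * example: the default weight 512 gives red_wshift = 9, so the
+	 * EWMA multiply by (1 - 1/512) in red_addq() reduces to a shift;
+	 * a non-power-of-2 weight such as 500 would be rounded down to
+	 * 256 by the loop above, hence the warning.
+	 */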
+
+ /*
+ * thmin_s and thmax_s are scaled versions of th_min and th_max
+ * to be compared with avg.
+ */
+ rp->red_thmin_s = rp->red_thmin << (rp->red_wshift + FP_SHIFT);
+ rp->red_thmax_s = rp->red_thmax << (rp->red_wshift + FP_SHIFT);
+
+ /*
+ * precompute probability denominator
+ * probd = (2 * (TH_MAX-TH_MIN) / pmax) in fixed-point
+ */
+ rp->red_probd = (2 * (rp->red_thmax - rp->red_thmin)
+ * rp->red_inv_pmax) << FP_SHIFT;
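+	/*
+	 * worked numbers (for illustration): the compiled-in defaults
+	 * th_min=5, th_max=15 and inv_pmax=10 (p_max = 1/10) give
+	 * probd = 2 * 10 * 10 = 200 in packet units, i.e. 200 << 12
+	 * in fixed-point.
+	 */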
+
+ /* allocate weight table */
+ rp->red_wtab = wtab_alloc(rp->red_weight);
+
+ microtime(&rp->red_last);
+ return (rp);
+}
+
+void
+red_destroy(red_t *rp)
+{
+#ifdef ALTQ3_COMPAT
+#ifdef ALTQ_FLOWVALVE
+ if (rp->red_flowvalve != NULL)
+ fv_destroy(rp->red_flowvalve);
+#endif
+#endif /* ALTQ3_COMPAT */
+ wtab_destroy(rp->red_wtab);
+ free(rp, M_DEVBUF);
+}
+
+void
+red_getstats(red_t *rp, struct redstats *sp)
+{
+ sp->q_avg = rp->red_avg >> rp->red_wshift;
+ sp->xmit_cnt = rp->red_stats.xmit_cnt;
+ sp->drop_cnt = rp->red_stats.drop_cnt;
+ sp->drop_forced = rp->red_stats.drop_forced;
+ sp->drop_unforced = rp->red_stats.drop_unforced;
+ sp->marked_packets = rp->red_stats.marked_packets;
+}
+
+int
+red_addq(red_t *rp, class_queue_t *q, struct mbuf *m,
+ struct altq_pktattr *pktattr)
+{
+ int avg, droptype;
+ int n;
+#ifdef ALTQ3_COMPAT
+#ifdef ALTQ_FLOWVALVE
+ struct fve *fve = NULL;
+
+ if (rp->red_flowvalve != NULL && rp->red_flowvalve->fv_flows > 0)
+ if (fv_checkflow(rp->red_flowvalve, pktattr, &fve)) {
+ m_freem(m);
+ return (-1);
+ }
+#endif
+#endif /* ALTQ3_COMPAT */
+
+ avg = rp->red_avg;
+
+ /*
+ * if we were idle, we pretend that n packets arrived during
+ * the idle period.
+ */
+ if (rp->red_idle) {
+ struct timeval now;
+ int t;
+
+ rp->red_idle = 0;
+ microtime(&now);
+ t = (now.tv_sec - rp->red_last.tv_sec);
+ if (t > 60) {
+ /*
+			 * we have been idle for more than 1 minute; just
+			 * set avg to zero.  this also keeps the microsecond
+			 * conversion of t below from overflowing.
+ */
+ avg = 0;
+ } else {
+ t = t * 1000000 + (now.tv_usec - rp->red_last.tv_usec);
+ n = t / rp->red_pkttime - 1;
+
+ /* the following line does (avg = (1 - Wq)^n * avg) */
+ if (n > 0)
+ avg = (avg >> FP_SHIFT) *
+ pow_w(rp->red_wtab, n);
+ }
+ }
+
+ /* run estimator. (note: avg is scaled by WEIGHT in fixed-point) */
+ avg += (qlen(q) << FP_SHIFT) - (avg >> rp->red_wshift);
+ rp->red_avg = avg; /* save the new value */
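+	/*
+	 * in unscaled terms the line above computes
+	 *	avg = (1 - Wq) * avg + Wq * qlen
+	 * with Wq = 1/2^wshift; keeping avg scaled by
+	 * 2^(wshift + FP_SHIFT) turns the multiply into a shift.
+	 */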
+
+ /*
+ * red_count keeps a tally of arriving traffic that has not
+ * been dropped.
+ */
+ rp->red_count++;
+
+ /* see if we drop early */
+ droptype = DTYPE_NODROP;
+ if (avg >= rp->red_thmin_s && qlen(q) > 1) {
+ if (avg >= rp->red_thmax_s) {
+ /* avg >= th_max: forced drop */
+ droptype = DTYPE_FORCED;
+ } else if (rp->red_old == 0) {
+ /* first exceeds th_min */
+ rp->red_count = 1;
+ rp->red_old = 1;
+ } else if (drop_early((avg - rp->red_thmin_s) >> rp->red_wshift,
+ rp->red_probd, rp->red_count)) {
+ /* mark or drop by red */
+ if ((rp->red_flags & REDF_ECN) &&
+ mark_ecn(m, pktattr, rp->red_flags)) {
+ /* successfully marked. do not drop. */
+ rp->red_count = 0;
+#ifdef RED_STATS
+ rp->red_stats.marked_packets++;
+#endif
+ } else {
+ /* unforced drop by red */
+ droptype = DTYPE_EARLY;
+ }
+ }
+ } else {
+ /* avg < th_min */
+ rp->red_old = 0;
+ }
+
+ /*
+ * if the queue length hits the hard limit, it's a forced drop.
+ */
+ if (droptype == DTYPE_NODROP && qlen(q) >= qlimit(q))
+ droptype = DTYPE_FORCED;
+
+#ifdef RED_RANDOM_DROP
+ /* if successful or forced drop, enqueue this packet. */
+ if (droptype != DTYPE_EARLY)
+ _addq(q, m);
+#else
+ /* if successful, enqueue this packet. */
+ if (droptype == DTYPE_NODROP)
+ _addq(q, m);
+#endif
+ if (droptype != DTYPE_NODROP) {
+ if (droptype == DTYPE_EARLY) {
+ /* drop the incoming packet */
+#ifdef RED_STATS
+ rp->red_stats.drop_unforced++;
+#endif
+ } else {
+ /* forced drop, select a victim packet in the queue. */
+#ifdef RED_RANDOM_DROP
+ m = _getq_random(q);
+#endif
+#ifdef RED_STATS
+ rp->red_stats.drop_forced++;
+#endif
+ }
+#ifdef RED_STATS
+ PKTCNTR_ADD(&rp->red_stats.drop_cnt, m_pktlen(m));
+#endif
+ rp->red_count = 0;
+#ifdef ALTQ3_COMPAT
+#ifdef ALTQ_FLOWVALVE
+ if (rp->red_flowvalve != NULL)
+ fv_dropbyred(rp->red_flowvalve, pktattr, fve);
+#endif
+#endif /* ALTQ3_COMPAT */
+ m_freem(m);
+ return (-1);
+ }
+ /* successfully queued */
+#ifdef RED_STATS
+ PKTCNTR_ADD(&rp->red_stats.xmit_cnt, m_pktlen(m));
+#endif
+ return (0);
+}
+
+/*
+ * early-drop probability is calculated as follows:
+ * prob = p_max * (avg - th_min) / (th_max - th_min)
+ * prob_a = prob / (2 - count*prob)
+ * = (avg-th_min) / (2*(th_max-th_min)*inv_p_max - count*(avg-th_min))
+ * here prob_a increases as successive undrop count increases.
+ * (prob_a starts from prob/2, becomes prob when (count == (1 / prob)),
+ * becomes 1 when (count >= (2 / prob))).
+ */
+int
+drop_early(int fp_len, int fp_probd, int count)
+{
+ int d; /* denominator of drop-probability */
+
+ d = fp_probd - count * fp_len;
+ if (d <= 0)
+ /* count exceeds the hard limit: drop or mark */
+ return (1);
+
+ /*
+ * now the range of d is [1..600] in fixed-point. (when
+ * th_max-th_min=10 and p_max=1/30)
+ * drop probability = (avg - TH_MIN) / d
+ */
+
+ if ((arc4random() % d) < fp_len) {
+ /* drop or mark */
+ return (1);
+ }
+ /* no drop/mark */
+ return (0);
+}
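+/*
+ * worked example (defaults, for illustration): th_min=5, th_max=15 and
+ * p_max=1/10 give fp_probd = 200 << FP_SHIFT.  at avg = 10 packets,
+ * fp_len = 5 << FP_SHIFT, so with count = 0 the drop probability is
+ * 5/200 = 2.5%, half of p_max * (avg - th_min)/(th_max - th_min) = 5%;
+ * as count grows, d shrinks and the probability rises toward 1.
+ */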
+
+/*
+ * try to mark CE bit to the packet.
+ * returns 1 if successfully marked, 0 otherwise.
+ */
+int
+mark_ecn(struct mbuf *m, struct altq_pktattr *pktattr, int flags)
+{
+ struct mbuf *m0;
+ struct pf_mtag *at;
+ void *hdr;
+ int af;
+
+ at = pf_find_mtag(m);
+ if (at != NULL) {
+ af = at->af;
+ hdr = at->hdr;
+#ifdef ALTQ3_COMPAT
+ } else if (pktattr != NULL) {
+ af = pktattr->pattr_af;
+ hdr = pktattr->pattr_hdr;
+#endif /* ALTQ3_COMPAT */
+ } else
+ return (0);
+
+ if (af != AF_INET && af != AF_INET6)
+ return (0);
+
+ /* verify that pattr_hdr is within the mbuf data */
+ for (m0 = m; m0 != NULL; m0 = m0->m_next)
+ if (((caddr_t)hdr >= m0->m_data) &&
+ ((caddr_t)hdr < m0->m_data + m0->m_len))
+ break;
+ if (m0 == NULL) {
+ /* ick, tag info is stale */
+ return (0);
+ }
+
+ switch (af) {
+ case AF_INET:
+ if (flags & REDF_ECN4) {
+ struct ip *ip = hdr;
+ u_int8_t otos;
+ int sum;
+
+ if (ip->ip_v != 4)
+ return (0); /* version mismatch! */
+
+ if ((ip->ip_tos & IPTOS_ECN_MASK) == IPTOS_ECN_NOTECT)
+ return (0); /* not-ECT */
+ if ((ip->ip_tos & IPTOS_ECN_MASK) == IPTOS_ECN_CE)
+ return (1); /* already marked */
+
+ /*
+ * ecn-capable but not marked,
+ * mark CE and update checksum
+ */
+ otos = ip->ip_tos;
+ ip->ip_tos |= IPTOS_ECN_CE;
+ /*
+ * update checksum (from RFC1624)
+ * HC' = ~(~HC + ~m + m')
+ */
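+			/*
+			 * here m is the old tos value and m' the new one
+			 * with CE set; in one's-complement arithmetic
+			 * ~m + m' is congruent to m' - m, and the two fold
+			 * steps below push the carries back into the low
+			 * 16 bits.
+			 */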
+ sum = ~ntohs(ip->ip_sum) & 0xffff;
+ sum += (~otos & 0xffff) + ip->ip_tos;
+ sum = (sum >> 16) + (sum & 0xffff);
+ sum += (sum >> 16); /* add carry */
+ ip->ip_sum = htons(~sum & 0xffff);
+ return (1);
+ }
+ break;
+#ifdef INET6
+ case AF_INET6:
+ if (flags & REDF_ECN6) {
+ struct ip6_hdr *ip6 = hdr;
+ u_int32_t flowlabel;
+
+ flowlabel = ntohl(ip6->ip6_flow);
+ if ((flowlabel >> 28) != 6)
+ return (0); /* version mismatch! */
+ if ((flowlabel & (IPTOS_ECN_MASK << 20)) ==
+ (IPTOS_ECN_NOTECT << 20))
+ return (0); /* not-ECT */
+ if ((flowlabel & (IPTOS_ECN_MASK << 20)) ==
+ (IPTOS_ECN_CE << 20))
+ return (1); /* already marked */
+ /*
+ * ecn-capable but not marked, mark CE
+ */
+ flowlabel |= (IPTOS_ECN_CE << 20);
+ ip6->ip6_flow = htonl(flowlabel);
+ return (1);
+ }
+ break;
+#endif /* INET6 */
+ }
+
+ /* not marked */
+ return (0);
+}
+
+struct mbuf *
+red_getq(rp, q)
+ red_t *rp;
+ class_queue_t *q;
+{
+ struct mbuf *m;
+
+ if ((m = _getq(q)) == NULL) {
+ if (rp->red_idle == 0) {
+ rp->red_idle = 1;
+ microtime(&rp->red_last);
+ }
+ return NULL;
+ }
+
+ rp->red_idle = 0;
+ return (m);
+}
+
+/*
+ * helper routine to calibrate avg during idle.
+ * pow_w(wtab, n) returns (1 - Wq)^n in fixed-point
+ * here Wq = 1/weight and the code assumes Wq is close to zero.
+ *
+ * w_tab[n] holds ((1 - Wq)^(2^n)) in fixed-point.
+ */
+static struct wtab *wtab_list = NULL; /* pointer to wtab list */
+
+struct wtab *
+wtab_alloc(int weight)
+{
+ struct wtab *w;
+ int i;
+
+ for (w = wtab_list; w != NULL; w = w->w_next)
+ if (w->w_weight == weight) {
+ w->w_refcount++;
+ return (w);
+ }
+
+ w = malloc(sizeof(struct wtab), M_DEVBUF, M_WAITOK);
+ if (w == NULL)
+ panic("wtab_alloc: malloc failed!");
+ bzero(w, sizeof(struct wtab));
+ w->w_weight = weight;
+ w->w_refcount = 1;
+ w->w_next = wtab_list;
+ wtab_list = w;
+
+ /* initialize the weight table */
+ w->w_tab[0] = ((weight - 1) << FP_SHIFT) / weight;
+ for (i = 1; i < 32; i++) {
+ w->w_tab[i] = (w->w_tab[i-1] * w->w_tab[i-1]) >> FP_SHIFT;
+ if (w->w_tab[i] == 0 && w->w_param_max == 0)
+ w->w_param_max = 1 << i;
+ }
+
+ return (w);
+}
+
+int
+wtab_destroy(struct wtab *w)
+{
+ struct wtab *prev;
+
+ if (--w->w_refcount > 0)
+ return (0);
+
+ if (wtab_list == w)
+ wtab_list = w->w_next;
+ else for (prev = wtab_list; prev->w_next != NULL; prev = prev->w_next)
+ if (prev->w_next == w) {
+ prev->w_next = w->w_next;
+ break;
+ }
+
+ free(w, M_DEVBUF);
+ return (0);
+}
+
+int32_t
+pow_w(struct wtab *w, int n)
+{
+ int i, bit;
+ int32_t val;
+
+ if (n >= w->w_param_max)
+ return (0);
+
+ val = 1 << FP_SHIFT;
+ if (n <= 0)
+ return (val);
+
+ bit = 1;
+ i = 0;
+ while (n) {
+ if (n & bit) {
+ val = (val * w->w_tab[i]) >> FP_SHIFT;
+ n &= ~bit;
+ }
+ i++;
+ bit <<= 1;
+ }
+ return (val);
+}
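+/*
+ * pow_w() is square-and-multiply over the precomputed table: e.g.
+ * n = 5 = 0b101 yields w_tab[0] * w_tab[2] =
+ * (1-Wq)^1 * (1-Wq)^4 = (1-Wq)^5.
+ */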
+
+#ifdef ALTQ3_COMPAT
+/*
+ * red device interface
+ */
+altqdev_decl(red);
+
+int
+redopen(dev, flag, fmt, p)
+ dev_t dev;
+ int flag, fmt;
+#if (__FreeBSD_version > 500000)
+ struct thread *p;
+#else
+ struct proc *p;
+#endif
+{
+ /* everything will be done when the queueing scheme is attached. */
+ return 0;
+}
+
+int
+redclose(dev, flag, fmt, p)
+ dev_t dev;
+ int flag, fmt;
+#if (__FreeBSD_version > 500000)
+ struct thread *p;
+#else
+ struct proc *p;
+#endif
+{
+ red_queue_t *rqp;
+ int err, error = 0;
+
+ while ((rqp = red_list) != NULL) {
+ /* destroy all */
+ err = red_detach(rqp);
+ if (err != 0 && error == 0)
+ error = err;
+ }
+
+ return error;
+}
+
+int
+redioctl(dev, cmd, addr, flag, p)
+ dev_t dev;
+ ioctlcmd_t cmd;
+ caddr_t addr;
+ int flag;
+#if (__FreeBSD_version > 500000)
+ struct thread *p;
+#else
+ struct proc *p;
+#endif
+{
+ red_queue_t *rqp;
+ struct red_interface *ifacep;
+ struct ifnet *ifp;
+ int error = 0;
+
+ /* check super-user privilege */
+ switch (cmd) {
+ case RED_GETSTATS:
+ break;
+ default:
+#if (__FreeBSD_version > 700000)
+ if ((error = priv_check(p, PRIV_ALTQ_MANAGE)) != 0)
+#elif (__FreeBSD_version > 400000)
+ if ((error = suser(p)) != 0)
+#else
+ if ((error = suser(p->p_ucred, &p->p_acflag)) != 0)
+#endif
+ return (error);
+ break;
+ }
+
+ switch (cmd) {
+
+ case RED_ENABLE:
+ ifacep = (struct red_interface *)addr;
+ if ((rqp = altq_lookup(ifacep->red_ifname, ALTQT_RED)) == NULL) {
+ error = EBADF;
+ break;
+ }
+ error = altq_enable(rqp->rq_ifq);
+ break;
+
+ case RED_DISABLE:
+ ifacep = (struct red_interface *)addr;
+ if ((rqp = altq_lookup(ifacep->red_ifname, ALTQT_RED)) == NULL) {
+ error = EBADF;
+ break;
+ }
+ error = altq_disable(rqp->rq_ifq);
+ break;
+
+ case RED_IF_ATTACH:
+ ifp = ifunit(((struct red_interface *)addr)->red_ifname);
+ if (ifp == NULL) {
+ error = ENXIO;
+ break;
+ }
+
+ /* allocate and initialize red_queue_t */
+ rqp = malloc(sizeof(red_queue_t), M_DEVBUF, M_WAITOK);
+ if (rqp == NULL) {
+ error = ENOMEM;
+ break;
+ }
+ bzero(rqp, sizeof(red_queue_t));
+
+ rqp->rq_q = malloc(sizeof(class_queue_t),
+ M_DEVBUF, M_WAITOK);
+ if (rqp->rq_q == NULL) {
+ free(rqp, M_DEVBUF);
+ error = ENOMEM;
+ break;
+ }
+ bzero(rqp->rq_q, sizeof(class_queue_t));
+
+ rqp->rq_red = red_alloc(0, 0, 0, 0, 0, 0);
+ if (rqp->rq_red == NULL) {
+ free(rqp->rq_q, M_DEVBUF);
+ free(rqp, M_DEVBUF);
+ error = ENOMEM;
+ break;
+ }
+
+ rqp->rq_ifq = &ifp->if_snd;
+ qtail(rqp->rq_q) = NULL;
+ qlen(rqp->rq_q) = 0;
+ qlimit(rqp->rq_q) = RED_LIMIT;
+ qtype(rqp->rq_q) = Q_RED;
+
+ /*
+ * set RED to this ifnet structure.
+ */
+ error = altq_attach(rqp->rq_ifq, ALTQT_RED, rqp,
+ red_enqueue, red_dequeue, red_request,
+ NULL, NULL);
+ if (error) {
+ red_destroy(rqp->rq_red);
+ free(rqp->rq_q, M_DEVBUF);
+ free(rqp, M_DEVBUF);
+ break;
+ }
+
+ /* add this state to the red list */
+ rqp->rq_next = red_list;
+ red_list = rqp;
+ break;
+
+ case RED_IF_DETACH:
+ ifacep = (struct red_interface *)addr;
+ if ((rqp = altq_lookup(ifacep->red_ifname, ALTQT_RED)) == NULL) {
+ error = EBADF;
+ break;
+ }
+ error = red_detach(rqp);
+ break;
+
+ case RED_GETSTATS:
+ do {
+ struct red_stats *q_stats;
+ red_t *rp;
+
+ q_stats = (struct red_stats *)addr;
+ if ((rqp = altq_lookup(q_stats->iface.red_ifname,
+ ALTQT_RED)) == NULL) {
+ error = EBADF;
+ break;
+ }
+
+ q_stats->q_len = qlen(rqp->rq_q);
+ q_stats->q_limit = qlimit(rqp->rq_q);
+
+ rp = rqp->rq_red;
+ q_stats->q_avg = rp->red_avg >> rp->red_wshift;
+ q_stats->xmit_cnt = rp->red_stats.xmit_cnt;
+ q_stats->drop_cnt = rp->red_stats.drop_cnt;
+ q_stats->drop_forced = rp->red_stats.drop_forced;
+ q_stats->drop_unforced = rp->red_stats.drop_unforced;
+ q_stats->marked_packets = rp->red_stats.marked_packets;
+
+ q_stats->weight = rp->red_weight;
+ q_stats->inv_pmax = rp->red_inv_pmax;
+ q_stats->th_min = rp->red_thmin;
+ q_stats->th_max = rp->red_thmax;
+
+#ifdef ALTQ_FLOWVALVE
+ if (rp->red_flowvalve != NULL) {
+ struct flowvalve *fv = rp->red_flowvalve;
+ q_stats->fv_flows = fv->fv_flows;
+ q_stats->fv_pass = fv->fv_stats.pass;
+ q_stats->fv_predrop = fv->fv_stats.predrop;
+ q_stats->fv_alloc = fv->fv_stats.alloc;
+ q_stats->fv_escape = fv->fv_stats.escape;
+ } else {
+#endif /* ALTQ_FLOWVALVE */
+ q_stats->fv_flows = 0;
+ q_stats->fv_pass = 0;
+ q_stats->fv_predrop = 0;
+ q_stats->fv_alloc = 0;
+ q_stats->fv_escape = 0;
+#ifdef ALTQ_FLOWVALVE
+ }
+#endif /* ALTQ_FLOWVALVE */
+ } while (/*CONSTCOND*/ 0);
+ break;
+
+ case RED_CONFIG:
+ do {
+ struct red_conf *fc;
+ red_t *new;
+ int s, limit;
+
+ fc = (struct red_conf *)addr;
+ if ((rqp = altq_lookup(fc->iface.red_ifname,
+ ALTQT_RED)) == NULL) {
+ error = EBADF;
+ break;
+ }
+ new = red_alloc(fc->red_weight,
+ fc->red_inv_pmax,
+ fc->red_thmin,
+ fc->red_thmax,
+ fc->red_flags,
+ fc->red_pkttime);
+ if (new == NULL) {
+ error = ENOMEM;
+ break;
+ }
+
+#ifdef __NetBSD__
+ s = splnet();
+#else
+ s = splimp();
+#endif
+ red_purgeq(rqp);
+ limit = fc->red_limit;
+ if (limit < fc->red_thmax)
+ limit = fc->red_thmax;
+ qlimit(rqp->rq_q) = limit;
+
+ red_destroy(rqp->rq_red);
+ rqp->rq_red = new;
+
+ splx(s);
+
+ /* write back new values */
+ fc->red_limit = limit;
+ fc->red_inv_pmax = rqp->rq_red->red_inv_pmax;
+ fc->red_thmin = rqp->rq_red->red_thmin;
+ fc->red_thmax = rqp->rq_red->red_thmax;
+
+ } while (/*CONSTCOND*/ 0);
+ break;
+
+ case RED_SETDEFAULTS:
+ do {
+ struct redparams *rp;
+
+ rp = (struct redparams *)addr;
+
+ default_th_min = rp->th_min;
+ default_th_max = rp->th_max;
+ default_inv_pmax = rp->inv_pmax;
+ } while (/*CONSTCOND*/ 0);
+ break;
+
+ default:
+ error = EINVAL;
+ break;
+ }
+ return error;
+}
+
+static int
+red_detach(rqp)
+ red_queue_t *rqp;
+{
+ red_queue_t *tmp;
+ int error = 0;
+
+ if (ALTQ_IS_ENABLED(rqp->rq_ifq))
+ altq_disable(rqp->rq_ifq);
+
+ if ((error = altq_detach(rqp->rq_ifq)))
+ return (error);
+
+ if (red_list == rqp)
+ red_list = rqp->rq_next;
+ else {
+ for (tmp = red_list; tmp != NULL; tmp = tmp->rq_next)
+ if (tmp->rq_next == rqp) {
+ tmp->rq_next = rqp->rq_next;
+ break;
+ }
+ if (tmp == NULL)
+ printf("red_detach: no state found in red_list!\n");
+ }
+
+ red_destroy(rqp->rq_red);
+ free(rqp->rq_q, M_DEVBUF);
+ free(rqp, M_DEVBUF);
+ return (error);
+}
+
+/*
+ * enqueue routine:
+ *
+ * returns: 0 when successfully queued.
+ * ENOBUFS when drop occurs.
+ */
+static int
+red_enqueue(ifq, m, pktattr)
+ struct ifaltq *ifq;
+ struct mbuf *m;
+ struct altq_pktattr *pktattr;
+{
+ red_queue_t *rqp = (red_queue_t *)ifq->altq_disc;
+
+ IFQ_LOCK_ASSERT(ifq);
+
+ if (red_addq(rqp->rq_red, rqp->rq_q, m, pktattr) < 0)
+ return ENOBUFS;
+ ifq->ifq_len++;
+ return 0;
+}
+
+/*
+ * dequeue routine:
+ * must be called in splimp.
+ *
+ * returns: mbuf dequeued.
+ * NULL when no packet is available in the queue.
+ */
+
+static struct mbuf *
+red_dequeue(ifq, op)
+ struct ifaltq *ifq;
+ int op;
+{
+ red_queue_t *rqp = (red_queue_t *)ifq->altq_disc;
+ struct mbuf *m;
+
+ IFQ_LOCK_ASSERT(ifq);
+
+ if (op == ALTDQ_POLL)
+ return qhead(rqp->rq_q);
+
+ /* op == ALTDQ_REMOVE */
+ m = red_getq(rqp->rq_red, rqp->rq_q);
+ if (m != NULL)
+ ifq->ifq_len--;
+ return (m);
+}
+
+static int
+red_request(ifq, req, arg)
+ struct ifaltq *ifq;
+ int req;
+ void *arg;
+{
+ red_queue_t *rqp = (red_queue_t *)ifq->altq_disc;
+
+ IFQ_LOCK_ASSERT(ifq);
+
+ switch (req) {
+ case ALTRQ_PURGE:
+ red_purgeq(rqp);
+ break;
+ }
+ return (0);
+}
+
+static void
+red_purgeq(rqp)
+ red_queue_t *rqp;
+{
+ _flushq(rqp->rq_q);
+ if (ALTQ_IS_ENABLED(rqp->rq_ifq))
+ rqp->rq_ifq->ifq_len = 0;
+}
+
+#ifdef ALTQ_FLOWVALVE
+
+#define FV_PSHIFT 7 /* weight of average drop rate -- 1/128 */
+#define FV_PSCALE(x) ((x) << FV_PSHIFT)
+#define FV_PUNSCALE(x) ((x) >> FV_PSHIFT)
+#define FV_FSHIFT 5 /* weight of average fraction -- 1/32 */
+#define FV_FSCALE(x) ((x) << FV_FSHIFT)
+#define FV_FUNSCALE(x) ((x) >> FV_FSHIFT)
+
+#define FV_TIMER (3 * hz) /* timer value for garbage collector */
+#define FV_FLOWLISTSIZE 64 /* how many flows in flowlist */
+
+#define FV_N 10 /* update fve_f every FV_N packets */
+
+#define FV_BACKOFFTHRESH 1 /* backoff threshold interval in second */
+#define FV_TTHRESH 3 /* time threshold to delete fve */
+#define FV_ALPHA 5 /* extra packet count */
+
+#define FV_STATS
+
+#if (__FreeBSD_version > 300000)
+#define FV_TIMESTAMP(tp) getmicrotime(tp)
+#else
+#define FV_TIMESTAMP(tp) { (*(tp)) = time; }
+#endif
+
+/*
+ * Brtt table: 127 entry table to convert drop rate (p) to
+ * the corresponding bandwidth fraction (f)
+ * the following equation is implemented using the scaled values
+ * fve_p and fve_f in the fixed-point format.
+ *
+ * Brtt(p) = 1 /(sqrt(4*p/3) + min(1,3*sqrt(p*6/8)) * p * (1+32 * p*p))
+ * f = Brtt(p) / (max_th + alpha)
+ */
+#define BRTT_SIZE 128
+#define BRTT_SHIFT 12
+#define BRTT_MASK 0x0007f000
+#define BRTT_PMAX (1 << (FV_PSHIFT + FP_SHIFT))
+
+const int brtt_tab[BRTT_SIZE] = {
+ 0, 1262010, 877019, 703694, 598706, 525854, 471107, 427728,
+ 392026, 361788, 335598, 312506, 291850, 273158, 256081, 240361,
+ 225800, 212247, 199585, 187788, 178388, 169544, 161207, 153333,
+ 145888, 138841, 132165, 125836, 119834, 114141, 108739, 103612,
+ 98747, 94129, 89746, 85585, 81637, 77889, 74333, 70957,
+ 67752, 64711, 61824, 59084, 56482, 54013, 51667, 49440,
+ 47325, 45315, 43406, 41591, 39866, 38227, 36667, 35184,
+ 33773, 32430, 31151, 29933, 28774, 27668, 26615, 25611,
+ 24653, 23740, 22868, 22035, 21240, 20481, 19755, 19062,
+ 18399, 17764, 17157, 16576, 16020, 15487, 14976, 14487,
+ 14017, 13567, 13136, 12721, 12323, 11941, 11574, 11222,
+ 10883, 10557, 10243, 9942, 9652, 9372, 9103, 8844,
+ 8594, 8354, 8122, 7898, 7682, 7474, 7273, 7079,
+ 6892, 6711, 6536, 6367, 6204, 6046, 5893, 5746,
+ 5603, 5464, 5330, 5201, 5075, 4954, 4836, 4722,
+ 4611, 4504, 4400, 4299, 4201, 4106, 4014, 3924
+};
+
+static __inline struct fve *
+flowlist_lookup(fv, pktattr, now)
+ struct flowvalve *fv;
+ struct altq_pktattr *pktattr;
+ struct timeval *now;
+{
+ struct fve *fve;
+ int flows;
+ struct ip *ip;
+#ifdef INET6
+ struct ip6_hdr *ip6;
+#endif
+ struct timeval tthresh;
+
+ if (pktattr == NULL)
+ return (NULL);
+
+ tthresh.tv_sec = now->tv_sec - FV_TTHRESH;
+ flows = 0;
+ /*
+ * search the flow list
+ */
+ switch (pktattr->pattr_af) {
+ case AF_INET:
+ ip = (struct ip *)pktattr->pattr_hdr;
+ TAILQ_FOREACH(fve, &fv->fv_flowlist, fve_lru){
+ if (fve->fve_lastdrop.tv_sec == 0)
+ break;
+ if (fve->fve_lastdrop.tv_sec < tthresh.tv_sec) {
+ fve->fve_lastdrop.tv_sec = 0;
+ break;
+ }
+ if (fve->fve_flow.flow_af == AF_INET &&
+ fve->fve_flow.flow_ip.ip_src.s_addr ==
+ ip->ip_src.s_addr &&
+ fve->fve_flow.flow_ip.ip_dst.s_addr ==
+ ip->ip_dst.s_addr)
+ return (fve);
+ flows++;
+ }
+ break;
+#ifdef INET6
+ case AF_INET6:
+ ip6 = (struct ip6_hdr *)pktattr->pattr_hdr;
+ TAILQ_FOREACH(fve, &fv->fv_flowlist, fve_lru){
+ if (fve->fve_lastdrop.tv_sec == 0)
+ break;
+ if (fve->fve_lastdrop.tv_sec < tthresh.tv_sec) {
+ fve->fve_lastdrop.tv_sec = 0;
+ break;
+ }
+ if (fve->fve_flow.flow_af == AF_INET6 &&
+ IN6_ARE_ADDR_EQUAL(&fve->fve_flow.flow_ip6.ip6_src,
+ &ip6->ip6_src) &&
+ IN6_ARE_ADDR_EQUAL(&fve->fve_flow.flow_ip6.ip6_dst,
+ &ip6->ip6_dst))
+ return (fve);
+ flows++;
+ }
+ break;
+#endif /* INET6 */
+
+ default:
+ /* unknown protocol. no drop. */
+ return (NULL);
+ }
+ fv->fv_flows = flows; /* save the number of active fve's */
+ return (NULL);
+}
+
+static __inline struct fve *
+flowlist_reclaim(fv, pktattr)
+ struct flowvalve *fv;
+ struct altq_pktattr *pktattr;
+{
+ struct fve *fve;
+ struct ip *ip;
+#ifdef INET6
+ struct ip6_hdr *ip6;
+#endif
+
+ /*
+ * get an entry from the tail of the LRU list.
+ */
+ fve = TAILQ_LAST(&fv->fv_flowlist, fv_flowhead);
+
+ switch (pktattr->pattr_af) {
+ case AF_INET:
+ ip = (struct ip *)pktattr->pattr_hdr;
+ fve->fve_flow.flow_af = AF_INET;
+ fve->fve_flow.flow_ip.ip_src = ip->ip_src;
+ fve->fve_flow.flow_ip.ip_dst = ip->ip_dst;
+ break;
+#ifdef INET6
+ case AF_INET6:
+ ip6 = (struct ip6_hdr *)pktattr->pattr_hdr;
+ fve->fve_flow.flow_af = AF_INET6;
+ fve->fve_flow.flow_ip6.ip6_src = ip6->ip6_src;
+ fve->fve_flow.flow_ip6.ip6_dst = ip6->ip6_dst;
+ break;
+#endif
+ }
+
+ fve->fve_state = Green;
+	fve->fve_p = 0;
+	fve->fve_f = 0;
+ fve->fve_ifseq = fv->fv_ifseq - 1;
+ fve->fve_count = 0;
+
+ fv->fv_flows++;
+#ifdef FV_STATS
+ fv->fv_stats.alloc++;
+#endif
+ return (fve);
+}
+
+static __inline void
+flowlist_move_to_head(fv, fve)
+ struct flowvalve *fv;
+ struct fve *fve;
+{
+ if (TAILQ_FIRST(&fv->fv_flowlist) != fve) {
+ TAILQ_REMOVE(&fv->fv_flowlist, fve, fve_lru);
+ TAILQ_INSERT_HEAD(&fv->fv_flowlist, fve, fve_lru);
+ }
+}
+
+#if 0 /* XXX: make the compiler happy (fv_alloc unused) */
+/*
+ * allocate flowvalve structure
+ */
+static struct flowvalve *
+fv_alloc(rp)
+ struct red *rp;
+{
+ struct flowvalve *fv;
+ struct fve *fve;
+ int i, num;
+
+ num = FV_FLOWLISTSIZE;
+ fv = malloc(sizeof(struct flowvalve),
+ M_DEVBUF, M_WAITOK);
+ if (fv == NULL)
+ return (NULL);
+ bzero(fv, sizeof(struct flowvalve));
+
+ fv->fv_fves = malloc(sizeof(struct fve) * num,
+ M_DEVBUF, M_WAITOK);
+ if (fv->fv_fves == NULL) {
+ free(fv, M_DEVBUF);
+ return (NULL);
+ }
+ bzero(fv->fv_fves, sizeof(struct fve) * num);
+
+ fv->fv_flows = 0;
+ TAILQ_INIT(&fv->fv_flowlist);
+ for (i = 0; i < num; i++) {
+ fve = &fv->fv_fves[i];
+ fve->fve_lastdrop.tv_sec = 0;
+ TAILQ_INSERT_TAIL(&fv->fv_flowlist, fve, fve_lru);
+ }
+
+ /* initialize drop rate threshold in scaled fixed-point */
+ fv->fv_pthresh = (FV_PSCALE(1) << FP_SHIFT) / rp->red_inv_pmax;
+
+ /* initialize drop rate to fraction table */
+ fv->fv_p2ftab = malloc(sizeof(int) * BRTT_SIZE,
+ M_DEVBUF, M_WAITOK);
+ if (fv->fv_p2ftab == NULL) {
+ free(fv->fv_fves, M_DEVBUF);
+ free(fv, M_DEVBUF);
+ return (NULL);
+ }
+ /*
+ * create the p2f table.
+ * (shift is used to keep the precision)
+ */
+ for (i = 1; i < BRTT_SIZE; i++) {
+ int f;
+
+ f = brtt_tab[i] << 8;
+ fv->fv_p2ftab[i] = (f / (rp->red_thmax + FV_ALPHA)) >> 8;
+ }
+
+ return (fv);
+}
+#endif
+
+static void
+fv_destroy(fv)
+ struct flowvalve *fv;
+{
+ free(fv->fv_p2ftab, M_DEVBUF);
+ free(fv->fv_fves, M_DEVBUF);
+ free(fv, M_DEVBUF);
+}
+
+static __inline int
+fv_p2f(fv, p)
+ struct flowvalve *fv;
+ int p;
+{
+ int val, f;
+
+ if (p >= BRTT_PMAX)
+ f = fv->fv_p2ftab[BRTT_SIZE-1];
+ else if ((val = (p & BRTT_MASK)))
+ f = fv->fv_p2ftab[(val >> BRTT_SHIFT)];
+ else
+ f = fv->fv_p2ftab[1];
+ return (f);
+}
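+/*
+ * note: fve_p is scaled by 2^(FV_PSHIFT + FP_SHIFT) = 2^19, so
+ * BRTT_MASK (0x0007f000) picks bits 12..18 of p, quantizing the
+ * drop rate into the 127 usable table slots; p >= 1.0 (BRTT_PMAX)
+ * saturates at the last entry.
+ */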
+
+/*
+ * check if an arriving packet should be pre-dropped.
+ * called from red_addq() when a packet arrives.
+ * returns 1 when the packet should be pre-dropped.
+ * should be called in splimp.
+ */
+static int
+fv_checkflow(fv, pktattr, fcache)
+ struct flowvalve *fv;
+ struct altq_pktattr *pktattr;
+ struct fve **fcache;
+{
+ struct fve *fve;
+ struct timeval now;
+
+ fv->fv_ifseq++;
+ FV_TIMESTAMP(&now);
+
+ if ((fve = flowlist_lookup(fv, pktattr, &now)) == NULL)
+ /* no matching entry in the flowlist */
+ return (0);
+
+ *fcache = fve;
+
+ /* update fraction f for every FV_N packets */
+ if (++fve->fve_count == FV_N) {
+ /*
+ * f = Wf * N / (fv_ifseq - fve_ifseq) + (1 - Wf) * f
+ */
+ fve->fve_f =
+ (FV_N << FP_SHIFT) / (fv->fv_ifseq - fve->fve_ifseq)
+ + fve->fve_f - FV_FUNSCALE(fve->fve_f);
+ fve->fve_ifseq = fv->fv_ifseq;
+ fve->fve_count = 0;
+ }
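+	/*
+	 * fve_f thus tracks this flow's share of arrivals on the
+	 * interface: FV_N of the last (fv_ifseq - fve_ifseq) packets
+	 * were from this flow, smoothed with weight Wf = 1/32.
+	 */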
+
+ /*
+ * overpumping test
+ */
+ if (fve->fve_state == Green && fve->fve_p > fv->fv_pthresh) {
+ int fthresh;
+
+ /* calculate a threshold */
+ fthresh = fv_p2f(fv, fve->fve_p);
+ if (fve->fve_f > fthresh)
+ fve->fve_state = Red;
+ }
+
+ if (fve->fve_state == Red) {
+ /*
+ * backoff test
+ */
+ if (now.tv_sec - fve->fve_lastdrop.tv_sec > FV_BACKOFFTHRESH) {
+ /* no drop for at least FV_BACKOFFTHRESH sec */
+ fve->fve_p = 0;
+ fve->fve_state = Green;
+#ifdef FV_STATS
+ fv->fv_stats.escape++;
+#endif
+ } else {
+ /* block this flow */
+ flowlist_move_to_head(fv, fve);
+ fve->fve_lastdrop = now;
+#ifdef FV_STATS
+ fv->fv_stats.predrop++;
+#endif
+ return (1);
+ }
+ }
+
+ /*
+ * p = (1 - Wp) * p
+ */
+ fve->fve_p -= FV_PUNSCALE(fve->fve_p);
+ if (fve->fve_p < 0)
+ fve->fve_p = 0;
+#ifdef FV_STATS
+ fv->fv_stats.pass++;
+#endif
+ return (0);
+}
+
+/*
+ * called from red_addq when a packet is dropped by red.
+ * should be called in splimp.
+ */
+static void
+fv_dropbyred(fv, pktattr, fcache)
+ struct flowvalve *fv;
+ struct altq_pktattr *pktattr;
+ struct fve *fcache;
+{
+ struct fve *fve;
+ struct timeval now;
+
+ if (pktattr == NULL)
+ return;
+ FV_TIMESTAMP(&now);
+
+ if (fcache != NULL)
+ /* the fve of this packet is already cached */
+ fve = fcache;
+ else if ((fve = flowlist_lookup(fv, pktattr, &now)) == NULL)
+ fve = flowlist_reclaim(fv, pktattr);
+
+ flowlist_move_to_head(fv, fve);
+
+ /*
+	 * update p: fv_checkflow() has already applied the (1 - Wp)
+	 * decay for this packet, so adding Wp here completes
+ * p = Wp + (1 - Wp) * p
+ */
+ fve->fve_p = (1 << FP_SHIFT) + fve->fve_p;
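+	/*
+	 * note: fve_p is scaled by 2^(FV_PSHIFT + FP_SHIFT), so adding
+	 * 1 << FP_SHIFT adds Wp = 1/128 in real terms.
+	 */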
+
+ fve->fve_lastdrop = now;
+}
+
+#endif /* ALTQ_FLOWVALVE */
+
+#ifdef KLD_MODULE
+
+static struct altqsw red_sw =
+ {"red", redopen, redclose, redioctl};
+
+ALTQ_MODULE(altq_red, ALTQT_RED, &red_sw);
+MODULE_VERSION(altq_red, 1);
+
+#endif /* KLD_MODULE */
+#endif /* ALTQ3_COMPAT */
+
+#endif /* ALTQ_RED */
diff --git a/contrib/altq/rtems/freebsd/altq/altq_red.h b/contrib/altq/rtems/freebsd/altq/altq_red.h
new file mode 100644
index 00000000..0876464c
--- /dev/null
+++ b/contrib/altq/rtems/freebsd/altq/altq_red.h
@@ -0,0 +1,198 @@
+/* $KAME: altq_red.h,v 1.8 2003/07/10 12:07:49 kjc Exp $ */
+
+/*
+ * Copyright (C) 1997-2003
+ * Sony Computer Science Laboratories Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY SONY CSL AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL SONY CSL OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _ALTQ_ALTQ_RED_HH_
+#define _ALTQ_ALTQ_RED_HH_
+
+#include <rtems/freebsd/altq/altq_classq.h>
+
+#ifdef ALTQ3_COMPAT
+struct red_interface {
+ char red_ifname[IFNAMSIZ];
+};
+
+struct red_stats {
+ struct red_interface iface;
+ int q_len;
+ int q_avg;
+
+ struct pktcntr xmit_cnt;
+ struct pktcntr drop_cnt;
+ u_int drop_forced;
+ u_int drop_unforced;
+ u_int marked_packets;
+
+ /* static red parameters */
+ int q_limit;
+ int weight;
+ int inv_pmax;
+ int th_min;
+ int th_max;
+
+ /* flowvalve related stuff */
+ u_int fv_flows;
+ u_int fv_pass;
+ u_int fv_predrop;
+ u_int fv_alloc;
+ u_int fv_escape;
+};
+
+struct red_conf {
+ struct red_interface iface;
+ int red_weight; /* weight for EWMA */
+ int red_inv_pmax; /* inverse of max drop probability */
+ int red_thmin; /* red min threshold */
+ int red_thmax; /* red max threshold */
+ int red_limit; /* max queue length */
+ int red_pkttime; /* average packet time in usec */
+ int red_flags; /* see below */
+};
+#endif /* ALTQ3_COMPAT */
+
+/* red flags */
+#define REDF_ECN4 0x01 /* use packet marking for IPv4 packets */
+#define REDF_ECN6 0x02 /* use packet marking for IPv6 packets */
+#define REDF_ECN (REDF_ECN4 | REDF_ECN6)
+#define REDF_FLOWVALVE 0x04 /* use flowvalve (aka penalty-box) */
+
+/*
+ * simpler versions of red parameters and statistics used by other
+ * disciplines (e.g., CBQ)
+ */
+struct redparams {
+ int th_min; /* red min threshold */
+ int th_max; /* red max threshold */
+ int inv_pmax; /* inverse of max drop probability */
+};
+
+struct redstats {
+ int q_avg;
+ struct pktcntr xmit_cnt;
+ struct pktcntr drop_cnt;
+ u_int drop_forced;
+ u_int drop_unforced;
+ u_int marked_packets;
+};
+
+#ifdef ALTQ3_COMPAT
+/*
+ * IOCTLs for RED
+ */
+#define RED_IF_ATTACH _IOW('Q', 1, struct red_interface)
+#define RED_IF_DETACH _IOW('Q', 2, struct red_interface)
+#define RED_ENABLE _IOW('Q', 3, struct red_interface)
+#define RED_DISABLE _IOW('Q', 4, struct red_interface)
+#define RED_CONFIG _IOWR('Q', 6, struct red_conf)
+#define RED_GETSTATS _IOWR('Q', 12, struct red_stats)
+#define RED_SETDEFAULTS _IOW('Q', 30, struct redparams)
+#endif /* ALTQ3_COMPAT */
+
+#ifdef _KERNEL
+
+#ifdef ALTQ3_COMPAT
+struct flowvalve;
+#endif
+
+/* weight table structure for idle time calibration */
+struct wtab {
+ struct wtab *w_next;
+ int w_weight;
+ int w_param_max;
+ int w_refcount;
+ int32_t w_tab[32];
+};
+
+typedef struct red {
+ int red_pkttime; /* average packet time in micro sec
+ used for idle calibration */
+ int red_flags; /* red flags */
+
+ /* red parameters */
+ int red_weight; /* weight for EWMA */
+ int red_inv_pmax; /* inverse of max drop probability */
+ int red_thmin; /* red min threshold */
+ int red_thmax; /* red max threshold */
+
+ /* variables for internal use */
+ int red_wshift; /* log(red_weight) */
+ int red_thmin_s; /* th_min scaled by avgshift */
+ int red_thmax_s; /* th_max scaled by avgshift */
+ int red_probd; /* drop probability denominator */
+
+ int red_avg; /* queue len avg scaled by avgshift */
+ int red_count; /* packet count since last dropped/
+ marked packet */
+ int red_idle; /* queue was empty */
+ int red_old; /* avg is above th_min */
+ struct wtab *red_wtab; /* weight table */
+ struct timeval red_last; /* time when the queue becomes idle */
+
+#ifdef ALTQ3_COMPAT
+ struct flowvalve *red_flowvalve; /* flowvalve state */
+#endif
+
+ struct {
+ struct pktcntr xmit_cnt;
+ struct pktcntr drop_cnt;
+ u_int drop_forced;
+ u_int drop_unforced;
+ u_int marked_packets;
+ } red_stats;
+} red_t;
+
+#ifdef ALTQ3_COMPAT
+typedef struct red_queue {
+ struct red_queue *rq_next; /* next red_state in the list */
+ struct ifaltq *rq_ifq; /* backpointer to ifaltq */
+
+ class_queue_t *rq_q;
+
+ red_t *rq_red;
+} red_queue_t;
+#endif /* ALTQ3_COMPAT */
+
+/* red drop types */
+#define DTYPE_NODROP 0 /* no drop */
+#define DTYPE_FORCED 1 /* a "forced" drop */
+#define DTYPE_EARLY 2 /* an "unforced" (early) drop */
+
+extern red_t *red_alloc(int, int, int, int, int, int);
+extern void red_destroy(red_t *);
+extern void red_getstats(red_t *, struct redstats *);
+extern int red_addq(red_t *, class_queue_t *, struct mbuf *,
+ struct altq_pktattr *);
+extern struct mbuf *red_getq(red_t *, class_queue_t *);
+extern int drop_early(int, int, int);
+extern int mark_ecn(struct mbuf *, struct altq_pktattr *, int);
+extern struct wtab *wtab_alloc(int);
+extern int wtab_destroy(struct wtab *);
+extern int32_t pow_w(struct wtab *, int);
+
+#endif /* _KERNEL */
+
+#endif /* _ALTQ_ALTQ_RED_HH_ */
diff --git a/contrib/altq/rtems/freebsd/altq/altq_rio.c b/contrib/altq/rtems/freebsd/altq/altq_rio.c
new file mode 100644
index 00000000..5055a2e0
--- /dev/null
+++ b/contrib/altq/rtems/freebsd/altq/altq_rio.c
@@ -0,0 +1,855 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/* $FreeBSD$ */
+/* $KAME: altq_rio.c,v 1.17 2003/07/10 12:07:49 kjc Exp $ */
+
+/*
+ * Copyright (C) 1998-2003
+ * Sony Computer Science Laboratories Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY SONY CSL AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL SONY CSL OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+/*
+ * Copyright (c) 1990-1994 Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the Computer Systems
+ * Engineering Group at Lawrence Berkeley Laboratory.
+ * 4. Neither the name of the University nor of the Laboratory may be used
+ * to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#if defined(__FreeBSD__) || defined(__NetBSD__)
+#include <rtems/freebsd/local/opt_altq.h>
+#if (__FreeBSD__ != 2)
+#include <rtems/freebsd/local/opt_inet.h>
+#ifdef __FreeBSD__
+#include <rtems/freebsd/local/opt_inet6.h>
+#endif
+#endif
+#endif /* __FreeBSD__ || __NetBSD__ */
+#ifdef ALTQ_RIO /* rio is enabled by ALTQ_RIO option in opt_altq.h */
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/errno.h>
+#if 1 /* ALTQ3_COMPAT */
+#include <rtems/freebsd/sys/proc.h>
+#include <rtems/freebsd/sys/sockio.h>
+#include <rtems/freebsd/sys/kernel.h>
+#endif
+
+#include <rtems/freebsd/net/if.h>
+
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/in_systm.h>
+#include <rtems/freebsd/netinet/ip.h>
+#ifdef INET6
+#include <rtems/freebsd/netinet/ip6.h>
+#endif
+
+#include <rtems/freebsd/net/pfvar.h>
+#include <rtems/freebsd/altq/altq.h>
+#include <rtems/freebsd/altq/altq_cdnr.h>
+#include <rtems/freebsd/altq/altq_red.h>
+#include <rtems/freebsd/altq/altq_rio.h>
+#ifdef ALTQ3_COMPAT
+#include <rtems/freebsd/altq/altq_conf.h>
+#endif
+
+/*
+ * RIO: RED with IN/OUT bit
+ * described in
+ * "Explicit Allocation of Best Effort Packet Delivery Service"
+ * David D. Clark and Wenjia Fang, MIT Lab for Computer Science
+ * http://diffserv.lcs.mit.edu/Papers/exp-alloc-ddc-wf.{ps,pdf}
+ *
+ * this implementation is extended to support more than 2 drop precedence
+ * values as described in RFC2597 (Assured Forwarding PHB Group).
+ *
+ */
+/*
+ * AF DS (differentiated service) codepoints.
+ * (classes can be mapped to CBQ or H-FSC classes.)
+ *
+ * 0 1 2 3 4 5 6 7
+ * +---+---+---+---+---+---+---+---+
+ * | CLASS |DropPre| 0 | CU |
+ * +---+---+---+---+---+---+---+---+
+ *
+ * class 1: 001
+ * class 2: 010
+ * class 3: 011
+ * class 4: 100
+ *
+ * low drop prec: 01
+ * medium drop prec: 10
+ * high drop prec: 11
+ */
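+/*
+ * for example, the layout above gives the standard AF codepoints
+ * (DSCP in the upper six bits of the TOS/traffic-class byte):
+ *	AF11 (class 1, low drop)    = 001 01 0 -> dscp 0x0a, tos byte 0x28
+ *	AF12 (class 1, medium drop) = 001 10 0 -> dscp 0x0c, tos byte 0x30
+ *	AF13 (class 1, high drop)   = 001 11 0 -> dscp 0x0e, tos byte 0x38
+ */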
+
+/* normal red parameters */
+#define W_WEIGHT 512 /* inverse of weight of EWMA (511/512) */
+ /* q_weight = 0.00195 */
+
+/* red parameters for a slow link */
+#define W_WEIGHT_1 128 /* inverse of weight of EWMA (127/128) */
+ /* q_weight = 0.0078125 */
+
+/* red parameters for a very slow link (e.g., dialup) */
+#define W_WEIGHT_2 64 /* inverse of weight of EWMA (63/64) */
+ /* q_weight = 0.015625 */
+
+/* fixed-point uses 12-bit decimal places */
+#define FP_SHIFT 12 /* fixed-point shift */
+
+/* red parameters for drop probability */
+#define INV_P_MAX 10 /* inverse of max drop probability */
+#define TH_MIN 5 /* min threshold */
+#define TH_MAX 15 /* max threshold */
+
+#define RIO_LIMIT 60 /* default max queue length */
+#define RIO_STATS /* collect statistics */
+
+#define TV_DELTA(a, b, delta) { \
+ register int xxs; \
+ \
+ delta = (a)->tv_usec - (b)->tv_usec; \
+ if ((xxs = (a)->tv_sec - (b)->tv_sec) != 0) { \
+ if (xxs < 0) { \
+ delta = 60000000; \
+ } else if (xxs > 4) { \
+ if (xxs > 60) \
+ delta = 60000000; \
+ else \
+ delta += xxs * 1000000; \
+ } else while (xxs > 0) { \
+ delta += 1000000; \
+ xxs--; \
+ } \
+ } \
+}
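+/*
+ * a minimal usage sketch of TV_DELTA (hypothetical variables, not
+ * from the original code): it stores the time elapsed from *(b) to
+ * *(a) in microseconds into delta, clamped to at most 60 seconds.
+ *
+ *	struct timeval t0, t1;
+ *	int delta;
+ *
+ *	microtime(&t0);
+ *	(do some work)
+ *	microtime(&t1);
+ *	TV_DELTA(&t1, &t0, delta);	(delta now holds elapsed usecs)
+ */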
+
+#ifdef ALTQ3_COMPAT
+/* rio_list keeps all rio_queue_t's allocated. */
+static rio_queue_t *rio_list = NULL;
+#endif
+/* default rio parameter values */
+static struct redparams default_rio_params[RIO_NDROPPREC] = {
+ /* th_min, th_max, inv_pmax */
+ { TH_MAX * 2 + TH_MIN, TH_MAX * 3, INV_P_MAX }, /* low drop precedence */
+ { TH_MAX + TH_MIN, TH_MAX * 2, INV_P_MAX }, /* medium drop precedence */
+ { TH_MIN, TH_MAX, INV_P_MAX } /* high drop precedence */
+};
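+/*
+ * with TH_MIN = 5 and TH_MAX = 15, the defaults above work out to
+ * (thresholds in packets):
+ *	low drop precedence:    th_min 35, th_max 45
+ *	medium drop precedence: th_min 20, th_max 30
+ *	high drop precedence:   th_min  5, th_max 15
+ * so traffic tagged with a higher drop precedence is dropped earlier.
+ */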
+
+/* internal function prototypes */
+static int dscp2index(u_int8_t);
+#ifdef ALTQ3_COMPAT
+static int rio_enqueue(struct ifaltq *, struct mbuf *, struct altq_pktattr *);
+static struct mbuf *rio_dequeue(struct ifaltq *, int);
+static int rio_request(struct ifaltq *, int, void *);
+static int rio_detach(rio_queue_t *);
+
+/*
+ * rio device interface
+ */
+altqdev_decl(rio);
+
+#endif /* ALTQ3_COMPAT */
+
+rio_t *
+rio_alloc(int weight, struct redparams *params, int flags, int pkttime)
+{
+ rio_t *rp;
+ int w, i;
+ int npkts_per_sec;
+
+ rp = malloc(sizeof(rio_t), M_DEVBUF, M_WAITOK);
+ if (rp == NULL)
+ return (NULL);
+ bzero(rp, sizeof(rio_t));
+
+ rp->rio_flags = flags;
+ if (pkttime == 0)
+ /* default packet time: 1000 bytes / 10Mbps * 8 * 1000000 */
+ rp->rio_pkttime = 800;
+ else
+ rp->rio_pkttime = pkttime;
+
+ if (weight != 0)
+ rp->rio_weight = weight;
+ else {
+ /* use default */
+ rp->rio_weight = W_WEIGHT;
+
+ /* when the link is very slow, adjust red parameters */
+ npkts_per_sec = 1000000 / rp->rio_pkttime;
+ if (npkts_per_sec < 50) {
+ /* up to about 400Kbps */
+ rp->rio_weight = W_WEIGHT_2;
+ } else if (npkts_per_sec < 300) {
+ /* up to about 2.4Mbps */
+ rp->rio_weight = W_WEIGHT_1;
+ }
+ }
+
+ /* calculate wshift. weight must be power of 2 */
+ w = rp->rio_weight;
+ for (i = 0; w > 1; i++)
+ w = w >> 1;
+ rp->rio_wshift = i;
+ w = 1 << rp->rio_wshift;
+ if (w != rp->rio_weight) {
+ printf("invalid weight value %d for red! use %d\n",
+ rp->rio_weight, w);
+ rp->rio_weight = w;
+ }
+
+ /* allocate weight table */
+ rp->rio_wtab = wtab_alloc(rp->rio_weight);
+
+ for (i = 0; i < RIO_NDROPPREC; i++) {
+ struct dropprec_state *prec = &rp->rio_precstate[i];
+
+ prec->avg = 0;
+ prec->idle = 1;
+
+ if (params == NULL || params[i].inv_pmax == 0)
+ prec->inv_pmax = default_rio_params[i].inv_pmax;
+ else
+ prec->inv_pmax = params[i].inv_pmax;
+ if (params == NULL || params[i].th_min == 0)
+ prec->th_min = default_rio_params[i].th_min;
+ else
+ prec->th_min = params[i].th_min;
+ if (params == NULL || params[i].th_max == 0)
+ prec->th_max = default_rio_params[i].th_max;
+ else
+ prec->th_max = params[i].th_max;
+
+ /*
+ * th_min_s and th_max_s are scaled versions of th_min
+ * and th_max to be compared with avg.
+ */
+ prec->th_min_s = prec->th_min << (rp->rio_wshift + FP_SHIFT);
+ prec->th_max_s = prec->th_max << (rp->rio_wshift + FP_SHIFT);
+
+ /*
+ * precompute probability denominator
+ * probd = (2 * (TH_MAX-TH_MIN) / pmax) in fixed-point
+ */
+ prec->probd = (2 * (prec->th_max - prec->th_min)
+ * prec->inv_pmax) << FP_SHIFT;
+
+ microtime(&prec->last);
+ }
+
+ return (rp);
+}
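+/*
+ * a worked example of the fixed-point setup in rio_alloc() (numbers
+ * are illustrative, not from the original sources): the default
+ * weight 512 gives rio_wshift = 9, and for the high drop precedence
+ * defaults th_min = 5, th_max = 15:
+ *	th_min_s = 5  << (9 + 12) = 10485760
+ *	th_max_s = 15 << (9 + 12) = 31457280
+ * which match the scale at which the estimator keeps avg.
+ */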
+
+void
+rio_destroy(rio_t *rp)
+{
+ wtab_destroy(rp->rio_wtab);
+ free(rp, M_DEVBUF);
+}
+
+void
+rio_getstats(rio_t *rp, struct redstats *sp)
+{
+ int i;
+
+ for (i = 0; i < RIO_NDROPPREC; i++) {
+ bcopy(&rp->q_stats[i], sp, sizeof(struct redstats));
+ sp->q_avg = rp->rio_precstate[i].avg >> rp->rio_wshift;
+ sp++;
+ }
+}
+
+#if (RIO_NDROPPREC == 3)
+/*
+ * internally, a drop precedence value is converted to an index
+ * starting from 0.
+ */
+static int
+dscp2index(u_int8_t dscp)
+{
+ int dpindex = dscp & AF_DROPPRECMASK;
+
+ if (dpindex == 0)
+ return (0);
+ return ((dpindex >> 3) - 1);
+}
+#endif
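+/*
+ * e.g., since AF_DROPPRECMASK must cover the two DropPre bits
+ * (0x18 in the TOS byte) for the shift above to work, dscp2index()
+ * maps:
+ *	low drop (01)    -> 0x08 -> index 0
+ *	medium drop (10) -> 0x10 -> index 1
+ *	high drop (11)   -> 0x18 -> index 2
+ * and a codepoint without drop precedence bits falls back to index 0.
+ */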
+
+#if 1
+/*
+ * kludge: when a packet is dequeued, we need to know its drop precedence
+ * in order to keep the queue length of each drop precedence.
+ * use m_pkthdr.rcvif to pass this info.
+ */
+#define RIOM_SET_PRECINDEX(m, idx) \
+ do { (m)->m_pkthdr.rcvif = (void *)((long)(idx)); } while (0)
+#define RIOM_GET_PRECINDEX(m) \
+ ({ long idx; idx = (long)((m)->m_pkthdr.rcvif); \
+ (m)->m_pkthdr.rcvif = NULL; idx; })
+#endif
+
+int
+rio_addq(rio_t *rp, class_queue_t *q, struct mbuf *m,
+ struct altq_pktattr *pktattr)
+{
+ int avg, droptype;
+ u_int8_t dsfield, odsfield;
+ int dpindex, i, n, t;
+ struct timeval now;
+ struct dropprec_state *prec;
+
+ dsfield = odsfield = read_dsfield(m, pktattr);
+ dpindex = dscp2index(dsfield);
+
+ /*
+ * update avg of the precedence states whose drop precedence
+ * is larger than or equal to the drop precedence of the packet
+ */
+ now.tv_sec = 0;
+ for (i = dpindex; i < RIO_NDROPPREC; i++) {
+ prec = &rp->rio_precstate[i];
+ avg = prec->avg;
+ if (prec->idle) {
+ prec->idle = 0;
+ if (now.tv_sec == 0)
+ microtime(&now);
+ t = (now.tv_sec - prec->last.tv_sec);
+ if (t > 60)
+ avg = 0;
+ else {
+ t = t * 1000000 +
+ (now.tv_usec - prec->last.tv_usec);
+ n = t / rp->rio_pkttime;
+ /* calculate (avg = (1 - Wq)^n * avg) */
+ if (n > 0)
+ avg = (avg >> FP_SHIFT) *
+ pow_w(rp->rio_wtab, n);
+ }
+ }
+
+ /* run estimator. (avg is scaled by WEIGHT in fixed-point) */
+ avg += (prec->qlen << FP_SHIFT) - (avg >> rp->rio_wshift);
+ prec->avg = avg; /* save the new value */
+ /*
+ * count keeps a tally of arriving traffic that has not
+ * been dropped.
+ */
+ prec->count++;
+ }
+
+ prec = &rp->rio_precstate[dpindex];
+ avg = prec->avg;
+
+ /* see if we drop early */
+ droptype = DTYPE_NODROP;
+ if (avg >= prec->th_min_s && prec->qlen > 1) {
+ if (avg >= prec->th_max_s) {
+ /* avg >= th_max: forced drop */
+ droptype = DTYPE_FORCED;
+ } else if (prec->old == 0) {
+ /* first exceeds th_min */
+ prec->count = 1;
+ prec->old = 1;
+ } else if (drop_early((avg - prec->th_min_s) >> rp->rio_wshift,
+ prec->probd, prec->count)) {
+ /* unforced drop by red */
+ droptype = DTYPE_EARLY;
+ }
+ } else {
+ /* avg < th_min */
+ prec->old = 0;
+ }
+
+ /*
+ * if the queue length hits the hard limit, it's a forced drop.
+ */
+ if (droptype == DTYPE_NODROP && qlen(q) >= qlimit(q))
+ droptype = DTYPE_FORCED;
+
+ if (droptype != DTYPE_NODROP) {
+ /* always drop incoming packet (as opposed to randomdrop) */
+ for (i = dpindex; i < RIO_NDROPPREC; i++)
+ rp->rio_precstate[i].count = 0;
+#ifdef RIO_STATS
+ if (droptype == DTYPE_EARLY)
+ rp->q_stats[dpindex].drop_unforced++;
+ else
+ rp->q_stats[dpindex].drop_forced++;
+ PKTCNTR_ADD(&rp->q_stats[dpindex].drop_cnt, m_pktlen(m));
+#endif
+ m_freem(m);
+ return (-1);
+ }
+
+ for (i = dpindex; i < RIO_NDROPPREC; i++)
+ rp->rio_precstate[i].qlen++;
+
+ /* save drop precedence index in mbuf hdr */
+ RIOM_SET_PRECINDEX(m, dpindex);
+
+ if (rp->rio_flags & RIOF_CLEARDSCP)
+ dsfield &= ~DSCP_MASK;
+
+ if (dsfield != odsfield)
+ write_dsfield(m, pktattr, dsfield);
+
+ _addq(q, m);
+
+#ifdef RIO_STATS
+ PKTCNTR_ADD(&rp->q_stats[dpindex].xmit_cnt, m_pktlen(m));
+#endif
+ return (0);
+}
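+/*
+ * a numeric sketch of the estimator in rio_addq() (illustrative
+ * values): with wshift = 9 and FP_SHIFT = 12, avg converges to
+ * qlen << 21.  for a steady queue of 10 packets the fixed point is
+ *	avg = 10 << 21 = 20971520
+ * since there (qlen << FP_SHIFT) == (avg >> wshift) and avg stops
+ * moving.  after an idle period, pow_w() scales avg down by roughly
+ * (511/512)^n for the n packet-times the queue sat empty.
+ */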
+
+struct mbuf *
+rio_getq(rio_t *rp, class_queue_t *q)
+{
+ struct mbuf *m;
+ int dpindex, i;
+
+ if ((m = _getq(q)) == NULL)
+ return NULL;
+
+ dpindex = RIOM_GET_PRECINDEX(m);
+ for (i = dpindex; i < RIO_NDROPPREC; i++) {
+ if (--rp->rio_precstate[i].qlen == 0) {
+ if (rp->rio_precstate[i].idle == 0) {
+ rp->rio_precstate[i].idle = 1;
+ microtime(&rp->rio_precstate[i].last);
+ }
+ }
+ }
+ return (m);
+}
+
+#ifdef ALTQ3_COMPAT
+int
+rioopen(dev, flag, fmt, p)
+ dev_t dev;
+ int flag, fmt;
+#if (__FreeBSD_version > 500000)
+ struct thread *p;
+#else
+ struct proc *p;
+#endif
+{
+ /* everything will be done when the queueing scheme is attached. */
+ return 0;
+}
+
+int
+rioclose(dev, flag, fmt, p)
+ dev_t dev;
+ int flag, fmt;
+#if (__FreeBSD_version > 500000)
+ struct thread *p;
+#else
+ struct proc *p;
+#endif
+{
+ rio_queue_t *rqp;
+ int err, error = 0;
+
+ while ((rqp = rio_list) != NULL) {
+ /* destroy all */
+ err = rio_detach(rqp);
+ if (err != 0 && error == 0)
+ error = err;
+ }
+
+ return error;
+}
+
+int
+rioioctl(dev, cmd, addr, flag, p)
+ dev_t dev;
+ ioctlcmd_t cmd;
+ caddr_t addr;
+ int flag;
+#if (__FreeBSD_version > 500000)
+ struct thread *p;
+#else
+ struct proc *p;
+#endif
+{
+ rio_queue_t *rqp;
+ struct rio_interface *ifacep;
+ struct ifnet *ifp;
+ int error = 0;
+
+ /* check super-user privilege */
+ switch (cmd) {
+ case RIO_GETSTATS:
+ break;
+ default:
+#if (__FreeBSD_version > 700000)
+ if ((error = priv_check(p, PRIV_ALTQ_MANAGE)) != 0)
+ return (error);
+#elif (__FreeBSD_version > 400000)
+ if ((error = suser(p)) != 0)
+ return (error);
+#else
+ if ((error = suser(p->p_ucred, &p->p_acflag)) != 0)
+ return (error);
+#endif
+ break;
+ }
+
+ switch (cmd) {
+
+ case RIO_ENABLE:
+ ifacep = (struct rio_interface *)addr;
+ if ((rqp = altq_lookup(ifacep->rio_ifname, ALTQT_RIO)) == NULL) {
+ error = EBADF;
+ break;
+ }
+ error = altq_enable(rqp->rq_ifq);
+ break;
+
+ case RIO_DISABLE:
+ ifacep = (struct rio_interface *)addr;
+ if ((rqp = altq_lookup(ifacep->rio_ifname, ALTQT_RIO)) == NULL) {
+ error = EBADF;
+ break;
+ }
+ error = altq_disable(rqp->rq_ifq);
+ break;
+
+ case RIO_IF_ATTACH:
+ ifp = ifunit(((struct rio_interface *)addr)->rio_ifname);
+ if (ifp == NULL) {
+ error = ENXIO;
+ break;
+ }
+
+ /* allocate and initialize rio_queue_t */
+ rqp = malloc(sizeof(rio_queue_t), M_DEVBUF, M_WAITOK);
+ if (rqp == NULL) {
+ error = ENOMEM;
+ break;
+ }
+ bzero(rqp, sizeof(rio_queue_t));
+
+ rqp->rq_q = malloc(sizeof(class_queue_t),
+ M_DEVBUF, M_WAITOK);
+ if (rqp->rq_q == NULL) {
+ free(rqp, M_DEVBUF);
+ error = ENOMEM;
+ break;
+ }
+ bzero(rqp->rq_q, sizeof(class_queue_t));
+
+ rqp->rq_rio = rio_alloc(0, NULL, 0, 0);
+ if (rqp->rq_rio == NULL) {
+ free(rqp->rq_q, M_DEVBUF);
+ free(rqp, M_DEVBUF);
+ error = ENOMEM;
+ break;
+ }
+
+ rqp->rq_ifq = &ifp->if_snd;
+ qtail(rqp->rq_q) = NULL;
+ qlen(rqp->rq_q) = 0;
+ qlimit(rqp->rq_q) = RIO_LIMIT;
+ qtype(rqp->rq_q) = Q_RIO;
+
+ /*
+ * set RIO to this ifnet structure.
+ */
+ error = altq_attach(rqp->rq_ifq, ALTQT_RIO, rqp,
+ rio_enqueue, rio_dequeue, rio_request,
+ NULL, NULL);
+ if (error) {
+ rio_destroy(rqp->rq_rio);
+ free(rqp->rq_q, M_DEVBUF);
+ free(rqp, M_DEVBUF);
+ break;
+ }
+
+ /* add this state to the rio list */
+ rqp->rq_next = rio_list;
+ rio_list = rqp;
+ break;
+
+ case RIO_IF_DETACH:
+ ifacep = (struct rio_interface *)addr;
+ if ((rqp = altq_lookup(ifacep->rio_ifname, ALTQT_RIO)) == NULL) {
+ error = EBADF;
+ break;
+ }
+ error = rio_detach(rqp);
+ break;
+
+ case RIO_GETSTATS:
+ do {
+ struct rio_stats *q_stats;
+ rio_t *rp;
+ int i;
+
+ q_stats = (struct rio_stats *)addr;
+ if ((rqp = altq_lookup(q_stats->iface.rio_ifname,
+ ALTQT_RIO)) == NULL) {
+ error = EBADF;
+ break;
+ }
+
+ rp = rqp->rq_rio;
+
+ q_stats->q_limit = qlimit(rqp->rq_q);
+ q_stats->weight = rp->rio_weight;
+ q_stats->flags = rp->rio_flags;
+
+ for (i = 0; i < RIO_NDROPPREC; i++) {
+ q_stats->q_len[i] = rp->rio_precstate[i].qlen;
+ bcopy(&rp->q_stats[i], &q_stats->q_stats[i],
+ sizeof(struct redstats));
+ q_stats->q_stats[i].q_avg =
+ rp->rio_precstate[i].avg >> rp->rio_wshift;
+
+ q_stats->q_params[i].inv_pmax
+ = rp->rio_precstate[i].inv_pmax;
+ q_stats->q_params[i].th_min
+ = rp->rio_precstate[i].th_min;
+ q_stats->q_params[i].th_max
+ = rp->rio_precstate[i].th_max;
+ }
+ } while (/*CONSTCOND*/ 0);
+ break;
+
+ case RIO_CONFIG:
+ do {
+ struct rio_conf *fc;
+ rio_t *new;
+ int s, limit, i;
+
+ fc = (struct rio_conf *)addr;
+ if ((rqp = altq_lookup(fc->iface.rio_ifname,
+ ALTQT_RIO)) == NULL) {
+ error = EBADF;
+ break;
+ }
+
+ new = rio_alloc(fc->rio_weight, &fc->q_params[0],
+ fc->rio_flags, fc->rio_pkttime);
+ if (new == NULL) {
+ error = ENOMEM;
+ break;
+ }
+
+#ifdef __NetBSD__
+ s = splnet();
+#else
+ s = splimp();
+#endif
+ _flushq(rqp->rq_q);
+ limit = fc->rio_limit;
+ if (limit < fc->q_params[RIO_NDROPPREC-1].th_max)
+ limit = fc->q_params[RIO_NDROPPREC-1].th_max;
+ qlimit(rqp->rq_q) = limit;
+
+ rio_destroy(rqp->rq_rio);
+ rqp->rq_rio = new;
+
+ splx(s);
+
+ /* write back new values */
+ fc->rio_limit = limit;
+ for (i = 0; i < RIO_NDROPPREC; i++) {
+ fc->q_params[i].inv_pmax =
+ rqp->rq_rio->rio_precstate[i].inv_pmax;
+ fc->q_params[i].th_min =
+ rqp->rq_rio->rio_precstate[i].th_min;
+ fc->q_params[i].th_max =
+ rqp->rq_rio->rio_precstate[i].th_max;
+ }
+ } while (/*CONSTCOND*/ 0);
+ break;
+
+ case RIO_SETDEFAULTS:
+ do {
+ struct redparams *rp;
+ int i;
+
+ rp = (struct redparams *)addr;
+ for (i = 0; i < RIO_NDROPPREC; i++)
+ default_rio_params[i] = rp[i];
+ } while (/*CONSTCOND*/ 0);
+ break;
+
+ default:
+ error = EINVAL;
+ break;
+ }
+
+ return error;
+}
+
+static int
+rio_detach(rqp)
+ rio_queue_t *rqp;
+{
+ rio_queue_t *tmp;
+ int error = 0;
+
+ if (ALTQ_IS_ENABLED(rqp->rq_ifq))
+ altq_disable(rqp->rq_ifq);
+
+ if ((error = altq_detach(rqp->rq_ifq)))
+ return (error);
+
+ if (rio_list == rqp)
+ rio_list = rqp->rq_next;
+ else {
+ for (tmp = rio_list; tmp != NULL; tmp = tmp->rq_next)
+ if (tmp->rq_next == rqp) {
+ tmp->rq_next = rqp->rq_next;
+ break;
+ }
+ if (tmp == NULL)
+ printf("rio_detach: no state found in rio_list!\n");
+ }
+
+ rio_destroy(rqp->rq_rio);
+ free(rqp->rq_q, M_DEVBUF);
+ free(rqp, M_DEVBUF);
+ return (error);
+}
+
+/*
+ * rio support routines
+ */
+static int
+rio_request(ifq, req, arg)
+ struct ifaltq *ifq;
+ int req;
+ void *arg;
+{
+ rio_queue_t *rqp = (rio_queue_t *)ifq->altq_disc;
+
+ IFQ_LOCK_ASSERT(ifq);
+
+ switch (req) {
+ case ALTRQ_PURGE:
+ _flushq(rqp->rq_q);
+ if (ALTQ_IS_ENABLED(ifq))
+ ifq->ifq_len = 0;
+ break;
+ }
+ return (0);
+}
+
+/*
+ * enqueue routine:
+ *
+ * returns: 0 when successfully queued.
+ * ENOBUFS when drop occurs.
+ */
+static int
+rio_enqueue(ifq, m, pktattr)
+ struct ifaltq *ifq;
+ struct mbuf *m;
+ struct altq_pktattr *pktattr;
+{
+ rio_queue_t *rqp = (rio_queue_t *)ifq->altq_disc;
+ int error = 0;
+
+ IFQ_LOCK_ASSERT(ifq);
+
+ if (rio_addq(rqp->rq_rio, rqp->rq_q, m, pktattr) == 0)
+ ifq->ifq_len++;
+ else
+ error = ENOBUFS;
+ return error;
+}
+
+/*
+ * dequeue routine:
+ * must be called in splimp.
+ *
+ * returns: mbuf dequeued.
+ * NULL when no packet is available in the queue.
+ */
+
+static struct mbuf *
+rio_dequeue(ifq, op)
+ struct ifaltq *ifq;
+ int op;
+{
+ rio_queue_t *rqp = (rio_queue_t *)ifq->altq_disc;
+ struct mbuf *m = NULL;
+
+ IFQ_LOCK_ASSERT(ifq);
+
+ if (op == ALTDQ_POLL)
+ return qhead(rqp->rq_q);
+
+ m = rio_getq(rqp->rq_rio, rqp->rq_q);
+ if (m != NULL)
+ ifq->ifq_len--;
+ return m;
+}
+
+#ifdef KLD_MODULE
+
+static struct altqsw rio_sw =
+ {"rio", rioopen, rioclose, rioioctl};
+
+ALTQ_MODULE(altq_rio, ALTQT_RIO, &rio_sw);
+MODULE_VERSION(altq_rio, 1);
+MODULE_DEPEND(altq_rio, altq_red, 1, 1, 1);
+
+#endif /* KLD_MODULE */
+#endif /* ALTQ3_COMPAT */
+
+#endif /* ALTQ_RIO */
diff --git a/contrib/altq/rtems/freebsd/altq/altq_rio.h b/contrib/altq/rtems/freebsd/altq/altq_rio.h
new file mode 100644
index 00000000..b27951e2
--- /dev/null
+++ b/contrib/altq/rtems/freebsd/altq/altq_rio.h
@@ -0,0 +1,144 @@
+/* $KAME: altq_rio.h,v 1.9 2003/07/10 12:07:49 kjc Exp $ */
+
+/*
+ * Copyright (C) 1998-2003
+ * Sony Computer Science Laboratories Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY SONY CSL AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL SONY CSL OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _ALTQ_ALTQ_RIO_HH_
+#define _ALTQ_ALTQ_RIO_HH_
+
+#include <rtems/freebsd/altq/altq_classq.h>
+
+/*
+ * RIO: RED with IN/OUT bit
+ * (extended to support more than 2 drop precedence values)
+ */
+#define RIO_NDROPPREC 3 /* number of drop precedence values */
+
+#ifdef ALTQ3_COMPAT
+struct rio_interface {
+ char rio_ifname[IFNAMSIZ];
+};
+
+struct rio_stats {
+ struct rio_interface iface;
+ int q_len[RIO_NDROPPREC];
+ struct redstats q_stats[RIO_NDROPPREC];
+
+ /* static red parameters */
+ int q_limit;
+ int weight;
+ int flags;
+ struct redparams q_params[RIO_NDROPPREC];
+};
+
+struct rio_conf {
+ struct rio_interface iface;
+ struct redparams q_params[RIO_NDROPPREC];
+ int rio_weight; /* weight for EWMA */
+ int rio_limit; /* max queue length */
+ int rio_pkttime; /* average packet time in usec */
+ int rio_flags; /* see below */
+};
+#endif /* ALTQ3_COMPAT */
+
+/* rio flags */
+#define RIOF_ECN4 0x01 /* use packet marking for IPv4 packets */
+#define RIOF_ECN6 0x02 /* use packet marking for IPv6 packets */
+#define RIOF_ECN (RIOF_ECN4 | RIOF_ECN6)
+#define RIOF_CLEARDSCP 0x200 /* clear diffserv codepoint */
+
+#ifdef ALTQ3_COMPAT
+/*
+ * IOCTLs for RIO
+ */
+#define RIO_IF_ATTACH _IOW('Q', 1, struct rio_interface)
+#define RIO_IF_DETACH _IOW('Q', 2, struct rio_interface)
+#define RIO_ENABLE _IOW('Q', 3, struct rio_interface)
+#define RIO_DISABLE _IOW('Q', 4, struct rio_interface)
+#define RIO_CONFIG _IOWR('Q', 6, struct rio_conf)
+#define RIO_GETSTATS _IOWR('Q', 12, struct rio_stats)
+#define RIO_SETDEFAULTS _IOW('Q', 30, struct redparams[RIO_NDROPPREC])
+#endif /* ALTQ3_COMPAT */
+
+#ifdef _KERNEL
+
+typedef struct rio {
+ /* per drop precedence structure */
+ struct dropprec_state {
+ /* red parameters */
+ int inv_pmax; /* inverse of max drop probability */
+ int th_min; /* red min threshold */
+ int th_max; /* red max threshold */
+
+ /* variables for internal use */
+ int th_min_s; /* th_min scaled by avgshift */
+ int th_max_s; /* th_max scaled by avgshift */
+ int probd; /* drop probability denominator */
+
+ int qlen; /* queue length */
+ int avg; /* (scaled) queue length average */
+ int count; /* packet count since the last dropped/
+ marked packet */
+ int idle; /* queue was empty */
+ int old; /* avg is above th_min */
+ struct timeval last; /* timestamp when queue becomes idle */
+ } rio_precstate[RIO_NDROPPREC];
+
+ int rio_wshift; /* log(red_weight) */
+ int rio_weight; /* weight for EWMA */
+ struct wtab *rio_wtab; /* weight table */
+
+ int rio_pkttime; /* average packet time in micro sec
+ used for idle calibration */
+ int rio_flags; /* rio flags */
+
+ u_int8_t rio_codepoint; /* codepoint value to tag packets */
+ u_int8_t rio_codepointmask; /* codepoint mask bits */
+
+ struct redstats q_stats[RIO_NDROPPREC]; /* statistics */
+} rio_t;
+
+#ifdef ALTQ3_COMPAT
+typedef struct rio_queue {
+ struct rio_queue *rq_next; /* next red_state in the list */
+ struct ifaltq *rq_ifq; /* backpointer to ifaltq */
+
+ class_queue_t *rq_q;
+
+ rio_t *rq_rio;
+} rio_queue_t;
+#endif /* ALTQ3_COMPAT */
+
+extern rio_t *rio_alloc(int, struct redparams *, int, int);
+extern void rio_destroy(rio_t *);
+extern void rio_getstats(rio_t *, struct redstats *);
+extern int rio_addq(rio_t *, class_queue_t *, struct mbuf *,
+ struct altq_pktattr *);
+extern struct mbuf *rio_getq(rio_t *, class_queue_t *);
+
+#endif /* _KERNEL */
+
+#endif /* _ALTQ_ALTQ_RIO_HH_ */
diff --git a/contrib/altq/rtems/freebsd/altq/altq_rmclass.c b/contrib/altq/rtems/freebsd/altq/altq_rmclass.c
new file mode 100644
index 00000000..027f2a2a
--- /dev/null
+++ b/contrib/altq/rtems/freebsd/altq/altq_rmclass.c
@@ -0,0 +1,1843 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/* $FreeBSD$ */
+/* $KAME: altq_rmclass.c,v 1.18 2003/11/06 06:32:53 kjc Exp $ */
+
+/*
+ * Copyright (c) 1991-1997 Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the Network Research
+ * Group at Lawrence Berkeley Laboratory.
+ * 4. Neither the name of the University nor of the Laboratory may be used
+ * to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * LBL code modified by speer@eng.sun.com, May 1997.
+ * For questions and/or comments, please send mail to cbq@ee.lbl.gov
+ */
+
+#ident "@(#)rm_class.c 1.48 97/12/05 SMI"
+
+#if defined(__FreeBSD__) || defined(__NetBSD__)
+#include <rtems/freebsd/local/opt_altq.h>
+#if (__FreeBSD__ != 2)
+#include <rtems/freebsd/local/opt_inet.h>
+#ifdef __FreeBSD__
+#include <rtems/freebsd/local/opt_inet6.h>
+#endif
+#endif
+#endif /* __FreeBSD__ || __NetBSD__ */
+#ifdef ALTQ_CBQ /* cbq is enabled by ALTQ_CBQ option in opt_altq.h */
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/errno.h>
+#include <rtems/freebsd/sys/time.h>
+#ifdef ALTQ3_COMPAT
+#include <rtems/freebsd/sys/kernel.h>
+#endif
+
+#include <rtems/freebsd/net/if.h>
+#ifdef ALTQ3_COMPAT
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/in_systm.h>
+#include <rtems/freebsd/netinet/ip.h>
+#endif
+
+#include <rtems/freebsd/altq/altq.h>
+#include <rtems/freebsd/altq/altq_rmclass.h>
+#include <rtems/freebsd/altq/altq_rmclass_debug.h>
+#include <rtems/freebsd/altq/altq_red.h>
+#include <rtems/freebsd/altq/altq_rio.h>
+
+/*
+ * Local Macros
+ */
+
+#define reset_cutoff(ifd) { ifd->cutoff_ = RM_MAXDEPTH; }
+
+/*
+ * Local routines.
+ */
+
+static int rmc_satisfied(struct rm_class *, struct timeval *);
+static void rmc_wrr_set_weights(struct rm_ifdat *);
+static void rmc_depth_compute(struct rm_class *);
+static void rmc_depth_recompute(rm_class_t *);
+
+static mbuf_t *_rmc_wrr_dequeue_next(struct rm_ifdat *, int);
+static mbuf_t *_rmc_prr_dequeue_next(struct rm_ifdat *, int);
+
+static int _rmc_addq(rm_class_t *, mbuf_t *);
+static void _rmc_dropq(rm_class_t *);
+static mbuf_t *_rmc_getq(rm_class_t *);
+static mbuf_t *_rmc_pollq(rm_class_t *);
+
+static int rmc_under_limit(struct rm_class *, struct timeval *);
+static void rmc_tl_satisfied(struct rm_ifdat *, struct timeval *);
+static void rmc_drop_action(struct rm_class *);
+static void rmc_restart(struct rm_class *);
+static void rmc_root_overlimit(struct rm_class *, struct rm_class *);
+
+#define BORROW_OFFTIME
+/*
+ * BORROW_OFFTIME (experimental):
+ * borrow the offtime of the class borrowing from.
+ * the reason is that when its own offtime is set, the class is unable
+ * to borrow much, especially when cutoff is taking effect.
+ * but when the borrowed class is overloaded (avgidle is close to minidle),
+ * use the borrowing class's offtime to avoid overload.
+ */
+#define ADJUST_CUTOFF
+/*
+ * ADJUST_CUTOFF (experimental):
+ * if no underlimit class is found due to cutoff, increase cutoff and
+ * retry the scheduling loop.
+ * also, don't invoke delay_actions while cutoff is taking effect,
+ * since a sleeping class won't have a chance to be scheduled in the
+ * next loop.
+ *
+ * the heuristics for setting the top-level variable (cutoff_) now become:
+ * 1. if a packet arrives for a not-overlimit class, set cutoff
+ * to the depth of the class.
+ * 2. if cutoff is i, and a packet arrives for an overlimit class
+ * with an underlimit ancestor at a lower level than i (say j),
+ * then set cutoff to j.
+ * 3. at scheduling a packet, if there is no underlimit class
+ * due to the current cutoff level, increase cutoff by 1 and
+ * then try to schedule again.
+ */
+
+/*
+ * rm_class_t *
+ * rmc_newclass(...) - Create a new resource management class at priority
+ * 'pri' on the interface given by 'ifd'.
+ *
+ * nsecPerByte is the data rate of the interface in nanoseconds/byte.
+ * E.g., 800 for a 10Mb/s ethernet. If the class gets less
+ * than 100% of the bandwidth, this number should be the
+ * 'effective' rate for the class. Let f be the
+ * bandwidth fraction allocated to this class, and let
+ * nsPerByte be the data rate of the output link in
+ * nanoseconds/byte. Then nsecPerByte is set to
+ * nsPerByte / f. E.g., 1600 (= 800 / .5)
+ * for a class that gets 50% of an ethernet's bandwidth.
+ *
+ * action the routine to call when the class is over limit.
+ *
+ * maxq max allowable queue size for class (in packets).
+ *
+ * parent parent class pointer.
+ *
+ * borrow class to borrow from (should be either 'parent' or null).
+ *
+ * maxidle max value allowed for class 'idle' time estimate (this
+ * parameter determines how large an initial burst of packets
+ * can be before overlimit action is invoked).
+ *
+ * offtime how long 'delay' action will delay when class goes over
+ * limit (this parameter determines the steady-state burst
+ * size when a class is running over its limit).
+ *
+ * Maxidle and offtime have to be computed from the following: If the
+ * average packet size is s, the bandwidth fraction allocated to this
+ * class is f, we want to allow b packet bursts, and the gain of the
+ * averaging filter is g (= 1 - 2^(-RM_FILTER_GAIN)), then:
+ *
+ * ptime = s * nsPerByte * (1 - f) / f
+ * maxidle = ptime * (1 - g^b) / g^b
+ * minidle = -ptime * (1 / (f - 1))
+ * offtime = ptime * (1 + 1/(1 - g) * (1 - g^(b - 1)) / g^(b - 1))
+ *
+ * Operationally, it's convenient to specify maxidle & offtime in units
+ * independent of the link bandwidth so the maxidle & offtime passed to
+ * this routine are the above values multiplied by 8*f/(1000*nsPerByte).
+ * (The constant factor is a scale factor needed to make the parameters
+ * integers. This scaling also means that the 'unscaled' values of
+ * maxidle*nsecPerByte/8 and offtime*nsecPerByte/8 will be in microseconds,
+ * not nanoseconds.) Also note that the 'idle' filter computation keeps
+ * an estimate scaled upward by 2^RM_FILTER_GAIN so the passed value of
+ * maxidle also must be scaled upward by this value. Thus, the passed
+ * values for maxidle and offtime can be computed as follows:
+ *
+ * maxidle = maxidle * 2^RM_FILTER_GAIN * 8 / (1000 * nsecPerByte)
+ * offtime = offtime * 8 / (1000 * nsecPerByte)
+ *
+ * When USE_HRTIME is employed, then maxidle and offtime become:
+ * maxidle = maxidle * (8.0 / nsecPerByte);
+ * offtime = offtime * (8.0 / nsecPerByte);
+ */
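+/*
+ * concrete numbers for the conventions above (an illustration, not
+ * from the original sources): rmc_newclass() computes
+ * allotment_ = RM_NS_PER_SEC / nsecPerByte, so on 10Mb/s ethernet
+ * (nsecPerByte = 800) the root class gets 10^9 / 800 = 1250000
+ * bytes/s, i.e. the full link, while a class entitled to half the
+ * link would be created with nsecPerByte = 1600 and get 625000
+ * bytes/s.
+ */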
+struct rm_class *
+rmc_newclass(int pri, struct rm_ifdat *ifd, u_int nsecPerByte,
+ void (*action)(rm_class_t *, rm_class_t *), int maxq,
+ struct rm_class *parent, struct rm_class *borrow, u_int maxidle,
+ int minidle, u_int offtime, int pktsize, int flags)
+{
+ struct rm_class *cl;
+ struct rm_class *peer;
+ int s;
+
+ if (pri >= RM_MAXPRIO)
+ return (NULL);
+#ifndef ALTQ_RED
+ if (flags & RMCF_RED) {
+#ifdef ALTQ_DEBUG
+ printf("rmc_newclass: RED not configured for CBQ!\n");
+#endif
+ return (NULL);
+ }
+#endif
+#ifndef ALTQ_RIO
+ if (flags & RMCF_RIO) {
+#ifdef ALTQ_DEBUG
+ printf("rmc_newclass: RIO not configured for CBQ!\n");
+#endif
+ return (NULL);
+ }
+#endif
+
+ cl = malloc(sizeof(struct rm_class),
+ M_DEVBUF, M_WAITOK);
+ if (cl == NULL)
+ return (NULL);
+ bzero(cl, sizeof(struct rm_class));
+ CALLOUT_INIT(&cl->callout_);
+ cl->q_ = malloc(sizeof(class_queue_t),
+ M_DEVBUF, M_WAITOK);
+ if (cl->q_ == NULL) {
+ free(cl, M_DEVBUF);
+ return (NULL);
+ }
+ bzero(cl->q_, sizeof(class_queue_t));
+
+ /*
+ * Class initialization.
+ */
+ cl->children_ = NULL;
+ cl->parent_ = parent;
+ cl->borrow_ = borrow;
+ cl->leaf_ = 1;
+ cl->ifdat_ = ifd;
+ cl->pri_ = pri;
+ cl->allotment_ = RM_NS_PER_SEC / nsecPerByte; /* Bytes per sec */
+ cl->depth_ = 0;
+ cl->qthresh_ = 0;
+ cl->ns_per_byte_ = nsecPerByte;
+
+ qlimit(cl->q_) = maxq;
+ qtype(cl->q_) = Q_DROPHEAD;
+ qlen(cl->q_) = 0;
+ cl->flags_ = flags;
+
+#if 1 /* minidle is also scaled in ALTQ */
+ cl->minidle_ = (minidle * (int)nsecPerByte) / 8;
+ if (cl->minidle_ > 0)
+ cl->minidle_ = 0;
+#else
+ cl->minidle_ = minidle;
+#endif
+ cl->maxidle_ = (maxidle * nsecPerByte) / 8;
+ if (cl->maxidle_ == 0)
+ cl->maxidle_ = 1;
+#if 1 /* offtime is also scaled in ALTQ */
+ cl->avgidle_ = cl->maxidle_;
+ cl->offtime_ = ((offtime * nsecPerByte) / 8) >> RM_FILTER_GAIN;
+ if (cl->offtime_ == 0)
+ cl->offtime_ = 1;
+#else
+ cl->avgidle_ = 0;
+ cl->offtime_ = (offtime * nsecPerByte) / 8;
+#endif
+ cl->overlimit = action;
+
+#ifdef ALTQ_RED
+ if (flags & (RMCF_RED|RMCF_RIO)) {
+ int red_flags, red_pkttime;
+
+ red_flags = 0;
+ if (flags & RMCF_ECN)
+ red_flags |= REDF_ECN;
+ if (flags & RMCF_FLOWVALVE)
+ red_flags |= REDF_FLOWVALVE;
+#ifdef ALTQ_RIO
+ if (flags & RMCF_CLEARDSCP)
+ red_flags |= RIOF_CLEARDSCP;
+#endif
+ red_pkttime = nsecPerByte * pktsize / 1000;
+
+ if (flags & RMCF_RED) {
+ cl->red_ = red_alloc(0, 0,
+ qlimit(cl->q_) * 10/100,
+ qlimit(cl->q_) * 30/100,
+ red_flags, red_pkttime);
+ if (cl->red_ != NULL)
+ qtype(cl->q_) = Q_RED;
+ }
+#ifdef ALTQ_RIO
+ else {
+ cl->red_ = (red_t *)rio_alloc(0, NULL,
+ red_flags, red_pkttime);
+ if (cl->red_ != NULL)
+ qtype(cl->q_) = Q_RIO;
+ }
+#endif
+ }
+#endif /* ALTQ_RED */
+
+ /*
+ * put the class into the class tree
+ */
+#ifdef __NetBSD__
+ s = splnet();
+#else
+ s = splimp();
+#endif
+ IFQ_LOCK(ifd->ifq_);
+ if ((peer = ifd->active_[pri]) != NULL) {
+ /* find the last class at this pri */
+ cl->peer_ = peer;
+ while (peer->peer_ != ifd->active_[pri])
+ peer = peer->peer_;
+ peer->peer_ = cl;
+ } else {
+ ifd->active_[pri] = cl;
+ cl->peer_ = cl;
+ }
+
+ if (cl->parent_) {
+ cl->next_ = parent->children_;
+ parent->children_ = cl;
+ parent->leaf_ = 0;
+ }
+
+ /*
+ * Compute the depth of this class and its ancestors in the class
+ * hierarchy.
+ */
+ rmc_depth_compute(cl);
+
+ /*
+ * If CBQ's WRR is enabled, then initialize the class WRR state.
+ */
+ if (ifd->wrr_) {
+ ifd->num_[pri]++;
+ ifd->alloc_[pri] += cl->allotment_;
+ rmc_wrr_set_weights(ifd);
+ }
+ IFQ_UNLOCK(ifd->ifq_);
+ splx(s);
+ return (cl);
+}
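+/*
+ * a hypothetical call creating a priority-2 leaf class entitled to
+ * 50% of a 10Mb/s link and allowed to borrow from its parent (a
+ * sketch only; maxidle, minidle and offtime would be pre-scaled by
+ * the configuration code as described above):
+ *
+ *	cl = rmc_newclass(2, ifd, 1600, rmc_delay_action, 60,
+ *	    parent, parent, maxidle, minidle, offtime, 1000, 0);
+ */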
+
+int
+rmc_modclass(struct rm_class *cl, u_int nsecPerByte, int maxq, u_int maxidle,
+ int minidle, u_int offtime, int pktsize)
+{
+ struct rm_ifdat *ifd;
+ u_int old_allotment;
+ int s;
+
+ ifd = cl->ifdat_;
+ old_allotment = cl->allotment_;
+
+#ifdef __NetBSD__
+ s = splnet();
+#else
+ s = splimp();
+#endif
+ IFQ_LOCK(ifd->ifq_);
+ cl->allotment_ = RM_NS_PER_SEC / nsecPerByte; /* Bytes per sec */
+ cl->qthresh_ = 0;
+ cl->ns_per_byte_ = nsecPerByte;
+
+ qlimit(cl->q_) = maxq;
+
+#if 1 /* minidle is also scaled in ALTQ */
+ cl->minidle_ = (minidle * nsecPerByte) / 8;
+ if (cl->minidle_ > 0)
+ cl->minidle_ = 0;
+#else
+ cl->minidle_ = minidle;
+#endif
+ cl->maxidle_ = (maxidle * nsecPerByte) / 8;
+ if (cl->maxidle_ == 0)
+ cl->maxidle_ = 1;
+#if 1 /* offtime is also scaled in ALTQ */
+ cl->avgidle_ = cl->maxidle_;
+ cl->offtime_ = ((offtime * nsecPerByte) / 8) >> RM_FILTER_GAIN;
+ if (cl->offtime_ == 0)
+ cl->offtime_ = 1;
+#else
+ cl->avgidle_ = 0;
+ cl->offtime_ = (offtime * nsecPerByte) / 8;
+#endif
+
+ /*
+ * If CBQ's WRR is enabled, then initialize the class WRR state.
+ */
+ if (ifd->wrr_) {
+ ifd->alloc_[cl->pri_] += cl->allotment_ - old_allotment;
+ rmc_wrr_set_weights(ifd);
+ }
+ IFQ_UNLOCK(ifd->ifq_);
+ splx(s);
+ return (0);
+}
+
+/*
+ * static void
+ * rmc_wrr_set_weights(struct rm_ifdat *ifdat) - This function computes
+ * the appropriate round-robin weights for the CBQ weighted round robin
+ * algorithm.
+ *
+ * Returns: NONE
+ */
+
+static void
+rmc_wrr_set_weights(struct rm_ifdat *ifd)
+{
+ int i;
+ struct rm_class *cl, *clh;
+
+ for (i = 0; i < RM_MAXPRIO; i++) {
+ /*
+ * This is inverted from that of the simulator to
+ * maintain precision.
+ */
+ if (ifd->num_[i] == 0)
+ ifd->M_[i] = 0;
+ else
+ ifd->M_[i] = ifd->alloc_[i] /
+ (ifd->num_[i] * ifd->maxpkt_);
+ /*
+ * Compute the weighted allotment for each class.
+ * This takes the expensive div instruction out
+ * of the main loop for the wrr scheduling path.
+ * These only get recomputed when a class comes or
+ * goes.
+ */
+ if (ifd->active_[i] != NULL) {
+ clh = cl = ifd->active_[i];
+ do {
+ /* safe-guard for slow link or alloc_ == 0 */
+ if (ifd->M_[i] == 0)
+ cl->w_allotment_ = 0;
+ else
+ cl->w_allotment_ = cl->allotment_ /
+ ifd->M_[i];
+ cl = cl->peer_;
+ } while ((cl != NULL) && (cl != clh));
+ }
+ }
+}
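+/*
+ * e.g. (illustrative numbers): two classes at one priority level,
+ * each with allotment_ = 625000 bytes/s, and maxpkt_ = 1500:
+ *	M           = (625000 + 625000) / (2 * 1500) = 416
+ *	w_allotment = 625000 / 416 = 1502 bytes
+ * so each class may send roughly one MTU-sized packet per WRR round.
+ */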
+
+int
+rmc_get_weight(struct rm_ifdat *ifd, int pri)
+{
+ if ((pri >= 0) && (pri < RM_MAXPRIO))
+ return (ifd->M_[pri]);
+ else
+ return (0);
+}
+
+/*
+ * static void
+ * rmc_depth_compute(struct rm_class *cl) - This function computes the
+ * appropriate depth of class 'cl' and its ancestors.
+ *
+ * Returns: NONE
+ */
+
+static void
+rmc_depth_compute(struct rm_class *cl)
+{
+ rm_class_t *t = cl, *p;
+
+ /*
+ * Recompute the depth for the branch of the tree.
+ */
+ while (t != NULL) {
+ p = t->parent_;
+ if (p && (t->depth_ >= p->depth_)) {
+ p->depth_ = t->depth_ + 1;
+ t = p;
+ } else
+ t = NULL;
+ }
+}
+
+/*
+ * static void
+ * rmc_depth_recompute(struct rm_class *cl) - This function re-computes
+ * the depth of the tree after a class has been deleted.
+ *
+ * Returns: NONE
+ */
+
+static void
+rmc_depth_recompute(rm_class_t *cl)
+{
+#if 1 /* ALTQ */
+ rm_class_t *p, *t;
+
+ p = cl;
+ while (p != NULL) {
+ if ((t = p->children_) == NULL) {
+ p->depth_ = 0;
+ } else {
+ int cdepth = 0;
+
+ while (t != NULL) {
+ if (t->depth_ > cdepth)
+ cdepth = t->depth_;
+ t = t->next_;
+ }
+
+ if (p->depth_ == cdepth + 1)
+ /* no change to this parent */
+ return;
+
+ p->depth_ = cdepth + 1;
+ }
+
+ p = p->parent_;
+ }
+#else
+ rm_class_t *t;
+
+ if (cl->depth_ >= 1) {
+ if (cl->children_ == NULL) {
+ cl->depth_ = 0;
+ } else if ((t = cl->children_) != NULL) {
+ while (t != NULL) {
+ if (t->children_ != NULL)
+ rmc_depth_recompute(t);
+ t = t->next_;
+ }
+ } else
+ rmc_depth_compute(cl);
+ }
+#endif
+}
+
+/*
+ * void
+ * rmc_delete_class(struct rm_ifdat *ifdat, struct rm_class *cl) - This
+ * function deletes a class from the link-sharing structure and frees
+ * all resources associated with the class.
+ *
+ * Returns: NONE
+ */
+
+void
+rmc_delete_class(struct rm_ifdat *ifd, struct rm_class *cl)
+{
+ struct rm_class *p, *head, *previous;
+ int s;
+
+ ASSERT(cl->children_ == NULL);
+
+ if (cl->sleeping_)
+ CALLOUT_STOP(&cl->callout_);
+
+#ifdef __NetBSD__
+ s = splnet();
+#else
+ s = splimp();
+#endif
+ IFQ_LOCK(ifd->ifq_);
+ /*
+ * Free packets in the packet queue.
+ * XXX - this may not be a desired behavior. Packets should be
+ * re-queued.
+ */
+ rmc_dropall(cl);
+
+ /*
+ * If the class has a parent, then remove the class from the
+ * parent's children chain.
+ */
+ if (cl->parent_ != NULL) {
+ head = cl->parent_->children_;
+ p = previous = head;
+ if (head->next_ == NULL) {
+ ASSERT(head == cl);
+ cl->parent_->children_ = NULL;
+ cl->parent_->leaf_ = 1;
+ } else while (p != NULL) {
+ if (p == cl) {
+ if (cl == head)
+ cl->parent_->children_ = cl->next_;
+ else
+ previous->next_ = cl->next_;
+ cl->next_ = NULL;
+ p = NULL;
+ } else {
+ previous = p;
+ p = p->next_;
+ }
+ }
+ }
+
+ /*
+ * Delete class from class priority peer list.
+ */
+ if ((p = ifd->active_[cl->pri_]) != NULL) {
+ /*
+ * If there is more than one member of this priority
+ * level, then look for class(cl) in the priority level.
+ */
+ if (p != p->peer_) {
+ while (p->peer_ != cl)
+ p = p->peer_;
+ p->peer_ = cl->peer_;
+
+ if (ifd->active_[cl->pri_] == cl)
+ ifd->active_[cl->pri_] = cl->peer_;
+ } else {
+ ASSERT(p == cl);
+ ifd->active_[cl->pri_] = NULL;
+ }
+ }
+
+ /*
+ * Recompute the WRR weights.
+ */
+ if (ifd->wrr_) {
+ ifd->alloc_[cl->pri_] -= cl->allotment_;
+ ifd->num_[cl->pri_]--;
+ rmc_wrr_set_weights(ifd);
+ }
+
+ /*
+ * Re-compute the depth of the tree.
+ */
+#if 1 /* ALTQ */
+ rmc_depth_recompute(cl->parent_);
+#else
+ rmc_depth_recompute(ifd->root_);
+#endif
+
+ IFQ_UNLOCK(ifd->ifq_);
+ splx(s);
+
+ /*
+ * Free the class structure.
+ */
+ if (cl->red_ != NULL) {
+#ifdef ALTQ_RIO
+ if (q_is_rio(cl->q_))
+ rio_destroy((rio_t *)cl->red_);
+#endif
+#ifdef ALTQ_RED
+ if (q_is_red(cl->q_))
+ red_destroy(cl->red_);
+#endif
+ }
+ free(cl->q_, M_DEVBUF);
+ free(cl, M_DEVBUF);
+}
+
+
+/*
+ * void
+ * rmc_init(...) - Initialize the resource management data structures
+ * associated with the output portion of interface 'ifp'. 'ifd' is
+ * where the structures will be built (for backwards compatibility, the
+ * structures aren't kept in the ifnet struct). 'nsecPerByte'
+ * gives the link speed (inverse of bandwidth) in nanoseconds/byte.
+ * 'restart' is the driver-specific routine that the generic 'delay
+ * until under limit' action will call to restart output. `maxq'
+ * is the queue size of the 'link' & 'default' classes. 'maxqueued'
+ * is the maximum number of packets that the resource management
+ * code will allow to be queued 'downstream' (this is typically 1).
+ *
+ * Returns: NONE
+ */
+
+void
+rmc_init(struct ifaltq *ifq, struct rm_ifdat *ifd, u_int nsecPerByte,
+ void (*restart)(struct ifaltq *), int maxq, int maxqueued, u_int maxidle,
+ int minidle, u_int offtime, int flags)
+{
+ int i, mtu;
+
+ /*
+ * Initialize the CBQ tracing/debug facility.
+ */
+ CBQTRACEINIT();
+
+ bzero((char *)ifd, sizeof (*ifd));
+ mtu = ifq->altq_ifp->if_mtu;
+ ifd->ifq_ = ifq;
+ ifd->restart = restart;
+ ifd->maxqueued_ = maxqueued;
+ ifd->ns_per_byte_ = nsecPerByte;
+ ifd->maxpkt_ = mtu;
+ ifd->wrr_ = (flags & RMCF_WRR) ? 1 : 0;
+ ifd->efficient_ = (flags & RMCF_EFFICIENT) ? 1 : 0;
+#if 1
+ ifd->maxiftime_ = mtu * nsecPerByte / 1000 * 16;
+ if (mtu * nsecPerByte > 10 * 1000000)
+ ifd->maxiftime_ /= 4;
+#endif
+
+ reset_cutoff(ifd);
+ CBQTRACE(rmc_init, 'INIT', ifd->cutoff_);
+
+ /*
+ * Initialize the CBQ's WRR state.
+ */
+ for (i = 0; i < RM_MAXPRIO; i++) {
+ ifd->alloc_[i] = 0;
+ ifd->M_[i] = 0;
+ ifd->num_[i] = 0;
+ ifd->na_[i] = 0;
+ ifd->active_[i] = NULL;
+ }
+
+ /*
+ * Initialize current packet state.
+ */
+ ifd->qi_ = 0;
+ ifd->qo_ = 0;
+ for (i = 0; i < RM_MAXQUEUED; i++) {
+ ifd->class_[i] = NULL;
+ ifd->curlen_[i] = 0;
+ ifd->borrowed_[i] = NULL;
+ }
+
+ /*
+ * Create the root class of the link-sharing structure.
+ */
+ if ((ifd->root_ = rmc_newclass(0, ifd,
+ nsecPerByte,
+ rmc_root_overlimit, maxq, 0, 0,
+ maxidle, minidle, offtime,
+ 0, 0)) == NULL) {
+ printf("rmc_init: root class not allocated\n");
+ return ;
+ }
+ ifd->root_->depth_ = 0;
+}
+
+/*
+ * void
+ * rmc_queue_packet(struct rm_class *cl, mbuf_t *m) - Add packet given by
+ * mbuf 'm' to queue for resource class 'cl'. This routine is called
+ * by a driver's if_output routine. This routine must be called with
+ * output packet completion interrupts locked out (to avoid racing with
+ * rmc_dequeue_next).
+ *
+ * Returns: 0 on successful queueing
+ * -1 when packet drop occurs
+ */
+int
+rmc_queue_packet(struct rm_class *cl, mbuf_t *m)
+{
+ struct timeval now;
+ struct rm_ifdat *ifd = cl->ifdat_;
+ int cpri = cl->pri_;
+ int is_empty = qempty(cl->q_);
+
+ RM_GETTIME(now);
+ if (ifd->cutoff_ > 0) {
+ if (TV_LT(&cl->undertime_, &now)) {
+ if (ifd->cutoff_ > cl->depth_)
+ ifd->cutoff_ = cl->depth_;
+ CBQTRACE(rmc_queue_packet, 'ffoc', cl->depth_);
+ }
+#if 1 /* ALTQ */
+ else {
+ /*
+ * the class is overlimit. if the class has
+ * underlimit ancestors, set cutoff to the lowest
+ * depth among them.
+ */
+ struct rm_class *borrow = cl->borrow_;
+
+ while (borrow != NULL &&
+ borrow->depth_ < ifd->cutoff_) {
+ if (TV_LT(&borrow->undertime_, &now)) {
+ ifd->cutoff_ = borrow->depth_;
+ CBQTRACE(rmc_queue_packet, 'ffob', ifd->cutoff_);
+ break;
+ }
+ borrow = borrow->borrow_;
+ }
+ }
+#else /* !ALTQ */
+ else if ((ifd->cutoff_ > 1) && cl->borrow_) {
+ if (TV_LT(&cl->borrow_->undertime_, &now)) {
+ ifd->cutoff_ = cl->borrow_->depth_;
+ CBQTRACE(rmc_queue_packet, 'ffob',
+ cl->borrow_->depth_);
+ }
+ }
+#endif /* !ALTQ */
+ }
+
+ if (_rmc_addq(cl, m) < 0)
+ /* failed */
+ return (-1);
+
+ if (is_empty) {
+ CBQTRACE(rmc_queue_packet, 'ytpe', cl->stats_.handle);
+ ifd->na_[cpri]++;
+ }
+
+ if (qlen(cl->q_) > qlimit(cl->q_)) {
+ /* note: qlimit can be set to 0 or 1 */
+ rmc_drop_action(cl);
+ return (-1);
+ }
+ return (0);
+}
+
+/*
+ * void
+ * rmc_tl_satisfied(struct rm_ifdat *ifd, struct timeval *now) - Check all
+ * classes to see if they are satisfied.
+ */
+
+static void
+rmc_tl_satisfied(struct rm_ifdat *ifd, struct timeval *now)
+{
+ int i;
+ rm_class_t *p, *bp;
+
+ for (i = RM_MAXPRIO - 1; i >= 0; i--) {
+ if ((bp = ifd->active_[i]) != NULL) {
+ p = bp;
+ do {
+ if (!rmc_satisfied(p, now)) {
+ ifd->cutoff_ = p->depth_;
+ return;
+ }
+ p = p->peer_;
+ } while (p != bp);
+ }
+ }
+
+ reset_cutoff(ifd);
+}
+
+/*
+ * rmc_satisfied - Return 1 if the class is satisfied, 0 otherwise.
+ */
+
+static int
+rmc_satisfied(struct rm_class *cl, struct timeval *now)
+{
+ rm_class_t *p;
+
+ if (cl == NULL)
+ return (1);
+ if (TV_LT(now, &cl->undertime_))
+ return (1);
+ if (cl->depth_ == 0) {
+ if (!cl->sleeping_ && (qlen(cl->q_) > cl->qthresh_))
+ return (0);
+ else
+ return (1);
+ }
+ if (cl->children_ != NULL) {
+ p = cl->children_;
+ while (p != NULL) {
+ if (!rmc_satisfied(p, now))
+ return (0);
+ p = p->next_;
+ }
+ }
+
+ return (1);
+}
+
+/*
+ * Return 1 if class 'cl' is under limit or can borrow from a parent,
+ * 0 if overlimit. As a side-effect, this routine will invoke the
+ * class overlimit action if the class is overlimit.
+ */
+
+static int
+rmc_under_limit(struct rm_class *cl, struct timeval *now)
+{
+ rm_class_t *p = cl;
+ rm_class_t *top;
+ struct rm_ifdat *ifd = cl->ifdat_;
+
+ ifd->borrowed_[ifd->qi_] = NULL;
+ /*
+ * If cl is the root class, then always return that it is
+ * underlimit. Otherwise, check to see if the class is underlimit.
+ */
+ if (cl->parent_ == NULL)
+ return (1);
+
+ if (cl->sleeping_) {
+ if (TV_LT(now, &cl->undertime_))
+ return (0);
+
+ CALLOUT_STOP(&cl->callout_);
+ cl->sleeping_ = 0;
+ cl->undertime_.tv_sec = 0;
+ return (1);
+ }
+
+ top = NULL;
+ while (cl->undertime_.tv_sec && TV_LT(now, &cl->undertime_)) {
+ if (((cl = cl->borrow_) == NULL) ||
+ (cl->depth_ > ifd->cutoff_)) {
+#ifdef ADJUST_CUTOFF
+ if (cl != NULL)
+ /* cutoff is taking effect, just
+ return false without calling
+ the delay action. */
+ return (0);
+#endif
+#ifdef BORROW_OFFTIME
+ /*
+ * check if the class can borrow offtime too.
+ * borrow offtime from the top of the borrow
+ * chain if the top class is not overloaded.
+ */
+ if (cl != NULL) {
+ /* cutoff is taking effect, use this class as top. */
+ top = cl;
+ CBQTRACE(rmc_under_limit, 'ffou', ifd->cutoff_);
+ }
+ if (top != NULL && top->avgidle_ == top->minidle_)
+ top = NULL;
+ p->overtime_ = *now;
+ (p->overlimit)(p, top);
+#else
+ p->overtime_ = *now;
+ (p->overlimit)(p, NULL);
+#endif
+ return (0);
+ }
+ top = cl;
+ }
+
+ if (cl != p)
+ ifd->borrowed_[ifd->qi_] = cl;
+ return (1);
+}
+
+/*
+ * _rmc_wrr_dequeue_next() - This is the scheduler for WRR, as opposed
+ * to packet-by-packet round robin.
+ *
+ * The heart of the weighted round-robin scheduler, which decides which
+ * class next gets to send a packet. Highest priority first, then
+ * weighted round-robin within priorities.
+ *
+ * Each able-to-send class gets to send until its byte allocation is
+ * exhausted. Thus, the active pointer is only changed after a class has
+ * exhausted its allocation.
+ *
+ * If the scheduler finds no class that is underlimit or able to borrow,
+ * then the first class found that had a nonzero queue and is allowed to
+ * borrow gets to send.
+ */
+
+static mbuf_t *
+_rmc_wrr_dequeue_next(struct rm_ifdat *ifd, int op)
+{
+ struct rm_class *cl = NULL, *first = NULL;
+ u_int deficit;
+ int cpri;
+ mbuf_t *m;
+ struct timeval now;
+
+ RM_GETTIME(now);
+
+ /*
+ * if the driver polls the top of the queue and then removes
+ * the polled packet, we must return the same packet.
+ */
+ if (op == ALTDQ_REMOVE && ifd->pollcache_) {
+ cl = ifd->pollcache_;
+ cpri = cl->pri_;
+ if (ifd->efficient_) {
+ /* check if this class is overlimit */
+ if (cl->undertime_.tv_sec != 0 &&
+ rmc_under_limit(cl, &now) == 0)
+ first = cl;
+ }
+ ifd->pollcache_ = NULL;
+ goto _wrr_out;
+ }
+ else {
+ /* mode == ALTDQ_POLL || pollcache == NULL */
+ ifd->pollcache_ = NULL;
+ ifd->borrowed_[ifd->qi_] = NULL;
+ }
+#ifdef ADJUST_CUTOFF
+ _again:
+#endif
+ for (cpri = RM_MAXPRIO - 1; cpri >= 0; cpri--) {
+ if (ifd->na_[cpri] == 0)
+ continue;
+ deficit = 0;
+ /*
+ * Loop through twice for a priority level, if some class
+ * was unable to send a packet the first round because
+ * of the weighted round-robin mechanism.
+ * During the second loop at this level, deficit==2.
+ * (This second loop is not needed if for every class,
+ * "M[cl->pri_])" times "cl->allotment" is greater than
+ * the byte size for the largest packet in the class.)
+ */
+ _wrr_loop:
+ cl = ifd->active_[cpri];
+ ASSERT(cl != NULL);
+ do {
+ if ((deficit < 2) && (cl->bytes_alloc_ <= 0))
+ cl->bytes_alloc_ += cl->w_allotment_;
+ if (!qempty(cl->q_)) {
+ if ((cl->undertime_.tv_sec == 0) ||
+ rmc_under_limit(cl, &now)) {
+ if (cl->bytes_alloc_ > 0 || deficit > 1)
+ goto _wrr_out;
+
+ /* underlimit but no alloc */
+ deficit = 1;
+#if 1
+ ifd->borrowed_[ifd->qi_] = NULL;
+#endif
+ }
+ else if (first == NULL && cl->borrow_ != NULL)
+ first = cl; /* borrowing candidate */
+ }
+
+ cl->bytes_alloc_ = 0;
+ cl = cl->peer_;
+ } while (cl != ifd->active_[cpri]);
+
+ if (deficit == 1) {
+ /* first loop found an underlimit class with deficit */
+ /* Loop on same priority level, with new deficit. */
+ deficit = 2;
+ goto _wrr_loop;
+ }
+ }
+
+#ifdef ADJUST_CUTOFF
+ /*
+ * no underlimit class found. if cutoff is taking effect,
+ * increase cutoff and try again.
+ */
+ if (first != NULL && ifd->cutoff_ < ifd->root_->depth_) {
+ ifd->cutoff_++;
+ CBQTRACE(_rmc_wrr_dequeue_next, 'ojda', ifd->cutoff_);
+ goto _again;
+ }
+#endif /* ADJUST_CUTOFF */
+ /*
+ * If LINK_EFFICIENCY is turned on, then the first overlimit
+ * class we encounter will send a packet if all the classes
+ * of the link-sharing structure are overlimit.
+ */
+ reset_cutoff(ifd);
+ CBQTRACE(_rmc_wrr_dequeue_next, 'otsr', ifd->cutoff_);
+
+ if (!ifd->efficient_ || first == NULL)
+ return (NULL);
+
+ cl = first;
+ cpri = cl->pri_;
+#if 0 /* too time-consuming for nothing */
+ if (cl->sleeping_)
+ CALLOUT_STOP(&cl->callout_);
+ cl->sleeping_ = 0;
+ cl->undertime_.tv_sec = 0;
+#endif
+ ifd->borrowed_[ifd->qi_] = cl->borrow_;
+ ifd->cutoff_ = cl->borrow_->depth_;
+
+ /*
+ * Dequeue the packet and do the bookkeeping...
+ */
+ _wrr_out:
+ if (op == ALTDQ_REMOVE) {
+ m = _rmc_getq(cl);
+ if (m == NULL)
+ panic("_rmc_wrr_dequeue_next");
+ if (qempty(cl->q_))
+ ifd->na_[cpri]--;
+
+ /*
+ * Update class statistics and link data.
+ */
+ if (cl->bytes_alloc_ > 0)
+ cl->bytes_alloc_ -= m_pktlen(m);
+
+ if ((cl->bytes_alloc_ <= 0) || first == cl)
+ ifd->active_[cl->pri_] = cl->peer_;
+ else
+ ifd->active_[cl->pri_] = cl;
+
+ ifd->class_[ifd->qi_] = cl;
+ ifd->curlen_[ifd->qi_] = m_pktlen(m);
+ ifd->now_[ifd->qi_] = now;
+ ifd->qi_ = (ifd->qi_ + 1) % ifd->maxqueued_;
+ ifd->queued_++;
+ } else {
+ /* mode == ALTDQ_PPOLL */
+ m = _rmc_pollq(cl);
+ ifd->pollcache_ = cl;
+ }
+ return (m);
+}
+
+/*
+ * Dequeue & return next packet from the highest priority class that
+ * has a packet to send & has enough allocation to send it. This
+ * routine is called by a driver whenever it needs a new packet to
+ * output.
+ */
+static mbuf_t *
+_rmc_prr_dequeue_next(struct rm_ifdat *ifd, int op)
+{
+ mbuf_t *m;
+ int cpri;
+ struct rm_class *cl, *first = NULL;
+ struct timeval now;
+
+ RM_GETTIME(now);
+
+ /*
+ * if the driver polls the top of the queue and then removes
+ * the polled packet, we must return the same packet.
+ */
+ if (op == ALTDQ_REMOVE && ifd->pollcache_) {
+ cl = ifd->pollcache_;
+ cpri = cl->pri_;
+ ifd->pollcache_ = NULL;
+ goto _prr_out;
+ } else {
+ /* mode == ALTDQ_POLL || pollcache == NULL */
+ ifd->pollcache_ = NULL;
+ ifd->borrowed_[ifd->qi_] = NULL;
+ }
+#ifdef ADJUST_CUTOFF
+ _again:
+#endif
+ for (cpri = RM_MAXPRIO - 1; cpri >= 0; cpri--) {
+ if (ifd->na_[cpri] == 0)
+ continue;
+ cl = ifd->active_[cpri];
+ ASSERT(cl != NULL);
+ do {
+ if (!qempty(cl->q_)) {
+ if ((cl->undertime_.tv_sec == 0) ||
+ rmc_under_limit(cl, &now))
+ goto _prr_out;
+ if (first == NULL && cl->borrow_ != NULL)
+ first = cl;
+ }
+ cl = cl->peer_;
+ } while (cl != ifd->active_[cpri]);
+ }
+
+#ifdef ADJUST_CUTOFF
+ /*
+ * no underlimit class found. if cutoff is taking effect, increase
+ * cutoff and try again.
+ */
+ if (first != NULL && ifd->cutoff_ < ifd->root_->depth_) {
+ ifd->cutoff_++;
+ goto _again;
+ }
+#endif /* ADJUST_CUTOFF */
+ /*
+ * If LINK_EFFICIENCY is turned on, then the first overlimit
+ * class we encounter will send a packet if all the classes
+ * of the link-sharing structure are overlimit.
+ */
+ reset_cutoff(ifd);
+ if (!ifd->efficient_ || first == NULL)
+ return (NULL);
+
+ cl = first;
+ cpri = cl->pri_;
+#if 0 /* too time-consuming for nothing */
+ if (cl->sleeping_)
+ CALLOUT_STOP(&cl->callout_);
+ cl->sleeping_ = 0;
+ cl->undertime_.tv_sec = 0;
+#endif
+ ifd->borrowed_[ifd->qi_] = cl->borrow_;
+ ifd->cutoff_ = cl->borrow_->depth_;
+
+ /*
+ * Dequeue the packet and do the bookkeeping...
+ */
+ _prr_out:
+ if (op == ALTDQ_REMOVE) {
+ m = _rmc_getq(cl);
+ if (m == NULL)
+ panic("_rmc_prr_dequeue_next");
+ if (qempty(cl->q_))
+ ifd->na_[cpri]--;
+
+ ifd->active_[cpri] = cl->peer_;
+
+ ifd->class_[ifd->qi_] = cl;
+ ifd->curlen_[ifd->qi_] = m_pktlen(m);
+ ifd->now_[ifd->qi_] = now;
+ ifd->qi_ = (ifd->qi_ + 1) % ifd->maxqueued_;
+ ifd->queued_++;
+ } else {
+ /* mode == ALTDQ_POLL */
+ m = _rmc_pollq(cl);
+ ifd->pollcache_ = cl;
+ }
+ return (m);
+}
+
+/*
+ * mbuf_t *
+ * rmc_dequeue_next(struct rm_ifdat *ifd, struct timeval *now) - this function
+ * is invoked by the packet driver to get the next packet to be
+ * dequeued and output on the link. If WRR is enabled, then the
+ * WRR dequeue next routine will determine the next packet to be sent.
+ * Otherwise, packet-by-packet round robin is invoked.
+ *
+ * Returns: NULL, if a packet is not available or if all
+ * classes are overlimit.
+ *
+ * Otherwise, Pointer to the next packet.
+ */
+
+mbuf_t *
+rmc_dequeue_next(struct rm_ifdat *ifd, int mode)
+{
+ if (ifd->queued_ >= ifd->maxqueued_)
+ return (NULL);
+ else if (ifd->wrr_)
+ return (_rmc_wrr_dequeue_next(ifd, mode));
+ else
+ return (_rmc_prr_dequeue_next(ifd, mode));
+}
+
+/*
+ * Update the utilization estimate for the packet that just completed.
+ * The packet's class & the parent(s) of that class all get their
+ * estimators updated. This routine is called by the driver's output-
+ * packet-completion interrupt service routine.
+ */
+
+/*
+ * a macro to approximate "divide by 1000" that gives 0.000999,
+ * if a value has enough effective digits.
+ * (on pentium, mul takes 9 cycles but div takes 46!)
+ */
+#define NSEC_TO_USEC(t) (((t) >> 10) + ((t) >> 16) + ((t) >> 17))
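+/*
+ * the approximation error is about 0.055%: 1/1024 + 1/65536 +
+ * 1/131072 = 0.00099945...  e.g., NSEC_TO_USEC(1000000) yields
+ * 976 + 15 + 7 = 998 against the exact 1000.
+ */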
+void
+rmc_update_class_util(struct rm_ifdat *ifd)
+{
+ int idle, avgidle, pktlen;
+ int pkt_time, tidle;
+ rm_class_t *cl, *borrowed;
+ rm_class_t *borrows;
+ struct timeval *nowp;
+
+ /*
+ * Get the most recent completed class.
+ */
+ if ((cl = ifd->class_[ifd->qo_]) == NULL)
+ return;
+
+ pktlen = ifd->curlen_[ifd->qo_];
+ borrowed = ifd->borrowed_[ifd->qo_];
+ borrows = borrowed;
+
+ PKTCNTR_ADD(&cl->stats_.xmit_cnt, pktlen);
+
+ /*
+ * Run estimator on class and its ancestors.
+ */
+ /*
+ * rm_update_class_util is designed to be called when the
+ * transfer is completed from a xmit complete interrupt,
+ * but most drivers don't implement an upcall for that.
+ * so, just use estimated completion time.
+ * as a result, ifd->qi_ and ifd->qo_ are always synced.
+ */
+ nowp = &ifd->now_[ifd->qo_];
+ /* get pkt_time (for link) in usec */
+#if 1 /* use approximation */
+ pkt_time = ifd->curlen_[ifd->qo_] * ifd->ns_per_byte_;
+ pkt_time = NSEC_TO_USEC(pkt_time);
+#else
+ pkt_time = ifd->curlen_[ifd->qo_] * ifd->ns_per_byte_ / 1000;
+#endif
+#if 1 /* ALTQ4PPP */
+ if (TV_LT(nowp, &ifd->ifnow_)) {
+ int iftime;
+
+ /*
+ * make sure the estimated completion time does not go
+ * too far. it can happen when the link layer supports
+ * data compression or the interface speed is set to
+ * a much lower value.
+ */
+ TV_DELTA(&ifd->ifnow_, nowp, iftime);
+ if (iftime+pkt_time < ifd->maxiftime_) {
+ TV_ADD_DELTA(&ifd->ifnow_, pkt_time, &ifd->ifnow_);
+ } else {
+ TV_ADD_DELTA(nowp, ifd->maxiftime_, &ifd->ifnow_);
+ }
+ } else {
+ TV_ADD_DELTA(nowp, pkt_time, &ifd->ifnow_);
+ }
+#else
+ if (TV_LT(nowp, &ifd->ifnow_)) {
+ TV_ADD_DELTA(&ifd->ifnow_, pkt_time, &ifd->ifnow_);
+ } else {
+ TV_ADD_DELTA(nowp, pkt_time, &ifd->ifnow_);
+ }
+#endif
+
+ while (cl != NULL) {
+ TV_DELTA(&ifd->ifnow_, &cl->last_, idle);
+ if (idle >= 2000000)
+ /*
+ * this class is idle enough, reset avgidle.
+ * (TV_DELTA returns 2000000 us when delta is large.)
+ */
+ cl->avgidle_ = cl->maxidle_;
+
+ /* get pkt_time (for class) in usec */
+#if 1 /* use approximation */
+ pkt_time = pktlen * cl->ns_per_byte_;
+ pkt_time = NSEC_TO_USEC(pkt_time);
+#else
+ pkt_time = pktlen * cl->ns_per_byte_ / 1000;
+#endif
+ idle -= pkt_time;
+
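+		/*
+		 * exponential decay with gain 2^-RM_FILTER_GAIN:
+		 * avgidle <- avgidle*(31/32) + idle, so with a steady
+		 * idle value avgidle settles at about 32 times it.
+		 */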
+ avgidle = cl->avgidle_;
+ avgidle += idle - (avgidle >> RM_FILTER_GAIN);
+ cl->avgidle_ = avgidle;
+
+ /* Are we overlimit ? */
+ if (avgidle <= 0) {
+ CBQTRACE(rmc_update_class_util, 'milo', cl->stats_.handle);
+#if 1 /* ALTQ */
+ /*
+ * need some lower bound for avgidle, otherwise
+ * a borrowing class gets unbounded penalty.
+ */
+ if (avgidle < cl->minidle_)
+ avgidle = cl->avgidle_ = cl->minidle_;
+#endif
+ /* set next idle to make avgidle 0 */
+ tidle = pkt_time +
+ (((1 - RM_POWER) * avgidle) >> RM_FILTER_GAIN);
+ TV_ADD_DELTA(nowp, tidle, &cl->undertime_);
+ ++cl->stats_.over;
+ } else {
+ cl->avgidle_ =
+ (avgidle > cl->maxidle_) ? cl->maxidle_ : avgidle;
+ cl->undertime_.tv_sec = 0;
+ if (cl->sleeping_) {
+ CALLOUT_STOP(&cl->callout_);
+ cl->sleeping_ = 0;
+ }
+ }
+
+ if (borrows != NULL) {
+ if (borrows != cl)
+ ++cl->stats_.borrows;
+ else
+ borrows = NULL;
+ }
+ cl->last_ = ifd->ifnow_;
+ cl->last_pkttime_ = pkt_time;
+
+#if 1
+ if (cl->parent_ == NULL) {
+ /* take stats of root class */
+ PKTCNTR_ADD(&cl->stats_.xmit_cnt, pktlen);
+ }
+#endif
+
+ cl = cl->parent_;
+ }
+
+ /*
+ * Check to see if cutoff needs to set to a new level.
+ */
+ cl = ifd->class_[ifd->qo_];
+ if (borrowed && (ifd->cutoff_ >= borrowed->depth_)) {
+#if 1 /* ALTQ */
+ if ((qlen(cl->q_) <= 0) || TV_LT(nowp, &borrowed->undertime_)) {
+ rmc_tl_satisfied(ifd, nowp);
+ CBQTRACE(rmc_update_class_util, 'broe', ifd->cutoff_);
+ } else {
+ ifd->cutoff_ = borrowed->depth_;
+ CBQTRACE(rmc_update_class_util, 'ffob', borrowed->depth_);
+ }
+#else /* !ALTQ */
+ if ((qlen(cl->q_) <= 1) || TV_LT(&now, &borrowed->undertime_)) {
+ reset_cutoff(ifd);
+#ifdef notdef
+ rmc_tl_satisfied(ifd, &now);
+#endif
+ CBQTRACE(rmc_update_class_util, 'broe', ifd->cutoff_);
+ } else {
+ ifd->cutoff_ = borrowed->depth_;
+ CBQTRACE(rmc_update_class_util, 'ffob', borrowed->depth_);
+ }
+#endif /* !ALTQ */
+ }
+
+ /*
+ * Release class slot
+ */
+ ifd->borrowed_[ifd->qo_] = NULL;
+ ifd->class_[ifd->qo_] = NULL;
+ ifd->qo_ = (ifd->qo_ + 1) % ifd->maxqueued_;
+ ifd->queued_--;
+}
+
+/*
+ * void
+ * rmc_drop_action(struct rm_class *cl) - Generic (not protocol-specific)
+ * over-limit action routines. These get invoked by rmc_under_limit()
+ *	if a class with packets to send is over its bandwidth limit and can't
+ * borrow from a parent class.
+ *
+ * Returns: NONE
+ */
+
+static void
+rmc_drop_action(struct rm_class *cl)
+{
+ struct rm_ifdat *ifd = cl->ifdat_;
+
+ ASSERT(qlen(cl->q_) > 0);
+ _rmc_dropq(cl);
+ if (qempty(cl->q_))
+ ifd->na_[cl->pri_]--;
+}
+
+void rmc_dropall(struct rm_class *cl)
+{
+ struct rm_ifdat *ifd = cl->ifdat_;
+
+ if (!qempty(cl->q_)) {
+ _flushq(cl->q_);
+
+ ifd->na_[cl->pri_]--;
+ }
+}
+
+#if (__FreeBSD_version > 300000)
+/* hzto() was removed in FreeBSD-3.0 */
+static int hzto(struct timeval *);
+
+static int
+hzto(tv)
+ struct timeval *tv;
+{
+ struct timeval t2;
+
+ getmicrotime(&t2);
+ t2.tv_sec = tv->tv_sec - t2.tv_sec;
+ t2.tv_usec = tv->tv_usec - t2.tv_usec;
+ return (tvtohz(&t2));
+}
+#endif /* __FreeBSD_version > 300000 */
+
+/*
+ * void
+ *	rmc_delay_action(struct rm_class *cl, struct rm_class *borrow) - This
+ *	function is the generic CBQ delay action routine.  It is invoked via
+ *	rmc_under_limit when the packet is discovered to be overlimit.
+ *
+ *	If the delay action is the result of a borrow class being overlimit,
+ *	then delay for the offtime of the borrowing class that is overlimit.
+ *
+ * Returns: NONE
+ */
+
+void
+rmc_delay_action(struct rm_class *cl, struct rm_class *borrow)
+{
+ int delay, t, extradelay;
+
+ cl->stats_.overactions++;
+ TV_DELTA(&cl->undertime_, &cl->overtime_, delay);
+#ifndef BORROW_OFFTIME
+ delay += cl->offtime_;
+#endif
+
+ if (!cl->sleeping_) {
+ CBQTRACE(rmc_delay_action, 'yled', cl->stats_.handle);
+#ifdef BORROW_OFFTIME
+ if (borrow != NULL)
+ extradelay = borrow->offtime_;
+ else
+#endif
+ extradelay = cl->offtime_;
+
+#ifdef ALTQ
+ /*
+ * XXX recalculate suspend time:
+ * current undertime is (tidle + pkt_time) calculated
+ * from the last transmission.
+ * tidle: time required to bring avgidle back to 0
+ * pkt_time: target waiting time for this class
+ * we need to replace pkt_time by offtime
+ */
+ extradelay -= cl->last_pkttime_;
+#endif
+ if (extradelay > 0) {
+ TV_ADD_DELTA(&cl->undertime_, extradelay, &cl->undertime_);
+ delay += extradelay;
+ }
+
+ cl->sleeping_ = 1;
+ cl->stats_.delays++;
+
+ /*
+ * Since packets are phased randomly with respect to the
+ * clock, 1 tick (the next clock tick) can be an arbitrarily
+ * short time so we have to wait for at least two ticks.
+ * NOTE: If there's no other traffic, we need the timer as
+ * a 'backstop' to restart this class.
+ */
+ if (delay > tick * 2) {
+#ifdef __FreeBSD__
+ /* FreeBSD rounds up the tick */
+ t = hzto(&cl->undertime_);
+#else
+ /* other BSDs round down the tick */
+ t = hzto(&cl->undertime_) + 1;
+#endif
+ } else
+ t = 2;
+ CALLOUT_RESET(&cl->callout_, t,
+ (timeout_t *)rmc_restart, (caddr_t)cl);
+ }
+}
+
+/*
+ * void
+ *	rmc_restart() - is just a helper routine for rmc_delay_action -- it is
+ *	called by the system timer code and is responsible for checking if
+ *	the class is still sleeping (it might have been restarted as a side
+ *	effect of the queue scan on a packet arrival) and, if so, restarting
+ *	output for the class.  Inspecting the class state and restarting
+ *	output require locking the class structure.  In general the driver
+ *	is responsible for locking, but this is the only routine that is not
+ *	called directly or indirectly from the interface driver, so it has
+ *	to know about system locking conventions.  Under BSD, locking is done
+ *	by raising the IPL to splimp, so that's what's implemented here.  On
+ *	a different system this would probably need to be changed.
+ *
+ * Returns: NONE
+ */
+
+static void
+rmc_restart(struct rm_class *cl)
+{
+ struct rm_ifdat *ifd = cl->ifdat_;
+ int s;
+
+#ifdef __NetBSD__
+ s = splnet();
+#else
+ s = splimp();
+#endif
+ IFQ_LOCK(ifd->ifq_);
+ if (cl->sleeping_) {
+ cl->sleeping_ = 0;
+ cl->undertime_.tv_sec = 0;
+
+ if (ifd->queued_ < ifd->maxqueued_ && ifd->restart != NULL) {
+ CBQTRACE(rmc_restart, 'trts', cl->stats_.handle);
+ (ifd->restart)(ifd->ifq_);
+ }
+ }
+ IFQ_UNLOCK(ifd->ifq_);
+ splx(s);
+}
+
+/*
+ * void
+ *	rmc_root_overlimit(struct rm_class *cl) - This is the generic overlimit
+ * handling routine for the root class of the link sharing structure.
+ *
+ * Returns: NONE
+ */
+
+static void
+rmc_root_overlimit(struct rm_class *cl, struct rm_class *borrow)
+{
+ panic("rmc_root_overlimit");
+}
+
+/*
+ * Packet queue handling routines.  These exist to localize, in one
+ * place, the code's dependence on whether queues are RED queues or
+ * drop-tail queues.
+ */
+
+static int
+_rmc_addq(rm_class_t *cl, mbuf_t *m)
+{
+#ifdef ALTQ_RIO
+ if (q_is_rio(cl->q_))
+ return rio_addq((rio_t *)cl->red_, cl->q_, m, cl->pktattr_);
+#endif
+#ifdef ALTQ_RED
+ if (q_is_red(cl->q_))
+ return red_addq(cl->red_, cl->q_, m, cl->pktattr_);
+#endif /* ALTQ_RED */
+
+ if (cl->flags_ & RMCF_CLEARDSCP)
+ write_dsfield(m, cl->pktattr_, 0);
+
+ _addq(cl->q_, m);
+ return (0);
+}
+
+/* note: _rmc_dropq is not called for red */
+static void
+_rmc_dropq(rm_class_t *cl)
+{
+ mbuf_t *m;
+
+ if ((m = _getq(cl->q_)) != NULL)
+ m_freem(m);
+}
+
+static mbuf_t *
+_rmc_getq(rm_class_t *cl)
+{
+#ifdef ALTQ_RIO
+ if (q_is_rio(cl->q_))
+ return rio_getq((rio_t *)cl->red_, cl->q_);
+#endif
+#ifdef ALTQ_RED
+ if (q_is_red(cl->q_))
+ return red_getq(cl->red_, cl->q_);
+#endif
+ return _getq(cl->q_);
+}
+
+static mbuf_t *
+_rmc_pollq(rm_class_t *cl)
+{
+ return qhead(cl->q_);
+}
+
+#ifdef CBQ_TRACE
+
+struct cbqtrace cbqtrace_buffer[NCBQTRACE+1];
+struct cbqtrace *cbqtrace_ptr = NULL;
+int cbqtrace_count;
+
+/*
+ * DDB hook to trace cbq events:
+ * the last NCBQTRACE events are held in a circular buffer.
+ * use "call cbqtrace_dump(N)" to display 20 events from the Nth event.
+ */
+void cbqtrace_dump(int);
+static char *rmc_funcname(void *);
+
+static struct rmc_funcs {
+ void *func;
+ char *name;
+} rmc_funcs[] =
+{
+ rmc_init, "rmc_init",
+ rmc_queue_packet, "rmc_queue_packet",
+ rmc_under_limit, "rmc_under_limit",
+ rmc_update_class_util, "rmc_update_class_util",
+ rmc_delay_action, "rmc_delay_action",
+ rmc_restart, "rmc_restart",
+ _rmc_wrr_dequeue_next, "_rmc_wrr_dequeue_next",
+ NULL, NULL
+};
+
+static char *rmc_funcname(void *func)
+{
+ struct rmc_funcs *fp;
+
+ for (fp = rmc_funcs; fp->func != NULL; fp++)
+ if (fp->func == func)
+ return (fp->name);
+ return ("unknown");
+}
+
+void cbqtrace_dump(int counter)
+{
+ int i, *p;
+ char *cp;
+
+ counter = counter % NCBQTRACE;
+ p = (int *)&cbqtrace_buffer[counter];
+
+ for (i=0; i<20; i++) {
+ printf("[0x%x] ", *p++);
+ printf("%s: ", rmc_funcname((void *)*p++));
+ cp = (char *)p++;
+ printf("%c%c%c%c: ", cp[0], cp[1], cp[2], cp[3]);
+ printf("%d\n",*p++);
+
+ if (p >= (int *)&cbqtrace_buffer[NCBQTRACE])
+ p = (int *)cbqtrace_buffer;
+ }
+}
+#endif /* CBQ_TRACE */
+#endif /* ALTQ_CBQ */
+
+#if defined(ALTQ_CBQ) || defined(ALTQ_RED) || defined(ALTQ_RIO) || defined(ALTQ_HFSC) || defined(ALTQ_PRIQ)
+#if !defined(__GNUC__) || defined(ALTQ_DEBUG)
+
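+/*
+ * The class queues are circular singly-linked lists of mbufs:
+ * qtail(q) points at the last packet and the last packet's m_nextpkt
+ * points back to the head, so an empty queue has qtail(q) == NULL.
+ */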
+void
+_addq(class_queue_t *q, mbuf_t *m)
+{
+ mbuf_t *m0;
+
+ if ((m0 = qtail(q)) != NULL)
+ m->m_nextpkt = m0->m_nextpkt;
+ else
+ m0 = m;
+ m0->m_nextpkt = m;
+ qtail(q) = m;
+ qlen(q)++;
+}
+
+mbuf_t *
+_getq(class_queue_t *q)
+{
+ mbuf_t *m, *m0;
+
+ if ((m = qtail(q)) == NULL)
+ return (NULL);
+ if ((m0 = m->m_nextpkt) != m)
+ m->m_nextpkt = m0->m_nextpkt;
+ else {
+ ASSERT(qlen(q) == 1);
+ qtail(q) = NULL;
+ }
+ qlen(q)--;
+ m0->m_nextpkt = NULL;
+ return (m0);
+}
+
+/* drop a packet at the tail of the queue */
+mbuf_t *
+_getq_tail(class_queue_t *q)
+{
+ mbuf_t *m, *m0, *prev;
+
+ if ((m = m0 = qtail(q)) == NULL)
+ return NULL;
+ do {
+ prev = m0;
+ m0 = m0->m_nextpkt;
+ } while (m0 != m);
+ prev->m_nextpkt = m->m_nextpkt;
+ if (prev == m) {
+ ASSERT(qlen(q) == 1);
+ qtail(q) = NULL;
+ } else
+ qtail(q) = prev;
+ qlen(q)--;
+ m->m_nextpkt = NULL;
+ return (m);
+}
+
+/* randomly select a packet in the queue */
+mbuf_t *
+_getq_random(class_queue_t *q)
+{
+ struct mbuf *m;
+ int i, n;
+
+ if ((m = qtail(q)) == NULL)
+ return NULL;
+ if (m->m_nextpkt == m) {
+ ASSERT(qlen(q) == 1);
+ qtail(q) = NULL;
+ } else {
+ struct mbuf *prev = NULL;
+
+ n = arc4random() % qlen(q) + 1;
+ for (i = 0; i < n; i++) {
+ prev = m;
+ m = m->m_nextpkt;
+ }
+ prev->m_nextpkt = m->m_nextpkt;
+ if (m == qtail(q))
+ qtail(q) = prev;
+ }
+ qlen(q)--;
+ m->m_nextpkt = NULL;
+ return (m);
+}
+
+void
+_removeq(class_queue_t *q, mbuf_t *m)
+{
+ mbuf_t *m0, *prev;
+
+ m0 = qtail(q);
+ do {
+ prev = m0;
+ m0 = m0->m_nextpkt;
+ } while (m0 != m);
+ prev->m_nextpkt = m->m_nextpkt;
+ if (prev == m)
+ qtail(q) = NULL;
+ else if (qtail(q) == m)
+ qtail(q) = prev;
+ qlen(q)--;
+}
+
+void
+_flushq(class_queue_t *q)
+{
+ mbuf_t *m;
+
+ while ((m = _getq(q)) != NULL)
+ m_freem(m);
+ ASSERT(qlen(q) == 0);
+}
+
+#endif /* !__GNUC__ || ALTQ_DEBUG */
+#endif /* ALTQ_CBQ || ALTQ_RED || ALTQ_RIO || ALTQ_HFSC || ALTQ_PRIQ */
diff --git a/contrib/altq/rtems/freebsd/altq/altq_rmclass.h b/contrib/altq/rtems/freebsd/altq/altq_rmclass.h
new file mode 100644
index 00000000..19693173
--- /dev/null
+++ b/contrib/altq/rtems/freebsd/altq/altq_rmclass.h
@@ -0,0 +1,266 @@
+/* $KAME: altq_rmclass.h,v 1.10 2003/08/20 23:30:23 itojun Exp $ */
+
+/*
+ * Copyright (c) 1991-1997 Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the Network Research
+ * Group at Lawrence Berkeley Laboratory.
+ * 4. Neither the name of the University nor of the Laboratory may be used
+ * to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _ALTQ_ALTQ_RMCLASS_HH_
+#define _ALTQ_ALTQ_RMCLASS_HH_
+
+#include <rtems/freebsd/altq/altq_classq.h>
+
+/* #pragma ident "@(#)rm_class.h 1.20 97/10/23 SMI" */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define RM_MAXPRIO 8 /* Max priority */
+
+#ifdef _KERNEL
+
+typedef struct mbuf mbuf_t;
+typedef struct rm_ifdat rm_ifdat_t;
+typedef struct rm_class rm_class_t;
+
+struct red;
+
+/*
+ * Macros for dealing with time values. We assume all times are
+ * 'timevals'. `microtime' is used to get the best available clock
+ * resolution. If `microtime' *doesn't* return a value that's about
+ * ten times smaller than the average packet time on the fastest
+ * link that will use these routines, a slightly different clock
+ * scheme than this one should be used.
+ * (Bias due to truncation error in this scheme will overestimate utilization
+ * and discriminate against high bandwidth classes. To remove this bias an
+ * integrator needs to be added. The simplest integrator uses a history of
+ * 10 * avg.packet.time / min.tick.time packet completion entries. This is
+ * straightforward to add, but we don't want to pay the extra memory
+ * traffic to maintain it if it's not necessary (occasionally a vendor
+ * accidentally builds a workstation with a decent clock - e.g., Sun & HP).)
+ */
+
+#define RM_GETTIME(now) microtime(&now)
+
+#define TV_LT(a, b) (((a)->tv_sec < (b)->tv_sec) || \
+ (((a)->tv_usec < (b)->tv_usec) && ((a)->tv_sec <= (b)->tv_sec)))
+
+#define TV_DELTA(a, b, delta) { \
+ register int xxs; \
+ \
+ delta = (a)->tv_usec - (b)->tv_usec; \
+ if ((xxs = (a)->tv_sec - (b)->tv_sec)) { \
+ switch (xxs) { \
+ default: \
+ /* if (xxs < 0) \
+ printf("rm_class: bogus time values\n"); */ \
+ delta = 0; \
+ /* fall through */ \
+ case 2: \
+ delta += 1000000; \
+ /* fall through */ \
+ case 1: \
+ delta += 1000000; \
+ break; \
+ } \
+ } \
+}
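+/*
+ * note that TV_DELTA saturates: when the tv_sec fields differ by three
+ * or more (or the values are bogus), the default case yields exactly
+ * 2000000 us, which is why rmc_update_class_util treats
+ * idle >= 2000000 as "idle long enough".
+ */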
+
+#define TV_ADD_DELTA(a, delta, res) { \
+ register int xxus = (a)->tv_usec + (delta); \
+ \
+ (res)->tv_sec = (a)->tv_sec; \
+ while (xxus >= 1000000) { \
+ ++((res)->tv_sec); \
+ xxus -= 1000000; \
+ } \
+ (res)->tv_usec = xxus; \
+}
+
+#define RM_TIMEOUT 2 /* 1 Clock tick. */
+
+#if 1
+#define RM_MAXQUEUED 1 /* this isn't used in ALTQ/CBQ */
+#else
+#define RM_MAXQUEUED 16 /* Max number of packets downstream of CBQ */
+#endif
+#define RM_MAXQUEUE 64 /* Max queue length */
+#define RM_FILTER_GAIN 5 /* log2 of gain, e.g., 5 => 31/32 */
+#define RM_POWER (1 << RM_FILTER_GAIN)
+#define RM_MAXDEPTH 32
+#define RM_NS_PER_SEC (1000000000)
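+/*
+ * with RM_FILTER_GAIN = 5 the avgidle filter decays by 1/32 per packet
+ * (avgidle <- avgidle*(31/32) + idle); RM_POWER = 2^5 appears in the
+ * overlimit path of rmc_update_class_util(), where a negative avgidle
+ * is converted back into the suspension time tidle.
+ */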
+
+typedef struct _rm_class_stats_ {
+ u_int handle;
+ u_int depth;
+
+ struct pktcntr xmit_cnt; /* packets sent in this class */
+ struct pktcntr drop_cnt; /* dropped packets */
+ u_int over; /* # times went over limit */
+ u_int borrows; /* # times tried to borrow */
+ u_int overactions; /* # times invoked overlimit action */
+ u_int delays; /* # times invoked delay actions */
+} rm_class_stats_t;
+
+/*
+ * CBQ Class state structure
+ */
+struct rm_class {
+ class_queue_t *q_; /* Queue of packets */
+ rm_ifdat_t *ifdat_;
+ int pri_; /* Class priority. */
+ int depth_; /* Class depth */
+ u_int ns_per_byte_; /* NanoSeconds per byte. */
+ u_int maxrate_; /* Bytes per second for this class. */
+ u_int allotment_; /* Fraction of link bandwidth. */
+ u_int w_allotment_; /* Weighted allotment for WRR */
+ int bytes_alloc_; /* Allocation for round of WRR */
+
+ int avgidle_;
+ int maxidle_;
+ int minidle_;
+ int offtime_;
+ int sleeping_; /* != 0 if delaying */
+ int qthresh_; /* Queue threshold for formal link sharing */
+ int leaf_; /* Note whether leaf class or not.*/
+
+ rm_class_t *children_; /* Children of this class */
+ rm_class_t *next_; /* Next pointer, used if child */
+
+ rm_class_t *peer_; /* Peer class */
+ rm_class_t *borrow_; /* Borrow class */
+ rm_class_t *parent_; /* Parent class */
+
+ void (*overlimit)(struct rm_class *, struct rm_class *);
+ void (*drop)(struct rm_class *); /* Class drop action. */
+
+ struct red *red_; /* RED state pointer */
+ struct altq_pktattr *pktattr_; /* saved hdr used by RED/ECN */
+ int flags_;
+
+ int last_pkttime_; /* saved pkt_time */
+ struct timeval undertime_; /* time can next send */
+ struct timeval last_; /* time last packet sent */
+ struct timeval overtime_;
+ struct callout callout_; /* for timeout() calls */
+
+ rm_class_stats_t stats_; /* Class Statistics */
+};
+
+/*
+ * CBQ Interface state
+ */
+struct rm_ifdat {
+ int queued_; /* # pkts queued downstream */
+	int		efficient_;	/* Link efficiency bit */
+ int wrr_; /* Enable Weighted Round-Robin */
+ u_long ns_per_byte_; /* Link byte speed. */
+ int maxqueued_; /* Max packets to queue */
+ int maxpkt_; /* Max packet size. */
+ int qi_; /* In/out pointers for downstream */
+ int qo_; /* packets */
+
+ /*
+ * Active class state and WRR state.
+ */
+ rm_class_t *active_[RM_MAXPRIO]; /* Active cl's in each pri */
+ int na_[RM_MAXPRIO]; /* # of active cl's in a pri */
+ int num_[RM_MAXPRIO]; /* # of cl's per pri */
+ int alloc_[RM_MAXPRIO]; /* Byte Allocation */
+ u_long M_[RM_MAXPRIO]; /* WRR weights. */
+
+ /*
+ * Network Interface/Solaris Queue state pointer.
+ */
+ struct ifaltq *ifq_;
+ rm_class_t *default_; /* Default Pkt class, BE */
+ rm_class_t *root_; /* Root Link class. */
+ rm_class_t *ctl_; /* Control Traffic class. */
+ void (*restart)(struct ifaltq *); /* Restart routine. */
+
+ /*
+ * Current packet downstream packet state and dynamic state.
+ */
+ rm_class_t *borrowed_[RM_MAXQUEUED]; /* Class borrowed last */
+ rm_class_t *class_[RM_MAXQUEUED]; /* class sending */
+ int curlen_[RM_MAXQUEUED]; /* Current pktlen */
+ struct timeval now_[RM_MAXQUEUED]; /* Current packet time. */
+	int		is_overlimit_[RM_MAXQUEUED];/* Overlimit state of pkt */
+
+ int cutoff_; /* Cut-off depth for borrowing */
+
+ struct timeval ifnow_; /* expected xmit completion time */
+#if 1 /* ALTQ4PPP */
+ int maxiftime_; /* max delay inside interface */
+#endif
+ rm_class_t *pollcache_; /* cached rm_class by poll operation */
+};
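+
+/*
+ * the borrowed_/class_/curlen_/now_ arrays above form a ring of
+ * maxqueued_ entries describing packets handed to the driver: qi_ is
+ * advanced by rmc_dequeue_next() when a packet is dequeued and qo_ by
+ * rmc_update_class_util() when its transmission completes.
+ */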
+
+/* flags for rmc_init and rmc_newclass */
+/* class flags */
+#define RMCF_RED 0x0001
+#define RMCF_ECN 0x0002
+#define RMCF_RIO 0x0004
+#define RMCF_FLOWVALVE 0x0008 /* use flowvalve (aka penalty-box) */
+#define RMCF_CLEARDSCP 0x0010 /* clear diffserv codepoint */
+
+/* flags for rmc_init */
+#define RMCF_WRR 0x0100
+#define RMCF_EFFICIENT 0x0200
+
+#define is_a_parent_class(cl) ((cl)->children_ != NULL)
+
+extern rm_class_t *rmc_newclass(int, struct rm_ifdat *, u_int,
+ void (*)(struct rm_class *, struct rm_class *),
+ int, struct rm_class *, struct rm_class *,
+ u_int, int, u_int, int, int);
+extern void rmc_delete_class(struct rm_ifdat *, struct rm_class *);
+extern int rmc_modclass(struct rm_class *, u_int, int,
+ u_int, int, u_int, int);
+extern void rmc_init(struct ifaltq *, struct rm_ifdat *, u_int,
+ void (*)(struct ifaltq *),
+ int, int, u_int, int, u_int, int);
+extern int rmc_queue_packet(struct rm_class *, mbuf_t *);
+extern mbuf_t *rmc_dequeue_next(struct rm_ifdat *, int);
+extern void rmc_update_class_util(struct rm_ifdat *);
+extern void rmc_delay_action(struct rm_class *, struct rm_class *);
+extern void rmc_dropall(struct rm_class *);
+extern int rmc_get_weight(struct rm_ifdat *, int);
+
+#endif /* _KERNEL */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _ALTQ_ALTQ_RMCLASS_HH_ */
diff --git a/contrib/altq/rtems/freebsd/altq/altq_rmclass_debug.h b/contrib/altq/rtems/freebsd/altq/altq_rmclass_debug.h
new file mode 100644
index 00000000..6723a4b7
--- /dev/null
+++ b/contrib/altq/rtems/freebsd/altq/altq_rmclass_debug.h
@@ -0,0 +1,112 @@
+/* $KAME: altq_rmclass_debug.h,v 1.3 2002/11/29 04:36:24 kjc Exp $ */
+
+/*
+ * Copyright (c) Sun Microsystems, Inc. 1998 All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the SMCC Technology
+ * Development Group at Sun Microsystems, Inc.
+ *
+ * 4. The name of the Sun Microsystems, Inc nor may not be used to endorse or
+ * promote products derived from this software without specific prior
+ * written permission.
+ *
+ * SUN MICROSYSTEMS DOES NOT CLAIM MERCHANTABILITY OF THIS SOFTWARE OR THE
+ * SUITABILITY OF THIS SOFTWARE FOR ANY PARTICULAR PURPOSE. The software is
+ * provided "as is" without express or implied warranty of any kind.
+ *
+ * These notices must be retained in any copies of any part of this software.
+ */
+
+#ifndef _ALTQ_ALTQ_RMCLASS_DEBUG_HH_
+#define _ALTQ_ALTQ_RMCLASS_DEBUG_HH_
+
+/* #pragma ident "@(#)rm_class_debug.h 1.7 98/05/04 SMI" */
+
+/*
+ * CBQ debugging macros
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef CBQ_TRACE
+#ifndef NCBQTRACE
+#define NCBQTRACE (16 * 1024)
+#endif
+
+/*
+ * To view the trace output, using adb, type:
+ * adb -k /dev/ksyms /dev/mem <cr>, then type
+ * cbqtrace_count/D to get the count, then type
+ * cbqtrace_buffer,0tcount/Dp4C" "Xn
+ * This will dump the trace buffer from 0 to count.
+ */
+/*
+ * in ALTQ, "call cbqtrace_dump(N)" from DDB to display 20 events
+ * from Nth event in the circular buffer.
+ */
+
+struct cbqtrace {
+ int count;
+ int function; /* address of function */
+ int trace_action; /* descriptive 4 characters */
+ int object; /* object operated on */
+};
+
+extern struct cbqtrace cbqtrace_buffer[];
+extern struct cbqtrace *cbqtrace_ptr;
+extern int cbqtrace_count;
+
+#define CBQTRACEINIT() { \
+ if (cbqtrace_ptr == NULL) \
+ cbqtrace_ptr = cbqtrace_buffer; \
+ else { \
+ cbqtrace_ptr = cbqtrace_buffer; \
+ bzero((void *)cbqtrace_ptr, sizeof(cbqtrace_buffer)); \
+ cbqtrace_count = 0; \
+ } \
+}
+
+#define LOCK_TRACE() splimp()
+#define UNLOCK_TRACE(x) splx(x)
+
+#define CBQTRACE(func, act, obj) { \
+ int __s = LOCK_TRACE(); \
+ int *_p = &cbqtrace_ptr->count; \
+ *_p++ = ++cbqtrace_count; \
+ *_p++ = (int)(func); \
+ *_p++ = (int)(act); \
+ *_p++ = (int)(obj); \
+ if ((struct cbqtrace *)(void *)_p >= &cbqtrace_buffer[NCBQTRACE])\
+ cbqtrace_ptr = cbqtrace_buffer; \
+ else \
+ cbqtrace_ptr = (struct cbqtrace *)(void *)_p; \
+ UNLOCK_TRACE(__s); \
+ }
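+
+/*
+ * typical call site (from altq_rmclass.c):
+ *	CBQTRACE(rmc_delay_action, 'yled', cl->stats_.handle);
+ * the action code is a four-character constant; its in-memory byte
+ * order is implementation-defined, but on a little-endian machine
+ * cbqtrace_dump() prints the bytes low-to-high, so 'yled' shows up
+ * as "dely" in the dump.
+ */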
+#else
+
+/* If no tracing, define no-ops */
+#define CBQTRACEINIT()
+#define CBQTRACE(a, b, c)
+
+#endif /* !CBQ_TRACE */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _ALTQ_ALTQ_RMCLASS_DEBUG_HH_ */
diff --git a/contrib/altq/rtems/freebsd/altq/altq_subr.c b/contrib/altq/rtems/freebsd/altq/altq_subr.c
new file mode 100644
index 00000000..ab6adc7f
--- /dev/null
+++ b/contrib/altq/rtems/freebsd/altq/altq_subr.c
@@ -0,0 +1,2032 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/* $FreeBSD$ */
+/* $KAME: altq_subr.c,v 1.21 2003/11/06 06:32:53 kjc Exp $ */
+
+/*
+ * Copyright (C) 1997-2003
+ * Sony Computer Science Laboratories Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY SONY CSL AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL SONY CSL OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#if defined(__FreeBSD__) || defined(__NetBSD__)
+#include <rtems/freebsd/local/opt_altq.h>
+#include <rtems/freebsd/local/opt_inet.h>
+#ifdef __FreeBSD__
+#include <rtems/freebsd/local/opt_inet6.h>
+#endif
+#endif /* __FreeBSD__ || __NetBSD__ */
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/proc.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/socketvar.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/errno.h>
+#include <rtems/freebsd/sys/syslog.h>
+#include <rtems/freebsd/sys/sysctl.h>
+#include <rtems/freebsd/sys/queue.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/if_dl.h>
+#include <rtems/freebsd/net/if_types.h>
+#ifdef __FreeBSD__
+#include <rtems/freebsd/net/vnet.h>
+#endif
+
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/in_systm.h>
+#include <rtems/freebsd/netinet/ip.h>
+#ifdef INET6
+#include <rtems/freebsd/netinet/ip6.h>
+#endif
+#include <rtems/freebsd/netinet/tcp.h>
+#include <rtems/freebsd/netinet/udp.h>
+
+#include <rtems/freebsd/net/pfvar.h>
+#include <rtems/freebsd/altq/altq.h>
+#ifdef ALTQ3_COMPAT
+#include <rtems/freebsd/altq/altq_conf.h>
+#endif
+
+/* machine dependent clock related includes */
+#ifdef __FreeBSD__
+#if __FreeBSD__ < 3
+#include <rtems/freebsd/local/opt_cpu.h> /* for FreeBSD-2.2.8 to get i586_ctr_freq */
+#endif
+#include <rtems/freebsd/sys/bus.h>
+#include <rtems/freebsd/sys/cpu.h>
+#include <rtems/freebsd/sys/eventhandler.h>
+#include <rtems/freebsd/machine/clock.h>
+#endif
+#if defined(__i386__)
+#include <rtems/freebsd/machine/cpufunc.h> /* for pentium tsc */
+#include <rtems/freebsd/machine/specialreg.h> /* for CPUID_TSC */
+#ifdef __FreeBSD__
+#include <rtems/freebsd/machine/md_var.h> /* for cpu_feature */
+#elif defined(__NetBSD__) || defined(__OpenBSD__)
+#include <rtems/freebsd/machine/cpu.h> /* for cpu_feature */
+#endif
+#endif /* __i386__ */
+
+/*
+ * internal function prototypes
+ */
+static void tbr_timeout(void *);
+int (*altq_input)(struct mbuf *, int) = NULL;
+static struct mbuf *tbr_dequeue(struct ifaltq *, int);
+static int tbr_timer = 0; /* token bucket regulator timer */
+#if !defined(__FreeBSD__) || (__FreeBSD_version < 600000)
+static struct callout tbr_callout = CALLOUT_INITIALIZER;
+#else
+static struct callout tbr_callout;
+#endif
+
+#ifdef ALTQ3_CLFIER_COMPAT
+static int extract_ports4(struct mbuf *, struct ip *, struct flowinfo_in *);
+#ifdef INET6
+static int extract_ports6(struct mbuf *, struct ip6_hdr *,
+ struct flowinfo_in6 *);
+#endif
+static int apply_filter4(u_int32_t, struct flow_filter *,
+ struct flowinfo_in *);
+static int apply_ppfilter4(u_int32_t, struct flow_filter *,
+ struct flowinfo_in *);
+#ifdef INET6
+static int apply_filter6(u_int32_t, struct flow_filter6 *,
+ struct flowinfo_in6 *);
+#endif
+static int apply_tosfilter4(u_int32_t, struct flow_filter *,
+ struct flowinfo_in *);
+static u_long get_filt_handle(struct acc_classifier *, int);
+static struct acc_filter *filth_to_filtp(struct acc_classifier *, u_long);
+static u_int32_t filt2fibmask(struct flow_filter *);
+
+static void ip4f_cache(struct ip *, struct flowinfo_in *);
+static int ip4f_lookup(struct ip *, struct flowinfo_in *);
+static int ip4f_init(void);
+static struct ip4_frag *ip4f_alloc(void);
+static void ip4f_free(struct ip4_frag *);
+#endif /* ALTQ3_CLFIER_COMPAT */
+
+/*
+ * alternate queueing support routines
+ */
+
+/* look up the queue state by the interface name and the queueing type. */
+void *
+altq_lookup(name, type)
+ char *name;
+ int type;
+{
+ struct ifnet *ifp;
+
+ if ((ifp = ifunit(name)) != NULL) {
+ /* read if_snd unlocked */
+ if (type != ALTQT_NONE && ifp->if_snd.altq_type == type)
+ return (ifp->if_snd.altq_disc);
+ }
+
+ return NULL;
+}
+
+int
+altq_attach(ifq, type, discipline, enqueue, dequeue, request, clfier, classify)
+ struct ifaltq *ifq;
+ int type;
+ void *discipline;
+ int (*enqueue)(struct ifaltq *, struct mbuf *, struct altq_pktattr *);
+ struct mbuf *(*dequeue)(struct ifaltq *, int);
+ int (*request)(struct ifaltq *, int, void *);
+ void *clfier;
+ void *(*classify)(void *, struct mbuf *, int);
+{
+ IFQ_LOCK(ifq);
+ if (!ALTQ_IS_READY(ifq)) {
+ IFQ_UNLOCK(ifq);
+ return ENXIO;
+ }
+
+#ifdef ALTQ3_COMPAT
+ /*
+ * pfaltq can override the existing discipline, but altq3 cannot.
+ * check these if clfier is not NULL (which implies altq3).
+ */
+ if (clfier != NULL) {
+ if (ALTQ_IS_ENABLED(ifq)) {
+ IFQ_UNLOCK(ifq);
+ return EBUSY;
+ }
+ if (ALTQ_IS_ATTACHED(ifq)) {
+ IFQ_UNLOCK(ifq);
+ return EEXIST;
+ }
+ }
+#endif
+ ifq->altq_type = type;
+ ifq->altq_disc = discipline;
+ ifq->altq_enqueue = enqueue;
+ ifq->altq_dequeue = dequeue;
+ ifq->altq_request = request;
+ ifq->altq_clfier = clfier;
+ ifq->altq_classify = classify;
+ ifq->altq_flags &= (ALTQF_CANTCHANGE|ALTQF_ENABLED);
+#ifdef ALTQ3_COMPAT
+#ifdef ALTQ_KLD
+ altq_module_incref(type);
+#endif
+#endif
+ IFQ_UNLOCK(ifq);
+ return 0;
+}
+
+int
+altq_detach(ifq)
+ struct ifaltq *ifq;
+{
+ IFQ_LOCK(ifq);
+
+ if (!ALTQ_IS_READY(ifq)) {
+ IFQ_UNLOCK(ifq);
+ return ENXIO;
+ }
+ if (ALTQ_IS_ENABLED(ifq)) {
+ IFQ_UNLOCK(ifq);
+ return EBUSY;
+ }
+ if (!ALTQ_IS_ATTACHED(ifq)) {
+ IFQ_UNLOCK(ifq);
+ return (0);
+ }
+#ifdef ALTQ3_COMPAT
+#ifdef ALTQ_KLD
+ altq_module_declref(ifq->altq_type);
+#endif
+#endif
+
+ ifq->altq_type = ALTQT_NONE;
+ ifq->altq_disc = NULL;
+ ifq->altq_enqueue = NULL;
+ ifq->altq_dequeue = NULL;
+ ifq->altq_request = NULL;
+ ifq->altq_clfier = NULL;
+ ifq->altq_classify = NULL;
+ ifq->altq_flags &= ALTQF_CANTCHANGE;
+
+ IFQ_UNLOCK(ifq);
+ return 0;
+}
+
+int
+altq_enable(ifq)
+ struct ifaltq *ifq;
+{
+ int s;
+
+ IFQ_LOCK(ifq);
+
+ if (!ALTQ_IS_READY(ifq)) {
+ IFQ_UNLOCK(ifq);
+ return ENXIO;
+ }
+ if (ALTQ_IS_ENABLED(ifq)) {
+ IFQ_UNLOCK(ifq);
+ return 0;
+ }
+
+#ifdef __NetBSD__
+ s = splnet();
+#else
+ s = splimp();
+#endif
+ IFQ_PURGE_NOLOCK(ifq);
+ ASSERT(ifq->ifq_len == 0);
+ ifq->ifq_drv_maxlen = 0; /* disable bulk dequeue */
+ ifq->altq_flags |= ALTQF_ENABLED;
+ if (ifq->altq_clfier != NULL)
+ ifq->altq_flags |= ALTQF_CLASSIFY;
+ splx(s);
+
+ IFQ_UNLOCK(ifq);
+ return 0;
+}
+
+int
+altq_disable(ifq)
+ struct ifaltq *ifq;
+{
+ int s;
+
+ IFQ_LOCK(ifq);
+ if (!ALTQ_IS_ENABLED(ifq)) {
+ IFQ_UNLOCK(ifq);
+ return 0;
+ }
+
+#ifdef __NetBSD__
+ s = splnet();
+#else
+ s = splimp();
+#endif
+ IFQ_PURGE_NOLOCK(ifq);
+ ASSERT(ifq->ifq_len == 0);
+ ifq->altq_flags &= ~(ALTQF_ENABLED|ALTQF_CLASSIFY);
+ splx(s);
+
+ IFQ_UNLOCK(ifq);
+ return 0;
+}
+
+#ifdef ALTQ_DEBUG
+void
+altq_assert(file, line, failedexpr)
+ const char *file, *failedexpr;
+ int line;
+{
+ (void)printf("altq assertion \"%s\" failed: file \"%s\", line %d\n",
+ failedexpr, file, line);
+ panic("altq assertion");
+ /* NOTREACHED */
+}
+#endif
+
+/*
+ * internal representation of token bucket parameters
+ *	rate:	bytes per machine clock tick, scaled by 2^32, i.e.
+ *		(((bits_per_sec) / 8) << 32) / machclk_freq
+ *	depth:	bytes << 32
+ */
+#define TBR_SHIFT 32
+#define TBR_SCALE(x) ((int64_t)(x) << TBR_SHIFT)
+#define TBR_UNSCALE(x) ((x) >> TBR_SHIFT)
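+/*
+ * example: with profile->rate = 100Mbps and machclk_freq = 1GHz,
+ * tbr_rate = ((100000000 / 8) << 32) / 10^9, i.e. 0.0125 bytes of
+ * credit per machine clock tick kept in 32.32 fixed point.
+ */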
+
+static struct mbuf *
+tbr_dequeue(ifq, op)
+ struct ifaltq *ifq;
+ int op;
+{
+ struct tb_regulator *tbr;
+ struct mbuf *m;
+ int64_t interval;
+ u_int64_t now;
+
+ IFQ_LOCK_ASSERT(ifq);
+ tbr = ifq->altq_tbr;
+ if (op == ALTDQ_REMOVE && tbr->tbr_lastop == ALTDQ_POLL) {
+ /* if this is a remove after poll, bypass tbr check */
+ } else {
+ /* update token only when it is negative */
+ if (tbr->tbr_token <= 0) {
+ now = read_machclk();
+ interval = now - tbr->tbr_last;
+ if (interval >= tbr->tbr_filluptime)
+ tbr->tbr_token = tbr->tbr_depth;
+ else {
+ tbr->tbr_token += interval * tbr->tbr_rate;
+ if (tbr->tbr_token > tbr->tbr_depth)
+ tbr->tbr_token = tbr->tbr_depth;
+ }
+ tbr->tbr_last = now;
+ }
+ /* if token is still negative, don't allow dequeue */
+ if (tbr->tbr_token <= 0)
+ return (NULL);
+ }
+
+ if (ALTQ_IS_ENABLED(ifq))
+ m = (*ifq->altq_dequeue)(ifq, op);
+ else {
+ if (op == ALTDQ_POLL)
+ _IF_POLL(ifq, m);
+ else
+ _IF_DEQUEUE(ifq, m);
+ }
+
+ if (m != NULL && op == ALTDQ_REMOVE)
+ tbr->tbr_token -= TBR_SCALE(m_pktlen(m));
+ tbr->tbr_lastop = op;
+ return (m);
+}
+
+/*
+ * set a token bucket regulator.
+ * if the specified rate is zero, the token bucket regulator is deleted.
+ */
+int
+tbr_set(ifq, profile)
+ struct ifaltq *ifq;
+ struct tb_profile *profile;
+{
+ struct tb_regulator *tbr, *otbr;
+
+ if (tbr_dequeue_ptr == NULL)
+ tbr_dequeue_ptr = tbr_dequeue;
+
+ if (machclk_freq == 0)
+ init_machclk();
+ if (machclk_freq == 0) {
+ printf("tbr_set: no cpu clock available!\n");
+ return (ENXIO);
+ }
+
+ IFQ_LOCK(ifq);
+ if (profile->rate == 0) {
+ /* delete this tbr */
+ if ((tbr = ifq->altq_tbr) == NULL) {
+ IFQ_UNLOCK(ifq);
+ return (ENOENT);
+ }
+ ifq->altq_tbr = NULL;
+ free(tbr, M_DEVBUF);
+ IFQ_UNLOCK(ifq);
+ return (0);
+ }
+
+ IFQ_UNLOCK(ifq);
+ tbr = malloc(sizeof(struct tb_regulator),
+ M_DEVBUF, M_WAITOK);
+ if (tbr == NULL) { /* can not happen */
+ IFQ_UNLOCK(ifq);
+ return (ENOMEM);
+ }
+ bzero(tbr, sizeof(struct tb_regulator));
+
+ tbr->tbr_rate = TBR_SCALE(profile->rate / 8) / machclk_freq;
+ tbr->tbr_depth = TBR_SCALE(profile->depth);
+ if (tbr->tbr_rate > 0)
+ tbr->tbr_filluptime = tbr->tbr_depth / tbr->tbr_rate;
+ else
+ tbr->tbr_filluptime = 0xffffffffffffffffLL;
+ tbr->tbr_token = tbr->tbr_depth;
+ tbr->tbr_last = read_machclk();
+ tbr->tbr_lastop = ALTDQ_REMOVE;
+
+ IFQ_LOCK(ifq);
+ otbr = ifq->altq_tbr;
+ ifq->altq_tbr = tbr; /* set the new tbr */
+
+ if (otbr != NULL)
+ free(otbr, M_DEVBUF);
+ else {
+ if (tbr_timer == 0) {
+ CALLOUT_RESET(&tbr_callout, 1, tbr_timeout, (void *)0);
+ tbr_timer = 1;
+ }
+ }
+ IFQ_UNLOCK(ifq);
+ return (0);
+}
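+
+/*
+ * a minimal usage sketch, assuming "ifp" points to the target ifnet:
+ *
+ *	struct tb_profile p;
+ *
+ *	p.rate = 10000000;	(10Mbps, in bits per second)
+ *	p.depth = 16 * 1514;	(burst allowance in bytes)
+ *	error = tbr_set(&ifp->if_snd, &p);
+ *
+ * calling tbr_set() again with p.rate = 0 deletes the regulator.
+ */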
+
+/*
+ * tbr_timeout goes through the interface list, and kicks the drivers
+ * if necessary.
+ *
+ * MPSAFE
+ */
+static void
+tbr_timeout(arg)
+ void *arg;
+{
+#if defined(__FreeBSD__)
+ VNET_ITERATOR_DECL(vnet_iter);
+#endif
+ struct ifnet *ifp;
+ int active, s;
+
+ active = 0;
+#ifdef __NetBSD__
+ s = splnet();
+#else
+ s = splimp();
+#endif
+#if defined(__FreeBSD__) && (__FreeBSD_version >= 500000)
+ IFNET_RLOCK_NOSLEEP();
+ VNET_LIST_RLOCK_NOSLEEP();
+ VNET_FOREACH(vnet_iter) {
+ CURVNET_SET(vnet_iter);
+#endif
+ for (ifp = TAILQ_FIRST(&V_ifnet); ifp;
+ ifp = TAILQ_NEXT(ifp, if_list)) {
+ /* read from if_snd unlocked */
+ if (!TBR_IS_ENABLED(&ifp->if_snd))
+ continue;
+ active++;
+ if (!IFQ_IS_EMPTY(&ifp->if_snd) &&
+ ifp->if_start != NULL)
+ (*ifp->if_start)(ifp);
+ }
+#if defined(__FreeBSD__) && (__FreeBSD_version >= 500000)
+ CURVNET_RESTORE();
+ }
+ VNET_LIST_RUNLOCK_NOSLEEP();
+ IFNET_RUNLOCK_NOSLEEP();
+#endif
+ splx(s);
+ if (active > 0)
+ CALLOUT_RESET(&tbr_callout, 1, tbr_timeout, (void *)0);
+ else
+ tbr_timer = 0; /* don't need tbr_timer anymore */
+#if defined(__alpha__) && !defined(ALTQ_NOPCC)
+ {
+ /*
+ * XXX read out the machine dependent clock once a second
+ * to detect counter wrap-around.
+ */
+ static u_int cnt;
+
+ if (++cnt >= hz) {
+ (void)read_machclk();
+ cnt = 0;
+ }
+ }
+#endif /* __alpha__ && !ALTQ_NOPCC */
+}
+
+/*
+ * get token bucket regulator profile
+ */
+int
+tbr_get(ifq, profile)
+ struct ifaltq *ifq;
+ struct tb_profile *profile;
+{
+ struct tb_regulator *tbr;
+
+ IFQ_LOCK(ifq);
+ if ((tbr = ifq->altq_tbr) == NULL) {
+ profile->rate = 0;
+ profile->depth = 0;
+ } else {
+ profile->rate =
+ (u_int)TBR_UNSCALE(tbr->tbr_rate * 8 * machclk_freq);
+ profile->depth = (u_int)TBR_UNSCALE(tbr->tbr_depth);
+ }
+ IFQ_UNLOCK(ifq);
+ return (0);
+}
+
+/*
+ * attach a discipline to the interface. if one already exists, it is
+ * overridden.
+ * Locking is done in the discipline specific attach functions. Basically
+ * they call back to altq_attach which takes care of the attach and locking.
+ */
+int
+altq_pfattach(struct pf_altq *a)
+{
+ int error = 0;
+
+ switch (a->scheduler) {
+ case ALTQT_NONE:
+ break;
+#ifdef ALTQ_CBQ
+ case ALTQT_CBQ:
+ error = cbq_pfattach(a);
+ break;
+#endif
+#ifdef ALTQ_PRIQ
+ case ALTQT_PRIQ:
+ error = priq_pfattach(a);
+ break;
+#endif
+#ifdef ALTQ_HFSC
+ case ALTQT_HFSC:
+ error = hfsc_pfattach(a);
+ break;
+#endif
+ default:
+ error = ENXIO;
+ }
+
+ return (error);
+}
+
+/*
+ * detach a discipline from the interface.
+ * it is possible that the discipline was already overridden by another
+ * discipline.
+ */
+int
+altq_pfdetach(struct pf_altq *a)
+{
+ struct ifnet *ifp;
+ int s, error = 0;
+
+ if ((ifp = ifunit(a->ifname)) == NULL)
+ return (EINVAL);
+
+ /* if this discipline is no longer referenced, just return */
+ /* read unlocked from if_snd */
+ if (a->altq_disc == NULL || a->altq_disc != ifp->if_snd.altq_disc)
+ return (0);
+
+#ifdef __NetBSD__
+ s = splnet();
+#else
+ s = splimp();
+#endif
+ /* read unlocked from if_snd, _disable and _detach take care */
+ if (ALTQ_IS_ENABLED(&ifp->if_snd))
+ error = altq_disable(&ifp->if_snd);
+ if (error == 0)
+ error = altq_detach(&ifp->if_snd);
+ splx(s);
+
+ return (error);
+}
+
+/*
+ * add a discipline or a queue
+ * Locking is done in the discipline-specific functions, since they may
+ * malloc with M_WAITOK; it is also not yet clear which lock to use.
+ */
+int
+altq_add(struct pf_altq *a)
+{
+ int error = 0;
+
+ if (a->qname[0] != 0)
+ return (altq_add_queue(a));
+
+ if (machclk_freq == 0)
+ init_machclk();
+ if (machclk_freq == 0)
+ panic("altq_add: no cpu clock");
+
+ switch (a->scheduler) {
+#ifdef ALTQ_CBQ
+ case ALTQT_CBQ:
+ error = cbq_add_altq(a);
+ break;
+#endif
+#ifdef ALTQ_PRIQ
+ case ALTQT_PRIQ:
+ error = priq_add_altq(a);
+ break;
+#endif
+#ifdef ALTQ_HFSC
+ case ALTQT_HFSC:
+ error = hfsc_add_altq(a);
+ break;
+#endif
+ default:
+ error = ENXIO;
+ }
+
+ return (error);
+}
+
+/*
+ * remove a discipline or a queue
+ * It is yet unclear what lock to use to protect this operation, the
+ * discipline specific functions will determine and grab it
+ */
+int
+altq_remove(struct pf_altq *a)
+{
+ int error = 0;
+
+ if (a->qname[0] != 0)
+ return (altq_remove_queue(a));
+
+ switch (a->scheduler) {
+#ifdef ALTQ_CBQ
+ case ALTQT_CBQ:
+ error = cbq_remove_altq(a);
+ break;
+#endif
+#ifdef ALTQ_PRIQ
+ case ALTQT_PRIQ:
+ error = priq_remove_altq(a);
+ break;
+#endif
+#ifdef ALTQ_HFSC
+ case ALTQT_HFSC:
+ error = hfsc_remove_altq(a);
+ break;
+#endif
+ default:
+ error = ENXIO;
+ }
+
+ return (error);
+}
+
+/*
+ * add a queue to the discipline
+ * It is as yet unclear what lock to use to protect this operation; the
+ * discipline-specific functions will determine and grab it.
+ */
+int
+altq_add_queue(struct pf_altq *a)
+{
+ int error = 0;
+
+ switch (a->scheduler) {
+#ifdef ALTQ_CBQ
+ case ALTQT_CBQ:
+ error = cbq_add_queue(a);
+ break;
+#endif
+#ifdef ALTQ_PRIQ
+ case ALTQT_PRIQ:
+ error = priq_add_queue(a);
+ break;
+#endif
+#ifdef ALTQ_HFSC
+ case ALTQT_HFSC:
+ error = hfsc_add_queue(a);
+ break;
+#endif
+ default:
+ error = ENXIO;
+ }
+
+ return (error);
+}
+
+/*
+ * remove a queue from the discipline
+ * It is as yet unclear what lock to use to protect this operation; the
+ * discipline-specific functions will determine and grab it.
+ */
+int
+altq_remove_queue(struct pf_altq *a)
+{
+ int error = 0;
+
+ switch (a->scheduler) {
+#ifdef ALTQ_CBQ
+ case ALTQT_CBQ:
+ error = cbq_remove_queue(a);
+ break;
+#endif
+#ifdef ALTQ_PRIQ
+ case ALTQT_PRIQ:
+ error = priq_remove_queue(a);
+ break;
+#endif
+#ifdef ALTQ_HFSC
+ case ALTQT_HFSC:
+ error = hfsc_remove_queue(a);
+ break;
+#endif
+ default:
+ error = ENXIO;
+ }
+
+ return (error);
+}
+
+/*
+ * get queue statistics
+ * Locking is done in the discipline-specific functions, since they
+ * perform copyout operations; it is also not yet clear which lock to use.
+ */
+int
+altq_getqstats(struct pf_altq *a, void *ubuf, int *nbytes)
+{
+ int error = 0;
+
+ switch (a->scheduler) {
+#ifdef ALTQ_CBQ
+ case ALTQT_CBQ:
+ error = cbq_getqstats(a, ubuf, nbytes);
+ break;
+#endif
+#ifdef ALTQ_PRIQ
+ case ALTQT_PRIQ:
+ error = priq_getqstats(a, ubuf, nbytes);
+ break;
+#endif
+#ifdef ALTQ_HFSC
+ case ALTQT_HFSC:
+ error = hfsc_getqstats(a, ubuf, nbytes);
+ break;
+#endif
+ default:
+ error = ENXIO;
+ }
+
+ return (error);
+}
+
+/*
+ * read and write diffserv field in IPv4 or IPv6 header
+ */
+u_int8_t
+read_dsfield(m, pktattr)
+ struct mbuf *m;
+ struct altq_pktattr *pktattr;
+{
+ struct mbuf *m0;
+ u_int8_t ds_field = 0;
+
+ if (pktattr == NULL ||
+ (pktattr->pattr_af != AF_INET && pktattr->pattr_af != AF_INET6))
+ return ((u_int8_t)0);
+
+ /* verify that pattr_hdr is within the mbuf data */
+ for (m0 = m; m0 != NULL; m0 = m0->m_next)
+ if ((pktattr->pattr_hdr >= m0->m_data) &&
+ (pktattr->pattr_hdr < m0->m_data + m0->m_len))
+ break;
+ if (m0 == NULL) {
+ /* ick, pattr_hdr is stale */
+ pktattr->pattr_af = AF_UNSPEC;
+#ifdef ALTQ_DEBUG
+ printf("read_dsfield: can't locate header!\n");
+#endif
+ return ((u_int8_t)0);
+ }
+
+ if (pktattr->pattr_af == AF_INET) {
+ struct ip *ip = (struct ip *)pktattr->pattr_hdr;
+
+ if (ip->ip_v != 4)
+ return ((u_int8_t)0); /* version mismatch! */
+ ds_field = ip->ip_tos;
+ }
+#ifdef INET6
+ else if (pktattr->pattr_af == AF_INET6) {
+ struct ip6_hdr *ip6 = (struct ip6_hdr *)pktattr->pattr_hdr;
+ u_int32_t flowlabel;
+
+ flowlabel = ntohl(ip6->ip6_flow);
+ if ((flowlabel >> 28) != 6)
+ return ((u_int8_t)0); /* version mismatch! */
+ ds_field = (flowlabel >> 20) & 0xff;
+ }
+#endif
+ return (ds_field);
+}
+
+void
+write_dsfield(struct mbuf *m, struct altq_pktattr *pktattr, u_int8_t dsfield)
+{
+ struct mbuf *m0;
+
+ if (pktattr == NULL ||
+ (pktattr->pattr_af != AF_INET && pktattr->pattr_af != AF_INET6))
+ return;
+
+ /* verify that pattr_hdr is within the mbuf data */
+ for (m0 = m; m0 != NULL; m0 = m0->m_next)
+ if ((pktattr->pattr_hdr >= m0->m_data) &&
+ (pktattr->pattr_hdr < m0->m_data + m0->m_len))
+ break;
+ if (m0 == NULL) {
+ /* ick, pattr_hdr is stale */
+ pktattr->pattr_af = AF_UNSPEC;
+#ifdef ALTQ_DEBUG
+ printf("write_dsfield: can't locate header!\n");
+#endif
+ return;
+ }
+
+ if (pktattr->pattr_af == AF_INET) {
+ struct ip *ip = (struct ip *)pktattr->pattr_hdr;
+ u_int8_t old;
+ int32_t sum;
+
+ if (ip->ip_v != 4)
+ return; /* version mismatch! */
+ old = ip->ip_tos;
+ dsfield |= old & 3; /* leave CU bits */
+ if (old == dsfield)
+ return;
+ ip->ip_tos = dsfield;
+ /*
+ * update checksum (from RFC1624)
+ * HC' = ~(~HC + ~m + m')
+ */
+ sum = ~ntohs(ip->ip_sum) & 0xffff;
+ sum += 0xff00 + (~old & 0xff) + dsfield;
+ sum = (sum >> 16) + (sum & 0xffff);
+ sum += (sum >> 16); /* add carry */
+
+ ip->ip_sum = htons(~sum & 0xffff);
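+		/*
+		 * (the 0xff00 term accounts for the unchanged high byte
+		 * of the 16-bit word holding the TOS field: ~m + m' for
+		 * that byte is always 0xff, shifted into the high half.)
+		 */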
+ }
+#ifdef INET6
+ else if (pktattr->pattr_af == AF_INET6) {
+ struct ip6_hdr *ip6 = (struct ip6_hdr *)pktattr->pattr_hdr;
+ u_int32_t flowlabel;
+
+ flowlabel = ntohl(ip6->ip6_flow);
+ if ((flowlabel >> 28) != 6)
+ return; /* version mismatch! */
+ flowlabel = (flowlabel & 0xf03fffff) | (dsfield << 20);
+ ip6->ip6_flow = htonl(flowlabel);
+ }
+#endif
+ return;
+}
+
+
+/*
+ * high resolution clock support taking advantage of a machine dependent
+ * high resolution time counter (e.g., timestamp counter of intel pentium).
+ * we assume
+ * - 64-bit-long monotonically-increasing counter
+ * - frequency range is 100M-4GHz (CPU speed)
+ */
+/* if pcc is not available or disabled, emulate 256MHz using microtime() */
+#define MACHCLK_SHIFT 8
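+/*
+ * (1000000 usec/sec << MACHCLK_SHIFT = 256M "ticks" per second; both
+ * init_machclk() and read_machclk() below apply this shift.)
+ */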
+
+int machclk_usepcc;
+u_int32_t machclk_freq;
+u_int32_t machclk_per_tick;
+
+#ifdef __alpha__
+#ifdef __FreeBSD__
+extern u_int32_t cycles_per_sec; /* alpha cpu clock frequency */
+#elif defined(__NetBSD__) || defined(__OpenBSD__)
+extern u_int64_t cycles_per_usec; /* alpha cpu clock frequency */
+#endif
+#endif /* __alpha__ */
+#if defined(__i386__) && defined(__NetBSD__)
+extern u_int64_t cpu_tsc_freq;
+#endif /* __i386__ && __NetBSD__ */
+
+#if (__FreeBSD_version >= 700035)
+/* Update TSC freq with the value indicated by the caller. */
+static void
+tsc_freq_changed(void *arg, const struct cf_level *level, int status)
+{
+ /* If there was an error during the transition, don't do anything. */
+ if (status != 0)
+ return;
+
+#if (__FreeBSD_version >= 701102) && (defined(__amd64__) || defined(__i386__))
+ /* If TSC is P-state invariant, don't do anything. */
+ if (tsc_is_invariant)
+ return;
+#endif
+
+ /* Total setting for this level gives the new frequency in MHz. */
+ init_machclk();
+}
+EVENTHANDLER_DEFINE(cpufreq_post_change, tsc_freq_changed, NULL,
+ EVENTHANDLER_PRI_LAST);
+#endif /* __FreeBSD_version >= 700035 */
+
+static void
+init_machclk_setup(void)
+{
+#if (__FreeBSD_version >= 600000)
+ callout_init(&tbr_callout, 0);
+#endif
+
+ machclk_usepcc = 1;
+
+#if (!defined(__i386__) && !defined(__alpha__)) || defined(ALTQ_NOPCC)
+ machclk_usepcc = 0;
+#endif
+#if defined(__FreeBSD__) && defined(SMP)
+ machclk_usepcc = 0;
+#endif
+#if defined(__NetBSD__) && defined(MULTIPROCESSOR)
+ machclk_usepcc = 0;
+#endif
+#ifdef __i386__
+ /* check if TSC is available */
+ if (machclk_usepcc == 1 && ((cpu_feature & CPUID_TSC) == 0 ||
+ tsc_is_broken))
+ machclk_usepcc = 0;
+#endif
+}
+
+void
+init_machclk(void)
+{
+ static int called;
+
+ /* Call one-time initialization function. */
+ if (!called) {
+ init_machclk_setup();
+ called = 1;
+ }
+
+ if (machclk_usepcc == 0) {
+ /* emulate 256MHz using microtime() */
+ machclk_freq = 1000000 << MACHCLK_SHIFT;
+ machclk_per_tick = machclk_freq / hz;
+#ifdef ALTQ_DEBUG
+ printf("altq: emulate %uHz cpu clock\n", machclk_freq);
+#endif
+ return;
+ }
+
+ /*
+ * if the clock frequency (of Pentium TSC or Alpha PCC) is
+ * accessible, just use it.
+ */
+#ifdef __i386__
+#ifdef __FreeBSD__
+#if (__FreeBSD_version > 300000)
+ machclk_freq = tsc_freq;
+#else
+ machclk_freq = i586_ctr_freq;
+#endif
+#elif defined(__NetBSD__)
+ machclk_freq = (u_int32_t)cpu_tsc_freq;
+#elif defined(__OpenBSD__) && (defined(I586_CPU) || defined(I686_CPU))
+ machclk_freq = pentium_mhz * 1000000;
+#endif
+#elif defined(__alpha__)
+#ifdef __FreeBSD__
+ machclk_freq = cycles_per_sec;
+#elif defined(__NetBSD__) || defined(__OpenBSD__)
+ machclk_freq = (u_int32_t)(cycles_per_usec * 1000000);
+#endif
+#endif /* __alpha__ */
+
+ /*
+ * if we don't know the clock frequency, measure it.
+ */
+ if (machclk_freq == 0) {
+ static int wait;
+ struct timeval tv_start, tv_end;
+ u_int64_t start, end, diff;
+ int timo;
+
+ microtime(&tv_start);
+ start = read_machclk();
+ timo = hz; /* 1 sec */
+ (void)tsleep(&wait, PWAIT | PCATCH, "init_machclk", timo);
+ microtime(&tv_end);
+ end = read_machclk();
+ diff = (u_int64_t)(tv_end.tv_sec - tv_start.tv_sec) * 1000000
+ + tv_end.tv_usec - tv_start.tv_usec;
+ if (diff != 0)
+ machclk_freq = (u_int)((end - start) * 1000000 / diff);
+ }
+
+ machclk_per_tick = machclk_freq / hz;
+
+#ifdef ALTQ_DEBUG
+ printf("altq: CPU clock: %uHz\n", machclk_freq);
+#endif
+}
+
+#if defined(__OpenBSD__) && defined(__i386__)
+static __inline u_int64_t
+rdtsc(void)
+{
+ u_int64_t rv;
+ __asm __volatile(".byte 0x0f, 0x31" : "=A" (rv));
+ return (rv);
+}
+#endif /* __OpenBSD__ && __i386__ */
+
+u_int64_t
+read_machclk(void)
+{
+ u_int64_t val;
+
+ if (machclk_usepcc) {
+#if defined(__i386__)
+ val = rdtsc();
+#elif defined(__alpha__)
+ static u_int32_t last_pcc, upper;
+ u_int32_t pcc;
+
+ /*
+ * for alpha, make a 64bit counter value out of the 32bit
+ * alpha processor cycle counter.
+		 * read_machclk must be called within half of the counter's
+		 * wrap-around period (about 5 sec for a 400MHz cpu) to
+		 * properly detect a counter wrap-around.
+ * tbr_timeout calls read_machclk once a second.
+ */
+ pcc = (u_int32_t)alpha_rpcc();
+ if (pcc <= last_pcc)
+ upper++;
+ last_pcc = pcc;
+ val = ((u_int64_t)upper << 32) + pcc;
+#else
+ panic("read_machclk");
+#endif
+ } else {
+ struct timeval tv;
+
+ microtime(&tv);
+ val = (((u_int64_t)(tv.tv_sec - boottime.tv_sec) * 1000000
+ + tv.tv_usec) << MACHCLK_SHIFT);
+ }
+ return (val);
+}
+
+#ifdef ALTQ3_CLFIER_COMPAT
+
+#ifndef IPPROTO_ESP
+#define IPPROTO_ESP 50 /* encapsulating security payload */
+#endif
+#ifndef IPPROTO_AH
+#define IPPROTO_AH 51 /* authentication header */
+#endif
+
+/*
+ * extract flow information from a given packet.
+ * filt_bmask indicates which flowinfo fields are required.
+ * we assume the ip header is in one mbuf, and addresses and ports are
+ * in network byte order.
+ */
+int
+altq_extractflow(m, af, flow, filt_bmask)
+ struct mbuf *m;
+ int af;
+ struct flowinfo *flow;
+ u_int32_t filt_bmask;
+{
+
+ switch (af) {
+ case PF_INET: {
+ struct flowinfo_in *fin;
+ struct ip *ip;
+
+ ip = mtod(m, struct ip *);
+
+ if (ip->ip_v != 4)
+ break;
+
+ fin = (struct flowinfo_in *)flow;
+ fin->fi_len = sizeof(struct flowinfo_in);
+ fin->fi_family = AF_INET;
+
+ fin->fi_proto = ip->ip_p;
+ fin->fi_tos = ip->ip_tos;
+
+ fin->fi_src.s_addr = ip->ip_src.s_addr;
+ fin->fi_dst.s_addr = ip->ip_dst.s_addr;
+
+ if (filt_bmask & FIMB4_PORTS)
+ /* if port info is required, extract port numbers */
+ extract_ports4(m, ip, fin);
+ else {
+ fin->fi_sport = 0;
+ fin->fi_dport = 0;
+ fin->fi_gpi = 0;
+ }
+ return (1);
+ }
+
+#ifdef INET6
+ case PF_INET6: {
+ struct flowinfo_in6 *fin6;
+ struct ip6_hdr *ip6;
+
+ ip6 = mtod(m, struct ip6_hdr *);
+ /* should we check the ip version? */
+
+ fin6 = (struct flowinfo_in6 *)flow;
+ fin6->fi6_len = sizeof(struct flowinfo_in6);
+ fin6->fi6_family = AF_INET6;
+
+ fin6->fi6_proto = ip6->ip6_nxt;
+ fin6->fi6_tclass = (ntohl(ip6->ip6_flow) >> 20) & 0xff;
+
+ fin6->fi6_flowlabel = ip6->ip6_flow & htonl(0x000fffff);
+ fin6->fi6_src = ip6->ip6_src;
+ fin6->fi6_dst = ip6->ip6_dst;
+
+ if ((filt_bmask & FIMB6_PORTS) ||
+ ((filt_bmask & FIMB6_PROTO)
+ && ip6->ip6_nxt > IPPROTO_IPV6))
+ /*
+ * if port info is required, or proto is required
+ * but there are option headers, extract port
+ * and protocol numbers.
+ */
+ extract_ports6(m, ip6, fin6);
+ else {
+ fin6->fi6_sport = 0;
+ fin6->fi6_dport = 0;
+ fin6->fi6_gpi = 0;
+ }
+ return (1);
+ }
+#endif /* INET6 */
+
+ default:
+ break;
+ }
+
+ /* failed */
+ flow->fi_len = sizeof(struct flowinfo);
+ flow->fi_family = AF_UNSPEC;
+ return (0);
+}
+
+/*
+ * helper routine to extract port numbers
+ */
+/* structure for ipsec and ipv6 option header template */
+struct _opt6 {
+ u_int8_t opt6_nxt; /* next header */
+ u_int8_t opt6_hlen; /* header extension length */
+ u_int16_t _pad;
+ u_int32_t ah_spi; /* security parameter index
+ for authentication header */
+};
+
+/*
+ * extract port numbers from an IPv4 packet.
+ */
+static int
+extract_ports4(m, ip, fin)
+ struct mbuf *m;
+ struct ip *ip;
+ struct flowinfo_in *fin;
+{
+ struct mbuf *m0;
+ u_short ip_off;
+ u_int8_t proto;
+ int off;
+
+ fin->fi_sport = 0;
+ fin->fi_dport = 0;
+ fin->fi_gpi = 0;
+
+ ip_off = ntohs(ip->ip_off);
+ /* if it is a fragment, try cached fragment info */
+ if (ip_off & IP_OFFMASK) {
+ ip4f_lookup(ip, fin);
+ return (1);
+ }
+
+ /* locate the mbuf containing the protocol header */
+ for (m0 = m; m0 != NULL; m0 = m0->m_next)
+ if (((caddr_t)ip >= m0->m_data) &&
+ ((caddr_t)ip < m0->m_data + m0->m_len))
+ break;
+ if (m0 == NULL) {
+#ifdef ALTQ_DEBUG
+ printf("extract_ports4: can't locate header! ip=%p\n", ip);
+#endif
+ return (0);
+ }
+ off = ((caddr_t)ip - m0->m_data) + (ip->ip_hl << 2);
+ proto = ip->ip_p;
+
+#ifdef ALTQ_IPSEC
+ again:
+#endif
+ while (off >= m0->m_len) {
+ off -= m0->m_len;
+ m0 = m0->m_next;
+ if (m0 == NULL)
+ return (0); /* bogus ip_hl! */
+ }
+ if (m0->m_len < off + 4)
+ return (0);
+
+ switch (proto) {
+ case IPPROTO_TCP:
+ case IPPROTO_UDP: {
+ struct udphdr *udp;
+
+ udp = (struct udphdr *)(mtod(m0, caddr_t) + off);
+ fin->fi_sport = udp->uh_sport;
+ fin->fi_dport = udp->uh_dport;
+ fin->fi_proto = proto;
+ }
+ break;
+
+#ifdef ALTQ_IPSEC
+ case IPPROTO_ESP:
+ if (fin->fi_gpi == 0){
+ u_int32_t *gpi;
+
+ gpi = (u_int32_t *)(mtod(m0, caddr_t) + off);
+ fin->fi_gpi = *gpi;
+ }
+ fin->fi_proto = proto;
+ break;
+
+ case IPPROTO_AH: {
+ /* get next header and header length */
+ struct _opt6 *opt6;
+
+ opt6 = (struct _opt6 *)(mtod(m0, caddr_t) + off);
+ proto = opt6->opt6_nxt;
+ off += 8 + (opt6->opt6_hlen * 4);
+ if (fin->fi_gpi == 0 && m0->m_len >= off + 8)
+ fin->fi_gpi = opt6->ah_spi;
+ }
+ /* goto the next header */
+ goto again;
+#endif /* ALTQ_IPSEC */
+
+ default:
+ fin->fi_proto = proto;
+ return (0);
+ }
+
+ /* if this is a first fragment, cache it. */
+ if (ip_off & IP_MF)
+ ip4f_cache(ip, fin);
+
+ return (1);
+}
+
+#ifdef INET6
+static int
+extract_ports6(m, ip6, fin6)
+ struct mbuf *m;
+ struct ip6_hdr *ip6;
+ struct flowinfo_in6 *fin6;
+{
+ struct mbuf *m0;
+ int off;
+ u_int8_t proto;
+
+ fin6->fi6_gpi = 0;
+ fin6->fi6_sport = 0;
+ fin6->fi6_dport = 0;
+
+ /* locate the mbuf containing the protocol header */
+ for (m0 = m; m0 != NULL; m0 = m0->m_next)
+ if (((caddr_t)ip6 >= m0->m_data) &&
+ ((caddr_t)ip6 < m0->m_data + m0->m_len))
+ break;
+ if (m0 == NULL) {
+#ifdef ALTQ_DEBUG
+ printf("extract_ports6: can't locate header! ip6=%p\n", ip6);
+#endif
+ return (0);
+ }
+ off = ((caddr_t)ip6 - m0->m_data) + sizeof(struct ip6_hdr);
+
+ proto = ip6->ip6_nxt;
+ do {
+ while (off >= m0->m_len) {
+ off -= m0->m_len;
+ m0 = m0->m_next;
+ if (m0 == NULL)
+ return (0);
+ }
+ if (m0->m_len < off + 4)
+ return (0);
+
+ switch (proto) {
+ case IPPROTO_TCP:
+ case IPPROTO_UDP: {
+ struct udphdr *udp;
+
+ udp = (struct udphdr *)(mtod(m0, caddr_t) + off);
+ fin6->fi6_sport = udp->uh_sport;
+ fin6->fi6_dport = udp->uh_dport;
+ fin6->fi6_proto = proto;
+ }
+ return (1);
+
+ case IPPROTO_ESP:
+ if (fin6->fi6_gpi == 0) {
+ u_int32_t *gpi;
+
+ gpi = (u_int32_t *)(mtod(m0, caddr_t) + off);
+ fin6->fi6_gpi = *gpi;
+ }
+ fin6->fi6_proto = proto;
+ return (1);
+
+ case IPPROTO_AH: {
+ /* get next header and header length */
+ struct _opt6 *opt6;
+
+ opt6 = (struct _opt6 *)(mtod(m0, caddr_t) + off);
+ if (fin6->fi6_gpi == 0 && m0->m_len >= off + 8)
+ fin6->fi6_gpi = opt6->ah_spi;
+ proto = opt6->opt6_nxt;
+ off += 8 + (opt6->opt6_hlen * 4);
+			/* go on to the next header */
+ break;
+ }
+
+ case IPPROTO_HOPOPTS:
+ case IPPROTO_ROUTING:
+ case IPPROTO_DSTOPTS: {
+ /* get next header and header length */
+ struct _opt6 *opt6;
+
+ opt6 = (struct _opt6 *)(mtod(m0, caddr_t) + off);
+ proto = opt6->opt6_nxt;
+ off += (opt6->opt6_hlen + 1) * 8;
+			/* go on to the next header */
+ break;
+ }
+
+ case IPPROTO_FRAGMENT:
+ /* ipv6 fragmentations are not supported yet */
+ default:
+ fin6->fi6_proto = proto;
+ return (0);
+ }
+ } while (1);
+ /*NOTREACHED*/
+}
+#endif /* INET6 */
+
+/*
+ * altq common classifier
+ */
+int
+acc_add_filter(classifier, filter, class, phandle)
+ struct acc_classifier *classifier;
+ struct flow_filter *filter;
+ void *class;
+ u_long *phandle;
+{
+ struct acc_filter *afp, *prev, *tmp;
+ int i, s;
+
+#ifdef INET6
+ if (filter->ff_flow.fi_family != AF_INET &&
+ filter->ff_flow.fi_family != AF_INET6)
+ return (EINVAL);
+#else
+ if (filter->ff_flow.fi_family != AF_INET)
+ return (EINVAL);
+#endif
+
+ afp = malloc(sizeof(struct acc_filter),
+ M_DEVBUF, M_WAITOK);
+ if (afp == NULL)
+ return (ENOMEM);
+ bzero(afp, sizeof(struct acc_filter));
+
+ afp->f_filter = *filter;
+ afp->f_class = class;
+
+ i = ACC_WILDCARD_INDEX;
+ if (filter->ff_flow.fi_family == AF_INET) {
+ struct flow_filter *filter4 = &afp->f_filter;
+
+ /*
+ * if address is 0, it's a wildcard. if address mask
+ * isn't set, use full mask.
+ */
+ if (filter4->ff_flow.fi_dst.s_addr == 0)
+ filter4->ff_mask.mask_dst.s_addr = 0;
+ else if (filter4->ff_mask.mask_dst.s_addr == 0)
+ filter4->ff_mask.mask_dst.s_addr = 0xffffffff;
+ if (filter4->ff_flow.fi_src.s_addr == 0)
+ filter4->ff_mask.mask_src.s_addr = 0;
+ else if (filter4->ff_mask.mask_src.s_addr == 0)
+ filter4->ff_mask.mask_src.s_addr = 0xffffffff;
+
+ /* clear extra bits in addresses */
+ filter4->ff_flow.fi_dst.s_addr &=
+ filter4->ff_mask.mask_dst.s_addr;
+ filter4->ff_flow.fi_src.s_addr &=
+ filter4->ff_mask.mask_src.s_addr;
+
+ /*
+ * if dst address is a wildcard, use hash-entry
+ * ACC_WILDCARD_INDEX.
+ */
+ if (filter4->ff_mask.mask_dst.s_addr != 0xffffffff)
+ i = ACC_WILDCARD_INDEX;
+ else
+ i = ACC_GET_HASH_INDEX(filter4->ff_flow.fi_dst.s_addr);
+ }
+#ifdef INET6
+ else if (filter->ff_flow.fi_family == AF_INET6) {
+ struct flow_filter6 *filter6 =
+ (struct flow_filter6 *)&afp->f_filter;
+#ifndef IN6MASK0 /* taken from kame ipv6 */
+#define IN6MASK0 {{{ 0, 0, 0, 0 }}}
+#define IN6MASK128 {{{ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff }}}
+ const struct in6_addr in6mask0 = IN6MASK0;
+ const struct in6_addr in6mask128 = IN6MASK128;
+#endif
+
+ if (IN6_IS_ADDR_UNSPECIFIED(&filter6->ff_flow6.fi6_dst))
+ filter6->ff_mask6.mask6_dst = in6mask0;
+ else if (IN6_IS_ADDR_UNSPECIFIED(&filter6->ff_mask6.mask6_dst))
+ filter6->ff_mask6.mask6_dst = in6mask128;
+ if (IN6_IS_ADDR_UNSPECIFIED(&filter6->ff_flow6.fi6_src))
+ filter6->ff_mask6.mask6_src = in6mask0;
+ else if (IN6_IS_ADDR_UNSPECIFIED(&filter6->ff_mask6.mask6_src))
+ filter6->ff_mask6.mask6_src = in6mask128;
+
+ /* clear extra bits in addresses */
+ for (i = 0; i < 16; i++)
+ filter6->ff_flow6.fi6_dst.s6_addr[i] &=
+ filter6->ff_mask6.mask6_dst.s6_addr[i];
+ for (i = 0; i < 16; i++)
+ filter6->ff_flow6.fi6_src.s6_addr[i] &=
+ filter6->ff_mask6.mask6_src.s6_addr[i];
+
+ if (filter6->ff_flow6.fi6_flowlabel == 0)
+ i = ACC_WILDCARD_INDEX;
+ else
+ i = ACC_GET_HASH_INDEX(filter6->ff_flow6.fi6_flowlabel);
+ }
+#endif /* INET6 */
+
+ afp->f_handle = get_filt_handle(classifier, i);
+
+ /* update filter bitmask */
+ afp->f_fbmask = filt2fibmask(filter);
+ classifier->acc_fbmask |= afp->f_fbmask;
+
+ /*
+ * add this filter to the filter list.
+ * filters are ordered from the highest rule number.
+ */
+#ifdef __NetBSD__
+ s = splnet();
+#else
+ s = splimp();
+#endif
+ prev = NULL;
+ LIST_FOREACH(tmp, &classifier->acc_filters[i], f_chain) {
+ if (tmp->f_filter.ff_ruleno > afp->f_filter.ff_ruleno)
+ prev = tmp;
+ else
+ break;
+ }
+ if (prev == NULL)
+ LIST_INSERT_HEAD(&classifier->acc_filters[i], afp, f_chain);
+ else
+ LIST_INSERT_AFTER(prev, afp, f_chain);
+ splx(s);
+
+ *phandle = afp->f_handle;
+ return (0);
+}
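+
+/*
+ * editor's usage sketch (hypothetical caller, not part of the
+ * original import): install a filter matching TCP traffic to
+ * 192.168.1.0/24, port 80.  the zeroed source address is normalized
+ * to a wildcard by the mask fixup above.
+ */
+#if 0
+	struct flow_filter f;
+	u_long handle;
+	int error;
+
+	bzero(&f, sizeof(f));
+	f.ff_flow.fi_family = AF_INET;
+	f.ff_flow.fi_proto = IPPROTO_TCP;
+	f.ff_flow.fi_dst.s_addr = htonl(0xc0a80100);	/* 192.168.1.0 */
+	f.ff_mask.mask_dst.s_addr = htonl(0xffffff00);	/* /24 */
+	f.ff_flow.fi_dport = htons(80);
+	error = acc_add_filter(classifier, &f, class, &handle);
+#endif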
+
+int
+acc_delete_filter(classifier, handle)
+ struct acc_classifier *classifier;
+ u_long handle;
+{
+ struct acc_filter *afp;
+ int s;
+
+ if ((afp = filth_to_filtp(classifier, handle)) == NULL)
+ return (EINVAL);
+
+#ifdef __NetBSD__
+ s = splnet();
+#else
+ s = splimp();
+#endif
+ LIST_REMOVE(afp, f_chain);
+ splx(s);
+
+ free(afp, M_DEVBUF);
+
+ /* todo: update filt_bmask */
+
+ return (0);
+}
+
+/*
+ * delete filters referencing the specified class.
+ * if the all flag is non-zero, delete all the filters.
+ */
+int
+acc_discard_filters(classifier, class, all)
+ struct acc_classifier *classifier;
+ void *class;
+ int all;
+{
+ struct acc_filter *afp;
+ int i, s;
+
+#ifdef __NetBSD__
+ s = splnet();
+#else
+ s = splimp();
+#endif
+ for (i = 0; i < ACC_FILTER_TABLESIZE; i++) {
+ do {
+ LIST_FOREACH(afp, &classifier->acc_filters[i], f_chain)
+ if (all || afp->f_class == class) {
+ LIST_REMOVE(afp, f_chain);
+ free(afp, M_DEVBUF);
+ /* start again from the head */
+ break;
+ }
+ } while (afp != NULL);
+ }
+ splx(s);
+
+ if (all)
+ classifier->acc_fbmask = 0;
+
+ return (0);
+}
+
+void *
+acc_classify(clfier, m, af)
+ void *clfier;
+ struct mbuf *m;
+ int af;
+{
+ struct acc_classifier *classifier;
+ struct flowinfo flow;
+ struct acc_filter *afp;
+ int i;
+
+ classifier = (struct acc_classifier *)clfier;
+ altq_extractflow(m, af, &flow, classifier->acc_fbmask);
+
+ if (flow.fi_family == AF_INET) {
+ struct flowinfo_in *fp = (struct flowinfo_in *)&flow;
+
+ if ((classifier->acc_fbmask & FIMB4_ALL) == FIMB4_TOS) {
+ /* only tos is used */
+ LIST_FOREACH(afp,
+ &classifier->acc_filters[ACC_WILDCARD_INDEX],
+ f_chain)
+ if (apply_tosfilter4(afp->f_fbmask,
+ &afp->f_filter, fp))
+ /* filter matched */
+ return (afp->f_class);
+ } else if ((classifier->acc_fbmask &
+ (~(FIMB4_PROTO|FIMB4_SPORT|FIMB4_DPORT) & FIMB4_ALL))
+ == 0) {
+ /* only proto and ports are used */
+ LIST_FOREACH(afp,
+ &classifier->acc_filters[ACC_WILDCARD_INDEX],
+ f_chain)
+ if (apply_ppfilter4(afp->f_fbmask,
+ &afp->f_filter, fp))
+ /* filter matched */
+ return (afp->f_class);
+ } else {
+ /* get the filter hash entry from its dest address */
+ i = ACC_GET_HASH_INDEX(fp->fi_dst.s_addr);
+ do {
+ /*
+ * go through this loop twice. first for dst
+ * hash, second for wildcards.
+ */
+ LIST_FOREACH(afp, &classifier->acc_filters[i],
+ f_chain)
+ if (apply_filter4(afp->f_fbmask,
+ &afp->f_filter, fp))
+ /* filter matched */
+ return (afp->f_class);
+
+ /*
+ * check again for filters with a dst addr
+ * wildcard.
+ * (daddr == 0 || dmask != 0xffffffff).
+ */
+ if (i != ACC_WILDCARD_INDEX)
+ i = ACC_WILDCARD_INDEX;
+ else
+ break;
+ } while (1);
+ }
+ }
+#ifdef INET6
+ else if (flow.fi_family == AF_INET6) {
+ struct flowinfo_in6 *fp6 = (struct flowinfo_in6 *)&flow;
+
+ /* get the filter hash entry from its flow ID */
+ if (fp6->fi6_flowlabel != 0)
+ i = ACC_GET_HASH_INDEX(fp6->fi6_flowlabel);
+ else
+			/* flowlabel can be zero */
+ i = ACC_WILDCARD_INDEX;
+
+		/*
+		 * go through this loop twice. first for flow hash,
+		 * second for wildcards.
+		 */
+ do {
+ LIST_FOREACH(afp, &classifier->acc_filters[i], f_chain)
+ if (apply_filter6(afp->f_fbmask,
+ (struct flow_filter6 *)&afp->f_filter,
+ fp6))
+ /* filter matched */
+ return (afp->f_class);
+
+ /*
+ * check again for filters with a wildcard.
+ */
+ if (i != ACC_WILDCARD_INDEX)
+ i = ACC_WILDCARD_INDEX;
+ else
+ break;
+ } while (1);
+ }
+#endif /* INET6 */
+
+ /* no filter matched */
+ return (NULL);
+}
+
+static int
+apply_filter4(fbmask, filt, pkt)
+ u_int32_t fbmask;
+ struct flow_filter *filt;
+ struct flowinfo_in *pkt;
+{
+ if (filt->ff_flow.fi_family != AF_INET)
+ return (0);
+ if ((fbmask & FIMB4_SPORT) && filt->ff_flow.fi_sport != pkt->fi_sport)
+ return (0);
+ if ((fbmask & FIMB4_DPORT) && filt->ff_flow.fi_dport != pkt->fi_dport)
+ return (0);
+ if ((fbmask & FIMB4_DADDR) &&
+ filt->ff_flow.fi_dst.s_addr !=
+ (pkt->fi_dst.s_addr & filt->ff_mask.mask_dst.s_addr))
+ return (0);
+ if ((fbmask & FIMB4_SADDR) &&
+ filt->ff_flow.fi_src.s_addr !=
+ (pkt->fi_src.s_addr & filt->ff_mask.mask_src.s_addr))
+ return (0);
+ if ((fbmask & FIMB4_PROTO) && filt->ff_flow.fi_proto != pkt->fi_proto)
+ return (0);
+ if ((fbmask & FIMB4_TOS) && filt->ff_flow.fi_tos !=
+ (pkt->fi_tos & filt->ff_mask.mask_tos))
+ return (0);
+ if ((fbmask & FIMB4_GPI) && filt->ff_flow.fi_gpi != (pkt->fi_gpi))
+ return (0);
+ /* match */
+ return (1);
+}
+
+/*
+ * filter matching function optimized for a common case that checks
+ * only protocol and port numbers
+ */
+static int
+apply_ppfilter4(fbmask, filt, pkt)
+ u_int32_t fbmask;
+ struct flow_filter *filt;
+ struct flowinfo_in *pkt;
+{
+ if (filt->ff_flow.fi_family != AF_INET)
+ return (0);
+ if ((fbmask & FIMB4_SPORT) && filt->ff_flow.fi_sport != pkt->fi_sport)
+ return (0);
+ if ((fbmask & FIMB4_DPORT) && filt->ff_flow.fi_dport != pkt->fi_dport)
+ return (0);
+ if ((fbmask & FIMB4_PROTO) && filt->ff_flow.fi_proto != pkt->fi_proto)
+ return (0);
+ /* match */
+ return (1);
+}
+
+/*
+ * filter matching function for the tos field only.
+ */
+static int
+apply_tosfilter4(fbmask, filt, pkt)
+ u_int32_t fbmask;
+ struct flow_filter *filt;
+ struct flowinfo_in *pkt;
+{
+ if (filt->ff_flow.fi_family != AF_INET)
+ return (0);
+ if ((fbmask & FIMB4_TOS) && filt->ff_flow.fi_tos !=
+ (pkt->fi_tos & filt->ff_mask.mask_tos))
+ return (0);
+ /* match */
+ return (1);
+}
+
+#ifdef INET6
+static int
+apply_filter6(fbmask, filt, pkt)
+ u_int32_t fbmask;
+ struct flow_filter6 *filt;
+ struct flowinfo_in6 *pkt;
+{
+ int i;
+
+ if (filt->ff_flow6.fi6_family != AF_INET6)
+ return (0);
+ if ((fbmask & FIMB6_FLABEL) &&
+ filt->ff_flow6.fi6_flowlabel != pkt->fi6_flowlabel)
+ return (0);
+ if ((fbmask & FIMB6_PROTO) &&
+ filt->ff_flow6.fi6_proto != pkt->fi6_proto)
+ return (0);
+ if ((fbmask & FIMB6_SPORT) &&
+ filt->ff_flow6.fi6_sport != pkt->fi6_sport)
+ return (0);
+ if ((fbmask & FIMB6_DPORT) &&
+ filt->ff_flow6.fi6_dport != pkt->fi6_dport)
+ return (0);
+ if (fbmask & FIMB6_SADDR) {
+ for (i = 0; i < 4; i++)
+ if (filt->ff_flow6.fi6_src.s6_addr32[i] !=
+ (pkt->fi6_src.s6_addr32[i] &
+ filt->ff_mask6.mask6_src.s6_addr32[i]))
+ return (0);
+ }
+ if (fbmask & FIMB6_DADDR) {
+ for (i = 0; i < 4; i++)
+ if (filt->ff_flow6.fi6_dst.s6_addr32[i] !=
+ (pkt->fi6_dst.s6_addr32[i] &
+ filt->ff_mask6.mask6_dst.s6_addr32[i]))
+ return (0);
+ }
+ if ((fbmask & FIMB6_TCLASS) &&
+ filt->ff_flow6.fi6_tclass !=
+ (pkt->fi6_tclass & filt->ff_mask6.mask6_tclass))
+ return (0);
+ if ((fbmask & FIMB6_GPI) &&
+ filt->ff_flow6.fi6_gpi != pkt->fi6_gpi)
+ return (0);
+ /* match */
+ return (1);
+}
+#endif /* INET6 */
+
+/*
+ * filter handle:
+ * bits 20-28: index into the filter hash table
+ * bits 0-19: unique id within the hash bucket.
+ */
+static u_long
+get_filt_handle(classifier, i)
+ struct acc_classifier *classifier;
+ int i;
+{
+ static u_long handle_number = 1;
+ u_long handle;
+ struct acc_filter *afp;
+
+ while (1) {
+ handle = handle_number++ & 0x000fffff;
+
+ if (LIST_EMPTY(&classifier->acc_filters[i]))
+ break;
+
+ LIST_FOREACH(afp, &classifier->acc_filters[i], f_chain)
+ if ((afp->f_handle & 0x000fffff) == handle)
+ break;
+ if (afp == NULL)
+ break;
+ /* this handle is already used, try again */
+ }
+
+ return ((i << 20) | handle);
+}
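+
+/*
+ * editor's sketch (hypothetical helpers, not part of the original
+ * import): the handle returned above is just the hash table index
+ * packed next to a 20-bit per-bucket id, so the two halves can be
+ * recovered without a table lookup.
+ */
+#if 0
+static u_long
+acc_handle_pack(int index, u_long id)
+{
+	return (((u_long)index << 20) | (id & 0x000fffff));
+}
+
+static int
+acc_handle_index(u_long handle)
+{
+	return (handle >> 20);	/* equivalent to ACC_GET_HINDEX() */
+}
+#endif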
+
+/* convert filter handle to filter pointer */
+static struct acc_filter *
+filth_to_filtp(classifier, handle)
+ struct acc_classifier *classifier;
+ u_long handle;
+{
+ struct acc_filter *afp;
+ int i;
+
+ i = ACC_GET_HINDEX(handle);
+
+ LIST_FOREACH(afp, &classifier->acc_filters[i], f_chain)
+ if (afp->f_handle == handle)
+ return (afp);
+
+ return (NULL);
+}
+
+/* create flowinfo bitmask */
+static u_int32_t
+filt2fibmask(filt)
+ struct flow_filter *filt;
+{
+ u_int32_t mask = 0;
+#ifdef INET6
+ struct flow_filter6 *filt6;
+#endif
+
+ switch (filt->ff_flow.fi_family) {
+ case AF_INET:
+ if (filt->ff_flow.fi_proto != 0)
+ mask |= FIMB4_PROTO;
+ if (filt->ff_flow.fi_tos != 0)
+ mask |= FIMB4_TOS;
+ if (filt->ff_flow.fi_dst.s_addr != 0)
+ mask |= FIMB4_DADDR;
+ if (filt->ff_flow.fi_src.s_addr != 0)
+ mask |= FIMB4_SADDR;
+ if (filt->ff_flow.fi_sport != 0)
+ mask |= FIMB4_SPORT;
+ if (filt->ff_flow.fi_dport != 0)
+ mask |= FIMB4_DPORT;
+ if (filt->ff_flow.fi_gpi != 0)
+ mask |= FIMB4_GPI;
+ break;
+#ifdef INET6
+ case AF_INET6:
+ filt6 = (struct flow_filter6 *)filt;
+
+ if (filt6->ff_flow6.fi6_proto != 0)
+ mask |= FIMB6_PROTO;
+ if (filt6->ff_flow6.fi6_tclass != 0)
+ mask |= FIMB6_TCLASS;
+ if (!IN6_IS_ADDR_UNSPECIFIED(&filt6->ff_flow6.fi6_dst))
+ mask |= FIMB6_DADDR;
+ if (!IN6_IS_ADDR_UNSPECIFIED(&filt6->ff_flow6.fi6_src))
+ mask |= FIMB6_SADDR;
+ if (filt6->ff_flow6.fi6_sport != 0)
+ mask |= FIMB6_SPORT;
+ if (filt6->ff_flow6.fi6_dport != 0)
+ mask |= FIMB6_DPORT;
+ if (filt6->ff_flow6.fi6_gpi != 0)
+ mask |= FIMB6_GPI;
+ if (filt6->ff_flow6.fi6_flowlabel != 0)
+ mask |= FIMB6_FLABEL;
+ break;
+#endif /* INET6 */
+ }
+ return (mask);
+}
+
+
+/*
+ * helper functions to handle IPv4 fragments.
+ * currently only in-sequence fragments are handled.
+ * - fragment info is cached in an LRU list.
+ * - when a first fragment is found, cache its flow info.
+ * - when a non-first fragment is found, lookup the cache.
+ */
+
+struct ip4_frag {
+ TAILQ_ENTRY(ip4_frag) ip4f_chain;
+ char ip4f_valid;
+ u_short ip4f_id;
+ struct flowinfo_in ip4f_info;
+};
+
+static TAILQ_HEAD(ip4f_list, ip4_frag) ip4f_list; /* IPv4 fragment cache */
+
+#define IP4F_TABSIZE 16 /* IPv4 fragment cache size */
+
+
+static void
+ip4f_cache(ip, fin)
+ struct ip *ip;
+ struct flowinfo_in *fin;
+{
+ struct ip4_frag *fp;
+
+ if (TAILQ_EMPTY(&ip4f_list)) {
+		/* first call: allocate the fragment cache entries. */
+ if (ip4f_init() < 0)
+ /* allocation failed! */
+ return;
+ }
+
+ fp = ip4f_alloc();
+ fp->ip4f_id = ip->ip_id;
+ fp->ip4f_info.fi_proto = ip->ip_p;
+ fp->ip4f_info.fi_src.s_addr = ip->ip_src.s_addr;
+ fp->ip4f_info.fi_dst.s_addr = ip->ip_dst.s_addr;
+
+ /* save port numbers */
+ fp->ip4f_info.fi_sport = fin->fi_sport;
+ fp->ip4f_info.fi_dport = fin->fi_dport;
+ fp->ip4f_info.fi_gpi = fin->fi_gpi;
+}
+
+static int
+ip4f_lookup(ip, fin)
+ struct ip *ip;
+ struct flowinfo_in *fin;
+{
+ struct ip4_frag *fp;
+
+ for (fp = TAILQ_FIRST(&ip4f_list); fp != NULL && fp->ip4f_valid;
+ fp = TAILQ_NEXT(fp, ip4f_chain))
+ if (ip->ip_id == fp->ip4f_id &&
+ ip->ip_src.s_addr == fp->ip4f_info.fi_src.s_addr &&
+ ip->ip_dst.s_addr == fp->ip4f_info.fi_dst.s_addr &&
+ ip->ip_p == fp->ip4f_info.fi_proto) {
+
+ /* found the matching entry */
+ fin->fi_sport = fp->ip4f_info.fi_sport;
+ fin->fi_dport = fp->ip4f_info.fi_dport;
+ fin->fi_gpi = fp->ip4f_info.fi_gpi;
+
+ if ((ntohs(ip->ip_off) & IP_MF) == 0)
+			/* this is the last fragment, release the entry. */
+ ip4f_free(fp);
+
+ return (1);
+ }
+
+ /* no matching entry found */
+ return (0);
+}
+
+static int
+ip4f_init(void)
+{
+ struct ip4_frag *fp;
+ int i;
+
+ TAILQ_INIT(&ip4f_list);
+	for (i = 0; i < IP4F_TABSIZE; i++) {
+ fp = malloc(sizeof(struct ip4_frag),
+ M_DEVBUF, M_NOWAIT);
+ if (fp == NULL) {
+ printf("ip4f_init: can't alloc %dth entry!\n", i);
+ if (i == 0)
+ return (-1);
+ return (0);
+ }
+ fp->ip4f_valid = 0;
+ TAILQ_INSERT_TAIL(&ip4f_list, fp, ip4f_chain);
+ }
+ return (0);
+}
+
+static struct ip4_frag *
+ip4f_alloc(void)
+{
+ struct ip4_frag *fp;
+
+ /* reclaim an entry at the tail, put it at the head */
+ fp = TAILQ_LAST(&ip4f_list, ip4f_list);
+ TAILQ_REMOVE(&ip4f_list, fp, ip4f_chain);
+ fp->ip4f_valid = 1;
+ TAILQ_INSERT_HEAD(&ip4f_list, fp, ip4f_chain);
+ return (fp);
+}
+
+static void
+ip4f_free(fp)
+ struct ip4_frag *fp;
+{
+ TAILQ_REMOVE(&ip4f_list, fp, ip4f_chain);
+ fp->ip4f_valid = 0;
+ TAILQ_INSERT_TAIL(&ip4f_list, fp, ip4f_chain);
+}
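+
+/*
+ * editor's sketch of the intended call pattern (illustrative, not
+ * part of the original import): only the first fragment carries the
+ * transport header, so its ports are cached; non-first fragments
+ * are resolved from the cache by (id, src, dst, proto).
+ */
+#if 0
+	ip_off = ntohs(ip->ip_off);
+	if (ip_off & IP_OFFMASK)
+		/* non-first fragment: recover the ports from the cache */
+		return (ip4f_lookup(ip, fin));
+	/* ... extract the ports from the transport header ... */
+	if (ip_off & IP_MF)
+		ip4f_cache(ip, fin);	/* first fragment: remember ports */
+#endif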
+
+#endif /* ALTQ3_CLFIER_COMPAT */
diff --git a/contrib/altq/rtems/freebsd/altq/altq_var.h b/contrib/altq/rtems/freebsd/altq/altq_var.h
new file mode 100644
index 00000000..58384e15
--- /dev/null
+++ b/contrib/altq/rtems/freebsd/altq/altq_var.h
@@ -0,0 +1,265 @@
+/* $FreeBSD$ */
+/* $KAME: altq_var.h,v 1.16 2003/10/03 05:05:15 kjc Exp $ */
+
+/*
+ * Copyright (C) 1998-2003
+ * Sony Computer Science Laboratories Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY SONY CSL AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL SONY CSL OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+#ifndef _ALTQ_ALTQ_VAR_HH_
+#define _ALTQ_ALTQ_VAR_HH_
+
+#ifdef _KERNEL
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/queue.h>
+
+#ifdef ALTQ3_CLFIER_COMPAT
+/*
+ * filter structure for altq common classifier
+ */
+struct acc_filter {
+ LIST_ENTRY(acc_filter) f_chain;
+ void *f_class; /* pointer to the class */
+ u_long f_handle; /* filter id */
+ u_int32_t f_fbmask; /* filter bitmask */
+ struct flow_filter f_filter; /* filter value */
+};
+
+/*
+ * XXX ACC_FILTER_TABLESIZE can't be larger than 2048 unless we fix
+ * the handle assignment.
+ */
+#define ACC_FILTER_TABLESIZE (256+1)
+#define ACC_FILTER_MASK (ACC_FILTER_TABLESIZE - 2)
+#define ACC_WILDCARD_INDEX (ACC_FILTER_TABLESIZE - 1)
+#ifdef __GNUC__
+#define ACC_GET_HASH_INDEX(addr) \
+ ({int x = (addr) + ((addr) >> 16); (x + (x >> 8)) & ACC_FILTER_MASK;})
+#else
+#define ACC_GET_HASH_INDEX(addr) \
+ (((addr) + ((addr) >> 8) + ((addr) >> 16) + ((addr) >> 24)) \
+ & ACC_FILTER_MASK)
+#endif
+#define ACC_GET_HINDEX(handle) ((handle) >> 20)
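+
+/*
+ * editor's note (worked example, not part of the original import):
+ * ACC_GET_HASH_INDEX() folds all four address bytes into the bucket
+ * number before masking.  with the portable (non-GNUC) form and
+ * addr = 0xc0a80102 (192.168.1.2):
+ *   0xc0a80102 + 0x00c0a801 + 0x0000c0a8 + 0x000000c0 = 0xc1696a6b
+ *   0xc1696a6b & ACC_FILTER_MASK (0xff) = 0x6b, i.e. bucket 107
+ */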
+
+#if (__FreeBSD_version > 500000)
+#define ACC_LOCK_INIT(ac) mtx_init(&(ac)->acc_mtx, "classifier", MTX_DEF)
+#define ACC_LOCK_DESTROY(ac) mtx_destroy(&(ac)->acc_mtx)
+#define ACC_LOCK(ac) mtx_lock(&(ac)->acc_mtx)
+#define ACC_UNLOCK(ac) mtx_unlock(&(ac)->acc_mtx)
+#else
+#define ACC_LOCK_INIT(ac)
+#define ACC_LOCK_DESTROY(ac)
+#define ACC_LOCK(ac)
+#define ACC_UNLOCK(ac)
+#endif
+
+struct acc_classifier {
+ u_int32_t acc_fbmask;
+ LIST_HEAD(filt, acc_filter) acc_filters[ACC_FILTER_TABLESIZE];
+
+#if (__FreeBSD_version > 500000)
+ struct mtx acc_mtx;
+#endif
+};
+
+/*
+ * flowinfo mask bits used by classifier
+ */
+/* for ipv4 */
+#define FIMB4_PROTO 0x0001
+#define FIMB4_TOS 0x0002
+#define FIMB4_DADDR 0x0004
+#define FIMB4_SADDR 0x0008
+#define FIMB4_DPORT 0x0010
+#define FIMB4_SPORT 0x0020
+#define FIMB4_GPI 0x0040
+#define FIMB4_ALL 0x007f
+/* for ipv6 */
+#define FIMB6_PROTO 0x0100
+#define FIMB6_TCLASS 0x0200
+#define FIMB6_DADDR 0x0400
+#define FIMB6_SADDR 0x0800
+#define FIMB6_DPORT 0x1000
+#define FIMB6_SPORT 0x2000
+#define FIMB6_GPI 0x4000
+#define FIMB6_FLABEL 0x8000
+#define FIMB6_ALL 0xff00
+
+#define FIMB_ALL (FIMB4_ALL|FIMB6_ALL)
+
+#define FIMB4_PORTS (FIMB4_DPORT|FIMB4_SPORT|FIMB4_GPI)
+#define FIMB6_PORTS (FIMB6_DPORT|FIMB6_SPORT|FIMB6_GPI)
+#endif /* ALTQ3_CLFIER_COMPAT */
+
+/*
+ * machine-dependent clock:
+ * a 64-bit high-resolution time counter.
+ */
+extern int machclk_usepcc;
+extern u_int32_t machclk_freq;
+extern u_int32_t machclk_per_tick;
+extern void init_machclk(void);
+extern u_int64_t read_machclk(void);
+
+/*
+ * debug support
+ */
+#ifdef ALTQ_DEBUG
+#ifdef __STDC__
+#define ASSERT(e) ((e) ? (void)0 : altq_assert(__FILE__, __LINE__, #e))
+#else /* PCC */
+#define ASSERT(e) ((e) ? (void)0 : altq_assert(__FILE__, __LINE__, "e"))
+#endif
+#else
+#define ASSERT(e) ((void)0)
+#endif
+
+/*
+ * misc stuff for compatibility
+ */
+/* ioctl cmd type */
+#if defined(__FreeBSD__) && (__FreeBSD__ < 3)
+typedef int ioctlcmd_t;
+#else
+typedef u_long ioctlcmd_t;
+#endif
+
+/*
+ * queue macros:
+ * the interface of TAILQ_LAST macro changed after the introduction
+ * of softupdate. redefine it here to make it work with pre-2.2.7.
+ */
+#undef TAILQ_LAST
+#define TAILQ_LAST(head, headname) \
+ (*(((struct headname *)((head)->tqh_last))->tqh_last))
+
+#ifndef TAILQ_EMPTY
+#define TAILQ_EMPTY(head) ((head)->tqh_first == NULL)
+#endif
+#ifndef TAILQ_FOREACH
+#define TAILQ_FOREACH(var, head, field) \
+ for (var = TAILQ_FIRST(head); var; var = TAILQ_NEXT(var, field))
+#endif
+
+/* macro for timeout/untimeout */
+#if (__FreeBSD_version > 300000) || defined(__NetBSD__)
+/* use callout */
+#include <rtems/freebsd/sys/callout.h>
+
+#if (__FreeBSD_version > 500000)
+#define CALLOUT_INIT(c) callout_init((c), 0)
+#else
+#define CALLOUT_INIT(c) callout_init((c))
+#endif
+#define CALLOUT_RESET(c,t,f,a) callout_reset((c),(t),(f),(a))
+#define CALLOUT_STOP(c) callout_stop((c))
+#if !defined(CALLOUT_INITIALIZER) && (__FreeBSD_version < 600000)
+#define CALLOUT_INITIALIZER { { { NULL } }, 0, NULL, NULL, 0 }
+#endif
+#elif defined(__OpenBSD__)
+#include <rtems/freebsd/sys/timeout.h>
+/* callout structure as a wrapper of struct timeout */
+struct callout {
+ struct timeout c_to;
+};
+#define CALLOUT_INIT(c) do { bzero((c), sizeof(*(c))); } while (/*CONSTCOND*/ 0)
+#define CALLOUT_RESET(c,t,f,a) do { if (!timeout_initialized(&(c)->c_to)) \
+ timeout_set(&(c)->c_to, (f), (a)); \
+ timeout_add(&(c)->c_to, (t)); } while (/*CONSTCOND*/ 0)
+#define CALLOUT_STOP(c) timeout_del(&(c)->c_to)
+#define CALLOUT_INITIALIZER { { { NULL }, NULL, NULL, 0, 0 } }
+#else
+/* use old-style timeout/untimeout */
+/* dummy callout structure */
+struct callout {
+ void *c_arg; /* function argument */
+	void		(*c_func)(void *);	/* function to call */
+};
+#define CALLOUT_INIT(c) do { bzero((c), sizeof(*(c))); } while (/*CONSTCOND*/ 0)
+#define CALLOUT_RESET(c,t,f,a) do { (c)->c_arg = (a); \
+ (c)->c_func = (f); \
+ timeout((f),(a),(t)); } while (/*CONSTCOND*/ 0)
+#define CALLOUT_STOP(c) untimeout((c)->c_func,(c)->c_arg)
+#define CALLOUT_INITIALIZER { NULL, NULL }
+#endif
+#if !defined(__FreeBSD__)
+typedef void (timeout_t)(void *);
+#endif
+
+#define m_pktlen(m) ((m)->m_pkthdr.len)
+
+struct ifnet; struct mbuf;
+struct pf_altq;
+#ifdef ALTQ3_CLFIER_COMPAT
+struct flowinfo;
+#endif
+
+void *altq_lookup(char *, int);
+#ifdef ALTQ3_CLFIER_COMPAT
+int altq_extractflow(struct mbuf *, int, struct flowinfo *, u_int32_t);
+int acc_add_filter(struct acc_classifier *, struct flow_filter *,
+ void *, u_long *);
+int acc_delete_filter(struct acc_classifier *, u_long);
+int acc_discard_filters(struct acc_classifier *, void *, int);
+void *acc_classify(void *, struct mbuf *, int);
+#endif
+u_int8_t read_dsfield(struct mbuf *, struct altq_pktattr *);
+void write_dsfield(struct mbuf *, struct altq_pktattr *, u_int8_t);
+void altq_assert(const char *, int, const char *);
+int tbr_set(struct ifaltq *, struct tb_profile *);
+int tbr_get(struct ifaltq *, struct tb_profile *);
+
+int altq_pfattach(struct pf_altq *);
+int altq_pfdetach(struct pf_altq *);
+int altq_add(struct pf_altq *);
+int altq_remove(struct pf_altq *);
+int altq_add_queue(struct pf_altq *);
+int altq_remove_queue(struct pf_altq *);
+int altq_getqstats(struct pf_altq *, void *, int *);
+
+int cbq_pfattach(struct pf_altq *);
+int cbq_add_altq(struct pf_altq *);
+int cbq_remove_altq(struct pf_altq *);
+int cbq_add_queue(struct pf_altq *);
+int cbq_remove_queue(struct pf_altq *);
+int cbq_getqstats(struct pf_altq *, void *, int *);
+
+int priq_pfattach(struct pf_altq *);
+int priq_add_altq(struct pf_altq *);
+int priq_remove_altq(struct pf_altq *);
+int priq_add_queue(struct pf_altq *);
+int priq_remove_queue(struct pf_altq *);
+int priq_getqstats(struct pf_altq *, void *, int *);
+
+int hfsc_pfattach(struct pf_altq *);
+int hfsc_add_altq(struct pf_altq *);
+int hfsc_remove_altq(struct pf_altq *);
+int hfsc_add_queue(struct pf_altq *);
+int hfsc_remove_queue(struct pf_altq *);
+int hfsc_getqstats(struct pf_altq *, void *, int *);
+
+#endif /* _KERNEL */
+#endif /* _ALTQ_ALTQ_VAR_HH_ */
diff --git a/contrib/altq/rtems/freebsd/altq/altqconf.h b/contrib/altq/rtems/freebsd/altq/altqconf.h
new file mode 100644
index 00000000..69f8d0b7
--- /dev/null
+++ b/contrib/altq/rtems/freebsd/altq/altqconf.h
@@ -0,0 +1,29 @@
+/* $OpenBSD: altqconf.h,v 1.1 2001/06/27 05:28:36 kjc Exp $ */
+/* $NetBSD: altqconf.h,v 1.2 2001/05/30 11:57:16 mrg Exp $ */
+
+#if defined(_KERNEL_OPT) || defined(__OpenBSD__)
+
+#if defined(_KERNEL_OPT)
+#include <rtems/freebsd/local/opt_altq_enabled.h>
+#endif
+
+#include <rtems/freebsd/sys/conf.h>
+
+#ifdef ALTQ
+#define NALTQ 1
+#else
+#define NALTQ 0
+#endif
+
+cdev_decl(altq);
+
+#ifdef __OpenBSD__
+#define cdev_altq_init(c,n) { \
+ dev_init(c,n,open), dev_init(c,n,close), (dev_type_read((*))) enodev, \
+ (dev_type_write((*))) enodev, dev_init(c,n,ioctl), \
+ (dev_type_stop((*))) enodev, 0, (dev_type_select((*))) enodev, \
+ (dev_type_mmap((*))) enodev }
+#else
+#define cdev_altq_init(x,y) cdev__oci_init(x,y)
+#endif
+#endif /* defined(_KERNEL_OPT) || defined(__OpenBSD__) */
diff --git a/contrib/altq/rtems/freebsd/altq/if_altq.h b/contrib/altq/rtems/freebsd/altq/if_altq.h
new file mode 100644
index 00000000..ddc2b08f
--- /dev/null
+++ b/contrib/altq/rtems/freebsd/altq/if_altq.h
@@ -0,0 +1,191 @@
+/* $FreeBSD$ */
+/* $KAME: if_altq.h,v 1.11 2003/07/10 12:07:50 kjc Exp $ */
+
+/*
+ * Copyright (C) 1997-2003
+ * Sony Computer Science Laboratories Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY SONY CSL AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL SONY CSL OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+#ifndef _ALTQ_IF_ALTQ_HH_
+#define _ALTQ_IF_ALTQ_HH_
+
+#if (defined(__FreeBSD__) && __FreeBSD_version >= 500000)
+#include <rtems/freebsd/sys/lock.h> /* XXX */
+#include <rtems/freebsd/sys/mutex.h> /* XXX */
+#include <rtems/freebsd/sys/event.h> /* XXX */
+#endif
+
+#ifdef _KERNEL_OPT
+#include <rtems/freebsd/altq/altqconf.h>
+#endif
+
+struct altq_pktattr; struct tb_regulator; struct top_cdnr;
+
+/*
+ * Structure defining a queue for a network interface.
+ */
+struct ifaltq {
+ /* fields compatible with struct ifqueue */
+ struct mbuf *ifq_head;
+ struct mbuf *ifq_tail;
+ int ifq_len;
+ int ifq_maxlen;
+ int ifq_drops;
+#if (defined(__FreeBSD__) && __FreeBSD_version >= 500000)
+ struct mtx ifq_mtx;
+#endif
+
+ /* driver owned queue (used for bulk dequeue and prepend) UNLOCKED */
+ struct mbuf *ifq_drv_head;
+ struct mbuf *ifq_drv_tail;
+ int ifq_drv_len;
+ int ifq_drv_maxlen;
+
+ /* alternate queueing related fields */
+ int altq_type; /* discipline type */
+ int altq_flags; /* flags (e.g. ready, in-use) */
+ void *altq_disc; /* for discipline-specific use */
+ struct ifnet *altq_ifp; /* back pointer to interface */
+
+ int (*altq_enqueue)(struct ifaltq *, struct mbuf *,
+ struct altq_pktattr *);
+ struct mbuf *(*altq_dequeue)(struct ifaltq *, int);
+ int (*altq_request)(struct ifaltq *, int, void *);
+
+ /* classifier fields */
+ void *altq_clfier; /* classifier-specific use */
+ void *(*altq_classify)(void *, struct mbuf *, int);
+
+ /* token bucket regulator */
+ struct tb_regulator *altq_tbr;
+
+ /* input traffic conditioner (doesn't belong to the output queue...) */
+ struct top_cdnr *altq_cdnr;
+};
+
+
+#ifdef _KERNEL
+
+/*
+ * packet attributes used by queueing disciplines.
+ * pattr_class is a discipline-dependent scheduling class that is
+ * set by a classifier.
+ * pattr_hdr and pattr_af may be used by a discipline to access
+ * the header within an mbuf (e.g. ECN needs to update the CE bit).
+ * note that pattr_hdr could be stale after m_pullup, though link
+ * layer output routines usually don't use m_pullup. link-level
+ * compression also invalidates these fields. thus, pattr_hdr needs
+ * to be verified when a discipline touches the header.
+ */
+struct altq_pktattr {
+ void *pattr_class; /* sched class set by classifier */
+ int pattr_af; /* address family */
+ caddr_t pattr_hdr; /* saved header position in mbuf */
+};
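+
+/*
+ * editor's sketch (hypothetical enqueue path, not part of the
+ * original import): the caller records the address family and the
+ * header position, and the classifier fills in pattr_class before
+ * the packet is handed to the discipline.
+ */
+#if 0
+	struct altq_pktattr pktattr;
+
+	pktattr.pattr_af = af;
+	pktattr.pattr_hdr = mtod(m, caddr_t);
+	pktattr.pattr_class = NULL;
+	if (ALTQ_NEEDS_CLASSIFY(ifq))
+		pktattr.pattr_class = (*ifq->altq_classify)(
+		    ifq->altq_clfier, m, af);
+	ALTQ_ENQUEUE(ifq, m, &pktattr, error);
+#endif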
+
+/*
+ * mbuf tag to carry a queue id (and hints for ECN).
+ */
+struct altq_tag {
+ u_int32_t qid; /* queue id */
+ /* hints for ecn */
+ int af; /* address family */
+ void *hdr; /* saved header position in mbuf */
+};
+
+/*
+ * a token-bucket regulator limits the rate at which a network driver can
+ * dequeue packets from the output queue.
+ * modern cards are able to buffer a large number of packets and dequeue
+ * too many packets at a time. this bursty dequeue behavior makes it
+ * impossible for queueing disciplines to schedule packets.
+ * a token-bucket is used to control the burst size in a device
+ * independent manner.
+ */
+struct tb_regulator {
+ int64_t tbr_rate; /* (scaled) token bucket rate */
+ int64_t tbr_depth; /* (scaled) token bucket depth */
+
+ int64_t tbr_token; /* (scaled) current token */
+ int64_t tbr_filluptime; /* (scaled) time to fill up bucket */
+ u_int64_t tbr_last; /* last time token was updated */
+
+ int tbr_lastop; /* last dequeue operation type
+ needed for poll-and-dequeue */
+};
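+
+/*
+ * editor's sketch (hypothetical helper, not part of the original
+ * import; the production logic lives in tbr_dequeue() in
+ * altq_subr.c): tokens accumulate at tbr_rate per machclk tick and
+ * are capped at tbr_depth; a dequeue is allowed only while the
+ * token count is positive.
+ */
+#if 0
+static int
+tbr_token_available(struct tb_regulator *tbr)
+{
+	u_int64_t now, interval;
+
+	now = read_machclk();
+	interval = now - tbr->tbr_last;
+	if (interval >= (u_int64_t)tbr->tbr_filluptime)
+		tbr->tbr_token = tbr->tbr_depth;	/* bucket refilled */
+	else {
+		tbr->tbr_token += interval * tbr->tbr_rate;
+		if (tbr->tbr_token > tbr->tbr_depth)
+			tbr->tbr_token = tbr->tbr_depth;
+	}
+	tbr->tbr_last = now;
+	return (tbr->tbr_token > 0);
+}
+#endif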
+
+/* if_altqflags */
+#define ALTQF_READY 0x01 /* driver supports alternate queueing */
+#define ALTQF_ENABLED 0x02 /* altq is in use */
+#define ALTQF_CLASSIFY 0x04 /* classify packets */
+#define ALTQF_CNDTNING 0x08 /* altq traffic conditioning is enabled */
+#define ALTQF_DRIVER1 0x40 /* driver specific */
+
+/* if_altqflags set internally only: */
+#define ALTQF_CANTCHANGE (ALTQF_READY)
+
+/* altq_dequeue 2nd arg */
+#define ALTDQ_REMOVE 1 /* dequeue mbuf from the queue */
+#define ALTDQ_POLL 2 /* don't dequeue mbuf from the queue */
+
+/* altq request types (currently only purge is defined) */
+#define ALTRQ_PURGE 1 /* purge all packets */
+
+#define ALTQ_IS_READY(ifq) ((ifq)->altq_flags & ALTQF_READY)
+#define ALTQ_IS_ENABLED(ifq) ((ifq)->altq_flags & ALTQF_ENABLED)
+#define ALTQ_NEEDS_CLASSIFY(ifq) ((ifq)->altq_flags & ALTQF_CLASSIFY)
+#define ALTQ_IS_CNDTNING(ifq) ((ifq)->altq_flags & ALTQF_CNDTNING)
+
+#define ALTQ_SET_CNDTNING(ifq) ((ifq)->altq_flags |= ALTQF_CNDTNING)
+#define ALTQ_CLEAR_CNDTNING(ifq) ((ifq)->altq_flags &= ~ALTQF_CNDTNING)
+#define ALTQ_IS_ATTACHED(ifq) ((ifq)->altq_disc != NULL)
+
+#define ALTQ_ENQUEUE(ifq, m, pa, err) \
+ (err) = (*(ifq)->altq_enqueue)((ifq),(m),(pa))
+#define ALTQ_DEQUEUE(ifq, m) \
+ (m) = (*(ifq)->altq_dequeue)((ifq), ALTDQ_REMOVE)
+#define ALTQ_POLL(ifq, m) \
+ (m) = (*(ifq)->altq_dequeue)((ifq), ALTDQ_POLL)
+#define ALTQ_PURGE(ifq) \
+ (void)(*(ifq)->altq_request)((ifq), ALTRQ_PURGE, (void *)0)
+#define ALTQ_IS_EMPTY(ifq) ((ifq)->ifq_len == 0)
+#define TBR_IS_ENABLED(ifq) ((ifq)->altq_tbr != NULL)
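+
+/*
+ * editor's sketch (hypothetical driver start routine, not part of
+ * the original import): ALTQ_POLL() peeks at the next packet
+ * without removing it, so the driver commits with ALTQ_DEQUEUE()
+ * only once it knows the hardware can accept the packet.
+ * hw_can_transmit() and hw_transmit() are made-up placeholders.
+ */
+#if 0
+	while (!ALTQ_IS_EMPTY(&ifp->if_snd)) {
+		ALTQ_POLL(&ifp->if_snd, m);
+		if (m == NULL || !hw_can_transmit(sc, m))
+			break;
+		ALTQ_DEQUEUE(&ifp->if_snd, m);
+		if (m != NULL)
+			hw_transmit(sc, m);
+	}
+#endif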
+
+extern int altq_attach(struct ifaltq *, int, void *,
+ int (*)(struct ifaltq *, struct mbuf *,
+ struct altq_pktattr *),
+ struct mbuf *(*)(struct ifaltq *, int),
+ int (*)(struct ifaltq *, int, void *),
+ void *,
+ void *(*)(void *, struct mbuf *, int));
+extern int altq_detach(struct ifaltq *);
+extern int altq_enable(struct ifaltq *);
+extern int altq_disable(struct ifaltq *);
+extern struct mbuf *(*tbr_dequeue_ptr)(struct ifaltq *, int);
+extern int (*altq_input)(struct mbuf *, int);
+#if 0 /* ALTQ3_CLFIER_COMPAT */
+void altq_etherclassify(struct ifaltq *, struct mbuf *, struct altq_pktattr *);
+#endif
+#endif /* _KERNEL */
+
+#endif /* _ALTQ_IF_ALTQ_HH_ */
diff --git a/contrib/pf/rtems/freebsd/net/if_pflog.c b/contrib/pf/rtems/freebsd/net/if_pflog.c
new file mode 100644
index 00000000..cad97218
--- /dev/null
+++ b/contrib/pf/rtems/freebsd/net/if_pflog.c
@@ -0,0 +1,438 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/* $OpenBSD: if_pflog.c,v 1.22 2006/12/15 09:31:20 otto Exp $ */
+/*
+ * The authors of this code are John Ioannidis (ji@tla.org),
+ * Angelos D. Keromytis (kermit@csd.uch.gr) and
+ * Niels Provos (provos@physnet.uni-hamburg.de).
+ *
+ * This code was written by John Ioannidis for BSD/OS in Athens, Greece,
+ * in November 1995.
+ *
+ * Ported to OpenBSD and NetBSD, with additional transforms, in December 1996,
+ * by Angelos D. Keromytis.
+ *
+ * Additional transforms and features in 1997 and 1998 by Angelos D. Keromytis
+ * and Niels Provos.
+ *
+ * Copyright (C) 1995, 1996, 1997, 1998 by John Ioannidis, Angelos D. Keromytis
+ * and Niels Provos.
+ * Copyright (c) 2001, Angelos D. Keromytis, Niels Provos.
+ *
+ * Permission to use, copy, and modify this software with or without fee
+ * is hereby granted, provided that this entire notice is included in
+ * all copies of any software which is or includes a copy or
+ * modification of this software.
+ * You may use this code under the GNU public license if you so wish. Please
+ * contribute changes back to the authors under this freer than GPL license
+ * so that we may further the use of strong encryption without limitations to
+ * all.
+ *
+ * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
+ * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
+ * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
+ * PURPOSE.
+ */
+
+#ifdef __FreeBSD__
+#include <rtems/freebsd/local/opt_inet.h>
+#include <rtems/freebsd/local/opt_inet6.h>
+#include <rtems/freebsd/local/opt_bpf.h>
+#include <rtems/freebsd/local/opt_pf.h>
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#ifdef DEV_BPF
+#define NBPFILTER DEV_BPF
+#else
+#define NBPFILTER 0
+#endif
+
+#ifdef DEV_PFLOG
+#define NPFLOG DEV_PFLOG
+#else
+#define NPFLOG 0
+#endif
+
+#else /* ! __FreeBSD__ */
+#include <rtems/freebsd/local/bpfilter.h>
+#include <rtems/freebsd/local/pflog.h>
+#endif /* __FreeBSD__ */
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/proc.h>
+#include <rtems/freebsd/sys/socket.h>
+#ifdef __FreeBSD__
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/limits.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/module.h>
+#include <rtems/freebsd/sys/sockio.h>
+#else
+#include <rtems/freebsd/sys/ioctl.h>
+#endif
+
+#include <rtems/freebsd/net/if.h>
+#ifdef __FreeBSD__
+#include <rtems/freebsd/net/if_clone.h>
+#endif
+#include <rtems/freebsd/net/if_types.h>
+#include <rtems/freebsd/net/route.h>
+#include <rtems/freebsd/net/bpf.h>
+
+#ifdef INET
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/in_var.h>
+#include <rtems/freebsd/netinet/in_systm.h>
+#include <rtems/freebsd/netinet/ip.h>
+#endif
+
+#ifdef INET6
+#ifndef INET
+#include <rtems/freebsd/netinet/in.h>
+#endif
+#include <rtems/freebsd/netinet6/nd6.h>
+#endif /* INET6 */
+
+#include <rtems/freebsd/net/pfvar.h>
+#include <rtems/freebsd/net/if_pflog.h>
+
+#ifdef INET
+#ifdef __FreeBSD__
+#include <rtems/freebsd/machine/in_cksum.h>
+#endif
+#endif
+
+#define PFLOGMTU (32768 + MHLEN + MLEN)
+
+#ifdef PFLOGDEBUG
+#define DPRINTF(x) do { if (pflogdebug) printf x ; } while (0)
+#else
+#define DPRINTF(x)
+#endif
+
+void pflogattach(int);
+int pflogoutput(struct ifnet *, struct mbuf *, struct sockaddr *,
+ struct route *);
+int pflogioctl(struct ifnet *, u_long, caddr_t);
+void pflogstart(struct ifnet *);
+#ifdef __FreeBSD__
+static int pflog_clone_create(struct if_clone *, int, caddr_t);
+static void pflog_clone_destroy(struct ifnet *);
+#else
+int pflog_clone_create(struct if_clone *, int);
+int pflog_clone_destroy(struct ifnet *);
+#endif
+
+LIST_HEAD(, pflog_softc) pflogif_list;
+#ifdef __FreeBSD__
+IFC_SIMPLE_DECLARE(pflog, 1);
+#else
+struct if_clone pflog_cloner =
+ IF_CLONE_INITIALIZER("pflog", pflog_clone_create, pflog_clone_destroy);
+#endif
+
+struct ifnet *pflogifs[PFLOGIFS_MAX]; /* for fast access */
+
+#ifndef __FreeBSD__
+extern int ifqmaxlen;
+#endif
+
+void
+pflogattach(int npflog)
+{
+ int i;
+ LIST_INIT(&pflogif_list);
+ for (i = 0; i < PFLOGIFS_MAX; i++)
+ pflogifs[i] = NULL;
+#ifndef __FreeBSD__
+ (void) pflog_clone_create(&pflog_cloner, 0);
+#endif
+ if_clone_attach(&pflog_cloner);
+}
+
+#ifdef __FreeBSD__
+static int
+pflog_clone_create(struct if_clone *ifc, int unit, caddr_t param)
+#else
+int
+pflog_clone_create(struct if_clone *ifc, int unit)
+#endif
+{
+ struct ifnet *ifp;
+ struct pflog_softc *pflogif;
+ int s;
+
+ if (unit >= PFLOGIFS_MAX)
+ return (EINVAL);
+
+ if ((pflogif = malloc(sizeof(*pflogif), M_DEVBUF, M_NOWAIT)) == NULL)
+ return (ENOMEM);
+ bzero(pflogif, sizeof(*pflogif));
+
+ pflogif->sc_unit = unit;
+#ifdef __FreeBSD__
+ ifp = pflogif->sc_ifp = if_alloc(IFT_PFLOG);
+ if (ifp == NULL) {
+ free(pflogif, M_DEVBUF);
+ return (ENOSPC);
+ }
+ if_initname(ifp, ifc->ifc_name, unit);
+#else
+ ifp = &pflogif->sc_if;
+ snprintf(ifp->if_xname, sizeof ifp->if_xname, "pflog%d", unit);
+#endif
+ ifp->if_softc = pflogif;
+ ifp->if_mtu = PFLOGMTU;
+ ifp->if_ioctl = pflogioctl;
+ ifp->if_output = pflogoutput;
+ ifp->if_start = pflogstart;
+#ifndef __FreeBSD__
+ ifp->if_type = IFT_PFLOG;
+#endif
+ ifp->if_snd.ifq_maxlen = ifqmaxlen;
+ ifp->if_hdrlen = PFLOG_HDRLEN;
+ if_attach(ifp);
+#ifndef __FreeBSD__
+ if_alloc_sadl(ifp);
+#endif
+
+#if NBPFILTER > 0
+#ifdef __FreeBSD__
+ bpfattach(ifp, DLT_PFLOG, PFLOG_HDRLEN);
+#else
+ bpfattach(&pflogif->sc_if.if_bpf, ifp, DLT_PFLOG, PFLOG_HDRLEN);
+#endif
+#endif
+
+ s = splnet();
+#ifdef __FreeBSD__
+ PF_LOCK();
+#endif
+ LIST_INSERT_HEAD(&pflogif_list, pflogif, sc_list);
+ pflogifs[unit] = ifp;
+#ifdef __FreeBSD__
+ PF_UNLOCK();
+#endif
+ splx(s);
+
+ return (0);
+}
+
+#ifdef __FreeBSD__
+static void
+pflog_clone_destroy(struct ifnet *ifp)
+#else
+int
+pflog_clone_destroy(struct ifnet *ifp)
+#endif
+{
+ struct pflog_softc *pflogif = ifp->if_softc;
+ int s;
+
+ s = splnet();
+#ifdef __FreeBSD__
+ PF_LOCK();
+#endif
+ pflogifs[pflogif->sc_unit] = NULL;
+ LIST_REMOVE(pflogif, sc_list);
+#ifdef __FreeBSD__
+ PF_UNLOCK();
+#endif
+ splx(s);
+
+#if NBPFILTER > 0
+ bpfdetach(ifp);
+#endif
+ if_detach(ifp);
+#ifdef __FreeBSD__
+ if_free(ifp);
+#endif
+ free(pflogif, M_DEVBUF);
+#ifndef __FreeBSD__
+ return (0);
+#endif
+}
+
+/*
+ * Start output on the pflog interface.
+ */
+void
+pflogstart(struct ifnet *ifp)
+{
+ struct mbuf *m;
+#ifndef __FreeBSD__
+ int s;
+#endif
+
+ for (;;) {
+#ifdef __FreeBSD__
+ IF_LOCK(&ifp->if_snd);
+ _IF_DROP(&ifp->if_snd);
+ _IF_DEQUEUE(&ifp->if_snd, m);
+ IF_UNLOCK(&ifp->if_snd);
+#else
+ s = splnet();
+ IF_DROP(&ifp->if_snd);
+ IF_DEQUEUE(&ifp->if_snd, m);
+ splx(s);
+#endif
+
+ if (m == NULL)
+ return;
+ else
+ m_freem(m);
+ }
+}
+
+int
+pflogoutput(struct ifnet *ifp, struct mbuf *m, struct sockaddr *dst,
+ struct route *ro)
+{
+ m_freem(m);
+ return (0);
+}
+
+/* ARGSUSED */
+int
+pflogioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
+{
+ switch (cmd) {
+ case SIOCSIFADDR:
+ case SIOCAIFADDR:
+ case SIOCSIFDSTADDR:
+ case SIOCSIFFLAGS:
+#ifdef __FreeBSD__
+ if (ifp->if_flags & IFF_UP)
+ ifp->if_drv_flags |= IFF_DRV_RUNNING;
+ else
+ ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
+#else
+ if (ifp->if_flags & IFF_UP)
+ ifp->if_flags |= IFF_RUNNING;
+ else
+ ifp->if_flags &= ~IFF_RUNNING;
+#endif
+ break;
+ default:
+ return (EINVAL);
+ }
+
+ return (0);
+}
+
+int
+pflog_packet(struct pfi_kif *kif, struct mbuf *m, sa_family_t af, u_int8_t dir,
+ u_int8_t reason, struct pf_rule *rm, struct pf_rule *am,
+ struct pf_ruleset *ruleset, struct pf_pdesc *pd)
+{
+#if NBPFILTER > 0
+ struct ifnet *ifn;
+ struct pfloghdr hdr;
+
+ if (kif == NULL || m == NULL || rm == NULL || pd == NULL)
+ return (-1);
+
+ if ((ifn = pflogifs[rm->logif]) == NULL || !ifn->if_bpf)
+ return (0);
+
+ bzero(&hdr, sizeof(hdr));
+ hdr.length = PFLOG_REAL_HDRLEN;
+ hdr.af = af;
+ hdr.action = rm->action;
+ hdr.reason = reason;
+ memcpy(hdr.ifname, kif->pfik_name, sizeof(hdr.ifname));
+
+ if (am == NULL) {
+ hdr.rulenr = htonl(rm->nr);
+ hdr.subrulenr = -1;
+ } else {
+ hdr.rulenr = htonl(am->nr);
+ hdr.subrulenr = htonl(rm->nr);
+ if (ruleset != NULL && ruleset->anchor != NULL)
+ strlcpy(hdr.ruleset, ruleset->anchor->name,
+ sizeof(hdr.ruleset));
+ }
+ if (rm->log & PF_LOG_SOCKET_LOOKUP && !pd->lookup.done)
+#ifdef __FreeBSD__
+ /*
+ * XXX: This should not happen as we force an early lookup
+ * via debug.pfugidhack
+ */
+ ; /* empty */
+#else
+ pd->lookup.done = pf_socket_lookup(dir, pd);
+#endif
+ if (pd->lookup.done > 0) {
+ hdr.uid = pd->lookup.uid;
+ hdr.pid = pd->lookup.pid;
+ } else {
+ hdr.uid = UID_MAX;
+ hdr.pid = NO_PID;
+ }
+ hdr.rule_uid = rm->cuid;
+ hdr.rule_pid = rm->cpid;
+ hdr.dir = dir;
+
+#ifdef INET
+ if (af == AF_INET && dir == PF_OUT) {
+ struct ip *ip;
+
+ ip = mtod(m, struct ip *);
+ ip->ip_sum = 0;
+ ip->ip_sum = in_cksum(m, ip->ip_hl << 2);
+ }
+#endif /* INET */
+
+ ifn->if_opackets++;
+ ifn->if_obytes += m->m_pkthdr.len;
+#ifdef __FreeBSD__
+ BPF_MTAP2(ifn, &hdr, PFLOG_HDRLEN, m);
+#else
+ bpf_mtap_hdr(ifn->if_bpf, (char *)&hdr, PFLOG_HDRLEN, m,
+ BPF_DIRECTION_OUT);
+#endif
+#endif
+
+ return (0);
+}
+
+#ifdef __FreeBSD__
+static int
+pflog_modevent(module_t mod, int type, void *data)
+{
+ int error = 0;
+
+ switch (type) {
+ case MOD_LOAD:
+ pflogattach(1);
+ PF_LOCK();
+ pflog_packet_ptr = pflog_packet;
+ PF_UNLOCK();
+ break;
+ case MOD_UNLOAD:
+ PF_LOCK();
+ pflog_packet_ptr = NULL;
+ PF_UNLOCK();
+ if_clone_detach(&pflog_cloner);
+ break;
+ default:
+ error = EINVAL;
+ break;
+ }
+
+ return error;
+}
+
+static moduledata_t pflog_mod = { "pflog", pflog_modevent, 0 };
+
+#define PFLOG_MODVER 1
+
+DECLARE_MODULE(pflog, pflog_mod, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_ANY);
+MODULE_VERSION(pflog, PFLOG_MODVER);
+MODULE_DEPEND(pflog, pf, PF_MODVER, PF_MODVER, PF_MODVER);
+#endif /* __FreeBSD__ */
diff --git a/contrib/pf/rtems/freebsd/net/if_pflog.h b/contrib/pf/rtems/freebsd/net/if_pflog.h
new file mode 100644
index 00000000..9e9efbef
--- /dev/null
+++ b/contrib/pf/rtems/freebsd/net/if_pflog.h
@@ -0,0 +1,103 @@
+/* $FreeBSD$ */
+/* $OpenBSD: if_pflog.h,v 1.14 2006/10/25 11:27:01 henning Exp $ */
+/*
+ * Copyright 2001 Niels Provos <provos@citi.umich.edu>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _NET_IF_PFLOG_HH_
+#define _NET_IF_PFLOG_HH_
+
+#define PFLOGIFS_MAX 16
+
+#ifdef _KERNEL
+struct pflog_softc {
+#ifdef __FreeBSD__
+ struct ifnet *sc_ifp; /* the interface pointer */
+#else
+ struct ifnet sc_if; /* the interface */
+#endif
+ int sc_unit;
+ LIST_ENTRY(pflog_softc) sc_list;
+};
+#endif /* _KERNEL */
+
+#define PFLOG_RULESET_NAME_SIZE 16
+
+struct pfloghdr {
+ u_int8_t length;
+ sa_family_t af;
+ u_int8_t action;
+ u_int8_t reason;
+ char ifname[IFNAMSIZ];
+ char ruleset[PFLOG_RULESET_NAME_SIZE];
+ u_int32_t rulenr;
+ u_int32_t subrulenr;
+ uid_t uid;
+ pid_t pid;
+ uid_t rule_uid;
+ pid_t rule_pid;
+ u_int8_t dir;
+ u_int8_t pad[3];
+};
+
+#define PFLOG_HDRLEN sizeof(struct pfloghdr)
+/* minus pad, also used as a signature */
+#define PFLOG_REAL_HDRLEN offsetof(struct pfloghdr, pad)
+
+/* XXX remove later when old format logs are no longer needed */
+struct old_pfloghdr {
+ u_int32_t af;
+ char ifname[IFNAMSIZ];
+ short rnr;
+ u_short reason;
+ u_short action;
+ u_short dir;
+};
+#define OLD_PFLOG_HDRLEN sizeof(struct old_pfloghdr)
+
+#ifdef _KERNEL
+
+#ifdef __FreeBSD__
+struct pf_rule;
+struct pf_ruleset;
+struct pfi_kif;
+struct pf_pdesc;
+
+typedef int pflog_packet_t(struct pfi_kif *, struct mbuf *, sa_family_t,
+ u_int8_t, u_int8_t, struct pf_rule *, struct pf_rule *,
+ struct pf_ruleset *, struct pf_pdesc *);
+extern pflog_packet_t *pflog_packet_ptr;
+#define PFLOG_PACKET(i,x,a,b,c,d,e,f,g,h) do { \
+ if (pflog_packet_ptr != NULL) \
+ pflog_packet_ptr(i,a,b,c,d,e,f,g,h); \
+} while (0)
+#else /* ! __FreeBSD__ */
+#if NPFLOG > 0
+#define PFLOG_PACKET(i,x,a,b,c,d,e,f,g,h) pflog_packet(i,a,b,c,d,e,f,g,h)
+#else
+#define PFLOG_PACKET(i,x,a,b,c,d,e,f,g,h) ((void)0)
+#endif /* NPFLOG > 0 */
+#endif /* __FreeBSD__ */
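+
+/*
+ * editor's note: both expansions of PFLOG_PACKET() drop the second
+ * argument (the packet header pointer); only the remaining nine
+ * arguments reach pflog_packet().  a hypothetical call site,
+ * mirroring the usage in pf_test() in pf.c:
+ */
+#if 0
+	PFLOG_PACKET(kif, h, m, af, dir, reason, r, a, ruleset, pd);
+#endif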
+#endif /* _KERNEL */
+#endif /* _NET_IF_PFLOG_HH_ */
diff --git a/contrib/pf/rtems/freebsd/net/if_pfsync.c b/contrib/pf/rtems/freebsd/net/if_pfsync.c
new file mode 100644
index 00000000..3a48046e
--- /dev/null
+++ b/contrib/pf/rtems/freebsd/net/if_pfsync.c
@@ -0,0 +1,2331 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/* $OpenBSD: if_pfsync.c,v 1.73 2006/11/16 13:13:38 henning Exp $ */
+
+/*
+ * Copyright (c) 2002 Michael Shalayeff
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR OR HIS RELATIVES BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF MIND, USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifdef __FreeBSD__
+#include <rtems/freebsd/local/opt_inet.h>
+#include <rtems/freebsd/local/opt_inet6.h>
+#include <rtems/freebsd/local/opt_carp.h>
+#include <rtems/freebsd/local/opt_bpf.h>
+#include <rtems/freebsd/local/opt_pf.h>
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#ifdef DEV_BPF
+#define NBPFILTER DEV_BPF
+#else
+#define NBPFILTER 0
+#endif
+
+#ifdef DEV_PFSYNC
+#define NPFSYNC DEV_PFSYNC
+#else
+#define NPFSYNC 0
+#endif
+
+#ifdef DEV_CARP
+#define NCARP DEV_CARP
+#else
+#define NCARP 0
+#endif
+#endif /* __FreeBSD__ */
+
+#include <rtems/freebsd/sys/param.h>
+#ifdef __FreeBSD__
+#include <rtems/freebsd/sys/priv.h>
+#endif
+#include <rtems/freebsd/sys/proc.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/time.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/socket.h>
+#ifdef __FreeBSD__
+#include <rtems/freebsd/sys/endian.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/module.h>
+#include <rtems/freebsd/sys/sockio.h>
+#include <rtems/freebsd/sys/taskqueue.h>
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/mutex.h>
+#include <rtems/freebsd/sys/sysctl.h>
+#else
+#include <rtems/freebsd/sys/ioctl.h>
+#include <rtems/freebsd/sys/timeout.h>
+#endif
+#include <rtems/freebsd/sys/kernel.h>
+
+#include <rtems/freebsd/net/if.h>
+#ifdef __FreeBSD__
+#include <rtems/freebsd/net/if_clone.h>
+#endif
+#include <rtems/freebsd/net/if_types.h>
+#include <rtems/freebsd/net/route.h>
+#include <rtems/freebsd/net/bpf.h>
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/if_ether.h>
+#include <rtems/freebsd/netinet/tcp.h>
+#include <rtems/freebsd/netinet/tcp_seq.h>
+
+#ifdef INET
+#include <rtems/freebsd/netinet/in_systm.h>
+#include <rtems/freebsd/netinet/in_var.h>
+#include <rtems/freebsd/netinet/ip.h>
+#include <rtems/freebsd/netinet/ip_var.h>
+#endif
+
+#ifdef INET6
+#include <rtems/freebsd/netinet6/nd6.h>
+#endif /* INET6 */
+
+#ifndef __FreeBSD__
+#include <rtems/freebsd/local/carp.h>
+#endif
+#if NCARP > 0
+#include <rtems/freebsd/netinet/ip_carp.h>
+#endif
+
+#include <rtems/freebsd/net/pfvar.h>
+#include <rtems/freebsd/net/if_pfsync.h>
+
+#ifndef __FreeBSD__
+#include <rtems/freebsd/local/bpfilter.h>
+#include <rtems/freebsd/local/pfsync.h>
+#endif
+
+#define PFSYNC_MINMTU \
+ (sizeof(struct pfsync_header) + sizeof(struct pf_state))
+
+#ifdef PFSYNCDEBUG
+#define DPRINTF(x) do { if (pfsyncdebug) printf x ; } while (0)
+int pfsyncdebug;
+#else
+#define DPRINTF(x)
+#endif
+
+struct pfsync_softc *pfsyncif = NULL;
+struct pfsyncstats pfsyncstats;
+#ifdef __FreeBSD__
+SYSCTL_DECL(_net_inet_pfsync);
+SYSCTL_STRUCT(_net_inet_pfsync, 0, stats, CTLFLAG_RW,
+ &pfsyncstats, pfsyncstats,
+ "PFSYNC statistics (struct pfsyncstats, net/if_pfsync.h)");
+#endif
+
+void pfsyncattach(int);
+#ifdef __FreeBSD__
+int pfsync_clone_create(struct if_clone *, int, caddr_t);
+void pfsync_clone_destroy(struct ifnet *);
+#else
+int pfsync_clone_create(struct if_clone *, int);
+int pfsync_clone_destroy(struct ifnet *);
+#endif
+void pfsync_setmtu(struct pfsync_softc *, int);
+int pfsync_alloc_scrub_memory(struct pfsync_state_peer *,
+ struct pf_state_peer *);
+int pfsync_insert_net_state(struct pfsync_state *, u_int8_t);
+#ifdef PFSYNC_TDB
+void pfsync_update_net_tdb(struct pfsync_tdb *);
+#endif
+int pfsyncoutput(struct ifnet *, struct mbuf *, struct sockaddr *,
+ struct route *);
+int pfsyncioctl(struct ifnet *, u_long, caddr_t);
+void pfsyncstart(struct ifnet *);
+
+struct mbuf *pfsync_get_mbuf(struct pfsync_softc *, u_int8_t, void **);
+int pfsync_request_update(struct pfsync_state_upd *, struct in_addr *);
+int pfsync_sendout(struct pfsync_softc *);
+#ifdef PFSYNC_TDB
+int pfsync_tdb_sendout(struct pfsync_softc *);
+#endif
+int pfsync_sendout_mbuf(struct pfsync_softc *, struct mbuf *);
+void pfsync_timeout(void *);
+#ifdef PFSYNC_TDB
+void pfsync_tdb_timeout(void *);
+#endif
+void pfsync_send_bus(struct pfsync_softc *, u_int8_t);
+void pfsync_bulk_update(void *);
+void pfsync_bulkfail(void *);
+
+#ifdef __FreeBSD__
+void pfsync_ifdetach(void *, struct ifnet *);
+void pfsync_senddef(void *, int);
+
+/* XXX: ugly */
+#define betoh64 (unsigned long long)be64toh
+#define timeout_del callout_stop
+#endif
+
+int pfsync_sync_ok;
+#ifndef __FreeBSD__
+extern int ifqmaxlen;
+#endif
+
+#ifdef __FreeBSD__
+IFC_SIMPLE_DECLARE(pfsync, 1);
+#else
+struct if_clone pfsync_cloner =
+ IF_CLONE_INITIALIZER("pfsync", pfsync_clone_create, pfsync_clone_destroy);
+#endif
+
+void
+pfsyncattach(int npfsync)
+{
+ if_clone_attach(&pfsync_cloner);
+}
+
+int
+#ifdef __FreeBSD__
+pfsync_clone_create(struct if_clone *ifc, int unit, caddr_t param)
+#else
+pfsync_clone_create(struct if_clone *ifc, int unit)
+#endif
+{
+ struct ifnet *ifp;
+
+ if (unit != 0)
+ return (EINVAL);
+
+ pfsync_sync_ok = 1;
+ if ((pfsyncif = malloc(sizeof(*pfsyncif), M_DEVBUF, M_NOWAIT)) == NULL)
+ return (ENOMEM);
+ bzero(pfsyncif, sizeof(*pfsyncif));
+#ifdef __FreeBSD__
+ if ((pfsyncif->sc_imo.imo_membership = (struct in_multi **)malloc(
+ (sizeof(struct in_multi *) * IP_MIN_MEMBERSHIPS), M_DEVBUF,
+ M_NOWAIT)) == NULL) {
+ free(pfsyncif, M_DEVBUF);
+ return (ENOSPC);
+ }
+ pfsyncif->sc_imo.imo_mfilters = NULL;
+ pfsyncif->sc_imo.imo_max_memberships = IP_MIN_MEMBERSHIPS;
+ pfsyncif->sc_imo.imo_multicast_vif = -1;
+
+ ifp = pfsyncif->sc_ifp = if_alloc(IFT_PFSYNC);
+ if (ifp == NULL) {
+ free(pfsyncif->sc_imo.imo_membership, M_DEVBUF);
+ free(pfsyncif, M_DEVBUF);
+ return (ENOSPC);
+ }
+ if_initname(ifp, ifc->ifc_name, unit);
+
+ pfsyncif->sc_detachtag = EVENTHANDLER_REGISTER(ifnet_departure_event,
+ pfsync_ifdetach, pfsyncif, EVENTHANDLER_PRI_ANY);
+ if (pfsyncif->sc_detachtag == NULL) {
+ if_free(ifp);
+ free(pfsyncif->sc_imo.imo_membership, M_DEVBUF);
+ free(pfsyncif, M_DEVBUF);
+ return (ENOSPC);
+ }
+
+ pfsyncif->sc_ifq.ifq_maxlen = ifqmaxlen;
+ mtx_init(&pfsyncif->sc_ifq.ifq_mtx, ifp->if_xname,
+ "pfsync send queue", MTX_DEF);
+ TASK_INIT(&pfsyncif->sc_send_task, 0, pfsync_senddef, pfsyncif);
+#endif
+ pfsyncif->sc_mbuf = NULL;
+ pfsyncif->sc_mbuf_net = NULL;
+#ifdef PFSYNC_TDB
+ pfsyncif->sc_mbuf_tdb = NULL;
+#endif
+ pfsyncif->sc_statep.s = NULL;
+ pfsyncif->sc_statep_net.s = NULL;
+#ifdef PFSYNC_TDB
+ pfsyncif->sc_statep_tdb.t = NULL;
+#endif
+ pfsyncif->sc_maxupdates = 128;
+#ifdef __FreeBSD__
+ pfsyncif->sc_sync_peer.s_addr = htonl(INADDR_PFSYNC_GROUP);
+ pfsyncif->sc_sendaddr.s_addr = htonl(INADDR_PFSYNC_GROUP);
+#else
+ pfsyncif->sc_sync_peer.s_addr = INADDR_PFSYNC_GROUP;
+ pfsyncif->sc_sendaddr.s_addr = INADDR_PFSYNC_GROUP;
+#endif
+ pfsyncif->sc_ureq_received = 0;
+ pfsyncif->sc_ureq_sent = 0;
+ pfsyncif->sc_bulk_send_next = NULL;
+ pfsyncif->sc_bulk_terminator = NULL;
+#ifndef __FreeBSD__
+ ifp = &pfsyncif->sc_if;
+ snprintf(ifp->if_xname, sizeof ifp->if_xname, "pfsync%d", unit);
+#endif
+ ifp->if_softc = pfsyncif;
+ ifp->if_ioctl = pfsyncioctl;
+ ifp->if_output = pfsyncoutput;
+ ifp->if_start = pfsyncstart;
+ ifp->if_type = IFT_PFSYNC;
+ ifp->if_snd.ifq_maxlen = ifqmaxlen;
+ ifp->if_hdrlen = PFSYNC_HDRLEN;
+ pfsync_setmtu(pfsyncif, ETHERMTU);
+#ifdef __FreeBSD__
+ callout_init(&pfsyncif->sc_tmo, CALLOUT_MPSAFE);
+#ifdef PFSYNC_TDB
+ callout_init(&pfsyncif->sc_tdb_tmo, CALLOUT_MPSAFE);
+#endif
+ callout_init(&pfsyncif->sc_bulk_tmo, CALLOUT_MPSAFE);
+ callout_init(&pfsyncif->sc_bulkfail_tmo, CALLOUT_MPSAFE);
+#else
+ timeout_set(&pfsyncif->sc_tmo, pfsync_timeout, pfsyncif);
+ timeout_set(&pfsyncif->sc_tdb_tmo, pfsync_tdb_timeout, pfsyncif);
+ timeout_set(&pfsyncif->sc_bulk_tmo, pfsync_bulk_update, pfsyncif);
+ timeout_set(&pfsyncif->sc_bulkfail_tmo, pfsync_bulkfail, pfsyncif);
+#endif
+ if_attach(ifp);
+#ifndef __FreeBSD__
+ if_alloc_sadl(ifp);
+#endif
+
+#if NCARP > 0
+ if_addgroup(ifp, "carp");
+#endif
+
+#if NBPFILTER > 0
+#ifdef __FreeBSD__
+ bpfattach(ifp, DLT_PFSYNC, PFSYNC_HDRLEN);
+#else
+ bpfattach(&pfsyncif->sc_if.if_bpf, ifp, DLT_PFSYNC, PFSYNC_HDRLEN);
+#endif
+#endif
+
+ return (0);
+}
+
+#ifdef __FreeBSD__
+void
+#else
+int
+#endif
+pfsync_clone_destroy(struct ifnet *ifp)
+{
+#ifdef __FreeBSD__
+ EVENTHANDLER_DEREGISTER(ifnet_departure_event, pfsyncif->sc_detachtag);
+ callout_stop(&pfsyncif->sc_tmo);
+#ifdef PFSYNC_TDB
+ callout_stop(&pfsyncif->sc_tdb_tmo);
+#endif
+ callout_stop(&pfsyncif->sc_bulk_tmo);
+ callout_stop(&pfsyncif->sc_bulkfail_tmo);
+ /* XXX: more? */
+#endif
+
+#if NBPFILTER > 0
+ bpfdetach(ifp);
+#endif
+ if_detach(ifp);
+#ifdef __FreeBSD__
+ if_free(ifp);
+ free(pfsyncif->sc_imo.imo_membership, M_DEVBUF);
+#endif
+ free(pfsyncif, M_DEVBUF);
+ pfsyncif = NULL;
+#ifndef __FreeBSD__
+ return (0);
+#endif
+}
+
+/*
+ * Start output on the pfsync interface.
+ */
+void
+pfsyncstart(struct ifnet *ifp)
+{
+ struct mbuf *m;
+#ifndef __FreeBSD__
+ int s;
+#endif
+
+ for (;;) {
+#ifdef __FreeBSD__
+ IF_LOCK(&ifp->if_snd);
+ _IF_DROP(&ifp->if_snd);
+ _IF_DEQUEUE(&ifp->if_snd, m);
+ IF_UNLOCK(&ifp->if_snd);
+#else
+ s = splnet();
+ IF_DROP(&ifp->if_snd);
+ IF_DEQUEUE(&ifp->if_snd, m);
+ splx(s);
+#endif
+
+ if (m == NULL)
+ return;
+ else
+ m_freem(m);
+ }
+}
+
+int
+pfsync_alloc_scrub_memory(struct pfsync_state_peer *s,
+ struct pf_state_peer *d)
+{
+ if (s->scrub.scrub_flag && d->scrub == NULL) {
+ d->scrub = pool_get(&pf_state_scrub_pl, PR_NOWAIT);
+ if (d->scrub == NULL)
+ return (ENOMEM);
+ bzero(d->scrub, sizeof(*d->scrub));
+ }
+
+ return (0);
+}
+
+int
+pfsync_insert_net_state(struct pfsync_state *sp, u_int8_t chksum_flag)
+{
+ struct pf_state *st = NULL;
+ struct pf_rule *r = NULL;
+ struct pfi_kif *kif;
+
+ if (sp->creatorid == 0 && pf_status.debug >= PF_DEBUG_MISC) {
+ printf("pfsync_insert_net_state: invalid creator id:"
+ " %08x\n", ntohl(sp->creatorid));
+ return (EINVAL);
+ }
+
+ kif = pfi_kif_get(sp->ifname);
+ if (kif == NULL) {
+ if (pf_status.debug >= PF_DEBUG_MISC)
+ printf("pfsync_insert_net_state: "
+ "unknown interface: %s\n", sp->ifname);
+ /* skip this state */
+ return (0);
+ }
+
+ /*
+ * If the ruleset checksums match, it's safe to associate the state
+ * with the rule of that number.
+ */
+ if (sp->rule != htonl(-1) && sp->anchor == htonl(-1) && chksum_flag)
+ r = pf_main_ruleset.rules[
+ PF_RULESET_FILTER].active.ptr_array[ntohl(sp->rule)];
+ else
+ r = &pf_default_rule;
+
+ if (!r->max_states || r->states < r->max_states)
+ st = pool_get(&pf_state_pl, PR_NOWAIT);
+ if (st == NULL) {
+ pfi_kif_unref(kif, PFI_KIF_REF_NONE);
+ return (ENOMEM);
+ }
+ bzero(st, sizeof(*st));
+
+ /* allocate memory for scrub info */
+ if (pfsync_alloc_scrub_memory(&sp->src, &st->src) ||
+ pfsync_alloc_scrub_memory(&sp->dst, &st->dst)) {
+ pfi_kif_unref(kif, PFI_KIF_REF_NONE);
+ if (st->src.scrub)
+ pool_put(&pf_state_scrub_pl, st->src.scrub);
+ pool_put(&pf_state_pl, st);
+ return (ENOMEM);
+ }
+
+ st->rule.ptr = r;
+ /* XXX get pointers to nat_rule and anchor */
+
+ /* XXX when we have nat_rule/anchors, use STATE_INC_COUNTERS */
+ r->states++;
+
+ /* fill in the rest of the state entry */
+ pf_state_host_ntoh(&sp->lan, &st->lan);
+ pf_state_host_ntoh(&sp->gwy, &st->gwy);
+ pf_state_host_ntoh(&sp->ext, &st->ext);
+
+ pf_state_peer_ntoh(&sp->src, &st->src);
+ pf_state_peer_ntoh(&sp->dst, &st->dst);
+
+ bcopy(&sp->rt_addr, &st->rt_addr, sizeof(st->rt_addr));
+ st->creation = time_second - ntohl(sp->creation);
+ st->expire = ntohl(sp->expire) + time_second;
+
+ st->af = sp->af;
+ st->proto = sp->proto;
+ st->direction = sp->direction;
+ st->log = sp->log;
+ st->timeout = sp->timeout;
+ st->state_flags = sp->state_flags;
+
+ bcopy(sp->id, &st->id, sizeof(st->id));
+ st->creatorid = sp->creatorid;
+ st->sync_flags = PFSTATE_FROMSYNC;
+
+ if (pf_insert_state(kif, st)) {
+ pfi_kif_unref(kif, PFI_KIF_REF_NONE);
+ /* XXX when we have nat_rule/anchors, use STATE_DEC_COUNTERS */
+ r->states--;
+ if (st->dst.scrub)
+ pool_put(&pf_state_scrub_pl, st->dst.scrub);
+ if (st->src.scrub)
+ pool_put(&pf_state_scrub_pl, st->src.scrub);
+ pool_put(&pf_state_pl, st);
+ return (EINVAL);
+ }
+
+ return (0);
+}
+
+void
+#ifdef __FreeBSD__
+pfsync_input(struct mbuf *m, __unused int off)
+#else
+pfsync_input(struct mbuf *m, ...)
+#endif
+{
+ struct ip *ip = mtod(m, struct ip *);
+ struct pfsync_header *ph;
+ struct pfsync_softc *sc = pfsyncif;
+ struct pf_state *st;
+ struct pf_state_cmp key;
+ struct pfsync_state *sp;
+ struct pfsync_state_upd *up;
+ struct pfsync_state_del *dp;
+ struct pfsync_state_clr *cp;
+ struct pfsync_state_upd_req *rup;
+ struct pfsync_state_bus *bus;
+#ifdef PFSYNC_TDB
+ struct pfsync_tdb *pt;
+#endif
+ struct in_addr src;
+ struct mbuf *mp;
+ int iplen, action, error, i, s, count, offp, sfail, stale = 0;
+ u_int8_t chksum_flag = 0;
+
+ pfsyncstats.pfsyncs_ipackets++;
+
+ /* verify that we have a sync interface configured */
+ if (!sc || !sc->sc_sync_ifp || !pf_status.running)
+ goto done;
+
+ /* verify that the packet came in on the right interface */
+ if (sc->sc_sync_ifp != m->m_pkthdr.rcvif) {
+ pfsyncstats.pfsyncs_badif++;
+ goto done;
+ }
+
+ /* verify that the IP TTL is 255. */
+ if (ip->ip_ttl != PFSYNC_DFLTTL) {
+ pfsyncstats.pfsyncs_badttl++;
+ goto done;
+ }
+
+ iplen = ip->ip_hl << 2;
+
+ if (m->m_pkthdr.len < iplen + sizeof(*ph)) {
+ pfsyncstats.pfsyncs_hdrops++;
+ goto done;
+ }
+
+ if (iplen + sizeof(*ph) > m->m_len) {
+ if ((m = m_pullup(m, iplen + sizeof(*ph))) == NULL) {
+ pfsyncstats.pfsyncs_hdrops++;
+ goto done;
+ }
+ ip = mtod(m, struct ip *);
+ }
+ ph = (struct pfsync_header *)((char *)ip + iplen);
+
+ /* verify the version */
+ if (ph->version != PFSYNC_VERSION) {
+ pfsyncstats.pfsyncs_badver++;
+ goto done;
+ }
+
+ action = ph->action;
+ count = ph->count;
+
+ /* make sure it's a valid action code */
+ if (action >= PFSYNC_ACT_MAX) {
+ pfsyncstats.pfsyncs_badact++;
+ goto done;
+ }
+
+ /* Cheaper to grab this now than having to mess with mbufs later */
+ src = ip->ip_src;
+
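+	/*
+	 * A matching ruleset checksum means both hosts run the same rule
+	 * set, so rule numbers carried in state records can be mapped
+	 * directly onto our ruleset (see pfsync_insert_net_state()).
+	 */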
+ if (!bcmp(&ph->pf_chksum, &pf_status.pf_chksum, PF_MD5_DIGEST_LENGTH))
+ chksum_flag++;
+
+ switch (action) {
+ case PFSYNC_ACT_CLR: {
+ struct pf_state *nexts;
+ struct pfi_kif *kif;
+ u_int32_t creatorid;
+ if ((mp = m_pulldown(m, iplen + sizeof(*ph),
+ sizeof(*cp), &offp)) == NULL) {
+ pfsyncstats.pfsyncs_badlen++;
+ return;
+ }
+ cp = (struct pfsync_state_clr *)(mp->m_data + offp);
+ creatorid = cp->creatorid;
+
+ s = splsoftnet();
+#ifdef __FreeBSD__
+ PF_LOCK();
+#endif
+ if (cp->ifname[0] == '\0') {
+ for (st = RB_MIN(pf_state_tree_id, &tree_id);
+ st; st = nexts) {
+ nexts = RB_NEXT(pf_state_tree_id, &tree_id, st);
+ if (st->creatorid == creatorid) {
+ st->sync_flags |= PFSTATE_FROMSYNC;
+ pf_unlink_state(st);
+ }
+ }
+ } else {
+ if ((kif = pfi_kif_get(cp->ifname)) == NULL) {
+#ifdef __FreeBSD__
+ PF_UNLOCK();
+#endif
+ splx(s);
+ return;
+ }
+ for (st = RB_MIN(pf_state_tree_lan_ext,
+ &kif->pfik_lan_ext); st; st = nexts) {
+ nexts = RB_NEXT(pf_state_tree_lan_ext,
+ &kif->pfik_lan_ext, st);
+ if (st->creatorid == creatorid) {
+ st->sync_flags |= PFSTATE_FROMSYNC;
+ pf_unlink_state(st);
+ }
+ }
+ }
+#ifdef __FreeBSD__
+ PF_UNLOCK();
+#endif
+ splx(s);
+
+ break;
+ }
+ case PFSYNC_ACT_INS:
+ if ((mp = m_pulldown(m, iplen + sizeof(*ph),
+ count * sizeof(*sp), &offp)) == NULL) {
+ pfsyncstats.pfsyncs_badlen++;
+ return;
+ }
+
+ s = splsoftnet();
+#ifdef __FreeBSD__
+ PF_LOCK();
+#endif
+ for (i = 0, sp = (struct pfsync_state *)(mp->m_data + offp);
+ i < count; i++, sp++) {
+ /* check for invalid values */
+ if (sp->timeout >= PFTM_MAX ||
+ sp->src.state > PF_TCPS_PROXY_DST ||
+ sp->dst.state > PF_TCPS_PROXY_DST ||
+ sp->direction > PF_OUT ||
+ (sp->af != AF_INET && sp->af != AF_INET6)) {
+ if (pf_status.debug >= PF_DEBUG_MISC)
+ printf("pfsync_insert: PFSYNC_ACT_INS: "
+ "invalid value\n");
+ pfsyncstats.pfsyncs_badstate++;
+ continue;
+ }
+
+ if ((error = pfsync_insert_net_state(sp,
+ chksum_flag))) {
+ if (error == ENOMEM) {
+#ifdef __FreeBSD__
+ PF_UNLOCK();
+#endif
+ splx(s);
+ goto done;
+ }
+ continue;
+ }
+ }
+#ifdef __FreeBSD__
+ PF_UNLOCK();
+#endif
+ splx(s);
+ break;
+ case PFSYNC_ACT_UPD:
+ if ((mp = m_pulldown(m, iplen + sizeof(*ph),
+ count * sizeof(*sp), &offp)) == NULL) {
+ pfsyncstats.pfsyncs_badlen++;
+ return;
+ }
+
+ s = splsoftnet();
+#ifdef __FreeBSD__
+ PF_LOCK();
+#endif
+ for (i = 0, sp = (struct pfsync_state *)(mp->m_data + offp);
+ i < count; i++, sp++) {
+ int flags = PFSYNC_FLAG_STALE;
+
+ /* check for invalid values */
+ if (sp->timeout >= PFTM_MAX ||
+ sp->src.state > PF_TCPS_PROXY_DST ||
+ sp->dst.state > PF_TCPS_PROXY_DST) {
+ if (pf_status.debug >= PF_DEBUG_MISC)
+ printf("pfsync_insert: PFSYNC_ACT_UPD: "
+ "invalid value\n");
+ pfsyncstats.pfsyncs_badstate++;
+ continue;
+ }
+
+ bcopy(sp->id, &key.id, sizeof(key.id));
+ key.creatorid = sp->creatorid;
+
+ st = pf_find_state_byid(&key);
+ if (st == NULL) {
+ /* insert the update */
+ if (pfsync_insert_net_state(sp, chksum_flag))
+ pfsyncstats.pfsyncs_badstate++;
+ continue;
+ }
+ sfail = 0;
+ if (st->proto == IPPROTO_TCP) {
+ /*
+ * The state should never go backwards except
+ * for syn-proxy states. Neither should the
+ * sequence window slide backwards.
+ */
+ if (st->src.state > sp->src.state &&
+ (st->src.state < PF_TCPS_PROXY_SRC ||
+ sp->src.state >= PF_TCPS_PROXY_SRC))
+ sfail = 1;
+ else if (SEQ_GT(st->src.seqlo,
+ ntohl(sp->src.seqlo)))
+ sfail = 3;
+ else if (st->dst.state > sp->dst.state) {
+ /* There might still be useful
+ * information about the src state here,
+ * so import that part of the update,
+ * then "fail" so we send the updated
+ * state back to the peer who is missing
+				 * what we know. */
+ pf_state_peer_ntoh(&sp->src, &st->src);
+ /* XXX do anything with timeouts? */
+ sfail = 7;
+ flags = 0;
+ } else if (st->dst.state >= TCPS_SYN_SENT &&
+ SEQ_GT(st->dst.seqlo, ntohl(sp->dst.seqlo)))
+ sfail = 4;
+ } else {
+ /*
+				 * Non-TCP protocol state machines always go
+				 * forwards.
+ */
+ if (st->src.state > sp->src.state)
+ sfail = 5;
+ else if (st->dst.state > sp->dst.state)
+ sfail = 6;
+ }
+ if (sfail) {
+ if (pf_status.debug >= PF_DEBUG_MISC)
+ printf("pfsync: %s stale update "
+ "(%d) id: %016llx "
+ "creatorid: %08x\n",
+ (sfail < 7 ? "ignoring"
+ : "partial"), sfail,
+ betoh64(st->id),
+ ntohl(st->creatorid));
+ pfsyncstats.pfsyncs_badstate++;
+
+ if (!(sp->sync_flags & PFSTATE_STALE)) {
+ /* we have a better state, send it */
+ if (sc->sc_mbuf != NULL && !stale)
+ pfsync_sendout(sc);
+ stale++;
+ if (!st->sync_flags)
+ pfsync_pack_state(
+ PFSYNC_ACT_UPD, st, flags);
+ }
+ continue;
+ }
+ pfsync_alloc_scrub_memory(&sp->dst, &st->dst);
+ pf_state_peer_ntoh(&sp->src, &st->src);
+ pf_state_peer_ntoh(&sp->dst, &st->dst);
+ st->expire = ntohl(sp->expire) + time_second;
+ st->timeout = sp->timeout;
+ }
+ if (stale && sc->sc_mbuf != NULL)
+ pfsync_sendout(sc);
+#ifdef __FreeBSD__
+ PF_UNLOCK();
+#endif
+ splx(s);
+ break;
+ /*
+ * It's not strictly necessary for us to support the "uncompressed"
+ * delete action, but it's relatively simple and maintains consistency.
+ */
+ case PFSYNC_ACT_DEL:
+ if ((mp = m_pulldown(m, iplen + sizeof(*ph),
+ count * sizeof(*sp), &offp)) == NULL) {
+ pfsyncstats.pfsyncs_badlen++;
+ return;
+ }
+
+ s = splsoftnet();
+#ifdef __FreeBSD__
+ PF_LOCK();
+#endif
+ for (i = 0, sp = (struct pfsync_state *)(mp->m_data + offp);
+ i < count; i++, sp++) {
+ bcopy(sp->id, &key.id, sizeof(key.id));
+ key.creatorid = sp->creatorid;
+
+ st = pf_find_state_byid(&key);
+ if (st == NULL) {
+ pfsyncstats.pfsyncs_badstate++;
+ continue;
+ }
+ st->sync_flags |= PFSTATE_FROMSYNC;
+ pf_unlink_state(st);
+ }
+#ifdef __FreeBSD__
+ PF_UNLOCK();
+#endif
+ splx(s);
+ break;
+ case PFSYNC_ACT_UPD_C: {
+ int update_requested = 0;
+
+ if ((mp = m_pulldown(m, iplen + sizeof(*ph),
+ count * sizeof(*up), &offp)) == NULL) {
+ pfsyncstats.pfsyncs_badlen++;
+ return;
+ }
+
+ s = splsoftnet();
+#ifdef __FreeBSD__
+ PF_LOCK();
+#endif
+ for (i = 0, up = (struct pfsync_state_upd *)(mp->m_data + offp);
+ i < count; i++, up++) {
+ /* check for invalid values */
+ if (up->timeout >= PFTM_MAX ||
+ up->src.state > PF_TCPS_PROXY_DST ||
+ up->dst.state > PF_TCPS_PROXY_DST) {
+ if (pf_status.debug >= PF_DEBUG_MISC)
+ printf("pfsync_insert: "
+ "PFSYNC_ACT_UPD_C: "
+ "invalid value\n");
+ pfsyncstats.pfsyncs_badstate++;
+ continue;
+ }
+
+ bcopy(up->id, &key.id, sizeof(key.id));
+ key.creatorid = up->creatorid;
+
+ st = pf_find_state_byid(&key);
+ if (st == NULL) {
+ /* We don't have this state. Ask for it. */
+ error = pfsync_request_update(up, &src);
+ if (error == ENOMEM) {
+#ifdef __FreeBSD__
+ PF_UNLOCK();
+#endif
+ splx(s);
+ goto done;
+ }
+ update_requested = 1;
+ pfsyncstats.pfsyncs_badstate++;
+ continue;
+ }
+ sfail = 0;
+ if (st->proto == IPPROTO_TCP) {
+ /*
+ * The state should never go backwards except
+ * for syn-proxy states. Neither should the
+ * sequence window slide backwards.
+ */
+ if (st->src.state > up->src.state &&
+ (st->src.state < PF_TCPS_PROXY_SRC ||
+ up->src.state >= PF_TCPS_PROXY_SRC))
+ sfail = 1;
+ else if (st->dst.state > up->dst.state)
+ sfail = 2;
+ else if (SEQ_GT(st->src.seqlo,
+ ntohl(up->src.seqlo)))
+ sfail = 3;
+ else if (st->dst.state >= TCPS_SYN_SENT &&
+ SEQ_GT(st->dst.seqlo, ntohl(up->dst.seqlo)))
+ sfail = 4;
+ } else {
+ /*
+				 * Non-TCP protocol state machines always go
+				 * forwards.
+ */
+ if (st->src.state > up->src.state)
+ sfail = 5;
+ else if (st->dst.state > up->dst.state)
+ sfail = 6;
+ }
+ if (sfail) {
+ if (pf_status.debug >= PF_DEBUG_MISC)
+ printf("pfsync: ignoring stale update "
+ "(%d) id: %016llx "
+ "creatorid: %08x\n", sfail,
+ betoh64(st->id),
+ ntohl(st->creatorid));
+ pfsyncstats.pfsyncs_badstate++;
+
+ /* we have a better state, send it out */
+ if ((!stale || update_requested) &&
+ sc->sc_mbuf != NULL) {
+ pfsync_sendout(sc);
+ update_requested = 0;
+ }
+ stale++;
+ if (!st->sync_flags)
+ pfsync_pack_state(PFSYNC_ACT_UPD, st,
+ PFSYNC_FLAG_STALE);
+ continue;
+ }
+ pfsync_alloc_scrub_memory(&up->dst, &st->dst);
+ pf_state_peer_ntoh(&up->src, &st->src);
+ pf_state_peer_ntoh(&up->dst, &st->dst);
+ st->expire = ntohl(up->expire) + time_second;
+ st->timeout = up->timeout;
+ }
+ if ((update_requested || stale) && sc->sc_mbuf)
+ pfsync_sendout(sc);
+#ifdef __FreeBSD__
+ PF_UNLOCK();
+#endif
+ splx(s);
+ break;
+ }
+ case PFSYNC_ACT_DEL_C:
+ if ((mp = m_pulldown(m, iplen + sizeof(*ph),
+ count * sizeof(*dp), &offp)) == NULL) {
+ pfsyncstats.pfsyncs_badlen++;
+ return;
+ }
+
+ s = splsoftnet();
+#ifdef __FreeBSD__
+ PF_LOCK();
+#endif
+ for (i = 0, dp = (struct pfsync_state_del *)(mp->m_data + offp);
+ i < count; i++, dp++) {
+ bcopy(dp->id, &key.id, sizeof(key.id));
+ key.creatorid = dp->creatorid;
+
+ st = pf_find_state_byid(&key);
+ if (st == NULL) {
+ pfsyncstats.pfsyncs_badstate++;
+ continue;
+ }
+ st->sync_flags |= PFSTATE_FROMSYNC;
+ pf_unlink_state(st);
+ }
+#ifdef __FreeBSD__
+ PF_UNLOCK();
+#endif
+ splx(s);
+ break;
+ case PFSYNC_ACT_INS_F:
+ case PFSYNC_ACT_DEL_F:
+ /* not implemented */
+ break;
+ case PFSYNC_ACT_UREQ:
+ if ((mp = m_pulldown(m, iplen + sizeof(*ph),
+ count * sizeof(*rup), &offp)) == NULL) {
+ pfsyncstats.pfsyncs_badlen++;
+ return;
+ }
+
+ s = splsoftnet();
+#ifdef __FreeBSD__
+ PF_LOCK();
+#endif
+ if (sc->sc_mbuf != NULL)
+ pfsync_sendout(sc);
+ for (i = 0,
+ rup = (struct pfsync_state_upd_req *)(mp->m_data + offp);
+ i < count; i++, rup++) {
+ bcopy(rup->id, &key.id, sizeof(key.id));
+ key.creatorid = rup->creatorid;
+
+ if (key.id == 0 && key.creatorid == 0) {
+ sc->sc_ureq_received = time_uptime;
+ if (sc->sc_bulk_send_next == NULL)
+ sc->sc_bulk_send_next =
+ TAILQ_FIRST(&state_list);
+ sc->sc_bulk_terminator = sc->sc_bulk_send_next;
+ if (pf_status.debug >= PF_DEBUG_MISC)
+ printf("pfsync: received "
+ "bulk update request\n");
+ pfsync_send_bus(sc, PFSYNC_BUS_START);
+#ifdef __FreeBSD__
+ callout_reset(&sc->sc_bulk_tmo, 1 * hz,
+ pfsync_bulk_update, pfsyncif);
+#else
+ timeout_add(&sc->sc_bulk_tmo, 1 * hz);
+#endif
+ } else {
+ st = pf_find_state_byid(&key);
+ if (st == NULL) {
+ pfsyncstats.pfsyncs_badstate++;
+ continue;
+ }
+ if (!st->sync_flags)
+ pfsync_pack_state(PFSYNC_ACT_UPD,
+ st, 0);
+ }
+ }
+ if (sc->sc_mbuf != NULL)
+ pfsync_sendout(sc);
+#ifdef __FreeBSD__
+ PF_UNLOCK();
+#endif
+ splx(s);
+ break;
+ case PFSYNC_ACT_BUS:
+ /* If we're not waiting for a bulk update, who cares. */
+ if (sc->sc_ureq_sent == 0)
+ break;
+
+ if ((mp = m_pulldown(m, iplen + sizeof(*ph),
+ sizeof(*bus), &offp)) == NULL) {
+ pfsyncstats.pfsyncs_badlen++;
+ return;
+ }
+ bus = (struct pfsync_state_bus *)(mp->m_data + offp);
+ switch (bus->status) {
+ case PFSYNC_BUS_START:
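+			/*
+			 * Arm the failure timeout to roughly the number of
+			 * timer ticks a full bulk transfer can take: the
+			 * sender moves up to PFSYNC_BULKPACKETS * sc_maxcount
+			 * states per tick (see pfsync_bulk_update()).
+			 */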
+#ifdef __FreeBSD__
+ callout_reset(&sc->sc_bulkfail_tmo,
+ pf_pool_limits[PF_LIMIT_STATES].limit /
+ (PFSYNC_BULKPACKETS * sc->sc_maxcount),
+ pfsync_bulkfail, pfsyncif);
+#else
+ timeout_add(&sc->sc_bulkfail_tmo,
+ pf_pool_limits[PF_LIMIT_STATES].limit /
+ (PFSYNC_BULKPACKETS * sc->sc_maxcount));
+#endif
+ if (pf_status.debug >= PF_DEBUG_MISC)
+ printf("pfsync: received bulk "
+ "update start\n");
+ break;
+ case PFSYNC_BUS_END:
+ if (time_uptime - ntohl(bus->endtime) >=
+ sc->sc_ureq_sent) {
+ /* that's it, we're happy */
+ sc->sc_ureq_sent = 0;
+ sc->sc_bulk_tries = 0;
+ timeout_del(&sc->sc_bulkfail_tmo);
+#if NCARP > 0
+ if (!pfsync_sync_ok)
+#ifdef __FreeBSD__
+#ifdef CARP_ADVANCED
+ carp_group_demote_adj(sc->sc_ifp, -1);
+#endif
+#else
+ carp_group_demote_adj(&sc->sc_if, -1);
+#endif
+#endif
+ pfsync_sync_ok = 1;
+ if (pf_status.debug >= PF_DEBUG_MISC)
+ printf("pfsync: received valid "
+ "bulk update end\n");
+ } else {
+ if (pf_status.debug >= PF_DEBUG_MISC)
+ printf("pfsync: received invalid "
+ "bulk update end: bad timestamp\n");
+ }
+ break;
+ }
+ break;
+#ifdef PFSYNC_TDB
+ case PFSYNC_ACT_TDB_UPD:
+ if ((mp = m_pulldown(m, iplen + sizeof(*ph),
+ count * sizeof(*pt), &offp)) == NULL) {
+ pfsyncstats.pfsyncs_badlen++;
+ return;
+ }
+ s = splsoftnet();
+#ifdef __FreeBSD__
+ PF_LOCK();
+#endif
+ for (i = 0, pt = (struct pfsync_tdb *)(mp->m_data + offp);
+ i < count; i++, pt++)
+ pfsync_update_net_tdb(pt);
+#ifdef __FreeBSD__
+ PF_UNLOCK();
+#endif
+ splx(s);
+ break;
+#endif
+ }
+
+done:
+ if (m)
+ m_freem(m);
+}
+
+int
+pfsyncoutput(struct ifnet *ifp, struct mbuf *m, struct sockaddr *dst,
+ struct route *ro)
+{
+ m_freem(m);
+ return (0);
+}
+
+/* ARGSUSED */
+int
+pfsyncioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
+{
+#ifndef __FreeBSD__
+ struct proc *p = curproc;
+#endif
+ struct pfsync_softc *sc = ifp->if_softc;
+ struct ifreq *ifr = (struct ifreq *)data;
+ struct ip_moptions *imo = &sc->sc_imo;
+ struct pfsyncreq pfsyncr;
+ struct ifnet *sifp;
+ int s, error;
+
+ switch (cmd) {
+ case SIOCSIFADDR:
+ case SIOCAIFADDR:
+ case SIOCSIFDSTADDR:
+ case SIOCSIFFLAGS:
+#ifdef __FreeBSD__
+ if (ifp->if_flags & IFF_UP)
+ ifp->if_drv_flags |= IFF_DRV_RUNNING;
+ else
+ ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
+#else
+ if (ifp->if_flags & IFF_UP)
+ ifp->if_flags |= IFF_RUNNING;
+ else
+ ifp->if_flags &= ~IFF_RUNNING;
+#endif
+ break;
+ case SIOCSIFMTU:
+ if (ifr->ifr_mtu < PFSYNC_MINMTU)
+ return (EINVAL);
+ if (ifr->ifr_mtu > MCLBYTES)
+ ifr->ifr_mtu = MCLBYTES;
+ s = splnet();
+#ifdef __FreeBSD__
+ PF_LOCK();
+#endif
+ if (ifr->ifr_mtu < ifp->if_mtu)
+ pfsync_sendout(sc);
+ pfsync_setmtu(sc, ifr->ifr_mtu);
+#ifdef __FreeBSD__
+ PF_UNLOCK();
+#endif
+ splx(s);
+ break;
+ case SIOCGETPFSYNC:
+ bzero(&pfsyncr, sizeof(pfsyncr));
+ if (sc->sc_sync_ifp)
+ strlcpy(pfsyncr.pfsyncr_syncdev,
+ sc->sc_sync_ifp->if_xname, IFNAMSIZ);
+ pfsyncr.pfsyncr_syncpeer = sc->sc_sync_peer;
+ pfsyncr.pfsyncr_maxupdates = sc->sc_maxupdates;
+ if ((error = copyout(&pfsyncr, ifr->ifr_data, sizeof(pfsyncr))))
+ return (error);
+ break;
+ case SIOCSETPFSYNC:
+#ifdef __FreeBSD__
+ if ((error = priv_check(curthread, PRIV_NETINET_PF)) != 0)
+#else
+ if ((error = suser(p, p->p_acflag)) != 0)
+#endif
+ return (error);
+ if ((error = copyin(ifr->ifr_data, &pfsyncr, sizeof(pfsyncr))))
+ return (error);
+
+#ifdef __FreeBSD__
+ PF_LOCK();
+#endif
+ if (pfsyncr.pfsyncr_syncpeer.s_addr == 0)
+#ifdef __FreeBSD__
+ sc->sc_sync_peer.s_addr = htonl(INADDR_PFSYNC_GROUP);
+#else
+ sc->sc_sync_peer.s_addr = INADDR_PFSYNC_GROUP;
+#endif
+ else
+ sc->sc_sync_peer.s_addr =
+ pfsyncr.pfsyncr_syncpeer.s_addr;
+
+ if (pfsyncr.pfsyncr_maxupdates > 255)
+#ifdef __FreeBSD__
+ {
+ PF_UNLOCK();
+#endif
+ return (EINVAL);
+#ifdef __FreeBSD__
+ }
+#endif
+ sc->sc_maxupdates = pfsyncr.pfsyncr_maxupdates;
+
+ if (pfsyncr.pfsyncr_syncdev[0] == 0) {
+ sc->sc_sync_ifp = NULL;
+ if (sc->sc_mbuf_net != NULL) {
+ /* Don't keep stale pfsync packets around. */
+ s = splnet();
+ m_freem(sc->sc_mbuf_net);
+ sc->sc_mbuf_net = NULL;
+ sc->sc_statep_net.s = NULL;
+ splx(s);
+ }
+#ifdef __FreeBSD__
+ PF_UNLOCK();
+#endif
+ if (imo->imo_num_memberships > 0) {
+ in_delmulti(imo->imo_membership[--imo->imo_num_memberships]);
+ imo->imo_multicast_ifp = NULL;
+ }
+ break;
+ }
+
+#ifdef __FreeBSD__
+ PF_UNLOCK();
+#endif
+ if ((sifp = ifunit(pfsyncr.pfsyncr_syncdev)) == NULL)
+ return (EINVAL);
+#ifdef __FreeBSD__
+ PF_LOCK();
+#endif
+
+ s = splnet();
+#ifdef __FreeBSD__
+ if (sifp->if_mtu < sc->sc_ifp->if_mtu ||
+#else
+ if (sifp->if_mtu < sc->sc_if.if_mtu ||
+#endif
+ (sc->sc_sync_ifp != NULL &&
+ sifp->if_mtu < sc->sc_sync_ifp->if_mtu) ||
+ sifp->if_mtu < MCLBYTES - sizeof(struct ip))
+ pfsync_sendout(sc);
+ sc->sc_sync_ifp = sifp;
+
+#ifdef __FreeBSD__
+ pfsync_setmtu(sc, sc->sc_ifp->if_mtu);
+#else
+ pfsync_setmtu(sc, sc->sc_if.if_mtu);
+#endif
+
+ if (imo->imo_num_memberships > 0) {
+#ifdef __FreeBSD__
+ PF_UNLOCK();
+#endif
+ in_delmulti(imo->imo_membership[--imo->imo_num_memberships]);
+#ifdef __FreeBSD__
+ PF_LOCK();
+#endif
+ imo->imo_multicast_ifp = NULL;
+ }
+
+ if (sc->sc_sync_ifp &&
+#ifdef __FreeBSD__
+ sc->sc_sync_peer.s_addr == htonl(INADDR_PFSYNC_GROUP)) {
+#else
+ sc->sc_sync_peer.s_addr == INADDR_PFSYNC_GROUP) {
+#endif
+ struct in_addr addr;
+
+ if (!(sc->sc_sync_ifp->if_flags & IFF_MULTICAST)) {
+ sc->sc_sync_ifp = NULL;
+#ifdef __FreeBSD__
+ PF_UNLOCK();
+#endif
+ splx(s);
+ return (EADDRNOTAVAIL);
+ }
+
+#ifdef __FreeBSD__
+ addr.s_addr = htonl(INADDR_PFSYNC_GROUP);
+#else
+ addr.s_addr = INADDR_PFSYNC_GROUP;
+#endif
+
+#ifdef __FreeBSD__
+ PF_UNLOCK();
+#endif
+ if ((imo->imo_membership[0] =
+ in_addmulti(&addr, sc->sc_sync_ifp)) == NULL) {
+ sc->sc_sync_ifp = NULL;
+ splx(s);
+ return (ENOBUFS);
+ }
+#ifdef __FreeBSD__
+ PF_LOCK();
+#endif
+ imo->imo_num_memberships++;
+ imo->imo_multicast_ifp = sc->sc_sync_ifp;
+ imo->imo_multicast_ttl = PFSYNC_DFLTTL;
+ imo->imo_multicast_loop = 0;
+ }
+
+ if (sc->sc_sync_ifp ||
+#ifdef __FreeBSD__
+ sc->sc_sendaddr.s_addr != htonl(INADDR_PFSYNC_GROUP)) {
+#else
+ sc->sc_sendaddr.s_addr != INADDR_PFSYNC_GROUP) {
+#endif
+ /* Request a full state table update. */
+ sc->sc_ureq_sent = time_uptime;
+#if NCARP > 0
+ if (pfsync_sync_ok)
+#ifdef __FreeBSD__
+#ifdef CARP_ADVANCED
+ carp_group_demote_adj(sc->sc_ifp, 1);
+#endif
+#else
+ carp_group_demote_adj(&sc->sc_if, 1);
+#endif
+#endif
+ pfsync_sync_ok = 0;
+ if (pf_status.debug >= PF_DEBUG_MISC)
+ printf("pfsync: requesting bulk update\n");
+#ifdef __FreeBSD__
+ callout_reset(&sc->sc_bulkfail_tmo, 5 * hz,
+ pfsync_bulkfail, pfsyncif);
+#else
+ timeout_add(&sc->sc_bulkfail_tmo, 5 * hz);
+#endif
+ error = pfsync_request_update(NULL, NULL);
+ if (error == ENOMEM) {
+#ifdef __FreeBSD__
+ PF_UNLOCK();
+#endif
+ splx(s);
+ return (ENOMEM);
+ }
+ pfsync_sendout(sc);
+ }
+#ifdef __FreeBSD__
+ PF_UNLOCK();
+#endif
+ splx(s);
+
+ break;
+
+ default:
+ return (ENOTTY);
+ }
+
+ return (0);
+}
+
+void
+pfsync_setmtu(struct pfsync_softc *sc, int mtu_req)
+{
+ int mtu;
+
+ if (sc->sc_sync_ifp && sc->sc_sync_ifp->if_mtu < mtu_req)
+ mtu = sc->sc_sync_ifp->if_mtu;
+ else
+ mtu = mtu_req;
+
+ sc->sc_maxcount = (mtu - sizeof(struct pfsync_header)) /
+ sizeof(struct pfsync_state);
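+	/*
+	 * Cap at 254: the header's count field is a u_int8_t, and 255 is
+	 * used as the "no matching update" marker in pfsync_pack_state().
+	 */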
+ if (sc->sc_maxcount > 254)
+ sc->sc_maxcount = 254;
+#ifdef __FreeBSD__
+ sc->sc_ifp->if_mtu = sizeof(struct pfsync_header) +
+#else
+ sc->sc_if.if_mtu = sizeof(struct pfsync_header) +
+#endif
+ sc->sc_maxcount * sizeof(struct pfsync_state);
+}
+
+struct mbuf *
+pfsync_get_mbuf(struct pfsync_softc *sc, u_int8_t action, void **sp)
+{
+ struct pfsync_header *h;
+ struct mbuf *m;
+ int len;
+
+ MGETHDR(m, M_DONTWAIT, MT_DATA);
+ if (m == NULL) {
+#ifdef __FreeBSD__
+ sc->sc_ifp->if_oerrors++;
+#else
+ sc->sc_if.if_oerrors++;
+#endif
+ return (NULL);
+ }
+
+ switch (action) {
+ case PFSYNC_ACT_CLR:
+ len = sizeof(struct pfsync_header) +
+ sizeof(struct pfsync_state_clr);
+ break;
+ case PFSYNC_ACT_UPD_C:
+ len = (sc->sc_maxcount * sizeof(struct pfsync_state_upd)) +
+ sizeof(struct pfsync_header);
+ break;
+ case PFSYNC_ACT_DEL_C:
+ len = (sc->sc_maxcount * sizeof(struct pfsync_state_del)) +
+ sizeof(struct pfsync_header);
+ break;
+ case PFSYNC_ACT_UREQ:
+ len = (sc->sc_maxcount * sizeof(struct pfsync_state_upd_req)) +
+ sizeof(struct pfsync_header);
+ break;
+ case PFSYNC_ACT_BUS:
+ len = sizeof(struct pfsync_header) +
+ sizeof(struct pfsync_state_bus);
+ break;
+#ifdef PFSYNC_TDB
+ case PFSYNC_ACT_TDB_UPD:
+ len = (sc->sc_maxcount * sizeof(struct pfsync_tdb)) +
+ sizeof(struct pfsync_header);
+ break;
+#endif
+ default:
+ len = (sc->sc_maxcount * sizeof(struct pfsync_state)) +
+ sizeof(struct pfsync_header);
+ break;
+ }
+
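+	/*
+	 * Place the payload at the (long-aligned) tail of the buffer so an
+	 * IP header can later be prepended without another allocation (see
+	 * pfsync_sendout_mbuf()).
+	 */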
+ if (len > MHLEN) {
+ MCLGET(m, M_DONTWAIT);
+ if ((m->m_flags & M_EXT) == 0) {
+ m_free(m);
+#ifdef __FreeBSD__
+ sc->sc_ifp->if_oerrors++;
+#else
+ sc->sc_if.if_oerrors++;
+#endif
+ return (NULL);
+ }
+ m->m_data += (MCLBYTES - len) &~ (sizeof(long) - 1);
+ } else
+ MH_ALIGN(m, len);
+
+ m->m_pkthdr.rcvif = NULL;
+ m->m_pkthdr.len = m->m_len = sizeof(struct pfsync_header);
+ h = mtod(m, struct pfsync_header *);
+ h->version = PFSYNC_VERSION;
+ h->af = 0;
+ h->count = 0;
+ h->action = action;
+#ifndef PFSYNC_TDB
+ if (action != PFSYNC_ACT_TDB_UPD)
+#endif
+ bcopy(&pf_status.pf_chksum, &h->pf_chksum,
+ PF_MD5_DIGEST_LENGTH);
+
+ *sp = (void *)((char *)h + PFSYNC_HDRLEN);
+#ifdef PFSYNC_TDB
+ if (action == PFSYNC_ACT_TDB_UPD)
+#ifdef __FreeBSD__
+ callout_reset(&sc->sc_tdb_tmo, hz, pfsync_tdb_timeout,
+ pfsyncif);
+#else
+ timeout_add(&sc->sc_tdb_tmo, hz);
+#endif
+ else
+#endif
+#ifdef __FreeBSD__
+ callout_reset(&sc->sc_tmo, hz, pfsync_timeout, pfsyncif);
+#else
+ timeout_add(&sc->sc_tmo, hz);
+#endif
+ return (m);
+}
+
+int
+pfsync_pack_state(u_int8_t action, struct pf_state *st, int flags)
+{
+ struct ifnet *ifp = NULL;
+ struct pfsync_softc *sc = pfsyncif;
+ struct pfsync_header *h, *h_net;
+ struct pfsync_state *sp = NULL;
+ struct pfsync_state_upd *up = NULL;
+ struct pfsync_state_del *dp = NULL;
+ struct pf_rule *r;
+ u_long secs;
+ int s, ret = 0;
+ u_int8_t i = 255, newaction = 0;
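+	/*
+	 * i == 255 means "no duplicate of this state in the pending
+	 * packet"; a smaller value indexes the matching update slot reused
+	 * below.
+	 */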
+
+ if (sc == NULL)
+ return (0);
+#ifdef __FreeBSD__
+ ifp = sc->sc_ifp;
+#else
+ ifp = &sc->sc_if;
+#endif
+
+ /*
+ * If a packet falls in the forest and there's nobody around to
+ * hear, does it make a sound?
+ */
+ if (ifp->if_bpf == NULL && sc->sc_sync_ifp == NULL &&
+#ifdef __FreeBSD__
+ sc->sc_sync_peer.s_addr == htonl(INADDR_PFSYNC_GROUP)) {
+#else
+ sc->sc_sync_peer.s_addr == INADDR_PFSYNC_GROUP) {
+#endif
+ /* Don't leave any stale pfsync packets hanging around. */
+ if (sc->sc_mbuf != NULL) {
+ m_freem(sc->sc_mbuf);
+ sc->sc_mbuf = NULL;
+ sc->sc_statep.s = NULL;
+ }
+ return (0);
+ }
+
+ if (action >= PFSYNC_ACT_MAX)
+ return (EINVAL);
+
+ s = splnet();
+#ifdef __FreeBSD__
+ PF_ASSERT(MA_OWNED);
+#endif
+ if (sc->sc_mbuf == NULL) {
+ if ((sc->sc_mbuf = pfsync_get_mbuf(sc, action,
+ (void *)&sc->sc_statep.s)) == NULL) {
+ splx(s);
+ return (ENOMEM);
+ }
+ h = mtod(sc->sc_mbuf, struct pfsync_header *);
+ } else {
+ h = mtod(sc->sc_mbuf, struct pfsync_header *);
+ if (h->action != action) {
+ pfsync_sendout(sc);
+ if ((sc->sc_mbuf = pfsync_get_mbuf(sc, action,
+ (void *)&sc->sc_statep.s)) == NULL) {
+ splx(s);
+ return (ENOMEM);
+ }
+ h = mtod(sc->sc_mbuf, struct pfsync_header *);
+ } else {
+ /*
+ * If it's an update, look in the packet to see if
+ * we already have an update for the state.
+ */
+ if (action == PFSYNC_ACT_UPD && sc->sc_maxupdates) {
+ struct pfsync_state *usp =
+ (void *)((char *)h + PFSYNC_HDRLEN);
+
+ for (i = 0; i < h->count; i++) {
+ if (!memcmp(usp->id, &st->id,
+ PFSYNC_ID_LEN) &&
+ usp->creatorid == st->creatorid) {
+ sp = usp;
+ sp->updates++;
+ break;
+ }
+ usp++;
+ }
+ }
+ }
+ }
+
+ secs = time_second;
+
+ st->pfsync_time = time_uptime;
+
+ if (sp == NULL) {
+ /* not a "duplicate" update */
+ i = 255;
+ sp = sc->sc_statep.s++;
+ sc->sc_mbuf->m_pkthdr.len =
+ sc->sc_mbuf->m_len += sizeof(struct pfsync_state);
+ h->count++;
+ bzero(sp, sizeof(*sp));
+
+ bcopy(&st->id, sp->id, sizeof(sp->id));
+ sp->creatorid = st->creatorid;
+
+ strlcpy(sp->ifname, st->u.s.kif->pfik_name, sizeof(sp->ifname));
+ pf_state_host_hton(&st->lan, &sp->lan);
+ pf_state_host_hton(&st->gwy, &sp->gwy);
+ pf_state_host_hton(&st->ext, &sp->ext);
+
+ bcopy(&st->rt_addr, &sp->rt_addr, sizeof(sp->rt_addr));
+
+ sp->creation = htonl(secs - st->creation);
+ pf_state_counter_hton(st->packets[0], sp->packets[0]);
+ pf_state_counter_hton(st->packets[1], sp->packets[1]);
+ pf_state_counter_hton(st->bytes[0], sp->bytes[0]);
+ pf_state_counter_hton(st->bytes[1], sp->bytes[1]);
+ if ((r = st->rule.ptr) == NULL)
+ sp->rule = htonl(-1);
+ else
+ sp->rule = htonl(r->nr);
+ if ((r = st->anchor.ptr) == NULL)
+ sp->anchor = htonl(-1);
+ else
+ sp->anchor = htonl(r->nr);
+ sp->af = st->af;
+ sp->proto = st->proto;
+ sp->direction = st->direction;
+ sp->log = st->log;
+ sp->state_flags = st->state_flags;
+ sp->timeout = st->timeout;
+
+ if (flags & PFSYNC_FLAG_STALE)
+ sp->sync_flags |= PFSTATE_STALE;
+ }
+
+ pf_state_peer_hton(&st->src, &sp->src);
+ pf_state_peer_hton(&st->dst, &sp->dst);
+
+ if (st->expire <= secs)
+ sp->expire = htonl(0);
+ else
+ sp->expire = htonl(st->expire - secs);
+
+ /* do we need to build "compressed" actions for network transfer? */
+ if (sc->sc_sync_ifp && flags & PFSYNC_FLAG_COMPRESS) {
+ switch (action) {
+ case PFSYNC_ACT_UPD:
+ newaction = PFSYNC_ACT_UPD_C;
+ break;
+ case PFSYNC_ACT_DEL:
+ newaction = PFSYNC_ACT_DEL_C;
+ break;
+ default:
+ /* by default we just send the uncompressed states */
+ break;
+ }
+ }
+
+ if (newaction) {
+ if (sc->sc_mbuf_net == NULL) {
+ if ((sc->sc_mbuf_net = pfsync_get_mbuf(sc, newaction,
+ (void *)&sc->sc_statep_net.s)) == NULL) {
+ splx(s);
+ return (ENOMEM);
+ }
+ }
+ h_net = mtod(sc->sc_mbuf_net, struct pfsync_header *);
+
+ switch (newaction) {
+ case PFSYNC_ACT_UPD_C:
+ if (i != 255) {
+ up = (void *)((char *)h_net +
+ PFSYNC_HDRLEN + (i * sizeof(*up)));
+ up->updates++;
+ } else {
+ h_net->count++;
+ sc->sc_mbuf_net->m_pkthdr.len =
+ sc->sc_mbuf_net->m_len += sizeof(*up);
+ up = sc->sc_statep_net.u++;
+
+ bzero(up, sizeof(*up));
+ bcopy(&st->id, up->id, sizeof(up->id));
+ up->creatorid = st->creatorid;
+ }
+ up->timeout = st->timeout;
+ up->expire = sp->expire;
+ up->src = sp->src;
+ up->dst = sp->dst;
+ break;
+ case PFSYNC_ACT_DEL_C:
+ sc->sc_mbuf_net->m_pkthdr.len =
+ sc->sc_mbuf_net->m_len += sizeof(*dp);
+ dp = sc->sc_statep_net.d++;
+ h_net->count++;
+
+ bzero(dp, sizeof(*dp));
+ bcopy(&st->id, dp->id, sizeof(dp->id));
+ dp->creatorid = st->creatorid;
+ break;
+ }
+ }
+
+ if (h->count == sc->sc_maxcount ||
+ (sc->sc_maxupdates && (sp->updates >= sc->sc_maxupdates)))
+ ret = pfsync_sendout(sc);
+
+ splx(s);
+ return (ret);
+}
+
+/* This must be called in splnet() */
+int
+pfsync_request_update(struct pfsync_state_upd *up, struct in_addr *src)
+{
+ struct ifnet *ifp = NULL;
+ struct pfsync_header *h;
+ struct pfsync_softc *sc = pfsyncif;
+ struct pfsync_state_upd_req *rup;
+ int ret = 0;
+
+ if (sc == NULL)
+ return (0);
+
+#ifdef __FreeBSD__
+ ifp = sc->sc_ifp;
+#else
+ ifp = &sc->sc_if;
+#endif
+ if (sc->sc_mbuf == NULL) {
+ if ((sc->sc_mbuf = pfsync_get_mbuf(sc, PFSYNC_ACT_UREQ,
+ (void *)&sc->sc_statep.s)) == NULL)
+ return (ENOMEM);
+ h = mtod(sc->sc_mbuf, struct pfsync_header *);
+ } else {
+ h = mtod(sc->sc_mbuf, struct pfsync_header *);
+ if (h->action != PFSYNC_ACT_UREQ) {
+ pfsync_sendout(sc);
+ if ((sc->sc_mbuf = pfsync_get_mbuf(sc, PFSYNC_ACT_UREQ,
+ (void *)&sc->sc_statep.s)) == NULL)
+ return (ENOMEM);
+ h = mtod(sc->sc_mbuf, struct pfsync_header *);
+ }
+ }
+
+ if (src != NULL)
+ sc->sc_sendaddr = *src;
+ sc->sc_mbuf->m_pkthdr.len = sc->sc_mbuf->m_len += sizeof(*rup);
+ h->count++;
+ rup = sc->sc_statep.r++;
+ bzero(rup, sizeof(*rup));
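+	/*
+	 * An all-zero request (up == NULL) asks the peer for a full bulk
+	 * update; see the PFSYNC_ACT_UREQ handler in pfsync_input().
+	 */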
+ if (up != NULL) {
+ bcopy(up->id, rup->id, sizeof(rup->id));
+ rup->creatorid = up->creatorid;
+ }
+
+ if (h->count == sc->sc_maxcount)
+ ret = pfsync_sendout(sc);
+
+ return (ret);
+}
+
+int
+pfsync_clear_states(u_int32_t creatorid, char *ifname)
+{
+ struct ifnet *ifp = NULL;
+ struct pfsync_softc *sc = pfsyncif;
+ struct pfsync_state_clr *cp;
+ int s, ret;
+
+ if (sc == NULL)
+ return (0);
+
+#ifdef __FreeBSD__
+ ifp = sc->sc_ifp;
+#else
+ ifp = &sc->sc_if;
+#endif
+#ifdef __FreeBSD__
+ PF_ASSERT(MA_OWNED);
+#endif
+ s = splnet();
+ if (sc->sc_mbuf != NULL)
+ pfsync_sendout(sc);
+ if ((sc->sc_mbuf = pfsync_get_mbuf(sc, PFSYNC_ACT_CLR,
+ (void *)&sc->sc_statep.c)) == NULL) {
+ splx(s);
+ return (ENOMEM);
+ }
+ sc->sc_mbuf->m_pkthdr.len = sc->sc_mbuf->m_len += sizeof(*cp);
+ cp = sc->sc_statep.c;
+ cp->creatorid = creatorid;
+ if (ifname != NULL)
+ strlcpy(cp->ifname, ifname, IFNAMSIZ);
+
+ ret = (pfsync_sendout(sc));
+ splx(s);
+ return (ret);
+}
+
+void
+pfsync_timeout(void *v)
+{
+ struct pfsync_softc *sc = v;
+ int s;
+
+ s = splnet();
+#ifdef __FreeBSD__
+ PF_LOCK();
+#endif
+ pfsync_sendout(sc);
+#ifdef __FreeBSD__
+ PF_UNLOCK();
+#endif
+ splx(s);
+}
+
+#ifdef PFSYNC_TDB
+void
+pfsync_tdb_timeout(void *v)
+{
+ struct pfsync_softc *sc = v;
+ int s;
+
+ s = splnet();
+#ifdef __FreeBSD__
+ PF_LOCK();
+#endif
+ pfsync_tdb_sendout(sc);
+#ifdef __FreeBSD__
+ PF_UNLOCK();
+#endif
+ splx(s);
+}
+#endif
+
+/* This must be called in splnet() */
+void
+pfsync_send_bus(struct pfsync_softc *sc, u_int8_t status)
+{
+ struct pfsync_state_bus *bus;
+
+#ifdef __FreeBSD__
+ PF_ASSERT(MA_OWNED);
+#endif
+ if (sc->sc_mbuf != NULL)
+ pfsync_sendout(sc);
+
+ if (pfsync_sync_ok &&
+ (sc->sc_mbuf = pfsync_get_mbuf(sc, PFSYNC_ACT_BUS,
+ (void *)&sc->sc_statep.b)) != NULL) {
+ sc->sc_mbuf->m_pkthdr.len = sc->sc_mbuf->m_len += sizeof(*bus);
+ bus = sc->sc_statep.b;
+ bus->creatorid = pf_status.hostid;
+ bus->status = status;
+ bus->endtime = htonl(time_uptime - sc->sc_ureq_received);
+ pfsync_sendout(sc);
+ }
+}
+
+void
+pfsync_bulk_update(void *v)
+{
+ struct pfsync_softc *sc = v;
+ int s, i = 0;
+ struct pf_state *state;
+
+ s = splnet();
+#ifdef __FreeBSD__
+ PF_LOCK();
+#endif
+ if (sc->sc_mbuf != NULL)
+ pfsync_sendout(sc);
+
+ /*
+ * Grab at most PFSYNC_BULKPACKETS worth of states which have not
+ * been sent since the latest request was made.
+ */
+ state = sc->sc_bulk_send_next;
+ if (state)
+ do {
+ /* send state update if syncable and not already sent */
+ if (!state->sync_flags
+ && state->timeout < PFTM_MAX
+ && state->pfsync_time <= sc->sc_ureq_received) {
+ pfsync_pack_state(PFSYNC_ACT_UPD, state, 0);
+ i++;
+ }
+
+ /* figure next state to send */
+ state = TAILQ_NEXT(state, u.s.entry_list);
+
+ /* wrap to start of list if we hit the end */
+ if (!state)
+ state = TAILQ_FIRST(&state_list);
+ } while (i < sc->sc_maxcount * PFSYNC_BULKPACKETS &&
+ state != sc->sc_bulk_terminator);
+
+ if (!state || state == sc->sc_bulk_terminator) {
+ /* we're done */
+ pfsync_send_bus(sc, PFSYNC_BUS_END);
+ sc->sc_ureq_received = 0;
+ sc->sc_bulk_send_next = NULL;
+ sc->sc_bulk_terminator = NULL;
+ timeout_del(&sc->sc_bulk_tmo);
+ if (pf_status.debug >= PF_DEBUG_MISC)
+ printf("pfsync: bulk update complete\n");
+ } else {
+ /* look again for more in a bit */
+#ifdef __FreeBSD__
+ callout_reset(&sc->sc_bulk_tmo, 1, pfsync_bulk_update,
+ pfsyncif);
+#else
+ timeout_add(&sc->sc_bulk_tmo, 1);
+#endif
+ sc->sc_bulk_send_next = state;
+ }
+ if (sc->sc_mbuf != NULL)
+ pfsync_sendout(sc);
+#ifdef __FreeBSD__
+	PF_UNLOCK();
+#endif
+	splx(s);
+}
+
+void
+pfsync_bulkfail(void *v)
+{
+ struct pfsync_softc *sc = v;
+ int s, error;
+
+#ifdef __FreeBSD__
+ PF_LOCK();
+#endif
+ if (sc->sc_bulk_tries++ < PFSYNC_MAX_BULKTRIES) {
+ /* Try again in a bit */
+#ifdef __FreeBSD__
+ callout_reset(&sc->sc_bulkfail_tmo, 5 * hz, pfsync_bulkfail,
+ pfsyncif);
+#else
+ timeout_add(&sc->sc_bulkfail_tmo, 5 * hz);
+#endif
+ s = splnet();
+ error = pfsync_request_update(NULL, NULL);
+ if (error == ENOMEM) {
+ if (pf_status.debug >= PF_DEBUG_MISC)
+ printf("pfsync: cannot allocate mbufs for "
+ "bulk update\n");
+ } else
+ pfsync_sendout(sc);
+ splx(s);
+ } else {
+ /* Pretend like the transfer was ok */
+ sc->sc_ureq_sent = 0;
+ sc->sc_bulk_tries = 0;
+#if NCARP > 0
+ if (!pfsync_sync_ok)
+#ifdef __FreeBSD__
+#ifdef CARP_ADVANCED
+ carp_group_demote_adj(sc->sc_ifp, -1);
+#endif
+#else
+ carp_group_demote_adj(&sc->sc_if, -1);
+#endif
+#endif
+ pfsync_sync_ok = 1;
+ if (pf_status.debug >= PF_DEBUG_MISC)
+ printf("pfsync: failed to receive "
+ "bulk update status\n");
+ timeout_del(&sc->sc_bulkfail_tmo);
+ }
+#ifdef __FreeBSD__
+ PF_UNLOCK();
+#endif
+}
+
+/* This must be called in splnet() */
+int
+pfsync_sendout(struct pfsync_softc *sc)
+{
+#if NBPFILTER > 0
+#ifdef __FreeBSD__
+ struct ifnet *ifp = sc->sc_ifp;
+#else
+ struct ifnet *ifp = &sc->sc_if;
+#endif
+#endif
+ struct mbuf *m;
+
+#ifdef __FreeBSD__
+ PF_ASSERT(MA_OWNED);
+#endif
+ timeout_del(&sc->sc_tmo);
+
+ if (sc->sc_mbuf == NULL)
+ return (0);
+ m = sc->sc_mbuf;
+ sc->sc_mbuf = NULL;
+ sc->sc_statep.s = NULL;
+
+#if NBPFILTER > 0
+ if (ifp->if_bpf)
+#ifdef __FreeBSD__
+ BPF_MTAP(ifp, m);
+#else
+ bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
+#endif
+#endif
+
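+	/*
+	 * The full-size packet has already been handed to bpf above; if a
+	 * "compressed" copy was built for the wire, transmit that instead.
+	 */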
+ if (sc->sc_mbuf_net) {
+ m_freem(m);
+ m = sc->sc_mbuf_net;
+ sc->sc_mbuf_net = NULL;
+ sc->sc_statep_net.s = NULL;
+ }
+
+ return pfsync_sendout_mbuf(sc, m);
+}
+
+#ifdef PFSYNC_TDB
+int
+pfsync_tdb_sendout(struct pfsync_softc *sc)
+{
+#if NBPFILTER > 0
+#ifdef __FreeBSD__
+ struct ifnet *ifp = sc->sc_ifp;
+#else
+ struct ifnet *ifp = &sc->sc_if;
+#endif
+#endif
+ struct mbuf *m;
+
+#ifdef __FreeBSD__
+ PF_ASSERT(MA_OWNED);
+#endif
+ timeout_del(&sc->sc_tdb_tmo);
+
+ if (sc->sc_mbuf_tdb == NULL)
+ return (0);
+ m = sc->sc_mbuf_tdb;
+ sc->sc_mbuf_tdb = NULL;
+ sc->sc_statep_tdb.t = NULL;
+
+#if NBPFILTER > 0
+ if (ifp->if_bpf)
+#ifdef __FreeBSD__
+ BPF_MTAP(ifp, m);
+#else
+ bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
+#endif
+#endif
+
+ return pfsync_sendout_mbuf(sc, m);
+}
+#endif
+
+int
+pfsync_sendout_mbuf(struct pfsync_softc *sc, struct mbuf *m)
+{
+ struct sockaddr sa;
+ struct ip *ip;
+
+#ifdef __FreeBSD__
+ PF_ASSERT(MA_OWNED);
+#endif
+ if (sc->sc_sync_ifp ||
+#ifdef __FreeBSD__
+ sc->sc_sync_peer.s_addr != htonl(INADDR_PFSYNC_GROUP)) {
+#else
+ sc->sc_sync_peer.s_addr != INADDR_PFSYNC_GROUP) {
+#endif
+ M_PREPEND(m, sizeof(struct ip), M_DONTWAIT);
+ if (m == NULL) {
+ pfsyncstats.pfsyncs_onomem++;
+ return (0);
+ }
+ ip = mtod(m, struct ip *);
+ ip->ip_v = IPVERSION;
+ ip->ip_hl = sizeof(*ip) >> 2;
+ ip->ip_tos = IPTOS_LOWDELAY;
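+	/*
+	 * FreeBSD's ip_output() of this vintage expects ip_len and ip_off
+	 * in host byte order, hence the differing conversions below.
+	 */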
+#ifdef __FreeBSD__
+ ip->ip_len = m->m_pkthdr.len;
+#else
+ ip->ip_len = htons(m->m_pkthdr.len);
+#endif
+ ip->ip_id = htons(ip_randomid());
+#ifdef __FreeBSD__
+ ip->ip_off = IP_DF;
+#else
+ ip->ip_off = htons(IP_DF);
+#endif
+ ip->ip_ttl = PFSYNC_DFLTTL;
+ ip->ip_p = IPPROTO_PFSYNC;
+ ip->ip_sum = 0;
+
+ bzero(&sa, sizeof(sa));
+ ip->ip_src.s_addr = INADDR_ANY;
+
+#ifdef __FreeBSD__
+ if (sc->sc_sendaddr.s_addr == htonl(INADDR_PFSYNC_GROUP))
+#else
+ if (sc->sc_sendaddr.s_addr == INADDR_PFSYNC_GROUP)
+#endif
+ m->m_flags |= M_MCAST;
+ ip->ip_dst = sc->sc_sendaddr;
+ sc->sc_sendaddr.s_addr = sc->sc_sync_peer.s_addr;
+
+ pfsyncstats.pfsyncs_opackets++;
+
+#ifdef __FreeBSD__
+ if (!IF_HANDOFF(&sc->sc_ifq, m, NULL))
+ pfsyncstats.pfsyncs_oerrors++;
+ taskqueue_enqueue(taskqueue_thread, &pfsyncif->sc_send_task);
+#else
+ if (ip_output(m, NULL, NULL, IP_RAWOUTPUT, &sc->sc_imo, NULL))
+ pfsyncstats.pfsyncs_oerrors++;
+#endif
+ } else
+ m_freem(m);
+
+ return (0);
+}
+
+#ifdef PFSYNC_TDB
+/* Update an in-kernel tdb. Silently fail if no tdb is found. */
+void
+pfsync_update_net_tdb(struct pfsync_tdb *pt)
+{
+ struct tdb *tdb;
+ int s;
+
+ /* check for invalid values */
+ if (ntohl(pt->spi) <= SPI_RESERVED_MAX ||
+ (pt->dst.sa.sa_family != AF_INET &&
+ pt->dst.sa.sa_family != AF_INET6))
+ goto bad;
+
+ s = spltdb();
+ tdb = gettdb(pt->spi, &pt->dst, pt->sproto);
+ if (tdb) {
+ pt->rpl = ntohl(pt->rpl);
+ pt->cur_bytes = betoh64(pt->cur_bytes);
+
+ /* Neither replay nor byte counter should ever decrease. */
+ if (pt->rpl < tdb->tdb_rpl ||
+ pt->cur_bytes < tdb->tdb_cur_bytes) {
+ splx(s);
+ goto bad;
+ }
+
+ tdb->tdb_rpl = pt->rpl;
+ tdb->tdb_cur_bytes = pt->cur_bytes;
+ }
+ splx(s);
+ return;
+
+ bad:
+ if (pf_status.debug >= PF_DEBUG_MISC)
+ printf("pfsync_insert: PFSYNC_ACT_TDB_UPD: "
+ "invalid value\n");
+ pfsyncstats.pfsyncs_badstate++;
+ return;
+}
+
+/* One of our local tdbs has been updated; sync its rpl counter with the other hosts. */
+int
+pfsync_update_tdb(struct tdb *tdb, int output)
+{
+ struct ifnet *ifp = NULL;
+ struct pfsync_softc *sc = pfsyncif;
+ struct pfsync_header *h;
+ struct pfsync_tdb *pt = NULL;
+	int s, i, ret = 0;
+
+ if (sc == NULL)
+ return (0);
+
+#ifdef __FreeBSD__
+ ifp = sc->sc_ifp;
+#else
+ ifp = &sc->sc_if;
+#endif
+ if (ifp->if_bpf == NULL && sc->sc_sync_ifp == NULL &&
+#ifdef __FreeBSD__
+ sc->sc_sync_peer.s_addr == htonl(INADDR_PFSYNC_GROUP)) {
+#else
+ sc->sc_sync_peer.s_addr == INADDR_PFSYNC_GROUP) {
+#endif
+ /* Don't leave any stale pfsync packets hanging around. */
+ if (sc->sc_mbuf_tdb != NULL) {
+ m_freem(sc->sc_mbuf_tdb);
+ sc->sc_mbuf_tdb = NULL;
+ sc->sc_statep_tdb.t = NULL;
+ }
+ return (0);
+ }
+
+#ifdef __FreeBSD__
+ PF_ASSERT(MA_OWNED);
+#endif
+ s = splnet();
+ if (sc->sc_mbuf_tdb == NULL) {
+ if ((sc->sc_mbuf_tdb = pfsync_get_mbuf(sc, PFSYNC_ACT_TDB_UPD,
+ (void *)&sc->sc_statep_tdb.t)) == NULL) {
+ splx(s);
+ return (ENOMEM);
+ }
+ h = mtod(sc->sc_mbuf_tdb, struct pfsync_header *);
+ } else {
+ h = mtod(sc->sc_mbuf_tdb, struct pfsync_header *);
+ if (h->action != PFSYNC_ACT_TDB_UPD) {
+ /*
+ * XXX will never happen as long as there's
+ * only one "TDB action".
+ */
+ pfsync_tdb_sendout(sc);
+ sc->sc_mbuf_tdb = pfsync_get_mbuf(sc,
+ PFSYNC_ACT_TDB_UPD, (void *)&sc->sc_statep_tdb.t);
+ if (sc->sc_mbuf_tdb == NULL) {
+ splx(s);
+ return (ENOMEM);
+ }
+ h = mtod(sc->sc_mbuf_tdb, struct pfsync_header *);
+ } else if (sc->sc_maxupdates) {
+ /*
+ * If it's an update, look in the packet to see if
+ * we already have an update for the state.
+ */
+ struct pfsync_tdb *u =
+ (void *)((char *)h + PFSYNC_HDRLEN);
+
+ for (i = 0; !pt && i < h->count; i++) {
+ if (tdb->tdb_spi == u->spi &&
+ tdb->tdb_sproto == u->sproto &&
+ !bcmp(&tdb->tdb_dst, &u->dst,
+ SA_LEN(&u->dst.sa))) {
+ pt = u;
+ pt->updates++;
+ }
+ u++;
+ }
+ }
+ }
+
+ if (pt == NULL) {
+ /* not a "duplicate" update */
+ pt = sc->sc_statep_tdb.t++;
+ sc->sc_mbuf_tdb->m_pkthdr.len =
+ sc->sc_mbuf_tdb->m_len += sizeof(struct pfsync_tdb);
+ h->count++;
+ bzero(pt, sizeof(*pt));
+
+ pt->spi = tdb->tdb_spi;
+ memcpy(&pt->dst, &tdb->tdb_dst, sizeof pt->dst);
+ pt->sproto = tdb->tdb_sproto;
+ }
+
+ /*
+ * When a failover happens, the master's rpl is probably above
+ * what we see here (we may be up to a second late), so
+ * increase it a bit for outbound tdbs to manage most such
+ * situations.
+ *
+ * For now, just add an offset that is likely to be larger
+ * than the number of packets we can see in one second. The RFC
+ * just says the next packet must have a higher seq value.
+ *
+ * XXX What is a good algorithm for this? We could use
+ * a rate-determined increase, but to know it, we would have
+ * to extend struct tdb.
+ * XXX pt->rpl can wrap over MAXINT, but if so the real tdb
+ * will soon be replaced anyway. For now, just don't handle
+ * this edge case.
+ */
+#define RPL_INCR 16384
+ pt->rpl = htonl(tdb->tdb_rpl + (output ? RPL_INCR : 0));
+ pt->cur_bytes = htobe64(tdb->tdb_cur_bytes);
+
+ if (h->count == sc->sc_maxcount ||
+ (sc->sc_maxupdates && (pt->updates >= sc->sc_maxupdates)))
+ ret = pfsync_tdb_sendout(sc);
+
+ splx(s);
+ return (ret);
+}
+#endif /* PFSYNC_TDB */
+
+#ifdef __FreeBSD__
+void
+pfsync_ifdetach(void *arg, struct ifnet *ifp)
+{
+ struct pfsync_softc *sc = (struct pfsync_softc *)arg;
+ struct ip_moptions *imo;
+
+ if (sc == NULL || sc->sc_sync_ifp != ifp)
+ return; /* not for us; unlocked read */
+
+ PF_LOCK();
+
+ /* Deal with a member interface going away from under us. */
+ sc->sc_sync_ifp = NULL;
+ if (sc->sc_mbuf_net != NULL) {
+ m_freem(sc->sc_mbuf_net);
+ sc->sc_mbuf_net = NULL;
+ sc->sc_statep_net.s = NULL;
+ }
+ imo = &sc->sc_imo;
+ if (imo->imo_num_memberships > 0) {
+ KASSERT(imo->imo_num_memberships == 1,
+ ("%s: imo_num_memberships != 1", __func__));
+ /*
+ * Our event handler is always called after protocol
+ * domains have been detached from the underlying ifnet.
+ * Do not call in_delmulti(); we held a single reference
+ * which the protocol domain has purged in in_purgemaddrs().
+ */
+ PF_UNLOCK();
+ imo->imo_membership[--imo->imo_num_memberships] = NULL;
+ PF_LOCK();
+ imo->imo_multicast_ifp = NULL;
+ }
+
+ PF_UNLOCK();
+}
+
+void
+pfsync_senddef(void *arg, __unused int pending)
+{
+ struct pfsync_softc *sc = (struct pfsync_softc *)arg;
+ struct mbuf *m;
+
+	for (;;) {
+ IF_DEQUEUE(&sc->sc_ifq, m);
+ if (m == NULL)
+ break;
+ /* Deal with a member interface going away from under us. */
+ if (sc->sc_sync_ifp == NULL) {
+ pfsyncstats.pfsyncs_oerrors++;
+ m_freem(m);
+ continue;
+ }
+ if (ip_output(m, NULL, NULL, IP_RAWOUTPUT, &sc->sc_imo, NULL))
+ pfsyncstats.pfsyncs_oerrors++;
+ }
+}
+
+static int
+pfsync_modevent(module_t mod, int type, void *data)
+{
+ int error = 0;
+
+ switch (type) {
+ case MOD_LOAD:
+ pfsyncattach(0);
+ break;
+ case MOD_UNLOAD:
+ if_clone_detach(&pfsync_cloner);
+ break;
+ default:
+ error = EINVAL;
+ break;
+ }
+
+ return error;
+}
+
+static moduledata_t pfsync_mod = {
+ "pfsync",
+ pfsync_modevent,
+ 0
+};
+
+#define PFSYNC_MODVER 1
+
+DECLARE_MODULE(pfsync, pfsync_mod, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_ANY);
+MODULE_VERSION(pfsync, PFSYNC_MODVER);
+MODULE_DEPEND(pfsync, pf, PF_MODVER, PF_MODVER, PF_MODVER);
+#endif /* __FreeBSD__ */
diff --git a/contrib/pf/rtems/freebsd/net/if_pfsync.h b/contrib/pf/rtems/freebsd/net/if_pfsync.h
new file mode 100644
index 00000000..e3e6caf9
--- /dev/null
+++ b/contrib/pf/rtems/freebsd/net/if_pfsync.h
@@ -0,0 +1,375 @@
+/* $FreeBSD$ */
+/* $OpenBSD: if_pfsync.h,v 1.30 2006/10/31 14:49:01 henning Exp $ */
+
+/*
+ * Copyright (c) 2001 Michael Shalayeff
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR OR HIS RELATIVES BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF MIND, USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _NET_IF_PFSYNC_HH_
+#define _NET_IF_PFSYNC_HH_
+
+
+#define PFSYNC_ID_LEN sizeof(u_int64_t)
+
+struct pfsync_state_scrub {
+ u_int16_t pfss_flags;
+ u_int8_t pfss_ttl; /* stashed TTL */
+#define PFSYNC_SCRUB_FLAG_VALID 0x01
+ u_int8_t scrub_flag;
+ u_int32_t pfss_ts_mod; /* timestamp modulation */
+} __packed;
+
+struct pfsync_state_host {
+ struct pf_addr addr;
+ u_int16_t port;
+ u_int16_t pad[3];
+} __packed;
+
+struct pfsync_state_peer {
+ struct pfsync_state_scrub scrub; /* state is scrubbed */
+ u_int32_t seqlo; /* Max sequence number sent */
+ u_int32_t seqhi; /* Max the other end ACKd + win */
+ u_int32_t seqdiff; /* Sequence number modulator */
+ u_int16_t max_win; /* largest window (pre scaling) */
+ u_int16_t mss; /* Maximum segment size option */
+ u_int8_t state; /* active state level */
+ u_int8_t wscale; /* window scaling factor */
+ u_int8_t pad[6];
+} __packed;
+
+struct pfsync_state {
+ u_int32_t id[2];
+ char ifname[IFNAMSIZ];
+ struct pfsync_state_host lan;
+ struct pfsync_state_host gwy;
+ struct pfsync_state_host ext;
+ struct pfsync_state_peer src;
+ struct pfsync_state_peer dst;
+ struct pf_addr rt_addr;
+ u_int32_t rule;
+ u_int32_t anchor;
+ u_int32_t nat_rule;
+ u_int32_t creation;
+ u_int32_t expire;
+ u_int32_t packets[2][2];
+ u_int32_t bytes[2][2];
+ u_int32_t creatorid;
+ sa_family_t af;
+ u_int8_t proto;
+ u_int8_t direction;
+ u_int8_t log;
+ u_int8_t state_flags;
+ u_int8_t timeout;
+ u_int8_t sync_flags;
+ u_int8_t updates;
+} __packed;
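+/*
+ * All multi-byte fields are carried in network byte order; conversions are
+ * done with the pf_state_peer_hton()/_ntoh() and related macros below.
+ */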
+
+#define PFSYNC_FLAG_COMPRESS 0x01
+#define PFSYNC_FLAG_STALE 0x02
+
+#ifdef PFSYNC_TDB
+struct pfsync_tdb {
+ u_int32_t spi;
+ union sockaddr_union dst;
+ u_int32_t rpl;
+ u_int64_t cur_bytes;
+ u_int8_t sproto;
+ u_int8_t updates;
+ u_int8_t pad[2];
+} __packed;
+#endif
+
+struct pfsync_state_upd {
+ u_int32_t id[2];
+ struct pfsync_state_peer src;
+ struct pfsync_state_peer dst;
+ u_int32_t creatorid;
+ u_int32_t expire;
+ u_int8_t timeout;
+ u_int8_t updates;
+ u_int8_t pad[6];
+} __packed;
+
+struct pfsync_state_del {
+ u_int32_t id[2];
+ u_int32_t creatorid;
+ struct {
+ u_int8_t state;
+ } src;
+ struct {
+ u_int8_t state;
+ } dst;
+ u_int8_t pad[2];
+} __packed;
+
+struct pfsync_state_upd_req {
+ u_int32_t id[2];
+ u_int32_t creatorid;
+ u_int32_t pad;
+} __packed;
+
+struct pfsync_state_clr {
+ char ifname[IFNAMSIZ];
+ u_int32_t creatorid;
+ u_int32_t pad;
+} __packed;
+
+struct pfsync_state_bus {
+ u_int32_t creatorid;
+ u_int32_t endtime;
+ u_int8_t status;
+#define PFSYNC_BUS_START 1
+#define PFSYNC_BUS_END 2
+ u_int8_t pad[7];
+} __packed;
+
+#ifdef _KERNEL
+
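+/*
+ * Write cursor into the payload of the packet currently being built; the
+ * member used matches the action in the pending pfsync header.
+ */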
+union sc_statep {
+ struct pfsync_state *s;
+ struct pfsync_state_upd *u;
+ struct pfsync_state_del *d;
+ struct pfsync_state_clr *c;
+ struct pfsync_state_bus *b;
+ struct pfsync_state_upd_req *r;
+};
+
+#ifdef PFSYNC_TDB
+union sc_tdb_statep {
+ struct pfsync_tdb *t;
+};
+#endif
+
+extern int pfsync_sync_ok;
+
+struct pfsync_softc {
+#ifdef __FreeBSD__
+ struct ifnet *sc_ifp;
+#else
+ struct ifnet sc_if;
+#endif
+ struct ifnet *sc_sync_ifp;
+
+ struct ip_moptions sc_imo;
+#ifdef __FreeBSD__
+ struct callout sc_tmo;
+#ifdef PFSYNC_TDB
+ struct callout sc_tdb_tmo;
+#endif
+ struct callout sc_bulk_tmo;
+ struct callout sc_bulkfail_tmo;
+#else
+ struct timeout sc_tmo;
+ struct timeout sc_tdb_tmo;
+ struct timeout sc_bulk_tmo;
+ struct timeout sc_bulkfail_tmo;
+#endif
+ struct in_addr sc_sync_peer;
+ struct in_addr sc_sendaddr;
+ struct mbuf *sc_mbuf; /* current cumulative mbuf */
+ struct mbuf *sc_mbuf_net; /* current cumulative mbuf */
+#ifdef PFSYNC_TDB
+	struct mbuf *sc_mbuf_tdb; /* ditto for TDB updates */
+#endif
+#ifdef __FreeBSD__
+ struct ifqueue sc_ifq;
+ struct task sc_send_task;
+#endif
+ union sc_statep sc_statep;
+ union sc_statep sc_statep_net;
+#ifdef PFSYNC_TDB
+ union sc_tdb_statep sc_statep_tdb;
+#endif
+ u_int32_t sc_ureq_received;
+ u_int32_t sc_ureq_sent;
+ struct pf_state *sc_bulk_send_next;
+ struct pf_state *sc_bulk_terminator;
+ int sc_bulk_tries;
+ int sc_maxcount; /* number of states in mtu */
+ int sc_maxupdates; /* number of updates/state */
+#ifdef __FreeBSD__
+ eventhandler_tag sc_detachtag;
+#endif
+};
+
+extern struct pfsync_softc *pfsyncif;
+#endif
+
+
+struct pfsync_header {
+ u_int8_t version;
+#define PFSYNC_VERSION 3
+ u_int8_t af;
+ u_int8_t action;
+#define PFSYNC_ACT_CLR 0 /* clear all states */
+#define PFSYNC_ACT_INS 1 /* insert state */
+#define PFSYNC_ACT_UPD 2 /* update state */
+#define PFSYNC_ACT_DEL 3 /* delete state */
+#define PFSYNC_ACT_UPD_C 4 /* "compressed" state update */
+#define PFSYNC_ACT_DEL_C 5 /* "compressed" state delete */
+#define PFSYNC_ACT_INS_F 6 /* insert fragment */
+#define PFSYNC_ACT_DEL_F 7 /* delete fragments */
+#define PFSYNC_ACT_UREQ 8 /* request "uncompressed" state */
+#define PFSYNC_ACT_BUS 9 /* Bulk Update Status */
+#define PFSYNC_ACT_TDB_UPD 10 /* TDB replay counter update */
+#define PFSYNC_ACT_MAX 11
+ u_int8_t count;
+ u_int8_t pf_chksum[PF_MD5_DIGEST_LENGTH];
+} __packed;
+
+#define PFSYNC_BULKPACKETS 1 /* # of packets per timeout */
+#define PFSYNC_MAX_BULKTRIES 12
+#define PFSYNC_HDRLEN sizeof(struct pfsync_header)
+#define PFSYNC_ACTIONS \
+ "CLR ST", "INS ST", "UPD ST", "DEL ST", \
+ "UPD ST COMP", "DEL ST COMP", "INS FR", "DEL FR", \
+ "UPD REQ", "BLK UPD STAT", "TDB UPD"
+
+#define PFSYNC_DFLTTL 255
+
+struct pfsyncstats {
+ u_int64_t pfsyncs_ipackets; /* total input packets, IPv4 */
+ u_int64_t pfsyncs_ipackets6; /* total input packets, IPv6 */
+ u_int64_t pfsyncs_badif; /* not the right interface */
+ u_int64_t pfsyncs_badttl; /* TTL is not PFSYNC_DFLTTL */
+ u_int64_t pfsyncs_hdrops; /* packets shorter than hdr */
+ u_int64_t pfsyncs_badver; /* bad (incl unsupp) version */
+ u_int64_t pfsyncs_badact; /* bad action */
+ u_int64_t pfsyncs_badlen; /* data length does not match */
+ u_int64_t pfsyncs_badauth; /* bad authentication */
+ u_int64_t pfsyncs_stale; /* stale state */
+ u_int64_t pfsyncs_badval; /* bad values */
+ u_int64_t pfsyncs_badstate; /* insert/lookup failed */
+
+ u_int64_t pfsyncs_opackets; /* total output packets, IPv4 */
+ u_int64_t pfsyncs_opackets6; /* total output packets, IPv6 */
+ u_int64_t pfsyncs_onomem; /* no memory for an mbuf */
+ u_int64_t pfsyncs_oerrors; /* ip output error */
+};
+
+/*
+ * Configuration structure for SIOCSETPFSYNC / SIOCGETPFSYNC
+ */
+struct pfsyncreq {
+ char pfsyncr_syncdev[IFNAMSIZ];
+ struct in_addr pfsyncr_syncpeer;
+ int pfsyncr_maxupdates;
+ int pfsyncr_authlevel;
+};
+
+#ifdef __FreeBSD__
+#define SIOCSETPFSYNC _IOW('i', 247, struct ifreq)
+#define SIOCGETPFSYNC _IOWR('i', 248, struct ifreq)
+#endif
+
+#define pf_state_peer_hton(s,d) do { \
+ (d)->seqlo = htonl((s)->seqlo); \
+ (d)->seqhi = htonl((s)->seqhi); \
+ (d)->seqdiff = htonl((s)->seqdiff); \
+ (d)->max_win = htons((s)->max_win); \
+ (d)->mss = htons((s)->mss); \
+ (d)->state = (s)->state; \
+ (d)->wscale = (s)->wscale; \
+ if ((s)->scrub) { \
+ (d)->scrub.pfss_flags = \
+ htons((s)->scrub->pfss_flags & PFSS_TIMESTAMP); \
+ (d)->scrub.pfss_ttl = (s)->scrub->pfss_ttl; \
+ (d)->scrub.pfss_ts_mod = htonl((s)->scrub->pfss_ts_mod);\
+ (d)->scrub.scrub_flag = PFSYNC_SCRUB_FLAG_VALID; \
+ } \
+} while (0)
+
+#define pf_state_peer_ntoh(s,d) do { \
+ (d)->seqlo = ntohl((s)->seqlo); \
+ (d)->seqhi = ntohl((s)->seqhi); \
+ (d)->seqdiff = ntohl((s)->seqdiff); \
+ (d)->max_win = ntohs((s)->max_win); \
+ (d)->mss = ntohs((s)->mss); \
+ (d)->state = (s)->state; \
+ (d)->wscale = (s)->wscale; \
+ if ((s)->scrub.scrub_flag == PFSYNC_SCRUB_FLAG_VALID && \
+ (d)->scrub != NULL) { \
+ (d)->scrub->pfss_flags = \
+ ntohs((s)->scrub.pfss_flags) & PFSS_TIMESTAMP; \
+ (d)->scrub->pfss_ttl = (s)->scrub.pfss_ttl; \
+ (d)->scrub->pfss_ts_mod = ntohl((s)->scrub.pfss_ts_mod);\
+ } \
+} while (0)
+
+#define pf_state_host_hton(s,d) do { \
+ bcopy(&(s)->addr, &(d)->addr, sizeof((d)->addr)); \
+ (d)->port = (s)->port; \
+} while (0)
+
+#define pf_state_host_ntoh(s,d) do { \
+ bcopy(&(s)->addr, &(d)->addr, sizeof((d)->addr)); \
+ (d)->port = (s)->port; \
+} while (0)
+
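+/*
+ * Move a 64-bit counter to/from the wire as two network-order 32-bit
+ * words: for hton, 's' is a u_int64_t and 'd' a u_int32_t[2]; the ntoh
+ * variant reverses the roles.
+ */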
+#define pf_state_counter_hton(s,d) do { \
+ d[0] = htonl((s>>32)&0xffffffff); \
+ d[1] = htonl(s&0xffffffff); \
+} while (0)
+
+#define pf_state_counter_ntoh(s,d) do { \
+ d = ntohl(s[0]); \
+ d = d<<32; \
+ d += ntohl(s[1]); \
+} while (0)
+
+#ifdef _KERNEL
+#ifdef __FreeBSD__
+void pfsync_input(struct mbuf *, __unused int);
+#else
+void pfsync_input(struct mbuf *, ...);
+#endif
+int pfsync_clear_states(u_int32_t, char *);
+int pfsync_pack_state(u_int8_t, struct pf_state *, int);
+#define pfsync_insert_state(st) do { \
+ if ((st->rule.ptr->rule_flag & PFRULE_NOSYNC) || \
+ (st->proto == IPPROTO_PFSYNC)) \
+ st->sync_flags |= PFSTATE_NOSYNC; \
+ else if (!st->sync_flags) \
+ pfsync_pack_state(PFSYNC_ACT_INS, (st), \
+ PFSYNC_FLAG_COMPRESS); \
+ st->sync_flags &= ~PFSTATE_FROMSYNC; \
+} while (0)
+#define pfsync_update_state(st) do { \
+ if (!st->sync_flags) \
+ pfsync_pack_state(PFSYNC_ACT_UPD, (st), \
+ PFSYNC_FLAG_COMPRESS); \
+ st->sync_flags &= ~PFSTATE_FROMSYNC; \
+} while (0)
+#define pfsync_delete_state(st) do { \
+ if (!st->sync_flags) \
+ pfsync_pack_state(PFSYNC_ACT_DEL, (st), \
+ PFSYNC_FLAG_COMPRESS); \
+} while (0)
+#ifdef PFSYNC_TDB
+int pfsync_update_tdb(struct tdb *, int);
+#endif
+#endif
+
+#endif /* _NET_IF_PFSYNC_HH_ */
diff --git a/contrib/pf/rtems/freebsd/net/pf.c b/contrib/pf/rtems/freebsd/net/pf.c
new file mode 100644
index 00000000..ea1a642a
--- /dev/null
+++ b/contrib/pf/rtems/freebsd/net/pf.c
@@ -0,0 +1,7771 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/* $OpenBSD: pf.c,v 1.527 2007/02/22 15:23:23 pyr Exp $ */
+/* add: $OpenBSD: pf.c,v 1.559 2007/09/18 18:45:59 markus Exp $ */
+
+/*
+ * Copyright (c) 2001 Daniel Hartmeier
+ * Copyright (c) 2002,2003 Henning Brauer
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Effort sponsored in part by the Defense Advanced Research Projects
+ * Agency (DARPA) and Air Force Research Laboratory, Air Force
+ * Materiel Command, USAF, under agreement number F30602-01-2-0537.
+ *
+ */
+
+#ifdef __FreeBSD__
+#include <rtems/freebsd/local/opt_inet.h>
+#include <rtems/freebsd/local/opt_inet6.h>
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+#endif
+
+#ifdef __FreeBSD__
+#include <rtems/freebsd/local/opt_bpf.h>
+#include <rtems/freebsd/local/opt_pf.h>
+
+#ifdef DEV_BPF
+#define NBPFILTER DEV_BPF
+#else
+#define NBPFILTER 0
+#endif
+
+#ifdef DEV_PFLOG
+#define NPFLOG DEV_PFLOG
+#else
+#define NPFLOG 0
+#endif
+
+#ifdef DEV_PFSYNC
+#define NPFSYNC DEV_PFSYNC
+#else
+#define NPFSYNC 0
+#endif
+
+#else
+#include <rtems/freebsd/local/bpfilter.h>
+#include <rtems/freebsd/local/pflog.h>
+#include <rtems/freebsd/local/pfsync.h>
+#endif
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/filio.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/socketvar.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/time.h>
+#ifdef __FreeBSD__
+#include <rtems/freebsd/sys/sysctl.h>
+#include <rtems/freebsd/sys/endian.h>
+#else
+#include <rtems/freebsd/sys/pool.h>
+#endif
+#include <rtems/freebsd/sys/proc.h>
+#ifdef __FreeBSD__
+#include <rtems/freebsd/sys/kthread.h>
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/sx.h>
+#else
+#include <rtems/freebsd/sys/rwlock.h>
+#endif
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/if_types.h>
+#include <rtems/freebsd/net/bpf.h>
+#include <rtems/freebsd/net/route.h>
+#ifndef __FreeBSD__
+#include <rtems/freebsd/net/radix_mpath.h>
+#endif
+
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/in_var.h>
+#include <rtems/freebsd/netinet/in_systm.h>
+#include <rtems/freebsd/netinet/ip.h>
+#include <rtems/freebsd/netinet/ip_var.h>
+#include <rtems/freebsd/netinet/tcp.h>
+#include <rtems/freebsd/netinet/tcp_seq.h>
+#include <rtems/freebsd/netinet/udp.h>
+#include <rtems/freebsd/netinet/ip_icmp.h>
+#include <rtems/freebsd/netinet/in_pcb.h>
+#include <rtems/freebsd/netinet/tcp_timer.h>
+#include <rtems/freebsd/netinet/tcp_var.h>
+#include <rtems/freebsd/netinet/udp_var.h>
+#include <rtems/freebsd/netinet/icmp_var.h>
+#include <rtems/freebsd/netinet/if_ether.h>
+
+#ifndef __FreeBSD__
+#include <rtems/freebsd/dev/rndvar.h>
+#endif
+#include <rtems/freebsd/net/pfvar.h>
+#include <rtems/freebsd/net/if_pflog.h>
+
+#if NPFSYNC > 0
+#include <rtems/freebsd/net/if_pfsync.h>
+#endif /* NPFSYNC > 0 */
+
+#ifdef INET6
+#include <rtems/freebsd/netinet/ip6.h>
+#include <rtems/freebsd/netinet/in_pcb.h>
+#include <rtems/freebsd/netinet/icmp6.h>
+#include <rtems/freebsd/netinet6/nd6.h>
+#ifdef __FreeBSD__
+#include <rtems/freebsd/netinet6/ip6_var.h>
+#include <rtems/freebsd/netinet6/in6_pcb.h>
+#endif
+#endif /* INET6 */
+
+#ifdef __FreeBSD__
+#include <rtems/freebsd/machine/in_cksum.h>
+#include <rtems/freebsd/sys/limits.h>
+#include <rtems/freebsd/sys/ucred.h>
+#include <rtems/freebsd/security/mac/mac_framework.h>
+
+extern int ip_optcopy(struct ip *, struct ip *);
+extern int debug_pfugidhack;
+#endif
+
+#define DPFPRINTF(n, x) if (pf_status.debug >= (n)) printf x
+
+/*
+ * Global variables
+ */
+
+struct pf_altqqueue pf_altqs[2];
+struct pf_palist pf_pabuf;
+struct pf_altqqueue *pf_altqs_active;
+struct pf_altqqueue *pf_altqs_inactive;
+struct pf_status pf_status;
+
+u_int32_t ticket_altqs_active;
+u_int32_t ticket_altqs_inactive;
+int altqs_inactive_open;
+u_int32_t ticket_pabuf;
+
+struct pf_anchor_stackframe {
+ struct pf_ruleset *rs;
+ struct pf_rule *r;
+ struct pf_anchor_node *parent;
+ struct pf_anchor *child;
+} pf_anchor_stack[64];
+
+#ifdef __FreeBSD__
+uma_zone_t pf_src_tree_pl, pf_rule_pl;
+uma_zone_t pf_state_pl, pf_altq_pl, pf_pooladdr_pl;
+#else
+struct pool pf_src_tree_pl, pf_rule_pl;
+struct pool pf_state_pl, pf_altq_pl, pf_pooladdr_pl;
+#endif
+
+void pf_print_host(struct pf_addr *, u_int16_t, u_int8_t);
+
+void pf_init_threshold(struct pf_threshold *, u_int32_t,
+ u_int32_t);
+void pf_add_threshold(struct pf_threshold *);
+int pf_check_threshold(struct pf_threshold *);
+
+void pf_change_ap(struct pf_addr *, u_int16_t *,
+ u_int16_t *, u_int16_t *, struct pf_addr *,
+ u_int16_t, u_int8_t, sa_family_t);
+int pf_modulate_sack(struct mbuf *, int, struct pf_pdesc *,
+ struct tcphdr *, struct pf_state_peer *);
+#ifdef INET6
+void pf_change_a6(struct pf_addr *, u_int16_t *,
+ struct pf_addr *, u_int8_t);
+#endif /* INET6 */
+void pf_change_icmp(struct pf_addr *, u_int16_t *,
+ struct pf_addr *, struct pf_addr *, u_int16_t,
+ u_int16_t *, u_int16_t *, u_int16_t *,
+ u_int16_t *, u_int8_t, sa_family_t);
+#ifdef __FreeBSD__
+void pf_send_tcp(struct mbuf *,
+ const struct pf_rule *, sa_family_t,
+#else
+void pf_send_tcp(const struct pf_rule *, sa_family_t,
+#endif
+ const struct pf_addr *, const struct pf_addr *,
+ u_int16_t, u_int16_t, u_int32_t, u_int32_t,
+ u_int8_t, u_int16_t, u_int16_t, u_int8_t, int,
+ u_int16_t, struct ether_header *, struct ifnet *);
+void pf_send_icmp(struct mbuf *, u_int8_t, u_int8_t,
+ sa_family_t, struct pf_rule *);
+struct pf_rule *pf_match_translation(struct pf_pdesc *, struct mbuf *,
+ int, int, struct pfi_kif *,
+ struct pf_addr *, u_int16_t, struct pf_addr *,
+ u_int16_t, int);
+struct pf_rule *pf_get_translation(struct pf_pdesc *, struct mbuf *,
+ int, int, struct pfi_kif *, struct pf_src_node **,
+ struct pf_addr *, u_int16_t,
+ struct pf_addr *, u_int16_t,
+ struct pf_addr *, u_int16_t *);
+int pf_test_tcp(struct pf_rule **, struct pf_state **,
+ int, struct pfi_kif *, struct mbuf *, int,
+ void *, struct pf_pdesc *, struct pf_rule **,
+#ifdef __FreeBSD__
+ struct pf_ruleset **, struct ifqueue *,
+ struct inpcb *);
+#else
+ struct pf_ruleset **, struct ifqueue *);
+#endif
+int pf_test_udp(struct pf_rule **, struct pf_state **,
+ int, struct pfi_kif *, struct mbuf *, int,
+ void *, struct pf_pdesc *, struct pf_rule **,
+#ifdef __FreeBSD__
+ struct pf_ruleset **, struct ifqueue *,
+ struct inpcb *);
+#else
+ struct pf_ruleset **, struct ifqueue *);
+#endif
+int pf_test_icmp(struct pf_rule **, struct pf_state **,
+ int, struct pfi_kif *, struct mbuf *, int,
+ void *, struct pf_pdesc *, struct pf_rule **,
+ struct pf_ruleset **, struct ifqueue *);
+int pf_test_other(struct pf_rule **, struct pf_state **,
+ int, struct pfi_kif *, struct mbuf *, int, void *,
+ struct pf_pdesc *, struct pf_rule **,
+ struct pf_ruleset **, struct ifqueue *);
+int pf_test_fragment(struct pf_rule **, int,
+ struct pfi_kif *, struct mbuf *, void *,
+ struct pf_pdesc *, struct pf_rule **,
+ struct pf_ruleset **);
+int pf_tcp_track_full(struct pf_state_peer *,
+ struct pf_state_peer *, struct pf_state **,
+ struct pfi_kif *, struct mbuf *, int,
+ struct pf_pdesc *, u_short *, int *);
+int pf_tcp_track_sloppy(struct pf_state_peer *,
+ struct pf_state_peer *, struct pf_state **,
+ struct pf_pdesc *, u_short *);
+int pf_test_state_tcp(struct pf_state **, int,
+ struct pfi_kif *, struct mbuf *, int,
+ void *, struct pf_pdesc *, u_short *);
+int pf_test_state_udp(struct pf_state **, int,
+ struct pfi_kif *, struct mbuf *, int,
+ void *, struct pf_pdesc *);
+int pf_test_state_icmp(struct pf_state **, int,
+ struct pfi_kif *, struct mbuf *, int,
+ void *, struct pf_pdesc *, u_short *);
+int pf_test_state_other(struct pf_state **, int,
+ struct pfi_kif *, struct pf_pdesc *);
+int pf_match_tag(struct mbuf *, struct pf_rule *,
+ struct pf_mtag *, int *);
+int pf_step_out_of_anchor(int *, struct pf_ruleset **,
+ int, struct pf_rule **, struct pf_rule **,
+ int *);
+void pf_hash(struct pf_addr *, struct pf_addr *,
+ struct pf_poolhashkey *, sa_family_t);
+int pf_map_addr(u_int8_t, struct pf_rule *,
+ struct pf_addr *, struct pf_addr *,
+ struct pf_addr *, struct pf_src_node **);
+int pf_get_sport(sa_family_t, u_int8_t, struct pf_rule *,
+ struct pf_addr *, struct pf_addr *, u_int16_t,
+ struct pf_addr *, u_int16_t*, u_int16_t, u_int16_t,
+ struct pf_src_node **);
+void pf_route(struct mbuf **, struct pf_rule *, int,
+ struct ifnet *, struct pf_state *,
+ struct pf_pdesc *);
+void pf_route6(struct mbuf **, struct pf_rule *, int,
+ struct ifnet *, struct pf_state *,
+ struct pf_pdesc *);
+#ifdef __FreeBSD__
+/* XXX: import */
+#else
+int pf_socket_lookup(int, struct pf_pdesc *);
+#endif
+u_int8_t pf_get_wscale(struct mbuf *, int, u_int16_t,
+ sa_family_t);
+u_int16_t pf_get_mss(struct mbuf *, int, u_int16_t,
+ sa_family_t);
+u_int16_t pf_calc_mss(struct pf_addr *, sa_family_t,
+ u_int16_t);
+void pf_set_rt_ifp(struct pf_state *,
+ struct pf_addr *);
+int pf_check_proto_cksum(struct mbuf *, int, int,
+ u_int8_t, sa_family_t);
+int pf_addr_wrap_neq(struct pf_addr_wrap *,
+ struct pf_addr_wrap *);
+struct pf_state *pf_find_state_recurse(struct pfi_kif *,
+ struct pf_state_cmp *, u_int8_t);
+int pf_src_connlimit(struct pf_state **);
+int pf_check_congestion(struct ifqueue *);
+
+#ifdef __FreeBSD__
+int in4_cksum(struct mbuf *m, u_int8_t nxt, int off, int len);
+
+extern int pf_end_threads;
+
+struct pf_pool_limit pf_pool_limits[PF_LIMIT_MAX];
+#else
+extern struct pool pfr_ktable_pl;
+extern struct pool pfr_kentry_pl;
+
+struct pf_pool_limit pf_pool_limits[PF_LIMIT_MAX] = {
+ { &pf_state_pl, PFSTATE_HIWAT },
+ { &pf_src_tree_pl, PFSNODE_HIWAT },
+ { &pf_frent_pl, PFFRAG_FRENT_HIWAT },
+ { &pfr_ktable_pl, PFR_KTABLE_HIWAT },
+ { &pfr_kentry_pl, PFR_KENTRY_HIWAT }
+};
+#endif
+
+#define STATE_LOOKUP() \
+ do { \
+ if (direction == PF_IN) \
+ *state = pf_find_state_recurse( \
+ kif, &key, PF_EXT_GWY); \
+ else \
+ *state = pf_find_state_recurse( \
+ kif, &key, PF_LAN_EXT); \
+ if (*state == NULL || (*state)->timeout == PFTM_PURGE) \
+ return (PF_DROP); \
+ if (direction == PF_OUT && \
+ (((*state)->rule.ptr->rt == PF_ROUTETO && \
+ (*state)->rule.ptr->direction == PF_OUT) || \
+ ((*state)->rule.ptr->rt == PF_REPLYTO && \
+ (*state)->rule.ptr->direction == PF_IN)) && \
+ (*state)->rt_kif != NULL && \
+ (*state)->rt_kif != kif) \
+ return (PF_PASS); \
+ } while (0)
+
+#define STATE_TRANSLATE(s) \
+ (s)->lan.addr.addr32[0] != (s)->gwy.addr.addr32[0] || \
+ ((s)->af == AF_INET6 && \
+ ((s)->lan.addr.addr32[1] != (s)->gwy.addr.addr32[1] || \
+ (s)->lan.addr.addr32[2] != (s)->gwy.addr.addr32[2] || \
+ (s)->lan.addr.addr32[3] != (s)->gwy.addr.addr32[3])) || \
+ (s)->lan.port != (s)->gwy.port
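+
+/*
+ * Illustrative example (addresses hypothetical): a NATed state with
+ * lan 10.0.0.5:1025 and gwy 192.0.2.1:50001 makes STATE_TRANSLATE(s)
+ * nonzero, so the packet is rewritten on its way through; for an
+ * un-NATed state lan and gwy are identical and the macro evaluates
+ * to 0.
+ */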
+
+#define BOUND_IFACE(r, k) \
+ ((r)->rule_flag & PFRULE_IFBOUND) ? (k) : pfi_all
+
+#define STATE_INC_COUNTERS(s) \
+ do { \
+ s->rule.ptr->states++; \
+ if (s->anchor.ptr != NULL) \
+ s->anchor.ptr->states++; \
+ if (s->nat_rule.ptr != NULL) \
+ s->nat_rule.ptr->states++; \
+ } while (0)
+
+#define STATE_DEC_COUNTERS(s) \
+ do { \
+ if (s->nat_rule.ptr != NULL) \
+ s->nat_rule.ptr->states--; \
+ if (s->anchor.ptr != NULL) \
+ s->anchor.ptr->states--; \
+ s->rule.ptr->states--; \
+ } while (0)
+
+struct pf_src_tree tree_src_tracking;
+
+struct pf_state_tree_id tree_id;
+struct pf_state_queue state_list;
+
+#ifdef __FreeBSD__
+static int pf_src_compare(struct pf_src_node *, struct pf_src_node *);
+static int pf_state_compare_lan_ext(struct pf_state *, struct pf_state *);
+static int pf_state_compare_ext_gwy(struct pf_state *, struct pf_state *);
+static int pf_state_compare_id(struct pf_state *, struct pf_state *);
+#endif
+
+RB_GENERATE(pf_src_tree, pf_src_node, entry, pf_src_compare);
+RB_GENERATE(pf_state_tree_lan_ext, pf_state,
+ u.s.entry_lan_ext, pf_state_compare_lan_ext);
+RB_GENERATE(pf_state_tree_ext_gwy, pf_state,
+ u.s.entry_ext_gwy, pf_state_compare_ext_gwy);
+RB_GENERATE(pf_state_tree_id, pf_state,
+ u.s.entry_id, pf_state_compare_id);
+
+#ifdef __FreeBSD__
+static int
+#else
+static __inline int
+#endif
+pf_src_compare(struct pf_src_node *a, struct pf_src_node *b)
+{
+ int diff;
+
+ if (a->rule.ptr > b->rule.ptr)
+ return (1);
+ if (a->rule.ptr < b->rule.ptr)
+ return (-1);
+ if ((diff = a->af - b->af) != 0)
+ return (diff);
+ switch (a->af) {
+#ifdef INET
+ case AF_INET:
+ if (a->addr.addr32[0] > b->addr.addr32[0])
+ return (1);
+ if (a->addr.addr32[0] < b->addr.addr32[0])
+ return (-1);
+ break;
+#endif /* INET */
+#ifdef INET6
+ case AF_INET6:
+ if (a->addr.addr32[3] > b->addr.addr32[3])
+ return (1);
+ if (a->addr.addr32[3] < b->addr.addr32[3])
+ return (-1);
+ if (a->addr.addr32[2] > b->addr.addr32[2])
+ return (1);
+ if (a->addr.addr32[2] < b->addr.addr32[2])
+ return (-1);
+ if (a->addr.addr32[1] > b->addr.addr32[1])
+ return (1);
+ if (a->addr.addr32[1] < b->addr.addr32[1])
+ return (-1);
+ if (a->addr.addr32[0] > b->addr.addr32[0])
+ return (1);
+ if (a->addr.addr32[0] < b->addr.addr32[0])
+ return (-1);
+ break;
+#endif /* INET6 */
+ }
+ return (0);
+}
+
+#ifdef __FreeBSD__
+static int
+#else
+static __inline int
+#endif
+pf_state_compare_lan_ext(struct pf_state *a, struct pf_state *b)
+{
+ int diff;
+
+ if ((diff = a->proto - b->proto) != 0)
+ return (diff);
+ if ((diff = a->af - b->af) != 0)
+ return (diff);
+ switch (a->af) {
+#ifdef INET
+ case AF_INET:
+ if (a->lan.addr.addr32[0] > b->lan.addr.addr32[0])
+ return (1);
+ if (a->lan.addr.addr32[0] < b->lan.addr.addr32[0])
+ return (-1);
+ if (a->ext.addr.addr32[0] > b->ext.addr.addr32[0])
+ return (1);
+ if (a->ext.addr.addr32[0] < b->ext.addr.addr32[0])
+ return (-1);
+ break;
+#endif /* INET */
+#ifdef INET6
+ case AF_INET6:
+ if (a->lan.addr.addr32[3] > b->lan.addr.addr32[3])
+ return (1);
+ if (a->lan.addr.addr32[3] < b->lan.addr.addr32[3])
+ return (-1);
+ if (a->ext.addr.addr32[3] > b->ext.addr.addr32[3])
+ return (1);
+ if (a->ext.addr.addr32[3] < b->ext.addr.addr32[3])
+ return (-1);
+ if (a->lan.addr.addr32[2] > b->lan.addr.addr32[2])
+ return (1);
+ if (a->lan.addr.addr32[2] < b->lan.addr.addr32[2])
+ return (-1);
+ if (a->ext.addr.addr32[2] > b->ext.addr.addr32[2])
+ return (1);
+ if (a->ext.addr.addr32[2] < b->ext.addr.addr32[2])
+ return (-1);
+ if (a->lan.addr.addr32[1] > b->lan.addr.addr32[1])
+ return (1);
+ if (a->lan.addr.addr32[1] < b->lan.addr.addr32[1])
+ return (-1);
+ if (a->ext.addr.addr32[1] > b->ext.addr.addr32[1])
+ return (1);
+ if (a->ext.addr.addr32[1] < b->ext.addr.addr32[1])
+ return (-1);
+ if (a->lan.addr.addr32[0] > b->lan.addr.addr32[0])
+ return (1);
+ if (a->lan.addr.addr32[0] < b->lan.addr.addr32[0])
+ return (-1);
+ if (a->ext.addr.addr32[0] > b->ext.addr.addr32[0])
+ return (1);
+ if (a->ext.addr.addr32[0] < b->ext.addr.addr32[0])
+ return (-1);
+ break;
+#endif /* INET6 */
+ }
+
+ if ((diff = a->lan.port - b->lan.port) != 0)
+ return (diff);
+ if ((diff = a->ext.port - b->ext.port) != 0)
+ return (diff);
+
+ return (0);
+}
+
+#ifdef __FreeBSD__
+static int
+#else
+static __inline int
+#endif
+pf_state_compare_ext_gwy(struct pf_state *a, struct pf_state *b)
+{
+ int diff;
+
+ if ((diff = a->proto - b->proto) != 0)
+ return (diff);
+ if ((diff = a->af - b->af) != 0)
+ return (diff);
+ switch (a->af) {
+#ifdef INET
+ case AF_INET:
+ if (a->ext.addr.addr32[0] > b->ext.addr.addr32[0])
+ return (1);
+ if (a->ext.addr.addr32[0] < b->ext.addr.addr32[0])
+ return (-1);
+ if (a->gwy.addr.addr32[0] > b->gwy.addr.addr32[0])
+ return (1);
+ if (a->gwy.addr.addr32[0] < b->gwy.addr.addr32[0])
+ return (-1);
+ break;
+#endif /* INET */
+#ifdef INET6
+ case AF_INET6:
+ if (a->ext.addr.addr32[3] > b->ext.addr.addr32[3])
+ return (1);
+ if (a->ext.addr.addr32[3] < b->ext.addr.addr32[3])
+ return (-1);
+ if (a->gwy.addr.addr32[3] > b->gwy.addr.addr32[3])
+ return (1);
+ if (a->gwy.addr.addr32[3] < b->gwy.addr.addr32[3])
+ return (-1);
+ if (a->ext.addr.addr32[2] > b->ext.addr.addr32[2])
+ return (1);
+ if (a->ext.addr.addr32[2] < b->ext.addr.addr32[2])
+ return (-1);
+ if (a->gwy.addr.addr32[2] > b->gwy.addr.addr32[2])
+ return (1);
+ if (a->gwy.addr.addr32[2] < b->gwy.addr.addr32[2])
+ return (-1);
+ if (a->ext.addr.addr32[1] > b->ext.addr.addr32[1])
+ return (1);
+ if (a->ext.addr.addr32[1] < b->ext.addr.addr32[1])
+ return (-1);
+ if (a->gwy.addr.addr32[1] > b->gwy.addr.addr32[1])
+ return (1);
+ if (a->gwy.addr.addr32[1] < b->gwy.addr.addr32[1])
+ return (-1);
+ if (a->ext.addr.addr32[0] > b->ext.addr.addr32[0])
+ return (1);
+ if (a->ext.addr.addr32[0] < b->ext.addr.addr32[0])
+ return (-1);
+ if (a->gwy.addr.addr32[0] > b->gwy.addr.addr32[0])
+ return (1);
+ if (a->gwy.addr.addr32[0] < b->gwy.addr.addr32[0])
+ return (-1);
+ break;
+#endif /* INET6 */
+ }
+
+ if ((diff = a->ext.port - b->ext.port) != 0)
+ return (diff);
+ if ((diff = a->gwy.port - b->gwy.port) != 0)
+ return (diff);
+
+ return (0);
+}
+
+#ifdef __FreeBSD__
+static int
+#else
+static __inline int
+#endif
+pf_state_compare_id(struct pf_state *a, struct pf_state *b)
+{
+ if (a->id > b->id)
+ return (1);
+ if (a->id < b->id)
+ return (-1);
+ if (a->creatorid > b->creatorid)
+ return (1);
+ if (a->creatorid < b->creatorid)
+ return (-1);
+
+ return (0);
+}
+
+#ifdef INET6
+void
+pf_addrcpy(struct pf_addr *dst, struct pf_addr *src, sa_family_t af)
+{
+ switch (af) {
+#ifdef INET
+ case AF_INET:
+ dst->addr32[0] = src->addr32[0];
+ break;
+#endif /* INET */
+ case AF_INET6:
+ dst->addr32[0] = src->addr32[0];
+ dst->addr32[1] = src->addr32[1];
+ dst->addr32[2] = src->addr32[2];
+ dst->addr32[3] = src->addr32[3];
+ break;
+ }
+}
+#endif /* INET6 */
+
+struct pf_state *
+pf_find_state_byid(struct pf_state_cmp *key)
+{
+ pf_status.fcounters[FCNT_STATE_SEARCH]++;
+ return (RB_FIND(pf_state_tree_id, &tree_id, (struct pf_state *)key));
+}
+
+struct pf_state *
+pf_find_state_recurse(struct pfi_kif *kif, struct pf_state_cmp *key, u_int8_t tree)
+{
+ struct pf_state *s;
+
+ pf_status.fcounters[FCNT_STATE_SEARCH]++;
+
+ switch (tree) {
+ case PF_LAN_EXT:
+ if ((s = RB_FIND(pf_state_tree_lan_ext, &kif->pfik_lan_ext,
+ (struct pf_state *)key)) != NULL)
+ return (s);
+ if ((s = RB_FIND(pf_state_tree_lan_ext, &pfi_all->pfik_lan_ext,
+ (struct pf_state *)key)) != NULL)
+ return (s);
+ return (NULL);
+ case PF_EXT_GWY:
+ if ((s = RB_FIND(pf_state_tree_ext_gwy, &kif->pfik_ext_gwy,
+ (struct pf_state *)key)) != NULL)
+ return (s);
+ if ((s = RB_FIND(pf_state_tree_ext_gwy, &pfi_all->pfik_ext_gwy,
+ (struct pf_state *)key)) != NULL)
+ return (s);
+ return (NULL);
+ default:
+ panic("pf_find_state_recurse");
+ }
+}
+
+struct pf_state *
+pf_find_state_all(struct pf_state_cmp *key, u_int8_t tree, int *more)
+{
+ struct pf_state *s, *ss = NULL;
+ struct pfi_kif *kif;
+
+ pf_status.fcounters[FCNT_STATE_SEARCH]++;
+
+ switch (tree) {
+ case PF_LAN_EXT:
+ TAILQ_FOREACH(kif, &pfi_statehead, pfik_w_states) {
+ s = RB_FIND(pf_state_tree_lan_ext,
+ &kif->pfik_lan_ext, (struct pf_state *)key);
+ if (s == NULL)
+ continue;
+ if (more == NULL)
+ return (s);
+ ss = s;
+ (*more)++;
+ }
+ return (ss);
+ case PF_EXT_GWY:
+ TAILQ_FOREACH(kif, &pfi_statehead, pfik_w_states) {
+ s = RB_FIND(pf_state_tree_ext_gwy,
+ &kif->pfik_ext_gwy, (struct pf_state *)key);
+ if (s == NULL)
+ continue;
+ if (more == NULL)
+ return (s);
+ ss = s;
+ (*more)++;
+ }
+ return (ss);
+ default:
+ panic("pf_find_state_all");
+ }
+}
+
+void
+pf_init_threshold(struct pf_threshold *threshold,
+ u_int32_t limit, u_int32_t seconds)
+{
+ threshold->limit = limit * PF_THRESHOLD_MULT;
+ threshold->seconds = seconds;
+ threshold->count = 0;
+ threshold->last = time_second;
+}
+
+void
+pf_add_threshold(struct pf_threshold *threshold)
+{
+ u_int32_t t = time_second, diff = t - threshold->last;
+
+ if (diff >= threshold->seconds)
+ threshold->count = 0;
+ else
+ threshold->count -= threshold->count * diff /
+ threshold->seconds;
+ threshold->count += PF_THRESHOLD_MULT;
+ threshold->last = t;
+}
+
+int
+pf_check_threshold(struct pf_threshold *threshold)
+{
+ return (threshold->count > threshold->limit);
+}
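+
+/*
+ * Worked example (illustrative): a rule limit of 10 connections per
+ * 5 seconds initializes limit = 10 * PF_THRESHOLD_MULT.  Each new
+ * connection first decays the count linearly by diff/seconds and then
+ * adds PF_THRESHOLD_MULT: a count of 8000 seen again 2 seconds later
+ * becomes 8000 - 8000 * 2 / 5 + 1000 = 5800, and pf_check_threshold()
+ * only trips once the count exceeds 10 * PF_THRESHOLD_MULT.
+ */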
+
+int
+pf_src_connlimit(struct pf_state **state)
+{
+ struct pf_state *s;
+ int bad = 0;
+
+ (*state)->src_node->conn++;
+ (*state)->src.tcp_est = 1;
+ pf_add_threshold(&(*state)->src_node->conn_rate);
+
+ if ((*state)->rule.ptr->max_src_conn &&
+ (*state)->rule.ptr->max_src_conn <
+ (*state)->src_node->conn) {
+ pf_status.lcounters[LCNT_SRCCONN]++;
+ bad++;
+ }
+
+ if ((*state)->rule.ptr->max_src_conn_rate.limit &&
+ pf_check_threshold(&(*state)->src_node->conn_rate)) {
+ pf_status.lcounters[LCNT_SRCCONNRATE]++;
+ bad++;
+ }
+
+ if (!bad)
+ return (0);
+
+ if ((*state)->rule.ptr->overload_tbl) {
+ struct pfr_addr p;
+ u_int32_t killed = 0;
+
+ pf_status.lcounters[LCNT_OVERLOAD_TABLE]++;
+ if (pf_status.debug >= PF_DEBUG_MISC) {
+ printf("pf_src_connlimit: blocking address ");
+ pf_print_host(&(*state)->src_node->addr, 0,
+ (*state)->af);
+ }
+
+ bzero(&p, sizeof(p));
+ p.pfra_af = (*state)->af;
+ switch ((*state)->af) {
+#ifdef INET
+ case AF_INET:
+ p.pfra_net = 32;
+ p.pfra_ip4addr = (*state)->src_node->addr.v4;
+ break;
+#endif /* INET */
+#ifdef INET6
+ case AF_INET6:
+ p.pfra_net = 128;
+ p.pfra_ip6addr = (*state)->src_node->addr.v6;
+ break;
+#endif /* INET6 */
+ }
+
+ pfr_insert_kentry((*state)->rule.ptr->overload_tbl,
+ &p, time_second);
+
+ /* kill existing states if that's required. */
+ if ((*state)->rule.ptr->flush) {
+ pf_status.lcounters[LCNT_OVERLOAD_FLUSH]++;
+
+ RB_FOREACH(s, pf_state_tree_id, &tree_id) {
+ /*
+ * Kill states from this source. (Only those
+ * from the same rule if PF_FLUSH_GLOBAL is not
+ * set)
+ */
+ if (s->af == (*state)->af &&
+ (((*state)->direction == PF_OUT &&
+ PF_AEQ(&(*state)->src_node->addr,
+ &s->lan.addr, s->af)) ||
+ ((*state)->direction == PF_IN &&
+ PF_AEQ(&(*state)->src_node->addr,
+ &s->ext.addr, s->af))) &&
+ ((*state)->rule.ptr->flush &
+ PF_FLUSH_GLOBAL ||
+ (*state)->rule.ptr == s->rule.ptr)) {
+ s->timeout = PFTM_PURGE;
+ s->src.state = s->dst.state =
+ TCPS_CLOSED;
+ killed++;
+ }
+ }
+ if (pf_status.debug >= PF_DEBUG_MISC)
+ printf(", %u states killed", killed);
+ }
+ if (pf_status.debug >= PF_DEBUG_MISC)
+ printf("\n");
+ }
+
+ /* kill this state */
+ (*state)->timeout = PFTM_PURGE;
+ (*state)->src.state = (*state)->dst.state = TCPS_CLOSED;
+ return (1);
+}
+
+int
+pf_insert_src_node(struct pf_src_node **sn, struct pf_rule *rule,
+ struct pf_addr *src, sa_family_t af)
+{
+ struct pf_src_node k;
+
+ if (*sn == NULL) {
+ k.af = af;
+ PF_ACPY(&k.addr, src, af);
+ if (rule->rule_flag & PFRULE_RULESRCTRACK ||
+ rule->rpool.opts & PF_POOL_STICKYADDR)
+ k.rule.ptr = rule;
+ else
+ k.rule.ptr = NULL;
+ pf_status.scounters[SCNT_SRC_NODE_SEARCH]++;
+ *sn = RB_FIND(pf_src_tree, &tree_src_tracking, &k);
+ }
+ if (*sn == NULL) {
+ if (!rule->max_src_nodes ||
+ rule->src_nodes < rule->max_src_nodes)
+ (*sn) = pool_get(&pf_src_tree_pl, PR_NOWAIT);
+ else
+ pf_status.lcounters[LCNT_SRCNODES]++;
+ if ((*sn) == NULL)
+ return (-1);
+ bzero(*sn, sizeof(struct pf_src_node));
+
+ pf_init_threshold(&(*sn)->conn_rate,
+ rule->max_src_conn_rate.limit,
+ rule->max_src_conn_rate.seconds);
+
+ (*sn)->af = af;
+ if (rule->rule_flag & PFRULE_RULESRCTRACK ||
+ rule->rpool.opts & PF_POOL_STICKYADDR)
+ (*sn)->rule.ptr = rule;
+ else
+ (*sn)->rule.ptr = NULL;
+ PF_ACPY(&(*sn)->addr, src, af);
+ if (RB_INSERT(pf_src_tree,
+ &tree_src_tracking, *sn) != NULL) {
+ if (pf_status.debug >= PF_DEBUG_MISC) {
+ printf("pf: src_tree insert failed: ");
+ pf_print_host(&(*sn)->addr, 0, af);
+ printf("\n");
+ }
+ pool_put(&pf_src_tree_pl, *sn);
+ return (-1);
+ }
+ (*sn)->creation = time_second;
+ (*sn)->ruletype = rule->action;
+ if ((*sn)->rule.ptr != NULL)
+ (*sn)->rule.ptr->src_nodes++;
+ pf_status.scounters[SCNT_SRC_NODE_INSERT]++;
+ pf_status.src_nodes++;
+ } else {
+ if (rule->max_src_states &&
+ (*sn)->states >= rule->max_src_states) {
+ pf_status.lcounters[LCNT_SRCSTATES]++;
+ return (-1);
+ }
+ }
+ return (0);
+}
+
+int
+pf_insert_state(struct pfi_kif *kif, struct pf_state *state)
+{
+ /* Thou MUST NOT insert multiple duplicate keys */
+ state->u.s.kif = kif;
+ if (RB_INSERT(pf_state_tree_lan_ext, &kif->pfik_lan_ext, state)) {
+ if (pf_status.debug >= PF_DEBUG_MISC) {
+ printf("pf: state insert failed: tree_lan_ext");
+ printf(" lan: ");
+ pf_print_host(&state->lan.addr, state->lan.port,
+ state->af);
+ printf(" gwy: ");
+ pf_print_host(&state->gwy.addr, state->gwy.port,
+ state->af);
+ printf(" ext: ");
+ pf_print_host(&state->ext.addr, state->ext.port,
+ state->af);
+ if (state->sync_flags & PFSTATE_FROMSYNC)
+ printf(" (from sync)");
+ printf("\n");
+ }
+ return (-1);
+ }
+
+ if (RB_INSERT(pf_state_tree_ext_gwy, &kif->pfik_ext_gwy, state)) {
+ if (pf_status.debug >= PF_DEBUG_MISC) {
+ printf("pf: state insert failed: tree_ext_gwy");
+ printf(" lan: ");
+ pf_print_host(&state->lan.addr, state->lan.port,
+ state->af);
+ printf(" gwy: ");
+ pf_print_host(&state->gwy.addr, state->gwy.port,
+ state->af);
+ printf(" ext: ");
+ pf_print_host(&state->ext.addr, state->ext.port,
+ state->af);
+ if (state->sync_flags & PFSTATE_FROMSYNC)
+ printf(" (from sync)");
+ printf("\n");
+ }
+ RB_REMOVE(pf_state_tree_lan_ext, &kif->pfik_lan_ext, state);
+ return (-1);
+ }
+
+ if (state->id == 0 && state->creatorid == 0) {
+ state->id = htobe64(pf_status.stateid++);
+ state->creatorid = pf_status.hostid;
+ }
+ if (RB_INSERT(pf_state_tree_id, &tree_id, state) != NULL) {
+ if (pf_status.debug >= PF_DEBUG_MISC) {
+#ifdef __FreeBSD__
+ printf("pf: state insert failed: "
+ "id: %016llx creatorid: %08x",
+ (long long)be64toh(state->id),
+ ntohl(state->creatorid));
+#else
+ printf("pf: state insert failed: "
+ "id: %016llx creatorid: %08x",
+ betoh64(state->id), ntohl(state->creatorid));
+#endif
+ if (state->sync_flags & PFSTATE_FROMSYNC)
+ printf(" (from sync)");
+ printf("\n");
+ }
+ RB_REMOVE(pf_state_tree_lan_ext, &kif->pfik_lan_ext, state);
+ RB_REMOVE(pf_state_tree_ext_gwy, &kif->pfik_ext_gwy, state);
+ return (-1);
+ }
+ TAILQ_INSERT_TAIL(&state_list, state, u.s.entry_list);
+ pf_status.fcounters[FCNT_STATE_INSERT]++;
+ pf_status.states++;
+ pfi_kif_ref(kif, PFI_KIF_REF_STATE);
+#if NPFSYNC
+ pfsync_insert_state(state);
+#endif
+ return (0);
+}
+
+void
+pf_purge_thread(void *v)
+{
+ int nloops = 0, s;
+#ifdef __FreeBSD__
+ int locked;
+#endif
+
+ for (;;) {
+ tsleep(pf_purge_thread, PWAIT, "pftm", 1 * hz);
+
+#ifdef __FreeBSD__
+ sx_slock(&pf_consistency_lock);
+ PF_LOCK();
+ locked = 0;
+
+ if (pf_end_threads) {
+ PF_UNLOCK();
+ sx_sunlock(&pf_consistency_lock);
+ sx_xlock(&pf_consistency_lock);
+ PF_LOCK();
+ pf_purge_expired_states(pf_status.states, 1);
+ pf_purge_expired_fragments();
+ pf_purge_expired_src_nodes(1);
+ pf_end_threads++;
+
+ sx_xunlock(&pf_consistency_lock);
+ PF_UNLOCK();
+ wakeup(pf_purge_thread);
+ kproc_exit(0);
+ }
+#endif
+ s = splsoftnet();
+
+ /* process a fraction of the state table every second */
+#ifdef __FreeBSD__
+ if(!pf_purge_expired_states(1 + (pf_status.states
+ / pf_default_rule.timeout[PFTM_INTERVAL]), 0)) {
+ PF_UNLOCK();
+ sx_sunlock(&pf_consistency_lock);
+ sx_xlock(&pf_consistency_lock);
+ PF_LOCK();
+ locked = 1;
+
+ pf_purge_expired_states(1 + (pf_status.states
+ / pf_default_rule.timeout[PFTM_INTERVAL]), 1);
+ }
+#else
+ pf_purge_expired_states(1 + (pf_status.states
+ / pf_default_rule.timeout[PFTM_INTERVAL]));
+#endif
+
+ /* purge other expired types every PFTM_INTERVAL seconds */
+ if (++nloops >= pf_default_rule.timeout[PFTM_INTERVAL]) {
+ pf_purge_expired_fragments();
+ if (!pf_purge_expired_src_nodes(locked)) {
+ PF_UNLOCK();
+ sx_sunlock(&pf_consistency_lock);
+ sx_xlock(&pf_consistency_lock);
+ PF_LOCK();
+ locked = 1;
+ pf_purge_expired_src_nodes(1);
+ }
+ nloops = 0;
+ }
+
+ splx(s);
+#ifdef __FreeBSD__
+ PF_UNLOCK();
+ if (locked)
+ sx_xunlock(&pf_consistency_lock);
+ else
+ sx_sunlock(&pf_consistency_lock);
+#endif
+ }
+}
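+
+/*
+ * Illustrative numbers: with 10000 states and PFTM_INTERVAL at its
+ * default of 10 seconds, each one-second pass above checks
+ * 1 + 10000 / 10 = 1001 states, so the whole table is swept roughly
+ * once per purge interval.
+ */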
+
+u_int32_t
+pf_state_expires(const struct pf_state *state)
+{
+ u_int32_t timeout;
+ u_int32_t start;
+ u_int32_t end;
+ u_int32_t states;
+
+ /* handle all PFTM_* > PFTM_MAX here */
+ if (state->timeout == PFTM_PURGE)
+ return (time_second);
+ if (state->timeout == PFTM_UNTIL_PACKET)
+ return (0);
+#ifdef __FreeBSD__
+ KASSERT(state->timeout != PFTM_UNLINKED,
+ ("pf_state_expires: timeout == PFTM_UNLINKED"));
+ KASSERT((state->timeout < PFTM_MAX),
+ ("pf_state_expires: timeout > PFTM_MAX"));
+#else
+ KASSERT(state->timeout != PFTM_UNLINKED);
+ KASSERT(state->timeout < PFTM_MAX);
+#endif
+ timeout = state->rule.ptr->timeout[state->timeout];
+ if (!timeout)
+ timeout = pf_default_rule.timeout[state->timeout];
+ start = state->rule.ptr->timeout[PFTM_ADAPTIVE_START];
+ if (start) {
+ end = state->rule.ptr->timeout[PFTM_ADAPTIVE_END];
+ states = state->rule.ptr->states;
+ } else {
+ start = pf_default_rule.timeout[PFTM_ADAPTIVE_START];
+ end = pf_default_rule.timeout[PFTM_ADAPTIVE_END];
+ states = pf_status.states;
+ }
+ if (end && states > start && start < end) {
+ if (states < end)
+ return (state->expire + timeout * (end - states) /
+ (end - start));
+ else
+ return (time_second);
+ }
+ return (state->expire + timeout);
+}
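+
+/*
+ * Worked example (illustrative): with adaptive.start = 6000,
+ * adaptive.end = 12000 and a base timeout of 100 seconds, a table of
+ * 9000 states scales the timeout to 100 * (12000 - 9000) /
+ * (12000 - 6000) = 50 seconds; at 12000 states or more the state is
+ * reported as already expired.
+ */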
+
+#ifdef __FreeBSD__
+int
+pf_purge_expired_src_nodes(int waslocked)
+#else
+void
+pf_purge_expired_src_nodes(int waslocked)
+#endif
+{
+ struct pf_src_node *cur, *next;
+ int locked = waslocked;
+
+ for (cur = RB_MIN(pf_src_tree, &tree_src_tracking); cur; cur = next) {
+ next = RB_NEXT(pf_src_tree, &tree_src_tracking, cur);
+
+ if (cur->states <= 0 && cur->expire <= time_second) {
+ if (! locked) {
+#ifdef __FreeBSD__
+ if (!sx_try_upgrade(&pf_consistency_lock))
+ return (0);
+#else
+ rw_enter_write(&pf_consistency_lock);
+#endif
+ next = RB_NEXT(pf_src_tree,
+ &tree_src_tracking, cur);
+ locked = 1;
+ }
+ if (cur->rule.ptr != NULL) {
+ cur->rule.ptr->src_nodes--;
+ if (cur->rule.ptr->states <= 0 &&
+ cur->rule.ptr->max_src_nodes <= 0)
+ pf_rm_rule(NULL, cur->rule.ptr);
+ }
+ RB_REMOVE(pf_src_tree, &tree_src_tracking, cur);
+ pf_status.scounters[SCNT_SRC_NODE_REMOVALS]++;
+ pf_status.src_nodes--;
+ pool_put(&pf_src_tree_pl, cur);
+ }
+ }
+
+ if (locked && !waslocked)
+#ifdef __FreeBSD__
+ sx_downgrade(&pf_consistency_lock);
+#else
+ rw_exit_write(&pf_consistency_lock);
+#endif
+
+#ifdef __FreeBSD__
+ return (1);
+#endif
+}
+
+void
+pf_src_tree_remove_state(struct pf_state *s)
+{
+ u_int32_t timeout;
+
+ if (s->src_node != NULL) {
+ if (s->proto == IPPROTO_TCP) {
+ if (s->src.tcp_est)
+ --s->src_node->conn;
+ }
+ if (--s->src_node->states <= 0) {
+ timeout = s->rule.ptr->timeout[PFTM_SRC_NODE];
+ if (!timeout)
+ timeout =
+ pf_default_rule.timeout[PFTM_SRC_NODE];
+ s->src_node->expire = time_second + timeout;
+ }
+ }
+ if (s->nat_src_node != s->src_node && s->nat_src_node != NULL) {
+ if (--s->nat_src_node->states <= 0) {
+ timeout = s->rule.ptr->timeout[PFTM_SRC_NODE];
+ if (!timeout)
+ timeout =
+ pf_default_rule.timeout[PFTM_SRC_NODE];
+ s->nat_src_node->expire = time_second + timeout;
+ }
+ }
+ s->src_node = s->nat_src_node = NULL;
+}
+
+/* callers should be at splsoftnet */
+void
+pf_unlink_state(struct pf_state *cur)
+{
+#ifdef __FreeBSD__
+ if (cur->local_flags & PFSTATE_EXPIRING)
+ return;
+ cur->local_flags |= PFSTATE_EXPIRING;
+#endif
+ if (cur->src.state == PF_TCPS_PROXY_DST) {
+#ifdef __FreeBSD__
+ pf_send_tcp(NULL, cur->rule.ptr, cur->af,
+#else
+ pf_send_tcp(cur->rule.ptr, cur->af,
+#endif
+ &cur->ext.addr, &cur->lan.addr,
+ cur->ext.port, cur->lan.port,
+ cur->src.seqhi, cur->src.seqlo + 1,
+ TH_RST|TH_ACK, 0, 0, 0, 1, cur->tag, NULL, NULL);
+ }
+ RB_REMOVE(pf_state_tree_ext_gwy,
+ &cur->u.s.kif->pfik_ext_gwy, cur);
+ RB_REMOVE(pf_state_tree_lan_ext,
+ &cur->u.s.kif->pfik_lan_ext, cur);
+ RB_REMOVE(pf_state_tree_id, &tree_id, cur);
+#if NPFSYNC
+ if (cur->creatorid == pf_status.hostid)
+ pfsync_delete_state(cur);
+#endif
+ cur->timeout = PFTM_UNLINKED;
+ pf_src_tree_remove_state(cur);
+}
+
+/* callers should be at splsoftnet and hold the
+ * write_lock on pf_consistency_lock */
+void
+pf_free_state(struct pf_state *cur)
+{
+#if NPFSYNC
+ if (pfsyncif != NULL &&
+ (pfsyncif->sc_bulk_send_next == cur ||
+ pfsyncif->sc_bulk_terminator == cur))
+ return;
+#endif
+#ifdef __FreeBSD__
+ KASSERT(cur->timeout == PFTM_UNLINKED,
+ ("pf_free_state: cur->timeout != PFTM_UNLINKED"));
+#else
+ KASSERT(cur->timeout == PFTM_UNLINKED);
+#endif
+ if (--cur->rule.ptr->states <= 0 &&
+ cur->rule.ptr->src_nodes <= 0)
+ pf_rm_rule(NULL, cur->rule.ptr);
+ if (cur->nat_rule.ptr != NULL)
+ if (--cur->nat_rule.ptr->states <= 0 &&
+ cur->nat_rule.ptr->src_nodes <= 0)
+ pf_rm_rule(NULL, cur->nat_rule.ptr);
+ if (cur->anchor.ptr != NULL)
+ if (--cur->anchor.ptr->states <= 0)
+ pf_rm_rule(NULL, cur->anchor.ptr);
+ pf_normalize_tcp_cleanup(cur);
+ pfi_kif_unref(cur->u.s.kif, PFI_KIF_REF_STATE);
+ TAILQ_REMOVE(&state_list, cur, u.s.entry_list);
+ if (cur->tag)
+ pf_tag_unref(cur->tag);
+ pool_put(&pf_state_pl, cur);
+ pf_status.fcounters[FCNT_STATE_REMOVALS]++;
+ pf_status.states--;
+}
+
+#ifdef __FreeBSD__
+int
+pf_purge_expired_states(u_int32_t maxcheck, int waslocked)
+#else
+void
+pf_purge_expired_states(u_int32_t maxcheck)
+#endif
+{
+ static struct pf_state *cur = NULL;
+ struct pf_state *next;
+#ifdef __FreeBSD__
+ int locked = waslocked;
+#else
+ int locked = 0;
+#endif
+
+ while (maxcheck--) {
+ /* wrap to start of list when we hit the end */
+ if (cur == NULL) {
+ cur = TAILQ_FIRST(&state_list);
+ if (cur == NULL)
+ break; /* list empty */
+ }
+
+ /* get next state, as cur may get deleted */
+ next = TAILQ_NEXT(cur, u.s.entry_list);
+
+ if (cur->timeout == PFTM_UNLINKED) {
+ /* free unlinked state */
+ if (! locked) {
+#ifdef __FreeBSD__
+ if (!sx_try_upgrade(&pf_consistency_lock))
+ return (0);
+#else
+ rw_enter_write(&pf_consistency_lock);
+#endif
+ locked = 1;
+ }
+ pf_free_state(cur);
+ } else if (pf_state_expires(cur) <= time_second) {
+ /* unlink and free expired state */
+ pf_unlink_state(cur);
+ if (! locked) {
+#ifdef __FreeBSD__
+ if (!sx_try_upgrade(&pf_consistency_lock))
+ return (0);
+#else
+ rw_enter_write(&pf_consistency_lock);
+#endif
+ locked = 1;
+ }
+ pf_free_state(cur);
+ }
+ cur = next;
+ }
+
+#ifdef __FreeBSD__
+ if (!waslocked && locked)
+ sx_downgrade(&pf_consistency_lock);
+
+ return (1);
+#else
+ if (locked)
+ rw_exit_write(&pf_consistency_lock);
+#endif
+}
+
+int
+pf_tbladdr_setup(struct pf_ruleset *rs, struct pf_addr_wrap *aw)
+{
+ if (aw->type != PF_ADDR_TABLE)
+ return (0);
+ if ((aw->p.tbl = pfr_attach_table(rs, aw->v.tblname)) == NULL)
+ return (1);
+ return (0);
+}
+
+void
+pf_tbladdr_remove(struct pf_addr_wrap *aw)
+{
+ if (aw->type != PF_ADDR_TABLE || aw->p.tbl == NULL)
+ return;
+ pfr_detach_table(aw->p.tbl);
+ aw->p.tbl = NULL;
+}
+
+void
+pf_tbladdr_copyout(struct pf_addr_wrap *aw)
+{
+ struct pfr_ktable *kt = aw->p.tbl;
+
+ if (aw->type != PF_ADDR_TABLE || kt == NULL)
+ return;
+ if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
+ kt = kt->pfrkt_root;
+ aw->p.tbl = NULL;
+ aw->p.tblcnt = (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) ?
+ kt->pfrkt_cnt : -1;
+}
+
+void
+pf_print_host(struct pf_addr *addr, u_int16_t p, sa_family_t af)
+{
+ switch (af) {
+#ifdef INET
+ case AF_INET: {
+ u_int32_t a = ntohl(addr->addr32[0]);
+ printf("%u.%u.%u.%u", (a>>24)&255, (a>>16)&255,
+ (a>>8)&255, a&255);
+ if (p) {
+ p = ntohs(p);
+ printf(":%u", p);
+ }
+ break;
+ }
+#endif /* INET */
+#ifdef INET6
+ case AF_INET6: {
+ u_int16_t b;
+ u_int8_t i, curstart = 255, curend = 0,
+ maxstart = 0, maxend = 0;
+ for (i = 0; i < 8; i++) {
+ if (!addr->addr16[i]) {
+ if (curstart == 255)
+ curstart = i;
+ else
+ curend = i;
+ } else {
+ if (curstart != 255) {
+ if ((curend - curstart) >
+ (maxend - maxstart)) {
+ maxstart = curstart;
+ maxend = curend;
+ }
+ curstart = 255;
+ }
+ }
+ }
+ for (i = 0; i < 8; i++) {
+ if (i >= maxstart && i <= maxend) {
+ if (maxend != 7) {
+ if (i == maxstart)
+ printf(":");
+ } else {
+ if (i == maxend)
+ printf(":");
+ }
+ } else {
+ b = ntohs(addr->addr16[i]);
+ printf("%x", b);
+ if (i < 7)
+ printf(":");
+ }
+ }
+ if (p) {
+ p = ntohs(p);
+ printf("[%u]", p);
+ }
+ break;
+ }
+#endif /* INET6 */
+ }
+}
+
+void
+pf_print_state(struct pf_state *s)
+{
+ switch (s->proto) {
+ case IPPROTO_TCP:
+ printf("TCP ");
+ break;
+ case IPPROTO_UDP:
+ printf("UDP ");
+ break;
+ case IPPROTO_ICMP:
+ printf("ICMP ");
+ break;
+ case IPPROTO_ICMPV6:
+ printf("ICMPV6 ");
+ break;
+ default:
+ printf("%u ", s->proto);
+ break;
+ }
+ pf_print_host(&s->lan.addr, s->lan.port, s->af);
+ printf(" ");
+ pf_print_host(&s->gwy.addr, s->gwy.port, s->af);
+ printf(" ");
+ pf_print_host(&s->ext.addr, s->ext.port, s->af);
+ printf(" [lo=%u high=%u win=%u modulator=%u", s->src.seqlo,
+ s->src.seqhi, s->src.max_win, s->src.seqdiff);
+ if (s->src.wscale && s->dst.wscale)
+ printf(" wscale=%u", s->src.wscale & PF_WSCALE_MASK);
+ printf("]");
+ printf(" [lo=%u high=%u win=%u modulator=%u", s->dst.seqlo,
+ s->dst.seqhi, s->dst.max_win, s->dst.seqdiff);
+ if (s->src.wscale && s->dst.wscale)
+ printf(" wscale=%u", s->dst.wscale & PF_WSCALE_MASK);
+ printf("]");
+ printf(" %u:%u", s->src.state, s->dst.state);
+}
+
+void
+pf_print_flags(u_int8_t f)
+{
+ if (f)
+ printf(" ");
+ if (f & TH_FIN)
+ printf("F");
+ if (f & TH_SYN)
+ printf("S");
+ if (f & TH_RST)
+ printf("R");
+ if (f & TH_PUSH)
+ printf("P");
+ if (f & TH_ACK)
+ printf("A");
+ if (f & TH_URG)
+ printf("U");
+ if (f & TH_ECE)
+ printf("E");
+ if (f & TH_CWR)
+ printf("W");
+}
+
+#define PF_SET_SKIP_STEPS(i) \
+ do { \
+ while (head[i] != cur) { \
+ head[i]->skip[i].ptr = cur; \
+ head[i] = TAILQ_NEXT(head[i], entries); \
+ } \
+ } while (0)
+
+void
+pf_calc_skip_steps(struct pf_rulequeue *rules)
+{
+ struct pf_rule *cur, *prev, *head[PF_SKIP_COUNT];
+ int i;
+
+ cur = TAILQ_FIRST(rules);
+ prev = cur;
+ for (i = 0; i < PF_SKIP_COUNT; ++i)
+ head[i] = cur;
+ while (cur != NULL) {
+
+ if (cur->kif != prev->kif || cur->ifnot != prev->ifnot)
+ PF_SET_SKIP_STEPS(PF_SKIP_IFP);
+ if (cur->direction != prev->direction)
+ PF_SET_SKIP_STEPS(PF_SKIP_DIR);
+ if (cur->af != prev->af)
+ PF_SET_SKIP_STEPS(PF_SKIP_AF);
+ if (cur->proto != prev->proto)
+ PF_SET_SKIP_STEPS(PF_SKIP_PROTO);
+ if (cur->src.neg != prev->src.neg ||
+ pf_addr_wrap_neq(&cur->src.addr, &prev->src.addr))
+ PF_SET_SKIP_STEPS(PF_SKIP_SRC_ADDR);
+ if (cur->src.port[0] != prev->src.port[0] ||
+ cur->src.port[1] != prev->src.port[1] ||
+ cur->src.port_op != prev->src.port_op)
+ PF_SET_SKIP_STEPS(PF_SKIP_SRC_PORT);
+ if (cur->dst.neg != prev->dst.neg ||
+ pf_addr_wrap_neq(&cur->dst.addr, &prev->dst.addr))
+ PF_SET_SKIP_STEPS(PF_SKIP_DST_ADDR);
+ if (cur->dst.port[0] != prev->dst.port[0] ||
+ cur->dst.port[1] != prev->dst.port[1] ||
+ cur->dst.port_op != prev->dst.port_op)
+ PF_SET_SKIP_STEPS(PF_SKIP_DST_PORT);
+
+ prev = cur;
+ cur = TAILQ_NEXT(cur, entries);
+ }
+ for (i = 0; i < PF_SKIP_COUNT; ++i)
+ PF_SET_SKIP_STEPS(i);
+}
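+
+/*
+ * Illustrative example: for three consecutive rules that differ only
+ * in destination port, the first rule's skip[PF_SKIP_IFP] through
+ * skip[PF_SKIP_DST_ADDR] pointers all point past the group, so a
+ * packet that fails one of the shared criteria skips the whole run
+ * instead of re-evaluating each rule in turn.
+ */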
+
+int
+pf_addr_wrap_neq(struct pf_addr_wrap *aw1, struct pf_addr_wrap *aw2)
+{
+ if (aw1->type != aw2->type)
+ return (1);
+ switch (aw1->type) {
+ case PF_ADDR_ADDRMASK:
+ if (PF_ANEQ(&aw1->v.a.addr, &aw2->v.a.addr, 0))
+ return (1);
+ if (PF_ANEQ(&aw1->v.a.mask, &aw2->v.a.mask, 0))
+ return (1);
+ return (0);
+ case PF_ADDR_DYNIFTL:
+ return (aw1->p.dyn->pfid_kt != aw2->p.dyn->pfid_kt);
+ case PF_ADDR_NOROUTE:
+ case PF_ADDR_URPFFAILED:
+ return (0);
+ case PF_ADDR_TABLE:
+ return (aw1->p.tbl != aw2->p.tbl);
+ case PF_ADDR_RTLABEL:
+ return (aw1->v.rtlabel != aw2->v.rtlabel);
+ default:
+ printf("invalid address type: %d\n", aw1->type);
+ return (1);
+ }
+}
+
+u_int16_t
+pf_cksum_fixup(u_int16_t cksum, u_int16_t old, u_int16_t new, u_int8_t udp)
+{
+ u_int32_t l;
+
+ if (udp && !cksum)
+ return (0x0000);
+ l = cksum + old - new;
+ l = (l >> 16) + (l & 65535);
+ l = l & 65535;
+ if (udp && !l)
+ return (0xFFFF);
+ return (l);
+}
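+
+/*
+ * Worked example (illustrative): changing one 16-bit word from 0x1234
+ * to 0x1235 under a checksum of 0xabcd gives l = 0xabcd + 0x1234 -
+ * 0x1235 = 0xabcc, with any end-around carry folded back into the low
+ * 16 bits (RFC 1624 incremental update).  The udp special cases keep
+ * 0 ("no checksum") intact and re-encode a computed 0 as 0xFFFF, as
+ * RFC 768 requires.
+ */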
+
+void
+pf_change_ap(struct pf_addr *a, u_int16_t *p, u_int16_t *ic, u_int16_t *pc,
+ struct pf_addr *an, u_int16_t pn, u_int8_t u, sa_family_t af)
+{
+ struct pf_addr ao;
+ u_int16_t po = *p;
+
+ PF_ACPY(&ao, a, af);
+ PF_ACPY(a, an, af);
+
+ *p = pn;
+
+ switch (af) {
+#ifdef INET
+ case AF_INET:
+ *ic = pf_cksum_fixup(pf_cksum_fixup(*ic,
+ ao.addr16[0], an->addr16[0], 0),
+ ao.addr16[1], an->addr16[1], 0);
+ *pc = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(*pc,
+ ao.addr16[0], an->addr16[0], u),
+ ao.addr16[1], an->addr16[1], u),
+ po, pn, u);
+ break;
+#endif /* INET */
+#ifdef INET6
+ case AF_INET6:
+ *pc = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
+ pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
+ pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(*pc,
+ ao.addr16[0], an->addr16[0], u),
+ ao.addr16[1], an->addr16[1], u),
+ ao.addr16[2], an->addr16[2], u),
+ ao.addr16[3], an->addr16[3], u),
+ ao.addr16[4], an->addr16[4], u),
+ ao.addr16[5], an->addr16[5], u),
+ ao.addr16[6], an->addr16[6], u),
+ ao.addr16[7], an->addr16[7], u),
+ po, pn, u);
+ break;
+#endif /* INET6 */
+ }
+}
+
+
+/* Changes a u_int32_t. Uses a void * so there are no alignment restrictions */
+void
+pf_change_a(void *a, u_int16_t *c, u_int32_t an, u_int8_t u)
+{
+ u_int32_t ao;
+
+ memcpy(&ao, a, sizeof(ao));
+ memcpy(a, &an, sizeof(u_int32_t));
+ *c = pf_cksum_fixup(pf_cksum_fixup(*c, ao / 65536, an / 65536, u),
+ ao % 65536, an % 65536, u);
+}
+
+#ifdef INET6
+void
+pf_change_a6(struct pf_addr *a, u_int16_t *c, struct pf_addr *an, u_int8_t u)
+{
+ struct pf_addr ao;
+
+ PF_ACPY(&ao, a, AF_INET6);
+ PF_ACPY(a, an, AF_INET6);
+
+ *c = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
+ pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
+ pf_cksum_fixup(pf_cksum_fixup(*c,
+ ao.addr16[0], an->addr16[0], u),
+ ao.addr16[1], an->addr16[1], u),
+ ao.addr16[2], an->addr16[2], u),
+ ao.addr16[3], an->addr16[3], u),
+ ao.addr16[4], an->addr16[4], u),
+ ao.addr16[5], an->addr16[5], u),
+ ao.addr16[6], an->addr16[6], u),
+ ao.addr16[7], an->addr16[7], u);
+}
+#endif /* INET6 */
+
+void
+pf_change_icmp(struct pf_addr *ia, u_int16_t *ip, struct pf_addr *oa,
+ struct pf_addr *na, u_int16_t np, u_int16_t *pc, u_int16_t *h2c,
+ u_int16_t *ic, u_int16_t *hc, u_int8_t u, sa_family_t af)
+{
+ struct pf_addr oia, ooa;
+
+ PF_ACPY(&oia, ia, af);
+ PF_ACPY(&ooa, oa, af);
+
+ /* Change inner protocol port, fix inner protocol checksum. */
+ if (ip != NULL) {
+ u_int16_t oip = *ip;
+ u_int32_t opc = 0; /* make the compiler happy */
+
+ if (pc != NULL)
+ opc = *pc;
+ *ip = np;
+ if (pc != NULL)
+ *pc = pf_cksum_fixup(*pc, oip, *ip, u);
+ *ic = pf_cksum_fixup(*ic, oip, *ip, 0);
+ if (pc != NULL)
+ *ic = pf_cksum_fixup(*ic, opc, *pc, 0);
+ }
+ /* Change inner ip address, fix inner ip and icmp checksums. */
+ PF_ACPY(ia, na, af);
+ switch (af) {
+#ifdef INET
+ case AF_INET: {
+ u_int32_t oh2c = *h2c;
+
+ *h2c = pf_cksum_fixup(pf_cksum_fixup(*h2c,
+ oia.addr16[0], ia->addr16[0], 0),
+ oia.addr16[1], ia->addr16[1], 0);
+ *ic = pf_cksum_fixup(pf_cksum_fixup(*ic,
+ oia.addr16[0], ia->addr16[0], 0),
+ oia.addr16[1], ia->addr16[1], 0);
+ *ic = pf_cksum_fixup(*ic, oh2c, *h2c, 0);
+ break;
+ }
+#endif /* INET */
+#ifdef INET6
+ case AF_INET6:
+ *ic = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
+ pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
+ pf_cksum_fixup(pf_cksum_fixup(*ic,
+ oia.addr16[0], ia->addr16[0], u),
+ oia.addr16[1], ia->addr16[1], u),
+ oia.addr16[2], ia->addr16[2], u),
+ oia.addr16[3], ia->addr16[3], u),
+ oia.addr16[4], ia->addr16[4], u),
+ oia.addr16[5], ia->addr16[5], u),
+ oia.addr16[6], ia->addr16[6], u),
+ oia.addr16[7], ia->addr16[7], u);
+ break;
+#endif /* INET6 */
+ }
+ /* Change outer ip address, fix outer ip or icmpv6 checksum. */
+ PF_ACPY(oa, na, af);
+ switch (af) {
+#ifdef INET
+ case AF_INET:
+ *hc = pf_cksum_fixup(pf_cksum_fixup(*hc,
+ ooa.addr16[0], oa->addr16[0], 0),
+ ooa.addr16[1], oa->addr16[1], 0);
+ break;
+#endif /* INET */
+#ifdef INET6
+ case AF_INET6:
+ *ic = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
+ pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
+ pf_cksum_fixup(pf_cksum_fixup(*ic,
+ ooa.addr16[0], oa->addr16[0], u),
+ ooa.addr16[1], oa->addr16[1], u),
+ ooa.addr16[2], oa->addr16[2], u),
+ ooa.addr16[3], oa->addr16[3], u),
+ ooa.addr16[4], oa->addr16[4], u),
+ ooa.addr16[5], oa->addr16[5], u),
+ ooa.addr16[6], oa->addr16[6], u),
+ ooa.addr16[7], oa->addr16[7], u);
+ break;
+#endif /* INET6 */
+ }
+}
+
+
+/*
+ * Need to modulate the sequence numbers in the TCP SACK option
+ * (credits to Krzysztof Pfaff for report and patch)
+ */
+int
+pf_modulate_sack(struct mbuf *m, int off, struct pf_pdesc *pd,
+ struct tcphdr *th, struct pf_state_peer *dst)
+{
+ int hlen = (th->th_off << 2) - sizeof(*th), thoptlen = hlen;
+#ifdef __FreeBSD__
+ u_int8_t opts[TCP_MAXOLEN], *opt = opts;
+#else
+ u_int8_t opts[MAX_TCPOPTLEN], *opt = opts;
+#endif
+ int copyback = 0, i, olen;
+ struct sackblk sack;
+
+#define TCPOLEN_SACKLEN (TCPOLEN_SACK + 2)
+ if (hlen < TCPOLEN_SACKLEN ||
+ !pf_pull_hdr(m, off + sizeof(*th), opts, hlen, NULL, NULL, pd->af))
+ return (0);
+
+ while (hlen >= TCPOLEN_SACKLEN) {
+ olen = opt[1];
+ switch (*opt) {
+ case TCPOPT_EOL: /* FALLTHROUGH */
+ case TCPOPT_NOP:
+ opt++;
+ hlen--;
+ break;
+ case TCPOPT_SACK:
+ if (olen > hlen)
+ olen = hlen;
+ if (olen >= TCPOLEN_SACKLEN) {
+ for (i = 2; i + TCPOLEN_SACK <= olen;
+ i += TCPOLEN_SACK) {
+ memcpy(&sack, &opt[i], sizeof(sack));
+ pf_change_a(&sack.start, &th->th_sum,
+ htonl(ntohl(sack.start) -
+ dst->seqdiff), 0);
+ pf_change_a(&sack.end, &th->th_sum,
+ htonl(ntohl(sack.end) -
+ dst->seqdiff), 0);
+ memcpy(&opt[i], &sack, sizeof(sack));
+ }
+ copyback = 1;
+ }
+ /* FALLTHROUGH */
+ default:
+ if (olen < 2)
+ olen = 2;
+ hlen -= olen;
+ opt += olen;
+ }
+ }
+
+ if (copyback)
+#ifdef __FreeBSD__
+ m_copyback(m, off + sizeof(*th), thoptlen, (caddr_t)opts);
+#else
+ m_copyback(m, off + sizeof(*th), thoptlen, opts);
+#endif
+ return (copyback);
+}
+
+void
+#ifdef __FreeBSD__
+pf_send_tcp(struct mbuf *replyto, const struct pf_rule *r, sa_family_t af,
+#else
+pf_send_tcp(const struct pf_rule *r, sa_family_t af,
+#endif
+ const struct pf_addr *saddr, const struct pf_addr *daddr,
+ u_int16_t sport, u_int16_t dport, u_int32_t seq, u_int32_t ack,
+ u_int8_t flags, u_int16_t win, u_int16_t mss, u_int8_t ttl, int tag,
+ u_int16_t rtag, struct ether_header *eh, struct ifnet *ifp)
+{
+ struct mbuf *m;
+ int len, tlen;
+#ifdef INET
+ struct ip *h;
+#endif /* INET */
+#ifdef INET6
+ struct ip6_hdr *h6;
+#endif /* INET6 */
+ struct tcphdr *th;
+ char *opt;
+ struct pf_mtag *pf_mtag;
+
+#ifdef __FreeBSD__
+ KASSERT(
+#ifdef INET
+ af == AF_INET
+#else
+ 0
+#endif
+ ||
+#ifdef INET6
+ af == AF_INET6
+#else
+ 0
+#endif
+ , ("Unsupported AF %d", af));
+ len = 0;
+ th = NULL;
+#ifdef INET
+ h = NULL;
+#endif
+#ifdef INET6
+ h6 = NULL;
+#endif
+#endif
+
+ /* maximum segment size tcp option */
+ tlen = sizeof(struct tcphdr);
+ if (mss)
+ tlen += 4;
+
+ switch (af) {
+#ifdef INET
+ case AF_INET:
+ len = sizeof(struct ip) + tlen;
+ break;
+#endif /* INET */
+#ifdef INET6
+ case AF_INET6:
+ len = sizeof(struct ip6_hdr) + tlen;
+ break;
+#endif /* INET6 */
+ }
+
+ /* create outgoing mbuf */
+ m = m_gethdr(M_DONTWAIT, MT_HEADER);
+ if (m == NULL)
+ return;
+#ifdef __FreeBSD__
+#ifdef MAC
+ if (replyto)
+ mac_netinet_firewall_reply(replyto, m);
+ else
+ mac_netinet_firewall_send(m);
+#else
+ (void)replyto;
+#endif
+#endif
+ if ((pf_mtag = pf_get_mtag(m)) == NULL) {
+ m_freem(m);
+ return;
+ }
+ if (tag)
+#ifdef __FreeBSD__
+ m->m_flags |= M_SKIP_FIREWALL;
+#else
+ pf_mtag->flags |= PF_TAG_GENERATED;
+#endif
+
+ pf_mtag->tag = rtag;
+
+ if (r != NULL && r->rtableid >= 0)
+#ifdef __FreeBSD__
+ {
+ M_SETFIB(m, r->rtableid);
+#endif
+ pf_mtag->rtableid = r->rtableid;
+#ifdef __FreeBSD__
+ }
+#endif
+#ifdef ALTQ
+ if (r != NULL && r->qid) {
+ pf_mtag->qid = r->qid;
+ /* add hints for ecn */
+ pf_mtag->af = af;
+ pf_mtag->hdr = mtod(m, struct ip *);
+ }
+#endif /* ALTQ */
+ m->m_data += max_linkhdr;
+ m->m_pkthdr.len = m->m_len = len;
+ m->m_pkthdr.rcvif = NULL;
+ bzero(m->m_data, len);
+ switch (af) {
+#ifdef INET
+ case AF_INET:
+ h = mtod(m, struct ip *);
+
+ /* IP header fields included in the TCP checksum */
+ h->ip_p = IPPROTO_TCP;
+ h->ip_len = htons(tlen);
+ h->ip_src.s_addr = saddr->v4.s_addr;
+ h->ip_dst.s_addr = daddr->v4.s_addr;
+
+ th = (struct tcphdr *)((caddr_t)h + sizeof(struct ip));
+ break;
+#endif /* INET */
+#ifdef INET6
+ case AF_INET6:
+ h6 = mtod(m, struct ip6_hdr *);
+
+ /* IP header fields included in the TCP checksum */
+ h6->ip6_nxt = IPPROTO_TCP;
+ h6->ip6_plen = htons(tlen);
+ memcpy(&h6->ip6_src, &saddr->v6, sizeof(struct in6_addr));
+ memcpy(&h6->ip6_dst, &daddr->v6, sizeof(struct in6_addr));
+
+ th = (struct tcphdr *)((caddr_t)h6 + sizeof(struct ip6_hdr));
+ break;
+#endif /* INET6 */
+ }
+
+ /* TCP header */
+ th->th_sport = sport;
+ th->th_dport = dport;
+ th->th_seq = htonl(seq);
+ th->th_ack = htonl(ack);
+ th->th_off = tlen >> 2;
+ th->th_flags = flags;
+ th->th_win = htons(win);
+
+ if (mss) {
+ opt = (char *)(th + 1);
+ opt[0] = TCPOPT_MAXSEG;
+ opt[1] = 4;
+ HTONS(mss);
+ bcopy((caddr_t)&mss, (caddr_t)(opt + 2), 2);
+ }
+
+ switch (af) {
+#ifdef INET
+ case AF_INET:
+ /* TCP checksum */
+ th->th_sum = in_cksum(m, len);
+
+ /* Finish the IP header */
+ h->ip_v = 4;
+ h->ip_hl = sizeof(*h) >> 2;
+ h->ip_tos = IPTOS_LOWDELAY;
+#ifdef __FreeBSD__
+ h->ip_off = V_path_mtu_discovery ? IP_DF : 0;
+ h->ip_len = len;
+#else
+ h->ip_off = htons(ip_mtudisc ? IP_DF : 0);
+ h->ip_len = htons(len);
+#endif
+ h->ip_ttl = ttl ? ttl : V_ip_defttl;
+ h->ip_sum = 0;
+ if (eh == NULL) {
+#ifdef __FreeBSD__
+ PF_UNLOCK();
+ ip_output(m, (void *)NULL, (void *)NULL, 0,
+ (void *)NULL, (void *)NULL);
+ PF_LOCK();
+#else /* ! __FreeBSD__ */
+ ip_output(m, (void *)NULL, (void *)NULL, 0,
+ (void *)NULL, (void *)NULL);
+#endif
+ } else {
+ struct route ro;
+ struct rtentry rt;
+ struct ether_header *e = (void *)ro.ro_dst.sa_data;
+
+ if (ifp == NULL) {
+ m_freem(m);
+ return;
+ }
+ rt.rt_ifp = ifp;
+ ro.ro_rt = &rt;
+ ro.ro_dst.sa_len = sizeof(ro.ro_dst);
+ ro.ro_dst.sa_family = pseudo_AF_HDRCMPLT;
+ bcopy(eh->ether_dhost, e->ether_shost, ETHER_ADDR_LEN);
+ bcopy(eh->ether_shost, e->ether_dhost, ETHER_ADDR_LEN);
+ e->ether_type = eh->ether_type;
+#ifdef __FreeBSD__
+ PF_UNLOCK();
+ /* XXX_IMPORT: later */
+ ip_output(m, (void *)NULL, &ro, 0,
+ (void *)NULL, (void *)NULL);
+ PF_LOCK();
+#else /* ! __FreeBSD__ */
+ ip_output(m, (void *)NULL, &ro, IP_ROUTETOETHER,
+ (void *)NULL, (void *)NULL);
+#endif
+ }
+ break;
+#endif /* INET */
+#ifdef INET6
+ case AF_INET6:
+ /* TCP checksum */
+ th->th_sum = in6_cksum(m, IPPROTO_TCP,
+ sizeof(struct ip6_hdr), tlen);
+
+ h6->ip6_vfc |= IPV6_VERSION;
+ h6->ip6_hlim = IPV6_DEFHLIM;
+
+#ifdef __FreeBSD__
+ PF_UNLOCK();
+ ip6_output(m, NULL, NULL, 0, NULL, NULL, NULL);
+ PF_LOCK();
+#else
+ ip6_output(m, NULL, NULL, 0, NULL, NULL);
+#endif
+ break;
+#endif /* INET6 */
+ }
+}
+
+void
+pf_send_icmp(struct mbuf *m, u_int8_t type, u_int8_t code, sa_family_t af,
+ struct pf_rule *r)
+{
+ struct pf_mtag *pf_mtag;
+ struct mbuf *m0;
+#ifdef __FreeBSD__
+ struct ip *ip;
+#endif
+
+#ifdef __FreeBSD__
+ m0 = m_copypacket(m, M_DONTWAIT);
+ if (m0 == NULL)
+ return;
+#else
+ m0 = m_copy(m, 0, M_COPYALL);
+#endif
+ if ((pf_mtag = pf_get_mtag(m0)) == NULL)
+ return;
+#ifdef __FreeBSD__
+ /* XXX: revisit */
+ m0->m_flags |= M_SKIP_FIREWALL;
+#else
+ pf_mtag->flags |= PF_TAG_GENERATED;
+#endif
+
+ if (r->rtableid >= 0)
+#ifdef __FreeBSD__
+ {
+ M_SETFIB(m0, r->rtableid);
+#endif
+ pf_mtag->rtableid = r->rtableid;
+#ifdef __FreeBSD__
+ }
+#endif
+
+#ifdef ALTQ
+ if (r->qid) {
+ pf_mtag->qid = r->qid;
+ /* add hints for ecn */
+ pf_mtag->af = af;
+ pf_mtag->hdr = mtod(m0, struct ip *);
+ }
+#endif /* ALTQ */
+
+ switch (af) {
+#ifdef INET
+ case AF_INET:
+#ifdef __FreeBSD__
+ /* icmp_error() expects host byte ordering */
+ ip = mtod(m0, struct ip *);
+ NTOHS(ip->ip_len);
+ NTOHS(ip->ip_off);
+ PF_UNLOCK();
+ icmp_error(m0, type, code, 0, 0);
+ PF_LOCK();
+#else
+ icmp_error(m0, type, code, 0, 0);
+#endif
+ break;
+#endif /* INET */
+#ifdef INET6
+ case AF_INET6:
+#ifdef __FreeBSD__
+ PF_UNLOCK();
+#endif
+ icmp6_error(m0, type, code, 0);
+#ifdef __FreeBSD__
+ PF_LOCK();
+#endif
+ break;
+#endif /* INET6 */
+ }
+}
+
+/*
+ * Return 1 if the addresses a and b match (with mask m), otherwise return 0.
+ * If n is 0, they match if they are equal. If n is != 0, they match if they
+ * are different.
+ */
+int
+pf_match_addr(u_int8_t n, struct pf_addr *a, struct pf_addr *m,
+ struct pf_addr *b, sa_family_t af)
+{
+ int match = 0;
+
+ switch (af) {
+#ifdef INET
+ case AF_INET:
+ if ((a->addr32[0] & m->addr32[0]) ==
+ (b->addr32[0] & m->addr32[0]))
+ match++;
+ break;
+#endif /* INET */
+#ifdef INET6
+ case AF_INET6:
+ if (((a->addr32[0] & m->addr32[0]) ==
+ (b->addr32[0] & m->addr32[0])) &&
+ ((a->addr32[1] & m->addr32[1]) ==
+ (b->addr32[1] & m->addr32[1])) &&
+ ((a->addr32[2] & m->addr32[2]) ==
+ (b->addr32[2] & m->addr32[2])) &&
+ ((a->addr32[3] & m->addr32[3]) ==
+ (b->addr32[3] & m->addr32[3])))
+ match++;
+ break;
+#endif /* INET6 */
+ }
+ if (match) {
+ if (n)
+ return (0);
+ else
+ return (1);
+ } else {
+ if (n)
+ return (1);
+ else
+ return (0);
+ }
+}
+
+int
+pf_match(u_int8_t op, u_int32_t a1, u_int32_t a2, u_int32_t p)
+{
+ switch (op) {
+ case PF_OP_IRG:
+ return ((p > a1) && (p < a2));
+ case PF_OP_XRG:
+ return ((p < a1) || (p > a2));
+ case PF_OP_RRG:
+ return ((p >= a1) && (p <= a2));
+ case PF_OP_EQ:
+ return (p == a1);
+ case PF_OP_NE:
+ return (p != a1);
+ case PF_OP_LT:
+ return (p < a1);
+ case PF_OP_LE:
+ return (p <= a1);
+ case PF_OP_GT:
+ return (p > a1);
+ case PF_OP_GE:
+ return (p >= a1);
+ }
+ return (0); /* never reached */
+}
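+
+/*
+ * Illustrative examples: pf_match(PF_OP_IRG, 80, 90, p) is true for
+ * 80 < p < 90 (exclusive range), PF_OP_RRG for 80 <= p <= 90
+ * (inclusive range) and PF_OP_XRG for p outside [80, 90].
+ * pf_match_port() below converts its arguments from network byte
+ * order before comparing.
+ */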
+
+int
+pf_match_port(u_int8_t op, u_int16_t a1, u_int16_t a2, u_int16_t p)
+{
+ NTOHS(a1);
+ NTOHS(a2);
+ NTOHS(p);
+ return (pf_match(op, a1, a2, p));
+}
+
+int
+pf_match_uid(u_int8_t op, uid_t a1, uid_t a2, uid_t u)
+{
+ if (u == UID_MAX && op != PF_OP_EQ && op != PF_OP_NE)
+ return (0);
+ return (pf_match(op, a1, a2, u));
+}
+
+int
+pf_match_gid(u_int8_t op, gid_t a1, gid_t a2, gid_t g)
+{
+ if (g == GID_MAX && op != PF_OP_EQ && op != PF_OP_NE)
+ return (0);
+ return (pf_match(op, a1, a2, g));
+}
+
+#ifndef __FreeBSD__
+struct pf_mtag *
+pf_find_mtag(struct mbuf *m)
+{
+ struct m_tag *mtag;
+
+ if ((mtag = m_tag_find(m, PACKET_TAG_PF, NULL)) == NULL)
+ return (NULL);
+
+ return ((struct pf_mtag *)(mtag + 1));
+}
+
+struct pf_mtag *
+pf_get_mtag(struct mbuf *m)
+{
+ struct m_tag *mtag;
+
+ if ((mtag = m_tag_find(m, PACKET_TAG_PF, NULL)) == NULL) {
+ mtag = m_tag_get(PACKET_TAG_PF, sizeof(struct pf_mtag),
+ M_NOWAIT);
+ if (mtag == NULL)
+ return (NULL);
+ bzero(mtag + 1, sizeof(struct pf_mtag));
+ m_tag_prepend(m, mtag);
+ }
+
+ return ((struct pf_mtag *)(mtag + 1));
+}
+#endif
+
+int
+pf_match_tag(struct mbuf *m, struct pf_rule *r, struct pf_mtag *pf_mtag,
+ int *tag)
+{
+ if (*tag == -1)
+ *tag = pf_mtag->tag;
+
+ return ((!r->match_tag_not && r->match_tag == *tag) ||
+ (r->match_tag_not && r->match_tag != *tag));
+}
+
+int
+pf_tag_packet(struct mbuf *m, struct pf_mtag *pf_mtag, int tag, int rtableid)
+{
+ if (tag <= 0 && rtableid < 0)
+ return (0);
+
+ if (pf_mtag == NULL)
+ if ((pf_mtag = pf_get_mtag(m)) == NULL)
+ return (1);
+ if (tag > 0)
+ pf_mtag->tag = tag;
+ if (rtableid >= 0)
+#ifdef __FreeBSD__
+ {
+ M_SETFIB(m, rtableid);
+#endif
+ pf_mtag->rtableid = rtableid;
+#ifdef __FreeBSD__
+ }
+#endif
+
+ return (0);
+}
+
+static void
+pf_step_into_anchor(int *depth, struct pf_ruleset **rs, int n,
+ struct pf_rule **r, struct pf_rule **a, int *match)
+{
+ struct pf_anchor_stackframe *f;
+
+ (*r)->anchor->match = 0;
+ if (match)
+ *match = 0;
+ if (*depth >= sizeof(pf_anchor_stack) /
+ sizeof(pf_anchor_stack[0])) {
+ printf("pf_step_into_anchor: stack overflow\n");
+ *r = TAILQ_NEXT(*r, entries);
+ return;
+ } else if (*depth == 0 && a != NULL)
+ *a = *r;
+ f = pf_anchor_stack + (*depth)++;
+ f->rs = *rs;
+ f->r = *r;
+ if ((*r)->anchor_wildcard) {
+ f->parent = &(*r)->anchor->children;
+ if ((f->child = RB_MIN(pf_anchor_node, f->parent)) ==
+ NULL) {
+ *r = NULL;
+ return;
+ }
+ *rs = &f->child->ruleset;
+ } else {
+ f->parent = NULL;
+ f->child = NULL;
+ *rs = &(*r)->anchor->ruleset;
+ }
+ *r = TAILQ_FIRST((*rs)->rules[n].active.ptr);
+}
+
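+/*
+ * Return from an anchor: for wildcard anchors visit the next child
+ * first, otherwise pop the stack frame and resume behind the anchor
+ * rule; the anchor's 'quick' setting is propagated if it matched.
+ */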
+int
+pf_step_out_of_anchor(int *depth, struct pf_ruleset **rs, int n,
+ struct pf_rule **r, struct pf_rule **a, int *match)
+{
+ struct pf_anchor_stackframe *f;
+ int quick = 0;
+
+ do {
+ if (*depth <= 0)
+ break;
+ f = pf_anchor_stack + *depth - 1;
+ if (f->parent != NULL && f->child != NULL) {
+ if (f->child->match ||
+ (match != NULL && *match)) {
+ f->r->anchor->match = 1;
+ *match = 0;
+ }
+ f->child = RB_NEXT(pf_anchor_node, f->parent, f->child);
+ if (f->child != NULL) {
+ *rs = &f->child->ruleset;
+ *r = TAILQ_FIRST((*rs)->rules[n].active.ptr);
+ if (*r == NULL)
+ continue;
+ else
+ break;
+ }
+ }
+ (*depth)--;
+ if (*depth == 0 && a != NULL)
+ *a = NULL;
+ *rs = f->rs;
+ if (f->r->anchor->match || (match != NULL && *match))
+ quick = f->r->quick;
+ *r = TAILQ_NEXT(f->r, entries);
+ } while (*r == NULL);
+
+ return (quick);
+}
+
+#ifdef INET6
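+/*
+ * Combine a pool address with a source address under a mask:
+ * naddr = (raddr & rmask) | (saddr & ~rmask).
+ */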
+void
+pf_poolmask(struct pf_addr *naddr, struct pf_addr *raddr,
+ struct pf_addr *rmask, struct pf_addr *saddr, sa_family_t af)
+{
+ switch (af) {
+#ifdef INET
+ case AF_INET:
+ naddr->addr32[0] = (raddr->addr32[0] & rmask->addr32[0]) |
+ ((rmask->addr32[0] ^ 0xffffffff) & saddr->addr32[0]);
+ break;
+#endif /* INET */
+ case AF_INET6:
+ naddr->addr32[0] = (raddr->addr32[0] & rmask->addr32[0]) |
+ ((rmask->addr32[0] ^ 0xffffffff) & saddr->addr32[0]);
+ naddr->addr32[1] = (raddr->addr32[1] & rmask->addr32[1]) |
+ ((rmask->addr32[1] ^ 0xffffffff) & saddr->addr32[1]);
+ naddr->addr32[2] = (raddr->addr32[2] & rmask->addr32[2]) |
+ ((rmask->addr32[2] ^ 0xffffffff) & saddr->addr32[2]);
+ naddr->addr32[3] = (raddr->addr32[3] & rmask->addr32[3]) |
+ ((rmask->addr32[3] ^ 0xffffffff) & saddr->addr32[3]);
+ break;
+ }
+}
+
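+/*
+ * Increment an address arithmetically; an IPv6 address is treated
+ * as a single 128-bit big-endian integer with carry between the
+ * 32-bit words.
+ */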
+void
+pf_addr_inc(struct pf_addr *addr, sa_family_t af)
+{
+ switch (af) {
+#ifdef INET
+ case AF_INET:
+ addr->addr32[0] = htonl(ntohl(addr->addr32[0]) + 1);
+ break;
+#endif /* INET */
+ case AF_INET6:
+ if (addr->addr32[3] == 0xffffffff) {
+ addr->addr32[3] = 0;
+ if (addr->addr32[2] == 0xffffffff) {
+ addr->addr32[2] = 0;
+ if (addr->addr32[1] == 0xffffffff) {
+ addr->addr32[1] = 0;
+ addr->addr32[0] =
+ htonl(ntohl(addr->addr32[0]) + 1);
+ } else
+ addr->addr32[1] =
+ htonl(ntohl(addr->addr32[1]) + 1);
+ } else
+ addr->addr32[2] =
+ htonl(ntohl(addr->addr32[2]) + 1);
+ } else
+ addr->addr32[3] =
+ htonl(ntohl(addr->addr32[3]) + 1);
+ break;
+ }
+}
+#endif /* INET6 */
+
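+/* One round of a Jenkins-style mixing function, as used by bridge_hash. */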
+#define mix(a,b,c) \
+ do { \
+ a -= b; a -= c; a ^= (c >> 13); \
+ b -= c; b -= a; b ^= (a << 8); \
+ c -= a; c -= b; c ^= (b >> 13); \
+ a -= b; a -= c; a ^= (c >> 12); \
+ b -= c; b -= a; b ^= (a << 16); \
+ c -= a; c -= b; c ^= (b >> 5); \
+ a -= b; a -= c; a ^= (c >> 3); \
+ b -= c; b -= a; b ^= (a << 10); \
+ c -= a; c -= b; c ^= (b >> 15); \
+ } while (0)
+
+/*
+ * hash function based on bridge_hash in if_bridge.c
+ */
+void
+pf_hash(struct pf_addr *inaddr, struct pf_addr *hash,
+ struct pf_poolhashkey *key, sa_family_t af)
+{
+ u_int32_t a = 0x9e3779b9, b = 0x9e3779b9, c = key->key32[0];
+
+ switch (af) {
+#ifdef INET
+ case AF_INET:
+ a += inaddr->addr32[0];
+ b += key->key32[1];
+ mix(a, b, c);
+ hash->addr32[0] = c + key->key32[2];
+ break;
+#endif /* INET */
+#ifdef INET6
+ case AF_INET6:
+ a += inaddr->addr32[0];
+ b += inaddr->addr32[2];
+ mix(a, b, c);
+ hash->addr32[0] = c;
+ a += inaddr->addr32[1];
+ b += inaddr->addr32[3];
+ c += key->key32[1];
+ mix(a, b, c);
+ hash->addr32[1] = c;
+ a += inaddr->addr32[2];
+ b += inaddr->addr32[1];
+ c += key->key32[2];
+ mix(a, b, c);
+ hash->addr32[2] = c;
+ a += inaddr->addr32[3];
+ b += inaddr->addr32[0];
+ c += key->key32[3];
+ mix(a, b, c);
+ hash->addr32[3] = c;
+ break;
+#endif /* INET6 */
+ }
+}
+
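+/*
+ * Select a translation address from the rule's pool according to
+ * the pool policy (none, bitmask, random, source-hash or
+ * round-robin); sticky-address mappings are looked up in the source
+ * tracking tree first. Returns 0 on success, 1 if no usable
+ * address was found.
+ */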
+int
+pf_map_addr(sa_family_t af, struct pf_rule *r, struct pf_addr *saddr,
+ struct pf_addr *naddr, struct pf_addr *init_addr, struct pf_src_node **sn)
+{
+ unsigned char hash[16];
+ struct pf_pool *rpool = &r->rpool;
+ struct pf_addr *raddr = &rpool->cur->addr.v.a.addr;
+ struct pf_addr *rmask = &rpool->cur->addr.v.a.mask;
+ struct pf_pooladdr *acur = rpool->cur;
+ struct pf_src_node k;
+
+ if (*sn == NULL && r->rpool.opts & PF_POOL_STICKYADDR &&
+ (r->rpool.opts & PF_POOL_TYPEMASK) != PF_POOL_NONE) {
+ k.af = af;
+ PF_ACPY(&k.addr, saddr, af);
+ if (r->rule_flag & PFRULE_RULESRCTRACK ||
+ r->rpool.opts & PF_POOL_STICKYADDR)
+ k.rule.ptr = r;
+ else
+ k.rule.ptr = NULL;
+ pf_status.scounters[SCNT_SRC_NODE_SEARCH]++;
+ *sn = RB_FIND(pf_src_tree, &tree_src_tracking, &k);
+ if (*sn != NULL && !PF_AZERO(&(*sn)->raddr, af)) {
+ PF_ACPY(naddr, &(*sn)->raddr, af);
+ if (pf_status.debug >= PF_DEBUG_MISC) {
+ printf("pf_map_addr: src tracking maps ");
+ pf_print_host(&k.addr, 0, af);
+ printf(" to ");
+ pf_print_host(naddr, 0, af);
+ printf("\n");
+ }
+ return (0);
+ }
+ }
+
+ if (rpool->cur->addr.type == PF_ADDR_NOROUTE)
+ return (1);
+ if (rpool->cur->addr.type == PF_ADDR_DYNIFTL) {
+ switch (af) {
+#ifdef INET
+ case AF_INET:
+ if (rpool->cur->addr.p.dyn->pfid_acnt4 < 1 &&
+ (rpool->opts & PF_POOL_TYPEMASK) !=
+ PF_POOL_ROUNDROBIN)
+ return (1);
+ raddr = &rpool->cur->addr.p.dyn->pfid_addr4;
+ rmask = &rpool->cur->addr.p.dyn->pfid_mask4;
+ break;
+#endif /* INET */
+#ifdef INET6
+ case AF_INET6:
+ if (rpool->cur->addr.p.dyn->pfid_acnt6 < 1 &&
+ (rpool->opts & PF_POOL_TYPEMASK) !=
+ PF_POOL_ROUNDROBIN)
+ return (1);
+ raddr = &rpool->cur->addr.p.dyn->pfid_addr6;
+ rmask = &rpool->cur->addr.p.dyn->pfid_mask6;
+ break;
+#endif /* INET6 */
+ }
+ } else if (rpool->cur->addr.type == PF_ADDR_TABLE) {
+ if ((rpool->opts & PF_POOL_TYPEMASK) != PF_POOL_ROUNDROBIN)
+ return (1); /* unsupported */
+ } else {
+ raddr = &rpool->cur->addr.v.a.addr;
+ rmask = &rpool->cur->addr.v.a.mask;
+ }
+
+ switch (rpool->opts & PF_POOL_TYPEMASK) {
+ case PF_POOL_NONE:
+ PF_ACPY(naddr, raddr, af);
+ break;
+ case PF_POOL_BITMASK:
+ PF_POOLMASK(naddr, raddr, rmask, saddr, af);
+ break;
+ case PF_POOL_RANDOM:
+ if (init_addr != NULL && PF_AZERO(init_addr, af)) {
+ switch (af) {
+#ifdef INET
+ case AF_INET:
+ rpool->counter.addr32[0] = htonl(arc4random());
+ break;
+#endif /* INET */
+#ifdef INET6
+ case AF_INET6:
+ if (rmask->addr32[3] != 0xffffffff)
+ rpool->counter.addr32[3] =
+ htonl(arc4random());
+ else
+ break;
+ if (rmask->addr32[2] != 0xffffffff)
+ rpool->counter.addr32[2] =
+ htonl(arc4random());
+ else
+ break;
+ if (rmask->addr32[1] != 0xffffffff)
+ rpool->counter.addr32[1] =
+ htonl(arc4random());
+ else
+ break;
+ if (rmask->addr32[0] != 0xffffffff)
+ rpool->counter.addr32[0] =
+ htonl(arc4random());
+ break;
+#endif /* INET6 */
+ }
+ PF_POOLMASK(naddr, raddr, rmask, &rpool->counter, af);
+ PF_ACPY(init_addr, naddr, af);
+
+ } else {
+ PF_AINC(&rpool->counter, af);
+ PF_POOLMASK(naddr, raddr, rmask, &rpool->counter, af);
+ }
+ break;
+ case PF_POOL_SRCHASH:
+ pf_hash(saddr, (struct pf_addr *)&hash, &rpool->key, af);
+ PF_POOLMASK(naddr, raddr, rmask, (struct pf_addr *)&hash, af);
+ break;
+ case PF_POOL_ROUNDROBIN:
+ if (rpool->cur->addr.type == PF_ADDR_TABLE) {
+ if (!pfr_pool_get(rpool->cur->addr.p.tbl,
+ &rpool->tblidx, &rpool->counter,
+ &raddr, &rmask, af))
+ goto get_addr;
+ } else if (rpool->cur->addr.type == PF_ADDR_DYNIFTL) {
+ if (!pfr_pool_get(rpool->cur->addr.p.dyn->pfid_kt,
+ &rpool->tblidx, &rpool->counter,
+ &raddr, &rmask, af))
+ goto get_addr;
+ } else if (pf_match_addr(0, raddr, rmask, &rpool->counter, af))
+ goto get_addr;
+
+ try_next:
+ if ((rpool->cur = TAILQ_NEXT(rpool->cur, entries)) == NULL)
+ rpool->cur = TAILQ_FIRST(&rpool->list);
+ if (rpool->cur->addr.type == PF_ADDR_TABLE) {
+ rpool->tblidx = -1;
+ if (pfr_pool_get(rpool->cur->addr.p.tbl,
+ &rpool->tblidx, &rpool->counter,
+ &raddr, &rmask, af)) {
+ /* table contains no address of type 'af' */
+ if (rpool->cur != acur)
+ goto try_next;
+ return (1);
+ }
+ } else if (rpool->cur->addr.type == PF_ADDR_DYNIFTL) {
+ rpool->tblidx = -1;
+ if (pfr_pool_get(rpool->cur->addr.p.dyn->pfid_kt,
+ &rpool->tblidx, &rpool->counter,
+ &raddr, &rmask, af)) {
+ /* table contains no address of type 'af' */
+ if (rpool->cur != acur)
+ goto try_next;
+ return (1);
+ }
+ } else {
+ raddr = &rpool->cur->addr.v.a.addr;
+ rmask = &rpool->cur->addr.v.a.mask;
+ PF_ACPY(&rpool->counter, raddr, af);
+ }
+
+ get_addr:
+ PF_ACPY(naddr, &rpool->counter, af);
+ if (init_addr != NULL && PF_AZERO(init_addr, af))
+ PF_ACPY(init_addr, naddr, af);
+ PF_AINC(&rpool->counter, af);
+ break;
+ }
+ if (*sn != NULL)
+ PF_ACPY(&(*sn)->raddr, naddr, af);
+
+ if (pf_status.debug >= PF_DEBUG_MISC &&
+ (rpool->opts & PF_POOL_TYPEMASK) != PF_POOL_NONE) {
+ printf("pf_map_addr: selected address ");
+ pf_print_host(naddr, 0, af);
+ printf("\n");
+ }
+
+ return (0);
+}
+
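+/*
+ * Pick a translation address and an unused source port for a NAT
+ * mapping: the search starts at a random port in [low, high] and
+ * probes the state table until a free port is found.
+ */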
+int
+pf_get_sport(sa_family_t af, u_int8_t proto, struct pf_rule *r,
+ struct pf_addr *saddr, struct pf_addr *daddr, u_int16_t dport,
+ struct pf_addr *naddr, u_int16_t *nport, u_int16_t low, u_int16_t high,
+ struct pf_src_node **sn)
+{
+ struct pf_state_cmp key;
+ struct pf_addr init_addr;
+ u_int16_t cut;
+
+ bzero(&init_addr, sizeof(init_addr));
+ if (pf_map_addr(af, r, saddr, naddr, &init_addr, sn))
+ return (1);
+
+ if (proto == IPPROTO_ICMP) {
+ low = 1;
+ high = 65535;
+ }
+
+ do {
+ key.af = af;
+ key.proto = proto;
+ PF_ACPY(&key.ext.addr, daddr, key.af);
+ PF_ACPY(&key.gwy.addr, naddr, key.af);
+ key.ext.port = dport;
+
+ /*
+ * Port search: start at a random port and step through the
+ * range, similar to the port loop in in_pcbbind.
+ */
+ if (!(proto == IPPROTO_TCP || proto == IPPROTO_UDP ||
+ proto == IPPROTO_ICMP)) {
+ key.gwy.port = dport;
+ if (pf_find_state_all(&key, PF_EXT_GWY, NULL) == NULL)
+ return (0);
+ } else if (low == 0 && high == 0) {
+ key.gwy.port = *nport;
+ if (pf_find_state_all(&key, PF_EXT_GWY, NULL) == NULL)
+ return (0);
+ } else if (low == high) {
+ key.gwy.port = htons(low);
+ if (pf_find_state_all(&key, PF_EXT_GWY, NULL) == NULL) {
+ *nport = htons(low);
+ return (0);
+ }
+ } else {
+ u_int16_t tmp;
+
+ if (low > high) {
+ tmp = low;
+ low = high;
+ high = tmp;
+ }
+ /* low < high */
+ cut = htonl(arc4random()) % (1 + high - low) + low;
+ /* low <= cut <= high */
+ for (tmp = cut; tmp <= high; tmp++) {
+ key.gwy.port = htons(tmp);
+ if (pf_find_state_all(&key, PF_EXT_GWY, NULL) ==
+ NULL) {
+ *nport = htons(tmp);
+ return (0);
+ }
+ }
+ for (tmp = cut - 1; tmp >= low; tmp--) {
+ key.gwy.port = htons(tmp);
+ if (pf_find_state_all(&key, PF_EXT_GWY, NULL) ==
+ NULL) {
+ *nport = htons(tmp);
+ return (0);
+ }
+ }
+ }
+
+ switch (r->rpool.opts & PF_POOL_TYPEMASK) {
+ case PF_POOL_RANDOM:
+ case PF_POOL_ROUNDROBIN:
+ if (pf_map_addr(af, r, saddr, naddr, &init_addr, sn))
+ return (1);
+ break;
+ case PF_POOL_NONE:
+ case PF_POOL_SRCHASH:
+ case PF_POOL_BITMASK:
+ default:
+ return (1);
+ }
+ } while (!PF_AEQ(&init_addr, naddr, af));
+
+ return (1); /* none available */
+}
+
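+/*
+ * Walk one translation ruleset and return the first matching rule,
+ * descending into anchors as needed; "no" rules (PF_NONAT and
+ * friends) yield NULL to suppress translation.
+ */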
+struct pf_rule *
+pf_match_translation(struct pf_pdesc *pd, struct mbuf *m, int off,
+ int direction, struct pfi_kif *kif, struct pf_addr *saddr, u_int16_t sport,
+ struct pf_addr *daddr, u_int16_t dport, int rs_num)
+{
+ struct pf_rule *r, *rm = NULL;
+ struct pf_ruleset *ruleset = NULL;
+ int tag = -1;
+ int rtableid = -1;
+ int asd = 0;
+
+ r = TAILQ_FIRST(pf_main_ruleset.rules[rs_num].active.ptr);
+ while (r && rm == NULL) {
+ struct pf_rule_addr *src = NULL, *dst = NULL;
+ struct pf_addr_wrap *xdst = NULL;
+
+ if (r->action == PF_BINAT && direction == PF_IN) {
+ src = &r->dst;
+ if (r->rpool.cur != NULL)
+ xdst = &r->rpool.cur->addr;
+ } else {
+ src = &r->src;
+ dst = &r->dst;
+ }
+
+ r->evaluations++;
+ if (pfi_kif_match(r->kif, kif) == r->ifnot)
+ r = r->skip[PF_SKIP_IFP].ptr;
+ else if (r->direction && r->direction != direction)
+ r = r->skip[PF_SKIP_DIR].ptr;
+ else if (r->af && r->af != pd->af)
+ r = r->skip[PF_SKIP_AF].ptr;
+ else if (r->proto && r->proto != pd->proto)
+ r = r->skip[PF_SKIP_PROTO].ptr;
+ else if (PF_MISMATCHAW(&src->addr, saddr, pd->af,
+ src->neg, kif))
+ r = r->skip[src == &r->src ? PF_SKIP_SRC_ADDR :
+ PF_SKIP_DST_ADDR].ptr;
+ else if (src->port_op && !pf_match_port(src->port_op,
+ src->port[0], src->port[1], sport))
+ r = r->skip[src == &r->src ? PF_SKIP_SRC_PORT :
+ PF_SKIP_DST_PORT].ptr;
+ else if (dst != NULL &&
+ PF_MISMATCHAW(&dst->addr, daddr, pd->af, dst->neg, NULL))
+ r = r->skip[PF_SKIP_DST_ADDR].ptr;
+ else if (xdst != NULL && PF_MISMATCHAW(xdst, daddr, pd->af,
+ 0, NULL))
+ r = TAILQ_NEXT(r, entries);
+ else if (dst != NULL && dst->port_op &&
+ !pf_match_port(dst->port_op, dst->port[0],
+ dst->port[1], dport))
+ r = r->skip[PF_SKIP_DST_PORT].ptr;
+ else if (r->match_tag && !pf_match_tag(m, r, pd->pf_mtag, &tag))
+ r = TAILQ_NEXT(r, entries);
+ else if (r->os_fingerprint != PF_OSFP_ANY && (pd->proto !=
+ IPPROTO_TCP || !pf_osfp_match(pf_osfp_fingerprint(pd, m,
+ off, pd->hdr.tcp), r->os_fingerprint)))
+ r = TAILQ_NEXT(r, entries);
+ else {
+ if (r->tag)
+ tag = r->tag;
+ if (r->rtableid >= 0)
+ rtableid = r->rtableid;
+ if (r->anchor == NULL) {
+ rm = r;
+ } else
+ pf_step_into_anchor(&asd, &ruleset, rs_num,
+ &r, NULL, NULL);
+ }
+ if (r == NULL)
+ pf_step_out_of_anchor(&asd, &ruleset, rs_num, &r,
+ NULL, NULL);
+ }
+ if (pf_tag_packet(m, pd->pf_mtag, tag, rtableid))
+ return (NULL);
+ if (rm != NULL && (rm->action == PF_NONAT ||
+ rm->action == PF_NORDR || rm->action == PF_NOBINAT))
+ return (NULL);
+ return (rm);
+}
+
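+/*
+ * Find the translation rule applicable to the packet (BINAT/NAT
+ * outbound, RDR/BINAT inbound) and compute the translated address
+ * and port into naddr/nport.
+ */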
+struct pf_rule *
+pf_get_translation(struct pf_pdesc *pd, struct mbuf *m, int off, int direction,
+ struct pfi_kif *kif, struct pf_src_node **sn,
+ struct pf_addr *saddr, u_int16_t sport,
+ struct pf_addr *daddr, u_int16_t dport,
+ struct pf_addr *naddr, u_int16_t *nport)
+{
+ struct pf_rule *r = NULL;
+
+ if (direction == PF_OUT) {
+ r = pf_match_translation(pd, m, off, direction, kif, saddr,
+ sport, daddr, dport, PF_RULESET_BINAT);
+ if (r == NULL)
+ r = pf_match_translation(pd, m, off, direction, kif,
+ saddr, sport, daddr, dport, PF_RULESET_NAT);
+ } else {
+ r = pf_match_translation(pd, m, off, direction, kif, saddr,
+ sport, daddr, dport, PF_RULESET_RDR);
+ if (r == NULL)
+ r = pf_match_translation(pd, m, off, direction, kif,
+ saddr, sport, daddr, dport, PF_RULESET_BINAT);
+ }
+
+ if (r != NULL) {
+ switch (r->action) {
+ case PF_NONAT:
+ case PF_NOBINAT:
+ case PF_NORDR:
+ return (NULL);
+ case PF_NAT:
+ if (pf_get_sport(pd->af, pd->proto, r, saddr,
+ daddr, dport, naddr, nport, r->rpool.proxy_port[0],
+ r->rpool.proxy_port[1], sn)) {
+ DPFPRINTF(PF_DEBUG_MISC,
+ ("pf: NAT proxy port allocation "
+ "(%u-%u) failed\n",
+ r->rpool.proxy_port[0],
+ r->rpool.proxy_port[1]));
+ return (NULL);
+ }
+ break;
+ case PF_BINAT:
+ switch (direction) {
+ case PF_OUT:
+ if (r->rpool.cur->addr.type == PF_ADDR_DYNIFTL){
+ switch (pd->af) {
+#ifdef INET
+ case AF_INET:
+ if (r->rpool.cur->addr.p.dyn->
+ pfid_acnt4 < 1)
+ return (NULL);
+ PF_POOLMASK(naddr,
+ &r->rpool.cur->addr.p.dyn->
+ pfid_addr4,
+ &r->rpool.cur->addr.p.dyn->
+ pfid_mask4,
+ saddr, AF_INET);
+ break;
+#endif /* INET */
+#ifdef INET6
+ case AF_INET6:
+ if (r->rpool.cur->addr.p.dyn->
+ pfid_acnt6 < 1)
+ return (NULL);
+ PF_POOLMASK(naddr,
+ &r->rpool.cur->addr.p.dyn->
+ pfid_addr6,
+ &r->rpool.cur->addr.p.dyn->
+ pfid_mask6,
+ saddr, AF_INET6);
+ break;
+#endif /* INET6 */
+ }
+ } else
+ PF_POOLMASK(naddr,
+ &r->rpool.cur->addr.v.a.addr,
+ &r->rpool.cur->addr.v.a.mask,
+ saddr, pd->af);
+ break;
+ case PF_IN:
+ if (r->src.addr.type == PF_ADDR_DYNIFTL) {
+ switch (pd->af) {
+#ifdef INET
+ case AF_INET:
+ if (r->src.addr.p.dyn->
+ pfid_acnt4 < 1)
+ return (NULL);
+ PF_POOLMASK(naddr,
+ &r->src.addr.p.dyn->
+ pfid_addr4,
+ &r->src.addr.p.dyn->
+ pfid_mask4,
+ daddr, AF_INET);
+ break;
+#endif /* INET */
+#ifdef INET6
+ case AF_INET6:
+ if (r->src.addr.p.dyn->
+ pfid_acnt6 < 1)
+ return (NULL);
+ PF_POOLMASK(naddr,
+ &r->src.addr.p.dyn->
+ pfid_addr6,
+ &r->src.addr.p.dyn->
+ pfid_mask6,
+ daddr, AF_INET6);
+ break;
+#endif /* INET6 */
+ }
+ } else
+ PF_POOLMASK(naddr,
+ &r->src.addr.v.a.addr,
+ &r->src.addr.v.a.mask, daddr,
+ pd->af);
+ break;
+ }
+ break;
+ case PF_RDR: {
+ if (pf_map_addr(pd->af, r, saddr, naddr, NULL, sn))
+ return (NULL);
+ if ((r->rpool.opts & PF_POOL_TYPEMASK) ==
+ PF_POOL_BITMASK)
+ PF_POOLMASK(naddr, naddr,
+ &r->rpool.cur->addr.v.a.mask, daddr,
+ pd->af);
+
+ if (r->rpool.proxy_port[1]) {
+ u_int32_t tmp_nport;
+
+ tmp_nport = ((ntohs(dport) -
+ ntohs(r->dst.port[0])) %
+ (r->rpool.proxy_port[1] -
+ r->rpool.proxy_port[0] + 1)) +
+ r->rpool.proxy_port[0];
+
+ /* wrap around if necessary */
+ if (tmp_nport > 65535)
+ tmp_nport -= 65535;
+ *nport = htons((u_int16_t)tmp_nport);
+ } else if (r->rpool.proxy_port[0])
+ *nport = htons(r->rpool.proxy_port[0]);
+ break;
+ }
+ default:
+ return (NULL);
+ }
+ }
+
+ return (r);
+}
+
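+/*
+ * Map the packet to a local socket and record the owning uid/gid
+ * in pd->lookup; returns 1 on success, -1 if no PCB was found.
+ */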
+int
+#ifdef __FreeBSD__
+pf_socket_lookup(int direction, struct pf_pdesc *pd, struct inpcb *inp_arg)
+#else
+pf_socket_lookup(int direction, struct pf_pdesc *pd)
+#endif
+{
+ struct pf_addr *saddr, *daddr;
+ u_int16_t sport, dport;
+#ifdef __FreeBSD__
+ struct inpcbinfo *pi;
+#else
+ struct inpcbtable *tb;
+#endif
+ struct inpcb *inp;
+
+ if (pd == NULL)
+ return (-1);
+ pd->lookup.uid = UID_MAX;
+ pd->lookup.gid = GID_MAX;
+ pd->lookup.pid = NO_PID; /* XXX: revisit */
+#ifdef __FreeBSD__
+ if (inp_arg != NULL) {
+ INP_LOCK_ASSERT(inp_arg);
+ pd->lookup.uid = inp_arg->inp_cred->cr_uid;
+ pd->lookup.gid = inp_arg->inp_cred->cr_groups[0];
+ return (1);
+ }
+#endif
+ switch (pd->proto) {
+ case IPPROTO_TCP:
+ if (pd->hdr.tcp == NULL)
+ return (-1);
+ sport = pd->hdr.tcp->th_sport;
+ dport = pd->hdr.tcp->th_dport;
+#ifdef __FreeBSD__
+ pi = &V_tcbinfo;
+#else
+ tb = &tcbtable;
+#endif
+ break;
+ case IPPROTO_UDP:
+ if (pd->hdr.udp == NULL)
+ return (-1);
+ sport = pd->hdr.udp->uh_sport;
+ dport = pd->hdr.udp->uh_dport;
+#ifdef __FreeBSD__
+ pi = &V_udbinfo;
+#else
+ tb = &udbtable;
+#endif
+ break;
+ default:
+ return (-1);
+ }
+ if (direction == PF_IN) {
+ saddr = pd->src;
+ daddr = pd->dst;
+ } else {
+ u_int16_t p;
+
+ p = sport;
+ sport = dport;
+ dport = p;
+ saddr = pd->dst;
+ daddr = pd->src;
+ }
+ switch (pd->af) {
+#ifdef INET
+ case AF_INET:
+#ifdef __FreeBSD__
+ INP_INFO_RLOCK(pi); /* XXX LOR */
+ inp = in_pcblookup_hash(pi, saddr->v4, sport, daddr->v4,
+ dport, 0, NULL);
+ if (inp == NULL) {
+ inp = in_pcblookup_hash(pi, saddr->v4, sport,
+ daddr->v4, dport, INPLOOKUP_WILDCARD, NULL);
+ if (inp == NULL) {
+ INP_INFO_RUNLOCK(pi);
+ return (-1);
+ }
+ }
+#else
+ inp = in_pcbhashlookup(tb, saddr->v4, sport, daddr->v4, dport);
+ if (inp == NULL) {
+ inp = in_pcblookup_listen(tb, daddr->v4, dport, 0);
+ if (inp == NULL)
+ return (-1);
+ }
+#endif
+ break;
+#endif /* INET */
+#ifdef INET6
+ case AF_INET6:
+#ifdef __FreeBSD__
+ INP_INFO_RLOCK(pi);
+ inp = in6_pcblookup_hash(pi, &saddr->v6, sport,
+ &daddr->v6, dport, 0, NULL);
+ if (inp == NULL) {
+ inp = in6_pcblookup_hash(pi, &saddr->v6, sport,
+ &daddr->v6, dport, INPLOOKUP_WILDCARD, NULL);
+ if (inp == NULL) {
+ INP_INFO_RUNLOCK(pi);
+ return (-1);
+ }
+ }
+#else
+ inp = in6_pcbhashlookup(tb, &saddr->v6, sport, &daddr->v6,
+ dport);
+ if (inp == NULL) {
+ inp = in6_pcblookup_listen(tb, &daddr->v6, dport, 0);
+ if (inp == NULL)
+ return (-1);
+ }
+#endif
+ break;
+#endif /* INET6 */
+
+ default:
+ return (-1);
+ }
+#ifdef __FreeBSD__
+ pd->lookup.uid = inp->inp_cred->cr_uid;
+ pd->lookup.gid = inp->inp_cred->cr_groups[0];
+ INP_INFO_RUNLOCK(pi);
+#else
+ pd->lookup.uid = inp->inp_socket->so_euid;
+ pd->lookup.gid = inp->inp_socket->so_egid;
+ pd->lookup.pid = inp->inp_socket->so_cpid;
+#endif
+ return (1);
+}
+
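+/*
+ * Extract the window scale factor from the options of a TCP SYN;
+ * PF_WSCALE_FLAG records that the option was present at all.
+ */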
+u_int8_t
+pf_get_wscale(struct mbuf *m, int off, u_int16_t th_off, sa_family_t af)
+{
+ int hlen;
+ u_int8_t hdr[60];
+ u_int8_t *opt, optlen;
+ u_int8_t wscale = 0;
+
+ hlen = th_off << 2; /* hlen <= sizeof(hdr) */
+ if (hlen <= sizeof(struct tcphdr))
+ return (0);
+ if (!pf_pull_hdr(m, off, hdr, hlen, NULL, NULL, af))
+ return (0);
+ opt = hdr + sizeof(struct tcphdr);
+ hlen -= sizeof(struct tcphdr);
+ while (hlen >= 3) {
+ switch (*opt) {
+ case TCPOPT_EOL:
+ case TCPOPT_NOP:
+ ++opt;
+ --hlen;
+ break;
+ case TCPOPT_WINDOW:
+ wscale = opt[2];
+ if (wscale > TCP_MAX_WINSHIFT)
+ wscale = TCP_MAX_WINSHIFT;
+ wscale |= PF_WSCALE_FLAG;
+ /* FALLTHROUGH */
+ default:
+ optlen = opt[1];
+ if (optlen < 2)
+ optlen = 2;
+ hlen -= optlen;
+ opt += optlen;
+ break;
+ }
+ }
+ return (wscale);
+}
+
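+/*
+ * Extract the MSS option from a TCP header; falls back to the
+ * default MSS when the option is absent.
+ */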
+u_int16_t
+pf_get_mss(struct mbuf *m, int off, u_int16_t th_off, sa_family_t af)
+{
+ int hlen;
+ u_int8_t hdr[60];
+ u_int8_t *opt, optlen;
+ u_int16_t mss = V_tcp_mssdflt;
+
+ hlen = th_off << 2; /* hlen <= sizeof(hdr) */
+ if (hlen <= sizeof(struct tcphdr))
+ return (0);
+ if (!pf_pull_hdr(m, off, hdr, hlen, NULL, NULL, af))
+ return (0);
+ opt = hdr + sizeof(struct tcphdr);
+ hlen -= sizeof(struct tcphdr);
+ while (hlen >= TCPOLEN_MAXSEG) {
+ switch (*opt) {
+ case TCPOPT_EOL:
+ case TCPOPT_NOP:
+ ++opt;
+ --hlen;
+ break;
+ case TCPOPT_MAXSEG:
+ bcopy((caddr_t)(opt + 2), (caddr_t)&mss, 2);
+ NTOHS(mss);
+ /* FALLTHROUGH */
+ default:
+ optlen = opt[1];
+ if (optlen < 2)
+ optlen = 2;
+ hlen -= optlen;
+ opt += optlen;
+ break;
+ }
+ }
+ return (mss);
+}
+
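+/*
+ * Derive a sane MSS from the MTU of the route towards 'addr',
+ * clamped by the peer's offer; used for synproxy segments.
+ */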
+u_int16_t
+pf_calc_mss(struct pf_addr *addr, sa_family_t af, u_int16_t offer)
+{
+#ifdef INET
+ struct sockaddr_in *dst;
+ struct route ro;
+#endif /* INET */
+#ifdef INET6
+ struct sockaddr_in6 *dst6;
+ struct route_in6 ro6;
+#endif /* INET6 */
+ struct rtentry *rt = NULL;
+ int hlen = 0; /* make the compiler happy */
+ u_int16_t mss = V_tcp_mssdflt;
+
+ switch (af) {
+#ifdef INET
+ case AF_INET:
+ hlen = sizeof(struct ip);
+ bzero(&ro, sizeof(ro));
+ dst = (struct sockaddr_in *)&ro.ro_dst;
+ dst->sin_family = AF_INET;
+ dst->sin_len = sizeof(*dst);
+ dst->sin_addr = addr->v4;
+#ifdef __FreeBSD__
+#ifdef RTF_PRCLONING
+ rtalloc_ign(&ro, (RTF_CLONING | RTF_PRCLONING));
+#else /* !RTF_PRCLONING */
+ in_rtalloc_ign(&ro, 0, 0);
+#endif
+#else /* ! __FreeBSD__ */
+ rtalloc_noclone(&ro, NO_CLONING);
+#endif
+ rt = ro.ro_rt;
+ break;
+#endif /* INET */
+#ifdef INET6
+ case AF_INET6:
+ hlen = sizeof(struct ip6_hdr);
+ bzero(&ro6, sizeof(ro6));
+ dst6 = (struct sockaddr_in6 *)&ro6.ro_dst;
+ dst6->sin6_family = AF_INET6;
+ dst6->sin6_len = sizeof(*dst6);
+ dst6->sin6_addr = addr->v6;
+#ifdef __FreeBSD__
+#ifdef RTF_PRCLONING
+ rtalloc_ign((struct route *)&ro6,
+ (RTF_CLONING | RTF_PRCLONING));
+#else /* !RTF_PRCLONING */
+ rtalloc_ign((struct route *)&ro6, 0);
+#endif
+#else /* ! __FreeBSD__ */
+ rtalloc_noclone((struct route *)&ro6, NO_CLONING);
+#endif
+ rt = ro6.ro_rt;
+ break;
+#endif /* INET6 */
+ }
+
+ if (rt && rt->rt_ifp) {
+ mss = rt->rt_ifp->if_mtu - hlen - sizeof(struct tcphdr);
+ mss = max(V_tcp_mssdflt, mss);
+ RTFREE(rt);
+ }
+ mss = min(mss, offer);
+ mss = max(mss, 64); /* sanity - at least max opt space */
+ return (mss);
+}
+
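+/*
+ * For route-to/reply-to rules, resolve the outgoing interface and
+ * gateway address of the new state via pf_map_addr().
+ */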
+void
+pf_set_rt_ifp(struct pf_state *s, struct pf_addr *saddr)
+{
+ struct pf_rule *r = s->rule.ptr;
+
+ s->rt_kif = NULL;
+ if (!r->rt || r->rt == PF_FASTROUTE)
+ return;
+ switch (s->af) {
+#ifdef INET
+ case AF_INET:
+ pf_map_addr(AF_INET, r, saddr, &s->rt_addr, NULL,
+ &s->nat_src_node);
+ s->rt_kif = r->rpool.cur->kif;
+ break;
+#endif /* INET */
+#ifdef INET6
+ case AF_INET6:
+ pf_map_addr(AF_INET6, r, saddr, &s->rt_addr, NULL,
+ &s->nat_src_node);
+ s->rt_kif = r->rpool.cur->kif;
+ break;
+#endif /* INET6 */
+ }
+}
+
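+/*
+ * Rule evaluation for a TCP packet without matching state: apply
+ * BINAT/NAT/RDR, walk the filter ruleset and, if required, create
+ * state (optionally with sequence number modulation or synproxy).
+ */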
+int
+pf_test_tcp(struct pf_rule **rm, struct pf_state **sm, int direction,
+ struct pfi_kif *kif, struct mbuf *m, int off, void *h,
+#ifdef __FreeBSD__
+ struct pf_pdesc *pd, struct pf_rule **am, struct pf_ruleset **rsm,
+ struct ifqueue *ifq, struct inpcb *inp)
+#else
+ struct pf_pdesc *pd, struct pf_rule **am, struct pf_ruleset **rsm,
+ struct ifqueue *ifq)
+#endif
+{
+ struct pf_rule *nr = NULL;
+ struct pf_addr *saddr = pd->src, *daddr = pd->dst;
+ struct tcphdr *th = pd->hdr.tcp;
+ u_int16_t bport, nport = 0;
+ sa_family_t af = pd->af;
+ struct pf_rule *r, *a = NULL;
+ struct pf_ruleset *ruleset = NULL;
+ struct pf_src_node *nsn = NULL;
+ u_short reason;
+ int rewrite = 0;
+ int tag = -1, rtableid = -1;
+ u_int16_t mss = V_tcp_mssdflt;
+ int asd = 0;
+ int match = 0;
+
+ if (pf_check_congestion(ifq)) {
+ REASON_SET(&reason, PFRES_CONGEST);
+ return (PF_DROP);
+ }
+
+#ifdef __FreeBSD__
+ if (inp != NULL)
+ pd->lookup.done = pf_socket_lookup(direction, pd, inp);
+ else if (debug_pfugidhack) {
+ PF_UNLOCK();
+ DPFPRINTF(PF_DEBUG_MISC, ("pf: unlocked lookup\n"));
+ pd->lookup.done = pf_socket_lookup(direction, pd, inp);
+ PF_LOCK();
+ }
+#endif
+
+ r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_FILTER].active.ptr);
+
+ if (direction == PF_OUT) {
+ bport = nport = th->th_sport;
+ /* check outgoing packet for BINAT/NAT */
+ if ((nr = pf_get_translation(pd, m, off, PF_OUT, kif, &nsn,
+ saddr, th->th_sport, daddr, th->th_dport,
+ &pd->naddr, &nport)) != NULL) {
+ PF_ACPY(&pd->baddr, saddr, af);
+ pf_change_ap(saddr, &th->th_sport, pd->ip_sum,
+ &th->th_sum, &pd->naddr, nport, 0, af);
+ rewrite++;
+ if (nr->natpass)
+ r = NULL;
+ pd->nat_rule = nr;
+ }
+ } else {
+ bport = nport = th->th_dport;
+ /* check incoming packet for BINAT/RDR */
+ if ((nr = pf_get_translation(pd, m, off, PF_IN, kif, &nsn,
+ saddr, th->th_sport, daddr, th->th_dport,
+ &pd->naddr, &nport)) != NULL) {
+ PF_ACPY(&pd->baddr, daddr, af);
+ pf_change_ap(daddr, &th->th_dport, pd->ip_sum,
+ &th->th_sum, &pd->naddr, nport, 0, af);
+ rewrite++;
+ if (nr->natpass)
+ r = NULL;
+ pd->nat_rule = nr;
+ }
+ }
+
+ while (r != NULL) {
+ r->evaluations++;
+ if (pfi_kif_match(r->kif, kif) == r->ifnot)
+ r = r->skip[PF_SKIP_IFP].ptr;
+ else if (r->direction && r->direction != direction)
+ r = r->skip[PF_SKIP_DIR].ptr;
+ else if (r->af && r->af != af)
+ r = r->skip[PF_SKIP_AF].ptr;
+ else if (r->proto && r->proto != IPPROTO_TCP)
+ r = r->skip[PF_SKIP_PROTO].ptr;
+ else if (PF_MISMATCHAW(&r->src.addr, saddr, af,
+ r->src.neg, kif))
+ r = r->skip[PF_SKIP_SRC_ADDR].ptr;
+ else if (r->src.port_op && !pf_match_port(r->src.port_op,
+ r->src.port[0], r->src.port[1], th->th_sport))
+ r = r->skip[PF_SKIP_SRC_PORT].ptr;
+ else if (PF_MISMATCHAW(&r->dst.addr, daddr, af,
+ r->dst.neg, NULL))
+ r = r->skip[PF_SKIP_DST_ADDR].ptr;
+ else if (r->dst.port_op && !pf_match_port(r->dst.port_op,
+ r->dst.port[0], r->dst.port[1], th->th_dport))
+ r = r->skip[PF_SKIP_DST_PORT].ptr;
+ else if (r->tos && !(r->tos == pd->tos))
+ r = TAILQ_NEXT(r, entries);
+ else if (r->rule_flag & PFRULE_FRAGMENT)
+ r = TAILQ_NEXT(r, entries);
+ else if ((r->flagset & th->th_flags) != r->flags)
+ r = TAILQ_NEXT(r, entries);
+ else if (r->uid.op && (pd->lookup.done || (pd->lookup.done =
+#ifdef __FreeBSD__
+ pf_socket_lookup(direction, pd, inp), 1)) &&
+#else
+ pf_socket_lookup(direction, pd), 1)) &&
+#endif
+ !pf_match_uid(r->uid.op, r->uid.uid[0], r->uid.uid[1],
+ pd->lookup.uid))
+ r = TAILQ_NEXT(r, entries);
+ else if (r->gid.op && (pd->lookup.done || (pd->lookup.done =
+#ifdef __FreeBSD__
+ pf_socket_lookup(direction, pd, inp), 1)) &&
+#else
+ pf_socket_lookup(direction, pd), 1)) &&
+#endif
+ !pf_match_gid(r->gid.op, r->gid.gid[0], r->gid.gid[1],
+ pd->lookup.gid))
+ r = TAILQ_NEXT(r, entries);
+ else if (r->prob && r->prob <= arc4random())
+ r = TAILQ_NEXT(r, entries);
+ else if (r->match_tag && !pf_match_tag(m, r, pd->pf_mtag, &tag))
+ r = TAILQ_NEXT(r, entries);
+ else if (r->os_fingerprint != PF_OSFP_ANY && !pf_osfp_match(
+ pf_osfp_fingerprint(pd, m, off, th), r->os_fingerprint))
+ r = TAILQ_NEXT(r, entries);
+ else {
+ if (r->tag)
+ tag = r->tag;
+ if (r->rtableid >= 0)
+ rtableid = r->rtableid;
+ if (r->anchor == NULL) {
+ match = 1;
+ *rm = r;
+ *am = a;
+ *rsm = ruleset;
+ if ((*rm)->quick)
+ break;
+ r = TAILQ_NEXT(r, entries);
+ } else
+ pf_step_into_anchor(&asd, &ruleset,
+ PF_RULESET_FILTER, &r, &a, &match);
+ }
+ if (r == NULL && pf_step_out_of_anchor(&asd, &ruleset,
+ PF_RULESET_FILTER, &r, &a, &match))
+ break;
+ }
+ r = *rm;
+ a = *am;
+ ruleset = *rsm;
+
+ REASON_SET(&reason, PFRES_MATCH);
+
+ if (r->log || (nr != NULL && nr->natpass && nr->log)) {
+ if (rewrite)
+#ifdef __FreeBSD__
+ m_copyback(m, off, sizeof(*th), (caddr_t)th);
+#else
+ m_copyback(m, off, sizeof(*th), th);
+#endif
+ PFLOG_PACKET(kif, h, m, af, direction, reason, r->log ? r : nr,
+ a, ruleset, pd);
+ }
+
+ if ((r->action == PF_DROP) &&
+ ((r->rule_flag & PFRULE_RETURNRST) ||
+ (r->rule_flag & PFRULE_RETURNICMP) ||
+ (r->rule_flag & PFRULE_RETURN))) {
+ /* undo NAT changes, if they have taken place */
+ if (nr != NULL) {
+ if (direction == PF_OUT) {
+ pf_change_ap(saddr, &th->th_sport, pd->ip_sum,
+ &th->th_sum, &pd->baddr, bport, 0, af);
+ rewrite++;
+ } else {
+ pf_change_ap(daddr, &th->th_dport, pd->ip_sum,
+ &th->th_sum, &pd->baddr, bport, 0, af);
+ rewrite++;
+ }
+ }
+ if (((r->rule_flag & PFRULE_RETURNRST) ||
+ (r->rule_flag & PFRULE_RETURN)) &&
+ !(th->th_flags & TH_RST)) {
+ u_int32_t ack = ntohl(th->th_seq) + pd->p_len;
+
+ if (th->th_flags & TH_SYN)
+ ack++;
+ if (th->th_flags & TH_FIN)
+ ack++;
+#ifdef __FreeBSD__
+ pf_send_tcp(m, r, af, pd->dst,
+#else
+ pf_send_tcp(r, af, pd->dst,
+#endif
+ pd->src, th->th_dport, th->th_sport,
+ ntohl(th->th_ack), ack, TH_RST|TH_ACK, 0, 0,
+ r->return_ttl, 1, 0, pd->eh, kif->pfik_ifp);
+ } else if ((af == AF_INET) && r->return_icmp)
+ pf_send_icmp(m, r->return_icmp >> 8,
+ r->return_icmp & 255, af, r);
+ else if ((af == AF_INET6) && r->return_icmp6)
+ pf_send_icmp(m, r->return_icmp6 >> 8,
+ r->return_icmp6 & 255, af, r);
+ }
+
+ if (r->action == PF_DROP)
+ return (PF_DROP);
+
+ if (pf_tag_packet(m, pd->pf_mtag, tag, rtableid)) {
+ REASON_SET(&reason, PFRES_MEMORY);
+ return (PF_DROP);
+ }
+
+ if (r->keep_state || nr != NULL ||
+ (pd->flags & PFDESC_TCP_NORM)) {
+ /* create new state */
+ u_int16_t len;
+ struct pf_state *s = NULL;
+ struct pf_src_node *sn = NULL;
+
+ len = pd->tot_len - off - (th->th_off << 2);
+
+ /* check maximums */
+ if (r->max_states && (r->states >= r->max_states)) {
+ pf_status.lcounters[LCNT_STATES]++;
+ REASON_SET(&reason, PFRES_MAXSTATES);
+ goto cleanup;
+ }
+ /* src node for filter rule */
+ if ((r->rule_flag & PFRULE_SRCTRACK ||
+ r->rpool.opts & PF_POOL_STICKYADDR) &&
+ pf_insert_src_node(&sn, r, saddr, af) != 0) {
+ REASON_SET(&reason, PFRES_SRCLIMIT);
+ goto cleanup;
+ }
+ /* src node for translation rule */
+ if (nr != NULL && (nr->rpool.opts & PF_POOL_STICKYADDR) &&
+ ((direction == PF_OUT &&
+ pf_insert_src_node(&nsn, nr, &pd->baddr, af) != 0) ||
+ (pf_insert_src_node(&nsn, nr, saddr, af) != 0))) {
+ REASON_SET(&reason, PFRES_SRCLIMIT);
+ goto cleanup;
+ }
+ s = pool_get(&pf_state_pl, PR_NOWAIT);
+ if (s == NULL) {
+ REASON_SET(&reason, PFRES_MEMORY);
+cleanup:
+ if (sn != NULL && sn->states == 0 && sn->expire == 0) {
+ RB_REMOVE(pf_src_tree, &tree_src_tracking, sn);
+ pf_status.scounters[SCNT_SRC_NODE_REMOVALS]++;
+ pf_status.src_nodes--;
+ pool_put(&pf_src_tree_pl, sn);
+ }
+ if (nsn != sn && nsn != NULL && nsn->states == 0 &&
+ nsn->expire == 0) {
+ RB_REMOVE(pf_src_tree, &tree_src_tracking, nsn);
+ pf_status.scounters[SCNT_SRC_NODE_REMOVALS]++;
+ pf_status.src_nodes--;
+ pool_put(&pf_src_tree_pl, nsn);
+ }
+ return (PF_DROP);
+ }
+ bzero(s, sizeof(*s));
+ s->rule.ptr = r;
+ s->nat_rule.ptr = nr;
+ s->anchor.ptr = a;
+ STATE_INC_COUNTERS(s);
+ if (r->allow_opts)
+ s->state_flags |= PFSTATE_ALLOWOPTS;
+ if (r->rule_flag & PFRULE_STATESLOPPY)
+ s->state_flags |= PFSTATE_SLOPPY;
+ s->log = r->log & PF_LOG_ALL;
+ if (nr != NULL)
+ s->log |= nr->log & PF_LOG_ALL;
+ s->proto = IPPROTO_TCP;
+ s->direction = direction;
+ s->af = af;
+ if (direction == PF_OUT) {
+ PF_ACPY(&s->gwy.addr, saddr, af);
+ s->gwy.port = th->th_sport; /* sport */
+ PF_ACPY(&s->ext.addr, daddr, af);
+ s->ext.port = th->th_dport;
+ if (nr != NULL) {
+ PF_ACPY(&s->lan.addr, &pd->baddr, af);
+ s->lan.port = bport;
+ } else {
+ PF_ACPY(&s->lan.addr, &s->gwy.addr, af);
+ s->lan.port = s->gwy.port;
+ }
+ } else {
+ PF_ACPY(&s->lan.addr, daddr, af);
+ s->lan.port = th->th_dport;
+ PF_ACPY(&s->ext.addr, saddr, af);
+ s->ext.port = th->th_sport;
+ if (nr != NULL) {
+ PF_ACPY(&s->gwy.addr, &pd->baddr, af);
+ s->gwy.port = bport;
+ } else {
+ PF_ACPY(&s->gwy.addr, &s->lan.addr, af);
+ s->gwy.port = s->lan.port;
+ }
+ }
+
+ s->src.seqlo = ntohl(th->th_seq);
+ s->src.seqhi = s->src.seqlo + len + 1;
+ if ((th->th_flags & (TH_SYN|TH_ACK)) == TH_SYN &&
+ r->keep_state == PF_STATE_MODULATE) {
+ /* Generate sequence number modulator */
+#ifdef __FreeBSD__
+ while ((s->src.seqdiff =
+ pf_new_isn(s) - s->src.seqlo) == 0)
+ ;
+#else
+ while ((s->src.seqdiff =
+ tcp_rndiss_next() - s->src.seqlo) == 0)
+ ;
+#endif
+ pf_change_a(&th->th_seq, &th->th_sum,
+ htonl(s->src.seqlo + s->src.seqdiff), 0);
+ rewrite = 1;
+ } else
+ s->src.seqdiff = 0;
+ if (th->th_flags & TH_SYN) {
+ s->src.seqhi++;
+ s->src.wscale = pf_get_wscale(m, off, th->th_off, af);
+ }
+ s->src.max_win = MAX(ntohs(th->th_win), 1);
+ if (s->src.wscale & PF_WSCALE_MASK) {
+ /* Remove scale factor from initial window */
+ int win = s->src.max_win;
+ win += 1 << (s->src.wscale & PF_WSCALE_MASK);
+ s->src.max_win = (win - 1) >>
+ (s->src.wscale & PF_WSCALE_MASK);
+ }
+ if (th->th_flags & TH_FIN)
+ s->src.seqhi++;
+ s->dst.seqhi = 1;
+ s->dst.max_win = 1;
+ s->src.state = TCPS_SYN_SENT;
+ s->dst.state = TCPS_CLOSED;
+ s->creation = time_second;
+ s->expire = time_second;
+ s->timeout = PFTM_TCP_FIRST_PACKET;
+ pf_set_rt_ifp(s, saddr);
+ if (sn != NULL) {
+ s->src_node = sn;
+ s->src_node->states++;
+ }
+ if (nsn != NULL) {
+ PF_ACPY(&nsn->raddr, &pd->naddr, af);
+ s->nat_src_node = nsn;
+ s->nat_src_node->states++;
+ }
+ if ((pd->flags & PFDESC_TCP_NORM) && pf_normalize_tcp_init(m,
+ off, pd, th, &s->src, &s->dst)) {
+ REASON_SET(&reason, PFRES_MEMORY);
+ pf_src_tree_remove_state(s);
+ STATE_DEC_COUNTERS(s);
+ pool_put(&pf_state_pl, s);
+ return (PF_DROP);
+ }
+ if ((pd->flags & PFDESC_TCP_NORM) && s->src.scrub &&
+ pf_normalize_tcp_stateful(m, off, pd, &reason, th, s,
+ &s->src, &s->dst, &rewrite)) {
+ /* This really shouldn't happen!!! */
+ DPFPRINTF(PF_DEBUG_URGENT,
+ ("pf_normalize_tcp_stateful failed on first pkt"));
+ pf_normalize_tcp_cleanup(s);
+ pf_src_tree_remove_state(s);
+ STATE_DEC_COUNTERS(s);
+ pool_put(&pf_state_pl, s);
+ return (PF_DROP);
+ }
+ if (pf_insert_state(BOUND_IFACE(r, kif), s)) {
+ pf_normalize_tcp_cleanup(s);
+ REASON_SET(&reason, PFRES_STATEINS);
+ pf_src_tree_remove_state(s);
+ STATE_DEC_COUNTERS(s);
+ pool_put(&pf_state_pl, s);
+ return (PF_DROP);
+ } else
+ *sm = s;
+ if (tag > 0) {
+ pf_tag_ref(tag);
+ s->tag = tag;
+ }
+ if ((th->th_flags & (TH_SYN|TH_ACK)) == TH_SYN &&
+ r->keep_state == PF_STATE_SYNPROXY) {
+ s->src.state = PF_TCPS_PROXY_SRC;
+ if (nr != NULL) {
+ if (direction == PF_OUT) {
+ pf_change_ap(saddr, &th->th_sport,
+ pd->ip_sum, &th->th_sum, &pd->baddr,
+ bport, 0, af);
+ } else {
+ pf_change_ap(daddr, &th->th_dport,
+ pd->ip_sum, &th->th_sum, &pd->baddr,
+ bport, 0, af);
+ }
+ }
+ s->src.seqhi = htonl(arc4random());
+ /* Find mss option */
+ mss = pf_get_mss(m, off, th->th_off, af);
+ mss = pf_calc_mss(saddr, af, mss);
+ mss = pf_calc_mss(daddr, af, mss);
+ s->src.mss = mss;
+#ifdef __FreeBSD__
+ pf_send_tcp(NULL, r, af, daddr, saddr, th->th_dport,
+#else
+ pf_send_tcp(r, af, daddr, saddr, th->th_dport,
+#endif
+ th->th_sport, s->src.seqhi, ntohl(th->th_seq) + 1,
+ TH_SYN|TH_ACK, 0, s->src.mss, 0, 1, 0, NULL, NULL);
+ REASON_SET(&reason, PFRES_SYNPROXY);
+ return (PF_SYNPROXY_DROP);
+ }
+ }
+
+ /* copy back packet headers if we performed NAT operations */
+ if (rewrite)
+ m_copyback(m, off, sizeof(*th), (caddr_t)th);
+
+ return (PF_PASS);
+}
+
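+/* As pf_test_tcp(), but for UDP: no flags, wscale or synproxy. */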
+int
+pf_test_udp(struct pf_rule **rm, struct pf_state **sm, int direction,
+ struct pfi_kif *kif, struct mbuf *m, int off, void *h,
+#ifdef __FreeBSD__
+ struct pf_pdesc *pd, struct pf_rule **am, struct pf_ruleset **rsm,
+ struct ifqueue *ifq, struct inpcb *inp)
+#else
+ struct pf_pdesc *pd, struct pf_rule **am, struct pf_ruleset **rsm,
+ struct ifqueue *ifq)
+#endif
+{
+ struct pf_rule *nr = NULL;
+ struct pf_addr *saddr = pd->src, *daddr = pd->dst;
+ struct udphdr *uh = pd->hdr.udp;
+ u_int16_t bport, nport = 0;
+ sa_family_t af = pd->af;
+ struct pf_rule *r, *a = NULL;
+ struct pf_ruleset *ruleset = NULL;
+ struct pf_src_node *nsn = NULL;
+ u_short reason;
+ int rewrite = 0;
+ int tag = -1, rtableid = -1;
+ int asd = 0;
+ int match = 0;
+
+ if (pf_check_congestion(ifq)) {
+ REASON_SET(&reason, PFRES_CONGEST);
+ return (PF_DROP);
+ }
+
+#ifdef __FreeBSD__
+ if (inp != NULL)
+ pd->lookup.done = pf_socket_lookup(direction, pd, inp);
+ else if (debug_pfugidhack) {
+ PF_UNLOCK();
+ DPFPRINTF(PF_DEBUG_MISC, ("pf: unlocked lookup\n"));
+ pd->lookup.done = pf_socket_lookup(direction, pd, inp);
+ PF_LOCK();
+ }
+#endif
+
+ r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_FILTER].active.ptr);
+
+ if (direction == PF_OUT) {
+ bport = nport = uh->uh_sport;
+ /* check outgoing packet for BINAT/NAT */
+ if ((nr = pf_get_translation(pd, m, off, PF_OUT, kif, &nsn,
+ saddr, uh->uh_sport, daddr, uh->uh_dport,
+ &pd->naddr, &nport)) != NULL) {
+ PF_ACPY(&pd->baddr, saddr, af);
+ pf_change_ap(saddr, &uh->uh_sport, pd->ip_sum,
+ &uh->uh_sum, &pd->naddr, nport, 1, af);
+ rewrite++;
+ if (nr->natpass)
+ r = NULL;
+ pd->nat_rule = nr;
+ }
+ } else {
+ bport = nport = uh->uh_dport;
+ /* check incoming packet for BINAT/RDR */
+ if ((nr = pf_get_translation(pd, m, off, PF_IN, kif, &nsn,
+ saddr, uh->uh_sport, daddr, uh->uh_dport, &pd->naddr,
+ &nport)) != NULL) {
+ PF_ACPY(&pd->baddr, daddr, af);
+ pf_change_ap(daddr, &uh->uh_dport, pd->ip_sum,
+ &uh->uh_sum, &pd->naddr, nport, 1, af);
+ rewrite++;
+ if (nr->natpass)
+ r = NULL;
+ pd->nat_rule = nr;
+ }
+ }
+
+ while (r != NULL) {
+ r->evaluations++;
+ if (pfi_kif_match(r->kif, kif) == r->ifnot)
+ r = r->skip[PF_SKIP_IFP].ptr;
+ else if (r->direction && r->direction != direction)
+ r = r->skip[PF_SKIP_DIR].ptr;
+ else if (r->af && r->af != af)
+ r = r->skip[PF_SKIP_AF].ptr;
+ else if (r->proto && r->proto != IPPROTO_UDP)
+ r = r->skip[PF_SKIP_PROTO].ptr;
+ else if (PF_MISMATCHAW(&r->src.addr, saddr, af,
+ r->src.neg, kif))
+ r = r->skip[PF_SKIP_SRC_ADDR].ptr;
+ else if (r->src.port_op && !pf_match_port(r->src.port_op,
+ r->src.port[0], r->src.port[1], uh->uh_sport))
+ r = r->skip[PF_SKIP_SRC_PORT].ptr;
+ else if (PF_MISMATCHAW(&r->dst.addr, daddr, af,
+ r->dst.neg, NULL))
+ r = r->skip[PF_SKIP_DST_ADDR].ptr;
+ else if (r->dst.port_op && !pf_match_port(r->dst.port_op,
+ r->dst.port[0], r->dst.port[1], uh->uh_dport))
+ r = r->skip[PF_SKIP_DST_PORT].ptr;
+ else if (r->tos && !(r->tos == pd->tos))
+ r = TAILQ_NEXT(r, entries);
+ else if (r->rule_flag & PFRULE_FRAGMENT)
+ r = TAILQ_NEXT(r, entries);
+ else if (r->uid.op && (pd->lookup.done || (pd->lookup.done =
+#ifdef __FreeBSD__
+ pf_socket_lookup(direction, pd, inp), 1)) &&
+#else
+ pf_socket_lookup(direction, pd), 1)) &&
+#endif
+ !pf_match_uid(r->uid.op, r->uid.uid[0], r->uid.uid[1],
+ pd->lookup.uid))
+ r = TAILQ_NEXT(r, entries);
+ else if (r->gid.op && (pd->lookup.done || (pd->lookup.done =
+#ifdef __FreeBSD__
+ pf_socket_lookup(direction, pd, inp), 1)) &&
+#else
+ pf_socket_lookup(direction, pd), 1)) &&
+#endif
+ !pf_match_gid(r->gid.op, r->gid.gid[0], r->gid.gid[1],
+ pd->lookup.gid))
+ r = TAILQ_NEXT(r, entries);
+ else if (r->prob && r->prob <= arc4random())
+ r = TAILQ_NEXT(r, entries);
+ else if (r->match_tag && !pf_match_tag(m, r, pd->pf_mtag, &tag))
+ r = TAILQ_NEXT(r, entries);
+ else if (r->os_fingerprint != PF_OSFP_ANY)
+ r = TAILQ_NEXT(r, entries);
+ else {
+ if (r->tag)
+ tag = r->tag;
+ if (r->rtableid >= 0)
+ rtableid = r->rtableid;
+ if (r->anchor == NULL) {
+ match = 1;
+ *rm = r;
+ *am = a;
+ *rsm = ruleset;
+ if ((*rm)->quick)
+ break;
+ r = TAILQ_NEXT(r, entries);
+ } else
+ pf_step_into_anchor(&asd, &ruleset,
+ PF_RULESET_FILTER, &r, &a, &match);
+ }
+ if (r == NULL && pf_step_out_of_anchor(&asd, &ruleset,
+ PF_RULESET_FILTER, &r, &a, &match))
+ break;
+ }
+ r = *rm;
+ a = *am;
+ ruleset = *rsm;
+
+ REASON_SET(&reason, PFRES_MATCH);
+
+ if (r->log || (nr != NULL && nr->natpass && nr->log)) {
+ if (rewrite)
+#ifdef __FreeBSD__
+ m_copyback(m, off, sizeof(*uh), (caddr_t)uh);
+#else
+ m_copyback(m, off, sizeof(*uh), uh);
+#endif
+ PFLOG_PACKET(kif, h, m, af, direction, reason, r->log ? r : nr,
+ a, ruleset, pd);
+ }
+
+ if ((r->action == PF_DROP) &&
+ ((r->rule_flag & PFRULE_RETURNICMP) ||
+ (r->rule_flag & PFRULE_RETURN))) {
+ /* undo NAT changes, if they have taken place */
+ if (nr != NULL) {
+ if (direction == PF_OUT) {
+ pf_change_ap(saddr, &uh->uh_sport, pd->ip_sum,
+ &uh->uh_sum, &pd->baddr, bport, 1, af);
+ rewrite++;
+ } else {
+ pf_change_ap(daddr, &uh->uh_dport, pd->ip_sum,
+ &uh->uh_sum, &pd->baddr, bport, 1, af);
+ rewrite++;
+ }
+ }
+ if ((af == AF_INET) && r->return_icmp)
+ pf_send_icmp(m, r->return_icmp >> 8,
+ r->return_icmp & 255, af, r);
+ else if ((af == AF_INET6) && r->return_icmp6)
+ pf_send_icmp(m, r->return_icmp6 >> 8,
+ r->return_icmp6 & 255, af, r);
+ }
+
+ if (r->action == PF_DROP)
+ return (PF_DROP);
+
+ if (pf_tag_packet(m, pd->pf_mtag, tag, rtableid)) {
+ REASON_SET(&reason, PFRES_MEMORY);
+ return (PF_DROP);
+ }
+
+ if (r->keep_state || nr != NULL) {
+ /* create new state */
+ struct pf_state *s = NULL;
+ struct pf_src_node *sn = NULL;
+
+ /* check maximums */
+ if (r->max_states && (r->states >= r->max_states)) {
+ pf_status.lcounters[LCNT_STATES]++;
+ REASON_SET(&reason, PFRES_MAXSTATES);
+ goto cleanup;
+ }
+ /* src node for filter rule */
+ if ((r->rule_flag & PFRULE_SRCTRACK ||
+ r->rpool.opts & PF_POOL_STICKYADDR) &&
+ pf_insert_src_node(&sn, r, saddr, af) != 0) {
+ REASON_SET(&reason, PFRES_SRCLIMIT);
+ goto cleanup;
+ }
+ /* src node for translation rule */
+ if (nr != NULL && (nr->rpool.opts & PF_POOL_STICKYADDR) &&
+ ((direction == PF_OUT &&
+ pf_insert_src_node(&nsn, nr, &pd->baddr, af) != 0) ||
+ (pf_insert_src_node(&nsn, nr, saddr, af) != 0))) {
+ REASON_SET(&reason, PFRES_SRCLIMIT);
+ goto cleanup;
+ }
+ s = pool_get(&pf_state_pl, PR_NOWAIT);
+ if (s == NULL) {
+ REASON_SET(&reason, PFRES_MEMORY);
+cleanup:
+ if (sn != NULL && sn->states == 0 && sn->expire == 0) {
+ RB_REMOVE(pf_src_tree, &tree_src_tracking, sn);
+ pf_status.scounters[SCNT_SRC_NODE_REMOVALS]++;
+ pf_status.src_nodes--;
+ pool_put(&pf_src_tree_pl, sn);
+ }
+ if (nsn != sn && nsn != NULL && nsn->states == 0 &&
+ nsn->expire == 0) {
+ RB_REMOVE(pf_src_tree, &tree_src_tracking, nsn);
+ pf_status.scounters[SCNT_SRC_NODE_REMOVALS]++;
+ pf_status.src_nodes--;
+ pool_put(&pf_src_tree_pl, nsn);
+ }
+ return (PF_DROP);
+ }
+ bzero(s, sizeof(*s));
+ s->rule.ptr = r;
+ s->nat_rule.ptr = nr;
+ s->anchor.ptr = a;
+ STATE_INC_COUNTERS(s);
+ if (r->allow_opts)
+ s->state_flags |= PFSTATE_ALLOWOPTS;
+ if (r->rule_flag & PFRULE_STATESLOPPY)
+ s->state_flags |= PFSTATE_SLOPPY;
+ s->log = r->log & PF_LOG_ALL;
+ if (nr != NULL)
+ s->log |= nr->log & PF_LOG_ALL;
+ s->proto = IPPROTO_UDP;
+ s->direction = direction;
+ s->af = af;
+ if (direction == PF_OUT) {
+ PF_ACPY(&s->gwy.addr, saddr, af);
+ s->gwy.port = uh->uh_sport;
+ PF_ACPY(&s->ext.addr, daddr, af);
+ s->ext.port = uh->uh_dport;
+ if (nr != NULL) {
+ PF_ACPY(&s->lan.addr, &pd->baddr, af);
+ s->lan.port = bport;
+ } else {
+ PF_ACPY(&s->lan.addr, &s->gwy.addr, af);
+ s->lan.port = s->gwy.port;
+ }
+ } else {
+ PF_ACPY(&s->lan.addr, daddr, af);
+ s->lan.port = uh->uh_dport;
+ PF_ACPY(&s->ext.addr, saddr, af);
+ s->ext.port = uh->uh_sport;
+ if (nr != NULL) {
+ PF_ACPY(&s->gwy.addr, &pd->baddr, af);
+ s->gwy.port = bport;
+ } else {
+ PF_ACPY(&s->gwy.addr, &s->lan.addr, af);
+ s->gwy.port = s->lan.port;
+ }
+ }
+ s->src.state = PFUDPS_SINGLE;
+ s->dst.state = PFUDPS_NO_TRAFFIC;
+ s->creation = time_second;
+ s->expire = time_second;
+ s->timeout = PFTM_UDP_FIRST_PACKET;
+ pf_set_rt_ifp(s, saddr);
+ if (sn != NULL) {
+ s->src_node = sn;
+ s->src_node->states++;
+ }
+ if (nsn != NULL) {
+ PF_ACPY(&nsn->raddr, &pd->naddr, af);
+ s->nat_src_node = nsn;
+ s->nat_src_node->states++;
+ }
+ if (pf_insert_state(BOUND_IFACE(r, kif), s)) {
+ REASON_SET(&reason, PFRES_STATEINS);
+ pf_src_tree_remove_state(s);
+ STATE_DEC_COUNTERS(s);
+ pool_put(&pf_state_pl, s);
+ return (PF_DROP);
+ } else
+ *sm = s;
+ if (tag > 0) {
+ pf_tag_ref(tag);
+ s->tag = tag;
+ }
+ }
+
+ /* copy back packet headers if we performed NAT operations */
+ if (rewrite)
+ m_copyback(m, off, sizeof(*uh), (caddr_t)uh);
+
+ return (PF_PASS);
+}
+
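+/*
+ * Rule evaluation for ICMP/ICMPv6; the ICMP id takes the role of
+ * the port for translation. Error messages (unreachables, time
+ * exceeded, ...) never create state of their own.
+ */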
+int
+pf_test_icmp(struct pf_rule **rm, struct pf_state **sm, int direction,
+ struct pfi_kif *kif, struct mbuf *m, int off, void *h,
+ struct pf_pdesc *pd, struct pf_rule **am, struct pf_ruleset **rsm,
+ struct ifqueue *ifq)
+{
+ struct pf_rule *nr = NULL;
+ struct pf_addr *saddr = pd->src, *daddr = pd->dst;
+ struct pf_rule *r, *a = NULL;
+ struct pf_ruleset *ruleset = NULL;
+ struct pf_src_node *nsn = NULL;
+ u_short reason;
+ u_int16_t icmpid = 0, bport, nport = 0;
+ sa_family_t af = pd->af;
+ u_int8_t icmptype = 0; /* make the compiler happy */
+ u_int8_t icmpcode = 0; /* make the compiler happy */
+ int state_icmp = 0;
+ int tag = -1, rtableid = -1;
+#ifdef INET6
+ int rewrite = 0;
+#endif /* INET6 */
+ int asd = 0;
+ int match = 0;
+
+ if (pf_check_congestion(ifq)) {
+ REASON_SET(&reason, PFRES_CONGEST);
+ return (PF_DROP);
+ }
+
+ switch (pd->proto) {
+#ifdef INET
+ case IPPROTO_ICMP:
+ icmptype = pd->hdr.icmp->icmp_type;
+ icmpcode = pd->hdr.icmp->icmp_code;
+ icmpid = pd->hdr.icmp->icmp_id;
+
+ if (icmptype == ICMP_UNREACH ||
+ icmptype == ICMP_SOURCEQUENCH ||
+ icmptype == ICMP_REDIRECT ||
+ icmptype == ICMP_TIMXCEED ||
+ icmptype == ICMP_PARAMPROB)
+ state_icmp++;
+ break;
+#endif /* INET */
+#ifdef INET6
+ case IPPROTO_ICMPV6:
+ icmptype = pd->hdr.icmp6->icmp6_type;
+ icmpcode = pd->hdr.icmp6->icmp6_code;
+ icmpid = pd->hdr.icmp6->icmp6_id;
+
+ if (icmptype == ICMP6_DST_UNREACH ||
+ icmptype == ICMP6_PACKET_TOO_BIG ||
+ icmptype == ICMP6_TIME_EXCEEDED ||
+ icmptype == ICMP6_PARAM_PROB)
+ state_icmp++;
+ break;
+#endif /* INET6 */
+ }
+
+ r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_FILTER].active.ptr);
+
+ if (direction == PF_OUT) {
+ bport = nport = icmpid;
+ /* check outgoing packet for BINAT/NAT */
+ if ((nr = pf_get_translation(pd, m, off, PF_OUT, kif, &nsn,
+ saddr, icmpid, daddr, icmpid, &pd->naddr, &nport)) !=
+ NULL) {
+ PF_ACPY(&pd->baddr, saddr, af);
+ switch (af) {
+#ifdef INET
+ case AF_INET:
+ pf_change_a(&saddr->v4.s_addr, pd->ip_sum,
+ pd->naddr.v4.s_addr, 0);
+ pd->hdr.icmp->icmp_cksum = pf_cksum_fixup(
+ pd->hdr.icmp->icmp_cksum, icmpid, nport, 0);
+ pd->hdr.icmp->icmp_id = nport;
+ m_copyback(m, off, ICMP_MINLEN,
+ (caddr_t)pd->hdr.icmp);
+ break;
+#endif /* INET */
+#ifdef INET6
+ case AF_INET6:
+ pf_change_a6(saddr, &pd->hdr.icmp6->icmp6_cksum,
+ &pd->naddr, 0);
+ rewrite++;
+ break;
+#endif /* INET6 */
+ }
+ if (nr->natpass)
+ r = NULL;
+ pd->nat_rule = nr;
+ }
+ } else {
+ bport = nport = icmpid;
+ /* check incoming packet for BINAT/RDR */
+ if ((nr = pf_get_translation(pd, m, off, PF_IN, kif, &nsn,
+ saddr, icmpid, daddr, icmpid, &pd->naddr, &nport)) !=
+ NULL) {
+ PF_ACPY(&pd->baddr, daddr, af);
+ switch (af) {
+#ifdef INET
+ case AF_INET:
+ pf_change_a(&daddr->v4.s_addr,
+ pd->ip_sum, pd->naddr.v4.s_addr, 0);
+ break;
+#endif /* INET */
+#ifdef INET6
+ case AF_INET6:
+ pf_change_a6(daddr, &pd->hdr.icmp6->icmp6_cksum,
+ &pd->naddr, 0);
+ rewrite++;
+ break;
+#endif /* INET6 */
+ }
+ if (nr->natpass)
+ r = NULL;
+ pd->nat_rule = nr;
+ }
+ }
+
+ while (r != NULL) {
+ r->evaluations++;
+ if (pfi_kif_match(r->kif, kif) == r->ifnot)
+ r = r->skip[PF_SKIP_IFP].ptr;
+ else if (r->direction && r->direction != direction)
+ r = r->skip[PF_SKIP_DIR].ptr;
+ else if (r->af && r->af != af)
+ r = r->skip[PF_SKIP_AF].ptr;
+ else if (r->proto && r->proto != pd->proto)
+ r = r->skip[PF_SKIP_PROTO].ptr;
+ else if (PF_MISMATCHAW(&r->src.addr, saddr, af,
+ r->src.neg, kif))
+ r = r->skip[PF_SKIP_SRC_ADDR].ptr;
+ else if (PF_MISMATCHAW(&r->dst.addr, daddr, af,
+ r->dst.neg, NULL))
+ r = r->skip[PF_SKIP_DST_ADDR].ptr;
+ else if (r->type && r->type != icmptype + 1)
+ r = TAILQ_NEXT(r, entries);
+ else if (r->code && r->code != icmpcode + 1)
+ r = TAILQ_NEXT(r, entries);
+ else if (r->tos && !(r->tos == pd->tos))
+ r = TAILQ_NEXT(r, entries);
+ else if (r->rule_flag & PFRULE_FRAGMENT)
+ r = TAILQ_NEXT(r, entries);
+ else if (r->prob && r->prob <= arc4random())
+ r = TAILQ_NEXT(r, entries);
+ else if (r->match_tag && !pf_match_tag(m, r, pd->pf_mtag, &tag))
+ r = TAILQ_NEXT(r, entries);
+ else if (r->os_fingerprint != PF_OSFP_ANY)
+ r = TAILQ_NEXT(r, entries);
+ else {
+ if (r->tag)
+ tag = r->tag;
+ if (r->rtableid >= 0)
+ rtableid = r->rtableid;
+ if (r->anchor == NULL) {
+ match = 1;
+ *rm = r;
+ *am = a;
+ *rsm = ruleset;
+ if ((*rm)->quick)
+ break;
+ r = TAILQ_NEXT(r, entries);
+ } else
+ pf_step_into_anchor(&asd, &ruleset,
+ PF_RULESET_FILTER, &r, &a, &match);
+ }
+ if (r == NULL && pf_step_out_of_anchor(&asd, &ruleset,
+ PF_RULESET_FILTER, &r, &a, &match))
+ break;
+ }
+ r = *rm;
+ a = *am;
+ ruleset = *rsm;
+
+ REASON_SET(&reason, PFRES_MATCH);
+
+ if (r->log || (nr != NULL && nr->natpass && nr->log)) {
+#ifdef INET6
+ if (rewrite)
+ m_copyback(m, off, sizeof(struct icmp6_hdr),
+ (caddr_t)pd->hdr.icmp6);
+#endif /* INET6 */
+ PFLOG_PACKET(kif, h, m, af, direction, reason, r->log ? r : nr,
+ a, ruleset, pd);
+ }
+
+ if (r->action != PF_PASS)
+ return (PF_DROP);
+
+ if (pf_tag_packet(m, pd->pf_mtag, tag, rtableid)) {
+ REASON_SET(&reason, PFRES_MEMORY);
+ return (PF_DROP);
+ }
+
+ if (!state_icmp && (r->keep_state || nr != NULL)) {
+ /* create new state */
+ struct pf_state *s = NULL;
+ struct pf_src_node *sn = NULL;
+
+ /* check maximums */
+ if (r->max_states && (r->states >= r->max_states)) {
+ pf_status.lcounters[LCNT_STATES]++;
+ REASON_SET(&reason, PFRES_MAXSTATES);
+ goto cleanup;
+ }
+ /* src node for filter rule */
+ if ((r->rule_flag & PFRULE_SRCTRACK ||
+ r->rpool.opts & PF_POOL_STICKYADDR) &&
+ pf_insert_src_node(&sn, r, saddr, af) != 0) {
+ REASON_SET(&reason, PFRES_SRCLIMIT);
+ goto cleanup;
+ }
+ /* src node for translation rule */
+ if (nr != NULL && (nr->rpool.opts & PF_POOL_STICKYADDR) &&
+ ((direction == PF_OUT &&
+ pf_insert_src_node(&nsn, nr, &pd->baddr, af) != 0) ||
+ (pf_insert_src_node(&nsn, nr, saddr, af) != 0))) {
+ REASON_SET(&reason, PFRES_SRCLIMIT);
+ goto cleanup;
+ }
+ s = pool_get(&pf_state_pl, PR_NOWAIT);
+ if (s == NULL) {
+ REASON_SET(&reason, PFRES_MEMORY);
+cleanup:
+ if (sn != NULL && sn->states == 0 && sn->expire == 0) {
+ RB_REMOVE(pf_src_tree, &tree_src_tracking, sn);
+ pf_status.scounters[SCNT_SRC_NODE_REMOVALS]++;
+ pf_status.src_nodes--;
+ pool_put(&pf_src_tree_pl, sn);
+ }
+ if (nsn != sn && nsn != NULL && nsn->states == 0 &&
+ nsn->expire == 0) {
+ RB_REMOVE(pf_src_tree, &tree_src_tracking, nsn);
+ pf_status.scounters[SCNT_SRC_NODE_REMOVALS]++;
+ pf_status.src_nodes--;
+ pool_put(&pf_src_tree_pl, nsn);
+ }
+ return (PF_DROP);
+ }
+ bzero(s, sizeof(*s));
+ s->rule.ptr = r;
+ s->nat_rule.ptr = nr;
+ s->anchor.ptr = a;
+ STATE_INC_COUNTERS(s);
+ if (r->allow_opts)
+ s->state_flags |= PFSTATE_ALLOWOPTS;
+ if (r->rule_flag & PFRULE_STATESLOPPY)
+ s->state_flags |= PFSTATE_SLOPPY;
+ s->log = r->log & PF_LOG_ALL;
+ if (nr != NULL)
+ s->log |= nr->log & PF_LOG_ALL;
+ s->proto = pd->proto;
+ s->direction = direction;
+ s->af = af;
+ if (direction == PF_OUT) {
+ PF_ACPY(&s->gwy.addr, saddr, af);
+ s->gwy.port = nport;
+ PF_ACPY(&s->ext.addr, daddr, af);
+ s->ext.port = 0;
+ if (nr != NULL) {
+ PF_ACPY(&s->lan.addr, &pd->baddr, af);
+ s->lan.port = bport;
+ } else {
+ PF_ACPY(&s->lan.addr, &s->gwy.addr, af);
+ s->lan.port = s->gwy.port;
+ }
+ } else {
+ PF_ACPY(&s->lan.addr, daddr, af);
+ s->lan.port = nport;
+ PF_ACPY(&s->ext.addr, saddr, af);
+ s->ext.port = 0;
+ if (nr != NULL) {
+ PF_ACPY(&s->gwy.addr, &pd->baddr, af);
+ s->gwy.port = bport;
+ } else {
+ PF_ACPY(&s->gwy.addr, &s->lan.addr, af);
+ s->gwy.port = s->lan.port;
+ }
+ }
+ s->creation = time_second;
+ s->expire = time_second;
+ s->timeout = PFTM_ICMP_FIRST_PACKET;
+ pf_set_rt_ifp(s, saddr);
+ if (sn != NULL) {
+ s->src_node = sn;
+ s->src_node->states++;
+ }
+ if (nsn != NULL) {
+ PF_ACPY(&nsn->raddr, &pd->naddr, af);
+ s->nat_src_node = nsn;
+ s->nat_src_node->states++;
+ }
+ if (pf_insert_state(BOUND_IFACE(r, kif), s)) {
+ REASON_SET(&reason, PFRES_STATEINS);
+ pf_src_tree_remove_state(s);
+ STATE_DEC_COUNTERS(s);
+ pool_put(&pf_state_pl, s);
+ return (PF_DROP);
+ } else
+ *sm = s;
+ if (tag > 0) {
+ pf_tag_ref(tag);
+ s->tag = tag;
+ }
+ }
+
+#ifdef INET6
+ /* copy back packet headers if we performed IPv6 NAT operations */
+ if (rewrite)
+ m_copyback(m, off, sizeof(struct icmp6_hdr),
+ (caddr_t)pd->hdr.icmp6);
+#endif /* INET6 */
+
+ return (PF_PASS);
+}
+
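+/*
+ * Rule evaluation for the remaining protocols, which carry no port
+ * numbers; translation rewrites addresses only.
+ */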
+int
+pf_test_other(struct pf_rule **rm, struct pf_state **sm, int direction,
+ struct pfi_kif *kif, struct mbuf *m, int off, void *h, struct pf_pdesc *pd,
+ struct pf_rule **am, struct pf_ruleset **rsm, struct ifqueue *ifq)
+{
+ struct pf_rule *nr = NULL;
+ struct pf_rule *r, *a = NULL;
+ struct pf_ruleset *ruleset = NULL;
+ struct pf_src_node *nsn = NULL;
+ struct pf_addr *saddr = pd->src, *daddr = pd->dst;
+ sa_family_t af = pd->af;
+ u_short reason;
+ int tag = -1, rtableid = -1;
+ int asd = 0;
+ int match = 0;
+
+ if (pf_check_congestion(ifq)) {
+ REASON_SET(&reason, PFRES_CONGEST);
+ return (PF_DROP);
+ }
+
+ r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_FILTER].active.ptr);
+
+ if (direction == PF_OUT) {
+ /* check outgoing packet for BINAT/NAT */
+ if ((nr = pf_get_translation(pd, m, off, PF_OUT, kif, &nsn,
+ saddr, 0, daddr, 0, &pd->naddr, NULL)) != NULL) {
+ PF_ACPY(&pd->baddr, saddr, af);
+ switch (af) {
+#ifdef INET
+ case AF_INET:
+ pf_change_a(&saddr->v4.s_addr, pd->ip_sum,
+ pd->naddr.v4.s_addr, 0);
+ break;
+#endif /* INET */
+#ifdef INET6
+ case AF_INET6:
+ PF_ACPY(saddr, &pd->naddr, af);
+ break;
+#endif /* INET6 */
+ }
+ if (nr->natpass)
+ r = NULL;
+ pd->nat_rule = nr;
+ }
+ } else {
+ /* check incoming packet for BINAT/RDR */
+ if ((nr = pf_get_translation(pd, m, off, PF_IN, kif, &nsn,
+ saddr, 0, daddr, 0, &pd->naddr, NULL)) != NULL) {
+ PF_ACPY(&pd->baddr, daddr, af);
+ switch (af) {
+#ifdef INET
+ case AF_INET:
+ pf_change_a(&daddr->v4.s_addr,
+ pd->ip_sum, pd->naddr.v4.s_addr, 0);
+ break;
+#endif /* INET */
+#ifdef INET6
+ case AF_INET6:
+ PF_ACPY(daddr, &pd->naddr, af);
+ break;
+#endif /* INET6 */
+ }
+ if (nr->natpass)
+ r = NULL;
+ pd->nat_rule = nr;
+ }
+ }
+
+ while (r != NULL) {
+ r->evaluations++;
+ if (pfi_kif_match(r->kif, kif) == r->ifnot)
+ r = r->skip[PF_SKIP_IFP].ptr;
+ else if (r->direction && r->direction != direction)
+ r = r->skip[PF_SKIP_DIR].ptr;
+ else if (r->af && r->af != af)
+ r = r->skip[PF_SKIP_AF].ptr;
+ else if (r->proto && r->proto != pd->proto)
+ r = r->skip[PF_SKIP_PROTO].ptr;
+ else if (PF_MISMATCHAW(&r->src.addr, pd->src, af,
+ r->src.neg, kif))
+ r = r->skip[PF_SKIP_SRC_ADDR].ptr;
+ else if (PF_MISMATCHAW(&r->dst.addr, pd->dst, af,
+ r->dst.neg, NULL))
+ r = r->skip[PF_SKIP_DST_ADDR].ptr;
+ else if (r->tos && !(r->tos == pd->tos))
+ r = TAILQ_NEXT(r, entries);
+ else if (r->rule_flag & PFRULE_FRAGMENT)
+ r = TAILQ_NEXT(r, entries);
+ else if (r->prob && r->prob <= arc4random())
+ r = TAILQ_NEXT(r, entries);
+ else if (r->match_tag && !pf_match_tag(m, r, pd->pf_mtag, &tag))
+ r = TAILQ_NEXT(r, entries);
+ else if (r->os_fingerprint != PF_OSFP_ANY)
+ r = TAILQ_NEXT(r, entries);
+ else {
+ if (r->tag)
+ tag = r->tag;
+ if (r->rtableid >= 0)
+ rtableid = r->rtableid;
+ if (r->anchor == NULL) {
+ match = 1;
+ *rm = r;
+ *am = a;
+ *rsm = ruleset;
+ if ((*rm)->quick)
+ break;
+ r = TAILQ_NEXT(r, entries);
+ } else
+ pf_step_into_anchor(&asd, &ruleset,
+ PF_RULESET_FILTER, &r, &a, &match);
+ }
+ if (r == NULL && pf_step_out_of_anchor(&asd, &ruleset,
+ PF_RULESET_FILTER, &r, &a, &match))
+ break;
+ }
+ r = *rm;
+ a = *am;
+ ruleset = *rsm;
+
+ REASON_SET(&reason, PFRES_MATCH);
+
+ if (r->log || (nr != NULL && nr->natpass && nr->log))
+ PFLOG_PACKET(kif, h, m, af, direction, reason, r->log ? r : nr,
+ a, ruleset, pd);
+
+ if ((r->action == PF_DROP) &&
+ ((r->rule_flag & PFRULE_RETURNICMP) ||
+ (r->rule_flag & PFRULE_RETURN))) {
+ struct pf_addr *a = NULL;
+
+ if (nr != NULL) {
+ if (direction == PF_OUT)
+ a = saddr;
+ else
+ a = daddr;
+ }
+ if (a != NULL) {
+ switch (af) {
+#ifdef INET
+ case AF_INET:
+ pf_change_a(&a->v4.s_addr, pd->ip_sum,
+ pd->baddr.v4.s_addr, 0);
+ break;
+#endif /* INET */
+#ifdef INET6
+ case AF_INET6:
+ PF_ACPY(a, &pd->baddr, af);
+ break;
+#endif /* INET6 */
+ }
+ }
+ if ((af == AF_INET) && r->return_icmp)
+ pf_send_icmp(m, r->return_icmp >> 8,
+ r->return_icmp & 255, af, r);
+ else if ((af == AF_INET6) && r->return_icmp6)
+ pf_send_icmp(m, r->return_icmp6 >> 8,
+ r->return_icmp6 & 255, af, r);
+ }
+
+ if (r->action != PF_PASS)
+ return (PF_DROP);
+
+ if (pf_tag_packet(m, pd->pf_mtag, tag, rtableid)) {
+ REASON_SET(&reason, PFRES_MEMORY);
+ return (PF_DROP);
+ }
+
+ if (r->keep_state || nr != NULL) {
+ /* create new state */
+ struct pf_state *s = NULL;
+ struct pf_src_node *sn = NULL;
+
+ /* check maximums */
+ if (r->max_states && (r->states >= r->max_states)) {
+ pf_status.lcounters[LCNT_STATES]++;
+ REASON_SET(&reason, PFRES_MAXSTATES);
+ goto cleanup;
+ }
+ /* src node for filter rule */
+ if ((r->rule_flag & PFRULE_SRCTRACK ||
+ r->rpool.opts & PF_POOL_STICKYADDR) &&
+ pf_insert_src_node(&sn, r, saddr, af) != 0) {
+ REASON_SET(&reason, PFRES_SRCLIMIT);
+ goto cleanup;
+ }
+ /* src node for translation rule */
+ if (nr != NULL && (nr->rpool.opts & PF_POOL_STICKYADDR) &&
+ ((direction == PF_OUT &&
+ pf_insert_src_node(&nsn, nr, &pd->baddr, af) != 0) ||
+ (pf_insert_src_node(&nsn, nr, saddr, af) != 0))) {
+ REASON_SET(&reason, PFRES_SRCLIMIT);
+ goto cleanup;
+ }
+ s = pool_get(&pf_state_pl, PR_NOWAIT);
+ if (s == NULL) {
+ REASON_SET(&reason, PFRES_MEMORY);
+cleanup:
+ if (sn != NULL && sn->states == 0 && sn->expire == 0) {
+ RB_REMOVE(pf_src_tree, &tree_src_tracking, sn);
+ pf_status.scounters[SCNT_SRC_NODE_REMOVALS]++;
+ pf_status.src_nodes--;
+ pool_put(&pf_src_tree_pl, sn);
+ }
+ if (nsn != sn && nsn != NULL && nsn->states == 0 &&
+ nsn->expire == 0) {
+ RB_REMOVE(pf_src_tree, &tree_src_tracking, nsn);
+ pf_status.scounters[SCNT_SRC_NODE_REMOVALS]++;
+ pf_status.src_nodes--;
+ pool_put(&pf_src_tree_pl, nsn);
+ }
+ return (PF_DROP);
+ }
+ bzero(s, sizeof(*s));
+ s->rule.ptr = r;
+ s->nat_rule.ptr = nr;
+ s->anchor.ptr = a;
+ STATE_INC_COUNTERS(s);
+ if (r->allow_opts)
+ s->state_flags |= PFSTATE_ALLOWOPTS;
+ if (r->rule_flag & PFRULE_STATESLOPPY)
+ s->state_flags |= PFSTATE_SLOPPY;
+ s->log = r->log & PF_LOG_ALL;
+ if (nr != NULL)
+ s->log |= nr->log & PF_LOG_ALL;
+ s->proto = pd->proto;
+ s->direction = direction;
+ s->af = af;
+ if (direction == PF_OUT) {
+ PF_ACPY(&s->gwy.addr, saddr, af);
+ PF_ACPY(&s->ext.addr, daddr, af);
+ if (nr != NULL)
+ PF_ACPY(&s->lan.addr, &pd->baddr, af);
+ else
+ PF_ACPY(&s->lan.addr, &s->gwy.addr, af);
+ } else {
+ PF_ACPY(&s->lan.addr, daddr, af);
+ PF_ACPY(&s->ext.addr, saddr, af);
+ if (nr != NULL)
+ PF_ACPY(&s->gwy.addr, &pd->baddr, af);
+ else
+ PF_ACPY(&s->gwy.addr, &s->lan.addr, af);
+ }
+ s->src.state = PFOTHERS_SINGLE;
+ s->dst.state = PFOTHERS_NO_TRAFFIC;
+ s->creation = time_second;
+ s->expire = time_second;
+ s->timeout = PFTM_OTHER_FIRST_PACKET;
+ pf_set_rt_ifp(s, saddr);
+ if (sn != NULL) {
+ s->src_node = sn;
+ s->src_node->states++;
+ }
+ if (nsn != NULL) {
+ PF_ACPY(&nsn->raddr, &pd->naddr, af);
+ s->nat_src_node = nsn;
+ s->nat_src_node->states++;
+ }
+ if (pf_insert_state(BOUND_IFACE(r, kif), s)) {
+ REASON_SET(&reason, PFRES_STATEINS);
+ pf_src_tree_remove_state(s);
+ STATE_DEC_COUNTERS(s);
+ pool_put(&pf_state_pl, s);
+ return (PF_DROP);
+ } else
+ *sm = s;
+ if (tag > 0) {
+ pf_tag_ref(tag);
+ s->tag = tag;
+ }
+ }
+
+ return (PF_PASS);
+}
+
+int
+pf_test_fragment(struct pf_rule **rm, int direction, struct pfi_kif *kif,
+ struct mbuf *m, void *h, struct pf_pdesc *pd, struct pf_rule **am,
+ struct pf_ruleset **rsm)
+{
+ struct pf_rule *r, *a = NULL;
+ struct pf_ruleset *ruleset = NULL;
+ sa_family_t af = pd->af;
+ u_short reason;
+ int tag = -1;
+ int asd = 0;
+ int match = 0;
+
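+	/*
+	 * Fragments carry no transport header, so the loop below skips
+	 * any rule that would need port, TCP flag or ICMP type/code
+	 * information; such rules can never match here.
+	 */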
+ r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_FILTER].active.ptr);
+ while (r != NULL) {
+ r->evaluations++;
+ if (pfi_kif_match(r->kif, kif) == r->ifnot)
+ r = r->skip[PF_SKIP_IFP].ptr;
+ else if (r->direction && r->direction != direction)
+ r = r->skip[PF_SKIP_DIR].ptr;
+ else if (r->af && r->af != af)
+ r = r->skip[PF_SKIP_AF].ptr;
+ else if (r->proto && r->proto != pd->proto)
+ r = r->skip[PF_SKIP_PROTO].ptr;
+ else if (PF_MISMATCHAW(&r->src.addr, pd->src, af,
+ r->src.neg, kif))
+ r = r->skip[PF_SKIP_SRC_ADDR].ptr;
+ else if (PF_MISMATCHAW(&r->dst.addr, pd->dst, af,
+ r->dst.neg, NULL))
+ r = r->skip[PF_SKIP_DST_ADDR].ptr;
+ else if (r->tos && !(r->tos == pd->tos))
+ r = TAILQ_NEXT(r, entries);
+ else if (r->os_fingerprint != PF_OSFP_ANY)
+ r = TAILQ_NEXT(r, entries);
+ else if (pd->proto == IPPROTO_UDP &&
+ (r->src.port_op || r->dst.port_op))
+ r = TAILQ_NEXT(r, entries);
+ else if (pd->proto == IPPROTO_TCP &&
+ (r->src.port_op || r->dst.port_op || r->flagset))
+ r = TAILQ_NEXT(r, entries);
+ else if ((pd->proto == IPPROTO_ICMP ||
+ pd->proto == IPPROTO_ICMPV6) &&
+ (r->type || r->code))
+ r = TAILQ_NEXT(r, entries);
+ else if (r->prob && r->prob <= arc4random())
+ r = TAILQ_NEXT(r, entries);
+ else if (r->match_tag && !pf_match_tag(m, r, pd->pf_mtag, &tag))
+ r = TAILQ_NEXT(r, entries);
+ else {
+ if (r->anchor == NULL) {
+ match = 1;
+ *rm = r;
+ *am = a;
+ *rsm = ruleset;
+ if ((*rm)->quick)
+ break;
+ r = TAILQ_NEXT(r, entries);
+ } else
+ pf_step_into_anchor(&asd, &ruleset,
+ PF_RULESET_FILTER, &r, &a, &match);
+ }
+ if (r == NULL && pf_step_out_of_anchor(&asd, &ruleset,
+ PF_RULESET_FILTER, &r, &a, &match))
+ break;
+ }
+ r = *rm;
+ a = *am;
+ ruleset = *rsm;
+
+ REASON_SET(&reason, PFRES_MATCH);
+
+ if (r->log)
+ PFLOG_PACKET(kif, h, m, af, direction, reason, r, a, ruleset,
+ pd);
+
+ if (r->action != PF_PASS)
+ return (PF_DROP);
+
+ if (pf_tag_packet(m, pd->pf_mtag, tag, -1)) {
+ REASON_SET(&reason, PFRES_MEMORY);
+ return (PF_DROP);
+ }
+
+ return (PF_PASS);
+}
+
+int
+pf_tcp_track_full(struct pf_state_peer *src, struct pf_state_peer *dst,
+ struct pf_state **state, struct pfi_kif *kif, struct mbuf *m, int off,
+ struct pf_pdesc *pd, u_short *reason, int *copyback)
+{
+ struct tcphdr *th = pd->hdr.tcp;
+ u_int16_t win = ntohs(th->th_win);
+ u_int32_t ack, end, seq, orig_seq;
+ u_int8_t sws, dws;
+ int ackskew;
+
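+	/*
+	 * Window scaling (RFC 1323) only takes effect once both peers
+	 * have offered it, and the window field of a SYN segment is
+	 * never scaled; in all other cases treat both scale factors as
+	 * zero.
+	 */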
+ if (src->wscale && dst->wscale && !(th->th_flags & TH_SYN)) {
+ sws = src->wscale & PF_WSCALE_MASK;
+ dws = dst->wscale & PF_WSCALE_MASK;
+ } else
+ sws = dws = 0;
+
+ /*
+ * Sequence tracking algorithm from Guido van Rooij's paper:
+ * http://www.madison-gurkha.com/publications/tcp_filtering/
+ * tcp_filtering.ps
+ */
+
+ orig_seq = seq = ntohl(th->th_seq);
+ if (src->seqlo == 0) {
+ /* First packet from this end. Set its state */
+
+ if ((pd->flags & PFDESC_TCP_NORM || dst->scrub) &&
+ src->scrub == NULL) {
+ if (pf_normalize_tcp_init(m, off, pd, th, src, dst)) {
+ REASON_SET(reason, PFRES_MEMORY);
+ return (PF_DROP);
+ }
+ }
+
+ /* Deferred generation of sequence number modulator */
+ if (dst->seqdiff && !src->seqdiff) {
+#ifdef __FreeBSD__
+ while ((src->seqdiff = pf_new_isn(*state) - seq) == 0)
+ ;
+#else
+ while ((src->seqdiff = tcp_rndiss_next() - seq) == 0)
+ ;
+#endif
+ ack = ntohl(th->th_ack) - dst->seqdiff;
+ pf_change_a(&th->th_seq, &th->th_sum, htonl(seq +
+ src->seqdiff), 0);
+ pf_change_a(&th->th_ack, &th->th_sum, htonl(ack), 0);
+ *copyback = 1;
+ } else {
+ ack = ntohl(th->th_ack);
+ }
+
+ end = seq + pd->p_len;
+ if (th->th_flags & TH_SYN) {
+ end++;
+ if (dst->wscale & PF_WSCALE_FLAG) {
+ src->wscale = pf_get_wscale(m, off, th->th_off,
+ pd->af);
+ if (src->wscale & PF_WSCALE_FLAG) {
+ /* Remove scale factor from initial
+ * window */
+ sws = src->wscale & PF_WSCALE_MASK;
+ win = ((u_int32_t)win + (1 << sws) - 1)
+ >> sws;
+ dws = dst->wscale & PF_WSCALE_MASK;
+ } else {
+ /* fixup other window */
+ dst->max_win <<= dst->wscale &
+ PF_WSCALE_MASK;
+ /* in case of a retrans SYN|ACK */
+ dst->wscale = 0;
+ }
+ }
+ }
+ if (th->th_flags & TH_FIN)
+ end++;
+
+ src->seqlo = seq;
+ if (src->state < TCPS_SYN_SENT)
+ src->state = TCPS_SYN_SENT;
+
+ /*
+ * May need to slide the window (seqhi may have been set by
+ * the crappy stack check or if we picked up the connection
+ * after establishment)
+ */
+ if (src->seqhi == 1 ||
+ SEQ_GEQ(end + MAX(1, dst->max_win << dws), src->seqhi))
+ src->seqhi = end + MAX(1, dst->max_win << dws);
+ if (win > src->max_win)
+ src->max_win = win;
+
+ } else {
+ ack = ntohl(th->th_ack) - dst->seqdiff;
+ if (src->seqdiff) {
+ /* Modulate sequence numbers */
+ pf_change_a(&th->th_seq, &th->th_sum, htonl(seq +
+ src->seqdiff), 0);
+ pf_change_a(&th->th_ack, &th->th_sum, htonl(ack), 0);
+ *copyback = 1;
+ }
+ end = seq + pd->p_len;
+ if (th->th_flags & TH_SYN)
+ end++;
+ if (th->th_flags & TH_FIN)
+ end++;
+ }
+
+ if ((th->th_flags & TH_ACK) == 0) {
+ /* Let it pass through the ack skew check */
+ ack = dst->seqlo;
+ } else if ((ack == 0 &&
+ (th->th_flags & (TH_ACK|TH_RST)) == (TH_ACK|TH_RST)) ||
+ /* broken tcp stacks do not set ack */
+ (dst->state < TCPS_SYN_SENT)) {
+ /*
+ * Many stacks (ours included) will set the ACK number in a
+ * FIN|ACK if the SYN times out -- no sequence to ACK.
+ */
+ ack = dst->seqlo;
+ }
+
+ if (seq == end) {
+ /* Ease sequencing restrictions on packets that carry no data */
+ seq = src->seqlo;
+ end = seq;
+ }
+
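+	/*
+	 * ackskew > 0 means the ACK lags behind what dst has already
+	 * sent; ackskew < 0 means it runs ahead of it.
+	 */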
+ ackskew = dst->seqlo - ack;
+
+ /*
+ * Need to demodulate the sequence numbers in any TCP SACK options
+ * (Selective ACK). We could optionally validate the SACK values
+ * against the current ACK window, either forwards or backwards, but
+ * I'm not confident that SACK has been implemented properly
+ * everywhere. It wouldn't surprise me if several stacks accidentally
+ * SACK too far backwards of previously ACKed data. There really aren't
+ * any security implications of bad SACKing unless the target stack
+ * doesn't validate the option length correctly. Someone trying to
+ * spoof into a TCP connection won't bother blindly sending SACK
+ * options anyway.
+ */
+ if (dst->seqdiff && (th->th_off << 2) > sizeof(struct tcphdr)) {
+ if (pf_modulate_sack(m, off, pd, th, dst))
+ *copyback = 1;
+ }
+
+#define MAXACKWINDOW (0xffff + 1500) /* 1500 is an arbitrary fudge factor */
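+	/*
+	 * For scale: MAXACKWINDOW works out to 65535 + 1500 = 67035
+	 * octets, and the sws/dws shifts below widen the tolerances
+	 * accordingly when window scaling is in effect.
+	 */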
+ if (SEQ_GEQ(src->seqhi, end) &&
+ /* Last octet inside other's window space */
+ SEQ_GEQ(seq, src->seqlo - (dst->max_win << dws)) &&
+ /* Retrans: not more than one window back */
+ (ackskew >= -MAXACKWINDOW) &&
+ /* Acking not more than one reassembled fragment backwards */
+ (ackskew <= (MAXACKWINDOW << sws)) &&
+ /* Acking not more than one window forward */
+ ((th->th_flags & TH_RST) == 0 || orig_seq == src->seqlo ||
+ (orig_seq == src->seqlo + 1) || (pd->flags & PFDESC_IP_REAS) == 0)) {
+ /* Require an exact/+1 sequence match on resets when possible */
+
+ if (dst->scrub || src->scrub) {
+ if (pf_normalize_tcp_stateful(m, off, pd, reason, th,
+ *state, src, dst, copyback))
+ return (PF_DROP);
+ }
+
+ /* update max window */
+ if (src->max_win < win)
+ src->max_win = win;
+ /* synchronize sequencing */
+ if (SEQ_GT(end, src->seqlo))
+ src->seqlo = end;
+ /* slide the window of what the other end can send */
+ if (SEQ_GEQ(ack + (win << sws), dst->seqhi))
+ dst->seqhi = ack + MAX((win << sws), 1);
+
+ /* update states */
+ if (th->th_flags & TH_SYN)
+ if (src->state < TCPS_SYN_SENT)
+ src->state = TCPS_SYN_SENT;
+ if (th->th_flags & TH_FIN)
+ if (src->state < TCPS_CLOSING)
+ src->state = TCPS_CLOSING;
+ if (th->th_flags & TH_ACK) {
+ if (dst->state == TCPS_SYN_SENT) {
+ dst->state = TCPS_ESTABLISHED;
+ if (src->state == TCPS_ESTABLISHED &&
+ (*state)->src_node != NULL &&
+ pf_src_connlimit(state)) {
+ REASON_SET(reason, PFRES_SRCLIMIT);
+ return (PF_DROP);
+ }
+ } else if (dst->state == TCPS_CLOSING)
+ dst->state = TCPS_FIN_WAIT_2;
+ }
+ if (th->th_flags & TH_RST)
+ src->state = dst->state = TCPS_TIME_WAIT;
+
+ /* update expire time */
+ (*state)->expire = time_second;
+ if (src->state >= TCPS_FIN_WAIT_2 &&
+ dst->state >= TCPS_FIN_WAIT_2)
+ (*state)->timeout = PFTM_TCP_CLOSED;
+ else if (src->state >= TCPS_CLOSING &&
+ dst->state >= TCPS_CLOSING)
+ (*state)->timeout = PFTM_TCP_FIN_WAIT;
+ else if (src->state < TCPS_ESTABLISHED ||
+ dst->state < TCPS_ESTABLISHED)
+ (*state)->timeout = PFTM_TCP_OPENING;
+ else if (src->state >= TCPS_CLOSING ||
+ dst->state >= TCPS_CLOSING)
+ (*state)->timeout = PFTM_TCP_CLOSING;
+ else
+ (*state)->timeout = PFTM_TCP_ESTABLISHED;
+
+ /* Fall through to PASS packet */
+
+ } else if ((dst->state < TCPS_SYN_SENT ||
+ dst->state >= TCPS_FIN_WAIT_2 ||
+ src->state >= TCPS_FIN_WAIT_2) &&
+ SEQ_GEQ(src->seqhi + MAXACKWINDOW, end) &&
+ /* Within a window forward of the originating packet */
+ SEQ_GEQ(seq, src->seqlo - MAXACKWINDOW)) {
+ /* Within a window backward of the originating packet */
+
+ /*
+ * This currently handles three situations:
+ * 1) Stupid stacks will shotgun SYNs before their peer
+ * replies.
+ * 2) When PF catches an already established stream (the
+ * firewall rebooted, the state table was flushed, routes
+ * changed...)
+ * 3) Packets get funky immediately after the connection
+ * closes (this should catch Solaris spurious ACK|FINs
+ * that web servers like to spew after a close)
+ *
+ * This must be a little more careful than the above code
+ * since packet floods will also be caught here. We don't
+ * update the TTL here to mitigate the damage of a packet
+ * flood and so the same code can handle awkward establishment
+ * and a loosened connection close.
+ * In the establishment case, a correct peer response will
+ * validate the connection, go through the normal state code
+ * and keep updating the state TTL.
+ */
+
+ if (pf_status.debug >= PF_DEBUG_MISC) {
+ printf("pf: loose state match: ");
+ pf_print_state(*state);
+ pf_print_flags(th->th_flags);
+ printf(" seq=%u (%u) ack=%u len=%u ackskew=%d "
+ "pkts=%llu:%llu\n", seq, orig_seq, ack, pd->p_len,
+#ifdef __FreeBSD__
+ ackskew, (unsigned long long)(*state)->packets[0],
+ (unsigned long long)(*state)->packets[1]);
+#else
+ ackskew, (*state)->packets[0],
+ (*state)->packets[1]);
+#endif
+ }
+
+ if (dst->scrub || src->scrub) {
+ if (pf_normalize_tcp_stateful(m, off, pd, reason, th,
+ *state, src, dst, copyback))
+ return (PF_DROP);
+ }
+
+ /* update max window */
+ if (src->max_win < win)
+ src->max_win = win;
+ /* synchronize sequencing */
+ if (SEQ_GT(end, src->seqlo))
+ src->seqlo = end;
+ /* slide the window of what the other end can send */
+ if (SEQ_GEQ(ack + (win << sws), dst->seqhi))
+ dst->seqhi = ack + MAX((win << sws), 1);
+
+ /*
+ * Cannot set dst->seqhi here since this could be a shotgunned
+ * SYN and not an already established connection.
+ */
+
+ if (th->th_flags & TH_FIN)
+ if (src->state < TCPS_CLOSING)
+ src->state = TCPS_CLOSING;
+ if (th->th_flags & TH_RST)
+ src->state = dst->state = TCPS_TIME_WAIT;
+
+ /* Fall through to PASS packet */
+
+ } else {
+ if ((*state)->dst.state == TCPS_SYN_SENT &&
+ (*state)->src.state == TCPS_SYN_SENT) {
+ /* Send RST for state mismatches during handshake */
+ if (!(th->th_flags & TH_RST))
+#ifdef __FreeBSD__
+ pf_send_tcp(m, (*state)->rule.ptr, pd->af,
+#else
+ pf_send_tcp((*state)->rule.ptr, pd->af,
+#endif
+ pd->dst, pd->src, th->th_dport,
+ th->th_sport, ntohl(th->th_ack), 0,
+ TH_RST, 0, 0,
+ (*state)->rule.ptr->return_ttl, 1, 0,
+ pd->eh, kif->pfik_ifp);
+ src->seqlo = 0;
+ src->seqhi = 1;
+ src->max_win = 1;
+ } else if (pf_status.debug >= PF_DEBUG_MISC) {
+ printf("pf: BAD state: ");
+ pf_print_state(*state);
+ pf_print_flags(th->th_flags);
+ printf(" seq=%u (%u) ack=%u len=%u ackskew=%d "
+#ifdef notyet
+ "pkts=%llu:%llu dir=%s,%s\n",
+#else
+ "pkts=%llu:%llu%s\n",
+#endif
+ seq, orig_seq, ack, pd->p_len, ackskew,
+#ifdef __FreeBSD__
+ (unsigned long long)(*state)->packets[0],
+ (unsigned long long)(*state)->packets[1],
+#else
+ (*state)->packets[0], (*state)->packets[1],
+#endif
+#ifdef notyet
+ direction == PF_IN ? "in" : "out",
+ direction == (*state)->direction ? "fwd" : "rev");
+#else
+ "");
+#endif
+ printf("pf: State failure on: %c %c %c %c | %c %c\n",
+ SEQ_GEQ(src->seqhi, end) ? ' ' : '1',
+ SEQ_GEQ(seq, src->seqlo - (dst->max_win << dws)) ?
+ ' ': '2',
+ (ackskew >= -MAXACKWINDOW) ? ' ' : '3',
+ (ackskew <= (MAXACKWINDOW << sws)) ? ' ' : '4',
+ SEQ_GEQ(src->seqhi + MAXACKWINDOW, end) ?' ' :'5',
+ SEQ_GEQ(seq, src->seqlo - MAXACKWINDOW) ?' ' :'6');
+ }
+ REASON_SET(reason, PFRES_BADSTATE);
+ return (PF_DROP);
+ }
+
+ /* Any packets that have gotten this far are to be passed */
+ return (PF_PASS);
+}
+
+int
+pf_tcp_track_sloppy(struct pf_state_peer *src, struct pf_state_peer *dst,
+ struct pf_state **state, struct pf_pdesc *pd, u_short *reason)
+{
+ struct tcphdr *th = pd->hdr.tcp;
+
+ if (th->th_flags & TH_SYN)
+ if (src->state < TCPS_SYN_SENT)
+ src->state = TCPS_SYN_SENT;
+ if (th->th_flags & TH_FIN)
+ if (src->state < TCPS_CLOSING)
+ src->state = TCPS_CLOSING;
+ if (th->th_flags & TH_ACK) {
+ if (dst->state == TCPS_SYN_SENT) {
+ dst->state = TCPS_ESTABLISHED;
+ if (src->state == TCPS_ESTABLISHED &&
+ (*state)->src_node != NULL &&
+ pf_src_connlimit(state)) {
+ REASON_SET(reason, PFRES_SRCLIMIT);
+ return (PF_DROP);
+ }
+ } else if (dst->state == TCPS_CLOSING) {
+ dst->state = TCPS_FIN_WAIT_2;
+ } else if (src->state == TCPS_SYN_SENT &&
+ dst->state < TCPS_SYN_SENT) {
+ /*
+ * Handle a special sloppy case where we only see one
+ * half of the connection. If there is an ACK after
+ * the initial SYN without ever seeing a packet from
+ * the destination, set the connection to established.
+ */
+ dst->state = src->state = TCPS_ESTABLISHED;
+ if ((*state)->src_node != NULL &&
+ pf_src_connlimit(state)) {
+ REASON_SET(reason, PFRES_SRCLIMIT);
+ return (PF_DROP);
+ }
+ } else if (src->state == TCPS_CLOSING &&
+ dst->state == TCPS_ESTABLISHED &&
+ dst->seqlo == 0) {
+ /*
+ * Handle the closing of half connections where we
+ * don't see the full bidirectional FIN/ACK+ACK
+ * handshake.
+ */
+ dst->state = TCPS_CLOSING;
+ }
+ }
+ if (th->th_flags & TH_RST)
+ src->state = dst->state = TCPS_TIME_WAIT;
+
+ /* update expire time */
+ (*state)->expire = time_second;
+ if (src->state >= TCPS_FIN_WAIT_2 &&
+ dst->state >= TCPS_FIN_WAIT_2)
+ (*state)->timeout = PFTM_TCP_CLOSED;
+ else if (src->state >= TCPS_CLOSING &&
+ dst->state >= TCPS_CLOSING)
+ (*state)->timeout = PFTM_TCP_FIN_WAIT;
+ else if (src->state < TCPS_ESTABLISHED ||
+ dst->state < TCPS_ESTABLISHED)
+ (*state)->timeout = PFTM_TCP_OPENING;
+ else if (src->state >= TCPS_CLOSING ||
+ dst->state >= TCPS_CLOSING)
+ (*state)->timeout = PFTM_TCP_CLOSING;
+ else
+ (*state)->timeout = PFTM_TCP_ESTABLISHED;
+
+ return (PF_PASS);
+}
+
+int
+pf_test_state_tcp(struct pf_state **state, int direction, struct pfi_kif *kif,
+ struct mbuf *m, int off, void *h, struct pf_pdesc *pd,
+ u_short *reason)
+{
+ struct pf_state_cmp key;
+ struct tcphdr *th = pd->hdr.tcp;
+ int copyback = 0;
+ struct pf_state_peer *src, *dst;
+
+ key.af = pd->af;
+ key.proto = IPPROTO_TCP;
+ if (direction == PF_IN) {
+ PF_ACPY(&key.ext.addr, pd->src, key.af);
+ PF_ACPY(&key.gwy.addr, pd->dst, key.af);
+ key.ext.port = th->th_sport;
+ key.gwy.port = th->th_dport;
+ } else {
+ PF_ACPY(&key.lan.addr, pd->src, key.af);
+ PF_ACPY(&key.ext.addr, pd->dst, key.af);
+ key.lan.port = th->th_sport;
+ key.ext.port = th->th_dport;
+ }
+
+ STATE_LOOKUP();
+
+ if (direction == (*state)->direction) {
+ src = &(*state)->src;
+ dst = &(*state)->dst;
+ } else {
+ src = &(*state)->dst;
+ dst = &(*state)->src;
+ }
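+	/*
+	 * The state's src/dst peers are fixed relative to the direction
+	 * of the packet that created the state, so replies traveling
+	 * the other way have their peer roles swapped here.
+	 */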
+
+ if ((*state)->src.state == PF_TCPS_PROXY_SRC) {
+ if (direction != (*state)->direction) {
+ REASON_SET(reason, PFRES_SYNPROXY);
+ return (PF_SYNPROXY_DROP);
+ }
+ if (th->th_flags & TH_SYN) {
+ if (ntohl(th->th_seq) != (*state)->src.seqlo) {
+ REASON_SET(reason, PFRES_SYNPROXY);
+ return (PF_DROP);
+ }
+#ifdef __FreeBSD__
+ pf_send_tcp(NULL, (*state)->rule.ptr, pd->af, pd->dst,
+#else
+ pf_send_tcp((*state)->rule.ptr, pd->af, pd->dst,
+#endif
+ pd->src, th->th_dport, th->th_sport,
+ (*state)->src.seqhi, ntohl(th->th_seq) + 1,
+ TH_SYN|TH_ACK, 0, (*state)->src.mss, 0, 1,
+ 0, NULL, NULL);
+ REASON_SET(reason, PFRES_SYNPROXY);
+ return (PF_SYNPROXY_DROP);
+ } else if (!(th->th_flags & TH_ACK) ||
+ (ntohl(th->th_ack) != (*state)->src.seqhi + 1) ||
+ (ntohl(th->th_seq) != (*state)->src.seqlo + 1)) {
+ REASON_SET(reason, PFRES_SYNPROXY);
+ return (PF_DROP);
+ } else if ((*state)->src_node != NULL &&
+ pf_src_connlimit(state)) {
+ REASON_SET(reason, PFRES_SRCLIMIT);
+ return (PF_DROP);
+ } else
+ (*state)->src.state = PF_TCPS_PROXY_DST;
+ }
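+	/*
+	 * Synproxy overview: in PF_TCPS_PROXY_SRC pf completed the
+	 * handshake with the client above; in PF_TCPS_PROXY_DST it
+	 * opens the real connection to the destination and then splices
+	 * the two halves together, with seqdiff absorbing the sequence
+	 * number offset between them.
+	 */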
+ if ((*state)->src.state == PF_TCPS_PROXY_DST) {
+ struct pf_state_host *src, *dst;
+
+ if (direction == PF_OUT) {
+ src = &(*state)->gwy;
+ dst = &(*state)->ext;
+ } else {
+ src = &(*state)->ext;
+ dst = &(*state)->lan;
+ }
+ if (direction == (*state)->direction) {
+ if (((th->th_flags & (TH_SYN|TH_ACK)) != TH_ACK) ||
+ (ntohl(th->th_ack) != (*state)->src.seqhi + 1) ||
+ (ntohl(th->th_seq) != (*state)->src.seqlo + 1)) {
+ REASON_SET(reason, PFRES_SYNPROXY);
+ return (PF_DROP);
+ }
+ (*state)->src.max_win = MAX(ntohs(th->th_win), 1);
+ if ((*state)->dst.seqhi == 1)
+ (*state)->dst.seqhi = htonl(arc4random());
+#ifdef __FreeBSD__
+ pf_send_tcp(NULL, (*state)->rule.ptr, pd->af,
+ &src->addr,
+#else
+ pf_send_tcp((*state)->rule.ptr, pd->af, &src->addr,
+#endif
+ &dst->addr, src->port, dst->port,
+ (*state)->dst.seqhi, 0, TH_SYN, 0,
+ (*state)->src.mss, 0, 0, (*state)->tag, NULL, NULL);
+ REASON_SET(reason, PFRES_SYNPROXY);
+ return (PF_SYNPROXY_DROP);
+ } else if (((th->th_flags & (TH_SYN|TH_ACK)) !=
+ (TH_SYN|TH_ACK)) ||
+ (ntohl(th->th_ack) != (*state)->dst.seqhi + 1)) {
+ REASON_SET(reason, PFRES_SYNPROXY);
+ return (PF_DROP);
+ } else {
+ (*state)->dst.max_win = MAX(ntohs(th->th_win), 1);
+ (*state)->dst.seqlo = ntohl(th->th_seq);
+#ifdef __FreeBSD__
+ pf_send_tcp(NULL, (*state)->rule.ptr, pd->af, pd->dst,
+#else
+ pf_send_tcp((*state)->rule.ptr, pd->af, pd->dst,
+#endif
+ pd->src, th->th_dport, th->th_sport,
+ ntohl(th->th_ack), ntohl(th->th_seq) + 1,
+ TH_ACK, (*state)->src.max_win, 0, 0, 0,
+ (*state)->tag, NULL, NULL);
+#ifdef __FreeBSD__
+ pf_send_tcp(NULL, (*state)->rule.ptr, pd->af,
+ &src->addr,
+#else
+ pf_send_tcp((*state)->rule.ptr, pd->af, &src->addr,
+#endif
+ &dst->addr, src->port, dst->port,
+ (*state)->src.seqhi + 1, (*state)->src.seqlo + 1,
+ TH_ACK, (*state)->dst.max_win, 0, 0, 1,
+ 0, NULL, NULL);
+ (*state)->src.seqdiff = (*state)->dst.seqhi -
+ (*state)->src.seqlo;
+ (*state)->dst.seqdiff = (*state)->src.seqhi -
+ (*state)->dst.seqlo;
+ (*state)->src.seqhi = (*state)->src.seqlo +
+ (*state)->dst.max_win;
+ (*state)->dst.seqhi = (*state)->dst.seqlo +
+ (*state)->src.max_win;
+ (*state)->src.wscale = (*state)->dst.wscale = 0;
+ (*state)->src.state = (*state)->dst.state =
+ TCPS_ESTABLISHED;
+ REASON_SET(reason, PFRES_SYNPROXY);
+ return (PF_SYNPROXY_DROP);
+ }
+ }
+
+ if (((th->th_flags & (TH_SYN|TH_ACK)) == TH_SYN) &&
+ dst->state >= TCPS_FIN_WAIT_2 &&
+ src->state >= TCPS_FIN_WAIT_2) {
+ if (pf_status.debug >= PF_DEBUG_MISC) {
+ printf("pf: state reuse ");
+ pf_print_state(*state);
+ pf_print_flags(th->th_flags);
+ printf("\n");
+ }
+ /* XXX make sure it's the same direction ?? */
+ (*state)->src.state = (*state)->dst.state = TCPS_CLOSED;
+ pf_unlink_state(*state);
+ *state = NULL;
+ return (PF_DROP);
+ }
+
+ if ((*state)->state_flags & PFSTATE_SLOPPY) {
+ if (pf_tcp_track_sloppy(src, dst, state, pd, reason) == PF_DROP)
+ return (PF_DROP);
+ } else {
+ if (pf_tcp_track_full(src, dst, state, kif, m, off, pd, reason,
+ &copyback) == PF_DROP)
+ return (PF_DROP);
+ }
+
+ /* translate source/destination address, if necessary */
+ if (STATE_TRANSLATE(*state)) {
+ if (direction == PF_OUT)
+ pf_change_ap(pd->src, &th->th_sport, pd->ip_sum,
+ &th->th_sum, &(*state)->gwy.addr,
+ (*state)->gwy.port, 0, pd->af);
+ else
+ pf_change_ap(pd->dst, &th->th_dport, pd->ip_sum,
+ &th->th_sum, &(*state)->lan.addr,
+ (*state)->lan.port, 0, pd->af);
+ m_copyback(m, off, sizeof(*th), (caddr_t)th);
+ } else if (copyback) {
+ /* Copyback sequence modulation or stateful scrub changes */
+ m_copyback(m, off, sizeof(*th), (caddr_t)th);
+ }
+
+ return (PF_PASS);
+}
+
+int
+pf_test_state_udp(struct pf_state **state, int direction, struct pfi_kif *kif,
+ struct mbuf *m, int off, void *h, struct pf_pdesc *pd)
+{
+ struct pf_state_peer *src, *dst;
+ struct pf_state_cmp key;
+ struct udphdr *uh = pd->hdr.udp;
+
+ key.af = pd->af;
+ key.proto = IPPROTO_UDP;
+ if (direction == PF_IN) {
+ PF_ACPY(&key.ext.addr, pd->src, key.af);
+ PF_ACPY(&key.gwy.addr, pd->dst, key.af);
+ key.ext.port = uh->uh_sport;
+ key.gwy.port = uh->uh_dport;
+ } else {
+ PF_ACPY(&key.lan.addr, pd->src, key.af);
+ PF_ACPY(&key.ext.addr, pd->dst, key.af);
+ key.lan.port = uh->uh_sport;
+ key.ext.port = uh->uh_dport;
+ }
+
+ STATE_LOOKUP();
+
+ if (direction == (*state)->direction) {
+ src = &(*state)->src;
+ dst = &(*state)->dst;
+ } else {
+ src = &(*state)->dst;
+ dst = &(*state)->src;
+ }
+
+ /* update states */
+ if (src->state < PFUDPS_SINGLE)
+ src->state = PFUDPS_SINGLE;
+ if (dst->state == PFUDPS_SINGLE)
+ dst->state = PFUDPS_MULTIPLE;
+
+ /* update expire time */
+ (*state)->expire = time_second;
+ if (src->state == PFUDPS_MULTIPLE && dst->state == PFUDPS_MULTIPLE)
+ (*state)->timeout = PFTM_UDP_MULTIPLE;
+ else
+ (*state)->timeout = PFTM_UDP_SINGLE;
+
+ /* translate source/destination address, if necessary */
+ if (STATE_TRANSLATE(*state)) {
+ if (direction == PF_OUT)
+ pf_change_ap(pd->src, &uh->uh_sport, pd->ip_sum,
+ &uh->uh_sum, &(*state)->gwy.addr,
+ (*state)->gwy.port, 1, pd->af);
+ else
+ pf_change_ap(pd->dst, &uh->uh_dport, pd->ip_sum,
+ &uh->uh_sum, &(*state)->lan.addr,
+ (*state)->lan.port, 1, pd->af);
+ m_copyback(m, off, sizeof(*uh), (caddr_t)uh);
+ }
+
+ return (PF_PASS);
+}
+
+int
+pf_test_state_icmp(struct pf_state **state, int direction, struct pfi_kif *kif,
+ struct mbuf *m, int off, void *h, struct pf_pdesc *pd, u_short *reason)
+{
+ struct pf_addr *saddr = pd->src, *daddr = pd->dst;
+ u_int16_t icmpid = 0; /* make the compiler happy */
+ u_int16_t *icmpsum = NULL; /* make the compiler happy */
+ u_int8_t icmptype = 0; /* make the compiler happy */
+ int state_icmp = 0;
+ struct pf_state_cmp key;
+
+ switch (pd->proto) {
+#ifdef INET
+ case IPPROTO_ICMP:
+ icmptype = pd->hdr.icmp->icmp_type;
+ icmpid = pd->hdr.icmp->icmp_id;
+ icmpsum = &pd->hdr.icmp->icmp_cksum;
+
+ if (icmptype == ICMP_UNREACH ||
+ icmptype == ICMP_SOURCEQUENCH ||
+ icmptype == ICMP_REDIRECT ||
+ icmptype == ICMP_TIMXCEED ||
+ icmptype == ICMP_PARAMPROB)
+ state_icmp++;
+ break;
+#endif /* INET */
+#ifdef INET6
+ case IPPROTO_ICMPV6:
+ icmptype = pd->hdr.icmp6->icmp6_type;
+ icmpid = pd->hdr.icmp6->icmp6_id;
+ icmpsum = &pd->hdr.icmp6->icmp6_cksum;
+
+ if (icmptype == ICMP6_DST_UNREACH ||
+ icmptype == ICMP6_PACKET_TOO_BIG ||
+ icmptype == ICMP6_TIME_EXCEEDED ||
+ icmptype == ICMP6_PARAM_PROB)
+ state_icmp++;
+ break;
+#endif /* INET6 */
+ }
+
+ if (!state_icmp) {
+
+ /*
+ * ICMP query/reply message not related to a TCP/UDP packet.
+ * Search for an ICMP state.
+ */
+ key.af = pd->af;
+ key.proto = pd->proto;
+ if (direction == PF_IN) {
+ PF_ACPY(&key.ext.addr, pd->src, key.af);
+ PF_ACPY(&key.gwy.addr, pd->dst, key.af);
+ key.ext.port = 0;
+ key.gwy.port = icmpid;
+ } else {
+ PF_ACPY(&key.lan.addr, pd->src, key.af);
+ PF_ACPY(&key.ext.addr, pd->dst, key.af);
+ key.lan.port = icmpid;
+ key.ext.port = 0;
+ }
+
+ STATE_LOOKUP();
+
+ (*state)->expire = time_second;
+ (*state)->timeout = PFTM_ICMP_ERROR_REPLY;
+
+ /* translate source/destination address, if necessary */
+ if (STATE_TRANSLATE(*state)) {
+ if (direction == PF_OUT) {
+ switch (pd->af) {
+#ifdef INET
+ case AF_INET:
+ pf_change_a(&saddr->v4.s_addr,
+ pd->ip_sum,
+ (*state)->gwy.addr.v4.s_addr, 0);
+ pd->hdr.icmp->icmp_cksum =
+ pf_cksum_fixup(
+ pd->hdr.icmp->icmp_cksum, icmpid,
+ (*state)->gwy.port, 0);
+ pd->hdr.icmp->icmp_id =
+ (*state)->gwy.port;
+ m_copyback(m, off, ICMP_MINLEN,
+ (caddr_t)pd->hdr.icmp);
+ break;
+#endif /* INET */
+#ifdef INET6
+ case AF_INET6:
+ pf_change_a6(saddr,
+ &pd->hdr.icmp6->icmp6_cksum,
+ &(*state)->gwy.addr, 0);
+ m_copyback(m, off,
+ sizeof(struct icmp6_hdr),
+ (caddr_t)pd->hdr.icmp6);
+ break;
+#endif /* INET6 */
+ }
+ } else {
+ switch (pd->af) {
+#ifdef INET
+ case AF_INET:
+ pf_change_a(&daddr->v4.s_addr,
+ pd->ip_sum,
+ (*state)->lan.addr.v4.s_addr, 0);
+ pd->hdr.icmp->icmp_cksum =
+ pf_cksum_fixup(
+ pd->hdr.icmp->icmp_cksum, icmpid,
+ (*state)->lan.port, 0);
+ pd->hdr.icmp->icmp_id =
+ (*state)->lan.port;
+ m_copyback(m, off, ICMP_MINLEN,
+ (caddr_t)pd->hdr.icmp);
+ break;
+#endif /* INET */
+#ifdef INET6
+ case AF_INET6:
+ pf_change_a6(daddr,
+ &pd->hdr.icmp6->icmp6_cksum,
+ &(*state)->lan.addr, 0);
+ m_copyback(m, off,
+ sizeof(struct icmp6_hdr),
+ (caddr_t)pd->hdr.icmp6);
+ break;
+#endif /* INET6 */
+ }
+ }
+ }
+
+ return (PF_PASS);
+
+ } else {
+ /*
+ * ICMP error message in response to a TCP/UDP packet.
+ * Extract the inner TCP/UDP header and search for that state.
+ */
+
+ struct pf_pdesc pd2;
+#ifdef INET
+ struct ip h2;
+#endif /* INET */
+#ifdef INET6
+ struct ip6_hdr h2_6;
+ int terminal = 0;
+#endif /* INET6 */
+ int ipoff2 = 0; /* make the compiler happy */
+ int off2 = 0; /* make the compiler happy */
+
+ pd2.af = pd->af;
+ switch (pd->af) {
+#ifdef INET
+ case AF_INET:
+ /* offset of h2 in mbuf chain */
+ ipoff2 = off + ICMP_MINLEN;
+
+ if (!pf_pull_hdr(m, ipoff2, &h2, sizeof(h2),
+ NULL, reason, pd2.af)) {
+ DPFPRINTF(PF_DEBUG_MISC,
+ ("pf: ICMP error message too short "
+ "(ip)\n"));
+ return (PF_DROP);
+ }
+ /*
+ * ICMP error messages don't refer to non-first
+ * fragments
+ */
+ if (h2.ip_off & htons(IP_OFFMASK)) {
+ REASON_SET(reason, PFRES_FRAG);
+ return (PF_DROP);
+ }
+
+ /* offset of protocol header that follows h2 */
+ off2 = ipoff2 + (h2.ip_hl << 2);
+
+ pd2.proto = h2.ip_p;
+ pd2.src = (struct pf_addr *)&h2.ip_src;
+ pd2.dst = (struct pf_addr *)&h2.ip_dst;
+ pd2.ip_sum = &h2.ip_sum;
+ break;
+#endif /* INET */
+#ifdef INET6
+ case AF_INET6:
+ ipoff2 = off + sizeof(struct icmp6_hdr);
+
+ if (!pf_pull_hdr(m, ipoff2, &h2_6, sizeof(h2_6),
+ NULL, reason, pd2.af)) {
+ DPFPRINTF(PF_DEBUG_MISC,
+ ("pf: ICMP error message too short "
+ "(ip6)\n"));
+ return (PF_DROP);
+ }
+ pd2.proto = h2_6.ip6_nxt;
+ pd2.src = (struct pf_addr *)&h2_6.ip6_src;
+ pd2.dst = (struct pf_addr *)&h2_6.ip6_dst;
+ pd2.ip_sum = NULL;
+ off2 = ipoff2 + sizeof(h2_6);
+ do {
+ switch (pd2.proto) {
+ case IPPROTO_FRAGMENT:
+ /*
+ * ICMPv6 error messages don't refer
+ * to non-first fragments
+ */
+ REASON_SET(reason, PFRES_FRAG);
+ return (PF_DROP);
+ case IPPROTO_AH:
+ case IPPROTO_HOPOPTS:
+ case IPPROTO_ROUTING:
+ case IPPROTO_DSTOPTS: {
+ /* get next header and header length */
+ struct ip6_ext opt6;
+
+ if (!pf_pull_hdr(m, off2, &opt6,
+ sizeof(opt6), NULL, reason,
+ pd2.af)) {
+ DPFPRINTF(PF_DEBUG_MISC,
+ ("pf: ICMPv6 short opt\n"));
+ return (PF_DROP);
+ }
+ if (pd2.proto == IPPROTO_AH)
+ off2 += (opt6.ip6e_len + 2) * 4;
+ else
+ off2 += (opt6.ip6e_len + 1) * 8;
+ pd2.proto = opt6.ip6e_nxt;
+ /* go to the next header */
+ break;
+ }
+ default:
+ terminal++;
+ break;
+ }
+ } while (!terminal);
+ break;
+#endif /* INET6 */
+#ifdef __FreeBSD__
+ default:
+ panic("AF not supported: %d", pd->af);
+#endif
+ }
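+		/*
+		 * pd2 now describes the packet quoted inside the ICMP
+		 * error.  That inner packet traveled in the opposite
+		 * direction of the error itself, which is why the state
+		 * keys below use pd2.dst and pd2.src swapped relative to
+		 * the plain state lookups above.
+		 */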
+
+ switch (pd2.proto) {
+ case IPPROTO_TCP: {
+ struct tcphdr th;
+ u_int32_t seq;
+ struct pf_state_peer *src, *dst;
+ u_int8_t dws;
+ int copyback = 0;
+
+ /*
+ * Only the first 8 bytes of the TCP header are
+ * guaranteed to be present. Don't access any TCP
+ * header fields after th_seq; an ackskew test is
+ * therefore not possible.
+ */
+ if (!pf_pull_hdr(m, off2, &th, 8, NULL, reason,
+ pd2.af)) {
+ DPFPRINTF(PF_DEBUG_MISC,
+ ("pf: ICMP error message too short "
+ "(tcp)\n"));
+ return (PF_DROP);
+ }
+
+ key.af = pd2.af;
+ key.proto = IPPROTO_TCP;
+ if (direction == PF_IN) {
+ PF_ACPY(&key.ext.addr, pd2.dst, key.af);
+ PF_ACPY(&key.gwy.addr, pd2.src, key.af);
+ key.ext.port = th.th_dport;
+ key.gwy.port = th.th_sport;
+ } else {
+ PF_ACPY(&key.lan.addr, pd2.dst, key.af);
+ PF_ACPY(&key.ext.addr, pd2.src, key.af);
+ key.lan.port = th.th_dport;
+ key.ext.port = th.th_sport;
+ }
+
+ STATE_LOOKUP();
+
+ if (direction == (*state)->direction) {
+ src = &(*state)->dst;
+ dst = &(*state)->src;
+ } else {
+ src = &(*state)->src;
+ dst = &(*state)->dst;
+ }
+
+ if (src->wscale && dst->wscale)
+ dws = dst->wscale & PF_WSCALE_MASK;
+ else
+ dws = 0;
+
+ /* Demodulate sequence number */
+ seq = ntohl(th.th_seq) - src->seqdiff;
+ if (src->seqdiff) {
+ pf_change_a(&th.th_seq, icmpsum,
+ htonl(seq), 0);
+ copyback = 1;
+ }
+
+ if (!((*state)->state_flags & PFSTATE_SLOPPY) &&
+ (!SEQ_GEQ(src->seqhi, seq) ||
+ !SEQ_GEQ(seq, src->seqlo - (dst->max_win << dws)))) {
+ if (pf_status.debug >= PF_DEBUG_MISC) {
+ printf("pf: BAD ICMP %d:%d ",
+ icmptype, pd->hdr.icmp->icmp_code);
+ pf_print_host(pd->src, 0, pd->af);
+ printf(" -> ");
+ pf_print_host(pd->dst, 0, pd->af);
+ printf(" state: ");
+ pf_print_state(*state);
+ printf(" seq=%u\n", seq);
+ }
+ REASON_SET(reason, PFRES_BADSTATE);
+ return (PF_DROP);
+ }
+
+ if (STATE_TRANSLATE(*state)) {
+ if (direction == PF_IN) {
+ pf_change_icmp(pd2.src, &th.th_sport,
+ daddr, &(*state)->lan.addr,
+ (*state)->lan.port, NULL,
+ pd2.ip_sum, icmpsum,
+ pd->ip_sum, 0, pd2.af);
+ } else {
+ pf_change_icmp(pd2.dst, &th.th_dport,
+ saddr, &(*state)->gwy.addr,
+ (*state)->gwy.port, NULL,
+ pd2.ip_sum, icmpsum,
+ pd->ip_sum, 0, pd2.af);
+ }
+ copyback = 1;
+ }
+
+ if (copyback) {
+ switch (pd2.af) {
+#ifdef INET
+ case AF_INET:
+ m_copyback(m, off, ICMP_MINLEN,
+ (caddr_t)pd->hdr.icmp);
+ m_copyback(m, ipoff2, sizeof(h2),
+ (caddr_t)&h2);
+ break;
+#endif /* INET */
+#ifdef INET6
+ case AF_INET6:
+ m_copyback(m, off,
+ sizeof(struct icmp6_hdr),
+ (caddr_t)pd->hdr.icmp6);
+ m_copyback(m, ipoff2, sizeof(h2_6),
+ (caddr_t)&h2_6);
+ break;
+#endif /* INET6 */
+ }
+ m_copyback(m, off2, 8, (caddr_t)&th);
+ }
+
+ return (PF_PASS);
+ break;
+ }
+ case IPPROTO_UDP: {
+ struct udphdr uh;
+
+ if (!pf_pull_hdr(m, off2, &uh, sizeof(uh),
+ NULL, reason, pd2.af)) {
+ DPFPRINTF(PF_DEBUG_MISC,
+ ("pf: ICMP error message too short "
+ "(udp)\n"));
+ return (PF_DROP);
+ }
+
+ key.af = pd2.af;
+ key.proto = IPPROTO_UDP;
+ if (direction == PF_IN) {
+ PF_ACPY(&key.ext.addr, pd2.dst, key.af);
+ PF_ACPY(&key.gwy.addr, pd2.src, key.af);
+ key.ext.port = uh.uh_dport;
+ key.gwy.port = uh.uh_sport;
+ } else {
+ PF_ACPY(&key.lan.addr, pd2.dst, key.af);
+ PF_ACPY(&key.ext.addr, pd2.src, key.af);
+ key.lan.port = uh.uh_dport;
+ key.ext.port = uh.uh_sport;
+ }
+
+ STATE_LOOKUP();
+
+ if (STATE_TRANSLATE(*state)) {
+ if (direction == PF_IN) {
+ pf_change_icmp(pd2.src, &uh.uh_sport,
+ daddr, &(*state)->lan.addr,
+ (*state)->lan.port, &uh.uh_sum,
+ pd2.ip_sum, icmpsum,
+ pd->ip_sum, 1, pd2.af);
+ } else {
+ pf_change_icmp(pd2.dst, &uh.uh_dport,
+ saddr, &(*state)->gwy.addr,
+ (*state)->gwy.port, &uh.uh_sum,
+ pd2.ip_sum, icmpsum,
+ pd->ip_sum, 1, pd2.af);
+ }
+ switch (pd2.af) {
+#ifdef INET
+ case AF_INET:
+ m_copyback(m, off, ICMP_MINLEN,
+ (caddr_t)pd->hdr.icmp);
+ m_copyback(m, ipoff2, sizeof(h2),
+ (caddr_t)&h2);
+ break;
+#endif /* INET */
+#ifdef INET6
+ case AF_INET6:
+ m_copyback(m, off,
+ sizeof(struct icmp6_hdr),
+ (caddr_t)pd->hdr.icmp6);
+ m_copyback(m, ipoff2, sizeof(h2_6),
+ (caddr_t)&h2_6);
+ break;
+#endif /* INET6 */
+ }
+ m_copyback(m, off2, sizeof(uh),
+ (caddr_t)&uh);
+ }
+
+ return (PF_PASS);
+ break;
+ }
+#ifdef INET
+ case IPPROTO_ICMP: {
+ struct icmp iih;
+
+ if (!pf_pull_hdr(m, off2, &iih, ICMP_MINLEN,
+ NULL, reason, pd2.af)) {
+ DPFPRINTF(PF_DEBUG_MISC,
+ ("pf: ICMP error message too short i"
+ "(icmp)\n"));
+ return (PF_DROP);
+ }
+
+ key.af = pd2.af;
+ key.proto = IPPROTO_ICMP;
+ if (direction == PF_IN) {
+ PF_ACPY(&key.ext.addr, pd2.dst, key.af);
+ PF_ACPY(&key.gwy.addr, pd2.src, key.af);
+ key.ext.port = 0;
+ key.gwy.port = iih.icmp_id;
+ } else {
+ PF_ACPY(&key.lan.addr, pd2.dst, key.af);
+ PF_ACPY(&key.ext.addr, pd2.src, key.af);
+ key.lan.port = iih.icmp_id;
+ key.ext.port = 0;
+ }
+
+ STATE_LOOKUP();
+
+ if (STATE_TRANSLATE(*state)) {
+ if (direction == PF_IN) {
+ pf_change_icmp(pd2.src, &iih.icmp_id,
+ daddr, &(*state)->lan.addr,
+ (*state)->lan.port, NULL,
+ pd2.ip_sum, icmpsum,
+ pd->ip_sum, 0, AF_INET);
+ } else {
+ pf_change_icmp(pd2.dst, &iih.icmp_id,
+ saddr, &(*state)->gwy.addr,
+ (*state)->gwy.port, NULL,
+ pd2.ip_sum, icmpsum,
+ pd->ip_sum, 0, AF_INET);
+ }
+ m_copyback(m, off, ICMP_MINLEN,
+ (caddr_t)pd->hdr.icmp);
+ m_copyback(m, ipoff2, sizeof(h2),
+ (caddr_t)&h2);
+ m_copyback(m, off2, ICMP_MINLEN,
+ (caddr_t)&iih);
+ }
+
+ return (PF_PASS);
+ break;
+ }
+#endif /* INET */
+#ifdef INET6
+ case IPPROTO_ICMPV6: {
+ struct icmp6_hdr iih;
+
+ if (!pf_pull_hdr(m, off2, &iih,
+ sizeof(struct icmp6_hdr), NULL, reason, pd2.af)) {
+ DPFPRINTF(PF_DEBUG_MISC,
+ ("pf: ICMP error message too short "
+ "(icmp6)\n"));
+ return (PF_DROP);
+ }
+
+ key.af = pd2.af;
+ key.proto = IPPROTO_ICMPV6;
+ if (direction == PF_IN) {
+ PF_ACPY(&key.ext.addr, pd2.dst, key.af);
+ PF_ACPY(&key.gwy.addr, pd2.src, key.af);
+ key.ext.port = 0;
+ key.gwy.port = iih.icmp6_id;
+ } else {
+ PF_ACPY(&key.lan.addr, pd2.dst, key.af);
+ PF_ACPY(&key.ext.addr, pd2.src, key.af);
+ key.lan.port = iih.icmp6_id;
+ key.ext.port = 0;
+ }
+
+ STATE_LOOKUP();
+
+ if (STATE_TRANSLATE(*state)) {
+ if (direction == PF_IN) {
+ pf_change_icmp(pd2.src, &iih.icmp6_id,
+ daddr, &(*state)->lan.addr,
+ (*state)->lan.port, NULL,
+ pd2.ip_sum, icmpsum,
+ pd->ip_sum, 0, AF_INET6);
+ } else {
+ pf_change_icmp(pd2.dst, &iih.icmp6_id,
+ saddr, &(*state)->gwy.addr,
+ (*state)->gwy.port, NULL,
+ pd2.ip_sum, icmpsum,
+ pd->ip_sum, 0, AF_INET6);
+ }
+ m_copyback(m, off, sizeof(struct icmp6_hdr),
+ (caddr_t)pd->hdr.icmp6);
+ m_copyback(m, ipoff2, sizeof(h2_6),
+ (caddr_t)&h2_6);
+ m_copyback(m, off2, sizeof(struct icmp6_hdr),
+ (caddr_t)&iih);
+ }
+
+ return (PF_PASS);
+ break;
+ }
+#endif /* INET6 */
+ default: {
+ key.af = pd2.af;
+ key.proto = pd2.proto;
+ if (direction == PF_IN) {
+ PF_ACPY(&key.ext.addr, pd2.dst, key.af);
+ PF_ACPY(&key.gwy.addr, pd2.src, key.af);
+ key.ext.port = 0;
+ key.gwy.port = 0;
+ } else {
+ PF_ACPY(&key.lan.addr, pd2.dst, key.af);
+ PF_ACPY(&key.ext.addr, pd2.src, key.af);
+ key.lan.port = 0;
+ key.ext.port = 0;
+ }
+
+ STATE_LOOKUP();
+
+ if (STATE_TRANSLATE(*state)) {
+ if (direction == PF_IN) {
+ pf_change_icmp(pd2.src, NULL,
+ daddr, &(*state)->lan.addr,
+ 0, NULL,
+ pd2.ip_sum, icmpsum,
+ pd->ip_sum, 0, pd2.af);
+ } else {
+ pf_change_icmp(pd2.dst, NULL,
+ saddr, &(*state)->gwy.addr,
+ 0, NULL,
+ pd2.ip_sum, icmpsum,
+ pd->ip_sum, 0, pd2.af);
+ }
+ switch (pd2.af) {
+#ifdef INET
+ case AF_INET:
+ m_copyback(m, off, ICMP_MINLEN,
+ (caddr_t)pd->hdr.icmp);
+ m_copyback(m, ipoff2, sizeof(h2),
+ (caddr_t)&h2);
+ break;
+#endif /* INET */
+#ifdef INET6
+ case AF_INET6:
+ m_copyback(m, off,
+ sizeof(struct icmp6_hdr),
+ (caddr_t)pd->hdr.icmp6);
+ m_copyback(m, ipoff2, sizeof(h2_6),
+ (caddr_t)&h2_6);
+ break;
+#endif /* INET6 */
+ }
+ }
+
+ return (PF_PASS);
+ break;
+ }
+ }
+ }
+}
+
+int
+pf_test_state_other(struct pf_state **state, int direction, struct pfi_kif *kif,
+ struct pf_pdesc *pd)
+{
+ struct pf_state_peer *src, *dst;
+ struct pf_state_cmp key;
+
+ key.af = pd->af;
+ key.proto = pd->proto;
+ if (direction == PF_IN) {
+ PF_ACPY(&key.ext.addr, pd->src, key.af);
+ PF_ACPY(&key.gwy.addr, pd->dst, key.af);
+ key.ext.port = 0;
+ key.gwy.port = 0;
+ } else {
+ PF_ACPY(&key.lan.addr, pd->src, key.af);
+ PF_ACPY(&key.ext.addr, pd->dst, key.af);
+ key.lan.port = 0;
+ key.ext.port = 0;
+ }
+
+ STATE_LOOKUP();
+
+ if (direction == (*state)->direction) {
+ src = &(*state)->src;
+ dst = &(*state)->dst;
+ } else {
+ src = &(*state)->dst;
+ dst = &(*state)->src;
+ }
+
+ /* update states */
+ if (src->state < PFOTHERS_SINGLE)
+ src->state = PFOTHERS_SINGLE;
+ if (dst->state == PFOTHERS_SINGLE)
+ dst->state = PFOTHERS_MULTIPLE;
+
+ /* update expire time */
+ (*state)->expire = time_second;
+ if (src->state == PFOTHERS_MULTIPLE && dst->state == PFOTHERS_MULTIPLE)
+ (*state)->timeout = PFTM_OTHER_MULTIPLE;
+ else
+ (*state)->timeout = PFTM_OTHER_SINGLE;
+
+ /* translate source/destination address, if necessary */
+ if (STATE_TRANSLATE(*state)) {
+ if (direction == PF_OUT)
+ switch (pd->af) {
+#ifdef INET
+ case AF_INET:
+ pf_change_a(&pd->src->v4.s_addr,
+ pd->ip_sum, (*state)->gwy.addr.v4.s_addr,
+ 0);
+ break;
+#endif /* INET */
+#ifdef INET6
+ case AF_INET6:
+ PF_ACPY(pd->src, &(*state)->gwy.addr, pd->af);
+ break;
+#endif /* INET6 */
+ }
+ else
+ switch (pd->af) {
+#ifdef INET
+ case AF_INET:
+ pf_change_a(&pd->dst->v4.s_addr,
+ pd->ip_sum, (*state)->lan.addr.v4.s_addr,
+ 0);
+ break;
+#endif /* INET */
+#ifdef INET6
+ case AF_INET6:
+ PF_ACPY(pd->dst, &(*state)->lan.addr, pd->af);
+ break;
+#endif /* INET6 */
+ }
+ }
+
+ return (PF_PASS);
+}
+
+/*
+ * ipoff and off are measured from the start of the mbuf chain.
+ * h must be at "ipoff" on the mbuf chain.
+ */
+void *
+pf_pull_hdr(struct mbuf *m, int off, void *p, int len,
+ u_short *actionp, u_short *reasonp, sa_family_t af)
+{
+ switch (af) {
+#ifdef INET
+ case AF_INET: {
+ struct ip *h = mtod(m, struct ip *);
+ u_int16_t fragoff = (ntohs(h->ip_off) & IP_OFFMASK) << 3;
+
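+		/*
+		 * A non-zero fragment offset means this is not the first
+		 * fragment and the requested header is not present: if
+		 * the fragment lies entirely beyond the header it may
+		 * pass (the header was checked on the first fragment),
+		 * otherwise it truncates the header and is dropped.
+		 */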
+ if (fragoff) {
+ if (fragoff >= len)
+ ACTION_SET(actionp, PF_PASS);
+ else {
+ ACTION_SET(actionp, PF_DROP);
+ REASON_SET(reasonp, PFRES_FRAG);
+ }
+ return (NULL);
+ }
+ if (m->m_pkthdr.len < off + len ||
+ ntohs(h->ip_len) < off + len) {
+ ACTION_SET(actionp, PF_DROP);
+ REASON_SET(reasonp, PFRES_SHORT);
+ return (NULL);
+ }
+ break;
+ }
+#endif /* INET */
+#ifdef INET6
+ case AF_INET6: {
+ struct ip6_hdr *h = mtod(m, struct ip6_hdr *);
+
+ if (m->m_pkthdr.len < off + len ||
+ (ntohs(h->ip6_plen) + sizeof(struct ip6_hdr)) <
+ (unsigned)(off + len)) {
+ ACTION_SET(actionp, PF_DROP);
+ REASON_SET(reasonp, PFRES_SHORT);
+ return (NULL);
+ }
+ break;
+ }
+#endif /* INET6 */
+ }
+ m_copydata(m, off, len, p);
+ return (p);
+}
+
+int
+pf_routable(struct pf_addr *addr, sa_family_t af, struct pfi_kif *kif)
+{
+ struct sockaddr_in *dst;
+ int ret = 1;
+ int check_mpath;
+#ifndef __FreeBSD__
+ extern int ipmultipath;
+#endif
+#ifdef INET6
+#ifndef __FreeBSD__
+ extern int ip6_multipath;
+#endif
+ struct sockaddr_in6 *dst6;
+ struct route_in6 ro;
+#else
+ struct route ro;
+#endif
+ struct radix_node *rn;
+ struct rtentry *rt;
+ struct ifnet *ifp;
+
+ check_mpath = 0;
+ bzero(&ro, sizeof(ro));
+ switch (af) {
+ case AF_INET:
+ dst = satosin(&ro.ro_dst);
+ dst->sin_family = AF_INET;
+ dst->sin_len = sizeof(*dst);
+ dst->sin_addr = addr->v4;
+#ifndef __FreeBSD__ /* MULTIPATH_ROUTING */
+ if (ipmultipath)
+ check_mpath = 1;
+#endif
+ break;
+#ifdef INET6
+ case AF_INET6:
+ dst6 = (struct sockaddr_in6 *)&ro.ro_dst;
+ dst6->sin6_family = AF_INET6;
+ dst6->sin6_len = sizeof(*dst6);
+ dst6->sin6_addr = addr->v6;
+#ifndef __FreeBSD__ /* MULTIPATH_ROUTING */
+ if (ip6_multipath)
+ check_mpath = 1;
+#endif
+ break;
+#endif /* INET6 */
+ default:
+ return (0);
+ }
+
+ /* Skip checks for ipsec interfaces */
+ if (kif != NULL && kif->pfik_ifp->if_type == IFT_ENC)
+ goto out;
+
+#ifdef __FreeBSD__
+/* XXX MRT not always INET; stick with table 0 though */
+ if (af == AF_INET)
+ in_rtalloc_ign((struct route *)&ro, 0, 0);
+ else
+ rtalloc_ign((struct route *)&ro, 0);
+#else /* ! __FreeBSD__ */
+ rtalloc_noclone((struct route *)&ro, NO_CLONING);
+#endif
+
+ if (ro.ro_rt != NULL) {
+ /* No interface given, this is a no-route check */
+ if (kif == NULL)
+ goto out;
+
+ if (kif->pfik_ifp == NULL) {
+ ret = 0;
+ goto out;
+ }
+
+ /* Perform uRPF check if passed input interface */
+ ret = 0;
+ rn = (struct radix_node *)ro.ro_rt;
+ do {
+ rt = (struct rtentry *)rn;
+#ifndef __FreeBSD__ /* CARPDEV */
+ if (rt->rt_ifp->if_type == IFT_CARP)
+ ifp = rt->rt_ifp->if_carpdev;
+ else
+#endif
+ ifp = rt->rt_ifp;
+
+ if (kif->pfik_ifp == ifp)
+ ret = 1;
+#ifdef __FreeBSD__ /* MULTIPATH_ROUTING */
+ rn = NULL;
+#else
+ rn = rn_mpath_next(rn);
+#endif
+ } while (check_mpath == 1 && rn != NULL && ret == 0);
+ } else
+ ret = 0;
+out:
+ if (ro.ro_rt != NULL)
+ RTFREE(ro.ro_rt);
+ return (ret);
+}
+
+int
+pf_rtlabel_match(struct pf_addr *addr, sa_family_t af, struct pf_addr_wrap *aw)
+{
+ struct sockaddr_in *dst;
+#ifdef INET6
+ struct sockaddr_in6 *dst6;
+ struct route_in6 ro;
+#else
+ struct route ro;
+#endif
+ int ret = 0;
+
+ bzero(&ro, sizeof(ro));
+ switch (af) {
+ case AF_INET:
+ dst = satosin(&ro.ro_dst);
+ dst->sin_family = AF_INET;
+ dst->sin_len = sizeof(*dst);
+ dst->sin_addr = addr->v4;
+ break;
+#ifdef INET6
+ case AF_INET6:
+ dst6 = (struct sockaddr_in6 *)&ro.ro_dst;
+ dst6->sin6_family = AF_INET6;
+ dst6->sin6_len = sizeof(*dst6);
+ dst6->sin6_addr = addr->v6;
+ break;
+#endif /* INET6 */
+ default:
+ return (0);
+ }
+
+#ifdef __FreeBSD__
+# ifdef RTF_PRCLONING
+ rtalloc_ign((struct route *)&ro, (RTF_CLONING|RTF_PRCLONING));
+# else /* !RTF_PRCLONING */
+ if (af == AF_INET)
+ in_rtalloc_ign((struct route *)&ro, 0, 0);
+ else
+ rtalloc_ign((struct route *)&ro, 0);
+# endif
+#else /* ! __FreeBSD__ */
+ rtalloc_noclone((struct route *)&ro, NO_CLONING);
+#endif
+
+ if (ro.ro_rt != NULL) {
+#ifdef __FreeBSD__
+ /* XXX_IMPORT: later */
+#else
+ if (ro.ro_rt->rt_labelid == aw->v.rtlabel)
+ ret = 1;
+#endif
+ RTFREE(ro.ro_rt);
+ }
+
+ return (ret);
+}
+
+#ifdef INET
+
+void
+pf_route(struct mbuf **m, struct pf_rule *r, int dir, struct ifnet *oifp,
+ struct pf_state *s, struct pf_pdesc *pd)
+{
+ struct mbuf *m0, *m1;
+ struct route iproute;
+ struct route *ro = NULL;
+ struct sockaddr_in *dst;
+ struct ip *ip;
+ struct ifnet *ifp = NULL;
+ struct pf_addr naddr;
+ struct pf_src_node *sn = NULL;
+ int error = 0;
+#ifdef __FreeBSD__
+ int sw_csum;
+#endif
+#ifdef IPSEC
+ struct m_tag *mtag;
+#endif /* IPSEC */
+
+ if (m == NULL || *m == NULL || r == NULL ||
+ (dir != PF_IN && dir != PF_OUT) || oifp == NULL)
+ panic("pf_route: invalid parameters");
+
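+	/*
+	 * pf_mtag->routed counts how many times pf has re-routed this
+	 * mbuf; more than three passes is taken as a route-to loop and
+	 * the packet is dropped.
+	 */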
+ if (pd->pf_mtag->routed++ > 3) {
+ m0 = *m;
+ *m = NULL;
+ goto bad;
+ }
+
+ if (r->rt == PF_DUPTO) {
+#ifdef __FreeBSD__
+ if ((m0 = m_dup(*m, M_DONTWAIT)) == NULL)
+#else
+ if ((m0 = m_copym2(*m, 0, M_COPYALL, M_NOWAIT)) == NULL)
+#endif
+ return;
+ } else {
+ if ((r->rt == PF_REPLYTO) == (r->direction == dir))
+ return;
+ m0 = *m;
+ }
+
+ if (m0->m_len < sizeof(struct ip)) {
+ DPFPRINTF(PF_DEBUG_URGENT,
+ ("pf_route: m0->m_len < sizeof(struct ip)\n"));
+ goto bad;
+ }
+
+ ip = mtod(m0, struct ip *);
+
+ ro = &iproute;
+ bzero((caddr_t)ro, sizeof(*ro));
+ dst = satosin(&ro->ro_dst);
+ dst->sin_family = AF_INET;
+ dst->sin_len = sizeof(*dst);
+ dst->sin_addr = ip->ip_dst;
+
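+	/*
+	 * PF_FASTROUTE consults the regular routing table; route-to and
+	 * reply-to pick the outgoing interface and gateway from the
+	 * rule's address pool instead, or from the state's cached
+	 * rt_addr/rt_kif once a state exists.
+	 */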
+ if (r->rt == PF_FASTROUTE) {
+ in_rtalloc(ro, 0);
+ if (ro->ro_rt == 0) {
+ KMOD_IPSTAT_INC(ips_noroute);
+ goto bad;
+ }
+
+ ifp = ro->ro_rt->rt_ifp;
+ ro->ro_rt->rt_use++;
+
+ if (ro->ro_rt->rt_flags & RTF_GATEWAY)
+ dst = satosin(ro->ro_rt->rt_gateway);
+ } else {
+ if (TAILQ_EMPTY(&r->rpool.list)) {
+ DPFPRINTF(PF_DEBUG_URGENT,
+ ("pf_route: TAILQ_EMPTY(&r->rpool.list)\n"));
+ goto bad;
+ }
+ if (s == NULL) {
+ pf_map_addr(AF_INET, r, (struct pf_addr *)&ip->ip_src,
+ &naddr, NULL, &sn);
+ if (!PF_AZERO(&naddr, AF_INET))
+ dst->sin_addr.s_addr = naddr.v4.s_addr;
+ ifp = r->rpool.cur->kif ?
+ r->rpool.cur->kif->pfik_ifp : NULL;
+ } else {
+ if (!PF_AZERO(&s->rt_addr, AF_INET))
+ dst->sin_addr.s_addr =
+ s->rt_addr.v4.s_addr;
+ ifp = s->rt_kif ? s->rt_kif->pfik_ifp : NULL;
+ }
+ }
+ if (ifp == NULL)
+ goto bad;
+
+ if (oifp != ifp) {
+#ifdef __FreeBSD__
+ PF_UNLOCK();
+ if (pf_test(PF_OUT, ifp, &m0, NULL, NULL) != PF_PASS) {
+ PF_LOCK();
+ goto bad;
+ } else if (m0 == NULL) {
+ PF_LOCK();
+ goto done;
+ }
+ PF_LOCK();
+#else
+ if (pf_test(PF_OUT, ifp, &m0, NULL) != PF_PASS)
+ goto bad;
+ else if (m0 == NULL)
+ goto done;
+#endif
+ if (m0->m_len < sizeof(struct ip)) {
+ DPFPRINTF(PF_DEBUG_URGENT,
+ ("pf_route: m0->m_len < sizeof(struct ip)\n"));
+ goto bad;
+ }
+ ip = mtod(m0, struct ip *);
+ }
+
+#ifdef __FreeBSD__
+ /* Copied from FreeBSD 5.1-CURRENT ip_output. */
+ m0->m_pkthdr.csum_flags |= CSUM_IP;
+ sw_csum = m0->m_pkthdr.csum_flags & ~ifp->if_hwassist;
+ if (sw_csum & CSUM_DELAY_DATA) {
+ /*
+ * XXX: in_delayed_cksum assumes HBO for ip->ip_len (at least)
+ */
+ NTOHS(ip->ip_len);
+ NTOHS(ip->ip_off); /* XXX: needed? */
+ in_delayed_cksum(m0);
+ HTONS(ip->ip_len);
+ HTONS(ip->ip_off);
+ sw_csum &= ~CSUM_DELAY_DATA;
+ }
+ m0->m_pkthdr.csum_flags &= ifp->if_hwassist;
+
+ if (ntohs(ip->ip_len) <= ifp->if_mtu ||
+ (m0->m_pkthdr.csum_flags & ifp->if_hwassist & CSUM_TSO) != 0 ||
+ (ifp->if_hwassist & CSUM_FRAGMENT &&
+ ((ip->ip_off & htons(IP_DF)) == 0))) {
+ /*
+ * ip->ip_len = htons(ip->ip_len);
+ * ip->ip_off = htons(ip->ip_off);
+ */
+ ip->ip_sum = 0;
+ if (sw_csum & CSUM_DELAY_IP) {
+ /* From KAME */
+ if (ip->ip_v == IPVERSION &&
+ (ip->ip_hl << 2) == sizeof(*ip)) {
+ ip->ip_sum = in_cksum_hdr(ip);
+ } else {
+ ip->ip_sum = in_cksum(m0, ip->ip_hl << 2);
+ }
+ }
+ PF_UNLOCK();
+ error = (*ifp->if_output)(ifp, m0, sintosa(dst), ro);
+ PF_LOCK();
+ goto done;
+ }
+
+#else
+ /* Copied from ip_output. */
+#ifdef IPSEC
+ /*
+ * If deferred crypto processing is needed, check that the
+ * interface supports it.
+ */
+ if ((mtag = m_tag_find(m0, PACKET_TAG_IPSEC_OUT_CRYPTO_NEEDED, NULL))
+ != NULL && (ifp->if_capabilities & IFCAP_IPSEC) == 0) {
+ /* Notify IPsec to do its own crypto. */
+ ipsp_skipcrypto_unmark((struct tdb_ident *)(mtag + 1));
+ goto bad;
+ }
+#endif /* IPSEC */
+
+ /* Catch routing changes wrt. hardware checksumming for TCP or UDP. */
+ if (m0->m_pkthdr.csum_flags & M_TCPV4_CSUM_OUT) {
+ if (!(ifp->if_capabilities & IFCAP_CSUM_TCPv4) ||
+ ifp->if_bridge != NULL) {
+ in_delayed_cksum(m0);
+ m0->m_pkthdr.csum_flags &= ~M_TCPV4_CSUM_OUT; /* Clear */
+ }
+ } else if (m0->m_pkthdr.csum_flags & M_UDPV4_CSUM_OUT) {
+ if (!(ifp->if_capabilities & IFCAP_CSUM_UDPv4) ||
+ ifp->if_bridge != NULL) {
+ in_delayed_cksum(m0);
+ m0->m_pkthdr.csum_flags &= ~M_UDPV4_CSUM_OUT; /* Clear */
+ }
+ }
+
+ if (ntohs(ip->ip_len) <= ifp->if_mtu) {
+ if ((ifp->if_capabilities & IFCAP_CSUM_IPv4) &&
+ ifp->if_bridge == NULL) {
+ m0->m_pkthdr.csum_flags |= M_IPV4_CSUM_OUT;
+ KMOD_IPSTAT_INC(ips_outhwcsum);
+ } else {
+ ip->ip_sum = 0;
+ ip->ip_sum = in_cksum(m0, ip->ip_hl << 2);
+ }
+ /* Update relevant hardware checksum stats for TCP/UDP */
+ if (m0->m_pkthdr.csum_flags & M_TCPV4_CSUM_OUT)
+ KMOD_TCPSTAT_INC(tcps_outhwcsum);
+ else if (m0->m_pkthdr.csum_flags & M_UDPV4_CSUM_OUT)
+ KMOD_UDPSTAT_INC(udps_outhwcsum);
+ error = (*ifp->if_output)(ifp, m0, sintosa(dst), NULL);
+ goto done;
+ }
+#endif
+ /*
+ * Too large for interface; fragment if possible.
+ * Must be able to put at least 8 bytes per fragment.
+ */
+ if (ip->ip_off & htons(IP_DF) || (m0->m_pkthdr.csum_flags & CSUM_TSO)) {
+ KMOD_IPSTAT_INC(ips_cantfrag);
+ if (r->rt != PF_DUPTO) {
+#ifdef __FreeBSD__
+ /* icmp_error() expects host byte ordering */
+ NTOHS(ip->ip_len);
+ NTOHS(ip->ip_off);
+ PF_UNLOCK();
+ icmp_error(m0, ICMP_UNREACH, ICMP_UNREACH_NEEDFRAG, 0,
+ ifp->if_mtu);
+ PF_LOCK();
+#else
+ icmp_error(m0, ICMP_UNREACH, ICMP_UNREACH_NEEDFRAG, 0,
+ ifp->if_mtu);
+#endif
+ goto done;
+ } else
+ goto bad;
+ }
+
+ m1 = m0;
+#ifdef __FreeBSD__
+ /*
+ * XXX: this is cheaper and less error prone than a private function
+ */
+ NTOHS(ip->ip_len);
+ NTOHS(ip->ip_off);
+ error = ip_fragment(ip, &m0, ifp->if_mtu, ifp->if_hwassist, sw_csum);
+#else
+ error = ip_fragment(m0, ifp, ifp->if_mtu);
+#endif
+ if (error) {
+#ifndef __FreeBSD__ /* ip_fragment does not do m_freem() on FreeBSD */
+ m0 = NULL;
+#endif
+ goto bad;
+ }
+
+ for (m0 = m1; m0; m0 = m1) {
+ m1 = m0->m_nextpkt;
+ m0->m_nextpkt = 0;
+#ifdef __FreeBSD__
+ if (error == 0) {
+ PF_UNLOCK();
+ error = (*ifp->if_output)(ifp, m0, sintosa(dst),
+ NULL);
+ PF_LOCK();
+ } else
+#else
+ if (error == 0)
+ error = (*ifp->if_output)(ifp, m0, sintosa(dst),
+ NULL);
+ else
+#endif
+ m_freem(m0);
+ }
+
+ if (error == 0)
+ KMOD_IPSTAT_INC(ips_fragmented);
+
+done:
+ if (r->rt != PF_DUPTO)
+ *m = NULL;
+ if (ro == &iproute && ro->ro_rt)
+ RTFREE(ro->ro_rt);
+ return;
+
+bad:
+ m_freem(m0);
+ goto done;
+}
+#endif /* INET */
+
+#ifdef INET6
+void
+pf_route6(struct mbuf **m, struct pf_rule *r, int dir, struct ifnet *oifp,
+ struct pf_state *s, struct pf_pdesc *pd)
+{
+ struct mbuf *m0;
+ struct route_in6 ip6route;
+ struct route_in6 *ro;
+ struct sockaddr_in6 *dst;
+ struct ip6_hdr *ip6;
+ struct ifnet *ifp = NULL;
+ struct pf_addr naddr;
+ struct pf_src_node *sn = NULL;
+ int error = 0;
+
+ if (m == NULL || *m == NULL || r == NULL ||
+ (dir != PF_IN && dir != PF_OUT) || oifp == NULL)
+ panic("pf_route6: invalid parameters");
+
+ if (pd->pf_mtag->routed++ > 3) {
+ m0 = *m;
+ *m = NULL;
+ goto bad;
+ }
+
+ if (r->rt == PF_DUPTO) {
+#ifdef __FreeBSD__
+ if ((m0 = m_dup(*m, M_DONTWAIT)) == NULL)
+#else
+ if ((m0 = m_copym2(*m, 0, M_COPYALL, M_NOWAIT)) == NULL)
+#endif
+ return;
+ } else {
+ if ((r->rt == PF_REPLYTO) == (r->direction == dir))
+ return;
+ m0 = *m;
+ }
+
+ if (m0->m_len < sizeof(struct ip6_hdr)) {
+ DPFPRINTF(PF_DEBUG_URGENT,
+ ("pf_route6: m0->m_len < sizeof(struct ip6_hdr)\n"));
+ goto bad;
+ }
+ ip6 = mtod(m0, struct ip6_hdr *);
+
+ ro = &ip6route;
+ bzero((caddr_t)ro, sizeof(*ro));
+ dst = (struct sockaddr_in6 *)&ro->ro_dst;
+ dst->sin6_family = AF_INET6;
+ dst->sin6_len = sizeof(*dst);
+ dst->sin6_addr = ip6->ip6_dst;
+
+ /* Cheat. XXX why only in the v6 case??? */
+ if (r->rt == PF_FASTROUTE) {
+#ifdef __FreeBSD__
+ m0->m_flags |= M_SKIP_FIREWALL;
+ PF_UNLOCK();
+ ip6_output(m0, NULL, NULL, 0, NULL, NULL, NULL);
+ PF_LOCK();
+#else
+ mtag = m_tag_get(PACKET_TAG_PF_GENERATED, 0, M_NOWAIT);
+ if (mtag == NULL)
+ goto bad;
+ m_tag_prepend(m0, mtag);
+ pd->pf_mtag->flags |= PF_TAG_GENERATED;
+ ip6_output(m0, NULL, NULL, 0, NULL, NULL);
+#endif
+ return;
+ }
+
+ if (TAILQ_EMPTY(&r->rpool.list)) {
+ DPFPRINTF(PF_DEBUG_URGENT,
+ ("pf_route6: TAILQ_EMPTY(&r->rpool.list)\n"));
+ goto bad;
+ }
+ if (s == NULL) {
+ pf_map_addr(AF_INET6, r, (struct pf_addr *)&ip6->ip6_src,
+ &naddr, NULL, &sn);
+ if (!PF_AZERO(&naddr, AF_INET6))
+ PF_ACPY((struct pf_addr *)&dst->sin6_addr,
+ &naddr, AF_INET6);
+ ifp = r->rpool.cur->kif ? r->rpool.cur->kif->pfik_ifp : NULL;
+ } else {
+ if (!PF_AZERO(&s->rt_addr, AF_INET6))
+ PF_ACPY((struct pf_addr *)&dst->sin6_addr,
+ &s->rt_addr, AF_INET6);
+ ifp = s->rt_kif ? s->rt_kif->pfik_ifp : NULL;
+ }
+ if (ifp == NULL)
+ goto bad;
+
+ if (oifp != ifp) {
+#ifdef __FreeBSD__
+ PF_UNLOCK();
+ if (pf_test6(PF_OUT, ifp, &m0, NULL, NULL) != PF_PASS) {
+ PF_LOCK();
+ goto bad;
+ } else if (m0 == NULL) {
+ PF_LOCK();
+ goto done;
+ }
+ PF_LOCK();
+#else
+ if (pf_test6(PF_OUT, ifp, &m0, NULL) != PF_PASS)
+ goto bad;
+ else if (m0 == NULL)
+ goto done;
+#endif
+ if (m0->m_len < sizeof(struct ip6_hdr)) {
+ DPFPRINTF(PF_DEBUG_URGENT,
+ ("pf_route6: m0->m_len < sizeof(struct ip6_hdr)\n"));
+ goto bad;
+ }
+ ip6 = mtod(m0, struct ip6_hdr *);
+ }
+
+ /*
+ * If the packet is too large for the outgoing interface,
+ * send back an icmp6 error.
+ */
+ if (IN6_IS_SCOPE_EMBED(&dst->sin6_addr))
+ dst->sin6_addr.s6_addr16[1] = htons(ifp->if_index);
+ if ((u_long)m0->m_pkthdr.len <= ifp->if_mtu) {
+#ifdef __FreeBSD__
+ PF_UNLOCK();
+#endif
+ error = nd6_output(ifp, ifp, m0, dst, NULL);
+#ifdef __FreeBSD__
+ PF_LOCK();
+#endif
+ } else {
+ in6_ifstat_inc(ifp, ifs6_in_toobig);
+#ifdef __FreeBSD__
+ if (r->rt != PF_DUPTO) {
+ PF_UNLOCK();
+ icmp6_error(m0, ICMP6_PACKET_TOO_BIG, 0, ifp->if_mtu);
+ PF_LOCK();
+ } else
+#else
+ if (r->rt != PF_DUPTO)
+ icmp6_error(m0, ICMP6_PACKET_TOO_BIG, 0, ifp->if_mtu);
+ else
+#endif
+ goto bad;
+ }
+
+done:
+ if (r->rt != PF_DUPTO)
+ *m = NULL;
+ return;
+
+bad:
+ m_freem(m0);
+ goto done;
+}
+#endif /* INET6 */
+
+#ifdef __FreeBSD__
+/*
+ * FreeBSD supports cksum offloads for the following drivers:
+ * em(4), fxp(4), ixgb(4), lge(4), ndis(4), nge(4), re(4),
+ * ti(4), txp(4), xl(4)
+ *
+ * CSUM_DATA_VALID | CSUM_PSEUDO_HDR :
+ * the network driver performed the cksum, including the pseudo header;
+ * only csum_data needs to be verified
+ * CSUM_DATA_VALID :
+ * the network driver performed the cksum, but an additional pseudo
+ * header cksum computation with the partial csum_data is needed (i.e.
+ * no H/W support for the pseudo header, for instance hme(4), sk(4)
+ * and possibly gem(4))
+ *
+ * After validating the packet's cksum, set both the CSUM_DATA_VALID and
+ * CSUM_PSEUDO_HDR flags in order to avoid recomputing the cksum in the
+ * upper TCP/UDP layer.
+ * Also, set csum_data to 0xffff to force cksum validation.
+ */
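+/*
+ * Note on the checks below: csum_data holds a 16-bit one's complement
+ * sum; after folding in the pseudo header where needed, sum ^= 0xffff
+ * leaves zero exactly when the checksum verifies, which is what the
+ * "if (sum)" test keys on.
+ */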
+int
+pf_check_proto_cksum(struct mbuf *m, int off, int len, u_int8_t p, sa_family_t af)
+{
+ u_int16_t sum = 0;
+ int hw_assist = 0;
+ struct ip *ip;
+
+ if (off < sizeof(struct ip) || len < sizeof(struct udphdr))
+ return (1);
+ if (m->m_pkthdr.len < off + len)
+ return (1);
+
+ switch (p) {
+ case IPPROTO_TCP:
+ if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) {
+ if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR) {
+ sum = m->m_pkthdr.csum_data;
+ } else {
+ ip = mtod(m, struct ip *);
+ sum = in_pseudo(ip->ip_src.s_addr,
+ ip->ip_dst.s_addr, htonl((u_short)len +
+ m->m_pkthdr.csum_data + IPPROTO_TCP));
+ }
+ sum ^= 0xffff;
+ ++hw_assist;
+ }
+ break;
+ case IPPROTO_UDP:
+ if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) {
+ if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR) {
+ sum = m->m_pkthdr.csum_data;
+ } else {
+ ip = mtod(m, struct ip *);
+ sum = in_pseudo(ip->ip_src.s_addr,
+ ip->ip_dst.s_addr, htonl((u_short)len +
+ m->m_pkthdr.csum_data + IPPROTO_UDP));
+ }
+ sum ^= 0xffff;
+ ++hw_assist;
+ }
+ break;
+ case IPPROTO_ICMP:
+#ifdef INET6
+ case IPPROTO_ICMPV6:
+#endif /* INET6 */
+ break;
+ default:
+ return (1);
+ }
+
+ if (!hw_assist) {
+ switch (af) {
+ case AF_INET:
+ if (p == IPPROTO_ICMP) {
+ if (m->m_len < off)
+ return (1);
+ m->m_data += off;
+ m->m_len -= off;
+ sum = in_cksum(m, len);
+ m->m_data -= off;
+ m->m_len += off;
+ } else {
+ if (m->m_len < sizeof(struct ip))
+ return (1);
+ sum = in4_cksum(m, p, off, len);
+ }
+ break;
+#ifdef INET6
+ case AF_INET6:
+ if (m->m_len < sizeof(struct ip6_hdr))
+ return (1);
+ sum = in6_cksum(m, p, off, len);
+ break;
+#endif /* INET6 */
+ default:
+ return (1);
+ }
+ }
+ if (sum) {
+ switch (p) {
+ case IPPROTO_TCP:
+ {
+ KMOD_TCPSTAT_INC(tcps_rcvbadsum);
+ break;
+ }
+ case IPPROTO_UDP:
+ {
+ KMOD_UDPSTAT_INC(udps_badsum);
+ break;
+ }
+ case IPPROTO_ICMP:
+ {
+ KMOD_ICMPSTAT_INC(icps_checksum);
+ break;
+ }
+#ifdef INET6
+ case IPPROTO_ICMPV6:
+ {
+ KMOD_ICMP6STAT_INC(icp6s_checksum);
+ break;
+ }
+#endif /* INET6 */
+ }
+ return (1);
+ } else {
+ if (p == IPPROTO_TCP || p == IPPROTO_UDP) {
+ m->m_pkthdr.csum_flags |=
+ (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
+ m->m_pkthdr.csum_data = 0xffff;
+ }
+ }
+ return (0);
+}
+#else /* !__FreeBSD__ */
+/*
+ * Check the protocol (tcp/udp/icmp/icmp6) checksum and set the mbuf flag.
+ * off is the offset where the protocol header starts;
+ * len is the total length of the protocol header plus payload.
+ * Returns 0 when the checksum is valid, otherwise returns 1.
+ */
+int
+pf_check_proto_cksum(struct mbuf *m, int off, int len, u_int8_t p,
+ sa_family_t af)
+{
+ u_int16_t flag_ok, flag_bad;
+ u_int16_t sum;
+
+ switch (p) {
+ case IPPROTO_TCP:
+ flag_ok = M_TCP_CSUM_IN_OK;
+ flag_bad = M_TCP_CSUM_IN_BAD;
+ break;
+ case IPPROTO_UDP:
+ flag_ok = M_UDP_CSUM_IN_OK;
+ flag_bad = M_UDP_CSUM_IN_BAD;
+ break;
+ case IPPROTO_ICMP:
+#ifdef INET6
+ case IPPROTO_ICMPV6:
+#endif /* INET6 */
+ flag_ok = flag_bad = 0;
+ break;
+ default:
+ return (1);
+ }
+ if (m->m_pkthdr.csum_flags & flag_ok)
+ return (0);
+ if (m->m_pkthdr.csum_flags & flag_bad)
+ return (1);
+ if (off < sizeof(struct ip) || len < sizeof(struct udphdr))
+ return (1);
+ if (m->m_pkthdr.len < off + len)
+ return (1);
+ switch (af) {
+#ifdef INET
+ case AF_INET:
+ if (p == IPPROTO_ICMP) {
+ if (m->m_len < off)
+ return (1);
+ m->m_data += off;
+ m->m_len -= off;
+ sum = in_cksum(m, len);
+ m->m_data -= off;
+ m->m_len += off;
+ } else {
+ if (m->m_len < sizeof(struct ip))
+ return (1);
+ sum = in4_cksum(m, p, off, len);
+ }
+ break;
+#endif /* INET */
+#ifdef INET6
+ case AF_INET6:
+ if (m->m_len < sizeof(struct ip6_hdr))
+ return (1);
+ sum = in6_cksum(m, p, off, len);
+ break;
+#endif /* INET6 */
+ default:
+ return (1);
+ }
+ if (sum) {
+ m->m_pkthdr.csum_flags |= flag_bad;
+ switch (p) {
+ case IPPROTO_TCP:
+ KMOD_TCPSTAT_INC(tcps_rcvbadsum);
+ break;
+ case IPPROTO_UDP:
+ KMOD_UDPSTAT_INC(udps_badsum);
+ break;
+ case IPPROTO_ICMP:
+ KMOD_ICMPSTAT_INC(icps_checksum);
+ break;
+#ifdef INET6
+ case IPPROTO_ICMPV6:
+ KMOD_ICMP6STAT_INC(icp6s_checksum);
+ break;
+#endif /* INET6 */
+ }
+ return (1);
+ }
+ m->m_pkthdr.csum_flags |= flag_ok;
+ return (0);
+}
+#endif /* __FreeBSD__ */
+
+#ifdef INET
+int
+#ifdef __FreeBSD__
+pf_test(int dir, struct ifnet *ifp, struct mbuf **m0,
+ struct ether_header *eh, struct inpcb *inp)
+#else
+pf_test(int dir, struct ifnet *ifp, struct mbuf **m0,
+ struct ether_header *eh)
+#endif
+{
+ struct pfi_kif *kif;
+ u_short action, reason = 0, log = 0;
+ struct mbuf *m = *m0;
+ struct ip *h = NULL; /* make the compiler happy */
+ struct pf_rule *a = NULL, *r = &pf_default_rule, *tr, *nr;
+ struct pf_state *s = NULL;
+ struct pf_ruleset *ruleset = NULL;
+ struct pf_pdesc pd;
+ int off, dirndx, pqid = 0;
+
+#ifdef __FreeBSD__
+ PF_LOCK();
+#endif
+ if (!pf_status.running)
+#ifdef __FreeBSD__
+ {
+ PF_UNLOCK();
+#endif
+ return (PF_PASS);
+#ifdef __FreeBSD__
+ }
+#endif
+
+ memset(&pd, 0, sizeof(pd));
+ if ((pd.pf_mtag = pf_get_mtag(m)) == NULL) {
+#ifdef __FreeBSD__
+ PF_UNLOCK();
+#endif
+ DPFPRINTF(PF_DEBUG_URGENT,
+ ("pf_test: pf_get_mtag returned NULL\n"));
+ return (PF_DROP);
+ }
+#ifdef __FreeBSD__
+ if (m->m_flags & M_SKIP_FIREWALL) {
+ PF_UNLOCK();
+ return (PF_PASS);
+ }
+#else
+ if (pd.pf_mtag->flags & PF_TAG_GENERATED)
+ return (PF_PASS);
+#endif
+
+#ifdef __FreeBSD__
+ /* XXX_IMPORT: later */
+#else
+ if (ifp->if_type == IFT_CARP && ifp->if_carpdev)
+ ifp = ifp->if_carpdev;
+#endif
+
+ kif = (struct pfi_kif *)ifp->if_pf_kif;
+ if (kif == NULL) {
+#ifdef __FreeBSD__
+ PF_UNLOCK();
+#endif
+ DPFPRINTF(PF_DEBUG_URGENT,
+ ("pf_test: kif == NULL, if_xname %s\n", ifp->if_xname));
+ return (PF_DROP);
+ }
+ if (kif->pfik_flags & PFI_IFLAG_SKIP) {
+#ifdef __FreeBSD__
+ PF_UNLOCK();
+#endif
+ return (PF_PASS);
+ }
+
+#ifdef __FreeBSD__
+ M_ASSERTPKTHDR(m);
+#else
+#ifdef DIAGNOSTIC
+ if ((m->m_flags & M_PKTHDR) == 0)
+ panic("non-M_PKTHDR is passed to pf_test");
+#endif /* DIAGNOSTIC */
+#endif /* __FreeBSD__ */
+
+ if (m->m_pkthdr.len < (int)sizeof(*h)) {
+ action = PF_DROP;
+ REASON_SET(&reason, PFRES_SHORT);
+ log = 1;
+ goto done;
+ }
+
+ /* We do IP header normalization and packet reassembly here */
+ if (pf_normalize_ip(m0, dir, kif, &reason, &pd) != PF_PASS) {
+ action = PF_DROP;
+ goto done;
+ }
+ m = *m0;
+ h = mtod(m, struct ip *);
+
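+	/* ip_hl counts 32-bit words; shift by 2 for the length in bytes */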
+ off = h->ip_hl << 2;
+ if (off < (int)sizeof(*h)) {
+ action = PF_DROP;
+ REASON_SET(&reason, PFRES_SHORT);
+ log = 1;
+ goto done;
+ }
+
+ pd.src = (struct pf_addr *)&h->ip_src;
+ pd.dst = (struct pf_addr *)&h->ip_dst;
+ PF_ACPY(&pd.baddr, dir == PF_OUT ? pd.src : pd.dst, AF_INET);
+ pd.ip_sum = &h->ip_sum;
+ pd.proto = h->ip_p;
+ pd.af = AF_INET;
+ pd.tos = h->ip_tos;
+ pd.tot_len = ntohs(h->ip_len);
+ pd.eh = eh;
+
+ /* handle fragments that didn't get reassembled by normalization */
+ if (h->ip_off & htons(IP_MF | IP_OFFMASK)) {
+ action = pf_test_fragment(&r, dir, kif, m, h,
+ &pd, &a, &ruleset);
+ goto done;
+ }
+
+ switch (h->ip_p) {
+
+ case IPPROTO_TCP: {
+ struct tcphdr th;
+
+ pd.hdr.tcp = &th;
+ if (!pf_pull_hdr(m, off, &th, sizeof(th),
+ &action, &reason, AF_INET)) {
+ log = action != PF_PASS;
+ goto done;
+ }
+ if (dir == PF_IN && pf_check_proto_cksum(m, off,
+ ntohs(h->ip_len) - off, IPPROTO_TCP, AF_INET)) {
+ REASON_SET(&reason, PFRES_PROTCKSUM);
+ action = PF_DROP;
+ goto done;
+ }
+ pd.p_len = pd.tot_len - off - (th.th_off << 2);
+ if ((th.th_flags & TH_ACK) && pd.p_len == 0)
+ pqid = 1;
+ action = pf_normalize_tcp(dir, kif, m, 0, off, h, &pd);
+ if (action == PF_DROP)
+ goto done;
+ action = pf_test_state_tcp(&s, dir, kif, m, off, h, &pd,
+ &reason);
+ if (action == PF_PASS) {
+#if NPFSYNC
+ pfsync_update_state(s);
+#endif /* NPFSYNC */
+ r = s->rule.ptr;
+ a = s->anchor.ptr;
+ log = s->log;
+ } else if (s == NULL)
+#ifdef __FreeBSD__
+ action = pf_test_tcp(&r, &s, dir, kif,
+ m, off, h, &pd, &a, &ruleset, NULL, inp);
+#else
+ action = pf_test_tcp(&r, &s, dir, kif,
+ m, off, h, &pd, &a, &ruleset, &ipintrq);
+#endif
+ break;
+ }
+
+ case IPPROTO_UDP: {
+ struct udphdr uh;
+
+ pd.hdr.udp = &uh;
+ if (!pf_pull_hdr(m, off, &uh, sizeof(uh),
+ &action, &reason, AF_INET)) {
+ log = action != PF_PASS;
+ goto done;
+ }
+ if (dir == PF_IN && uh.uh_sum && pf_check_proto_cksum(m,
+ off, ntohs(h->ip_len) - off, IPPROTO_UDP, AF_INET)) {
+ action = PF_DROP;
+ REASON_SET(&reason, PFRES_PROTCKSUM);
+ goto done;
+ }
+ if (uh.uh_dport == 0 ||
+ ntohs(uh.uh_ulen) > m->m_pkthdr.len - off ||
+ ntohs(uh.uh_ulen) < sizeof(struct udphdr)) {
+ action = PF_DROP;
+ REASON_SET(&reason, PFRES_SHORT);
+ goto done;
+ }
+ action = pf_test_state_udp(&s, dir, kif, m, off, h, &pd);
+ if (action == PF_PASS) {
+#if NPFSYNC
+ pfsync_update_state(s);
+#endif /* NPFSYNC */
+ r = s->rule.ptr;
+ a = s->anchor.ptr;
+ log = s->log;
+ } else if (s == NULL)
+#ifdef __FreeBSD__
+ action = pf_test_udp(&r, &s, dir, kif,
+ m, off, h, &pd, &a, &ruleset, NULL, inp);
+#else
+ action = pf_test_udp(&r, &s, dir, kif,
+ m, off, h, &pd, &a, &ruleset, &ipintrq);
+#endif
+ break;
+ }
+
+ case IPPROTO_ICMP: {
+ struct icmp ih;
+
+ pd.hdr.icmp = &ih;
+ if (!pf_pull_hdr(m, off, &ih, ICMP_MINLEN,
+ &action, &reason, AF_INET)) {
+ log = action != PF_PASS;
+ goto done;
+ }
+ if (dir == PF_IN && pf_check_proto_cksum(m, off,
+ ntohs(h->ip_len) - off, IPPROTO_ICMP, AF_INET)) {
+ action = PF_DROP;
+ REASON_SET(&reason, PFRES_PROTCKSUM);
+ goto done;
+ }
+ action = pf_test_state_icmp(&s, dir, kif, m, off, h, &pd,
+ &reason);
+ if (action == PF_PASS) {
+#if NPFSYNC
+ pfsync_update_state(s);
+#endif /* NPFSYNC */
+ r = s->rule.ptr;
+ a = s->anchor.ptr;
+ log = s->log;
+ } else if (s == NULL)
+#ifdef __FreeBSD__
+ action = pf_test_icmp(&r, &s, dir, kif,
+ m, off, h, &pd, &a, &ruleset, NULL);
+#else
+ action = pf_test_icmp(&r, &s, dir, kif,
+ m, off, h, &pd, &a, &ruleset, &ipintrq);
+#endif
+ break;
+ }
+
+ default:
+ action = pf_test_state_other(&s, dir, kif, &pd);
+ if (action == PF_PASS) {
+#if NPFSYNC
+ pfsync_update_state(s);
+#endif /* NPFSYNC */
+ r = s->rule.ptr;
+ a = s->anchor.ptr;
+ log = s->log;
+ } else if (s == NULL)
+#ifdef __FreeBSD__
+ action = pf_test_other(&r, &s, dir, kif, m, off, h,
+ &pd, &a, &ruleset, NULL);
+#else
+ action = pf_test_other(&r, &s, dir, kif, m, off, h,
+ &pd, &a, &ruleset, &ipintrq);
+#endif
+ break;
+ }
+
+done:
+ if (action == PF_PASS && h->ip_hl > 5 &&
+ !((s && s->state_flags & PFSTATE_ALLOWOPTS) || r->allow_opts)) {
+ action = PF_DROP;
+ REASON_SET(&reason, PFRES_IPOPTIONS);
+ log = 1;
+ DPFPRINTF(PF_DEBUG_MISC,
+ ("pf: dropping packet with ip options\n"));
+ }
+
+ if ((s && s->tag) || r->rtableid)
+ pf_tag_packet(m, pd.pf_mtag, s ? s->tag : 0, r->rtableid);
+
+#ifdef ALTQ
+ if (action == PF_PASS && r->qid) {
+ if (pqid || (pd.tos & IPTOS_LOWDELAY))
+ pd.pf_mtag->qid = r->pqid;
+ else
+ pd.pf_mtag->qid = r->qid;
+ /* add hints for ecn */
+ pd.pf_mtag->af = AF_INET;
+ pd.pf_mtag->hdr = h;
+ }
+#endif /* ALTQ */
+
+ /*
+ * connections redirected to loopback should not match sockets
+ * bound specifically to loopback due to security implications,
+ * see tcp_input() and in_pcblookup_listen().
+ */
+ if (dir == PF_IN && action == PF_PASS && (pd.proto == IPPROTO_TCP ||
+ pd.proto == IPPROTO_UDP) && s != NULL && s->nat_rule.ptr != NULL &&
+ (s->nat_rule.ptr->action == PF_RDR ||
+ s->nat_rule.ptr->action == PF_BINAT) &&
+ (ntohl(pd.dst->v4.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET)
+ pd.pf_mtag->flags |= PF_TAG_TRANSLATE_LOCALHOST;
+
+ if (log) {
+ struct pf_rule *lr;
+
+ if (s != NULL && s->nat_rule.ptr != NULL &&
+ s->nat_rule.ptr->log & PF_LOG_ALL)
+ lr = s->nat_rule.ptr;
+ else
+ lr = r;
+ PFLOG_PACKET(kif, h, m, AF_INET, dir, reason, lr, a, ruleset,
+ &pd);
+ }
+
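+	/* per-interface counters, indexed [af (0 = IPv4)][out?][dropped?] */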
+ kif->pfik_bytes[0][dir == PF_OUT][action != PF_PASS] += pd.tot_len;
+ kif->pfik_packets[0][dir == PF_OUT][action != PF_PASS]++;
+
+ if (action == PF_PASS || r->action == PF_DROP) {
+ dirndx = (dir == PF_OUT);
+ r->packets[dirndx]++;
+ r->bytes[dirndx] += pd.tot_len;
+ if (a != NULL) {
+ a->packets[dirndx]++;
+ a->bytes[dirndx] += pd.tot_len;
+ }
+ if (s != NULL) {
+ if (s->nat_rule.ptr != NULL) {
+ s->nat_rule.ptr->packets[dirndx]++;
+ s->nat_rule.ptr->bytes[dirndx] += pd.tot_len;
+ }
+ if (s->src_node != NULL) {
+ s->src_node->packets[dirndx]++;
+ s->src_node->bytes[dirndx] += pd.tot_len;
+ }
+ if (s->nat_src_node != NULL) {
+ s->nat_src_node->packets[dirndx]++;
+ s->nat_src_node->bytes[dirndx] += pd.tot_len;
+ }
+ dirndx = (dir == s->direction) ? 0 : 1;
+ s->packets[dirndx]++;
+ s->bytes[dirndx] += pd.tot_len;
+ }
+ tr = r;
+ nr = (s != NULL) ? s->nat_rule.ptr : pd.nat_rule;
+ if (nr != NULL) {
+ struct pf_addr *x;
+ /*
+ * XXX: we need to make sure that the addresses
+			 * passed to pfr_update_stats() are the same as
+ * the addresses used during matching (pfr_match)
+ */
+ if (r == &pf_default_rule) {
+ tr = nr;
+ x = (s == NULL || s->direction == dir) ?
+ &pd.baddr : &pd.naddr;
+ } else
+ x = (s == NULL || s->direction == dir) ?
+ &pd.naddr : &pd.baddr;
+ if (x == &pd.baddr || s == NULL) {
+ /* we need to change the address */
+ if (dir == PF_OUT)
+ pd.src = x;
+ else
+ pd.dst = x;
+ }
+ }
+ if (tr->src.addr.type == PF_ADDR_TABLE)
+ pfr_update_stats(tr->src.addr.p.tbl, (s == NULL ||
+ s->direction == dir) ? pd.src : pd.dst, pd.af,
+ pd.tot_len, dir == PF_OUT, r->action == PF_PASS,
+ tr->src.neg);
+ if (tr->dst.addr.type == PF_ADDR_TABLE)
+ pfr_update_stats(tr->dst.addr.p.tbl, (s == NULL ||
+ s->direction == dir) ? pd.dst : pd.src, pd.af,
+ pd.tot_len, dir == PF_OUT, r->action == PF_PASS,
+ tr->dst.neg);
+ }
+
+
+ if (action == PF_SYNPROXY_DROP) {
+ m_freem(*m0);
+ *m0 = NULL;
+ action = PF_PASS;
+ } else if (r->rt)
+ /* pf_route can free the mbuf causing *m0 to become NULL */
+ pf_route(m0, r, dir, ifp, s, &pd);
+
+#ifdef __FreeBSD__
+ PF_UNLOCK();
+#endif
+
+ return (action);
+}
+#endif /* INET */
+
+#ifdef INET6
+int
+#ifdef __FreeBSD__
+pf_test6(int dir, struct ifnet *ifp, struct mbuf **m0,
+ struct ether_header *eh, struct inpcb *inp)
+#else
+pf_test6(int dir, struct ifnet *ifp, struct mbuf **m0,
+ struct ether_header *eh)
+#endif
+{
+ struct pfi_kif *kif;
+ u_short action, reason = 0, log = 0;
+ struct mbuf *m = *m0, *n = NULL;
+ struct ip6_hdr *h;
+ struct pf_rule *a = NULL, *r = &pf_default_rule, *tr, *nr;
+ struct pf_state *s = NULL;
+ struct pf_ruleset *ruleset = NULL;
+ struct pf_pdesc pd;
+ int off, terminal = 0, dirndx, rh_cnt = 0;
+
+#ifdef __FreeBSD__
+ PF_LOCK();
+#endif
+
+ if (!pf_status.running)
+#ifdef __FreeBSD__
+ {
+ PF_UNLOCK();
+#endif
+ return (PF_PASS);
+#ifdef __FreeBSD__
+ }
+#endif
+
+ memset(&pd, 0, sizeof(pd));
+ if ((pd.pf_mtag = pf_get_mtag(m)) == NULL) {
+#ifdef __FreeBSD__
+ PF_UNLOCK();
+#endif
+ DPFPRINTF(PF_DEBUG_URGENT,
+ ("pf_test6: pf_get_mtag returned NULL\n"));
+ return (PF_DROP);
+ }
+ if (pd.pf_mtag->flags & PF_TAG_GENERATED)
+ return (PF_PASS);
+
+#ifdef __FreeBSD__
+ /* XXX_IMPORT: later */
+#else
+ if (ifp->if_type == IFT_CARP && ifp->if_carpdev)
+ ifp = ifp->if_carpdev;
+#endif
+
+ kif = (struct pfi_kif *)ifp->if_pf_kif;
+ if (kif == NULL) {
+#ifdef __FreeBSD__
+ PF_UNLOCK();
+#endif
+ DPFPRINTF(PF_DEBUG_URGENT,
+ ("pf_test6: kif == NULL, if_xname %s\n", ifp->if_xname));
+ return (PF_DROP);
+ }
+ if (kif->pfik_flags & PFI_IFLAG_SKIP) {
+#ifdef __FreeBSD__
+ PF_UNLOCK();
+#endif
+ return (PF_PASS);
+ }
+
+#ifdef __FreeBSD__
+ M_ASSERTPKTHDR(m);
+#else
+#ifdef DIAGNOSTIC
+ if ((m->m_flags & M_PKTHDR) == 0)
+ panic("non-M_PKTHDR is passed to pf_test6");
+#endif /* DIAGNOSTIC */
+#endif
+
+#ifdef __FreeBSD__
+ h = NULL; /* make the compiler happy */
+#endif
+
+ if (m->m_pkthdr.len < (int)sizeof(*h)) {
+ action = PF_DROP;
+ REASON_SET(&reason, PFRES_SHORT);
+ log = 1;
+ goto done;
+ }
+
+ /* We do IP header normalization and packet reassembly here */
+ if (pf_normalize_ip6(m0, dir, kif, &reason, &pd) != PF_PASS) {
+ action = PF_DROP;
+ goto done;
+ }
+ m = *m0;
+ h = mtod(m, struct ip6_hdr *);
+
+#if 1
+ /*
+	 * We do not support jumbograms yet. If we kept going, a zero
+	 * ip6_plen would cause trouble further down, so drop the packet
+	 * for now.
+ */
+ if (htons(h->ip6_plen) == 0) {
+ action = PF_DROP;
+ REASON_SET(&reason, PFRES_NORM); /*XXX*/
+ goto done;
+ }
+#endif
+
+ pd.src = (struct pf_addr *)&h->ip6_src;
+ pd.dst = (struct pf_addr *)&h->ip6_dst;
+ PF_ACPY(&pd.baddr, dir == PF_OUT ? pd.src : pd.dst, AF_INET6);
+ pd.ip_sum = NULL;
+ pd.af = AF_INET6;
+ pd.tos = 0;
+ pd.tot_len = ntohs(h->ip6_plen) + sizeof(struct ip6_hdr);
+ pd.eh = eh;
+
+ off = ((caddr_t)h - m->m_data) + sizeof(struct ip6_hdr);
+ pd.proto = h->ip6_nxt;
+ do {
+ switch (pd.proto) {
+ case IPPROTO_FRAGMENT:
+ action = pf_test_fragment(&r, dir, kif, m, h,
+ &pd, &a, &ruleset);
+ if (action == PF_DROP)
+ REASON_SET(&reason, PFRES_FRAG);
+ goto done;
+ case IPPROTO_ROUTING: {
+ struct ip6_rthdr rthdr;
+
+ if (rh_cnt++) {
+ DPFPRINTF(PF_DEBUG_MISC,
+ ("pf: IPv6 more than one rthdr\n"));
+ action = PF_DROP;
+ REASON_SET(&reason, PFRES_IPOPTIONS);
+ log = 1;
+ goto done;
+ }
+ if (!pf_pull_hdr(m, off, &rthdr, sizeof(rthdr), NULL,
+ &reason, pd.af)) {
+ DPFPRINTF(PF_DEBUG_MISC,
+ ("pf: IPv6 short rthdr\n"));
+ action = PF_DROP;
+ REASON_SET(&reason, PFRES_SHORT);
+ log = 1;
+ goto done;
+ }
+ if (rthdr.ip6r_type == IPV6_RTHDR_TYPE_0) {
+ DPFPRINTF(PF_DEBUG_MISC,
+ ("pf: IPv6 rthdr0\n"));
+ action = PF_DROP;
+ REASON_SET(&reason, PFRES_IPOPTIONS);
+ log = 1;
+ goto done;
+ }
+ /* fallthrough */
+ }
+ case IPPROTO_AH:
+ case IPPROTO_HOPOPTS:
+ case IPPROTO_DSTOPTS: {
+ /* get next header and header length */
+ struct ip6_ext opt6;
+
+ if (!pf_pull_hdr(m, off, &opt6, sizeof(opt6),
+ NULL, &reason, pd.af)) {
+ DPFPRINTF(PF_DEBUG_MISC,
+ ("pf: IPv6 short opt\n"));
+ action = PF_DROP;
+ log = 1;
+ goto done;
+ }
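+			/*
+			 * AH encodes its length in 32-bit words minus two;
+			 * the other extension headers use 8-byte units not
+			 * counting the first 8 bytes (RFC 4302, RFC 2460).
+			 */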
+ if (pd.proto == IPPROTO_AH)
+ off += (opt6.ip6e_len + 2) * 4;
+ else
+ off += (opt6.ip6e_len + 1) * 8;
+ pd.proto = opt6.ip6e_nxt;
+			/* go to the next header */
+ break;
+ }
+ default:
+ terminal++;
+ break;
+ }
+ } while (!terminal);
+
+ /* if there's no routing header, use unmodified mbuf for checksumming */
+ if (!n)
+ n = m;
+
+ switch (pd.proto) {
+
+ case IPPROTO_TCP: {
+ struct tcphdr th;
+
+ pd.hdr.tcp = &th;
+ if (!pf_pull_hdr(m, off, &th, sizeof(th),
+ &action, &reason, AF_INET6)) {
+ log = action != PF_PASS;
+ goto done;
+ }
+ if (dir == PF_IN && pf_check_proto_cksum(n, off,
+ ntohs(h->ip6_plen) - (off - sizeof(struct ip6_hdr)),
+ IPPROTO_TCP, AF_INET6)) {
+ action = PF_DROP;
+ REASON_SET(&reason, PFRES_PROTCKSUM);
+ goto done;
+ }
+ pd.p_len = pd.tot_len - off - (th.th_off << 2);
+ action = pf_normalize_tcp(dir, kif, m, 0, off, h, &pd);
+ if (action == PF_DROP)
+ goto done;
+ action = pf_test_state_tcp(&s, dir, kif, m, off, h, &pd,
+ &reason);
+ if (action == PF_PASS) {
+#if NPFSYNC
+ pfsync_update_state(s);
+#endif /* NPFSYNC */
+ r = s->rule.ptr;
+ a = s->anchor.ptr;
+ log = s->log;
+ } else if (s == NULL)
+#ifdef __FreeBSD__
+ action = pf_test_tcp(&r, &s, dir, kif,
+ m, off, h, &pd, &a, &ruleset, NULL, inp);
+#else
+ action = pf_test_tcp(&r, &s, dir, kif,
+ m, off, h, &pd, &a, &ruleset, &ip6intrq);
+#endif
+ break;
+ }
+
+ case IPPROTO_UDP: {
+ struct udphdr uh;
+
+ pd.hdr.udp = &uh;
+ if (!pf_pull_hdr(m, off, &uh, sizeof(uh),
+ &action, &reason, AF_INET6)) {
+ log = action != PF_PASS;
+ goto done;
+ }
+ if (dir == PF_IN && uh.uh_sum && pf_check_proto_cksum(n,
+ off, ntohs(h->ip6_plen) - (off - sizeof(struct ip6_hdr)),
+ IPPROTO_UDP, AF_INET6)) {
+ action = PF_DROP;
+ REASON_SET(&reason, PFRES_PROTCKSUM);
+ goto done;
+ }
+ if (uh.uh_dport == 0 ||
+ ntohs(uh.uh_ulen) > m->m_pkthdr.len - off ||
+ ntohs(uh.uh_ulen) < sizeof(struct udphdr)) {
+ action = PF_DROP;
+ REASON_SET(&reason, PFRES_SHORT);
+ goto done;
+ }
+ action = pf_test_state_udp(&s, dir, kif, m, off, h, &pd);
+ if (action == PF_PASS) {
+#if NPFSYNC
+ pfsync_update_state(s);
+#endif /* NPFSYNC */
+ r = s->rule.ptr;
+ a = s->anchor.ptr;
+ log = s->log;
+ } else if (s == NULL)
+#ifdef __FreeBSD__
+ action = pf_test_udp(&r, &s, dir, kif,
+ m, off, h, &pd, &a, &ruleset, NULL, inp);
+#else
+ action = pf_test_udp(&r, &s, dir, kif,
+ m, off, h, &pd, &a, &ruleset, &ip6intrq);
+#endif
+ break;
+ }
+
+ case IPPROTO_ICMPV6: {
+ struct icmp6_hdr ih;
+
+ pd.hdr.icmp6 = &ih;
+ if (!pf_pull_hdr(m, off, &ih, sizeof(ih),
+ &action, &reason, AF_INET6)) {
+ log = action != PF_PASS;
+ goto done;
+ }
+ if (dir == PF_IN && pf_check_proto_cksum(n, off,
+ ntohs(h->ip6_plen) - (off - sizeof(struct ip6_hdr)),
+ IPPROTO_ICMPV6, AF_INET6)) {
+ action = PF_DROP;
+ REASON_SET(&reason, PFRES_PROTCKSUM);
+ goto done;
+ }
+ action = pf_test_state_icmp(&s, dir, kif,
+ m, off, h, &pd, &reason);
+ if (action == PF_PASS) {
+#if NPFSYNC
+ pfsync_update_state(s);
+#endif /* NPFSYNC */
+ r = s->rule.ptr;
+ a = s->anchor.ptr;
+ log = s->log;
+ } else if (s == NULL)
+#ifdef __FreeBSD__
+ action = pf_test_icmp(&r, &s, dir, kif,
+ m, off, h, &pd, &a, &ruleset, NULL);
+#else
+ action = pf_test_icmp(&r, &s, dir, kif,
+ m, off, h, &pd, &a, &ruleset, &ip6intrq);
+#endif
+ break;
+ }
+
+ default:
+ action = pf_test_state_other(&s, dir, kif, &pd);
+ if (action == PF_PASS) {
+#if NPFSYNC
+ pfsync_update_state(s);
+#endif /* NPFSYNC */
+ r = s->rule.ptr;
+ a = s->anchor.ptr;
+ log = s->log;
+ } else if (s == NULL)
+#ifdef __FreeBSD__
+ action = pf_test_other(&r, &s, dir, kif, m, off, h,
+ &pd, &a, &ruleset, NULL);
+#else
+ action = pf_test_other(&r, &s, dir, kif, m, off, h,
+ &pd, &a, &ruleset, &ip6intrq);
+#endif
+ break;
+ }
+
+done:
+ /* handle dangerous IPv6 extension headers. */
+ if (action == PF_PASS && rh_cnt &&
+ !((s && s->state_flags & PFSTATE_ALLOWOPTS) || r->allow_opts)) {
+ action = PF_DROP;
+ REASON_SET(&reason, PFRES_IPOPTIONS);
+ log = 1;
+ DPFPRINTF(PF_DEBUG_MISC,
+ ("pf: dropping packet with dangerous v6 headers\n"));
+ }
+
+ if ((s && s->tag) || r->rtableid)
+ pf_tag_packet(m, pd.pf_mtag, s ? s->tag : 0, r->rtableid);
+
+#ifdef ALTQ
+ if (action == PF_PASS && r->qid) {
+ if (pd.tos & IPTOS_LOWDELAY)
+ pd.pf_mtag->qid = r->pqid;
+ else
+ pd.pf_mtag->qid = r->qid;
+ /* add hints for ecn */
+ pd.pf_mtag->af = AF_INET6;
+ pd.pf_mtag->hdr = h;
+ }
+#endif /* ALTQ */
+
+ if (dir == PF_IN && action == PF_PASS && (pd.proto == IPPROTO_TCP ||
+ pd.proto == IPPROTO_UDP) && s != NULL && s->nat_rule.ptr != NULL &&
+ (s->nat_rule.ptr->action == PF_RDR ||
+ s->nat_rule.ptr->action == PF_BINAT) &&
+ IN6_IS_ADDR_LOOPBACK(&pd.dst->v6))
+ pd.pf_mtag->flags |= PF_TAG_TRANSLATE_LOCALHOST;
+
+ if (log) {
+ struct pf_rule *lr;
+
+ if (s != NULL && s->nat_rule.ptr != NULL &&
+ s->nat_rule.ptr->log & PF_LOG_ALL)
+ lr = s->nat_rule.ptr;
+ else
+ lr = r;
+ PFLOG_PACKET(kif, h, m, AF_INET6, dir, reason, lr, a, ruleset,
+ &pd);
+ }
+
+ kif->pfik_bytes[1][dir == PF_OUT][action != PF_PASS] += pd.tot_len;
+ kif->pfik_packets[1][dir == PF_OUT][action != PF_PASS]++;
+
+ if (action == PF_PASS || r->action == PF_DROP) {
+ dirndx = (dir == PF_OUT);
+ r->packets[dirndx]++;
+ r->bytes[dirndx] += pd.tot_len;
+ if (a != NULL) {
+ a->packets[dirndx]++;
+ a->bytes[dirndx] += pd.tot_len;
+ }
+ if (s != NULL) {
+ if (s->nat_rule.ptr != NULL) {
+ s->nat_rule.ptr->packets[dirndx]++;
+ s->nat_rule.ptr->bytes[dirndx] += pd.tot_len;
+ }
+ if (s->src_node != NULL) {
+ s->src_node->packets[dirndx]++;
+ s->src_node->bytes[dirndx] += pd.tot_len;
+ }
+ if (s->nat_src_node != NULL) {
+ s->nat_src_node->packets[dirndx]++;
+ s->nat_src_node->bytes[dirndx] += pd.tot_len;
+ }
+ dirndx = (dir == s->direction) ? 0 : 1;
+ s->packets[dirndx]++;
+ s->bytes[dirndx] += pd.tot_len;
+ }
+ tr = r;
+ nr = (s != NULL) ? s->nat_rule.ptr : pd.nat_rule;
+ if (nr != NULL) {
+ struct pf_addr *x;
+ /*
+ * XXX: we need to make sure that the addresses
+			 * passed to pfr_update_stats() are the same as
+ * the addresses used during matching (pfr_match)
+ */
+ if (r == &pf_default_rule) {
+ tr = nr;
+ x = (s == NULL || s->direction == dir) ?
+ &pd.baddr : &pd.naddr;
+ } else {
+ x = (s == NULL || s->direction == dir) ?
+ &pd.naddr : &pd.baddr;
+ }
+ if (x == &pd.baddr || s == NULL) {
+ if (dir == PF_OUT)
+ pd.src = x;
+ else
+ pd.dst = x;
+ }
+ }
+ if (tr->src.addr.type == PF_ADDR_TABLE)
+ pfr_update_stats(tr->src.addr.p.tbl, (s == NULL ||
+ s->direction == dir) ? pd.src : pd.dst, pd.af,
+ pd.tot_len, dir == PF_OUT, r->action == PF_PASS,
+ tr->src.neg);
+ if (tr->dst.addr.type == PF_ADDR_TABLE)
+ pfr_update_stats(tr->dst.addr.p.tbl, (s == NULL ||
+ s->direction == dir) ? pd.dst : pd.src, pd.af,
+ pd.tot_len, dir == PF_OUT, r->action == PF_PASS,
+ tr->dst.neg);
+ }
+
+
+ if (action == PF_SYNPROXY_DROP) {
+ m_freem(*m0);
+ *m0 = NULL;
+ action = PF_PASS;
+ } else if (r->rt)
+ /* pf_route6 can free the mbuf causing *m0 to become NULL */
+ pf_route6(m0, r, dir, ifp, s, &pd);
+
+#ifdef __FreeBSD__
+ PF_UNLOCK();
+#endif
+ return (action);
+}
+#endif /* INET6 */
+
+int
+pf_check_congestion(struct ifqueue *ifq)
+{
+#ifdef __FreeBSD__
+ /* XXX_IMPORT: later */
+ return (0);
+#else
+ if (ifq->ifq_congestion)
+ return (1);
+ else
+ return (0);
+#endif
+}
diff --git a/contrib/pf/rtems/freebsd/net/pf_if.c b/contrib/pf/rtems/freebsd/net/pf_if.c
new file mode 100644
index 00000000..f286da5b
--- /dev/null
+++ b/contrib/pf/rtems/freebsd/net/pf_if.c
@@ -0,0 +1,950 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/* $OpenBSD: pf_if.c,v 1.46 2006/12/13 09:01:59 itojun Exp $ */
+
+/*
+ * Copyright 2005 Henning Brauer <henning@openbsd.org>
+ * Copyright 2005 Ryan McBride <mcbride@openbsd.org>
+ * Copyright (c) 2001 Daniel Hartmeier
+ * Copyright (c) 2003 Cedric Berger
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#if defined(__FreeBSD__)
+#include <rtems/freebsd/local/opt_inet.h>
+#include <rtems/freebsd/local/opt_inet6.h>
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+#endif
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#ifdef __FreeBSD__
+#include <rtems/freebsd/sys/malloc.h>
+#endif
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/filio.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/socketvar.h>
+#include <rtems/freebsd/sys/kernel.h>
+#ifndef __FreeBSD__
+#include <rtems/freebsd/sys/device.h>
+#endif
+#include <rtems/freebsd/sys/time.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/if_types.h>
+#ifdef __FreeBSD__
+#include <rtems/freebsd/net/vnet.h>
+#endif
+
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/in_var.h>
+#include <rtems/freebsd/netinet/in_systm.h>
+#include <rtems/freebsd/netinet/ip.h>
+#include <rtems/freebsd/netinet/ip_var.h>
+
+#include <rtems/freebsd/net/pfvar.h>
+
+#ifdef INET6
+#include <rtems/freebsd/netinet/ip6.h>
+#endif /* INET6 */
+
+struct pfi_kif *pfi_all = NULL;
+struct pfi_statehead pfi_statehead;
+#ifdef __FreeBSD__
+uma_zone_t pfi_addr_pl;
+#else
+struct pool pfi_addr_pl;
+#endif
+struct pfi_ifhead pfi_ifs;
+long pfi_update = 1;
+struct pfr_addr *pfi_buffer;
+int pfi_buffer_cnt;
+int pfi_buffer_max;
+#ifdef __FreeBSD__
+eventhandler_tag pfi_attach_cookie = NULL;
+eventhandler_tag pfi_detach_cookie = NULL;
+eventhandler_tag pfi_attach_group_cookie = NULL;
+eventhandler_tag pfi_change_group_cookie = NULL;
+eventhandler_tag pfi_detach_group_cookie = NULL;
+eventhandler_tag pfi_ifaddr_event_cookie = NULL;
+#endif
+
+void pfi_kif_update(struct pfi_kif *);
+void pfi_dynaddr_update(struct pfi_dynaddr *dyn);
+void pfi_table_update(struct pfr_ktable *, struct pfi_kif *,
+ int, int);
+void pfi_kifaddr_update(void *);
+void pfi_instance_add(struct ifnet *, int, int);
+void pfi_address_add(struct sockaddr *, int, int);
+int pfi_if_compare(struct pfi_kif *, struct pfi_kif *);
+int pfi_skip_if(const char *, struct pfi_kif *);
+int pfi_unmask(void *);
+#ifdef __FreeBSD__
+void pfi_attach_ifnet_event(void * __unused, struct ifnet *);
+void pfi_detach_ifnet_event(void * __unused, struct ifnet *);
+void pfi_attach_group_event(void * __unused, struct ifg_group *);
+void pfi_change_group_event(void * __unused, char *);
+void pfi_detach_group_event(void * __unused, struct ifg_group *);
+void pfi_ifaddr_event(void * __unused, struct ifnet *);
+
+#endif
+
+RB_PROTOTYPE(pfi_ifhead, pfi_kif, pfik_tree, pfi_if_compare);
+RB_GENERATE(pfi_ifhead, pfi_kif, pfik_tree, pfi_if_compare);
+
+#define PFI_BUFFER_MAX 0x10000
+#define PFI_MTYPE M_IFADDR
+
+void
+pfi_initialize(void)
+{
+
+ if (pfi_all != NULL) /* already initialized */
+ return;
+
+ TAILQ_INIT(&pfi_statehead);
+#ifndef __FreeBSD__
+ pool_init(&pfi_addr_pl, sizeof(struct pfi_dynaddr), 0, 0, 0,
+ "pfiaddrpl", &pool_allocator_nointr);
+#endif
+ pfi_buffer_max = 64;
+ pfi_buffer = malloc(pfi_buffer_max * sizeof(*pfi_buffer),
+ PFI_MTYPE, M_WAITOK);
+
+ if ((pfi_all = pfi_kif_get(IFG_ALL)) == NULL)
+ panic("pfi_kif_get for pfi_all failed");
+
+#ifdef __FreeBSD__
+ struct ifg_group *ifg;
+ struct ifnet *ifp;
+
+ IFNET_RLOCK();
+ TAILQ_FOREACH(ifg, &V_ifg_head, ifg_next)
+ pfi_attach_ifgroup(ifg);
+ TAILQ_FOREACH(ifp, &V_ifnet, if_link)
+ pfi_attach_ifnet(ifp);
+ IFNET_RUNLOCK();
+
+ pfi_attach_cookie = EVENTHANDLER_REGISTER(ifnet_arrival_event,
+ pfi_attach_ifnet_event, NULL, EVENTHANDLER_PRI_ANY);
+ pfi_detach_cookie = EVENTHANDLER_REGISTER(ifnet_departure_event,
+ pfi_detach_ifnet_event, NULL, EVENTHANDLER_PRI_ANY);
+ pfi_attach_group_cookie = EVENTHANDLER_REGISTER(group_attach_event,
+ pfi_attach_group_event, NULL, EVENTHANDLER_PRI_ANY);
+ pfi_change_group_cookie = EVENTHANDLER_REGISTER(group_change_event,
+ pfi_change_group_event, NULL, EVENTHANDLER_PRI_ANY);
+ pfi_detach_group_cookie = EVENTHANDLER_REGISTER(group_detach_event,
+ pfi_detach_group_event, NULL, EVENTHANDLER_PRI_ANY);
+ pfi_ifaddr_event_cookie = EVENTHANDLER_REGISTER(ifaddr_event,
+ pfi_ifaddr_event, NULL, EVENTHANDLER_PRI_ANY);
+#endif
+}
+
+#ifdef __FreeBSD__
+void
+pfi_cleanup(void)
+{
+ struct pfi_kif *p;
+
+ PF_UNLOCK();
+ EVENTHANDLER_DEREGISTER(ifnet_arrival_event, pfi_attach_cookie);
+ EVENTHANDLER_DEREGISTER(ifnet_departure_event, pfi_detach_cookie);
+ EVENTHANDLER_DEREGISTER(group_attach_event, pfi_attach_group_cookie);
+ EVENTHANDLER_DEREGISTER(group_change_event, pfi_change_group_cookie);
+ EVENTHANDLER_DEREGISTER(group_detach_event, pfi_detach_group_cookie);
+ EVENTHANDLER_DEREGISTER(ifaddr_event, pfi_ifaddr_event_cookie);
+ PF_LOCK();
+
+ pfi_all = NULL;
+ while ((p = RB_MIN(pfi_ifhead, &pfi_ifs))) {
+ if (p->pfik_rules || p->pfik_states) {
+ printf("pfi_cleanup: dangling refs for %s\n",
+ p->pfik_name);
+ }
+
+ RB_REMOVE(pfi_ifhead, &pfi_ifs, p);
+ free(p, PFI_MTYPE);
+ }
+
+ free(pfi_buffer, PFI_MTYPE);
+}
+#endif
+
+struct pfi_kif *
+pfi_kif_get(const char *kif_name)
+{
+ struct pfi_kif *kif;
+ struct pfi_kif_cmp s;
+
+ bzero(&s, sizeof(s));
+ strlcpy(s.pfik_name, kif_name, sizeof(s.pfik_name));
+ if ((kif = RB_FIND(pfi_ifhead, &pfi_ifs, (struct pfi_kif *)&s)) != NULL)
+ return (kif);
+
+ /* create new one */
+#ifdef __FreeBSD__
+ if ((kif = malloc(sizeof(*kif), PFI_MTYPE, M_NOWAIT)) == NULL)
+#else
+ if ((kif = malloc(sizeof(*kif), PFI_MTYPE, M_DONTWAIT)) == NULL)
+#endif
+ return (NULL);
+
+ bzero(kif, sizeof(*kif));
+ strlcpy(kif->pfik_name, kif_name, sizeof(kif->pfik_name));
+#ifdef __FreeBSD__
+ /*
+	 * The value of time_second may still be uninitialized when pf sets
+	 * the interface statistics clear time during the boot phase, if pf
+	 * was statically linked into the kernel. Instead of storing that
+	 * bogus time value, let pfi_get_ifaces handle this case: it uses
+	 * boottime.tv_sec when it sees a time of 0.
+ */
+ kif->pfik_tzero = time_second > 1 ? time_second : 0;
+#else
+ kif->pfik_tzero = time_second;
+#endif
+ TAILQ_INIT(&kif->pfik_dynaddrs);
+
+ RB_INSERT(pfi_ifhead, &pfi_ifs, kif);
+ return (kif);
+}
+
+void
+pfi_kif_ref(struct pfi_kif *kif, enum pfi_kif_refs what)
+{
+ switch (what) {
+ case PFI_KIF_REF_RULE:
+ kif->pfik_rules++;
+ break;
+ case PFI_KIF_REF_STATE:
+ if (!kif->pfik_states++)
+ TAILQ_INSERT_TAIL(&pfi_statehead, kif, pfik_w_states);
+ break;
+ default:
+ panic("pfi_kif_ref with unknown type");
+ }
+}
+
+void
+pfi_kif_unref(struct pfi_kif *kif, enum pfi_kif_refs what)
+{
+ if (kif == NULL)
+ return;
+
+ switch (what) {
+ case PFI_KIF_REF_NONE:
+ break;
+ case PFI_KIF_REF_RULE:
+ if (kif->pfik_rules <= 0) {
+ printf("pfi_kif_unref: rules refcount <= 0\n");
+ return;
+ }
+ kif->pfik_rules--;
+ break;
+ case PFI_KIF_REF_STATE:
+ if (kif->pfik_states <= 0) {
+ printf("pfi_kif_unref: state refcount <= 0\n");
+ return;
+ }
+ if (!--kif->pfik_states)
+ TAILQ_REMOVE(&pfi_statehead, kif, pfik_w_states);
+ break;
+ default:
+ panic("pfi_kif_unref with unknown type");
+ }
+
+ if (kif->pfik_ifp != NULL || kif->pfik_group != NULL || kif == pfi_all)
+ return;
+
+ if (kif->pfik_rules || kif->pfik_states)
+ return;
+
+ RB_REMOVE(pfi_ifhead, &pfi_ifs, kif);
+ free(kif, PFI_MTYPE);
+}
+
+int
+pfi_kif_match(struct pfi_kif *rule_kif, struct pfi_kif *packet_kif)
+{
+ struct ifg_list *p;
+
+ if (rule_kif == NULL || rule_kif == packet_kif)
+ return (1);
+
+ if (rule_kif->pfik_group != NULL)
+ TAILQ_FOREACH(p, &packet_kif->pfik_ifp->if_groups, ifgl_next)
+ if (p->ifgl_group == rule_kif->pfik_group)
+ return (1);
+
+ return (0);
+}
+
+void
+pfi_attach_ifnet(struct ifnet *ifp)
+{
+ struct pfi_kif *kif;
+ int s;
+
+ pfi_initialize();
+ s = splsoftnet();
+ pfi_update++;
+ if ((kif = pfi_kif_get(ifp->if_xname)) == NULL)
+ panic("pfi_kif_get failed");
+
+ kif->pfik_ifp = ifp;
+ ifp->if_pf_kif = (caddr_t)kif;
+
+#ifndef __FreeBSD__
+ if ((kif->pfik_ah_cookie = hook_establish(ifp->if_addrhooks, 1,
+ pfi_kifaddr_update, kif)) == NULL)
+ panic("pfi_attach_ifnet: cannot allocate '%s' address hook",
+ ifp->if_xname);
+#endif
+
+ pfi_kif_update(kif);
+
+ splx(s);
+}
+
+void
+pfi_detach_ifnet(struct ifnet *ifp)
+{
+ int s;
+ struct pfi_kif *kif;
+
+ if ((kif = (struct pfi_kif *)ifp->if_pf_kif) == NULL)
+ return;
+
+ s = splsoftnet();
+ pfi_update++;
+#ifndef __FreeBSD__
+ hook_disestablish(ifp->if_addrhooks, kif->pfik_ah_cookie);
+#endif
+ pfi_kif_update(kif);
+
+ kif->pfik_ifp = NULL;
+ ifp->if_pf_kif = NULL;
+ pfi_kif_unref(kif, PFI_KIF_REF_NONE);
+ splx(s);
+}
+
+void
+pfi_attach_ifgroup(struct ifg_group *ifg)
+{
+ struct pfi_kif *kif;
+ int s;
+
+ pfi_initialize();
+ s = splsoftnet();
+ pfi_update++;
+ if ((kif = pfi_kif_get(ifg->ifg_group)) == NULL)
+ panic("pfi_kif_get failed");
+
+ kif->pfik_group = ifg;
+ ifg->ifg_pf_kif = (caddr_t)kif;
+
+ splx(s);
+}
+
+void
+pfi_detach_ifgroup(struct ifg_group *ifg)
+{
+ int s;
+ struct pfi_kif *kif;
+
+ if ((kif = (struct pfi_kif *)ifg->ifg_pf_kif) == NULL)
+ return;
+
+ s = splsoftnet();
+ pfi_update++;
+
+ kif->pfik_group = NULL;
+ ifg->ifg_pf_kif = NULL;
+ pfi_kif_unref(kif, PFI_KIF_REF_NONE);
+ splx(s);
+}
+
+void
+pfi_group_change(const char *group)
+{
+ struct pfi_kif *kif;
+ int s;
+
+ s = splsoftnet();
+ pfi_update++;
+ if ((kif = pfi_kif_get(group)) == NULL)
+ panic("pfi_kif_get failed");
+
+ pfi_kif_update(kif);
+
+ splx(s);
+}
+
+int
+pfi_match_addr(struct pfi_dynaddr *dyn, struct pf_addr *a, sa_family_t af)
+{
+ switch (af) {
+#ifdef INET
+ case AF_INET:
+ switch (dyn->pfid_acnt4) {
+ case 0:
+ return (0);
+ case 1:
+ return (PF_MATCHA(0, &dyn->pfid_addr4,
+ &dyn->pfid_mask4, a, AF_INET));
+ default:
+ return (pfr_match_addr(dyn->pfid_kt, a, AF_INET));
+ }
+ break;
+#endif /* INET */
+#ifdef INET6
+ case AF_INET6:
+ switch (dyn->pfid_acnt6) {
+ case 0:
+ return (0);
+ case 1:
+ return (PF_MATCHA(0, &dyn->pfid_addr6,
+ &dyn->pfid_mask6, a, AF_INET6));
+ default:
+ return (pfr_match_addr(dyn->pfid_kt, a, AF_INET6));
+ }
+ break;
+#endif /* INET6 */
+ default:
+ return (0);
+ }
+}
+
+int
+pfi_dynaddr_setup(struct pf_addr_wrap *aw, sa_family_t af)
+{
+ struct pfi_dynaddr *dyn;
+ char tblname[PF_TABLE_NAME_SIZE];
+ struct pf_ruleset *ruleset = NULL;
+ int s, rv = 0;
+
+ if (aw->type != PF_ADDR_DYNIFTL)
+ return (0);
+ if ((dyn = pool_get(&pfi_addr_pl, PR_NOWAIT)) == NULL)
+ return (1);
+ bzero(dyn, sizeof(*dyn));
+
+ s = splsoftnet();
+ if (!strcmp(aw->v.ifname, "self"))
+ dyn->pfid_kif = pfi_kif_get(IFG_ALL);
+ else
+ dyn->pfid_kif = pfi_kif_get(aw->v.ifname);
+ if (dyn->pfid_kif == NULL) {
+ rv = 1;
+ goto _bad;
+ }
+ pfi_kif_ref(dyn->pfid_kif, PFI_KIF_REF_RULE);
+
+ dyn->pfid_net = pfi_unmask(&aw->v.a.mask);
+ if (af == AF_INET && dyn->pfid_net == 32)
+ dyn->pfid_net = 128;
+ strlcpy(tblname, aw->v.ifname, sizeof(tblname));
+ if (aw->iflags & PFI_AFLAG_NETWORK)
+ strlcat(tblname, ":network", sizeof(tblname));
+ if (aw->iflags & PFI_AFLAG_BROADCAST)
+ strlcat(tblname, ":broadcast", sizeof(tblname));
+ if (aw->iflags & PFI_AFLAG_PEER)
+ strlcat(tblname, ":peer", sizeof(tblname));
+ if (aw->iflags & PFI_AFLAG_NOALIAS)
+ strlcat(tblname, ":0", sizeof(tblname));
+ if (dyn->pfid_net != 128)
+ snprintf(tblname + strlen(tblname),
+ sizeof(tblname) - strlen(tblname), "/%d", dyn->pfid_net);
+ if ((ruleset = pf_find_or_create_ruleset(PF_RESERVED_ANCHOR)) == NULL) {
+ rv = 1;
+ goto _bad;
+ }
+
+ if ((dyn->pfid_kt = pfr_attach_table(ruleset, tblname)) == NULL) {
+ rv = 1;
+ goto _bad;
+ }
+
+ dyn->pfid_kt->pfrkt_flags |= PFR_TFLAG_ACTIVE;
+ dyn->pfid_iflags = aw->iflags;
+ dyn->pfid_af = af;
+
+ TAILQ_INSERT_TAIL(&dyn->pfid_kif->pfik_dynaddrs, dyn, entry);
+ aw->p.dyn = dyn;
+ pfi_kif_update(dyn->pfid_kif);
+ splx(s);
+ return (0);
+
+_bad:
+ if (dyn->pfid_kt != NULL)
+ pfr_detach_table(dyn->pfid_kt);
+ if (ruleset != NULL)
+ pf_remove_if_empty_ruleset(ruleset);
+ if (dyn->pfid_kif != NULL)
+ pfi_kif_unref(dyn->pfid_kif, PFI_KIF_REF_RULE);
+ pool_put(&pfi_addr_pl, dyn);
+ splx(s);
+ return (rv);
+}
+
+void
+pfi_kif_update(struct pfi_kif *kif)
+{
+ struct ifg_list *ifgl;
+ struct pfi_dynaddr *p;
+
+ /* update all dynaddr */
+ TAILQ_FOREACH(p, &kif->pfik_dynaddrs, entry)
+ pfi_dynaddr_update(p);
+
+	/* again for all groups the kif is a member of */
+ if (kif->pfik_ifp != NULL)
+ TAILQ_FOREACH(ifgl, &kif->pfik_ifp->if_groups, ifgl_next)
+ pfi_kif_update((struct pfi_kif *)
+ ifgl->ifgl_group->ifg_pf_kif);
+}
+
+void
+pfi_dynaddr_update(struct pfi_dynaddr *dyn)
+{
+ struct pfi_kif *kif;
+ struct pfr_ktable *kt;
+
+ if (dyn == NULL || dyn->pfid_kif == NULL || dyn->pfid_kt == NULL)
+ panic("pfi_dynaddr_update");
+
+ kif = dyn->pfid_kif;
+ kt = dyn->pfid_kt;
+
+ if (kt->pfrkt_larg != pfi_update) {
+ /* this table needs to be brought up-to-date */
+ pfi_table_update(kt, kif, dyn->pfid_net, dyn->pfid_iflags);
+ kt->pfrkt_larg = pfi_update;
+ }
+ pfr_dynaddr_update(kt, dyn);
+}
+
+void
+pfi_table_update(struct pfr_ktable *kt, struct pfi_kif *kif, int net, int flags)
+{
+ int e, size2 = 0;
+ struct ifg_member *ifgm;
+
+ pfi_buffer_cnt = 0;
+
+ if (kif->pfik_ifp != NULL)
+ pfi_instance_add(kif->pfik_ifp, net, flags);
+ else if (kif->pfik_group != NULL)
+ TAILQ_FOREACH(ifgm, &kif->pfik_group->ifg_members, ifgm_next)
+ pfi_instance_add(ifgm->ifgm_ifp, net, flags);
+
+ if ((e = pfr_set_addrs(&kt->pfrkt_t, pfi_buffer, pfi_buffer_cnt, &size2,
+ NULL, NULL, NULL, 0, PFR_TFLAG_ALLMASK)))
+ printf("pfi_table_update: cannot set %d new addresses "
+ "into table %s: %d\n", pfi_buffer_cnt, kt->pfrkt_name, e);
+}
+
+void
+pfi_instance_add(struct ifnet *ifp, int net, int flags)
+{
+ struct ifaddr *ia;
+ int got4 = 0, got6 = 0;
+ int net2, af;
+
+ if (ifp == NULL)
+ return;
+ TAILQ_FOREACH(ia, &ifp->if_addrlist, ifa_list) {
+ if (ia->ifa_addr == NULL)
+ continue;
+ af = ia->ifa_addr->sa_family;
+ if (af != AF_INET && af != AF_INET6)
+ continue;
+#ifdef __FreeBSD__
+		/*
+		 * XXX: For point-to-point interfaces with (ifname:0) and
+		 *	IPv4, skip addresses without a proper route, to work
+		 *	around a problem with ppp not fully removing the
+		 *	address used during IPCP.
+		 */
+ if ((ifp->if_flags & IFF_POINTOPOINT) &&
+ !(ia->ifa_flags & IFA_ROUTE) &&
+ (flags & PFI_AFLAG_NOALIAS) && (af == AF_INET))
+ continue;
+#endif
+ if ((flags & PFI_AFLAG_BROADCAST) && af == AF_INET6)
+ continue;
+ if ((flags & PFI_AFLAG_BROADCAST) &&
+ !(ifp->if_flags & IFF_BROADCAST))
+ continue;
+ if ((flags & PFI_AFLAG_PEER) &&
+ !(ifp->if_flags & IFF_POINTOPOINT))
+ continue;
+ if ((flags & PFI_AFLAG_NETWORK) && af == AF_INET6 &&
+ IN6_IS_ADDR_LINKLOCAL(
+ &((struct sockaddr_in6 *)ia->ifa_addr)->sin6_addr))
+ continue;
+ if (flags & PFI_AFLAG_NOALIAS) {
+ if (af == AF_INET && got4)
+ continue;
+ if (af == AF_INET6 && got6)
+ continue;
+ }
+ if (af == AF_INET)
+ got4 = 1;
+ else if (af == AF_INET6)
+ got6 = 1;
+ net2 = net;
+ if (net2 == 128 && (flags & PFI_AFLAG_NETWORK)) {
+ if (af == AF_INET)
+ net2 = pfi_unmask(&((struct sockaddr_in *)
+ ia->ifa_netmask)->sin_addr);
+ else if (af == AF_INET6)
+ net2 = pfi_unmask(&((struct sockaddr_in6 *)
+ ia->ifa_netmask)->sin6_addr);
+ }
+ if (af == AF_INET && net2 > 32)
+ net2 = 32;
+ if (flags & PFI_AFLAG_BROADCAST)
+ pfi_address_add(ia->ifa_broadaddr, af, net2);
+ else if (flags & PFI_AFLAG_PEER)
+ pfi_address_add(ia->ifa_dstaddr, af, net2);
+ else
+ pfi_address_add(ia->ifa_addr, af, net2);
+ }
+}
+
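+/*
+ * Example: adding an AF_INET address 10.1.2.3 with net = 24 stores
+ * 10.1.2.0/24 in the buffer; the masking below clears every address
+ * bit past the prefix length.
+ */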
+void
+pfi_address_add(struct sockaddr *sa, int af, int net)
+{
+ struct pfr_addr *p;
+ int i;
+
+ if (pfi_buffer_cnt >= pfi_buffer_max) {
+ int new_max = pfi_buffer_max * 2;
+
+ if (new_max > PFI_BUFFER_MAX) {
+ printf("pfi_address_add: address buffer full (%d/%d)\n",
+ pfi_buffer_cnt, PFI_BUFFER_MAX);
+ return;
+ }
+ p = malloc(new_max * sizeof(*pfi_buffer), PFI_MTYPE,
+#ifdef __FreeBSD__
+ M_NOWAIT);
+#else
+ M_DONTWAIT);
+#endif
+ if (p == NULL) {
+ printf("pfi_address_add: no memory to grow buffer "
+ "(%d/%d)\n", pfi_buffer_cnt, PFI_BUFFER_MAX);
+ return;
+ }
+ memcpy(p, pfi_buffer, pfi_buffer_max * sizeof(*pfi_buffer));
+ /* no need to zero buffer */
+ free(pfi_buffer, PFI_MTYPE);
+ pfi_buffer = p;
+ pfi_buffer_max = new_max;
+ }
+ if (af == AF_INET && net > 32)
+ net = 128;
+ p = pfi_buffer + pfi_buffer_cnt++;
+ bzero(p, sizeof(*p));
+ p->pfra_af = af;
+ p->pfra_net = net;
+ if (af == AF_INET)
+ p->pfra_ip4addr = ((struct sockaddr_in *)sa)->sin_addr;
+ else if (af == AF_INET6) {
+ p->pfra_ip6addr = ((struct sockaddr_in6 *)sa)->sin6_addr;
+ if (IN6_IS_SCOPE_EMBED(&p->pfra_ip6addr))
+ p->pfra_ip6addr.s6_addr16[1] = 0;
+ }
+ /* mask network address bits */
+ if (net < 128)
+ ((caddr_t)p)[p->pfra_net/8] &= ~(0xFF >> (p->pfra_net%8));
+ for (i = (p->pfra_net+7)/8; i < sizeof(p->pfra_u); i++)
+ ((caddr_t)p)[i] = 0;
+}
+
+void
+pfi_dynaddr_remove(struct pf_addr_wrap *aw)
+{
+ int s;
+
+ if (aw->type != PF_ADDR_DYNIFTL || aw->p.dyn == NULL ||
+ aw->p.dyn->pfid_kif == NULL || aw->p.dyn->pfid_kt == NULL)
+ return;
+
+ s = splsoftnet();
+ TAILQ_REMOVE(&aw->p.dyn->pfid_kif->pfik_dynaddrs, aw->p.dyn, entry);
+ pfi_kif_unref(aw->p.dyn->pfid_kif, PFI_KIF_REF_RULE);
+ aw->p.dyn->pfid_kif = NULL;
+ pfr_detach_table(aw->p.dyn->pfid_kt);
+ aw->p.dyn->pfid_kt = NULL;
+ pool_put(&pfi_addr_pl, aw->p.dyn);
+ aw->p.dyn = NULL;
+ splx(s);
+}
+
+void
+pfi_dynaddr_copyout(struct pf_addr_wrap *aw)
+{
+ if (aw->type != PF_ADDR_DYNIFTL || aw->p.dyn == NULL ||
+ aw->p.dyn->pfid_kif == NULL)
+ return;
+ aw->p.dyncnt = aw->p.dyn->pfid_acnt4 + aw->p.dyn->pfid_acnt6;
+}
+
+void
+pfi_kifaddr_update(void *v)
+{
+ int s;
+ struct pfi_kif *kif = (struct pfi_kif *)v;
+
+ s = splsoftnet();
+ pfi_update++;
+ pfi_kif_update(kif);
+ splx(s);
+}
+
+int
+pfi_if_compare(struct pfi_kif *p, struct pfi_kif *q)
+{
+ return (strncmp(p->pfik_name, q->pfik_name, IFNAMSIZ));
+}
+
+void
+pfi_fill_oldstatus(struct pf_status *pfs)
+{
+ struct pfi_kif *p;
+ struct pfi_kif_cmp key;
+ int i, j, k, s;
+
+ strlcpy(key.pfik_name, pfs->ifname, sizeof(key.pfik_name));
+ s = splsoftnet();
+ p = RB_FIND(pfi_ifhead, &pfi_ifs, (struct pfi_kif *)&key);
+ if (p == NULL) {
+ splx(s);
+ return;
+ }
+ bzero(pfs->pcounters, sizeof(pfs->pcounters));
+ bzero(pfs->bcounters, sizeof(pfs->bcounters));
+ for (i = 0; i < 2; i++)
+ for (j = 0; j < 2; j++)
+ for (k = 0; k < 2; k++) {
+ pfs->pcounters[i][j][k] =
+ p->pfik_packets[i][j][k];
+ pfs->bcounters[i][j] +=
+ p->pfik_bytes[i][j][k];
+ }
+ splx(s);
+}
+
+int
+pfi_clr_istats(const char *name)
+{
+ struct pfi_kif *p;
+ int s;
+
+ s = splsoftnet();
+ RB_FOREACH(p, pfi_ifhead, &pfi_ifs) {
+ if (pfi_skip_if(name, p))
+ continue;
+ bzero(p->pfik_packets, sizeof(p->pfik_packets));
+ bzero(p->pfik_bytes, sizeof(p->pfik_bytes));
+ p->pfik_tzero = time_second;
+ }
+ splx(s);
+
+ return (0);
+}
+
+int
+pfi_get_ifaces(const char *name, struct pfi_kif *buf, int *size)
+{
+ struct pfi_kif *p, *nextp;
+ int s, n = 0;
+#ifdef __FreeBSD__
+ int error;
+#endif
+
+ s = splsoftnet();
+ for (p = RB_MIN(pfi_ifhead, &pfi_ifs); p; p = nextp) {
+ nextp = RB_NEXT(pfi_ifhead, &pfi_ifs, p);
+ if (pfi_skip_if(name, p))
+ continue;
+ if (*size > n++) {
+ if (!p->pfik_tzero)
+ p->pfik_tzero = time_second;
+ pfi_kif_ref(p, PFI_KIF_REF_RULE);
+#ifdef __FreeBSD__
+ PF_COPYOUT(p, buf++, sizeof(*buf), error);
+ if (error) {
+#else
+ if (copyout(p, buf++, sizeof(*buf))) {
+#endif
+ pfi_kif_unref(p, PFI_KIF_REF_RULE);
+ splx(s);
+ return (EFAULT);
+ }
+ nextp = RB_NEXT(pfi_ifhead, &pfi_ifs, p);
+ pfi_kif_unref(p, PFI_KIF_REF_RULE);
+ }
+ }
+ splx(s);
+ *size = n;
+ return (0);
+}
+
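+/*
+ * Example: the filter "em" matches "em0" and "em1" (a prefix followed
+ * by a digit), whereas the filter "em0" only matches the interface
+ * named exactly "em0".
+ */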
+int
+pfi_skip_if(const char *filter, struct pfi_kif *p)
+{
+ int n;
+
+ if (filter == NULL || !*filter)
+ return (0);
+ if (!strcmp(p->pfik_name, filter))
+ return (0); /* exact match */
+ n = strlen(filter);
+ if (n < 1 || n >= IFNAMSIZ)
+ return (1); /* sanity check */
+ if (filter[n-1] >= '0' && filter[n-1] <= '9')
+ return (1); /* only do exact match in that case */
+ if (strncmp(p->pfik_name, filter, n))
+ return (1); /* prefix doesn't match */
+ return (p->pfik_name[n] < '0' || p->pfik_name[n] > '9');
+}
+
+int
+pfi_set_flags(const char *name, int flags)
+{
+ struct pfi_kif *p;
+ int s;
+
+ s = splsoftnet();
+ RB_FOREACH(p, pfi_ifhead, &pfi_ifs) {
+ if (pfi_skip_if(name, p))
+ continue;
+ p->pfik_flags |= flags;
+ }
+ splx(s);
+ return (0);
+}
+
+int
+pfi_clear_flags(const char *name, int flags)
+{
+ struct pfi_kif *p;
+ int s;
+
+ s = splsoftnet();
+ RB_FOREACH(p, pfi_ifhead, &pfi_ifs) {
+ if (pfi_skip_if(name, p))
+ continue;
+ p->pfik_flags &= ~flags;
+ }
+ splx(s);
+ return (0);
+}
+
+/* from pf_print_state.c */
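+/*
+ * Example: for a mask of 255.255.255.0 the first word is not all-ones,
+ * so the bit loop counts 24 consecutive high bits and returns 24; an
+ * all-ones IPv6 mask returns 128.
+ */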
+int
+pfi_unmask(void *addr)
+{
+ struct pf_addr *m = addr;
+ int i = 31, j = 0, b = 0;
+ u_int32_t tmp;
+
+ while (j < 4 && m->addr32[j] == 0xffffffff) {
+ b += 32;
+ j++;
+ }
+ if (j < 4) {
+ tmp = ntohl(m->addr32[j]);
+ for (i = 31; tmp & (1 << i); --i)
+ b++;
+ }
+ return (b);
+}
+
+#ifdef __FreeBSD__
+void
+pfi_attach_ifnet_event(void *arg __unused, struct ifnet *ifp)
+{
+ PF_LOCK();
+ pfi_attach_ifnet(ifp);
+#ifdef ALTQ
+ pf_altq_ifnet_event(ifp, 0);
+#endif
+ PF_UNLOCK();
+}
+
+void
+pfi_detach_ifnet_event(void *arg __unused, struct ifnet *ifp)
+{
+ PF_LOCK();
+ pfi_detach_ifnet(ifp);
+#ifdef ALTQ
+ pf_altq_ifnet_event(ifp, 1);
+#endif
+ PF_UNLOCK();
+}
+
+void
+pfi_attach_group_event(void *arg __unused, struct ifg_group *ifg)
+{
+ PF_LOCK();
+ pfi_attach_ifgroup(ifg);
+ PF_UNLOCK();
+}
+
+void
+pfi_change_group_event(void *arg __unused, char *gname)
+{
+ PF_LOCK();
+ pfi_group_change(gname);
+ PF_UNLOCK();
+}
+
+void
+pfi_detach_group_event(void *arg __unused, struct ifg_group *ifg)
+{
+ PF_LOCK();
+ pfi_detach_ifgroup(ifg);
+ PF_UNLOCK();
+}
+
+void
+pfi_ifaddr_event(void *arg __unused, struct ifnet *ifp)
+{
+ PF_LOCK();
+ if (ifp && ifp->if_pf_kif)
+ pfi_kifaddr_update(ifp->if_pf_kif);
+ PF_UNLOCK();
+}
+#endif /* __FreeBSD__ */
diff --git a/contrib/pf/rtems/freebsd/net/pf_ioctl.c b/contrib/pf/rtems/freebsd/net/pf_ioctl.c
new file mode 100644
index 00000000..21032fa8
--- /dev/null
+++ b/contrib/pf/rtems/freebsd/net/pf_ioctl.c
@@ -0,0 +1,3896 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/* $OpenBSD: pf_ioctl.c,v 1.175 2007/02/26 22:47:43 deraadt Exp $ */
+
+/*
+ * Copyright (c) 2001 Daniel Hartmeier
+ * Copyright (c) 2002,2003 Henning Brauer
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Effort sponsored in part by the Defense Advanced Research Projects
+ * Agency (DARPA) and Air Force Research Laboratory, Air Force
+ * Materiel Command, USAF, under agreement number F30602-01-2-0537.
+ *
+ */
+
+#ifdef __FreeBSD__
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/freebsd/local/opt_inet.h>
+#include <rtems/freebsd/local/opt_inet6.h>
+#include <rtems/freebsd/local/opt_bpf.h>
+#include <rtems/freebsd/local/opt_pf.h>
+
+#ifdef DEV_BPF
+#define NBPFILTER DEV_BPF
+#else
+#define NBPFILTER 0
+#endif
+
+#ifdef DEV_PFLOG
+#define NPFLOG DEV_PFLOG
+#else
+#define NPFLOG 0
+#endif
+
+#ifdef DEV_PFSYNC
+#define NPFSYNC DEV_PFSYNC
+#else
+#define NPFSYNC 0
+#endif
+
+#else
+#include <rtems/freebsd/local/bpfilter.h>
+#include <rtems/freebsd/local/pflog.h>
+#include <rtems/freebsd/local/pfsync.h>
+#endif
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/filio.h>
+#include <rtems/freebsd/sys/fcntl.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/socketvar.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/time.h>
+#include <rtems/freebsd/sys/malloc.h>
+#ifdef __FreeBSD__
+#include <rtems/freebsd/sys/module.h>
+#include <rtems/freebsd/sys/conf.h>
+#include <rtems/freebsd/sys/proc.h>
+#include <rtems/freebsd/sys/sysctl.h>
+#else
+#include <rtems/freebsd/sys/timeout.h>
+#include <rtems/freebsd/sys/pool.h>
+#endif
+#include <rtems/freebsd/sys/proc.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/kthread.h>
+#ifndef __FreeBSD__
+#include <rtems/freebsd/sys/rwlock.h>
+#include <rtems/freebsd/uvm/uvm_extern.h>
+#endif
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/if_types.h>
+#ifdef __FreeBSD__
+#include <rtems/freebsd/net/vnet.h>
+#endif
+
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/in_var.h>
+#include <rtems/freebsd/netinet/in_systm.h>
+#include <rtems/freebsd/netinet/ip.h>
+#include <rtems/freebsd/netinet/ip_var.h>
+#include <rtems/freebsd/netinet/ip_icmp.h>
+
+#ifdef __FreeBSD__
+#include <rtems/freebsd/sys/md5.h>
+#else
+#include <rtems/freebsd/dev/rndvar.h>
+#include <rtems/freebsd/crypto/md5.h>
+#endif
+#include <rtems/freebsd/net/pfvar.h>
+
+#if NPFSYNC > 0
+#include <rtems/freebsd/net/if_pfsync.h>
+#endif /* NPFSYNC > 0 */
+
+#include <rtems/freebsd/net/if_pflog.h>
+
+#ifdef INET6
+#include <rtems/freebsd/netinet/ip6.h>
+#include <rtems/freebsd/netinet/in_pcb.h>
+#endif /* INET6 */
+
+#ifdef ALTQ
+#include <rtems/freebsd/altq/altq.h>
+#endif
+
+#ifdef __FreeBSD__
+#include <rtems/freebsd/sys/limits.h>
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/mutex.h>
+#include <rtems/freebsd/net/pfil.h>
+#endif /* __FreeBSD__ */
+
+#ifdef __FreeBSD__
+void init_zone_var(void);
+void cleanup_pf_zone(void);
+int pfattach(void);
+#else
+void pfattach(int);
+void pf_thread_create(void *);
+int pfopen(dev_t, int, int, struct proc *);
+int pfclose(dev_t, int, int, struct proc *);
+#endif
+struct pf_pool *pf_get_pool(char *, u_int32_t, u_int8_t, u_int32_t,
+ u_int8_t, u_int8_t, u_int8_t);
+
+void pf_mv_pool(struct pf_palist *, struct pf_palist *);
+void pf_empty_pool(struct pf_palist *);
+#ifdef __FreeBSD__
+int pfioctl(struct cdev *, u_long, caddr_t, int, struct thread *);
+#else
+int pfioctl(struct cdev *, u_long, caddr_t, int, struct proc *);
+#endif
+#ifdef ALTQ
+int pf_begin_altq(u_int32_t *);
+int pf_rollback_altq(u_int32_t);
+int pf_commit_altq(u_int32_t);
+int pf_enable_altq(struct pf_altq *);
+int pf_disable_altq(struct pf_altq *);
+#endif /* ALTQ */
+int pf_begin_rules(u_int32_t *, int, const char *);
+int pf_rollback_rules(u_int32_t, int, char *);
+int pf_setup_pfsync_matching(struct pf_ruleset *);
+void pf_hash_rule(MD5_CTX *, struct pf_rule *);
+void pf_hash_rule_addr(MD5_CTX *, struct pf_rule_addr *);
+int pf_commit_rules(u_int32_t, int, char *);
+
+struct pf_rule pf_default_rule;
+#ifdef __FreeBSD__
+struct sx pf_consistency_lock;
+SX_SYSINIT(pf_consistency_lock, &pf_consistency_lock, "pf_statetbl_lock");
+#else
+struct rwlock pf_consistency_lock = RWLOCK_INITIALIZER;
+#endif
+#ifdef ALTQ
+static int pf_altq_running;
+#endif
+
+#define TAGID_MAX 50000
+TAILQ_HEAD(pf_tags, pf_tagname) pf_tags = TAILQ_HEAD_INITIALIZER(pf_tags),
+ pf_qids = TAILQ_HEAD_INITIALIZER(pf_qids);
+
+#if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE)
+#error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE
+#endif
+u_int16_t tagname2tag(struct pf_tags *, char *);
+void tag2tagname(struct pf_tags *, u_int16_t, char *);
+void tag_unref(struct pf_tags *, u_int16_t);
+int pf_rtlabel_add(struct pf_addr_wrap *);
+void pf_rtlabel_remove(struct pf_addr_wrap *);
+void pf_rtlabel_copyout(struct pf_addr_wrap *);
+
+#define DPFPRINTF(n, x) if (pf_status.debug >= (n)) printf x
+
+
+#ifdef __FreeBSD__
+static struct cdev *pf_dev;
+
+/*
+ * XXX - These are new and need to be checked when moving to a new version
+ */
+static void pf_clear_states(void);
+static int pf_clear_tables(void);
+static void pf_clear_srcnodes(void);
+/*
+ * XXX - These are new and need to be checked when moving to a new version
+ */
+
+/*
+ * Wrapper functions for pfil(9) hooks
+ */
+static int pf_check_in(void *arg, struct mbuf **m, struct ifnet *ifp,
+ int dir, struct inpcb *inp);
+static int pf_check_out(void *arg, struct mbuf **m, struct ifnet *ifp,
+ int dir, struct inpcb *inp);
+#ifdef INET6
+static int pf_check6_in(void *arg, struct mbuf **m, struct ifnet *ifp,
+ int dir, struct inpcb *inp);
+static int pf_check6_out(void *arg, struct mbuf **m, struct ifnet *ifp,
+ int dir, struct inpcb *inp);
+#endif
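+
+/*
+ * A minimal sketch of such a wrapper (pf_check_in() as declared above):
+ * it bridges the pfil(9) hook signature to pf_test() and frees the
+ * mbuf when the verdict is not PF_PASS:
+ *
+ *	static int
+ *	pf_check_in(void *arg, struct mbuf **m, struct ifnet *ifp,
+ *	    int dir, struct inpcb *inp)
+ *	{
+ *		int chk;
+ *
+ *		chk = pf_test(PF_IN, ifp, m, NULL, inp);
+ *		if (chk && *m) {
+ *			m_freem(*m);
+ *			*m = NULL;
+ *		}
+ *		return (chk);
+ *	}
+ */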
+
+static int hook_pf(void);
+static int dehook_pf(void);
+static int shutdown_pf(void);
+static int pf_load(void);
+static int pf_unload(void);
+
+static struct cdevsw pf_cdevsw = {
+ .d_ioctl = pfioctl,
+ .d_name = PF_NAME,
+ .d_version = D_VERSION,
+};
+
+static volatile int pf_pfil_hooked = 0;
+int pf_end_threads = 0;
+struct mtx pf_task_mtx;
+pflog_packet_t *pflog_packet_ptr = NULL;
+
+int debug_pfugidhack = 0;
+SYSCTL_INT(_debug, OID_AUTO, pfugidhack, CTLFLAG_RW, &debug_pfugidhack, 0,
+ "Enable/disable pf user/group rules mpsafe hack");
+
+void
+init_pf_mutex(void)
+{
+ mtx_init(&pf_task_mtx, "pf task mtx", NULL, MTX_DEF);
+}
+
+void
+destroy_pf_mutex(void)
+{
+ mtx_destroy(&pf_task_mtx);
+}
+
+void
+init_zone_var(void)
+{
+ pf_src_tree_pl = pf_rule_pl = NULL;
+ pf_state_pl = pf_altq_pl = pf_pooladdr_pl = NULL;
+ pf_frent_pl = pf_frag_pl = pf_cache_pl = pf_cent_pl = NULL;
+ pf_state_scrub_pl = NULL;
+ pfr_ktable_pl = pfr_kentry_pl = NULL;
+}
+
+void
+cleanup_pf_zone(void)
+{
+ UMA_DESTROY(pf_src_tree_pl);
+ UMA_DESTROY(pf_rule_pl);
+ UMA_DESTROY(pf_state_pl);
+ UMA_DESTROY(pf_altq_pl);
+ UMA_DESTROY(pf_pooladdr_pl);
+ UMA_DESTROY(pf_frent_pl);
+ UMA_DESTROY(pf_frag_pl);
+ UMA_DESTROY(pf_cache_pl);
+ UMA_DESTROY(pf_cent_pl);
+ UMA_DESTROY(pfr_ktable_pl);
+ UMA_DESTROY(pfr_kentry_pl2);
+ UMA_DESTROY(pfr_kentry_pl);
+ UMA_DESTROY(pf_state_scrub_pl);
+ UMA_DESTROY(pfi_addr_pl);
+}
+
+int
+pfattach(void)
+{
+ u_int32_t *my_timeout = pf_default_rule.timeout;
+ int error = 1;
+
+ do {
+		UMA_CREATE(pf_src_tree_pl, struct pf_src_node, "pfsrctrpl");
+ UMA_CREATE(pf_rule_pl, struct pf_rule, "pfrulepl");
+ UMA_CREATE(pf_state_pl, struct pf_state, "pfstatepl");
+ UMA_CREATE(pf_altq_pl, struct pf_altq, "pfaltqpl");
+ UMA_CREATE(pf_pooladdr_pl, struct pf_pooladdr, "pfpooladdrpl");
+ UMA_CREATE(pfr_ktable_pl, struct pfr_ktable, "pfrktable");
+ UMA_CREATE(pfr_kentry_pl, struct pfr_kentry, "pfrkentry");
+ UMA_CREATE(pfr_kentry_pl2, struct pfr_kentry, "pfrkentry2");
+ UMA_CREATE(pf_frent_pl, struct pf_frent, "pffrent");
+ UMA_CREATE(pf_frag_pl, struct pf_fragment, "pffrag");
+ UMA_CREATE(pf_cache_pl, struct pf_fragment, "pffrcache");
+ UMA_CREATE(pf_cent_pl, struct pf_frcache, "pffrcent");
+ UMA_CREATE(pf_state_scrub_pl, struct pf_state_scrub,
+ "pfstatescrub");
+ UMA_CREATE(pfi_addr_pl, struct pfi_dynaddr, "pfiaddrpl");
+ error = 0;
+	} while (0);
+ if (error) {
+ cleanup_pf_zone();
+ return (error);
+ }
+ pfr_initialize();
+ pfi_initialize();
+	if ((error = pf_osfp_initialize())) {
+ cleanup_pf_zone();
+ pf_osfp_cleanup();
+ return (error);
+ }
+
+ pf_pool_limits[PF_LIMIT_STATES].pp = pf_state_pl;
+ pf_pool_limits[PF_LIMIT_STATES].limit = PFSTATE_HIWAT;
+ pf_pool_limits[PF_LIMIT_SRC_NODES].pp = pf_src_tree_pl;
+ pf_pool_limits[PF_LIMIT_SRC_NODES].limit = PFSNODE_HIWAT;
+ pf_pool_limits[PF_LIMIT_FRAGS].pp = pf_frent_pl;
+ pf_pool_limits[PF_LIMIT_FRAGS].limit = PFFRAG_FRENT_HIWAT;
+ pf_pool_limits[PF_LIMIT_TABLES].pp = pfr_ktable_pl;
+ pf_pool_limits[PF_LIMIT_TABLES].limit = PFR_KTABLE_HIWAT;
+ pf_pool_limits[PF_LIMIT_TABLE_ENTRIES].pp = pfr_kentry_pl;
+ pf_pool_limits[PF_LIMIT_TABLE_ENTRIES].limit = PFR_KENTRY_HIWAT;
+ uma_zone_set_max(pf_pool_limits[PF_LIMIT_STATES].pp,
+ pf_pool_limits[PF_LIMIT_STATES].limit);
+
+ RB_INIT(&tree_src_tracking);
+ RB_INIT(&pf_anchors);
+ pf_init_ruleset(&pf_main_ruleset);
+ TAILQ_INIT(&pf_altqs[0]);
+ TAILQ_INIT(&pf_altqs[1]);
+ TAILQ_INIT(&pf_pabuf);
+ pf_altqs_active = &pf_altqs[0];
+ pf_altqs_inactive = &pf_altqs[1];
+ TAILQ_INIT(&state_list);
+
+ /* default rule should never be garbage collected */
+ pf_default_rule.entries.tqe_prev = &pf_default_rule.entries.tqe_next;
+ pf_default_rule.action = PF_PASS;
+ pf_default_rule.nr = -1;
+ pf_default_rule.rtableid = -1;
+
+ /* initialize default timeouts */
+ my_timeout[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
+ my_timeout[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL;
+ my_timeout[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
+ my_timeout[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL;
+ my_timeout[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL;
+ my_timeout[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL;
+ my_timeout[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL;
+ my_timeout[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL;
+ my_timeout[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL;
+ my_timeout[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL;
+ my_timeout[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL;
+ my_timeout[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL;
+ my_timeout[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL;
+ my_timeout[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL;
+ my_timeout[PFTM_FRAG] = PFTM_FRAG_VAL;
+ my_timeout[PFTM_INTERVAL] = PFTM_INTERVAL_VAL;
+ my_timeout[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL;
+ my_timeout[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL;
+ my_timeout[PFTM_ADAPTIVE_START] = PFSTATE_ADAPT_START;
+ my_timeout[PFTM_ADAPTIVE_END] = PFSTATE_ADAPT_END;
+
+ pf_normalize_init();
+ bzero(&pf_status, sizeof(pf_status));
+ pf_status.debug = PF_DEBUG_URGENT;
+
+ pf_pfil_hooked = 0;
+
+ /* XXX do our best to avoid a conflict */
+ pf_status.hostid = arc4random();
+
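+	/* state purging needs process context, so run it in a kernel thread */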
+ if (kproc_create(pf_purge_thread, NULL, NULL, 0, 0, "pfpurge"))
+ return (ENXIO);
+
+ return (error);
+}
+#else /* !__FreeBSD__ */
+void
+pfattach(int num)
+{
+ u_int32_t *timeout = pf_default_rule.timeout;
+
+ pool_init(&pf_rule_pl, sizeof(struct pf_rule), 0, 0, 0, "pfrulepl",
+ &pool_allocator_nointr);
+ pool_init(&pf_src_tree_pl, sizeof(struct pf_src_node), 0, 0, 0,
+ "pfsrctrpl", NULL);
+ pool_init(&pf_state_pl, sizeof(struct pf_state), 0, 0, 0, "pfstatepl",
+ NULL);
+ pool_init(&pf_altq_pl, sizeof(struct pf_altq), 0, 0, 0, "pfaltqpl",
+ &pool_allocator_nointr);
+ pool_init(&pf_pooladdr_pl, sizeof(struct pf_pooladdr), 0, 0, 0,
+ "pfpooladdrpl", &pool_allocator_nointr);
+ pfr_initialize();
+ pfi_initialize();
+ pf_osfp_initialize();
+
+ pool_sethardlimit(pf_pool_limits[PF_LIMIT_STATES].pp,
+ pf_pool_limits[PF_LIMIT_STATES].limit, NULL, 0);
+
+ if (ctob(physmem) <= 100*1024*1024)
+ pf_pool_limits[PF_LIMIT_TABLE_ENTRIES].limit =
+ PFR_KENTRY_HIWAT_SMALL;
+
+ RB_INIT(&tree_src_tracking);
+ RB_INIT(&pf_anchors);
+ pf_init_ruleset(&pf_main_ruleset);
+ TAILQ_INIT(&pf_altqs[0]);
+ TAILQ_INIT(&pf_altqs[1]);
+ TAILQ_INIT(&pf_pabuf);
+ pf_altqs_active = &pf_altqs[0];
+ pf_altqs_inactive = &pf_altqs[1];
+ TAILQ_INIT(&state_list);
+
+ /* default rule should never be garbage collected */
+ pf_default_rule.entries.tqe_prev = &pf_default_rule.entries.tqe_next;
+ pf_default_rule.action = PF_PASS;
+ pf_default_rule.nr = -1;
+ pf_default_rule.rtableid = -1;
+
+ /* initialize default timeouts */
+ timeout[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
+ timeout[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL;
+ timeout[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
+ timeout[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL;
+ timeout[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL;
+ timeout[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL;
+ timeout[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL;
+ timeout[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL;
+ timeout[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL;
+ timeout[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL;
+ timeout[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL;
+ timeout[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL;
+ timeout[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL;
+ timeout[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL;
+ timeout[PFTM_FRAG] = PFTM_FRAG_VAL;
+ timeout[PFTM_INTERVAL] = PFTM_INTERVAL_VAL;
+ timeout[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL;
+ timeout[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL;
+ timeout[PFTM_ADAPTIVE_START] = PFSTATE_ADAPT_START;
+ timeout[PFTM_ADAPTIVE_END] = PFSTATE_ADAPT_END;
+
+ pf_normalize_init();
+ bzero(&pf_status, sizeof(pf_status));
+ pf_status.debug = PF_DEBUG_URGENT;
+
+ /* XXX do our best to avoid a conflict */
+ pf_status.hostid = arc4random();
+
+ /* require process context to purge states, so perform in a thread */
+ kproc_create_deferred(pf_thread_create, NULL);
+}
+
+void
+pf_thread_create(void *v)
+{
+ if (kproc_create(pf_purge_thread, NULL, NULL, "pfpurge"))
+ panic("pfpurge thread");
+}
+
+int
+pfopen(struct cdev *dev, int flags, int fmt, struct proc *p)
+{
+ if (dev2unit(dev) >= 1)
+ return (ENXIO);
+ return (0);
+}
+
+int
+pfclose(struct cdev *dev, int flags, int fmt, struct proc *p)
+{
+ if (dev2unit(dev) >= 1)
+ return (ENXIO);
+ return (0);
+}
+#endif /* __FreeBSD__ */
+
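+/*
+ * Return the address pool of the rule selected by anchor, rule action and
+ * rule number (or the last rule when r_last is set); the ticket check
+ * guards against a concurrently modified ruleset.
+ */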
+struct pf_pool *
+pf_get_pool(char *anchor, u_int32_t ticket, u_int8_t rule_action,
+ u_int32_t rule_number, u_int8_t r_last, u_int8_t active,
+ u_int8_t check_ticket)
+{
+ struct pf_ruleset *ruleset;
+ struct pf_rule *rule;
+ int rs_num;
+
+ ruleset = pf_find_ruleset(anchor);
+ if (ruleset == NULL)
+ return (NULL);
+ rs_num = pf_get_ruleset_number(rule_action);
+ if (rs_num >= PF_RULESET_MAX)
+ return (NULL);
+ if (active) {
+ if (check_ticket && ticket !=
+ ruleset->rules[rs_num].active.ticket)
+ return (NULL);
+ if (r_last)
+ rule = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
+ pf_rulequeue);
+ else
+ rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
+ } else {
+ if (check_ticket && ticket !=
+ ruleset->rules[rs_num].inactive.ticket)
+ return (NULL);
+ if (r_last)
+ rule = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
+ pf_rulequeue);
+ else
+ rule = TAILQ_FIRST(ruleset->rules[rs_num].inactive.ptr);
+ }
+ if (!r_last) {
+ while ((rule != NULL) && (rule->nr != rule_number))
+ rule = TAILQ_NEXT(rule, entries);
+ }
+ if (rule == NULL)
+ return (NULL);
+
+ return (&rule->rpool);
+}
+
+void
+pf_mv_pool(struct pf_palist *poola, struct pf_palist *poolb)
+{
+ struct pf_pooladdr *mv_pool_pa;
+
+ while ((mv_pool_pa = TAILQ_FIRST(poola)) != NULL) {
+ TAILQ_REMOVE(poola, mv_pool_pa, entries);
+ TAILQ_INSERT_TAIL(poolb, mv_pool_pa, entries);
+ }
+}
+
+void
+pf_empty_pool(struct pf_palist *poola)
+{
+ struct pf_pooladdr *empty_pool_pa;
+
+ while ((empty_pool_pa = TAILQ_FIRST(poola)) != NULL) {
+ pfi_dynaddr_remove(&empty_pool_pa->addr);
+ pf_tbladdr_remove(&empty_pool_pa->addr);
+ pfi_kif_unref(empty_pool_pa->kif, PFI_KIF_REF_RULE);
+ TAILQ_REMOVE(poola, empty_pool_pa, entries);
+ pool_put(&pf_pooladdr_pl, empty_pool_pa);
+ }
+}
+
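+/*
+ * Unlink a rule from its queue and release its resources once no states,
+ * source nodes or queue entries reference it any longer.
+ */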
+void
+pf_rm_rule(struct pf_rulequeue *rulequeue, struct pf_rule *rule)
+{
+ if (rulequeue != NULL) {
+ if (rule->states <= 0) {
+ /*
+ * XXX - we need to remove the table *before* detaching
+ * the rule to make sure the table code does not delete
+ * the anchor under our feet.
+ */
+ pf_tbladdr_remove(&rule->src.addr);
+ pf_tbladdr_remove(&rule->dst.addr);
+ if (rule->overload_tbl)
+ pfr_detach_table(rule->overload_tbl);
+ }
+ TAILQ_REMOVE(rulequeue, rule, entries);
+ rule->entries.tqe_prev = NULL;
+ rule->nr = -1;
+ }
+
+ if (rule->states > 0 || rule->src_nodes > 0 ||
+ rule->entries.tqe_prev != NULL)
+ return;
+ pf_tag_unref(rule->tag);
+ pf_tag_unref(rule->match_tag);
+#ifdef ALTQ
+ if (rule->pqid != rule->qid)
+ pf_qid_unref(rule->pqid);
+ pf_qid_unref(rule->qid);
+#endif
+ pf_rtlabel_remove(&rule->src.addr);
+ pf_rtlabel_remove(&rule->dst.addr);
+ pfi_dynaddr_remove(&rule->src.addr);
+ pfi_dynaddr_remove(&rule->dst.addr);
+ if (rulequeue == NULL) {
+ pf_tbladdr_remove(&rule->src.addr);
+ pf_tbladdr_remove(&rule->dst.addr);
+ if (rule->overload_tbl)
+ pfr_detach_table(rule->overload_tbl);
+ }
+ pfi_kif_unref(rule->kif, PFI_KIF_REF_RULE);
+ pf_anchor_remove(rule);
+ pf_empty_pool(&rule->rpool.list);
+ pool_put(&pf_rule_pl, rule);
+}
+
+u_int16_t
+tagname2tag(struct pf_tags *head, char *tagname)
+{
+ struct pf_tagname *tag, *p = NULL;
+ u_int16_t new_tagid = 1;
+
+ TAILQ_FOREACH(tag, head, entries)
+ if (strcmp(tagname, tag->name) == 0) {
+ tag->ref++;
+ return (tag->tag);
+ }
+
+ /*
+ * to avoid fragmentation, we do a linear search from the beginning
+ * and take the first free slot we find. if there is none or the list
+ * is empty, append a new entry at the end.
+ */
+
+ /* new entry */
+ if (!TAILQ_EMPTY(head))
+ for (p = TAILQ_FIRST(head); p != NULL &&
+ p->tag == new_tagid; p = TAILQ_NEXT(p, entries))
+ new_tagid = p->tag + 1;
+
+ if (new_tagid > TAGID_MAX)
+ return (0);
+
+ /* allocate and fill new struct pf_tagname */
+ tag = (struct pf_tagname *)malloc(sizeof(struct pf_tagname),
+ M_TEMP, M_NOWAIT);
+ if (tag == NULL)
+ return (0);
+ bzero(tag, sizeof(struct pf_tagname));
+ strlcpy(tag->name, tagname, sizeof(tag->name));
+ tag->tag = new_tagid;
+ tag->ref++;
+
+ if (p != NULL) /* insert new entry before p */
+ TAILQ_INSERT_BEFORE(p, tag, entries);
+ else /* either list empty or no free slot in between */
+ TAILQ_INSERT_TAIL(head, tag, entries);
+
+ return (tag->tag);
+}
+
+void
+tag2tagname(struct pf_tags *head, u_int16_t tagid, char *p)
+{
+ struct pf_tagname *tag;
+
+ TAILQ_FOREACH(tag, head, entries)
+ if (tag->tag == tagid) {
+ strlcpy(p, tag->name, PF_TAG_NAME_SIZE);
+ return;
+ }
+}
+
+void
+tag_unref(struct pf_tags *head, u_int16_t tag)
+{
+ struct pf_tagname *p, *next;
+
+ if (tag == 0)
+ return;
+
+ for (p = TAILQ_FIRST(head); p != NULL; p = next) {
+ next = TAILQ_NEXT(p, entries);
+ if (tag == p->tag) {
+ if (--p->ref == 0) {
+ TAILQ_REMOVE(head, p, entries);
+ free(p, M_TEMP);
+ }
+ break;
+ }
+ }
+}
+
+u_int16_t
+pf_tagname2tag(char *tagname)
+{
+ return (tagname2tag(&pf_tags, tagname));
+}
+
+void
+pf_tag2tagname(u_int16_t tagid, char *p)
+{
+ tag2tagname(&pf_tags, tagid, p);
+}
+
+void
+pf_tag_ref(u_int16_t tag)
+{
+ struct pf_tagname *t;
+
+ TAILQ_FOREACH(t, &pf_tags, entries)
+ if (t->tag == tag)
+ break;
+ if (t != NULL)
+ t->ref++;
+}
+
+void
+pf_tag_unref(u_int16_t tag)
+{
+ tag_unref(&pf_tags, tag);
+}
+
+int
+pf_rtlabel_add(struct pf_addr_wrap *a)
+{
+#ifdef __FreeBSD__
+ /* XXX_IMPORT: later */
+ return (0);
+#else
+ if (a->type == PF_ADDR_RTLABEL &&
+ (a->v.rtlabel = rtlabel_name2id(a->v.rtlabelname)) == 0)
+ return (-1);
+ return (0);
+#endif
+}
+
+void
+pf_rtlabel_remove(struct pf_addr_wrap *a)
+{
+#ifdef __FreeBSD__
+ /* XXX_IMPORT: later */
+#else
+ if (a->type == PF_ADDR_RTLABEL)
+ rtlabel_unref(a->v.rtlabel);
+#endif
+}
+
+void
+pf_rtlabel_copyout(struct pf_addr_wrap *a)
+{
+#ifdef __FreeBSD__
+ /* XXX_IMPORT: later */
+ if (a->type == PF_ADDR_RTLABEL && a->v.rtlabel)
+ strlcpy(a->v.rtlabelname, "?", sizeof(a->v.rtlabelname));
+#else
+ const char *name;
+
+ if (a->type == PF_ADDR_RTLABEL && a->v.rtlabel) {
+ if ((name = rtlabel_id2name(a->v.rtlabel)) == NULL)
+ strlcpy(a->v.rtlabelname, "?",
+ sizeof(a->v.rtlabelname));
+ else
+ strlcpy(a->v.rtlabelname, name,
+ sizeof(a->v.rtlabelname));
+ }
+#endif
+}
+
+#ifdef ALTQ
+u_int32_t
+pf_qname2qid(char *qname)
+{
+ return ((u_int32_t)tagname2tag(&pf_qids, qname));
+}
+
+void
+pf_qid2qname(u_int32_t qid, char *p)
+{
+ tag2tagname(&pf_qids, (u_int16_t)qid, p);
+}
+
+void
+pf_qid_unref(u_int32_t qid)
+{
+ tag_unref(&pf_qids, (u_int16_t)qid);
+}
+
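+/*
+ * ALTQ configuration changes are two-phase: pf_begin_altq opens an inactive
+ * list under a new ticket, and pf_commit_altq later swaps it with the active
+ * one (or pf_rollback_altq discards it).
+ */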
+int
+pf_begin_altq(u_int32_t *ticket)
+{
+ struct pf_altq *altq;
+ int error = 0;
+
+ /* Purge the old altq list */
+ while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
+ TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
+#ifdef __FreeBSD__
+ if (altq->qname[0] == 0 &&
+ (altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
+#else
+ if (altq->qname[0] == 0) {
+#endif
+ /* detach and destroy the discipline */
+ error = altq_remove(altq);
+ } else
+ pf_qid_unref(altq->qid);
+ pool_put(&pf_altq_pl, altq);
+ }
+ if (error)
+ return (error);
+ *ticket = ++ticket_altqs_inactive;
+ altqs_inactive_open = 1;
+ return (0);
+}
+
+int
+pf_rollback_altq(u_int32_t ticket)
+{
+ struct pf_altq *altq;
+ int error = 0;
+
+ if (!altqs_inactive_open || ticket != ticket_altqs_inactive)
+ return (0);
+ /* Purge the old altq list */
+ while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
+ TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
+#ifdef __FreeBSD__
+ if (altq->qname[0] == 0 &&
+ (altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
+#else
+ if (altq->qname[0] == 0) {
+#endif
+ /* detach and destroy the discipline */
+ error = altq_remove(altq);
+ } else
+ pf_qid_unref(altq->qid);
+ pool_put(&pf_altq_pl, altq);
+ }
+ altqs_inactive_open = 0;
+ return (error);
+}
+
+int
+pf_commit_altq(u_int32_t ticket)
+{
+ struct pf_altqqueue *old_altqs;
+ struct pf_altq *altq;
+ int s, err, error = 0;
+
+ if (!altqs_inactive_open || ticket != ticket_altqs_inactive)
+ return (EBUSY);
+
+ /* swap altqs, keep the old. */
+ s = splsoftnet();
+ old_altqs = pf_altqs_active;
+ pf_altqs_active = pf_altqs_inactive;
+ pf_altqs_inactive = old_altqs;
+ ticket_altqs_active = ticket_altqs_inactive;
+
+ /* Attach new disciplines */
+ TAILQ_FOREACH(altq, pf_altqs_active, entries) {
+#ifdef __FreeBSD__
+ if (altq->qname[0] == 0 &&
+ (altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
+#else
+ if (altq->qname[0] == 0) {
+#endif
+ /* attach the discipline */
+ error = altq_pfattach(altq);
+ if (error == 0 && pf_altq_running)
+ error = pf_enable_altq(altq);
+ if (error != 0) {
+ splx(s);
+ return (error);
+ }
+ }
+ }
+
+ /* Purge the old altq list */
+ while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
+ TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
+#ifdef __FreeBSD__
+ if (altq->qname[0] == 0 &&
+ (altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
+#else
+ if (altq->qname[0] == 0) {
+#endif
+ /* detach and destroy the discipline */
+ if (pf_altq_running)
+ error = pf_disable_altq(altq);
+ err = altq_pfdetach(altq);
+ if (err != 0 && error == 0)
+ error = err;
+ err = altq_remove(altq);
+ if (err != 0 && error == 0)
+ error = err;
+ } else
+ pf_qid_unref(altq->qid);
+ pool_put(&pf_altq_pl, altq);
+ }
+ splx(s);
+
+ altqs_inactive_open = 0;
+ return (error);
+}
+
+int
+pf_enable_altq(struct pf_altq *altq)
+{
+ struct ifnet *ifp;
+ struct tb_profile tb;
+ int s, error = 0;
+
+ if ((ifp = ifunit(altq->ifname)) == NULL)
+ return (EINVAL);
+
+ if (ifp->if_snd.altq_type != ALTQT_NONE)
+ error = altq_enable(&ifp->if_snd);
+
+ /* set tokenbucket regulator */
+ if (error == 0 && ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd)) {
+ tb.rate = altq->ifbandwidth;
+ tb.depth = altq->tbrsize;
+ s = splnet();
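+		/* on FreeBSD, drop the pf lock around tbr_set(), which may sleep */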
+#ifdef __FreeBSD__
+ PF_UNLOCK();
+#endif
+ error = tbr_set(&ifp->if_snd, &tb);
+#ifdef __FreeBSD__
+ PF_LOCK();
+#endif
+ splx(s);
+ }
+
+ return (error);
+}
+
+int
+pf_disable_altq(struct pf_altq *altq)
+{
+ struct ifnet *ifp;
+ struct tb_profile tb;
+ int s, error;
+
+ if ((ifp = ifunit(altq->ifname)) == NULL)
+ return (EINVAL);
+
+	/*
+	 * If the discipline is no longer referenced, it has been overridden
+	 * by a new one and there is nothing to disable; just return.
+	 */
+ if (altq->altq_disc != ifp->if_snd.altq_disc)
+ return (0);
+
+ error = altq_disable(&ifp->if_snd);
+
+ if (error == 0) {
+ /* clear tokenbucket regulator */
+ tb.rate = 0;
+ s = splnet();
+#ifdef __FreeBSD__
+ PF_UNLOCK();
+#endif
+ error = tbr_set(&ifp->if_snd, &tb);
+#ifdef __FreeBSD__
+ PF_LOCK();
+#endif
+ splx(s);
+ }
+
+ return (error);
+}
+
+#ifdef __FreeBSD__
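+/*
+ * Rebuild the ALTQ list when an interface arrives or departs: copy the
+ * active set, marking entries whose interface is gone as removed, then
+ * commit the copy.
+ */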
+void
+pf_altq_ifnet_event(struct ifnet *ifp, int remove)
+{
+ struct ifnet *ifp1;
+ struct pf_altq *a1, *a2, *a3;
+ u_int32_t ticket;
+ int error = 0;
+
+ /* Interrupt userland queue modifications */
+ if (altqs_inactive_open)
+ pf_rollback_altq(ticket_altqs_inactive);
+
+ /* Start new altq ruleset */
+ if (pf_begin_altq(&ticket))
+ return;
+
+ /* Copy the current active set */
+ TAILQ_FOREACH(a1, pf_altqs_active, entries) {
+ a2 = pool_get(&pf_altq_pl, PR_NOWAIT);
+ if (a2 == NULL) {
+ error = ENOMEM;
+ break;
+ }
+ bcopy(a1, a2, sizeof(struct pf_altq));
+
+ if (a2->qname[0] != 0) {
+ if ((a2->qid = pf_qname2qid(a2->qname)) == 0) {
+ error = EBUSY;
+ pool_put(&pf_altq_pl, a2);
+ break;
+ }
+ a2->altq_disc = NULL;
+ TAILQ_FOREACH(a3, pf_altqs_inactive, entries) {
+ if (strncmp(a3->ifname, a2->ifname,
+ IFNAMSIZ) == 0 && a3->qname[0] == 0) {
+ a2->altq_disc = a3->altq_disc;
+ break;
+ }
+ }
+ }
+		/* Clear the removed flag, then set it again if the interface is gone */
+ a2->local_flags &= ~PFALTQ_FLAG_IF_REMOVED;
+ if ((ifp1 = ifunit(a2->ifname)) == NULL ||
+ (remove && ifp1 == ifp)) {
+ a2->local_flags |= PFALTQ_FLAG_IF_REMOVED;
+ } else {
+ PF_UNLOCK();
+ error = altq_add(a2);
+ PF_LOCK();
+
+ if (ticket != ticket_altqs_inactive)
+ error = EBUSY;
+
+ if (error) {
+ pool_put(&pf_altq_pl, a2);
+ break;
+ }
+ }
+
+ TAILQ_INSERT_TAIL(pf_altqs_inactive, a2, entries);
+ }
+
+ if (error != 0)
+ pf_rollback_altq(ticket);
+ else
+ pf_commit_altq(ticket);
+}
+#endif
+#endif /* ALTQ */
+
+int
+pf_begin_rules(u_int32_t *ticket, int rs_num, const char *anchor)
+{
+ struct pf_ruleset *rs;
+ struct pf_rule *rule;
+
+ if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
+ return (EINVAL);
+ rs = pf_find_or_create_ruleset(anchor);
+ if (rs == NULL)
+ return (EINVAL);
+ while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
+ pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule);
+ rs->rules[rs_num].inactive.rcount--;
+ }
+ *ticket = ++rs->rules[rs_num].inactive.ticket;
+ rs->rules[rs_num].inactive.open = 1;
+ return (0);
+}
+
+int
+pf_rollback_rules(u_int32_t ticket, int rs_num, char *anchor)
+{
+ struct pf_ruleset *rs;
+ struct pf_rule *rule;
+
+ if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
+ return (EINVAL);
+ rs = pf_find_ruleset(anchor);
+ if (rs == NULL || !rs->rules[rs_num].inactive.open ||
+ rs->rules[rs_num].inactive.ticket != ticket)
+ return (0);
+ while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
+ pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule);
+ rs->rules[rs_num].inactive.rcount--;
+ }
+ rs->rules[rs_num].inactive.open = 0;
+ return (0);
+}
+
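+/*
+ * Helpers to feed rule fields into an MD5 context; multi-byte fields are
+ * hashed in network byte order so the resulting checksum is the same on
+ * hosts of either endianness.
+ */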
+#define PF_MD5_UPD(st, elm) \
+ MD5Update(ctx, (u_int8_t *) &(st)->elm, sizeof((st)->elm))
+
+#define PF_MD5_UPD_STR(st, elm) \
+ MD5Update(ctx, (u_int8_t *) (st)->elm, strlen((st)->elm))
+
+#define PF_MD5_UPD_HTONL(st, elm, stor) do { \
+ (stor) = htonl((st)->elm); \
+ MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int32_t));\
+} while (0)
+
+#define PF_MD5_UPD_HTONS(st, elm, stor) do { \
+ (stor) = htons((st)->elm); \
+ MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int16_t));\
+} while (0)
+
+void
+pf_hash_rule_addr(MD5_CTX *ctx, struct pf_rule_addr *pfr)
+{
+ PF_MD5_UPD(pfr, addr.type);
+ switch (pfr->addr.type) {
+ case PF_ADDR_DYNIFTL:
+ PF_MD5_UPD(pfr, addr.v.ifname);
+ PF_MD5_UPD(pfr, addr.iflags);
+ break;
+ case PF_ADDR_TABLE:
+ PF_MD5_UPD(pfr, addr.v.tblname);
+ break;
+ case PF_ADDR_ADDRMASK:
+ /* XXX ignore af? */
+ PF_MD5_UPD(pfr, addr.v.a.addr.addr32);
+ PF_MD5_UPD(pfr, addr.v.a.mask.addr32);
+ break;
+ case PF_ADDR_RTLABEL:
+ PF_MD5_UPD(pfr, addr.v.rtlabelname);
+ break;
+ }
+
+ PF_MD5_UPD(pfr, port[0]);
+ PF_MD5_UPD(pfr, port[1]);
+ PF_MD5_UPD(pfr, neg);
+ PF_MD5_UPD(pfr, port_op);
+}
+
+void
+pf_hash_rule(MD5_CTX *ctx, struct pf_rule *rule)
+{
+ u_int16_t x;
+ u_int32_t y;
+
+ pf_hash_rule_addr(ctx, &rule->src);
+ pf_hash_rule_addr(ctx, &rule->dst);
+ PF_MD5_UPD_STR(rule, label);
+ PF_MD5_UPD_STR(rule, ifname);
+ PF_MD5_UPD_STR(rule, match_tagname);
+ PF_MD5_UPD_HTONS(rule, match_tag, x); /* dup? */
+ PF_MD5_UPD_HTONL(rule, os_fingerprint, y);
+ PF_MD5_UPD_HTONL(rule, prob, y);
+ PF_MD5_UPD_HTONL(rule, uid.uid[0], y);
+ PF_MD5_UPD_HTONL(rule, uid.uid[1], y);
+ PF_MD5_UPD(rule, uid.op);
+ PF_MD5_UPD_HTONL(rule, gid.gid[0], y);
+ PF_MD5_UPD_HTONL(rule, gid.gid[1], y);
+ PF_MD5_UPD(rule, gid.op);
+ PF_MD5_UPD_HTONL(rule, rule_flag, y);
+ PF_MD5_UPD(rule, action);
+ PF_MD5_UPD(rule, direction);
+ PF_MD5_UPD(rule, af);
+ PF_MD5_UPD(rule, quick);
+ PF_MD5_UPD(rule, ifnot);
+ PF_MD5_UPD(rule, match_tag_not);
+ PF_MD5_UPD(rule, natpass);
+ PF_MD5_UPD(rule, keep_state);
+ PF_MD5_UPD(rule, proto);
+ PF_MD5_UPD(rule, type);
+ PF_MD5_UPD(rule, code);
+ PF_MD5_UPD(rule, flags);
+ PF_MD5_UPD(rule, flagset);
+ PF_MD5_UPD(rule, allow_opts);
+ PF_MD5_UPD(rule, rt);
+ PF_MD5_UPD(rule, tos);
+}
+
+int
+pf_commit_rules(u_int32_t ticket, int rs_num, char *anchor)
+{
+ struct pf_ruleset *rs;
+ struct pf_rule *rule, **old_array;
+ struct pf_rulequeue *old_rules;
+ int s, error;
+ u_int32_t old_rcount;
+
+ if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
+ return (EINVAL);
+ rs = pf_find_ruleset(anchor);
+ if (rs == NULL || !rs->rules[rs_num].inactive.open ||
+ ticket != rs->rules[rs_num].inactive.ticket)
+ return (EBUSY);
+
+ /* Calculate checksum for the main ruleset */
+ if (rs == &pf_main_ruleset) {
+ error = pf_setup_pfsync_matching(rs);
+ if (error != 0)
+ return (error);
+ }
+
+ /* Swap rules, keep the old. */
+ s = splsoftnet();
+ old_rules = rs->rules[rs_num].active.ptr;
+ old_rcount = rs->rules[rs_num].active.rcount;
+ old_array = rs->rules[rs_num].active.ptr_array;
+
+ rs->rules[rs_num].active.ptr =
+ rs->rules[rs_num].inactive.ptr;
+ rs->rules[rs_num].active.ptr_array =
+ rs->rules[rs_num].inactive.ptr_array;
+ rs->rules[rs_num].active.rcount =
+ rs->rules[rs_num].inactive.rcount;
+ rs->rules[rs_num].inactive.ptr = old_rules;
+ rs->rules[rs_num].inactive.ptr_array = old_array;
+ rs->rules[rs_num].inactive.rcount = old_rcount;
+
+ rs->rules[rs_num].active.ticket =
+ rs->rules[rs_num].inactive.ticket;
+ pf_calc_skip_steps(rs->rules[rs_num].active.ptr);
+
+
+ /* Purge the old rule list. */
+ while ((rule = TAILQ_FIRST(old_rules)) != NULL)
+ pf_rm_rule(old_rules, rule);
+ if (rs->rules[rs_num].inactive.ptr_array)
+ free(rs->rules[rs_num].inactive.ptr_array, M_TEMP);
+ rs->rules[rs_num].inactive.ptr_array = NULL;
+ rs->rules[rs_num].inactive.rcount = 0;
+ rs->rules[rs_num].inactive.open = 0;
+ pf_remove_if_empty_ruleset(rs);
+ splx(s);
+ return (0);
+}
+
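+/*
+ * Hash every rule of the inactive rulesets into pf_status.pf_chksum and
+ * rebuild the per-ruleset rule pointer arrays; the checksum lets pfsync
+ * peers detect whether they run the same ruleset.
+ */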
+int
+pf_setup_pfsync_matching(struct pf_ruleset *rs)
+{
+ MD5_CTX ctx;
+ struct pf_rule *rule;
+ int rs_cnt;
+ u_int8_t digest[PF_MD5_DIGEST_LENGTH];
+
+ MD5Init(&ctx);
+ for (rs_cnt = 0; rs_cnt < PF_RULESET_MAX; rs_cnt++) {
+ /* XXX PF_RULESET_SCRUB as well? */
+ if (rs_cnt == PF_RULESET_SCRUB)
+ continue;
+
+ if (rs->rules[rs_cnt].inactive.ptr_array)
+ free(rs->rules[rs_cnt].inactive.ptr_array, M_TEMP);
+ rs->rules[rs_cnt].inactive.ptr_array = NULL;
+
+ if (rs->rules[rs_cnt].inactive.rcount) {
+ rs->rules[rs_cnt].inactive.ptr_array =
+ malloc(sizeof(caddr_t) *
+ rs->rules[rs_cnt].inactive.rcount,
+ M_TEMP, M_NOWAIT);
+
+ if (!rs->rules[rs_cnt].inactive.ptr_array)
+ return (ENOMEM);
+ }
+
+ TAILQ_FOREACH(rule, rs->rules[rs_cnt].inactive.ptr,
+ entries) {
+ pf_hash_rule(&ctx, rule);
+ (rs->rules[rs_cnt].inactive.ptr_array)[rule->nr] = rule;
+ }
+ }
+
+ MD5Final(digest, &ctx);
+ memcpy(pf_status.pf_chksum, digest, sizeof(pf_status.pf_chksum));
+ return (0);
+}
+
+int
+#ifdef __FreeBSD__
+pfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td)
+#else
+pfioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct proc *p)
+#endif
+{
+ struct pf_pooladdr *pa = NULL;
+ struct pf_pool *pool = NULL;
+#ifndef __FreeBSD__
+ int s;
+#endif
+ int error = 0;
+
+ /* XXX keep in sync with switch() below */
+#ifdef __FreeBSD__
+ if (securelevel_gt(td->td_ucred, 2))
+#else
+ if (securelevel > 1)
+#endif
+ switch (cmd) {
+ case DIOCGETRULES:
+ case DIOCGETRULE:
+ case DIOCGETADDRS:
+ case DIOCGETADDR:
+ case DIOCGETSTATE:
+ case DIOCSETSTATUSIF:
+ case DIOCGETSTATUS:
+ case DIOCCLRSTATUS:
+ case DIOCNATLOOK:
+ case DIOCSETDEBUG:
+ case DIOCGETSTATES:
+ case DIOCGETTIMEOUT:
+ case DIOCCLRRULECTRS:
+ case DIOCGETLIMIT:
+ case DIOCGETALTQS:
+ case DIOCGETALTQ:
+ case DIOCGETQSTATS:
+ case DIOCGETRULESETS:
+ case DIOCGETRULESET:
+ case DIOCRGETTABLES:
+ case DIOCRGETTSTATS:
+ case DIOCRCLRTSTATS:
+ case DIOCRCLRADDRS:
+ case DIOCRADDADDRS:
+ case DIOCRDELADDRS:
+ case DIOCRSETADDRS:
+ case DIOCRGETADDRS:
+ case DIOCRGETASTATS:
+ case DIOCRCLRASTATS:
+ case DIOCRTSTADDRS:
+ case DIOCOSFPGET:
+ case DIOCGETSRCNODES:
+ case DIOCCLRSRCNODES:
+ case DIOCIGETIFACES:
+#ifdef __FreeBSD__
+ case DIOCGIFSPEED:
+#endif
+ case DIOCSETIFFLAG:
+ case DIOCCLRIFFLAG:
+ break;
+ case DIOCRCLRTABLES:
+ case DIOCRADDTABLES:
+ case DIOCRDELTABLES:
+ case DIOCRSETTFLAGS:
+ if (((struct pfioc_table *)addr)->pfrio_flags &
+ PFR_FLAG_DUMMY)
+ break; /* dummy operation ok */
+ return (EPERM);
+ default:
+ return (EPERM);
+ }
+
+ if (!(flags & FWRITE))
+ switch (cmd) {
+ case DIOCGETRULES:
+ case DIOCGETADDRS:
+ case DIOCGETADDR:
+ case DIOCGETSTATE:
+ case DIOCGETSTATUS:
+ case DIOCGETSTATES:
+ case DIOCGETTIMEOUT:
+ case DIOCGETLIMIT:
+ case DIOCGETALTQS:
+ case DIOCGETALTQ:
+ case DIOCGETQSTATS:
+ case DIOCGETRULESETS:
+ case DIOCGETRULESET:
+ case DIOCNATLOOK:
+ case DIOCRGETTABLES:
+ case DIOCRGETTSTATS:
+ case DIOCRGETADDRS:
+ case DIOCRGETASTATS:
+ case DIOCRTSTADDRS:
+ case DIOCOSFPGET:
+ case DIOCGETSRCNODES:
+ case DIOCIGETIFACES:
+#ifdef __FreeBSD__
+ case DIOCGIFSPEED:
+#endif
+ break;
+ case DIOCRCLRTABLES:
+ case DIOCRADDTABLES:
+ case DIOCRDELTABLES:
+ case DIOCRCLRTSTATS:
+ case DIOCRCLRADDRS:
+ case DIOCRADDADDRS:
+ case DIOCRDELADDRS:
+ case DIOCRSETADDRS:
+ case DIOCRSETTFLAGS:
+ if (((struct pfioc_table *)addr)->pfrio_flags &
+ PFR_FLAG_DUMMY) {
+ flags |= FWRITE; /* need write lock for dummy */
+ break; /* dummy operation ok */
+ }
+ return (EACCES);
+ case DIOCGETRULE:
+ if (((struct pfioc_rule *)addr)->action == PF_GET_CLR_CNTR)
+ return (EACCES);
+ break;
+ default:
+ return (EACCES);
+ }
+
+ if (flags & FWRITE)
+#ifdef __FreeBSD__
+ sx_xlock(&pf_consistency_lock);
+ else
+ sx_slock(&pf_consistency_lock);
+#else
+ rw_enter_write(&pf_consistency_lock);
+ else
+ rw_enter_read(&pf_consistency_lock);
+#endif
+
+#ifdef __FreeBSD__
+ PF_LOCK();
+#else
+ s = splsoftnet();
+#endif
+ switch (cmd) {
+
+ case DIOCSTART:
+ if (pf_status.running)
+ error = EEXIST;
+ else {
+#ifdef __FreeBSD__
+ PF_UNLOCK();
+ error = hook_pf();
+ PF_LOCK();
+ if (error) {
+ DPFPRINTF(PF_DEBUG_MISC,
+ ("pf: pfil registeration fail\n"));
+ break;
+ }
+#endif
+ pf_status.running = 1;
+ pf_status.since = time_second;
+ if (pf_status.stateid == 0) {
+ pf_status.stateid = time_second;
+ pf_status.stateid = pf_status.stateid << 32;
+ }
+ DPFPRINTF(PF_DEBUG_MISC, ("pf: started\n"));
+ }
+ break;
+
+ case DIOCSTOP:
+ if (!pf_status.running)
+ error = ENOENT;
+ else {
+ pf_status.running = 0;
+#ifdef __FreeBSD__
+ PF_UNLOCK();
+ error = dehook_pf();
+ PF_LOCK();
+ if (error) {
+ pf_status.running = 1;
+ DPFPRINTF(PF_DEBUG_MISC,
+ ("pf: pfil unregisteration failed\n"));
+ }
+#endif
+ pf_status.since = time_second;
+ DPFPRINTF(PF_DEBUG_MISC, ("pf: stopped\n"));
+ }
+ break;
+
+ case DIOCADDRULE: {
+ struct pfioc_rule *pr = (struct pfioc_rule *)addr;
+ struct pf_ruleset *ruleset;
+ struct pf_rule *rule, *tail;
+ struct pf_pooladdr *pa;
+ int rs_num;
+
+ pr->anchor[sizeof(pr->anchor) - 1] = 0;
+ ruleset = pf_find_ruleset(pr->anchor);
+ if (ruleset == NULL) {
+ error = EINVAL;
+ break;
+ }
+ rs_num = pf_get_ruleset_number(pr->rule.action);
+ if (rs_num >= PF_RULESET_MAX) {
+ error = EINVAL;
+ break;
+ }
+ if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
+ error = EINVAL;
+ break;
+ }
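+		/* ticket must match the one issued when the inactive set was opened */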
+ if (pr->ticket != ruleset->rules[rs_num].inactive.ticket) {
+#ifdef __FreeBSD__
+ DPFPRINTF(PF_DEBUG_MISC,
+ ("ticket: %d != [%d]%d\n", pr->ticket, rs_num,
+ ruleset->rules[rs_num].inactive.ticket));
+#endif
+ error = EBUSY;
+ break;
+ }
+ if (pr->pool_ticket != ticket_pabuf) {
+#ifdef __FreeBSD__
+ DPFPRINTF(PF_DEBUG_MISC,
+ ("pool_ticket: %d != %d\n", pr->pool_ticket,
+ ticket_pabuf));
+#endif
+ error = EBUSY;
+ break;
+ }
+ rule = pool_get(&pf_rule_pl, PR_NOWAIT);
+ if (rule == NULL) {
+ error = ENOMEM;
+ break;
+ }
+ bcopy(&pr->rule, rule, sizeof(struct pf_rule));
+#ifdef __FreeBSD__
+ rule->cuid = td->td_ucred->cr_ruid;
+ rule->cpid = td->td_proc ? td->td_proc->p_pid : 0;
+#else
+ rule->cuid = p->p_cred->p_ruid;
+ rule->cpid = p->p_pid;
+#endif
+ rule->anchor = NULL;
+ rule->kif = NULL;
+ TAILQ_INIT(&rule->rpool.list);
+ /* initialize refcounting */
+ rule->states = 0;
+ rule->src_nodes = 0;
+ rule->entries.tqe_prev = NULL;
+#ifndef INET
+ if (rule->af == AF_INET) {
+ pool_put(&pf_rule_pl, rule);
+ error = EAFNOSUPPORT;
+ break;
+ }
+#endif /* INET */
+#ifndef INET6
+ if (rule->af == AF_INET6) {
+ pool_put(&pf_rule_pl, rule);
+ error = EAFNOSUPPORT;
+ break;
+ }
+#endif /* INET6 */
+ tail = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
+ pf_rulequeue);
+ if (tail)
+ rule->nr = tail->nr + 1;
+ else
+ rule->nr = 0;
+ if (rule->ifname[0]) {
+ rule->kif = pfi_kif_get(rule->ifname);
+ if (rule->kif == NULL) {
+ pool_put(&pf_rule_pl, rule);
+ error = EINVAL;
+ break;
+ }
+ pfi_kif_ref(rule->kif, PFI_KIF_REF_RULE);
+ }
+
+#ifdef __FreeBSD__ /* ROUTING */
+ if (rule->rtableid > 0 && rule->rtableid > rt_numfibs)
+#else
+ if (rule->rtableid > 0 && !rtable_exists(rule->rtableid))
+#endif
+ error = EBUSY;
+
+#ifdef ALTQ
+ /* set queue IDs */
+ if (rule->qname[0] != 0) {
+ if ((rule->qid = pf_qname2qid(rule->qname)) == 0)
+ error = EBUSY;
+ else if (rule->pqname[0] != 0) {
+ if ((rule->pqid =
+ pf_qname2qid(rule->pqname)) == 0)
+ error = EBUSY;
+ } else
+ rule->pqid = rule->qid;
+ }
+#endif
+ if (rule->tagname[0])
+ if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0)
+ error = EBUSY;
+ if (rule->match_tagname[0])
+ if ((rule->match_tag =
+ pf_tagname2tag(rule->match_tagname)) == 0)
+ error = EBUSY;
+ if (rule->rt && !rule->direction)
+ error = EINVAL;
+#if NPFLOG > 0
+#ifdef __FreeBSD__
+ if (!rule->log)
+ rule->logif = 0;
+#endif
+ if (rule->logif >= PFLOGIFS_MAX)
+ error = EINVAL;
+#endif
+ if (pf_rtlabel_add(&rule->src.addr) ||
+ pf_rtlabel_add(&rule->dst.addr))
+ error = EBUSY;
+ if (pfi_dynaddr_setup(&rule->src.addr, rule->af))
+ error = EINVAL;
+ if (pfi_dynaddr_setup(&rule->dst.addr, rule->af))
+ error = EINVAL;
+ if (pf_tbladdr_setup(ruleset, &rule->src.addr))
+ error = EINVAL;
+ if (pf_tbladdr_setup(ruleset, &rule->dst.addr))
+ error = EINVAL;
+ if (pf_anchor_setup(rule, ruleset, pr->anchor_call))
+ error = EINVAL;
+ TAILQ_FOREACH(pa, &pf_pabuf, entries)
+ if (pf_tbladdr_setup(ruleset, &pa->addr))
+ error = EINVAL;
+
+ if (rule->overload_tblname[0]) {
+ if ((rule->overload_tbl = pfr_attach_table(ruleset,
+ rule->overload_tblname)) == NULL)
+ error = EINVAL;
+ else
+ rule->overload_tbl->pfrkt_flags |=
+ PFR_TFLAG_ACTIVE;
+ }
+
+ pf_mv_pool(&pf_pabuf, &rule->rpool.list);
+ if (((((rule->action == PF_NAT) || (rule->action == PF_RDR) ||
+ (rule->action == PF_BINAT)) && rule->anchor == NULL) ||
+ (rule->rt > PF_FASTROUTE)) &&
+ (TAILQ_FIRST(&rule->rpool.list) == NULL))
+ error = EINVAL;
+
+ if (error) {
+ pf_rm_rule(NULL, rule);
+ break;
+ }
+
+#ifdef __FreeBSD__
+ if (!debug_pfugidhack && (rule->uid.op || rule->gid.op ||
+ rule->log & PF_LOG_SOCKET_LOOKUP)) {
+ DPFPRINTF(PF_DEBUG_MISC,
+ ("pf: debug.pfugidhack enabled\n"));
+ debug_pfugidhack = 1;
+ }
+#endif
+
+ rule->rpool.cur = TAILQ_FIRST(&rule->rpool.list);
+ rule->evaluations = rule->packets[0] = rule->packets[1] =
+ rule->bytes[0] = rule->bytes[1] = 0;
+ TAILQ_INSERT_TAIL(ruleset->rules[rs_num].inactive.ptr,
+ rule, entries);
+ ruleset->rules[rs_num].inactive.rcount++;
+ break;
+ }
+
+ case DIOCGETRULES: {
+ struct pfioc_rule *pr = (struct pfioc_rule *)addr;
+ struct pf_ruleset *ruleset;
+ struct pf_rule *tail;
+ int rs_num;
+
+ pr->anchor[sizeof(pr->anchor) - 1] = 0;
+ ruleset = pf_find_ruleset(pr->anchor);
+ if (ruleset == NULL) {
+ error = EINVAL;
+ break;
+ }
+ rs_num = pf_get_ruleset_number(pr->rule.action);
+ if (rs_num >= PF_RULESET_MAX) {
+ error = EINVAL;
+ break;
+ }
+ tail = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
+ pf_rulequeue);
+ if (tail)
+ pr->nr = tail->nr + 1;
+ else
+ pr->nr = 0;
+ pr->ticket = ruleset->rules[rs_num].active.ticket;
+ break;
+ }
+
+ case DIOCGETRULE: {
+ struct pfioc_rule *pr = (struct pfioc_rule *)addr;
+ struct pf_ruleset *ruleset;
+ struct pf_rule *rule;
+ int rs_num, i;
+
+ pr->anchor[sizeof(pr->anchor) - 1] = 0;
+ ruleset = pf_find_ruleset(pr->anchor);
+ if (ruleset == NULL) {
+ error = EINVAL;
+ break;
+ }
+ rs_num = pf_get_ruleset_number(pr->rule.action);
+ if (rs_num >= PF_RULESET_MAX) {
+ error = EINVAL;
+ break;
+ }
+ if (pr->ticket != ruleset->rules[rs_num].active.ticket) {
+ error = EBUSY;
+ break;
+ }
+ rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
+ while ((rule != NULL) && (rule->nr != pr->nr))
+ rule = TAILQ_NEXT(rule, entries);
+ if (rule == NULL) {
+ error = EBUSY;
+ break;
+ }
+ bcopy(rule, &pr->rule, sizeof(struct pf_rule));
+ if (pf_anchor_copyout(ruleset, rule, pr)) {
+ error = EBUSY;
+ break;
+ }
+ pfi_dynaddr_copyout(&pr->rule.src.addr);
+ pfi_dynaddr_copyout(&pr->rule.dst.addr);
+ pf_tbladdr_copyout(&pr->rule.src.addr);
+ pf_tbladdr_copyout(&pr->rule.dst.addr);
+ pf_rtlabel_copyout(&pr->rule.src.addr);
+ pf_rtlabel_copyout(&pr->rule.dst.addr);
+ for (i = 0; i < PF_SKIP_COUNT; ++i)
+ if (rule->skip[i].ptr == NULL)
+ pr->rule.skip[i].nr = -1;
+ else
+ pr->rule.skip[i].nr =
+ rule->skip[i].ptr->nr;
+
+ if (pr->action == PF_GET_CLR_CNTR) {
+ rule->evaluations = 0;
+ rule->packets[0] = rule->packets[1] = 0;
+ rule->bytes[0] = rule->bytes[1] = 0;
+ }
+ break;
+ }
+
+ case DIOCCHANGERULE: {
+ struct pfioc_rule *pcr = (struct pfioc_rule *)addr;
+ struct pf_ruleset *ruleset;
+ struct pf_rule *oldrule = NULL, *newrule = NULL;
+ u_int32_t nr = 0;
+ int rs_num;
+
+ if (!(pcr->action == PF_CHANGE_REMOVE ||
+ pcr->action == PF_CHANGE_GET_TICKET) &&
+ pcr->pool_ticket != ticket_pabuf) {
+ error = EBUSY;
+ break;
+ }
+
+ if (pcr->action < PF_CHANGE_ADD_HEAD ||
+ pcr->action > PF_CHANGE_GET_TICKET) {
+ error = EINVAL;
+ break;
+ }
+ ruleset = pf_find_ruleset(pcr->anchor);
+ if (ruleset == NULL) {
+ error = EINVAL;
+ break;
+ }
+ rs_num = pf_get_ruleset_number(pcr->rule.action);
+ if (rs_num >= PF_RULESET_MAX) {
+ error = EINVAL;
+ break;
+ }
+
+ if (pcr->action == PF_CHANGE_GET_TICKET) {
+ pcr->ticket = ++ruleset->rules[rs_num].active.ticket;
+ break;
+ } else {
+ if (pcr->ticket !=
+ ruleset->rules[rs_num].active.ticket) {
+ error = EINVAL;
+ break;
+ }
+ if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
+ error = EINVAL;
+ break;
+ }
+ }
+
+ if (pcr->action != PF_CHANGE_REMOVE) {
+ newrule = pool_get(&pf_rule_pl, PR_NOWAIT);
+ if (newrule == NULL) {
+ error = ENOMEM;
+ break;
+ }
+ bcopy(&pcr->rule, newrule, sizeof(struct pf_rule));
+#ifdef __FreeBSD__
+ newrule->cuid = td->td_ucred->cr_ruid;
+ newrule->cpid = td->td_proc ? td->td_proc->p_pid : 0;
+#else
+ newrule->cuid = p->p_cred->p_ruid;
+ newrule->cpid = p->p_pid;
+#endif
+ TAILQ_INIT(&newrule->rpool.list);
+ /* initialize refcounting */
+ newrule->states = 0;
+ newrule->entries.tqe_prev = NULL;
+#ifndef INET
+ if (newrule->af == AF_INET) {
+ pool_put(&pf_rule_pl, newrule);
+ error = EAFNOSUPPORT;
+ break;
+ }
+#endif /* INET */
+#ifndef INET6
+ if (newrule->af == AF_INET6) {
+ pool_put(&pf_rule_pl, newrule);
+ error = EAFNOSUPPORT;
+ break;
+ }
+#endif /* INET6 */
+ if (newrule->ifname[0]) {
+ newrule->kif = pfi_kif_get(newrule->ifname);
+ if (newrule->kif == NULL) {
+ pool_put(&pf_rule_pl, newrule);
+ error = EINVAL;
+ break;
+ }
+ pfi_kif_ref(newrule->kif, PFI_KIF_REF_RULE);
+ } else
+ newrule->kif = NULL;
+
+ if (newrule->rtableid > 0 &&
+#ifdef __FreeBSD__ /* ROUTING */
+ newrule->rtableid > rt_numfibs)
+#else
+ !rtable_exists(newrule->rtableid))
+#endif
+ error = EBUSY;
+
+#ifdef ALTQ
+ /* set queue IDs */
+ if (newrule->qname[0] != 0) {
+ if ((newrule->qid =
+ pf_qname2qid(newrule->qname)) == 0)
+ error = EBUSY;
+ else if (newrule->pqname[0] != 0) {
+ if ((newrule->pqid =
+ pf_qname2qid(newrule->pqname)) == 0)
+ error = EBUSY;
+ } else
+ newrule->pqid = newrule->qid;
+ }
+#endif /* ALTQ */
+ if (newrule->tagname[0])
+ if ((newrule->tag =
+ pf_tagname2tag(newrule->tagname)) == 0)
+ error = EBUSY;
+ if (newrule->match_tagname[0])
+ if ((newrule->match_tag = pf_tagname2tag(
+ newrule->match_tagname)) == 0)
+ error = EBUSY;
+ if (newrule->rt && !newrule->direction)
+ error = EINVAL;
+#ifdef __FreeBSD__
+#if NPFLOG > 0
+ if (!newrule->log)
+ newrule->logif = 0;
+ if (newrule->logif >= PFLOGIFS_MAX)
+ error = EINVAL;
+#endif
+#endif
+ if (pf_rtlabel_add(&newrule->src.addr) ||
+ pf_rtlabel_add(&newrule->dst.addr))
+ error = EBUSY;
+ if (pfi_dynaddr_setup(&newrule->src.addr, newrule->af))
+ error = EINVAL;
+ if (pfi_dynaddr_setup(&newrule->dst.addr, newrule->af))
+ error = EINVAL;
+ if (pf_tbladdr_setup(ruleset, &newrule->src.addr))
+ error = EINVAL;
+ if (pf_tbladdr_setup(ruleset, &newrule->dst.addr))
+ error = EINVAL;
+ if (pf_anchor_setup(newrule, ruleset, pcr->anchor_call))
+ error = EINVAL;
+ TAILQ_FOREACH(pa, &pf_pabuf, entries)
+ if (pf_tbladdr_setup(ruleset, &pa->addr))
+ error = EINVAL;
+
+ if (newrule->overload_tblname[0]) {
+ if ((newrule->overload_tbl = pfr_attach_table(
+ ruleset, newrule->overload_tblname)) ==
+ NULL)
+ error = EINVAL;
+ else
+ newrule->overload_tbl->pfrkt_flags |=
+ PFR_TFLAG_ACTIVE;
+ }
+
+ pf_mv_pool(&pf_pabuf, &newrule->rpool.list);
+ if (((((newrule->action == PF_NAT) ||
+ (newrule->action == PF_RDR) ||
+ (newrule->action == PF_BINAT) ||
+ (newrule->rt > PF_FASTROUTE)) &&
+ !newrule->anchor)) &&
+ (TAILQ_FIRST(&newrule->rpool.list) == NULL))
+ error = EINVAL;
+
+ if (error) {
+ pf_rm_rule(NULL, newrule);
+ break;
+ }
+
+#ifdef __FreeBSD__
+ if (!debug_pfugidhack && (newrule->uid.op ||
+ newrule->gid.op ||
+ newrule->log & PF_LOG_SOCKET_LOOKUP)) {
+ DPFPRINTF(PF_DEBUG_MISC,
+ ("pf: debug.pfugidhack enabled\n"));
+ debug_pfugidhack = 1;
+ }
+#endif
+
+ newrule->rpool.cur = TAILQ_FIRST(&newrule->rpool.list);
+ newrule->evaluations = 0;
+ newrule->packets[0] = newrule->packets[1] = 0;
+ newrule->bytes[0] = newrule->bytes[1] = 0;
+ }
+ pf_empty_pool(&pf_pabuf);
+
+ if (pcr->action == PF_CHANGE_ADD_HEAD)
+ oldrule = TAILQ_FIRST(
+ ruleset->rules[rs_num].active.ptr);
+ else if (pcr->action == PF_CHANGE_ADD_TAIL)
+ oldrule = TAILQ_LAST(
+ ruleset->rules[rs_num].active.ptr, pf_rulequeue);
+ else {
+ oldrule = TAILQ_FIRST(
+ ruleset->rules[rs_num].active.ptr);
+ while ((oldrule != NULL) && (oldrule->nr != pcr->nr))
+ oldrule = TAILQ_NEXT(oldrule, entries);
+ if (oldrule == NULL) {
+ if (newrule != NULL)
+ pf_rm_rule(NULL, newrule);
+ error = EINVAL;
+ break;
+ }
+ }
+
+ if (pcr->action == PF_CHANGE_REMOVE) {
+ pf_rm_rule(ruleset->rules[rs_num].active.ptr, oldrule);
+ ruleset->rules[rs_num].active.rcount--;
+ } else {
+ if (oldrule == NULL)
+ TAILQ_INSERT_TAIL(
+ ruleset->rules[rs_num].active.ptr,
+ newrule, entries);
+ else if (pcr->action == PF_CHANGE_ADD_HEAD ||
+ pcr->action == PF_CHANGE_ADD_BEFORE)
+ TAILQ_INSERT_BEFORE(oldrule, newrule, entries);
+ else
+ TAILQ_INSERT_AFTER(
+ ruleset->rules[rs_num].active.ptr,
+ oldrule, newrule, entries);
+ ruleset->rules[rs_num].active.rcount++;
+ }
+
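+		/* renumber all rules in the active queue */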
+ nr = 0;
+ TAILQ_FOREACH(oldrule,
+ ruleset->rules[rs_num].active.ptr, entries)
+ oldrule->nr = nr++;
+
+ ruleset->rules[rs_num].active.ticket++;
+
+ pf_calc_skip_steps(ruleset->rules[rs_num].active.ptr);
+ pf_remove_if_empty_ruleset(ruleset);
+
+ break;
+ }
+
+ case DIOCCLRSTATES: {
+ struct pf_state *state, *nexts;
+ struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr;
+ int killed = 0;
+
+ for (state = RB_MIN(pf_state_tree_id, &tree_id); state;
+ state = nexts) {
+ nexts = RB_NEXT(pf_state_tree_id, &tree_id, state);
+
+ if (!psk->psk_ifname[0] || !strcmp(psk->psk_ifname,
+ state->u.s.kif->pfik_name)) {
+#if NPFSYNC
+ /* don't send out individual delete messages */
+ state->sync_flags = PFSTATE_NOSYNC;
+#endif
+ pf_unlink_state(state);
+ killed++;
+ }
+ }
+ psk->psk_af = killed;
+#if NPFSYNC
+ pfsync_clear_states(pf_status.hostid, psk->psk_ifname);
+#endif
+ break;
+ }
+
+ case DIOCKILLSTATES: {
+ struct pf_state *state, *nexts;
+ struct pf_state_host *src, *dst;
+ struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr;
+ int killed = 0;
+
+ for (state = RB_MIN(pf_state_tree_id, &tree_id); state;
+ state = nexts) {
+ nexts = RB_NEXT(pf_state_tree_id, &tree_id, state);
+
+ if (state->direction == PF_OUT) {
+ src = &state->lan;
+ dst = &state->ext;
+ } else {
+ src = &state->ext;
+ dst = &state->lan;
+ }
+ if ((!psk->psk_af || state->af == psk->psk_af)
+ && (!psk->psk_proto || psk->psk_proto ==
+ state->proto) &&
+ PF_MATCHA(psk->psk_src.neg,
+ &psk->psk_src.addr.v.a.addr,
+ &psk->psk_src.addr.v.a.mask,
+ &src->addr, state->af) &&
+ PF_MATCHA(psk->psk_dst.neg,
+ &psk->psk_dst.addr.v.a.addr,
+ &psk->psk_dst.addr.v.a.mask,
+ &dst->addr, state->af) &&
+ (psk->psk_src.port_op == 0 ||
+ pf_match_port(psk->psk_src.port_op,
+ psk->psk_src.port[0], psk->psk_src.port[1],
+ src->port)) &&
+ (psk->psk_dst.port_op == 0 ||
+ pf_match_port(psk->psk_dst.port_op,
+ psk->psk_dst.port[0], psk->psk_dst.port[1],
+ dst->port)) &&
+ (!psk->psk_ifname[0] || !strcmp(psk->psk_ifname,
+ state->u.s.kif->pfik_name))) {
+#if NPFSYNC > 0
+ /* send immediate delete of state */
+ pfsync_delete_state(state);
+ state->sync_flags |= PFSTATE_NOSYNC;
+#endif
+ pf_unlink_state(state);
+ killed++;
+ }
+ }
+ psk->psk_af = killed;
+ break;
+ }
+
+ case DIOCADDSTATE: {
+ struct pfioc_state *ps = (struct pfioc_state *)addr;
+ struct pf_state *state;
+ struct pfi_kif *kif;
+
+ if (ps->state.timeout >= PFTM_MAX &&
+ ps->state.timeout != PFTM_UNTIL_PACKET) {
+ error = EINVAL;
+ break;
+ }
+ state = pool_get(&pf_state_pl, PR_NOWAIT);
+ if (state == NULL) {
+ error = ENOMEM;
+ break;
+ }
+ kif = pfi_kif_get(ps->state.u.ifname);
+ if (kif == NULL) {
+ pool_put(&pf_state_pl, state);
+ error = ENOENT;
+ break;
+ }
+ bcopy(&ps->state, state, sizeof(struct pf_state));
+ bzero(&state->u, sizeof(state->u));
+ state->rule.ptr = &pf_default_rule;
+ state->nat_rule.ptr = NULL;
+ state->anchor.ptr = NULL;
+ state->rt_kif = NULL;
+ state->creation = time_second;
+ state->pfsync_time = 0;
+ state->packets[0] = state->packets[1] = 0;
+ state->bytes[0] = state->bytes[1] = 0;
+
+ if (pf_insert_state(kif, state)) {
+ pfi_kif_unref(kif, PFI_KIF_REF_NONE);
+ pool_put(&pf_state_pl, state);
+ error = ENOMEM;
+ }
+ break;
+ }
+
+ case DIOCGETSTATE: {
+ struct pfioc_state *ps = (struct pfioc_state *)addr;
+ struct pf_state *state;
+ u_int32_t nr;
+ int secs;
+
+ nr = 0;
+ RB_FOREACH(state, pf_state_tree_id, &tree_id) {
+ if (nr >= ps->nr)
+ break;
+ nr++;
+ }
+ if (state == NULL) {
+ error = EBUSY;
+ break;
+ }
+ secs = time_second;
+ bcopy(state, &ps->state, sizeof(ps->state));
+ strlcpy(ps->state.u.ifname, state->u.s.kif->pfik_name,
+ sizeof(ps->state.u.ifname));
+ ps->state.rule.nr = state->rule.ptr->nr;
+ ps->state.nat_rule.nr = (state->nat_rule.ptr == NULL) ?
+ -1 : state->nat_rule.ptr->nr;
+ ps->state.anchor.nr = (state->anchor.ptr == NULL) ?
+ -1 : state->anchor.ptr->nr;
+ ps->state.creation = secs - ps->state.creation;
+ ps->state.expire = pf_state_expires(state);
+ if (ps->state.expire > secs)
+ ps->state.expire -= secs;
+ else
+ ps->state.expire = 0;
+ break;
+ }
+
+ case DIOCGETSTATES: {
+ struct pfioc_states *ps = (struct pfioc_states *)addr;
+ struct pf_state *state;
+ struct pf_state *p, *pstore;
+ u_int32_t nr = 0;
+ int space = ps->ps_len;
+
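+		/* a zero-length buffer is a size probe: report the space needed */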
+ if (space == 0) {
+ nr = pf_status.states;
+ ps->ps_len = sizeof(struct pf_state) * nr;
+ break;
+ }
+
+#ifdef __FreeBSD__
+ PF_UNLOCK();
+#endif
+ pstore = malloc(sizeof(*pstore), M_TEMP, M_WAITOK);
+#ifdef __FreeBSD__
+ PF_LOCK();
+#endif
+
+ p = ps->ps_states;
+
+ state = TAILQ_FIRST(&state_list);
+ while (state) {
+ if (state->timeout != PFTM_UNLINKED) {
+ int secs = time_second;
+
+ if ((nr+1) * sizeof(*p) > (unsigned)ps->ps_len)
+ break;
+
+ bcopy(state, pstore, sizeof(*pstore));
+ strlcpy(pstore->u.ifname,
+ state->u.s.kif->pfik_name,
+ sizeof(pstore->u.ifname));
+ pstore->rule.nr = state->rule.ptr->nr;
+ pstore->nat_rule.nr = (state->nat_rule.ptr ==
+ NULL) ? -1 : state->nat_rule.ptr->nr;
+ pstore->anchor.nr = (state->anchor.ptr ==
+ NULL) ? -1 : state->anchor.ptr->nr;
+ pstore->creation = secs - pstore->creation;
+ pstore->expire = pf_state_expires(state);
+ if (pstore->expire > secs)
+ pstore->expire -= secs;
+ else
+ pstore->expire = 0;
+#ifdef __FreeBSD__
+ PF_COPYOUT(pstore, p, sizeof(*p), error);
+#else
+ error = copyout(pstore, p, sizeof(*p));
+#endif
+ if (error) {
+ free(pstore, M_TEMP);
+ goto fail;
+ }
+ p++;
+ nr++;
+ }
+ state = TAILQ_NEXT(state, u.s.entry_list);
+ }
+
+ ps->ps_len = sizeof(struct pf_state) * nr;
+
+ free(pstore, M_TEMP);
+ break;
+ }
+
+ case DIOCGETSTATUS: {
+ struct pf_status *s = (struct pf_status *)addr;
+ bcopy(&pf_status, s, sizeof(struct pf_status));
+ pfi_fill_oldstatus(s);
+ break;
+ }
+
+ case DIOCSETSTATUSIF: {
+ struct pfioc_if *pi = (struct pfioc_if *)addr;
+
+ if (pi->ifname[0] == 0) {
+ bzero(pf_status.ifname, IFNAMSIZ);
+ break;
+ }
+ if (ifunit(pi->ifname) == NULL) {
+ error = EINVAL;
+ break;
+ }
+ strlcpy(pf_status.ifname, pi->ifname, IFNAMSIZ);
+ break;
+ }
+
+ case DIOCCLRSTATUS: {
+ bzero(pf_status.counters, sizeof(pf_status.counters));
+ bzero(pf_status.fcounters, sizeof(pf_status.fcounters));
+ bzero(pf_status.scounters, sizeof(pf_status.scounters));
+ pf_status.since = time_second;
+ if (*pf_status.ifname)
+ pfi_clr_istats(pf_status.ifname);
+ break;
+ }
+
+ case DIOCNATLOOK: {
+ struct pfioc_natlook *pnl = (struct pfioc_natlook *)addr;
+ struct pf_state *state;
+ struct pf_state_cmp key;
+ int m = 0, direction = pnl->direction;
+
+ key.af = pnl->af;
+ key.proto = pnl->proto;
+
+ if (!pnl->proto ||
+ PF_AZERO(&pnl->saddr, pnl->af) ||
+ PF_AZERO(&pnl->daddr, pnl->af) ||
+ ((pnl->proto == IPPROTO_TCP ||
+ pnl->proto == IPPROTO_UDP) &&
+ (!pnl->dport || !pnl->sport)))
+ error = EINVAL;
+ else {
+ /*
+ * userland gives us source and dest of connection,
+ * reverse the lookup so we ask for what happens with
+ * the return traffic, enabling us to find it in the
+ * state tree.
+ */
+ if (direction == PF_IN) {
+ PF_ACPY(&key.ext.addr, &pnl->daddr, pnl->af);
+ key.ext.port = pnl->dport;
+ PF_ACPY(&key.gwy.addr, &pnl->saddr, pnl->af);
+ key.gwy.port = pnl->sport;
+ state = pf_find_state_all(&key, PF_EXT_GWY, &m);
+ } else {
+ PF_ACPY(&key.lan.addr, &pnl->daddr, pnl->af);
+ key.lan.port = pnl->dport;
+ PF_ACPY(&key.ext.addr, &pnl->saddr, pnl->af);
+ key.ext.port = pnl->sport;
+ state = pf_find_state_all(&key, PF_LAN_EXT, &m);
+ }
+ if (m > 1)
+ error = E2BIG; /* more than one state */
+ else if (state != NULL) {
+ if (direction == PF_IN) {
+ PF_ACPY(&pnl->rsaddr, &state->lan.addr,
+ state->af);
+ pnl->rsport = state->lan.port;
+ PF_ACPY(&pnl->rdaddr, &pnl->daddr,
+ pnl->af);
+ pnl->rdport = pnl->dport;
+ } else {
+ PF_ACPY(&pnl->rdaddr, &state->gwy.addr,
+ state->af);
+ pnl->rdport = state->gwy.port;
+ PF_ACPY(&pnl->rsaddr, &pnl->saddr,
+ pnl->af);
+ pnl->rsport = pnl->sport;
+ }
+ } else
+ error = ENOENT;
+ }
+ break;
+ }
+
+ case DIOCSETTIMEOUT: {
+ struct pfioc_tm *pt = (struct pfioc_tm *)addr;
+ int old;
+
+ if (pt->timeout < 0 || pt->timeout >= PFTM_MAX ||
+ pt->seconds < 0) {
+ error = EINVAL;
+ goto fail;
+ }
+ old = pf_default_rule.timeout[pt->timeout];
+ if (pt->timeout == PFTM_INTERVAL && pt->seconds == 0)
+ pt->seconds = 1;
+ pf_default_rule.timeout[pt->timeout] = pt->seconds;
+ if (pt->timeout == PFTM_INTERVAL && pt->seconds < old)
+ wakeup(pf_purge_thread);
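+		/* hand the previous value back to the caller */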
+ pt->seconds = old;
+ break;
+ }
+
+ case DIOCGETTIMEOUT: {
+ struct pfioc_tm *pt = (struct pfioc_tm *)addr;
+
+ if (pt->timeout < 0 || pt->timeout >= PFTM_MAX) {
+ error = EINVAL;
+ goto fail;
+ }
+ pt->seconds = pf_default_rule.timeout[pt->timeout];
+ break;
+ }
+
+ case DIOCGETLIMIT: {
+ struct pfioc_limit *pl = (struct pfioc_limit *)addr;
+
+ if (pl->index < 0 || pl->index >= PF_LIMIT_MAX) {
+ error = EINVAL;
+ goto fail;
+ }
+ pl->limit = pf_pool_limits[pl->index].limit;
+ break;
+ }
+
+ case DIOCSETLIMIT: {
+ struct pfioc_limit *pl = (struct pfioc_limit *)addr;
+ int old_limit;
+
+ if (pl->index < 0 || pl->index >= PF_LIMIT_MAX ||
+ pf_pool_limits[pl->index].pp == NULL) {
+ error = EINVAL;
+ goto fail;
+ }
+#ifdef __FreeBSD__
+ uma_zone_set_max(pf_pool_limits[pl->index].pp, pl->limit);
+#else
+ if (pool_sethardlimit(pf_pool_limits[pl->index].pp,
+ pl->limit, NULL, 0) != 0) {
+ error = EBUSY;
+ goto fail;
+ }
+#endif
+ old_limit = pf_pool_limits[pl->index].limit;
+ pf_pool_limits[pl->index].limit = pl->limit;
+ pl->limit = old_limit;
+ break;
+ }
+
+ case DIOCSETDEBUG: {
+ u_int32_t *level = (u_int32_t *)addr;
+
+ pf_status.debug = *level;
+ break;
+ }
+
+ case DIOCCLRRULECTRS: {
+ /* obsoleted by DIOCGETRULE with action=PF_GET_CLR_CNTR */
+ struct pf_ruleset *ruleset = &pf_main_ruleset;
+ struct pf_rule *rule;
+
+ TAILQ_FOREACH(rule,
+ ruleset->rules[PF_RULESET_FILTER].active.ptr, entries) {
+ rule->evaluations = 0;
+ rule->packets[0] = rule->packets[1] = 0;
+ rule->bytes[0] = rule->bytes[1] = 0;
+ }
+ break;
+ }
+
+#ifdef __FreeBSD__
+ case DIOCGIFSPEED: {
+ struct pf_ifspeed *psp = (struct pf_ifspeed *)addr;
+ struct pf_ifspeed ps;
+ struct ifnet *ifp;
+
+ if (psp->ifname[0] != 0) {
+ /* Can we completely trust user-land? */
+ strlcpy(ps.ifname, psp->ifname, IFNAMSIZ);
+ ifp = ifunit(ps.ifname);
+ if (ifp != NULL)
+ psp->baudrate = ifp->if_baudrate;
+ else
+ error = EINVAL;
+ } else
+ error = EINVAL;
+ break;
+ }
+#endif /* __FreeBSD__ */
+
+#ifdef ALTQ
+ case DIOCSTARTALTQ: {
+ struct pf_altq *altq;
+
+ /* enable all altq interfaces on active list */
+ TAILQ_FOREACH(altq, pf_altqs_active, entries) {
+#ifdef __FreeBSD__
+ if (altq->qname[0] == 0 && (altq->local_flags &
+ PFALTQ_FLAG_IF_REMOVED) == 0) {
+#else
+ if (altq->qname[0] == 0) {
+#endif
+ error = pf_enable_altq(altq);
+ if (error != 0)
+ break;
+ }
+ }
+ if (error == 0)
+ pf_altq_running = 1;
+ DPFPRINTF(PF_DEBUG_MISC, ("altq: started\n"));
+ break;
+ }
+
+ case DIOCSTOPALTQ: {
+ struct pf_altq *altq;
+
+ /* disable all altq interfaces on active list */
+ TAILQ_FOREACH(altq, pf_altqs_active, entries) {
+#ifdef __FreeBSD__
+ if (altq->qname[0] == 0 && (altq->local_flags &
+ PFALTQ_FLAG_IF_REMOVED) == 0) {
+#else
+ if (altq->qname[0] == 0) {
+#endif
+ error = pf_disable_altq(altq);
+ if (error != 0)
+ break;
+ }
+ }
+ if (error == 0)
+ pf_altq_running = 0;
+ DPFPRINTF(PF_DEBUG_MISC, ("altq: stopped\n"));
+ break;
+ }
+
+ case DIOCADDALTQ: {
+ struct pfioc_altq *pa = (struct pfioc_altq *)addr;
+ struct pf_altq *altq, *a;
+
+ if (pa->ticket != ticket_altqs_inactive) {
+ error = EBUSY;
+ break;
+ }
+ altq = pool_get(&pf_altq_pl, PR_NOWAIT);
+ if (altq == NULL) {
+ error = ENOMEM;
+ break;
+ }
+ bcopy(&pa->altq, altq, sizeof(struct pf_altq));
+#ifdef __FreeBSD__
+ altq->local_flags = 0;
+#endif
+
+ /*
+ * if this is for a queue, find the discipline and
+ * copy the necessary fields
+ */
+ if (altq->qname[0] != 0) {
+ if ((altq->qid = pf_qname2qid(altq->qname)) == 0) {
+ error = EBUSY;
+ pool_put(&pf_altq_pl, altq);
+ break;
+ }
+ altq->altq_disc = NULL;
+ TAILQ_FOREACH(a, pf_altqs_inactive, entries) {
+ if (strncmp(a->ifname, altq->ifname,
+ IFNAMSIZ) == 0 && a->qname[0] == 0) {
+ altq->altq_disc = a->altq_disc;
+ break;
+ }
+ }
+ }
+
+#ifdef __FreeBSD__
+ struct ifnet *ifp;
+
+ if ((ifp = ifunit(altq->ifname)) == NULL) {
+ altq->local_flags |= PFALTQ_FLAG_IF_REMOVED;
+ } else {
+ PF_UNLOCK();
+#endif
+ error = altq_add(altq);
+#ifdef __FreeBSD__
+ PF_LOCK();
+ }
+#endif
+ if (error) {
+ pool_put(&pf_altq_pl, altq);
+ break;
+ }
+
+ TAILQ_INSERT_TAIL(pf_altqs_inactive, altq, entries);
+ bcopy(altq, &pa->altq, sizeof(struct pf_altq));
+ break;
+ }
+
+ case DIOCGETALTQS: {
+ struct pfioc_altq *pa = (struct pfioc_altq *)addr;
+ struct pf_altq *altq;
+
+ pa->nr = 0;
+ TAILQ_FOREACH(altq, pf_altqs_active, entries)
+ pa->nr++;
+ pa->ticket = ticket_altqs_active;
+ break;
+ }
+
+ case DIOCGETALTQ: {
+ struct pfioc_altq *pa = (struct pfioc_altq *)addr;
+ struct pf_altq *altq;
+ u_int32_t nr;
+
+ if (pa->ticket != ticket_altqs_active) {
+ error = EBUSY;
+ break;
+ }
+ nr = 0;
+ altq = TAILQ_FIRST(pf_altqs_active);
+ while ((altq != NULL) && (nr < pa->nr)) {
+ altq = TAILQ_NEXT(altq, entries);
+ nr++;
+ }
+ if (altq == NULL) {
+ error = EBUSY;
+ break;
+ }
+ bcopy(altq, &pa->altq, sizeof(struct pf_altq));
+ break;
+ }
+
+ case DIOCCHANGEALTQ:
+ /* CHANGEALTQ not supported yet! */
+ error = ENODEV;
+ break;
+
+ case DIOCGETQSTATS: {
+ struct pfioc_qstats *pq = (struct pfioc_qstats *)addr;
+ struct pf_altq *altq;
+ u_int32_t nr;
+ int nbytes;
+
+ if (pq->ticket != ticket_altqs_active) {
+ error = EBUSY;
+ break;
+ }
+ nbytes = pq->nbytes;
+ nr = 0;
+ altq = TAILQ_FIRST(pf_altqs_active);
+ while ((altq != NULL) && (nr < pq->nr)) {
+ altq = TAILQ_NEXT(altq, entries);
+ nr++;
+ }
+ if (altq == NULL) {
+ error = EBUSY;
+ break;
+ }
+#ifdef __FreeBSD__
+ if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) != 0) {
+ error = ENXIO;
+ break;
+ }
+ PF_UNLOCK();
+#endif
+ error = altq_getqstats(altq, pq->buf, &nbytes);
+#ifdef __FreeBSD__
+ PF_LOCK();
+#endif
+ if (error == 0) {
+ pq->scheduler = altq->scheduler;
+ pq->nbytes = nbytes;
+ }
+ break;
+ }
+#endif /* ALTQ */
+
+ case DIOCBEGINADDRS: {
+ struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
+
+ pf_empty_pool(&pf_pabuf);
+ pp->ticket = ++ticket_pabuf;
+ break;
+ }
+
+ case DIOCADDADDR: {
+ struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
+
+ if (pp->ticket != ticket_pabuf) {
+ error = EBUSY;
+ break;
+ }
+#ifndef INET
+ if (pp->af == AF_INET) {
+ error = EAFNOSUPPORT;
+ break;
+ }
+#endif /* INET */
+#ifndef INET6
+ if (pp->af == AF_INET6) {
+ error = EAFNOSUPPORT;
+ break;
+ }
+#endif /* INET6 */
+ if (pp->addr.addr.type != PF_ADDR_ADDRMASK &&
+ pp->addr.addr.type != PF_ADDR_DYNIFTL &&
+ pp->addr.addr.type != PF_ADDR_TABLE) {
+ error = EINVAL;
+ break;
+ }
+ pa = pool_get(&pf_pooladdr_pl, PR_NOWAIT);
+ if (pa == NULL) {
+ error = ENOMEM;
+ break;
+ }
+ bcopy(&pp->addr, pa, sizeof(struct pf_pooladdr));
+ if (pa->ifname[0]) {
+ pa->kif = pfi_kif_get(pa->ifname);
+ if (pa->kif == NULL) {
+ pool_put(&pf_pooladdr_pl, pa);
+ error = EINVAL;
+ break;
+ }
+ pfi_kif_ref(pa->kif, PFI_KIF_REF_RULE);
+ }
+ if (pfi_dynaddr_setup(&pa->addr, pp->af)) {
+ pfi_dynaddr_remove(&pa->addr);
+ pfi_kif_unref(pa->kif, PFI_KIF_REF_RULE);
+ pool_put(&pf_pooladdr_pl, pa);
+ error = EINVAL;
+ break;
+ }
+ TAILQ_INSERT_TAIL(&pf_pabuf, pa, entries);
+ break;
+ }
+
+ case DIOCGETADDRS: {
+ struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
+
+ pp->nr = 0;
+ pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action,
+ pp->r_num, 0, 1, 0);
+ if (pool == NULL) {
+ error = EBUSY;
+ break;
+ }
+ TAILQ_FOREACH(pa, &pool->list, entries)
+ pp->nr++;
+ break;
+ }
+
+ case DIOCGETADDR: {
+ struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
+ u_int32_t nr = 0;
+
+ pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action,
+ pp->r_num, 0, 1, 1);
+ if (pool == NULL) {
+ error = EBUSY;
+ break;
+ }
+ pa = TAILQ_FIRST(&pool->list);
+ while ((pa != NULL) && (nr < pp->nr)) {
+ pa = TAILQ_NEXT(pa, entries);
+ nr++;
+ }
+ if (pa == NULL) {
+ error = EBUSY;
+ break;
+ }
+ bcopy(pa, &pp->addr, sizeof(struct pf_pooladdr));
+ pfi_dynaddr_copyout(&pp->addr.addr);
+ pf_tbladdr_copyout(&pp->addr.addr);
+ pf_rtlabel_copyout(&pp->addr.addr);
+ break;
+ }
+
+ case DIOCCHANGEADDR: {
+ struct pfioc_pooladdr *pca = (struct pfioc_pooladdr *)addr;
+ struct pf_pooladdr *oldpa = NULL, *newpa = NULL;
+ struct pf_ruleset *ruleset;
+
+ if (pca->action < PF_CHANGE_ADD_HEAD ||
+ pca->action > PF_CHANGE_REMOVE) {
+ error = EINVAL;
+ break;
+ }
+ if (pca->addr.addr.type != PF_ADDR_ADDRMASK &&
+ pca->addr.addr.type != PF_ADDR_DYNIFTL &&
+ pca->addr.addr.type != PF_ADDR_TABLE) {
+ error = EINVAL;
+ break;
+ }
+
+ ruleset = pf_find_ruleset(pca->anchor);
+ if (ruleset == NULL) {
+ error = EBUSY;
+ break;
+ }
+ pool = pf_get_pool(pca->anchor, pca->ticket, pca->r_action,
+ pca->r_num, pca->r_last, 1, 1);
+ if (pool == NULL) {
+ error = EBUSY;
+ break;
+ }
+ if (pca->action != PF_CHANGE_REMOVE) {
+ newpa = pool_get(&pf_pooladdr_pl, PR_NOWAIT);
+ if (newpa == NULL) {
+ error = ENOMEM;
+ break;
+ }
+ bcopy(&pca->addr, newpa, sizeof(struct pf_pooladdr));
+#ifndef INET
+ if (pca->af == AF_INET) {
+ pool_put(&pf_pooladdr_pl, newpa);
+ error = EAFNOSUPPORT;
+ break;
+ }
+#endif /* INET */
+#ifndef INET6
+ if (pca->af == AF_INET6) {
+ pool_put(&pf_pooladdr_pl, newpa);
+ error = EAFNOSUPPORT;
+ break;
+ }
+#endif /* INET6 */
+ if (newpa->ifname[0]) {
+ newpa->kif = pfi_kif_get(newpa->ifname);
+ if (newpa->kif == NULL) {
+ pool_put(&pf_pooladdr_pl, newpa);
+ error = EINVAL;
+ break;
+ }
+ pfi_kif_ref(newpa->kif, PFI_KIF_REF_RULE);
+ } else
+ newpa->kif = NULL;
+ if (pfi_dynaddr_setup(&newpa->addr, pca->af) ||
+ pf_tbladdr_setup(ruleset, &newpa->addr)) {
+ pfi_dynaddr_remove(&newpa->addr);
+ pfi_kif_unref(newpa->kif, PFI_KIF_REF_RULE);
+ pool_put(&pf_pooladdr_pl, newpa);
+ error = EINVAL;
+ break;
+ }
+ }
+
+ if (pca->action == PF_CHANGE_ADD_HEAD)
+ oldpa = TAILQ_FIRST(&pool->list);
+ else if (pca->action == PF_CHANGE_ADD_TAIL)
+ oldpa = TAILQ_LAST(&pool->list, pf_palist);
+ else {
+ int i = 0;
+
+ oldpa = TAILQ_FIRST(&pool->list);
+ while ((oldpa != NULL) && (i < pca->nr)) {
+ oldpa = TAILQ_NEXT(oldpa, entries);
+ i++;
+ }
+ if (oldpa == NULL) {
+ error = EINVAL;
+ break;
+ }
+ }
+
+ if (pca->action == PF_CHANGE_REMOVE) {
+ TAILQ_REMOVE(&pool->list, oldpa, entries);
+ pfi_dynaddr_remove(&oldpa->addr);
+ pf_tbladdr_remove(&oldpa->addr);
+ pfi_kif_unref(oldpa->kif, PFI_KIF_REF_RULE);
+ pool_put(&pf_pooladdr_pl, oldpa);
+ } else {
+ if (oldpa == NULL)
+ TAILQ_INSERT_TAIL(&pool->list, newpa, entries);
+ else if (pca->action == PF_CHANGE_ADD_HEAD ||
+ pca->action == PF_CHANGE_ADD_BEFORE)
+ TAILQ_INSERT_BEFORE(oldpa, newpa, entries);
+ else
+ TAILQ_INSERT_AFTER(&pool->list, oldpa,
+ newpa, entries);
+ }
+
+ pool->cur = TAILQ_FIRST(&pool->list);
+ PF_ACPY(&pool->counter, &pool->cur->addr.v.a.addr,
+ pca->af);
+ break;
+ }
+
+ case DIOCGETRULESETS: {
+ struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr;
+ struct pf_ruleset *ruleset;
+ struct pf_anchor *anchor;
+
+ pr->path[sizeof(pr->path) - 1] = 0;
+ if ((ruleset = pf_find_ruleset(pr->path)) == NULL) {
+ error = EINVAL;
+ break;
+ }
+ pr->nr = 0;
+ if (ruleset->anchor == NULL) {
+ /* XXX kludge for pf_main_ruleset */
+ RB_FOREACH(anchor, pf_anchor_global, &pf_anchors)
+ if (anchor->parent == NULL)
+ pr->nr++;
+ } else {
+ RB_FOREACH(anchor, pf_anchor_node,
+ &ruleset->anchor->children)
+ pr->nr++;
+ }
+ break;
+ }
+
+ case DIOCGETRULESET: {
+ struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr;
+ struct pf_ruleset *ruleset;
+ struct pf_anchor *anchor;
+ u_int32_t nr = 0;
+
+ pr->path[sizeof(pr->path) - 1] = 0;
+ if ((ruleset = pf_find_ruleset(pr->path)) == NULL) {
+ error = EINVAL;
+ break;
+ }
+ pr->name[0] = 0;
+ if (ruleset->anchor == NULL) {
+ /* XXX kludge for pf_main_ruleset */
+ RB_FOREACH(anchor, pf_anchor_global, &pf_anchors)
+ if (anchor->parent == NULL && nr++ == pr->nr) {
+ strlcpy(pr->name, anchor->name,
+ sizeof(pr->name));
+ break;
+ }
+ } else {
+ RB_FOREACH(anchor, pf_anchor_node,
+ &ruleset->anchor->children)
+ if (nr++ == pr->nr) {
+ strlcpy(pr->name, anchor->name,
+ sizeof(pr->name));
+ break;
+ }
+ }
+ if (!pr->name[0])
+ error = EBUSY;
+ break;
+ }
+
+ case DIOCRCLRTABLES: {
+ struct pfioc_table *io = (struct pfioc_table *)addr;
+
+ if (io->pfrio_esize != 0) {
+ error = ENODEV;
+ break;
+ }
+ error = pfr_clr_tables(&io->pfrio_table, &io->pfrio_ndel,
+ io->pfrio_flags | PFR_FLAG_USERIOCTL);
+ break;
+ }
+
+ case DIOCRADDTABLES: {
+ struct pfioc_table *io = (struct pfioc_table *)addr;
+
+ if (io->pfrio_esize != sizeof(struct pfr_table)) {
+ error = ENODEV;
+ break;
+ }
+ error = pfr_add_tables(io->pfrio_buffer, io->pfrio_size,
+ &io->pfrio_nadd, io->pfrio_flags | PFR_FLAG_USERIOCTL);
+ break;
+ }
+
+ case DIOCRDELTABLES: {
+ struct pfioc_table *io = (struct pfioc_table *)addr;
+
+ if (io->pfrio_esize != sizeof(struct pfr_table)) {
+ error = ENODEV;
+ break;
+ }
+ error = pfr_del_tables(io->pfrio_buffer, io->pfrio_size,
+ &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
+ break;
+ }
+
+ case DIOCRGETTABLES: {
+ struct pfioc_table *io = (struct pfioc_table *)addr;
+
+ if (io->pfrio_esize != sizeof(struct pfr_table)) {
+ error = ENODEV;
+ break;
+ }
+ error = pfr_get_tables(&io->pfrio_table, io->pfrio_buffer,
+ &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
+ break;
+ }
+
+ case DIOCRGETTSTATS: {
+ struct pfioc_table *io = (struct pfioc_table *)addr;
+
+ if (io->pfrio_esize != sizeof(struct pfr_tstats)) {
+ error = ENODEV;
+ break;
+ }
+ error = pfr_get_tstats(&io->pfrio_table, io->pfrio_buffer,
+ &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
+ break;
+ }
+
+ case DIOCRCLRTSTATS: {
+ struct pfioc_table *io = (struct pfioc_table *)addr;
+
+ if (io->pfrio_esize != sizeof(struct pfr_table)) {
+ error = ENODEV;
+ break;
+ }
+ error = pfr_clr_tstats(io->pfrio_buffer, io->pfrio_size,
+ &io->pfrio_nzero, io->pfrio_flags | PFR_FLAG_USERIOCTL);
+ break;
+ }
+
+ case DIOCRSETTFLAGS: {
+ struct pfioc_table *io = (struct pfioc_table *)addr;
+
+ if (io->pfrio_esize != sizeof(struct pfr_table)) {
+ error = ENODEV;
+ break;
+ }
+ error = pfr_set_tflags(io->pfrio_buffer, io->pfrio_size,
+ io->pfrio_setflag, io->pfrio_clrflag, &io->pfrio_nchange,
+ &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
+ break;
+ }
+
+ case DIOCRCLRADDRS: {
+ struct pfioc_table *io = (struct pfioc_table *)addr;
+
+ if (io->pfrio_esize != 0) {
+ error = ENODEV;
+ break;
+ }
+ error = pfr_clr_addrs(&io->pfrio_table, &io->pfrio_ndel,
+ io->pfrio_flags | PFR_FLAG_USERIOCTL);
+ break;
+ }
+
+ case DIOCRADDADDRS: {
+ struct pfioc_table *io = (struct pfioc_table *)addr;
+
+ if (io->pfrio_esize != sizeof(struct pfr_addr)) {
+ error = ENODEV;
+ break;
+ }
+ error = pfr_add_addrs(&io->pfrio_table, io->pfrio_buffer,
+ io->pfrio_size, &io->pfrio_nadd, io->pfrio_flags |
+ PFR_FLAG_USERIOCTL);
+ break;
+ }
+
+ case DIOCRDELADDRS: {
+ struct pfioc_table *io = (struct pfioc_table *)addr;
+
+ if (io->pfrio_esize != sizeof(struct pfr_addr)) {
+ error = ENODEV;
+ break;
+ }
+ error = pfr_del_addrs(&io->pfrio_table, io->pfrio_buffer,
+ io->pfrio_size, &io->pfrio_ndel, io->pfrio_flags |
+ PFR_FLAG_USERIOCTL);
+ break;
+ }
+
+ case DIOCRSETADDRS: {
+ struct pfioc_table *io = (struct pfioc_table *)addr;
+
+ if (io->pfrio_esize != sizeof(struct pfr_addr)) {
+ error = ENODEV;
+ break;
+ }
+ error = pfr_set_addrs(&io->pfrio_table, io->pfrio_buffer,
+ io->pfrio_size, &io->pfrio_size2, &io->pfrio_nadd,
+ &io->pfrio_ndel, &io->pfrio_nchange, io->pfrio_flags |
+ PFR_FLAG_USERIOCTL, 0);
+ break;
+ }
+
+ case DIOCRGETADDRS: {
+ struct pfioc_table *io = (struct pfioc_table *)addr;
+
+ if (io->pfrio_esize != sizeof(struct pfr_addr)) {
+ error = ENODEV;
+ break;
+ }
+ error = pfr_get_addrs(&io->pfrio_table, io->pfrio_buffer,
+ &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
+ break;
+ }
+
+ case DIOCRGETASTATS: {
+ struct pfioc_table *io = (struct pfioc_table *)addr;
+
+ if (io->pfrio_esize != sizeof(struct pfr_astats)) {
+ error = ENODEV;
+ break;
+ }
+ error = pfr_get_astats(&io->pfrio_table, io->pfrio_buffer,
+ &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
+ break;
+ }
+
+ case DIOCRCLRASTATS: {
+ struct pfioc_table *io = (struct pfioc_table *)addr;
+
+ if (io->pfrio_esize != sizeof(struct pfr_addr)) {
+ error = ENODEV;
+ break;
+ }
+ error = pfr_clr_astats(&io->pfrio_table, io->pfrio_buffer,
+ io->pfrio_size, &io->pfrio_nzero, io->pfrio_flags |
+ PFR_FLAG_USERIOCTL);
+ break;
+ }
+
+ case DIOCRTSTADDRS: {
+ struct pfioc_table *io = (struct pfioc_table *)addr;
+
+ if (io->pfrio_esize != sizeof(struct pfr_addr)) {
+ error = ENODEV;
+ break;
+ }
+ error = pfr_tst_addrs(&io->pfrio_table, io->pfrio_buffer,
+ io->pfrio_size, &io->pfrio_nmatch, io->pfrio_flags |
+ PFR_FLAG_USERIOCTL);
+ break;
+ }
+
+ case DIOCRINADEFINE: {
+ struct pfioc_table *io = (struct pfioc_table *)addr;
+
+ if (io->pfrio_esize != sizeof(struct pfr_addr)) {
+ error = ENODEV;
+ break;
+ }
+ error = pfr_ina_define(&io->pfrio_table, io->pfrio_buffer,
+ io->pfrio_size, &io->pfrio_nadd, &io->pfrio_naddr,
+ io->pfrio_ticket, io->pfrio_flags | PFR_FLAG_USERIOCTL);
+ break;
+ }
+
+ case DIOCOSFPADD: {
+ struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
+ error = pf_osfp_add(io);
+ break;
+ }
+
+ case DIOCOSFPGET: {
+ struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
+ error = pf_osfp_get(io);
+ break;
+ }
+
+ case DIOCXBEGIN: {
+ struct pfioc_trans *io = (struct pfioc_trans *)addr;
+ struct pfioc_trans_e *ioe;
+ struct pfr_table *table;
+ int i;
+
+ if (io->esize != sizeof(*ioe)) {
+ error = ENODEV;
+ goto fail;
+ }
+#ifdef __FreeBSD__
+ PF_UNLOCK();
+#endif
+ ioe = (struct pfioc_trans_e *)malloc(sizeof(*ioe),
+ M_TEMP, M_WAITOK);
+ table = (struct pfr_table *)malloc(sizeof(*table),
+ M_TEMP, M_WAITOK);
+#ifdef __FreeBSD__
+ PF_LOCK();
+#endif
+ for (i = 0; i < io->size; i++) {
+#ifdef __FreeBSD__
+ PF_COPYIN(io->array+i, ioe, sizeof(*ioe), error);
+ if (error) {
+#else
+ if (copyin(io->array+i, ioe, sizeof(*ioe))) {
+#endif
+ free(table, M_TEMP);
+ free(ioe, M_TEMP);
+ error = EFAULT;
+ goto fail;
+ }
+ switch (ioe->rs_num) {
+#ifdef ALTQ
+ case PF_RULESET_ALTQ:
+ if (ioe->anchor[0]) {
+ free(table, M_TEMP);
+ free(ioe, M_TEMP);
+ error = EINVAL;
+ goto fail;
+ }
+ if ((error = pf_begin_altq(&ioe->ticket))) {
+ free(table, M_TEMP);
+ free(ioe, M_TEMP);
+ goto fail;
+ }
+ break;
+#endif /* ALTQ */
+ case PF_RULESET_TABLE:
+ bzero(table, sizeof(*table));
+ strlcpy(table->pfrt_anchor, ioe->anchor,
+ sizeof(table->pfrt_anchor));
+ if ((error = pfr_ina_begin(table,
+ &ioe->ticket, NULL, 0))) {
+ free(table, M_TEMP);
+ free(ioe, M_TEMP);
+ goto fail;
+ }
+ break;
+ default:
+ if ((error = pf_begin_rules(&ioe->ticket,
+ ioe->rs_num, ioe->anchor))) {
+ free(table, M_TEMP);
+ free(ioe, M_TEMP);
+ goto fail;
+ }
+ break;
+ }
+#ifdef __FreeBSD__
+ PF_COPYOUT(ioe, io->array+i, sizeof(io->array[i]),
+ error);
+ if (error) {
+#else
+ if (copyout(ioe, io->array+i, sizeof(io->array[i]))) {
+#endif
+ free(table, M_TEMP);
+ free(ioe, M_TEMP);
+ error = EFAULT;
+ goto fail;
+ }
+ }
+ free(table, M_TEMP);
+ free(ioe, M_TEMP);
+ break;
+ }
+
+ case DIOCXROLLBACK: {
+ struct pfioc_trans *io = (struct pfioc_trans *)addr;
+ struct pfioc_trans_e *ioe;
+ struct pfr_table *table;
+ int i;
+
+ if (io->esize != sizeof(*ioe)) {
+ error = ENODEV;
+ goto fail;
+ }
+#ifdef __FreeBSD__
+ PF_UNLOCK();
+#endif
+ ioe = (struct pfioc_trans_e *)malloc(sizeof(*ioe),
+ M_TEMP, M_WAITOK);
+ table = (struct pfr_table *)malloc(sizeof(*table),
+ M_TEMP, M_WAITOK);
+#ifdef __FreeBSD__
+ PF_LOCK();
+#endif
+ for (i = 0; i < io->size; i++) {
+#ifdef __FreeBSD__
+ PF_COPYIN(io->array+i, ioe, sizeof(*ioe), error);
+ if (error) {
+#else
+ if (copyin(io->array+i, ioe, sizeof(*ioe))) {
+#endif
+ free(table, M_TEMP);
+ free(ioe, M_TEMP);
+ error = EFAULT;
+ goto fail;
+ }
+ switch (ioe->rs_num) {
+#ifdef ALTQ
+ case PF_RULESET_ALTQ:
+ if (ioe->anchor[0]) {
+ free(table, M_TEMP);
+ free(ioe, M_TEMP);
+ error = EINVAL;
+ goto fail;
+ }
+ if ((error = pf_rollback_altq(ioe->ticket))) {
+ free(table, M_TEMP);
+ free(ioe, M_TEMP);
+ goto fail; /* really bad */
+ }
+ break;
+#endif /* ALTQ */
+ case PF_RULESET_TABLE:
+ bzero(table, sizeof(*table));
+ strlcpy(table->pfrt_anchor, ioe->anchor,
+ sizeof(table->pfrt_anchor));
+ if ((error = pfr_ina_rollback(table,
+ ioe->ticket, NULL, 0))) {
+ free(table, M_TEMP);
+ free(ioe, M_TEMP);
+ goto fail; /* really bad */
+ }
+ break;
+ default:
+ if ((error = pf_rollback_rules(ioe->ticket,
+ ioe->rs_num, ioe->anchor))) {
+ free(table, M_TEMP);
+ free(ioe, M_TEMP);
+ goto fail; /* really bad */
+ }
+ break;
+ }
+ }
+ free(table, M_TEMP);
+ free(ioe, M_TEMP);
+ break;
+ }
+
+ case DIOCXCOMMIT: {
+ struct pfioc_trans *io = (struct pfioc_trans *)addr;
+ struct pfioc_trans_e *ioe;
+ struct pfr_table *table;
+ struct pf_ruleset *rs;
+ int i;
+
+ if (io->esize != sizeof(*ioe)) {
+ error = ENODEV;
+ goto fail;
+ }
+#ifdef __FreeBSD__
+ PF_UNLOCK();
+#endif
+ ioe = (struct pfioc_trans_e *)malloc(sizeof(*ioe),
+ M_TEMP, M_WAITOK);
+ table = (struct pfr_table *)malloc(sizeof(*table),
+ M_TEMP, M_WAITOK);
+#ifdef __FreeBSD__
+ PF_LOCK();
+#endif
+ /* first make sure everything will succeed */
+ for (i = 0; i < io->size; i++) {
+#ifdef __FreeBSD__
+ PF_COPYIN(io->array+i, ioe, sizeof(*ioe), error);
+ if (error) {
+#else
+ if (copyin(io->array+i, ioe, sizeof(*ioe))) {
+#endif
+ free(table, M_TEMP);
+ free(ioe, M_TEMP);
+ error = EFAULT;
+ goto fail;
+ }
+ switch (ioe->rs_num) {
+#ifdef ALTQ
+ case PF_RULESET_ALTQ:
+ if (ioe->anchor[0]) {
+ free(table, M_TEMP);
+ free(ioe, M_TEMP);
+ error = EINVAL;
+ goto fail;
+ }
+ if (!altqs_inactive_open || ioe->ticket !=
+ ticket_altqs_inactive) {
+ free(table, M_TEMP);
+ free(ioe, M_TEMP);
+ error = EBUSY;
+ goto fail;
+ }
+ break;
+#endif /* ALTQ */
+ case PF_RULESET_TABLE:
+ rs = pf_find_ruleset(ioe->anchor);
+ if (rs == NULL || !rs->topen || ioe->ticket !=
+ rs->tticket) {
+ free(table, M_TEMP);
+ free(ioe, M_TEMP);
+ error = EBUSY;
+ goto fail;
+ }
+ break;
+ default:
+ if (ioe->rs_num < 0 || ioe->rs_num >=
+ PF_RULESET_MAX) {
+ free(table, M_TEMP);
+ free(ioe, M_TEMP);
+ error = EINVAL;
+ goto fail;
+ }
+ rs = pf_find_ruleset(ioe->anchor);
+ if (rs == NULL ||
+ !rs->rules[ioe->rs_num].inactive.open ||
+ rs->rules[ioe->rs_num].inactive.ticket !=
+ ioe->ticket) {
+ free(table, M_TEMP);
+ free(ioe, M_TEMP);
+ error = EBUSY;
+ goto fail;
+ }
+ break;
+ }
+ }
+ /* now do the commit - no errors should happen here */
+ for (i = 0; i < io->size; i++) {
+#ifdef __FreeBSD__
+ PF_COPYIN(io->array+i, ioe, sizeof(*ioe), error);
+ if (error) {
+#else
+ if (copyin(io->array+i, ioe, sizeof(*ioe))) {
+#endif
+ free(table, M_TEMP);
+ free(ioe, M_TEMP);
+ error = EFAULT;
+ goto fail;
+ }
+ switch (ioe->rs_num) {
+#ifdef ALTQ
+ case PF_RULESET_ALTQ:
+ if ((error = pf_commit_altq(ioe->ticket))) {
+ free(table, M_TEMP);
+ free(ioe, M_TEMP);
+ goto fail; /* really bad */
+ }
+ break;
+#endif /* ALTQ */
+ case PF_RULESET_TABLE:
+ bzero(table, sizeof(*table));
+ strlcpy(table->pfrt_anchor, ioe->anchor,
+ sizeof(table->pfrt_anchor));
+ if ((error = pfr_ina_commit(table, ioe->ticket,
+ NULL, NULL, 0))) {
+ free(table, M_TEMP);
+ free(ioe, M_TEMP);
+ goto fail; /* really bad */
+ }
+ break;
+ default:
+ if ((error = pf_commit_rules(ioe->ticket,
+ ioe->rs_num, ioe->anchor))) {
+ free(table, M_TEMP);
+ free(ioe, M_TEMP);
+ goto fail; /* really bad */
+ }
+ break;
+ }
+ }
+ free(table, M_TEMP);
+ free(ioe, M_TEMP);
+ break;
+ }
+
+ case DIOCGETSRCNODES: {
+ struct pfioc_src_nodes *psn = (struct pfioc_src_nodes *)addr;
+ struct pf_src_node *n, *p, *pstore;
+ u_int32_t nr = 0;
+ int space = psn->psn_len;
+
+ if (space == 0) {
+ RB_FOREACH(n, pf_src_tree, &tree_src_tracking)
+ nr++;
+ psn->psn_len = sizeof(struct pf_src_node) * nr;
+ break;
+ }
+
+#ifdef __FreeBSD__
+ PF_UNLOCK();
+#endif
+ pstore = malloc(sizeof(*pstore), M_TEMP, M_WAITOK);
+#ifdef __FreeBSD__
+ PF_LOCK();
+#endif
+
+ p = psn->psn_src_nodes;
+ RB_FOREACH(n, pf_src_tree, &tree_src_tracking) {
+ int secs = time_second, diff;
+
+ if ((nr + 1) * sizeof(*p) > (unsigned)psn->psn_len)
+ break;
+
+ bcopy(n, pstore, sizeof(*pstore));
+ if (n->rule.ptr != NULL)
+ pstore->rule.nr = n->rule.ptr->nr;
+ pstore->creation = secs - pstore->creation;
+ if (pstore->expire > secs)
+ pstore->expire -= secs;
+ else
+ pstore->expire = 0;
+
+ /* adjust the connection rate estimate */
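+ /* The estimate decays linearly over the conn_rate.seconds window. */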
+ diff = secs - n->conn_rate.last;
+ if (diff >= n->conn_rate.seconds)
+ pstore->conn_rate.count = 0;
+ else
+ pstore->conn_rate.count -=
+ n->conn_rate.count * diff /
+ n->conn_rate.seconds;
+
+#ifdef __FreeBSD__
+ PF_COPYOUT(pstore, p, sizeof(*p), error);
+#else
+ error = copyout(pstore, p, sizeof(*p));
+#endif
+ if (error) {
+ free(pstore, M_TEMP);
+ goto fail;
+ }
+ p++;
+ nr++;
+ }
+ psn->psn_len = sizeof(struct pf_src_node) * nr;
+
+ free(pstore, M_TEMP);
+ break;
+ }
+
+ case DIOCCLRSRCNODES: {
+ struct pf_src_node *n;
+ struct pf_state *state;
+
+ RB_FOREACH(state, pf_state_tree_id, &tree_id) {
+ state->src_node = NULL;
+ state->nat_src_node = NULL;
+ }
+ RB_FOREACH(n, pf_src_tree, &tree_src_tracking) {
+ n->expire = 1;
+ n->states = 0;
+ }
+ pf_purge_expired_src_nodes(1);
+ pf_status.src_nodes = 0;
+ break;
+ }
+
+ case DIOCKILLSRCNODES: {
+ struct pf_src_node *sn;
+ struct pf_state *s;
+ struct pfioc_src_node_kill *psnk =
+ (struct pfioc_src_node_kill *)addr;
+ int killed = 0;
+
+ RB_FOREACH(sn, pf_src_tree, &tree_src_tracking) {
+ if (PF_MATCHA(psnk->psnk_src.neg,
+ &psnk->psnk_src.addr.v.a.addr,
+ &psnk->psnk_src.addr.v.a.mask,
+ &sn->addr, sn->af) &&
+ PF_MATCHA(psnk->psnk_dst.neg,
+ &psnk->psnk_dst.addr.v.a.addr,
+ &psnk->psnk_dst.addr.v.a.mask,
+ &sn->raddr, sn->af)) {
+ /* Handle state to src_node linkage */
+ if (sn->states != 0) {
+ RB_FOREACH(s, pf_state_tree_id,
+ &tree_id) {
+ if (s->src_node == sn)
+ s->src_node = NULL;
+ if (s->nat_src_node == sn)
+ s->nat_src_node = NULL;
+ }
+ sn->states = 0;
+ }
+ sn->expire = 1;
+ killed++;
+ }
+ }
+
+ if (killed > 0)
+ pf_purge_expired_src_nodes(1);
+
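+ /* XXX the kill count is returned to userland in psnk_af */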
+ psnk->psnk_af = killed;
+ break;
+ }
+
+ case DIOCSETHOSTID: {
+ u_int32_t *hostid = (u_int32_t *)addr;
+
+ if (*hostid == 0)
+ pf_status.hostid = arc4random();
+ else
+ pf_status.hostid = *hostid;
+ break;
+ }
+
+ case DIOCOSFPFLUSH:
+ pf_osfp_flush();
+ break;
+
+ case DIOCIGETIFACES: {
+ struct pfioc_iface *io = (struct pfioc_iface *)addr;
+
+ if (io->pfiio_esize != sizeof(struct pfi_kif)) {
+ error = ENODEV;
+ break;
+ }
+ error = pfi_get_ifaces(io->pfiio_name, io->pfiio_buffer,
+ &io->pfiio_size);
+ break;
+ }
+
+ case DIOCSETIFFLAG: {
+ struct pfioc_iface *io = (struct pfioc_iface *)addr;
+
+ error = pfi_set_flags(io->pfiio_name, io->pfiio_flags);
+ break;
+ }
+
+ case DIOCCLRIFFLAG: {
+ struct pfioc_iface *io = (struct pfioc_iface *)addr;
+
+ error = pfi_clear_flags(io->pfiio_name, io->pfiio_flags);
+ break;
+ }
+
+ default:
+ error = ENODEV;
+ break;
+ }
+fail:
+#ifdef __FreeBSD__
+ PF_UNLOCK();
+
+ if (flags & FWRITE)
+ sx_xunlock(&pf_consistency_lock);
+ else
+ sx_sunlock(&pf_consistency_lock);
+#else
+ splx(s);
+ /* XXX: Lock order? */
+ if (flags & FWRITE)
+ rw_exit_write(&pf_consistency_lock);
+ else
+ rw_exit_read(&pf_consistency_lock);
+#endif
+ return (error);
+}
+
+#ifdef __FreeBSD__
+/*
+ * XXX - Check for version mismatch!!!
+ */
+static void
+pf_clear_states(void)
+{
+ struct pf_state *state;
+
+ RB_FOREACH(state, pf_state_tree_id, &tree_id) {
+ state->timeout = PFTM_PURGE;
+#if NPFSYNC
+ /* don't send out individual delete messages */
+ state->sync_flags = PFSTATE_NOSYNC;
+#endif
+ pf_unlink_state(state);
+ }
+
+#if 0 /* NPFSYNC */
+/*
+ * XXX This is called on module unload; we do not want to sync that over?
+ */
+ pfsync_clear_states(pf_status.hostid, psk->psk_ifname);
+#endif
+}
+
+static int
+pf_clear_tables(void)
+{
+ struct pfioc_table io;
+ int error;
+
+ bzero(&io, sizeof(io));
+
+ error = pfr_clr_tables(&io.pfrio_table, &io.pfrio_ndel,
+ io.pfrio_flags);
+
+ return (error);
+}
+
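+/*
+ * Detach every state from its source node(s) and mark all source
+ * nodes as expired so the purge code can reclaim them.
+ */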
+static void
+pf_clear_srcnodes(void)
+{
+ struct pf_src_node *n;
+ struct pf_state *state;
+
+ RB_FOREACH(state, pf_state_tree_id, &tree_id) {
+ state->src_node = NULL;
+ state->nat_src_node = NULL;
+ }
+ RB_FOREACH(n, pf_src_tree, &tree_src_tracking) {
+ n->expire = 1;
+ n->states = 0;
+ }
+}
+/*
+ * XXX - Check for version mismatch!!!
+ */
+
+/*
+ * Duplicate pfctl -Fa operation to get rid of as much as we can.
+ */
+static int
+shutdown_pf(void)
+{
+ int error = 0;
+ u_int32_t t[5];
+ char nn = '\0';
+
+ pf_status.running = 0;
+ do {
+ if ((error = pf_begin_rules(&t[0], PF_RULESET_SCRUB, &nn))
+ != 0) {
+ DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: SCRUB\n"));
+ break;
+ }
+ if ((error = pf_begin_rules(&t[1], PF_RULESET_FILTER, &nn))
+ != 0) {
+ DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: FILTER\n"));
+ break; /* XXX: rollback? */
+ }
+ if ((error = pf_begin_rules(&t[2], PF_RULESET_NAT, &nn))
+ != 0) {
+ DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: NAT\n"));
+ break; /* XXX: rollback? */
+ }
+ if ((error = pf_begin_rules(&t[3], PF_RULESET_BINAT, &nn))
+ != 0) {
+ DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: BINAT\n"));
+ break; /* XXX: rollback? */
+ }
+ if ((error = pf_begin_rules(&t[4], PF_RULESET_RDR, &nn))
+ != 0) {
+ DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: RDR\n"));
+ break; /* XXX: rollback? */
+ }
+
+ /* XXX: these should always succeed here */
+ pf_commit_rules(t[0], PF_RULESET_SCRUB, &nn);
+ pf_commit_rules(t[1], PF_RULESET_FILTER, &nn);
+ pf_commit_rules(t[2], PF_RULESET_NAT, &nn);
+ pf_commit_rules(t[3], PF_RULESET_BINAT, &nn);
+ pf_commit_rules(t[4], PF_RULESET_RDR, &nn);
+
+ if ((error = pf_clear_tables()) != 0)
+ break;
+
+#ifdef ALTQ
+ if ((error = pf_begin_altq(&t[0])) != 0) {
+ DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: ALTQ\n"));
+ break;
+ }
+ pf_commit_altq(t[0]);
+#endif
+
+ pf_clear_states();
+
+ pf_clear_srcnodes();
+
+ /* status does not use malloced mem so no need to cleanup */
+ /* fingerprints and interfaces have their own cleanup code */
+ } while(0);
+
+ return (error);
+}
+
+static int
+pf_check_in(void *arg, struct mbuf **m, struct ifnet *ifp, int dir,
+ struct inpcb *inp)
+{
+ /*
+ * XXX Wed Jul 9 22:03:16 2003 UTC
+ * OpenBSD has changed its byte ordering convention on ip_len/ip_off
+ * in the network stack. OpenBSD's network stack used to convert
+ * ip_len/ip_off to host byte order first, as FreeBSD does.
+ * This is no longer true, so we must convert back to network
+ * byte order before handing the packet to pf.
+ */
+ struct ip *h = NULL;
+ int chk;
+
+ if ((*m)->m_pkthdr.len >= (int)sizeof(struct ip)) {
+ /* if m_pkthdr.len is less than ip header, pf will handle. */
+ h = mtod(*m, struct ip *);
+ HTONS(h->ip_len);
+ HTONS(h->ip_off);
+ }
+ chk = pf_test(PF_IN, ifp, m, NULL, inp);
+ if (chk && *m) {
+ m_freem(*m);
+ *m = NULL;
+ }
+ if (*m != NULL) {
+ /* pf_test can change ip header location */
+ h = mtod(*m, struct ip *);
+ NTOHS(h->ip_len);
+ NTOHS(h->ip_off);
+ }
+ return chk;
+}
+
+static int
+pf_check_out(void *arg, struct mbuf **m, struct ifnet *ifp, int dir,
+ struct inpcb *inp)
+{
+ /*
+ * XXX Wed Jul 9 22:03:16 2003 UTC
+ * OpenBSD has changed its byte ordering convention on ip_len/ip_off
+ * in the network stack. OpenBSD's network stack used to convert
+ * ip_len/ip_off to host byte order first, as FreeBSD does.
+ * This is no longer true, so we must convert back to network
+ * byte order before handing the packet to pf.
+ */
+ struct ip *h = NULL;
+ int chk;
+
+ /* We need a proper CSUM before we start (see OpenBSD ip_output) */
+ if ((*m)->m_pkthdr.csum_flags & CSUM_DELAY_DATA) {
+ in_delayed_cksum(*m);
+ (*m)->m_pkthdr.csum_flags &= ~CSUM_DELAY_DATA;
+ }
+ if ((*m)->m_pkthdr.len >= (int)sizeof(*h)) {
+ /* if m_pkthdr.len is less than ip header, pf will handle. */
+ h = mtod(*m, struct ip *);
+ HTONS(h->ip_len);
+ HTONS(h->ip_off);
+ }
+ chk = pf_test(PF_OUT, ifp, m, NULL, inp);
+ if (chk && *m) {
+ m_freem(*m);
+ *m = NULL;
+ }
+ if (*m != NULL) {
+ /* pf_test can change ip header location */
+ h = mtod(*m, struct ip *);
+ NTOHS(h->ip_len);
+ NTOHS(h->ip_off);
+ }
+ return chk;
+}
+
+#ifdef INET6
+static int
+pf_check6_in(void *arg, struct mbuf **m, struct ifnet *ifp, int dir,
+ struct inpcb *inp)
+{
+
+ /*
+ * IPv6 is not affected by ip_len/ip_off byte order changes.
+ */
+ int chk;
+
+ /*
+ * In case of loopback traffic IPv6 uses the real interface in
+ * order to support scoped addresses. In order to support stateful
+ * filtering we have to change this to lo0 as is the case in IPv4.
+ */
+ chk = pf_test6(PF_IN, (*m)->m_flags & M_LOOP ? V_loif : ifp, m,
+ NULL, inp);
+ if (chk && *m) {
+ m_freem(*m);
+ *m = NULL;
+ }
+ return chk;
+}
+
+static int
+pf_check6_out(void *arg, struct mbuf **m, struct ifnet *ifp, int dir,
+ struct inpcb *inp)
+{
+ /*
+ * IPv6 is not affected by ip_len/ip_off byte order changes.
+ */
+ int chk;
+
+ /* We need a proper CSUM before we start (see OpenBSD ip_output) */
+ if ((*m)->m_pkthdr.csum_flags & CSUM_DELAY_DATA) {
+ in_delayed_cksum(*m);
+ (*m)->m_pkthdr.csum_flags &= ~CSUM_DELAY_DATA;
+ }
+ chk = pf_test6(PF_OUT, ifp, m, NULL, inp);
+ if (chk && *m) {
+ m_freem(*m);
+ *m = NULL;
+ }
+ return chk;
+}
+#endif /* INET6 */
+
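+/*
+ * Register the pf input/output hooks with pfil(9) for AF_INET and,
+ * when INET6 is configured, AF_INET6. If the IPv6 pfil head cannot
+ * be found, the already installed IPv4 hooks are removed again so
+ * the operation is all-or-nothing.
+ */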
+static int
+hook_pf(void)
+{
+ struct pfil_head *pfh_inet;
+#ifdef INET6
+ struct pfil_head *pfh_inet6;
+#endif
+
+ PF_ASSERT(MA_NOTOWNED);
+
+ if (pf_pfil_hooked)
+ return (0);
+
+ pfh_inet = pfil_head_get(PFIL_TYPE_AF, AF_INET);
+ if (pfh_inet == NULL)
+ return (ESRCH); /* XXX */
+ pfil_add_hook(pf_check_in, NULL, PFIL_IN | PFIL_WAITOK, pfh_inet);
+ pfil_add_hook(pf_check_out, NULL, PFIL_OUT | PFIL_WAITOK, pfh_inet);
+#ifdef INET6
+ pfh_inet6 = pfil_head_get(PFIL_TYPE_AF, AF_INET6);
+ if (pfh_inet6 == NULL) {
+ pfil_remove_hook(pf_check_in, NULL, PFIL_IN | PFIL_WAITOK,
+ pfh_inet);
+ pfil_remove_hook(pf_check_out, NULL, PFIL_OUT | PFIL_WAITOK,
+ pfh_inet);
+ return (ESRCH); /* XXX */
+ }
+ pfil_add_hook(pf_check6_in, NULL, PFIL_IN | PFIL_WAITOK, pfh_inet6);
+ pfil_add_hook(pf_check6_out, NULL, PFIL_OUT | PFIL_WAITOK, pfh_inet6);
+#endif
+
+ pf_pfil_hooked = 1;
+ return (0);
+}
+
+static int
+dehook_pf(void)
+{
+ struct pfil_head *pfh_inet;
+#ifdef INET6
+ struct pfil_head *pfh_inet6;
+#endif
+
+ PF_ASSERT(MA_NOTOWNED);
+
+ if (pf_pfil_hooked == 0)
+ return (0);
+
+ pfh_inet = pfil_head_get(PFIL_TYPE_AF, AF_INET);
+ if (pfh_inet == NULL)
+ return (ESRCH); /* XXX */
+ pfil_remove_hook(pf_check_in, NULL, PFIL_IN | PFIL_WAITOK,
+ pfh_inet);
+ pfil_remove_hook(pf_check_out, NULL, PFIL_OUT | PFIL_WAITOK,
+ pfh_inet);
+#ifdef INET6
+ pfh_inet6 = pfil_head_get(PFIL_TYPE_AF, AF_INET6);
+ if (pfh_inet6 == NULL)
+ return (ESRCH); /* XXX */
+ pfil_remove_hook(pf_check6_in, NULL, PFIL_IN | PFIL_WAITOK,
+ pfh_inet6);
+ pfil_remove_hook(pf_check6_out, NULL, PFIL_OUT | PFIL_WAITOK,
+ pfh_inet6);
+#endif
+
+ pf_pfil_hooked = 0;
+ return (0);
+}
+
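+/*
+ * Module load: create the pf device node and attach pf, tearing both
+ * down again if pfattach() fails.
+ */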
+static int
+pf_load(void)
+{
+ init_zone_var();
+ init_pf_mutex();
+ pf_dev = make_dev(&pf_cdevsw, 0, 0, 0, 0600, PF_NAME);
+ if (pfattach() < 0) {
+ destroy_dev(pf_dev);
+ destroy_pf_mutex();
+ return (ENOMEM);
+ }
+ return (0);
+}
+
+static int
+pf_unload(void)
+{
+ int error = 0;
+
+ PF_LOCK();
+ pf_status.running = 0;
+ PF_UNLOCK();
+ error = dehook_pf();
+ if (error) {
+ /*
+ * Should not happen!
+ * XXX Due to error code ESRCH, kldunload will show
+ * a message like 'No such process'.
+ */
+ printf("%s : pfil unregisteration fail\n", __FUNCTION__);
+ return error;
+ }
+ PF_LOCK();
+ shutdown_pf();
+ pf_end_threads = 1;
+ while (pf_end_threads < 2) {
+ wakeup_one(pf_purge_thread);
+ msleep(pf_purge_thread, &pf_task_mtx, 0, "pftmo", hz);
+ }
+ pfi_cleanup();
+ pf_osfp_flush();
+ pf_osfp_cleanup();
+ cleanup_pf_zone();
+ PF_UNLOCK();
+ destroy_dev(pf_dev);
+ destroy_pf_mutex();
+ return error;
+}
+
+static int
+pf_modevent(module_t mod, int type, void *data)
+{
+ int error = 0;
+
+ switch(type) {
+ case MOD_LOAD:
+ error = pf_load();
+ break;
+
+ case MOD_UNLOAD:
+ error = pf_unload();
+ break;
+ default:
+ error = EINVAL;
+ break;
+ }
+ return error;
+}
+
+static moduledata_t pf_mod = {
+ "pf",
+ pf_modevent,
+ 0
+};
+
+DECLARE_MODULE(pf, pf_mod, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_FIRST);
+MODULE_VERSION(pf, PF_MODVER);
+#endif /* __FreeBSD__ */
diff --git a/contrib/pf/rtems/freebsd/net/pf_mtag.h b/contrib/pf/rtems/freebsd/net/pf_mtag.h
new file mode 100644
index 00000000..09aeb25c
--- /dev/null
+++ b/contrib/pf/rtems/freebsd/net/pf_mtag.h
@@ -0,0 +1,82 @@
+/* $FreeBSD$ */
+/*
+ * Copyright (c) 2001 Daniel Hartmeier
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef _NET_PF_MTAG_HH_
+#define _NET_PF_MTAG_HH_
+
+#ifdef _KERNEL
+
+#define PF_TAG_GENERATED 0x01
+#define PF_TAG_FRAGCACHE 0x02
+#define PF_TAG_TRANSLATE_LOCALHOST 0x04
+
+struct pf_mtag {
+ void *hdr; /* saved hdr pos in mbuf, for ECN */
+ u_int rtableid; /* alternate routing table id */
+ u_int32_t qid; /* queue id */
+ u_int16_t tag; /* tag id */
+ u_int8_t flags;
+ u_int8_t routed;
+ sa_family_t af; /* for ECN */
+};
+
+static __inline struct pf_mtag *pf_find_mtag(struct mbuf *);
+static __inline struct pf_mtag *pf_get_mtag(struct mbuf *);
+
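+/*
+ * The pf_mtag payload lives directly behind the m_tag header that
+ * m_tag_get() allocates, hence the (mtag + 1) pointer arithmetic in
+ * both helpers below.
+ */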
+static __inline struct pf_mtag *
+pf_find_mtag(struct mbuf *m)
+{
+ struct m_tag *mtag;
+
+ if ((mtag = m_tag_find(m, PACKET_TAG_PF, NULL)) == NULL)
+ return (NULL);
+
+ return ((struct pf_mtag *)(mtag + 1));
+}
+
+static __inline struct pf_mtag *
+pf_get_mtag(struct mbuf *m)
+{
+ struct m_tag *mtag;
+
+ if ((mtag = m_tag_find(m, PACKET_TAG_PF, NULL)) == NULL) {
+ mtag = m_tag_get(PACKET_TAG_PF, sizeof(struct pf_mtag),
+ M_NOWAIT);
+ if (mtag == NULL)
+ return (NULL);
+ bzero(mtag + 1, sizeof(struct pf_mtag));
+ m_tag_prepend(m, mtag);
+ }
+
+ return ((struct pf_mtag *)(mtag + 1));
+}
+#endif /* _KERNEL */
+#endif /* _NET_PF_MTAG_HH_ */
diff --git a/contrib/pf/rtems/freebsd/net/pf_norm.c b/contrib/pf/rtems/freebsd/net/pf_norm.c
new file mode 100644
index 00000000..22f24506
--- /dev/null
+++ b/contrib/pf/rtems/freebsd/net/pf_norm.c
@@ -0,0 +1,2062 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/* $OpenBSD: pf_norm.c,v 1.107 2006/04/16 00:59:52 pascoe Exp $ */
+
+/*
+ * Copyright 2001 Niels Provos <provos@citi.umich.edu>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifdef __FreeBSD__
+#include <rtems/freebsd/local/opt_inet.h>
+#include <rtems/freebsd/local/opt_inet6.h>
+#include <rtems/freebsd/local/opt_pf.h>
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#ifdef DEV_PFLOG
+#define NPFLOG DEV_PFLOG
+#else
+#define NPFLOG 0
+#endif
+#else
+#include <rtems/freebsd/local/pflog.h>
+#endif
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/filio.h>
+#include <rtems/freebsd/sys/fcntl.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/time.h>
+#ifndef __FreeBSD__
+#include <rtems/freebsd/sys/pool.h>
+
+#include <rtems/freebsd/dev/rndvar.h>
+#endif
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/if_types.h>
+#include <rtems/freebsd/net/bpf.h>
+#include <rtems/freebsd/net/route.h>
+#include <rtems/freebsd/net/if_pflog.h>
+
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/in_var.h>
+#include <rtems/freebsd/netinet/in_systm.h>
+#include <rtems/freebsd/netinet/ip.h>
+#include <rtems/freebsd/netinet/ip_var.h>
+#include <rtems/freebsd/netinet/tcp.h>
+#include <rtems/freebsd/netinet/tcp_seq.h>
+#include <rtems/freebsd/netinet/udp.h>
+#include <rtems/freebsd/netinet/ip_icmp.h>
+
+#ifdef INET6
+#include <rtems/freebsd/netinet/ip6.h>
+#endif /* INET6 */
+
+#include <rtems/freebsd/net/pfvar.h>
+
+#ifndef __FreeBSD__
+#include <rtems/freebsd/inttypes.h>
+
+struct pf_frent {
+ LIST_ENTRY(pf_frent) fr_next;
+ struct ip *fr_ip;
+ struct mbuf *fr_m;
+};
+
+struct pf_frcache {
+ LIST_ENTRY(pf_frcache) fr_next;
+ uint16_t fr_off;
+ uint16_t fr_end;
+};
+#endif
+
+#define PFFRAG_SEENLAST 0x0001 /* Seen the last fragment for this */
+#define PFFRAG_NOBUFFER 0x0002 /* Non-buffering fragment cache */
+#define PFFRAG_DROP 0x0004 /* Drop all fragments */
+#define BUFFER_FRAGMENTS(fr) (!((fr)->fr_flags & PFFRAG_NOBUFFER))
+
+#ifndef __FreeBSD__
+struct pf_fragment {
+ RB_ENTRY(pf_fragment) fr_entry;
+ TAILQ_ENTRY(pf_fragment) frag_next;
+ struct in_addr fr_src;
+ struct in_addr fr_dst;
+ u_int8_t fr_p; /* protocol of this fragment */
+ u_int8_t fr_flags; /* status flags */
+ u_int16_t fr_id; /* fragment id for reassemble */
+ u_int16_t fr_max; /* fragment data max */
+ u_int32_t fr_timeout;
+#define fr_queue fr_u.fru_queue
+#define fr_cache fr_u.fru_cache
+ union {
+ LIST_HEAD(pf_fragq, pf_frent) fru_queue; /* buffering */
+ LIST_HEAD(pf_cacheq, pf_frcache) fru_cache; /* non-buf */
+ } fr_u;
+};
+#endif
+
+TAILQ_HEAD(pf_fragqueue, pf_fragment) pf_fragqueue;
+TAILQ_HEAD(pf_cachequeue, pf_fragment) pf_cachequeue;
+
+#ifndef __FreeBSD__
+static __inline int pf_frag_compare(struct pf_fragment *,
+ struct pf_fragment *);
+#else
+static int pf_frag_compare(struct pf_fragment *,
+ struct pf_fragment *);
+#endif
+RB_HEAD(pf_frag_tree, pf_fragment) pf_frag_tree, pf_cache_tree;
+RB_PROTOTYPE(pf_frag_tree, pf_fragment, fr_entry, pf_frag_compare);
+RB_GENERATE(pf_frag_tree, pf_fragment, fr_entry, pf_frag_compare);
+
+/* Private prototypes */
+void pf_ip2key(struct pf_fragment *, struct ip *);
+void pf_remove_fragment(struct pf_fragment *);
+void pf_flush_fragments(void);
+void pf_free_fragment(struct pf_fragment *);
+struct pf_fragment *pf_find_fragment(struct ip *, struct pf_frag_tree *);
+struct mbuf *pf_reassemble(struct mbuf **, struct pf_fragment **,
+ struct pf_frent *, int);
+struct mbuf *pf_fragcache(struct mbuf **, struct ip*,
+ struct pf_fragment **, int, int, int *);
+int pf_normalize_tcpopt(struct pf_rule *, struct mbuf *,
+ struct tcphdr *, int);
+
+#define DPFPRINTF(x) do { \
+ if (pf_status.debug >= PF_DEBUG_MISC) { \
+ printf("%s: ", __func__); \
+ printf x ; \
+ } \
+} while(0)
+
+/* Globals */
+#ifdef __FreeBSD__
+uma_zone_t pf_frent_pl, pf_frag_pl, pf_cache_pl, pf_cent_pl;
+uma_zone_t pf_state_scrub_pl;
+#else
+struct pool pf_frent_pl, pf_frag_pl, pf_cache_pl, pf_cent_pl;
+struct pool pf_state_scrub_pl;
+#endif
+int pf_nfrents, pf_ncache;
+
+void
+pf_normalize_init(void)
+{
+#ifdef __FreeBSD__
+ /*
+ * XXX
+ * No high water mark support (it's a hint, not a hard limit).
+ * uma_zone_set_max(pf_frag_pl, PFFRAG_FRAG_HIWAT);
+ */
+ uma_zone_set_max(pf_frent_pl, PFFRAG_FRENT_HIWAT);
+ uma_zone_set_max(pf_cache_pl, PFFRAG_FRCACHE_HIWAT);
+ uma_zone_set_max(pf_cent_pl, PFFRAG_FRCENT_HIWAT);
+#else
+ pool_init(&pf_frent_pl, sizeof(struct pf_frent), 0, 0, 0, "pffrent",
+ NULL);
+ pool_init(&pf_frag_pl, sizeof(struct pf_fragment), 0, 0, 0, "pffrag",
+ NULL);
+ pool_init(&pf_cache_pl, sizeof(struct pf_fragment), 0, 0, 0,
+ "pffrcache", NULL);
+ pool_init(&pf_cent_pl, sizeof(struct pf_frcache), 0, 0, 0, "pffrcent",
+ NULL);
+ pool_init(&pf_state_scrub_pl, sizeof(struct pf_state_scrub), 0, 0, 0,
+ "pfstscr", NULL);
+
+ pool_sethiwat(&pf_frag_pl, PFFRAG_FRAG_HIWAT);
+ pool_sethardlimit(&pf_frent_pl, PFFRAG_FRENT_HIWAT, NULL, 0);
+ pool_sethardlimit(&pf_cache_pl, PFFRAG_FRCACHE_HIWAT, NULL, 0);
+ pool_sethardlimit(&pf_cent_pl, PFFRAG_FRCENT_HIWAT, NULL, 0);
+#endif
+
+ TAILQ_INIT(&pf_fragqueue);
+ TAILQ_INIT(&pf_cachequeue);
+}
+
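+/*
+ * Order fragments by (id, protocol, source, destination) so that all
+ * fragments of one datagram collide in the red-black tree lookup.
+ */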
+#ifdef __FreeBSD__
+static int
+#else
+static __inline int
+#endif
+pf_frag_compare(struct pf_fragment *a, struct pf_fragment *b)
+{
+ int diff;
+
+ if ((diff = a->fr_id - b->fr_id))
+ return (diff);
+ else if ((diff = a->fr_p - b->fr_p))
+ return (diff);
+ else if (a->fr_src.s_addr < b->fr_src.s_addr)
+ return (-1);
+ else if (a->fr_src.s_addr > b->fr_src.s_addr)
+ return (1);
+ else if (a->fr_dst.s_addr < b->fr_dst.s_addr)
+ return (-1);
+ else if (a->fr_dst.s_addr > b->fr_dst.s_addr)
+ return (1);
+ return (0);
+}
+
+void
+pf_purge_expired_fragments(void)
+{
+ struct pf_fragment *frag;
+ u_int32_t expire = time_second -
+ pf_default_rule.timeout[PFTM_FRAG];
+
+ while ((frag = TAILQ_LAST(&pf_fragqueue, pf_fragqueue)) != NULL) {
+#ifdef __FreeBSD__
+ KASSERT((BUFFER_FRAGMENTS(frag)),
+ ("BUFFER_FRAGMENTS(frag) == 0: %s", __FUNCTION__));
+#else
+ KASSERT(BUFFER_FRAGMENTS(frag));
+#endif
+ if (frag->fr_timeout > expire)
+ break;
+
+ DPFPRINTF(("expiring %d(%p)\n", frag->fr_id, frag));
+ pf_free_fragment(frag);
+ }
+
+ while ((frag = TAILQ_LAST(&pf_cachequeue, pf_cachequeue)) != NULL) {
+#ifdef __FreeBSD__
+ KASSERT((!BUFFER_FRAGMENTS(frag)),
+ ("BUFFER_FRAGMENTS(frag) != 0: %s", __FUNCTION__));
+#else
+ KASSERT(!BUFFER_FRAGMENTS(frag));
+#endif
+ if (frag->fr_timeout > expire)
+ break;
+
+ DPFPRINTF(("expiring %d(%p)\n", frag->fr_id, frag));
+ pf_free_fragment(frag);
+#ifdef __FreeBSD__
+ KASSERT((TAILQ_EMPTY(&pf_cachequeue) ||
+ TAILQ_LAST(&pf_cachequeue, pf_cachequeue) != frag),
+ ("!(TAILQ_EMPTY() || TAILQ_LAST() == farg): %s",
+ __FUNCTION__));
+#else
+ KASSERT(TAILQ_EMPTY(&pf_cachequeue) ||
+ TAILQ_LAST(&pf_cachequeue, pf_cachequeue) != frag);
+#endif
+ }
+}
+
+/*
+ * Try to flush old fragments to make space for new ones
+ */
+
+void
+pf_flush_fragments(void)
+{
+ struct pf_fragment *frag;
+ int goal;
+
+ goal = pf_nfrents * 9 / 10;
+ DPFPRINTF(("trying to free > %d frents\n",
+ pf_nfrents - goal));
+ while (goal < pf_nfrents) {
+ frag = TAILQ_LAST(&pf_fragqueue, pf_fragqueue);
+ if (frag == NULL)
+ break;
+ pf_free_fragment(frag);
+ }
+
+ goal = pf_ncache * 9 / 10;
+ DPFPRINTF(("trying to free > %d cache entries\n",
+ pf_ncache - goal));
+ while (goal < pf_ncache) {
+ frag = TAILQ_LAST(&pf_cachequeue, pf_cachequeue);
+ if (frag == NULL)
+ break;
+ pf_free_fragment(frag);
+ }
+}
+
+/* Frees the fragments and all associated entries */
+
+void
+pf_free_fragment(struct pf_fragment *frag)
+{
+ struct pf_frent *frent;
+ struct pf_frcache *frcache;
+
+ /* Free all fragments */
+ if (BUFFER_FRAGMENTS(frag)) {
+ for (frent = LIST_FIRST(&frag->fr_queue); frent;
+ frent = LIST_FIRST(&frag->fr_queue)) {
+ LIST_REMOVE(frent, fr_next);
+
+ m_freem(frent->fr_m);
+ pool_put(&pf_frent_pl, frent);
+ pf_nfrents--;
+ }
+ } else {
+ for (frcache = LIST_FIRST(&frag->fr_cache); frcache;
+ frcache = LIST_FIRST(&frag->fr_cache)) {
+ LIST_REMOVE(frcache, fr_next);
+
+#ifdef __FreeBSD__
+ KASSERT((LIST_EMPTY(&frag->fr_cache) ||
+ LIST_FIRST(&frag->fr_cache)->fr_off >
+ frcache->fr_end),
+ ("! (LIST_EMPTY() || LIST_FIRST()->fr_off >"
+ " frcache->fr_end): %s", __FUNCTION__));
+#else
+ KASSERT(LIST_EMPTY(&frag->fr_cache) ||
+ LIST_FIRST(&frag->fr_cache)->fr_off >
+ frcache->fr_end);
+#endif
+
+ pool_put(&pf_cent_pl, frcache);
+ pf_ncache--;
+ }
+ }
+
+ pf_remove_fragment(frag);
+}
+
+void
+pf_ip2key(struct pf_fragment *key, struct ip *ip)
+{
+ key->fr_p = ip->ip_p;
+ key->fr_id = ip->ip_id;
+ key->fr_src.s_addr = ip->ip_src.s_addr;
+ key->fr_dst.s_addr = ip->ip_dst.s_addr;
+}
+
+struct pf_fragment *
+pf_find_fragment(struct ip *ip, struct pf_frag_tree *tree)
+{
+ struct pf_fragment key;
+ struct pf_fragment *frag;
+
+ pf_ip2key(&key, ip);
+
+ frag = RB_FIND(pf_frag_tree, tree, &key);
+ if (frag != NULL) {
+ /* XXX Are we sure we want to update the timeout? */
+ frag->fr_timeout = time_second;
+ if (BUFFER_FRAGMENTS(frag)) {
+ TAILQ_REMOVE(&pf_fragqueue, frag, frag_next);
+ TAILQ_INSERT_HEAD(&pf_fragqueue, frag, frag_next);
+ } else {
+ TAILQ_REMOVE(&pf_cachequeue, frag, frag_next);
+ TAILQ_INSERT_HEAD(&pf_cachequeue, frag, frag_next);
+ }
+ }
+
+ return (frag);
+}
+
+/* Removes a fragment from the fragment queue and frees the fragment */
+
+void
+pf_remove_fragment(struct pf_fragment *frag)
+{
+ if (BUFFER_FRAGMENTS(frag)) {
+ RB_REMOVE(pf_frag_tree, &pf_frag_tree, frag);
+ TAILQ_REMOVE(&pf_fragqueue, frag, frag_next);
+ pool_put(&pf_frag_pl, frag);
+ } else {
+ RB_REMOVE(pf_frag_tree, &pf_cache_tree, frag);
+ TAILQ_REMOVE(&pf_cachequeue, frag, frag_next);
+ pool_put(&pf_cache_pl, frag);
+ }
+}
+
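+/* ip_off counts the fragment offset in 8-byte units; shift to bytes. */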
+#define FR_IP_OFF(fr) ((ntohs((fr)->fr_ip->ip_off) & IP_OFFMASK) << 3)
+struct mbuf *
+pf_reassemble(struct mbuf **m0, struct pf_fragment **frag,
+ struct pf_frent *frent, int mff)
+{
+ struct mbuf *m = *m0, *m2;
+ struct pf_frent *frea, *next;
+ struct pf_frent *frep = NULL;
+ struct ip *ip = frent->fr_ip;
+ int hlen = ip->ip_hl << 2;
+ u_int16_t off = (ntohs(ip->ip_off) & IP_OFFMASK) << 3;
+ u_int16_t ip_len = ntohs(ip->ip_len) - ip->ip_hl * 4;
+ u_int16_t max = ip_len + off;
+
+#ifdef __FreeBSD__
+ KASSERT((*frag == NULL || BUFFER_FRAGMENTS(*frag)),
+ ("! (*frag == NULL || BUFFER_FRAGMENTS(*frag)): %s", __FUNCTION__));
+#else
+ KASSERT(*frag == NULL || BUFFER_FRAGMENTS(*frag));
+#endif
+
+ /* Strip off ip header */
+ m->m_data += hlen;
+ m->m_len -= hlen;
+
+ /* Create a new reassembly queue for this packet */
+ if (*frag == NULL) {
+ *frag = pool_get(&pf_frag_pl, PR_NOWAIT);
+ if (*frag == NULL) {
+ pf_flush_fragments();
+ *frag = pool_get(&pf_frag_pl, PR_NOWAIT);
+ if (*frag == NULL)
+ goto drop_fragment;
+ }
+
+ (*frag)->fr_flags = 0;
+ (*frag)->fr_max = 0;
+ (*frag)->fr_src = frent->fr_ip->ip_src;
+ (*frag)->fr_dst = frent->fr_ip->ip_dst;
+ (*frag)->fr_p = frent->fr_ip->ip_p;
+ (*frag)->fr_id = frent->fr_ip->ip_id;
+ (*frag)->fr_timeout = time_second;
+ LIST_INIT(&(*frag)->fr_queue);
+
+ RB_INSERT(pf_frag_tree, &pf_frag_tree, *frag);
+ TAILQ_INSERT_HEAD(&pf_fragqueue, *frag, frag_next);
+
+ /* We do not have a previous fragment */
+ frep = NULL;
+ goto insert;
+ }
+
+ /*
+ * Find a fragment after the current one:
+ * - off contains the real shifted offset.
+ */
+ LIST_FOREACH(frea, &(*frag)->fr_queue, fr_next) {
+ if (FR_IP_OFF(frea) > off)
+ break;
+ frep = frea;
+ }
+
+#ifdef __FreeBSD__
+ KASSERT((frep != NULL || frea != NULL),
+ ("!(frep != NULL || frea != NULL): %s", __FUNCTION__));;
+#else
+ KASSERT(frep != NULL || frea != NULL);
+#endif
+
+ if (frep != NULL &&
+ FR_IP_OFF(frep) + ntohs(frep->fr_ip->ip_len) - frep->fr_ip->ip_hl *
+ 4 > off)
+ {
+ u_int16_t precut;
+
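+ /*
+ * precut is the number of bytes of this fragment already
+ * covered by the previous one; drop the fragment if it is
+ * a full duplicate, otherwise trim the duplicated leading
+ * bytes below.
+ */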
+ precut = FR_IP_OFF(frep) + ntohs(frep->fr_ip->ip_len) -
+ frep->fr_ip->ip_hl * 4 - off;
+ if (precut >= ip_len)
+ goto drop_fragment;
+ m_adj(frent->fr_m, precut);
+ DPFPRINTF(("overlap -%d\n", precut));
+ /* Enforce 8 byte boundaries */
+ ip->ip_off = htons(ntohs(ip->ip_off) + (precut >> 3));
+ off = (ntohs(ip->ip_off) & IP_OFFMASK) << 3;
+ ip_len -= precut;
+ ip->ip_len = htons(ip_len);
+ }
+
+ for (; frea != NULL && ip_len + off > FR_IP_OFF(frea);
+ frea = next)
+ {
+ u_int16_t aftercut;
+
+ aftercut = ip_len + off - FR_IP_OFF(frea);
+ DPFPRINTF(("adjust overlap %d\n", aftercut));
+ if (aftercut < ntohs(frea->fr_ip->ip_len) - frea->fr_ip->ip_hl
+ * 4)
+ {
+ frea->fr_ip->ip_len =
+ htons(ntohs(frea->fr_ip->ip_len) - aftercut);
+ frea->fr_ip->ip_off = htons(ntohs(frea->fr_ip->ip_off) +
+ (aftercut >> 3));
+ m_adj(frea->fr_m, aftercut);
+ break;
+ }
+
+ /* This fragment is completely overlapped, lose it */
+ next = LIST_NEXT(frea, fr_next);
+ m_freem(frea->fr_m);
+ LIST_REMOVE(frea, fr_next);
+ pool_put(&pf_frent_pl, frea);
+ pf_nfrents--;
+ }
+
+ insert:
+ /* Update maximum data size */
+ if ((*frag)->fr_max < max)
+ (*frag)->fr_max = max;
+ /* This is the last segment */
+ if (!mff)
+ (*frag)->fr_flags |= PFFRAG_SEENLAST;
+
+ if (frep == NULL)
+ LIST_INSERT_HEAD(&(*frag)->fr_queue, frent, fr_next);
+ else
+ LIST_INSERT_AFTER(frep, frent, fr_next);
+
+ /* Check if we are completely reassembled */
+ if (!((*frag)->fr_flags & PFFRAG_SEENLAST))
+ return (NULL);
+
+ /* Check if we have all the data */
+ off = 0;
+ for (frep = LIST_FIRST(&(*frag)->fr_queue); frep; frep = next) {
+ next = LIST_NEXT(frep, fr_next);
+
+ off += ntohs(frep->fr_ip->ip_len) - frep->fr_ip->ip_hl * 4;
+ if (off < (*frag)->fr_max &&
+ (next == NULL || FR_IP_OFF(next) != off))
+ {
+ DPFPRINTF(("missing fragment at %d, next %d, max %d\n",
+ off, next == NULL ? -1 : FR_IP_OFF(next),
+ (*frag)->fr_max));
+ return (NULL);
+ }
+ }
+ DPFPRINTF(("%d < %d?\n", off, (*frag)->fr_max));
+ if (off < (*frag)->fr_max)
+ return (NULL);
+
+ /* We have all the data */
+ frent = LIST_FIRST(&(*frag)->fr_queue);
+#ifdef __FreeBSD__
+ KASSERT((frent != NULL), ("frent == NULL: %s", __FUNCTION__));
+#else
+ KASSERT(frent != NULL);
+#endif
+ if ((frent->fr_ip->ip_hl << 2) + off > IP_MAXPACKET) {
+ DPFPRINTF(("drop: too big: %d\n", off));
+ pf_free_fragment(*frag);
+ *frag = NULL;
+ return (NULL);
+ }
+ next = LIST_NEXT(frent, fr_next);
+
+ /* Magic from ip_input */
+ ip = frent->fr_ip;
+ m = frent->fr_m;
+ m2 = m->m_next;
+ m->m_next = NULL;
+ m_cat(m, m2);
+ pool_put(&pf_frent_pl, frent);
+ pf_nfrents--;
+ for (frent = next; frent != NULL; frent = next) {
+ next = LIST_NEXT(frent, fr_next);
+
+ m2 = frent->fr_m;
+ pool_put(&pf_frent_pl, frent);
+ pf_nfrents--;
+#ifdef __FreeBSD__
+ m->m_pkthdr.csum_flags &= m2->m_pkthdr.csum_flags;
+ m->m_pkthdr.csum_data += m2->m_pkthdr.csum_data;
+#endif
+ m_cat(m, m2);
+ }
+#ifdef __FreeBSD__
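+ /* Fold the 32-bit checksum accumulator back into 16 bits. */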
+ while (m->m_pkthdr.csum_data & 0xffff0000)
+ m->m_pkthdr.csum_data = (m->m_pkthdr.csum_data & 0xffff) +
+ (m->m_pkthdr.csum_data >> 16);
+#endif
+
+ ip->ip_src = (*frag)->fr_src;
+ ip->ip_dst = (*frag)->fr_dst;
+
+ /* Remove from fragment queue */
+ pf_remove_fragment(*frag);
+ *frag = NULL;
+
+ hlen = ip->ip_hl << 2;
+ ip->ip_len = htons(off + hlen);
+ m->m_len += hlen;
+ m->m_data -= hlen;
+
+ /* some debugging cruft by sklower, below, will go away soon */
+ /* XXX this should be done elsewhere */
+ if (m->m_flags & M_PKTHDR) {
+ int plen = 0;
+ for (m2 = m; m2; m2 = m2->m_next)
+ plen += m2->m_len;
+ m->m_pkthdr.len = plen;
+ }
+
+ DPFPRINTF(("complete: %p(%d)\n", m, ntohs(ip->ip_len)));
+ return (m);
+
+ drop_fragment:
+ /* Oops - fail safe - drop packet */
+ pool_put(&pf_frent_pl, frent);
+ pf_nfrents--;
+ m_freem(m);
+ return (NULL);
+}
+
+struct mbuf *
+pf_fragcache(struct mbuf **m0, struct ip *h, struct pf_fragment **frag, int mff,
+ int drop, int *nomem)
+{
+ struct mbuf *m = *m0;
+ struct pf_frcache *frp, *fra, *cur = NULL;
+ int ip_len = ntohs(h->ip_len) - (h->ip_hl << 2);
+ u_int16_t off = ntohs(h->ip_off) << 3;
+ u_int16_t max = ip_len + off;
+ int hosed = 0;
+
+#ifdef __FreeBSD__
+ KASSERT((*frag == NULL || !BUFFER_FRAGMENTS(*frag)),
+ ("!(*frag == NULL || !BUFFER_FRAGMENTS(*frag)): %s", __FUNCTION__));
+#else
+ KASSERT(*frag == NULL || !BUFFER_FRAGMENTS(*frag));
+#endif
+
+ /* Create a new range queue for this packet */
+ if (*frag == NULL) {
+ *frag = pool_get(&pf_cache_pl, PR_NOWAIT);
+ if (*frag == NULL) {
+ pf_flush_fragments();
+ *frag = pool_get(&pf_cache_pl, PR_NOWAIT);
+ if (*frag == NULL)
+ goto no_mem;
+ }
+
+ /* Get an entry for the queue */
+ cur = pool_get(&pf_cent_pl, PR_NOWAIT);
+ if (cur == NULL) {
+ pool_put(&pf_cache_pl, *frag);
+ *frag = NULL;
+ goto no_mem;
+ }
+ pf_ncache++;
+
+ (*frag)->fr_flags = PFFRAG_NOBUFFER;
+ (*frag)->fr_max = 0;
+ (*frag)->fr_src = h->ip_src;
+ (*frag)->fr_dst = h->ip_dst;
+ (*frag)->fr_p = h->ip_p;
+ (*frag)->fr_id = h->ip_id;
+ (*frag)->fr_timeout = time_second;
+
+ cur->fr_off = off;
+ cur->fr_end = max;
+ LIST_INIT(&(*frag)->fr_cache);
+ LIST_INSERT_HEAD(&(*frag)->fr_cache, cur, fr_next);
+
+ RB_INSERT(pf_frag_tree, &pf_cache_tree, *frag);
+ TAILQ_INSERT_HEAD(&pf_cachequeue, *frag, frag_next);
+
+ DPFPRINTF(("fragcache[%d]: new %d-%d\n", h->ip_id, off, max));
+
+ goto pass;
+ }
+
+ /*
+ * Find a fragment after the current one:
+ * - off contains the real shifted offset.
+ */
+ frp = NULL;
+ LIST_FOREACH(fra, &(*frag)->fr_cache, fr_next) {
+ if (fra->fr_off > off)
+ break;
+ frp = fra;
+ }
+
+#ifdef __FreeBSD__
+ KASSERT((frp != NULL || fra != NULL),
+ ("!(frp != NULL || fra != NULL): %s", __FUNCTION__));
+#else
+ KASSERT(frp != NULL || fra != NULL);
+#endif
+
+ if (frp != NULL) {
+ int precut;
+
+ precut = frp->fr_end - off;
+ if (precut >= ip_len) {
+ /* Fragment is entirely a duplicate */
+ DPFPRINTF(("fragcache[%d]: dead (%d-%d) %d-%d\n",
+ h->ip_id, frp->fr_off, frp->fr_end, off, max));
+ goto drop_fragment;
+ }
+ if (precut == 0) {
+ /* They are adjacent. Fixup cache entry */
+ DPFPRINTF(("fragcache[%d]: adjacent (%d-%d) %d-%d\n",
+ h->ip_id, frp->fr_off, frp->fr_end, off, max));
+ frp->fr_end = max;
+ } else if (precut > 0) {
+ /* The first part of this payload overlaps with a
+ * fragment that has already been passed.
+ * Need to trim off the first part of the payload.
+ * But to do so easily, we need to create another
+ * mbuf to throw the original header into.
+ */
+
+ DPFPRINTF(("fragcache[%d]: chop %d (%d-%d) %d-%d\n",
+ h->ip_id, precut, frp->fr_off, frp->fr_end, off,
+ max));
+
+ off += precut;
+ max -= precut;
+ /* Update the previous frag to encompass this one */
+ frp->fr_end = max;
+
+ if (!drop) {
+ /* XXX Optimization opportunity
+ * This is a very heavy way to trim the payload.
+ * we could do it much faster by diddling mbuf
+ * internals but that would be even less legible
+ * than this mbuf magic. For my next trick,
+ * I'll pull a rabbit out of my laptop.
+ */
+#ifdef __FreeBSD__
+ *m0 = m_dup(m, M_DONTWAIT);
+#else
+ *m0 = m_copym2(m, 0, h->ip_hl << 2, M_NOWAIT);
+#endif
+ if (*m0 == NULL)
+ goto no_mem;
+#ifdef __FreeBSD__
+ /* From the KAME Project: we had missed this! */
+ m_adj(*m0, (h->ip_hl << 2) -
+ (*m0)->m_pkthdr.len);
+
+ KASSERT(((*m0)->m_next == NULL),
+ ("(*m0)->m_next != NULL: %s",
+ __FUNCTION__));
+#else
+ KASSERT((*m0)->m_next == NULL);
+#endif
+ m_adj(m, precut + (h->ip_hl << 2));
+ m_cat(*m0, m);
+ m = *m0;
+ if (m->m_flags & M_PKTHDR) {
+ int plen = 0;
+ struct mbuf *t;
+ for (t = m; t; t = t->m_next)
+ plen += t->m_len;
+ m->m_pkthdr.len = plen;
+ }
+
+ h = mtod(m, struct ip *);
+
+#ifdef __FreeBSD__
+ KASSERT(((int)m->m_len ==
+ ntohs(h->ip_len) - precut),
+ ("m->m_len != ntohs(h->ip_len) - precut: %s",
+ __FUNCTION__));
+#else
+ KASSERT((int)m->m_len ==
+ ntohs(h->ip_len) - precut);
+#endif
+ h->ip_off = htons(ntohs(h->ip_off) +
+ (precut >> 3));
+ h->ip_len = htons(ntohs(h->ip_len) - precut);
+ } else {
+ hosed++;
+ }
+ } else {
+ /* There is a gap between fragments */
+
+ DPFPRINTF(("fragcache[%d]: gap %d (%d-%d) %d-%d\n",
+ h->ip_id, -precut, frp->fr_off, frp->fr_end, off,
+ max));
+
+ cur = pool_get(&pf_cent_pl, PR_NOWAIT);
+ if (cur == NULL)
+ goto no_mem;
+ pf_ncache++;
+
+ cur->fr_off = off;
+ cur->fr_end = max;
+ LIST_INSERT_AFTER(frp, cur, fr_next);
+ }
+ }
+
+ if (fra != NULL) {
+ int aftercut;
+ int merge = 0;
+
+ aftercut = max - fra->fr_off;
+ if (aftercut == 0) {
+ /* Adjacent fragments */
+ DPFPRINTF(("fragcache[%d]: adjacent %d-%d (%d-%d)\n",
+ h->ip_id, off, max, fra->fr_off, fra->fr_end));
+ fra->fr_off = off;
+ merge = 1;
+ } else if (aftercut > 0) {
+ /* Need to chop off the tail of this fragment */
+ DPFPRINTF(("fragcache[%d]: chop %d %d-%d (%d-%d)\n",
+ h->ip_id, aftercut, off, max, fra->fr_off,
+ fra->fr_end));
+ fra->fr_off = off;
+ max -= aftercut;
+
+ merge = 1;
+
+ if (!drop) {
+ m_adj(m, -aftercut);
+ if (m->m_flags & M_PKTHDR) {
+ int plen = 0;
+ struct mbuf *t;
+ for (t = m; t; t = t->m_next)
+ plen += t->m_len;
+ m->m_pkthdr.len = plen;
+ }
+ h = mtod(m, struct ip *);
+#ifdef __FreeBSD__
+ KASSERT(((int)m->m_len == ntohs(h->ip_len) - aftercut),
+ ("m->m_len != ntohs(h->ip_len) - aftercut: %s",
+ __FUNCTION__));
+#else
+ KASSERT((int)m->m_len ==
+ ntohs(h->ip_len) - aftercut);
+#endif
+ h->ip_len = htons(ntohs(h->ip_len) - aftercut);
+ } else {
+ hosed++;
+ }
+ } else if (frp == NULL) {
+ /* There is a gap between fragments */
+ DPFPRINTF(("fragcache[%d]: gap %d %d-%d (%d-%d)\n",
+ h->ip_id, -aftercut, off, max, fra->fr_off,
+ fra->fr_end));
+
+ cur = pool_get(&pf_cent_pl, PR_NOWAIT);
+ if (cur == NULL)
+ goto no_mem;
+ pf_ncache++;
+
+ cur->fr_off = off;
+ cur->fr_end = max;
+ LIST_INSERT_BEFORE(fra, cur, fr_next);
+ }
+
+ /* Need to glue together two separate fragment descriptors */
+ if (merge) {
+ if (cur && fra->fr_off <= cur->fr_end) {
+ /* Need to merge in a previous 'cur' */
+ DPFPRINTF(("fragcache[%d]: adjacent(merge "
+ "%d-%d) %d-%d (%d-%d)\n",
+ h->ip_id, cur->fr_off, cur->fr_end, off,
+ max, fra->fr_off, fra->fr_end));
+ fra->fr_off = cur->fr_off;
+ LIST_REMOVE(cur, fr_next);
+ pool_put(&pf_cent_pl, cur);
+ pf_ncache--;
+ cur = NULL;
+
+ } else if (frp && fra->fr_off <= frp->fr_end) {
+ /* Need to merge in a modified 'frp' */
+#ifdef __FreeBSD__
+ KASSERT((cur == NULL), ("cur != NULL: %s",
+ __FUNCTION__));
+#else
+ KASSERT(cur == NULL);
+#endif
+ DPFPRINTF(("fragcache[%d]: adjacent(merge "
+ "%d-%d) %d-%d (%d-%d)\n",
+ h->ip_id, frp->fr_off, frp->fr_end, off,
+ max, fra->fr_off, fra->fr_end));
+ fra->fr_off = frp->fr_off;
+ LIST_REMOVE(frp, fr_next);
+ pool_put(&pf_cent_pl, frp);
+ pf_ncache--;
+ frp = NULL;
+
+ }
+ }
+ }
+
+ if (hosed) {
+ /*
+ * We must keep tracking the overall fragment even when
+ * we're going to drop it anyway so that we know when to
+ * free the overall descriptor. Thus we drop the frag late.
+ */
+ goto drop_fragment;
+ }
+
+ pass:
+ /* Update maximum data size */
+ if ((*frag)->fr_max < max)
+ (*frag)->fr_max = max;
+
+ /* This is the last segment */
+ if (!mff)
+ (*frag)->fr_flags |= PFFRAG_SEENLAST;
+
+ /* Check if we are completely reassembled */
+ if (((*frag)->fr_flags & PFFRAG_SEENLAST) &&
+ LIST_FIRST(&(*frag)->fr_cache)->fr_off == 0 &&
+ LIST_FIRST(&(*frag)->fr_cache)->fr_end == (*frag)->fr_max) {
+ /* Remove from fragment queue */
+ DPFPRINTF(("fragcache[%d]: done 0-%d\n", h->ip_id,
+ (*frag)->fr_max));
+ pf_free_fragment(*frag);
+ *frag = NULL;
+ }
+
+ return (m);
+
+ no_mem:
+ *nomem = 1;
+
+ /* Still need to pay attention to !IP_MF */
+ if (!mff && *frag != NULL)
+ (*frag)->fr_flags |= PFFRAG_SEENLAST;
+
+ m_freem(m);
+ return (NULL);
+
+ drop_fragment:
+
+ /* Still need to pay attention to !IP_MF */
+ if (!mff && *frag != NULL)
+ (*frag)->fr_flags |= PFFRAG_SEENLAST;
+
+ if (drop) {
+ /* This fragment has been deemed bad. Don't reassemble. */
+ if (((*frag)->fr_flags & PFFRAG_DROP) == 0)
+ DPFPRINTF(("fragcache[%d]: dropping overall fragment\n",
+ h->ip_id));
+ (*frag)->fr_flags |= PFFRAG_DROP;
+ }
+
+ m_freem(m);
+ return (NULL);
+}
+
+int
+pf_normalize_ip(struct mbuf **m0, int dir, struct pfi_kif *kif, u_short *reason,
+ struct pf_pdesc *pd)
+{
+ struct mbuf *m = *m0;
+ struct pf_rule *r;
+ struct pf_frent *frent;
+ struct pf_fragment *frag = NULL;
+ struct ip *h = mtod(m, struct ip *);
+ int mff = (ntohs(h->ip_off) & IP_MF);
+ int hlen = h->ip_hl << 2;
+ u_int16_t fragoff = (ntohs(h->ip_off) & IP_OFFMASK) << 3;
+ u_int16_t max;
+ int ip_len;
+ int ip_off;
+
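+ /*
+ * Walk the scrub ruleset; the precomputed skip steps jump over
+ * runs of rules that cannot match once one field has mismatched.
+ */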
+ r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
+ while (r != NULL) {
+ r->evaluations++;
+ if (pfi_kif_match(r->kif, kif) == r->ifnot)
+ r = r->skip[PF_SKIP_IFP].ptr;
+ else if (r->direction && r->direction != dir)
+ r = r->skip[PF_SKIP_DIR].ptr;
+ else if (r->af && r->af != AF_INET)
+ r = r->skip[PF_SKIP_AF].ptr;
+ else if (r->proto && r->proto != h->ip_p)
+ r = r->skip[PF_SKIP_PROTO].ptr;
+ else if (PF_MISMATCHAW(&r->src.addr,
+ (struct pf_addr *)&h->ip_src.s_addr, AF_INET,
+ r->src.neg, kif))
+ r = r->skip[PF_SKIP_SRC_ADDR].ptr;
+ else if (PF_MISMATCHAW(&r->dst.addr,
+ (struct pf_addr *)&h->ip_dst.s_addr, AF_INET,
+ r->dst.neg, NULL))
+ r = r->skip[PF_SKIP_DST_ADDR].ptr;
+ else
+ break;
+ }
+
+ if (r == NULL || r->action == PF_NOSCRUB)
+ return (PF_PASS);
+ else {
+ r->packets[dir == PF_OUT]++;
+ r->bytes[dir == PF_OUT] += pd->tot_len;
+ }
+
+ /* Check for illegal packets */
+ if (hlen < (int)sizeof(struct ip))
+ goto drop;
+
+ if (hlen > ntohs(h->ip_len))
+ goto drop;
+
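+	/*
+	 * pf_cksum_fixup() below performs the incremental checksum
+	 * update of RFC 1624: the old 16-bit word is folded out of
+	 * ip_sum and the new one folded in, avoiding a full header
+	 * checksum recomputation on every modification.
+	 */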
+ /* Clear IP_DF if the rule uses the no-df option */
+ if (r->rule_flag & PFRULE_NODF && h->ip_off & htons(IP_DF)) {
+ u_int16_t ip_off = h->ip_off;
+
+ h->ip_off &= htons(~IP_DF);
+ h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_off, h->ip_off, 0);
+ }
+
+ /* We will need other tests here */
+ if (!fragoff && !mff)
+ goto no_fragment;
+
+ /* We're dealing with a fragment now. Don't allow fragments
+ * with IP_DF to enter the cache. If the flag was cleared by
+ * no-df above, fine. Otherwise drop it.
+ */
+ if (h->ip_off & htons(IP_DF)) {
+ DPFPRINTF(("IP_DF\n"));
+ goto bad;
+ }
+
+ ip_len = ntohs(h->ip_len) - hlen;
+ ip_off = (ntohs(h->ip_off) & IP_OFFMASK) << 3;
+
+ /* All fragments are 8 byte aligned */
+ if (mff && (ip_len & 0x7)) {
+ DPFPRINTF(("mff and %d\n", ip_len));
+ goto bad;
+ }
+
+ /* Respect maximum length */
+ if (fragoff + ip_len > IP_MAXPACKET) {
+ DPFPRINTF(("max packet %d\n", fragoff + ip_len));
+ goto bad;
+ }
+ max = fragoff + ip_len;
+
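+	/*
+	 * Two normalization modes follow: without fragcrop/fragdrop,
+	 * fragments are fully buffered and reassembled; with either
+	 * flag, only a cache of seen ranges is kept and overlaps are
+	 * trimmed (crop) or the whole fragment set is dropped (drop-ovl).
+	 */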
+ if ((r->rule_flag & (PFRULE_FRAGCROP|PFRULE_FRAGDROP)) == 0) {
+ /* Fully buffer all of the fragments */
+
+ frag = pf_find_fragment(h, &pf_frag_tree);
+
+ /* Check if we saw the last fragment already */
+ if (frag != NULL && (frag->fr_flags & PFFRAG_SEENLAST) &&
+ max > frag->fr_max)
+ goto bad;
+
+ /* Get an entry for the fragment queue */
+ frent = pool_get(&pf_frent_pl, PR_NOWAIT);
+ if (frent == NULL) {
+ REASON_SET(reason, PFRES_MEMORY);
+ return (PF_DROP);
+ }
+ pf_nfrents++;
+ frent->fr_ip = h;
+ frent->fr_m = m;
+
+ /* Might return a completely reassembled mbuf, or NULL */
+ DPFPRINTF(("reass frag %d @ %d-%d\n", h->ip_id, fragoff, max));
+ *m0 = m = pf_reassemble(m0, &frag, frent, mff);
+
+ if (m == NULL)
+ return (PF_DROP);
+
+ /* use mtag from concatenated mbuf chain */
+ pd->pf_mtag = pf_find_mtag(m);
+#ifdef DIAGNOSTIC
+ if (pd->pf_mtag == NULL) {
+ printf("%s: pf_find_mtag returned NULL(1)\n", __func__);
+ if ((pd->pf_mtag = pf_get_mtag(m)) == NULL) {
+ m_freem(m);
+ *m0 = NULL;
+ goto no_mem;
+ }
+ }
+#endif
+ if (frag != NULL && (frag->fr_flags & PFFRAG_DROP))
+ goto drop;
+
+ h = mtod(m, struct ip *);
+ } else {
+ /* non-buffering fragment cache (drops or masks overlaps) */
+ int nomem = 0;
+
+ if (dir == PF_OUT && pd->pf_mtag->flags & PF_TAG_FRAGCACHE) {
+ /*
+ * Already passed the fragment cache in the
+ * input direction. If we continued, it would
+ * appear to be a dup and would be dropped.
+ */
+ goto fragment_pass;
+ }
+
+ frag = pf_find_fragment(h, &pf_cache_tree);
+
+ /* Check if we saw the last fragment already */
+ if (frag != NULL && (frag->fr_flags & PFFRAG_SEENLAST) &&
+ max > frag->fr_max) {
+ if (r->rule_flag & PFRULE_FRAGDROP)
+ frag->fr_flags |= PFFRAG_DROP;
+ goto bad;
+ }
+
+ *m0 = m = pf_fragcache(m0, h, &frag, mff,
+ (r->rule_flag & PFRULE_FRAGDROP) ? 1 : 0, &nomem);
+ if (m == NULL) {
+ if (nomem)
+ goto no_mem;
+ goto drop;
+ }
+
+ /* use mtag from copied and trimmed mbuf chain */
+ pd->pf_mtag = pf_find_mtag(m);
+#ifdef DIAGNOSTIC
+ if (pd->pf_mtag == NULL) {
+ printf("%s: pf_find_mtag returned NULL(2)\n", __func__);
+ if ((pd->pf_mtag = pf_get_mtag(m)) == NULL) {
+ m_freem(m);
+ *m0 = NULL;
+ goto no_mem;
+ }
+ }
+#endif
+ if (dir == PF_IN)
+ pd->pf_mtag->flags |= PF_TAG_FRAGCACHE;
+
+ if (frag != NULL && (frag->fr_flags & PFFRAG_DROP))
+ goto drop;
+ goto fragment_pass;
+ }
+
+ no_fragment:
+ /* At this point, only IP_DF is allowed in ip_off */
+ if (h->ip_off & ~htons(IP_DF)) {
+ u_int16_t ip_off = h->ip_off;
+
+ h->ip_off &= htons(IP_DF);
+ h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_off, h->ip_off, 0);
+ }
+
+ /* Enforce a minimum ttl, may cause endless packet loops */
+ if (r->min_ttl && h->ip_ttl < r->min_ttl) {
+ u_int16_t ip_ttl = h->ip_ttl;
+
+ h->ip_ttl = r->min_ttl;
+ h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_ttl, h->ip_ttl, 0);
+ }
+
+ if (r->rule_flag & PFRULE_RANDOMID) {
+ u_int16_t ip_id = h->ip_id;
+
+ h->ip_id = ip_randomid();
+ h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_id, h->ip_id, 0);
+ }
+ if ((r->rule_flag & (PFRULE_FRAGCROP|PFRULE_FRAGDROP)) == 0)
+ pd->flags |= PFDESC_IP_REAS;
+
+ return (PF_PASS);
+
+ fragment_pass:
+ /* Enforce a minimum ttl, may cause endless packet loops */
+ if (r->min_ttl && h->ip_ttl < r->min_ttl) {
+ u_int16_t ip_ttl = h->ip_ttl;
+
+ h->ip_ttl = r->min_ttl;
+ h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_ttl, h->ip_ttl, 0);
+ }
+ if ((r->rule_flag & (PFRULE_FRAGCROP|PFRULE_FRAGDROP)) == 0)
+ pd->flags |= PFDESC_IP_REAS;
+ return (PF_PASS);
+
+ no_mem:
+ REASON_SET(reason, PFRES_MEMORY);
+ if (r != NULL && r->log)
+ PFLOG_PACKET(kif, h, m, AF_INET, dir, *reason, r, NULL, NULL, pd);
+ return (PF_DROP);
+
+ drop:
+ REASON_SET(reason, PFRES_NORM);
+ if (r != NULL && r->log)
+ PFLOG_PACKET(kif, h, m, AF_INET, dir, *reason, r, NULL, NULL, pd);
+ return (PF_DROP);
+
+ bad:
+ DPFPRINTF(("dropping bad fragment\n"));
+
+ /* Free associated fragments */
+ if (frag != NULL)
+ pf_free_fragment(frag);
+
+ REASON_SET(reason, PFRES_FRAG);
+ if (r != NULL && r->log)
+ PFLOG_PACKET(kif, h, m, AF_INET, dir, *reason, r, NULL, NULL, pd);
+
+ return (PF_DROP);
+}
+
+#ifdef INET6
+int
+pf_normalize_ip6(struct mbuf **m0, int dir, struct pfi_kif *kif,
+ u_short *reason, struct pf_pdesc *pd)
+{
+ struct mbuf *m = *m0;
+ struct pf_rule *r;
+ struct ip6_hdr *h = mtod(m, struct ip6_hdr *);
+ int off;
+ struct ip6_ext ext;
+ struct ip6_opt opt;
+ struct ip6_opt_jumbo jumbo;
+ struct ip6_frag frag;
+ u_int32_t jumbolen = 0, plen;
+ u_int16_t fragoff = 0;
+ int optend;
+ int ooff;
+ u_int8_t proto;
+ int terminal;
+
+ r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
+ while (r != NULL) {
+ r->evaluations++;
+ if (pfi_kif_match(r->kif, kif) == r->ifnot)
+ r = r->skip[PF_SKIP_IFP].ptr;
+ else if (r->direction && r->direction != dir)
+ r = r->skip[PF_SKIP_DIR].ptr;
+ else if (r->af && r->af != AF_INET6)
+ r = r->skip[PF_SKIP_AF].ptr;
+#if 0 /* header chain! */
+ else if (r->proto && r->proto != h->ip6_nxt)
+ r = r->skip[PF_SKIP_PROTO].ptr;
+#endif
+ else if (PF_MISMATCHAW(&r->src.addr,
+ (struct pf_addr *)&h->ip6_src, AF_INET6,
+ r->src.neg, kif))
+ r = r->skip[PF_SKIP_SRC_ADDR].ptr;
+ else if (PF_MISMATCHAW(&r->dst.addr,
+ (struct pf_addr *)&h->ip6_dst, AF_INET6,
+ r->dst.neg, NULL))
+ r = r->skip[PF_SKIP_DST_ADDR].ptr;
+ else
+ break;
+ }
+
+ if (r == NULL || r->action == PF_NOSCRUB)
+ return (PF_PASS);
+ else {
+ r->packets[dir == PF_OUT]++;
+ r->bytes[dir == PF_OUT] += pd->tot_len;
+ }
+
+ /* Check for illegal packets */
+ if (sizeof(struct ip6_hdr) + IPV6_MAXPACKET < m->m_pkthdr.len)
+ goto drop;
+
+ off = sizeof(struct ip6_hdr);
+ proto = h->ip6_nxt;
+ terminal = 0;
+ do {
+ switch (proto) {
+ case IPPROTO_FRAGMENT:
+ goto fragment;
+ break;
+ case IPPROTO_AH:
+ case IPPROTO_ROUTING:
+ case IPPROTO_DSTOPTS:
+ if (!pf_pull_hdr(m, off, &ext, sizeof(ext), NULL,
+ NULL, AF_INET6))
+ goto shortpkt;
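+			/*
+			 * ip6e_len counts 32-bit words beyond the first
+			 * two for AH (RFC 2402), but 8-byte units beyond
+			 * the first eight bytes for the other extension
+			 * headers (RFC 2460), hence the different advance.
+			 */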
+ if (proto == IPPROTO_AH)
+ off += (ext.ip6e_len + 2) * 4;
+ else
+ off += (ext.ip6e_len + 1) * 8;
+ proto = ext.ip6e_nxt;
+ break;
+ case IPPROTO_HOPOPTS:
+ if (!pf_pull_hdr(m, off, &ext, sizeof(ext), NULL,
+ NULL, AF_INET6))
+ goto shortpkt;
+ optend = off + (ext.ip6e_len + 1) * 8;
+ ooff = off + sizeof(ext);
+ do {
+ if (!pf_pull_hdr(m, ooff, &opt.ip6o_type,
+ sizeof(opt.ip6o_type), NULL, NULL,
+ AF_INET6))
+ goto shortpkt;
+ if (opt.ip6o_type == IP6OPT_PAD1) {
+ ooff++;
+ continue;
+ }
+ if (!pf_pull_hdr(m, ooff, &opt, sizeof(opt),
+ NULL, NULL, AF_INET6))
+ goto shortpkt;
+ if (ooff + sizeof(opt) + opt.ip6o_len > optend)
+ goto drop;
+ switch (opt.ip6o_type) {
+ case IP6OPT_JUMBO:
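+				/*
+				 * RFC 2675: a jumbogram must carry
+				 * ip6_plen == 0 and a 32-bit jumbo
+				 * length that exceeds 65535 and matches
+				 * the actual packet size.
+				 */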
+ if (h->ip6_plen != 0)
+ goto drop;
+ if (!pf_pull_hdr(m, ooff, &jumbo,
+ sizeof(jumbo), NULL, NULL,
+ AF_INET6))
+ goto shortpkt;
+ memcpy(&jumbolen, jumbo.ip6oj_jumbo_len,
+ sizeof(jumbolen));
+ jumbolen = ntohl(jumbolen);
+ if (jumbolen <= IPV6_MAXPACKET)
+ goto drop;
+ if (sizeof(struct ip6_hdr) + jumbolen !=
+ m->m_pkthdr.len)
+ goto drop;
+ break;
+ default:
+ break;
+ }
+ ooff += sizeof(opt) + opt.ip6o_len;
+ } while (ooff < optend);
+
+ off = optend;
+ proto = ext.ip6e_nxt;
+ break;
+ default:
+ terminal = 1;
+ break;
+ }
+ } while (!terminal);
+
+ /* jumbo payload option must be present, or plen > 0 */
+ if (ntohs(h->ip6_plen) == 0)
+ plen = jumbolen;
+ else
+ plen = ntohs(h->ip6_plen);
+ if (plen == 0)
+ goto drop;
+ if (sizeof(struct ip6_hdr) + plen > m->m_pkthdr.len)
+ goto shortpkt;
+
+ /* Enforce a minimum ttl, may cause endless packet loops */
+ if (r->min_ttl && h->ip6_hlim < r->min_ttl)
+ h->ip6_hlim = r->min_ttl;
+
+ return (PF_PASS);
+
+ fragment:
+ if (ntohs(h->ip6_plen) == 0 || jumbolen)
+ goto drop;
+ plen = ntohs(h->ip6_plen);
+
+ if (!pf_pull_hdr(m, off, &frag, sizeof(frag), NULL, NULL, AF_INET6))
+ goto shortpkt;
+ fragoff = ntohs(frag.ip6f_offlg & IP6F_OFF_MASK);
+ if (fragoff + (plen - off - sizeof(frag)) > IPV6_MAXPACKET)
+ goto badfrag;
+
+ /* do something about it */
+ /* remember to set pd->flags |= PFDESC_IP_REAS */
+ return (PF_PASS);
+
+ shortpkt:
+ REASON_SET(reason, PFRES_SHORT);
+ if (r != NULL && r->log)
+ PFLOG_PACKET(kif, h, m, AF_INET6, dir, *reason, r, NULL, NULL, pd);
+ return (PF_DROP);
+
+ drop:
+ REASON_SET(reason, PFRES_NORM);
+ if (r != NULL && r->log)
+ PFLOG_PACKET(kif, h, m, AF_INET6, dir, *reason, r, NULL, NULL, pd);
+ return (PF_DROP);
+
+ badfrag:
+ REASON_SET(reason, PFRES_FRAG);
+ if (r != NULL && r->log)
+ PFLOG_PACKET(kif, h, m, AF_INET6, dir, *reason, r, NULL, NULL, pd);
+ return (PF_DROP);
+}
+#endif /* INET6 */
+
+int
+pf_normalize_tcp(int dir, struct pfi_kif *kif, struct mbuf *m, int ipoff,
+ int off, void *h, struct pf_pdesc *pd)
+{
+ struct pf_rule *r, *rm = NULL;
+ struct tcphdr *th = pd->hdr.tcp;
+ int rewrite = 0;
+ u_short reason;
+ u_int8_t flags;
+ sa_family_t af = pd->af;
+
+ r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
+ while (r != NULL) {
+ r->evaluations++;
+ if (pfi_kif_match(r->kif, kif) == r->ifnot)
+ r = r->skip[PF_SKIP_IFP].ptr;
+ else if (r->direction && r->direction != dir)
+ r = r->skip[PF_SKIP_DIR].ptr;
+ else if (r->af && r->af != af)
+ r = r->skip[PF_SKIP_AF].ptr;
+ else if (r->proto && r->proto != pd->proto)
+ r = r->skip[PF_SKIP_PROTO].ptr;
+ else if (PF_MISMATCHAW(&r->src.addr, pd->src, af,
+ r->src.neg, kif))
+ r = r->skip[PF_SKIP_SRC_ADDR].ptr;
+ else if (r->src.port_op && !pf_match_port(r->src.port_op,
+ r->src.port[0], r->src.port[1], th->th_sport))
+ r = r->skip[PF_SKIP_SRC_PORT].ptr;
+ else if (PF_MISMATCHAW(&r->dst.addr, pd->dst, af,
+ r->dst.neg, NULL))
+ r = r->skip[PF_SKIP_DST_ADDR].ptr;
+ else if (r->dst.port_op && !pf_match_port(r->dst.port_op,
+ r->dst.port[0], r->dst.port[1], th->th_dport))
+ r = r->skip[PF_SKIP_DST_PORT].ptr;
+ else if (r->os_fingerprint != PF_OSFP_ANY && !pf_osfp_match(
+ pf_osfp_fingerprint(pd, m, off, th),
+ r->os_fingerprint))
+ r = TAILQ_NEXT(r, entries);
+ else {
+ rm = r;
+ break;
+ }
+ }
+
+ if (rm == NULL || rm->action == PF_NOSCRUB)
+ return (PF_PASS);
+ else {
+ r->packets[dir == PF_OUT]++;
+ r->bytes[dir == PF_OUT] += pd->tot_len;
+ }
+
+ if (rm->rule_flag & PFRULE_REASSEMBLE_TCP)
+ pd->flags |= PFDESC_TCP_NORM;
+
+ flags = th->th_flags;
+ if (flags & TH_SYN) {
+ /* Illegal packet */
+ if (flags & TH_RST)
+ goto tcp_drop;
+
+ if (flags & TH_FIN)
+ flags &= ~TH_FIN;
+ } else {
+ /* Illegal packet */
+ if (!(flags & (TH_ACK|TH_RST)))
+ goto tcp_drop;
+ }
+
+ if (!(flags & TH_ACK)) {
+ /* These flags are only valid if ACK is set */
+ if ((flags & TH_FIN) || (flags & TH_PUSH) || (flags & TH_URG))
+ goto tcp_drop;
+ }
+
+ /* Check for illegal header length */
+ if (th->th_off < (sizeof(struct tcphdr) >> 2))
+ goto tcp_drop;
+
+ /* If flags changed, or reserved data set, then adjust */
+ if (flags != th->th_flags || th->th_x2 != 0) {
+ u_int16_t ov, nv;
+
+ ov = *(u_int16_t *)(&th->th_ack + 1);
+ th->th_flags = flags;
+ th->th_x2 = 0;
+ nv = *(u_int16_t *)(&th->th_ack + 1);
+
+ th->th_sum = pf_cksum_fixup(th->th_sum, ov, nv, 0);
+ rewrite = 1;
+ }
+
+ /* Remove urgent pointer, if TH_URG is not set */
+ if (!(flags & TH_URG) && th->th_urp) {
+ th->th_sum = pf_cksum_fixup(th->th_sum, th->th_urp, 0, 0);
+ th->th_urp = 0;
+ rewrite = 1;
+ }
+
+ /* Process options */
+ if (r->max_mss && pf_normalize_tcpopt(r, m, th, off))
+ rewrite = 1;
+
+ /* copy back packet headers if we sanitized */
+ if (rewrite)
+ m_copyback(m, off, sizeof(*th), (caddr_t)th);
+
+ return (PF_PASS);
+
+ tcp_drop:
+ REASON_SET(&reason, PFRES_NORM);
+ if (rm != NULL && r->log)
+ PFLOG_PACKET(kif, h, m, AF_INET, dir, reason, r, NULL, NULL, pd);
+ return (PF_DROP);
+}
+
+int
+pf_normalize_tcp_init(struct mbuf *m, int off, struct pf_pdesc *pd,
+ struct tcphdr *th, struct pf_state_peer *src, struct pf_state_peer *dst)
+{
+ u_int32_t tsval, tsecr;
+ u_int8_t hdr[60];
+ u_int8_t *opt;
+
+#ifdef __FreeBSD__
+ KASSERT((src->scrub == NULL),
+ ("pf_normalize_tcp_init: src->scrub != NULL"));
+#else
+ KASSERT(src->scrub == NULL);
+#endif
+
+ src->scrub = pool_get(&pf_state_scrub_pl, PR_NOWAIT);
+ if (src->scrub == NULL)
+ return (1);
+ bzero(src->scrub, sizeof(*src->scrub));
+
+ switch (pd->af) {
+#ifdef INET
+ case AF_INET: {
+ struct ip *h = mtod(m, struct ip *);
+ src->scrub->pfss_ttl = h->ip_ttl;
+ break;
+ }
+#endif /* INET */
+#ifdef INET6
+ case AF_INET6: {
+ struct ip6_hdr *h = mtod(m, struct ip6_hdr *);
+ src->scrub->pfss_ttl = h->ip6_hlim;
+ break;
+ }
+#endif /* INET6 */
+ }
+
+
+ /*
+ * All normalizations below are only begun if we see the start of
+	 * the connection. They must all set an enabled bit in pfss_flags
+ */
+ if ((th->th_flags & TH_SYN) == 0)
+ return (0);
+
+
+ if (th->th_off > (sizeof(struct tcphdr) >> 2) && src->scrub &&
+ pf_pull_hdr(m, off, hdr, th->th_off << 2, NULL, NULL, pd->af)) {
+ /* Diddle with TCP options */
+ int hlen;
+ opt = hdr + sizeof(struct tcphdr);
+ hlen = (th->th_off << 2) - sizeof(struct tcphdr);
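+		/*
+		 * TCP timestamp option layout (RFC 1323): kind (1 byte),
+		 * length (1 byte, value 10), TSval (4 bytes), TSecr
+		 * (4 bytes); hence the opt[2] and opt[6] offsets below.
+		 */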
+ while (hlen >= TCPOLEN_TIMESTAMP) {
+ switch (*opt) {
+ case TCPOPT_EOL: /* FALLTHROUGH */
+ case TCPOPT_NOP:
+ opt++;
+ hlen--;
+ break;
+ case TCPOPT_TIMESTAMP:
+ if (opt[1] >= TCPOLEN_TIMESTAMP) {
+ src->scrub->pfss_flags |=
+ PFSS_TIMESTAMP;
+ src->scrub->pfss_ts_mod =
+ htonl(arc4random());
+
+ /* note PFSS_PAWS not set yet */
+ memcpy(&tsval, &opt[2],
+ sizeof(u_int32_t));
+ memcpy(&tsecr, &opt[6],
+ sizeof(u_int32_t));
+ src->scrub->pfss_tsval0 = ntohl(tsval);
+ src->scrub->pfss_tsval = ntohl(tsval);
+ src->scrub->pfss_tsecr = ntohl(tsecr);
+ getmicrouptime(&src->scrub->pfss_last);
+ }
+ /* FALLTHROUGH */
+ default:
+ hlen -= MAX(opt[1], 2);
+ opt += MAX(opt[1], 2);
+ break;
+ }
+ }
+ }
+
+ return (0);
+}
+
+void
+pf_normalize_tcp_cleanup(struct pf_state *state)
+{
+ if (state->src.scrub)
+ pool_put(&pf_state_scrub_pl, state->src.scrub);
+ if (state->dst.scrub)
+ pool_put(&pf_state_scrub_pl, state->dst.scrub);
+
+ /* Someday... flush the TCP segment reassembly descriptors. */
+}
+
+int
+pf_normalize_tcp_stateful(struct mbuf *m, int off, struct pf_pdesc *pd,
+ u_short *reason, struct tcphdr *th, struct pf_state *state,
+ struct pf_state_peer *src, struct pf_state_peer *dst, int *writeback)
+{
+ struct timeval uptime;
+ u_int32_t tsval, tsecr;
+ u_int tsval_from_last;
+ u_int8_t hdr[60];
+ u_int8_t *opt;
+ int copyback = 0;
+ int got_ts = 0;
+
+#ifdef __FreeBSD__
+	KASSERT((src->scrub || dst->scrub),
+	    ("pf_normalize_tcp_stateful: neither src->scrub nor dst->scrub"));
+#else
+ KASSERT(src->scrub || dst->scrub);
+#endif
+
+ /*
+ * Enforce the minimum TTL seen for this connection. Negate a common
+ * technique to evade an intrusion detection system and confuse
+ * firewall state code.
+ */
+ switch (pd->af) {
+#ifdef INET
+ case AF_INET: {
+ if (src->scrub) {
+ struct ip *h = mtod(m, struct ip *);
+ if (h->ip_ttl > src->scrub->pfss_ttl)
+ src->scrub->pfss_ttl = h->ip_ttl;
+ h->ip_ttl = src->scrub->pfss_ttl;
+ }
+ break;
+ }
+#endif /* INET */
+#ifdef INET6
+ case AF_INET6: {
+ if (src->scrub) {
+ struct ip6_hdr *h = mtod(m, struct ip6_hdr *);
+ if (h->ip6_hlim > src->scrub->pfss_ttl)
+ src->scrub->pfss_ttl = h->ip6_hlim;
+ h->ip6_hlim = src->scrub->pfss_ttl;
+ }
+ break;
+ }
+#endif /* INET6 */
+ }
+
+ if (th->th_off > (sizeof(struct tcphdr) >> 2) &&
+ ((src->scrub && (src->scrub->pfss_flags & PFSS_TIMESTAMP)) ||
+ (dst->scrub && (dst->scrub->pfss_flags & PFSS_TIMESTAMP))) &&
+ pf_pull_hdr(m, off, hdr, th->th_off << 2, NULL, NULL, pd->af)) {
+ /* Diddle with TCP options */
+ int hlen;
+ opt = hdr + sizeof(struct tcphdr);
+ hlen = (th->th_off << 2) - sizeof(struct tcphdr);
+ while (hlen >= TCPOLEN_TIMESTAMP) {
+ switch (*opt) {
+ case TCPOPT_EOL: /* FALLTHROUGH */
+ case TCPOPT_NOP:
+ opt++;
+ hlen--;
+ break;
+ case TCPOPT_TIMESTAMP:
+ /* Modulate the timestamps. Can be used for
+ * NAT detection, OS uptime determination or
+ * reboot detection.
+ */
+
+ if (got_ts) {
+ /* Huh? Multiple timestamps!? */
+ if (pf_status.debug >= PF_DEBUG_MISC) {
+ DPFPRINTF(("multiple TS??"));
+ pf_print_state(state);
+ printf("\n");
+ }
+ REASON_SET(reason, PFRES_TS);
+ return (PF_DROP);
+ }
+ if (opt[1] >= TCPOLEN_TIMESTAMP) {
+ memcpy(&tsval, &opt[2],
+ sizeof(u_int32_t));
+ if (tsval && src->scrub &&
+ (src->scrub->pfss_flags &
+ PFSS_TIMESTAMP)) {
+ tsval = ntohl(tsval);
+ pf_change_a(&opt[2],
+ &th->th_sum,
+ htonl(tsval +
+ src->scrub->pfss_ts_mod),
+ 0);
+ copyback = 1;
+ }
+
+ /* Modulate TS reply iff valid (!0) */
+ memcpy(&tsecr, &opt[6],
+ sizeof(u_int32_t));
+ if (tsecr && dst->scrub &&
+ (dst->scrub->pfss_flags &
+ PFSS_TIMESTAMP)) {
+ tsecr = ntohl(tsecr)
+ - dst->scrub->pfss_ts_mod;
+ pf_change_a(&opt[6],
+ &th->th_sum, htonl(tsecr),
+ 0);
+ copyback = 1;
+ }
+ got_ts = 1;
+ }
+ /* FALLTHROUGH */
+ default:
+ hlen -= MAX(opt[1], 2);
+ opt += MAX(opt[1], 2);
+ break;
+ }
+ }
+ if (copyback) {
+		/* Copyback the options, caller copies back header */
+ *writeback = 1;
+ m_copyback(m, off + sizeof(struct tcphdr),
+ (th->th_off << 2) - sizeof(struct tcphdr), hdr +
+ sizeof(struct tcphdr));
+ }
+ }
+
+
+ /*
+ * Must invalidate PAWS checks on connections idle for too long.
+ * The fastest allowed timestamp clock is 1ms. That turns out to
+ * be about 24 days before it wraps. XXX Right now our lowerbound
+	 * TS echo check only works for the first 12 days of a connection,
+	 * i.e. until the TS has exhausted half of its 32bit space
+ */
+#define TS_MAX_IDLE (24*24*60*60)
+#define TS_MAX_CONN (12*24*60*60) /* XXX remove when better tsecr check */
+
+ getmicrouptime(&uptime);
+ if (src->scrub && (src->scrub->pfss_flags & PFSS_PAWS) &&
+ (uptime.tv_sec - src->scrub->pfss_last.tv_sec > TS_MAX_IDLE ||
+ time_second - state->creation > TS_MAX_CONN)) {
+ if (pf_status.debug >= PF_DEBUG_MISC) {
+ DPFPRINTF(("src idled out of PAWS\n"));
+ pf_print_state(state);
+ printf("\n");
+ }
+ src->scrub->pfss_flags = (src->scrub->pfss_flags & ~PFSS_PAWS)
+ | PFSS_PAWS_IDLED;
+ }
+ if (dst->scrub && (dst->scrub->pfss_flags & PFSS_PAWS) &&
+ uptime.tv_sec - dst->scrub->pfss_last.tv_sec > TS_MAX_IDLE) {
+ if (pf_status.debug >= PF_DEBUG_MISC) {
+ DPFPRINTF(("dst idled out of PAWS\n"));
+ pf_print_state(state);
+ printf("\n");
+ }
+ dst->scrub->pfss_flags = (dst->scrub->pfss_flags & ~PFSS_PAWS)
+ | PFSS_PAWS_IDLED;
+ }
+
+ if (got_ts && src->scrub && dst->scrub &&
+ (src->scrub->pfss_flags & PFSS_PAWS) &&
+ (dst->scrub->pfss_flags & PFSS_PAWS)) {
+ /* Validate that the timestamps are "in-window".
+ * RFC1323 describes TCP Timestamp options that allow
+ * measurement of RTT (round trip time) and PAWS
+ * (protection against wrapped sequence numbers). PAWS
+ * gives us a set of rules for rejecting packets on
+ * long fat pipes (packets that were somehow delayed
+ * in transit longer than the time it took to send the
+ * full TCP sequence space of 4Gb). We can use these
+ * rules and infer a few others that will let us treat
+ * the 32bit timestamp and the 32bit echoed timestamp
+ * as sequence numbers to prevent a blind attacker from
+ * inserting packets into a connection.
+ *
+ * RFC1323 tells us:
+ * - The timestamp on this packet must be greater than
+ * or equal to the last value echoed by the other
+ * endpoint. The RFC says those will be discarded
+ * since it is a dup that has already been acked.
+ * This gives us a lowerbound on the timestamp.
+ * timestamp >= other last echoed timestamp
+ * - The timestamp will be less than or equal to
+ * the last timestamp plus the time between the
+ * last packet and now. The RFC defines the max
+ * clock rate as 1ms. We will allow clocks to be
+ * up to 10% fast and will allow a total difference
+			 * of 30 seconds due to a route change. And this
+ * gives us an upperbound on the timestamp.
+ * timestamp <= last timestamp + max ticks
+ * We have to be careful here. Windows will send an
+ * initial timestamp of zero and then initialize it
+ * to a random value after the 3whs; presumably to
+ * avoid a DoS by having to call an expensive RNG
+ * during a SYN flood. Proof MS has at least one
+ * good security geek.
+ *
+ * - The TCP timestamp option must also echo the other
+ * endpoints timestamp. The timestamp echoed is the
+ * one carried on the earliest unacknowledged segment
+ * on the left edge of the sequence window. The RFC
+ * states that the host will reject any echoed
+ * timestamps that were larger than any ever sent.
+ * This gives us an upperbound on the TS echo.
+			 * tsecr <= largest_tsval
+ * - The lowerbound on the TS echo is a little more
+ * tricky to determine. The other endpoint's echoed
+ * values will not decrease. But there may be
+ * network conditions that re-order packets and
+ * cause our view of them to decrease. For now the
+ * only lowerbound we can safely determine is that
+			 * the TS echo will never be less than the original
+			 * TS. XXX There is probably a better lowerbound.
+			 * Remove TS_MAX_CONN with better lowerbound check.
+			 * tsecr >= other original TS
+ *
+ * It is also important to note that the fastest
+ * timestamp clock of 1ms will wrap its 32bit space in
+ * 24 days. So we just disable TS checking after 24
+ * days of idle time. We actually must use a 12d
+ * connection limit until we can come up with a better
+ * lowerbound to the TS echo check.
+ */
+ struct timeval delta_ts;
+ int ts_fudge;
+
+
+ /*
+ * PFTM_TS_DIFF is how many seconds of leeway to allow
+ * a host's timestamp. This can happen if the previous
+ * packet got delayed in transit for much longer than
+ * this packet.
+ */
+ if ((ts_fudge = state->rule.ptr->timeout[PFTM_TS_DIFF]) == 0)
+ ts_fudge = pf_default_rule.timeout[PFTM_TS_DIFF];
+
+
+ /* Calculate max ticks since the last timestamp */
+#define TS_MAXFREQ 1100 /* RFC max TS freq of 1Khz + 10% skew */
+#define TS_MICROSECS 1000000 /* microseconds per second */
+#ifdef __FreeBSD__
+#ifndef timersub
+#define timersub(tvp, uvp, vvp) \
+ do { \
+ (vvp)->tv_sec = (tvp)->tv_sec - (uvp)->tv_sec; \
+ (vvp)->tv_usec = (tvp)->tv_usec - (uvp)->tv_usec; \
+ if ((vvp)->tv_usec < 0) { \
+ (vvp)->tv_sec--; \
+ (vvp)->tv_usec += 1000000; \
+ } \
+ } while (0)
+#endif
+#endif
+ timersub(&uptime, &src->scrub->pfss_last, &delta_ts);
+ tsval_from_last = (delta_ts.tv_sec + ts_fudge) * TS_MAXFREQ;
+ tsval_from_last += delta_ts.tv_usec / (TS_MICROSECS/TS_MAXFREQ);
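+		/*
+		 * E.g. a packet arriving 2.5s after the previous one,
+		 * with a 30s fudge, may advance the peer's timestamp by
+		 * at most (2 + 30) * 1100 + 500000 / (1000000 / 1100) =
+		 * 35200 + 550 = 35750 ticks before the upperbound check
+		 * below fires.
+		 */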
+
+
+ if ((src->state >= TCPS_ESTABLISHED &&
+ dst->state >= TCPS_ESTABLISHED) &&
+ (SEQ_LT(tsval, dst->scrub->pfss_tsecr) ||
+ SEQ_GT(tsval, src->scrub->pfss_tsval + tsval_from_last) ||
+ (tsecr && (SEQ_GT(tsecr, dst->scrub->pfss_tsval) ||
+ SEQ_LT(tsecr, dst->scrub->pfss_tsval0))))) {
+ /* Bad RFC1323 implementation or an insertion attack.
+ *
+ * - Solaris 2.6 and 2.7 are known to send another ACK
+ * after the FIN,FIN|ACK,ACK closing that carries
+ * an old timestamp.
+ */
+
+ DPFPRINTF(("Timestamp failed %c%c%c%c\n",
+ SEQ_LT(tsval, dst->scrub->pfss_tsecr) ? '0' : ' ',
+ SEQ_GT(tsval, src->scrub->pfss_tsval +
+ tsval_from_last) ? '1' : ' ',
+ SEQ_GT(tsecr, dst->scrub->pfss_tsval) ? '2' : ' ',
+ SEQ_LT(tsecr, dst->scrub->pfss_tsval0)? '3' : ' '));
+#ifdef __FreeBSD__
+ DPFPRINTF((" tsval: %u tsecr: %u +ticks: %u "
+ "idle: %jus %lums\n",
+ tsval, tsecr, tsval_from_last,
+ (uintmax_t)delta_ts.tv_sec,
+ delta_ts.tv_usec / 1000));
+ DPFPRINTF((" src->tsval: %u tsecr: %u\n",
+ src->scrub->pfss_tsval, src->scrub->pfss_tsecr));
+ DPFPRINTF((" dst->tsval: %u tsecr: %u tsval0: %u"
+ "\n", dst->scrub->pfss_tsval,
+ dst->scrub->pfss_tsecr, dst->scrub->pfss_tsval0));
+#else
+ DPFPRINTF((" tsval: %lu tsecr: %lu +ticks: %lu "
+ "idle: %lus %lums\n",
+ tsval, tsecr, tsval_from_last, delta_ts.tv_sec,
+ delta_ts.tv_usec / 1000));
+ DPFPRINTF((" src->tsval: %lu tsecr: %lu\n",
+ src->scrub->pfss_tsval, src->scrub->pfss_tsecr));
+ DPFPRINTF((" dst->tsval: %lu tsecr: %lu tsval0: %lu"
+ "\n", dst->scrub->pfss_tsval,
+ dst->scrub->pfss_tsecr, dst->scrub->pfss_tsval0));
+#endif
+ if (pf_status.debug >= PF_DEBUG_MISC) {
+ pf_print_state(state);
+ pf_print_flags(th->th_flags);
+ printf("\n");
+ }
+ REASON_SET(reason, PFRES_TS);
+ return (PF_DROP);
+ }
+
+ /* XXX I'd really like to require tsecr but it's optional */
+
+ } else if (!got_ts && (th->th_flags & TH_RST) == 0 &&
+ ((src->state == TCPS_ESTABLISHED && dst->state == TCPS_ESTABLISHED)
+ || pd->p_len > 0 || (th->th_flags & TH_SYN)) &&
+ src->scrub && dst->scrub &&
+ (src->scrub->pfss_flags & PFSS_PAWS) &&
+ (dst->scrub->pfss_flags & PFSS_PAWS)) {
+ /* Didn't send a timestamp. Timestamps aren't really useful
+ * when:
+ * - connection opening or closing (often not even sent).
+	 * But we must not let an attacker put a FIN on a
+ * data packet to sneak it through our ESTABLISHED check.
+ * - on a TCP reset. RFC suggests not even looking at TS.
+ * - on an empty ACK. The TS will not be echoed so it will
+ * probably not help keep the RTT calculation in sync and
+ * there isn't as much danger when the sequence numbers
+ * got wrapped. So some stacks don't include TS on empty
+ * ACKs :-(
+ *
+ * To minimize the disruption to mostly RFC1323 conformant
+ * stacks, we will only require timestamps on data packets.
+ *
+ * And what do ya know, we cannot require timestamps on data
+ * packets. There appear to be devices that do legitimate
+ * TCP connection hijacking. There are HTTP devices that allow
+ * a 3whs (with timestamps) and then buffer the HTTP request.
+ * If the intermediate device has the HTTP response cache, it
+ * will spoof the response but not bother timestamping its
+ * packets. So we can look for the presence of a timestamp in
+ * the first data packet and if there, require it in all future
+ * packets.
+ */
+
+ if (pd->p_len > 0 && (src->scrub->pfss_flags & PFSS_DATA_TS)) {
+ /*
+ * Hey! Someone tried to sneak a packet in. Or the
+ * stack changed its RFC1323 behavior?!?!
+ */
+ if (pf_status.debug >= PF_DEBUG_MISC) {
+ DPFPRINTF(("Did not receive expected RFC1323 "
+ "timestamp\n"));
+ pf_print_state(state);
+ pf_print_flags(th->th_flags);
+ printf("\n");
+ }
+ REASON_SET(reason, PFRES_TS);
+ return (PF_DROP);
+ }
+ }
+
+
+ /*
+	 * We note whether a host sends its data packets with or without
+	 * timestamps, and require all data packets to contain a timestamp
+	 * if the first one does. PAWS implicitly requires that all data packets be
+ * timestamped. But I think there are middle-man devices that hijack
+ * TCP streams immediately after the 3whs and don't timestamp their
+ * packets (seen in a WWW accelerator or cache).
+ */
+ if (pd->p_len > 0 && src->scrub && (src->scrub->pfss_flags &
+ (PFSS_TIMESTAMP|PFSS_DATA_TS|PFSS_DATA_NOTS)) == PFSS_TIMESTAMP) {
+ if (got_ts)
+ src->scrub->pfss_flags |= PFSS_DATA_TS;
+ else {
+ src->scrub->pfss_flags |= PFSS_DATA_NOTS;
+ if (pf_status.debug >= PF_DEBUG_MISC && dst->scrub &&
+ (dst->scrub->pfss_flags & PFSS_TIMESTAMP)) {
+ /* Don't warn if other host rejected RFC1323 */
+ DPFPRINTF(("Broken RFC1323 stack did not "
+ "timestamp data packet. Disabled PAWS "
+ "security.\n"));
+ pf_print_state(state);
+ pf_print_flags(th->th_flags);
+ printf("\n");
+ }
+ }
+ }
+
+
+ /*
+ * Update PAWS values
+ */
+ if (got_ts && src->scrub && PFSS_TIMESTAMP == (src->scrub->pfss_flags &
+ (PFSS_PAWS_IDLED|PFSS_TIMESTAMP))) {
+ getmicrouptime(&src->scrub->pfss_last);
+ if (SEQ_GEQ(tsval, src->scrub->pfss_tsval) ||
+ (src->scrub->pfss_flags & PFSS_PAWS) == 0)
+ src->scrub->pfss_tsval = tsval;
+
+ if (tsecr) {
+ if (SEQ_GEQ(tsecr, src->scrub->pfss_tsecr) ||
+ (src->scrub->pfss_flags & PFSS_PAWS) == 0)
+ src->scrub->pfss_tsecr = tsecr;
+
+ if ((src->scrub->pfss_flags & PFSS_PAWS) == 0 &&
+ (SEQ_LT(tsval, src->scrub->pfss_tsval0) ||
+ src->scrub->pfss_tsval0 == 0)) {
+ /* tsval0 MUST be the lowest timestamp */
+ src->scrub->pfss_tsval0 = tsval;
+ }
+
+ /* Only fully initialized after a TS gets echoed */
+ if ((src->scrub->pfss_flags & PFSS_PAWS) == 0)
+ src->scrub->pfss_flags |= PFSS_PAWS;
+ }
+ }
+
+ /* I have a dream.... TCP segment reassembly.... */
+ return (0);
+}
+
+int
+pf_normalize_tcpopt(struct pf_rule *r, struct mbuf *m, struct tcphdr *th,
+ int off)
+{
+ u_int16_t *mss;
+ int thoff;
+ int opt, cnt, optlen = 0;
+ int rewrite = 0;
+ u_char *optp;
+
+ thoff = th->th_off << 2;
+ cnt = thoff - sizeof(struct tcphdr);
+ optp = mtod(m, caddr_t) + off + sizeof(struct tcphdr);
+
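+	/*
+	 * Walk the TCP options and clamp any MSS above the rule's
+	 * max-mss, fixing the checksum incrementally; the MSS bytes are
+	 * edited in place in the mbuf and the caller copies the adjusted
+	 * header back when we return 1.
+	 */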
+ for (; cnt > 0; cnt -= optlen, optp += optlen) {
+ opt = optp[0];
+ if (opt == TCPOPT_EOL)
+ break;
+ if (opt == TCPOPT_NOP)
+ optlen = 1;
+ else {
+ if (cnt < 2)
+ break;
+ optlen = optp[1];
+ if (optlen < 2 || optlen > cnt)
+ break;
+ }
+ switch (opt) {
+ case TCPOPT_MAXSEG:
+ mss = (u_int16_t *)(optp + 2);
+ if ((ntohs(*mss)) > r->max_mss) {
+ th->th_sum = pf_cksum_fixup(th->th_sum,
+ *mss, htons(r->max_mss), 0);
+ *mss = htons(r->max_mss);
+ rewrite = 1;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+
+ return (rewrite);
+}
diff --git a/contrib/pf/rtems/freebsd/net/pf_osfp.c b/contrib/pf/rtems/freebsd/net/pf_osfp.c
new file mode 100644
index 00000000..e1d7d647
--- /dev/null
+++ b/contrib/pf/rtems/freebsd/net/pf_osfp.c
@@ -0,0 +1,640 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/* $OpenBSD: pf_osfp.c,v 1.12 2006/12/13 18:14:10 itojun Exp $ */
+
+/*
+ * Copyright (c) 2003 Mike Frantzen <frantzen@w4g.org>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ */
+
+#ifdef __FreeBSD__
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+#endif
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/socket.h>
+#ifdef _KERNEL
+# include <rtems/freebsd/sys/systm.h>
+#endif /* _KERNEL */
+#include <rtems/freebsd/sys/mbuf.h>
+
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/in_systm.h>
+#include <rtems/freebsd/netinet/ip.h>
+#include <rtems/freebsd/netinet/tcp.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/pfvar.h>
+
+#include <rtems/freebsd/netinet/ip6.h>
+#ifdef _KERNEL
+#include <rtems/freebsd/netinet6/in6_var.h>
+#endif
+
+#ifdef _KERNEL
+# define DPFPRINTF(format, x...) \
+ if (pf_status.debug >= PF_DEBUG_NOISY) \
+ printf(format , ##x)
+#ifdef __FreeBSD__
+typedef uma_zone_t pool_t;
+#else
+typedef struct pool pool_t;
+#endif
+
+#else
+/* Userland equivalents so we can lend code to tcpdump et al. */
+
+# include <rtems/freebsd/arpa/inet.h>
+# include <rtems/freebsd/errno.h>
+# include <rtems/freebsd/stdio.h>
+# include <rtems/freebsd/stdlib.h>
+# include <rtems/freebsd/string.h>
+# include <rtems/freebsd/netdb.h>
+# define pool_t int
+# define pool_get(pool, flags) malloc(*(pool))
+# define pool_put(pool, item) free(item)
+# define pool_init(pool, size, a, ao, f, m, p) (*(pool)) = (size)
+
+# ifdef __FreeBSD__
+# define NTOHS(x) (x) = ntohs((u_int16_t)(x))
+# endif
+
+# ifdef PFDEBUG
+# include <rtems/freebsd/sys/stdarg.h>
+# define DPFPRINTF(format, x...) fprintf(stderr, format , ##x)
+# else
+# define DPFPRINTF(format, x...) ((void)0)
+# endif /* PFDEBUG */
+#endif /* _KERNEL */
+
+
+SLIST_HEAD(pf_osfp_list, pf_os_fingerprint) pf_osfp_list;
+pool_t pf_osfp_entry_pl;
+pool_t pf_osfp_pl;
+
+struct pf_os_fingerprint *pf_osfp_find(struct pf_osfp_list *,
+ struct pf_os_fingerprint *, u_int8_t);
+struct pf_os_fingerprint *pf_osfp_find_exact(struct pf_osfp_list *,
+ struct pf_os_fingerprint *);
+void pf_osfp_insert(struct pf_osfp_list *,
+ struct pf_os_fingerprint *);
+
+
+#ifdef _KERNEL
+/*
+ * Passively fingerprint the OS of the host (IPv4 TCP SYN packets only)
+ * Returns the list of possible OSes.
+ */
+struct pf_osfp_enlist *
+pf_osfp_fingerprint(struct pf_pdesc *pd, struct mbuf *m, int off,
+ const struct tcphdr *tcp)
+{
+ struct ip *ip;
+ struct ip6_hdr *ip6;
+ char hdr[60];
+
+ if ((pd->af != PF_INET && pd->af != PF_INET6) ||
+ pd->proto != IPPROTO_TCP || (tcp->th_off << 2) < sizeof(*tcp))
+ return (NULL);
+
+ if (pd->af == PF_INET) {
+ ip = mtod(m, struct ip *);
+ ip6 = (struct ip6_hdr *)NULL;
+ } else {
+ ip = (struct ip *)NULL;
+ ip6 = mtod(m, struct ip6_hdr *);
+ }
+ if (!pf_pull_hdr(m, off, hdr, tcp->th_off << 2, NULL, NULL,
+ pd->af)) return (NULL);
+
+ return (pf_osfp_fingerprint_hdr(ip, ip6, (struct tcphdr *)hdr));
+}
+#endif /* _KERNEL */
+
+struct pf_osfp_enlist *
+pf_osfp_fingerprint_hdr(const struct ip *ip, const struct ip6_hdr *ip6, const struct tcphdr *tcp)
+{
+ struct pf_os_fingerprint fp, *fpresult;
+ int cnt, optlen = 0;
+ const u_int8_t *optp;
+#ifdef _KERNEL
+ char srcname[128];
+#else
+ char srcname[NI_MAXHOST];
+#endif
+#ifdef __rtems__
+#ifdef INET6
+	char ip6buf[INET6_ADDRSTRLEN];
+#endif /* INET6 */
+#endif /* __rtems__ */
+
+ if ((tcp->th_flags & (TH_SYN|TH_ACK)) != TH_SYN)
+ return (NULL);
+ if (ip) {
+ if ((ip->ip_off & htons(IP_OFFMASK)) != 0)
+ return (NULL);
+ }
+
+ memset(&fp, 0, sizeof(fp));
+
+ if (ip) {
+#ifndef _KERNEL
+ struct sockaddr_in sin;
+#endif
+
+ fp.fp_psize = ntohs(ip->ip_len);
+ fp.fp_ttl = ip->ip_ttl;
+ if (ip->ip_off & htons(IP_DF))
+ fp.fp_flags |= PF_OSFP_DF;
+#ifdef _KERNEL
+ strlcpy(srcname, inet_ntoa(ip->ip_src), sizeof(srcname));
+#else
+ memset(&sin, 0, sizeof(sin));
+ sin.sin_family = AF_INET;
+ sin.sin_len = sizeof(struct sockaddr_in);
+ sin.sin_addr = ip->ip_src;
+ (void)getnameinfo((struct sockaddr *)&sin,
+ sizeof(struct sockaddr_in), srcname, sizeof(srcname),
+ NULL, 0, NI_NUMERICHOST);
+#endif
+ }
+#ifdef INET6
+ else if (ip6) {
+#ifndef _KERNEL
+ struct sockaddr_in6 sin6;
+#endif
+
+ /* jumbo payload? */
+ fp.fp_psize = sizeof(struct ip6_hdr) + ntohs(ip6->ip6_plen);
+ fp.fp_ttl = ip6->ip6_hlim;
+ fp.fp_flags |= PF_OSFP_DF;
+ fp.fp_flags |= PF_OSFP_INET6;
+#ifdef _KERNEL
+#ifndef __rtems__
+ strlcpy(srcname, ip6_sprintf((struct in6_addr *)&ip6->ip6_src),
+ sizeof(srcname));
+#else
+ strlcpy(srcname, ip6_sprintf(&ip6buf, (struct in6_addr *)&ip6->ip6_src),
+ sizeof(srcname));
+#endif
+#else
+ memset(&sin6, 0, sizeof(sin6));
+ sin6.sin6_family = AF_INET6;
+ sin6.sin6_len = sizeof(struct sockaddr_in6);
+ sin6.sin6_addr = ip6->ip6_src;
+ (void)getnameinfo((struct sockaddr *)&sin6,
+ sizeof(struct sockaddr_in6), srcname, sizeof(srcname),
+ NULL, 0, NI_NUMERICHOST);
+#endif
+ }
+#endif
+ else
+ return (NULL);
+ fp.fp_wsize = ntohs(tcp->th_win);
+
+
+ cnt = (tcp->th_off << 2) - sizeof(*tcp);
+ optp = (const u_int8_t *)((const char *)tcp + sizeof(*tcp));
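+	/*
+	 * Each option kind seen is shifted into fp_tcpopts,
+	 * PF_OSFP_TCPOPT_BITS at a time, so the packed value records
+	 * both which options appeared and in what order.
+	 */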
+ for (; cnt > 0; cnt -= optlen, optp += optlen) {
+ if (*optp == TCPOPT_EOL)
+ break;
+
+ fp.fp_optcnt++;
+ if (*optp == TCPOPT_NOP) {
+ fp.fp_tcpopts = (fp.fp_tcpopts << PF_OSFP_TCPOPT_BITS) |
+ PF_OSFP_TCPOPT_NOP;
+ optlen = 1;
+ } else {
+ if (cnt < 2)
+ return (NULL);
+ optlen = optp[1];
+ if (optlen > cnt || optlen < 2)
+ return (NULL);
+ switch (*optp) {
+ case TCPOPT_MAXSEG:
+ if (optlen >= TCPOLEN_MAXSEG)
+ memcpy(&fp.fp_mss, &optp[2],
+ sizeof(fp.fp_mss));
+ fp.fp_tcpopts = (fp.fp_tcpopts <<
+ PF_OSFP_TCPOPT_BITS) | PF_OSFP_TCPOPT_MSS;
+ NTOHS(fp.fp_mss);
+ break;
+ case TCPOPT_WINDOW:
+ if (optlen >= TCPOLEN_WINDOW)
+ memcpy(&fp.fp_wscale, &optp[2],
+ sizeof(fp.fp_wscale));
+ NTOHS(fp.fp_wscale);
+ fp.fp_tcpopts = (fp.fp_tcpopts <<
+ PF_OSFP_TCPOPT_BITS) |
+ PF_OSFP_TCPOPT_WSCALE;
+ break;
+ case TCPOPT_SACK_PERMITTED:
+ fp.fp_tcpopts = (fp.fp_tcpopts <<
+ PF_OSFP_TCPOPT_BITS) | PF_OSFP_TCPOPT_SACK;
+ break;
+ case TCPOPT_TIMESTAMP:
+ if (optlen >= TCPOLEN_TIMESTAMP) {
+ u_int32_t ts;
+ memcpy(&ts, &optp[2], sizeof(ts));
+ if (ts == 0)
+ fp.fp_flags |= PF_OSFP_TS0;
+
+ }
+ fp.fp_tcpopts = (fp.fp_tcpopts <<
+ PF_OSFP_TCPOPT_BITS) | PF_OSFP_TCPOPT_TS;
+ break;
+ default:
+ return (NULL);
+ }
+ }
+ optlen = MAX(optlen, 1); /* paranoia */
+ }
+
+ DPFPRINTF("fingerprinted %s:%d %d:%d:%d:%d:%llx (%d) "
+ "(TS=%s,M=%s%d,W=%s%d)\n",
+ srcname, ntohs(tcp->th_sport),
+ fp.fp_wsize, fp.fp_ttl, (fp.fp_flags & PF_OSFP_DF) != 0,
+ fp.fp_psize, (long long int)fp.fp_tcpopts, fp.fp_optcnt,
+ (fp.fp_flags & PF_OSFP_TS0) ? "0" : "",
+ (fp.fp_flags & PF_OSFP_MSS_MOD) ? "%" :
+ (fp.fp_flags & PF_OSFP_MSS_DC) ? "*" : "",
+ fp.fp_mss,
+ (fp.fp_flags & PF_OSFP_WSCALE_MOD) ? "%" :
+ (fp.fp_flags & PF_OSFP_WSCALE_DC) ? "*" : "",
+ fp.fp_wscale);
+
+ if ((fpresult = pf_osfp_find(&pf_osfp_list, &fp,
+ PF_OSFP_MAXTTL_OFFSET)))
+ return (&fpresult->fp_oses);
+ return (NULL);
+}
+
+/* Match a fingerprint ID against a list of OSes */
+int
+pf_osfp_match(struct pf_osfp_enlist *list, pf_osfp_t os)
+{
+ struct pf_osfp_entry *entry;
+ int os_class, os_version, os_subtype;
+ int en_class, en_version, en_subtype;
+
+ if (os == PF_OSFP_ANY)
+ return (1);
+ if (list == NULL) {
+ DPFPRINTF("osfp no match against %x\n", os);
+ return (os == PF_OSFP_UNKNOWN);
+ }
+ PF_OSFP_UNPACK(os, os_class, os_version, os_subtype);
+ SLIST_FOREACH(entry, list, fp_entry) {
+ PF_OSFP_UNPACK(entry->fp_os, en_class, en_version, en_subtype);
+ if ((os_class == PF_OSFP_ANY || en_class == os_class) &&
+ (os_version == PF_OSFP_ANY || en_version == os_version) &&
+ (os_subtype == PF_OSFP_ANY || en_subtype == os_subtype)) {
+ DPFPRINTF("osfp matched %s %s %s %x==%x\n",
+ entry->fp_class_nm, entry->fp_version_nm,
+ entry->fp_subtype_nm, os, entry->fp_os);
+ return (1);
+ }
+ }
+ DPFPRINTF("fingerprint 0x%x didn't match\n", os);
+ return (0);
+}
+
+/* Initialize the OS fingerprint system */
+#ifdef __FreeBSD__
+int
+#else
+void
+#endif
+pf_osfp_initialize(void)
+{
+#if defined(__FreeBSD__) && defined(_KERNEL)
+ int error = ENOMEM;
+
+ do {
+ pf_osfp_entry_pl = pf_osfp_pl = NULL;
+ UMA_CREATE(pf_osfp_entry_pl, struct pf_osfp_entry, "pfospfen");
+ UMA_CREATE(pf_osfp_pl, struct pf_os_fingerprint, "pfosfp");
+ error = 0;
+ } while(0);
+#else
+ pool_init(&pf_osfp_entry_pl, sizeof(struct pf_osfp_entry), 0, 0, 0,
+ "pfosfpen", &pool_allocator_nointr);
+ pool_init(&pf_osfp_pl, sizeof(struct pf_os_fingerprint), 0, 0, 0,
+ "pfosfp", &pool_allocator_nointr);
+#endif
+ SLIST_INIT(&pf_osfp_list);
+#ifdef __FreeBSD__
+#ifdef _KERNEL
+ return (error);
+#else
+ return (0);
+#endif
+#endif
+}
+
+#if defined(__FreeBSD__) && defined(_KERNEL)
+void
+pf_osfp_cleanup(void)
+{
+ UMA_DESTROY(pf_osfp_entry_pl);
+ UMA_DESTROY(pf_osfp_pl);
+}
+#endif
+
+/* Flush the fingerprint list */
+void
+pf_osfp_flush(void)
+{
+ struct pf_os_fingerprint *fp;
+ struct pf_osfp_entry *entry;
+
+ while ((fp = SLIST_FIRST(&pf_osfp_list))) {
+ SLIST_REMOVE_HEAD(&pf_osfp_list, fp_next);
+ while ((entry = SLIST_FIRST(&fp->fp_oses))) {
+ SLIST_REMOVE_HEAD(&fp->fp_oses, fp_entry);
+ pool_put(&pf_osfp_entry_pl, entry);
+ }
+ pool_put(&pf_osfp_pl, fp);
+ }
+}
+
+
+/* Add a fingerprint */
+int
+pf_osfp_add(struct pf_osfp_ioctl *fpioc)
+{
+ struct pf_os_fingerprint *fp, fpadd;
+ struct pf_osfp_entry *entry;
+
+ memset(&fpadd, 0, sizeof(fpadd));
+ fpadd.fp_tcpopts = fpioc->fp_tcpopts;
+ fpadd.fp_wsize = fpioc->fp_wsize;
+ fpadd.fp_psize = fpioc->fp_psize;
+ fpadd.fp_mss = fpioc->fp_mss;
+ fpadd.fp_flags = fpioc->fp_flags;
+ fpadd.fp_optcnt = fpioc->fp_optcnt;
+ fpadd.fp_wscale = fpioc->fp_wscale;
+ fpadd.fp_ttl = fpioc->fp_ttl;
+
+ DPFPRINTF("adding osfp %s %s %s = %s%d:%d:%d:%s%d:0x%llx %d "
+ "(TS=%s,M=%s%d,W=%s%d) %x\n",
+ fpioc->fp_os.fp_class_nm, fpioc->fp_os.fp_version_nm,
+ fpioc->fp_os.fp_subtype_nm,
+ (fpadd.fp_flags & PF_OSFP_WSIZE_MOD) ? "%" :
+ (fpadd.fp_flags & PF_OSFP_WSIZE_MSS) ? "S" :
+ (fpadd.fp_flags & PF_OSFP_WSIZE_MTU) ? "T" :
+ (fpadd.fp_flags & PF_OSFP_WSIZE_DC) ? "*" : "",
+ fpadd.fp_wsize,
+ fpadd.fp_ttl,
+ (fpadd.fp_flags & PF_OSFP_DF) ? 1 : 0,
+ (fpadd.fp_flags & PF_OSFP_PSIZE_MOD) ? "%" :
+ (fpadd.fp_flags & PF_OSFP_PSIZE_DC) ? "*" : "",
+ fpadd.fp_psize,
+ (long long int)fpadd.fp_tcpopts, fpadd.fp_optcnt,
+ (fpadd.fp_flags & PF_OSFP_TS0) ? "0" : "",
+ (fpadd.fp_flags & PF_OSFP_MSS_MOD) ? "%" :
+ (fpadd.fp_flags & PF_OSFP_MSS_DC) ? "*" : "",
+ fpadd.fp_mss,
+ (fpadd.fp_flags & PF_OSFP_WSCALE_MOD) ? "%" :
+ (fpadd.fp_flags & PF_OSFP_WSCALE_DC) ? "*" : "",
+ fpadd.fp_wscale,
+ fpioc->fp_os.fp_os);
+
+
+ if ((fp = pf_osfp_find_exact(&pf_osfp_list, &fpadd))) {
+ SLIST_FOREACH(entry, &fp->fp_oses, fp_entry) {
+ if (PF_OSFP_ENTRY_EQ(entry, &fpioc->fp_os))
+ return (EEXIST);
+ }
+ if ((entry = pool_get(&pf_osfp_entry_pl, PR_NOWAIT)) == NULL)
+ return (ENOMEM);
+ } else {
+ if ((fp = pool_get(&pf_osfp_pl, PR_NOWAIT)) == NULL)
+ return (ENOMEM);
+ memset(fp, 0, sizeof(*fp));
+ fp->fp_tcpopts = fpioc->fp_tcpopts;
+ fp->fp_wsize = fpioc->fp_wsize;
+ fp->fp_psize = fpioc->fp_psize;
+ fp->fp_mss = fpioc->fp_mss;
+ fp->fp_flags = fpioc->fp_flags;
+ fp->fp_optcnt = fpioc->fp_optcnt;
+ fp->fp_wscale = fpioc->fp_wscale;
+ fp->fp_ttl = fpioc->fp_ttl;
+ SLIST_INIT(&fp->fp_oses);
+ if ((entry = pool_get(&pf_osfp_entry_pl, PR_NOWAIT)) == NULL) {
+ pool_put(&pf_osfp_pl, fp);
+ return (ENOMEM);
+ }
+ pf_osfp_insert(&pf_osfp_list, fp);
+ }
+ memcpy(entry, &fpioc->fp_os, sizeof(*entry));
+
+ /* Make sure the strings are NUL terminated */
+ entry->fp_class_nm[sizeof(entry->fp_class_nm)-1] = '\0';
+ entry->fp_version_nm[sizeof(entry->fp_version_nm)-1] = '\0';
+ entry->fp_subtype_nm[sizeof(entry->fp_subtype_nm)-1] = '\0';
+
+ SLIST_INSERT_HEAD(&fp->fp_oses, entry, fp_entry);
+
+#ifdef PFDEBUG
+ if ((fp = pf_osfp_validate()))
+ printf("Invalid fingerprint list\n");
+#endif /* PFDEBUG */
+ return (0);
+}
+
+
+/* Find a fingerprint in the list */
+struct pf_os_fingerprint *
+pf_osfp_find(struct pf_osfp_list *list, struct pf_os_fingerprint *find,
+ u_int8_t ttldiff)
+{
+ struct pf_os_fingerprint *f;
+
+#define MATCH_INT(_MOD, _DC, _field) \
+ if ((f->fp_flags & _DC) == 0) { \
+ if ((f->fp_flags & _MOD) == 0) { \
+ if (f->_field != find->_field) \
+ continue; \
+ } else { \
+ if (f->_field == 0 || find->_field % f->_field) \
+ continue; \
+ } \
+ }
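+/*
+ * A don't-care (_DC) flag skips the field entirely; a modulus (_MOD)
+ * flag accepts any multiple of the fingerprint's value (e.g. an
+ * fp_psize of 4 matches probes of size 4, 8, 12, ...); otherwise the
+ * field must match exactly.
+ */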
+
+ SLIST_FOREACH(f, list, fp_next) {
+ if (f->fp_tcpopts != find->fp_tcpopts ||
+ f->fp_optcnt != find->fp_optcnt ||
+ f->fp_ttl < find->fp_ttl ||
+ f->fp_ttl - find->fp_ttl > ttldiff ||
+ (f->fp_flags & (PF_OSFP_DF|PF_OSFP_TS0)) !=
+ (find->fp_flags & (PF_OSFP_DF|PF_OSFP_TS0)))
+ continue;
+
+ MATCH_INT(PF_OSFP_PSIZE_MOD, PF_OSFP_PSIZE_DC, fp_psize)
+ MATCH_INT(PF_OSFP_MSS_MOD, PF_OSFP_MSS_DC, fp_mss)
+ MATCH_INT(PF_OSFP_WSCALE_MOD, PF_OSFP_WSCALE_DC, fp_wscale)
+ if ((f->fp_flags & PF_OSFP_WSIZE_DC) == 0) {
+ if (f->fp_flags & PF_OSFP_WSIZE_MSS) {
+ if (find->fp_mss == 0)
+ continue;
+
+/* Some "smart" NAT devices and DSL routers will tweak the MSS size and
+ * will set it to whatever is suitable for the link type.
+ */
+#define SMART_MSS 1460
+ if ((find->fp_wsize % find->fp_mss ||
+ find->fp_wsize / find->fp_mss !=
+ f->fp_wsize) &&
+ (find->fp_wsize % SMART_MSS ||
+ find->fp_wsize / SMART_MSS !=
+ f->fp_wsize))
+ continue;
+ } else if (f->fp_flags & PF_OSFP_WSIZE_MTU) {
+ if (find->fp_mss == 0)
+ continue;
+
+#define MTUOFF (sizeof(struct ip) + sizeof(struct tcphdr))
+#define SMART_MTU (SMART_MSS + MTUOFF)
+ if ((find->fp_wsize % (find->fp_mss + MTUOFF) ||
+ find->fp_wsize / (find->fp_mss + MTUOFF) !=
+ f->fp_wsize) &&
+ (find->fp_wsize % SMART_MTU ||
+ find->fp_wsize / SMART_MTU !=
+ f->fp_wsize))
+ continue;
+ } else if (f->fp_flags & PF_OSFP_WSIZE_MOD) {
+ if (f->fp_wsize == 0 || find->fp_wsize %
+ f->fp_wsize)
+ continue;
+ } else {
+ if (f->fp_wsize != find->fp_wsize)
+ continue;
+ }
+ }
+ return (f);
+ }
+
+ return (NULL);
+}
+
+/* Find an exact fingerprint in the list */
+struct pf_os_fingerprint *
+pf_osfp_find_exact(struct pf_osfp_list *list, struct pf_os_fingerprint *find)
+{
+ struct pf_os_fingerprint *f;
+
+ SLIST_FOREACH(f, list, fp_next) {
+ if (f->fp_tcpopts == find->fp_tcpopts &&
+ f->fp_wsize == find->fp_wsize &&
+ f->fp_psize == find->fp_psize &&
+ f->fp_mss == find->fp_mss &&
+ f->fp_flags == find->fp_flags &&
+ f->fp_optcnt == find->fp_optcnt &&
+ f->fp_wscale == find->fp_wscale &&
+ f->fp_ttl == find->fp_ttl)
+ return (f);
+ }
+
+ return (NULL);
+}
+
+/* Insert a fingerprint into the list */
+void
+pf_osfp_insert(struct pf_osfp_list *list, struct pf_os_fingerprint *ins)
+{
+ struct pf_os_fingerprint *f, *prev = NULL;
+
+ /* XXX need to go semi tree based. can key on tcp options */
+
+ SLIST_FOREACH(f, list, fp_next)
+ prev = f;
+ if (prev)
+ SLIST_INSERT_AFTER(prev, ins, fp_next);
+ else
+ SLIST_INSERT_HEAD(list, ins, fp_next);
+}
+
+/* Fill a fingerprint by its number (from an ioctl) */
+int
+pf_osfp_get(struct pf_osfp_ioctl *fpioc)
+{
+ struct pf_os_fingerprint *fp;
+ struct pf_osfp_entry *entry;
+ int num = fpioc->fp_getnum;
+ int i = 0;
+
+
+ memset(fpioc, 0, sizeof(*fpioc));
+ SLIST_FOREACH(fp, &pf_osfp_list, fp_next) {
+ SLIST_FOREACH(entry, &fp->fp_oses, fp_entry) {
+ if (i++ == num) {
+ fpioc->fp_mss = fp->fp_mss;
+ fpioc->fp_wsize = fp->fp_wsize;
+ fpioc->fp_flags = fp->fp_flags;
+ fpioc->fp_psize = fp->fp_psize;
+ fpioc->fp_ttl = fp->fp_ttl;
+ fpioc->fp_wscale = fp->fp_wscale;
+ fpioc->fp_getnum = num;
+ memcpy(&fpioc->fp_os, entry,
+ sizeof(fpioc->fp_os));
+ return (0);
+ }
+ }
+ }
+
+ return (EBUSY);
+}
+
+
+/* Validate that each signature is reachable */
+struct pf_os_fingerprint *
+pf_osfp_validate(void)
+{
+ struct pf_os_fingerprint *f, *f2, find;
+
+ SLIST_FOREACH(f, &pf_osfp_list, fp_next) {
+ memcpy(&find, f, sizeof(find));
+
+ /* We do a few MSS/th_win percolations to make things unique */
+ if (find.fp_mss == 0)
+ find.fp_mss = 128;
+ if (f->fp_flags & PF_OSFP_WSIZE_MSS)
+			find.fp_wsize *= find.fp_mss;
+ else if (f->fp_flags & PF_OSFP_WSIZE_MTU)
+ find.fp_wsize *= (find.fp_mss + 40);
+ else if (f->fp_flags & PF_OSFP_WSIZE_MOD)
+ find.fp_wsize *= 2;
+ if (f != (f2 = pf_osfp_find(&pf_osfp_list, &find, 0))) {
+ if (f2)
+ printf("Found \"%s %s %s\" instead of "
+ "\"%s %s %s\"\n",
+ SLIST_FIRST(&f2->fp_oses)->fp_class_nm,
+ SLIST_FIRST(&f2->fp_oses)->fp_version_nm,
+ SLIST_FIRST(&f2->fp_oses)->fp_subtype_nm,
+ SLIST_FIRST(&f->fp_oses)->fp_class_nm,
+ SLIST_FIRST(&f->fp_oses)->fp_version_nm,
+ SLIST_FIRST(&f->fp_oses)->fp_subtype_nm);
+ else
+ printf("Couldn't find \"%s %s %s\"\n",
+ SLIST_FIRST(&f->fp_oses)->fp_class_nm,
+ SLIST_FIRST(&f->fp_oses)->fp_version_nm,
+ SLIST_FIRST(&f->fp_oses)->fp_subtype_nm);
+ return (f);
+ }
+ }
+ return (NULL);
+}
diff --git a/contrib/pf/rtems/freebsd/net/pf_ruleset.c b/contrib/pf/rtems/freebsd/net/pf_ruleset.c
new file mode 100644
index 00000000..147bc8cc
--- /dev/null
+++ b/contrib/pf/rtems/freebsd/net/pf_ruleset.c
@@ -0,0 +1,433 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/* $OpenBSD: pf_ruleset.c,v 1.1 2006/10/27 13:56:51 mcbride Exp $ */
+
+/*
+ * Copyright (c) 2001 Daniel Hartmeier
+ * Copyright (c) 2002,2003 Henning Brauer
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Effort sponsored in part by the Defense Advanced Research Projects
+ * Agency (DARPA) and Air Force Research Laboratory, Air Force
+ * Materiel Command, USAF, under agreement number F30602-01-2-0537.
+ *
+ */
+
+#ifdef __FreeBSD__
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+#endif
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/socket.h>
+#ifdef _KERNEL
+# include <rtems/freebsd/sys/systm.h>
+#endif /* _KERNEL */
+#include <rtems/freebsd/sys/mbuf.h>
+
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/in_systm.h>
+#include <rtems/freebsd/netinet/ip.h>
+#include <rtems/freebsd/netinet/tcp.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/pfvar.h>
+
+#ifdef INET6
+#include <rtems/freebsd/netinet/ip6.h>
+#endif /* INET6 */
+
+
+#ifdef _KERNEL
+# define DPFPRINTF(format, x...) \
+ if (pf_status.debug >= PF_DEBUG_NOISY) \
+ printf(format , ##x)
+#ifdef __FreeBSD__
+#define rs_malloc(x) malloc(x, M_TEMP, M_NOWAIT)
+#else
+#define rs_malloc(x) malloc(x, M_TEMP, M_WAITOK)
+#endif
+#define rs_free(x) free(x, M_TEMP)
+
+#else
+/* Userland equivalents so we can lend code to pfctl et al. */
+
+# include <rtems/freebsd/arpa/inet.h>
+# include <rtems/freebsd/errno.h>
+# include <rtems/freebsd/stdio.h>
+# include <rtems/freebsd/stdlib.h>
+# include <rtems/freebsd/string.h>
+# define rs_malloc(x) malloc(x)
+# define rs_free(x) free(x)
+
+# ifdef PFDEBUG
+# include <rtems/freebsd/sys/stdarg.h>
+# define DPFPRINTF(format, x...) fprintf(stderr, format , ##x)
+# else
+# define DPFPRINTF(format, x...) ((void)0)
+# endif /* PFDEBUG */
+#endif /* _KERNEL */
+
+
+struct pf_anchor_global pf_anchors;
+struct pf_anchor pf_main_anchor;
+
+#ifndef __FreeBSD__
+/* XXX: hum? */
+int pf_get_ruleset_number(u_int8_t);
+void pf_init_ruleset(struct pf_ruleset *);
+int pf_anchor_setup(struct pf_rule *,
+ const struct pf_ruleset *, const char *);
+int pf_anchor_copyout(const struct pf_ruleset *,
+ const struct pf_rule *, struct pfioc_rule *);
+void pf_anchor_remove(struct pf_rule *);
+#endif
+
+static __inline int pf_anchor_compare(struct pf_anchor *, struct pf_anchor *);
+
+RB_GENERATE(pf_anchor_global, pf_anchor, entry_global, pf_anchor_compare);
+RB_GENERATE(pf_anchor_node, pf_anchor, entry_node, pf_anchor_compare);
+
+static __inline int
+pf_anchor_compare(struct pf_anchor *a, struct pf_anchor *b)
+{
+ int c = strcmp(a->path, b->path);
+
+ return (c ? (c < 0 ? -1 : 1) : 0);
+}
+
+int
+pf_get_ruleset_number(u_int8_t action)
+{
+ switch (action) {
+ case PF_SCRUB:
+ case PF_NOSCRUB:
+ return (PF_RULESET_SCRUB);
+ break;
+ case PF_PASS:
+ case PF_DROP:
+ return (PF_RULESET_FILTER);
+ break;
+ case PF_NAT:
+ case PF_NONAT:
+ return (PF_RULESET_NAT);
+ break;
+ case PF_BINAT:
+ case PF_NOBINAT:
+ return (PF_RULESET_BINAT);
+ break;
+ case PF_RDR:
+ case PF_NORDR:
+ return (PF_RULESET_RDR);
+ break;
+ default:
+ return (PF_RULESET_MAX);
+ break;
+ }
+}
+
+void
+pf_init_ruleset(struct pf_ruleset *ruleset)
+{
+ int i;
+
+ memset(ruleset, 0, sizeof(struct pf_ruleset));
+ for (i = 0; i < PF_RULESET_MAX; i++) {
+ TAILQ_INIT(&ruleset->rules[i].queues[0]);
+ TAILQ_INIT(&ruleset->rules[i].queues[1]);
+ ruleset->rules[i].active.ptr = &ruleset->rules[i].queues[0];
+ ruleset->rules[i].inactive.ptr = &ruleset->rules[i].queues[1];
+ }
+}
+
+struct pf_anchor *
+pf_find_anchor(const char *path)
+{
+ struct pf_anchor *key, *found;
+
+	key = (struct pf_anchor *)rs_malloc(sizeof(*key));
+	if (key == NULL)
+		return (NULL);
+	memset(key, 0, sizeof(*key));
+ strlcpy(key->path, path, sizeof(key->path));
+ found = RB_FIND(pf_anchor_global, &pf_anchors, key);
+ rs_free(key);
+ return (found);
+}
+
+struct pf_ruleset *
+pf_find_ruleset(const char *path)
+{
+ struct pf_anchor *anchor;
+
+ while (*path == '/')
+ path++;
+ if (!*path)
+ return (&pf_main_ruleset);
+ anchor = pf_find_anchor(path);
+ if (anchor == NULL)
+ return (NULL);
+ else
+ return (&anchor->ruleset);
+}
+
+struct pf_ruleset *
+pf_find_or_create_ruleset(const char *path)
+{
+ char *p, *q, *r;
+ struct pf_ruleset *ruleset;
+#ifdef __FreeBSD__
+ struct pf_anchor *anchor = NULL, *dup, *parent = NULL;
+#else
+ struct pf_anchor *anchor, *dup, *parent = NULL;
+#endif
+
+ if (path[0] == 0)
+ return (&pf_main_ruleset);
+ while (*path == '/')
+ path++;
+ ruleset = pf_find_ruleset(path);
+ if (ruleset != NULL)
+ return (ruleset);
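+	/*
+	 * Walk the path from the deepest existing ancestor downwards,
+	 * creating one anchor per missing component: e.g. creating
+	 * "foo/bar/baz" with only "foo" present adds "foo/bar" and
+	 * then "foo/bar/baz", each linked under its parent.
+	 */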
+	p = (char *)rs_malloc(MAXPATHLEN);
+	if (p == NULL)
+		return (NULL);
+	bzero(p, MAXPATHLEN);
+ strlcpy(p, path, MAXPATHLEN);
+ while (parent == NULL && (q = strrchr(p, '/')) != NULL) {
+ *q = 0;
+ if ((ruleset = pf_find_ruleset(p)) != NULL) {
+ parent = ruleset->anchor;
+ break;
+ }
+ }
+ if (q == NULL)
+ q = p;
+ else
+ q++;
+ strlcpy(p, path, MAXPATHLEN);
+ if (!*q) {
+ rs_free(p);
+ return (NULL);
+ }
+ while ((r = strchr(q, '/')) != NULL || *q) {
+ if (r != NULL)
+ *r = 0;
+ if (!*q || strlen(q) >= PF_ANCHOR_NAME_SIZE ||
+ (parent != NULL && strlen(parent->path) >=
+ MAXPATHLEN - PF_ANCHOR_NAME_SIZE - 1)) {
+ rs_free(p);
+ return (NULL);
+ }
+ anchor = (struct pf_anchor *)rs_malloc(sizeof(*anchor));
+ if (anchor == NULL) {
+ rs_free(p);
+ return (NULL);
+ }
+ memset(anchor, 0, sizeof(*anchor));
+ RB_INIT(&anchor->children);
+ strlcpy(anchor->name, q, sizeof(anchor->name));
+ if (parent != NULL) {
+ strlcpy(anchor->path, parent->path,
+ sizeof(anchor->path));
+ strlcat(anchor->path, "/", sizeof(anchor->path));
+ }
+ strlcat(anchor->path, anchor->name, sizeof(anchor->path));
+ if ((dup = RB_INSERT(pf_anchor_global, &pf_anchors, anchor)) !=
+ NULL) {
+ printf("pf_find_or_create_ruleset: RB_INSERT1 "
+ "'%s' '%s' collides with '%s' '%s'\n",
+ anchor->path, anchor->name, dup->path, dup->name);
+ rs_free(anchor);
+ rs_free(p);
+ return (NULL);
+ }
+ if (parent != NULL) {
+ anchor->parent = parent;
+ if ((dup = RB_INSERT(pf_anchor_node, &parent->children,
+ anchor)) != NULL) {
+ printf("pf_find_or_create_ruleset: "
+ "RB_INSERT2 '%s' '%s' collides with "
+ "'%s' '%s'\n", anchor->path, anchor->name,
+ dup->path, dup->name);
+ RB_REMOVE(pf_anchor_global, &pf_anchors,
+ anchor);
+ rs_free(anchor);
+ rs_free(p);
+ return (NULL);
+ }
+ }
+ pf_init_ruleset(&anchor->ruleset);
+ anchor->ruleset.anchor = anchor;
+ parent = anchor;
+ if (r != NULL)
+ q = r + 1;
+ else
+ *q = 0;
+ }
+ rs_free(p);
+ return (&anchor->ruleset);
+}
+
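+/*
+ * Free a ruleset once it holds no rules, tables, children or
+ * references, then retry its parent, so an abandoned anchor chain
+ * unwinds in a single call.
+ */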
+void
+pf_remove_if_empty_ruleset(struct pf_ruleset *ruleset)
+{
+ struct pf_anchor *parent;
+ int i;
+
+ while (ruleset != NULL) {
+ if (ruleset == &pf_main_ruleset || ruleset->anchor == NULL ||
+ !RB_EMPTY(&ruleset->anchor->children) ||
+ ruleset->anchor->refcnt > 0 || ruleset->tables > 0 ||
+ ruleset->topen)
+ return;
+ for (i = 0; i < PF_RULESET_MAX; ++i)
+ if (!TAILQ_EMPTY(ruleset->rules[i].active.ptr) ||
+ !TAILQ_EMPTY(ruleset->rules[i].inactive.ptr) ||
+ ruleset->rules[i].inactive.open)
+ return;
+ RB_REMOVE(pf_anchor_global, &pf_anchors, ruleset->anchor);
+ if ((parent = ruleset->anchor->parent) != NULL)
+ RB_REMOVE(pf_anchor_node, &parent->children,
+ ruleset->anchor);
+ rs_free(ruleset->anchor);
+ if (parent == NULL)
+ return;
+ ruleset = &parent->ruleset;
+ }
+}
+
+int
+pf_anchor_setup(struct pf_rule *r, const struct pf_ruleset *s,
+ const char *name)
+{
+ char *p, *path;
+ struct pf_ruleset *ruleset;
+
+ r->anchor = NULL;
+ r->anchor_relative = 0;
+ r->anchor_wildcard = 0;
+ if (!name[0])
+ return (0);
+	path = (char *)rs_malloc(MAXPATHLEN);
+	if (path == NULL)
+		return (1);
+	bzero(path, MAXPATHLEN);
+ if (name[0] == '/')
+ strlcpy(path, name + 1, MAXPATHLEN);
+ else {
+ /* relative path */
+ r->anchor_relative = 1;
+ if (s->anchor == NULL || !s->anchor->path[0])
+ path[0] = 0;
+ else
+ strlcpy(path, s->anchor->path, MAXPATHLEN);
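+		/* Resolve leading "../" components against the current path. */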
+ while (name[0] == '.' && name[1] == '.' && name[2] == '/') {
+ if (!path[0]) {
+ printf("pf_anchor_setup: .. beyond root\n");
+ rs_free(path);
+ return (1);
+ }
+ if ((p = strrchr(path, '/')) != NULL)
+ *p = 0;
+ else
+ path[0] = 0;
+ r->anchor_relative++;
+ name += 3;
+ }
+ if (path[0])
+ strlcat(path, "/", MAXPATHLEN);
+ strlcat(path, name, MAXPATHLEN);
+ }
+ if ((p = strrchr(path, '/')) != NULL && !strcmp(p, "/*")) {
+ r->anchor_wildcard = 1;
+ *p = 0;
+ }
+ ruleset = pf_find_or_create_ruleset(path);
+ rs_free(path);
+ if (ruleset == NULL || ruleset->anchor == NULL) {
+ printf("pf_anchor_setup: ruleset\n");
+ return (1);
+ }
+ r->anchor = ruleset->anchor;
+ r->anchor->refcnt++;
+ return (0);
+}
+
+int
+pf_anchor_copyout(const struct pf_ruleset *rs, const struct pf_rule *r,
+ struct pfioc_rule *pr)
+{
+ pr->anchor_call[0] = 0;
+ if (r->anchor == NULL)
+ return (0);
+ if (!r->anchor_relative) {
+ strlcpy(pr->anchor_call, "/", sizeof(pr->anchor_call));
+ strlcat(pr->anchor_call, r->anchor->path,
+ sizeof(pr->anchor_call));
+ } else {
+ char *a, *p;
+ int i;
+
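+		/*
+		 * Reconstruct a relative call path: one "../" per level
+		 * up, then the remainder of the target anchor path.
+		 */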
+		a = (char *)rs_malloc(MAXPATHLEN);
+		if (a == NULL)
+			return (1);
+		bzero(a, MAXPATHLEN);
+ if (rs->anchor == NULL)
+ a[0] = 0;
+ else
+ strlcpy(a, rs->anchor->path, MAXPATHLEN);
+ for (i = 1; i < r->anchor_relative; ++i) {
+ if ((p = strrchr(a, '/')) == NULL)
+ p = a;
+ *p = 0;
+ strlcat(pr->anchor_call, "../",
+ sizeof(pr->anchor_call));
+ }
+ if (strncmp(a, r->anchor->path, strlen(a))) {
+ printf("pf_anchor_copyout: '%s' '%s'\n", a,
+ r->anchor->path);
+ rs_free(a);
+ return (1);
+ }
+ if (strlen(r->anchor->path) > strlen(a))
+ strlcat(pr->anchor_call, r->anchor->path + (a[0] ?
+ strlen(a) + 1 : 0), sizeof(pr->anchor_call));
+ rs_free(a);
+ }
+ if (r->anchor_wildcard)
+ strlcat(pr->anchor_call, pr->anchor_call[0] ? "/*" : "*",
+ sizeof(pr->anchor_call));
+ return (0);
+}
+
+void
+pf_anchor_remove(struct pf_rule *r)
+{
+ if (r->anchor == NULL)
+ return;
+ if (r->anchor->refcnt <= 0) {
+ printf("pf_anchor_remove: broken refcount\n");
+ r->anchor = NULL;
+ return;
+ }
+ if (!--r->anchor->refcnt)
+ pf_remove_if_empty_ruleset(&r->anchor->ruleset);
+ r->anchor = NULL;
+}
diff --git a/contrib/pf/rtems/freebsd/net/pf_subr.c b/contrib/pf/rtems/freebsd/net/pf_subr.c
new file mode 100644
index 00000000..5da77484
--- /dev/null
+++ b/contrib/pf/rtems/freebsd/net/pf_subr.c
@@ -0,0 +1,170 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include <rtems/freebsd/local/opt_inet.h>
+#include <rtems/freebsd/local/opt_inet6.h>
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/libkern.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/md5.h>
+#include <rtems/freebsd/sys/time.h>
+#include <rtems/freebsd/sys/random.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/socketvar.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/time.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/if_types.h>
+#include <rtems/freebsd/net/bpf.h>
+#include <rtems/freebsd/net/route.h>
+
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/in_var.h>
+#include <rtems/freebsd/netinet/in_systm.h>
+#include <rtems/freebsd/netinet/ip.h>
+#include <rtems/freebsd/netinet/ip_var.h>
+#include <rtems/freebsd/netinet/tcp.h>
+#include <rtems/freebsd/netinet/tcp_seq.h>
+#include <rtems/freebsd/netinet/udp.h>
+#include <rtems/freebsd/netinet/ip_icmp.h>
+#include <rtems/freebsd/netinet/in_pcb.h>
+#include <rtems/freebsd/netinet/tcp_timer.h>
+#include <rtems/freebsd/netinet/tcp_var.h>
+#include <rtems/freebsd/netinet/if_ether.h>
+#include <rtems/freebsd/net/pfvar.h>
+
+/*
+ * Following is where TCP initial sequence number generation occurs.
+ *
+ * There are two places where we must use initial sequence numbers:
+ * 1. In SYN-ACK packets.
+ * 2. In SYN packets.
+ *
+ * All ISNs for SYN-ACK packets are generated by the syncache. See
+ * tcp_syncache.c for details.
+ *
+ * The ISNs in SYN packets must be monotonic; TIME_WAIT recycling
+ * depends on this property. In addition, these ISNs should be
+ * unguessable so as to prevent connection hijacking. To satisfy
+ * the requirements of this situation, the algorithm outlined in
+ * RFC 1948 is used, with only small modifications.
+ *
+ * Implementation details:
+ *
+ * Time is based on the system timer, and is corrected so that it
+ * increases by one megabyte per second. This allows for proper
+ * recycling on high speed LANs while still leaving over an hour
+ * before rollover.
+ *
+ * As reading the *exact* system time is too expensive to be done
+ * whenever setting up a TCP connection, we increment the time
+ * offset in two ways. First, a small random positive increment
+ * is added to isn_offset for each connection that is set up.
+ * Second, the function tcp_isn_tick fires once per clock tick
+ * and increments isn_offset as necessary so that sequence numbers
+ * are incremented at approximately ISN_BYTES_PER_SECOND. The
+ * random positive increments serve only to ensure that the same
+ * exact sequence number is never sent out twice (as could otherwise
+ * happen when a port is recycled in less than the system tick
+ * interval).
+ *
+ * net.inet.tcp.isn_reseed_interval controls the number of seconds
+ * between seeding of isn_secret. This is normally set to zero,
+ * as reseeding should not be necessary.
+ *
+ * Locking of the global variables isn_secret, isn_last_reseed, isn_offset,
+ * isn_offset_old, and isn_ctx is performed using the TCP pcbinfo lock. In
+ * general, this means holding an exclusive (write) lock.
+ */
+
+#define ISN_BYTES_PER_SECOND 1048576
+#define ISN_STATIC_INCREMENT 4096
+#define ISN_RANDOM_INCREMENT (4096 - 1)
+
+static u_char pf_isn_secret[32];
+static int pf_isn_last_reseed;
+static u_int32_t pf_isn_offset;
+
+u_int32_t
+pf_new_isn(struct pf_state *s)
+{
+ MD5_CTX isn_ctx;
+ u_int32_t md5_buffer[4];
+ u_int32_t new_isn;
+ struct pf_state_host *src, *dst;
+
+	/* Seed the secret on first use; there is no periodic reseeding here. */
+ if (pf_isn_last_reseed == 0) {
+ read_random(&pf_isn_secret, sizeof(pf_isn_secret));
+ pf_isn_last_reseed = ticks;
+ }
+
+ if (s->direction == PF_IN) {
+ src = &s->ext;
+ dst = &s->gwy;
+ } else {
+ src = &s->lan;
+ dst = &s->ext;
+ }
+
+ /* Compute the md5 hash and return the ISN. */
+ MD5Init(&isn_ctx);
+ MD5Update(&isn_ctx, (u_char *) &dst->port, sizeof(u_short));
+ MD5Update(&isn_ctx, (u_char *) &src->port, sizeof(u_short));
+#ifdef INET6
+ if (s->af == AF_INET6) {
+ MD5Update(&isn_ctx, (u_char *) &dst->addr,
+ sizeof(struct in6_addr));
+ MD5Update(&isn_ctx, (u_char *) &src->addr,
+ sizeof(struct in6_addr));
+ } else
+#endif
+ {
+ MD5Update(&isn_ctx, (u_char *) &dst->addr,
+ sizeof(struct in_addr));
+ MD5Update(&isn_ctx, (u_char *) &src->addr,
+ sizeof(struct in_addr));
+ }
+ MD5Update(&isn_ctx, (u_char *) &pf_isn_secret, sizeof(pf_isn_secret));
+ MD5Final((u_char *) &md5_buffer, &isn_ctx);
+ new_isn = (tcp_seq) md5_buffer[0];
+ pf_isn_offset += ISN_STATIC_INCREMENT +
+ (arc4random() & ISN_RANDOM_INCREMENT);
+ new_isn += pf_isn_offset;
+ return (new_isn);
+}
diff --git a/contrib/pf/rtems/freebsd/net/pf_table.c b/contrib/pf/rtems/freebsd/net/pf_table.c
new file mode 100644
index 00000000..391077df
--- /dev/null
+++ b/contrib/pf/rtems/freebsd/net/pf_table.c
@@ -0,0 +1,2363 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/* $OpenBSD: pf_table.c,v 1.68 2006/05/02 10:08:45 dhartmei Exp $ */
+
+/*
+ * Copyright (c) 2002 Cedric Berger
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifdef __FreeBSD__
+#include <rtems/freebsd/local/opt_inet.h>
+#include <rtems/freebsd/local/opt_inet6.h>
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+#endif
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/rwlock.h>
+#ifdef __FreeBSD__
+#include <rtems/freebsd/sys/malloc.h>
+#endif
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/route.h>
+#include <rtems/freebsd/netinet/in.h>
+#ifndef __FreeBSD__
+#include <rtems/freebsd/netinet/ip_ipsp.h>
+#endif
+
+#include <rtems/freebsd/net/pfvar.h>
+
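+/* Reject the ioctl if it carries any flag outside the allowed set. */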
+#define ACCEPT_FLAGS(oklist) \
+ do { \
+ if ((flags & ~(oklist)) & \
+ PFR_FLAG_ALLMASK) \
+ return (EINVAL); \
+ } while (0)
+
+#ifdef __FreeBSD__
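+/*
+ * copyin/copyout may fault and sleep, so the PF lock is dropped around
+ * them and reacquired afterwards.
+ */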
+static inline int
+_copyin(const void *uaddr, void *kaddr, size_t len)
+{
+ int r;
+
+ PF_UNLOCK();
+ r = copyin(uaddr, kaddr, len);
+ PF_LOCK();
+
+ return (r);
+}
+
+static inline int
+_copyout(const void *uaddr, void *kaddr, size_t len)
+{
+ int r;
+
+ PF_UNLOCK();
+ r = copyout(uaddr, kaddr, len);
+ PF_LOCK();
+
+ return (r);
+}
+
+#define COPYIN(from, to, size) \
+ ((flags & PFR_FLAG_USERIOCTL) ? \
+ _copyin((from), (to), (size)) : \
+ (bcopy((from), (to), (size)), 0))
+
+#define COPYOUT(from, to, size) \
+ ((flags & PFR_FLAG_USERIOCTL) ? \
+ _copyout((from), (to), (size)) : \
+ (bcopy((from), (to), (size)), 0))
+
+#else
+
+#define COPYIN(from, to, size) \
+ ((flags & PFR_FLAG_USERIOCTL) ? \
+ copyin((from), (to), (size)) : \
+ (bcopy((from), (to), (size)), 0))
+
+#define COPYOUT(from, to, size) \
+ ((flags & PFR_FLAG_USERIOCTL) ? \
+ copyout((from), (to), (size)) : \
+ (bcopy((from), (to), (size)), 0))
+
+#endif
+
+#define FILLIN_SIN(sin, addr) \
+ do { \
+ (sin).sin_len = sizeof(sin); \
+ (sin).sin_family = AF_INET; \
+ (sin).sin_addr = (addr); \
+ } while (0)
+
+#define FILLIN_SIN6(sin6, addr) \
+ do { \
+ (sin6).sin6_len = sizeof(sin6); \
+ (sin6).sin6_family = AF_INET6; \
+ (sin6).sin6_addr = (addr); \
+ } while (0)
+
+#define SWAP(type, a1, a2) \
+ do { \
+ type tmp = a1; \
+ a1 = a2; \
+ a2 = tmp; \
+ } while (0)
+
+#define SUNION2PF(su, af) (((af)==AF_INET) ? \
+ (struct pf_addr *)&(su)->sin.sin_addr : \
+ (struct pf_addr *)&(su)->sin6.sin6_addr)
+
+#define AF_BITS(af) (((af)==AF_INET)?32:128)
+#define ADDR_NETWORK(ad) ((ad)->pfra_net < AF_BITS((ad)->pfra_af))
+#define KENTRY_NETWORK(ke) ((ke)->pfrke_net < AF_BITS((ke)->pfrke_af))
+#define KENTRY_RNF_ROOT(ke) \
+ ((((struct radix_node *)(ke))->rn_flags & RNF_ROOT) != 0)
+
+#define NO_ADDRESSES (-1)
+#define ENQUEUE_UNMARKED_ONLY (1)
+#define INVERT_NEG_FLAG (1)
+
+struct pfr_walktree {
+ enum pfrw_op {
+ PFRW_MARK,
+ PFRW_SWEEP,
+ PFRW_ENQUEUE,
+ PFRW_GET_ADDRS,
+ PFRW_GET_ASTATS,
+ PFRW_POOL_GET,
+ PFRW_DYNADDR_UPDATE
+ } pfrw_op;
+ union {
+ struct pfr_addr *pfrw1_addr;
+ struct pfr_astats *pfrw1_astats;
+ struct pfr_kentryworkq *pfrw1_workq;
+ struct pfr_kentry *pfrw1_kentry;
+ struct pfi_dynaddr *pfrw1_dyn;
+ } pfrw_1;
+ int pfrw_free;
+ int pfrw_flags;
+};
+#define pfrw_addr pfrw_1.pfrw1_addr
+#define pfrw_astats pfrw_1.pfrw1_astats
+#define pfrw_workq pfrw_1.pfrw1_workq
+#define pfrw_kentry pfrw_1.pfrw1_kentry
+#define pfrw_dyn pfrw_1.pfrw1_dyn
+#define pfrw_cnt pfrw_free
+
+#define senderr(e) do { rv = (e); goto _bad; } while (0)
+
+#ifdef __FreeBSD__
+uma_zone_t pfr_ktable_pl;
+uma_zone_t pfr_kentry_pl;
+uma_zone_t pfr_kentry_pl2;
+#else
+struct pool pfr_ktable_pl;
+struct pool pfr_kentry_pl;
+struct pool pfr_kentry_pl2;
+#endif
+struct sockaddr_in pfr_sin;
+struct sockaddr_in6 pfr_sin6;
+union sockaddr_union pfr_mask;
+struct pf_addr pfr_ffaddr;
+
+void pfr_copyout_addr(struct pfr_addr *,
+ struct pfr_kentry *ke);
+int pfr_validate_addr(struct pfr_addr *);
+void pfr_enqueue_addrs(struct pfr_ktable *,
+ struct pfr_kentryworkq *, int *, int);
+void pfr_mark_addrs(struct pfr_ktable *);
+struct pfr_kentry *pfr_lookup_addr(struct pfr_ktable *,
+ struct pfr_addr *, int);
+struct pfr_kentry *pfr_create_kentry(struct pfr_addr *, int);
+void pfr_destroy_kentries(struct pfr_kentryworkq *);
+void pfr_destroy_kentry(struct pfr_kentry *);
+void pfr_insert_kentries(struct pfr_ktable *,
+ struct pfr_kentryworkq *, long);
+void pfr_remove_kentries(struct pfr_ktable *,
+ struct pfr_kentryworkq *);
+void pfr_clstats_kentries(struct pfr_kentryworkq *, long,
+ int);
+void pfr_reset_feedback(struct pfr_addr *, int, int);
+void pfr_prepare_network(union sockaddr_union *, int, int);
+int pfr_route_kentry(struct pfr_ktable *,
+ struct pfr_kentry *);
+int pfr_unroute_kentry(struct pfr_ktable *,
+ struct pfr_kentry *);
+int pfr_walktree(struct radix_node *, void *);
+int pfr_validate_table(struct pfr_table *, int, int);
+int pfr_fix_anchor(char *);
+void pfr_commit_ktable(struct pfr_ktable *, long);
+void pfr_insert_ktables(struct pfr_ktableworkq *);
+void pfr_insert_ktable(struct pfr_ktable *);
+void pfr_setflags_ktables(struct pfr_ktableworkq *);
+void pfr_setflags_ktable(struct pfr_ktable *, int);
+void pfr_clstats_ktables(struct pfr_ktableworkq *, long,
+ int);
+void pfr_clstats_ktable(struct pfr_ktable *, long, int);
+struct pfr_ktable *pfr_create_ktable(struct pfr_table *, long, int);
+void pfr_destroy_ktables(struct pfr_ktableworkq *, int);
+void pfr_destroy_ktable(struct pfr_ktable *, int);
+int pfr_ktable_compare(struct pfr_ktable *,
+ struct pfr_ktable *);
+struct pfr_ktable *pfr_lookup_table(struct pfr_table *);
+void pfr_clean_node_mask(struct pfr_ktable *,
+ struct pfr_kentryworkq *);
+int pfr_table_count(struct pfr_table *, int);
+int pfr_skip_table(struct pfr_table *,
+ struct pfr_ktable *, int);
+struct pfr_kentry *pfr_kentry_byidx(struct pfr_ktable *, int, int);
+
+RB_PROTOTYPE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);
+RB_GENERATE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);
+
+struct pfr_ktablehead pfr_ktables;
+struct pfr_table pfr_nulltable;
+int pfr_ktable_cnt;
+
+void
+pfr_initialize(void)
+{
+#ifndef __FreeBSD__
+ pool_init(&pfr_ktable_pl, sizeof(struct pfr_ktable), 0, 0, 0,
+ "pfrktable", &pool_allocator_oldnointr);
+ pool_init(&pfr_kentry_pl, sizeof(struct pfr_kentry), 0, 0, 0,
+ "pfrkentry", &pool_allocator_oldnointr);
+ pool_init(&pfr_kentry_pl2, sizeof(struct pfr_kentry), 0, 0, 0,
+ "pfrkentry2", NULL);
+#endif
+
+ pfr_sin.sin_len = sizeof(pfr_sin);
+ pfr_sin.sin_family = AF_INET;
+ pfr_sin6.sin6_len = sizeof(pfr_sin6);
+ pfr_sin6.sin6_family = AF_INET6;
+
+ memset(&pfr_ffaddr, 0xff, sizeof(pfr_ffaddr));
+}
+
+int
+pfr_clr_addrs(struct pfr_table *tbl, int *ndel, int flags)
+{
+ struct pfr_ktable *kt;
+ struct pfr_kentryworkq workq;
+ int s;
+
+ ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY);
+ if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
+ return (EINVAL);
+ kt = pfr_lookup_table(tbl);
+ if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
+ return (ESRCH);
+ if (kt->pfrkt_flags & PFR_TFLAG_CONST)
+ return (EPERM);
+ pfr_enqueue_addrs(kt, &workq, ndel, 0);
+
+ if (!(flags & PFR_FLAG_DUMMY)) {
+ s = 0;
+ if (flags & PFR_FLAG_ATOMIC)
+ s = splsoftnet();
+ pfr_remove_kentries(kt, &workq);
+ if (flags & PFR_FLAG_ATOMIC)
+ splx(s);
+ if (kt->pfrkt_cnt) {
+ printf("pfr_clr_addrs: corruption detected (%d).\n",
+ kt->pfrkt_cnt);
+ kt->pfrkt_cnt = 0;
+ }
+ }
+ return (0);
+}
+
+int
+pfr_add_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
+ int *nadd, int flags)
+{
+ struct pfr_ktable *kt, *tmpkt;
+ struct pfr_kentryworkq workq;
+ struct pfr_kentry *p, *q;
+ struct pfr_addr ad;
+ int i, rv, s = 0, xadd = 0;
+ long tzero = time_second;
+
+ ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_FEEDBACK);
+ if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
+ return (EINVAL);
+ kt = pfr_lookup_table(tbl);
+ if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
+ return (ESRCH);
+ if (kt->pfrkt_flags & PFR_TFLAG_CONST)
+ return (EPERM);
+ tmpkt = pfr_create_ktable(&pfr_nulltable, 0, 0);
+ if (tmpkt == NULL)
+ return (ENOMEM);
+ SLIST_INIT(&workq);
+ for (i = 0; i < size; i++) {
+ if (COPYIN(addr+i, &ad, sizeof(ad)))
+ senderr(EFAULT);
+ if (pfr_validate_addr(&ad))
+ senderr(EINVAL);
+ p = pfr_lookup_addr(kt, &ad, 1);
+ q = pfr_lookup_addr(tmpkt, &ad, 1);
+ if (flags & PFR_FLAG_FEEDBACK) {
+ if (q != NULL)
+ ad.pfra_fback = PFR_FB_DUPLICATE;
+ else if (p == NULL)
+ ad.pfra_fback = PFR_FB_ADDED;
+ else if (p->pfrke_not != ad.pfra_not)
+ ad.pfra_fback = PFR_FB_CONFLICT;
+ else
+ ad.pfra_fback = PFR_FB_NONE;
+ }
+ if (p == NULL && q == NULL) {
+ p = pfr_create_kentry(&ad, 0);
+ if (p == NULL)
+ senderr(ENOMEM);
+ if (pfr_route_kentry(tmpkt, p)) {
+ pfr_destroy_kentry(p);
+ ad.pfra_fback = PFR_FB_NONE;
+ } else {
+ SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
+ xadd++;
+ }
+ }
+ if (flags & PFR_FLAG_FEEDBACK) {
+ if (COPYOUT(&ad, addr+i, sizeof(ad)))
+ senderr(EFAULT);
+ }
+ }
+ pfr_clean_node_mask(tmpkt, &workq);
+ if (!(flags & PFR_FLAG_DUMMY)) {
+ if (flags & PFR_FLAG_ATOMIC)
+ s = splsoftnet();
+ pfr_insert_kentries(kt, &workq, tzero);
+ if (flags & PFR_FLAG_ATOMIC)
+ splx(s);
+ } else
+ pfr_destroy_kentries(&workq);
+ if (nadd != NULL)
+ *nadd = xadd;
+ pfr_destroy_ktable(tmpkt, 0);
+ return (0);
+_bad:
+ pfr_clean_node_mask(tmpkt, &workq);
+ pfr_destroy_kentries(&workq);
+ if (flags & PFR_FLAG_FEEDBACK)
+ pfr_reset_feedback(addr, size, flags);
+ pfr_destroy_ktable(tmpkt, 0);
+ return (rv);
+}
+
+int
+pfr_del_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
+ int *ndel, int flags)
+{
+ struct pfr_ktable *kt;
+ struct pfr_kentryworkq workq;
+ struct pfr_kentry *p;
+ struct pfr_addr ad;
+ int i, rv, s = 0, xdel = 0, log = 1;
+
+ ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_FEEDBACK);
+ if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
+ return (EINVAL);
+ kt = pfr_lookup_table(tbl);
+ if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
+ return (ESRCH);
+ if (kt->pfrkt_flags & PFR_TFLAG_CONST)
+ return (EPERM);
+	/*
+	 * There are two algorithms to choose from here, with:
+	 *   n: number of addresses to delete
+	 *   N: number of addresses in the table
+	 *
+	 * One is O(N) and is better for large 'n';
+	 * the other is O(n*LOG(N)) and is better for small 'n'.
+	 *
+	 * The following code tries to decide which one is best.
+	 */
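+	/* log ends up as roughly log2(N) + 1; the full scan wins for n > N/log. */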
+ for (i = kt->pfrkt_cnt; i > 0; i >>= 1)
+ log++;
+ if (size > kt->pfrkt_cnt/log) {
+ /* full table scan */
+ pfr_mark_addrs(kt);
+ } else {
+ /* iterate over addresses to delete */
+ for (i = 0; i < size; i++) {
+ if (COPYIN(addr+i, &ad, sizeof(ad)))
+ return (EFAULT);
+ if (pfr_validate_addr(&ad))
+ return (EINVAL);
+ p = pfr_lookup_addr(kt, &ad, 1);
+ if (p != NULL)
+ p->pfrke_mark = 0;
+ }
+ }
+ SLIST_INIT(&workq);
+ for (i = 0; i < size; i++) {
+ if (COPYIN(addr+i, &ad, sizeof(ad)))
+ senderr(EFAULT);
+ if (pfr_validate_addr(&ad))
+ senderr(EINVAL);
+ p = pfr_lookup_addr(kt, &ad, 1);
+ if (flags & PFR_FLAG_FEEDBACK) {
+ if (p == NULL)
+ ad.pfra_fback = PFR_FB_NONE;
+ else if (p->pfrke_not != ad.pfra_not)
+ ad.pfra_fback = PFR_FB_CONFLICT;
+ else if (p->pfrke_mark)
+ ad.pfra_fback = PFR_FB_DUPLICATE;
+ else
+ ad.pfra_fback = PFR_FB_DELETED;
+ }
+ if (p != NULL && p->pfrke_not == ad.pfra_not &&
+ !p->pfrke_mark) {
+ p->pfrke_mark = 1;
+ SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
+ xdel++;
+ }
+ if (flags & PFR_FLAG_FEEDBACK)
+ if (COPYOUT(&ad, addr+i, sizeof(ad)))
+ senderr(EFAULT);
+ }
+ if (!(flags & PFR_FLAG_DUMMY)) {
+ if (flags & PFR_FLAG_ATOMIC)
+ s = splsoftnet();
+ pfr_remove_kentries(kt, &workq);
+ if (flags & PFR_FLAG_ATOMIC)
+ splx(s);
+ }
+ if (ndel != NULL)
+ *ndel = xdel;
+ return (0);
+_bad:
+ if (flags & PFR_FLAG_FEEDBACK)
+ pfr_reset_feedback(addr, size, flags);
+ return (rv);
+}
+
+int
+pfr_set_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
+ int *size2, int *nadd, int *ndel, int *nchange, int flags,
+ u_int32_t ignore_pfrt_flags)
+{
+ struct pfr_ktable *kt, *tmpkt;
+ struct pfr_kentryworkq addq, delq, changeq;
+ struct pfr_kentry *p, *q;
+ struct pfr_addr ad;
+ int i, rv, s = 0, xadd = 0, xdel = 0, xchange = 0;
+ long tzero = time_second;
+
+ ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_FEEDBACK);
+ if (pfr_validate_table(tbl, ignore_pfrt_flags, flags &
+ PFR_FLAG_USERIOCTL))
+ return (EINVAL);
+ kt = pfr_lookup_table(tbl);
+ if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
+ return (ESRCH);
+ if (kt->pfrkt_flags & PFR_TFLAG_CONST)
+ return (EPERM);
+ tmpkt = pfr_create_ktable(&pfr_nulltable, 0, 0);
+ if (tmpkt == NULL)
+ return (ENOMEM);
+ pfr_mark_addrs(kt);
+ SLIST_INIT(&addq);
+ SLIST_INIT(&delq);
+ SLIST_INIT(&changeq);
+ for (i = 0; i < size; i++) {
+ if (COPYIN(addr+i, &ad, sizeof(ad)))
+ senderr(EFAULT);
+ if (pfr_validate_addr(&ad))
+ senderr(EINVAL);
+ ad.pfra_fback = PFR_FB_NONE;
+ p = pfr_lookup_addr(kt, &ad, 1);
+ if (p != NULL) {
+ if (p->pfrke_mark) {
+ ad.pfra_fback = PFR_FB_DUPLICATE;
+ goto _skip;
+ }
+ p->pfrke_mark = 1;
+ if (p->pfrke_not != ad.pfra_not) {
+ SLIST_INSERT_HEAD(&changeq, p, pfrke_workq);
+ ad.pfra_fback = PFR_FB_CHANGED;
+ xchange++;
+ }
+ } else {
+ q = pfr_lookup_addr(tmpkt, &ad, 1);
+ if (q != NULL) {
+ ad.pfra_fback = PFR_FB_DUPLICATE;
+ goto _skip;
+ }
+ p = pfr_create_kentry(&ad, 0);
+ if (p == NULL)
+ senderr(ENOMEM);
+ if (pfr_route_kentry(tmpkt, p)) {
+ pfr_destroy_kentry(p);
+ ad.pfra_fback = PFR_FB_NONE;
+ } else {
+ SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
+ ad.pfra_fback = PFR_FB_ADDED;
+ xadd++;
+ }
+ }
+_skip:
+ if (flags & PFR_FLAG_FEEDBACK)
+ if (COPYOUT(&ad, addr+i, sizeof(ad)))
+ senderr(EFAULT);
+ }
+ pfr_enqueue_addrs(kt, &delq, &xdel, ENQUEUE_UNMARKED_ONLY);
+ if ((flags & PFR_FLAG_FEEDBACK) && *size2) {
+ if (*size2 < size+xdel) {
+ *size2 = size+xdel;
+ senderr(0);
+ }
+ i = 0;
+ SLIST_FOREACH(p, &delq, pfrke_workq) {
+ pfr_copyout_addr(&ad, p);
+ ad.pfra_fback = PFR_FB_DELETED;
+ if (COPYOUT(&ad, addr+size+i, sizeof(ad)))
+ senderr(EFAULT);
+ i++;
+ }
+ }
+ pfr_clean_node_mask(tmpkt, &addq);
+ if (!(flags & PFR_FLAG_DUMMY)) {
+ if (flags & PFR_FLAG_ATOMIC)
+ s = splsoftnet();
+ pfr_insert_kentries(kt, &addq, tzero);
+ pfr_remove_kentries(kt, &delq);
+ pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
+ if (flags & PFR_FLAG_ATOMIC)
+ splx(s);
+ } else
+ pfr_destroy_kentries(&addq);
+ if (nadd != NULL)
+ *nadd = xadd;
+ if (ndel != NULL)
+ *ndel = xdel;
+ if (nchange != NULL)
+ *nchange = xchange;
+ if ((flags & PFR_FLAG_FEEDBACK) && size2)
+ *size2 = size+xdel;
+ pfr_destroy_ktable(tmpkt, 0);
+ return (0);
+_bad:
+ pfr_clean_node_mask(tmpkt, &addq);
+ pfr_destroy_kentries(&addq);
+ if (flags & PFR_FLAG_FEEDBACK)
+ pfr_reset_feedback(addr, size, flags);
+ pfr_destroy_ktable(tmpkt, 0);
+ return (rv);
+}
+
+int
+pfr_tst_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
+ int *nmatch, int flags)
+{
+ struct pfr_ktable *kt;
+ struct pfr_kentry *p;
+ struct pfr_addr ad;
+ int i, xmatch = 0;
+
+ ACCEPT_FLAGS(PFR_FLAG_REPLACE);
+ if (pfr_validate_table(tbl, 0, 0))
+ return (EINVAL);
+ kt = pfr_lookup_table(tbl);
+ if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
+ return (ESRCH);
+
+ for (i = 0; i < size; i++) {
+ if (COPYIN(addr+i, &ad, sizeof(ad)))
+ return (EFAULT);
+ if (pfr_validate_addr(&ad))
+ return (EINVAL);
+ if (ADDR_NETWORK(&ad))
+ return (EINVAL);
+ p = pfr_lookup_addr(kt, &ad, 0);
+ if (flags & PFR_FLAG_REPLACE)
+ pfr_copyout_addr(&ad, p);
+ ad.pfra_fback = (p == NULL) ? PFR_FB_NONE :
+ (p->pfrke_not ? PFR_FB_NOTMATCH : PFR_FB_MATCH);
+ if (p != NULL && !p->pfrke_not)
+ xmatch++;
+ if (COPYOUT(&ad, addr+i, sizeof(ad)))
+ return (EFAULT);
+ }
+ if (nmatch != NULL)
+ *nmatch = xmatch;
+ return (0);
+}
+
+int
+pfr_get_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int *size,
+ int flags)
+{
+ struct pfr_ktable *kt;
+ struct pfr_walktree w;
+ int rv;
+
+ ACCEPT_FLAGS(0);
+ if (pfr_validate_table(tbl, 0, 0))
+ return (EINVAL);
+ kt = pfr_lookup_table(tbl);
+ if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
+ return (ESRCH);
+ if (kt->pfrkt_cnt > *size) {
+ *size = kt->pfrkt_cnt;
+ return (0);
+ }
+
+ bzero(&w, sizeof(w));
+ w.pfrw_op = PFRW_GET_ADDRS;
+ w.pfrw_addr = addr;
+ w.pfrw_free = kt->pfrkt_cnt;
+ w.pfrw_flags = flags;
+#ifdef __FreeBSD__
+ rv = kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
+#else
+ rv = rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
+#endif
+ if (!rv)
+#ifdef __FreeBSD__
+ rv = kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree,
+ &w);
+#else
+ rv = rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
+#endif
+ if (rv)
+ return (rv);
+
+ if (w.pfrw_free) {
+ printf("pfr_get_addrs: corruption detected (%d).\n",
+ w.pfrw_free);
+ return (ENOTTY);
+ }
+ *size = kt->pfrkt_cnt;
+ return (0);
+}
+
+int
+pfr_get_astats(struct pfr_table *tbl, struct pfr_astats *addr, int *size,
+ int flags)
+{
+ struct pfr_ktable *kt;
+ struct pfr_walktree w;
+ struct pfr_kentryworkq workq;
+ int rv, s = 0;
+ long tzero = time_second;
+
+ ACCEPT_FLAGS(PFR_FLAG_ATOMIC); /* XXX PFR_FLAG_CLSTATS disabled */
+ if (pfr_validate_table(tbl, 0, 0))
+ return (EINVAL);
+ kt = pfr_lookup_table(tbl);
+ if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
+ return (ESRCH);
+ if (kt->pfrkt_cnt > *size) {
+ *size = kt->pfrkt_cnt;
+ return (0);
+ }
+
+ bzero(&w, sizeof(w));
+ w.pfrw_op = PFRW_GET_ASTATS;
+ w.pfrw_astats = addr;
+ w.pfrw_free = kt->pfrkt_cnt;
+ w.pfrw_flags = flags;
+ if (flags & PFR_FLAG_ATOMIC)
+ s = splsoftnet();
+#ifdef __FreeBSD__
+ rv = kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
+#else
+ rv = rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
+#endif
+ if (!rv)
+#ifdef __FreeBSD__
+ rv = kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree,
+ &w);
+#else
+ rv = rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
+#endif
+ if (!rv && (flags & PFR_FLAG_CLSTATS)) {
+ pfr_enqueue_addrs(kt, &workq, NULL, 0);
+ pfr_clstats_kentries(&workq, tzero, 0);
+ }
+ if (flags & PFR_FLAG_ATOMIC)
+ splx(s);
+ if (rv)
+ return (rv);
+
+ if (w.pfrw_free) {
+ printf("pfr_get_astats: corruption detected (%d).\n",
+ w.pfrw_free);
+ return (ENOTTY);
+ }
+ *size = kt->pfrkt_cnt;
+ return (0);
+}
+
+int
+pfr_clr_astats(struct pfr_table *tbl, struct pfr_addr *addr, int size,
+ int *nzero, int flags)
+{
+ struct pfr_ktable *kt;
+ struct pfr_kentryworkq workq;
+ struct pfr_kentry *p;
+ struct pfr_addr ad;
+ int i, rv, s = 0, xzero = 0;
+
+ ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_FEEDBACK);
+ if (pfr_validate_table(tbl, 0, 0))
+ return (EINVAL);
+ kt = pfr_lookup_table(tbl);
+ if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
+ return (ESRCH);
+ SLIST_INIT(&workq);
+ for (i = 0; i < size; i++) {
+ if (COPYIN(addr+i, &ad, sizeof(ad)))
+ senderr(EFAULT);
+ if (pfr_validate_addr(&ad))
+ senderr(EINVAL);
+ p = pfr_lookup_addr(kt, &ad, 1);
+ if (flags & PFR_FLAG_FEEDBACK) {
+ ad.pfra_fback = (p != NULL) ?
+ PFR_FB_CLEARED : PFR_FB_NONE;
+ if (COPYOUT(&ad, addr+i, sizeof(ad)))
+ senderr(EFAULT);
+ }
+ if (p != NULL) {
+ SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
+ xzero++;
+ }
+ }
+
+ if (!(flags & PFR_FLAG_DUMMY)) {
+ if (flags & PFR_FLAG_ATOMIC)
+ s = splsoftnet();
+ pfr_clstats_kentries(&workq, 0, 0);
+ if (flags & PFR_FLAG_ATOMIC)
+ splx(s);
+ }
+ if (nzero != NULL)
+ *nzero = xzero;
+ return (0);
+_bad:
+ if (flags & PFR_FLAG_FEEDBACK)
+ pfr_reset_feedback(addr, size, flags);
+ return (rv);
+}
+
+int
+pfr_validate_addr(struct pfr_addr *ad)
+{
+ int i;
+
+ switch (ad->pfra_af) {
+#ifdef INET
+ case AF_INET:
+ if (ad->pfra_net > 32)
+ return (-1);
+ break;
+#endif /* INET */
+#ifdef INET6
+ case AF_INET6:
+ if (ad->pfra_net > 128)
+ return (-1);
+ break;
+#endif /* INET6 */
+ default:
+ return (-1);
+ }
+ if (ad->pfra_net < 128 &&
+ (((caddr_t)ad)[ad->pfra_net/8] & (0xFF >> (ad->pfra_net%8))))
+ return (-1);
+ for (i = (ad->pfra_net+7)/8; i < sizeof(ad->pfra_u); i++)
+ if (((caddr_t)ad)[i])
+ return (-1);
+ if (ad->pfra_not && ad->pfra_not != 1)
+ return (-1);
+ if (ad->pfra_fback)
+ return (-1);
+ return (0);
+}
+
+void
+pfr_enqueue_addrs(struct pfr_ktable *kt, struct pfr_kentryworkq *workq,
+ int *naddr, int sweep)
+{
+ struct pfr_walktree w;
+
+ SLIST_INIT(workq);
+ bzero(&w, sizeof(w));
+ w.pfrw_op = sweep ? PFRW_SWEEP : PFRW_ENQUEUE;
+ w.pfrw_workq = workq;
+ if (kt->pfrkt_ip4 != NULL)
+#ifdef __FreeBSD__
+ if (kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree,
+ &w))
+#else
+ if (rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w))
+#endif
+ printf("pfr_enqueue_addrs: IPv4 walktree failed.\n");
+ if (kt->pfrkt_ip6 != NULL)
+#ifdef __FreeBSD__
+ if (kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree,
+ &w))
+#else
+ if (rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w))
+#endif
+ printf("pfr_enqueue_addrs: IPv6 walktree failed.\n");
+ if (naddr != NULL)
+ *naddr = w.pfrw_cnt;
+}
+
+void
+pfr_mark_addrs(struct pfr_ktable *kt)
+{
+ struct pfr_walktree w;
+
+ bzero(&w, sizeof(w));
+ w.pfrw_op = PFRW_MARK;
+#ifdef __FreeBSD__
+ if (kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w))
+#else
+ if (rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w))
+#endif
+ printf("pfr_mark_addrs: IPv4 walktree failed.\n");
+#ifdef __FreeBSD__
+ if (kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w))
+#else
+ if (rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w))
+#endif
+ printf("pfr_mark_addrs: IPv6 walktree failed.\n");
+}
+
+struct pfr_kentry *
+pfr_lookup_addr(struct pfr_ktable *kt, struct pfr_addr *ad, int exact)
+{
+ union sockaddr_union sa, mask;
+ struct radix_node_head *head = NULL; /* make the compiler happy */
+ struct pfr_kentry *ke;
+ int s;
+
+ bzero(&sa, sizeof(sa));
+ if (ad->pfra_af == AF_INET) {
+ FILLIN_SIN(sa.sin, ad->pfra_ip4addr);
+ head = kt->pfrkt_ip4;
+	} else if (ad->pfra_af == AF_INET6) {
+ FILLIN_SIN6(sa.sin6, ad->pfra_ip6addr);
+ head = kt->pfrkt_ip6;
+ }
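+	/*
+	 * Network entries require an exact lookup with their mask; host
+	 * entries use best-match and may reject network results below.
+	 */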
+ if (ADDR_NETWORK(ad)) {
+ pfr_prepare_network(&mask, ad->pfra_af, ad->pfra_net);
+ s = splsoftnet(); /* rn_lookup makes use of globals */
+#ifdef __FreeBSD__
+ PF_ASSERT(MA_OWNED);
+#endif
+ ke = (struct pfr_kentry *)rn_lookup(&sa, &mask, head);
+ splx(s);
+ if (ke && KENTRY_RNF_ROOT(ke))
+ ke = NULL;
+ } else {
+ ke = (struct pfr_kentry *)rn_match(&sa, head);
+ if (ke && KENTRY_RNF_ROOT(ke))
+ ke = NULL;
+ if (exact && ke && KENTRY_NETWORK(ke))
+ ke = NULL;
+ }
+ return (ke);
+}
+
+struct pfr_kentry *
+pfr_create_kentry(struct pfr_addr *ad, int intr)
+{
+ struct pfr_kentry *ke;
+
+ if (intr)
+ ke = pool_get(&pfr_kentry_pl2, PR_NOWAIT);
+ else
+ ke = pool_get(&pfr_kentry_pl, PR_NOWAIT);
+ if (ke == NULL)
+ return (NULL);
+ bzero(ke, sizeof(*ke));
+
+ if (ad->pfra_af == AF_INET)
+ FILLIN_SIN(ke->pfrke_sa.sin, ad->pfra_ip4addr);
+ else if (ad->pfra_af == AF_INET6)
+ FILLIN_SIN6(ke->pfrke_sa.sin6, ad->pfra_ip6addr);
+ ke->pfrke_af = ad->pfra_af;
+ ke->pfrke_net = ad->pfra_net;
+ ke->pfrke_not = ad->pfra_not;
+ ke->pfrke_intrpool = intr;
+ return (ke);
+}
+
+void
+pfr_destroy_kentries(struct pfr_kentryworkq *workq)
+{
+ struct pfr_kentry *p, *q;
+
+ for (p = SLIST_FIRST(workq); p != NULL; p = q) {
+ q = SLIST_NEXT(p, pfrke_workq);
+ pfr_destroy_kentry(p);
+ }
+}
+
+void
+pfr_destroy_kentry(struct pfr_kentry *ke)
+{
+ if (ke->pfrke_intrpool)
+ pool_put(&pfr_kentry_pl2, ke);
+ else
+ pool_put(&pfr_kentry_pl, ke);
+}
+
+void
+pfr_insert_kentries(struct pfr_ktable *kt,
+ struct pfr_kentryworkq *workq, long tzero)
+{
+ struct pfr_kentry *p;
+ int rv, n = 0;
+
+ SLIST_FOREACH(p, workq, pfrke_workq) {
+ rv = pfr_route_kentry(kt, p);
+ if (rv) {
+ printf("pfr_insert_kentries: cannot route entry "
+ "(code=%d).\n", rv);
+ break;
+ }
+ p->pfrke_tzero = tzero;
+ n++;
+ }
+ kt->pfrkt_cnt += n;
+}
+
+int
+pfr_insert_kentry(struct pfr_ktable *kt, struct pfr_addr *ad, long tzero)
+{
+ struct pfr_kentry *p;
+ int rv;
+
+ p = pfr_lookup_addr(kt, ad, 1);
+ if (p != NULL)
+ return (0);
+ p = pfr_create_kentry(ad, 1);
+ if (p == NULL)
+ return (EINVAL);
+
+ rv = pfr_route_kentry(kt, p);
+ if (rv)
+ return (rv);
+
+ p->pfrke_tzero = tzero;
+ kt->pfrkt_cnt++;
+
+ return (0);
+}
+
+void
+pfr_remove_kentries(struct pfr_ktable *kt,
+ struct pfr_kentryworkq *workq)
+{
+ struct pfr_kentry *p;
+ int n = 0;
+
+ SLIST_FOREACH(p, workq, pfrke_workq) {
+ pfr_unroute_kentry(kt, p);
+ n++;
+ }
+ kt->pfrkt_cnt -= n;
+ pfr_destroy_kentries(workq);
+}
+
+void
+pfr_clean_node_mask(struct pfr_ktable *kt,
+ struct pfr_kentryworkq *workq)
+{
+ struct pfr_kentry *p;
+
+ SLIST_FOREACH(p, workq, pfrke_workq)
+ pfr_unroute_kentry(kt, p);
+}
+
+void
+pfr_clstats_kentries(struct pfr_kentryworkq *workq, long tzero, int negchange)
+{
+ struct pfr_kentry *p;
+ int s;
+
+ SLIST_FOREACH(p, workq, pfrke_workq) {
+ s = splsoftnet();
+ if (negchange)
+ p->pfrke_not = !p->pfrke_not;
+ bzero(p->pfrke_packets, sizeof(p->pfrke_packets));
+ bzero(p->pfrke_bytes, sizeof(p->pfrke_bytes));
+ splx(s);
+ p->pfrke_tzero = tzero;
+ }
+}
+
+void
+pfr_reset_feedback(struct pfr_addr *addr, int size, int flags)
+{
+ struct pfr_addr ad;
+ int i;
+
+ for (i = 0; i < size; i++) {
+ if (COPYIN(addr+i, &ad, sizeof(ad)))
+ break;
+ ad.pfra_fback = PFR_FB_NONE;
+ if (COPYOUT(&ad, addr+i, sizeof(ad)))
+ break;
+ }
+}
+
+void
+pfr_prepare_network(union sockaddr_union *sa, int af, int net)
+{
+ int i;
+
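+	/* Build a sockaddr netmask with the top 'net' bits set for this af. */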
+ bzero(sa, sizeof(*sa));
+ if (af == AF_INET) {
+ sa->sin.sin_len = sizeof(sa->sin);
+ sa->sin.sin_family = AF_INET;
+ sa->sin.sin_addr.s_addr = net ? htonl(-1 << (32-net)) : 0;
+ } else if (af == AF_INET6) {
+ sa->sin6.sin6_len = sizeof(sa->sin6);
+ sa->sin6.sin6_family = AF_INET6;
+ for (i = 0; i < 4; i++) {
+ if (net <= 32) {
+ sa->sin6.sin6_addr.s6_addr32[i] =
+ net ? htonl(-1 << (32-net)) : 0;
+ break;
+ }
+ sa->sin6.sin6_addr.s6_addr32[i] = 0xFFFFFFFF;
+ net -= 32;
+ }
+ }
+}
+
+int
+pfr_route_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
+{
+ union sockaddr_union mask;
+ struct radix_node *rn;
+ struct radix_node_head *head = NULL; /* make the compiler happy */
+ int s;
+
+ bzero(ke->pfrke_node, sizeof(ke->pfrke_node));
+ if (ke->pfrke_af == AF_INET)
+ head = kt->pfrkt_ip4;
+ else if (ke->pfrke_af == AF_INET6)
+ head = kt->pfrkt_ip6;
+
+ s = splsoftnet();
+#ifdef __FreeBSD__
+ PF_ASSERT(MA_OWNED);
+#endif
+ if (KENTRY_NETWORK(ke)) {
+ pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
+ rn = rn_addroute(&ke->pfrke_sa, &mask, head, ke->pfrke_node);
+ } else
+ rn = rn_addroute(&ke->pfrke_sa, NULL, head, ke->pfrke_node);
+ splx(s);
+
+ return (rn == NULL ? -1 : 0);
+}
+
+int
+pfr_unroute_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
+{
+ union sockaddr_union mask;
+ struct radix_node *rn;
+ struct radix_node_head *head = NULL; /* make the compiler happy */
+ int s;
+
+ if (ke->pfrke_af == AF_INET)
+ head = kt->pfrkt_ip4;
+ else if (ke->pfrke_af == AF_INET6)
+ head = kt->pfrkt_ip6;
+
+ s = splsoftnet();
+#ifdef __FreeBSD__
+ PF_ASSERT(MA_OWNED);
+#endif
+ if (KENTRY_NETWORK(ke)) {
+ pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
+#ifdef __FreeBSD__
+ rn = rn_delete(&ke->pfrke_sa, &mask, head);
+#else
+ rn = rn_delete(&ke->pfrke_sa, &mask, head, NULL);
+#endif
+ } else
+#ifdef __FreeBSD__
+ rn = rn_delete(&ke->pfrke_sa, NULL, head);
+#else
+ rn = rn_delete(&ke->pfrke_sa, NULL, head, NULL);
+#endif
+ splx(s);
+
+ if (rn == NULL) {
+ printf("pfr_unroute_kentry: delete failed.\n");
+ return (-1);
+ }
+ return (0);
+}
+
+void
+pfr_copyout_addr(struct pfr_addr *ad, struct pfr_kentry *ke)
+{
+ bzero(ad, sizeof(*ad));
+ if (ke == NULL)
+ return;
+ ad->pfra_af = ke->pfrke_af;
+ ad->pfra_net = ke->pfrke_net;
+ ad->pfra_not = ke->pfrke_not;
+ if (ad->pfra_af == AF_INET)
+ ad->pfra_ip4addr = ke->pfrke_sa.sin.sin_addr;
+ else if (ad->pfra_af == AF_INET6)
+ ad->pfra_ip6addr = ke->pfrke_sa.sin6.sin6_addr;
+}
+
+int
+pfr_walktree(struct radix_node *rn, void *arg)
+{
+ struct pfr_kentry *ke = (struct pfr_kentry *)rn;
+ struct pfr_walktree *w = arg;
+ int s, flags = w->pfrw_flags;
+
+ switch (w->pfrw_op) {
+ case PFRW_MARK:
+ ke->pfrke_mark = 0;
+ break;
+ case PFRW_SWEEP:
+ if (ke->pfrke_mark)
+ break;
+ /* FALLTHROUGH */
+ case PFRW_ENQUEUE:
+ SLIST_INSERT_HEAD(w->pfrw_workq, ke, pfrke_workq);
+ w->pfrw_cnt++;
+ break;
+ case PFRW_GET_ADDRS:
+ if (w->pfrw_free-- > 0) {
+ struct pfr_addr ad;
+
+ pfr_copyout_addr(&ad, ke);
+ if (COPYOUT(&ad, w->pfrw_addr, sizeof(ad)))
+ return (EFAULT);
+ w->pfrw_addr++;
+ }
+ break;
+ case PFRW_GET_ASTATS:
+ if (w->pfrw_free-- > 0) {
+ struct pfr_astats as;
+
+ pfr_copyout_addr(&as.pfras_a, ke);
+
+ s = splsoftnet();
+ bcopy(ke->pfrke_packets, as.pfras_packets,
+ sizeof(as.pfras_packets));
+ bcopy(ke->pfrke_bytes, as.pfras_bytes,
+ sizeof(as.pfras_bytes));
+ splx(s);
+ as.pfras_tzero = ke->pfrke_tzero;
+
+ if (COPYOUT(&as, w->pfrw_astats, sizeof(as)))
+ return (EFAULT);
+ w->pfrw_astats++;
+ }
+ break;
+ case PFRW_POOL_GET:
+ if (ke->pfrke_not)
+ break; /* negative entries are ignored */
+ if (!w->pfrw_cnt--) {
+ w->pfrw_kentry = ke;
+ return (1); /* finish search */
+ }
+ break;
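+	/*
+	 * Record the first address and mask of each family for a
+	 * dynamic address that tracks this table.
+	 */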
+ case PFRW_DYNADDR_UPDATE:
+ if (ke->pfrke_af == AF_INET) {
+ if (w->pfrw_dyn->pfid_acnt4++ > 0)
+ break;
+ pfr_prepare_network(&pfr_mask, AF_INET, ke->pfrke_net);
+ w->pfrw_dyn->pfid_addr4 = *SUNION2PF(
+ &ke->pfrke_sa, AF_INET);
+ w->pfrw_dyn->pfid_mask4 = *SUNION2PF(
+ &pfr_mask, AF_INET);
+		} else if (ke->pfrke_af == AF_INET6) {
+ if (w->pfrw_dyn->pfid_acnt6++ > 0)
+ break;
+ pfr_prepare_network(&pfr_mask, AF_INET6, ke->pfrke_net);
+ w->pfrw_dyn->pfid_addr6 = *SUNION2PF(
+ &ke->pfrke_sa, AF_INET6);
+ w->pfrw_dyn->pfid_mask6 = *SUNION2PF(
+ &pfr_mask, AF_INET6);
+ }
+ break;
+ }
+ return (0);
+}
+
+int
+pfr_clr_tables(struct pfr_table *filter, int *ndel, int flags)
+{
+ struct pfr_ktableworkq workq;
+ struct pfr_ktable *p;
+ int s = 0, xdel = 0;
+
+ ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_ALLRSETS);
+ if (pfr_fix_anchor(filter->pfrt_anchor))
+ return (EINVAL);
+ if (pfr_table_count(filter, flags) < 0)
+ return (ENOENT);
+
+ SLIST_INIT(&workq);
+ RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
+ if (pfr_skip_table(filter, p, flags))
+ continue;
+ if (!strcmp(p->pfrkt_anchor, PF_RESERVED_ANCHOR))
+ continue;
+ if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE))
+ continue;
+ p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
+ SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
+ xdel++;
+ }
+ if (!(flags & PFR_FLAG_DUMMY)) {
+ if (flags & PFR_FLAG_ATOMIC)
+ s = splsoftnet();
+ pfr_setflags_ktables(&workq);
+ if (flags & PFR_FLAG_ATOMIC)
+ splx(s);
+ }
+ if (ndel != NULL)
+ *ndel = xdel;
+ return (0);
+}
+
+int
+pfr_add_tables(struct pfr_table *tbl, int size, int *nadd, int flags)
+{
+ struct pfr_ktableworkq addq, changeq;
+ struct pfr_ktable *p, *q, *r, key;
+ int i, rv, s = 0, xadd = 0;
+ long tzero = time_second;
+
+ ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY);
+ SLIST_INIT(&addq);
+ SLIST_INIT(&changeq);
+ for (i = 0; i < size; i++) {
+ if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t)))
+ senderr(EFAULT);
+ if (pfr_validate_table(&key.pfrkt_t, PFR_TFLAG_USRMASK,
+ flags & PFR_FLAG_USERIOCTL))
+ senderr(EINVAL);
+ key.pfrkt_flags |= PFR_TFLAG_ACTIVE;
+ p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
+ if (p == NULL) {
+ p = pfr_create_ktable(&key.pfrkt_t, tzero, 1);
+ if (p == NULL)
+ senderr(ENOMEM);
+ SLIST_FOREACH(q, &addq, pfrkt_workq) {
+ if (!pfr_ktable_compare(p, q))
+ goto _skip;
+ }
+ SLIST_INSERT_HEAD(&addq, p, pfrkt_workq);
+ xadd++;
+ if (!key.pfrkt_anchor[0])
+ goto _skip;
+
+ /* find or create root table */
+ bzero(key.pfrkt_anchor, sizeof(key.pfrkt_anchor));
+ r = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
+ if (r != NULL) {
+ p->pfrkt_root = r;
+ goto _skip;
+ }
+ SLIST_FOREACH(q, &addq, pfrkt_workq) {
+ if (!pfr_ktable_compare(&key, q)) {
+ p->pfrkt_root = q;
+ goto _skip;
+ }
+ }
+ key.pfrkt_flags = 0;
+ r = pfr_create_ktable(&key.pfrkt_t, 0, 1);
+ if (r == NULL)
+ senderr(ENOMEM);
+ SLIST_INSERT_HEAD(&addq, r, pfrkt_workq);
+ p->pfrkt_root = r;
+ } else if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
+ SLIST_FOREACH(q, &changeq, pfrkt_workq)
+ if (!pfr_ktable_compare(&key, q))
+ goto _skip;
+ p->pfrkt_nflags = (p->pfrkt_flags &
+ ~PFR_TFLAG_USRMASK) | key.pfrkt_flags;
+ SLIST_INSERT_HEAD(&changeq, p, pfrkt_workq);
+ xadd++;
+ }
+_skip:
+ ;
+ }
+ if (!(flags & PFR_FLAG_DUMMY)) {
+ if (flags & PFR_FLAG_ATOMIC)
+ s = splsoftnet();
+ pfr_insert_ktables(&addq);
+ pfr_setflags_ktables(&changeq);
+ if (flags & PFR_FLAG_ATOMIC)
+ splx(s);
+ } else
+ pfr_destroy_ktables(&addq, 0);
+ if (nadd != NULL)
+ *nadd = xadd;
+ return (0);
+_bad:
+ pfr_destroy_ktables(&addq, 0);
+ return (rv);
+}
+
+int
+pfr_del_tables(struct pfr_table *tbl, int size, int *ndel, int flags)
+{
+ struct pfr_ktableworkq workq;
+ struct pfr_ktable *p, *q, key;
+ int i, s = 0, xdel = 0;
+
+ ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY);
+ SLIST_INIT(&workq);
+ for (i = 0; i < size; i++) {
+ if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t)))
+ return (EFAULT);
+ if (pfr_validate_table(&key.pfrkt_t, 0,
+ flags & PFR_FLAG_USERIOCTL))
+ return (EINVAL);
+ p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
+ if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
+ SLIST_FOREACH(q, &workq, pfrkt_workq)
+ if (!pfr_ktable_compare(p, q))
+ goto _skip;
+ p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
+ SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
+ xdel++;
+ }
+_skip:
+ ;
+ }
+
+ if (!(flags & PFR_FLAG_DUMMY)) {
+ if (flags & PFR_FLAG_ATOMIC)
+ s = splsoftnet();
+ pfr_setflags_ktables(&workq);
+ if (flags & PFR_FLAG_ATOMIC)
+ splx(s);
+ }
+ if (ndel != NULL)
+ *ndel = xdel;
+ return (0);
+}
+
+int
+pfr_get_tables(struct pfr_table *filter, struct pfr_table *tbl, int *size,
+ int flags)
+{
+ struct pfr_ktable *p;
+ int n, nn;
+
+ ACCEPT_FLAGS(PFR_FLAG_ALLRSETS);
+ if (pfr_fix_anchor(filter->pfrt_anchor))
+ return (EINVAL);
+ n = nn = pfr_table_count(filter, flags);
+ if (n < 0)
+ return (ENOENT);
+ if (n > *size) {
+ *size = n;
+ return (0);
+ }
+ RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
+ if (pfr_skip_table(filter, p, flags))
+ continue;
+ if (n-- <= 0)
+ continue;
+ if (COPYOUT(&p->pfrkt_t, tbl++, sizeof(*tbl)))
+ return (EFAULT);
+ }
+ if (n) {
+ printf("pfr_get_tables: corruption detected (%d).\n", n);
+ return (ENOTTY);
+ }
+ *size = nn;
+ return (0);
+}
+
+int
+pfr_get_tstats(struct pfr_table *filter, struct pfr_tstats *tbl, int *size,
+ int flags)
+{
+ struct pfr_ktable *p;
+ struct pfr_ktableworkq workq;
+ int s = 0, n, nn;
+ long tzero = time_second;
+
+ ACCEPT_FLAGS(PFR_FLAG_ATOMIC|PFR_FLAG_ALLRSETS);
+ /* XXX PFR_FLAG_CLSTATS disabled */
+ if (pfr_fix_anchor(filter->pfrt_anchor))
+ return (EINVAL);
+ n = nn = pfr_table_count(filter, flags);
+ if (n < 0)
+ return (ENOENT);
+ if (n > *size) {
+ *size = n;
+ return (0);
+ }
+ SLIST_INIT(&workq);
+ if (flags & PFR_FLAG_ATOMIC)
+ s = splsoftnet();
+ RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
+ if (pfr_skip_table(filter, p, flags))
+ continue;
+ if (n-- <= 0)
+ continue;
+ if (!(flags & PFR_FLAG_ATOMIC))
+ s = splsoftnet();
+ if (COPYOUT(&p->pfrkt_ts, tbl++, sizeof(*tbl))) {
+ if (!(flags & PFR_FLAG_ATOMIC))
+ splx(s);
+ return (EFAULT);
+ }
+ if (!(flags & PFR_FLAG_ATOMIC))
+ splx(s);
+ SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
+ }
+ if (flags & PFR_FLAG_CLSTATS)
+ pfr_clstats_ktables(&workq, tzero,
+ flags & PFR_FLAG_ADDRSTOO);
+ if (flags & PFR_FLAG_ATOMIC)
+ splx(s);
+ if (n) {
+ printf("pfr_get_tstats: corruption detected (%d).\n", n);
+ return (ENOTTY);
+ }
+ *size = nn;
+ return (0);
+}
+
+int
+pfr_clr_tstats(struct pfr_table *tbl, int size, int *nzero, int flags)
+{
+ struct pfr_ktableworkq workq;
+ struct pfr_ktable *p, key;
+ int i, s = 0, xzero = 0;
+ long tzero = time_second;
+
+ ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_ADDRSTOO);
+ SLIST_INIT(&workq);
+ for (i = 0; i < size; i++) {
+ if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t)))
+ return (EFAULT);
+ if (pfr_validate_table(&key.pfrkt_t, 0, 0))
+ return (EINVAL);
+ p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
+ if (p != NULL) {
+ SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
+ xzero++;
+ }
+ }
+ if (!(flags & PFR_FLAG_DUMMY)) {
+ if (flags & PFR_FLAG_ATOMIC)
+ s = splsoftnet();
+ pfr_clstats_ktables(&workq, tzero, flags & PFR_FLAG_ADDRSTOO);
+ if (flags & PFR_FLAG_ATOMIC)
+ splx(s);
+ }
+ if (nzero != NULL)
+ *nzero = xzero;
+ return (0);
+}
+
+int
+pfr_set_tflags(struct pfr_table *tbl, int size, int setflag, int clrflag,
+ int *nchange, int *ndel, int flags)
+{
+ struct pfr_ktableworkq workq;
+ struct pfr_ktable *p, *q, key;
+ int i, s = 0, xchange = 0, xdel = 0;
+
+ ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY);
+ if ((setflag & ~PFR_TFLAG_USRMASK) ||
+ (clrflag & ~PFR_TFLAG_USRMASK) ||
+ (setflag & clrflag))
+ return (EINVAL);
+ SLIST_INIT(&workq);
+ for (i = 0; i < size; i++) {
+ if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t)))
+ return (EFAULT);
+ if (pfr_validate_table(&key.pfrkt_t, 0,
+ flags & PFR_FLAG_USERIOCTL))
+ return (EINVAL);
+ p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
+ if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
+ p->pfrkt_nflags = (p->pfrkt_flags | setflag) &
+ ~clrflag;
+ if (p->pfrkt_nflags == p->pfrkt_flags)
+ goto _skip;
+ SLIST_FOREACH(q, &workq, pfrkt_workq)
+ if (!pfr_ktable_compare(p, q))
+ goto _skip;
+ SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
+ if ((p->pfrkt_flags & PFR_TFLAG_PERSIST) &&
+ (clrflag & PFR_TFLAG_PERSIST) &&
+ !(p->pfrkt_flags & PFR_TFLAG_REFERENCED))
+ xdel++;
+ else
+ xchange++;
+ }
+_skip:
+ ;
+ }
+ if (!(flags & PFR_FLAG_DUMMY)) {
+ if (flags & PFR_FLAG_ATOMIC)
+ s = splsoftnet();
+ pfr_setflags_ktables(&workq);
+ if (flags & PFR_FLAG_ATOMIC)
+ splx(s);
+ }
+ if (nchange != NULL)
+ *nchange = xchange;
+ if (ndel != NULL)
+ *ndel = xdel;
+ return (0);
+}
+
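+/*
+ * Table transactions: pfr_ina_begin opens a ticketed transaction on a
+ * ruleset, pfr_ina_define loads shadow tables into it, and
+ * pfr_ina_commit / pfr_ina_rollback atomically install or discard them.
+ */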
+int
+pfr_ina_begin(struct pfr_table *trs, u_int32_t *ticket, int *ndel, int flags)
+{
+ struct pfr_ktableworkq workq;
+ struct pfr_ktable *p;
+ struct pf_ruleset *rs;
+ int xdel = 0;
+
+ ACCEPT_FLAGS(PFR_FLAG_DUMMY);
+ rs = pf_find_or_create_ruleset(trs->pfrt_anchor);
+ if (rs == NULL)
+ return (ENOMEM);
+ SLIST_INIT(&workq);
+ RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
+ if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
+ pfr_skip_table(trs, p, 0))
+ continue;
+ p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
+ SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
+ xdel++;
+ }
+ if (!(flags & PFR_FLAG_DUMMY)) {
+ pfr_setflags_ktables(&workq);
+ if (ticket != NULL)
+ *ticket = ++rs->tticket;
+ rs->topen = 1;
+ } else
+ pf_remove_if_empty_ruleset(rs);
+ if (ndel != NULL)
+ *ndel = xdel;
+ return (0);
+}
+
+int
+pfr_ina_define(struct pfr_table *tbl, struct pfr_addr *addr, int size,
+ int *nadd, int *naddr, u_int32_t ticket, int flags)
+{
+ struct pfr_ktableworkq tableq;
+ struct pfr_kentryworkq addrq;
+ struct pfr_ktable *kt, *rt, *shadow, key;
+ struct pfr_kentry *p;
+ struct pfr_addr ad;
+ struct pf_ruleset *rs;
+ int i, rv, xadd = 0, xaddr = 0;
+
+ ACCEPT_FLAGS(PFR_FLAG_DUMMY|PFR_FLAG_ADDRSTOO);
+ if (size && !(flags & PFR_FLAG_ADDRSTOO))
+ return (EINVAL);
+ if (pfr_validate_table(tbl, PFR_TFLAG_USRMASK,
+ flags & PFR_FLAG_USERIOCTL))
+ return (EINVAL);
+ rs = pf_find_ruleset(tbl->pfrt_anchor);
+ if (rs == NULL || !rs->topen || ticket != rs->tticket)
+ return (EBUSY);
+ tbl->pfrt_flags |= PFR_TFLAG_INACTIVE;
+ SLIST_INIT(&tableq);
+ kt = RB_FIND(pfr_ktablehead, &pfr_ktables, (struct pfr_ktable *)tbl);
+ if (kt == NULL) {
+ kt = pfr_create_ktable(tbl, 0, 1);
+ if (kt == NULL)
+ return (ENOMEM);
+ SLIST_INSERT_HEAD(&tableq, kt, pfrkt_workq);
+ xadd++;
+ if (!tbl->pfrt_anchor[0])
+ goto _skip;
+
+ /* find or create root table */
+ bzero(&key, sizeof(key));
+ strlcpy(key.pfrkt_name, tbl->pfrt_name, sizeof(key.pfrkt_name));
+ rt = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
+ if (rt != NULL) {
+ kt->pfrkt_root = rt;
+ goto _skip;
+ }
+ rt = pfr_create_ktable(&key.pfrkt_t, 0, 1);
+ if (rt == NULL) {
+ pfr_destroy_ktables(&tableq, 0);
+ return (ENOMEM);
+ }
+ SLIST_INSERT_HEAD(&tableq, rt, pfrkt_workq);
+ kt->pfrkt_root = rt;
+ } else if (!(kt->pfrkt_flags & PFR_TFLAG_INACTIVE))
+ xadd++;
+_skip:
+ shadow = pfr_create_ktable(tbl, 0, 0);
+ if (shadow == NULL) {
+ pfr_destroy_ktables(&tableq, 0);
+ return (ENOMEM);
+ }
+ SLIST_INIT(&addrq);
+ for (i = 0; i < size; i++) {
+ if (COPYIN(addr+i, &ad, sizeof(ad)))
+ senderr(EFAULT);
+ if (pfr_validate_addr(&ad))
+ senderr(EINVAL);
+ if (pfr_lookup_addr(shadow, &ad, 1) != NULL)
+ continue;
+ p = pfr_create_kentry(&ad, 0);
+ if (p == NULL)
+ senderr(ENOMEM);
+ if (pfr_route_kentry(shadow, p)) {
+ pfr_destroy_kentry(p);
+ continue;
+ }
+ SLIST_INSERT_HEAD(&addrq, p, pfrke_workq);
+ xaddr++;
+ }
+ if (!(flags & PFR_FLAG_DUMMY)) {
+ if (kt->pfrkt_shadow != NULL)
+ pfr_destroy_ktable(kt->pfrkt_shadow, 1);
+ kt->pfrkt_flags |= PFR_TFLAG_INACTIVE;
+ pfr_insert_ktables(&tableq);
+ shadow->pfrkt_cnt = (flags & PFR_FLAG_ADDRSTOO) ?
+ xaddr : NO_ADDRESSES;
+ kt->pfrkt_shadow = shadow;
+ } else {
+ pfr_clean_node_mask(shadow, &addrq);
+ pfr_destroy_ktable(shadow, 0);
+ pfr_destroy_ktables(&tableq, 0);
+ pfr_destroy_kentries(&addrq);
+ }
+ if (nadd != NULL)
+ *nadd = xadd;
+ if (naddr != NULL)
+ *naddr = xaddr;
+ return (0);
+_bad:
+ pfr_destroy_ktable(shadow, 0);
+ pfr_destroy_ktables(&tableq, 0);
+ pfr_destroy_kentries(&addrq);
+ return (rv);
+}
+
+int
+pfr_ina_rollback(struct pfr_table *trs, u_int32_t ticket, int *ndel, int flags)
+{
+ struct pfr_ktableworkq workq;
+ struct pfr_ktable *p;
+ struct pf_ruleset *rs;
+ int xdel = 0;
+
+ ACCEPT_FLAGS(PFR_FLAG_DUMMY);
+ rs = pf_find_ruleset(trs->pfrt_anchor);
+ if (rs == NULL || !rs->topen || ticket != rs->tticket)
+ return (0);
+ SLIST_INIT(&workq);
+ RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
+ if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
+ pfr_skip_table(trs, p, 0))
+ continue;
+ p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
+ SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
+ xdel++;
+ }
+ if (!(flags & PFR_FLAG_DUMMY)) {
+ pfr_setflags_ktables(&workq);
+ rs->topen = 0;
+ pf_remove_if_empty_ruleset(rs);
+ }
+ if (ndel != NULL)
+ *ndel = xdel;
+ return (0);
+}
+
+int
+pfr_ina_commit(struct pfr_table *trs, u_int32_t ticket, int *nadd,
+ int *nchange, int flags)
+{
+ struct pfr_ktable *p, *q;
+ struct pfr_ktableworkq workq;
+ struct pf_ruleset *rs;
+ int s = 0, xadd = 0, xchange = 0;
+ long tzero = time_second;
+
+ ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY);
+ rs = pf_find_ruleset(trs->pfrt_anchor);
+ if (rs == NULL || !rs->topen || ticket != rs->tticket)
+ return (EBUSY);
+
+ SLIST_INIT(&workq);
+ RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
+ if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
+ pfr_skip_table(trs, p, 0))
+ continue;
+ SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
+ if (p->pfrkt_flags & PFR_TFLAG_ACTIVE)
+ xchange++;
+ else
+ xadd++;
+ }
+
+ if (!(flags & PFR_FLAG_DUMMY)) {
+ if (flags & PFR_FLAG_ATOMIC)
+ s = splsoftnet();
+ for (p = SLIST_FIRST(&workq); p != NULL; p = q) {
+ q = SLIST_NEXT(p, pfrkt_workq);
+ pfr_commit_ktable(p, tzero);
+ }
+ if (flags & PFR_FLAG_ATOMIC)
+ splx(s);
+ rs->topen = 0;
+ pf_remove_if_empty_ruleset(rs);
+ }
+ if (nadd != NULL)
+ *nadd = xadd;
+ if (nchange != NULL)
+ *nchange = xchange;
+
+ return (0);
+}
+
+void
+pfr_commit_ktable(struct pfr_ktable *kt, long tzero)
+{
+ struct pfr_ktable *shadow = kt->pfrkt_shadow;
+ int nflags;
+
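+	/*
+	 * Three cases: the transaction carried no addresses (only flags
+	 * change), the table is active (merge the shadow into it), or it
+	 * is not (swap in the shadow's radix heads wholesale).
+	 */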
+ if (shadow->pfrkt_cnt == NO_ADDRESSES) {
+ if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
+ pfr_clstats_ktable(kt, tzero, 1);
+ } else if (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) {
+ /* kt might contain addresses */
+ struct pfr_kentryworkq addrq, addq, changeq, delq, garbageq;
+ struct pfr_kentry *p, *q, *next;
+ struct pfr_addr ad;
+
+ pfr_enqueue_addrs(shadow, &addrq, NULL, 0);
+ pfr_mark_addrs(kt);
+ SLIST_INIT(&addq);
+ SLIST_INIT(&changeq);
+ SLIST_INIT(&delq);
+ SLIST_INIT(&garbageq);
+ pfr_clean_node_mask(shadow, &addrq);
+ for (p = SLIST_FIRST(&addrq); p != NULL; p = next) {
+ next = SLIST_NEXT(p, pfrke_workq); /* XXX */
+ pfr_copyout_addr(&ad, p);
+ q = pfr_lookup_addr(kt, &ad, 1);
+ if (q != NULL) {
+ if (q->pfrke_not != p->pfrke_not)
+ SLIST_INSERT_HEAD(&changeq, q,
+ pfrke_workq);
+ q->pfrke_mark = 1;
+ SLIST_INSERT_HEAD(&garbageq, p, pfrke_workq);
+ } else {
+ p->pfrke_tzero = tzero;
+ SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
+ }
+ }
+ pfr_enqueue_addrs(kt, &delq, NULL, ENQUEUE_UNMARKED_ONLY);
+ pfr_insert_kentries(kt, &addq, tzero);
+ pfr_remove_kentries(kt, &delq);
+ pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
+ pfr_destroy_kentries(&garbageq);
+ } else {
+ /* kt cannot contain addresses */
+ SWAP(struct radix_node_head *, kt->pfrkt_ip4,
+ shadow->pfrkt_ip4);
+ SWAP(struct radix_node_head *, kt->pfrkt_ip6,
+ shadow->pfrkt_ip6);
+ SWAP(int, kt->pfrkt_cnt, shadow->pfrkt_cnt);
+ pfr_clstats_ktable(kt, tzero, 1);
+ }
+ nflags = ((shadow->pfrkt_flags & PFR_TFLAG_USRMASK) |
+ (kt->pfrkt_flags & PFR_TFLAG_SETMASK) | PFR_TFLAG_ACTIVE)
+ & ~PFR_TFLAG_INACTIVE;
+ pfr_destroy_ktable(shadow, 0);
+ kt->pfrkt_shadow = NULL;
+ pfr_setflags_ktable(kt, nflags);
+}
+
+int
+pfr_validate_table(struct pfr_table *tbl, int allowedflags, int no_reserved)
+{
+ int i;
+
+ if (!tbl->pfrt_name[0])
+ return (-1);
+ if (no_reserved && !strcmp(tbl->pfrt_anchor, PF_RESERVED_ANCHOR))
+ return (-1);
+ if (tbl->pfrt_name[PF_TABLE_NAME_SIZE-1])
+ return (-1);
+ for (i = strlen(tbl->pfrt_name); i < PF_TABLE_NAME_SIZE; i++)
+ if (tbl->pfrt_name[i])
+ return (-1);
+ if (pfr_fix_anchor(tbl->pfrt_anchor))
+ return (-1);
+ if (tbl->pfrt_flags & ~allowedflags)
+ return (-1);
+ return (0);
+}
+
+/*
+ * Rewrite anchors referenced by tables to remove slashes
+ * and check for validity.
+ */
+int
+pfr_fix_anchor(char *anchor)
+{
+ size_t siz = MAXPATHLEN;
+ int i;
+
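+	/* Strip leading slashes in place, then verify NUL padding. */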
+ if (anchor[0] == '/') {
+ char *path;
+ int off;
+
+ path = anchor;
+ off = 1;
+ while (*++path == '/')
+ off++;
+ bcopy(path, anchor, siz - off);
+ memset(anchor + siz - off, 0, off);
+ }
+ if (anchor[siz - 1])
+ return (-1);
+ for (i = strlen(anchor); i < siz; i++)
+ if (anchor[i])
+ return (-1);
+ return (0);
+}
+
+int
+pfr_table_count(struct pfr_table *filter, int flags)
+{
+ struct pf_ruleset *rs;
+
+ if (flags & PFR_FLAG_ALLRSETS)
+ return (pfr_ktable_cnt);
+ if (filter->pfrt_anchor[0]) {
+ rs = pf_find_ruleset(filter->pfrt_anchor);
+ return ((rs != NULL) ? rs->tables : -1);
+ }
+ return (pf_main_ruleset.tables);
+}
+
+int
+pfr_skip_table(struct pfr_table *filter, struct pfr_ktable *kt, int flags)
+{
+ if (flags & PFR_FLAG_ALLRSETS)
+ return (0);
+ if (strcmp(filter->pfrt_anchor, kt->pfrkt_anchor))
+ return (1);
+ return (0);
+}
+
+void
+pfr_insert_ktables(struct pfr_ktableworkq *workq)
+{
+ struct pfr_ktable *p;
+
+ SLIST_FOREACH(p, workq, pfrkt_workq)
+ pfr_insert_ktable(p);
+}
+
+void
+pfr_insert_ktable(struct pfr_ktable *kt)
+{
+ RB_INSERT(pfr_ktablehead, &pfr_ktables, kt);
+ pfr_ktable_cnt++;
+ if (kt->pfrkt_root != NULL)
+ if (!kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR]++)
+ pfr_setflags_ktable(kt->pfrkt_root,
+ kt->pfrkt_root->pfrkt_flags|PFR_TFLAG_REFDANCHOR);
+}
+
+void
+pfr_setflags_ktables(struct pfr_ktableworkq *workq)
+{
+ struct pfr_ktable *p, *q;
+
+ for (p = SLIST_FIRST(workq); p; p = q) {
+ q = SLIST_NEXT(p, pfrkt_workq);
+ pfr_setflags_ktable(p, p->pfrkt_nflags);
+ }
+}
+
+void
+pfr_setflags_ktable(struct pfr_ktable *kt, int newf)
+{
+ struct pfr_kentryworkq addrq;
+
+ if (!(newf & PFR_TFLAG_REFERENCED) &&
+ !(newf & PFR_TFLAG_PERSIST))
+ newf &= ~PFR_TFLAG_ACTIVE;
+ if (!(newf & PFR_TFLAG_ACTIVE))
+ newf &= ~PFR_TFLAG_USRMASK;
+ if (!(newf & PFR_TFLAG_SETMASK)) {
+ RB_REMOVE(pfr_ktablehead, &pfr_ktables, kt);
+ if (kt->pfrkt_root != NULL)
+ if (!--kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR])
+ pfr_setflags_ktable(kt->pfrkt_root,
+ kt->pfrkt_root->pfrkt_flags &
+ ~PFR_TFLAG_REFDANCHOR);
+ pfr_destroy_ktable(kt, 1);
+ pfr_ktable_cnt--;
+ return;
+ }
+ if (!(newf & PFR_TFLAG_ACTIVE) && kt->pfrkt_cnt) {
+ pfr_enqueue_addrs(kt, &addrq, NULL, 0);
+ pfr_remove_kentries(kt, &addrq);
+ }
+ if (!(newf & PFR_TFLAG_INACTIVE) && kt->pfrkt_shadow != NULL) {
+ pfr_destroy_ktable(kt->pfrkt_shadow, 1);
+ kt->pfrkt_shadow = NULL;
+ }
+ kt->pfrkt_flags = newf;
+}
+
+void
+pfr_clstats_ktables(struct pfr_ktableworkq *workq, long tzero, int recurse)
+{
+ struct pfr_ktable *p;
+
+ SLIST_FOREACH(p, workq, pfrkt_workq)
+ pfr_clstats_ktable(p, tzero, recurse);
+}
+
+void
+pfr_clstats_ktable(struct pfr_ktable *kt, long tzero, int recurse)
+{
+ struct pfr_kentryworkq addrq;
+ int s;
+
+ if (recurse) {
+ pfr_enqueue_addrs(kt, &addrq, NULL, 0);
+ pfr_clstats_kentries(&addrq, tzero, 0);
+ }
+ s = splsoftnet();
+ bzero(kt->pfrkt_packets, sizeof(kt->pfrkt_packets));
+ bzero(kt->pfrkt_bytes, sizeof(kt->pfrkt_bytes));
+ kt->pfrkt_match = kt->pfrkt_nomatch = 0;
+ splx(s);
+ kt->pfrkt_tzero = tzero;
+}
+
+struct pfr_ktable *
+pfr_create_ktable(struct pfr_table *tbl, long tzero, int attachruleset)
+{
+ struct pfr_ktable *kt;
+ struct pf_ruleset *rs;
+
+ kt = pool_get(&pfr_ktable_pl, PR_NOWAIT);
+ if (kt == NULL)
+ return (NULL);
+ bzero(kt, sizeof(*kt));
+ kt->pfrkt_t = *tbl;
+
+ if (attachruleset) {
+ rs = pf_find_or_create_ruleset(tbl->pfrt_anchor);
+ if (!rs) {
+ pfr_destroy_ktable(kt, 0);
+ return (NULL);
+ }
+ kt->pfrkt_rs = rs;
+ rs->tables++;
+ }
+
+ if (!rn_inithead((void **)&kt->pfrkt_ip4,
+ offsetof(struct sockaddr_in, sin_addr) * 8) ||
+ !rn_inithead((void **)&kt->pfrkt_ip6,
+ offsetof(struct sockaddr_in6, sin6_addr) * 8)) {
+ pfr_destroy_ktable(kt, 0);
+ return (NULL);
+ }
+ kt->pfrkt_tzero = tzero;
+
+ return (kt);
+}
+
+void
+pfr_destroy_ktables(struct pfr_ktableworkq *workq, int flushaddr)
+{
+ struct pfr_ktable *p, *q;
+
+ for (p = SLIST_FIRST(workq); p; p = q) {
+ q = SLIST_NEXT(p, pfrkt_workq);
+ pfr_destroy_ktable(p, flushaddr);
+ }
+}
+
+void
+pfr_destroy_ktable(struct pfr_ktable *kt, int flushaddr)
+{
+ struct pfr_kentryworkq addrq;
+
+ if (flushaddr) {
+ pfr_enqueue_addrs(kt, &addrq, NULL, 0);
+ pfr_clean_node_mask(kt, &addrq);
+ pfr_destroy_kentries(&addrq);
+ }
+#if defined(__FreeBSD__) && (__FreeBSD_version >= 500100)
+ if (kt->pfrkt_ip4 != NULL) {
+ RADIX_NODE_HEAD_DESTROY(kt->pfrkt_ip4);
+ free((caddr_t)kt->pfrkt_ip4, M_RTABLE);
+ }
+ if (kt->pfrkt_ip6 != NULL) {
+ RADIX_NODE_HEAD_DESTROY(kt->pfrkt_ip6);
+ free((caddr_t)kt->pfrkt_ip6, M_RTABLE);
+ }
+#else
+ if (kt->pfrkt_ip4 != NULL)
+ free((caddr_t)kt->pfrkt_ip4, M_RTABLE);
+ if (kt->pfrkt_ip6 != NULL)
+ free((caddr_t)kt->pfrkt_ip6, M_RTABLE);
+#endif
+ if (kt->pfrkt_shadow != NULL)
+ pfr_destroy_ktable(kt->pfrkt_shadow, flushaddr);
+ if (kt->pfrkt_rs != NULL) {
+ kt->pfrkt_rs->tables--;
+ pf_remove_if_empty_ruleset(kt->pfrkt_rs);
+ }
+ pool_put(&pfr_ktable_pl, kt);
+}
+
+int
+pfr_ktable_compare(struct pfr_ktable *p, struct pfr_ktable *q)
+{
+ int d;
+
+ if ((d = strncmp(p->pfrkt_name, q->pfrkt_name, PF_TABLE_NAME_SIZE)))
+ return (d);
+ return (strcmp(p->pfrkt_anchor, q->pfrkt_anchor));
+}
+
+struct pfr_ktable *
+pfr_lookup_table(struct pfr_table *tbl)
+{
+ /* struct pfr_ktable starts like a struct pfr_table */
+ return (RB_FIND(pfr_ktablehead, &pfr_ktables,
+ (struct pfr_ktable *)tbl));
+}
+
+int
+pfr_match_addr(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af)
+{
+ struct pfr_kentry *ke = NULL;
+ int match;
+
+ if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
+ kt = kt->pfrkt_root;
+ if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
+ return (0);
+
+ switch (af) {
+#ifdef INET
+ case AF_INET:
+ pfr_sin.sin_addr.s_addr = a->addr32[0];
+ ke = (struct pfr_kentry *)rn_match(&pfr_sin, kt->pfrkt_ip4);
+ if (ke && KENTRY_RNF_ROOT(ke))
+ ke = NULL;
+ break;
+#endif /* INET */
+#ifdef INET6
+ case AF_INET6:
+ bcopy(a, &pfr_sin6.sin6_addr, sizeof(pfr_sin6.sin6_addr));
+ ke = (struct pfr_kentry *)rn_match(&pfr_sin6, kt->pfrkt_ip6);
+ if (ke && KENTRY_RNF_ROOT(ke))
+ ke = NULL;
+ break;
+#endif /* INET6 */
+ }
+ match = (ke && !ke->pfrke_not);
+ if (match)
+ kt->pfrkt_match++;
+ else
+ kt->pfrkt_nomatch++;
+ return (match);
+}
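+
+/*
+ * Caller sketch (hypothetical values, "kt" assumed in scope): per-packet
+ * membership test for a v4 address, as the filter hook uses it.
+ */
+#if 0
+ struct pf_addr a;
+
+ a.v4.s_addr = htonl(0xc0000201); /* 192.0.2.1 */
+ if (pfr_match_addr(kt, &a, AF_INET))
+  /* address is in the table and not negated */ ;
+#endif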
+
+void
+pfr_update_stats(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af,
+ u_int64_t len, int dir_out, int op_pass, int notrule)
+{
+ struct pfr_kentry *ke = NULL;
+
+ if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
+ kt = kt->pfrkt_root;
+ if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
+ return;
+
+ switch (af) {
+#ifdef INET
+ case AF_INET:
+ pfr_sin.sin_addr.s_addr = a->addr32[0];
+ ke = (struct pfr_kentry *)rn_match(&pfr_sin, kt->pfrkt_ip4);
+ if (ke && KENTRY_RNF_ROOT(ke))
+ ke = NULL;
+ break;
+#endif /* INET */
+#ifdef INET6
+ case AF_INET6:
+ bcopy(a, &pfr_sin6.sin6_addr, sizeof(pfr_sin6.sin6_addr));
+ ke = (struct pfr_kentry *)rn_match(&pfr_sin6, kt->pfrkt_ip6);
+ if (ke && KENTRY_RNF_ROOT(ke))
+ ke = NULL;
+ break;
+#endif /* INET6 */
+ default:
+ ;
+ }
+ if ((ke == NULL || ke->pfrke_not) != notrule) {
+ if (op_pass != PFR_OP_PASS)
+ printf("pfr_update_stats: assertion failed.\n");
+ op_pass = PFR_OP_XPASS;
+ }
+ kt->pfrkt_packets[dir_out][op_pass]++;
+ kt->pfrkt_bytes[dir_out][op_pass] += len;
+ if (ke != NULL && op_pass != PFR_OP_XPASS) {
+ ke->pfrke_packets[dir_out][op_pass]++;
+ ke->pfrke_bytes[dir_out][op_pass] += len;
+ }
+}
+
+struct pfr_ktable *
+pfr_attach_table(struct pf_ruleset *rs, char *name)
+{
+ struct pfr_ktable *kt, *rt;
+ struct pfr_table tbl;
+ struct pf_anchor *ac = rs->anchor;
+
+ bzero(&tbl, sizeof(tbl));
+ strlcpy(tbl.pfrt_name, name, sizeof(tbl.pfrt_name));
+ if (ac != NULL)
+ strlcpy(tbl.pfrt_anchor, ac->path, sizeof(tbl.pfrt_anchor));
+ kt = pfr_lookup_table(&tbl);
+ if (kt == NULL) {
+ kt = pfr_create_ktable(&tbl, time_second, 1);
+ if (kt == NULL)
+ return (NULL);
+ if (ac != NULL) {
+ bzero(tbl.pfrt_anchor, sizeof(tbl.pfrt_anchor));
+ rt = pfr_lookup_table(&tbl);
+ if (rt == NULL) {
+ rt = pfr_create_ktable(&tbl, 0, 1);
+ if (rt == NULL) {
+ pfr_destroy_ktable(kt, 0);
+ return (NULL);
+ }
+ pfr_insert_ktable(rt);
+ }
+ kt->pfrkt_root = rt;
+ }
+ pfr_insert_ktable(kt);
+ }
+ if (!kt->pfrkt_refcnt[PFR_REFCNT_RULE]++)
+ pfr_setflags_ktable(kt, kt->pfrkt_flags|PFR_TFLAG_REFERENCED);
+ return (kt);
+}
+
+void
+pfr_detach_table(struct pfr_ktable *kt)
+{
+ if (kt->pfrkt_refcnt[PFR_REFCNT_RULE] <= 0)
+ printf("pfr_detach_table: refcount = %d.\n",
+ kt->pfrkt_refcnt[PFR_REFCNT_RULE]);
+ else if (!--kt->pfrkt_refcnt[PFR_REFCNT_RULE])
+ pfr_setflags_ktable(kt, kt->pfrkt_flags&~PFR_TFLAG_REFERENCED);
+}
+
+
+int
+pfr_pool_get(struct pfr_ktable *kt, int *pidx, struct pf_addr *counter,
+ struct pf_addr **raddr, struct pf_addr **rmask, sa_family_t af)
+{
+ struct pfr_kentry *ke, *ke2 = NULL;
+ struct pf_addr *addr = NULL;
+ union sockaddr_union mask;
+ int idx = -1, use_counter = 0;
+
+ if (af == AF_INET)
+ addr = (struct pf_addr *)&pfr_sin.sin_addr;
+ else if (af == AF_INET6)
+ addr = (struct pf_addr *)&pfr_sin6.sin6_addr;
+ if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
+ kt = kt->pfrkt_root;
+ if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
+ return (-1);
+
+ if (pidx != NULL)
+ idx = *pidx;
+ if (counter != NULL && idx >= 0)
+ use_counter = 1;
+ if (idx < 0)
+ idx = 0;
+
+_next_block:
+ ke = pfr_kentry_byidx(kt, idx, af);
+ if (ke == NULL)
+ return (1);
+ pfr_prepare_network(&pfr_mask, af, ke->pfrke_net);
+ *raddr = SUNION2PF(&ke->pfrke_sa, af);
+ *rmask = SUNION2PF(&pfr_mask, af);
+
+ if (use_counter) {
+ /* is supplied address within block? */
+ if (!PF_MATCHA(0, *raddr, *rmask, counter, af)) {
+ /* no, go to next block in table */
+ idx++;
+ use_counter = 0;
+ goto _next_block;
+ }
+ PF_ACPY(addr, counter, af);
+ } else {
+ /* use first address of block */
+ PF_ACPY(addr, *raddr, af);
+ }
+
+ if (!KENTRY_NETWORK(ke)) {
+ /* this is a single IP address - no possible nested block */
+ PF_ACPY(counter, addr, af);
+ *pidx = idx;
+ return (0);
+ }
+ for (;;) {
+ /* we don't want to use a nested block */
+ if (af == AF_INET)
+ ke2 = (struct pfr_kentry *)rn_match(&pfr_sin,
+ kt->pfrkt_ip4);
+ else if (af == AF_INET6)
+ ke2 = (struct pfr_kentry *)rn_match(&pfr_sin6,
+ kt->pfrkt_ip6);
+ /* no need to check KENTRY_RNF_ROOT() here */
+ if (ke2 == ke) {
+ /* lookup returned the same block - perfect */
+ PF_ACPY(counter, addr, af);
+ *pidx = idx;
+ return (0);
+ }
+
+ /* we need to increase the counter past the nested block */
+ pfr_prepare_network(&mask, af, ke2->pfrke_net);
+ PF_POOLMASK(addr, addr, SUNION2PF(&mask, af), &pfr_ffaddr, af);
+ PF_AINC(addr, af);
+ if (!PF_MATCHA(0, *raddr, *rmask, addr, af)) {
+ /* ok, we reached the end of our main block */
+ /* go to next block in table */
+ idx++;
+ use_counter = 0;
+ goto _next_block;
+ }
+ }
+}
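+
+/*
+ * Caller sketch (names are illustrative): round-robin pools keep a
+ * persistent (tblidx, counter) pair across calls; 0 yields an address,
+ * 1 means the table is exhausted and the caller wraps to index 0.
+ */
+#if 0
+ if (pfr_pool_get(kt, &rpool->tblidx, &rpool->counter,
+     &raddr, &rmask, af)) {
+  rpool->tblidx = -1; /* wrap and retry once */
+  if (pfr_pool_get(kt, &rpool->tblidx, &rpool->counter,
+      &raddr, &rmask, af))
+   return (1); /* table is empty */
+ }
+#endif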
+
+struct pfr_kentry *
+pfr_kentry_byidx(struct pfr_ktable *kt, int idx, int af)
+{
+ struct pfr_walktree w;
+
+ bzero(&w, sizeof(w));
+ w.pfrw_op = PFRW_POOL_GET;
+ w.pfrw_cnt = idx;
+
+ switch (af) {
+#ifdef INET
+ case AF_INET:
+#ifdef __FreeBSD__
+ kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
+#else
+ rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
+#endif
+ return (w.pfrw_kentry);
+#endif /* INET */
+#ifdef INET6
+ case AF_INET6:
+#ifdef __FreeBSD__
+ kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
+#else
+ rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
+#endif
+ return (w.pfrw_kentry);
+#endif /* INET6 */
+ default:
+ return (NULL);
+ }
+}
+
+void
+pfr_dynaddr_update(struct pfr_ktable *kt, struct pfi_dynaddr *dyn)
+{
+ struct pfr_walktree w;
+ int s;
+
+ bzero(&w, sizeof(w));
+ w.pfrw_op = PFRW_DYNADDR_UPDATE;
+ w.pfrw_dyn = dyn;
+
+ s = splsoftnet();
+ dyn->pfid_acnt4 = 0;
+ dyn->pfid_acnt6 = 0;
+ if (!dyn->pfid_af || dyn->pfid_af == AF_INET)
+#ifdef __FreeBSD__
+ kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
+#else
+ rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
+#endif
+ if (!dyn->pfid_af || dyn->pfid_af == AF_INET6)
+#ifdef __FreeBSD__
+ kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
+#else
+ rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
+#endif
+ splx(s);
+}
diff --git a/contrib/pf/rtems/freebsd/net/pfvar.h b/contrib/pf/rtems/freebsd/net/pfvar.h
new file mode 100644
index 00000000..d0c0ced0
--- /dev/null
+++ b/contrib/pf/rtems/freebsd/net/pfvar.h
@@ -0,0 +1,1866 @@
+/* $FreeBSD$ */
+/* $OpenBSD: pfvar.h,v 1.244 2007/02/23 21:31:51 deraadt Exp $ */
+
+/*
+ * Copyright (c) 2001 Daniel Hartmeier
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef _NET_PFVAR_HH_
+#define _NET_PFVAR_HH_
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/types.h>
+#include <rtems/freebsd/sys/queue.h>
+#include <rtems/freebsd/sys/tree.h>
+#ifdef __FreeBSD__
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/sx.h>
+#else
+#include <rtems/freebsd/sys/rwlock.h>
+#endif
+
+#include <rtems/freebsd/net/radix.h>
+#include <rtems/freebsd/net/route.h>
+#ifdef __FreeBSD__
+#include <rtems/freebsd/net/if_clone.h>
+#include <rtems/freebsd/net/pf_mtag.h>
+#include <rtems/freebsd/vm/uma.h>
+#else
+#include <rtems/freebsd/netinet/ip_ipsp.h>
+#endif
+
+#ifdef __FreeBSD__
+#include <rtems/freebsd/netinet/in.h>
+#endif
+
+#include <rtems/freebsd/netinet/tcp_fsm.h>
+
+struct ip;
+struct ip6_hdr;
+#ifdef __FreeBSD__
+struct inpcb;
+#endif
+
+#define PF_TCPS_PROXY_SRC ((TCP_NSTATES)+0)
+#define PF_TCPS_PROXY_DST ((TCP_NSTATES)+1)
+
+#define PF_MD5_DIGEST_LENGTH 16
+#ifdef MD5_DIGEST_LENGTH
+#if PF_MD5_DIGEST_LENGTH != MD5_DIGEST_LENGTH
+#error
+#endif
+#endif
+
+enum { PF_INOUT, PF_IN, PF_OUT };
+enum { PF_LAN_EXT, PF_EXT_GWY, PF_ID };
+enum { PF_PASS, PF_DROP, PF_SCRUB, PF_NOSCRUB, PF_NAT, PF_NONAT,
+ PF_BINAT, PF_NOBINAT, PF_RDR, PF_NORDR, PF_SYNPROXY_DROP };
+enum { PF_RULESET_SCRUB, PF_RULESET_FILTER, PF_RULESET_NAT,
+ PF_RULESET_BINAT, PF_RULESET_RDR, PF_RULESET_MAX };
+enum { PF_OP_NONE, PF_OP_IRG, PF_OP_EQ, PF_OP_NE, PF_OP_LT,
+ PF_OP_LE, PF_OP_GT, PF_OP_GE, PF_OP_XRG, PF_OP_RRG };
+enum { PF_DEBUG_NONE, PF_DEBUG_URGENT, PF_DEBUG_MISC, PF_DEBUG_NOISY };
+enum { PF_CHANGE_NONE, PF_CHANGE_ADD_HEAD, PF_CHANGE_ADD_TAIL,
+ PF_CHANGE_ADD_BEFORE, PF_CHANGE_ADD_AFTER,
+ PF_CHANGE_REMOVE, PF_CHANGE_GET_TICKET };
+enum { PF_GET_NONE, PF_GET_CLR_CNTR };
+
+/*
+ * Note about PFTM_*: real indices into pf_rule.timeout[] come before
+ * PFTM_MAX, special cases afterwards. See pf_state_expires().
+ */
+enum { PFTM_TCP_FIRST_PACKET, PFTM_TCP_OPENING, PFTM_TCP_ESTABLISHED,
+ PFTM_TCP_CLOSING, PFTM_TCP_FIN_WAIT, PFTM_TCP_CLOSED,
+ PFTM_UDP_FIRST_PACKET, PFTM_UDP_SINGLE, PFTM_UDP_MULTIPLE,
+ PFTM_ICMP_FIRST_PACKET, PFTM_ICMP_ERROR_REPLY,
+ PFTM_OTHER_FIRST_PACKET, PFTM_OTHER_SINGLE,
+ PFTM_OTHER_MULTIPLE, PFTM_FRAG, PFTM_INTERVAL,
+ PFTM_ADAPTIVE_START, PFTM_ADAPTIVE_END, PFTM_SRC_NODE,
+ PFTM_TS_DIFF, PFTM_MAX, PFTM_PURGE, PFTM_UNLINKED,
+ PFTM_UNTIL_PACKET };
+
+/* PFTM default values */
+#define PFTM_TCP_FIRST_PACKET_VAL 120 /* First TCP packet */
+#define PFTM_TCP_OPENING_VAL 30 /* No response yet */
+#define PFTM_TCP_ESTABLISHED_VAL 24*60*60/* Established */
+#define PFTM_TCP_CLOSING_VAL 15 * 60 /* Half closed */
+#define PFTM_TCP_FIN_WAIT_VAL 45 /* Got both FINs */
+#define PFTM_TCP_CLOSED_VAL 90 /* Got a RST */
+#define PFTM_UDP_FIRST_PACKET_VAL 60 /* First UDP packet */
+#define PFTM_UDP_SINGLE_VAL 30 /* Unidirectional */
+#define PFTM_UDP_MULTIPLE_VAL 60 /* Bidirectional */
+#define PFTM_ICMP_FIRST_PACKET_VAL 20 /* First ICMP packet */
+#define PFTM_ICMP_ERROR_REPLY_VAL 10 /* Got error response */
+#define PFTM_OTHER_FIRST_PACKET_VAL 60 /* First packet */
+#define PFTM_OTHER_SINGLE_VAL 30 /* Unidirectional */
+#define PFTM_OTHER_MULTIPLE_VAL 60 /* Bidirectional */
+#define PFTM_FRAG_VAL 30 /* Fragment expire */
+#define PFTM_INTERVAL_VAL 10 /* Expire interval */
+#define PFTM_SRC_NODE_VAL 0 /* Source tracking */
+#define PFTM_TS_DIFF_VAL 30 /* Allowed TS diff */
+
+enum { PF_NOPFROUTE, PF_FASTROUTE, PF_ROUTETO, PF_DUPTO, PF_REPLYTO };
+enum { PF_LIMIT_STATES, PF_LIMIT_SRC_NODES, PF_LIMIT_FRAGS,
+ PF_LIMIT_TABLES, PF_LIMIT_TABLE_ENTRIES, PF_LIMIT_MAX };
+#define PF_POOL_IDMASK 0x0f
+enum { PF_POOL_NONE, PF_POOL_BITMASK, PF_POOL_RANDOM,
+ PF_POOL_SRCHASH, PF_POOL_ROUNDROBIN };
+enum { PF_ADDR_ADDRMASK, PF_ADDR_NOROUTE, PF_ADDR_DYNIFTL,
+ PF_ADDR_TABLE, PF_ADDR_RTLABEL, PF_ADDR_URPFFAILED };
+#define PF_POOL_TYPEMASK 0x0f
+#define PF_POOL_STICKYADDR 0x20
+#define PF_WSCALE_FLAG 0x80
+#define PF_WSCALE_MASK 0x0f
+
+#define PF_LOG 0x01
+#define PF_LOG_ALL 0x02
+#define PF_LOG_SOCKET_LOOKUP 0x04
+
+struct pf_addr {
+ union {
+ struct in_addr v4;
+ struct in6_addr v6;
+ u_int8_t addr8[16];
+ u_int16_t addr16[8];
+ u_int32_t addr32[4];
+ } pfa; /* 128-bit address */
+#define v4 pfa.v4
+#define v6 pfa.v6
+#define addr8 pfa.addr8
+#define addr16 pfa.addr16
+#define addr32 pfa.addr32
+};
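+
+/*
+ * Layout sketch: the macros above alias one 128-bit buffer, so a v4
+ * address always occupies the first 32-bit word.
+ */
+#if 0
+ struct pf_addr a;
+
+ a.v4.s_addr = htonl(0x7f000001); /* 127.0.0.1 */
+ /* a.addr32[0] == a.v4.s_addr; addr32[1..3] are used for v6 only */
+#endif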
+
+#define PF_TABLE_NAME_SIZE 32
+
+#define PFI_AFLAG_NETWORK 0x01
+#define PFI_AFLAG_BROADCAST 0x02
+#define PFI_AFLAG_PEER 0x04
+#define PFI_AFLAG_MODEMASK 0x07
+#define PFI_AFLAG_NOALIAS 0x08
+
+struct pf_addr_wrap {
+ union {
+ struct {
+ struct pf_addr addr;
+ struct pf_addr mask;
+ } a;
+ char ifname[IFNAMSIZ];
+ char tblname[PF_TABLE_NAME_SIZE];
+#ifdef __FreeBSD__
+#define RTLABEL_LEN 32
+#endif
+ char rtlabelname[RTLABEL_LEN];
+ u_int32_t rtlabel;
+ } v;
+ union {
+ struct pfi_dynaddr *dyn;
+ struct pfr_ktable *tbl;
+ int dyncnt;
+ int tblcnt;
+ } p;
+ u_int8_t type; /* PF_ADDR_* */
+ u_int8_t iflags; /* PFI_AFLAG_* */
+};
+
+#ifdef _KERNEL
+
+struct pfi_dynaddr {
+ TAILQ_ENTRY(pfi_dynaddr) entry;
+ struct pf_addr pfid_addr4;
+ struct pf_addr pfid_mask4;
+ struct pf_addr pfid_addr6;
+ struct pf_addr pfid_mask6;
+ struct pfr_ktable *pfid_kt;
+ struct pfi_kif *pfid_kif;
+ void *pfid_hook_cookie;
+ int pfid_net; /* mask or 128 */
+ int pfid_acnt4; /* address count IPv4 */
+ int pfid_acnt6; /* address count IPv6 */
+ sa_family_t pfid_af; /* rule af */
+ u_int8_t pfid_iflags; /* PFI_AFLAG_* */
+};
+
+/*
+ * Address manipulation macros
+ */
+
+#ifdef __FreeBSD__
+#define splsoftnet() splnet()
+
+#define HTONL(x) (x) = htonl((__uint32_t)(x))
+#define HTONS(x) (x) = htons((__uint16_t)(x))
+#define NTOHL(x) (x) = ntohl((__uint32_t)(x))
+#define NTOHS(x) (x) = ntohs((__uint16_t)(x))
+
+#define PF_NAME "pf"
+
+#define PR_NOWAIT M_NOWAIT
+#define pool_get(p, f) uma_zalloc(*(p), (f))
+#define pool_put(p, o) uma_zfree(*(p), (o))
+
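+/*
+ * Compat sketch: the pool(9) wrappers above map the unmodified OpenBSD
+ * code onto uma(9) zones, e.g. for the table-entry zone declared later
+ * in this header:
+ */
+#if 0
+ struct pfr_kentry *p;
+
+ p = pool_get(&pfr_kentry_pl, PR_NOWAIT); /* uma_zalloc() */
+ if (p != NULL)
+  pool_put(&pfr_kentry_pl, p); /* uma_zfree() */
+#endif
+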
+#define UMA_CREATE(var, type, desc) \
+ var = uma_zcreate(desc, sizeof(type), \
+ NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); \
+ if (var == NULL) break
+#define UMA_DESTROY(var) \
+ if(var) uma_zdestroy(var)
+
+extern struct mtx pf_task_mtx;
+
+#define PF_ASSERT(h) mtx_assert(&pf_task_mtx, (h))
+
+#define PF_LOCK() do { \
+ PF_ASSERT(MA_NOTOWNED); \
+ mtx_lock(&pf_task_mtx); \
+} while(0)
+#define PF_UNLOCK() do { \
+ PF_ASSERT(MA_OWNED); \
+ mtx_unlock(&pf_task_mtx); \
+} while(0)
+
+#define PF_COPYIN(uaddr, kaddr, len, r) do { \
+ PF_UNLOCK(); \
+ r = copyin((uaddr), (kaddr), (len)); \
+ PF_LOCK(); \
+} while(0)
+
+#define PF_COPYOUT(kaddr, uaddr, len, r) do { \
+ PF_UNLOCK(); \
+ r = copyout((kaddr), (uaddr), (len)); \
+ PF_LOCK(); \
+} while(0)
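+
+/*
+ * Pattern sketch: copyin()/copyout() may sleep, so the wrappers above
+ * drop pf_task_mtx around the copy and retake it afterwards
+ * (uaddr and kbuf are hypothetical).
+ */
+#if 0
+ int error;
+
+ PF_LOCK();
+ PF_COPYIN(uaddr, &kbuf, sizeof(kbuf), error);
+ if (error == 0)
+  /* kbuf is valid and the pf lock is held again */ ;
+ PF_UNLOCK();
+#endif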
+
+extern void init_pf_mutex(void);
+extern void destroy_pf_mutex(void);
+
+#define PF_MODVER 1
+#define PFLOG_MODVER 1
+#define PFSYNC_MODVER 1
+
+#define PFLOG_MINVER 1
+#define PFLOG_PREFVER PFLOG_MODVER
+#define PFLOG_MAXVER 1
+#define PFSYNC_MINVER 1
+#define PFSYNC_PREFVER PFSYNC_MODVER
+#define PFSYNC_MAXVER 1
+#endif /* __FreeBSD__ */
+
+#ifdef INET
+#ifndef INET6
+#define PF_INET_ONLY
+#endif /* ! INET6 */
+#endif /* INET */
+
+#ifdef INET6
+#ifndef INET
+#define PF_INET6_ONLY
+#endif /* ! INET */
+#endif /* INET6 */
+
+#ifdef INET
+#ifdef INET6
+#define PF_INET_INET6
+#endif /* INET6 */
+#endif /* INET */
+
+#else
+
+#define PF_INET_INET6
+
+#endif /* _KERNEL */
+
+/* Both IPv4 and IPv6 */
+#ifdef PF_INET_INET6
+
+#define PF_AEQ(a, b, c) \
+ ((c == AF_INET && (a)->addr32[0] == (b)->addr32[0]) || \
+ ((a)->addr32[3] == (b)->addr32[3] && \
+ (a)->addr32[2] == (b)->addr32[2] && \
+ (a)->addr32[1] == (b)->addr32[1] && \
+ (a)->addr32[0] == (b)->addr32[0])) \
+
+#define PF_ANEQ(a, b, c) \
+ ((c == AF_INET && (a)->addr32[0] != (b)->addr32[0]) || \
+ ((a)->addr32[3] != (b)->addr32[3] || \
+ (a)->addr32[2] != (b)->addr32[2] || \
+ (a)->addr32[1] != (b)->addr32[1] || \
+ (a)->addr32[0] != (b)->addr32[0])) \
+
+#define PF_AZERO(a, c) \
+ ((c == AF_INET && !(a)->addr32[0]) || \
+ (!(a)->addr32[0] && !(a)->addr32[1] && \
+ !(a)->addr32[2] && !(a)->addr32[3] )) \
+
+#define PF_MATCHA(n, a, m, b, f) \
+ pf_match_addr(n, a, m, b, f)
+
+#define PF_ACPY(a, b, f) \
+ pf_addrcpy(a, b, f)
+
+#define PF_AINC(a, f) \
+ pf_addr_inc(a, f)
+
+#define PF_POOLMASK(a, b, c, d, f) \
+ pf_poolmask(a, b, c, d, f)
+
+#else
+
+/* Just IPv6 */
+
+#ifdef PF_INET6_ONLY
+
+#define PF_AEQ(a, b, c) \
+ ((a)->addr32[3] == (b)->addr32[3] && \
+ (a)->addr32[2] == (b)->addr32[2] && \
+ (a)->addr32[1] == (b)->addr32[1] && \
+ (a)->addr32[0] == (b)->addr32[0]) \
+
+#define PF_ANEQ(a, b, c) \
+ ((a)->addr32[3] != (b)->addr32[3] || \
+ (a)->addr32[2] != (b)->addr32[2] || \
+ (a)->addr32[1] != (b)->addr32[1] || \
+ (a)->addr32[0] != (b)->addr32[0]) \
+
+#define PF_AZERO(a, c) \
+ (!(a)->addr32[0] && \
+ !(a)->addr32[1] && \
+ !(a)->addr32[2] && \
+ !(a)->addr32[3] ) \
+
+#define PF_MATCHA(n, a, m, b, f) \
+ pf_match_addr(n, a, m, b, f)
+
+#define PF_ACPY(a, b, f) \
+ pf_addrcpy(a, b, f)
+
+#define PF_AINC(a, f) \
+ pf_addr_inc(a, f)
+
+#define PF_POOLMASK(a, b, c, d, f) \
+ pf_poolmask(a, b, c, d, f)
+
+#else
+
+/* Just IPv4 */
+#ifdef PF_INET_ONLY
+
+#define PF_AEQ(a, b, c) \
+ ((a)->addr32[0] == (b)->addr32[0])
+
+#define PF_ANEQ(a, b, c) \
+ ((a)->addr32[0] != (b)->addr32[0])
+
+#define PF_AZERO(a, c) \
+ (!(a)->addr32[0])
+
+#define PF_MATCHA(n, a, m, b, f) \
+ pf_match_addr(n, a, m, b, f)
+
+#define PF_ACPY(a, b, f) \
+ (a)->v4.s_addr = (b)->v4.s_addr
+
+#define PF_AINC(a, f) \
+ do { \
+ (a)->addr32[0] = htonl(ntohl((a)->addr32[0]) + 1); \
+ } while (0)
+
+#define PF_POOLMASK(a, b, c, d, f) \
+ do { \
+ (a)->addr32[0] = ((b)->addr32[0] & (c)->addr32[0]) | \
+ (((c)->addr32[0] ^ 0xffffffff ) & (d)->addr32[0]); \
+ } while (0)
+
+#endif /* PF_INET_ONLY */
+#endif /* PF_INET6_ONLY */
+#endif /* PF_INET_INET6 */
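+
+/*
+ * Semantics sketch: every comparison macro takes the address family as
+ * its last argument; under AF_INET only addr32[0] is significant.
+ */
+#if 0
+ struct pf_addr a, b;
+
+ if (PF_AEQ(&a, &b, AF_INET))
+  /* the v4 addresses match */ ;
+ PF_ACPY(&a, &b, AF_INET6); /* copies all 128 bits */
+#endif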
+
+#define PF_MISMATCHAW(aw, x, af, neg, ifp) \
+ ( \
+ (((aw)->type == PF_ADDR_NOROUTE && \
+ pf_routable((x), (af), NULL)) || \
+ (((aw)->type == PF_ADDR_URPFFAILED && (ifp) != NULL && \
+ pf_routable((x), (af), (ifp))) || \
+ ((aw)->type == PF_ADDR_RTLABEL && \
+ !pf_rtlabel_match((x), (af), (aw))) || \
+ ((aw)->type == PF_ADDR_TABLE && \
+ !pfr_match_addr((aw)->p.tbl, (x), (af))) || \
+ ((aw)->type == PF_ADDR_DYNIFTL && \
+ !pfi_match_addr((aw)->p.dyn, (x), (af))) || \
+ ((aw)->type == PF_ADDR_ADDRMASK && \
+ !PF_AZERO(&(aw)->v.a.mask, (af)) && \
+ !PF_MATCHA(0, &(aw)->v.a.addr, \
+ &(aw)->v.a.mask, (x), (af))))) != \
+ (neg) \
+ )
+
+
+struct pf_rule_uid {
+ uid_t uid[2];
+ u_int8_t op;
+};
+
+struct pf_rule_gid {
+ gid_t gid[2];
+ u_int8_t op;
+};
+
+struct pf_rule_addr {
+ struct pf_addr_wrap addr;
+ u_int16_t port[2];
+ u_int8_t neg;
+ u_int8_t port_op;
+};
+
+struct pf_pooladdr {
+ struct pf_addr_wrap addr;
+ TAILQ_ENTRY(pf_pooladdr) entries;
+ char ifname[IFNAMSIZ];
+ struct pfi_kif *kif;
+};
+
+TAILQ_HEAD(pf_palist, pf_pooladdr);
+
+struct pf_poolhashkey {
+ union {
+ u_int8_t key8[16];
+ u_int16_t key16[8];
+ u_int32_t key32[4];
+ } pfk; /* 128-bit hash key */
+#define key8 pfk.key8
+#define key16 pfk.key16
+#define key32 pfk.key32
+};
+
+struct pf_pool {
+ struct pf_palist list;
+ struct pf_pooladdr *cur;
+ struct pf_poolhashkey key;
+ struct pf_addr counter;
+ int tblidx;
+ u_int16_t proxy_port[2];
+ u_int8_t port_op;
+ u_int8_t opts;
+};
+
+
+/* A packed Operating System description for fingerprinting */
+typedef u_int32_t pf_osfp_t;
+#define PF_OSFP_ANY ((pf_osfp_t)0)
+#define PF_OSFP_UNKNOWN ((pf_osfp_t)-1)
+#define PF_OSFP_NOMATCH ((pf_osfp_t)-2)
+
+struct pf_osfp_entry {
+ SLIST_ENTRY(pf_osfp_entry) fp_entry;
+ pf_osfp_t fp_os;
+ int fp_enflags;
+#define PF_OSFP_EXPANDED 0x001 /* expanded entry */
+#define PF_OSFP_GENERIC 0x002 /* generic signature */
+#define PF_OSFP_NODETAIL 0x004 /* no p0f details */
+#define PF_OSFP_LEN 32
+ char fp_class_nm[PF_OSFP_LEN];
+ char fp_version_nm[PF_OSFP_LEN];
+ char fp_subtype_nm[PF_OSFP_LEN];
+};
+#define PF_OSFP_ENTRY_EQ(a, b) \
+ ((a)->fp_os == (b)->fp_os && \
+ memcmp((a)->fp_class_nm, (b)->fp_class_nm, PF_OSFP_LEN) == 0 && \
+ memcmp((a)->fp_version_nm, (b)->fp_version_nm, PF_OSFP_LEN) == 0 && \
+ memcmp((a)->fp_subtype_nm, (b)->fp_subtype_nm, PF_OSFP_LEN) == 0)
+
+/* handle pf_osfp_t packing */
+#define _FP_RESERVED_BIT 1 /* For the special negative #defines */
+#define _FP_UNUSED_BITS 1
+#define _FP_CLASS_BITS 10 /* OS Class (Windows, Linux) */
+#define _FP_VERSION_BITS 10 /* OS version (95, 98, NT, 2.4.54, 3.2) */
+#define _FP_SUBTYPE_BITS 10 /* patch level (NT SP4, SP3, ECN patch) */
+#define PF_OSFP_UNPACK(osfp, class, version, subtype) do { \
+ (class) = ((osfp) >> (_FP_VERSION_BITS+_FP_SUBTYPE_BITS)) & \
+ ((1 << _FP_CLASS_BITS) - 1); \
+ (version) = ((osfp) >> _FP_SUBTYPE_BITS) & \
+ ((1 << _FP_VERSION_BITS) - 1);\
+ (subtype) = (osfp) & ((1 << _FP_SUBTYPE_BITS) - 1); \
+} while(0)
+#define PF_OSFP_PACK(osfp, class, version, subtype) do { \
+ (osfp) = ((class) & ((1 << _FP_CLASS_BITS) - 1)) << (_FP_VERSION_BITS \
+ + _FP_SUBTYPE_BITS); \
+ (osfp) |= ((version) & ((1 << _FP_VERSION_BITS) - 1)) << \
+ _FP_SUBTYPE_BITS; \
+ (osfp) |= (subtype) & ((1 << _FP_SUBTYPE_BITS) - 1); \
+} while(0)
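+
+/*
+ * Round-trip sketch (hypothetical values): class/version/subtype pack
+ * into the low 30 bits of a pf_osfp_t and unpack losslessly as long as
+ * each field fits in its 10 bits.
+ */
+#if 0
+ pf_osfp_t fp;
+ int class, version, subtype;
+
+ PF_OSFP_PACK(fp, 3, 7, 1);
+ PF_OSFP_UNPACK(fp, class, version, subtype);
+ /* class == 3, version == 7, subtype == 1 */
+#endif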
+
+/* the fingerprint of an OS's TCP SYN packet */
+typedef u_int64_t pf_tcpopts_t;
+struct pf_os_fingerprint {
+ SLIST_HEAD(pf_osfp_enlist, pf_osfp_entry) fp_oses; /* list of matches */
+ pf_tcpopts_t fp_tcpopts; /* packed TCP options */
+ u_int16_t fp_wsize; /* TCP window size */
+ u_int16_t fp_psize; /* ip->ip_len */
+ u_int16_t fp_mss; /* TCP MSS */
+ u_int16_t fp_flags;
+#define PF_OSFP_WSIZE_MOD 0x0001 /* Window modulus */
+#define PF_OSFP_WSIZE_DC 0x0002 /* Window don't care */
+#define PF_OSFP_WSIZE_MSS 0x0004 /* Window multiple of MSS */
+#define PF_OSFP_WSIZE_MTU 0x0008 /* Window multiple of MTU */
+#define PF_OSFP_PSIZE_MOD 0x0010 /* packet size modulus */
+#define PF_OSFP_PSIZE_DC 0x0020 /* packet size don't care */
+#define PF_OSFP_WSCALE 0x0040 /* TCP window scaling */
+#define PF_OSFP_WSCALE_MOD 0x0080 /* TCP window scale modulus */
+#define PF_OSFP_WSCALE_DC 0x0100 /* TCP window scale don't-care */
+#define PF_OSFP_MSS 0x0200 /* TCP MSS */
+#define PF_OSFP_MSS_MOD 0x0400 /* TCP MSS modulus */
+#define PF_OSFP_MSS_DC 0x0800 /* TCP MSS don't-care */
+#define PF_OSFP_DF 0x1000 /* IPv4 don't fragment bit */
+#define PF_OSFP_TS0 0x2000 /* Zero timestamp */
+#define PF_OSFP_INET6 0x4000 /* IPv6 */
+ u_int8_t fp_optcnt; /* TCP option count */
+ u_int8_t fp_wscale; /* TCP window scaling */
+ u_int8_t fp_ttl; /* IPv4 TTL */
+#define PF_OSFP_MAXTTL_OFFSET 40
+/* TCP options packing */
+#define PF_OSFP_TCPOPT_NOP 0x0 /* TCP NOP option */
+#define PF_OSFP_TCPOPT_WSCALE 0x1 /* TCP window scaling option */
+#define PF_OSFP_TCPOPT_MSS 0x2 /* TCP max segment size opt */
+#define PF_OSFP_TCPOPT_SACK 0x3 /* TCP SACK OK option */
+#define PF_OSFP_TCPOPT_TS 0x4 /* TCP timestamp option */
+#define PF_OSFP_TCPOPT_BITS 3 /* bits used by each option */
+#define PF_OSFP_MAX_OPTS \
+ (sizeof(((struct pf_os_fingerprint *)0)->fp_tcpopts) * 8) \
+ / PF_OSFP_TCPOPT_BITS
+
+ SLIST_ENTRY(pf_os_fingerprint) fp_next;
+};
+
+struct pf_osfp_ioctl {
+ struct pf_osfp_entry fp_os;
+ pf_tcpopts_t fp_tcpopts; /* packed TCP options */
+ u_int16_t fp_wsize; /* TCP window size */
+ u_int16_t fp_psize; /* ip->ip_len */
+ u_int16_t fp_mss; /* TCP MSS */
+ u_int16_t fp_flags;
+ u_int8_t fp_optcnt; /* TCP option count */
+ u_int8_t fp_wscale; /* TCP window scaling */
+ u_int8_t fp_ttl; /* IPv4 TTL */
+
+ int fp_getnum; /* DIOCOSFPGET number */
+};
+
+
+union pf_rule_ptr {
+ struct pf_rule *ptr;
+ u_int32_t nr;
+};
+
+#define PF_ANCHOR_NAME_SIZE 64
+
+struct pf_rule {
+ struct pf_rule_addr src;
+ struct pf_rule_addr dst;
+#define PF_SKIP_IFP 0
+#define PF_SKIP_DIR 1
+#define PF_SKIP_AF 2
+#define PF_SKIP_PROTO 3
+#define PF_SKIP_SRC_ADDR 4
+#define PF_SKIP_SRC_PORT 5
+#define PF_SKIP_DST_ADDR 6
+#define PF_SKIP_DST_PORT 7
+#define PF_SKIP_COUNT 8
+ union pf_rule_ptr skip[PF_SKIP_COUNT];
+#define PF_RULE_LABEL_SIZE 64
+ char label[PF_RULE_LABEL_SIZE];
+#define PF_QNAME_SIZE 64
+ char ifname[IFNAMSIZ];
+ char qname[PF_QNAME_SIZE];
+ char pqname[PF_QNAME_SIZE];
+#define PF_TAG_NAME_SIZE 64
+ char tagname[PF_TAG_NAME_SIZE];
+ char match_tagname[PF_TAG_NAME_SIZE];
+
+ char overload_tblname[PF_TABLE_NAME_SIZE];
+
+ TAILQ_ENTRY(pf_rule) entries;
+ struct pf_pool rpool;
+
+ u_int64_t evaluations;
+ u_int64_t packets[2];
+ u_int64_t bytes[2];
+
+ struct pfi_kif *kif;
+ struct pf_anchor *anchor;
+ struct pfr_ktable *overload_tbl;
+
+ pf_osfp_t os_fingerprint;
+
+ int rtableid;
+ u_int32_t timeout[PFTM_MAX];
+ u_int32_t states;
+ u_int32_t max_states;
+ u_int32_t src_nodes;
+ u_int32_t max_src_nodes;
+ u_int32_t max_src_states;
+ u_int32_t spare1; /* netgraph */
+ u_int32_t max_src_conn;
+ struct {
+ u_int32_t limit;
+ u_int32_t seconds;
+ } max_src_conn_rate;
+ u_int32_t qid;
+ u_int32_t pqid;
+ u_int32_t rt_listid;
+ u_int32_t nr;
+ u_int32_t prob;
+ uid_t cuid;
+ pid_t cpid;
+
+ u_int16_t return_icmp;
+ u_int16_t return_icmp6;
+ u_int16_t max_mss;
+ u_int16_t tag;
+ u_int16_t match_tag;
+ u_int16_t spare2; /* netgraph */
+
+ struct pf_rule_uid uid;
+ struct pf_rule_gid gid;
+
+ u_int32_t rule_flag;
+ u_int8_t action;
+ u_int8_t direction;
+ u_int8_t log;
+ u_int8_t logif;
+ u_int8_t quick;
+ u_int8_t ifnot;
+ u_int8_t match_tag_not;
+ u_int8_t natpass;
+
+#define PF_STATE_NORMAL 0x1
+#define PF_STATE_MODULATE 0x2
+#define PF_STATE_SYNPROXY 0x3
+ u_int8_t keep_state;
+ sa_family_t af;
+ u_int8_t proto;
+ u_int8_t type;
+ u_int8_t code;
+ u_int8_t flags;
+ u_int8_t flagset;
+ u_int8_t min_ttl;
+ u_int8_t allow_opts;
+ u_int8_t rt;
+ u_int8_t return_ttl;
+ u_int8_t tos;
+ u_int8_t anchor_relative;
+ u_int8_t anchor_wildcard;
+
+#define PF_FLUSH 0x01
+#define PF_FLUSH_GLOBAL 0x02
+ u_int8_t flush;
+};
+
+/* rule flags */
+#define PFRULE_DROP 0x0000
+#define PFRULE_RETURNRST 0x0001
+#define PFRULE_FRAGMENT 0x0002
+#define PFRULE_RETURNICMP 0x0004
+#define PFRULE_RETURN 0x0008
+#define PFRULE_NOSYNC 0x0010
+#define PFRULE_SRCTRACK 0x0020 /* track source states */
+#define PFRULE_RULESRCTRACK 0x0040 /* per rule */
+
+/* scrub flags */
+#define PFRULE_NODF 0x0100
+#define PFRULE_FRAGCROP 0x0200 /* non-buffering frag cache */
+#define PFRULE_FRAGDROP 0x0400 /* drop funny fragments */
+#define PFRULE_RANDOMID 0x0800
+#define PFRULE_REASSEMBLE_TCP 0x1000
+
+/* rule flags again */
+#define PFRULE_IFBOUND 0x00010000 /* if-bound */
+#define PFRULE_STATESLOPPY 0x00020000 /* sloppy state tracking */
+
+#define PFSTATE_HIWAT 10000 /* default state table size */
+#define PFSTATE_ADAPT_START 6000 /* default adaptive timeout start */
+#define PFSTATE_ADAPT_END 12000 /* default adaptive timeout end */
+
+
+struct pf_threshold {
+ u_int32_t limit;
+#define PF_THRESHOLD_MULT 1000
+#define PF_THRESHOLD_MAX 0xffffffff / PF_THRESHOLD_MULT
+ u_int32_t seconds;
+ u_int32_t count;
+ u_int32_t last;
+};
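+
+/*
+ * Rate-math sketch: count is fixed point (one event adds
+ * PF_THRESHOLD_MULT) and decays linearly across the window, roughly as
+ * the state-tracking code applies it:
+ */
+#if 0
+ u_int32_t t = time_second, diff = t - threshold->last;
+
+ if (diff >= threshold->seconds)
+  threshold->count = 0;
+ else
+  threshold->count -= threshold->count * diff / threshold->seconds;
+ threshold->count += PF_THRESHOLD_MULT;
+ threshold->last = t;
+ /* the limit is exceeded when count > limit */
+#endif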
+
+struct pf_src_node {
+ RB_ENTRY(pf_src_node) entry;
+ struct pf_addr addr;
+ struct pf_addr raddr;
+ union pf_rule_ptr rule;
+ struct pfi_kif *kif;
+ u_int64_t bytes[2];
+ u_int64_t packets[2];
+ u_int32_t states;
+ u_int32_t conn;
+ struct pf_threshold conn_rate;
+ u_int32_t creation;
+ u_int32_t expire;
+ sa_family_t af;
+ u_int8_t ruletype;
+};
+
+#define PFSNODE_HIWAT 10000 /* default source node table size */
+
+struct pf_state_scrub {
+ struct timeval pfss_last; /* time received last packet */
+ u_int32_t pfss_tsecr; /* last echoed timestamp */
+ u_int32_t pfss_tsval; /* largest timestamp */
+ u_int32_t pfss_tsval0; /* original timestamp */
+ u_int16_t pfss_flags;
+#define PFSS_TIMESTAMP 0x0001 /* modulate timestamp */
+#define PFSS_PAWS 0x0010 /* stricter PAWS checks */
+#define PFSS_PAWS_IDLED 0x0020 /* was idle too long. no PAWS */
+#define PFSS_DATA_TS 0x0040 /* timestamp on data packets */
+#define PFSS_DATA_NOTS 0x0080 /* no timestamp on data packets */
+ u_int8_t pfss_ttl; /* stashed TTL */
+ u_int8_t pad;
+ u_int32_t pfss_ts_mod; /* timestamp modulation */
+};
+
+struct pf_state_host {
+ struct pf_addr addr;
+ u_int16_t port;
+ u_int16_t pad;
+};
+
+struct pf_state_peer {
+ u_int32_t seqlo; /* Max sequence number sent */
+ u_int32_t seqhi; /* Max the other end ACKd + win */
+ u_int32_t seqdiff; /* Sequence number modulator */
+ u_int16_t max_win; /* largest window (pre scaling) */
+ u_int8_t state; /* active state level */
+ u_int8_t wscale; /* window scaling factor */
+ u_int16_t mss; /* Maximum segment size option */
+ u_int8_t tcp_est; /* Did we reach TCPS_ESTABLISHED */
+ struct pf_state_scrub *scrub; /* state is scrubbed */
+ u_int8_t pad[3];
+};
+
+TAILQ_HEAD(pf_state_queue, pf_state);
+
+/* keep synced with struct pf_state, used in RB_FIND */
+struct pf_state_cmp {
+ u_int64_t id;
+ u_int32_t creatorid;
+ struct pf_state_host lan;
+ struct pf_state_host gwy;
+ struct pf_state_host ext;
+ sa_family_t af;
+ u_int8_t proto;
+ u_int8_t direction;
+ u_int8_t pad;
+};
+
+struct pf_state {
+ u_int64_t id;
+ u_int32_t creatorid;
+ struct pf_state_host lan;
+ struct pf_state_host gwy;
+ struct pf_state_host ext;
+ sa_family_t af;
+ u_int8_t proto;
+ u_int8_t direction;
+#ifdef __FreeBSD__
+ u_int8_t local_flags;
+#define PFSTATE_EXPIRING 0x01
+#else
+ u_int8_t pad;
+#endif
+ u_int8_t log;
+ u_int8_t state_flags;
+#define PFSTATE_ALLOWOPTS 0x01
+#define PFSTATE_SLOPPY 0x02
+ u_int8_t timeout;
+ u_int8_t sync_flags;
+#define PFSTATE_NOSYNC 0x01
+#define PFSTATE_FROMSYNC 0x02
+#define PFSTATE_STALE 0x04
+ union {
+ struct {
+ RB_ENTRY(pf_state) entry_lan_ext;
+ RB_ENTRY(pf_state) entry_ext_gwy;
+ RB_ENTRY(pf_state) entry_id;
+ TAILQ_ENTRY(pf_state) entry_list;
+ struct pfi_kif *kif;
+ } s;
+ char ifname[IFNAMSIZ];
+ } u;
+ struct pf_state_peer src;
+ struct pf_state_peer dst;
+ union pf_rule_ptr rule;
+ union pf_rule_ptr anchor;
+ union pf_rule_ptr nat_rule;
+ struct pf_addr rt_addr;
+ struct pfi_kif *rt_kif;
+ struct pf_src_node *src_node;
+ struct pf_src_node *nat_src_node;
+ u_int64_t packets[2];
+ u_int64_t bytes[2];
+ u_int32_t creation;
+ u_int32_t expire;
+ u_int32_t pfsync_time;
+ u_int16_t tag;
+};
+
+TAILQ_HEAD(pf_rulequeue, pf_rule);
+
+struct pf_anchor;
+
+struct pf_ruleset {
+ struct {
+ struct pf_rulequeue queues[2];
+ struct {
+ struct pf_rulequeue *ptr;
+ struct pf_rule **ptr_array;
+ u_int32_t rcount;
+ u_int32_t ticket;
+ int open;
+ } active, inactive;
+ } rules[PF_RULESET_MAX];
+ struct pf_anchor *anchor;
+ u_int32_t tticket;
+ int tables;
+ int topen;
+};
+
+RB_HEAD(pf_anchor_global, pf_anchor);
+RB_HEAD(pf_anchor_node, pf_anchor);
+struct pf_anchor {
+ RB_ENTRY(pf_anchor) entry_global;
+ RB_ENTRY(pf_anchor) entry_node;
+ struct pf_anchor *parent;
+ struct pf_anchor_node children;
+ char name[PF_ANCHOR_NAME_SIZE];
+ char path[MAXPATHLEN];
+ struct pf_ruleset ruleset;
+ int refcnt; /* anchor rules */
+ int match;
+};
+RB_PROTOTYPE(pf_anchor_global, pf_anchor, entry_global, pf_anchor_compare);
+RB_PROTOTYPE(pf_anchor_node, pf_anchor, entry_node, pf_anchor_compare);
+
+#define PF_RESERVED_ANCHOR "_pf"
+
+#define PFR_TFLAG_PERSIST 0x00000001
+#define PFR_TFLAG_CONST 0x00000002
+#define PFR_TFLAG_ACTIVE 0x00000004
+#define PFR_TFLAG_INACTIVE 0x00000008
+#define PFR_TFLAG_REFERENCED 0x00000010
+#define PFR_TFLAG_REFDANCHOR 0x00000020
+#define PFR_TFLAG_USRMASK 0x00000003
+#define PFR_TFLAG_SETMASK 0x0000003C
+#define PFR_TFLAG_ALLMASK 0x0000003F
+
+struct pfr_table {
+ char pfrt_anchor[MAXPATHLEN];
+ char pfrt_name[PF_TABLE_NAME_SIZE];
+ u_int32_t pfrt_flags;
+ u_int8_t pfrt_fback;
+};
+
+enum { PFR_FB_NONE, PFR_FB_MATCH, PFR_FB_ADDED, PFR_FB_DELETED,
+ PFR_FB_CHANGED, PFR_FB_CLEARED, PFR_FB_DUPLICATE,
+ PFR_FB_NOTMATCH, PFR_FB_CONFLICT, PFR_FB_MAX };
+
+struct pfr_addr {
+ union {
+ struct in_addr _pfra_ip4addr;
+ struct in6_addr _pfra_ip6addr;
+ } pfra_u;
+ u_int8_t pfra_af;
+ u_int8_t pfra_net;
+ u_int8_t pfra_not;
+ u_int8_t pfra_fback;
+};
+#define pfra_ip4addr pfra_u._pfra_ip4addr
+#define pfra_ip6addr pfra_u._pfra_ip6addr
+
+enum { PFR_DIR_IN, PFR_DIR_OUT, PFR_DIR_MAX };
+enum { PFR_OP_BLOCK, PFR_OP_PASS, PFR_OP_ADDR_MAX, PFR_OP_TABLE_MAX };
+#define PFR_OP_XPASS PFR_OP_ADDR_MAX
+
+struct pfr_astats {
+ struct pfr_addr pfras_a;
+ u_int64_t pfras_packets[PFR_DIR_MAX][PFR_OP_ADDR_MAX];
+ u_int64_t pfras_bytes[PFR_DIR_MAX][PFR_OP_ADDR_MAX];
+ long pfras_tzero;
+};
+
+enum { PFR_REFCNT_RULE, PFR_REFCNT_ANCHOR, PFR_REFCNT_MAX };
+
+struct pfr_tstats {
+ struct pfr_table pfrts_t;
+ u_int64_t pfrts_packets[PFR_DIR_MAX][PFR_OP_TABLE_MAX];
+ u_int64_t pfrts_bytes[PFR_DIR_MAX][PFR_OP_TABLE_MAX];
+ u_int64_t pfrts_match;
+ u_int64_t pfrts_nomatch;
+ long pfrts_tzero;
+ int pfrts_cnt;
+ int pfrts_refcnt[PFR_REFCNT_MAX];
+};
+#define pfrts_name pfrts_t.pfrt_name
+#define pfrts_flags pfrts_t.pfrt_flags
+
+#ifndef _SOCKADDR_UNION_DEFINED
+#define _SOCKADDR_UNION_DEFINED
+union sockaddr_union {
+ struct sockaddr sa;
+ struct sockaddr_in sin;
+ struct sockaddr_in6 sin6;
+};
+#endif /* _SOCKADDR_UNION_DEFINED */
+
+SLIST_HEAD(pfr_kentryworkq, pfr_kentry);
+struct pfr_kentry {
+ struct radix_node pfrke_node[2];
+ union sockaddr_union pfrke_sa;
+ u_int64_t pfrke_packets[PFR_DIR_MAX][PFR_OP_ADDR_MAX];
+ u_int64_t pfrke_bytes[PFR_DIR_MAX][PFR_OP_ADDR_MAX];
+ SLIST_ENTRY(pfr_kentry) pfrke_workq;
+ long pfrke_tzero;
+ u_int8_t pfrke_af;
+ u_int8_t pfrke_net;
+ u_int8_t pfrke_not;
+ u_int8_t pfrke_mark;
+ u_int8_t pfrke_intrpool;
+};
+
+SLIST_HEAD(pfr_ktableworkq, pfr_ktable);
+RB_HEAD(pfr_ktablehead, pfr_ktable);
+struct pfr_ktable {
+ struct pfr_tstats pfrkt_ts;
+ RB_ENTRY(pfr_ktable) pfrkt_tree;
+ SLIST_ENTRY(pfr_ktable) pfrkt_workq;
+ struct radix_node_head *pfrkt_ip4;
+ struct radix_node_head *pfrkt_ip6;
+ struct pfr_ktable *pfrkt_shadow;
+ struct pfr_ktable *pfrkt_root;
+ struct pf_ruleset *pfrkt_rs;
+ long pfrkt_larg;
+ int pfrkt_nflags;
+};
+#define pfrkt_t pfrkt_ts.pfrts_t
+#define pfrkt_name pfrkt_t.pfrt_name
+#define pfrkt_anchor pfrkt_t.pfrt_anchor
+#define pfrkt_ruleset pfrkt_t.pfrt_ruleset
+#define pfrkt_flags pfrkt_t.pfrt_flags
+#define pfrkt_cnt pfrkt_ts.pfrts_cnt
+#define pfrkt_refcnt pfrkt_ts.pfrts_refcnt
+#define pfrkt_packets pfrkt_ts.pfrts_packets
+#define pfrkt_bytes pfrkt_ts.pfrts_bytes
+#define pfrkt_match pfrkt_ts.pfrts_match
+#define pfrkt_nomatch pfrkt_ts.pfrts_nomatch
+#define pfrkt_tzero pfrkt_ts.pfrts_tzero
+
+RB_HEAD(pf_state_tree_lan_ext, pf_state);
+RB_PROTOTYPE(pf_state_tree_lan_ext, pf_state,
+ u.s.entry_lan_ext, pf_state_compare_lan_ext);
+
+RB_HEAD(pf_state_tree_ext_gwy, pf_state);
+RB_PROTOTYPE(pf_state_tree_ext_gwy, pf_state,
+ u.s.entry_ext_gwy, pf_state_compare_ext_gwy);
+
+TAILQ_HEAD(pfi_statehead, pfi_kif);
+RB_HEAD(pfi_ifhead, pfi_kif);
+
+/* keep synced with pfi_kif, used in RB_FIND */
+struct pfi_kif_cmp {
+ char pfik_name[IFNAMSIZ];
+};
+
+struct pfi_kif {
+ char pfik_name[IFNAMSIZ];
+ RB_ENTRY(pfi_kif) pfik_tree;
+ u_int64_t pfik_packets[2][2][2];
+ u_int64_t pfik_bytes[2][2][2];
+ u_int32_t pfik_tzero;
+ int pfik_flags;
+ struct pf_state_tree_lan_ext pfik_lan_ext;
+ struct pf_state_tree_ext_gwy pfik_ext_gwy;
+ TAILQ_ENTRY(pfi_kif) pfik_w_states;
+#ifndef __FreeBSD__
+ void *pfik_ah_cookie;
+#endif
+ struct ifnet *pfik_ifp;
+ struct ifg_group *pfik_group;
+ int pfik_states;
+ int pfik_rules;
+ TAILQ_HEAD(, pfi_dynaddr) pfik_dynaddrs;
+};
+
+enum pfi_kif_refs {
+ PFI_KIF_REF_NONE,
+ PFI_KIF_REF_STATE,
+ PFI_KIF_REF_RULE
+};
+
+#define PFI_IFLAG_SKIP 0x0100 /* skip filtering on interface */
+/* XXX: revisit */
+#define PFI_IFLAG_SETABLE_MASK 0x0100 /* settable via DIOC{SET,CLR}IFFLAG */
+#define PFI_IFLAG_PLACEHOLDER 0x8000 /* placeholder group/interface */
+
+struct pf_pdesc {
+ struct {
+ int done;
+ uid_t uid;
+ gid_t gid;
+ pid_t pid;
+ } lookup;
+ u_int64_t tot_len; /* Make Mickey money */
+ union {
+ struct tcphdr *tcp;
+ struct udphdr *udp;
+ struct icmp *icmp;
+#ifdef INET6
+ struct icmp6_hdr *icmp6;
+#endif /* INET6 */
+ void *any;
+ } hdr;
+ struct pf_addr baddr; /* address before translation */
+ struct pf_addr naddr; /* address after translation */
+ struct pf_rule *nat_rule; /* nat/rdr rule applied to packet */
+ struct pf_addr *src;
+ struct pf_addr *dst;
+ struct ether_header
+ *eh;
+ struct pf_mtag *pf_mtag;
+ u_int16_t *ip_sum;
+ u_int32_t p_len; /* total length of payload */
+ u_int16_t flags; /* Let SCRUB trigger behavior in
+ * state code. Easier than tags */
+#define PFDESC_TCP_NORM 0x0001 /* TCP shall be statefully scrubbed */
+#define PFDESC_IP_REAS 0x0002 /* IP frags would've been reassembled */
+ sa_family_t af;
+ u_int8_t proto;
+ u_int8_t tos;
+};
+
+/* flags for RDR options */
+#define PF_DPORT_RANGE 0x01 /* Dest port uses range */
+#define PF_RPORT_RANGE 0x02 /* RDR'ed port uses range */
+
+/* Reason codes for passing/dropping a packet */
+#define PFRES_MATCH 0 /* Explicit match of a rule */
+#define PFRES_BADOFF 1 /* Bad offset for pull_hdr */
+#define PFRES_FRAG 2 /* Dropping following fragment */
+#define PFRES_SHORT 3 /* Dropping short packet */
+#define PFRES_NORM 4 /* Dropping by normalizer */
+#define PFRES_MEMORY 5 /* Dropped due to lacking mem */
+#define PFRES_TS 6 /* Bad TCP Timestamp (RFC1323) */
+#define PFRES_CONGEST 7 /* Congestion (of ipintrq) */
+#define PFRES_IPOPTIONS 8 /* IP option */
+#define PFRES_PROTCKSUM 9 /* Protocol checksum invalid */
+#define PFRES_BADSTATE 10 /* State mismatch */
+#define PFRES_STATEINS 11 /* State insertion failure */
+#define PFRES_MAXSTATES 12 /* State limit */
+#define PFRES_SRCLIMIT 13 /* Source node/conn limit */
+#define PFRES_SYNPROXY 14 /* SYN proxy */
+#define PFRES_MAX 15 /* total+1 */
+
+#define PFRES_NAMES { \
+ "match", \
+ "bad-offset", \
+ "fragment", \
+ "short", \
+ "normalize", \
+ "memory", \
+ "bad-timestamp", \
+ "congestion", \
+ "ip-option", \
+ "proto-cksum", \
+ "state-mismatch", \
+ "state-insert", \
+ "state-limit", \
+ "src-limit", \
+ "synproxy", \
+ NULL \
+}
+
+/* Counters for other things we want to keep track of */
+#define LCNT_STATES 0 /* states */
+#define LCNT_SRCSTATES 1 /* max-src-states */
+#define LCNT_SRCNODES 2 /* max-src-nodes */
+#define LCNT_SRCCONN 3 /* max-src-conn */
+#define LCNT_SRCCONNRATE 4 /* max-src-conn-rate */
+#define LCNT_OVERLOAD_TABLE 5 /* entry added to overload table */
+#define LCNT_OVERLOAD_FLUSH 6 /* state entries flushed */
+#define LCNT_MAX 7 /* total+1 */
+
+#define LCNT_NAMES { \
+ "max states per rule", \
+ "max-src-states", \
+ "max-src-nodes", \
+ "max-src-conn", \
+ "max-src-conn-rate", \
+ "overload table insertion", \
+ "overload flush states", \
+ NULL \
+}
+
+/* UDP state enumeration */
+#define PFUDPS_NO_TRAFFIC 0
+#define PFUDPS_SINGLE 1
+#define PFUDPS_MULTIPLE 2
+
+#define PFUDPS_NSTATES 3 /* number of state levels */
+
+#define PFUDPS_NAMES { \
+ "NO_TRAFFIC", \
+ "SINGLE", \
+ "MULTIPLE", \
+ NULL \
+}
+
+/* Other protocol state enumeration */
+#define PFOTHERS_NO_TRAFFIC 0
+#define PFOTHERS_SINGLE 1
+#define PFOTHERS_MULTIPLE 2
+
+#define PFOTHERS_NSTATES 3 /* number of state levels */
+
+#define PFOTHERS_NAMES { \
+ "NO_TRAFFIC", \
+ "SINGLE", \
+ "MULTIPLE", \
+ NULL \
+}
+
+#define FCNT_STATE_SEARCH 0
+#define FCNT_STATE_INSERT 1
+#define FCNT_STATE_REMOVALS 2
+#define FCNT_MAX 3
+
+#define SCNT_SRC_NODE_SEARCH 0
+#define SCNT_SRC_NODE_INSERT 1
+#define SCNT_SRC_NODE_REMOVALS 2
+#define SCNT_MAX 3
+
+#define ACTION_SET(a, x) \
+ do { \
+ if ((a) != NULL) \
+ *(a) = (x); \
+ } while (0)
+
+#define REASON_SET(a, x) \
+ do { \
+ if ((a) != NULL) \
+ *(a) = (x); \
+ if (x < PFRES_MAX) \
+ pf_status.counters[x]++; \
+ } while (0)
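+
+/*
+ * Usage sketch: rule evaluation reports its verdict through these;
+ * REASON_SET also bumps the matching pf_status counter.
+ */
+#if 0
+ u_short reason;
+ int action;
+
+ REASON_SET(&reason, PFRES_MEMORY);
+ ACTION_SET(&action, PF_DROP);
+#endif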
+
+struct pf_status {
+ u_int64_t counters[PFRES_MAX];
+ u_int64_t lcounters[LCNT_MAX]; /* limit counters */
+ u_int64_t fcounters[FCNT_MAX];
+ u_int64_t scounters[SCNT_MAX];
+ u_int64_t pcounters[2][2][3];
+ u_int64_t bcounters[2][2];
+ u_int64_t stateid;
+ u_int32_t running;
+ u_int32_t states;
+ u_int32_t src_nodes;
+ u_int32_t since;
+ u_int32_t debug;
+ u_int32_t hostid;
+ char ifname[IFNAMSIZ];
+ u_int8_t pf_chksum[PF_MD5_DIGEST_LENGTH];
+};
+
+struct cbq_opts {
+ u_int minburst;
+ u_int maxburst;
+ u_int pktsize;
+ u_int maxpktsize;
+ u_int ns_per_byte;
+ u_int maxidle;
+ int minidle;
+ u_int offtime;
+ int flags;
+};
+
+struct priq_opts {
+ int flags;
+};
+
+struct hfsc_opts {
+ /* real-time service curve */
+ u_int rtsc_m1; /* slope of the 1st segment in bps */
+ u_int rtsc_d; /* the x-projection of m1 in msec */
+ u_int rtsc_m2; /* slope of the 2nd segment in bps */
+ /* link-sharing service curve */
+ u_int lssc_m1;
+ u_int lssc_d;
+ u_int lssc_m2;
+ /* upper-limit service curve */
+ u_int ulsc_m1;
+ u_int ulsc_d;
+ u_int ulsc_m2;
+ int flags;
+};
+
+struct pf_altq {
+ char ifname[IFNAMSIZ];
+
+ void *altq_disc; /* discipline-specific state */
+ TAILQ_ENTRY(pf_altq) entries;
+
+ /* scheduler spec */
+ u_int8_t scheduler; /* scheduler type */
+ u_int16_t tbrsize; /* tokenbucket regulator size */
+ u_int32_t ifbandwidth; /* interface bandwidth */
+
+ /* queue spec */
+ char qname[PF_QNAME_SIZE]; /* queue name */
+ char parent[PF_QNAME_SIZE]; /* parent name */
+ u_int32_t parent_qid; /* parent queue id */
+ u_int32_t bandwidth; /* queue bandwidth */
+ u_int8_t priority; /* priority */
+#ifdef __FreeBSD__
+ u_int8_t local_flags; /* dynamic interface */
+#define PFALTQ_FLAG_IF_REMOVED 0x01
+#endif
+ u_int16_t qlimit; /* queue size limit */
+ u_int16_t flags; /* misc flags */
+ union {
+ struct cbq_opts cbq_opts;
+ struct priq_opts priq_opts;
+ struct hfsc_opts hfsc_opts;
+ } pq_u;
+
+ u_int32_t qid; /* return value */
+};
+
+#ifndef __FreeBSD__
+
+#define PF_TAG_GENERATED 0x01
+#define PF_TAG_FRAGCACHE 0x02
+#define PF_TAG_TRANSLATE_LOCALHOST 0x04
+
+struct pf_mtag {
+ void *hdr; /* saved hdr pos in mbuf, for ECN */
+ u_int rtableid; /* alternate routing table id */
+ u_int32_t qid; /* queue id */
+ u_int16_t tag; /* tag id */
+ u_int8_t flags;
+ u_int8_t routed;
+ sa_family_t af; /* for ECN */
+};
+#endif
+
+struct pf_tag {
+ u_int16_t tag; /* tag id */
+};
+
+struct pf_tagname {
+ TAILQ_ENTRY(pf_tagname) entries;
+ char name[PF_TAG_NAME_SIZE];
+ u_int16_t tag;
+ int ref;
+};
+
+#define PFFRAG_FRENT_HIWAT 5000 /* Number of fragment entries */
+#define PFFRAG_FRAG_HIWAT 1000 /* Number of fragmented packets */
+#define PFFRAG_FRCENT_HIWAT 50000 /* Number of fragment cache entries */
+#define PFFRAG_FRCACHE_HIWAT 10000 /* Number of fragment descriptors */
+
+#define PFR_KTABLE_HIWAT 1000 /* Number of tables */
+#define PFR_KENTRY_HIWAT 200000 /* Number of table entries */
+#define PFR_KENTRY_HIWAT_SMALL 100000 /* Number of table entries (tiny hosts) */
+
+/*
+ * ioctl parameter structures
+ */
+
+struct pfioc_pooladdr {
+ u_int32_t action;
+ u_int32_t ticket;
+ u_int32_t nr;
+ u_int32_t r_num;
+ u_int8_t r_action;
+ u_int8_t r_last;
+ u_int8_t af;
+ char anchor[MAXPATHLEN];
+ struct pf_pooladdr addr;
+};
+
+struct pfioc_rule {
+ u_int32_t action;
+ u_int32_t ticket;
+ u_int32_t pool_ticket;
+ u_int32_t nr;
+ char anchor[MAXPATHLEN];
+ char anchor_call[MAXPATHLEN];
+ struct pf_rule rule;
+};
+
+struct pfioc_natlook {
+ struct pf_addr saddr;
+ struct pf_addr daddr;
+ struct pf_addr rsaddr;
+ struct pf_addr rdaddr;
+ u_int16_t sport;
+ u_int16_t dport;
+ u_int16_t rsport;
+ u_int16_t rdport;
+ sa_family_t af;
+ u_int8_t proto;
+ u_int8_t direction;
+};
+
+struct pfioc_state {
+ u_int32_t nr;
+ struct pf_state state;
+};
+
+struct pfioc_src_node_kill {
+ /* XXX returns the number of src nodes killed in psnk_af */
+ sa_family_t psnk_af;
+ struct pf_rule_addr psnk_src;
+ struct pf_rule_addr psnk_dst;
+};
+
+struct pfioc_state_kill {
+ /* XXX returns the number of states killed in psk_af */
+ sa_family_t psk_af;
+ int psk_proto;
+ struct pf_rule_addr psk_src;
+ struct pf_rule_addr psk_dst;
+ char psk_ifname[IFNAMSIZ];
+};
+
+struct pfioc_states {
+ int ps_len;
+ union {
+ caddr_t psu_buf;
+ struct pf_state *psu_states;
+ } ps_u;
+#define ps_buf ps_u.psu_buf
+#define ps_states ps_u.psu_states
+};
+
+struct pfioc_src_nodes {
+ int psn_len;
+ union {
+ caddr_t psu_buf;
+ struct pf_src_node *psu_src_nodes;
+ } psn_u;
+#define psn_buf psn_u.psu_buf
+#define psn_src_nodes psn_u.psu_src_nodes
+};
+
+struct pfioc_if {
+ char ifname[IFNAMSIZ];
+};
+
+struct pfioc_tm {
+ int timeout;
+ int seconds;
+};
+
+struct pfioc_limit {
+ int index;
+ unsigned limit;
+};
+
+struct pfioc_altq {
+ u_int32_t action;
+ u_int32_t ticket;
+ u_int32_t nr;
+ struct pf_altq altq;
+};
+
+struct pfioc_qstats {
+ u_int32_t ticket;
+ u_int32_t nr;
+ void *buf;
+ int nbytes;
+ u_int8_t scheduler;
+};
+
+struct pfioc_ruleset {
+ u_int32_t nr;
+ char path[MAXPATHLEN];
+ char name[PF_ANCHOR_NAME_SIZE];
+};
+
+#define PF_RULESET_ALTQ (PF_RULESET_MAX)
+#define PF_RULESET_TABLE (PF_RULESET_MAX+1)
+struct pfioc_trans {
+ int size; /* number of elements */
+ int esize; /* size of each element in bytes */
+ struct pfioc_trans_e {
+ int rs_num;
+ char anchor[MAXPATHLEN];
+ u_int32_t ticket;
+ } *array;
+};
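+
+/*
+ * Userland sketch (hypothetical descriptor "dev"): the transaction
+ * ioctls below take this structure to swap in a ruleset atomically,
+ * pfctl-style.
+ */
+#if 0
+ struct pfioc_trans t;
+ struct pfioc_trans_e e;
+
+ bzero(&t, sizeof(t));
+ bzero(&e, sizeof(e));
+ e.rs_num = PF_RULESET_FILTER;
+ t.size = 1;
+ t.esize = sizeof(e);
+ t.array = &e;
+ if (ioctl(dev, DIOCXBEGIN, &t) == 0) {
+  /* load rules against e.ticket, then commit or roll back */
+  if (ioctl(dev, DIOCXCOMMIT, &t) != 0)
+   ioctl(dev, DIOCXROLLBACK, &t);
+ }
+#endif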
+
+#define PFR_FLAG_ATOMIC 0x00000001
+#define PFR_FLAG_DUMMY 0x00000002
+#define PFR_FLAG_FEEDBACK 0x00000004
+#define PFR_FLAG_CLSTATS 0x00000008
+#define PFR_FLAG_ADDRSTOO 0x00000010
+#define PFR_FLAG_REPLACE 0x00000020
+#define PFR_FLAG_ALLRSETS 0x00000040
+#define PFR_FLAG_ALLMASK 0x0000007F
+#ifdef _KERNEL
+#define PFR_FLAG_USERIOCTL 0x10000000
+#endif
+
+struct pfioc_table {
+ struct pfr_table pfrio_table;
+ void *pfrio_buffer;
+ int pfrio_esize;
+ int pfrio_size;
+ int pfrio_size2;
+ int pfrio_nadd;
+ int pfrio_ndel;
+ int pfrio_nchange;
+ int pfrio_flags;
+ u_int32_t pfrio_ticket;
+};
+#define pfrio_exists pfrio_nadd
+#define pfrio_nzero pfrio_nadd
+#define pfrio_nmatch pfrio_nadd
+#define pfrio_naddr pfrio_size2
+#define pfrio_setflag pfrio_size2
+#define pfrio_clrflag pfrio_nadd
+
+struct pfioc_iface {
+ char pfiio_name[IFNAMSIZ];
+ void *pfiio_buffer;
+ int pfiio_esize;
+ int pfiio_size;
+ int pfiio_nzero;
+ int pfiio_flags;
+};
+
+
+/*
+ * ioctl operations
+ */
+
+#define DIOCSTART _IO ('D', 1)
+#define DIOCSTOP _IO ('D', 2)
+#define DIOCADDRULE _IOWR('D', 4, struct pfioc_rule)
+#define DIOCGETRULES _IOWR('D', 6, struct pfioc_rule)
+#define DIOCGETRULE _IOWR('D', 7, struct pfioc_rule)
+/* XXX cut 8 - 17 */
+#define DIOCCLRSTATES _IOWR('D', 18, struct pfioc_state_kill)
+#define DIOCGETSTATE _IOWR('D', 19, struct pfioc_state)
+#define DIOCSETSTATUSIF _IOWR('D', 20, struct pfioc_if)
+#define DIOCGETSTATUS _IOWR('D', 21, struct pf_status)
+#define DIOCCLRSTATUS _IO ('D', 22)
+#define DIOCNATLOOK _IOWR('D', 23, struct pfioc_natlook)
+#define DIOCSETDEBUG _IOWR('D', 24, u_int32_t)
+#define DIOCGETSTATES _IOWR('D', 25, struct pfioc_states)
+#define DIOCCHANGERULE _IOWR('D', 26, struct pfioc_rule)
+/* XXX cut 26 - 28 */
+#define DIOCSETTIMEOUT _IOWR('D', 29, struct pfioc_tm)
+#define DIOCGETTIMEOUT _IOWR('D', 30, struct pfioc_tm)
+#define DIOCADDSTATE _IOWR('D', 37, struct pfioc_state)
+#define DIOCCLRRULECTRS _IO ('D', 38)
+#define DIOCGETLIMIT _IOWR('D', 39, struct pfioc_limit)
+#define DIOCSETLIMIT _IOWR('D', 40, struct pfioc_limit)
+#define DIOCKILLSTATES _IOWR('D', 41, struct pfioc_state_kill)
+#define DIOCSTARTALTQ _IO ('D', 42)
+#define DIOCSTOPALTQ _IO ('D', 43)
+#define DIOCADDALTQ _IOWR('D', 45, struct pfioc_altq)
+#define DIOCGETALTQS _IOWR('D', 47, struct pfioc_altq)
+#define DIOCGETALTQ _IOWR('D', 48, struct pfioc_altq)
+#define DIOCCHANGEALTQ _IOWR('D', 49, struct pfioc_altq)
+#define DIOCGETQSTATS _IOWR('D', 50, struct pfioc_qstats)
+#define DIOCBEGINADDRS _IOWR('D', 51, struct pfioc_pooladdr)
+#define DIOCADDADDR _IOWR('D', 52, struct pfioc_pooladdr)
+#define DIOCGETADDRS _IOWR('D', 53, struct pfioc_pooladdr)
+#define DIOCGETADDR _IOWR('D', 54, struct pfioc_pooladdr)
+#define DIOCCHANGEADDR _IOWR('D', 55, struct pfioc_pooladdr)
+/* XXX cut 55 - 57 */
+#define DIOCGETRULESETS _IOWR('D', 58, struct pfioc_ruleset)
+#define DIOCGETRULESET _IOWR('D', 59, struct pfioc_ruleset)
+#define DIOCRCLRTABLES _IOWR('D', 60, struct pfioc_table)
+#define DIOCRADDTABLES _IOWR('D', 61, struct pfioc_table)
+#define DIOCRDELTABLES _IOWR('D', 62, struct pfioc_table)
+#define DIOCRGETTABLES _IOWR('D', 63, struct pfioc_table)
+#define DIOCRGETTSTATS _IOWR('D', 64, struct pfioc_table)
+#define DIOCRCLRTSTATS _IOWR('D', 65, struct pfioc_table)
+#define DIOCRCLRADDRS _IOWR('D', 66, struct pfioc_table)
+#define DIOCRADDADDRS _IOWR('D', 67, struct pfioc_table)
+#define DIOCRDELADDRS _IOWR('D', 68, struct pfioc_table)
+#define DIOCRSETADDRS _IOWR('D', 69, struct pfioc_table)
+#define DIOCRGETADDRS _IOWR('D', 70, struct pfioc_table)
+#define DIOCRGETASTATS _IOWR('D', 71, struct pfioc_table)
+#define DIOCRCLRASTATS _IOWR('D', 72, struct pfioc_table)
+#define DIOCRTSTADDRS _IOWR('D', 73, struct pfioc_table)
+#define DIOCRSETTFLAGS _IOWR('D', 74, struct pfioc_table)
+#define DIOCRINADEFINE _IOWR('D', 77, struct pfioc_table)
+#define DIOCOSFPFLUSH _IO('D', 78)
+#define DIOCOSFPADD _IOWR('D', 79, struct pf_osfp_ioctl)
+#define DIOCOSFPGET _IOWR('D', 80, struct pf_osfp_ioctl)
+#define DIOCXBEGIN _IOWR('D', 81, struct pfioc_trans)
+#define DIOCXCOMMIT _IOWR('D', 82, struct pfioc_trans)
+#define DIOCXROLLBACK _IOWR('D', 83, struct pfioc_trans)
+#define DIOCGETSRCNODES _IOWR('D', 84, struct pfioc_src_nodes)
+#define DIOCCLRSRCNODES _IO('D', 85)
+#define DIOCSETHOSTID _IOWR('D', 86, u_int32_t)
+#define DIOCIGETIFACES _IOWR('D', 87, struct pfioc_iface)
+#define DIOCSETIFFLAG _IOWR('D', 89, struct pfioc_iface)
+#define DIOCCLRIFFLAG _IOWR('D', 90, struct pfioc_iface)
+#define DIOCKILLSRCNODES _IOWR('D', 91, struct pfioc_src_node_kill)
+#ifdef __FreeBSD__
+struct pf_ifspeed {
+ char ifname[IFNAMSIZ];
+ u_int32_t baudrate;
+};
+#define DIOCGIFSPEED _IOWR('D', 92, struct pf_ifspeed)
+#endif
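+
+/*
+ * Userland sketch (error handling elided): querying pf status through
+ * the control device.
+ */
+#if 0
+ struct pf_status status;
+ int dev;
+
+ dev = open("/dev/pf", O_RDONLY);
+ if (dev >= 0 && ioctl(dev, DIOCGETSTATUS, &status) == 0 &&
+     status.running)
+  /* pf is enabled */ ;
+#endif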
+
+#ifdef _KERNEL
+RB_HEAD(pf_src_tree, pf_src_node);
+RB_PROTOTYPE(pf_src_tree, pf_src_node, entry, pf_src_compare);
+extern struct pf_src_tree tree_src_tracking;
+
+RB_HEAD(pf_state_tree_id, pf_state);
+RB_PROTOTYPE(pf_state_tree_id, pf_state,
+ entry_id, pf_state_compare_id);
+extern struct pf_state_tree_id tree_id;
+extern struct pf_state_queue state_list;
+
+TAILQ_HEAD(pf_poolqueue, pf_pool);
+extern struct pf_poolqueue pf_pools[2];
+TAILQ_HEAD(pf_altqqueue, pf_altq);
+extern struct pf_altqqueue pf_altqs[2];
+extern struct pf_palist pf_pabuf;
+
+extern u_int32_t ticket_altqs_active;
+extern u_int32_t ticket_altqs_inactive;
+extern int altqs_inactive_open;
+extern u_int32_t ticket_pabuf;
+extern struct pf_altqqueue *pf_altqs_active;
+extern struct pf_altqqueue *pf_altqs_inactive;
+extern struct pf_poolqueue *pf_pools_active;
+extern struct pf_poolqueue *pf_pools_inactive;
+extern int pf_tbladdr_setup(struct pf_ruleset *,
+ struct pf_addr_wrap *);
+extern void pf_tbladdr_remove(struct pf_addr_wrap *);
+extern void pf_tbladdr_copyout(struct pf_addr_wrap *);
+extern void pf_calc_skip_steps(struct pf_rulequeue *);
+#ifdef __FreeBSD__
+#ifdef ALTQ
+extern void pf_altq_ifnet_event(struct ifnet *, int);
+#endif
+extern uma_zone_t pf_src_tree_pl, pf_rule_pl;
+extern uma_zone_t pf_state_pl, pf_altq_pl, pf_pooladdr_pl;
+extern uma_zone_t pfr_ktable_pl, pfr_kentry_pl, pfr_kentry_pl2;
+extern uma_zone_t pf_cache_pl, pf_cent_pl;
+extern uma_zone_t pf_state_scrub_pl;
+extern uma_zone_t pfi_addr_pl;
+#else
+extern struct pool pf_src_tree_pl, pf_rule_pl;
+extern struct pool pf_state_pl, pf_altq_pl, pf_pooladdr_pl;
+extern struct pool pf_state_scrub_pl;
+#endif
+extern void pf_purge_thread(void *);
+#ifdef __FreeBSD__
+extern int pf_purge_expired_src_nodes(int);
+extern int pf_purge_expired_states(u_int32_t, int);
+#else
+extern void pf_purge_expired_src_nodes(int);
+extern void pf_purge_expired_states(u_int32_t);
+#endif
+extern void pf_unlink_state(struct pf_state *);
+extern void pf_free_state(struct pf_state *);
+extern int pf_insert_state(struct pfi_kif *,
+ struct pf_state *);
+extern int pf_insert_src_node(struct pf_src_node **,
+ struct pf_rule *, struct pf_addr *,
+ sa_family_t);
+void pf_src_tree_remove_state(struct pf_state *);
+extern struct pf_state *pf_find_state_byid(struct pf_state_cmp *);
+extern struct pf_state *pf_find_state_all(struct pf_state_cmp *key,
+ u_int8_t tree, int *more);
+extern void pf_print_state(struct pf_state *);
+extern void pf_print_flags(u_int8_t);
+extern u_int16_t pf_cksum_fixup(u_int16_t, u_int16_t, u_int16_t,
+ u_int8_t);
+
+extern struct ifnet *sync_ifp;
+extern struct pf_rule pf_default_rule;
+extern void pf_addrcpy(struct pf_addr *, struct pf_addr *,
+ u_int8_t);
+void pf_rm_rule(struct pf_rulequeue *,
+ struct pf_rule *);
+
+#ifdef INET
+#ifdef __FreeBSD__
+int pf_test(int, struct ifnet *, struct mbuf **, struct ether_header *,
+ struct inpcb *);
+#else
+int pf_test(int, struct ifnet *, struct mbuf **, struct ether_header *);
+#endif
+#endif /* INET */
+
+#ifdef INET6
+#ifdef __FreeBSD__
+int pf_test6(int, struct ifnet *, struct mbuf **, struct ether_header *,
+ struct inpcb *);
+#else
+int pf_test6(int, struct ifnet *, struct mbuf **, struct ether_header *);
+#endif
+void pf_poolmask(struct pf_addr *, struct pf_addr*,
+ struct pf_addr *, struct pf_addr *, u_int8_t);
+void pf_addr_inc(struct pf_addr *, sa_family_t);
+#endif /* INET6 */
+
+#ifdef __FreeBSD__
+u_int32_t pf_new_isn(struct pf_state *);
+#endif
+void *pf_pull_hdr(struct mbuf *, int, void *, int, u_short *, u_short *,
+ sa_family_t);
+void pf_change_a(void *, u_int16_t *, u_int32_t, u_int8_t);
+int pflog_packet(struct pfi_kif *, struct mbuf *, sa_family_t, u_int8_t,
+ u_int8_t, struct pf_rule *, struct pf_rule *, struct pf_ruleset *,
+ struct pf_pdesc *);
+int pf_match_addr(u_int8_t, struct pf_addr *, struct pf_addr *,
+ struct pf_addr *, sa_family_t);
+int pf_match(u_int8_t, u_int32_t, u_int32_t, u_int32_t);
+int pf_match_port(u_int8_t, u_int16_t, u_int16_t, u_int16_t);
+int pf_match_uid(u_int8_t, uid_t, uid_t, uid_t);
+int pf_match_gid(u_int8_t, gid_t, gid_t, gid_t);
+
+void pf_normalize_init(void);
+int pf_normalize_ip(struct mbuf **, int, struct pfi_kif *, u_short *,
+ struct pf_pdesc *);
+int pf_normalize_ip6(struct mbuf **, int, struct pfi_kif *, u_short *,
+ struct pf_pdesc *);
+int pf_normalize_tcp(int, struct pfi_kif *, struct mbuf *, int, int, void *,
+ struct pf_pdesc *);
+void pf_normalize_tcp_cleanup(struct pf_state *);
+int pf_normalize_tcp_init(struct mbuf *, int, struct pf_pdesc *,
+ struct tcphdr *, struct pf_state_peer *, struct pf_state_peer *);
+int pf_normalize_tcp_stateful(struct mbuf *, int, struct pf_pdesc *,
+ u_short *, struct tcphdr *, struct pf_state *,
+ struct pf_state_peer *, struct pf_state_peer *, int *);
+u_int32_t
+ pf_state_expires(const struct pf_state *);
+void pf_purge_expired_fragments(void);
+int pf_routable(struct pf_addr *addr, sa_family_t af, struct pfi_kif *);
+int pf_rtlabel_match(struct pf_addr *, sa_family_t, struct pf_addr_wrap *);
+#ifdef __FreeBSD__
+int pf_socket_lookup(int, struct pf_pdesc *, struct inpcb *);
+#else
+int pf_socket_lookup(int, struct pf_pdesc *);
+#endif
+void pfr_initialize(void);
+int pfr_match_addr(struct pfr_ktable *, struct pf_addr *, sa_family_t);
+void pfr_update_stats(struct pfr_ktable *, struct pf_addr *, sa_family_t,
+ u_int64_t, int, int, int);
+int pfr_pool_get(struct pfr_ktable *, int *, struct pf_addr *,
+ struct pf_addr **, struct pf_addr **, sa_family_t);
+void pfr_dynaddr_update(struct pfr_ktable *, struct pfi_dynaddr *);
+struct pfr_ktable *
+ pfr_attach_table(struct pf_ruleset *, char *);
+void pfr_detach_table(struct pfr_ktable *);
+int pfr_clr_tables(struct pfr_table *, int *, int);
+int pfr_add_tables(struct pfr_table *, int, int *, int);
+int pfr_del_tables(struct pfr_table *, int, int *, int);
+int pfr_get_tables(struct pfr_table *, struct pfr_table *, int *, int);
+int pfr_get_tstats(struct pfr_table *, struct pfr_tstats *, int *, int);
+int pfr_clr_tstats(struct pfr_table *, int, int *, int);
+int pfr_set_tflags(struct pfr_table *, int, int, int, int *, int *, int);
+int pfr_clr_addrs(struct pfr_table *, int *, int);
+int pfr_insert_kentry(struct pfr_ktable *, struct pfr_addr *, long);
+int pfr_add_addrs(struct pfr_table *, struct pfr_addr *, int, int *,
+ int);
+int pfr_del_addrs(struct pfr_table *, struct pfr_addr *, int, int *,
+ int);
+int pfr_set_addrs(struct pfr_table *, struct pfr_addr *, int, int *,
+ int *, int *, int *, int, u_int32_t);
+int pfr_get_addrs(struct pfr_table *, struct pfr_addr *, int *, int);
+int pfr_get_astats(struct pfr_table *, struct pfr_astats *, int *, int);
+int pfr_clr_astats(struct pfr_table *, struct pfr_addr *, int, int *,
+ int);
+int pfr_tst_addrs(struct pfr_table *, struct pfr_addr *, int, int *,
+ int);
+int pfr_ina_begin(struct pfr_table *, u_int32_t *, int *, int);
+int pfr_ina_rollback(struct pfr_table *, u_int32_t, int *, int);
+int pfr_ina_commit(struct pfr_table *, u_int32_t, int *, int *, int);
+int pfr_ina_define(struct pfr_table *, struct pfr_addr *, int, int *,
+ int *, u_int32_t, int);
+
+extern struct pfi_statehead pfi_statehead;
+extern struct pfi_kif *pfi_all;
+
+void pfi_initialize(void);
+#ifdef __FreeBSD__
+void pfi_cleanup(void);
+#endif
+struct pfi_kif *pfi_kif_get(const char *);
+void pfi_kif_ref(struct pfi_kif *, enum pfi_kif_refs);
+void pfi_kif_unref(struct pfi_kif *, enum pfi_kif_refs);
+int pfi_kif_match(struct pfi_kif *, struct pfi_kif *);
+void pfi_attach_ifnet(struct ifnet *);
+void pfi_detach_ifnet(struct ifnet *);
+void pfi_attach_ifgroup(struct ifg_group *);
+void pfi_detach_ifgroup(struct ifg_group *);
+void pfi_group_change(const char *);
+int pfi_match_addr(struct pfi_dynaddr *, struct pf_addr *,
+ sa_family_t);
+int pfi_dynaddr_setup(struct pf_addr_wrap *, sa_family_t);
+void pfi_dynaddr_remove(struct pf_addr_wrap *);
+void pfi_dynaddr_copyout(struct pf_addr_wrap *);
+void pfi_fill_oldstatus(struct pf_status *);
+int pfi_clr_istats(const char *);
+int pfi_get_ifaces(const char *, struct pfi_kif *, int *);
+int pfi_set_flags(const char *, int);
+int pfi_clear_flags(const char *, int);
+
+u_int16_t pf_tagname2tag(char *);
+void pf_tag2tagname(u_int16_t, char *);
+void pf_tag_ref(u_int16_t);
+void pf_tag_unref(u_int16_t);
+int pf_tag_packet(struct mbuf *, struct pf_mtag *, int, int);
+u_int32_t pf_qname2qid(char *);
+void pf_qid2qname(u_int32_t, char *);
+void pf_qid_unref(u_int32_t);
+#ifndef __FreeBSD__
+struct pf_mtag *pf_find_mtag(struct mbuf *);
+struct pf_mtag *pf_get_mtag(struct mbuf *);
+#endif
+
+extern struct pf_status pf_status;
+
+#ifdef __FreeBSD__
+extern uma_zone_t pf_frent_pl, pf_frag_pl;
+extern struct sx pf_consistency_lock;
+#else
+extern struct pool pf_frent_pl, pf_frag_pl;
+extern struct rwlock pf_consistency_lock;
+#endif
+
+struct pf_pool_limit {
+ void *pp;
+ unsigned limit;
+};
+extern struct pf_pool_limit pf_pool_limits[PF_LIMIT_MAX];
+
+#ifdef __FreeBSD__
+struct pf_frent {
+ LIST_ENTRY(pf_frent) fr_next;
+ struct ip *fr_ip;
+ struct mbuf *fr_m;
+};
+
+struct pf_frcache {
+ LIST_ENTRY(pf_frcache) fr_next;
+ uint16_t fr_off;
+ uint16_t fr_end;
+};
+
+struct pf_fragment {
+ RB_ENTRY(pf_fragment) fr_entry;
+ TAILQ_ENTRY(pf_fragment) frag_next;
+ struct in_addr fr_src;
+ struct in_addr fr_dst;
+ u_int8_t fr_p; /* protocol of this fragment */
+ u_int8_t fr_flags; /* status flags */
+ u_int16_t fr_id; /* fragment id for reassemble */
+ u_int16_t fr_max; /* fragment data max */
+ u_int32_t fr_timeout;
+#define fr_queue fr_u.fru_queue
+#define fr_cache fr_u.fru_cache
+ union {
+ LIST_HEAD(pf_fragq, pf_frent) fru_queue; /* buffering */
+ LIST_HEAD(pf_cacheq, pf_frcache) fru_cache; /* non-buf */
+ } fr_u;
+};
+#endif /* (__FreeBSD__) */
+
+#endif /* _KERNEL */
+
+extern struct pf_anchor_global pf_anchors;
+extern struct pf_anchor pf_main_anchor;
+#define pf_main_ruleset pf_main_anchor.ruleset
+
+/* these ruleset functions can be linked into userland programs (pfctl) */
+int pf_get_ruleset_number(u_int8_t);
+void pf_init_ruleset(struct pf_ruleset *);
+int pf_anchor_setup(struct pf_rule *,
+ const struct pf_ruleset *, const char *);
+int pf_anchor_copyout(const struct pf_ruleset *,
+ const struct pf_rule *, struct pfioc_rule *);
+void pf_anchor_remove(struct pf_rule *);
+void pf_remove_if_empty_ruleset(struct pf_ruleset *);
+struct pf_anchor *pf_find_anchor(const char *);
+struct pf_ruleset *pf_find_ruleset(const char *);
+struct pf_ruleset *pf_find_or_create_ruleset(const char *);
+void pf_rs_initialize(void);
+
+#ifndef __FreeBSD__
+/* XXX: these duplicate the _KERNEL prototypes above for the non-FreeBSD build. */
+#ifdef _KERNEL
+int pf_anchor_copyout(const struct pf_ruleset *,
+ const struct pf_rule *, struct pfioc_rule *);
+void pf_anchor_remove(struct pf_rule *);
+
+#endif /* _KERNEL */
+#endif
+
+/* The fingerprint functions can be linked into userland programs (tcpdump) */
+int pf_osfp_add(struct pf_osfp_ioctl *);
+#ifdef _KERNEL
+struct pf_osfp_enlist *
+ pf_osfp_fingerprint(struct pf_pdesc *, struct mbuf *, int,
+ const struct tcphdr *);
+#endif /* _KERNEL */
+struct pf_osfp_enlist *
+ pf_osfp_fingerprint_hdr(const struct ip *, const struct ip6_hdr *,
+ const struct tcphdr *);
+void pf_osfp_flush(void);
+int pf_osfp_get(struct pf_osfp_ioctl *);
+#ifdef __FreeBSD__
+int pf_osfp_initialize(void);
+void pf_osfp_cleanup(void);
+#else
+void pf_osfp_initialize(void);
+#endif
+int pf_osfp_match(struct pf_osfp_enlist *, pf_osfp_t);
+struct pf_os_fingerprint *
+ pf_osfp_validate(void);
+
+#endif /* _NET_PFVAR_HH_ */
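The fingerprint helpers declared above take parsed IP and TCP headers from a SYN segment and return the list of matching OS entries; pf_osfp_match() then tests that list against the pf_osfp_t carried by a rule. A kernel-context sketch, roughly mirroring how pf.c consults it (the helper name is hypothetical):

/* Does this IPv4 SYN match the rule's "os" operand? */
static int
syn_matches_rule_os(const struct ip *ip, const struct tcphdr *th,
    struct pf_rule *r)
{
	struct pf_osfp_enlist *list;

	list = pf_osfp_fingerprint_hdr(ip, NULL, th);
	return (list != NULL && pf_osfp_match(list, r->os_fingerprint));
}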
diff --git a/contrib/pf/rtems/freebsd/netinet/in4_cksum.c b/contrib/pf/rtems/freebsd/netinet/in4_cksum.c
new file mode 100644
index 00000000..bc11aeb9
--- /dev/null
+++ b/contrib/pf/rtems/freebsd/netinet/in4_cksum.c
@@ -0,0 +1,122 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/* $FreeBSD$ */
+/* $OpenBSD: in4_cksum.c,v 1.7 2003/06/02 23:28:13 millert Exp $ */
+/* $KAME: in4_cksum.c,v 1.10 2001/11/30 10:06:15 itojun Exp $ */
+/* $NetBSD: in_cksum.c,v 1.13 1996/10/13 02:03:03 christos Exp $ */
+
+/*
+ * Copyright (C) 1999 WIDE Project.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the project nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * Copyright (c) 1988, 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)in_cksum.c 8.1 (Berkeley) 6/10/93
+ */
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/mbuf.h>
+
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/in_systm.h>
+#include <rtems/freebsd/netinet/ip.h>
+#include <rtems/freebsd/netinet/ip_var.h>
+
+#include <rtems/freebsd/machine/in_cksum.h>
+
+#define ADDCARRY(x) (x > 65535 ? x -= 65535 : x)
+#define REDUCE {l_util.l = sum; sum = l_util.s[0] + l_util.s[1]; ADDCARRY(sum);}
+
+int in4_cksum(struct mbuf *, u_int8_t, int, int);
+
+int
+in4_cksum(struct mbuf *m, u_int8_t nxt, int off, int len)
+{
+ union {
+ struct ipovly ipov;
+ u_int16_t w[10];
+ } u;
+ union {
+ u_int16_t s[2];
+ u_int32_t l;
+ } l_util;
+
+ u_int16_t *w;
+ int psum;
+ int sum = 0;
+
+ if (nxt != 0) {
+ /* pseudo header */
+ if (off < sizeof(struct ipovly))
+ panic("in4_cksum: offset too short");
+ if (m->m_len < sizeof(struct ip))
+ panic("in4_cksum: bad mbuf chain");
+ bzero(&u.ipov, sizeof(u.ipov));
+ u.ipov.ih_len = htons(len);
+ u.ipov.ih_pr = nxt;
+ u.ipov.ih_src = mtod(m, struct ip *)->ip_src;
+ u.ipov.ih_dst = mtod(m, struct ip *)->ip_dst;
+ w = u.w;
+ /* assumes sizeof(ipov) == 20 */
+ sum += w[0]; sum += w[1]; sum += w[2]; sum += w[3]; sum += w[4];
+ sum += w[5]; sum += w[6]; sum += w[7]; sum += w[8]; sum += w[9];
+ }
+
+ psum = in_cksum_skip(m, len + off, off);
+ psum = ~psum & 0xffff;
+ sum += psum;
+ REDUCE;
+ return (~sum & 0xffff);
+}
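ADDCARRY and REDUCE above implement one's-complement folding: the 32-bit accumulator is collapsed to 16 bits and any carry out of bit 15 is wrapped back in (subtracting 65535 once the value exceeds it has the same effect as an end-around carry). A standalone sketch of the equivalent fold, for illustration only:

#include <stdint.h>
#include <stdio.h>

static uint16_t
fold(uint32_t sum)
{
	/* Two rounds suffice: after the first, sum fits in 17 bits. */
	sum = (sum & 0xffff) + (sum >> 16);
	sum = (sum & 0xffff) + (sum >> 16);
	return ((uint16_t)~sum);
}

int
main(void)
{
	/* 0xffff + 0x0001 overflows 16 bits; the carry wraps around. */
	printf("checksum = 0x%04x\n", fold(0xffffu + 0x0001u));
	return (0);
}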
diff --git a/rtems/freebsd/bsd.h b/rtems/freebsd/bsd.h
new file mode 100644
index 00000000..6c0d961b
--- /dev/null
+++ b/rtems/freebsd/bsd.h
@@ -0,0 +1,43 @@
+/**
+ * @file
+ *
+ * @ingroup rtems_bsd
+ *
+ * @brief RTEMS BSD library initialization.
+ */
+
+/*
+ * Copyright (c) 2009, 2010 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Obere Lagerstr. 30
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.com/license/LICENSE.
+ */
+
+#ifndef _RTEMS_BSD_BSD_HH_
+#define _RTEMS_BSD_BSD_HH_
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+#include <rtems/freebsd/machine/rtems-bsd-select.h>
+
+rtems_status_code rtems_bsd_initialize(void);
+
+rtems_status_code rtems_bsd_initialize_with_interrupt_server(void);
+
+void rtems_bsd_shell_initialize(void);
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* _RTEMS_BSD_BSD_HH_ */
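A hedged sketch of how an application might use these entry points during start-up (the surrounding task layout and error handling are assumptions, not part of this header):

#include <rtems.h>
#include <rtems/freebsd/bsd.h>

static void
early_network_init(void)
{
	rtems_status_code sc;

	sc = rtems_bsd_initialize();
	if (sc != RTEMS_SUCCESSFUL) {
		/* The BSD layer is unusable; report and give up. */
		return;
	}
	/* Optionally register the BSD-related shell commands. */
	rtems_bsd_shell_initialize();
}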
diff --git a/rtems/freebsd/bsm/audit.h b/rtems/freebsd/bsm/audit.h
new file mode 100644
index 00000000..b06bf9aa
--- /dev/null
+++ b/rtems/freebsd/bsm/audit.h
@@ -0,0 +1,328 @@
+/*-
+ * Copyright (c) 2005-2009 Apple Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * P4: //depot/projects/trustedbsd/openbsm/sys/bsm/audit.h#10
+ * $FreeBSD$
+ */
+
+#ifndef _BSM_AUDIT_H
+#define _BSM_AUDIT_H
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/types.h>
+
+#define AUDIT_RECORD_MAGIC 0x828a0f1b
+#define MAX_AUDIT_RECORDS 20
+#define MAXAUDITDATA (0x8000 - 1)
+#define MAX_AUDIT_RECORD_SIZE MAXAUDITDATA
+#define MIN_AUDIT_FILE_SIZE (512 * 1024)
+
+/*
+ * Minimum number of free blocks on the filesystem containing the audit
+ * log necessary to avoid a hard log rotation. DO NOT SET THIS VALUE TO 0
+ * as the kernel does an unsigned compare, plus we want to leave a few blocks
+ * free so userspace can terminate the log, etc.
+ */
+#define AUDIT_HARD_LIMIT_FREE_BLOCKS 4
+
+/*
+ * Triggers for the audit daemon.
+ */
+#define AUDIT_TRIGGER_MIN 1
+#define AUDIT_TRIGGER_LOW_SPACE 1 /* Below low watermark. */
+#define AUDIT_TRIGGER_ROTATE_KERNEL 2 /* Kernel requests rotate. */
+#define AUDIT_TRIGGER_READ_FILE 3 /* Re-read config file. */
+#define AUDIT_TRIGGER_CLOSE_AND_DIE 4 /* Terminate audit. */
+#define AUDIT_TRIGGER_NO_SPACE 5 /* Below min free space. */
+#define AUDIT_TRIGGER_ROTATE_USER 6 /* User requests rotate. */
+#define AUDIT_TRIGGER_INITIALIZE 7 /* User initialize of auditd. */
+#define AUDIT_TRIGGER_EXPIRE_TRAILS 8 /* User expiration of trails. */
+#define AUDIT_TRIGGER_MAX 8
+
+/*
+ * The special device filename (FreeBSD).
+ */
+#define AUDITDEV_FILENAME "audit"
+#define AUDIT_TRIGGER_FILE ("/dev/" AUDITDEV_FILENAME)
+
+/*
+ * Pre-defined audit IDs
+ */
+#define AU_DEFAUDITID (uid_t)(-1)
+#define AU_DEFAUDITSID 0
+#define AU_ASSIGN_ASID -1
+
+/*
+ * IPC types.
+ */
+#define AT_IPC_MSG ((u_char)1) /* Message IPC id. */
+#define AT_IPC_SEM ((u_char)2) /* Semaphore IPC id. */
+#define AT_IPC_SHM ((u_char)3) /* Shared mem IPC id. */
+
+/*
+ * Audit conditions.
+ */
+#define AUC_UNSET 0
+#define AUC_AUDITING 1
+#define AUC_NOAUDIT 2
+#define AUC_DISABLED -1
+
+/*
+ * auditon(2) commands.
+ */
+#define A_OLDGETPOLICY 2
+#define A_OLDSETPOLICY 3
+#define A_GETKMASK 4
+#define A_SETKMASK 5
+#define A_OLDGETQCTRL 6
+#define A_OLDSETQCTRL 7
+#define A_GETCWD 8
+#define A_GETCAR 9
+#define A_GETSTAT 12
+#define A_SETSTAT 13
+#define A_SETUMASK 14
+#define A_SETSMASK 15
+#define A_OLDGETCOND 20
+#define A_OLDSETCOND 21
+#define A_GETCLASS 22
+#define A_SETCLASS 23
+#define A_GETPINFO 24
+#define A_SETPMASK 25
+#define A_SETFSIZE 26
+#define A_GETFSIZE 27
+#define A_GETPINFO_ADDR 28
+#define A_GETKAUDIT 29
+#define A_SETKAUDIT 30
+#define A_SENDTRIGGER 31
+#define A_GETSINFO_ADDR 32
+#define A_GETPOLICY 33
+#define A_SETPOLICY 34
+#define A_GETQCTRL 35
+#define A_SETQCTRL 36
+#define A_GETCOND 37
+#define A_SETCOND 38
+
+/*
+ * Audit policy controls.
+ */
+#define AUDIT_CNT 0x0001
+#define AUDIT_AHLT 0x0002
+#define AUDIT_ARGV 0x0004
+#define AUDIT_ARGE 0x0008
+#define AUDIT_SEQ 0x0010
+#define AUDIT_WINDATA 0x0020
+#define AUDIT_USER 0x0040
+#define AUDIT_GROUP 0x0080
+#define AUDIT_TRAIL 0x0100
+#define AUDIT_PATH 0x0200
+#define AUDIT_SCNT 0x0400
+#define AUDIT_PUBLIC 0x0800
+#define AUDIT_ZONENAME 0x1000
+#define AUDIT_PERZONE 0x2000
+
+/*
+ * Default audit queue control parameters.
+ */
+#define AQ_HIWATER 100
+#define AQ_MAXHIGH 10000
+#define AQ_LOWATER 10
+#define AQ_BUFSZ MAXAUDITDATA
+#define AQ_MAXBUFSZ 1048576
+
+/*
+ * Default minimum percentage free space on file system.
+ */
+#define AU_FS_MINFREE 20
+
+/*
+ * Type definitions used to indicate the length of variable-length
+ * addresses in tokens containing addresses, such as header fields.
+ */
+#define AU_IPv4 4
+#define AU_IPv6 16
+
+__BEGIN_DECLS
+
+typedef uid_t au_id_t;
+typedef pid_t au_asid_t;
+typedef u_int16_t au_event_t;
+typedef u_int16_t au_emod_t;
+typedef u_int32_t au_class_t;
+typedef u_int64_t au_asflgs_t __attribute__ ((aligned (8)));
+
+struct au_tid {
+ dev_t port;
+ u_int32_t machine;
+};
+typedef struct au_tid au_tid_t;
+
+struct au_tid_addr {
+ dev_t at_port;
+ u_int32_t at_type;
+ u_int32_t at_addr[4];
+};
+typedef struct au_tid_addr au_tid_addr_t;
+
+struct au_mask {
+ unsigned int am_success; /* Success bits. */
+ unsigned int am_failure; /* Failure bits. */
+};
+typedef struct au_mask au_mask_t;
+
+struct auditinfo {
+ au_id_t ai_auid; /* Audit user ID. */
+ au_mask_t ai_mask; /* Audit masks. */
+ au_tid_t ai_termid; /* Terminal ID. */
+ au_asid_t ai_asid; /* Audit session ID. */
+};
+typedef struct auditinfo auditinfo_t;
+
+struct auditinfo_addr {
+ au_id_t ai_auid; /* Audit user ID. */
+ au_mask_t ai_mask; /* Audit masks. */
+ au_tid_addr_t ai_termid; /* Terminal ID. */
+ au_asid_t ai_asid; /* Audit session ID. */
+ au_asflgs_t ai_flags; /* Audit session flags. */
+};
+typedef struct auditinfo_addr auditinfo_addr_t;
+
+struct auditpinfo {
+ pid_t ap_pid; /* ID of target process. */
+ au_id_t ap_auid; /* Audit user ID. */
+ au_mask_t ap_mask; /* Audit masks. */
+ au_tid_t ap_termid; /* Terminal ID. */
+ au_asid_t ap_asid; /* Audit session ID. */
+};
+typedef struct auditpinfo auditpinfo_t;
+
+struct auditpinfo_addr {
+ pid_t ap_pid; /* ID of target process. */
+ au_id_t ap_auid; /* Audit user ID. */
+ au_mask_t ap_mask; /* Audit masks. */
+ au_tid_addr_t ap_termid; /* Terminal ID. */
+ au_asid_t ap_asid; /* Audit session ID. */
+ au_asflgs_t ap_flags; /* Audit session flags. */
+};
+typedef struct auditpinfo_addr auditpinfo_addr_t;
+
+struct au_session {
+ auditinfo_addr_t *as_aia_p; /* Ptr to full audit info. */
+ au_mask_t as_mask; /* Process Audit Masks. */
+};
+typedef struct au_session au_session_t;
+
+/*
+ * Contents of token_t are opaque outside of libbsm.
+ */
+typedef struct au_token token_t;
+
+/*
+ * Kernel audit queue control parameters:
+ * Default: Maximum:
+ * aq_hiwater: AQ_HIWATER (100) AQ_MAXHIGH (10000)
+ * aq_lowater: AQ_LOWATER (10) <aq_hiwater
+ * aq_bufsz: AQ_BUFSZ (32767) AQ_MAXBUFSZ (1048576)
+ * aq_delay: 20 20000 (not used)
+ */
+struct au_qctrl {
+ int aq_hiwater; /* Max # of audit recs in queue when */
+ /* threads with new ARs get blocked. */
+
+ int aq_lowater; /* # of audit recs in queue when */
+ /* blocked threads get unblocked. */
+
+ int aq_bufsz; /* Max size of audit record for audit(2). */
+ int aq_delay; /* Queue delay (not used). */
+ int aq_minfree; /* Minimum filesystem percent free space. */
+};
+typedef struct au_qctrl au_qctrl_t;
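Userland reads and tunes these parameters through auditon(2) with the A_GETQCTRL/A_SETQCTRL commands defined earlier. A minimal sketch, assuming the header is reachable as <bsm/audit.h> on the target:

#include <bsm/audit.h>
#include <stdio.h>

int
main(void)
{
	struct au_qctrl qc;

	if (auditon(A_GETQCTRL, &qc, sizeof(qc)) == -1) {
		perror("auditon(A_GETQCTRL)");
		return (1);
	}
	printf("hiwater %d lowater %d bufsz %d minfree %d%%\n",
	    qc.aq_hiwater, qc.aq_lowater, qc.aq_bufsz, qc.aq_minfree);
	return (0);
}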
+
+/*
+ * Structure for the audit statistics.
+ */
+struct audit_stat {
+ unsigned int as_version;
+ unsigned int as_numevent;
+ int as_generated;
+ int as_nonattrib;
+ int as_kernel;
+ int as_audit;
+ int as_auditctl;
+ int as_enqueue;
+ int as_written;
+ int as_wblocked;
+ int as_rblocked;
+ int as_dropped;
+ int as_totalsize;
+ unsigned int as_memused;
+};
+typedef struct audit_stat au_stat_t;
+
+/*
+ * Structure for the audit file statistics.
+ */
+struct audit_fstat {
+ u_int64_t af_filesz;
+ u_int64_t af_currsz;
+};
+typedef struct audit_fstat au_fstat_t;
+
+/*
+ * Audit to event class mapping.
+ */
+struct au_evclass_map {
+ au_event_t ec_number;
+ au_class_t ec_class;
+};
+typedef struct au_evclass_map au_evclass_map_t;
+
+/*
+ * Audit system calls.
+ */
+#if !defined(_KERNEL) && !defined(KERNEL)
+int audit(const void *, int);
+int auditon(int, void *, int);
+int auditctl(const char *);
+int getauid(au_id_t *);
+int setauid(const au_id_t *);
+int getaudit(struct auditinfo *);
+int setaudit(const struct auditinfo *);
+int getaudit_addr(struct auditinfo_addr *, int);
+int setaudit_addr(const struct auditinfo_addr *, int);
+
+#ifdef __APPLE_API_PRIVATE
+#include <rtems/freebsd/mach/port.h>
+mach_port_name_t audit_session_self(void);
+au_asid_t audit_session_join(mach_port_name_t port);
+#endif /* __APPLE_API_PRIVATE */
+
+#endif /* defined(_KERNEL) || defined(KERNEL) */
+
+__END_DECLS
+
+#endif /* !_BSM_AUDIT_H */
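The prototypes above are the userland-visible audit entry points; a hedged sketch of querying the calling process's audit state (include path assumed):

#include <bsm/audit.h>
#include <stdio.h>

int
main(void)
{
	au_id_t auid;
	struct auditinfo ai;

	if (getauid(&auid) == 0)
		printf("audit user ID %d\n", (int)auid);
	if (getaudit(&ai) == 0)
		printf("session %d, success mask 0x%x, failure mask 0x%x\n",
		    (int)ai.ai_asid, ai.ai_mask.am_success,
		    ai.ai_mask.am_failure);
	return (0);
}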
diff --git a/rtems/freebsd/bsm/audit_kevents.h b/rtems/freebsd/bsm/audit_kevents.h
new file mode 100644
index 00000000..82580920
--- /dev/null
+++ b/rtems/freebsd/bsm/audit_kevents.h
@@ -0,0 +1,799 @@
+/*-
+ * Copyright (c) 2005-2009 Apple Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * P4: //depot/projects/trustedbsd/openbsm/sys/bsm/audit_kevents.h#7
+ * $FreeBSD$
+ */
+
+#ifndef _BSM_AUDIT_KEVENTS_HH_
+#define _BSM_AUDIT_KEVENTS_HH_
+
+/*
+ * The reserved event numbers for kernel events are 1...2047 and 43001..44900.
+ */
+#define AUE_IS_A_KEVENT(e) (((e) > 0 && (e) < 2048) || \
+ ((e) > 43000 && (e) < 45000))
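Because the macro is a constant expression, the two ranges can be checked at compile time; a small C11 sketch using values defined later in this header:

#include <bsm/audit_kevents.h>	/* include path assumed */

_Static_assert(AUE_IS_A_KEVENT(AUE_OPEN), "3 falls in 1..2047");
_Static_assert(AUE_IS_A_KEVENT(AUE_GETFSSTAT), "43001 falls in the high range");
_Static_assert(!AUE_IS_A_KEVENT(2048), "2048 falls in neither range");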
+
+/*
+ * Values marked as AUE_NULL are not required to be audited as per CAPP.
+ *
+ * Some conflicts exist in the name-to-event-number mappings between BSM
+ * implementations. In general, we prefer the OpenSolaris
+ * definition as we consider Solaris BSM to be authoritative. _DARWIN_ has
+ * been inserted for the Darwin variants. If necessary, other tags will be
+ * added in the future.
+ */
+#define AUE_NULL 0
+#define AUE_EXIT 1
+#define AUE_FORK 2
+#define AUE_FORKALL AUE_FORK /* Solaris-specific. */
+#define AUE_OPEN 3
+#define AUE_CREAT 4
+#define AUE_LINK 5
+#define AUE_UNLINK 6
+#define AUE_DELETE AUE_UNLINK /* Darwin-specific. */
+#define AUE_EXEC 7
+#define AUE_CHDIR 8
+#define AUE_MKNOD 9
+#define AUE_CHMOD 10
+#define AUE_CHOWN 11
+#define AUE_UMOUNT 12
+#define AUE_JUNK 13 /* Solaris-specific. */
+#define AUE_ACCESS 14
+#define AUE_KILL 15
+#define AUE_STAT 16
+#define AUE_LSTAT 17
+#define AUE_ACCT 18
+#define AUE_MCTL 19 /* Solaris-specific. */
+#define AUE_REBOOT 20 /* XXX: Darwin conflict. */
+#define AUE_SYMLINK 21
+#define AUE_READLINK 22
+#define AUE_EXECVE 23
+#define AUE_CHROOT 24
+#define AUE_VFORK 25
+#define AUE_SETGROUPS 26
+#define AUE_SETPGRP 27
+#define AUE_SWAPON 28
+#define AUE_SETHOSTNAME 29 /* XXX: Darwin conflict. */
+#define AUE_FCNTL 30
+#define AUE_SETPRIORITY 31 /* XXX: Darwin conflict. */
+#define AUE_CONNECT 32
+#define AUE_ACCEPT 33
+#define AUE_BIND 34
+#define AUE_SETSOCKOPT 35
+#define AUE_VTRACE 36 /* Solaris-specific. */
+#define AUE_SETTIMEOFDAY 37 /* XXX: Darwin conflict. */
+#define AUE_FCHOWN 38
+#define AUE_FCHMOD 39
+#define AUE_SETREUID 40
+#define AUE_SETREGID 41
+#define AUE_RENAME 42
+#define AUE_TRUNCATE 43 /* XXX: Darwin conflict. */
+#define AUE_FTRUNCATE 44 /* XXX: Darwin conflict. */
+#define AUE_FLOCK 45 /* XXX: Darwin conflict. */
+#define AUE_SHUTDOWN 46
+#define AUE_MKDIR 47
+#define AUE_RMDIR 48
+#define AUE_UTIMES 49
+#define AUE_ADJTIME 50
+#define AUE_SETRLIMIT 51
+#define AUE_KILLPG 52
+#define AUE_NFS_SVC 53 /* XXX: Darwin conflict. */
+#define AUE_STATFS 54
+#define AUE_FSTATFS 55
+#define AUE_UNMOUNT 56 /* XXX: Darwin conflict. */
+#define AUE_ASYNC_DAEMON 57
+#define AUE_NFS_GETFH 58 /* XXX: Darwin conflict. */
+#define AUE_SETDOMAINNAME 59
+#define AUE_QUOTACTL 60 /* XXX: Darwin conflict. */
+#define AUE_EXPORTFS 61
+#define AUE_MOUNT 62
+#define AUE_SEMSYS 63
+#define AUE_MSGSYS 64
+#define AUE_SHMSYS 65
+#define AUE_BSMSYS 66 /* Solaris-specific. */
+#define AUE_RFSSYS 67 /* Solaris-specific. */
+#define AUE_FCHDIR 68
+#define AUE_FCHROOT 69
+#define AUE_VPIXSYS 70 /* Solaris-specific. */
+#define AUE_PATHCONF 71
+#define AUE_OPEN_R 72
+#define AUE_OPEN_RC 73
+#define AUE_OPEN_RT 74
+#define AUE_OPEN_RTC 75
+#define AUE_OPEN_W 76
+#define AUE_OPEN_WC 77
+#define AUE_OPEN_WT 78
+#define AUE_OPEN_WTC 79
+#define AUE_OPEN_RW 80
+#define AUE_OPEN_RWC 81
+#define AUE_OPEN_RWT 82
+#define AUE_OPEN_RWTC 83
+#define AUE_MSGCTL 84
+#define AUE_MSGCTL_RMID 85
+#define AUE_MSGCTL_SET 86
+#define AUE_MSGCTL_STAT 87
+#define AUE_MSGGET 88
+#define AUE_MSGRCV 89
+#define AUE_MSGSND 90
+#define AUE_SHMCTL 91
+#define AUE_SHMCTL_RMID 92
+#define AUE_SHMCTL_SET 93
+#define AUE_SHMCTL_STAT 94
+#define AUE_SHMGET 95
+#define AUE_SHMAT 96
+#define AUE_SHMDT 97
+#define AUE_SEMCTL 98
+#define AUE_SEMCTL_RMID 99
+#define AUE_SEMCTL_SET 100
+#define AUE_SEMCTL_STAT 101
+#define AUE_SEMCTL_GETNCNT 102
+#define AUE_SEMCTL_GETPID 103
+#define AUE_SEMCTL_GETVAL 104
+#define AUE_SEMCTL_GETALL 105
+#define AUE_SEMCTL_GETZCNT 106
+#define AUE_SEMCTL_SETVAL 107
+#define AUE_SEMCTL_SETALL 108
+#define AUE_SEMGET 109
+#define AUE_SEMOP 110
+#define AUE_CORE 111 /* Solaris-specific, currently. */
+#define AUE_CLOSE 112
+#define AUE_SYSTEMBOOT 113 /* Solaris-specific. */
+#define AUE_ASYNC_DAEMON_EXIT 114 /* Solaris-specific. */
+#define AUE_NFSSVC_EXIT 115 /* Solaris-specific. */
+#define AUE_WRITEL 128 /* Solaris-specific. */
+#define AUE_WRITEVL 129 /* Solaris-specific. */
+#define AUE_GETAUID 130
+#define AUE_SETAUID 131
+#define AUE_GETAUDIT 132
+#define AUE_SETAUDIT 133
+#define AUE_GETUSERAUDIT 134 /* Solaris-specific. */
+#define AUE_SETUSERAUDIT 135 /* Solaris-specific. */
+#define AUE_AUDITSVC 136 /* Solaris-specific. */
+#define AUE_AUDITUSER 137 /* Solaris-specific. */
+#define AUE_AUDITON 138
+#define AUE_AUDITON_GTERMID 139 /* Solaris-specific. */
+#define AUE_AUDITON_STERMID 140 /* Solaris-specific. */
+#define AUE_AUDITON_GPOLICY 141
+#define AUE_AUDITON_SPOLICY 142
+#define AUE_AUDITON_GQCTRL 145
+#define AUE_AUDITON_SQCTRL 146
+#define AUE_GETKERNSTATE 147 /* Solaris-specific. */
+#define AUE_SETKERNSTATE 148 /* Solaris-specific. */
+#define AUE_GETPORTAUDIT 149 /* Solaris-specific. */
+#define AUE_AUDITSTAT 150 /* Solaris-specific. */
+#define AUE_REVOKE 151
+#define AUE_MAC 152 /* Solaris-specific. */
+#define AUE_ENTERPROM 153 /* Solaris-specific. */
+#define AUE_EXITPROM 154 /* Solaris-specific. */
+#define AUE_IFLOAT 155 /* Solaris-specific. */
+#define AUE_PFLOAT 156 /* Solaris-specific. */
+#define AUE_UPRIV 157 /* Solaris-specific. */
+#define AUE_IOCTL 158
+#define AUE_SOCKET 183
+#define AUE_SENDTO 184
+#define AUE_PIPE 185
+#define AUE_SOCKETPAIR 186 /* XXX: Darwin conflict. */
+#define AUE_SEND 187
+#define AUE_SENDMSG 188
+#define AUE_RECV 189
+#define AUE_RECVMSG 190
+#define AUE_RECVFROM 191
+#define AUE_READ 192
+#define AUE_GETDENTS 193
+#define AUE_LSEEK 194
+#define AUE_WRITE 195
+#define AUE_WRITEV 196
+#define AUE_NFS 197 /* Solaris-specific. */
+#define AUE_READV 198
+#define AUE_OSTAT 199 /* Solaris-specific. */
+#define AUE_SETUID 200 /* XXXRW: Solaris old setuid? */
+#define AUE_STIME 201 /* XXXRW: Solaris old stime? */
+#define AUE_UTIME 202 /* XXXRW: Solaris old utime? */
+#define AUE_NICE 203 /* XXXRW: Solaris old nice? */
+#define AUE_OSETPGRP 204 /* Solaris-specific. */
+#define AUE_SETGID 205
+#define AUE_READL 206 /* Solaris-specific. */
+#define AUE_READVL 207 /* Solaris-specific. */
+#define AUE_FSTAT 208
+#define AUE_DUP2 209
+#define AUE_MMAP 210
+#define AUE_AUDIT 211
+#define AUE_PRIOCNTLSYS 212 /* Solaris-specific. */
+#define AUE_MUNMAP 213
+#define AUE_SETEGID 214
+#define AUE_SETEUID 215
+#define AUE_PUTMSG 216 /* Solaris-specific. */
+#define AUE_GETMSG 217 /* Solaris-specific. */
+#define AUE_PUTPMSG 218 /* Solaris-specific. */
+#define AUE_GETPMSG 219 /* Solaris-specific. */
+#define AUE_AUDITSYS 220 /* Solaris-specific. */
+#define AUE_AUDITON_GETKMASK 221
+#define AUE_AUDITON_SETKMASK 222
+#define AUE_AUDITON_GETCWD 223
+#define AUE_AUDITON_GETCAR 224
+#define AUE_AUDITON_GETSTAT 225
+#define AUE_AUDITON_SETSTAT 226
+#define AUE_AUDITON_SETUMASK 227
+#define AUE_AUDITON_SETSMASK 228
+#define AUE_AUDITON_GETCOND 229
+#define AUE_AUDITON_SETCOND 230
+#define AUE_AUDITON_GETCLASS 231
+#define AUE_AUDITON_SETCLASS 232
+#define AUE_FUSERS 233 /* Solaris-specific; also UTSSYS? */
+#define AUE_STATVFS 234
+#define AUE_XSTAT 235 /* Solaris-specific. */
+#define AUE_LXSTAT 236 /* Solaris-specific. */
+#define AUE_LCHOWN 237
+#define AUE_MEMCNTL 238 /* Solaris-specific. */
+#define AUE_SYSINFO 239 /* Solaris-specific. */
+#define AUE_XMKNOD 240 /* Solaris-specific. */
+#define AUE_FORK1 241
+#define AUE_MODCTL 242 /* Solaris-specific. */
+#define AUE_MODLOAD 243
+#define AUE_MODUNLOAD 244
+#define AUE_MODCONFIG 245 /* Solaris-specific. */
+#define AUE_MODADDMAJ 246 /* Solaris-specific. */
+#define AUE_SOCKACCEPT 247 /* Solaris-specific. */
+#define AUE_SOCKCONNECT 248 /* Solaris-specific. */
+#define AUE_SOCKSEND 249 /* Solaris-specific. */
+#define AUE_SOCKRECEIVE 250 /* Solaris-specific. */
+#define AUE_ACLSET 251
+#define AUE_FACLSET 252
+#define AUE_DOORFS 253 /* Solaris-specific. */
+#define AUE_DOORFS_DOOR_CALL 254 /* Solaris-specific. */
+#define AUE_DOORFS_DOOR_RETURN 255 /* Solaris-specific. */
+#define AUE_DOORFS_DOOR_CREATE 256 /* Solaris-specific. */
+#define AUE_DOORFS_DOOR_REVOKE 257 /* Solaris-specific. */
+#define AUE_DOORFS_DOOR_INFO 258 /* Solaris-specific. */
+#define AUE_DOORFS_DOOR_CRED 259 /* Solaris-specific. */
+#define AUE_DOORFS_DOOR_BIND 260 /* Solaris-specific. */
+#define AUE_DOORFS_DOOR_UNBIND 261 /* Solaris-specific. */
+#define AUE_P_ONLINE 262 /* Solaris-specific. */
+#define AUE_PROCESSOR_BIND 263 /* Solaris-specific. */
+#define AUE_INST_SYNC 264 /* Solaris-specific. */
+#define AUE_SOCKCONFIG 265 /* Solaris-specific. */
+#define AUE_SETAUDIT_ADDR 266
+#define AUE_GETAUDIT_ADDR 267
+#define AUE_UMOUNT2 268 /* Solaris-specific. */
+#define AUE_FSAT 269 /* Solaris-specific. */
+#define AUE_OPENAT_R 270
+#define AUE_OPENAT_RC 271
+#define AUE_OPENAT_RT 272
+#define AUE_OPENAT_RTC 273
+#define AUE_OPENAT_W 274
+#define AUE_OPENAT_WC 275
+#define AUE_OPENAT_WT 276
+#define AUE_OPENAT_WTC 277
+#define AUE_OPENAT_RW 278
+#define AUE_OPENAT_RWC 279
+#define AUE_OPENAT_RWT 280
+#define AUE_OPENAT_RWTC 281
+#define AUE_RENAMEAT 282
+#define AUE_FSTATAT 283
+#define AUE_FCHOWNAT 284
+#define AUE_FUTIMESAT 285
+#define AUE_UNLINKAT 286
+#define AUE_CLOCK_SETTIME 287
+#define AUE_NTP_ADJTIME 288
+#define AUE_SETPPRIV 289 /* Solaris-specific. */
+#define AUE_MODDEVPLCY 290 /* Solaris-specific. */
+#define AUE_MODADDPRIV 291 /* Solaris-specific. */
+#define AUE_CRYPTOADM 292 /* Solaris-specific. */
+#define AUE_CONFIGKSSL 293 /* Solaris-specific. */
+#define AUE_BRANDSYS 294 /* Solaris-specific. */
+#define AUE_PF_POLICY_ADDRULE 295 /* Solaris-specific. */
+#define AUE_PF_POLICY_DELRULE 296 /* Solaris-specific. */
+#define AUE_PF_POLICY_CLONE 297 /* Solaris-specific. */
+#define AUE_PF_POLICY_FLIP 298 /* Solaris-specific. */
+#define AUE_PF_POLICY_FLUSH 299 /* Solaris-specific. */
+#define AUE_PF_POLICY_ALGS 300 /* Solaris-specific. */
+#define AUE_PORTFS 301 /* Solaris-specific. */
+
+/*
+ * Events added for Apple Darwin that potentially collide with future Solaris
+ * BSM events. These are assigned AUE_DARWIN prefixes, and are deprecated in
+ * new trails. Systems generating these events should switch to the new
+ * identifiers that avoid colliding with the Solaris identifier space.
+ */
+#define AUE_DARWIN_GETFSSTAT 301
+#define AUE_DARWIN_PTRACE 302
+#define AUE_DARWIN_CHFLAGS 303
+#define AUE_DARWIN_FCHFLAGS 304
+#define AUE_DARWIN_PROFILE 305
+#define AUE_DARWIN_KTRACE 306
+#define AUE_DARWIN_SETLOGIN 307
+#define AUE_DARWIN_REBOOT 308
+#define AUE_DARWIN_REVOKE 309
+#define AUE_DARWIN_UMASK 310
+#define AUE_DARWIN_MPROTECT 311
+#define AUE_DARWIN_SETPRIORITY 312
+#define AUE_DARWIN_SETTIMEOFDAY 313
+#define AUE_DARWIN_FLOCK 314
+#define AUE_DARWIN_MKFIFO 315
+#define AUE_DARWIN_POLL 316
+#define AUE_DARWIN_SOCKETPAIR 317
+#define AUE_DARWIN_FUTIMES 318
+#define AUE_DARWIN_SETSID 319
+#define AUE_DARWIN_SETPRIVEXEC 320 /* Darwin-specific. */
+#define AUE_DARWIN_NFSSVC 321
+#define AUE_DARWIN_GETFH 322
+#define AUE_DARWIN_QUOTACTL 323
+#define AUE_DARWIN_ADDPROFILE 324 /* Darwin-specific. */
+#define AUE_DARWIN_KDEBUGTRACE 325 /* Darwin-specific. */
+#define AUE_DARWIN_KDBUGTRACE AUE_KDEBUGTRACE
+#define AUE_DARWIN_FSTAT 326
+#define AUE_DARWIN_FPATHCONF 327
+#define AUE_DARWIN_GETDIRENTRIES 328
+#define AUE_DARWIN_TRUNCATE 329
+#define AUE_DARWIN_FTRUNCATE 330
+#define AUE_DARWIN_SYSCTL 331
+#define AUE_DARWIN_MLOCK 332
+#define AUE_DARWIN_MUNLOCK 333
+#define AUE_DARWIN_UNDELETE 334
+#define AUE_DARWIN_GETATTRLIST 335 /* Darwin-specific. */
+#define AUE_DARWIN_SETATTRLIST 336 /* Darwin-specific. */
+#define AUE_DARWIN_GETDIRENTRIESATTR 337 /* Darwin-specific. */
+#define AUE_DARWIN_EXCHANGEDATA 338 /* Darwin-specific. */
+#define AUE_DARWIN_SEARCHFS 339 /* Darwin-specific. */
+#define AUE_DARWIN_MINHERIT 340
+#define AUE_DARWIN_SEMCONFIG 341
+#define AUE_DARWIN_SEMOPEN 342
+#define AUE_DARWIN_SEMCLOSE 343
+#define AUE_DARWIN_SEMUNLINK 344
+#define AUE_DARWIN_SHMOPEN 345
+#define AUE_DARWIN_SHMUNLINK 346
+#define AUE_DARWIN_LOADSHFILE 347 /* Darwin-specific. */
+#define AUE_DARWIN_RESETSHFILE 348 /* Darwin-specific. */
+#define AUE_DARWIN_NEWSYSTEMSHREG 349 /* Darwin-specific. */
+#define AUE_DARWIN_PTHREADKILL 350 /* Darwin-specific. */
+#define AUE_DARWIN_PTHREADSIGMASK 351 /* Darwin-specific. */
+#define AUE_DARWIN_AUDITCTL 352
+#define AUE_DARWIN_RFORK 353
+#define AUE_DARWIN_LCHMOD 354
+#define AUE_DARWIN_SWAPOFF 355
+#define AUE_DARWIN_INITPROCESS 356 /* Darwin-specific. */
+#define AUE_DARWIN_MAPFD 357 /* Darwin-specific. */
+#define AUE_DARWIN_TASKFORPID 358 /* Darwin-specific. */
+#define AUE_DARWIN_PIDFORTASK 359 /* Darwin-specific. */
+#define AUE_DARWIN_SYSCTL_NONADMIN 360
+#define AUE_DARWIN_COPYFILE 361 /* Darwin-specific. */
+
+/*
+ * Audit event identifiers added as part of OpenBSM, generally corresponding
+ * to events in FreeBSD, Darwin, and Linux that were not present in Solaris.
+ * These often duplicate events added to the Solaris set by Darwin, but use
+ * event identifiers in a higher range in order to avoid colliding with
+ * future Solaris additions.
+ *
+ * If an event in this section is later added to Solaris, we prefer the
+ * Solaris event identifier, and add _OPENBSM_ to the OpenBSM-specific
+ * identifier so that old trails can still be processed, but new trails use
+ * the Solaris identifier.
+ */
+#define AUE_GETFSSTAT 43001
+#define AUE_PTRACE 43002
+#define AUE_CHFLAGS 43003
+#define AUE_FCHFLAGS 43004
+#define AUE_PROFILE 43005
+#define AUE_KTRACE 43006
+#define AUE_SETLOGIN 43007
+#define AUE_OPENBSM_REVOKE 43008 /* Solaris event now preferred. */
+#define AUE_UMASK 43009
+#define AUE_MPROTECT 43010
+#define AUE_MKFIFO 43011
+#define AUE_POLL 43012
+#define AUE_FUTIMES 43013
+#define AUE_SETSID 43014
+#define AUE_SETPRIVEXEC 43015 /* Darwin-specific. */
+#define AUE_ADDPROFILE 43016 /* Darwin-specific. */
+#define AUE_KDEBUGTRACE 43017 /* Darwin-specific. */
+#define AUE_KDBUGTRACE AUE_KDEBUGTRACE
+#define AUE_OPENBSM_FSTAT 43018 /* Solaris event now preferred. */
+#define AUE_FPATHCONF 43019
+#define AUE_GETDIRENTRIES 43020
+#define AUE_SYSCTL 43021
+#define AUE_MLOCK 43022
+#define AUE_MUNLOCK 43023
+#define AUE_UNDELETE 43024
+#define AUE_GETATTRLIST 43025 /* Darwin-specific. */
+#define AUE_SETATTRLIST 43026 /* Darwin-specific. */
+#define AUE_GETDIRENTRIESATTR 43027 /* Darwin-specific. */
+#define AUE_EXCHANGEDATA 43028 /* Darwin-specific. */
+#define AUE_SEARCHFS 43029 /* Darwin-specific. */
+#define AUE_MINHERIT 43030
+#define AUE_SEMCONFIG 43031
+#define AUE_SEMOPEN 43032
+#define AUE_SEMCLOSE 43033
+#define AUE_SEMUNLINK 43034
+#define AUE_SHMOPEN 43035
+#define AUE_SHMUNLINK 43036
+#define AUE_LOADSHFILE 43037 /* Darwin-specific. */
+#define AUE_RESETSHFILE 43038 /* Darwin-specific. */
+#define AUE_NEWSYSTEMSHREG 43039 /* Darwin-specific. */
+#define AUE_PTHREADKILL 43040 /* Darwin-specific. */
+#define AUE_PTHREADSIGMASK 43041 /* Darwin-specific. */
+#define AUE_AUDITCTL 43042
+#define AUE_RFORK 43043
+#define AUE_LCHMOD 43044
+#define AUE_SWAPOFF 43045
+#define AUE_INITPROCESS 43046 /* Darwin-specific. */
+#define AUE_MAPFD 43047 /* Darwin-specific. */
+#define AUE_TASKFORPID 43048 /* Darwin-specific. */
+#define AUE_PIDFORTASK 43049 /* Darwin-specific. */
+#define AUE_SYSCTL_NONADMIN 43050
+#define AUE_COPYFILE 43051 /* Darwin-specific. */
+
+/*
+ * Events added to OpenBSM for FreeBSD and Linux; may also be used by Darwin
+ * in the future.
+ */
+#define AUE_LUTIMES 43052
+#define AUE_LCHFLAGS 43053 /* FreeBSD-specific. */
+#define AUE_SENDFILE 43054 /* BSD/Linux-specific. */
+#define AUE_USELIB 43055 /* Linux-specific. */
+#define AUE_GETRESUID 43056
+#define AUE_SETRESUID 43057
+#define AUE_GETRESGID 43058
+#define AUE_SETRESGID 43059
+#define AUE_WAIT4 43060 /* FreeBSD-specific. */
+#define AUE_LGETFH 43061 /* FreeBSD-specific. */
+#define AUE_FHSTATFS 43062 /* FreeBSD-specific. */
+#define AUE_FHOPEN 43063 /* FreeBSD-specific. */
+#define AUE_FHSTAT 43064 /* FreeBSD-specific. */
+#define AUE_JAIL 43065 /* FreeBSD-specific. */
+#define AUE_EACCESS 43066 /* FreeBSD-specific. */
+#define AUE_KQUEUE 43067 /* FreeBSD-specific. */
+#define AUE_KEVENT 43068 /* FreeBSD-specific. */
+#define AUE_FSYNC 43069
+#define AUE_NMOUNT 43070 /* FreeBSD-specific. */
+#define AUE_BDFLUSH 43071 /* Linux-specific. */
+#define AUE_SETFSUID 43072 /* Linux-specific. */
+#define AUE_SETFSGID 43073 /* Linux-specific. */
+#define AUE_PERSONALITY 43074 /* Linux-specific. */
+#define AUE_SCHED_GETSCHEDULER 43075 /* POSIX.1b. */
+#define AUE_SCHED_SETSCHEDULER 43076 /* POSIX.1b. */
+#define AUE_PRCTL 43077 /* Linux-specific. */
+#define AUE_GETCWD 43078 /* FreeBSD/Linux-specific. */
+#define AUE_CAPGET 43079 /* Linux-specific. */
+#define AUE_CAPSET 43080 /* Linux-specific. */
+#define AUE_PIVOT_ROOT 43081 /* Linux-specific. */
+#define AUE_RTPRIO 43082 /* FreeBSD-specific. */
+#define AUE_SCHED_GETPARAM 43083 /* POSIX.1b. */
+#define AUE_SCHED_SETPARAM 43084 /* POSIX.1b. */
+#define AUE_SCHED_GET_PRIORITY_MAX 43085 /* POSIX.1b. */
+#define AUE_SCHED_GET_PRIORITY_MIN 43086 /* POSIX.1b. */
+#define AUE_SCHED_RR_GET_INTERVAL 43087 /* POSIX.1b. */
+#define AUE_ACL_GET_FILE 43088 /* FreeBSD. */
+#define AUE_ACL_SET_FILE 43089 /* FreeBSD. */
+#define AUE_ACL_GET_FD 43090 /* FreeBSD. */
+#define AUE_ACL_SET_FD 43091 /* FreeBSD. */
+#define AUE_ACL_DELETE_FILE 43092 /* FreeBSD. */
+#define AUE_ACL_DELETE_FD 43093 /* FreeBSD. */
+#define AUE_ACL_CHECK_FILE 43094 /* FreeBSD. */
+#define AUE_ACL_CHECK_FD 43095 /* FreeBSD. */
+#define AUE_ACL_GET_LINK 43096 /* FreeBSD. */
+#define AUE_ACL_SET_LINK 43097 /* FreeBSD. */
+#define AUE_ACL_DELETE_LINK 43098 /* FreeBSD. */
+#define AUE_ACL_CHECK_LINK 43099 /* FreeBSD. */
+#define AUE_SYSARCH 43100 /* FreeBSD. */
+#define AUE_EXTATTRCTL 43101 /* FreeBSD. */
+#define AUE_EXTATTR_GET_FILE 43102 /* FreeBSD. */
+#define AUE_EXTATTR_SET_FILE 43103 /* FreeBSD. */
+#define AUE_EXTATTR_LIST_FILE 43104 /* FreeBSD. */
+#define AUE_EXTATTR_DELETE_FILE 43105 /* FreeBSD. */
+#define AUE_EXTATTR_GET_FD 43106 /* FreeBSD. */
+#define AUE_EXTATTR_SET_FD 43107 /* FreeBSD. */
+#define AUE_EXTATTR_LIST_FD 43108 /* FreeBSD. */
+#define AUE_EXTATTR_DELETE_FD 43109 /* FreeBSD. */
+#define AUE_EXTATTR_GET_LINK 43110 /* FreeBSD. */
+#define AUE_EXTATTR_SET_LINK 43111 /* FreeBSD. */
+#define AUE_EXTATTR_LIST_LINK 43112 /* FreeBSD. */
+#define AUE_EXTATTR_DELETE_LINK 43113 /* FreeBSD. */
+#define AUE_KENV 43114 /* FreeBSD. */
+#define AUE_JAIL_ATTACH 43115 /* FreeBSD. */
+#define AUE_SYSCTL_WRITE 43116 /* FreeBSD. */
+#define AUE_IOPERM 43117 /* Linux. */
+#define AUE_READDIR 43118 /* Linux. */
+#define AUE_IOPL 43119 /* Linux. */
+#define AUE_VM86 43120 /* Linux. */
+#define AUE_MAC_GET_PROC 43121 /* FreeBSD/Darwin. */
+#define AUE_MAC_SET_PROC 43122 /* FreeBSD/Darwin. */
+#define AUE_MAC_GET_FD 43123 /* FreeBSD/Darwin. */
+#define AUE_MAC_GET_FILE 43124 /* FreeBSD/Darwin. */
+#define AUE_MAC_SET_FD 43125 /* FreeBSD/Darwin. */
+#define AUE_MAC_SET_FILE 43126 /* FreeBSD/Darwin. */
+#define AUE_MAC_SYSCALL 43127 /* FreeBSD. */
+#define AUE_MAC_GET_PID 43128 /* FreeBSD/Darwin. */
+#define AUE_MAC_GET_LINK 43129 /* FreeBSD/Darwin. */
+#define AUE_MAC_SET_LINK 43130 /* FreeBSD/Darwin. */
+#define AUE_MAC_EXECVE 43131 /* FreeBSD/Darwin. */
+#define AUE_GETPATH_FROMFD 43132 /* FreeBSD. */
+#define AUE_GETPATH_FROMADDR 43133 /* FreeBSD. */
+#define AUE_MQ_OPEN 43134 /* FreeBSD. */
+#define AUE_MQ_SETATTR 43135 /* FreeBSD. */
+#define AUE_MQ_TIMEDRECEIVE 43136 /* FreeBSD. */
+#define AUE_MQ_TIMEDSEND 43137 /* FreeBSD. */
+#define AUE_MQ_NOTIFY 43138 /* FreeBSD. */
+#define AUE_MQ_UNLINK 43139 /* FreeBSD. */
+#define AUE_LISTEN 43140 /* FreeBSD/Darwin/Linux. */
+#define AUE_MLOCKALL 43141 /* FreeBSD. */
+#define AUE_MUNLOCKALL 43142 /* FreeBSD. */
+#define AUE_CLOSEFROM 43143 /* FreeBSD. */
+#define AUE_FEXECVE 43144 /* FreeBSD. */
+#define AUE_FACCESSAT 43145 /* FreeBSD. */
+#define AUE_FCHMODAT 43146 /* FreeBSD. */
+#define AUE_LINKAT 43147 /* FreeBSD. */
+#define AUE_MKDIRAT 43148 /* FreeBSD. */
+#define AUE_MKFIFOAT 43149 /* FreeBSD. */
+#define AUE_MKNODAT 43150 /* FreeBSD. */
+#define AUE_READLINKAT 43151 /* FreeBSD. */
+#define AUE_SYMLINKAT 43152 /* FreeBSD. */
+#define AUE_MAC_GETFSSTAT 43153 /* Darwin. */
+#define AUE_MAC_GET_MOUNT 43154 /* Darwin. */
+#define AUE_MAC_GET_LCID 43155 /* Darwin. */
+#define AUE_MAC_GET_LCTX 43156 /* Darwin. */
+#define AUE_MAC_SET_LCTX 43157 /* Darwin. */
+#define AUE_MAC_MOUNT 43158 /* Darwin. */
+#define AUE_GETLCID 43159 /* Darwin. */
+#define AUE_SETLCID 43160 /* Darwin. */
+#define AUE_TASKNAMEFORPID 43161 /* Darwin. */
+#define AUE_ACCESS_EXTENDED 43162 /* Darwin. */
+#define AUE_CHMOD_EXTENDED 43163 /* Darwin. */
+#define AUE_FCHMOD_EXTENDED 43164 /* Darwin. */
+#define AUE_FSTAT_EXTENDED 43165 /* Darwin. */
+#define AUE_LSTAT_EXTENDED 43166 /* Darwin. */
+#define AUE_MKDIR_EXTENDED 43167 /* Darwin. */
+#define AUE_MKFIFO_EXTENDED 43168 /* Darwin. */
+#define AUE_OPEN_EXTENDED 43169 /* Darwin. */
+#define AUE_OPEN_EXTENDED_R 43170 /* Darwin. */
+#define AUE_OPEN_EXTENDED_RC 43171 /* Darwin. */
+#define AUE_OPEN_EXTENDED_RT 43172 /* Darwin. */
+#define AUE_OPEN_EXTENDED_RTC 43173 /* Darwin. */
+#define AUE_OPEN_EXTENDED_W 43174 /* Darwin. */
+#define AUE_OPEN_EXTENDED_WC 43175 /* Darwin. */
+#define AUE_OPEN_EXTENDED_WT 43176 /* Darwin. */
+#define AUE_OPEN_EXTENDED_WTC 43177 /* Darwin. */
+#define AUE_OPEN_EXTENDED_RW 43178 /* Darwin. */
+#define AUE_OPEN_EXTENDED_RWC 43179 /* Darwin. */
+#define AUE_OPEN_EXTENDED_RWT 43180 /* Darwin. */
+#define AUE_OPEN_EXTENDED_RWTC 43181 /* Darwin. */
+#define AUE_STAT_EXTENDED 43182 /* Darwin. */
+#define AUE_UMASK_EXTENDED 43183 /* Darwin. */
+#define AUE_OPENAT 43184 /* FreeBSD. */
+#define AUE_POSIX_OPENPT 43185 /* FreeBSD. */
+#define AUE_CAP_NEW 43186 /* TrustedBSD. */
+#define AUE_CAP_GETRIGHTS 43187 /* TrustedBSD. */
+#define AUE_CAP_ENTER 43188 /* TrustedBSD. */
+#define AUE_CAP_GETMODE 43189 /* TrustedBSD. */
+#define AUE_POSIX_SPAWN 43190 /* Darwin. */
+#define AUE_FSGETPATH 43191 /* Darwin. */
+#define AUE_PREAD 43192 /* Darwin/FreeBSD. */
+#define AUE_PWRITE 43193 /* Darwin/FreeBSD. */
+#define AUE_FSCTL 43194 /* Darwin. */
+#define AUE_FFSCTL 43195 /* Darwin. */
+#define AUE_LPATHCONF 43196 /* FreeBSD. */
+#define AUE_PDFORK 43197 /* FreeBSD. */
+#define AUE_PDKILL 43198 /* FreeBSD. */
+#define AUE_PDGETPID 43199 /* FreeBSD. */
+#define AUE_PDWAIT 43200 /* FreeBSD. */
+
+/*
+ * Darwin BSM uses a number of AUE_O_* definitions, which are aliased to the
+ * normal Solaris BSM identifiers; the _O_ infix marks an old, or compat,
+ * interface. In most cases, Darwin never implemented these system calls
+ * but picked up the entries in its system call table from the FreeBSD
+ * import. Happily, these have different names than the AUE_O* definitions
+ * in Solaris BSM.
+ */
+#define AUE_O_CREAT AUE_OPEN_RWTC /* Darwin */
+#define AUE_O_EXECVE AUE_NULL /* Darwin */
+#define AUE_O_SBREAK AUE_NULL /* Darwin */
+#define AUE_O_LSEEK AUE_NULL /* Darwin */
+#define AUE_O_MOUNT AUE_NULL /* Darwin */
+#define AUE_O_UMOUNT AUE_NULL /* Darwin */
+#define AUE_O_STAT AUE_STAT /* Darwin */
+#define AUE_O_LSTAT AUE_LSTAT /* Darwin */
+#define AUE_O_FSTAT AUE_FSTAT /* Darwin */
+#define AUE_O_GETPAGESIZE AUE_NULL /* Darwin */
+#define AUE_O_VREAD AUE_NULL /* Darwin */
+#define AUE_O_VWRITE AUE_NULL /* Darwin */
+#define AUE_O_MMAP AUE_MMAP /* Darwin */
+#define AUE_O_VADVISE AUE_NULL /* Darwin */
+#define AUE_O_VHANGUP AUE_NULL /* Darwin */
+#define AUE_O_VLIMIT AUE_NULL /* Darwin */
+#define AUE_O_WAIT AUE_NULL /* Darwin */
+#define AUE_O_GETHOSTNAME AUE_NULL /* Darwin */
+#define AUE_O_SETHOSTNAME AUE_SYSCTL /* Darwin */
+#define AUE_O_GETDOPT AUE_NULL /* Darwin */
+#define AUE_O_SETDOPT AUE_NULL /* Darwin */
+#define AUE_O_ACCEPT AUE_NULL /* Darwin */
+#define AUE_O_SEND AUE_SENDMSG /* Darwin */
+#define AUE_O_RECV AUE_RECVMSG /* Darwin */
+#define AUE_O_VTIMES AUE_NULL /* Darwin */
+#define AUE_O_SIGVEC AUE_NULL /* Darwin */
+#define AUE_O_SIGBLOCK AUE_NULL /* Darwin */
+#define AUE_O_SIGSETMASK AUE_NULL /* Darwin */
+#define AUE_O_SIGSTACK AUE_NULL /* Darwin */
+#define AUE_O_RECVMSG AUE_RECVMSG /* Darwin */
+#define AUE_O_SENDMSG AUE_SENDMSG /* Darwin */
+#define AUE_O_VTRACE AUE_NULL /* Darwin */
+#define AUE_O_RESUBA AUE_NULL /* Darwin */
+#define AUE_O_RECVFROM AUE_RECVFROM /* Darwin */
+#define AUE_O_SETREUID AUE_SETREUID /* Darwin */
+#define AUE_O_SETREGID AUE_SETREGID /* Darwin */
+#define AUE_O_GETDIRENTRIES AUE_GETDIRENTRIES /* Darwin */
+#define AUE_O_TRUNCATE AUE_TRUNCATE /* Darwin */
+#define AUE_O_FTRUNCATE AUE_FTRUNCATE /* Darwin */
+#define AUE_O_GETPEERNAME AUE_NULL /* Darwin */
+#define AUE_O_GETHOSTID AUE_NULL /* Darwin */
+#define AUE_O_SETHOSTID AUE_NULL /* Darwin */
+#define AUE_O_GETRLIMIT AUE_NULL /* Darwin */
+#define AUE_O_SETRLIMIT AUE_SETRLIMIT /* Darwin */
+#define AUE_O_KILLPG AUE_KILL /* Darwin */
+#define AUE_O_SETQUOTA AUE_NULL /* Darwin */
+#define AUE_O_QUOTA AUE_NULL /* Darwin */
+#define AUE_O_GETSOCKNAME AUE_NULL /* Darwin */
+#define AUE_O_GETDIREENTRIES	AUE_GETDIRENTRIES	/* Darwin */
+#define AUE_O_ASYNCDAEMON AUE_NULL /* Darwin */
+#define AUE_O_GETDOMAINNAME AUE_NULL /* Darwin */
+#define AUE_O_SETDOMAINNAME AUE_SYSCTL /* Darwin */
+#define AUE_O_PCFS_MOUNT AUE_NULL /* Darwin */
+#define AUE_O_EXPORTFS AUE_NULL /* Darwin */
+#define AUE_O_USTATE AUE_NULL /* Darwin */
+#define AUE_O_WAIT3 AUE_NULL /* Darwin */
+#define AUE_O_RPAUSE AUE_NULL /* Darwin */
+#define AUE_O_GETDENTS AUE_NULL /* Darwin */
+
+/*
+ * Possible desired future values based on review of BSD/Darwin system calls.
+ */
+#define AUE_ATGETMSG AUE_NULL
+#define AUE_ATPUTMSG AUE_NULL
+#define AUE_ATSOCKET AUE_NULL
+#define AUE_ATPGETREQ AUE_NULL
+#define AUE_ATPGETRSP AUE_NULL
+#define AUE_ATPSNDREQ AUE_NULL
+#define AUE_ATPSNDRSP AUE_NULL
+#define AUE_BSDTHREADCREATE AUE_NULL
+#define AUE_BSDTHREADTERMINATE AUE_NULL
+#define AUE_BSDTHREADREGISTER AUE_NULL
+#define AUE_CHUD AUE_NULL
+#define AUE_CSOPS AUE_NULL
+#define AUE_DUP AUE_NULL
+#define AUE_FDATASYNC AUE_NULL
+#define AUE_FGETATTRLIST AUE_NULL
+#define AUE_FGETXATTR AUE_NULL
+#define AUE_FLISTXATTR AUE_NULL
+#define AUE_FREMOVEXATTR AUE_NULL
+#define AUE_FSETATTRLIST AUE_NULL
+#define AUE_FSETXATTR AUE_NULL
+#define AUE_FSTATFS64 AUE_NULL
+#define AUE_FSTATV AUE_NULL
+#define AUE_FSTAT64 AUE_NULL
+#define AUE_FSTAT64_EXTENDED AUE_NULL
+#define AUE_GCCONTROL AUE_NULL
+#define AUE_GETDIRENTRIES64 AUE_NULL
+#define AUE_GETDTABLESIZE AUE_NULL
+#define AUE_GETEGID AUE_NULL
+#define AUE_GETEUID AUE_NULL
+#define AUE_GETFSSTAT64 AUE_NULL
+#define AUE_GETGID AUE_NULL
+#define AUE_GETGROUPS AUE_NULL
+#define AUE_GETITIMER AUE_NULL
+#define AUE_GETLOGIN AUE_NULL
+#define AUE_GETPEERNAME AUE_NULL
+#define AUE_GETPGID AUE_NULL
+#define AUE_GETPGRP AUE_NULL
+#define AUE_GETPID AUE_NULL
+#define AUE_GETPPID AUE_NULL
+#define AUE_GETPRIORITY AUE_NULL
+#define AUE_GETRLIMIT AUE_NULL
+#define AUE_GETRUSAGE AUE_NULL
+#define AUE_GETSGROUPS AUE_NULL
+#define AUE_GETSID AUE_NULL
+#define AUE_GETSOCKNAME AUE_NULL
+#define AUE_GETTIMEOFDAY AUE_NULL
+#define AUE_GETTID AUE_NULL
+#define AUE_GETUID AUE_NULL
+#define AUE_GETSOCKOPT AUE_NULL
+#define AUE_GETWGROUPS AUE_NULL
+#define AUE_GETXATTR AUE_NULL
+#define AUE_IDENTITYSVC AUE_NULL
+#define AUE_INITGROUPS AUE_NULL
+#define AUE_IOPOLICYSYS AUE_NULL
+#define AUE_ISSETUGID AUE_NULL
+#define AUE_LIOLISTIO AUE_NULL
+#define AUE_LISTXATTR AUE_NULL
+#define AUE_LSTATV AUE_NULL
+#define AUE_LSTAT64 AUE_NULL
+#define AUE_LSTAT64_EXTENDED AUE_NULL
+#define AUE_MADVISE AUE_NULL
+#define AUE_MINCORE AUE_NULL
+#define AUE_MKCOMPLEX AUE_NULL
+#define AUE_MODWATCH AUE_NULL
+#define AUE_MSGCL AUE_NULL
+#define AUE_MSYNC AUE_NULL
+#define AUE_PREADV AUE_NULL
+#define AUE_PROCINFO AUE_NULL
+#define AUE_PTHREADCANCELED AUE_NULL
+#define AUE_PTHREADCHDIR AUE_NULL
+#define AUE_PTHREADCONDBROADCAST AUE_NULL
+#define AUE_PTHREADCONDDESTORY AUE_NULL
+#define AUE_PTHREADCONDINIT AUE_NULL
+#define AUE_PTHREADCONDSIGNAL AUE_NULL
+#define AUE_PTHREADCONDWAIT AUE_NULL
+#define AUE_PTHREADFCHDIR AUE_NULL
+#define AUE_PTHREADMARK AUE_NULL
+#define AUE_PTHREADMUTEXDESTROY AUE_NULL
+#define AUE_PTHREADMUTEXINIT AUE_NULL
+#define AUE_PTHREADMUTEXTRYLOCK AUE_NULL
+#define AUE_PTHREADMUTEXUNLOCK AUE_NULL
+#define AUE_PWRITEV AUE_NULL
+#define AUE_REMOVEXATTR AUE_NULL
+#define AUE_SBRK AUE_NULL
+#define AUE_SELECT AUE_NULL
+#define AUE_SEMDESTROY AUE_NULL
+#define AUE_SEMGETVALUE AUE_NULL
+#define AUE_SEMINIT AUE_NULL
+#define AUE_SEMPOST AUE_NULL
+#define AUE_SEMTRYWAIT AUE_NULL
+#define AUE_SEMWAIT AUE_NULL
+#define AUE_SEMWAITSIGNAL AUE_NULL
+#define AUE_SETITIMER AUE_NULL
+#define AUE_SETSGROUPS AUE_NULL
+#define AUE_SETTID AUE_NULL
+#define AUE_SETTIDWITHPID AUE_NULL
+#define AUE_SETWGROUPS AUE_NULL
+#define AUE_SETXATTR AUE_NULL
+#define AUE_SHAREDREGIONCHECK AUE_NULL
+#define AUE_SHAREDREGIONMAP AUE_NULL
+#define AUE_SIGACTION AUE_NULL
+#define AUE_SIGALTSTACK AUE_NULL
+#define AUE_SIGPENDING AUE_NULL
+#define AUE_SIGPROCMASK AUE_NULL
+#define AUE_SIGRETURN AUE_NULL
+#define AUE_SIGSUSPEND AUE_NULL
+#define AUE_SIGWAIT AUE_NULL
+#define AUE_SSTK AUE_NULL
+#define AUE_STACKSNAPSHOT AUE_NULL
+#define AUE_STATFS64 AUE_NULL
+#define AUE_STATV AUE_NULL
+#define AUE_STAT64 AUE_NULL
+#define AUE_STAT64_EXTENDED AUE_NULL
+#define AUE_SYNC AUE_NULL
+#define AUE_SYSCALL AUE_NULL
+#define AUE_TABLE AUE_NULL
+#define AUE_VMPRESSUREMONITOR AUE_NULL
+#define AUE_WAITEVENT AUE_NULL
+#define AUE_WAITID AUE_NULL
+#define AUE_WATCHEVENT AUE_NULL
+#define AUE_WORKQOPEN AUE_NULL
+#define AUE_WORKQOPS AUE_NULL
+
+#endif /* !_BSM_AUDIT_KEVENTS_HH_ */
diff --git a/rtems/freebsd/cam/ata/ata_all.h b/rtems/freebsd/cam/ata/ata_all.h
new file mode 100644
index 00000000..1103c34f
--- /dev/null
+++ b/rtems/freebsd/cam/ata/ata_all.h
@@ -0,0 +1,127 @@
+/*-
+ * Copyright (c) 2009 Alexander Motin <mav@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer,
+ * without modification, immediately at the beginning of the file.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef CAM_ATA_ALL_H
+#define CAM_ATA_ALL_H 1
+
+#include <rtems/freebsd/sys/ata.h>
+
+struct ccb_ataio;
+struct cam_periph;
+union ccb;
+
+struct ata_cmd {
+ u_int8_t flags; /* ATA command flags */
+#define CAM_ATAIO_48BIT 0x01 /* Command has 48-bit format */
+#define CAM_ATAIO_FPDMA 0x02 /* FPDMA command */
+#define CAM_ATAIO_CONTROL 0x04 /* Control, not a command */
+#define CAM_ATAIO_NEEDRESULT 0x08 /* Request requires result. */
+#define CAM_ATAIO_DMA 0x10 /* DMA command */
+
+ u_int8_t command;
+ u_int8_t features;
+
+ u_int8_t lba_low;
+ u_int8_t lba_mid;
+ u_int8_t lba_high;
+ u_int8_t device;
+
+ u_int8_t lba_low_exp;
+ u_int8_t lba_mid_exp;
+ u_int8_t lba_high_exp;
+ u_int8_t features_exp;
+
+ u_int8_t sector_count;
+ u_int8_t sector_count_exp;
+ u_int8_t control;
+};
+
+struct ata_res {
+ u_int8_t flags; /* ATA command flags */
+#define CAM_ATAIO_48BIT 0x01 /* Command has 48-bit format */
+
+ u_int8_t status;
+ u_int8_t error;
+
+ u_int8_t lba_low;
+ u_int8_t lba_mid;
+ u_int8_t lba_high;
+ u_int8_t device;
+
+ u_int8_t lba_low_exp;
+ u_int8_t lba_mid_exp;
+ u_int8_t lba_high_exp;
+
+ u_int8_t sector_count;
+ u_int8_t sector_count_exp;
+};
+
+int ata_version(int ver);
+
+char * ata_op_string(struct ata_cmd *cmd);
+char * ata_cmd_string(struct ata_cmd *cmd, char *cmd_string, size_t len);
+char * ata_res_string(struct ata_res *res, char *res_string, size_t len);
+int ata_command_sbuf(struct ccb_ataio *ataio, struct sbuf *sb);
+int ata_status_sbuf(struct ccb_ataio *ataio, struct sbuf *sb);
+int ata_res_sbuf(struct ccb_ataio *ataio, struct sbuf *sb);
+
+void ata_print_ident(struct ata_params *ident_data);
+
+uint32_t ata_logical_sector_size(struct ata_params *ident_data);
+uint64_t ata_physical_sector_size(struct ata_params *ident_data);
+uint64_t ata_logical_sector_offset(struct ata_params *ident_data);
+
+void ata_28bit_cmd(struct ccb_ataio *ataio, uint8_t cmd, uint8_t features,
+ uint32_t lba, uint8_t sector_count);
+void ata_48bit_cmd(struct ccb_ataio *ataio, uint8_t cmd, uint16_t features,
+ uint64_t lba, uint16_t sector_count);
+void ata_ncq_cmd(struct ccb_ataio *ataio, uint8_t cmd,
+ uint64_t lba, uint16_t sector_count);
+void ata_reset_cmd(struct ccb_ataio *ataio);
+void ata_pm_read_cmd(struct ccb_ataio *ataio, int reg, int port);
+void ata_pm_write_cmd(struct ccb_ataio *ataio, int reg, int port, uint32_t val);
+
+void ata_bswap(int8_t *buf, int len);
+void ata_btrim(int8_t *buf, int len);
+void ata_bpack(int8_t *src, int8_t *dst, int len);
+
+int ata_max_pmode(struct ata_params *ap);
+int ata_max_wmode(struct ata_params *ap);
+int ata_max_umode(struct ata_params *ap);
+int ata_max_mode(struct ata_params *ap, int maxmode);
+
+char * ata_mode2string(int mode);
+int ata_string2mode(char *str);
+u_int ata_mode2speed(int mode);
+u_int ata_revision2speed(int revision);
+int ata_speed2revision(u_int speed);
+
+int ata_identify_match(caddr_t identbuffer, caddr_t table_entry);
+int ata_static_identify_match(caddr_t identbuffer, caddr_t table_entry);
+
+#endif
diff --git a/rtems/freebsd/cam/cam.c b/rtems/freebsd/cam/cam.c
new file mode 100644
index 00000000..41566534
--- /dev/null
+++ b/rtems/freebsd/cam/cam.c
@@ -0,0 +1,438 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Generic utility routines for the Common Access Method layer.
+ *
+ * Copyright (c) 1997 Justin T. Gibbs.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification, immediately at the beginning of the file.
+ * 2. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/freebsd/sys/param.h>
+#ifdef _KERNEL
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/sysctl.h>
+#else /* _KERNEL */
+#include <rtems/freebsd/stdlib.h>
+#include <rtems/freebsd/stdio.h>
+#include <rtems/freebsd/camlib.h>
+#endif /* _KERNEL */
+
+#include <rtems/freebsd/cam/cam.h>
+#include <rtems/freebsd/cam/cam_ccb.h>
+#include <rtems/freebsd/cam/scsi/scsi_all.h>
+#include <rtems/freebsd/sys/sbuf.h>
+
+#ifdef _KERNEL
+#include <rtems/freebsd/sys/libkern.h>
+#include <rtems/freebsd/cam/cam_queue.h>
+#include <rtems/freebsd/cam/cam_xpt.h>
+#endif
+
+static int camstatusentrycomp(const void *key, const void *member);
+
+const struct cam_status_entry cam_status_table[] = {
+ { CAM_REQ_INPROG, "CCB request is in progress" },
+ { CAM_REQ_CMP, "CCB request completed without error" },
+ { CAM_REQ_ABORTED, "CCB request aborted by the host" },
+ { CAM_UA_ABORT, "Unable to abort CCB request" },
+ { CAM_REQ_CMP_ERR, "CCB request completed with an error" },
+ { CAM_BUSY, "CAM subsystem is busy" },
+ { CAM_REQ_INVALID, "CCB request was invalid" },
+ { CAM_PATH_INVALID, "Supplied Path ID is invalid" },
+ { CAM_DEV_NOT_THERE, "Device Not Present" },
+ { CAM_UA_TERMIO, "Unable to terminate I/O CCB request" },
+ { CAM_SEL_TIMEOUT, "Selection Timeout" },
+ { CAM_CMD_TIMEOUT, "Command timeout" },
+ { CAM_SCSI_STATUS_ERROR, "SCSI Status Error" },
+	{ CAM_MSG_REJECT_REC,	"Message Reject Received" },
+ { CAM_SCSI_BUS_RESET, "SCSI Bus Reset Sent/Received" },
+ { CAM_UNCOR_PARITY, "Uncorrectable parity/CRC error" },
+ { CAM_AUTOSENSE_FAIL, "Auto-Sense Retrieval Failed" },
+ { CAM_NO_HBA, "No HBA Detected" },
+ { CAM_DATA_RUN_ERR, "Data Overrun error" },
+ { CAM_UNEXP_BUSFREE, "Unexpected Bus Free" },
+ { CAM_SEQUENCE_FAIL, "Target Bus Phase Sequence Failure" },
+ { CAM_CCB_LEN_ERR, "CCB length supplied is inadequate" },
+ { CAM_PROVIDE_FAIL, "Unable to provide requested capability" },
+ { CAM_BDR_SENT, "SCSI BDR Message Sent" },
+ { CAM_REQ_TERMIO, "CCB request terminated by the host" },
+ { CAM_UNREC_HBA_ERROR, "Unrecoverable Host Bus Adapter Error" },
+ { CAM_REQ_TOO_BIG, "The request was too large for this host" },
+	{ CAM_REQUEUE_REQ,	"Unconditionally Re-queue Request" },
+ { CAM_ATA_STATUS_ERROR, "ATA Status Error" },
+ { CAM_IDE, "Initiator Detected Error Message Received" },
+ { CAM_RESRC_UNAVAIL, "Resource Unavailable" },
+ { CAM_UNACKED_EVENT, "Unacknowledged Event by Host" },
+ { CAM_MESSAGE_RECV, "Message Received in Host Target Mode" },
+ { CAM_INVALID_CDB, "Invalid CDB received in Host Target Mode" },
+ { CAM_LUN_INVALID, "Invalid Lun" },
+ { CAM_TID_INVALID, "Invalid Target ID" },
+ { CAM_FUNC_NOTAVAIL, "Function Not Available" },
+ { CAM_NO_NEXUS, "Nexus Not Established" },
+ { CAM_IID_INVALID, "Invalid Initiator ID" },
+ { CAM_CDB_RECVD, "CDB Received" },
+ { CAM_LUN_ALRDY_ENA, "LUN Already Enabled for Target Mode" },
+ { CAM_SCSI_BUSY, "SCSI Bus Busy" },
+};
+
+const int num_cam_status_entries =
+ sizeof(cam_status_table)/sizeof(*cam_status_table);
+
+#ifdef _KERNEL
+SYSCTL_NODE(_kern, OID_AUTO, cam, CTLFLAG_RD, 0, "CAM Subsystem");
+#endif
+
+void
+cam_strvis(u_int8_t *dst, const u_int8_t *src, int srclen, int dstlen)
+{
+
+ /* Trim leading/trailing spaces, nulls. */
+ while (srclen > 0 && src[0] == ' ')
+ src++, srclen--;
+ while (srclen > 0
+ && (src[srclen-1] == ' ' || src[srclen-1] == '\0'))
+ srclen--;
+
+ while (srclen > 0 && dstlen > 1) {
+ u_int8_t *cur_pos = dst;
+
+ if (*src < 0x20 || *src >= 0x80) {
+			/* SCSI-II specifies that these should never occur. */
+ /* non-printable character */
+ if (dstlen > 4) {
+ *cur_pos++ = '\\';
+ *cur_pos++ = ((*src & 0300) >> 6) + '0';
+ *cur_pos++ = ((*src & 0070) >> 3) + '0';
+ *cur_pos++ = ((*src & 0007) >> 0) + '0';
+ } else {
+ *cur_pos++ = '?';
+ }
+ } else {
+ /* normal character */
+ *cur_pos++ = *src;
+ }
+ src++;
+ srclen--;
+ dstlen -= cur_pos - dst;
+ dst = cur_pos;
+ }
+ *dst = '\0';
+}
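cam_strvis() above is typically used to render fixed-width, space-padded SCSI
identification fields for display. A minimal sketch of a caller (the buffer
names and contents are hypothetical); the destination is sized for the worst
case of one four-character octal escape per source byte plus the NUL:

	/* Hypothetical: render a space-padded INQUIRY vendor field. */
	const u_int8_t vendor[8] = "ATA     ";
	u_int8_t vis[sizeof(vendor) * 4 + 1];

	cam_strvis(vis, vendor, sizeof(vendor), sizeof(vis));
	printf("vendor: \"%s\"\n", vis);	/* prints: vendor: "ATA" */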
+
+/*
+ * Compare string with pattern, returning 0 on match.
+ * Short pattern matches trailing blanks in name,
+ * wildcard '*' in pattern matches rest of name,
+ * wildcard '?' matches a single non-space character.
+ */
+int
+cam_strmatch(const u_int8_t *str, const u_int8_t *pattern, int str_len)
+{
+
+	while (*pattern != '\0' && str_len > 0) {
+
+ if (*pattern == '*') {
+ return (0);
+ }
+ if ((*pattern != *str)
+ && (*pattern != '?' || *str == ' ')) {
+ return (1);
+ }
+ pattern++;
+ str++;
+ str_len--;
+ }
+ while (str_len > 0 && *str == ' ') {
+ str++;
+ str_len--;
+ }
+ if (str_len > 0 && *str == 0)
+ str_len = 0;
+
+ return (str_len);
+}
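Given the rules above, a few illustrative calls against fixed-length,
space-padded fields, each of which returns 0 (a match):

	/* '*' matches the rest of the name. */
	cam_strmatch((const u_int8_t *)"WDC WD800", (const u_int8_t *)"WDC*", 9);
	/* A short pattern matches trailing blanks. */
	cam_strmatch((const u_int8_t *)"ATA     ", (const u_int8_t *)"ATA", 8);
	/* '?' matches exactly one non-space character. */
	cam_strmatch((const u_int8_t *)"ST3120", (const u_int8_t *)"ST?120", 6);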
+
+caddr_t
+cam_quirkmatch(caddr_t target, caddr_t quirk_table, int num_entries,
+ int entry_size, cam_quirkmatch_t *comp_func)
+{
+ for (; num_entries > 0; num_entries--, quirk_table += entry_size) {
+ if ((*comp_func)(target, quirk_table) == 0)
+ return (quirk_table);
+ }
+ return (NULL);
+}
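The usual calling pattern scans a driver-private quirk table whose entries
begin with a match pattern, using a comparator such as scsi_inquiry_match()
from scsi_all; a sketch assuming a hypothetical xx_quirk table type:

	struct xx_quirk {
		struct scsi_static_inquiry_pattern inq_pat;
		int flags;
	};
	static struct xx_quirk xx_quirks[] = {
		{ { T_DIRECT, SIP_MEDIA_FIXED, "QUIRKY", "*", "*" }, 0x01 },
	};
	struct xx_quirk *match;

	match = (struct xx_quirk *)cam_quirkmatch((caddr_t)&inq_data,
	    (caddr_t)xx_quirks, sizeof(xx_quirks) / sizeof(xx_quirks[0]),
	    sizeof(xx_quirks[0]), scsi_inquiry_match);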
+
+const struct cam_status_entry*
+cam_fetch_status_entry(cam_status status)
+{
+ status &= CAM_STATUS_MASK;
+ return (bsearch(&status, &cam_status_table,
+ num_cam_status_entries,
+ sizeof(*cam_status_table),
+ camstatusentrycomp));
+}
+
+static int
+camstatusentrycomp(const void *key, const void *member)
+{
+ cam_status status;
+ const struct cam_status_entry *table_entry;
+
+ status = *(const cam_status *)key;
+ table_entry = (const struct cam_status_entry *)member;
+
+ return (status - table_entry->status_code);
+}
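Because cam_status_table above is kept sorted by status code, bsearch()
applies; mapping a completed CCB's status to text is then a one-liner (ccb is
assumed to point at a completed request; the function masks off the flag bits
itself):

	const struct cam_status_entry *entry;

	entry = cam_fetch_status_entry(ccb->ccb_h.status);
	printf("CAM status: %s\n",
	    entry != NULL ? entry->status_text : "Unknown");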
+
+
+#ifndef __rtems__
+#ifdef _KERNEL
+char *
+cam_error_string(union ccb *ccb, char *str, int str_len,
+ cam_error_string_flags flags,
+ cam_error_proto_flags proto_flags)
+#else /* !_KERNEL */
+char *
+cam_error_string(struct cam_device *device, union ccb *ccb, char *str,
+ int str_len, cam_error_string_flags flags,
+ cam_error_proto_flags proto_flags)
+#endif /* _KERNEL/!_KERNEL */
+{
+ char path_str[64];
+ struct sbuf sb;
+
+ if ((ccb == NULL)
+ || (str == NULL)
+ || (str_len <= 0))
+ return(NULL);
+
+ if (flags == CAM_ESF_NONE)
+ return(NULL);
+
+ switch (ccb->ccb_h.func_code) {
+ case XPT_ATA_IO:
+ switch (proto_flags & CAM_EPF_LEVEL_MASK) {
+ case CAM_EPF_NONE:
+ break;
+ case CAM_EPF_ALL:
+ case CAM_EPF_NORMAL:
+ proto_flags |= CAM_EAF_PRINT_RESULT;
+ /* FALLTHROUGH */
+ case CAM_EPF_MINIMAL:
+ proto_flags |= CAM_EAF_PRINT_STATUS;
+ /* FALLTHROUGH */
+ default:
+ break;
+ }
+ break;
+ case XPT_SCSI_IO:
+ switch (proto_flags & CAM_EPF_LEVEL_MASK) {
+ case CAM_EPF_NONE:
+ break;
+ case CAM_EPF_ALL:
+ case CAM_EPF_NORMAL:
+ proto_flags |= CAM_ESF_PRINT_SENSE;
+ /* FALLTHROUGH */
+ case CAM_EPF_MINIMAL:
+ proto_flags |= CAM_ESF_PRINT_STATUS;
+ /* FALLTHROUGH */
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+#ifdef _KERNEL
+ xpt_path_string(ccb->csio.ccb_h.path, path_str, sizeof(path_str));
+#else /* !_KERNEL */
+ cam_path_string(device, path_str, sizeof(path_str));
+#endif /* _KERNEL/!_KERNEL */
+
+ sbuf_new(&sb, str, str_len, 0);
+
+ if (flags & CAM_ESF_COMMAND) {
+ sbuf_cat(&sb, path_str);
+ switch (ccb->ccb_h.func_code) {
+ case XPT_ATA_IO:
+ ata_command_sbuf(&ccb->ataio, &sb);
+ sbuf_printf(&sb, "\n");
+ break;
+ case XPT_SCSI_IO:
+#ifdef _KERNEL
+ scsi_command_string(&ccb->csio, &sb);
+#else /* !_KERNEL */
+ scsi_command_string(device, &ccb->csio, &sb);
+#endif /* _KERNEL/!_KERNEL */
+ sbuf_printf(&sb, "\n");
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (flags & CAM_ESF_CAM_STATUS) {
+ cam_status status;
+ const struct cam_status_entry *entry;
+
+ sbuf_cat(&sb, path_str);
+
+ status = ccb->ccb_h.status & CAM_STATUS_MASK;
+
+ entry = cam_fetch_status_entry(status);
+
+ if (entry == NULL)
+ sbuf_printf(&sb, "CAM status: Unknown (%#x)\n",
+ ccb->ccb_h.status);
+ else
+ sbuf_printf(&sb, "CAM status: %s\n",
+ entry->status_text);
+ }
+
+ if (flags & CAM_ESF_PROTO_STATUS) {
+
+ switch (ccb->ccb_h.func_code) {
+ case XPT_ATA_IO:
+ if ((ccb->ccb_h.status & CAM_STATUS_MASK) !=
+ CAM_ATA_STATUS_ERROR)
+ break;
+ if (proto_flags & CAM_EAF_PRINT_STATUS) {
+ sbuf_cat(&sb, path_str);
+ ata_status_sbuf(&ccb->ataio, &sb);
+ sbuf_printf(&sb, "\n");
+ }
+ if (proto_flags & CAM_EAF_PRINT_RESULT) {
+ sbuf_cat(&sb, path_str);
+ ata_res_sbuf(&ccb->ataio, &sb);
+ sbuf_printf(&sb, "\n");
+ }
+
+ break;
+ case XPT_SCSI_IO:
+ if ((ccb->ccb_h.status & CAM_STATUS_MASK) !=
+ CAM_SCSI_STATUS_ERROR)
+ break;
+
+ if (proto_flags & CAM_ESF_PRINT_STATUS) {
+ sbuf_cat(&sb, path_str);
+ sbuf_printf(&sb, "SCSI status: %s\n",
+ scsi_status_string(&ccb->csio));
+ }
+
+ if ((proto_flags & CAM_ESF_PRINT_SENSE)
+ && (ccb->csio.scsi_status == SCSI_STATUS_CHECK_COND)
+ && (ccb->ccb_h.status & CAM_AUTOSNS_VALID)) {
+
+#ifdef _KERNEL
+ scsi_sense_sbuf(&ccb->csio, &sb,
+ SSS_FLAG_NONE);
+#else /* !_KERNEL */
+ scsi_sense_sbuf(device, &ccb->csio, &sb,
+ SSS_FLAG_NONE);
+#endif /* _KERNEL/!_KERNEL */
+ }
+ break;
+ default:
+ break;
+ }
+ }
+
+ sbuf_finish(&sb);
+
+ return(sbuf_data(&sb));
+}
+
+#ifdef _KERNEL
+
+void
+cam_error_print(union ccb *ccb, cam_error_string_flags flags,
+ cam_error_proto_flags proto_flags)
+{
+ char str[512];
+
+ printf("%s", cam_error_string(ccb, str, sizeof(str), flags,
+ proto_flags));
+}
+
+#else /* !_KERNEL */
+
+void
+cam_error_print(struct cam_device *device, union ccb *ccb,
+ cam_error_string_flags flags, cam_error_proto_flags proto_flags,
+ FILE *ofile)
+{
+ char str[512];
+
+ if ((device == NULL) || (ccb == NULL) || (ofile == NULL))
+ return;
+
+ fprintf(ofile, "%s", cam_error_string(device, ccb, str, sizeof(str),
+ flags, proto_flags));
+}
+
+#endif /* _KERNEL/!_KERNEL */
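A sketch of typical kernel-side use, from a peripheral driver's completion
path (done_ccb and the surrounding driver context are assumed):

	if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
		/* Print the command plus the CAM and protocol status. */
		cam_error_print(done_ccb, CAM_ESF_ALL, CAM_EPF_ALL);
	}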
+
+/*
+ * Common geometry calculation function
+ *
+ * Caller should set ccg->volume_size and block_size.
+ * The extended parameter should be zero if extended translation
+ * should not be used.
+ */
+void
+cam_calc_geometry(struct ccb_calc_geometry *ccg, int extended)
+{
+ uint32_t size_mb, secs_per_cylinder;
+
+ if (ccg->block_size == 0) {
+ ccg->ccb_h.status = CAM_REQ_CMP_ERR;
+ return;
+ }
+ size_mb = (1024L * 1024L) / ccg->block_size;
+ if (size_mb == 0) {
+ ccg->ccb_h.status = CAM_REQ_CMP_ERR;
+ return;
+ }
+ size_mb = ccg->volume_size / size_mb;
+ if (size_mb > 1024 && extended) {
+ ccg->heads = 255;
+ ccg->secs_per_track = 63;
+ } else {
+ ccg->heads = 64;
+ ccg->secs_per_track = 32;
+ }
+ secs_per_cylinder = ccg->heads * ccg->secs_per_track;
+ if (secs_per_cylinder == 0) {
+ ccg->ccb_h.status = CAM_REQ_CMP_ERR;
+ return;
+ }
+ ccg->cylinders = ccg->volume_size / secs_per_cylinder;
+ ccg->ccb_h.status = CAM_REQ_CMP;
+}
+#endif /* __rtems__ */
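As a worked example of the translation above (note the function is compiled
out under __rtems__): with a 512-byte block size there are 2048 blocks per MB,
so a volume of 4194304 blocks is 2048 MB; that exceeds 1024 MB, so with
extended translation the code selects 255 heads and 63 sectors per track,
giving 4194304 / (255 * 63) = 261 cylinders.

	struct ccb_calc_geometry ccg;

	ccg.block_size = 512;
	ccg.volume_size = 4194304;	/* 2 GB in 512-byte blocks */
	cam_calc_geometry(&ccg, /*extended*/ 1);
	/* ccg.heads == 255, ccg.secs_per_track == 63, ccg.cylinders == 261 */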
diff --git a/rtems/freebsd/cam/cam.h b/rtems/freebsd/cam/cam.h
new file mode 100644
index 00000000..54cee51e
--- /dev/null
+++ b/rtems/freebsd/cam/cam.h
@@ -0,0 +1,263 @@
+/*-
+ * Data structures and definitions for the CAM system.
+ *
+ * Copyright (c) 1997 Justin T. Gibbs.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification, immediately at the beginning of the file.
+ * 2. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _CAM_CAM_H
+#define _CAM_CAM_H 1
+
+#ifdef _KERNEL
+#ifndef __rtems__
+#include <rtems/freebsd/opt_cam.h>
+#else /* __rtems__ */
+#include <rtems/freebsd/local/opt_cam.h>
+#endif /* __rtems__ */
+#endif
+
+#include <rtems/freebsd/sys/cdefs.h>
+
+typedef u_int path_id_t;
+typedef u_int target_id_t;
+typedef u_int lun_id_t;
+
+#define CAM_XPT_PATH_ID ((path_id_t)~0)
+#define CAM_BUS_WILDCARD ((path_id_t)~0)
+#define CAM_TARGET_WILDCARD ((target_id_t)~0)
+#define CAM_LUN_WILDCARD ((lun_id_t)~0)
+
+/*
+ * Maximum length for a CAM CDB.
+ */
+#define CAM_MAX_CDBLEN 16
+
+/*
+ * Definition of a CAM peripheral driver entry. Peripheral drivers instantiate
+ * one of these for each device they wish to communicate with and pass it into
+ * the xpt layer when they wish to schedule work on that device via the
+ * xpt_schedule API.
+ */
+struct cam_periph;
+
+/*
+ * Priority information for a CAM structure.
+ */
+typedef enum {
+ CAM_RL_HOST,
+ CAM_RL_BUS,
+ CAM_RL_XPT,
+ CAM_RL_DEV,
+ CAM_RL_NORMAL,
+ CAM_RL_VALUES
+} cam_rl;
+/*
+ * The generation number is incremented every time a new entry is entered into
+ * the queue, giving round-robin scheduling per priority level.
+ */
+typedef struct {
+ u_int32_t priority;
+#define CAM_PRIORITY_HOST ((CAM_RL_HOST << 8) + 0x80)
+#define CAM_PRIORITY_BUS ((CAM_RL_BUS << 8) + 0x80)
+#define CAM_PRIORITY_XPT ((CAM_RL_XPT << 8) + 0x80)
+#define CAM_PRIORITY_DEV ((CAM_RL_DEV << 8) + 0x80)
+#define CAM_PRIORITY_NORMAL ((CAM_RL_NORMAL << 8) + 0x80)
+#define CAM_PRIORITY_NONE (u_int32_t)-1
+#define CAM_PRIORITY_TO_RL(x) ((x) >> 8)
+ u_int32_t generation;
+ int index;
+#define CAM_UNQUEUED_INDEX -1
+#define CAM_ACTIVE_INDEX -2
+#define CAM_DONEQ_INDEX -3
+} cam_pinfo;
+
+/*
+ * Macro to compare two generation numbers. It is used like this:
+ *
+ * if (GENERATIONCMP(a, >=, b))
+ * ...;
+ *
+ * GENERATIONCMP uses modular arithmetic to guard against wraps
+ * in the generation number.
+ */
+#define GENERATIONCMP(x, op, y) ((int32_t)((x) - (y)) op 0)
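For instance, once the 32-bit generation counter wraps past zero a plain
comparison orders the values wrongly, while the subtract-and-cast form still
does the right thing:

	u_int32_t newer = 0x00000002;	/* counter has wrapped past zero */
	u_int32_t older = 0xfffffffe;

	GENERATIONCMP(newer, >, older);	/* true: (int32_t)(newer - older) == 4 */
	/* a direct (newer > older) comparison would be false */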
+
+/* CAM flags XXX Move to cam_periph.h ??? */
+typedef enum {
+ CAM_FLAG_NONE = 0x00,
+ CAM_EXPECT_INQ_CHANGE = 0x01,
+ CAM_RETRY_SELTO = 0x02 /* Retry Selection Timeouts */
+} cam_flags;
+
+/* CAM Status field values */
+typedef enum {
+ CAM_REQ_INPROG, /* CCB request is in progress */
+ CAM_REQ_CMP, /* CCB request completed without error */
+ CAM_REQ_ABORTED, /* CCB request aborted by the host */
+ CAM_UA_ABORT, /* Unable to abort CCB request */
+ CAM_REQ_CMP_ERR, /* CCB request completed with an error */
+ CAM_BUSY, /* CAM subsystem is busy */
+ CAM_REQ_INVALID, /* CCB request was invalid */
+ CAM_PATH_INVALID, /* Supplied Path ID is invalid */
+ CAM_DEV_NOT_THERE, /* SCSI Device Not Installed/there */
+ CAM_UA_TERMIO, /* Unable to terminate I/O CCB request */
+ CAM_SEL_TIMEOUT, /* Target Selection Timeout */
+ CAM_CMD_TIMEOUT, /* Command timeout */
+ CAM_SCSI_STATUS_ERROR, /* SCSI error, look at error code in CCB */
+ CAM_MSG_REJECT_REC, /* Message Reject Received */
+ CAM_SCSI_BUS_RESET, /* SCSI Bus Reset Sent/Received */
+ CAM_UNCOR_PARITY, /* Uncorrectable parity error occurred */
+ CAM_AUTOSENSE_FAIL = 0x10,/* Autosense: request sense cmd fail */
+ CAM_NO_HBA, /* No HBA Detected error */
+ CAM_DATA_RUN_ERR, /* Data Overrun error */
+ CAM_UNEXP_BUSFREE, /* Unexpected Bus Free */
+ CAM_SEQUENCE_FAIL, /* Target Bus Phase Sequence Failure */
+ CAM_CCB_LEN_ERR, /* CCB length supplied is inadequate */
+ CAM_PROVIDE_FAIL, /* Unable to provide requested capability */
+ CAM_BDR_SENT, /* A SCSI BDR msg was sent to target */
+ CAM_REQ_TERMIO, /* CCB request terminated by the host */
+ CAM_UNREC_HBA_ERROR, /* Unrecoverable Host Bus Adapter Error */
+ CAM_REQ_TOO_BIG, /* The request was too large for this host */
+ CAM_REQUEUE_REQ, /*
+ * This request should be requeued to preserve
+ * transaction ordering. This typically occurs
+ * when the SIM recognizes an error that should
+ * freeze the queue and must place additional
+ * requests for the target at the sim level
+ * back into the XPT queue.
+ */
+ CAM_ATA_STATUS_ERROR, /* ATA error, look at error code in CCB */
+ CAM_SCSI_IT_NEXUS_LOST, /* Initiator/Target Nexus lost. */
+ CAM_IDE = 0x33, /* Initiator Detected Error */
+ CAM_RESRC_UNAVAIL, /* Resource Unavailable */
+ CAM_UNACKED_EVENT, /* Unacknowledged Event by Host */
+ CAM_MESSAGE_RECV, /* Message Received in Host Target Mode */
+ CAM_INVALID_CDB, /* Invalid CDB received in Host Target Mode */
+ CAM_LUN_INVALID, /* Lun supplied is invalid */
+ CAM_TID_INVALID, /* Target ID supplied is invalid */
+ CAM_FUNC_NOTAVAIL, /* The requested function is not available */
+ CAM_NO_NEXUS, /* Nexus is not established */
+ CAM_IID_INVALID, /* The initiator ID is invalid */
+ CAM_CDB_RECVD, /* The SCSI CDB has been received */
+ CAM_LUN_ALRDY_ENA, /* The LUN is already enabled for target mode */
+ CAM_SCSI_BUSY, /* SCSI Bus Busy */
+
+ CAM_DEV_QFRZN = 0x40, /* The DEV queue is frozen w/this err */
+
+ /* Autosense data valid for target */
+ CAM_AUTOSNS_VALID = 0x80,
+ CAM_RELEASE_SIMQ = 0x100,/* SIM ready to take more commands */
+	CAM_SIM_QUEUED		= 0x200,/* SIM has this command in its queue */
+
+ CAM_STATUS_MASK = 0x3F, /* Mask bits for just the status # */
+
+ /* Target Specific Adjunct Status */
+ CAM_SENT_SENSE = 0x40000000 /* sent sense with status */
+} cam_status;
+
+typedef enum {
+ CAM_ESF_NONE = 0x00,
+ CAM_ESF_COMMAND = 0x01,
+ CAM_ESF_CAM_STATUS = 0x02,
+ CAM_ESF_PROTO_STATUS = 0x04,
+ CAM_ESF_ALL = 0xff
+} cam_error_string_flags;
+
+typedef enum {
+ CAM_EPF_NONE = 0x00,
+ CAM_EPF_MINIMAL = 0x01,
+ CAM_EPF_NORMAL = 0x02,
+ CAM_EPF_ALL = 0x03,
+ CAM_EPF_LEVEL_MASK = 0x0f
+ /* All bits above bit 3 are protocol-specific */
+} cam_error_proto_flags;
+
+typedef enum {
+ CAM_ESF_PRINT_NONE = 0x00,
+ CAM_ESF_PRINT_STATUS = 0x10,
+ CAM_ESF_PRINT_SENSE = 0x20
+} cam_error_scsi_flags;
+
+typedef enum {
+ CAM_EAF_PRINT_NONE = 0x00,
+ CAM_EAF_PRINT_STATUS = 0x10,
+ CAM_EAF_PRINT_RESULT = 0x20
+} cam_error_ata_flags;
+
+struct cam_status_entry
+{
+ cam_status status_code;
+ const char *status_text;
+};
+
+extern const struct cam_status_entry cam_status_table[];
+extern const int num_cam_status_entries;
+union ccb;
+
+#ifdef SYSCTL_DECL /* from sysctl.h */
+SYSCTL_DECL(_kern_cam);
+#endif
+
+__BEGIN_DECLS
+typedef int (cam_quirkmatch_t)(caddr_t, caddr_t);
+
+caddr_t cam_quirkmatch(caddr_t target, caddr_t quirk_table, int num_entries,
+ int entry_size, cam_quirkmatch_t *comp_func);
+
+void cam_strvis(u_int8_t *dst, const u_int8_t *src, int srclen, int dstlen);
+
+int cam_strmatch(const u_int8_t *str, const u_int8_t *pattern, int str_len);
+const struct cam_status_entry*
+ cam_fetch_status_entry(cam_status status);
+#ifdef _KERNEL
+char * cam_error_string(union ccb *ccb, char *str, int str_len,
+ cam_error_string_flags flags,
+ cam_error_proto_flags proto_flags);
+void cam_error_print(union ccb *ccb, cam_error_string_flags flags,
+ cam_error_proto_flags proto_flags);
+#else /* _KERNEL */
+struct cam_device;
+
+char * cam_error_string(struct cam_device *device, union ccb *ccb, char *str,
+ int str_len, cam_error_string_flags flags,
+ cam_error_proto_flags proto_flags);
+void cam_error_print(struct cam_device *device, union ccb *ccb,
+ cam_error_string_flags flags,
+ cam_error_proto_flags proto_flags, FILE *ofile);
+#endif /* _KERNEL */
+__END_DECLS
+
+#ifdef _KERNEL
+static __inline void cam_init_pinfo(cam_pinfo *pinfo);
+
+static __inline void cam_init_pinfo(cam_pinfo *pinfo)
+{
+ pinfo->priority = CAM_PRIORITY_NONE;
+ pinfo->index = CAM_UNQUEUED_INDEX;
+}
+#endif
+
+#endif /* _CAM_CAM_H */
diff --git a/rtems/freebsd/cam/cam_ccb.h b/rtems/freebsd/cam/cam_ccb.h
new file mode 100644
index 00000000..c91a3ed0
--- /dev/null
+++ b/rtems/freebsd/cam/cam_ccb.h
@@ -0,0 +1,1193 @@
+/*-
+ * Data structures and definitions for CAM Control Blocks (CCBs).
+ *
+ * Copyright (c) 1997, 1998 Justin T. Gibbs.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification, immediately at the beginning of the file.
+ * 2. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _CAM_CAM_CCB_H
+#define _CAM_CAM_CCB_H 1
+
+#include <rtems/freebsd/sys/queue.h>
+#include <rtems/freebsd/sys/cdefs.h>
+#include <rtems/freebsd/sys/time.h>
+#include <rtems/freebsd/sys/limits.h>
+#ifndef _KERNEL
+#include <rtems/freebsd/sys/callout.h>
+#endif
+#include <rtems/freebsd/cam/cam_debug.h>
+#include <rtems/freebsd/cam/scsi/scsi_all.h>
+#include <rtems/freebsd/cam/ata/ata_all.h>
+
+#ifdef __rtems__
+#include <rtems/blkdev.h>
+#endif /* __rtems__ */
+
+/* General allocation length definitions for CCB structures */
+#define IOCDBLEN CAM_MAX_CDBLEN /* Space for CDB bytes/pointer */
+#define VUHBALEN 14 /* Vendor Unique HBA length */
+#define SIM_IDLEN 16 /* ASCII string len for SIM ID */
+#define HBA_IDLEN 16 /* ASCII string len for HBA ID */
+#define DEV_IDLEN 16 /* ASCII string len for device names */
+#define CCB_PERIPH_PRIV_SIZE 2 /* size of peripheral private area */
+#define CCB_SIM_PRIV_SIZE 2 /* size of sim private area */
+
+/* Struct definitions for CAM control blocks */
+
+/* Common CCB header */
+/* CAM CCB flags */
+typedef enum {
+ CAM_CDB_POINTER = 0x00000001,/* The CDB field is a pointer */
+ CAM_QUEUE_ENABLE = 0x00000002,/* SIM queue actions are enabled */
+ CAM_CDB_LINKED = 0x00000004,/* CCB contains a linked CDB */
+ CAM_NEGOTIATE = 0x00000008,/*
+ * Perform transport negotiation
+ * with this command.
+ */
+ CAM_SCATTER_VALID = 0x00000010,/* Scatter/gather list is valid */
+ CAM_DIS_AUTOSENSE = 0x00000020,/* Disable autosense feature */
+ CAM_DIR_RESV = 0x00000000,/* Data direction (00:reserved) */
+ CAM_DIR_IN = 0x00000040,/* Data direction (01:DATA IN) */
+ CAM_DIR_OUT = 0x00000080,/* Data direction (10:DATA OUT) */
+ CAM_DIR_NONE = 0x000000C0,/* Data direction (11:no data) */
+ CAM_DIR_MASK = 0x000000C0,/* Data direction Mask */
+ CAM_SOFT_RST_OP = 0x00000100,/* Use Soft reset alternative */
+ CAM_ENG_SYNC = 0x00000200,/* Flush resid bytes on complete */
+ CAM_DEV_QFRZDIS = 0x00000400,/* Disable DEV Q freezing */
+ CAM_DEV_QFREEZE = 0x00000800,/* Freeze DEV Q on execution */
+ CAM_HIGH_POWER = 0x00001000,/* Command takes a lot of power */
+ CAM_SENSE_PTR = 0x00002000,/* Sense data is a pointer */
+ CAM_SENSE_PHYS = 0x00004000,/* Sense pointer is physical addr*/
+ CAM_TAG_ACTION_VALID = 0x00008000,/* Use the tag action in this ccb*/
+ CAM_PASS_ERR_RECOVER = 0x00010000,/* Pass driver does err. recovery*/
+ CAM_DIS_DISCONNECT = 0x00020000,/* Disable disconnect */
+ CAM_SG_LIST_PHYS = 0x00040000,/* SG list has physical addrs. */
+ CAM_MSG_BUF_PHYS = 0x00080000,/* Message buffer ptr is physical*/
+ CAM_SNS_BUF_PHYS = 0x00100000,/* Autosense data ptr is physical*/
+ CAM_DATA_PHYS = 0x00200000,/* SG/Buffer data ptrs are phys. */
+	CAM_CDB_PHYS		= 0x00400000,/* CDB pointer is physical	*/
+ CAM_ENG_SGLIST = 0x00800000,/* SG list is for the HBA engine */
+
+/* Phase cognizant mode flags */
+ CAM_DIS_AUTOSRP = 0x01000000,/* Disable autosave/restore ptrs */
+ CAM_DIS_AUTODISC = 0x02000000,/* Disable auto disconnect */
+ CAM_TGT_CCB_AVAIL = 0x04000000,/* Target CCB available */
+ CAM_TGT_PHASE_MODE = 0x08000000,/* The SIM runs in phase mode */
+ CAM_MSGB_VALID = 0x10000000,/* Message buffer valid */
+ CAM_STATUS_VALID = 0x20000000,/* Status buffer valid */
+ CAM_DATAB_VALID = 0x40000000,/* Data buffer valid */
+
+/* Host target Mode flags */
+ CAM_SEND_SENSE = 0x08000000,/* Send sense data with status */
+ CAM_TERM_IO = 0x10000000,/* Terminate I/O Message sup. */
+ CAM_DISCONNECT = 0x20000000,/* Disconnects are mandatory */
+ CAM_SEND_STATUS = 0x40000000 /* Send status after data phase */
+} ccb_flags;
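Note that CAM_DIR_* above is a two-bit field rather than a set of independent
flags, so it must be decoded against the full mask; a minimal sketch:

	/* Select a direction when filling in the CCB... */
	ccb->ccb_h.flags = CAM_DIR_IN | CAM_DEV_QFRZDIS;

	/* ...and test it against CAM_DIR_MASK, never a single bit. */
	if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
		/* data moves from the device to the host */
	}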
+
+/* XPT Opcodes for xpt_action */
+typedef enum {
+/* Function code flags are bits greater than 0xff */
+ XPT_FC_QUEUED = 0x100,
+ /* Non-immediate function code */
+ XPT_FC_USER_CCB = 0x200,
+ XPT_FC_XPT_ONLY = 0x400,
+ /* Only for the transport layer device */
+ XPT_FC_DEV_QUEUED = 0x800 | XPT_FC_QUEUED,
+ /* Passes through the device queues */
+/* Common function commands: 0x00->0x0F */
+ XPT_NOOP = 0x00,
+ /* Execute Nothing */
+ XPT_SCSI_IO = 0x01 | XPT_FC_DEV_QUEUED,
+ /* Execute the requested I/O operation */
+ XPT_GDEV_TYPE = 0x02,
+ /* Get type information for specified device */
+ XPT_GDEVLIST = 0x03,
+ /* Get a list of peripheral devices */
+ XPT_PATH_INQ = 0x04,
+ /* Path routing inquiry */
+ XPT_REL_SIMQ = 0x05,
+ /* Release a frozen device queue */
+ XPT_SASYNC_CB = 0x06,
+ /* Set Asynchronous Callback Parameters */
+ XPT_SDEV_TYPE = 0x07,
+ /* Set device type information */
+ XPT_SCAN_BUS = 0x08 | XPT_FC_QUEUED | XPT_FC_USER_CCB
+ | XPT_FC_XPT_ONLY,
+ /* (Re)Scan the SCSI Bus */
+ XPT_DEV_MATCH = 0x09 | XPT_FC_XPT_ONLY,
+ /* Get EDT entries matching the given pattern */
+ XPT_DEBUG = 0x0a,
+ /* Turn on debugging for a bus, target or lun */
+ XPT_PATH_STATS = 0x0b,
+ /* Path statistics (error counts, etc.) */
+ XPT_GDEV_STATS = 0x0c,
+ /* Device statistics (error counts, etc.) */
+ XPT_FREEZE_QUEUE = 0x0d,
+ /* Freeze device queue */
+/* SCSI Control Functions: 0x10->0x1F */
+ XPT_ABORT = 0x10,
+ /* Abort the specified CCB */
+ XPT_RESET_BUS = 0x11 | XPT_FC_XPT_ONLY,
+ /* Reset the specified SCSI bus */
+ XPT_RESET_DEV = 0x12 | XPT_FC_DEV_QUEUED,
+ /* Bus Device Reset the specified SCSI device */
+ XPT_TERM_IO = 0x13,
+ /* Terminate the I/O process */
+ XPT_SCAN_LUN = 0x14 | XPT_FC_QUEUED | XPT_FC_USER_CCB
+ | XPT_FC_XPT_ONLY,
+ /* Scan Logical Unit */
+ XPT_GET_TRAN_SETTINGS = 0x15,
+ /*
+ * Get default/user transfer settings
+ * for the target
+ */
+ XPT_SET_TRAN_SETTINGS = 0x16,
+ /*
+ * Set transfer rate/width
+ * negotiation settings
+ */
+ XPT_CALC_GEOMETRY = 0x17,
+ /*
+ * Calculate the geometry parameters for
+				 * a device given the sector size and
+ * volume size.
+ */
+ XPT_ATA_IO = 0x18 | XPT_FC_DEV_QUEUED,
+ /* Execute the requested ATA I/O operation */
+
+ XPT_GET_SIM_KNOB = 0x18,
+ /*
+ * Get SIM specific knob values.
+ */
+
+ XPT_SET_SIM_KNOB = 0x19,
+ /*
+ * Set SIM specific knob values.
+ */
+/* HBA engine commands 0x20->0x2F */
+ XPT_ENG_INQ = 0x20 | XPT_FC_XPT_ONLY,
+ /* HBA engine feature inquiry */
+ XPT_ENG_EXEC = 0x21 | XPT_FC_DEV_QUEUED,
+ /* HBA execute engine request */
+
+/* Target mode commands: 0x30->0x3F */
+ XPT_EN_LUN = 0x30,
+ /* Enable LUN as a target */
+ XPT_TARGET_IO = 0x31 | XPT_FC_DEV_QUEUED,
+ /* Execute target I/O request */
+ XPT_ACCEPT_TARGET_IO = 0x32 | XPT_FC_QUEUED | XPT_FC_USER_CCB,
+ /* Accept Host Target Mode CDB */
+ XPT_CONT_TARGET_IO = 0x33 | XPT_FC_DEV_QUEUED,
+ /* Continue Host Target I/O Connection */
+ XPT_IMMED_NOTIFY = 0x34 | XPT_FC_QUEUED | XPT_FC_USER_CCB,
+ /* Notify Host Target driver of event (obsolete) */
+ XPT_NOTIFY_ACK = 0x35,
+ /* Acknowledgement of event (obsolete) */
+ XPT_IMMEDIATE_NOTIFY = 0x36 | XPT_FC_QUEUED | XPT_FC_USER_CCB,
+ /* Notify Host Target driver of event */
+ XPT_NOTIFY_ACKNOWLEDGE = 0x37 | XPT_FC_QUEUED | XPT_FC_USER_CCB,
+ /* Acknowledgement of event */
+
+/* Vendor Unique codes: 0x80->0x8F */
+ XPT_VUNIQUE = 0x80
+} xpt_opcode;
+
+#define XPT_FC_GROUP_MASK 0xF0
+#define XPT_FC_GROUP(op) ((op) & XPT_FC_GROUP_MASK)
+#define XPT_FC_GROUP_COMMON 0x00
+#define XPT_FC_GROUP_SCSI_CONTROL 0x10
+#define XPT_FC_GROUP_HBA_ENGINE 0x20
+#define XPT_FC_GROUP_TMODE 0x30
+#define XPT_FC_GROUP_VENDOR_UNIQUE 0x80
+
+#define XPT_FC_IS_DEV_QUEUED(ccb) \
+ (((ccb)->ccb_h.func_code & XPT_FC_DEV_QUEUED) == XPT_FC_DEV_QUEUED)
+#define XPT_FC_IS_QUEUED(ccb) \
+ (((ccb)->ccb_h.func_code & XPT_FC_QUEUED) != 0)
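A sketch of how callers use these predicates to decide whether an xpt_action()
request completes immediately or asynchronously (ccb is assumed valid):

	if (XPT_FC_IS_QUEUED(ccb)) {
		/* Queued function codes complete via the ccb_h.cbfcnp callback. */
	} else {
		/* Immediate ones are done when xpt_action() returns. */
	}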
+
+typedef enum {
+ PROTO_UNKNOWN,
+ PROTO_UNSPECIFIED,
+ PROTO_SCSI, /* Small Computer System Interface */
+ PROTO_ATA, /* AT Attachment */
+ PROTO_ATAPI, /* AT Attachment Packetized Interface */
+ PROTO_SATAPM, /* SATA Port Multiplier */
+} cam_proto;
+
+typedef enum {
+ XPORT_UNKNOWN,
+ XPORT_UNSPECIFIED,
+ XPORT_SPI, /* SCSI Parallel Interface */
+	XPORT_SPI,	/* SCSI Parallel Interface */
+	XPORT_FC,	/* Fibre Channel */
+ XPORT_SSA, /* Serial Storage Architecture */
+ XPORT_USB, /* Universal Serial Bus */
+ XPORT_PPB, /* Parallel Port Bus */
+ XPORT_ATA, /* AT Attachment */
+ XPORT_SAS, /* Serial Attached SCSI */
+ XPORT_SATA, /* Serial AT Attachment */
+ XPORT_ISCSI, /* iSCSI */
+} cam_xport;
+
+#define PROTO_VERSION_UNKNOWN (UINT_MAX - 1)
+#define PROTO_VERSION_UNSPECIFIED UINT_MAX
+#define XPORT_VERSION_UNKNOWN (UINT_MAX - 1)
+#define XPORT_VERSION_UNSPECIFIED UINT_MAX
+
+typedef union {
+ LIST_ENTRY(ccb_hdr) le;
+ SLIST_ENTRY(ccb_hdr) sle;
+ TAILQ_ENTRY(ccb_hdr) tqe;
+ STAILQ_ENTRY(ccb_hdr) stqe;
+} camq_entry;
+
+typedef union {
+ void *ptr;
+ u_long field;
+ u_int8_t bytes[sizeof(uintptr_t)];
+} ccb_priv_entry;
+
+typedef union {
+ ccb_priv_entry entries[CCB_PERIPH_PRIV_SIZE];
+ u_int8_t bytes[CCB_PERIPH_PRIV_SIZE * sizeof(ccb_priv_entry)];
+} ccb_ppriv_area;
+
+typedef union {
+ ccb_priv_entry entries[CCB_SIM_PRIV_SIZE];
+ u_int8_t bytes[CCB_SIM_PRIV_SIZE * sizeof(ccb_priv_entry)];
+} ccb_spriv_area;
+
+struct ccb_hdr {
+#ifndef __rtems__
+ cam_pinfo pinfo; /* Info for priority scheduling */
+ camq_entry xpt_links; /* For chaining in the XPT layer */
+ camq_entry sim_links; /* For chaining in the SIM layer */
+ camq_entry periph_links; /* For chaining in the type driver */
+#else /* __rtems__ */
+ struct cam_sim *sim;
+#endif /* __rtems__ */
+ u_int32_t retry_count;
+ void (*cbfcnp)(struct cam_periph *, union ccb *);
+ /* Callback on completion function */
+ xpt_opcode func_code; /* XPT function code */
+ u_int32_t status; /* Status returned by CAM subsystem */
+#ifndef __rtems__
+ struct cam_path *path; /* Compiled path for this ccb */
+ path_id_t path_id; /* Path ID for the request */
+#endif /* __rtems__ */
+ target_id_t target_id; /* Target device ID */
+ lun_id_t target_lun; /* Target LUN number */
+ u_int32_t flags; /* ccb_flags */
+#ifndef __rtems__
+ ccb_ppriv_area periph_priv;
+ ccb_spriv_area sim_priv;
+#endif /* __rtems__ */
+ u_int32_t timeout; /* Timeout value */
+
+#ifndef __rtems__
+ /*
+ * Deprecated, only for use by non-MPSAFE SIMs. All others must
+ * allocate and initialize their own callout storage.
+ */
+ struct callout_handle timeout_ch;
+#endif /* __rtems__ */
+};
+
+/* Get Device Information CCB */
+struct ccb_getdev {
+ struct ccb_hdr ccb_h;
+ cam_proto protocol;
+ struct scsi_inquiry_data inq_data;
+ struct ata_params ident_data;
+ u_int8_t serial_num[252];
+ u_int8_t inq_flags;
+ u_int8_t serial_num_len;
+};
+
+/* Device Statistics CCB */
+struct ccb_getdevstats {
+ struct ccb_hdr ccb_h;
+ int dev_openings; /* Space left for more work on device*/
+ int dev_active; /* Transactions running on the device */
+ int devq_openings; /* Space left for more queued work */
+ int devq_queued; /* Transactions queued to be sent */
+ int held; /*
+ * CCBs held by peripheral drivers
+ * for this device
+ */
+ int maxtags; /*
+ * Boundary conditions for number of
+ * tagged operations
+ */
+ int mintags;
+ struct timeval last_reset; /* Time of last bus reset/loop init */
+};
+
+typedef enum {
+ CAM_GDEVLIST_LAST_DEVICE,
+ CAM_GDEVLIST_LIST_CHANGED,
+ CAM_GDEVLIST_MORE_DEVS,
+ CAM_GDEVLIST_ERROR
+} ccb_getdevlist_status_e;
+
+struct ccb_getdevlist {
+ struct ccb_hdr ccb_h;
+ char periph_name[DEV_IDLEN];
+ u_int32_t unit_number;
+ unsigned int generation;
+ u_int32_t index;
+ ccb_getdevlist_status_e status;
+};
+
+typedef enum {
+ PERIPH_MATCH_NONE = 0x000,
+ PERIPH_MATCH_PATH = 0x001,
+ PERIPH_MATCH_TARGET = 0x002,
+ PERIPH_MATCH_LUN = 0x004,
+ PERIPH_MATCH_NAME = 0x008,
+ PERIPH_MATCH_UNIT = 0x010,
+ PERIPH_MATCH_ANY = 0x01f
+} periph_pattern_flags;
+
+struct periph_match_pattern {
+ char periph_name[DEV_IDLEN];
+ u_int32_t unit_number;
+ path_id_t path_id;
+ target_id_t target_id;
+ lun_id_t target_lun;
+ periph_pattern_flags flags;
+};
+
+typedef enum {
+ DEV_MATCH_NONE = 0x000,
+ DEV_MATCH_PATH = 0x001,
+ DEV_MATCH_TARGET = 0x002,
+ DEV_MATCH_LUN = 0x004,
+ DEV_MATCH_INQUIRY = 0x008,
+ DEV_MATCH_ANY = 0x00f
+} dev_pattern_flags;
+
+struct device_match_pattern {
+ path_id_t path_id;
+ target_id_t target_id;
+ lun_id_t target_lun;
+ struct scsi_static_inquiry_pattern inq_pat;
+ dev_pattern_flags flags;
+};
+
+typedef enum {
+ BUS_MATCH_NONE = 0x000,
+ BUS_MATCH_PATH = 0x001,
+ BUS_MATCH_NAME = 0x002,
+ BUS_MATCH_UNIT = 0x004,
+ BUS_MATCH_BUS_ID = 0x008,
+ BUS_MATCH_ANY = 0x00f
+} bus_pattern_flags;
+
+struct bus_match_pattern {
+ path_id_t path_id;
+ char dev_name[DEV_IDLEN];
+ u_int32_t unit_number;
+ u_int32_t bus_id;
+ bus_pattern_flags flags;
+};
+
+union match_pattern {
+ struct periph_match_pattern periph_pattern;
+ struct device_match_pattern device_pattern;
+ struct bus_match_pattern bus_pattern;
+};
+
+typedef enum {
+ DEV_MATCH_PERIPH,
+ DEV_MATCH_DEVICE,
+ DEV_MATCH_BUS
+} dev_match_type;
+
+struct dev_match_pattern {
+ dev_match_type type;
+ union match_pattern pattern;
+};
+
+struct periph_match_result {
+ char periph_name[DEV_IDLEN];
+ u_int32_t unit_number;
+ path_id_t path_id;
+ target_id_t target_id;
+ lun_id_t target_lun;
+};
+
+typedef enum {
+ DEV_RESULT_NOFLAG = 0x00,
+ DEV_RESULT_UNCONFIGURED = 0x01
+} dev_result_flags;
+
+struct device_match_result {
+ path_id_t path_id;
+ target_id_t target_id;
+ lun_id_t target_lun;
+ cam_proto protocol;
+ struct scsi_inquiry_data inq_data;
+ struct ata_params ident_data;
+ dev_result_flags flags;
+};
+
+struct bus_match_result {
+ path_id_t path_id;
+ char dev_name[DEV_IDLEN];
+ u_int32_t unit_number;
+ u_int32_t bus_id;
+};
+
+union match_result {
+ struct periph_match_result periph_result;
+ struct device_match_result device_result;
+ struct bus_match_result bus_result;
+};
+
+struct dev_match_result {
+ dev_match_type type;
+ union match_result result;
+};
+
+typedef enum {
+ CAM_DEV_MATCH_LAST,
+ CAM_DEV_MATCH_MORE,
+ CAM_DEV_MATCH_LIST_CHANGED,
+ CAM_DEV_MATCH_SIZE_ERROR,
+ CAM_DEV_MATCH_ERROR
+} ccb_dev_match_status;
+
+typedef enum {
+ CAM_DEV_POS_NONE = 0x000,
+ CAM_DEV_POS_BUS = 0x001,
+ CAM_DEV_POS_TARGET = 0x002,
+ CAM_DEV_POS_DEVICE = 0x004,
+ CAM_DEV_POS_PERIPH = 0x008,
+ CAM_DEV_POS_PDPTR = 0x010,
+ CAM_DEV_POS_TYPEMASK = 0xf00,
+ CAM_DEV_POS_EDT = 0x100,
+ CAM_DEV_POS_PDRV = 0x200
+} dev_pos_type;
+
+struct ccb_dm_cookie {
+ void *bus;
+ void *target;
+ void *device;
+ void *periph;
+ void *pdrv;
+};
+
+struct ccb_dev_position {
+ u_int generations[4];
+#define CAM_BUS_GENERATION 0x00
+#define CAM_TARGET_GENERATION 0x01
+#define CAM_DEV_GENERATION 0x02
+#define CAM_PERIPH_GENERATION 0x03
+ dev_pos_type position_type;
+ struct ccb_dm_cookie cookie;
+};
+
+struct ccb_dev_match {
+ struct ccb_hdr ccb_h;
+ ccb_dev_match_status status;
+ u_int32_t num_patterns;
+ u_int32_t pattern_buf_len;
+ struct dev_match_pattern *patterns;
+ u_int32_t num_matches;
+ u_int32_t match_buf_len;
+ struct dev_match_result *matches;
+ struct ccb_dev_position pos;
+};
+
+/*
+ * Definitions for the path inquiry CCB fields.
+ */
+#define CAM_VERSION 0x15 /* Hex value for current version */
+
+typedef enum {
+ PI_MDP_ABLE = 0x80, /* Supports MDP message */
+ PI_WIDE_32 = 0x40, /* Supports 32 bit wide SCSI */
+ PI_WIDE_16 = 0x20, /* Supports 16 bit wide SCSI */
+ PI_SDTR_ABLE = 0x10, /* Supports SDTR message */
+ PI_LINKED_CDB = 0x08, /* Supports linked CDBs */
+ PI_SATAPM = 0x04, /* Supports SATA PM */
+ PI_TAG_ABLE = 0x02, /* Supports tag queue messages */
+ PI_SOFT_RST = 0x01 /* Supports soft reset alternative */
+} pi_inqflag;
+
+typedef enum {
+ PIT_PROCESSOR = 0x80, /* Target mode processor mode */
+ PIT_PHASE = 0x40, /* Target mode phase cog. mode */
+ PIT_DISCONNECT = 0x20, /* Disconnects supported in target mode */
+ PIT_TERM_IO = 0x10, /* Terminate I/O message supported in TM */
+ PIT_GRP_6 = 0x08, /* Group 6 commands supported */
+ PIT_GRP_7 = 0x04 /* Group 7 commands supported */
+} pi_tmflag;
+
+typedef enum {
+ PIM_SCANHILO = 0x80, /* Bus scans from high ID to low ID */
+	PIM_NOREMOVE	= 0x40,	/* Removable devices not included in scan */
+ PIM_NOINITIATOR = 0x20, /* Initiator role not supported. */
+ PIM_NOBUSRESET = 0x10, /* User has disabled initial BUS RESET */
+ PIM_NO_6_BYTE = 0x08, /* Do not send 6-byte commands */
+ PIM_SEQSCAN = 0x04 /* Do bus scans sequentially, not in parallel */
+} pi_miscflag;
+
+/* Path Inquiry CCB */
+struct ccb_pathinq_settings_spi {
+ u_int8_t ppr_options;
+};
+
+struct ccb_pathinq_settings_fc {
+ u_int64_t wwnn; /* world wide node name */
+ u_int64_t wwpn; /* world wide port name */
+ u_int32_t port; /* 24 bit port id, if known */
+ u_int32_t bitrate; /* Mbps */
+};
+
+struct ccb_pathinq_settings_sas {
+ u_int32_t bitrate; /* Mbps */
+};
+#define PATHINQ_SETTINGS_SIZE 128
+
+struct ccb_pathinq {
+ struct ccb_hdr ccb_h;
+ u_int8_t version_num; /* Version number for the SIM/HBA */
+ u_int8_t hba_inquiry; /* Mimic of INQ byte 7 for the HBA */
+ u_int8_t target_sprt; /* Flags for target mode support */
+ u_int8_t hba_misc; /* Misc HBA features */
+ u_int16_t hba_eng_cnt; /* HBA engine count */
+ /* Vendor Unique capabilities */
+ u_int8_t vuhba_flags[VUHBALEN];
+ u_int32_t max_target; /* Maximum supported Target */
+ u_int32_t max_lun; /* Maximum supported Lun */
+ u_int32_t async_flags; /* Installed Async handlers */
+ path_id_t hpath_id; /* Highest Path ID in the subsystem */
+ target_id_t initiator_id; /* ID of the HBA on the SCSI bus */
+ char sim_vid[SIM_IDLEN]; /* Vendor ID of the SIM */
+ char hba_vid[HBA_IDLEN]; /* Vendor ID of the HBA */
+ char dev_name[DEV_IDLEN];/* Device name for SIM */
+ u_int32_t unit_number; /* Unit number for SIM */
+ u_int32_t bus_id; /* Bus ID for SIM */
+ u_int32_t base_transfer_speed;/* Base bus speed in KB/sec */
+ cam_proto protocol;
+ u_int protocol_version;
+ cam_xport transport;
+ u_int transport_version;
+ union {
+ struct ccb_pathinq_settings_spi spi;
+ struct ccb_pathinq_settings_fc fc;
+ struct ccb_pathinq_settings_sas sas;
+ char ccb_pathinq_settings_opaque[PATHINQ_SETTINGS_SIZE];
+ } xport_specific;
+ u_int maxio; /* Max supported I/O size, in bytes. */
+};
+
+/* Path Statistics CCB */
+struct ccb_pathstats {
+ struct ccb_hdr ccb_h;
+ struct timeval last_reset; /* Time of last bus reset/loop init */
+};
+
+typedef union {
+ u_int8_t *sense_ptr; /*
+ * Pointer to storage
+ * for sense information
+ */
+ /* Storage Area for sense information */
+ struct scsi_sense_data sense_buf;
+} sense_t;
+
+typedef union {
+ u_int8_t *cdb_ptr; /* Pointer to the CDB bytes to send */
+ /* Area for the CDB send */
+ u_int8_t cdb_bytes[IOCDBLEN];
+} cdb_t;
+
+/*
+ * SCSI I/O Request CCB used for the XPT_SCSI_IO and XPT_CONT_TARGET_IO
+ * function codes.
+ */
+struct ccb_scsiio {
+ struct ccb_hdr ccb_h;
+ union ccb *next_ccb; /* Ptr for next CCB for action */
+ u_int8_t *req_map; /* Ptr to mapping info */
+ u_int8_t *data_ptr; /* Ptr to the data buf/SG list */
+ u_int32_t dxfer_len; /* Data transfer length */
+ /* Autosense storage */
+ struct scsi_sense_data sense_data;
+ u_int8_t sense_len; /* Number of bytes to autosense */
+ u_int8_t cdb_len; /* Number of bytes for the CDB */
+ u_int16_t sglist_cnt; /* Number of SG list entries */
+ u_int8_t scsi_status; /* Returned SCSI status */
+ u_int8_t sense_resid; /* Autosense resid length: 2's comp */
+ u_int32_t resid; /* Transfer residual length: 2's comp */
+ cdb_t cdb_io; /* Union for CDB bytes/pointer */
+ u_int8_t *msg_ptr; /* Pointer to the message buffer */
+ u_int16_t msg_len; /* Number of bytes for the Message */
+ u_int8_t tag_action; /* What to do for tag queueing */
+ /*
+ * The tag action should be either the define below (to send a
+ * non-tagged transaction) or one of the defined scsi tag messages
+ * from scsi_message.h.
+ */
+#define CAM_TAG_ACTION_NONE 0x00
+	u_int	   tag_id;		/* tag id from initiator (target mode) */
+ u_int init_id; /* initiator id of who selected */
+#ifdef __rtems__
+ int readop;
+ rtems_blkdev_sg_buffer *sg_current;
+ rtems_blkdev_sg_buffer *sg_end;
+ rtems_blkdev_request *req;
+#endif /* __rtems__ */
+};
+
+/*
+ * ATA I/O Request CCB used for the XPT_ATA_IO function code.
+ */
+struct ccb_ataio {
+ struct ccb_hdr ccb_h;
+ union ccb *next_ccb; /* Ptr for next CCB for action */
+ struct ata_cmd cmd; /* ATA command register set */
+ struct ata_res res; /* ATA result register set */
+ u_int8_t *data_ptr; /* Ptr to the data buf/SG list */
+ u_int32_t dxfer_len; /* Data transfer length */
+ u_int32_t resid; /* Transfer residual length: 2's comp */
+ u_int8_t tag_action; /* What to do for tag queueing */
+ /*
+ * The tag action should be either the define below (to send a
+ * non-tagged transaction) or one of the defined scsi tag messages
+ * from scsi_message.h.
+ */
+#define CAM_TAG_ACTION_NONE 0x00
+	u_int	   tag_id;		/* tag id from initiator (target mode) */
+ u_int init_id; /* initiator id of who selected */
+};
+
+struct ccb_accept_tio {
+ struct ccb_hdr ccb_h;
+ cdb_t cdb_io; /* Union for CDB bytes/pointer */
+ u_int8_t cdb_len; /* Number of bytes for the CDB */
+ u_int8_t tag_action; /* What to do for tag queueing */
+ u_int8_t sense_len; /* Number of bytes of Sense Data */
+	u_int	   tag_id;		/* tag id from initiator (target mode) */
+ u_int init_id; /* initiator id of who selected */
+ struct scsi_sense_data sense_data;
+};
+
+/* Release SIM Queue */
+struct ccb_relsim {
+ struct ccb_hdr ccb_h;
+ u_int32_t release_flags;
+#define RELSIM_ADJUST_OPENINGS 0x01
+#define RELSIM_RELEASE_AFTER_TIMEOUT 0x02
+#define RELSIM_RELEASE_AFTER_CMDCMPLT 0x04
+#define RELSIM_RELEASE_AFTER_QEMPTY 0x08
+#define RELSIM_RELEASE_RUNLEVEL 0x10
+ u_int32_t openings;
+ u_int32_t release_timeout; /* Abstract argument. */
+ u_int32_t qfrozen_cnt;
+};
+
+/*
+ * Definitions for the asynchronous callback CCB fields.
+ */
+typedef enum {
+ AC_CONTRACT = 0x1000,/* A contractual callback */
+ AC_GETDEV_CHANGED = 0x800,/* Getdev info might have changed */
+ AC_INQ_CHANGED = 0x400,/* Inquiry info might have changed */
+ AC_TRANSFER_NEG = 0x200,/* New transfer settings in effect */
+ AC_LOST_DEVICE = 0x100,/* A device went away */
+ AC_FOUND_DEVICE = 0x080,/* A new device was found */
+ AC_PATH_DEREGISTERED = 0x040,/* A path has de-registered */
+ AC_PATH_REGISTERED = 0x020,/* A new path has been registered */
+ AC_SENT_BDR = 0x010,/* A BDR message was sent to target */
+ AC_SCSI_AEN = 0x008,/* A SCSI AEN has been received */
+ AC_UNSOL_RESEL = 0x002,/* Unsolicited reselection occurred */
+ AC_BUS_RESET = 0x001 /* A SCSI bus reset occurred */
+} ac_code;
+
+#ifdef __rtems__
+struct cam_path;
+#endif /* __rtems__ */
+
+typedef void ac_callback_t (void *softc, u_int32_t code,
+ struct cam_path *path, void *args);
+
+/*
+ * Generic Asynchronous callbacks.
+ *
+ * Generic arguments passed back, which are then interpreted according to the
+ * per-system contract number.
+ */
+#define AC_CONTRACT_DATA_MAX (128 - sizeof (u_int64_t))
+struct ac_contract {
+ u_int64_t contract_number;
+ u_int8_t contract_data[AC_CONTRACT_DATA_MAX];
+};
+
+#define AC_CONTRACT_DEV_CHG 1
+struct ac_device_changed {
+ u_int64_t wwpn;
+ u_int32_t port;
+ target_id_t target;
+ u_int8_t arrived;
+};
+
+/* Set Asynchronous Callback CCB */
+struct ccb_setasync {
+ struct ccb_hdr ccb_h;
+ u_int32_t event_enable; /* Async Event enables */
+ ac_callback_t *callback;
+ void *callback_arg;
+};
+
+/* Set Device Type CCB */
+struct ccb_setdev {
+ struct ccb_hdr ccb_h;
+ u_int8_t dev_type; /* Value for dev type field in EDT */
+};
+
+/* SCSI Control Functions */
+
+/* Abort XPT request CCB */
+struct ccb_abort {
+ struct ccb_hdr ccb_h;
+ union ccb *abort_ccb; /* Pointer to CCB to abort */
+};
+
+/* Reset SCSI Bus CCB */
+struct ccb_resetbus {
+ struct ccb_hdr ccb_h;
+};
+
+/* Reset SCSI Device CCB */
+struct ccb_resetdev {
+ struct ccb_hdr ccb_h;
+};
+
+/* Terminate I/O Process Request CCB */
+struct ccb_termio {
+ struct ccb_hdr ccb_h;
+ union ccb *termio_ccb; /* Pointer to CCB to terminate */
+};
+
+typedef enum {
+ CTS_TYPE_CURRENT_SETTINGS,
+ CTS_TYPE_USER_SETTINGS
+} cts_type;
+
+struct ccb_trans_settings_scsi
+{
+ u_int valid; /* Which fields to honor */
+#define CTS_SCSI_VALID_TQ 0x01
+ u_int flags;
+#define CTS_SCSI_FLAGS_TAG_ENB 0x01
+};
+
+struct ccb_trans_settings_spi
+{
+ u_int valid; /* Which fields to honor */
+#define CTS_SPI_VALID_SYNC_RATE 0x01
+#define CTS_SPI_VALID_SYNC_OFFSET 0x02
+#define CTS_SPI_VALID_BUS_WIDTH 0x04
+#define CTS_SPI_VALID_DISC 0x08
+#define CTS_SPI_VALID_PPR_OPTIONS 0x10
+ u_int flags;
+#define CTS_SPI_FLAGS_DISC_ENB 0x01
+ u_int sync_period;
+ u_int sync_offset;
+ u_int bus_width;
+ u_int ppr_options;
+};
+
+struct ccb_trans_settings_fc {
+ u_int valid; /* Which fields to honor */
+#define CTS_FC_VALID_WWNN 0x8000
+#define CTS_FC_VALID_WWPN 0x4000
+#define CTS_FC_VALID_PORT 0x2000
+#define CTS_FC_VALID_SPEED 0x1000
+ u_int64_t wwnn; /* world wide node name */
+ u_int64_t wwpn; /* world wide port name */
+ u_int32_t port; /* 24 bit port id, if known */
+ u_int32_t bitrate; /* Mbps */
+};
+
+struct ccb_trans_settings_sas {
+ u_int valid; /* Which fields to honor */
+#define CTS_SAS_VALID_SPEED 0x1000
+ u_int32_t bitrate; /* Mbps */
+};
+
+struct ccb_trans_settings_ata {
+ u_int valid; /* Which fields to honor */
+#define CTS_ATA_VALID_MODE 0x01
+#define CTS_ATA_VALID_BYTECOUNT 0x02
+#define CTS_ATA_VALID_ATAPI 0x20
+ int mode; /* Mode */
+ u_int bytecount; /* Length of PIO transaction */
+ u_int atapi; /* Length of ATAPI CDB */
+};
+
+struct ccb_trans_settings_sata {
+ u_int valid; /* Which fields to honor */
+#define CTS_SATA_VALID_MODE 0x01
+#define CTS_SATA_VALID_BYTECOUNT 0x02
+#define CTS_SATA_VALID_REVISION 0x04
+#define CTS_SATA_VALID_PM 0x08
+#define CTS_SATA_VALID_TAGS 0x10
+#define CTS_SATA_VALID_ATAPI 0x20
+#define CTS_SATA_VALID_CAPS 0x40
+ int mode; /* Legacy PATA mode */
+ u_int bytecount; /* Length of PIO transaction */
+ int revision; /* SATA revision */
+ u_int pm_present; /* PM is present (XPT->SIM) */
+ u_int tags; /* Number of allowed tags */
+ u_int atapi; /* Length of ATAPI CDB */
+ u_int caps; /* Device and host SATA caps. */
+#define CTS_SATA_CAPS_H 0x0000ffff
+#define CTS_SATA_CAPS_HH_PMREQ 0x00000001
+#define CTS_SATA_CAPS_HH_APST 0x00000002
+#define CTS_SATA_CAPS_HH_DMAAA 0x00000010 /* Auto-activation */
+#define CTS_SATA_CAPS_D 0xffff0000
+#define CTS_SATA_CAPS_D_PMREQ 0x00010000
+#define CTS_SATA_CAPS_D_APST 0x00020000
+};
+
+/* Get/Set transfer rate/width/disconnection/tag queueing settings */
+struct ccb_trans_settings {
+ struct ccb_hdr ccb_h;
+ cts_type type; /* Current or User settings */
+ cam_proto protocol;
+ u_int protocol_version;
+ cam_xport transport;
+ u_int transport_version;
+ union {
+ u_int valid; /* Which fields to honor */
+ struct ccb_trans_settings_scsi scsi;
+ } proto_specific;
+ union {
+ u_int valid; /* Which fields to honor */
+ struct ccb_trans_settings_spi spi;
+ struct ccb_trans_settings_fc fc;
+ struct ccb_trans_settings_sas sas;
+ struct ccb_trans_settings_ata ata;
+ struct ccb_trans_settings_sata sata;
+ } xport_specific;
+};
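A sketch of fetching the current transfer settings for a path with
XPT_GET_TRAN_SETTINGS; path, xpt_setup_ccb() and xpt_action() come from the
surrounding XPT layer and are assumed here:

	struct ccb_trans_settings cts;

	xpt_setup_ccb(&cts.ccb_h, path, CAM_PRIORITY_NORMAL);
	cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
	cts.type = CTS_TYPE_CURRENT_SETTINGS;
	xpt_action((union ccb *)&cts);
	if ((cts.ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
		/* cts.protocol and cts.xport_specific now describe the link. */
	}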
+
+
+/*
+ * Calculate the geometry parameters for a device
+ * given the block size and volume size in blocks.
+ */
+struct ccb_calc_geometry {
+ struct ccb_hdr ccb_h;
+ u_int32_t block_size;
+ u_int64_t volume_size;
+ u_int32_t cylinders;
+ u_int8_t heads;
+ u_int8_t secs_per_track;
+};
+
+/*
+ * Set or get SIM (and transport) specific knobs
+ */
+
+#define KNOB_VALID_ADDRESS 0x1
+#define KNOB_VALID_ROLE 0x2
+
+
+#define KNOB_ROLE_NONE 0x0
+#define KNOB_ROLE_INITIATOR 0x1
+#define KNOB_ROLE_TARGET 0x2
+#define KNOB_ROLE_BOTH 0x3
+
+struct ccb_sim_knob_settings_spi {
+ u_int valid;
+ u_int initiator_id;
+ u_int role;
+};
+
+struct ccb_sim_knob_settings_fc {
+ u_int valid;
+ u_int64_t wwnn; /* world wide node name */
+ u_int64_t wwpn; /* world wide port name */
+ u_int role;
+};
+
+struct ccb_sim_knob_settings_sas {
+ u_int valid;
+ u_int64_t wwnn; /* world wide node name */
+ u_int role;
+};
+#define KNOB_SETTINGS_SIZE 128
+
+struct ccb_sim_knob {
+ struct ccb_hdr ccb_h;
+ union {
+ u_int valid; /* Which fields to honor */
+ struct ccb_sim_knob_settings_spi spi;
+ struct ccb_sim_knob_settings_fc fc;
+ struct ccb_sim_knob_settings_sas sas;
+ char pad[KNOB_SETTINGS_SIZE];
+ } xport_specific;
+};
+
+/*
+ * Rescan the given bus, or bus/target/lun
+ */
+struct ccb_rescan {
+ struct ccb_hdr ccb_h;
+ cam_flags flags;
+};
+
+/*
+ * Turn on debugging for the given bus, bus/target, or bus/target/lun.
+ */
+struct ccb_debug {
+ struct ccb_hdr ccb_h;
+ cam_debug_flags flags;
+};
+
+/* Target mode structures. */
+
+struct ccb_en_lun {
+ struct ccb_hdr ccb_h;
+ u_int16_t grp6_len; /* Group 6 VU CDB length */
+ u_int16_t grp7_len; /* Group 7 VU CDB length */
+ u_int8_t enable;
+};
+
+/* old, barely used immediate notify, binary compatibility */
+struct ccb_immed_notify {
+ struct ccb_hdr ccb_h;
+ struct scsi_sense_data sense_data;
+ u_int8_t sense_len; /* Number of bytes in sense buffer */
+ u_int8_t initiator_id; /* Id of initiator that selected */
+ u_int8_t message_args[7]; /* Message Arguments */
+};
+
+struct ccb_notify_ack {
+ struct ccb_hdr ccb_h;
+ u_int16_t seq_id; /* Sequence identifier */
+ u_int8_t event; /* Event flags */
+};
+
+struct ccb_immediate_notify {
+ struct ccb_hdr ccb_h;
+ u_int tag_id; /* Tag for immediate notify */
+ u_int seq_id; /* Tag for target of notify */
+ u_int initiator_id; /* Initiator Identifier */
+ u_int arg; /* Function specific */
+};
+
+struct ccb_notify_acknowledge {
+ struct ccb_hdr ccb_h;
+ u_int tag_id; /* Tag for immediate notify */
+ u_int seq_id; /* Tag for target of notify */
+ u_int initiator_id; /* Initiator Identifier */
+ u_int arg; /* Function specific */
+};
+
+/* HBA engine structures. */
+
+typedef enum {
+ EIT_BUFFER, /* Engine type: buffer memory */
+ EIT_LOSSLESS, /* Engine type: lossless compression */
+ EIT_LOSSY, /* Engine type: lossy compression */
+ EIT_ENCRYPT /* Engine type: encryption */
+} ei_type;
+
+typedef enum {
+ EAD_VUNIQUE, /* Engine algorithm ID: vendor unique */
+ EAD_LZ1V1, /* Engine algorithm ID: LZ1 var.1 */
+ EAD_LZ2V1, /* Engine algorithm ID: LZ2 var.1 */
+ EAD_LZ2V2 /* Engine algorithm ID: LZ2 var.2 */
+} ei_algo;
+
+struct ccb_eng_inq {
+ struct ccb_hdr ccb_h;
+ u_int16_t eng_num; /* The engine number for this inquiry */
+ ei_type eng_type; /* Returned engine type */
+ ei_algo eng_algo; /* Returned engine algorithm type */
+ u_int32_t eng_memeory; /* Returned engine memory size */
+};
+
+struct ccb_eng_exec { /* This structure must match SCSIIO size */
+ struct ccb_hdr ccb_h;
+ u_int8_t *pdrv_ptr; /* Ptr used by the peripheral driver */
+ u_int8_t *req_map; /* Ptr for mapping info on the req. */
+ u_int8_t *data_ptr; /* Pointer to the data buf/SG list */
+ u_int32_t dxfer_len; /* Data transfer length */
+ u_int8_t *engdata_ptr; /* Pointer to the engine buffer data */
+ u_int16_t sglist_cnt; /* Num of scatter gather list entries */
+ u_int32_t dmax_len; /* Destination data maximum length */
+ u_int32_t dest_len; /* Destination data length */
+ int32_t src_resid; /* Source residual length: 2's comp */
+ u_int32_t timeout; /* Timeout value */
+ u_int16_t eng_num; /* Engine number for this request */
+ u_int16_t vu_flags; /* Vendor Unique flags */
+};
+
+/*
+ * Definitions for the timeout field in the SCSI I/O CCB.
+ */
+#define CAM_TIME_DEFAULT 0x00000000 /* Use SIM default value */
+#define CAM_TIME_INFINITY 0xFFFFFFFF /* Infinite timeout */
+
+#define CAM_SUCCESS 0 /* For signaling general success */
+#define CAM_FAILURE 1 /* For signaling general failure */
+
+#define CAM_FALSE 0
+#define CAM_TRUE 1
+
+#define XPT_CCB_INVALID -1 /* for signaling a bad CCB to free */
+
+/*
+ * Union of all CCB types for kernel space allocation. This union should
+ * never be used for manipulating CCBs - its only use is for the allocation
+ * and deallocation of raw CCB space and is the return type of xpt_ccb_alloc
+ * and the argument to xpt_ccb_free.
+ */
+union ccb {
+ struct ccb_hdr ccb_h; /* For convenience */
+ struct ccb_scsiio csio;
+ struct ccb_getdev cgd;
+ struct ccb_getdevlist cgdl;
+ struct ccb_pathinq cpi;
+ struct ccb_relsim crs;
+ struct ccb_setasync csa;
+ struct ccb_setdev csd;
+ struct ccb_pathstats cpis;
+ struct ccb_getdevstats cgds;
+ struct ccb_dev_match cdm;
+ struct ccb_trans_settings cts;
+ struct ccb_calc_geometry ccg;
+ struct ccb_sim_knob knob;
+ struct ccb_abort cab;
+ struct ccb_resetbus crb;
+ struct ccb_resetdev crd;
+ struct ccb_termio tio;
+ struct ccb_accept_tio atio;
+ struct ccb_scsiio ctio;
+ struct ccb_en_lun cel;
+ struct ccb_immed_notify cin;
+ struct ccb_notify_ack cna;
+ struct ccb_immediate_notify cin1;
+ struct ccb_notify_acknowledge cna2;
+ struct ccb_eng_inq cei;
+ struct ccb_eng_exec cee;
+ struct ccb_rescan crcn;
+ struct ccb_debug cdbg;
+ struct ccb_ataio ataio;
+};
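+
+/*
+ * Example (sketch, not a contract): the intended alloc/use/free cycle.
+ * Error handling is elided and the fill/submit step is hypothetical.
+ *
+ *	union ccb *ccb = xpt_ccb_alloc();
+ *	struct ccb_scsiio *csio = &ccb->csio;	// view raw space as SCSI I/O
+ *	// ... fill csio and submit it with xpt_action(ccb) ...
+ *	xpt_ccb_free(ccb);
+ */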
+
+__BEGIN_DECLS
+static __inline void
+cam_fill_csio(struct ccb_scsiio *csio, u_int32_t retries,
+ void (*cbfcnp)(struct cam_periph *, union ccb *),
+ u_int32_t flags, u_int8_t tag_action,
+ u_int8_t *data_ptr, u_int32_t dxfer_len,
+ u_int8_t sense_len, u_int8_t cdb_len,
+ u_int32_t timeout);
+
+static __inline void
+cam_fill_ctio(struct ccb_scsiio *csio, u_int32_t retries,
+ void (*cbfcnp)(struct cam_periph *, union ccb *),
+ u_int32_t flags, u_int tag_action, u_int tag_id,
+ u_int init_id, u_int scsi_status, u_int8_t *data_ptr,
+ u_int32_t dxfer_len, u_int32_t timeout);
+
+static __inline void
+cam_fill_ataio(struct ccb_ataio *ataio, u_int32_t retries,
+ void (*cbfcnp)(struct cam_periph *, union ccb *),
+ u_int32_t flags, u_int tag_action,
+ u_int8_t *data_ptr, u_int32_t dxfer_len,
+ u_int32_t timeout);
+
+static __inline void
+cam_fill_csio(struct ccb_scsiio *csio, u_int32_t retries,
+ void (*cbfcnp)(struct cam_periph *, union ccb *),
+ u_int32_t flags, u_int8_t tag_action,
+ u_int8_t *data_ptr, u_int32_t dxfer_len,
+ u_int8_t sense_len, u_int8_t cdb_len,
+ u_int32_t timeout)
+{
+ csio->ccb_h.func_code = XPT_SCSI_IO;
+ csio->ccb_h.flags = flags;
+ csio->ccb_h.retry_count = retries;
+ csio->ccb_h.cbfcnp = cbfcnp;
+ csio->ccb_h.timeout = timeout;
+ csio->data_ptr = data_ptr;
+ csio->dxfer_len = dxfer_len;
+ csio->sense_len = sense_len;
+ csio->cdb_len = cdb_len;
+ csio->tag_action = tag_action;
+}
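+
+/*
+ * Example (sketch): a typical cam_fill_csio() call from a peripheral
+ * driver issuing a 10-byte read-style command.  The CCB, buffer, length,
+ * and "xxdone" callback are hypothetical; callers normally obtain the
+ * CCB from cam_periph_getccb().
+ *
+ *	cam_fill_csio(&ccb->csio,
+ *	    4,			// retries
+ *	    xxdone,		// completion callback
+ *	    CAM_DIR_IN,		// data flows device -> host
+ *	    MSG_SIMPLE_Q_TAG,	// tag_action
+ *	    buf, len,		// data_ptr, dxfer_len
+ *	    SSD_FULL_SIZE,	// sense_len
+ *	    10,			// cdb_len
+ *	    60 * 1000);		// timeout in milliseconds
+ */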
+
+static __inline void
+cam_fill_ctio(struct ccb_scsiio *csio, u_int32_t retries,
+ void (*cbfcnp)(struct cam_periph *, union ccb *),
+ u_int32_t flags, u_int tag_action, u_int tag_id,
+ u_int init_id, u_int scsi_status, u_int8_t *data_ptr,
+ u_int32_t dxfer_len, u_int32_t timeout)
+{
+ csio->ccb_h.func_code = XPT_CONT_TARGET_IO;
+ csio->ccb_h.flags = flags;
+ csio->ccb_h.retry_count = retries;
+ csio->ccb_h.cbfcnp = cbfcnp;
+ csio->ccb_h.timeout = timeout;
+ csio->data_ptr = data_ptr;
+ csio->dxfer_len = dxfer_len;
+ csio->scsi_status = scsi_status;
+ csio->tag_action = tag_action;
+ csio->tag_id = tag_id;
+ csio->init_id = init_id;
+}
+
+static __inline void
+cam_fill_ataio(struct ccb_ataio *ataio, u_int32_t retries,
+ void (*cbfcnp)(struct cam_periph *, union ccb *),
+ u_int32_t flags, u_int tag_action,
+ u_int8_t *data_ptr, u_int32_t dxfer_len,
+ u_int32_t timeout)
+{
+ ataio->ccb_h.func_code = XPT_ATA_IO;
+ ataio->ccb_h.flags = flags;
+ ataio->ccb_h.retry_count = retries;
+ ataio->ccb_h.cbfcnp = cbfcnp;
+ ataio->ccb_h.timeout = timeout;
+ ataio->data_ptr = data_ptr;
+ ataio->dxfer_len = dxfer_len;
+ ataio->tag_action = tag_action;
+}
+
+void cam_calc_geometry(struct ccb_calc_geometry *ccg, int extended);
+
+__END_DECLS
+
+#endif /* _CAM_CAM_CCB_H */
diff --git a/rtems/freebsd/cam/cam_debug.h b/rtems/freebsd/cam/cam_debug.h
new file mode 100644
index 00000000..4b0fd245
--- /dev/null
+++ b/rtems/freebsd/cam/cam_debug.h
@@ -0,0 +1,87 @@
+/*-
+ * Macros for tracing/logging information in the CAM layer
+ *
+ * Copyright (c) 1997 Justin T. Gibbs.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification, immediately at the beginning of the file.
+ * 2. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+#ifndef _CAM_CAM_DEBUG_H
+#define _CAM_CAM_DEBUG_H 1
+
+/*
+ * Debugging flags.
+ */
+typedef enum {
+ CAM_DEBUG_NONE = 0x00, /* no debugging */
+ CAM_DEBUG_INFO = 0x01, /* scsi commands, errors, data */
+ CAM_DEBUG_TRACE = 0x02, /* routine flow tracking */
+ CAM_DEBUG_SUBTRACE = 0x04, /* internal to routine flows */
+ CAM_DEBUG_CDB = 0x08, /* print out SCSI CDBs only */
+ CAM_DEBUG_XPT = 0x10, /* print out xpt scheduling */
+ CAM_DEBUG_PERIPH = 0x20 /* print out peripheral calls */
+} cam_debug_flags;
+
+#if defined(CAMDEBUG) && defined(_KERNEL)
+
+/* Path we want to debug */
+extern struct cam_path *cam_dpath;
+/* Current debug levels set */
+extern u_int32_t cam_dflags;
+/* Printf delay value (to prevent scrolling) */
+extern u_int32_t cam_debug_delay;
+
+/* Debugging macros. */
+#define CAM_DEBUGGED(path, flag) \
+ ((cam_dflags & (flag)) \
+ && (cam_dpath != NULL) \
+ && (xpt_path_comp(cam_dpath, path) >= 0) \
+ && (xpt_path_comp(cam_dpath, path) < 2))
+#define CAM_DEBUG(path, flag, printfargs) \
+ if ((cam_dflags & (flag)) \
+ && (cam_dpath != NULL) \
+ && (xpt_path_comp(cam_dpath, path) >= 0) \
+ && (xpt_path_comp(cam_dpath, path) < 2)) { \
+ xpt_print_path(path); \
+ printf printfargs; \
+ if (cam_debug_delay != 0) \
+ DELAY(cam_debug_delay); \
+ }
+#define CAM_DEBUG_PRINT(flag, printfargs) \
+ if (cam_dflags & (flag)) { \
+ printf("cam_debug: "); \
+ printf printfargs; \
+ if (cam_debug_delay != 0) \
+ DELAY(cam_debug_delay); \
+ }
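+
+/*
+ * Example (sketch): a trace message from a hypothetical driver routine.
+ * Note the doubled parentheses: printfargs is expanded directly after
+ * printf, so the argument list must arrive as one parenthesized group.
+ *
+ *	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE,
+ *	    ("xxstart: queuing READ(10), lba %ju\n", (uintmax_t)lba));
+ */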
+
+#else /* !CAMDEBUG || !_KERNEL */
+
+#define CAM_DEBUGGED(A, B) 0
+#define CAM_DEBUG(A, B, C)
+#define CAM_DEBUG_PRINT(A, B)
+
+#endif /* CAMDEBUG && _KERNEL */
+
+#endif /* _CAM_CAM_DEBUG_H */
diff --git a/rtems/freebsd/cam/cam_periph.h b/rtems/freebsd/cam/cam_periph.h
new file mode 100644
index 00000000..32a12182
--- /dev/null
+++ b/rtems/freebsd/cam/cam_periph.h
@@ -0,0 +1,204 @@
+/*-
+ * Data structures and definitions for CAM peripheral ("type") drivers.
+ *
+ * Copyright (c) 1997, 1998 Justin T. Gibbs.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification, immediately at the beginning of the file.
+ * 2. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _CAM_CAM_PERIPH_H
+#define _CAM_CAM_PERIPH_H 1
+
+#include <rtems/freebsd/sys/queue.h>
+#include <rtems/freebsd/cam/cam_sim.h>
+
+#ifdef _KERNEL
+
+struct devstat;
+
+extern struct cam_periph *xpt_periph;
+
+extern struct periph_driver **periph_drivers;
+void periphdriver_register(void *);
+void periphdriver_init(int level);
+
+#include <rtems/freebsd/sys/module.h>
+#define PERIPHDRIVER_DECLARE(name, driver) \
+ static int name ## _modevent(module_t mod, int type, void *data) \
+ { \
+ switch (type) { \
+ case MOD_LOAD: \
+ periphdriver_register(data); \
+ break; \
+ case MOD_UNLOAD: \
+ printf(#name " module unload - not possible for this module type\n"); \
+ return EINVAL; \
+ default: \
+ return EOPNOTSUPP; \
+ } \
+ return 0; \
+ } \
+ static moduledata_t name ## _mod = { \
+ #name, \
+ name ## _modevent, \
+ (void *)&driver \
+ }; \
+ DECLARE_MODULE(name, name ## _mod, SI_SUB_DRIVERS, SI_ORDER_ANY); \
+ MODULE_DEPEND(name, cam, 1, 1, 1)
+
+typedef void (periph_init_t)(void); /*
+ * Callback informing the peripheral driver
+ * it can perform its initialization since
+ * the XPT is now fully initialized.
+ */
+typedef periph_init_t *periph_init_func_t;
+
+struct periph_driver {
+ periph_init_func_t init;
+ char *driver_name;
+ TAILQ_HEAD(,cam_periph) units;
+ u_int generation;
+ u_int flags;
+#define CAM_PERIPH_DRV_EARLY 0x01
+};
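+
+/*
+ * Example (sketch): declaring and registering a hypothetical "xx"
+ * peripheral driver; all names are illustrative only.
+ *
+ *	static periph_init_t xxinit;
+ *
+ *	static struct periph_driver xxdriver = {
+ *		xxinit, "xx",
+ *		TAILQ_HEAD_INITIALIZER(xxdriver.units),
+ *		0		// generation
+ *	};
+ *
+ *	PERIPHDRIVER_DECLARE(xx, xxdriver);
+ */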
+
+typedef enum {
+ CAM_PERIPH_BIO
+} cam_periph_type;
+
+/* Generically useful offsets into the peripheral private area */
+#define ppriv_ptr0 periph_priv.entries[0].ptr
+#define ppriv_ptr1 periph_priv.entries[1].ptr
+#define ppriv_field0 periph_priv.entries[0].field
+#define ppriv_field1 periph_priv.entries[1].field
+
+typedef void periph_start_t (struct cam_periph *periph,
+ union ccb *start_ccb);
+typedef cam_status periph_ctor_t (struct cam_periph *periph,
+ void *arg);
+typedef void periph_oninv_t (struct cam_periph *periph);
+typedef void periph_dtor_t (struct cam_periph *periph);
+struct cam_periph {
+ cam_pinfo pinfo;
+ periph_start_t *periph_start;
+ periph_oninv_t *periph_oninval;
+ periph_dtor_t *periph_dtor;
+ char *periph_name;
+ struct cam_path *path; /* Compiled path to device */
+ void *softc;
+ struct cam_sim *sim;
+ u_int32_t unit_number;
+ cam_periph_type type;
+ u_int32_t flags;
+#define CAM_PERIPH_RUNNING 0x01
+#define CAM_PERIPH_LOCKED 0x02
+#define CAM_PERIPH_LOCK_WANTED 0x04
+#define CAM_PERIPH_INVALID 0x08
+#define CAM_PERIPH_NEW_DEV_FOUND 0x10
+#define CAM_PERIPH_RECOVERY_INPROG 0x20
+#define CAM_PERIPH_SENSE_INPROG 0x40
+ u_int32_t immediate_priority;
+ u_int32_t refcount;
+ SLIST_HEAD(, ccb_hdr) ccb_list; /* For "immediate" requests */
+ SLIST_ENTRY(cam_periph) periph_links;
+ TAILQ_ENTRY(cam_periph) unit_links;
+ ac_callback_t *deferred_callback;
+ ac_code deferred_ac;
+};
+
+#define CAM_PERIPH_MAXMAPS 2
+
+struct cam_periph_map_info {
+ int num_bufs_used;
+ struct buf *bp[CAM_PERIPH_MAXMAPS];
+};
+
+cam_status cam_periph_alloc(periph_ctor_t *periph_ctor,
+ periph_oninv_t *periph_oninvalidate,
+ periph_dtor_t *periph_dtor,
+ periph_start_t *periph_start,
+ char *name, cam_periph_type type, struct cam_path *,
+ ac_callback_t *, ac_code, void *arg);
+struct cam_periph *cam_periph_find(struct cam_path *path, char *name);
+cam_status cam_periph_acquire(struct cam_periph *periph);
+void cam_periph_release(struct cam_periph *periph);
+void cam_periph_release_locked(struct cam_periph *periph);
+int cam_periph_hold(struct cam_periph *periph, int priority);
+void cam_periph_unhold(struct cam_periph *periph);
+void cam_periph_invalidate(struct cam_periph *periph);
+int cam_periph_mapmem(union ccb *ccb,
+ struct cam_periph_map_info *mapinfo);
+void cam_periph_unmapmem(union ccb *ccb,
+ struct cam_periph_map_info *mapinfo);
+union ccb *cam_periph_getccb(struct cam_periph *periph,
+ u_int32_t priority);
+void cam_periph_ccbwait(union ccb *ccb);
+int cam_periph_runccb(union ccb *ccb,
+ int (*error_routine)(union ccb *ccb,
+ cam_flags camflags,
+ u_int32_t sense_flags),
+ cam_flags camflags, u_int32_t sense_flags,
+ struct devstat *ds);
+int cam_periph_ioctl(struct cam_periph *periph, u_long cmd,
+ caddr_t addr,
+ int (*error_routine)(union ccb *ccb,
+ cam_flags camflags,
+ u_int32_t sense_flags));
+void cam_freeze_devq(struct cam_path *path);
+void cam_freeze_devq_arg(struct cam_path *path, u_int32_t flags,
+ uint32_t arg);
+u_int32_t cam_release_devq(struct cam_path *path, u_int32_t relsim_flags,
+ u_int32_t opening_reduction, u_int32_t arg,
+ int getcount_only);
+void cam_periph_async(struct cam_periph *periph, u_int32_t code,
+ struct cam_path *path, void *arg);
+void cam_periph_bus_settle(struct cam_periph *periph,
+ u_int bus_settle_ms);
+void cam_periph_freeze_after_event(struct cam_periph *periph,
+ struct timeval* event_time,
+ u_int duration_ms);
+int cam_periph_error(union ccb *ccb, cam_flags camflags,
+ u_int32_t sense_flags, union ccb *save_ccb);
+
+static __inline void
+cam_periph_lock(struct cam_periph *periph)
+{
+ mtx_lock(periph->sim->mtx);
+}
+
+static __inline void
+cam_periph_unlock(struct cam_periph *periph)
+{
+ mtx_unlock(periph->sim->mtx);
+}
+
+static __inline int
+cam_periph_owned(struct cam_periph *periph)
+{
+ return (mtx_owned(periph->sim->mtx));
+}
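+
+/*
+ * Example (sketch): the SIM lock must be held while running a CCB for
+ * the periph; "xxerror" and "softc" are assumed names.
+ *
+ *	cam_periph_lock(periph);
+ *	error = cam_periph_runccb(ccb, xxerror, CAM_RETRY_SELTO,
+ *	    SF_RETRY_UA, softc->device_stats);
+ *	cam_periph_unlock(periph);
+ */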
+
+#endif /* _KERNEL */
+#endif /* _CAM_CAM_PERIPH_H */
diff --git a/rtems/freebsd/cam/cam_queue.h b/rtems/freebsd/cam/cam_queue.h
new file mode 100644
index 00000000..936ffd88
--- /dev/null
+++ b/rtems/freebsd/cam/cam_queue.h
@@ -0,0 +1 @@
+/* EMPTY */
diff --git a/rtems/freebsd/cam/cam_sim.h b/rtems/freebsd/cam/cam_sim.h
new file mode 100644
index 00000000..6b5a496f
--- /dev/null
+++ b/rtems/freebsd/cam/cam_sim.h
@@ -0,0 +1,201 @@
+/*-
+ * Data structures and definitions for SCSI Interface Modules (SIMs).
+ *
+ * Copyright (c) 1997 Justin T. Gibbs.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification, immediately at the beginning of the file.
+ * 2. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _CAM_CAM_SIM_H
+#define _CAM_CAM_SIM_H 1
+
+#ifdef _KERNEL
+
+/*
+ * The sim driver creates a sim for each controller. The sim device
+ * queue is separately created in order to allow resource sharing between
+ * sims. For instance, a driver may create one sim for each channel of
+ * a multi-channel controller and use the same queue for each channel.
+ * In this way, the queue resources are shared across all the channels
+ * of the multi-channel controller.
+ */
+
+struct cam_sim;
+struct cam_devq;
+
+typedef void (*sim_action_func)(struct cam_sim *sim, union ccb *ccb);
+typedef void (*sim_poll_func)(struct cam_sim *sim);
+
+struct cam_devq * cam_simq_alloc(u_int32_t max_sim_transactions);
+void cam_simq_free(struct cam_devq *devq);
+
+struct cam_sim * cam_sim_alloc(sim_action_func sim_action,
+ sim_poll_func sim_poll,
+ const char *sim_name,
+ void *softc,
+ u_int32_t unit,
+ struct mtx *mtx,
+ int max_dev_transactions,
+ int max_tagged_dev_transactions,
+ struct cam_devq *queue);
+void cam_sim_free(struct cam_sim *sim, int free_devq);
+void cam_sim_hold(struct cam_sim *sim);
+void cam_sim_release(struct cam_sim *sim);
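+
+/*
+ * Example (sketch): two channels of one controller sharing a devq, as
+ * described above.  The action/poll handlers, softc, and counts are
+ * hypothetical.
+ *
+ *	struct cam_devq *devq = cam_simq_alloc(64);
+ *	struct cam_sim *sim0 = cam_sim_alloc(xxaction, xxpoll, "xx",
+ *	    softc, unit, &softc->mtx, 1, 32, devq);
+ *	struct cam_sim *sim1 = cam_sim_alloc(xxaction, xxpoll, "xx",
+ *	    softc, unit, &softc->mtx, 1, 32, devq);
+ *	...
+ *	cam_sim_free(sim1, 0);	// keep the shared devq
+ *	cam_sim_free(sim0, 1);	// the last SIM frees the devq
+ */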
+
+/* Optional sim attributes may be set with these. */
+void cam_sim_set_path(struct cam_sim *sim, u_int32_t path_id);
+
+
+
+/* Convenience routines for accessing sim attributes. */
+static __inline u_int32_t cam_sim_path(struct cam_sim *sim);
+static __inline const char * cam_sim_name(struct cam_sim *sim);
+static __inline void * cam_sim_softc(struct cam_sim *sim);
+static __inline u_int32_t cam_sim_unit(struct cam_sim *sim);
+#ifndef __rtems__
+static __inline u_int32_t cam_sim_bus(struct cam_sim *sim);
+#endif /* __rtems__ */
+
+
+
+/* Generically useful offsets into the sim private area */
+#define spriv_ptr0 sim_priv.entries[0].ptr
+#define spriv_ptr1 sim_priv.entries[1].ptr
+#define spriv_field0 sim_priv.entries[0].field
+#define spriv_field1 sim_priv.entries[1].field
+
+#ifdef __rtems__
+/**
+ * @brief SIM states.
+ *
+ * @dot
+ * digraph bsd_sim_state {
+ * BSD_SIM_INIT -> BSD_SIM_INIT_BUSY;
+ * BSD_SIM_INIT -> BSD_SIM_IDLE;
+ * BSD_SIM_INIT_BUSY -> BSD_SIM_INIT_READY;
+ * BSD_SIM_BUSY -> BSD_SIM_IDLE;
+ * BSD_SIM_INIT_READY -> BSD_SIM_INIT;
+ * BSD_SIM_IDLE -> BSD_SIM_BUSY;
+ * BSD_SIM_IDLE -> BSD_SIM_DELETED;
+ * }
+ * @enddot
+ */
+enum bsd_sim_state {
+ BSD_SIM_INIT = 0,
+ BSD_SIM_INIT_BUSY,
+ BSD_SIM_INIT_READY,
+ BSD_SIM_IDLE,
+ BSD_SIM_BUSY,
+ BSD_SIM_DELETED
+};
+#endif /* __rtems__ */
+
+/*
+ * The sim driver should not access anything directly from this
+ * structure.
+ */
+struct cam_sim {
+ sim_action_func sim_action;
+ sim_poll_func sim_poll;
+ const char *sim_name;
+ void *softc;
+ struct mtx *mtx;
+#ifndef __rtems__
+ TAILQ_HEAD(, ccb_hdr) sim_doneq;
+ TAILQ_ENTRY(cam_sim) links;
+ u_int32_t path_id; /* The Boot device may set this to 0? */
+#else /* __rtems__ */
+ char *disk;
+ enum bsd_sim_state state;
+ struct cv state_changed;
+ union ccb ccb;
+#endif /* __rtems__ */
+ u_int32_t unit_number;
+#ifndef __rtems__
+ u_int32_t bus_id;
+ int max_tagged_dev_openings;
+ int max_dev_openings;
+ u_int32_t flags;
+#define CAM_SIM_REL_TIMEOUT_PENDING 0x01
+#define CAM_SIM_MPSAFE 0x02
+#define CAM_SIM_ON_DONEQ 0x04
+ struct callout callout;
+ struct cam_devq *devq; /* Device Queue to use for this SIM */
+ int refcount; /* References to the SIM. */
+
+ /* "Pool" of inactive ccbs managed by xpt_get_ccb and xpt_release_ccb */
+ SLIST_HEAD(,ccb_hdr) ccb_freeq;
+ /*
+ * Maximum size of ccb pool. Modified as devices are added/removed
+ * or have their opening counts changed.
+ */
+ u_int max_ccbs;
+ /* Current count of allocated ccbs */
+ u_int ccb_count;
+#endif /* __rtems__ */
+
+};
+
+#define CAM_SIM_LOCK(sim) mtx_lock((sim)->mtx);
+#define CAM_SIM_UNLOCK(sim) mtx_unlock((sim)->mtx);
+
+static __inline u_int32_t
+cam_sim_path(struct cam_sim *sim)
+{
+#ifndef __rtems__
+ return (sim->path_id);
+#else /* __rtems__ */
+ return (0);
+#endif /* __rtems__ */
+}
+
+static __inline const char *
+cam_sim_name(struct cam_sim *sim)
+{
+ return (sim->sim_name);
+}
+
+static __inline void *
+cam_sim_softc(struct cam_sim *sim)
+{
+ return (sim->softc);
+}
+
+static __inline u_int32_t
+cam_sim_unit(struct cam_sim *sim)
+{
+ return (sim->unit_number);
+}
+
+#ifndef __rtems__
+static __inline u_int32_t
+cam_sim_bus(struct cam_sim *sim)
+{
+ return (sim->bus_id);
+}
+#endif /* __rtems__ */
+
+#endif /* _KERNEL */
+#endif /* _CAM_CAM_SIM_H */
diff --git a/rtems/freebsd/cam/cam_xpt.h b/rtems/freebsd/cam/cam_xpt.h
new file mode 100644
index 00000000..61a7f3f0
--- /dev/null
+++ b/rtems/freebsd/cam/cam_xpt.h
@@ -0,0 +1,138 @@
+/*-
+ * Data structures and definitions for dealing with the
+ * Common Access Method Transport (xpt) layer.
+ *
+ * Copyright (c) 1997 Justin T. Gibbs.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification, immediately at the beginning of the file.
+ * 2. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _CAM_CAM_XPT_H
+#define _CAM_CAM_XPT_H 1
+
+/* Forward Declarations */
+union ccb;
+struct cam_periph;
+struct cam_sim;
+
+/*
+ * Definition of a CAM path. Paths are created from bus, target, and lun ids
+ * via xpt_create_path and allow for reference to devices without recurring
+ * lookups in the edt.
+ */
+struct cam_path;
+
+/* Path functions */
+
+#ifdef _KERNEL
+
+/*
+ * Definition of an async handler callback block. These are used to add
+ * SIMs and peripherals to the async callback lists.
+ */
+struct async_node {
+ SLIST_ENTRY(async_node) links;
+ u_int32_t event_enable; /* Async Event enables */
+ void (*callback)(void *arg, u_int32_t code,
+ struct cam_path *path, void *args);
+ void *callback_arg;
+};
+
+SLIST_HEAD(async_list, async_node);
+SLIST_HEAD(periph_list, cam_periph);
+
+#if defined(CAM_DEBUG_FLAGS) && !defined(CAMDEBUG)
+#error "You must have options CAMDEBUG to use options CAM_DEBUG_FLAGS"
+#endif
+
+/*
+ * In order to enable the CAM_DEBUG_* options, the user must have CAMDEBUG
+ * enabled. Also, the user must have either none, or all of CAM_DEBUG_BUS,
+ * CAM_DEBUG_TARGET, and CAM_DEBUG_LUN specified.
+ */
+#if defined(CAM_DEBUG_BUS) || defined(CAM_DEBUG_TARGET) \
+ || defined(CAM_DEBUG_LUN)
+#ifdef CAMDEBUG
+#if !defined(CAM_DEBUG_BUS) || !defined(CAM_DEBUG_TARGET) \
+ || !defined(CAM_DEBUG_LUN)
+#error "You must define all or none of CAM_DEBUG_BUS, CAM_DEBUG_TARGET \
+ and CAM_DEBUG_LUN"
+#endif /* !CAM_DEBUG_BUS || !CAM_DEBUG_TARGET || !CAM_DEBUG_LUN */
+#else /* !CAMDEBUG */
+#error "You must use options CAMDEBUG if you use the CAM_DEBUG_* options"
+#endif /* CAMDEBUG */
+#endif /* CAM_DEBUG_BUS || CAM_DEBUG_TARGET || CAM_DEBUG_LUN */
+
+void xpt_action(union ccb *new_ccb);
+void xpt_action_default(union ccb *new_ccb);
+union ccb *xpt_alloc_ccb(void);
+union ccb *xpt_alloc_ccb_nowait(void);
+void xpt_free_ccb(union ccb *free_ccb);
+void xpt_setup_ccb(struct ccb_hdr *ccb_h,
+ struct cam_path *path,
+ u_int32_t priority);
+void xpt_merge_ccb(union ccb *master_ccb,
+ union ccb *slave_ccb);
+cam_status xpt_create_path(struct cam_path **new_path_ptr,
+ struct cam_periph *perph,
+ path_id_t path_id,
+ target_id_t target_id, lun_id_t lun_id);
+cam_status xpt_create_path_unlocked(struct cam_path **new_path_ptr,
+ struct cam_periph *perph,
+ path_id_t path_id,
+ target_id_t target_id, lun_id_t lun_id);
+void xpt_free_path(struct cam_path *path);
+int xpt_path_comp(struct cam_path *path1,
+ struct cam_path *path2);
+void xpt_print_path(struct cam_path *path);
+void xpt_print(struct cam_path *path, const char *fmt, ...);
+int xpt_path_string(struct cam_path *path, char *str,
+ size_t str_len);
+path_id_t xpt_path_path_id(struct cam_path *path);
+target_id_t xpt_path_target_id(struct cam_path *path);
+lun_id_t xpt_path_lun_id(struct cam_path *path);
+struct cam_sim *xpt_path_sim(struct cam_path *path);
+struct cam_periph *xpt_path_periph(struct cam_path *path);
+void xpt_async(u_int32_t async_code, struct cam_path *path,
+ void *async_arg);
+void xpt_rescan(union ccb *ccb);
+void xpt_hold_boot(void);
+void xpt_release_boot(void);
+void xpt_lock_buses(void);
+void xpt_unlock_buses(void);
+cam_status xpt_register_async(int event, ac_callback_t *cbfunc,
+ void *cbarg, struct cam_path *path);
+cam_status xpt_compile_path(struct cam_path *new_path,
+ struct cam_periph *perph,
+ path_id_t path_id,
+ target_id_t target_id,
+ lun_id_t lun_id);
+
+void xpt_release_path(struct cam_path *path);
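+
+/*
+ * Example (sketch): resolving a bus/target/lun triple to a path,
+ * printing through it, and releasing it; the IDs are hypothetical.
+ *
+ *	struct cam_path *path;
+ *
+ *	if (xpt_create_path(&path, NULL, path_id, target_id, lun_id) ==
+ *	    CAM_REQ_CMP) {
+ *		xpt_print(path, "rescanning\n");
+ *		xpt_free_path(path);
+ *	}
+ */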
+
+#endif /* _KERNEL */
+
+#endif /* _CAM_CAM_XPT_H */
+
diff --git a/rtems/freebsd/cam/cam_xpt_sim.h b/rtems/freebsd/cam/cam_xpt_sim.h
new file mode 100644
index 00000000..036e9902
--- /dev/null
+++ b/rtems/freebsd/cam/cam_xpt_sim.h
@@ -0,0 +1,57 @@
+/*-
+ * Data structures and definitions for dealing with the
+ * Common Access Method Transport (xpt) layer.
+ *
+ * Copyright (c) 1997 Justin T. Gibbs.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification, immediately at the beginning of the file.
+ * 2. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _CAM_CAM_XPT_SIM_H
+#define _CAM_CAM_XPT_SIM_H 1
+
+#include <rtems/freebsd/cam/cam_xpt.h>
+#include <rtems/freebsd/cam/cam_queue.h>
+
+/* Functions accessed by SIM drivers */
+#ifdef _KERNEL
+int32_t xpt_bus_register(struct cam_sim *sim, device_t parent,
+ u_int32_t bus);
+int32_t xpt_bus_deregister(path_id_t path_id);
+u_int32_t xpt_freeze_simq(struct cam_sim *sim, u_int count);
+void xpt_release_simq(struct cam_sim *sim, int run_queue);
+u_int32_t xpt_freeze_devq(struct cam_path *path, u_int count);
+u_int32_t xpt_freeze_devq_rl(struct cam_path *path, cam_rl rl,
+ u_int count);
+void xpt_release_devq(struct cam_path *path,
+ u_int count, int run_queue);
+void xpt_release_devq_rl(struct cam_path *path, cam_rl rl,
+ u_int count, int run_queue);
+int xpt_sim_opened(struct cam_sim *sim);
+void xpt_done(union ccb *done_ccb);
+#endif
+
+#endif /* _CAM_CAM_XPT_SIM_H */
+
diff --git a/rtems/freebsd/cam/scsi/scsi_all.c b/rtems/freebsd/cam/scsi/scsi_all.c
new file mode 100644
index 00000000..a3f6f5e9
--- /dev/null
+++ b/rtems/freebsd/cam/scsi/scsi_all.c
@@ -0,0 +1,4305 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Implementation of Utility functions for all SCSI device types.
+ *
+ * Copyright (c) 1997, 1998, 1999 Justin T. Gibbs.
+ * Copyright (c) 1997, 1998, 2003 Kenneth D. Merry.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification, immediately at the beginning of the file.
+ * 2. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/freebsd/sys/param.h>
+
+#ifdef _KERNEL
+#ifndef __rtems__
+#include <rtems/freebsd/opt_scsi.h>
+#else /* __rtems__ */
+#include <rtems/freebsd/local/opt_scsi.h>
+#endif /* __rtems__ */
+
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/libkern.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/sysctl.h>
+#else
+#include <rtems/freebsd/errno.h>
+#include <rtems/freebsd/stdio.h>
+#include <rtems/freebsd/stdlib.h>
+#include <rtems/freebsd/string.h>
+#endif
+
+#include <rtems/freebsd/cam/cam.h>
+#include <rtems/freebsd/cam/cam_ccb.h>
+#include <rtems/freebsd/cam/cam_queue.h>
+#include <rtems/freebsd/cam/cam_xpt.h>
+#include <rtems/freebsd/cam/scsi/scsi_all.h>
+#include <rtems/freebsd/sys/sbuf.h>
+#ifndef _KERNEL
+#include <rtems/freebsd/camlib.h>
+
+#ifndef FALSE
+#define FALSE 0
+#endif /* FALSE */
+#ifndef TRUE
+#define TRUE 1
+#endif /* TRUE */
+#define ERESTART -1 /* restart syscall */
+#define EJUSTRETURN -2 /* don't modify regs, just return */
+#endif /* !_KERNEL */
+
+/*
+ * This is the default number of milliseconds we wait for devices to settle
+ * after a SCSI bus reset.
+ */
+#ifndef SCSI_DELAY
+#define SCSI_DELAY 2000
+#endif
+/*
+ * All devices need _some_ sort of bus settle delay, so we'll set it to
+ * a minimum value of 100ms. Note that this is pertinent only for SPI;
+ * it does not apply to transports like Fibre Channel or iSCSI, where
+ * 'delay' is completely meaningless.
+ */
+#ifndef SCSI_MIN_DELAY
+#define SCSI_MIN_DELAY 100
+#endif
+/*
+ * Make sure the user isn't using seconds instead of milliseconds.
+ */
+#if (SCSI_DELAY < SCSI_MIN_DELAY && SCSI_DELAY != 0)
+#error "SCSI_DELAY is in milliseconds, not seconds! Please use a larger value"
+#endif
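+
+/*
+ * The delay is normally overridden from the kernel configuration rather
+ * than by editing this file, e.g.:
+ *
+ *	options SCSI_DELAY=5000		(5 seconds, expressed in ms)
+ */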
+
+#ifndef __rtems__
+int scsi_delay;
+
+static int ascentrycomp(const void *key, const void *member);
+static int senseentrycomp(const void *key, const void *member);
+static void fetchtableentries(int sense_key, int asc, int ascq,
+ struct scsi_inquiry_data *,
+ const struct sense_key_table_entry **,
+ const struct asc_table_entry **);
+#ifdef _KERNEL
+static void init_scsi_delay(void);
+static int sysctl_scsi_delay(SYSCTL_HANDLER_ARGS);
+static int set_scsi_delay(int delay);
+#endif
+
+#if !defined(SCSI_NO_OP_STRINGS)
+
+#define D (1 << T_DIRECT)
+#define T (1 << T_SEQUENTIAL)
+#define L (1 << T_PRINTER)
+#define P (1 << T_PROCESSOR)
+#define W (1 << T_WORM)
+#define R (1 << T_CDROM)
+#define O (1 << T_OPTICAL)
+#define M (1 << T_CHANGER)
+#define A (1 << T_STORARRAY)
+#define E (1 << T_ENCLOSURE)
+#define B (1 << T_RBC)
+#define K (1 << T_OCRW)
+#define V (1 << T_ADC)
+#define F (1 << T_OSD)
+#define S (1 << T_SCANNER)
+#define C (1 << T_COMM)
+
+#define ALL (D | T | L | P | W | R | O | M | A | E | B | K | V | F | S | C)
+
+static struct op_table_entry plextor_cd_ops[] = {
+ { 0xD8, R, "CD-DA READ" }
+};
+
+static struct scsi_op_quirk_entry scsi_op_quirk_table[] = {
+ {
+ /*
+ * I believe that 0xD8 is the Plextor proprietary command
+ * to read CD-DA data. I'm not sure which Plextor CDROM
+ * models support the command, though. I know for sure
+ * that the 4X, 8X, and 12X models do, and presumably the
+ * 12-20X does. I don't know about any earlier models,
+ * though. If anyone has any more complete information,
+ * feel free to change this quirk entry.
+ */
+ {T_CDROM, SIP_MEDIA_REMOVABLE, "PLEXTOR", "CD-ROM PX*", "*"},
+ sizeof(plextor_cd_ops)/sizeof(struct op_table_entry),
+ plextor_cd_ops
+ }
+};
+
+static struct op_table_entry scsi_op_codes[] = {
+ /*
+ * From: http://www.t10.org/lists/op-num.txt
+ * Modifications by Kenneth Merry (ken@FreeBSD.ORG)
+ * and Jung-uk Kim (jkim@FreeBSD.org)
+ *
+ * Note: order is important in this table, scsi_op_desc() currently
+ * depends on the opcodes in the table being in order to save
+ * search time.
+ * Note: scanner and comm. devices are carried over from the previous
+ * version because they were removed in the latest spec.
+ */
+ /* File: OP-NUM.TXT
+ *
+ * SCSI Operation Codes
+ * Numeric Sorted Listing
+ * as of 3/11/08
+ *
+ * D - DIRECT ACCESS DEVICE (SBC-2) device column key
+ * .T - SEQUENTIAL ACCESS DEVICE (SSC-2) -----------------
+ * . L - PRINTER DEVICE (SSC) M = Mandatory
+ * . P - PROCESSOR DEVICE (SPC) O = Optional
+ * . .W - WRITE ONCE READ MULTIPLE DEVICE (SBC-2) V = Vendor spec.
+ * . . R - CD/DVD DEVICE (MMC-3) Z = Obsolete
+ * . . O - OPTICAL MEMORY DEVICE (SBC-2)
+ * . . .M - MEDIA CHANGER DEVICE (SMC-2)
+ * . . . A - STORAGE ARRAY DEVICE (SCC-2)
+ * . . . .E - ENCLOSURE SERVICES DEVICE (SES)
+ * . . . .B - SIMPLIFIED DIRECT-ACCESS DEVICE (RBC)
+ * . . . . K - OPTICAL CARD READER/WRITER DEVICE (OCRW)
+ * . . . . V - AUTOMATION/DRIVE INTERFACE (ADC)
+ * . . . . .F - OBJECT-BASED STORAGE (OSD)
+ * OP DTLPWROMAEBKVF Description
+ * -- -------------- ---------------------------------------------- */
+ /* 00 MMMMMMMMMMMMMM TEST UNIT READY */
+ { 0x00, ALL, "TEST UNIT READY" },
+ /* 01 M REWIND */
+ { 0x01, T, "REWIND" },
+ /* 01 Z V ZZZZ REZERO UNIT */
+ { 0x01, D | W | R | O | M, "REZERO UNIT" },
+ /* 02 VVVVVV V */
+ /* 03 MMMMMMMMMMOMMM REQUEST SENSE */
+ { 0x03, ALL, "REQUEST SENSE" },
+ /* 04 M OO FORMAT UNIT */
+ { 0x04, D | R | O, "FORMAT UNIT" },
+ /* 04 O FORMAT MEDIUM */
+ { 0x04, T, "FORMAT MEDIUM" },
+ /* 04 O FORMAT */
+ { 0x04, L, "FORMAT" },
+ /* 05 VMVVVV V READ BLOCK LIMITS */
+ { 0x05, T, "READ BLOCK LIMITS" },
+ /* 06 VVVVVV V */
+ /* 07 OVV O OV REASSIGN BLOCKS */
+ { 0x07, D | W | O, "REASSIGN BLOCKS" },
+ /* 07 O INITIALIZE ELEMENT STATUS */
+ { 0x07, M, "INITIALIZE ELEMENT STATUS" },
+ /* 08 MOV O OV READ(6) */
+ { 0x08, D | T | W | O, "READ(6)" },
+ /* 08 O RECEIVE */
+ { 0x08, P, "RECEIVE" },
+ /* 08 GET MESSAGE(6) */
+ { 0x08, C, "GET MESSAGE(6)" },
+ /* 09 VVVVVV V */
+ /* 0A OO O OV WRITE(6) */
+ { 0x0A, D | T | W | O, "WRITE(6)" },
+ /* 0A M SEND(6) */
+ { 0x0A, P, "SEND(6)" },
+ /* 0A SEND MESSAGE(6) */
+ { 0x0A, C, "SEND MESSAGE(6)" },
+ /* 0A M PRINT */
+ { 0x0A, L, "PRINT" },
+ /* 0B Z ZOZV SEEK(6) */
+ { 0x0B, D | W | R | O, "SEEK(6)" },
+ /* 0B O SET CAPACITY */
+ { 0x0B, T, "SET CAPACITY" },
+ /* 0B O SLEW AND PRINT */
+ { 0x0B, L, "SLEW AND PRINT" },
+ /* 0C VVVVVV V */
+ /* 0D VVVVVV V */
+ /* 0E VVVVVV V */
+ /* 0F VOVVVV V READ REVERSE(6) */
+ { 0x0F, T, "READ REVERSE(6)" },
+ /* 10 VM VVV WRITE FILEMARKS(6) */
+ { 0x10, T, "WRITE FILEMARKS(6)" },
+ /* 10 O SYNCHRONIZE BUFFER */
+ { 0x10, L, "SYNCHRONIZE BUFFER" },
+ /* 11 VMVVVV SPACE(6) */
+ { 0x11, T, "SPACE(6)" },
+ /* 12 MMMMMMMMMMMMMM INQUIRY */
+ { 0x12, ALL, "INQUIRY" },
+ /* 13 V VVVV */
+ /* 13 O VERIFY(6) */
+ { 0x13, T, "VERIFY(6)" },
+ /* 14 VOOVVV RECOVER BUFFERED DATA */
+ { 0x14, T | L, "RECOVER BUFFERED DATA" },
+ /* 15 OMO O OOOO OO MODE SELECT(6) */
+ { 0x15, ALL & ~(P | R | B | F), "MODE SELECT(6)" },
+ /* 16 ZZMZO OOOZ O RESERVE(6) */
+ { 0x16, ALL & ~(R | B | V | F | C), "RESERVE(6)" },
+ /* 16 Z RESERVE ELEMENT(6) */
+ { 0x16, M, "RESERVE ELEMENT(6)" },
+ /* 17 ZZMZO OOOZ O RELEASE(6) */
+ { 0x17, ALL & ~(R | B | V | F | C), "RELEASE(6)" },
+ /* 17 Z RELEASE ELEMENT(6) */
+ { 0x17, M, "RELEASE ELEMENT(6)" },
+ /* 18 ZZZZOZO Z COPY */
+ { 0x18, D | T | L | P | W | R | O | K | S, "COPY" },
+ /* 19 VMVVVV ERASE(6) */
+ { 0x19, T, "ERASE(6)" },
+ /* 1A OMO O OOOO OO MODE SENSE(6) */
+ { 0x1A, ALL & ~(P | R | B | F), "MODE SENSE(6)" },
+ /* 1B O OOO O MO O START STOP UNIT */
+ { 0x1B, D | W | R | O | A | B | K | F, "START STOP UNIT" },
+ /* 1B O M LOAD UNLOAD */
+ { 0x1B, T | V, "LOAD UNLOAD" },
+ /* 1B SCAN */
+ { 0x1B, S, "SCAN" },
+ /* 1B O STOP PRINT */
+ { 0x1B, L, "STOP PRINT" },
+ /* 1B O OPEN/CLOSE IMPORT/EXPORT ELEMENT */
+ { 0x1B, M, "OPEN/CLOSE IMPORT/EXPORT ELEMENT" },
+ /* 1C OOOOO OOOM OOO RECEIVE DIAGNOSTIC RESULTS */
+ { 0x1C, ALL & ~(R | B), "RECEIVE DIAGNOSTIC RESULTS" },
+ /* 1D MMMMM MMOM MMM SEND DIAGNOSTIC */
+ { 0x1D, ALL & ~(R | B), "SEND DIAGNOSTIC" },
+ /* 1E OO OOOO O O PREVENT ALLOW MEDIUM REMOVAL */
+ { 0x1E, D | T | W | R | O | M | K | F, "PREVENT ALLOW MEDIUM REMOVAL" },
+ /* 1F */
+ /* 20 V VVV V */
+ /* 21 V VVV V */
+ /* 22 V VVV V */
+ /* 23 V V V V */
+ /* 23 O READ FORMAT CAPACITIES */
+ { 0x23, R, "READ FORMAT CAPACITIES" },
+ /* 24 V VV SET WINDOW */
+ { 0x24, S, "SET WINDOW" },
+ /* 25 M M M M READ CAPACITY(10) */
+ { 0x25, D | W | O | B, "READ CAPACITY(10)" },
+ /* 25 O READ CAPACITY */
+ { 0x25, R, "READ CAPACITY" },
+ /* 25 M READ CARD CAPACITY */
+ { 0x25, K, "READ CARD CAPACITY" },
+ /* 25 GET WINDOW */
+ { 0x25, S, "GET WINDOW" },
+ /* 26 V VV */
+ /* 27 V VV */
+ /* 28 M MOM MM READ(10) */
+ { 0x28, D | W | R | O | B | K | S, "READ(10)" },
+ /* 28 GET MESSAGE(10) */
+ { 0x28, C, "GET MESSAGE(10)" },
+ /* 29 V VVO READ GENERATION */
+ { 0x29, O, "READ GENERATION" },
+ /* 2A O MOM MO WRITE(10) */
+ { 0x2A, D | W | R | O | B | K, "WRITE(10)" },
+ /* 2A SEND(10) */
+ { 0x2A, S, "SEND(10)" },
+ /* 2A SEND MESSAGE(10) */
+ { 0x2A, C, "SEND MESSAGE(10)" },
+ /* 2B Z OOO O SEEK(10) */
+ { 0x2B, D | W | R | O | K, "SEEK(10)" },
+ /* 2B O LOCATE(10) */
+ { 0x2B, T, "LOCATE(10)" },
+ /* 2B O POSITION TO ELEMENT */
+ { 0x2B, M, "POSITION TO ELEMENT" },
+ /* 2C V OO ERASE(10) */
+ { 0x2C, R | O, "ERASE(10)" },
+ /* 2D O READ UPDATED BLOCK */
+ { 0x2D, O, "READ UPDATED BLOCK" },
+ /* 2D V */
+ /* 2E O OOO MO WRITE AND VERIFY(10) */
+ { 0x2E, D | W | R | O | B | K, "WRITE AND VERIFY(10)" },
+ /* 2F O OOO VERIFY(10) */
+ { 0x2F, D | W | R | O, "VERIFY(10)" },
+ /* 30 Z ZZZ SEARCH DATA HIGH(10) */
+ { 0x30, D | W | R | O, "SEARCH DATA HIGH(10)" },
+ /* 31 Z ZZZ SEARCH DATA EQUAL(10) */
+ { 0x31, D | W | R | O, "SEARCH DATA EQUAL(10)" },
+ /* 31 OBJECT POSITION */
+ { 0x31, S, "OBJECT POSITION" },
+ /* 32 Z ZZZ SEARCH DATA LOW(10) */
+ { 0x32, D | W | R | O, "SEARCH DATA LOW(10)" },
+ /* 33 Z OZO SET LIMITS(10) */
+ { 0x33, D | W | R | O, "SET LIMITS(10)" },
+ /* 34 O O O O PRE-FETCH(10) */
+ { 0x34, D | W | O | K, "PRE-FETCH(10)" },
+ /* 34 M READ POSITION */
+ { 0x34, T, "READ POSITION" },
+ /* 34 GET DATA BUFFER STATUS */
+ { 0x34, S, "GET DATA BUFFER STATUS" },
+ /* 35 O OOO MO SYNCHRONIZE CACHE(10) */
+ { 0x35, D | W | R | O | B | K, "SYNCHRONIZE CACHE(10)" },
+ /* 36 Z O O O LOCK UNLOCK CACHE(10) */
+ { 0x36, D | W | O | K, "LOCK UNLOCK CACHE(10)" },
+ /* 37 O O READ DEFECT DATA(10) */
+ { 0x37, D | O, "READ DEFECT DATA(10)" },
+ /* 37 O INITIALIZE ELEMENT STATUS WITH RANGE */
+ { 0x37, M, "INITIALIZE ELEMENT STATUS WITH RANGE" },
+ /* 38 O O O MEDIUM SCAN */
+ { 0x38, W | O | K, "MEDIUM SCAN" },
+ /* 39 ZZZZOZO Z COMPARE */
+ { 0x39, D | T | L | P | W | R | O | K | S, "COMPARE" },
+ /* 3A ZZZZOZO Z COPY AND VERIFY */
+ { 0x3A, D | T | L | P | W | R | O | K | S, "COPY AND VERIFY" },
+ /* 3B OOOOOOOOOOMOOO WRITE BUFFER */
+ { 0x3B, ALL, "WRITE BUFFER" },
+ /* 3C OOOOOOOOOO OOO READ BUFFER */
+ { 0x3C, ALL & ~(B), "READ BUFFER" },
+ /* 3D O UPDATE BLOCK */
+ { 0x3D, O, "UPDATE BLOCK" },
+ /* 3E O O O READ LONG(10) */
+ { 0x3E, D | W | O, "READ LONG(10)" },
+ /* 3F O O O WRITE LONG(10) */
+ { 0x3F, D | W | O, "WRITE LONG(10)" },
+ /* 40 ZZZZOZOZ CHANGE DEFINITION */
+ { 0x40, D | T | L | P | W | R | O | M | S | C, "CHANGE DEFINITION" },
+ /* 41 O WRITE SAME(10) */
+ { 0x41, D, "WRITE SAME(10)" },
+ /* 42 O READ SUB-CHANNEL */
+ { 0x42, R, "READ SUB-CHANNEL" },
+ /* 43 O READ TOC/PMA/ATIP */
+ { 0x43, R, "READ TOC/PMA/ATIP" },
+ /* 44 M M REPORT DENSITY SUPPORT */
+ { 0x44, T | V, "REPORT DENSITY SUPPORT" },
+ /* 44 READ HEADER */
+ /* 45 O PLAY AUDIO(10) */
+ { 0x45, R, "PLAY AUDIO(10)" },
+ /* 46 M GET CONFIGURATION */
+ { 0x46, R, "GET CONFIGURATION" },
+ /* 47 O PLAY AUDIO MSF */
+ { 0x47, R, "PLAY AUDIO MSF" },
+ /* 48 */
+ /* 49 */
+ /* 4A M GET EVENT STATUS NOTIFICATION */
+ { 0x4A, R, "GET EVENT STATUS NOTIFICATION" },
+ /* 4B O PAUSE/RESUME */
+ { 0x4B, R, "PAUSE/RESUME" },
+ /* 4C OOOOO OOOO OOO LOG SELECT */
+ { 0x4C, ALL & ~(R | B), "LOG SELECT" },
+ /* 4D OOOOO OOOO OMO LOG SENSE */
+ { 0x4D, ALL & ~(R | B), "LOG SENSE" },
+ /* 4E O STOP PLAY/SCAN */
+ { 0x4E, R, "STOP PLAY/SCAN" },
+ /* 4F */
+ /* 50 O XDWRITE(10) */
+ { 0x50, D, "XDWRITE(10)" },
+ /* 51 O XPWRITE(10) */
+ { 0x51, D, "XPWRITE(10)" },
+ /* 51 O READ DISC INFORMATION */
+ { 0x51, R, "READ DISC INFORMATION" },
+ /* 52 O XDREAD(10) */
+ { 0x52, D, "XDREAD(10)" },
+ /* 52 O READ TRACK INFORMATION */
+ { 0x52, R, "READ TRACK INFORMATION" },
+ /* 53 O RESERVE TRACK */
+ { 0x53, R, "RESERVE TRACK" },
+ /* 54 O SEND OPC INFORMATION */
+ { 0x54, R, "SEND OPC INFORMATION" },
+ /* 55 OOO OMOOOOMOMO MODE SELECT(10) */
+ { 0x55, ALL & ~(P), "MODE SELECT(10)" },
+ /* 56 ZZMZO OOOZ RESERVE(10) */
+ { 0x56, ALL & ~(R | B | K | V | F | C), "RESERVE(10)" },
+ /* 56 Z RESERVE ELEMENT(10) */
+ { 0x56, M, "RESERVE ELEMENT(10)" },
+ /* 57 ZZMZO OOOZ RELEASE(10) */
+ { 0x57, ALL & ~(R | B | K | V | F | C), "RELEASE(10)" },
+ /* 57 Z RELEASE ELEMENT(10) */
+ { 0x57, M, "RELEASE ELEMENT(10)" },
+ /* 58 O REPAIR TRACK */
+ { 0x58, R, "REPAIR TRACK" },
+ /* 59 */
+ /* 5A OOO OMOOOOMOMO MODE SENSE(10) */
+ { 0x5A, ALL & ~(P), "MODE SENSE(10)" },
+ /* 5B O CLOSE TRACK/SESSION */
+ { 0x5B, R, "CLOSE TRACK/SESSION" },
+ /* 5C O READ BUFFER CAPACITY */
+ { 0x5C, R, "READ BUFFER CAPACITY" },
+ /* 5D O SEND CUE SHEET */
+ { 0x5D, R, "SEND CUE SHEET" },
+ /* 5E OOOOO OOOO M PERSISTENT RESERVE IN */
+ { 0x5E, ALL & ~(R | B | K | V | C), "PERSISTENT RESERVE IN" },
+ /* 5F OOOOO OOOO M PERSISTENT RESERVE OUT */
+ { 0x5F, ALL & ~(R | B | K | V | C), "PERSISTENT RESERVE OUT" },
+ /* 7E OO O OOOO O extended CDB */
+ { 0x7E, D | T | R | M | A | E | B | V, "extended CDB" },
+ /* 7F O M variable length CDB (more than 16 bytes) */
+ { 0x7F, D | F, "variable length CDB (more than 16 bytes)" },
+ /* 80 Z XDWRITE EXTENDED(16) */
+ { 0x80, D, "XDWRITE EXTENDED(16)" },
+ /* 80 M WRITE FILEMARKS(16) */
+ { 0x80, T, "WRITE FILEMARKS(16)" },
+ /* 81 Z REBUILD(16) */
+ { 0x81, D, "REBUILD(16)" },
+ /* 81 O READ REVERSE(16) */
+ { 0x81, T, "READ REVERSE(16)" },
+ /* 82 Z REGENERATE(16) */
+ { 0x82, D, "REGENERATE(16)" },
+ /* 83 OOOOO O OO EXTENDED COPY */
+ { 0x83, D | T | L | P | W | O | K | V, "EXTENDED COPY" },
+ /* 84 OOOOO O OO RECEIVE COPY RESULTS */
+ { 0x84, D | T | L | P | W | O | K | V, "RECEIVE COPY RESULTS" },
+ /* 85 O O O ATA COMMAND PASS THROUGH(16) */
+ { 0x85, D | R | B, "ATA COMMAND PASS THROUGH(16)" },
+ /* 86 OO OO OOOOOOO ACCESS CONTROL IN */
+ { 0x86, ALL & ~(L | R | F), "ACCESS CONTROL IN" },
+ /* 87 OO OO OOOOOOO ACCESS CONTROL OUT */
+ { 0x87, ALL & ~(L | R | F), "ACCESS CONTROL OUT" },
+ /*
+ * XXX READ(16)/WRITE(16) were not listed for CD/DVD in op-num.txt,
+ * but we have had them since r1.40. Do we really want them?
+ */
+ /* 88 MM O O O READ(16) */
+ { 0x88, D | T | W | O | B, "READ(16)" },
+ /* 89 */
+ /* 8A OM O O O WRITE(16) */
+ { 0x8A, D | T | W | O | B, "WRITE(16)" },
+ /* 8B O ORWRITE */
+ { 0x8B, D, "ORWRITE" },
+ /* 8C OO O OO O M READ ATTRIBUTE */
+ { 0x8C, D | T | W | O | M | B | V, "READ ATTRIBUTE" },
+ /* 8D OO O OO O O WRITE ATTRIBUTE */
+ { 0x8D, D | T | W | O | M | B | V, "WRITE ATTRIBUTE" },
+ /* 8E O O O O WRITE AND VERIFY(16) */
+ { 0x8E, D | W | O | B, "WRITE AND VERIFY(16)" },
+ /* 8F OO O O O VERIFY(16) */
+ { 0x8F, D | T | W | O | B, "VERIFY(16)" },
+ /* 90 O O O O PRE-FETCH(16) */
+ { 0x90, D | W | O | B, "PRE-FETCH(16)" },
+ /* 91 O O O O SYNCHRONIZE CACHE(16) */
+ { 0x91, D | W | O | B, "SYNCHRONIZE CACHE(16)" },
+ /* 91 O SPACE(16) */
+ { 0x91, T, "SPACE(16)" },
+ /* 92 Z O O LOCK UNLOCK CACHE(16) */
+ { 0x92, D | W | O, "LOCK UNLOCK CACHE(16)" },
+ /* 92 O LOCATE(16) */
+ { 0x92, T, "LOCATE(16)" },
+ /* 93 O WRITE SAME(16) */
+ { 0x93, D, "WRITE SAME(16)" },
+ /* 93 M ERASE(16) */
+ { 0x93, T, "ERASE(16)" },
+ /* 94 [usage proposed by SCSI Socket Services project] */
+ /* 95 [usage proposed by SCSI Socket Services project] */
+ /* 96 [usage proposed by SCSI Socket Services project] */
+ /* 97 [usage proposed by SCSI Socket Services project] */
+ /* 98 */
+ /* 99 */
+ /* 9A */
+ /* 9B */
+ /* 9C */
+ /* 9D */
+ /* XXX KDM ALL for this? op-num.txt defines it for none.. */
+ /* 9E SERVICE ACTION IN(16) */
+ { 0x9E, ALL, "SERVICE ACTION IN(16)" },
+ /* XXX KDM ALL for this? op-num.txt defines it for ADC.. */
+ /* 9F M SERVICE ACTION OUT(16) */
+ { 0x9F, ALL, "SERVICE ACTION OUT(16)" },
+ /* A0 MMOOO OMMM OMO REPORT LUNS */
+ { 0xA0, ALL & ~(R | B), "REPORT LUNS" },
+ /* A1 O BLANK */
+ { 0xA1, R, "BLANK" },
+ /* A1 O O ATA COMMAND PASS THROUGH(12) */
+ { 0xA1, D | B, "ATA COMMAND PASS THROUGH(12)" },
+ /* A2 OO O O SECURITY PROTOCOL IN */
+ { 0xA2, D | T | R | V, "SECURITY PROTOCOL IN" },
+ /* A3 OOO O OOMOOOM MAINTENANCE (IN) */
+ { 0xA3, ALL & ~(P | R | F), "MAINTENANCE (IN)" },
+ /* A3 O SEND KEY */
+ { 0xA3, R, "SEND KEY" },
+ /* A4 OOO O OOOOOOO MAINTENANCE (OUT) */
+ { 0xA4, ALL & ~(P | R | F), "MAINTENANCE (OUT)" },
+ /* A4 O REPORT KEY */
+ { 0xA4, R, "REPORT KEY" },
+ /* A5 O O OM MOVE MEDIUM */
+ { 0xA5, T | W | O | M, "MOVE MEDIUM" },
+ /* A5 O PLAY AUDIO(12) */
+ { 0xA5, R, "PLAY AUDIO(12)" },
+ /* A6 O EXCHANGE MEDIUM */
+ { 0xA6, M, "EXCHANGE MEDIUM" },
+ /* A6 O LOAD/UNLOAD CD/DVD */
+ { 0xA6, R, "LOAD/UNLOAD CD/DVD" },
+ /* A7 ZZ O O MOVE MEDIUM ATTACHED */
+ { 0xA7, D | T | W | O, "MOVE MEDIUM ATTACHED" },
+ /* A7 O SET READ AHEAD */
+ { 0xA7, R, "SET READ AHEAD" },
+ /* A8 O OOO READ(12) */
+ { 0xA8, D | W | R | O, "READ(12)" },
+ /* A8 GET MESSAGE(12) */
+ { 0xA8, C, "GET MESSAGE(12)" },
+ /* A9 O SERVICE ACTION OUT(12) */
+ { 0xA9, V, "SERVICE ACTION OUT(12)" },
+ /* AA O OOO WRITE(12) */
+ { 0xAA, D | W | R | O, "WRITE(12)" },
+ /* AA SEND MESSAGE(12) */
+ { 0xAA, C, "SEND MESSAGE(12)" },
+ /* AB O O SERVICE ACTION IN(12) */
+ { 0xAB, R | V, "SERVICE ACTION IN(12)" },
+ /* AC O ERASE(12) */
+ { 0xAC, O, "ERASE(12)" },
+ /* AC O GET PERFORMANCE */
+ { 0xAC, R, "GET PERFORMANCE" },
+ /* AD O READ DVD STRUCTURE */
+ { 0xAD, R, "READ DVD STRUCTURE" },
+ /* AE O O O WRITE AND VERIFY(12) */
+ { 0xAE, D | W | O, "WRITE AND VERIFY(12)" },
+ /* AF O OZO VERIFY(12) */
+ { 0xAF, D | W | R | O, "VERIFY(12)" },
+ /* B0 ZZZ SEARCH DATA HIGH(12) */
+ { 0xB0, W | R | O, "SEARCH DATA HIGH(12)" },
+ /* B1 ZZZ SEARCH DATA EQUAL(12) */
+ { 0xB1, W | R | O, "SEARCH DATA EQUAL(12)" },
+ /* B2 ZZZ SEARCH DATA LOW(12) */
+ { 0xB2, W | R | O, "SEARCH DATA LOW(12)" },
+ /* B3 Z OZO SET LIMITS(12) */
+ { 0xB3, D | W | R | O, "SET LIMITS(12)" },
+ /* B4 ZZ OZO READ ELEMENT STATUS ATTACHED */
+ { 0xB4, D | T | W | R | O, "READ ELEMENT STATUS ATTACHED" },
+ /* B5 OO O O SECURITY PROTOCOL OUT */
+ { 0xB5, D | T | R | V, "SECURITY PROTOCOL OUT" },
+ /* B5 O REQUEST VOLUME ELEMENT ADDRESS */
+ { 0xB5, M, "REQUEST VOLUME ELEMENT ADDRESS" },
+ /* B6 O SEND VOLUME TAG */
+ { 0xB6, M, "SEND VOLUME TAG" },
+ /* B6 O SET STREAMING */
+ { 0xB6, R, "SET STREAMING" },
+ /* B7 O O READ DEFECT DATA(12) */
+ { 0xB7, D | O, "READ DEFECT DATA(12)" },
+ /* B8 O OZOM READ ELEMENT STATUS */
+ { 0xB8, T | W | R | O | M, "READ ELEMENT STATUS" },
+ /* B9 O READ CD MSF */
+ { 0xB9, R, "READ CD MSF" },
+ /* BA O O OOMO REDUNDANCY GROUP (IN) */
+ { 0xBA, D | W | O | M | A | E, "REDUNDANCY GROUP (IN)" },
+ /* BA O SCAN */
+ { 0xBA, R, "SCAN" },
+ /* BB O O OOOO REDUNDANCY GROUP (OUT) */
+ { 0xBB, D | W | O | M | A | E, "REDUNDANCY GROUP (OUT)" },
+ /* BB O SET CD SPEED */
+ { 0xBB, R, "SET CD SPEED" },
+ /* BC O O OOMO SPARE (IN) */
+ { 0xBC, D | W | O | M | A | E, "SPARE (IN)" },
+ /* BD O O OOOO SPARE (OUT) */
+ { 0xBD, D | W | O | M | A | E, "SPARE (OUT)" },
+ /* BD O MECHANISM STATUS */
+ { 0xBD, R, "MECHANISM STATUS" },
+ /* BE O O OOMO VOLUME SET (IN) */
+ { 0xBE, D | W | O | M | A | E, "VOLUME SET (IN)" },
+ /* BE O READ CD */
+ { 0xBE, R, "READ CD" },
+ /* BF O O OOOO VOLUME SET (OUT) */
+ { 0xBF, D | W | O | M | A | E, "VOLUME SET (OUT)" },
+ /* BF O SEND DVD STRUCTURE */
+ { 0xBF, R, "SEND DVD STRUCTURE" }
+};
+
+const char *
+scsi_op_desc(u_int16_t opcode, struct scsi_inquiry_data *inq_data)
+{
+ caddr_t match;
+ int i, j;
+ u_int32_t opmask;
+ u_int16_t pd_type;
+ int num_ops[2];
+ struct op_table_entry *table[2];
+ int num_tables;
+
+ pd_type = SID_TYPE(inq_data);
+
+ match = cam_quirkmatch((caddr_t)inq_data,
+ (caddr_t)scsi_op_quirk_table,
+ sizeof(scsi_op_quirk_table)/
+ sizeof(*scsi_op_quirk_table),
+ sizeof(*scsi_op_quirk_table),
+ scsi_inquiry_match);
+
+ if (match != NULL) {
+ table[0] = ((struct scsi_op_quirk_entry *)match)->op_table;
+ num_ops[0] = ((struct scsi_op_quirk_entry *)match)->num_ops;
+ table[1] = scsi_op_codes;
+ num_ops[1] = sizeof(scsi_op_codes)/sizeof(scsi_op_codes[0]);
+ num_tables = 2;
+ } else {
+ /*
+ * If this is true, we have a vendor specific opcode that
+ * wasn't covered in the quirk table.
+ */
+ if ((opcode > 0xBF) || ((opcode > 0x5F) && (opcode < 0x80)))
+ return("Vendor Specific Command");
+
+ table[0] = scsi_op_codes;
+ num_ops[0] = sizeof(scsi_op_codes)/sizeof(scsi_op_codes[0]);
+ num_tables = 1;
+ }
+
+ /* RBC is 'Simplified' Direct Access Device */
+ if (pd_type == T_RBC)
+ pd_type = T_DIRECT;
+
+ opmask = 1 << pd_type;
+
+ for (j = 0; j < num_tables; j++) {
+ for (i = 0;i < num_ops[j] && table[j][i].opcode <= opcode; i++){
+ if ((table[j][i].opcode == opcode)
+ && ((table[j][i].opmask & opmask) != 0))
+ return(table[j][i].desc);
+ }
+ }
+
+ /*
+ * If we can't find a match for the command in the table, we just
+ * assume it's a vendor specific command.
+ */
+ return("Vendor Specific Command");
+
+}
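+
+/*
+ * Example (sketch): mapping a CDB opcode to its name for error
+ * reporting; "inq_data" is assumed to hold the device's INQUIRY data.
+ *
+ *	const char *desc = scsi_op_desc(cdb[0], &inq_data);
+ *	printf("%s (0x%02x) failed\n", desc, cdb[0]);
+ */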
+
+#else /* SCSI_NO_OP_STRINGS */
+
+const char *
+scsi_op_desc(u_int16_t opcode, struct scsi_inquiry_data *inq_data)
+{
+ return("");
+}
+
+#endif
+
+
+#if !defined(SCSI_NO_SENSE_STRINGS)
+#define SST(asc, ascq, action, desc) \
+ asc, ascq, action, desc
+#else
+const char empty_string[] = "";
+
+#define SST(asc, ascq, action, desc) \
+ asc, ascq, action, empty_string
+#endif
+
+const struct sense_key_table_entry sense_key_table[] =
+{
+ { SSD_KEY_NO_SENSE, SS_NOP, "NO SENSE" },
+ { SSD_KEY_RECOVERED_ERROR, SS_NOP|SSQ_PRINT_SENSE, "RECOVERED ERROR" },
+ {
+ SSD_KEY_NOT_READY, SS_TUR|SSQ_MANY|SSQ_DECREMENT_COUNT|EBUSY,
+ "NOT READY"
+ },
+ { SSD_KEY_MEDIUM_ERROR, SS_RDEF, "MEDIUM ERROR" },
+ { SSD_KEY_HARDWARE_ERROR, SS_RDEF, "HARDWARE FAILURE" },
+ { SSD_KEY_ILLEGAL_REQUEST, SS_FATAL|EINVAL, "ILLEGAL REQUEST" },
+ { SSD_KEY_UNIT_ATTENTION, SS_FATAL|ENXIO, "UNIT ATTENTION" },
+ { SSD_KEY_DATA_PROTECT, SS_FATAL|EACCES, "DATA PROTECT" },
+ { SSD_KEY_BLANK_CHECK, SS_FATAL|ENOSPC, "BLANK CHECK" },
+ { SSD_KEY_Vendor_Specific, SS_FATAL|EIO, "Vendor Specific" },
+ { SSD_KEY_COPY_ABORTED, SS_FATAL|EIO, "COPY ABORTED" },
+ { SSD_KEY_ABORTED_COMMAND, SS_RDEF, "ABORTED COMMAND" },
+ { SSD_KEY_EQUAL, SS_NOP, "EQUAL" },
+ { SSD_KEY_VOLUME_OVERFLOW, SS_FATAL|EIO, "VOLUME OVERFLOW" },
+ { SSD_KEY_MISCOMPARE, SS_NOP, "MISCOMPARE" },
+ { SSD_KEY_RESERVED, SS_FATAL|EIO, "RESERVED" }
+};
+
+const int sense_key_table_size =
+ sizeof(sense_key_table)/sizeof(sense_key_table[0]);
+
+static struct asc_table_entry quantum_fireball_entries[] = {
+ { SST(0x04, 0x0b, SS_START | SSQ_DECREMENT_COUNT | ENXIO,
+ "Logical unit not ready, initializing cmd. required") }
+};
+
+static struct asc_table_entry sony_mo_entries[] = {
+ { SST(0x04, 0x00, SS_START | SSQ_DECREMENT_COUNT | ENXIO,
+ "Logical unit not ready, cause not reportable") }
+};
+
+static struct scsi_sense_quirk_entry sense_quirk_table[] = {
+ {
+ /*
+ * XXX The Quantum Fireball ST and SE like to return 0x04 0x0b
+ * when they really should return 0x04 0x02.
+ */
+ {T_DIRECT, SIP_MEDIA_FIXED, "QUANTUM", "FIREBALL S*", "*"},
+ /*num_sense_keys*/0,
+ sizeof(quantum_fireball_entries)/sizeof(struct asc_table_entry),
+ /*sense key entries*/NULL,
+ quantum_fireball_entries
+ },
+ {
+ /*
+ * This Sony MO drive likes to return 0x04, 0x00 when it
+ * isn't spun up.
+ */
+ {T_DIRECT, SIP_MEDIA_REMOVABLE, "SONY", "SMO-*", "*"},
+ /*num_sense_keys*/0,
+ sizeof(sony_mo_entries)/sizeof(struct asc_table_entry),
+ /*sense key entries*/NULL,
+ sony_mo_entries
+ }
+};
+
+const int sense_quirk_table_size =
+ sizeof(sense_quirk_table)/sizeof(sense_quirk_table[0]);
+
+static struct asc_table_entry asc_table[] = {
+ /*
+ * From: http://www.t10.org/lists/asc-num.txt
+ * Modifications by Jung-uk Kim (jkim@FreeBSD.org)
+ */
+ /*
+ * File: ASC-NUM.TXT
+ *
+ * SCSI ASC/ASCQ Assignments
+ * Numeric Sorted Listing
+ * as of 7/29/08
+ *
+ * D - DIRECT ACCESS DEVICE (SBC-2) device column key
+ * .T - SEQUENTIAL ACCESS DEVICE (SSC) -------------------
+ * . L - PRINTER DEVICE (SSC) blank = reserved
+ * . P - PROCESSOR DEVICE (SPC) not blank = allowed
+ * . .W - WRITE ONCE READ MULTIPLE DEVICE (SBC-2)
+ * . . R - CD DEVICE (MMC)
+ * . . O - OPTICAL MEMORY DEVICE (SBC-2)
+ * . . .M - MEDIA CHANGER DEVICE (SMC)
+ * . . . A - STORAGE ARRAY DEVICE (SCC)
+ * . . . E - ENCLOSURE SERVICES DEVICE (SES)
+ * . . . .B - SIMPLIFIED DIRECT-ACCESS DEVICE (RBC)
+ * . . . . K - OPTICAL CARD READER/WRITER DEVICE (OCRW)
+ * . . . . V - AUTOMATION/DRIVE INTERFACE (ADC)
+ * . . . . .F - OBJECT-BASED STORAGE (OSD)
+ * DTLPWROMAEBKVF
+ * ASC ASCQ Action
+ * Description
+ */
+ /* DTLPWROMAEBKVF */
+ { SST(0x00, 0x00, SS_NOP,
+ "No additional sense information") },
+ /* T */
+ { SST(0x00, 0x01, SS_RDEF,
+ "Filemark detected") },
+ /* T */
+ { SST(0x00, 0x02, SS_RDEF,
+ "End-of-partition/medium detected") },
+ /* T */
+ { SST(0x00, 0x03, SS_RDEF,
+ "Setmark detected") },
+ /* T */
+ { SST(0x00, 0x04, SS_RDEF,
+ "Beginning-of-partition/medium detected") },
+ /* TL */
+ { SST(0x00, 0x05, SS_RDEF,
+ "End-of-data detected") },
+ /* DTLPWROMAEBKVF */
+ { SST(0x00, 0x06, SS_RDEF,
+ "I/O process terminated") },
+ /* T */
+ { SST(0x00, 0x07, SS_RDEF, /* XXX TBD */
+ "Programmable early warning detected") },
+ /* R */
+ { SST(0x00, 0x11, SS_FATAL | EBUSY,
+ "Audio play operation in progress") },
+ /* R */
+ { SST(0x00, 0x12, SS_NOP,
+ "Audio play operation paused") },
+ /* R */
+ { SST(0x00, 0x13, SS_NOP,
+ "Audio play operation successfully completed") },
+ /* R */
+ { SST(0x00, 0x14, SS_RDEF,
+ "Audio play operation stopped due to error") },
+ /* R */
+ { SST(0x00, 0x15, SS_NOP,
+ "No current audio status to return") },
+ /* DTLPWROMAEBKVF */
+ { SST(0x00, 0x16, SS_FATAL | EBUSY,
+ "Operation in progress") },
+ /* DTL WROMAEBKVF */
+ { SST(0x00, 0x17, SS_RDEF,
+ "Cleaning requested") },
+ /* T */
+ { SST(0x00, 0x18, SS_RDEF, /* XXX TBD */
+ "Erase operation in progress") },
+ /* T */
+ { SST(0x00, 0x19, SS_RDEF, /* XXX TBD */
+ "Locate operation in progress") },
+ /* T */
+ { SST(0x00, 0x1A, SS_RDEF, /* XXX TBD */
+ "Rewind operation in progress") },
+ /* T */
+ { SST(0x00, 0x1B, SS_RDEF, /* XXX TBD */
+ "Set capacity operation in progress") },
+ /* T */
+ { SST(0x00, 0x1C, SS_RDEF, /* XXX TBD */
+ "Verify operation in progress") },
+ /* DT B */
+ { SST(0x00, 0x1D, SS_RDEF, /* XXX TBD */
+ "ATA pass through information available") },
+ /* DT R MAEBKV */
+ { SST(0x00, 0x1E, SS_RDEF, /* XXX TBD */
+ "Conflicting SA creation request") },
+ /* D W O BK */
+ { SST(0x01, 0x00, SS_RDEF,
+ "No index/sector signal") },
+ /* D WRO BK */
+ { SST(0x02, 0x00, SS_RDEF,
+ "No seek complete") },
+ /* DTL W O BK */
+ { SST(0x03, 0x00, SS_RDEF,
+ "Peripheral device write fault") },
+ /* T */
+ { SST(0x03, 0x01, SS_RDEF,
+ "No write current") },
+ /* T */
+ { SST(0x03, 0x02, SS_RDEF,
+ "Excessive write errors") },
+ /* DTLPWROMAEBKVF */
+ { SST(0x04, 0x00, SS_TUR | SSQ_MANY | SSQ_DECREMENT_COUNT | EIO,
+ "Logical unit not ready, cause not reportable") },
+ /* DTLPWROMAEBKVF */
+ { SST(0x04, 0x01, SS_TUR | SSQ_MANY | SSQ_DECREMENT_COUNT | EBUSY,
+ "Logical unit is in process of becoming ready") },
+ /* DTLPWROMAEBKVF */
+ { SST(0x04, 0x02, SS_START | SSQ_DECREMENT_COUNT | ENXIO,
+ "Logical unit not ready, initializing command required") },
+ /* DTLPWROMAEBKVF */
+ { SST(0x04, 0x03, SS_FATAL | ENXIO,
+ "Logical unit not ready, manual intervention required") },
+ /* DTL RO B */
+ { SST(0x04, 0x04, SS_FATAL | EBUSY,
+ "Logical unit not ready, format in progress") },
+ /* DT W O A BK F */
+ { SST(0x04, 0x05, SS_FATAL | EBUSY,
+ "Logical unit not ready, rebuild in progress") },
+ /* DT W O A BK */
+ { SST(0x04, 0x06, SS_FATAL | EBUSY,
+ "Logical unit not ready, recalculation in progress") },
+ /* DTLPWROMAEBKVF */
+ { SST(0x04, 0x07, SS_FATAL | EBUSY,
+ "Logical unit not ready, operation in progress") },
+ /* R */
+ { SST(0x04, 0x08, SS_FATAL | EBUSY,
+ "Logical unit not ready, long write in progress") },
+ /* DTLPWROMAEBKVF */
+ { SST(0x04, 0x09, SS_RDEF, /* XXX TBD */
+ "Logical unit not ready, self-test in progress") },
+ /* DTLPWROMAEBKVF */
+ { SST(0x04, 0x0A, SS_RDEF, /* XXX TBD */
+ "Logical unit not accessible, asymmetric access state transition")},
+ /* DTLPWROMAEBKVF */
+ { SST(0x04, 0x0B, SS_RDEF, /* XXX TBD */
+ "Logical unit not accessible, target port in standby state") },
+ /* DTLPWROMAEBKVF */
+ { SST(0x04, 0x0C, SS_RDEF, /* XXX TBD */
+ "Logical unit not accessible, target port in unavailable state") },
+ /* F */
+ { SST(0x04, 0x0D, SS_RDEF, /* XXX TBD */
+ "Logical unit not ready, structure check required") },
+ /* DT WROM B */
+ { SST(0x04, 0x10, SS_RDEF, /* XXX TBD */
+ "Logical unit not ready, auxiliary memory not accessible") },
+ /* DT WRO AEB VF */
+ { SST(0x04, 0x11, SS_RDEF, /* XXX TBD */
+ "Logical unit not ready, notify (enable spinup) required") },
+ /* M V */
+ { SST(0x04, 0x12, SS_RDEF, /* XXX TBD */
+ "Logical unit not ready, offline") },
+ /* DT R MAEBKV */
+ { SST(0x04, 0x13, SS_RDEF, /* XXX TBD */
+ "Logical unit not ready, SA creation in progress") },
+ /* DTL WROMAEBKVF */
+ { SST(0x05, 0x00, SS_RDEF,
+ "Logical unit does not respond to selection") },
+ /* D WROM BK */
+ { SST(0x06, 0x00, SS_RDEF,
+ "No reference position found") },
+ /* DTL WROM BK */
+ { SST(0x07, 0x00, SS_RDEF,
+ "Multiple peripheral devices selected") },
+ /* DTL WROMAEBKVF */
+ { SST(0x08, 0x00, SS_RDEF,
+ "Logical unit communication failure") },
+ /* DTL WROMAEBKVF */
+ { SST(0x08, 0x01, SS_RDEF,
+ "Logical unit communication time-out") },
+ /* DTL WROMAEBKVF */
+ { SST(0x08, 0x02, SS_RDEF,
+ "Logical unit communication parity error") },
+ /* DT ROM BK */
+ { SST(0x08, 0x03, SS_RDEF,
+ "Logical unit communication CRC error (Ultra-DMA/32)") },
+ /* DTLPWRO K */
+ { SST(0x08, 0x04, SS_RDEF, /* XXX TBD */
+ "Unreachable copy target") },
+ /* DT WRO B */
+ { SST(0x09, 0x00, SS_RDEF,
+ "Track following error") },
+ /* WRO K */
+ { SST(0x09, 0x01, SS_RDEF,
+ "Tracking servo failure") },
+ /* WRO K */
+ { SST(0x09, 0x02, SS_RDEF,
+ "Focus servo failure") },
+ /* WRO */
+ { SST(0x09, 0x03, SS_RDEF,
+ "Spindle servo failure") },
+ /* DT WRO B */
+ { SST(0x09, 0x04, SS_RDEF,
+ "Head select fault") },
+ /* DTLPWROMAEBKVF */
+ { SST(0x0A, 0x00, SS_FATAL | ENOSPC,
+ "Error log overflow") },
+ /* DTLPWROMAEBKVF */
+ { SST(0x0B, 0x00, SS_RDEF,
+ "Warning") },
+ /* DTLPWROMAEBKVF */
+ { SST(0x0B, 0x01, SS_RDEF,
+ "Warning - specified temperature exceeded") },
+ /* DTLPWROMAEBKVF */
+ { SST(0x0B, 0x02, SS_RDEF,
+ "Warning - enclosure degraded") },
+ /* DTLPWROMAEBKVF */
+ { SST(0x0B, 0x03, SS_RDEF, /* XXX TBD */
+ "Warning - background self-test failed") },
+ /* DTLPWRO AEBKVF */
+ { SST(0x0B, 0x04, SS_RDEF, /* XXX TBD */
+ "Warning - background pre-scan detected medium error") },
+ /* DTLPWRO AEBKVF */
+ { SST(0x0B, 0x05, SS_RDEF, /* XXX TBD */
+ "Warning - background medium scan detected medium error") },
+ /* DTLPWROMAEBKVF */
+ { SST(0x0B, 0x06, SS_RDEF, /* XXX TBD */
+ "Warning - non-volatile cache now volatile") },
+ /* DTLPWROMAEBKVF */
+ { SST(0x0B, 0x07, SS_RDEF, /* XXX TBD */
+ "Warning - degraded power to non-volatile cache") },
+ /* T R */
+ { SST(0x0C, 0x00, SS_RDEF,
+ "Write error") },
+ /* K */
+ { SST(0x0C, 0x01, SS_NOP | SSQ_PRINT_SENSE,
+ "Write error - recovered with auto reallocation") },
+ /* D W O BK */
+ { SST(0x0C, 0x02, SS_RDEF,
+ "Write error - auto reallocation failed") },
+ /* D W O BK */
+ { SST(0x0C, 0x03, SS_RDEF,
+ "Write error - recommend reassignment") },
+ /* DT W O B */
+ { SST(0x0C, 0x04, SS_RDEF,
+ "Compression check miscompare error") },
+ /* DT W O B */
+ { SST(0x0C, 0x05, SS_RDEF,
+ "Data expansion occurred during compression") },
+ /* DT W O B */
+ { SST(0x0C, 0x06, SS_RDEF,
+ "Block not compressible") },
+ /* R */
+ { SST(0x0C, 0x07, SS_RDEF,
+ "Write error - recovery needed") },
+ /* R */
+ { SST(0x0C, 0x08, SS_RDEF,
+ "Write error - recovery failed") },
+ /* R */
+ { SST(0x0C, 0x09, SS_RDEF,
+ "Write error - loss of streaming") },
+ /* R */
+ { SST(0x0C, 0x0A, SS_RDEF,
+ "Write error - padding blocks added") },
+ /* DT WROM B */
+ { SST(0x0C, 0x0B, SS_RDEF, /* XXX TBD */
+ "Auxiliary memory write error") },
+ /* DTLPWRO AEBKVF */
+ { SST(0x0C, 0x0C, SS_RDEF, /* XXX TBD */
+ "Write error - unexpected unsolicited data") },
+ /* DTLPWRO AEBKVF */
+ { SST(0x0C, 0x0D, SS_RDEF, /* XXX TBD */
+ "Write error - not enough unsolicited data") },
+ /* R */
+ { SST(0x0C, 0x0F, SS_RDEF, /* XXX TBD */
+ "Defects in error window") },
+ /* DTLPWRO A K */
+ { SST(0x0D, 0x00, SS_RDEF, /* XXX TBD */
+ "Error detected by third party temporary initiator") },
+ /* DTLPWRO A K */
+ { SST(0x0D, 0x01, SS_RDEF, /* XXX TBD */
+ "Third party device failure") },
+ /* DTLPWRO A K */
+ { SST(0x0D, 0x02, SS_RDEF, /* XXX TBD */
+ "Copy target device not reachable") },
+ /* DTLPWRO A K */
+ { SST(0x0D, 0x03, SS_RDEF, /* XXX TBD */
+ "Incorrect copy target device type") },
+ /* DTLPWRO A K */
+ { SST(0x0D, 0x04, SS_RDEF, /* XXX TBD */
+ "Copy target device data underrun") },
+ /* DTLPWRO A K */
+ { SST(0x0D, 0x05, SS_RDEF, /* XXX TBD */
+ "Copy target device data overrun") },
+ /* DT PWROMAEBK F */
+ { SST(0x0E, 0x00, SS_RDEF, /* XXX TBD */
+ "Invalid information unit") },
+ /* DT PWROMAEBK F */
+ { SST(0x0E, 0x01, SS_RDEF, /* XXX TBD */
+ "Information unit too short") },
+ /* DT PWROMAEBK F */
+ { SST(0x0E, 0x02, SS_RDEF, /* XXX TBD */
+ "Information unit too long") },
+ /* DT P R MAEBK F */
+ { SST(0x0E, 0x03, SS_RDEF, /* XXX TBD */
+ "Invalid field in command information unit") },
+ /* D W O BK */
+ { SST(0x10, 0x00, SS_RDEF,
+ "ID CRC or ECC error") },
+ /* DT W O */
+ { SST(0x10, 0x01, SS_RDEF, /* XXX TBD */
+ "Logical block guard check failed") },
+ /* DT W O */
+ { SST(0x10, 0x02, SS_RDEF, /* XXX TBD */
+ "Logical block application tag check failed") },
+ /* DT W O */
+ { SST(0x10, 0x03, SS_RDEF, /* XXX TBD */
+ "Logical block reference tag check failed") },
+ /* DT WRO BK */
+ { SST(0x11, 0x00, SS_RDEF,
+ "Unrecovered read error") },
+ /* DT WRO BK */
+ { SST(0x11, 0x01, SS_RDEF,
+ "Read retries exhausted") },
+ /* DT WRO BK */
+ { SST(0x11, 0x02, SS_RDEF,
+ "Error too long to correct") },
+ /* DT W O BK */
+ { SST(0x11, 0x03, SS_RDEF,
+ "Multiple read errors") },
+ /* D W O BK */
+ { SST(0x11, 0x04, SS_RDEF,
+ "Unrecovered read error - auto reallocate failed") },
+ /* WRO B */
+ { SST(0x11, 0x05, SS_RDEF,
+ "L-EC uncorrectable error") },
+ /* WRO B */
+ { SST(0x11, 0x06, SS_RDEF,
+ "CIRC unrecovered error") },
+ /* W O B */
+ { SST(0x11, 0x07, SS_RDEF,
+ "Data re-synchronization error") },
+ /* T */
+ { SST(0x11, 0x08, SS_RDEF,
+ "Incomplete block read") },
+ /* T */
+ { SST(0x11, 0x09, SS_RDEF,
+ "No gap found") },
+ /* DT O BK */
+ { SST(0x11, 0x0A, SS_RDEF,
+ "Miscorrected error") },
+ /* D W O BK */
+ { SST(0x11, 0x0B, SS_RDEF,
+ "Unrecovered read error - recommend reassignment") },
+ /* D W O BK */
+ { SST(0x11, 0x0C, SS_RDEF,
+ "Unrecovered read error - recommend rewrite the data") },
+ /* DT WRO B */
+ { SST(0x11, 0x0D, SS_RDEF,
+ "De-compression CRC error") },
+ /* DT WRO B */
+ { SST(0x11, 0x0E, SS_RDEF,
+ "Cannot decompress using declared algorithm") },
+ /* R */
+ { SST(0x11, 0x0F, SS_RDEF,
+ "Error reading UPC/EAN number") },
+ /* R */
+ { SST(0x11, 0x10, SS_RDEF,
+ "Error reading ISRC number") },
+ /* R */
+ { SST(0x11, 0x11, SS_RDEF,
+ "Read error - loss of streaming") },
+ /* DT WROM B */
+ { SST(0x11, 0x12, SS_RDEF, /* XXX TBD */
+ "Auxiliary memory read error") },
+ /* DTLPWRO AEBKVF */
+ { SST(0x11, 0x13, SS_RDEF, /* XXX TBD */
+ "Read error - failed retransmission request") },
+ /* D */
+ { SST(0x11, 0x14, SS_RDEF, /* XXX TBD */
+ "Read error - LBA marked bad by application client") },
+ /* D W O BK */
+ { SST(0x12, 0x00, SS_RDEF,
+ "Address mark not found for ID field") },
+ /* D W O BK */
+ { SST(0x13, 0x00, SS_RDEF,
+ "Address mark not found for data field") },
+ /* DTL WRO BK */
+ { SST(0x14, 0x00, SS_RDEF,
+ "Recorded entity not found") },
+ /* DT WRO BK */
+ { SST(0x14, 0x01, SS_RDEF,
+ "Record not found") },
+ /* T */
+ { SST(0x14, 0x02, SS_RDEF,
+ "Filemark or setmark not found") },
+ /* T */
+ { SST(0x14, 0x03, SS_RDEF,
+ "End-of-data not found") },
+ /* T */
+ { SST(0x14, 0x04, SS_RDEF,
+ "Block sequence error") },
+ /* DT W O BK */
+ { SST(0x14, 0x05, SS_RDEF,
+ "Record not found - recommend reassignment") },
+ /* DT W O BK */
+ { SST(0x14, 0x06, SS_RDEF,
+ "Record not found - data auto-reallocated") },
+ /* T */
+ { SST(0x14, 0x07, SS_RDEF, /* XXX TBD */
+ "Locate operation failure") },
+ /* DTL WROM BK */
+ { SST(0x15, 0x00, SS_RDEF,
+ "Random positioning error") },
+ /* DTL WROM BK */
+ { SST(0x15, 0x01, SS_RDEF,
+ "Mechanical positioning error") },
+ /* DT WRO BK */
+ { SST(0x15, 0x02, SS_RDEF,
+ "Positioning error detected by read of medium") },
+ /* D W O BK */
+ { SST(0x16, 0x00, SS_RDEF,
+ "Data synchronization mark error") },
+ /* D W O BK */
+ { SST(0x16, 0x01, SS_RDEF,
+ "Data sync error - data rewritten") },
+ /* D W O BK */
+ { SST(0x16, 0x02, SS_RDEF,
+ "Data sync error - recommend rewrite") },
+ /* D W O BK */
+ { SST(0x16, 0x03, SS_NOP | SSQ_PRINT_SENSE,
+ "Data sync error - data auto-reallocated") },
+ /* D W O BK */
+ { SST(0x16, 0x04, SS_RDEF,
+ "Data sync error - recommend reassignment") },
+ /* DT WRO BK */
+ { SST(0x17, 0x00, SS_NOP | SSQ_PRINT_SENSE,
+ "Recovered data with no error correction applied") },
+ /* DT WRO BK */
+ { SST(0x17, 0x01, SS_NOP | SSQ_PRINT_SENSE,
+ "Recovered data with retries") },
+ /* DT WRO BK */
+ { SST(0x17, 0x02, SS_NOP | SSQ_PRINT_SENSE,
+ "Recovered data with positive head offset") },
+ /* DT WRO BK */
+ { SST(0x17, 0x03, SS_NOP | SSQ_PRINT_SENSE,
+ "Recovered data with negative head offset") },
+ /* WRO B */
+ { SST(0x17, 0x04, SS_NOP | SSQ_PRINT_SENSE,
+ "Recovered data with retries and/or CIRC applied") },
+ /* D WRO BK */
+ { SST(0x17, 0x05, SS_NOP | SSQ_PRINT_SENSE,
+ "Recovered data using previous sector ID") },
+ /* D W O BK */
+ { SST(0x17, 0x06, SS_NOP | SSQ_PRINT_SENSE,
+ "Recovered data without ECC - data auto-reallocated") },
+ /* D WRO BK */
+ { SST(0x17, 0x07, SS_NOP | SSQ_PRINT_SENSE,
+ "Recovered data without ECC - recommend reassignment") },
+ /* D WRO BK */
+ { SST(0x17, 0x08, SS_NOP | SSQ_PRINT_SENSE,
+ "Recovered data without ECC - recommend rewrite") },
+ /* D WRO BK */
+ { SST(0x17, 0x09, SS_NOP | SSQ_PRINT_SENSE,
+ "Recovered data without ECC - data rewritten") },
+ /* DT WRO BK */
+ { SST(0x18, 0x00, SS_NOP | SSQ_PRINT_SENSE,
+ "Recovered data with error correction applied") },
+ /* D WRO BK */
+ { SST(0x18, 0x01, SS_NOP | SSQ_PRINT_SENSE,
+ "Recovered data with error corr. & retries applied") },
+ /* D WRO BK */
+ { SST(0x18, 0x02, SS_NOP | SSQ_PRINT_SENSE,
+ "Recovered data - data auto-reallocated") },
+ /* R */
+ { SST(0x18, 0x03, SS_NOP | SSQ_PRINT_SENSE,
+ "Recovered data with CIRC") },
+ /* R */
+ { SST(0x18, 0x04, SS_NOP | SSQ_PRINT_SENSE,
+ "Recovered data with L-EC") },
+ /* D WRO BK */
+ { SST(0x18, 0x05, SS_NOP | SSQ_PRINT_SENSE,
+ "Recovered data - recommend reassignment") },
+ /* D WRO BK */
+ { SST(0x18, 0x06, SS_NOP | SSQ_PRINT_SENSE,
+ "Recovered data - recommend rewrite") },
+ /* D W O BK */
+ { SST(0x18, 0x07, SS_NOP | SSQ_PRINT_SENSE,
+ "Recovered data with ECC - data rewritten") },
+ /* R */
+ { SST(0x18, 0x08, SS_RDEF, /* XXX TBD */
+ "Recovered data with linking") },
+ /* D O K */
+ { SST(0x19, 0x00, SS_RDEF,
+ "Defect list error") },
+ /* D O K */
+ { SST(0x19, 0x01, SS_RDEF,
+ "Defect list not available") },
+ /* D O K */
+ { SST(0x19, 0x02, SS_RDEF,
+ "Defect list error in primary list") },
+ /* D O K */
+ { SST(0x19, 0x03, SS_RDEF,
+ "Defect list error in grown list") },
+ /* DTLPWROMAEBKVF */
+ { SST(0x1A, 0x00, SS_RDEF,
+ "Parameter list length error") },
+ /* DTLPWROMAEBKVF */
+ { SST(0x1B, 0x00, SS_RDEF,
+ "Synchronous data transfer error") },
+ /* D O BK */
+ { SST(0x1C, 0x00, SS_RDEF,
+ "Defect list not found") },
+ /* D O BK */
+ { SST(0x1C, 0x01, SS_RDEF,
+ "Primary defect list not found") },
+ /* D O BK */
+ { SST(0x1C, 0x02, SS_RDEF,
+ "Grown defect list not found") },
+ /* DT WRO BK */
+ { SST(0x1D, 0x00, SS_FATAL | EIO,
+ "Miscompare during verify operation") },
+ /* D W O BK */
+ { SST(0x1E, 0x00, SS_NOP | SSQ_PRINT_SENSE,
+ "Recovered ID with ECC correction") },
+ /* D O K */
+ { SST(0x1F, 0x00, SS_RDEF,
+ "Partial defect list transfer") },
+ /* DTLPWROMAEBKVF */
+ { SST(0x20, 0x00, SS_FATAL | EINVAL,
+ "Invalid command operation code") },
+ /* DT PWROMAEBK */
+ { SST(0x20, 0x01, SS_RDEF, /* XXX TBD */
+ "Access denied - initiator pending-enrolled") },
+ /* DT PWROMAEBK */
+ { SST(0x20, 0x02, SS_RDEF, /* XXX TBD */
+ "Access denied - no access rights") },
+ /* DT PWROMAEBK */
+ { SST(0x20, 0x03, SS_RDEF, /* XXX TBD */
+ "Access denied - invalid mgmt ID key") },
+ /* T */
+ { SST(0x20, 0x04, SS_RDEF, /* XXX TBD */
+ "Illegal command while in write capable state") },
+ /* T */
+ { SST(0x20, 0x05, SS_RDEF, /* XXX TBD */
+ "Obsolete") },
+ /* T */
+ { SST(0x20, 0x06, SS_RDEF, /* XXX TBD */
+ "Illegal command while in explicit address mode") },
+ /* T */
+ { SST(0x20, 0x07, SS_RDEF, /* XXX TBD */
+ "Illegal command while in implicit address mode") },
+ /* DT PWROMAEBK */
+ { SST(0x20, 0x08, SS_RDEF, /* XXX TBD */
+ "Access denied - enrollment conflict") },
+ /* DT PWROMAEBK */
+ { SST(0x20, 0x09, SS_RDEF, /* XXX TBD */
+ "Access denied - invalid LU identifier") },
+ /* DT PWROMAEBK */
+ { SST(0x20, 0x0A, SS_RDEF, /* XXX TBD */
+ "Access denied - invalid proxy token") },
+ /* DT PWROMAEBK */
+ { SST(0x20, 0x0B, SS_RDEF, /* XXX TBD */
+ "Access denied - ACL LUN conflict") },
+ /* DT WRO BK */
+ { SST(0x21, 0x00, SS_FATAL | EINVAL,
+ "Logical block address out of range") },
+ /* DT WROM BK */
+ { SST(0x21, 0x01, SS_FATAL | EINVAL,
+ "Invalid element address") },
+ /* R */
+ { SST(0x21, 0x02, SS_RDEF, /* XXX TBD */
+ "Invalid address for write") },
+ /* R */
+ { SST(0x21, 0x03, SS_RDEF, /* XXX TBD */
+ "Invalid write crossing layer jump") },
+ /* D */
+ { SST(0x22, 0x00, SS_FATAL | EINVAL,
+ "Illegal function (use 20 00, 24 00, or 26 00)") },
+ /* DTLPWROMAEBKVF */
+ { SST(0x24, 0x00, SS_FATAL | EINVAL,
+ "Invalid field in CDB") },
+ /* DTLPWRO AEBKVF */
+ { SST(0x24, 0x01, SS_RDEF, /* XXX TBD */
+ "CDB decryption error") },
+ /* T */
+ { SST(0x24, 0x02, SS_RDEF, /* XXX TBD */
+ "Obsolete") },
+ /* T */
+ { SST(0x24, 0x03, SS_RDEF, /* XXX TBD */
+ "Obsolete") },
+ /* F */
+ { SST(0x24, 0x04, SS_RDEF, /* XXX TBD */
+ "Security audit value frozen") },
+ /* F */
+ { SST(0x24, 0x05, SS_RDEF, /* XXX TBD */
+ "Security working key frozen") },
+ /* F */
+ { SST(0x24, 0x06, SS_RDEF, /* XXX TBD */
+ "NONCE not unique") },
+ /* F */
+ { SST(0x24, 0x07, SS_RDEF, /* XXX TBD */
+ "NONCE timestamp out of range") },
+ /* DT R MAEBKV */
+ { SST(0x24, 0x08, SS_RDEF, /* XXX TBD */
+ "Invalid XCDB") },
+ /* DTLPWROMAEBKVF */
+ { SST(0x25, 0x00, SS_FATAL | ENXIO,
+ "Logical unit not supported") },
+ /* DTLPWROMAEBKVF */
+ { SST(0x26, 0x00, SS_FATAL | EINVAL,
+ "Invalid field in parameter list") },
+ /* DTLPWROMAEBKVF */
+ { SST(0x26, 0x01, SS_FATAL | EINVAL,
+ "Parameter not supported") },
+ /* DTLPWROMAEBKVF */
+ { SST(0x26, 0x02, SS_FATAL | EINVAL,
+ "Parameter value invalid") },
+ /* DTLPWROMAE K */
+ { SST(0x26, 0x03, SS_FATAL | EINVAL,
+ "Threshold parameters not supported") },
+ /* DTLPWROMAEBKVF */
+ { SST(0x26, 0x04, SS_FATAL | EINVAL,
+ "Invalid release of persistent reservation") },
+ /* DTLPWRO A BK */
+ { SST(0x26, 0x05, SS_RDEF, /* XXX TBD */
+ "Data decryption error") },
+ /* DTLPWRO K */
+ { SST(0x26, 0x06, SS_RDEF, /* XXX TBD */
+ "Too many target descriptors") },
+ /* DTLPWRO K */
+ { SST(0x26, 0x07, SS_RDEF, /* XXX TBD */
+ "Unsupported target descriptor type code") },
+ /* DTLPWRO K */
+ { SST(0x26, 0x08, SS_RDEF, /* XXX TBD */
+ "Too many segment descriptors") },
+ /* DTLPWRO K */
+ { SST(0x26, 0x09, SS_RDEF, /* XXX TBD */
+ "Unsupported segment descriptor type code") },
+ /* DTLPWRO K */
+ { SST(0x26, 0x0A, SS_RDEF, /* XXX TBD */
+ "Unexpected inexact segment") },
+ /* DTLPWRO K */
+ { SST(0x26, 0x0B, SS_RDEF, /* XXX TBD */
+ "Inline data length exceeded") },
+ /* DTLPWRO K */
+ { SST(0x26, 0x0C, SS_RDEF, /* XXX TBD */
+ "Invalid operation for copy source or destination") },
+ /* DTLPWRO K */
+ { SST(0x26, 0x0D, SS_RDEF, /* XXX TBD */
+ "Copy segment granularity violation") },
+ /* DT PWROMAEBK */
+ { SST(0x26, 0x0E, SS_RDEF, /* XXX TBD */
+ "Invalid parameter while port is enabled") },
+ /* F */
+ { SST(0x26, 0x0F, SS_RDEF, /* XXX TBD */
+ "Invalid data-out buffer integrity check value") },
+ /* T */
+ { SST(0x26, 0x10, SS_RDEF, /* XXX TBD */
+ "Data decryption key fail limit reached") },
+ /* T */
+ { SST(0x26, 0x11, SS_RDEF, /* XXX TBD */
+ "Incomplete key-associated data set") },
+ /* T */
+ { SST(0x26, 0x12, SS_RDEF, /* XXX TBD */
+ "Vendor specific key reference not found") },
+ /* DT WRO BK */
+ { SST(0x27, 0x00, SS_FATAL | EACCES,
+ "Write protected") },
+ /* DT WRO BK */
+ { SST(0x27, 0x01, SS_FATAL | EACCES,
+ "Hardware write protected") },
+ /* DT WRO BK */
+ { SST(0x27, 0x02, SS_FATAL | EACCES,
+ "Logical unit software write protected") },
+ /* T R */
+ { SST(0x27, 0x03, SS_FATAL | EACCES,
+ "Associated write protect") },
+ /* T R */
+ { SST(0x27, 0x04, SS_FATAL | EACCES,
+ "Persistent write protect") },
+ /* T R */
+ { SST(0x27, 0x05, SS_FATAL | EACCES,
+ "Permanent write protect") },
+ /* R F */
+ { SST(0x27, 0x06, SS_RDEF, /* XXX TBD */
+ "Conditional write protect") },
+ /* DTLPWROMAEBKVF */
+ { SST(0x28, 0x00, SS_FATAL | ENXIO,
+ "Not ready to ready change, medium may have changed") },
+ /* DT WROM B */
+ { SST(0x28, 0x01, SS_FATAL | ENXIO,
+ "Import or export element accessed") },
+ /* R */
+ { SST(0x28, 0x02, SS_RDEF, /* XXX TBD */
+ "Format-layer may have changed") },
+ /* M */
+ { SST(0x28, 0x03, SS_RDEF, /* XXX TBD */
+ "Import/export element accessed, medium changed") },
+ /*
+ * XXX JGibbs - All of these should use the same errno, but I don't
+ * think ENXIO is the correct choice. Should we borrow from
+ * the networking errnos? ECONNRESET anyone?
+ */
+ /* DTLPWROMAEBKVF */
+ { SST(0x29, 0x00, SS_FATAL | ENXIO,
+ "Power on, reset, or bus device reset occurred") },
+ /* DTLPWROMAEBKVF */
+ { SST(0x29, 0x01, SS_RDEF,
+ "Power on occurred") },
+ /* DTLPWROMAEBKVF */
+ { SST(0x29, 0x02, SS_RDEF,
+ "SCSI bus reset occurred") },
+ /* DTLPWROMAEBKVF */
+ { SST(0x29, 0x03, SS_RDEF,
+ "Bus device reset function occurred") },
+ /* DTLPWROMAEBKVF */
+ { SST(0x29, 0x04, SS_RDEF,
+ "Device internal reset") },
+ /* DTLPWROMAEBKVF */
+ { SST(0x29, 0x05, SS_RDEF,
+ "Transceiver mode changed to single-ended") },
+ /* DTLPWROMAEBKVF */
+ { SST(0x29, 0x06, SS_RDEF,
+ "Transceiver mode changed to LVD") },
+ /* DTLPWROMAEBKVF */
+ { SST(0x29, 0x07, SS_RDEF, /* XXX TBD */
+ "I_T nexus loss occurred") },
+ /* DTL WROMAEBKVF */
+ { SST(0x2A, 0x00, SS_RDEF,
+ "Parameters changed") },
+ /* DTL WROMAEBKVF */
+ { SST(0x2A, 0x01, SS_RDEF,
+ "Mode parameters changed") },
+ /* DTL WROMAE K */
+ { SST(0x2A, 0x02, SS_RDEF,
+ "Log parameters changed") },
+ /* DTLPWROMAE K */
+ { SST(0x2A, 0x03, SS_RDEF,
+ "Reservations preempted") },
+ /* DTLPWROMAE */
+ { SST(0x2A, 0x04, SS_RDEF, /* XXX TBD */
+ "Reservations released") },
+ /* DTLPWROMAE */
+ { SST(0x2A, 0x05, SS_RDEF, /* XXX TBD */
+ "Registrations preempted") },
+ /* DTLPWROMAEBKVF */
+ { SST(0x2A, 0x06, SS_RDEF, /* XXX TBD */
+ "Asymmetric access state changed") },
+ /* DTLPWROMAEBKVF */
+ { SST(0x2A, 0x07, SS_RDEF, /* XXX TBD */
+ "Implicit asymmetric access state transition failed") },
+ /* DT WROMAEBKVF */
+ { SST(0x2A, 0x08, SS_RDEF, /* XXX TBD */
+ "Priority changed") },
+ /* D */
+ { SST(0x2A, 0x09, SS_RDEF, /* XXX TBD */
+ "Capacity data has changed") },
+ /* DT */
+ { SST(0x2A, 0x0A, SS_RDEF, /* XXX TBD */
+ "Error history I_T nexus cleared") },
+ /* DT */
+ { SST(0x2A, 0x0B, SS_RDEF, /* XXX TBD */
+ "Error history snapshot released") },
+ /* F */
+ { SST(0x2A, 0x0C, SS_RDEF, /* XXX TBD */
+ "Error recovery attributes have changed") },
+ /* T */
+ { SST(0x2A, 0x0D, SS_RDEF, /* XXX TBD */
+ "Data encryption capabilities changed") },
+ /* DT M E V */
+ { SST(0x2A, 0x10, SS_RDEF, /* XXX TBD */
+ "Timestamp changed") },
+ /* T */
+ { SST(0x2A, 0x11, SS_RDEF, /* XXX TBD */
+ "Data encryption parameters changed by another I_T nexus") },
+ /* T */
+ { SST(0x2A, 0x12, SS_RDEF, /* XXX TBD */
+ "Data encryption parameters changed by vendor specific event") },
+ /* T */
+ { SST(0x2A, 0x13, SS_RDEF, /* XXX TBD */
+ "Data encryption key instance counter has changed") },
+ /* DT R MAEBKV */
+ { SST(0x2A, 0x14, SS_RDEF, /* XXX TBD */
+ "SA creation capabilities data has changed") },
+ /* DTLPWRO K */
+ { SST(0x2B, 0x00, SS_RDEF,
+ "Copy cannot execute since host cannot disconnect") },
+ /* DTLPWROMAEBKVF */
+ { SST(0x2C, 0x00, SS_RDEF,
+ "Command sequence error") },
+ /* */
+ { SST(0x2C, 0x01, SS_RDEF,
+ "Too many windows specified") },
+ /* */
+ { SST(0x2C, 0x02, SS_RDEF,
+ "Invalid combination of windows specified") },
+ /* R */
+ { SST(0x2C, 0x03, SS_RDEF,
+ "Current program area is not empty") },
+ /* R */
+ { SST(0x2C, 0x04, SS_RDEF,
+ "Current program area is empty") },
+ /* B */
+ { SST(0x2C, 0x05, SS_RDEF, /* XXX TBD */
+ "Illegal power condition request") },
+ /* R */
+ { SST(0x2C, 0x06, SS_RDEF, /* XXX TBD */
+ "Persistent prevent conflict") },
+ /* DTLPWROMAEBKVF */
+ { SST(0x2C, 0x07, SS_RDEF, /* XXX TBD */
+ "Previous busy status") },
+ /* DTLPWROMAEBKVF */
+ { SST(0x2C, 0x08, SS_RDEF, /* XXX TBD */
+ "Previous task set full status") },
+ /* DTLPWROM EBKVF */
+ { SST(0x2C, 0x09, SS_RDEF, /* XXX TBD */
+ "Previous reservation conflict status") },
+ /* F */
+ { SST(0x2C, 0x0A, SS_RDEF, /* XXX TBD */
+ "Partition or collection contains user objects") },
+ /* T */
+ { SST(0x2C, 0x0B, SS_RDEF, /* XXX TBD */
+ "Not reserved") },
+ /* T */
+ { SST(0x2D, 0x00, SS_RDEF,
+ "Overwrite error on update in place") },
+ /* R */
+ { SST(0x2E, 0x00, SS_RDEF, /* XXX TBD */
+ "Insufficient time for operation") },
+ /* DTLPWROMAEBKVF */
+ { SST(0x2F, 0x00, SS_RDEF,
+ "Commands cleared by another initiator") },
+ /* D */
+ { SST(0x2F, 0x01, SS_RDEF, /* XXX TBD */
+ "Commands cleared by power loss notification") },
+ /* DTLPWROMAEBKVF */
+ { SST(0x2F, 0x02, SS_RDEF, /* XXX TBD */
+ "Commands cleared by device server") },
+ /* DT WROM BK */
+ { SST(0x30, 0x00, SS_RDEF,
+ "Incompatible medium installed") },
+ /* DT WRO BK */
+ { SST(0x30, 0x01, SS_RDEF,
+ "Cannot read medium - unknown format") },
+ /* DT WRO BK */
+ { SST(0x30, 0x02, SS_RDEF,
+ "Cannot read medium - incompatible format") },
+ /* DT R K */
+ { SST(0x30, 0x03, SS_RDEF,
+ "Cleaning cartridge installed") },
+ /* DT WRO BK */
+ { SST(0x30, 0x04, SS_RDEF,
+ "Cannot write medium - unknown format") },
+ /* DT WRO BK */
+ { SST(0x30, 0x05, SS_RDEF,
+ "Cannot write medium - incompatible format") },
+ /* DT WRO B */
+ { SST(0x30, 0x06, SS_RDEF,
+ "Cannot format medium - incompatible medium") },
+ /* DTL WROMAEBKVF */
+ { SST(0x30, 0x07, SS_RDEF,
+ "Cleaning failure") },
+ /* R */
+ { SST(0x30, 0x08, SS_RDEF,
+ "Cannot write - application code mismatch") },
+ /* R */
+ { SST(0x30, 0x09, SS_RDEF,
+ "Current session not fixated for append") },
+ /* DT WRO AEBK */
+ { SST(0x30, 0x0A, SS_RDEF, /* XXX TBD */
+ "Cleaning request rejected") },
+ /* T */
+ { SST(0x30, 0x0C, SS_RDEF, /* XXX TBD */
+ "WORM medium - overwrite attempted") },
+ /* T */
+ { SST(0x30, 0x0D, SS_RDEF, /* XXX TBD */
+ "WORM medium - integrity check") },
+ /* R */
+ { SST(0x30, 0x10, SS_RDEF, /* XXX TBD */
+ "Medium not formatted") },
+ /* M */
+ { SST(0x30, 0x11, SS_RDEF, /* XXX TBD */
+ "Incompatible volume type") },
+ /* M */
+ { SST(0x30, 0x12, SS_RDEF, /* XXX TBD */
+ "Incompatible volume qualifier") },
+ /* DT WRO BK */
+ { SST(0x31, 0x00, SS_RDEF,
+ "Medium format corrupted") },
+ /* D L RO B */
+ { SST(0x31, 0x01, SS_RDEF,
+ "Format command failed") },
+ /* R */
+ { SST(0x31, 0x02, SS_RDEF, /* XXX TBD */
+ "Zoned formatting failed due to spare linking") },
+ /* D W O BK */
+ { SST(0x32, 0x00, SS_RDEF,
+ "No defect spare location available") },
+ /* D W O BK */
+ { SST(0x32, 0x01, SS_RDEF,
+ "Defect list update failure") },
+ /* T */
+ { SST(0x33, 0x00, SS_RDEF,
+ "Tape length error") },
+ /* DTLPWROMAEBKVF */
+ { SST(0x34, 0x00, SS_RDEF,
+ "Enclosure failure") },
+ /* DTLPWROMAEBKVF */
+ { SST(0x35, 0x00, SS_RDEF,
+ "Enclosure services failure") },
+ /* DTLPWROMAEBKVF */
+ { SST(0x35, 0x01, SS_RDEF,
+ "Unsupported enclosure function") },
+ /* DTLPWROMAEBKVF */
+ { SST(0x35, 0x02, SS_RDEF,
+ "Enclosure services unavailable") },
+ /* DTLPWROMAEBKVF */
+ { SST(0x35, 0x03, SS_RDEF,
+ "Enclosure services transfer failure") },
+ /* DTLPWROMAEBKVF */
+ { SST(0x35, 0x04, SS_RDEF,
+ "Enclosure services transfer refused") },
+ /* DTL WROMAEBKVF */
+ { SST(0x35, 0x05, SS_RDEF, /* XXX TBD */
+ "Enclosure services checksum error") },
+ /* L */
+ { SST(0x36, 0x00, SS_RDEF,
+ "Ribbon, ink, or toner failure") },
+ /* DTL WROMAEBKVF */
+ { SST(0x37, 0x00, SS_RDEF,
+ "Rounded parameter") },
+ /* B */
+ { SST(0x38, 0x00, SS_RDEF, /* XXX TBD */
+ "Event status notification") },
+ /* B */
+ { SST(0x38, 0x02, SS_RDEF, /* XXX TBD */
+ "ESN - power management class event") },
+ /* B */
+ { SST(0x38, 0x04, SS_RDEF, /* XXX TBD */
+ "ESN - media class event") },
+ /* B */
+ { SST(0x38, 0x06, SS_RDEF, /* XXX TBD */
+ "ESN - device busy class event") },
+ /* DTL WROMAE K */
+ { SST(0x39, 0x00, SS_RDEF,
+ "Saving parameters not supported") },
+ /* DTL WROM BK */
+ { SST(0x3A, 0x00, SS_FATAL | ENXIO,
+ "Medium not present") },
+ /* DT WROM BK */
+ { SST(0x3A, 0x01, SS_FATAL | ENXIO,
+ "Medium not present - tray closed") },
+ /* DT WROM BK */
+ { SST(0x3A, 0x02, SS_FATAL | ENXIO,
+ "Medium not present - tray open") },
+ /* DT WROM B */
+ { SST(0x3A, 0x03, SS_RDEF, /* XXX TBD */
+ "Medium not present - loadable") },
+ /* DT WRO B */
+ { SST(0x3A, 0x04, SS_RDEF, /* XXX TBD */
+ "Medium not present - medium auxiliary memory accessible") },
+ /* TL */
+ { SST(0x3B, 0x00, SS_RDEF,
+ "Sequential positioning error") },
+ /* T */
+ { SST(0x3B, 0x01, SS_RDEF,
+ "Tape position error at beginning-of-medium") },
+ /* T */
+ { SST(0x3B, 0x02, SS_RDEF,
+ "Tape position error at end-of-medium") },
+ /* L */
+ { SST(0x3B, 0x03, SS_RDEF,
+ "Tape or electronic vertical forms unit not ready") },
+ /* L */
+ { SST(0x3B, 0x04, SS_RDEF,
+ "Slew failure") },
+ /* L */
+ { SST(0x3B, 0x05, SS_RDEF,
+ "Paper jam") },
+ /* L */
+ { SST(0x3B, 0x06, SS_RDEF,
+ "Failed to sense top-of-form") },
+ /* L */
+ { SST(0x3B, 0x07, SS_RDEF,
+ "Failed to sense bottom-of-form") },
+ /* T */
+ { SST(0x3B, 0x08, SS_RDEF,
+ "Reposition error") },
+ /* */
+ { SST(0x3B, 0x09, SS_RDEF,
+ "Read past end of medium") },
+ /* */
+ { SST(0x3B, 0x0A, SS_RDEF,
+ "Read past beginning of medium") },
+ /* */
+ { SST(0x3B, 0x0B, SS_RDEF,
+ "Position past end of medium") },
+ /* T */
+ { SST(0x3B, 0x0C, SS_RDEF,
+ "Position past beginning of medium") },
+ /* DT WROM BK */
+ { SST(0x3B, 0x0D, SS_FATAL | ENOSPC,
+ "Medium destination element full") },
+ /* DT WROM BK */
+ { SST(0x3B, 0x0E, SS_RDEF,
+ "Medium source element empty") },
+ /* R */
+ { SST(0x3B, 0x0F, SS_RDEF,
+ "End of medium reached") },
+ /* DT WROM BK */
+ { SST(0x3B, 0x11, SS_RDEF,
+ "Medium magazine not accessible") },
+ /* DT WROM BK */
+ { SST(0x3B, 0x12, SS_RDEF,
+ "Medium magazine removed") },
+ /* DT WROM BK */
+ { SST(0x3B, 0x13, SS_RDEF,
+ "Medium magazine inserted") },
+ /* DT WROM BK */
+ { SST(0x3B, 0x14, SS_RDEF,
+ "Medium magazine locked") },
+ /* DT WROM BK */
+ { SST(0x3B, 0x15, SS_RDEF,
+ "Medium magazine unlocked") },
+ /* R */
+ { SST(0x3B, 0x16, SS_RDEF, /* XXX TBD */
+ "Mechanical positioning or changer error") },
+ /* F */
+ { SST(0x3B, 0x17, SS_RDEF, /* XXX TBD */
+ "Read past end of user object") },
+ /* M */
+ { SST(0x3B, 0x18, SS_RDEF, /* XXX TBD */
+ "Element disabled") },
+ /* M */
+ { SST(0x3B, 0x19, SS_RDEF, /* XXX TBD */
+ "Element enabled") },
+ /* M */
+ { SST(0x3B, 0x1A, SS_RDEF, /* XXX TBD */
+ "Data transfer device removed") },
+ /* M */
+ { SST(0x3B, 0x1B, SS_RDEF, /* XXX TBD */
+ "Data transfer device inserted") },
+ /* DTLPWROMAE K */
+ { SST(0x3D, 0x00, SS_RDEF,
+ "Invalid bits in IDENTIFY message") },
+ /* DTLPWROMAEBKVF */
+ { SST(0x3E, 0x00, SS_RDEF,
+ "Logical unit has not self-configured yet") },
+ /* DTLPWROMAEBKVF */
+ { SST(0x3E, 0x01, SS_RDEF,
+ "Logical unit failure") },
+ /* DTLPWROMAEBKVF */
+ { SST(0x3E, 0x02, SS_RDEF,
+ "Timeout on logical unit") },
+ /* DTLPWROMAEBKVF */
+ { SST(0x3E, 0x03, SS_RDEF, /* XXX TBD */
+ "Logical unit failed self-test") },
+ /* DTLPWROMAEBKVF */
+ { SST(0x3E, 0x04, SS_RDEF, /* XXX TBD */
+ "Logical unit unable to update self-test log") },
+ /* DTLPWROMAEBKVF */
+ { SST(0x3F, 0x00, SS_RDEF,
+ "Target operating conditions have changed") },
+ /* DTLPWROMAEBKVF */
+ { SST(0x3F, 0x01, SS_RDEF,
+ "Microcode has been changed") },
+ /* DTLPWROM BK */
+ { SST(0x3F, 0x02, SS_RDEF,
+ "Changed operating definition") },
+ /* DTLPWROMAEBKVF */
+ { SST(0x3F, 0x03, SS_RDEF,
+ "INQUIRY data has changed") },
+ /* DT WROMAEBK */
+ { SST(0x3F, 0x04, SS_RDEF,
+ "Component device attached") },
+ /* DT WROMAEBK */
+ { SST(0x3F, 0x05, SS_RDEF,
+ "Device identifier changed") },
+ /* DT WROMAEB */
+ { SST(0x3F, 0x06, SS_RDEF,
+ "Redundancy group created or modified") },
+ /* DT WROMAEB */
+ { SST(0x3F, 0x07, SS_RDEF,
+ "Redundancy group deleted") },
+ /* DT WROMAEB */
+ { SST(0x3F, 0x08, SS_RDEF,
+ "Spare created or modified") },
+ /* DT WROMAEB */
+ { SST(0x3F, 0x09, SS_RDEF,
+ "Spare deleted") },
+ /* DT WROMAEBK */
+ { SST(0x3F, 0x0A, SS_RDEF,
+ "Volume set created or modified") },
+ /* DT WROMAEBK */
+ { SST(0x3F, 0x0B, SS_RDEF,
+ "Volume set deleted") },
+ /* DT WROMAEBK */
+ { SST(0x3F, 0x0C, SS_RDEF,
+ "Volume set deassigned") },
+ /* DT WROMAEBK */
+ { SST(0x3F, 0x0D, SS_RDEF,
+ "Volume set reassigned") },
+ /* DTLPWROMAE */
+ { SST(0x3F, 0x0E, SS_RDEF, /* XXX TBD */
+ "Reported LUNs data has changed") },
+ /* DTLPWROMAEBKVF */
+ { SST(0x3F, 0x0F, SS_RDEF, /* XXX TBD */
+ "Echo buffer overwritten") },
+ /* DT WROM B */
+ { SST(0x3F, 0x10, SS_RDEF, /* XXX TBD */
+ "Medium loadable") },
+ /* DT WROM B */
+ { SST(0x3F, 0x11, SS_RDEF, /* XXX TBD */
+ "Medium auxiliary memory accessible") },
+ /* DTLPWR MAEBK F */
+ { SST(0x3F, 0x12, SS_RDEF, /* XXX TBD */
+ "iSCSI IP address added") },
+ /* DTLPWR MAEBK F */
+ { SST(0x3F, 0x13, SS_RDEF, /* XXX TBD */
+ "iSCSI IP address removed") },
+ /* DTLPWR MAEBK F */
+ { SST(0x3F, 0x14, SS_RDEF, /* XXX TBD */
+ "iSCSI IP address changed") },
+ /* D */
+ { SST(0x40, 0x00, SS_RDEF,
+ "RAM failure") }, /* deprecated - use 40 NN instead */
+ /* DTLPWROMAEBKVF */
+ { SST(0x40, 0x80, SS_RDEF,
+ "Diagnostic failure: ASCQ = Component ID") },
+ /* DTLPWROMAEBKVF */
+ { SST(0x40, 0xFF, SS_RDEF | SSQ_RANGE,
+ NULL) }, /* Range 0x80->0xFF */
+ /* D */
+ { SST(0x41, 0x00, SS_RDEF,
+ "Data path failure") }, /* deprecated - use 40 NN instead */
+ /* D */
+ { SST(0x42, 0x00, SS_RDEF,
+ "Power-on or self-test failure") },
+ /* deprecated - use 40 NN instead */
+ /* DTLPWROMAEBKVF */
+ { SST(0x43, 0x00, SS_RDEF,
+ "Message error") },
+ /* DTLPWROMAEBKVF */
+ { SST(0x44, 0x00, SS_RDEF,
+ "Internal target failure") },
+ /* DT B */
+ { SST(0x44, 0x71, SS_RDEF, /* XXX TBD */
+ "ATA device failed set features") },
+ /* DTLPWROMAEBKVF */
+ { SST(0x45, 0x00, SS_RDEF,
+ "Select or reselect failure") },
+ /* DTLPWROM BK */
+ { SST(0x46, 0x00, SS_RDEF,
+ "Unsuccessful soft reset") },
+ /* DTLPWROMAEBKVF */
+ { SST(0x47, 0x00, SS_RDEF,
+ "SCSI parity error") },
+ /* DTLPWROMAEBKVF */
+ { SST(0x47, 0x01, SS_RDEF, /* XXX TBD */
+ "Data phase CRC error detected") },
+ /* DTLPWROMAEBKVF */
+ { SST(0x47, 0x02, SS_RDEF, /* XXX TBD */
+ "SCSI parity error detected during ST data phase") },
+ /* DTLPWROMAEBKVF */
+ { SST(0x47, 0x03, SS_RDEF, /* XXX TBD */
+ "Information unit iuCRC error detected") },
+ /* DTLPWROMAEBKVF */
+ { SST(0x47, 0x04, SS_RDEF, /* XXX TBD */
+ "Asynchronous information protection error detected") },
+ /* DTLPWROMAEBKVF */
+ { SST(0x47, 0x05, SS_RDEF, /* XXX TBD */
+ "Protocol service CRC error") },
+ /* DT MAEBKVF */
+ { SST(0x47, 0x06, SS_RDEF, /* XXX TBD */
+ "PHY test function in progress") },
+ /* DT PWROMAEBK */
+ { SST(0x47, 0x7F, SS_RDEF, /* XXX TBD */
+ "Some commands cleared by iSCSI protocol event") },
+ /* DTLPWROMAEBKVF */
+ { SST(0x48, 0x00, SS_RDEF,
+ "Initiator detected error message received") },
+ /* DTLPWROMAEBKVF */
+ { SST(0x49, 0x00, SS_RDEF,
+ "Invalid message error") },
+ /* DTLPWROMAEBKVF */
+ { SST(0x4A, 0x00, SS_RDEF,
+ "Command phase error") },
+ /* DTLPWROMAEBKVF */
+ { SST(0x4B, 0x00, SS_RDEF,
+ "Data phase error") },
+ /* DT PWROMAEBK */
+ { SST(0x4B, 0x01, SS_RDEF, /* XXX TBD */
+ "Invalid target port transfer tag received") },
+ /* DT PWROMAEBK */
+ { SST(0x4B, 0x02, SS_RDEF, /* XXX TBD */
+ "Too much write data") },
+ /* DT PWROMAEBK */
+ { SST(0x4B, 0x03, SS_RDEF, /* XXX TBD */
+ "ACK/NAK timeout") },
+ /* DT PWROMAEBK */
+ { SST(0x4B, 0x04, SS_RDEF, /* XXX TBD */
+ "NAK received") },
+ /* DT PWROMAEBK */
+ { SST(0x4B, 0x05, SS_RDEF, /* XXX TBD */
+ "Data offset error") },
+ /* DT PWROMAEBK */
+ { SST(0x4B, 0x06, SS_RDEF, /* XXX TBD */
+ "Initiator response timeout") },
+ /* DTLPWROMAEBKVF */
+ { SST(0x4C, 0x00, SS_RDEF,
+ "Logical unit failed self-configuration") },
+ /* DTLPWROMAEBKVF */
+ { SST(0x4D, 0x00, SS_RDEF,
+ "Tagged overlapped commands: ASCQ = Queue tag ID") },
+ /* DTLPWROMAEBKVF */
+ { SST(0x4D, 0xFF, SS_RDEF | SSQ_RANGE,
+ NULL) }, /* Range 0x00->0xFF */
+ /* DTLPWROMAEBKVF */
+ { SST(0x4E, 0x00, SS_RDEF,
+ "Overlapped commands attempted") },
+ /* T */
+ { SST(0x50, 0x00, SS_RDEF,
+ "Write append error") },
+ /* T */
+ { SST(0x50, 0x01, SS_RDEF,
+ "Write append position error") },
+ /* T */
+ { SST(0x50, 0x02, SS_RDEF,
+ "Position error related to timing") },
+ /* T RO */
+ { SST(0x51, 0x00, SS_RDEF,
+ "Erase failure") },
+ /* R */
+ { SST(0x51, 0x01, SS_RDEF, /* XXX TBD */
+ "Erase failure - incomplete erase operation detected") },
+ /* T */
+ { SST(0x52, 0x00, SS_RDEF,
+ "Cartridge fault") },
+ /* DTL WROM BK */
+ { SST(0x53, 0x00, SS_RDEF,
+ "Media load or eject failed") },
+ /* T */
+ { SST(0x53, 0x01, SS_RDEF,
+ "Unload tape failure") },
+ /* DT WROM BK */
+ { SST(0x53, 0x02, SS_RDEF,
+ "Medium removal prevented") },
+ /* M */
+ { SST(0x53, 0x03, SS_RDEF, /* XXX TBD */
+ "Medium removal prevented by data transfer element") },
+ /* T */
+ { SST(0x53, 0x04, SS_RDEF, /* XXX TBD */
+ "Medium thread or unthread failure") },
+ /* P */
+ { SST(0x54, 0x00, SS_RDEF,
+ "SCSI to host system interface failure") },
+ /* P */
+ { SST(0x55, 0x00, SS_RDEF,
+ "System resource failure") },
+ /* D O BK */
+ { SST(0x55, 0x01, SS_FATAL | ENOSPC,
+ "System buffer full") },
+ /* DTLPWROMAE K */
+ { SST(0x55, 0x02, SS_RDEF, /* XXX TBD */
+ "Insufficient reservation resources") },
+ /* DTLPWROMAE K */
+ { SST(0x55, 0x03, SS_RDEF, /* XXX TBD */
+ "Insufficient resources") },
+ /* DTLPWROMAE K */
+ { SST(0x55, 0x04, SS_RDEF, /* XXX TBD */
+ "Insufficient registration resources") },
+ /* DT PWROMAEBK */
+ { SST(0x55, 0x05, SS_RDEF, /* XXX TBD */
+ "Insufficient access control resources") },
+ /* DT WROM B */
+ { SST(0x55, 0x06, SS_RDEF, /* XXX TBD */
+ "Auxiliary memory out of space") },
+ /* F */
+ { SST(0x55, 0x07, SS_RDEF, /* XXX TBD */
+ "Quota error") },
+ /* T */
+ { SST(0x55, 0x08, SS_RDEF, /* XXX TBD */
+ "Maximum number of supplemental decryption keys exceeded") },
+ /* M */
+ { SST(0x55, 0x09, SS_RDEF, /* XXX TBD */
+ "Medium auxiliary memory not accessible") },
+ /* M */
+ { SST(0x55, 0x0A, SS_RDEF, /* XXX TBD */
+ "Data currently unavailable") },
+ /* R */
+ { SST(0x57, 0x00, SS_RDEF,
+ "Unable to recover table-of-contents") },
+ /* O */
+ { SST(0x58, 0x00, SS_RDEF,
+ "Generation does not exist") },
+ /* O */
+ { SST(0x59, 0x00, SS_RDEF,
+ "Updated block read") },
+ /* DTLPWRO BK */
+ { SST(0x5A, 0x00, SS_RDEF,
+ "Operator request or state change input") },
+ /* DT WROM BK */
+ { SST(0x5A, 0x01, SS_RDEF,
+ "Operator medium removal request") },
+ /* DT WRO A BK */
+ { SST(0x5A, 0x02, SS_RDEF,
+ "Operator selected write protect") },
+ /* DT WRO A BK */
+ { SST(0x5A, 0x03, SS_RDEF,
+ "Operator selected write permit") },
+ /* DTLPWROM K */
+ { SST(0x5B, 0x00, SS_RDEF,
+ "Log exception") },
+ /* DTLPWROM K */
+ { SST(0x5B, 0x01, SS_RDEF,
+ "Threshold condition met") },
+ /* DTLPWROM K */
+ { SST(0x5B, 0x02, SS_RDEF,
+ "Log counter at maximum") },
+ /* DTLPWROM K */
+ { SST(0x5B, 0x03, SS_RDEF,
+ "Log list codes exhausted") },
+ /* D O */
+ { SST(0x5C, 0x00, SS_RDEF,
+ "RPL status change") },
+ /* D O */
+ { SST(0x5C, 0x01, SS_NOP | SSQ_PRINT_SENSE,
+ "Spindles synchronized") },
+ /* D O */
+ { SST(0x5C, 0x02, SS_RDEF,
+ "Spindles not synchronized") },
+ /* DTLPWROMAEBKVF */
+ { SST(0x5D, 0x00, SS_RDEF,
+ "Failure prediction threshold exceeded") },
+ /* R B */
+ { SST(0x5D, 0x01, SS_RDEF, /* XXX TBD */
+ "Media failure prediction threshold exceeded") },
+ /* R */
+ { SST(0x5D, 0x02, SS_RDEF, /* XXX TBD */
+ "Logical unit failure prediction threshold exceeded") },
+ /* R */
+ { SST(0x5D, 0x03, SS_RDEF, /* XXX TBD */
+ "Spare area exhaustion prediction threshold exceeded") },
+ /* D B */
+ { SST(0x5D, 0x10, SS_RDEF, /* XXX TBD */
+ "Hardware impending failure general hard drive failure") },
+ /* D B */
+ { SST(0x5D, 0x11, SS_RDEF, /* XXX TBD */
+ "Hardware impending failure drive error rate too high") },
+ /* D B */
+ { SST(0x5D, 0x12, SS_RDEF, /* XXX TBD */
+ "Hardware impending failure data error rate too high") },
+ /* D B */
+ { SST(0x5D, 0x13, SS_RDEF, /* XXX TBD */
+ "Hardware impending failure seek error rate too high") },
+ /* D B */
+ { SST(0x5D, 0x14, SS_RDEF, /* XXX TBD */
+ "Hardware impending failure too many block reassigns") },
+ /* D B */
+ { SST(0x5D, 0x15, SS_RDEF, /* XXX TBD */
+ "Hardware impending failure access times too high") },
+ /* D B */
+ { SST(0x5D, 0x16, SS_RDEF, /* XXX TBD */
+ "Hardware impending failure start unit times too high") },
+ /* D B */
+ { SST(0x5D, 0x17, SS_RDEF, /* XXX TBD */
+ "Hardware impending failure channel parametrics") },
+ /* D B */
+ { SST(0x5D, 0x18, SS_RDEF, /* XXX TBD */
+ "Hardware impending failure controller detected") },
+ /* D B */
+ { SST(0x5D, 0x19, SS_RDEF, /* XXX TBD */
+ "Hardware impending failure throughput performance") },
+ /* D B */
+ { SST(0x5D, 0x1A, SS_RDEF, /* XXX TBD */
+ "Hardware impending failure seek time performance") },
+ /* D B */
+ { SST(0x5D, 0x1B, SS_RDEF, /* XXX TBD */
+ "Hardware impending failure spin-up retry count") },
+ /* D B */
+ { SST(0x5D, 0x1C, SS_RDEF, /* XXX TBD */
+ "Hardware impending failure drive calibration retry count") },
+ /* D B */
+ { SST(0x5D, 0x20, SS_RDEF, /* XXX TBD */
+ "Controller impending failure general hard drive failure") },
+ /* D B */
+ { SST(0x5D, 0x21, SS_RDEF, /* XXX TBD */
+ "Controller impending failure drive error rate too high") },
+ /* D B */
+ { SST(0x5D, 0x22, SS_RDEF, /* XXX TBD */
+ "Controller impending failure data error rate too high") },
+ /* D B */
+ { SST(0x5D, 0x23, SS_RDEF, /* XXX TBD */
+ "Controller impending failure seek error rate too high") },
+ /* D B */
+ { SST(0x5D, 0x24, SS_RDEF, /* XXX TBD */
+ "Controller impending failure too many block reassigns") },
+ /* D B */
+ { SST(0x5D, 0x25, SS_RDEF, /* XXX TBD */
+ "Controller impending failure access times too high") },
+ /* D B */
+ { SST(0x5D, 0x26, SS_RDEF, /* XXX TBD */
+ "Controller impending failure start unit times too high") },
+ /* D B */
+ { SST(0x5D, 0x27, SS_RDEF, /* XXX TBD */
+ "Controller impending failure channel parametrics") },
+ /* D B */
+ { SST(0x5D, 0x28, SS_RDEF, /* XXX TBD */
+ "Controller impending failure controller detected") },
+ /* D B */
+ { SST(0x5D, 0x29, SS_RDEF, /* XXX TBD */
+ "Controller impending failure throughput performance") },
+ /* D B */
+ { SST(0x5D, 0x2A, SS_RDEF, /* XXX TBD */
+ "Controller impending failure seek time performance") },
+ /* D B */
+ { SST(0x5D, 0x2B, SS_RDEF, /* XXX TBD */
+ "Controller impending failure spin-up retry count") },
+ /* D B */
+ { SST(0x5D, 0x2C, SS_RDEF, /* XXX TBD */
+ "Controller impending failure drive calibration retry count") },
+ /* D B */
+ { SST(0x5D, 0x30, SS_RDEF, /* XXX TBD */
+ "Data channel impending failure general hard drive failure") },
+ /* D B */
+ { SST(0x5D, 0x31, SS_RDEF, /* XXX TBD */
+ "Data channel impending failure drive error rate too high") },
+ /* D B */
+ { SST(0x5D, 0x32, SS_RDEF, /* XXX TBD */
+ "Data channel impending failure data error rate too high") },
+ /* D B */
+ { SST(0x5D, 0x33, SS_RDEF, /* XXX TBD */
+ "Data channel impending failure seek error rate too high") },
+ /* D B */
+ { SST(0x5D, 0x34, SS_RDEF, /* XXX TBD */
+ "Data channel impending failure too many block reassigns") },
+ /* D B */
+ { SST(0x5D, 0x35, SS_RDEF, /* XXX TBD */
+ "Data channel impending failure access times too high") },
+ /* D B */
+ { SST(0x5D, 0x36, SS_RDEF, /* XXX TBD */
+ "Data channel impending failure start unit times too high") },
+ /* D B */
+ { SST(0x5D, 0x37, SS_RDEF, /* XXX TBD */
+ "Data channel impending failure channel parametrics") },
+ /* D B */
+ { SST(0x5D, 0x38, SS_RDEF, /* XXX TBD */
+ "Data channel impending failure controller detected") },
+ /* D B */
+ { SST(0x5D, 0x39, SS_RDEF, /* XXX TBD */
+ "Data channel impending failure throughput performance") },
+ /* D B */
+ { SST(0x5D, 0x3A, SS_RDEF, /* XXX TBD */
+ "Data channel impending failure seek time performance") },
+ /* D B */
+ { SST(0x5D, 0x3B, SS_RDEF, /* XXX TBD */
+ "Data channel impending failure spin-up retry count") },
+ /* D B */
+ { SST(0x5D, 0x3C, SS_RDEF, /* XXX TBD */
+ "Data channel impending failure drive calibration retry count") },
+ /* D B */
+ { SST(0x5D, 0x40, SS_RDEF, /* XXX TBD */
+ "Servo impending failure general hard drive failure") },
+ /* D B */
+ { SST(0x5D, 0x41, SS_RDEF, /* XXX TBD */
+ "Servo impending failure drive error rate too high") },
+ /* D B */
+ { SST(0x5D, 0x42, SS_RDEF, /* XXX TBD */
+ "Servo impending failure data error rate too high") },
+ /* D B */
+ { SST(0x5D, 0x43, SS_RDEF, /* XXX TBD */
+ "Servo impending failure seek error rate too high") },
+ /* D B */
+ { SST(0x5D, 0x44, SS_RDEF, /* XXX TBD */
+ "Servo impending failure too many block reassigns") },
+ /* D B */
+ { SST(0x5D, 0x45, SS_RDEF, /* XXX TBD */
+ "Servo impending failure access times too high") },
+ /* D B */
+ { SST(0x5D, 0x46, SS_RDEF, /* XXX TBD */
+ "Servo impending failure start unit times too high") },
+ /* D B */
+ { SST(0x5D, 0x47, SS_RDEF, /* XXX TBD */
+ "Servo impending failure channel parametrics") },
+ /* D B */
+ { SST(0x5D, 0x48, SS_RDEF, /* XXX TBD */
+ "Servo impending failure controller detected") },
+ /* D B */
+ { SST(0x5D, 0x49, SS_RDEF, /* XXX TBD */
+ "Servo impending failure throughput performance") },
+ /* D B */
+ { SST(0x5D, 0x4A, SS_RDEF, /* XXX TBD */
+ "Servo impending failure seek time performance") },
+ /* D B */
+ { SST(0x5D, 0x4B, SS_RDEF, /* XXX TBD */
+ "Servo impending failure spin-up retry count") },
+ /* D B */
+ { SST(0x5D, 0x4C, SS_RDEF, /* XXX TBD */
+ "Servo impending failure drive calibration retry count") },
+ /* D B */
+ { SST(0x5D, 0x50, SS_RDEF, /* XXX TBD */
+ "Spindle impending failure general hard drive failure") },
+ /* D B */
+ { SST(0x5D, 0x51, SS_RDEF, /* XXX TBD */
+ "Spindle impending failure drive error rate too high") },
+ /* D B */
+ { SST(0x5D, 0x52, SS_RDEF, /* XXX TBD */
+ "Spindle impending failure data error rate too high") },
+ /* D B */
+ { SST(0x5D, 0x53, SS_RDEF, /* XXX TBD */
+ "Spindle impending failure seek error rate too high") },
+ /* D B */
+ { SST(0x5D, 0x54, SS_RDEF, /* XXX TBD */
+ "Spindle impending failure too many block reassigns") },
+ /* D B */
+ { SST(0x5D, 0x55, SS_RDEF, /* XXX TBD */
+ "Spindle impending failure access times too high") },
+ /* D B */
+ { SST(0x5D, 0x56, SS_RDEF, /* XXX TBD */
+ "Spindle impending failure start unit times too high") },
+ /* D B */
+ { SST(0x5D, 0x57, SS_RDEF, /* XXX TBD */
+ "Spindle impending failure channel parametrics") },
+ /* D B */
+ { SST(0x5D, 0x58, SS_RDEF, /* XXX TBD */
+ "Spindle impending failure controller detected") },
+ /* D B */
+ { SST(0x5D, 0x59, SS_RDEF, /* XXX TBD */
+ "Spindle impending failure throughput performance") },
+ /* D B */
+ { SST(0x5D, 0x5A, SS_RDEF, /* XXX TBD */
+ "Spindle impending failure seek time performance") },
+ /* D B */
+ { SST(0x5D, 0x5B, SS_RDEF, /* XXX TBD */
+ "Spindle impending failure spin-up retry count") },
+ /* D B */
+ { SST(0x5D, 0x5C, SS_RDEF, /* XXX TBD */
+ "Spindle impending failure drive calibration retry count") },
+ /* D B */
+ { SST(0x5D, 0x60, SS_RDEF, /* XXX TBD */
+ "Firmware impending failure general hard drive failure") },
+ /* D B */
+ { SST(0x5D, 0x61, SS_RDEF, /* XXX TBD */
+ "Firmware impending failure drive error rate too high") },
+ /* D B */
+ { SST(0x5D, 0x62, SS_RDEF, /* XXX TBD */
+ "Firmware impending failure data error rate too high") },
+ /* D B */
+ { SST(0x5D, 0x63, SS_RDEF, /* XXX TBD */
+ "Firmware impending failure seek error rate too high") },
+ /* D B */
+ { SST(0x5D, 0x64, SS_RDEF, /* XXX TBD */
+ "Firmware impending failure too many block reassigns") },
+ /* D B */
+ { SST(0x5D, 0x65, SS_RDEF, /* XXX TBD */
+ "Firmware impending failure access times too high") },
+ /* D B */
+ { SST(0x5D, 0x66, SS_RDEF, /* XXX TBD */
+ "Firmware impending failure start unit times too high") },
+ /* D B */
+ { SST(0x5D, 0x67, SS_RDEF, /* XXX TBD */
+ "Firmware impending failure channel parametrics") },
+ /* D B */
+ { SST(0x5D, 0x68, SS_RDEF, /* XXX TBD */
+ "Firmware impending failure controller detected") },
+ /* D B */
+ { SST(0x5D, 0x69, SS_RDEF, /* XXX TBD */
+ "Firmware impending failure throughput performance") },
+ /* D B */
+ { SST(0x5D, 0x6A, SS_RDEF, /* XXX TBD */
+ "Firmware impending failure seek time performance") },
+ /* D B */
+ { SST(0x5D, 0x6B, SS_RDEF, /* XXX TBD */
+ "Firmware impending failure spin-up retry count") },
+ /* D B */
+ { SST(0x5D, 0x6C, SS_RDEF, /* XXX TBD */
+ "Firmware impending failure drive calibration retry count") },
+ /* DTLPWROMAEBKVF */
+ { SST(0x5D, 0xFF, SS_RDEF,
+ "Failure prediction threshold exceeded (false)") },
+ /* DTLPWRO A K */
+ { SST(0x5E, 0x00, SS_RDEF,
+ "Low power condition on") },
+ /* DTLPWRO A K */
+ { SST(0x5E, 0x01, SS_RDEF,
+ "Idle condition activated by timer") },
+ /* DTLPWRO A K */
+ { SST(0x5E, 0x02, SS_RDEF,
+ "Standby condition activated by timer") },
+ /* DTLPWRO A K */
+ { SST(0x5E, 0x03, SS_RDEF,
+ "Idle condition activated by command") },
+ /* DTLPWRO A K */
+ { SST(0x5E, 0x04, SS_RDEF,
+ "Standby condition activated by command") },
+ /* B */
+ { SST(0x5E, 0x41, SS_RDEF, /* XXX TBD */
+ "Power state change to active") },
+ /* B */
+ { SST(0x5E, 0x42, SS_RDEF, /* XXX TBD */
+ "Power state change to idle") },
+ /* B */
+ { SST(0x5E, 0x43, SS_RDEF, /* XXX TBD */
+ "Power state change to standby") },
+ /* B */
+ { SST(0x5E, 0x45, SS_RDEF, /* XXX TBD */
+ "Power state change to sleep") },
+ /* BK */
+ { SST(0x5E, 0x47, SS_RDEF, /* XXX TBD */
+ "Power state change to device control") },
+ /* */
+ { SST(0x60, 0x00, SS_RDEF,
+ "Lamp failure") },
+ /* */
+ { SST(0x61, 0x00, SS_RDEF,
+ "Video acquisition error") },
+ /* */
+ { SST(0x61, 0x01, SS_RDEF,
+ "Unable to acquire video") },
+ /* */
+ { SST(0x61, 0x02, SS_RDEF,
+ "Out of focus") },
+ /* */
+ { SST(0x62, 0x00, SS_RDEF,
+ "Scan head positioning error") },
+ /* R */
+ { SST(0x63, 0x00, SS_RDEF,
+ "End of user area encountered on this track") },
+ /* R */
+ { SST(0x63, 0x01, SS_FATAL | ENOSPC,
+ "Packet does not fit in available space") },
+ /* R */
+ { SST(0x64, 0x00, SS_FATAL | ENXIO,
+ "Illegal mode for this track") },
+ /* R */
+ { SST(0x64, 0x01, SS_RDEF,
+ "Invalid packet size") },
+ /* DTLPWROMAEBKVF */
+ { SST(0x65, 0x00, SS_RDEF,
+ "Voltage fault") },
+ /* */
+ { SST(0x66, 0x00, SS_RDEF,
+ "Automatic document feeder cover up") },
+ /* */
+ { SST(0x66, 0x01, SS_RDEF,
+ "Automatic document feeder lift up") },
+ /* */
+ { SST(0x66, 0x02, SS_RDEF,
+ "Document jam in automatic document feeder") },
+ /* */
+ { SST(0x66, 0x03, SS_RDEF,
+ "Document miss feed automatic in document feeder") },
+ /* A */
+ { SST(0x67, 0x00, SS_RDEF,
+ "Configuration failure") },
+ /* A */
+ { SST(0x67, 0x01, SS_RDEF,
+ "Configuration of incapable logical units failed") },
+ /* A */
+ { SST(0x67, 0x02, SS_RDEF,
+ "Add logical unit failed") },
+ /* A */
+ { SST(0x67, 0x03, SS_RDEF,
+ "Modification of logical unit failed") },
+ /* A */
+ { SST(0x67, 0x04, SS_RDEF,
+ "Exchange of logical unit failed") },
+ /* A */
+ { SST(0x67, 0x05, SS_RDEF,
+ "Remove of logical unit failed") },
+ /* A */
+ { SST(0x67, 0x06, SS_RDEF,
+ "Attachment of logical unit failed") },
+ /* A */
+ { SST(0x67, 0x07, SS_RDEF,
+ "Creation of logical unit failed") },
+ /* A */
+ { SST(0x67, 0x08, SS_RDEF, /* XXX TBD */
+ "Assign failure occurred") },
+ /* A */
+ { SST(0x67, 0x09, SS_RDEF, /* XXX TBD */
+ "Multiply assigned logical unit") },
+ /* DTLPWROMAEBKVF */
+ { SST(0x67, 0x0A, SS_RDEF, /* XXX TBD */
+ "Set target port groups command failed") },
+ /* DT B */
+ { SST(0x67, 0x0B, SS_RDEF, /* XXX TBD */
+ "ATA device feature not enabled") },
+ /* A */
+ { SST(0x68, 0x00, SS_RDEF,
+ "Logical unit not configured") },
+ /* A */
+ { SST(0x69, 0x00, SS_RDEF,
+ "Data loss on logical unit") },
+ /* A */
+ { SST(0x69, 0x01, SS_RDEF,
+ "Multiple logical unit failures") },
+ /* A */
+ { SST(0x69, 0x02, SS_RDEF,
+ "Parity/data mismatch") },
+ /* A */
+ { SST(0x6A, 0x00, SS_RDEF,
+ "Informational, refer to log") },
+ /* A */
+ { SST(0x6B, 0x00, SS_RDEF,
+ "State change has occurred") },
+ /* A */
+ { SST(0x6B, 0x01, SS_RDEF,
+ "Redundancy level got better") },
+ /* A */
+ { SST(0x6B, 0x02, SS_RDEF,
+ "Redundancy level got worse") },
+ /* A */
+ { SST(0x6C, 0x00, SS_RDEF,
+ "Rebuild failure occurred") },
+ /* A */
+ { SST(0x6D, 0x00, SS_RDEF,
+ "Recalculate failure occurred") },
+ /* A */
+ { SST(0x6E, 0x00, SS_RDEF,
+ "Command to logical unit failed") },
+ /* R */
+ { SST(0x6F, 0x00, SS_RDEF, /* XXX TBD */
+ "Copy protection key exchange failure - authentication failure") },
+ /* R */
+ { SST(0x6F, 0x01, SS_RDEF, /* XXX TBD */
+ "Copy protection key exchange failure - key not present") },
+ /* R */
+ { SST(0x6F, 0x02, SS_RDEF, /* XXX TBD */
+ "Copy protection key exchange failure - key not established") },
+ /* R */
+ { SST(0x6F, 0x03, SS_RDEF, /* XXX TBD */
+ "Read of scrambled sector without authentication") },
+ /* R */
+ { SST(0x6F, 0x04, SS_RDEF, /* XXX TBD */
+ "Media region code is mismatched to logical unit region") },
+ /* R */
+ { SST(0x6F, 0x05, SS_RDEF, /* XXX TBD */
+ "Drive region must be permanent/region reset count error") },
+ /* R */
+ { SST(0x6F, 0x06, SS_RDEF, /* XXX TBD */
+ "Insufficient block count for binding NONCE recording") },
+ /* R */
+ { SST(0x6F, 0x07, SS_RDEF, /* XXX TBD */
+ "Conflict in binding NONCE recording") },
+ /* T */
+ { SST(0x70, 0x00, SS_RDEF,
+ "Decompression exception short: ASCQ = Algorithm ID") },
+ /* T */
+ { SST(0x70, 0xFF, SS_RDEF | SSQ_RANGE,
+ NULL) }, /* Range 0x00 -> 0xFF */
+ /* T */
+ { SST(0x71, 0x00, SS_RDEF,
+ "Decompression exception long: ASCQ = Algorithm ID") },
+ /* T */
+ { SST(0x71, 0xFF, SS_RDEF | SSQ_RANGE,
+ NULL) }, /* Range 0x00 -> 0xFF */
+ /* R */
+ { SST(0x72, 0x00, SS_RDEF,
+ "Session fixation error") },
+ /* R */
+ { SST(0x72, 0x01, SS_RDEF,
+ "Session fixation error writing lead-in") },
+ /* R */
+ { SST(0x72, 0x02, SS_RDEF,
+ "Session fixation error writing lead-out") },
+ /* R */
+ { SST(0x72, 0x03, SS_RDEF,
+ "Session fixation error - incomplete track in session") },
+ /* R */
+ { SST(0x72, 0x04, SS_RDEF,
+ "Empty or partially written reserved track") },
+ /* R */
+ { SST(0x72, 0x05, SS_RDEF, /* XXX TBD */
+ "No more track reservations allowed") },
+ /* R */
+ { SST(0x72, 0x06, SS_RDEF, /* XXX TBD */
+ "RMZ extension is not allowed") },
+ /* R */
+ { SST(0x72, 0x07, SS_RDEF, /* XXX TBD */
+ "No more test zone extensions are allowed") },
+ /* R */
+ { SST(0x73, 0x00, SS_RDEF,
+ "CD control error") },
+ /* R */
+ { SST(0x73, 0x01, SS_RDEF,
+ "Power calibration area almost full") },
+ /* R */
+ { SST(0x73, 0x02, SS_FATAL | ENOSPC,
+ "Power calibration area is full") },
+ /* R */
+ { SST(0x73, 0x03, SS_RDEF,
+ "Power calibration area error") },
+ /* R */
+ { SST(0x73, 0x04, SS_RDEF,
+ "Program memory area update failure") },
+ /* R */
+ { SST(0x73, 0x05, SS_RDEF,
+ "Program memory area is full") },
+ /* R */
+ { SST(0x73, 0x06, SS_RDEF, /* XXX TBD */
+ "RMA/PMA is almost full") },
+ /* R */
+ { SST(0x73, 0x10, SS_RDEF, /* XXX TBD */
+ "Current power calibration area almost full") },
+ /* R */
+ { SST(0x73, 0x11, SS_RDEF, /* XXX TBD */
+ "Current power calibration area is full") },
+ /* R */
+ { SST(0x73, 0x17, SS_RDEF, /* XXX TBD */
+ "RDZ is full") },
+ /* T */
+ { SST(0x74, 0x00, SS_RDEF, /* XXX TBD */
+ "Security error") },
+ /* T */
+ { SST(0x74, 0x01, SS_RDEF, /* XXX TBD */
+ "Unable to decrypt data") },
+ /* T */
+ { SST(0x74, 0x02, SS_RDEF, /* XXX TBD */
+ "Unencrypted data encountered while decrypting") },
+ /* T */
+ { SST(0x74, 0x03, SS_RDEF, /* XXX TBD */
+ "Incorrect data encryption key") },
+ /* T */
+ { SST(0x74, 0x04, SS_RDEF, /* XXX TBD */
+ "Cryptographic integrity validation failed") },
+ /* T */
+ { SST(0x74, 0x05, SS_RDEF, /* XXX TBD */
+ "Error decrypting data") },
+ /* T */
+ { SST(0x74, 0x06, SS_RDEF, /* XXX TBD */
+ "Unknown signature verification key") },
+ /* T */
+ { SST(0x74, 0x07, SS_RDEF, /* XXX TBD */
+ "Encryption parameters not useable") },
+ /* DT R M E VF */
+ { SST(0x74, 0x08, SS_RDEF, /* XXX TBD */
+ "Digital signature validation failure") },
+ /* T */
+ { SST(0x74, 0x09, SS_RDEF, /* XXX TBD */
+ "Encryption mode mismatch on read") },
+ /* T */
+ { SST(0x74, 0x0A, SS_RDEF, /* XXX TBD */
+ "Encrypted block not raw read enabled") },
+ /* T */
+ { SST(0x74, 0x0B, SS_RDEF, /* XXX TBD */
+ "Incorrect encryption parameters") },
+ /* DT R MAEBKV */
+ { SST(0x74, 0x0C, SS_RDEF, /* XXX TBD */
+ "Unable to decrypt parameter list") },
+ /* T */
+ { SST(0x74, 0x0D, SS_RDEF, /* XXX TBD */
+ "Encryption algorithm disabled") },
+ /* DT R MAEBKV */
+ { SST(0x74, 0x10, SS_RDEF, /* XXX TBD */
+ "SA creation parameter value invalid") },
+ /* DT R MAEBKV */
+ { SST(0x74, 0x11, SS_RDEF, /* XXX TBD */
+ "SA creation parameter value rejected") },
+ /* DT R MAEBKV */
+ { SST(0x74, 0x12, SS_RDEF, /* XXX TBD */
+ "Invalid SA usage") },
+ /* T */
+ { SST(0x74, 0x21, SS_RDEF, /* XXX TBD */
+ "Data encryption configuration prevented") },
+ /* DT R MAEBKV */
+ { SST(0x74, 0x30, SS_RDEF, /* XXX TBD */
+ "SA creation parameter not supported") },
+ /* DT R MAEBKV */
+ { SST(0x74, 0x40, SS_RDEF, /* XXX TBD */
+ "Authentication failed") },
+ /* V */
+ { SST(0x74, 0x61, SS_RDEF, /* XXX TBD */
+ "External data encryption key manager access error") },
+ /* V */
+ { SST(0x74, 0x62, SS_RDEF, /* XXX TBD */
+ "External data encryption key manager error") },
+ /* V */
+ { SST(0x74, 0x63, SS_RDEF, /* XXX TBD */
+ "External data encryption key not found") },
+ /* V */
+ { SST(0x74, 0x64, SS_RDEF, /* XXX TBD */
+ "External data encryption request not authorized") },
+ /* T */
+ { SST(0x74, 0x6E, SS_RDEF, /* XXX TBD */
+ "External data encryption control timeout") },
+ /* T */
+ { SST(0x74, 0x6F, SS_RDEF, /* XXX TBD */
+ "External data encryption control error") },
+ /* DT R M E V */
+ { SST(0x74, 0x71, SS_RDEF, /* XXX TBD */
+ "Logical unit access not authorized") },
+ /* D */
+ { SST(0x74, 0x79, SS_RDEF, /* XXX TBD */
+ "Security conflict in translated device") }
+};
+
+const int asc_table_size = sizeof(asc_table)/sizeof(asc_table[0]);
+
+struct asc_key
+{
+ int asc;
+ int ascq;
+};
+
+static int
+ascentrycomp(const void *key, const void *member)
+{
+ int asc;
+ int ascq;
+ const struct asc_table_entry *table_entry;
+
+ asc = ((const struct asc_key *)key)->asc;
+ ascq = ((const struct asc_key *)key)->ascq;
+ table_entry = (const struct asc_table_entry *)member;
+
+ if (asc >= table_entry->asc) {
+
+ if (asc > table_entry->asc)
+ return (1);
+
+ if (ascq <= table_entry->ascq) {
+ /* Check for ranges */
+ if (ascq == table_entry->ascq
+ || ((table_entry->action & SSQ_RANGE) != 0
+ && ascq >= (table_entry - 1)->ascq))
+ return (0);
+ return (-1);
+ }
+ return (1);
+ }
+ return (-1);
+}
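+
+/*
+ * Illustrative example: looking up asc 0x70, ascq 0x05 lands bsearch
+ * on the SSQ_RANGE entry { SST(0x70, 0xFF, SS_RDEF | SSQ_RANGE, NULL) }.
+ * Since 0x05 <= 0xFF and 0x05 >= the preceding entry's ascq (0x00),
+ * the comparator returns 0, so that single row covers the whole
+ * "Decompression exception" ASCQ range without 256 table entries.
+ */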
+
+static int
+senseentrycomp(const void *key, const void *member)
+{
+ int sense_key;
+ const struct sense_key_table_entry *table_entry;
+
+ sense_key = *((const int *)key);
+ table_entry = (const struct sense_key_table_entry *)member;
+
+ if (sense_key >= table_entry->sense_key) {
+ if (sense_key == table_entry->sense_key)
+ return (0);
+ return (1);
+ }
+ return (-1);
+}
+
+static void
+fetchtableentries(int sense_key, int asc, int ascq,
+ struct scsi_inquiry_data *inq_data,
+ const struct sense_key_table_entry **sense_entry,
+ const struct asc_table_entry **asc_entry)
+{
+ caddr_t match;
+ const struct asc_table_entry *asc_tables[2];
+ const struct sense_key_table_entry *sense_tables[2];
+ struct asc_key asc_ascq;
+ size_t asc_tables_size[2];
+ size_t sense_tables_size[2];
+ int num_asc_tables;
+ int num_sense_tables;
+ int i;
+
+ /* Default to failure */
+ *sense_entry = NULL;
+ *asc_entry = NULL;
+ match = NULL;
+ if (inq_data != NULL)
+ match = cam_quirkmatch((caddr_t)inq_data,
+ (caddr_t)sense_quirk_table,
+ sense_quirk_table_size,
+ sizeof(*sense_quirk_table),
+ scsi_inquiry_match);
+
+ if (match != NULL) {
+ struct scsi_sense_quirk_entry *quirk;
+
+ quirk = (struct scsi_sense_quirk_entry *)match;
+ asc_tables[0] = quirk->asc_info;
+ asc_tables_size[0] = quirk->num_ascs;
+ asc_tables[1] = asc_table;
+ asc_tables_size[1] = asc_table_size;
+ num_asc_tables = 2;
+ sense_tables[0] = quirk->sense_key_info;
+ sense_tables_size[0] = quirk->num_sense_keys;
+ sense_tables[1] = sense_key_table;
+ sense_tables_size[1] = sense_key_table_size;
+ num_sense_tables = 2;
+ } else {
+ asc_tables[0] = asc_table;
+ asc_tables_size[0] = asc_table_size;
+ num_asc_tables = 1;
+ sense_tables[0] = sense_key_table;
+ sense_tables_size[0] = sense_key_table_size;
+ num_sense_tables = 1;
+ }
+
+ asc_ascq.asc = asc;
+ asc_ascq.ascq = ascq;
+ for (i = 0; i < num_asc_tables; i++) {
+ void *found_entry;
+
+ found_entry = bsearch(&asc_ascq, asc_tables[i],
+ asc_tables_size[i],
+ sizeof(**asc_tables),
+ ascentrycomp);
+
+ if (found_entry) {
+ *asc_entry = (struct asc_table_entry *)found_entry;
+ break;
+ }
+ }
+
+ for (i = 0; i < num_sense_tables; i++) {
+ void *found_entry;
+
+ found_entry = bsearch(&sense_key, sense_tables[i],
+ sense_tables_size[i],
+ sizeof(**sense_tables),
+ senseentrycomp);
+
+ if (found_entry) {
+ *sense_entry =
+ (struct sense_key_table_entry *)found_entry;
+ break;
+ }
+ }
+}
+
+void
+scsi_sense_desc(int sense_key, int asc, int ascq,
+ struct scsi_inquiry_data *inq_data,
+ const char **sense_key_desc, const char **asc_desc)
+{
+ const struct asc_table_entry *asc_entry;
+ const struct sense_key_table_entry *sense_entry;
+
+ fetchtableentries(sense_key, asc, ascq,
+ inq_data,
+ &sense_entry,
+ &asc_entry);
+
+ *sense_key_desc = sense_entry->desc;
+
+ if (asc_entry != NULL)
+ *asc_desc = asc_entry->desc;
+ else if (asc >= 0x80 && asc <= 0xff)
+ *asc_desc = "Vendor Specific ASC";
+ else if (ascq >= 0x80 && ascq <= 0xff)
+ *asc_desc = "Vendor Specific ASCQ";
+ else
+ *asc_desc = "Reserved ASC/ASCQ pair";
+}
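+
+/*
+ * Usage sketch (illustrative): passing a NULL inq_data makes
+ * fetchtableentries() skip the quirk lookup and consult only the
+ * generic tables:
+ *
+ *	const char *skey_desc, *asc_desc;
+ *
+ *	scsi_sense_desc(sense_key, asc, ascq, NULL, &skey_desc, &asc_desc);
+ */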
+
+/*
+ * Given sense and device type information, return the appropriate action.
+ * If we do not understand the specific error as identified by the ASC/ASCQ
+ * pair, fall back on the more generic actions derived from the sense key.
+ */
+scsi_sense_action
+scsi_error_action(struct ccb_scsiio *csio, struct scsi_inquiry_data *inq_data,
+ u_int32_t sense_flags)
+{
+ const struct asc_table_entry *asc_entry;
+ const struct sense_key_table_entry *sense_entry;
+ int error_code, sense_key, asc, ascq;
+ scsi_sense_action action;
+
+ scsi_extract_sense(&csio->sense_data, &error_code,
+ &sense_key, &asc, &ascq);
+
+ if (error_code == SSD_DEFERRED_ERROR) {
+ /*
+ * XXX dufault@FreeBSD.org
+ * This error doesn't relate to the command associated
+ * with this request sense. A deferred error is an error
+ * for a command that has already returned GOOD status
+ * (see SCSI2 8.2.14.2).
+ *
+ * By my reading of that section, it looks like the current
+ * command has been cancelled, we should now clean things up
+ * (hopefully recovering any lost data) and then retry the
+ * current command. There are two easy choices, both wrong:
+ *
+ * 1. Drop through (like we had been doing), thus treating
+ * this as if the error were for the current command and
+ * return and stop the current command.
+ *
+ * 2. Issue a retry (like I made it do) thus hopefully
+ * recovering the current transfer, and ignoring the
+ * fact that we've dropped a command.
+ *
+ * These should probably be handled in a device specific
+ * sense handler or punted back up to a user mode daemon
+ */
+ action = SS_RETRY|SSQ_DECREMENT_COUNT|SSQ_PRINT_SENSE;
+ } else {
+ fetchtableentries(sense_key, asc, ascq,
+ inq_data,
+ &sense_entry,
+ &asc_entry);
+
+ /*
+ * Override the 'No additional Sense' entry (0,0)
+ * with the error action of the sense key.
+ */
+ if (asc_entry != NULL
+ && (asc != 0 || ascq != 0))
+ action = asc_entry->action;
+ else
+ action = sense_entry->action;
+
+ if (sense_key == SSD_KEY_RECOVERED_ERROR) {
+ /*
+ * The action succeeded but the device wants
+ * the user to know that some recovery action
+ * was required.
+ */
+ action &= ~(SS_MASK|SSQ_MASK|SS_ERRMASK);
+ action |= SS_NOP|SSQ_PRINT_SENSE;
+ } else if (sense_key == SSD_KEY_ILLEGAL_REQUEST) {
+ if ((sense_flags & SF_QUIET_IR) != 0)
+ action &= ~SSQ_PRINT_SENSE;
+ } else if (sense_key == SSD_KEY_UNIT_ATTENTION) {
+ if ((sense_flags & SF_RETRY_UA) != 0
+ && (action & SS_MASK) == SS_FAIL) {
+ action &= ~(SS_MASK|SSQ_MASK);
+ action |= SS_RETRY|SSQ_DECREMENT_COUNT|
+ SSQ_PRINT_SENSE;
+ }
+ }
+ }
+#ifdef _KERNEL
+ if (bootverbose)
+ sense_flags |= SF_PRINT_ALWAYS;
+#endif
+ if ((sense_flags & SF_PRINT_ALWAYS) != 0)
+ action |= SSQ_PRINT_SENSE;
+ else if ((sense_flags & SF_NO_PRINT) != 0)
+ action &= ~SSQ_PRINT_SENSE;
+
+ return (action);
+}
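+
+/*
+ * Illustrative behavior of scsi_error_action(): a RECOVERED ERROR
+ * sense key is rewritten to SS_NOP | SSQ_PRINT_SENSE, so the command
+ * is treated as successful while the sense data is still reported;
+ * with SF_RETRY_UA set, a UNIT ATTENTION whose table action is
+ * SS_FAIL is converted into a retry instead.
+ */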
+
+char *
+scsi_cdb_string(u_int8_t *cdb_ptr, char *cdb_string, size_t len)
+{
+ u_int8_t cdb_len;
+ int i;
+
+ if (cdb_ptr == NULL)
+ return("");
+
+ /* Silence warnings */
+ cdb_len = 0;
+
+ /*
+ * This is taken from the SCSI-3 draft spec.
+ * (T10/1157D revision 0.3)
+ * The top 3 bits of an opcode are the group code. The next 5 bits
+ * are the command code.
+ * Group 0: six byte commands
+ * Group 1: ten byte commands
+ * Group 2: ten byte commands
+ * Group 3: reserved
+ * Group 4: sixteen byte commands
+ * Group 5: twelve byte commands
+ * Group 6: vendor specific
+ * Group 7: vendor specific
+ */
+ switch((*cdb_ptr >> 5) & 0x7) {
+ case 0:
+ cdb_len = 6;
+ break;
+ case 1:
+ case 2:
+ cdb_len = 10;
+ break;
+ case 3:
+ case 6:
+ case 7:
+ /* in this case, just print out the opcode */
+ cdb_len = 1;
+ break;
+ case 4:
+ cdb_len = 16;
+ break;
+ case 5:
+ cdb_len = 12;
+ break;
+ }
+ *cdb_string = '\0';
+ for (i = 0; i < cdb_len; i++)
+ snprintf(cdb_string + strlen(cdb_string),
+ len - strlen(cdb_string), "%x ", cdb_ptr[i]);
+
+ return(cdb_string);
+}
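+
+/*
+ * Usage sketch (illustrative): each CDB byte is rendered as "%x ", at
+ * most three characters per byte, so a buffer of (SCSI_MAX_CDBLEN * 3)
+ * + 1 bytes is always large enough.  A zero-filled 6-byte CDB prints
+ * as "0 0 0 0 0 0 ":
+ *
+ *	char cdb_str[(SCSI_MAX_CDBLEN * 3) + 1];
+ *
+ *	printf("%s\n", scsi_cdb_string(cdb, cdb_str, sizeof(cdb_str)));
+ */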
+
+const char *
+scsi_status_string(struct ccb_scsiio *csio)
+{
+ switch(csio->scsi_status) {
+ case SCSI_STATUS_OK:
+ return("OK");
+ case SCSI_STATUS_CHECK_COND:
+ return("Check Condition");
+ case SCSI_STATUS_BUSY:
+ return("Busy");
+ case SCSI_STATUS_INTERMED:
+ return("Intermediate");
+ case SCSI_STATUS_INTERMED_COND_MET:
+ return("Intermediate-Condition Met");
+ case SCSI_STATUS_RESERV_CONFLICT:
+ return("Reservation Conflict");
+ case SCSI_STATUS_CMD_TERMINATED:
+ return("Command Terminated");
+ case SCSI_STATUS_QUEUE_FULL:
+ return("Queue Full");
+ case SCSI_STATUS_ACA_ACTIVE:
+ return("ACA Active");
+ case SCSI_STATUS_TASK_ABORTED:
+ return("Task Aborted");
+ default: {
+ static char unkstr[64];
+ snprintf(unkstr, sizeof(unkstr), "Unknown %#x",
+ csio->scsi_status);
+ return(unkstr);
+ }
+ }
+}
+
+/*
+ * scsi_command_string() returns 0 for success and -1 for failure.
+ */
+#ifdef _KERNEL
+int
+scsi_command_string(struct ccb_scsiio *csio, struct sbuf *sb)
+#else /* !_KERNEL */
+int
+scsi_command_string(struct cam_device *device, struct ccb_scsiio *csio,
+ struct sbuf *sb)
+#endif /* _KERNEL/!_KERNEL */
+{
+ struct scsi_inquiry_data *inq_data;
+ char cdb_str[(SCSI_MAX_CDBLEN * 3) + 1];
+#ifdef _KERNEL
+ struct ccb_getdev *cgd;
+#endif /* _KERNEL */
+
+#ifdef _KERNEL
+ if ((cgd = (struct ccb_getdev*)xpt_alloc_ccb_nowait()) == NULL)
+ return(-1);
+ /*
+ * Get the device information.
+ */
+ xpt_setup_ccb(&cgd->ccb_h,
+ csio->ccb_h.path,
+ CAM_PRIORITY_NORMAL);
+ cgd->ccb_h.func_code = XPT_GDEV_TYPE;
+ xpt_action((union ccb *)cgd);
+
+ /*
+ * If the device is unconfigured, just pretend that it is a hard
+ * drive. scsi_op_desc() needs this.
+ */
+ if (cgd->ccb_h.status == CAM_DEV_NOT_THERE)
+ cgd->inq_data.device = T_DIRECT;
+
+ inq_data = &cgd->inq_data;
+
+#else /* !_KERNEL */
+
+ inq_data = &device->inq_data;
+
+#endif /* _KERNEL/!_KERNEL */
+
+ if ((csio->ccb_h.flags & CAM_CDB_POINTER) != 0) {
+ sbuf_printf(sb, "%s. CDB: %s",
+ scsi_op_desc(csio->cdb_io.cdb_ptr[0], inq_data),
+ scsi_cdb_string(csio->cdb_io.cdb_ptr, cdb_str,
+ sizeof(cdb_str)));
+ } else {
+ sbuf_printf(sb, "%s. CDB: %s",
+ scsi_op_desc(csio->cdb_io.cdb_bytes[0], inq_data),
+ scsi_cdb_string(csio->cdb_io.cdb_bytes, cdb_str,
+ sizeof(cdb_str)));
+ }
+
+#ifdef _KERNEL
+	xpt_free_ccb((union ccb*)cgd);
+#endif /* _KERNEL */
+	return(0);
+}
+
+
+/*
+ * scsi_sense_sbuf() returns 0 for success and -1 for failure.
+ */
+#ifdef _KERNEL
+int
+scsi_sense_sbuf(struct ccb_scsiio *csio, struct sbuf *sb,
+ scsi_sense_string_flags flags)
+#else /* !_KERNEL */
+int
+scsi_sense_sbuf(struct cam_device *device, struct ccb_scsiio *csio,
+ struct sbuf *sb, scsi_sense_string_flags flags)
+#endif /* _KERNEL/!_KERNEL */
+{
+ struct scsi_sense_data *sense;
+ struct scsi_inquiry_data *inq_data;
+#ifdef _KERNEL
+ struct ccb_getdev *cgd;
+#endif /* _KERNEL */
+ u_int32_t info;
+ int error_code;
+ int sense_key;
+ int asc, ascq;
+ char path_str[64];
+
+#ifndef _KERNEL
+ if (device == NULL)
+ return(-1);
+#endif /* !_KERNEL */
+ if ((csio == NULL) || (sb == NULL))
+ return(-1);
+
+ /*
+	 * If the CDB is a physical address, we can't deal with it.
+ */
+ if ((csio->ccb_h.flags & CAM_CDB_PHYS) != 0)
+ flags &= ~SSS_FLAG_PRINT_COMMAND;
+
+#ifdef _KERNEL
+ xpt_path_string(csio->ccb_h.path, path_str, sizeof(path_str));
+#else /* !_KERNEL */
+ cam_path_string(device, path_str, sizeof(path_str));
+#endif /* _KERNEL/!_KERNEL */
+
+#ifdef _KERNEL
+ if ((cgd = (struct ccb_getdev*)xpt_alloc_ccb_nowait()) == NULL)
+ return(-1);
+ /*
+ * Get the device information.
+ */
+ xpt_setup_ccb(&cgd->ccb_h,
+ csio->ccb_h.path,
+ CAM_PRIORITY_NORMAL);
+ cgd->ccb_h.func_code = XPT_GDEV_TYPE;
+ xpt_action((union ccb *)cgd);
+
+ /*
+ * If the device is unconfigured, just pretend that it is a hard
+ * drive. scsi_op_desc() needs this.
+ */
+ if (cgd->ccb_h.status == CAM_DEV_NOT_THERE)
+ cgd->inq_data.device = T_DIRECT;
+
+ inq_data = &cgd->inq_data;
+
+#else /* !_KERNEL */
+
+ inq_data = &device->inq_data;
+
+#endif /* _KERNEL/!_KERNEL */
+
+ sense = NULL;
+
+ if (flags & SSS_FLAG_PRINT_COMMAND) {
+
+ sbuf_cat(sb, path_str);
+
+#ifdef _KERNEL
+ scsi_command_string(csio, sb);
+#else /* !_KERNEL */
+ scsi_command_string(device, csio, sb);
+#endif /* _KERNEL/!_KERNEL */
+ sbuf_printf(sb, "\n");
+ }
+
+ /*
+ * If the sense data is a physical pointer, forget it.
+ */
+ if (csio->ccb_h.flags & CAM_SENSE_PTR) {
+ if (csio->ccb_h.flags & CAM_SENSE_PHYS) {
+#ifdef _KERNEL
+ xpt_free_ccb((union ccb*)cgd);
+#endif /* _KERNEL */
+ return(-1);
+ } else {
+ /*
+ * bcopy the pointer to avoid unaligned access
+ * errors on finicky architectures. We don't
+ * ensure that the sense data is pointer aligned.
+ */
+ bcopy(&csio->sense_data, &sense,
+ sizeof(struct scsi_sense_data *));
+ }
+ } else {
+ /*
+ * If the physical sense flag is set, but the sense pointer
+ * is not also set, we assume that the user is an idiot and
+ * return. (Well, okay, it could be that somehow, the
+ * entire csio is physical, but we would have probably core
+		 * dumped on one of the bogus pointer dereferences above
+ * already.)
+ */
+ if (csio->ccb_h.flags & CAM_SENSE_PHYS) {
+#ifdef _KERNEL
+ xpt_free_ccb((union ccb*)cgd);
+#endif /* _KERNEL */
+ return(-1);
+ } else
+ sense = &csio->sense_data;
+ }
+
+
+ sbuf_cat(sb, path_str);
+
+ error_code = sense->error_code & SSD_ERRCODE;
+ sense_key = sense->flags & SSD_KEY;
+
+ sbuf_printf(sb, "SCSI sense: ");
+ switch (error_code) {
+ case SSD_DEFERRED_ERROR:
+ sbuf_printf(sb, "Deferred error: ");
+
+ /* FALLTHROUGH */
+ case SSD_CURRENT_ERROR:
+ {
+ const char *sense_key_desc;
+ const char *asc_desc;
+
+ asc = (sense->extra_len >= 5) ? sense->add_sense_code : 0;
+ ascq = (sense->extra_len >= 6) ? sense->add_sense_code_qual : 0;
+ scsi_sense_desc(sense_key, asc, ascq, inq_data,
+ &sense_key_desc, &asc_desc);
+ sbuf_cat(sb, sense_key_desc);
+
+ info = scsi_4btoul(sense->info);
+
+ if (sense->error_code & SSD_ERRCODE_VALID) {
+
+ switch (sense_key) {
+ case SSD_KEY_NOT_READY:
+ case SSD_KEY_ILLEGAL_REQUEST:
+ case SSD_KEY_UNIT_ATTENTION:
+ case SSD_KEY_DATA_PROTECT:
+ break;
+ case SSD_KEY_BLANK_CHECK:
+ sbuf_printf(sb, " req sz: %d (decimal)", info);
+ break;
+ default:
+ if (info) {
+ if (sense->flags & SSD_ILI) {
+ sbuf_printf(sb, " ILI (length "
+ "mismatch): %d", info);
+
+ } else {
+ sbuf_printf(sb, " info:%x",
+ info);
+ }
+ }
+ }
+ } else if (info) {
+ sbuf_printf(sb, " info?:%x", info);
+ }
+
+ if (sense->extra_len >= 4) {
+ if (bcmp(sense->cmd_spec_info, "\0\0\0\0", 4)) {
+ sbuf_printf(sb, " csi:%x,%x,%x,%x",
+ sense->cmd_spec_info[0],
+ sense->cmd_spec_info[1],
+ sense->cmd_spec_info[2],
+ sense->cmd_spec_info[3]);
+ }
+ }
+
+ sbuf_printf(sb, " asc:%x,%x (%s)", asc, ascq, asc_desc);
+
+ if (sense->extra_len >= 7 && sense->fru) {
+ sbuf_printf(sb, " field replaceable unit: %x",
+ sense->fru);
+ }
+
+ if ((sense->extra_len >= 10)
+ && (sense->sense_key_spec[0] & SSD_SCS_VALID) != 0) {
+ switch(sense_key) {
+ case SSD_KEY_ILLEGAL_REQUEST: {
+ int bad_command;
+ char tmpstr2[40];
+
+ if (sense->sense_key_spec[0] & 0x40)
+ bad_command = 1;
+ else
+ bad_command = 0;
+
+ tmpstr2[0] = '\0';
+
+ /* Bit pointer is valid */
+ if (sense->sense_key_spec[0] & 0x08)
+ snprintf(tmpstr2, sizeof(tmpstr2),
+ "bit %d ",
+ sense->sense_key_spec[0] & 0x7);
+ sbuf_printf(sb, ": %s byte %d %sis invalid",
+ bad_command ? "Command" : "Data",
+ scsi_2btoul(
+ &sense->sense_key_spec[1]),
+ tmpstr2);
+ break;
+ }
+ case SSD_KEY_RECOVERED_ERROR:
+ case SSD_KEY_HARDWARE_ERROR:
+ case SSD_KEY_MEDIUM_ERROR:
+ sbuf_printf(sb, " actual retry count: %d",
+ scsi_2btoul(
+ &sense->sense_key_spec[1]));
+ break;
+ default:
+ sbuf_printf(sb, " sks:%#x,%#x",
+ sense->sense_key_spec[0],
+ scsi_2btoul(
+ &sense->sense_key_spec[1]));
+ break;
+ }
+ }
+ break;
+
+ }
+ default:
+ sbuf_printf(sb, "Error code 0x%x", sense->error_code);
+ if (sense->error_code & SSD_ERRCODE_VALID) {
+ sbuf_printf(sb, " at block no. %d (decimal)",
+ info = scsi_4btoul(sense->info));
+ }
+ }
+
+ sbuf_printf(sb, "\n");
+
+#ifdef _KERNEL
+ xpt_free_ccb((union ccb*)cgd);
+#endif /* _KERNEL */
+ return(0);
+}
+
+#ifdef _KERNEL
+char *
+scsi_sense_string(struct ccb_scsiio *csio, char *str, int str_len)
+#else /* !_KERNEL */
+char *
+scsi_sense_string(struct cam_device *device, struct ccb_scsiio *csio,
+ char *str, int str_len)
+#endif /* _KERNEL/!_KERNEL */
+{
+ struct sbuf sb;
+
+ sbuf_new(&sb, str, str_len, 0);
+
+#ifdef _KERNEL
+ scsi_sense_sbuf(csio, &sb, SSS_FLAG_PRINT_COMMAND);
+#else /* !_KERNEL */
+ scsi_sense_sbuf(device, csio, &sb, SSS_FLAG_PRINT_COMMAND);
+#endif /* _KERNEL/!_KERNEL */
+
+ sbuf_finish(&sb);
+
+ return(sbuf_data(&sb));
+}
+
+#ifdef _KERNEL
+void
+scsi_sense_print(struct ccb_scsiio *csio)
+{
+ struct sbuf sb;
+ char str[512];
+
+ sbuf_new(&sb, str, sizeof(str), 0);
+
+ scsi_sense_sbuf(csio, &sb, SSS_FLAG_PRINT_COMMAND);
+
+ sbuf_finish(&sb);
+
+ printf("%s", sbuf_data(&sb));
+}
+
+#else /* !_KERNEL */
+void
+scsi_sense_print(struct cam_device *device, struct ccb_scsiio *csio,
+ FILE *ofile)
+{
+ struct sbuf sb;
+ char str[512];
+
+ if ((device == NULL) || (csio == NULL) || (ofile == NULL))
+ return;
+
+ sbuf_new(&sb, str, sizeof(str), 0);
+
+ scsi_sense_sbuf(device, csio, &sb, SSS_FLAG_PRINT_COMMAND);
+
+ sbuf_finish(&sb);
+
+ fprintf(ofile, "%s", sbuf_data(&sb));
+}
+
+#endif /* _KERNEL/!_KERNEL */
+#endif /* __rtems__ */
+
+/*
+ * This function currently requires at least 36 bytes, or
+ * SHORT_INQUIRY_LENGTH, worth of data to function properly. If this
+ * function needs more or less data in the future, another length should be
+ * defined in scsi_all.h to indicate the minimum amount of data necessary
+ * for this routine to function properly.
+ */
+void
+scsi_print_inquiry(struct scsi_inquiry_data *inq_data)
+{
+ u_int8_t type;
+ char *dtype, *qtype;
+ char vendor[16], product[48], revision[16], rstr[4];
+
+ type = SID_TYPE(inq_data);
+
+ /*
+ * Figure out basic device type and qualifier.
+ */
+ if (SID_QUAL_IS_VENDOR_UNIQUE(inq_data)) {
+ qtype = "(vendor-unique qualifier)";
+ } else {
+ switch (SID_QUAL(inq_data)) {
+ case SID_QUAL_LU_CONNECTED:
+ qtype = "";
+ break;
+
+ case SID_QUAL_LU_OFFLINE:
+ qtype = "(offline)";
+ break;
+
+ case SID_QUAL_RSVD:
+ qtype = "(reserved qualifier)";
+ break;
+ default:
+ case SID_QUAL_BAD_LU:
+ qtype = "(LUN not supported)";
+ break;
+ }
+ }
+
+ switch (type) {
+ case T_DIRECT:
+ dtype = "Direct Access";
+ break;
+ case T_SEQUENTIAL:
+ dtype = "Sequential Access";
+ break;
+ case T_PRINTER:
+ dtype = "Printer";
+ break;
+ case T_PROCESSOR:
+ dtype = "Processor";
+ break;
+ case T_WORM:
+ dtype = "WORM";
+ break;
+ case T_CDROM:
+ dtype = "CD-ROM";
+ break;
+ case T_SCANNER:
+ dtype = "Scanner";
+ break;
+ case T_OPTICAL:
+ dtype = "Optical";
+ break;
+ case T_CHANGER:
+ dtype = "Changer";
+ break;
+ case T_COMM:
+ dtype = "Communication";
+ break;
+ case T_STORARRAY:
+ dtype = "Storage Array";
+ break;
+ case T_ENCLOSURE:
+ dtype = "Enclosure Services";
+ break;
+ case T_RBC:
+ dtype = "Simplified Direct Access";
+ break;
+ case T_OCRW:
+ dtype = "Optical Card Read/Write";
+ break;
+ case T_OSD:
+ dtype = "Object-Based Storage";
+ break;
+ case T_ADC:
+ dtype = "Automation/Drive Interface";
+ break;
+ case T_NODEVICE:
+ dtype = "Uninstalled";
+ break;
+ default:
+ dtype = "unknown";
+ break;
+ }
+
+ cam_strvis(vendor, inq_data->vendor, sizeof(inq_data->vendor),
+ sizeof(vendor));
+ cam_strvis(product, inq_data->product, sizeof(inq_data->product),
+ sizeof(product));
+ cam_strvis(revision, inq_data->revision, sizeof(inq_data->revision),
+ sizeof(revision));
+
+ if (SID_ANSI_REV(inq_data) == SCSI_REV_CCS)
+ bcopy("CCS", rstr, 4);
+ else
+ snprintf(rstr, sizeof (rstr), "%d", SID_ANSI_REV(inq_data));
+ printf("<%s %s %s> %s %s SCSI-%s device %s\n",
+ vendor, product, revision,
+ SID_IS_REMOVABLE(inq_data) ? "Removable" : "Fixed",
+ dtype, rstr, qtype);
+}
+
+#ifndef __rtems__
+/*
+ * Table of syncrates that don't follow the "divisible by 4"
+ * rule. This table will be expanded in future SCSI specs.
+ */
+static struct {
+ u_int period_factor;
+ u_int period; /* in 100ths of ns */
+} scsi_syncrates[] = {
+ { 0x08, 625 }, /* FAST-160 */
+ { 0x09, 1250 }, /* FAST-80 */
+ { 0x0a, 2500 }, /* FAST-40 40MHz */
+ { 0x0b, 3030 }, /* FAST-40 33MHz */
+ { 0x0c, 5000 } /* FAST-20 */
+};
+
+/*
+ * Return the frequency in kHz corresponding to the given
+ * sync period factor.
+ */
+u_int
+scsi_calc_syncsrate(u_int period_factor)
+{
+ int i;
+ int num_syncrates;
+
+ /*
+	 * It's a bug if period is zero, but if it is anyway, don't
+	 * die with a divide fault; instead return something that
+	 * 'approximates' async.
+ */
+ if (period_factor == 0) {
+ return (3300);
+ }
+
+ num_syncrates = sizeof(scsi_syncrates) / sizeof(scsi_syncrates[0]);
+ /* See if the period is in the "exception" table */
+ for (i = 0; i < num_syncrates; i++) {
+
+ if (period_factor == scsi_syncrates[i].period_factor) {
+ /* Period in kHz */
+ return (100000000 / scsi_syncrates[i].period);
+ }
+ }
+
+ /*
+ * Wasn't in the table, so use the standard
+ * 4 times conversion.
+ */
+ return (10000000 / (period_factor * 4 * 10));
+}
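+
+/*
+ * Worked examples (illustrative): period factor 0x0a is in the
+ * exception table with a period of 2500 (100ths of ns), so the result
+ * is 100000000 / 2500 = 40000 kHz (FAST-40).  A factor of 0x32 (50)
+ * misses the table, so the standard conversion yields
+ * 10000000 / (50 * 4 * 10) = 5000 kHz.
+ */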
+
+/*
+ * Return the SCSI sync parameter that corresponds to
+ * the passed in period in 10ths of ns.
+ */
+u_int
+scsi_calc_syncparam(u_int period)
+{
+ int i;
+ int num_syncrates;
+
+ if (period == 0)
+ return (~0); /* Async */
+
+ /* Adjust for exception table being in 100ths. */
+ period *= 10;
+ num_syncrates = sizeof(scsi_syncrates) / sizeof(scsi_syncrates[0]);
+ /* See if the period is in the "exception" table */
+ for (i = 0; i < num_syncrates; i++) {
+
+ if (period <= scsi_syncrates[i].period) {
+ /* Period in 100ths of ns */
+ return (scsi_syncrates[i].period_factor);
+ }
+ }
+
+ /*
+ * Wasn't in the table, so use the standard
+ * 1/4 period in ns conversion.
+ */
+ return (period/400);
+}
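+
+/*
+ * Worked examples (illustrative): a period of 125 (12.5 ns in 10ths
+ * of ns) becomes 1250 after the 100ths adjustment and matches the
+ * 0x09 exception entry (FAST-80).  A 200 ns period (2000 on input)
+ * misses the table and falls through to 20000 / 400 = 50 (0x32).
+ */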
+#endif /* __rtems__ */
+
+void
+scsi_test_unit_ready(struct ccb_scsiio *csio, u_int32_t retries,
+ void (*cbfcnp)(struct cam_periph *, union ccb *),
+ u_int8_t tag_action, u_int8_t sense_len, u_int32_t timeout)
+{
+ struct scsi_test_unit_ready *scsi_cmd;
+
+ cam_fill_csio(csio,
+ retries,
+ cbfcnp,
+ CAM_DIR_NONE,
+ tag_action,
+ /*data_ptr*/NULL,
+ /*dxfer_len*/0,
+ sense_len,
+ sizeof(*scsi_cmd),
+ timeout);
+
+ scsi_cmd = (struct scsi_test_unit_ready *)&csio->cdb_io.cdb_bytes;
+ bzero(scsi_cmd, sizeof(*scsi_cmd));
+ scsi_cmd->opcode = TEST_UNIT_READY;
+}
+
+#ifndef __rtems__
+void
+scsi_request_sense(struct ccb_scsiio *csio, u_int32_t retries,
+ void (*cbfcnp)(struct cam_periph *, union ccb *),
+ void *data_ptr, u_int8_t dxfer_len, u_int8_t tag_action,
+ u_int8_t sense_len, u_int32_t timeout)
+{
+ struct scsi_request_sense *scsi_cmd;
+
+ cam_fill_csio(csio,
+ retries,
+ cbfcnp,
+ CAM_DIR_IN,
+ tag_action,
+ data_ptr,
+ dxfer_len,
+ sense_len,
+ sizeof(*scsi_cmd),
+ timeout);
+
+ scsi_cmd = (struct scsi_request_sense *)&csio->cdb_io.cdb_bytes;
+ bzero(scsi_cmd, sizeof(*scsi_cmd));
+ scsi_cmd->opcode = REQUEST_SENSE;
+ scsi_cmd->length = dxfer_len;
+}
+#endif /* __rtems__ */
+
+void
+scsi_inquiry(struct ccb_scsiio *csio, u_int32_t retries,
+ void (*cbfcnp)(struct cam_periph *, union ccb *),
+ u_int8_t tag_action, u_int8_t *inq_buf, u_int32_t inq_len,
+ int evpd, u_int8_t page_code, u_int8_t sense_len,
+ u_int32_t timeout)
+{
+ struct scsi_inquiry *scsi_cmd;
+
+ cam_fill_csio(csio,
+ retries,
+ cbfcnp,
+ /*flags*/CAM_DIR_IN,
+ tag_action,
+ /*data_ptr*/inq_buf,
+ /*dxfer_len*/inq_len,
+ sense_len,
+ sizeof(*scsi_cmd),
+ timeout);
+
+ scsi_cmd = (struct scsi_inquiry *)&csio->cdb_io.cdb_bytes;
+ bzero(scsi_cmd, sizeof(*scsi_cmd));
+ scsi_cmd->opcode = INQUIRY;
+ if (evpd) {
+ scsi_cmd->byte2 |= SI_EVPD;
+ scsi_cmd->page_code = page_code;
+ }
+ /*
+ * A 'transfer units' count of 256 is coded as
+ * zero for all commands with a single byte count
+ * field.
+ */
+ if (inq_len == 256)
+ inq_len = 0;
+ scsi_cmd->length = inq_len;
+}
+
+#ifndef __rtems__
+void
+scsi_mode_sense(struct ccb_scsiio *csio, u_int32_t retries,
+ void (*cbfcnp)(struct cam_periph *, union ccb *),
+ u_int8_t tag_action, int dbd, u_int8_t page_code,
+ u_int8_t page, u_int8_t *param_buf, u_int32_t param_len,
+ u_int8_t sense_len, u_int32_t timeout)
+{
+
+ scsi_mode_sense_len(csio, retries, cbfcnp, tag_action, dbd,
+ page_code, page, param_buf, param_len, 0,
+ sense_len, timeout);
+}
+
+void
+scsi_mode_sense_len(struct ccb_scsiio *csio, u_int32_t retries,
+ void (*cbfcnp)(struct cam_periph *, union ccb *),
+ u_int8_t tag_action, int dbd, u_int8_t page_code,
+ u_int8_t page, u_int8_t *param_buf, u_int32_t param_len,
+ int minimum_cmd_size, u_int8_t sense_len, u_int32_t timeout)
+{
+ u_int8_t cdb_len;
+
+ /*
+ * Use the smallest possible command to perform the operation.
+ */
+ if ((param_len < 256)
+ && (minimum_cmd_size < 10)) {
+ /*
+ * We can fit in a 6 byte cdb.
+ */
+ struct scsi_mode_sense_6 *scsi_cmd;
+
+ scsi_cmd = (struct scsi_mode_sense_6 *)&csio->cdb_io.cdb_bytes;
+ bzero(scsi_cmd, sizeof(*scsi_cmd));
+ scsi_cmd->opcode = MODE_SENSE_6;
+ if (dbd != 0)
+ scsi_cmd->byte2 |= SMS_DBD;
+ scsi_cmd->page = page_code | page;
+ scsi_cmd->length = param_len;
+ cdb_len = sizeof(*scsi_cmd);
+ } else {
+ /*
+ * Need a 10 byte cdb.
+ */
+ struct scsi_mode_sense_10 *scsi_cmd;
+
+ scsi_cmd = (struct scsi_mode_sense_10 *)&csio->cdb_io.cdb_bytes;
+ bzero(scsi_cmd, sizeof(*scsi_cmd));
+ scsi_cmd->opcode = MODE_SENSE_10;
+ if (dbd != 0)
+ scsi_cmd->byte2 |= SMS_DBD;
+ scsi_cmd->page = page_code | page;
+ scsi_ulto2b(param_len, scsi_cmd->length);
+ cdb_len = sizeof(*scsi_cmd);
+ }
+ cam_fill_csio(csio,
+ retries,
+ cbfcnp,
+ CAM_DIR_IN,
+ tag_action,
+ param_buf,
+ param_len,
+ sense_len,
+ cdb_len,
+ timeout);
+}
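+
+/*
+ * Illustrative example: a param_len of 255 with minimum_cmd_size < 10
+ * fits MODE SENSE(6), whose length field is a single byte; a
+ * param_len of 256, or a minimum_cmd_size of 10 or more, selects
+ * MODE SENSE(10) with its two byte length field.
+ */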
+
+void
+scsi_mode_select(struct ccb_scsiio *csio, u_int32_t retries,
+ void (*cbfcnp)(struct cam_periph *, union ccb *),
+ u_int8_t tag_action, int scsi_page_fmt, int save_pages,
+ u_int8_t *param_buf, u_int32_t param_len, u_int8_t sense_len,
+ u_int32_t timeout)
+{
+ scsi_mode_select_len(csio, retries, cbfcnp, tag_action,
+ scsi_page_fmt, save_pages, param_buf,
+ param_len, 0, sense_len, timeout);
+}
+
+void
+scsi_mode_select_len(struct ccb_scsiio *csio, u_int32_t retries,
+ void (*cbfcnp)(struct cam_periph *, union ccb *),
+ u_int8_t tag_action, int scsi_page_fmt, int save_pages,
+ u_int8_t *param_buf, u_int32_t param_len,
+ int minimum_cmd_size, u_int8_t sense_len,
+ u_int32_t timeout)
+{
+ u_int8_t cdb_len;
+
+ /*
+ * Use the smallest possible command to perform the operation.
+ */
+ if ((param_len < 256)
+ && (minimum_cmd_size < 10)) {
+ /*
+ * We can fit in a 6 byte cdb.
+ */
+ struct scsi_mode_select_6 *scsi_cmd;
+
+ scsi_cmd = (struct scsi_mode_select_6 *)&csio->cdb_io.cdb_bytes;
+ bzero(scsi_cmd, sizeof(*scsi_cmd));
+ scsi_cmd->opcode = MODE_SELECT_6;
+ if (scsi_page_fmt != 0)
+ scsi_cmd->byte2 |= SMS_PF;
+ if (save_pages != 0)
+ scsi_cmd->byte2 |= SMS_SP;
+ scsi_cmd->length = param_len;
+ cdb_len = sizeof(*scsi_cmd);
+ } else {
+ /*
+ * Need a 10 byte cdb.
+ */
+ struct scsi_mode_select_10 *scsi_cmd;
+
+ scsi_cmd =
+ (struct scsi_mode_select_10 *)&csio->cdb_io.cdb_bytes;
+ bzero(scsi_cmd, sizeof(*scsi_cmd));
+ scsi_cmd->opcode = MODE_SELECT_10;
+ if (scsi_page_fmt != 0)
+ scsi_cmd->byte2 |= SMS_PF;
+ if (save_pages != 0)
+ scsi_cmd->byte2 |= SMS_SP;
+ scsi_ulto2b(param_len, scsi_cmd->length);
+ cdb_len = sizeof(*scsi_cmd);
+ }
+ cam_fill_csio(csio,
+ retries,
+ cbfcnp,
+ CAM_DIR_OUT,
+ tag_action,
+ param_buf,
+ param_len,
+ sense_len,
+ cdb_len,
+ timeout);
+}
+
+void
+scsi_log_sense(struct ccb_scsiio *csio, u_int32_t retries,
+ void (*cbfcnp)(struct cam_periph *, union ccb *),
+ u_int8_t tag_action, u_int8_t page_code, u_int8_t page,
+ int save_pages, int ppc, u_int32_t paramptr,
+ u_int8_t *param_buf, u_int32_t param_len, u_int8_t sense_len,
+ u_int32_t timeout)
+{
+ struct scsi_log_sense *scsi_cmd;
+ u_int8_t cdb_len;
+
+ scsi_cmd = (struct scsi_log_sense *)&csio->cdb_io.cdb_bytes;
+ bzero(scsi_cmd, sizeof(*scsi_cmd));
+ scsi_cmd->opcode = LOG_SENSE;
+ scsi_cmd->page = page_code | page;
+ if (save_pages != 0)
+ scsi_cmd->byte2 |= SLS_SP;
+ if (ppc != 0)
+ scsi_cmd->byte2 |= SLS_PPC;
+ scsi_ulto2b(paramptr, scsi_cmd->paramptr);
+ scsi_ulto2b(param_len, scsi_cmd->length);
+ cdb_len = sizeof(*scsi_cmd);
+
+ cam_fill_csio(csio,
+ retries,
+ cbfcnp,
+ /*flags*/CAM_DIR_IN,
+ tag_action,
+ /*data_ptr*/param_buf,
+ /*dxfer_len*/param_len,
+ sense_len,
+ cdb_len,
+ timeout);
+}
+
+void
+scsi_log_select(struct ccb_scsiio *csio, u_int32_t retries,
+ void (*cbfcnp)(struct cam_periph *, union ccb *),
+ u_int8_t tag_action, u_int8_t page_code, int save_pages,
+ int pc_reset, u_int8_t *param_buf, u_int32_t param_len,
+ u_int8_t sense_len, u_int32_t timeout)
+{
+ struct scsi_log_select *scsi_cmd;
+ u_int8_t cdb_len;
+
+ scsi_cmd = (struct scsi_log_select *)&csio->cdb_io.cdb_bytes;
+ bzero(scsi_cmd, sizeof(*scsi_cmd));
+ scsi_cmd->opcode = LOG_SELECT;
+ scsi_cmd->page = page_code & SLS_PAGE_CODE;
+ if (save_pages != 0)
+ scsi_cmd->byte2 |= SLS_SP;
+ if (pc_reset != 0)
+ scsi_cmd->byte2 |= SLS_PCR;
+ scsi_ulto2b(param_len, scsi_cmd->length);
+ cdb_len = sizeof(*scsi_cmd);
+
+ cam_fill_csio(csio,
+ retries,
+ cbfcnp,
+ /*flags*/CAM_DIR_OUT,
+ tag_action,
+ /*data_ptr*/param_buf,
+ /*dxfer_len*/param_len,
+ sense_len,
+ cdb_len,
+ timeout);
+}
+
+/*
+ * Prevent or allow the user to remove the media
+ */
+void
+scsi_prevent(struct ccb_scsiio *csio, u_int32_t retries,
+ void (*cbfcnp)(struct cam_periph *, union ccb *),
+ u_int8_t tag_action, u_int8_t action,
+ u_int8_t sense_len, u_int32_t timeout)
+{
+ struct scsi_prevent *scsi_cmd;
+
+ cam_fill_csio(csio,
+ retries,
+ cbfcnp,
+ /*flags*/CAM_DIR_NONE,
+ tag_action,
+ /*data_ptr*/NULL,
+ /*dxfer_len*/0,
+ sense_len,
+ sizeof(*scsi_cmd),
+ timeout);
+
+ scsi_cmd = (struct scsi_prevent *)&csio->cdb_io.cdb_bytes;
+ bzero(scsi_cmd, sizeof(*scsi_cmd));
+ scsi_cmd->opcode = PREVENT_ALLOW;
+ scsi_cmd->how = action;
+}
+#endif /* __rtems__ */
+
+/* XXX allow specification of address and PMI bit and LBA */
+void
+scsi_read_capacity(struct ccb_scsiio *csio, u_int32_t retries,
+ void (*cbfcnp)(struct cam_periph *, union ccb *),
+ u_int8_t tag_action,
+ struct scsi_read_capacity_data *rcap_buf,
+ u_int8_t sense_len, u_int32_t timeout)
+{
+ struct scsi_read_capacity *scsi_cmd;
+
+ cam_fill_csio(csio,
+ retries,
+ cbfcnp,
+ /*flags*/CAM_DIR_IN,
+ tag_action,
+ /*data_ptr*/(u_int8_t *)rcap_buf,
+ /*dxfer_len*/sizeof(*rcap_buf),
+ sense_len,
+ sizeof(*scsi_cmd),
+ timeout);
+
+ scsi_cmd = (struct scsi_read_capacity *)&csio->cdb_io.cdb_bytes;
+ bzero(scsi_cmd, sizeof(*scsi_cmd));
+ scsi_cmd->opcode = READ_CAPACITY;
+}
+
+#ifndef __rtems__
+void
+scsi_read_capacity_16(struct ccb_scsiio *csio, uint32_t retries,
+ void (*cbfcnp)(struct cam_periph *, union ccb *),
+ uint8_t tag_action, uint64_t lba, int reladr, int pmi,
+ struct scsi_read_capacity_data_long *rcap_buf,
+ uint8_t sense_len, uint32_t timeout)
+{
+ struct scsi_read_capacity_16 *scsi_cmd;
+
+ cam_fill_csio(csio,
+ retries,
+ cbfcnp,
+ /*flags*/CAM_DIR_IN,
+ tag_action,
+ /*data_ptr*/(u_int8_t *)rcap_buf,
+ /*dxfer_len*/sizeof(*rcap_buf),
+ sense_len,
+ sizeof(*scsi_cmd),
+ timeout);
+ scsi_cmd = (struct scsi_read_capacity_16 *)&csio->cdb_io.cdb_bytes;
+ bzero(scsi_cmd, sizeof(*scsi_cmd));
+ scsi_cmd->opcode = SERVICE_ACTION_IN;
+ scsi_cmd->service_action = SRC16_SERVICE_ACTION;
+ scsi_u64to8b(lba, scsi_cmd->addr);
+ scsi_ulto4b(sizeof(*rcap_buf), scsi_cmd->alloc_len);
+	if (pmi)
+		scsi_cmd->reladr |= SRC16_PMI;
+	if (reladr)
+		scsi_cmd->reladr |= SRC16_RELADR;
+}
+
+void
+scsi_report_luns(struct ccb_scsiio *csio, u_int32_t retries,
+ void (*cbfcnp)(struct cam_periph *, union ccb *),
+ u_int8_t tag_action, u_int8_t select_report,
+ struct scsi_report_luns_data *rpl_buf, u_int32_t alloc_len,
+ u_int8_t sense_len, u_int32_t timeout)
+{
+ struct scsi_report_luns *scsi_cmd;
+
+ cam_fill_csio(csio,
+ retries,
+ cbfcnp,
+ /*flags*/CAM_DIR_IN,
+ tag_action,
+ /*data_ptr*/(u_int8_t *)rpl_buf,
+ /*dxfer_len*/alloc_len,
+ sense_len,
+ sizeof(*scsi_cmd),
+ timeout);
+ scsi_cmd = (struct scsi_report_luns *)&csio->cdb_io.cdb_bytes;
+ bzero(scsi_cmd, sizeof(*scsi_cmd));
+ scsi_cmd->opcode = REPORT_LUNS;
+ scsi_cmd->select_report = select_report;
+ scsi_ulto4b(alloc_len, scsi_cmd->length);
+}
+
+void
+scsi_report_target_group(struct ccb_scsiio *csio, u_int32_t retries,
+ void (*cbfcnp)(struct cam_periph *, union ccb *),
+ u_int8_t tag_action, u_int8_t pdf,
+ void *buf, u_int32_t alloc_len,
+ u_int8_t sense_len, u_int32_t timeout)
+{
+ struct scsi_target_group *scsi_cmd;
+
+ cam_fill_csio(csio,
+ retries,
+ cbfcnp,
+ /*flags*/CAM_DIR_IN,
+ tag_action,
+ /*data_ptr*/(u_int8_t *)buf,
+ /*dxfer_len*/alloc_len,
+ sense_len,
+ sizeof(*scsi_cmd),
+ timeout);
+ scsi_cmd = (struct scsi_target_group *)&csio->cdb_io.cdb_bytes;
+ bzero(scsi_cmd, sizeof(*scsi_cmd));
+ scsi_cmd->opcode = MAINTENANCE_IN;
+ scsi_cmd->service_action = REPORT_TARGET_PORT_GROUPS | pdf;
+ scsi_ulto4b(alloc_len, scsi_cmd->length);
+}
+
+void
+scsi_set_target_group(struct ccb_scsiio *csio, u_int32_t retries,
+ void (*cbfcnp)(struct cam_periph *, union ccb *),
+ u_int8_t tag_action, void *buf, u_int32_t alloc_len,
+ u_int8_t sense_len, u_int32_t timeout)
+{
+ struct scsi_target_group *scsi_cmd;
+
+ cam_fill_csio(csio,
+ retries,
+ cbfcnp,
+ /*flags*/CAM_DIR_OUT,
+ tag_action,
+ /*data_ptr*/(u_int8_t *)buf,
+ /*dxfer_len*/alloc_len,
+ sense_len,
+ sizeof(*scsi_cmd),
+ timeout);
+ scsi_cmd = (struct scsi_target_group *)&csio->cdb_io.cdb_bytes;
+ bzero(scsi_cmd, sizeof(*scsi_cmd));
+ scsi_cmd->opcode = MAINTENANCE_OUT;
+ scsi_cmd->service_action = SET_TARGET_PORT_GROUPS;
+ scsi_ulto4b(alloc_len, scsi_cmd->length);
+}
+
+/*
+ * Synchronize the media to the contents of the cache for
+ * the given lba/count pair. Specifying 0/0 means sync
+ * the whole cache.
+ */
+void
+scsi_synchronize_cache(struct ccb_scsiio *csio, u_int32_t retries,
+ void (*cbfcnp)(struct cam_periph *, union ccb *),
+ u_int8_t tag_action, u_int32_t begin_lba,
+ u_int16_t lb_count, u_int8_t sense_len,
+ u_int32_t timeout)
+{
+ struct scsi_sync_cache *scsi_cmd;
+
+ cam_fill_csio(csio,
+ retries,
+ cbfcnp,
+ /*flags*/CAM_DIR_NONE,
+ tag_action,
+ /*data_ptr*/NULL,
+ /*dxfer_len*/0,
+ sense_len,
+ sizeof(*scsi_cmd),
+ timeout);
+
+ scsi_cmd = (struct scsi_sync_cache *)&csio->cdb_io.cdb_bytes;
+ bzero(scsi_cmd, sizeof(*scsi_cmd));
+ scsi_cmd->opcode = SYNCHRONIZE_CACHE;
+ scsi_ulto4b(begin_lba, scsi_cmd->begin_lba);
+ scsi_ulto2b(lb_count, scsi_cmd->lb_count);
+}
+#endif /* __rtems__ */
+
+void
+scsi_read_write(struct ccb_scsiio *csio, u_int32_t retries,
+ void (*cbfcnp)(struct cam_periph *, union ccb *),
+ u_int8_t tag_action, int readop, u_int8_t byte2,
+ int minimum_cmd_size, u_int64_t lba, u_int32_t block_count,
+ u_int8_t *data_ptr, u_int32_t dxfer_len, u_int8_t sense_len,
+ u_int32_t timeout)
+{
+ u_int8_t cdb_len;
+ /*
+ * Use the smallest possible command to perform the operation
+ * as some legacy hardware does not support the 10 byte commands.
+ * If any of the bits in byte2 is set, we have to go with a larger
+ * command.
+ */
+ if ((minimum_cmd_size < 10)
+ && ((lba & 0x1fffff) == lba)
+ && ((block_count & 0xff) == block_count)
+ && (byte2 == 0)) {
+ /*
+ * We can fit in a 6 byte cdb.
+ */
+ struct scsi_rw_6 *scsi_cmd;
+
+ scsi_cmd = (struct scsi_rw_6 *)&csio->cdb_io.cdb_bytes;
+ scsi_cmd->opcode = readop ? READ_6 : WRITE_6;
+ scsi_ulto3b(lba, scsi_cmd->addr);
+ scsi_cmd->length = block_count & 0xff;
+ scsi_cmd->control = 0;
+ cdb_len = sizeof(*scsi_cmd);
+
+ CAM_DEBUG(csio->ccb_h.path, CAM_DEBUG_SUBTRACE,
+ ("6byte: %x%x%x:%d:%d\n", scsi_cmd->addr[0],
+ scsi_cmd->addr[1], scsi_cmd->addr[2],
+ scsi_cmd->length, dxfer_len));
+ } else if ((minimum_cmd_size < 12)
+ && ((block_count & 0xffff) == block_count)
+ && ((lba & 0xffffffff) == lba)) {
+ /*
+ * Need a 10 byte cdb.
+ */
+ struct scsi_rw_10 *scsi_cmd;
+
+ scsi_cmd = (struct scsi_rw_10 *)&csio->cdb_io.cdb_bytes;
+ scsi_cmd->opcode = readop ? READ_10 : WRITE_10;
+ scsi_cmd->byte2 = byte2;
+ scsi_ulto4b(lba, scsi_cmd->addr);
+ scsi_cmd->reserved = 0;
+ scsi_ulto2b(block_count, scsi_cmd->length);
+ scsi_cmd->control = 0;
+ cdb_len = sizeof(*scsi_cmd);
+
+ CAM_DEBUG(csio->ccb_h.path, CAM_DEBUG_SUBTRACE,
+ ("10byte: %x%x%x%x:%x%x: %d\n", scsi_cmd->addr[0],
+ scsi_cmd->addr[1], scsi_cmd->addr[2],
+ scsi_cmd->addr[3], scsi_cmd->length[0],
+ scsi_cmd->length[1], dxfer_len));
+ } else if ((minimum_cmd_size < 16)
+ && ((block_count & 0xffffffff) == block_count)
+ && ((lba & 0xffffffff) == lba)) {
+ /*
+ * The block count is too big for a 10 byte CDB, use a 12
+ * byte CDB.
+ */
+ struct scsi_rw_12 *scsi_cmd;
+
+ scsi_cmd = (struct scsi_rw_12 *)&csio->cdb_io.cdb_bytes;
+ scsi_cmd->opcode = readop ? READ_12 : WRITE_12;
+ scsi_cmd->byte2 = byte2;
+ scsi_ulto4b(lba, scsi_cmd->addr);
+ scsi_cmd->reserved = 0;
+ scsi_ulto4b(block_count, scsi_cmd->length);
+ scsi_cmd->control = 0;
+ cdb_len = sizeof(*scsi_cmd);
+
+ CAM_DEBUG(csio->ccb_h.path, CAM_DEBUG_SUBTRACE,
+ ("12byte: %x%x%x%x:%x%x%x%x: %d\n", scsi_cmd->addr[0],
+ scsi_cmd->addr[1], scsi_cmd->addr[2],
+ scsi_cmd->addr[3], scsi_cmd->length[0],
+ scsi_cmd->length[1], scsi_cmd->length[2],
+ scsi_cmd->length[3], dxfer_len));
+ } else {
+ /*
+ * 16 byte CDB. We'll only get here if the LBA is larger
+ * than 2^32, or if the user asks for a 16 byte command.
+ */
+ struct scsi_rw_16 *scsi_cmd;
+
+ scsi_cmd = (struct scsi_rw_16 *)&csio->cdb_io.cdb_bytes;
+ scsi_cmd->opcode = readop ? READ_16 : WRITE_16;
+ scsi_cmd->byte2 = byte2;
+ scsi_u64to8b(lba, scsi_cmd->addr);
+ scsi_cmd->reserved = 0;
+ scsi_ulto4b(block_count, scsi_cmd->length);
+ scsi_cmd->control = 0;
+ cdb_len = sizeof(*scsi_cmd);
+ }
+ cam_fill_csio(csio,
+ retries,
+ cbfcnp,
+ /*flags*/readop ? CAM_DIR_IN : CAM_DIR_OUT,
+ tag_action,
+ data_ptr,
+ dxfer_len,
+ sense_len,
+ cdb_len,
+ timeout);
+}
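+
+/*
+ * Illustrative example: with byte2 == 0 and minimum_cmd_size < 10, an
+ * lba of 0x100000 and a block count of 200 fit the 6 byte CDB
+ * (lba <= 0x1fffff, count <= 0xff); raising the count to 256 forces
+ * the 10 byte form, and any lba above 0xffffffff selects the 16 byte
+ * form regardless of minimum_cmd_size.
+ */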
+
+#ifndef __rtems__
+void
+scsi_start_stop(struct ccb_scsiio *csio, u_int32_t retries,
+ void (*cbfcnp)(struct cam_periph *, union ccb *),
+ u_int8_t tag_action, int start, int load_eject,
+ int immediate, u_int8_t sense_len, u_int32_t timeout)
+{
+ struct scsi_start_stop_unit *scsi_cmd;
+ int extra_flags = 0;
+
+ scsi_cmd = (struct scsi_start_stop_unit *)&csio->cdb_io.cdb_bytes;
+ bzero(scsi_cmd, sizeof(*scsi_cmd));
+ scsi_cmd->opcode = START_STOP_UNIT;
+ if (start != 0) {
+ scsi_cmd->how |= SSS_START;
+ /* it takes a lot of power to start a drive */
+ extra_flags |= CAM_HIGH_POWER;
+ }
+ if (load_eject != 0)
+ scsi_cmd->how |= SSS_LOEJ;
+ if (immediate != 0)
+ scsi_cmd->byte2 |= SSS_IMMED;
+
+ cam_fill_csio(csio,
+ retries,
+ cbfcnp,
+ /*flags*/CAM_DIR_NONE | extra_flags,
+ tag_action,
+ /*data_ptr*/NULL,
+ /*dxfer_len*/0,
+ sense_len,
+ sizeof(*scsi_cmd),
+ timeout);
+}
+
+/*
+ * Try to make as good a match as possible with
+ * the available sub-drivers.
+ */
+int
+scsi_inquiry_match(caddr_t inqbuffer, caddr_t table_entry)
+{
+ struct scsi_inquiry_pattern *entry;
+ struct scsi_inquiry_data *inq;
+
+ entry = (struct scsi_inquiry_pattern *)table_entry;
+ inq = (struct scsi_inquiry_data *)inqbuffer;
+
+ if (((SID_TYPE(inq) == entry->type)
+ || (entry->type == T_ANY))
+ && (SID_IS_REMOVABLE(inq) ? entry->media_type & SIP_MEDIA_REMOVABLE
+ : entry->media_type & SIP_MEDIA_FIXED)
+ && (cam_strmatch(inq->vendor, entry->vendor, sizeof(inq->vendor)) == 0)
+ && (cam_strmatch(inq->product, entry->product,
+ sizeof(inq->product)) == 0)
+ && (cam_strmatch(inq->revision, entry->revision,
+ sizeof(inq->revision)) == 0)) {
+ return (0);
+ }
+ return (-1);
+}
+
+/*
+ * Try to make as good a match as possible with
+ * the available sub-drivers.
+ */
+int
+scsi_static_inquiry_match(caddr_t inqbuffer, caddr_t table_entry)
+{
+ struct scsi_static_inquiry_pattern *entry;
+ struct scsi_inquiry_data *inq;
+
+ entry = (struct scsi_static_inquiry_pattern *)table_entry;
+ inq = (struct scsi_inquiry_data *)inqbuffer;
+
+ if (((SID_TYPE(inq) == entry->type)
+ || (entry->type == T_ANY))
+ && (SID_IS_REMOVABLE(inq) ? entry->media_type & SIP_MEDIA_REMOVABLE
+ : entry->media_type & SIP_MEDIA_FIXED)
+ && (cam_strmatch(inq->vendor, entry->vendor, sizeof(inq->vendor)) == 0)
+ && (cam_strmatch(inq->product, entry->product,
+ sizeof(inq->product)) == 0)
+ && (cam_strmatch(inq->revision, entry->revision,
+ sizeof(inq->revision)) == 0)) {
+ return (0);
+ }
+ return (-1);
+}
+
+#ifdef _KERNEL
+static void
+init_scsi_delay(void)
+{
+ int delay;
+
+ delay = SCSI_DELAY;
+ TUNABLE_INT_FETCH("kern.cam.scsi_delay", &delay);
+
+ if (set_scsi_delay(delay) != 0) {
+ printf("cam: invalid value for tunable kern.cam.scsi_delay\n");
+ set_scsi_delay(SCSI_DELAY);
+ }
+}
+SYSINIT(scsi_delay, SI_SUB_TUNABLES, SI_ORDER_ANY, init_scsi_delay, NULL);
+
+static int
+sysctl_scsi_delay(SYSCTL_HANDLER_ARGS)
+{
+ int error, delay;
+
+ delay = scsi_delay;
+ error = sysctl_handle_int(oidp, &delay, 0, req);
+ if (error != 0 || req->newptr == NULL)
+ return (error);
+ return (set_scsi_delay(delay));
+}
+SYSCTL_PROC(_kern_cam, OID_AUTO, scsi_delay, CTLTYPE_INT|CTLFLAG_RW,
+ 0, 0, sysctl_scsi_delay, "I",
+ "Delay to allow devices to settle after a SCSI bus reset (ms)");
+
+static int
+set_scsi_delay(int delay)
+{
+ /*
+ * If someone sets this to 0, we assume that they want the
+ * minimum allowable bus settle delay.
+ */
+ if (delay == 0) {
+ printf("cam: using minimum scsi_delay (%dms)\n",
+ SCSI_MIN_DELAY);
+ delay = SCSI_MIN_DELAY;
+ }
+ if (delay < SCSI_MIN_DELAY)
+ return (EINVAL);
+ scsi_delay = delay;
+ return (0);
+}
+#endif /* _KERNEL */
+#endif /* __rtems__ */
diff --git a/rtems/freebsd/cam/scsi/scsi_all.h b/rtems/freebsd/cam/scsi/scsi_all.h
new file mode 100644
index 00000000..cf23e209
--- /dev/null
+++ b/rtems/freebsd/cam/scsi/scsi_all.h
@@ -0,0 +1,1454 @@
+/*-
+ * Largely written by Julian Elischer (julian@tfs.com)
+ * for TRW Financial Systems.
+ *
+ * TRW Financial Systems, in accordance with their agreement with Carnegie
+ * Mellon University, makes this software available to CMU to distribute
+ * or use in any manner that they see fit as long as this message is kept with
+ * the software. For this reason TFS also grants any other persons or
+ * organisations permission to use or modify this software.
+ *
+ * TFS supplies this software to be publicly redistributed
+ * on the understanding that TFS is not responsible for the correct
+ * functioning of this software in any circumstances.
+ *
+ * Ported to run under 386BSD by Julian Elischer (julian@tfs.com) Sept 1992
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * SCSI general interface description
+ */
+
+#ifndef _SCSI_SCSI_ALL_H
+#define _SCSI_SCSI_ALL_H 1
+
+#include <rtems/freebsd/sys/cdefs.h>
+
+#ifdef _KERNEL
+/*
+ * This is the number of seconds we wait for devices to settle after a SCSI
+ * bus reset.
+ */
+extern int scsi_delay;
+#endif /* _KERNEL */
+
+/*
+ * SCSI command format
+ */
+
+/*
+ * Define some bits that are in ALL (or a lot of) SCSI commands
+ */
+#define SCSI_CTL_LINK 0x01
+#define SCSI_CTL_FLAG 0x02
+#define SCSI_CTL_VENDOR 0xC0
+#define SCSI_CMD_LUN 0xA0 /* these two should not be needed */
+#define SCSI_CMD_LUN_SHIFT 5 /* LUN in the cmd is no longer SCSI */
+
+#define SCSI_MAX_CDBLEN 16 /*
+ * 16 byte commands are in the
+ * SCSI-3 spec
+ */
+#if defined(CAM_MAX_CDBLEN) && (CAM_MAX_CDBLEN < SCSI_MAX_CDBLEN)
+#error "CAM_MAX_CDBLEN cannot be less than SCSI_MAX_CDBLEN"
+#endif
+
+/* 6byte CDBs special case 0 length to be 256 */
+#define SCSI_CDB6_LEN(len) ((len) == 0 ? 256 : len)
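+
+/* Example (illustrative): SCSI_CDB6_LEN(0) == 256, SCSI_CDB6_LEN(10) == 10. */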
+
+/*
+ * This type defines actions to be taken when a particular sense code is
+ * received. Right now, these flags are only defined to take up 16 bits,
+ * but can be expanded in the future if necessary.
+ */
+typedef enum {
+ SS_NOP = 0x000000, /* Do nothing */
+ SS_RETRY = 0x010000, /* Retry the command */
+ SS_FAIL = 0x020000, /* Bail out */
+ SS_START = 0x030000, /* Send a Start Unit command to the device,
+ * then retry the original command.
+ */
+ SS_TUR = 0x040000, /* Send a Test Unit Ready command to the
+ * device, then retry the original command.
+ */
+ SS_REQSENSE = 0x050000, /* Send a RequestSense command to the
+ * device, then retry the original command.
+ */
+ SS_MASK = 0xff0000
+} scsi_sense_action;
+
+typedef enum {
+ SSQ_NONE = 0x0000,
+ SSQ_DECREMENT_COUNT = 0x0100, /* Decrement the retry count */
+ SSQ_MANY = 0x0200, /* send lots of recovery commands */
+ SSQ_RANGE = 0x0400, /*
+ * This table entry represents the
+ * end of a range of ASCQs that
+ * have identical error actions
+ * and text.
+ */
+ SSQ_PRINT_SENSE = 0x0800,
+ SSQ_MASK = 0xff00
+} scsi_sense_action_qualifier;
+
+/* Mask for error status values */
+#define SS_ERRMASK 0xff
+
+/* The default, retryable, error action */
+#define SS_RDEF SS_RETRY|SSQ_DECREMENT_COUNT|SSQ_PRINT_SENSE|EIO
+
+/* The retryable error action, with table-specified error code */
+#define SS_RET SS_RETRY|SSQ_DECREMENT_COUNT|SSQ_PRINT_SENSE
+
+/* Fatal error action, with table-specified error code */
+#define SS_FATAL SS_FAIL|SSQ_PRINT_SENSE
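+
+/*
+ * Illustrative decomposition: an entry such as the asc table's
+ * SST(0x73, 0x02, SS_FATAL | ENOSPC, ...) packs the fail action
+ * (SS_FAIL, selected by SS_MASK), the print qualifier
+ * (SSQ_PRINT_SENSE, selected by SSQ_MASK) and the errno to return
+ * (ENOSPC, selected by SS_ERRMASK) into one scsi_sense_action value.
+ */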
+
+struct scsi_generic
+{
+ u_int8_t opcode;
+ u_int8_t bytes[11];
+};
+
+struct scsi_request_sense
+{
+ u_int8_t opcode;
+ u_int8_t byte2;
+ u_int8_t unused[2];
+ u_int8_t length;
+ u_int8_t control;
+};
+
+struct scsi_test_unit_ready
+{
+ u_int8_t opcode;
+ u_int8_t byte2;
+ u_int8_t unused[3];
+ u_int8_t control;
+};
+
+struct scsi_send_diag
+{
+ u_int8_t opcode;
+ u_int8_t byte2;
+#define SSD_UOL 0x01
+#define SSD_DOL 0x02
+#define SSD_SELFTEST 0x04
+#define SSD_PF 0x10
+ u_int8_t unused[1];
+ u_int8_t paramlen[2];
+ u_int8_t control;
+};
+
+struct scsi_sense
+{
+ u_int8_t opcode;
+ u_int8_t byte2;
+ u_int8_t unused[2];
+ u_int8_t length;
+ u_int8_t control;
+};
+
+struct scsi_inquiry
+{
+ u_int8_t opcode;
+ u_int8_t byte2;
+#define SI_EVPD 0x01
+ u_int8_t page_code;
+ u_int8_t reserved;
+ u_int8_t length;
+ u_int8_t control;
+};
+
+struct scsi_mode_sense_6
+{
+ u_int8_t opcode;
+ u_int8_t byte2;
+#define SMS_DBD 0x08
+ u_int8_t page;
+#define SMS_PAGE_CODE 0x3F
+#define SMS_VENDOR_SPECIFIC_PAGE 0x00
+#define SMS_DISCONNECT_RECONNECT_PAGE 0x02
+#define SMS_FORMAT_DEVICE_PAGE 0x03
+#define SMS_GEOMETRY_PAGE 0x04
+#define SMS_CACHE_PAGE 0x08
+#define SMS_PERIPHERAL_DEVICE_PAGE 0x09
+#define SMS_CONTROL_MODE_PAGE 0x0A
+#define SMS_PROTO_SPECIFIC_PAGE 0x19
+#define SMS_INFO_EXCEPTIONS_PAGE 0x1C
+#define SMS_ALL_PAGES_PAGE 0x3F
+#define SMS_PAGE_CTRL_MASK 0xC0
+#define SMS_PAGE_CTRL_CURRENT 0x00
+#define SMS_PAGE_CTRL_CHANGEABLE 0x40
+#define SMS_PAGE_CTRL_DEFAULT 0x80
+#define SMS_PAGE_CTRL_SAVED 0xC0
+ u_int8_t unused;
+ u_int8_t length;
+ u_int8_t control;
+};
+
+struct scsi_mode_sense_10
+{
+ u_int8_t opcode;
+ u_int8_t byte2; /* same bits as small version */
+ u_int8_t page; /* same bits as small version */
+ u_int8_t unused[4];
+ u_int8_t length[2];
+ u_int8_t control;
+};
+
+struct scsi_mode_select_6
+{
+ u_int8_t opcode;
+ u_int8_t byte2;
+#define SMS_SP 0x01
+#define SMS_PF 0x10
+ u_int8_t unused[2];
+ u_int8_t length;
+ u_int8_t control;
+};
+
+struct scsi_mode_select_10
+{
+ u_int8_t opcode;
+ u_int8_t byte2; /* same bits as small version */
+ u_int8_t unused[5];
+ u_int8_t length[2];
+ u_int8_t control;
+};
+
+/*
+ * When sending a mode select to a tape drive, the medium type must be 0.
+ */
+struct scsi_mode_hdr_6
+{
+ u_int8_t datalen;
+ u_int8_t medium_type;
+ u_int8_t dev_specific;
+ u_int8_t block_descr_len;
+};
+
+struct scsi_mode_hdr_10
+{
+ u_int8_t datalen[2];
+ u_int8_t medium_type;
+ u_int8_t dev_specific;
+ u_int8_t reserved[2];
+ u_int8_t block_descr_len[2];
+};
+
+struct scsi_mode_block_descr
+{
+ u_int8_t density_code;
+ u_int8_t num_blocks[3];
+ u_int8_t reserved;
+ u_int8_t block_len[3];
+};
+
+struct scsi_log_sense
+{
+ u_int8_t opcode;
+ u_int8_t byte2;
+#define SLS_SP 0x01
+#define SLS_PPC 0x02
+ u_int8_t page;
+#define SLS_PAGE_CODE 0x3F
+#define SLS_ALL_PAGES_PAGE 0x00
+#define SLS_OVERRUN_PAGE 0x01
+#define SLS_ERROR_WRITE_PAGE 0x02
+#define SLS_ERROR_READ_PAGE 0x03
+#define SLS_ERROR_READREVERSE_PAGE 0x04
+#define SLS_ERROR_VERIFY_PAGE 0x05
+#define SLS_ERROR_NONMEDIUM_PAGE 0x06
+#define SLS_ERROR_LASTN_PAGE 0x07
+#define SLS_SELF_TEST_PAGE 0x10
+#define SLS_IE_PAGE 0x2f
+#define SLS_PAGE_CTRL_MASK 0xC0
+#define SLS_PAGE_CTRL_THRESHOLD 0x00
+#define SLS_PAGE_CTRL_CUMULATIVE 0x40
+#define SLS_PAGE_CTRL_THRESH_DEFAULT 0x80
+#define SLS_PAGE_CTRL_CUMUL_DEFAULT 0xC0
+ u_int8_t reserved[2];
+ u_int8_t paramptr[2];
+ u_int8_t length[2];
+ u_int8_t control;
+};
+
+struct scsi_log_select
+{
+ u_int8_t opcode;
+ u_int8_t byte2;
+/* SLS_SP 0x01 */
+#define SLS_PCR 0x02
+ u_int8_t page;
+/* SLS_PAGE_CTRL_MASK 0xC0 */
+/* SLS_PAGE_CTRL_THRESHOLD 0x00 */
+/* SLS_PAGE_CTRL_CUMULATIVE 0x40 */
+/* SLS_PAGE_CTRL_THRESH_DEFAULT 0x80 */
+/* SLS_PAGE_CTRL_CUMUL_DEFAULT 0xC0 */
+ u_int8_t reserved[4];
+ u_int8_t length[2];
+ u_int8_t control;
+};
+
+struct scsi_log_header
+{
+ u_int8_t page;
+ u_int8_t reserved;
+ u_int8_t datalen[2];
+};
+
+struct scsi_log_param_header {
+ u_int8_t param_code[2];
+ u_int8_t param_control;
+#define SLP_LP 0x01
+#define SLP_LBIN 0x02
+#define SLP_TMC_MASK 0x0C
+#define SLP_TMC_ALWAYS 0x00
+#define SLP_TMC_EQUAL 0x04
+#define SLP_TMC_NOTEQUAL 0x08
+#define SLP_TMC_GREATER 0x0C
+#define SLP_ETC 0x10
+#define SLP_TSD 0x20
+#define SLP_DS 0x40
+#define SLP_DU 0x80
+ u_int8_t param_len;
+};
+
+struct scsi_control_page {
+ u_int8_t page_code;
+ u_int8_t page_length;
+ u_int8_t rlec;
+#define SCB_RLEC 0x01 /*Report Log Exception Cond*/
+ u_int8_t queue_flags;
+#define SCP_QUEUE_ALG_MASK 0xF0
+#define SCP_QUEUE_ALG_RESTRICTED 0x00
+#define SCP_QUEUE_ALG_UNRESTRICTED 0x10
+#define SCP_QUEUE_ERR 0x02 /*Queued I/O aborted for CACs*/
+#define SCP_QUEUE_DQUE 0x01 /*Queued I/O disabled*/
+ u_int8_t eca_and_aen;
+#define SCP_EECA 0x80 /*Enable Extended CA*/
+#define SCP_RAENP 0x04 /*Ready AEN Permission*/
+#define SCP_UAAENP 0x02 /*UA AEN Permission*/
+#define SCP_EAENP 0x01 /*Error AEN Permission*/
+ u_int8_t reserved;
+ u_int8_t aen_holdoff_period[2];
+};
+
+struct scsi_cache_page {
+ u_int8_t page_code;
+#define SCHP_PAGE_SAVABLE 0x80 /* Page is savable */
+ u_int8_t page_length;
+ u_int8_t cache_flags;
+#define SCHP_FLAGS_WCE 0x04 /* Write Cache Enable */
+#define SCHP_FLAGS_MF 0x02 /* Multiplication factor */
+#define SCHP_FLAGS_RCD 0x01 /* Read Cache Disable */
+ u_int8_t rw_cache_policy;
+ u_int8_t dis_prefetch[2];
+ u_int8_t min_prefetch[2];
+ u_int8_t max_prefetch[2];
+ u_int8_t max_prefetch_ceil[2];
+};
+
+struct scsi_info_exceptions_page {
+ u_int8_t page_code;
+#define SIEP_PAGE_SAVABLE 0x80 /* Page is savable */
+ u_int8_t page_length;
+ u_int8_t info_flags;
+#define SIEP_FLAGS_PERF 0x80
+#define SIEP_FLAGS_EBF 0x20
+#define SIEP_FLAGS_EWASC 0x10
+#define SIEP_FLAGS_DEXCPT 0x08
+#define SIEP_FLAGS_TEST 0x04
+#define SIEP_FLAGS_EBACKERR 0x02
+#define SIEP_FLAGS_LOGERR 0x01
+ u_int8_t mrie;
+ u_int8_t interval_timer[4];
+ u_int8_t report_count[4];
+};
+
+struct scsi_proto_specific_page {
+ u_int8_t page_code;
+#define SPSP_PAGE_SAVABLE 0x80 /* Page is savable */
+ u_int8_t page_length;
+ u_int8_t protocol;
+#define SPSP_PROTO_FC 0x00
+#define SPSP_PROTO_SPI 0x01
+#define SPSP_PROTO_SSA 0x02
+#define SPSP_PROTO_1394 0x03
+#define SPSP_PROTO_RDMA 0x04
+#define SPSP_PROTO_ISCSI 0x05
+#define SPSP_PROTO_SAS 0x06
+#define SPSP_PROTO_ADT 0x07
+#define SPSP_PROTO_ATA 0x08
+#define SPSP_PROTO_NONE 0x0f
+};
+
+struct scsi_reserve
+{
+ u_int8_t opcode;
+ u_int8_t byte2;
+ u_int8_t unused[2];
+ u_int8_t length;
+ u_int8_t control;
+};
+
+struct scsi_release
+{
+ u_int8_t opcode;
+ u_int8_t byte2;
+ u_int8_t unused[2];
+ u_int8_t length;
+ u_int8_t control;
+};
+
+struct scsi_prevent
+{
+ u_int8_t opcode;
+ u_int8_t byte2;
+ u_int8_t unused[2];
+ u_int8_t how;
+ u_int8_t control;
+};
+#define PR_PREVENT 0x01
+#define PR_ALLOW 0x00
+
+struct scsi_sync_cache
+{
+ u_int8_t opcode;
+ u_int8_t byte2;
+ u_int8_t begin_lba[4];
+ u_int8_t reserved;
+ u_int8_t lb_count[2];
+ u_int8_t control;
+};
+
+
+struct scsi_changedef
+{
+ u_int8_t opcode;
+ u_int8_t byte2;
+ u_int8_t unused1;
+ u_int8_t how;
+ u_int8_t unused[4];
+ u_int8_t datalen;
+ u_int8_t control;
+};
+
+struct scsi_read_buffer
+{
+ u_int8_t opcode;
+ u_int8_t byte2;
+#define RWB_MODE 0x07
+#define RWB_MODE_HDR_DATA 0x00
+#define RWB_MODE_DATA 0x02
+#define RWB_MODE_DOWNLOAD 0x04
+#define RWB_MODE_DOWNLOAD_SAVE 0x05
+ u_int8_t buffer_id;
+ u_int8_t offset[3];
+ u_int8_t length[3];
+ u_int8_t control;
+};
+
+struct scsi_write_buffer
+{
+ u_int8_t opcode;
+ u_int8_t byte2;
+ u_int8_t buffer_id;
+ u_int8_t offset[3];
+ u_int8_t length[3];
+ u_int8_t control;
+};
+
+struct scsi_rw_6
+{
+ u_int8_t opcode;
+ u_int8_t addr[3];
+/* only 5 bits are valid in the MSB address byte */
+#define SRW_TOPADDR 0x1F
+ u_int8_t length;
+ u_int8_t control;
+};
+
+struct scsi_rw_10
+{
+ u_int8_t opcode;
+#define SRW10_RELADDR 0x01
+/* EBP defined for WRITE(10) only */
+#define SRW10_EBP 0x04
+#define SRW10_FUA 0x08
+#define SRW10_DPO 0x10
+ u_int8_t byte2;
+ u_int8_t addr[4];
+ u_int8_t reserved;
+ u_int8_t length[2];
+ u_int8_t control;
+};
+
+struct scsi_rw_12
+{
+ u_int8_t opcode;
+#define SRW12_RELADDR 0x01
+#define SRW12_FUA 0x08
+#define SRW12_DPO 0x10
+ u_int8_t byte2;
+ u_int8_t addr[4];
+ u_int8_t length[4];
+ u_int8_t reserved;
+ u_int8_t control;
+};
+
+struct scsi_rw_16
+{
+ u_int8_t opcode;
+#define SRW16_RELADDR 0x01
+#define SRW16_FUA 0x08
+#define SRW16_DPO 0x10
+ u_int8_t byte2;
+ u_int8_t addr[8];
+ u_int8_t length[4];
+ u_int8_t reserved;
+ u_int8_t control;
+};
+
+struct scsi_start_stop_unit
+{
+ u_int8_t opcode;
+ u_int8_t byte2;
+#define SSS_IMMED 0x01
+ u_int8_t reserved[2];
+ u_int8_t how;
+#define SSS_START 0x01
+#define SSS_LOEJ 0x02
+ u_int8_t control;
+};
+
+struct ata_pass_12 {
+ u_int8_t opcode;
+ u_int8_t protocol;
+#define AP_MULTI 0xe0
+ u_int8_t flags;
+#define AP_T_LEN 0x03
+#define AP_BB 0x04
+#define AP_T_DIR 0x08
+#define AP_CK_COND 0x20
+#define AP_OFFLINE 0x60
+ u_int8_t features;
+ u_int8_t sector_count;
+ u_int8_t lba_low;
+ u_int8_t lba_mid;
+ u_int8_t lba_high;
+ u_int8_t device;
+ u_int8_t command;
+ u_int8_t reserved;
+ u_int8_t control;
+};
+
+struct ata_pass_16 {
+ u_int8_t opcode;
+ u_int8_t protocol;
+#define AP_EXTEND 0x01
+ u_int8_t flags;
+ u_int8_t features_ext;
+ u_int8_t features;
+ u_int8_t sector_count_ext;
+ u_int8_t sector_count;
+ u_int8_t lba_low_ext;
+ u_int8_t lba_low;
+ u_int8_t lba_mid_ext;
+ u_int8_t lba_mid;
+ u_int8_t lba_high_ext;
+ u_int8_t lba_high;
+ u_int8_t device;
+ u_int8_t command;
+ u_int8_t control;
+};
+
+#define SC_SCSI_1 0x01
+#define SC_SCSI_2 0x03
+
+/*
+ * Opcodes
+ */
+
+#define TEST_UNIT_READY 0x00
+#define REQUEST_SENSE 0x03
+#define READ_6 0x08
+#define WRITE_6 0x0A
+#define INQUIRY 0x12
+#define MODE_SELECT_6 0x15
+#define MODE_SENSE_6 0x1A
+#define START_STOP_UNIT 0x1B
+#define START_STOP 0x1B
+#define RESERVE 0x16
+#define RELEASE 0x17
+#define RECEIVE_DIAGNOSTIC 0x1C
+#define SEND_DIAGNOSTIC 0x1D
+#define PREVENT_ALLOW 0x1E
+#define READ_CAPACITY 0x25
+#define READ_10 0x28
+#define WRITE_10 0x2A
+#define POSITION_TO_ELEMENT 0x2B
+#define SYNCHRONIZE_CACHE 0x35
+#define READ_DEFECT_DATA_10 0x37
+#define WRITE_BUFFER 0x3B
+#define READ_BUFFER 0x3C
+#define CHANGE_DEFINITION 0x40
+#define LOG_SELECT 0x4C
+#define LOG_SENSE 0x4D
+#define MODE_SELECT_10 0x55
+#define MODE_SENSE_10 0x5A
+#define ATA_PASS_16 0x85
+#define READ_16 0x88
+#define WRITE_16 0x8A
+#define SERVICE_ACTION_IN 0x9E
+#define REPORT_LUNS 0xA0
+#define ATA_PASS_12 0xA1
+#define MAINTENANCE_IN 0xA3
+#define MAINTENANCE_OUT 0xA4
+#define MOVE_MEDIUM 0xA5
+#define READ_12 0xA8
+#define WRITE_12 0xAA
+#define READ_ELEMENT_STATUS 0xB8
+
+/* Maintenance In Service Action Codes */
+#define REPORT_IDENTIFYING_INFORMATION	0x05
+#define REPORT_TARGET_PORT_GROUPS 0x0A
+#define REPORT_ALIASES 0x0B
+#define REPORT_SUPPORTED_OPERATION_CODES 0x0C
+#define REPORT_SUPPORTED_TASK_MANAGEMENT_FUNCTIONS 0x0D
+#define REPORT_PRIORITY 0x0E
+#define REPORT_TIMESTAMP 0x0F
+#define MANAGEMENT_PROTOCOL_IN 0x10
+/* Maintenance Out Service Action Codes */
+#define SET_IDENTIFY_INFORMATION 0x06
+#define SET_TARGET_PORT_GROUPS 0x0A
+#define CHANGE_ALIASES 0x0B
+#define SET_PRIORITY 0x0E
+#define SET_TIMESTAMP 0x0F
+#define MANAGEMENT_PROTOCOL_OUT		0x10
+
+/*
+ * Device Types
+ */
+#define T_DIRECT 0x00
+#define T_SEQUENTIAL 0x01
+#define T_PRINTER 0x02
+#define T_PROCESSOR 0x03
+#define T_WORM 0x04
+#define T_CDROM 0x05
+#define T_SCANNER 0x06
+#define T_OPTICAL 0x07
+#define T_CHANGER 0x08
+#define T_COMM 0x09
+#define T_ASC0 0x0a
+#define T_ASC1 0x0b
+#define T_STORARRAY 0x0c
+#define T_ENCLOSURE 0x0d
+#define T_RBC 0x0e
+#define T_OCRW 0x0f
+#define T_OSD 0x11
+#define T_ADC 0x12
+#define T_NODEVICE 0x1f
+#define T_ANY 0xff /* Used in Quirk table matches */
+
+#define T_REMOV 1
+#define T_FIXED 0
+
+/*
+ * This length is the initial inquiry length used by the probe code, as
+ * well as the length necessary for scsi_print_inquiry() to function
+ * correctly. If either use requires a different length in the future,
+ * the two values should be de-coupled.
+ */
+#define SHORT_INQUIRY_LENGTH 36
+
+struct scsi_inquiry_data
+{
+ u_int8_t device;
+#define SID_TYPE(inq_data) ((inq_data)->device & 0x1f)
+#define SID_QUAL(inq_data) (((inq_data)->device & 0xE0) >> 5)
+#define SID_QUAL_LU_CONNECTED 0x00 /*
+ * The specified peripheral device
+ * type is currently connected to
+					 * this logical unit. If the target cannot
+ * determine whether or not a physical
+ * device is currently connected, it
+ * shall also use this peripheral
+ * qualifier when returning the INQUIRY
+ * data. This peripheral qualifier
+ * does not mean that the device is
+ * ready for access by the initiator.
+ */
+#define SID_QUAL_LU_OFFLINE 0x01 /*
+ * The target is capable of supporting
+ * the specified peripheral device type
+ * on this logical unit; however, the
+ * physical device is not currently
+ * connected to this logical unit.
+ */
+#define SID_QUAL_RSVD 0x02
+#define SID_QUAL_BAD_LU 0x03 /*
+ * The target is not capable of
+ * supporting a physical device on
+ * this logical unit. For this
+ * peripheral qualifier the peripheral
+ * device type shall be set to 1Fh to
+ * provide compatibility with previous
+ * versions of SCSI. All other
+ * peripheral device type values are
+ * reserved for this peripheral
+ * qualifier.
+ */
+/* Qualifiers 4h-7h are vendor specific; SID_QUAL() yields a 3 bit value. */
+#define SID_QUAL_IS_VENDOR_UNIQUE(inq_data) ((SID_QUAL(inq_data) & 0x04) != 0)
+ u_int8_t dev_qual2;
+#define SID_QUAL2 0x7F
+#define SID_IS_REMOVABLE(inq_data) (((inq_data)->dev_qual2 & 0x80) != 0)
+ u_int8_t version;
+#define SID_ANSI_REV(inq_data) ((inq_data)->version & 0x07)
+#define SCSI_REV_0 0
+#define SCSI_REV_CCS 1
+#define SCSI_REV_2 2
+#define SCSI_REV_SPC 3
+#define SCSI_REV_SPC2 4
+#define SCSI_REV_SPC3 5
+#define SCSI_REV_SPC4 6
+
+#define SID_ECMA 0x38
+#define SID_ISO 0xC0
+ u_int8_t response_format;
+#define SID_AENC 0x80
+#define SID_TrmIOP 0x40
+ u_int8_t additional_length;
+#define SID_ADDITIONAL_LENGTH(iqd) \
+ ((iqd)->additional_length + \
+ offsetof(struct scsi_inquiry_data, additional_length) + 1)
+ u_int8_t spc3_flags;
+#define SPC3_SID_PROTECT 0x01
+#define SPC3_SID_3PC 0x08
+#define SPC3_SID_TPGS_MASK 0x30
+#define SPC3_SID_TPGS_IMPLICIT 0x10
+#define SPC3_SID_TPGS_EXPLICIT 0x20
+#define SPC3_SID_ACC 0x40
+#define SPC3_SID_SCCS 0x80
+ u_int8_t spc2_flags;
+#define SPC2_SID_MChngr 0x08
+#define SPC2_SID_MultiP 0x10
+#define SPC2_SID_EncServ 0x40
+#define SPC2_SID_BQueue 0x80
+
+#define INQ_DATA_TQ_ENABLED(iqd) \
+ ((SID_ANSI_REV(iqd) < SCSI_REV_SPC2)? ((iqd)->flags & SID_CmdQue) : \
+ (((iqd)->flags & SID_CmdQue) && !((iqd)->spc2_flags & SPC2_SID_BQueue)) || \
+ (!((iqd)->flags & SID_CmdQue) && ((iqd)->spc2_flags & SPC2_SID_BQueue)))
+
+ u_int8_t flags;
+#define SID_SftRe 0x01
+#define SID_CmdQue 0x02
+#define SID_Linked 0x08
+#define SID_Sync 0x10
+#define SID_WBus16 0x20
+#define SID_WBus32 0x40
+#define SID_RelAdr 0x80
+#define SID_VENDOR_SIZE 8
+ char vendor[SID_VENDOR_SIZE];
+#define SID_PRODUCT_SIZE 16
+ char product[SID_PRODUCT_SIZE];
+#define SID_REVISION_SIZE 4
+ char revision[SID_REVISION_SIZE];
+ /*
+ * The following fields were taken from SCSI Primary Commands - 2
+ * (SPC-2) Revision 14, Dated 11 November 1999
+ */
+#define SID_VENDOR_SPECIFIC_0_SIZE 20
+ u_int8_t vendor_specific0[SID_VENDOR_SPECIFIC_0_SIZE];
+ /*
+ * An extension of SCSI Parallel Specific Values
+ */
+#define SID_SPI_IUS 0x01
+#define SID_SPI_QAS 0x02
+#define SID_SPI_CLOCK_ST 0x00
+#define SID_SPI_CLOCK_DT 0x04
+#define SID_SPI_CLOCK_DT_ST 0x0C
+#define SID_SPI_MASK 0x0F
+ u_int8_t spi3data;
+ u_int8_t reserved2;
+ /*
+	 * Version Descriptors, stored as 2 byte values.
+ */
+ u_int8_t version1[2];
+ u_int8_t version2[2];
+ u_int8_t version3[2];
+ u_int8_t version4[2];
+ u_int8_t version5[2];
+ u_int8_t version6[2];
+ u_int8_t version7[2];
+ u_int8_t version8[2];
+
+ u_int8_t reserved3[22];
+
+#define SID_VENDOR_SPECIFIC_1_SIZE 160
+ u_int8_t vendor_specific1[SID_VENDOR_SPECIFIC_1_SIZE];
+};
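+
+/*
+ * Usage sketch (hypothetical function and buffer names): the accessor
+ * macros above are typically applied to a SHORT_INQUIRY_LENGTH buffer
+ * returned by INQUIRY.  Kept under #if 0 as illustration only.
+ */
+#if 0
+static void
+example_dump_inquiry(struct scsi_inquiry_data *inq_buf)
+{
+	if (SID_QUAL(inq_buf) == SID_QUAL_LU_CONNECTED &&
+	    SID_TYPE(inq_buf) == T_DIRECT)
+		printf("direct access device, ANSI rev %d, TQ %sabled\n",
+		       SID_ANSI_REV(inq_buf),
+		       INQ_DATA_TQ_ENABLED(inq_buf) ? "en" : "dis");
+}
+#endif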
+
+struct scsi_vpd_supported_page_list
+{
+ u_int8_t device;
+ u_int8_t page_code;
+#define SVPD_SUPPORTED_PAGE_LIST 0x00
+ u_int8_t reserved;
+ u_int8_t length; /* number of VPD entries */
+#define SVPD_SUPPORTED_PAGES_SIZE 251
+ u_int8_t list[SVPD_SUPPORTED_PAGES_SIZE];
+};
+
+struct scsi_vpd_unit_serial_number
+{
+ u_int8_t device;
+ u_int8_t page_code;
+#define SVPD_UNIT_SERIAL_NUMBER 0x80
+ u_int8_t reserved;
+ u_int8_t length; /* serial number length */
+#define SVPD_SERIAL_NUM_SIZE 251
+ u_int8_t serial_num[SVPD_SERIAL_NUM_SIZE];
+};
+
+struct scsi_read_capacity
+{
+ u_int8_t opcode;
+ u_int8_t byte2;
+ u_int8_t addr[4];
+ u_int8_t unused[3];
+ u_int8_t control;
+};
+
+struct scsi_read_capacity_16
+{
+ uint8_t opcode;
+#define SRC16_SERVICE_ACTION 0x10
+ uint8_t service_action;
+ uint8_t addr[8];
+ uint8_t alloc_len[4];
+#define SRC16_PMI 0x01
+#define SRC16_RELADR 0x02
+ uint8_t reladr;
+ uint8_t control;
+};
+
+struct scsi_read_capacity_data
+{
+ u_int8_t addr[4];
+ u_int8_t length[4];
+};
+
+struct scsi_read_capacity_data_long
+{
+ uint8_t addr[8];
+ uint8_t length[4];
+};
+
+struct scsi_report_luns
+{
+ uint8_t opcode;
+ uint8_t reserved1;
+#define RPL_REPORT_DEFAULT 0x00
+#define RPL_REPORT_WELLKNOWN 0x01
+#define RPL_REPORT_ALL 0x02
+ uint8_t select_report;
+ uint8_t reserved2[3];
+ uint8_t length[4];
+ uint8_t reserved3;
+ uint8_t control;
+};
+
+struct scsi_report_luns_data {
+ u_int8_t length[4]; /* length of LUN inventory, in bytes */
+ u_int8_t reserved[4]; /* unused */
+ /*
+ * LUN inventory- we only support the type zero form for now.
+ */
+ struct {
+ u_int8_t lundata[8];
+ } luns[0];
+};
+#define RPL_LUNDATA_PERIPH_BUS_MASK 0x3f
+#define RPL_LUNDATA_FLAT_LUN_MASK 0x3f
+#define RPL_LUNDATA_LUN_TARG_MASK 0x3f
+#define RPL_LUNDATA_LUN_BUS_MASK 0xe0
+#define RPL_LUNDATA_LUN_LUN_MASK 0x1f
+#define RPL_LUNDATA_EXT_LEN_MASK 0x30
+#define RPL_LUNDATA_EXT_EAM_MASK 0x0f
+#define RPL_LUNDATA_EXT_EAM_WK 0x01
+#define RPL_LUNDATA_EXT_EAM_NOT_SPEC 0x0f
+#define RPL_LUNDATA_ATYP_MASK 0xc0 /* MBZ for type 0 lun */
+#define RPL_LUNDATA_ATYP_PERIPH 0x00
+#define RPL_LUNDATA_ATYP_FLAT 0x40
+#define RPL_LUNDATA_ATYP_LUN 0x80
+#define RPL_LUNDATA_ATYP_EXTLUN 0xc0
+
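+/*
+ * Walking sketch (hypothetical function name; uses scsi_4btoul(), defined
+ * later in this header): the length field counts bytes of LUN data, at
+ * 8 bytes per LUN entry.
+ */
+#if 0
+static void
+example_walk_luns(struct scsi_report_luns_data *rpl_buf)
+{
+	u_int32_t i, nluns;
+
+	nluns = scsi_4btoul(rpl_buf->length) / 8;
+	for (i = 0; i < nluns; i++) {
+		/* Type 0 (peripheral) entries keep the LUN in byte 1. */
+		if ((rpl_buf->luns[i].lundata[0] &
+		    RPL_LUNDATA_ATYP_MASK) == RPL_LUNDATA_ATYP_PERIPH)
+			printf("LUN %u\n", rpl_buf->luns[i].lundata[1]);
+	}
+}
+#endif
+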
+struct scsi_target_group
+{
+ uint8_t opcode;
+ uint8_t service_action;
+#define STG_PDF_LENGTH 0x00
+#define RPL_PDF_EXTENDED 0x20
+ uint8_t reserved1[4];
+ uint8_t length[4];
+ uint8_t reserved2;
+ uint8_t control;
+};
+
+struct scsi_target_port_descriptor {
+ uint8_t reserved[2];
+ uint8_t relative_target_port_identifier[2];
+ uint8_t desc_list[];
+};
+
+struct scsi_target_port_group_descriptor {
+ uint8_t pref_state;
+#define TPG_PRIMARY 0x80
+#define TPG_ASYMMETRIC_ACCESS_STATE_MASK 0xf
+#define TPG_ASYMMETRIC_ACCESS_OPTIMIZED 0x0
+#define TPG_ASYMMETRIC_ACCESS_NONOPTIMIZED 0x1
+#define TPG_ASYMMETRIC_ACCESS_STANDBY 0x2
+#define TPG_ASYMMETRIC_ACCESS_UNAVAILABLE 0x3
+#define TPG_ASYMMETRIC_ACCESS_LBA_DEPENDENT 0x4
+#define TPG_ASYMMETRIC_ACCESS_OFFLINE 0xE
+#define TPG_ASYMMETRIC_ACCESS_TRANSITIONING 0xF
+ uint8_t support;
+#define TPG_AO_SUP 0x01
+#define TPG_AN_SUP 0x02
+#define TPG_S_SUP 0x04
+#define TPG_U_SUP 0x08
+#define TPG_LBD_SUP 0x10
+#define TPG_O_SUP 0x40
+#define TPG_T_SUP 0x80
+ uint8_t target_port_group[2];
+ uint8_t reserved;
+ uint8_t status;
+ uint8_t vendor_specific;
+ uint8_t target_port_count;
+ struct scsi_target_port_descriptor descriptors[];
+};
+
+struct scsi_target_group_data {
+ uint8_t length[4]; /* length of returned data, in bytes */
+ struct scsi_target_port_group_descriptor groups[];
+};
+
+struct scsi_target_group_data_extended {
+ uint8_t length[4]; /* length of returned data, in bytes */
+ uint8_t format_type; /* STG_PDF_LENGTH or RPL_PDF_EXTENDED */
+ uint8_t implicit_transition_time;
+ uint8_t reserved[2];
+ struct scsi_target_port_group_descriptor groups[];
+};
+
+
+struct scsi_sense_data
+{
+ u_int8_t error_code;
+#define SSD_ERRCODE 0x7F
+#define SSD_CURRENT_ERROR 0x70
+#define SSD_DEFERRED_ERROR 0x71
+#define SSD_ERRCODE_VALID 0x80
+ u_int8_t segment;
+ u_int8_t flags;
+#define SSD_KEY 0x0F
+#define SSD_KEY_NO_SENSE 0x00
+#define SSD_KEY_RECOVERED_ERROR 0x01
+#define SSD_KEY_NOT_READY 0x02
+#define SSD_KEY_MEDIUM_ERROR 0x03
+#define SSD_KEY_HARDWARE_ERROR 0x04
+#define SSD_KEY_ILLEGAL_REQUEST 0x05
+#define SSD_KEY_UNIT_ATTENTION 0x06
+#define SSD_KEY_DATA_PROTECT 0x07
+#define SSD_KEY_BLANK_CHECK 0x08
+#define SSD_KEY_Vendor_Specific 0x09
+#define SSD_KEY_COPY_ABORTED 0x0a
+#define SSD_KEY_ABORTED_COMMAND 0x0b
+#define SSD_KEY_EQUAL 0x0c
+#define SSD_KEY_VOLUME_OVERFLOW 0x0d
+#define SSD_KEY_MISCOMPARE 0x0e
+#define SSD_KEY_RESERVED 0x0f
+#define SSD_ILI 0x20
+#define SSD_EOM 0x40
+#define SSD_FILEMARK 0x80
+ u_int8_t info[4];
+ u_int8_t extra_len;
+ u_int8_t cmd_spec_info[4];
+ u_int8_t add_sense_code;
+ u_int8_t add_sense_code_qual;
+ u_int8_t fru;
+ u_int8_t sense_key_spec[3];
+#define SSD_SCS_VALID 0x80
+#define SSD_FIELDPTR_CMD 0x40
+#define SSD_BITPTR_VALID 0x08
+#define SSD_BITPTR_VALUE 0x07
+#define SSD_MIN_SIZE 18
+ u_int8_t extra_bytes[14];
+#define SSD_FULL_SIZE sizeof(struct scsi_sense_data)
+};
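+
+/*
+ * Decode sketch (hypothetical function name): the fixed-format fields
+ * above are normally pulled out with scsi_extract_sense(), defined
+ * later in this header.
+ */
+#if 0
+static void
+example_decode_sense(struct scsi_sense_data *sense)
+{
+	int error_code, sense_key, asc, ascq;
+
+	scsi_extract_sense(sense, &error_code, &sense_key, &asc, &ascq);
+	if (sense_key == SSD_KEY_UNIT_ATTENTION)
+		printf("unit attention: asc 0x%x ascq 0x%x\n", asc, ascq);
+}
+#endif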
+
+struct scsi_mode_header_6
+{
+ u_int8_t data_length; /* Sense data length */
+ u_int8_t medium_type;
+ u_int8_t dev_spec;
+ u_int8_t blk_desc_len;
+};
+
+struct scsi_mode_header_10
+{
+ u_int8_t data_length[2];/* Sense data length */
+ u_int8_t medium_type;
+ u_int8_t dev_spec;
+ u_int8_t unused[2];
+ u_int8_t blk_desc_len[2];
+};
+
+struct scsi_mode_page_header
+{
+ u_int8_t page_code;
+ u_int8_t page_length;
+};
+
+struct scsi_mode_blk_desc
+{
+ u_int8_t density;
+ u_int8_t nblocks[3];
+ u_int8_t reserved;
+ u_int8_t blklen[3];
+};
+
+#define SCSI_DEFAULT_DENSITY 0x00 /* use 'default' density */
+#define SCSI_SAME_DENSITY 0x7f /* use 'same' density- >= SCSI-2 only */
+
+
+/*
+ * Status Byte
+ */
+#define SCSI_STATUS_OK 0x00
+#define SCSI_STATUS_CHECK_COND 0x02
+#define SCSI_STATUS_COND_MET 0x04
+#define SCSI_STATUS_BUSY 0x08
+#define SCSI_STATUS_INTERMED 0x10
+#define SCSI_STATUS_INTERMED_COND_MET 0x14
+#define SCSI_STATUS_RESERV_CONFLICT 0x18
+#define SCSI_STATUS_CMD_TERMINATED 0x22 /* Obsolete in SAM-2 */
+#define SCSI_STATUS_QUEUE_FULL 0x28
+#define SCSI_STATUS_ACA_ACTIVE 0x30
+#define SCSI_STATUS_TASK_ABORTED 0x40
+
+struct scsi_inquiry_pattern {
+ u_int8_t type;
+ u_int8_t media_type;
+#define SIP_MEDIA_REMOVABLE 0x01
+#define SIP_MEDIA_FIXED 0x02
+ const char *vendor;
+ const char *product;
+ const char *revision;
+};
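+
+/*
+ * A quirk table entry is built from the pattern above; a sketch follows
+ * (vendor and product strings are invented for illustration; CAM matches
+ * them as shell-style glob patterns):
+ */
+#if 0
+static struct scsi_inquiry_pattern example_pattern = {
+	T_DIRECT, SIP_MEDIA_FIXED, "EXAMPLE", "DISK*", "*"
+};
+#endif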
+
+struct scsi_static_inquiry_pattern {
+ u_int8_t type;
+ u_int8_t media_type;
+ char vendor[SID_VENDOR_SIZE+1];
+ char product[SID_PRODUCT_SIZE+1];
+ char revision[SID_REVISION_SIZE+1];
+};
+
+struct scsi_sense_quirk_entry {
+ struct scsi_inquiry_pattern inq_pat;
+ int num_sense_keys;
+ int num_ascs;
+ struct sense_key_table_entry *sense_key_info;
+ struct asc_table_entry *asc_info;
+};
+
+struct sense_key_table_entry {
+ u_int8_t sense_key;
+ u_int32_t action;
+ const char *desc;
+};
+
+struct asc_table_entry {
+ u_int8_t asc;
+ u_int8_t ascq;
+ u_int32_t action;
+ const char *desc;
+};
+
+struct op_table_entry {
+ u_int8_t opcode;
+ u_int32_t opmask;
+ const char *desc;
+};
+
+struct scsi_op_quirk_entry {
+ struct scsi_inquiry_pattern inq_pat;
+ int num_ops;
+ struct op_table_entry *op_table;
+};
+
+typedef enum {
+ SSS_FLAG_NONE = 0x00,
+ SSS_FLAG_PRINT_COMMAND = 0x01
+} scsi_sense_string_flags;
+
+struct ccb_scsiio;
+struct cam_periph;
+union ccb;
+#ifndef _KERNEL
+struct cam_device;
+#endif
+
+extern const char *scsi_sense_key_text[];
+
+struct sbuf;
+
+__BEGIN_DECLS
+void scsi_sense_desc(int sense_key, int asc, int ascq,
+ struct scsi_inquiry_data *inq_data,
+ const char **sense_key_desc, const char **asc_desc);
+scsi_sense_action scsi_error_action(struct ccb_scsiio* csio,
+ struct scsi_inquiry_data *inq_data,
+ u_int32_t sense_flags);
+const char * scsi_status_string(struct ccb_scsiio *csio);
+#ifdef _KERNEL
+int scsi_command_string(struct ccb_scsiio *csio, struct sbuf *sb);
+int scsi_sense_sbuf(struct ccb_scsiio *csio, struct sbuf *sb,
+ scsi_sense_string_flags flags);
+char * scsi_sense_string(struct ccb_scsiio *csio,
+ char *str, int str_len);
+void scsi_sense_print(struct ccb_scsiio *csio);
+int scsi_interpret_sense(union ccb *ccb,
+ u_int32_t sense_flags,
+ u_int32_t *relsim_flags,
+ u_int32_t *reduction,
+ u_int32_t *timeout,
+ scsi_sense_action error_action);
+#else /* _KERNEL */
+int scsi_command_string(struct cam_device *device,
+ struct ccb_scsiio *csio, struct sbuf *sb);
+int scsi_sense_sbuf(struct cam_device *device,
+ struct ccb_scsiio *csio, struct sbuf *sb,
+ scsi_sense_string_flags flags);
+char * scsi_sense_string(struct cam_device *device,
+ struct ccb_scsiio *csio,
+ char *str, int str_len);
+void scsi_sense_print(struct cam_device *device,
+ struct ccb_scsiio *csio, FILE *ofile);
+int scsi_interpret_sense(struct cam_device *device,
+ union ccb *ccb,
+ u_int32_t sense_flags,
+ u_int32_t *relsim_flags,
+ u_int32_t *reduction,
+ u_int32_t *timeout,
+ scsi_sense_action error_action);
+#endif /* _KERNEL */
+
+#define SF_RETRY_UA 0x01
+#define SF_NO_PRINT 0x02
+#define SF_QUIET_IR	0x04	/* Be quiet about Illegal Request responses */
+#define SF_PRINT_ALWAYS 0x08
+
+
+const char * scsi_op_desc(u_int16_t opcode,
+ struct scsi_inquiry_data *inq_data);
+char * scsi_cdb_string(u_int8_t *cdb_ptr, char *cdb_string,
+ size_t len);
+
+void scsi_print_inquiry(struct scsi_inquiry_data *inq_data);
+
+u_int scsi_calc_syncsrate(u_int period_factor);
+u_int scsi_calc_syncparam(u_int period);
+
+void scsi_test_unit_ready(struct ccb_scsiio *csio, u_int32_t retries,
+ void (*cbfcnp)(struct cam_periph *,
+ union ccb *),
+ u_int8_t tag_action,
+ u_int8_t sense_len, u_int32_t timeout);
+
+void scsi_request_sense(struct ccb_scsiio *csio, u_int32_t retries,
+ void (*cbfcnp)(struct cam_periph *,
+ union ccb *),
+ void *data_ptr, u_int8_t dxfer_len,
+ u_int8_t tag_action, u_int8_t sense_len,
+ u_int32_t timeout);
+
+void scsi_inquiry(struct ccb_scsiio *csio, u_int32_t retries,
+ void (*cbfcnp)(struct cam_periph *, union ccb *),
+ u_int8_t tag_action, u_int8_t *inq_buf,
+ u_int32_t inq_len, int evpd, u_int8_t page_code,
+ u_int8_t sense_len, u_int32_t timeout);
+
+void scsi_mode_sense(struct ccb_scsiio *csio, u_int32_t retries,
+ void (*cbfcnp)(struct cam_periph *,
+ union ccb *),
+ u_int8_t tag_action, int dbd,
+ u_int8_t page_code, u_int8_t page,
+ u_int8_t *param_buf, u_int32_t param_len,
+ u_int8_t sense_len, u_int32_t timeout);
+
+void scsi_mode_sense_len(struct ccb_scsiio *csio, u_int32_t retries,
+ void (*cbfcnp)(struct cam_periph *,
+ union ccb *),
+ u_int8_t tag_action, int dbd,
+ u_int8_t page_code, u_int8_t page,
+ u_int8_t *param_buf, u_int32_t param_len,
+ int minimum_cmd_size, u_int8_t sense_len,
+ u_int32_t timeout);
+
+void scsi_mode_select(struct ccb_scsiio *csio, u_int32_t retries,
+ void (*cbfcnp)(struct cam_periph *,
+ union ccb *),
+ u_int8_t tag_action, int scsi_page_fmt,
+ int save_pages, u_int8_t *param_buf,
+ u_int32_t param_len, u_int8_t sense_len,
+ u_int32_t timeout);
+
+void scsi_mode_select_len(struct ccb_scsiio *csio, u_int32_t retries,
+ void (*cbfcnp)(struct cam_periph *,
+ union ccb *),
+ u_int8_t tag_action, int scsi_page_fmt,
+ int save_pages, u_int8_t *param_buf,
+ u_int32_t param_len, int minimum_cmd_size,
+ u_int8_t sense_len, u_int32_t timeout);
+
+void scsi_log_sense(struct ccb_scsiio *csio, u_int32_t retries,
+ void (*cbfcnp)(struct cam_periph *, union ccb *),
+ u_int8_t tag_action, u_int8_t page_code,
+ u_int8_t page, int save_pages, int ppc,
+ u_int32_t paramptr, u_int8_t *param_buf,
+ u_int32_t param_len, u_int8_t sense_len,
+ u_int32_t timeout);
+
+void scsi_log_select(struct ccb_scsiio *csio, u_int32_t retries,
+ void (*cbfcnp)(struct cam_periph *,
+ union ccb *), u_int8_t tag_action,
+ u_int8_t page_code, int save_pages,
+ int pc_reset, u_int8_t *param_buf,
+ u_int32_t param_len, u_int8_t sense_len,
+ u_int32_t timeout);
+
+void scsi_prevent(struct ccb_scsiio *csio, u_int32_t retries,
+ void (*cbfcnp)(struct cam_periph *, union ccb *),
+ u_int8_t tag_action, u_int8_t action,
+ u_int8_t sense_len, u_int32_t timeout);
+
+void scsi_read_capacity(struct ccb_scsiio *csio, u_int32_t retries,
+ void (*cbfcnp)(struct cam_periph *,
+ union ccb *), u_int8_t tag_action,
+ struct scsi_read_capacity_data *,
+ u_int8_t sense_len, u_int32_t timeout);
+void scsi_read_capacity_16(struct ccb_scsiio *csio, uint32_t retries,
+ void (*cbfcnp)(struct cam_periph *,
+ union ccb *), uint8_t tag_action,
+ uint64_t lba, int reladr, int pmi,
+ struct scsi_read_capacity_data_long
+ *rcap_buf, uint8_t sense_len,
+ uint32_t timeout);
+
+void scsi_report_luns(struct ccb_scsiio *csio, u_int32_t retries,
+ void (*cbfcnp)(struct cam_periph *,
+ union ccb *), u_int8_t tag_action,
+ u_int8_t select_report,
+ struct scsi_report_luns_data *rpl_buf,
+ u_int32_t alloc_len, u_int8_t sense_len,
+ u_int32_t timeout);
+
+void scsi_report_target_group(struct ccb_scsiio *csio, u_int32_t retries,
+ void (*cbfcnp)(struct cam_periph *,
+ union ccb *), u_int8_t tag_action,
+ u_int8_t pdf,
+ void *buf,
+ u_int32_t alloc_len, u_int8_t sense_len,
+ u_int32_t timeout);
+
+void scsi_set_target_group(struct ccb_scsiio *csio, u_int32_t retries,
+ void (*cbfcnp)(struct cam_periph *,
+ union ccb *), u_int8_t tag_action, void *buf,
+ u_int32_t alloc_len, u_int8_t sense_len,
+ u_int32_t timeout);
+
+void scsi_synchronize_cache(struct ccb_scsiio *csio,
+ u_int32_t retries,
+ void (*cbfcnp)(struct cam_periph *,
+ union ccb *), u_int8_t tag_action,
+ u_int32_t begin_lba, u_int16_t lb_count,
+ u_int8_t sense_len, u_int32_t timeout);
+
+void scsi_read_write(struct ccb_scsiio *csio, u_int32_t retries,
+ void (*cbfcnp)(struct cam_periph *, union ccb *),
+ u_int8_t tag_action, int readop, u_int8_t byte2,
+ int minimum_cmd_size, u_int64_t lba,
+ u_int32_t block_count, u_int8_t *data_ptr,
+ u_int32_t dxfer_len, u_int8_t sense_len,
+ u_int32_t timeout);
+
+void scsi_start_stop(struct ccb_scsiio *csio, u_int32_t retries,
+ void (*cbfcnp)(struct cam_periph *, union ccb *),
+ u_int8_t tag_action, int start, int load_eject,
+ int immediate, u_int8_t sense_len, u_int32_t timeout);
+
+int scsi_inquiry_match(caddr_t inqbuffer, caddr_t table_entry);
+int scsi_static_inquiry_match(caddr_t inqbuffer,
+ caddr_t table_entry);
+
+static __inline void scsi_extract_sense(struct scsi_sense_data *sense,
+ int *error_code, int *sense_key,
+ int *asc, int *ascq);
+static __inline void scsi_ulto2b(u_int32_t val, u_int8_t *bytes);
+static __inline void scsi_ulto3b(u_int32_t val, u_int8_t *bytes);
+static __inline void scsi_ulto4b(u_int32_t val, u_int8_t *bytes);
+static __inline void scsi_u64to8b(u_int64_t val, u_int8_t *bytes);
+static __inline u_int32_t scsi_2btoul(u_int8_t *bytes);
+static __inline u_int32_t scsi_3btoul(u_int8_t *bytes);
+static __inline int32_t scsi_3btol(u_int8_t *bytes);
+static __inline u_int32_t scsi_4btoul(u_int8_t *bytes);
+static __inline u_int64_t scsi_8btou64(u_int8_t *bytes);
+static __inline void *find_mode_page_6(struct scsi_mode_header_6 *mode_header);
+static __inline void *find_mode_page_10(struct scsi_mode_header_10 *mode_header);
+
+static __inline void scsi_extract_sense(struct scsi_sense_data *sense,
+ int *error_code, int *sense_key,
+ int *asc, int *ascq)
+{
+ *error_code = sense->error_code & SSD_ERRCODE;
+ *sense_key = sense->flags & SSD_KEY;
+ *asc = (sense->extra_len >= 5) ? sense->add_sense_code : 0;
+ *ascq = (sense->extra_len >= 6) ? sense->add_sense_code_qual : 0;
+}
+
+static __inline void
+scsi_ulto2b(u_int32_t val, u_int8_t *bytes)
+{
+
+ bytes[0] = (val >> 8) & 0xff;
+ bytes[1] = val & 0xff;
+}
+
+static __inline void
+scsi_ulto3b(u_int32_t val, u_int8_t *bytes)
+{
+
+ bytes[0] = (val >> 16) & 0xff;
+ bytes[1] = (val >> 8) & 0xff;
+ bytes[2] = val & 0xff;
+}
+
+static __inline void
+scsi_ulto4b(u_int32_t val, u_int8_t *bytes)
+{
+
+ bytes[0] = (val >> 24) & 0xff;
+ bytes[1] = (val >> 16) & 0xff;
+ bytes[2] = (val >> 8) & 0xff;
+ bytes[3] = val & 0xff;
+}
+
+static __inline void
+scsi_u64to8b(u_int64_t val, u_int8_t *bytes)
+{
+
+ bytes[0] = (val >> 56) & 0xff;
+ bytes[1] = (val >> 48) & 0xff;
+ bytes[2] = (val >> 40) & 0xff;
+ bytes[3] = (val >> 32) & 0xff;
+ bytes[4] = (val >> 24) & 0xff;
+ bytes[5] = (val >> 16) & 0xff;
+ bytes[6] = (val >> 8) & 0xff;
+ bytes[7] = val & 0xff;
+}
+
+static __inline u_int32_t
+scsi_2btoul(u_int8_t *bytes)
+{
+ u_int32_t rv;
+
+ rv = (bytes[0] << 8) |
+ bytes[1];
+ return (rv);
+}
+
+static __inline u_int32_t
+scsi_3btoul(u_int8_t *bytes)
+{
+ u_int32_t rv;
+
+ rv = (bytes[0] << 16) |
+ (bytes[1] << 8) |
+ bytes[2];
+ return (rv);
+}
+
+static __inline int32_t
+scsi_3btol(u_int8_t *bytes)
+{
+ u_int32_t rc = scsi_3btoul(bytes);
+
+ if (rc & 0x00800000)
+ rc |= 0xff000000;
+
+ return (int32_t) rc;
+}
+
+static __inline u_int32_t
+scsi_4btoul(u_int8_t *bytes)
+{
+ u_int32_t rv;
+
+ rv = (bytes[0] << 24) |
+ (bytes[1] << 16) |
+ (bytes[2] << 8) |
+ bytes[3];
+ return (rv);
+}
+
+static __inline uint64_t
+scsi_8btou64(uint8_t *bytes)
+{
+ uint64_t rv;
+
+ rv = (((uint64_t)bytes[0]) << 56) |
+ (((uint64_t)bytes[1]) << 48) |
+ (((uint64_t)bytes[2]) << 40) |
+ (((uint64_t)bytes[3]) << 32) |
+ (((uint64_t)bytes[4]) << 24) |
+ (((uint64_t)bytes[5]) << 16) |
+ (((uint64_t)bytes[6]) << 8) |
+ bytes[7];
+ return (rv);
+}
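+
+/*
+ * Round-trip sketch (hypothetical function name): the scsi_*to*b() and
+ * scsi_*bto*() helpers above convert between host integers and the big
+ * endian byte arrays used in CDBs and returned data.
+ */
+#if 0
+static void
+example_lba_roundtrip(void)
+{
+	u_int8_t wire[8];
+	u_int64_t lba = 0x123456789aULL;
+
+	scsi_u64to8b(lba, wire);		/* host to big endian */
+	if (scsi_8btou64(wire) != lba)		/* big endian to host */
+		printf("byte order helpers disagree\n");
+}
+#endif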
+
+/*
+ * Given the pointer to a returned mode sense buffer, return a pointer to
+ * the start of the first mode page.
+ */
+static __inline void *
+find_mode_page_6(struct scsi_mode_header_6 *mode_header)
+{
+ void *page_start;
+
+ page_start = (void *)((u_int8_t *)&mode_header[1] +
+ mode_header->blk_desc_len);
+
+ return(page_start);
+}
+
+static __inline void *
+find_mode_page_10(struct scsi_mode_header_10 *mode_header)
+{
+ void *page_start;
+
+ page_start = (void *)((u_int8_t *)&mode_header[1] +
+ scsi_2btoul(mode_header->blk_desc_len));
+
+ return(page_start);
+}
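+
+/*
+ * Usage sketch (hypothetical function and buffer names): after a MODE
+ * SENSE(6) completes into mode_buf, the first page header follows the
+ * fixed header and any block descriptors, which is what
+ * find_mode_page_6() computes.
+ */
+#if 0
+static struct scsi_mode_page_header *
+example_first_page(struct scsi_mode_header_6 *mode_buf)
+{
+	return ((struct scsi_mode_page_header *)find_mode_page_6(mode_buf));
+}
+#endif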
+
+__END_DECLS
+
+#endif /*_SCSI_SCSI_ALL_H*/
diff --git a/rtems/freebsd/cam/scsi/scsi_da.h b/rtems/freebsd/cam/scsi/scsi_da.h
new file mode 100644
index 00000000..4c6019f2
--- /dev/null
+++ b/rtems/freebsd/cam/scsi/scsi_da.h
@@ -0,0 +1,463 @@
+/*
+ * Structures and definitions for SCSI commands to Direct Access Devices
+ */
+
+/*-
+ * Some lines of this file come from a file of the name "scsi.h"
+ * distributed by OSF as part of mach2.5,
+ * so the following disclaimer has been kept.
+ *
+ * Copyright 1990 by Open Software Foundation,
+ * Grenoble, FRANCE
+ *
+ * All Rights Reserved
+ *
+ * Permission to use, copy, modify, and distribute this software and
+ * its documentation for any purpose and without fee is hereby granted,
+ * provided that the above copyright notice appears in all copies and
+ * that both the copyright notice and this permission notice appear in
+ * supporting documentation, and that the name of OSF or Open Software
+ * Foundation not be used in advertising or publicity pertaining to
+ * distribution of the software without specific, written prior
+ * permission.
+ *
+ * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS,
+ * IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+ * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
+ * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+ * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*-
+ * Largely written by Julian Elischer (julian@tfs.com)
+ * for TRW Financial Systems.
+ *
+ * TRW Financial Systems, in accordance with their agreement with Carnegie
+ * Mellon University, makes this software available to CMU to distribute
+ * or use in any manner that they see fit as long as this message is kept with
+ * the software. For this reason TFS also grants any other persons or
+ * organisations permission to use or modify this software.
+ *
+ * TFS supplies this software to be publicly redistributed
+ * on the understanding that TFS is not responsible for the correct
+ * functioning of this software in any circumstances.
+ *
+ * Ported to run under 386BSD by Julian Elischer (julian@tfs.com) Sept 1992
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _SCSI_SCSI_DA_H
+#define _SCSI_SCSI_DA_H 1
+
+#include <rtems/freebsd/sys/cdefs.h>
+
+struct scsi_rezero_unit
+{
+ u_int8_t opcode;
+#define SRZU_LUN_MASK 0xE0
+ u_int8_t byte2;
+ u_int8_t reserved[3];
+ u_int8_t control;
+};
+
+/*
+ * NOTE: The lower three bits of byte2 of the format CDB are the same as
+ * the lower three bits of byte2 of the read defect data CDB, below.
+ */
+struct scsi_format_unit
+{
+ u_int8_t opcode;
+ u_int8_t byte2;
+#define FU_FORMAT_MASK SRDD10_DLIST_FORMAT_MASK
+#define FU_BLOCK_FORMAT SRDD10_BLOCK_FORMAT
+#define FU_BFI_FORMAT SRDD10_BYTES_FROM_INDEX_FORMAT
+#define FU_PHYS_FORMAT SRDD10_PHYSICAL_SECTOR_FORMAT
+#define FU_CMPLST 0x08
+#define FU_FMT_DATA 0x10
+ u_int8_t vendor_specific;
+ u_int8_t interleave[2];
+ u_int8_t control;
+};
+
+struct scsi_reassign_blocks
+{
+ u_int8_t opcode;
+ u_int8_t byte2;
+ u_int8_t unused[3];
+ u_int8_t control;
+};
+
+struct scsi_read_defect_data_10
+{
+ u_int8_t opcode;
+
+ /*
+ * The most significant 3 bits are the LUN, the other 5 are
+ * reserved.
+ */
+#define SRDD10_LUN_MASK 0xE0
+ u_int8_t byte2;
+#define SRDD10_GLIST 0x08
+#define SRDD10_PLIST 0x10
+#define SRDD10_DLIST_FORMAT_MASK 0x07
+#define SRDD10_BLOCK_FORMAT 0x00
+#define SRDD10_BYTES_FROM_INDEX_FORMAT 0x04
+#define SRDD10_PHYSICAL_SECTOR_FORMAT 0x05
+ u_int8_t format;
+
+ u_int8_t reserved[4];
+
+ u_int8_t alloc_length[2];
+
+ u_int8_t control;
+};
+
+struct scsi_read_defect_data_12
+{
+ u_int8_t opcode;
+
+ /*
+ * The most significant 3 bits are the LUN, the other 5 are
+ * reserved.
+ */
+#define SRDD12_LUN_MASK 0xE0
+ u_int8_t byte2;
+
+#define SRDD12_GLIST 0x08
+#define SRDD12_PLIST 0x10
+#define SRDD12_DLIST_FORMAT_MASK 0x07
+#define SRDD12_BLOCK_FORMAT 0x00
+#define SRDD12_BYTES_FROM_INDEX_FORMAT 0x04
+#define SRDD12_PHYSICAL_SECTOR_FORMAT 0x05
+ u_int8_t format;
+
+ u_int8_t reserved[4];
+
+ u_int8_t alloc_length[4];
+
+ u_int8_t control;
+
+};
+
+
+/*
+ * Opcodes
+ */
+#define REZERO_UNIT 0x01
+#define FORMAT_UNIT 0x04
+#define REASSIGN_BLOCKS 0x07
+#define MODE_SELECT 0x15
+#define MODE_SENSE 0x1a
+#define READ_FORMAT_CAPACITIES 0x23
+#define WRITE_AND_VERIFY 0x2e
+#define VERIFY 0x2f
+#define READ_DEFECT_DATA_10 0x37
+#define READ_DEFECT_DATA_12 0xb7
+
+struct format_defect_list_header
+{
+ u_int8_t reserved;
+ u_int8_t byte2;
+#define FU_DLH_VS 0x01
+#define FU_DLH_IMMED 0x02
+#define FU_DLH_DSP 0x04
+#define FU_DLH_IP 0x08
+#define FU_DLH_STPF 0x10
+#define FU_DLH_DCRT 0x20
+#define FU_DLH_DPRY 0x40
+#define FU_DLH_FOV 0x80
+ u_int8_t defect_list_length[2];
+};
+
+struct format_ipat_descriptor
+{
+ u_int8_t byte1;
+#define FU_INIT_NO_HDR 0x00
+#define FU_INIT_LBA_MSB 0x40
+#define FU_INIT_LBA_EACH 0x80
+#define FU_INIT_SI 0x20
+ u_int8_t pattern_type;
+#define FU_INIT_PAT_DEFAULT 0x00
+#define FU_INIT_PAT_REPEAT 0x01
+ u_int8_t pat_length[2];
+};
+
+struct scsi_read_format_capacities
+{
+ uint8_t opcode; /* READ_FORMAT_CAPACITIES */
+ uint8_t byte2;
+#define SRFC_LUN_MASK 0xE0
+ uint8_t reserved0[5];
+ uint8_t alloc_length[2];
+ uint8_t reserved1[3];
+};
+
+struct scsi_verify
+{
+ uint8_t opcode; /* VERIFY */
+ uint8_t byte2;
+#define SVFY_LUN_MASK 0xE0
+#define SVFY_RELADR 0x01
+#define SVFY_BYTECHK 0x02
+#define SVFY_DPO 0x10
+ uint8_t addr[4]; /* LBA to begin verification at */
+ uint8_t reserved0[1];
+ uint8_t len[2]; /* number of blocks to verify */
+ uint8_t reserved1[3];
+};
+
+struct scsi_write_and_verify
+{
+ uint8_t opcode; /* WRITE_AND_VERIFY */
+ uint8_t byte2;
+#define SWVY_LUN_MASK 0xE0
+#define SWVY_RELADR 0x01
+#define SWVY_BYTECHK 0x02
+#define SWVY_DPO 0x10
+ uint8_t addr[4]; /* LBA to begin verification at */
+ uint8_t reserved0[1];
+ uint8_t len[2]; /* number of blocks to write and verify */
+ uint8_t reserved1[3];
+};
+
+/*
+ * Replies to READ_FORMAT_CAPACITIES look like this:
+ *
+ * struct format_capacity_list_header
+ * struct format_capacity_descriptor[1..n]
+ *
+ * These are similar, but not totally identical to, the
+ * defect list used to format a rigid disk.
+ *
+ * The appropriate csio_decode() format string looks like this:
+ * "{} *i3 {Len} i1 {Blocks} i4 {} *b6 {Code} b2 {Blocklen} i3"
+ *
+ * If the capacity_list_length is greater than
+ * sizeof(struct format_capacity_descriptor), then there are
+ * additional format capacity descriptors available which
+ * denote which format(s) the drive can handle.
+ *
+ * (Source: USB Mass Storage UFI Specification)
+ */
+
+struct format_capacity_list_header {
+ uint8_t unused[3];
+ uint8_t capacity_list_length;
+};
+
+struct format_capacity_descriptor {
+ uint8_t nblocks[4]; /* total number of LBAs */
+ uint8_t byte4; /* only present in max/cur descriptor */
+#define FCD_CODE_MASK 0x03 /* mask for code field above */
+#define FCD_UNFORMATTED 0x01 /* unformatted media present,
+ * maximum capacity returned */
+#define FCD_FORMATTED 0x02 /* formatted media present,
+ * current capacity returned */
+#define FCD_NOMEDIA 0x03 /* no media present,
+ * maximum device capacity returned */
+ uint8_t block_length[3]; /* length of an LBA in bytes */
+};
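+
+/*
+ * Parsing sketch for the reply layout described above (hypothetical
+ * function and buffer names; scsi_3btoul() and scsi_4btoul() come from
+ * scsi_all.h).  The current/maximum capacity descriptor always follows
+ * the list header.
+ */
+#if 0
+static void
+example_parse_capacities(u_int8_t *buf)
+{
+	struct format_capacity_list_header *hdr = (void *)buf;
+	struct format_capacity_descriptor *desc = (void *)(hdr + 1);
+
+	printf("%u blocks of %u bytes (%sformatted media)\n",
+	       scsi_4btoul(desc->nblocks),
+	       scsi_3btoul(desc->block_length),
+	       (desc->byte4 & FCD_CODE_MASK) == FCD_FORMATTED ? "" : "un");
+}
+#endif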
+
+struct scsi_reassign_blocks_data
+{
+ u_int8_t reserved[2];
+ u_int8_t length[2];
+ struct {
+ u_int8_t dlbaddr[4]; /* defect logical block address */
+ } defect_descriptor[1];
+};
+
+
+/*
+ * This is the list header for the READ DEFECT DATA(10) command above.
+ * Appending the 10 to the end of the structure name may seem a bit odd,
+ * since the header is only 4 bytes long, but it does tie the header to
+ * the 10 byte command.
+ */
+struct scsi_read_defect_data_hdr_10
+{
+ u_int8_t reserved;
+#define SRDDH10_GLIST 0x08
+#define SRDDH10_PLIST 0x10
+#define SRDDH10_DLIST_FORMAT_MASK 0x07
+#define SRDDH10_BLOCK_FORMAT 0x00
+#define SRDDH10_BYTES_FROM_INDEX_FORMAT 0x04
+#define SRDDH10_PHYSICAL_SECTOR_FORMAT 0x05
+ u_int8_t format;
+ u_int8_t length[2];
+};
+
+struct scsi_defect_desc_block
+{
+ u_int8_t address[4];
+};
+
+struct scsi_defect_desc_bytes_from_index
+{
+ u_int8_t cylinder[3];
+ u_int8_t head;
+ u_int8_t bytes_from_index[4];
+};
+
+struct scsi_defect_desc_phys_sector
+{
+ u_int8_t cylinder[3];
+ u_int8_t head;
+ u_int8_t sector[4];
+};
+
+struct scsi_read_defect_data_hdr_12
+{
+ u_int8_t reserved;
+#define SRDDH12_GLIST 0x08
+#define SRDDH12_PLIST 0x10
+#define SRDDH12_DLIST_FORMAT_MASK 0x07
+#define SRDDH12_BLOCK_FORMAT 0x00
+#define SRDDH12_BYTES_FROM_INDEX_FORMAT 0x04
+#define SRDDH12_PHYSICAL_SECTOR_FORMAT 0x05
+ u_int8_t format;
+ u_int8_t length[4];
+};
+
+union disk_pages		/* this is the structure copied from OSF */
+{
+ struct format_device_page {
+ u_int8_t pg_code; /* page code (should be 3) */
+#define SMS_FORMAT_DEVICE_PAGE 0x03 /* only 6 bits valid */
+ u_int8_t pg_length; /* page length (should be 0x16) */
+#define SMS_FORMAT_DEVICE_PLEN 0x16
+ u_int8_t trk_z_1; /* tracks per zone (MSB) */
+ u_int8_t trk_z_0; /* tracks per zone (LSB) */
+ u_int8_t alt_sec_1; /* alternate sectors per zone (MSB) */
+ u_int8_t alt_sec_0; /* alternate sectors per zone (LSB) */
+ u_int8_t alt_trk_z_1; /* alternate tracks per zone (MSB) */
+ u_int8_t alt_trk_z_0; /* alternate tracks per zone (LSB) */
+ u_int8_t alt_trk_v_1; /* alternate tracks per volume (MSB) */
+ u_int8_t alt_trk_v_0; /* alternate tracks per volume (LSB) */
+ u_int8_t ph_sec_t_1; /* physical sectors per track (MSB) */
+ u_int8_t ph_sec_t_0; /* physical sectors per track (LSB) */
+ u_int8_t bytes_s_1; /* bytes per sector (MSB) */
+ u_int8_t bytes_s_0; /* bytes per sector (LSB) */
+ u_int8_t interleave_1; /* interleave (MSB) */
+ u_int8_t interleave_0; /* interleave (LSB) */
+ u_int8_t trk_skew_1; /* track skew factor (MSB) */
+ u_int8_t trk_skew_0; /* track skew factor (LSB) */
+ u_int8_t cyl_skew_1; /* cylinder skew (MSB) */
+ u_int8_t cyl_skew_0; /* cylinder skew (LSB) */
+ u_int8_t flags; /* various */
+#define DISK_FMT_SURF 0x10
+#define DISK_FMT_RMB 0x20
+#define DISK_FMT_HSEC 0x40
+#define DISK_FMT_SSEC 0x80
+ u_int8_t reserved21;
+ u_int8_t reserved22;
+ u_int8_t reserved23;
+ } format_device;
+ struct rigid_geometry_page {
+ u_int8_t pg_code; /* page code (should be 4) */
+#define SMS_RIGID_GEOMETRY_PAGE 0x04
+ u_int8_t pg_length; /* page length (should be 0x16) */
+#define SMS_RIGID_GEOMETRY_PLEN 0x16
+ u_int8_t ncyl_2; /* number of cylinders (MSB) */
+ u_int8_t ncyl_1; /* number of cylinders */
+ u_int8_t ncyl_0; /* number of cylinders (LSB) */
+ u_int8_t nheads; /* number of heads */
+ u_int8_t st_cyl_wp_2; /* starting cyl., write precomp (MSB) */
+ u_int8_t st_cyl_wp_1; /* starting cyl., write precomp */
+ u_int8_t st_cyl_wp_0; /* starting cyl., write precomp (LSB) */
+ u_int8_t st_cyl_rwc_2; /* starting cyl., red. write cur (MSB)*/
+ u_int8_t st_cyl_rwc_1; /* starting cyl., red. write cur */
+ u_int8_t st_cyl_rwc_0; /* starting cyl., red. write cur (LSB)*/
+ u_int8_t driv_step_1; /* drive step rate (MSB) */
+ u_int8_t driv_step_0; /* drive step rate (LSB) */
+ u_int8_t land_zone_2; /* landing zone cylinder (MSB) */
+ u_int8_t land_zone_1; /* landing zone cylinder */
+ u_int8_t land_zone_0; /* landing zone cylinder (LSB) */
+ u_int8_t rpl; /* rotational position locking (2 bits) */
+ u_int8_t rot_offset; /* rotational offset */
+ u_int8_t reserved19;
+ u_int8_t medium_rot_rate_1; /* medium rotation rate (RPM) (MSB) */
+ u_int8_t medium_rot_rate_0; /* medium rotation rate (RPM) (LSB) */
+ u_int8_t reserved22;
+ u_int8_t reserved23;
+ } rigid_geometry;
+ struct flexible_disk_page {
+ u_int8_t pg_code; /* page code (should be 5) */
+#define SMS_FLEXIBLE_GEOMETRY_PAGE 0x05
+ u_int8_t pg_length; /* page length (should be 0x1E) */
+#define SMS_FLEXIBLE_GEOMETRY_PLEN 0x1E
+ u_int8_t xfr_rate_1; /* transfer rate (MSB) */
+ u_int8_t xfr_rate_0; /* transfer rate (LSB) */
+ u_int8_t nheads; /* number of heads */
+ u_int8_t sec_per_track; /* Sectors per track */
+ u_int8_t bytes_s_1; /* bytes per sector (MSB) */
+ u_int8_t bytes_s_0; /* bytes per sector (LSB) */
+ u_int8_t ncyl_1; /* number of cylinders (MSB) */
+ u_int8_t ncyl_0; /* number of cylinders (LSB) */
+ u_int8_t st_cyl_wp_1; /* starting cyl., write precomp (MSB) */
+ u_int8_t st_cyl_wp_0; /* starting cyl., write precomp (LSB) */
+ u_int8_t st_cyl_rwc_1; /* starting cyl., red. write cur (MSB)*/
+ u_int8_t st_cyl_rwc_0; /* starting cyl., red. write cur (LSB)*/
+ u_int8_t driv_step_1; /* drive step rate (MSB) */
+ u_int8_t driv_step_0; /* drive step rate (LSB) */
+ u_int8_t driv_step_pw; /* drive step pulse width */
+ u_int8_t head_stl_del_1;/* Head settle delay (MSB) */
+ u_int8_t head_stl_del_0;/* Head settle delay (LSB) */
+ u_int8_t motor_on_del; /* Motor on delay */
+ u_int8_t motor_off_del; /* Motor off delay */
+ u_int8_t trdy_ssn_mo; /* XXX ??? */
+ u_int8_t spc; /* XXX ??? */
+ u_int8_t write_comp; /* Write compensation */
+ u_int8_t head_load_del; /* Head load delay */
+ u_int8_t head_uload_del;/* Head un-load delay */
+ u_int8_t pin32_pin2;
+ u_int8_t pin4_pint1;
+ u_int8_t medium_rot_rate_1; /* medium rotation rate (RPM) (MSB) */
+ u_int8_t medium_rot_rate_0; /* medium rotation rate (RPM) (LSB) */
+ u_int8_t reserved30;
+ u_int8_t reserved31;
+ } flexible_disk;
+};
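+
+/*
+ * The multi-byte geometry fields above are split into individual MSB/LSB
+ * bytes; a reassembly sketch for the cylinder count (hypothetical
+ * function name):
+ */
+#if 0
+static u_int32_t
+example_ncylinders(struct rigid_geometry_page *pg)
+{
+	return (((u_int32_t)pg->ncyl_2 << 16) |
+	    ((u_int32_t)pg->ncyl_1 << 8) | pg->ncyl_0);
+}
+#endif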
+
+struct scsi_da_rw_recovery_page {
+ u_int8_t page_code;
+#define SMS_RW_ERROR_RECOVERY_PAGE 0x01
+ u_int8_t page_length;
+ u_int8_t byte3;
+#define SMS_RWER_AWRE 0x80
+#define SMS_RWER_ARRE 0x40
+#define SMS_RWER_TB 0x20
+#define SMS_RWER_RC 0x10
+#define SMS_RWER_EER 0x08
+#define SMS_RWER_PER 0x04
+#define SMS_RWER_DTE 0x02
+#define SMS_RWER_DCR 0x01
+ u_int8_t read_retry_count;
+ u_int8_t correction_span;
+ u_int8_t head_offset_count;
+ u_int8_t data_strobe_offset_cnt;
+ u_int8_t reserved;
+ u_int8_t write_retry_count;
+ u_int8_t reserved2;
+ u_int8_t recovery_time_limit[2];
+};
+
+__BEGIN_DECLS
+/*
+ * XXX This is only left out of the kernel build to silence warnings. If,
+ * for some reason, this function is used in the kernel, the ifdefs should
+ * be moved so it is included both in the kernel and userland.
+ */
+#ifndef _KERNEL
+void scsi_format_unit(struct ccb_scsiio *csio, u_int32_t retries,
+ void (*cbfcnp)(struct cam_periph *, union ccb *),
+ u_int8_t tag_action, u_int8_t byte2, u_int16_t ileave,
+ u_int8_t *data_ptr, u_int32_t dxfer_len,
+ u_int8_t sense_len, u_int32_t timeout);
+
+#endif /* !_KERNEL */
+__END_DECLS
+
+#endif /* _SCSI_SCSI_DA_H */
diff --git a/rtems/freebsd/crypto/blowfish/bf_ecb.c b/rtems/freebsd/crypto/blowfish/bf_ecb.c
new file mode 100644
index 00000000..e411335c
--- /dev/null
+++ b/rtems/freebsd/crypto/blowfish/bf_ecb.c
@@ -0,0 +1,88 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/* crypto/bf/bf_ecb.c */
+/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
+ * All rights reserved.
+ *
+ * This package is an SSL implementation written
+ * by Eric Young (eay@cryptsoft.com).
+ * The implementation was written so as to conform with Netscapes SSL.
+ *
+ * This library is free for commercial and non-commercial use as long as
+ * the following conditions are aheared to. The following conditions
+ * apply to all code found in this distribution, be it the RC4, RSA,
+ * lhash, DES, etc., code; not just the SSL code. The SSL documentation
+ * included with this distribution is covered by the same copyright terms
+ * except that the holder is Tim Hudson (tjh@cryptsoft.com).
+ *
+ * Copyright remains Eric Young's, and as such any Copyright notices in
+ * the code are not to be removed.
+ * If this package is used in a product, Eric Young should be given attribution
+ * as the author of the parts of the library used.
+ * This can be in the form of a textual message at program startup or
+ * in documentation (online or textual) provided with the package.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * "This product includes cryptographic software written by
+ * Eric Young (eay@cryptsoft.com)"
+ * The word 'cryptographic' can be left out if the rouines from the library
+ * being used are not cryptographic related :-).
+ * 4. If you include any Windows specific code (or a derivative thereof) from
+ * the apps directory (application code) you must include an acknowledgement:
+ * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
+ *
+ * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * The licence and distribution terms for any publically available version or
+ * derivative of this code cannot be changed. i.e. this code cannot simply be
+ * copied and put under another distribution licence
+ * [including the GNU Public Licence.]
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/freebsd/sys/types.h>
+#include <rtems/freebsd/crypto/blowfish/blowfish.h>
+#include <rtems/freebsd/crypto/blowfish/bf_locl.h>
+
+/* Blowfish as implemented from 'Blowfish: Springer-Verlag paper'
+ * (From LECTURE NOTES IN COMPUTER SCIENCE 809, FAST SOFTWARE ENCRYPTION,
+ * CAMBRIDGE SECURITY WORKSHOP, CAMBRIDGE, U.K., DECEMBER 9-11, 1993)
+ */
+
+void BF_ecb_encrypt(const unsigned char *in, unsigned char *out,
+ BF_KEY *key, int encrypt)
+ {
+ BF_LONG l,d[2];
+
+ n2l(in,l); d[0]=l;
+ n2l(in,l); d[1]=l;
+ if (encrypt)
+ BF_encrypt(d,key);
+ else
+ BF_decrypt(d,key);
+ l=d[0]; l2n(l,out);
+ l=d[1]; l2n(l,out);
+ l=d[0]=d[1]=0;
+ }
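+
+/*
+ * Usage sketch (hypothetical names; BF_set_key() is declared in
+ * blowfish.h): ECB mode transforms exactly one 8 byte block per call.
+ */
+#if 0
+static void
+example_bf_ecb(void)
+{
+	BF_KEY key;
+	unsigned char keybytes[16] = "0123456789abcdef";
+	unsigned char in[8] = "8 bytes";	/* one block */
+	unsigned char out[8], back[8];
+
+	BF_set_key(&key, sizeof(keybytes), keybytes);
+	BF_ecb_encrypt(in, out, &key, 1);	/* 1 = encrypt */
+	BF_ecb_encrypt(out, back, &key, 0);	/* 0 = decrypt */
+}
+#endif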
+
diff --git a/rtems/freebsd/crypto/blowfish/bf_enc.c b/rtems/freebsd/crypto/blowfish/bf_enc.c
new file mode 100644
index 00000000..24807c05
--- /dev/null
+++ b/rtems/freebsd/crypto/blowfish/bf_enc.c
@@ -0,0 +1,163 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/* $KAME: bf_enc.c,v 1.7 2002/02/27 01:33:59 itojun Exp $ */
+
+/* crypto/bf/bf_enc.c */
+
+/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
+ * All rights reserved.
+ *
+ * This package is an SSL implementation written
+ * by Eric Young (eay@cryptsoft.com).
+ * The implementation was written so as to conform with Netscapes SSL.
+ *
+ * This library is free for commercial and non-commercial use as long as
+ * the following conditions are aheared to. The following conditions
+ * apply to all code found in this distribution, be it the RC4, RSA,
+ * lhash, DES, etc., code; not just the SSL code. The SSL documentation
+ * included with this distribution is covered by the same copyright terms
+ * except that the holder is Tim Hudson (tjh@cryptsoft.com).
+ *
+ * Copyright remains Eric Young's, and as such any Copyright notices in
+ * the code are not to be removed.
+ * If this package is used in a product, Eric Young should be given attribution
+ * as the author of the parts of the library used.
+ * This can be in the form of a textual message at program startup or
+ * in documentation (online or textual) provided with the package.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * "This product includes cryptographic software written by
+ * Eric Young (eay@cryptsoft.com)"
+ * The word 'cryptographic' can be left out if the rouines from the library
+ * being used are not cryptographic related :-).
+ * 4. If you include any Windows specific code (or a derivative thereof) from
+ * the apps directory (application code) you must include an acknowledgement:
+ * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
+ *
+ * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * The licence and distribution terms for any publically available version or
+ * derivative of this code cannot be changed. i.e. this code cannot simply be
+ * copied and put under another distribution licence
+ * [including the GNU Public Licence.]
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/freebsd/sys/types.h>
+#include <rtems/freebsd/crypto/blowfish/blowfish.h>
+#include <rtems/freebsd/crypto/blowfish/bf_locl.h>
+
+/* Blowfish as implemented from 'Blowfish: Springer-Verlag paper'
+ * (From LECTURE NOTES IN COMPUTER SCIENCE 809, FAST SOFTWARE ENCRYPTION,
+ * CAMBRIDGE SECURITY WORKSHOP, CAMBRIDGE, U.K., DECEMBER 9-11, 1993)
+ */
+
+#if (BF_ROUNDS != 16) && (BF_ROUNDS != 20)
+If you set BF_ROUNDS to some value other than 16 or 20, you will have
+to modify the code.
+#endif
+
+/* XXX "data" is host endian */
+void
+BF_encrypt(BF_LONG *data, BF_KEY *key)
+{
+ register BF_LONG l, r, *p, *s;
+
+ p = key->P;
+ s= &key->S[0];
+ l = data[0];
+ r = data[1];
+
+ l^=p[0];
+ BF_ENC(r, l, s, p[ 1]);
+ BF_ENC(l, r, s, p[ 2]);
+ BF_ENC(r, l, s, p[ 3]);
+ BF_ENC(l, r, s, p[ 4]);
+ BF_ENC(r, l, s, p[ 5]);
+ BF_ENC(l, r, s, p[ 6]);
+ BF_ENC(r, l, s, p[ 7]);
+ BF_ENC(l, r, s, p[ 8]);
+ BF_ENC(r, l, s, p[ 9]);
+ BF_ENC(l, r, s, p[10]);
+ BF_ENC(r, l, s, p[11]);
+ BF_ENC(l, r, s, p[12]);
+ BF_ENC(r, l, s, p[13]);
+ BF_ENC(l, r, s, p[14]);
+ BF_ENC(r, l, s, p[15]);
+ BF_ENC(l, r, s, p[16]);
+#if BF_ROUNDS == 20
+ BF_ENC(r, l, s, p[17]);
+ BF_ENC(l, r, s, p[18]);
+ BF_ENC(r, l, s, p[19]);
+ BF_ENC(l, r, s, p[20]);
+#endif
+ r ^= p[BF_ROUNDS + 1];
+
+ data[1] = l & 0xffffffff;
+ data[0] = r & 0xffffffff;
+}
+
+/* XXX "data" is host endian */
+void
+BF_decrypt(BF_LONG *data, BF_KEY *key)
+{
+ register BF_LONG l, r, *p, *s;
+
+ p = key->P;
+ s= &key->S[0];
+ l = data[0];
+ r = data[1];
+
+ l ^= p[BF_ROUNDS + 1];
+#if BF_ROUNDS == 20
+ BF_ENC(r, l, s, p[20]);
+ BF_ENC(l, r, s, p[19]);
+ BF_ENC(r, l, s, p[18]);
+ BF_ENC(l, r, s, p[17]);
+#endif
+ BF_ENC(r, l, s, p[16]);
+ BF_ENC(l, r, s, p[15]);
+ BF_ENC(r, l, s, p[14]);
+ BF_ENC(l, r, s, p[13]);
+ BF_ENC(r, l, s, p[12]);
+ BF_ENC(l, r, s, p[11]);
+ BF_ENC(r, l, s, p[10]);
+ BF_ENC(l, r, s, p[ 9]);
+ BF_ENC(r, l, s, p[ 8]);
+ BF_ENC(l, r, s, p[ 7]);
+ BF_ENC(r, l, s, p[ 6]);
+ BF_ENC(l, r, s, p[ 5]);
+ BF_ENC(r, l, s, p[ 4]);
+ BF_ENC(l, r, s, p[ 3]);
+ BF_ENC(r, l, s, p[ 2]);
+ BF_ENC(l, r, s, p[ 1]);
+ r ^= p[0];
+
+ data[1] = l & 0xffffffff;
+ data[0] = r & 0xffffffff;
+}
diff --git a/rtems/freebsd/crypto/blowfish/bf_locl.h b/rtems/freebsd/crypto/blowfish/bf_locl.h
new file mode 100644
index 00000000..9314ff3c
--- /dev/null
+++ b/rtems/freebsd/crypto/blowfish/bf_locl.h
@@ -0,0 +1,224 @@
+/* $FreeBSD$ */
+/* $KAME: bf_locl.h,v 1.6 2001/09/10 04:03:56 itojun Exp $ */
+
+/* crypto/bf/bf_local.h */
+/* Copyright (C) 1995-1997 Eric Young (eay@mincom.oz.au)
+ * All rights reserved.
+ *
+ * This package is an SSL implementation written
+ * by Eric Young (eay@mincom.oz.au).
+ * The implementation was written so as to conform with Netscapes SSL.
+ *
+ * This library is free for commercial and non-commercial use as long as
+ * the following conditions are aheared to. The following conditions
+ * apply to all code found in this distribution, be it the RC4, RSA,
+ * lhash, DES, etc., code; not just the SSL code. The SSL documentation
+ * included with this distribution is covered by the same copyright terms
+ * except that the holder is Tim Hudson (tjh@mincom.oz.au).
+ *
+ * Copyright remains Eric Young's, and as such any Copyright notices in
+ * the code are not to be removed.
+ * If this package is used in a product, Eric Young should be given attribution
+ * as the author of the parts of the library used.
+ * This can be in the form of a textual message at program startup or
+ * in documentation (online or textual) provided with the package.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * "This product includes cryptographic software written by
+ * Eric Young (eay@mincom.oz.au)"
+ * The word 'cryptographic' can be left out if the rouines from the library
+ * being used are not cryptographic related :-).
+ * 4. If you include any Windows specific code (or a derivative thereof) from
+ * the apps directory (application code) you must include an acknowledgement:
+ * "This product includes software written by Tim Hudson (tjh@mincom.oz.au)"
+ *
+ * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * The licence and distribution terms for any publically available version or
+ * derivative of this code cannot be changed. i.e. this code cannot simply be
+ * copied and put under another distribution licence
+ * [including the GNU Public Licence.]
+ */
+/* WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING
+ *
+ * Always modify bf_locl.org since bf_locl.h is automatically generated from
+ * it during SSLeay configuration.
+ *
+ * WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING
+ */
+
+#undef c2l
+#define c2l(c,l) (l =((BF_LONG)(*((c)++))) , \
+ l|=((BF_LONG)(*((c)++)))<< 8L, \
+ l|=((BF_LONG)(*((c)++)))<<16L, \
+ l|=((BF_LONG)(*((c)++)))<<24L)
+
+/* NOTE - c is not incremented as per c2l */
+#undef c2ln
+#define c2ln(c,l1,l2,n) { \
+ c+=n; \
+ l1=l2=0; \
+ switch (n) { \
+ case 8: l2 =((BF_LONG)(*(--(c))))<<24L; \
+ case 7: l2|=((BF_LONG)(*(--(c))))<<16L; \
+ case 6: l2|=((BF_LONG)(*(--(c))))<< 8L; \
+ case 5: l2|=((BF_LONG)(*(--(c)))); \
+ case 4: l1 =((BF_LONG)(*(--(c))))<<24L; \
+ case 3: l1|=((BF_LONG)(*(--(c))))<<16L; \
+ case 2: l1|=((BF_LONG)(*(--(c))))<< 8L; \
+ case 1: l1|=((BF_LONG)(*(--(c)))); \
+ } \
+ }
+
+#undef l2c
+#define l2c(l,c) (*((c)++)=(unsigned char)(((l) )&0xff), \
+ *((c)++)=(unsigned char)(((l)>> 8L)&0xff), \
+ *((c)++)=(unsigned char)(((l)>>16L)&0xff), \
+ *((c)++)=(unsigned char)(((l)>>24L)&0xff))
+
+/* NOTE - c is not incremented as per l2c */
+#undef l2cn
+#define l2cn(l1,l2,c,n) { \
+ c+=n; \
+ switch (n) { \
+ case 8: *(--(c))=(unsigned char)(((l2)>>24L)&0xff); \
+ case 7: *(--(c))=(unsigned char)(((l2)>>16L)&0xff); \
+ case 6: *(--(c))=(unsigned char)(((l2)>> 8L)&0xff); \
+ case 5: *(--(c))=(unsigned char)(((l2) )&0xff); \
+ case 4: *(--(c))=(unsigned char)(((l1)>>24L)&0xff); \
+ case 3: *(--(c))=(unsigned char)(((l1)>>16L)&0xff); \
+ case 2: *(--(c))=(unsigned char)(((l1)>> 8L)&0xff); \
+ case 1: *(--(c))=(unsigned char)(((l1) )&0xff); \
+ } \
+ }
+
+/* NOTE - c is not incremented as per n2l */
+#define n2ln(c,l1,l2,n) { \
+ c+=n; \
+ l1=l2=0; \
+ switch (n) { \
+ case 8: l2 =((BF_LONG)(*(--(c)))) ; \
+ case 7: l2|=((BF_LONG)(*(--(c))))<< 8; \
+ case 6: l2|=((BF_LONG)(*(--(c))))<<16; \
+ case 5: l2|=((BF_LONG)(*(--(c))))<<24; \
+ case 4: l1 =((BF_LONG)(*(--(c)))) ; \
+ case 3: l1|=((BF_LONG)(*(--(c))))<< 8; \
+ case 2: l1|=((BF_LONG)(*(--(c))))<<16; \
+ case 1: l1|=((BF_LONG)(*(--(c))))<<24; \
+ } \
+ }
+
+/* NOTE - c is not incremented as per l2n */
+#define l2nn(l1,l2,c,n) { \
+ c+=n; \
+ switch (n) { \
+ case 8: *(--(c))=(unsigned char)(((l2) )&0xff); \
+ case 7: *(--(c))=(unsigned char)(((l2)>> 8)&0xff); \
+ case 6: *(--(c))=(unsigned char)(((l2)>>16)&0xff); \
+ case 5: *(--(c))=(unsigned char)(((l2)>>24)&0xff); \
+ case 4: *(--(c))=(unsigned char)(((l1) )&0xff); \
+ case 3: *(--(c))=(unsigned char)(((l1)>> 8)&0xff); \
+ case 2: *(--(c))=(unsigned char)(((l1)>>16)&0xff); \
+ case 1: *(--(c))=(unsigned char)(((l1)>>24)&0xff); \
+ } \
+ }
+
+#undef n2l
+#define n2l(c,l) (l =((BF_LONG)(*((c)++)))<<24L, \
+ l|=((BF_LONG)(*((c)++)))<<16L, \
+ l|=((BF_LONG)(*((c)++)))<< 8L, \
+ l|=((BF_LONG)(*((c)++))))
+
+#undef l2n
+#define l2n(l,c) (*((c)++)=(unsigned char)(((l)>>24L)&0xff), \
+ *((c)++)=(unsigned char)(((l)>>16L)&0xff), \
+ *((c)++)=(unsigned char)(((l)>> 8L)&0xff), \
+ *((c)++)=(unsigned char)(((l) )&0xff))
+
+/* This is actually a big endian algorithm, the most significant byte
+ * is used to look up array 0 */
+
+/* use BF_PTR2 for intel boxes,
+ * BF_PTR for sparc and MIPS/SGI
+ * use nothing for Alpha and HP.
+ */
+#undef BF_PTR
+#undef BF_PTR2
+#ifdef __i386__
+#define BF_PTR2
+#else
+#ifdef __mips__
+#define BF_PTR
+#endif
+#endif
+
+#define BF_M 0x3fc
+#define BF_0 22L
+#define BF_1 14L
+#define BF_2 6L
+#define BF_3 2L /* left shift */
+
+#if defined(BF_PTR2)
+
+/* This is basically a special Pentium version */
+#define BF_ENC(LL,R,S,P) \
+ { \
+ BF_LONG t,u,v; \
+ u=R>>BF_0; \
+ v=R>>BF_1; \
+ u&=BF_M; \
+ v&=BF_M; \
+ t= *(BF_LONG *)((unsigned char *)&(S[ 0])+u); \
+ u=R>>BF_2; \
+ t+= *(BF_LONG *)((unsigned char *)&(S[256])+v); \
+ v=R<<BF_3; \
+ u&=BF_M; \
+ v&=BF_M; \
+ t^= *(BF_LONG *)((unsigned char *)&(S[512])+u); \
+ LL^=P; \
+ t+= *(BF_LONG *)((unsigned char *)&(S[768])+v); \
+ LL^=t; \
+ }
+
+#elif defined(BF_PTR)
+
+/* This is normally very good */
+
+#define BF_ENC(LL,R,S,P) \
+ LL^=P; \
+ LL^= (((*(BF_LONG *)((unsigned char *)&(S[ 0])+((R>>BF_0)&BF_M))+ \
+ *(BF_LONG *)((unsigned char *)&(S[256])+((R>>BF_1)&BF_M)))^ \
+ *(BF_LONG *)((unsigned char *)&(S[512])+((R>>BF_2)&BF_M)))+ \
+ *(BF_LONG *)((unsigned char *)&(S[768])+((R<<BF_3)&BF_M)));
+#else
+
+/* This will always work, even on 64 bit machines and, strangely enough,
+ * on the Alpha it is faster than the pointer versions (both 32 and 64
+ * versions of BF_LONG) */
+
+#define BF_ENC(LL,R,S,P) \
+ LL^=P; \
+ LL^=((( S[ (R>>24L) ] + \
+ S[0x0100+((R>>16L)&0xff)])^ \
+ S[0x0200+((R>> 8L)&0xff)])+ \
+ S[0x0300+((R )&0xff)])&0xffffffff;
+#endif
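+
+/*
+ * For orientation, a hedged sketch of how BF_ENC composes into the
+ * cipher (the real BF_encrypt, declared in blowfish.h, unrolls the
+ * loop; p is key->P, s is key->S, l/r the two halves of the block):
+ *
+ *	l ^= p[0];
+ *	for (i = 1; i <= BF_ROUNDS; i += 2) {
+ *		BF_ENC(r, l, s, p[i]);
+ *		BF_ENC(l, r, s, p[i + 1]);
+ *	}
+ *	r ^= p[BF_ROUNDS + 1];
+ *	data[1] = l; data[0] = r;	(the store swaps the halves)
+ */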
diff --git a/rtems/freebsd/crypto/blowfish/bf_pi.h b/rtems/freebsd/crypto/blowfish/bf_pi.h
new file mode 100644
index 00000000..fdd5a27e
--- /dev/null
+++ b/rtems/freebsd/crypto/blowfish/bf_pi.h
@@ -0,0 +1,328 @@
+/* $FreeBSD$ */
+/* $KAME: bf_pi.h,v 1.4 2001/09/10 04:03:56 itojun Exp $ */
+
+/* crypto/bf/bf_pi.h */
+/* Copyright (C) 1995-1997 Eric Young (eay@mincom.oz.au)
+ * All rights reserved.
+ *
+ * This package is an SSL implementation written
+ * by Eric Young (eay@mincom.oz.au).
+ * The implementation was written so as to conform with Netscapes SSL.
+ *
+ * This library is free for commercial and non-commercial use as long as
+ * the following conditions are aheared to. The following conditions
+ * apply to all code found in this distribution, be it the RC4, RSA,
+ * lhash, DES, etc., code; not just the SSL code. The SSL documentation
+ * included with this distribution is covered by the same copyright terms
+ * except that the holder is Tim Hudson (tjh@mincom.oz.au).
+ *
+ * Copyright remains Eric Young's, and as such any Copyright notices in
+ * the code are not to be removed.
+ * If this package is used in a product, Eric Young should be given attribution
+ * as the author of the parts of the library used.
+ * This can be in the form of a textual message at program startup or
+ * in documentation (online or textual) provided with the package.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * "This product includes cryptographic software written by
+ * Eric Young (eay@mincom.oz.au)"
+ * The word 'cryptographic' can be left out if the rouines from the library
+ * being used are not cryptographic related :-).
+ * 4. If you include any Windows specific code (or a derivative thereof) from
+ * the apps directory (application code) you must include an acknowledgement:
+ * "This product includes software written by Tim Hudson (tjh@mincom.oz.au)"
+ *
+ * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * The licence and distribution terms for any publically available version or
+ * derivative of this code cannot be changed. i.e. this code cannot simply be
+ * copied and put under another distribution licence
+ * [including the GNU Public Licence.]
+ */
+
+static const BF_KEY bf_init= {
+ {
+ 0x243f6a88L, 0x85a308d3L, 0x13198a2eL, 0x03707344L,
+ 0xa4093822L, 0x299f31d0L, 0x082efa98L, 0xec4e6c89L,
+ 0x452821e6L, 0x38d01377L, 0xbe5466cfL, 0x34e90c6cL,
+ 0xc0ac29b7L, 0xc97c50ddL, 0x3f84d5b5L, 0xb5470917L,
+ 0x9216d5d9L, 0x8979fb1b
+ },{
+ 0xd1310ba6L, 0x98dfb5acL, 0x2ffd72dbL, 0xd01adfb7L,
+ 0xb8e1afedL, 0x6a267e96L, 0xba7c9045L, 0xf12c7f99L,
+ 0x24a19947L, 0xb3916cf7L, 0x0801f2e2L, 0x858efc16L,
+ 0x636920d8L, 0x71574e69L, 0xa458fea3L, 0xf4933d7eL,
+ 0x0d95748fL, 0x728eb658L, 0x718bcd58L, 0x82154aeeL,
+ 0x7b54a41dL, 0xc25a59b5L, 0x9c30d539L, 0x2af26013L,
+ 0xc5d1b023L, 0x286085f0L, 0xca417918L, 0xb8db38efL,
+ 0x8e79dcb0L, 0x603a180eL, 0x6c9e0e8bL, 0xb01e8a3eL,
+ 0xd71577c1L, 0xbd314b27L, 0x78af2fdaL, 0x55605c60L,
+ 0xe65525f3L, 0xaa55ab94L, 0x57489862L, 0x63e81440L,
+ 0x55ca396aL, 0x2aab10b6L, 0xb4cc5c34L, 0x1141e8ceL,
+ 0xa15486afL, 0x7c72e993L, 0xb3ee1411L, 0x636fbc2aL,
+ 0x2ba9c55dL, 0x741831f6L, 0xce5c3e16L, 0x9b87931eL,
+ 0xafd6ba33L, 0x6c24cf5cL, 0x7a325381L, 0x28958677L,
+ 0x3b8f4898L, 0x6b4bb9afL, 0xc4bfe81bL, 0x66282193L,
+ 0x61d809ccL, 0xfb21a991L, 0x487cac60L, 0x5dec8032L,
+ 0xef845d5dL, 0xe98575b1L, 0xdc262302L, 0xeb651b88L,
+ 0x23893e81L, 0xd396acc5L, 0x0f6d6ff3L, 0x83f44239L,
+ 0x2e0b4482L, 0xa4842004L, 0x69c8f04aL, 0x9e1f9b5eL,
+ 0x21c66842L, 0xf6e96c9aL, 0x670c9c61L, 0xabd388f0L,
+ 0x6a51a0d2L, 0xd8542f68L, 0x960fa728L, 0xab5133a3L,
+ 0x6eef0b6cL, 0x137a3be4L, 0xba3bf050L, 0x7efb2a98L,
+ 0xa1f1651dL, 0x39af0176L, 0x66ca593eL, 0x82430e88L,
+ 0x8cee8619L, 0x456f9fb4L, 0x7d84a5c3L, 0x3b8b5ebeL,
+ 0xe06f75d8L, 0x85c12073L, 0x401a449fL, 0x56c16aa6L,
+ 0x4ed3aa62L, 0x363f7706L, 0x1bfedf72L, 0x429b023dL,
+ 0x37d0d724L, 0xd00a1248L, 0xdb0fead3L, 0x49f1c09bL,
+ 0x075372c9L, 0x80991b7bL, 0x25d479d8L, 0xf6e8def7L,
+ 0xe3fe501aL, 0xb6794c3bL, 0x976ce0bdL, 0x04c006baL,
+ 0xc1a94fb6L, 0x409f60c4L, 0x5e5c9ec2L, 0x196a2463L,
+ 0x68fb6fafL, 0x3e6c53b5L, 0x1339b2ebL, 0x3b52ec6fL,
+ 0x6dfc511fL, 0x9b30952cL, 0xcc814544L, 0xaf5ebd09L,
+ 0xbee3d004L, 0xde334afdL, 0x660f2807L, 0x192e4bb3L,
+ 0xc0cba857L, 0x45c8740fL, 0xd20b5f39L, 0xb9d3fbdbL,
+ 0x5579c0bdL, 0x1a60320aL, 0xd6a100c6L, 0x402c7279L,
+ 0x679f25feL, 0xfb1fa3ccL, 0x8ea5e9f8L, 0xdb3222f8L,
+ 0x3c7516dfL, 0xfd616b15L, 0x2f501ec8L, 0xad0552abL,
+ 0x323db5faL, 0xfd238760L, 0x53317b48L, 0x3e00df82L,
+ 0x9e5c57bbL, 0xca6f8ca0L, 0x1a87562eL, 0xdf1769dbL,
+ 0xd542a8f6L, 0x287effc3L, 0xac6732c6L, 0x8c4f5573L,
+ 0x695b27b0L, 0xbbca58c8L, 0xe1ffa35dL, 0xb8f011a0L,
+ 0x10fa3d98L, 0xfd2183b8L, 0x4afcb56cL, 0x2dd1d35bL,
+ 0x9a53e479L, 0xb6f84565L, 0xd28e49bcL, 0x4bfb9790L,
+ 0xe1ddf2daL, 0xa4cb7e33L, 0x62fb1341L, 0xcee4c6e8L,
+ 0xef20cadaL, 0x36774c01L, 0xd07e9efeL, 0x2bf11fb4L,
+ 0x95dbda4dL, 0xae909198L, 0xeaad8e71L, 0x6b93d5a0L,
+ 0xd08ed1d0L, 0xafc725e0L, 0x8e3c5b2fL, 0x8e7594b7L,
+ 0x8ff6e2fbL, 0xf2122b64L, 0x8888b812L, 0x900df01cL,
+ 0x4fad5ea0L, 0x688fc31cL, 0xd1cff191L, 0xb3a8c1adL,
+ 0x2f2f2218L, 0xbe0e1777L, 0xea752dfeL, 0x8b021fa1L,
+ 0xe5a0cc0fL, 0xb56f74e8L, 0x18acf3d6L, 0xce89e299L,
+ 0xb4a84fe0L, 0xfd13e0b7L, 0x7cc43b81L, 0xd2ada8d9L,
+ 0x165fa266L, 0x80957705L, 0x93cc7314L, 0x211a1477L,
+ 0xe6ad2065L, 0x77b5fa86L, 0xc75442f5L, 0xfb9d35cfL,
+ 0xebcdaf0cL, 0x7b3e89a0L, 0xd6411bd3L, 0xae1e7e49L,
+ 0x00250e2dL, 0x2071b35eL, 0x226800bbL, 0x57b8e0afL,
+ 0x2464369bL, 0xf009b91eL, 0x5563911dL, 0x59dfa6aaL,
+ 0x78c14389L, 0xd95a537fL, 0x207d5ba2L, 0x02e5b9c5L,
+ 0x83260376L, 0x6295cfa9L, 0x11c81968L, 0x4e734a41L,
+ 0xb3472dcaL, 0x7b14a94aL, 0x1b510052L, 0x9a532915L,
+ 0xd60f573fL, 0xbc9bc6e4L, 0x2b60a476L, 0x81e67400L,
+ 0x08ba6fb5L, 0x571be91fL, 0xf296ec6bL, 0x2a0dd915L,
+ 0xb6636521L, 0xe7b9f9b6L, 0xff34052eL, 0xc5855664L,
+ 0x53b02d5dL, 0xa99f8fa1L, 0x08ba4799L, 0x6e85076aL,
+ 0x4b7a70e9L, 0xb5b32944L, 0xdb75092eL, 0xc4192623L,
+ 0xad6ea6b0L, 0x49a7df7dL, 0x9cee60b8L, 0x8fedb266L,
+ 0xecaa8c71L, 0x699a17ffL, 0x5664526cL, 0xc2b19ee1L,
+ 0x193602a5L, 0x75094c29L, 0xa0591340L, 0xe4183a3eL,
+ 0x3f54989aL, 0x5b429d65L, 0x6b8fe4d6L, 0x99f73fd6L,
+ 0xa1d29c07L, 0xefe830f5L, 0x4d2d38e6L, 0xf0255dc1L,
+ 0x4cdd2086L, 0x8470eb26L, 0x6382e9c6L, 0x021ecc5eL,
+ 0x09686b3fL, 0x3ebaefc9L, 0x3c971814L, 0x6b6a70a1L,
+ 0x687f3584L, 0x52a0e286L, 0xb79c5305L, 0xaa500737L,
+ 0x3e07841cL, 0x7fdeae5cL, 0x8e7d44ecL, 0x5716f2b8L,
+ 0xb03ada37L, 0xf0500c0dL, 0xf01c1f04L, 0x0200b3ffL,
+ 0xae0cf51aL, 0x3cb574b2L, 0x25837a58L, 0xdc0921bdL,
+ 0xd19113f9L, 0x7ca92ff6L, 0x94324773L, 0x22f54701L,
+ 0x3ae5e581L, 0x37c2dadcL, 0xc8b57634L, 0x9af3dda7L,
+ 0xa9446146L, 0x0fd0030eL, 0xecc8c73eL, 0xa4751e41L,
+ 0xe238cd99L, 0x3bea0e2fL, 0x3280bba1L, 0x183eb331L,
+ 0x4e548b38L, 0x4f6db908L, 0x6f420d03L, 0xf60a04bfL,
+ 0x2cb81290L, 0x24977c79L, 0x5679b072L, 0xbcaf89afL,
+ 0xde9a771fL, 0xd9930810L, 0xb38bae12L, 0xdccf3f2eL,
+ 0x5512721fL, 0x2e6b7124L, 0x501adde6L, 0x9f84cd87L,
+ 0x7a584718L, 0x7408da17L, 0xbc9f9abcL, 0xe94b7d8cL,
+ 0xec7aec3aL, 0xdb851dfaL, 0x63094366L, 0xc464c3d2L,
+ 0xef1c1847L, 0x3215d908L, 0xdd433b37L, 0x24c2ba16L,
+ 0x12a14d43L, 0x2a65c451L, 0x50940002L, 0x133ae4ddL,
+ 0x71dff89eL, 0x10314e55L, 0x81ac77d6L, 0x5f11199bL,
+ 0x043556f1L, 0xd7a3c76bL, 0x3c11183bL, 0x5924a509L,
+ 0xf28fe6edL, 0x97f1fbfaL, 0x9ebabf2cL, 0x1e153c6eL,
+ 0x86e34570L, 0xeae96fb1L, 0x860e5e0aL, 0x5a3e2ab3L,
+ 0x771fe71cL, 0x4e3d06faL, 0x2965dcb9L, 0x99e71d0fL,
+ 0x803e89d6L, 0x5266c825L, 0x2e4cc978L, 0x9c10b36aL,
+ 0xc6150ebaL, 0x94e2ea78L, 0xa5fc3c53L, 0x1e0a2df4L,
+ 0xf2f74ea7L, 0x361d2b3dL, 0x1939260fL, 0x19c27960L,
+ 0x5223a708L, 0xf71312b6L, 0xebadfe6eL, 0xeac31f66L,
+ 0xe3bc4595L, 0xa67bc883L, 0xb17f37d1L, 0x018cff28L,
+ 0xc332ddefL, 0xbe6c5aa5L, 0x65582185L, 0x68ab9802L,
+ 0xeecea50fL, 0xdb2f953bL, 0x2aef7dadL, 0x5b6e2f84L,
+ 0x1521b628L, 0x29076170L, 0xecdd4775L, 0x619f1510L,
+ 0x13cca830L, 0xeb61bd96L, 0x0334fe1eL, 0xaa0363cfL,
+ 0xb5735c90L, 0x4c70a239L, 0xd59e9e0bL, 0xcbaade14L,
+ 0xeecc86bcL, 0x60622ca7L, 0x9cab5cabL, 0xb2f3846eL,
+ 0x648b1eafL, 0x19bdf0caL, 0xa02369b9L, 0x655abb50L,
+ 0x40685a32L, 0x3c2ab4b3L, 0x319ee9d5L, 0xc021b8f7L,
+ 0x9b540b19L, 0x875fa099L, 0x95f7997eL, 0x623d7da8L,
+ 0xf837889aL, 0x97e32d77L, 0x11ed935fL, 0x16681281L,
+ 0x0e358829L, 0xc7e61fd6L, 0x96dedfa1L, 0x7858ba99L,
+ 0x57f584a5L, 0x1b227263L, 0x9b83c3ffL, 0x1ac24696L,
+ 0xcdb30aebL, 0x532e3054L, 0x8fd948e4L, 0x6dbc3128L,
+ 0x58ebf2efL, 0x34c6ffeaL, 0xfe28ed61L, 0xee7c3c73L,
+ 0x5d4a14d9L, 0xe864b7e3L, 0x42105d14L, 0x203e13e0L,
+ 0x45eee2b6L, 0xa3aaabeaL, 0xdb6c4f15L, 0xfacb4fd0L,
+ 0xc742f442L, 0xef6abbb5L, 0x654f3b1dL, 0x41cd2105L,
+ 0xd81e799eL, 0x86854dc7L, 0xe44b476aL, 0x3d816250L,
+ 0xcf62a1f2L, 0x5b8d2646L, 0xfc8883a0L, 0xc1c7b6a3L,
+ 0x7f1524c3L, 0x69cb7492L, 0x47848a0bL, 0x5692b285L,
+ 0x095bbf00L, 0xad19489dL, 0x1462b174L, 0x23820e00L,
+ 0x58428d2aL, 0x0c55f5eaL, 0x1dadf43eL, 0x233f7061L,
+ 0x3372f092L, 0x8d937e41L, 0xd65fecf1L, 0x6c223bdbL,
+ 0x7cde3759L, 0xcbee7460L, 0x4085f2a7L, 0xce77326eL,
+ 0xa6078084L, 0x19f8509eL, 0xe8efd855L, 0x61d99735L,
+ 0xa969a7aaL, 0xc50c06c2L, 0x5a04abfcL, 0x800bcadcL,
+ 0x9e447a2eL, 0xc3453484L, 0xfdd56705L, 0x0e1e9ec9L,
+ 0xdb73dbd3L, 0x105588cdL, 0x675fda79L, 0xe3674340L,
+ 0xc5c43465L, 0x713e38d8L, 0x3d28f89eL, 0xf16dff20L,
+ 0x153e21e7L, 0x8fb03d4aL, 0xe6e39f2bL, 0xdb83adf7L,
+ 0xe93d5a68L, 0x948140f7L, 0xf64c261cL, 0x94692934L,
+ 0x411520f7L, 0x7602d4f7L, 0xbcf46b2eL, 0xd4a20068L,
+ 0xd4082471L, 0x3320f46aL, 0x43b7d4b7L, 0x500061afL,
+ 0x1e39f62eL, 0x97244546L, 0x14214f74L, 0xbf8b8840L,
+ 0x4d95fc1dL, 0x96b591afL, 0x70f4ddd3L, 0x66a02f45L,
+ 0xbfbc09ecL, 0x03bd9785L, 0x7fac6dd0L, 0x31cb8504L,
+ 0x96eb27b3L, 0x55fd3941L, 0xda2547e6L, 0xabca0a9aL,
+ 0x28507825L, 0x530429f4L, 0x0a2c86daL, 0xe9b66dfbL,
+ 0x68dc1462L, 0xd7486900L, 0x680ec0a4L, 0x27a18deeL,
+ 0x4f3ffea2L, 0xe887ad8cL, 0xb58ce006L, 0x7af4d6b6L,
+ 0xaace1e7cL, 0xd3375fecL, 0xce78a399L, 0x406b2a42L,
+ 0x20fe9e35L, 0xd9f385b9L, 0xee39d7abL, 0x3b124e8bL,
+ 0x1dc9faf7L, 0x4b6d1856L, 0x26a36631L, 0xeae397b2L,
+ 0x3a6efa74L, 0xdd5b4332L, 0x6841e7f7L, 0xca7820fbL,
+ 0xfb0af54eL, 0xd8feb397L, 0x454056acL, 0xba489527L,
+ 0x55533a3aL, 0x20838d87L, 0xfe6ba9b7L, 0xd096954bL,
+ 0x55a867bcL, 0xa1159a58L, 0xcca92963L, 0x99e1db33L,
+ 0xa62a4a56L, 0x3f3125f9L, 0x5ef47e1cL, 0x9029317cL,
+ 0xfdf8e802L, 0x04272f70L, 0x80bb155cL, 0x05282ce3L,
+ 0x95c11548L, 0xe4c66d22L, 0x48c1133fL, 0xc70f86dcL,
+ 0x07f9c9eeL, 0x41041f0fL, 0x404779a4L, 0x5d886e17L,
+ 0x325f51ebL, 0xd59bc0d1L, 0xf2bcc18fL, 0x41113564L,
+ 0x257b7834L, 0x602a9c60L, 0xdff8e8a3L, 0x1f636c1bL,
+ 0x0e12b4c2L, 0x02e1329eL, 0xaf664fd1L, 0xcad18115L,
+ 0x6b2395e0L, 0x333e92e1L, 0x3b240b62L, 0xeebeb922L,
+ 0x85b2a20eL, 0xe6ba0d99L, 0xde720c8cL, 0x2da2f728L,
+ 0xd0127845L, 0x95b794fdL, 0x647d0862L, 0xe7ccf5f0L,
+ 0x5449a36fL, 0x877d48faL, 0xc39dfd27L, 0xf33e8d1eL,
+ 0x0a476341L, 0x992eff74L, 0x3a6f6eabL, 0xf4f8fd37L,
+ 0xa812dc60L, 0xa1ebddf8L, 0x991be14cL, 0xdb6e6b0dL,
+ 0xc67b5510L, 0x6d672c37L, 0x2765d43bL, 0xdcd0e804L,
+ 0xf1290dc7L, 0xcc00ffa3L, 0xb5390f92L, 0x690fed0bL,
+ 0x667b9ffbL, 0xcedb7d9cL, 0xa091cf0bL, 0xd9155ea3L,
+ 0xbb132f88L, 0x515bad24L, 0x7b9479bfL, 0x763bd6ebL,
+ 0x37392eb3L, 0xcc115979L, 0x8026e297L, 0xf42e312dL,
+ 0x6842ada7L, 0xc66a2b3bL, 0x12754cccL, 0x782ef11cL,
+ 0x6a124237L, 0xb79251e7L, 0x06a1bbe6L, 0x4bfb6350L,
+ 0x1a6b1018L, 0x11caedfaL, 0x3d25bdd8L, 0xe2e1c3c9L,
+ 0x44421659L, 0x0a121386L, 0xd90cec6eL, 0xd5abea2aL,
+ 0x64af674eL, 0xda86a85fL, 0xbebfe988L, 0x64e4c3feL,
+ 0x9dbc8057L, 0xf0f7c086L, 0x60787bf8L, 0x6003604dL,
+ 0xd1fd8346L, 0xf6381fb0L, 0x7745ae04L, 0xd736fcccL,
+ 0x83426b33L, 0xf01eab71L, 0xb0804187L, 0x3c005e5fL,
+ 0x77a057beL, 0xbde8ae24L, 0x55464299L, 0xbf582e61L,
+ 0x4e58f48fL, 0xf2ddfda2L, 0xf474ef38L, 0x8789bdc2L,
+ 0x5366f9c3L, 0xc8b38e74L, 0xb475f255L, 0x46fcd9b9L,
+ 0x7aeb2661L, 0x8b1ddf84L, 0x846a0e79L, 0x915f95e2L,
+ 0x466e598eL, 0x20b45770L, 0x8cd55591L, 0xc902de4cL,
+ 0xb90bace1L, 0xbb8205d0L, 0x11a86248L, 0x7574a99eL,
+ 0xb77f19b6L, 0xe0a9dc09L, 0x662d09a1L, 0xc4324633L,
+ 0xe85a1f02L, 0x09f0be8cL, 0x4a99a025L, 0x1d6efe10L,
+ 0x1ab93d1dL, 0x0ba5a4dfL, 0xa186f20fL, 0x2868f169L,
+ 0xdcb7da83L, 0x573906feL, 0xa1e2ce9bL, 0x4fcd7f52L,
+ 0x50115e01L, 0xa70683faL, 0xa002b5c4L, 0x0de6d027L,
+ 0x9af88c27L, 0x773f8641L, 0xc3604c06L, 0x61a806b5L,
+ 0xf0177a28L, 0xc0f586e0L, 0x006058aaL, 0x30dc7d62L,
+ 0x11e69ed7L, 0x2338ea63L, 0x53c2dd94L, 0xc2c21634L,
+ 0xbbcbee56L, 0x90bcb6deL, 0xebfc7da1L, 0xce591d76L,
+ 0x6f05e409L, 0x4b7c0188L, 0x39720a3dL, 0x7c927c24L,
+ 0x86e3725fL, 0x724d9db9L, 0x1ac15bb4L, 0xd39eb8fcL,
+ 0xed545578L, 0x08fca5b5L, 0xd83d7cd3L, 0x4dad0fc4L,
+ 0x1e50ef5eL, 0xb161e6f8L, 0xa28514d9L, 0x6c51133cL,
+ 0x6fd5c7e7L, 0x56e14ec4L, 0x362abfceL, 0xddc6c837L,
+ 0xd79a3234L, 0x92638212L, 0x670efa8eL, 0x406000e0L,
+ 0x3a39ce37L, 0xd3faf5cfL, 0xabc27737L, 0x5ac52d1bL,
+ 0x5cb0679eL, 0x4fa33742L, 0xd3822740L, 0x99bc9bbeL,
+ 0xd5118e9dL, 0xbf0f7315L, 0xd62d1c7eL, 0xc700c47bL,
+ 0xb78c1b6bL, 0x21a19045L, 0xb26eb1beL, 0x6a366eb4L,
+ 0x5748ab2fL, 0xbc946e79L, 0xc6a376d2L, 0x6549c2c8L,
+ 0x530ff8eeL, 0x468dde7dL, 0xd5730a1dL, 0x4cd04dc6L,
+ 0x2939bbdbL, 0xa9ba4650L, 0xac9526e8L, 0xbe5ee304L,
+ 0xa1fad5f0L, 0x6a2d519aL, 0x63ef8ce2L, 0x9a86ee22L,
+ 0xc089c2b8L, 0x43242ef6L, 0xa51e03aaL, 0x9cf2d0a4L,
+ 0x83c061baL, 0x9be96a4dL, 0x8fe51550L, 0xba645bd6L,
+ 0x2826a2f9L, 0xa73a3ae1L, 0x4ba99586L, 0xef5562e9L,
+ 0xc72fefd3L, 0xf752f7daL, 0x3f046f69L, 0x77fa0a59L,
+ 0x80e4a915L, 0x87b08601L, 0x9b09e6adL, 0x3b3ee593L,
+ 0xe990fd5aL, 0x9e34d797L, 0x2cf0b7d9L, 0x022b8b51L,
+ 0x96d5ac3aL, 0x017da67dL, 0xd1cf3ed6L, 0x7c7d2d28L,
+ 0x1f9f25cfL, 0xadf2b89bL, 0x5ad6b472L, 0x5a88f54cL,
+ 0xe029ac71L, 0xe019a5e6L, 0x47b0acfdL, 0xed93fa9bL,
+ 0xe8d3c48dL, 0x283b57ccL, 0xf8d56629L, 0x79132e28L,
+ 0x785f0191L, 0xed756055L, 0xf7960e44L, 0xe3d35e8cL,
+ 0x15056dd4L, 0x88f46dbaL, 0x03a16125L, 0x0564f0bdL,
+ 0xc3eb9e15L, 0x3c9057a2L, 0x97271aecL, 0xa93a072aL,
+ 0x1b3f6d9bL, 0x1e6321f5L, 0xf59c66fbL, 0x26dcf319L,
+ 0x7533d928L, 0xb155fdf5L, 0x03563482L, 0x8aba3cbbL,
+ 0x28517711L, 0xc20ad9f8L, 0xabcc5167L, 0xccad925fL,
+ 0x4de81751L, 0x3830dc8eL, 0x379d5862L, 0x9320f991L,
+ 0xea7a90c2L, 0xfb3e7bceL, 0x5121ce64L, 0x774fbe32L,
+ 0xa8b6e37eL, 0xc3293d46L, 0x48de5369L, 0x6413e680L,
+ 0xa2ae0810L, 0xdd6db224L, 0x69852dfdL, 0x09072166L,
+ 0xb39a460aL, 0x6445c0ddL, 0x586cdecfL, 0x1c20c8aeL,
+ 0x5bbef7ddL, 0x1b588d40L, 0xccd2017fL, 0x6bb4e3bbL,
+ 0xdda26a7eL, 0x3a59ff45L, 0x3e350a44L, 0xbcb4cdd5L,
+ 0x72eacea8L, 0xfa6484bbL, 0x8d6612aeL, 0xbf3c6f47L,
+ 0xd29be463L, 0x542f5d9eL, 0xaec2771bL, 0xf64e6370L,
+ 0x740e0d8dL, 0xe75b1357L, 0xf8721671L, 0xaf537d5dL,
+ 0x4040cb08L, 0x4eb4e2ccL, 0x34d2466aL, 0x0115af84L,
+ 0xe1b00428L, 0x95983a1dL, 0x06b89fb4L, 0xce6ea048L,
+ 0x6f3f3b82L, 0x3520ab82L, 0x011a1d4bL, 0x277227f8L,
+ 0x611560b1L, 0xe7933fdcL, 0xbb3a792bL, 0x344525bdL,
+ 0xa08839e1L, 0x51ce794bL, 0x2f32c9b7L, 0xa01fbac9L,
+ 0xe01cc87eL, 0xbcc7d1f6L, 0xcf0111c3L, 0xa1e8aac7L,
+ 0x1a908749L, 0xd44fbd9aL, 0xd0dadecbL, 0xd50ada38L,
+ 0x0339c32aL, 0xc6913667L, 0x8df9317cL, 0xe0b12b4fL,
+ 0xf79e59b7L, 0x43f5bb3aL, 0xf2d519ffL, 0x27d9459cL,
+ 0xbf97222cL, 0x15e6fc2aL, 0x0f91fc71L, 0x9b941525L,
+ 0xfae59361L, 0xceb69cebL, 0xc2a86459L, 0x12baa8d1L,
+ 0xb6c1075eL, 0xe3056a0cL, 0x10d25065L, 0xcb03a442L,
+ 0xe0ec6e0eL, 0x1698db3bL, 0x4c98a0beL, 0x3278e964L,
+ 0x9f1f9532L, 0xe0d392dfL, 0xd3a0342bL, 0x8971f21eL,
+ 0x1b0a7441L, 0x4ba3348cL, 0xc5be7120L, 0xc37632d8L,
+ 0xdf359f8dL, 0x9b992f2eL, 0xe60b6f47L, 0x0fe3f11dL,
+ 0xe54cda54L, 0x1edad891L, 0xce6279cfL, 0xcd3e7e6fL,
+ 0x1618b166L, 0xfd2c1d05L, 0x848fd2c5L, 0xf6fb2299L,
+ 0xf523f357L, 0xa6327623L, 0x93a83531L, 0x56cccd02L,
+ 0xacf08162L, 0x5a75ebb5L, 0x6e163697L, 0x88d273ccL,
+ 0xde966292L, 0x81b949d0L, 0x4c50901bL, 0x71c65614L,
+ 0xe6c6c7bdL, 0x327a140aL, 0x45e1d006L, 0xc3f27b9aL,
+ 0xc9aa53fdL, 0x62a80f00L, 0xbb25bfe2L, 0x35bdd2f6L,
+ 0x71126905L, 0xb2040222L, 0xb6cbcf7cL, 0xcd769c2bL,
+ 0x53113ec0L, 0x1640e3d3L, 0x38abbd60L, 0x2547adf0L,
+ 0xba38209cL, 0xf746ce76L, 0x77afa1c5L, 0x20756060L,
+ 0x85cbfe4eL, 0x8ae88dd8L, 0x7aaaf9b0L, 0x4cf9aa7eL,
+ 0x1948c25cL, 0x02fb8a8cL, 0x01c36ae4L, 0xd6ebe1f9L,
+ 0x90d4f869L, 0xa65cdea0L, 0x3f09252dL, 0xc208e69fL,
+ 0xb74e6132L, 0xce77e25bL, 0x578fdfe3L, 0x3ac372e6L,
+ }
+ };
+
diff --git a/rtems/freebsd/crypto/blowfish/bf_skey.c b/rtems/freebsd/crypto/blowfish/bf_skey.c
new file mode 100644
index 00000000..a7c7c304
--- /dev/null
+++ b/rtems/freebsd/crypto/blowfish/bf_skey.c
@@ -0,0 +1,125 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/* $KAME: bf_skey.c,v 1.7 2002/02/27 01:33:59 itojun Exp $ */
+
+/* crypto/bf/bf_skey.c */
+
+/* Copyright (C) 1995-1997 Eric Young (eay@mincom.oz.au)
+ * All rights reserved.
+ *
+ * This package is an SSL implementation written
+ * by Eric Young (eay@mincom.oz.au).
+ * The implementation was written so as to conform with Netscapes SSL.
+ *
+ * This library is free for commercial and non-commercial use as long as
+ * the following conditions are aheared to. The following conditions
+ * apply to all code found in this distribution, be it the RC4, RSA,
+ * lhash, DES, etc., code; not just the SSL code. The SSL documentation
+ * included with this distribution is covered by the same copyright terms
+ * except that the holder is Tim Hudson (tjh@mincom.oz.au).
+ *
+ * Copyright remains Eric Young's, and as such any Copyright notices in
+ * the code are not to be removed.
+ * If this package is used in a product, Eric Young should be given attribution
+ * as the author of the parts of the library used.
+ * This can be in the form of a textual message at program startup or
+ * in documentation (online or textual) provided with the package.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * "This product includes cryptographic software written by
+ * Eric Young (eay@mincom.oz.au)"
+ * The word 'cryptographic' can be left out if the rouines from the library
+ * being used are not cryptographic related :-).
+ * 4. If you include any Windows specific code (or a derivative thereof) from
+ * the apps directory (application code) you must include an acknowledgement:
+ * "This product includes software written by Tim Hudson (tjh@mincom.oz.au)"
+ *
+ * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * The licence and distribution terms for any publically available version or
+ * derivative of this code cannot be changed. i.e. this code cannot simply be
+ * copied and put under another distribution licence
+ * [including the GNU Public Licence.]
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/freebsd/sys/types.h>
+#include <rtems/freebsd/sys/time.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/crypto/blowfish/blowfish.h>
+#include <rtems/freebsd/crypto/blowfish/bf_locl.h>
+#include <rtems/freebsd/crypto/blowfish/bf_pi.h>
+
+void
+BF_set_key(BF_KEY *key, int len, unsigned char *data)
+{
+ int i;
+ BF_LONG *p, ri, in[2];
+ unsigned char *d, *end;
+
+ memcpy((char *)key, (const char *)&bf_init, sizeof(BF_KEY));
+ p = key->P;
+
+ if (len > ((BF_ROUNDS + 2) * 4))
+ len = (BF_ROUNDS + 2) * 4;
+
+ d = data;
+ end= &(data[len]);
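+	/* XOR the key bytes, repeated cyclically, into the P array */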
+ for (i = 0; i < BF_ROUNDS + 2; i++) {
+ ri = *(d++);
+ if (d >= end) d = data;
+
+ ri <<= 8;
+ ri |= *(d++);
+ if (d >= end) d = data;
+
+ ri <<= 8;
+ ri |= *(d++);
+ if (d >= end) d = data;
+
+ ri <<= 8;
+ ri |= *(d++);
+ if (d >= end) d = data;
+
+ p[i] ^= ri;
+ }
+
+ in[0] = 0L;
+ in[1] = 0L;
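+	/* replace P, and then S, with successive encryptions of what
+	 * starts out as the all-zero block */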
+ for (i = 0; i < BF_ROUNDS + 2; i += 2) {
+ BF_encrypt(in, key);
+ p[i ] = in[0];
+ p[i+1] = in[1];
+ }
+
+ p = key->S;
+ for (i = 0; i < 4 * 256; i += 2) {
+ BF_encrypt(in, key);
+ p[i ] = in[0];
+ p[i+1] = in[1];
+ }
+}
diff --git a/rtems/freebsd/crypto/blowfish/blowfish.h b/rtems/freebsd/crypto/blowfish/blowfish.h
new file mode 100644
index 00000000..ecc14075
--- /dev/null
+++ b/rtems/freebsd/crypto/blowfish/blowfish.h
@@ -0,0 +1,93 @@
+/* $FreeBSD$ */
+/* $KAME: blowfish.h,v 1.12 2002/02/27 01:33:59 itojun Exp $ */
+
+/* crypto/bf/blowfish.h */
+/* Copyright (C) 1995-1997 Eric Young (eay@mincom.oz.au)
+ * All rights reserved.
+ *
+ * This package is an SSL implementation written
+ * by Eric Young (eay@mincom.oz.au).
+ * The implementation was written so as to conform with Netscapes SSL.
+ *
+ * This library is free for commercial and non-commercial use as long as
+ * the following conditions are aheared to. The following conditions
+ * apply to all code found in this distribution, be it the RC4, RSA,
+ * lhash, DES, etc., code; not just the SSL code. The SSL documentation
+ * included with this distribution is covered by the same copyright terms
+ * except that the holder is Tim Hudson (tjh@mincom.oz.au).
+ *
+ * Copyright remains Eric Young's, and as such any Copyright notices in
+ * the code are not to be removed.
+ * If this package is used in a product, Eric Young should be given attribution
+ * as the author of the parts of the library used.
+ * This can be in the form of a textual message at program startup or
+ * in documentation (online or textual) provided with the package.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * "This product includes cryptographic software written by
+ * Eric Young (eay@mincom.oz.au)"
+ * The word 'cryptographic' can be left out if the rouines from the library
+ * being used are not cryptographic related :-).
+ * 4. If you include any Windows specific code (or a derivative thereof) from
+ * the apps directory (application code) you must include an acknowledgement:
+ * "This product includes software written by Tim Hudson (tjh@mincom.oz.au)"
+ *
+ * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * The licence and distribution terms for any publically available version or
+ * derivative of this code cannot be changed. i.e. this code cannot simply be
+ * copied and put under another distribution licence
+ * [including the GNU Public Licence.]
+ */
+
+#ifndef HEADER_BLOWFISH_H
+#define HEADER_BLOWFISH_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define BF_ENCRYPT 1
+#define BF_DECRYPT 0
+
+/* must be a 32-bit quantity */
+#define BF_LONG u_int32_t
+
+#define BF_ROUNDS 16
+#define BF_BLOCK 8
+
+typedef struct bf_key_st {
+ BF_LONG P[BF_ROUNDS+2];
+ BF_LONG S[4*256];
+} BF_KEY;
+
+void BF_set_key(BF_KEY *, int, unsigned char *);
+void BF_encrypt(BF_LONG *, BF_KEY *);
+void BF_decrypt(BF_LONG *, BF_KEY *);
+void BF_ecb_encrypt(const unsigned char *, unsigned char *,
+ BF_KEY *, int);
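+
+/*
+ * Usage sketch (hedged; "secret" stands for any 16-byte key buffer):
+ * schedule the key once, then process 8-byte blocks as pairs of
+ * BF_LONG words:
+ *
+ *	BF_KEY key;
+ *	BF_LONG block[2] = { 0x01234567, 0x89abcdef };
+ *
+ *	BF_set_key(&key, 16, secret);
+ *	BF_encrypt(block, &key);
+ *	BF_decrypt(block, &key);	// restores the original block
+ */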
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/rtems/freebsd/crypto/camellia/camellia-api.c b/rtems/freebsd/crypto/camellia/camellia-api.c
new file mode 100644
index 00000000..db3b5e0b
--- /dev/null
+++ b/rtems/freebsd/crypto/camellia/camellia-api.c
@@ -0,0 +1,60 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*
+ *
+ * Copyright (c) 2006
+ * NTT (Nippon Telegraph and Telephone Corporation). All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer as
+ * the first lines of this file unmodified.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY NTT ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL NTT BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+
+#include <rtems/freebsd/sys/types.h>
+#ifdef _KERNEL
+#include <rtems/freebsd/sys/systm.h>
+#endif
+#include <rtems/freebsd/crypto/camellia/camellia.h>
+
+void
+camellia_set_key(camellia_ctx *ctx, const u_char *key, int bits)
+{
+
+ Camellia_Ekeygen(bits, key, ctx->subkey);
+ ctx->bits = bits;
+}
+
+void
+camellia_decrypt(const camellia_ctx *ctx, const u_char *src, u_char *dst)
+{
+
+ Camellia_DecryptBlock(ctx->bits, src, ctx->subkey, dst);
+}
+
+void
+camellia_encrypt(const camellia_ctx *ctx, const u_char *src, u_char *dst)
+{
+
+ Camellia_EncryptBlock(ctx->bits, src, ctx->subkey, dst);
+}
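+
+/*
+ * Usage sketch (hedged; camellia_ctx comes from camellia.h, and a
+ * 128-bit key "key" plus 16-byte buffers pt/ct are assumed):
+ *
+ *	camellia_ctx ctx;
+ *
+ *	camellia_set_key(&ctx, key, 128);
+ *	camellia_encrypt(&ctx, pt, ct);
+ *	camellia_decrypt(&ctx, ct, pt);	// recovers the plaintext
+ */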
diff --git a/rtems/freebsd/crypto/camellia/camellia.c b/rtems/freebsd/crypto/camellia/camellia.c
new file mode 100644
index 00000000..d42840b5
--- /dev/null
+++ b/rtems/freebsd/crypto/camellia/camellia.c
@@ -0,0 +1,1334 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/* camellia.c ver 1.1.0
+ *
+ * Copyright (c) 2006
+ * NTT (Nippon Telegraph and Telephone Corporation). All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer as
+ * the first lines of this file unmodified.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY NTT ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL NTT BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * Algorithm Specification
+ * http://info.isl.ntt.co.jp/crypt/eng/camellia/specifications.html
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+#include <rtems/freebsd/sys/types.h>
+#include <rtems/freebsd/sys/endian.h>
+#ifdef _KERNEL
+#include <rtems/freebsd/sys/systm.h>
+#else
+#include <rtems/freebsd/string.h>
+#include <rtems/freebsd/assert.h>
+#define KASSERT(exp, msg) assert(exp)
+#endif
+
+#include <rtems/freebsd/crypto/camellia/camellia.h>
+
+
+/* key constants */
+
+#define CAMELLIA_SIGMA1L (0xA09E667FL)
+#define CAMELLIA_SIGMA1R (0x3BCC908BL)
+#define CAMELLIA_SIGMA2L (0xB67AE858L)
+#define CAMELLIA_SIGMA2R (0x4CAA73B2L)
+#define CAMELLIA_SIGMA3L (0xC6EF372FL)
+#define CAMELLIA_SIGMA3R (0xE94F82BEL)
+#define CAMELLIA_SIGMA4L (0x54FF53A5L)
+#define CAMELLIA_SIGMA4R (0xF1D36F1CL)
+#define CAMELLIA_SIGMA5L (0x10E527FAL)
+#define CAMELLIA_SIGMA5R (0xDE682D1DL)
+#define CAMELLIA_SIGMA6L (0xB05688C2L)
+#define CAMELLIA_SIGMA6R (0xB3E6C1FDL)
+
+/*
+ * macros
+ */
+#define GETU32(pt) (((uint32_t)(pt)[0] << 24) \
+ ^ ((uint32_t)(pt)[1] << 16) \
+ ^ ((uint32_t)(pt)[2] << 8) \
+ ^ ((uint32_t)(pt)[3]))
+
+#define PUTU32(ct, st) {(ct)[0] = (uint8_t)((st) >> 24); \
+ (ct)[1] = (uint8_t)((st) >> 16); \
+ (ct)[2] = (uint8_t)((st) >> 8); \
+ (ct)[3] = (uint8_t)(st);}
+
+#define SUBL(INDEX) (subkey[(INDEX)*2+1])
+#define SUBR(INDEX) (subkey[(INDEX)*2])
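+/*
+ * GETU32/PUTU32 load and store 32-bit words big-endian; SUBL()/SUBR()
+ * name the left and right 32-bit halves of 64-bit subkey slot INDEX
+ * within the caller's flat subkey[] array.
+ */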
+
+#define CAMELLIA_RR8(x) (((x) >> 8) + ((x) << 24))
+#define CAMELLIA_RL1(x) (((x) << 1) + ((x) >> 31))
+#define CAMELLIA_RL8(x) (((x) << 8) + ((x) >> 24))
+
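+/*
+ * CAMELLIA_ROLDQ rotates the 128-bit quantity (ll||lr||rl||rr) left by
+ * `bits' (bits < 32); the o32 variant covers rotations by 32..64 bits.
+ */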
+#define CAMELLIA_ROLDQ(ll, lr, rl, rr, w0, w1, bits) \
+ do { \
+ w0 = ll; \
+ ll = (ll << bits) + (lr >> (32 - bits)); \
+ lr = (lr << bits) + (rl >> (32 - bits)); \
+ rl = (rl << bits) + (rr >> (32 - bits)); \
+ rr = (rr << bits) + (w0 >> (32 - bits)); \
+ } while(0)
+
+#define CAMELLIA_ROLDQo32(ll, lr, rl, rr, w0, w1, bits) \
+ do { \
+ w0 = ll; \
+ w1 = lr; \
+ ll = (lr << (bits - 32)) + (rl >> (64 - bits)); \
+ lr = (rl << (bits - 32)) + (rr >> (64 - bits)); \
+ rl = (rr << (bits - 32)) + (w0 >> (64 - bits)); \
+ rr = (w0 << (bits - 32)) + (w1 >> (64 - bits)); \
+ } while(0)
+
+#define CAMELLIA_SP1110(INDEX) (camellia_sp1110[(INDEX)])
+#define CAMELLIA_SP0222(INDEX) (camellia_sp0222[(INDEX)])
+#define CAMELLIA_SP3033(INDEX) (camellia_sp3033[(INDEX)])
+#define CAMELLIA_SP4404(INDEX) (camellia_sp4404[(INDEX)])
+
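+/*
+ * CAMELLIA_F is the round function: XOR the 64-bit subkey (kl||kr)
+ * into the input, push each byte through an s-box, and mix the
+ * results -- the S- and P-layers of the specification.
+ */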
+#define CAMELLIA_F(xl, xr, kl, kr, yl, yr, il, ir, t0, t1) \
+ do { \
+ il = xl ^ kl; \
+ ir = xr ^ kr; \
+ t0 = il >> 16; \
+ t1 = ir >> 16; \
+ yl = CAMELLIA_SP1110(ir & 0xff) \
+ ^ CAMELLIA_SP0222((t1 >> 8) & 0xff) \
+ ^ CAMELLIA_SP3033(t1 & 0xff) \
+ ^ CAMELLIA_SP4404((ir >> 8) & 0xff); \
+ yr = CAMELLIA_SP1110((t0 >> 8) & 0xff) \
+ ^ CAMELLIA_SP0222(t0 & 0xff) \
+ ^ CAMELLIA_SP3033((il >> 8) & 0xff) \
+ ^ CAMELLIA_SP4404(il & 0xff); \
+ yl ^= yr; \
+ yr = CAMELLIA_RR8(yr); \
+ yr ^= yl; \
+ } while(0)
+
+
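+/*
+ * CAMELLIA_FLS applies the FL layer to the left half and FL^-1 to the
+ * right half; the cipher inserts this pair every six rounds.
+ */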
+#define CAMELLIA_FLS(ll, lr, rl, rr, kll, klr, krl, krr, t0, t1, t2, t3) \
+ do { \
+ t0 = kll; \
+ t2 = krr; \
+ t0 &= ll; \
+ t2 |= rr; \
+ rl ^= t2; \
+ lr ^= CAMELLIA_RL1(t0); \
+ t3 = krl; \
+ t1 = klr; \
+ t3 &= rl; \
+ t1 |= lr; \
+ ll ^= t1; \
+ rr ^= CAMELLIA_RL1(t3); \
+ } while(0)
+
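+/*
+ * CAMELLIA_ROUNDSM is the round function again, with the s-box lookups
+ * arranged to match the subkeys that the setup routines below prepare
+ * (they pre-apply the inverse of the last half of the P-function).
+ */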
+#define CAMELLIA_ROUNDSM(xl, xr, kl, kr, yl, yr, il, ir, t0, t1) \
+ do { \
+ ir = CAMELLIA_SP1110(xr & 0xff); \
+ il = CAMELLIA_SP1110((xl>>24) & 0xff); \
+ ir ^= CAMELLIA_SP0222((xr>>24) & 0xff); \
+ il ^= CAMELLIA_SP0222((xl>>16) & 0xff); \
+ ir ^= CAMELLIA_SP3033((xr>>16) & 0xff); \
+ il ^= CAMELLIA_SP3033((xl>>8) & 0xff); \
+ ir ^= CAMELLIA_SP4404((xr>>8) & 0xff); \
+ il ^= CAMELLIA_SP4404(xl & 0xff); \
+ il ^= kl; \
+ ir ^= kr; \
+ ir ^= il; \
+ il = CAMELLIA_RR8(il); \
+ il ^= ir; \
+ yl ^= ir; \
+ yr ^= il; \
+ } while(0)
+
+
+static const uint32_t camellia_sp1110[256] = {
+ 0x70707000,0x82828200,0x2c2c2c00,0xececec00,
+ 0xb3b3b300,0x27272700,0xc0c0c000,0xe5e5e500,
+ 0xe4e4e400,0x85858500,0x57575700,0x35353500,
+ 0xeaeaea00,0x0c0c0c00,0xaeaeae00,0x41414100,
+ 0x23232300,0xefefef00,0x6b6b6b00,0x93939300,
+ 0x45454500,0x19191900,0xa5a5a500,0x21212100,
+ 0xededed00,0x0e0e0e00,0x4f4f4f00,0x4e4e4e00,
+ 0x1d1d1d00,0x65656500,0x92929200,0xbdbdbd00,
+ 0x86868600,0xb8b8b800,0xafafaf00,0x8f8f8f00,
+ 0x7c7c7c00,0xebebeb00,0x1f1f1f00,0xcecece00,
+ 0x3e3e3e00,0x30303000,0xdcdcdc00,0x5f5f5f00,
+ 0x5e5e5e00,0xc5c5c500,0x0b0b0b00,0x1a1a1a00,
+ 0xa6a6a600,0xe1e1e100,0x39393900,0xcacaca00,
+ 0xd5d5d500,0x47474700,0x5d5d5d00,0x3d3d3d00,
+ 0xd9d9d900,0x01010100,0x5a5a5a00,0xd6d6d600,
+ 0x51515100,0x56565600,0x6c6c6c00,0x4d4d4d00,
+ 0x8b8b8b00,0x0d0d0d00,0x9a9a9a00,0x66666600,
+ 0xfbfbfb00,0xcccccc00,0xb0b0b000,0x2d2d2d00,
+ 0x74747400,0x12121200,0x2b2b2b00,0x20202000,
+ 0xf0f0f000,0xb1b1b100,0x84848400,0x99999900,
+ 0xdfdfdf00,0x4c4c4c00,0xcbcbcb00,0xc2c2c200,
+ 0x34343400,0x7e7e7e00,0x76767600,0x05050500,
+ 0x6d6d6d00,0xb7b7b700,0xa9a9a900,0x31313100,
+ 0xd1d1d100,0x17171700,0x04040400,0xd7d7d700,
+ 0x14141400,0x58585800,0x3a3a3a00,0x61616100,
+ 0xdedede00,0x1b1b1b00,0x11111100,0x1c1c1c00,
+ 0x32323200,0x0f0f0f00,0x9c9c9c00,0x16161600,
+ 0x53535300,0x18181800,0xf2f2f200,0x22222200,
+ 0xfefefe00,0x44444400,0xcfcfcf00,0xb2b2b200,
+ 0xc3c3c300,0xb5b5b500,0x7a7a7a00,0x91919100,
+ 0x24242400,0x08080800,0xe8e8e800,0xa8a8a800,
+ 0x60606000,0xfcfcfc00,0x69696900,0x50505000,
+ 0xaaaaaa00,0xd0d0d000,0xa0a0a000,0x7d7d7d00,
+ 0xa1a1a100,0x89898900,0x62626200,0x97979700,
+ 0x54545400,0x5b5b5b00,0x1e1e1e00,0x95959500,
+ 0xe0e0e000,0xffffff00,0x64646400,0xd2d2d200,
+ 0x10101000,0xc4c4c400,0x00000000,0x48484800,
+ 0xa3a3a300,0xf7f7f700,0x75757500,0xdbdbdb00,
+ 0x8a8a8a00,0x03030300,0xe6e6e600,0xdadada00,
+ 0x09090900,0x3f3f3f00,0xdddddd00,0x94949400,
+ 0x87878700,0x5c5c5c00,0x83838300,0x02020200,
+ 0xcdcdcd00,0x4a4a4a00,0x90909000,0x33333300,
+ 0x73737300,0x67676700,0xf6f6f600,0xf3f3f300,
+ 0x9d9d9d00,0x7f7f7f00,0xbfbfbf00,0xe2e2e200,
+ 0x52525200,0x9b9b9b00,0xd8d8d800,0x26262600,
+ 0xc8c8c800,0x37373700,0xc6c6c600,0x3b3b3b00,
+ 0x81818100,0x96969600,0x6f6f6f00,0x4b4b4b00,
+ 0x13131300,0xbebebe00,0x63636300,0x2e2e2e00,
+ 0xe9e9e900,0x79797900,0xa7a7a700,0x8c8c8c00,
+ 0x9f9f9f00,0x6e6e6e00,0xbcbcbc00,0x8e8e8e00,
+ 0x29292900,0xf5f5f500,0xf9f9f900,0xb6b6b600,
+ 0x2f2f2f00,0xfdfdfd00,0xb4b4b400,0x59595900,
+ 0x78787800,0x98989800,0x06060600,0x6a6a6a00,
+ 0xe7e7e700,0x46464600,0x71717100,0xbababa00,
+ 0xd4d4d400,0x25252500,0xababab00,0x42424200,
+ 0x88888800,0xa2a2a200,0x8d8d8d00,0xfafafa00,
+ 0x72727200,0x07070700,0xb9b9b900,0x55555500,
+ 0xf8f8f800,0xeeeeee00,0xacacac00,0x0a0a0a00,
+ 0x36363600,0x49494900,0x2a2a2a00,0x68686800,
+ 0x3c3c3c00,0x38383800,0xf1f1f100,0xa4a4a400,
+ 0x40404000,0x28282800,0xd3d3d300,0x7b7b7b00,
+ 0xbbbbbb00,0xc9c9c900,0x43434300,0xc1c1c100,
+ 0x15151500,0xe3e3e300,0xadadad00,0xf4f4f400,
+ 0x77777700,0xc7c7c700,0x80808000,0x9e9e9e00,
+};
+
+static const uint32_t camellia_sp0222[256] = {
+ 0x00e0e0e0,0x00050505,0x00585858,0x00d9d9d9,
+ 0x00676767,0x004e4e4e,0x00818181,0x00cbcbcb,
+ 0x00c9c9c9,0x000b0b0b,0x00aeaeae,0x006a6a6a,
+ 0x00d5d5d5,0x00181818,0x005d5d5d,0x00828282,
+ 0x00464646,0x00dfdfdf,0x00d6d6d6,0x00272727,
+ 0x008a8a8a,0x00323232,0x004b4b4b,0x00424242,
+ 0x00dbdbdb,0x001c1c1c,0x009e9e9e,0x009c9c9c,
+ 0x003a3a3a,0x00cacaca,0x00252525,0x007b7b7b,
+ 0x000d0d0d,0x00717171,0x005f5f5f,0x001f1f1f,
+ 0x00f8f8f8,0x00d7d7d7,0x003e3e3e,0x009d9d9d,
+ 0x007c7c7c,0x00606060,0x00b9b9b9,0x00bebebe,
+ 0x00bcbcbc,0x008b8b8b,0x00161616,0x00343434,
+ 0x004d4d4d,0x00c3c3c3,0x00727272,0x00959595,
+ 0x00ababab,0x008e8e8e,0x00bababa,0x007a7a7a,
+ 0x00b3b3b3,0x00020202,0x00b4b4b4,0x00adadad,
+ 0x00a2a2a2,0x00acacac,0x00d8d8d8,0x009a9a9a,
+ 0x00171717,0x001a1a1a,0x00353535,0x00cccccc,
+ 0x00f7f7f7,0x00999999,0x00616161,0x005a5a5a,
+ 0x00e8e8e8,0x00242424,0x00565656,0x00404040,
+ 0x00e1e1e1,0x00636363,0x00090909,0x00333333,
+ 0x00bfbfbf,0x00989898,0x00979797,0x00858585,
+ 0x00686868,0x00fcfcfc,0x00ececec,0x000a0a0a,
+ 0x00dadada,0x006f6f6f,0x00535353,0x00626262,
+ 0x00a3a3a3,0x002e2e2e,0x00080808,0x00afafaf,
+ 0x00282828,0x00b0b0b0,0x00747474,0x00c2c2c2,
+ 0x00bdbdbd,0x00363636,0x00222222,0x00383838,
+ 0x00646464,0x001e1e1e,0x00393939,0x002c2c2c,
+ 0x00a6a6a6,0x00303030,0x00e5e5e5,0x00444444,
+ 0x00fdfdfd,0x00888888,0x009f9f9f,0x00656565,
+ 0x00878787,0x006b6b6b,0x00f4f4f4,0x00232323,
+ 0x00484848,0x00101010,0x00d1d1d1,0x00515151,
+ 0x00c0c0c0,0x00f9f9f9,0x00d2d2d2,0x00a0a0a0,
+ 0x00555555,0x00a1a1a1,0x00414141,0x00fafafa,
+ 0x00434343,0x00131313,0x00c4c4c4,0x002f2f2f,
+ 0x00a8a8a8,0x00b6b6b6,0x003c3c3c,0x002b2b2b,
+ 0x00c1c1c1,0x00ffffff,0x00c8c8c8,0x00a5a5a5,
+ 0x00202020,0x00898989,0x00000000,0x00909090,
+ 0x00474747,0x00efefef,0x00eaeaea,0x00b7b7b7,
+ 0x00151515,0x00060606,0x00cdcdcd,0x00b5b5b5,
+ 0x00121212,0x007e7e7e,0x00bbbbbb,0x00292929,
+ 0x000f0f0f,0x00b8b8b8,0x00070707,0x00040404,
+ 0x009b9b9b,0x00949494,0x00212121,0x00666666,
+ 0x00e6e6e6,0x00cecece,0x00ededed,0x00e7e7e7,
+ 0x003b3b3b,0x00fefefe,0x007f7f7f,0x00c5c5c5,
+ 0x00a4a4a4,0x00373737,0x00b1b1b1,0x004c4c4c,
+ 0x00919191,0x006e6e6e,0x008d8d8d,0x00767676,
+ 0x00030303,0x002d2d2d,0x00dedede,0x00969696,
+ 0x00262626,0x007d7d7d,0x00c6c6c6,0x005c5c5c,
+ 0x00d3d3d3,0x00f2f2f2,0x004f4f4f,0x00191919,
+ 0x003f3f3f,0x00dcdcdc,0x00797979,0x001d1d1d,
+ 0x00525252,0x00ebebeb,0x00f3f3f3,0x006d6d6d,
+ 0x005e5e5e,0x00fbfbfb,0x00696969,0x00b2b2b2,
+ 0x00f0f0f0,0x00313131,0x000c0c0c,0x00d4d4d4,
+ 0x00cfcfcf,0x008c8c8c,0x00e2e2e2,0x00757575,
+ 0x00a9a9a9,0x004a4a4a,0x00575757,0x00848484,
+ 0x00111111,0x00454545,0x001b1b1b,0x00f5f5f5,
+ 0x00e4e4e4,0x000e0e0e,0x00737373,0x00aaaaaa,
+ 0x00f1f1f1,0x00dddddd,0x00595959,0x00141414,
+ 0x006c6c6c,0x00929292,0x00545454,0x00d0d0d0,
+ 0x00787878,0x00707070,0x00e3e3e3,0x00494949,
+ 0x00808080,0x00505050,0x00a7a7a7,0x00f6f6f6,
+ 0x00777777,0x00939393,0x00868686,0x00838383,
+ 0x002a2a2a,0x00c7c7c7,0x005b5b5b,0x00e9e9e9,
+ 0x00eeeeee,0x008f8f8f,0x00010101,0x003d3d3d,
+};
+
+static const uint32_t camellia_sp3033[256] = {
+ 0x38003838,0x41004141,0x16001616,0x76007676,
+ 0xd900d9d9,0x93009393,0x60006060,0xf200f2f2,
+ 0x72007272,0xc200c2c2,0xab00abab,0x9a009a9a,
+ 0x75007575,0x06000606,0x57005757,0xa000a0a0,
+ 0x91009191,0xf700f7f7,0xb500b5b5,0xc900c9c9,
+ 0xa200a2a2,0x8c008c8c,0xd200d2d2,0x90009090,
+ 0xf600f6f6,0x07000707,0xa700a7a7,0x27002727,
+ 0x8e008e8e,0xb200b2b2,0x49004949,0xde00dede,
+ 0x43004343,0x5c005c5c,0xd700d7d7,0xc700c7c7,
+ 0x3e003e3e,0xf500f5f5,0x8f008f8f,0x67006767,
+ 0x1f001f1f,0x18001818,0x6e006e6e,0xaf00afaf,
+ 0x2f002f2f,0xe200e2e2,0x85008585,0x0d000d0d,
+ 0x53005353,0xf000f0f0,0x9c009c9c,0x65006565,
+ 0xea00eaea,0xa300a3a3,0xae00aeae,0x9e009e9e,
+ 0xec00ecec,0x80008080,0x2d002d2d,0x6b006b6b,
+ 0xa800a8a8,0x2b002b2b,0x36003636,0xa600a6a6,
+ 0xc500c5c5,0x86008686,0x4d004d4d,0x33003333,
+ 0xfd00fdfd,0x66006666,0x58005858,0x96009696,
+ 0x3a003a3a,0x09000909,0x95009595,0x10001010,
+ 0x78007878,0xd800d8d8,0x42004242,0xcc00cccc,
+ 0xef00efef,0x26002626,0xe500e5e5,0x61006161,
+ 0x1a001a1a,0x3f003f3f,0x3b003b3b,0x82008282,
+ 0xb600b6b6,0xdb00dbdb,0xd400d4d4,0x98009898,
+ 0xe800e8e8,0x8b008b8b,0x02000202,0xeb00ebeb,
+ 0x0a000a0a,0x2c002c2c,0x1d001d1d,0xb000b0b0,
+ 0x6f006f6f,0x8d008d8d,0x88008888,0x0e000e0e,
+ 0x19001919,0x87008787,0x4e004e4e,0x0b000b0b,
+ 0xa900a9a9,0x0c000c0c,0x79007979,0x11001111,
+ 0x7f007f7f,0x22002222,0xe700e7e7,0x59005959,
+ 0xe100e1e1,0xda00dada,0x3d003d3d,0xc800c8c8,
+ 0x12001212,0x04000404,0x74007474,0x54005454,
+ 0x30003030,0x7e007e7e,0xb400b4b4,0x28002828,
+ 0x55005555,0x68006868,0x50005050,0xbe00bebe,
+ 0xd000d0d0,0xc400c4c4,0x31003131,0xcb00cbcb,
+ 0x2a002a2a,0xad00adad,0x0f000f0f,0xca00caca,
+ 0x70007070,0xff00ffff,0x32003232,0x69006969,
+ 0x08000808,0x62006262,0x00000000,0x24002424,
+ 0xd100d1d1,0xfb00fbfb,0xba00baba,0xed00eded,
+ 0x45004545,0x81008181,0x73007373,0x6d006d6d,
+ 0x84008484,0x9f009f9f,0xee00eeee,0x4a004a4a,
+ 0xc300c3c3,0x2e002e2e,0xc100c1c1,0x01000101,
+ 0xe600e6e6,0x25002525,0x48004848,0x99009999,
+ 0xb900b9b9,0xb300b3b3,0x7b007b7b,0xf900f9f9,
+ 0xce00cece,0xbf00bfbf,0xdf00dfdf,0x71007171,
+ 0x29002929,0xcd00cdcd,0x6c006c6c,0x13001313,
+ 0x64006464,0x9b009b9b,0x63006363,0x9d009d9d,
+ 0xc000c0c0,0x4b004b4b,0xb700b7b7,0xa500a5a5,
+ 0x89008989,0x5f005f5f,0xb100b1b1,0x17001717,
+ 0xf400f4f4,0xbc00bcbc,0xd300d3d3,0x46004646,
+ 0xcf00cfcf,0x37003737,0x5e005e5e,0x47004747,
+ 0x94009494,0xfa00fafa,0xfc00fcfc,0x5b005b5b,
+ 0x97009797,0xfe00fefe,0x5a005a5a,0xac00acac,
+ 0x3c003c3c,0x4c004c4c,0x03000303,0x35003535,
+ 0xf300f3f3,0x23002323,0xb800b8b8,0x5d005d5d,
+ 0x6a006a6a,0x92009292,0xd500d5d5,0x21002121,
+ 0x44004444,0x51005151,0xc600c6c6,0x7d007d7d,
+ 0x39003939,0x83008383,0xdc00dcdc,0xaa00aaaa,
+ 0x7c007c7c,0x77007777,0x56005656,0x05000505,
+ 0x1b001b1b,0xa400a4a4,0x15001515,0x34003434,
+ 0x1e001e1e,0x1c001c1c,0xf800f8f8,0x52005252,
+ 0x20002020,0x14001414,0xe900e9e9,0xbd00bdbd,
+ 0xdd00dddd,0xe400e4e4,0xa100a1a1,0xe000e0e0,
+ 0x8a008a8a,0xf100f1f1,0xd600d6d6,0x7a007a7a,
+ 0xbb00bbbb,0xe300e3e3,0x40004040,0x4f004f4f,
+};
+
+static const uint32_t camellia_sp4404[256] = {
+ 0x70700070,0x2c2c002c,0xb3b300b3,0xc0c000c0,
+ 0xe4e400e4,0x57570057,0xeaea00ea,0xaeae00ae,
+ 0x23230023,0x6b6b006b,0x45450045,0xa5a500a5,
+ 0xeded00ed,0x4f4f004f,0x1d1d001d,0x92920092,
+ 0x86860086,0xafaf00af,0x7c7c007c,0x1f1f001f,
+ 0x3e3e003e,0xdcdc00dc,0x5e5e005e,0x0b0b000b,
+ 0xa6a600a6,0x39390039,0xd5d500d5,0x5d5d005d,
+ 0xd9d900d9,0x5a5a005a,0x51510051,0x6c6c006c,
+ 0x8b8b008b,0x9a9a009a,0xfbfb00fb,0xb0b000b0,
+ 0x74740074,0x2b2b002b,0xf0f000f0,0x84840084,
+ 0xdfdf00df,0xcbcb00cb,0x34340034,0x76760076,
+ 0x6d6d006d,0xa9a900a9,0xd1d100d1,0x04040004,
+ 0x14140014,0x3a3a003a,0xdede00de,0x11110011,
+ 0x32320032,0x9c9c009c,0x53530053,0xf2f200f2,
+ 0xfefe00fe,0xcfcf00cf,0xc3c300c3,0x7a7a007a,
+ 0x24240024,0xe8e800e8,0x60600060,0x69690069,
+ 0xaaaa00aa,0xa0a000a0,0xa1a100a1,0x62620062,
+ 0x54540054,0x1e1e001e,0xe0e000e0,0x64640064,
+ 0x10100010,0x00000000,0xa3a300a3,0x75750075,
+ 0x8a8a008a,0xe6e600e6,0x09090009,0xdddd00dd,
+ 0x87870087,0x83830083,0xcdcd00cd,0x90900090,
+ 0x73730073,0xf6f600f6,0x9d9d009d,0xbfbf00bf,
+ 0x52520052,0xd8d800d8,0xc8c800c8,0xc6c600c6,
+ 0x81810081,0x6f6f006f,0x13130013,0x63630063,
+ 0xe9e900e9,0xa7a700a7,0x9f9f009f,0xbcbc00bc,
+ 0x29290029,0xf9f900f9,0x2f2f002f,0xb4b400b4,
+ 0x78780078,0x06060006,0xe7e700e7,0x71710071,
+ 0xd4d400d4,0xabab00ab,0x88880088,0x8d8d008d,
+ 0x72720072,0xb9b900b9,0xf8f800f8,0xacac00ac,
+ 0x36360036,0x2a2a002a,0x3c3c003c,0xf1f100f1,
+ 0x40400040,0xd3d300d3,0xbbbb00bb,0x43430043,
+ 0x15150015,0xadad00ad,0x77770077,0x80800080,
+ 0x82820082,0xecec00ec,0x27270027,0xe5e500e5,
+ 0x85850085,0x35350035,0x0c0c000c,0x41410041,
+ 0xefef00ef,0x93930093,0x19190019,0x21210021,
+ 0x0e0e000e,0x4e4e004e,0x65650065,0xbdbd00bd,
+ 0xb8b800b8,0x8f8f008f,0xebeb00eb,0xcece00ce,
+ 0x30300030,0x5f5f005f,0xc5c500c5,0x1a1a001a,
+ 0xe1e100e1,0xcaca00ca,0x47470047,0x3d3d003d,
+ 0x01010001,0xd6d600d6,0x56560056,0x4d4d004d,
+ 0x0d0d000d,0x66660066,0xcccc00cc,0x2d2d002d,
+ 0x12120012,0x20200020,0xb1b100b1,0x99990099,
+ 0x4c4c004c,0xc2c200c2,0x7e7e007e,0x05050005,
+ 0xb7b700b7,0x31310031,0x17170017,0xd7d700d7,
+ 0x58580058,0x61610061,0x1b1b001b,0x1c1c001c,
+ 0x0f0f000f,0x16160016,0x18180018,0x22220022,
+ 0x44440044,0xb2b200b2,0xb5b500b5,0x91910091,
+ 0x08080008,0xa8a800a8,0xfcfc00fc,0x50500050,
+ 0xd0d000d0,0x7d7d007d,0x89890089,0x97970097,
+ 0x5b5b005b,0x95950095,0xffff00ff,0xd2d200d2,
+ 0xc4c400c4,0x48480048,0xf7f700f7,0xdbdb00db,
+ 0x03030003,0xdada00da,0x3f3f003f,0x94940094,
+ 0x5c5c005c,0x02020002,0x4a4a004a,0x33330033,
+ 0x67670067,0xf3f300f3,0x7f7f007f,0xe2e200e2,
+ 0x9b9b009b,0x26260026,0x37370037,0x3b3b003b,
+ 0x96960096,0x4b4b004b,0xbebe00be,0x2e2e002e,
+ 0x79790079,0x8c8c008c,0x6e6e006e,0x8e8e008e,
+ 0xf5f500f5,0xb6b600b6,0xfdfd00fd,0x59590059,
+ 0x98980098,0x6a6a006a,0x46460046,0xbaba00ba,
+ 0x25250025,0x42420042,0xa2a200a2,0xfafa00fa,
+ 0x07070007,0x55550055,0xeeee00ee,0x0a0a000a,
+ 0x49490049,0x68680068,0x38380038,0xa4a400a4,
+ 0x28280028,0x7b7b007b,0xc9c900c9,0xc1c100c1,
+ 0xe3e300e3,0xf4f400f4,0xc7c700c7,0x9e9e009e,
+};
+
+
+/*
+ * Camellia key schedule support
+ */
+#define subl(x) subL[(x)]
+#define subr(x) subR[(x)]
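+/*
+ * subl()/subr() address the temporary subL[]/subR[] arrays used while
+ * computing the schedule; the final values are written out through
+ * SUBL()/SUBR() into the caller-supplied subkey[].
+ */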
+
+void
+camellia_setup128(const unsigned char *key, uint32_t *subkey)
+{
+ uint32_t kll, klr, krl, krr;
+ uint32_t il, ir, t0, t1, w0, w1;
+ uint32_t kw4l, kw4r, dw, tl, tr;
+ uint32_t subL[26];
+ uint32_t subR[26];
+
+	/*
+	 * k == kll || klr || krl || krr (|| is concatenation)
+	 */
+ kll = GETU32(key );
+ klr = GETU32(key + 4);
+ krl = GETU32(key + 8);
+ krr = GETU32(key + 12);
+ /*
+ * generate KL dependent subkeys
+ */
+ subl(0) = kll; subr(0) = klr;
+ subl(1) = krl; subr(1) = krr;
+ CAMELLIA_ROLDQ(kll, klr, krl, krr, w0, w1, 15);
+ subl(4) = kll; subr(4) = klr;
+ subl(5) = krl; subr(5) = krr;
+ CAMELLIA_ROLDQ(kll, klr, krl, krr, w0, w1, 30);
+ subl(10) = kll; subr(10) = klr;
+ subl(11) = krl; subr(11) = krr;
+ CAMELLIA_ROLDQ(kll, klr, krl, krr, w0, w1, 15);
+ subl(13) = krl; subr(13) = krr;
+ CAMELLIA_ROLDQ(kll, klr, krl, krr, w0, w1, 17);
+ subl(16) = kll; subr(16) = klr;
+ subl(17) = krl; subr(17) = krr;
+ CAMELLIA_ROLDQ(kll, klr, krl, krr, w0, w1, 17);
+ subl(18) = kll; subr(18) = klr;
+ subl(19) = krl; subr(19) = krr;
+ CAMELLIA_ROLDQ(kll, klr, krl, krr, w0, w1, 17);
+ subl(22) = kll; subr(22) = klr;
+ subl(23) = krl; subr(23) = krr;
+
+ /* generate KA */
+ kll = subl(0); klr = subr(0);
+ krl = subl(1); krr = subr(1);
+ CAMELLIA_F(kll, klr, CAMELLIA_SIGMA1L, CAMELLIA_SIGMA1R,
+ w0, w1, il, ir, t0, t1);
+ krl ^= w0; krr ^= w1;
+ CAMELLIA_F(krl, krr, CAMELLIA_SIGMA2L, CAMELLIA_SIGMA2R,
+ kll, klr, il, ir, t0, t1);
+ CAMELLIA_F(kll, klr, CAMELLIA_SIGMA3L, CAMELLIA_SIGMA3R,
+ krl, krr, il, ir, t0, t1);
+ krl ^= w0; krr ^= w1;
+ CAMELLIA_F(krl, krr, CAMELLIA_SIGMA4L, CAMELLIA_SIGMA4R,
+ w0, w1, il, ir, t0, t1);
+ kll ^= w0; klr ^= w1;
+
+ /* generate KA dependent subkeys */
+ subl(2) = kll; subr(2) = klr;
+ subl(3) = krl; subr(3) = krr;
+ CAMELLIA_ROLDQ(kll, klr, krl, krr, w0, w1, 15);
+ subl(6) = kll; subr(6) = klr;
+ subl(7) = krl; subr(7) = krr;
+ CAMELLIA_ROLDQ(kll, klr, krl, krr, w0, w1, 15);
+ subl(8) = kll; subr(8) = klr;
+ subl(9) = krl; subr(9) = krr;
+ CAMELLIA_ROLDQ(kll, klr, krl, krr, w0, w1, 15);
+ subl(12) = kll; subr(12) = klr;
+ CAMELLIA_ROLDQ(kll, klr, krl, krr, w0, w1, 15);
+ subl(14) = kll; subr(14) = klr;
+ subl(15) = krl; subr(15) = krr;
+ CAMELLIA_ROLDQo32(kll, klr, krl, krr, w0, w1, 34);
+ subl(20) = kll; subr(20) = klr;
+ subl(21) = krl; subr(21) = krr;
+ CAMELLIA_ROLDQ(kll, klr, krl, krr, w0, w1, 17);
+ subl(24) = kll; subr(24) = klr;
+ subl(25) = krl; subr(25) = krr;
+
+
+ /* absorb kw2 to other subkeys */
+ subl(3) ^= subl(1); subr(3) ^= subr(1);
+ subl(5) ^= subl(1); subr(5) ^= subr(1);
+ subl(7) ^= subl(1); subr(7) ^= subr(1);
+ subl(1) ^= subr(1) & ~subr(9);
+ dw = subl(1) & subl(9), subr(1) ^= CAMELLIA_RL1(dw);
+ subl(11) ^= subl(1); subr(11) ^= subr(1);
+ subl(13) ^= subl(1); subr(13) ^= subr(1);
+ subl(15) ^= subl(1); subr(15) ^= subr(1);
+ subl(1) ^= subr(1) & ~subr(17);
+ dw = subl(1) & subl(17), subr(1) ^= CAMELLIA_RL1(dw);
+ subl(19) ^= subl(1); subr(19) ^= subr(1);
+ subl(21) ^= subl(1); subr(21) ^= subr(1);
+ subl(23) ^= subl(1); subr(23) ^= subr(1);
+ subl(24) ^= subl(1); subr(24) ^= subr(1);
+
+ /* absorb kw4 to other subkeys */
+ kw4l = subl(25); kw4r = subr(25);
+ subl(22) ^= kw4l; subr(22) ^= kw4r;
+ subl(20) ^= kw4l; subr(20) ^= kw4r;
+ subl(18) ^= kw4l; subr(18) ^= kw4r;
+ kw4l ^= kw4r & ~subr(16);
+ dw = kw4l & subl(16), kw4r ^= CAMELLIA_RL1(dw);
+ subl(14) ^= kw4l; subr(14) ^= kw4r;
+ subl(12) ^= kw4l; subr(12) ^= kw4r;
+ subl(10) ^= kw4l; subr(10) ^= kw4r;
+ kw4l ^= kw4r & ~subr(8);
+ dw = kw4l & subl(8), kw4r ^= CAMELLIA_RL1(dw);
+ subl(6) ^= kw4l; subr(6) ^= kw4r;
+ subl(4) ^= kw4l; subr(4) ^= kw4r;
+ subl(2) ^= kw4l; subr(2) ^= kw4r;
+ subl(0) ^= kw4l; subr(0) ^= kw4r;
+
+ /* key XOR is end of F-function */
+ SUBL(0) = subl(0) ^ subl(2);
+ SUBR(0) = subr(0) ^ subr(2);
+ SUBL(2) = subl(3);
+ SUBR(2) = subr(3);
+ SUBL(3) = subl(2) ^ subl(4);
+ SUBR(3) = subr(2) ^ subr(4);
+ SUBL(4) = subl(3) ^ subl(5);
+ SUBR(4) = subr(3) ^ subr(5);
+ SUBL(5) = subl(4) ^ subl(6);
+ SUBR(5) = subr(4) ^ subr(6);
+ SUBL(6) = subl(5) ^ subl(7);
+ SUBR(6) = subr(5) ^ subr(7);
+ tl = subl(10) ^ (subr(10) & ~subr(8));
+ dw = tl & subl(8), tr = subr(10) ^ CAMELLIA_RL1(dw);
+ SUBL(7) = subl(6) ^ tl;
+ SUBR(7) = subr(6) ^ tr;
+ SUBL(8) = subl(8);
+ SUBR(8) = subr(8);
+ SUBL(9) = subl(9);
+ SUBR(9) = subr(9);
+ tl = subl(7) ^ (subr(7) & ~subr(9));
+ dw = tl & subl(9), tr = subr(7) ^ CAMELLIA_RL1(dw);
+ SUBL(10) = tl ^ subl(11);
+ SUBR(10) = tr ^ subr(11);
+ SUBL(11) = subl(10) ^ subl(12);
+ SUBR(11) = subr(10) ^ subr(12);
+ SUBL(12) = subl(11) ^ subl(13);
+ SUBR(12) = subr(11) ^ subr(13);
+ SUBL(13) = subl(12) ^ subl(14);
+ SUBR(13) = subr(12) ^ subr(14);
+ SUBL(14) = subl(13) ^ subl(15);
+ SUBR(14) = subr(13) ^ subr(15);
+ tl = subl(18) ^ (subr(18) & ~subr(16));
+ dw = tl & subl(16), tr = subr(18) ^ CAMELLIA_RL1(dw);
+ SUBL(15) = subl(14) ^ tl;
+ SUBR(15) = subr(14) ^ tr;
+ SUBL(16) = subl(16);
+ SUBR(16) = subr(16);
+ SUBL(17) = subl(17);
+ SUBR(17) = subr(17);
+ tl = subl(15) ^ (subr(15) & ~subr(17));
+ dw = tl & subl(17), tr = subr(15) ^ CAMELLIA_RL1(dw);
+ SUBL(18) = tl ^ subl(19);
+ SUBR(18) = tr ^ subr(19);
+ SUBL(19) = subl(18) ^ subl(20);
+ SUBR(19) = subr(18) ^ subr(20);
+ SUBL(20) = subl(19) ^ subl(21);
+ SUBR(20) = subr(19) ^ subr(21);
+ SUBL(21) = subl(20) ^ subl(22);
+ SUBR(21) = subr(20) ^ subr(22);
+ SUBL(22) = subl(21) ^ subl(23);
+ SUBR(22) = subr(21) ^ subr(23);
+ SUBL(23) = subl(22);
+ SUBR(23) = subr(22);
+ SUBL(24) = subl(24) ^ subl(23);
+ SUBR(24) = subr(24) ^ subr(23);
+
+ /* apply the inverse of the last half of P-function */
+ dw = SUBL(2) ^ SUBR(2), dw = CAMELLIA_RL8(dw);
+ SUBR(2) = SUBL(2) ^ dw, SUBL(2) = dw;
+ dw = SUBL(3) ^ SUBR(3), dw = CAMELLIA_RL8(dw);
+ SUBR(3) = SUBL(3) ^ dw, SUBL(3) = dw;
+ dw = SUBL(4) ^ SUBR(4), dw = CAMELLIA_RL8(dw);
+ SUBR(4) = SUBL(4) ^ dw, SUBL(4) = dw;
+ dw = SUBL(5) ^ SUBR(5), dw = CAMELLIA_RL8(dw);
+ SUBR(5) = SUBL(5) ^ dw, SUBL(5) = dw;
+ dw = SUBL(6) ^ SUBR(6), dw = CAMELLIA_RL8(dw);
+ SUBR(6) = SUBL(6) ^ dw, SUBL(6) = dw;
+ dw = SUBL(7) ^ SUBR(7), dw = CAMELLIA_RL8(dw);
+ SUBR(7) = SUBL(7) ^ dw, SUBL(7) = dw;
+ dw = SUBL(10) ^ SUBR(10), dw = CAMELLIA_RL8(dw);
+ SUBR(10) = SUBL(10) ^ dw, SUBL(10) = dw;
+ dw = SUBL(11) ^ SUBR(11), dw = CAMELLIA_RL8(dw);
+ SUBR(11) = SUBL(11) ^ dw, SUBL(11) = dw;
+ dw = SUBL(12) ^ SUBR(12), dw = CAMELLIA_RL8(dw);
+ SUBR(12) = SUBL(12) ^ dw, SUBL(12) = dw;
+ dw = SUBL(13) ^ SUBR(13), dw = CAMELLIA_RL8(dw);
+ SUBR(13) = SUBL(13) ^ dw, SUBL(13) = dw;
+ dw = SUBL(14) ^ SUBR(14), dw = CAMELLIA_RL8(dw);
+ SUBR(14) = SUBL(14) ^ dw, SUBL(14) = dw;
+ dw = SUBL(15) ^ SUBR(15), dw = CAMELLIA_RL8(dw);
+ SUBR(15) = SUBL(15) ^ dw, SUBL(15) = dw;
+ dw = SUBL(18) ^ SUBR(18), dw = CAMELLIA_RL8(dw);
+ SUBR(18) = SUBL(18) ^ dw, SUBL(18) = dw;
+ dw = SUBL(19) ^ SUBR(19), dw = CAMELLIA_RL8(dw);
+ SUBR(19) = SUBL(19) ^ dw, SUBL(19) = dw;
+ dw = SUBL(20) ^ SUBR(20), dw = CAMELLIA_RL8(dw);
+ SUBR(20) = SUBL(20) ^ dw, SUBL(20) = dw;
+ dw = SUBL(21) ^ SUBR(21), dw = CAMELLIA_RL8(dw);
+ SUBR(21) = SUBL(21) ^ dw, SUBL(21) = dw;
+ dw = SUBL(22) ^ SUBR(22), dw = CAMELLIA_RL8(dw);
+ SUBR(22) = SUBL(22) ^ dw, SUBL(22) = dw;
+ dw = SUBL(23) ^ SUBR(23), dw = CAMELLIA_RL8(dw);
+ SUBR(23) = SUBL(23) ^ dw, SUBL(23) = dw;
+}
+
+void
+camellia_setup256(const unsigned char *key, uint32_t *subkey)
+{
+ uint32_t kll,klr,krl,krr; /* left half of key */
+ uint32_t krll,krlr,krrl,krrr; /* right half of key */
+ uint32_t il, ir, t0, t1, w0, w1; /* temporary variables */
+ uint32_t kw4l, kw4r, dw, tl, tr;
+ uint32_t subL[34];
+ uint32_t subR[34];
+
+	/*
+	 * key = (kll || klr || krl || krr || krll || krlr || krrl || krrr)
+	 * (|| is concatenation)
+	 */
+
+ kll = GETU32(key );
+ klr = GETU32(key + 4);
+ krl = GETU32(key + 8);
+ krr = GETU32(key + 12);
+ krll = GETU32(key + 16);
+ krlr = GETU32(key + 20);
+ krrl = GETU32(key + 24);
+ krrr = GETU32(key + 28);
+
+ /* generate KL dependent subkeys */
+ subl(0) = kll; subr(0) = klr;
+ subl(1) = krl; subr(1) = krr;
+ CAMELLIA_ROLDQo32(kll, klr, krl, krr, w0, w1, 45);
+ subl(12) = kll; subr(12) = klr;
+ subl(13) = krl; subr(13) = krr;
+ CAMELLIA_ROLDQ(kll, klr, krl, krr, w0, w1, 15);
+ subl(16) = kll; subr(16) = klr;
+ subl(17) = krl; subr(17) = krr;
+ CAMELLIA_ROLDQ(kll, klr, krl, krr, w0, w1, 17);
+ subl(22) = kll; subr(22) = klr;
+ subl(23) = krl; subr(23) = krr;
+ CAMELLIA_ROLDQo32(kll, klr, krl, krr, w0, w1, 34);
+ subl(30) = kll; subr(30) = klr;
+ subl(31) = krl; subr(31) = krr;
+
+ /* generate KR dependent subkeys */
+ CAMELLIA_ROLDQ(krll, krlr, krrl, krrr, w0, w1, 15);
+ subl(4) = krll; subr(4) = krlr;
+ subl(5) = krrl; subr(5) = krrr;
+ CAMELLIA_ROLDQ(krll, krlr, krrl, krrr, w0, w1, 15);
+ subl(8) = krll; subr(8) = krlr;
+ subl(9) = krrl; subr(9) = krrr;
+ CAMELLIA_ROLDQ(krll, krlr, krrl, krrr, w0, w1, 30);
+ subl(18) = krll; subr(18) = krlr;
+ subl(19) = krrl; subr(19) = krrr;
+ CAMELLIA_ROLDQo32(krll, krlr, krrl, krrr, w0, w1, 34);
+ subl(26) = krll; subr(26) = krlr;
+ subl(27) = krrl; subr(27) = krrr;
+ CAMELLIA_ROLDQo32(krll, krlr, krrl, krrr, w0, w1, 34);
+
+ /* generate KA */
+ kll = subl(0) ^ krll; klr = subr(0) ^ krlr;
+ krl = subl(1) ^ krrl; krr = subr(1) ^ krrr;
+ CAMELLIA_F(kll, klr, CAMELLIA_SIGMA1L, CAMELLIA_SIGMA1R,
+ w0, w1, il, ir, t0, t1);
+ krl ^= w0; krr ^= w1;
+ CAMELLIA_F(krl, krr, CAMELLIA_SIGMA2L, CAMELLIA_SIGMA2R,
+ kll, klr, il, ir, t0, t1);
+ kll ^= krll; klr ^= krlr;
+ CAMELLIA_F(kll, klr, CAMELLIA_SIGMA3L, CAMELLIA_SIGMA3R,
+ krl, krr, il, ir, t0, t1);
+ krl ^= w0 ^ krrl; krr ^= w1 ^ krrr;
+ CAMELLIA_F(krl, krr, CAMELLIA_SIGMA4L, CAMELLIA_SIGMA4R,
+ w0, w1, il, ir, t0, t1);
+ kll ^= w0; klr ^= w1;
+
+ /* generate KB */
+ krll ^= kll; krlr ^= klr;
+ krrl ^= krl; krrr ^= krr;
+ CAMELLIA_F(krll, krlr, CAMELLIA_SIGMA5L, CAMELLIA_SIGMA5R,
+ w0, w1, il, ir, t0, t1);
+ krrl ^= w0; krrr ^= w1;
+ CAMELLIA_F(krrl, krrr, CAMELLIA_SIGMA6L, CAMELLIA_SIGMA6R,
+ w0, w1, il, ir, t0, t1);
+ krll ^= w0; krlr ^= w1;
+
+ /* generate KA dependent subkeys */
+ CAMELLIA_ROLDQ(kll, klr, krl, krr, w0, w1, 15);
+ subl(6) = kll; subr(6) = klr;
+ subl(7) = krl; subr(7) = krr;
+ CAMELLIA_ROLDQ(kll, klr, krl, krr, w0, w1, 30);
+ subl(14) = kll; subr(14) = klr;
+ subl(15) = krl; subr(15) = krr;
+ subl(24) = klr; subr(24) = krl;
+ subl(25) = krr; subr(25) = kll;
+ CAMELLIA_ROLDQo32(kll, klr, krl, krr, w0, w1, 49);
+ subl(28) = kll; subr(28) = klr;
+ subl(29) = krl; subr(29) = krr;
+
+ /* generate KB dependent subkeys */
+ subl(2) = krll; subr(2) = krlr;
+ subl(3) = krrl; subr(3) = krrr;
+ CAMELLIA_ROLDQ(krll, krlr, krrl, krrr, w0, w1, 30);
+ subl(10) = krll; subr(10) = krlr;
+ subl(11) = krrl; subr(11) = krrr;
+ CAMELLIA_ROLDQ(krll, krlr, krrl, krrr, w0, w1, 30);
+ subl(20) = krll; subr(20) = krlr;
+ subl(21) = krrl; subr(21) = krrr;
+ CAMELLIA_ROLDQo32(krll, krlr, krrl, krrr, w0, w1, 51);
+ subl(32) = krll; subr(32) = krlr;
+ subl(33) = krrl; subr(33) = krrr;
+
+ /* absorb kw2 to other subkeys */
+ subl(3) ^= subl(1); subr(3) ^= subr(1);
+ subl(5) ^= subl(1); subr(5) ^= subr(1);
+ subl(7) ^= subl(1); subr(7) ^= subr(1);
+ subl(1) ^= subr(1) & ~subr(9);
+ dw = subl(1) & subl(9), subr(1) ^= CAMELLIA_RL1(dw);
+ subl(11) ^= subl(1); subr(11) ^= subr(1);
+ subl(13) ^= subl(1); subr(13) ^= subr(1);
+ subl(15) ^= subl(1); subr(15) ^= subr(1);
+ subl(1) ^= subr(1) & ~subr(17);
+ dw = subl(1) & subl(17), subr(1) ^= CAMELLIA_RL1(dw);
+ subl(19) ^= subl(1); subr(19) ^= subr(1);
+ subl(21) ^= subl(1); subr(21) ^= subr(1);
+ subl(23) ^= subl(1); subr(23) ^= subr(1);
+ subl(1) ^= subr(1) & ~subr(25);
+ dw = subl(1) & subl(25), subr(1) ^= CAMELLIA_RL1(dw);
+ subl(27) ^= subl(1); subr(27) ^= subr(1);
+ subl(29) ^= subl(1); subr(29) ^= subr(1);
+ subl(31) ^= subl(1); subr(31) ^= subr(1);
+ subl(32) ^= subl(1); subr(32) ^= subr(1);
+
+ /* absorb kw4 to other subkeys */
+ kw4l = subl(33); kw4r = subr(33);
+ subl(30) ^= kw4l; subr(30) ^= kw4r;
+ subl(28) ^= kw4l; subr(28) ^= kw4r;
+ subl(26) ^= kw4l; subr(26) ^= kw4r;
+ kw4l ^= kw4r & ~subr(24);
+ dw = kw4l & subl(24), kw4r ^= CAMELLIA_RL1(dw);
+ subl(22) ^= kw4l; subr(22) ^= kw4r;
+ subl(20) ^= kw4l; subr(20) ^= kw4r;
+ subl(18) ^= kw4l; subr(18) ^= kw4r;
+ kw4l ^= kw4r & ~subr(16);
+ dw = kw4l & subl(16), kw4r ^= CAMELLIA_RL1(dw);
+ subl(14) ^= kw4l; subr(14) ^= kw4r;
+ subl(12) ^= kw4l; subr(12) ^= kw4r;
+ subl(10) ^= kw4l; subr(10) ^= kw4r;
+ kw4l ^= kw4r & ~subr(8);
+ dw = kw4l & subl(8), kw4r ^= CAMELLIA_RL1(dw);
+ subl(6) ^= kw4l; subr(6) ^= kw4r;
+ subl(4) ^= kw4l; subr(4) ^= kw4r;
+ subl(2) ^= kw4l; subr(2) ^= kw4r;
+ subl(0) ^= kw4l; subr(0) ^= kw4r;
+
+ /* key XOR is end of F-function */
+ SUBL(0) = subl(0) ^ subl(2);
+ SUBR(0) = subr(0) ^ subr(2);
+ SUBL(2) = subl(3);
+ SUBR(2) = subr(3);
+ SUBL(3) = subl(2) ^ subl(4);
+ SUBR(3) = subr(2) ^ subr(4);
+ SUBL(4) = subl(3) ^ subl(5);
+ SUBR(4) = subr(3) ^ subr(5);
+ SUBL(5) = subl(4) ^ subl(6);
+ SUBR(5) = subr(4) ^ subr(6);
+ SUBL(6) = subl(5) ^ subl(7);
+ SUBR(6) = subr(5) ^ subr(7);
+ tl = subl(10) ^ (subr(10) & ~subr(8));
+ dw = tl & subl(8), tr = subr(10) ^ CAMELLIA_RL1(dw);
+ SUBL(7) = subl(6) ^ tl;
+ SUBR(7) = subr(6) ^ tr;
+ SUBL(8) = subl(8);
+ SUBR(8) = subr(8);
+ SUBL(9) = subl(9);
+ SUBR(9) = subr(9);
+ tl = subl(7) ^ (subr(7) & ~subr(9));
+ dw = tl & subl(9), tr = subr(7) ^ CAMELLIA_RL1(dw);
+ SUBL(10) = tl ^ subl(11);
+ SUBR(10) = tr ^ subr(11);
+ SUBL(11) = subl(10) ^ subl(12);
+ SUBR(11) = subr(10) ^ subr(12);
+ SUBL(12) = subl(11) ^ subl(13);
+ SUBR(12) = subr(11) ^ subr(13);
+ SUBL(13) = subl(12) ^ subl(14);
+ SUBR(13) = subr(12) ^ subr(14);
+ SUBL(14) = subl(13) ^ subl(15);
+ SUBR(14) = subr(13) ^ subr(15);
+ tl = subl(18) ^ (subr(18) & ~subr(16));
+ dw = tl & subl(16), tr = subr(18) ^ CAMELLIA_RL1(dw);
+ SUBL(15) = subl(14) ^ tl;
+ SUBR(15) = subr(14) ^ tr;
+ SUBL(16) = subl(16);
+ SUBR(16) = subr(16);
+ SUBL(17) = subl(17);
+ SUBR(17) = subr(17);
+ tl = subl(15) ^ (subr(15) & ~subr(17));
+ dw = tl & subl(17), tr = subr(15) ^ CAMELLIA_RL1(dw);
+ SUBL(18) = tl ^ subl(19);
+ SUBR(18) = tr ^ subr(19);
+ SUBL(19) = subl(18) ^ subl(20);
+ SUBR(19) = subr(18) ^ subr(20);
+ SUBL(20) = subl(19) ^ subl(21);
+ SUBR(20) = subr(19) ^ subr(21);
+ SUBL(21) = subl(20) ^ subl(22);
+ SUBR(21) = subr(20) ^ subr(22);
+ SUBL(22) = subl(21) ^ subl(23);
+ SUBR(22) = subr(21) ^ subr(23);
+ tl = subl(26) ^ (subr(26) & ~subr(24));
+ dw = tl & subl(24), tr = subr(26) ^ CAMELLIA_RL1(dw);
+ SUBL(23) = subl(22) ^ tl;
+ SUBR(23) = subr(22) ^ tr;
+ SUBL(24) = subl(24);
+ SUBR(24) = subr(24);
+ SUBL(25) = subl(25);
+ SUBR(25) = subr(25);
+ tl = subl(23) ^ (subr(23) & ~subr(25));
+ dw = tl & subl(25), tr = subr(23) ^ CAMELLIA_RL1(dw);
+ SUBL(26) = tl ^ subl(27);
+ SUBR(26) = tr ^ subr(27);
+ SUBL(27) = subl(26) ^ subl(28);
+ SUBR(27) = subr(26) ^ subr(28);
+ SUBL(28) = subl(27) ^ subl(29);
+ SUBR(28) = subr(27) ^ subr(29);
+ SUBL(29) = subl(28) ^ subl(30);
+ SUBR(29) = subr(28) ^ subr(30);
+ SUBL(30) = subl(29) ^ subl(31);
+ SUBR(30) = subr(29) ^ subr(31);
+ SUBL(31) = subl(30);
+ SUBR(31) = subr(30);
+ SUBL(32) = subl(32) ^ subl(31);
+ SUBR(32) = subr(32) ^ subr(31);
+
+ /* apply the inverse of the last half of P-function */
+ dw = SUBL(2) ^ SUBR(2), dw = CAMELLIA_RL8(dw);
+ SUBR(2) = SUBL(2) ^ dw, SUBL(2) = dw;
+ dw = SUBL(3) ^ SUBR(3), dw = CAMELLIA_RL8(dw);
+ SUBR(3) = SUBL(3) ^ dw, SUBL(3) = dw;
+ dw = SUBL(4) ^ SUBR(4), dw = CAMELLIA_RL8(dw);
+ SUBR(4) = SUBL(4) ^ dw, SUBL(4) = dw;
+ dw = SUBL(5) ^ SUBR(5), dw = CAMELLIA_RL8(dw);
+ SUBR(5) = SUBL(5) ^ dw, SUBL(5) = dw;
+ dw = SUBL(6) ^ SUBR(6), dw = CAMELLIA_RL8(dw);
+ SUBR(6) = SUBL(6) ^ dw, SUBL(6) = dw;
+ dw = SUBL(7) ^ SUBR(7), dw = CAMELLIA_RL8(dw);
+ SUBR(7) = SUBL(7) ^ dw, SUBL(7) = dw;
+ dw = SUBL(10) ^ SUBR(10), dw = CAMELLIA_RL8(dw);
+ SUBR(10) = SUBL(10) ^ dw, SUBL(10) = dw;
+ dw = SUBL(11) ^ SUBR(11), dw = CAMELLIA_RL8(dw);
+ SUBR(11) = SUBL(11) ^ dw, SUBL(11) = dw;
+ dw = SUBL(12) ^ SUBR(12), dw = CAMELLIA_RL8(dw);
+ SUBR(12) = SUBL(12) ^ dw, SUBL(12) = dw;
+ dw = SUBL(13) ^ SUBR(13), dw = CAMELLIA_RL8(dw);
+ SUBR(13) = SUBL(13) ^ dw, SUBL(13) = dw;
+ dw = SUBL(14) ^ SUBR(14), dw = CAMELLIA_RL8(dw);
+ SUBR(14) = SUBL(14) ^ dw, SUBL(14) = dw;
+ dw = SUBL(15) ^ SUBR(15), dw = CAMELLIA_RL8(dw);
+ SUBR(15) = SUBL(15) ^ dw, SUBL(15) = dw;
+ dw = SUBL(18) ^ SUBR(18), dw = CAMELLIA_RL8(dw);
+ SUBR(18) = SUBL(18) ^ dw, SUBL(18) = dw;
+ dw = SUBL(19) ^ SUBR(19), dw = CAMELLIA_RL8(dw);
+ SUBR(19) = SUBL(19) ^ dw, SUBL(19) = dw;
+ dw = SUBL(20) ^ SUBR(20), dw = CAMELLIA_RL8(dw);
+ SUBR(20) = SUBL(20) ^ dw, SUBL(20) = dw;
+ dw = SUBL(21) ^ SUBR(21), dw = CAMELLIA_RL8(dw);
+ SUBR(21) = SUBL(21) ^ dw, SUBL(21) = dw;
+ dw = SUBL(22) ^ SUBR(22), dw = CAMELLIA_RL8(dw);
+ SUBR(22) = SUBL(22) ^ dw, SUBL(22) = dw;
+ dw = SUBL(23) ^ SUBR(23), dw = CAMELLIA_RL8(dw);
+ SUBR(23) = SUBL(23) ^ dw, SUBL(23) = dw;
+ dw = SUBL(26) ^ SUBR(26), dw = CAMELLIA_RL8(dw);
+ SUBR(26) = SUBL(26) ^ dw, SUBL(26) = dw;
+ dw = SUBL(27) ^ SUBR(27), dw = CAMELLIA_RL8(dw);
+ SUBR(27) = SUBL(27) ^ dw, SUBL(27) = dw;
+ dw = SUBL(28) ^ SUBR(28), dw = CAMELLIA_RL8(dw);
+ SUBR(28) = SUBL(28) ^ dw, SUBL(28) = dw;
+ dw = SUBL(29) ^ SUBR(29), dw = CAMELLIA_RL8(dw);
+ SUBR(29) = SUBL(29) ^ dw, SUBL(29) = dw;
+ dw = SUBL(30) ^ SUBR(30), dw = CAMELLIA_RL8(dw);
+ SUBR(30) = SUBL(30) ^ dw, SUBL(30) = dw;
+ dw = SUBL(31) ^ SUBR(31), dw = CAMELLIA_RL8(dw);
+ SUBR(31) = SUBL(31) ^ dw, SUBL(31) = dw;
+}
+
+void
+camellia_setup192(const unsigned char *key, uint32_t *subkey)
+{
+ unsigned char kk[32];
+ uint32_t krll, krlr, krrl, krrr;
+
+ memcpy(kk, key, 24);
+ memcpy((unsigned char *)&krll, key+16,4);
+ memcpy((unsigned char *)&krlr, key+20,4);
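+ /*
+ * Per the Camellia specification (RFC 3713), a 192-bit key is
+ * extended to 256 bits: the low 64 bits of KR are the bitwise
+ * complement of its high 64 bits.
+ */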
+ krrl = ~krll;
+ krrr = ~krlr;
+ memcpy(kk+24, (unsigned char *)&krrl, 4);
+ memcpy(kk+28, (unsigned char *)&krrr, 4);
+ camellia_setup256(kk, subkey);
+}
+
+/**
+ * Camellia block encryption/decryption for 128-bit keys
+ */
+void
+camellia_encrypt128(const uint32_t *subkey, uint32_t *io)
+{
+ uint32_t il, ir, t0, t1;
+
+ /* pre-whitening, with kw2 absorbed into the subkeys */
+ io[0] ^= SUBL(0);
+ io[1] ^= SUBR(0);
+ /* main iteration */
+
+ CAMELLIA_ROUNDSM(io[0],io[1], SUBL(2),SUBR(2),
+ io[2],io[3],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[2],io[3], SUBL(3),SUBR(3),
+ io[0],io[1],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[0],io[1], SUBL(4),SUBR(4),
+ io[2],io[3],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[2],io[3], SUBL(5),SUBR(5),
+ io[0],io[1],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[0],io[1], SUBL(6),SUBR(6),
+ io[2],io[3],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[2],io[3], SUBL(7),SUBR(7),
+ io[0],io[1],il,ir,t0,t1);
+
+ CAMELLIA_FLS(io[0],io[1],io[2],io[3], SUBL(8),SUBR(8), SUBL(9),SUBR(9),
+ t0,t1,il,ir);
+
+ CAMELLIA_ROUNDSM(io[0],io[1], SUBL(10),SUBR(10),
+ io[2],io[3],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[2],io[3], SUBL(11),SUBR(11),
+ io[0],io[1],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[0],io[1], SUBL(12),SUBR(12),
+ io[2],io[3],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[2],io[3], SUBL(13),SUBR(13),
+ io[0],io[1],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[0],io[1], SUBL(14),SUBR(14),
+ io[2],io[3],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[2],io[3], SUBL(15),SUBR(15),
+ io[0],io[1],il,ir,t0,t1);
+
+ CAMELLIA_FLS(io[0],io[1],io[2],io[3], SUBL(16),SUBR(16), SUBL(17),SUBR(17),
+ t0,t1,il,ir);
+
+ CAMELLIA_ROUNDSM(io[0],io[1], SUBL(18),SUBR(18),
+ io[2],io[3],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[2],io[3], SUBL(19),SUBR(19),
+ io[0],io[1],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[0],io[1], SUBL(20),SUBR(20),
+ io[2],io[3],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[2],io[3], SUBL(21),SUBR(21),
+ io[0],io[1],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[0],io[1], SUBL(22),SUBR(22),
+ io[2],io[3],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[2],io[3], SUBL(23),SUBR(23),
+ io[0],io[1],il,ir,t0,t1);
+
+ /* post-whitening, with kw4 absorbed into the subkeys */
+ io[2] ^= SUBL(24);
+ io[3] ^= SUBR(24);
+
+ t0 = io[0];
+ t1 = io[1];
+ io[0] = io[2];
+ io[1] = io[3];
+ io[2] = t0;
+ io[3] = t1;
+}
+
+void
+camellia_decrypt128(const uint32_t *subkey, uint32_t *io)
+{
+ uint32_t il, ir, t0, t1; /* temporary variables */
+
+ /* pre-whitening, with kw2 absorbed into the subkeys */
+ io[0] ^= SUBL(24);
+ io[1] ^= SUBR(24);
+
+ /* main iteration */
+ CAMELLIA_ROUNDSM(io[0],io[1], SUBL(23),SUBR(23),
+ io[2],io[3],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[2],io[3], SUBL(22),SUBR(22),
+ io[0],io[1],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[0],io[1], SUBL(21),SUBR(21),
+ io[2],io[3],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[2],io[3], SUBL(20),SUBR(20),
+ io[0],io[1],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[0],io[1], SUBL(19),SUBR(19),
+ io[2],io[3],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[2],io[3], SUBL(18),SUBR(18),
+ io[0],io[1],il,ir,t0,t1);
+
+ CAMELLIA_FLS(io[0],io[1],io[2],io[3], SUBL(17),SUBR(17), SUBL(16),SUBR(16),
+ t0,t1,il,ir);
+
+ CAMELLIA_ROUNDSM(io[0],io[1], SUBL(15),SUBR(15),
+ io[2],io[3],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[2],io[3], SUBL(14),SUBR(14),
+ io[0],io[1],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[0],io[1], SUBL(13),SUBR(13),
+ io[2],io[3],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[2],io[3], SUBL(12),SUBR(12),
+ io[0],io[1],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[0],io[1], SUBL(11),SUBR(11),
+ io[2],io[3],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[2],io[3], SUBL(10),SUBR(10),
+ io[0],io[1],il,ir,t0,t1);
+
+ CAMELLIA_FLS(io[0],io[1],io[2],io[3], SUBL(9),SUBR(9), SUBL(8),SUBR(8),
+ t0,t1,il,ir);
+
+ CAMELLIA_ROUNDSM(io[0],io[1], SUBL(7),SUBR(7),
+ io[2],io[3],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[2],io[3], SUBL(6),SUBR(6),
+ io[0],io[1],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[0],io[1], SUBL(5),SUBR(5),
+ io[2],io[3],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[2],io[3], SUBL(4),SUBR(4),
+ io[0],io[1],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[0],io[1], SUBL(3),SUBR(3),
+ io[2],io[3],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[2],io[3], SUBL(2),SUBR(2),
+ io[0],io[1],il,ir,t0,t1);
+
+ /* post-whitening, with kw4 absorbed into the subkeys */
+ io[2] ^= SUBL(0);
+ io[3] ^= SUBR(0);
+
+ t0 = io[0];
+ t1 = io[1];
+ io[0] = io[2];
+ io[1] = io[3];
+ io[2] = t0;
+ io[3] = t1;
+}
+
+/**
+ * Camellia block encryption/decryption for 192- and 256-bit keys
+ */
+void
+camellia_encrypt256(const uint32_t *subkey, uint32_t *io)
+{
+ uint32_t il, ir, t0, t1; /* temporary variables */
+
+ /* pre-whitening, with kw2 absorbed into the subkeys */
+ io[0] ^= SUBL(0);
+ io[1] ^= SUBR(0);
+
+ /* main iteration */
+ CAMELLIA_ROUNDSM(io[0],io[1], SUBL(2),SUBR(2),
+ io[2],io[3],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[2],io[3], SUBL(3),SUBR(3),
+ io[0],io[1],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[0],io[1], SUBL(4),SUBR(4),
+ io[2],io[3],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[2],io[3], SUBL(5),SUBR(5),
+ io[0],io[1],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[0],io[1], SUBL(6),SUBR(6),
+ io[2],io[3],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[2],io[3], SUBL(7),SUBR(7),
+ io[0],io[1],il,ir,t0,t1);
+
+ CAMELLIA_FLS(io[0],io[1],io[2],io[3], SUBL(8),SUBR(8), SUBL(9),SUBR(9),
+ t0,t1,il,ir);
+
+ CAMELLIA_ROUNDSM(io[0],io[1], SUBL(10),SUBR(10),
+ io[2],io[3],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[2],io[3], SUBL(11),SUBR(11),
+ io[0],io[1],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[0],io[1], SUBL(12),SUBR(12),
+ io[2],io[3],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[2],io[3], SUBL(13),SUBR(13),
+ io[0],io[1],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[0],io[1], SUBL(14),SUBR(14),
+ io[2],io[3],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[2],io[3], SUBL(15),SUBR(15),
+ io[0],io[1],il,ir,t0,t1);
+
+ CAMELLIA_FLS(io[0],io[1],io[2],io[3], SUBL(16),SUBR(16), SUBL(17),SUBR(17),
+ t0,t1,il,ir);
+
+ CAMELLIA_ROUNDSM(io[0],io[1], SUBL(18),SUBR(18),
+ io[2],io[3],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[2],io[3], SUBL(19),SUBR(19),
+ io[0],io[1],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[0],io[1], SUBL(20),SUBR(20),
+ io[2],io[3],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[2],io[3], SUBL(21),SUBR(21),
+ io[0],io[1],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[0],io[1], SUBL(22),SUBR(22),
+ io[2],io[3],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[2],io[3], SUBL(23),SUBR(23),
+ io[0],io[1],il,ir,t0,t1);
+
+ CAMELLIA_FLS(io[0],io[1],io[2],io[3], SUBL(24),SUBR(24), SUBL(25),SUBR(25),
+ t0,t1,il,ir);
+
+ CAMELLIA_ROUNDSM(io[0],io[1], SUBL(26),SUBR(26),
+ io[2],io[3],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[2],io[3], SUBL(27),SUBR(27),
+ io[0],io[1],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[0],io[1], SUBL(28),SUBR(28),
+ io[2],io[3],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[2],io[3], SUBL(29),SUBR(29),
+ io[0],io[1],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[0],io[1], SUBL(30),SUBR(30),
+ io[2],io[3],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[2],io[3], SUBL(31),SUBR(31),
+ io[0],io[1],il,ir,t0,t1);
+
+ /* post-whitening, with kw4 absorbed into the subkeys */
+ io[2] ^= SUBL(32);
+ io[3] ^= SUBR(32);
+
+ t0 = io[0];
+ t1 = io[1];
+ io[0] = io[2];
+ io[1] = io[3];
+ io[2] = t0;
+ io[3] = t1;
+}
+
+void
+camellia_decrypt256(const uint32_t *subkey, uint32_t *io)
+{
+ uint32_t il, ir, t0, t1; /* temporary variables */
+
+ /* pre-whitening, with kw2 absorbed into the subkeys */
+ io[0] ^= SUBL(32);
+ io[1] ^= SUBR(32);
+
+ /* main iteration */
+ CAMELLIA_ROUNDSM(io[0],io[1], SUBL(31),SUBR(31),
+ io[2],io[3],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[2],io[3], SUBL(30),SUBR(30),
+ io[0],io[1],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[0],io[1], SUBL(29),SUBR(29),
+ io[2],io[3],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[2],io[3], SUBL(28),SUBR(28),
+ io[0],io[1],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[0],io[1], SUBL(27),SUBR(27),
+ io[2],io[3],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[2],io[3], SUBL(26),SUBR(26),
+ io[0],io[1],il,ir,t0,t1);
+
+ CAMELLIA_FLS(io[0],io[1],io[2],io[3], SUBL(25),SUBR(25), SUBL(24),SUBR(24),
+ t0,t1,il,ir);
+
+ CAMELLIA_ROUNDSM(io[0],io[1], SUBL(23),SUBR(23),
+ io[2],io[3],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[2],io[3], SUBL(22),SUBR(22),
+ io[0],io[1],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[0],io[1], SUBL(21),SUBR(21),
+ io[2],io[3],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[2],io[3], SUBL(20),SUBR(20),
+ io[0],io[1],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[0],io[1], SUBL(19),SUBR(19),
+ io[2],io[3],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[2],io[3], SUBL(18),SUBR(18),
+ io[0],io[1],il,ir,t0,t1);
+
+ CAMELLIA_FLS(io[0],io[1],io[2],io[3], SUBL(17),SUBR(17), SUBL(16),SUBR(16),
+ t0,t1,il,ir);
+
+ CAMELLIA_ROUNDSM(io[0],io[1], SUBL(15),SUBR(15),
+ io[2],io[3],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[2],io[3], SUBL(14),SUBR(14),
+ io[0],io[1],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[0],io[1], SUBL(13),SUBR(13),
+ io[2],io[3],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[2],io[3], SUBL(12),SUBR(12),
+ io[0],io[1],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[0],io[1], SUBL(11),SUBR(11),
+ io[2],io[3],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[2],io[3], SUBL(10),SUBR(10),
+ io[0],io[1],il,ir,t0,t1);
+
+ CAMELLIA_FLS(io[0],io[1],io[2],io[3], SUBL(9),SUBR(9), SUBL(8),SUBR(8),
+ t0,t1,il,ir);
+
+ CAMELLIA_ROUNDSM(io[0],io[1], SUBL(7),SUBR(7),
+ io[2],io[3],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[2],io[3], SUBL(6),SUBR(6),
+ io[0],io[1],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[0],io[1], SUBL(5),SUBR(5),
+ io[2],io[3],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[2],io[3], SUBL(4),SUBR(4),
+ io[0],io[1],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[0],io[1], SUBL(3),SUBR(3),
+ io[2],io[3],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[2],io[3], SUBL(2),SUBR(2),
+ io[0],io[1],il,ir,t0,t1);
+
+ /* post-whitening, with kw4 absorbed into the subkeys */
+ io[2] ^= SUBL(0);
+ io[3] ^= SUBR(0);
+
+ t0 = io[0];
+ t1 = io[1];
+ io[0] = io[2];
+ io[1] = io[3];
+ io[2] = t0;
+ io[3] = t1;
+}
+
+void
+Camellia_Ekeygen(const int keyBitLength,
+ const unsigned char *rawKey,
+ uint32_t *subkey)
+{
+ KASSERT(keyBitLength == 128 || keyBitLength == 192 || keyBitLength == 256,
+ ("Invalid key size (%d).", keyBitLength));
+
+ switch (keyBitLength) {
+ case 128:
+ camellia_setup128(rawKey, subkey);
+ break;
+ case 192:
+ camellia_setup192(rawKey, subkey);
+ break;
+ case 256:
+ camellia_setup256(rawKey, subkey);
+ break;
+ default:
+ break;
+ }
+}
+
+void
+Camellia_EncryptBlock(const int keyBitLength,
+ const unsigned char *plaintext,
+ const uint32_t *subkey,
+ unsigned char *ciphertext)
+{
+ uint32_t tmp[4];
+
+ tmp[0] = GETU32(plaintext);
+ tmp[1] = GETU32(plaintext + 4);
+ tmp[2] = GETU32(plaintext + 8);
+ tmp[3] = GETU32(plaintext + 12);
+
+ switch (keyBitLength) {
+ case 128:
+ camellia_encrypt128(subkey, tmp);
+ break;
+ case 192:
+ /* fall through */
+ case 256:
+ camellia_encrypt256(subkey, tmp);
+ break;
+ default:
+ break;
+ }
+
+ PUTU32(ciphertext, tmp[0]);
+ PUTU32(ciphertext+4, tmp[1]);
+ PUTU32(ciphertext+8, tmp[2]);
+ PUTU32(ciphertext+12, tmp[3]);
+}
+
+void
+Camellia_DecryptBlock(const int keyBitLength,
+ const unsigned char *ciphertext,
+ const uint32_t *subkey,
+ unsigned char *plaintext)
+{
+ uint32_t tmp[4];
+
+ tmp[0] = GETU32(ciphertext);
+ tmp[1] = GETU32(ciphertext + 4);
+ tmp[2] = GETU32(ciphertext + 8);
+ tmp[3] = GETU32(ciphertext + 12);
+
+ switch (keyBitLength) {
+ case 128:
+ camellia_decrypt128(subkey, tmp);
+ break;
+ case 192:
+ /* fall through */
+ case 256:
+ camellia_decrypt256(subkey, tmp);
+ break;
+ default:
+ break;
+ }
+
+ PUTU32(plaintext, tmp[0]);
+ PUTU32(plaintext+4, tmp[1]);
+ PUTU32(plaintext+8, tmp[2]);
+ PUTU32(plaintext+12, tmp[3]);
+}
diff --git a/rtems/freebsd/crypto/camellia/camellia.h b/rtems/freebsd/crypto/camellia/camellia.h
new file mode 100644
index 00000000..ab0ce57e
--- /dev/null
+++ b/rtems/freebsd/crypto/camellia/camellia.h
@@ -0,0 +1,69 @@
+/* camellia.h ver 1.1.0
+ *
+ * Copyright (c) 2006
+ * NTT (Nippon Telegraph and Telephone Corporation) . All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer as
+ * the first lines of this file unmodified.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY NTT ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL NTT BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _CAMELLIA_H
+#define _CAMELLIA_H
+
+#define CAMELLIA_BLOCK_SIZE 16
+#define CAMELLIA_SUBKEYWORD 68 /* (34*8/4) */
+
+typedef struct {
+ int bits; /* key length in bits */
+ uint32_t subkey[CAMELLIA_SUBKEYWORD]; /* encrypt/decrypt key schedule */
+} camellia_ctx;
+
+void camellia_set_key(camellia_ctx *, const u_char *, int);
+void camellia_decrypt(const camellia_ctx *, const u_char *, u_char *);
+void camellia_encrypt(const camellia_ctx *, const u_char *, u_char *);
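+
+/*
+ * Minimal usage sketch for the context-based interface (illustrative
+ * only; the last argument of camellia_set_key() is assumed to be the
+ * key length in bits):
+ *
+ *   camellia_ctx ctx;
+ *
+ *   camellia_set_key(&ctx, key, 128);
+ *   camellia_encrypt(&ctx, plaintext, ciphertext);
+ */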
+
+void Camellia_Ekeygen(const int keyBitLength,
+ const unsigned char *rawKey,
+ uint32_t *subkey);
+
+void Camellia_EncryptBlock(const int keyBitLength,
+ const unsigned char *plaintext,
+ const uint32_t *subkey,
+			unsigned char *ciphertext);
+
+void Camellia_DecryptBlock(const int keyBitLength,
+			const unsigned char *ciphertext,
+ const uint32_t *subkey,
+ unsigned char *plaintext);
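+
+/*
+ * Usage sketch (illustrative only): generate the key schedule once,
+ * then encrypt and decrypt 16-byte blocks with it.
+ *
+ *   uint32_t subkey[CAMELLIA_SUBKEYWORD];
+ *   unsigned char pt[CAMELLIA_BLOCK_SIZE], ct[CAMELLIA_BLOCK_SIZE];
+ *
+ *   Camellia_Ekeygen(128, rawKey, subkey);
+ *   Camellia_EncryptBlock(128, pt, subkey, ct);
+ *   Camellia_DecryptBlock(128, ct, subkey, pt);
+ */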
+
+void camellia_setup128(const unsigned char *key, uint32_t *subkey);
+void camellia_setup192(const unsigned char *key, uint32_t *subkey);
+void camellia_setup256(const unsigned char *key, uint32_t *subkey);
+void camellia_encrypt128(const uint32_t *subkey, uint32_t *io);
+void camellia_encrypt256(const uint32_t *subkey, uint32_t *io);
+void camellia_decrypt128(const uint32_t *subkey, uint32_t *io);
+void camellia_decrypt256(const uint32_t *subkey, uint32_t *io);
+
+#endif /* _CAMELLIA_H */
diff --git a/rtems/freebsd/crypto/des/des.h b/rtems/freebsd/crypto/des/des.h
new file mode 100644
index 00000000..81c7bfbe
--- /dev/null
+++ b/rtems/freebsd/crypto/des/des.h
@@ -0,0 +1,117 @@
+/* $FreeBSD$ */
+/* $KAME: des.h,v 1.8 2001/09/10 04:03:57 itojun Exp $ */
+
+/* lib/des/des.h */
+/* Copyright (C) 1995-1996 Eric Young (eay@mincom.oz.au)
+ * All rights reserved.
+ *
+ * This file is part of an SSL implementation written
+ * by Eric Young (eay@mincom.oz.au).
+ * The implementation was written so as to conform with Netscapes SSL
+ * specification. This library and applications are
+ * FREE FOR COMMERCIAL AND NON-COMMERCIAL USE
+ * as long as the following conditions are aheared to.
+ *
+ * Copyright remains Eric Young's, and as such any Copyright notices in
+ * the code are not to be removed. If this code is used in a product,
+ * Eric Young should be given attribution as the author of the parts used.
+ * This can be in the form of a textual message at program startup or
+ * in documentation (online or textual) provided with the package.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Eric Young (eay@mincom.oz.au)
+ *
+ * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * The licence and distribution terms for any publically available version or
+ * derivative of this code cannot be changed. i.e. this code cannot simply be
+ * copied and put under another distribution licence
+ * [including the GNU Public Licence.]
+ */
+
+#ifndef HEADER_DES_H
+#define HEADER_DES_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* must be a 32-bit quantity */
+#define DES_LONG u_int32_t
+
+typedef unsigned char des_cblock[8];
+typedef struct des_ks_struct
+ {
+ union {
+ des_cblock cblock;
+		/* make sure things are the correct size on machines
+		 * with 8-byte longs */
+ DES_LONG deslong[2];
+ } ks;
+ int weak_key;
+} des_key_schedule[16];
+
+#define DES_KEY_SZ (sizeof(des_cblock))
+#define DES_SCHEDULE_SZ (sizeof(des_key_schedule))
+
+#define DES_ENCRYPT 1
+#define DES_DECRYPT 0
+
+#define DES_CBC_MODE 0
+#define DES_PCBC_MODE 1
+
+extern int des_check_key; /* defaults to false */
+
+char *des_options(void);
+void des_ecb_encrypt(des_cblock *, des_cblock *, des_key_schedule, int);
+
+void des_encrypt1(DES_LONG *, des_key_schedule, int);
+void des_encrypt2(DES_LONG *, des_key_schedule, int);
+void des_encrypt3(DES_LONG *, des_key_schedule, des_key_schedule,
+ des_key_schedule);
+void des_decrypt3(DES_LONG *, des_key_schedule, des_key_schedule,
+ des_key_schedule);
+
+void des_ecb3_encrypt(des_cblock *, des_cblock *, des_key_schedule,
+ des_key_schedule, des_key_schedule, int);
+
+void des_ncbc_encrypt(const unsigned char *, unsigned char *, long,
+ des_key_schedule, des_cblock *, int);
+
+void des_ede3_cbc_encrypt(const unsigned char *, unsigned char *, long,
+ des_key_schedule, des_key_schedule,
+ des_key_schedule, des_cblock *, int);
+
+void des_set_odd_parity(des_cblock *);
+void des_fixup_key_parity(des_cblock *);
+int des_is_weak_key(des_cblock *);
+int des_set_key(des_cblock *, des_key_schedule);
+int des_key_sched(des_cblock *, des_key_schedule);
+int des_set_key_checked(des_cblock *, des_key_schedule);
+void des_set_key_unchecked(des_cblock *, des_key_schedule);
+int des_check_key_parity(des_cblock *);
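+
+/*
+ * Typical use (illustrative sketch): build a key schedule from an
+ * 8-byte key, then encrypt a single block in ECB mode:
+ *
+ *   des_cblock key, in, out;
+ *   des_key_schedule ks;
+ *
+ *   des_set_key(&key, ks);
+ *   des_ecb_encrypt(&in, &out, ks, DES_ENCRYPT);
+ */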
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/rtems/freebsd/crypto/des/des_ecb.c b/rtems/freebsd/crypto/des/des_ecb.c
new file mode 100644
index 00000000..8eda937b
--- /dev/null
+++ b/rtems/freebsd/crypto/des/des_ecb.c
@@ -0,0 +1,141 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/* $KAME: des_ecb.c,v 1.6 2001/09/10 04:03:58 itojun Exp $ */
+
+/* crypto/des/ecb_enc.c */
+
+/* Copyright (C) 1995-1998 Eric Young (eay@mincom.oz.au)
+ * All rights reserved.
+ *
+ * This file is part of an SSL implementation written
+ * by Eric Young (eay@mincom.oz.au).
+ * The implementation was written so as to conform with Netscapes SSL
+ * specification. This library and applications are
+ * FREE FOR COMMERCIAL AND NON-COMMERCIAL USE
+ * as long as the following conditions are aheared to.
+ *
+ * Copyright remains Eric Young's, and as such any Copyright notices in
+ * the code are not to be removed. If this code is used in a product,
+ * Eric Young should be given attribution as the author of the parts used.
+ * This can be in the form of a textual message at program startup or
+ * in documentation (online or textual) provided with the package.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Eric Young (eay@mincom.oz.au)
+ *
+ * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * The licence and distribution terms for any publically available version or
+ * derivative of this code cannot be changed. i.e. this code cannot simply be
+ * copied and put under another distribution licence
+ * [including the GNU Public Licence.]
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/crypto/des/des_locl.h>
+#include <rtems/freebsd/crypto/des/spr.h>
+
+/* char *libdes_version="libdes v 3.24 - 20-Apr-1996 - eay"; */ /* wrong */
+/* char *DES_version="DES part of SSLeay 0.6.4 30-Aug-1996"; */
+
+char *des_options(void)
+ {
+ static int init=1;
+ static char buf[32];
+
+ if (init)
+ {
+ const char *ptr,*unroll,*risc,*size;
+
+#ifdef DES_PTR
+ ptr="ptr";
+#else
+ ptr="idx";
+#endif
+#if defined(DES_RISC1) || defined(DES_RISC2)
+#ifdef DES_RISC1
+ risc="risc1";
+#endif
+#ifdef DES_RISC2
+ risc="risc2";
+#endif
+#else
+ risc="cisc";
+#endif
+#ifdef DES_UNROLL
+ unroll="16";
+#else
+ unroll="4";
+#endif
+ if (sizeof(DES_LONG) != sizeof(long))
+ size="int";
+ else
+ size="long";
+ sprintf(buf,"des(%s,%s,%s,%s)",ptr,risc,unroll,size);
+ init=0;
+ }
+ return(buf);
+	}
+
+void des_ecb_encrypt(des_cblock *input, des_cblock *output,
+ des_key_schedule ks, int enc)
+{
+ register DES_LONG l;
+ DES_LONG ll[2];
+ const unsigned char *in=&(*input)[0];
+ unsigned char *out = &(*output)[0];
+
+ c2l(in,l); ll[0]=l;
+ c2l(in,l); ll[1]=l;
+ des_encrypt1(ll,ks,enc);
+ l=ll[0]; l2c(l,out);
+ l=ll[1]; l2c(l,out);
+ l=ll[0]=ll[1]=0;
+}
+
+void des_ecb3_encrypt(des_cblock *input, des_cblock *output,
+ des_key_schedule ks1, des_key_schedule ks2, des_key_schedule ks3,
+ int enc)
+{
+ register DES_LONG l0,l1;
+ DES_LONG ll[2];
+ const unsigned char *in = &(*input)[0];
+ unsigned char *out = &(*output)[0];
+
+ c2l(in,l0);
+ c2l(in,l1);
+ ll[0]=l0;
+ ll[1]=l1;
+
+ if (enc)
+ des_encrypt3(ll,ks1,ks2,ks3);
+ else
+ des_decrypt3(ll,ks1,ks2,ks3);
+
+ l0=ll[0];
+ l1=ll[1];
+ l2c(l0,out);
+ l2c(l1,out);
+}
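+
+/*
+ * Example (illustrative sketch): three-key triple DES (EDE) on one
+ * block in ECB mode, given three prepared key schedules:
+ *
+ *   des_key_schedule ks1, ks2, ks3;
+ *
+ *   des_set_key(&k1, ks1);
+ *   des_set_key(&k2, ks2);
+ *   des_set_key(&k3, ks3);
+ *   des_ecb3_encrypt(&in, &out, ks1, ks2, ks3, DES_ENCRYPT);
+ */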
diff --git a/rtems/freebsd/crypto/des/des_enc.c b/rtems/freebsd/crypto/des/des_enc.c
new file mode 100644
index 00000000..46b81fee
--- /dev/null
+++ b/rtems/freebsd/crypto/des/des_enc.c
@@ -0,0 +1,299 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/* $KAME: des_enc.c,v 1.1 2001/09/10 04:03:58 itojun Exp $ */
+
+/* crypto/des/des_enc.c */
+
+/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
+ * All rights reserved.
+ *
+ * This package is an SSL implementation written
+ * by Eric Young (eay@cryptsoft.com).
+ * The implementation was written so as to conform with Netscapes SSL.
+ *
+ * This library is free for commercial and non-commercial use as long as
+ * the following conditions are aheared to. The following conditions
+ * apply to all code found in this distribution, be it the RC4, RSA,
+ * lhash, DES, etc., code; not just the SSL code. The SSL documentation
+ * included with this distribution is covered by the same copyright terms
+ * except that the holder is Tim Hudson (tjh@cryptsoft.com).
+ *
+ * Copyright remains Eric Young's, and as such any Copyright notices in
+ * the code are not to be removed.
+ * If this package is used in a product, Eric Young should be given attribution
+ * as the author of the parts of the library used.
+ * This can be in the form of a textual message at program startup or
+ * in documentation (online or textual) provided with the package.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * "This product includes cryptographic software written by
+ * Eric Young (eay@cryptsoft.com)"
+ * The word 'cryptographic' can be left out if the rouines from the library
+ * being used are not cryptographic related :-).
+ * 4. If you include any Windows specific code (or a derivative thereof) from
+ * the apps directory (application code) you must include an acknowledgement:
+ * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
+ *
+ * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * The licence and distribution terms for any publically available version or
+ * derivative of this code cannot be changed. i.e. this code cannot simply be
+ * copied and put under another distribution licence
+ * [including the GNU Public Licence.]
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/freebsd/sys/types.h>
+#include <rtems/freebsd/crypto/des/des_locl.h>
+
+extern const DES_LONG des_SPtrans[8][64];
+
+void des_encrypt1(DES_LONG *data, des_key_schedule ks, int enc)
+{
+ register DES_LONG l,r,t,u;
+#ifdef DES_PTR
+ register const unsigned char *des_SP=(const unsigned char *)des_SPtrans;
+#endif
+#ifndef DES_UNROLL
+ register int i;
+#endif
+ register DES_LONG *s;
+
+ r=data[0];
+ l=data[1];
+
+ IP(r,l);
+ /* Things have been modified so that the initial rotate is
+ * done outside the loop. This required the
+ * des_SPtrans values in sp.h to be rotated 1 bit to the right.
+ * One perl script later and things have a 5% speed up on a sparc2.
+ * Thanks to Richard Outerbridge <71755.204@CompuServe.COM>
+ * for pointing this out. */
+ /* clear the top bits on machines with 8byte longs */
+ /* shift left by 2 */
+ r=ROTATE(r,29)&0xffffffffL;
+ l=ROTATE(l,29)&0xffffffffL;
+
+ s=ks->ks.deslong;
+ /* I don't know if it is worth the effort of loop unrolling the
+ * inner loop */
+ if (enc)
+ {
+#ifdef DES_UNROLL
+ D_ENCRYPT(l,r, 0); /* 1 */
+ D_ENCRYPT(r,l, 2); /* 2 */
+ D_ENCRYPT(l,r, 4); /* 3 */
+ D_ENCRYPT(r,l, 6); /* 4 */
+ D_ENCRYPT(l,r, 8); /* 5 */
+ D_ENCRYPT(r,l,10); /* 6 */
+ D_ENCRYPT(l,r,12); /* 7 */
+ D_ENCRYPT(r,l,14); /* 8 */
+ D_ENCRYPT(l,r,16); /* 9 */
+ D_ENCRYPT(r,l,18); /* 10 */
+ D_ENCRYPT(l,r,20); /* 11 */
+ D_ENCRYPT(r,l,22); /* 12 */
+ D_ENCRYPT(l,r,24); /* 13 */
+ D_ENCRYPT(r,l,26); /* 14 */
+ D_ENCRYPT(l,r,28); /* 15 */
+ D_ENCRYPT(r,l,30); /* 16 */
+#else
+ for (i=0; i<32; i+=8)
+ {
+ D_ENCRYPT(l,r,i+0); /* 1 */
+ D_ENCRYPT(r,l,i+2); /* 2 */
+ D_ENCRYPT(l,r,i+4); /* 3 */
+ D_ENCRYPT(r,l,i+6); /* 4 */
+ }
+#endif
+ }
+ else
+ {
+#ifdef DES_UNROLL
+ D_ENCRYPT(l,r,30); /* 16 */
+ D_ENCRYPT(r,l,28); /* 15 */
+ D_ENCRYPT(l,r,26); /* 14 */
+ D_ENCRYPT(r,l,24); /* 13 */
+ D_ENCRYPT(l,r,22); /* 12 */
+ D_ENCRYPT(r,l,20); /* 11 */
+ D_ENCRYPT(l,r,18); /* 10 */
+ D_ENCRYPT(r,l,16); /* 9 */
+ D_ENCRYPT(l,r,14); /* 8 */
+ D_ENCRYPT(r,l,12); /* 7 */
+ D_ENCRYPT(l,r,10); /* 6 */
+ D_ENCRYPT(r,l, 8); /* 5 */
+ D_ENCRYPT(l,r, 6); /* 4 */
+ D_ENCRYPT(r,l, 4); /* 3 */
+ D_ENCRYPT(l,r, 2); /* 2 */
+ D_ENCRYPT(r,l, 0); /* 1 */
+#else
+ for (i=30; i>0; i-=8)
+ {
+ D_ENCRYPT(l,r,i-0); /* 16 */
+ D_ENCRYPT(r,l,i-2); /* 15 */
+ D_ENCRYPT(l,r,i-4); /* 14 */
+ D_ENCRYPT(r,l,i-6); /* 13 */
+ }
+#endif
+ }
+
+ /* rotate and clear the top bits on machines with 8byte longs */
+ l=ROTATE(l,3)&0xffffffffL;
+ r=ROTATE(r,3)&0xffffffffL;
+
+ FP(r,l);
+ data[0]=l;
+ data[1]=r;
+ l=r=t=u=0;
+}
+
+void des_encrypt2(DES_LONG *data, des_key_schedule ks, int enc)
+{
+ register DES_LONG l,r,t,u;
+#ifdef DES_PTR
+ register const unsigned char *des_SP=(const unsigned char *)des_SPtrans;
+#endif
+#ifndef DES_UNROLL
+ register int i;
+#endif
+ register DES_LONG *s;
+
+ r=data[0];
+ l=data[1];
+
+ /* Things have been modified so that the initial rotate is
+ * done outside the loop. This required the
+ * des_SPtrans values in sp.h to be rotated 1 bit to the right.
+ * One perl script later and things have a 5% speed up on a sparc2.
+ * Thanks to Richard Outerbridge <71755.204@CompuServe.COM>
+ * for pointing this out. */
+ /* clear the top bits on machines with 8byte longs */
+ r=ROTATE(r,29)&0xffffffffL;
+ l=ROTATE(l,29)&0xffffffffL;
+
+ s=ks->ks.deslong;
+ /* I don't know if it is worth the effort of loop unrolling the
+ * inner loop */
+ if (enc)
+ {
+#ifdef DES_UNROLL
+ D_ENCRYPT(l,r, 0); /* 1 */
+ D_ENCRYPT(r,l, 2); /* 2 */
+ D_ENCRYPT(l,r, 4); /* 3 */
+ D_ENCRYPT(r,l, 6); /* 4 */
+ D_ENCRYPT(l,r, 8); /* 5 */
+ D_ENCRYPT(r,l,10); /* 6 */
+ D_ENCRYPT(l,r,12); /* 7 */
+ D_ENCRYPT(r,l,14); /* 8 */
+ D_ENCRYPT(l,r,16); /* 9 */
+ D_ENCRYPT(r,l,18); /* 10 */
+ D_ENCRYPT(l,r,20); /* 11 */
+ D_ENCRYPT(r,l,22); /* 12 */
+ D_ENCRYPT(l,r,24); /* 13 */
+ D_ENCRYPT(r,l,26); /* 14 */
+ D_ENCRYPT(l,r,28); /* 15 */
+ D_ENCRYPT(r,l,30); /* 16 */
+#else
+ for (i=0; i<32; i+=8)
+ {
+ D_ENCRYPT(l,r,i+0); /* 1 */
+ D_ENCRYPT(r,l,i+2); /* 2 */
+ D_ENCRYPT(l,r,i+4); /* 3 */
+ D_ENCRYPT(r,l,i+6); /* 4 */
+ }
+#endif
+ }
+ else
+ {
+#ifdef DES_UNROLL
+ D_ENCRYPT(l,r,30); /* 16 */
+ D_ENCRYPT(r,l,28); /* 15 */
+ D_ENCRYPT(l,r,26); /* 14 */
+ D_ENCRYPT(r,l,24); /* 13 */
+ D_ENCRYPT(l,r,22); /* 12 */
+ D_ENCRYPT(r,l,20); /* 11 */
+ D_ENCRYPT(l,r,18); /* 10 */
+ D_ENCRYPT(r,l,16); /* 9 */
+ D_ENCRYPT(l,r,14); /* 8 */
+ D_ENCRYPT(r,l,12); /* 7 */
+ D_ENCRYPT(l,r,10); /* 6 */
+ D_ENCRYPT(r,l, 8); /* 5 */
+ D_ENCRYPT(l,r, 6); /* 4 */
+ D_ENCRYPT(r,l, 4); /* 3 */
+ D_ENCRYPT(l,r, 2); /* 2 */
+ D_ENCRYPT(r,l, 0); /* 1 */
+#else
+ for (i=30; i>0; i-=8)
+ {
+ D_ENCRYPT(l,r,i-0); /* 16 */
+ D_ENCRYPT(r,l,i-2); /* 15 */
+ D_ENCRYPT(l,r,i-4); /* 14 */
+ D_ENCRYPT(r,l,i-6); /* 13 */
+ }
+#endif
+ }
+ /* rotate and clear the top bits on machines with 8byte longs */
+ data[0]=ROTATE(l,3)&0xffffffffL;
+ data[1]=ROTATE(r,3)&0xffffffffL;
+ l=r=t=u=0;
+}
+
+void des_encrypt3(DES_LONG *data, des_key_schedule ks1, des_key_schedule ks2,
+ des_key_schedule ks3)
+{
+ register DES_LONG l,r;
+
+ l=data[0];
+ r=data[1];
+ IP(l,r);
+ data[0]=l;
+ data[1]=r;
+ des_encrypt2((DES_LONG *)data,ks1,DES_ENCRYPT);
+ des_encrypt2((DES_LONG *)data,ks2,DES_DECRYPT);
+ des_encrypt2((DES_LONG *)data,ks3,DES_ENCRYPT);
+ l=data[0];
+ r=data[1];
+ FP(r,l);
+ data[0]=l;
+ data[1]=r;
+}
+
+void des_decrypt3(DES_LONG *data, des_key_schedule ks1, des_key_schedule ks2,
+ des_key_schedule ks3)
+{
+ register DES_LONG l,r;
+
+ l=data[0];
+ r=data[1];
+ IP(l,r);
+ data[0]=l;
+ data[1]=r;
+ des_encrypt2((DES_LONG *)data,ks3,DES_DECRYPT);
+ des_encrypt2((DES_LONG *)data,ks2,DES_ENCRYPT);
+ des_encrypt2((DES_LONG *)data,ks1,DES_DECRYPT);
+ l=data[0];
+ r=data[1];
+ FP(r,l);
+ data[0]=l;
+ data[1]=r;
+}
diff --git a/rtems/freebsd/crypto/des/des_locl.h b/rtems/freebsd/crypto/des/des_locl.h
new file mode 100644
index 00000000..fbb400b9
--- /dev/null
+++ b/rtems/freebsd/crypto/des/des_locl.h
@@ -0,0 +1,364 @@
+/* $FreeBSD$ */
+/* $KAME: des_locl.h,v 1.7 2001/09/10 04:03:58 itojun Exp $ */
+
+/* crypto/des/des_locl.h */
+/* Copyright (C) 1995-1997 Eric Young (eay@mincom.oz.au)
+ * All rights reserved.
+ *
+ * This file is part of an SSL implementation written
+ * by Eric Young (eay@mincom.oz.au).
+ * The implementation was written so as to conform with Netscapes SSL
+ * specification. This library and applications are
+ * FREE FOR COMMERCIAL AND NON-COMMERCIAL USE
+ * as long as the following conditions are aheared to.
+ *
+ * Copyright remains Eric Young's, and as such any Copyright notices in
+ * the code are not to be removed. If this code is used in a product,
+ * Eric Young should be given attribution as the author of the parts used.
+ * This can be in the form of a textual message at program startup or
+ * in documentation (online or textual) provided with the package.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Eric Young (eay@mincom.oz.au)
+ *
+ * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * The licence and distribution terms for any publically available version or
+ * derivative of this code cannot be changed. i.e. this code cannot simply be
+ * copied and put under another distribution licence
+ * [including the GNU Public Licence.]
+ */
+
+#ifndef HEADER_DES_LOCL_H
+#define HEADER_DES_LOCL_H
+
+#include <rtems/freebsd/crypto/des/des.h>
+
+#undef DES_PTR
+
+#ifdef __STDC__
+#undef NOPROTO
+#endif
+
+#define ITERATIONS 16
+#define HALF_ITERATIONS 8
+
+/* used in des_read and des_write */
+#define MAXWRITE (1024*16)
+#define BSIZE (MAXWRITE+4)
+
+#define c2l(c,l) (l =((DES_LONG)(*((c)++))) , \
+ l|=((DES_LONG)(*((c)++)))<< 8L, \
+ l|=((DES_LONG)(*((c)++)))<<16L, \
+ l|=((DES_LONG)(*((c)++)))<<24L)
+
+/* NOTE - c is not incremented as per c2l */
+#define c2ln(c,l1,l2,n) { \
+ c+=n; \
+ l1=l2=0; \
+ switch (n) { \
+ case 8: l2 =((DES_LONG)(*(--(c))))<<24L; \
+ case 7: l2|=((DES_LONG)(*(--(c))))<<16L; \
+ case 6: l2|=((DES_LONG)(*(--(c))))<< 8L; \
+ case 5: l2|=((DES_LONG)(*(--(c)))); \
+ case 4: l1 =((DES_LONG)(*(--(c))))<<24L; \
+ case 3: l1|=((DES_LONG)(*(--(c))))<<16L; \
+ case 2: l1|=((DES_LONG)(*(--(c))))<< 8L; \
+ case 1: l1|=((DES_LONG)(*(--(c)))); \
+ } \
+ }
+
+#define l2c(l,c) (*((c)++)=(unsigned char)(((l) )&0xff), \
+ *((c)++)=(unsigned char)(((l)>> 8L)&0xff), \
+ *((c)++)=(unsigned char)(((l)>>16L)&0xff), \
+ *((c)++)=(unsigned char)(((l)>>24L)&0xff))
+
+/* replacements for htonl and ntohl since I have no idea what to do
+ * when faced with machines with 8 byte longs. */
+#define HDRSIZE 4
+
+#define n2l(c,l) (l =((DES_LONG)(*((c)++)))<<24L, \
+ l|=((DES_LONG)(*((c)++)))<<16L, \
+ l|=((DES_LONG)(*((c)++)))<< 8L, \
+ l|=((DES_LONG)(*((c)++))))
+
+#define l2n(l,c) (*((c)++)=(unsigned char)(((l)>>24L)&0xff), \
+ *((c)++)=(unsigned char)(((l)>>16L)&0xff), \
+ *((c)++)=(unsigned char)(((l)>> 8L)&0xff), \
+ *((c)++)=(unsigned char)(((l) )&0xff))
+
+/* NOTE - c is not incremented as per l2c */
+#define l2cn(l1,l2,c,n) { \
+ c+=n; \
+ switch (n) { \
+ case 8: *(--(c))=(unsigned char)(((l2)>>24L)&0xff); \
+ case 7: *(--(c))=(unsigned char)(((l2)>>16L)&0xff); \
+ case 6: *(--(c))=(unsigned char)(((l2)>> 8L)&0xff); \
+ case 5: *(--(c))=(unsigned char)(((l2) )&0xff); \
+ case 4: *(--(c))=(unsigned char)(((l1)>>24L)&0xff); \
+ case 3: *(--(c))=(unsigned char)(((l1)>>16L)&0xff); \
+ case 2: *(--(c))=(unsigned char)(((l1)>> 8L)&0xff); \
+ case 1: *(--(c))=(unsigned char)(((l1) )&0xff); \
+ } \
+ }
+
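+/* 32-bit right rotation; "+" behaves like "|" here because the two
+ * shifted fields cannot overlap */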
+#define ROTATE(a,n) (((a)>>(n))+((a)<<(32-(n))))
+
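+/* LOAD_DATA ignores its last ("tmp") argument, so the dangling "g"
+ * below is never expanded */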
+#define LOAD_DATA_tmp(a,b,c,d,e,f) LOAD_DATA(a,b,c,d,e,f,g)
+#define LOAD_DATA(R,S,u,t,E0,E1,tmp) \
+ u=R^s[S ]; \
+ t=R^s[S+1]
+
+/* The changes to this macro may help or hinder, depending on the
+ * compiler and the architecture. gcc2 always seems to do well :-).
+ * Inspired by Dana How <how@isl.stanford.edu>
+ * DO NOT use the alternative version on machines with 8 byte longs.
+ * It does not seem to work on the Alpha, even when DES_LONG is 4
+ * bytes, probably an issue of accessing non-word aligned objects :-( */
+#ifdef DES_PTR
+
+/* It recently occurred to me that 0^0^0^0^0^0^0 == 0, so there
+ * is no reason to not xor all the sub items together. This potentially
+ * saves a register since things can be xored directly into L */
+
+#if defined(DES_RISC1) || defined(DES_RISC2)
+#ifdef DES_RISC1
+#define D_ENCRYPT(LL,R,S) { \
+ unsigned int u1,u2,u3; \
+ LOAD_DATA(R,S,u,t,E0,E1,u1); \
+ u2=(int)u>>8L; \
+ u1=(int)u&0xfc; \
+ u2&=0xfc; \
+ t=ROTATE(t,4); \
+ u>>=16L; \
+ LL^= *(const DES_LONG *)(des_SP +u1); \
+ LL^= *(const DES_LONG *)(des_SP+0x200+u2); \
+ u3=(int)(u>>8L); \
+ u1=(int)u&0xfc; \
+ u3&=0xfc; \
+ LL^= *(const DES_LONG *)(des_SP+0x400+u1); \
+ LL^= *(const DES_LONG *)(des_SP+0x600+u3); \
+ u2=(int)t>>8L; \
+ u1=(int)t&0xfc; \
+ u2&=0xfc; \
+ t>>=16L; \
+ LL^= *(const DES_LONG *)(des_SP+0x100+u1); \
+ LL^= *(const DES_LONG *)(des_SP+0x300+u2); \
+ u3=(int)t>>8L; \
+ u1=(int)t&0xfc; \
+ u3&=0xfc; \
+ LL^= *(const DES_LONG *)(des_SP+0x500+u1); \
+ LL^= *(const DES_LONG *)(des_SP+0x700+u3); }
+#endif /* DES_RISC1 */
+#ifdef DES_RISC2
+#define D_ENCRYPT(LL,R,S) { \
+ unsigned int u1,u2,s1,s2; \
+ LOAD_DATA(R,S,u,t,E0,E1,u1); \
+ u2=(int)u>>8L; \
+ u1=(int)u&0xfc; \
+ u2&=0xfc; \
+ t=ROTATE(t,4); \
+ LL^= *(const DES_LONG *)(des_SP +u1); \
+ LL^= *(const DES_LONG *)(des_SP+0x200+u2); \
+ s1=(int)(u>>16L); \
+ s2=(int)(u>>24L); \
+ s1&=0xfc; \
+ s2&=0xfc; \
+ LL^= *(const DES_LONG *)(des_SP+0x400+s1); \
+ LL^= *(const DES_LONG *)(des_SP+0x600+s2); \
+ u2=(int)t>>8L; \
+ u1=(int)t&0xfc; \
+ u2&=0xfc; \
+ LL^= *(const DES_LONG *)(des_SP+0x100+u1); \
+ LL^= *(const DES_LONG *)(des_SP+0x300+u2); \
+ s1=(int)(t>>16L); \
+ s2=(int)(t>>24L); \
+ s1&=0xfc; \
+ s2&=0xfc; \
+ LL^= *(const DES_LONG *)(des_SP+0x400+s1); \
+ LL^= *(const DES_LONG *)(des_SP+0x600+s2); \
+ u2=(int)t>>8L; \
+ u1=(int)t&0xfc; \
+ u2&=0xfc; \
+ LL^= *(const DES_LONG *)(des_SP+0x100+u1); \
+ LL^= *(const DES_LONG *)(des_SP+0x300+u2); \
+ s1=(int)(t>>16L); \
+ s2=(int)(t>>24L); \
+ s1&=0xfc; \
+ s2&=0xfc; \
+ LL^= *(const DES_LONG *)(des_SP+0x500+s1); \
+ LL^= *(const DES_LONG *)(des_SP+0x700+s2); }
+#endif /* DES_RISC2 */
+#else /* DES_RISC1 || DES_RISC2 */
+#define D_ENCRYPT(LL,R,S) { \
+ LOAD_DATA_tmp(R,S,u,t,E0,E1); \
+ t=ROTATE(t,4); \
+ LL^= \
+ *(const DES_LONG *)(des_SP +((u )&0xfc))^ \
+ *(const DES_LONG *)(des_SP+0x200+((u>> 8L)&0xfc))^ \
+ *(const DES_LONG *)(des_SP+0x400+((u>>16L)&0xfc))^ \
+ *(const DES_LONG *)(des_SP+0x600+((u>>24L)&0xfc))^ \
+ *(const DES_LONG *)(des_SP+0x100+((t )&0xfc))^ \
+ *(const DES_LONG *)(des_SP+0x300+((t>> 8L)&0xfc))^ \
+ *(const DES_LONG *)(des_SP+0x500+((t>>16L)&0xfc))^ \
+ *(const DES_LONG *)(des_SP+0x700+((t>>24L)&0xfc)); }
+#endif /* DES_RISC1 || DES_RISC2 */
+#else /* original version */
+
+#if defined(DES_RISC1) || defined(DES_RISC2)
+#ifdef DES_RISC1
+#define D_ENCRYPT(LL,R,S) {\
+ unsigned int u1,u2,u3; \
+ LOAD_DATA(R,S,u,t,E0,E1,u1); \
+ u>>=2L; \
+ t=ROTATE(t,6); \
+ u2=(int)u>>8L; \
+ u1=(int)u&0x3f; \
+ u2&=0x3f; \
+ u>>=16L; \
+ LL^=des_SPtrans[0][u1]; \
+ LL^=des_SPtrans[2][u2]; \
+ u3=(int)u>>8L; \
+ u1=(int)u&0x3f; \
+ u3&=0x3f; \
+ LL^=des_SPtrans[4][u1]; \
+ LL^=des_SPtrans[6][u3]; \
+ u2=(int)t>>8L; \
+ u1=(int)t&0x3f; \
+ u2&=0x3f; \
+ t>>=16L; \
+ LL^=des_SPtrans[1][u1]; \
+ LL^=des_SPtrans[3][u2]; \
+ u3=(int)t>>8L; \
+ u1=(int)t&0x3f; \
+ u3&=0x3f; \
+ LL^=des_SPtrans[5][u1]; \
+ LL^=des_SPtrans[7][u3]; }
+#endif /* DES_RISC1 */
+#ifdef DES_RISC2
+#define D_ENCRYPT(LL,R,S) {\
+ unsigned int u1,u2,s1,s2; \
+ LOAD_DATA(R,S,u,t,E0,E1,u1); \
+ u>>=2L; \
+ t=ROTATE(t,6); \
+ u2=(int)u>>8L; \
+ u1=(int)u&0x3f; \
+ u2&=0x3f; \
+ LL^=des_SPtrans[0][u1]; \
+ LL^=des_SPtrans[2][u2]; \
+ s1=(int)u>>16L; \
+ s2=(int)u>>24L; \
+ s1&=0x3f; \
+ s2&=0x3f; \
+ LL^=des_SPtrans[4][s1]; \
+ LL^=des_SPtrans[6][s2]; \
+ u2=(int)t>>8L; \
+ u1=(int)t&0x3f; \
+ u2&=0x3f; \
+ LL^=des_SPtrans[1][u1]; \
+ LL^=des_SPtrans[3][u2]; \
+ s1=(int)t>>16L; \
+ s2=(int)t>>24L; \
+ s1&=0x3f; \
+ s2&=0x3f; \
+ LL^=des_SPtrans[5][s1]; \
+ LL^=des_SPtrans[7][s2]; }
+#endif /* DES_RISC2 */
+
+#else /* DES_RISC1 || DES_RISC2 */
+
+#define D_ENCRYPT(LL,R,S) {\
+ LOAD_DATA_tmp(R,S,u,t,E0,E1); \
+ t=ROTATE(t,4); \
+ LL^=\
+ des_SPtrans[0][(u>> 2L)&0x3f]^ \
+ des_SPtrans[2][(u>>10L)&0x3f]^ \
+ des_SPtrans[4][(u>>18L)&0x3f]^ \
+ des_SPtrans[6][(u>>26L)&0x3f]^ \
+ des_SPtrans[1][(t>> 2L)&0x3f]^ \
+ des_SPtrans[3][(t>>10L)&0x3f]^ \
+ des_SPtrans[5][(t>>18L)&0x3f]^ \
+ des_SPtrans[7][(t>>26L)&0x3f]; }
+#endif /* DES_RISC1 || DES_RISC2 */
+#endif /* DES_PTR */
+
+ /* IP and FP
+ * The problem is more of a geometric problem than random bit fiddling.
+ 0 1 2 3 4 5 6 7 62 54 46 38 30 22 14 6
+ 8 9 10 11 12 13 14 15 60 52 44 36 28 20 12 4
+ 16 17 18 19 20 21 22 23 58 50 42 34 26 18 10 2
+ 24 25 26 27 28 29 30 31 to 56 48 40 32 24 16 8 0
+
+ 32 33 34 35 36 37 38 39 63 55 47 39 31 23 15 7
+ 40 41 42 43 44 45 46 47 61 53 45 37 29 21 13 5
+ 48 49 50 51 52 53 54 55 59 51 43 35 27 19 11 3
+ 56 57 58 59 60 61 62 63 57 49 41 33 25 17 9 1
+
+ The output has been subject to swaps of the form
+ 0 1 -> 3 1 but the odd and even bits have been put into
+ 2 3 2 0
+ different words. The main trick is to remember that
+ t=((l>>size)^r)&(mask);
+ r^=t;
+ l^=(t<<size);
+ can be used to swap and move bits between words.
+
+ So l = 0 1 2 3 r = 16 17 18 19
+ 4 5 6 7 20 21 22 23
+ 8 9 10 11 24 25 26 27
+ 12 13 14 15 28 29 30 31
+ becomes (for size == 2 and mask == 0x3333)
+ t = 2^16 3^17 -- -- l = 0 1 16 17 r = 2 3 18 19
+ 6^20 7^21 -- -- 4 5 20 21 6 7 22 23
+ 10^24 11^25 -- -- 8 9 24 25 10 11 24 25
+ 14^28 15^29 -- -- 12 13 28 29 14 15 28 29
+
+ Thanks for hints from Richard Outerbridge - he told me IP&FP
+ could be done in 15 xor, 10 shifts and 5 ands.
+ When I finally started to think of the problem in 2D
+ I first got ~42 operations without xors. When I remembered
+ how to use xors :-) I got it to its final state.
+ */
+#define PERM_OP(a,b,t,n,m) ((t)=((((a)>>(n))^(b))&(m)),\
+ (b)^=(t),\
+ (a)^=((t)<<(n)))
+
+#define IP(l,r) \
+ { \
+ register DES_LONG tt; \
+ PERM_OP(r,l,tt, 4,0x0f0f0f0fL); \
+ PERM_OP(l,r,tt,16,0x0000ffffL); \
+ PERM_OP(r,l,tt, 2,0x33333333L); \
+ PERM_OP(l,r,tt, 8,0x00ff00ffL); \
+ PERM_OP(r,l,tt, 1,0x55555555L); \
+ }
+
+#define FP(l,r) \
+ { \
+ register DES_LONG tt; \
+ PERM_OP(l,r,tt, 1,0x55555555L); \
+ PERM_OP(r,l,tt, 8,0x00ff00ffL); \
+ PERM_OP(l,r,tt, 2,0x33333333L); \
+ PERM_OP(r,l,tt,16,0x0000ffffL); \
+ PERM_OP(l,r,tt, 4,0x0f0f0f0fL); \
+ }
+#endif
diff --git a/rtems/freebsd/crypto/des/des_setkey.c b/rtems/freebsd/crypto/des/des_setkey.c
new file mode 100644
index 00000000..d5fa4d73
--- /dev/null
+++ b/rtems/freebsd/crypto/des/des_setkey.c
@@ -0,0 +1,238 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/* $KAME: des_setkey.c,v 1.7 2001/09/10 04:03:58 itojun Exp $ */
+
+/* crypto/des/set_key.c */
+
+/* Copyright (C) 1995-1996 Eric Young (eay@mincom.oz.au)
+ * All rights reserved.
+ *
+ * This file is part of an SSL implementation written
+ * by Eric Young (eay@mincom.oz.au).
+ * The implementation was written so as to conform with Netscapes SSL
+ * specification. This library and applications are
+ * FREE FOR COMMERCIAL AND NON-COMMERCIAL USE
+ * as long as the following conditions are aheared to.
+ *
+ * Copyright remains Eric Young's, and as such any Copyright notices in
+ * the code are not to be removed. If this code is used in a product,
+ * Eric Young should be given attribution as the author of the parts used.
+ * This can be in the form of a textual message at program startup or
+ * in documentation (online or textual) provided with the package.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Eric Young (eay@mincom.oz.au)
+ *
+ * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * The licence and distribution terms for any publically available version or
+ * derivative of this code cannot be changed. i.e. this code cannot simply be
+ * copied and put under another distribution licence
+ * [including the GNU Public Licence.]
+ */
+
+/* set_key.c v 1.4 eay 24/9/91
+ * 1.4 Speed up by 400% :-)
+ * 1.3 added register declarations.
+ * 1.2 unrolled make_key_sched a bit more
+ * 1.1 added norm_expand_bits
+ * 1.0 First working version
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/crypto/des/des_locl.h>
+#include <rtems/freebsd/crypto/des/podd.h>
+#include <rtems/freebsd/crypto/des/sk.h>
+
+int des_check_key=0;
+
+void des_set_odd_parity(des_cblock *key)
+{
+ int i;
+
+ for (i=0; i<DES_KEY_SZ; i++)
+ (*key)[i]=odd_parity[(*key)[i]];
+}
+
+int des_check_key_parity(des_cblock *key)
+{
+ int i;
+
+ for (i=0; i<DES_KEY_SZ; i++)
+ {
+ if ((*key)[i] != odd_parity[(*key)[i]])
+ return(0);
+ }
+ return(1);
+}
+
+/* Weak and semi-weak keys as taken from
+ * %A D.W. Davies
+ * %A W.L. Price
+ * %T Security for Computer Networks
+ * %I John Wiley & Sons
+ * %D 1984
+ * Many thanks to smb@ulysses.att.com (Steven Bellovin) for the reference
+ * (and actual cblock values).
+ */
+#define NUM_WEAK_KEY 16
+static des_cblock weak_keys[NUM_WEAK_KEY]={
+ /* weak keys */
+ {0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01},
+ {0xFE,0xFE,0xFE,0xFE,0xFE,0xFE,0xFE,0xFE},
+ {0x1F,0x1F,0x1F,0x1F,0x0E,0x0E,0x0E,0x0E},
+ {0xE0,0xE0,0xE0,0xE0,0xF1,0xF1,0xF1,0xF1},
+ /* semi-weak keys */
+ {0x01,0xFE,0x01,0xFE,0x01,0xFE,0x01,0xFE},
+ {0xFE,0x01,0xFE,0x01,0xFE,0x01,0xFE,0x01},
+ {0x1F,0xE0,0x1F,0xE0,0x0E,0xF1,0x0E,0xF1},
+ {0xE0,0x1F,0xE0,0x1F,0xF1,0x0E,0xF1,0x0E},
+ {0x01,0xE0,0x01,0xE0,0x01,0xF1,0x01,0xF1},
+ {0xE0,0x01,0xE0,0x01,0xF1,0x01,0xF1,0x01},
+ {0x1F,0xFE,0x1F,0xFE,0x0E,0xFE,0x0E,0xFE},
+ {0xFE,0x1F,0xFE,0x1F,0xFE,0x0E,0xFE,0x0E},
+ {0x01,0x1F,0x01,0x1F,0x01,0x0E,0x01,0x0E},
+ {0x1F,0x01,0x1F,0x01,0x0E,0x01,0x0E,0x01},
+ {0xE0,0xFE,0xE0,0xFE,0xF1,0xFE,0xF1,0xFE},
+ {0xFE,0xE0,0xFE,0xE0,0xFE,0xF1,0xFE,0xF1}};
+
+int des_is_weak_key(des_cblock *key)
+{
+ int i;
+
+ for (i=0; i<NUM_WEAK_KEY; i++)
+ /* Added == 0 to comparison, I obviously don't run
+ * this section very often :-(, thanks to
+ * engineering@MorningStar.Com for the fix
+ * eay 93/06/29
+ * Another problem, I was comparing only the first 4
+ * bytes, 97/03/18 */
+ if (memcmp(weak_keys[i],key,sizeof(des_cblock)) == 0) return(1);
+ return(0);
+}
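+
+/*
+ * Illustrative sketch (added for documentation; not part of the original
+ * file): typical key preparation forces odd parity first and then rejects
+ * the known weak and semi-weak values listed above.  The function name is
+ * made up.
+ */
+static int __unused
+example_prepare_key(des_cblock *key)
+{
+	des_set_odd_parity(key);
+	if (des_is_weak_key(key))
+		return (-1);	/* caller should choose another key */
+	return (0);
+}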
+
+/* NOW DEFINED IN des_locl.h
+ * See ecb_encrypt.c for a pseudo description of these macros.
+ * #define PERM_OP(a,b,t,n,m) ((t)=((((a)>>(n))^(b))&(m)),\
+ * (b)^=(t),\
+ * (a)=((a)^((t)<<(n))))
+ */
+
+#define HPERM_OP(a,t,n,m) ((t)=((((a)<<(16-(n)))^(a))&(m)),\
+ (a)=(a)^(t)^(t>>(16-(n))))
+
+int des_set_key(des_cblock *key, des_key_schedule schedule)
+{
+ if (des_check_key)
+ {
+ return des_set_key_checked(key, schedule);
+ }
+ else
+ {
+ des_set_key_unchecked(key, schedule);
+ return 0;
+ }
+}
+
+/* return 0 if the key parity is odd (correct),
+ * return -1 on a key parity error,
+ * return -2 if the key is an illegal weak key.
+ */
+int des_set_key_checked(des_cblock *key, des_key_schedule schedule)
+{
+ if (!des_check_key_parity(key))
+ return(-1);
+ if (des_is_weak_key(key))
+ return(-2);
+ des_set_key_unchecked(key, schedule);
+ return 0;
+}
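+
+/*
+ * Illustrative sketch (added for documentation; not part of the original
+ * file): acting on the return codes above.  The function name is made up;
+ * the key bytes are a well-known odd-parity example key.
+ */
+static int __unused
+example_checked_schedule(des_key_schedule ks)
+{
+	des_cblock key = {0x13,0x34,0x57,0x79,0x9b,0xbc,0xdf,0xf1};
+
+	switch (des_set_key_checked(&key, ks)) {
+	case -1:
+		return (-1);	/* parity error: des_set_odd_parity() fixes it */
+	case -2:
+		return (-2);	/* weak or semi-weak key: reject it */
+	default:
+		return (0);	/* schedule is ready for use */
+	}
+}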
+
+void des_set_key_unchecked(des_cblock *key, des_key_schedule schedule)
+{
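+	/*
+	 * shifts2[i] is non-zero for the rounds in which the FIPS 46 key
+	 * schedule rotates C and D by two bit positions; the remaining
+	 * rounds (entries 0, 1, 8 and 15) rotate by one, as implemented
+	 * in the loop below.
+	 */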
+ static int shifts2[16]={0,0,1,1,1,1,1,1,0,1,1,1,1,1,1,0};
+ register DES_LONG c,d,t,s,t2;
+ register const unsigned char *in;
+ register DES_LONG *k;
+ register int i;
+
+ k = &schedule->ks.deslong[0];
+ in = &(*key)[0];
+
+ c2l(in,c);
+ c2l(in,d);
+
+ /* do PC1 in 47 simple operations :-)
+ * Thanks to John Fletcher (john_fletcher@lccmail.ocf.llnl.gov)
+ * for the inspiration. :-) */
+ PERM_OP (d,c,t,4,0x0f0f0f0fL);
+ HPERM_OP(c,t,-2,0xcccc0000L);
+ HPERM_OP(d,t,-2,0xcccc0000L);
+ PERM_OP (d,c,t,1,0x55555555L);
+ PERM_OP (c,d,t,8,0x00ff00ffL);
+ PERM_OP (d,c,t,1,0x55555555L);
+ d= (((d&0x000000ffL)<<16L)| (d&0x0000ff00L) |
+ ((d&0x00ff0000L)>>16L)|((c&0xf0000000L)>>4L));
+ c&=0x0fffffffL;
+
+ for (i=0; i<ITERATIONS; i++)
+ {
+ if (shifts2[i])
+ { c=((c>>2L)|(c<<26L)); d=((d>>2L)|(d<<26L)); }
+ else
+ { c=((c>>1L)|(c<<27L)); d=((d>>1L)|(d<<27L)); }
+ c&=0x0fffffffL;
+ d&=0x0fffffffL;
+		/* could be a few fewer shifts, but I am too lazy at this
+		 * point in time to investigate */
+ s= des_skb[0][ (c )&0x3f ]|
+ des_skb[1][((c>> 6L)&0x03)|((c>> 7L)&0x3c)]|
+ des_skb[2][((c>>13L)&0x0f)|((c>>14L)&0x30)]|
+ des_skb[3][((c>>20L)&0x01)|((c>>21L)&0x06) |
+ ((c>>22L)&0x38)];
+ t= des_skb[4][ (d )&0x3f ]|
+ des_skb[5][((d>> 7L)&0x03)|((d>> 8L)&0x3c)]|
+ des_skb[6][ (d>>15L)&0x3f ]|
+ des_skb[7][((d>>21L)&0x0f)|((d>>22L)&0x30)];
+
+ /* table contained 0213 4657 */
+ t2=((t<<16L)|(s&0x0000ffffL))&0xffffffffL;
+ *(k++)=ROTATE(t2,30)&0xffffffffL;
+
+ t2=((s>>16L)|(t&0xffff0000L));
+ *(k++)=ROTATE(t2,26)&0xffffffffL;
+ }
+}
+
+int des_key_sched(des_cblock *key, des_key_schedule schedule)
+{
+ return(des_set_key(key,schedule));
+}
+
+void des_fixup_key_parity(des_cblock *key)
+{
+ des_set_odd_parity(key);
+}
diff --git a/rtems/freebsd/crypto/des/podd.h b/rtems/freebsd/crypto/des/podd.h
new file mode 100644
index 00000000..0528b9ce
--- /dev/null
+++ b/rtems/freebsd/crypto/des/podd.h
@@ -0,0 +1,67 @@
+/* $FreeBSD$ */
+/* $KAME: podd.h,v 1.4 2001/09/10 04:03:58 itojun Exp $ */
+
+/* crypto/des/podd.h */
+/* Copyright (C) 1995-1996 Eric Young (eay@mincom.oz.au)
+ * All rights reserved.
+ *
+ * This file is part of an SSL implementation written
+ * by Eric Young (eay@mincom.oz.au).
+ * The implementation was written so as to conform with Netscape's SSL
+ * specification. This library and applications are
+ * FREE FOR COMMERCIAL AND NON-COMMERCIAL USE
+ * as long as the following conditions are adhered to.
+ *
+ * Copyright remains Eric Young's, and as such any Copyright notices in
+ * the code are not to be removed. If this code is used in a product,
+ * Eric Young should be given attribution as the author of the parts used.
+ * This can be in the form of a textual message at program startup or
+ * in documentation (online or textual) provided with the package.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Eric Young (eay@mincom.oz.au)
+ *
+ * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * The licence and distribution terms for any publicly available version or
+ * derivative of this code cannot be changed. i.e. this code cannot simply be
+ * copied and put under another distribution licence
+ * [including the GNU Public Licence.]
+ */
+
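+/*
+ * odd_parity[i] is i with its least significant bit adjusted so that the
+ * byte contains an odd number of set bits, as DES key bytes require;
+ * applying the table twice therefore changes nothing.
+ */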
+static const unsigned char odd_parity[256]={
+ 1, 1, 2, 2, 4, 4, 7, 7, 8, 8, 11, 11, 13, 13, 14, 14,
+ 16, 16, 19, 19, 21, 21, 22, 22, 25, 25, 26, 26, 28, 28, 31, 31,
+ 32, 32, 35, 35, 37, 37, 38, 38, 41, 41, 42, 42, 44, 44, 47, 47,
+ 49, 49, 50, 50, 52, 52, 55, 55, 56, 56, 59, 59, 61, 61, 62, 62,
+ 64, 64, 67, 67, 69, 69, 70, 70, 73, 73, 74, 74, 76, 76, 79, 79,
+ 81, 81, 82, 82, 84, 84, 87, 87, 88, 88, 91, 91, 93, 93, 94, 94,
+ 97, 97, 98, 98,100,100,103,103,104,104,107,107,109,109,110,110,
+112,112,115,115,117,117,118,118,121,121,122,122,124,124,127,127,
+128,128,131,131,133,133,134,134,137,137,138,138,140,140,143,143,
+145,145,146,146,148,148,151,151,152,152,155,155,157,157,158,158,
+161,161,162,162,164,164,167,167,168,168,171,171,173,173,174,174,
+176,176,179,179,181,181,182,182,185,185,186,186,188,188,191,191,
+193,193,194,194,196,196,199,199,200,200,203,203,205,205,206,206,
+208,208,211,211,213,213,214,214,217,217,218,218,220,220,223,223,
+224,224,227,227,229,229,230,230,233,233,234,234,236,236,239,239,
+241,241,242,242,244,244,247,247,248,248,251,251,253,253,254,254};
diff --git a/rtems/freebsd/crypto/des/sk.h b/rtems/freebsd/crypto/des/sk.h
new file mode 100644
index 00000000..d4aa3750
--- /dev/null
+++ b/rtems/freebsd/crypto/des/sk.h
@@ -0,0 +1,196 @@
+/* $FreeBSD$ */
+/* $KAME: sk.h,v 1.4 2001/09/10 04:03:58 itojun Exp $ */
+
+/* crypto/des/sk.h */
+/* Copyright (C) 1995-1996 Eric Young (eay@mincom.oz.au)
+ * All rights reserved.
+ *
+ * This file is part of an SSL implementation written
+ * by Eric Young (eay@mincom.oz.au).
+ * The implementation was written so as to conform with Netscape's SSL
+ * specification. This library and applications are
+ * FREE FOR COMMERCIAL AND NON-COMMERCIAL USE
+ * as long as the following conditions are adhered to.
+ *
+ * Copyright remains Eric Young's, and as such any Copyright notices in
+ * the code are not to be removed. If this code is used in a product,
+ * Eric Young should be given attribution as the author of the parts used.
+ * This can be in the form of a textual message at program startup or
+ * in documentation (online or textual) provided with the package.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Eric Young (eay@mincom.oz.au)
+ *
+ * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * The licence and distribution terms for any publicly available version or
+ * derivative of this code cannot be changed. i.e. this code cannot simply be
+ * copied and put under another distribution licence
+ * [including the GNU Public Licence.]
+ */
+
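+/*
+ * The des_skb tables fold the PC-2 compression permutation into eight
+ * 64-entry lookups: each sub-table scatters the six C or D bits named in
+ * its comment to their round-subkey positions, so des_set_key_unchecked()
+ * in des_setkey.c can build a round subkey with eight lookups and ORs.
+ */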
+static const DES_LONG des_skb[8][64]={
+{
+/* for C bits (numbered as per FIPS 46) 1 2 3 4 5 6 */
+0x00000000L,0x00000010L,0x20000000L,0x20000010L,
+0x00010000L,0x00010010L,0x20010000L,0x20010010L,
+0x00000800L,0x00000810L,0x20000800L,0x20000810L,
+0x00010800L,0x00010810L,0x20010800L,0x20010810L,
+0x00000020L,0x00000030L,0x20000020L,0x20000030L,
+0x00010020L,0x00010030L,0x20010020L,0x20010030L,
+0x00000820L,0x00000830L,0x20000820L,0x20000830L,
+0x00010820L,0x00010830L,0x20010820L,0x20010830L,
+0x00080000L,0x00080010L,0x20080000L,0x20080010L,
+0x00090000L,0x00090010L,0x20090000L,0x20090010L,
+0x00080800L,0x00080810L,0x20080800L,0x20080810L,
+0x00090800L,0x00090810L,0x20090800L,0x20090810L,
+0x00080020L,0x00080030L,0x20080020L,0x20080030L,
+0x00090020L,0x00090030L,0x20090020L,0x20090030L,
+0x00080820L,0x00080830L,0x20080820L,0x20080830L,
+0x00090820L,0x00090830L,0x20090820L,0x20090830L,
+},{
+/* for C bits (numbered as per FIPS 46) 7 8 10 11 12 13 */
+0x00000000L,0x02000000L,0x00002000L,0x02002000L,
+0x00200000L,0x02200000L,0x00202000L,0x02202000L,
+0x00000004L,0x02000004L,0x00002004L,0x02002004L,
+0x00200004L,0x02200004L,0x00202004L,0x02202004L,
+0x00000400L,0x02000400L,0x00002400L,0x02002400L,
+0x00200400L,0x02200400L,0x00202400L,0x02202400L,
+0x00000404L,0x02000404L,0x00002404L,0x02002404L,
+0x00200404L,0x02200404L,0x00202404L,0x02202404L,
+0x10000000L,0x12000000L,0x10002000L,0x12002000L,
+0x10200000L,0x12200000L,0x10202000L,0x12202000L,
+0x10000004L,0x12000004L,0x10002004L,0x12002004L,
+0x10200004L,0x12200004L,0x10202004L,0x12202004L,
+0x10000400L,0x12000400L,0x10002400L,0x12002400L,
+0x10200400L,0x12200400L,0x10202400L,0x12202400L,
+0x10000404L,0x12000404L,0x10002404L,0x12002404L,
+0x10200404L,0x12200404L,0x10202404L,0x12202404L,
+},{
+/* for C bits (numbered as per FIPS 46) 14 15 16 17 19 20 */
+0x00000000L,0x00000001L,0x00040000L,0x00040001L,
+0x01000000L,0x01000001L,0x01040000L,0x01040001L,
+0x00000002L,0x00000003L,0x00040002L,0x00040003L,
+0x01000002L,0x01000003L,0x01040002L,0x01040003L,
+0x00000200L,0x00000201L,0x00040200L,0x00040201L,
+0x01000200L,0x01000201L,0x01040200L,0x01040201L,
+0x00000202L,0x00000203L,0x00040202L,0x00040203L,
+0x01000202L,0x01000203L,0x01040202L,0x01040203L,
+0x08000000L,0x08000001L,0x08040000L,0x08040001L,
+0x09000000L,0x09000001L,0x09040000L,0x09040001L,
+0x08000002L,0x08000003L,0x08040002L,0x08040003L,
+0x09000002L,0x09000003L,0x09040002L,0x09040003L,
+0x08000200L,0x08000201L,0x08040200L,0x08040201L,
+0x09000200L,0x09000201L,0x09040200L,0x09040201L,
+0x08000202L,0x08000203L,0x08040202L,0x08040203L,
+0x09000202L,0x09000203L,0x09040202L,0x09040203L,
+},{
+/* for C bits (numbered as per FIPS 46) 21 23 24 26 27 28 */
+0x00000000L,0x00100000L,0x00000100L,0x00100100L,
+0x00000008L,0x00100008L,0x00000108L,0x00100108L,
+0x00001000L,0x00101000L,0x00001100L,0x00101100L,
+0x00001008L,0x00101008L,0x00001108L,0x00101108L,
+0x04000000L,0x04100000L,0x04000100L,0x04100100L,
+0x04000008L,0x04100008L,0x04000108L,0x04100108L,
+0x04001000L,0x04101000L,0x04001100L,0x04101100L,
+0x04001008L,0x04101008L,0x04001108L,0x04101108L,
+0x00020000L,0x00120000L,0x00020100L,0x00120100L,
+0x00020008L,0x00120008L,0x00020108L,0x00120108L,
+0x00021000L,0x00121000L,0x00021100L,0x00121100L,
+0x00021008L,0x00121008L,0x00021108L,0x00121108L,
+0x04020000L,0x04120000L,0x04020100L,0x04120100L,
+0x04020008L,0x04120008L,0x04020108L,0x04120108L,
+0x04021000L,0x04121000L,0x04021100L,0x04121100L,
+0x04021008L,0x04121008L,0x04021108L,0x04121108L,
+},{
+/* for D bits (numbered as per FIPS 46) 1 2 3 4 5 6 */
+0x00000000L,0x10000000L,0x00010000L,0x10010000L,
+0x00000004L,0x10000004L,0x00010004L,0x10010004L,
+0x20000000L,0x30000000L,0x20010000L,0x30010000L,
+0x20000004L,0x30000004L,0x20010004L,0x30010004L,
+0x00100000L,0x10100000L,0x00110000L,0x10110000L,
+0x00100004L,0x10100004L,0x00110004L,0x10110004L,
+0x20100000L,0x30100000L,0x20110000L,0x30110000L,
+0x20100004L,0x30100004L,0x20110004L,0x30110004L,
+0x00001000L,0x10001000L,0x00011000L,0x10011000L,
+0x00001004L,0x10001004L,0x00011004L,0x10011004L,
+0x20001000L,0x30001000L,0x20011000L,0x30011000L,
+0x20001004L,0x30001004L,0x20011004L,0x30011004L,
+0x00101000L,0x10101000L,0x00111000L,0x10111000L,
+0x00101004L,0x10101004L,0x00111004L,0x10111004L,
+0x20101000L,0x30101000L,0x20111000L,0x30111000L,
+0x20101004L,0x30101004L,0x20111004L,0x30111004L,
+},{
+/* for D bits (numbered as per FIPS 46) 8 9 11 12 13 14 */
+0x00000000L,0x08000000L,0x00000008L,0x08000008L,
+0x00000400L,0x08000400L,0x00000408L,0x08000408L,
+0x00020000L,0x08020000L,0x00020008L,0x08020008L,
+0x00020400L,0x08020400L,0x00020408L,0x08020408L,
+0x00000001L,0x08000001L,0x00000009L,0x08000009L,
+0x00000401L,0x08000401L,0x00000409L,0x08000409L,
+0x00020001L,0x08020001L,0x00020009L,0x08020009L,
+0x00020401L,0x08020401L,0x00020409L,0x08020409L,
+0x02000000L,0x0A000000L,0x02000008L,0x0A000008L,
+0x02000400L,0x0A000400L,0x02000408L,0x0A000408L,
+0x02020000L,0x0A020000L,0x02020008L,0x0A020008L,
+0x02020400L,0x0A020400L,0x02020408L,0x0A020408L,
+0x02000001L,0x0A000001L,0x02000009L,0x0A000009L,
+0x02000401L,0x0A000401L,0x02000409L,0x0A000409L,
+0x02020001L,0x0A020001L,0x02020009L,0x0A020009L,
+0x02020401L,0x0A020401L,0x02020409L,0x0A020409L,
+},{
+/* for D bits (numbered as per FIPS 46) 16 17 18 19 20 21 */
+0x00000000L,0x00000100L,0x00080000L,0x00080100L,
+0x01000000L,0x01000100L,0x01080000L,0x01080100L,
+0x00000010L,0x00000110L,0x00080010L,0x00080110L,
+0x01000010L,0x01000110L,0x01080010L,0x01080110L,
+0x00200000L,0x00200100L,0x00280000L,0x00280100L,
+0x01200000L,0x01200100L,0x01280000L,0x01280100L,
+0x00200010L,0x00200110L,0x00280010L,0x00280110L,
+0x01200010L,0x01200110L,0x01280010L,0x01280110L,
+0x00000200L,0x00000300L,0x00080200L,0x00080300L,
+0x01000200L,0x01000300L,0x01080200L,0x01080300L,
+0x00000210L,0x00000310L,0x00080210L,0x00080310L,
+0x01000210L,0x01000310L,0x01080210L,0x01080310L,
+0x00200200L,0x00200300L,0x00280200L,0x00280300L,
+0x01200200L,0x01200300L,0x01280200L,0x01280300L,
+0x00200210L,0x00200310L,0x00280210L,0x00280310L,
+0x01200210L,0x01200310L,0x01280210L,0x01280310L,
+},{
+/* for D bits (numbered as per FIPS 46) 22 23 24 25 27 28 */
+0x00000000L,0x04000000L,0x00040000L,0x04040000L,
+0x00000002L,0x04000002L,0x00040002L,0x04040002L,
+0x00002000L,0x04002000L,0x00042000L,0x04042000L,
+0x00002002L,0x04002002L,0x00042002L,0x04042002L,
+0x00000020L,0x04000020L,0x00040020L,0x04040020L,
+0x00000022L,0x04000022L,0x00040022L,0x04040022L,
+0x00002020L,0x04002020L,0x00042020L,0x04042020L,
+0x00002022L,0x04002022L,0x00042022L,0x04042022L,
+0x00000800L,0x04000800L,0x00040800L,0x04040800L,
+0x00000802L,0x04000802L,0x00040802L,0x04040802L,
+0x00002800L,0x04002800L,0x00042800L,0x04042800L,
+0x00002802L,0x04002802L,0x00042802L,0x04042802L,
+0x00000820L,0x04000820L,0x00040820L,0x04040820L,
+0x00000822L,0x04000822L,0x00040822L,0x04040822L,
+0x00002820L,0x04002820L,0x00042820L,0x04042820L,
+0x00002822L,0x04002822L,0x00042822L,0x04042822L,
+}};
diff --git a/rtems/freebsd/crypto/des/spr.h b/rtems/freebsd/crypto/des/spr.h
new file mode 100644
index 00000000..129b8277
--- /dev/null
+++ b/rtems/freebsd/crypto/des/spr.h
@@ -0,0 +1,207 @@
+/* $FreeBSD$ */
+/* $KAME: spr.h,v 1.4 2001/09/10 04:03:58 itojun Exp $ */
+
+/* crypto/des/spr.h */
+/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
+ * All rights reserved.
+ *
+ * This package is an SSL implementation written
+ * by Eric Young (eay@cryptsoft.com).
+ * The implementation was written so as to conform with Netscape's SSL.
+ *
+ * This library is free for commercial and non-commercial use as long as
+ * the following conditions are adhered to.  The following conditions
+ * apply to all code found in this distribution, be it the RC4, RSA,
+ * lhash, DES, etc., code; not just the SSL code. The SSL documentation
+ * included with this distribution is covered by the same copyright terms
+ * except that the holder is Tim Hudson (tjh@cryptsoft.com).
+ *
+ * Copyright remains Eric Young's, and as such any Copyright notices in
+ * the code are not to be removed.
+ * If this package is used in a product, Eric Young should be given attribution
+ * as the author of the parts of the library used.
+ * This can be in the form of a textual message at program startup or
+ * in documentation (online or textual) provided with the package.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * "This product includes cryptographic software written by
+ * Eric Young (eay@cryptsoft.com)"
+ * The word 'cryptographic' can be left out if the routines from the library
+ * being used are not cryptographic related :-).
+ * 4. If you include any Windows specific code (or a derivative thereof) from
+ * the apps directory (application code) you must include an acknowledgement:
+ * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
+ *
+ * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * The licence and distribution terms for any publicly available version or
+ * derivative of this code cannot be changed. i.e. this code cannot simply be
+ * copied and put under another distribution licence
+ * [including the GNU Public Licence.]
+ */
+
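+/*
+ * Combined S-box/permutation tables: des_SPtrans[n][i] is the 32-bit
+ * result of feeding the 6-bit input i through S-box n and then the P
+ * permutation, so the DES round function reduces to eight table lookups
+ * XORed together.
+ */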
+const DES_LONG des_SPtrans[8][64]={
+{
+/* nibble 0 */
+0x02080800L, 0x00080000L, 0x02000002L, 0x02080802L,
+0x02000000L, 0x00080802L, 0x00080002L, 0x02000002L,
+0x00080802L, 0x02080800L, 0x02080000L, 0x00000802L,
+0x02000802L, 0x02000000L, 0x00000000L, 0x00080002L,
+0x00080000L, 0x00000002L, 0x02000800L, 0x00080800L,
+0x02080802L, 0x02080000L, 0x00000802L, 0x02000800L,
+0x00000002L, 0x00000800L, 0x00080800L, 0x02080002L,
+0x00000800L, 0x02000802L, 0x02080002L, 0x00000000L,
+0x00000000L, 0x02080802L, 0x02000800L, 0x00080002L,
+0x02080800L, 0x00080000L, 0x00000802L, 0x02000800L,
+0x02080002L, 0x00000800L, 0x00080800L, 0x02000002L,
+0x00080802L, 0x00000002L, 0x02000002L, 0x02080000L,
+0x02080802L, 0x00080800L, 0x02080000L, 0x02000802L,
+0x02000000L, 0x00000802L, 0x00080002L, 0x00000000L,
+0x00080000L, 0x02000000L, 0x02000802L, 0x02080800L,
+0x00000002L, 0x02080002L, 0x00000800L, 0x00080802L,
+},{
+/* nibble 1 */
+0x40108010L, 0x00000000L, 0x00108000L, 0x40100000L,
+0x40000010L, 0x00008010L, 0x40008000L, 0x00108000L,
+0x00008000L, 0x40100010L, 0x00000010L, 0x40008000L,
+0x00100010L, 0x40108000L, 0x40100000L, 0x00000010L,
+0x00100000L, 0x40008010L, 0x40100010L, 0x00008000L,
+0x00108010L, 0x40000000L, 0x00000000L, 0x00100010L,
+0x40008010L, 0x00108010L, 0x40108000L, 0x40000010L,
+0x40000000L, 0x00100000L, 0x00008010L, 0x40108010L,
+0x00100010L, 0x40108000L, 0x40008000L, 0x00108010L,
+0x40108010L, 0x00100010L, 0x40000010L, 0x00000000L,
+0x40000000L, 0x00008010L, 0x00100000L, 0x40100010L,
+0x00008000L, 0x40000000L, 0x00108010L, 0x40008010L,
+0x40108000L, 0x00008000L, 0x00000000L, 0x40000010L,
+0x00000010L, 0x40108010L, 0x00108000L, 0x40100000L,
+0x40100010L, 0x00100000L, 0x00008010L, 0x40008000L,
+0x40008010L, 0x00000010L, 0x40100000L, 0x00108000L,
+},{
+/* nibble 2 */
+0x04000001L, 0x04040100L, 0x00000100L, 0x04000101L,
+0x00040001L, 0x04000000L, 0x04000101L, 0x00040100L,
+0x04000100L, 0x00040000L, 0x04040000L, 0x00000001L,
+0x04040101L, 0x00000101L, 0x00000001L, 0x04040001L,
+0x00000000L, 0x00040001L, 0x04040100L, 0x00000100L,
+0x00000101L, 0x04040101L, 0x00040000L, 0x04000001L,
+0x04040001L, 0x04000100L, 0x00040101L, 0x04040000L,
+0x00040100L, 0x00000000L, 0x04000000L, 0x00040101L,
+0x04040100L, 0x00000100L, 0x00000001L, 0x00040000L,
+0x00000101L, 0x00040001L, 0x04040000L, 0x04000101L,
+0x00000000L, 0x04040100L, 0x00040100L, 0x04040001L,
+0x00040001L, 0x04000000L, 0x04040101L, 0x00000001L,
+0x00040101L, 0x04000001L, 0x04000000L, 0x04040101L,
+0x00040000L, 0x04000100L, 0x04000101L, 0x00040100L,
+0x04000100L, 0x00000000L, 0x04040001L, 0x00000101L,
+0x04000001L, 0x00040101L, 0x00000100L, 0x04040000L,
+},{
+/* nibble 3 */
+0x00401008L, 0x10001000L, 0x00000008L, 0x10401008L,
+0x00000000L, 0x10400000L, 0x10001008L, 0x00400008L,
+0x10401000L, 0x10000008L, 0x10000000L, 0x00001008L,
+0x10000008L, 0x00401008L, 0x00400000L, 0x10000000L,
+0x10400008L, 0x00401000L, 0x00001000L, 0x00000008L,
+0x00401000L, 0x10001008L, 0x10400000L, 0x00001000L,
+0x00001008L, 0x00000000L, 0x00400008L, 0x10401000L,
+0x10001000L, 0x10400008L, 0x10401008L, 0x00400000L,
+0x10400008L, 0x00001008L, 0x00400000L, 0x10000008L,
+0x00401000L, 0x10001000L, 0x00000008L, 0x10400000L,
+0x10001008L, 0x00000000L, 0x00001000L, 0x00400008L,
+0x00000000L, 0x10400008L, 0x10401000L, 0x00001000L,
+0x10000000L, 0x10401008L, 0x00401008L, 0x00400000L,
+0x10401008L, 0x00000008L, 0x10001000L, 0x00401008L,
+0x00400008L, 0x00401000L, 0x10400000L, 0x10001008L,
+0x00001008L, 0x10000000L, 0x10000008L, 0x10401000L,
+},{
+/* nibble 4 */
+0x08000000L, 0x00010000L, 0x00000400L, 0x08010420L,
+0x08010020L, 0x08000400L, 0x00010420L, 0x08010000L,
+0x00010000L, 0x00000020L, 0x08000020L, 0x00010400L,
+0x08000420L, 0x08010020L, 0x08010400L, 0x00000000L,
+0x00010400L, 0x08000000L, 0x00010020L, 0x00000420L,
+0x08000400L, 0x00010420L, 0x00000000L, 0x08000020L,
+0x00000020L, 0x08000420L, 0x08010420L, 0x00010020L,
+0x08010000L, 0x00000400L, 0x00000420L, 0x08010400L,
+0x08010400L, 0x08000420L, 0x00010020L, 0x08010000L,
+0x00010000L, 0x00000020L, 0x08000020L, 0x08000400L,
+0x08000000L, 0x00010400L, 0x08010420L, 0x00000000L,
+0x00010420L, 0x08000000L, 0x00000400L, 0x00010020L,
+0x08000420L, 0x00000400L, 0x00000000L, 0x08010420L,
+0x08010020L, 0x08010400L, 0x00000420L, 0x00010000L,
+0x00010400L, 0x08010020L, 0x08000400L, 0x00000420L,
+0x00000020L, 0x00010420L, 0x08010000L, 0x08000020L,
+},{
+/* nibble 5 */
+0x80000040L, 0x00200040L, 0x00000000L, 0x80202000L,
+0x00200040L, 0x00002000L, 0x80002040L, 0x00200000L,
+0x00002040L, 0x80202040L, 0x00202000L, 0x80000000L,
+0x80002000L, 0x80000040L, 0x80200000L, 0x00202040L,
+0x00200000L, 0x80002040L, 0x80200040L, 0x00000000L,
+0x00002000L, 0x00000040L, 0x80202000L, 0x80200040L,
+0x80202040L, 0x80200000L, 0x80000000L, 0x00002040L,
+0x00000040L, 0x00202000L, 0x00202040L, 0x80002000L,
+0x00002040L, 0x80000000L, 0x80002000L, 0x00202040L,
+0x80202000L, 0x00200040L, 0x00000000L, 0x80002000L,
+0x80000000L, 0x00002000L, 0x80200040L, 0x00200000L,
+0x00200040L, 0x80202040L, 0x00202000L, 0x00000040L,
+0x80202040L, 0x00202000L, 0x00200000L, 0x80002040L,
+0x80000040L, 0x80200000L, 0x00202040L, 0x00000000L,
+0x00002000L, 0x80000040L, 0x80002040L, 0x80202000L,
+0x80200000L, 0x00002040L, 0x00000040L, 0x80200040L,
+},{
+/* nibble 6 */
+0x00004000L, 0x00000200L, 0x01000200L, 0x01000004L,
+0x01004204L, 0x00004004L, 0x00004200L, 0x00000000L,
+0x01000000L, 0x01000204L, 0x00000204L, 0x01004000L,
+0x00000004L, 0x01004200L, 0x01004000L, 0x00000204L,
+0x01000204L, 0x00004000L, 0x00004004L, 0x01004204L,
+0x00000000L, 0x01000200L, 0x01000004L, 0x00004200L,
+0x01004004L, 0x00004204L, 0x01004200L, 0x00000004L,
+0x00004204L, 0x01004004L, 0x00000200L, 0x01000000L,
+0x00004204L, 0x01004000L, 0x01004004L, 0x00000204L,
+0x00004000L, 0x00000200L, 0x01000000L, 0x01004004L,
+0x01000204L, 0x00004204L, 0x00004200L, 0x00000000L,
+0x00000200L, 0x01000004L, 0x00000004L, 0x01000200L,
+0x00000000L, 0x01000204L, 0x01000200L, 0x00004200L,
+0x00000204L, 0x00004000L, 0x01004204L, 0x01000000L,
+0x01004200L, 0x00000004L, 0x00004004L, 0x01004204L,
+0x01000004L, 0x01004200L, 0x01004000L, 0x00004004L,
+},{
+/* nibble 7 */
+0x20800080L, 0x20820000L, 0x00020080L, 0x00000000L,
+0x20020000L, 0x00800080L, 0x20800000L, 0x20820080L,
+0x00000080L, 0x20000000L, 0x00820000L, 0x00020080L,
+0x00820080L, 0x20020080L, 0x20000080L, 0x20800000L,
+0x00020000L, 0x00820080L, 0x00800080L, 0x20020000L,
+0x20820080L, 0x20000080L, 0x00000000L, 0x00820000L,
+0x20000000L, 0x00800000L, 0x20020080L, 0x20800080L,
+0x00800000L, 0x00020000L, 0x20820000L, 0x00000080L,
+0x00800000L, 0x00020000L, 0x20000080L, 0x20820080L,
+0x00020080L, 0x20000000L, 0x00000000L, 0x00820000L,
+0x20800080L, 0x20020080L, 0x20020000L, 0x00800080L,
+0x20820000L, 0x00000080L, 0x00800080L, 0x20020000L,
+0x20820080L, 0x00800000L, 0x20800000L, 0x20000080L,
+0x00820000L, 0x00020080L, 0x20020080L, 0x20800000L,
+0x00000080L, 0x20820000L, 0x00820080L, 0x00000000L,
+0x20000000L, 0x20800080L, 0x00020000L, 0x00820080L,
+}};
diff --git a/rtems/freebsd/crypto/rc4/rc4.c b/rtems/freebsd/crypto/rc4/rc4.c
new file mode 100644
index 00000000..16f16d61
--- /dev/null
+++ b/rtems/freebsd/crypto/rc4/rc4.c
@@ -0,0 +1,130 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*
+ * rc4.c
+ *
+ * Copyright (c) 1996-2000 Whistle Communications, Inc.
+ * All rights reserved.
+ *
+ * Subject to the following obligations and disclaimer of warranty, use and
+ * redistribution of this software, in source or object code forms, with or
+ * without modifications are expressly permitted by Whistle Communications;
+ * provided, however, that:
+ * 1. Any and all reproductions of the source or object code must include the
+ * copyright notice above and the following disclaimer of warranties; and
+ * 2. No rights are granted, in any manner or form, to use Whistle
+ * Communications, Inc. trademarks, including the mark "WHISTLE
+ * COMMUNICATIONS" on advertising, endorsements, or otherwise except as
+ * such appears in the above copyright notice or in the software.
+ *
+ * THIS SOFTWARE IS BEING PROVIDED BY WHISTLE COMMUNICATIONS "AS IS", AND
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, WHISTLE COMMUNICATIONS MAKES NO
+ * REPRESENTATIONS OR WARRANTIES, EXPRESS OR IMPLIED, REGARDING THIS SOFTWARE,
+ * INCLUDING WITHOUT LIMITATION, ANY AND ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT.
+ * WHISTLE COMMUNICATIONS DOES NOT WARRANT, GUARANTEE, OR MAKE ANY
+ * REPRESENTATIONS REGARDING THE USE OF, OR THE RESULTS OF THE USE OF THIS
+ * SOFTWARE IN TERMS OF ITS CORRECTNESS, ACCURACY, RELIABILITY OR OTHERWISE.
+ * IN NO EVENT SHALL WHISTLE COMMUNICATIONS BE LIABLE FOR ANY DAMAGES
+ * RESULTING FROM OR ARISING OUT OF ANY USE OF THIS SOFTWARE, INCLUDING
+ * WITHOUT LIMITATION, ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
+ * PUNITIVE, OR CONSEQUENTIAL DAMAGES, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES, LOSS OF USE, DATA OR PROFITS, HOWEVER CAUSED AND UNDER ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF WHISTLE COMMUNICATIONS IS ADVISED OF THE POSSIBILITY
+ * OF SUCH DAMAGE.
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/module.h>
+#include <rtems/freebsd/sys/types.h>
+#include <rtems/freebsd/crypto/rc4/rc4.h>
+
+static __inline void
+swap_bytes(u_char *a, u_char *b)
+{
+ u_char temp;
+
+ temp = *a;
+ *a = *b;
+ *b = temp;
+}
+
+/*
+ * Initialize an RC4 state buffer using the supplied key,
+ * which can have arbitrary length.
+ */
+void
+rc4_init(struct rc4_state *const state, const u_char *key, int keylen)
+{
+ u_char j;
+ int i, k;
+
+ /* Initialize state with identity permutation */
+ for (i = 0; i < 256; i++)
+ state->perm[i] = (u_char)i;
+ state->index1 = 0;
+ state->index2 = 0;
+
+ /* Randomize the permutation using key data */
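+	/* (j is a u_char, so the additions below wrap modulo 256) */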
+ for (j = i = k = 0; i < 256; i++) {
+ j += state->perm[i] + key[k];
+ swap_bytes(&state->perm[i], &state->perm[j]);
+ if (++k >= keylen)
+ k = 0;
+ }
+}
+
+/*
+ * Encrypt some data using the supplied RC4 state buffer.
+ * The input and output buffers may be the same buffer.
+ * Since RC4 is a stream cipher, this function is used
+ * for both encryption and decryption.
+ */
+void
+rc4_crypt(struct rc4_state *const state,
+ const u_char *inbuf, u_char *outbuf, int buflen)
+{
+ int i;
+ u_char j;
+
+ for (i = 0; i < buflen; i++) {
+
+		/* Update modification indices */
+ state->index1++;
+ state->index2 += state->perm[state->index1];
+
+ /* Modify permutation */
+ swap_bytes(&state->perm[state->index1],
+ &state->perm[state->index2]);
+
+ /* Encrypt/decrypt next byte */
+ j = state->perm[state->index1] + state->perm[state->index2];
+ outbuf[i] = inbuf[i] ^ state->perm[j];
+ }
+}
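+
+/*
+ * Illustrative sketch (added for documentation; not part of the original
+ * file): because rc4_crypt() XORs the data with the keystream, running it
+ * twice from identically initialized states recovers the input.  The
+ * function name, key and message bytes are made up.
+ */
+static void __unused
+rc4_example_roundtrip(void)
+{
+	struct rc4_state enc, dec;
+	u_char key[] = { 0x01, 0x02, 0x03, 0x04, 0x05 };
+	u_char msg[] = "attack at dawn";
+	u_char ct[sizeof(msg)], pt[sizeof(msg)];
+
+	rc4_init(&enc, key, sizeof(key));
+	rc4_crypt(&enc, msg, ct, sizeof(msg));	/* encrypt */
+
+	rc4_init(&dec, key, sizeof(key));	/* same key, fresh state */
+	rc4_crypt(&dec, ct, pt, sizeof(ct));	/* decrypt: pt now equals msg */
+}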
+
+static int
+rc4_modevent(module_t mod, int type, void *unused)
+{
+ switch (type) {
+ case MOD_LOAD:
+ return 0;
+ case MOD_UNLOAD:
+ return 0;
+ }
+ return EINVAL;
+}
+
+static moduledata_t rc4_mod = {
+ "rc4",
+ rc4_modevent,
+ 0
+};
+DECLARE_MODULE(rc4, rc4_mod, SI_SUB_DRIVERS, SI_ORDER_FIRST);
+MODULE_VERSION(rc4, 1);
diff --git a/rtems/freebsd/crypto/rc4/rc4.h b/rtems/freebsd/crypto/rc4/rc4.h
new file mode 100644
index 00000000..90723ddb
--- /dev/null
+++ b/rtems/freebsd/crypto/rc4/rc4.h
@@ -0,0 +1,54 @@
+
+/*
+ * rc4.h
+ *
+ * Copyright (c) 1996-2000 Whistle Communications, Inc.
+ * All rights reserved.
+ *
+ * Subject to the following obligations and disclaimer of warranty, use and
+ * redistribution of this software, in source or object code forms, with or
+ * without modifications are expressly permitted by Whistle Communications;
+ * provided, however, that:
+ * 1. Any and all reproductions of the source or object code must include the
+ * copyright notice above and the following disclaimer of warranties; and
+ * 2. No rights are granted, in any manner or form, to use Whistle
+ * Communications, Inc. trademarks, including the mark "WHISTLE
+ * COMMUNICATIONS" on advertising, endorsements, or otherwise except as
+ * such appears in the above copyright notice or in the software.
+ *
+ * THIS SOFTWARE IS BEING PROVIDED BY WHISTLE COMMUNICATIONS "AS IS", AND
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, WHISTLE COMMUNICATIONS MAKES NO
+ * REPRESENTATIONS OR WARRANTIES, EXPRESS OR IMPLIED, REGARDING THIS SOFTWARE,
+ * INCLUDING WITHOUT LIMITATION, ANY AND ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT.
+ * WHISTLE COMMUNICATIONS DOES NOT WARRANT, GUARANTEE, OR MAKE ANY
+ * REPRESENTATIONS REGARDING THE USE OF, OR THE RESULTS OF THE USE OF THIS
+ * SOFTWARE IN TERMS OF ITS CORRECTNESS, ACCURACY, RELIABILITY OR OTHERWISE.
+ * IN NO EVENT SHALL WHISTLE COMMUNICATIONS BE LIABLE FOR ANY DAMAGES
+ * RESULTING FROM OR ARISING OUT OF ANY USE OF THIS SOFTWARE, INCLUDING
+ * WITHOUT LIMITATION, ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
+ * PUNITIVE, OR CONSEQUENTIAL DAMAGES, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES, LOSS OF USE, DATA OR PROFITS, HOWEVER CAUSED AND UNDER ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF WHISTLE COMMUNICATIONS IS ADVISED OF THE POSSIBILITY
+ * OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _SYS_CRYPTO_RC4_RC4_HH_
+#define _SYS_CRYPTO_RC4_RC4_HH_
+
+struct rc4_state {
+ u_char perm[256];
+ u_char index1;
+ u_char index2;
+};
+
+extern void rc4_init(struct rc4_state *state, const u_char *key, int keylen);
+extern void rc4_crypt(struct rc4_state *state,
+ const u_char *inbuf, u_char *outbuf, int buflen);
+
+#endif
+
diff --git a/rtems/freebsd/crypto/rijndael/rijndael-alg-fst.c b/rtems/freebsd/crypto/rijndael/rijndael-alg-fst.c
new file mode 100644
index 00000000..daeedee4
--- /dev/null
+++ b/rtems/freebsd/crypto/rijndael/rijndael-alg-fst.c
@@ -0,0 +1,1225 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/* $KAME: rijndael-alg-fst.c,v 1.10 2003/07/15 10:47:16 itojun Exp $ */
+/**
+ * rijndael-alg-fst.c
+ *
+ * @version 3.0 (December 2000)
+ *
+ * Optimised ANSI C code for the Rijndael cipher (now AES)
+ *
+ * @author Vincent Rijmen <vincent.rijmen@esat.kuleuven.ac.be>
+ * @author Antoon Bosselaers <antoon.bosselaers@esat.kuleuven.ac.be>
+ * @author Paulo Barreto <paulo.barreto@terra.com.br>
+ *
+ * This code is hereby placed in the public domain.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ''AS IS'' AND ANY EXPRESS
+ * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/freebsd/sys/types.h>
+#ifdef _KERNEL
+#include <rtems/freebsd/sys/systm.h>
+#else
+#include <rtems/freebsd/string.h>
+#include <rtems/freebsd/assert.h>
+#define KASSERT(exp, msg) assert(exp)
+#endif
+
+#include <rtems/freebsd/crypto/rijndael/rijndael.h>
+#include <rtems/freebsd/crypto/rijndael/rijndael_local.h>
+
+/*
+Te0[x] = S [x].[02, 01, 01, 03];
+Te1[x] = S [x].[03, 02, 01, 01];
+Te2[x] = S [x].[01, 03, 02, 01];
+Te3[x] = S [x].[01, 01, 03, 02];
+Te4[x] = S [x].[01, 01, 01, 01];
+
+Td0[x] = Si[x].[0e, 09, 0d, 0b];
+Td1[x] = Si[x].[0b, 0e, 09, 0d];
+Td2[x] = Si[x].[0d, 0b, 0e, 09];
+Td3[x] = Si[x].[09, 0d, 0b, 0e];
+Td4[x] = Si[x].[01, 01, 01, 01];
+
+(An illustrative derivation of the Te0 packing is sketched after the
+Te4 table below.)
+*/
+
+static const u32 Te0[256] = {
+ 0xc66363a5U, 0xf87c7c84U, 0xee777799U, 0xf67b7b8dU,
+ 0xfff2f20dU, 0xd66b6bbdU, 0xde6f6fb1U, 0x91c5c554U,
+ 0x60303050U, 0x02010103U, 0xce6767a9U, 0x562b2b7dU,
+ 0xe7fefe19U, 0xb5d7d762U, 0x4dababe6U, 0xec76769aU,
+ 0x8fcaca45U, 0x1f82829dU, 0x89c9c940U, 0xfa7d7d87U,
+ 0xeffafa15U, 0xb25959ebU, 0x8e4747c9U, 0xfbf0f00bU,
+ 0x41adadecU, 0xb3d4d467U, 0x5fa2a2fdU, 0x45afafeaU,
+ 0x239c9cbfU, 0x53a4a4f7U, 0xe4727296U, 0x9bc0c05bU,
+ 0x75b7b7c2U, 0xe1fdfd1cU, 0x3d9393aeU, 0x4c26266aU,
+ 0x6c36365aU, 0x7e3f3f41U, 0xf5f7f702U, 0x83cccc4fU,
+ 0x6834345cU, 0x51a5a5f4U, 0xd1e5e534U, 0xf9f1f108U,
+ 0xe2717193U, 0xabd8d873U, 0x62313153U, 0x2a15153fU,
+ 0x0804040cU, 0x95c7c752U, 0x46232365U, 0x9dc3c35eU,
+ 0x30181828U, 0x379696a1U, 0x0a05050fU, 0x2f9a9ab5U,
+ 0x0e070709U, 0x24121236U, 0x1b80809bU, 0xdfe2e23dU,
+ 0xcdebeb26U, 0x4e272769U, 0x7fb2b2cdU, 0xea75759fU,
+ 0x1209091bU, 0x1d83839eU, 0x582c2c74U, 0x341a1a2eU,
+ 0x361b1b2dU, 0xdc6e6eb2U, 0xb45a5aeeU, 0x5ba0a0fbU,
+ 0xa45252f6U, 0x763b3b4dU, 0xb7d6d661U, 0x7db3b3ceU,
+ 0x5229297bU, 0xdde3e33eU, 0x5e2f2f71U, 0x13848497U,
+ 0xa65353f5U, 0xb9d1d168U, 0x00000000U, 0xc1eded2cU,
+ 0x40202060U, 0xe3fcfc1fU, 0x79b1b1c8U, 0xb65b5bedU,
+ 0xd46a6abeU, 0x8dcbcb46U, 0x67bebed9U, 0x7239394bU,
+ 0x944a4adeU, 0x984c4cd4U, 0xb05858e8U, 0x85cfcf4aU,
+ 0xbbd0d06bU, 0xc5efef2aU, 0x4faaaae5U, 0xedfbfb16U,
+ 0x864343c5U, 0x9a4d4dd7U, 0x66333355U, 0x11858594U,
+ 0x8a4545cfU, 0xe9f9f910U, 0x04020206U, 0xfe7f7f81U,
+ 0xa05050f0U, 0x783c3c44U, 0x259f9fbaU, 0x4ba8a8e3U,
+ 0xa25151f3U, 0x5da3a3feU, 0x804040c0U, 0x058f8f8aU,
+ 0x3f9292adU, 0x219d9dbcU, 0x70383848U, 0xf1f5f504U,
+ 0x63bcbcdfU, 0x77b6b6c1U, 0xafdada75U, 0x42212163U,
+ 0x20101030U, 0xe5ffff1aU, 0xfdf3f30eU, 0xbfd2d26dU,
+ 0x81cdcd4cU, 0x180c0c14U, 0x26131335U, 0xc3ecec2fU,
+ 0xbe5f5fe1U, 0x359797a2U, 0x884444ccU, 0x2e171739U,
+ 0x93c4c457U, 0x55a7a7f2U, 0xfc7e7e82U, 0x7a3d3d47U,
+ 0xc86464acU, 0xba5d5de7U, 0x3219192bU, 0xe6737395U,
+ 0xc06060a0U, 0x19818198U, 0x9e4f4fd1U, 0xa3dcdc7fU,
+ 0x44222266U, 0x542a2a7eU, 0x3b9090abU, 0x0b888883U,
+ 0x8c4646caU, 0xc7eeee29U, 0x6bb8b8d3U, 0x2814143cU,
+ 0xa7dede79U, 0xbc5e5ee2U, 0x160b0b1dU, 0xaddbdb76U,
+ 0xdbe0e03bU, 0x64323256U, 0x743a3a4eU, 0x140a0a1eU,
+ 0x924949dbU, 0x0c06060aU, 0x4824246cU, 0xb85c5ce4U,
+ 0x9fc2c25dU, 0xbdd3d36eU, 0x43acacefU, 0xc46262a6U,
+ 0x399191a8U, 0x319595a4U, 0xd3e4e437U, 0xf279798bU,
+ 0xd5e7e732U, 0x8bc8c843U, 0x6e373759U, 0xda6d6db7U,
+ 0x018d8d8cU, 0xb1d5d564U, 0x9c4e4ed2U, 0x49a9a9e0U,
+ 0xd86c6cb4U, 0xac5656faU, 0xf3f4f407U, 0xcfeaea25U,
+ 0xca6565afU, 0xf47a7a8eU, 0x47aeaee9U, 0x10080818U,
+ 0x6fbabad5U, 0xf0787888U, 0x4a25256fU, 0x5c2e2e72U,
+ 0x381c1c24U, 0x57a6a6f1U, 0x73b4b4c7U, 0x97c6c651U,
+ 0xcbe8e823U, 0xa1dddd7cU, 0xe874749cU, 0x3e1f1f21U,
+ 0x964b4bddU, 0x61bdbddcU, 0x0d8b8b86U, 0x0f8a8a85U,
+ 0xe0707090U, 0x7c3e3e42U, 0x71b5b5c4U, 0xcc6666aaU,
+ 0x904848d8U, 0x06030305U, 0xf7f6f601U, 0x1c0e0e12U,
+ 0xc26161a3U, 0x6a35355fU, 0xae5757f9U, 0x69b9b9d0U,
+ 0x17868691U, 0x99c1c158U, 0x3a1d1d27U, 0x279e9eb9U,
+ 0xd9e1e138U, 0xebf8f813U, 0x2b9898b3U, 0x22111133U,
+ 0xd26969bbU, 0xa9d9d970U, 0x078e8e89U, 0x339494a7U,
+ 0x2d9b9bb6U, 0x3c1e1e22U, 0x15878792U, 0xc9e9e920U,
+ 0x87cece49U, 0xaa5555ffU, 0x50282878U, 0xa5dfdf7aU,
+ 0x038c8c8fU, 0x59a1a1f8U, 0x09898980U, 0x1a0d0d17U,
+ 0x65bfbfdaU, 0xd7e6e631U, 0x844242c6U, 0xd06868b8U,
+ 0x824141c3U, 0x299999b0U, 0x5a2d2d77U, 0x1e0f0f11U,
+ 0x7bb0b0cbU, 0xa85454fcU, 0x6dbbbbd6U, 0x2c16163aU,
+};
+static const u32 Te1[256] = {
+ 0xa5c66363U, 0x84f87c7cU, 0x99ee7777U, 0x8df67b7bU,
+ 0x0dfff2f2U, 0xbdd66b6bU, 0xb1de6f6fU, 0x5491c5c5U,
+ 0x50603030U, 0x03020101U, 0xa9ce6767U, 0x7d562b2bU,
+ 0x19e7fefeU, 0x62b5d7d7U, 0xe64dababU, 0x9aec7676U,
+ 0x458fcacaU, 0x9d1f8282U, 0x4089c9c9U, 0x87fa7d7dU,
+ 0x15effafaU, 0xebb25959U, 0xc98e4747U, 0x0bfbf0f0U,
+ 0xec41adadU, 0x67b3d4d4U, 0xfd5fa2a2U, 0xea45afafU,
+ 0xbf239c9cU, 0xf753a4a4U, 0x96e47272U, 0x5b9bc0c0U,
+ 0xc275b7b7U, 0x1ce1fdfdU, 0xae3d9393U, 0x6a4c2626U,
+ 0x5a6c3636U, 0x417e3f3fU, 0x02f5f7f7U, 0x4f83ccccU,
+ 0x5c683434U, 0xf451a5a5U, 0x34d1e5e5U, 0x08f9f1f1U,
+ 0x93e27171U, 0x73abd8d8U, 0x53623131U, 0x3f2a1515U,
+ 0x0c080404U, 0x5295c7c7U, 0x65462323U, 0x5e9dc3c3U,
+ 0x28301818U, 0xa1379696U, 0x0f0a0505U, 0xb52f9a9aU,
+ 0x090e0707U, 0x36241212U, 0x9b1b8080U, 0x3ddfe2e2U,
+ 0x26cdebebU, 0x694e2727U, 0xcd7fb2b2U, 0x9fea7575U,
+ 0x1b120909U, 0x9e1d8383U, 0x74582c2cU, 0x2e341a1aU,
+ 0x2d361b1bU, 0xb2dc6e6eU, 0xeeb45a5aU, 0xfb5ba0a0U,
+ 0xf6a45252U, 0x4d763b3bU, 0x61b7d6d6U, 0xce7db3b3U,
+ 0x7b522929U, 0x3edde3e3U, 0x715e2f2fU, 0x97138484U,
+ 0xf5a65353U, 0x68b9d1d1U, 0x00000000U, 0x2cc1ededU,
+ 0x60402020U, 0x1fe3fcfcU, 0xc879b1b1U, 0xedb65b5bU,
+ 0xbed46a6aU, 0x468dcbcbU, 0xd967bebeU, 0x4b723939U,
+ 0xde944a4aU, 0xd4984c4cU, 0xe8b05858U, 0x4a85cfcfU,
+ 0x6bbbd0d0U, 0x2ac5efefU, 0xe54faaaaU, 0x16edfbfbU,
+ 0xc5864343U, 0xd79a4d4dU, 0x55663333U, 0x94118585U,
+ 0xcf8a4545U, 0x10e9f9f9U, 0x06040202U, 0x81fe7f7fU,
+ 0xf0a05050U, 0x44783c3cU, 0xba259f9fU, 0xe34ba8a8U,
+ 0xf3a25151U, 0xfe5da3a3U, 0xc0804040U, 0x8a058f8fU,
+ 0xad3f9292U, 0xbc219d9dU, 0x48703838U, 0x04f1f5f5U,
+ 0xdf63bcbcU, 0xc177b6b6U, 0x75afdadaU, 0x63422121U,
+ 0x30201010U, 0x1ae5ffffU, 0x0efdf3f3U, 0x6dbfd2d2U,
+ 0x4c81cdcdU, 0x14180c0cU, 0x35261313U, 0x2fc3ececU,
+ 0xe1be5f5fU, 0xa2359797U, 0xcc884444U, 0x392e1717U,
+ 0x5793c4c4U, 0xf255a7a7U, 0x82fc7e7eU, 0x477a3d3dU,
+ 0xacc86464U, 0xe7ba5d5dU, 0x2b321919U, 0x95e67373U,
+ 0xa0c06060U, 0x98198181U, 0xd19e4f4fU, 0x7fa3dcdcU,
+ 0x66442222U, 0x7e542a2aU, 0xab3b9090U, 0x830b8888U,
+ 0xca8c4646U, 0x29c7eeeeU, 0xd36bb8b8U, 0x3c281414U,
+ 0x79a7dedeU, 0xe2bc5e5eU, 0x1d160b0bU, 0x76addbdbU,
+ 0x3bdbe0e0U, 0x56643232U, 0x4e743a3aU, 0x1e140a0aU,
+ 0xdb924949U, 0x0a0c0606U, 0x6c482424U, 0xe4b85c5cU,
+ 0x5d9fc2c2U, 0x6ebdd3d3U, 0xef43acacU, 0xa6c46262U,
+ 0xa8399191U, 0xa4319595U, 0x37d3e4e4U, 0x8bf27979U,
+ 0x32d5e7e7U, 0x438bc8c8U, 0x596e3737U, 0xb7da6d6dU,
+ 0x8c018d8dU, 0x64b1d5d5U, 0xd29c4e4eU, 0xe049a9a9U,
+ 0xb4d86c6cU, 0xfaac5656U, 0x07f3f4f4U, 0x25cfeaeaU,
+ 0xafca6565U, 0x8ef47a7aU, 0xe947aeaeU, 0x18100808U,
+ 0xd56fbabaU, 0x88f07878U, 0x6f4a2525U, 0x725c2e2eU,
+ 0x24381c1cU, 0xf157a6a6U, 0xc773b4b4U, 0x5197c6c6U,
+ 0x23cbe8e8U, 0x7ca1ddddU, 0x9ce87474U, 0x213e1f1fU,
+ 0xdd964b4bU, 0xdc61bdbdU, 0x860d8b8bU, 0x850f8a8aU,
+ 0x90e07070U, 0x427c3e3eU, 0xc471b5b5U, 0xaacc6666U,
+ 0xd8904848U, 0x05060303U, 0x01f7f6f6U, 0x121c0e0eU,
+ 0xa3c26161U, 0x5f6a3535U, 0xf9ae5757U, 0xd069b9b9U,
+ 0x91178686U, 0x5899c1c1U, 0x273a1d1dU, 0xb9279e9eU,
+ 0x38d9e1e1U, 0x13ebf8f8U, 0xb32b9898U, 0x33221111U,
+ 0xbbd26969U, 0x70a9d9d9U, 0x89078e8eU, 0xa7339494U,
+ 0xb62d9b9bU, 0x223c1e1eU, 0x92158787U, 0x20c9e9e9U,
+ 0x4987ceceU, 0xffaa5555U, 0x78502828U, 0x7aa5dfdfU,
+ 0x8f038c8cU, 0xf859a1a1U, 0x80098989U, 0x171a0d0dU,
+ 0xda65bfbfU, 0x31d7e6e6U, 0xc6844242U, 0xb8d06868U,
+ 0xc3824141U, 0xb0299999U, 0x775a2d2dU, 0x111e0f0fU,
+ 0xcb7bb0b0U, 0xfca85454U, 0xd66dbbbbU, 0x3a2c1616U,
+};
+static const u32 Te2[256] = {
+ 0x63a5c663U, 0x7c84f87cU, 0x7799ee77U, 0x7b8df67bU,
+ 0xf20dfff2U, 0x6bbdd66bU, 0x6fb1de6fU, 0xc55491c5U,
+ 0x30506030U, 0x01030201U, 0x67a9ce67U, 0x2b7d562bU,
+ 0xfe19e7feU, 0xd762b5d7U, 0xabe64dabU, 0x769aec76U,
+ 0xca458fcaU, 0x829d1f82U, 0xc94089c9U, 0x7d87fa7dU,
+ 0xfa15effaU, 0x59ebb259U, 0x47c98e47U, 0xf00bfbf0U,
+ 0xadec41adU, 0xd467b3d4U, 0xa2fd5fa2U, 0xafea45afU,
+ 0x9cbf239cU, 0xa4f753a4U, 0x7296e472U, 0xc05b9bc0U,
+ 0xb7c275b7U, 0xfd1ce1fdU, 0x93ae3d93U, 0x266a4c26U,
+ 0x365a6c36U, 0x3f417e3fU, 0xf702f5f7U, 0xcc4f83ccU,
+ 0x345c6834U, 0xa5f451a5U, 0xe534d1e5U, 0xf108f9f1U,
+ 0x7193e271U, 0xd873abd8U, 0x31536231U, 0x153f2a15U,
+ 0x040c0804U, 0xc75295c7U, 0x23654623U, 0xc35e9dc3U,
+ 0x18283018U, 0x96a13796U, 0x050f0a05U, 0x9ab52f9aU,
+ 0x07090e07U, 0x12362412U, 0x809b1b80U, 0xe23ddfe2U,
+ 0xeb26cdebU, 0x27694e27U, 0xb2cd7fb2U, 0x759fea75U,
+ 0x091b1209U, 0x839e1d83U, 0x2c74582cU, 0x1a2e341aU,
+ 0x1b2d361bU, 0x6eb2dc6eU, 0x5aeeb45aU, 0xa0fb5ba0U,
+ 0x52f6a452U, 0x3b4d763bU, 0xd661b7d6U, 0xb3ce7db3U,
+ 0x297b5229U, 0xe33edde3U, 0x2f715e2fU, 0x84971384U,
+ 0x53f5a653U, 0xd168b9d1U, 0x00000000U, 0xed2cc1edU,
+ 0x20604020U, 0xfc1fe3fcU, 0xb1c879b1U, 0x5bedb65bU,
+ 0x6abed46aU, 0xcb468dcbU, 0xbed967beU, 0x394b7239U,
+ 0x4ade944aU, 0x4cd4984cU, 0x58e8b058U, 0xcf4a85cfU,
+ 0xd06bbbd0U, 0xef2ac5efU, 0xaae54faaU, 0xfb16edfbU,
+ 0x43c58643U, 0x4dd79a4dU, 0x33556633U, 0x85941185U,
+ 0x45cf8a45U, 0xf910e9f9U, 0x02060402U, 0x7f81fe7fU,
+ 0x50f0a050U, 0x3c44783cU, 0x9fba259fU, 0xa8e34ba8U,
+ 0x51f3a251U, 0xa3fe5da3U, 0x40c08040U, 0x8f8a058fU,
+ 0x92ad3f92U, 0x9dbc219dU, 0x38487038U, 0xf504f1f5U,
+ 0xbcdf63bcU, 0xb6c177b6U, 0xda75afdaU, 0x21634221U,
+ 0x10302010U, 0xff1ae5ffU, 0xf30efdf3U, 0xd26dbfd2U,
+ 0xcd4c81cdU, 0x0c14180cU, 0x13352613U, 0xec2fc3ecU,
+ 0x5fe1be5fU, 0x97a23597U, 0x44cc8844U, 0x17392e17U,
+ 0xc45793c4U, 0xa7f255a7U, 0x7e82fc7eU, 0x3d477a3dU,
+ 0x64acc864U, 0x5de7ba5dU, 0x192b3219U, 0x7395e673U,
+ 0x60a0c060U, 0x81981981U, 0x4fd19e4fU, 0xdc7fa3dcU,
+ 0x22664422U, 0x2a7e542aU, 0x90ab3b90U, 0x88830b88U,
+ 0x46ca8c46U, 0xee29c7eeU, 0xb8d36bb8U, 0x143c2814U,
+ 0xde79a7deU, 0x5ee2bc5eU, 0x0b1d160bU, 0xdb76addbU,
+ 0xe03bdbe0U, 0x32566432U, 0x3a4e743aU, 0x0a1e140aU,
+ 0x49db9249U, 0x060a0c06U, 0x246c4824U, 0x5ce4b85cU,
+ 0xc25d9fc2U, 0xd36ebdd3U, 0xacef43acU, 0x62a6c462U,
+ 0x91a83991U, 0x95a43195U, 0xe437d3e4U, 0x798bf279U,
+ 0xe732d5e7U, 0xc8438bc8U, 0x37596e37U, 0x6db7da6dU,
+ 0x8d8c018dU, 0xd564b1d5U, 0x4ed29c4eU, 0xa9e049a9U,
+ 0x6cb4d86cU, 0x56faac56U, 0xf407f3f4U, 0xea25cfeaU,
+ 0x65afca65U, 0x7a8ef47aU, 0xaee947aeU, 0x08181008U,
+ 0xbad56fbaU, 0x7888f078U, 0x256f4a25U, 0x2e725c2eU,
+ 0x1c24381cU, 0xa6f157a6U, 0xb4c773b4U, 0xc65197c6U,
+ 0xe823cbe8U, 0xdd7ca1ddU, 0x749ce874U, 0x1f213e1fU,
+ 0x4bdd964bU, 0xbddc61bdU, 0x8b860d8bU, 0x8a850f8aU,
+ 0x7090e070U, 0x3e427c3eU, 0xb5c471b5U, 0x66aacc66U,
+ 0x48d89048U, 0x03050603U, 0xf601f7f6U, 0x0e121c0eU,
+ 0x61a3c261U, 0x355f6a35U, 0x57f9ae57U, 0xb9d069b9U,
+ 0x86911786U, 0xc15899c1U, 0x1d273a1dU, 0x9eb9279eU,
+ 0xe138d9e1U, 0xf813ebf8U, 0x98b32b98U, 0x11332211U,
+ 0x69bbd269U, 0xd970a9d9U, 0x8e89078eU, 0x94a73394U,
+ 0x9bb62d9bU, 0x1e223c1eU, 0x87921587U, 0xe920c9e9U,
+ 0xce4987ceU, 0x55ffaa55U, 0x28785028U, 0xdf7aa5dfU,
+ 0x8c8f038cU, 0xa1f859a1U, 0x89800989U, 0x0d171a0dU,
+ 0xbfda65bfU, 0xe631d7e6U, 0x42c68442U, 0x68b8d068U,
+ 0x41c38241U, 0x99b02999U, 0x2d775a2dU, 0x0f111e0fU,
+ 0xb0cb7bb0U, 0x54fca854U, 0xbbd66dbbU, 0x163a2c16U,
+};
+static const u32 Te3[256] = {
+ 0x6363a5c6U, 0x7c7c84f8U, 0x777799eeU, 0x7b7b8df6U,
+ 0xf2f20dffU, 0x6b6bbdd6U, 0x6f6fb1deU, 0xc5c55491U,
+ 0x30305060U, 0x01010302U, 0x6767a9ceU, 0x2b2b7d56U,
+ 0xfefe19e7U, 0xd7d762b5U, 0xababe64dU, 0x76769aecU,
+ 0xcaca458fU, 0x82829d1fU, 0xc9c94089U, 0x7d7d87faU,
+ 0xfafa15efU, 0x5959ebb2U, 0x4747c98eU, 0xf0f00bfbU,
+ 0xadadec41U, 0xd4d467b3U, 0xa2a2fd5fU, 0xafafea45U,
+ 0x9c9cbf23U, 0xa4a4f753U, 0x727296e4U, 0xc0c05b9bU,
+ 0xb7b7c275U, 0xfdfd1ce1U, 0x9393ae3dU, 0x26266a4cU,
+ 0x36365a6cU, 0x3f3f417eU, 0xf7f702f5U, 0xcccc4f83U,
+ 0x34345c68U, 0xa5a5f451U, 0xe5e534d1U, 0xf1f108f9U,
+ 0x717193e2U, 0xd8d873abU, 0x31315362U, 0x15153f2aU,
+ 0x04040c08U, 0xc7c75295U, 0x23236546U, 0xc3c35e9dU,
+ 0x18182830U, 0x9696a137U, 0x05050f0aU, 0x9a9ab52fU,
+ 0x0707090eU, 0x12123624U, 0x80809b1bU, 0xe2e23ddfU,
+ 0xebeb26cdU, 0x2727694eU, 0xb2b2cd7fU, 0x75759feaU,
+ 0x09091b12U, 0x83839e1dU, 0x2c2c7458U, 0x1a1a2e34U,
+ 0x1b1b2d36U, 0x6e6eb2dcU, 0x5a5aeeb4U, 0xa0a0fb5bU,
+ 0x5252f6a4U, 0x3b3b4d76U, 0xd6d661b7U, 0xb3b3ce7dU,
+ 0x29297b52U, 0xe3e33eddU, 0x2f2f715eU, 0x84849713U,
+ 0x5353f5a6U, 0xd1d168b9U, 0x00000000U, 0xeded2cc1U,
+ 0x20206040U, 0xfcfc1fe3U, 0xb1b1c879U, 0x5b5bedb6U,
+ 0x6a6abed4U, 0xcbcb468dU, 0xbebed967U, 0x39394b72U,
+ 0x4a4ade94U, 0x4c4cd498U, 0x5858e8b0U, 0xcfcf4a85U,
+ 0xd0d06bbbU, 0xefef2ac5U, 0xaaaae54fU, 0xfbfb16edU,
+ 0x4343c586U, 0x4d4dd79aU, 0x33335566U, 0x85859411U,
+ 0x4545cf8aU, 0xf9f910e9U, 0x02020604U, 0x7f7f81feU,
+ 0x5050f0a0U, 0x3c3c4478U, 0x9f9fba25U, 0xa8a8e34bU,
+ 0x5151f3a2U, 0xa3a3fe5dU, 0x4040c080U, 0x8f8f8a05U,
+ 0x9292ad3fU, 0x9d9dbc21U, 0x38384870U, 0xf5f504f1U,
+ 0xbcbcdf63U, 0xb6b6c177U, 0xdada75afU, 0x21216342U,
+ 0x10103020U, 0xffff1ae5U, 0xf3f30efdU, 0xd2d26dbfU,
+ 0xcdcd4c81U, 0x0c0c1418U, 0x13133526U, 0xecec2fc3U,
+ 0x5f5fe1beU, 0x9797a235U, 0x4444cc88U, 0x1717392eU,
+ 0xc4c45793U, 0xa7a7f255U, 0x7e7e82fcU, 0x3d3d477aU,
+ 0x6464acc8U, 0x5d5de7baU, 0x19192b32U, 0x737395e6U,
+ 0x6060a0c0U, 0x81819819U, 0x4f4fd19eU, 0xdcdc7fa3U,
+ 0x22226644U, 0x2a2a7e54U, 0x9090ab3bU, 0x8888830bU,
+ 0x4646ca8cU, 0xeeee29c7U, 0xb8b8d36bU, 0x14143c28U,
+ 0xdede79a7U, 0x5e5ee2bcU, 0x0b0b1d16U, 0xdbdb76adU,
+ 0xe0e03bdbU, 0x32325664U, 0x3a3a4e74U, 0x0a0a1e14U,
+ 0x4949db92U, 0x06060a0cU, 0x24246c48U, 0x5c5ce4b8U,
+ 0xc2c25d9fU, 0xd3d36ebdU, 0xacacef43U, 0x6262a6c4U,
+ 0x9191a839U, 0x9595a431U, 0xe4e437d3U, 0x79798bf2U,
+ 0xe7e732d5U, 0xc8c8438bU, 0x3737596eU, 0x6d6db7daU,
+ 0x8d8d8c01U, 0xd5d564b1U, 0x4e4ed29cU, 0xa9a9e049U,
+ 0x6c6cb4d8U, 0x5656faacU, 0xf4f407f3U, 0xeaea25cfU,
+ 0x6565afcaU, 0x7a7a8ef4U, 0xaeaee947U, 0x08081810U,
+ 0xbabad56fU, 0x787888f0U, 0x25256f4aU, 0x2e2e725cU,
+ 0x1c1c2438U, 0xa6a6f157U, 0xb4b4c773U, 0xc6c65197U,
+ 0xe8e823cbU, 0xdddd7ca1U, 0x74749ce8U, 0x1f1f213eU,
+ 0x4b4bdd96U, 0xbdbddc61U, 0x8b8b860dU, 0x8a8a850fU,
+ 0x707090e0U, 0x3e3e427cU, 0xb5b5c471U, 0x6666aaccU,
+ 0x4848d890U, 0x03030506U, 0xf6f601f7U, 0x0e0e121cU,
+ 0x6161a3c2U, 0x35355f6aU, 0x5757f9aeU, 0xb9b9d069U,
+ 0x86869117U, 0xc1c15899U, 0x1d1d273aU, 0x9e9eb927U,
+ 0xe1e138d9U, 0xf8f813ebU, 0x9898b32bU, 0x11113322U,
+ 0x6969bbd2U, 0xd9d970a9U, 0x8e8e8907U, 0x9494a733U,
+ 0x9b9bb62dU, 0x1e1e223cU, 0x87879215U, 0xe9e920c9U,
+ 0xcece4987U, 0x5555ffaaU, 0x28287850U, 0xdfdf7aa5U,
+ 0x8c8c8f03U, 0xa1a1f859U, 0x89898009U, 0x0d0d171aU,
+ 0xbfbfda65U, 0xe6e631d7U, 0x4242c684U, 0x6868b8d0U,
+ 0x4141c382U, 0x9999b029U, 0x2d2d775aU, 0x0f0f111eU,
+ 0xb0b0cb7bU, 0x5454fca8U, 0xbbbbd66dU, 0x16163a2cU,
+};
+static const u32 Te4[256] = {
+ 0x63636363U, 0x7c7c7c7cU, 0x77777777U, 0x7b7b7b7bU,
+ 0xf2f2f2f2U, 0x6b6b6b6bU, 0x6f6f6f6fU, 0xc5c5c5c5U,
+ 0x30303030U, 0x01010101U, 0x67676767U, 0x2b2b2b2bU,
+ 0xfefefefeU, 0xd7d7d7d7U, 0xababababU, 0x76767676U,
+ 0xcacacacaU, 0x82828282U, 0xc9c9c9c9U, 0x7d7d7d7dU,
+ 0xfafafafaU, 0x59595959U, 0x47474747U, 0xf0f0f0f0U,
+ 0xadadadadU, 0xd4d4d4d4U, 0xa2a2a2a2U, 0xafafafafU,
+ 0x9c9c9c9cU, 0xa4a4a4a4U, 0x72727272U, 0xc0c0c0c0U,
+ 0xb7b7b7b7U, 0xfdfdfdfdU, 0x93939393U, 0x26262626U,
+ 0x36363636U, 0x3f3f3f3fU, 0xf7f7f7f7U, 0xccccccccU,
+ 0x34343434U, 0xa5a5a5a5U, 0xe5e5e5e5U, 0xf1f1f1f1U,
+ 0x71717171U, 0xd8d8d8d8U, 0x31313131U, 0x15151515U,
+ 0x04040404U, 0xc7c7c7c7U, 0x23232323U, 0xc3c3c3c3U,
+ 0x18181818U, 0x96969696U, 0x05050505U, 0x9a9a9a9aU,
+ 0x07070707U, 0x12121212U, 0x80808080U, 0xe2e2e2e2U,
+ 0xebebebebU, 0x27272727U, 0xb2b2b2b2U, 0x75757575U,
+ 0x09090909U, 0x83838383U, 0x2c2c2c2cU, 0x1a1a1a1aU,
+ 0x1b1b1b1bU, 0x6e6e6e6eU, 0x5a5a5a5aU, 0xa0a0a0a0U,
+ 0x52525252U, 0x3b3b3b3bU, 0xd6d6d6d6U, 0xb3b3b3b3U,
+ 0x29292929U, 0xe3e3e3e3U, 0x2f2f2f2fU, 0x84848484U,
+ 0x53535353U, 0xd1d1d1d1U, 0x00000000U, 0xededededU,
+ 0x20202020U, 0xfcfcfcfcU, 0xb1b1b1b1U, 0x5b5b5b5bU,
+ 0x6a6a6a6aU, 0xcbcbcbcbU, 0xbebebebeU, 0x39393939U,
+ 0x4a4a4a4aU, 0x4c4c4c4cU, 0x58585858U, 0xcfcfcfcfU,
+ 0xd0d0d0d0U, 0xefefefefU, 0xaaaaaaaaU, 0xfbfbfbfbU,
+ 0x43434343U, 0x4d4d4d4dU, 0x33333333U, 0x85858585U,
+ 0x45454545U, 0xf9f9f9f9U, 0x02020202U, 0x7f7f7f7fU,
+ 0x50505050U, 0x3c3c3c3cU, 0x9f9f9f9fU, 0xa8a8a8a8U,
+ 0x51515151U, 0xa3a3a3a3U, 0x40404040U, 0x8f8f8f8fU,
+ 0x92929292U, 0x9d9d9d9dU, 0x38383838U, 0xf5f5f5f5U,
+ 0xbcbcbcbcU, 0xb6b6b6b6U, 0xdadadadaU, 0x21212121U,
+ 0x10101010U, 0xffffffffU, 0xf3f3f3f3U, 0xd2d2d2d2U,
+ 0xcdcdcdcdU, 0x0c0c0c0cU, 0x13131313U, 0xececececU,
+ 0x5f5f5f5fU, 0x97979797U, 0x44444444U, 0x17171717U,
+ 0xc4c4c4c4U, 0xa7a7a7a7U, 0x7e7e7e7eU, 0x3d3d3d3dU,
+ 0x64646464U, 0x5d5d5d5dU, 0x19191919U, 0x73737373U,
+ 0x60606060U, 0x81818181U, 0x4f4f4f4fU, 0xdcdcdcdcU,
+ 0x22222222U, 0x2a2a2a2aU, 0x90909090U, 0x88888888U,
+ 0x46464646U, 0xeeeeeeeeU, 0xb8b8b8b8U, 0x14141414U,
+ 0xdedededeU, 0x5e5e5e5eU, 0x0b0b0b0bU, 0xdbdbdbdbU,
+ 0xe0e0e0e0U, 0x32323232U, 0x3a3a3a3aU, 0x0a0a0a0aU,
+ 0x49494949U, 0x06060606U, 0x24242424U, 0x5c5c5c5cU,
+ 0xc2c2c2c2U, 0xd3d3d3d3U, 0xacacacacU, 0x62626262U,
+ 0x91919191U, 0x95959595U, 0xe4e4e4e4U, 0x79797979U,
+ 0xe7e7e7e7U, 0xc8c8c8c8U, 0x37373737U, 0x6d6d6d6dU,
+ 0x8d8d8d8dU, 0xd5d5d5d5U, 0x4e4e4e4eU, 0xa9a9a9a9U,
+ 0x6c6c6c6cU, 0x56565656U, 0xf4f4f4f4U, 0xeaeaeaeaU,
+ 0x65656565U, 0x7a7a7a7aU, 0xaeaeaeaeU, 0x08080808U,
+ 0xbabababaU, 0x78787878U, 0x25252525U, 0x2e2e2e2eU,
+ 0x1c1c1c1cU, 0xa6a6a6a6U, 0xb4b4b4b4U, 0xc6c6c6c6U,
+ 0xe8e8e8e8U, 0xddddddddU, 0x74747474U, 0x1f1f1f1fU,
+ 0x4b4b4b4bU, 0xbdbdbdbdU, 0x8b8b8b8bU, 0x8a8a8a8aU,
+ 0x70707070U, 0x3e3e3e3eU, 0xb5b5b5b5U, 0x66666666U,
+ 0x48484848U, 0x03030303U, 0xf6f6f6f6U, 0x0e0e0e0eU,
+ 0x61616161U, 0x35353535U, 0x57575757U, 0xb9b9b9b9U,
+ 0x86868686U, 0xc1c1c1c1U, 0x1d1d1d1dU, 0x9e9e9e9eU,
+ 0xe1e1e1e1U, 0xf8f8f8f8U, 0x98989898U, 0x11111111U,
+ 0x69696969U, 0xd9d9d9d9U, 0x8e8e8e8eU, 0x94949494U,
+ 0x9b9b9b9bU, 0x1e1e1e1eU, 0x87878787U, 0xe9e9e9e9U,
+ 0xcecececeU, 0x55555555U, 0x28282828U, 0xdfdfdfdfU,
+ 0x8c8c8c8cU, 0xa1a1a1a1U, 0x89898989U, 0x0d0d0d0dU,
+ 0xbfbfbfbfU, 0xe6e6e6e6U, 0x42424242U, 0x68686868U,
+ 0x41414141U, 0x99999999U, 0x2d2d2d2dU, 0x0f0f0f0fU,
+ 0xb0b0b0b0U, 0x54545454U, 0xbbbbbbbbU, 0x16161616U,
+};
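+
+/*
+ * Illustrative sketch (added for documentation; not part of the original
+ * file): each Te0 entry packs the S-box output multiplied by 02, 01, 01
+ * and 03 in GF(2^8).  Te4 carries S[x] in every byte, so the packing can
+ * be reconstructed from it; the function name is made up.
+ */
+static u32 __unused
+rijndael_te0_entry(int x)
+{
+	u32 s = Te4[x] & 0xff;				/* S[x] */
+	u32 s2 = (s << 1) ^ ((s & 0x80) ? 0x11b : 0);	/* 02.S[x] (xtime) */
+	u32 s3 = s2 ^ s;				/* 03.S[x] = 02.S[x] ^ S[x] */
+
+	return ((s2 << 24) | (s << 16) | (s << 8) | s3);	/* == Te0[x] */
+}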
+static const u32 Td0[256] = {
+ 0x51f4a750U, 0x7e416553U, 0x1a17a4c3U, 0x3a275e96U,
+ 0x3bab6bcbU, 0x1f9d45f1U, 0xacfa58abU, 0x4be30393U,
+ 0x2030fa55U, 0xad766df6U, 0x88cc7691U, 0xf5024c25U,
+ 0x4fe5d7fcU, 0xc52acbd7U, 0x26354480U, 0xb562a38fU,
+ 0xdeb15a49U, 0x25ba1b67U, 0x45ea0e98U, 0x5dfec0e1U,
+ 0xc32f7502U, 0x814cf012U, 0x8d4697a3U, 0x6bd3f9c6U,
+ 0x038f5fe7U, 0x15929c95U, 0xbf6d7aebU, 0x955259daU,
+ 0xd4be832dU, 0x587421d3U, 0x49e06929U, 0x8ec9c844U,
+ 0x75c2896aU, 0xf48e7978U, 0x99583e6bU, 0x27b971ddU,
+ 0xbee14fb6U, 0xf088ad17U, 0xc920ac66U, 0x7dce3ab4U,
+ 0x63df4a18U, 0xe51a3182U, 0x97513360U, 0x62537f45U,
+ 0xb16477e0U, 0xbb6bae84U, 0xfe81a01cU, 0xf9082b94U,
+ 0x70486858U, 0x8f45fd19U, 0x94de6c87U, 0x527bf8b7U,
+ 0xab73d323U, 0x724b02e2U, 0xe31f8f57U, 0x6655ab2aU,
+ 0xb2eb2807U, 0x2fb5c203U, 0x86c57b9aU, 0xd33708a5U,
+ 0x302887f2U, 0x23bfa5b2U, 0x02036abaU, 0xed16825cU,
+ 0x8acf1c2bU, 0xa779b492U, 0xf307f2f0U, 0x4e69e2a1U,
+ 0x65daf4cdU, 0x0605bed5U, 0xd134621fU, 0xc4a6fe8aU,
+ 0x342e539dU, 0xa2f355a0U, 0x058ae132U, 0xa4f6eb75U,
+ 0x0b83ec39U, 0x4060efaaU, 0x5e719f06U, 0xbd6e1051U,
+ 0x3e218af9U, 0x96dd063dU, 0xdd3e05aeU, 0x4de6bd46U,
+ 0x91548db5U, 0x71c45d05U, 0x0406d46fU, 0x605015ffU,
+ 0x1998fb24U, 0xd6bde997U, 0x894043ccU, 0x67d99e77U,
+ 0xb0e842bdU, 0x07898b88U, 0xe7195b38U, 0x79c8eedbU,
+ 0xa17c0a47U, 0x7c420fe9U, 0xf8841ec9U, 0x00000000U,
+ 0x09808683U, 0x322bed48U, 0x1e1170acU, 0x6c5a724eU,
+ 0xfd0efffbU, 0x0f853856U, 0x3daed51eU, 0x362d3927U,
+ 0x0a0fd964U, 0x685ca621U, 0x9b5b54d1U, 0x24362e3aU,
+ 0x0c0a67b1U, 0x9357e70fU, 0xb4ee96d2U, 0x1b9b919eU,
+ 0x80c0c54fU, 0x61dc20a2U, 0x5a774b69U, 0x1c121a16U,
+ 0xe293ba0aU, 0xc0a02ae5U, 0x3c22e043U, 0x121b171dU,
+ 0x0e090d0bU, 0xf28bc7adU, 0x2db6a8b9U, 0x141ea9c8U,
+ 0x57f11985U, 0xaf75074cU, 0xee99ddbbU, 0xa37f60fdU,
+ 0xf701269fU, 0x5c72f5bcU, 0x44663bc5U, 0x5bfb7e34U,
+ 0x8b432976U, 0xcb23c6dcU, 0xb6edfc68U, 0xb8e4f163U,
+ 0xd731dccaU, 0x42638510U, 0x13972240U, 0x84c61120U,
+ 0x854a247dU, 0xd2bb3df8U, 0xaef93211U, 0xc729a16dU,
+ 0x1d9e2f4bU, 0xdcb230f3U, 0x0d8652ecU, 0x77c1e3d0U,
+ 0x2bb3166cU, 0xa970b999U, 0x119448faU, 0x47e96422U,
+ 0xa8fc8cc4U, 0xa0f03f1aU, 0x567d2cd8U, 0x223390efU,
+ 0x87494ec7U, 0xd938d1c1U, 0x8ccaa2feU, 0x98d40b36U,
+ 0xa6f581cfU, 0xa57ade28U, 0xdab78e26U, 0x3fadbfa4U,
+ 0x2c3a9de4U, 0x5078920dU, 0x6a5fcc9bU, 0x547e4662U,
+ 0xf68d13c2U, 0x90d8b8e8U, 0x2e39f75eU, 0x82c3aff5U,
+ 0x9f5d80beU, 0x69d0937cU, 0x6fd52da9U, 0xcf2512b3U,
+ 0xc8ac993bU, 0x10187da7U, 0xe89c636eU, 0xdb3bbb7bU,
+ 0xcd267809U, 0x6e5918f4U, 0xec9ab701U, 0x834f9aa8U,
+ 0xe6956e65U, 0xaaffe67eU, 0x21bccf08U, 0xef15e8e6U,
+ 0xbae79bd9U, 0x4a6f36ceU, 0xea9f09d4U, 0x29b07cd6U,
+ 0x31a4b2afU, 0x2a3f2331U, 0xc6a59430U, 0x35a266c0U,
+ 0x744ebc37U, 0xfc82caa6U, 0xe090d0b0U, 0x33a7d815U,
+ 0xf104984aU, 0x41ecdaf7U, 0x7fcd500eU, 0x1791f62fU,
+ 0x764dd68dU, 0x43efb04dU, 0xccaa4d54U, 0xe49604dfU,
+ 0x9ed1b5e3U, 0x4c6a881bU, 0xc12c1fb8U, 0x4665517fU,
+ 0x9d5eea04U, 0x018c355dU, 0xfa877473U, 0xfb0b412eU,
+ 0xb3671d5aU, 0x92dbd252U, 0xe9105633U, 0x6dd64713U,
+ 0x9ad7618cU, 0x37a10c7aU, 0x59f8148eU, 0xeb133c89U,
+ 0xcea927eeU, 0xb761c935U, 0xe11ce5edU, 0x7a47b13cU,
+ 0x9cd2df59U, 0x55f2733fU, 0x1814ce79U, 0x73c737bfU,
+ 0x53f7cdeaU, 0x5ffdaa5bU, 0xdf3d6f14U, 0x7844db86U,
+ 0xcaaff381U, 0xb968c43eU, 0x3824342cU, 0xc2a3405fU,
+ 0x161dc372U, 0xbce2250cU, 0x283c498bU, 0xff0d9541U,
+ 0x39a80171U, 0x080cb3deU, 0xd8b4e49cU, 0x6456c190U,
+ 0x7bcb8461U, 0xd532b670U, 0x486c5c74U, 0xd0b85742U,
+};
+static const u32 Td1[256] = {
+ 0x5051f4a7U, 0x537e4165U, 0xc31a17a4U, 0x963a275eU,
+ 0xcb3bab6bU, 0xf11f9d45U, 0xabacfa58U, 0x934be303U,
+ 0x552030faU, 0xf6ad766dU, 0x9188cc76U, 0x25f5024cU,
+ 0xfc4fe5d7U, 0xd7c52acbU, 0x80263544U, 0x8fb562a3U,
+ 0x49deb15aU, 0x6725ba1bU, 0x9845ea0eU, 0xe15dfec0U,
+ 0x02c32f75U, 0x12814cf0U, 0xa38d4697U, 0xc66bd3f9U,
+ 0xe7038f5fU, 0x9515929cU, 0xebbf6d7aU, 0xda955259U,
+ 0x2dd4be83U, 0xd3587421U, 0x2949e069U, 0x448ec9c8U,
+ 0x6a75c289U, 0x78f48e79U, 0x6b99583eU, 0xdd27b971U,
+ 0xb6bee14fU, 0x17f088adU, 0x66c920acU, 0xb47dce3aU,
+ 0x1863df4aU, 0x82e51a31U, 0x60975133U, 0x4562537fU,
+ 0xe0b16477U, 0x84bb6baeU, 0x1cfe81a0U, 0x94f9082bU,
+ 0x58704868U, 0x198f45fdU, 0x8794de6cU, 0xb7527bf8U,
+ 0x23ab73d3U, 0xe2724b02U, 0x57e31f8fU, 0x2a6655abU,
+ 0x07b2eb28U, 0x032fb5c2U, 0x9a86c57bU, 0xa5d33708U,
+ 0xf2302887U, 0xb223bfa5U, 0xba02036aU, 0x5ced1682U,
+ 0x2b8acf1cU, 0x92a779b4U, 0xf0f307f2U, 0xa14e69e2U,
+ 0xcd65daf4U, 0xd50605beU, 0x1fd13462U, 0x8ac4a6feU,
+ 0x9d342e53U, 0xa0a2f355U, 0x32058ae1U, 0x75a4f6ebU,
+ 0x390b83ecU, 0xaa4060efU, 0x065e719fU, 0x51bd6e10U,
+ 0xf93e218aU, 0x3d96dd06U, 0xaedd3e05U, 0x464de6bdU,
+ 0xb591548dU, 0x0571c45dU, 0x6f0406d4U, 0xff605015U,
+ 0x241998fbU, 0x97d6bde9U, 0xcc894043U, 0x7767d99eU,
+ 0xbdb0e842U, 0x8807898bU, 0x38e7195bU, 0xdb79c8eeU,
+ 0x47a17c0aU, 0xe97c420fU, 0xc9f8841eU, 0x00000000U,
+ 0x83098086U, 0x48322bedU, 0xac1e1170U, 0x4e6c5a72U,
+ 0xfbfd0effU, 0x560f8538U, 0x1e3daed5U, 0x27362d39U,
+ 0x640a0fd9U, 0x21685ca6U, 0xd19b5b54U, 0x3a24362eU,
+ 0xb10c0a67U, 0x0f9357e7U, 0xd2b4ee96U, 0x9e1b9b91U,
+ 0x4f80c0c5U, 0xa261dc20U, 0x695a774bU, 0x161c121aU,
+ 0x0ae293baU, 0xe5c0a02aU, 0x433c22e0U, 0x1d121b17U,
+ 0x0b0e090dU, 0xadf28bc7U, 0xb92db6a8U, 0xc8141ea9U,
+ 0x8557f119U, 0x4caf7507U, 0xbbee99ddU, 0xfda37f60U,
+ 0x9ff70126U, 0xbc5c72f5U, 0xc544663bU, 0x345bfb7eU,
+ 0x768b4329U, 0xdccb23c6U, 0x68b6edfcU, 0x63b8e4f1U,
+ 0xcad731dcU, 0x10426385U, 0x40139722U, 0x2084c611U,
+ 0x7d854a24U, 0xf8d2bb3dU, 0x11aef932U, 0x6dc729a1U,
+ 0x4b1d9e2fU, 0xf3dcb230U, 0xec0d8652U, 0xd077c1e3U,
+ 0x6c2bb316U, 0x99a970b9U, 0xfa119448U, 0x2247e964U,
+ 0xc4a8fc8cU, 0x1aa0f03fU, 0xd8567d2cU, 0xef223390U,
+ 0xc787494eU, 0xc1d938d1U, 0xfe8ccaa2U, 0x3698d40bU,
+ 0xcfa6f581U, 0x28a57adeU, 0x26dab78eU, 0xa43fadbfU,
+ 0xe42c3a9dU, 0x0d507892U, 0x9b6a5fccU, 0x62547e46U,
+ 0xc2f68d13U, 0xe890d8b8U, 0x5e2e39f7U, 0xf582c3afU,
+ 0xbe9f5d80U, 0x7c69d093U, 0xa96fd52dU, 0xb3cf2512U,
+ 0x3bc8ac99U, 0xa710187dU, 0x6ee89c63U, 0x7bdb3bbbU,
+ 0x09cd2678U, 0xf46e5918U, 0x01ec9ab7U, 0xa8834f9aU,
+ 0x65e6956eU, 0x7eaaffe6U, 0x0821bccfU, 0xe6ef15e8U,
+ 0xd9bae79bU, 0xce4a6f36U, 0xd4ea9f09U, 0xd629b07cU,
+ 0xaf31a4b2U, 0x312a3f23U, 0x30c6a594U, 0xc035a266U,
+ 0x37744ebcU, 0xa6fc82caU, 0xb0e090d0U, 0x1533a7d8U,
+ 0x4af10498U, 0xf741ecdaU, 0x0e7fcd50U, 0x2f1791f6U,
+ 0x8d764dd6U, 0x4d43efb0U, 0x54ccaa4dU, 0xdfe49604U,
+ 0xe39ed1b5U, 0x1b4c6a88U, 0xb8c12c1fU, 0x7f466551U,
+ 0x049d5eeaU, 0x5d018c35U, 0x73fa8774U, 0x2efb0b41U,
+ 0x5ab3671dU, 0x5292dbd2U, 0x33e91056U, 0x136dd647U,
+ 0x8c9ad761U, 0x7a37a10cU, 0x8e59f814U, 0x89eb133cU,
+ 0xeecea927U, 0x35b761c9U, 0xede11ce5U, 0x3c7a47b1U,
+ 0x599cd2dfU, 0x3f55f273U, 0x791814ceU, 0xbf73c737U,
+ 0xea53f7cdU, 0x5b5ffdaaU, 0x14df3d6fU, 0x867844dbU,
+ 0x81caaff3U, 0x3eb968c4U, 0x2c382434U, 0x5fc2a340U,
+ 0x72161dc3U, 0x0cbce225U, 0x8b283c49U, 0x41ff0d95U,
+ 0x7139a801U, 0xde080cb3U, 0x9cd8b4e4U, 0x906456c1U,
+ 0x617bcb84U, 0x70d532b6U, 0x74486c5cU, 0x42d0b857U,
+};
+static const u32 Td2[256] = {
+ 0xa75051f4U, 0x65537e41U, 0xa4c31a17U, 0x5e963a27U,
+ 0x6bcb3babU, 0x45f11f9dU, 0x58abacfaU, 0x03934be3U,
+ 0xfa552030U, 0x6df6ad76U, 0x769188ccU, 0x4c25f502U,
+ 0xd7fc4fe5U, 0xcbd7c52aU, 0x44802635U, 0xa38fb562U,
+ 0x5a49deb1U, 0x1b6725baU, 0x0e9845eaU, 0xc0e15dfeU,
+ 0x7502c32fU, 0xf012814cU, 0x97a38d46U, 0xf9c66bd3U,
+ 0x5fe7038fU, 0x9c951592U, 0x7aebbf6dU, 0x59da9552U,
+ 0x832dd4beU, 0x21d35874U, 0x692949e0U, 0xc8448ec9U,
+ 0x896a75c2U, 0x7978f48eU, 0x3e6b9958U, 0x71dd27b9U,
+ 0x4fb6bee1U, 0xad17f088U, 0xac66c920U, 0x3ab47dceU,
+ 0x4a1863dfU, 0x3182e51aU, 0x33609751U, 0x7f456253U,
+ 0x77e0b164U, 0xae84bb6bU, 0xa01cfe81U, 0x2b94f908U,
+ 0x68587048U, 0xfd198f45U, 0x6c8794deU, 0xf8b7527bU,
+ 0xd323ab73U, 0x02e2724bU, 0x8f57e31fU, 0xab2a6655U,
+ 0x2807b2ebU, 0xc2032fb5U, 0x7b9a86c5U, 0x08a5d337U,
+ 0x87f23028U, 0xa5b223bfU, 0x6aba0203U, 0x825ced16U,
+ 0x1c2b8acfU, 0xb492a779U, 0xf2f0f307U, 0xe2a14e69U,
+ 0xf4cd65daU, 0xbed50605U, 0x621fd134U, 0xfe8ac4a6U,
+ 0x539d342eU, 0x55a0a2f3U, 0xe132058aU, 0xeb75a4f6U,
+ 0xec390b83U, 0xefaa4060U, 0x9f065e71U, 0x1051bd6eU,
+ 0x8af93e21U, 0x063d96ddU, 0x05aedd3eU, 0xbd464de6U,
+ 0x8db59154U, 0x5d0571c4U, 0xd46f0406U, 0x15ff6050U,
+ 0xfb241998U, 0xe997d6bdU, 0x43cc8940U, 0x9e7767d9U,
+ 0x42bdb0e8U, 0x8b880789U, 0x5b38e719U, 0xeedb79c8U,
+ 0x0a47a17cU, 0x0fe97c42U, 0x1ec9f884U, 0x00000000U,
+ 0x86830980U, 0xed48322bU, 0x70ac1e11U, 0x724e6c5aU,
+ 0xfffbfd0eU, 0x38560f85U, 0xd51e3daeU, 0x3927362dU,
+ 0xd9640a0fU, 0xa621685cU, 0x54d19b5bU, 0x2e3a2436U,
+ 0x67b10c0aU, 0xe70f9357U, 0x96d2b4eeU, 0x919e1b9bU,
+ 0xc54f80c0U, 0x20a261dcU, 0x4b695a77U, 0x1a161c12U,
+ 0xba0ae293U, 0x2ae5c0a0U, 0xe0433c22U, 0x171d121bU,
+ 0x0d0b0e09U, 0xc7adf28bU, 0xa8b92db6U, 0xa9c8141eU,
+ 0x198557f1U, 0x074caf75U, 0xddbbee99U, 0x60fda37fU,
+ 0x269ff701U, 0xf5bc5c72U, 0x3bc54466U, 0x7e345bfbU,
+ 0x29768b43U, 0xc6dccb23U, 0xfc68b6edU, 0xf163b8e4U,
+ 0xdccad731U, 0x85104263U, 0x22401397U, 0x112084c6U,
+ 0x247d854aU, 0x3df8d2bbU, 0x3211aef9U, 0xa16dc729U,
+ 0x2f4b1d9eU, 0x30f3dcb2U, 0x52ec0d86U, 0xe3d077c1U,
+ 0x166c2bb3U, 0xb999a970U, 0x48fa1194U, 0x642247e9U,
+ 0x8cc4a8fcU, 0x3f1aa0f0U, 0x2cd8567dU, 0x90ef2233U,
+ 0x4ec78749U, 0xd1c1d938U, 0xa2fe8ccaU, 0x0b3698d4U,
+ 0x81cfa6f5U, 0xde28a57aU, 0x8e26dab7U, 0xbfa43fadU,
+ 0x9de42c3aU, 0x920d5078U, 0xcc9b6a5fU, 0x4662547eU,
+ 0x13c2f68dU, 0xb8e890d8U, 0xf75e2e39U, 0xaff582c3U,
+ 0x80be9f5dU, 0x937c69d0U, 0x2da96fd5U, 0x12b3cf25U,
+ 0x993bc8acU, 0x7da71018U, 0x636ee89cU, 0xbb7bdb3bU,
+ 0x7809cd26U, 0x18f46e59U, 0xb701ec9aU, 0x9aa8834fU,
+ 0x6e65e695U, 0xe67eaaffU, 0xcf0821bcU, 0xe8e6ef15U,
+ 0x9bd9bae7U, 0x36ce4a6fU, 0x09d4ea9fU, 0x7cd629b0U,
+ 0xb2af31a4U, 0x23312a3fU, 0x9430c6a5U, 0x66c035a2U,
+ 0xbc37744eU, 0xcaa6fc82U, 0xd0b0e090U, 0xd81533a7U,
+ 0x984af104U, 0xdaf741ecU, 0x500e7fcdU, 0xf62f1791U,
+ 0xd68d764dU, 0xb04d43efU, 0x4d54ccaaU, 0x04dfe496U,
+ 0xb5e39ed1U, 0x881b4c6aU, 0x1fb8c12cU, 0x517f4665U,
+ 0xea049d5eU, 0x355d018cU, 0x7473fa87U, 0x412efb0bU,
+ 0x1d5ab367U, 0xd25292dbU, 0x5633e910U, 0x47136dd6U,
+ 0x618c9ad7U, 0x0c7a37a1U, 0x148e59f8U, 0x3c89eb13U,
+ 0x27eecea9U, 0xc935b761U, 0xe5ede11cU, 0xb13c7a47U,
+ 0xdf599cd2U, 0x733f55f2U, 0xce791814U, 0x37bf73c7U,
+ 0xcdea53f7U, 0xaa5b5ffdU, 0x6f14df3dU, 0xdb867844U,
+ 0xf381caafU, 0xc43eb968U, 0x342c3824U, 0x405fc2a3U,
+ 0xc372161dU, 0x250cbce2U, 0x498b283cU, 0x9541ff0dU,
+ 0x017139a8U, 0xb3de080cU, 0xe49cd8b4U, 0xc1906456U,
+ 0x84617bcbU, 0xb670d532U, 0x5c74486cU, 0x5742d0b8U,
+};
+static const u32 Td3[256] = {
+ 0xf4a75051U, 0x4165537eU, 0x17a4c31aU, 0x275e963aU,
+ 0xab6bcb3bU, 0x9d45f11fU, 0xfa58abacU, 0xe303934bU,
+ 0x30fa5520U, 0x766df6adU, 0xcc769188U, 0x024c25f5U,
+ 0xe5d7fc4fU, 0x2acbd7c5U, 0x35448026U, 0x62a38fb5U,
+ 0xb15a49deU, 0xba1b6725U, 0xea0e9845U, 0xfec0e15dU,
+ 0x2f7502c3U, 0x4cf01281U, 0x4697a38dU, 0xd3f9c66bU,
+ 0x8f5fe703U, 0x929c9515U, 0x6d7aebbfU, 0x5259da95U,
+ 0xbe832dd4U, 0x7421d358U, 0xe0692949U, 0xc9c8448eU,
+ 0xc2896a75U, 0x8e7978f4U, 0x583e6b99U, 0xb971dd27U,
+ 0xe14fb6beU, 0x88ad17f0U, 0x20ac66c9U, 0xce3ab47dU,
+ 0xdf4a1863U, 0x1a3182e5U, 0x51336097U, 0x537f4562U,
+ 0x6477e0b1U, 0x6bae84bbU, 0x81a01cfeU, 0x082b94f9U,
+ 0x48685870U, 0x45fd198fU, 0xde6c8794U, 0x7bf8b752U,
+ 0x73d323abU, 0x4b02e272U, 0x1f8f57e3U, 0x55ab2a66U,
+ 0xeb2807b2U, 0xb5c2032fU, 0xc57b9a86U, 0x3708a5d3U,
+ 0x2887f230U, 0xbfa5b223U, 0x036aba02U, 0x16825cedU,
+ 0xcf1c2b8aU, 0x79b492a7U, 0x07f2f0f3U, 0x69e2a14eU,
+ 0xdaf4cd65U, 0x05bed506U, 0x34621fd1U, 0xa6fe8ac4U,
+ 0x2e539d34U, 0xf355a0a2U, 0x8ae13205U, 0xf6eb75a4U,
+ 0x83ec390bU, 0x60efaa40U, 0x719f065eU, 0x6e1051bdU,
+ 0x218af93eU, 0xdd063d96U, 0x3e05aeddU, 0xe6bd464dU,
+ 0x548db591U, 0xc45d0571U, 0x06d46f04U, 0x5015ff60U,
+ 0x98fb2419U, 0xbde997d6U, 0x4043cc89U, 0xd99e7767U,
+ 0xe842bdb0U, 0x898b8807U, 0x195b38e7U, 0xc8eedb79U,
+ 0x7c0a47a1U, 0x420fe97cU, 0x841ec9f8U, 0x00000000U,
+ 0x80868309U, 0x2bed4832U, 0x1170ac1eU, 0x5a724e6cU,
+ 0x0efffbfdU, 0x8538560fU, 0xaed51e3dU, 0x2d392736U,
+ 0x0fd9640aU, 0x5ca62168U, 0x5b54d19bU, 0x362e3a24U,
+ 0x0a67b10cU, 0x57e70f93U, 0xee96d2b4U, 0x9b919e1bU,
+ 0xc0c54f80U, 0xdc20a261U, 0x774b695aU, 0x121a161cU,
+ 0x93ba0ae2U, 0xa02ae5c0U, 0x22e0433cU, 0x1b171d12U,
+ 0x090d0b0eU, 0x8bc7adf2U, 0xb6a8b92dU, 0x1ea9c814U,
+ 0xf1198557U, 0x75074cafU, 0x99ddbbeeU, 0x7f60fda3U,
+ 0x01269ff7U, 0x72f5bc5cU, 0x663bc544U, 0xfb7e345bU,
+ 0x4329768bU, 0x23c6dccbU, 0xedfc68b6U, 0xe4f163b8U,
+ 0x31dccad7U, 0x63851042U, 0x97224013U, 0xc6112084U,
+ 0x4a247d85U, 0xbb3df8d2U, 0xf93211aeU, 0x29a16dc7U,
+ 0x9e2f4b1dU, 0xb230f3dcU, 0x8652ec0dU, 0xc1e3d077U,
+ 0xb3166c2bU, 0x70b999a9U, 0x9448fa11U, 0xe9642247U,
+ 0xfc8cc4a8U, 0xf03f1aa0U, 0x7d2cd856U, 0x3390ef22U,
+ 0x494ec787U, 0x38d1c1d9U, 0xcaa2fe8cU, 0xd40b3698U,
+ 0xf581cfa6U, 0x7ade28a5U, 0xb78e26daU, 0xadbfa43fU,
+ 0x3a9de42cU, 0x78920d50U, 0x5fcc9b6aU, 0x7e466254U,
+ 0x8d13c2f6U, 0xd8b8e890U, 0x39f75e2eU, 0xc3aff582U,
+ 0x5d80be9fU, 0xd0937c69U, 0xd52da96fU, 0x2512b3cfU,
+ 0xac993bc8U, 0x187da710U, 0x9c636ee8U, 0x3bbb7bdbU,
+ 0x267809cdU, 0x5918f46eU, 0x9ab701ecU, 0x4f9aa883U,
+ 0x956e65e6U, 0xffe67eaaU, 0xbccf0821U, 0x15e8e6efU,
+ 0xe79bd9baU, 0x6f36ce4aU, 0x9f09d4eaU, 0xb07cd629U,
+ 0xa4b2af31U, 0x3f23312aU, 0xa59430c6U, 0xa266c035U,
+ 0x4ebc3774U, 0x82caa6fcU, 0x90d0b0e0U, 0xa7d81533U,
+ 0x04984af1U, 0xecdaf741U, 0xcd500e7fU, 0x91f62f17U,
+ 0x4dd68d76U, 0xefb04d43U, 0xaa4d54ccU, 0x9604dfe4U,
+ 0xd1b5e39eU, 0x6a881b4cU, 0x2c1fb8c1U, 0x65517f46U,
+ 0x5eea049dU, 0x8c355d01U, 0x877473faU, 0x0b412efbU,
+ 0x671d5ab3U, 0xdbd25292U, 0x105633e9U, 0xd647136dU,
+ 0xd7618c9aU, 0xa10c7a37U, 0xf8148e59U, 0x133c89ebU,
+ 0xa927eeceU, 0x61c935b7U, 0x1ce5ede1U, 0x47b13c7aU,
+ 0xd2df599cU, 0xf2733f55U, 0x14ce7918U, 0xc737bf73U,
+ 0xf7cdea53U, 0xfdaa5b5fU, 0x3d6f14dfU, 0x44db8678U,
+ 0xaff381caU, 0x68c43eb9U, 0x24342c38U, 0xa3405fc2U,
+ 0x1dc37216U, 0xe2250cbcU, 0x3c498b28U, 0x0d9541ffU,
+ 0xa8017139U, 0x0cb3de08U, 0xb4e49cd8U, 0x56c19064U,
+ 0xcb84617bU, 0x32b670d5U, 0x6c5c7448U, 0xb85742d0U,
+};
+static const u32 Td4[256] = {
+ 0x52525252U, 0x09090909U, 0x6a6a6a6aU, 0xd5d5d5d5U,
+ 0x30303030U, 0x36363636U, 0xa5a5a5a5U, 0x38383838U,
+ 0xbfbfbfbfU, 0x40404040U, 0xa3a3a3a3U, 0x9e9e9e9eU,
+ 0x81818181U, 0xf3f3f3f3U, 0xd7d7d7d7U, 0xfbfbfbfbU,
+ 0x7c7c7c7cU, 0xe3e3e3e3U, 0x39393939U, 0x82828282U,
+ 0x9b9b9b9bU, 0x2f2f2f2fU, 0xffffffffU, 0x87878787U,
+ 0x34343434U, 0x8e8e8e8eU, 0x43434343U, 0x44444444U,
+ 0xc4c4c4c4U, 0xdedededeU, 0xe9e9e9e9U, 0xcbcbcbcbU,
+ 0x54545454U, 0x7b7b7b7bU, 0x94949494U, 0x32323232U,
+ 0xa6a6a6a6U, 0xc2c2c2c2U, 0x23232323U, 0x3d3d3d3dU,
+ 0xeeeeeeeeU, 0x4c4c4c4cU, 0x95959595U, 0x0b0b0b0bU,
+ 0x42424242U, 0xfafafafaU, 0xc3c3c3c3U, 0x4e4e4e4eU,
+ 0x08080808U, 0x2e2e2e2eU, 0xa1a1a1a1U, 0x66666666U,
+ 0x28282828U, 0xd9d9d9d9U, 0x24242424U, 0xb2b2b2b2U,
+ 0x76767676U, 0x5b5b5b5bU, 0xa2a2a2a2U, 0x49494949U,
+ 0x6d6d6d6dU, 0x8b8b8b8bU, 0xd1d1d1d1U, 0x25252525U,
+ 0x72727272U, 0xf8f8f8f8U, 0xf6f6f6f6U, 0x64646464U,
+ 0x86868686U, 0x68686868U, 0x98989898U, 0x16161616U,
+ 0xd4d4d4d4U, 0xa4a4a4a4U, 0x5c5c5c5cU, 0xccccccccU,
+ 0x5d5d5d5dU, 0x65656565U, 0xb6b6b6b6U, 0x92929292U,
+ 0x6c6c6c6cU, 0x70707070U, 0x48484848U, 0x50505050U,
+ 0xfdfdfdfdU, 0xededededU, 0xb9b9b9b9U, 0xdadadadaU,
+ 0x5e5e5e5eU, 0x15151515U, 0x46464646U, 0x57575757U,
+ 0xa7a7a7a7U, 0x8d8d8d8dU, 0x9d9d9d9dU, 0x84848484U,
+ 0x90909090U, 0xd8d8d8d8U, 0xababababU, 0x00000000U,
+ 0x8c8c8c8cU, 0xbcbcbcbcU, 0xd3d3d3d3U, 0x0a0a0a0aU,
+ 0xf7f7f7f7U, 0xe4e4e4e4U, 0x58585858U, 0x05050505U,
+ 0xb8b8b8b8U, 0xb3b3b3b3U, 0x45454545U, 0x06060606U,
+ 0xd0d0d0d0U, 0x2c2c2c2cU, 0x1e1e1e1eU, 0x8f8f8f8fU,
+ 0xcacacacaU, 0x3f3f3f3fU, 0x0f0f0f0fU, 0x02020202U,
+ 0xc1c1c1c1U, 0xafafafafU, 0xbdbdbdbdU, 0x03030303U,
+ 0x01010101U, 0x13131313U, 0x8a8a8a8aU, 0x6b6b6b6bU,
+ 0x3a3a3a3aU, 0x91919191U, 0x11111111U, 0x41414141U,
+ 0x4f4f4f4fU, 0x67676767U, 0xdcdcdcdcU, 0xeaeaeaeaU,
+ 0x97979797U, 0xf2f2f2f2U, 0xcfcfcfcfU, 0xcecececeU,
+ 0xf0f0f0f0U, 0xb4b4b4b4U, 0xe6e6e6e6U, 0x73737373U,
+ 0x96969696U, 0xacacacacU, 0x74747474U, 0x22222222U,
+ 0xe7e7e7e7U, 0xadadadadU, 0x35353535U, 0x85858585U,
+ 0xe2e2e2e2U, 0xf9f9f9f9U, 0x37373737U, 0xe8e8e8e8U,
+ 0x1c1c1c1cU, 0x75757575U, 0xdfdfdfdfU, 0x6e6e6e6eU,
+ 0x47474747U, 0xf1f1f1f1U, 0x1a1a1a1aU, 0x71717171U,
+ 0x1d1d1d1dU, 0x29292929U, 0xc5c5c5c5U, 0x89898989U,
+ 0x6f6f6f6fU, 0xb7b7b7b7U, 0x62626262U, 0x0e0e0e0eU,
+ 0xaaaaaaaaU, 0x18181818U, 0xbebebebeU, 0x1b1b1b1bU,
+ 0xfcfcfcfcU, 0x56565656U, 0x3e3e3e3eU, 0x4b4b4b4bU,
+ 0xc6c6c6c6U, 0xd2d2d2d2U, 0x79797979U, 0x20202020U,
+ 0x9a9a9a9aU, 0xdbdbdbdbU, 0xc0c0c0c0U, 0xfefefefeU,
+ 0x78787878U, 0xcdcdcdcdU, 0x5a5a5a5aU, 0xf4f4f4f4U,
+ 0x1f1f1f1fU, 0xddddddddU, 0xa8a8a8a8U, 0x33333333U,
+ 0x88888888U, 0x07070707U, 0xc7c7c7c7U, 0x31313131U,
+ 0xb1b1b1b1U, 0x12121212U, 0x10101010U, 0x59595959U,
+ 0x27272727U, 0x80808080U, 0xececececU, 0x5f5f5f5fU,
+ 0x60606060U, 0x51515151U, 0x7f7f7f7fU, 0xa9a9a9a9U,
+ 0x19191919U, 0xb5b5b5b5U, 0x4a4a4a4aU, 0x0d0d0d0dU,
+ 0x2d2d2d2dU, 0xe5e5e5e5U, 0x7a7a7a7aU, 0x9f9f9f9fU,
+ 0x93939393U, 0xc9c9c9c9U, 0x9c9c9c9cU, 0xefefefefU,
+ 0xa0a0a0a0U, 0xe0e0e0e0U, 0x3b3b3b3bU, 0x4d4d4d4dU,
+ 0xaeaeaeaeU, 0x2a2a2a2aU, 0xf5f5f5f5U, 0xb0b0b0b0U,
+ 0xc8c8c8c8U, 0xebebebebU, 0xbbbbbbbbU, 0x3c3c3c3cU,
+ 0x83838383U, 0x53535353U, 0x99999999U, 0x61616161U,
+ 0x17171717U, 0x2b2b2b2bU, 0x04040404U, 0x7e7e7e7eU,
+ 0xbabababaU, 0x77777777U, 0xd6d6d6d6U, 0x26262626U,
+ 0xe1e1e1e1U, 0x69696969U, 0x14141414U, 0x63636363U,
+ 0x55555555U, 0x21212121U, 0x0c0c0c0cU, 0x7d7d7d7dU,
+};
+static const u32 rcon[] = {
+ 0x01000000, 0x02000000, 0x04000000, 0x08000000,
+ 0x10000000, 0x20000000, 0x40000000, 0x80000000,
+ 0x1B000000, 0x36000000, /* for 128-bit blocks, Rijndael never uses more than 10 rcon values */
+};
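+
+/*
+ * Illustrative sketch, not from the original file: each rcon entry is x^i in
+ * GF(2^8) (reduction polynomial 0x11b) placed in the top byte, so the table
+ * above could equivalently be generated at runtime:
+ */
+#if 0	/* example only */
+static void gen_rcon(u32 out[10]) {
+	u8 r = 0x01;
+	int i;
+	for (i = 0; i < 10; i++) {
+		out[i] = (u32)r << 24;	/* 0x01000000, 0x02000000, ..., 0x36000000 */
+		r = (u8)((r << 1) ^ ((r & 0x80) ? 0x1b : 0x00));	/* xtime() in GF(2^8) */
+	}
+}
+#endif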
+
+#define SWAP(x) (_lrotl(x, 8) & 0x00ff00ff | _lrotr(x, 8) & 0xff00ff00)
+
+#define GETU32(pt) (((u32)(pt)[0] << 24) ^ ((u32)(pt)[1] << 16) ^ ((u32)(pt)[2] << 8) ^ ((u32)(pt)[3]))
+#define PUTU32(ct, st) { (ct)[0] = (u8)((st) >> 24); (ct)[1] = (u8)((st) >> 16); (ct)[2] = (u8)((st) >> 8); (ct)[3] = (u8)(st); }
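+
+/*
+ * Illustrative sketch, not from the original file: GETU32 assembles four
+ * octets into a big-endian 32-bit word and PUTU32 writes one back, so the
+ * pair round-trips a block on hosts of either endianness.
+ */
+#if 0	/* example only */
+static void getu32_putu32_example(void) {
+	u8 in[4] = { 0x01, 0x23, 0x45, 0x67 };
+	u8 out[4];
+	u32 w = GETU32(in);	/* w == 0x01234567 on any host */
+	PUTU32(out, w);		/* out[] == { 0x01, 0x23, 0x45, 0x67 } */
+}
+#endif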
+
+/**
+ * Expand the cipher key into the encryption key schedule.
+ *
+ * @return the number of rounds for the given cipher key size.
+ */
+int rijndaelKeySetupEnc(u32 rk[/*4*(Nr + 1)*/], const u8 cipherKey[], int keyBits) {
+ int i = 0;
+ u32 temp;
+
+ KASSERT(keyBits == 128 || keyBits == 192 || keyBits == 256,
+ ("Invalid key size (%d).", keyBits));
+ rk[0] = GETU32(cipherKey );
+ rk[1] = GETU32(cipherKey + 4);
+ rk[2] = GETU32(cipherKey + 8);
+ rk[3] = GETU32(cipherKey + 12);
+ if (keyBits == 128) {
+ for (;;) {
+ temp = rk[3];
+ rk[4] = rk[0] ^
+ (Te4[(temp >> 16) & 0xff] & 0xff000000) ^
+ (Te4[(temp >> 8) & 0xff] & 0x00ff0000) ^
+ (Te4[(temp ) & 0xff] & 0x0000ff00) ^
+ (Te4[(temp >> 24) ] & 0x000000ff) ^
+ rcon[i];
+ rk[5] = rk[1] ^ rk[4];
+ rk[6] = rk[2] ^ rk[5];
+ rk[7] = rk[3] ^ rk[6];
+ if (++i == 10) {
+ return 10;
+ }
+ rk += 4;
+ }
+ }
+ rk[4] = GETU32(cipherKey + 16);
+ rk[5] = GETU32(cipherKey + 20);
+ if (keyBits == 192) {
+ for (;;) {
+ temp = rk[ 5];
+ rk[ 6] = rk[ 0] ^
+ (Te4[(temp >> 16) & 0xff] & 0xff000000) ^
+ (Te4[(temp >> 8) & 0xff] & 0x00ff0000) ^
+ (Te4[(temp ) & 0xff] & 0x0000ff00) ^
+ (Te4[(temp >> 24) ] & 0x000000ff) ^
+ rcon[i];
+ rk[ 7] = rk[ 1] ^ rk[ 6];
+ rk[ 8] = rk[ 2] ^ rk[ 7];
+ rk[ 9] = rk[ 3] ^ rk[ 8];
+ if (++i == 8) {
+ return 12;
+ }
+ rk[10] = rk[ 4] ^ rk[ 9];
+ rk[11] = rk[ 5] ^ rk[10];
+ rk += 6;
+ }
+ }
+ rk[6] = GETU32(cipherKey + 24);
+ rk[7] = GETU32(cipherKey + 28);
+ if (keyBits == 256) {
+ for (;;) {
+ temp = rk[ 7];
+ rk[ 8] = rk[ 0] ^
+ (Te4[(temp >> 16) & 0xff] & 0xff000000) ^
+ (Te4[(temp >> 8) & 0xff] & 0x00ff0000) ^
+ (Te4[(temp ) & 0xff] & 0x0000ff00) ^
+ (Te4[(temp >> 24) ] & 0x000000ff) ^
+ rcon[i];
+ rk[ 9] = rk[ 1] ^ rk[ 8];
+ rk[10] = rk[ 2] ^ rk[ 9];
+ rk[11] = rk[ 3] ^ rk[10];
+ if (++i == 7) {
+ return 14;
+ }
+ temp = rk[11];
+ rk[12] = rk[ 4] ^
+ (Te4[(temp >> 24) ] & 0xff000000) ^
+ (Te4[(temp >> 16) & 0xff] & 0x00ff0000) ^
+ (Te4[(temp >> 8) & 0xff] & 0x0000ff00) ^
+ (Te4[(temp ) & 0xff] & 0x000000ff);
+ rk[13] = rk[ 5] ^ rk[12];
+ rk[14] = rk[ 6] ^ rk[13];
+ rk[15] = rk[ 7] ^ rk[14];
+
+ rk += 8;
+ }
+ }
+ return 0;
+}
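+
+/*
+ * Usage sketch (an assumed caller, not from this file): the caller provides
+ * room for 4*(Nr + 1) round-key words and reads the round count back from
+ * the return value.
+ */
+#if 0	/* example only */
+static void key_setup_example(void) {
+	static const u8 key128[16] = {
+		0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+		0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f
+	};
+	u32 rk[4 * (14 + 1)];	/* large enough for any key size (Nr <= 14) */
+	int Nr = rijndaelKeySetupEnc(rk, key128, 128);
+	/* Nr == 10 for 128-bit keys, 12 for 192-bit, 14 for 256-bit */
+}
+#endif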
+
+/**
+ * Expand the cipher key into the decryption key schedule.
+ *
+ * @return the number of rounds for the given cipher key size.
+ */
+int rijndaelKeySetupDec(u32 rk[/*4*(Nr + 1)*/], const u8 cipherKey[], int keyBits) {
+ int Nr, i, j;
+ u32 temp;
+
+ /* expand the cipher key: */
+ Nr = rijndaelKeySetupEnc(rk, cipherKey, keyBits);
+ /* invert the order of the round keys: */
+ for (i = 0, j = 4*Nr; i < j; i += 4, j -= 4) {
+ temp = rk[i ]; rk[i ] = rk[j ]; rk[j ] = temp;
+ temp = rk[i + 1]; rk[i + 1] = rk[j + 1]; rk[j + 1] = temp;
+ temp = rk[i + 2]; rk[i + 2] = rk[j + 2]; rk[j + 2] = temp;
+ temp = rk[i + 3]; rk[i + 3] = rk[j + 3]; rk[j + 3] = temp;
+ }
+ /* apply the inverse MixColumn transform to all round keys but the first and the last: */
+ for (i = 1; i < Nr; i++) {
+ rk += 4;
+ rk[0] =
+ Td0[Te4[(rk[0] >> 24) ] & 0xff] ^
+ Td1[Te4[(rk[0] >> 16) & 0xff] & 0xff] ^
+ Td2[Te4[(rk[0] >> 8) & 0xff] & 0xff] ^
+ Td3[Te4[(rk[0] ) & 0xff] & 0xff];
+ rk[1] =
+ Td0[Te4[(rk[1] >> 24) ] & 0xff] ^
+ Td1[Te4[(rk[1] >> 16) & 0xff] & 0xff] ^
+ Td2[Te4[(rk[1] >> 8) & 0xff] & 0xff] ^
+ Td3[Te4[(rk[1] ) & 0xff] & 0xff];
+ rk[2] =
+ Td0[Te4[(rk[2] >> 24) ] & 0xff] ^
+ Td1[Te4[(rk[2] >> 16) & 0xff] & 0xff] ^
+ Td2[Te4[(rk[2] >> 8) & 0xff] & 0xff] ^
+ Td3[Te4[(rk[2] ) & 0xff] & 0xff];
+ rk[3] =
+ Td0[Te4[(rk[3] >> 24) ] & 0xff] ^
+ Td1[Te4[(rk[3] >> 16) & 0xff] & 0xff] ^
+ Td2[Te4[(rk[3] >> 8) & 0xff] & 0xff] ^
+ Td3[Te4[(rk[3] ) & 0xff] & 0xff];
+ }
+ return Nr;
+}
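+
+/*
+ * Why the Td*[Te4[...]] composition above yields InvMixColumns (descriptive
+ * note, not from the original source): Te4 applies the forward S-box and the
+ * Td tables begin with the inverse S-box, so the two substitutions cancel,
+ * leaving only the InvMixColumns contribution of each round-key byte. This
+ * is the standard "equivalent inverse cipher" key schedule of FIPS-197.
+ */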
+
+void rijndaelEncrypt(const u32 rk[/*4*(Nr + 1)*/], int Nr, const u8 pt[16], u8 ct[16]) {
+ u32 s0, s1, s2, s3, t0, t1, t2, t3;
+#ifndef FULL_UNROLL
+ int r;
+#endif /* ?FULL_UNROLL */
+
+ /*
+ * map byte array block to cipher state
+ * and add initial round key:
+ */
+ s0 = GETU32(pt ) ^ rk[0];
+ s1 = GETU32(pt + 4) ^ rk[1];
+ s2 = GETU32(pt + 8) ^ rk[2];
+ s3 = GETU32(pt + 12) ^ rk[3];
+#ifdef FULL_UNROLL
+ /* round 1: */
+ t0 = Te0[s0 >> 24] ^ Te1[(s1 >> 16) & 0xff] ^ Te2[(s2 >> 8) & 0xff] ^ Te3[s3 & 0xff] ^ rk[ 4];
+ t1 = Te0[s1 >> 24] ^ Te1[(s2 >> 16) & 0xff] ^ Te2[(s3 >> 8) & 0xff] ^ Te3[s0 & 0xff] ^ rk[ 5];
+ t2 = Te0[s2 >> 24] ^ Te1[(s3 >> 16) & 0xff] ^ Te2[(s0 >> 8) & 0xff] ^ Te3[s1 & 0xff] ^ rk[ 6];
+ t3 = Te0[s3 >> 24] ^ Te1[(s0 >> 16) & 0xff] ^ Te2[(s1 >> 8) & 0xff] ^ Te3[s2 & 0xff] ^ rk[ 7];
+ /* round 2: */
+ s0 = Te0[t0 >> 24] ^ Te1[(t1 >> 16) & 0xff] ^ Te2[(t2 >> 8) & 0xff] ^ Te3[t3 & 0xff] ^ rk[ 8];
+ s1 = Te0[t1 >> 24] ^ Te1[(t2 >> 16) & 0xff] ^ Te2[(t3 >> 8) & 0xff] ^ Te3[t0 & 0xff] ^ rk[ 9];
+ s2 = Te0[t2 >> 24] ^ Te1[(t3 >> 16) & 0xff] ^ Te2[(t0 >> 8) & 0xff] ^ Te3[t1 & 0xff] ^ rk[10];
+ s3 = Te0[t3 >> 24] ^ Te1[(t0 >> 16) & 0xff] ^ Te2[(t1 >> 8) & 0xff] ^ Te3[t2 & 0xff] ^ rk[11];
+ /* round 3: */
+ t0 = Te0[s0 >> 24] ^ Te1[(s1 >> 16) & 0xff] ^ Te2[(s2 >> 8) & 0xff] ^ Te3[s3 & 0xff] ^ rk[12];
+ t1 = Te0[s1 >> 24] ^ Te1[(s2 >> 16) & 0xff] ^ Te2[(s3 >> 8) & 0xff] ^ Te3[s0 & 0xff] ^ rk[13];
+ t2 = Te0[s2 >> 24] ^ Te1[(s3 >> 16) & 0xff] ^ Te2[(s0 >> 8) & 0xff] ^ Te3[s1 & 0xff] ^ rk[14];
+ t3 = Te0[s3 >> 24] ^ Te1[(s0 >> 16) & 0xff] ^ Te2[(s1 >> 8) & 0xff] ^ Te3[s2 & 0xff] ^ rk[15];
+ /* round 4: */
+ s0 = Te0[t0 >> 24] ^ Te1[(t1 >> 16) & 0xff] ^ Te2[(t2 >> 8) & 0xff] ^ Te3[t3 & 0xff] ^ rk[16];
+ s1 = Te0[t1 >> 24] ^ Te1[(t2 >> 16) & 0xff] ^ Te2[(t3 >> 8) & 0xff] ^ Te3[t0 & 0xff] ^ rk[17];
+ s2 = Te0[t2 >> 24] ^ Te1[(t3 >> 16) & 0xff] ^ Te2[(t0 >> 8) & 0xff] ^ Te3[t1 & 0xff] ^ rk[18];
+ s3 = Te0[t3 >> 24] ^ Te1[(t0 >> 16) & 0xff] ^ Te2[(t1 >> 8) & 0xff] ^ Te3[t2 & 0xff] ^ rk[19];
+ /* round 5: */
+ t0 = Te0[s0 >> 24] ^ Te1[(s1 >> 16) & 0xff] ^ Te2[(s2 >> 8) & 0xff] ^ Te3[s3 & 0xff] ^ rk[20];
+ t1 = Te0[s1 >> 24] ^ Te1[(s2 >> 16) & 0xff] ^ Te2[(s3 >> 8) & 0xff] ^ Te3[s0 & 0xff] ^ rk[21];
+ t2 = Te0[s2 >> 24] ^ Te1[(s3 >> 16) & 0xff] ^ Te2[(s0 >> 8) & 0xff] ^ Te3[s1 & 0xff] ^ rk[22];
+ t3 = Te0[s3 >> 24] ^ Te1[(s0 >> 16) & 0xff] ^ Te2[(s1 >> 8) & 0xff] ^ Te3[s2 & 0xff] ^ rk[23];
+ /* round 6: */
+ s0 = Te0[t0 >> 24] ^ Te1[(t1 >> 16) & 0xff] ^ Te2[(t2 >> 8) & 0xff] ^ Te3[t3 & 0xff] ^ rk[24];
+ s1 = Te0[t1 >> 24] ^ Te1[(t2 >> 16) & 0xff] ^ Te2[(t3 >> 8) & 0xff] ^ Te3[t0 & 0xff] ^ rk[25];
+ s2 = Te0[t2 >> 24] ^ Te1[(t3 >> 16) & 0xff] ^ Te2[(t0 >> 8) & 0xff] ^ Te3[t1 & 0xff] ^ rk[26];
+ s3 = Te0[t3 >> 24] ^ Te1[(t0 >> 16) & 0xff] ^ Te2[(t1 >> 8) & 0xff] ^ Te3[t2 & 0xff] ^ rk[27];
+ /* round 7: */
+ t0 = Te0[s0 >> 24] ^ Te1[(s1 >> 16) & 0xff] ^ Te2[(s2 >> 8) & 0xff] ^ Te3[s3 & 0xff] ^ rk[28];
+ t1 = Te0[s1 >> 24] ^ Te1[(s2 >> 16) & 0xff] ^ Te2[(s3 >> 8) & 0xff] ^ Te3[s0 & 0xff] ^ rk[29];
+ t2 = Te0[s2 >> 24] ^ Te1[(s3 >> 16) & 0xff] ^ Te2[(s0 >> 8) & 0xff] ^ Te3[s1 & 0xff] ^ rk[30];
+ t3 = Te0[s3 >> 24] ^ Te1[(s0 >> 16) & 0xff] ^ Te2[(s1 >> 8) & 0xff] ^ Te3[s2 & 0xff] ^ rk[31];
+ /* round 8: */
+ s0 = Te0[t0 >> 24] ^ Te1[(t1 >> 16) & 0xff] ^ Te2[(t2 >> 8) & 0xff] ^ Te3[t3 & 0xff] ^ rk[32];
+ s1 = Te0[t1 >> 24] ^ Te1[(t2 >> 16) & 0xff] ^ Te2[(t3 >> 8) & 0xff] ^ Te3[t0 & 0xff] ^ rk[33];
+ s2 = Te0[t2 >> 24] ^ Te1[(t3 >> 16) & 0xff] ^ Te2[(t0 >> 8) & 0xff] ^ Te3[t1 & 0xff] ^ rk[34];
+ s3 = Te0[t3 >> 24] ^ Te1[(t0 >> 16) & 0xff] ^ Te2[(t1 >> 8) & 0xff] ^ Te3[t2 & 0xff] ^ rk[35];
+ /* round 9: */
+ t0 = Te0[s0 >> 24] ^ Te1[(s1 >> 16) & 0xff] ^ Te2[(s2 >> 8) & 0xff] ^ Te3[s3 & 0xff] ^ rk[36];
+ t1 = Te0[s1 >> 24] ^ Te1[(s2 >> 16) & 0xff] ^ Te2[(s3 >> 8) & 0xff] ^ Te3[s0 & 0xff] ^ rk[37];
+ t2 = Te0[s2 >> 24] ^ Te1[(s3 >> 16) & 0xff] ^ Te2[(s0 >> 8) & 0xff] ^ Te3[s1 & 0xff] ^ rk[38];
+ t3 = Te0[s3 >> 24] ^ Te1[(s0 >> 16) & 0xff] ^ Te2[(s1 >> 8) & 0xff] ^ Te3[s2 & 0xff] ^ rk[39];
+ if (Nr > 10) {
+ /* round 10: */
+ s0 = Te0[t0 >> 24] ^ Te1[(t1 >> 16) & 0xff] ^ Te2[(t2 >> 8) & 0xff] ^ Te3[t3 & 0xff] ^ rk[40];
+ s1 = Te0[t1 >> 24] ^ Te1[(t2 >> 16) & 0xff] ^ Te2[(t3 >> 8) & 0xff] ^ Te3[t0 & 0xff] ^ rk[41];
+ s2 = Te0[t2 >> 24] ^ Te1[(t3 >> 16) & 0xff] ^ Te2[(t0 >> 8) & 0xff] ^ Te3[t1 & 0xff] ^ rk[42];
+ s3 = Te0[t3 >> 24] ^ Te1[(t0 >> 16) & 0xff] ^ Te2[(t1 >> 8) & 0xff] ^ Te3[t2 & 0xff] ^ rk[43];
+ /* round 11: */
+ t0 = Te0[s0 >> 24] ^ Te1[(s1 >> 16) & 0xff] ^ Te2[(s2 >> 8) & 0xff] ^ Te3[s3 & 0xff] ^ rk[44];
+ t1 = Te0[s1 >> 24] ^ Te1[(s2 >> 16) & 0xff] ^ Te2[(s3 >> 8) & 0xff] ^ Te3[s0 & 0xff] ^ rk[45];
+ t2 = Te0[s2 >> 24] ^ Te1[(s3 >> 16) & 0xff] ^ Te2[(s0 >> 8) & 0xff] ^ Te3[s1 & 0xff] ^ rk[46];
+ t3 = Te0[s3 >> 24] ^ Te1[(s0 >> 16) & 0xff] ^ Te2[(s1 >> 8) & 0xff] ^ Te3[s2 & 0xff] ^ rk[47];
+ if (Nr > 12) {
+ /* round 12: */
+ s0 = Te0[t0 >> 24] ^ Te1[(t1 >> 16) & 0xff] ^ Te2[(t2 >> 8) & 0xff] ^ Te3[t3 & 0xff] ^ rk[48];
+ s1 = Te0[t1 >> 24] ^ Te1[(t2 >> 16) & 0xff] ^ Te2[(t3 >> 8) & 0xff] ^ Te3[t0 & 0xff] ^ rk[49];
+ s2 = Te0[t2 >> 24] ^ Te1[(t3 >> 16) & 0xff] ^ Te2[(t0 >> 8) & 0xff] ^ Te3[t1 & 0xff] ^ rk[50];
+ s3 = Te0[t3 >> 24] ^ Te1[(t0 >> 16) & 0xff] ^ Te2[(t1 >> 8) & 0xff] ^ Te3[t2 & 0xff] ^ rk[51];
+ /* round 13: */
+ t0 = Te0[s0 >> 24] ^ Te1[(s1 >> 16) & 0xff] ^ Te2[(s2 >> 8) & 0xff] ^ Te3[s3 & 0xff] ^ rk[52];
+ t1 = Te0[s1 >> 24] ^ Te1[(s2 >> 16) & 0xff] ^ Te2[(s3 >> 8) & 0xff] ^ Te3[s0 & 0xff] ^ rk[53];
+ t2 = Te0[s2 >> 24] ^ Te1[(s3 >> 16) & 0xff] ^ Te2[(s0 >> 8) & 0xff] ^ Te3[s1 & 0xff] ^ rk[54];
+ t3 = Te0[s3 >> 24] ^ Te1[(s0 >> 16) & 0xff] ^ Te2[(s1 >> 8) & 0xff] ^ Te3[s2 & 0xff] ^ rk[55];
+ }
+ }
+ rk += Nr << 2;
+#else /* !FULL_UNROLL */
+ /*
+ * Nr - 1 full rounds:
+ */
+ r = Nr >> 1;
+ for (;;) {
+ t0 =
+ Te0[(s0 >> 24) ] ^
+ Te1[(s1 >> 16) & 0xff] ^
+ Te2[(s2 >> 8) & 0xff] ^
+ Te3[(s3 ) & 0xff] ^
+ rk[4];
+ t1 =
+ Te0[(s1 >> 24) ] ^
+ Te1[(s2 >> 16) & 0xff] ^
+ Te2[(s3 >> 8) & 0xff] ^
+ Te3[(s0 ) & 0xff] ^
+ rk[5];
+ t2 =
+ Te0[(s2 >> 24) ] ^
+ Te1[(s3 >> 16) & 0xff] ^
+ Te2[(s0 >> 8) & 0xff] ^
+ Te3[(s1 ) & 0xff] ^
+ rk[6];
+ t3 =
+ Te0[(s3 >> 24) ] ^
+ Te1[(s0 >> 16) & 0xff] ^
+ Te2[(s1 >> 8) & 0xff] ^
+ Te3[(s2 ) & 0xff] ^
+ rk[7];
+
+ rk += 8;
+ if (--r == 0) {
+ break;
+ }
+
+ s0 =
+ Te0[(t0 >> 24) ] ^
+ Te1[(t1 >> 16) & 0xff] ^
+ Te2[(t2 >> 8) & 0xff] ^
+ Te3[(t3 ) & 0xff] ^
+ rk[0];
+ s1 =
+ Te0[(t1 >> 24) ] ^
+ Te1[(t2 >> 16) & 0xff] ^
+ Te2[(t3 >> 8) & 0xff] ^
+ Te3[(t0 ) & 0xff] ^
+ rk[1];
+ s2 =
+ Te0[(t2 >> 24) ] ^
+ Te1[(t3 >> 16) & 0xff] ^
+ Te2[(t0 >> 8) & 0xff] ^
+ Te3[(t1 ) & 0xff] ^
+ rk[2];
+ s3 =
+ Te0[(t3 >> 24) ] ^
+ Te1[(t0 >> 16) & 0xff] ^
+ Te2[(t1 >> 8) & 0xff] ^
+ Te3[(t2 ) & 0xff] ^
+ rk[3];
+ }
+#endif /* ?FULL_UNROLL */
+ /*
+ * apply last round and
+ * map cipher state to byte array block:
+ */
+ s0 =
+ (Te4[(t0 >> 24) ] & 0xff000000) ^
+ (Te4[(t1 >> 16) & 0xff] & 0x00ff0000) ^
+ (Te4[(t2 >> 8) & 0xff] & 0x0000ff00) ^
+ (Te4[(t3 ) & 0xff] & 0x000000ff) ^
+ rk[0];
+ PUTU32(ct , s0);
+ s1 =
+ (Te4[(t1 >> 24) ] & 0xff000000) ^
+ (Te4[(t2 >> 16) & 0xff] & 0x00ff0000) ^
+ (Te4[(t3 >> 8) & 0xff] & 0x0000ff00) ^
+ (Te4[(t0 ) & 0xff] & 0x000000ff) ^
+ rk[1];
+ PUTU32(ct + 4, s1);
+ s2 =
+ (Te4[(t2 >> 24) ] & 0xff000000) ^
+ (Te4[(t3 >> 16) & 0xff] & 0x00ff0000) ^
+ (Te4[(t0 >> 8) & 0xff] & 0x0000ff00) ^
+ (Te4[(t1 ) & 0xff] & 0x000000ff) ^
+ rk[2];
+ PUTU32(ct + 8, s2);
+ s3 =
+ (Te4[(t3 >> 24) ] & 0xff000000) ^
+ (Te4[(t0 >> 16) & 0xff] & 0x00ff0000) ^
+ (Te4[(t1 >> 8) & 0xff] & 0x0000ff00) ^
+ (Te4[(t2 ) & 0xff] & 0x000000ff) ^
+ rk[3];
+ PUTU32(ct + 12, s3);
+}
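+
+/*
+ * Known-answer sketch; the vector is from FIPS-197 Appendix C.1, but the
+ * driver code itself is an assumption, not part of this file:
+ */
+#if 0	/* example only */
+static void encrypt_kat(void) {
+	static const u8 key[16] = {
+		0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+		0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f
+	};
+	static const u8 pt[16] = {
+		0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77,
+		0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff
+	};
+	u32 rk[4 * (10 + 1)];
+	u8 ct[16];
+	int Nr = rijndaelKeySetupEnc(rk, key, 128);
+	rijndaelEncrypt(rk, Nr, pt, ct);
+	/* expected ct: 69 c4 e0 d8 6a 7b 04 30 d8 cd b7 80 70 b4 c5 5a */
+}
+#endif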
+
+void rijndaelDecrypt(const u32 rk[/*4*(Nr + 1)*/], int Nr, const u8 ct[16], u8 pt[16]) {
+ u32 s0, s1, s2, s3, t0, t1, t2, t3;
+#ifndef FULL_UNROLL
+ int r;
+#endif /* ?FULL_UNROLL */
+
+ /*
+ * map byte array block to cipher state
+ * and add initial round key:
+ */
+ s0 = GETU32(ct ) ^ rk[0];
+ s1 = GETU32(ct + 4) ^ rk[1];
+ s2 = GETU32(ct + 8) ^ rk[2];
+ s3 = GETU32(ct + 12) ^ rk[3];
+#ifdef FULL_UNROLL
+ /* round 1: */
+ t0 = Td0[s0 >> 24] ^ Td1[(s3 >> 16) & 0xff] ^ Td2[(s2 >> 8) & 0xff] ^ Td3[s1 & 0xff] ^ rk[ 4];
+ t1 = Td0[s1 >> 24] ^ Td1[(s0 >> 16) & 0xff] ^ Td2[(s3 >> 8) & 0xff] ^ Td3[s2 & 0xff] ^ rk[ 5];
+ t2 = Td0[s2 >> 24] ^ Td1[(s1 >> 16) & 0xff] ^ Td2[(s0 >> 8) & 0xff] ^ Td3[s3 & 0xff] ^ rk[ 6];
+ t3 = Td0[s3 >> 24] ^ Td1[(s2 >> 16) & 0xff] ^ Td2[(s1 >> 8) & 0xff] ^ Td3[s0 & 0xff] ^ rk[ 7];
+ /* round 2: */
+ s0 = Td0[t0 >> 24] ^ Td1[(t3 >> 16) & 0xff] ^ Td2[(t2 >> 8) & 0xff] ^ Td3[t1 & 0xff] ^ rk[ 8];
+ s1 = Td0[t1 >> 24] ^ Td1[(t0 >> 16) & 0xff] ^ Td2[(t3 >> 8) & 0xff] ^ Td3[t2 & 0xff] ^ rk[ 9];
+ s2 = Td0[t2 >> 24] ^ Td1[(t1 >> 16) & 0xff] ^ Td2[(t0 >> 8) & 0xff] ^ Td3[t3 & 0xff] ^ rk[10];
+ s3 = Td0[t3 >> 24] ^ Td1[(t2 >> 16) & 0xff] ^ Td2[(t1 >> 8) & 0xff] ^ Td3[t0 & 0xff] ^ rk[11];
+ /* round 3: */
+ t0 = Td0[s0 >> 24] ^ Td1[(s3 >> 16) & 0xff] ^ Td2[(s2 >> 8) & 0xff] ^ Td3[s1 & 0xff] ^ rk[12];
+ t1 = Td0[s1 >> 24] ^ Td1[(s0 >> 16) & 0xff] ^ Td2[(s3 >> 8) & 0xff] ^ Td3[s2 & 0xff] ^ rk[13];
+ t2 = Td0[s2 >> 24] ^ Td1[(s1 >> 16) & 0xff] ^ Td2[(s0 >> 8) & 0xff] ^ Td3[s3 & 0xff] ^ rk[14];
+ t3 = Td0[s3 >> 24] ^ Td1[(s2 >> 16) & 0xff] ^ Td2[(s1 >> 8) & 0xff] ^ Td3[s0 & 0xff] ^ rk[15];
+ /* round 4: */
+ s0 = Td0[t0 >> 24] ^ Td1[(t3 >> 16) & 0xff] ^ Td2[(t2 >> 8) & 0xff] ^ Td3[t1 & 0xff] ^ rk[16];
+ s1 = Td0[t1 >> 24] ^ Td1[(t0 >> 16) & 0xff] ^ Td2[(t3 >> 8) & 0xff] ^ Td3[t2 & 0xff] ^ rk[17];
+ s2 = Td0[t2 >> 24] ^ Td1[(t1 >> 16) & 0xff] ^ Td2[(t0 >> 8) & 0xff] ^ Td3[t3 & 0xff] ^ rk[18];
+ s3 = Td0[t3 >> 24] ^ Td1[(t2 >> 16) & 0xff] ^ Td2[(t1 >> 8) & 0xff] ^ Td3[t0 & 0xff] ^ rk[19];
+ /* round 5: */
+ t0 = Td0[s0 >> 24] ^ Td1[(s3 >> 16) & 0xff] ^ Td2[(s2 >> 8) & 0xff] ^ Td3[s1 & 0xff] ^ rk[20];
+ t1 = Td0[s1 >> 24] ^ Td1[(s0 >> 16) & 0xff] ^ Td2[(s3 >> 8) & 0xff] ^ Td3[s2 & 0xff] ^ rk[21];
+ t2 = Td0[s2 >> 24] ^ Td1[(s1 >> 16) & 0xff] ^ Td2[(s0 >> 8) & 0xff] ^ Td3[s3 & 0xff] ^ rk[22];
+ t3 = Td0[s3 >> 24] ^ Td1[(s2 >> 16) & 0xff] ^ Td2[(s1 >> 8) & 0xff] ^ Td3[s0 & 0xff] ^ rk[23];
+ /* round 6: */
+ s0 = Td0[t0 >> 24] ^ Td1[(t3 >> 16) & 0xff] ^ Td2[(t2 >> 8) & 0xff] ^ Td3[t1 & 0xff] ^ rk[24];
+ s1 = Td0[t1 >> 24] ^ Td1[(t0 >> 16) & 0xff] ^ Td2[(t3 >> 8) & 0xff] ^ Td3[t2 & 0xff] ^ rk[25];
+ s2 = Td0[t2 >> 24] ^ Td1[(t1 >> 16) & 0xff] ^ Td2[(t0 >> 8) & 0xff] ^ Td3[t3 & 0xff] ^ rk[26];
+ s3 = Td0[t3 >> 24] ^ Td1[(t2 >> 16) & 0xff] ^ Td2[(t1 >> 8) & 0xff] ^ Td3[t0 & 0xff] ^ rk[27];
+ /* round 7: */
+ t0 = Td0[s0 >> 24] ^ Td1[(s3 >> 16) & 0xff] ^ Td2[(s2 >> 8) & 0xff] ^ Td3[s1 & 0xff] ^ rk[28];
+ t1 = Td0[s1 >> 24] ^ Td1[(s0 >> 16) & 0xff] ^ Td2[(s3 >> 8) & 0xff] ^ Td3[s2 & 0xff] ^ rk[29];
+ t2 = Td0[s2 >> 24] ^ Td1[(s1 >> 16) & 0xff] ^ Td2[(s0 >> 8) & 0xff] ^ Td3[s3 & 0xff] ^ rk[30];
+ t3 = Td0[s3 >> 24] ^ Td1[(s2 >> 16) & 0xff] ^ Td2[(s1 >> 8) & 0xff] ^ Td3[s0 & 0xff] ^ rk[31];
+ /* round 8: */
+ s0 = Td0[t0 >> 24] ^ Td1[(t3 >> 16) & 0xff] ^ Td2[(t2 >> 8) & 0xff] ^ Td3[t1 & 0xff] ^ rk[32];
+ s1 = Td0[t1 >> 24] ^ Td1[(t0 >> 16) & 0xff] ^ Td2[(t3 >> 8) & 0xff] ^ Td3[t2 & 0xff] ^ rk[33];
+ s2 = Td0[t2 >> 24] ^ Td1[(t1 >> 16) & 0xff] ^ Td2[(t0 >> 8) & 0xff] ^ Td3[t3 & 0xff] ^ rk[34];
+ s3 = Td0[t3 >> 24] ^ Td1[(t2 >> 16) & 0xff] ^ Td2[(t1 >> 8) & 0xff] ^ Td3[t0 & 0xff] ^ rk[35];
+ /* round 9: */
+ t0 = Td0[s0 >> 24] ^ Td1[(s3 >> 16) & 0xff] ^ Td2[(s2 >> 8) & 0xff] ^ Td3[s1 & 0xff] ^ rk[36];
+ t1 = Td0[s1 >> 24] ^ Td1[(s0 >> 16) & 0xff] ^ Td2[(s3 >> 8) & 0xff] ^ Td3[s2 & 0xff] ^ rk[37];
+ t2 = Td0[s2 >> 24] ^ Td1[(s1 >> 16) & 0xff] ^ Td2[(s0 >> 8) & 0xff] ^ Td3[s3 & 0xff] ^ rk[38];
+ t3 = Td0[s3 >> 24] ^ Td1[(s2 >> 16) & 0xff] ^ Td2[(s1 >> 8) & 0xff] ^ Td3[s0 & 0xff] ^ rk[39];
+ if (Nr > 10) {
+ /* round 10: */
+ s0 = Td0[t0 >> 24] ^ Td1[(t3 >> 16) & 0xff] ^ Td2[(t2 >> 8) & 0xff] ^ Td3[t1 & 0xff] ^ rk[40];
+ s1 = Td0[t1 >> 24] ^ Td1[(t0 >> 16) & 0xff] ^ Td2[(t3 >> 8) & 0xff] ^ Td3[t2 & 0xff] ^ rk[41];
+ s2 = Td0[t2 >> 24] ^ Td1[(t1 >> 16) & 0xff] ^ Td2[(t0 >> 8) & 0xff] ^ Td3[t3 & 0xff] ^ rk[42];
+ s3 = Td0[t3 >> 24] ^ Td1[(t2 >> 16) & 0xff] ^ Td2[(t1 >> 8) & 0xff] ^ Td3[t0 & 0xff] ^ rk[43];
+ /* round 11: */
+ t0 = Td0[s0 >> 24] ^ Td1[(s3 >> 16) & 0xff] ^ Td2[(s2 >> 8) & 0xff] ^ Td3[s1 & 0xff] ^ rk[44];
+ t1 = Td0[s1 >> 24] ^ Td1[(s0 >> 16) & 0xff] ^ Td2[(s3 >> 8) & 0xff] ^ Td3[s2 & 0xff] ^ rk[45];
+ t2 = Td0[s2 >> 24] ^ Td1[(s1 >> 16) & 0xff] ^ Td2[(s0 >> 8) & 0xff] ^ Td3[s3 & 0xff] ^ rk[46];
+ t3 = Td0[s3 >> 24] ^ Td1[(s2 >> 16) & 0xff] ^ Td2[(s1 >> 8) & 0xff] ^ Td3[s0 & 0xff] ^ rk[47];
+ if (Nr > 12) {
+ /* round 12: */
+ s0 = Td0[t0 >> 24] ^ Td1[(t3 >> 16) & 0xff] ^ Td2[(t2 >> 8) & 0xff] ^ Td3[t1 & 0xff] ^ rk[48];
+ s1 = Td0[t1 >> 24] ^ Td1[(t0 >> 16) & 0xff] ^ Td2[(t3 >> 8) & 0xff] ^ Td3[t2 & 0xff] ^ rk[49];
+ s2 = Td0[t2 >> 24] ^ Td1[(t1 >> 16) & 0xff] ^ Td2[(t0 >> 8) & 0xff] ^ Td3[t3 & 0xff] ^ rk[50];
+ s3 = Td0[t3 >> 24] ^ Td1[(t2 >> 16) & 0xff] ^ Td2[(t1 >> 8) & 0xff] ^ Td3[t0 & 0xff] ^ rk[51];
+ /* round 13: */
+ t0 = Td0[s0 >> 24] ^ Td1[(s3 >> 16) & 0xff] ^ Td2[(s2 >> 8) & 0xff] ^ Td3[s1 & 0xff] ^ rk[52];
+ t1 = Td0[s1 >> 24] ^ Td1[(s0 >> 16) & 0xff] ^ Td2[(s3 >> 8) & 0xff] ^ Td3[s2 & 0xff] ^ rk[53];
+ t2 = Td0[s2 >> 24] ^ Td1[(s1 >> 16) & 0xff] ^ Td2[(s0 >> 8) & 0xff] ^ Td3[s3 & 0xff] ^ rk[54];
+ t3 = Td0[s3 >> 24] ^ Td1[(s2 >> 16) & 0xff] ^ Td2[(s1 >> 8) & 0xff] ^ Td3[s0 & 0xff] ^ rk[55];
+ }
+ }
+ rk += Nr << 2;
+#else /* !FULL_UNROLL */
+ /*
+ * Nr - 1 full rounds:
+ */
+ r = Nr >> 1;
+ for (;;) {
+ t0 =
+ Td0[(s0 >> 24) ] ^
+ Td1[(s3 >> 16) & 0xff] ^
+ Td2[(s2 >> 8) & 0xff] ^
+ Td3[(s1 ) & 0xff] ^
+ rk[4];
+ t1 =
+ Td0[(s1 >> 24) ] ^
+ Td1[(s0 >> 16) & 0xff] ^
+ Td2[(s3 >> 8) & 0xff] ^
+ Td3[(s2 ) & 0xff] ^
+ rk[5];
+ t2 =
+ Td0[(s2 >> 24) ] ^
+ Td1[(s1 >> 16) & 0xff] ^
+ Td2[(s0 >> 8) & 0xff] ^
+ Td3[(s3 ) & 0xff] ^
+ rk[6];
+ t3 =
+ Td0[(s3 >> 24) ] ^
+ Td1[(s2 >> 16) & 0xff] ^
+ Td2[(s1 >> 8) & 0xff] ^
+ Td3[(s0 ) & 0xff] ^
+ rk[7];
+
+ rk += 8;
+ if (--r == 0) {
+ break;
+ }
+
+ s0 =
+ Td0[(t0 >> 24) ] ^
+ Td1[(t3 >> 16) & 0xff] ^
+ Td2[(t2 >> 8) & 0xff] ^
+ Td3[(t1 ) & 0xff] ^
+ rk[0];
+ s1 =
+ Td0[(t1 >> 24) ] ^
+ Td1[(t0 >> 16) & 0xff] ^
+ Td2[(t3 >> 8) & 0xff] ^
+ Td3[(t2 ) & 0xff] ^
+ rk[1];
+ s2 =
+ Td0[(t2 >> 24) ] ^
+ Td1[(t1 >> 16) & 0xff] ^
+ Td2[(t0 >> 8) & 0xff] ^
+ Td3[(t3 ) & 0xff] ^
+ rk[2];
+ s3 =
+ Td0[(t3 >> 24) ] ^
+ Td1[(t2 >> 16) & 0xff] ^
+ Td2[(t1 >> 8) & 0xff] ^
+ Td3[(t0 ) & 0xff] ^
+ rk[3];
+ }
+#endif /* ?FULL_UNROLL */
+ /*
+ * apply last round and
+ * map cipher state to byte array block:
+ */
+ s0 =
+ (Td4[(t0 >> 24) ] & 0xff000000) ^
+ (Td4[(t3 >> 16) & 0xff] & 0x00ff0000) ^
+ (Td4[(t2 >> 8) & 0xff] & 0x0000ff00) ^
+ (Td4[(t1 ) & 0xff] & 0x000000ff) ^
+ rk[0];
+ PUTU32(pt , s0);
+ s1 =
+ (Td4[(t1 >> 24) ] & 0xff000000) ^
+ (Td4[(t0 >> 16) & 0xff] & 0x00ff0000) ^
+ (Td4[(t3 >> 8) & 0xff] & 0x0000ff00) ^
+ (Td4[(t2 ) & 0xff] & 0x000000ff) ^
+ rk[1];
+ PUTU32(pt + 4, s1);
+ s2 =
+ (Td4[(t2 >> 24) ] & 0xff000000) ^
+ (Td4[(t1 >> 16) & 0xff] & 0x00ff0000) ^
+ (Td4[(t0 >> 8) & 0xff] & 0x0000ff00) ^
+ (Td4[(t3 ) & 0xff] & 0x000000ff) ^
+ rk[2];
+ PUTU32(pt + 8, s2);
+ s3 =
+ (Td4[(t3 >> 24) ] & 0xff000000) ^
+ (Td4[(t2 >> 16) & 0xff] & 0x00ff0000) ^
+ (Td4[(t1 >> 8) & 0xff] & 0x0000ff00) ^
+ (Td4[(t0 ) & 0xff] & 0x000000ff) ^
+ rk[3];
+ PUTU32(pt + 12, s3);
+}
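+
+/*
+ * Round-trip sketch (an assumed driver, not from this file): decryption
+ * uses the schedule from rijndaelKeySetupDec and inverts rijndaelEncrypt.
+ */
+#if 0	/* example only */
+static void decrypt_roundtrip(const u8 key[16], const u8 ct[16], u8 pt[16]) {
+	u32 rk[4 * (10 + 1)];
+	int Nr = rijndaelKeySetupDec(rk, key, 128);
+	rijndaelDecrypt(rk, Nr, ct, pt);	/* recovers the original plaintext */
+}
+#endif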
diff --git a/rtems/freebsd/crypto/rijndael/rijndael-api-fst.c b/rtems/freebsd/crypto/rijndael/rijndael-api-fst.c
new file mode 100644
index 00000000..30cc96ce
--- /dev/null
+++ b/rtems/freebsd/crypto/rijndael/rijndael-api-fst.c
@@ -0,0 +1,443 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/* $KAME: rijndael-api-fst.c,v 1.10 2001/05/27 09:34:18 itojun Exp $ */
+
+/*
+ * rijndael-api-fst.c v2.3 April '2000
+ *
+ * Optimised ANSI C code
+ *
+ * authors: v1.0: Antoon Bosselaers
+ * v2.0: Vincent Rijmen
+ * v2.1: Vincent Rijmen
+ * v2.2: Vincent Rijmen
+ * v2.3: Paulo Barreto
+ * v2.4: Vincent Rijmen
+ *
+ * This code is placed in the public domain.
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/freebsd/sys/param.h>
+#ifdef _KERNEL
+#include <rtems/freebsd/sys/systm.h>
+#else
+#include <rtems/freebsd/string.h>
+#endif
+
+#include <rtems/freebsd/crypto/rijndael/rijndael_local.h>
+#include <rtems/freebsd/crypto/rijndael/rijndael-api-fst.h>
+
+#ifndef TRUE
+#define TRUE 1
+#endif
+
+typedef u_int8_t BYTE;
+
+int rijndael_makeKey(keyInstance *key, BYTE direction, int keyLen, char *keyMaterial) {
+ u_int8_t cipherKey[RIJNDAEL_MAXKB];
+
+ if (key == NULL) {
+ return BAD_KEY_INSTANCE;
+ }
+
+ if ((direction == DIR_ENCRYPT) || (direction == DIR_DECRYPT)) {
+ key->direction = direction;
+ } else {
+ return BAD_KEY_DIR;
+ }
+
+ if ((keyLen == 128) || (keyLen == 192) || (keyLen == 256)) {
+ key->keyLen = keyLen;
+ } else {
+ return BAD_KEY_MAT;
+ }
+
+ if (keyMaterial != NULL) {
+ memcpy(key->keyMaterial, keyMaterial, keyLen/8);
+ }
+
+ /* initialize key schedule: */
+ memcpy(cipherKey, key->keyMaterial, keyLen/8);
+ if (direction == DIR_ENCRYPT) {
+ key->Nr = rijndaelKeySetupEnc(key->rk, cipherKey, keyLen);
+ } else {
+ key->Nr = rijndaelKeySetupDec(key->rk, cipherKey, keyLen);
+ }
+ rijndaelKeySetupEnc(key->ek, cipherKey, keyLen);
+ return TRUE;
+}
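+
+/*
+ * Usage sketch (an assumed caller, not from this file). Note that despite
+ * the "ASCII" wording in the header comments, keyMaterial is consumed here
+ * as keyLen/8 raw octets:
+ */
+#if 0	/* example only */
+static int make_key_example(keyInstance *ki, char rawkey[16]) {
+	/* 128-bit key, encrypt direction; returns TRUE (1) on success */
+	return rijndael_makeKey(ki, DIR_ENCRYPT, 128, rawkey);
+}
+#endif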
+
+int rijndael_cipherInit(cipherInstance *cipher, BYTE mode, char *IV) {
+ if ((mode == MODE_ECB) || (mode == MODE_CBC) || (mode == MODE_CFB1)) {
+ cipher->mode = mode;
+ } else {
+ return BAD_CIPHER_MODE;
+ }
+ if (IV != NULL) {
+ memcpy(cipher->IV, IV, RIJNDAEL_MAX_IV_SIZE);
+ } else {
+ memset(cipher->IV, 0, RIJNDAEL_MAX_IV_SIZE);
+ }
+ return TRUE;
+}
+
+int rijndael_blockEncrypt(cipherInstance *cipher, keyInstance *key,
+ BYTE *input, int inputLen, BYTE *outBuffer) {
+ int i, k, numBlocks;
+ u_int8_t block[16], iv[4][4];
+
+ if (cipher == NULL ||
+ key == NULL ||
+ key->direction == DIR_DECRYPT) {
+ return BAD_CIPHER_STATE;
+ }
+ if (input == NULL || inputLen <= 0) {
+ return 0; /* nothing to do */
+ }
+
+ numBlocks = inputLen/128;
+
+ switch (cipher->mode) {
+ case MODE_ECB:
+ for (i = numBlocks; i > 0; i--) {
+ rijndaelEncrypt(key->rk, key->Nr, input, outBuffer);
+ input += 16;
+ outBuffer += 16;
+ }
+ break;
+
+ case MODE_CBC:
+#if 1 /*STRICT_ALIGN*/
+ memcpy(block, cipher->IV, 16);
+ memcpy(iv, input, 16);
+ ((u_int32_t*)block)[0] ^= ((u_int32_t*)iv)[0];
+ ((u_int32_t*)block)[1] ^= ((u_int32_t*)iv)[1];
+ ((u_int32_t*)block)[2] ^= ((u_int32_t*)iv)[2];
+ ((u_int32_t*)block)[3] ^= ((u_int32_t*)iv)[3];
+#else
+ ((u_int32_t*)block)[0] = ((u_int32_t*)cipher->IV)[0] ^ ((u_int32_t*)input)[0];
+ ((u_int32_t*)block)[1] = ((u_int32_t*)cipher->IV)[1] ^ ((u_int32_t*)input)[1];
+ ((u_int32_t*)block)[2] = ((u_int32_t*)cipher->IV)[2] ^ ((u_int32_t*)input)[2];
+ ((u_int32_t*)block)[3] = ((u_int32_t*)cipher->IV)[3] ^ ((u_int32_t*)input)[3];
+#endif
+ rijndaelEncrypt(key->rk, key->Nr, block, outBuffer);
+ input += 16;
+ for (i = numBlocks - 1; i > 0; i--) {
+#if 1 /*STRICT_ALIGN*/
+ memcpy(block, outBuffer, 16);
+ memcpy(iv, input, 16);
+ ((u_int32_t*)block)[0] ^= ((u_int32_t*)iv)[0];
+ ((u_int32_t*)block)[1] ^= ((u_int32_t*)iv)[1];
+ ((u_int32_t*)block)[2] ^= ((u_int32_t*)iv)[2];
+ ((u_int32_t*)block)[3] ^= ((u_int32_t*)iv)[3];
+#else
+ ((u_int32_t*)block)[0] = ((u_int32_t*)outBuffer)[0] ^ ((u_int32_t*)input)[0];
+ ((u_int32_t*)block)[1] = ((u_int32_t*)outBuffer)[1] ^ ((u_int32_t*)input)[1];
+ ((u_int32_t*)block)[2] = ((u_int32_t*)outBuffer)[2] ^ ((u_int32_t*)input)[2];
+ ((u_int32_t*)block)[3] = ((u_int32_t*)outBuffer)[3] ^ ((u_int32_t*)input)[3];
+#endif
+ outBuffer += 16;
+ rijndaelEncrypt(key->rk, key->Nr, block, outBuffer);
+ input += 16;
+ }
+ break;
+
+ case MODE_CFB1:
+#if 1 /*STRICT_ALIGN*/
+ memcpy(iv, cipher->IV, 16);
+#else /* !STRICT_ALIGN */
+ *((u_int32_t*)iv[0]) = *((u_int32_t*)(cipher->IV ));
+ *((u_int32_t*)iv[1]) = *((u_int32_t*)(cipher->IV+ 4));
+ *((u_int32_t*)iv[2]) = *((u_int32_t*)(cipher->IV+ 8));
+ *((u_int32_t*)iv[3]) = *((u_int32_t*)(cipher->IV+12));
+#endif /* ?STRICT_ALIGN */
+ for (i = numBlocks; i > 0; i--) {
+ for (k = 0; k < 128; k++) {
+ *((u_int32_t*) block ) = *((u_int32_t*)iv[0]);
+ *((u_int32_t*)(block+ 4)) = *((u_int32_t*)iv[1]);
+ *((u_int32_t*)(block+ 8)) = *((u_int32_t*)iv[2]);
+ *((u_int32_t*)(block+12)) = *((u_int32_t*)iv[3]);
+ rijndaelEncrypt(key->ek, key->Nr, block,
+ block);
+ outBuffer[k/8] ^= (block[0] & 0x80) >> (k & 7);
+ iv[0][0] = (iv[0][0] << 1) | (iv[0][1] >> 7);
+ iv[0][1] = (iv[0][1] << 1) | (iv[0][2] >> 7);
+ iv[0][2] = (iv[0][2] << 1) | (iv[0][3] >> 7);
+ iv[0][3] = (iv[0][3] << 1) | (iv[1][0] >> 7);
+ iv[1][0] = (iv[1][0] << 1) | (iv[1][1] >> 7);
+ iv[1][1] = (iv[1][1] << 1) | (iv[1][2] >> 7);
+ iv[1][2] = (iv[1][2] << 1) | (iv[1][3] >> 7);
+ iv[1][3] = (iv[1][3] << 1) | (iv[2][0] >> 7);
+ iv[2][0] = (iv[2][0] << 1) | (iv[2][1] >> 7);
+ iv[2][1] = (iv[2][1] << 1) | (iv[2][2] >> 7);
+ iv[2][2] = (iv[2][2] << 1) | (iv[2][3] >> 7);
+ iv[2][3] = (iv[2][3] << 1) | (iv[3][0] >> 7);
+ iv[3][0] = (iv[3][0] << 1) | (iv[3][1] >> 7);
+ iv[3][1] = (iv[3][1] << 1) | (iv[3][2] >> 7);
+ iv[3][2] = (iv[3][2] << 1) | (iv[3][3] >> 7);
+ iv[3][3] = (iv[3][3] << 1) | ((outBuffer[k/8] >> (7-(k&7))) & 1);
+ }
+ }
+ break;
+
+ default:
+ return BAD_CIPHER_STATE;
+ }
+
+ return 128*numBlocks;
+}
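+
+/*
+ * Usage sketch (an assumed caller, not from this file): inputLen is in
+ * *bits*, so a whole number of 128-bit blocks must be passed, and the
+ * return value is also a bit count.
+ */
+#if 0	/* example only */
+static int ecb_encrypt_example(cipherInstance *ci, keyInstance *ki,
+    BYTE in[32], BYTE out[32]) {
+	/* two 16-octet blocks -> inputLen = 256 bits; returns 256 on success */
+	return rijndael_blockEncrypt(ci, ki, in, 256, out);
+}
+#endif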
+
+/**
+ * Encrypt data partitioned in octets, using RFC 2040-like padding.
+ *
+ * @param input data to be encrypted (octet sequence)
+ * @param inputOctets input length in octets (not bits)
+ * @param outBuffer encrypted output data
+ *
+ * @return length in octets (not bits) of the encrypted output buffer.
+ */
+int rijndael_padEncrypt(cipherInstance *cipher, keyInstance *key,
+ BYTE *input, int inputOctets, BYTE *outBuffer) {
+ int i, numBlocks, padLen;
+ u_int8_t block[16], *iv, *cp;
+
+ if (cipher == NULL ||
+ key == NULL ||
+ key->direction == DIR_DECRYPT) {
+ return BAD_CIPHER_STATE;
+ }
+ if (input == NULL || inputOctets <= 0) {
+ return 0; /* nothing to do */
+ }
+
+ numBlocks = inputOctets/16;
+
+ switch (cipher->mode) {
+ case MODE_ECB:
+ for (i = numBlocks; i > 0; i--) {
+ rijndaelEncrypt(key->rk, key->Nr, input, outBuffer);
+ input += 16;
+ outBuffer += 16;
+ }
+ padLen = 16 - (inputOctets - 16*numBlocks);
+ if (padLen <= 0 || padLen > 16)
+ return BAD_CIPHER_STATE;
+ memcpy(block, input, 16 - padLen);
+ for (cp = block + 16 - padLen; cp < block + 16; cp++)
+ *cp = padLen;
+ rijndaelEncrypt(key->rk, key->Nr, block, outBuffer);
+ break;
+
+ case MODE_CBC:
+ iv = cipher->IV;
+ for (i = numBlocks; i > 0; i--) {
+ ((u_int32_t*)block)[0] = ((u_int32_t*)input)[0] ^ ((u_int32_t*)iv)[0];
+ ((u_int32_t*)block)[1] = ((u_int32_t*)input)[1] ^ ((u_int32_t*)iv)[1];
+ ((u_int32_t*)block)[2] = ((u_int32_t*)input)[2] ^ ((u_int32_t*)iv)[2];
+ ((u_int32_t*)block)[3] = ((u_int32_t*)input)[3] ^ ((u_int32_t*)iv)[3];
+ rijndaelEncrypt(key->rk, key->Nr, block, outBuffer);
+ iv = outBuffer;
+ input += 16;
+ outBuffer += 16;
+ }
+ padLen = 16 - (inputOctets - 16*numBlocks);
+ if (padLen <= 0 || padLen > 16)
+ return BAD_CIPHER_STATE;
+ for (i = 0; i < 16 - padLen; i++) {
+ block[i] = input[i] ^ iv[i];
+ }
+ for (i = 16 - padLen; i < 16; i++) {
+ block[i] = (BYTE)padLen ^ iv[i];
+ }
+ rijndaelEncrypt(key->rk, key->Nr, block, outBuffer);
+ break;
+
+ default:
+ return BAD_CIPHER_STATE;
+ }
+
+ return 16*(numBlocks + 1);
+}
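+
+/*
+ * Worked padding example (illustrative): with inputOctets == 20,
+ * numBlocks == 1 and padLen == 16 - (20 - 16) == 12, so one full block is
+ * encrypted as-is and the remaining 4 octets are padded with twelve 0x0c
+ * bytes; the function returns 16 * (1 + 1) == 32. A multiple-of-16 input
+ * gains a whole block of 0x10 padding, so the output is always strictly
+ * longer than the input.
+ */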
+
+int rijndael_blockDecrypt(cipherInstance *cipher, keyInstance *key,
+ BYTE *input, int inputLen, BYTE *outBuffer) {
+ int i, k, numBlocks;
+ u_int8_t block[16], iv[4][4];
+
+ if (cipher == NULL ||
+ key == NULL ||
+ (cipher->mode != MODE_CFB1 && key->direction == DIR_ENCRYPT)) {
+ return BAD_CIPHER_STATE;
+ }
+ if (input == NULL || inputLen <= 0) {
+ return 0; /* nothing to do */
+ }
+
+ numBlocks = inputLen/128;
+
+ switch (cipher->mode) {
+ case MODE_ECB:
+ for (i = numBlocks; i > 0; i--) {
+ rijndaelDecrypt(key->rk, key->Nr, input, outBuffer);
+ input += 16;
+ outBuffer += 16;
+ }
+ break;
+
+ case MODE_CBC:
+#if 1 /*STRICT_ALIGN */
+ memcpy(iv, cipher->IV, 16);
+#else
+ *((u_int32_t*)iv[0]) = *((u_int32_t*)(cipher->IV ));
+ *((u_int32_t*)iv[1]) = *((u_int32_t*)(cipher->IV+ 4));
+ *((u_int32_t*)iv[2]) = *((u_int32_t*)(cipher->IV+ 8));
+ *((u_int32_t*)iv[3]) = *((u_int32_t*)(cipher->IV+12));
+#endif
+ for (i = numBlocks; i > 0; i--) {
+ rijndaelDecrypt(key->rk, key->Nr, input, block);
+ ((u_int32_t*)block)[0] ^= *((u_int32_t*)iv[0]);
+ ((u_int32_t*)block)[1] ^= *((u_int32_t*)iv[1]);
+ ((u_int32_t*)block)[2] ^= *((u_int32_t*)iv[2]);
+ ((u_int32_t*)block)[3] ^= *((u_int32_t*)iv[3]);
+#if 1 /*STRICT_ALIGN*/
+ memcpy(iv, input, 16);
+ memcpy(outBuffer, block, 16);
+#else
+ *((u_int32_t*)iv[0]) = ((u_int32_t*)input)[0]; ((u_int32_t*)outBuffer)[0] = ((u_int32_t*)block)[0];
+ *((u_int32_t*)iv[1]) = ((u_int32_t*)input)[1]; ((u_int32_t*)outBuffer)[1] = ((u_int32_t*)block)[1];
+ *((u_int32_t*)iv[2]) = ((u_int32_t*)input)[2]; ((u_int32_t*)outBuffer)[2] = ((u_int32_t*)block)[2];
+ *((u_int32_t*)iv[3]) = ((u_int32_t*)input)[3]; ((u_int32_t*)outBuffer)[3] = ((u_int32_t*)block)[3];
+#endif
+ input += 16;
+ outBuffer += 16;
+ }
+ break;
+
+ case MODE_CFB1:
+#if 1 /*STRICT_ALIGN */
+ memcpy(iv, cipher->IV, 16);
+#else
+ *((u_int32_t*)iv[0]) = *((u_int32_t*)(cipher->IV));
+ *((u_int32_t*)iv[1]) = *((u_int32_t*)(cipher->IV+ 4));
+ *((u_int32_t*)iv[2]) = *((u_int32_t*)(cipher->IV+ 8));
+ *((u_int32_t*)iv[3]) = *((u_int32_t*)(cipher->IV+12));
+#endif
+ for (i = numBlocks; i > 0; i--) {
+ for (k = 0; k < 128; k++) {
+ *((u_int32_t*) block ) = *((u_int32_t*)iv[0]);
+ *((u_int32_t*)(block+ 4)) = *((u_int32_t*)iv[1]);
+ *((u_int32_t*)(block+ 8)) = *((u_int32_t*)iv[2]);
+ *((u_int32_t*)(block+12)) = *((u_int32_t*)iv[3]);
+ rijndaelEncrypt(key->ek, key->Nr, block,
+ block);
+ iv[0][0] = (iv[0][0] << 1) | (iv[0][1] >> 7);
+ iv[0][1] = (iv[0][1] << 1) | (iv[0][2] >> 7);
+ iv[0][2] = (iv[0][2] << 1) | (iv[0][3] >> 7);
+ iv[0][3] = (iv[0][3] << 1) | (iv[1][0] >> 7);
+ iv[1][0] = (iv[1][0] << 1) | (iv[1][1] >> 7);
+ iv[1][1] = (iv[1][1] << 1) | (iv[1][2] >> 7);
+ iv[1][2] = (iv[1][2] << 1) | (iv[1][3] >> 7);
+ iv[1][3] = (iv[1][3] << 1) | (iv[2][0] >> 7);
+ iv[2][0] = (iv[2][0] << 1) | (iv[2][1] >> 7);
+ iv[2][1] = (iv[2][1] << 1) | (iv[2][2] >> 7);
+ iv[2][2] = (iv[2][2] << 1) | (iv[2][3] >> 7);
+ iv[2][3] = (iv[2][3] << 1) | (iv[3][0] >> 7);
+ iv[3][0] = (iv[3][0] << 1) | (iv[3][1] >> 7);
+ iv[3][1] = (iv[3][1] << 1) | (iv[3][2] >> 7);
+ iv[3][2] = (iv[3][2] << 1) | (iv[3][3] >> 7);
+ iv[3][3] = (iv[3][3] << 1) | ((input[k/8] >> (7-(k&7))) & 1);
+ outBuffer[k/8] ^= (block[0] & 0x80) >> (k & 7);
+ }
+ }
+ break;
+
+ default:
+ return BAD_CIPHER_STATE;
+ }
+
+ return 128*numBlocks;
+}
+
+int rijndael_padDecrypt(cipherInstance *cipher, keyInstance *key,
+ BYTE *input, int inputOctets, BYTE *outBuffer) {
+ int i, numBlocks, padLen;
+ u_int8_t block[16];
+ u_int32_t iv[4];
+
+ if (cipher == NULL ||
+ key == NULL ||
+ key->direction == DIR_ENCRYPT) {
+ return BAD_CIPHER_STATE;
+ }
+ if (input == NULL || inputOctets <= 0) {
+ return 0; /* nothing to do */
+ }
+ if (inputOctets % 16 != 0) {
+ return BAD_DATA;
+ }
+
+ numBlocks = inputOctets/16;
+
+ switch (cipher->mode) {
+ case MODE_ECB:
+ /* all blocks but last */
+ for (i = numBlocks - 1; i > 0; i--) {
+ rijndaelDecrypt(key->rk, key->Nr, input, outBuffer);
+ input += 16;
+ outBuffer += 16;
+ }
+ /* last block */
+ rijndaelDecrypt(key->rk, key->Nr, input, block);
+ padLen = block[15];
+		if (padLen <= 0 || padLen > 16) {	/* reject 0, accept a full pad block, as in MODE_CBC below */
+ return BAD_DATA;
+ }
+ for (i = 16 - padLen; i < 16; i++) {
+ if (block[i] != padLen) {
+ return BAD_DATA;
+ }
+ }
+ memcpy(outBuffer, block, 16 - padLen);
+ break;
+
+ case MODE_CBC:
+ memcpy(iv, cipher->IV, 16);
+ /* all blocks but last */
+ for (i = numBlocks - 1; i > 0; i--) {
+ rijndaelDecrypt(key->rk, key->Nr, input, block);
+ ((u_int32_t*)block)[0] ^= iv[0];
+ ((u_int32_t*)block)[1] ^= iv[1];
+ ((u_int32_t*)block)[2] ^= iv[2];
+ ((u_int32_t*)block)[3] ^= iv[3];
+ memcpy(iv, input, 16);
+ memcpy(outBuffer, block, 16);
+ input += 16;
+ outBuffer += 16;
+ }
+ /* last block */
+ rijndaelDecrypt(key->rk, key->Nr, input, block);
+ ((u_int32_t*)block)[0] ^= iv[0];
+ ((u_int32_t*)block)[1] ^= iv[1];
+ ((u_int32_t*)block)[2] ^= iv[2];
+ ((u_int32_t*)block)[3] ^= iv[3];
+ padLen = block[15];
+ if (padLen <= 0 || padLen > 16) {
+ return BAD_DATA;
+ }
+ for (i = 16 - padLen; i < 16; i++) {
+ if (block[i] != padLen) {
+ return BAD_DATA;
+ }
+ }
+ memcpy(outBuffer, block, 16 - padLen);
+ break;
+
+ default:
+ return BAD_CIPHER_STATE;
+ }
+
+ return 16*numBlocks - padLen;
+}
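+
+/*
+ * Round-trip sketch (an assumed driver, not from this file): padEncrypt
+ * followed by padDecrypt with matching keys and IV recovers the original
+ * length and bytes. tmp must hold at least ((len/16) + 1) * 16 octets.
+ */
+#if 0	/* example only */
+static int pad_roundtrip(cipherInstance *ci, keyInstance *ek, keyInstance *dk,
+    BYTE *msg, int len, BYTE *tmp, BYTE *out) {
+	int clen = rijndael_padEncrypt(ci, ek, msg, len, tmp);
+	if (clen < 0)
+		return clen;
+	return rijndael_padDecrypt(ci, dk, tmp, clen, out);	/* == len */
+}
+#endif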
diff --git a/rtems/freebsd/crypto/rijndael/rijndael-api-fst.h b/rtems/freebsd/crypto/rijndael/rijndael-api-fst.h
new file mode 100644
index 00000000..4bc909bd
--- /dev/null
+++ b/rtems/freebsd/crypto/rijndael/rijndael-api-fst.h
@@ -0,0 +1,73 @@
+/* $FreeBSD$ */
+/* $KAME: rijndael-api-fst.h,v 1.6 2001/05/27 00:23:23 itojun Exp $ */
+
+/*
+ * rijndael-api-fst.h v2.3 April '2000
+ *
+ * Optimised ANSI C code
+ *
+ */
+
+#ifndef __RIJNDAEL_API_FST_H
+#define __RIJNDAEL_API_FST_H
+
+#include <rtems/freebsd/crypto/rijndael/rijndael.h>
+
+/* Generic Defines */
+#define DIR_ENCRYPT 0 /* Are we encrypting? */
+#define DIR_DECRYPT 1 /* Are we decrypting? */
+#define MODE_ECB 1 /* Are we ciphering in ECB mode? */
+#define MODE_CBC 2 /* Are we ciphering in CBC mode? */
+#define MODE_CFB1 3 /* Are we ciphering in 1-bit CFB mode? */
+#define BITSPERBLOCK 128 /* Default number of bits in a cipher block */
+
+/* Error Codes */
+#define BAD_KEY_DIR -1 /* Key direction is invalid, e.g., unknown value */
+#define BAD_KEY_MAT -2 /* Key material not of correct length */
+#define BAD_KEY_INSTANCE -3 /* Key passed is not valid */
+#define BAD_CIPHER_MODE -4 /* Params struct passed to cipherInit invalid */
+#define BAD_CIPHER_STATE -5 /* Cipher in wrong state (e.g., not initialized) */
+#define BAD_BLOCK_LENGTH -6
+#define BAD_CIPHER_INSTANCE -7
+#define BAD_DATA -8 /* Data contents are invalid, e.g., invalid padding */
+#define BAD_OTHER -9 /* Unknown error */
+
+/* Algorithm-specific Defines */
+#define RIJNDAEL_MAX_KEY_SIZE 64 /* # of ASCII characters needed to represent a key */
+#define RIJNDAEL_MAX_IV_SIZE 16 /* # bytes needed to represent an IV */
+
+/* Typedefs */
+
+/* The structure for key information */
+typedef struct {
+ u_int8_t direction; /* Key used for encrypting or decrypting? */
+ int keyLen; /* Length of the key */
+ char keyMaterial[RIJNDAEL_MAX_KEY_SIZE+1]; /* Raw key data in ASCII, e.g., user input or KAT values */
+ int Nr; /* key-length-dependent number of rounds */
+ u_int32_t rk[4*(RIJNDAEL_MAXNR + 1)]; /* key schedule */
+ u_int32_t ek[4*(RIJNDAEL_MAXNR + 1)]; /* CFB1 key schedule (encryption only) */
+} keyInstance;
+
+/* The structure for cipher information */
+typedef struct { /* changed order of the components */
+ u_int8_t mode; /* MODE_ECB, MODE_CBC, or MODE_CFB1 */
+ u_int8_t IV[RIJNDAEL_MAX_IV_SIZE]; /* A possible Initialization Vector for ciphering */
+} cipherInstance;
+
+/* Function prototypes */
+
+int rijndael_makeKey(keyInstance *, u_int8_t, int, char *);
+
+int rijndael_cipherInit(cipherInstance *, u_int8_t, char *);
+
+int rijndael_blockEncrypt(cipherInstance *, keyInstance *, u_int8_t *, int,
+ u_int8_t *);
+int rijndael_padEncrypt(cipherInstance *, keyInstance *, u_int8_t *, int,
+ u_int8_t *);
+
+int rijndael_blockDecrypt(cipherInstance *, keyInstance *, u_int8_t *, int,
+ u_int8_t *);
+int rijndael_padDecrypt(cipherInstance *, keyInstance *, u_int8_t *, int,
+ u_int8_t *);
+
+#endif /* __RIJNDAEL_API_FST_H */
diff --git a/rtems/freebsd/crypto/rijndael/rijndael-api.c b/rtems/freebsd/crypto/rijndael/rijndael-api.c
new file mode 100644
index 00000000..141a22f9
--- /dev/null
+++ b/rtems/freebsd/crypto/rijndael/rijndael-api.c
@@ -0,0 +1,61 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/* $KAME: rijndael.c,v 1.3 2003/08/28 14:20:22 itojun Exp $ */
+
+/*
+ * rijndael-alg-fst.c
+ *
+ * @version 3.0 (December 2000)
+ *
+ * Optimised ANSI C code for the Rijndael cipher (now AES)
+ *
+ * @author Vincent Rijmen <vincent.rijmen@esat.kuleuven.ac.be>
+ * @author Antoon Bosselaers <antoon.bosselaers@esat.kuleuven.ac.be>
+ * @author Paulo Barreto <paulo.barreto@terra.com.br>
+ *
+ * This code is hereby placed in the public domain.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ''AS IS'' AND ANY EXPRESS
+ * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/freebsd/sys/types.h>
+#ifdef _KERNEL
+#include <rtems/freebsd/sys/systm.h>
+#endif
+
+#include <rtems/freebsd/crypto/rijndael/rijndael.h>
+
+void
+rijndael_set_key(rijndael_ctx *ctx, const u_char *key, int bits)
+{
+
+ ctx->Nr = rijndaelKeySetupEnc(ctx->ek, key, bits);
+ rijndaelKeySetupDec(ctx->dk, key, bits);
+}
+
+void
+rijndael_decrypt(const rijndael_ctx *ctx, const u_char *src, u_char *dst)
+{
+
+ rijndaelDecrypt(ctx->dk, ctx->Nr, src, dst);
+}
+
+void
+rijndael_encrypt(const rijndael_ctx *ctx, const u_char *src, u_char *dst)
+{
+
+ rijndaelEncrypt(ctx->ek, ctx->Nr, src, dst);
+}
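+
+/*
+ * Usage sketch (an assumed caller, not from this file): this thin wrapper
+ * keeps both key schedules in one context, so a single rijndael_set_key()
+ * serves both directions.
+ */
+#if 0	/* example only */
+static void ctx_example(const u_char key[16], const u_char in[16],
+    u_char out[16], u_char back[16]) {
+	rijndael_ctx ctx;
+	rijndael_set_key(&ctx, key, 128);	/* key length in bits */
+	rijndael_encrypt(&ctx, in, out);
+	rijndael_decrypt(&ctx, out, back);	/* back == in */
+}
+#endif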
diff --git a/rtems/freebsd/crypto/rijndael/rijndael.h b/rtems/freebsd/crypto/rijndael/rijndael.h
new file mode 100644
index 00000000..da855284
--- /dev/null
+++ b/rtems/freebsd/crypto/rijndael/rijndael.h
@@ -0,0 +1,55 @@
+/* $KAME: rijndael.h,v 1.6 2003/08/28 08:36:32 itojun Exp $ */
+/* $FreeBSD$ */
+
+/**
+ * rijndael-alg-fst.h
+ *
+ * @version 3.0 (December 2000)
+ *
+ * Optimised ANSI C code for the Rijndael cipher (now AES)
+ *
+ * @author Vincent Rijmen <vincent.rijmen@esat.kuleuven.ac.be>
+ * @author Antoon Bosselaers <antoon.bosselaers@esat.kuleuven.ac.be>
+ * @author Paulo Barreto <paulo.barreto@terra.com.br>
+ *
+ * This code is hereby placed in the public domain.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ''AS IS'' AND ANY EXPRESS
+ * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __RIJNDAEL_H
+#define __RIJNDAEL_H
+
+#define RIJNDAEL_MAXKC (256/32)
+#define RIJNDAEL_MAXKB (256/8)
+#define RIJNDAEL_MAXNR 14
+
+typedef struct {
+ int decrypt;
+ int Nr; /* key-length-dependent number of rounds */
+ uint32_t ek[4 * (RIJNDAEL_MAXNR + 1)]; /* encrypt key schedule */
+ uint32_t dk[4 * (RIJNDAEL_MAXNR + 1)]; /* decrypt key schedule */
+} rijndael_ctx;
+
+void rijndael_set_key(rijndael_ctx *, const u_char *, int);
+void rijndael_decrypt(const rijndael_ctx *, const u_char *, u_char *);
+void rijndael_encrypt(const rijndael_ctx *, const u_char *, u_char *);
+
+int rijndaelKeySetupEnc(u_int32_t [/*4*(Nr+1)*/], const u_int8_t [], int);
+int rijndaelKeySetupDec(u_int32_t [/*4*(Nr+1)*/], const u_int8_t [], int);
+void rijndaelEncrypt(const u_int32_t [/*4*(Nr+1)*/], int,
+ const u_int8_t[16], u_int8_t [16]);
+void rijndaelDecrypt(const u_int32_t [/*4*(Nr+1)*/], int,
+ const u_int8_t [16], u_int8_t [16]);
+
+#endif /* __RIJNDAEL_H */
diff --git a/rtems/freebsd/crypto/rijndael/rijndael_local.h b/rtems/freebsd/crypto/rijndael/rijndael_local.h
new file mode 100644
index 00000000..7c765a2b
--- /dev/null
+++ b/rtems/freebsd/crypto/rijndael/rijndael_local.h
@@ -0,0 +1,7 @@
+/* $KAME: rijndael_local.h,v 1.5 2003/08/28 08:37:24 itojun Exp $ */
+/* $FreeBSD$ */
+
+/* This file should not be used from outside the rijndael implementation. */
+typedef u_int8_t u8;
+typedef u_int16_t u16;
+typedef u_int32_t u32;
diff --git a/rtems/freebsd/crypto/sha1.c b/rtems/freebsd/crypto/sha1.c
new file mode 100644
index 00000000..ed6a85cb
--- /dev/null
+++ b/rtems/freebsd/crypto/sha1.c
@@ -0,0 +1,278 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/* $KAME: sha1.c,v 1.5 2000/11/08 06:13:08 itojun Exp $ */
+/*
+ * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the project nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * FIPS pub 180-1: Secure Hash Algorithm (SHA-1)
+ * based on: http://csrc.nist.gov/fips/fip180-1.txt
+ * implemented by Jun-ichiro itojun Itoh <itojun@itojun.org>
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/freebsd/sys/types.h>
+#include <rtems/freebsd/sys/time.h>
+#include <rtems/freebsd/sys/systm.h>
+
+#include <rtems/freebsd/crypto/sha1.h>
+
+/* sanity check */
+#if BYTE_ORDER != BIG_ENDIAN
+# if BYTE_ORDER != LITTLE_ENDIAN
+# define unsupported 1
+# endif
+#endif
+
+#ifndef unsupported
+
+/* constant table */
+static u_int32_t _K[] = { 0x5a827999, 0x6ed9eba1, 0x8f1bbcdc, 0xca62c1d6 };
+#define K(t) _K[(t) / 20]
+
+#define F0(b, c, d) (((b) & (c)) | ((~(b)) & (d)))
+#define F1(b, c, d) (((b) ^ (c)) ^ (d))
+#define F2(b, c, d) (((b) & (c)) | ((b) & (d)) | ((c) & (d)))
+#define F3(b, c, d) (((b) ^ (c)) ^ (d))
+
+#define S(n, x) (((x) << (n)) | ((x) >> (32 - (n))))
+
+#define H(n) (ctxt->h.b32[(n)])
+#define COUNT (ctxt->count)
+#define BCOUNT (ctxt->c.b64[0] / 8)
+#define W(n) (ctxt->m.b32[(n)])
+
+#define PUTBYTE(x) { \
+ ctxt->m.b8[(COUNT % 64)] = (x); \
+ COUNT++; \
+ COUNT %= 64; \
+ ctxt->c.b64[0] += 8; \
+ if (COUNT % 64 == 0) \
+ sha1_step(ctxt); \
+ }
+
+#define PUTPAD(x) { \
+ ctxt->m.b8[(COUNT % 64)] = (x); \
+ COUNT++; \
+ COUNT %= 64; \
+ if (COUNT % 64 == 0) \
+ sha1_step(ctxt); \
+ }
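+/*
+ * PUTBYTE and PUTPAD differ in one respect: PUTBYTE advances the
+ * 64-bit message bit counter (c.b64[0]), while PUTPAD does not,
+ * because padding bytes do not count toward the hashed message length.
+ */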
+
+static void sha1_step(struct sha1_ctxt *);
+
+static void
+sha1_step(ctxt)
+ struct sha1_ctxt *ctxt;
+{
+ u_int32_t a, b, c, d, e;
+ size_t t, s;
+ u_int32_t tmp;
+
+#if BYTE_ORDER == LITTLE_ENDIAN
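+	/*
+	 * Byte-swap each 32-bit word of the message block so that the
+	 * big-endian SHA-1 word accesses below see the correct values.
+	 */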
+ struct sha1_ctxt tctxt;
+ bcopy(&ctxt->m.b8[0], &tctxt.m.b8[0], 64);
+ ctxt->m.b8[0] = tctxt.m.b8[3]; ctxt->m.b8[1] = tctxt.m.b8[2];
+ ctxt->m.b8[2] = tctxt.m.b8[1]; ctxt->m.b8[3] = tctxt.m.b8[0];
+ ctxt->m.b8[4] = tctxt.m.b8[7]; ctxt->m.b8[5] = tctxt.m.b8[6];
+ ctxt->m.b8[6] = tctxt.m.b8[5]; ctxt->m.b8[7] = tctxt.m.b8[4];
+ ctxt->m.b8[8] = tctxt.m.b8[11]; ctxt->m.b8[9] = tctxt.m.b8[10];
+ ctxt->m.b8[10] = tctxt.m.b8[9]; ctxt->m.b8[11] = tctxt.m.b8[8];
+ ctxt->m.b8[12] = tctxt.m.b8[15]; ctxt->m.b8[13] = tctxt.m.b8[14];
+ ctxt->m.b8[14] = tctxt.m.b8[13]; ctxt->m.b8[15] = tctxt.m.b8[12];
+ ctxt->m.b8[16] = tctxt.m.b8[19]; ctxt->m.b8[17] = tctxt.m.b8[18];
+ ctxt->m.b8[18] = tctxt.m.b8[17]; ctxt->m.b8[19] = tctxt.m.b8[16];
+ ctxt->m.b8[20] = tctxt.m.b8[23]; ctxt->m.b8[21] = tctxt.m.b8[22];
+ ctxt->m.b8[22] = tctxt.m.b8[21]; ctxt->m.b8[23] = tctxt.m.b8[20];
+ ctxt->m.b8[24] = tctxt.m.b8[27]; ctxt->m.b8[25] = tctxt.m.b8[26];
+ ctxt->m.b8[26] = tctxt.m.b8[25]; ctxt->m.b8[27] = tctxt.m.b8[24];
+ ctxt->m.b8[28] = tctxt.m.b8[31]; ctxt->m.b8[29] = tctxt.m.b8[30];
+ ctxt->m.b8[30] = tctxt.m.b8[29]; ctxt->m.b8[31] = tctxt.m.b8[28];
+ ctxt->m.b8[32] = tctxt.m.b8[35]; ctxt->m.b8[33] = tctxt.m.b8[34];
+ ctxt->m.b8[34] = tctxt.m.b8[33]; ctxt->m.b8[35] = tctxt.m.b8[32];
+ ctxt->m.b8[36] = tctxt.m.b8[39]; ctxt->m.b8[37] = tctxt.m.b8[38];
+ ctxt->m.b8[38] = tctxt.m.b8[37]; ctxt->m.b8[39] = tctxt.m.b8[36];
+ ctxt->m.b8[40] = tctxt.m.b8[43]; ctxt->m.b8[41] = tctxt.m.b8[42];
+ ctxt->m.b8[42] = tctxt.m.b8[41]; ctxt->m.b8[43] = tctxt.m.b8[40];
+ ctxt->m.b8[44] = tctxt.m.b8[47]; ctxt->m.b8[45] = tctxt.m.b8[46];
+ ctxt->m.b8[46] = tctxt.m.b8[45]; ctxt->m.b8[47] = tctxt.m.b8[44];
+ ctxt->m.b8[48] = tctxt.m.b8[51]; ctxt->m.b8[49] = tctxt.m.b8[50];
+ ctxt->m.b8[50] = tctxt.m.b8[49]; ctxt->m.b8[51] = tctxt.m.b8[48];
+ ctxt->m.b8[52] = tctxt.m.b8[55]; ctxt->m.b8[53] = tctxt.m.b8[54];
+ ctxt->m.b8[54] = tctxt.m.b8[53]; ctxt->m.b8[55] = tctxt.m.b8[52];
+ ctxt->m.b8[56] = tctxt.m.b8[59]; ctxt->m.b8[57] = tctxt.m.b8[58];
+ ctxt->m.b8[58] = tctxt.m.b8[57]; ctxt->m.b8[59] = tctxt.m.b8[56];
+ ctxt->m.b8[60] = tctxt.m.b8[63]; ctxt->m.b8[61] = tctxt.m.b8[62];
+ ctxt->m.b8[62] = tctxt.m.b8[61]; ctxt->m.b8[63] = tctxt.m.b8[60];
+#endif
+
+ a = H(0); b = H(1); c = H(2); d = H(3); e = H(4);
+
+ for (t = 0; t < 20; t++) {
+ s = t & 0x0f;
+ if (t >= 16) {
+ W(s) = S(1, W((s+13) & 0x0f) ^ W((s+8) & 0x0f) ^ W((s+2) & 0x0f) ^ W(s));
+ }
+ tmp = S(5, a) + F0(b, c, d) + e + W(s) + K(t);
+ e = d; d = c; c = S(30, b); b = a; a = tmp;
+ }
+ for (t = 20; t < 40; t++) {
+ s = t & 0x0f;
+ W(s) = S(1, W((s+13) & 0x0f) ^ W((s+8) & 0x0f) ^ W((s+2) & 0x0f) ^ W(s));
+ tmp = S(5, a) + F1(b, c, d) + e + W(s) + K(t);
+ e = d; d = c; c = S(30, b); b = a; a = tmp;
+ }
+ for (t = 40; t < 60; t++) {
+ s = t & 0x0f;
+ W(s) = S(1, W((s+13) & 0x0f) ^ W((s+8) & 0x0f) ^ W((s+2) & 0x0f) ^ W(s));
+ tmp = S(5, a) + F2(b, c, d) + e + W(s) + K(t);
+ e = d; d = c; c = S(30, b); b = a; a = tmp;
+ }
+ for (t = 60; t < 80; t++) {
+ s = t & 0x0f;
+ W(s) = S(1, W((s+13) & 0x0f) ^ W((s+8) & 0x0f) ^ W((s+2) & 0x0f) ^ W(s));
+ tmp = S(5, a) + F3(b, c, d) + e + W(s) + K(t);
+ e = d; d = c; c = S(30, b); b = a; a = tmp;
+ }
+
+ H(0) = H(0) + a;
+ H(1) = H(1) + b;
+ H(2) = H(2) + c;
+ H(3) = H(3) + d;
+ H(4) = H(4) + e;
+
+ bzero(&ctxt->m.b8[0], 64);
+}
+
+/*------------------------------------------------------------*/
+
+void
+sha1_init(ctxt)
+ struct sha1_ctxt *ctxt;
+{
+ bzero(ctxt, sizeof(struct sha1_ctxt));
+ H(0) = 0x67452301;
+ H(1) = 0xefcdab89;
+ H(2) = 0x98badcfe;
+ H(3) = 0x10325476;
+ H(4) = 0xc3d2e1f0;
+}
+
+void
+sha1_pad(ctxt)
+ struct sha1_ctxt *ctxt;
+{
+ size_t padlen; /*pad length in bytes*/
+ size_t padstart;
+
+ PUTPAD(0x80);
+
+ padstart = COUNT % 64;
+ padlen = 64 - padstart;
+ if (padlen < 8) {
+ bzero(&ctxt->m.b8[padstart], padlen);
+ COUNT += padlen;
+ COUNT %= 64;
+ sha1_step(ctxt);
+ padstart = COUNT % 64; /* should be 0 */
+ padlen = 64 - padstart; /* should be 64 */
+ }
+ bzero(&ctxt->m.b8[padstart], padlen - 8);
+ COUNT += (padlen - 8);
+ COUNT %= 64;
+#if BYTE_ORDER == BIG_ENDIAN
+ PUTPAD(ctxt->c.b8[0]); PUTPAD(ctxt->c.b8[1]);
+ PUTPAD(ctxt->c.b8[2]); PUTPAD(ctxt->c.b8[3]);
+ PUTPAD(ctxt->c.b8[4]); PUTPAD(ctxt->c.b8[5]);
+ PUTPAD(ctxt->c.b8[6]); PUTPAD(ctxt->c.b8[7]);
+#else
+ PUTPAD(ctxt->c.b8[7]); PUTPAD(ctxt->c.b8[6]);
+ PUTPAD(ctxt->c.b8[5]); PUTPAD(ctxt->c.b8[4]);
+ PUTPAD(ctxt->c.b8[3]); PUTPAD(ctxt->c.b8[2]);
+ PUTPAD(ctxt->c.b8[1]); PUTPAD(ctxt->c.b8[0]);
+#endif
+}
+
+void
+sha1_loop(ctxt, input, len)
+ struct sha1_ctxt *ctxt;
+ const u_int8_t *input;
+ size_t len;
+{
+ size_t gaplen;
+ size_t gapstart;
+ size_t off;
+ size_t copysiz;
+
+ off = 0;
+
+ while (off < len) {
+ gapstart = COUNT % 64;
+ gaplen = 64 - gapstart;
+
+ copysiz = (gaplen < len - off) ? gaplen : len - off;
+ bcopy(&input[off], &ctxt->m.b8[gapstart], copysiz);
+ COUNT += copysiz;
+ COUNT %= 64;
+ ctxt->c.b64[0] += copysiz * 8;
+ if (COUNT % 64 == 0)
+ sha1_step(ctxt);
+ off += copysiz;
+ }
+}
+
+void
+sha1_result(ctxt, digest0)
+ struct sha1_ctxt *ctxt;
+ caddr_t digest0;
+{
+ u_int8_t *digest;
+
+ digest = (u_int8_t *)digest0;
+ sha1_pad(ctxt);
+#if BYTE_ORDER == BIG_ENDIAN
+ bcopy(&ctxt->h.b8[0], digest, 20);
+#else
+ digest[0] = ctxt->h.b8[3]; digest[1] = ctxt->h.b8[2];
+ digest[2] = ctxt->h.b8[1]; digest[3] = ctxt->h.b8[0];
+ digest[4] = ctxt->h.b8[7]; digest[5] = ctxt->h.b8[6];
+ digest[6] = ctxt->h.b8[5]; digest[7] = ctxt->h.b8[4];
+ digest[8] = ctxt->h.b8[11]; digest[9] = ctxt->h.b8[10];
+ digest[10] = ctxt->h.b8[9]; digest[11] = ctxt->h.b8[8];
+ digest[12] = ctxt->h.b8[15]; digest[13] = ctxt->h.b8[14];
+ digest[14] = ctxt->h.b8[13]; digest[15] = ctxt->h.b8[12];
+ digest[16] = ctxt->h.b8[19]; digest[17] = ctxt->h.b8[18];
+ digest[18] = ctxt->h.b8[17]; digest[19] = ctxt->h.b8[16];
+#endif
+}
+
+#endif /*unsupported*/
diff --git a/rtems/freebsd/crypto/sha1.h b/rtems/freebsd/crypto/sha1.h
new file mode 100644
index 00000000..18aedda0
--- /dev/null
+++ b/rtems/freebsd/crypto/sha1.h
@@ -0,0 +1,72 @@
+/* $FreeBSD$ */
+/* $KAME: sha1.h,v 1.5 2000/03/27 04:36:23 sumikawa Exp $ */
+
+/*
+ * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the project nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+/*
+ * FIPS pub 180-1: Secure Hash Algorithm (SHA-1)
+ * based on: http://csrc.nist.gov/fips/fip180-1.txt
+ * implemented by Jun-ichiro itojun Itoh <itojun@itojun.org>
+ */
+
+#ifndef _NETINET6_SHA1_HH_
+#define _NETINET6_SHA1_HH_
+
+struct sha1_ctxt {
+	union {
+		u_int8_t	b8[20];
+		u_int32_t	b32[5];
+	} h;			/* intermediate hash state */
+	union {
+		u_int8_t	b8[8];
+		u_int64_t	b64[1];
+	} c;			/* message length in bits */
+	union {
+		u_int8_t	b8[64];
+		u_int32_t	b32[16];
+	} m;			/* current 512-bit message block */
+	u_int8_t	count;	/* bytes buffered in m, modulo 64 */
+};
+
+#ifdef _KERNEL
+extern void sha1_init(struct sha1_ctxt *);
+extern void sha1_pad(struct sha1_ctxt *);
+extern void sha1_loop(struct sha1_ctxt *, const u_int8_t *, size_t);
+extern void sha1_result(struct sha1_ctxt *, caddr_t);
+
+/*
+ * Compatibility with other SHA-1 implementations.  Note that
+ * SHA1Final() takes (digest, context), the reverse of sha1_result().
+ */
+typedef struct sha1_ctxt SHA1_CTX;
+#define SHA1Init(x) sha1_init((x))
+#define SHA1Update(x, y, z) sha1_loop((x), (y), (z))
+#define SHA1Final(x, y) sha1_result((y), (x))
+#endif /* _KERNEL */
+
+#define SHA1_RESULTLEN (160/8)
+
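+/*
+ * Illustrative usage sketch (kept out of compilation with #if 0):
+ * sha1_loop() may be called any number of times before sha1_result()
+ * finalizes the hash and writes SHA1_RESULTLEN (20) bytes.  The names
+ * data/datalen are placeholders for the caller's buffer and length.
+ */
+#if 0
+	struct sha1_ctxt ctx;
+	u_int8_t digest[SHA1_RESULTLEN];
+
+	sha1_init(&ctx);
+	sha1_loop(&ctx, data, datalen);	/* const u_int8_t *data; size_t datalen */
+	sha1_result(&ctx, (caddr_t)digest);
+#endif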
+#endif /*_NETINET6_SHA1_HH_*/
diff --git a/rtems/freebsd/crypto/sha2/sha2.c b/rtems/freebsd/crypto/sha2/sha2.c
new file mode 100644
index 00000000..abd1dec4
--- /dev/null
+++ b/rtems/freebsd/crypto/sha2/sha2.c
@@ -0,0 +1,1054 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/* $KAME: sha2.c,v 1.8 2001/11/08 01:07:52 itojun Exp $ */
+
+/*
+ * sha2.c
+ *
+ * Version 1.0.0beta1
+ *
+ * Written by Aaron D. Gifford <me@aarongifford.com>
+ *
+ * Copyright 2000 Aaron D. Gifford. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the copyright holder nor the names of contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR(S) AND CONTRIBUTOR(S) ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR(S) OR CONTRIBUTOR(S) BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/freebsd/sys/types.h>
+#include <rtems/freebsd/sys/time.h>
+#ifdef _KERNEL
+#include <rtems/freebsd/sys/systm.h>
+#else
+#include <rtems/freebsd/string.h>
+#endif
+#include <rtems/freebsd/machine/endian.h>
+#include <rtems/freebsd/crypto/sha2/sha2.h>
+
+/*
+ * ASSERT NOTE:
+ * Some sanity checking code is included using assert(). On my FreeBSD
+ * system, this additional code can be removed by compiling with NDEBUG
+ * defined. Check your own system's manpage on assert() to see how to
+ * compile WITHOUT the sanity checking code on your system.
+ *
+ * UNROLLED TRANSFORM LOOP NOTE:
+ * You can define SHA2_UNROLL_TRANSFORM to use the unrolled transform
+ * loop version for the hash transform rounds (defined using macros
+ * later in this file). Either define on the command line, for example:
+ *
+ * cc -DSHA2_UNROLL_TRANSFORM -o sha2 sha2.c sha2prog.c
+ *
+ * or define below:
+ *
+ * #define SHA2_UNROLL_TRANSFORM
+ *
+ */
+
+#if defined(__bsdi__) || defined(__FreeBSD__)
+#define assert(x)
+#endif
+
+
+/*** SHA-256/384/512 Machine Architecture Definitions *****************/
+/*
+ * BYTE_ORDER NOTE:
+ *
+ * Please make sure that your system defines BYTE_ORDER. If your
+ * architecture is little-endian, make sure it also defines
+ * LITTLE_ENDIAN and that the two (BYTE_ORDER and LITTLE_ENDIAN) are
+ * equivalent.
+ *
+ * If your system does not define the above, then you can do so by
+ * hand like this:
+ *
+ * #define LITTLE_ENDIAN 1234
+ * #define BIG_ENDIAN 4321
+ *
+ * And for little-endian machines, add:
+ *
+ * #define BYTE_ORDER LITTLE_ENDIAN
+ *
+ * Or for big-endian machines:
+ *
+ * #define BYTE_ORDER BIG_ENDIAN
+ *
+ * The FreeBSD machine this was written on defines BYTE_ORDER
+ * appropriately by including <sys/types.h> (which in turn includes
+ * <machine/endian.h> where the appropriate definitions are actually
+ * made).
+ */
+#if !defined(BYTE_ORDER) || (BYTE_ORDER != LITTLE_ENDIAN && BYTE_ORDER != BIG_ENDIAN)
+#error Define BYTE_ORDER to be equal to either LITTLE_ENDIAN or BIG_ENDIAN
+#endif
+
+/*
+ * Define the following sha2_* types to types of the correct length on
+ * the native architecture. Most BSD systems and Linux define u_intXX_t
+ * types. Machines with very recent ANSI C headers can use the
+ * uintXX_t definitions from inttypes.h by defining SHA2_USE_INTTYPES_H
+ * during compile or in the sha2.h header file.
+ *
+ * Machines that support neither u_intXX_t nor inttypes.h's uintXX_t
+ * will need to define these three typedefs below (and the appropriate
+ * ones in sha2.h too) by hand according to their system architecture.
+ *
+ * Thank you, Jun-ichiro itojun Hagino, for suggesting using u_intXX_t
+ * types and pointing out recent ANSI C support for uintXX_t in inttypes.h.
+ */
+#if 0 /*def SHA2_USE_INTTYPES_H*/
+
+typedef uint8_t sha2_byte; /* Exactly 1 byte */
+typedef uint32_t sha2_word32; /* Exactly 4 bytes */
+typedef uint64_t sha2_word64; /* Exactly 8 bytes */
+
+#else /* SHA2_USE_INTTYPES_H */
+
+typedef u_int8_t sha2_byte; /* Exactly 1 byte */
+typedef u_int32_t sha2_word32; /* Exactly 4 bytes */
+typedef u_int64_t sha2_word64; /* Exactly 8 bytes */
+
+#endif /* SHA2_USE_INTTYPES_H */
+
+
+/*** SHA-256/384/512 Various Length Definitions ***********************/
+/* NOTE: Most of these are in sha2.h */
+#define SHA256_SHORT_BLOCK_LENGTH (SHA256_BLOCK_LENGTH - 8)
+#define SHA384_SHORT_BLOCK_LENGTH (SHA384_BLOCK_LENGTH - 16)
+#define SHA512_SHORT_BLOCK_LENGTH (SHA512_BLOCK_LENGTH - 16)
+
+
+/*** ENDIAN REVERSAL MACROS *******************************************/
+#if BYTE_ORDER == LITTLE_ENDIAN
+#define REVERSE32(w,x) { \
+ sha2_word32 tmp = (w); \
+ tmp = (tmp >> 16) | (tmp << 16); \
+ (x) = ((tmp & 0xff00ff00UL) >> 8) | ((tmp & 0x00ff00ffUL) << 8); \
+}
+#define REVERSE64(w,x) { \
+ sha2_word64 tmp = (w); \
+ tmp = (tmp >> 32) | (tmp << 32); \
+ tmp = ((tmp & 0xff00ff00ff00ff00ULL) >> 8) | \
+ ((tmp & 0x00ff00ff00ff00ffULL) << 8); \
+ (x) = ((tmp & 0xffff0000ffff0000ULL) >> 16) | \
+ ((tmp & 0x0000ffff0000ffffULL) << 16); \
+}
+#endif /* BYTE_ORDER == LITTLE_ENDIAN */
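+/*
+ * Example: REVERSE32(0x11223344UL, x) stores 0x44332211UL in x;
+ * REVERSE64 performs the same byte reversal on a 64-bit word.
+ */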
+
+/*
+ * Macro for incrementally adding the unsigned 64-bit integer n to the
+ * unsigned 128-bit integer (represented using a two-element array of
+ * 64-bit words):
+ */
+#define ADDINC128(w,n) { \
+ (w)[0] += (sha2_word64)(n); \
+ if ((w)[0] < (n)) { \
+ (w)[1]++; \
+ } \
+}
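+/*
+ * Example: with w[0] == 0xffffffffffffffffULL and w[1] == 0,
+ * ADDINC128(w, 1) leaves w[0] == 0 and w[1] == 1 -- the wrap-around
+ * of the low word is detected by the (w)[0] < (n) test and carried
+ * into the high word.
+ */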
+
+/*** THE SIX LOGICAL FUNCTIONS ****************************************/
+/*
+ * Bit shifting and rotation (used by the six SHA-XYZ logical functions):
+ *
+ * NOTE: The naming of R and S appears backwards here (R is a SHIFT and
+ * S is a ROTATION) because the SHA-256/384/512 description document
+ * (see http://csrc.nist.gov/cryptval/shs/sha256-384-512.pdf) uses this
+ * same "backwards" definition.
+ */
+/* Shift-right (used in SHA-256, SHA-384, and SHA-512): */
+#define R(b,x) ((x) >> (b))
+/* 32-bit Rotate-right (used in SHA-256): */
+#define S32(b,x) (((x) >> (b)) | ((x) << (32 - (b))))
+/* 64-bit Rotate-right (used in SHA-384 and SHA-512): */
+#define S64(b,x) (((x) >> (b)) | ((x) << (64 - (b))))
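+/*
+ * Example: S32(8, 0x12345678UL) == 0x78123456UL (rotate right by 8),
+ * whereas R(8, 0x12345678UL) == 0x00123456UL (plain shift right).
+ */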
+
+/* Two of six logical functions used in SHA-256, SHA-384, and SHA-512: */
+#define Ch(x,y,z) (((x) & (y)) ^ ((~(x)) & (z)))
+#define Maj(x,y,z) (((x) & (y)) ^ ((x) & (z)) ^ ((y) & (z)))
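+/*
+ * Ch(x,y,z) selects, bit by bit, y where x is 1 and z where x is 0;
+ * Maj(x,y,z) is the bitwise majority vote of its three arguments.
+ */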
+
+/* Four of six logical functions used in SHA-256: */
+#define Sigma0_256(x) (S32(2, (x)) ^ S32(13, (x)) ^ S32(22, (x)))
+#define Sigma1_256(x) (S32(6, (x)) ^ S32(11, (x)) ^ S32(25, (x)))
+#define sigma0_256(x) (S32(7, (x)) ^ S32(18, (x)) ^ R(3 , (x)))
+#define sigma1_256(x) (S32(17, (x)) ^ S32(19, (x)) ^ R(10, (x)))
+
+/* Four of six logical functions used in SHA-384 and SHA-512: */
+#define Sigma0_512(x) (S64(28, (x)) ^ S64(34, (x)) ^ S64(39, (x)))
+#define Sigma1_512(x) (S64(14, (x)) ^ S64(18, (x)) ^ S64(41, (x)))
+#define sigma0_512(x) (S64( 1, (x)) ^ S64( 8, (x)) ^ R( 7, (x)))
+#define sigma1_512(x) (S64(19, (x)) ^ S64(61, (x)) ^ R( 6, (x)))
+
+/*** INTERNAL FUNCTION PROTOTYPES *************************************/
+/* NOTE: These should not be accessed directly from outside this
+ * library -- they are intended for private internal visibility/use
+ * only.
+ */
+void SHA512_Last(SHA512_CTX*);
+void SHA256_Transform(SHA256_CTX*, const sha2_word32*);
+void SHA512_Transform(SHA512_CTX*, const sha2_word64*);
+
+
+/*** SHA-XYZ INITIAL HASH VALUES AND CONSTANTS ************************/
+/* Hash constant words K for SHA-256: */
+static const sha2_word32 K256[64] = {
+ 0x428a2f98UL, 0x71374491UL, 0xb5c0fbcfUL, 0xe9b5dba5UL,
+ 0x3956c25bUL, 0x59f111f1UL, 0x923f82a4UL, 0xab1c5ed5UL,
+ 0xd807aa98UL, 0x12835b01UL, 0x243185beUL, 0x550c7dc3UL,
+ 0x72be5d74UL, 0x80deb1feUL, 0x9bdc06a7UL, 0xc19bf174UL,
+ 0xe49b69c1UL, 0xefbe4786UL, 0x0fc19dc6UL, 0x240ca1ccUL,
+ 0x2de92c6fUL, 0x4a7484aaUL, 0x5cb0a9dcUL, 0x76f988daUL,
+ 0x983e5152UL, 0xa831c66dUL, 0xb00327c8UL, 0xbf597fc7UL,
+ 0xc6e00bf3UL, 0xd5a79147UL, 0x06ca6351UL, 0x14292967UL,
+ 0x27b70a85UL, 0x2e1b2138UL, 0x4d2c6dfcUL, 0x53380d13UL,
+ 0x650a7354UL, 0x766a0abbUL, 0x81c2c92eUL, 0x92722c85UL,
+ 0xa2bfe8a1UL, 0xa81a664bUL, 0xc24b8b70UL, 0xc76c51a3UL,
+ 0xd192e819UL, 0xd6990624UL, 0xf40e3585UL, 0x106aa070UL,
+ 0x19a4c116UL, 0x1e376c08UL, 0x2748774cUL, 0x34b0bcb5UL,
+ 0x391c0cb3UL, 0x4ed8aa4aUL, 0x5b9cca4fUL, 0x682e6ff3UL,
+ 0x748f82eeUL, 0x78a5636fUL, 0x84c87814UL, 0x8cc70208UL,
+ 0x90befffaUL, 0xa4506cebUL, 0xbef9a3f7UL, 0xc67178f2UL
+};
+
+/* Initial hash value H for SHA-256: */
+static const sha2_word32 sha256_initial_hash_value[8] = {
+ 0x6a09e667UL,
+ 0xbb67ae85UL,
+ 0x3c6ef372UL,
+ 0xa54ff53aUL,
+ 0x510e527fUL,
+ 0x9b05688cUL,
+ 0x1f83d9abUL,
+ 0x5be0cd19UL
+};
+
+/* Hash constant words K for SHA-384 and SHA-512: */
+static const sha2_word64 K512[80] = {
+ 0x428a2f98d728ae22ULL, 0x7137449123ef65cdULL,
+ 0xb5c0fbcfec4d3b2fULL, 0xe9b5dba58189dbbcULL,
+ 0x3956c25bf348b538ULL, 0x59f111f1b605d019ULL,
+ 0x923f82a4af194f9bULL, 0xab1c5ed5da6d8118ULL,
+ 0xd807aa98a3030242ULL, 0x12835b0145706fbeULL,
+ 0x243185be4ee4b28cULL, 0x550c7dc3d5ffb4e2ULL,
+ 0x72be5d74f27b896fULL, 0x80deb1fe3b1696b1ULL,
+ 0x9bdc06a725c71235ULL, 0xc19bf174cf692694ULL,
+ 0xe49b69c19ef14ad2ULL, 0xefbe4786384f25e3ULL,
+ 0x0fc19dc68b8cd5b5ULL, 0x240ca1cc77ac9c65ULL,
+ 0x2de92c6f592b0275ULL, 0x4a7484aa6ea6e483ULL,
+ 0x5cb0a9dcbd41fbd4ULL, 0x76f988da831153b5ULL,
+ 0x983e5152ee66dfabULL, 0xa831c66d2db43210ULL,
+ 0xb00327c898fb213fULL, 0xbf597fc7beef0ee4ULL,
+ 0xc6e00bf33da88fc2ULL, 0xd5a79147930aa725ULL,
+ 0x06ca6351e003826fULL, 0x142929670a0e6e70ULL,
+ 0x27b70a8546d22ffcULL, 0x2e1b21385c26c926ULL,
+ 0x4d2c6dfc5ac42aedULL, 0x53380d139d95b3dfULL,
+ 0x650a73548baf63deULL, 0x766a0abb3c77b2a8ULL,
+ 0x81c2c92e47edaee6ULL, 0x92722c851482353bULL,
+ 0xa2bfe8a14cf10364ULL, 0xa81a664bbc423001ULL,
+ 0xc24b8b70d0f89791ULL, 0xc76c51a30654be30ULL,
+ 0xd192e819d6ef5218ULL, 0xd69906245565a910ULL,
+ 0xf40e35855771202aULL, 0x106aa07032bbd1b8ULL,
+ 0x19a4c116b8d2d0c8ULL, 0x1e376c085141ab53ULL,
+ 0x2748774cdf8eeb99ULL, 0x34b0bcb5e19b48a8ULL,
+ 0x391c0cb3c5c95a63ULL, 0x4ed8aa4ae3418acbULL,
+ 0x5b9cca4f7763e373ULL, 0x682e6ff3d6b2b8a3ULL,
+ 0x748f82ee5defb2fcULL, 0x78a5636f43172f60ULL,
+ 0x84c87814a1f0ab72ULL, 0x8cc702081a6439ecULL,
+ 0x90befffa23631e28ULL, 0xa4506cebde82bde9ULL,
+ 0xbef9a3f7b2c67915ULL, 0xc67178f2e372532bULL,
+ 0xca273eceea26619cULL, 0xd186b8c721c0c207ULL,
+ 0xeada7dd6cde0eb1eULL, 0xf57d4f7fee6ed178ULL,
+ 0x06f067aa72176fbaULL, 0x0a637dc5a2c898a6ULL,
+ 0x113f9804bef90daeULL, 0x1b710b35131c471bULL,
+ 0x28db77f523047d84ULL, 0x32caab7b40c72493ULL,
+ 0x3c9ebe0a15c9bebcULL, 0x431d67c49c100d4cULL,
+ 0x4cc5d4becb3e42b6ULL, 0x597f299cfc657e2aULL,
+ 0x5fcb6fab3ad6faecULL, 0x6c44198c4a475817ULL
+};
+
+/* Initial hash value H for SHA-384 */
+static const sha2_word64 sha384_initial_hash_value[8] = {
+ 0xcbbb9d5dc1059ed8ULL,
+ 0x629a292a367cd507ULL,
+ 0x9159015a3070dd17ULL,
+ 0x152fecd8f70e5939ULL,
+ 0x67332667ffc00b31ULL,
+ 0x8eb44a8768581511ULL,
+ 0xdb0c2e0d64f98fa7ULL,
+ 0x47b5481dbefa4fa4ULL
+};
+
+/* Initial hash value H for SHA-512 */
+static const sha2_word64 sha512_initial_hash_value[8] = {
+ 0x6a09e667f3bcc908ULL,
+ 0xbb67ae8584caa73bULL,
+ 0x3c6ef372fe94f82bULL,
+ 0xa54ff53a5f1d36f1ULL,
+ 0x510e527fade682d1ULL,
+ 0x9b05688c2b3e6c1fULL,
+ 0x1f83d9abfb41bd6bULL,
+ 0x5be0cd19137e2179ULL
+};
+
+/*
+ * Constant used by SHA256/384/512_End() functions for converting the
+ * digest to a readable hexadecimal character string:
+ */
+static const char *sha2_hex_digits = "0123456789abcdef";
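+/*
+ * Example: a digest byte 0x5a is emitted as sha2_hex_digits[0x5]
+ * followed by sha2_hex_digits[0xa], i.e. the two characters "5a".
+ */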
+
+
+/*** SHA-256: *********************************************************/
+void SHA256_Init(SHA256_CTX* context) {
+ if (context == (SHA256_CTX*)0) {
+ return;
+ }
+ bcopy(sha256_initial_hash_value, context->state, SHA256_DIGEST_LENGTH);
+ bzero(context->buffer, SHA256_BLOCK_LENGTH);
+ context->bitcount = 0;
+}
+
+#ifdef SHA2_UNROLL_TRANSFORM
+
+/* Unrolled SHA-256 round macros: */
+
+#if BYTE_ORDER == LITTLE_ENDIAN
+
+#define ROUND256_0_TO_15(a,b,c,d,e,f,g,h) \
+ REVERSE32(*data++, W256[j]); \
+ T1 = (h) + Sigma1_256(e) + Ch((e), (f), (g)) + \
+ K256[j] + W256[j]; \
+ (d) += T1; \
+ (h) = T1 + Sigma0_256(a) + Maj((a), (b), (c)); \
+ j++
+
+
+#else /* BYTE_ORDER == LITTLE_ENDIAN */
+
+#define ROUND256_0_TO_15(a,b,c,d,e,f,g,h) \
+ T1 = (h) + Sigma1_256(e) + Ch((e), (f), (g)) + \
+ K256[j] + (W256[j] = *data++); \
+ (d) += T1; \
+ (h) = T1 + Sigma0_256(a) + Maj((a), (b), (c)); \
+ j++
+
+#endif /* BYTE_ORDER == LITTLE_ENDIAN */
+
+#define ROUND256(a,b,c,d,e,f,g,h) \
+ s0 = W256[(j+1)&0x0f]; \
+ s0 = sigma0_256(s0); \
+ s1 = W256[(j+14)&0x0f]; \
+ s1 = sigma1_256(s1); \
+ T1 = (h) + Sigma1_256(e) + Ch((e), (f), (g)) + K256[j] + \
+ (W256[j&0x0f] += s1 + W256[(j+9)&0x0f] + s0); \
+ (d) += T1; \
+ (h) = T1 + Sigma0_256(a) + Maj((a), (b), (c)); \
+ j++
+
+void SHA256_Transform(SHA256_CTX* context, const sha2_word32* data) {
+ sha2_word32 a, b, c, d, e, f, g, h, s0, s1;
+ sha2_word32 T1, *W256;
+ int j;
+
+ W256 = (sha2_word32*)context->buffer;
+
+ /* Initialize registers with the prev. intermediate value */
+ a = context->state[0];
+ b = context->state[1];
+ c = context->state[2];
+ d = context->state[3];
+ e = context->state[4];
+ f = context->state[5];
+ g = context->state[6];
+ h = context->state[7];
+
+ j = 0;
+ do {
+ /* Rounds 0 to 15 (unrolled): */
+ ROUND256_0_TO_15(a,b,c,d,e,f,g,h);
+ ROUND256_0_TO_15(h,a,b,c,d,e,f,g);
+ ROUND256_0_TO_15(g,h,a,b,c,d,e,f);
+ ROUND256_0_TO_15(f,g,h,a,b,c,d,e);
+ ROUND256_0_TO_15(e,f,g,h,a,b,c,d);
+ ROUND256_0_TO_15(d,e,f,g,h,a,b,c);
+ ROUND256_0_TO_15(c,d,e,f,g,h,a,b);
+ ROUND256_0_TO_15(b,c,d,e,f,g,h,a);
+ } while (j < 16);
+
+	/* Now for the remaining rounds up to 63: */
+ do {
+ ROUND256(a,b,c,d,e,f,g,h);
+ ROUND256(h,a,b,c,d,e,f,g);
+ ROUND256(g,h,a,b,c,d,e,f);
+ ROUND256(f,g,h,a,b,c,d,e);
+ ROUND256(e,f,g,h,a,b,c,d);
+ ROUND256(d,e,f,g,h,a,b,c);
+ ROUND256(c,d,e,f,g,h,a,b);
+ ROUND256(b,c,d,e,f,g,h,a);
+ } while (j < 64);
+
+ /* Compute the current intermediate hash value */
+ context->state[0] += a;
+ context->state[1] += b;
+ context->state[2] += c;
+ context->state[3] += d;
+ context->state[4] += e;
+ context->state[5] += f;
+ context->state[6] += g;
+ context->state[7] += h;
+
+ /* Clean up */
+ a = b = c = d = e = f = g = h = T1 = 0;
+}
+
+#else /* SHA2_UNROLL_TRANSFORM */
+
+void SHA256_Transform(SHA256_CTX* context, const sha2_word32* data) {
+ sha2_word32 a, b, c, d, e, f, g, h, s0, s1;
+ sha2_word32 T1, T2, *W256;
+ int j;
+
+ W256 = (sha2_word32*)context->buffer;
+
+ /* Initialize registers with the prev. intermediate value */
+ a = context->state[0];
+ b = context->state[1];
+ c = context->state[2];
+ d = context->state[3];
+ e = context->state[4];
+ f = context->state[5];
+ g = context->state[6];
+ h = context->state[7];
+
+ j = 0;
+ do {
+#if BYTE_ORDER == LITTLE_ENDIAN
+ /* Copy data while converting to host byte order */
+ REVERSE32(*data++,W256[j]);
+ /* Apply the SHA-256 compression function to update a..h */
+ T1 = h + Sigma1_256(e) + Ch(e, f, g) + K256[j] + W256[j];
+#else /* BYTE_ORDER == LITTLE_ENDIAN */
+ /* Apply the SHA-256 compression function to update a..h with copy */
+ T1 = h + Sigma1_256(e) + Ch(e, f, g) + K256[j] + (W256[j] = *data++);
+#endif /* BYTE_ORDER == LITTLE_ENDIAN */
+ T2 = Sigma0_256(a) + Maj(a, b, c);
+ h = g;
+ g = f;
+ f = e;
+ e = d + T1;
+ d = c;
+ c = b;
+ b = a;
+ a = T1 + T2;
+
+ j++;
+ } while (j < 16);
+
+ do {
+ /* Part of the message block expansion: */
+ s0 = W256[(j+1)&0x0f];
+ s0 = sigma0_256(s0);
+ s1 = W256[(j+14)&0x0f];
+ s1 = sigma1_256(s1);
+
+ /* Apply the SHA-256 compression function to update a..h */
+ T1 = h + Sigma1_256(e) + Ch(e, f, g) + K256[j] +
+ (W256[j&0x0f] += s1 + W256[(j+9)&0x0f] + s0);
+ T2 = Sigma0_256(a) + Maj(a, b, c);
+ h = g;
+ g = f;
+ f = e;
+ e = d + T1;
+ d = c;
+ c = b;
+ b = a;
+ a = T1 + T2;
+
+ j++;
+ } while (j < 64);
+
+ /* Compute the current intermediate hash value */
+ context->state[0] += a;
+ context->state[1] += b;
+ context->state[2] += c;
+ context->state[3] += d;
+ context->state[4] += e;
+ context->state[5] += f;
+ context->state[6] += g;
+ context->state[7] += h;
+
+ /* Clean up */
+ a = b = c = d = e = f = g = h = T1 = T2 = 0;
+}
+
+#endif /* SHA2_UNROLL_TRANSFORM */
+
+void SHA256_Update(SHA256_CTX* context, const sha2_byte *data, size_t len) {
+ unsigned int freespace, usedspace;
+
+ if (len == 0) {
+ /* Calling with no data is valid - we do nothing */
+ return;
+ }
+
+ /* Sanity check: */
+ assert(context != (SHA256_CTX*)0 && data != (sha2_byte*)0);
+
+ usedspace = (context->bitcount >> 3) % SHA256_BLOCK_LENGTH;
+ if (usedspace > 0) {
+ /* Calculate how much free space is available in the buffer */
+ freespace = SHA256_BLOCK_LENGTH - usedspace;
+
+ if (len >= freespace) {
+ /* Fill the buffer completely and process it */
+ bcopy(data, &context->buffer[usedspace], freespace);
+ context->bitcount += freespace << 3;
+ len -= freespace;
+ data += freespace;
+ SHA256_Transform(context, (sha2_word32*)context->buffer);
+ } else {
+ /* The buffer is not yet full */
+ bcopy(data, &context->buffer[usedspace], len);
+ context->bitcount += len << 3;
+ /* Clean up: */
+ usedspace = freespace = 0;
+ return;
+ }
+ }
+ while (len >= SHA256_BLOCK_LENGTH) {
+ /* Process as many complete blocks as we can */
+ SHA256_Transform(context, (const sha2_word32*)data);
+ context->bitcount += SHA256_BLOCK_LENGTH << 3;
+ len -= SHA256_BLOCK_LENGTH;
+ data += SHA256_BLOCK_LENGTH;
+ }
+ if (len > 0) {
+ /* There's left-overs, so save 'em */
+ bcopy(data, context->buffer, len);
+ context->bitcount += len << 3;
+ }
+ /* Clean up: */
+ usedspace = freespace = 0;
+}
+
+void SHA256_Final(sha2_byte digest[], SHA256_CTX* context) {
+ sha2_word32 *d = (sha2_word32*)digest;
+ unsigned int usedspace;
+
+ /* Sanity check: */
+ assert(context != (SHA256_CTX*)0);
+
+ /* If no digest buffer is passed, we don't bother doing this: */
+ if (digest != (sha2_byte*)0) {
+ usedspace = (context->bitcount >> 3) % SHA256_BLOCK_LENGTH;
+#if BYTE_ORDER == LITTLE_ENDIAN
+ /* Convert FROM host byte order */
+ REVERSE64(context->bitcount,context->bitcount);
+#endif
+ if (usedspace > 0) {
+ /* Begin padding with a 1 bit: */
+ context->buffer[usedspace++] = 0x80;
+
+ if (usedspace <= SHA256_SHORT_BLOCK_LENGTH) {
+ /* Set-up for the last transform: */
+ bzero(&context->buffer[usedspace], SHA256_SHORT_BLOCK_LENGTH - usedspace);
+ } else {
+ if (usedspace < SHA256_BLOCK_LENGTH) {
+ bzero(&context->buffer[usedspace], SHA256_BLOCK_LENGTH - usedspace);
+ }
+ /* Do second-to-last transform: */
+ SHA256_Transform(context, (sha2_word32*)context->buffer);
+
+ /* And set-up for the last transform: */
+ bzero(context->buffer, SHA256_SHORT_BLOCK_LENGTH);
+ }
+ } else {
+ /* Set-up for the last transform: */
+ bzero(context->buffer, SHA256_SHORT_BLOCK_LENGTH);
+
+ /* Begin padding with a 1 bit: */
+ *context->buffer = 0x80;
+ }
+ /* Set the bit count: */
+ *(sha2_word64*)&context->buffer[SHA256_SHORT_BLOCK_LENGTH] = context->bitcount;
+
+ /* Final transform: */
+ SHA256_Transform(context, (sha2_word32*)context->buffer);
+
+#if BYTE_ORDER == LITTLE_ENDIAN
+ {
+ /* Convert TO host byte order */
+ int j;
+ for (j = 0; j < 8; j++) {
+ REVERSE32(context->state[j],context->state[j]);
+ *d++ = context->state[j];
+ }
+ }
+#else
+ bcopy(context->state, d, SHA256_DIGEST_LENGTH);
+#endif
+ }
+
+ /* Clean up state data: */
+ bzero(context, sizeof(*context));
+ usedspace = 0;
+}
+
+char *SHA256_End(SHA256_CTX* context, char buffer[]) {
+	sha2_byte digest[SHA256_DIGEST_LENGTH], *d = digest;
+	char *ret = buffer;
+	int i;
+
+	/* Sanity check: */
+	assert(context != (SHA256_CTX*)0);
+
+	if (buffer != (char*)0) {
+		SHA256_Final(digest, context);
+
+		for (i = 0; i < SHA256_DIGEST_LENGTH; i++) {
+			*buffer++ = sha2_hex_digits[(*d & 0xf0) >> 4];
+			*buffer++ = sha2_hex_digits[*d & 0x0f];
+			d++;
+		}
+		*buffer = (char)0;
+	} else {
+		bzero(context, sizeof(*context));
+	}
+	bzero(digest, SHA256_DIGEST_LENGTH);
+	/* Return the start of the hex string, not its NUL terminator. */
+	return ret;
+}
+
+char* SHA256_Data(const sha2_byte* data, size_t len, char digest[SHA256_DIGEST_STRING_LENGTH]) {
+ SHA256_CTX context;
+
+ SHA256_Init(&context);
+ SHA256_Update(&context, data, len);
+ return SHA256_End(&context, digest);
+}
+
+
+/*** SHA-512: *********************************************************/
+void SHA512_Init(SHA512_CTX* context) {
+ if (context == (SHA512_CTX*)0) {
+ return;
+ }
+ bcopy(sha512_initial_hash_value, context->state, SHA512_DIGEST_LENGTH);
+ bzero(context->buffer, SHA512_BLOCK_LENGTH);
+ context->bitcount[0] = context->bitcount[1] = 0;
+}
+
+#ifdef SHA2_UNROLL_TRANSFORM
+
+/* Unrolled SHA-512 round macros: */
+#if BYTE_ORDER == LITTLE_ENDIAN
+
+#define ROUND512_0_TO_15(a,b,c,d,e,f,g,h) \
+ REVERSE64(*data++, W512[j]); \
+ T1 = (h) + Sigma1_512(e) + Ch((e), (f), (g)) + \
+ K512[j] + W512[j]; \
+	(d) += T1; \
+	(h) = T1 + Sigma0_512(a) + Maj((a), (b), (c)); \
+ j++
+
+
+#else /* BYTE_ORDER == LITTLE_ENDIAN */
+
+#define ROUND512_0_TO_15(a,b,c,d,e,f,g,h) \
+ T1 = (h) + Sigma1_512(e) + Ch((e), (f), (g)) + \
+ K512[j] + (W512[j] = *data++); \
+ (d) += T1; \
+ (h) = T1 + Sigma0_512(a) + Maj((a), (b), (c)); \
+ j++
+
+#endif /* BYTE_ORDER == LITTLE_ENDIAN */
+
+#define ROUND512(a,b,c,d,e,f,g,h) \
+ s0 = W512[(j+1)&0x0f]; \
+ s0 = sigma0_512(s0); \
+ s1 = W512[(j+14)&0x0f]; \
+ s1 = sigma1_512(s1); \
+ T1 = (h) + Sigma1_512(e) + Ch((e), (f), (g)) + K512[j] + \
+ (W512[j&0x0f] += s1 + W512[(j+9)&0x0f] + s0); \
+ (d) += T1; \
+ (h) = T1 + Sigma0_512(a) + Maj((a), (b), (c)); \
+ j++
+
+void SHA512_Transform(SHA512_CTX* context, const sha2_word64* data) {
+ sha2_word64 a, b, c, d, e, f, g, h, s0, s1;
+ sha2_word64 T1, *W512 = (sha2_word64*)context->buffer;
+ int j;
+
+ /* Initialize registers with the prev. intermediate value */
+ a = context->state[0];
+ b = context->state[1];
+ c = context->state[2];
+ d = context->state[3];
+ e = context->state[4];
+ f = context->state[5];
+ g = context->state[6];
+ h = context->state[7];
+
+ j = 0;
+ do {
+ ROUND512_0_TO_15(a,b,c,d,e,f,g,h);
+ ROUND512_0_TO_15(h,a,b,c,d,e,f,g);
+ ROUND512_0_TO_15(g,h,a,b,c,d,e,f);
+ ROUND512_0_TO_15(f,g,h,a,b,c,d,e);
+ ROUND512_0_TO_15(e,f,g,h,a,b,c,d);
+ ROUND512_0_TO_15(d,e,f,g,h,a,b,c);
+ ROUND512_0_TO_15(c,d,e,f,g,h,a,b);
+ ROUND512_0_TO_15(b,c,d,e,f,g,h,a);
+ } while (j < 16);
+
+ /* Now for the remaining rounds up to 79: */
+ do {
+ ROUND512(a,b,c,d,e,f,g,h);
+ ROUND512(h,a,b,c,d,e,f,g);
+ ROUND512(g,h,a,b,c,d,e,f);
+ ROUND512(f,g,h,a,b,c,d,e);
+ ROUND512(e,f,g,h,a,b,c,d);
+ ROUND512(d,e,f,g,h,a,b,c);
+ ROUND512(c,d,e,f,g,h,a,b);
+ ROUND512(b,c,d,e,f,g,h,a);
+ } while (j < 80);
+
+ /* Compute the current intermediate hash value */
+ context->state[0] += a;
+ context->state[1] += b;
+ context->state[2] += c;
+ context->state[3] += d;
+ context->state[4] += e;
+ context->state[5] += f;
+ context->state[6] += g;
+ context->state[7] += h;
+
+ /* Clean up */
+ a = b = c = d = e = f = g = h = T1 = 0;
+}
+
+#else /* SHA2_UNROLL_TRANSFORM */
+
+void SHA512_Transform(SHA512_CTX* context, const sha2_word64* data) {
+ sha2_word64 a, b, c, d, e, f, g, h, s0, s1;
+ sha2_word64 T1 = 0, T2 = 0, *W512 = (sha2_word64*)context->buffer;
+ int j;
+
+ /* Initialize registers with the prev. intermediate value */
+ a = context->state[0];
+ b = context->state[1];
+ c = context->state[2];
+ d = context->state[3];
+ e = context->state[4];
+ f = context->state[5];
+ g = context->state[6];
+ h = context->state[7];
+
+ j = 0;
+ do {
+#if BYTE_ORDER == LITTLE_ENDIAN
+ /* Convert TO host byte order */
+ REVERSE64(*data++, W512[j]);
+ /* Apply the SHA-512 compression function to update a..h */
+ T1 = h + Sigma1_512(e) + Ch(e, f, g) + K512[j] + W512[j];
+#else /* BYTE_ORDER == LITTLE_ENDIAN */
+ /* Apply the SHA-512 compression function to update a..h with copy */
+ T1 = h + Sigma1_512(e) + Ch(e, f, g) + K512[j] + (W512[j] = *data++);
+#endif /* BYTE_ORDER == LITTLE_ENDIAN */
+ T2 = Sigma0_512(a) + Maj(a, b, c);
+ h = g;
+ g = f;
+ f = e;
+ e = d + T1;
+ d = c;
+ c = b;
+ b = a;
+ a = T1 + T2;
+
+ j++;
+ } while (j < 16);
+
+ do {
+ /* Part of the message block expansion: */
+ s0 = W512[(j+1)&0x0f];
+ s0 = sigma0_512(s0);
+ s1 = W512[(j+14)&0x0f];
+ s1 = sigma1_512(s1);
+
+ /* Apply the SHA-512 compression function to update a..h */
+ T1 = h + Sigma1_512(e) + Ch(e, f, g) + K512[j] +
+ (W512[j&0x0f] += s1 + W512[(j+9)&0x0f] + s0);
+ T2 = Sigma0_512(a) + Maj(a, b, c);
+ h = g;
+ g = f;
+ f = e;
+ e = d + T1;
+ d = c;
+ c = b;
+ b = a;
+ a = T1 + T2;
+
+ j++;
+ } while (j < 80);
+
+ /* Compute the current intermediate hash value */
+ context->state[0] += a;
+ context->state[1] += b;
+ context->state[2] += c;
+ context->state[3] += d;
+ context->state[4] += e;
+ context->state[5] += f;
+ context->state[6] += g;
+ context->state[7] += h;
+
+ /* Clean up */
+ a = b = c = d = e = f = g = h = T1 = T2 = 0;
+}
+
+#endif /* SHA2_UNROLL_TRANSFORM */
+
+void SHA512_Update(SHA512_CTX* context, const sha2_byte *data, size_t len) {
+ unsigned int freespace, usedspace;
+
+ if (len == 0) {
+ /* Calling with no data is valid - we do nothing */
+ return;
+ }
+
+ /* Sanity check: */
+ assert(context != (SHA512_CTX*)0 && data != (sha2_byte*)0);
+
+ usedspace = (context->bitcount[0] >> 3) % SHA512_BLOCK_LENGTH;
+ if (usedspace > 0) {
+ /* Calculate how much free space is available in the buffer */
+ freespace = SHA512_BLOCK_LENGTH - usedspace;
+
+ if (len >= freespace) {
+ /* Fill the buffer completely and process it */
+ bcopy(data, &context->buffer[usedspace], freespace);
+ ADDINC128(context->bitcount, freespace << 3);
+ len -= freespace;
+ data += freespace;
+ SHA512_Transform(context, (sha2_word64*)context->buffer);
+ } else {
+ /* The buffer is not yet full */
+ bcopy(data, &context->buffer[usedspace], len);
+ ADDINC128(context->bitcount, len << 3);
+ /* Clean up: */
+ usedspace = freespace = 0;
+ return;
+ }
+ }
+ while (len >= SHA512_BLOCK_LENGTH) {
+ /* Process as many complete blocks as we can */
+ SHA512_Transform(context, (const sha2_word64*)data);
+ ADDINC128(context->bitcount, SHA512_BLOCK_LENGTH << 3);
+ len -= SHA512_BLOCK_LENGTH;
+ data += SHA512_BLOCK_LENGTH;
+ }
+ if (len > 0) {
+ /* There's left-overs, so save 'em */
+ bcopy(data, context->buffer, len);
+ ADDINC128(context->bitcount, len << 3);
+ }
+ /* Clean up: */
+ usedspace = freespace = 0;
+}
+
+void SHA512_Last(SHA512_CTX* context) {
+ unsigned int usedspace;
+
+ usedspace = (context->bitcount[0] >> 3) % SHA512_BLOCK_LENGTH;
+#if BYTE_ORDER == LITTLE_ENDIAN
+ /* Convert FROM host byte order */
+ REVERSE64(context->bitcount[0],context->bitcount[0]);
+ REVERSE64(context->bitcount[1],context->bitcount[1]);
+#endif
+ if (usedspace > 0) {
+ /* Begin padding with a 1 bit: */
+ context->buffer[usedspace++] = 0x80;
+
+ if (usedspace <= SHA512_SHORT_BLOCK_LENGTH) {
+ /* Set-up for the last transform: */
+ bzero(&context->buffer[usedspace], SHA512_SHORT_BLOCK_LENGTH - usedspace);
+ } else {
+ if (usedspace < SHA512_BLOCK_LENGTH) {
+ bzero(&context->buffer[usedspace], SHA512_BLOCK_LENGTH - usedspace);
+ }
+ /* Do second-to-last transform: */
+ SHA512_Transform(context, (sha2_word64*)context->buffer);
+
+ /* And set-up for the last transform: */
+ bzero(context->buffer, SHA512_BLOCK_LENGTH - 2);
+ }
+ } else {
+ /* Prepare for final transform: */
+ bzero(context->buffer, SHA512_SHORT_BLOCK_LENGTH);
+
+ /* Begin padding with a 1 bit: */
+ *context->buffer = 0x80;
+ }
+ /* Store the length of input data (in bits): */
+ *(sha2_word64*)&context->buffer[SHA512_SHORT_BLOCK_LENGTH] = context->bitcount[1];
+ *(sha2_word64*)&context->buffer[SHA512_SHORT_BLOCK_LENGTH+8] = context->bitcount[0];
+
+ /* Final transform: */
+ SHA512_Transform(context, (sha2_word64*)context->buffer);
+}
+
+void SHA512_Final(sha2_byte digest[], SHA512_CTX* context) {
+ sha2_word64 *d = (sha2_word64*)digest;
+
+ /* Sanity check: */
+ assert(context != (SHA512_CTX*)0);
+
+ /* If no digest buffer is passed, we don't bother doing this: */
+ if (digest != (sha2_byte*)0) {
+ SHA512_Last(context);
+
+ /* Save the hash data for output: */
+#if BYTE_ORDER == LITTLE_ENDIAN
+ {
+ /* Convert TO host byte order */
+ int j;
+ for (j = 0; j < 8; j++) {
+ REVERSE64(context->state[j],context->state[j]);
+ *d++ = context->state[j];
+ }
+ }
+#else
+ bcopy(context->state, d, SHA512_DIGEST_LENGTH);
+#endif
+ }
+
+ /* Zero out state data */
+ bzero(context, sizeof(*context));
+}
+
+char *SHA512_End(SHA512_CTX* context, char buffer[]) {
+	sha2_byte digest[SHA512_DIGEST_LENGTH], *d = digest;
+	char *ret = buffer;
+	int i;
+
+	/* Sanity check: */
+	assert(context != (SHA512_CTX*)0);
+
+	if (buffer != (char*)0) {
+		SHA512_Final(digest, context);
+
+		for (i = 0; i < SHA512_DIGEST_LENGTH; i++) {
+			*buffer++ = sha2_hex_digits[(*d & 0xf0) >> 4];
+			*buffer++ = sha2_hex_digits[*d & 0x0f];
+			d++;
+		}
+		*buffer = (char)0;
+	} else {
+		bzero(context, sizeof(*context));
+	}
+	bzero(digest, SHA512_DIGEST_LENGTH);
+	/* Return the start of the hex string, not its NUL terminator. */
+	return ret;
+}
+
+char* SHA512_Data(const sha2_byte* data, size_t len, char digest[SHA512_DIGEST_STRING_LENGTH]) {
+ SHA512_CTX context;
+
+ SHA512_Init(&context);
+ SHA512_Update(&context, data, len);
+ return SHA512_End(&context, digest);
+}
+
+
+/*** SHA-384: *********************************************************/
+void SHA384_Init(SHA384_CTX* context) {
+ if (context == (SHA384_CTX*)0) {
+ return;
+ }
+ bcopy(sha384_initial_hash_value, context->state, SHA512_DIGEST_LENGTH);
+ bzero(context->buffer, SHA384_BLOCK_LENGTH);
+ context->bitcount[0] = context->bitcount[1] = 0;
+}
+
+void SHA384_Update(SHA384_CTX* context, const sha2_byte* data, size_t len) {
+ SHA512_Update((SHA512_CTX*)context, data, len);
+}
+
+void SHA384_Final(sha2_byte digest[], SHA384_CTX* context) {
+ sha2_word64 *d = (sha2_word64*)digest;
+
+ /* Sanity check: */
+ assert(context != (SHA384_CTX*)0);
+
+ /* If no digest buffer is passed, we don't bother doing this: */
+ if (digest != (sha2_byte*)0) {
+ SHA512_Last((SHA512_CTX*)context);
+
+ /* Save the hash data for output: */
+#if BYTE_ORDER == LITTLE_ENDIAN
+ {
+ /* Convert TO host byte order */
+ int j;
+ for (j = 0; j < 6; j++) {
+ REVERSE64(context->state[j],context->state[j]);
+ *d++ = context->state[j];
+ }
+ }
+#else
+ bcopy(context->state, d, SHA384_DIGEST_LENGTH);
+#endif
+ }
+
+ /* Zero out state data */
+ bzero(context, sizeof(*context));
+}
+
+char *SHA384_End(SHA384_CTX* context, char buffer[]) {
+	sha2_byte digest[SHA384_DIGEST_LENGTH], *d = digest;
+	char *ret = buffer;
+	int i;
+
+	/* Sanity check: */
+	assert(context != (SHA384_CTX*)0);
+
+	if (buffer != (char*)0) {
+		SHA384_Final(digest, context);
+
+		for (i = 0; i < SHA384_DIGEST_LENGTH; i++) {
+			*buffer++ = sha2_hex_digits[(*d & 0xf0) >> 4];
+			*buffer++ = sha2_hex_digits[*d & 0x0f];
+			d++;
+		}
+		*buffer = (char)0;
+	} else {
+		bzero(context, sizeof(*context));
+	}
+	bzero(digest, SHA384_DIGEST_LENGTH);
+	/* Return the start of the hex string, not its NUL terminator. */
+	return ret;
+}
+
+char* SHA384_Data(const sha2_byte* data, size_t len, char digest[SHA384_DIGEST_STRING_LENGTH]) {
+ SHA384_CTX context;
+
+ SHA384_Init(&context);
+ SHA384_Update(&context, data, len);
+ return SHA384_End(&context, digest);
+}
+
diff --git a/rtems/freebsd/crypto/sha2/sha2.h b/rtems/freebsd/crypto/sha2/sha2.h
new file mode 100644
index 00000000..1277eb2c
--- /dev/null
+++ b/rtems/freebsd/crypto/sha2/sha2.h
@@ -0,0 +1,141 @@
+/* $FreeBSD$ */
+/* $KAME: sha2.h,v 1.3 2001/03/12 08:27:48 itojun Exp $ */
+
+/*
+ * sha2.h
+ *
+ * Version 1.0.0beta1
+ *
+ * Written by Aaron D. Gifford <me@aarongifford.com>
+ *
+ * Copyright 2000 Aaron D. Gifford. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the copyright holder nor the names of contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR(S) AND CONTRIBUTOR(S) ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR(S) OR CONTRIBUTOR(S) BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#ifndef __SHA2_HH__
+#define __SHA2_HH__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+/*** SHA-256/384/512 Various Length Definitions ***********************/
+#define SHA256_BLOCK_LENGTH 64
+#define SHA256_DIGEST_LENGTH 32
+#define SHA256_DIGEST_STRING_LENGTH (SHA256_DIGEST_LENGTH * 2 + 1)
+#define SHA384_BLOCK_LENGTH 128
+#define SHA384_DIGEST_LENGTH 48
+#define SHA384_DIGEST_STRING_LENGTH (SHA384_DIGEST_LENGTH * 2 + 1)
+#define SHA512_BLOCK_LENGTH 128
+#define SHA512_DIGEST_LENGTH 64
+#define SHA512_DIGEST_STRING_LENGTH (SHA512_DIGEST_LENGTH * 2 + 1)
+
+
+/*** SHA-256/384/512 Context Structures *******************************/
+/* NOTE: If your architecture does not define either u_intXX_t types or
+ * uintXX_t (from inttypes.h), you may need to define things by hand
+ * for your system:
+ */
+#if 0
+typedef unsigned char u_int8_t; /* 1-byte (8-bits) */
+typedef unsigned int u_int32_t; /* 4-bytes (32-bits) */
+typedef unsigned long long u_int64_t; /* 8-bytes (64-bits) */
+#endif
+/*
+ * Most BSD systems already define u_intXX_t types, as does Linux.
+ * Some systems, however, such as Compaq's Tru64 Unix, can instead use
+ * the uintXX_t types defined by recent ANSI C standards and included
+ * in the file:
+ *
+ * #include <rtems/freebsd/inttypes.h>
+ *
+ * If you choose to use <inttypes.h> then please define:
+ *
+ * #define SHA2_USE_INTTYPES_H
+ *
+ * Or on the command line during compile:
+ *
+ * cc -DSHA2_USE_INTTYPES_H ...
+ */
+#if 0 /*def SHA2_USE_INTTYPES_H*/
+
+typedef struct _SHA256_CTX {
+ uint32_t state[8];
+ uint64_t bitcount;
+ uint8_t buffer[SHA256_BLOCK_LENGTH];
+} SHA256_CTX;
+typedef struct _SHA512_CTX {
+ uint64_t state[8];
+ uint64_t bitcount[2];
+ uint8_t buffer[SHA512_BLOCK_LENGTH];
+} SHA512_CTX;
+
+#else /* SHA2_USE_INTTYPES_H */
+
+typedef struct _SHA256_CTX {
+ u_int32_t state[8];
+ u_int64_t bitcount;
+ u_int8_t buffer[SHA256_BLOCK_LENGTH];
+} SHA256_CTX;
+typedef struct _SHA512_CTX {
+ u_int64_t state[8];
+ u_int64_t bitcount[2];
+ u_int8_t buffer[SHA512_BLOCK_LENGTH];
+} SHA512_CTX;
+
+#endif /* SHA2_USE_INTTYPES_H */
+
+typedef SHA512_CTX SHA384_CTX;
+
+
+/*** SHA-256/384/512 Function Prototypes ******************************/
+
+void SHA256_Init(SHA256_CTX *);
+void SHA256_Update(SHA256_CTX*, const u_int8_t*, size_t);
+void SHA256_Final(u_int8_t[SHA256_DIGEST_LENGTH], SHA256_CTX*);
+char* SHA256_End(SHA256_CTX*, char[SHA256_DIGEST_STRING_LENGTH]);
+char* SHA256_Data(const u_int8_t*, size_t, char[SHA256_DIGEST_STRING_LENGTH]);
+
+void SHA384_Init(SHA384_CTX*);
+void SHA384_Update(SHA384_CTX*, const u_int8_t*, size_t);
+void SHA384_Final(u_int8_t[SHA384_DIGEST_LENGTH], SHA384_CTX*);
+char* SHA384_End(SHA384_CTX*, char[SHA384_DIGEST_STRING_LENGTH]);
+char* SHA384_Data(const u_int8_t*, size_t, char[SHA384_DIGEST_STRING_LENGTH]);
+
+void SHA512_Init(SHA512_CTX*);
+void SHA512_Update(SHA512_CTX*, const u_int8_t*, size_t);
+void SHA512_Final(u_int8_t[SHA512_DIGEST_LENGTH], SHA512_CTX*);
+char* SHA512_End(SHA512_CTX*, char[SHA512_DIGEST_STRING_LENGTH]);
+char* SHA512_Data(const u_int8_t*, size_t, char[SHA512_DIGEST_STRING_LENGTH]);
+
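+/*
+ * Illustrative usage sketch (kept out of compilation with #if 0):
+ * the incremental interface and the one-shot SHA256_Data() helper,
+ * which renders the digest as a nul-terminated hex string.  The names
+ * data/datalen are placeholders for the caller's buffer and length.
+ */
+#if 0
+	SHA256_CTX ctx;
+	u_int8_t digest[SHA256_DIGEST_LENGTH];
+	char hex[SHA256_DIGEST_STRING_LENGTH];
+
+	SHA256_Init(&ctx);
+	SHA256_Update(&ctx, data, datalen);
+	SHA256_Final(digest, &ctx);	/* note: digest first, context second */
+
+	SHA256_Data(data, datalen, hex);	/* hex: 64 digits plus NUL */
+#endif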
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* __SHA2_HH__ */
+
diff --git a/rtems/freebsd/ddb/db_sym.h b/rtems/freebsd/ddb/db_sym.h
new file mode 100644
index 00000000..936ffd88
--- /dev/null
+++ b/rtems/freebsd/ddb/db_sym.h
@@ -0,0 +1 @@
+/* EMPTY */
diff --git a/rtems/freebsd/ddb/ddb.h b/rtems/freebsd/ddb/ddb.h
new file mode 100644
index 00000000..936ffd88
--- /dev/null
+++ b/rtems/freebsd/ddb/ddb.h
@@ -0,0 +1 @@
+/* EMPTY */
diff --git a/rtems/freebsd/dev/mii/icsphy.c b/rtems/freebsd/dev/mii/icsphy.c
new file mode 100644
index 00000000..5b588a68
--- /dev/null
+++ b/rtems/freebsd/dev/mii/icsphy.c
@@ -0,0 +1,277 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/* $NetBSD: icsphy.c,v 1.41 2006/11/16 21:24:07 christos Exp $ */
+
+/*-
+ * Copyright (c) 1998, 1999, 2000 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
+ * NASA Ames Research Center.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Copyright (c) 1997 Manuel Bouyer. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * driver for Integrated Circuit Systems' ICS1889-1893 ethernet 10/100 PHY
+ * datasheet from www.icst.com
+ */
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/module.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/bus.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/if_media.h>
+
+#include <rtems/freebsd/dev/mii/mii.h>
+#include <rtems/freebsd/dev/mii/miivar.h>
+#include <rtems/freebsd/local/miidevs.h>
+
+#include <rtems/freebsd/dev/mii/icsphyreg.h>
+
+#include <rtems/freebsd/local/miibus_if.h>
+
+static int icsphy_probe(device_t dev);
+static int icsphy_attach(device_t dev);
+
+struct icsphy_softc {
+ struct mii_softc mii_sc;
+ int mii_model;
+};
+
+static device_method_t icsphy_methods[] = {
+ /* device interface */
+ DEVMETHOD(device_probe, icsphy_probe),
+ DEVMETHOD(device_attach, icsphy_attach),
+ DEVMETHOD(device_detach, mii_phy_detach),
+ DEVMETHOD(device_shutdown, bus_generic_shutdown),
+ { 0, 0 }
+};
+
+static devclass_t icsphy_devclass;
+
+static driver_t icsphy_driver = {
+ "icsphy",
+ icsphy_methods,
+ sizeof(struct icsphy_softc)
+};
+
+DRIVER_MODULE(icsphy, miibus, icsphy_driver, icsphy_devclass, 0, 0);
+
+static int icsphy_service(struct mii_softc *, struct mii_data *, int);
+static void icsphy_status(struct mii_softc *);
+static void icsphy_reset(struct mii_softc *);
+
+static const struct mii_phydesc icsphys[] = {
+ MII_PHY_DESC(xxICS, 1889),
+ MII_PHY_DESC(xxICS, 1890),
+ MII_PHY_DESC(xxICS, 1892),
+ MII_PHY_DESC(xxICS, 1893),
+ MII_PHY_END
+};
+
+static int
+icsphy_probe(device_t dev)
+{
+
+ return (mii_phy_dev_probe(dev, icsphys, BUS_PROBE_DEFAULT));
+}
+
+static int
+icsphy_attach(device_t dev)
+{
+ struct icsphy_softc *isc;
+ struct mii_softc *sc;
+ struct mii_attach_args *ma;
+ struct mii_data *mii;
+
+ isc = device_get_softc(dev);
+ sc = &isc->mii_sc;
+ ma = device_get_ivars(dev);
+ sc->mii_dev = device_get_parent(dev);
+ mii = ma->mii_data;
+ LIST_INSERT_HEAD(&mii->mii_phys, sc, mii_list);
+
+ sc->mii_flags = miibus_get_flags(dev);
+ sc->mii_inst = mii->mii_instance++;
+ sc->mii_phy = ma->mii_phyno;
+ sc->mii_service = icsphy_service;
+ sc->mii_pdata = mii;
+
+ sc->mii_flags |= MIIF_NOISOLATE;
+
+ ifmedia_add(&mii->mii_media,
+ IFM_MAKEWORD(IFM_ETHER, IFM_100_TX, IFM_LOOP, sc->mii_inst),
+ MII_MEDIA_100_TX, NULL);
+
+ isc->mii_model = MII_MODEL(ma->mii_id2);
+ icsphy_reset(sc);
+
+ sc->mii_capabilities = PHY_READ(sc, MII_BMSR) & ma->mii_capmask;
+ device_printf(dev, " ");
+ mii_phy_add_media(sc);
+ printf("\n");
+
+ MIIBUS_MEDIAINIT(sc->mii_dev);
+
+ return (0);
+}
+
+static int
+icsphy_service(struct mii_softc *sc, struct mii_data *mii, int cmd)
+{
+
+ switch (cmd) {
+ case MII_POLLSTAT:
+ break;
+
+ case MII_MEDIACHG:
+ /*
+ * If the interface is not up, don't do anything.
+ */
+ if ((mii->mii_ifp->if_flags & IFF_UP) == 0)
+ break;
+
+ mii_phy_setmedia(sc);
+ break;
+
+ case MII_TICK:
+ if (mii_phy_tick(sc) == EJUSTRETURN)
+ return (0);
+ break;
+ }
+
+ /* Update the media status. */
+ icsphy_status(sc);
+
+ /* Callback if something changed. */
+ mii_phy_update(sc, cmd);
+ return (0);
+}
+
+static void
+icsphy_status(struct mii_softc *sc)
+{
+ struct mii_data *mii = sc->mii_pdata;
+ struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
+ int bmcr, qpr;
+
+ mii->mii_media_status = IFM_AVALID;
+ mii->mii_media_active = IFM_ETHER;
+
+ /*
+ * Don't get link from the BMSR. It's available in the QPR,
+ * and we have to read it twice to unlatch it anyhow. This
+ * gives us fewer register reads.
+ */
+ qpr = PHY_READ(sc, MII_ICSPHY_QPR); /* unlatch */
+ qpr = PHY_READ(sc, MII_ICSPHY_QPR); /* real value */
+
+ if (qpr & QPR_LINK)
+ mii->mii_media_status |= IFM_ACTIVE;
+
+ bmcr = PHY_READ(sc, MII_BMCR);
+ if (bmcr & BMCR_ISO) {
+ mii->mii_media_active |= IFM_NONE;
+ mii->mii_media_status = 0;
+ return;
+ }
+
+ if (bmcr & BMCR_LOOP)
+ mii->mii_media_active |= IFM_LOOP;
+
+ if (bmcr & BMCR_AUTOEN) {
+ if ((qpr & QPR_ACOMP) == 0) {
+ /* Erg, still trying, I guess... */
+ mii->mii_media_active |= IFM_NONE;
+ return;
+ }
+ if (qpr & QPR_SPEED)
+ mii->mii_media_active |= IFM_100_TX;
+ else
+ mii->mii_media_active |= IFM_10_T;
+ if (qpr & QPR_FDX)
+ mii->mii_media_active |= IFM_FDX;
+ else
+ mii->mii_media_active |= IFM_HDX;
+ } else
+ mii->mii_media_active = ife->ifm_media;
+}
+
+static void
+icsphy_reset(struct mii_softc *sc)
+{
+ struct icsphy_softc *isc = (struct icsphy_softc *)sc;
+
+ mii_phy_reset(sc);
+ /* set powerdown feature */
+ switch (isc->mii_model) {
+ case MII_MODEL_xxICS_1890:
+ case MII_MODEL_xxICS_1893:
+ PHY_WRITE(sc, MII_ICSPHY_ECR2, ECR2_100AUTOPWRDN);
+ break;
+ case MII_MODEL_xxICS_1892:
+ PHY_WRITE(sc, MII_ICSPHY_ECR2,
+ ECR2_10AUTOPWRDN|ECR2_100AUTOPWRDN);
+ break;
+ default:
+		/* the 1889 has no ECR2 */
+ break;
+ }
+ /*
+	 * The data sheet does not state that a reset restarts
+	 * auto-negotiation, so start it explicitly here.
+ */
+ PHY_WRITE(sc, MII_BMCR, BMCR_S100|BMCR_STARTNEG|BMCR_FDX);
+}
diff --git a/rtems/freebsd/dev/mii/icsphyreg.h b/rtems/freebsd/dev/mii/icsphyreg.h
new file mode 100644
index 00000000..a5754b7d
--- /dev/null
+++ b/rtems/freebsd/dev/mii/icsphyreg.h
@@ -0,0 +1,129 @@
+/* $NetBSD: icsphyreg.h,v 1.2 2003/07/01 22:46:08 msaitoh Exp $ */
+
+/*-
+ * Copyright (c) 1998 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
+ * NASA Ames Research Center.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _DEV_MII_ICSPHYREG_HH_
+#define _DEV_MII_ICSPHYREG_HH_
+
+/*
+ * ICS1890 registers.
+ * http://www.icst.com/pdf/18??.pdf
+ */
+
+/* HEX 1889 1890 1892 1893
+ *--------------------------------------------------------------
+ * 0 Control * * * *
+ * 1 Status * * * *
+ * 2 PHY Identifier * * * *
+ * 3 PHY Identifier * * * *
+ * 4 Auto-Neg. Advertisement * * *
+ * 5 Auto-Neg. Link Parent Adv * * *
+ * 6 Auto-Neg. Expansion * * *
+ * 7 Auto-Neg. Next Page Tx * *
+ * 8 ANg Nxt Page Lnk Parnt Abl * *
+ * 10 Extended Control * * * *
+ * 11 Quick Poll Status * * * *
+ * 12 10Base-T Operation * * *
+ * 13 Extended Control2 * * *
+ */
+
+#define MII_ICSPHY_ECR 0x10 /* Extended Control Register */
+#define ECR_OVR 0x8000 /* disable command reg overwrites */
+#define ECR_PHYADDR_MASK 0x07c0 /* PHY address mask */
+#define ECR_CTEST 0x0020 /* Stream Cipher Test Mode */
+#define ECR_IECT 0x0004 /* Invalid Error Code Test */
+#define ECR_SSD 0x0001 /* Stream Cipher Disable */
+
+#define MII_ICSPHY_QPR 0x11 /* Quick Poll Register */
+#define QPR_SPEED 0x8000 /* 100Mbps */
+#define QPR_FDX 0x4000 /* Full duplex */
+#define QPR_ANB2 0x2000 /* Autoneg monitor bit 2 */
+#define QPR_ANB1 0x1000 /* Autoneg monitor bit 1 */
+#define QPR_ANB0 0x0800 /* Autoneg monitor bit 0 */
+#define QPR_RXERR 0x0400 /* Receive signal lost */
+#define QPR_PLLERR 0x0200 /* PLL error */
+#define QPR_FCARR 0x0100 /* False carrier detected */
+#define QPR_INVALSYM 0x0080 /* Invalid Symbol Detected */
+#define QPR_HALT 0x0040 /* Halt Symbol Detected */
+#define QPR_PREEM 0x0020 /* Two Idle Symbols together */
+#define QPR_ACOMP 0x0010 /* Autonegotiation complete */
+#define QPR_SDETECT 0x0008 /* signal detect */
+#define QPR_JABBER 0x0004 /* Jabber detected */
+#define QPR_RFAULT 0x0002 /* Remote Fault */
+#define QPR_LINK 0x0001 /* Link */
+
+#define MII_ICSPHY_TTR 0x12 /* 10baseT Operations Register */
+#define TTR_RJABBER 0x8000 /* Remote Jabber */
+#define TTR_POLARITY 0x4000 /* Polarity Reversed */
+#define TTR_NOJABBER 0x0020 /* Disable Jabber Check */
+#define TTR_LOOP 0x0010 /* Loopback mode */
+#define TTR_NOAPOLARITY 0x0008 /* Disable auto polarity correction */
+#define TTR_NOSQE 0x0004 /* Disable SQE check */
+#define TTR_NOLINK 0x0002 /* Disable Link check */
+#define TTR_NOSQUELCH 0x0001 /* Disable squelch */
+
+/*
+ * Extended Control Register 2
+ *
+ * HEX 1889 1890 1892 1893
+ *-------------------------------------------------------------------
+ * 8000 Node/Repeater Mode * * *
+ * 4000 Hardware/Software Mode * * *
+ * 2000 Link Partner Support Remote Flt *
+ * 2000 Remote Fault * *
+ * 1000
+ * 0800
+ * 0400 Xmitted Remote Fault status *
+ * 0200
+ * 0100
+ * 0080 Tri-state Enable *
+ * 0040
+ * 0020
+ * 0010 A-N Powerup Remote Flt *
+ * 0008
+ * 0004
+ * 0002 Automatic 10Base-T Power Down *
+ * 0001 Automatic 100Base-TX Power Down * * *
+ */
+
+#define MII_ICSPHY_ECR2 0x13 /* Extended Control Register 2 */
+#define ECR2_REPEATER 0x8000 /* Repeater Mode */
+#define ECR2_HWSW 0x4000 /* hw/sw config priority */
+#define ECR2_LPRF 0x2000 /* link partner supports rem fault */
+#define ECR2_FORCERF 0x0400 /* Force transmit of rem fault */
+#define ECR2_RFPUP 0x0010 /* A-N Powerup Remote fault */
+#define ECR2_10AUTOPWRDN 0x0002 /* Automatic 10baseT power down */
+#define ECR2_100AUTOPWRDN 0x0001 /* Automatic 100baseTX power down */
+
+#endif /* _DEV_MII_ICSPHYREG_HH_ */
diff --git a/rtems/freebsd/dev/mii/mii.c b/rtems/freebsd/dev/mii/mii.c
new file mode 100644
index 00000000..73e2f84a
--- /dev/null
+++ b/rtems/freebsd/dev/mii/mii.c
@@ -0,0 +1,576 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/* $NetBSD: mii.c,v 1.12 1999/08/03 19:41:49 drochner Exp $ */
+
+/*-
+ * Copyright (c) 1998 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
+ * NASA Ames Research Center.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * MII bus layer, glues MII-capable network interface drivers to sharable
+ * PHY drivers. This exports an interface compatible with BSD/OS 3.0's,
+ * plus some NetBSD extensions.
+ */
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/module.h>
+#include <rtems/freebsd/sys/bus.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/if_media.h>
+#include <rtems/freebsd/net/route.h>
+
+#include <rtems/freebsd/dev/mii/mii.h>
+#include <rtems/freebsd/dev/mii/miivar.h>
+
+MODULE_VERSION(miibus, 1);
+
+#include <rtems/freebsd/local/miibus_if.h>
+
+static int miibus_print_child(device_t dev, device_t child);
+static int miibus_read_ivar(device_t dev, device_t child, int which,
+ uintptr_t *result);
+static int miibus_child_location_str(device_t bus, device_t child, char *buf,
+ size_t buflen);
+static int miibus_child_pnpinfo_str(device_t bus, device_t child, char *buf,
+ size_t buflen);
+static int miibus_readreg(device_t, int, int);
+static int miibus_writereg(device_t, int, int, int);
+static void miibus_statchg(device_t);
+static void miibus_linkchg(device_t);
+static void miibus_mediainit(device_t);
+
+static device_method_t miibus_methods[] = {
+ /* device interface */
+ DEVMETHOD(device_probe, miibus_probe),
+ DEVMETHOD(device_attach, miibus_attach),
+ DEVMETHOD(device_detach, miibus_detach),
+ DEVMETHOD(device_shutdown, bus_generic_shutdown),
+
+ /* bus interface */
+ DEVMETHOD(bus_print_child, miibus_print_child),
+ DEVMETHOD(bus_read_ivar, miibus_read_ivar),
+ DEVMETHOD(bus_driver_added, bus_generic_driver_added),
+ DEVMETHOD(bus_child_pnpinfo_str, miibus_child_pnpinfo_str),
+ DEVMETHOD(bus_child_location_str, miibus_child_location_str),
+
+ /* MII interface */
+ DEVMETHOD(miibus_readreg, miibus_readreg),
+ DEVMETHOD(miibus_writereg, miibus_writereg),
+ DEVMETHOD(miibus_statchg, miibus_statchg),
+ DEVMETHOD(miibus_linkchg, miibus_linkchg),
+ DEVMETHOD(miibus_mediainit, miibus_mediainit),
+
+ { 0, 0 }
+};
+
+devclass_t miibus_devclass;
+
+driver_t miibus_driver = {
+ "miibus",
+ miibus_methods,
+ sizeof(struct mii_data)
+};
+
+struct miibus_ivars {
+ struct ifnet *ifp;
+ ifm_change_cb_t ifmedia_upd;
+ ifm_stat_cb_t ifmedia_sts;
+ int mii_flags;
+};
+
+int
+miibus_probe(device_t dev)
+{
+
+ device_set_desc(dev, "MII bus");
+
+ return (BUS_PROBE_SPECIFIC);
+}
+
+int
+miibus_attach(device_t dev)
+{
+ struct miibus_ivars *ivars;
+ struct mii_attach_args *ma;
+ struct mii_data *mii;
+ device_t *children;
+ int i, nchildren;
+
+ mii = device_get_softc(dev);
+ nchildren = 0;
+ if (device_get_children(dev, &children, &nchildren) == 0) {
+ for (i = 0; i < nchildren; i++) {
+ ma = device_get_ivars(children[i]);
+ ma->mii_data = mii;
+ }
+ free(children, M_TEMP);
+ }
+ if (nchildren == 0) {
+ device_printf(dev, "cannot get children\n");
+ return (ENXIO);
+ }
+ ivars = device_get_ivars(dev);
+ ifmedia_init(&mii->mii_media, IFM_IMASK, ivars->ifmedia_upd,
+ ivars->ifmedia_sts);
+ mii->mii_ifp = ivars->ifp;
+ mii->mii_ifp->if_capabilities |= IFCAP_LINKSTATE;
+ mii->mii_ifp->if_capenable |= IFCAP_LINKSTATE;
+ LIST_INIT(&mii->mii_phys);
+
+ return (bus_generic_attach(dev));
+}
+
+int
+miibus_detach(device_t dev)
+{
+ struct mii_data *mii;
+
+ bus_generic_detach(dev);
+ mii = device_get_softc(dev);
+ ifmedia_removeall(&mii->mii_media);
+ mii->mii_ifp = NULL;
+
+ return (0);
+}
+
+static int
+miibus_print_child(device_t dev, device_t child)
+{
+ struct mii_attach_args *ma;
+ int retval;
+
+ ma = device_get_ivars(child);
+ retval = bus_print_child_header(dev, child);
+ retval += printf(" PHY %d", ma->mii_phyno);
+ retval += bus_print_child_footer(dev, child);
+
+ return (retval);
+}
+
+static int
+miibus_read_ivar(device_t dev, device_t child __unused, int which,
+ uintptr_t *result)
+{
+ struct miibus_ivars *ivars;
+
+ /*
+ * NB: this uses the instance variables of the miibus rather than
+ * its PHY children.
+ */
+ ivars = device_get_ivars(dev);
+ switch (which) {
+ case MIIBUS_IVAR_FLAGS:
+ *result = ivars->mii_flags;
+ break;
+ default:
+ return (ENOENT);
+ }
+ return (0);
+}
+
+static int
+miibus_child_pnpinfo_str(device_t bus __unused, device_t child, char *buf,
+ size_t buflen)
+{
+ struct mii_attach_args *ma;
+
+ ma = device_get_ivars(child);
+ snprintf(buf, buflen, "oui=0x%x model=0x%x rev=0x%x",
+ MII_OUI(ma->mii_id1, ma->mii_id2),
+ MII_MODEL(ma->mii_id2), MII_REV(ma->mii_id2));
+ return (0);
+}
+
+static int
+miibus_child_location_str(device_t bus __unused, device_t child, char *buf,
+ size_t buflen)
+{
+ struct mii_attach_args *ma;
+
+ ma = device_get_ivars(child);
+ snprintf(buf, buflen, "phyno=%d", ma->mii_phyno);
+ return (0);
+}
+
+static int
+miibus_readreg(device_t dev, int phy, int reg)
+{
+ device_t parent;
+
+ parent = device_get_parent(dev);
+ return (MIIBUS_READREG(parent, phy, reg));
+}
+
+static int
+miibus_writereg(device_t dev, int phy, int reg, int data)
+{
+ device_t parent;
+
+ parent = device_get_parent(dev);
+ return (MIIBUS_WRITEREG(parent, phy, reg, data));
+}
+
+static void
+miibus_statchg(device_t dev)
+{
+ device_t parent;
+ struct mii_data *mii;
+ struct ifnet *ifp;
+
+ parent = device_get_parent(dev);
+ MIIBUS_STATCHG(parent);
+
+ mii = device_get_softc(dev);
+
+ /*
+ * Note that each NIC's softc must start with an ifnet pointer.
+ * XXX: EVIL HACK!
+ */
+ ifp = *(struct ifnet **)device_get_softc(parent);
+ ifp->if_baudrate = ifmedia_baudrate(mii->mii_media_active);
+}
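+
+/*
+ * Illustrative sketch of what the hack above assumes (hypothetical
+ * foo(4) driver): the parent NIC's softc must begin with its ifnet
+ * pointer,
+ *
+ *	struct foo_softc {
+ *		struct ifnet *foo_ifp;	-- must be the first member
+ *		-- driver-private state follows
+ *	};
+ *
+ * so that *(struct ifnet **)device_get_softc(parent) is valid.
+ */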
+
+static void
+miibus_linkchg(device_t dev)
+{
+ struct mii_data *mii;
+ device_t parent;
+ int link_state;
+
+ parent = device_get_parent(dev);
+ MIIBUS_LINKCHG(parent);
+
+ mii = device_get_softc(dev);
+
+ if (mii->mii_media_status & IFM_AVALID) {
+ if (mii->mii_media_status & IFM_ACTIVE)
+ link_state = LINK_STATE_UP;
+ else
+ link_state = LINK_STATE_DOWN;
+ } else
+ link_state = LINK_STATE_UNKNOWN;
+ /*
+ * Note that each NIC's softc must start with an ifnet pointer.
+ * XXX: EVIL HACK!
+ */
+ if_link_state_change(*(struct ifnet**)device_get_softc(parent), link_state);
+}
+
+static void
+miibus_mediainit(device_t dev)
+{
+ struct mii_data *mii;
+ struct ifmedia_entry *m;
+ int media = 0;
+
+ /* Poke the parent in case it has any media of its own to add. */
+ MIIBUS_MEDIAINIT(device_get_parent(dev));
+
+ mii = device_get_softc(dev);
+ LIST_FOREACH(m, &mii->mii_media.ifm_list, ifm_list) {
+ media = m->ifm_media;
+ if (media == (IFM_ETHER | IFM_AUTO))
+ break;
+ }
+
+ ifmedia_set(&mii->mii_media, media);
+}
+
+/*
+ * Helper function used by network interface drivers, attaches the miibus and
+ * the PHYs to the network interface driver parent.
+ */
+int
+mii_attach(device_t dev, device_t *miibus, struct ifnet *ifp,
+ ifm_change_cb_t ifmedia_upd, ifm_stat_cb_t ifmedia_sts, int capmask,
+ int phyloc, int offloc, int flags)
+{
+ struct miibus_ivars *ivars;
+ struct mii_attach_args ma, *args;
+ device_t *children, phy;
+ int bmsr, first, i, nchildren, offset, phymax, phymin, rv;
+
+ if (phyloc != MII_PHY_ANY && offloc != MII_OFFSET_ANY) {
+ printf("%s: phyloc and offloc specified\n", __func__);
+ return (EINVAL);
+ }
+
+ if (offloc != MII_OFFSET_ANY && (offloc < 0 || offloc >= MII_NPHY)) {
+ printf("%s: ivalid offloc %d\n", __func__, offloc);
+ return (EINVAL);
+ }
+
+ if (phyloc == MII_PHY_ANY) {
+ phymin = 0;
+ phymax = MII_NPHY - 1;
+ } else {
+ if (phyloc < 0 || phyloc >= MII_NPHY) {
+ printf("%s: ivalid phyloc %d\n", __func__, phyloc);
+ return (EINVAL);
+ }
+ phymin = phymax = phyloc;
+ }
+
+ first = 0;
+ if (*miibus == NULL) {
+ first = 1;
+ ivars = malloc(sizeof(*ivars), M_DEVBUF, M_NOWAIT);
+ if (ivars == NULL)
+ return (ENOMEM);
+ ivars->ifp = ifp;
+ ivars->ifmedia_upd = ifmedia_upd;
+ ivars->ifmedia_sts = ifmedia_sts;
+ ivars->mii_flags = flags;
+ *miibus = device_add_child(dev, "miibus", -1);
+ if (*miibus == NULL) {
+ rv = ENXIO;
+ goto fail;
+ }
+ device_set_ivars(*miibus, ivars);
+ } else {
+ ivars = device_get_ivars(*miibus);
+ if (ivars->ifp != ifp || ivars->ifmedia_upd != ifmedia_upd ||
+ ivars->ifmedia_sts != ifmedia_sts ||
+ ivars->mii_flags != flags) {
+ printf("%s: non-matching invariant\n", __func__);
+ return (EINVAL);
+ }
+ /*
+ * Assignment of the attach arguments mii_data for the first
+ * pass is done in miibus_attach(), i.e. once the miibus softc
+ * has been allocated.
+ */
+ ma.mii_data = device_get_softc(*miibus);
+ }
+
+ ma.mii_capmask = capmask;
+
+ phy = NULL;
+ offset = 0;
+ for (ma.mii_phyno = phymin; ma.mii_phyno <= phymax; ma.mii_phyno++) {
+ /*
+ * Make sure we haven't already configured a PHY at this
+ * address. This allows mii_attach() to be called
+ * multiple times.
+ */
+ if (device_get_children(*miibus, &children, &nchildren) == 0) {
+ for (i = 0; i < nchildren; i++) {
+ args = device_get_ivars(children[i]);
+ if (args->mii_phyno == ma.mii_phyno) {
+ /*
+ * Yes, there is already something
+ * configured at this address.
+ */
+ free(children, M_TEMP);
+ goto skip;
+ }
+ }
+ free(children, M_TEMP);
+ }
+
+ /*
+ * Check to see if there is a PHY at this address. Note,
+ * many braindead PHYs report 0/0 in their ID registers,
+ * so we test for media in the BMSR.
+ */
+ bmsr = MIIBUS_READREG(dev, ma.mii_phyno, MII_BMSR);
+ if (bmsr == 0 || bmsr == 0xffff ||
+ (bmsr & (BMSR_EXTSTAT | BMSR_MEDIAMASK)) == 0) {
+ /* Assume no PHY at this address. */
+ continue;
+ }
+
+ /*
+ * There is a PHY at this address. If we were given an
+ * `offset' locator, skip this PHY if it doesn't match.
+ */
+ if (offloc != MII_OFFSET_ANY && offloc != offset)
+ goto skip;
+
+ /*
+ * Extract the IDs. Braindead PHYs will be handled by
+ * the `ukphy' driver, as we have no ID information to
+ * match on.
+ */
+ ma.mii_id1 = MIIBUS_READREG(dev, ma.mii_phyno, MII_PHYIDR1);
+ ma.mii_id2 = MIIBUS_READREG(dev, ma.mii_phyno, MII_PHYIDR2);
+
+ args = malloc(sizeof(struct mii_attach_args), M_DEVBUF,
+ M_NOWAIT);
+ if (args == NULL)
+ goto skip;
+ bcopy((char *)&ma, (char *)args, sizeof(ma));
+ phy = device_add_child(*miibus, NULL, -1);
+ if (phy == NULL) {
+ free(args, M_DEVBUF);
+ goto skip;
+ }
+ device_set_ivars(phy, args);
+ skip:
+ offset++;
+ }
+
+ if (first != 0) {
+ if (phy == NULL) {
+ rv = ENXIO;
+ goto fail;
+ }
+ rv = bus_generic_attach(dev);
+ if (rv != 0)
+ goto fail;
+
+ /* Attaching of the PHY drivers is done in miibus_attach(). */
+ return (0);
+ }
+ rv = bus_generic_attach(*miibus);
+ if (rv != 0)
+ goto fail;
+
+ return (0);
+
+ fail:
+ if (*miibus != NULL)
+ device_delete_child(dev, *miibus);
+ free(ivars, M_DEVBUF);
+ if (first != 0)
+ *miibus = NULL;
+ return (rv);
+}
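+
+/*
+ * Illustrative call (hypothetical foo(4) driver): a NIC attach routine
+ * would typically invoke
+ *
+ *	error = mii_attach(dev, &sc->foo_miibus, ifp, foo_ifmedia_upd,
+ *	    foo_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY,
+ *	    MII_OFFSET_ANY, 0);
+ *
+ * mii_phy_probe() below is the legacy wrapper that does exactly this.
+ */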
+
+int
+mii_phy_probe(device_t dev, device_t *child, ifm_change_cb_t ifmedia_upd,
+ ifm_stat_cb_t ifmedia_sts)
+{
+ struct ifnet *ifp;
+
+ /*
+ * Note that each NIC's softc must start with an ifnet pointer.
+ * XXX: EVIL HACK!
+ */
+ ifp = *(struct ifnet **)device_get_softc(dev);
+ return (mii_attach(dev, child, ifp, ifmedia_upd, ifmedia_sts,
+ BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0));
+}
+
+/*
+ * Media changed; notify all PHYs.
+ */
+int
+mii_mediachg(struct mii_data *mii)
+{
+ struct mii_softc *child;
+ struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
+ int rv;
+
+ mii->mii_media_status = 0;
+ mii->mii_media_active = IFM_NONE;
+
+ LIST_FOREACH(child, &mii->mii_phys, mii_list) {
+ /*
+ * If the media indicates a different PHY instance,
+ * isolate this one.
+ */
+ if (IFM_INST(ife->ifm_media) != child->mii_inst) {
+ if ((child->mii_flags & MIIF_NOISOLATE) != 0) {
+ device_printf(child->mii_dev, "%s: "
+ "can't handle non-zero PHY instance %d\n",
+ __func__, child->mii_inst);
+ continue;
+ }
+ PHY_WRITE(child, MII_BMCR, PHY_READ(child, MII_BMCR) |
+ BMCR_ISO);
+ continue;
+ }
+ rv = (*child->mii_service)(child, mii, MII_MEDIACHG);
+ if (rv)
+ return (rv);
+ }
+ return (0);
+}
+
+/*
+ * Call the PHY tick routines, used during autonegotiation.
+ */
+void
+mii_tick(struct mii_data *mii)
+{
+ struct mii_softc *child;
+ struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
+
+ LIST_FOREACH(child, &mii->mii_phys, mii_list) {
+ /*
+ * If this PHY instance isn't currently selected, just skip
+ * it.
+ */
+ if (IFM_INST(ife->ifm_media) != child->mii_inst)
+ continue;
+ (void)(*child->mii_service)(child, mii, MII_TICK);
+ }
+}
+
+/*
+ * Get media status from PHYs.
+ */
+void
+mii_pollstat(struct mii_data *mii)
+{
+ struct mii_softc *child;
+ struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
+
+ mii->mii_media_status = 0;
+ mii->mii_media_active = IFM_NONE;
+
+ LIST_FOREACH(child, &mii->mii_phys, mii_list) {
+ /*
+ * If we're not polling this PHY instance, just skip it.
+ */
+ if (IFM_INST(ife->ifm_media) != child->mii_inst)
+ continue;
+ (void)(*child->mii_service)(child, mii, MII_POLLSTAT);
+ }
+}
+
+/*
+ * Inform the PHYs that the interface is down.
+ */
+void
+mii_down(struct mii_data *mii)
+{
+ struct mii_softc *child;
+
+ LIST_FOREACH(child, &mii->mii_phys, mii_list)
+ mii_phy_down(child);
+}
diff --git a/rtems/freebsd/dev/mii/mii.h b/rtems/freebsd/dev/mii/mii.h
new file mode 100644
index 00000000..21c6b768
--- /dev/null
+++ b/rtems/freebsd/dev/mii/mii.h
@@ -0,0 +1,212 @@
+/* $NetBSD: mii.h,v 1.9 2001/05/31 03:07:14 thorpej Exp $ */
+
+/*-
+ * Copyright (c) 1997 Manuel Bouyer. All rights reserved.
+ *
+ * Modification to match BSD/OS 3.0 MII interface by Jason R. Thorpe,
+ * Numerical Aerospace Simulation Facility, NASA Ames Research Center.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _DEV_MII_MII_HH_
+#define _DEV_MII_MII_HH_
+
+/*
+ * Registers common to all PHYs.
+ */
+
+#define MII_NPHY 32 /* max # of PHYs per MII */
+
+/*
+ * MII commands, used if a device must drive the MII lines
+ * manually.
+ */
+#define MII_COMMAND_START 0x01
+#define MII_COMMAND_READ 0x02
+#define MII_COMMAND_WRITE 0x01
+#define MII_COMMAND_ACK 0x02
+
+#define MII_BMCR 0x00 /* Basic mode control register (rw) */
+#define BMCR_RESET 0x8000 /* reset */
+#define BMCR_LOOP 0x4000 /* loopback */
+#define BMCR_SPEED0 0x2000 /* speed selection (LSB) */
+#define BMCR_AUTOEN 0x1000 /* autonegotiation enable */
+#define BMCR_PDOWN 0x0800 /* power down */
+#define BMCR_ISO 0x0400 /* isolate */
+#define BMCR_STARTNEG 0x0200 /* restart autonegotiation */
+#define BMCR_FDX 0x0100 /* Set duplex mode */
+#define BMCR_CTEST 0x0080 /* collision test */
+#define BMCR_SPEED1 0x0040 /* speed selection (MSB) */
+
+#define BMCR_S10 0x0000 /* 10 Mb/s */
+#define BMCR_S100 BMCR_SPEED0 /* 100 Mb/s */
+#define BMCR_S1000 BMCR_SPEED1 /* 1000 Mb/s */
+
+#define BMCR_SPEED(x) ((x) & (BMCR_SPEED0|BMCR_SPEED1))
+
+#define MII_BMSR 0x01 /* Basic mode status register (ro) */
+#define BMSR_100T4 0x8000 /* 100 base T4 capable */
+#define BMSR_100TXFDX 0x4000 /* 100 base Tx full duplex capable */
+#define BMSR_100TXHDX 0x2000 /* 100 base Tx half duplex capable */
+#define BMSR_10TFDX 0x1000 /* 10 base T full duplex capable */
+#define BMSR_10THDX 0x0800 /* 10 base T half duplex capable */
+#define BMSR_100T2FDX 0x0400 /* 100 base T2 full duplex capable */
+#define BMSR_100T2HDX 0x0200 /* 100 base T2 half duplex capable */
+#define BMSR_EXTSTAT 0x0100 /* Extended status in register 15 */
+#define BMSR_MFPS 0x0040 /* MII Frame Preamble Suppression */
+#define BMSR_ACOMP 0x0020 /* Autonegotiation complete */
+#define BMSR_RFAULT 0x0010 /* Link partner fault */
+#define BMSR_ANEG 0x0008 /* Autonegotiation capable */
+#define BMSR_LINK 0x0004 /* Link status */
+#define BMSR_JABBER 0x0002 /* Jabber detected */
+#define BMSR_EXTCAP 0x0001 /* Extended capability */
+
+#define BMSR_DEFCAPMASK 0xffffffff
+
+/*
+ * Note that the EXTSTAT bit indicates that there is extended status
+ * info available in register 15, but 802.3 section 22.2.4.3 also
+ * states that all 1000 Mb/s capable PHYs will set this bit to 1.
+ */
+#if 0
+#define BMSR_MEDIAMASK (BMSR_100T4|BMSR_100TXFDX|BMSR_100TXHDX|BMSR_10TFDX| \
+ BMSR_10THDX|BMSR_ANEG)
+
+#else
+/* NetBSD uses: */
+#define BMSR_MEDIAMASK (BMSR_100T4|BMSR_100TXFDX|BMSR_100TXHDX| \
+ BMSR_10TFDX|BMSR_10THDX|BMSR_100T2FDX|BMSR_100T2HDX)
+#endif
+
+/*
+ * Convert BMSR media capabilities to ANAR bits for autonegotiation.
+ * Note the shift chops off the BMSR_ANEG bit.
+ */
+#define BMSR_MEDIA_TO_ANAR(x) (((x) & BMSR_MEDIAMASK) >> 6)
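+/*
+ * Worked example: BMSR_100TXFDX (0x4000) >> 6 == 0x0100 == ANAR_TX_FD,
+ * and BMSR_10THDX (0x0800) >> 6 == 0x0020 == ANAR_10, while BMSR_ANEG
+ * (0x0008) shifts out entirely.
+ */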
+
+#define MII_PHYIDR1 0x02 /* ID register 1 (ro) */
+
+#define MII_PHYIDR2 0x03 /* ID register 2 (ro) */
+#define IDR2_OUILSB 0xfc00 /* OUI LSB */
+#define IDR2_MODEL 0x03f0 /* vendor model */
+#define IDR2_REV 0x000f /* vendor revision */
+
+#define MII_OUI(id1, id2) (((id1) << 6) | ((id2) >> 10))
+#define MII_MODEL(id2) (((id2) & IDR2_MODEL) >> 4)
+#define MII_REV(id2) ((id2) & IDR2_REV)
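+/*
+ * Worked example with made-up register values id1 = 0x0015, id2 = 0xf450:
+ * MII_OUI() = (0x0015 << 6) | (0xf450 >> 10) = 0x57d, MII_MODEL() =
+ * (0xf450 & 0x03f0) >> 4 = 0x05 and MII_REV() = 0xf450 & 0x000f = 0x0.
+ */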
+
+#define MII_ANAR 0x04 /* Autonegotiation advertisement (rw) */
+ /* section 28.2.4.1 and 37.2.6.1 */
+#define ANAR_NP 0x8000 /* Next page (ro) */
+#define ANAR_ACK 0x4000 /* link partner abilities acknowledged (ro) */
+#define ANAR_RF 0x2000 /* remote fault (ro) */
+#define ANAR_FC 0x0400 /* local device supports PAUSE */
+#define ANAR_T4 0x0200 /* local device supports 100bT4 */
+#define ANAR_TX_FD 0x0100 /* local device supports 100bTx FD */
+#define ANAR_TX 0x0080 /* local device supports 100bTx */
+#define ANAR_10_FD 0x0040 /* local device supports 10bT FD */
+#define ANAR_10 0x0020 /* local device supports 10bT */
+#define ANAR_CSMA 0x0001 /* protocol selector CSMA/CD */
+#define ANAR_PAUSE_NONE (0 << 10)
+#define ANAR_PAUSE_SYM (1 << 10)
+#define ANAR_PAUSE_ASYM (2 << 10)
+#define ANAR_PAUSE_TOWARDS (3 << 10)
+
+#define ANAR_X_FD 0x0020 /* local device supports 1000BASE-X FD */
+#define ANAR_X_HD 0x0040 /* local device supports 1000BASE-X HD */
+#define ANAR_X_PAUSE_NONE (0 << 7)
+#define ANAR_X_PAUSE_SYM (1 << 7)
+#define ANAR_X_PAUSE_ASYM (2 << 7)
+#define ANAR_X_PAUSE_TOWARDS (3 << 7)
+
+#define MII_ANLPAR 0x05 /* Autonegotiation lnk partner abilities (rw) */
+ /* section 28.2.4.1 and 37.2.6.1 */
+#define ANLPAR_NP 0x8000 /* Next page (ro) */
+#define ANLPAR_ACK 0x4000 /* link partner accepted ACK (ro) */
+#define ANLPAR_RF 0x2000 /* remote fault (ro) */
+#define ANLPAR_FC 0x0400 /* link partner supports PAUSE */
+#define ANLPAR_T4 0x0200 /* link partner supports 100bT4 */
+#define ANLPAR_TX_FD 0x0100 /* link partner supports 100bTx FD */
+#define ANLPAR_TX 0x0080 /* link partner supports 100bTx */
+#define ANLPAR_10_FD 0x0040 /* link partner supports 10bT FD */
+#define ANLPAR_10 0x0020 /* link partner supports 10bT */
+#define ANLPAR_CSMA 0x0001 /* protocol selector CSMA/CD */
+#define ANLPAR_PAUSE_MASK (3 << 10)
+#define ANLPAR_PAUSE_NONE (0 << 10)
+#define ANLPAR_PAUSE_SYM (1 << 10)
+#define ANLPAR_PAUSE_ASYM (2 << 10)
+#define ANLPAR_PAUSE_TOWARDS (3 << 10)
+
+#define ANLPAR_X_FD 0x0020 /* link partner supports 1000BASE-X FD */
+#define ANLPAR_X_HD 0x0040 /* link partner supports 1000BASE-X HD */
+#define ANLPAR_X_PAUSE_MASK (3 << 7)
+#define ANLPAR_X_PAUSE_NONE (0 << 7)
+#define ANLPAR_X_PAUSE_SYM (1 << 7)
+#define ANLPAR_X_PAUSE_ASYM (2 << 7)
+#define ANLPAR_X_PAUSE_TOWARDS (3 << 7)
+
+#define MII_ANER 0x06 /* Autonegotiation expansion (ro) */
+ /* section 28.2.4.1 and 37.2.6.1 */
+#define ANER_MLF 0x0010 /* multiple link detection fault */
+#define ANER_LPNP 0x0008 /* link partner next page-able */
+#define ANER_NP 0x0004 /* next page-able */
+#define ANER_PAGE_RX 0x0002 /* Page received */
+#define ANER_LPAN 0x0001 /* link partner autoneg-able */
+
+#define MII_ANNP 0x07 /* Autonegotiation next page */
+ /* section 28.2.4.1 and 37.2.6.1 */
+
+#define MII_ANLPRNP 0x08 /* Autonegotiation link partner rx next page */
+ /* section 32.5.1 and 37.2.6.1 */
+
+ /* This is also the 1000baseT control register */
+#define MII_100T2CR 0x09 /* 100base-T2 control register */
+#define GTCR_TEST_MASK 0xe000 /* see 802.3ab ss. 40.6.1.1.2 */
+#define GTCR_MAN_MS 0x1000 /* enable manual master/slave control */
+#define GTCR_ADV_MS 0x0800 /* 1 = adv. master, 0 = adv. slave */
+#define GTCR_PORT_TYPE 0x0400 /* 1 = DCE, 0 = DTE (NIC) */
+#define GTCR_ADV_1000TFDX 0x0200 /* adv. 1000baseT FDX */
+#define GTCR_ADV_1000THDX 0x0100 /* adv. 1000baseT HDX */
+
+ /* This is also the 1000baseT status register */
+#define MII_100T2SR 0x0a /* 100base-T2 status register */
+#define GTSR_MAN_MS_FLT 0x8000 /* master/slave config fault */
+#define GTSR_MS_RES 0x4000 /* result: 1 = master, 0 = slave */
+#define GTSR_LRS 0x2000 /* local rx status, 1 = ok */
+#define GTSR_RRS 0x1000 /* remote rx status, 1 = ok */
+#define GTSR_LP_1000TFDX 0x0800 /* link partner 1000baseT FDX capable */
+#define GTSR_LP_1000THDX 0x0400 /* link partner 1000baseT HDX capable */
+#define GTSR_LP_ASM_DIR 0x0200 /* link partner asym. pause dir. capable */
+#define GTSR_IDLE_ERR 0x00ff /* IDLE error count */
+
+#define MII_EXTSR 0x0f /* Extended status register */
+#define EXTSR_1000XFDX 0x8000 /* 1000X full-duplex capable */
+#define EXTSR_1000XHDX 0x4000 /* 1000X half-duplex capable */
+#define EXTSR_1000TFDX 0x2000 /* 1000T full-duplex capable */
+#define EXTSR_1000THDX 0x1000 /* 1000T half-duplex capable */
+
+#define EXTSR_MEDIAMASK (EXTSR_1000XFDX|EXTSR_1000XHDX| \
+ EXTSR_1000TFDX|EXTSR_1000THDX)
+
+#endif /* _DEV_MII_MII_HH_ */
diff --git a/rtems/freebsd/dev/mii/mii_physubr.c b/rtems/freebsd/dev/mii/mii_physubr.c
new file mode 100644
index 00000000..866c6be6
--- /dev/null
+++ b/rtems/freebsd/dev/mii/mii_physubr.c
@@ -0,0 +1,667 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/* $NetBSD: mii_physubr.c,v 1.5 1999/08/03 19:41:49 drochner Exp $ */
+
+/*-
+ * Copyright (c) 1998, 1999, 2000, 2001 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
+ * NASA Ames Research Center.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * Subroutines common to all PHYs.
+ */
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/errno.h>
+#include <rtems/freebsd/sys/module.h>
+#include <rtems/freebsd/sys/bus.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/if_media.h>
+
+#include <rtems/freebsd/dev/mii/mii.h>
+#include <rtems/freebsd/dev/mii/miivar.h>
+
+#include <rtems/freebsd/local/miibus_if.h>
+
+/*
+ * Media to register setting conversion table. Order matters.
+ */
+const struct mii_media mii_media_table[MII_NMEDIA] = {
+ /* None */
+ { BMCR_ISO, ANAR_CSMA,
+ 0, },
+
+ /* 10baseT */
+ { BMCR_S10, ANAR_CSMA|ANAR_10,
+ 0, },
+
+ /* 10baseT-FDX */
+ { BMCR_S10|BMCR_FDX, ANAR_CSMA|ANAR_10_FD,
+ 0, },
+
+ /* 100baseT4 */
+ { BMCR_S100, ANAR_CSMA|ANAR_T4,
+ 0, },
+
+ /* 100baseTX */
+ { BMCR_S100, ANAR_CSMA|ANAR_TX,
+ 0, },
+
+ /* 100baseTX-FDX */
+ { BMCR_S100|BMCR_FDX, ANAR_CSMA|ANAR_TX_FD,
+ 0, },
+
+ /* 1000baseX */
+ { BMCR_S1000, ANAR_CSMA,
+ 0, },
+
+ /* 1000baseX-FDX */
+ { BMCR_S1000|BMCR_FDX, ANAR_CSMA,
+ 0, },
+
+ /* 1000baseT */
+ { BMCR_S1000, ANAR_CSMA,
+ GTCR_ADV_1000THDX },
+
+ /* 1000baseT-FDX */
+ { BMCR_S1000, ANAR_CSMA,
+ GTCR_ADV_1000TFDX },
+};
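+
+/*
+ * Illustrative lookup: a media word registered with data index
+ * MII_MEDIA_100_TX_FDX selects the entry
+ * { BMCR_S100|BMCR_FDX, ANAR_CSMA|ANAR_TX_FD, 0 } above, which
+ * mii_phy_setmedia() below writes to BMCR, ANAR and (if present) GTCR.
+ */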
+
+void
+mii_phy_setmedia(struct mii_softc *sc)
+{
+ struct mii_data *mii = sc->mii_pdata;
+ struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
+ int bmcr, anar, gtcr;
+
+ if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
+ /*
+ * Force renegotiation if MIIF_DOPAUSE or MIIF_FORCEANEG.
+ * The former is necessary as we might switch from flow-
+ * control advertisement being off to on or vice versa.
+ */
+ if ((PHY_READ(sc, MII_BMCR) & BMCR_AUTOEN) == 0 ||
+ (sc->mii_flags & (MIIF_DOPAUSE | MIIF_FORCEANEG)) != 0)
+ (void)mii_phy_auto(sc);
+ return;
+ }
+
+ /*
+ * Table index is stored in the media entry.
+ */
+
+ KASSERT(ife->ifm_data >= 0 && ife->ifm_data < MII_NMEDIA,
+ ("invalid ife->ifm_data (0x%x) in mii_phy_setmedia",
+ ife->ifm_data));
+
+ anar = mii_media_table[ife->ifm_data].mm_anar;
+ bmcr = mii_media_table[ife->ifm_data].mm_bmcr;
+ gtcr = mii_media_table[ife->ifm_data].mm_gtcr;
+
+ if (IFM_SUBTYPE(ife->ifm_media) == IFM_1000_T) {
+ gtcr |= GTCR_MAN_MS;
+ if ((ife->ifm_media & IFM_ETH_MASTER) != 0)
+ gtcr |= GTCR_ADV_MS;
+ }
+
+ if ((ife->ifm_media & IFM_GMASK) == (IFM_FDX | IFM_FLOW) ||
+ (sc->mii_flags & MIIF_FORCEPAUSE) != 0) {
+ if ((sc->mii_flags & MIIF_IS_1000X) != 0)
+ anar |= ANAR_X_PAUSE_TOWARDS;
+ else {
+ anar |= ANAR_FC;
+ /* XXX Only 1000BASE-T has PAUSE_ASYM? */
+ if ((sc->mii_flags & MIIF_HAVE_GTCR) != 0 &&
+ (sc->mii_extcapabilities &
+ (EXTSR_1000THDX | EXTSR_1000TFDX)) != 0)
+ anar |= ANAR_X_PAUSE_ASYM;
+ }
+ }
+
+ if ((ife->ifm_media & IFM_LOOP) != 0)
+ bmcr |= BMCR_LOOP;
+
+ PHY_WRITE(sc, MII_ANAR, anar);
+ PHY_WRITE(sc, MII_BMCR, bmcr);
+ if ((sc->mii_flags & MIIF_HAVE_GTCR) != 0)
+ PHY_WRITE(sc, MII_100T2CR, gtcr);
+}
+
+int
+mii_phy_auto(struct mii_softc *sc)
+{
+ struct ifmedia_entry *ife = sc->mii_pdata->mii_media.ifm_cur;
+ int anar, gtcr;
+
+ /*
+ * Check for 1000BASE-X. Autonegotiation is a bit
+ * different on such devices.
+ */
+ if ((sc->mii_flags & MIIF_IS_1000X) != 0) {
+ anar = 0;
+ if ((sc->mii_extcapabilities & EXTSR_1000XFDX) != 0)
+ anar |= ANAR_X_FD;
+ if ((sc->mii_extcapabilities & EXTSR_1000XHDX) != 0)
+ anar |= ANAR_X_HD;
+
+ if ((ife->ifm_media & IFM_FLOW) != 0 ||
+ (sc->mii_flags & MIIF_FORCEPAUSE) != 0)
+ anar |= ANAR_X_PAUSE_TOWARDS;
+ PHY_WRITE(sc, MII_ANAR, anar);
+ } else {
+ anar = BMSR_MEDIA_TO_ANAR(sc->mii_capabilities) |
+ ANAR_CSMA;
+ if ((ife->ifm_media & IFM_FLOW) != 0 ||
+ (sc->mii_flags & MIIF_FORCEPAUSE) != 0) {
+ if ((sc->mii_capabilities & BMSR_100TXFDX) != 0)
+ anar |= ANAR_FC;
+ /* XXX Only 1000BASE-T has PAUSE_ASYM? */
+ if (((sc->mii_flags & MIIF_HAVE_GTCR) != 0) &&
+ (sc->mii_extcapabilities &
+ (EXTSR_1000THDX | EXTSR_1000TFDX)) != 0)
+ anar |= ANAR_X_PAUSE_ASYM;
+ }
+ PHY_WRITE(sc, MII_ANAR, anar);
+ if ((sc->mii_flags & MIIF_HAVE_GTCR) != 0) {
+ gtcr = 0;
+ if ((sc->mii_extcapabilities & EXTSR_1000TFDX) != 0)
+ gtcr |= GTCR_ADV_1000TFDX;
+ if ((sc->mii_extcapabilities & EXTSR_1000THDX) != 0)
+ gtcr |= GTCR_ADV_1000THDX;
+ PHY_WRITE(sc, MII_100T2CR, gtcr);
+ }
+ }
+ PHY_WRITE(sc, MII_BMCR, BMCR_AUTOEN | BMCR_STARTNEG);
+ return (EJUSTRETURN);
+}
+
+int
+mii_phy_tick(struct mii_softc *sc)
+{
+ struct ifmedia_entry *ife = sc->mii_pdata->mii_media.ifm_cur;
+ struct ifnet *ifp = sc->mii_pdata->mii_ifp;
+ int reg;
+
+ /* Just bail now if the interface is down. */
+ if ((ifp->if_flags & IFF_UP) == 0)
+ return (EJUSTRETURN);
+
+ /*
+ * If we're not doing autonegotiation, we don't need to do
+ * any extra work here. However, we need to check the link
+ * status so we can generate an announcement if the status
+ * changes.
+ */
+ if (IFM_SUBTYPE(ife->ifm_media) != IFM_AUTO) {
+ sc->mii_ticks = 0; /* reset autonegotiation timer. */
+ return (0);
+ }
+
+ /* Read the status register twice; BMSR_LINK is latch-low. */
+ reg = PHY_READ(sc, MII_BMSR) | PHY_READ(sc, MII_BMSR);
+ if ((reg & BMSR_LINK) != 0) {
+ sc->mii_ticks = 0; /* reset autonegotiation timer. */
+ /* See above. */
+ return (0);
+ }
+
+ /* Announce link loss right after it happens */
+ if (sc->mii_ticks++ == 0)
+ return (0);
+
+ /* XXX: use default value if phy driver did not set mii_anegticks */
+ if (sc->mii_anegticks == 0)
+ sc->mii_anegticks = MII_ANEGTICKS_GIGE;
+
+ /* Only retry autonegotiation every mii_anegticks ticks. */
+ if (sc->mii_ticks <= sc->mii_anegticks)
+ return (EJUSTRETURN);
+
+ sc->mii_ticks = 0;
+ mii_phy_reset(sc);
+ mii_phy_auto(sc);
+ return (0);
+}
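+
+/*
+ * Timing sketch: mii_tick() drives this once per second, so with the
+ * MII_ANEGTICKS_GIGE default of 17 a PHY that has lost link retries
+ * autonegotiation roughly every 17 seconds.
+ */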
+
+void
+mii_phy_reset(struct mii_softc *sc)
+{
+ struct ifmedia_entry *ife = sc->mii_pdata->mii_media.ifm_cur;
+ int reg, i;
+
+ if ((sc->mii_flags & MIIF_NOISOLATE) != 0)
+ reg = BMCR_RESET;
+ else
+ reg = BMCR_RESET | BMCR_ISO;
+ PHY_WRITE(sc, MII_BMCR, reg);
+
+ /* Wait 100ms for it to complete. */
+ for (i = 0; i < 100; i++) {
+ reg = PHY_READ(sc, MII_BMCR);
+ if ((reg & BMCR_RESET) == 0)
+ break;
+ DELAY(1000);
+ }
+
+ if ((sc->mii_flags & MIIF_NOISOLATE) == 0) {
+ if ((ife == NULL && sc->mii_inst != 0) ||
+ (ife != NULL && IFM_INST(ife->ifm_media) != sc->mii_inst))
+ PHY_WRITE(sc, MII_BMCR, reg | BMCR_ISO);
+ }
+}
+
+void
+mii_phy_down(struct mii_softc *sc)
+{
+
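+ /* Nothing to do at present; this is the per-PHY hook used by mii_down(). */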
+}
+
+void
+mii_phy_update(struct mii_softc *sc, int cmd)
+{
+ struct mii_data *mii = sc->mii_pdata;
+
+ if (sc->mii_media_active != mii->mii_media_active ||
+ cmd == MII_MEDIACHG) {
+ MIIBUS_STATCHG(sc->mii_dev);
+ sc->mii_media_active = mii->mii_media_active;
+ }
+ if (sc->mii_media_status != mii->mii_media_status) {
+ MIIBUS_LINKCHG(sc->mii_dev);
+ sc->mii_media_status = mii->mii_media_status;
+ }
+}
+
+/*
+ * Given an ifmedia word, return the corresponding ANAR value.
+ */
+int
+mii_anar(int media)
+{
+ int rv;
+
+ switch (media & (IFM_TMASK|IFM_NMASK|IFM_FDX)) {
+ case IFM_ETHER|IFM_10_T:
+ rv = ANAR_10|ANAR_CSMA;
+ break;
+ case IFM_ETHER|IFM_10_T|IFM_FDX:
+ rv = ANAR_10_FD|ANAR_CSMA;
+ break;
+ case IFM_ETHER|IFM_100_TX:
+ rv = ANAR_TX|ANAR_CSMA;
+ break;
+ case IFM_ETHER|IFM_100_TX|IFM_FDX:
+ rv = ANAR_TX_FD|ANAR_CSMA;
+ break;
+ case IFM_ETHER|IFM_100_T4:
+ rv = ANAR_T4|ANAR_CSMA;
+ break;
+ default:
+ rv = 0;
+ break;
+ }
+
+ return (rv);
+}
+
+/*
+ * Initialize generic PHY media based on BMSR, called when a PHY is
+ * attached. We expect to be set up to print a comma-separated list
+ * of media names. Does not print a newline.
+ */
+void
+mii_add_media(struct mii_softc *sc)
+{
+ struct mii_data *mii = sc->mii_pdata;
+ const char *sep = "";
+
+ if ((sc->mii_capabilities & BMSR_MEDIAMASK) == 0) {
+ printf("no media present");
+ return;
+ }
+
+#define ADD(m, c) ifmedia_add(&mii->mii_media, (m), (c), NULL)
+#define PRINT(s) printf("%s%s", sep, s); sep = ", "
+
+ if (sc->mii_capabilities & BMSR_10THDX) {
+ ADD(IFM_MAKEWORD(IFM_ETHER, IFM_10_T, 0, sc->mii_inst), 0);
+ PRINT("10baseT");
+ }
+ if (sc->mii_capabilities & BMSR_10TFDX) {
+ ADD(IFM_MAKEWORD(IFM_ETHER, IFM_10_T, IFM_FDX, sc->mii_inst),
+ BMCR_FDX);
+ PRINT("10baseT-FDX");
+ }
+ if (sc->mii_capabilities & BMSR_100TXHDX) {
+ ADD(IFM_MAKEWORD(IFM_ETHER, IFM_100_TX, 0, sc->mii_inst),
+ BMCR_S100);
+ PRINT("100baseTX");
+ }
+ if (sc->mii_capabilities & BMSR_100TXFDX) {
+ ADD(IFM_MAKEWORD(IFM_ETHER, IFM_100_TX, IFM_FDX, sc->mii_inst),
+ BMCR_S100|BMCR_FDX);
+ PRINT("100baseTX-FDX");
+ }
+ if (sc->mii_capabilities & BMSR_100T4) {
+ /*
+ * XXX How do you enable 100baseT4? I assume we set
+ * XXX BMCR_S100 and then assume the PHYs will take
+ * XXX whatever action is necessary to switch themselves
+ * XXX into T4 mode.
+ */
+ ADD(IFM_MAKEWORD(IFM_ETHER, IFM_100_T4, 0, sc->mii_inst),
+ BMCR_S100);
+ PRINT("100baseT4");
+ }
+ if (sc->mii_capabilities & BMSR_ANEG) {
+ ADD(IFM_MAKEWORD(IFM_ETHER, IFM_AUTO, 0, sc->mii_inst),
+ BMCR_AUTOEN);
+ PRINT("auto");
+ }
+
+#undef ADD
+#undef PRINT
+}
+
+/*
+ * Initialize generic PHY media based on BMSR, called when a PHY is
+ * attached. We expect to be set up to print a comma-separated list
+ * of media names. Does not print a newline.
+ */
+void
+mii_phy_add_media(struct mii_softc *sc)
+{
+ struct mii_data *mii = sc->mii_pdata;
+ const char *sep = "";
+ int fdx = 0;
+
+ if ((sc->mii_capabilities & BMSR_MEDIAMASK) == 0 &&
+ (sc->mii_extcapabilities & EXTSR_MEDIAMASK) == 0) {
+ printf("no media present");
+ return;
+ }
+
+ /*
+ * Set the autonegotiation timer for 10/100 media. Gigabit media is
+ * handled below.
+ */
+ sc->mii_anegticks = MII_ANEGTICKS;
+
+#define ADD(m, c) ifmedia_add(&mii->mii_media, (m), (c), NULL)
+#define PRINT(s) printf("%s%s", sep, s); sep = ", "
+
+ if ((sc->mii_flags & MIIF_NOISOLATE) == 0)
+ ADD(IFM_MAKEWORD(IFM_ETHER, IFM_NONE, 0, sc->mii_inst),
+ MII_MEDIA_NONE);
+
+ /*
+ * There are different interpretations for the bits in
+ * HomePNA PHYs. And there is really only one media type
+ * that is supported.
+ */
+ if ((sc->mii_flags & MIIF_IS_HPNA) != 0) {
+ if ((sc->mii_capabilities & BMSR_10THDX) != 0) {
+ ADD(IFM_MAKEWORD(IFM_ETHER, IFM_HPNA_1, 0,
+ sc->mii_inst), MII_MEDIA_10_T);
+ PRINT("HomePNA1");
+ }
+ return;
+ }
+
+ if ((sc->mii_capabilities & BMSR_10THDX) != 0) {
+ ADD(IFM_MAKEWORD(IFM_ETHER, IFM_10_T, 0, sc->mii_inst),
+ MII_MEDIA_10_T);
+ PRINT("10baseT");
+ }
+ if ((sc->mii_capabilities & BMSR_10TFDX) != 0) {
+ ADD(IFM_MAKEWORD(IFM_ETHER, IFM_10_T, IFM_FDX, sc->mii_inst),
+ MII_MEDIA_10_T_FDX);
+ PRINT("10baseT-FDX");
+ if ((sc->mii_flags & MIIF_DOPAUSE) != 0 &&
+ (sc->mii_flags & MIIF_NOMANPAUSE) == 0) {
+ ADD(IFM_MAKEWORD(IFM_ETHER, IFM_10_T,
+ IFM_FDX | IFM_FLOW, sc->mii_inst),
+ MII_MEDIA_10_T_FDX);
+ PRINT("10baseT-FDX-flow");
+ }
+ fdx = 1;
+ }
+ if ((sc->mii_capabilities & BMSR_100TXHDX) != 0) {
+ ADD(IFM_MAKEWORD(IFM_ETHER, IFM_100_TX, 0, sc->mii_inst),
+ MII_MEDIA_100_TX);
+ PRINT("100baseTX");
+ }
+ if ((sc->mii_capabilities & BMSR_100TXFDX) != 0) {
+ ADD(IFM_MAKEWORD(IFM_ETHER, IFM_100_TX, IFM_FDX, sc->mii_inst),
+ MII_MEDIA_100_TX_FDX);
+ PRINT("100baseTX-FDX");
+ if ((sc->mii_flags & MIIF_DOPAUSE) != 0 &&
+ (sc->mii_flags & MIIF_NOMANPAUSE) == 0) {
+ ADD(IFM_MAKEWORD(IFM_ETHER, IFM_100_TX,
+ IFM_FDX | IFM_FLOW, sc->mii_inst),
+ MII_MEDIA_100_TX_FDX);
+ PRINT("100baseTX-FDX-flow");
+ }
+ fdx = 1;
+ }
+ if ((sc->mii_capabilities & BMSR_100T4) != 0) {
+ ADD(IFM_MAKEWORD(IFM_ETHER, IFM_100_T4, 0, sc->mii_inst),
+ MII_MEDIA_100_T4);
+ PRINT("100baseT4");
+ }
+
+ if ((sc->mii_extcapabilities & EXTSR_MEDIAMASK) != 0) {
+ /*
+ * XXX Right now only handle 1000SX and 1000TX. Need
+ * XXX to handle 1000LX and 1000CX somehow.
+ */
+ if ((sc->mii_extcapabilities & EXTSR_1000XHDX) != 0) {
+ sc->mii_anegticks = MII_ANEGTICKS_GIGE;
+ sc->mii_flags |= MIIF_IS_1000X;
+ ADD(IFM_MAKEWORD(IFM_ETHER, IFM_1000_SX, 0,
+ sc->mii_inst), MII_MEDIA_1000_X);
+ PRINT("1000baseSX");
+ }
+ if ((sc->mii_extcapabilities & EXTSR_1000XFDX) != 0) {
+ sc->mii_anegticks = MII_ANEGTICKS_GIGE;
+ sc->mii_flags |= MIIF_IS_1000X;
+ ADD(IFM_MAKEWORD(IFM_ETHER, IFM_1000_SX, IFM_FDX,
+ sc->mii_inst), MII_MEDIA_1000_X_FDX);
+ PRINT("1000baseSX-FDX");
+ if ((sc->mii_flags & MIIF_DOPAUSE) != 0 &&
+ (sc->mii_flags & MIIF_NOMANPAUSE) == 0) {
+ ADD(IFM_MAKEWORD(IFM_ETHER, IFM_1000_SX,
+ IFM_FDX | IFM_FLOW, sc->mii_inst),
+ MII_MEDIA_1000_X_FDX);
+ PRINT("1000baseSX-FDX-flow");
+ }
+ fdx = 1;
+ }
+
+ /*
+ * 1000baseT media needs to be able to manipulate
+ * master/slave mode.
+ *
+ * All 1000baseT PHYs have a 1000baseT control register.
+ */
+ if ((sc->mii_extcapabilities & EXTSR_1000THDX) != 0) {
+ sc->mii_anegticks = MII_ANEGTICKS_GIGE;
+ sc->mii_flags |= MIIF_HAVE_GTCR;
+ ADD(IFM_MAKEWORD(IFM_ETHER, IFM_1000_T, 0,
+ sc->mii_inst), MII_MEDIA_1000_T);
+ PRINT("1000baseT");
+ ADD(IFM_MAKEWORD(IFM_ETHER, IFM_1000_T,
+ IFM_ETH_MASTER, sc->mii_inst), MII_MEDIA_1000_T);
+ PRINT("1000baseT-master");
+ }
+ if ((sc->mii_extcapabilities & EXTSR_1000TFDX) != 0) {
+ sc->mii_anegticks = MII_ANEGTICKS_GIGE;
+ sc->mii_flags |= MIIF_HAVE_GTCR;
+ ADD(IFM_MAKEWORD(IFM_ETHER, IFM_1000_T, IFM_FDX,
+ sc->mii_inst), MII_MEDIA_1000_T_FDX);
+ PRINT("1000baseT-FDX");
+ ADD(IFM_MAKEWORD(IFM_ETHER, IFM_1000_T,
+ IFM_FDX | IFM_ETH_MASTER, sc->mii_inst),
+ MII_MEDIA_1000_T_FDX);
+ PRINT("1000baseT-FDX-master");
+ if ((sc->mii_flags & MIIF_DOPAUSE) != 0 &&
+ (sc->mii_flags & MIIF_NOMANPAUSE) == 0) {
+ ADD(IFM_MAKEWORD(IFM_ETHER, IFM_1000_T,
+ IFM_FDX | IFM_FLOW, sc->mii_inst),
+ MII_MEDIA_1000_T_FDX);
+ PRINT("1000baseT-FDX-flow");
+ ADD(IFM_MAKEWORD(IFM_ETHER, IFM_1000_T,
+ IFM_FDX | IFM_FLOW | IFM_ETH_MASTER,
+ sc->mii_inst), MII_MEDIA_1000_T_FDX);
+ PRINT("1000baseT-FDX-flow-master");
+ }
+ fdx = 1;
+ }
+ }
+
+ if ((sc->mii_capabilities & BMSR_ANEG) != 0) {
+ /* intentionally invalid index */
+ ADD(IFM_MAKEWORD(IFM_ETHER, IFM_AUTO, 0, sc->mii_inst),
+ MII_NMEDIA);
+ PRINT("auto");
+ if (fdx != 0 && (sc->mii_flags & MIIF_DOPAUSE) != 0) {
+ ADD(IFM_MAKEWORD(IFM_ETHER, IFM_AUTO, IFM_FLOW,
+ sc->mii_inst), MII_NMEDIA);
+ PRINT("auto-flow");
+ }
+ }
+#undef ADD
+#undef PRINT
+}
+
+int
+mii_phy_detach(device_t dev)
+{
+ struct mii_softc *sc;
+
+ sc = device_get_softc(dev);
+ mii_phy_down(sc);
+ sc->mii_dev = NULL;
+ LIST_REMOVE(sc, mii_list);
+ return (0);
+}
+
+const struct mii_phydesc *
+mii_phy_match_gen(const struct mii_attach_args *ma,
+ const struct mii_phydesc *mpd, size_t len)
+{
+
+ for (; mpd->mpd_name != NULL;
+ mpd = (const struct mii_phydesc *)((const char *)mpd + len)) {
+ if (MII_OUI(ma->mii_id1, ma->mii_id2) == mpd->mpd_oui &&
+ MII_MODEL(ma->mii_id2) == mpd->mpd_model)
+ return (mpd);
+ }
+ return (NULL);
+}
+
+const struct mii_phydesc *
+mii_phy_match(const struct mii_attach_args *ma, const struct mii_phydesc *mpd)
+{
+
+ return (mii_phy_match_gen(ma, mpd, sizeof(struct mii_phydesc)));
+}
+
+int
+mii_phy_dev_probe(device_t dev, const struct mii_phydesc *mpd, int mrv)
+{
+
+ mpd = mii_phy_match(device_get_ivars(dev), mpd);
+ if (mpd != NULL) {
+ device_set_desc(dev, mpd->mpd_name);
+ return (mrv);
+ }
+ return (ENXIO);
+}
+
+/*
+ * Return the flow control status flag from MII_ANAR & MII_ANLPAR.
+ */
+u_int
+mii_phy_flowstatus(struct mii_softc *sc)
+{
+ int anar, anlpar;
+
+ if ((sc->mii_flags & MIIF_DOPAUSE) == 0)
+ return (0);
+
+ anar = PHY_READ(sc, MII_ANAR);
+ anlpar = PHY_READ(sc, MII_ANLPAR);
+
+ /*
+ * Check for 1000BASE-X. Its PAUSE bits sit at ANAR/ANLPAR bits 7-8
+ * rather than at bits 10-11, so shift them up by 3 to line up with
+ * the ANAR_PAUSE and ANLPAR_PAUSE masks tested below.
+ */
+ if ((sc->mii_flags & MIIF_IS_1000X) != 0) {
+ anar <<= 3;
+ anlpar <<= 3;
+ }
+
+ if ((anar & ANAR_PAUSE_SYM) != 0 && (anlpar & ANLPAR_PAUSE_SYM) != 0)
+ return (IFM_FLOW | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE);
+
+ if ((anar & ANAR_PAUSE_SYM) == 0) {
+ if ((anar & ANAR_PAUSE_ASYM) != 0 &&
+ (anlpar & ANLPAR_PAUSE_TOWARDS) != 0)
+ return (IFM_FLOW | IFM_ETH_TXPAUSE);
+ else
+ return (0);
+ }
+
+ if ((anar & ANAR_PAUSE_ASYM) == 0) {
+ if ((anlpar & ANLPAR_PAUSE_SYM) != 0)
+ return (IFM_FLOW | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE);
+ else
+ return (0);
+ }
+
+ switch ((anlpar & ANLPAR_PAUSE_TOWARDS)) {
+ case ANLPAR_PAUSE_NONE:
+ return (0);
+ case ANLPAR_PAUSE_ASYM:
+ return (IFM_FLOW | IFM_ETH_RXPAUSE);
+ default:
+ return (IFM_FLOW | IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE);
+ }
+ /* NOTREACHED */
+}
diff --git a/rtems/freebsd/dev/mii/miivar.h b/rtems/freebsd/dev/mii/miivar.h
new file mode 100644
index 00000000..d523ebb2
--- /dev/null
+++ b/rtems/freebsd/dev/mii/miivar.h
@@ -0,0 +1,260 @@
+/* $NetBSD: miivar.h,v 1.8 1999/04/23 04:24:32 thorpej Exp $ */
+
+/*-
+ * Copyright (c) 1998, 1999 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
+ * NASA Ames Research Center.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _DEV_MII_MIIVAR_HH_
+#define _DEV_MII_MIIVAR_HH_
+
+#include <rtems/freebsd/sys/queue.h>
+
+/*
+ * Media Independent Interface configuration definitions.
+ */
+
+struct mii_softc;
+
+/*
+ * Callbacks from MII layer into network interface device driver.
+ */
+typedef int (*mii_readreg_t)(struct device *, int, int);
+typedef void (*mii_writereg_t)(struct device *, int, int, int);
+typedef void (*mii_statchg_t)(struct device *);
+
+/*
+ * A network interface driver has one of these structures in its softc.
+ * It is the interface from the network interface driver to the MII
+ * layer.
+ */
+struct mii_data {
+ struct ifmedia mii_media; /* media information */
+ struct ifnet *mii_ifp; /* pointer back to network interface */
+
+ /*
+ * For network interfaces with multiple PHYs, a list of all
+ * PHYs is required so they can all be notified when a media
+ * request is made.
+ */
+ LIST_HEAD(mii_listhead, mii_softc) mii_phys;
+ int mii_instance;
+
+ /*
+ * PHY driver fills this in with active media status.
+ */
+ int mii_media_status;
+ int mii_media_active;
+
+ /*
+ * Calls from MII layer into network interface driver.
+ */
+ mii_readreg_t mii_readreg;
+ mii_writereg_t mii_writereg;
+ mii_statchg_t mii_statchg;
+};
+typedef struct mii_data mii_data_t;
+
+/*
+ * This call is used by the MII layer to call into the PHY driver
+ * to perform a `service request'.
+ */
+typedef int (*mii_downcall_t)(struct mii_softc *, struct mii_data *, int);
+
+/*
+ * Requests that can be made to the downcall.
+ */
+#define MII_TICK 1 /* once-per-second tick */
+#define MII_MEDIACHG 2 /* user changed media; perform the switch */
+#define MII_POLLSTAT 3 /* user requested media status; fill it in */
+
+/*
+ * Each PHY driver's softc has one of these as the first member.
+ * XXX This would be better named "phy_softc", but this is the name
+ * XXX BSDI used, and we would like to have the same interface.
+ */
+struct mii_softc {
+ device_t mii_dev; /* generic device glue */
+
+ LIST_ENTRY(mii_softc) mii_list; /* entry on parent's PHY list */
+
+ int mii_phy; /* our MII address */
+ int mii_inst; /* instance for ifmedia */
+
+ mii_downcall_t mii_service; /* our downcall */
+ struct mii_data *mii_pdata; /* pointer to parent's mii_data */
+
+ int mii_flags; /* misc. flags; see below */
+ int mii_capabilities; /* capabilities from BMSR */
+ int mii_extcapabilities; /* extended capabilities */
+ int mii_ticks; /* MII_TICK counter */
+ int mii_anegticks; /* ticks before retrying aneg */
+ int mii_media_active; /* last active media */
+ int mii_media_status; /* last active status */
+};
+typedef struct mii_softc mii_softc_t;
+
+/* mii_flags */
+#define MIIF_INITDONE 0x00000001 /* has been initialized (mii_data) */
+#define MIIF_NOISOLATE 0x00000002 /* do not isolate the PHY */
+#define MIIF_NOLOOP 0x00000004 /* no loopback capability */
+#define MIIF_DOINGAUTO 0x00000008 /* doing autonegotiation (mii_softc) */
+#define MIIF_AUTOTSLEEP 0x00000010 /* use tsleep(), not callout() */
+#define MIIF_HAVEFIBER 0x00000020 /* from parent: has fiber interface */
+#define MIIF_HAVE_GTCR 0x00000040 /* has 100base-T2/1000base-T CR */
+#define MIIF_IS_1000X 0x00000080 /* is a 1000BASE-X device */
+#define MIIF_DOPAUSE 0x00000100 /* advertise PAUSE capability */
+#define MIIF_IS_HPNA 0x00000200 /* is a HomePNA device */
+#define MIIF_FORCEANEG 0x00000400 /* force auto-negotiation */
+#define MIIF_NOMANPAUSE 0x00100000 /* no manual PAUSE selection */
+#define MIIF_FORCEPAUSE 0x00200000 /* force PAUSE advertisement */
+#define MIIF_MACPRIV0 0x01000000 /* private to the MAC driver */
+#define MIIF_MACPRIV1 0x02000000 /* private to the MAC driver */
+#define MIIF_MACPRIV2 0x04000000 /* private to the MAC driver */
+#define MIIF_PHYPRIV0 0x10000000 /* private to the PHY driver */
+#define MIIF_PHYPRIV1 0x20000000 /* private to the PHY driver */
+#define MIIF_PHYPRIV2 0x40000000 /* private to the PHY driver */
+
+/* Default mii_anegticks values */
+#define MII_ANEGTICKS 5
+#define MII_ANEGTICKS_GIGE 17
+
+#define MIIF_INHERIT_MASK (MIIF_NOISOLATE|MIIF_NOLOOP|MIIF_AUTOTSLEEP)
+
+/*
+ * Special `locators' passed to mii_attach(). If one of these is not
+ * an `any' value, we look for *that* PHY and configure it. If both
+ * are not `any', that is an error, and mii_attach() will fail.
+ */
+#define MII_OFFSET_ANY -1
+#define MII_PHY_ANY -1
+
+/*
+ * Used to attach a PHY to a parent.
+ */
+struct mii_attach_args {
+ struct mii_data *mii_data; /* pointer to parent data */
+ int mii_phyno; /* MII address */
+ int mii_id1; /* PHY ID register 1 */
+ int mii_id2; /* PHY ID register 2 */
+ int mii_capmask; /* capability mask from BMSR */
+};
+typedef struct mii_attach_args mii_attach_args_t;
+
+/*
+ * Used to match a PHY.
+ */
+struct mii_phydesc {
+ u_int32_t mpd_oui; /* the PHY's OUI */
+ u_int32_t mpd_model; /* the PHY's model */
+ const char *mpd_name; /* the PHY's name */
+};
+#define MII_PHY_DESC(a, b) { MII_OUI_ ## a, MII_MODEL_ ## a ## _ ## b, \
+ MII_STR_ ## a ## _ ## b }
+#define MII_PHY_END { 0, 0, NULL }
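+
+/*
+ * Illustrative usage (hypothetical foophys table): a PHY driver
+ * describes the devices it supports as
+ *
+ *	static const struct mii_phydesc foophys[] = {
+ *		MII_PHY_DESC(xxICS, 1890),
+ *		MII_PHY_END
+ *	};
+ *
+ * and matches them from its probe routine via mii_phy_dev_probe().
+ */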
+
+/*
+ * An array of these structures map MII media types to BMCR/ANAR settings.
+ */
+struct mii_media {
+ int mm_bmcr; /* BMCR settings for this media */
+ int mm_anar; /* ANAR settings for this media */
+ int mm_gtcr; /* 100base-T2 or 1000base-T CR */
+};
+
+#define MII_MEDIA_NONE 0
+#define MII_MEDIA_10_T 1
+#define MII_MEDIA_10_T_FDX 2
+#define MII_MEDIA_100_T4 3
+#define MII_MEDIA_100_TX 4
+#define MII_MEDIA_100_TX_FDX 5
+#define MII_MEDIA_1000_X 6
+#define MII_MEDIA_1000_X_FDX 7
+#define MII_MEDIA_1000_T 8
+#define MII_MEDIA_1000_T_FDX 9
+#define MII_NMEDIA 10
+
+#ifdef _KERNEL
+
+#define PHY_READ(p, r) \
+ MIIBUS_READREG((p)->mii_dev, (p)->mii_phy, (r))
+
+#define PHY_WRITE(p, r, v) \
+ MIIBUS_WRITEREG((p)->mii_dev, (p)->mii_phy, (r), (v))
+
+enum miibus_device_ivars {
+ MIIBUS_IVAR_FLAGS
+};
+
+/*
+ * Simplified accessors for miibus
+ */
+#define MIIBUS_ACCESSOR(var, ivar, type) \
+ __BUS_ACCESSOR(miibus, var, MIIBUS, ivar, type)
+
+MIIBUS_ACCESSOR(flags, FLAGS, int)
+
+extern devclass_t miibus_devclass;
+extern driver_t miibus_driver;
+
+int miibus_probe(device_t);
+int miibus_attach(device_t);
+int miibus_detach(device_t);
+
+int mii_attach(device_t, device_t *, struct ifnet *, ifm_change_cb_t,
+ ifm_stat_cb_t, int, int, int, int);
+int mii_anar(int);
+void mii_down(struct mii_data *);
+int mii_mediachg(struct mii_data *);
+void mii_tick(struct mii_data *);
+void mii_pollstat(struct mii_data *);
+int mii_phy_probe(device_t, device_t *, ifm_change_cb_t, ifm_stat_cb_t);
+void mii_add_media(struct mii_softc *);
+void mii_phy_add_media(struct mii_softc *);
+
+int mii_phy_auto(struct mii_softc *);
+int mii_phy_detach(device_t dev);
+void mii_phy_down(struct mii_softc *);
+u_int mii_phy_flowstatus(struct mii_softc *);
+void mii_phy_reset(struct mii_softc *);
+void mii_phy_setmedia(struct mii_softc *sc);
+void mii_phy_update(struct mii_softc *, int);
+int mii_phy_tick(struct mii_softc *);
+
+const struct mii_phydesc * mii_phy_match(const struct mii_attach_args *ma,
+ const struct mii_phydesc *mpd);
+const struct mii_phydesc * mii_phy_match_gen(const struct mii_attach_args *ma,
+ const struct mii_phydesc *mpd, size_t endlen);
+int mii_phy_dev_probe(device_t dev, const struct mii_phydesc *mpd, int mrv);
+
+void ukphy_status(struct mii_softc *);
+#endif /* _KERNEL */
+
+#endif /* _DEV_MII_MIIVAR_HH_ */
diff --git a/rtems/freebsd/dev/pci/pcireg.h b/rtems/freebsd/dev/pci/pcireg.h
new file mode 100644
index 00000000..936ffd88
--- /dev/null
+++ b/rtems/freebsd/dev/pci/pcireg.h
@@ -0,0 +1 @@
+/* EMPTY */
diff --git a/rtems/freebsd/dev/pci/pcivar.h b/rtems/freebsd/dev/pci/pcivar.h
new file mode 100644
index 00000000..936ffd88
--- /dev/null
+++ b/rtems/freebsd/dev/pci/pcivar.h
@@ -0,0 +1 @@
+/* EMPTY */
diff --git a/rtems/freebsd/dev/usb/controller/ehci.c b/rtems/freebsd/dev/usb/controller/ehci.c
new file mode 100644
index 00000000..1762dbfa
--- /dev/null
+++ b/rtems/freebsd/dev/usb/controller/ehci.c
@@ -0,0 +1,3939 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 2008 Hans Petter Selasky. All rights reserved.
+ * Copyright (c) 2004 The NetBSD Foundation, Inc. All rights reserved.
+ * Copyright (c) 2004 Lennart Augustsson. All rights reserved.
+ * Copyright (c) 2004 Charles M. Hannum. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * USB Enhanced Host Controller Driver, a.k.a. USB 2.0 controller.
+ *
+ * The EHCI 0.96 spec can be found at
+ * http://developer.intel.com/technology/usb/download/ehci-r096.pdf
+ * The EHCI 1.0 spec can be found at
+ * http://developer.intel.com/technology/usb/download/ehci-r10.pdf
+ * and the USB 2.0 spec at
+ * http://www.usb.org/developers/docs/usb_20.zip
+ *
+ */
+
+/*
+ * TODO:
+ * 1) command failures are not recovered correctly
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/freebsd/sys/stdint.h>
+#include <rtems/freebsd/sys/stddef.h>
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/queue.h>
+#include <rtems/freebsd/sys/types.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/bus.h>
+#include <rtems/freebsd/sys/linker_set.h>
+#include <rtems/freebsd/sys/module.h>
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/mutex.h>
+#include <rtems/freebsd/sys/condvar.h>
+#include <rtems/freebsd/sys/sysctl.h>
+#include <rtems/freebsd/sys/sx.h>
+#include <rtems/freebsd/sys/unistd.h>
+#include <rtems/freebsd/sys/callout.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/priv.h>
+
+#include <rtems/freebsd/dev/usb/usb.h>
+#include <rtems/freebsd/dev/usb/usbdi.h>
+
+#define USB_DEBUG_VAR ehcidebug
+
+#include <rtems/freebsd/dev/usb/usb_core.h>
+#include <rtems/freebsd/dev/usb/usb_debug.h>
+#include <rtems/freebsd/dev/usb/usb_busdma.h>
+#include <rtems/freebsd/dev/usb/usb_process.h>
+#include <rtems/freebsd/dev/usb/usb_transfer.h>
+#include <rtems/freebsd/dev/usb/usb_device.h>
+#include <rtems/freebsd/dev/usb/usb_hub.h>
+#include <rtems/freebsd/dev/usb/usb_util.h>
+
+#include <rtems/freebsd/dev/usb/usb_controller.h>
+#include <rtems/freebsd/dev/usb/usb_bus.h>
+#include <rtems/freebsd/dev/usb/controller/ehci.h>
+#include <rtems/freebsd/dev/usb/controller/ehcireg.h>
+
+#define EHCI_BUS2SC(bus) \
+ ((ehci_softc_t *)(((uint8_t *)(bus)) - \
+ ((uint8_t *)&(((ehci_softc_t *)0)->sc_bus))))
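+
+/*
+ * EHCI_BUS2SC() recovers the softc from a pointer to its embedded
+ * "sc_bus" member (a container-of computation); it is equivalent to
+ * (ehci_softc_t *)((char *)(bus) - offsetof(ehci_softc_t, sc_bus)).
+ */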
+
+#ifdef USB_DEBUG
+static int ehcidebug = 0;
+static int ehcinohighspeed = 0;
+static int ehciiaadbug = 0;
+static int ehcilostintrbug = 0;
+
+SYSCTL_NODE(_hw_usb, OID_AUTO, ehci, CTLFLAG_RW, 0, "USB ehci");
+SYSCTL_INT(_hw_usb_ehci, OID_AUTO, debug, CTLFLAG_RW,
+ &ehcidebug, 0, "Debug level");
+SYSCTL_INT(_hw_usb_ehci, OID_AUTO, no_hs, CTLFLAG_RW,
+ &ehcinohighspeed, 0, "Disable High Speed USB");
+SYSCTL_INT(_hw_usb_ehci, OID_AUTO, iaadbug, CTLFLAG_RW,
+ &ehciiaadbug, 0, "Enable doorbell bug workaround");
+SYSCTL_INT(_hw_usb_ehci, OID_AUTO, lostintrbug, CTLFLAG_RW,
+ &ehcilostintrbug, 0, "Enable lost interrupt bug workaround");
+
+TUNABLE_INT("hw.usb.ehci.debug", &ehcidebug);
+TUNABLE_INT("hw.usb.ehci.no_hs", &ehcinohighspeed);
+TUNABLE_INT("hw.usb.ehci.iaadbug", &ehciiaadbug);
+TUNABLE_INT("hw.usb.ehci.lostintrbug", &ehcilostintrbug);
+
+static void ehci_dump_regs(ehci_softc_t *sc);
+static void ehci_dump_sqh(ehci_softc_t *sc, ehci_qh_t *sqh);
+
+#endif
+
+#define EHCI_INTR_ENDPT 1
+
+extern struct usb_bus_methods ehci_bus_methods;
+extern struct usb_pipe_methods ehci_device_bulk_methods;
+extern struct usb_pipe_methods ehci_device_ctrl_methods;
+extern struct usb_pipe_methods ehci_device_intr_methods;
+extern struct usb_pipe_methods ehci_device_isoc_fs_methods;
+extern struct usb_pipe_methods ehci_device_isoc_hs_methods;
+
+static void ehci_do_poll(struct usb_bus *);
+static void ehci_device_done(struct usb_xfer *, usb_error_t);
+static uint8_t ehci_check_transfer(struct usb_xfer *);
+static void ehci_timeout(void *);
+static void ehci_poll_timeout(void *);
+
+static void ehci_root_intr(ehci_softc_t *sc);
+
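+/*
+ * Scratch state used while building a qTD chain; it is filled in by
+ * ehci_setup_standard_chain() and consumed by
+ * ehci_setup_standard_chain_sub() below.
+ */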
+struct ehci_std_temp {
+ ehci_softc_t *sc;
+ struct usb_page_cache *pc;
+ ehci_qtd_t *td;
+ ehci_qtd_t *td_next;
+ uint32_t average;
+ uint32_t qtd_status;
+ uint32_t len;
+ uint16_t max_frame_size;
+ uint8_t shortpkt;
+ uint8_t auto_data_toggle;
+ uint8_t setup_alt_next;
+ uint8_t last_frame;
+};
+
+void
+ehci_iterate_hw_softc(struct usb_bus *bus, usb_bus_mem_sub_cb_t *cb)
+{
+ ehci_softc_t *sc = EHCI_BUS2SC(bus);
+ uint32_t i;
+
+ cb(bus, &sc->sc_hw.pframes_pc, &sc->sc_hw.pframes_pg,
+ sizeof(uint32_t) * EHCI_FRAMELIST_COUNT, EHCI_FRAMELIST_ALIGN);
+
+ cb(bus, &sc->sc_hw.terminate_pc, &sc->sc_hw.terminate_pg,
+ sizeof(struct ehci_qh_sub), EHCI_QH_ALIGN);
+
+ cb(bus, &sc->sc_hw.async_start_pc, &sc->sc_hw.async_start_pg,
+ sizeof(ehci_qh_t), EHCI_QH_ALIGN);
+
+ for (i = 0; i != EHCI_VIRTUAL_FRAMELIST_COUNT; i++) {
+ cb(bus, sc->sc_hw.intr_start_pc + i,
+ sc->sc_hw.intr_start_pg + i,
+ sizeof(ehci_qh_t), EHCI_QH_ALIGN);
+ }
+
+ for (i = 0; i != EHCI_VIRTUAL_FRAMELIST_COUNT; i++) {
+ cb(bus, sc->sc_hw.isoc_hs_start_pc + i,
+ sc->sc_hw.isoc_hs_start_pg + i,
+ sizeof(ehci_itd_t), EHCI_ITD_ALIGN);
+ }
+
+ for (i = 0; i != EHCI_VIRTUAL_FRAMELIST_COUNT; i++) {
+ cb(bus, sc->sc_hw.isoc_fs_start_pc + i,
+ sc->sc_hw.isoc_fs_start_pg + i,
+ sizeof(ehci_sitd_t), EHCI_SITD_ALIGN);
+ }
+}
+
+usb_error_t
+ehci_reset(ehci_softc_t *sc)
+{
+ uint32_t hcr;
+ int i;
+
+ EOWRITE4(sc, EHCI_USBCMD, EHCI_CMD_HCRESET);
+ for (i = 0; i < 100; i++) {
+ usb_pause_mtx(NULL, hz / 1000);
+ hcr = EOREAD4(sc, EHCI_USBCMD) & EHCI_CMD_HCRESET;
+ if (!hcr) {
+ if (sc->sc_flags & (EHCI_SCFLG_SETMODE | EHCI_SCFLG_BIGEMMIO)) {
+ /*
+ * Force USBMODE as requested. Controllers
+ * may have multiple operating modes.
+ */
+ uint32_t usbmode = EOREAD4(sc, EHCI_USBMODE);
+ if (sc->sc_flags & EHCI_SCFLG_SETMODE) {
+ usbmode = (usbmode &~ EHCI_UM_CM) | EHCI_UM_CM_HOST;
+ device_printf(sc->sc_bus.bdev,
+ "set host controller mode\n");
+ }
+ if (sc->sc_flags & EHCI_SCFLG_BIGEMMIO) {
+ usbmode = (usbmode &~ EHCI_UM_ES) | EHCI_UM_ES_BE;
+ device_printf(sc->sc_bus.bdev,
+ "set big-endian mode\n");
+ }
+ EOWRITE4(sc, EHCI_USBMODE, usbmode);
+ }
+ return (0);
+ }
+ }
+ device_printf(sc->sc_bus.bdev, "reset timeout\n");
+ return (USB_ERR_IOERROR);
+}
+
+static usb_error_t
+ehci_hcreset(ehci_softc_t *sc)
+{
+ uint32_t hcr;
+ int i;
+
+ EOWRITE4(sc, EHCI_USBCMD, 0); /* Halt controller */
+ for (i = 0; i < 100; i++) {
+ usb_pause_mtx(NULL, hz / 1000);
+ hcr = EOREAD4(sc, EHCI_USBSTS) & EHCI_STS_HCH;
+ if (hcr)
+ break;
+ }
+	if (!hcr) {
+		/*
+		 * Fall through and try reset anyway even though
+		 * Table 2-9 in the EHCI spec says this will result
+		 * in undefined behavior.
+		 */
+		device_printf(sc->sc_bus.bdev, "stop timeout\n");
+	}
+
+ return ehci_reset(sc);
+}
+
+usb_error_t
+ehci_init(ehci_softc_t *sc)
+{
+ struct usb_page_search buf_res;
+ uint32_t version;
+ uint32_t sparams;
+ uint32_t cparams;
+ uint32_t hcr;
+ uint16_t i;
+ uint16_t x;
+ uint16_t y;
+ uint16_t bit;
+ usb_error_t err = 0;
+
+ DPRINTF("start\n");
+
+ usb_callout_init_mtx(&sc->sc_tmo_pcd, &sc->sc_bus.bus_mtx, 0);
+ usb_callout_init_mtx(&sc->sc_tmo_poll, &sc->sc_bus.bus_mtx, 0);
+
+#ifdef USB_DEBUG
+ if (ehciiaadbug)
+ sc->sc_flags |= EHCI_SCFLG_IAADBUG;
+ if (ehcilostintrbug)
+ sc->sc_flags |= EHCI_SCFLG_LOSTINTRBUG;
+ if (ehcidebug > 2) {
+ ehci_dump_regs(sc);
+ }
+#endif
+
+ sc->sc_offs = EHCI_CAPLENGTH(EREAD4(sc, EHCI_CAPLEN_HCIVERSION));
+
+ version = EHCI_HCIVERSION(EREAD4(sc, EHCI_CAPLEN_HCIVERSION));
+ device_printf(sc->sc_bus.bdev, "EHCI version %x.%x\n",
+ version >> 8, version & 0xff);
+
+ sparams = EREAD4(sc, EHCI_HCSPARAMS);
+ DPRINTF("sparams=0x%x\n", sparams);
+
+ sc->sc_noport = EHCI_HCS_N_PORTS(sparams);
+ cparams = EREAD4(sc, EHCI_HCCPARAMS);
+ DPRINTF("cparams=0x%x\n", cparams);
+
+ if (EHCI_HCC_64BIT(cparams)) {
+ DPRINTF("HCC uses 64-bit structures\n");
+
+ /* MUST clear segment register if 64 bit capable */
+ EWRITE4(sc, EHCI_CTRLDSSEGMENT, 0);
+ }
+ sc->sc_bus.usbrev = USB_REV_2_0;
+
+ /* Reset the controller */
+ DPRINTF("%s: resetting\n", device_get_nameunit(sc->sc_bus.bdev));
+
+ err = ehci_hcreset(sc);
+ if (err) {
+ device_printf(sc->sc_bus.bdev, "reset timeout\n");
+ return (err);
+ }
+ /*
+	 * Use the current frame-list-size selection:
+	 * 0: 1024*4 bytes, 1: 512*4 bytes, 2: 256*4 bytes, 3: unknown
+ */
+ if (EHCI_CMD_FLS(EOREAD4(sc, EHCI_USBCMD)) == 3) {
+ device_printf(sc->sc_bus.bdev, "invalid frame-list-size\n");
+ return (USB_ERR_IOERROR);
+ }
+ /* set up the bus struct */
+ sc->sc_bus.methods = &ehci_bus_methods;
+
+ sc->sc_eintrs = EHCI_NORMAL_INTRS;
+
+ if (1) {
+ struct ehci_qh_sub *qh;
+
+ usbd_get_page(&sc->sc_hw.terminate_pc, 0, &buf_res);
+
+ qh = buf_res.buffer;
+
+ sc->sc_terminate_self = htohc32(sc, buf_res.physaddr);
+
+ /* init terminate TD */
+ qh->qtd_next =
+ htohc32(sc, EHCI_LINK_TERMINATE);
+ qh->qtd_altnext =
+ htohc32(sc, EHCI_LINK_TERMINATE);
+ qh->qtd_status =
+ htohc32(sc, EHCI_QTD_HALTED);
+ }
+
+ for (i = 0; i < EHCI_VIRTUAL_FRAMELIST_COUNT; i++) {
+ ehci_qh_t *qh;
+
+ usbd_get_page(sc->sc_hw.intr_start_pc + i, 0, &buf_res);
+
+ qh = buf_res.buffer;
+
+ /* initialize page cache pointer */
+
+ qh->page_cache = sc->sc_hw.intr_start_pc + i;
+
+ /* store a pointer to queue head */
+
+ sc->sc_intr_p_last[i] = qh;
+
+ qh->qh_self =
+ htohc32(sc, buf_res.physaddr) |
+ htohc32(sc, EHCI_LINK_QH);
+
+ qh->qh_endp =
+ htohc32(sc, EHCI_QH_SET_EPS(EHCI_QH_SPEED_HIGH));
+ qh->qh_endphub =
+ htohc32(sc, EHCI_QH_SET_MULT(1));
+ qh->qh_curqtd = 0;
+
+ qh->qh_qtd.qtd_next =
+ htohc32(sc, EHCI_LINK_TERMINATE);
+ qh->qh_qtd.qtd_altnext =
+ htohc32(sc, EHCI_LINK_TERMINATE);
+ qh->qh_qtd.qtd_status =
+ htohc32(sc, EHCI_QTD_HALTED);
+ }
+
+ /*
+ * the QHs are arranged to give poll intervals that are
+ * powers of 2 times 1ms
+ */
+ bit = EHCI_VIRTUAL_FRAMELIST_COUNT / 2;
+ while (bit) {
+ x = bit;
+ while (x & bit) {
+ ehci_qh_t *qh_x;
+ ehci_qh_t *qh_y;
+
+ y = (x ^ bit) | (bit / 2);
+
+ qh_x = sc->sc_intr_p_last[x];
+ qh_y = sc->sc_intr_p_last[y];
+
+ /*
+ * the next QH has half the poll interval
+ */
+ qh_x->qh_link = qh_y->qh_self;
+
+ x++;
+ }
+ bit >>= 1;
+ }
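+
+	/*
+	 * For illustration: with a virtual frame list of 8 entries the
+	 * loop above links
+	 *
+	 *	QH4->QH2  QH5->QH3  QH6->QH2  QH7->QH3
+	 *	QH2->QH1  QH3->QH1  QH1->QH0
+	 *
+	 * so every path converges on QH0, the 1ms queue head, which is
+	 * terminated below.
+	 */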
+
+ if (1) {
+ ehci_qh_t *qh;
+
+ qh = sc->sc_intr_p_last[0];
+
+ /* the last (1ms) QH terminates */
+ qh->qh_link = htohc32(sc, EHCI_LINK_TERMINATE);
+ }
+ for (i = 0; i < EHCI_VIRTUAL_FRAMELIST_COUNT; i++) {
+ ehci_sitd_t *sitd;
+ ehci_itd_t *itd;
+
+ usbd_get_page(sc->sc_hw.isoc_fs_start_pc + i, 0, &buf_res);
+
+ sitd = buf_res.buffer;
+
+ /* initialize page cache pointer */
+
+ sitd->page_cache = sc->sc_hw.isoc_fs_start_pc + i;
+
+ /* store a pointer to the transfer descriptor */
+
+ sc->sc_isoc_fs_p_last[i] = sitd;
+
+ /* initialize full speed isochronous */
+
+ sitd->sitd_self =
+ htohc32(sc, buf_res.physaddr) |
+ htohc32(sc, EHCI_LINK_SITD);
+
+ sitd->sitd_back =
+ htohc32(sc, EHCI_LINK_TERMINATE);
+
+ sitd->sitd_next =
+ sc->sc_intr_p_last[i | (EHCI_VIRTUAL_FRAMELIST_COUNT / 2)]->qh_self;
+
+
+ usbd_get_page(sc->sc_hw.isoc_hs_start_pc + i, 0, &buf_res);
+
+ itd = buf_res.buffer;
+
+ /* initialize page cache pointer */
+
+ itd->page_cache = sc->sc_hw.isoc_hs_start_pc + i;
+
+ /* store a pointer to the transfer descriptor */
+
+ sc->sc_isoc_hs_p_last[i] = itd;
+
+ /* initialize high speed isochronous */
+
+ itd->itd_self =
+ htohc32(sc, buf_res.physaddr) |
+ htohc32(sc, EHCI_LINK_ITD);
+
+ itd->itd_next =
+ sitd->sitd_self;
+ }
+
+ usbd_get_page(&sc->sc_hw.pframes_pc, 0, &buf_res);
+
+ if (1) {
+ uint32_t *pframes;
+
+ pframes = buf_res.buffer;
+
+ /*
+ * execution order:
+ * pframes -> high speed isochronous ->
+ * full speed isochronous -> interrupt QH's
+ */
+ for (i = 0; i < EHCI_FRAMELIST_COUNT; i++) {
+ pframes[i] = sc->sc_isoc_hs_p_last
+ [i & (EHCI_VIRTUAL_FRAMELIST_COUNT - 1)]->itd_self;
+ }
+ }
+ /* setup sync list pointer */
+ EOWRITE4(sc, EHCI_PERIODICLISTBASE, buf_res.physaddr);
+
+ usbd_get_page(&sc->sc_hw.async_start_pc, 0, &buf_res);
+
+ if (1) {
+
+ ehci_qh_t *qh;
+
+ qh = buf_res.buffer;
+
+ /* initialize page cache pointer */
+
+ qh->page_cache = &sc->sc_hw.async_start_pc;
+
+ /* store a pointer to the queue head */
+
+ sc->sc_async_p_last = qh;
+
+ /* init dummy QH that starts the async list */
+
+ qh->qh_self =
+ htohc32(sc, buf_res.physaddr) |
+ htohc32(sc, EHCI_LINK_QH);
+
+ /* fill the QH */
+ qh->qh_endp =
+ htohc32(sc, EHCI_QH_SET_EPS(EHCI_QH_SPEED_HIGH) | EHCI_QH_HRECL);
+ qh->qh_endphub = htohc32(sc, EHCI_QH_SET_MULT(1));
+ qh->qh_link = qh->qh_self;
+ qh->qh_curqtd = 0;
+
+ /* fill the overlay qTD */
+ qh->qh_qtd.qtd_next = htohc32(sc, EHCI_LINK_TERMINATE);
+ qh->qh_qtd.qtd_altnext = htohc32(sc, EHCI_LINK_TERMINATE);
+ qh->qh_qtd.qtd_status = htohc32(sc, EHCI_QTD_HALTED);
+ }
+ /* flush all cache into memory */
+
+ usb_bus_mem_flush_all(&sc->sc_bus, &ehci_iterate_hw_softc);
+
+#ifdef USB_DEBUG
+ if (ehcidebug) {
+ ehci_dump_sqh(sc, sc->sc_async_p_last);
+ }
+#endif
+
+ /* setup async list pointer */
+ EOWRITE4(sc, EHCI_ASYNCLISTADDR, buf_res.physaddr | EHCI_LINK_QH);
+
+
+ /* enable interrupts */
+ EOWRITE4(sc, EHCI_USBINTR, sc->sc_eintrs);
+
+ /* turn on controller */
+ EOWRITE4(sc, EHCI_USBCMD,
+	    EHCI_CMD_ITC_1 |	/* 1 microframe interrupt delay */
+ (EOREAD4(sc, EHCI_USBCMD) & EHCI_CMD_FLS_M) |
+ EHCI_CMD_ASE |
+ EHCI_CMD_PSE |
+ EHCI_CMD_RS);
+
+ /* Take over port ownership */
+ EOWRITE4(sc, EHCI_CONFIGFLAG, EHCI_CONF_CF);
+
+ for (i = 0; i < 100; i++) {
+ usb_pause_mtx(NULL, hz / 1000);
+ hcr = EOREAD4(sc, EHCI_USBSTS) & EHCI_STS_HCH;
+ if (!hcr) {
+ break;
+ }
+ }
+ if (hcr) {
+ device_printf(sc->sc_bus.bdev, "run timeout\n");
+ return (USB_ERR_IOERROR);
+ }
+
+ if (!err) {
+ /* catch any lost interrupts */
+ ehci_do_poll(&sc->sc_bus);
+ }
+ return (err);
+}
+
+/*
+ * shut down the controller when the system is going down
+ */
+void
+ehci_detach(ehci_softc_t *sc)
+{
+ USB_BUS_LOCK(&sc->sc_bus);
+
+ usb_callout_stop(&sc->sc_tmo_pcd);
+ usb_callout_stop(&sc->sc_tmo_poll);
+
+ EOWRITE4(sc, EHCI_USBINTR, 0);
+ USB_BUS_UNLOCK(&sc->sc_bus);
+
+ if (ehci_hcreset(sc)) {
+ DPRINTF("reset failed!\n");
+ }
+
+ /* XXX let stray task complete */
+ usb_pause_mtx(NULL, hz / 20);
+
+ usb_callout_drain(&sc->sc_tmo_pcd);
+ usb_callout_drain(&sc->sc_tmo_poll);
+}
+
+void
+ehci_suspend(ehci_softc_t *sc)
+{
+ uint32_t cmd;
+ uint32_t hcr;
+ uint8_t i;
+
+ USB_BUS_LOCK(&sc->sc_bus);
+
+ for (i = 1; i <= sc->sc_noport; i++) {
+ cmd = EOREAD4(sc, EHCI_PORTSC(i));
+ if (((cmd & EHCI_PS_PO) == 0) &&
+ ((cmd & EHCI_PS_PE) == EHCI_PS_PE)) {
+ EOWRITE4(sc, EHCI_PORTSC(i),
+ cmd | EHCI_PS_SUSP);
+ }
+ }
+
+ sc->sc_cmd = EOREAD4(sc, EHCI_USBCMD);
+
+ cmd = sc->sc_cmd & ~(EHCI_CMD_ASE | EHCI_CMD_PSE);
+ EOWRITE4(sc, EHCI_USBCMD, cmd);
+
+ for (i = 0; i < 100; i++) {
+ hcr = EOREAD4(sc, EHCI_USBSTS) &
+ (EHCI_STS_ASS | EHCI_STS_PSS);
+
+ if (hcr == 0) {
+ break;
+ }
+ usb_pause_mtx(&sc->sc_bus.bus_mtx, hz / 1000);
+ }
+
+ if (hcr != 0) {
+		device_printf(sc->sc_bus.bdev, "suspend timeout\n");
+ }
+ cmd &= ~EHCI_CMD_RS;
+ EOWRITE4(sc, EHCI_USBCMD, cmd);
+
+ for (i = 0; i < 100; i++) {
+ hcr = EOREAD4(sc, EHCI_USBSTS) & EHCI_STS_HCH;
+ if (hcr == EHCI_STS_HCH) {
+ break;
+ }
+ usb_pause_mtx(&sc->sc_bus.bus_mtx, hz / 1000);
+ }
+
+ if (hcr != EHCI_STS_HCH) {
+ device_printf(sc->sc_bus.bdev,
+ "config timeout\n");
+ }
+ USB_BUS_UNLOCK(&sc->sc_bus);
+}
+
+void
+ehci_resume(ehci_softc_t *sc)
+{
+ struct usb_page_search buf_res;
+ uint32_t cmd;
+ uint32_t hcr;
+ uint8_t i;
+
+ USB_BUS_LOCK(&sc->sc_bus);
+
+ /* restore things in case the bios doesn't */
+ EOWRITE4(sc, EHCI_CTRLDSSEGMENT, 0);
+
+ usbd_get_page(&sc->sc_hw.pframes_pc, 0, &buf_res);
+ EOWRITE4(sc, EHCI_PERIODICLISTBASE, buf_res.physaddr);
+
+ usbd_get_page(&sc->sc_hw.async_start_pc, 0, &buf_res);
+ EOWRITE4(sc, EHCI_ASYNCLISTADDR, buf_res.physaddr | EHCI_LINK_QH);
+
+ EOWRITE4(sc, EHCI_USBINTR, sc->sc_eintrs);
+
+ hcr = 0;
+ for (i = 1; i <= sc->sc_noport; i++) {
+ cmd = EOREAD4(sc, EHCI_PORTSC(i));
+ if (((cmd & EHCI_PS_PO) == 0) &&
+ ((cmd & EHCI_PS_SUSP) == EHCI_PS_SUSP)) {
+ EOWRITE4(sc, EHCI_PORTSC(i),
+ cmd | EHCI_PS_FPR);
+ hcr = 1;
+ }
+ }
+
+ if (hcr) {
+ usb_pause_mtx(&sc->sc_bus.bus_mtx,
+ USB_MS_TO_TICKS(USB_RESUME_WAIT));
+
+ for (i = 1; i <= sc->sc_noport; i++) {
+ cmd = EOREAD4(sc, EHCI_PORTSC(i));
+ if (((cmd & EHCI_PS_PO) == 0) &&
+ ((cmd & EHCI_PS_SUSP) == EHCI_PS_SUSP)) {
+ EOWRITE4(sc, EHCI_PORTSC(i),
+ cmd & ~EHCI_PS_FPR);
+ }
+ }
+ }
+ EOWRITE4(sc, EHCI_USBCMD, sc->sc_cmd);
+
+ for (i = 0; i < 100; i++) {
+ hcr = EOREAD4(sc, EHCI_USBSTS) & EHCI_STS_HCH;
+ if (hcr != EHCI_STS_HCH) {
+ break;
+ }
+ usb_pause_mtx(&sc->sc_bus.bus_mtx, hz / 1000);
+ }
+ if (hcr == EHCI_STS_HCH) {
+ device_printf(sc->sc_bus.bdev, "config timeout\n");
+ }
+
+ USB_BUS_UNLOCK(&sc->sc_bus);
+
+ usb_pause_mtx(NULL,
+ USB_MS_TO_TICKS(USB_RESUME_WAIT));
+
+ /* catch any lost interrupts */
+ ehci_do_poll(&sc->sc_bus);
+}
+
+void
+ehci_shutdown(ehci_softc_t *sc)
+{
+ DPRINTF("stopping the HC\n");
+
+ if (ehci_hcreset(sc)) {
+ DPRINTF("reset failed!\n");
+ }
+}
+
+#ifdef USB_DEBUG
+static void
+ehci_dump_regs(ehci_softc_t *sc)
+{
+ uint32_t i;
+
+ i = EOREAD4(sc, EHCI_USBCMD);
+ printf("cmd=0x%08x\n", i);
+
+ if (i & EHCI_CMD_ITC_1)
+ printf(" EHCI_CMD_ITC_1\n");
+ if (i & EHCI_CMD_ITC_2)
+ printf(" EHCI_CMD_ITC_2\n");
+ if (i & EHCI_CMD_ITC_4)
+ printf(" EHCI_CMD_ITC_4\n");
+ if (i & EHCI_CMD_ITC_8)
+ printf(" EHCI_CMD_ITC_8\n");
+ if (i & EHCI_CMD_ITC_16)
+ printf(" EHCI_CMD_ITC_16\n");
+ if (i & EHCI_CMD_ITC_32)
+ printf(" EHCI_CMD_ITC_32\n");
+ if (i & EHCI_CMD_ITC_64)
+ printf(" EHCI_CMD_ITC_64\n");
+ if (i & EHCI_CMD_ASPME)
+ printf(" EHCI_CMD_ASPME\n");
+ if (i & EHCI_CMD_ASPMC)
+ printf(" EHCI_CMD_ASPMC\n");
+ if (i & EHCI_CMD_LHCR)
+ printf(" EHCI_CMD_LHCR\n");
+ if (i & EHCI_CMD_IAAD)
+ printf(" EHCI_CMD_IAAD\n");
+ if (i & EHCI_CMD_ASE)
+ printf(" EHCI_CMD_ASE\n");
+ if (i & EHCI_CMD_PSE)
+ printf(" EHCI_CMD_PSE\n");
+ if (i & EHCI_CMD_FLS_M)
+ printf(" EHCI_CMD_FLS_M\n");
+ if (i & EHCI_CMD_HCRESET)
+ printf(" EHCI_CMD_HCRESET\n");
+ if (i & EHCI_CMD_RS)
+ printf(" EHCI_CMD_RS\n");
+
+ i = EOREAD4(sc, EHCI_USBSTS);
+
+ printf("sts=0x%08x\n", i);
+
+ if (i & EHCI_STS_ASS)
+ printf(" EHCI_STS_ASS\n");
+ if (i & EHCI_STS_PSS)
+ printf(" EHCI_STS_PSS\n");
+ if (i & EHCI_STS_REC)
+ printf(" EHCI_STS_REC\n");
+ if (i & EHCI_STS_HCH)
+ printf(" EHCI_STS_HCH\n");
+ if (i & EHCI_STS_IAA)
+ printf(" EHCI_STS_IAA\n");
+ if (i & EHCI_STS_HSE)
+ printf(" EHCI_STS_HSE\n");
+ if (i & EHCI_STS_FLR)
+ printf(" EHCI_STS_FLR\n");
+ if (i & EHCI_STS_PCD)
+ printf(" EHCI_STS_PCD\n");
+ if (i & EHCI_STS_ERRINT)
+ printf(" EHCI_STS_ERRINT\n");
+ if (i & EHCI_STS_INT)
+ printf(" EHCI_STS_INT\n");
+
+ printf("ien=0x%08x\n",
+ EOREAD4(sc, EHCI_USBINTR));
+	printf("frindex=0x%08x ctrldsseg=0x%08x periodic=0x%08x async=0x%08x\n",
+ EOREAD4(sc, EHCI_FRINDEX),
+ EOREAD4(sc, EHCI_CTRLDSSEGMENT),
+ EOREAD4(sc, EHCI_PERIODICLISTBASE),
+ EOREAD4(sc, EHCI_ASYNCLISTADDR));
+ for (i = 1; i <= sc->sc_noport; i++) {
+ printf("port %d status=0x%08x\n", i,
+ EOREAD4(sc, EHCI_PORTSC(i)));
+ }
+}
+
+static void
+ehci_dump_link(ehci_softc_t *sc, uint32_t link, int type)
+{
+ link = hc32toh(sc, link);
+ printf("0x%08x", link);
+ if (link & EHCI_LINK_TERMINATE)
+ printf("<T>");
+ else {
+ printf("<");
+ if (type) {
+ switch (EHCI_LINK_TYPE(link)) {
+ case EHCI_LINK_ITD:
+ printf("ITD");
+ break;
+ case EHCI_LINK_QH:
+ printf("QH");
+ break;
+ case EHCI_LINK_SITD:
+ printf("SITD");
+ break;
+ case EHCI_LINK_FSTN:
+ printf("FSTN");
+ break;
+ }
+ }
+ printf(">");
+ }
+}
+
+static void
+ehci_dump_qtd(ehci_softc_t *sc, ehci_qtd_t *qtd)
+{
+ uint32_t s;
+
+ printf(" next=");
+ ehci_dump_link(sc, qtd->qtd_next, 0);
+ printf(" altnext=");
+ ehci_dump_link(sc, qtd->qtd_altnext, 0);
+ printf("\n");
+ s = hc32toh(sc, qtd->qtd_status);
+ printf(" status=0x%08x: toggle=%d bytes=0x%x ioc=%d c_page=0x%x\n",
+ s, EHCI_QTD_GET_TOGGLE(s), EHCI_QTD_GET_BYTES(s),
+ EHCI_QTD_GET_IOC(s), EHCI_QTD_GET_C_PAGE(s));
+ printf(" cerr=%d pid=%d stat=%s%s%s%s%s%s%s%s\n",
+ EHCI_QTD_GET_CERR(s), EHCI_QTD_GET_PID(s),
+ (s & EHCI_QTD_ACTIVE) ? "ACTIVE" : "NOT_ACTIVE",
+ (s & EHCI_QTD_HALTED) ? "-HALTED" : "",
+ (s & EHCI_QTD_BUFERR) ? "-BUFERR" : "",
+ (s & EHCI_QTD_BABBLE) ? "-BABBLE" : "",
+ (s & EHCI_QTD_XACTERR) ? "-XACTERR" : "",
+ (s & EHCI_QTD_MISSEDMICRO) ? "-MISSED" : "",
+ (s & EHCI_QTD_SPLITXSTATE) ? "-SPLIT" : "",
+ (s & EHCI_QTD_PINGSTATE) ? "-PING" : "");
+
+ for (s = 0; s < 5; s++) {
+ printf(" buffer[%d]=0x%08x\n", s,
+ hc32toh(sc, qtd->qtd_buffer[s]));
+ }
+ for (s = 0; s < 5; s++) {
+ printf(" buffer_hi[%d]=0x%08x\n", s,
+ hc32toh(sc, qtd->qtd_buffer_hi[s]));
+ }
+}
+
+static uint8_t
+ehci_dump_sqtd(ehci_softc_t *sc, ehci_qtd_t *sqtd)
+{
+ uint8_t temp;
+
+ usb_pc_cpu_invalidate(sqtd->page_cache);
+ printf("QTD(%p) at 0x%08x:\n", sqtd, hc32toh(sc, sqtd->qtd_self));
+ ehci_dump_qtd(sc, sqtd);
+ temp = (sqtd->qtd_next & htohc32(sc, EHCI_LINK_TERMINATE)) ? 1 : 0;
+ return (temp);
+}
+
+static void
+ehci_dump_sqtds(ehci_softc_t *sc, ehci_qtd_t *sqtd)
+{
+ uint16_t i;
+ uint8_t stop;
+
+ stop = 0;
+ for (i = 0; sqtd && (i < 20) && !stop; sqtd = sqtd->obj_next, i++) {
+ stop = ehci_dump_sqtd(sc, sqtd);
+ }
+ if (sqtd) {
+ printf("dump aborted, too many TDs\n");
+ }
+}
+
+static void
+ehci_dump_sqh(ehci_softc_t *sc, ehci_qh_t *qh)
+{
+ uint32_t endp;
+ uint32_t endphub;
+
+ usb_pc_cpu_invalidate(qh->page_cache);
+ printf("QH(%p) at 0x%08x:\n", qh, hc32toh(sc, qh->qh_self) & ~0x1F);
+ printf(" link=");
+ ehci_dump_link(sc, qh->qh_link, 1);
+ printf("\n");
+ endp = hc32toh(sc, qh->qh_endp);
+ printf(" endp=0x%08x\n", endp);
+ printf(" addr=0x%02x inact=%d endpt=%d eps=%d dtc=%d hrecl=%d\n",
+ EHCI_QH_GET_ADDR(endp), EHCI_QH_GET_INACT(endp),
+ EHCI_QH_GET_ENDPT(endp), EHCI_QH_GET_EPS(endp),
+ EHCI_QH_GET_DTC(endp), EHCI_QH_GET_HRECL(endp));
+ printf(" mpl=0x%x ctl=%d nrl=%d\n",
+ EHCI_QH_GET_MPL(endp), EHCI_QH_GET_CTL(endp),
+ EHCI_QH_GET_NRL(endp));
+ endphub = hc32toh(sc, qh->qh_endphub);
+ printf(" endphub=0x%08x\n", endphub);
+ printf(" smask=0x%02x cmask=0x%02x huba=0x%02x port=%d mult=%d\n",
+ EHCI_QH_GET_SMASK(endphub), EHCI_QH_GET_CMASK(endphub),
+ EHCI_QH_GET_HUBA(endphub), EHCI_QH_GET_PORT(endphub),
+ EHCI_QH_GET_MULT(endphub));
+ printf(" curqtd=");
+ ehci_dump_link(sc, qh->qh_curqtd, 0);
+ printf("\n");
+ printf("Overlay qTD:\n");
+ ehci_dump_qtd(sc, (void *)&qh->qh_qtd);
+}
+
+static void
+ehci_dump_sitd(ehci_softc_t *sc, ehci_sitd_t *sitd)
+{
+ usb_pc_cpu_invalidate(sitd->page_cache);
+ printf("SITD(%p) at 0x%08x\n", sitd, hc32toh(sc, sitd->sitd_self) & ~0x1F);
+ printf(" next=0x%08x\n", hc32toh(sc, sitd->sitd_next));
+ printf(" portaddr=0x%08x dir=%s addr=%d endpt=0x%x port=0x%x huba=0x%x\n",
+ hc32toh(sc, sitd->sitd_portaddr),
+ (sitd->sitd_portaddr & htohc32(sc, EHCI_SITD_SET_DIR_IN))
+ ? "in" : "out",
+ EHCI_SITD_GET_ADDR(hc32toh(sc, sitd->sitd_portaddr)),
+ EHCI_SITD_GET_ENDPT(hc32toh(sc, sitd->sitd_portaddr)),
+ EHCI_SITD_GET_PORT(hc32toh(sc, sitd->sitd_portaddr)),
+ EHCI_SITD_GET_HUBA(hc32toh(sc, sitd->sitd_portaddr)));
+ printf(" mask=0x%08x\n", hc32toh(sc, sitd->sitd_mask));
+ printf(" status=0x%08x <%s> len=0x%x\n", hc32toh(sc, sitd->sitd_status),
+ (sitd->sitd_status & htohc32(sc, EHCI_SITD_ACTIVE)) ? "ACTIVE" : "",
+ EHCI_SITD_GET_LEN(hc32toh(sc, sitd->sitd_status)));
+ printf(" back=0x%08x, bp=0x%08x,0x%08x,0x%08x,0x%08x\n",
+ hc32toh(sc, sitd->sitd_back),
+ hc32toh(sc, sitd->sitd_bp[0]),
+ hc32toh(sc, sitd->sitd_bp[1]),
+ hc32toh(sc, sitd->sitd_bp_hi[0]),
+ hc32toh(sc, sitd->sitd_bp_hi[1]));
+}
+
+static void
+ehci_dump_itd(ehci_softc_t *sc, ehci_itd_t *itd)
+{
+	uint8_t n;
+
+	usb_pc_cpu_invalidate(itd->page_cache);
+	printf("ITD(%p) at 0x%08x\n", itd, hc32toh(sc, itd->itd_self) & ~0x1F);
+	printf(" next=0x%08x\n", hc32toh(sc, itd->itd_next));
+	for (n = 0; n != 8; n++) {
+		printf(" status[%d]=0x%08x; <%s>\n", n,
+		    hc32toh(sc, itd->itd_status[n]),
+		    (itd->itd_status[n] & htohc32(sc, EHCI_ITD_ACTIVE)) ?
+		    "ACTIVE" : "");
+	}
+ printf(" bp[0]=0x%08x\n", hc32toh(sc, itd->itd_bp[0]));
+ printf(" addr=0x%02x; endpt=0x%01x\n",
+ EHCI_ITD_GET_ADDR(hc32toh(sc, itd->itd_bp[0])),
+ EHCI_ITD_GET_ENDPT(hc32toh(sc, itd->itd_bp[0])));
+ printf(" bp[1]=0x%08x\n", hc32toh(sc, itd->itd_bp[1]));
+ printf(" dir=%s; mpl=0x%02x\n",
+ (hc32toh(sc, itd->itd_bp[1]) & EHCI_ITD_SET_DIR_IN) ? "in" : "out",
+ EHCI_ITD_GET_MPL(hc32toh(sc, itd->itd_bp[1])));
+ printf(" bp[2..6]=0x%08x,0x%08x,0x%08x,0x%08x,0x%08x\n",
+ hc32toh(sc, itd->itd_bp[2]),
+ hc32toh(sc, itd->itd_bp[3]),
+ hc32toh(sc, itd->itd_bp[4]),
+ hc32toh(sc, itd->itd_bp[5]),
+ hc32toh(sc, itd->itd_bp[6]));
+ printf(" bp_hi=0x%08x,0x%08x,0x%08x,0x%08x,\n"
+ " 0x%08x,0x%08x,0x%08x\n",
+ hc32toh(sc, itd->itd_bp_hi[0]),
+ hc32toh(sc, itd->itd_bp_hi[1]),
+ hc32toh(sc, itd->itd_bp_hi[2]),
+ hc32toh(sc, itd->itd_bp_hi[3]),
+ hc32toh(sc, itd->itd_bp_hi[4]),
+ hc32toh(sc, itd->itd_bp_hi[5]),
+ hc32toh(sc, itd->itd_bp_hi[6]));
+}
+
+static void
+ehci_dump_isoc(ehci_softc_t *sc)
+{
+ ehci_itd_t *itd;
+ ehci_sitd_t *sitd;
+ uint16_t max = 1000;
+ uint16_t pos;
+
+ pos = (EOREAD4(sc, EHCI_FRINDEX) / 8) &
+ (EHCI_VIRTUAL_FRAMELIST_COUNT - 1);
+
+ printf("%s: isochronous dump from frame 0x%03x:\n",
+ __FUNCTION__, pos);
+
+ itd = sc->sc_isoc_hs_p_last[pos];
+ sitd = sc->sc_isoc_fs_p_last[pos];
+
+ while (itd && max && max--) {
+ ehci_dump_itd(sc, itd);
+ itd = itd->prev;
+ }
+
+ while (sitd && max && max--) {
+ ehci_dump_sitd(sc, sitd);
+ sitd = sitd->prev;
+ }
+}
+
+#endif
+
+static void
+ehci_transfer_intr_enqueue(struct usb_xfer *xfer)
+{
+ /* check for early completion */
+ if (ehci_check_transfer(xfer)) {
+ return;
+ }
+ /* put transfer on interrupt queue */
+ usbd_transfer_enqueue(&xfer->xroot->bus->intr_q, xfer);
+
+ /* start timeout, if any */
+ if (xfer->timeout != 0) {
+ usbd_transfer_timeout_ms(xfer, &ehci_timeout, xfer->timeout);
+ }
+}
+
+#define EHCI_APPEND_FS_TD(std,last) (last) = _ehci_append_fs_td(std,last)
+static ehci_sitd_t *
+_ehci_append_fs_td(ehci_sitd_t *std, ehci_sitd_t *last)
+{
+ DPRINTFN(11, "%p to %p\n", std, last);
+
+ /* (sc->sc_bus.mtx) must be locked */
+
+ std->next = last->next;
+ std->sitd_next = last->sitd_next;
+
+ std->prev = last;
+
+ usb_pc_cpu_flush(std->page_cache);
+
+ /*
+	 * last->next->prev is never followed, so std->next->prev is not set
+ */
+ last->next = std;
+ last->sitd_next = std->sitd_self;
+
+ usb_pc_cpu_flush(last->page_cache);
+
+ return (std);
+}
+
+#define EHCI_APPEND_HS_TD(std,last) (last) = _ehci_append_hs_td(std,last)
+static ehci_itd_t *
+_ehci_append_hs_td(ehci_itd_t *std, ehci_itd_t *last)
+{
+ DPRINTFN(11, "%p to %p\n", std, last);
+
+ /* (sc->sc_bus.mtx) must be locked */
+
+ std->next = last->next;
+ std->itd_next = last->itd_next;
+
+ std->prev = last;
+
+ usb_pc_cpu_flush(std->page_cache);
+
+ /*
+	 * last->next->prev is never followed, so std->next->prev is not set
+ */
+ last->next = std;
+ last->itd_next = std->itd_self;
+
+ usb_pc_cpu_flush(last->page_cache);
+
+ return (std);
+}
+
+#define EHCI_APPEND_QH(sqh,last) (last) = _ehci_append_qh(sqh,last)
+static ehci_qh_t *
+_ehci_append_qh(ehci_qh_t *sqh, ehci_qh_t *last)
+{
+ DPRINTFN(11, "%p to %p\n", sqh, last);
+
+ if (sqh->prev != NULL) {
+ /* should not happen */
+ DPRINTFN(0, "QH already linked!\n");
+ return (last);
+ }
+ /* (sc->sc_bus.mtx) must be locked */
+
+ sqh->next = last->next;
+ sqh->qh_link = last->qh_link;
+
+ sqh->prev = last;
+
+ usb_pc_cpu_flush(sqh->page_cache);
+
+ /*
+	 * last->next->prev is never followed, so sqh->next->prev is not set
+ */
+
+ last->next = sqh;
+ last->qh_link = sqh->qh_self;
+
+ usb_pc_cpu_flush(last->page_cache);
+
+ return (sqh);
+}
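+
+/*
+ * Note that the three _ehci_append_*() helpers above share the same
+ * ordering: the new element is fully linked and flushed to memory
+ * before the predecessor's hardware link is rewritten, so the host
+ * controller never follows a pointer to an uninitialized descriptor.
+ */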
+
+#define EHCI_REMOVE_FS_TD(std,last) (last) = _ehci_remove_fs_td(std,last)
+static ehci_sitd_t *
+_ehci_remove_fs_td(ehci_sitd_t *std, ehci_sitd_t *last)
+{
+ DPRINTFN(11, "%p from %p\n", std, last);
+
+ /* (sc->sc_bus.mtx) must be locked */
+
+ std->prev->next = std->next;
+ std->prev->sitd_next = std->sitd_next;
+
+ usb_pc_cpu_flush(std->prev->page_cache);
+
+ if (std->next) {
+ std->next->prev = std->prev;
+ usb_pc_cpu_flush(std->next->page_cache);
+ }
+ return ((last == std) ? std->prev : last);
+}
+
+#define EHCI_REMOVE_HS_TD(std,last) (last) = _ehci_remove_hs_td(std,last)
+static ehci_itd_t *
+_ehci_remove_hs_td(ehci_itd_t *std, ehci_itd_t *last)
+{
+ DPRINTFN(11, "%p from %p\n", std, last);
+
+ /* (sc->sc_bus.mtx) must be locked */
+
+ std->prev->next = std->next;
+ std->prev->itd_next = std->itd_next;
+
+ usb_pc_cpu_flush(std->prev->page_cache);
+
+ if (std->next) {
+ std->next->prev = std->prev;
+ usb_pc_cpu_flush(std->next->page_cache);
+ }
+ return ((last == std) ? std->prev : last);
+}
+
+#define EHCI_REMOVE_QH(sqh,last) (last) = _ehci_remove_qh(sqh,last)
+static ehci_qh_t *
+_ehci_remove_qh(ehci_qh_t *sqh, ehci_qh_t *last)
+{
+ DPRINTFN(11, "%p from %p\n", sqh, last);
+
+ /* (sc->sc_bus.mtx) must be locked */
+
+ /* only remove if not removed from a queue */
+ if (sqh->prev) {
+
+ sqh->prev->next = sqh->next;
+ sqh->prev->qh_link = sqh->qh_link;
+
+ usb_pc_cpu_flush(sqh->prev->page_cache);
+
+ if (sqh->next) {
+ sqh->next->prev = sqh->prev;
+ usb_pc_cpu_flush(sqh->next->page_cache);
+ }
+ last = ((last == sqh) ? sqh->prev : last);
+
+ sqh->prev = 0;
+
+ usb_pc_cpu_flush(sqh->page_cache);
+ }
+ return (last);
+}
+
+static usb_error_t
+ehci_non_isoc_done_sub(struct usb_xfer *xfer)
+{
+ ehci_softc_t *sc = EHCI_BUS2SC(xfer->xroot->bus);
+ ehci_qtd_t *td;
+ ehci_qtd_t *td_alt_next;
+ uint32_t status;
+ uint16_t len;
+
+ td = xfer->td_transfer_cache;
+ td_alt_next = td->alt_next;
+
+ if (xfer->aframes != xfer->nframes) {
+ usbd_xfer_set_frame_len(xfer, xfer->aframes, 0);
+ }
+ while (1) {
+
+ usb_pc_cpu_invalidate(td->page_cache);
+ status = hc32toh(sc, td->qtd_status);
+
+ len = EHCI_QTD_GET_BYTES(status);
+
+ /*
+ * Verify the status length and
+ * add the length to "frlengths[]":
+ */
+ if (len > td->len) {
+ /* should not happen */
+ DPRINTF("Invalid status length, "
+ "0x%04x/0x%04x bytes\n", len, td->len);
+ status |= EHCI_QTD_HALTED;
+ } else if (xfer->aframes != xfer->nframes) {
+ xfer->frlengths[xfer->aframes] += td->len - len;
+ }
+ /* Check for last transfer */
+ if (((void *)td) == xfer->td_transfer_last) {
+ td = NULL;
+ break;
+ }
+ /* Check for transfer error */
+ if (status & EHCI_QTD_HALTED) {
+ /* the transfer is finished */
+ td = NULL;
+ break;
+ }
+ /* Check for short transfer */
+ if (len > 0) {
+ if (xfer->flags_int.short_frames_ok) {
+ /* follow alt next */
+ td = td->alt_next;
+ } else {
+ /* the transfer is finished */
+ td = NULL;
+ }
+ break;
+ }
+ td = td->obj_next;
+
+ if (td->alt_next != td_alt_next) {
+ /* this USB frame is complete */
+ break;
+ }
+ }
+
+ /* update transfer cache */
+
+ xfer->td_transfer_cache = td;
+
+#ifdef USB_DEBUG
+ if (status & EHCI_QTD_STATERRS) {
+		DPRINTFN(11, "error, addr=%d, endpt=0x%02x, frame=0x%02x, "
+ "status=%s%s%s%s%s%s%s%s\n",
+ xfer->address, xfer->endpointno, xfer->aframes,
+ (status & EHCI_QTD_ACTIVE) ? "[ACTIVE]" : "[NOT_ACTIVE]",
+ (status & EHCI_QTD_HALTED) ? "[HALTED]" : "",
+ (status & EHCI_QTD_BUFERR) ? "[BUFERR]" : "",
+ (status & EHCI_QTD_BABBLE) ? "[BABBLE]" : "",
+ (status & EHCI_QTD_XACTERR) ? "[XACTERR]" : "",
+ (status & EHCI_QTD_MISSEDMICRO) ? "[MISSED]" : "",
+ (status & EHCI_QTD_SPLITXSTATE) ? "[SPLIT]" : "",
+ (status & EHCI_QTD_PINGSTATE) ? "[PING]" : "");
+ }
+#endif
+
+ return ((status & EHCI_QTD_HALTED) ?
+ USB_ERR_STALLED : USB_ERR_NORMAL_COMPLETION);
+}
+
+static void
+ehci_non_isoc_done(struct usb_xfer *xfer)
+{
+ ehci_softc_t *sc = EHCI_BUS2SC(xfer->xroot->bus);
+ ehci_qh_t *qh;
+ uint32_t status;
+ usb_error_t err = 0;
+
+ DPRINTFN(13, "xfer=%p endpoint=%p transfer done\n",
+ xfer, xfer->endpoint);
+
+#ifdef USB_DEBUG
+ if (ehcidebug > 10) {
+ ehci_softc_t *sc = EHCI_BUS2SC(xfer->xroot->bus);
+
+ ehci_dump_sqtds(sc, xfer->td_transfer_first);
+ }
+#endif
+
+ /* extract data toggle directly from the QH's overlay area */
+
+ qh = xfer->qh_start[xfer->flags_int.curr_dma_set];
+
+ usb_pc_cpu_invalidate(qh->page_cache);
+
+ status = hc32toh(sc, qh->qh_qtd.qtd_status);
+
+ xfer->endpoint->toggle_next =
+ (status & EHCI_QTD_TOGGLE_MASK) ? 1 : 0;
+
+ /* reset scanner */
+
+ xfer->td_transfer_cache = xfer->td_transfer_first;
+
+ if (xfer->flags_int.control_xfr) {
+
+ if (xfer->flags_int.control_hdr) {
+
+ err = ehci_non_isoc_done_sub(xfer);
+ }
+ xfer->aframes = 1;
+
+ if (xfer->td_transfer_cache == NULL) {
+ goto done;
+ }
+ }
+ while (xfer->aframes != xfer->nframes) {
+
+ err = ehci_non_isoc_done_sub(xfer);
+ xfer->aframes++;
+
+ if (xfer->td_transfer_cache == NULL) {
+ goto done;
+ }
+ }
+
+ if (xfer->flags_int.control_xfr &&
+ !xfer->flags_int.control_act) {
+
+ err = ehci_non_isoc_done_sub(xfer);
+ }
+done:
+ ehci_device_done(xfer, err);
+}
+
+/*------------------------------------------------------------------------*
+ * ehci_check_transfer
+ *
+ * Return values:
+ * 0: USB transfer is not finished
+ * Else: USB transfer is finished
+ *------------------------------------------------------------------------*/
+static uint8_t
+ehci_check_transfer(struct usb_xfer *xfer)
+{
+ struct usb_pipe_methods *methods = xfer->endpoint->methods;
+ ehci_softc_t *sc = EHCI_BUS2SC(xfer->xroot->bus);
+
+ uint32_t status;
+
+ DPRINTFN(13, "xfer=%p checking transfer\n", xfer);
+
+ if (methods == &ehci_device_isoc_fs_methods) {
+ ehci_sitd_t *td;
+
+ /* isochronous full speed transfer */
+
+ td = xfer->td_transfer_last;
+ usb_pc_cpu_invalidate(td->page_cache);
+ status = hc32toh(sc, td->sitd_status);
+
+ /* also check if first is complete */
+
+ td = xfer->td_transfer_first;
+ usb_pc_cpu_invalidate(td->page_cache);
+ status |= hc32toh(sc, td->sitd_status);
+
+ if (!(status & EHCI_SITD_ACTIVE)) {
+ ehci_device_done(xfer, USB_ERR_NORMAL_COMPLETION);
+ goto transferred;
+ }
+ } else if (methods == &ehci_device_isoc_hs_methods) {
+ ehci_itd_t *td;
+
+ /* isochronous high speed transfer */
+
+ /* check last transfer */
+ td = xfer->td_transfer_last;
+ usb_pc_cpu_invalidate(td->page_cache);
+ status = td->itd_status[0];
+ status |= td->itd_status[1];
+ status |= td->itd_status[2];
+ status |= td->itd_status[3];
+ status |= td->itd_status[4];
+ status |= td->itd_status[5];
+ status |= td->itd_status[6];
+ status |= td->itd_status[7];
+
+ /* also check first transfer */
+ td = xfer->td_transfer_first;
+ usb_pc_cpu_invalidate(td->page_cache);
+ status |= td->itd_status[0];
+ status |= td->itd_status[1];
+ status |= td->itd_status[2];
+ status |= td->itd_status[3];
+ status |= td->itd_status[4];
+ status |= td->itd_status[5];
+ status |= td->itd_status[6];
+ status |= td->itd_status[7];
+
+ /* if no transactions are active we continue */
+ if (!(status & htohc32(sc, EHCI_ITD_ACTIVE))) {
+ ehci_device_done(xfer, USB_ERR_NORMAL_COMPLETION);
+ goto transferred;
+ }
+ } else {
+ ehci_qtd_t *td;
+ ehci_qh_t *qh;
+
+ /* non-isochronous transfer */
+
+ /*
+ * check whether there is an error somewhere in the middle,
+ * or whether there was a short packet (SPD and not ACTIVE)
+ */
+ td = xfer->td_transfer_cache;
+
+ qh = xfer->qh_start[xfer->flags_int.curr_dma_set];
+
+ usb_pc_cpu_invalidate(qh->page_cache);
+
+ status = hc32toh(sc, qh->qh_qtd.qtd_status);
+ if (status & EHCI_QTD_ACTIVE) {
+ /* transfer is pending */
+ goto done;
+ }
+
+ while (1) {
+ usb_pc_cpu_invalidate(td->page_cache);
+ status = hc32toh(sc, td->qtd_status);
+
+ /*
+ * Check if there is an active TD which
+ * indicates that the transfer isn't done.
+ */
+ if (status & EHCI_QTD_ACTIVE) {
+ /* update cache */
+ xfer->td_transfer_cache = td;
+ goto done;
+ }
+ /*
+ * last transfer descriptor makes the transfer done
+ */
+ if (((void *)td) == xfer->td_transfer_last) {
+ break;
+ }
+ /*
+ * any kind of error makes the transfer done
+ */
+ if (status & EHCI_QTD_HALTED) {
+ break;
+ }
+ /*
+ * if there is no alternate next transfer, a short
+ * packet also makes the transfer done
+ */
+ if (EHCI_QTD_GET_BYTES(status)) {
+ if (xfer->flags_int.short_frames_ok) {
+ /* follow alt next */
+ if (td->alt_next) {
+ td = td->alt_next;
+ continue;
+ }
+ }
+ /* transfer is done */
+ break;
+ }
+ td = td->obj_next;
+ }
+ ehci_non_isoc_done(xfer);
+ goto transferred;
+ }
+
+done:
+ DPRINTFN(13, "xfer=%p is still active\n", xfer);
+ return (0);
+
+transferred:
+ return (1);
+}
+
+static void
+ehci_pcd_enable(ehci_softc_t *sc)
+{
+ USB_BUS_LOCK_ASSERT(&sc->sc_bus, MA_OWNED);
+
+ sc->sc_eintrs |= EHCI_STS_PCD;
+ EOWRITE4(sc, EHCI_USBINTR, sc->sc_eintrs);
+
+ /* acknowledge any PCD interrupt */
+ EOWRITE4(sc, EHCI_USBSTS, EHCI_STS_PCD);
+
+ ehci_root_intr(sc);
+}
+
+static void
+ehci_interrupt_poll(ehci_softc_t *sc)
+{
+ struct usb_xfer *xfer;
+
+repeat:
+ TAILQ_FOREACH(xfer, &sc->sc_bus.intr_q.head, wait_entry) {
+ /*
+ * check if transfer is transferred
+ */
+ if (ehci_check_transfer(xfer)) {
+ /* queue has been modified */
+ goto repeat;
+ }
+ }
+}
+
+/*
+ * Some EHCI chips from VIA / ATI seem to trigger interrupts before
+ * writing back the qTD status, or miss signalling occasionally under
+ * heavy load. If the host machine is too fast, we can miss
+ * transaction completion - when we scan the active list the
+ * transaction still seems to be active. This generally exhibits
+ * itself as a umass stall that never recovers.
+ *
+ * We work around this behaviour by setting up this callback after any
+ * softintr that completes with transactions still pending, giving us
+ * another chance to check for completion after the writeback has
+ * taken place.
+ */
+static void
+ehci_poll_timeout(void *arg)
+{
+ ehci_softc_t *sc = arg;
+
+ DPRINTFN(3, "\n");
+ ehci_interrupt_poll(sc);
+}
+
+/*------------------------------------------------------------------------*
+ * ehci_interrupt - EHCI interrupt handler
+ *
+ * NOTE: Do not access "sc->sc_bus.bdev" inside the interrupt handler,
+ * because the interrupt handler may be set up before "sc->sc_bus.bdev"
+ * is present!
+ *------------------------------------------------------------------------*/
+void
+ehci_interrupt(ehci_softc_t *sc)
+{
+ uint32_t status;
+
+ USB_BUS_LOCK(&sc->sc_bus);
+
+ DPRINTFN(16, "real interrupt\n");
+
+#ifdef USB_DEBUG
+ if (ehcidebug > 15) {
+ ehci_dump_regs(sc);
+ }
+#endif
+
+ status = EHCI_STS_INTRS(EOREAD4(sc, EHCI_USBSTS));
+ if (status == 0) {
+ /* the interrupt was not for us */
+ goto done;
+ }
+ if (!(status & sc->sc_eintrs)) {
+ goto done;
+ }
+ EOWRITE4(sc, EHCI_USBSTS, status); /* acknowledge */
+
+ status &= sc->sc_eintrs;
+
+ if (status & EHCI_STS_HSE) {
+ printf("%s: unrecoverable error, "
+ "controller halted\n", __FUNCTION__);
+#ifdef USB_DEBUG
+ ehci_dump_regs(sc);
+ ehci_dump_isoc(sc);
+#endif
+ }
+ if (status & EHCI_STS_PCD) {
+ /*
+ * Disable PCD interrupt for now, because it will be
+ * on until the port has been reset.
+ */
+ sc->sc_eintrs &= ~EHCI_STS_PCD;
+ EOWRITE4(sc, EHCI_USBINTR, sc->sc_eintrs);
+
+ ehci_root_intr(sc);
+
+ /* do not allow RHSC interrupts > 1 per second */
+ usb_callout_reset(&sc->sc_tmo_pcd, hz,
+ (void *)&ehci_pcd_enable, sc);
+ }
+ status &= ~(EHCI_STS_INT | EHCI_STS_ERRINT | EHCI_STS_PCD | EHCI_STS_IAA);
+
+ if (status != 0) {
+ /* block unprocessed interrupts */
+ sc->sc_eintrs &= ~status;
+ EOWRITE4(sc, EHCI_USBINTR, sc->sc_eintrs);
+ printf("%s: blocking interrupts 0x%x\n", __FUNCTION__, status);
+ }
+ /* poll all the USB transfers */
+ ehci_interrupt_poll(sc);
+
+ if (sc->sc_flags & EHCI_SCFLG_LOSTINTRBUG) {
+ usb_callout_reset(&sc->sc_tmo_poll, hz / 128,
+ (void *)&ehci_poll_timeout, sc);
+ }
+
+done:
+ USB_BUS_UNLOCK(&sc->sc_bus);
+}
+
+/*
+ * called when a request does not complete
+ */
+static void
+ehci_timeout(void *arg)
+{
+ struct usb_xfer *xfer = arg;
+
+ DPRINTF("xfer=%p\n", xfer);
+
+ USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED);
+
+ /* transfer is transferred */
+ ehci_device_done(xfer, USB_ERR_TIMEOUT);
+}
+
+static void
+ehci_do_poll(struct usb_bus *bus)
+{
+ ehci_softc_t *sc = EHCI_BUS2SC(bus);
+
+ USB_BUS_LOCK(&sc->sc_bus);
+ ehci_interrupt_poll(sc);
+ USB_BUS_UNLOCK(&sc->sc_bus);
+}
+
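+/*
+ * This helper runs two passes over the same TD list: a "precompute"
+ * pass that only consumes lengths in order to locate the alternate
+ * next target, after which the saved length/short-packet state is
+ * restored and a second pass actually fills in the descriptors.
+ */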
+static void
+ehci_setup_standard_chain_sub(struct ehci_std_temp *temp)
+{
+ struct usb_page_search buf_res;
+ ehci_qtd_t *td;
+ ehci_qtd_t *td_next;
+ ehci_qtd_t *td_alt_next;
+ uint32_t buf_offset;
+ uint32_t average;
+ uint32_t len_old;
+ uint32_t terminate;
+ uint32_t qtd_altnext;
+ uint8_t shortpkt_old;
+ uint8_t precompute;
+
+ terminate = temp->sc->sc_terminate_self;
+ qtd_altnext = temp->sc->sc_terminate_self;
+ td_alt_next = NULL;
+ buf_offset = 0;
+ shortpkt_old = temp->shortpkt;
+ len_old = temp->len;
+ precompute = 1;
+
+restart:
+
+ td = temp->td;
+ td_next = temp->td_next;
+
+ while (1) {
+
+ if (temp->len == 0) {
+
+ if (temp->shortpkt) {
+ break;
+ }
+ /* send a Zero Length Packet, ZLP, last */
+
+ temp->shortpkt = 1;
+ average = 0;
+
+ } else {
+
+ average = temp->average;
+
+ if (temp->len < average) {
+ if (temp->len % temp->max_frame_size) {
+ temp->shortpkt = 1;
+ }
+ average = temp->len;
+ }
+ }
+
+ if (td_next == NULL) {
+ panic("%s: out of EHCI transfer descriptors!", __FUNCTION__);
+ }
+ /* get next TD */
+
+ td = td_next;
+ td_next = td->obj_next;
+
+ /* check if we are pre-computing */
+
+ if (precompute) {
+
+ /* update remaining length */
+
+ temp->len -= average;
+
+ continue;
+ }
+ /* fill out current TD */
+
+ td->qtd_status =
+ temp->qtd_status |
+ htohc32(temp->sc, EHCI_QTD_IOC |
+ EHCI_QTD_SET_BYTES(average));
+
+ if (average == 0) {
+
+ if (temp->auto_data_toggle == 0) {
+
+ /* update data toggle, ZLP case */
+
+ temp->qtd_status ^=
+ htohc32(temp->sc, EHCI_QTD_TOGGLE_MASK);
+ }
+ td->len = 0;
+
+ td->qtd_buffer[0] = 0;
+ td->qtd_buffer_hi[0] = 0;
+
+ td->qtd_buffer[1] = 0;
+ td->qtd_buffer_hi[1] = 0;
+
+ } else {
+
+ uint8_t x;
+
+ if (temp->auto_data_toggle == 0) {
+
+				/*
+				 * update data toggle; it flips when this
+				 * TD carries an odd number of packets
+				 */
+
+ if (((average + temp->max_frame_size - 1) /
+ temp->max_frame_size) & 1) {
+ temp->qtd_status ^=
+ htohc32(temp->sc, EHCI_QTD_TOGGLE_MASK);
+ }
+ }
+ td->len = average;
+
+ /* update remaining length */
+
+ temp->len -= average;
+
+ /* fill out buffer pointers */
+
+ usbd_get_page(temp->pc, buf_offset, &buf_res);
+ td->qtd_buffer[0] =
+ htohc32(temp->sc, buf_res.physaddr);
+ td->qtd_buffer_hi[0] = 0;
+
+ x = 1;
+
+ while (average > EHCI_PAGE_SIZE) {
+ average -= EHCI_PAGE_SIZE;
+ buf_offset += EHCI_PAGE_SIZE;
+ usbd_get_page(temp->pc, buf_offset, &buf_res);
+ td->qtd_buffer[x] =
+ htohc32(temp->sc,
+ buf_res.physaddr & (~0xFFF));
+ td->qtd_buffer_hi[x] = 0;
+ x++;
+ }
+
+ /*
+			 * NOTE: The "average" variable is never zero after
+			 * exiting the loop above!
+			 *
+			 * NOTE: We have to subtract one from the offset to
+			 * ensure that we are computing the physical address
+			 * of a valid page!
+ */
+ buf_offset += average;
+ usbd_get_page(temp->pc, buf_offset - 1, &buf_res);
+ td->qtd_buffer[x] =
+ htohc32(temp->sc,
+ buf_res.physaddr & (~0xFFF));
+ td->qtd_buffer_hi[x] = 0;
+ }
+
+ if (td_next) {
+ /* link the current TD with the next one */
+ td->qtd_next = td_next->qtd_self;
+ }
+ td->qtd_altnext = qtd_altnext;
+ td->alt_next = td_alt_next;
+
+ usb_pc_cpu_flush(td->page_cache);
+ }
+
+ if (precompute) {
+ precompute = 0;
+
+ /* setup alt next pointer, if any */
+ if (temp->last_frame) {
+ td_alt_next = NULL;
+ qtd_altnext = terminate;
+ } else {
+ /* we use this field internally */
+ td_alt_next = td_next;
+ if (temp->setup_alt_next) {
+ qtd_altnext = td_next->qtd_self;
+ } else {
+ qtd_altnext = terminate;
+ }
+ }
+
+ /* restore */
+ temp->shortpkt = shortpkt_old;
+ temp->len = len_old;
+ goto restart;
+ }
+ temp->td = td;
+ temp->td_next = td_next;
+}
+
+static void
+ehci_setup_standard_chain(struct usb_xfer *xfer, ehci_qh_t **qh_last)
+{
+ struct ehci_std_temp temp;
+ struct usb_pipe_methods *methods;
+ ehci_qh_t *qh;
+ ehci_qtd_t *td;
+ uint32_t qh_endp;
+ uint32_t qh_endphub;
+ uint32_t x;
+
+ DPRINTFN(9, "addr=%d endpt=%d sumlen=%d speed=%d\n",
+ xfer->address, UE_GET_ADDR(xfer->endpointno),
+ xfer->sumlen, usbd_get_speed(xfer->xroot->udev));
+
+ temp.average = xfer->max_hc_frame_size;
+ temp.max_frame_size = xfer->max_frame_size;
+ temp.sc = EHCI_BUS2SC(xfer->xroot->bus);
+
+ /* toggle the DMA set we are using */
+ xfer->flags_int.curr_dma_set ^= 1;
+
+ /* get next DMA set */
+ td = xfer->td_start[xfer->flags_int.curr_dma_set];
+
+ xfer->td_transfer_first = td;
+ xfer->td_transfer_cache = td;
+
+ temp.td = NULL;
+ temp.td_next = td;
+ temp.qtd_status = 0;
+ temp.last_frame = 0;
+ temp.setup_alt_next = xfer->flags_int.short_frames_ok;
+
+ if (xfer->flags_int.control_xfr) {
+ if (xfer->endpoint->toggle_next) {
+ /* DATA1 is next */
+ temp.qtd_status |=
+ htohc32(temp.sc, EHCI_QTD_SET_TOGGLE(1));
+ }
+ temp.auto_data_toggle = 0;
+ } else {
+ temp.auto_data_toggle = 1;
+ }
+
+ if ((xfer->xroot->udev->parent_hs_hub != NULL) ||
+ (xfer->xroot->udev->address != 0)) {
+ /* max 3 retries */
+ temp.qtd_status |=
+ htohc32(temp.sc, EHCI_QTD_SET_CERR(3));
+ }
+ /* check if we should prepend a setup message */
+
+ if (xfer->flags_int.control_xfr) {
+ if (xfer->flags_int.control_hdr) {
+
+ temp.qtd_status &=
+ htohc32(temp.sc, EHCI_QTD_SET_CERR(3));
+ temp.qtd_status |= htohc32(temp.sc,
+ EHCI_QTD_ACTIVE |
+ EHCI_QTD_SET_PID(EHCI_QTD_PID_SETUP) |
+ EHCI_QTD_SET_TOGGLE(0));
+
+ temp.len = xfer->frlengths[0];
+ temp.pc = xfer->frbuffers + 0;
+ temp.shortpkt = temp.len ? 1 : 0;
+ /* check for last frame */
+ if (xfer->nframes == 1) {
+ /* no STATUS stage yet, SETUP is last */
+ if (xfer->flags_int.control_act) {
+ temp.last_frame = 1;
+ temp.setup_alt_next = 0;
+ }
+ }
+ ehci_setup_standard_chain_sub(&temp);
+ }
+ x = 1;
+ } else {
+ x = 0;
+ }
+
+ while (x != xfer->nframes) {
+
+ /* DATA0 / DATA1 message */
+
+ temp.len = xfer->frlengths[x];
+ temp.pc = xfer->frbuffers + x;
+
+ x++;
+
+ if (x == xfer->nframes) {
+ if (xfer->flags_int.control_xfr) {
+ /* no STATUS stage yet, DATA is last */
+ if (xfer->flags_int.control_act) {
+ temp.last_frame = 1;
+ temp.setup_alt_next = 0;
+ }
+ } else {
+ temp.last_frame = 1;
+ temp.setup_alt_next = 0;
+ }
+ }
+ /* keep previous data toggle and error count */
+
+ temp.qtd_status &=
+ htohc32(temp.sc, EHCI_QTD_SET_CERR(3) |
+ EHCI_QTD_SET_TOGGLE(1));
+
+ if (temp.len == 0) {
+
+			/* make sure that we send a USB packet */
+
+ temp.shortpkt = 0;
+
+ } else {
+
+ /* regular data transfer */
+
+ temp.shortpkt = (xfer->flags.force_short_xfer) ? 0 : 1;
+ }
+
+ /* set endpoint direction */
+
+ temp.qtd_status |=
+ (UE_GET_DIR(xfer->endpointno) == UE_DIR_IN) ?
+ htohc32(temp.sc, EHCI_QTD_ACTIVE |
+ EHCI_QTD_SET_PID(EHCI_QTD_PID_IN)) :
+ htohc32(temp.sc, EHCI_QTD_ACTIVE |
+ EHCI_QTD_SET_PID(EHCI_QTD_PID_OUT));
+
+ ehci_setup_standard_chain_sub(&temp);
+ }
+
+ /* check if we should append a status stage */
+
+ if (xfer->flags_int.control_xfr &&
+ !xfer->flags_int.control_act) {
+
+ /*
+ * Send a DATA1 message and invert the current endpoint
+ * direction.
+ */
+
+ temp.qtd_status &= htohc32(temp.sc, EHCI_QTD_SET_CERR(3) |
+ EHCI_QTD_SET_TOGGLE(1));
+ temp.qtd_status |=
+ (UE_GET_DIR(xfer->endpointno) == UE_DIR_OUT) ?
+ htohc32(temp.sc, EHCI_QTD_ACTIVE |
+ EHCI_QTD_SET_PID(EHCI_QTD_PID_IN) |
+ EHCI_QTD_SET_TOGGLE(1)) :
+ htohc32(temp.sc, EHCI_QTD_ACTIVE |
+ EHCI_QTD_SET_PID(EHCI_QTD_PID_OUT) |
+ EHCI_QTD_SET_TOGGLE(1));
+
+ temp.len = 0;
+ temp.pc = NULL;
+ temp.shortpkt = 0;
+ temp.last_frame = 1;
+ temp.setup_alt_next = 0;
+
+ ehci_setup_standard_chain_sub(&temp);
+ }
+ td = temp.td;
+
+ /* the last TD terminates the transfer: */
+ td->qtd_next = htohc32(temp.sc, EHCI_LINK_TERMINATE);
+ td->qtd_altnext = htohc32(temp.sc, EHCI_LINK_TERMINATE);
+
+ usb_pc_cpu_flush(td->page_cache);
+
+ /* must have at least one frame! */
+
+ xfer->td_transfer_last = td;
+
+#ifdef USB_DEBUG
+ if (ehcidebug > 8) {
+ DPRINTF("nexttog=%d; data before transfer:\n",
+ xfer->endpoint->toggle_next);
+ ehci_dump_sqtds(temp.sc,
+ xfer->td_transfer_first);
+ }
+#endif
+
+ methods = xfer->endpoint->methods;
+
+ qh = xfer->qh_start[xfer->flags_int.curr_dma_set];
+
+ /* the "qh_link" field is filled when the QH is added */
+
+ qh_endp =
+ (EHCI_QH_SET_ADDR(xfer->address) |
+ EHCI_QH_SET_ENDPT(UE_GET_ADDR(xfer->endpointno)) |
+ EHCI_QH_SET_MPL(xfer->max_packet_size));
+
+ if (usbd_get_speed(xfer->xroot->udev) == USB_SPEED_HIGH) {
+ qh_endp |= EHCI_QH_SET_EPS(EHCI_QH_SPEED_HIGH);
+ if (methods != &ehci_device_intr_methods)
+ qh_endp |= EHCI_QH_SET_NRL(8);
+ } else {
+
+ if (usbd_get_speed(xfer->xroot->udev) == USB_SPEED_FULL) {
+ qh_endp |= EHCI_QH_SET_EPS(EHCI_QH_SPEED_FULL);
+ } else {
+ qh_endp |= EHCI_QH_SET_EPS(EHCI_QH_SPEED_LOW);
+ }
+
+ if (methods == &ehci_device_ctrl_methods) {
+ qh_endp |= EHCI_QH_CTL;
+ }
+ if (methods != &ehci_device_intr_methods) {
+ /* Only try one time per microframe! */
+ qh_endp |= EHCI_QH_SET_NRL(1);
+ }
+ }
+
+ if (temp.auto_data_toggle == 0) {
+ /* software computes the data toggle */
+ qh_endp |= EHCI_QH_DTC;
+ }
+
+ qh->qh_endp = htohc32(temp.sc, qh_endp);
+
+ qh_endphub =
+ (EHCI_QH_SET_MULT(xfer->max_packet_count & 3) |
+ EHCI_QH_SET_CMASK(xfer->endpoint->usb_cmask) |
+ EHCI_QH_SET_SMASK(xfer->endpoint->usb_smask) |
+ EHCI_QH_SET_HUBA(xfer->xroot->udev->hs_hub_addr) |
+ EHCI_QH_SET_PORT(xfer->xroot->udev->hs_port_no));
+
+ qh->qh_endphub = htohc32(temp.sc, qh_endphub);
+ qh->qh_curqtd = 0;
+
+ /* fill the overlay qTD */
+
+ if (temp.auto_data_toggle && xfer->endpoint->toggle_next) {
+ /* DATA1 is next */
+ qh->qh_qtd.qtd_status = htohc32(temp.sc, EHCI_QTD_SET_TOGGLE(1));
+ } else {
+ qh->qh_qtd.qtd_status = 0;
+ }
+
+ td = xfer->td_transfer_first;
+
+ qh->qh_qtd.qtd_next = td->qtd_self;
+ qh->qh_qtd.qtd_altnext =
+ htohc32(temp.sc, EHCI_LINK_TERMINATE);
+
+ usb_pc_cpu_flush(qh->page_cache);
+
+ if (xfer->xroot->udev->flags.self_suspended == 0) {
+ EHCI_APPEND_QH(qh, *qh_last);
+ }
+}
+
+static void
+ehci_root_intr(ehci_softc_t *sc)
+{
+ uint16_t i;
+ uint16_t m;
+
+ USB_BUS_LOCK_ASSERT(&sc->sc_bus, MA_OWNED);
+
+ /* clear any old interrupt data */
+ memset(sc->sc_hub_idata, 0, sizeof(sc->sc_hub_idata));
+
+ /* set bits */
+ m = (sc->sc_noport + 1);
+ if (m > (8 * sizeof(sc->sc_hub_idata))) {
+ m = (8 * sizeof(sc->sc_hub_idata));
+ }
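+	/*
+	 * Bit 0 of the hub interrupt data refers to the hub itself,
+	 * so per-port change bits start at bit 1.
+	 */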
+ for (i = 1; i < m; i++) {
+ /* pick out CHANGE bits from the status register */
+ if (EOREAD4(sc, EHCI_PORTSC(i)) & EHCI_PS_CLEAR) {
+ sc->sc_hub_idata[i / 8] |= 1 << (i % 8);
+ DPRINTF("port %d changed\n", i);
+ }
+ }
+ uhub_root_intr(&sc->sc_bus, sc->sc_hub_idata,
+ sizeof(sc->sc_hub_idata));
+}
+
+static void
+ehci_isoc_fs_done(ehci_softc_t *sc, struct usb_xfer *xfer)
+{
+ uint32_t nframes = xfer->nframes;
+ uint32_t status;
+ uint32_t *plen = xfer->frlengths;
+ uint16_t len = 0;
+ ehci_sitd_t *td = xfer->td_transfer_first;
+ ehci_sitd_t **pp_last = &sc->sc_isoc_fs_p_last[xfer->qh_pos];
+
+ DPRINTFN(13, "xfer=%p endpoint=%p transfer done\n",
+ xfer, xfer->endpoint);
+
+ while (nframes--) {
+ if (td == NULL) {
+			panic("%s:%d: out of TDs\n",
+ __FUNCTION__, __LINE__);
+ }
+ if (pp_last >= &sc->sc_isoc_fs_p_last[EHCI_VIRTUAL_FRAMELIST_COUNT]) {
+ pp_last = &sc->sc_isoc_fs_p_last[0];
+ }
+#ifdef USB_DEBUG
+ if (ehcidebug > 15) {
+ DPRINTF("isoc FS-TD\n");
+ ehci_dump_sitd(sc, td);
+ }
+#endif
+ usb_pc_cpu_invalidate(td->page_cache);
+ status = hc32toh(sc, td->sitd_status);
+
+ len = EHCI_SITD_GET_LEN(status);
+
+ DPRINTFN(2, "status=0x%08x, rem=%u\n", status, len);
+
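+		/*
+		 * The SITD status field holds the number of bytes left
+		 * to transfer, so the completed length is the requested
+		 * length minus this remainder.
+		 */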
+ if (*plen >= len) {
+ len = *plen - len;
+ } else {
+ len = 0;
+ }
+
+ *plen = len;
+
+ /* remove FS-TD from schedule */
+ EHCI_REMOVE_FS_TD(td, *pp_last);
+
+ pp_last++;
+ plen++;
+ td = td->obj_next;
+ }
+
+ xfer->aframes = xfer->nframes;
+}
+
+static void
+ehci_isoc_hs_done(ehci_softc_t *sc, struct usb_xfer *xfer)
+{
+ uint32_t nframes = xfer->nframes;
+ uint32_t status;
+ uint32_t *plen = xfer->frlengths;
+ uint16_t len = 0;
+ uint8_t td_no = 0;
+ ehci_itd_t *td = xfer->td_transfer_first;
+ ehci_itd_t **pp_last = &sc->sc_isoc_hs_p_last[xfer->qh_pos];
+
+ DPRINTFN(13, "xfer=%p endpoint=%p transfer done\n",
+ xfer, xfer->endpoint);
+
+ while (nframes) {
+ if (td == NULL) {
+			panic("%s:%d: out of TDs\n",
+ __FUNCTION__, __LINE__);
+ }
+ if (pp_last >= &sc->sc_isoc_hs_p_last[EHCI_VIRTUAL_FRAMELIST_COUNT]) {
+ pp_last = &sc->sc_isoc_hs_p_last[0];
+ }
+#ifdef USB_DEBUG
+ if (ehcidebug > 15) {
+ DPRINTF("isoc HS-TD\n");
+ ehci_dump_itd(sc, td);
+ }
+#endif
+
+ usb_pc_cpu_invalidate(td->page_cache);
+ status = hc32toh(sc, td->itd_status[td_no]);
+
+ len = EHCI_ITD_GET_LEN(status);
+
+ DPRINTFN(2, "status=0x%08x, len=%u\n", status, len);
+
+ if (xfer->endpoint->usb_smask & (1 << td_no)) {
+
+ if (*plen >= len) {
+ /*
+ * The length is valid. NOTE: The
+ * complete length is written back
+ * into the status field, and not the
+ * remainder like with other transfer
+ * descriptor types.
+ */
+ } else {
+ /* Invalid length - truncate */
+ len = 0;
+ }
+
+ *plen = len;
+ plen++;
+ nframes--;
+ }
+
+ td_no++;
+
+ if ((td_no == 8) || (nframes == 0)) {
+ /* remove HS-TD from schedule */
+ EHCI_REMOVE_HS_TD(td, *pp_last);
+ pp_last++;
+
+ td_no = 0;
+ td = td->obj_next;
+ }
+ }
+ xfer->aframes = xfer->nframes;
+}
+
+/* NOTE: "done" can run two times in a row,
+ * once from close and once from interrupt
+ */
+static void
+ehci_device_done(struct usb_xfer *xfer, usb_error_t error)
+{
+ struct usb_pipe_methods *methods = xfer->endpoint->methods;
+ ehci_softc_t *sc = EHCI_BUS2SC(xfer->xroot->bus);
+
+ USB_BUS_LOCK_ASSERT(&sc->sc_bus, MA_OWNED);
+
+ DPRINTFN(2, "xfer=%p, endpoint=%p, error=%d\n",
+ xfer, xfer->endpoint, error);
+
+ if ((methods == &ehci_device_bulk_methods) ||
+ (methods == &ehci_device_ctrl_methods)) {
+#ifdef USB_DEBUG
+ if (ehcidebug > 8) {
+ DPRINTF("nexttog=%d; data after transfer:\n",
+ xfer->endpoint->toggle_next);
+ ehci_dump_sqtds(sc,
+ xfer->td_transfer_first);
+ }
+#endif
+
+ EHCI_REMOVE_QH(xfer->qh_start[xfer->flags_int.curr_dma_set],
+ sc->sc_async_p_last);
+ }
+ if (methods == &ehci_device_intr_methods) {
+ EHCI_REMOVE_QH(xfer->qh_start[xfer->flags_int.curr_dma_set],
+ sc->sc_intr_p_last[xfer->qh_pos]);
+ }
+ /*
+ * Only finish isochronous transfers once which will update
+ * "xfer->frlengths".
+ */
+ if (xfer->td_transfer_first &&
+ xfer->td_transfer_last) {
+ if (methods == &ehci_device_isoc_fs_methods) {
+ ehci_isoc_fs_done(sc, xfer);
+ }
+ if (methods == &ehci_device_isoc_hs_methods) {
+ ehci_isoc_hs_done(sc, xfer);
+ }
+ xfer->td_transfer_first = NULL;
+ xfer->td_transfer_last = NULL;
+ }
+ /* dequeue transfer and start next transfer */
+ usbd_transfer_done(xfer, error);
+}
+
+/*------------------------------------------------------------------------*
+ * ehci bulk support
+ *------------------------------------------------------------------------*/
+static void
+ehci_device_bulk_open(struct usb_xfer *xfer)
+{
+ return;
+}
+
+static void
+ehci_device_bulk_close(struct usb_xfer *xfer)
+{
+ ehci_device_done(xfer, USB_ERR_CANCELLED);
+}
+
+static void
+ehci_device_bulk_enter(struct usb_xfer *xfer)
+{
+ return;
+}
+
+static void
+ehci_device_bulk_start(struct usb_xfer *xfer)
+{
+ ehci_softc_t *sc = EHCI_BUS2SC(xfer->xroot->bus);
+ uint32_t temp;
+
+ /* setup TD's and QH */
+ ehci_setup_standard_chain(xfer, &sc->sc_async_p_last);
+
+ /* put transfer on interrupt queue */
+ ehci_transfer_intr_enqueue(xfer);
+
+ /*
+ * XXX Certain nVidia chipsets choke when using the IAAD
+ * feature too frequently.
+ */
+ if (sc->sc_flags & EHCI_SCFLG_IAADBUG)
+ return;
+
+	/* XXX Performance quirk: Some Host Controllers have an
+	 * interrupt rate that is too low. Issue an IAAD to stimulate
+	 * the Host Controller after queueing the BULK transfer.
+	 */
+ temp = EOREAD4(sc, EHCI_USBCMD);
+ if (!(temp & EHCI_CMD_IAAD))
+ EOWRITE4(sc, EHCI_USBCMD, temp | EHCI_CMD_IAAD);
+}
+
+struct usb_pipe_methods ehci_device_bulk_methods =
+{
+ .open = ehci_device_bulk_open,
+ .close = ehci_device_bulk_close,
+ .enter = ehci_device_bulk_enter,
+ .start = ehci_device_bulk_start,
+};
+
+/*------------------------------------------------------------------------*
+ * ehci control support
+ *------------------------------------------------------------------------*/
+static void
+ehci_device_ctrl_open(struct usb_xfer *xfer)
+{
+ return;
+}
+
+static void
+ehci_device_ctrl_close(struct usb_xfer *xfer)
+{
+ ehci_device_done(xfer, USB_ERR_CANCELLED);
+}
+
+static void
+ehci_device_ctrl_enter(struct usb_xfer *xfer)
+{
+ return;
+}
+
+static void
+ehci_device_ctrl_start(struct usb_xfer *xfer)
+{
+ ehci_softc_t *sc = EHCI_BUS2SC(xfer->xroot->bus);
+
+ /* setup TD's and QH */
+ ehci_setup_standard_chain(xfer, &sc->sc_async_p_last);
+
+ /* put transfer on interrupt queue */
+ ehci_transfer_intr_enqueue(xfer);
+}
+
+struct usb_pipe_methods ehci_device_ctrl_methods =
+{
+ .open = ehci_device_ctrl_open,
+ .close = ehci_device_ctrl_close,
+ .enter = ehci_device_ctrl_enter,
+ .start = ehci_device_ctrl_start,
+};
+
+/*------------------------------------------------------------------------*
+ * ehci interrupt support
+ *------------------------------------------------------------------------*/
+static void
+ehci_device_intr_open(struct usb_xfer *xfer)
+{
+ ehci_softc_t *sc = EHCI_BUS2SC(xfer->xroot->bus);
+ uint16_t best;
+ uint16_t bit;
+ uint16_t x;
+
+ usb_hs_bandwidth_alloc(xfer);
+
+ /*
+ * Find the best QH position corresponding to the given interval:
+ */
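+	/*
+	 * Sketch of the search below: "bit" becomes the largest power
+	 * of two not exceeding the transfer interval.  The slots
+	 * [bit, 2*bit) all poll at that rate, and the least loaded
+	 * one according to "sc_intr_stat" is selected.  For example,
+	 * interval=10 yields bit=8 and compares slots 8..15.
+	 */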
+
+ best = 0;
+ bit = EHCI_VIRTUAL_FRAMELIST_COUNT / 2;
+ while (bit) {
+ if (xfer->interval >= bit) {
+ x = bit;
+ best = bit;
+ while (x & bit) {
+ if (sc->sc_intr_stat[x] <
+ sc->sc_intr_stat[best]) {
+ best = x;
+ }
+ x++;
+ }
+ break;
+ }
+ bit >>= 1;
+ }
+
+ sc->sc_intr_stat[best]++;
+ xfer->qh_pos = best;
+
+ DPRINTFN(3, "best=%d interval=%d\n",
+ best, xfer->interval);
+}
+
+static void
+ehci_device_intr_close(struct usb_xfer *xfer)
+{
+ ehci_softc_t *sc = EHCI_BUS2SC(xfer->xroot->bus);
+
+ sc->sc_intr_stat[xfer->qh_pos]--;
+
+ ehci_device_done(xfer, USB_ERR_CANCELLED);
+
+ /* bandwidth must be freed after device done */
+ usb_hs_bandwidth_free(xfer);
+}
+
+static void
+ehci_device_intr_enter(struct usb_xfer *xfer)
+{
+ return;
+}
+
+static void
+ehci_device_intr_start(struct usb_xfer *xfer)
+{
+ ehci_softc_t *sc = EHCI_BUS2SC(xfer->xroot->bus);
+
+ /* setup TD's and QH */
+ ehci_setup_standard_chain(xfer, &sc->sc_intr_p_last[xfer->qh_pos]);
+
+ /* put transfer on interrupt queue */
+ ehci_transfer_intr_enqueue(xfer);
+}
+
+struct usb_pipe_methods ehci_device_intr_methods =
+{
+ .open = ehci_device_intr_open,
+ .close = ehci_device_intr_close,
+ .enter = ehci_device_intr_enter,
+ .start = ehci_device_intr_start,
+};
+
+/*------------------------------------------------------------------------*
+ * ehci full speed isochronous support
+ *------------------------------------------------------------------------*/
+static void
+ehci_device_isoc_fs_open(struct usb_xfer *xfer)
+{
+ ehci_softc_t *sc = EHCI_BUS2SC(xfer->xroot->bus);
+ ehci_sitd_t *td;
+ uint32_t sitd_portaddr;
+ uint8_t ds;
+
+ sitd_portaddr =
+ EHCI_SITD_SET_ADDR(xfer->address) |
+ EHCI_SITD_SET_ENDPT(UE_GET_ADDR(xfer->endpointno)) |
+ EHCI_SITD_SET_HUBA(xfer->xroot->udev->hs_hub_addr) |
+ EHCI_SITD_SET_PORT(xfer->xroot->udev->hs_port_no);
+
+ if (UE_GET_DIR(xfer->endpointno) == UE_DIR_IN) {
+ sitd_portaddr |= EHCI_SITD_SET_DIR_IN;
+ }
+ sitd_portaddr = htohc32(sc, sitd_portaddr);
+
+ /* initialize all TD's */
+
+ for (ds = 0; ds != 2; ds++) {
+
+ for (td = xfer->td_start[ds]; td; td = td->obj_next) {
+
+ td->sitd_portaddr = sitd_portaddr;
+
+			/*
+			 * TODO: select the SMASK/CMASK automatically
+			 * based on micro-frame usage (there are 8
+			 * microframes per 1ms frame).
+			 */
+ td->sitd_back = htohc32(sc, EHCI_LINK_TERMINATE);
+
+ usb_pc_cpu_flush(td->page_cache);
+ }
+ }
+}
+
+static void
+ehci_device_isoc_fs_close(struct usb_xfer *xfer)
+{
+ ehci_device_done(xfer, USB_ERR_CANCELLED);
+}
+
+static void
+ehci_device_isoc_fs_enter(struct usb_xfer *xfer)
+{
+ struct usb_page_search buf_res;
+ ehci_softc_t *sc = EHCI_BUS2SC(xfer->xroot->bus);
+ struct usb_fs_isoc_schedule *fss_start;
+ struct usb_fs_isoc_schedule *fss_end;
+ struct usb_fs_isoc_schedule *fss;
+ ehci_sitd_t *td;
+ ehci_sitd_t *td_last = NULL;
+ ehci_sitd_t **pp_last;
+ uint32_t *plen;
+ uint32_t buf_offset;
+ uint32_t nframes;
+ uint32_t temp;
+ uint32_t sitd_mask;
+ uint16_t tlen;
+ uint8_t sa;
+ uint8_t sb;
+ uint8_t error;
+
+#ifdef USB_DEBUG
+	uint8_t once = 1;
+#endif
+
+ DPRINTFN(6, "xfer=%p next=%d nframes=%d\n",
+ xfer, xfer->endpoint->isoc_next, xfer->nframes);
+
+ /* get the current frame index */
+
+ nframes = EOREAD4(sc, EHCI_FRINDEX) / 8;
+
+ /*
+ * check if the frame index is within the window where the frames
+ * will be inserted
+ */
+ buf_offset = (nframes - xfer->endpoint->isoc_next) &
+ (EHCI_VIRTUAL_FRAMELIST_COUNT - 1);
+
+ if ((xfer->endpoint->is_synced == 0) ||
+ (buf_offset < xfer->nframes)) {
+ /*
+	 * If there is data underflow or the pipe queue is empty, we
+	 * schedule the transfer a few frames ahead of the current
+	 * frame position. Otherwise two isochronous transfers might
+	 * overlap.
+ */
+ xfer->endpoint->isoc_next = (nframes + 3) &
+ (EHCI_VIRTUAL_FRAMELIST_COUNT - 1);
+ xfer->endpoint->is_synced = 1;
+ DPRINTFN(3, "start next=%d\n", xfer->endpoint->isoc_next);
+ }
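+	/*
+	 * Example of the window test above: with isoc_next=100 while
+	 * the controller is at frame 98, buf_offset = (98 - 100) &
+	 * 127 = 126.  Unless the transfer queues more than 126
+	 * frames, 126 >= nframes and the endpoint is still in sync.
+	 */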
+ /*
+ * compute how many milliseconds the insertion is ahead of the
+ * current frame position:
+ */
+ buf_offset = (xfer->endpoint->isoc_next - nframes) &
+ (EHCI_VIRTUAL_FRAMELIST_COUNT - 1);
+
+ /*
+ * pre-compute when the isochronous transfer will be finished:
+ */
+ xfer->isoc_time_complete =
+ usbd_fs_isoc_schedule_isoc_time_expand
+ (xfer->xroot->udev, &fss_start, &fss_end, nframes) + buf_offset +
+ xfer->nframes;
+
+ /* get the real number of frames */
+
+ nframes = xfer->nframes;
+
+ buf_offset = 0;
+
+ plen = xfer->frlengths;
+
+ /* toggle the DMA set we are using */
+ xfer->flags_int.curr_dma_set ^= 1;
+
+ /* get next DMA set */
+ td = xfer->td_start[xfer->flags_int.curr_dma_set];
+ xfer->td_transfer_first = td;
+
+ pp_last = &sc->sc_isoc_fs_p_last[xfer->endpoint->isoc_next];
+
+ /* store starting position */
+
+ xfer->qh_pos = xfer->endpoint->isoc_next;
+
+ fss = fss_start + (xfer->qh_pos % USB_ISOC_TIME_MAX);
+
+ while (nframes--) {
+ if (td == NULL) {
+ panic("%s:%d: out of TD's\n",
+ __FUNCTION__, __LINE__);
+ }
+ if (pp_last >= &sc->sc_isoc_fs_p_last[EHCI_VIRTUAL_FRAMELIST_COUNT]) {
+ pp_last = &sc->sc_isoc_fs_p_last[0];
+ }
+ if (fss >= fss_end) {
+ fss = fss_start;
+ }
+ /* reuse sitd_portaddr and sitd_back from last transfer */
+
+ if (*plen > xfer->max_frame_size) {
+#ifdef USB_DEBUG
+ if (once) {
+ once = 0;
+ printf("%s: frame length(%d) exceeds %d "
+ "bytes (frame truncated)\n",
+ __FUNCTION__, *plen,
+ xfer->max_frame_size);
+ }
+#endif
+ *plen = xfer->max_frame_size;
+ }
+ /*
+ * We currently don't care if the ISOCHRONOUS schedule is
+ * full!
+ */
+ error = usbd_fs_isoc_schedule_alloc(fss, &sa, *plen);
+ if (error) {
+ /*
+ * The FULL speed schedule is FULL! Set length
+ * to zero.
+ */
+ *plen = 0;
+ }
+ if (*plen) {
+ /*
+ * only call "usbd_get_page()" when we have a
+ * non-zero length
+ */
+ usbd_get_page(xfer->frbuffers, buf_offset, &buf_res);
+ td->sitd_bp[0] = htohc32(sc, buf_res.physaddr);
+ buf_offset += *plen;
+ /*
+ * NOTE: We need to subtract one from the offset so
+ * that we are on a valid page!
+ */
+ usbd_get_page(xfer->frbuffers, buf_offset - 1,
+ &buf_res);
+ temp = buf_res.physaddr & ~0xFFF;
+ } else {
+ td->sitd_bp[0] = 0;
+ temp = 0;
+ }
+
+ if (UE_GET_DIR(xfer->endpointno) == UE_DIR_OUT) {
+ tlen = *plen;
+ if (tlen <= 188) {
+ temp |= 1; /* T-count = 1, TP = ALL */
+ tlen = 1;
+ } else {
+ tlen += 187;
+ tlen /= 188;
+ temp |= tlen; /* T-count = [1..6] */
+ temp |= 8; /* TP = Begin */
+ }
+
+ tlen += sa;
+
+ if (tlen >= 8) {
+ sb = 0;
+ } else {
+ sb = (1 << tlen);
+ }
+
+ sa = (1 << sa);
+ sa = (sb - sa) & 0x3F;
+ sb = 0;
+ } else {
+ sb = (-(4 << sa)) & 0xFE;
+ sa = (1 << sa) & 0x3F;
+ }
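+		/*
+		 * Worked example for the OUT branch above, assuming
+		 * the schedule allocator returned sa=0: a 400 byte
+		 * frame needs T-count = (400 + 187) / 188 = 3
+		 * start-splits, so tlen=3, sb = (1 << 3) = 8 and the
+		 * start-split mask becomes (8 - 1) & 0x3F = 0x07,
+		 * i.e. microframes 0..2.  OUT uses no complete-split
+		 * mask.
+		 */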
+
+ sitd_mask = (EHCI_SITD_SET_SMASK(sa) |
+ EHCI_SITD_SET_CMASK(sb));
+
+ td->sitd_bp[1] = htohc32(sc, temp);
+
+ td->sitd_mask = htohc32(sc, sitd_mask);
+
+ if (nframes == 0) {
+ td->sitd_status = htohc32(sc,
+ EHCI_SITD_IOC |
+ EHCI_SITD_ACTIVE |
+ EHCI_SITD_SET_LEN(*plen));
+ } else {
+ td->sitd_status = htohc32(sc,
+ EHCI_SITD_ACTIVE |
+ EHCI_SITD_SET_LEN(*plen));
+ }
+ usb_pc_cpu_flush(td->page_cache);
+
+#ifdef USB_DEBUG
+ if (ehcidebug > 15) {
+ DPRINTF("FS-TD %d\n", nframes);
+ ehci_dump_sitd(sc, td);
+ }
+#endif
+ /* insert TD into schedule */
+ EHCI_APPEND_FS_TD(td, *pp_last);
+ pp_last++;
+
+ plen++;
+ fss++;
+ td_last = td;
+ td = td->obj_next;
+ }
+
+ xfer->td_transfer_last = td_last;
+
+ /* update isoc_next */
+ xfer->endpoint->isoc_next = (pp_last - &sc->sc_isoc_fs_p_last[0]) &
+ (EHCI_VIRTUAL_FRAMELIST_COUNT - 1);
+}
+
+static void
+ehci_device_isoc_fs_start(struct usb_xfer *xfer)
+{
+ /* put transfer on interrupt queue */
+ ehci_transfer_intr_enqueue(xfer);
+}
+
+struct usb_pipe_methods ehci_device_isoc_fs_methods =
+{
+ .open = ehci_device_isoc_fs_open,
+ .close = ehci_device_isoc_fs_close,
+ .enter = ehci_device_isoc_fs_enter,
+ .start = ehci_device_isoc_fs_start,
+};
+
+/*------------------------------------------------------------------------*
+ * ehci high speed isochronous support
+ *------------------------------------------------------------------------*/
+static void
+ehci_device_isoc_hs_open(struct usb_xfer *xfer)
+{
+ ehci_softc_t *sc = EHCI_BUS2SC(xfer->xroot->bus);
+ ehci_itd_t *td;
+ uint32_t temp;
+ uint8_t ds;
+
+ usb_hs_bandwidth_alloc(xfer);
+
+ /* initialize all TD's */
+
+ for (ds = 0; ds != 2; ds++) {
+
+ for (td = xfer->td_start[ds]; td; td = td->obj_next) {
+
+ /* set TD inactive */
+ td->itd_status[0] = 0;
+ td->itd_status[1] = 0;
+ td->itd_status[2] = 0;
+ td->itd_status[3] = 0;
+ td->itd_status[4] = 0;
+ td->itd_status[5] = 0;
+ td->itd_status[6] = 0;
+ td->itd_status[7] = 0;
+
+ /* set endpoint and address */
+ td->itd_bp[0] = htohc32(sc,
+ EHCI_ITD_SET_ADDR(xfer->address) |
+ EHCI_ITD_SET_ENDPT(UE_GET_ADDR(xfer->endpointno)));
+
+ temp =
+ EHCI_ITD_SET_MPL(xfer->max_packet_size & 0x7FF);
+
+ /* set direction */
+ if (UE_GET_DIR(xfer->endpointno) == UE_DIR_IN) {
+ temp |= EHCI_ITD_SET_DIR_IN;
+ }
+ /* set maximum packet size */
+ td->itd_bp[1] = htohc32(sc, temp);
+
+ /* set transfer multiplier */
+ td->itd_bp[2] = htohc32(sc, xfer->max_packet_count & 3);
+
+ usb_pc_cpu_flush(td->page_cache);
+ }
+ }
+}
+
+static void
+ehci_device_isoc_hs_close(struct usb_xfer *xfer)
+{
+ ehci_device_done(xfer, USB_ERR_CANCELLED);
+
+ /* bandwidth must be freed after device done */
+ usb_hs_bandwidth_free(xfer);
+}
+
+static void
+ehci_device_isoc_hs_enter(struct usb_xfer *xfer)
+{
+ struct usb_page_search buf_res;
+ ehci_softc_t *sc = EHCI_BUS2SC(xfer->xroot->bus);
+ ehci_itd_t *td;
+ ehci_itd_t *td_last = NULL;
+ ehci_itd_t **pp_last;
+ bus_size_t page_addr;
+ uint32_t *plen;
+ uint32_t status;
+ uint32_t buf_offset;
+ uint32_t nframes;
+ uint32_t itd_offset[8 + 1];
+ uint8_t x;
+ uint8_t td_no;
+ uint8_t page_no;
+ uint8_t shift = usbd_xfer_get_fps_shift(xfer);
+
+#ifdef USB_DEBUG
+	uint8_t once = 1;
+#endif
+
+ DPRINTFN(6, "xfer=%p next=%d nframes=%d shift=%d\n",
+ xfer, xfer->endpoint->isoc_next, xfer->nframes, (int)shift);
+
+ /* get the current frame index */
+
+ nframes = EOREAD4(sc, EHCI_FRINDEX) / 8;
+
+ /*
+ * check if the frame index is within the window where the frames
+ * will be inserted
+ */
+ buf_offset = (nframes - xfer->endpoint->isoc_next) &
+ (EHCI_VIRTUAL_FRAMELIST_COUNT - 1);
+
+ if ((xfer->endpoint->is_synced == 0) ||
+ (buf_offset < (((xfer->nframes << shift) + 7) / 8))) {
+ /*
+	 * If there is data underflow or the pipe queue is empty, we
+	 * schedule the transfer a few frames ahead of the current
+	 * frame position. Otherwise two isochronous transfers might
+	 * overlap.
+ */
+ xfer->endpoint->isoc_next = (nframes + 3) &
+ (EHCI_VIRTUAL_FRAMELIST_COUNT - 1);
+ xfer->endpoint->is_synced = 1;
+ DPRINTFN(3, "start next=%d\n", xfer->endpoint->isoc_next);
+ }
+ /*
+ * compute how many milliseconds the insertion is ahead of the
+ * current frame position:
+ */
+ buf_offset = (xfer->endpoint->isoc_next - nframes) &
+ (EHCI_VIRTUAL_FRAMELIST_COUNT - 1);
+
+ /*
+ * pre-compute when the isochronous transfer will be finished:
+ */
+ xfer->isoc_time_complete =
+ usb_isoc_time_expand(&sc->sc_bus, nframes) + buf_offset +
+ (((xfer->nframes << shift) + 7) / 8);
+
+ /* get the real number of frames */
+
+ nframes = xfer->nframes;
+
+ buf_offset = 0;
+ td_no = 0;
+
+ plen = xfer->frlengths;
+
+ /* toggle the DMA set we are using */
+ xfer->flags_int.curr_dma_set ^= 1;
+
+ /* get next DMA set */
+ td = xfer->td_start[xfer->flags_int.curr_dma_set];
+ xfer->td_transfer_first = td;
+
+ pp_last = &sc->sc_isoc_hs_p_last[xfer->endpoint->isoc_next];
+
+ /* store starting position */
+
+ xfer->qh_pos = xfer->endpoint->isoc_next;
+
+ while (nframes) {
+ if (td == NULL) {
+ panic("%s:%d: out of TD's\n",
+ __FUNCTION__, __LINE__);
+ }
+ if (pp_last >= &sc->sc_isoc_hs_p_last[EHCI_VIRTUAL_FRAMELIST_COUNT]) {
+ pp_last = &sc->sc_isoc_hs_p_last[0];
+ }
+ /* range check */
+ if (*plen > xfer->max_frame_size) {
+#ifdef USB_DEBUG
+ if (once) {
+ once = 0;
+ printf("%s: frame length(%d) exceeds %d bytes "
+ "(frame truncated)\n",
+ __FUNCTION__, *plen, xfer->max_frame_size);
+ }
+#endif
+ *plen = xfer->max_frame_size;
+ }
+
+ if (xfer->endpoint->usb_smask & (1 << td_no)) {
+ status = (EHCI_ITD_SET_LEN(*plen) |
+ EHCI_ITD_ACTIVE |
+ EHCI_ITD_SET_PG(0));
+ td->itd_status[td_no] = htohc32(sc, status);
+ itd_offset[td_no] = buf_offset;
+ buf_offset += *plen;
+ plen++;
+			nframes--;
+ } else {
+ td->itd_status[td_no] = 0; /* not active */
+ itd_offset[td_no] = buf_offset;
+ }
+
+ td_no++;
+
+ if ((td_no == 8) || (nframes == 0)) {
+
+			/* mark any remaining microframes in this TD inactive */
+ for (x = td_no; x != 8; x++) {
+ td->itd_status[x] = 0; /* not active */
+ }
+
+ /* check if there is any data to be transferred */
+ if (itd_offset[0] != buf_offset) {
+ page_no = 0;
+ itd_offset[td_no] = buf_offset;
+
+ /* get first page offset */
+ usbd_get_page(xfer->frbuffers, itd_offset[0], &buf_res);
+ /* get page address */
+ page_addr = buf_res.physaddr & ~0xFFF;
+ /* update page address */
+ td->itd_bp[0] &= htohc32(sc, 0xFFF);
+ td->itd_bp[0] |= htohc32(sc, page_addr);
+
+ for (x = 0; x != td_no; x++) {
+ /* set page number and page offset */
+ status = (EHCI_ITD_SET_PG(page_no) |
+ (buf_res.physaddr & 0xFFF));
+ td->itd_status[x] |= htohc32(sc, status);
+
+ /* get next page offset */
+ if (itd_offset[x + 1] == buf_offset) {
+ /*
+ * We subtract one so that
+ * we don't go off the last
+ * page!
+ */
+ usbd_get_page(xfer->frbuffers, buf_offset - 1, &buf_res);
+ } else {
+ usbd_get_page(xfer->frbuffers, itd_offset[x + 1], &buf_res);
+ }
+
+ /* check if we need a new page */
+ if ((buf_res.physaddr ^ page_addr) & ~0xFFF) {
+ /* new page needed */
+ page_addr = buf_res.physaddr & ~0xFFF;
+ if (page_no == 6) {
+ panic("%s: too many pages\n", __FUNCTION__);
+ }
+ page_no++;
+ /* update page address */
+ td->itd_bp[page_no] &= htohc32(sc, 0xFFF);
+ td->itd_bp[page_no] |= htohc32(sc, page_addr);
+ }
+ }
+ }
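+			/*
+			 * NOTE on the paging above: an ITD carries
+			 * seven 4K buffer pointers.  Each time a
+			 * microframe buffer crosses into a new
+			 * physical page the next "itd_bp" slot is
+			 * programmed and selected through the PG
+			 * field, hence the panic when more than
+			 * seven pages would be needed.
+			 */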
+ /* set IOC bit if we are complete */
+ if (nframes == 0) {
+ td->itd_status[td_no - 1] |= htohc32(sc, EHCI_ITD_IOC);
+ }
+ usb_pc_cpu_flush(td->page_cache);
+#ifdef USB_DEBUG
+ if (ehcidebug > 15) {
+ DPRINTF("HS-TD %d\n", nframes);
+ ehci_dump_itd(sc, td);
+ }
+#endif
+ /* insert TD into schedule */
+ EHCI_APPEND_HS_TD(td, *pp_last);
+ pp_last++;
+
+ td_no = 0;
+ td_last = td;
+ td = td->obj_next;
+ }
+ }
+
+ xfer->td_transfer_last = td_last;
+
+ /* update isoc_next */
+ xfer->endpoint->isoc_next = (pp_last - &sc->sc_isoc_hs_p_last[0]) &
+ (EHCI_VIRTUAL_FRAMELIST_COUNT - 1);
+}
+
+static void
+ehci_device_isoc_hs_start(struct usb_xfer *xfer)
+{
+ /* put transfer on interrupt queue */
+ ehci_transfer_intr_enqueue(xfer);
+}
+
+struct usb_pipe_methods ehci_device_isoc_hs_methods =
+{
+ .open = ehci_device_isoc_hs_open,
+ .close = ehci_device_isoc_hs_close,
+ .enter = ehci_device_isoc_hs_enter,
+ .start = ehci_device_isoc_hs_start,
+};
+
+/*------------------------------------------------------------------------*
+ * ehci root control support
+ *------------------------------------------------------------------------*
+ * Simulate a hardware hub by handling all the necessary requests.
+ *------------------------------------------------------------------------*/
+
+static const
+struct usb_device_descriptor ehci_devd =
+{
+ sizeof(struct usb_device_descriptor),
+ UDESC_DEVICE, /* type */
+ {0x00, 0x02}, /* USB version */
+ UDCLASS_HUB, /* class */
+ UDSUBCLASS_HUB, /* subclass */
+ UDPROTO_HSHUBSTT, /* protocol */
+ 64, /* max packet */
+ {0}, {0}, {0x00, 0x01}, /* device id */
+	1, 2, 0,		/* string indices */
+ 1 /* # of configurations */
+};
+
+static const
+struct usb_device_qualifier ehci_odevd =
+{
+ sizeof(struct usb_device_qualifier),
+ UDESC_DEVICE_QUALIFIER, /* type */
+ {0x00, 0x02}, /* USB version */
+ UDCLASS_HUB, /* class */
+ UDSUBCLASS_HUB, /* subclass */
+ UDPROTO_FSHUB, /* protocol */
+ 0, /* max packet */
+ 0, /* # of configurations */
+ 0
+};
+
+static const struct ehci_config_desc ehci_confd = {
+ .confd = {
+ .bLength = sizeof(struct usb_config_descriptor),
+ .bDescriptorType = UDESC_CONFIG,
+ .wTotalLength[0] = sizeof(ehci_confd),
+ .bNumInterface = 1,
+ .bConfigurationValue = 1,
+ .iConfiguration = 0,
+ .bmAttributes = UC_SELF_POWERED,
+ .bMaxPower = 0 /* max power */
+ },
+ .ifcd = {
+ .bLength = sizeof(struct usb_interface_descriptor),
+ .bDescriptorType = UDESC_INTERFACE,
+ .bNumEndpoints = 1,
+ .bInterfaceClass = UICLASS_HUB,
+ .bInterfaceSubClass = UISUBCLASS_HUB,
+ .bInterfaceProtocol = 0,
+ },
+ .endpd = {
+ .bLength = sizeof(struct usb_endpoint_descriptor),
+ .bDescriptorType = UDESC_ENDPOINT,
+ .bEndpointAddress = UE_DIR_IN | EHCI_INTR_ENDPT,
+ .bmAttributes = UE_INTERRUPT,
+ .wMaxPacketSize[0] = 8, /* max packet (63 ports) */
+ .bInterval = 255,
+ },
+};
+
+static const
+struct usb_hub_descriptor ehci_hubd =
+{
+ 0, /* dynamic length */
+ UDESC_HUB,
+ 0,
+ {0, 0},
+ 0,
+ 0,
+ {0},
+};
+
+static void
+ehci_disown(ehci_softc_t *sc, uint16_t index, uint8_t lowspeed)
+{
+ uint32_t port;
+ uint32_t v;
+
+ DPRINTF("index=%d lowspeed=%d\n", index, lowspeed);
+
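+	/*
+	 * Setting the port owner bit hands the port over to a
+	 * companion OHCI/UHCI controller, which will service the
+	 * low or full speed device.
+	 */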
+ port = EHCI_PORTSC(index);
+ v = EOREAD4(sc, port) & ~EHCI_PS_CLEAR;
+ EOWRITE4(sc, port, v | EHCI_PS_PO);
+}
+
+static usb_error_t
+ehci_roothub_exec(struct usb_device *udev,
+ struct usb_device_request *req, const void **pptr, uint16_t *plength)
+{
+ ehci_softc_t *sc = EHCI_BUS2SC(udev->bus);
+ const char *str_ptr;
+ const void *ptr;
+ uint32_t port;
+ uint32_t v;
+ uint16_t len;
+ uint16_t i;
+ uint16_t value;
+ uint16_t index;
+ usb_error_t err;
+
+ USB_BUS_LOCK_ASSERT(&sc->sc_bus, MA_OWNED);
+
+ /* buffer reset */
+ ptr = (const void *)&sc->sc_hub_desc;
+ len = 0;
+ err = 0;
+
+ value = UGETW(req->wValue);
+ index = UGETW(req->wIndex);
+
+ DPRINTFN(3, "type=0x%02x request=0x%02x wLen=0x%04x "
+ "wValue=0x%04x wIndex=0x%04x\n",
+ req->bmRequestType, req->bRequest,
+ UGETW(req->wLength), value, index);
+
+#define C(x,y) ((x) | ((y) << 8))
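+	/*
+	 * Example: a standard GET_STATUS device read arrives as
+	 * bRequest=0x00, bmRequestType=0x80 and is matched by the
+	 * key C(UR_GET_STATUS, UT_READ_DEVICE) = 0x8000.
+	 */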
+ switch (C(req->bRequest, req->bmRequestType)) {
+ case C(UR_CLEAR_FEATURE, UT_WRITE_DEVICE):
+ case C(UR_CLEAR_FEATURE, UT_WRITE_INTERFACE):
+ case C(UR_CLEAR_FEATURE, UT_WRITE_ENDPOINT):
+ /*
+ * DEVICE_REMOTE_WAKEUP and ENDPOINT_HALT are no-ops
+ * for the integrated root hub.
+ */
+ break;
+ case C(UR_GET_CONFIG, UT_READ_DEVICE):
+ len = 1;
+ sc->sc_hub_desc.temp[0] = sc->sc_conf;
+ break;
+ case C(UR_GET_DESCRIPTOR, UT_READ_DEVICE):
+ switch (value >> 8) {
+ case UDESC_DEVICE:
+ if ((value & 0xff) != 0) {
+ err = USB_ERR_IOERROR;
+ goto done;
+ }
+ len = sizeof(ehci_devd);
+ ptr = (const void *)&ehci_devd;
+ break;
+ /*
+ * We can't really operate at another speed,
+ * but the specification says we need this
+ * descriptor:
+ */
+ case UDESC_DEVICE_QUALIFIER:
+ if ((value & 0xff) != 0) {
+ err = USB_ERR_IOERROR;
+ goto done;
+ }
+ len = sizeof(ehci_odevd);
+ ptr = (const void *)&ehci_odevd;
+ break;
+
+ case UDESC_CONFIG:
+ if ((value & 0xff) != 0) {
+ err = USB_ERR_IOERROR;
+ goto done;
+ }
+ len = sizeof(ehci_confd);
+ ptr = (const void *)&ehci_confd;
+ break;
+
+ case UDESC_STRING:
+ switch (value & 0xff) {
+ case 0: /* Language table */
+ str_ptr = "\001";
+ break;
+
+ case 1: /* Vendor */
+ str_ptr = sc->sc_vendor;
+ break;
+
+ case 2: /* Product */
+ str_ptr = "EHCI root HUB";
+ break;
+
+ default:
+ str_ptr = "";
+ break;
+ }
+
+ len = usb_make_str_desc(
+ sc->sc_hub_desc.temp,
+ sizeof(sc->sc_hub_desc.temp),
+ str_ptr);
+ break;
+ default:
+ err = USB_ERR_IOERROR;
+ goto done;
+ }
+ break;
+ case C(UR_GET_INTERFACE, UT_READ_INTERFACE):
+ len = 1;
+ sc->sc_hub_desc.temp[0] = 0;
+ break;
+ case C(UR_GET_STATUS, UT_READ_DEVICE):
+ len = 2;
+ USETW(sc->sc_hub_desc.stat.wStatus, UDS_SELF_POWERED);
+ break;
+ case C(UR_GET_STATUS, UT_READ_INTERFACE):
+ case C(UR_GET_STATUS, UT_READ_ENDPOINT):
+ len = 2;
+ USETW(sc->sc_hub_desc.stat.wStatus, 0);
+ break;
+ case C(UR_SET_ADDRESS, UT_WRITE_DEVICE):
+ if (value >= EHCI_MAX_DEVICES) {
+ err = USB_ERR_IOERROR;
+ goto done;
+ }
+ sc->sc_addr = value;
+ break;
+ case C(UR_SET_CONFIG, UT_WRITE_DEVICE):
+ if ((value != 0) && (value != 1)) {
+ err = USB_ERR_IOERROR;
+ goto done;
+ }
+ sc->sc_conf = value;
+ break;
+ case C(UR_SET_DESCRIPTOR, UT_WRITE_DEVICE):
+ break;
+ case C(UR_SET_FEATURE, UT_WRITE_DEVICE):
+ case C(UR_SET_FEATURE, UT_WRITE_INTERFACE):
+ case C(UR_SET_FEATURE, UT_WRITE_ENDPOINT):
+ err = USB_ERR_IOERROR;
+ goto done;
+ case C(UR_SET_INTERFACE, UT_WRITE_INTERFACE):
+ break;
+ case C(UR_SYNCH_FRAME, UT_WRITE_ENDPOINT):
+ break;
+ /* Hub requests */
+ case C(UR_CLEAR_FEATURE, UT_WRITE_CLASS_DEVICE):
+ break;
+ case C(UR_CLEAR_FEATURE, UT_WRITE_CLASS_OTHER):
+ DPRINTFN(9, "UR_CLEAR_PORT_FEATURE\n");
+
+ if ((index < 1) ||
+ (index > sc->sc_noport)) {
+ err = USB_ERR_IOERROR;
+ goto done;
+ }
+ port = EHCI_PORTSC(index);
+ v = EOREAD4(sc, port) & ~EHCI_PS_CLEAR;
+ switch (value) {
+ case UHF_PORT_ENABLE:
+ EOWRITE4(sc, port, v & ~EHCI_PS_PE);
+ break;
+ case UHF_PORT_SUSPEND:
+ if ((v & EHCI_PS_SUSP) && (!(v & EHCI_PS_FPR))) {
+
+				/*
+				 * Wake up the device by forcing a
+				 * port resume (FPR).
+				 */
+ }
+ /* wait 20ms for resume sequence to complete */
+ usb_pause_mtx(&sc->sc_bus.bus_mtx, hz / 50);
+
+ EOWRITE4(sc, port, v & ~(EHCI_PS_SUSP |
+ EHCI_PS_FPR | (3 << 10) /* High Speed */ ));
+
+ /* 4ms settle time */
+ usb_pause_mtx(&sc->sc_bus.bus_mtx, hz / 250);
+ break;
+ case UHF_PORT_POWER:
+ EOWRITE4(sc, port, v & ~EHCI_PS_PP);
+ break;
+ case UHF_PORT_TEST:
+ DPRINTFN(3, "clear port test "
+ "%d\n", index);
+ break;
+ case UHF_PORT_INDICATOR:
+ DPRINTFN(3, "clear port ind "
+ "%d\n", index);
+ EOWRITE4(sc, port, v & ~EHCI_PS_PIC);
+ break;
+ case UHF_C_PORT_CONNECTION:
+ EOWRITE4(sc, port, v | EHCI_PS_CSC);
+ break;
+ case UHF_C_PORT_ENABLE:
+ EOWRITE4(sc, port, v | EHCI_PS_PEC);
+ break;
+ case UHF_C_PORT_SUSPEND:
+ EOWRITE4(sc, port, v | EHCI_PS_SUSP);
+ break;
+ case UHF_C_PORT_OVER_CURRENT:
+ EOWRITE4(sc, port, v | EHCI_PS_OCC);
+ break;
+ case UHF_C_PORT_RESET:
+ sc->sc_isreset = 0;
+ break;
+ default:
+ err = USB_ERR_IOERROR;
+ goto done;
+ }
+ break;
+ case C(UR_GET_DESCRIPTOR, UT_READ_CLASS_DEVICE):
+ if ((value & 0xff) != 0) {
+ err = USB_ERR_IOERROR;
+ goto done;
+ }
+ v = EREAD4(sc, EHCI_HCSPARAMS);
+
+ sc->sc_hub_desc.hubd = ehci_hubd;
+ sc->sc_hub_desc.hubd.bNbrPorts = sc->sc_noport;
+
+ if (EHCI_HCS_PPC(v))
+ i = UHD_PWR_INDIVIDUAL;
+ else
+ i = UHD_PWR_NO_SWITCH;
+
+ if (EHCI_HCS_P_INDICATOR(v))
+ i |= UHD_PORT_IND;
+
+ USETW(sc->sc_hub_desc.hubd.wHubCharacteristics, i);
+ /* XXX can't find out? */
+ sc->sc_hub_desc.hubd.bPwrOn2PwrGood = 200;
+ /* XXX don't know if ports are removable or not */
+ sc->sc_hub_desc.hubd.bDescLength =
+ 8 + ((sc->sc_noport + 7) / 8);
+ len = sc->sc_hub_desc.hubd.bDescLength;
+ break;
+ case C(UR_GET_STATUS, UT_READ_CLASS_DEVICE):
+ len = 16;
+ bzero(sc->sc_hub_desc.temp, 16);
+ break;
+ case C(UR_GET_STATUS, UT_READ_CLASS_OTHER):
+ DPRINTFN(9, "get port status i=%d\n",
+ index);
+ if ((index < 1) ||
+ (index > sc->sc_noport)) {
+ err = USB_ERR_IOERROR;
+ goto done;
+ }
+ v = EOREAD4(sc, EHCI_PORTSC(index));
+ DPRINTFN(9, "port status=0x%04x\n", v);
+ if (sc->sc_flags & (EHCI_SCFLG_FORCESPEED | EHCI_SCFLG_TT)) {
+ if ((v & 0xc000000) == 0x8000000)
+ i = UPS_HIGH_SPEED;
+ else if ((v & 0xc000000) == 0x4000000)
+ i = UPS_LOW_SPEED;
+ else
+ i = 0;
+ } else {
+ i = UPS_HIGH_SPEED;
+ }
+ if (v & EHCI_PS_CS)
+ i |= UPS_CURRENT_CONNECT_STATUS;
+ if (v & EHCI_PS_PE)
+ i |= UPS_PORT_ENABLED;
+ if ((v & EHCI_PS_SUSP) && !(v & EHCI_PS_FPR))
+ i |= UPS_SUSPEND;
+ if (v & EHCI_PS_OCA)
+ i |= UPS_OVERCURRENT_INDICATOR;
+ if (v & EHCI_PS_PR)
+ i |= UPS_RESET;
+ if (v & EHCI_PS_PP)
+ i |= UPS_PORT_POWER;
+ USETW(sc->sc_hub_desc.ps.wPortStatus, i);
+ i = 0;
+ if (v & EHCI_PS_CSC)
+ i |= UPS_C_CONNECT_STATUS;
+ if (v & EHCI_PS_PEC)
+ i |= UPS_C_PORT_ENABLED;
+ if (v & EHCI_PS_OCC)
+ i |= UPS_C_OVERCURRENT_INDICATOR;
+ if (v & EHCI_PS_FPR)
+ i |= UPS_C_SUSPEND;
+ if (sc->sc_isreset)
+ i |= UPS_C_PORT_RESET;
+ USETW(sc->sc_hub_desc.ps.wPortChange, i);
+ len = sizeof(sc->sc_hub_desc.ps);
+ break;
+ case C(UR_SET_DESCRIPTOR, UT_WRITE_CLASS_DEVICE):
+ err = USB_ERR_IOERROR;
+ goto done;
+ case C(UR_SET_FEATURE, UT_WRITE_CLASS_DEVICE):
+ break;
+ case C(UR_SET_FEATURE, UT_WRITE_CLASS_OTHER):
+ if ((index < 1) ||
+ (index > sc->sc_noport)) {
+ err = USB_ERR_IOERROR;
+ goto done;
+ }
+ port = EHCI_PORTSC(index);
+ v = EOREAD4(sc, port) & ~EHCI_PS_CLEAR;
+ switch (value) {
+ case UHF_PORT_ENABLE:
+ EOWRITE4(sc, port, v | EHCI_PS_PE);
+ break;
+ case UHF_PORT_SUSPEND:
+ EOWRITE4(sc, port, v | EHCI_PS_SUSP);
+ break;
+ case UHF_PORT_RESET:
+ DPRINTFN(6, "reset port %d\n", index);
+#ifdef USB_DEBUG
+ if (ehcinohighspeed) {
+ /*
+ * Connect USB device to companion
+ * controller.
+ */
+ ehci_disown(sc, index, 1);
+ break;
+ }
+#endif
+ if (EHCI_PS_IS_LOWSPEED(v) &&
+ (sc->sc_flags & EHCI_SCFLG_TT) == 0) {
+ /* Low speed device, give up ownership. */
+ ehci_disown(sc, index, 1);
+ break;
+ }
+ /* Start reset sequence. */
+ v &= ~(EHCI_PS_PE | EHCI_PS_PR);
+ EOWRITE4(sc, port, v | EHCI_PS_PR);
+
+ /* Wait for reset to complete. */
+ usb_pause_mtx(&sc->sc_bus.bus_mtx,
+ USB_MS_TO_TICKS(USB_PORT_ROOT_RESET_DELAY));
+
+ /* Terminate reset sequence. */
+ if (!(sc->sc_flags & EHCI_SCFLG_NORESTERM))
+ EOWRITE4(sc, port, v);
+
+ /* Wait for HC to complete reset. */
+ usb_pause_mtx(&sc->sc_bus.bus_mtx,
+ USB_MS_TO_TICKS(EHCI_PORT_RESET_COMPLETE));
+
+ v = EOREAD4(sc, port);
+ DPRINTF("ehci after reset, status=0x%08x\n", v);
+ if (v & EHCI_PS_PR) {
+ device_printf(sc->sc_bus.bdev,
+ "port reset timeout\n");
+ err = USB_ERR_TIMEOUT;
+ goto done;
+ }
+ if (!(v & EHCI_PS_PE) &&
+ (sc->sc_flags & EHCI_SCFLG_TT) == 0) {
+				/* Not a high speed device, give up ownership. */
+ ehci_disown(sc, index, 0);
+ break;
+ }
+ sc->sc_isreset = 1;
+ DPRINTF("ehci port %d reset, status = 0x%08x\n",
+ index, v);
+ break;
+
+ case UHF_PORT_POWER:
+ DPRINTFN(3, "set port power %d\n", index);
+ EOWRITE4(sc, port, v | EHCI_PS_PP);
+ break;
+
+ case UHF_PORT_TEST:
+ DPRINTFN(3, "set port test %d\n", index);
+ break;
+
+ case UHF_PORT_INDICATOR:
+ DPRINTFN(3, "set port ind %d\n", index);
+ EOWRITE4(sc, port, v | EHCI_PS_PIC);
+ break;
+
+ default:
+ err = USB_ERR_IOERROR;
+ goto done;
+ }
+ break;
+ case C(UR_CLEAR_TT_BUFFER, UT_WRITE_CLASS_OTHER):
+ case C(UR_RESET_TT, UT_WRITE_CLASS_OTHER):
+ case C(UR_GET_TT_STATE, UT_READ_CLASS_OTHER):
+ case C(UR_STOP_TT, UT_WRITE_CLASS_OTHER):
+ break;
+ default:
+ err = USB_ERR_IOERROR;
+ goto done;
+ }
+done:
+ *plength = len;
+ *pptr = ptr;
+ return (err);
+}
+
+static void
+ehci_xfer_setup(struct usb_setup_params *parm)
+{
+ struct usb_page_search page_info;
+ struct usb_page_cache *pc;
+ ehci_softc_t *sc;
+ struct usb_xfer *xfer;
+ void *last_obj;
+ uint32_t nqtd;
+ uint32_t nqh;
+ uint32_t nsitd;
+ uint32_t nitd;
+ uint32_t n;
+
+ sc = EHCI_BUS2SC(parm->udev->bus);
+ xfer = parm->curr_xfer;
+
+ nqtd = 0;
+ nqh = 0;
+ nsitd = 0;
+ nitd = 0;
+
+ /*
+ * compute maximum number of some structures
+ */
+ if (parm->methods == &ehci_device_ctrl_methods) {
+
+ /*
+ * The proof for the "nqtd" formula is illustrated like
+ * this:
+ *
+ * +------------------------------------+
+ * | |
+ * | |remainder -> |
+ * | +-----+---+ |
+ * | | xxx | x | frm 0 |
+ * | +-----+---++ |
+ * | | xxx | xx | frm 1 |
+ * | +-----+----+ |
+ * | ... |
+ * +------------------------------------+
+ *
+ * "xxx" means a completely full USB transfer descriptor
+ *
+ * "x" and "xx" means a short USB packet
+ *
+		 * For the remainder of a USB transfer modulo
+		 * "max_data_length" we need two USB transfer descriptors:
+		 * one to transfer the remaining data and one to finalise
+		 * with a zero length packet in case the "force_short_xfer"
+		 * flag is set. The second descriptor is only needed when
+		 * the transfer length of the first one is a multiple of
+		 * "max_frame_size". The rest of the needed USB transfer
+		 * descriptors are given by the buffer size divided by the
+		 * maximum data payload.
+ */
+ parm->hc_max_packet_size = 0x400;
+ parm->hc_max_packet_count = 1;
+ parm->hc_max_frame_size = EHCI_QTD_PAYLOAD_MAX;
+ xfer->flags_int.bdma_enable = 1;
+
+ usbd_transfer_setup_sub(parm);
+
+ nqh = 1;
+ nqtd = ((2 * xfer->nframes) + 1 /* STATUS */
+ + (xfer->max_data_length / xfer->max_hc_frame_size));
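+		/*
+		 * Example under the formula above (hypothetical
+		 * numbers): nframes=2 and max_data_length=0x8000 with
+		 * max_hc_frame_size=0x4000 give nqtd = 4 + 1 + 2 = 7
+		 * queue element transfer descriptors.
+		 */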
+
+ } else if (parm->methods == &ehci_device_bulk_methods) {
+
+ parm->hc_max_packet_size = 0x400;
+ parm->hc_max_packet_count = 1;
+ parm->hc_max_frame_size = EHCI_QTD_PAYLOAD_MAX;
+ xfer->flags_int.bdma_enable = 1;
+
+ usbd_transfer_setup_sub(parm);
+
+ nqh = 1;
+ nqtd = ((2 * xfer->nframes)
+ + (xfer->max_data_length / xfer->max_hc_frame_size));
+
+ } else if (parm->methods == &ehci_device_intr_methods) {
+
+ if (parm->speed == USB_SPEED_HIGH) {
+ parm->hc_max_packet_size = 0x400;
+ parm->hc_max_packet_count = 3;
+ } else if (parm->speed == USB_SPEED_FULL) {
+ parm->hc_max_packet_size = USB_FS_BYTES_PER_HS_UFRAME;
+ parm->hc_max_packet_count = 1;
+ } else {
+ parm->hc_max_packet_size = USB_FS_BYTES_PER_HS_UFRAME / 8;
+ parm->hc_max_packet_count = 1;
+ }
+
+ parm->hc_max_frame_size = EHCI_QTD_PAYLOAD_MAX;
+ xfer->flags_int.bdma_enable = 1;
+
+ usbd_transfer_setup_sub(parm);
+
+ nqh = 1;
+ nqtd = ((2 * xfer->nframes)
+ + (xfer->max_data_length / xfer->max_hc_frame_size));
+
+ } else if (parm->methods == &ehci_device_isoc_fs_methods) {
+
+ parm->hc_max_packet_size = 0x3FF;
+ parm->hc_max_packet_count = 1;
+ parm->hc_max_frame_size = 0x3FF;
+ xfer->flags_int.bdma_enable = 1;
+
+ usbd_transfer_setup_sub(parm);
+
+ nsitd = xfer->nframes;
+
+ } else if (parm->methods == &ehci_device_isoc_hs_methods) {
+
+ parm->hc_max_packet_size = 0x400;
+ parm->hc_max_packet_count = 3;
+ parm->hc_max_frame_size = 0xC00;
+ xfer->flags_int.bdma_enable = 1;
+
+ usbd_transfer_setup_sub(parm);
+
+ nitd = ((xfer->nframes + 7) / 8) <<
+ usbd_xfer_get_fps_shift(xfer);
+
+ } else {
+
+ parm->hc_max_packet_size = 0x400;
+ parm->hc_max_packet_count = 1;
+ parm->hc_max_frame_size = 0x400;
+
+ usbd_transfer_setup_sub(parm);
+ }
+
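+/*
+ * The code below runs twice: both DMA descriptor sets are allocated
+ * so that one set can be rebuilt while the hardware may still be
+ * referencing the other (see the "curr_dma_set" toggle at the end).
+ */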
+alloc_dma_set:
+
+ if (parm->err) {
+ return;
+ }
+ /*
+ * Allocate queue heads and transfer descriptors
+ */
+ last_obj = NULL;
+
+ if (usbd_transfer_setup_sub_malloc(
+ parm, &pc, sizeof(ehci_itd_t),
+ EHCI_ITD_ALIGN, nitd)) {
+ parm->err = USB_ERR_NOMEM;
+ return;
+ }
+ if (parm->buf) {
+ for (n = 0; n != nitd; n++) {
+ ehci_itd_t *td;
+
+ usbd_get_page(pc + n, 0, &page_info);
+
+ td = page_info.buffer;
+
+ /* init TD */
+ td->itd_self = htohc32(sc, page_info.physaddr | EHCI_LINK_ITD);
+ td->obj_next = last_obj;
+ td->page_cache = pc + n;
+
+ last_obj = td;
+
+ usb_pc_cpu_flush(pc + n);
+ }
+ }
+ if (usbd_transfer_setup_sub_malloc(
+ parm, &pc, sizeof(ehci_sitd_t),
+ EHCI_SITD_ALIGN, nsitd)) {
+ parm->err = USB_ERR_NOMEM;
+ return;
+ }
+ if (parm->buf) {
+ for (n = 0; n != nsitd; n++) {
+ ehci_sitd_t *td;
+
+ usbd_get_page(pc + n, 0, &page_info);
+
+ td = page_info.buffer;
+
+ /* init TD */
+ td->sitd_self = htohc32(sc, page_info.physaddr | EHCI_LINK_SITD);
+ td->obj_next = last_obj;
+ td->page_cache = pc + n;
+
+ last_obj = td;
+
+ usb_pc_cpu_flush(pc + n);
+ }
+ }
+ if (usbd_transfer_setup_sub_malloc(
+ parm, &pc, sizeof(ehci_qtd_t),
+ EHCI_QTD_ALIGN, nqtd)) {
+ parm->err = USB_ERR_NOMEM;
+ return;
+ }
+ if (parm->buf) {
+ for (n = 0; n != nqtd; n++) {
+ ehci_qtd_t *qtd;
+
+ usbd_get_page(pc + n, 0, &page_info);
+
+ qtd = page_info.buffer;
+
+ /* init TD */
+ qtd->qtd_self = htohc32(sc, page_info.physaddr);
+ qtd->obj_next = last_obj;
+ qtd->page_cache = pc + n;
+
+ last_obj = qtd;
+
+ usb_pc_cpu_flush(pc + n);
+ }
+ }
+ xfer->td_start[xfer->flags_int.curr_dma_set] = last_obj;
+
+ last_obj = NULL;
+
+ if (usbd_transfer_setup_sub_malloc(
+ parm, &pc, sizeof(ehci_qh_t),
+ EHCI_QH_ALIGN, nqh)) {
+ parm->err = USB_ERR_NOMEM;
+ return;
+ }
+ if (parm->buf) {
+ for (n = 0; n != nqh; n++) {
+ ehci_qh_t *qh;
+
+ usbd_get_page(pc + n, 0, &page_info);
+
+ qh = page_info.buffer;
+
+ /* init QH */
+ qh->qh_self = htohc32(sc, page_info.physaddr | EHCI_LINK_QH);
+ qh->obj_next = last_obj;
+ qh->page_cache = pc + n;
+
+ last_obj = qh;
+
+ usb_pc_cpu_flush(pc + n);
+ }
+ }
+ xfer->qh_start[xfer->flags_int.curr_dma_set] = last_obj;
+
+ if (!xfer->flags_int.curr_dma_set) {
+ xfer->flags_int.curr_dma_set = 1;
+ goto alloc_dma_set;
+ }
+}
+
+static void
+ehci_xfer_unsetup(struct usb_xfer *xfer)
+{
+ return;
+}
+
+static void
+ehci_ep_init(struct usb_device *udev, struct usb_endpoint_descriptor *edesc,
+ struct usb_endpoint *ep)
+{
+ ehci_softc_t *sc = EHCI_BUS2SC(udev->bus);
+
+ DPRINTFN(2, "endpoint=%p, addr=%d, endpt=%d, mode=%d (%d)\n",
+ ep, udev->address,
+ edesc->bEndpointAddress, udev->flags.usb_mode,
+ sc->sc_addr);
+
+ if (udev->flags.usb_mode != USB_MODE_HOST) {
+ /* not supported */
+ return;
+ }
+ if (udev->device_index != sc->sc_addr) {
+
+ if ((udev->speed != USB_SPEED_HIGH) &&
+ ((udev->hs_hub_addr == 0) ||
+ (udev->hs_port_no == 0) ||
+ (udev->parent_hs_hub == NULL) ||
+ (udev->parent_hs_hub->hub == NULL))) {
+ /* We need a transaction translator */
+ goto done;
+ }
+ switch (edesc->bmAttributes & UE_XFERTYPE) {
+ case UE_CONTROL:
+ ep->methods = &ehci_device_ctrl_methods;
+ break;
+ case UE_INTERRUPT:
+ ep->methods = &ehci_device_intr_methods;
+ break;
+ case UE_ISOCHRONOUS:
+ if (udev->speed == USB_SPEED_HIGH) {
+ ep->methods = &ehci_device_isoc_hs_methods;
+ } else if (udev->speed == USB_SPEED_FULL) {
+ ep->methods = &ehci_device_isoc_fs_methods;
+ }
+ break;
+ case UE_BULK:
+ ep->methods = &ehci_device_bulk_methods;
+ break;
+ default:
+ /* do nothing */
+ break;
+ }
+ }
+done:
+ return;
+}
+
+static void
+ehci_get_dma_delay(struct usb_device *udev, uint32_t *pus)
+{
+ /*
+ * Wait until the hardware has finished any possible use of
+ * the transfer descriptor(s) and QH
+ */
+ *pus = (188); /* microseconds */
+}
+
+static void
+ehci_device_resume(struct usb_device *udev)
+{
+ ehci_softc_t *sc = EHCI_BUS2SC(udev->bus);
+ struct usb_xfer *xfer;
+ struct usb_pipe_methods *methods;
+
+ DPRINTF("\n");
+
+ USB_BUS_LOCK(udev->bus);
+
+ TAILQ_FOREACH(xfer, &sc->sc_bus.intr_q.head, wait_entry) {
+
+ if (xfer->xroot->udev == udev) {
+
+ methods = xfer->endpoint->methods;
+
+ if ((methods == &ehci_device_bulk_methods) ||
+ (methods == &ehci_device_ctrl_methods)) {
+ EHCI_APPEND_QH(xfer->qh_start[xfer->flags_int.curr_dma_set],
+ sc->sc_async_p_last);
+ }
+ if (methods == &ehci_device_intr_methods) {
+ EHCI_APPEND_QH(xfer->qh_start[xfer->flags_int.curr_dma_set],
+ sc->sc_intr_p_last[xfer->qh_pos]);
+ }
+ }
+ }
+
+ USB_BUS_UNLOCK(udev->bus);
+
+ return;
+}
+
+static void
+ehci_device_suspend(struct usb_device *udev)
+{
+ ehci_softc_t *sc = EHCI_BUS2SC(udev->bus);
+ struct usb_xfer *xfer;
+ struct usb_pipe_methods *methods;
+
+ DPRINTF("\n");
+
+ USB_BUS_LOCK(udev->bus);
+
+ TAILQ_FOREACH(xfer, &sc->sc_bus.intr_q.head, wait_entry) {
+
+ if (xfer->xroot->udev == udev) {
+
+ methods = xfer->endpoint->methods;
+
+ if ((methods == &ehci_device_bulk_methods) ||
+ (methods == &ehci_device_ctrl_methods)) {
+ EHCI_REMOVE_QH(xfer->qh_start[xfer->flags_int.curr_dma_set],
+ sc->sc_async_p_last);
+ }
+ if (methods == &ehci_device_intr_methods) {
+ EHCI_REMOVE_QH(xfer->qh_start[xfer->flags_int.curr_dma_set],
+ sc->sc_intr_p_last[xfer->qh_pos]);
+ }
+ }
+ }
+
+ USB_BUS_UNLOCK(udev->bus);
+
+ return;
+}
+
+static void
+ehci_set_hw_power(struct usb_bus *bus)
+{
+ ehci_softc_t *sc = EHCI_BUS2SC(bus);
+ uint32_t temp;
+ uint32_t flags;
+
+ DPRINTF("\n");
+
+ USB_BUS_LOCK(bus);
+
+ flags = bus->hw_power_state;
+
+ temp = EOREAD4(sc, EHCI_USBCMD);
+
+ temp &= ~(EHCI_CMD_ASE | EHCI_CMD_PSE);
+
+ if (flags & (USB_HW_POWER_CONTROL |
+ USB_HW_POWER_BULK)) {
+ DPRINTF("Async is active\n");
+ temp |= EHCI_CMD_ASE;
+ }
+ if (flags & (USB_HW_POWER_INTERRUPT |
+ USB_HW_POWER_ISOC)) {
+ DPRINTF("Periodic is active\n");
+ temp |= EHCI_CMD_PSE;
+ }
+ EOWRITE4(sc, EHCI_USBCMD, temp);
+
+ USB_BUS_UNLOCK(bus);
+
+ return;
+}
+
+struct usb_bus_methods ehci_bus_methods =
+{
+ .endpoint_init = ehci_ep_init,
+ .xfer_setup = ehci_xfer_setup,
+ .xfer_unsetup = ehci_xfer_unsetup,
+ .get_dma_delay = ehci_get_dma_delay,
+ .device_resume = ehci_device_resume,
+ .device_suspend = ehci_device_suspend,
+ .set_hw_power = ehci_set_hw_power,
+ .roothub_exec = ehci_roothub_exec,
+ .xfer_poll = ehci_do_poll,
+};
diff --git a/rtems/freebsd/dev/usb/controller/ehci.h b/rtems/freebsd/dev/usb/controller/ehci.h
new file mode 100644
index 00000000..26ba336e
--- /dev/null
+++ b/rtems/freebsd/dev/usb/controller/ehci.h
@@ -0,0 +1,486 @@
+/* $FreeBSD$ */
+/*-
+ * Copyright (c) 2001 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Lennart Augustsson (lennart@augustsson.net).
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the NetBSD
+ * Foundation, Inc. and its contributors.
+ * 4. Neither the name of The NetBSD Foundation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _EHCI_HH_
+#define _EHCI_HH_
+
+#define EHCI_MAX_DEVICES MIN(USB_MAX_DEVICES, 128)
+
+/*
+ * Alignment NOTE: structures must be aligned so that the hardware can index
+ * without performing addition.
+ */
+#define EHCI_FRAMELIST_ALIGN 0x1000 /* bytes */
+#define EHCI_FRAMELIST_COUNT 1024 /* units */
+#define EHCI_VIRTUAL_FRAMELIST_COUNT 128 /* units */
+
+#if ((8*EHCI_VIRTUAL_FRAMELIST_COUNT) < USB_MAX_HS_ISOC_FRAMES_PER_XFER)
+#error "maximum number of high-speed isochronous frames is higher than supported!"
+#endif
+
+#if (EHCI_VIRTUAL_FRAMELIST_COUNT < USB_MAX_FS_ISOC_FRAMES_PER_XFER)
+#error "maximum number of full-speed isochronous frames is higher than supported!"
+#endif
+
+/* Link types */
+#define EHCI_LINK_TERMINATE 0x00000001
+#define EHCI_LINK_TYPE(x) ((x) & 0x00000006)
+#define EHCI_LINK_ITD 0x0
+#define EHCI_LINK_QH 0x2
+#define EHCI_LINK_SITD 0x4
+#define EHCI_LINK_FSTN 0x6
+#define EHCI_LINK_ADDR(x) ((x) &~ 0x1f)
+
+/* Structures alignment (bytes) */
+#define EHCI_ITD_ALIGN 128
+#define EHCI_SITD_ALIGN 64
+#define EHCI_QTD_ALIGN 64
+#define EHCI_QH_ALIGN 128
+#define EHCI_FSTN_ALIGN 32
+/* Data buffers are divided into one or more pages */
+#define EHCI_PAGE_SIZE 0x1000
+#if ((USB_PAGE_SIZE < EHCI_PAGE_SIZE) || (EHCI_PAGE_SIZE == 0) || \
+ (USB_PAGE_SIZE < EHCI_ITD_ALIGN) || (EHCI_ITD_ALIGN == 0) || \
+ (USB_PAGE_SIZE < EHCI_SITD_ALIGN) || (EHCI_SITD_ALIGN == 0) || \
+ (USB_PAGE_SIZE < EHCI_QTD_ALIGN) || (EHCI_QTD_ALIGN == 0) || \
+ (USB_PAGE_SIZE < EHCI_QH_ALIGN) || (EHCI_QH_ALIGN == 0) || \
+ (USB_PAGE_SIZE < EHCI_FSTN_ALIGN) || (EHCI_FSTN_ALIGN == 0))
+#error "Invalid USB page size!"
+#endif
+
+
+/*
+ * Isochronous Transfer Descriptor. This descriptor is used for high speed
+ * transfers only.
+ */
+struct ehci_itd {
+ volatile uint32_t itd_next;
+ volatile uint32_t itd_status[8];
+#define EHCI_ITD_SET_LEN(x) ((x) << 16)
+#define EHCI_ITD_GET_LEN(x) (((x) >> 16) & 0xFFF)
+#define EHCI_ITD_IOC (1 << 15)
+#define EHCI_ITD_SET_PG(x) ((x) << 12)
+#define EHCI_ITD_GET_PG(x) (((x) >> 12) & 0x7)
+#define EHCI_ITD_SET_OFFS(x) (x)
+#define EHCI_ITD_GET_OFFS(x) (((x) >> 0) & 0xFFF)
+#define EHCI_ITD_ACTIVE (1 << 31)
+#define EHCI_ITD_DATABUFERR (1 << 30)
+#define EHCI_ITD_BABBLE (1 << 29)
+#define EHCI_ITD_XACTERR (1 << 28)
+ volatile uint32_t itd_bp[7];
+ /* itd_bp[0] */
+#define EHCI_ITD_SET_ADDR(x) (x)
+#define EHCI_ITD_GET_ADDR(x) (((x) >> 0) & 0x7F)
+#define EHCI_ITD_SET_ENDPT(x) ((x) << 8)
+#define EHCI_ITD_GET_ENDPT(x) (((x) >> 8) & 0xF)
+ /* itd_bp[1] */
+#define EHCI_ITD_SET_DIR_IN (1 << 11)
+#define EHCI_ITD_SET_DIR_OUT (0 << 11)
+#define EHCI_ITD_SET_MPL(x) (x)
+#define EHCI_ITD_GET_MPL(x) (((x) >> 0) & 0x7FF)
+ volatile uint32_t itd_bp_hi[7];
+/*
+ * Extra information needed:
+ */
+ uint32_t itd_self;
+ struct ehci_itd *next;
+ struct ehci_itd *prev;
+ struct ehci_itd *obj_next;
+ struct usb_page_cache *page_cache;
+} __aligned(EHCI_ITD_ALIGN);
+
+typedef struct ehci_itd ehci_itd_t;
+
+/*
+ * Split Transaction Isochronous Transfer Descriptor. This descriptor is used
+ * for full speed transfers only.
+ */
+struct ehci_sitd {
+ volatile uint32_t sitd_next;
+ volatile uint32_t sitd_portaddr;
+#define EHCI_SITD_SET_DIR_OUT (0 << 31)
+#define EHCI_SITD_SET_DIR_IN (1 << 31)
+#define EHCI_SITD_SET_ADDR(x) (x)
+#define EHCI_SITD_GET_ADDR(x) ((x) & 0x7F)
+#define EHCI_SITD_SET_ENDPT(x) ((x) << 8)
+#define EHCI_SITD_GET_ENDPT(x) (((x) >> 8) & 0xF)
+#define EHCI_SITD_GET_DIR(x) ((x) >> 31)
+#define EHCI_SITD_SET_PORT(x) ((x) << 24)
+#define EHCI_SITD_GET_PORT(x) (((x) >> 24) & 0x7F)
+#define EHCI_SITD_SET_HUBA(x) ((x) << 16)
+#define EHCI_SITD_GET_HUBA(x) (((x) >> 16) & 0x7F)
+ volatile uint32_t sitd_mask;
+#define EHCI_SITD_SET_SMASK(x) (x)
+#define EHCI_SITD_SET_CMASK(x) ((x) << 8)
+ volatile uint32_t sitd_status;
+#define EHCI_SITD_COMPLETE_SPLIT (1<<1)
+#define EHCI_SITD_START_SPLIT (0<<1)
+#define EHCI_SITD_MISSED_MICRO_FRAME (1<<2)
+#define EHCI_SITD_XACTERR (1<<3)
+#define EHCI_SITD_BABBLE (1<<4)
+#define EHCI_SITD_DATABUFERR (1<<5)
+#define EHCI_SITD_ERROR (1<<6)
+#define EHCI_SITD_ACTIVE (1<<7)
+#define EHCI_SITD_IOC (1<<31)
+#define EHCI_SITD_SET_LEN(len) ((len)<<16)
+#define EHCI_SITD_GET_LEN(x) (((x)>>16) & 0x3FF)
+ volatile uint32_t sitd_bp[2];
+ volatile uint32_t sitd_back;
+ volatile uint32_t sitd_bp_hi[2];
+/*
+ * Extra information needed:
+ */
+ uint32_t sitd_self;
+ struct ehci_sitd *next;
+ struct ehci_sitd *prev;
+ struct ehci_sitd *obj_next;
+ struct usb_page_cache *page_cache;
+} __aligned(EHCI_SITD_ALIGN);
+
+typedef struct ehci_sitd ehci_sitd_t;
+
+/* Queue Element Transfer Descriptor */
+struct ehci_qtd {
+ volatile uint32_t qtd_next;
+ volatile uint32_t qtd_altnext;
+ volatile uint32_t qtd_status;
+#define EHCI_QTD_GET_STATUS(x) (((x) >> 0) & 0xff)
+#define EHCI_QTD_SET_STATUS(x) ((x) << 0)
+#define EHCI_QTD_ACTIVE 0x80
+#define EHCI_QTD_HALTED 0x40
+#define EHCI_QTD_BUFERR 0x20
+#define EHCI_QTD_BABBLE 0x10
+#define EHCI_QTD_XACTERR 0x08
+#define EHCI_QTD_MISSEDMICRO 0x04
+#define EHCI_QTD_SPLITXSTATE 0x02
+#define EHCI_QTD_PINGSTATE 0x01
+#define EHCI_QTD_STATERRS 0x74
+#define EHCI_QTD_GET_PID(x) (((x) >> 8) & 0x3)
+#define EHCI_QTD_SET_PID(x) ((x) << 8)
+#define EHCI_QTD_PID_OUT 0x0
+#define EHCI_QTD_PID_IN 0x1
+#define EHCI_QTD_PID_SETUP 0x2
+#define EHCI_QTD_GET_CERR(x) (((x) >> 10) & 0x3)
+#define EHCI_QTD_SET_CERR(x) ((x) << 10)
+#define EHCI_QTD_GET_C_PAGE(x) (((x) >> 12) & 0x7)
+#define EHCI_QTD_SET_C_PAGE(x) ((x) << 12)
+#define EHCI_QTD_GET_IOC(x) (((x) >> 15) & 0x1)
+#define EHCI_QTD_IOC 0x00008000
+#define EHCI_QTD_GET_BYTES(x) (((x) >> 16) & 0x7fff)
+#define EHCI_QTD_SET_BYTES(x) ((x) << 16)
+#define EHCI_QTD_GET_TOGGLE(x) (((x) >> 31) & 0x1)
+#define EHCI_QTD_SET_TOGGLE(x) ((x) << 31)
+#define EHCI_QTD_TOGGLE_MASK 0x80000000
+#define EHCI_QTD_NBUFFERS 5
+#define EHCI_QTD_PAYLOAD_MAX ((EHCI_QTD_NBUFFERS-1)*EHCI_PAGE_SIZE)
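+/*
+ * With worst-case buffer alignment the first of the five buffer
+ * pointers may address only a single byte, so just four full pages
+ * (16K) of payload are guaranteed per qTD.
+ */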
+ volatile uint32_t qtd_buffer[EHCI_QTD_NBUFFERS];
+ volatile uint32_t qtd_buffer_hi[EHCI_QTD_NBUFFERS];
+/*
+ * Extra information needed:
+ */
+ struct ehci_qtd *alt_next;
+ struct ehci_qtd *obj_next;
+ struct usb_page_cache *page_cache;
+ uint32_t qtd_self;
+ uint16_t len;
+} __aligned(EHCI_QTD_ALIGN);
+
+typedef struct ehci_qtd ehci_qtd_t;
+
+/* Queue Head Sub Structure */
+struct ehci_qh_sub {
+ volatile uint32_t qtd_next;
+ volatile uint32_t qtd_altnext;
+ volatile uint32_t qtd_status;
+ volatile uint32_t qtd_buffer[EHCI_QTD_NBUFFERS];
+ volatile uint32_t qtd_buffer_hi[EHCI_QTD_NBUFFERS];
+} __aligned(4);
+
+/* Queue Head */
+struct ehci_qh {
+ volatile uint32_t qh_link;
+ volatile uint32_t qh_endp;
+#define EHCI_QH_GET_ADDR(x) (((x) >> 0) & 0x7f) /* endpoint addr */
+#define EHCI_QH_SET_ADDR(x) (x)
+#define EHCI_QH_ADDRMASK 0x0000007f
+#define EHCI_QH_GET_INACT(x) (((x) >> 7) & 0x01) /* inactivate on next */
+#define EHCI_QH_INACT 0x00000080
+#define EHCI_QH_GET_ENDPT(x) (((x) >> 8) & 0x0f) /* endpoint no */
+#define EHCI_QH_SET_ENDPT(x) ((x) << 8)
+#define EHCI_QH_GET_EPS(x) (((x) >> 12) & 0x03) /* endpoint speed */
+#define EHCI_QH_SET_EPS(x) ((x) << 12)
+#define EHCI_QH_SPEED_FULL 0x0
+#define EHCI_QH_SPEED_LOW 0x1
+#define EHCI_QH_SPEED_HIGH 0x2
+#define EHCI_QH_GET_DTC(x) (((x) >> 14) & 0x01) /* data toggle control */
+#define EHCI_QH_DTC 0x00004000
+#define EHCI_QH_GET_HRECL(x) (((x) >> 15) & 0x01) /* head of reclamation */
+#define EHCI_QH_HRECL 0x00008000
+#define EHCI_QH_GET_MPL(x) (((x) >> 16) & 0x7ff) /* max packet len */
+#define EHCI_QH_SET_MPL(x) ((x) << 16)
+#define EHCI_QH_MPLMASK 0x07ff0000
+#define EHCI_QH_GET_CTL(x) (((x) >> 27) & 0x01) /* control endpoint */
+#define EHCI_QH_CTL 0x08000000
+#define EHCI_QH_GET_NRL(x) (((x) >> 28) & 0x0f) /* NAK reload */
+#define EHCI_QH_SET_NRL(x) ((x) << 28)
+ volatile uint32_t qh_endphub;
+#define EHCI_QH_GET_SMASK(x) (((x) >> 0) & 0xff) /* intr sched mask */
+#define EHCI_QH_SET_SMASK(x) ((x) << 0)
+#define EHCI_QH_GET_CMASK(x) (((x) >> 8) & 0xff) /* split completion mask */
+#define EHCI_QH_SET_CMASK(x) ((x) << 8)
+#define EHCI_QH_GET_HUBA(x) (((x) >> 16) & 0x7f) /* hub address */
+#define EHCI_QH_SET_HUBA(x) ((x) << 16)
+#define EHCI_QH_GET_PORT(x) (((x) >> 23) & 0x7f) /* hub port */
+#define EHCI_QH_SET_PORT(x) ((x) << 23)
+#define EHCI_QH_GET_MULT(x) (((x) >> 30) & 0x03) /* pipe multiplier */
+#define EHCI_QH_SET_MULT(x) ((x) << 30)
+ volatile uint32_t qh_curqtd;
+ struct ehci_qh_sub qh_qtd;
+/*
+ * Extra information needed:
+ */
+ struct ehci_qh *next;
+ struct ehci_qh *prev;
+ struct ehci_qh *obj_next;
+ struct usb_page_cache *page_cache;
+ uint32_t qh_self;
+} __aligned(EHCI_QH_ALIGN);
+
+typedef struct ehci_qh ehci_qh_t;
+
+/* Periodic Frame Span Traversal Node */
+struct ehci_fstn {
+ volatile uint32_t fstn_link;
+ volatile uint32_t fstn_back;
+} __aligned(EHCI_FSTN_ALIGN);
+
+typedef struct ehci_fstn ehci_fstn_t;
+
+struct ehci_hw_softc {
+ struct usb_page_cache pframes_pc;
+ struct usb_page_cache terminate_pc;
+ struct usb_page_cache async_start_pc;
+ struct usb_page_cache intr_start_pc[EHCI_VIRTUAL_FRAMELIST_COUNT];
+ struct usb_page_cache isoc_hs_start_pc[EHCI_VIRTUAL_FRAMELIST_COUNT];
+ struct usb_page_cache isoc_fs_start_pc[EHCI_VIRTUAL_FRAMELIST_COUNT];
+
+ struct usb_page pframes_pg;
+ struct usb_page terminate_pg;
+ struct usb_page async_start_pg;
+ struct usb_page intr_start_pg[EHCI_VIRTUAL_FRAMELIST_COUNT];
+ struct usb_page isoc_hs_start_pg[EHCI_VIRTUAL_FRAMELIST_COUNT];
+ struct usb_page isoc_fs_start_pg[EHCI_VIRTUAL_FRAMELIST_COUNT];
+};
+
+struct ehci_config_desc {
+ struct usb_config_descriptor confd;
+ struct usb_interface_descriptor ifcd;
+ struct usb_endpoint_descriptor endpd;
+} __packed;
+
+union ehci_hub_desc {
+ struct usb_status stat;
+ struct usb_port_status ps;
+ struct usb_hub_descriptor hubd;
+ uint8_t temp[128];
+};
+
+typedef struct ehci_softc {
+ struct ehci_hw_softc sc_hw;
+ struct usb_bus sc_bus; /* base device */
+ struct usb_callout sc_tmo_pcd;
+ struct usb_callout sc_tmo_poll;
+ union ehci_hub_desc sc_hub_desc;
+
+ struct usb_device *sc_devices[EHCI_MAX_DEVICES];
+ struct resource *sc_io_res;
+ struct resource *sc_irq_res;
+ struct ehci_qh *sc_async_p_last;
+ struct ehci_qh *sc_intr_p_last[EHCI_VIRTUAL_FRAMELIST_COUNT];
+ struct ehci_sitd *sc_isoc_fs_p_last[EHCI_VIRTUAL_FRAMELIST_COUNT];
+ struct ehci_itd *sc_isoc_hs_p_last[EHCI_VIRTUAL_FRAMELIST_COUNT];
+ void *sc_intr_hdl;
+ bus_size_t sc_io_size;
+ bus_space_tag_t sc_io_tag;
+ bus_space_handle_t sc_io_hdl;
+
+ uint32_t sc_terminate_self; /* TD short packet termination pointer */
+ uint32_t sc_eintrs;
+ uint32_t sc_cmd; /* shadow of cmd register during
+ * suspend */
+
+ uint16_t sc_intr_stat[EHCI_VIRTUAL_FRAMELIST_COUNT];
+ uint16_t sc_id_vendor; /* vendor ID for root hub */
+ uint16_t sc_flags; /* chip specific flags */
+#define EHCI_SCFLG_SETMODE 0x0001 /* set bridge mode again after init */
+#define EHCI_SCFLG_FORCESPEED 0x0002 /* force speed */
+#define EHCI_SCFLG_NORESTERM 0x0004 /* don't terminate reset sequence */
+#define EHCI_SCFLG_BIGEDESC 0x0008 /* big-endian byte order descriptors */
+#define EHCI_SCFLG_BIGEMMIO 0x0010 /* big-endian byte order MMIO */
+#define EHCI_SCFLG_TT 0x0020 /* transaction translator present */
+#define EHCI_SCFLG_LOSTINTRBUG 0x0040 /* workaround for VIA / ATI chipsets */
+#define EHCI_SCFLG_IAADBUG 0x0080 /* workaround for nVidia chipsets */
+
+ uint8_t sc_offs; /* offset to operational registers */
+ uint8_t sc_doorbell_disable; /* set on doorbell failure */
+ uint8_t sc_noport;
+ uint8_t sc_addr; /* device address */
+ uint8_t sc_conf; /* device configuration */
+ uint8_t sc_isreset;
+ uint8_t sc_hub_idata[8];
+
+ char sc_vendor[16]; /* vendor string for root hub */
+
+} ehci_softc_t;
+
+#define EREAD1(sc, a) bus_space_read_1((sc)->sc_io_tag, (sc)->sc_io_hdl, (a))
+#ifndef __rtems__
+#define EREAD2(sc, a) bus_space_read_2((sc)->sc_io_tag, (sc)->sc_io_hdl, (a))
+#define EREAD4(sc, a) bus_space_read_4((sc)->sc_io_tag, (sc)->sc_io_hdl, (a))
+#else /* __rtems__ */
+#define EREAD2(sc, a) le16toh(bus_space_read_2((sc)->sc_io_tag, (sc)->sc_io_hdl, (a)))
+#define EREAD4(sc, a) le32toh(bus_space_read_4((sc)->sc_io_tag, (sc)->sc_io_hdl, (a)))
+#endif /* __rtems__ */
+#define EWRITE1(sc, a, x) \
+ bus_space_write_1((sc)->sc_io_tag, (sc)->sc_io_hdl, (a), (x))
+#ifndef __rtems__
+#define EWRITE2(sc, a, x) \
+ bus_space_write_2((sc)->sc_io_tag, (sc)->sc_io_hdl, (a), (x))
+#define EWRITE4(sc, a, x) \
+ bus_space_write_4((sc)->sc_io_tag, (sc)->sc_io_hdl, (a), (x))
+#else /* __rtems__ */
+#define EWRITE2(sc, a, x) \
+ bus_space_write_2((sc)->sc_io_tag, (sc)->sc_io_hdl, (a), htole16(x))
+#define EWRITE4(sc, a, x) \
+ bus_space_write_4((sc)->sc_io_tag, (sc)->sc_io_hdl, (a), htole32(x))
+#endif /* __rtems__ */
+#define EOREAD1(sc, a) \
+ bus_space_read_1((sc)->sc_io_tag, (sc)->sc_io_hdl, (sc)->sc_offs+(a))
+#ifndef __rtems__
+#define EOREAD2(sc, a) \
+ bus_space_read_2((sc)->sc_io_tag, (sc)->sc_io_hdl, (sc)->sc_offs+(a))
+#define EOREAD4(sc, a) \
+ bus_space_read_4((sc)->sc_io_tag, (sc)->sc_io_hdl, (sc)->sc_offs+(a))
+#else /* __rtems__ */
+#define EOREAD2(sc, a) \
+ le16toh(bus_space_read_2((sc)->sc_io_tag, (sc)->sc_io_hdl, (sc)->sc_offs+(a)))
+#define EOREAD4(sc, a) \
+ le32toh(bus_space_read_4((sc)->sc_io_tag, (sc)->sc_io_hdl, (sc)->sc_offs+(a)))
+#endif /* __rtems__ */
+#define EOWRITE1(sc, a, x) \
+ bus_space_write_1((sc)->sc_io_tag, (sc)->sc_io_hdl, (sc)->sc_offs+(a), (x))
+#ifndef __rtems__
+#define EOWRITE2(sc, a, x) \
+ bus_space_write_2((sc)->sc_io_tag, (sc)->sc_io_hdl, (sc)->sc_offs+(a), (x))
+#define EOWRITE4(sc, a, x) \
+ bus_space_write_4((sc)->sc_io_tag, (sc)->sc_io_hdl, (sc)->sc_offs+(a), (x))
+#else /* __rtems__ */
+#define EOWRITE2(sc, a, x) \
+ bus_space_write_2((sc)->sc_io_tag, (sc)->sc_io_hdl, (sc)->sc_offs+(a), htole16(x))
+#define EOWRITE4(sc, a, x) \
+ bus_space_write_4((sc)->sc_io_tag, (sc)->sc_io_hdl, (sc)->sc_offs+(a), htole32(x))
+#endif /* __rtems__ */
+
+#ifdef USB_EHCI_BIG_ENDIAN_DESC
+/*
+ * Handle byte order conversion between host and ``host controller''.
+ * Typically the latter is little-endian, but some controllers require
+ * big-endian, in which case we may need to swap manually.
+ */
+static __inline uint32_t
+htohc32(const struct ehci_softc *sc, const uint32_t v)
+{
+ return sc->sc_flags & EHCI_SCFLG_BIGEDESC ? htobe32(v) : htole32(v);
+}
+
+static __inline uint16_t
+htohc16(const struct ehci_softc *sc, const uint16_t v)
+{
+ return sc->sc_flags & EHCI_SCFLG_BIGEDESC ? htobe16(v) : htole16(v);
+}
+
+static __inline uint32_t
+hc32toh(const struct ehci_softc *sc, const uint32_t v)
+{
+ return sc->sc_flags & EHCI_SCFLG_BIGEDESC ? be32toh(v) : le32toh(v);
+}
+
+static __inline uint16_t
+hc16toh(const struct ehci_softc *sc, const uint16_t v)
+{
+ return sc->sc_flags & EHCI_SCFLG_BIGEDESC ? be16toh(v) : le16toh(v);
+}
+#else
+/*
+ * Normal little-endian only conversion routines.
+ */
+static __inline uint32_t
+htohc32(const struct ehci_softc *sc, const uint32_t v)
+{
+ return htole32(v);
+}
+
+static __inline uint16_t
+htohc16(const struct ehci_softc *sc, const uint16_t v)
+{
+ return htole16(v);
+}
+
+static __inline uint32_t
+hc32toh(const struct ehci_softc *sc, const uint32_t v)
+{
+ return le32toh(v);
+}
+
+static __inline uint16_t
+hc16toh(const struct ehci_softc *sc, const uint16_t v)
+{
+ return le16toh(v);
+}
+#endif
+
+usb_bus_mem_cb_t ehci_iterate_hw_softc;
+
+usb_error_t ehci_reset(ehci_softc_t *sc);
+usb_error_t ehci_init(ehci_softc_t *sc);
+void ehci_detach(struct ehci_softc *sc);
+void ehci_suspend(struct ehci_softc *sc);
+void ehci_resume(struct ehci_softc *sc);
+void ehci_shutdown(ehci_softc_t *sc);
+void ehci_interrupt(ehci_softc_t *sc);
+
+#endif /* _EHCI_HH_ */
diff --git a/rtems/freebsd/dev/usb/controller/ehcireg.h b/rtems/freebsd/dev/usb/controller/ehcireg.h
new file mode 100644
index 00000000..7677dfad
--- /dev/null
+++ b/rtems/freebsd/dev/usb/controller/ehcireg.h
@@ -0,0 +1,171 @@
+/* $FreeBSD$ */
+/*-
+ * Copyright (c) 2001 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Lennart Augustsson (lennart@augustsson.net).
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _EHCIREG_HH_
+#define _EHCIREG_HH_
+
+/* PCI config registers */
+#define PCI_CBMEM 0x10 /* configuration base MEM */
+#define PCI_INTERFACE_EHCI 0x20
+#define PCI_USBREV 0x60 /* RO USB protocol revision */
+#define PCI_USB_REV_MASK 0xff
+#define PCI_USB_REV_PRE_1_0 0x00
+#define PCI_USB_REV_1_0 0x10
+#define PCI_USB_REV_1_1 0x11
+#define PCI_USB_REV_2_0 0x20
+#define PCI_EHCI_FLADJ 0x61 /* RW Frame len adj, SOF=59488+6*fladj */
+#define PCI_EHCI_PORTWAKECAP 0x62 /* RW Port wake caps (opt) */
+
+/* EHCI Extended Capabilities */
+#define EHCI_EC_LEGSUP 0x01
+#define EHCI_EECP_NEXT(x) (((x) >> 8) & 0xff)
+#define EHCI_EECP_ID(x) ((x) & 0xff)
+
+/* Legacy support extended capability */
+#define EHCI_LEGSUP_BIOS_SEM 0x02
+#define EHCI_LEGSUP_OS_SEM 0x03
+#define EHCI_LEGSUP_USBLEGCTLSTS 0x04
+
+/* EHCI capability registers */
+#define EHCI_CAPLEN_HCIVERSION 0x00 /* RO Capability register length
+ * (least-significant byte) and
+ * interface version number (two
+ * most-significant bytes)
+ */
+#define EHCI_CAPLENGTH(x) ((x) & 0xff)
+#define EHCI_HCIVERSION(x) (((x) >> 16) & 0xffff)
+#define EHCI_HCSPARAMS 0x04 /* RO Structural parameters */
+#define EHCI_HCS_DEBUGPORT(x) (((x) >> 20) & 0xf)
+#define EHCI_HCS_P_INDICATOR(x) ((x) & 0x10000)
+#define EHCI_HCS_N_CC(x) (((x) >> 12) & 0xf) /* # of companion ctlrs */
+#define EHCI_HCS_N_PCC(x) (((x) >> 8) & 0xf) /* # of ports per comp. */
+#define EHCI_HCS_PPC(x) ((x) & 0x10) /* port power control */
+#define EHCI_HCS_N_PORTS(x) ((x) & 0xf) /* # of ports */
+#define EHCI_HCCPARAMS 0x08 /* RO Capability parameters */
+#define EHCI_HCC_EECP(x) (((x) >> 8) & 0xff) /* extended ports caps */
+#define EHCI_HCC_IST(x) (((x) >> 4) & 0xf) /* isoc sched threshold */
+#define EHCI_HCC_ASPC(x) ((x) & 0x4) /* async sched park cap */
+#define EHCI_HCC_PFLF(x) ((x) & 0x2) /* prog frame list flag */
+#define EHCI_HCC_64BIT(x) ((x) & 0x1) /* 64 bit address cap */
+#define EHCI_HCSP_PORTROUTE 0x0c /* RO Companion port route description */
+
+/* EHCI operational registers. Offset given by EHCI_CAPLENGTH register */
+#define EHCI_USBCMD 0x00 /* RO, RW, WO Command register */
+#define EHCI_CMD_ITC_M 0x00ff0000 /* RW interrupt threshold ctrl */
+#define EHCI_CMD_ITC_1 0x00010000
+#define EHCI_CMD_ITC_2 0x00020000
+#define EHCI_CMD_ITC_4 0x00040000
+#define EHCI_CMD_ITC_8 0x00080000
+#define EHCI_CMD_ITC_16 0x00100000
+#define EHCI_CMD_ITC_32 0x00200000
+#define EHCI_CMD_ITC_64 0x00400000
+#define EHCI_CMD_ASPME 0x00000800 /* RW/RO async park enable */
+#define EHCI_CMD_ASPMC 0x00000300 /* RW/RO async park count */
+#define EHCI_CMD_LHCR 0x00000080 /* RW light host ctrl reset */
+#define EHCI_CMD_IAAD 0x00000040 /* RW intr on async adv door
+ * bell */
+#define EHCI_CMD_ASE 0x00000020 /* RW async sched enable */
+#define EHCI_CMD_PSE 0x00000010 /* RW periodic sched enable */
+#define EHCI_CMD_FLS_M 0x0000000c /* RW/RO frame list size */
+#define EHCI_CMD_FLS(x) (((x) >> 2) & 3) /* RW/RO frame list size */
+#define EHCI_CMD_HCRESET 0x00000002 /* RW reset */
+#define EHCI_CMD_RS 0x00000001 /* RW run/stop */
+#define EHCI_USBSTS 0x04 /* RO, RW, RWC Status register */
+#define EHCI_STS_ASS 0x00008000 /* RO async sched status */
+#define EHCI_STS_PSS 0x00004000 /* RO periodic sched status */
+#define EHCI_STS_REC 0x00002000 /* RO reclamation */
+#define EHCI_STS_HCH 0x00001000 /* RO host controller halted */
+#define EHCI_STS_IAA 0x00000020 /* RWC interrupt on async adv */
+#define EHCI_STS_HSE 0x00000010 /* RWC host system error */
+#define EHCI_STS_FLR 0x00000008 /* RWC frame list rollover */
+#define EHCI_STS_PCD 0x00000004 /* RWC port change detect */
+#define EHCI_STS_ERRINT 0x00000002 /* RWC error interrupt */
+#define EHCI_STS_INT 0x00000001 /* RWC interrupt */
+#define EHCI_STS_INTRS(x) ((x) & 0x3f)
+
+/*
+ * NOTE: the doorbell interrupt is enabled, but the doorbell is never
+ * used! SiS chipsets require this.
+ */
+#define EHCI_NORMAL_INTRS (EHCI_STS_IAA | EHCI_STS_HSE | \
+ EHCI_STS_PCD | EHCI_STS_ERRINT | EHCI_STS_INT)
+
+#define EHCI_USBINTR 0x08 /* RW Interrupt register */
+#define EHCI_INTR_IAAE 0x00000020 /* interrupt on async advance
+ * ena */
+#define EHCI_INTR_HSEE 0x00000010 /* host system error ena */
+#define EHCI_INTR_FLRE 0x00000008 /* frame list rollover ena */
+#define EHCI_INTR_PCIE 0x00000004 /* port change ena */
+#define EHCI_INTR_UEIE 0x00000002 /* USB error intr ena */
+#define EHCI_INTR_UIE 0x00000001 /* USB intr ena */
+
+#define EHCI_FRINDEX 0x0c /* RW Frame Index register */
+
+#define EHCI_CTRLDSSEGMENT 0x10 /* RW Control Data Structure Segment */
+
+#define EHCI_PERIODICLISTBASE 0x14 /* RW Periodic List Base */
+#define EHCI_ASYNCLISTADDR 0x18 /* RW Async List Base */
+
+#define EHCI_CONFIGFLAG 0x40 /* RW Configure Flag register */
+#define EHCI_CONF_CF 0x00000001 /* RW configure flag */
+
+#define EHCI_PORTSC(n) (0x40+(4*(n))) /* RO, RW, RWC Port Status reg */
+#define EHCI_PS_WKOC_E 0x00400000 /* RW wake on over current ena */
+#define EHCI_PS_WKDSCNNT_E 0x00200000 /* RW wake on disconnect ena */
+#define EHCI_PS_WKCNNT_E 0x00100000 /* RW wake on connect ena */
+#define EHCI_PS_PTC 0x000f0000 /* RW port test control */
+#define EHCI_PS_PIC 0x0000c000 /* RW port indicator control */
+#define EHCI_PS_PO 0x00002000 /* RW port owner */
+#define EHCI_PS_PP 0x00001000 /* RW,RO port power */
+#define EHCI_PS_LS 0x00000c00 /* RO line status */
+#define EHCI_PS_IS_LOWSPEED(x) (((x) & EHCI_PS_LS) == 0x00000400)
+#define EHCI_PS_PR 0x00000100 /* RW port reset */
+#define EHCI_PS_SUSP 0x00000080 /* RW suspend */
+#define EHCI_PS_FPR 0x00000040 /* RW force port resume */
+#define EHCI_PS_OCC 0x00000020 /* RWC over current change */
+#define EHCI_PS_OCA 0x00000010 /* RO over current active */
+#define EHCI_PS_PEC 0x00000008 /* RWC port enable change */
+#define EHCI_PS_PE 0x00000004 /* RW port enable */
+#define EHCI_PS_CSC 0x00000002 /* RWC connect status change */
+#define EHCI_PS_CS 0x00000001 /* RO connect status */
+#define EHCI_PS_CLEAR (EHCI_PS_OCC | EHCI_PS_PEC | EHCI_PS_CSC)
+
+#define EHCI_USBMODE 0x68 /* RW USB Device mode register */
+#define EHCI_UM_CM 0x00000003 /* R/WO Controller Mode */
+#define EHCI_UM_CM_IDLE 0x0 /* Idle */
+#define EHCI_UM_CM_HOST 0x3 /* Host Controller */
+#define EHCI_UM_ES 0x00000004 /* R/WO Endian Select */
+#define EHCI_UM_ES_LE 0x0 /* Little-endian byte alignment */
+#define EHCI_UM_ES_BE 0x4 /* Big-endian byte alignment */
+#define EHCI_UM_SDIS 0x00000010 /* R/WO Stream Disable Mode */
+
+#define EHCI_PORT_RESET_COMPLETE 2 /* ms */
+
+#endif /* _EHCIREG_HH_ */
diff --git a/rtems/freebsd/dev/usb/controller/ohci.c b/rtems/freebsd/dev/usb/controller/ohci.c
new file mode 100644
index 00000000..20950243
--- /dev/null
+++ b/rtems/freebsd/dev/usb/controller/ohci.c
@@ -0,0 +1,2764 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 2008 Hans Petter Selasky. All rights reserved.
+ * Copyright (c) 1998 The NetBSD Foundation, Inc. All rights reserved.
+ * Copyright (c) 1998 Lennart Augustsson. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * USB Open Host Controller driver.
+ *
+ * OHCI spec: http://www.compaq.com/productinfo/development/openhci.html
+ * USB spec: http://www.usb.org/developers/docs/usbspec.zip
+ */
+
+#include <rtems/freebsd/sys/stdint.h>
+#include <rtems/freebsd/sys/stddef.h>
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/queue.h>
+#include <rtems/freebsd/sys/types.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/bus.h>
+#include <rtems/freebsd/sys/linker_set.h>
+#include <rtems/freebsd/sys/module.h>
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/mutex.h>
+#include <rtems/freebsd/sys/condvar.h>
+#include <rtems/freebsd/sys/sysctl.h>
+#include <rtems/freebsd/sys/sx.h>
+#include <rtems/freebsd/sys/unistd.h>
+#include <rtems/freebsd/sys/callout.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/priv.h>
+
+#include <rtems/freebsd/dev/usb/usb.h>
+#include <rtems/freebsd/dev/usb/usbdi.h>
+
+#define USB_DEBUG_VAR ohcidebug
+
+#include <rtems/freebsd/dev/usb/usb_core.h>
+#include <rtems/freebsd/dev/usb/usb_debug.h>
+#include <rtems/freebsd/dev/usb/usb_busdma.h>
+#include <rtems/freebsd/dev/usb/usb_process.h>
+#include <rtems/freebsd/dev/usb/usb_transfer.h>
+#include <rtems/freebsd/dev/usb/usb_device.h>
+#include <rtems/freebsd/dev/usb/usb_hub.h>
+#include <rtems/freebsd/dev/usb/usb_util.h>
+
+#include <rtems/freebsd/dev/usb/usb_controller.h>
+#include <rtems/freebsd/dev/usb/usb_bus.h>
+#include <rtems/freebsd/dev/usb/controller/ohci.h>
+#include <rtems/freebsd/dev/usb/controller/ohcireg.h>
+
+#define OHCI_BUS2SC(bus) \
+ ((ohci_softc_t *)(((uint8_t *)(bus)) - \
+ ((uint8_t *)&(((ohci_softc_t *)0)->sc_bus))))
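+
+/*
+ * OHCI_BUS2SC() is the usual "container of" idiom: it recovers the softc
+ * from the embedded "sc_bus" member by subtracting that member's offset
+ * within ohci_softc_t.
+ */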
+
+#ifdef USB_DEBUG
+static int ohcidebug = 0;
+
+SYSCTL_NODE(_hw_usb, OID_AUTO, ohci, CTLFLAG_RW, 0, "USB ohci");
+SYSCTL_INT(_hw_usb_ohci, OID_AUTO, debug, CTLFLAG_RW,
+ &ohcidebug, 0, "ohci debug level");
+
+TUNABLE_INT("hw.usb.ohci.debug", &ohcidebug);
+
+static void ohci_dumpregs(ohci_softc_t *);
+static void ohci_dump_tds(ohci_td_t *);
+static uint8_t ohci_dump_td(ohci_td_t *);
+static void ohci_dump_ed(ohci_ed_t *);
+static uint8_t ohci_dump_itd(ohci_itd_t *);
+static void ohci_dump_itds(ohci_itd_t *);
+
+#endif
+
+#define OBARR(sc) bus_space_barrier((sc)->sc_io_tag, (sc)->sc_io_hdl, 0, (sc)->sc_io_size, \
+ BUS_SPACE_BARRIER_READ|BUS_SPACE_BARRIER_WRITE)
+#define OWRITE1(sc, r, x) \
+ do { OBARR(sc); bus_space_write_1((sc)->sc_io_tag, (sc)->sc_io_hdl, (r), (x)); } while (0)
+#define OWRITE2(sc, r, x) \
+ do { OBARR(sc); bus_space_write_2((sc)->sc_io_tag, (sc)->sc_io_hdl, (r), (x)); } while (0)
+#define OWRITE4(sc, r, x) \
+ do { OBARR(sc); bus_space_write_4((sc)->sc_io_tag, (sc)->sc_io_hdl, (r), (x)); } while (0)
+#define OREAD1(sc, r) (OBARR(sc), bus_space_read_1((sc)->sc_io_tag, (sc)->sc_io_hdl, (r)))
+#define OREAD2(sc, r) (OBARR(sc), bus_space_read_2((sc)->sc_io_tag, (sc)->sc_io_hdl, (r)))
+#define OREAD4(sc, r) (OBARR(sc), bus_space_read_4((sc)->sc_io_tag, (sc)->sc_io_hdl, (r)))
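+
+/*
+ * Every accessor above issues a full read/write barrier (OBARR) first, so
+ * register accesses are not reordered across the register window.
+ */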
+
+#define OHCI_INTR_ENDPT 1
+
+extern struct usb_bus_methods ohci_bus_methods;
+extern struct usb_pipe_methods ohci_device_bulk_methods;
+extern struct usb_pipe_methods ohci_device_ctrl_methods;
+extern struct usb_pipe_methods ohci_device_intr_methods;
+extern struct usb_pipe_methods ohci_device_isoc_methods;
+
+static void ohci_do_poll(struct usb_bus *bus);
+static void ohci_device_done(struct usb_xfer *xfer, usb_error_t error);
+static void ohci_timeout(void *arg);
+static uint8_t ohci_check_transfer(struct usb_xfer *xfer);
+static void ohci_root_intr(ohci_softc_t *sc);
+
+struct ohci_std_temp {
+ struct usb_page_cache *pc;
+ ohci_td_t *td;
+ ohci_td_t *td_next;
+ uint32_t average;
+ uint32_t td_flags;
+ uint32_t len;
+ uint16_t max_frame_size;
+ uint8_t shortpkt;
+ uint8_t setup_alt_next;
+ uint8_t last_frame;
+};
+
+static struct ohci_hcca *
+ohci_get_hcca(ohci_softc_t *sc)
+{
+ usb_pc_cpu_invalidate(&sc->sc_hw.hcca_pc);
+ return (sc->sc_hcca_p);
+}
+
+void
+ohci_iterate_hw_softc(struct usb_bus *bus, usb_bus_mem_sub_cb_t *cb)
+{
+ struct ohci_softc *sc = OHCI_BUS2SC(bus);
+ uint32_t i;
+
+ cb(bus, &sc->sc_hw.hcca_pc, &sc->sc_hw.hcca_pg,
+ sizeof(ohci_hcca_t), OHCI_HCCA_ALIGN);
+
+ cb(bus, &sc->sc_hw.ctrl_start_pc, &sc->sc_hw.ctrl_start_pg,
+ sizeof(ohci_ed_t), OHCI_ED_ALIGN);
+
+ cb(bus, &sc->sc_hw.bulk_start_pc, &sc->sc_hw.bulk_start_pg,
+ sizeof(ohci_ed_t), OHCI_ED_ALIGN);
+
+ cb(bus, &sc->sc_hw.isoc_start_pc, &sc->sc_hw.isoc_start_pg,
+ sizeof(ohci_ed_t), OHCI_ED_ALIGN);
+
+ for (i = 0; i != OHCI_NO_EDS; i++) {
+ cb(bus, sc->sc_hw.intr_start_pc + i, sc->sc_hw.intr_start_pg + i,
+ sizeof(ohci_ed_t), OHCI_ED_ALIGN);
+ }
+}
+
+static usb_error_t
+ohci_controller_init(ohci_softc_t *sc)
+{
+ struct usb_page_search buf_res;
+ uint32_t i;
+ uint32_t ctl;
+ uint32_t ival;
+ uint32_t hcr;
+ uint32_t fm;
+ uint32_t per;
+ uint32_t desca;
+
+ /* Determine in what context we are running. */
+ ctl = OREAD4(sc, OHCI_CONTROL);
+ if (ctl & OHCI_IR) {
+ /* SMM active, request change */
+ DPRINTF("SMM active, request owner change\n");
+ OWRITE4(sc, OHCI_COMMAND_STATUS, OHCI_OCR);
+ for (i = 0; (i < 100) && (ctl & OHCI_IR); i++) {
+ usb_pause_mtx(NULL, hz / 1000);
+ ctl = OREAD4(sc, OHCI_CONTROL);
+ }
+ if (ctl & OHCI_IR) {
+ device_printf(sc->sc_bus.bdev,
+ "SMM does not respond, resetting\n");
+ OWRITE4(sc, OHCI_CONTROL, OHCI_HCFS_RESET);
+ goto reset;
+ }
+ } else {
+ DPRINTF("cold started\n");
+reset:
+ /* controller was cold started */
+ usb_pause_mtx(NULL,
+ USB_MS_TO_TICKS(USB_BUS_RESET_DELAY));
+ }
+
+ /*
+ * This reset should not be necessary according to the OHCI spec, but
+ * without it some controllers do not start.
+ */
+ DPRINTF("%s: resetting\n", device_get_nameunit(sc->sc_bus.bdev));
+ OWRITE4(sc, OHCI_CONTROL, OHCI_HCFS_RESET);
+
+ usb_pause_mtx(NULL,
+ USB_MS_TO_TICKS(USB_BUS_RESET_DELAY));
+
+ /* we now own the host controller and the bus has been reset */
+ ival = OHCI_GET_IVAL(OREAD4(sc, OHCI_FM_INTERVAL));
+
+ OWRITE4(sc, OHCI_COMMAND_STATUS, OHCI_HCR); /* Reset HC */
+ /* nominal time for a reset is 10 us */
+ for (i = 0; i < 10; i++) {
+ DELAY(10);
+ hcr = OREAD4(sc, OHCI_COMMAND_STATUS) & OHCI_HCR;
+ if (!hcr) {
+ break;
+ }
+ }
+ if (hcr) {
+ device_printf(sc->sc_bus.bdev, "reset timeout\n");
+ return (USB_ERR_IOERROR);
+ }
+#ifdef USB_DEBUG
+ if (ohcidebug > 15) {
+ ohci_dumpregs(sc);
+ }
+#endif
+
+ /* The controller is now in the SUSPEND state; we have 2ms to finish. */
+
+ /* set up HC registers */
+ usbd_get_page(&sc->sc_hw.hcca_pc, 0, &buf_res);
+ OWRITE4(sc, OHCI_HCCA, buf_res.physaddr);
+
+ usbd_get_page(&sc->sc_hw.ctrl_start_pc, 0, &buf_res);
+ OWRITE4(sc, OHCI_CONTROL_HEAD_ED, buf_res.physaddr);
+
+ usbd_get_page(&sc->sc_hw.bulk_start_pc, 0, &buf_res);
+ OWRITE4(sc, OHCI_BULK_HEAD_ED, buf_res.physaddr);
+
+ /* disable all interrupts and then switch on all desired interrupts */
+ OWRITE4(sc, OHCI_INTERRUPT_DISABLE, OHCI_ALL_INTRS);
+ OWRITE4(sc, OHCI_INTERRUPT_ENABLE, sc->sc_eintrs | OHCI_MIE);
+ /* switch on desired functional features */
+ ctl = OREAD4(sc, OHCI_CONTROL);
+ ctl &= ~(OHCI_CBSR_MASK | OHCI_LES | OHCI_HCFS_MASK | OHCI_IR);
+ ctl |= OHCI_PLE | OHCI_IE | OHCI_CLE | OHCI_BLE |
+ OHCI_RATIO_1_4 | OHCI_HCFS_OPERATIONAL;
+ /* And finally start it! */
+ OWRITE4(sc, OHCI_CONTROL, ctl);
+
+ /*
+ * The controller is now OPERATIONAL. Set some final
+ * registers that should have been set earlier, but that the
+ * controller ignores when in the SUSPEND state.
+ */
+ fm = (OREAD4(sc, OHCI_FM_INTERVAL) & OHCI_FIT) ^ OHCI_FIT;
+ fm |= OHCI_FSMPS(ival) | ival;
+ OWRITE4(sc, OHCI_FM_INTERVAL, fm);
+ per = OHCI_PERIODIC(ival); /* 90% periodic */
+ OWRITE4(sc, OHCI_PERIODIC_START, per);
+
+ /* Fiddle the No OverCurrent Protection bit to avoid a chip bug. */
+ desca = OREAD4(sc, OHCI_RH_DESCRIPTOR_A);
+ OWRITE4(sc, OHCI_RH_DESCRIPTOR_A, desca | OHCI_NOCP);
+ OWRITE4(sc, OHCI_RH_STATUS, OHCI_LPSC); /* Enable port power */
+ usb_pause_mtx(NULL,
+ USB_MS_TO_TICKS(OHCI_ENABLE_POWER_DELAY));
+ OWRITE4(sc, OHCI_RH_DESCRIPTOR_A, desca);
+
+ /*
+ * The AMD756 requires a delay before re-reading the register,
+ * otherwise it will occasionally report 0 ports.
+ */
+ sc->sc_noport = 0;
+ for (i = 0; (i < 10) && (sc->sc_noport == 0); i++) {
+ usb_pause_mtx(NULL,
+ USB_MS_TO_TICKS(OHCI_READ_DESC_DELAY));
+ sc->sc_noport = OHCI_GET_NDP(OREAD4(sc, OHCI_RH_DESCRIPTOR_A));
+ }
+
+#ifdef USB_DEBUG
+ if (ohcidebug > 5) {
+ ohci_dumpregs(sc);
+ }
+#endif
+ return (USB_ERR_NORMAL_COMPLETION);
+}
+
+static struct ohci_ed *
+ohci_init_ed(struct usb_page_cache *pc)
+{
+ struct usb_page_search buf_res;
+ struct ohci_ed *ed;
+
+ usbd_get_page(pc, 0, &buf_res);
+
+ ed = buf_res.buffer;
+
+ ed->ed_self = htole32(buf_res.physaddr);
+ ed->ed_flags = htole32(OHCI_ED_SKIP);
+ ed->page_cache = pc;
+
+ return (ed);
+}
+
+usb_error_t
+ohci_init(ohci_softc_t *sc)
+{
+ struct usb_page_search buf_res;
+ uint16_t i;
+ uint16_t bit;
+ uint16_t x;
+ uint16_t y;
+
+ DPRINTF("start\n");
+
+ sc->sc_eintrs = OHCI_NORMAL_INTRS;
+
+ /*
+ * Set up all EDs
+ */
+
+ sc->sc_ctrl_p_last =
+ ohci_init_ed(&sc->sc_hw.ctrl_start_pc);
+
+ sc->sc_bulk_p_last =
+ ohci_init_ed(&sc->sc_hw.bulk_start_pc);
+
+ sc->sc_isoc_p_last =
+ ohci_init_ed(&sc->sc_hw.isoc_start_pc);
+
+ for (i = 0; i != OHCI_NO_EDS; i++) {
+ sc->sc_intr_p_last[i] =
+ ohci_init_ed(sc->sc_hw.intr_start_pc + i);
+ }
+
+ /*
+ * the QHs are arranged to give poll intervals that are
+ * powers of 2 times 1ms
+ */
+ bit = OHCI_NO_EDS / 2;
+ while (bit) {
+ x = bit;
+ while (x & bit) {
+ ohci_ed_t *ed_x;
+ ohci_ed_t *ed_y;
+
+ y = (x ^ bit) | (bit / 2);
+
+ /*
+ * the next QH has half the poll interval
+ */
+ ed_x = sc->sc_intr_p_last[x];
+ ed_y = sc->sc_intr_p_last[y];
+
+ ed_x->next = NULL;
+ ed_x->ed_next = ed_y->ed_self;
+
+ x++;
+ }
+ bit >>= 1;
+ }
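+
+ /*
+ * A sketch of the linkage built above, assuming OHCI_NO_EDS is a power
+ * of two: every interrupt ED except ED[0] links to an ED whose poll
+ * interval is half as long, so following "ed_next" from any entry ends
+ * at ED[0], the 1ms ED that is linked to the isochronous ED just below.
+ */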
+
+ if (1) {
+
+ ohci_ed_t *ed_int;
+ ohci_ed_t *ed_isc;
+
+ ed_int = sc->sc_intr_p_last[0];
+ ed_isc = sc->sc_isoc_p_last;
+
+ /* the last (1ms) QH */
+ ed_int->next = ed_isc;
+ ed_int->ed_next = ed_isc->ed_self;
+ }
+ usbd_get_page(&sc->sc_hw.hcca_pc, 0, &buf_res);
+
+ sc->sc_hcca_p = buf_res.buffer;
+
+ /*
+ * Fill HCCA interrupt table. The bit reversal is to get
+ * the tree set up properly to spread the interrupts.
+ */
+ for (i = 0; i != OHCI_NO_INTRS; i++) {
+ sc->sc_hcca_p->hcca_interrupt_table[i] =
+ sc->sc_intr_p_last[i | (OHCI_NO_EDS / 2)]->ed_self;
+ }
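+ /*
+ * The controller fetches one slot of this table per frame, indexed by
+ * the low bits of the frame number (the OHCI specification defines 32
+ * slots), so periodic interrupt load spreads evenly across the tree.
+ */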
+ /* flush all cache into memory */
+
+ usb_bus_mem_flush_all(&sc->sc_bus, &ohci_iterate_hw_softc);
+
+ /* set up the bus struct */
+ sc->sc_bus.methods = &ohci_bus_methods;
+
+ usb_callout_init_mtx(&sc->sc_tmo_rhsc, &sc->sc_bus.bus_mtx, 0);
+
+#ifdef USB_DEBUG
+ if (ohcidebug > 15) {
+ for (i = 0; i != OHCI_NO_EDS; i++) {
+ printf("ed#%d ", i);
+ ohci_dump_ed(sc->sc_intr_p_last[i]);
+ }
+ printf("iso ");
+ ohci_dump_ed(sc->sc_isoc_p_last);
+ }
+#endif
+
+ sc->sc_bus.usbrev = USB_REV_1_0;
+
+ if (ohci_controller_init(sc)) {
+ return (USB_ERR_INVAL);
+ } else {
+ /* catch any lost interrupts */
+ ohci_do_poll(&sc->sc_bus);
+ return (USB_ERR_NORMAL_COMPLETION);
+ }
+}
+
+/*
+ * shut down the controller when the system is going down
+ */
+void
+ohci_detach(struct ohci_softc *sc)
+{
+ USB_BUS_LOCK(&sc->sc_bus);
+
+ usb_callout_stop(&sc->sc_tmo_rhsc);
+
+ OWRITE4(sc, OHCI_INTERRUPT_DISABLE, OHCI_ALL_INTRS);
+ OWRITE4(sc, OHCI_CONTROL, OHCI_HCFS_RESET);
+
+ USB_BUS_UNLOCK(&sc->sc_bus);
+
+ /* XXX let stray task complete */
+ usb_pause_mtx(NULL, hz / 20);
+
+ usb_callout_drain(&sc->sc_tmo_rhsc);
+}
+
+/* NOTE: suspend/resume is called from
+ * interrupt context and cannot sleep!
+ */
+void
+ohci_suspend(ohci_softc_t *sc)
+{
+ uint32_t ctl;
+
+ USB_BUS_LOCK(&sc->sc_bus);
+
+#ifdef USB_DEBUG
+ DPRINTF("\n");
+ if (ohcidebug > 2) {
+ ohci_dumpregs(sc);
+ }
+#endif
+
+ ctl = OREAD4(sc, OHCI_CONTROL) & ~OHCI_HCFS_MASK;
+ if (sc->sc_control == 0) {
+ /*
+ * Preserve register values, in case the APM BIOS
+ * does not restore them.
+ */
+ sc->sc_control = ctl;
+ sc->sc_intre = OREAD4(sc, OHCI_INTERRUPT_ENABLE);
+ }
+ ctl |= OHCI_HCFS_SUSPEND;
+ OWRITE4(sc, OHCI_CONTROL, ctl);
+
+ usb_pause_mtx(&sc->sc_bus.bus_mtx,
+ USB_MS_TO_TICKS(USB_RESUME_WAIT));
+
+ USB_BUS_UNLOCK(&sc->sc_bus);
+}
+
+void
+ohci_resume(ohci_softc_t *sc)
+{
+ uint32_t ctl;
+
+#ifdef USB_DEBUG
+ DPRINTF("\n");
+ if (ohcidebug > 2) {
+ ohci_dumpregs(sc);
+ }
+#endif
+ /* some broken BIOSes never initialize the controller chip */
+ ohci_controller_init(sc);
+
+ USB_BUS_LOCK(&sc->sc_bus);
+ if (sc->sc_intre) {
+ OWRITE4(sc, OHCI_INTERRUPT_ENABLE,
+ sc->sc_intre & (OHCI_ALL_INTRS | OHCI_MIE));
+ }
+ if (sc->sc_control)
+ ctl = sc->sc_control;
+ else
+ ctl = OREAD4(sc, OHCI_CONTROL);
+ ctl |= OHCI_HCFS_RESUME;
+ OWRITE4(sc, OHCI_CONTROL, ctl);
+ usb_pause_mtx(&sc->sc_bus.bus_mtx,
+ USB_MS_TO_TICKS(USB_RESUME_DELAY));
+ ctl = (ctl & ~OHCI_HCFS_MASK) | OHCI_HCFS_OPERATIONAL;
+ OWRITE4(sc, OHCI_CONTROL, ctl);
+ usb_pause_mtx(&sc->sc_bus.bus_mtx,
+ USB_MS_TO_TICKS(USB_RESUME_RECOVERY));
+ sc->sc_control = sc->sc_intre = 0;
+
+ USB_BUS_UNLOCK(&sc->sc_bus);
+
+ /* catch any lost interrupts */
+ ohci_do_poll(&sc->sc_bus);
+}
+
+#ifdef USB_DEBUG
+static void
+ohci_dumpregs(ohci_softc_t *sc)
+{
+ struct ohci_hcca *hcca;
+
+ DPRINTF("ohci_dumpregs: rev=0x%08x control=0x%08x command=0x%08x\n",
+ OREAD4(sc, OHCI_REVISION),
+ OREAD4(sc, OHCI_CONTROL),
+ OREAD4(sc, OHCI_COMMAND_STATUS));
+ DPRINTF(" intrstat=0x%08x intre=0x%08x intrd=0x%08x\n",
+ OREAD4(sc, OHCI_INTERRUPT_STATUS),
+ OREAD4(sc, OHCI_INTERRUPT_ENABLE),
+ OREAD4(sc, OHCI_INTERRUPT_DISABLE));
+ DPRINTF(" hcca=0x%08x percur=0x%08x ctrlhd=0x%08x\n",
+ OREAD4(sc, OHCI_HCCA),
+ OREAD4(sc, OHCI_PERIOD_CURRENT_ED),
+ OREAD4(sc, OHCI_CONTROL_HEAD_ED));
+ DPRINTF(" ctrlcur=0x%08x bulkhd=0x%08x bulkcur=0x%08x\n",
+ OREAD4(sc, OHCI_CONTROL_CURRENT_ED),
+ OREAD4(sc, OHCI_BULK_HEAD_ED),
+ OREAD4(sc, OHCI_BULK_CURRENT_ED));
+ DPRINTF(" done=0x%08x fmival=0x%08x fmrem=0x%08x\n",
+ OREAD4(sc, OHCI_DONE_HEAD),
+ OREAD4(sc, OHCI_FM_INTERVAL),
+ OREAD4(sc, OHCI_FM_REMAINING));
+ DPRINTF(" fmnum=0x%08x perst=0x%08x lsthrs=0x%08x\n",
+ OREAD4(sc, OHCI_FM_NUMBER),
+ OREAD4(sc, OHCI_PERIODIC_START),
+ OREAD4(sc, OHCI_LS_THRESHOLD));
+ DPRINTF(" desca=0x%08x descb=0x%08x stat=0x%08x\n",
+ OREAD4(sc, OHCI_RH_DESCRIPTOR_A),
+ OREAD4(sc, OHCI_RH_DESCRIPTOR_B),
+ OREAD4(sc, OHCI_RH_STATUS));
+ DPRINTF(" port1=0x%08x port2=0x%08x\n",
+ OREAD4(sc, OHCI_RH_PORT_STATUS(1)),
+ OREAD4(sc, OHCI_RH_PORT_STATUS(2)));
+
+ hcca = ohci_get_hcca(sc);
+
+ DPRINTF(" HCCA: frame_number=0x%04x done_head=0x%08x\n",
+ le32toh(hcca->hcca_frame_number),
+ le32toh(hcca->hcca_done_head));
+}
+static void
+ohci_dump_tds(ohci_td_t *std)
+{
+ for (; std; std = std->obj_next) {
+ if (ohci_dump_td(std)) {
+ break;
+ }
+ }
+}
+
+static uint8_t
+ohci_dump_td(ohci_td_t *std)
+{
+ uint32_t td_flags;
+ uint8_t temp;
+
+ usb_pc_cpu_invalidate(std->page_cache);
+
+ td_flags = le32toh(std->td_flags);
+ temp = (std->td_next == 0);
+
+ printf("TD(%p) at 0x%08x: %s%s%s%s%s delay=%d ec=%d "
+ "cc=%d\ncbp=0x%08x next=0x%08x be=0x%08x\n",
+ std, le32toh(std->td_self),
+ (td_flags & OHCI_TD_R) ? "-R" : "",
+ (td_flags & OHCI_TD_OUT) ? "-OUT" : "",
+ (td_flags & OHCI_TD_IN) ? "-IN" : "",
+ ((td_flags & OHCI_TD_TOGGLE_MASK) == OHCI_TD_TOGGLE_1) ? "-TOG1" : "",
+ ((td_flags & OHCI_TD_TOGGLE_MASK) == OHCI_TD_TOGGLE_0) ? "-TOG0" : "",
+ OHCI_TD_GET_DI(td_flags),
+ OHCI_TD_GET_EC(td_flags),
+ OHCI_TD_GET_CC(td_flags),
+ le32toh(std->td_cbp),
+ le32toh(std->td_next),
+ le32toh(std->td_be));
+
+ return (temp);
+}
+
+static uint8_t
+ohci_dump_itd(ohci_itd_t *sitd)
+{
+ uint32_t itd_flags;
+ uint16_t i;
+ uint8_t temp;
+
+ usb_pc_cpu_invalidate(sitd->page_cache);
+
+ itd_flags = le32toh(sitd->itd_flags);
+ temp = (sitd->itd_next == 0);
+
+ printf("ITD(%p) at 0x%08x: sf=%d di=%d fc=%d cc=%d\n"
+ "bp0=0x%08x next=0x%08x be=0x%08x\n",
+ sitd, le32toh(sitd->itd_self),
+ OHCI_ITD_GET_SF(itd_flags),
+ OHCI_ITD_GET_DI(itd_flags),
+ OHCI_ITD_GET_FC(itd_flags),
+ OHCI_ITD_GET_CC(itd_flags),
+ le32toh(sitd->itd_bp0),
+ le32toh(sitd->itd_next),
+ le32toh(sitd->itd_be));
+ for (i = 0; i < OHCI_ITD_NOFFSET; i++) {
+ printf("offs[%d]=0x%04x ", i,
+ (uint32_t)le16toh(sitd->itd_offset[i]));
+ }
+ printf("\n");
+
+ return (temp);
+}
+
+static void
+ohci_dump_itds(ohci_itd_t *sitd)
+{
+ for (; sitd; sitd = sitd->obj_next) {
+ if (ohci_dump_itd(sitd)) {
+ break;
+ }
+ }
+}
+
+static void
+ohci_dump_ed(ohci_ed_t *sed)
+{
+ uint32_t ed_flags;
+ uint32_t ed_headp;
+
+ usb_pc_cpu_invalidate(sed->page_cache);
+
+ ed_flags = le32toh(sed->ed_flags);
+ ed_headp = le32toh(sed->ed_headp);
+
+ printf("ED(%p) at 0x%08x: addr=%d endpt=%d maxp=%d flags=%s%s%s%s%s\n"
+ "tailp=0x%08x headflags=%s%s headp=0x%08x nexted=0x%08x\n",
+ sed, le32toh(sed->ed_self),
+ OHCI_ED_GET_FA(ed_flags),
+ OHCI_ED_GET_EN(ed_flags),
+ OHCI_ED_GET_MAXP(ed_flags),
+ (ed_flags & OHCI_ED_DIR_OUT) ? "-OUT" : "",
+ (ed_flags & OHCI_ED_DIR_IN) ? "-IN" : "",
+ (ed_flags & OHCI_ED_SPEED) ? "-LOWSPEED" : "",
+ (ed_flags & OHCI_ED_SKIP) ? "-SKIP" : "",
+ (ed_flags & OHCI_ED_FORMAT_ISO) ? "-ISO" : "",
+ le32toh(sed->ed_tailp),
+ (ed_headp & OHCI_HALTED) ? "-HALTED" : "",
+ (ed_headp & OHCI_TOGGLECARRY) ? "-CARRY" : "",
+ le32toh(sed->ed_headp),
+ le32toh(sed->ed_next));
+}
+
+#endif
+
+static void
+ohci_transfer_intr_enqueue(struct usb_xfer *xfer)
+{
+ /* check for early completion */
+ if (ohci_check_transfer(xfer)) {
+ return;
+ }
+ /* put transfer on interrupt queue */
+ usbd_transfer_enqueue(&xfer->xroot->bus->intr_q, xfer);
+
+ /* start timeout, if any */
+ if (xfer->timeout != 0) {
+ usbd_transfer_timeout_ms(xfer, &ohci_timeout, xfer->timeout);
+ }
+}
+
+#define OHCI_APPEND_QH(sed,last) (last) = _ohci_append_qh(sed,last)
+static ohci_ed_t *
+_ohci_append_qh(ohci_ed_t *sed, ohci_ed_t *last)
+{
+ DPRINTFN(11, "%p to %p\n", sed, last);
+
+ if (sed->prev != NULL) {
+ /* should not happen */
+ DPRINTFN(0, "ED already linked!\n");
+ return (last);
+ }
+ /* (sc->sc_bus.bus_mtx) must be locked */
+
+ sed->next = last->next;
+ sed->ed_next = last->ed_next;
+ sed->ed_tailp = 0;
+
+ sed->prev = last;
+
+ usb_pc_cpu_flush(sed->page_cache);
+
+ /*
+ * the "prev" pointer of "last->next" is never followed, so the
+ * equivalent update "sed->next->prev = sed" is intentionally skipped
+ */
+
+ last->next = sed;
+ last->ed_next = sed->ed_self;
+
+ usb_pc_cpu_flush(last->page_cache);
+
+ return (sed);
+}
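+
+/*
+ * In these ED lists, "next" and "prev" are software-only links, while
+ * "ed_next" and "ed_self" hold the little-endian physical addresses the
+ * controller itself follows; both views are kept in sync and flushed to
+ * memory after every change.
+ */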
+
+#define OHCI_REMOVE_QH(sed,last) (last) = _ohci_remove_qh(sed,last)
+static ohci_ed_t *
+_ohci_remove_qh(ohci_ed_t *sed, ohci_ed_t *last)
+{
+ DPRINTFN(11, "%p from %p\n", sed, last);
+
+ /* (sc->sc_bus.bus_mtx) must be locked */
+
+ /* only remove if not removed from a queue */
+ if (sed->prev) {
+
+ sed->prev->next = sed->next;
+ sed->prev->ed_next = sed->ed_next;
+
+ usb_pc_cpu_flush(sed->prev->page_cache);
+
+ if (sed->next) {
+ sed->next->prev = sed->prev;
+ usb_pc_cpu_flush(sed->next->page_cache);
+ }
+ last = ((last == sed) ? sed->prev : last);
+
+ sed->prev = 0;
+
+ usb_pc_cpu_flush(sed->page_cache);
+ }
+ return (last);
+}
+
+static void
+ohci_isoc_done(struct usb_xfer *xfer)
+{
+ uint8_t nframes;
+ uint32_t *plen = xfer->frlengths;
+ volatile uint16_t *olen;
+ uint16_t len = 0;
+ ohci_itd_t *td = xfer->td_transfer_first;
+
+ while (1) {
+ if (td == NULL) {
+ panic("%s:%d: out of TD's\n",
+ __FUNCTION__, __LINE__);
+ }
+#ifdef USB_DEBUG
+ if (ohcidebug > 5) {
+ DPRINTF("isoc TD\n");
+ ohci_dump_itd(td);
+ }
+#endif
+ usb_pc_cpu_invalidate(td->page_cache);
+
+ nframes = td->frames;
+ olen = &td->itd_offset[0];
+
+ if (nframes > 8) {
+ nframes = 8;
+ }
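+ /*
+ * Each 16-bit entry decoded below is an OHCI packet status word:
+ * the completion code is in the upper four bits and the actual
+ * byte count in the lower twelve.
+ */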
+ while (nframes--) {
+ len = le16toh(*olen);
+
+ if ((len >> 12) == OHCI_CC_NOT_ACCESSED) {
+ len = 0;
+ } else {
+ len &= ((1 << 12) - 1);
+ }
+
+ if (len > *plen) {
+ len = 0;/* invalid length */
+ }
+ *plen = len;
+ plen++;
+ olen++;
+ }
+
+ if (((void *)td) == xfer->td_transfer_last) {
+ break;
+ }
+ td = td->obj_next;
+ }
+
+ xfer->aframes = xfer->nframes;
+ ohci_device_done(xfer, USB_ERR_NORMAL_COMPLETION);
+}
+
+#ifdef USB_DEBUG
+static const char *const
+ ohci_cc_strs[] =
+{
+ "NO_ERROR",
+ "CRC",
+ "BIT_STUFFING",
+ "DATA_TOGGLE_MISMATCH",
+
+ "STALL",
+ "DEVICE_NOT_RESPONDING",
+ "PID_CHECK_FAILURE",
+ "UNEXPECTED_PID",
+
+ "DATA_OVERRUN",
+ "DATA_UNDERRUN",
+ "BUFFER_OVERRUN",
+ "BUFFER_UNDERRUN",
+
+ "reserved",
+ "reserved",
+ "NOT_ACCESSED",
+ "NOT_ACCESSED"
+};
+
+#endif
+
+static usb_error_t
+ohci_non_isoc_done_sub(struct usb_xfer *xfer)
+{
+ ohci_td_t *td;
+ ohci_td_t *td_alt_next;
+ uint32_t temp;
+ uint32_t phy_start;
+ uint32_t phy_end;
+ uint32_t td_flags;
+ uint16_t cc;
+
+ td = xfer->td_transfer_cache;
+ td_alt_next = td->alt_next;
+ td_flags = 0;
+
+ if (xfer->aframes != xfer->nframes) {
+ usbd_xfer_set_frame_len(xfer, xfer->aframes, 0);
+ }
+ while (1) {
+
+ usb_pc_cpu_invalidate(td->page_cache);
+ phy_start = le32toh(td->td_cbp);
+ td_flags = le32toh(td->td_flags);
+ cc = OHCI_TD_GET_CC(td_flags);
+
+ if (phy_start) {
+ /*
+ * short transfer - compute the number of remaining
+ * bytes in the hardware buffer:
+ */
+ phy_end = le32toh(td->td_be);
+ temp = (OHCI_PAGE(phy_start ^ phy_end) ?
+ (OHCI_PAGE_SIZE + 1) : 0x0001);
+ temp += OHCI_PAGE_OFFSET(phy_end);
+ temp -= OHCI_PAGE_OFFSET(phy_start);
+
+ if (temp > td->len) {
+ /* guard against corruption */
+ cc = OHCI_CC_STALL;
+ } else if (xfer->aframes != xfer->nframes) {
+ /*
+ * Sum up total transfer length
+ * in "frlengths[]":
+ */
+ xfer->frlengths[xfer->aframes] += td->len - temp;
+ }
+ } else {
+ if (xfer->aframes != xfer->nframes) {
+ /* transfer was complete */
+ xfer->frlengths[xfer->aframes] += td->len;
+ }
+ }
+ /* Check for last transfer */
+ if (((void *)td) == xfer->td_transfer_last) {
+ td = NULL;
+ break;
+ }
+ /* Check transfer status */
+ if (cc) {
+ /* the transfer is finished */
+ td = NULL;
+ break;
+ }
+ /* Check for short transfer */
+ if (phy_start) {
+ if (xfer->flags_int.short_frames_ok) {
+ /* follow alt next */
+ td = td->alt_next;
+ } else {
+ /* the transfer is finished */
+ td = NULL;
+ }
+ break;
+ }
+ td = td->obj_next;
+
+ if (td->alt_next != td_alt_next) {
+ /* this USB frame is complete */
+ break;
+ }
+ }
+
+ /* update transfer cache */
+
+ xfer->td_transfer_cache = td;
+
+ DPRINTFN(16, "error cc=%d (%s)\n",
+ cc, ohci_cc_strs[cc]);
+
+ return ((cc == 0) ? USB_ERR_NORMAL_COMPLETION :
+ (cc == OHCI_CC_STALL) ? USB_ERR_STALLED : USB_ERR_IOERROR);
+}
+
+static void
+ohci_non_isoc_done(struct usb_xfer *xfer)
+{
+ usb_error_t err = 0;
+
+ DPRINTFN(13, "xfer=%p endpoint=%p transfer done\n",
+ xfer, xfer->endpoint);
+
+#ifdef USB_DEBUG
+ if (ohcidebug > 10) {
+ ohci_dump_tds(xfer->td_transfer_first);
+ }
+#endif
+
+ /* reset scanner */
+
+ xfer->td_transfer_cache = xfer->td_transfer_first;
+
+ if (xfer->flags_int.control_xfr) {
+
+ if (xfer->flags_int.control_hdr) {
+
+ err = ohci_non_isoc_done_sub(xfer);
+ }
+ xfer->aframes = 1;
+
+ if (xfer->td_transfer_cache == NULL) {
+ goto done;
+ }
+ }
+ while (xfer->aframes != xfer->nframes) {
+
+ err = ohci_non_isoc_done_sub(xfer);
+ xfer->aframes++;
+
+ if (xfer->td_transfer_cache == NULL) {
+ goto done;
+ }
+ }
+
+ if (xfer->flags_int.control_xfr &&
+ !xfer->flags_int.control_act) {
+
+ err = ohci_non_isoc_done_sub(xfer);
+ }
+done:
+ ohci_device_done(xfer, err);
+}
+
+/*------------------------------------------------------------------------*
+ * ohci_check_transfer_sub
+ *------------------------------------------------------------------------*/
+static void
+ohci_check_transfer_sub(struct usb_xfer *xfer)
+{
+ ohci_td_t *td;
+ ohci_ed_t *ed;
+ uint32_t phy_start;
+ uint32_t td_flags;
+ uint32_t td_next;
+ uint16_t cc;
+
+ td = xfer->td_transfer_cache;
+
+ while (1) {
+
+ usb_pc_cpu_invalidate(td->page_cache);
+ phy_start = le32toh(td->td_cbp);
+ td_flags = le32toh(td->td_flags);
+ td_next = le32toh(td->td_next);
+
+ /* Check for last transfer */
+ if (((void *)td) == xfer->td_transfer_last) {
+ /* the transfer is finished */
+ td = NULL;
+ break;
+ }
+ /* Check transfer status */
+ cc = OHCI_TD_GET_CC(td_flags);
+ if (cc) {
+ /* the transfer is finished */
+ td = NULL;
+ break;
+ }
+ /*
+ * Check if we reached the last packet
+ * or if there is a short packet:
+ */
+
+ if (((td_next & (~0xF)) == OHCI_TD_NEXT_END) || phy_start) {
+ /* follow alt next */
+ td = td->alt_next;
+ break;
+ }
+ td = td->obj_next;
+ }
+
+ /* update transfer cache */
+
+ xfer->td_transfer_cache = td;
+
+ if (td) {
+
+ ed = xfer->qh_start[xfer->flags_int.curr_dma_set];
+
+ ed->ed_headp = td->td_self;
+ usb_pc_cpu_flush(ed->page_cache);
+
+ DPRINTFN(13, "xfer=%p following alt next\n", xfer);
+
+ /*
+ * Make sure that the OHCI re-scans the schedule by
+ * writing the BLF and CLF bits:
+ */
+
+ if (xfer->xroot->udev->flags.self_suspended) {
+ /* nothing to do */
+ } else if (xfer->endpoint->methods == &ohci_device_bulk_methods) {
+ ohci_softc_t *sc = OHCI_BUS2SC(xfer->xroot->bus);
+
+ OWRITE4(sc, OHCI_COMMAND_STATUS, OHCI_BLF);
+ } else if (xfer->endpoint->methods == &ohci_device_ctrl_methods) {
+ ohci_softc_t *sc = OHCI_BUS2SC(xfer->xroot->bus);
+
+ OWRITE4(sc, OHCI_COMMAND_STATUS, OHCI_CLF);
+ }
+ }
+}
+
+/*------------------------------------------------------------------------*
+ * ohci_check_transfer
+ *
+ * Return values:
+ * 0: USB transfer is not finished
+ * Else: USB transfer is finished
+ *------------------------------------------------------------------------*/
+static uint8_t
+ohci_check_transfer(struct usb_xfer *xfer)
+{
+ ohci_ed_t *ed;
+ uint32_t ed_headp;
+ uint32_t ed_tailp;
+
+ DPRINTFN(13, "xfer=%p checking transfer\n", xfer);
+
+ ed = xfer->qh_start[xfer->flags_int.curr_dma_set];
+
+ usb_pc_cpu_invalidate(ed->page_cache);
+ ed_headp = le32toh(ed->ed_headp);
+ ed_tailp = le32toh(ed->ed_tailp);
+
+ if ((ed_headp & OHCI_HALTED) ||
+ (((ed_headp ^ ed_tailp) & (~0xF)) == 0)) {
+ if (xfer->endpoint->methods == &ohci_device_isoc_methods) {
+ /* isochronous transfer */
+ ohci_isoc_done(xfer);
+ } else {
+ if (xfer->flags_int.short_frames_ok) {
+ ohci_check_transfer_sub(xfer);
+ if (xfer->td_transfer_cache) {
+ /* not finished yet */
+ return (0);
+ }
+ }
+ /* store data-toggle */
+ if (ed_headp & OHCI_TOGGLECARRY) {
+ xfer->endpoint->toggle_next = 1;
+ } else {
+ xfer->endpoint->toggle_next = 0;
+ }
+
+ /* non-isochronous transfer */
+ ohci_non_isoc_done(xfer);
+ }
+ return (1);
+ }
+ DPRINTFN(13, "xfer=%p is still active\n", xfer);
+ return (0);
+}
+
+static void
+ohci_rhsc_enable(ohci_softc_t *sc)
+{
+ DPRINTFN(5, "\n");
+
+ USB_BUS_LOCK_ASSERT(&sc->sc_bus, MA_OWNED);
+
+ sc->sc_eintrs |= OHCI_RHSC;
+ OWRITE4(sc, OHCI_INTERRUPT_ENABLE, OHCI_RHSC);
+
+ /* acknowledge any RHSC interrupt */
+ OWRITE4(sc, OHCI_INTERRUPT_STATUS, OHCI_RHSC);
+
+ ohci_root_intr(sc);
+}
+
+static void
+ohci_interrupt_poll(ohci_softc_t *sc)
+{
+ struct usb_xfer *xfer;
+
+repeat:
+ TAILQ_FOREACH(xfer, &sc->sc_bus.intr_q.head, wait_entry) {
+ /*
+ * check whether the transfer has completed
+ */
+ if (ohci_check_transfer(xfer)) {
+ /* queue has been modified */
+ goto repeat;
+ }
+ }
+}
+
+/*------------------------------------------------------------------------*
+ * ohci_interrupt - OHCI interrupt handler
+ *
+ * NOTE: Do not access "sc->sc_bus.bdev" inside the interrupt handler,
+ * because the interrupt handler is set up before "sc->sc_bus.bdev"
+ * is present!
+ *------------------------------------------------------------------------*/
+void
+ohci_interrupt(ohci_softc_t *sc)
+{
+ struct ohci_hcca *hcca;
+ uint32_t status;
+ uint32_t done;
+
+ USB_BUS_LOCK(&sc->sc_bus);
+
+ hcca = ohci_get_hcca(sc);
+
+ DPRINTFN(16, "real interrupt\n");
+
+#ifdef USB_DEBUG
+ if (ohcidebug > 15) {
+ ohci_dumpregs(sc);
+ }
+#endif
+
+ done = le32toh(hcca->hcca_done_head);
+
+ /*
+ * The LSb of done is used to inform the HC Driver that an interrupt
+ * condition exists for both the Done list and for another event
+ * recorded in HcInterruptStatus. On an interrupt from the HC, the
+ * HC Driver checks the HccaDoneHead Value. If this value is 0, then
+ * the interrupt was caused by something other than the HccaDoneHead
+ * update, and the HcInterruptStatus register needs to be accessed to
+ * determine the exact interrupt cause. If HccaDoneHead is nonzero,
+ * then a Done list update interrupt is indicated and if the LSb of
+ * done is nonzero, then an additional interrupt event is indicated
+ * and HcInterruptStatus should be checked to determine its cause.
+ */
+ if (done != 0) {
+ status = 0;
+
+ if (done & ~OHCI_DONE_INTRS) {
+ status |= OHCI_WDH;
+ }
+ if (done & OHCI_DONE_INTRS) {
+ status |= OREAD4(sc, OHCI_INTERRUPT_STATUS);
+ }
+ hcca->hcca_done_head = 0;
+
+ usb_pc_cpu_flush(&sc->sc_hw.hcca_pc);
+ } else {
+ status = OREAD4(sc, OHCI_INTERRUPT_STATUS) & ~OHCI_WDH;
+ }
+
+ status &= ~OHCI_MIE;
+ if (status == 0) {
+ /*
+ * nothing to be done (PCI shared
+ * interrupt)
+ */
+ goto done;
+ }
+ OWRITE4(sc, OHCI_INTERRUPT_STATUS, status); /* Acknowledge */
+
+ status &= sc->sc_eintrs;
+ if (status == 0) {
+ goto done;
+ }
+ if (status & (OHCI_SO | OHCI_RD | OHCI_UE | OHCI_RHSC)) {
+#if 0
+ if (status & OHCI_SO) {
+ /* XXX do what */
+ }
+#endif
+ if (status & OHCI_RD) {
+ printf("%s: resume detect\n", __FUNCTION__);
+ /* XXX process resume detect */
+ }
+ if (status & OHCI_UE) {
+ printf("%s: unrecoverable error, "
+ "controller halted\n", __FUNCTION__);
+ OWRITE4(sc, OHCI_CONTROL, OHCI_HCFS_RESET);
+ /* XXX what else */
+ }
+ if (status & OHCI_RHSC) {
+ /*
+ * Disable RHSC interrupt for now, because it will be
+ * on until the port has been reset.
+ */
+ sc->sc_eintrs &= ~OHCI_RHSC;
+ OWRITE4(sc, OHCI_INTERRUPT_DISABLE, OHCI_RHSC);
+
+ ohci_root_intr(sc);
+
+ /* do not allow RHSC interrupts > 1 per second */
+ usb_callout_reset(&sc->sc_tmo_rhsc, hz,
+ (void *)&ohci_rhsc_enable, sc);
+ }
+ }
+ status &= ~(OHCI_RHSC | OHCI_WDH | OHCI_SO);
+ if (status != 0) {
+ /* Block unprocessed interrupts. XXX */
+ OWRITE4(sc, OHCI_INTERRUPT_DISABLE, status);
+ sc->sc_eintrs &= ~status;
+ printf("%s: blocking intrs 0x%x\n",
+ __FUNCTION__, status);
+ }
+ /* poll all the USB transfers */
+ ohci_interrupt_poll(sc);
+
+done:
+ USB_BUS_UNLOCK(&sc->sc_bus);
+}
+
+/*
+ * called when a request does not complete
+ */
+static void
+ohci_timeout(void *arg)
+{
+ struct usb_xfer *xfer = arg;
+
+ DPRINTF("xfer=%p\n", xfer);
+
+ USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED);
+
+ /* the transfer timed out; finish it */
+ ohci_device_done(xfer, USB_ERR_TIMEOUT);
+}
+
+static void
+ohci_do_poll(struct usb_bus *bus)
+{
+ struct ohci_softc *sc = OHCI_BUS2SC(bus);
+
+ USB_BUS_LOCK(&sc->sc_bus);
+ ohci_interrupt_poll(sc);
+ USB_BUS_UNLOCK(&sc->sc_bus);
+}
+
+static void
+ohci_setup_standard_chain_sub(struct ohci_std_temp *temp)
+{
+ struct usb_page_search buf_res;
+ ohci_td_t *td;
+ ohci_td_t *td_next;
+ ohci_td_t *td_alt_next;
+ uint32_t buf_offset;
+ uint32_t average;
+ uint32_t len_old;
+ uint8_t shortpkt_old;
+ uint8_t precompute;
+
+ td_alt_next = NULL;
+ buf_offset = 0;
+ shortpkt_old = temp->shortpkt;
+ len_old = temp->len;
+ precompute = 1;
+
+ /* software is used to detect short incoming transfers */
+
+ if ((temp->td_flags & htole32(OHCI_TD_DP_MASK)) == htole32(OHCI_TD_IN)) {
+ temp->td_flags |= htole32(OHCI_TD_R);
+ } else {
+ temp->td_flags &= ~htole32(OHCI_TD_R);
+ }
+
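+ /*
+ * The loop below runs twice: the first pass ("precompute") only
+ * consumes TDs and byte counts so that the alternate-next TD can be
+ * determined; the second pass actually fills out the TDs.
+ */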
+restart:
+
+ td = temp->td;
+ td_next = temp->td_next;
+
+ while (1) {
+
+ if (temp->len == 0) {
+
+ if (temp->shortpkt) {
+ break;
+ }
+ /* send a Zero Length Packet (ZLP) last */
+
+ temp->shortpkt = 1;
+ average = 0;
+
+ } else {
+
+ average = temp->average;
+
+ if (temp->len < average) {
+ if (temp->len % temp->max_frame_size) {
+ temp->shortpkt = 1;
+ }
+ average = temp->len;
+ }
+ }
+
+ if (td_next == NULL) {
+ panic("%s: out of OHCI transfer descriptors!", __FUNCTION__);
+ }
+ /* get next TD */
+
+ td = td_next;
+ td_next = td->obj_next;
+
+ /* check if we are pre-computing */
+
+ if (precompute) {
+
+ /* update remaining length */
+
+ temp->len -= average;
+
+ continue;
+ }
+ /* fill out current TD */
+ td->td_flags = temp->td_flags;
+
+ /* the next TD uses TOGGLE_CARRY */
+ temp->td_flags &= ~htole32(OHCI_TD_TOGGLE_MASK);
+
+ if (average == 0) {
+ /*
+ * The buffer start and end phys addresses should be
+ * 0x0 for a zero length packet.
+ */
+ td->td_cbp = 0;
+ td->td_be = 0;
+ td->len = 0;
+
+ } else {
+
+ usbd_get_page(temp->pc, buf_offset, &buf_res);
+ td->td_cbp = htole32(buf_res.physaddr);
+ buf_offset += (average - 1);
+
+ usbd_get_page(temp->pc, buf_offset, &buf_res);
+ td->td_be = htole32(buf_res.physaddr);
+ buf_offset++;
+
+ td->len = average;
+
+ /* update remaining length */
+
+ temp->len -= average;
+ }
+
+ if ((td_next == td_alt_next) && temp->setup_alt_next) {
+ /* we need to receive these frames one by one! */
+ td->td_flags &= htole32(~OHCI_TD_INTR_MASK);
+ td->td_flags |= htole32(OHCI_TD_SET_DI(1));
+ td->td_next = htole32(OHCI_TD_NEXT_END);
+ } else {
+ if (td_next) {
+ /* link the current TD with the next one */
+ td->td_next = td_next->td_self;
+ }
+ }
+
+ td->alt_next = td_alt_next;
+
+ usb_pc_cpu_flush(td->page_cache);
+ }
+
+ if (precompute) {
+ precompute = 0;
+
+ /* setup alt next pointer, if any */
+ if (temp->last_frame) {
+ /* no alternate next */
+ td_alt_next = NULL;
+ } else {
+ /* we use this field internally */
+ td_alt_next = td_next;
+ }
+
+ /* restore */
+ temp->shortpkt = shortpkt_old;
+ temp->len = len_old;
+ goto restart;
+ }
+ temp->td = td;
+ temp->td_next = td_next;
+}
+
+static void
+ohci_setup_standard_chain(struct usb_xfer *xfer, ohci_ed_t **ed_last)
+{
+ struct ohci_std_temp temp;
+ struct usb_pipe_methods *methods;
+ ohci_ed_t *ed;
+ ohci_td_t *td;
+ uint32_t ed_flags;
+ uint32_t x;
+
+ DPRINTFN(9, "addr=%d endpt=%d sumlen=%d speed=%d\n",
+ xfer->address, UE_GET_ADDR(xfer->endpointno),
+ xfer->sumlen, usbd_get_speed(xfer->xroot->udev));
+
+ temp.average = xfer->max_hc_frame_size;
+ temp.max_frame_size = xfer->max_frame_size;
+
+ /* toggle the DMA set we are using */
+ xfer->flags_int.curr_dma_set ^= 1;
+
+ /* get next DMA set */
+ td = xfer->td_start[xfer->flags_int.curr_dma_set];
+
+ xfer->td_transfer_first = td;
+ xfer->td_transfer_cache = td;
+
+ temp.td = NULL;
+ temp.td_next = td;
+ temp.last_frame = 0;
+ temp.setup_alt_next = xfer->flags_int.short_frames_ok;
+
+ methods = xfer->endpoint->methods;
+
+ /* check if we should prepend a setup message */
+
+ if (xfer->flags_int.control_xfr) {
+ if (xfer->flags_int.control_hdr) {
+
+ temp.td_flags = htole32(OHCI_TD_SETUP | OHCI_TD_NOCC |
+ OHCI_TD_TOGGLE_0 | OHCI_TD_NOINTR);
+
+ temp.len = xfer->frlengths[0];
+ temp.pc = xfer->frbuffers + 0;
+ temp.shortpkt = temp.len ? 1 : 0;
+ /* check for last frame */
+ if (xfer->nframes == 1) {
+ /* no STATUS stage yet, SETUP is last */
+ if (xfer->flags_int.control_act) {
+ temp.last_frame = 1;
+ temp.setup_alt_next = 0;
+ }
+ }
+ ohci_setup_standard_chain_sub(&temp);
+
+ /*
+ * XXX assume that the setup message is
+ * contained within one USB packet:
+ */
+ xfer->endpoint->toggle_next = 1;
+ }
+ x = 1;
+ } else {
+ x = 0;
+ }
+ temp.td_flags = htole32(OHCI_TD_NOCC | OHCI_TD_NOINTR);
+
+ /* set data toggle */
+
+ if (xfer->endpoint->toggle_next) {
+ temp.td_flags |= htole32(OHCI_TD_TOGGLE_1);
+ } else {
+ temp.td_flags |= htole32(OHCI_TD_TOGGLE_0);
+ }
+
+ /* set endpoint direction */
+
+ if (UE_GET_DIR(xfer->endpointno) == UE_DIR_IN) {
+ temp.td_flags |= htole32(OHCI_TD_IN);
+ } else {
+ temp.td_flags |= htole32(OHCI_TD_OUT);
+ }
+
+ while (x != xfer->nframes) {
+
+ /* DATA0 / DATA1 message */
+
+ temp.len = xfer->frlengths[x];
+ temp.pc = xfer->frbuffers + x;
+
+ x++;
+
+ if (x == xfer->nframes) {
+ if (xfer->flags_int.control_xfr) {
+ /* no STATUS stage yet, DATA is last */
+ if (xfer->flags_int.control_act) {
+ temp.last_frame = 1;
+ temp.setup_alt_next = 0;
+ }
+ } else {
+ temp.last_frame = 1;
+ temp.setup_alt_next = 0;
+ }
+ }
+ if (temp.len == 0) {
+
+ /* make sure that we send a USB packet */
+
+ temp.shortpkt = 0;
+
+ } else {
+
+ /* regular data transfer */
+
+ temp.shortpkt = (xfer->flags.force_short_xfer) ? 0 : 1;
+ }
+
+ ohci_setup_standard_chain_sub(&temp);
+ }
+
+ /* check if we should append a status stage */
+
+ if (xfer->flags_int.control_xfr &&
+ !xfer->flags_int.control_act) {
+
+ /*
+ * Send a DATA1 message and invert the current endpoint
+ * direction.
+ */
+
+ /* set endpoint direction and data toggle */
+
+ if (UE_GET_DIR(xfer->endpointno) == UE_DIR_IN) {
+ temp.td_flags = htole32(OHCI_TD_OUT |
+ OHCI_TD_NOCC | OHCI_TD_TOGGLE_1 | OHCI_TD_SET_DI(1));
+ } else {
+ temp.td_flags = htole32(OHCI_TD_IN |
+ OHCI_TD_NOCC | OHCI_TD_TOGGLE_1 | OHCI_TD_SET_DI(1));
+ }
+
+ temp.len = 0;
+ temp.pc = NULL;
+ temp.shortpkt = 0;
+ temp.last_frame = 1;
+ temp.setup_alt_next = 0;
+
+ ohci_setup_standard_chain_sub(&temp);
+ }
+ td = temp.td;
+
+ /* Ensure that the last TD is terminating: */
+ td->td_next = htole32(OHCI_TD_NEXT_END);
+ td->td_flags &= ~htole32(OHCI_TD_INTR_MASK);
+ td->td_flags |= htole32(OHCI_TD_SET_DI(1));
+
+ usb_pc_cpu_flush(td->page_cache);
+
+ /* must have at least one frame! */
+
+ xfer->td_transfer_last = td;
+
+#ifdef USB_DEBUG
+ if (ohcidebug > 8) {
+ DPRINTF("nexttog=%d; data before transfer:\n",
+ xfer->endpoint->toggle_next);
+ ohci_dump_tds(xfer->td_transfer_first);
+ }
+#endif
+
+ ed = xfer->qh_start[xfer->flags_int.curr_dma_set];
+
+ ed_flags = (OHCI_ED_SET_FA(xfer->address) |
+ OHCI_ED_SET_EN(UE_GET_ADDR(xfer->endpointno)) |
+ OHCI_ED_SET_MAXP(xfer->max_frame_size));
+
+ ed_flags |= (OHCI_ED_FORMAT_GEN | OHCI_ED_DIR_TD);
+
+ if (xfer->xroot->udev->speed == USB_SPEED_LOW) {
+ ed_flags |= OHCI_ED_SPEED;
+ }
+ ed->ed_flags = htole32(ed_flags);
+
+ td = xfer->td_transfer_first;
+
+ ed->ed_headp = td->td_self;
+
+ if (xfer->xroot->udev->flags.self_suspended == 0) {
+ /* the append function will flush the endpoint descriptor */
+ OHCI_APPEND_QH(ed, *ed_last);
+
+ if (methods == &ohci_device_bulk_methods) {
+ ohci_softc_t *sc = OHCI_BUS2SC(xfer->xroot->bus);
+
+ OWRITE4(sc, OHCI_COMMAND_STATUS, OHCI_BLF);
+ }
+ if (methods == &ohci_device_ctrl_methods) {
+ ohci_softc_t *sc = OHCI_BUS2SC(xfer->xroot->bus);
+
+ OWRITE4(sc, OHCI_COMMAND_STATUS, OHCI_CLF);
+ }
+ } else {
+ usb_pc_cpu_flush(ed->page_cache);
+ }
+}
+
+static void
+ohci_root_intr(ohci_softc_t *sc)
+{
+ uint32_t hstatus;
+ uint16_t i;
+ uint16_t m;
+
+ USB_BUS_LOCK_ASSERT(&sc->sc_bus, MA_OWNED);
+
+ /* clear any old interrupt data */
+ memset(sc->sc_hub_idata, 0, sizeof(sc->sc_hub_idata));
+
+ hstatus = OREAD4(sc, OHCI_RH_STATUS);
+ DPRINTF("sc=%p hstatus=0x%08x\n",
+ sc, hstatus);
+
+ /* set bits */
+ m = (sc->sc_noport + 1);
+ if (m > (8 * sizeof(sc->sc_hub_idata))) {
+ m = (8 * sizeof(sc->sc_hub_idata));
+ }
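+ /*
+ * sc_hub_idata has the layout of a hub interrupt endpoint bitmap:
+ * bit 0 is the hub itself and bit "i" is port "i", which is why the
+ * loop below starts at one.
+ */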
+ for (i = 1; i < m; i++) {
+ /* pick out CHANGE bits from the status register */
+ if (OREAD4(sc, OHCI_RH_PORT_STATUS(i)) >> 16) {
+ sc->sc_hub_idata[i / 8] |= 1 << (i % 8);
+ DPRINTF("port %d changed\n", i);
+ }
+ }
+
+ uhub_root_intr(&sc->sc_bus, sc->sc_hub_idata,
+ sizeof(sc->sc_hub_idata));
+}
+
+/* NOTE: "done" can be run two times in a row,
+ * from close and from interrupt
+ */
+static void
+ohci_device_done(struct usb_xfer *xfer, usb_error_t error)
+{
+ struct usb_pipe_methods *methods = xfer->endpoint->methods;
+ ohci_softc_t *sc = OHCI_BUS2SC(xfer->xroot->bus);
+ ohci_ed_t *ed;
+
+ USB_BUS_LOCK_ASSERT(&sc->sc_bus, MA_OWNED);
+
+
+ DPRINTFN(2, "xfer=%p, endpoint=%p, error=%d\n",
+ xfer, xfer->endpoint, error);
+
+ ed = xfer->qh_start[xfer->flags_int.curr_dma_set];
+ if (ed) {
+ usb_pc_cpu_invalidate(ed->page_cache);
+ }
+ if (methods == &ohci_device_bulk_methods) {
+ OHCI_REMOVE_QH(ed, sc->sc_bulk_p_last);
+ }
+ if (methods == &ohci_device_ctrl_methods) {
+ OHCI_REMOVE_QH(ed, sc->sc_ctrl_p_last);
+ }
+ if (methods == &ohci_device_intr_methods) {
+ OHCI_REMOVE_QH(ed, sc->sc_intr_p_last[xfer->qh_pos]);
+ }
+ if (methods == &ohci_device_isoc_methods) {
+ OHCI_REMOVE_QH(ed, sc->sc_isoc_p_last);
+ }
+ xfer->td_transfer_first = NULL;
+ xfer->td_transfer_last = NULL;
+
+ /* dequeue transfer and start next transfer */
+ usbd_transfer_done(xfer, error);
+}
+
+/*------------------------------------------------------------------------*
+ * ohci bulk support
+ *------------------------------------------------------------------------*/
+static void
+ohci_device_bulk_open(struct usb_xfer *xfer)
+{
+ return;
+}
+
+static void
+ohci_device_bulk_close(struct usb_xfer *xfer)
+{
+ ohci_device_done(xfer, USB_ERR_CANCELLED);
+}
+
+static void
+ohci_device_bulk_enter(struct usb_xfer *xfer)
+{
+ return;
+}
+
+static void
+ohci_device_bulk_start(struct usb_xfer *xfer)
+{
+ ohci_softc_t *sc = OHCI_BUS2SC(xfer->xroot->bus);
+
+ /* set up TDs and the QH */
+ ohci_setup_standard_chain(xfer, &sc->sc_bulk_p_last);
+
+ /* put transfer on interrupt queue */
+ ohci_transfer_intr_enqueue(xfer);
+}
+
+struct usb_pipe_methods ohci_device_bulk_methods =
+{
+ .open = ohci_device_bulk_open,
+ .close = ohci_device_bulk_close,
+ .enter = ohci_device_bulk_enter,
+ .start = ohci_device_bulk_start,
+};
+
+/*------------------------------------------------------------------------*
+ * ohci control support
+ *------------------------------------------------------------------------*/
+static void
+ohci_device_ctrl_open(struct usb_xfer *xfer)
+{
+ return;
+}
+
+static void
+ohci_device_ctrl_close(struct usb_xfer *xfer)
+{
+ ohci_device_done(xfer, USB_ERR_CANCELLED);
+}
+
+static void
+ohci_device_ctrl_enter(struct usb_xfer *xfer)
+{
+ return;
+}
+
+static void
+ohci_device_ctrl_start(struct usb_xfer *xfer)
+{
+ ohci_softc_t *sc = OHCI_BUS2SC(xfer->xroot->bus);
+
+ /* set up TDs and the QH */
+ ohci_setup_standard_chain(xfer, &sc->sc_ctrl_p_last);
+
+ /* put transfer on interrupt queue */
+ ohci_transfer_intr_enqueue(xfer);
+}
+
+struct usb_pipe_methods ohci_device_ctrl_methods =
+{
+ .open = ohci_device_ctrl_open,
+ .close = ohci_device_ctrl_close,
+ .enter = ohci_device_ctrl_enter,
+ .start = ohci_device_ctrl_start,
+};
+
+/*------------------------------------------------------------------------*
+ * ohci interrupt support
+ *------------------------------------------------------------------------*/
+static void
+ohci_device_intr_open(struct usb_xfer *xfer)
+{
+ ohci_softc_t *sc = OHCI_BUS2SC(xfer->xroot->bus);
+ uint16_t best;
+ uint16_t bit;
+ uint16_t x;
+
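+ /*
+ * Pick the largest interval bucket not exceeding xfer->interval and,
+ * within it, the ED with the fewest transfers queued (sc_intr_stat),
+ * keeping the periodic load balanced.
+ */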
+ best = 0;
+ bit = OHCI_NO_EDS / 2;
+ while (bit) {
+ if (xfer->interval >= bit) {
+ x = bit;
+ best = bit;
+ while (x & bit) {
+ if (sc->sc_intr_stat[x] <
+ sc->sc_intr_stat[best]) {
+ best = x;
+ }
+ x++;
+ }
+ break;
+ }
+ bit >>= 1;
+ }
+
+ sc->sc_intr_stat[best]++;
+ xfer->qh_pos = best;
+
+ DPRINTFN(3, "best=%d interval=%d\n",
+ best, xfer->interval);
+}
+
+static void
+ohci_device_intr_close(struct usb_xfer *xfer)
+{
+ ohci_softc_t *sc = OHCI_BUS2SC(xfer->xroot->bus);
+
+ sc->sc_intr_stat[xfer->qh_pos]--;
+
+ ohci_device_done(xfer, USB_ERR_CANCELLED);
+}
+
+static void
+ohci_device_intr_enter(struct usb_xfer *xfer)
+{
+ return;
+}
+
+static void
+ohci_device_intr_start(struct usb_xfer *xfer)
+{
+ ohci_softc_t *sc = OHCI_BUS2SC(xfer->xroot->bus);
+
+ /* set up TDs and the QH */
+ ohci_setup_standard_chain(xfer, &sc->sc_intr_p_last[xfer->qh_pos]);
+
+ /* put transfer on interrupt queue */
+ ohci_transfer_intr_enqueue(xfer);
+}
+
+struct usb_pipe_methods ohci_device_intr_methods =
+{
+ .open = ohci_device_intr_open,
+ .close = ohci_device_intr_close,
+ .enter = ohci_device_intr_enter,
+ .start = ohci_device_intr_start,
+};
+
+/*------------------------------------------------------------------------*
+ * ohci isochronous support
+ *------------------------------------------------------------------------*/
+static void
+ohci_device_isoc_open(struct usb_xfer *xfer)
+{
+ return;
+}
+
+static void
+ohci_device_isoc_close(struct usb_xfer *xfer)
+{
+ ohci_device_done(xfer, USB_ERR_CANCELLED);
+}
+
+static void
+ohci_device_isoc_enter(struct usb_xfer *xfer)
+{
+ struct usb_page_search buf_res;
+ ohci_softc_t *sc = OHCI_BUS2SC(xfer->xroot->bus);
+ struct ohci_hcca *hcca;
+ uint32_t buf_offset;
+ uint32_t nframes;
+ uint32_t ed_flags;
+ uint32_t *plen;
+ uint16_t itd_offset[OHCI_ITD_NOFFSET];
+ uint16_t length;
+ uint8_t ncur;
+ ohci_itd_t *td;
+ ohci_itd_t *td_last = NULL;
+ ohci_ed_t *ed;
+
+ hcca = ohci_get_hcca(sc);
+
+ nframes = le32toh(hcca->hcca_frame_number);
+
+ DPRINTFN(6, "xfer=%p isoc_next=%u nframes=%u hcca_fn=%u\n",
+ xfer, xfer->endpoint->isoc_next, xfer->nframes, nframes);
+
+ if ((xfer->endpoint->is_synced == 0) ||
+ (((nframes - xfer->endpoint->isoc_next) & 0xFFFF) < xfer->nframes) ||
+ (((xfer->endpoint->isoc_next - nframes) & 0xFFFF) >= 128)) {
+ /*
+ * If there is a data underflow or the pipe queue is empty, we
+ * schedule the transfer a few frames ahead of the current
+ * frame position. Otherwise two isochronous transfers might
+ * overlap.
+ */
+ xfer->endpoint->isoc_next = (nframes + 3) & 0xFFFF;
+ xfer->endpoint->is_synced = 1;
+ DPRINTFN(3, "start next=%d\n", xfer->endpoint->isoc_next);
+ }
+ /*
+ * compute how many milliseconds the insertion is ahead of the
+ * current frame position:
+ */
+ buf_offset = ((xfer->endpoint->isoc_next - nframes) & 0xFFFF);
+
+ /*
+ * pre-compute when the isochronous transfer will be finished:
+ */
+ xfer->isoc_time_complete =
+ (usb_isoc_time_expand(&sc->sc_bus, nframes) + buf_offset +
+ xfer->nframes);
+
+ /* get the real number of frames */
+
+ nframes = xfer->nframes;
+
+ buf_offset = 0;
+
+ plen = xfer->frlengths;
+
+ /* toggle the DMA set we are using */
+ xfer->flags_int.curr_dma_set ^= 1;
+
+ /* get next DMA set */
+ td = xfer->td_start[xfer->flags_int.curr_dma_set];
+
+ xfer->td_transfer_first = td;
+
+ ncur = 0;
+ length = 0;
+
+ while (nframes--) {
+ if (td == NULL) {
+ panic("%s:%d: out of TD's\n",
+ __FUNCTION__, __LINE__);
+ }
+ itd_offset[ncur] = length;
+ buf_offset += *plen;
+ length += *plen;
+ plen++;
+ ncur++;
+
+ if ( /* check if the ITD is full */
+ (ncur == OHCI_ITD_NOFFSET) ||
+ /* check if we have put more than 4K into the ITD */
+ (length & 0xF000) ||
+ /* check if it is the last frame */
+ (nframes == 0)) {
+
+ /* fill current ITD */
+ td->itd_flags = htole32(
+ OHCI_ITD_NOCC |
+ OHCI_ITD_SET_SF(xfer->endpoint->isoc_next) |
+ OHCI_ITD_NOINTR |
+ OHCI_ITD_SET_FC(ncur));
+
+ td->frames = ncur;
+ xfer->endpoint->isoc_next += ncur;
+
+ if (length == 0) {
+ /* all zero */
+ td->itd_bp0 = 0;
+ td->itd_be = ~0;
+
+ while (ncur--) {
+ td->itd_offset[ncur] =
+ htole16(OHCI_ITD_MK_OFFS(0));
+ }
+ } else {
+ usbd_get_page(xfer->frbuffers, buf_offset - length, &buf_res);
+ length = OHCI_PAGE_MASK(buf_res.physaddr);
+ buf_res.physaddr =
+ OHCI_PAGE(buf_res.physaddr);
+ td->itd_bp0 = htole32(buf_res.physaddr);
+ usbd_get_page(xfer->frbuffers, buf_offset - 1, &buf_res);
+ td->itd_be = htole32(buf_res.physaddr);
+
+ while (ncur--) {
+ itd_offset[ncur] += length;
+ itd_offset[ncur] =
+ OHCI_ITD_MK_OFFS(itd_offset[ncur]);
+ td->itd_offset[ncur] =
+ htole16(itd_offset[ncur]);
+ }
+ }
+ ncur = 0;
+ length = 0;
+ td_last = td;
+ td = td->obj_next;
+
+ if (td) {
+ /* link the last TD with the next one */
+ td_last->itd_next = td->itd_self;
+ }
+ usb_pc_cpu_flush(td_last->page_cache);
+ }
+ }
+
+	/* update the last TD: enable its completion interrupt (DI=0) and terminate the list */
+ td_last->itd_flags &= ~htole32(OHCI_ITD_NOINTR);
+ td_last->itd_flags |= htole32(OHCI_ITD_SET_DI(0));
+ td_last->itd_next = 0;
+
+ usb_pc_cpu_flush(td_last->page_cache);
+
+ xfer->td_transfer_last = td_last;
+
+#ifdef USB_DEBUG
+ if (ohcidebug > 8) {
+ DPRINTF("data before transfer:\n");
+ ohci_dump_itds(xfer->td_transfer_first);
+ }
+#endif
+ ed = xfer->qh_start[xfer->flags_int.curr_dma_set];
+
+ if (UE_GET_DIR(xfer->endpointno) == UE_DIR_IN)
+ ed_flags = (OHCI_ED_DIR_IN | OHCI_ED_FORMAT_ISO);
+ else
+ ed_flags = (OHCI_ED_DIR_OUT | OHCI_ED_FORMAT_ISO);
+
+ ed_flags |= (OHCI_ED_SET_FA(xfer->address) |
+ OHCI_ED_SET_EN(UE_GET_ADDR(xfer->endpointno)) |
+ OHCI_ED_SET_MAXP(xfer->max_frame_size));
+
+ if (xfer->xroot->udev->speed == USB_SPEED_LOW) {
+ ed_flags |= OHCI_ED_SPEED;
+ }
+ ed->ed_flags = htole32(ed_flags);
+
+ td = xfer->td_transfer_first;
+
+ ed->ed_headp = td->itd_self;
+
+ /* isochronous transfers are not affected by suspend / resume */
+ /* the append function will flush the endpoint descriptor */
+
+ OHCI_APPEND_QH(ed, sc->sc_isoc_p_last);
+}
+
+static void
+ohci_device_isoc_start(struct usb_xfer *xfer)
+{
+ /* put transfer on interrupt queue */
+ ohci_transfer_intr_enqueue(xfer);
+}
+
+struct usb_pipe_methods ohci_device_isoc_methods =
+{
+ .open = ohci_device_isoc_open,
+ .close = ohci_device_isoc_close,
+ .enter = ohci_device_isoc_enter,
+ .start = ohci_device_isoc_start,
+};
+
+/*------------------------------------------------------------------------*
+ * ohci root control support
+ *------------------------------------------------------------------------*
+ * Simulate a hardware hub by handling all the necessary requests.
+ *------------------------------------------------------------------------*/
+
+static const
+struct usb_device_descriptor ohci_devd =
+{
+ sizeof(struct usb_device_descriptor),
+ UDESC_DEVICE, /* type */
+ {0x00, 0x01}, /* USB version */
+ UDCLASS_HUB, /* class */
+ UDSUBCLASS_HUB, /* subclass */
+ UDPROTO_FSHUB, /* protocol */
+ 64, /* max packet */
+ {0}, {0}, {0x00, 0x01}, /* device id */
+ 1, 2, 0, /* string indices */
+ 1 /* # of configurations */
+};
+
+static const
+struct ohci_config_desc ohci_confd =
+{
+ .confd = {
+ .bLength = sizeof(struct usb_config_descriptor),
+ .bDescriptorType = UDESC_CONFIG,
+ .wTotalLength[0] = sizeof(ohci_confd),
+ .bNumInterface = 1,
+ .bConfigurationValue = 1,
+ .iConfiguration = 0,
+ .bmAttributes = UC_SELF_POWERED,
+ .bMaxPower = 0, /* max power */
+ },
+ .ifcd = {
+ .bLength = sizeof(struct usb_interface_descriptor),
+ .bDescriptorType = UDESC_INTERFACE,
+ .bNumEndpoints = 1,
+ .bInterfaceClass = UICLASS_HUB,
+ .bInterfaceSubClass = UISUBCLASS_HUB,
+ .bInterfaceProtocol = 0,
+ },
+ .endpd = {
+ .bLength = sizeof(struct usb_endpoint_descriptor),
+ .bDescriptorType = UDESC_ENDPOINT,
+ .bEndpointAddress = UE_DIR_IN | OHCI_INTR_ENDPT,
+ .bmAttributes = UE_INTERRUPT,
+ .wMaxPacketSize[0] = 32,/* max packet (255 ports) */
+ .bInterval = 255,
+ },
+};
+
+static const
+struct usb_hub_descriptor ohci_hubd =
+{
+ 0, /* dynamic length */
+ UDESC_HUB,
+ 0,
+ {0, 0},
+ 0,
+ 0,
+ {0},
+};
+
+static usb_error_t
+ohci_roothub_exec(struct usb_device *udev,
+ struct usb_device_request *req, const void **pptr, uint16_t *plength)
+{
+ ohci_softc_t *sc = OHCI_BUS2SC(udev->bus);
+ const void *ptr;
+ const char *str_ptr;
+ uint32_t port;
+ uint32_t v;
+ uint16_t len;
+ uint16_t value;
+ uint16_t index;
+ uint8_t l;
+ usb_error_t err;
+
+ USB_BUS_LOCK_ASSERT(&sc->sc_bus, MA_OWNED);
+
+ /* buffer reset */
+ ptr = (const void *)&sc->sc_hub_desc.temp;
+ len = 0;
+ err = 0;
+
+ value = UGETW(req->wValue);
+ index = UGETW(req->wIndex);
+
+ DPRINTFN(3, "type=0x%02x request=0x%02x wLen=0x%04x "
+ "wValue=0x%04x wIndex=0x%04x\n",
+ req->bmRequestType, req->bRequest,
+ UGETW(req->wLength), value, index);
+
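+/*
+ * Pack bRequest and bmRequestType into one 16-bit value so a request
+ * can be decoded with a single switch statement, e.g.
+ * C(UR_GET_DESCRIPTOR, UT_READ_DEVICE) yields (0x06 | (0x80 << 8)).
+ */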
+#define C(x,y) ((x) | ((y) << 8))
+ switch (C(req->bRequest, req->bmRequestType)) {
+ case C(UR_CLEAR_FEATURE, UT_WRITE_DEVICE):
+ case C(UR_CLEAR_FEATURE, UT_WRITE_INTERFACE):
+ case C(UR_CLEAR_FEATURE, UT_WRITE_ENDPOINT):
+ /*
+ * DEVICE_REMOTE_WAKEUP and ENDPOINT_HALT are no-ops
+ * for the integrated root hub.
+ */
+ break;
+ case C(UR_GET_CONFIG, UT_READ_DEVICE):
+ len = 1;
+ sc->sc_hub_desc.temp[0] = sc->sc_conf;
+ break;
+ case C(UR_GET_DESCRIPTOR, UT_READ_DEVICE):
+ switch (value >> 8) {
+ case UDESC_DEVICE:
+ if ((value & 0xff) != 0) {
+ err = USB_ERR_IOERROR;
+ goto done;
+ }
+ len = sizeof(ohci_devd);
+ ptr = (const void *)&ohci_devd;
+ break;
+
+ case UDESC_CONFIG:
+ if ((value & 0xff) != 0) {
+ err = USB_ERR_IOERROR;
+ goto done;
+ }
+ len = sizeof(ohci_confd);
+ ptr = (const void *)&ohci_confd;
+ break;
+
+ case UDESC_STRING:
+ switch (value & 0xff) {
+ case 0: /* Language table */
+ str_ptr = "\001";
+ break;
+
+ case 1: /* Vendor */
+ str_ptr = sc->sc_vendor;
+ break;
+
+ case 2: /* Product */
+ str_ptr = "OHCI root HUB";
+ break;
+
+ default:
+ str_ptr = "";
+ break;
+ }
+
+ len = usb_make_str_desc(
+ sc->sc_hub_desc.temp,
+ sizeof(sc->sc_hub_desc.temp),
+ str_ptr);
+ break;
+
+ default:
+ err = USB_ERR_IOERROR;
+ goto done;
+ }
+ break;
+ case C(UR_GET_INTERFACE, UT_READ_INTERFACE):
+ len = 1;
+ sc->sc_hub_desc.temp[0] = 0;
+ break;
+ case C(UR_GET_STATUS, UT_READ_DEVICE):
+ len = 2;
+ USETW(sc->sc_hub_desc.stat.wStatus, UDS_SELF_POWERED);
+ break;
+ case C(UR_GET_STATUS, UT_READ_INTERFACE):
+ case C(UR_GET_STATUS, UT_READ_ENDPOINT):
+ len = 2;
+ USETW(sc->sc_hub_desc.stat.wStatus, 0);
+ break;
+ case C(UR_SET_ADDRESS, UT_WRITE_DEVICE):
+ if (value >= OHCI_MAX_DEVICES) {
+ err = USB_ERR_IOERROR;
+ goto done;
+ }
+ sc->sc_addr = value;
+ break;
+ case C(UR_SET_CONFIG, UT_WRITE_DEVICE):
+ if ((value != 0) && (value != 1)) {
+ err = USB_ERR_IOERROR;
+ goto done;
+ }
+ sc->sc_conf = value;
+ break;
+ case C(UR_SET_DESCRIPTOR, UT_WRITE_DEVICE):
+ break;
+ case C(UR_SET_FEATURE, UT_WRITE_DEVICE):
+ case C(UR_SET_FEATURE, UT_WRITE_INTERFACE):
+ case C(UR_SET_FEATURE, UT_WRITE_ENDPOINT):
+ err = USB_ERR_IOERROR;
+ goto done;
+ case C(UR_SET_INTERFACE, UT_WRITE_INTERFACE):
+ break;
+ case C(UR_SYNCH_FRAME, UT_WRITE_ENDPOINT):
+ break;
+ /* Hub requests */
+ case C(UR_CLEAR_FEATURE, UT_WRITE_CLASS_DEVICE):
+ break;
+ case C(UR_CLEAR_FEATURE, UT_WRITE_CLASS_OTHER):
+ DPRINTFN(9, "UR_CLEAR_PORT_FEATURE "
+ "port=%d feature=%d\n",
+ index, value);
+ if ((index < 1) ||
+ (index > sc->sc_noport)) {
+ err = USB_ERR_IOERROR;
+ goto done;
+ }
+ port = OHCI_RH_PORT_STATUS(index);
+ switch (value) {
+ case UHF_PORT_ENABLE:
+ OWRITE4(sc, port, UPS_CURRENT_CONNECT_STATUS);
+ break;
+ case UHF_PORT_SUSPEND:
+ OWRITE4(sc, port, UPS_OVERCURRENT_INDICATOR);
+ break;
+ case UHF_PORT_POWER:
+ /* Yes, writing to the LOW_SPEED bit clears power. */
+ OWRITE4(sc, port, UPS_LOW_SPEED);
+ break;
+ case UHF_C_PORT_CONNECTION:
+ OWRITE4(sc, port, UPS_C_CONNECT_STATUS << 16);
+ break;
+ case UHF_C_PORT_ENABLE:
+ OWRITE4(sc, port, UPS_C_PORT_ENABLED << 16);
+ break;
+ case UHF_C_PORT_SUSPEND:
+ OWRITE4(sc, port, UPS_C_SUSPEND << 16);
+ break;
+ case UHF_C_PORT_OVER_CURRENT:
+ OWRITE4(sc, port, UPS_C_OVERCURRENT_INDICATOR << 16);
+ break;
+ case UHF_C_PORT_RESET:
+ OWRITE4(sc, port, UPS_C_PORT_RESET << 16);
+ break;
+ default:
+ err = USB_ERR_IOERROR;
+ goto done;
+ }
+ switch (value) {
+ case UHF_C_PORT_CONNECTION:
+ case UHF_C_PORT_ENABLE:
+ case UHF_C_PORT_SUSPEND:
+ case UHF_C_PORT_OVER_CURRENT:
+ case UHF_C_PORT_RESET:
+ /* enable RHSC interrupt if condition is cleared. */
+ if ((OREAD4(sc, port) >> 16) == 0)
+ ohci_rhsc_enable(sc);
+ break;
+ default:
+ break;
+ }
+ break;
+ case C(UR_GET_DESCRIPTOR, UT_READ_CLASS_DEVICE):
+ if ((value & 0xff) != 0) {
+ err = USB_ERR_IOERROR;
+ goto done;
+ }
+ v = OREAD4(sc, OHCI_RH_DESCRIPTOR_A);
+
+ sc->sc_hub_desc.hubd = ohci_hubd;
+ sc->sc_hub_desc.hubd.bNbrPorts = sc->sc_noport;
+ USETW(sc->sc_hub_desc.hubd.wHubCharacteristics,
+ (v & OHCI_NPS ? UHD_PWR_NO_SWITCH :
+ v & OHCI_PSM ? UHD_PWR_GANGED : UHD_PWR_INDIVIDUAL)
+ /* XXX overcurrent */
+ );
+ sc->sc_hub_desc.hubd.bPwrOn2PwrGood = OHCI_GET_POTPGT(v);
+ v = OREAD4(sc, OHCI_RH_DESCRIPTOR_B);
+
+ for (l = 0; l < sc->sc_noport; l++) {
+ if (v & 1) {
+ sc->sc_hub_desc.hubd.DeviceRemovable[l / 8] |= (1 << (l % 8));
+ }
+ v >>= 1;
+ }
+ sc->sc_hub_desc.hubd.bDescLength =
+ 8 + ((sc->sc_noport + 7) / 8);
+ len = sc->sc_hub_desc.hubd.bDescLength;
+ break;
+
+ case C(UR_GET_STATUS, UT_READ_CLASS_DEVICE):
+ len = 16;
+ bzero(sc->sc_hub_desc.temp, 16);
+ break;
+ case C(UR_GET_STATUS, UT_READ_CLASS_OTHER):
+ DPRINTFN(9, "get port status i=%d\n",
+ index);
+ if ((index < 1) ||
+ (index > sc->sc_noport)) {
+ err = USB_ERR_IOERROR;
+ goto done;
+ }
+ v = OREAD4(sc, OHCI_RH_PORT_STATUS(index));
+ DPRINTFN(9, "port status=0x%04x\n", v);
+ USETW(sc->sc_hub_desc.ps.wPortStatus, v);
+ USETW(sc->sc_hub_desc.ps.wPortChange, v >> 16);
+ len = sizeof(sc->sc_hub_desc.ps);
+ break;
+ case C(UR_SET_DESCRIPTOR, UT_WRITE_CLASS_DEVICE):
+ err = USB_ERR_IOERROR;
+ goto done;
+ case C(UR_SET_FEATURE, UT_WRITE_CLASS_DEVICE):
+ break;
+ case C(UR_SET_FEATURE, UT_WRITE_CLASS_OTHER):
+ if ((index < 1) ||
+ (index > sc->sc_noport)) {
+ err = USB_ERR_IOERROR;
+ goto done;
+ }
+ port = OHCI_RH_PORT_STATUS(index);
+ switch (value) {
+ case UHF_PORT_ENABLE:
+ OWRITE4(sc, port, UPS_PORT_ENABLED);
+ break;
+ case UHF_PORT_SUSPEND:
+ OWRITE4(sc, port, UPS_SUSPEND);
+ break;
+ case UHF_PORT_RESET:
+ DPRINTFN(6, "reset port %d\n", index);
+ OWRITE4(sc, port, UPS_RESET);
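+			/*
+			 * Poll for reset completion: wait at most 12
+			 * intervals of USB_PORT_ROOT_RESET_DELAY
+			 * milliseconds before giving up with a timeout.
+			 */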
+ for (v = 0;; v++) {
+ if (v < 12) {
+ usb_pause_mtx(&sc->sc_bus.bus_mtx,
+ USB_MS_TO_TICKS(USB_PORT_ROOT_RESET_DELAY));
+
+ if ((OREAD4(sc, port) & UPS_RESET) == 0) {
+ break;
+ }
+ } else {
+ err = USB_ERR_TIMEOUT;
+ goto done;
+ }
+ }
+ DPRINTFN(9, "ohci port %d reset, status = 0x%04x\n",
+ index, OREAD4(sc, port));
+ break;
+ case UHF_PORT_POWER:
+ DPRINTFN(3, "set port power %d\n", index);
+ OWRITE4(sc, port, UPS_PORT_POWER);
+ break;
+ default:
+ err = USB_ERR_IOERROR;
+ goto done;
+ }
+ break;
+ default:
+ err = USB_ERR_IOERROR;
+ goto done;
+ }
+done:
+ *plength = len;
+ *pptr = ptr;
+ return (err);
+}
+
+static void
+ohci_xfer_setup(struct usb_setup_params *parm)
+{
+ struct usb_page_search page_info;
+ struct usb_page_cache *pc;
+ ohci_softc_t *sc;
+ struct usb_xfer *xfer;
+ void *last_obj;
+ uint32_t ntd;
+ uint32_t nitd;
+ uint32_t nqh;
+ uint32_t n;
+
+ sc = OHCI_BUS2SC(parm->udev->bus);
+ xfer = parm->curr_xfer;
+
+ parm->hc_max_packet_size = 0x500;
+ parm->hc_max_packet_count = 1;
+ parm->hc_max_frame_size = OHCI_PAGE_SIZE;
+
+ /*
+ * calculate ntd and nqh
+ */
+ if (parm->methods == &ohci_device_ctrl_methods) {
+ xfer->flags_int.bdma_enable = 1;
+
+ usbd_transfer_setup_sub(parm);
+
+ nitd = 0;
+ ntd = ((2 * xfer->nframes) + 1 /* STATUS */
+ + (xfer->max_data_length / xfer->max_hc_frame_size));
+ nqh = 1;
+
+ } else if (parm->methods == &ohci_device_bulk_methods) {
+ xfer->flags_int.bdma_enable = 1;
+
+ usbd_transfer_setup_sub(parm);
+
+ nitd = 0;
+ ntd = ((2 * xfer->nframes)
+ + (xfer->max_data_length / xfer->max_hc_frame_size));
+ nqh = 1;
+
+ } else if (parm->methods == &ohci_device_intr_methods) {
+ xfer->flags_int.bdma_enable = 1;
+
+ usbd_transfer_setup_sub(parm);
+
+ nitd = 0;
+ ntd = ((2 * xfer->nframes)
+ + (xfer->max_data_length / xfer->max_hc_frame_size));
+ nqh = 1;
+
+ } else if (parm->methods == &ohci_device_isoc_methods) {
+ xfer->flags_int.bdma_enable = 1;
+
+ usbd_transfer_setup_sub(parm);
+
+ nitd = ((xfer->max_data_length / OHCI_PAGE_SIZE) +
+ ((xfer->nframes + OHCI_ITD_NOFFSET - 1) / OHCI_ITD_NOFFSET) +
+ 1 /* EXTRA */ );
+ ntd = 0;
+ nqh = 1;
+
+ } else {
+
+ usbd_transfer_setup_sub(parm);
+
+ nitd = 0;
+ ntd = 0;
+ nqh = 0;
+ }
+
+alloc_dma_set:
+
+ if (parm->err) {
+ return;
+ }
+ last_obj = NULL;
+
+ if (usbd_transfer_setup_sub_malloc(
+ parm, &pc, sizeof(ohci_td_t),
+ OHCI_TD_ALIGN, ntd)) {
+ parm->err = USB_ERR_NOMEM;
+ return;
+ }
+ if (parm->buf) {
+ for (n = 0; n != ntd; n++) {
+ ohci_td_t *td;
+
+ usbd_get_page(pc + n, 0, &page_info);
+
+ td = page_info.buffer;
+
+ /* init TD */
+ td->td_self = htole32(page_info.physaddr);
+ td->obj_next = last_obj;
+ td->page_cache = pc + n;
+
+ last_obj = td;
+
+ usb_pc_cpu_flush(pc + n);
+ }
+ }
+ if (usbd_transfer_setup_sub_malloc(
+ parm, &pc, sizeof(ohci_itd_t),
+ OHCI_ITD_ALIGN, nitd)) {
+ parm->err = USB_ERR_NOMEM;
+ return;
+ }
+ if (parm->buf) {
+ for (n = 0; n != nitd; n++) {
+ ohci_itd_t *itd;
+
+ usbd_get_page(pc + n, 0, &page_info);
+
+ itd = page_info.buffer;
+
+ /* init TD */
+ itd->itd_self = htole32(page_info.physaddr);
+ itd->obj_next = last_obj;
+ itd->page_cache = pc + n;
+
+ last_obj = itd;
+
+ usb_pc_cpu_flush(pc + n);
+ }
+ }
+ xfer->td_start[xfer->flags_int.curr_dma_set] = last_obj;
+
+ last_obj = NULL;
+
+ if (usbd_transfer_setup_sub_malloc(
+ parm, &pc, sizeof(ohci_ed_t),
+ OHCI_ED_ALIGN, nqh)) {
+ parm->err = USB_ERR_NOMEM;
+ return;
+ }
+ if (parm->buf) {
+ for (n = 0; n != nqh; n++) {
+ ohci_ed_t *ed;
+
+ usbd_get_page(pc + n, 0, &page_info);
+
+ ed = page_info.buffer;
+
+ /* init QH */
+ ed->ed_self = htole32(page_info.physaddr);
+ ed->obj_next = last_obj;
+ ed->page_cache = pc + n;
+
+ last_obj = ed;
+
+ usb_pc_cpu_flush(pc + n);
+ }
+ }
+ xfer->qh_start[xfer->flags_int.curr_dma_set] = last_obj;
+
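+	/*
+	 * Run the allocation a second time so that the transfer ends up
+	 * with two complete descriptor sets (double buffering).
+	 */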
+ if (!xfer->flags_int.curr_dma_set) {
+ xfer->flags_int.curr_dma_set = 1;
+ goto alloc_dma_set;
+ }
+}
+
+static void
+ohci_ep_init(struct usb_device *udev, struct usb_endpoint_descriptor *edesc,
+ struct usb_endpoint *ep)
+{
+ ohci_softc_t *sc = OHCI_BUS2SC(udev->bus);
+
+ DPRINTFN(2, "endpoint=%p, addr=%d, endpt=%d, mode=%d (%d)\n",
+ ep, udev->address,
+ edesc->bEndpointAddress, udev->flags.usb_mode,
+ sc->sc_addr);
+
+ if (udev->flags.usb_mode != USB_MODE_HOST) {
+ /* not supported */
+ return;
+ }
+ if (udev->device_index != sc->sc_addr) {
+ switch (edesc->bmAttributes & UE_XFERTYPE) {
+ case UE_CONTROL:
+ ep->methods = &ohci_device_ctrl_methods;
+ break;
+ case UE_INTERRUPT:
+ ep->methods = &ohci_device_intr_methods;
+ break;
+ case UE_ISOCHRONOUS:
+ if (udev->speed == USB_SPEED_FULL) {
+ ep->methods = &ohci_device_isoc_methods;
+ }
+ break;
+ case UE_BULK:
+ ep->methods = &ohci_device_bulk_methods;
+ break;
+ default:
+ /* do nothing */
+ break;
+ }
+ }
+}
+
+static void
+ohci_xfer_unsetup(struct usb_xfer *xfer)
+{
+ return;
+}
+
+static void
+ohci_get_dma_delay(struct usb_device *udev, uint32_t *pus)
+{
+ /*
+ * Wait until hardware has finished any possible use of the
+ * transfer descriptor(s) and QH
+ */
+ *pus = (1125); /* microseconds */
+}
+
+static void
+ohci_device_resume(struct usb_device *udev)
+{
+ struct ohci_softc *sc = OHCI_BUS2SC(udev->bus);
+ struct usb_xfer *xfer;
+ struct usb_pipe_methods *methods;
+ ohci_ed_t *ed;
+
+ DPRINTF("\n");
+
+ USB_BUS_LOCK(udev->bus);
+
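+	/* re-insert the EDs of all queued transfers for this device into the HC schedule */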
+ TAILQ_FOREACH(xfer, &sc->sc_bus.intr_q.head, wait_entry) {
+
+ if (xfer->xroot->udev == udev) {
+
+ methods = xfer->endpoint->methods;
+ ed = xfer->qh_start[xfer->flags_int.curr_dma_set];
+
+ if (methods == &ohci_device_bulk_methods) {
+ OHCI_APPEND_QH(ed, sc->sc_bulk_p_last);
+ OWRITE4(sc, OHCI_COMMAND_STATUS, OHCI_BLF);
+ }
+ if (methods == &ohci_device_ctrl_methods) {
+ OHCI_APPEND_QH(ed, sc->sc_ctrl_p_last);
+ OWRITE4(sc, OHCI_COMMAND_STATUS, OHCI_CLF);
+ }
+ if (methods == &ohci_device_intr_methods) {
+ OHCI_APPEND_QH(ed, sc->sc_intr_p_last[xfer->qh_pos]);
+ }
+ }
+ }
+
+ USB_BUS_UNLOCK(udev->bus);
+
+ return;
+}
+
+static void
+ohci_device_suspend(struct usb_device *udev)
+{
+ struct ohci_softc *sc = OHCI_BUS2SC(udev->bus);
+ struct usb_xfer *xfer;
+ struct usb_pipe_methods *methods;
+ ohci_ed_t *ed;
+
+ DPRINTF("\n");
+
+ USB_BUS_LOCK(udev->bus);
+
+ TAILQ_FOREACH(xfer, &sc->sc_bus.intr_q.head, wait_entry) {
+
+ if (xfer->xroot->udev == udev) {
+
+ methods = xfer->endpoint->methods;
+ ed = xfer->qh_start[xfer->flags_int.curr_dma_set];
+
+ if (methods == &ohci_device_bulk_methods) {
+ OHCI_REMOVE_QH(ed, sc->sc_bulk_p_last);
+ }
+ if (methods == &ohci_device_ctrl_methods) {
+ OHCI_REMOVE_QH(ed, sc->sc_ctrl_p_last);
+ }
+ if (methods == &ohci_device_intr_methods) {
+ OHCI_REMOVE_QH(ed, sc->sc_intr_p_last[xfer->qh_pos]);
+ }
+ }
+ }
+
+ USB_BUS_UNLOCK(udev->bus);
+
+ return;
+}
+
+static void
+ohci_set_hw_power(struct usb_bus *bus)
+{
+ struct ohci_softc *sc = OHCI_BUS2SC(bus);
+ uint32_t temp;
+ uint32_t flags;
+
+ DPRINTF("\n");
+
+ USB_BUS_LOCK(bus);
+
+ flags = bus->hw_power_state;
+
+ temp = OREAD4(sc, OHCI_CONTROL);
+ temp &= ~(OHCI_PLE | OHCI_IE | OHCI_CLE | OHCI_BLE);
+
+ if (flags & USB_HW_POWER_CONTROL)
+ temp |= OHCI_CLE;
+
+ if (flags & USB_HW_POWER_BULK)
+ temp |= OHCI_BLE;
+
+ if (flags & USB_HW_POWER_INTERRUPT)
+ temp |= OHCI_PLE;
+
+ if (flags & USB_HW_POWER_ISOC)
+ temp |= OHCI_IE | OHCI_PLE;
+
+ OWRITE4(sc, OHCI_CONTROL, temp);
+
+ USB_BUS_UNLOCK(bus);
+
+ return;
+}
+
+struct usb_bus_methods ohci_bus_methods =
+{
+ .endpoint_init = ohci_ep_init,
+ .xfer_setup = ohci_xfer_setup,
+ .xfer_unsetup = ohci_xfer_unsetup,
+ .get_dma_delay = ohci_get_dma_delay,
+ .device_resume = ohci_device_resume,
+ .device_suspend = ohci_device_suspend,
+ .set_hw_power = ohci_set_hw_power,
+ .roothub_exec = ohci_roothub_exec,
+ .xfer_poll = ohci_do_poll,
+};
diff --git a/rtems/freebsd/dev/usb/controller/ohci.h b/rtems/freebsd/dev/usb/controller/ohci.h
new file mode 100644
index 00000000..1affa420
--- /dev/null
+++ b/rtems/freebsd/dev/usb/controller/ohci.h
@@ -0,0 +1,276 @@
+/* $FreeBSD$ */
+/*-
+ * Copyright (c) 1998 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Lennart Augustsson (lennart@augustsson.net) at
+ * Carlstedt Research & Technology.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the NetBSD
+ * Foundation, Inc. and its contributors.
+ * 4. Neither the name of The NetBSD Foundation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _OHCI_HH_
+#define _OHCI_HH_
+
+#define OHCI_MAX_DEVICES MIN(USB_MAX_DEVICES, 128)
+
+#define OHCI_NO_INTRS 32
+#define OHCI_HCCA_SIZE 256
+
+/* Structures alignment (bytes) */
+#define OHCI_HCCA_ALIGN 256
+#define OHCI_ED_ALIGN 16
+#define OHCI_TD_ALIGN 16
+#define OHCI_ITD_ALIGN 32
+
+#define OHCI_PAGE_SIZE 0x1000
+#define OHCI_PAGE(x) ((x) &~ 0xfff)
+#define OHCI_PAGE_OFFSET(x) ((x) & 0xfff)
+#define OHCI_PAGE_MASK(x) ((x) & 0xfff)
+
+#if ((USB_PAGE_SIZE < OHCI_ED_ALIGN) || (OHCI_ED_ALIGN == 0) || \
+ (USB_PAGE_SIZE < OHCI_TD_ALIGN) || (OHCI_TD_ALIGN == 0) || \
+ (USB_PAGE_SIZE < OHCI_ITD_ALIGN) || (OHCI_ITD_ALIGN == 0) || \
+ (USB_PAGE_SIZE < OHCI_PAGE_SIZE) || (OHCI_PAGE_SIZE == 0))
+#error "Invalid USB page size!"
+#endif
+
+#define OHCI_VIRTUAL_FRAMELIST_COUNT 128 /* dummy */
+
+#if (OHCI_VIRTUAL_FRAMELIST_COUNT < USB_MAX_FS_ISOC_FRAMES_PER_XFER)
+#error "maximum number of full-speed isochronous frames is higher than supported!"
+#endif
+
+struct ohci_hcca {
+ volatile uint32_t hcca_interrupt_table[OHCI_NO_INTRS];
+ volatile uint32_t hcca_frame_number;
+ volatile uint32_t hcca_done_head;
+#define OHCI_DONE_INTRS 1
+} __aligned(OHCI_HCCA_ALIGN);
+
+typedef struct ohci_hcca ohci_hcca_t;
+
+struct ohci_ed {
+ volatile uint32_t ed_flags;
+#define OHCI_ED_GET_FA(s) ((s) & 0x7f)
+#define OHCI_ED_ADDRMASK 0x0000007f
+#define OHCI_ED_SET_FA(s) (s)
+#define OHCI_ED_GET_EN(s) (((s) >> 7) & 0xf)
+#define OHCI_ED_SET_EN(s) ((s) << 7)
+#define OHCI_ED_DIR_MASK 0x00001800
+#define OHCI_ED_DIR_TD 0x00000000
+#define OHCI_ED_DIR_OUT 0x00000800
+#define OHCI_ED_DIR_IN 0x00001000
+#define OHCI_ED_SPEED 0x00002000
+#define OHCI_ED_SKIP 0x00004000
+#define OHCI_ED_FORMAT_GEN 0x00000000
+#define OHCI_ED_FORMAT_ISO 0x00008000
+#define OHCI_ED_GET_MAXP(s) (((s) >> 16) & 0x07ff)
+#define OHCI_ED_SET_MAXP(s) ((s) << 16)
+#define OHCI_ED_MAXPMASK (0x7ff << 16)
+ volatile uint32_t ed_tailp;
+ volatile uint32_t ed_headp;
+#define OHCI_HALTED 0x00000001
+#define OHCI_TOGGLECARRY 0x00000002
+#define OHCI_HEADMASK 0xfffffffc
+ volatile uint32_t ed_next;
+/*
+ * Extra information needed:
+ */
+ struct ohci_ed *next;
+ struct ohci_ed *prev;
+ struct ohci_ed *obj_next;
+ struct usb_page_cache *page_cache;
+ uint32_t ed_self;
+} __aligned(OHCI_ED_ALIGN);
+
+typedef struct ohci_ed ohci_ed_t;
+
+struct ohci_td {
+ volatile uint32_t td_flags;
+#define OHCI_TD_R 0x00040000 /* Buffer Rounding */
+#define OHCI_TD_DP_MASK 0x00180000 /* Direction / PID */
+#define OHCI_TD_SETUP 0x00000000
+#define OHCI_TD_OUT 0x00080000
+#define OHCI_TD_IN 0x00100000
+#define OHCI_TD_GET_DI(x) (((x) >> 21) & 7) /* Delay Interrupt */
+#define OHCI_TD_SET_DI(x) ((x) << 21)
+#define OHCI_TD_NOINTR 0x00e00000
+#define OHCI_TD_INTR_MASK 0x00e00000
+#define OHCI_TD_TOGGLE_CARRY 0x00000000
+#define OHCI_TD_TOGGLE_0 0x02000000
+#define OHCI_TD_TOGGLE_1 0x03000000
+#define OHCI_TD_TOGGLE_MASK 0x03000000
+#define OHCI_TD_GET_EC(x) (((x) >> 26) & 3) /* Error Count */
+#define OHCI_TD_GET_CC(x) ((x) >> 28) /* Condition Code */
+#define OHCI_TD_SET_CC(x) ((x) << 28)
+#define OHCI_TD_NOCC 0xf0000000
+ volatile uint32_t td_cbp; /* Current Buffer Pointer */
+ volatile uint32_t td_next; /* Next TD */
+#define OHCI_TD_NEXT_END 0
+ volatile uint32_t td_be; /* Buffer End */
+/*
+ * Extra information needed:
+ */
+ struct ohci_td *obj_next;
+ struct ohci_td *alt_next;
+ struct usb_page_cache *page_cache;
+ uint32_t td_self;
+ uint16_t len;
+} __aligned(OHCI_TD_ALIGN);
+
+typedef struct ohci_td ohci_td_t;
+
+struct ohci_itd {
+ volatile uint32_t itd_flags;
+#define OHCI_ITD_GET_SF(x) ((x) & 0x0000ffff)
+#define OHCI_ITD_SET_SF(x) ((x) & 0xffff)
+#define OHCI_ITD_GET_DI(x) (((x) >> 21) & 7) /* Delay Interrupt */
+#define OHCI_ITD_SET_DI(x) ((x) << 21)
+#define OHCI_ITD_NOINTR 0x00e00000
+#define OHCI_ITD_GET_FC(x) ((((x) >> 24) & 7)+1) /* Frame Count */
+#define OHCI_ITD_SET_FC(x) (((x)-1) << 24)
+#define OHCI_ITD_GET_CC(x) ((x) >> 28) /* Condition Code */
+#define OHCI_ITD_NOCC 0xf0000000
+#define OHCI_ITD_NOFFSET 8
+ volatile uint32_t itd_bp0; /* Buffer Page 0 */
+ volatile uint32_t itd_next; /* Next ITD */
+ volatile uint32_t itd_be; /* Buffer End */
+ volatile uint16_t itd_offset[OHCI_ITD_NOFFSET]; /* Buffer offsets and
+ * Status */
+#define OHCI_ITD_PAGE_SELECT 0x00001000
+#define OHCI_ITD_MK_OFFS(len) (0xe000 | ((len) & 0x1fff))
+#define OHCI_ITD_PSW_LENGTH(x) ((x) & 0xfff) /* Transfer length */
+#define OHCI_ITD_PSW_GET_CC(x) ((x) >> 12) /* Condition Code */
+/*
+ * Extra information needed:
+ */
+ struct ohci_itd *obj_next;
+ struct usb_page_cache *page_cache;
+ uint32_t itd_self;
+ uint8_t frames;
+} __aligned(OHCI_ITD_ALIGN);
+
+typedef struct ohci_itd ohci_itd_t;
+
+#define OHCI_CC_NO_ERROR 0
+#define OHCI_CC_CRC 1
+#define OHCI_CC_BIT_STUFFING 2
+#define OHCI_CC_DATA_TOGGLE_MISMATCH 3
+#define OHCI_CC_STALL 4
+#define OHCI_CC_DEVICE_NOT_RESPONDING 5
+#define OHCI_CC_PID_CHECK_FAILURE 6
+#define OHCI_CC_UNEXPECTED_PID 7
+#define OHCI_CC_DATA_OVERRUN 8
+#define OHCI_CC_DATA_UNDERRUN 9
+#define OHCI_CC_BUFFER_OVERRUN 12
+#define OHCI_CC_BUFFER_UNDERRUN 13
+#define OHCI_CC_NOT_ACCESSED 15
+
+/* Some delay needed when changing certain registers. */
+#define OHCI_ENABLE_POWER_DELAY 5
+#define OHCI_READ_DESC_DELAY 5
+
+#define OHCI_NO_EDS (2*OHCI_NO_INTRS)
+
+struct ohci_hw_softc {
+ struct usb_page_cache hcca_pc;
+ struct usb_page_cache ctrl_start_pc;
+ struct usb_page_cache bulk_start_pc;
+ struct usb_page_cache isoc_start_pc;
+ struct usb_page_cache intr_start_pc[OHCI_NO_EDS];
+
+ struct usb_page hcca_pg;
+ struct usb_page ctrl_start_pg;
+ struct usb_page bulk_start_pg;
+ struct usb_page isoc_start_pg;
+ struct usb_page intr_start_pg[OHCI_NO_EDS];
+};
+
+struct ohci_config_desc {
+ struct usb_config_descriptor confd;
+ struct usb_interface_descriptor ifcd;
+ struct usb_endpoint_descriptor endpd;
+} __packed;
+
+union ohci_hub_desc {
+ struct usb_status stat;
+ struct usb_port_status ps;
+ struct usb_hub_descriptor hubd;
+ uint8_t temp[128];
+};
+
+typedef struct ohci_softc {
+ struct ohci_hw_softc sc_hw;
+ struct usb_bus sc_bus; /* base device */
+ struct usb_callout sc_tmo_rhsc;
+ union ohci_hub_desc sc_hub_desc;
+
+ struct usb_device *sc_devices[OHCI_MAX_DEVICES];
+ struct resource *sc_io_res;
+ struct resource *sc_irq_res;
+ struct ohci_hcca *sc_hcca_p;
+ struct ohci_ed *sc_ctrl_p_last;
+ struct ohci_ed *sc_bulk_p_last;
+ struct ohci_ed *sc_isoc_p_last;
+ struct ohci_ed *sc_intr_p_last[OHCI_NO_EDS];
+#ifndef __rtems__
+ void *sc_intr_hdl;
+#endif /* __rtems__ */
+ device_t sc_dev;
+ bus_size_t sc_io_size;
+ bus_space_tag_t sc_io_tag;
+ bus_space_handle_t sc_io_hdl;
+
+ uint32_t sc_eintrs; /* enabled interrupts */
+ uint32_t sc_control; /* Preserved during suspend/standby */
+ uint32_t sc_intre;
+
+ uint16_t sc_intr_stat[OHCI_NO_EDS];
+ uint16_t sc_id_vendor;
+
+ uint8_t sc_noport;
+ uint8_t sc_addr; /* device address */
+ uint8_t sc_conf; /* device configuration */
+ uint8_t sc_hub_idata[32];
+
+ char sc_vendor[16];
+
+} ohci_softc_t;
+
+usb_bus_mem_cb_t ohci_iterate_hw_softc;
+
+usb_error_t ohci_init(ohci_softc_t *sc);
+void ohci_detach(struct ohci_softc *sc);
+void ohci_suspend(ohci_softc_t *sc);
+void ohci_resume(ohci_softc_t *sc);
+void ohci_interrupt(ohci_softc_t *sc);
+
+#endif /* _OHCI_HH_ */
diff --git a/rtems/freebsd/dev/usb/controller/ohci_lpc3250.c b/rtems/freebsd/dev/usb/controller/ohci_lpc3250.c
new file mode 100644
index 00000000..2e36dad7
--- /dev/null
+++ b/rtems/freebsd/dev/usb/controller/ohci_lpc3250.c
@@ -0,0 +1,361 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+#include <bsp.h>
+
+#ifdef LIBBSP_ARM_LPC3250_BSP_H
+
+#include <bsp/irq.h>
+
+#include <rtems/freebsd/sys/cdefs.h>
+#include <rtems/freebsd/sys/stdint.h>
+#include <rtems/freebsd/sys/stddef.h>
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/queue.h>
+#include <rtems/freebsd/sys/types.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/bus.h>
+#include <rtems/freebsd/sys/linker_set.h>
+#include <rtems/freebsd/sys/module.h>
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/mutex.h>
+#include <rtems/freebsd/sys/condvar.h>
+#include <rtems/freebsd/sys/sysctl.h>
+#include <rtems/freebsd/sys/sx.h>
+#include <rtems/freebsd/sys/unistd.h>
+#include <rtems/freebsd/sys/callout.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/priv.h>
+
+#include <rtems/freebsd/dev/usb/usb.h>
+#include <rtems/freebsd/dev/usb/usbdi.h>
+
+#include <rtems/freebsd/dev/usb/usb_core.h>
+#include <rtems/freebsd/dev/usb/usb_busdma.h>
+#include <rtems/freebsd/dev/usb/usb_process.h>
+#include <rtems/freebsd/dev/usb/usb_util.h>
+
+#include <rtems/freebsd/dev/usb/usb_controller.h>
+#include <rtems/freebsd/dev/usb/usb_bus.h>
+#include <rtems/freebsd/dev/usb/controller/ohci.h>
+
+typedef struct
+{
+ uint16_t VendorID;
+ uint16_t ProductID;
+ uint16_t VersionID;
+} isp130x_PhyDetails_Typ;
+
+static void
+i2c_wait_for_receive_fifo_not_empty(void)
+{
+ while ((OTGI2CSTS & OTG_I2C_RFE) != 0) {
+ /* Wait */
+ }
+}
+
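+/*
+ * Read one ISP1301 register through the OTG I2C master: reset the
+ * controller, send the device address and register number, issue a
+ * repeated start with the read bit set, and fetch the result from the
+ * receive FIFO.
+ */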
+static uint8_t
+isp1301_read(uint8_t reg)
+{
+ OTGI2CCTL = OTG_I2C_RESET;
+
+ OTGI2CTX = ISP1301_I2C_ADDR | OTG_I2C_START;
+
+ OTGI2CTX = reg;
+
+ OTGI2CTX = ISP1301_I2C_ADDR | OTG_I2C_READ | OTG_I2C_START;
+
+ OTGI2CTX = OTG_I2C_STOP;
+
+ i2c_wait_for_receive_fifo_not_empty();
+
+ return (uint8_t) OTGI2CRX;
+}
+
+static void
+i2c_wait_for_transaction_done(void)
+{
+ while ((OTGI2CSTS & OTG_I2C_TDI) == 0) {
+ /* Wait */
+ }
+
+ OTGI2CSTS = OTG_I2C_TDI;
+}
+
+static void
+isp1301_write(uint8_t reg, uint8_t val)
+{
+
+ OTGI2CCTL = OTG_I2C_RESET;
+
+ OTGI2CTX = ISP1301_I2C_ADDR | OTG_I2C_START;
+
+ OTGI2CTX = reg;
+
+ OTGI2CTX = val | OTG_I2C_STOP;
+
+ i2c_wait_for_transaction_done();
+}
+
+static void
+isp1301_dump(void)
+{
+ BSD_PRINTF(
+ "ISP1301: mc1 %02x, mc2 %02x, otgctrl %02x, otgsts %02x, isrc %02x, iltch %02x, ienl %02x, ienh %02x\n",
+ isp1301_read(ISP1301_MODE_CONTROL_1),
+ isp1301_read(ISP1301_MODE_CONTROL_2),
+ isp1301_read(ISP1301_OTG_CONTROL_1),
+ isp1301_read(ISP1301_OTG_STATUS),
+ isp1301_read(ISP1301_I2C_INTERRUPT_SOURCE),
+ isp1301_read(ISP1301_I2C_INTERRUPT_LATCH),
+ isp1301_read(ISP1301_I2C_INTERRUPT_FALLING),
+ isp1301_read(ISP1301_I2C_INTERRUPT_RISING)
+ );
+}
+
+
+static isp130x_PhyDetails_Typ
+isp130x_GetPhyDetails(void)
+{
+ isp130x_PhyDetails_Typ PhyDetails;
+
+ PhyDetails.VendorID = (uint16_t)((isp1301_read(ISP1301_I2C_VENDOR_ID_HIGH) << 8) |
+ isp1301_read(ISP1301_I2C_VENDOR_ID_LOW));
+
+ PhyDetails.ProductID = (uint16_t)((isp1301_read(ISP1301_I2C_PRODUCT_ID_HIGH) << 8) |
+ isp1301_read(ISP1301_I2C_PRODUCT_ID_LOW));
+
+ PhyDetails.VersionID = (uint16_t)((isp1301_read(ISP1301_I2C_VERSION_ID_HIGH) << 8) |
+ isp1301_read(ISP1301_I2C_VERSION_ID_LOW));
+
+ return PhyDetails;
+}
+
+
+static void
+isp1301_configure(void)
+{
+ isp130x_PhyDetails_Typ PhyDetails = isp130x_GetPhyDetails();
+
+ BSD_PRINTF("ISP130x: vendor 0x%04x, product 0x%04x, version 0x%04x\n",
+ PhyDetails.VendorID,
+ PhyDetails.ProductID,
+ PhyDetails.VersionID);
+
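+	/*
+	 * The ISP1301 exposes set/clear register pairs: writing 0xff to a
+	 * *_CLEAR address is assumed to clear all bits of the register
+	 * before the required bits are set via the *_SET address.
+	 */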
+ isp1301_write(ISP1301_MODE_CONTROL_1_CLEAR, 0xff);
+ isp1301_write(ISP1301_MODE_CONTROL_1_SET, MC1_SPEED_REG);
+ isp1301_write(ISP1301_MODE_CONTROL_2_CLEAR, 0xff);
+
+ switch (PhyDetails.ProductID)
+ {
+ case ISP1301_PRODUCT_ID:
+ isp1301_write(ISP1301_MODE_CONTROL_2_SET, MC2_BI_DI |
+ MC2_PSW_EN |
+ MC2_SPD_SUSP_CTRL);
+ break;
+
+ case ISP1302_PRODUCT_ID:
+		/*
+		 * Do not set the SPD_SUSP_CTRL bit: it is defined for the
+		 * ISP1301 but reserved on the ISP1302, and setting it there
+		 * causes problems.
+		 */
+ isp1301_write(ISP1301_MODE_CONTROL_2_SET, MC2_BI_DI |
+ MC2_PSW_EN);
+
+		/* The ISP1302 has an additional register that should be initialized. */
+ isp1301_write(ISP1302_MISC_CONTROL_CLEAR, 0xff);
+ isp1301_write(ISP1302_MISC_CONTROL_SET, MISC_UART_2V8_EN);
+
+ break;
+
+ default:
+ break;
+ }
+
+ isp1301_write(ISP1301_OTG_CONTROL_CLEAR, 0xff);
+ isp1301_write(ISP1301_MODE_CONTROL_1_SET, MC1_DAT_SE0);
+ isp1301_write(ISP1301_OTG_CONTROL_SET, OTG1_DM_PULLDOWN | OTG1_DP_PULLDOWN);
+ isp1301_write(ISP1301_I2C_INTERRUPT_LATCH_CLEAR, 0xff);
+ isp1301_write(ISP1301_I2C_INTERRUPT_FALLING_CLEAR, 0xff);
+ isp1301_write(ISP1301_I2C_INTERRUPT_RISING_CLEAR, 0xff);
+}
+
+static void
+isp1301_vbus_on(void)
+{
+ isp1301_write(ISP1301_OTG_CONTROL_SET, OTG1_VBUS_DRV);
+}
+
+static int
+ohci_lpc32xx_suspend(device_t self)
+{
+ ohci_softc_t *e = device_get_softc(self);
+ int eno = bus_generic_suspend(self);
+
+ if (eno != 0) {
+ return (eno);
+ }
+
+ ohci_suspend(e);
+
+ return (0);
+}
+
+static int
+ohci_lpc32xx_resume(device_t self)
+{
+ ohci_softc_t *e = device_get_softc(self);
+
+ ohci_resume(e);
+
+ bus_generic_resume(self);
+
+ return (0);
+}
+
+
+static int
+ohci_lpc32xx_probe(device_t self)
+{
+ device_set_desc(self, "LPC3250 OHCI controller");
+
+ return (0);
+}
+
+static int
+ohci_lpc32xx_detach(device_t self)
+{
+ ohci_softc_t *e = device_get_softc(self);
+
+ BSD_PRINTF("FIXME\n");
+
+ return (0);
+}
+
+static int
+ohci_lpc32xx_attach(device_t self)
+{
+ rtems_status_code sc = RTEMS_SUCCESSFUL;
+ ohci_softc_t *e = device_get_softc(self);
+ usb_error_t ue = USB_ERR_NORMAL_COMPLETION;
+ int eno = 0;
+
+ memset(e, 0, sizeof(*e));
+
+ /* Initialize some bus fields */
+ e->sc_bus.parent = self;
+ e->sc_bus.devices = e->sc_devices;
+ e->sc_bus.devices_max = OHCI_MAX_DEVICES;
+
+ /* Get all DMA memory */
+ if (usb_bus_mem_alloc_all(&e->sc_bus, USB_GET_DMA_TAG(self), &ohci_iterate_hw_softc)) {
+ return (ENOMEM);
+ }
+ e->sc_dev = self;
+
+ /* Child device */
+ e->sc_bus.bdev = device_add_child(self, "usbus", -1);
+ if (e->sc_bus.bdev == NULL) {
+ device_printf(self, "Could not add USB device\n");
+ goto error;
+ }
+ device_set_ivars(e->sc_bus.bdev, &e->sc_bus);
+ device_set_desc(e->sc_bus.bdev, "LPC3250 OHCI bus");
+ snprintf(e->sc_vendor, sizeof(e->sc_vendor), "NXP");
+
+ /* Register space */
+ e->sc_io_tag = 0U;
+ e->sc_io_hdl = OTGUSB_BASE;
+ e->sc_io_size = 0x5cU;
+
+ /* Enable USB PLL */
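+	/*
+	 * Assumption: P=2, N=1 and M=192 (encoded as 191, i.e. M-1)
+	 * program the PLL for the 48 MHz USB clock.
+	 */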
+ USBDIVCTRL = 0xc;
+ USBCTRL = USBCLK_SLAVE_HCLK_EN
+ | USBCLK_PC_BUS_KEEPER
+ | USBCLK_CLKEN1
+ | USBCLK_POWER_UP
+ | USBCLK_P_2
+ | USBCLK_N_1
+ | (191U << USBCLK_M_SHIFT);
+ while ((USBCTRL & USBCLK_PLL_LOCK) == 0) {
+ /* Wait */
+ }
+ USBCTRL |= USBCLK_CLKEN2;
+
+ /* Enable USB host and AHB clocks */
+ OTGCLKCTRL = 0x1c;
+ while ((OTGCLKSTAT & 0x1c) != 0x1c) {
+ /* Wait */
+ }
+
+ isp1301_configure();
+
+ USBCTRL |= USBCLK_HOST_NEED_CLK_EN;
+
+ OTGCLKCTRL = 0x1d;
+ while ((OTGCLKSTAT & 0x1d) != 0x1d) {
+ /* Wait */
+ }
+
+ /* Set OTG Status and Control Register */
+ OTGSTAT = 0x1;
+
+ isp1301_vbus_on();
+
+ /* Install interrupt handler */
+ sc = rtems_interrupt_server_handler_install(
+ RTEMS_ID_NONE,
+ IRQ_USB_HOST,
+ "USB",
+ RTEMS_INTERRUPT_UNIQUE,
+ (rtems_interrupt_handler) ohci_interrupt,
+ e
+ );
+ BSD_ASSERT_SC(sc);
+
+ /* OHCI initialization */
+ ue = ohci_init(e);
+ if (ue != USB_ERR_NORMAL_COMPLETION) {
+ goto error;
+ }
+
+ /* Probe and attach child */
+ eno = device_probe_and_attach(e->sc_bus.bdev);
+ if (eno != 0) {
+ goto error;
+ }
+
+ return (0);
+
+error:
+ ohci_lpc32xx_detach(self);
+ return (ENXIO);
+}
+
+static device_method_t ohci_methods [] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, ohci_lpc32xx_probe),
+ DEVMETHOD(device_attach, ohci_lpc32xx_attach),
+ DEVMETHOD(device_detach, ohci_lpc32xx_detach),
+ DEVMETHOD(device_suspend, ohci_lpc32xx_suspend),
+ DEVMETHOD(device_resume, ohci_lpc32xx_resume),
+ DEVMETHOD(device_shutdown, bus_generic_shutdown),
+
+ /* Bus interface */
+ DEVMETHOD(bus_print_child, bus_generic_print_child),
+
+ {0, 0}
+};
+
+static driver_t ohci_driver = {
+ .name = "ohci",
+ .methods = ohci_methods,
+ .size = sizeof(struct ohci_softc)
+};
+
+static devclass_t ohci_devclass;
+
+DRIVER_MODULE(ohci, nexus, ohci_driver, ohci_devclass, 0, 0);
+MODULE_DEPEND(ohci, usb, 1, 1, 1);
+
+#endif /* LIBBSP_ARM_LPC3250_BSP_H */
diff --git a/rtems/freebsd/dev/usb/controller/ohcireg.h b/rtems/freebsd/dev/usb/controller/ohcireg.h
new file mode 100644
index 00000000..b3acb69b
--- /dev/null
+++ b/rtems/freebsd/dev/usb/controller/ohcireg.h
@@ -0,0 +1,131 @@
+/* $FreeBSD$ */
+/*-
+ * Copyright (c) 1998 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Lennart Augustsson (lennart@augustsson.net) at
+ * Carlstedt Research & Technology.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the NetBSD
+ * Foundation, Inc. and its contributors.
+ * 4. Neither the name of The NetBSD Foundation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _OHCIREG_HH_
+#define _OHCIREG_HH_
+
+/* PCI config registers */
+#define PCI_CBMEM 0x10 /* configuration base memory */
+#define PCI_INTERFACE_OHCI 0x10
+
+/* OHCI registers */
+#define OHCI_REVISION 0x00 /* OHCI revision */
+#define OHCI_REV_LO(rev) ((rev) & 0xf)
+#define OHCI_REV_HI(rev) (((rev)>>4) & 0xf)
+#define OHCI_REV_LEGACY(rev) ((rev) & 0x100)
+#define OHCI_CONTROL 0x04
+#define OHCI_CBSR_MASK 0x00000003 /* Control/Bulk Service Ratio */
+#define OHCI_RATIO_1_1 0x00000000
+#define OHCI_RATIO_1_2 0x00000001
+#define OHCI_RATIO_1_3 0x00000002
+#define OHCI_RATIO_1_4 0x00000003
+#define OHCI_PLE 0x00000004 /* Periodic List Enable */
+#define OHCI_IE 0x00000008 /* Isochronous Enable */
+#define OHCI_CLE 0x00000010 /* Control List Enable */
+#define OHCI_BLE 0x00000020 /* Bulk List Enable */
+#define OHCI_HCFS_MASK 0x000000c0 /* HostControllerFunctionalState */
+#define OHCI_HCFS_RESET 0x00000000
+#define OHCI_HCFS_RESUME 0x00000040
+#define OHCI_HCFS_OPERATIONAL 0x00000080
+#define OHCI_HCFS_SUSPEND 0x000000c0
+#define OHCI_IR 0x00000100 /* Interrupt Routing */
+#define OHCI_RWC 0x00000200 /* Remote Wakeup Connected */
+#define OHCI_RWE 0x00000400 /* Remote Wakeup Enabled */
+#define OHCI_COMMAND_STATUS 0x08
+#define OHCI_HCR 0x00000001 /* Host Controller Reset */
+#define OHCI_CLF 0x00000002 /* Control List Filled */
+#define OHCI_BLF 0x00000004 /* Bulk List Filled */
+#define OHCI_OCR 0x00000008 /* Ownership Change Request */
+#define OHCI_SOC_MASK 0x00030000 /* Scheduling Overrun Count */
+#define OHCI_INTERRUPT_STATUS 0x0c
+#define OHCI_SO 0x00000001 /* Scheduling Overrun */
+#define OHCI_WDH 0x00000002 /* Writeback Done Head */
+#define OHCI_SF 0x00000004 /* Start of Frame */
+#define OHCI_RD 0x00000008 /* Resume Detected */
+#define OHCI_UE 0x00000010 /* Unrecoverable Error */
+#define OHCI_FNO 0x00000020 /* Frame Number Overflow */
+#define OHCI_RHSC 0x00000040 /* Root Hub Status Change */
+#define OHCI_OC 0x40000000 /* Ownership Change */
+#define OHCI_MIE 0x80000000 /* Master Interrupt Enable */
+#define OHCI_INTERRUPT_ENABLE 0x10
+#define OHCI_INTERRUPT_DISABLE 0x14
+#define OHCI_HCCA 0x18
+#define OHCI_PERIOD_CURRENT_ED 0x1c
+#define OHCI_CONTROL_HEAD_ED 0x20
+#define OHCI_CONTROL_CURRENT_ED 0x24
+#define OHCI_BULK_HEAD_ED 0x28
+#define OHCI_BULK_CURRENT_ED 0x2c
+#define OHCI_DONE_HEAD 0x30
+#define OHCI_FM_INTERVAL 0x34
+#define OHCI_GET_IVAL(s) ((s) & 0x3fff)
+#define OHCI_GET_FSMPS(s) (((s) >> 16) & 0x7fff)
+#define OHCI_FIT 0x80000000
+#define OHCI_FM_REMAINING 0x38
+#define OHCI_FM_NUMBER 0x3c
+#define OHCI_PERIODIC_START 0x40
+#define OHCI_LS_THRESHOLD 0x44
+#define OHCI_RH_DESCRIPTOR_A 0x48
+#define OHCI_GET_NDP(s) ((s) & 0xff)
+#define OHCI_PSM 0x0100 /* Power Switching Mode */
+#define OHCI_NPS 0x0200 /* No Power Switching */
+#define OHCI_DT 0x0400 /* Device Type */
+#define OHCI_OCPM 0x0800 /* Overcurrent Protection Mode */
+#define OHCI_NOCP 0x1000 /* No Overcurrent Protection */
+#define OHCI_GET_POTPGT(s) ((s) >> 24)
+#define OHCI_RH_DESCRIPTOR_B 0x4c
+#define OHCI_RH_STATUS 0x50
+#define OHCI_LPS 0x00000001 /* Local Power Status */
+#define OHCI_OCI 0x00000002 /* OverCurrent Indicator */
+#define OHCI_DRWE 0x00008000 /* Device Remote Wakeup Enable */
+#define OHCI_LPSC 0x00010000 /* Local Power Status Change */
+#define OHCI_CCIC 0x00020000 /* OverCurrent Indicator
+ * Change */
+#define OHCI_CRWE 0x80000000 /* Clear Remote Wakeup Enable */
+#define OHCI_RH_PORT_STATUS(n) (0x50 + ((n)*4)) /* 1 based indexing */
+
+#define OHCI_LES (OHCI_PLE | OHCI_IE | OHCI_CLE | OHCI_BLE)
+#define OHCI_ALL_INTRS (OHCI_SO | OHCI_WDH | OHCI_SF | \
+ OHCI_RD | OHCI_UE | OHCI_FNO | \
+ OHCI_RHSC | OHCI_OC)
+#define OHCI_NORMAL_INTRS (OHCI_WDH | OHCI_RD | OHCI_UE | OHCI_RHSC)
+
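+/*
+ * Per the OHCI specification: FSMPS derives the largest full-speed data
+ * packet from the frame interval (210 bit times of overhead, scaled by
+ * 6/7 for bit stuffing), and the periodic list starts at 90% of the
+ * frame interval.
+ */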
+#define OHCI_FSMPS(i) (((i-210)*6/7) << 16)
+#define OHCI_PERIODIC(i) ((i)*9/10)
+
+#endif /* _OHCIREG_HH_ */
diff --git a/rtems/freebsd/dev/usb/controller/usb_controller.c b/rtems/freebsd/dev/usb/controller/usb_controller.c
new file mode 100644
index 00000000..8c4baa2b
--- /dev/null
+++ b/rtems/freebsd/dev/usb/controller/usb_controller.c
@@ -0,0 +1,606 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/* $FreeBSD$ */
+/*-
+ * Copyright (c) 2008 Hans Petter Selasky. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <rtems/freebsd/local/opt_ddb.h>
+
+#include <rtems/freebsd/sys/stdint.h>
+#include <rtems/freebsd/sys/stddef.h>
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/queue.h>
+#include <rtems/freebsd/sys/types.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/bus.h>
+#include <rtems/freebsd/sys/linker_set.h>
+#include <rtems/freebsd/sys/module.h>
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/mutex.h>
+#include <rtems/freebsd/sys/condvar.h>
+#include <rtems/freebsd/sys/sysctl.h>
+#include <rtems/freebsd/sys/sx.h>
+#include <rtems/freebsd/sys/unistd.h>
+#include <rtems/freebsd/sys/callout.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/priv.h>
+
+#include <rtems/freebsd/dev/usb/usb.h>
+#include <rtems/freebsd/dev/usb/usbdi.h>
+
+#define USB_DEBUG_VAR usb_ctrl_debug
+
+#include <rtems/freebsd/dev/usb/usb_core.h>
+#include <rtems/freebsd/dev/usb/usb_debug.h>
+#include <rtems/freebsd/dev/usb/usb_process.h>
+#include <rtems/freebsd/dev/usb/usb_busdma.h>
+#include <rtems/freebsd/dev/usb/usb_dynamic.h>
+#include <rtems/freebsd/dev/usb/usb_device.h>
+#include <rtems/freebsd/dev/usb/usb_hub.h>
+
+#include <rtems/freebsd/dev/usb/usb_controller.h>
+#include <rtems/freebsd/dev/usb/usb_bus.h>
+
+/* function prototypes */
+
+static device_probe_t usb_probe;
+static device_attach_t usb_attach;
+static device_detach_t usb_detach;
+
+static void usb_attach_sub(device_t, struct usb_bus *);
+
+/* static variables */
+
+#ifdef USB_DEBUG
+static int usb_ctrl_debug = 0;
+
+SYSCTL_NODE(_hw_usb, OID_AUTO, ctrl, CTLFLAG_RW, 0, "USB controller");
+SYSCTL_INT(_hw_usb_ctrl, OID_AUTO, debug, CTLFLAG_RW, &usb_ctrl_debug, 0,
+ "Debug level");
+#endif
+
+#ifndef __rtems__
+static int usb_no_boot_wait = 0;
+TUNABLE_INT("hw.usb.no_boot_wait", &usb_no_boot_wait);
+SYSCTL_INT(_hw_usb, OID_AUTO, no_boot_wait, CTLFLAG_RDTUN, &usb_no_boot_wait, 0,
+ "No device enumerate waiting at boot.");
+#endif /* __rtems__ */
+
+static devclass_t usb_devclass;
+
+static device_method_t usb_methods[] = {
+ DEVMETHOD(device_probe, usb_probe),
+ DEVMETHOD(device_attach, usb_attach),
+ DEVMETHOD(device_detach, usb_detach),
+ DEVMETHOD(device_suspend, bus_generic_suspend),
+ DEVMETHOD(device_resume, bus_generic_resume),
+ DEVMETHOD(device_shutdown, bus_generic_shutdown),
+ {0, 0}
+};
+
+static driver_t usb_driver = {
+ .name = "usbus",
+ .methods = usb_methods,
+ .size = 0,
+};
+
+/* Host Only Drivers */
+DRIVER_MODULE(usbus, ohci, usb_driver, usb_devclass, 0, 0);
+DRIVER_MODULE(usbus, uhci, usb_driver, usb_devclass, 0, 0);
+DRIVER_MODULE(usbus, ehci, usb_driver, usb_devclass, 0, 0);
+DRIVER_MODULE(usbus, xhci, usb_driver, usb_devclass, 0, 0);
+
+/* Device Only Drivers */
+DRIVER_MODULE(usbus, at91_udp, usb_driver, usb_devclass, 0, 0);
+DRIVER_MODULE(usbus, musbotg, usb_driver, usb_devclass, 0, 0);
+DRIVER_MODULE(usbus, uss820, usb_driver, usb_devclass, 0, 0);
+
+/*------------------------------------------------------------------------*
+ * usb_probe
+ *
+ * This function is called from "{ehci,ohci,uhci}_pci_attach()".
+ *------------------------------------------------------------------------*/
+static int
+usb_probe(device_t dev)
+{
+ DPRINTF("\n");
+ return (0);
+}
+
+static void
+usb_root_mount_rel(struct usb_bus *bus)
+{
+#ifndef __rtems__
+ if (bus->bus_roothold != NULL) {
+ DPRINTF("Releasing root mount hold %p\n", bus->bus_roothold);
+ root_mount_rel(bus->bus_roothold);
+ bus->bus_roothold = NULL;
+ }
+#endif /* __rtems__ */
+}
+
+/*------------------------------------------------------------------------*
+ * usb_attach
+ *------------------------------------------------------------------------*/
+static int
+usb_attach(device_t dev)
+{
+ struct usb_bus *bus = device_get_ivars(dev);
+
+ DPRINTF("\n");
+
+ if (bus == NULL) {
+ device_printf(dev, "USB device has no ivars\n");
+ return (ENXIO);
+ }
+
+#ifndef __rtems__
+ if (usb_no_boot_wait == 0) {
+ /* delay vfs_mountroot until the bus is explored */
+ bus->bus_roothold = root_mount_hold(device_get_nameunit(dev));
+ }
+#endif /* __rtems__ */
+
+ usb_attach_sub(dev, bus);
+
+ return (0); /* return success */
+}
+
+/*------------------------------------------------------------------------*
+ * usb_detach
+ *------------------------------------------------------------------------*/
+static int
+usb_detach(device_t dev)
+{
+ struct usb_bus *bus = device_get_softc(dev);
+
+ DPRINTF("\n");
+
+ if (bus == NULL) {
+ /* was never setup properly */
+ return (0);
+ }
+ /* Stop power watchdog */
+ usb_callout_drain(&bus->power_wdog);
+
+ /* Let the USB explore process detach all devices. */
+ usb_root_mount_rel(bus);
+
+ USB_BUS_LOCK(bus);
+ if (usb_proc_msignal(&bus->explore_proc,
+ &bus->detach_msg[0], &bus->detach_msg[1])) {
+ /* ignore */
+ }
+ /* Wait for detach to complete */
+
+ usb_proc_mwait(&bus->explore_proc,
+ &bus->detach_msg[0], &bus->detach_msg[1]);
+
+ USB_BUS_UNLOCK(bus);
+
+ /* Get rid of USB callback processes */
+
+ usb_proc_free(&bus->giant_callback_proc);
+ usb_proc_free(&bus->non_giant_callback_proc);
+
+ /* Get rid of USB explore process */
+
+ usb_proc_free(&bus->explore_proc);
+
+ /* Get rid of control transfer process */
+
+ usb_proc_free(&bus->control_xfer_proc);
+
+ return (0);
+}
+
+/*------------------------------------------------------------------------*
+ * usb_bus_explore
+ *
+ * This function is used to explore the device tree from the root.
+ *------------------------------------------------------------------------*/
+static void
+usb_bus_explore(struct usb_proc_msg *pm)
+{
+ struct usb_bus *bus;
+ struct usb_device *udev;
+
+ bus = ((struct usb_bus_msg *)pm)->bus;
+ udev = bus->devices[USB_ROOT_HUB_ADDR];
+
+ if (udev && udev->hub) {
+
+ if (bus->do_probe) {
+ bus->do_probe = 0;
+ bus->driver_added_refcount++;
+ }
+ if (bus->driver_added_refcount == 0) {
+			/* avoid zero, since that is the memory default */
+ bus->driver_added_refcount = 1;
+ }
+
+#ifdef DDB
+ /*
+ * The following three lines of code are only here to
+ * recover from DDB:
+ */
+ usb_proc_rewakeup(&bus->control_xfer_proc);
+ usb_proc_rewakeup(&bus->giant_callback_proc);
+ usb_proc_rewakeup(&bus->non_giant_callback_proc);
+#endif
+
+ USB_BUS_UNLOCK(bus);
+
+#if USB_HAVE_POWERD
+ /*
+ * First update the USB power state!
+ */
+ usb_bus_powerd(bus);
+#endif
+ /* Explore the Root USB HUB. */
+ (udev->hub->explore) (udev);
+ USB_BUS_LOCK(bus);
+ }
+ usb_root_mount_rel(bus);
+}
+
+/*------------------------------------------------------------------------*
+ * usb_bus_detach
+ *
+ * This function is used to detach the device tree from the root.
+ *------------------------------------------------------------------------*/
+static void
+usb_bus_detach(struct usb_proc_msg *pm)
+{
+ struct usb_bus *bus;
+ struct usb_device *udev;
+ device_t dev;
+
+ bus = ((struct usb_bus_msg *)pm)->bus;
+ udev = bus->devices[USB_ROOT_HUB_ADDR];
+ dev = bus->bdev;
+ /* clear the softc */
+ device_set_softc(dev, NULL);
+ USB_BUS_UNLOCK(bus);
+
+ /* detach children first */
+ mtx_lock(&Giant);
+ bus_generic_detach(dev);
+ mtx_unlock(&Giant);
+
+ /*
+ * Free USB device and all subdevices, if any.
+ */
+ usb_free_device(udev, 0);
+
+ USB_BUS_LOCK(bus);
+ /* clear bdev variable last */
+ bus->bdev = NULL;
+}
+
+static void
+usb_power_wdog(void *arg)
+{
+ struct usb_bus *bus = arg;
+
+ USB_BUS_LOCK_ASSERT(bus, MA_OWNED);
+
+ usb_callout_reset(&bus->power_wdog,
+ 4 * hz, usb_power_wdog, arg);
+
+#ifdef DDB
+ /*
+ * The following line of code is only here to recover from
+ * DDB:
+ */
+ usb_proc_rewakeup(&bus->explore_proc); /* recover from DDB */
+#endif
+
+#if USB_HAVE_POWERD
+ USB_BUS_UNLOCK(bus);
+
+ usb_bus_power_update(bus);
+
+ USB_BUS_LOCK(bus);
+#endif
+}
+
+/*------------------------------------------------------------------------*
+ * usb_bus_attach
+ *
+ * This function attaches USB in context of the explore thread.
+ *------------------------------------------------------------------------*/
+static void
+usb_bus_attach(struct usb_proc_msg *pm)
+{
+ struct usb_bus *bus;
+ struct usb_device *child;
+ device_t dev;
+ usb_error_t err;
+ enum usb_dev_speed speed;
+
+ bus = ((struct usb_bus_msg *)pm)->bus;
+ dev = bus->bdev;
+
+ DPRINTF("\n");
+
+ switch (bus->usbrev) {
+ case USB_REV_1_0:
+ speed = USB_SPEED_FULL;
+ device_printf(bus->bdev, "12Mbps Full Speed USB v1.0\n");
+ break;
+
+ case USB_REV_1_1:
+ speed = USB_SPEED_FULL;
+ device_printf(bus->bdev, "12Mbps Full Speed USB v1.1\n");
+ break;
+
+ case USB_REV_2_0:
+ speed = USB_SPEED_HIGH;
+ device_printf(bus->bdev, "480Mbps High Speed USB v2.0\n");
+ break;
+
+ case USB_REV_2_5:
+ speed = USB_SPEED_VARIABLE;
+ device_printf(bus->bdev, "480Mbps Wireless USB v2.5\n");
+ break;
+
+ case USB_REV_3_0:
+ speed = USB_SPEED_SUPER;
+ device_printf(bus->bdev, "4.8Gbps Super Speed USB v3.0\n");
+ break;
+
+ default:
+ device_printf(bus->bdev, "Unsupported USB revision\n");
+ usb_root_mount_rel(bus);
+ return;
+ }
+
+ USB_BUS_UNLOCK(bus);
+
+ /* default power_mask value */
+ bus->hw_power_state =
+ USB_HW_POWER_CONTROL |
+ USB_HW_POWER_BULK |
+ USB_HW_POWER_INTERRUPT |
+ USB_HW_POWER_ISOC |
+ USB_HW_POWER_NON_ROOT_HUB;
+
+ /* make sure power is set at least once */
+
+ if (bus->methods->set_hw_power != NULL) {
+ (bus->methods->set_hw_power) (bus);
+ }
+
+ /* Allocate the Root USB device */
+
+ child = usb_alloc_device(bus->bdev, bus, NULL, 0, 0, 1,
+ speed, USB_MODE_HOST);
+ if (child) {
+ err = usb_probe_and_attach(child,
+ USB_IFACE_INDEX_ANY);
+ if (!err) {
+ if ((bus->devices[USB_ROOT_HUB_ADDR] == NULL) ||
+ (bus->devices[USB_ROOT_HUB_ADDR]->hub == NULL)) {
+ err = USB_ERR_NO_ROOT_HUB;
+ }
+ }
+ } else {
+ err = USB_ERR_NOMEM;
+ }
+
+ USB_BUS_LOCK(bus);
+
+ if (err) {
+ device_printf(bus->bdev, "Root HUB problem, error=%s\n",
+ usbd_errstr(err));
+ usb_root_mount_rel(bus);
+ }
+
+ /* set softc - we are ready */
+ device_set_softc(dev, bus);
+
+ /* start watchdog */
+ usb_power_wdog(bus);
+}
+
+/*------------------------------------------------------------------------*
+ * usb_attach_sub
+ *
+ * This function creates a thread which runs the USB attach code.
+ *------------------------------------------------------------------------*/
+static void
+usb_attach_sub(device_t dev, struct usb_bus *bus)
+{
+ const char *pname = device_get_nameunit(dev);
+
+ mtx_lock(&Giant);
+ if (usb_devclass_ptr == NULL)
+ usb_devclass_ptr = devclass_find("usbus");
+ mtx_unlock(&Giant);
+
+ /* Initialise USB process messages */
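+	/* Each message exists in two copies so that one can be queued while the other is being processed. */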
+ bus->explore_msg[0].hdr.pm_callback = &usb_bus_explore;
+ bus->explore_msg[0].bus = bus;
+ bus->explore_msg[1].hdr.pm_callback = &usb_bus_explore;
+ bus->explore_msg[1].bus = bus;
+
+ bus->detach_msg[0].hdr.pm_callback = &usb_bus_detach;
+ bus->detach_msg[0].bus = bus;
+ bus->detach_msg[1].hdr.pm_callback = &usb_bus_detach;
+ bus->detach_msg[1].bus = bus;
+
+ bus->attach_msg[0].hdr.pm_callback = &usb_bus_attach;
+ bus->attach_msg[0].bus = bus;
+ bus->attach_msg[1].hdr.pm_callback = &usb_bus_attach;
+ bus->attach_msg[1].bus = bus;
+
+ /* Create USB explore and callback processes */
+
+ if (usb_proc_create(&bus->giant_callback_proc,
+ &bus->bus_mtx, pname, USB_PRI_MED)) {
+ printf("WARNING: Creation of USB Giant "
+ "callback process failed.\n");
+ } else if (usb_proc_create(&bus->non_giant_callback_proc,
+ &bus->bus_mtx, pname, USB_PRI_HIGH)) {
+ printf("WARNING: Creation of USB non-Giant "
+ "callback process failed.\n");
+ } else if (usb_proc_create(&bus->explore_proc,
+ &bus->bus_mtx, pname, USB_PRI_MED)) {
+ printf("WARNING: Creation of USB explore "
+ "process failed.\n");
+ } else if (usb_proc_create(&bus->control_xfer_proc,
+ &bus->bus_mtx, pname, USB_PRI_MED)) {
+ printf("WARNING: Creation of USB control transfer "
+ "process failed.\n");
+ } else {
+ /* Get final attach going */
+ USB_BUS_LOCK(bus);
+ if (usb_proc_msignal(&bus->explore_proc,
+ &bus->attach_msg[0], &bus->attach_msg[1])) {
+ /* ignore */
+ }
+ USB_BUS_UNLOCK(bus);
+
+ /* Do initial explore */
+ usb_needs_explore(bus, 1);
+ }
+}
+
+SYSUNINIT(usb_bus_unload, SI_SUB_KLD, SI_ORDER_ANY, usb_bus_unload, NULL);
+
+/*------------------------------------------------------------------------*
+ * usb_bus_mem_flush_all_cb
+ *------------------------------------------------------------------------*/
+#if USB_HAVE_BUSDMA
+static void
+usb_bus_mem_flush_all_cb(struct usb_bus *bus, struct usb_page_cache *pc,
+ struct usb_page *pg, usb_size_t size, usb_size_t align)
+{
+ usb_pc_cpu_flush(pc);
+}
+#endif
+
+/*------------------------------------------------------------------------*
+ * usb_bus_mem_flush_all - factored out code
+ *------------------------------------------------------------------------*/
+#if USB_HAVE_BUSDMA
+void
+usb_bus_mem_flush_all(struct usb_bus *bus, usb_bus_mem_cb_t *cb)
+{
+ if (cb) {
+ cb(bus, &usb_bus_mem_flush_all_cb);
+ }
+}
+#endif
+
+/*------------------------------------------------------------------------*
+ * usb_bus_mem_alloc_all_cb
+ *------------------------------------------------------------------------*/
+#if USB_HAVE_BUSDMA
+static void
+usb_bus_mem_alloc_all_cb(struct usb_bus *bus, struct usb_page_cache *pc,
+ struct usb_page *pg, usb_size_t size, usb_size_t align)
+{
+ /* need to initialize the page cache */
+ pc->tag_parent = bus->dma_parent_tag;
+
+ if (usb_pc_alloc_mem(pc, pg, size, align)) {
+ bus->alloc_failed = 1;
+ }
+}
+#endif
+
+/*------------------------------------------------------------------------*
+ * usb_bus_mem_alloc_all - factored out code
+ *
+ * Returns:
+ * 0: Success
+ * Else: Failure
+ *------------------------------------------------------------------------*/
+uint8_t
+usb_bus_mem_alloc_all(struct usb_bus *bus, bus_dma_tag_t dmat,
+ usb_bus_mem_cb_t *cb)
+{
+ bus->alloc_failed = 0;
+
+ mtx_init(&bus->bus_mtx, device_get_nameunit(bus->parent),
+ NULL, MTX_DEF | MTX_RECURSE);
+
+ usb_callout_init_mtx(&bus->power_wdog,
+ &bus->bus_mtx, 0);
+
+ TAILQ_INIT(&bus->intr_q.head);
+
+#if USB_HAVE_BUSDMA
+ usb_dma_tag_setup(bus->dma_parent_tag, bus->dma_tags,
+ dmat, &bus->bus_mtx, NULL, 32, USB_BUS_DMA_TAG_MAX);
+#endif
+ if ((bus->devices_max > USB_MAX_DEVICES) ||
+ (bus->devices_max < USB_MIN_DEVICES) ||
+ (bus->devices == NULL)) {
+ DPRINTFN(0, "Devices field has not been "
+ "initialised properly\n");
+ bus->alloc_failed = 1; /* failure */
+ }
+#if USB_HAVE_BUSDMA
+ if (cb) {
+ cb(bus, &usb_bus_mem_alloc_all_cb);
+ }
+#endif
+ if (bus->alloc_failed) {
+ usb_bus_mem_free_all(bus, cb);
+ }
+ return (bus->alloc_failed);
+}
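+
+/*
+ * A host controller driver normally supplies the "cb" argument as a
+ * function that enumerates its fixed DMA buffers.  A minimal sketch,
+ * assuming a hypothetical "foo" controller (foo_softc, FOO_BUS2SC()
+ * and the page cache fields are illustrative, not part of this file):
+ *
+ *	static void
+ *	foo_bus_mem_cb(struct usb_bus *bus, usb_bus_mem_sub_cb_t *scb)
+ *	{
+ *		struct foo_softc *sc = FOO_BUS2SC(bus);
+ *
+ *		scb(bus, &sc->sc_qh_pc, &sc->sc_qh_pg,
+ *		    sizeof(struct foo_qh), FOO_QH_ALIGN);
+ *	}
+ *
+ * usb_bus_mem_alloc_all(bus, dmat, &foo_bus_mem_cb) then allocates
+ * every enumerated buffer, and usb_bus_mem_free_all(bus,
+ * &foo_bus_mem_cb) releases them again.
+ */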
+
+/*------------------------------------------------------------------------*
+ * usb_bus_mem_free_all_cb
+ *------------------------------------------------------------------------*/
+#if USB_HAVE_BUSDMA
+static void
+usb_bus_mem_free_all_cb(struct usb_bus *bus, struct usb_page_cache *pc,
+ struct usb_page *pg, usb_size_t size, usb_size_t align)
+{
+ usb_pc_free_mem(pc);
+}
+#endif
+
+/*------------------------------------------------------------------------*
+ * usb_bus_mem_free_all - factored out code
+ *------------------------------------------------------------------------*/
+void
+usb_bus_mem_free_all(struct usb_bus *bus, usb_bus_mem_cb_t *cb)
+{
+#if USB_HAVE_BUSDMA
+ if (cb) {
+ cb(bus, &usb_bus_mem_free_all_cb);
+ }
+ usb_dma_tag_unsetup(bus->dma_parent_tag);
+#endif
+
+ mtx_destroy(&bus->bus_mtx);
+}
diff --git a/rtems/freebsd/dev/usb/quirk/usb_quirk.c b/rtems/freebsd/dev/usb/quirk/usb_quirk.c
new file mode 100644
index 00000000..cea8b93c
--- /dev/null
+++ b/rtems/freebsd/dev/usb/quirk/usb_quirk.c
@@ -0,0 +1,807 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/* $FreeBSD$ */
+/*-
+ * Copyright (c) 1998 The NetBSD Foundation, Inc. All rights reserved.
+ * Copyright (c) 1998 Lennart Augustsson. All rights reserved.
+ * Copyright (c) 2008 Hans Petter Selasky. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <rtems/freebsd/sys/stdint.h>
+#include <rtems/freebsd/sys/stddef.h>
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/queue.h>
+#include <rtems/freebsd/sys/types.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/bus.h>
+#include <rtems/freebsd/sys/linker_set.h>
+#include <rtems/freebsd/sys/module.h>
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/mutex.h>
+#include <rtems/freebsd/sys/condvar.h>
+#include <rtems/freebsd/sys/sysctl.h>
+#include <rtems/freebsd/sys/sx.h>
+#include <rtems/freebsd/sys/unistd.h>
+#include <rtems/freebsd/sys/callout.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/priv.h>
+
+#include <rtems/freebsd/dev/usb/usb.h>
+#include <rtems/freebsd/dev/usb/usb_ioctl.h>
+#include <rtems/freebsd/dev/usb/usbdi.h>
+#include <rtems/freebsd/local/usbdevs.h>
+
+#define USB_DEBUG_VAR usb_debug
+#include <rtems/freebsd/dev/usb/usb_debug.h>
+#include <rtems/freebsd/dev/usb/usb_dynamic.h>
+
+#include <rtems/freebsd/dev/usb/quirk/usb_quirk.h>
+
+MODULE_DEPEND(usb_quirk, usb, 1, 1, 1);
+MODULE_VERSION(usb_quirk, 1);
+
+#define USB_DEV_QUIRKS_MAX 256
+#define USB_SUB_QUIRKS_MAX 8
+
+struct usb_quirk_entry {
+ uint16_t vid;
+ uint16_t pid;
+ uint16_t lo_rev;
+ uint16_t hi_rev;
+ uint16_t quirks[USB_SUB_QUIRKS_MAX];
+};
+
+static struct mtx usb_quirk_mtx;
+
+#define USB_QUIRK_VP(v,p,l,h,...) \
+ { .vid = (v), .pid = (p), .lo_rev = (l), .hi_rev = (h), \
+ .quirks = { __VA_ARGS__ } }
+#define USB_QUIRK(v,p,l,h,...) \
+ USB_QUIRK_VP(USB_VENDOR_##v, USB_PRODUCT_##v##_##p, l, h, __VA_ARGS__)
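+
+/*
+ * For illustration, the first table entry below,
+ * USB_QUIRK(ASUS, LCM, 0x0000, 0xffff, UQ_HID_IGNORE), expands to:
+ *
+ *	{ .vid = USB_VENDOR_ASUS, .pid = USB_PRODUCT_ASUS_LCM,
+ *	  .lo_rev = 0x0000, .hi_rev = 0xffff,
+ *	  .quirks = { UQ_HID_IGNORE } }
+ *
+ * Unused slots in the quirks array stay zero, which is UQ_NONE.
+ */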
+
+static struct usb_quirk_entry usb_quirks[USB_DEV_QUIRKS_MAX] = {
+ USB_QUIRK(ASUS, LCM, 0x0000, 0xffff, UQ_HID_IGNORE),
+ USB_QUIRK(INSIDEOUT, EDGEPORT4, 0x094, 0x094, UQ_SWAP_UNICODE),
+ USB_QUIRK(DALLAS, J6502, 0x0a2, 0x0a2, UQ_BAD_ADC),
+ USB_QUIRK(DALLAS, J6502, 0x0a2, 0x0a2, UQ_AU_NO_XU),
+ USB_QUIRK(ALTEC, ADA70, 0x103, 0x103, UQ_BAD_ADC),
+ USB_QUIRK(ALTEC, ASC495, 0x000, 0x000, UQ_BAD_AUDIO),
+ USB_QUIRK(QTRONIX, 980N, 0x110, 0x110, UQ_SPUR_BUT_UP),
+ USB_QUIRK(ALCOR2, KBD_HUB, 0x001, 0x001, UQ_SPUR_BUT_UP),
+ USB_QUIRK(MCT, HUB0100, 0x102, 0x102, UQ_BUS_POWERED),
+ USB_QUIRK(MCT, USB232, 0x102, 0x102, UQ_BUS_POWERED),
+ USB_QUIRK(TI, UTUSB41, 0x110, 0x110, UQ_POWER_CLAIM),
+ USB_QUIRK(TELEX, MIC1, 0x009, 0x009, UQ_AU_NO_FRAC),
+ USB_QUIRK(SILICONPORTALS, YAPPHONE, 0x100, 0x100, UQ_AU_INP_ASYNC),
+ USB_QUIRK(LOGITECH, UN53B, 0x0000, 0xffff, UQ_NO_STRINGS),
+ USB_QUIRK(ELSA, MODEM1, 0x0000, 0xffff, UQ_CFG_INDEX_1),
+
+ /*
+ * XXX The following quirks should have a more specific revision
+ * number:
+ */
+ USB_QUIRK(HP, 895C, 0x0000, 0xffff, UQ_BROKEN_BIDIR),
+ USB_QUIRK(HP, 880C, 0x0000, 0xffff, UQ_BROKEN_BIDIR),
+ USB_QUIRK(HP, 815C, 0x0000, 0xffff, UQ_BROKEN_BIDIR),
+ USB_QUIRK(HP, 810C, 0x0000, 0xffff, UQ_BROKEN_BIDIR),
+ USB_QUIRK(HP, 830C, 0x0000, 0xffff, UQ_BROKEN_BIDIR),
+ USB_QUIRK(HP, 1220C, 0x0000, 0xffff, UQ_BROKEN_BIDIR),
+ USB_QUIRK(XEROX, WCM15, 0x0000, 0xffff, UQ_BROKEN_BIDIR),
+ /* Devices which should be ignored by uhid */
+ USB_QUIRK(APC, UPS, 0x0000, 0xffff, UQ_HID_IGNORE),
+ USB_QUIRK(BELKIN, F6C550AVR, 0x0000, 0xffff, UQ_HID_IGNORE),
+ USB_QUIRK(CYBERPOWER, 1500CAVRLCD, 0x0000, 0xffff, UQ_HID_IGNORE),
+ USB_QUIRK(CYPRESS, SILVERSHIELD, 0x0000, 0xffff, UQ_HID_IGNORE),
+ USB_QUIRK(DELORME, EARTHMATE, 0x0000, 0xffff, UQ_HID_IGNORE),
+ USB_QUIRK(ITUNERNET, USBLCD2X20, 0x0000, 0xffff, UQ_HID_IGNORE),
+ USB_QUIRK(ITUNERNET, USBLCD4X20, 0x0000, 0xffff, UQ_HID_IGNORE),
+ USB_QUIRK(LIEBERT, POWERSURE_PXT, 0x0000, 0xffff, UQ_HID_IGNORE),
+ USB_QUIRK(MGE, UPS1, 0x0000, 0xffff, UQ_HID_IGNORE),
+ USB_QUIRK(MGE, UPS2, 0x0000, 0xffff, UQ_HID_IGNORE),
+ USB_QUIRK(APPLE, IPHONE, 0x0000, 0xffff, UQ_HID_IGNORE),
+ USB_QUIRK(APPLE, IPHONE_3G, 0x0000, 0xffff, UQ_HID_IGNORE),
+ USB_QUIRK(MEGATEC, UPS, 0x0000, 0xffff, UQ_HID_IGNORE),
+ /* Devices which should be ignored by both ukbd and uhid */
+ USB_QUIRK(CYPRESS, WISPY1A, 0x0000, 0xffff, UQ_KBD_IGNORE, UQ_HID_IGNORE),
+ USB_QUIRK(METAGEEK, WISPY1B, 0x0000, 0xffff, UQ_KBD_IGNORE, UQ_HID_IGNORE),
+ USB_QUIRK(METAGEEK, WISPY24X, 0x0000, 0xffff, UQ_KBD_IGNORE, UQ_HID_IGNORE),
+ USB_QUIRK(METAGEEK2, WISPYDBX, 0x0000, 0xffff, UQ_KBD_IGNORE, UQ_HID_IGNORE),
+ USB_QUIRK(TENX, UAUDIO0, 0x0101, 0x0101, UQ_AUDIO_SWAP_LR),
+ /* MS keyboards do weird things */
+ USB_QUIRK(MICROSOFT, WLINTELLIMOUSE, 0x0000, 0xffff, UQ_MS_LEADING_BYTE),
+ /* umodem(4) device quirks */
+ USB_QUIRK(METRICOM, RICOCHET_GS, 0x100, 0x100, UQ_ASSUME_CM_OVER_DATA),
+ USB_QUIRK(SANYO, SCP4900, 0x000, 0x000, UQ_ASSUME_CM_OVER_DATA),
+ USB_QUIRK(MOTOROLA2, T720C, 0x001, 0x001, UQ_ASSUME_CM_OVER_DATA),
+ USB_QUIRK(EICON, DIVA852, 0x100, 0x100, UQ_ASSUME_CM_OVER_DATA),
+ USB_QUIRK(SIEMENS2, ES75, 0x000, 0x000, UQ_ASSUME_CM_OVER_DATA),
+ USB_QUIRK(QUALCOMM, CDMA_MSM, 0x0000, 0xffff, UQ_ASSUME_CM_OVER_DATA),
+ USB_QUIRK(QUALCOMM2, CDMA_MSM, 0x0000, 0xffff, UQ_ASSUME_CM_OVER_DATA),
+ USB_QUIRK(CURITEL, UM175, 0x0000, 0xffff, UQ_ASSUME_CM_OVER_DATA),
+
+ /* USB Mass Storage Class Quirks */
+ USB_QUIRK_VP(USB_VENDOR_ASAHIOPTICAL, 0, 0x0000, 0xffff,
+ UQ_MSC_NO_RS_CLEAR_UA, UQ_MATCH_VENDOR_ONLY),
+ USB_QUIRK(ADDON, ATTACHE, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
+ UQ_MSC_FORCE_PROTO_SCSI, UQ_MSC_IGNORE_RESIDUE),
+ USB_QUIRK(ADDON, A256MB, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
+ UQ_MSC_FORCE_PROTO_SCSI, UQ_MSC_IGNORE_RESIDUE),
+ USB_QUIRK(ADDON, DISKPRO512, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
+ UQ_MSC_FORCE_PROTO_SCSI, UQ_MSC_IGNORE_RESIDUE),
+ USB_QUIRK(ADDONICS2, CABLE_205, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
+ UQ_MSC_FORCE_PROTO_SCSI),
+ USB_QUIRK(AIPTEK, POCKETCAM3M, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
+ UQ_MSC_FORCE_PROTO_SCSI),
+ USB_QUIRK(AIPTEK2, SUNPLUS_TECH, 0x0000, 0xffff, UQ_MSC_NO_SYNC_CACHE),
+ USB_QUIRK(ALCOR, SDCR_6335, 0x0000, 0xffff, UQ_MSC_NO_TEST_UNIT_READY,
+ UQ_MSC_NO_SYNC_CACHE),
+ USB_QUIRK(ALCOR, SDCR_6362, 0x0000, 0xffff, UQ_MSC_NO_TEST_UNIT_READY,
+ UQ_MSC_NO_SYNC_CACHE),
+ USB_QUIRK(ALCOR, AU6390, 0x0000, 0xffff, UQ_MSC_NO_SYNC_CACHE),
+ USB_QUIRK(ALCOR, UMCR_9361, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
+ UQ_MSC_FORCE_PROTO_SCSI, UQ_MSC_NO_GETMAXLUN),
+ USB_QUIRK(ALCOR, TRANSCEND, 0x0142, 0x0142, UQ_MSC_FORCE_WIRE_BBB,
+ UQ_MSC_FORCE_PROTO_SCSI, UQ_MSC_NO_GETMAXLUN, UQ_MSC_NO_SYNC_CACHE),
+ USB_QUIRK(ALCOR, TRANSCEND, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
+ UQ_MSC_FORCE_PROTO_SCSI, UQ_MSC_NO_GETMAXLUN),
+ USB_QUIRK(APACER, HT202, 0x0000, 0xffff, UQ_MSC_NO_TEST_UNIT_READY,
+ UQ_MSC_NO_SYNC_CACHE),
+ USB_QUIRK(ASAHIOPTICAL, OPTIO230, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
+ UQ_MSC_FORCE_PROTO_SCSI, UQ_MSC_NO_INQUIRY),
+ USB_QUIRK(ASAHIOPTICAL, OPTIO330, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
+ UQ_MSC_FORCE_PROTO_SCSI, UQ_MSC_NO_INQUIRY),
+ USB_QUIRK(BELKIN, USB2SCSI, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
+ UQ_MSC_FORCE_PROTO_SCSI),
+ USB_QUIRK(CASIO, QV_DIGICAM, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_CBI,
+ UQ_MSC_FORCE_PROTO_SCSI, UQ_MSC_NO_INQUIRY),
+ USB_QUIRK(CCYU, ED1064, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
+ UQ_MSC_FORCE_PROTO_SCSI),
+ USB_QUIRK(CENTURY, EX35QUAT, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
+ UQ_MSC_FORCE_PROTO_SCSI, UQ_MSC_FORCE_SHORT_INQ,
+ UQ_MSC_NO_START_STOP, UQ_MSC_IGNORE_RESIDUE),
+ USB_QUIRK(CENTURY, EX35SW4_SB4, 0x0000, 0xffff, UQ_MSC_NO_SYNC_CACHE),
+ USB_QUIRK(CYPRESS, XX6830XX, 0x0000, 0xffff, UQ_MSC_NO_GETMAXLUN,
+ UQ_MSC_NO_SYNC_CACHE),
+ USB_QUIRK(DESKNOTE, UCR_61S2B, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
+ UQ_MSC_FORCE_PROTO_SCSI),
+ USB_QUIRK(DMI, CFSM_RW, 0x0000, 0xffff, UQ_MSC_FORCE_PROTO_SCSI,
+ UQ_MSC_NO_GETMAXLUN),
+ USB_QUIRK(DMI, DISK, 0x000, 0xffff, UQ_MSC_NO_SYNC_CACHE),
+ USB_QUIRK(EPSON, STYLUS_875DC, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_CBI,
+ UQ_MSC_FORCE_PROTO_SCSI, UQ_MSC_NO_INQUIRY),
+ USB_QUIRK(EPSON, STYLUS_895, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
+ UQ_MSC_FORCE_PROTO_SCSI, UQ_MSC_NO_GETMAXLUN),
+ USB_QUIRK(FEIYA, 5IN1, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
+ UQ_MSC_FORCE_PROTO_SCSI),
+ USB_QUIRK(FREECOM, DVD, 0x0000, 0xffff, UQ_MSC_FORCE_PROTO_SCSI),
+ USB_QUIRK(FREECOM, HDD, 0x0000, 0xffff, UQ_MSC_NO_SYNC_CACHE),
+ USB_QUIRK(FUJIPHOTO, MASS0100, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_CBI_I,
+ UQ_MSC_FORCE_PROTO_ATAPI, UQ_MSC_NO_RS_CLEAR_UA, UQ_MSC_NO_SYNC_CACHE),
+ USB_QUIRK(GENESYS, GL641USB2IDE, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
+ UQ_MSC_FORCE_PROTO_SCSI, UQ_MSC_FORCE_SHORT_INQ,
+ UQ_MSC_NO_START_STOP, UQ_MSC_IGNORE_RESIDUE, UQ_MSC_NO_SYNC_CACHE),
+ USB_QUIRK(GENESYS, GL641USB2IDE_2, 0x0000, 0xffff,
+ UQ_MSC_FORCE_WIRE_BBB, UQ_MSC_FORCE_PROTO_ATAPI,
+ UQ_MSC_FORCE_SHORT_INQ, UQ_MSC_NO_START_STOP,
+ UQ_MSC_IGNORE_RESIDUE),
+ USB_QUIRK(GENESYS, GL641USB, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
+ UQ_MSC_FORCE_PROTO_SCSI, UQ_MSC_FORCE_SHORT_INQ,
+ UQ_MSC_NO_START_STOP, UQ_MSC_IGNORE_RESIDUE),
+ USB_QUIRK(GENESYS, GL641USB_2, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
+ UQ_MSC_FORCE_PROTO_SCSI, UQ_MSC_WRONG_CSWSIG),
+ USB_QUIRK(HAGIWARA, FG, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
+ UQ_MSC_FORCE_PROTO_SCSI),
+ USB_QUIRK(HAGIWARA, FGSM, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
+ UQ_MSC_FORCE_PROTO_SCSI),
+ USB_QUIRK(HITACHI, DVDCAM_DZ_MV100A, 0x0000, 0xffff,
+ UQ_MSC_FORCE_WIRE_CBI, UQ_MSC_FORCE_PROTO_SCSI,
+ UQ_MSC_NO_GETMAXLUN),
+ USB_QUIRK(HITACHI, DVDCAM_USB, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_CBI_I,
+ UQ_MSC_FORCE_PROTO_ATAPI, UQ_MSC_NO_INQUIRY),
+ USB_QUIRK(HP, CDW4E, 0x0000, 0xffff, UQ_MSC_FORCE_PROTO_ATAPI),
+ USB_QUIRK(HP, CDW8200, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_CBI_I,
+ UQ_MSC_FORCE_PROTO_ATAPI, UQ_MSC_NO_TEST_UNIT_READY,
+ UQ_MSC_NO_START_STOP),
+ USB_QUIRK(IMAGINATION, DBX1, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
+ UQ_MSC_FORCE_PROTO_SCSI, UQ_MSC_WRONG_CSWSIG),
+ USB_QUIRK(INSYSTEM, USBCABLE, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_CBI,
+ UQ_MSC_FORCE_PROTO_ATAPI, UQ_MSC_NO_TEST_UNIT_READY,
+ UQ_MSC_NO_START_STOP, UQ_MSC_ALT_IFACE_1),
+ USB_QUIRK(INSYSTEM, ATAPI, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_CBI,
+ UQ_MSC_FORCE_PROTO_RBC),
+ USB_QUIRK(INSYSTEM, STORAGE_V2, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_CBI,
+ UQ_MSC_FORCE_PROTO_RBC),
+ USB_QUIRK(IODATA, IU_CD2, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
+ UQ_MSC_FORCE_PROTO_SCSI),
+ USB_QUIRK(IODATA, DVR_UEH8, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
+ UQ_MSC_FORCE_PROTO_SCSI),
+ USB_QUIRK(IOMEGA, ZIP100, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
+ UQ_MSC_FORCE_PROTO_SCSI,
+ UQ_MSC_NO_TEST_UNIT_READY), /* XXX ZIP drives can also use ATAPI */
+ USB_QUIRK(JMICRON, JM20336, 0x0000, 0xffff, UQ_MSC_NO_SYNC_CACHE),
+ USB_QUIRK(JMICRON, JM20337, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
+ UQ_MSC_FORCE_PROTO_SCSI,
+ UQ_MSC_NO_SYNC_CACHE),
+ USB_QUIRK(KYOCERA, FINECAM_L3, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
+ UQ_MSC_FORCE_PROTO_SCSI, UQ_MSC_NO_INQUIRY),
+ USB_QUIRK(KYOCERA, FINECAM_S3X, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_CBI,
+ UQ_MSC_FORCE_PROTO_ATAPI, UQ_MSC_NO_INQUIRY),
+ USB_QUIRK(KYOCERA, FINECAM_S4, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_CBI,
+ UQ_MSC_FORCE_PROTO_ATAPI, UQ_MSC_NO_INQUIRY),
+ USB_QUIRK(KYOCERA, FINECAM_S5, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
+ UQ_MSC_FORCE_PROTO_SCSI, UQ_MSC_NO_INQUIRY),
+ USB_QUIRK(LACIE, HD, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_CBI,
+ UQ_MSC_FORCE_PROTO_RBC),
+ USB_QUIRK(LEXAR, CF_READER, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
+ UQ_MSC_FORCE_PROTO_SCSI, UQ_MSC_NO_INQUIRY),
+ USB_QUIRK(LEXAR, JUMPSHOT, 0x0000, 0xffff, UQ_MSC_FORCE_PROTO_SCSI),
+ USB_QUIRK(LOGITEC, LDR_H443SU2, 0x0000, 0xffff, UQ_MSC_FORCE_PROTO_SCSI),
+ USB_QUIRK(LOGITEC, LDR_H443U2, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
+ UQ_MSC_FORCE_PROTO_SCSI),
+ USB_QUIRK(MELCO, DUBPXXG, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
+ UQ_MSC_FORCE_PROTO_SCSI, UQ_MSC_FORCE_SHORT_INQ,
+ UQ_MSC_NO_START_STOP, UQ_MSC_IGNORE_RESIDUE),
+ USB_QUIRK(MICROTECH, DPCM, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_CBI,
+ UQ_MSC_FORCE_PROTO_SCSI, UQ_MSC_NO_TEST_UNIT_READY,
+ UQ_MSC_NO_START_STOP),
+ USB_QUIRK(MICROTECH, SCSIDB25, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
+ UQ_MSC_FORCE_PROTO_SCSI),
+ USB_QUIRK(MICROTECH, SCSIHD50, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
+ UQ_MSC_FORCE_PROTO_SCSI),
+ USB_QUIRK(MINOLTA, E223, 0x0000, 0xffff, UQ_MSC_FORCE_PROTO_SCSI),
+ USB_QUIRK(MINOLTA, F300, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
+ UQ_MSC_FORCE_PROTO_SCSI),
+ USB_QUIRK(MITSUMI, CDRRW, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_CBI,
+ UQ_MSC_FORCE_PROTO_ATAPI),
+ USB_QUIRK(MOTOROLA2, E398, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
+ UQ_MSC_FORCE_PROTO_SCSI, UQ_MSC_FORCE_SHORT_INQ,
+ UQ_MSC_NO_INQUIRY_EVPD, UQ_MSC_NO_GETMAXLUN),
+ USB_QUIRK_VP(USB_VENDOR_MPMAN, 0, 0x0000, 0xffff,
+ UQ_MSC_NO_SYNC_CACHE, UQ_MATCH_VENDOR_ONLY),
+ USB_QUIRK(MSYSTEMS, DISKONKEY, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
+ UQ_MSC_FORCE_PROTO_SCSI, UQ_MSC_IGNORE_RESIDUE, UQ_MSC_NO_GETMAXLUN,
+ UQ_MSC_NO_RS_CLEAR_UA),
+ USB_QUIRK(MSYSTEMS, DISKONKEY2, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
+ UQ_MSC_FORCE_PROTO_ATAPI),
+ USB_QUIRK(MYSON, HEDEN, 0x0000, 0xffff, UQ_MSC_IGNORE_RESIDUE,
+ UQ_MSC_NO_SYNC_CACHE),
+ USB_QUIRK(MYSON, HEDEN_8813, 0x0000, 0xffff, UQ_MSC_NO_SYNC_CACHE),
+ USB_QUIRK(MYSON, STARREADER, 0x0000, 0xffff, UQ_MSC_NO_SYNC_CACHE),
+ USB_QUIRK(NEODIO, ND3260, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
+ UQ_MSC_FORCE_PROTO_SCSI, UQ_MSC_FORCE_SHORT_INQ),
+ USB_QUIRK(NETAC, CF_CARD, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
+ UQ_MSC_FORCE_PROTO_SCSI, UQ_MSC_NO_INQUIRY),
+ USB_QUIRK(NETAC, ONLYDISK, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
+ UQ_MSC_FORCE_PROTO_SCSI, UQ_MSC_IGNORE_RESIDUE),
+ USB_QUIRK(NETCHIP, CLIK_40, 0x0000, 0xffff, UQ_MSC_FORCE_PROTO_ATAPI,
+ UQ_MSC_NO_INQUIRY),
+ USB_QUIRK(NIKON, D300, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
+ UQ_MSC_FORCE_PROTO_SCSI),
+ USB_QUIRK(OLYMPUS, C1, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
+ UQ_MSC_FORCE_PROTO_SCSI, UQ_MSC_WRONG_CSWSIG),
+ USB_QUIRK(OLYMPUS, C700, 0x0000, 0xffff, UQ_MSC_NO_GETMAXLUN),
+ USB_QUIRK(ONSPEC, SDS_HOTFIND_D, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
+ UQ_MSC_FORCE_PROTO_SCSI, UQ_MSC_NO_GETMAXLUN, UQ_MSC_NO_SYNC_CACHE),
+ USB_QUIRK(ONSPEC, CFMS_RW, 0x0000, 0xffff, UQ_MSC_FORCE_PROTO_SCSI),
+ USB_QUIRK(ONSPEC, CFSM_COMBO, 0x0000, 0xffff, UQ_MSC_FORCE_PROTO_SCSI),
+ USB_QUIRK(ONSPEC, CFSM_READER, 0x0000, 0xffff, UQ_MSC_FORCE_PROTO_SCSI),
+ USB_QUIRK(ONSPEC, CFSM_READER2, 0x0000, 0xffff,
+ UQ_MSC_FORCE_PROTO_SCSI),
+ USB_QUIRK(ONSPEC, MDCFE_B_CF_READER, 0x0000, 0xffff,
+ UQ_MSC_FORCE_PROTO_SCSI),
+ USB_QUIRK(ONSPEC, MDSM_B_READER, 0x0000, 0xffff,
+ UQ_MSC_FORCE_PROTO_SCSI, UQ_MSC_NO_INQUIRY),
+ USB_QUIRK(ONSPEC, READER, 0x0000, 0xffff, UQ_MSC_FORCE_PROTO_SCSI),
+ USB_QUIRK(ONSPEC, UCF100, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
+ UQ_MSC_FORCE_PROTO_ATAPI, UQ_MSC_NO_INQUIRY, UQ_MSC_NO_GETMAXLUN),
+ USB_QUIRK(ONSPEC2, IMAGEMATE_SDDR55, 0x0000, 0xffff,
+ UQ_MSC_FORCE_PROTO_SCSI, UQ_MSC_NO_GETMAXLUN),
+ USB_QUIRK(PANASONIC, KXL840AN, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
+ UQ_MSC_FORCE_PROTO_ATAPI, UQ_MSC_NO_GETMAXLUN),
+ USB_QUIRK(PANASONIC, KXLCB20AN, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
+ UQ_MSC_FORCE_PROTO_SCSI),
+ USB_QUIRK(PANASONIC, KXLCB35AN, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
+ UQ_MSC_FORCE_PROTO_SCSI),
+ USB_QUIRK(PANASONIC, LS120CAM, 0x0000, 0xffff, UQ_MSC_FORCE_PROTO_UFI),
+ USB_QUIRK(PHILIPS, SPE3030CC, 0x0000, 0xffff, UQ_MSC_NO_SYNC_CACHE),
+ USB_QUIRK(PLEXTOR, 40_12_40U, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
+ UQ_MSC_FORCE_PROTO_SCSI, UQ_MSC_NO_TEST_UNIT_READY),
+ USB_QUIRK(PNY, ATTACHE2, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
+ UQ_MSC_FORCE_PROTO_SCSI, UQ_MSC_IGNORE_RESIDUE,
+ UQ_MSC_NO_START_STOP),
+ USB_QUIRK(PROLIFIC, PL2506, 0x0000, 0xffff,
+ UQ_MSC_NO_SYNC_CACHE),
+ USB_QUIRK_VP(USB_VENDOR_SAMSUNG_TECHWIN,
+ USB_PRODUCT_SAMSUNG_TECHWIN_DIGIMAX_410, 0x0000, 0xffff,
+ UQ_MSC_FORCE_WIRE_BBB, UQ_MSC_FORCE_PROTO_SCSI, UQ_MSC_NO_INQUIRY),
+ USB_QUIRK(SAMSUNG, YP_U4, 0x0000, 0xffff, UQ_MSC_NO_SYNC_CACHE),
+ USB_QUIRK(SANDISK, SDDR05A, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_CBI,
+ UQ_MSC_FORCE_PROTO_SCSI, UQ_MSC_READ_CAP_OFFBY1,
+ UQ_MSC_NO_GETMAXLUN),
+ USB_QUIRK(SANDISK, SDDR09, 0x0000, 0xffff, UQ_MSC_FORCE_PROTO_SCSI,
+ UQ_MSC_READ_CAP_OFFBY1, UQ_MSC_NO_GETMAXLUN),
+ USB_QUIRK(SANDISK, SDDR12, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_CBI,
+ UQ_MSC_FORCE_PROTO_SCSI, UQ_MSC_READ_CAP_OFFBY1,
+ UQ_MSC_NO_GETMAXLUN),
+ USB_QUIRK(SANDISK, SDCZ2_256, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
+ UQ_MSC_FORCE_PROTO_SCSI, UQ_MSC_IGNORE_RESIDUE),
+ USB_QUIRK(SANDISK, SDCZ4_128, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
+ UQ_MSC_FORCE_PROTO_SCSI, UQ_MSC_IGNORE_RESIDUE),
+ USB_QUIRK(SANDISK, SDCZ4_256, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
+ UQ_MSC_FORCE_PROTO_SCSI, UQ_MSC_IGNORE_RESIDUE),
+ USB_QUIRK(SANDISK, SDDR31, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
+ UQ_MSC_FORCE_PROTO_SCSI, UQ_MSC_READ_CAP_OFFBY1),
+ USB_QUIRK(SCANLOGIC, SL11R, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
+ UQ_MSC_FORCE_PROTO_ATAPI, UQ_MSC_NO_INQUIRY),
+ USB_QUIRK(SHUTTLE, EUSB, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_CBI_I,
+ UQ_MSC_FORCE_PROTO_ATAPI, UQ_MSC_NO_TEST_UNIT_READY,
+ UQ_MSC_NO_START_STOP, UQ_MSC_SHUTTLE_INIT),
+ USB_QUIRK(SHUTTLE, CDRW, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_CBI,
+ UQ_MSC_FORCE_PROTO_ATAPI),
+ USB_QUIRK(SHUTTLE, CF, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_CBI,
+ UQ_MSC_FORCE_PROTO_ATAPI),
+ USB_QUIRK(SHUTTLE, EUSBATAPI, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_CBI,
+ UQ_MSC_FORCE_PROTO_ATAPI),
+ USB_QUIRK(SHUTTLE, EUSBCFSM, 0x0000, 0xffff, UQ_MSC_FORCE_PROTO_SCSI),
+ USB_QUIRK(SHUTTLE, EUSCSI, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
+ UQ_MSC_FORCE_PROTO_SCSI),
+ USB_QUIRK(SHUTTLE, HIFD, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_CBI,
+ UQ_MSC_FORCE_PROTO_SCSI, UQ_MSC_NO_GETMAXLUN),
+ USB_QUIRK(SHUTTLE, SDDR09, 0x0000, 0xffff, UQ_MSC_FORCE_PROTO_SCSI,
+ UQ_MSC_NO_GETMAXLUN),
+ USB_QUIRK(SHUTTLE, ZIOMMC, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_CBI,
+ UQ_MSC_FORCE_PROTO_SCSI, UQ_MSC_NO_GETMAXLUN),
+ USB_QUIRK(SIGMATEL, I_BEAD100, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
+ UQ_MSC_FORCE_PROTO_SCSI, UQ_MSC_SHUTTLE_INIT),
+ USB_QUIRK(SIIG, WINTERREADER, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
+ UQ_MSC_FORCE_PROTO_SCSI, UQ_MSC_IGNORE_RESIDUE),
+ USB_QUIRK(SKANHEX, MD_7425, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
+ UQ_MSC_FORCE_PROTO_SCSI, UQ_MSC_NO_INQUIRY),
+ USB_QUIRK(SKANHEX, SX_520Z, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
+ UQ_MSC_FORCE_PROTO_SCSI, UQ_MSC_NO_INQUIRY),
+ USB_QUIRK(SONY, HANDYCAM, 0x0500, 0x0500, UQ_MSC_FORCE_WIRE_CBI,
+ UQ_MSC_FORCE_PROTO_RBC, UQ_MSC_RBC_PAD_TO_12),
+ USB_QUIRK(SONY, CLIE_40_MS, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
+ UQ_MSC_FORCE_PROTO_SCSI, UQ_MSC_NO_INQUIRY),
+ USB_QUIRK(SONY, DSC, 0x0500, 0x0500, UQ_MSC_FORCE_WIRE_CBI,
+ UQ_MSC_FORCE_PROTO_RBC, UQ_MSC_RBC_PAD_TO_12),
+ USB_QUIRK(SONY, DSC, 0x0600, 0x0600, UQ_MSC_FORCE_WIRE_CBI,
+ UQ_MSC_FORCE_PROTO_RBC, UQ_MSC_RBC_PAD_TO_12),
+ USB_QUIRK(SONY, DSC, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_CBI,
+ UQ_MSC_FORCE_PROTO_RBC),
+ USB_QUIRK(SONY, HANDYCAM, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_CBI,
+ UQ_MSC_FORCE_PROTO_RBC),
+ USB_QUIRK(SONY, MSC, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_CBI,
+ UQ_MSC_FORCE_PROTO_RBC),
+ USB_QUIRK(SONY, MS_MSC_U03, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_CBI,
+ UQ_MSC_FORCE_PROTO_UFI, UQ_MSC_NO_GETMAXLUN),
+ USB_QUIRK(SONY, MS_NW_MS7, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
+ UQ_MSC_FORCE_PROTO_SCSI, UQ_MSC_NO_GETMAXLUN),
+ USB_QUIRK(SONY, MS_PEG_N760C, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
+ UQ_MSC_FORCE_PROTO_SCSI, UQ_MSC_NO_INQUIRY),
+ USB_QUIRK(SONY, MSACUS1, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
+ UQ_MSC_FORCE_PROTO_SCSI, UQ_MSC_NO_GETMAXLUN),
+ USB_QUIRK(SONY, PORTABLE_HDD_V2, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
+ UQ_MSC_FORCE_PROTO_SCSI),
+ USB_QUIRK(SUPERTOP, IDE, 0x0000, 0xffff, UQ_MSC_IGNORE_RESIDUE,
+ UQ_MSC_NO_SYNC_CACHE),
+ USB_QUIRK(TAUGA, CAMERAMATE, 0x0000, 0xffff, UQ_MSC_FORCE_PROTO_SCSI),
+ USB_QUIRK(TEAC, FD05PUB, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_CBI,
+ UQ_MSC_FORCE_PROTO_UFI),
+ USB_QUIRK(TECLAST, TLC300, 0x0000, 0xffff, UQ_MSC_NO_TEST_UNIT_READY,
+ UQ_MSC_NO_SYNC_CACHE),
+ USB_QUIRK(TREK, MEMKEY, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
+ UQ_MSC_FORCE_PROTO_SCSI, UQ_MSC_NO_INQUIRY),
+ USB_QUIRK(TREK, THUMBDRIVE_8MB, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
+ UQ_MSC_FORCE_PROTO_ATAPI, UQ_MSC_IGNORE_RESIDUE),
+ USB_QUIRK(TRUMPION, C3310, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_CBI,
+ UQ_MSC_FORCE_PROTO_UFI),
+ USB_QUIRK(TRUMPION, MP3, 0x0000, 0xffff, UQ_MSC_FORCE_PROTO_RBC),
+ USB_QUIRK(TRUMPION, T33520, 0x0000, 0xffff, UQ_MSC_FORCE_PROTO_SCSI),
+ USB_QUIRK(TWINMOS, MDIV, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
+ UQ_MSC_FORCE_PROTO_SCSI),
+ USB_QUIRK(VIA, USB2IDEBRIDGE, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
+ UQ_MSC_FORCE_PROTO_SCSI, UQ_MSC_NO_SYNC_CACHE),
+ USB_QUIRK(VIVITAR, 35XX, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
+ UQ_MSC_FORCE_PROTO_SCSI, UQ_MSC_NO_INQUIRY),
+ USB_QUIRK(WESTERN, COMBO, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
+ UQ_MSC_FORCE_PROTO_SCSI, UQ_MSC_FORCE_SHORT_INQ,
+ UQ_MSC_NO_START_STOP, UQ_MSC_IGNORE_RESIDUE),
+ USB_QUIRK(WESTERN, EXTHDD, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
+ UQ_MSC_FORCE_PROTO_SCSI, UQ_MSC_FORCE_SHORT_INQ,
+ UQ_MSC_NO_START_STOP, UQ_MSC_IGNORE_RESIDUE),
+ USB_QUIRK(WESTERN, MYBOOK, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
+ UQ_MSC_FORCE_PROTO_SCSI, UQ_MSC_NO_INQUIRY_EVPD,
+ UQ_MSC_NO_SYNC_CACHE),
+ USB_QUIRK(WESTERN, MYPASSWORD, 0x0000, 0xffff, UQ_MSC_FORCE_SHORT_INQ),
+ USB_QUIRK(WINMAXGROUP, FLASH64MC, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
+ UQ_MSC_FORCE_PROTO_SCSI, UQ_MSC_NO_INQUIRY),
+ USB_QUIRK(YANO, FW800HD, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
+ UQ_MSC_FORCE_PROTO_SCSI, UQ_MSC_FORCE_SHORT_INQ,
+ UQ_MSC_NO_START_STOP, UQ_MSC_IGNORE_RESIDUE),
+ USB_QUIRK(YANO, U640MO, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_CBI_I,
+ UQ_MSC_FORCE_PROTO_ATAPI, UQ_MSC_FORCE_SHORT_INQ),
+ USB_QUIRK(YEDATA, FLASHBUSTERU, 0x0000, 0x007F, UQ_MSC_FORCE_WIRE_CBI,
+ UQ_MSC_FORCE_PROTO_UFI, UQ_MSC_NO_RS_CLEAR_UA, UQ_MSC_FLOPPY_SPEED,
+ UQ_MSC_NO_TEST_UNIT_READY, UQ_MSC_NO_GETMAXLUN),
+ USB_QUIRK(YEDATA, FLASHBUSTERU, 0x0080, 0x0080, UQ_MSC_FORCE_WIRE_CBI_I,
+ UQ_MSC_FORCE_PROTO_UFI, UQ_MSC_NO_RS_CLEAR_UA, UQ_MSC_FLOPPY_SPEED,
+ UQ_MSC_NO_TEST_UNIT_READY, UQ_MSC_NO_GETMAXLUN),
+ USB_QUIRK(YEDATA, FLASHBUSTERU, 0x0081, 0xFFFF, UQ_MSC_FORCE_WIRE_CBI_I,
+ UQ_MSC_FORCE_PROTO_UFI, UQ_MSC_NO_RS_CLEAR_UA, UQ_MSC_FLOPPY_SPEED,
+ UQ_MSC_NO_GETMAXLUN),
+ USB_QUIRK(ZORAN, EX20DSC, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_CBI,
+ UQ_MSC_FORCE_PROTO_ATAPI),
+ USB_QUIRK(MEIZU, M6_SL, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
+ UQ_MSC_FORCE_PROTO_SCSI, UQ_MSC_NO_INQUIRY, UQ_MSC_NO_SYNC_CACHE),
+ USB_QUIRK(ACTIONS, MP4, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
+ UQ_MSC_FORCE_PROTO_SCSI, UQ_MSC_NO_SYNC_CACHE),
+ USB_QUIRK(ASUS, GMSC, 0x0000, 0xffff, UQ_MSC_NO_SYNC_CACHE),
+ USB_QUIRK(CHIPSBANK, USBMEMSTICK, 0x0000, 0xffff, UQ_MSC_NO_SYNC_CACHE),
+ USB_QUIRK(CHIPSBANK, USBMEMSTICK1, 0x0000, 0xffff, UQ_MSC_NO_SYNC_CACHE),
+ USB_QUIRK(NEWLINK, USB2IDEBRIDGE, 0x0000, 0xffff, UQ_MSC_NO_SYNC_CACHE),
+};
+#undef USB_QUIRK_VP
+#undef USB_QUIRK
+
+static const char *usb_quirk_str[USB_QUIRK_MAX] = {
+ [UQ_NONE] = "UQ_NONE",
+ [UQ_MATCH_VENDOR_ONLY] = "UQ_MATCH_VENDOR_ONLY",
+ [UQ_AUDIO_SWAP_LR] = "UQ_AUDIO_SWAP_LR",
+ [UQ_AU_INP_ASYNC] = "UQ_AU_INP_ASYNC",
+ [UQ_AU_NO_FRAC] = "UQ_AU_NO_FRAC",
+ [UQ_AU_NO_XU] = "UQ_AU_NO_XU",
+ [UQ_BAD_ADC] = "UQ_BAD_ADC",
+ [UQ_BAD_AUDIO] = "UQ_BAD_AUDIO",
+ [UQ_BROKEN_BIDIR] = "UQ_BROKEN_BIDIR",
+ [UQ_BUS_POWERED] = "UQ_BUS_POWERED",
+ [UQ_HID_IGNORE] = "UQ_HID_IGNORE",
+ [UQ_KBD_IGNORE] = "UQ_KBD_IGNORE",
+ [UQ_KBD_BOOTPROTO] = "UQ_KBD_BOOTPROTO",
+ [UQ_MS_BAD_CLASS] = "UQ_MS_BAD_CLASS",
+ [UQ_MS_LEADING_BYTE] = "UQ_MS_LEADING_BYTE",
+ [UQ_MS_REVZ] = "UQ_MS_REVZ",
+ [UQ_NO_STRINGS] = "UQ_NO_STRINGS",
+ [UQ_OPEN_CLEARSTALL] = "UQ_OPEN_CLEARSTALL",
+ [UQ_POWER_CLAIM] = "UQ_POWER_CLAIM",
+ [UQ_SPUR_BUT_UP] = "UQ_SPUR_BUT_UP",
+ [UQ_SWAP_UNICODE] = "UQ_SWAP_UNICODE",
+ [UQ_CFG_INDEX_1] = "UQ_CFG_INDEX_1",
+ [UQ_CFG_INDEX_2] = "UQ_CFG_INDEX_2",
+ [UQ_CFG_INDEX_3] = "UQ_CFG_INDEX_3",
+ [UQ_CFG_INDEX_4] = "UQ_CFG_INDEX_4",
+ [UQ_CFG_INDEX_0] = "UQ_CFG_INDEX_0",
+ [UQ_ASSUME_CM_OVER_DATA] = "UQ_ASSUME_CM_OVER_DATA",
+ [UQ_MSC_NO_TEST_UNIT_READY] = "UQ_MSC_NO_TEST_UNIT_READY",
+ [UQ_MSC_NO_RS_CLEAR_UA] = "UQ_MSC_NO_RS_CLEAR_UA",
+ [UQ_MSC_NO_START_STOP] = "UQ_MSC_NO_START_STOP",
+ [UQ_MSC_NO_GETMAXLUN] = "UQ_MSC_NO_GETMAXLUN",
+ [UQ_MSC_NO_INQUIRY] = "UQ_MSC_NO_INQUIRY",
+ [UQ_MSC_NO_INQUIRY_EVPD] = "UQ_MSC_NO_INQUIRY_EVPD",
+ [UQ_MSC_NO_SYNC_CACHE] = "UQ_MSC_NO_SYNC_CACHE",
+ [UQ_MSC_SHUTTLE_INIT] = "UQ_MSC_SHUTTLE_INIT",
+ [UQ_MSC_ALT_IFACE_1] = "UQ_MSC_ALT_IFACE_1",
+ [UQ_MSC_FLOPPY_SPEED] = "UQ_MSC_FLOPPY_SPEED",
+ [UQ_MSC_IGNORE_RESIDUE] = "UQ_MSC_IGNORE_RESIDUE",
+ [UQ_MSC_WRONG_CSWSIG] = "UQ_MSC_WRONG_CSWSIG",
+ [UQ_MSC_RBC_PAD_TO_12] = "UQ_MSC_RBC_PAD_TO_12",
+ [UQ_MSC_READ_CAP_OFFBY1] = "UQ_MSC_READ_CAP_OFFBY1",
+ [UQ_MSC_FORCE_SHORT_INQ] = "UQ_MSC_FORCE_SHORT_INQ",
+ [UQ_MSC_FORCE_WIRE_BBB] = "UQ_MSC_FORCE_WIRE_BBB",
+ [UQ_MSC_FORCE_WIRE_CBI] = "UQ_MSC_FORCE_WIRE_CBI",
+ [UQ_MSC_FORCE_WIRE_CBI_I] = "UQ_MSC_FORCE_WIRE_CBI_I",
+ [UQ_MSC_FORCE_PROTO_SCSI] = "UQ_MSC_FORCE_PROTO_SCSI",
+ [UQ_MSC_FORCE_PROTO_ATAPI] = "UQ_MSC_FORCE_PROTO_ATAPI",
+ [UQ_MSC_FORCE_PROTO_UFI] = "UQ_MSC_FORCE_PROTO_UFI",
+ [UQ_MSC_FORCE_PROTO_RBC] = "UQ_MSC_FORCE_PROTO_RBC",
+ [UQ_MSC_EJECT_HUAWEI] = "UQ_MSC_EJECT_HUAWEI",
+ [UQ_MSC_EJECT_SIERRA] = "UQ_MSC_EJECT_SIERRA",
+ [UQ_MSC_EJECT_SCSIEJECT] = "UQ_MSC_EJECT_SCSIEJECT",
+ [UQ_MSC_EJECT_REZERO] = "UQ_MSC_EJECT_REZERO",
+ [UQ_MSC_EJECT_ZTESTOR] = "UQ_MSC_EJECT_ZTESTOR",
+ [UQ_MSC_EJECT_CMOTECH] = "UQ_MSC_EJECT_CMOTECH",
+ [UQ_MSC_EJECT_WAIT] = "UQ_MSC_EJECT_WAIT",
+ [UQ_MSC_EJECT_SAEL_M460] = "UQ_MSC_EJECT_SAEL_M460",
+ [UQ_MSC_EJECT_HUAWEISCSI] = "UQ_MSC_EJECT_HUAWEISCSI",
+ [UQ_MSC_EJECT_TCT] = "UQ_MSC_EJECT_TCT",
+};
+
+/*------------------------------------------------------------------------*
+ * usb_quirkstr
+ *
+ * This function converts a USB quirk code into a string.
+ *------------------------------------------------------------------------*/
+static const char *
+usb_quirkstr(uint16_t quirk)
+{
+ return ((quirk < USB_QUIRK_MAX) ?
+ usb_quirk_str[quirk] : "USB_QUIRK_UNKNOWN");
+}
+
+/*------------------------------------------------------------------------*
+ * usb_test_quirk_by_info
+ *
+ * Returns:
+ * 0: Quirk not found
+ * Else: Quirk found
+ *------------------------------------------------------------------------*/
+static uint8_t
+usb_test_quirk_by_info(const struct usbd_lookup_info *info, uint16_t quirk)
+{
+ uint16_t x;
+ uint16_t y;
+
+ if (quirk == UQ_NONE) {
+ return (0);
+ }
+ mtx_lock(&usb_quirk_mtx);
+
+ for (x = 0; x != USB_DEV_QUIRKS_MAX; x++) {
+ /* see if quirk information does not match */
+ if ((usb_quirks[x].vid != info->idVendor) ||
+ (usb_quirks[x].lo_rev > info->bcdDevice) ||
+ (usb_quirks[x].hi_rev < info->bcdDevice)) {
+ continue;
+ }
+ /* see if the quirk should match on vendor ID only */
+ if (usb_quirks[x].pid != info->idProduct) {
+ if (usb_quirks[x].pid != 0)
+ continue;
+
+ for (y = 0; y != USB_SUB_QUIRKS_MAX; y++) {
+ if (usb_quirks[x].quirks[y] == UQ_MATCH_VENDOR_ONLY)
+ break;
+ }
+ if (y == USB_SUB_QUIRKS_MAX)
+ continue;
+ }
+ /* lookup quirk */
+ for (y = 0; y != USB_SUB_QUIRKS_MAX; y++) {
+ if (usb_quirks[x].quirks[y] == quirk) {
+ mtx_unlock(&usb_quirk_mtx);
+ DPRINTF("Found quirk '%s'.\n", usb_quirkstr(quirk));
+ return (1);
+ }
+ }
+ /* no quirk found */
+ break;
+ }
+ mtx_unlock(&usb_quirk_mtx);
+ return (0);
+}
+
+static struct usb_quirk_entry *
+usb_quirk_get_entry(uint16_t vid, uint16_t pid,
+ uint16_t lo_rev, uint16_t hi_rev, uint8_t do_alloc)
+{
+ uint16_t x;
+
+ mtx_assert(&usb_quirk_mtx, MA_OWNED);
+
+ if ((vid | pid | lo_rev | hi_rev) == 0) {
+ /* all zero - special case */
+ return (usb_quirks + USB_DEV_QUIRKS_MAX - 1);
+ }
+ /* search for an existing entry */
+ for (x = 0; x != USB_DEV_QUIRKS_MAX; x++) {
+ /* see if quirk information does not match */
+ if ((usb_quirks[x].vid != vid) ||
+ (usb_quirks[x].pid != pid) ||
+ (usb_quirks[x].lo_rev != lo_rev) ||
+ (usb_quirks[x].hi_rev != hi_rev)) {
+ continue;
+ }
+ return (usb_quirks + x);
+ }
+
+ if (do_alloc == 0) {
+ /* no match */
+ return (NULL);
+ }
+ /* search for a free entry */
+ for (x = 0; x != USB_DEV_QUIRKS_MAX; x++) {
+ /* see if quirk information does not match */
+ if ((usb_quirks[x].vid |
+ usb_quirks[x].pid |
+ usb_quirks[x].lo_rev |
+ usb_quirks[x].hi_rev) != 0) {
+ continue;
+ }
+ usb_quirks[x].vid = vid;
+ usb_quirks[x].pid = pid;
+ usb_quirks[x].lo_rev = lo_rev;
+ usb_quirks[x].hi_rev = hi_rev;
+
+ return (usb_quirks + x);
+ }
+
+ /* no entry found */
+ return (NULL);
+}
+
+/*------------------------------------------------------------------------*
+ * usb_quirk_ioctl - handle quirk IOCTLs
+ *
+ * Returns:
+ * 0: Success
+ * Else: Failure
+ *------------------------------------------------------------------------*/
+static int
+usb_quirk_ioctl(unsigned long cmd, caddr_t data,
+ int fflag, struct thread *td)
+{
+ struct usb_gen_quirk *pgq;
+ struct usb_quirk_entry *pqe;
+ uint32_t x;
+ uint32_t y;
+ int err;
+
+ switch (cmd) {
+ case USB_DEV_QUIRK_GET:
+ pgq = (void *)data;
+ x = pgq->index % USB_SUB_QUIRKS_MAX;
+ y = pgq->index / USB_SUB_QUIRKS_MAX;
+ if (y >= USB_DEV_QUIRKS_MAX) {
+ return (EINVAL);
+ }
+ mtx_lock(&usb_quirk_mtx);
+ /* copy out data */
+ pgq->vid = usb_quirks[y].vid;
+ pgq->pid = usb_quirks[y].pid;
+ pgq->bcdDeviceLow = usb_quirks[y].lo_rev;
+ pgq->bcdDeviceHigh = usb_quirks[y].hi_rev;
+ strlcpy(pgq->quirkname,
+ usb_quirkstr(usb_quirks[y].quirks[x]),
+ sizeof(pgq->quirkname));
+ mtx_unlock(&usb_quirk_mtx);
+ return (0); /* success */
+
+ case USB_QUIRK_NAME_GET:
+ pgq = (void *)data;
+ x = pgq->index;
+ if (x >= USB_QUIRK_MAX) {
+ return (EINVAL);
+ }
+ strlcpy(pgq->quirkname,
+ usb_quirkstr(x), sizeof(pgq->quirkname));
+ return (0); /* success */
+
+ case USB_DEV_QUIRK_ADD:
+ pgq = (void *)data;
+
+ /* check privileges */
+ err = priv_check(curthread, PRIV_DRIVER);
+ if (err) {
+ return (err);
+ }
+ /* convert quirk string into numerical */
+ for (y = 0; y != USB_QUIRK_MAX; y++) {
+ if (strcmp(pgq->quirkname, usb_quirkstr(y)) == 0) {
+ break;
+ }
+ }
+ if (y == USB_QUIRK_MAX) {
+ return (EINVAL);
+ }
+ if (y == UQ_NONE) {
+ return (EINVAL);
+ }
+ mtx_lock(&usb_quirk_mtx);
+ pqe = usb_quirk_get_entry(pgq->vid, pgq->pid,
+ pgq->bcdDeviceLow, pgq->bcdDeviceHigh, 1);
+ if (pqe == NULL) {
+ mtx_unlock(&usb_quirk_mtx);
+ return (EINVAL);
+ }
+ for (x = 0; x != USB_SUB_QUIRKS_MAX; x++) {
+ if (pqe->quirks[x] == UQ_NONE) {
+ pqe->quirks[x] = y;
+ break;
+ }
+ }
+ mtx_unlock(&usb_quirk_mtx);
+ if (x == USB_SUB_QUIRKS_MAX) {
+ return (ENOMEM);
+ }
+ return (0); /* success */
+
+ case USB_DEV_QUIRK_REMOVE:
+ pgq = (void *)data;
+ /* check privileges */
+ err = priv_check(curthread, PRIV_DRIVER);
+ if (err) {
+ return (err);
+ }
+ /* convert quirk string into numerical */
+ for (y = 0; y != USB_QUIRK_MAX; y++) {
+ if (strcmp(pgq->quirkname, usb_quirkstr(y)) == 0) {
+ break;
+ }
+ }
+ if (y == USB_QUIRK_MAX) {
+ return (EINVAL);
+ }
+ if (y == UQ_NONE) {
+ return (EINVAL);
+ }
+ mtx_lock(&usb_quirk_mtx);
+ pqe = usb_quirk_get_entry(pgq->vid, pgq->pid,
+ pgq->bcdDeviceLow, pgq->bcdDeviceHigh, 0);
+ if (pqe == NULL) {
+ mtx_unlock(&usb_quirk_mtx);
+ return (EINVAL);
+ }
+ for (x = 0; x != USB_SUB_QUIRKS_MAX; x++) {
+ if (pqe->quirks[x] == y) {
+ pqe->quirks[x] = UQ_NONE;
+ break;
+ }
+ }
+ if (x == USB_SUB_QUIRKS_MAX) {
+ mtx_unlock(&usb_quirk_mtx);
+ return (ENOMEM);
+ }
+ for (x = 0; x != USB_SUB_QUIRKS_MAX; x++) {
+ if (pqe->quirks[x] != UQ_NONE) {
+ break;
+ }
+ }
+ if (x == USB_SUB_QUIRKS_MAX) {
+ /* all quirk entries are unused - release */
+ memset(pqe, 0, sizeof(*pqe));
+ }
+ mtx_unlock(&usb_quirk_mtx);
+ return (0); /* success */
+
+ default:
+ break;
+ }
+ return (ENOIOCTL);
+}
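+
+/*
+ * Userland reaches the handler above through the generic USB control
+ * device; usbconfig(8) uses it to add and remove quirks at run time.
+ * A hedged sketch of adding a quirk (the control device path and the
+ * vendor/product IDs are assumptions for illustration only):
+ *
+ *	struct usb_gen_quirk q = { 0 };
+ *	int f = open("/dev/usbctl", O_RDWR);
+ *
+ *	q.vid = 0x1234;		// hypothetical vendor ID
+ *	q.pid = 0x5678;		// hypothetical product ID
+ *	q.bcdDeviceLow = 0x0000;
+ *	q.bcdDeviceHigh = 0xffff;
+ *	strlcpy(q.quirkname, "UQ_MSC_NO_SYNC_CACHE",
+ *	    sizeof(q.quirkname));
+ *	if (ioctl(f, USB_DEV_QUIRK_ADD, &q) != 0)
+ *		err(1, "USB_DEV_QUIRK_ADD");
+ */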
+
+static void
+usb_quirk_init(void *arg)
+{
+ /* initialize mutex */
+ mtx_init(&usb_quirk_mtx, "USB quirk", NULL, MTX_DEF);
+
+ /* register our function */
+ usb_test_quirk_p = &usb_test_quirk_by_info;
+ usb_quirk_ioctl_p = &usb_quirk_ioctl;
+}
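+
+/*
+ * The two function pointers registered above are how the USB core
+ * reaches this optional module: usb_test_quirk() in the core
+ * dispatches through usb_test_quirk_p once the module is initialised.
+ * A typical caller-side sketch from a driver probe routine:
+ *
+ *	if (usb_test_quirk(uaa, UQ_MSC_NO_SYNC_CACHE))
+ *		sc->sc_quirks |= NO_SYNCHRONIZE_CACHE;
+ */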
+
+#ifndef __rtems__
+static void
+usb_quirk_uninit(void *arg)
+{
+ usb_quirk_unload(arg);
+
+ /* destroy mutex */
+ mtx_destroy(&usb_quirk_mtx);
+}
+#endif /* __rtems__ */
+
+SYSINIT(usb_quirk_init, SI_SUB_LOCK, SI_ORDER_FIRST, usb_quirk_init, NULL);
+SYSUNINIT(usb_quirk_uninit, SI_SUB_LOCK, SI_ORDER_ANY, usb_quirk_uninit, NULL);
diff --git a/rtems/freebsd/dev/usb/quirk/usb_quirk.h b/rtems/freebsd/dev/usb/quirk/usb_quirk.h
new file mode 100644
index 00000000..e8cc0dbf
--- /dev/null
+++ b/rtems/freebsd/dev/usb/quirk/usb_quirk.h
@@ -0,0 +1,108 @@
+/* $FreeBSD$ */
+/*-
+ * Copyright (c) 2008 Hans Petter Selasky. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _USB_QUIRK_HH_
+#define _USB_QUIRK_HH_
+
+enum {
+ /*
+ * Keep in sync with the usb_quirk_str table in usb_quirk.c, and with
+ * share/man/man4/usb_quirk.4.
+ */
+ UQ_NONE, /* not a valid quirk */
+
+ UQ_MATCH_VENDOR_ONLY, /* match quirk on vendor only */
+
+ /* Various quirks */
+
+ UQ_AUDIO_SWAP_LR, /* left and right sound channels are swapped */
+ UQ_AU_INP_ASYNC, /* input is async despite claim of adaptive */
+ UQ_AU_NO_FRAC, /* don't adjust for fractional samples */
+ UQ_AU_NO_XU, /* audio device has broken extension unit */
+ UQ_BAD_ADC, /* bad audio spec version number */
+ UQ_BAD_AUDIO, /* device claims audio class, but isn't */
+ UQ_BROKEN_BIDIR, /* printer has broken bidir mode */
+ UQ_BUS_POWERED, /* device is bus powered, despite claim */
+ UQ_HID_IGNORE, /* device should be ignored by hid class */
+ UQ_KBD_IGNORE, /* device should be ignored by kbd class */
+ UQ_KBD_BOOTPROTO, /* device should set the boot protocol */
+ UQ_MS_BAD_CLASS, /* doesn't identify properly */
+ UQ_MS_LEADING_BYTE, /* mouse sends an unknown leading byte */
+ UQ_MS_REVZ, /* mouse has Z-axis reversed */
+ UQ_NO_STRINGS, /* string descriptors are broken */
+ UQ_OPEN_CLEARSTALL, /* device needs clear endpoint stall */
+ UQ_POWER_CLAIM, /* hub lies about power status */
+ UQ_SPUR_BUT_UP, /* spurious mouse button up events */
+ UQ_SWAP_UNICODE, /* has some Unicode strings swapped */
+ UQ_CFG_INDEX_1, /* select configuration index 1 by default */
+ UQ_CFG_INDEX_2, /* select configuration index 2 by default */
+ UQ_CFG_INDEX_3, /* select configuration index 3 by default */
+ UQ_CFG_INDEX_4, /* select configuration index 4 by default */
+ UQ_CFG_INDEX_0, /* select configuration index 0 by default */
+ UQ_ASSUME_CM_OVER_DATA, /* assume cm over data feature */
+
+ /* USB Mass Storage Quirks. See "storage/umass.c" for a detailed description. */
+ UQ_MSC_NO_TEST_UNIT_READY, /* send start/stop instead of TUR */
+ UQ_MSC_NO_RS_CLEAR_UA, /* does not reset Unit Att. */
+ UQ_MSC_NO_START_STOP, /* does not support start/stop */
+ UQ_MSC_NO_GETMAXLUN, /* does not support get max LUN */
+ UQ_MSC_NO_INQUIRY, /* fake generic inq response */
+ UQ_MSC_NO_INQUIRY_EVPD, /* does not support inq EVPD */
+ UQ_MSC_NO_SYNC_CACHE, /* does not support sync cache */
+ UQ_MSC_SHUTTLE_INIT, /* requires Shuttle init sequence */
+ UQ_MSC_ALT_IFACE_1, /* switch to alternate interface 1 */
+ UQ_MSC_FLOPPY_SPEED, /* does floppy speeds (20kb/s) */
+ UQ_MSC_IGNORE_RESIDUE, /* gets residue wrong */
+ UQ_MSC_WRONG_CSWSIG, /* uses wrong CSW signature */
+ UQ_MSC_RBC_PAD_TO_12, /* pad RBC requests to 12 bytes */
+ UQ_MSC_READ_CAP_OFFBY1, /* reports sector count, not max sec. */
+ UQ_MSC_FORCE_SHORT_INQ, /* does not support full inq. */
+ UQ_MSC_FORCE_WIRE_BBB, /* force BBB wire protocol */
+ UQ_MSC_FORCE_WIRE_CBI, /* force CBI wire protocol */
+ UQ_MSC_FORCE_WIRE_CBI_I, /* force CBI with int. wire protocol */
+ UQ_MSC_FORCE_PROTO_SCSI, /* force SCSI command protocol */
+ UQ_MSC_FORCE_PROTO_ATAPI, /* force ATAPI command protocol */
+ UQ_MSC_FORCE_PROTO_UFI, /* force UFI command protocol */
+ UQ_MSC_FORCE_PROTO_RBC, /* force RBC command protocol */
+
+ /* Ejection of mass storage (driver disk) */
+ UQ_MSC_EJECT_HUAWEI, /* ejects after Huawei USB command */
+ UQ_MSC_EJECT_SIERRA, /* ejects after Sierra USB command */
+ UQ_MSC_EJECT_SCSIEJECT, /* ejects after SCSI eject command */
+ UQ_MSC_EJECT_REZERO, /* ejects after SCSI rezero command */
+ UQ_MSC_EJECT_ZTESTOR, /* ejects after ZTE SCSI command */
+ UQ_MSC_EJECT_CMOTECH, /* ejects after C-motech SCSI cmd */
+ UQ_MSC_EJECT_WAIT, /* wait for the device to eject */
+ UQ_MSC_EJECT_SAEL_M460, /* ejects after Sael USB commands */
+ UQ_MSC_EJECT_HUAWEISCSI, /* ejects after Huawei SCSI command */
+ UQ_MSC_EJECT_TCT, /* ejects after TCT SCSI command */
+
+ USB_QUIRK_MAX
+};
+
+uint8_t usb_test_quirk(const struct usb_attach_arg *uaa, uint16_t quirk);
+
+#endif /* _USB_QUIRK_HH_ */
diff --git a/rtems/freebsd/dev/usb/storage/umass.c b/rtems/freebsd/dev/usb/storage/umass.c
new file mode 100644
index 00000000..ac787f50
--- /dev/null
+++ b/rtems/freebsd/dev/usb/storage/umass.c
@@ -0,0 +1,3119 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/*-
+ * Copyright (c) 1999 MAEKAWA Masahide <bishop@rr.iij4u.or.jp>,
+ * Nick Hibma <n_hibma@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ * $NetBSD: umass.c,v 1.28 2000/04/02 23:46:53 augustss Exp $
+ */
+
+/* Also already merged from NetBSD:
+ * $NetBSD: umass.c,v 1.67 2001/11/25 19:05:22 augustss Exp $
+ * $NetBSD: umass.c,v 1.90 2002/11/04 19:17:33 pooka Exp $
+ * $NetBSD: umass.c,v 1.108 2003/11/07 17:03:25 wiz Exp $
+ * $NetBSD: umass.c,v 1.109 2003/12/04 13:57:31 keihan Exp $
+ */
+
+/*
+ * Universal Serial Bus Mass Storage Class specs:
+ * http://www.usb.org/developers/devclass_docs/usb_msc_overview_1.2.pdf
+ * http://www.usb.org/developers/devclass_docs/usbmassbulk_10.pdf
+ * http://www.usb.org/developers/devclass_docs/usb_msc_cbi_1.1.pdf
+ * http://www.usb.org/developers/devclass_docs/usbmass-ufi10.pdf
+ */
+
+/*
+ * Ported to NetBSD by Lennart Augustsson <augustss@NetBSD.org>.
+ * Parts of the code written by Jason R. Thorpe <thorpej@shagadelic.org>.
+ */
+
+/*
+ * The driver handles 3 Wire Protocols
+ * - Command/Bulk/Interrupt (CBI)
+ * - Command/Bulk/Interrupt with Command Completion Interrupt (CBI with CCI)
+ * - Mass Storage Bulk-Only (BBB)
+ * (BBB refers to Bulk/Bulk/Bulk for the Command/Data/Status phases)
+ *
+ * Over these wire protocols it handles the following command protocols
+ * - SCSI
+ * - UFI (floppy command set)
+ * - 8070i (ATAPI)
+ *
+ * UFI and 8070i (ATAPI) are transformed versions of the SCSI command set. The
+ * sc->sc_transform method is used to convert the commands into the appropriate
+ * format (if at all necessary). For example, UFI requires all commands to be
+ * 12 bytes in length amongst other things.
+ *
+ * The source code below is marked and can be split into a number of pieces
+ * (in this order):
+ *
+ * - probe/attach/detach
+ * - generic transfer routines
+ * - BBB
+ * - CBI
+ * - CBI_I (in addition to functions from CBI)
+ * - CAM (Common Access Method)
+ * - SCSI
+ * - UFI
+ * - 8070i (ATAPI)
+ *
+ * The protocols are implemented using a state machine, for the transfers as
+ * well as for the resets. The state machine is contained in umass_t_*_callback.
+ * The state machine is started through either umass_command_start() or
+ * umass_reset().
+ *
+ * The reason for doing this is a) CAM performs a lot better this way and b) it
+ * avoids using tsleep from interrupt context (for example after a failed
+ * transfer).
+ */
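+
+/*
+ * In outline, a 12-byte command transform has this shape (a
+ * simplified sketch of what the UFI transform further down does; the
+ * 12-byte length is the UFI command size):
+ *
+ *	static uint8_t
+ *	xxx_transform(struct umass_softc *sc, uint8_t *cmd, uint8_t len)
+ *	{
+ *		if (len > 12)
+ *			return (0);	// cannot be expressed
+ *		memcpy(sc->sc_transfer.cmd_data, cmd, len);
+ *		memset(sc->sc_transfer.cmd_data + len, 0, 12 - len);
+ *		sc->sc_transfer.cmd_len = 12;
+ *		return (1);	// execute the command
+ *	}
+ */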
+
+/*
+ * The SCSI related part of this driver has been derived from the
+ * dev/ppbus/vpo.c driver, by Nicolas Souchu (nsouch@FreeBSD.org).
+ *
+ * The CAM layer uses so-called actions, which are messages sent to the host
+ * adapter for completion. The actions come in through umass_cam_action. The
+ * appropriate block of routines is called depending on the transport protocol
+ * in use. When the transfer has finished, these routines call
+ * umass_cam_cb again to complete the CAM command.
+ */
+
+#include <rtems/freebsd/sys/stdint.h>
+#include <rtems/freebsd/sys/stddef.h>
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/queue.h>
+#include <rtems/freebsd/sys/types.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/bus.h>
+#include <rtems/freebsd/sys/linker_set.h>
+#include <rtems/freebsd/sys/module.h>
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/mutex.h>
+#include <rtems/freebsd/sys/condvar.h>
+#include <rtems/freebsd/sys/sysctl.h>
+#include <rtems/freebsd/sys/sx.h>
+#include <rtems/freebsd/sys/unistd.h>
+#include <rtems/freebsd/sys/callout.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/priv.h>
+
+#include <rtems/freebsd/dev/usb/usb.h>
+#include <rtems/freebsd/dev/usb/usbdi.h>
+#include <rtems/freebsd/dev/usb/usbdi_util.h>
+#include <rtems/freebsd/local/usbdevs.h>
+
+#include <rtems/freebsd/dev/usb/quirk/usb_quirk.h>
+
+#include <rtems/freebsd/cam/cam.h>
+#include <rtems/freebsd/cam/cam_ccb.h>
+#include <rtems/freebsd/cam/cam_sim.h>
+#include <rtems/freebsd/cam/cam_xpt_sim.h>
+#include <rtems/freebsd/cam/scsi/scsi_all.h>
+#include <rtems/freebsd/cam/scsi/scsi_da.h>
+
+#include <rtems/freebsd/cam/cam_periph.h>
+
+#define UMASS_EXT_BUFFER
+#ifdef UMASS_EXT_BUFFER
+/* this enables loading of virtual buffers into DMA */
+#define UMASS_USB_FLAGS .ext_buffer=1,
+#else
+#define UMASS_USB_FLAGS
+#endif
+
+#ifdef USB_DEBUG
+#define DIF(m, x) \
+ do { \
+ if (umass_debug & (m)) { x ; } \
+ } while (0)
+
+#define DPRINTF(sc, m, fmt, ...) \
+ do { \
+ if (umass_debug & (m)) { \
+ printf("%s:%s: " fmt, \
+ (sc) ? (const char *)(sc)->sc_name : \
+ (const char *)"umassX", \
+ __FUNCTION__ ,## __VA_ARGS__); \
+ } \
+ } while (0)
+
+#define UDMASS_GEN 0x00010000 /* general */
+#define UDMASS_SCSI 0x00020000 /* scsi */
+#define UDMASS_UFI 0x00040000 /* ufi command set */
+#define UDMASS_ATAPI 0x00080000 /* 8070i command set */
+#define UDMASS_CMD (UDMASS_SCSI|UDMASS_UFI|UDMASS_ATAPI)
+#define UDMASS_USB 0x00100000 /* USB general */
+#define UDMASS_BBB 0x00200000 /* Bulk-Only transfers */
+#define UDMASS_CBI 0x00400000 /* CBI transfers */
+#define UDMASS_WIRE (UDMASS_BBB|UDMASS_CBI)
+#define UDMASS_ALL 0xffff0000 /* all of the above */
+static int umass_debug = 0;
+
+SYSCTL_NODE(_hw_usb, OID_AUTO, umass, CTLFLAG_RW, 0, "USB umass");
+SYSCTL_INT(_hw_usb_umass, OID_AUTO, debug, CTLFLAG_RW,
+ &umass_debug, 0, "umass debug level");
+
+TUNABLE_INT("hw.usb.umass.debug", &umass_debug);
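+
+/*
+ * Debugging can be enabled at run time (on a FreeBSD host), e.g.:
+ *
+ *	sysctl hw.usb.umass.debug=0xffff0000	# UDMASS_ALL
+ */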
+#else
+#define DIF(...) do { } while (0)
+#define DPRINTF(...) do { } while (0)
+#endif
+
+#define UMASS_GONE ((struct umass_softc *)1)
+
+#define UMASS_BULK_SIZE (1 << 17)
+#define UMASS_CBI_DIAGNOSTIC_CMDLEN 12 /* bytes */
+#define UMASS_MAX_CMDLEN MAX(12, CAM_MAX_CDBLEN) /* bytes */
+
+/* USB transfer definitions */
+
+#define UMASS_T_BBB_RESET1 0 /* Bulk-Only */
+#define UMASS_T_BBB_RESET2 1
+#define UMASS_T_BBB_RESET3 2
+#define UMASS_T_BBB_COMMAND 3
+#define UMASS_T_BBB_DATA_READ 4
+#define UMASS_T_BBB_DATA_RD_CS 5
+#define UMASS_T_BBB_DATA_WRITE 6
+#define UMASS_T_BBB_DATA_WR_CS 7
+#define UMASS_T_BBB_STATUS 8
+#define UMASS_T_BBB_MAX 9
+
+#define UMASS_T_CBI_RESET1 0 /* CBI */
+#define UMASS_T_CBI_RESET2 1
+#define UMASS_T_CBI_RESET3 2
+#define UMASS_T_CBI_COMMAND 3
+#define UMASS_T_CBI_DATA_READ 4
+#define UMASS_T_CBI_DATA_RD_CS 5
+#define UMASS_T_CBI_DATA_WRITE 6
+#define UMASS_T_CBI_DATA_WR_CS 7
+#define UMASS_T_CBI_STATUS 8
+#define UMASS_T_CBI_RESET4 9
+#define UMASS_T_CBI_MAX 10
+
+#define UMASS_T_MAX MAX(UMASS_T_CBI_MAX, UMASS_T_BBB_MAX)
+
+/* Generic definitions */
+
+/* Direction for transfer */
+#define DIR_NONE 0
+#define DIR_IN 1
+#define DIR_OUT 2
+
+/* device name */
+#define DEVNAME "umass"
+#define DEVNAME_SIM "umass-sim"
+
+/* Approximate maximum transfer speeds (assumes 33% overhead). */
+#define UMASS_FULL_TRANSFER_SPEED 1000
+#define UMASS_HIGH_TRANSFER_SPEED 40000
+#define UMASS_SUPER_TRANSFER_SPEED 400000
+#define UMASS_FLOPPY_TRANSFER_SPEED 20
+
+#define UMASS_TIMEOUT 5000 /* ms */
+
+/* CAM specific definitions */
+
+#define UMASS_SCSIID_MAX 1 /* maximum number of drives expected */
+#define UMASS_SCSIID_HOST UMASS_SCSIID_MAX
+
+/* Bulk-Only features */
+
+#define UR_BBB_RESET 0xff /* Bulk-Only reset */
+#define UR_BBB_GET_MAX_LUN 0xfe /* Get maximum lun */
+
+/* Command Block Wrapper */
+typedef struct {
+ uDWord dCBWSignature;
+#define CBWSIGNATURE 0x43425355
+ uDWord dCBWTag;
+ uDWord dCBWDataTransferLength;
+ uByte bCBWFlags;
+#define CBWFLAGS_OUT 0x00
+#define CBWFLAGS_IN 0x80
+ uByte bCBWLUN;
+ uByte bCDBLength;
+#define CBWCDBLENGTH 16
+ uByte CBWCDB[CBWCDBLENGTH];
+} __packed umass_bbb_cbw_t;
+
+#define UMASS_BBB_CBW_SIZE 31
+
+/* Command Status Wrapper */
+typedef struct {
+ uDWord dCSWSignature;
+#define CSWSIGNATURE 0x53425355
+#define CSWSIGNATURE_IMAGINATION_DBX1 0x43425355
+#define CSWSIGNATURE_OLYMPUS_C1 0x55425355
+ uDWord dCSWTag;
+ uDWord dCSWDataResidue;
+ uByte bCSWStatus;
+#define CSWSTATUS_GOOD 0x0
+#define CSWSTATUS_FAILED 0x1
+#define CSWSTATUS_PHASE 0x2
+} __packed umass_bbb_csw_t;
+
+#define UMASS_BBB_CSW_SIZE 13
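+
+/*
+ * On completion, the status stage is validated along these lines (a
+ * sketch only; the full checks, including the quirky signatures
+ * defined above, live in the BBB status callback):
+ *
+ *	uint32_t sig = UGETDW(sc->csw.dCSWSignature);
+ *	uint32_t tag = UGETDW(sc->csw.dCSWTag);
+ *
+ *	if ((sig != CSWSIGNATURE && !(sc->sc_quirks & WRONG_CSWSIG)) ||
+ *	    tag != UGETDW(sc->cbw.dCBWTag))
+ *		umass_tr_error(xfer, USB_ERR_INVAL);
+ */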
+
+/* CBI features */
+
+#define UR_CBI_ADSC 0x00
+
+typedef union {
+ struct {
+ uint8_t type;
+#define IDB_TYPE_CCI 0x00
+ uint8_t value;
+#define IDB_VALUE_PASS 0x00
+#define IDB_VALUE_FAIL 0x01
+#define IDB_VALUE_PHASE 0x02
+#define IDB_VALUE_PERSISTENT 0x03
+#define IDB_VALUE_STATUS_MASK 0x03
+ } __packed common;
+
+ struct {
+ uint8_t asc;
+ uint8_t ascq;
+ } __packed ufi;
+} __packed umass_cbi_sbl_t;
+
+struct umass_softc; /* see below */
+
+typedef void (umass_callback_t)(struct umass_softc *sc, union ccb *ccb,
+ uint32_t residue, uint8_t status);
+
+#define STATUS_CMD_OK 0 /* everything ok */
+#define STATUS_CMD_UNKNOWN 1 /* will have to fetch sense */
+#define STATUS_CMD_FAILED 2 /* transfer was ok, command failed */
+#define STATUS_WIRE_FAILED 3 /* couldn't even get command across */
+
+typedef uint8_t (umass_transform_t)(struct umass_softc *sc, uint8_t *cmd_ptr,
+ uint8_t cmd_len);
+
+/* Wire and command protocol */
+#define UMASS_PROTO_BBB 0x0001 /* USB wire protocol */
+#define UMASS_PROTO_CBI 0x0002
+#define UMASS_PROTO_CBI_I 0x0004
+#define UMASS_PROTO_WIRE 0x00ff /* USB wire protocol mask */
+#define UMASS_PROTO_SCSI 0x0100 /* command protocol */
+#define UMASS_PROTO_ATAPI 0x0200
+#define UMASS_PROTO_UFI 0x0400
+#define UMASS_PROTO_RBC 0x0800
+#define UMASS_PROTO_COMMAND 0xff00 /* command protocol mask */
+
+/* Device specific quirks */
+#define NO_QUIRKS 0x0000
+ /*
+ * The drive does not support Test Unit Ready. Convert to Start Unit
+ */
+#define NO_TEST_UNIT_READY 0x0001
+ /*
+ * The drive does not reset the Unit Attention state after REQUEST
+ * SENSE has been sent. The INQUIRY command does not reset the UA
+ * either, and so CAM runs in circles trying to retrieve the initial
+ * INQUIRY data.
+ */
+#define RS_NO_CLEAR_UA 0x0002
+ /* The drive does not support START STOP. */
+#define NO_START_STOP 0x0004
+ /* Don't ask for full inquiry data (255b). */
+#define FORCE_SHORT_INQUIRY 0x0008
+ /* Needs to be initialised the Shuttle way */
+#define SHUTTLE_INIT 0x0010
+ /* Drive needs to be switched to alternate iface 1 */
+#define ALT_IFACE_1 0x0020
+ /* Drive does not do 1Mb/s, but just floppy speeds (20kb/s) */
+#define FLOPPY_SPEED 0x0040
+ /* The device can't count and gets the residue of transfers wrong */
+#define IGNORE_RESIDUE 0x0080
+ /* No GetMaxLun call */
+#define NO_GETMAXLUN 0x0100
+ /* The device uses a weird CSWSIGNATURE. */
+#define WRONG_CSWSIG 0x0200
+ /* Device cannot handle INQUIRY so fake a generic response */
+#define NO_INQUIRY 0x0400
+ /* Device cannot handle INQUIRY EVPD, return CHECK CONDITION */
+#define NO_INQUIRY_EVPD 0x0800
+ /* Pad all RBC requests to 12 bytes. */
+#define RBC_PAD_TO_12 0x1000
+ /*
+ * Device reports number of sectors from READ_CAPACITY, not max
+ * sector number.
+ */
+#define READ_CAPACITY_OFFBY1 0x2000
+ /*
+ * Device cannot handle a SCSI synchronize cache command. Normally
+ * this quirk would be handled in the cam layer, but for IDE bridges
+ * we need to associate the quirk with the bridge and not the
+ * underlying disk device. This is handled by faking a success
+ * result.
+ */
+#define NO_SYNCHRONIZE_CACHE 0x4000
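+
+/*
+ * These bits are checked when SCSI commands are transformed.  For
+ * instance, the NO_SYNCHRONIZE_CACHE case described above reduces to
+ * a test of this shape (a sketch; in the transform convention used
+ * later in this file, returning 2 completes the CCB successfully
+ * without sending anything to the device):
+ *
+ *	case SYNCHRONIZE_CACHE:
+ *		if (sc->sc_quirks & NO_SYNCHRONIZE_CACHE)
+ *			return (2);	// fake success
+ *		break;
+ */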
+
+struct umass_softc {
+
+ struct scsi_sense cam_scsi_sense;
+ struct scsi_test_unit_ready cam_scsi_test_unit_ready;
+ struct mtx sc_mtx;
+ struct {
+ uint8_t *data_ptr;
+ union ccb *ccb;
+ umass_callback_t *callback;
+
+ uint32_t data_len; /* bytes */
+ uint32_t data_rem; /* bytes */
+ uint32_t data_timeout; /* ms */
+ uint32_t actlen; /* bytes */
+
+ uint8_t cmd_data[UMASS_MAX_CMDLEN];
+ uint8_t cmd_len; /* bytes */
+ uint8_t dir;
+ uint8_t lun;
+ } sc_transfer;
+
+ /* Bulk specific variables for transfers in progress */
+ umass_bbb_cbw_t cbw; /* command block wrapper */
+ umass_bbb_csw_t csw; /* command status wrapper */
+
+ /* CBI specific variables for transfers in progress */
+ umass_cbi_sbl_t sbl; /* status block */
+
+ device_t sc_dev;
+ struct usb_device *sc_udev;
+ struct cam_sim *sc_sim; /* SCSI Interface Module */
+ struct usb_xfer *sc_xfer[UMASS_T_MAX];
+
+ /*
+ * The command transform function is used to convert the SCSI
+ * commands into their derivatives, like UFI, ATAPI, and friends.
+ */
+ umass_transform_t *sc_transform;
+
+ uint32_t sc_unit;
+ uint32_t sc_quirks; /* they got it almost right */
+ uint32_t sc_proto; /* wire and cmd protocol */
+
+ uint8_t sc_name[16];
+ uint8_t sc_iface_no; /* interface number */
+ uint8_t sc_maxlun; /* maximum LUN number, inclusive */
+ uint8_t sc_last_xfer_index;
+ uint8_t sc_status_try;
+};
+
+struct umass_probe_proto {
+ uint32_t quirks;
+ uint32_t proto;
+
+ int error;
+};
+
+/* prototypes */
+
+static device_probe_t umass_probe;
+static device_attach_t umass_attach;
+static device_detach_t umass_detach;
+
+static usb_callback_t umass_tr_error;
+static usb_callback_t umass_t_bbb_reset1_callback;
+static usb_callback_t umass_t_bbb_reset2_callback;
+static usb_callback_t umass_t_bbb_reset3_callback;
+static usb_callback_t umass_t_bbb_command_callback;
+static usb_callback_t umass_t_bbb_data_read_callback;
+static usb_callback_t umass_t_bbb_data_rd_cs_callback;
+static usb_callback_t umass_t_bbb_data_write_callback;
+static usb_callback_t umass_t_bbb_data_wr_cs_callback;
+static usb_callback_t umass_t_bbb_status_callback;
+static usb_callback_t umass_t_cbi_reset1_callback;
+static usb_callback_t umass_t_cbi_reset2_callback;
+static usb_callback_t umass_t_cbi_reset3_callback;
+static usb_callback_t umass_t_cbi_reset4_callback;
+static usb_callback_t umass_t_cbi_command_callback;
+static usb_callback_t umass_t_cbi_data_read_callback;
+static usb_callback_t umass_t_cbi_data_rd_cs_callback;
+static usb_callback_t umass_t_cbi_data_write_callback;
+static usb_callback_t umass_t_cbi_data_wr_cs_callback;
+static usb_callback_t umass_t_cbi_status_callback;
+
+static void umass_cancel_ccb(struct umass_softc *);
+static void umass_init_shuttle(struct umass_softc *);
+static void umass_reset(struct umass_softc *);
+static void umass_t_bbb_data_clear_stall_callback(struct usb_xfer *,
+ uint8_t, uint8_t, usb_error_t);
+static void umass_command_start(struct umass_softc *, uint8_t, void *,
+ uint32_t, uint32_t, umass_callback_t *, union ccb *);
+static uint8_t umass_bbb_get_max_lun(struct umass_softc *);
+static void umass_cbi_start_status(struct umass_softc *);
+static void umass_t_cbi_data_clear_stall_callback(struct usb_xfer *,
+ uint8_t, uint8_t, usb_error_t);
+static int umass_cam_attach_sim(struct umass_softc *);
+#ifndef __rtems__
+static void umass_cam_attach(struct umass_softc *);
+#endif /* __rtems__ */
+static void umass_cam_detach_sim(struct umass_softc *);
+static void umass_cam_action(struct cam_sim *, union ccb *);
+static void umass_cam_poll(struct cam_sim *);
+static void umass_cam_cb(struct umass_softc *, union ccb *, uint32_t,
+ uint8_t);
+static void umass_cam_sense_cb(struct umass_softc *, union ccb *, uint32_t,
+ uint8_t);
+static void umass_cam_quirk_cb(struct umass_softc *, union ccb *, uint32_t,
+ uint8_t);
+static uint8_t umass_scsi_transform(struct umass_softc *, uint8_t *, uint8_t);
+static uint8_t umass_rbc_transform(struct umass_softc *, uint8_t *, uint8_t);
+static uint8_t umass_ufi_transform(struct umass_softc *, uint8_t *, uint8_t);
+static uint8_t umass_atapi_transform(struct umass_softc *, uint8_t *,
+ uint8_t);
+static uint8_t umass_no_transform(struct umass_softc *, uint8_t *, uint8_t);
+static uint8_t umass_std_transform(struct umass_softc *, union ccb *, uint8_t
+ *, uint8_t);
+
+#ifdef USB_DEBUG
+static void umass_bbb_dump_cbw(struct umass_softc *, umass_bbb_cbw_t *);
+static void umass_bbb_dump_csw(struct umass_softc *, umass_bbb_csw_t *);
+static void umass_cbi_dump_cmd(struct umass_softc *, void *, uint8_t);
+static void umass_dump_buffer(struct umass_softc *, uint8_t *, uint32_t,
+ uint32_t);
+#endif
+
+static struct usb_config umass_bbb_config[UMASS_T_BBB_MAX] = {
+
+ [UMASS_T_BBB_RESET1] = {
+ .type = UE_CONTROL,
+ .endpoint = 0x00, /* Control pipe */
+ .direction = UE_DIR_ANY,
+ .bufsize = sizeof(struct usb_device_request),
+ .callback = &umass_t_bbb_reset1_callback,
+ .timeout = 5000, /* 5 seconds */
+ .interval = 500, /* 500 milliseconds */
+ },
+
+ [UMASS_T_BBB_RESET2] = {
+ .type = UE_CONTROL,
+ .endpoint = 0x00, /* Control pipe */
+ .direction = UE_DIR_ANY,
+ .bufsize = sizeof(struct usb_device_request),
+ .callback = &umass_t_bbb_reset2_callback,
+ .timeout = 5000, /* 5 seconds */
+ .interval = 50, /* 50 milliseconds */
+ },
+
+ [UMASS_T_BBB_RESET3] = {
+ .type = UE_CONTROL,
+ .endpoint = 0x00, /* Control pipe */
+ .direction = UE_DIR_ANY,
+ .bufsize = sizeof(struct usb_device_request),
+ .callback = &umass_t_bbb_reset3_callback,
+ .timeout = 5000, /* 5 seconds */
+ .interval = 50, /* 50 milliseconds */
+ },
+
+ [UMASS_T_BBB_COMMAND] = {
+ .type = UE_BULK,
+ .endpoint = UE_ADDR_ANY,
+ .direction = UE_DIR_OUT,
+ .bufsize = sizeof(umass_bbb_cbw_t),
+ .callback = &umass_t_bbb_command_callback,
+ .timeout = 5000, /* 5 seconds */
+ },
+
+ [UMASS_T_BBB_DATA_READ] = {
+ .type = UE_BULK,
+ .endpoint = UE_ADDR_ANY,
+ .direction = UE_DIR_IN,
+ .bufsize = UMASS_BULK_SIZE,
+ .flags = {.proxy_buffer = 1,.short_xfer_ok = 1, UMASS_USB_FLAGS},
+ .callback = &umass_t_bbb_data_read_callback,
+ .timeout = 0, /* overwritten later */
+ },
+
+ [UMASS_T_BBB_DATA_RD_CS] = {
+ .type = UE_CONTROL,
+ .endpoint = 0x00, /* Control pipe */
+ .direction = UE_DIR_ANY,
+ .bufsize = sizeof(struct usb_device_request),
+ .callback = &umass_t_bbb_data_rd_cs_callback,
+ .timeout = 5000, /* 5 seconds */
+ },
+
+ [UMASS_T_BBB_DATA_WRITE] = {
+ .type = UE_BULK,
+ .endpoint = UE_ADDR_ANY,
+ .direction = UE_DIR_OUT,
+ .bufsize = UMASS_BULK_SIZE,
+ .flags = {.proxy_buffer = 1,.short_xfer_ok = 1, UMASS_USB_FLAGS},
+ .callback = &umass_t_bbb_data_write_callback,
+ .timeout = 0, /* overwritten later */
+ },
+
+ [UMASS_T_BBB_DATA_WR_CS] = {
+ .type = UE_CONTROL,
+ .endpoint = 0x00, /* Control pipe */
+ .direction = UE_DIR_ANY,
+ .bufsize = sizeof(struct usb_device_request),
+ .callback = &umass_t_bbb_data_wr_cs_callback,
+ .timeout = 5000, /* 5 seconds */
+ },
+
+ [UMASS_T_BBB_STATUS] = {
+ .type = UE_BULK,
+ .endpoint = UE_ADDR_ANY,
+ .direction = UE_DIR_IN,
+ .bufsize = sizeof(umass_bbb_csw_t),
+ .flags = {.short_xfer_ok = 1,},
+ .callback = &umass_t_bbb_status_callback,
+ .timeout = 5000, /* ms */
+ },
+};
+
+static struct usb_config umass_cbi_config[UMASS_T_CBI_MAX] = {
+
+ [UMASS_T_CBI_RESET1] = {
+ .type = UE_CONTROL,
+ .endpoint = 0x00, /* Control pipe */
+ .direction = UE_DIR_ANY,
+ .bufsize = (sizeof(struct usb_device_request) +
+ UMASS_CBI_DIAGNOSTIC_CMDLEN),
+ .callback = &umass_t_cbi_reset1_callback,
+ .timeout = 5000, /* 5 seconds */
+ .interval = 500, /* 500 milliseconds */
+ },
+
+ [UMASS_T_CBI_RESET2] = {
+ .type = UE_CONTROL,
+ .endpoint = 0x00, /* Control pipe */
+ .direction = UE_DIR_ANY,
+ .bufsize = sizeof(struct usb_device_request),
+ .callback = &umass_t_cbi_reset2_callback,
+ .timeout = 5000, /* 5 seconds */
+ .interval = 50, /* 50 milliseconds */
+ },
+
+ [UMASS_T_CBI_RESET3] = {
+ .type = UE_CONTROL,
+ .endpoint = 0x00, /* Control pipe */
+ .direction = UE_DIR_ANY,
+ .bufsize = sizeof(struct usb_device_request),
+ .callback = &umass_t_cbi_reset3_callback,
+ .timeout = 5000, /* 5 seconds */
+ .interval = 50, /* 50 milliseconds */
+ },
+
+ [UMASS_T_CBI_COMMAND] = {
+ .type = UE_CONTROL,
+ .endpoint = 0x00, /* Control pipe */
+ .direction = UE_DIR_ANY,
+ .bufsize = (sizeof(struct usb_device_request) +
+ UMASS_MAX_CMDLEN),
+ .callback = &umass_t_cbi_command_callback,
+ .timeout = 5000, /* 5 seconds */
+ },
+
+ [UMASS_T_CBI_DATA_READ] = {
+ .type = UE_BULK,
+ .endpoint = UE_ADDR_ANY,
+ .direction = UE_DIR_IN,
+ .bufsize = UMASS_BULK_SIZE,
+ .flags = {.proxy_buffer = 1,.short_xfer_ok = 1, UMASS_USB_FLAGS},
+ .callback = &umass_t_cbi_data_read_callback,
+ .timeout = 0, /* overwritten later */
+ },
+
+ [UMASS_T_CBI_DATA_RD_CS] = {
+ .type = UE_CONTROL,
+ .endpoint = 0x00, /* Control pipe */
+ .direction = UE_DIR_ANY,
+ .bufsize = sizeof(struct usb_device_request),
+ .callback = &umass_t_cbi_data_rd_cs_callback,
+ .timeout = 5000, /* 5 seconds */
+ },
+
+ [UMASS_T_CBI_DATA_WRITE] = {
+ .type = UE_BULK,
+ .endpoint = UE_ADDR_ANY,
+ .direction = UE_DIR_OUT,
+ .bufsize = UMASS_BULK_SIZE,
+ .flags = {.proxy_buffer = 1,.short_xfer_ok = 1, UMASS_USB_FLAGS},
+ .callback = &umass_t_cbi_data_write_callback,
+ .timeout = 0, /* overwritten later */
+ },
+
+ [UMASS_T_CBI_DATA_WR_CS] = {
+ .type = UE_CONTROL,
+ .endpoint = 0x00, /* Control pipe */
+ .direction = UE_DIR_ANY,
+ .bufsize = sizeof(struct usb_device_request),
+ .callback = &umass_t_cbi_data_wr_cs_callback,
+ .timeout = 5000, /* 5 seconds */
+ },
+
+ [UMASS_T_CBI_STATUS] = {
+ .type = UE_INTERRUPT,
+ .endpoint = UE_ADDR_ANY,
+ .direction = UE_DIR_IN,
+ .flags = {.short_xfer_ok = 1,.no_pipe_ok = 1,},
+ .bufsize = sizeof(umass_cbi_sbl_t),
+ .callback = &umass_t_cbi_status_callback,
+ .timeout = 5000, /* ms */
+ },
+
+ [UMASS_T_CBI_RESET4] = {
+ .type = UE_CONTROL,
+ .endpoint = 0x00, /* Control pipe */
+ .direction = UE_DIR_ANY,
+ .bufsize = sizeof(struct usb_device_request),
+ .callback = &umass_t_cbi_reset4_callback,
+ .timeout = 5000, /* ms */
+ },
+};
+
+/* If device cannot return valid inquiry data, fake it */
+static const uint8_t fake_inq_data[SHORT_INQUIRY_LENGTH] = {
+ 0, /* removable */ 0x80, SCSI_REV_2, SCSI_REV_2,
+ /* additional_length */ 31, 0, 0, 0
+};
+
+#define UFI_COMMAND_LENGTH 12 /* UFI commands are always 12 bytes */
+#define ATAPI_COMMAND_LENGTH 12 /* ATAPI commands are always 12 bytes */
+
+static devclass_t umass_devclass;
+
+static device_method_t umass_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, umass_probe),
+ DEVMETHOD(device_attach, umass_attach),
+ DEVMETHOD(device_detach, umass_detach),
+ {0, 0}
+};
+
+static driver_t umass_driver = {
+ .name = "umass",
+ .methods = umass_methods,
+ .size = sizeof(struct umass_softc),
+};
+
+DRIVER_MODULE(umass, uhub, umass_driver, umass_devclass, NULL, 0);
+MODULE_DEPEND(umass, usb, 1, 1, 1);
+MODULE_DEPEND(umass, cam, 1, 1, 1);
+MODULE_VERSION(umass, 1);
+
+/*
+ * USB device probe/attach/detach
+ */
+
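+/*
+ * Determine the wire and command protocol from the USB interface
+ * descriptor. The result is a bitwise OR of UMASS_PROTO_XXX flags,
+ * or zero if the interface is not a supported mass storage interface.
+ */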
+static uint16_t
+umass_get_proto(struct usb_interface *iface)
+{
+ struct usb_interface_descriptor *id;
+ uint16_t retval;
+
+ retval = 0;
+
+ /* Check for a standards compliant device */
+ id = usbd_get_interface_descriptor(iface);
+ if ((id == NULL) ||
+ (id->bInterfaceClass != UICLASS_MASS)) {
+ goto done;
+ }
+ switch (id->bInterfaceSubClass) {
+ case UISUBCLASS_SCSI:
+ retval |= UMASS_PROTO_SCSI;
+ break;
+ case UISUBCLASS_UFI:
+ retval |= UMASS_PROTO_UFI;
+ break;
+ case UISUBCLASS_RBC:
+ retval |= UMASS_PROTO_RBC;
+ break;
+ case UISUBCLASS_SFF8020I:
+ case UISUBCLASS_SFF8070I:
+ retval |= UMASS_PROTO_ATAPI;
+ break;
+ default:
+ goto done;
+ }
+
+ switch (id->bInterfaceProtocol) {
+ case UIPROTO_MASS_CBI:
+ retval |= UMASS_PROTO_CBI;
+ break;
+ case UIPROTO_MASS_CBI_I:
+ retval |= UMASS_PROTO_CBI_I;
+ break;
+ case UIPROTO_MASS_BBB_OLD:
+ case UIPROTO_MASS_BBB:
+ retval |= UMASS_PROTO_BBB;
+ break;
+ default:
+ goto done;
+ }
+done:
+ return (retval);
+}
+
+/*
+ * Match the device we are seeing with the devices supported.
+ */
+static struct umass_probe_proto
+umass_probe_proto(device_t dev, struct usb_attach_arg *uaa)
+{
+ struct umass_probe_proto ret;
+ uint32_t quirks = NO_QUIRKS;
+ uint32_t proto = umass_get_proto(uaa->iface);
+
+ memset(&ret, 0, sizeof(ret));
+
+ /* Search for protocol enforcement */
+
+ if (usb_test_quirk(uaa, UQ_MSC_FORCE_WIRE_BBB)) {
+ proto &= ~UMASS_PROTO_WIRE;
+ proto |= UMASS_PROTO_BBB;
+ } else if (usb_test_quirk(uaa, UQ_MSC_FORCE_WIRE_CBI)) {
+ proto &= ~UMASS_PROTO_WIRE;
+ proto |= UMASS_PROTO_CBI;
+ } else if (usb_test_quirk(uaa, UQ_MSC_FORCE_WIRE_CBI_I)) {
+ proto &= ~UMASS_PROTO_WIRE;
+ proto |= UMASS_PROTO_CBI_I;
+ }
+
+ if (usb_test_quirk(uaa, UQ_MSC_FORCE_PROTO_SCSI)) {
+ proto &= ~UMASS_PROTO_COMMAND;
+ proto |= UMASS_PROTO_SCSI;
+ } else if (usb_test_quirk(uaa, UQ_MSC_FORCE_PROTO_ATAPI)) {
+ proto &= ~UMASS_PROTO_COMMAND;
+ proto |= UMASS_PROTO_ATAPI;
+ } else if (usb_test_quirk(uaa, UQ_MSC_FORCE_PROTO_UFI)) {
+ proto &= ~UMASS_PROTO_COMMAND;
+ proto |= UMASS_PROTO_UFI;
+ } else if (usb_test_quirk(uaa, UQ_MSC_FORCE_PROTO_RBC)) {
+ proto &= ~UMASS_PROTO_COMMAND;
+ proto |= UMASS_PROTO_RBC;
+ }
+
+ /* Check if the protocol is invalid */
+
+ if ((proto & UMASS_PROTO_COMMAND) == 0) {
+ ret.error = ENXIO;
+ goto done;
+ }
+
+ if ((proto & UMASS_PROTO_WIRE) == 0) {
+ ret.error = ENXIO;
+ goto done;
+ }
+
+ /* Search for quirks */
+
+ if (usb_test_quirk(uaa, UQ_MSC_NO_TEST_UNIT_READY))
+ quirks |= NO_TEST_UNIT_READY;
+ if (usb_test_quirk(uaa, UQ_MSC_NO_RS_CLEAR_UA))
+ quirks |= RS_NO_CLEAR_UA;
+ if (usb_test_quirk(uaa, UQ_MSC_NO_START_STOP))
+ quirks |= NO_START_STOP;
+ if (usb_test_quirk(uaa, UQ_MSC_NO_GETMAXLUN))
+ quirks |= NO_GETMAXLUN;
+ if (usb_test_quirk(uaa, UQ_MSC_NO_INQUIRY))
+ quirks |= NO_INQUIRY;
+ if (usb_test_quirk(uaa, UQ_MSC_NO_INQUIRY_EVPD))
+ quirks |= NO_INQUIRY_EVPD;
+ if (usb_test_quirk(uaa, UQ_MSC_NO_SYNC_CACHE))
+ quirks |= NO_SYNCHRONIZE_CACHE;
+ if (usb_test_quirk(uaa, UQ_MSC_SHUTTLE_INIT))
+ quirks |= SHUTTLE_INIT;
+ if (usb_test_quirk(uaa, UQ_MSC_ALT_IFACE_1))
+ quirks |= ALT_IFACE_1;
+ if (usb_test_quirk(uaa, UQ_MSC_FLOPPY_SPEED))
+ quirks |= FLOPPY_SPEED;
+ if (usb_test_quirk(uaa, UQ_MSC_IGNORE_RESIDUE))
+ quirks |= IGNORE_RESIDUE;
+ if (usb_test_quirk(uaa, UQ_MSC_WRONG_CSWSIG))
+ quirks |= WRONG_CSWSIG;
+ if (usb_test_quirk(uaa, UQ_MSC_RBC_PAD_TO_12))
+ quirks |= RBC_PAD_TO_12;
+ if (usb_test_quirk(uaa, UQ_MSC_READ_CAP_OFFBY1))
+ quirks |= READ_CAPACITY_OFFBY1;
+ if (usb_test_quirk(uaa, UQ_MSC_FORCE_SHORT_INQ))
+ quirks |= FORCE_SHORT_INQUIRY;
+
+done:
+ ret.quirks = quirks;
+ ret.proto = proto;
+ return (ret);
+}
+
+static int
+umass_probe(device_t dev)
+{
+ struct usb_attach_arg *uaa = device_get_ivars(dev);
+ struct umass_probe_proto temp;
+
+ if (uaa->usb_mode != USB_MODE_HOST) {
+ return (ENXIO);
+ }
+ if (uaa->use_generic == 0) {
+ /* give other drivers a try first */
+ return (ENXIO);
+ }
+ temp = umass_probe_proto(dev, uaa);
+
+ return (temp.error);
+}
+
+static int
+umass_attach(device_t dev)
+{
+ struct umass_softc *sc = device_get_softc(dev);
+ struct usb_attach_arg *uaa = device_get_ivars(dev);
+ struct umass_probe_proto temp = umass_probe_proto(dev, uaa);
+ struct usb_interface_descriptor *id;
+ int32_t err;
+
+ /*
+ * NOTE: the softc struct is bzero-ed in device_set_driver.
+ * We can safely call umass_detach without specifically
+ * initializing the struct.
+ */
+
+ sc->sc_dev = dev;
+ sc->sc_udev = uaa->device;
+ sc->sc_proto = temp.proto;
+ sc->sc_quirks = temp.quirks;
+ sc->sc_unit = device_get_unit(dev);
+
+ snprintf(sc->sc_name, sizeof(sc->sc_name),
+ "%s", device_get_nameunit(dev));
+
+ device_set_usb_desc(dev);
+
+ mtx_init(&sc->sc_mtx, device_get_nameunit(dev),
+ NULL, MTX_DEF | MTX_RECURSE);
+
+ /* get interface index */
+
+ id = usbd_get_interface_descriptor(uaa->iface);
+ if (id == NULL) {
+ device_printf(dev, "failed to get "
+ "interface number\n");
+ goto detach;
+ }
+ sc->sc_iface_no = id->bInterfaceNumber;
+
+#ifdef USB_DEBUG
+ device_printf(dev, " ");
+
+ switch (sc->sc_proto & UMASS_PROTO_COMMAND) {
+ case UMASS_PROTO_SCSI:
+ printf("SCSI");
+ break;
+ case UMASS_PROTO_ATAPI:
+ printf("8070i (ATAPI)");
+ break;
+ case UMASS_PROTO_UFI:
+ printf("UFI");
+ break;
+ case UMASS_PROTO_RBC:
+ printf("RBC");
+ break;
+ default:
+ printf("(unknown 0x%02x)",
+ sc->sc_proto & UMASS_PROTO_COMMAND);
+ break;
+ }
+
+ printf(" over ");
+
+ switch (sc->sc_proto & UMASS_PROTO_WIRE) {
+ case UMASS_PROTO_BBB:
+ printf("Bulk-Only");
+ break;
+	case UMASS_PROTO_CBI:	/* uses Command/Bulk pipes */
+		printf("CBI");
+		break;
+	case UMASS_PROTO_CBI_I:	/* uses Command/Bulk/Interrupt pipes */
+ printf("CBI with CCI");
+ break;
+ default:
+ printf("(unknown 0x%02x)",
+ sc->sc_proto & UMASS_PROTO_WIRE);
+ }
+
+ printf("; quirks = 0x%04x\n", sc->sc_quirks);
+#endif
+
+ if (sc->sc_quirks & ALT_IFACE_1) {
+ err = usbd_set_alt_interface_index
+ (uaa->device, uaa->info.bIfaceIndex, 1);
+
+ if (err) {
+ DPRINTF(sc, UDMASS_USB, "could not switch to "
+ "Alt Interface 1\n");
+ goto detach;
+ }
+ }
+ /* allocate all required USB transfers */
+
+ if (sc->sc_proto & UMASS_PROTO_BBB) {
+
+ err = usbd_transfer_setup(uaa->device,
+ &uaa->info.bIfaceIndex, sc->sc_xfer, umass_bbb_config,
+ UMASS_T_BBB_MAX, sc, &sc->sc_mtx);
+
+ /* skip reset first time */
+ sc->sc_last_xfer_index = UMASS_T_BBB_COMMAND;
+
+ } else if (sc->sc_proto & (UMASS_PROTO_CBI | UMASS_PROTO_CBI_I)) {
+
+ err = usbd_transfer_setup(uaa->device,
+ &uaa->info.bIfaceIndex, sc->sc_xfer, umass_cbi_config,
+ UMASS_T_CBI_MAX, sc, &sc->sc_mtx);
+
+ /* skip reset first time */
+ sc->sc_last_xfer_index = UMASS_T_CBI_COMMAND;
+
+ } else {
+ err = USB_ERR_INVAL;
+ }
+
+ if (err) {
+ device_printf(dev, "could not setup required "
+ "transfers, %s\n", usbd_errstr(err));
+ goto detach;
+ }
+ sc->sc_transform =
+ (sc->sc_proto & UMASS_PROTO_SCSI) ? &umass_scsi_transform :
+ (sc->sc_proto & UMASS_PROTO_UFI) ? &umass_ufi_transform :
+ (sc->sc_proto & UMASS_PROTO_ATAPI) ? &umass_atapi_transform :
+ (sc->sc_proto & UMASS_PROTO_RBC) ? &umass_rbc_transform :
+ &umass_no_transform;
+
+ /* from here onwards the device can be used. */
+
+ if (sc->sc_quirks & SHUTTLE_INIT) {
+ umass_init_shuttle(sc);
+ }
+ /* get the maximum LUN supported by the device */
+
+ if (((sc->sc_proto & UMASS_PROTO_WIRE) == UMASS_PROTO_BBB) &&
+ !(sc->sc_quirks & NO_GETMAXLUN))
+ sc->sc_maxlun = umass_bbb_get_max_lun(sc);
+ else
+ sc->sc_maxlun = 0;
+
+ /* Prepare the SCSI command block */
+ sc->cam_scsi_sense.opcode = REQUEST_SENSE;
+ sc->cam_scsi_test_unit_ready.opcode = TEST_UNIT_READY;
+
+	/*
+	 * Some devices need a delay after the configuration value has
+	 * been set in order to function properly:
+	 */
+ usb_pause_mtx(NULL, hz);
+
+ /* register the SIM */
+ err = umass_cam_attach_sim(sc);
+ if (err) {
+ goto detach;
+ }
+#ifndef __rtems__
+ /* scan the SIM */
+ umass_cam_attach(sc);
+#endif /* __rtems__ */
+
+ DPRINTF(sc, UDMASS_GEN, "Attach finished\n");
+
+ return (0); /* success */
+
+detach:
+ umass_detach(dev);
+ return (ENXIO); /* failure */
+}
+
+static int
+umass_detach(device_t dev)
+{
+ struct umass_softc *sc = device_get_softc(dev);
+
+ DPRINTF(sc, UDMASS_USB, "\n");
+
+ /* teardown our statemachine */
+
+ usbd_transfer_unsetup(sc->sc_xfer, UMASS_T_MAX);
+
+#if (__FreeBSD_version >= 700037)
+ mtx_lock(&sc->sc_mtx);
+#endif
+ umass_cam_detach_sim(sc);
+
+#if (__FreeBSD_version >= 700037)
+ mtx_unlock(&sc->sc_mtx);
+#endif
+ mtx_destroy(&sc->sc_mtx);
+
+ return (0); /* success */
+}
+
+static void
+umass_init_shuttle(struct umass_softc *sc)
+{
+ struct usb_device_request req;
+ usb_error_t err;
+ uint8_t status[2] = {0, 0};
+
+ /*
+ * The Linux driver does this, but no one can tell us what the
+ * command does.
+ */
+ req.bmRequestType = UT_READ_VENDOR_DEVICE;
+ req.bRequest = 1; /* XXX unknown command */
+ USETW(req.wValue, 0);
+ req.wIndex[0] = sc->sc_iface_no;
+ req.wIndex[1] = 0;
+ USETW(req.wLength, sizeof(status));
+ err = usbd_do_request(sc->sc_udev, NULL, &req, &status);
+
+ DPRINTF(sc, UDMASS_GEN, "Shuttle init returned 0x%02x%02x\n",
+ status[0], status[1]);
+}
+
+/*
+ * Generic functions to handle transfers
+ */
+
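+/*
+ * Start the USB transfer with the given index and remember it as the
+ * last transfer started. If that transfer was never allocated, the
+ * pending CCB is cancelled instead.
+ */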
+static void
+umass_transfer_start(struct umass_softc *sc, uint8_t xfer_index)
+{
+ DPRINTF(sc, UDMASS_GEN, "transfer index = "
+ "%d\n", xfer_index);
+
+ if (sc->sc_xfer[xfer_index]) {
+ sc->sc_last_xfer_index = xfer_index;
+ usbd_transfer_start(sc->sc_xfer[xfer_index]);
+ } else {
+ umass_cancel_ccb(sc);
+ }
+}
+
+static void
+umass_reset(struct umass_softc *sc)
+{
+ DPRINTF(sc, UDMASS_GEN, "resetting device\n");
+
+ /*
+ * stop the last transfer, if not already stopped:
+ */
+ usbd_transfer_stop(sc->sc_xfer[sc->sc_last_xfer_index]);
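+	/*
+	 * Transfer index zero is the first reset transfer of the wire
+	 * protocol in use (UMASS_T_BBB_RESET1 or UMASS_T_CBI_RESET1).
+	 */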
+ umass_transfer_start(sc, 0);
+}
+
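+/*
+ * Cancel the pending CCB, if any, and report the number of bytes that
+ * were not transferred to the completion callback as a wire failure.
+ */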
+static void
+umass_cancel_ccb(struct umass_softc *sc)
+{
+ union ccb *ccb;
+
+ mtx_assert(&sc->sc_mtx, MA_OWNED);
+
+ ccb = sc->sc_transfer.ccb;
+ sc->sc_transfer.ccb = NULL;
+ sc->sc_last_xfer_index = 0;
+
+ if (ccb) {
+ (sc->sc_transfer.callback)
+ (sc, ccb, (sc->sc_transfer.data_len -
+ sc->sc_transfer.actlen), STATUS_WIRE_FAILED);
+ }
+}
+
+static void
+umass_tr_error(struct usb_xfer *xfer, usb_error_t error)
+{
+ struct umass_softc *sc = usbd_xfer_softc(xfer);
+
+ if (error != USB_ERR_CANCELLED) {
+
+ DPRINTF(sc, UDMASS_GEN, "transfer error, %s -> "
+ "reset\n", usbd_errstr(error));
+ }
+ umass_cancel_ccb(sc);
+}
+
+/*
+ * BBB protocol specific functions
+ */
+
+static void
+umass_t_bbb_reset1_callback(struct usb_xfer *xfer, usb_error_t error)
+{
+ struct umass_softc *sc = usbd_xfer_softc(xfer);
+ struct usb_device_request req;
+ struct usb_page_cache *pc;
+
+ switch (USB_GET_STATE(xfer)) {
+ case USB_ST_TRANSFERRED:
+ umass_transfer_start(sc, UMASS_T_BBB_RESET2);
+ return;
+
+ case USB_ST_SETUP:
+ /*
+ * Reset recovery (5.3.4 in Universal Serial Bus Mass Storage Class)
+ *
+ * For Reset Recovery the host shall issue in the following order:
+ * a) a Bulk-Only Mass Storage Reset
+ * b) a Clear Feature HALT to the Bulk-In endpoint
+ * c) a Clear Feature HALT to the Bulk-Out endpoint
+ *
+ * This is done in 3 steps, using 3 transfers:
+ * UMASS_T_BBB_RESET1
+ * UMASS_T_BBB_RESET2
+ * UMASS_T_BBB_RESET3
+ */
+
+ DPRINTF(sc, UDMASS_BBB, "BBB reset!\n");
+
+ req.bmRequestType = UT_WRITE_CLASS_INTERFACE;
+ req.bRequest = UR_BBB_RESET; /* bulk only reset */
+ USETW(req.wValue, 0);
+ req.wIndex[0] = sc->sc_iface_no;
+ req.wIndex[1] = 0;
+ USETW(req.wLength, 0);
+
+ pc = usbd_xfer_get_frame(xfer, 0);
+ usbd_copy_in(pc, 0, &req, sizeof(req));
+
+ usbd_xfer_set_frame_len(xfer, 0, sizeof(req));
+ usbd_xfer_set_frames(xfer, 1);
+ usbd_transfer_submit(xfer);
+ return;
+
+ default: /* Error */
+ umass_tr_error(xfer, error);
+ return;
+
+ }
+}
+
+static void
+umass_t_bbb_reset2_callback(struct usb_xfer *xfer, usb_error_t error)
+{
+ umass_t_bbb_data_clear_stall_callback(xfer, UMASS_T_BBB_RESET3,
+ UMASS_T_BBB_DATA_READ, error);
+}
+
+static void
+umass_t_bbb_reset3_callback(struct usb_xfer *xfer, usb_error_t error)
+{
+ umass_t_bbb_data_clear_stall_callback(xfer, UMASS_T_BBB_COMMAND,
+ UMASS_T_BBB_DATA_WRITE, error);
+}
+
+static void
+umass_t_bbb_data_clear_stall_callback(struct usb_xfer *xfer,
+ uint8_t next_xfer, uint8_t stall_xfer, usb_error_t error)
+{
+ struct umass_softc *sc = usbd_xfer_softc(xfer);
+
+ switch (USB_GET_STATE(xfer)) {
+ case USB_ST_TRANSFERRED:
+tr_transferred:
+ umass_transfer_start(sc, next_xfer);
+ return;
+
+ case USB_ST_SETUP:
+ if (usbd_clear_stall_callback(xfer, sc->sc_xfer[stall_xfer])) {
+ goto tr_transferred;
+ }
+ return;
+
+ default: /* Error */
+ umass_tr_error(xfer, error);
+ return;
+
+ }
+}
+
+static void
+umass_t_bbb_command_callback(struct usb_xfer *xfer, usb_error_t error)
+{
+ struct umass_softc *sc = usbd_xfer_softc(xfer);
+ union ccb *ccb = sc->sc_transfer.ccb;
+ struct usb_page_cache *pc;
+ uint32_t tag;
+
+ switch (USB_GET_STATE(xfer)) {
+ case USB_ST_TRANSFERRED:
+ umass_transfer_start
+ (sc, ((sc->sc_transfer.dir == DIR_IN) ? UMASS_T_BBB_DATA_READ :
+ (sc->sc_transfer.dir == DIR_OUT) ? UMASS_T_BBB_DATA_WRITE :
+ UMASS_T_BBB_STATUS));
+ return;
+
+ case USB_ST_SETUP:
+
+ sc->sc_status_try = 0;
+
+ if (ccb) {
+
+			/*
+			 * The initial tag value is not important, as
+			 * long as each command gets a unique tag:
+			 */
+ tag = UGETDW(sc->cbw.dCBWTag) + 1;
+
+ USETDW(sc->cbw.dCBWSignature, CBWSIGNATURE);
+ USETDW(sc->cbw.dCBWTag, tag);
+
+ /*
+ * dCBWDataTransferLength:
+ * This field indicates the number of bytes of data that the host
+			 * intends to transfer on the IN or OUT Bulk endpoint (as indicated by
+ * the Direction bit) during the execution of this command. If this
+ * field is set to 0, the device will expect that no data will be
+ * transferred IN or OUT during this command, regardless of the value
+ * of the Direction bit defined in dCBWFlags.
+ */
+ USETDW(sc->cbw.dCBWDataTransferLength, sc->sc_transfer.data_len);
+
+ /*
+ * dCBWFlags:
+ * The bits of the Flags field are defined as follows:
+ * Bits 0-6 reserved
+ * Bit 7 Direction - this bit shall be ignored if the
+ * dCBWDataTransferLength field is zero.
+ * 0 = data Out from host to device
+ * 1 = data In from device to host
+ */
+ sc->cbw.bCBWFlags = ((sc->sc_transfer.dir == DIR_IN) ?
+ CBWFLAGS_IN : CBWFLAGS_OUT);
+ sc->cbw.bCBWLUN = sc->sc_transfer.lun;
+
+ if (sc->sc_transfer.cmd_len > sizeof(sc->cbw.CBWCDB)) {
+ sc->sc_transfer.cmd_len = sizeof(sc->cbw.CBWCDB);
+ DPRINTF(sc, UDMASS_BBB, "Truncating long command!\n");
+ }
+ sc->cbw.bCDBLength = sc->sc_transfer.cmd_len;
+
+			bcopy(sc->sc_transfer.cmd_data, sc->cbw.CBWCDB,
+			    sc->sc_transfer.cmd_len);
+
+			/* zero the unused remainder of the CDB, not the staging buffer */
+			bzero(sc->cbw.CBWCDB + sc->sc_transfer.cmd_len,
+			    sizeof(sc->cbw.CBWCDB) - sc->sc_transfer.cmd_len);
+
+ DIF(UDMASS_BBB, umass_bbb_dump_cbw(sc, &sc->cbw));
+
+ pc = usbd_xfer_get_frame(xfer, 0);
+ usbd_copy_in(pc, 0, &sc->cbw, sizeof(sc->cbw));
+ usbd_xfer_set_frame_len(xfer, 0, sizeof(sc->cbw));
+
+ usbd_transfer_submit(xfer);
+ }
+ return;
+
+ default: /* Error */
+ umass_tr_error(xfer, error);
+ return;
+
+ }
+}
+
+static void
+umass_t_bbb_data_read_callback(struct usb_xfer *xfer, usb_error_t error)
+{
+ struct umass_softc *sc = usbd_xfer_softc(xfer);
+ uint32_t max_bulk = usbd_xfer_max_len(xfer);
+#ifndef UMASS_EXT_BUFFER
+ struct usb_page_cache *pc;
+#endif
+ int actlen, sumlen;
+
+ usbd_xfer_status(xfer, &actlen, &sumlen, NULL, NULL);
+
+ switch (USB_GET_STATE(xfer)) {
+ case USB_ST_TRANSFERRED:
+#ifndef UMASS_EXT_BUFFER
+ pc = usbd_xfer_get_frame(xfer, 0);
+ usbd_copy_out(pc, 0, sc->sc_transfer.data_ptr, actlen);
+#endif
+ sc->sc_transfer.data_rem -= actlen;
+ sc->sc_transfer.data_ptr += actlen;
+ sc->sc_transfer.actlen += actlen;
+
+ if (actlen < sumlen) {
+ /* short transfer */
+ sc->sc_transfer.data_rem = 0;
+ }
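+		/* FALLTHROUGH */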
+ case USB_ST_SETUP:
+ DPRINTF(sc, UDMASS_BBB, "max_bulk=%d, data_rem=%d\n",
+ max_bulk, sc->sc_transfer.data_rem);
+
+ if (sc->sc_transfer.data_rem == 0) {
+ umass_transfer_start(sc, UMASS_T_BBB_STATUS);
+ return;
+ }
+ if (max_bulk > sc->sc_transfer.data_rem) {
+ max_bulk = sc->sc_transfer.data_rem;
+ }
+ usbd_xfer_set_timeout(xfer, sc->sc_transfer.data_timeout);
+
+#ifdef UMASS_EXT_BUFFER
+ usbd_xfer_set_frame_data(xfer, 0, sc->sc_transfer.data_ptr,
+ max_bulk);
+#else
+ usbd_xfer_set_frame_len(xfer, 0, max_bulk);
+#endif
+ usbd_transfer_submit(xfer);
+ return;
+
+ default: /* Error */
+ if (error == USB_ERR_CANCELLED) {
+ umass_tr_error(xfer, error);
+ } else {
+ umass_transfer_start(sc, UMASS_T_BBB_DATA_RD_CS);
+ }
+ return;
+
+ }
+}
+
+static void
+umass_t_bbb_data_rd_cs_callback(struct usb_xfer *xfer, usb_error_t error)
+{
+ umass_t_bbb_data_clear_stall_callback(xfer, UMASS_T_BBB_STATUS,
+ UMASS_T_BBB_DATA_READ, error);
+}
+
+static void
+umass_t_bbb_data_write_callback(struct usb_xfer *xfer, usb_error_t error)
+{
+ struct umass_softc *sc = usbd_xfer_softc(xfer);
+ uint32_t max_bulk = usbd_xfer_max_len(xfer);
+#ifndef UMASS_EXT_BUFFER
+ struct usb_page_cache *pc;
+#endif
+ int actlen, sumlen;
+
+ usbd_xfer_status(xfer, &actlen, &sumlen, NULL, NULL);
+
+ switch (USB_GET_STATE(xfer)) {
+ case USB_ST_TRANSFERRED:
+ sc->sc_transfer.data_rem -= actlen;
+ sc->sc_transfer.data_ptr += actlen;
+ sc->sc_transfer.actlen += actlen;
+
+ if (actlen < sumlen) {
+ /* short transfer */
+ sc->sc_transfer.data_rem = 0;
+ }
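+		/* FALLTHROUGH */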
+ case USB_ST_SETUP:
+ DPRINTF(sc, UDMASS_BBB, "max_bulk=%d, data_rem=%d\n",
+ max_bulk, sc->sc_transfer.data_rem);
+
+ if (sc->sc_transfer.data_rem == 0) {
+ umass_transfer_start(sc, UMASS_T_BBB_STATUS);
+ return;
+ }
+ if (max_bulk > sc->sc_transfer.data_rem) {
+ max_bulk = sc->sc_transfer.data_rem;
+ }
+ usbd_xfer_set_timeout(xfer, sc->sc_transfer.data_timeout);
+
+#ifdef UMASS_EXT_BUFFER
+ usbd_xfer_set_frame_data(xfer, 0, sc->sc_transfer.data_ptr,
+ max_bulk);
+#else
+ pc = usbd_xfer_get_frame(xfer, 0);
+ usbd_copy_in(pc, 0, sc->sc_transfer.data_ptr, max_bulk);
+ usbd_xfer_set_frame_len(xfer, 0, max_bulk);
+#endif
+
+ usbd_transfer_submit(xfer);
+ return;
+
+ default: /* Error */
+ if (error == USB_ERR_CANCELLED) {
+ umass_tr_error(xfer, error);
+ } else {
+ umass_transfer_start(sc, UMASS_T_BBB_DATA_WR_CS);
+ }
+ return;
+
+ }
+}
+
+static void
+umass_t_bbb_data_wr_cs_callback(struct usb_xfer *xfer, usb_error_t error)
+{
+ umass_t_bbb_data_clear_stall_callback(xfer, UMASS_T_BBB_STATUS,
+ UMASS_T_BBB_DATA_WRITE, error);
+}
+
+static void
+umass_t_bbb_status_callback(struct usb_xfer *xfer, usb_error_t error)
+{
+ struct umass_softc *sc = usbd_xfer_softc(xfer);
+ union ccb *ccb = sc->sc_transfer.ccb;
+ struct usb_page_cache *pc;
+ uint32_t residue;
+ int actlen;
+
+ usbd_xfer_status(xfer, &actlen, NULL, NULL, NULL);
+
+ switch (USB_GET_STATE(xfer)) {
+ case USB_ST_TRANSFERRED:
+
+ /*
+ * Do a full reset if there is something wrong with the CSW:
+ */
+ sc->sc_status_try = 1;
+
+ /* Zero missing parts of the CSW: */
+
+ if (actlen < sizeof(sc->csw)) {
+ bzero(&sc->csw, sizeof(sc->csw));
+ }
+ pc = usbd_xfer_get_frame(xfer, 0);
+ usbd_copy_out(pc, 0, &sc->csw, actlen);
+
+ DIF(UDMASS_BBB, umass_bbb_dump_csw(sc, &sc->csw));
+
+ residue = UGETDW(sc->csw.dCSWDataResidue);
+
+ if ((!residue) || (sc->sc_quirks & IGNORE_RESIDUE)) {
+ residue = (sc->sc_transfer.data_len -
+ sc->sc_transfer.actlen);
+ }
+ if (residue > sc->sc_transfer.data_len) {
+ DPRINTF(sc, UDMASS_BBB, "truncating residue from %d "
+ "to %d bytes\n", residue, sc->sc_transfer.data_len);
+ residue = sc->sc_transfer.data_len;
+ }
+ /* translate weird command-status signatures: */
+ if (sc->sc_quirks & WRONG_CSWSIG) {
+
+ uint32_t temp = UGETDW(sc->csw.dCSWSignature);
+
+ if ((temp == CSWSIGNATURE_OLYMPUS_C1) ||
+ (temp == CSWSIGNATURE_IMAGINATION_DBX1)) {
+ USETDW(sc->csw.dCSWSignature, CSWSIGNATURE);
+ }
+ }
+ /* check CSW and handle eventual error */
+ if (UGETDW(sc->csw.dCSWSignature) != CSWSIGNATURE) {
+ DPRINTF(sc, UDMASS_BBB, "bad CSW signature 0x%08x != 0x%08x\n",
+ UGETDW(sc->csw.dCSWSignature), CSWSIGNATURE);
+ /*
+ * Invalid CSW: Wrong signature or wrong tag might
+ * indicate that we lost synchronization. Reset the
+ * device.
+ */
+ goto tr_error;
+ } else if (UGETDW(sc->csw.dCSWTag) != UGETDW(sc->cbw.dCBWTag)) {
+ DPRINTF(sc, UDMASS_BBB, "Invalid CSW: tag 0x%08x should be "
+ "0x%08x\n", UGETDW(sc->csw.dCSWTag),
+ UGETDW(sc->cbw.dCBWTag));
+ goto tr_error;
+ } else if (sc->csw.bCSWStatus > CSWSTATUS_PHASE) {
+ DPRINTF(sc, UDMASS_BBB, "Invalid CSW: status %d > %d\n",
+ sc->csw.bCSWStatus, CSWSTATUS_PHASE);
+ goto tr_error;
+ } else if (sc->csw.bCSWStatus == CSWSTATUS_PHASE) {
+ DPRINTF(sc, UDMASS_BBB, "Phase error, residue = "
+ "%d\n", residue);
+ goto tr_error;
+ } else if (sc->sc_transfer.actlen > sc->sc_transfer.data_len) {
+ DPRINTF(sc, UDMASS_BBB, "Buffer overrun %d > %d\n",
+ sc->sc_transfer.actlen, sc->sc_transfer.data_len);
+ goto tr_error;
+ } else if (sc->csw.bCSWStatus == CSWSTATUS_FAILED) {
+ DPRINTF(sc, UDMASS_BBB, "Command failed, residue = "
+ "%d\n", residue);
+
+ sc->sc_transfer.ccb = NULL;
+
+ sc->sc_last_xfer_index = UMASS_T_BBB_COMMAND;
+
+ (sc->sc_transfer.callback)
+ (sc, ccb, residue, STATUS_CMD_FAILED);
+ } else {
+ sc->sc_transfer.ccb = NULL;
+
+ sc->sc_last_xfer_index = UMASS_T_BBB_COMMAND;
+
+ (sc->sc_transfer.callback)
+ (sc, ccb, residue, STATUS_CMD_OK);
+ }
+ return;
+
+ case USB_ST_SETUP:
+ usbd_xfer_set_frame_len(xfer, 0, usbd_xfer_max_len(xfer));
+ usbd_transfer_submit(xfer);
+ return;
+
+ default:
+tr_error:
+ DPRINTF(sc, UDMASS_BBB, "Failed to read CSW: %s, try %d\n",
+ usbd_errstr(error), sc->sc_status_try);
+
+ if ((error == USB_ERR_CANCELLED) ||
+ (sc->sc_status_try)) {
+ umass_tr_error(xfer, error);
+ } else {
+ sc->sc_status_try = 1;
+ umass_transfer_start(sc, UMASS_T_BBB_DATA_RD_CS);
+ }
+ return;
+
+ }
+}
+
+static void
+umass_command_start(struct umass_softc *sc, uint8_t dir,
+ void *data_ptr, uint32_t data_len,
+ uint32_t data_timeout, umass_callback_t *callback,
+ union ccb *ccb)
+{
+ sc->sc_transfer.lun = ccb->ccb_h.target_lun;
+
+ /*
+ * NOTE: assumes that "sc->sc_transfer.cmd_data" and
+	 * "sc->sc_transfer.cmd_len" have been properly
+ * initialized.
+ */
+
+ sc->sc_transfer.dir = data_len ? dir : DIR_NONE;
+ sc->sc_transfer.data_ptr = data_ptr;
+ sc->sc_transfer.data_len = data_len;
+ sc->sc_transfer.data_rem = data_len;
+ sc->sc_transfer.data_timeout = (data_timeout + UMASS_TIMEOUT);
+
+ sc->sc_transfer.actlen = 0;
+ sc->sc_transfer.callback = callback;
+ sc->sc_transfer.ccb = ccb;
+
+ if (sc->sc_xfer[sc->sc_last_xfer_index]) {
+ usbd_transfer_start(sc->sc_xfer[sc->sc_last_xfer_index]);
+ } else {
+ ccb->ccb_h.status = CAM_TID_INVALID;
+ xpt_done(ccb);
+ }
+}
+
+static uint8_t
+umass_bbb_get_max_lun(struct umass_softc *sc)
+{
+ struct usb_device_request req;
+ usb_error_t err;
+ uint8_t buf = 0;
+
+ /* The Get Max Lun command is a class-specific request. */
+ req.bmRequestType = UT_READ_CLASS_INTERFACE;
+ req.bRequest = UR_BBB_GET_MAX_LUN;
+ USETW(req.wValue, 0);
+ req.wIndex[0] = sc->sc_iface_no;
+ req.wIndex[1] = 0;
+ USETW(req.wLength, 1);
+
+ err = usbd_do_request(sc->sc_udev, NULL, &req, &buf);
+ if (err) {
+ buf = 0;
+
+ /* Device doesn't support Get Max Lun request. */
+ printf("%s: Get Max Lun not supported (%s)\n",
+ sc->sc_name, usbd_errstr(err));
+ }
+ return (buf);
+}
+
+/*
+ * Command/Bulk/Interrupt (CBI) specific functions
+ */
+
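+/*
+ * Start the interrupt status transfer if the device has one (CBI with
+ * CCI). Otherwise complete the pending CCB right away with an unknown
+ * command status.
+ */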
+static void
+umass_cbi_start_status(struct umass_softc *sc)
+{
+ if (sc->sc_xfer[UMASS_T_CBI_STATUS]) {
+ umass_transfer_start(sc, UMASS_T_CBI_STATUS);
+ } else {
+ union ccb *ccb = sc->sc_transfer.ccb;
+
+ sc->sc_transfer.ccb = NULL;
+
+ sc->sc_last_xfer_index = UMASS_T_CBI_COMMAND;
+
+ (sc->sc_transfer.callback)
+ (sc, ccb, (sc->sc_transfer.data_len -
+ sc->sc_transfer.actlen), STATUS_CMD_UNKNOWN);
+ }
+}
+
+static void
+umass_t_cbi_reset1_callback(struct usb_xfer *xfer, usb_error_t error)
+{
+ struct umass_softc *sc = usbd_xfer_softc(xfer);
+ struct usb_device_request req;
+ struct usb_page_cache *pc;
+ uint8_t buf[UMASS_CBI_DIAGNOSTIC_CMDLEN];
+
+ uint8_t i;
+
+ switch (USB_GET_STATE(xfer)) {
+ case USB_ST_TRANSFERRED:
+ umass_transfer_start(sc, UMASS_T_CBI_RESET2);
+ break;
+
+ case USB_ST_SETUP:
+ /*
+ * Command Block Reset Protocol
+ *
+ * First send a reset request to the device. Then clear
+ * any possibly stalled bulk endpoints.
+ *
+ * This is done in 3 steps, using 3 transfers:
+ * UMASS_T_CBI_RESET1
+ * UMASS_T_CBI_RESET2
+ * UMASS_T_CBI_RESET3
+ * UMASS_T_CBI_RESET4 (only if there is an interrupt endpoint)
+ */
+
+ DPRINTF(sc, UDMASS_CBI, "CBI reset!\n");
+
+ req.bmRequestType = UT_WRITE_CLASS_INTERFACE;
+ req.bRequest = UR_CBI_ADSC;
+ USETW(req.wValue, 0);
+ req.wIndex[0] = sc->sc_iface_no;
+ req.wIndex[1] = 0;
+ USETW(req.wLength, UMASS_CBI_DIAGNOSTIC_CMDLEN);
+
+		/*
+		 * The Command Block Reset uses opcode 0x1d, which is also
+		 * the SEND DIAGNOSTIC command. To distinguish between the
+		 * two, the last 10 bytes of the command block are filled
+		 * with 0xff (section 2.2 of the CBI specification).
+		 */
+ buf[0] = 0x1d; /* Command Block Reset */
+ buf[1] = 0x04;
+
+ for (i = 2; i < UMASS_CBI_DIAGNOSTIC_CMDLEN; i++) {
+ buf[i] = 0xff;
+ }
+
+ pc = usbd_xfer_get_frame(xfer, 0);
+ usbd_copy_in(pc, 0, &req, sizeof(req));
+ pc = usbd_xfer_get_frame(xfer, 1);
+ usbd_copy_in(pc, 0, buf, sizeof(buf));
+
+ usbd_xfer_set_frame_len(xfer, 0, sizeof(req));
+ usbd_xfer_set_frame_len(xfer, 1, sizeof(buf));
+ usbd_xfer_set_frames(xfer, 2);
+ usbd_transfer_submit(xfer);
+ break;
+
+ default: /* Error */
+ if (error == USB_ERR_CANCELLED)
+ umass_tr_error(xfer, error);
+ else
+ umass_transfer_start(sc, UMASS_T_CBI_RESET2);
+ break;
+
+ }
+}
+
+static void
+umass_t_cbi_reset2_callback(struct usb_xfer *xfer, usb_error_t error)
+{
+ umass_t_cbi_data_clear_stall_callback(xfer, UMASS_T_CBI_RESET3,
+ UMASS_T_CBI_DATA_READ, error);
+}
+
+static void
+umass_t_cbi_reset3_callback(struct usb_xfer *xfer, usb_error_t error)
+{
+ struct umass_softc *sc = usbd_xfer_softc(xfer);
+
+ umass_t_cbi_data_clear_stall_callback
+ (xfer, (sc->sc_xfer[UMASS_T_CBI_RESET4] &&
+ sc->sc_xfer[UMASS_T_CBI_STATUS]) ?
+ UMASS_T_CBI_RESET4 : UMASS_T_CBI_COMMAND,
+ UMASS_T_CBI_DATA_WRITE, error);
+}
+
+static void
+umass_t_cbi_reset4_callback(struct usb_xfer *xfer, usb_error_t error)
+{
+ umass_t_cbi_data_clear_stall_callback(xfer, UMASS_T_CBI_COMMAND,
+ UMASS_T_CBI_STATUS, error);
+}
+
+static void
+umass_t_cbi_data_clear_stall_callback(struct usb_xfer *xfer,
+ uint8_t next_xfer, uint8_t stall_xfer, usb_error_t error)
+{
+ struct umass_softc *sc = usbd_xfer_softc(xfer);
+
+ switch (USB_GET_STATE(xfer)) {
+ case USB_ST_TRANSFERRED:
+tr_transferred:
+ if (next_xfer == UMASS_T_CBI_STATUS) {
+ umass_cbi_start_status(sc);
+ } else {
+ umass_transfer_start(sc, next_xfer);
+ }
+ break;
+
+ case USB_ST_SETUP:
+ if (usbd_clear_stall_callback(xfer, sc->sc_xfer[stall_xfer])) {
+ goto tr_transferred; /* should not happen */
+ }
+ break;
+
+ default: /* Error */
+ umass_tr_error(xfer, error);
+ break;
+
+ }
+}
+
+static void
+umass_t_cbi_command_callback(struct usb_xfer *xfer, usb_error_t error)
+{
+ struct umass_softc *sc = usbd_xfer_softc(xfer);
+ union ccb *ccb = sc->sc_transfer.ccb;
+ struct usb_device_request req;
+ struct usb_page_cache *pc;
+
+ switch (USB_GET_STATE(xfer)) {
+ case USB_ST_TRANSFERRED:
+
+ if (sc->sc_transfer.dir == DIR_NONE) {
+ umass_cbi_start_status(sc);
+ } else {
+ umass_transfer_start
+ (sc, (sc->sc_transfer.dir == DIR_IN) ?
+ UMASS_T_CBI_DATA_READ : UMASS_T_CBI_DATA_WRITE);
+ }
+ break;
+
+ case USB_ST_SETUP:
+
+ if (ccb) {
+
+ /*
+ * do a CBI transfer with cmd_len bytes from
+ * cmd_data, possibly a data phase of data_len
+ * bytes from/to the device and finally a status
+ * read phase.
+ */
+
+ req.bmRequestType = UT_WRITE_CLASS_INTERFACE;
+ req.bRequest = UR_CBI_ADSC;
+ USETW(req.wValue, 0);
+ req.wIndex[0] = sc->sc_iface_no;
+ req.wIndex[1] = 0;
+ req.wLength[0] = sc->sc_transfer.cmd_len;
+ req.wLength[1] = 0;
+
+ pc = usbd_xfer_get_frame(xfer, 0);
+ usbd_copy_in(pc, 0, &req, sizeof(req));
+ pc = usbd_xfer_get_frame(xfer, 1);
+ usbd_copy_in(pc, 0, sc->sc_transfer.cmd_data,
+ sc->sc_transfer.cmd_len);
+
+ usbd_xfer_set_frame_len(xfer, 0, sizeof(req));
+ usbd_xfer_set_frame_len(xfer, 1, sc->sc_transfer.cmd_len);
+ usbd_xfer_set_frames(xfer,
+ sc->sc_transfer.cmd_len ? 2 : 1);
+
+ DIF(UDMASS_CBI,
+ umass_cbi_dump_cmd(sc,
+ sc->sc_transfer.cmd_data,
+ sc->sc_transfer.cmd_len));
+
+ usbd_transfer_submit(xfer);
+ }
+ break;
+
+ default: /* Error */
+ umass_tr_error(xfer, error);
+ /* skip reset */
+ sc->sc_last_xfer_index = UMASS_T_CBI_COMMAND;
+ break;
+ }
+}
+
+static void
+umass_t_cbi_data_read_callback(struct usb_xfer *xfer, usb_error_t error)
+{
+ struct umass_softc *sc = usbd_xfer_softc(xfer);
+ uint32_t max_bulk = usbd_xfer_max_len(xfer);
+#ifndef UMASS_EXT_BUFFER
+ struct usb_page_cache *pc;
+#endif
+ int actlen, sumlen;
+
+ usbd_xfer_status(xfer, &actlen, &sumlen, NULL, NULL);
+
+ switch (USB_GET_STATE(xfer)) {
+ case USB_ST_TRANSFERRED:
+#ifndef UMASS_EXT_BUFFER
+ pc = usbd_xfer_get_frame(xfer, 0);
+ usbd_copy_out(pc, 0, sc->sc_transfer.data_ptr, actlen);
+#endif
+ sc->sc_transfer.data_rem -= actlen;
+ sc->sc_transfer.data_ptr += actlen;
+ sc->sc_transfer.actlen += actlen;
+
+ if (actlen < sumlen) {
+ /* short transfer */
+ sc->sc_transfer.data_rem = 0;
+ }
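+		/* FALLTHROUGH */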
+ case USB_ST_SETUP:
+ DPRINTF(sc, UDMASS_CBI, "max_bulk=%d, data_rem=%d\n",
+ max_bulk, sc->sc_transfer.data_rem);
+
+ if (sc->sc_transfer.data_rem == 0) {
+ umass_cbi_start_status(sc);
+ break;
+ }
+ if (max_bulk > sc->sc_transfer.data_rem) {
+ max_bulk = sc->sc_transfer.data_rem;
+ }
+ usbd_xfer_set_timeout(xfer, sc->sc_transfer.data_timeout);
+
+#ifdef UMASS_EXT_BUFFER
+ usbd_xfer_set_frame_data(xfer, 0, sc->sc_transfer.data_ptr,
+ max_bulk);
+#else
+ usbd_xfer_set_frame_len(xfer, 0, max_bulk);
+#endif
+ usbd_transfer_submit(xfer);
+ break;
+
+ default: /* Error */
+ if ((error == USB_ERR_CANCELLED) ||
+ (sc->sc_transfer.callback != &umass_cam_cb)) {
+ umass_tr_error(xfer, error);
+ } else {
+ umass_transfer_start(sc, UMASS_T_CBI_DATA_RD_CS);
+ }
+ break;
+
+ }
+}
+
+static void
+umass_t_cbi_data_rd_cs_callback(struct usb_xfer *xfer, usb_error_t error)
+{
+ umass_t_cbi_data_clear_stall_callback(xfer, UMASS_T_CBI_STATUS,
+ UMASS_T_CBI_DATA_READ, error);
+}
+
+static void
+umass_t_cbi_data_write_callback(struct usb_xfer *xfer, usb_error_t error)
+{
+ struct umass_softc *sc = usbd_xfer_softc(xfer);
+ uint32_t max_bulk = usbd_xfer_max_len(xfer);
+#ifndef UMASS_EXT_BUFFER
+ struct usb_page_cache *pc;
+#endif
+ int actlen, sumlen;
+
+ usbd_xfer_status(xfer, &actlen, &sumlen, NULL, NULL);
+
+ switch (USB_GET_STATE(xfer)) {
+ case USB_ST_TRANSFERRED:
+ sc->sc_transfer.data_rem -= actlen;
+ sc->sc_transfer.data_ptr += actlen;
+ sc->sc_transfer.actlen += actlen;
+
+ if (actlen < sumlen) {
+ /* short transfer */
+ sc->sc_transfer.data_rem = 0;
+ }
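+		/* FALLTHROUGH */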
+ case USB_ST_SETUP:
+ DPRINTF(sc, UDMASS_CBI, "max_bulk=%d, data_rem=%d\n",
+ max_bulk, sc->sc_transfer.data_rem);
+
+ if (sc->sc_transfer.data_rem == 0) {
+ umass_cbi_start_status(sc);
+ break;
+ }
+ if (max_bulk > sc->sc_transfer.data_rem) {
+ max_bulk = sc->sc_transfer.data_rem;
+ }
+ usbd_xfer_set_timeout(xfer, sc->sc_transfer.data_timeout);
+
+#ifdef UMASS_EXT_BUFFER
+ usbd_xfer_set_frame_data(xfer, 0, sc->sc_transfer.data_ptr,
+ max_bulk);
+#else
+ pc = usbd_xfer_get_frame(xfer, 0);
+ usbd_copy_in(pc, 0, sc->sc_transfer.data_ptr, max_bulk);
+ usbd_xfer_set_frame_len(xfer, 0, max_bulk);
+#endif
+
+ usbd_transfer_submit(xfer);
+ break;
+
+ default: /* Error */
+ if ((error == USB_ERR_CANCELLED) ||
+ (sc->sc_transfer.callback != &umass_cam_cb)) {
+ umass_tr_error(xfer, error);
+ } else {
+ umass_transfer_start(sc, UMASS_T_CBI_DATA_WR_CS);
+ }
+ break;
+
+ }
+}
+
+static void
+umass_t_cbi_data_wr_cs_callback(struct usb_xfer *xfer, usb_error_t error)
+{
+ umass_t_cbi_data_clear_stall_callback(xfer, UMASS_T_CBI_STATUS,
+ UMASS_T_CBI_DATA_WRITE, error);
+}
+
+static void
+umass_t_cbi_status_callback(struct usb_xfer *xfer, usb_error_t error)
+{
+ struct umass_softc *sc = usbd_xfer_softc(xfer);
+ union ccb *ccb = sc->sc_transfer.ccb;
+ struct usb_page_cache *pc;
+ uint32_t residue;
+ uint8_t status;
+ int actlen;
+
+ usbd_xfer_status(xfer, &actlen, NULL, NULL, NULL);
+
+ switch (USB_GET_STATE(xfer)) {
+ case USB_ST_TRANSFERRED:
+
+ if (actlen < sizeof(sc->sbl)) {
+ goto tr_setup;
+ }
+ pc = usbd_xfer_get_frame(xfer, 0);
+ usbd_copy_out(pc, 0, &sc->sbl, sizeof(sc->sbl));
+
+ residue = (sc->sc_transfer.data_len -
+ sc->sc_transfer.actlen);
+
+ /* dissect the information in the buffer */
+
+ if (sc->sc_proto & UMASS_PROTO_UFI) {
+
+ /*
+ * Section 3.4.3.1.3 specifies that the UFI command
+ * protocol returns an ASC and ASCQ in the interrupt
+ * data block.
+ */
+
+ DPRINTF(sc, UDMASS_CBI, "UFI CCI, ASC = 0x%02x, "
+ "ASCQ = 0x%02x\n", sc->sbl.ufi.asc,
+ sc->sbl.ufi.ascq);
+
+ status = (((sc->sbl.ufi.asc == 0) &&
+ (sc->sbl.ufi.ascq == 0)) ?
+ STATUS_CMD_OK : STATUS_CMD_FAILED);
+
+ sc->sc_transfer.ccb = NULL;
+
+ sc->sc_last_xfer_index = UMASS_T_CBI_COMMAND;
+
+ (sc->sc_transfer.callback)
+ (sc, ccb, residue, status);
+
+ break;
+
+ } else {
+
+ /* Command Interrupt Data Block */
+
+ DPRINTF(sc, UDMASS_CBI, "type=0x%02x, value=0x%02x\n",
+ sc->sbl.common.type, sc->sbl.common.value);
+
+ if (sc->sbl.common.type == IDB_TYPE_CCI) {
+
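+				/*
+				 * Map the CBI status value to the driver's
+				 * command status codes; a persistent failure
+				 * is treated like an ordinary failure.
+				 */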
+ status = (sc->sbl.common.value & IDB_VALUE_STATUS_MASK);
+
+ status = ((status == IDB_VALUE_PASS) ? STATUS_CMD_OK :
+ (status == IDB_VALUE_FAIL) ? STATUS_CMD_FAILED :
+ (status == IDB_VALUE_PERSISTENT) ? STATUS_CMD_FAILED :
+ STATUS_WIRE_FAILED);
+
+ sc->sc_transfer.ccb = NULL;
+
+ sc->sc_last_xfer_index = UMASS_T_CBI_COMMAND;
+
+ (sc->sc_transfer.callback)
+ (sc, ccb, residue, status);
+
+ break;
+ }
+ }
+
+ /* fallthrough */
+
+ case USB_ST_SETUP:
+tr_setup:
+ usbd_xfer_set_frame_len(xfer, 0, usbd_xfer_max_len(xfer));
+ usbd_transfer_submit(xfer);
+ break;
+
+ default: /* Error */
+		DPRINTF(sc, UDMASS_CBI, "Failed to read status block: %s\n",
+ usbd_errstr(error));
+ umass_tr_error(xfer, error);
+ break;
+
+ }
+}
+
+/*
+ * CAM specific functions (used by SCSI, UFI, 8070i (ATAPI))
+ */
+
+static int
+umass_cam_attach_sim(struct umass_softc *sc)
+{
+ struct cam_devq *devq; /* Per device Queue */
+
+	/*
+	 * An HBA is attached to the CAM layer.
+	 *
+	 * After a while the CAM layer will start probing for devices on
+	 * the bus. The number of SIMs is limited to one.
+	 */
+
+ devq = cam_simq_alloc(1 /* maximum openings */ );
+ if (devq == NULL) {
+ return (ENOMEM);
+ }
+ sc->sc_sim = cam_sim_alloc
+ (&umass_cam_action, &umass_cam_poll,
+ DEVNAME_SIM,
+ sc /* priv */ ,
+ sc->sc_unit /* unit number */ ,
+#if (__FreeBSD_version >= 700037)
+ &sc->sc_mtx /* mutex */ ,
+#endif
+ 1 /* maximum device openings */ ,
+ 0 /* maximum tagged device openings */ ,
+ devq);
+
+ if (sc->sc_sim == NULL) {
+ cam_simq_free(devq);
+ return (ENOMEM);
+ }
+
+#if (__FreeBSD_version >= 700037)
+ mtx_lock(&sc->sc_mtx);
+#endif
+
+#if (__FreeBSD_version >= 700048)
+ if (xpt_bus_register(sc->sc_sim, sc->sc_dev, sc->sc_unit) != CAM_SUCCESS) {
+ mtx_unlock(&sc->sc_mtx);
+ return (ENOMEM);
+ }
+#else
+ if (xpt_bus_register(sc->sc_sim, sc->sc_unit) != CAM_SUCCESS) {
+#if (__FreeBSD_version >= 700037)
+ mtx_unlock(&sc->sc_mtx);
+#endif
+ return (ENOMEM);
+ }
+#endif
+
+#if (__FreeBSD_version >= 700037)
+ mtx_unlock(&sc->sc_mtx);
+#endif
+ return (0);
+}
+
+#ifndef __rtems__
+static void
+umass_cam_attach(struct umass_softc *sc)
+{
+#ifndef USB_DEBUG
+ if (bootverbose)
+#endif
+ printf("%s:%d:%d:%d: Attached to scbus%d\n",
+ sc->sc_name, cam_sim_path(sc->sc_sim),
+ sc->sc_unit, CAM_LUN_WILDCARD,
+ cam_sim_path(sc->sc_sim));
+}
+#endif /* __rtems__ */
+
+/* umass_cam_detach_sim
+ * detach from the CAM layer
+ */
+
+static void
+umass_cam_detach_sim(struct umass_softc *sc)
+{
+ if (sc->sc_sim != NULL) {
+ if (xpt_bus_deregister(cam_sim_path(sc->sc_sim))) {
+ /* accessing the softc is not possible after this */
+ sc->sc_sim->softc = UMASS_GONE;
+ cam_sim_free(sc->sc_sim, /* free_devq */ TRUE);
+ } else {
+ panic("%s: CAM layer is busy\n",
+ sc->sc_name);
+ }
+ sc->sc_sim = NULL;
+ }
+}
+
+/* umass_cam_action
+ * CAM requests for action come through here
+ */
+
+static void
+umass_cam_action(struct cam_sim *sim, union ccb *ccb)
+{
+ struct umass_softc *sc = (struct umass_softc *)sim->softc;
+
+ if (sc == UMASS_GONE ||
+ (sc != NULL && !usbd_device_attached(sc->sc_udev))) {
+ ccb->ccb_h.status = CAM_SEL_TIMEOUT;
+ xpt_done(ccb);
+ return;
+ }
+ if (sc) {
+#if (__FreeBSD_version < 700037)
+ mtx_lock(&sc->sc_mtx);
+#endif
+ }
+ /*
+ * Verify, depending on the operation to perform, that we either got
+ * a valid sc, because an existing target was referenced, or
+ * otherwise the SIM is addressed.
+ *
+ * This avoids bombing out at a printf and does give the CAM layer some
+ * sensible feedback on errors.
+ */
+ switch (ccb->ccb_h.func_code) {
+ case XPT_SCSI_IO:
+ case XPT_RESET_DEV:
+ case XPT_GET_TRAN_SETTINGS:
+ case XPT_SET_TRAN_SETTINGS:
+ case XPT_CALC_GEOMETRY:
+		/* these opcodes require a target; a NULL softc should never occur here */
+ if (sc == NULL) {
+ DPRINTF(sc, UDMASS_GEN, "%s:%d:%d:%d:func_code 0x%04x: "
+ "Invalid target (target needed)\n",
+			    DEVNAME_SIM, cam_sim_path(sim),
+ ccb->ccb_h.target_id, ccb->ccb_h.target_lun,
+ ccb->ccb_h.func_code);
+
+ ccb->ccb_h.status = CAM_TID_INVALID;
+ xpt_done(ccb);
+ goto done;
+ }
+ break;
+ case XPT_PATH_INQ:
+ case XPT_NOOP:
+ /*
+ * The opcodes sometimes aimed at a target (sc is valid),
+ * sometimes aimed at the SIM (sc is invalid and target is
+ * CAM_TARGET_WILDCARD)
+ */
+ if ((sc == NULL) &&
+ (ccb->ccb_h.target_id != CAM_TARGET_WILDCARD)) {
+ DPRINTF(sc, UDMASS_SCSI, "%s:%d:%d:%d:func_code 0x%04x: "
+ "Invalid target (no wildcard)\n",
+			    DEVNAME_SIM, cam_sim_path(sim),
+ ccb->ccb_h.target_id, ccb->ccb_h.target_lun,
+ ccb->ccb_h.func_code);
+
+ ccb->ccb_h.status = CAM_TID_INVALID;
+ xpt_done(ccb);
+ goto done;
+ }
+ break;
+ default:
+ /* XXX Hm, we should check the input parameters */
+ break;
+ }
+
+ /* Perform the requested action */
+ switch (ccb->ccb_h.func_code) {
+ case XPT_SCSI_IO:
+ {
+ uint8_t *cmd;
+ uint8_t dir;
+
+ if (ccb->csio.ccb_h.flags & CAM_CDB_POINTER) {
+ cmd = (uint8_t *)(ccb->csio.cdb_io.cdb_ptr);
+ } else {
+ cmd = (uint8_t *)(ccb->csio.cdb_io.cdb_bytes);
+ }
+
+ DPRINTF(sc, UDMASS_SCSI, "%d:%d:%d:XPT_SCSI_IO: "
+ "cmd: 0x%02x, flags: 0x%02x, "
+ "%db cmd/%db data/%db sense\n",
+ cam_sim_path(sc->sc_sim), ccb->ccb_h.target_id,
+ ccb->ccb_h.target_lun, cmd[0],
+ ccb->ccb_h.flags & CAM_DIR_MASK, ccb->csio.cdb_len,
+ ccb->csio.dxfer_len, ccb->csio.sense_len);
+
+ if (sc->sc_transfer.ccb) {
+ DPRINTF(sc, UDMASS_SCSI, "%d:%d:%d:XPT_SCSI_IO: "
+ "I/O in progress, deferring\n",
+ cam_sim_path(sc->sc_sim), ccb->ccb_h.target_id,
+ ccb->ccb_h.target_lun);
+ ccb->ccb_h.status = CAM_SCSI_BUSY;
+ xpt_done(ccb);
+ goto done;
+ }
+ switch (ccb->ccb_h.flags & CAM_DIR_MASK) {
+ case CAM_DIR_IN:
+ dir = DIR_IN;
+ break;
+ case CAM_DIR_OUT:
+ dir = DIR_OUT;
+ DIF(UDMASS_SCSI,
+ umass_dump_buffer(sc, ccb->csio.data_ptr,
+ ccb->csio.dxfer_len, 48));
+ break;
+ default:
+ dir = DIR_NONE;
+ }
+
+ ccb->ccb_h.status = CAM_REQ_INPROG | CAM_SIM_QUEUED;
+
+ /*
+ * sc->sc_transform will convert the command to the
+ * command format needed by the specific command set
+ * and return the converted command in
+ * "sc->sc_transfer.cmd_data"
+ */
+ if (umass_std_transform(sc, ccb, cmd, ccb->csio.cdb_len)) {
+
+ if (sc->sc_transfer.cmd_data[0] == INQUIRY) {
+ const char *pserial;
+
+ pserial = usb_get_serial(sc->sc_udev);
+
+ /*
+ * Umass devices don't generally report their serial numbers
+ * in the usual SCSI way. Emulate it here.
+ */
+ if ((sc->sc_transfer.cmd_data[1] & SI_EVPD) &&
+ (sc->sc_transfer.cmd_data[2] == SVPD_UNIT_SERIAL_NUMBER) &&
+ (pserial[0] != '\0')) {
+ struct scsi_vpd_unit_serial_number *vpd_serial;
+
+ vpd_serial = (struct scsi_vpd_unit_serial_number *)ccb->csio.data_ptr;
+ vpd_serial->length = strlen(pserial);
+ if (vpd_serial->length > sizeof(vpd_serial->serial_num))
+ vpd_serial->length = sizeof(vpd_serial->serial_num);
+ memcpy(vpd_serial->serial_num, pserial, vpd_serial->length);
+ ccb->csio.scsi_status = SCSI_STATUS_OK;
+ ccb->ccb_h.status = CAM_REQ_CMP;
+ xpt_done(ccb);
+ goto done;
+ }
+
+ /*
+ * Handle EVPD inquiry for broken devices first
+ * NO_INQUIRY also implies NO_INQUIRY_EVPD
+ */
+ if ((sc->sc_quirks & (NO_INQUIRY_EVPD | NO_INQUIRY)) &&
+ (sc->sc_transfer.cmd_data[1] & SI_EVPD)) {
+ struct scsi_sense_data *sense;
+
+ sense = &ccb->csio.sense_data;
+ bzero(sense, sizeof(*sense));
+ sense->error_code = SSD_CURRENT_ERROR;
+ sense->flags = SSD_KEY_ILLEGAL_REQUEST;
+ sense->add_sense_code = 0x24;
+ sense->extra_len = 10;
+ ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
+ ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR |
+ CAM_AUTOSNS_VALID;
+ xpt_done(ccb);
+ goto done;
+ }
+ /*
+ * Return fake inquiry data for
+ * broken devices
+ */
+ if (sc->sc_quirks & NO_INQUIRY) {
+ memcpy(ccb->csio.data_ptr, &fake_inq_data,
+ sizeof(fake_inq_data));
+ ccb->csio.scsi_status = SCSI_STATUS_OK;
+ ccb->ccb_h.status = CAM_REQ_CMP;
+ xpt_done(ccb);
+ goto done;
+ }
+ if (sc->sc_quirks & FORCE_SHORT_INQUIRY) {
+ ccb->csio.dxfer_len = SHORT_INQUIRY_LENGTH;
+ }
+ } else if (sc->sc_transfer.cmd_data[0] == SYNCHRONIZE_CACHE) {
+ if (sc->sc_quirks & NO_SYNCHRONIZE_CACHE) {
+ ccb->csio.scsi_status = SCSI_STATUS_OK;
+ ccb->ccb_h.status = CAM_REQ_CMP;
+ xpt_done(ccb);
+ goto done;
+ }
+ }
+ umass_command_start(sc, dir, ccb->csio.data_ptr,
+ ccb->csio.dxfer_len,
+ ccb->ccb_h.timeout,
+ &umass_cam_cb, ccb);
+ }
+ break;
+ }
+ case XPT_PATH_INQ:
+ {
+ struct ccb_pathinq *cpi = &ccb->cpi;
+
+ DPRINTF(sc, UDMASS_SCSI, "%d:%d:%d:XPT_PATH_INQ:.\n",
+ sc ? cam_sim_path(sc->sc_sim) : -1, ccb->ccb_h.target_id,
+ ccb->ccb_h.target_lun);
+
+ /* host specific information */
+ cpi->version_num = 1;
+ cpi->hba_inquiry = 0;
+ cpi->target_sprt = 0;
+ cpi->hba_misc = PIM_NO_6_BYTE;
+ cpi->hba_eng_cnt = 0;
+ cpi->max_target = UMASS_SCSIID_MAX; /* one target */
+ cpi->initiator_id = UMASS_SCSIID_HOST;
+ strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
+ strlcpy(cpi->hba_vid, "USB SCSI", HBA_IDLEN);
+ strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
+ cpi->unit_number = cam_sim_unit(sim);
+ cpi->bus_id = sc->sc_unit;
+#if (__FreeBSD_version >= 700025)
+ cpi->protocol = PROTO_SCSI;
+ cpi->protocol_version = SCSI_REV_2;
+ cpi->transport = XPORT_USB;
+ cpi->transport_version = 0;
+#endif
+ if (sc == NULL) {
+ cpi->base_transfer_speed = 0;
+ cpi->max_lun = 0;
+ } else {
+ if (sc->sc_quirks & FLOPPY_SPEED) {
+ cpi->base_transfer_speed =
+ UMASS_FLOPPY_TRANSFER_SPEED;
+ } else {
+ switch (usbd_get_speed(sc->sc_udev)) {
+ case USB_SPEED_SUPER:
+ cpi->base_transfer_speed =
+ UMASS_SUPER_TRANSFER_SPEED;
+ cpi->maxio = MAXPHYS;
+ break;
+ case USB_SPEED_HIGH:
+ cpi->base_transfer_speed =
+ UMASS_HIGH_TRANSFER_SPEED;
+ break;
+ default:
+ cpi->base_transfer_speed =
+ UMASS_FULL_TRANSFER_SPEED;
+ break;
+ }
+ }
+ cpi->max_lun = sc->sc_maxlun;
+ }
+
+ cpi->ccb_h.status = CAM_REQ_CMP;
+ xpt_done(ccb);
+ break;
+ }
+ case XPT_RESET_DEV:
+ {
+ DPRINTF(sc, UDMASS_SCSI, "%d:%d:%d:XPT_RESET_DEV:.\n",
+ cam_sim_path(sc->sc_sim), ccb->ccb_h.target_id,
+ ccb->ccb_h.target_lun);
+
+ umass_reset(sc);
+
+ ccb->ccb_h.status = CAM_REQ_CMP;
+ xpt_done(ccb);
+ break;
+ }
+ case XPT_GET_TRAN_SETTINGS:
+ {
+ struct ccb_trans_settings *cts = &ccb->cts;
+
+ DPRINTF(sc, UDMASS_SCSI, "%d:%d:%d:XPT_GET_TRAN_SETTINGS:.\n",
+ cam_sim_path(sc->sc_sim), ccb->ccb_h.target_id,
+ ccb->ccb_h.target_lun);
+
+#if (__FreeBSD_version >= 700025)
+ cts->protocol = PROTO_SCSI;
+ cts->protocol_version = SCSI_REV_2;
+ cts->transport = XPORT_USB;
+ cts->transport_version = 0;
+ cts->xport_specific.valid = 0;
+#else
+ cts->valid = 0;
+ cts->flags = 0; /* no disconnection, tagging */
+#endif
+ ccb->ccb_h.status = CAM_REQ_CMP;
+ xpt_done(ccb);
+ break;
+ }
+ case XPT_SET_TRAN_SETTINGS:
+ {
+ DPRINTF(sc, UDMASS_SCSI, "%d:%d:%d:XPT_SET_TRAN_SETTINGS:.\n",
+ cam_sim_path(sc->sc_sim), ccb->ccb_h.target_id,
+ ccb->ccb_h.target_lun);
+
+ ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
+ xpt_done(ccb);
+ break;
+ }
+#ifndef __rtems__
+ case XPT_CALC_GEOMETRY:
+ {
+ cam_calc_geometry(&ccb->ccg, /* extended */ 1);
+ xpt_done(ccb);
+ break;
+ }
+#endif /* __rtems__ */
+ case XPT_NOOP:
+ {
+ DPRINTF(sc, UDMASS_SCSI, "%d:%d:%d:XPT_NOOP:.\n",
+ sc ? cam_sim_path(sc->sc_sim) : -1, ccb->ccb_h.target_id,
+ ccb->ccb_h.target_lun);
+
+ ccb->ccb_h.status = CAM_REQ_CMP;
+ xpt_done(ccb);
+ break;
+ }
+ default:
+ DPRINTF(sc, UDMASS_SCSI, "%d:%d:%d:func_code 0x%04x: "
+ "Not implemented\n",
+ sc ? cam_sim_path(sc->sc_sim) : -1, ccb->ccb_h.target_id,
+ ccb->ccb_h.target_lun, ccb->ccb_h.func_code);
+
+ ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
+ xpt_done(ccb);
+ break;
+ }
+
+done:
+#if (__FreeBSD_version < 700037)
+ if (sc) {
+ mtx_unlock(&sc->sc_mtx);
+ }
+#endif
+ return;
+}
+
+static void
+umass_cam_poll(struct cam_sim *sim)
+{
+ struct umass_softc *sc = (struct umass_softc *)sim->softc;
+
+ if (sc == UMASS_GONE)
+ return;
+
+ DPRINTF(sc, UDMASS_SCSI, "CAM poll\n");
+
+ usbd_transfer_poll(sc->sc_xfer, UMASS_T_MAX);
+}
+
+
+/* umass_cam_cb
+ * finalise a completed CAM command
+ */
+
+static void
+umass_cam_cb(struct umass_softc *sc, union ccb *ccb, uint32_t residue,
+ uint8_t status)
+{
+ ccb->csio.resid = residue;
+
+ switch (status) {
+ case STATUS_CMD_OK:
+ ccb->ccb_h.status = CAM_REQ_CMP;
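+		/*
+		 * Devices with the READ_CAPACITY_OFFBY1 quirk return the
+		 * number of sectors instead of the highest sector number,
+		 * so correct the returned address by one.
+		 */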
+ if ((sc->sc_quirks & READ_CAPACITY_OFFBY1) &&
+ (ccb->ccb_h.func_code == XPT_SCSI_IO) &&
+ (ccb->csio.cdb_io.cdb_bytes[0] == READ_CAPACITY)) {
+ struct scsi_read_capacity_data *rcap;
+ uint32_t maxsector;
+
+ rcap = (void *)(ccb->csio.data_ptr);
+ maxsector = scsi_4btoul(rcap->addr) - 1;
+ scsi_ulto4b(maxsector, rcap->addr);
+ }
+ /*
+ * We have to add SVPD_UNIT_SERIAL_NUMBER to the list
+ * of pages supported by the device - otherwise, CAM
+ * will never ask us for the serial number if the
+ * device cannot handle that by itself.
+ */
+ if (ccb->ccb_h.func_code == XPT_SCSI_IO &&
+ sc->sc_transfer.cmd_data[0] == INQUIRY &&
+ (sc->sc_transfer.cmd_data[1] & SI_EVPD) &&
+ sc->sc_transfer.cmd_data[2] == SVPD_SUPPORTED_PAGE_LIST &&
+ (usb_get_serial(sc->sc_udev)[0] != '\0')) {
+ struct ccb_scsiio *csio;
+ struct scsi_vpd_supported_page_list *page_list;
+
+ csio = &ccb->csio;
+ page_list = (struct scsi_vpd_supported_page_list *)csio->data_ptr;
+ if (page_list->length + 1 < SVPD_SUPPORTED_PAGES_SIZE) {
+ page_list->list[page_list->length] = SVPD_UNIT_SERIAL_NUMBER;
+ page_list->length++;
+ }
+ }
+ xpt_done(ccb);
+ break;
+
+ case STATUS_CMD_UNKNOWN:
+ case STATUS_CMD_FAILED:
+
+ /* fetch sense data */
+
+ /* the rest of the command was filled in at attach */
+ sc->cam_scsi_sense.length = ccb->csio.sense_len;
+
+ DPRINTF(sc, UDMASS_SCSI, "Fetching %d bytes of "
+ "sense data\n", ccb->csio.sense_len);
+
+ if (umass_std_transform(sc, ccb, &sc->cam_scsi_sense.opcode,
+ sizeof(sc->cam_scsi_sense))) {
+
+ if ((sc->sc_quirks & FORCE_SHORT_INQUIRY) &&
+ (sc->sc_transfer.cmd_data[0] == INQUIRY)) {
+ ccb->csio.sense_len = SHORT_INQUIRY_LENGTH;
+ }
+ umass_command_start(sc, DIR_IN, &ccb->csio.sense_data.error_code,
+ ccb->csio.sense_len, ccb->ccb_h.timeout,
+ &umass_cam_sense_cb, ccb);
+ }
+ break;
+
+ default:
+ /*
+ * The wire protocol failed and will hopefully have
+ * recovered. We return an error to CAM and let CAM
+ * retry the command if necessary. In case of SCSI IO
+ * commands we ask the CAM layer to check the
+ * condition first. This is a quick hack to make
+ * certain devices work.
+ */
+ if (ccb->ccb_h.func_code == XPT_SCSI_IO) {
+ ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
+ ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
+ } else {
+ ccb->ccb_h.status = CAM_REQ_CMP_ERR;
+ }
+ xpt_done(ccb);
+ break;
+ }
+}
+
+/*
+ * Finalise a completed autosense operation
+ */
+static void
+umass_cam_sense_cb(struct umass_softc *sc, union ccb *ccb, uint32_t residue,
+ uint8_t status)
+{
+ uint8_t *cmd;
+ uint8_t key;
+
+ switch (status) {
+ case STATUS_CMD_OK:
+ case STATUS_CMD_UNKNOWN:
+ case STATUS_CMD_FAILED:
+
+ if (ccb->csio.ccb_h.flags & CAM_CDB_POINTER) {
+ cmd = (uint8_t *)(ccb->csio.cdb_io.cdb_ptr);
+ } else {
+ cmd = (uint8_t *)(ccb->csio.cdb_io.cdb_bytes);
+ }
+
+ key = (ccb->csio.sense_data.flags & SSD_KEY);
+
+ /*
+ * Getting sense data always succeeds (apart from wire
+ * failures):
+ */
+ if ((sc->sc_quirks & RS_NO_CLEAR_UA) &&
+ (cmd[0] == INQUIRY) &&
+ (key == SSD_KEY_UNIT_ATTENTION)) {
+ /*
+ * Ignore unit attention errors in the case where
+ * the Unit Attention state is not cleared on
+ * REQUEST SENSE. They will appear again at the next
+ * command.
+ */
+ ccb->ccb_h.status = CAM_REQ_CMP;
+ } else if (key == SSD_KEY_NO_SENSE) {
+ /*
+ * No problem after all (in the case of CBI without
+ * CCI)
+ */
+ ccb->ccb_h.status = CAM_REQ_CMP;
+ } else if ((sc->sc_quirks & RS_NO_CLEAR_UA) &&
+ (cmd[0] == READ_CAPACITY) &&
+ (key == SSD_KEY_UNIT_ATTENTION)) {
+ /*
+ * Some devices do not clear the unit attention error
+ * on request sense. We insert a test unit ready
+ * command to make sure we clear the unit attention
+ * condition, then allow the retry to proceed as
+ * usual.
+ */
+
+ ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR
+ | CAM_AUTOSNS_VALID;
+ ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
+
+#if 0
+ DELAY(300000);
+#endif
+ DPRINTF(sc, UDMASS_SCSI, "Doing a sneaky "
+ "TEST_UNIT_READY\n");
+
+ /* the rest of the command was filled in at attach */
+
+ if (umass_std_transform(sc, ccb,
+ &sc->cam_scsi_test_unit_ready.opcode,
+ sizeof(sc->cam_scsi_test_unit_ready))) {
+ umass_command_start(sc, DIR_NONE, NULL, 0,
+ ccb->ccb_h.timeout,
+ &umass_cam_quirk_cb, ccb);
+ }
+ break;
+ } else {
+ ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR
+ | CAM_AUTOSNS_VALID;
+ ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
+ }
+ xpt_done(ccb);
+ break;
+
+ default:
+ DPRINTF(sc, UDMASS_SCSI, "Autosense failed, "
+ "status %d\n", status);
+ ccb->ccb_h.status = CAM_AUTOSENSE_FAIL;
+ xpt_done(ccb);
+ }
+}
+
+/*
+ * This completion code just handles the fact that we sent a test-unit-ready
+ * after having previously failed a READ CAPACITY with CHECK_COND. Even
+ * though this command succeeded, we have to tell CAM to retry.
+ */
+static void
+umass_cam_quirk_cb(struct umass_softc *sc, union ccb *ccb, uint32_t residue,
+ uint8_t status)
+{
+ DPRINTF(sc, UDMASS_SCSI, "Test unit ready "
+ "returned status %d\n", status);
+
+ ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR
+ | CAM_AUTOSNS_VALID;
+ ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
+ xpt_done(ccb);
+}
+
+/*
+ * SCSI specific functions
+ */
+
+static uint8_t
+umass_scsi_transform(struct umass_softc *sc, uint8_t *cmd_ptr,
+ uint8_t cmd_len)
+{
+ if ((cmd_len == 0) ||
+ (cmd_len > sizeof(sc->sc_transfer.cmd_data))) {
+ DPRINTF(sc, UDMASS_SCSI, "Invalid command "
+ "length: %d bytes\n", cmd_len);
+ return (0); /* failure */
+ }
+ sc->sc_transfer.cmd_len = cmd_len;
+
+ switch (cmd_ptr[0]) {
+ case TEST_UNIT_READY:
+ if (sc->sc_quirks & NO_TEST_UNIT_READY) {
+ DPRINTF(sc, UDMASS_SCSI, "Converted TEST_UNIT_READY "
+ "to START_UNIT\n");
+ bzero(sc->sc_transfer.cmd_data, cmd_len);
+ sc->sc_transfer.cmd_data[0] = START_STOP_UNIT;
+ sc->sc_transfer.cmd_data[4] = SSS_START;
+ return (1);
+ }
+ break;
+
+ case INQUIRY:
+ /*
+ * some drives wedge when asked for full inquiry
+ * information.
+ */
+ if (sc->sc_quirks & FORCE_SHORT_INQUIRY) {
+ bcopy(cmd_ptr, sc->sc_transfer.cmd_data, cmd_len);
+ sc->sc_transfer.cmd_data[4] = SHORT_INQUIRY_LENGTH;
+ return (1);
+ }
+ break;
+ }
+
+ bcopy(cmd_ptr, sc->sc_transfer.cmd_data, cmd_len);
+ return (1);
+}
+
+static uint8_t
+umass_rbc_transform(struct umass_softc *sc, uint8_t *cmd_ptr, uint8_t cmd_len)
+{
+ if ((cmd_len == 0) ||
+ (cmd_len > sizeof(sc->sc_transfer.cmd_data))) {
+ DPRINTF(sc, UDMASS_SCSI, "Invalid command "
+ "length: %d bytes\n", cmd_len);
+ return (0); /* failure */
+ }
+ switch (cmd_ptr[0]) {
+ /* these commands are defined in RBC: */
+ case READ_10:
+ case READ_CAPACITY:
+ case START_STOP_UNIT:
+ case SYNCHRONIZE_CACHE:
+ case WRITE_10:
+ case 0x2f: /* VERIFY_10; scsi_all.h defines no
+ * constant for this opcode */
+ case INQUIRY:
+ case MODE_SELECT_10:
+ case MODE_SENSE_10:
+ case TEST_UNIT_READY:
+ case WRITE_BUFFER:
+ /*
+ * The following commands are not listed in my copy of the
+ * RBC specs. CAM, however, seems to want them, and at least
+ * the Sony DSC device appears to support them as well.
+ */
+ case REQUEST_SENSE:
+ case PREVENT_ALLOW:
+
+ bcopy(cmd_ptr, sc->sc_transfer.cmd_data, cmd_len);
+
+ if ((sc->sc_quirks & RBC_PAD_TO_12) && (cmd_len < 12)) {
+ bzero(sc->sc_transfer.cmd_data + cmd_len, 12 - cmd_len);
+ cmd_len = 12;
+ }
+ sc->sc_transfer.cmd_len = cmd_len;
+ return (1); /* success */
+
+ /* All other commands are not legal in RBC */
+ default:
+ DPRINTF(sc, UDMASS_SCSI, "Unsupported RBC "
+ "command 0x%02x\n", cmd_ptr[0]);
+ return (0); /* failure */
+ }
+}
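The RBC_PAD_TO_12 branch zero-extends shorter CDBs in place; the same
padding as a standalone sketch (the function name is hypothetical):

	#include <stdint.h>
	#include <string.h>

	/* Pad a CDB shorter than 12 bytes with trailing zeros, as some
	 * RBC bridges require; returns the padded length. */
	static uint8_t
	rbc_pad_to_12(uint8_t *cmd_data, uint8_t cmd_len)
	{
		if (cmd_len < 12) {
			memset(cmd_data + cmd_len, 0, 12 - cmd_len);
			cmd_len = 12;
		}
		return (cmd_len);
	}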
+
+static uint8_t
+umass_ufi_transform(struct umass_softc *sc, uint8_t *cmd_ptr,
+ uint8_t cmd_len)
+{
+ if ((cmd_len == 0) ||
+ (cmd_len > sizeof(sc->sc_transfer.cmd_data))) {
+ DPRINTF(sc, UDMASS_SCSI, "Invalid command "
+ "length: %d bytes\n", cmd_len);
+ return (0); /* failure */
+ }
+ /* A UFI command is always 12 bytes in length */
+ sc->sc_transfer.cmd_len = UFI_COMMAND_LENGTH;
+
+ /* Zero the command data */
+ bzero(sc->sc_transfer.cmd_data, UFI_COMMAND_LENGTH);
+
+ switch (cmd_ptr[0]) {
+ /*
+ * Commands of which the format has been verified. They
+ * should work. Copy the command into the (zeroed out)
+ * destination buffer.
+ */
+ case TEST_UNIT_READY:
+ if (sc->sc_quirks & NO_TEST_UNIT_READY) {
+ /*
+ * Some devices do not support this command. Start
+ * Stop Unit should give the same results
+ */
+ DPRINTF(sc, UDMASS_UFI, "Converted TEST_UNIT_READY "
+ "to START_UNIT\n");
+
+ sc->sc_transfer.cmd_data[0] = START_STOP_UNIT;
+ sc->sc_transfer.cmd_data[4] = SSS_START;
+ return (1);
+ }
+ break;
+
+ case REZERO_UNIT:
+ case REQUEST_SENSE:
+ case FORMAT_UNIT:
+ case INQUIRY:
+ case START_STOP_UNIT:
+ case SEND_DIAGNOSTIC:
+ case PREVENT_ALLOW:
+ case READ_CAPACITY:
+ case READ_10:
+ case WRITE_10:
+ case POSITION_TO_ELEMENT: /* SEEK_10 */
+ case WRITE_AND_VERIFY:
+ case VERIFY:
+ case MODE_SELECT_10:
+ case MODE_SENSE_10:
+ case READ_12:
+ case WRITE_12:
+ case READ_FORMAT_CAPACITIES:
+ break;
+
+ /*
+ * SYNCHRONIZE_CACHE isn't supported by UFI, nor should it be
+ * required for UFI devices, so it is appropriate to fake
+ * success.
+ */
+ case SYNCHRONIZE_CACHE:
+ return (2);
+
+ default:
+ DPRINTF(sc, UDMASS_SCSI, "Unsupported UFI "
+ "command 0x%02x\n", cmd_ptr[0]);
+ return (0); /* failure */
+ }
+
+ bcopy(cmd_ptr, sc->sc_transfer.cmd_data, cmd_len);
+ return (1); /* success */
+}
+
+/*
+ * 8070i (ATAPI) specific functions
+ */
+static uint8_t
+umass_atapi_transform(struct umass_softc *sc, uint8_t *cmd_ptr,
+ uint8_t cmd_len)
+{
+ if ((cmd_len == 0) ||
+ (cmd_len > sizeof(sc->sc_transfer.cmd_data))) {
+ DPRINTF(sc, UDMASS_SCSI, "Invalid command "
+ "length: %d bytes\n", cmd_len);
+ return (0); /* failure */
+ }
+ /* An ATAPI command is always 12 bytes in length. */
+ sc->sc_transfer.cmd_len = ATAPI_COMMAND_LENGTH;
+
+ /* Zero the command data */
+ bzero(sc->sc_transfer.cmd_data, ATAPI_COMMAND_LENGTH);
+
+ switch (cmd_ptr[0]) {
+ /*
+ * Commands of which the format has been verified. They
+ * should work. Copy the command into the destination
+ * buffer.
+ */
+ case INQUIRY:
+ /*
+ * some drives wedge when asked for full inquiry
+ * information.
+ */
+ if (sc->sc_quirks & FORCE_SHORT_INQUIRY) {
+ bcopy(cmd_ptr, sc->sc_transfer.cmd_data, cmd_len);
+
+ sc->sc_transfer.cmd_data[4] = SHORT_INQUIRY_LENGTH;
+ return (1);
+ }
+ break;
+
+ case TEST_UNIT_READY:
+ if (sc->sc_quirks & NO_TEST_UNIT_READY) {
+ DPRINTF(sc, UDMASS_SCSI, "Converted TEST_UNIT_READY "
+ "to START_UNIT\n");
+ sc->sc_transfer.cmd_data[0] = START_STOP_UNIT;
+ sc->sc_transfer.cmd_data[4] = SSS_START;
+ return (1);
+ }
+ break;
+
+ case REZERO_UNIT:
+ case REQUEST_SENSE:
+ case START_STOP_UNIT:
+ case SEND_DIAGNOSTIC:
+ case PREVENT_ALLOW:
+ case READ_CAPACITY:
+ case READ_10:
+ case WRITE_10:
+ case POSITION_TO_ELEMENT: /* SEEK_10 */
+ case SYNCHRONIZE_CACHE:
+ case MODE_SELECT_10:
+ case MODE_SENSE_10:
+ case READ_BUFFER:
+ case 0x42: /* READ_SUBCHANNEL */
+ case 0x43: /* READ_TOC */
+ case 0x44: /* READ_HEADER */
+ case 0x47: /* PLAY_MSF (Play Minute/Second/Frame) */
+ case 0x48: /* PLAY_TRACK */
+ case 0x49: /* PLAY_TRACK_REL */
+ case 0x4b: /* PAUSE */
+ case 0x51: /* READ_DISK_INFO */
+ case 0x52: /* READ_TRACK_INFO */
+ case 0x54: /* SEND_OPC */
+ case 0x59: /* READ_MASTER_CUE */
+ case 0x5b: /* CLOSE_TR_SESSION */
+ case 0x5c: /* READ_BUFFER_CAP */
+ case 0x5d: /* SEND_CUE_SHEET */
+ case 0xa1: /* BLANK */
+ case 0xa5: /* PLAY_12 */
+ case 0xa6: /* EXCHANGE_MEDIUM */
+ case 0xad: /* READ_DVD_STRUCTURE */
+ case 0xbb: /* SET_CD_SPEED */
+ case 0xe5: /* READ_TRACK_INFO_PHILIPS */
+ break;
+
+ case READ_12:
+ case WRITE_12:
+ default:
+ DPRINTF(sc, UDMASS_SCSI, "Unsupported ATAPI "
+ "command 0x%02x - trying anyway\n",
+ cmd_ptr[0]);
+ break;
+ }
+
+ bcopy(cmd_ptr, sc->sc_transfer.cmd_data, cmd_len);
+ return (1); /* success */
+}
+
+static uint8_t
+umass_no_transform(struct umass_softc *sc, uint8_t *cmd,
+ uint8_t cmdlen)
+{
+ return (0); /* failure */
+}
+
+static uint8_t
+umass_std_transform(struct umass_softc *sc, union ccb *ccb,
+ uint8_t *cmd, uint8_t cmdlen)
+{
+ uint8_t retval;
+
+ retval = (sc->sc_transform) (sc, cmd, cmdlen);
+
+ if (retval == 2) {
+ ccb->ccb_h.status = CAM_REQ_CMP;
+ xpt_done(ccb);
+ return (0);
+ } else if (retval == 0) {
+ ccb->ccb_h.status = CAM_REQ_INVALID;
+ xpt_done(ccb);
+ return (0);
+ }
+ /* Command should be executed */
+ return (1);
+}
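The transform functions above share a small return contract, summarized
here as a sketch:

	/* Transform return values:
	 *   0 - invalid command: fail the CCB with CAM_REQ_INVALID
	 *   1 - execute the (possibly rewritten) command on the wire
	 *   2 - fake success: complete the CCB without any transfer
	 *       (used by the UFI SYNCHRONIZE_CACHE case above)
	 */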
+
+#ifdef USB_DEBUG
+static void
+umass_bbb_dump_cbw(struct umass_softc *sc, umass_bbb_cbw_t *cbw)
+{
+ uint8_t *c = cbw->CBWCDB;
+
+ uint32_t dlen = UGETDW(cbw->dCBWDataTransferLength);
+ uint32_t tag = UGETDW(cbw->dCBWTag);
+
+ uint8_t clen = cbw->bCDBLength;
+ uint8_t flags = cbw->bCBWFlags;
+ uint8_t lun = cbw->bCBWLUN;
+
+ DPRINTF(sc, UDMASS_BBB, "CBW %d: cmd = %db "
+ "(0x%02x%02x%02x%02x%02x%02x%s), "
+ "data = %db, lun = %d, dir = %s\n",
+ tag, clen,
+ c[0], c[1], c[2], c[3], c[4], c[5], (clen > 6 ? "..." : ""),
+ dlen, lun, (flags == CBWFLAGS_IN ? "in" :
+ (flags == CBWFLAGS_OUT ? "out" : "<invalid>")));
+}
+
+static void
+umass_bbb_dump_csw(struct umass_softc *sc, umass_bbb_csw_t *csw)
+{
+ uint32_t sig = UGETDW(csw->dCSWSignature);
+ uint32_t tag = UGETDW(csw->dCSWTag);
+ uint32_t res = UGETDW(csw->dCSWDataResidue);
+ uint8_t status = csw->bCSWStatus;
+
+ DPRINTF(sc, UDMASS_BBB, "CSW %d: sig = 0x%08x (%s), tag = 0x%08x, "
+ "res = %d, status = 0x%02x (%s)\n",
+ tag, sig, (sig == CSWSIGNATURE ? "valid" : "invalid"),
+ tag, res,
+ status, (status == CSWSTATUS_GOOD ? "good" :
+ (status == CSWSTATUS_FAILED ? "failed" :
+ (status == CSWSTATUS_PHASE ? "phase" : "<invalid>"))));
+}
+
+static void
+umass_cbi_dump_cmd(struct umass_softc *sc, void *cmd, uint8_t cmdlen)
+{
+ uint8_t *c = cmd;
+ uint8_t dir = sc->sc_transfer.dir;
+
+ DPRINTF(sc, UDMASS_BBB, "cmd = %db "
+ "(0x%02x%02x%02x%02x%02x%02x%s), "
+ "data = %db, dir = %s\n",
+ cmdlen,
+ c[0], c[1], c[2], c[3], c[4], c[5], (cmdlen > 6 ? "..." : ""),
+ sc->sc_transfer.data_len,
+ (dir == DIR_IN ? "in" :
+ (dir == DIR_OUT ? "out" :
+ (dir == DIR_NONE ? "no data phase" : "<invalid>"))));
+}
+
+static void
+umass_dump_buffer(struct umass_softc *sc, uint8_t *buffer, uint32_t buflen,
+ uint32_t printlen)
+{
+ uint32_t i, j;
+ char s1[40];
+ char s2[40];
+ char s3[5];
+
+ s1[0] = '\0';
+ s3[0] = '\0';
+
+ sprintf(s2, " buffer=%p, buflen=%d", buffer, buflen);
+ for (i = 0; (i < buflen) && (i < printlen); i++) {
+ j = i % 16;
+ if (j == 0 && i != 0) {
+ DPRINTF(sc, UDMASS_GEN, "0x %s%s\n",
+ s1, s2);
+ s2[0] = '\0';
+ }
+ sprintf(&s1[j * 2], "%02x", buffer[i] & 0xff);
+ }
+ if (buflen > printlen)
+ sprintf(s3, " ...");
+ DPRINTF(sc, UDMASS_GEN, "0x %s%s%s\n",
+ s1, s2, s3);
+}
+
+#endif
diff --git a/rtems/freebsd/dev/usb/ufm_ioctl.h b/rtems/freebsd/dev/usb/ufm_ioctl.h
new file mode 100644
index 00000000..aa081506
--- /dev/null
+++ b/rtems/freebsd/dev/usb/ufm_ioctl.h
@@ -0,0 +1,39 @@
+/*-
+ * Copyright (c) 2001 M. Warner Losh
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * This code is based on ugen.c and ulpt.c developed by Lennart Augustsson.
+ * This code includes software developed by the NetBSD Foundation, Inc. and
+ * its contributors.
+ */
+
+/* $FreeBSD$ */
+
+#include <rtems/freebsd/sys/ioccom.h>
+
+#define FM_SET_FREQ _IOWR('U', 200, int)
+#define FM_GET_FREQ _IOWR('U', 201, int)
+#define FM_START _IOWR('U', 202, int)
+#define FM_STOP _IOWR('U', 203, int)
+#define FM_GET_STAT _IOWR('U', 204, int)
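A hedged user-space sketch of driving these ioctls; the device node
/dev/ufm0, the local include path, and the kHz frequency unit are
assumptions:

	#include <sys/ioctl.h>
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>
	#include "ufm_ioctl.h"	/* hypothetical local copy of this header */

	int
	main(void)
	{
		int fd = open("/dev/ufm0", O_RDWR);	/* assumed device node */
		int freq = 99500;			/* assumed unit: kHz */

		if (fd < 0)
			return (1);
		if (ioctl(fd, FM_SET_FREQ, &freq) == -1 ||
		    ioctl(fd, FM_START, &freq) == -1)
			perror("ufm");
		close(fd);
		return (0);
	}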
diff --git a/rtems/freebsd/dev/usb/usb.h b/rtems/freebsd/dev/usb/usb.h
new file mode 100644
index 00000000..1ff63d72
--- /dev/null
+++ b/rtems/freebsd/dev/usb/usb.h
@@ -0,0 +1,755 @@
+/* $FreeBSD$ */
+/*-
+ * Copyright (c) 2008 Hans Petter Selasky. All rights reserved.
+ * Copyright (c) 1998 The NetBSD Foundation, Inc. All rights reserved.
+ * Copyright (c) 1998 Lennart Augustsson. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * This file contains standard definitions for the following USB
+ * protocol versions:
+ *
+ * USB v1.0
+ * USB v1.1
+ * USB v2.0
+ * USB v3.0
+ */
+
+#ifndef _USB_STANDARD_HH_
+#define _USB_STANDARD_HH_
+
+#if defined(_KERNEL)
+#include <rtems/freebsd/local/opt_usb.h>
+
+/* Declare parent SYSCTL USB node. */
+#ifdef SYSCTL_DECL
+SYSCTL_DECL(_hw_usb);
+#endif
+
+#include <rtems/freebsd/sys/malloc.h>
+
+MALLOC_DECLARE(M_USB);
+MALLOC_DECLARE(M_USBDEV);
+MALLOC_DECLARE(M_USBHC);
+#endif /* _KERNEL */
+
+#include <rtems/freebsd/dev/usb/usb_endian.h>
+#include <rtems/freebsd/dev/usb/usb_freebsd.h>
+
+#define USB_STACK_VERSION 2000 /* 2.0 */
+
+/* Definition of some hardcoded USB constants. */
+
+#define USB_MAX_IPACKET 8 /* initial USB packet size */
+#define USB_EP_MAX (2*16) /* hardcoded */
+#define USB_ROOT_HUB_ADDR 1 /* index */
+#define USB_MIN_DEVICES 2 /* unused + root HUB */
+#define USB_UNCONFIG_INDEX 0xFF /* internal use only */
+#define USB_IFACE_INDEX_ANY 0xFF /* internal use only */
+#define USB_START_ADDR 0 /* default USB device BUS address
+ * after USB bus reset */
+#define USB_CONTROL_ENDPOINT 0 /* default control endpoint */
+
+#define USB_FRAMES_PER_SECOND_FS 1000 /* full speed */
+#define USB_FRAMES_PER_SECOND_HS 8000 /* high speed */
+
+#define USB_FS_BYTES_PER_HS_UFRAME 188 /* bytes */
+#define USB_HS_MICRO_FRAMES_MAX 8 /* units */
+
+#define USB_ISOC_TIME_MAX 128 /* ms */
+
+/*
+ * Minimum time a device needs to be powered down to go through a
+ * power cycle. These values are not in the USB specification.
+ */
+#define USB_POWER_DOWN_TIME 200 /* ms */
+#define USB_PORT_POWER_DOWN_TIME 100 /* ms */
+
+/* Definition of software USB power modes */
+#define USB_POWER_MODE_OFF 0 /* turn off device */
+#define USB_POWER_MODE_ON 1 /* always on */
+#define USB_POWER_MODE_SAVE 2 /* automatic suspend and resume */
+#define USB_POWER_MODE_SUSPEND 3 /* force suspend */
+#define USB_POWER_MODE_RESUME 4 /* force resume */
+
+#if 0
+/* These are the values from the USB specification. */
+#define USB_PORT_RESET_DELAY 10 /* ms */
+#define USB_PORT_ROOT_RESET_DELAY 50 /* ms */
+#define USB_PORT_RESET_RECOVERY 10 /* ms */
+#define USB_PORT_POWERUP_DELAY 100 /* ms */
+#define USB_PORT_RESUME_DELAY 20 /* ms */
+#define USB_SET_ADDRESS_SETTLE 2 /* ms */
+#define USB_RESUME_DELAY (20*5) /* ms */
+#define USB_RESUME_WAIT 10 /* ms */
+#define USB_RESUME_RECOVERY 10 /* ms */
+#define USB_EXTRA_POWER_UP_TIME 0 /* ms */
+#else
+/* Allow for marginal and non-conforming devices. */
+#define USB_PORT_RESET_DELAY 50 /* ms */
+#define USB_PORT_ROOT_RESET_DELAY 250 /* ms */
+#define USB_PORT_RESET_RECOVERY 250 /* ms */
+#define USB_PORT_POWERUP_DELAY 300 /* ms */
+#define USB_PORT_RESUME_DELAY (20*2) /* ms */
+#define USB_SET_ADDRESS_SETTLE 10 /* ms */
+#define USB_RESUME_DELAY (50*5) /* ms */
+#define USB_RESUME_WAIT 50 /* ms */
+#define USB_RESUME_RECOVERY 50 /* ms */
+#define USB_EXTRA_POWER_UP_TIME 20 /* ms */
+#endif
+
+#define USB_MIN_POWER 100 /* mA */
+#define USB_MAX_POWER 500 /* mA */
+
+#define USB_BUS_RESET_DELAY 100 /* ms */
+
+/*
+ * USB record layout in memory:
+ *
+ * - USB config 0
+ * - USB interfaces
+ * - USB alternative interfaces
+ * - USB endpoints
+ *
+ * - USB config 1
+ * - USB interfaces
+ * - USB alternative interfaces
+ * - USB endpoints
+ */
+
+/* Declaration of USB records */
+
+struct usb_device_request {
+ uByte bmRequestType;
+ uByte bRequest;
+ uWord wValue;
+ uWord wIndex;
+ uWord wLength;
+} __packed;
+typedef struct usb_device_request usb_device_request_t;
+
+#define UT_WRITE 0x00
+#define UT_READ 0x80
+#define UT_STANDARD 0x00
+#define UT_CLASS 0x20
+#define UT_VENDOR 0x40
+#define UT_DEVICE 0x00
+#define UT_INTERFACE 0x01
+#define UT_ENDPOINT 0x02
+#define UT_OTHER 0x03
+
+#define UT_READ_DEVICE (UT_READ | UT_STANDARD | UT_DEVICE)
+#define UT_READ_INTERFACE (UT_READ | UT_STANDARD | UT_INTERFACE)
+#define UT_READ_ENDPOINT (UT_READ | UT_STANDARD | UT_ENDPOINT)
+#define UT_WRITE_DEVICE (UT_WRITE | UT_STANDARD | UT_DEVICE)
+#define UT_WRITE_INTERFACE (UT_WRITE | UT_STANDARD | UT_INTERFACE)
+#define UT_WRITE_ENDPOINT (UT_WRITE | UT_STANDARD | UT_ENDPOINT)
+#define UT_READ_CLASS_DEVICE (UT_READ | UT_CLASS | UT_DEVICE)
+#define UT_READ_CLASS_INTERFACE (UT_READ | UT_CLASS | UT_INTERFACE)
+#define UT_READ_CLASS_OTHER (UT_READ | UT_CLASS | UT_OTHER)
+#define UT_READ_CLASS_ENDPOINT (UT_READ | UT_CLASS | UT_ENDPOINT)
+#define UT_WRITE_CLASS_DEVICE (UT_WRITE | UT_CLASS | UT_DEVICE)
+#define UT_WRITE_CLASS_INTERFACE (UT_WRITE | UT_CLASS | UT_INTERFACE)
+#define UT_WRITE_CLASS_OTHER (UT_WRITE | UT_CLASS | UT_OTHER)
+#define UT_WRITE_CLASS_ENDPOINT (UT_WRITE | UT_CLASS | UT_ENDPOINT)
+#define UT_READ_VENDOR_DEVICE (UT_READ | UT_VENDOR | UT_DEVICE)
+#define UT_READ_VENDOR_INTERFACE (UT_READ | UT_VENDOR | UT_INTERFACE)
+#define UT_READ_VENDOR_OTHER (UT_READ | UT_VENDOR | UT_OTHER)
+#define UT_READ_VENDOR_ENDPOINT (UT_READ | UT_VENDOR | UT_ENDPOINT)
+#define UT_WRITE_VENDOR_DEVICE (UT_WRITE | UT_VENDOR | UT_DEVICE)
+#define UT_WRITE_VENDOR_INTERFACE (UT_WRITE | UT_VENDOR | UT_INTERFACE)
+#define UT_WRITE_VENDOR_OTHER (UT_WRITE | UT_VENDOR | UT_OTHER)
+#define UT_WRITE_VENDOR_ENDPOINT (UT_WRITE | UT_VENDOR | UT_ENDPOINT)
+
+/* Requests */
+#define UR_GET_STATUS 0x00
+#define UR_CLEAR_FEATURE 0x01
+#define UR_SET_FEATURE 0x03
+#define UR_SET_ADDRESS 0x05
+#define UR_GET_DESCRIPTOR 0x06
+#define UDESC_DEVICE 0x01
+#define UDESC_CONFIG 0x02
+#define UDESC_STRING 0x03
+#define USB_LANGUAGE_TABLE 0x00 /* language ID string index */
+#define UDESC_INTERFACE 0x04
+#define UDESC_ENDPOINT 0x05
+#define UDESC_DEVICE_QUALIFIER 0x06
+#define UDESC_OTHER_SPEED_CONFIGURATION 0x07
+#define UDESC_INTERFACE_POWER 0x08
+#define UDESC_OTG 0x09
+#define UDESC_DEBUG 0x0A
+#define UDESC_IFACE_ASSOC 0x0B /* interface association */
+#define UDESC_BOS 0x0F /* binary object store */
+#define UDESC_DEVICE_CAPABILITY 0x10
+#define UDESC_CS_DEVICE 0x21 /* class specific */
+#define UDESC_CS_CONFIG 0x22
+#define UDESC_CS_STRING 0x23
+#define UDESC_CS_INTERFACE 0x24
+#define UDESC_CS_ENDPOINT 0x25
+#define UDESC_HUB 0x29
+#define UDESC_SS_HUB 0x2A /* super speed */
+#define UDESC_ENDPOINT_SS_COMP 0x30 /* super speed */
+#define UR_SET_DESCRIPTOR 0x07
+#define UR_GET_CONFIG 0x08
+#define UR_SET_CONFIG 0x09
+#define UR_GET_INTERFACE 0x0a
+#define UR_SET_INTERFACE 0x0b
+#define UR_SYNCH_FRAME 0x0c
+#define UR_SET_SEL 0x30
+#define UR_ISOCH_DELAY 0x31
+
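These constants combine into complete control requests; for example, a
standard GET_DESCRIPTOR request for the device descriptor could be built
as follows (a sketch, assuming the USETW/USETW2 setters from
usb_endian.h):

	struct usb_device_request req;

	req.bmRequestType = UT_READ_DEVICE;	/* 0x80: IN, standard, device */
	req.bRequest = UR_GET_DESCRIPTOR;
	USETW2(req.wValue, UDESC_DEVICE, 0);	/* type in high byte, index 0 */
	USETW(req.wIndex, 0);
	USETW(req.wLength, 18);			/* full device descriptor */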
+/* HUB specific request */
+#define UR_GET_BUS_STATE 0x02
+#define UR_CLEAR_TT_BUFFER 0x08
+#define UR_RESET_TT 0x09
+#define UR_GET_TT_STATE 0x0a
+#define UR_STOP_TT 0x0b
+#define UR_SET_HUB_DEPTH 0x0c
+#define USB_SS_HUB_DEPTH_MAX 5
+#define UR_GET_PORT_ERR_COUNT 0x0d
+
+/* Feature numbers */
+#define UF_ENDPOINT_HALT 0
+#define UF_DEVICE_REMOTE_WAKEUP 1
+#define UF_TEST_MODE 2
+#define UF_U1_ENABLE 0x30
+#define UF_U2_ENABLE 0x31
+#define UF_LTM_ENABLE 0x32
+
+/* HUB specific features */
+#define UHF_C_HUB_LOCAL_POWER 0
+#define UHF_C_HUB_OVER_CURRENT 1
+#define UHF_PORT_CONNECTION 0
+#define UHF_PORT_ENABLE 1
+#define UHF_PORT_SUSPEND 2
+#define UHF_PORT_OVER_CURRENT 3
+#define UHF_PORT_RESET 4
+#define UHF_PORT_LINK_STATE 5
+#define UHF_PORT_POWER 8
+#define UHF_PORT_LOW_SPEED 9
+#define UHF_C_PORT_CONNECTION 16
+#define UHF_C_PORT_ENABLE 17
+#define UHF_C_PORT_SUSPEND 18
+#define UHF_C_PORT_OVER_CURRENT 19
+#define UHF_C_PORT_RESET 20
+#define UHF_PORT_TEST 21
+#define UHF_PORT_INDICATOR 22
+
+/* SuperSpeed HUB specific features */
+#define UHF_PORT_U1_TIMEOUT 23
+#define UHF_PORT_U2_TIMEOUT 24
+#define UHF_C_PORT_LINK_STATE 25
+#define UHF_C_PORT_CONFIG_ERROR 26
+#define UHF_PORT_REMOTE_WAKE_MASK 27
+#define UHF_BH_PORT_RESET 28
+#define UHF_C_BH_PORT_RESET 29
+#define UHF_FORCE_LINKPM_ACCEPT 30
+
+struct usb_descriptor {
+ uByte bLength;
+ uByte bDescriptorType;
+ uByte bDescriptorSubtype;
+} __packed;
+typedef struct usb_descriptor usb_descriptor_t;
+
+struct usb_device_descriptor {
+ uByte bLength;
+ uByte bDescriptorType;
+ uWord bcdUSB;
+#define UD_USB_2_0 0x0200
+#define UD_USB_3_0 0x0300
+#define UD_IS_USB2(d) ((d)->bcdUSB[1] == 0x02)
+#define UD_IS_USB3(d) ((d)->bcdUSB[1] == 0x03)
+ uByte bDeviceClass;
+ uByte bDeviceSubClass;
+ uByte bDeviceProtocol;
+ uByte bMaxPacketSize;
+ /* The fields below are not part of the initial descriptor. */
+ uWord idVendor;
+ uWord idProduct;
+ uWord bcdDevice;
+ uByte iManufacturer;
+ uByte iProduct;
+ uByte iSerialNumber;
+ uByte bNumConfigurations;
+} __packed;
+typedef struct usb_device_descriptor usb_device_descriptor_t;
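Since uWord fields are little-endian byte arrays rather than native
integers, they are read through accessors such as UGETW from
usb_endian.h (a sketch; "dd" is an assumed descriptor pointer):

	uint16_t vendor = UGETW(dd->idVendor);
	uint16_t bcd = UGETW(dd->bcdUSB);	/* 0x0200 on a USB 2.0 device */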
+
+/* Binary Device Object Store (BOS) */
+struct usb_bos_descriptor {
+ uByte bLength;
+ uByte bDescriptorType;
+ uWord wTotalLength;
+ uByte bNumDeviceCaps;
+} __packed;
+typedef struct usb_bos_descriptor usb_bos_descriptor_t;
+
+/* Binary Device Object Store Capability */
+struct usb_bos_cap_descriptor {
+ uByte bLength;
+ uByte bDescriptorType;
+ uByte bDevCapabilityType;
+#define USB_DEVCAP_RESERVED 0x00
+#define USB_DEVCAP_WUSB 0x01
+#define USB_DEVCAP_USB2EXT 0x02
+#define USB_DEVCAP_SUPER_SPEED 0x03
+#define USB_DEVCAP_CONTAINER_ID 0x04
+ /* data ... */
+} __packed;
+typedef struct usb_bos_cap_descriptor usb_bos_cap_descriptor_t;
+
+struct usb_devcap_usb2ext_descriptor {
+ uByte bLength;
+ uByte bDescriptorType;
+ uByte bDevCapabilityType;
+ uByte bmAttributes;
+#define USB_V2EXT_LPM 0x02
+} __packed;
+typedef struct usb_devcap_usb2ext_descriptor usb_devcap_usb2ext_descriptor_t;
+
+struct usb_devcap_ss_descriptor {
+ uByte bLength;
+ uByte bDescriptorType;
+ uByte bDevCapabilityType;
+ uByte bmAttributes;
+ uWord wSpeedsSupported;
+ uByte bFunctionalitySupport;
+ uByte bU1DevExitLat;
+ uByte bU2DevExitLat;
+} __packed;
+typedef struct usb_devcap_ss_descriptor usb_devcap_ss_descriptor_t;
+
+struct usb_devcap_container_id_descriptor {
+ uByte bLength;
+ uByte bDescriptorType;
+ uByte bDevCapabilityType;
+ uByte bReserved;
+ uByte bContainerID;
+} __packed;
+typedef struct usb_devcap_container_id_descriptor
+ usb_devcap_container_id_descriptor_t;
+
+/* Device class codes */
+#define UDCLASS_IN_INTERFACE 0x00
+#define UDCLASS_COMM 0x02
+#define UDCLASS_HUB 0x09
+#define UDSUBCLASS_HUB 0x00
+#define UDPROTO_FSHUB 0x00
+#define UDPROTO_HSHUBSTT 0x01
+#define UDPROTO_HSHUBMTT 0x02
+#define UDPROTO_SSHUB 0x03
+#define UDCLASS_DIAGNOSTIC 0xdc
+#define UDCLASS_WIRELESS 0xe0
+#define UDSUBCLASS_RF 0x01
+#define UDPROTO_BLUETOOTH 0x01
+#define UDCLASS_VENDOR 0xff
+
+struct usb_config_descriptor {
+ uByte bLength;
+ uByte bDescriptorType;
+ uWord wTotalLength;
+ uByte bNumInterface;
+ uByte bConfigurationValue;
+#define USB_UNCONFIG_NO 0
+ uByte iConfiguration;
+ uByte bmAttributes;
+#define UC_BUS_POWERED 0x80
+#define UC_SELF_POWERED 0x40
+#define UC_REMOTE_WAKEUP 0x20
+ uByte bMaxPower; /* max current in 2 mA units */
+#define UC_POWER_FACTOR 2
+} __packed;
+typedef struct usb_config_descriptor usb_config_descriptor_t;
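bMaxPower is stored in 2 mA units, so the advertised maximum current is
recovered by scaling with UC_POWER_FACTOR (a sketch; "cd" is an assumed
configuration descriptor pointer):

	uint16_t max_ma = cd->bMaxPower * UC_POWER_FACTOR;	/* 250 -> 500 mA */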
+
+struct usb_interface_descriptor {
+ uByte bLength;
+ uByte bDescriptorType;
+ uByte bInterfaceNumber;
+ uByte bAlternateSetting;
+ uByte bNumEndpoints;
+ uByte bInterfaceClass;
+ uByte bInterfaceSubClass;
+ uByte bInterfaceProtocol;
+ uByte iInterface;
+} __packed;
+typedef struct usb_interface_descriptor usb_interface_descriptor_t;
+
+struct usb_interface_assoc_descriptor {
+ uByte bLength;
+ uByte bDescriptorType;
+ uByte bFirstInterface;
+ uByte bInterfaceCount;
+ uByte bFunctionClass;
+ uByte bFunctionSubClass;
+ uByte bFunctionProtocol;
+ uByte iFunction;
+} __packed;
+typedef struct usb_interface_assoc_descriptor usb_interface_assoc_descriptor_t;
+
+/* Interface class codes */
+#define UICLASS_UNSPEC 0x00
+#define UICLASS_AUDIO 0x01 /* audio */
+#define UISUBCLASS_AUDIOCONTROL 1
+#define UISUBCLASS_AUDIOSTREAM 2
+#define UISUBCLASS_MIDISTREAM 3
+
+#define UICLASS_CDC 0x02 /* communication */
+#define UISUBCLASS_DIRECT_LINE_CONTROL_MODEL 1
+#define UISUBCLASS_ABSTRACT_CONTROL_MODEL 2
+#define UISUBCLASS_TELEPHONE_CONTROL_MODEL 3
+#define UISUBCLASS_MULTICHANNEL_CONTROL_MODEL 4
+#define UISUBCLASS_CAPI_CONTROLMODEL 5
+#define UISUBCLASS_ETHERNET_NETWORKING_CONTROL_MODEL 6
+#define UISUBCLASS_ATM_NETWORKING_CONTROL_MODEL 7
+#define UISUBCLASS_WIRELESS_HANDSET_CM 8
+#define UISUBCLASS_DEVICE_MGMT 9
+#define UISUBCLASS_MOBILE_DIRECT_LINE_MODEL 10
+#define UISUBCLASS_OBEX 11
+#define UISUBCLASS_ETHERNET_EMULATION_MODEL 12
+#define UISUBCLASS_NETWORK_CONTROL_MODEL 13
+
+#define UIPROTO_CDC_AT 1
+
+#define UICLASS_HID 0x03
+#define UISUBCLASS_BOOT 1
+#define UIPROTO_BOOT_KEYBOARD 1
+#define UIPROTO_MOUSE 2
+
+#define UICLASS_PHYSICAL 0x05
+#define UICLASS_IMAGE 0x06
+#define UISUBCLASS_SIC 1 /* still image class */
+#define UICLASS_PRINTER 0x07
+#define UISUBCLASS_PRINTER 1
+#define UIPROTO_PRINTER_UNI 1
+#define UIPROTO_PRINTER_BI 2
+#define UIPROTO_PRINTER_1284 3
+
+#define UICLASS_MASS 0x08
+#define UISUBCLASS_RBC 1
+#define UISUBCLASS_SFF8020I 2
+#define UISUBCLASS_QIC157 3
+#define UISUBCLASS_UFI 4
+#define UISUBCLASS_SFF8070I 5
+#define UISUBCLASS_SCSI 6
+#define UIPROTO_MASS_CBI_I 0
+#define UIPROTO_MASS_CBI 1
+#define UIPROTO_MASS_BBB_OLD 2 /* Not in the spec anymore */
+#define UIPROTO_MASS_BBB 80 /* 'P' for the Iomega Zip drive */
+
+#define UICLASS_HUB 0x09
+#define UISUBCLASS_HUB 0
+#define UIPROTO_FSHUB 0
+#define UIPROTO_HSHUBSTT 0 /* Yes, same as previous */
+#define UIPROTO_HSHUBMTT 1
+
+#define UICLASS_CDC_DATA 0x0a
+#define UISUBCLASS_DATA 0x00
+#define UIPROTO_DATA_ISDNBRI 0x30 /* Physical iface */
+#define UIPROTO_DATA_HDLC 0x31 /* HDLC */
+#define UIPROTO_DATA_TRANSPARENT 0x32 /* Transparent */
+#define UIPROTO_DATA_Q921M 0x50 /* Management for Q921 */
+#define UIPROTO_DATA_Q921 0x51 /* Data for Q921 */
+#define UIPROTO_DATA_Q921TM 0x52 /* TEI multiplexer for Q921 */
+#define UIPROTO_DATA_V42BIS 0x90 /* Data compression */
+#define UIPROTO_DATA_Q931 0x91 /* Euro-ISDN */
+#define UIPROTO_DATA_V120 0x92 /* V.24 rate adaption */
+#define UIPROTO_DATA_CAPI 0x93 /* CAPI 2.0 commands */
+#define UIPROTO_DATA_HOST_BASED 0xfd /* Host based driver */
+#define UIPROTO_DATA_PUF 0xfe /* see Prot. Unit Func. Desc. */
+#define UIPROTO_DATA_VENDOR 0xff /* Vendor specific */
+#define UIPROTO_DATA_NCM 0x01 /* Network Control Model */
+
+#define UICLASS_SMARTCARD 0x0b
+#define UICLASS_FIRM_UPD 0x0c
+#define UICLASS_SECURITY 0x0d
+#define UICLASS_DIAGNOSTIC 0xdc
+#define UICLASS_WIRELESS 0xe0
+#define UISUBCLASS_RF 0x01
+#define UIPROTO_BLUETOOTH 0x01
+
+#define UICLASS_IAD 0xEF /* Interface Association Descriptor */
+
+#define UICLASS_APPL_SPEC 0xfe
+#define UISUBCLASS_FIRMWARE_DOWNLOAD 1
+#define UISUBCLASS_IRDA 2
+#define UIPROTO_IRDA 0
+
+#define UICLASS_VENDOR 0xff
+#define UISUBCLASS_XBOX360_CONTROLLER 0x5d
+#define UIPROTO_XBOX360_GAMEPAD 0x01
+
+struct usb_endpoint_descriptor {
+ uByte bLength;
+ uByte bDescriptorType;
+ uByte bEndpointAddress;
+#define UE_GET_DIR(a) ((a) & 0x80)
+#define UE_SET_DIR(a,d) ((a) | (((d)&1) << 7))
+#define UE_DIR_IN 0x80 /* IN-token endpoint, fixed */
+#define UE_DIR_OUT 0x00 /* OUT-token endpoint, fixed */
+#define UE_DIR_RX 0xfd /* for internal use only! */
+#define UE_DIR_TX 0xfe /* for internal use only! */
+#define UE_DIR_ANY 0xff /* for internal use only! */
+#define UE_ADDR 0x0f
+#define UE_ADDR_ANY 0xff /* for internal use only! */
+#define UE_GET_ADDR(a) ((a) & UE_ADDR)
+ uByte bmAttributes;
+#define UE_XFERTYPE 0x03
+#define UE_CONTROL 0x00
+#define UE_ISOCHRONOUS 0x01
+#define UE_BULK 0x02
+#define UE_INTERRUPT 0x03
+#define UE_BULK_INTR 0xfe /* for internal use only! */
+#define UE_TYPE_ANY 0xff /* for internal use only! */
+#define UE_GET_XFERTYPE(a) ((a) & UE_XFERTYPE)
+#define UE_ISO_TYPE 0x0c
+#define UE_ISO_ASYNC 0x04
+#define UE_ISO_ADAPT 0x08
+#define UE_ISO_SYNC 0x0c
+#define UE_GET_ISO_TYPE(a) ((a) & UE_ISO_TYPE)
+ uWord wMaxPacketSize;
+#define UE_ZERO_MPS 0xFFFF /* for internal use only */
+ uByte bInterval;
+} __packed;
+typedef struct usb_endpoint_descriptor usb_endpoint_descriptor_t;
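The accessor macros make endpoint classification a pair of mask
operations; a minimal sketch:

	/* Identify a bulk-IN endpoint from its descriptor. */
	static int
	is_bulk_in(const struct usb_endpoint_descriptor *ed)
	{
		return (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN &&
		    UE_GET_XFERTYPE(ed->bmAttributes) == UE_BULK);
	}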
+
+struct usb_endpoint_ss_comp_descriptor {
+ uByte bLength;
+ uByte bDescriptorType;
+ uByte bMaxBurst;
+ uByte bmAttributes;
+ uWord wBytesPerInterval;
+} __packed;
+typedef struct usb_endpoint_ss_comp_descriptor
+ usb_endpoint_ss_comp_descriptor_t;
+
+struct usb_string_descriptor {
+ uByte bLength;
+ uByte bDescriptorType;
+ uWord bString[126];
+ uByte bUnused;
+} __packed;
+typedef struct usb_string_descriptor usb_string_descriptor_t;
+
+#define USB_MAKE_STRING_DESC(m,name) \
+struct name { \
+ uByte bLength; \
+ uByte bDescriptorType; \
+ uByte bData[sizeof((uint8_t []){m})]; \
+} __packed; \
+static const struct name name = { \
+ .bLength = sizeof(struct name), \
+ .bDescriptorType = UDESC_STRING, \
+ .bData = { m }, \
+}
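The macro declares a packed descriptor type sized exactly to the string
data and instantiates it in one step; a hedged usage sketch (the LANGID
helper macro is hypothetical):

	#define STRING_LANG 0x09, 0x04		/* LANGID 0x0409, English (US) */
	USB_MAKE_STRING_DESC(STRING_LANG, usb_string_lang);
	/* usb_string_lang.bLength == 4: two header bytes plus two data bytes */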
+
+struct usb_hub_descriptor {
+ uByte bDescLength;
+ uByte bDescriptorType;
+ uByte bNbrPorts;
+ uWord wHubCharacteristics;
+#define UHD_PWR 0x0003
+#define UHD_PWR_GANGED 0x0000
+#define UHD_PWR_INDIVIDUAL 0x0001
+#define UHD_PWR_NO_SWITCH 0x0002
+#define UHD_COMPOUND 0x0004
+#define UHD_OC 0x0018
+#define UHD_OC_GLOBAL 0x0000
+#define UHD_OC_INDIVIDUAL 0x0008
+#define UHD_OC_NONE 0x0010
+#define UHD_TT_THINK 0x0060
+#define UHD_TT_THINK_8 0x0000
+#define UHD_TT_THINK_16 0x0020
+#define UHD_TT_THINK_24 0x0040
+#define UHD_TT_THINK_32 0x0060
+#define UHD_PORT_IND 0x0080
+ uByte bPwrOn2PwrGood; /* delay in 2 ms units */
+#define UHD_PWRON_FACTOR 2
+ uByte bHubContrCurrent;
+ uByte DeviceRemovable[32]; /* max 255 ports */
+#define UHD_NOT_REMOV(desc, i) \
+ (((desc)->DeviceRemovable[(i)/8] >> ((i) % 8)) & 1)
+ uByte PortPowerCtrlMask[1]; /* deprecated */
+} __packed;
+typedef struct usb_hub_descriptor usb_hub_descriptor_t;
+
+struct usb_hub_ss_descriptor {
+ uByte bLength;
+ uByte bDescriptorType;
+ uByte bNbrPorts;
+ uWord wHubCharacteristics;
+ uByte bPwrOn2PwrGood; /* delay in 2 ms units */
+ uByte bHubContrCurrent;
+ uByte bHubHdrDecLat;
+ uWord wHubDelay;
+ uByte DeviceRemovable[32]; /* max 255 ports */
+} __packed;
+typedef struct usb_hub_ss_descriptor usb_hub_ss_descriptor_t;
+
+/* minimum HUB descriptor (8-ports maximum) */
+struct usb_hub_descriptor_min {
+ uByte bDescLength;
+ uByte bDescriptorType;
+ uByte bNbrPorts;
+ uWord wHubCharacteristics;
+ uByte bPwrOn2PwrGood;
+ uByte bHubContrCurrent;
+ uByte DeviceRemovable[1];
+ uByte PortPowerCtrlMask[1];
+} __packed;
+typedef struct usb_hub_descriptor_min usb_hub_descriptor_min_t;
+
+struct usb_device_qualifier {
+ uByte bLength;
+ uByte bDescriptorType;
+ uWord bcdUSB;
+ uByte bDeviceClass;
+ uByte bDeviceSubClass;
+ uByte bDeviceProtocol;
+ uByte bMaxPacketSize0;
+ uByte bNumConfigurations;
+ uByte bReserved;
+} __packed;
+typedef struct usb_device_qualifier usb_device_qualifier_t;
+
+struct usb_otg_descriptor {
+ uByte bLength;
+ uByte bDescriptorType;
+ uByte bmAttributes;
+#define UOTG_SRP 0x01
+#define UOTG_HNP 0x02
+} __packed;
+typedef struct usb_otg_descriptor usb_otg_descriptor_t;
+
+/* OTG feature selectors */
+#define UOTG_B_HNP_ENABLE 3
+#define UOTG_A_HNP_SUPPORT 4
+#define UOTG_A_ALT_HNP_SUPPORT 5
+
+struct usb_status {
+ uWord wStatus;
+/* Device status flags */
+#define UDS_SELF_POWERED 0x0001
+#define UDS_REMOTE_WAKEUP 0x0002
+/* Endpoint status flags */
+#define UES_HALT 0x0001
+} __packed;
+typedef struct usb_status usb_status_t;
+
+struct usb_hub_status {
+ uWord wHubStatus;
+#define UHS_LOCAL_POWER 0x0001
+#define UHS_OVER_CURRENT 0x0002
+ uWord wHubChange;
+} __packed;
+typedef struct usb_hub_status usb_hub_status_t;
+
+struct usb_port_status {
+ uWord wPortStatus;
+#define UPS_CURRENT_CONNECT_STATUS 0x0001
+#define UPS_PORT_ENABLED 0x0002
+#define UPS_SUSPEND 0x0004
+#define UPS_OVERCURRENT_INDICATOR 0x0008
+#define UPS_RESET 0x0010
+/* The link-state bits are valid for Super-Speed USB HUBs */
+#define UPS_PORT_LINK_STATE_GET(x) (((x) >> 5) & 0xF)
+#define UPS_PORT_LINK_STATE_SET(x) (((x) & 0xF) << 5)
+#define UPS_PORT_LS_U0 0x00
+#define UPS_PORT_LS_U1 0x01
+#define UPS_PORT_LS_U2 0x02
+#define UPS_PORT_LS_U3 0x03
+#define UPS_PORT_LS_SS_DIS 0x04
+#define UPS_PORT_LS_RX_DET 0x05
+#define UPS_PORT_LS_SS_INA 0x06
+#define UPS_PORT_LS_POLL 0x07
+#define UPS_PORT_LS_RECOVER 0x08
+#define UPS_PORT_LS_HOT_RST 0x09
+#define UPS_PORT_LS_COMP_MODE 0x0A
+#define UPS_PORT_LS_LOOPBACK 0x0B
+#define UPS_PORT_POWER 0x0100
+#define UPS_LOW_SPEED 0x0200
+#define UPS_HIGH_SPEED 0x0400
+#define UPS_OTHER_SPEED 0x0600 /* currently FreeBSD specific */
+#define UPS_PORT_TEST 0x0800
+#define UPS_PORT_INDICATOR 0x1000
+#define UPS_PORT_MODE_DEVICE 0x8000 /* currently FreeBSD specific */
+ uWord wPortChange;
+#define UPS_C_CONNECT_STATUS 0x0001
+#define UPS_C_PORT_ENABLED 0x0002
+#define UPS_C_SUSPEND 0x0004
+#define UPS_C_OVERCURRENT_INDICATOR 0x0008
+#define UPS_C_PORT_RESET 0x0010
+#define UPS_C_BH_PORT_RESET 0x0020
+#define UPS_C_PORT_LINK_STATE 0x0040
+#define UPS_C_PORT_CONFIG_ERROR 0x0080
+} __packed;
+typedef struct usb_port_status usb_port_status_t;
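The link-state field is extracted with the GET macro and compared against
the UPS_PORT_LS_* values; a sketch (UGETW from usb_endian.h, "ps" an
assumed pointer):

	uint16_t status = UGETW(ps->wPortStatus);

	if (UPS_PORT_LINK_STATE_GET(status) == UPS_PORT_LS_U3) {
		/* the SuperSpeed link is suspended */
	}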
+
+/*
+ * The "USB_SPEED" macros defines all the supported USB speeds.
+ */
+enum usb_dev_speed {
+ USB_SPEED_VARIABLE,
+ USB_SPEED_LOW,
+ USB_SPEED_FULL,
+ USB_SPEED_HIGH,
+ USB_SPEED_SUPER,
+};
+#define USB_SPEED_MAX (USB_SPEED_SUPER+1)
+
+/*
+ * The "USB_REV" macros defines all the supported USB revisions.
+ */
+enum usb_revision {
+ USB_REV_UNKNOWN,
+ USB_REV_PRE_1_0,
+ USB_REV_1_0,
+ USB_REV_1_1,
+ USB_REV_2_0,
+ USB_REV_2_5,
+ USB_REV_3_0
+};
+#define USB_REV_MAX (USB_REV_3_0+1)
+
+/*
+ * Supported host controller modes.
+ */
+enum usb_hc_mode {
+ USB_MODE_HOST, /* initiates transfers */
+ USB_MODE_DEVICE, /* bus transfer target */
+ USB_MODE_DUAL /* can be host or device */
+};
+#define USB_MODE_MAX (USB_MODE_DUAL+1)
+
+/*
+ * The "USB_MODE" macros defines all the supported device states.
+ */
+enum usb_dev_state {
+ USB_STATE_DETACHED,
+ USB_STATE_ATTACHED,
+ USB_STATE_POWERED,
+ USB_STATE_ADDRESSED,
+ USB_STATE_CONFIGURED,
+};
+#define USB_STATE_MAX (USB_STATE_CONFIGURED+1)
+#endif /* _USB_STANDARD_HH_ */
diff --git a/rtems/freebsd/dev/usb/usb_bus.h b/rtems/freebsd/dev/usb/usb_bus.h
new file mode 100644
index 00000000..b437fac0
--- /dev/null
+++ b/rtems/freebsd/dev/usb/usb_bus.h
@@ -0,0 +1,114 @@
+/* $FreeBSD$ */
+/*-
+ * Copyright (c) 2008 Hans Petter Selasky. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _USB_BUS_HH_
+#define _USB_BUS_HH_
+
+/*
+ * The following structure defines the USB explore message sent to the USB
+ * explore process.
+ */
+
+struct usb_bus_msg {
+ struct usb_proc_msg hdr;
+ struct usb_bus *bus;
+};
+
+/*
+ * The following structure defines the USB statistics structure.
+ */
+struct usb_bus_stat {
+ uint32_t uds_requests[4];
+};
+
+/*
+ * The following structure defines a USB BUS. There is one USB BUS
+ * for every Host or Device controller.
+ */
+struct usb_bus {
+ struct usb_bus_stat stats_err;
+ struct usb_bus_stat stats_ok;
+#ifndef __rtems__
+ struct root_hold_token *bus_roothold;
+#endif /* __rtems__ */
+ /*
+ * There are two callback processes. One for Giant locked
+ * callbacks. One for non-Giant locked callbacks. This should
+ * avoid congestion and reduce response time in most cases.
+ */
+ struct usb_process giant_callback_proc;
+ struct usb_process non_giant_callback_proc;
+
+ /* Explore process */
+ struct usb_process explore_proc;
+
+ /* Control request process */
+ struct usb_process control_xfer_proc;
+
+ struct usb_bus_msg explore_msg[2];
+ struct usb_bus_msg detach_msg[2];
+ struct usb_bus_msg attach_msg[2];
+ /*
+ * This mutex protects the USB hardware:
+ */
+ struct mtx bus_mtx;
+ struct usb_xfer_queue intr_q;
+ struct usb_callout power_wdog; /* power management */
+
+ device_t parent;
+ device_t bdev; /* filled by HC driver */
+
+#if USB_HAVE_BUSDMA
+ struct usb_dma_parent_tag dma_parent_tag[1];
+ struct usb_dma_tag dma_tags[USB_BUS_DMA_TAG_MAX];
+#endif
+ struct usb_bus_methods *methods; /* filled by HC driver */
+ struct usb_device **devices;
+
+ usb_power_mask_t hw_power_state; /* see USB_HW_POWER_XXX */
+ usb_size_t uframe_usage[USB_HS_MICRO_FRAMES_MAX];
+
+ uint16_t isoc_time_last; /* in milliseconds */
+
+ uint8_t alloc_failed; /* Set if memory allocation failed. */
+ uint8_t driver_added_refcount; /* Current driver generation count */
+ enum usb_revision usbrev; /* USB revision. See "USB_REV_XXX". */
+
+ uint8_t devices_max; /* maximum number of USB devices */
+ uint8_t do_probe; /* set if USB BUS should be re-probed */
+
+ /*
+ * The scratch area can only be used inside the explore thread
+ * belonging to the given serial bus.
+ */
+ union {
+ struct usb_hw_ep_scratch hw_ep_scratch[1];
+ struct usb_temp_setup temp_setup[1];
+ uint8_t data[255];
+ } scratch[1];
+};
+
+#endif /* _USB_BUS_HH_ */
diff --git a/rtems/freebsd/dev/usb/usb_busdma.c b/rtems/freebsd/dev/usb/usb_busdma.c
new file mode 100644
index 00000000..107fc1d2
--- /dev/null
+++ b/rtems/freebsd/dev/usb/usb_busdma.c
@@ -0,0 +1,1071 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/* $FreeBSD$ */
+/*-
+ * Copyright (c) 2008 Hans Petter Selasky. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <rtems/freebsd/sys/stdint.h>
+#include <rtems/freebsd/sys/stddef.h>
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/queue.h>
+#include <rtems/freebsd/sys/types.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/bus.h>
+#include <rtems/freebsd/sys/linker_set.h>
+#include <rtems/freebsd/sys/module.h>
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/mutex.h>
+#include <rtems/freebsd/sys/condvar.h>
+#include <rtems/freebsd/sys/sysctl.h>
+#include <rtems/freebsd/sys/sx.h>
+#include <rtems/freebsd/sys/unistd.h>
+#include <rtems/freebsd/sys/callout.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/priv.h>
+
+#include <rtems/freebsd/dev/usb/usb.h>
+#include <rtems/freebsd/dev/usb/usbdi.h>
+#include <rtems/freebsd/dev/usb/usbdi_util.h>
+
+#define USB_DEBUG_VAR usb_debug
+
+#include <rtems/freebsd/dev/usb/usb_core.h>
+#include <rtems/freebsd/dev/usb/usb_busdma.h>
+#include <rtems/freebsd/dev/usb/usb_process.h>
+#include <rtems/freebsd/dev/usb/usb_transfer.h>
+#include <rtems/freebsd/dev/usb/usb_device.h>
+#include <rtems/freebsd/dev/usb/usb_util.h>
+#include <rtems/freebsd/dev/usb/usb_debug.h>
+
+#include <rtems/freebsd/dev/usb/usb_controller.h>
+#include <rtems/freebsd/dev/usb/usb_bus.h>
+
+#if USB_HAVE_BUSDMA
+static void usb_dma_tag_create(struct usb_dma_tag *, usb_size_t, usb_size_t);
+static void usb_dma_tag_destroy(struct usb_dma_tag *);
+static void usb_dma_lock_cb(void *, bus_dma_lock_op_t);
+static void usb_pc_alloc_mem_cb(void *, bus_dma_segment_t *, int, int);
+static void usb_pc_load_mem_cb(void *, bus_dma_segment_t *, int, int);
+static void usb_pc_common_mem_cb(void *, bus_dma_segment_t *, int, int,
+ uint8_t);
+#endif
+
+/*------------------------------------------------------------------------*
+ * usbd_get_page - lookup DMA-able memory for the given offset
+ *
+ * NOTE: Only call this function when the "page_cache" structure has
+ * been properly initialized !
+ *------------------------------------------------------------------------*/
+void
+usbd_get_page(struct usb_page_cache *pc, usb_frlength_t offset,
+ struct usb_page_search *res)
+{
+ struct usb_page *page;
+
+#if USB_HAVE_BUSDMA
+ if (pc->page_start) {
+
+ /* Case 1 - something has been loaded into DMA */
+
+ if (pc->buffer) {
+
+ /* Case 1a - Kernel Virtual Address */
+
+ res->buffer = USB_ADD_BYTES(pc->buffer, offset);
+ }
+ offset += pc->page_offset_buf;
+
+ /* compute destination page */
+
+ page = pc->page_start;
+
+ if (pc->ismultiseg) {
+
+ page += (offset / USB_PAGE_SIZE);
+
+ offset %= USB_PAGE_SIZE;
+
+ res->length = USB_PAGE_SIZE - offset;
+ res->physaddr = page->physaddr + offset;
+ } else {
+ res->length = 0 - 1;	/* underflows to the maximum length */
+ res->physaddr = page->physaddr + offset;
+ }
+ if (!pc->buffer) {
+
+ /* Case 1b - Non Kernel Virtual Address */
+
+ res->buffer = USB_ADD_BYTES(page->buffer, offset);
+ }
+ return;
+ }
+#endif
+ /* Case 2 - Plain PIO */
+
+ res->buffer = USB_ADD_BYTES(pc->buffer, offset);
+ res->length = 0 - 1;	/* underflows to the maximum length */
+#if USB_HAVE_BUSDMA
+ res->physaddr = 0;
+#endif
+}
+
+/*------------------------------------------------------------------------*
+ * usbd_copy_in - copy directly to DMA-able memory
+ *------------------------------------------------------------------------*/
+void
+usbd_copy_in(struct usb_page_cache *cache, usb_frlength_t offset,
+ const void *ptr, usb_frlength_t len)
+{
+ struct usb_page_search buf_res;
+
+ while (len != 0) {
+
+ usbd_get_page(cache, offset, &buf_res);
+
+ if (buf_res.length > len) {
+ buf_res.length = len;
+ }
+ bcopy(ptr, buf_res.buffer, buf_res.length);
+
+ offset += buf_res.length;
+ len -= buf_res.length;
+ ptr = USB_ADD_BYTES(ptr, buf_res.length);
+ }
+}
+
+/*------------------------------------------------------------------------*
+ * usbd_copy_in_user - copy directly to DMA-able memory from userland
+ *
+ * Return values:
+ * 0: Success
+ * Else: Failure
+ *------------------------------------------------------------------------*/
+#if USB_HAVE_USER_IO
+int
+usbd_copy_in_user(struct usb_page_cache *cache, usb_frlength_t offset,
+ const void *ptr, usb_frlength_t len)
+{
+ struct usb_page_search buf_res;
+ int error;
+
+ while (len != 0) {
+
+ usbd_get_page(cache, offset, &buf_res);
+
+ if (buf_res.length > len) {
+ buf_res.length = len;
+ }
+ error = copyin(ptr, buf_res.buffer, buf_res.length);
+ if (error)
+ return (error);
+
+ offset += buf_res.length;
+ len -= buf_res.length;
+ ptr = USB_ADD_BYTES(ptr, buf_res.length);
+ }
+ return (0); /* success */
+}
+#endif
+
+/*------------------------------------------------------------------------*
+ * usbd_m_copy_in - copy a mbuf chain directly into DMA-able memory
+ *------------------------------------------------------------------------*/
+#if USB_HAVE_MBUF
+struct usb_m_copy_in_arg {
+ struct usb_page_cache *cache;
+ usb_frlength_t dst_offset;
+};
+
+static int
+usbd_m_copy_in_cb(void *arg, void *src, uint32_t count)
+{
+ register struct usb_m_copy_in_arg *ua = arg;
+
+ usbd_copy_in(ua->cache, ua->dst_offset, src, count);
+ ua->dst_offset += count;
+ return (0);
+}
+
+void
+usbd_m_copy_in(struct usb_page_cache *cache, usb_frlength_t dst_offset,
+ struct mbuf *m, usb_size_t src_offset, usb_frlength_t src_len)
+{
+ struct usb_m_copy_in_arg arg = {cache, dst_offset};
+ int error;
+
+ error = m_apply(m, src_offset, src_len, &usbd_m_copy_in_cb, &arg);
+}
+#endif
+
+/*------------------------------------------------------------------------*
+ * usb_uiomove - factored out code
+ *------------------------------------------------------------------------*/
+#if USB_HAVE_USER_IO
+int
+usb_uiomove(struct usb_page_cache *pc, struct uio *uio,
+ usb_frlength_t pc_offset, usb_frlength_t len)
+{
+ struct usb_page_search res;
+ int error = 0;
+
+ while (len != 0) {
+
+ usbd_get_page(pc, pc_offset, &res);
+
+ if (res.length > len) {
+ res.length = len;
+ }
+ /*
+ * "uiomove()" can sleep so one needs to make a wrapper,
+ * exiting the mutex and checking things
+ */
+ error = uiomove(res.buffer, res.length, uio);
+
+ if (error) {
+ break;
+ }
+ pc_offset += res.length;
+ len -= res.length;
+ }
+ return (error);
+}
+#endif
+
+/*------------------------------------------------------------------------*
+ * usbd_copy_out - copy directly from DMA-able memory
+ *------------------------------------------------------------------------*/
+void
+usbd_copy_out(struct usb_page_cache *cache, usb_frlength_t offset,
+ void *ptr, usb_frlength_t len)
+{
+ struct usb_page_search res;
+
+ while (len != 0) {
+
+ usbd_get_page(cache, offset, &res);
+
+ if (res.length > len) {
+ res.length = len;
+ }
+ bcopy(res.buffer, ptr, res.length);
+
+ offset += res.length;
+ len -= res.length;
+ ptr = USB_ADD_BYTES(ptr, res.length);
+ }
+}
+
+/*------------------------------------------------------------------------*
+ * usbd_copy_out_user - copy directly from DMA-able memory to userland
+ *
+ * Return values:
+ * 0: Success
+ * Else: Failure
+ *------------------------------------------------------------------------*/
+#if USB_HAVE_USER_IO
+int
+usbd_copy_out_user(struct usb_page_cache *cache, usb_frlength_t offset,
+ void *ptr, usb_frlength_t len)
+{
+ struct usb_page_search res;
+ int error;
+
+ while (len != 0) {
+
+ usbd_get_page(cache, offset, &res);
+
+ if (res.length > len) {
+ res.length = len;
+ }
+ error = copyout(res.buffer, ptr, res.length);
+ if (error)
+ return (error);
+
+ offset += res.length;
+ len -= res.length;
+ ptr = USB_ADD_BYTES(ptr, res.length);
+ }
+ return (0); /* success */
+}
+#endif
+
+/*------------------------------------------------------------------------*
+ * usbd_frame_zero - zero DMA-able memory
+ *------------------------------------------------------------------------*/
+void
+usbd_frame_zero(struct usb_page_cache *cache, usb_frlength_t offset,
+ usb_frlength_t len)
+{
+ struct usb_page_search res;
+
+ while (len != 0) {
+
+ usbd_get_page(cache, offset, &res);
+
+ if (res.length > len) {
+ res.length = len;
+ }
+ bzero(res.buffer, res.length);
+
+ offset += res.length;
+ len -= res.length;
+ }
+}
+
+#if USB_HAVE_BUSDMA
+
+/*------------------------------------------------------------------------*
+ * usb_dma_lock_cb - dummy callback
+ *------------------------------------------------------------------------*/
+static void
+usb_dma_lock_cb(void *arg, bus_dma_lock_op_t op)
+{
+ /* we use "mtx_owned()" instead of this function */
+}
+
+/*------------------------------------------------------------------------*
+ * usb_dma_tag_create - allocate a DMA tag
+ *
+ * NOTE: If the "align" parameter has a value of 1 the DMA-tag will
+ * allow multi-segment mappings. Else all mappings are single-segment.
+ *------------------------------------------------------------------------*/
+static void
+usb_dma_tag_create(struct usb_dma_tag *udt,
+ usb_size_t size, usb_size_t align)
+{
+ bus_dma_tag_t tag;
+
+ if (bus_dma_tag_create
+ ( /* parent */ udt->tag_parent->tag,
+ /* alignment */ align,
+ /* boundary */ (align == 1) ?
+ USB_PAGE_SIZE : 0,
+ /* lowaddr */ (2ULL << (udt->tag_parent->dma_bits - 1)) - 1,
+ /* highaddr */ BUS_SPACE_MAXADDR,
+ /* filter */ NULL,
+ /* filterarg */ NULL,
+ /* maxsize */ size,
+ /* nsegments */ (align == 1 && size > 1) ?
+ (2 + (size / USB_PAGE_SIZE)) : 1,
+ /* maxsegsz */ (align == 1 && size > USB_PAGE_SIZE) ?
+ USB_PAGE_SIZE : size,
+ /* flags */ BUS_DMA_KEEP_PG_OFFSET,
+ /* lockfn */ &usb_dma_lock_cb,
+ /* lockarg */ NULL,
+ &tag)) {
+ tag = NULL;
+ }
+ udt->tag = tag;
+}
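The "lowaddr" expression computes the inclusive top of the DMA window,
i.e. 2^dma_bits - 1, written so that it does not overflow when dma_bits
is 64; a standalone sketch:

	#include <stdint.h>

	/* Highest DMA-able address for a tag limited to "dma_bits" bits. */
	static uint64_t
	dma_lowaddr(uint8_t dma_bits)
	{
		/* dma_lowaddr(32) == 0xFFFFFFFF; dma_lowaddr(64) == UINT64_MAX */
		return ((2ULL << (dma_bits - 1)) - 1);
	}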
+
+/*------------------------------------------------------------------------*
+ * usb_dma_tag_free - free a DMA tag
+ *------------------------------------------------------------------------*/
+static void
+usb_dma_tag_destroy(struct usb_dma_tag *udt)
+{
+ bus_dma_tag_destroy(udt->tag);
+}
+
+/*------------------------------------------------------------------------*
+ * usb_pc_alloc_mem_cb - BUS-DMA callback function
+ *------------------------------------------------------------------------*/
+static void
+usb_pc_alloc_mem_cb(void *arg, bus_dma_segment_t *segs,
+ int nseg, int error)
+{
+ usb_pc_common_mem_cb(arg, segs, nseg, error, 0);
+}
+
+/*------------------------------------------------------------------------*
+ * usb_pc_load_mem_cb - BUS-DMA callback function
+ *------------------------------------------------------------------------*/
+static void
+usb_pc_load_mem_cb(void *arg, bus_dma_segment_t *segs,
+ int nseg, int error)
+{
+ usb_pc_common_mem_cb(arg, segs, nseg, error, 1);
+}
+
+/*------------------------------------------------------------------------*
+ * usb_pc_common_mem_cb - BUS-DMA callback function
+ *------------------------------------------------------------------------*/
+static void
+usb_pc_common_mem_cb(void *arg, bus_dma_segment_t *segs,
+ int nseg, int error, uint8_t isload)
+{
+ struct usb_dma_parent_tag *uptag;
+ struct usb_page_cache *pc;
+ struct usb_page *pg;
+ usb_size_t rem;
+ uint8_t owned;
+
+ pc = arg;
+ uptag = pc->tag_parent;
+
+ /*
+ * XXX There is sometimes recursive locking here.
+ * XXX We should try to find a better solution.
+ * XXX Until further notice the "owned"
+ * XXX variable does the trick.
+ */
+
+ if (error) {
+ goto done;
+ }
+ pg = pc->page_start;
+ pg->physaddr = segs->ds_addr & ~(USB_PAGE_SIZE - 1);
+ rem = segs->ds_addr & (USB_PAGE_SIZE - 1);
+ pc->page_offset_buf = rem;
+ pc->page_offset_end += rem;
+ nseg--;
+#ifdef USB_DEBUG
+ if (rem != (USB_P2U(pc->buffer) & (USB_PAGE_SIZE - 1))) {
+ /*
+ * This check verifies that the physical address is correct:
+ */
+ DPRINTFN(0, "Page offset was not preserved\n");
+ error = 1;
+ goto done;
+ }
+#endif
+ while (nseg > 0) {
+ nseg--;
+ segs++;
+ pg++;
+ pg->physaddr = segs->ds_addr & ~(USB_PAGE_SIZE - 1);
+ }
+
+done:
+ owned = mtx_owned(uptag->mtx);
+ if (!owned)
+ mtx_lock(uptag->mtx);
+
+ uptag->dma_error = (error ? 1 : 0);
+ if (isload) {
+ (uptag->func) (uptag);
+ } else {
+ cv_broadcast(uptag->cv);
+ }
+ if (!owned)
+ mtx_unlock(uptag->mtx);
+}
+
+/*------------------------------------------------------------------------*
+ * usb_pc_alloc_mem - allocate DMA'able memory
+ *
+ * Returns:
+ * 0: Success
+ * Else: Failure
+ *------------------------------------------------------------------------*/
+uint8_t
+usb_pc_alloc_mem(struct usb_page_cache *pc, struct usb_page *pg,
+ usb_size_t size, usb_size_t align)
+{
+ struct usb_dma_parent_tag *uptag;
+ struct usb_dma_tag *utag;
+ bus_dmamap_t map;
+ void *ptr;
+ int err;
+
+ uptag = pc->tag_parent;
+
+ if (align != 1) {
+ /*
+ * The alignment must be greater than or equal to the
+ * "size"; otherwise the object can be split across two
+ * memory pages, which causes problems.
+ */
+ while (align < size) {
+ align *= 2;
+ if (align == 0) {
+ goto error;
+ }
+ }
+#if 1
+ /*
+ * XXX BUS-DMA workaround - FIXME later:
+ *
+ * We assume that the alignment at this point of
+ * the code is greater than or equal to the size and
+ * less than two times the size, so that if we double
+ * the size, the size will be greater than the
+ * alignment.
+ *
+ * The bus-dma system has a check for "alignment"
+ * being less than "size". If that check fails we end
+ * up using contigmalloc which is page based even for
+ * small allocations. Try to avoid that to save
+ * memory, hence we sometimes do a large number of
+ * small allocations!
+ */
+ if (size <= (USB_PAGE_SIZE / 2)) {
+ size *= 2;
+ }
+#endif
+ }
+ /* get the correct DMA tag */
+ utag = usb_dma_tag_find(uptag, size, align);
+ if (utag == NULL) {
+ goto error;
+ }
+ /* allocate memory */
+ if (bus_dmamem_alloc(
+ utag->tag, &ptr, (BUS_DMA_WAITOK | BUS_DMA_COHERENT), &map)) {
+ goto error;
+ }
+ /* setup page cache */
+ pc->buffer = ptr;
+ pc->page_start = pg;
+ pc->page_offset_buf = 0;
+ pc->page_offset_end = size;
+ pc->map = map;
+ pc->tag = utag->tag;
+ pc->ismultiseg = (align == 1);
+
+ mtx_lock(uptag->mtx);
+
+ /* load memory into DMA */
+ err = bus_dmamap_load(
+ utag->tag, map, ptr, size, &usb_pc_alloc_mem_cb,
+ pc, (BUS_DMA_WAITOK | BUS_DMA_COHERENT));
+
+ if (err == EINPROGRESS) {
+ cv_wait(uptag->cv, uptag->mtx);
+ err = 0;
+ }
+ mtx_unlock(uptag->mtx);
+
+ if (err || uptag->dma_error) {
+ bus_dmamem_free(utag->tag, ptr, map);
+ goto error;
+ }
+ bzero(ptr, size);
+
+ usb_pc_cpu_flush(pc);
+
+ return (0);
+
+error:
+ /* reset most of the page cache */
+ pc->buffer = NULL;
+ pc->page_start = NULL;
+ pc->page_offset_buf = 0;
+ pc->page_offset_end = 0;
+ pc->map = NULL;
+ pc->tag = NULL;
+ return (1);
+}
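A minimal allocation sketch, assuming an already initialised parent tag
"uptag" (see usb_dma_tag_setup() below); the names and sizes are illustrative:

	struct usb_page page;
	struct usb_page_cache pc;

	memset(&pc, 0, sizeof(pc));
	pc.tag_parent = uptag;		/* must be set before the call */
	if (usb_pc_alloc_mem(&pc, &page, 512, 64))
		return (1);		/* allocation or DMA load failed */
	/* ... fill pc.buffer, flush it, hand it to the hardware ... */
	usb_pc_free_mem(&pc);		/* NULL safe */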
+
+/*------------------------------------------------------------------------*
+ * usb_pc_free_mem - free DMA memory
+ *
+ * This function is NULL safe.
+ *------------------------------------------------------------------------*/
+void
+usb_pc_free_mem(struct usb_page_cache *pc)
+{
+ if (pc && pc->buffer) {
+
+ bus_dmamap_unload(pc->tag, pc->map);
+
+ bus_dmamem_free(pc->tag, pc->buffer, pc->map);
+
+ pc->buffer = NULL;
+ }
+}
+
+/*------------------------------------------------------------------------*
+ * usb_pc_load_mem - load virtual memory into DMA
+ *
+ * Return values:
+ * 0: Success
+ * Else: Error
+ *------------------------------------------------------------------------*/
+uint8_t
+usb_pc_load_mem(struct usb_page_cache *pc, usb_size_t size, uint8_t sync)
+{
+ /* setup page cache */
+ pc->page_offset_buf = 0;
+ pc->page_offset_end = size;
+ pc->ismultiseg = 1;
+
+ mtx_assert(pc->tag_parent->mtx, MA_OWNED);
+
+ if (size > 0) {
+ if (sync) {
+ struct usb_dma_parent_tag *uptag;
+ int err;
+
+ uptag = pc->tag_parent;
+
+ /*
+			 * We have to unload the previously loaded DMA
+			 * pages before trying to load new ones!
+ */
+ bus_dmamap_unload(pc->tag, pc->map);
+
+ /*
+ * Try to load memory into DMA.
+ */
+ err = bus_dmamap_load(
+ pc->tag, pc->map, pc->buffer, size,
+ &usb_pc_alloc_mem_cb, pc, BUS_DMA_WAITOK);
+ if (err == EINPROGRESS) {
+ cv_wait(uptag->cv, uptag->mtx);
+ err = 0;
+ }
+ if (err || uptag->dma_error) {
+ return (1);
+ }
+ } else {
+
+ /*
+			 * We have to unload the previously loaded DMA
+			 * pages before trying to load new ones!
+ */
+ bus_dmamap_unload(pc->tag, pc->map);
+
+ /*
+ * Try to load memory into DMA. The callback
+ * will be called in all cases:
+ */
+ if (bus_dmamap_load(
+ pc->tag, pc->map, pc->buffer, size,
+ &usb_pc_load_mem_cb, pc, BUS_DMA_WAITOK)) {
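+				/* any error is reported via the callback */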
+ }
+ }
+ } else {
+ if (!sync) {
+ /*
+ * Call callback so that refcount is decremented
+ * properly:
+ */
+ pc->tag_parent->dma_error = 0;
+ (pc->tag_parent->func) (pc->tag_parent);
+ }
+ }
+ return (0);
+}
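A sketch of the synchronous path, assuming "pc" was prepared by
usb_pc_alloc_mem() or usb_pc_dmamap_create() and "uptag" is its parent tag;
the asynchronous path (sync = 0) instead completes through the parent tag's
callback function:

	uint8_t error;

	mtx_lock(uptag->mtx);
	error = usb_pc_load_mem(pc, len, 1);	/* may block on the CV */
	mtx_unlock(uptag->mtx);
	if (error) {
		/* DMA load failed - clean up the transfer */
	}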
+
+/*------------------------------------------------------------------------*
+ * usb_pc_cpu_invalidate - invalidate CPU cache
+ *------------------------------------------------------------------------*/
+void
+usb_pc_cpu_invalidate(struct usb_page_cache *pc)
+{
+ if (pc->page_offset_end == pc->page_offset_buf) {
+ /* nothing has been loaded into this page cache! */
+ return;
+ }
+
+ /*
+ * TODO: We currently do XXX_POSTREAD and XXX_PREREAD at the
+ * same time, but in the future we should try to isolate the
+ * different cases to optimise the code. --HPS
+ */
+ bus_dmamap_sync(pc->tag, pc->map, BUS_DMASYNC_POSTREAD);
+ bus_dmamap_sync(pc->tag, pc->map, BUS_DMASYNC_PREREAD);
+}
+
+/*------------------------------------------------------------------------*
+ * usb_pc_cpu_flush - flush CPU cache
+ *------------------------------------------------------------------------*/
+void
+usb_pc_cpu_flush(struct usb_page_cache *pc)
+{
+ if (pc->page_offset_end == pc->page_offset_buf) {
+ /* nothing has been loaded into this page cache! */
+ return;
+ }
+ bus_dmamap_sync(pc->tag, pc->map, BUS_DMASYNC_PREWRITE);
+}
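The two sync helpers pair up with the DMA direction: flush after the CPU
writes a buffer the device will read, invalidate before the CPU reads a
buffer the device has written. A hedged sketch with illustrative buffers:

	/* CPU -> device (OUT transfer): fill, then flush */
	memcpy(pc->buffer, tx_data, tx_len);
	usb_pc_cpu_flush(pc);

	/* device -> CPU (IN transfer): invalidate, then parse */
	usb_pc_cpu_invalidate(pc);
	memcpy(rx_data, pc->buffer, rx_len);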
+
+/*------------------------------------------------------------------------*
+ * usb_pc_dmamap_create - create a DMA map
+ *
+ * Returns:
+ * 0: Success
+ * Else: Failure
+ *------------------------------------------------------------------------*/
+uint8_t
+usb_pc_dmamap_create(struct usb_page_cache *pc, usb_size_t size)
+{
+ struct usb_xfer_root *info;
+ struct usb_dma_tag *utag;
+
+ /* get info */
+ info = USB_DMATAG_TO_XROOT(pc->tag_parent);
+
+ /* sanity check */
+ if (info == NULL) {
+ goto error;
+ }
+ utag = usb_dma_tag_find(pc->tag_parent, size, 1);
+ if (utag == NULL) {
+ goto error;
+ }
+ /* create DMA map */
+ if (bus_dmamap_create(utag->tag, 0, &pc->map)) {
+ goto error;
+ }
+ pc->tag = utag->tag;
+	return (0);			/* success */
+
+error:
+ pc->map = NULL;
+ pc->tag = NULL;
+	return (1);			/* failure */
+}
+
+/*------------------------------------------------------------------------*
+ * usb_pc_dmamap_destroy
+ *
+ * This function is NULL safe.
+ *------------------------------------------------------------------------*/
+void
+usb_pc_dmamap_destroy(struct usb_page_cache *pc)
+{
+ if (pc && pc->tag) {
+ bus_dmamap_destroy(pc->tag, pc->map);
+ pc->tag = NULL;
+ pc->map = NULL;
+ }
+}
+
+/*------------------------------------------------------------------------*
+ * usb_dma_tag_find - factored out code
+ *------------------------------------------------------------------------*/
+struct usb_dma_tag *
+usb_dma_tag_find(struct usb_dma_parent_tag *udpt,
+ usb_size_t size, usb_size_t align)
+{
+ struct usb_dma_tag *udt;
+ uint8_t nudt;
+
+ USB_ASSERT(align > 0, ("Invalid parameter align = 0\n"));
+ USB_ASSERT(size > 0, ("Invalid parameter size = 0\n"));
+
+ udt = udpt->utag_first;
+ nudt = udpt->utag_max;
+
+ while (nudt--) {
+
+ if (udt->align == 0) {
+ usb_dma_tag_create(udt, size, align);
+ if (udt->tag == NULL) {
+ return (NULL);
+ }
+ udt->align = align;
+ udt->size = size;
+ return (udt);
+ }
+ if ((udt->align == align) && (udt->size == size)) {
+ return (udt);
+ }
+ udt++;
+ }
+ return (NULL);
+}
+
+/*------------------------------------------------------------------------*
+ * usb_dma_tag_setup - initialise USB DMA tags
+ *------------------------------------------------------------------------*/
+void
+usb_dma_tag_setup(struct usb_dma_parent_tag *udpt,
+ struct usb_dma_tag *udt, bus_dma_tag_t dmat,
+ struct mtx *mtx, usb_dma_callback_t *func,
+ uint8_t ndmabits, uint8_t nudt)
+{
+ bzero(udpt, sizeof(*udpt));
+
+ /* sanity checking */
+ if ((nudt == 0) ||
+ (ndmabits == 0) ||
+ (mtx == NULL)) {
+ /* something is corrupt */
+ return;
+ }
+ /* initialise condition variable */
+ cv_init(udpt->cv, "USB DMA CV");
+
+ /* store some information */
+ udpt->mtx = mtx;
+ udpt->func = func;
+ udpt->tag = dmat;
+ udpt->utag_first = udt;
+ udpt->utag_max = nudt;
+ udpt->dma_bits = ndmabits;
+
+ while (nudt--) {
+ bzero(udt, sizeof(*udt));
+ udt->tag_parent = udpt;
+ udt++;
+ }
+}
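A controller-attach sketch of the setup/teardown pairing, modelled on the
prototypes in this file; "sc" and its mutex are illustrative driver state and
USB_BUS_DMA_TAG_MAX comes from usb_controller.h further down in this patch:

	static struct usb_dma_tag udt[USB_BUS_DMA_TAG_MAX];
	struct usb_dma_parent_tag udpt;

	usb_dma_tag_setup(&udpt, udt, USB_GET_DMA_TAG(dev), &sc->sc_mtx,
	    &usb_bdma_done_event, 32 /* DMA address bits */,
	    USB_BUS_DMA_TAG_MAX);
	/* ... allocate and load buffers ... */
	usb_dma_tag_unsetup(&udpt);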
+
+/*------------------------------------------------------------------------*
+ * usb_dma_tag_unsetup - factored out code
+ *------------------------------------------------------------------------*/
+void
+usb_dma_tag_unsetup(struct usb_dma_parent_tag *udpt)
+{
+ struct usb_dma_tag *udt;
+ uint8_t nudt;
+
+ udt = udpt->utag_first;
+ nudt = udpt->utag_max;
+
+ while (nudt--) {
+
+ if (udt->align) {
+ /* destroy the USB DMA tag */
+ usb_dma_tag_destroy(udt);
+ udt->align = 0;
+ }
+ udt++;
+ }
+
+ if (udpt->utag_max) {
+ /* destroy the condition variable */
+ cv_destroy(udpt->cv);
+ }
+}
+
+/*------------------------------------------------------------------------*
+ * usb_bdma_work_loop
+ *
+ * This function handles loading of virtual buffers into DMA and is
+ * only called when "dma_refcount" is zero.
+ *------------------------------------------------------------------------*/
+void
+usb_bdma_work_loop(struct usb_xfer_queue *pq)
+{
+ struct usb_xfer_root *info;
+ struct usb_xfer *xfer;
+ usb_frcount_t nframes;
+
+ xfer = pq->curr;
+ info = xfer->xroot;
+
+ mtx_assert(info->xfer_mtx, MA_OWNED);
+
+ if (xfer->error) {
+ /* some error happened */
+ USB_BUS_LOCK(info->bus);
+ usbd_transfer_done(xfer, 0);
+ USB_BUS_UNLOCK(info->bus);
+ return;
+ }
+ if (!xfer->flags_int.bdma_setup) {
+ struct usb_page *pg;
+ usb_frlength_t frlength_0;
+ uint8_t isread;
+
+ xfer->flags_int.bdma_setup = 1;
+
+ /* reset BUS-DMA load state */
+
+ info->dma_error = 0;
+
+ if (xfer->flags_int.isochronous_xfr) {
+ /* only one frame buffer */
+ nframes = 1;
+ frlength_0 = xfer->sumlen;
+ } else {
+ /* can be multiple frame buffers */
+ nframes = xfer->nframes;
+ frlength_0 = xfer->frlengths[0];
+ }
+
+ /*
+ * Set DMA direction first. This is needed to
+ * select the correct cache invalidate and cache
+ * flush operations.
+ */
+ isread = USB_GET_DATA_ISREAD(xfer);
+ pg = xfer->dma_page_ptr;
+
+ if (xfer->flags_int.control_xfr &&
+ xfer->flags_int.control_hdr) {
+ /* special case */
+ if (xfer->flags_int.usb_mode == USB_MODE_DEVICE) {
+ /* The device controller writes to memory */
+ xfer->frbuffers[0].isread = 1;
+ } else {
+ /* The host controller reads from memory */
+ xfer->frbuffers[0].isread = 0;
+ }
+ } else {
+ /* default case */
+ xfer->frbuffers[0].isread = isread;
+ }
+
+ /*
+		 * Set up the "page_start" pointer, which points to an array of
+ * USB pages where information about the physical address of a
+ * page will be stored. Also initialise the "isread" field of
+ * the USB page caches.
+ */
+ xfer->frbuffers[0].page_start = pg;
+
+ info->dma_nframes = nframes;
+ info->dma_currframe = 0;
+ info->dma_frlength_0 = frlength_0;
+
+ pg += (frlength_0 / USB_PAGE_SIZE);
+ pg += 2;
+
+ while (--nframes > 0) {
+ xfer->frbuffers[nframes].isread = isread;
+ xfer->frbuffers[nframes].page_start = pg;
+
+ pg += (xfer->frlengths[nframes] / USB_PAGE_SIZE);
+ pg += 2;
+ }
+
+ }
+ if (info->dma_error) {
+ USB_BUS_LOCK(info->bus);
+ usbd_transfer_done(xfer, USB_ERR_DMA_LOAD_FAILED);
+ USB_BUS_UNLOCK(info->bus);
+ return;
+ }
+ if (info->dma_currframe != info->dma_nframes) {
+
+ if (info->dma_currframe == 0) {
+ /* special case */
+ usb_pc_load_mem(xfer->frbuffers,
+ info->dma_frlength_0, 0);
+ } else {
+ /* default case */
+ nframes = info->dma_currframe;
+ usb_pc_load_mem(xfer->frbuffers + nframes,
+ xfer->frlengths[nframes], 0);
+ }
+
+ /* advance frame index */
+ info->dma_currframe++;
+
+ return;
+ }
+ /* go ahead */
+ usb_bdma_pre_sync(xfer);
+
+ /* start loading next USB transfer, if any */
+ usb_command_wrapper(pq, NULL);
+
+ /* finally start the hardware */
+ usbd_pipe_enter(xfer);
+}
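The "pg += 2" above reserves two extra usb_page slots per frame buffer: a
buffer of length "len" starting at an arbitrary in-page offset can straddle
at most (len / USB_PAGE_SIZE) + 2 pages. For example, with USB_PAGE_SIZE =
4096, a 4096-byte frame starting at in-page offset 0xFF0 touches two pages,
and the reserved 4096 / 4096 + 2 = 3 slots still cover the worst case of a
frame spilling over both page boundaries.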
+
+/*------------------------------------------------------------------------*
+ * usb_bdma_done_event
+ *
+ * This function is called when the BUS-DMA has loaded virtual memory
+ * into DMA, if any.
+ *------------------------------------------------------------------------*/
+void
+usb_bdma_done_event(struct usb_dma_parent_tag *udpt)
+{
+ struct usb_xfer_root *info;
+
+ info = USB_DMATAG_TO_XROOT(udpt);
+
+ mtx_assert(info->xfer_mtx, MA_OWNED);
+
+ /* copy error */
+ info->dma_error = udpt->dma_error;
+
+ /* enter workloop again */
+ usb_command_wrapper(&info->dma_q,
+ info->dma_q.curr);
+}
+
+/*------------------------------------------------------------------------*
+ * usb_bdma_pre_sync
+ *
+ * This function handles DMA synchronisation that must be done before
+ * a USB transfer is started.
+ *------------------------------------------------------------------------*/
+void
+usb_bdma_pre_sync(struct usb_xfer *xfer)
+{
+ struct usb_page_cache *pc;
+ usb_frcount_t nframes;
+
+ if (xfer->flags_int.isochronous_xfr) {
+ /* only one frame buffer */
+ nframes = 1;
+ } else {
+ /* can be multiple frame buffers */
+ nframes = xfer->nframes;
+ }
+
+ pc = xfer->frbuffers;
+
+ while (nframes--) {
+
+ if (pc->isread) {
+ usb_pc_cpu_invalidate(pc);
+ } else {
+ usb_pc_cpu_flush(pc);
+ }
+ pc++;
+ }
+}
+
+/*------------------------------------------------------------------------*
+ * usb_bdma_post_sync
+ *
+ * This function handles DMA synchronisation that must be done after
+ * a USB transfer is complete.
+ *------------------------------------------------------------------------*/
+void
+usb_bdma_post_sync(struct usb_xfer *xfer)
+{
+ struct usb_page_cache *pc;
+ usb_frcount_t nframes;
+
+ if (xfer->flags_int.isochronous_xfr) {
+ /* only one frame buffer */
+ nframes = 1;
+ } else {
+ /* can be multiple frame buffers */
+ nframes = xfer->nframes;
+ }
+
+ pc = xfer->frbuffers;
+
+ while (nframes--) {
+ if (pc->isread) {
+ usb_pc_cpu_invalidate(pc);
+ }
+ pc++;
+ }
+}
+
+#endif
diff --git a/rtems/freebsd/dev/usb/usb_busdma.h b/rtems/freebsd/dev/usb/usb_busdma.h
new file mode 100644
index 00000000..53921d45
--- /dev/null
+++ b/rtems/freebsd/dev/usb/usb_busdma.h
@@ -0,0 +1,161 @@
+/* $FreeBSD$ */
+/*-
+ * Copyright (c) 2008 Hans Petter Selasky. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _USB_BUSDMA_HH_
+#define _USB_BUSDMA_HH_
+
+#include <rtems/freebsd/sys/uio.h>
+#include <rtems/freebsd/sys/mbuf.h>
+
+#include <rtems/freebsd/machine/bus.h>
+
+/* defines */
+
+#define USB_PAGE_SIZE PAGE_SIZE /* use system PAGE_SIZE */
+
+#if (__FreeBSD_version >= 700020)
+#define USB_GET_DMA_TAG(dev) bus_get_dma_tag(dev)
+#else
+#define USB_GET_DMA_TAG(dev) NULL /* XXX */
+#endif
+
+/* structure prototypes */
+
+struct usb_xfer_root;
+struct usb_dma_parent_tag;
+struct usb_dma_tag;
+
+/*
+ * The following typedef defines the USB DMA load done callback.
+ */
+
+typedef void (usb_dma_callback_t)(struct usb_dma_parent_tag *udpt);
+
+/*
+ * The following structure defines the physical and non-kernel virtual
+ * address of a memory page having size USB_PAGE_SIZE.
+ */
+struct usb_page {
+#if USB_HAVE_BUSDMA
+ bus_size_t physaddr;
+ void *buffer; /* non Kernel Virtual Address */
+#endif
+};
+
+/*
+ * The following structure is used when the kernel virtual pointer and
+ * the physical address belonging to an offset in a USB page cache are
+ * needed.
+ */
+struct usb_page_search {
+ void *buffer;
+#if USB_HAVE_BUSDMA
+ bus_size_t physaddr;
+#endif
+ usb_size_t length;
+};
+
+/*
+ * The following structure is used to keep information about a DMA
+ * memory allocation.
+ */
+struct usb_page_cache {
+
+#if USB_HAVE_BUSDMA
+ bus_dma_tag_t tag;
+ bus_dmamap_t map;
+ struct usb_page *page_start;
+#endif
+ struct usb_dma_parent_tag *tag_parent; /* always set */
+ void *buffer; /* virtual buffer pointer */
+#if USB_HAVE_BUSDMA
+ usb_size_t page_offset_buf;
+ usb_size_t page_offset_end;
+ uint8_t isread:1; /* set if we are currently reading
+ * from the memory. Else write. */
+ uint8_t ismultiseg:1; /* set if we can have multiple
+ * segments */
+#endif
+};
+
+/*
+ * The following structure describes the parent USB DMA tag.
+ */
+#if USB_HAVE_BUSDMA
+struct usb_dma_parent_tag {
+ struct cv cv[1]; /* internal condition variable */
+ bus_dma_tag_t tag; /* always set */
+
+ struct mtx *mtx; /* private mutex, always set */
+ usb_dma_callback_t *func; /* load complete callback function */
+ struct usb_dma_tag *utag_first;/* pointer to first USB DMA tag */
+ uint8_t dma_error; /* set if DMA load operation failed */
+ uint8_t dma_bits; /* number of DMA address lines */
+ uint8_t utag_max; /* number of USB DMA tags */
+};
+#else
+struct usb_dma_parent_tag {}; /* empty struct */
+#endif
+
+/*
+ * The following structure describes a USB DMA tag.
+ */
+#if USB_HAVE_BUSDMA
+struct usb_dma_tag {
+ struct usb_dma_parent_tag *tag_parent;
+ bus_dma_tag_t tag;
+ usb_size_t align;
+ usb_size_t size;
+};
+#else
+struct usb_dma_tag {}; /* empty struct */
+#endif
+
+/* function prototypes */
+
+int usb_uiomove(struct usb_page_cache *pc, struct uio *uio,
+ usb_frlength_t pc_offset, usb_frlength_t len);
+struct usb_dma_tag *usb_dma_tag_find(struct usb_dma_parent_tag *udpt,
+ usb_size_t size, usb_size_t align);
+uint8_t usb_pc_alloc_mem(struct usb_page_cache *pc, struct usb_page *pg,
+ usb_size_t size, usb_size_t align);
+uint8_t usb_pc_dmamap_create(struct usb_page_cache *pc, usb_size_t size);
+uint8_t usb_pc_load_mem(struct usb_page_cache *pc, usb_size_t size,
+ uint8_t sync);
+void usb_bdma_done_event(struct usb_dma_parent_tag *udpt);
+void usb_bdma_post_sync(struct usb_xfer *xfer);
+void usb_bdma_pre_sync(struct usb_xfer *xfer);
+void usb_bdma_work_loop(struct usb_xfer_queue *pq);
+void usb_dma_tag_setup(struct usb_dma_parent_tag *udpt,
+ struct usb_dma_tag *udt, bus_dma_tag_t dmat, struct mtx *mtx,
+ usb_dma_callback_t *func, uint8_t ndmabits, uint8_t nudt);
+void usb_dma_tag_unsetup(struct usb_dma_parent_tag *udpt);
+void usb_pc_cpu_flush(struct usb_page_cache *pc);
+void usb_pc_cpu_invalidate(struct usb_page_cache *pc);
+void usb_pc_dmamap_destroy(struct usb_page_cache *pc);
+void usb_pc_free_mem(struct usb_page_cache *pc);
+
+#endif /* _USB_BUSDMA_HH_ */
diff --git a/rtems/freebsd/dev/usb/usb_cdc.h b/rtems/freebsd/dev/usb/usb_cdc.h
new file mode 100644
index 00000000..632cfe9c
--- /dev/null
+++ b/rtems/freebsd/dev/usb/usb_cdc.h
@@ -0,0 +1,295 @@
+/* $NetBSD: usbcdc.h,v 1.9 2004/10/23 13:24:24 augustss Exp $ */
+/* $FreeBSD$ */
+
+/*-
+ * Copyright (c) 1998 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Lennart Augustsson (lennart@augustsson.net) at
+ * Carlstedt Research & Technology.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the NetBSD
+ * Foundation, Inc. and its contributors.
+ * 4. Neither the name of The NetBSD Foundation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _USB_CDC_HH_
+#define _USB_CDC_HH_
+
+#define UDESCSUB_CDC_HEADER 0
+#define UDESCSUB_CDC_CM 1 /* Call Management */
+#define UDESCSUB_CDC_ACM 2 /* Abstract Control Model */
+#define UDESCSUB_CDC_DLM 3 /* Direct Line Management */
+#define UDESCSUB_CDC_TRF 4 /* Telephone Ringer */
+#define UDESCSUB_CDC_TCLSR 5 /* Telephone Call */
+#define UDESCSUB_CDC_UNION 6
+#define UDESCSUB_CDC_CS 7 /* Country Selection */
+#define UDESCSUB_CDC_TOM 8 /* Telephone Operational Modes */
+#define UDESCSUB_CDC_USBT 9 /* USB Terminal */
+#define UDESCSUB_CDC_NCT 10
+#define UDESCSUB_CDC_PUF 11
+#define UDESCSUB_CDC_EUF 12
+#define UDESCSUB_CDC_MCMF 13
+#define UDESCSUB_CDC_CCMF 14
+#define UDESCSUB_CDC_ENF 15
+#define UDESCSUB_CDC_ANF 16
+
+struct usb_cdc_header_descriptor {
+ uByte bLength;
+ uByte bDescriptorType;
+ uByte bDescriptorSubtype;
+ uWord bcdCDC;
+} __packed;
+
+struct usb_cdc_cm_descriptor {
+ uByte bLength;
+ uByte bDescriptorType;
+ uByte bDescriptorSubtype;
+ uByte bmCapabilities;
+#define USB_CDC_CM_DOES_CM 0x01
+#define USB_CDC_CM_OVER_DATA 0x02
+ uByte bDataInterface;
+} __packed;
+
+struct usb_cdc_acm_descriptor {
+ uByte bLength;
+ uByte bDescriptorType;
+ uByte bDescriptorSubtype;
+ uByte bmCapabilities;
+#define USB_CDC_ACM_HAS_FEATURE 0x01
+#define USB_CDC_ACM_HAS_LINE 0x02
+#define USB_CDC_ACM_HAS_BREAK 0x04
+#define USB_CDC_ACM_HAS_NETWORK_CONN 0x08
+} __packed;
+
+struct usb_cdc_union_descriptor {
+ uByte bLength;
+ uByte bDescriptorType;
+ uByte bDescriptorSubtype;
+ uByte bMasterInterface;
+ uByte bSlaveInterface[1];
+} __packed;
+
+struct usb_cdc_ethernet_descriptor {
+ uByte bLength;
+ uByte bDescriptorType;
+ uByte bDescriptorSubtype;
+ uByte iMacAddress;
+ uDWord bmEthernetStatistics;
+ uWord wMaxSegmentSize;
+ uWord wNumberMCFilters;
+ uByte bNumberPowerFilters;
+} __packed;
+
+#define UCDC_SEND_ENCAPSULATED_COMMAND 0x00
+#define UCDC_GET_ENCAPSULATED_RESPONSE 0x01
+#define UCDC_SET_COMM_FEATURE 0x02
+#define UCDC_GET_COMM_FEATURE 0x03
+#define UCDC_ABSTRACT_STATE 0x01
+#define UCDC_COUNTRY_SETTING 0x02
+#define UCDC_CLEAR_COMM_FEATURE 0x04
+#define UCDC_SET_LINE_CODING 0x20
+#define UCDC_GET_LINE_CODING 0x21
+#define UCDC_SET_CONTROL_LINE_STATE 0x22
+#define UCDC_LINE_DTR 0x0001
+#define UCDC_LINE_RTS 0x0002
+#define UCDC_SEND_BREAK 0x23
+#define UCDC_BREAK_ON 0xffff
+#define UCDC_BREAK_OFF 0x0000
+
+struct usb_cdc_abstract_state {
+ uWord wState;
+#define UCDC_IDLE_SETTING 0x0001
+#define UCDC_DATA_MULTIPLEXED 0x0002
+} __packed;
+
+#define UCDC_ABSTRACT_STATE_LENGTH 2
+
+struct usb_cdc_line_state {
+ uDWord dwDTERate;
+ uByte bCharFormat;
+#define UCDC_STOP_BIT_1 0
+#define UCDC_STOP_BIT_1_5 1
+#define UCDC_STOP_BIT_2 2
+ uByte bParityType;
+#define UCDC_PARITY_NONE 0
+#define UCDC_PARITY_ODD 1
+#define UCDC_PARITY_EVEN 2
+#define UCDC_PARITY_MARK 3
+#define UCDC_PARITY_SPACE 4
+ uByte bDataBits;
+} __packed;
+
+#define UCDC_LINE_STATE_LENGTH 7
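A sketch of filling the line coding for a 115200 baud, 8N1 serial link;
USETDW is the usual FreeBSD little-endian setter macro, and the
control-request plumbing is omitted here:

	struct usb_cdc_line_state ls;

	USETDW(ls.dwDTERate, 115200);		/* baud rate */
	ls.bCharFormat = UCDC_STOP_BIT_1;	/* one stop bit */
	ls.bParityType = UCDC_PARITY_NONE;
	ls.bDataBits = 8;
	/* sent via a UCDC_SET_LINE_CODING class request with
	 * wLength = UCDC_LINE_STATE_LENGTH */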
+
+struct usb_cdc_notification {
+ uByte bmRequestType;
+#define UCDC_NOTIFICATION 0xa1
+ uByte bNotification;
+#define UCDC_N_NETWORK_CONNECTION 0x00
+#define UCDC_N_RESPONSE_AVAILABLE 0x01
+#define UCDC_N_AUX_JACK_HOOK_STATE 0x08
+#define UCDC_N_RING_DETECT 0x09
+#define UCDC_N_SERIAL_STATE 0x20
+#define UCDC_N_CALL_STATE_CHANGED 0x28
+#define UCDC_N_LINE_STATE_CHANGED 0x29
+#define UCDC_N_CONNECTION_SPEED_CHANGE 0x2a
+ uWord wValue;
+ uWord wIndex;
+ uWord wLength;
+ uByte data[16];
+} __packed;
+
+#define UCDC_NOTIFICATION_LENGTH 8
+
+/*
+ * Bits set in the SERIAL STATE notification (first byte of data)
+ */
+
+#define UCDC_N_SERIAL_OVERRUN 0x40
+#define UCDC_N_SERIAL_PARITY 0x20
+#define UCDC_N_SERIAL_FRAMING 0x10
+#define UCDC_N_SERIAL_RI 0x08
+#define UCDC_N_SERIAL_BREAK 0x04
+#define UCDC_N_SERIAL_DSR 0x02
+#define UCDC_N_SERIAL_DCD 0x01
+
+/* Serial state bit masks */
+#define UCDC_MDM_RXCARRIER 0x01
+#define UCDC_MDM_TXCARRIER 0x02
+#define UCDC_MDM_BREAK 0x04
+#define UCDC_MDM_RING 0x08
+#define UCDC_MDM_FRAMING_ERR 0x10
+#define UCDC_MDM_PARITY_ERR 0x20
+#define UCDC_MDM_OVERRUN_ERR 0x40
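A decoding sketch for an incoming SERIAL_STATE interrupt message, assuming
"buf" points to at least UCDC_NOTIFICATION_LENGTH + 2 valid bytes; a real
driver must validate the transfer length first:

	const struct usb_cdc_notification *cn = buf;
	uint8_t carrier = 0, got_break = 0;

	if (cn->bmRequestType == UCDC_NOTIFICATION &&
	    cn->bNotification == UCDC_N_SERIAL_STATE &&
	    UGETW(cn->wLength) >= 2) {
		uint8_t state = cn->data[0];

		if (state & UCDC_N_SERIAL_DCD)
			carrier = 1;	/* carrier present */
		if (state & UCDC_N_SERIAL_BREAK)
			got_break = 1;	/* break received */
	}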
+
+/*
+ * Network Control Model, NCM16 + NCM32, protocol definitions
+ */
+struct usb_ncm16_hdr {
+ uDWord dwSignature;
+ uWord wHeaderLength;
+ uWord wSequence;
+ uWord wBlockLength;
+ uWord wDptIndex;
+} __packed;
+
+struct usb_ncm16_dp {
+ uWord wFrameIndex;
+ uWord wFrameLength;
+} __packed;
+
+struct usb_ncm16_dpt {
+ uDWord dwSignature;
+ uWord wLength;
+ uWord wNextNdpIndex;
+ struct usb_ncm16_dp dp[0];
+} __packed;
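A minimal NTB16 walk under the structures above; the datagram pointer table
is terminated by a zero-length entry. Signature and bounds checking are
omitted and must be added in real code ("ntb" is an illustrative buffer):

	const struct usb_ncm16_hdr *hdr = ntb;
	const struct usb_ncm16_dpt *dpt = (const struct usb_ncm16_dpt *)
	    ((const uint8_t *)ntb + UGETW(hdr->wDptIndex));
	unsigned i;

	for (i = 0; UGETW(dpt->dp[i].wFrameLength) != 0; i++) {
		const uint8_t *frame = (const uint8_t *)ntb +
		    UGETW(dpt->dp[i].wFrameIndex);
		/* deliver frame, UGETW(dpt->dp[i].wFrameLength) bytes */
	}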
+
+struct usb_ncm32_hdr {
+ uDWord dwSignature;
+ uWord wHeaderLength;
+ uWord wSequence;
+ uDWord dwBlockLength;
+ uDWord dwDptIndex;
+} __packed;
+
+struct usb_ncm32_dp {
+ uDWord dwFrameIndex;
+ uDWord dwFrameLength;
+} __packed;
+
+struct usb_ncm32_dpt {
+ uDWord dwSignature;
+ uWord wLength;
+ uWord wReserved6;
+ uDWord dwNextNdpIndex;
+ uDWord dwReserved12;
+ struct usb_ncm32_dp dp[0];
+} __packed;
+
+/* Communications interface class specific descriptors */
+
+#define UCDC_NCM_FUNC_DESC_SUBTYPE 0x1A
+
+struct usb_ncm_func_descriptor {
+ uByte bLength;
+ uByte bDescriptorType;
+ uByte bDescriptorSubtype;
+ uByte bcdNcmVersion[2];
+ uByte bmNetworkCapabilities;
+#define UCDC_NCM_CAP_FILTER 0x01
+#define UCDC_NCM_CAP_MAC_ADDR 0x02
+#define UCDC_NCM_CAP_ENCAP 0x04
+#define UCDC_NCM_CAP_MAX_DATA 0x08
+#define UCDC_NCM_CAP_CRCMODE 0x10
+#define UCDC_NCM_CAP_MAX_DGRAM 0x20
+} __packed;
+
+/* Communications interface specific class request codes */
+
+#define UCDC_NCM_SET_ETHERNET_MULTICAST_FILTERS 0x40
+#define UCDC_NCM_SET_ETHERNET_POWER_MGMT_PATTERN_FILTER 0x41
+#define UCDC_NCM_GET_ETHERNET_POWER_MGMT_PATTERN_FILTER 0x42
+#define UCDC_NCM_SET_ETHERNET_PACKET_FILTER 0x43
+#define UCDC_NCM_GET_ETHERNET_STATISTIC 0x44
+#define UCDC_NCM_GET_NTB_PARAMETERS 0x80
+#define UCDC_NCM_GET_NET_ADDRESS 0x81
+#define UCDC_NCM_SET_NET_ADDRESS 0x82
+#define UCDC_NCM_GET_NTB_FORMAT 0x83
+#define UCDC_NCM_SET_NTB_FORMAT 0x84
+#define UCDC_NCM_GET_NTB_INPUT_SIZE 0x85
+#define UCDC_NCM_SET_NTB_INPUT_SIZE 0x86
+#define UCDC_NCM_GET_MAX_DATAGRAM_SIZE 0x87
+#define UCDC_NCM_SET_MAX_DATAGRAM_SIZE 0x88
+#define UCDC_NCM_GET_CRC_MODE 0x89
+#define UCDC_NCM_SET_CRC_MODE 0x8A
+
+struct usb_ncm_parameters {
+ uWord wLength;
+ uWord bmNtbFormatsSupported;
+#define UCDC_NCM_FORMAT_NTB16 0x0001
+#define UCDC_NCM_FORMAT_NTB32 0x0002
+ uDWord dwNtbInMaxSize;
+ uWord wNdpInDivisor;
+ uWord wNdpInPayloadRemainder;
+ uWord wNdpInAlignment;
+ uWord wReserved14;
+ uDWord dwNtbOutMaxSize;
+ uWord wNdpOutDivisor;
+ uWord wNdpOutPayloadRemainder;
+ uWord wNdpOutAlignment;
+ uWord wNtbOutMaxDatagrams;
+} __packed;
+
+/* Communications interface specific class notification codes */
+#define UCDC_NCM_NOTIF_NETWORK_CONNECTION 0x00
+#define UCDC_NCM_NOTIF_RESPONSE_AVAILABLE 0x01
+#define UCDC_NCM_NOTIF_CONNECTION_SPEED_CHANGE 0x2A
+
+#endif /* _USB_CDC_HH_ */
diff --git a/rtems/freebsd/dev/usb/usb_controller.h b/rtems/freebsd/dev/usb/usb_controller.h
new file mode 100644
index 00000000..8f3f3de4
--- /dev/null
+++ b/rtems/freebsd/dev/usb/usb_controller.h
@@ -0,0 +1,225 @@
+/* $FreeBSD$ */
+/*-
+ * Copyright (c) 2008 Hans Petter Selasky. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _USB_CONTROLLER_HH_
+#define _USB_CONTROLLER_HH_
+
+/* defines */
+
+#define USB_BUS_DMA_TAG_MAX 8
+
+/* structure prototypes */
+
+struct usb_bus;
+struct usb_page;
+struct usb_endpoint;
+struct usb_page_cache;
+struct usb_setup_params;
+struct usb_hw_ep_profile;
+struct usb_fs_isoc_schedule;
+struct usb_config_descriptor;
+struct usb_endpoint_descriptor;
+
+/* typedefs */
+
+typedef void (usb_bus_mem_sub_cb_t)(struct usb_bus *bus, struct usb_page_cache *pc, struct usb_page *pg, usb_size_t size, usb_size_t align);
+typedef void (usb_bus_mem_cb_t)(struct usb_bus *bus, usb_bus_mem_sub_cb_t *scb);
+
+/*
+ * The following structure is used to define all the USB BUS
+ * callbacks.
+ */
+struct usb_bus_methods {
+
+ /* USB Device and Host mode - Mandatory */
+
+ usb_handle_req_t *roothub_exec;
+
+ void (*endpoint_init) (struct usb_device *,
+ struct usb_endpoint_descriptor *, struct usb_endpoint *);
+ void (*xfer_setup) (struct usb_setup_params *);
+ void (*xfer_unsetup) (struct usb_xfer *);
+ void (*get_dma_delay) (struct usb_device *, uint32_t *);
+ void (*device_suspend) (struct usb_device *);
+ void (*device_resume) (struct usb_device *);
+ void (*set_hw_power) (struct usb_bus *);
+
+ /*
+ * The following flag is set if one or more control transfers are
+ * active:
+ */
+#define USB_HW_POWER_CONTROL 0x01
+ /*
+ * The following flag is set if one or more bulk transfers are
+ * active:
+ */
+#define USB_HW_POWER_BULK 0x02
+ /*
+ * The following flag is set if one or more interrupt transfers are
+ * active:
+ */
+#define USB_HW_POWER_INTERRUPT 0x04
+ /*
+ * The following flag is set if one or more isochronous transfers
+ * are active:
+ */
+#define USB_HW_POWER_ISOC 0x08
+ /*
+ * The following flag is set if one or more non-root-HUB devices
+ * are present on the given USB bus:
+ */
+#define USB_HW_POWER_NON_ROOT_HUB 0x10
+
+ /* USB Device mode only - Mandatory */
+
+ void (*get_hw_ep_profile) (struct usb_device *udev, const struct usb_hw_ep_profile **ppf, uint8_t ep_addr);
+ void (*set_stall) (struct usb_device *udev, struct usb_xfer *xfer, struct usb_endpoint *ep, uint8_t *did_stall);
+
+ /* USB Device mode mandatory. USB Host mode optional. */
+
+ void (*clear_stall) (struct usb_device *udev, struct usb_endpoint *ep);
+
+ /* Optional transfer polling support */
+
+ void (*xfer_poll) (struct usb_bus *);
+
+ /* Optional fixed power mode support */
+
+ void (*get_power_mode) (struct usb_device *udev, int8_t *pmode);
+
+ /* Optional endpoint uninit */
+
+ void (*endpoint_uninit) (struct usb_device *, struct usb_endpoint *);
+
+ /* Optional device init */
+
+ usb_error_t (*device_init) (struct usb_device *);
+
+ /* Optional device uninit */
+
+ void (*device_uninit) (struct usb_device *);
+
+ /* Optional for device and host mode */
+
+ void (*start_dma_delay) (struct usb_xfer *);
+
+ void (*device_state_change) (struct usb_device *);
+
+ /* Optional for host mode */
+
+ usb_error_t (*set_address) (struct usb_device *, struct mtx *, uint16_t);
+};
+
+/*
+ * The following structure is used to define all the USB pipe
+ * callbacks.
+ */
+struct usb_pipe_methods {
+
+ /* Mandatory USB Device and Host mode callbacks: */
+
+ void (*open)(struct usb_xfer *);
+ void (*close)(struct usb_xfer *);
+
+ void (*enter)(struct usb_xfer *);
+ void (*start)(struct usb_xfer *);
+
+ /* Optional */
+
+ void *info;
+};
+
+/*
+ * The following structure keeps information about what a hardware USB
+ * endpoint supports.
+ */
+struct usb_hw_ep_profile {
+ uint16_t max_in_frame_size; /* IN-token direction */
+ uint16_t max_out_frame_size; /* OUT-token direction */
+ uint8_t is_simplex:1;
+ uint8_t support_multi_buffer:1;
+ uint8_t support_bulk:1;
+ uint8_t support_control:1;
+ uint8_t support_interrupt:1;
+ uint8_t support_isochronous:1;
+ uint8_t support_in:1; /* IN-token is supported */
+ uint8_t support_out:1; /* OUT-token is supported */
+};
+
+/*
+ * The following structure is used when trying to allocate hardware
+ * endpoints for a USB configuration in USB device side mode.
+ */
+struct usb_hw_ep_scratch_sub {
+ const struct usb_hw_ep_profile *pf;
+ uint16_t max_frame_size;
+ uint8_t hw_endpoint_out;
+ uint8_t hw_endpoint_in;
+ uint8_t needs_ep_type;
+ uint8_t needs_in:1;
+ uint8_t needs_out:1;
+};
+
+/*
+ * The following structure is used when trying to allocate hardware
+ * endpoints for a USB configuration in USB device side mode.
+ */
+struct usb_hw_ep_scratch {
+ struct usb_hw_ep_scratch_sub ep[USB_EP_MAX];
+ struct usb_hw_ep_scratch_sub *ep_max;
+ struct usb_config_descriptor *cd;
+ struct usb_device *udev;
+ struct usb_bus_methods *methods;
+ uint8_t bmOutAlloc[(USB_EP_MAX + 15) / 16];
+ uint8_t bmInAlloc[(USB_EP_MAX + 15) / 16];
+};
+
+/*
+ * The following structure is used when generating USB descriptors
+ * from USB templates.
+ */
+struct usb_temp_setup {
+ void *buf;
+ usb_size_t size;
+ enum usb_dev_speed usb_speed;
+ uint8_t self_powered;
+ uint8_t bNumEndpoints;
+ uint8_t bInterfaceNumber;
+ uint8_t bAlternateSetting;
+ uint8_t bConfigurationValue;
+ usb_error_t err;
+};
+
+/* prototypes */
+
+void usb_bus_mem_flush_all(struct usb_bus *bus, usb_bus_mem_cb_t *cb);
+uint8_t usb_bus_mem_alloc_all(struct usb_bus *bus, bus_dma_tag_t dmat, usb_bus_mem_cb_t *cb);
+void usb_bus_mem_free_all(struct usb_bus *bus, usb_bus_mem_cb_t *cb);
+uint16_t usb_isoc_time_expand(struct usb_bus *bus, uint16_t isoc_time_curr);
+uint16_t usbd_fs_isoc_schedule_isoc_time_expand(struct usb_device *udev, struct usb_fs_isoc_schedule **pp_start, struct usb_fs_isoc_schedule **pp_end, uint16_t isoc_time);
+uint8_t usbd_fs_isoc_schedule_alloc(struct usb_fs_isoc_schedule *fss, uint8_t *pstart, uint16_t len);
+
+#endif /* _USB_CONTROLLER_HH_ */
diff --git a/rtems/freebsd/dev/usb/usb_core.c b/rtems/freebsd/dev/usb/usb_core.c
new file mode 100644
index 00000000..9d4c051a
--- /dev/null
+++ b/rtems/freebsd/dev/usb/usb_core.c
@@ -0,0 +1,62 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/* $FreeBSD$ */
+/*-
+ * Copyright (c) 2008 Hans Petter Selasky. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * USB specifications and other documentation can be found at
+ * http://www.usb.org/developers/docs/ and
+ * http://www.usb.org/developers/devclass_docs/
+ */
+
+#include <rtems/freebsd/sys/stdint.h>
+#include <rtems/freebsd/sys/stddef.h>
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/queue.h>
+#include <rtems/freebsd/sys/types.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/bus.h>
+#include <rtems/freebsd/sys/linker_set.h>
+#include <rtems/freebsd/sys/module.h>
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/mutex.h>
+#include <rtems/freebsd/sys/condvar.h>
+#include <rtems/freebsd/sys/sysctl.h>
+#include <rtems/freebsd/sys/sx.h>
+#include <rtems/freebsd/sys/unistd.h>
+#include <rtems/freebsd/sys/callout.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/priv.h>
+
+#include <rtems/freebsd/dev/usb/usb.h>
+#include <rtems/freebsd/dev/usb/usbdi.h>
+
+MALLOC_DEFINE(M_USB, "USB", "USB");
+MALLOC_DEFINE(M_USBDEV, "USBdev", "USB device");
+MALLOC_DEFINE(M_USBHC, "USBHC", "USB host controller");
+
+MODULE_VERSION(usb, 1);
diff --git a/rtems/freebsd/dev/usb/usb_core.h b/rtems/freebsd/dev/usb/usb_core.h
new file mode 100644
index 00000000..9d724171
--- /dev/null
+++ b/rtems/freebsd/dev/usb/usb_core.h
@@ -0,0 +1,183 @@
+/* $FreeBSD$ */
+/*-
+ * Copyright (c) 2008 Hans Petter Selasky. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * Including this file is mandatory for all USB-related C files in the kernel.
+ */
+
+#ifndef _USB_CORE_HH_
+#define _USB_CORE_HH_
+
+/*
+ * The following macro tells whether a USB transfer is currently
+ * receiving or transmitting data.
+ */
+#define USB_GET_DATA_ISREAD(xfer) ((xfer)->flags_int.usb_mode == \
+ USB_MODE_DEVICE ? (((xfer)->endpointno & UE_DIR_IN) ? 0 : 1) : \
+ (((xfer)->endpointno & UE_DIR_IN) ? 1 : 0))
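The macro inverts the meaning of the IN bit between the two modes, because an
IN endpoint always carries data towards the host. Spelled out (UE_DIR_IN is
the standard endpoint direction bit):

	/* host mode,   IN  endpoint -> 1 (we read from the device)
	 * host mode,   OUT endpoint -> 0
	 * device mode, IN  endpoint -> 0 (we transmit towards the host)
	 * device mode, OUT endpoint -> 1 (the host writes to us) */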
+
+/* macros */
+
+#define USB_BUS_LOCK(_b) mtx_lock(&(_b)->bus_mtx)
+#define USB_BUS_UNLOCK(_b) mtx_unlock(&(_b)->bus_mtx)
+#define USB_BUS_LOCK_ASSERT(_b, _t) mtx_assert(&(_b)->bus_mtx, _t)
+#define USB_XFER_LOCK(_x) mtx_lock((_x)->xroot->xfer_mtx)
+#define USB_XFER_UNLOCK(_x) mtx_unlock((_x)->xroot->xfer_mtx)
+#define USB_XFER_LOCK_ASSERT(_x, _t) mtx_assert((_x)->xroot->xfer_mtx, _t)
+
+/* helper for converting pointers to integers */
+#define USB_P2U(ptr) \
+ (((const uint8_t *)(ptr)) - ((const uint8_t *)0))
+
+/* helper for computing offsets */
+#define USB_ADD_BYTES(ptr,size) \
+ ((void *)(USB_P2U(ptr) + (size)))
+
+/* debug macro */
+#define USB_ASSERT KASSERT
+
+/* structure prototypes */
+
+struct file;
+struct usb_bus;
+struct usb_device;
+struct usb_device_request;
+struct usb_page;
+struct usb_page_cache;
+struct usb_xfer;
+struct usb_xfer_root;
+
+/* typedefs */
+
+/* structures */
+
+/*
+ * The following structure defines a set of internal USB transfer
+ * flags.
+ */
+struct usb_xfer_flags_int {
+
+ enum usb_hc_mode usb_mode; /* shadow copy of "udev->usb_mode" */
+ uint16_t control_rem; /* remainder in bytes */
+
+ uint8_t open:1; /* set if USB pipe has been opened */
+	uint8_t transferring:1;		/* set if a USB transfer is in
+ * progress */
+ uint8_t did_dma_delay:1; /* set if we waited for HW DMA */
+ uint8_t did_close:1; /* set if we closed the USB transfer */
+	uint8_t draining:1;		/* set if we are draining a USB
+ * transfer */
+ uint8_t started:1; /* keeps track of started or stopped */
+ uint8_t bandwidth_reclaimed:1;
+ uint8_t control_xfr:1; /* set if control transfer */
+ uint8_t control_hdr:1; /* set if control header should be
+ * sent */
+ uint8_t control_act:1; /* set if control transfer is active */
+ uint8_t control_stall:1; /* set if control transfer should be stalled */
+
+ uint8_t short_frames_ok:1; /* filtered version */
+ uint8_t short_xfer_ok:1; /* filtered version */
+#if USB_HAVE_BUSDMA
+ uint8_t bdma_enable:1; /* filtered version (only set if
+ * hardware supports DMA) */
+ uint8_t bdma_no_post_sync:1; /* set if the USB callback wrapper
+ * should not do the BUS-DMA post sync
+ * operation */
+ uint8_t bdma_setup:1; /* set if BUS-DMA has been setup */
+#endif
+ uint8_t isochronous_xfr:1; /* set if isochronous transfer */
+ uint8_t curr_dma_set:1; /* used by USB HC/DC driver */
+ uint8_t can_cancel_immed:1; /* set if USB transfer can be
+ * cancelled immediately */
+ uint8_t doing_callback:1; /* set if executing the callback */
+};
+
+/*
+ * The following structure defines a USB transfer.
+ */
+struct usb_xfer {
+ struct usb_callout timeout_handle;
+ TAILQ_ENTRY(usb_xfer) wait_entry; /* used at various places */
+
+ struct usb_page_cache *buf_fixup; /* fixup buffer(s) */
+ struct usb_xfer_queue *wait_queue; /* pointer to queue that we
+ * are waiting on */
+ struct usb_page *dma_page_ptr;
+ struct usb_endpoint *endpoint; /* our USB endpoint */
+ struct usb_xfer_root *xroot; /* used by HC driver */
+ void *qh_start[2]; /* used by HC driver */
+ void *td_start[2]; /* used by HC driver */
+ void *td_transfer_first; /* used by HC driver */
+ void *td_transfer_last; /* used by HC driver */
+ void *td_transfer_cache; /* used by HC driver */
+ void *priv_sc; /* device driver data pointer 1 */
+ void *priv_fifo; /* device driver data pointer 2 */
+ void *local_buffer;
+ usb_frlength_t *frlengths;
+ struct usb_page_cache *frbuffers;
+ usb_callback_t *callback;
+
+ usb_frlength_t max_hc_frame_size;
+ usb_frlength_t max_data_length;
+ usb_frlength_t sumlen; /* sum of all lengths in bytes */
+ usb_frlength_t actlen; /* actual length in bytes */
+ usb_timeout_t timeout; /* milliseconds */
+
+ usb_frcount_t max_frame_count; /* initial value of "nframes" after
+ * setup */
+ usb_frcount_t nframes; /* number of USB frames to transfer */
+ usb_frcount_t aframes; /* actual number of USB frames
+ * transferred */
+
+ uint16_t max_packet_size;
+ uint16_t max_frame_size;
+ uint16_t qh_pos;
+ uint16_t isoc_time_complete; /* in ms */
+ usb_timeout_t interval; /* milliseconds */
+
+ uint8_t address; /* physical USB address */
+ uint8_t endpointno; /* physical USB endpoint */
+ uint8_t max_packet_count;
+ uint8_t usb_state;
+ uint8_t fps_shift; /* down shift of FPS, 0..3 */
+
+ usb_error_t error;
+
+ struct usb_xfer_flags flags;
+ struct usb_xfer_flags_int flags_int;
+};
+
+/* external variables */
+
+extern struct mtx usb_ref_lock;
+
+/* typedefs */
+
+typedef struct malloc_type *usb_malloc_type;
+
+/* prototypes */
+
+#endif /* _USB_CORE_HH_ */
diff --git a/rtems/freebsd/dev/usb/usb_debug.c b/rtems/freebsd/dev/usb/usb_debug.c
new file mode 100644
index 00000000..e9f4331d
--- /dev/null
+++ b/rtems/freebsd/dev/usb/usb_debug.c
@@ -0,0 +1,179 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/* $FreeBSD$ */
+/*-
+ * Copyright (c) 2008 Hans Petter Selasky. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <rtems/freebsd/sys/stdint.h>
+#include <rtems/freebsd/sys/stddef.h>
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/queue.h>
+#include <rtems/freebsd/sys/types.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/bus.h>
+#include <rtems/freebsd/sys/linker_set.h>
+#include <rtems/freebsd/sys/module.h>
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/mutex.h>
+#include <rtems/freebsd/sys/condvar.h>
+#include <rtems/freebsd/sys/sysctl.h>
+#include <rtems/freebsd/sys/sx.h>
+#include <rtems/freebsd/sys/unistd.h>
+#include <rtems/freebsd/sys/callout.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/priv.h>
+
+#include <rtems/freebsd/dev/usb/usb.h>
+#include <rtems/freebsd/dev/usb/usbdi.h>
+
+#include <rtems/freebsd/dev/usb/usb_core.h>
+#include <rtems/freebsd/dev/usb/usb_debug.h>
+#include <rtems/freebsd/dev/usb/usb_process.h>
+#include <rtems/freebsd/dev/usb/usb_device.h>
+#include <rtems/freebsd/dev/usb/usb_busdma.h>
+#include <rtems/freebsd/dev/usb/usb_transfer.h>
+
+#include <rtems/freebsd/ddb/ddb.h>
+#include <rtems/freebsd/ddb/db_sym.h>
+
+/*
+ * Define this unconditionally in case a kernel module is loaded that
+ * has been compiled with debugging options.
+ */
+int usb_debug = 0;
+
+SYSCTL_NODE(_hw, OID_AUTO, usb, CTLFLAG_RW, 0, "USB debugging");
+SYSCTL_INT(_hw_usb, OID_AUTO, debug, CTLFLAG_RW,
+ &usb_debug, 0, "Debug level");
+
+TUNABLE_INT("hw.usb.debug", &usb_debug);
+
+/*------------------------------------------------------------------------*
+ * usb_dump_iface
+ *
+ * This function dumps information about a USB interface.
+ *------------------------------------------------------------------------*/
+void
+usb_dump_iface(struct usb_interface *iface)
+{
+ printf("usb_dump_iface: iface=%p\n", iface);
+ if (iface == NULL) {
+ return;
+ }
+ printf(" iface=%p idesc=%p altindex=%d\n",
+ iface, iface->idesc, iface->alt_index);
+}
+
+/*------------------------------------------------------------------------*
+ * usb_dump_device
+ *
+ * This function dumps information about a USB device.
+ *------------------------------------------------------------------------*/
+void
+usb_dump_device(struct usb_device *udev)
+{
+ printf("usb_dump_device: dev=%p\n", udev);
+ if (udev == NULL) {
+ return;
+ }
+ printf(" bus=%p \n"
+ " address=%d config=%d depth=%d speed=%d self_powered=%d\n"
+ " power=%d langid=%d\n",
+ udev->bus,
+ udev->address, udev->curr_config_no, udev->depth, udev->speed,
+ udev->flags.self_powered, udev->power, udev->langid);
+}
+
+/*------------------------------------------------------------------------*
+ * usb_dump_queue
+ *
+ * This function dumps the USB transfers that are queued up on a USB endpoint.
+ *------------------------------------------------------------------------*/
+void
+usb_dump_queue(struct usb_endpoint *ep)
+{
+ struct usb_xfer *xfer;
+
+ printf("usb_dump_queue: endpoint=%p xfer: ", ep);
+ TAILQ_FOREACH(xfer, &ep->endpoint_q.head, wait_entry) {
+ printf(" %p", xfer);
+ }
+ printf("\n");
+}
+
+/*------------------------------------------------------------------------*
+ * usb_dump_endpoint
+ *
+ * This function dumps information about a USB endpoint.
+ *------------------------------------------------------------------------*/
+void
+usb_dump_endpoint(struct usb_endpoint *ep)
+{
+ if (ep) {
+ printf("usb_dump_endpoint: endpoint=%p", ep);
+
+ printf(" edesc=%p isoc_next=%d toggle_next=%d",
+ ep->edesc, ep->isoc_next, ep->toggle_next);
+
+ if (ep->edesc) {
+ printf(" bEndpointAddress=0x%02x",
+ ep->edesc->bEndpointAddress);
+ }
+ printf("\n");
+ usb_dump_queue(ep);
+ } else {
+ printf("usb_dump_endpoint: endpoint=NULL\n");
+ }
+}
+
+/*------------------------------------------------------------------------*
+ * usb_dump_xfer
+ *
+ * This function dumps information about a USB transfer.
+ *------------------------------------------------------------------------*/
+void
+usb_dump_xfer(struct usb_xfer *xfer)
+{
+ struct usb_device *udev;
+ printf("usb_dump_xfer: xfer=%p\n", xfer);
+ if (xfer == NULL) {
+ return;
+ }
+ if (xfer->endpoint == NULL) {
+ printf("xfer %p: endpoint=NULL\n",
+ xfer);
+ return;
+ }
+ udev = xfer->xroot->udev;
+ printf("xfer %p: udev=%p vid=0x%04x pid=0x%04x addr=%d "
+ "endpoint=%p ep=0x%02x attr=0x%02x\n",
+ xfer, udev,
+ UGETW(udev->ddesc.idVendor),
+ UGETW(udev->ddesc.idProduct),
+ udev->address, xfer->endpoint,
+ xfer->endpoint->edesc->bEndpointAddress,
+ xfer->endpoint->edesc->bmAttributes);
+}
diff --git a/rtems/freebsd/dev/usb/usb_debug.h b/rtems/freebsd/dev/usb/usb_debug.h
new file mode 100644
index 00000000..aa3a5a49
--- /dev/null
+++ b/rtems/freebsd/dev/usb/usb_debug.h
@@ -0,0 +1,62 @@
+/* $FreeBSD$ */
+/*-
+ * Copyright (c) 2008 Hans Petter Selasky. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/* This file contains various factored out debug macros. */
+
+#ifndef _USB_DEBUG_HH_
+#define _USB_DEBUG_HH_
+
+/* Declare global USB debug variable. */
+extern int usb_debug;
+
+/* Check if USB debugging is enabled. */
+#ifdef USB_DEBUG_VAR
+#ifdef USB_DEBUG
+#define DPRINTFN(n,fmt,...) do { \
+ if ((USB_DEBUG_VAR) >= (n)) { \
+ printf("%s: " fmt, \
+ __FUNCTION__,## __VA_ARGS__); \
+ } \
+} while (0)
+#define DPRINTF(...) DPRINTFN(1, __VA_ARGS__)
+#else
+#define DPRINTF(...) do { } while (0)
+#define DPRINTFN(...) do { } while (0)
+#endif
+#endif
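A consumer names its own debug variable before including this header, exactly
as usb_dev.c does further down in this patch; a minimal sketch with an
illustrative variable name:

	#define USB_DEBUG_VAR mydriver_debug
	#include <rtems/freebsd/dev/usb/usb_debug.h>

	/* prints when the kernel is built with USB_DEBUG and
	 * mydriver_debug >= 2 */
	DPRINTFN(2, "xfer=%p len=%u\n", xfer, len);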
+
+struct usb_interface;
+struct usb_device;
+struct usb_endpoint;
+struct usb_xfer;
+
+void usb_dump_iface(struct usb_interface *iface);
+void usb_dump_device(struct usb_device *udev);
+void usb_dump_queue(struct usb_endpoint *ep);
+void usb_dump_endpoint(struct usb_endpoint *ep);
+void usb_dump_xfer(struct usb_xfer *xfer);
+
+#endif /* _USB_DEBUG_HH_ */
diff --git a/rtems/freebsd/dev/usb/usb_dev.c b/rtems/freebsd/dev/usb/usb_dev.c
new file mode 100644
index 00000000..56f1e357
--- /dev/null
+++ b/rtems/freebsd/dev/usb/usb_dev.c
@@ -0,0 +1,2309 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/* $FreeBSD$ */
+/*-
+ * Copyright (c) 2006-2008 Hans Petter Selasky. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ *
+ * usb_dev.c - An abstraction layer for creating devices under /dev/...
+ */
+
+#include <rtems/freebsd/sys/stdint.h>
+#include <rtems/freebsd/sys/stddef.h>
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/queue.h>
+#include <rtems/freebsd/sys/types.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/bus.h>
+#include <rtems/freebsd/sys/linker_set.h>
+#include <rtems/freebsd/sys/module.h>
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/mutex.h>
+#include <rtems/freebsd/sys/condvar.h>
+#include <rtems/freebsd/sys/sysctl.h>
+#include <rtems/freebsd/sys/sx.h>
+#include <rtems/freebsd/sys/unistd.h>
+#include <rtems/freebsd/sys/callout.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/priv.h>
+#ifndef __rtems__
+#include <rtems/freebsd/sys/vnode.h>
+#endif
+#include <rtems/freebsd/sys/conf.h>
+#include <rtems/freebsd/sys/fcntl.h>
+
+#include <rtems/freebsd/dev/usb/usb.h>
+#include <rtems/freebsd/dev/usb/usb_ioctl.h>
+#include <rtems/freebsd/dev/usb/usbdi.h>
+#include <rtems/freebsd/dev/usb/usbdi_util.h>
+
+#define USB_DEBUG_VAR usb_fifo_debug
+
+#include <rtems/freebsd/dev/usb/usb_core.h>
+#include <rtems/freebsd/dev/usb/usb_dev.h>
+#include <rtems/freebsd/dev/usb/usb_mbuf.h>
+#include <rtems/freebsd/dev/usb/usb_process.h>
+#include <rtems/freebsd/dev/usb/usb_device.h>
+#include <rtems/freebsd/dev/usb/usb_debug.h>
+#include <rtems/freebsd/dev/usb/usb_busdma.h>
+#include <rtems/freebsd/dev/usb/usb_generic.h>
+#include <rtems/freebsd/dev/usb/usb_dynamic.h>
+#include <rtems/freebsd/dev/usb/usb_util.h>
+
+#include <rtems/freebsd/dev/usb/usb_controller.h>
+#include <rtems/freebsd/dev/usb/usb_bus.h>
+
+#include <rtems/freebsd/sys/filio.h>
+#include <rtems/freebsd/sys/ttycom.h>
+#include <rtems/freebsd/sys/syscallsubr.h>
+
+#include <rtems/freebsd/machine/stdarg.h>
+
+#if USB_HAVE_UGEN
+
+#ifdef USB_DEBUG
+static int usb_fifo_debug = 0;
+
+SYSCTL_NODE(_hw_usb, OID_AUTO, dev, CTLFLAG_RW, 0, "USB device");
+SYSCTL_INT(_hw_usb_dev, OID_AUTO, debug, CTLFLAG_RW,
+ &usb_fifo_debug, 0, "Debug Level");
+
+TUNABLE_INT("hw.usb.dev.debug", &usb_fifo_debug);
+#endif
+
+#if ((__FreeBSD_version >= 700001) || (__FreeBSD_version == 0) || \
+ ((__FreeBSD_version >= 600034) && (__FreeBSD_version < 700000)))
+#define USB_UCRED struct ucred *ucred,
+#else
+#define USB_UCRED
+#endif
+
+/* prototypes */
+
+static int usb_fifo_open(struct usb_cdev_privdata *,
+ struct usb_fifo *, int);
+static void usb_fifo_close(struct usb_fifo *, int);
+static void usb_dev_init(void *);
+static void usb_dev_init_post(void *);
+static void usb_dev_uninit(void *);
+static int usb_fifo_uiomove(struct usb_fifo *, void *, int,
+ struct uio *);
+static void usb_fifo_check_methods(struct usb_fifo_methods *);
+static struct usb_fifo *usb_fifo_alloc(void);
+static struct usb_endpoint *usb_dev_get_ep(struct usb_device *, uint8_t,
+ uint8_t);
+static void usb_loc_fill(struct usb_fs_privdata *,
+ struct usb_cdev_privdata *);
+static void usb_close(void *);
+static usb_error_t usb_ref_device(struct usb_cdev_privdata *, struct usb_cdev_refdata *, int);
+static usb_error_t usb_usb_ref_device(struct usb_cdev_privdata *, struct usb_cdev_refdata *);
+static void usb_unref_device(struct usb_cdev_privdata *, struct usb_cdev_refdata *);
+
+static d_open_t usb_open;
+static d_ioctl_t usb_ioctl;
+static d_read_t usb_read;
+static d_write_t usb_write;
+static d_poll_t usb_poll;
+
+static d_ioctl_t usb_static_ioctl;
+
+static usb_fifo_open_t usb_fifo_dummy_open;
+static usb_fifo_close_t usb_fifo_dummy_close;
+static usb_fifo_ioctl_t usb_fifo_dummy_ioctl;
+static usb_fifo_cmd_t usb_fifo_dummy_cmd;
+
+/* character device structure used for devices (/dev/ugenX.Y and /dev/uXXX) */
+struct cdevsw usb_devsw = {
+ .d_version = D_VERSION,
+ .d_open = usb_open,
+ .d_ioctl = usb_ioctl,
+ .d_name = "usbdev",
+ .d_flags = D_TRACKCLOSE,
+ .d_read = usb_read,
+ .d_write = usb_write,
+ .d_poll = usb_poll
+};
+
+static struct cdev* usb_dev = NULL;
+
+/* character device structure used for /dev/usb */
+static struct cdevsw usb_static_devsw = {
+ .d_version = D_VERSION,
+ .d_ioctl = usb_static_ioctl,
+ .d_name = "usb"
+};
+
+static TAILQ_HEAD(, usb_symlink) usb_sym_head;
+static struct sx usb_sym_lock;
+
+struct mtx usb_ref_lock;
+
+/*------------------------------------------------------------------------*
+ * usb_loc_fill
+ *
+ * This is used to fill out a usb_cdev_privdata structure based on the
+ * device's address as contained in usb_fs_privdata.
+ *------------------------------------------------------------------------*/
+static void
+usb_loc_fill(struct usb_fs_privdata* pd, struct usb_cdev_privdata *cpd)
+{
+ cpd->bus_index = pd->bus_index;
+ cpd->dev_index = pd->dev_index;
+ cpd->ep_addr = pd->ep_addr;
+ cpd->fifo_index = pd->fifo_index;
+}
+
+/*------------------------------------------------------------------------*
+ * usb_ref_device
+ *
+ * This function is used to atomically reference a USB device by its
+ * device location. If this function returns success, the USB device
+ * will not disappear until it is unreferenced.
+ *
+ * Return values:
+ * 0: Success, refcount incremented on the given USB device.
+ * Else: Failure.
+ *------------------------------------------------------------------------*/
+static usb_error_t
+usb_ref_device(struct usb_cdev_privdata *cpd,
+ struct usb_cdev_refdata *crd, int need_uref)
+{
+ struct usb_fifo **ppf;
+ struct usb_fifo *f;
+
+ DPRINTFN(2, "cpd=%p need uref=%d\n", cpd, need_uref);
+
+ /* clear all refs */
+ memset(crd, 0, sizeof(*crd));
+
+ mtx_lock(&usb_ref_lock);
+ cpd->bus = devclass_get_softc(usb_devclass_ptr, cpd->bus_index);
+ if (cpd->bus == NULL) {
+ DPRINTFN(2, "no bus at %u\n", cpd->bus_index);
+ goto error;
+ }
+ cpd->udev = cpd->bus->devices[cpd->dev_index];
+ if (cpd->udev == NULL) {
+ DPRINTFN(2, "no device at %u\n", cpd->dev_index);
+ goto error;
+ }
+ if (cpd->udev->refcount == USB_DEV_REF_MAX) {
+ DPRINTFN(2, "no dev ref\n");
+ goto error;
+ }
+ if (need_uref) {
+ DPRINTFN(2, "ref udev - needed\n");
+ cpd->udev->refcount++;
+
+ mtx_unlock(&usb_ref_lock);
+
+ /*
+ * We need to grab the sx-lock before grabbing the
+ * FIFO refs to avoid deadlock at detach!
+ */
+ usbd_enum_lock(cpd->udev);
+
+ mtx_lock(&usb_ref_lock);
+
+ /*
+ * Set "is_uref" after grabbing the default SX lock
+ */
+ crd->is_uref = 1;
+ }
+
+ /* check if we are doing an open */
+ if (cpd->fflags == 0) {
+ /* use zero defaults */
+ } else {
+ /* check for write */
+ if (cpd->fflags & FWRITE) {
+ ppf = cpd->udev->fifo;
+ f = ppf[cpd->fifo_index + USB_FIFO_TX];
+ crd->txfifo = f;
+ crd->is_write = 1; /* ref */
+ if (f == NULL || f->refcount == USB_FIFO_REF_MAX)
+ goto error;
+ if (f->curr_cpd != cpd)
+ goto error;
+ /* check if USB-FS is active */
+ if (f->fs_ep_max != 0) {
+ crd->is_usbfs = 1;
+ }
+ }
+
+ /* check for read */
+ if (cpd->fflags & FREAD) {
+ ppf = cpd->udev->fifo;
+ f = ppf[cpd->fifo_index + USB_FIFO_RX];
+ crd->rxfifo = f;
+ crd->is_read = 1; /* ref */
+ if (f == NULL || f->refcount == USB_FIFO_REF_MAX)
+ goto error;
+ if (f->curr_cpd != cpd)
+ goto error;
+ /* check if USB-FS is active */
+ if (f->fs_ep_max != 0) {
+ crd->is_usbfs = 1;
+ }
+ }
+ }
+
+ /* when everything is OK we increment the refcounts */
+ if (crd->is_write) {
+ DPRINTFN(2, "ref write\n");
+ crd->txfifo->refcount++;
+ }
+ if (crd->is_read) {
+ DPRINTFN(2, "ref read\n");
+ crd->rxfifo->refcount++;
+ }
+ mtx_unlock(&usb_ref_lock);
+
+ return (0);
+
+error:
+ if (crd->is_uref) {
+ usbd_enum_unlock(cpd->udev);
+
+ if (--(cpd->udev->refcount) == 0) {
+ cv_signal(&cpd->udev->ref_cv);
+ }
+ }
+ mtx_unlock(&usb_ref_lock);
+ DPRINTFN(2, "fail\n");
+ return (USB_ERR_INVAL);
+}
+
+/*------------------------------------------------------------------------*
+ * usb_usb_ref_device
+ *
+ * This function is used to upgrade a USB reference to also include
+ * the USB device reference on a given USB location.
+ *
+ * Return values:
+ * 0: Success, refcount incremented on the given USB device.
+ * Else: Failure.
+ *------------------------------------------------------------------------*/
+static usb_error_t
+usb_usb_ref_device(struct usb_cdev_privdata *cpd,
+ struct usb_cdev_refdata *crd)
+{
+ /*
+	 * Check if we already have a USB reference on this location:
+ */
+ if (crd->is_uref)
+ return (0); /* success */
+
+ /*
+	 * To avoid deadlock at detach we need to drop the FIFO
+	 * reference and acquire a fresh one!
+ */
+ usb_unref_device(cpd, crd);
+
+ return (usb_ref_device(cpd, crd, 1 /* need uref */));
+}
+
+/*------------------------------------------------------------------------*
+ * usb_unref_device
+ *
+ * This function releases the references taken by "usb_ref_device()"
+ * on the given USB device and its FIFOs.
+ *------------------------------------------------------------------------*/
+static void
+usb_unref_device(struct usb_cdev_privdata *cpd,
+ struct usb_cdev_refdata *crd)
+{
+
+ DPRINTFN(2, "cpd=%p is_uref=%d\n", cpd, crd->is_uref);
+
+ if (crd->is_uref)
+ usbd_enum_unlock(cpd->udev);
+
+ mtx_lock(&usb_ref_lock);
+ if (crd->is_read) {
+ if (--(crd->rxfifo->refcount) == 0) {
+ cv_signal(&crd->rxfifo->cv_drain);
+ }
+ crd->is_read = 0;
+ }
+ if (crd->is_write) {
+ if (--(crd->txfifo->refcount) == 0) {
+ cv_signal(&crd->txfifo->cv_drain);
+ }
+ crd->is_write = 0;
+ }
+ if (crd->is_uref) {
+ if (--(cpd->udev->refcount) == 0) {
+ cv_signal(&cpd->udev->ref_cv);
+ }
+ crd->is_uref = 0;
+ }
+ mtx_unlock(&usb_ref_lock);
+}
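+
+/*
+ * A minimal usage sketch (illustrative only; usb_open() and
+ * usb_close() below implement the real thing): "usb_ref_device()"
+ * and "usb_unref_device()" bracket any access to the referenced
+ * device.  Assuming "cpd" has been filled in by usb_loc_fill():
+ *
+ *	struct usb_cdev_refdata refs;
+ *
+ *	if (usb_ref_device(cpd, &refs, 1) == 0) {
+ *		... access "cpd->udev" here; it cannot disappear ...
+ *		usb_unref_device(cpd, &refs);
+ *	}
+ */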
+
+static struct usb_fifo *
+usb_fifo_alloc(void)
+{
+ struct usb_fifo *f;
+
+ f = malloc(sizeof(*f), M_USBDEV, M_WAITOK | M_ZERO);
+ if (f) {
+ cv_init(&f->cv_io, "FIFO-IO");
+ cv_init(&f->cv_drain, "FIFO-DRAIN");
+ f->refcount = 1;
+ }
+ return (f);
+}
+
+/*------------------------------------------------------------------------*
+ * usb_fifo_create
+ *------------------------------------------------------------------------*/
+static int
+usb_fifo_create(struct usb_cdev_privdata *cpd,
+ struct usb_cdev_refdata *crd)
+{
+ struct usb_device *udev = cpd->udev;
+ struct usb_fifo *f;
+ struct usb_endpoint *ep;
+ uint8_t n;
+ uint8_t is_tx;
+ uint8_t is_rx;
+ uint8_t no_null;
+ uint8_t is_busy;
+ int e = cpd->ep_addr;
+
+ is_tx = (cpd->fflags & FWRITE) ? 1 : 0;
+ is_rx = (cpd->fflags & FREAD) ? 1 : 0;
+ no_null = 1;
+ is_busy = 0;
+
+ /* Preallocated FIFO */
+ if (e < 0) {
+ DPRINTFN(5, "Preallocated FIFO\n");
+ if (is_tx) {
+ f = udev->fifo[cpd->fifo_index + USB_FIFO_TX];
+ if (f == NULL)
+ return (EINVAL);
+ crd->txfifo = f;
+ }
+ if (is_rx) {
+ f = udev->fifo[cpd->fifo_index + USB_FIFO_RX];
+ if (f == NULL)
+ return (EINVAL);
+ crd->rxfifo = f;
+ }
+ return (0);
+ }
+
+ KASSERT(e >= 0 && e <= 15, ("endpoint %d out of range", e));
+
+ /* search for a free FIFO slot */
+ DPRINTFN(5, "Endpoint device, searching for 0x%02x\n", e);
+ for (n = 0;; n += 2) {
+
+ if (n == USB_FIFO_MAX) {
+ if (no_null) {
+ no_null = 0;
+ n = 0;
+ } else {
+ /* end of FIFOs reached */
+ DPRINTFN(5, "out of FIFOs\n");
+ return (ENOMEM);
+ }
+ }
+ /* Check for TX FIFO */
+ if (is_tx) {
+ f = udev->fifo[n + USB_FIFO_TX];
+ if (f != NULL) {
+ if (f->dev_ep_index != e) {
+ /* wrong endpoint index */
+ continue;
+ }
+ if (f->curr_cpd != NULL) {
+ /* FIFO is opened */
+ is_busy = 1;
+ continue;
+ }
+ } else if (no_null) {
+ continue;
+ }
+ }
+ /* Check for RX FIFO */
+ if (is_rx) {
+ f = udev->fifo[n + USB_FIFO_RX];
+ if (f != NULL) {
+ if (f->dev_ep_index != e) {
+ /* wrong endpoint index */
+ continue;
+ }
+ if (f->curr_cpd != NULL) {
+ /* FIFO is opened */
+ is_busy = 1;
+ continue;
+ }
+ } else if (no_null) {
+ continue;
+ }
+ }
+ break;
+ }
+
+ if (no_null == 0) {
+ if (e >= (USB_EP_MAX / 2)) {
+ /* we don't create any endpoints in this range */
+ DPRINTFN(5, "ep out of range\n");
+ return (is_busy ? EBUSY : EINVAL);
+ }
+ }
+
+ if ((e != 0) && is_busy) {
+ /*
+ * Only the default control endpoint is allowed to be
+ * opened multiple times!
+ */
+ DPRINTFN(5, "busy\n");
+ return (EBUSY);
+ }
+
+ /* Check TX FIFO */
+ if (is_tx &&
+ (udev->fifo[n + USB_FIFO_TX] == NULL)) {
+ ep = usb_dev_get_ep(udev, e, USB_FIFO_TX);
+ DPRINTFN(5, "dev_get_endpoint(%d, 0x%x)\n", e, USB_FIFO_TX);
+ if (ep == NULL) {
+ DPRINTFN(5, "dev_get_endpoint returned NULL\n");
+ return (EINVAL);
+ }
+ f = usb_fifo_alloc();
+ if (f == NULL) {
+ DPRINTFN(5, "could not alloc tx fifo\n");
+ return (ENOMEM);
+ }
+ /* update some fields */
+ f->fifo_index = n + USB_FIFO_TX;
+ f->dev_ep_index = e;
+ f->priv_mtx = &udev->device_mtx;
+ f->priv_sc0 = ep;
+ f->methods = &usb_ugen_methods;
+ f->iface_index = ep->iface_index;
+ f->udev = udev;
+ mtx_lock(&usb_ref_lock);
+ udev->fifo[n + USB_FIFO_TX] = f;
+ mtx_unlock(&usb_ref_lock);
+ }
+ /* Check RX FIFO */
+ if (is_rx &&
+ (udev->fifo[n + USB_FIFO_RX] == NULL)) {
+
+ ep = usb_dev_get_ep(udev, e, USB_FIFO_RX);
+ DPRINTFN(5, "dev_get_endpoint(%d, 0x%x)\n", e, USB_FIFO_RX);
+ if (ep == NULL) {
+ DPRINTFN(5, "dev_get_endpoint returned NULL\n");
+ return (EINVAL);
+ }
+ f = usb_fifo_alloc();
+ if (f == NULL) {
+ DPRINTFN(5, "could not alloc rx fifo\n");
+ return (ENOMEM);
+ }
+ /* update some fields */
+ f->fifo_index = n + USB_FIFO_RX;
+ f->dev_ep_index = e;
+ f->priv_mtx = &udev->device_mtx;
+ f->priv_sc0 = ep;
+ f->methods = &usb_ugen_methods;
+ f->iface_index = ep->iface_index;
+ f->udev = udev;
+ mtx_lock(&usb_ref_lock);
+ udev->fifo[n + USB_FIFO_RX] = f;
+ mtx_unlock(&usb_ref_lock);
+ }
+ if (is_tx) {
+ crd->txfifo = udev->fifo[n + USB_FIFO_TX];
+ }
+ if (is_rx) {
+ crd->rxfifo = udev->fifo[n + USB_FIFO_RX];
+ }
+ /* fill out fifo index */
+ DPRINTFN(5, "fifo index = %d\n", n);
+ cpd->fifo_index = n;
+
+ /* complete */
+
+ return (0);
+}
+
+void
+usb_fifo_free(struct usb_fifo *f)
+{
+ uint8_t n;
+
+ if (f == NULL) {
+ /* be NULL safe */
+ return;
+ }
+ /* destroy symlink devices, if any */
+ for (n = 0; n != 2; n++) {
+ if (f->symlink[n]) {
+ usb_free_symlink(f->symlink[n]);
+ f->symlink[n] = NULL;
+ }
+ }
+ mtx_lock(&usb_ref_lock);
+
+ /* delink ourselves to stop calls from userland */
+ if ((f->fifo_index < USB_FIFO_MAX) &&
+ (f->udev != NULL) &&
+ (f->udev->fifo[f->fifo_index] == f)) {
+ f->udev->fifo[f->fifo_index] = NULL;
+ } else {
+ DPRINTFN(0, "USB FIFO %p has not been linked\n", f);
+ }
+
+ /* decrease refcount */
+ f->refcount--;
+ /* prevent any write flush */
+ f->flag_iserror = 1;
+ /* need to wait until all callers have exited */
+ while (f->refcount != 0) {
+ mtx_unlock(&usb_ref_lock); /* avoid LOR */
+ mtx_lock(f->priv_mtx);
+ /* get I/O thread out of any sleep state */
+ if (f->flag_sleeping) {
+ f->flag_sleeping = 0;
+ cv_broadcast(&f->cv_io);
+ }
+ mtx_unlock(f->priv_mtx);
+ mtx_lock(&usb_ref_lock);
+
+ /* wait for sync */
+ cv_wait(&f->cv_drain, &usb_ref_lock);
+ }
+ mtx_unlock(&usb_ref_lock);
+
+ /* take care of closing the device here, if any */
+ usb_fifo_close(f, 0);
+
+ cv_destroy(&f->cv_io);
+ cv_destroy(&f->cv_drain);
+
+ free(f, M_USBDEV);
+}
+
+static struct usb_endpoint *
+usb_dev_get_ep(struct usb_device *udev, uint8_t ep_index, uint8_t dir)
+{
+ struct usb_endpoint *ep;
+ uint8_t ep_dir;
+
+ if (ep_index == 0) {
+ ep = &udev->ctrl_ep;
+ } else {
+ if (dir == USB_FIFO_RX) {
+ if (udev->flags.usb_mode == USB_MODE_HOST) {
+ ep_dir = UE_DIR_IN;
+ } else {
+ ep_dir = UE_DIR_OUT;
+ }
+ } else {
+ if (udev->flags.usb_mode == USB_MODE_HOST) {
+ ep_dir = UE_DIR_OUT;
+ } else {
+ ep_dir = UE_DIR_IN;
+ }
+ }
+ ep = usbd_get_ep_by_addr(udev, ep_index | ep_dir);
+ }
+
+ if (ep == NULL) {
+ /* if the endpoint does not exist then return */
+ return (NULL);
+ }
+ if (ep->edesc == NULL) {
+ /* invalid endpoint */
+ return (NULL);
+ }
+ return (ep); /* success */
+}
+
+/*------------------------------------------------------------------------*
+ * usb_fifo_open
+ *
+ * Returns:
+ * 0: Success
+ * Else: Failure
+ *------------------------------------------------------------------------*/
+static int
+usb_fifo_open(struct usb_cdev_privdata *cpd,
+ struct usb_fifo *f, int fflags)
+{
+ int err;
+
+ if (f == NULL) {
+ /* no FIFO there */
+ DPRINTFN(2, "no FIFO\n");
+ return (ENXIO);
+ }
+ /* remove FWRITE and FREAD flags */
+ fflags &= ~(FWRITE | FREAD);
+
+ /* set correct file flags */
+ if ((f->fifo_index & 1) == USB_FIFO_TX) {
+ fflags |= FWRITE;
+ } else {
+ fflags |= FREAD;
+ }
+
+ /* check if we are already opened */
+ /* we don't need any locks when checking this variable */
+ if (f->curr_cpd != NULL) {
+ err = EBUSY;
+ goto done;
+ }
+
+ /* reset short flag before open */
+ f->flag_short = 0;
+
+ /* call open method */
+ err = (f->methods->f_open) (f, fflags);
+ if (err) {
+ goto done;
+ }
+ mtx_lock(f->priv_mtx);
+
+ /* reset sleep flag */
+ f->flag_sleeping = 0;
+
+ /* reset error flag */
+ f->flag_iserror = 0;
+
+ /* reset complete flag */
+ f->flag_iscomplete = 0;
+
+ /* reset select flag */
+ f->flag_isselect = 0;
+
+ /* reset flushing flag */
+ f->flag_flushing = 0;
+
+ /* reset ASYNC proc flag */
+ f->async_p = NULL;
+
+ mtx_lock(&usb_ref_lock);
+ /* flag the fifo as opened to prevent others */
+ f->curr_cpd = cpd;
+ mtx_unlock(&usb_ref_lock);
+
+ /* reset queue */
+ usb_fifo_reset(f);
+
+ mtx_unlock(f->priv_mtx);
+done:
+ return (err);
+}
+
+/*------------------------------------------------------------------------*
+ * usb_fifo_reset
+ *------------------------------------------------------------------------*/
+void
+usb_fifo_reset(struct usb_fifo *f)
+{
+ struct usb_mbuf *m;
+
+ if (f == NULL) {
+ return;
+ }
+ while (1) {
+ USB_IF_DEQUEUE(&f->used_q, m);
+ if (m) {
+ USB_IF_ENQUEUE(&f->free_q, m);
+ } else {
+ break;
+ }
+ }
+ /* reset have fragment flag */
+ f->flag_have_fragment = 0;
+}
+
+/*------------------------------------------------------------------------*
+ * usb_fifo_close
+ *------------------------------------------------------------------------*/
+static void
+usb_fifo_close(struct usb_fifo *f, int fflags)
+{
+ int err;
+
+ /* check if we are not opened */
+ if (f->curr_cpd == NULL) {
+ /* nothing to do - already closed */
+ return;
+ }
+ mtx_lock(f->priv_mtx);
+
+ /* clear current cdev private data pointer */
+ f->curr_cpd = NULL;
+
+ /* check if we are selected */
+ if (f->flag_isselect) {
+ selwakeup(&f->selinfo);
+ f->flag_isselect = 0;
+ }
+ /* check if a thread wants SIGIO */
+ if (f->async_p != NULL) {
+ PROC_LOCK(f->async_p);
+ psignal(f->async_p, SIGIO);
+ PROC_UNLOCK(f->async_p);
+ f->async_p = NULL;
+ }
+ /* remove FWRITE and FREAD flags */
+ fflags &= ~(FWRITE | FREAD);
+
+ /* flush written data, if any */
+ if ((f->fifo_index & 1) == USB_FIFO_TX) {
+
+ if (!f->flag_iserror) {
+
+ /* set flushing flag */
+ f->flag_flushing = 1;
+
+ /* get the last packet in */
+ if (f->flag_have_fragment) {
+ struct usb_mbuf *m;
+ f->flag_have_fragment = 0;
+ USB_IF_DEQUEUE(&f->free_q, m);
+ if (m) {
+ USB_IF_ENQUEUE(&f->used_q, m);
+ }
+ }
+
+ /* start write transfer, if not already started */
+ (f->methods->f_start_write) (f);
+
+ /* check if flushed already */
+ while (f->flag_flushing &&
+ (!f->flag_iserror)) {
+ /* wait until all data has been written */
+ f->flag_sleeping = 1;
+ err = cv_wait_sig(&f->cv_io, f->priv_mtx);
+ if (err) {
+ DPRINTF("signal received\n");
+ break;
+ }
+ }
+ }
+ fflags |= FWRITE;
+
+ /* stop write transfer, if not already stopped */
+ (f->methods->f_stop_write) (f);
+ } else {
+ fflags |= FREAD;
+
+		/* stop read transfer, if not already stopped */
+ (f->methods->f_stop_read) (f);
+ }
+
+ /* check if we are sleeping */
+ if (f->flag_sleeping) {
+ DPRINTFN(2, "Sleeping at close!\n");
+ }
+ mtx_unlock(f->priv_mtx);
+
+ /* call close method */
+ (f->methods->f_close) (f, fflags);
+
+ DPRINTF("closed\n");
+}
+
+/*------------------------------------------------------------------------*
+ * usb_open - cdev callback
+ *------------------------------------------------------------------------*/
+static int
+usb_open(struct cdev *dev, int fflags, int devtype, struct thread *td)
+{
+ struct usb_fs_privdata* pd = (struct usb_fs_privdata*)dev->si_drv1;
+ struct usb_cdev_refdata refs;
+ struct usb_cdev_privdata *cpd;
+ int err, ep;
+
+ DPRINTFN(2, "%s fflags=0x%08x\n", dev->si_name, fflags);
+
+ KASSERT(fflags & (FREAD|FWRITE), ("invalid open flags"));
+ if (((fflags & FREAD) && !(pd->mode & FREAD)) ||
+ ((fflags & FWRITE) && !(pd->mode & FWRITE))) {
+ DPRINTFN(2, "access mode not supported\n");
+ return (EPERM);
+ }
+
+ cpd = malloc(sizeof(*cpd), M_USBDEV, M_WAITOK | M_ZERO);
+ ep = cpd->ep_addr = pd->ep_addr;
+
+ usb_loc_fill(pd, cpd);
+ err = usb_ref_device(cpd, &refs, 1);
+ if (err) {
+ DPRINTFN(2, "cannot ref device\n");
+ free(cpd, M_USBDEV);
+ return (ENXIO);
+ }
+ cpd->fflags = fflags; /* access mode for open lifetime */
+
+ /* create FIFOs, if any */
+ err = usb_fifo_create(cpd, &refs);
+ /* check for error */
+ if (err) {
+ DPRINTFN(2, "cannot create fifo\n");
+ usb_unref_device(cpd, &refs);
+ free(cpd, M_USBDEV);
+ return (err);
+ }
+ if (fflags & FREAD) {
+ err = usb_fifo_open(cpd, refs.rxfifo, fflags);
+ if (err) {
+ DPRINTFN(2, "read open failed\n");
+ usb_unref_device(cpd, &refs);
+ free(cpd, M_USBDEV);
+ return (err);
+ }
+ }
+ if (fflags & FWRITE) {
+ err = usb_fifo_open(cpd, refs.txfifo, fflags);
+ if (err) {
+ DPRINTFN(2, "write open failed\n");
+ if (fflags & FREAD) {
+ usb_fifo_close(refs.rxfifo, fflags);
+ }
+ usb_unref_device(cpd, &refs);
+ free(cpd, M_USBDEV);
+ return (err);
+ }
+ }
+ usb_unref_device(cpd, &refs);
+ devfs_set_cdevpriv(cpd, usb_close);
+
+ return (0);
+}
+
+/*------------------------------------------------------------------------*
+ * usb_close - cdev callback
+ *------------------------------------------------------------------------*/
+static void
+usb_close(void *arg)
+{
+ struct usb_cdev_refdata refs;
+ struct usb_cdev_privdata *cpd = arg;
+ int err;
+
+ DPRINTFN(2, "cpd=%p\n", cpd);
+
+ err = usb_ref_device(cpd, &refs, 1);
+ if (err) {
+ free(cpd, M_USBDEV);
+ return;
+ }
+ if (cpd->fflags & FREAD) {
+ usb_fifo_close(refs.rxfifo, cpd->fflags);
+ }
+ if (cpd->fflags & FWRITE) {
+ usb_fifo_close(refs.txfifo, cpd->fflags);
+ }
+
+ usb_unref_device(cpd, &refs);
+ free(cpd, M_USBDEV);
+ return;
+}
+
+static void
+usb_dev_init(void *arg)
+{
+ mtx_init(&usb_ref_lock, "USB ref mutex", NULL, MTX_DEF);
+ sx_init(&usb_sym_lock, "USB sym mutex");
+ TAILQ_INIT(&usb_sym_head);
+
+ /* check the UGEN methods */
+ usb_fifo_check_methods(&usb_ugen_methods);
+}
+
+SYSINIT(usb_dev_init, SI_SUB_KLD, SI_ORDER_FIRST, usb_dev_init, NULL);
+
+static void
+usb_dev_init_post(void *arg)
+{
+ /*
+ * Create /dev/usb - this is needed for usbconfig(8), which
+ * needs a well-known device name to access.
+ */
+ usb_dev = make_dev(&usb_static_devsw, 0, UID_ROOT, GID_OPERATOR,
+ 0644, USB_DEVICE_NAME);
+ if (usb_dev == NULL) {
+ DPRINTFN(0, "Could not create usb bus device\n");
+ }
+}
+
+SYSINIT(usb_dev_init_post, SI_SUB_KICK_SCHEDULER, SI_ORDER_FIRST, usb_dev_init_post, NULL);
+
+static void
+usb_dev_uninit(void *arg)
+{
+ if (usb_dev != NULL) {
+ destroy_dev(usb_dev);
+ usb_dev = NULL;
+ }
+ mtx_destroy(&usb_ref_lock);
+ sx_destroy(&usb_sym_lock);
+}
+
+SYSUNINIT(usb_dev_uninit, SI_SUB_KICK_SCHEDULER, SI_ORDER_ANY, usb_dev_uninit, NULL);
+
+static int
+usb_ioctl_f_sub(struct usb_fifo *f, u_long cmd, void *addr,
+ struct thread *td)
+{
+ int error = 0;
+
+ switch (cmd) {
+ case FIODTYPE:
+ *(int *)addr = 0; /* character device */
+ break;
+
+ case FIONBIO:
+ /* handled by upper FS layer */
+ break;
+
+ case FIOASYNC:
+ if (*(int *)addr) {
+ if (f->async_p != NULL) {
+ error = EBUSY;
+ break;
+ }
+ f->async_p = USB_TD_GET_PROC(td);
+ } else {
+ f->async_p = NULL;
+ }
+ break;
+
+ /* XXX this is not the most general solution */
+ case TIOCSPGRP:
+ if (f->async_p == NULL) {
+ error = EINVAL;
+ break;
+ }
+ if (*(int *)addr != USB_PROC_GET_GID(f->async_p)) {
+ error = EPERM;
+ break;
+ }
+ break;
+ default:
+ return (ENOIOCTL);
+ }
+ DPRINTFN(3, "cmd 0x%lx = %d\n", cmd, error);
+ return (error);
+}
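+
+/*
+ * A minimal userland sketch (illustrative only) of the generic
+ * IOCTLs handled above, assuming "fd" is an open ugen device node;
+ * the first call requests SIGIO delivery, the second enables
+ * non-blocking I/O:
+ *
+ *	int on = 1;
+ *
+ *	ioctl(fd, FIOASYNC, &on);
+ *	ioctl(fd, FIONBIO, &on);
+ */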
+
+/*------------------------------------------------------------------------*
+ * usb_ioctl - cdev callback
+ *------------------------------------------------------------------------*/
+static int
+usb_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int fflag, struct thread* td)
+{
+ struct usb_cdev_refdata refs;
+ struct usb_cdev_privdata* cpd;
+ struct usb_fifo *f;
+ int fflags;
+ int err;
+
+ DPRINTFN(2, "cmd=0x%lx\n", cmd);
+
+ err = devfs_get_cdevpriv((void **)&cpd);
+ if (err != 0)
+ return (err);
+
+ /*
+	 * Performance optimisation: Check for IOCTLs that do not need
+	 * the USB reference first. Then grab the USB reference only if
+	 * it is needed.
+ */
+ err = usb_ref_device(cpd, &refs, 0 /* no uref */ );
+ if (err)
+ return (ENXIO);
+
+ fflags = cpd->fflags;
+
+ f = NULL; /* set default value */
+ err = ENOIOCTL; /* set default value */
+
+ if (fflags & FWRITE) {
+ f = refs.txfifo;
+ err = usb_ioctl_f_sub(f, cmd, addr, td);
+ }
+ if (fflags & FREAD) {
+ f = refs.rxfifo;
+ err = usb_ioctl_f_sub(f, cmd, addr, td);
+ }
+ KASSERT(f != NULL, ("fifo not found"));
+ if (err != ENOIOCTL)
+ goto done;
+
+ err = (f->methods->f_ioctl) (f, cmd, addr, fflags);
+
+ DPRINTFN(2, "f_ioctl cmd 0x%lx = %d\n", cmd, err);
+
+ if (err != ENOIOCTL)
+ goto done;
+
+ if (usb_usb_ref_device(cpd, &refs)) {
+ err = ENXIO;
+ goto done;
+ }
+
+ err = (f->methods->f_ioctl_post) (f, cmd, addr, fflags);
+
+ DPRINTFN(2, "f_ioctl_post cmd 0x%lx = %d\n", cmd, err);
+
+ if (err == ENOIOCTL)
+ err = ENOTTY;
+
+ if (err)
+ goto done;
+
+ /* Wait for re-enumeration, if any */
+
+ while (f->udev->re_enumerate_wait != 0) {
+
+ usb_unref_device(cpd, &refs);
+
+ usb_pause_mtx(NULL, hz / 128);
+
+ if (usb_ref_device(cpd, &refs, 1 /* need uref */)) {
+ err = ENXIO;
+ goto done;
+ }
+ }
+
+done:
+ usb_unref_device(cpd, &refs);
+ return (err);
+}
+
+/* ARGSUSED */
+static int
+usb_poll(struct cdev* dev, int events, struct thread* td)
+{
+ struct usb_cdev_refdata refs;
+ struct usb_cdev_privdata* cpd;
+ struct usb_fifo *f;
+ struct usb_mbuf *m;
+ int fflags, revents;
+
+ if (devfs_get_cdevpriv((void **)&cpd) != 0 ||
+ usb_ref_device(cpd, &refs, 0) != 0)
+ return (events &
+ (POLLHUP|POLLIN|POLLRDNORM|POLLOUT|POLLWRNORM));
+
+ fflags = cpd->fflags;
+
+ /* Figure out who needs service */
+ revents = 0;
+ if ((events & (POLLOUT | POLLWRNORM)) &&
+ (fflags & FWRITE)) {
+
+ f = refs.txfifo;
+
+ mtx_lock(f->priv_mtx);
+
+ if (!refs.is_usbfs) {
+ if (f->flag_iserror) {
+ /* we got an error */
+ m = (void *)1;
+ } else {
+ if (f->queue_data == NULL) {
+ /*
+ * start write transfer, if not
+ * already started
+ */
+ (f->methods->f_start_write) (f);
+ }
+ /* check if any packets are available */
+ USB_IF_POLL(&f->free_q, m);
+ }
+ } else {
+ if (f->flag_iscomplete) {
+ m = (void *)1;
+ } else {
+ m = NULL;
+ }
+ }
+
+ if (m) {
+ revents |= events & (POLLOUT | POLLWRNORM);
+ } else {
+ f->flag_isselect = 1;
+ selrecord(td, &f->selinfo);
+ }
+
+ mtx_unlock(f->priv_mtx);
+ }
+ if ((events & (POLLIN | POLLRDNORM)) &&
+ (fflags & FREAD)) {
+
+ f = refs.rxfifo;
+
+ mtx_lock(f->priv_mtx);
+
+ if (!refs.is_usbfs) {
+ if (f->flag_iserror) {
+				/* we got an error */
+ m = (void *)1;
+ } else {
+ if (f->queue_data == NULL) {
+ /*
+ * start read transfer, if not
+ * already started
+ */
+ (f->methods->f_start_read) (f);
+ }
+ /* check if any packets are available */
+ USB_IF_POLL(&f->used_q, m);
+ }
+ } else {
+ if (f->flag_iscomplete) {
+ m = (void *)1;
+ } else {
+ m = NULL;
+ }
+ }
+
+ if (m) {
+ revents |= events & (POLLIN | POLLRDNORM);
+ } else {
+ f->flag_isselect = 1;
+ selrecord(td, &f->selinfo);
+
+ if (!refs.is_usbfs) {
+ /* start reading data */
+ (f->methods->f_start_read) (f);
+ }
+ }
+
+ mtx_unlock(f->priv_mtx);
+ }
+ usb_unref_device(cpd, &refs);
+ return (revents);
+}
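+
+/*
+ * A minimal userland sketch (illustrative only) of waiting on the RX
+ * FIFO through the poll handler above, assuming "fd" is an open ugen
+ * device node opened for reading.  When POLLIN is reported, at least
+ * one USB mbuf is queued and read() will not sleep:
+ *
+ *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
+ *
+ *	poll(&pfd, 1, -1);
+ */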
+
+static int
+usb_read(struct cdev *dev, struct uio *uio, int ioflag)
+{
+ struct usb_cdev_refdata refs;
+ struct usb_cdev_privdata* cpd;
+ struct usb_fifo *f;
+ struct usb_mbuf *m;
+ int fflags;
+ int resid;
+ int io_len;
+ int err;
+ uint8_t tr_data = 0;
+
+ err = devfs_get_cdevpriv((void **)&cpd);
+ if (err != 0)
+ return (err);
+
+ err = usb_ref_device(cpd, &refs, 0 /* no uref */ );
+ if (err) {
+ return (ENXIO);
+ }
+ fflags = cpd->fflags;
+
+ f = refs.rxfifo;
+ if (f == NULL) {
+ /* should not happen */
+ usb_unref_device(cpd, &refs);
+ return (EPERM);
+ }
+
+ resid = uio->uio_resid;
+
+ mtx_lock(f->priv_mtx);
+
+ /* check for permanent read error */
+ if (f->flag_iserror) {
+ err = EIO;
+ goto done;
+ }
+ /* check if USB-FS interface is active */
+ if (refs.is_usbfs) {
+ /*
+ * The queue is used for events that should be
+ * retrieved using the "USB_FS_COMPLETE" ioctl.
+ */
+ err = EINVAL;
+ goto done;
+ }
+ while (uio->uio_resid > 0) {
+
+ USB_IF_DEQUEUE(&f->used_q, m);
+
+ if (m == NULL) {
+
+ /* start read transfer, if not already started */
+
+ (f->methods->f_start_read) (f);
+
+ if (ioflag & IO_NDELAY) {
+ if (tr_data) {
+ /* return length before error */
+ break;
+ }
+ err = EWOULDBLOCK;
+ break;
+ }
+ DPRINTF("sleeping\n");
+
+ err = usb_fifo_wait(f);
+ if (err) {
+ break;
+ }
+ continue;
+ }
+ if (f->methods->f_filter_read) {
+ /*
+ * Sometimes it is convenient to process data at the
+ * expense of a userland process instead of a kernel
+ * process.
+ */
+ (f->methods->f_filter_read) (f, m);
+ }
+ tr_data = 1;
+
+ io_len = MIN(m->cur_data_len, uio->uio_resid);
+
+ DPRINTFN(2, "transfer %d bytes from %p\n",
+ io_len, m->cur_data_ptr);
+
+ err = usb_fifo_uiomove(f,
+ m->cur_data_ptr, io_len, uio);
+
+ m->cur_data_len -= io_len;
+ m->cur_data_ptr += io_len;
+
+ if (m->cur_data_len == 0) {
+
+ uint8_t last_packet;
+
+ last_packet = m->last_packet;
+
+ USB_IF_ENQUEUE(&f->free_q, m);
+
+ if (last_packet) {
+ /* keep framing */
+ break;
+ }
+ } else {
+ USB_IF_PREPEND(&f->used_q, m);
+ }
+
+ if (err) {
+ break;
+ }
+ }
+done:
+ mtx_unlock(f->priv_mtx);
+
+ usb_unref_device(cpd, &refs);
+
+ return (err);
+}
+
+static int
+usb_write(struct cdev *dev, struct uio *uio, int ioflag)
+{
+ struct usb_cdev_refdata refs;
+ struct usb_cdev_privdata* cpd;
+ struct usb_fifo *f;
+ struct usb_mbuf *m;
+ uint8_t *pdata;
+ int fflags;
+ int resid;
+ int io_len;
+ int err;
+ uint8_t tr_data = 0;
+
+ DPRINTFN(2, "\n");
+
+ err = devfs_get_cdevpriv((void **)&cpd);
+ if (err != 0)
+ return (err);
+
+ err = usb_ref_device(cpd, &refs, 0 /* no uref */ );
+ if (err) {
+ return (ENXIO);
+ }
+ fflags = cpd->fflags;
+
+ f = refs.txfifo;
+ if (f == NULL) {
+ /* should not happen */
+ usb_unref_device(cpd, &refs);
+ return (EPERM);
+ }
+ resid = uio->uio_resid;
+
+ mtx_lock(f->priv_mtx);
+
+ /* check for permanent write error */
+ if (f->flag_iserror) {
+ err = EIO;
+ goto done;
+ }
+ /* check if USB-FS interface is active */
+ if (refs.is_usbfs) {
+ /*
+ * The queue is used for events that should be
+ * retrieved using the "USB_FS_COMPLETE" ioctl.
+ */
+ err = EINVAL;
+ goto done;
+ }
+ if (f->queue_data == NULL) {
+ /* start write transfer, if not already started */
+ (f->methods->f_start_write) (f);
+ }
+ /* we allow writing zero length data */
+ do {
+ USB_IF_DEQUEUE(&f->free_q, m);
+
+ if (m == NULL) {
+
+ if (ioflag & IO_NDELAY) {
+ if (tr_data) {
+ /* return length before error */
+ break;
+ }
+ err = EWOULDBLOCK;
+ break;
+ }
+ DPRINTF("sleeping\n");
+
+ err = usb_fifo_wait(f);
+ if (err) {
+ break;
+ }
+ continue;
+ }
+ tr_data = 1;
+
+ if (f->flag_have_fragment == 0) {
+ USB_MBUF_RESET(m);
+ io_len = m->cur_data_len;
+ pdata = m->cur_data_ptr;
+ if (io_len > uio->uio_resid)
+ io_len = uio->uio_resid;
+ m->cur_data_len = io_len;
+ } else {
+ io_len = m->max_data_len - m->cur_data_len;
+ pdata = m->cur_data_ptr + m->cur_data_len;
+ if (io_len > uio->uio_resid)
+ io_len = uio->uio_resid;
+ m->cur_data_len += io_len;
+ }
+
+ DPRINTFN(2, "transfer %d bytes to %p\n",
+ io_len, pdata);
+
+ err = usb_fifo_uiomove(f, pdata, io_len, uio);
+
+ if (err) {
+ f->flag_have_fragment = 0;
+ USB_IF_ENQUEUE(&f->free_q, m);
+ break;
+ }
+
+ /* check if the buffer is ready to be transmitted */
+
+ if ((f->flag_write_defrag == 0) ||
+ (m->cur_data_len == m->max_data_len)) {
+ f->flag_have_fragment = 0;
+
+ /*
+ * Check for write filter:
+ *
+ * Sometimes it is convenient to process data
+ * at the expense of a userland process
+ * instead of a kernel process.
+ */
+ if (f->methods->f_filter_write) {
+ (f->methods->f_filter_write) (f, m);
+ }
+
+ /* Put USB mbuf in the used queue */
+ USB_IF_ENQUEUE(&f->used_q, m);
+
+ /* Start writing data, if not already started */
+ (f->methods->f_start_write) (f);
+ } else {
+ /* Wait for more data or close */
+ f->flag_have_fragment = 1;
+ USB_IF_PREPEND(&f->free_q, m);
+ }
+
+ } while (uio->uio_resid > 0);
+done:
+ mtx_unlock(f->priv_mtx);
+
+ usb_unref_device(cpd, &refs);
+
+ return (err);
+}
+
+int
+usb_static_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
+ struct thread *td)
+{
+ union {
+ struct usb_read_dir *urd;
+ void* data;
+ } u;
+ int err;
+
+ u.data = data;
+ switch (cmd) {
+ case USB_READ_DIR:
+ err = usb_read_symlink(u.urd->urd_data,
+ u.urd->urd_startentry, u.urd->urd_maxlen);
+ break;
+ case USB_DEV_QUIRK_GET:
+ case USB_QUIRK_NAME_GET:
+ case USB_DEV_QUIRK_ADD:
+ case USB_DEV_QUIRK_REMOVE:
+ err = usb_quirk_ioctl_p(cmd, data, fflag, td);
+ break;
+ case USB_GET_TEMPLATE:
+ *(int *)data = usb_template;
+ err = 0;
+ break;
+ case USB_SET_TEMPLATE:
+ err = priv_check(curthread, PRIV_DRIVER);
+ if (err)
+ break;
+ usb_template = *(int *)data;
+ break;
+ default:
+ err = ENOTTY;
+ break;
+ }
+ return (err);
+}
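+
+/*
+ * A minimal userland sketch (illustrative only) of reaching the
+ * handler above through "/dev/usb", the way usbconfig(8) does;
+ * "USB_GET_TEMPLATE" is taken from the usb_ioctl.h header included
+ * by this file:
+ *
+ *	int fd = open("/dev/usb", O_RDONLY);
+ *	int tmpl;
+ *
+ *	if (fd >= 0 && ioctl(fd, USB_GET_TEMPLATE, &tmpl) == 0)
+ *		printf("device side template: %d\n", tmpl);
+ */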
+
+static int
+usb_fifo_uiomove(struct usb_fifo *f, void *cp,
+ int n, struct uio *uio)
+{
+ int error;
+
+ mtx_unlock(f->priv_mtx);
+
+ /*
+	 * "uiomove()" can sleep, so this wrapper releases the FIFO
+	 * mutex around the call and re-acquires it afterwards:
+ */
+ error = uiomove(cp, n, uio);
+
+ mtx_lock(f->priv_mtx);
+
+ return (error);
+}
+
+int
+usb_fifo_wait(struct usb_fifo *f)
+{
+ int err;
+
+ mtx_assert(f->priv_mtx, MA_OWNED);
+
+ if (f->flag_iserror) {
+ /* we are gone */
+ return (EIO);
+ }
+ f->flag_sleeping = 1;
+
+ err = cv_wait_sig(&f->cv_io, f->priv_mtx);
+
+ if (f->flag_iserror) {
+ /* we are gone */
+ err = EIO;
+ }
+ return (err);
+}
+
+void
+usb_fifo_signal(struct usb_fifo *f)
+{
+ if (f->flag_sleeping) {
+ f->flag_sleeping = 0;
+ cv_broadcast(&f->cv_io);
+ }
+}
+
+void
+usb_fifo_wakeup(struct usb_fifo *f)
+{
+ usb_fifo_signal(f);
+
+ if (f->flag_isselect) {
+ selwakeup(&f->selinfo);
+ f->flag_isselect = 0;
+ }
+ if (f->async_p != NULL) {
+ PROC_LOCK(f->async_p);
+ psignal(f->async_p, SIGIO);
+ PROC_UNLOCK(f->async_p);
+ }
+}
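+
+/*
+ * A minimal sketch (illustrative only) of how the wait/signal pair
+ * above is typically used.  The consumer holds "f->priv_mtx" and
+ * sleeps until the producer queues data:
+ *
+ *	while (!fifo_has_data(f)) {
+ *		if (usb_fifo_wait(f) != 0)
+ *			break;
+ *	}
+ *
+ * ("fifo_has_data()" is a hypothetical predicate.)  The producer
+ * enqueues a USB mbuf and calls usb_fifo_wakeup(f), which also
+ * notifies select/poll waiters and SIGIO listeners.
+ */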
+
+static int
+usb_fifo_dummy_open(struct usb_fifo *fifo, int fflags)
+{
+ return (0);
+}
+
+static void
+usb_fifo_dummy_close(struct usb_fifo *fifo, int fflags)
+{
+ return;
+}
+
+static int
+usb_fifo_dummy_ioctl(struct usb_fifo *fifo, u_long cmd, void *addr, int fflags)
+{
+ return (ENOIOCTL);
+}
+
+static void
+usb_fifo_dummy_cmd(struct usb_fifo *fifo)
+{
+ fifo->flag_flushing = 0; /* not flushing */
+}
+
+static void
+usb_fifo_check_methods(struct usb_fifo_methods *pm)
+{
+ /* check that all callback functions are OK */
+
+ if (pm->f_open == NULL)
+ pm->f_open = &usb_fifo_dummy_open;
+
+ if (pm->f_close == NULL)
+ pm->f_close = &usb_fifo_dummy_close;
+
+ if (pm->f_ioctl == NULL)
+ pm->f_ioctl = &usb_fifo_dummy_ioctl;
+
+ if (pm->f_ioctl_post == NULL)
+ pm->f_ioctl_post = &usb_fifo_dummy_ioctl;
+
+ if (pm->f_start_read == NULL)
+ pm->f_start_read = &usb_fifo_dummy_cmd;
+
+ if (pm->f_stop_read == NULL)
+ pm->f_stop_read = &usb_fifo_dummy_cmd;
+
+ if (pm->f_start_write == NULL)
+ pm->f_start_write = &usb_fifo_dummy_cmd;
+
+ if (pm->f_stop_write == NULL)
+ pm->f_stop_write = &usb_fifo_dummy_cmd;
+}
+
+/*------------------------------------------------------------------------*
+ * usb_fifo_attach
+ *
+ * The following function will create a duplex FIFO.
+ *
+ * Return values:
+ * 0: Success.
+ * Else: Failure.
+ *------------------------------------------------------------------------*/
+int
+usb_fifo_attach(struct usb_device *udev, void *priv_sc,
+ struct mtx *priv_mtx, struct usb_fifo_methods *pm,
+ struct usb_fifo_sc *f_sc, uint16_t unit, uint16_t subunit,
+ uint8_t iface_index, uid_t uid, gid_t gid, int mode)
+{
+ struct usb_fifo *f_tx;
+ struct usb_fifo *f_rx;
+ char devname[32];
+ uint8_t n;
+ struct usb_fs_privdata* pd;
+
+ f_sc->fp[USB_FIFO_TX] = NULL;
+ f_sc->fp[USB_FIFO_RX] = NULL;
+
+ if (pm == NULL)
+ return (EINVAL);
+
+ /* check the methods */
+ usb_fifo_check_methods(pm);
+
+ if (priv_mtx == NULL)
+ priv_mtx = &Giant;
+
+ /* search for a free FIFO slot */
+ for (n = 0;; n += 2) {
+
+ if (n == USB_FIFO_MAX) {
+ /* end of FIFOs reached */
+ return (ENOMEM);
+ }
+ /* Check for TX FIFO */
+ if (udev->fifo[n + USB_FIFO_TX] != NULL) {
+ continue;
+ }
+ /* Check for RX FIFO */
+ if (udev->fifo[n + USB_FIFO_RX] != NULL) {
+ continue;
+ }
+ break;
+ }
+
+ f_tx = usb_fifo_alloc();
+ f_rx = usb_fifo_alloc();
+
+ if ((f_tx == NULL) || (f_rx == NULL)) {
+ usb_fifo_free(f_tx);
+ usb_fifo_free(f_rx);
+ return (ENOMEM);
+ }
+ /* initialise FIFO structures */
+
+ f_tx->fifo_index = n + USB_FIFO_TX;
+ f_tx->dev_ep_index = -1;
+ f_tx->priv_mtx = priv_mtx;
+ f_tx->priv_sc0 = priv_sc;
+ f_tx->methods = pm;
+ f_tx->iface_index = iface_index;
+ f_tx->udev = udev;
+
+ f_rx->fifo_index = n + USB_FIFO_RX;
+ f_rx->dev_ep_index = -1;
+ f_rx->priv_mtx = priv_mtx;
+ f_rx->priv_sc0 = priv_sc;
+ f_rx->methods = pm;
+ f_rx->iface_index = iface_index;
+ f_rx->udev = udev;
+
+ f_sc->fp[USB_FIFO_TX] = f_tx;
+ f_sc->fp[USB_FIFO_RX] = f_rx;
+
+ mtx_lock(&usb_ref_lock);
+ udev->fifo[f_tx->fifo_index] = f_tx;
+ udev->fifo[f_rx->fifo_index] = f_rx;
+ mtx_unlock(&usb_ref_lock);
+
+ for (n = 0; n != 4; n++) {
+
+ if (pm->basename[n] == NULL) {
+ continue;
+ }
+ if (subunit == 0xFFFF) {
+ if (snprintf(devname, sizeof(devname),
+ "%s%u%s", pm->basename[n],
+ unit, pm->postfix[n] ?
+ pm->postfix[n] : "")) {
+ /* ignore */
+ }
+ } else {
+ if (snprintf(devname, sizeof(devname),
+ "%s%u.%u%s", pm->basename[n],
+ unit, subunit, pm->postfix[n] ?
+ pm->postfix[n] : "")) {
+ /* ignore */
+ }
+ }
+
+ /*
+ * Distribute the symbolic links into two FIFO structures:
+ */
+ if (n & 1) {
+ f_rx->symlink[n / 2] =
+ usb_alloc_symlink(devname);
+ } else {
+ f_tx->symlink[n / 2] =
+ usb_alloc_symlink(devname);
+ }
+
+ /*
+ * Initialize device private data - this is used to find the
+ * actual USB device itself.
+ */
+ pd = malloc(sizeof(struct usb_fs_privdata), M_USBDEV, M_WAITOK | M_ZERO);
+ pd->bus_index = device_get_unit(udev->bus->bdev);
+ pd->dev_index = udev->device_index;
+ pd->ep_addr = -1; /* not an endpoint */
+ pd->fifo_index = f_tx->fifo_index & f_rx->fifo_index;
+ pd->mode = FREAD|FWRITE;
+
+ /* Now, create the device itself */
+ f_sc->dev = make_dev(&usb_devsw, 0, uid, gid, mode,
+ "%s", devname);
+ /* XXX setting si_drv1 and creating the device is not atomic! */
+ f_sc->dev->si_drv1 = pd;
+ }
+
+ DPRINTFN(2, "attached %p/%p\n", f_tx, f_rx);
+ return (0);
+}
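+
+/*
+ * A minimal client sketch (illustrative only) of attaching a duplex
+ * FIFO.  "my_methods", "my_open", "my_start_read", "sc", "unit" and
+ * "iface_index" are hypothetical driver names; error handling is
+ * omitted.  Passing 0xFFFF as the subunit selects a device name
+ * without a subunit suffix:
+ *
+ *	static struct usb_fifo_methods my_methods = {
+ *		.f_open = &my_open,
+ *		.f_start_read = &my_start_read,
+ *		.basename[0] = "mydev",
+ *	};
+ *
+ *	usb_fifo_attach(udev, sc, &sc->sc_mtx, &my_methods,
+ *	    &sc->sc_fifo, unit, 0xFFFF, iface_index,
+ *	    UID_ROOT, GID_OPERATOR, 0644);
+ *
+ * After a successful attach the FIFOs still need buffers, e.g.
+ * usb_fifo_alloc_buffer(sc->sc_fifo.fp[USB_FIFO_RX], 1024, 8).
+ */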
+
+/*------------------------------------------------------------------------*
+ * usb_fifo_alloc_buffer
+ *
+ * Return values:
+ * 0: Success
+ * Else failure
+ *------------------------------------------------------------------------*/
+int
+usb_fifo_alloc_buffer(struct usb_fifo *f, usb_size_t bufsize,
+ uint16_t nbuf)
+{
+ usb_fifo_free_buffer(f);
+
+	/* set the queue limits */
+ f->free_q.ifq_maxlen = nbuf;
+ f->used_q.ifq_maxlen = nbuf;
+
+ f->queue_data = usb_alloc_mbufs(
+ M_USBDEV, &f->free_q, bufsize, nbuf);
+
+ if ((f->queue_data == NULL) && bufsize && nbuf) {
+ return (ENOMEM);
+ }
+ return (0); /* success */
+}
+
+/*------------------------------------------------------------------------*
+ * usb_fifo_free_buffer
+ *
+ * This function will free the buffers associated with a FIFO. This
+ * function can be called multiple times in a row.
+ *------------------------------------------------------------------------*/
+void
+usb_fifo_free_buffer(struct usb_fifo *f)
+{
+ if (f->queue_data) {
+ /* free old buffer */
+ free(f->queue_data, M_USBDEV);
+ f->queue_data = NULL;
+ }
+ /* reset queues */
+
+ bzero(&f->free_q, sizeof(f->free_q));
+ bzero(&f->used_q, sizeof(f->used_q));
+}
+
+static void
+usb_fifo_cleanup(void* ptr)
+{
+ free(ptr, M_USBDEV);
+}
+
+void
+usb_fifo_detach(struct usb_fifo_sc *f_sc)
+{
+ if (f_sc == NULL) {
+ return;
+ }
+ usb_fifo_free(f_sc->fp[USB_FIFO_TX]);
+ usb_fifo_free(f_sc->fp[USB_FIFO_RX]);
+
+ f_sc->fp[USB_FIFO_TX] = NULL;
+ f_sc->fp[USB_FIFO_RX] = NULL;
+
+ if (f_sc->dev != NULL) {
+ destroy_dev_sched_cb(f_sc->dev,
+ usb_fifo_cleanup, f_sc->dev->si_drv1);
+ f_sc->dev = NULL;
+ }
+
+ DPRINTFN(2, "detached %p\n", f_sc);
+}
+
+usb_size_t
+usb_fifo_put_bytes_max(struct usb_fifo *f)
+{
+ struct usb_mbuf *m;
+ usb_size_t len;
+
+ USB_IF_POLL(&f->free_q, m);
+
+ if (m) {
+ len = m->max_data_len;
+ } else {
+ len = 0;
+ }
+ return (len);
+}
+
+/*------------------------------------------------------------------------*
+ * usb_fifo_put_data
+ *
+ * what:
+ * 0 - normal operation
+ * 1 - set last packet flag to enforce framing
+ *------------------------------------------------------------------------*/
+void
+usb_fifo_put_data(struct usb_fifo *f, struct usb_page_cache *pc,
+ usb_frlength_t offset, usb_frlength_t len, uint8_t what)
+{
+ struct usb_mbuf *m;
+ usb_frlength_t io_len;
+
+ while (len || (what == 1)) {
+
+ USB_IF_DEQUEUE(&f->free_q, m);
+
+ if (m) {
+ USB_MBUF_RESET(m);
+
+ io_len = MIN(len, m->cur_data_len);
+
+ usbd_copy_out(pc, offset, m->cur_data_ptr, io_len);
+
+ m->cur_data_len = io_len;
+ offset += io_len;
+ len -= io_len;
+
+ if ((len == 0) && (what == 1)) {
+ m->last_packet = 1;
+ }
+ USB_IF_ENQUEUE(&f->used_q, m);
+
+ usb_fifo_wakeup(f);
+
+ if ((len == 0) || (what == 1)) {
+ break;
+ }
+ } else {
+ break;
+ }
+ }
+}
+
+void
+usb_fifo_put_data_linear(struct usb_fifo *f, void *ptr,
+ usb_size_t len, uint8_t what)
+{
+ struct usb_mbuf *m;
+ usb_size_t io_len;
+
+ while (len || (what == 1)) {
+
+ USB_IF_DEQUEUE(&f->free_q, m);
+
+ if (m) {
+ USB_MBUF_RESET(m);
+
+ io_len = MIN(len, m->cur_data_len);
+
+ bcopy(ptr, m->cur_data_ptr, io_len);
+
+ m->cur_data_len = io_len;
+ ptr = USB_ADD_BYTES(ptr, io_len);
+ len -= io_len;
+
+ if ((len == 0) && (what == 1)) {
+ m->last_packet = 1;
+ }
+ USB_IF_ENQUEUE(&f->used_q, m);
+
+ usb_fifo_wakeup(f);
+
+ if ((len == 0) || (what == 1)) {
+ break;
+ }
+ } else {
+ break;
+ }
+ }
+}
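+
+/*
+ * A minimal sketch (illustrative only) of the receive path: a
+ * transfer completion callback hands "actlen" bytes at "buf" (both
+ * hypothetical names from a completed transfer) to the reader.
+ * Passing "what" as 1 requests framing, so a blocked read() returns
+ * once this chunk has been consumed; waiters are woken up internally:
+ *
+ *	usb_fifo_put_data_linear(f, buf, actlen, 1);
+ */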
+
+uint8_t
+usb_fifo_put_data_buffer(struct usb_fifo *f, void *ptr, usb_size_t len)
+{
+ struct usb_mbuf *m;
+
+ USB_IF_DEQUEUE(&f->free_q, m);
+
+ if (m) {
+ m->cur_data_len = len;
+ m->cur_data_ptr = ptr;
+ USB_IF_ENQUEUE(&f->used_q, m);
+ usb_fifo_wakeup(f);
+ return (1);
+ }
+ return (0);
+}
+
+void
+usb_fifo_put_data_error(struct usb_fifo *f)
+{
+ f->flag_iserror = 1;
+ usb_fifo_wakeup(f);
+}
+
+/*------------------------------------------------------------------------*
+ * usb_fifo_get_data
+ *
+ * what:
+ * 0 - normal operation
+ * 1 - only get one "usb_mbuf"
+ *
+ * returns:
+ * 0 - no more data
+ * 1 - data in buffer
+ *------------------------------------------------------------------------*/
+uint8_t
+usb_fifo_get_data(struct usb_fifo *f, struct usb_page_cache *pc,
+ usb_frlength_t offset, usb_frlength_t len, usb_frlength_t *actlen,
+ uint8_t what)
+{
+ struct usb_mbuf *m;
+ usb_frlength_t io_len;
+ uint8_t tr_data = 0;
+
+ actlen[0] = 0;
+
+ while (1) {
+
+ USB_IF_DEQUEUE(&f->used_q, m);
+
+ if (m) {
+
+ tr_data = 1;
+
+ io_len = MIN(len, m->cur_data_len);
+
+ usbd_copy_in(pc, offset, m->cur_data_ptr, io_len);
+
+ len -= io_len;
+ offset += io_len;
+ actlen[0] += io_len;
+ m->cur_data_ptr += io_len;
+ m->cur_data_len -= io_len;
+
+ if ((m->cur_data_len == 0) || (what == 1)) {
+ USB_IF_ENQUEUE(&f->free_q, m);
+
+ usb_fifo_wakeup(f);
+
+ if (what == 1) {
+ break;
+ }
+ } else {
+ USB_IF_PREPEND(&f->used_q, m);
+ }
+ } else {
+
+ if (tr_data) {
+ /* wait for data to be written out */
+ break;
+ }
+ if (f->flag_flushing) {
+ /* check if we should send a short packet */
+ if (f->flag_short != 0) {
+ f->flag_short = 0;
+ tr_data = 1;
+ break;
+ }
+ /* flushing complete */
+ f->flag_flushing = 0;
+ usb_fifo_wakeup(f);
+ }
+ break;
+ }
+ if (len == 0) {
+ break;
+ }
+ }
+ return (tr_data);
+}
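+
+/*
+ * A minimal sketch (illustrative only) of the transmit path: an
+ * "f_start_write" method or transfer callback drains data written by
+ * userland into the page cache "pc" of the current transfer;
+ * "max_len" is a hypothetical frame size:
+ *
+ *	usb_frlength_t actlen;
+ *
+ *	if (usb_fifo_get_data(f, pc, 0, max_len, &actlen, 0)) {
+ *		... submit a transfer carrying "actlen" bytes ...
+ *	}
+ */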
+
+uint8_t
+usb_fifo_get_data_linear(struct usb_fifo *f, void *ptr,
+ usb_size_t len, usb_size_t *actlen, uint8_t what)
+{
+ struct usb_mbuf *m;
+ usb_size_t io_len;
+ uint8_t tr_data = 0;
+
+ actlen[0] = 0;
+
+ while (1) {
+
+ USB_IF_DEQUEUE(&f->used_q, m);
+
+ if (m) {
+
+ tr_data = 1;
+
+ io_len = MIN(len, m->cur_data_len);
+
+ bcopy(m->cur_data_ptr, ptr, io_len);
+
+ len -= io_len;
+ ptr = USB_ADD_BYTES(ptr, io_len);
+ actlen[0] += io_len;
+ m->cur_data_ptr += io_len;
+ m->cur_data_len -= io_len;
+
+ if ((m->cur_data_len == 0) || (what == 1)) {
+ USB_IF_ENQUEUE(&f->free_q, m);
+
+ usb_fifo_wakeup(f);
+
+ if (what == 1) {
+ break;
+ }
+ } else {
+ USB_IF_PREPEND(&f->used_q, m);
+ }
+ } else {
+
+ if (tr_data) {
+ /* wait for data to be written out */
+ break;
+ }
+ if (f->flag_flushing) {
+ /* check if we should send a short packet */
+ if (f->flag_short != 0) {
+ f->flag_short = 0;
+ tr_data = 1;
+ break;
+ }
+ /* flushing complete */
+ f->flag_flushing = 0;
+ usb_fifo_wakeup(f);
+ }
+ break;
+ }
+ if (len == 0) {
+ break;
+ }
+ }
+ return (tr_data);
+}
+
+uint8_t
+usb_fifo_get_data_buffer(struct usb_fifo *f, void **pptr, usb_size_t *plen)
+{
+ struct usb_mbuf *m;
+
+ USB_IF_POLL(&f->used_q, m);
+
+ if (m) {
+ *plen = m->cur_data_len;
+ *pptr = m->cur_data_ptr;
+
+ return (1);
+ }
+ return (0);
+}
+
+void
+usb_fifo_get_data_error(struct usb_fifo *f)
+{
+ f->flag_iserror = 1;
+ usb_fifo_wakeup(f);
+}
+
+/*------------------------------------------------------------------------*
+ * usb_alloc_symlink
+ *
+ * Return values:
+ * NULL: Failure
+ * Else: Pointer to symlink entry
+ *------------------------------------------------------------------------*/
+struct usb_symlink *
+usb_alloc_symlink(const char *target)
+{
+ struct usb_symlink *ps;
+
+ ps = malloc(sizeof(*ps), M_USBDEV, M_WAITOK);
+ if (ps == NULL) {
+ return (ps);
+ }
+	/* XXX the NULL check above is not needed with M_WAITOK */
+ strlcpy(ps->src_path, target, sizeof(ps->src_path));
+ ps->src_len = strlen(ps->src_path);
+ strlcpy(ps->dst_path, target, sizeof(ps->dst_path));
+ ps->dst_len = strlen(ps->dst_path);
+
+ sx_xlock(&usb_sym_lock);
+ TAILQ_INSERT_TAIL(&usb_sym_head, ps, sym_entry);
+ sx_unlock(&usb_sym_lock);
+ return (ps);
+}
+
+/*------------------------------------------------------------------------*
+ * usb_free_symlink
+ *------------------------------------------------------------------------*/
+void
+usb_free_symlink(struct usb_symlink *ps)
+{
+ if (ps == NULL) {
+ return;
+ }
+ sx_xlock(&usb_sym_lock);
+ TAILQ_REMOVE(&usb_sym_head, ps, sym_entry);
+ sx_unlock(&usb_sym_lock);
+
+ free(ps, M_USBDEV);
+}
+
+/*------------------------------------------------------------------------*
+ * usb_read_symlink
+ *
+ * Return value:
+ * 0: Success
+ * Else: Failure
+ *------------------------------------------------------------------------*/
+int
+usb_read_symlink(uint8_t *user_ptr, uint32_t startentry, uint32_t user_len)
+{
+ struct usb_symlink *ps;
+ uint32_t temp;
+ uint32_t delta = 0;
+ uint8_t len;
+ int error = 0;
+
+ sx_xlock(&usb_sym_lock);
+
+ TAILQ_FOREACH(ps, &usb_sym_head, sym_entry) {
+
+ /*
+ * Compute total length of source and destination symlink
+		 * strings plus one length byte and two NUL bytes:
+ */
+ temp = ps->src_len + ps->dst_len + 3;
+
+ if (temp > 255) {
+ /*
+ * Skip entry because this length cannot fit
+ * into one byte:
+ */
+ continue;
+ }
+ if (startentry != 0) {
+ /* decrement read offset */
+ startentry--;
+ continue;
+ }
+ if (temp > user_len) {
+ /* out of buffer space */
+ break;
+ }
+ len = temp;
+
+ /* copy out total length */
+
+ error = copyout(&len,
+ USB_ADD_BYTES(user_ptr, delta), 1);
+ if (error) {
+ break;
+ }
+ delta += 1;
+
+ /* copy out source string */
+
+ error = copyout(ps->src_path,
+ USB_ADD_BYTES(user_ptr, delta), ps->src_len);
+ if (error) {
+ break;
+ }
+ len = 0;
+ delta += ps->src_len;
+ error = copyout(&len,
+ USB_ADD_BYTES(user_ptr, delta), 1);
+ if (error) {
+ break;
+ }
+ delta += 1;
+
+ /* copy out destination string */
+
+ error = copyout(ps->dst_path,
+ USB_ADD_BYTES(user_ptr, delta), ps->dst_len);
+ if (error) {
+ break;
+ }
+ len = 0;
+ delta += ps->dst_len;
+ error = copyout(&len,
+ USB_ADD_BYTES(user_ptr, delta), 1);
+ if (error) {
+ break;
+ }
+ delta += 1;
+
+ user_len -= temp;
+ }
+
+ /* a zero length entry indicates the end */
+
+ if ((user_len != 0) && (error == 0)) {
+
+ len = 0;
+
+ error = copyout(&len,
+ USB_ADD_BYTES(user_ptr, delta), 1);
+ }
+ sx_unlock(&usb_sym_lock);
+ return (error);
+}
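+
+/*
+ * A minimal userland sketch (illustrative only) of decoding the
+ * buffer filled in above.  Each record is a length byte followed by
+ * the NUL-terminated source and destination strings; a zero length
+ * byte ends the listing.  "p" is a hypothetical "uint8_t" pointer
+ * into a buffer returned through USB_READ_DIR:
+ *
+ *	while (*p != 0) {
+ *		const char *src = (const char *)p + 1;
+ *		const char *dst = src + strlen(src) + 1;
+ *
+ *		printf("%s -> %s\n", src, dst);
+ *		p += *p;
+ *	}
+ */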
+
+void
+usb_fifo_set_close_zlp(struct usb_fifo *f, uint8_t onoff)
+{
+ if (f == NULL)
+ return;
+
+ /* send a Zero Length Packet, ZLP, before close */
+ f->flag_short = onoff;
+}
+
+void
+usb_fifo_set_write_defrag(struct usb_fifo *f, uint8_t onoff)
+{
+ if (f == NULL)
+ return;
+
+ /* defrag written data */
+ f->flag_write_defrag = onoff;
+ /* reset defrag state */
+ f->flag_have_fragment = 0;
+}
+
+void *
+usb_fifo_softc(struct usb_fifo *f)
+{
+ return (f->priv_sc0);
+}
+#endif /* USB_HAVE_UGEN */
diff --git a/rtems/freebsd/dev/usb/usb_dev.h b/rtems/freebsd/dev/usb/usb_dev.h
new file mode 100644
index 00000000..2231525f
--- /dev/null
+++ b/rtems/freebsd/dev/usb/usb_dev.h
@@ -0,0 +1,154 @@
+/* $FreeBSD$ */
+/*-
+ * Copyright (c) 2008 Hans Petter Selasky. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _USB_DEV_HH_
+#define _USB_DEV_HH_
+
+#include <rtems/freebsd/sys/file.h>
+#include <rtems/freebsd/sys/selinfo.h>
+#include <rtems/freebsd/sys/poll.h>
+#include <rtems/freebsd/sys/signalvar.h>
+#include <rtems/freebsd/sys/proc.h>
+
+struct usb_fifo;
+struct usb_mbuf;
+
+struct usb_symlink {
+ TAILQ_ENTRY(usb_symlink) sym_entry;
+ char src_path[32]; /* Source path - including terminating
+ * zero */
+ char dst_path[32]; /* Destination path - including
+ * terminating zero */
+ uint8_t src_len; /* String length */
+ uint8_t dst_len; /* String length */
+};
+
+/*
+ * Private per-device information.
+ */
+struct usb_cdev_privdata {
+ struct usb_bus *bus;
+ struct usb_device *udev;
+ struct usb_interface *iface;
+ int bus_index; /* bus index */
+ int dev_index; /* device index */
+ int ep_addr; /* endpoint address */
+ int fflags;
+ uint8_t fifo_index; /* FIFO index */
+};
+
+/*
+ * The following structure defines a minimal re-implementation of the
+ * kernel's ifqueue structure.
+ */
+struct usb_ifqueue {
+ struct usb_mbuf *ifq_head;
+ struct usb_mbuf *ifq_tail;
+
+ usb_size_t ifq_len;
+ usb_size_t ifq_maxlen;
+};
+
+/*
+ * Private per-device and per-thread reference information
+ */
+struct usb_cdev_refdata {
+ struct usb_fifo *rxfifo;
+ struct usb_fifo *txfifo;
+ uint8_t is_read; /* location has read access */
+ uint8_t is_write; /* location has write access */
+ uint8_t is_uref; /* USB refcount decr. needed */
+ uint8_t is_usbfs; /* USB-FS is active */
+};
+
+struct usb_fs_privdata {
+ int bus_index;
+ int dev_index;
+ int ep_addr;
+ int mode;
+ int fifo_index;
+ struct cdev *cdev;
+
+ LIST_ENTRY(usb_fs_privdata) pd_next;
+};
+
+/*
+ * Most of the fields in the "usb_fifo" structure are used by the
+ * generic USB access layer.
+ */
+struct usb_fifo {
+ struct usb_ifqueue free_q;
+ struct usb_ifqueue used_q;
+ struct selinfo selinfo;
+ struct cv cv_io;
+ struct cv cv_drain;
+ struct usb_fifo_methods *methods;
+ struct usb_symlink *symlink[2];/* our symlinks */
+ struct proc *async_p; /* process that wants SIGIO */
+ struct usb_fs_endpoint *fs_ep_ptr;
+ struct usb_device *udev;
+ struct usb_xfer *xfer[2];
+ struct usb_xfer **fs_xfer;
+ struct mtx *priv_mtx; /* client data */
+ /* set if FIFO is opened by a FILE: */
+ struct usb_cdev_privdata *curr_cpd;
+ void *priv_sc0; /* client data */
+ void *priv_sc1; /* client data */
+ void *queue_data;
+ usb_timeout_t timeout; /* timeout in milliseconds */
+ usb_frlength_t bufsize; /* BULK and INTERRUPT buffer size */
+ usb_frcount_t nframes; /* for isochronous mode */
+ uint16_t dev_ep_index; /* our device endpoint index */
+ uint8_t flag_sleeping; /* set if FIFO is sleeping */
+ uint8_t flag_iscomplete; /* set if a USB transfer is complete */
+ uint8_t flag_iserror; /* set if FIFO error happened */
+ uint8_t flag_isselect; /* set if FIFO is selected */
+ uint8_t flag_flushing; /* set if FIFO is flushing data */
+ uint8_t flag_short; /* set if short_ok or force_short
+ * transfer flags should be set */
+ uint8_t flag_stall; /* set if clear stall should be run */
+ uint8_t flag_write_defrag; /* set to defrag written data */
+ uint8_t flag_have_fragment; /* set if defragging */
+ uint8_t iface_index; /* set to the interface we belong to */
+ uint8_t fifo_index; /* set to the FIFO index in "struct
+ * usb_device" */
+ uint8_t fs_ep_max;
+ uint8_t fifo_zlp; /* zero length packet count */
+ uint8_t refcount;
+#define USB_FIFO_REF_MAX 0xFF
+};
+
+extern struct cdevsw usb_devsw;
+
+int usb_fifo_wait(struct usb_fifo *fifo);
+void usb_fifo_signal(struct usb_fifo *fifo);
+uint8_t usb_fifo_opened(struct usb_fifo *fifo);
+struct usb_symlink *usb_alloc_symlink(const char *target);
+void usb_free_symlink(struct usb_symlink *ps);
+int usb_read_symlink(uint8_t *user_ptr, uint32_t startentry,
+ uint32_t user_len);
+
+#endif /* _USB_DEV_HH_ */
diff --git a/rtems/freebsd/dev/usb/usb_device.c b/rtems/freebsd/dev/usb/usb_device.c
new file mode 100644
index 00000000..015ff0ae
--- /dev/null
+++ b/rtems/freebsd/dev/usb/usb_device.c
@@ -0,0 +1,2693 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/* $FreeBSD$ */
+/*-
+ * Copyright (c) 2008 Hans Petter Selasky. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <rtems/freebsd/sys/stdint.h>
+#include <rtems/freebsd/sys/stddef.h>
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/queue.h>
+#include <rtems/freebsd/sys/types.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/bus.h>
+#include <rtems/freebsd/sys/linker_set.h>
+#include <rtems/freebsd/sys/module.h>
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/mutex.h>
+#include <rtems/freebsd/sys/condvar.h>
+#include <rtems/freebsd/sys/sysctl.h>
+#include <rtems/freebsd/sys/sx.h>
+#include <rtems/freebsd/sys/unistd.h>
+#include <rtems/freebsd/sys/callout.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/priv.h>
+#include <rtems/freebsd/sys/conf.h>
+#include <rtems/freebsd/sys/fcntl.h>
+
+#include <rtems/freebsd/dev/usb/usb.h>
+#include <rtems/freebsd/dev/usb/usbdi.h>
+#include <rtems/freebsd/dev/usb/usbdi_util.h>
+#include <rtems/freebsd/dev/usb/usb_ioctl.h>
+
+#if USB_HAVE_UGEN
+#include <rtems/freebsd/sys/sbuf.h>
+#endif
+
+#include <rtems/freebsd/local/usbdevs.h>
+
+#define USB_DEBUG_VAR usb_debug
+
+#include <rtems/freebsd/dev/usb/usb_core.h>
+#include <rtems/freebsd/dev/usb/usb_debug.h>
+#include <rtems/freebsd/dev/usb/usb_process.h>
+#include <rtems/freebsd/dev/usb/usb_device.h>
+#include <rtems/freebsd/dev/usb/usb_busdma.h>
+#include <rtems/freebsd/dev/usb/usb_transfer.h>
+#include <rtems/freebsd/dev/usb/usb_request.h>
+#include <rtems/freebsd/dev/usb/usb_dynamic.h>
+#include <rtems/freebsd/dev/usb/usb_hub.h>
+#include <rtems/freebsd/dev/usb/usb_util.h>
+#include <rtems/freebsd/dev/usb/usb_msctest.h>
+#if USB_HAVE_UGEN
+#include <rtems/freebsd/dev/usb/usb_dev.h>
+#include <rtems/freebsd/dev/usb/usb_generic.h>
+#endif
+
+#include <rtems/freebsd/dev/usb/quirk/usb_quirk.h>
+
+#include <rtems/freebsd/dev/usb/usb_controller.h>
+#include <rtems/freebsd/dev/usb/usb_bus.h>
+
+/* function prototypes */
+
+static void usb_init_endpoint(struct usb_device *, uint8_t,
+ struct usb_endpoint_descriptor *,
+ struct usb_endpoint_ss_comp_descriptor *,
+ struct usb_endpoint *);
+static void usb_unconfigure(struct usb_device *, uint8_t);
+static void usb_detach_device_sub(struct usb_device *, device_t *,
+ uint8_t);
+static uint8_t usb_probe_and_attach_sub(struct usb_device *,
+ struct usb_attach_arg *);
+static void usb_init_attach_arg(struct usb_device *,
+ struct usb_attach_arg *);
+static void usb_suspend_resume_sub(struct usb_device *, device_t,
+ uint8_t);
+static void usbd_clear_stall_proc(struct usb_proc_msg *_pm);
+static usb_error_t usb_config_parse(struct usb_device *, uint8_t, uint8_t);
+static void usbd_set_device_strings(struct usb_device *);
+#if USB_HAVE_UGEN
+static void usb_notify_addq(const char *type, struct usb_device *);
+static void usb_fifo_free_wrap(struct usb_device *, uint8_t, uint8_t);
+static struct cdev *usb_make_dev(struct usb_device *, int, int);
+static void usb_cdev_create(struct usb_device *);
+static void usb_cdev_free(struct usb_device *);
+static void usb_cdev_cleanup(void *);
+#endif
+
+/* This variable is global to allow easy access to it: */
+
+int usb_template = 0;
+
+#ifndef __rtems__
+TUNABLE_INT("hw.usb.usb_template", &usb_template);
+SYSCTL_INT(_hw_usb, OID_AUTO, template, CTLFLAG_RW,
+ &usb_template, 0, "Selected USB device side template");
+#endif /* __rtems__ */
+
+/* English is the default language */
+
+static int usb_lang_id = 0x0009;
+static int usb_lang_mask = 0x00FF;
+
+#ifndef __rtems__
+TUNABLE_INT("hw.usb.usb_lang_id", &usb_lang_id);
+SYSCTL_INT(_hw_usb, OID_AUTO, usb_lang_id, CTLFLAG_RW,
+ &usb_lang_id, 0, "Preferred USB language ID");
+
+TUNABLE_INT("hw.usb.usb_lang_mask", &usb_lang_mask);
+SYSCTL_INT(_hw_usb, OID_AUTO, usb_lang_mask, CTLFLAG_RW,
+ &usb_lang_mask, 0, "Preferred USB language mask");
+#endif /* __rtems__ */
+
+static const char* statestr[USB_STATE_MAX] = {
+ [USB_STATE_DETACHED] = "DETACHED",
+ [USB_STATE_ATTACHED] = "ATTACHED",
+ [USB_STATE_POWERED] = "POWERED",
+ [USB_STATE_ADDRESSED] = "ADDRESSED",
+ [USB_STATE_CONFIGURED] = "CONFIGURED",
+};
+
+const char *
+usb_statestr(enum usb_dev_state state)
+{
+ return ((state < USB_STATE_MAX) ? statestr[state] : "UNKNOWN");
+}
+
+const char *
+usb_get_manufacturer(struct usb_device *udev)
+{
+ return (udev->manufacturer ? udev->manufacturer : "Unknown");
+}
+
+const char *
+usb_get_product(struct usb_device *udev)
+{
+ return (udev->product ? udev->product : "");
+}
+
+const char *
+usb_get_serial(struct usb_device *udev)
+{
+ return (udev->serial ? udev->serial : "");
+}
+
+/*------------------------------------------------------------------------*
+ * usbd_get_ep_by_addr
+ *
+ * This function searches for a USB endpoint by its endpoint address
+ * and direction.
+ *
+ * Returns:
+ * NULL: Failure
+ * Else: Success
+ *------------------------------------------------------------------------*/
+struct usb_endpoint *
+usbd_get_ep_by_addr(struct usb_device *udev, uint8_t ea_val)
+{
+ struct usb_endpoint *ep = udev->endpoints;
+ struct usb_endpoint *ep_end = udev->endpoints + udev->endpoints_max;
+ enum {
+ EA_MASK = (UE_DIR_IN | UE_DIR_OUT | UE_ADDR),
+ };
+
+ /*
+ * According to the USB specification not all bits are used
+ * for the endpoint address. Keep defined bits only:
+ */
+ ea_val &= EA_MASK;
+
+ /*
+ * Iterate across all the USB endpoints searching for a match
+ * based on the endpoint address:
+ */
+ for (; ep != ep_end; ep++) {
+
+ if (ep->edesc == NULL) {
+ continue;
+ }
+ /* do the mask and check the value */
+ if ((ep->edesc->bEndpointAddress & EA_MASK) == ea_val) {
+ goto found;
+ }
+ }
+
+ /*
+ * The default endpoint is always present and is checked separately:
+ */
+ if ((udev->ctrl_ep.edesc) &&
+ ((udev->ctrl_ep.edesc->bEndpointAddress & EA_MASK) == ea_val)) {
+ ep = &udev->ctrl_ep;
+ goto found;
+ }
+ return (NULL);
+
+found:
+ return (ep);
+}
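+
+/*
+ * Illustrative sketch, not part of the original sources: a driver
+ * could use "usbd_get_ep_by_addr()" to check whether the device
+ * implements a given endpoint, for example endpoint 1, IN direction.
+ * The "udev" pointer is assumed to be supplied by the caller. The
+ * code is compiled out by "#if 0":
+ */
+#if 0
+static int
+example_has_ep1_in(struct usb_device *udev)
+{
+ struct usb_endpoint *ep;
+
+ /* endpoint address 0x81 = endpoint 1, IN direction */
+ ep = usbd_get_ep_by_addr(udev, UE_DIR_IN | 1);
+
+ return (ep != NULL);
+}
+#endif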
+
+/*------------------------------------------------------------------------*
+ * usbd_get_endpoint
+ *
+ * This function searches for a USB endpoint based on the information
+ * given by the passed "struct usb_config" pointer.
+ *
+ * Return values:
+ * NULL: No match.
+ * Else: Pointer to "struct usb_endpoint".
+ *------------------------------------------------------------------------*/
+struct usb_endpoint *
+usbd_get_endpoint(struct usb_device *udev, uint8_t iface_index,
+ const struct usb_config *setup)
+{
+ struct usb_endpoint *ep = udev->endpoints;
+ struct usb_endpoint *ep_end = udev->endpoints + udev->endpoints_max;
+ uint8_t index = setup->ep_index;
+ uint8_t ea_mask;
+ uint8_t ea_val;
+ uint8_t type_mask;
+ uint8_t type_val;
+
+ DPRINTFN(10, "udev=%p iface_index=%d address=0x%x "
+ "type=0x%x dir=0x%x index=%d\n",
+ udev, iface_index, setup->endpoint,
+ setup->type, setup->direction, setup->ep_index);
+
+ /* check USB mode */
+
+ if (setup->usb_mode != USB_MODE_DUAL &&
+ udev->flags.usb_mode != setup->usb_mode) {
+ /* wrong mode - no endpoint */
+ return (NULL);
+ }
+
+ /* setup expected endpoint direction mask and value */
+
+ if (setup->direction == UE_DIR_RX) {
+ ea_mask = (UE_DIR_IN | UE_DIR_OUT);
+ ea_val = (udev->flags.usb_mode == USB_MODE_DEVICE) ?
+ UE_DIR_OUT : UE_DIR_IN;
+ } else if (setup->direction == UE_DIR_TX) {
+ ea_mask = (UE_DIR_IN | UE_DIR_OUT);
+ ea_val = (udev->flags.usb_mode == USB_MODE_DEVICE) ?
+ UE_DIR_IN : UE_DIR_OUT;
+ } else if (setup->direction == UE_DIR_ANY) {
+ /* match any endpoint direction */
+ ea_mask = 0;
+ ea_val = 0;
+ } else {
+ /* match the given endpoint direction */
+ ea_mask = (UE_DIR_IN | UE_DIR_OUT);
+ ea_val = (setup->direction & (UE_DIR_IN | UE_DIR_OUT));
+ }
+
+ /* setup expected endpoint address */
+
+ if (setup->endpoint == UE_ADDR_ANY) {
+ /* match any endpoint address */
+ } else {
+ /* match the given endpoint address */
+ ea_mask |= UE_ADDR;
+ ea_val |= (setup->endpoint & UE_ADDR);
+ }
+
+ /* setup expected endpoint type */
+
+ if (setup->type == UE_BULK_INTR) {
+ /* this will match BULK and INTERRUPT endpoints */
+ type_mask = 2;
+ type_val = 2;
+ } else if (setup->type == UE_TYPE_ANY) {
+ /* match any endpoint type */
+ type_mask = 0;
+ type_val = 0;
+ } else {
+ /* match the given endpoint type */
+ type_mask = UE_XFERTYPE;
+ type_val = (setup->type & UE_XFERTYPE);
+ }
+
+ /*
+ * Iterate across all the USB endpoints searching for a match
+ * based on the endpoint address. Note that we are searching
+ * the endpoints from the beginning of the "udev->endpoints" array.
+ */
+ for (; ep != ep_end; ep++) {
+
+ if ((ep->edesc == NULL) ||
+ (ep->iface_index != iface_index)) {
+ continue;
+ }
+ /* do the masks and check the values */
+
+ if (((ep->edesc->bEndpointAddress & ea_mask) == ea_val) &&
+ ((ep->edesc->bmAttributes & type_mask) == type_val)) {
+ if (!index--) {
+ goto found;
+ }
+ }
+ }
+
+ /*
+	 * Match against the default endpoint last, so that "any endpoint",
+	 * "any address" and "any direction" return the first endpoint of
+	 * the interface. "iface_index" and "direction" are ignored:
+ */
+ if ((udev->ctrl_ep.edesc) &&
+ ((udev->ctrl_ep.edesc->bEndpointAddress & ea_mask) == ea_val) &&
+ ((udev->ctrl_ep.edesc->bmAttributes & type_mask) == type_val) &&
+ (!index)) {
+ ep = &udev->ctrl_ep;
+ goto found;
+ }
+ return (NULL);
+
+found:
+ return (ep);
+}
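+
+/*
+ * Illustrative sketch, not part of the original sources: looking up
+ * the first interrupt IN endpoint of an interface by filling out the
+ * wildcard fields of "struct usb_config". Setting "usb_mode" to
+ * "USB_MODE_DUAL" makes the lookup work in both host and device
+ * mode, see the mode check above. "udev" and "iface_index" are
+ * assumed to be supplied by the caller:
+ */
+#if 0
+static struct usb_endpoint *
+example_find_intr_in(struct usb_device *udev, uint8_t iface_index)
+{
+ struct usb_config cfg;
+
+ memset(&cfg, 0, sizeof(cfg));
+ cfg.type = UE_INTERRUPT;	/* interrupt endpoints only */
+ cfg.endpoint = UE_ADDR_ANY;	/* any endpoint address */
+ cfg.direction = UE_DIR_IN;	/* IN direction only */
+ cfg.ep_index = 0;		/* first match */
+ cfg.usb_mode = USB_MODE_DUAL;	/* match any USB mode */
+
+ return (usbd_get_endpoint(udev, iface_index, &cfg));
+}
+#endif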
+
+/*------------------------------------------------------------------------*
+ * usbd_interface_count
+ *
+ * This function stores the number of USB interfaces reported by the
+ * USB config descriptor, excluding alternate settings, into the
+ * unsigned 8-bit integer pointed to by "count".
+ *
+ * Returns:
+ * 0: Success
+ * Else: Failure
+ *------------------------------------------------------------------------*/
+usb_error_t
+usbd_interface_count(struct usb_device *udev, uint8_t *count)
+{
+ if (udev->cdesc == NULL) {
+ *count = 0;
+ return (USB_ERR_NOT_CONFIGURED);
+ }
+ *count = udev->ifaces_max;
+ return (USB_ERR_NORMAL_COMPLETION);
+}
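+
+/*
+ * Illustrative sketch, not part of the original sources: typical use
+ * of "usbd_interface_count()", checking the error code before
+ * trusting the returned count. "udev" is assumed to be supplied by
+ * the caller:
+ */
+#if 0
+uint8_t num_ifaces;
+
+if (usbd_interface_count(udev, &num_ifaces) == USB_ERR_NORMAL_COMPLETION)
+ printf("device has %u interface(s)\n", num_ifaces);
+else
+ printf("device is not configured\n");
+#endif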
+
+
+/*------------------------------------------------------------------------*
+ * usb_init_endpoint
+ *
+ * This function will initialise the USB endpoint structure pointed to by
+ * the "endpoint" argument. The structure pointed to by "endpoint" must be
+ * zeroed before calling this function.
+ *------------------------------------------------------------------------*/
+static void
+usb_init_endpoint(struct usb_device *udev, uint8_t iface_index,
+ struct usb_endpoint_descriptor *edesc,
+ struct usb_endpoint_ss_comp_descriptor *ecomp,
+ struct usb_endpoint *ep)
+{
+ struct usb_bus_methods *methods;
+
+ methods = udev->bus->methods;
+
+ (methods->endpoint_init) (udev, edesc, ep);
+
+ /* initialise USB endpoint structure */
+ ep->edesc = edesc;
+ ep->ecomp = ecomp;
+ ep->iface_index = iface_index;
+ TAILQ_INIT(&ep->endpoint_q.head);
+ ep->endpoint_q.command = &usbd_pipe_start;
+
+ /* the pipe is not supported by the hardware */
+ if (ep->methods == NULL)
+ return;
+
+ /* clear stall, if any */
+ if (methods->clear_stall != NULL) {
+ USB_BUS_LOCK(udev->bus);
+ (methods->clear_stall) (udev, ep);
+ USB_BUS_UNLOCK(udev->bus);
+ }
+}
+
+/*-----------------------------------------------------------------------*
+ * usb_endpoint_foreach
+ *
+ * This function will iterate all the USB endpoints except the control
+ * endpoint. This function is NULL safe.
+ *
+ * Return values:
+ * NULL: End of USB endpoints
+ * Else: Pointer to next USB endpoint
+ *------------------------------------------------------------------------*/
+struct usb_endpoint *
+usb_endpoint_foreach(struct usb_device *udev, struct usb_endpoint *ep)
+{
+ struct usb_endpoint *ep_end;
+
+ /* be NULL safe */
+ if (udev == NULL)
+ return (NULL);
+
+ ep_end = udev->endpoints + udev->endpoints_max;
+
+ /* get next endpoint */
+ if (ep == NULL)
+ ep = udev->endpoints;
+ else
+ ep++;
+
+ /* find next allocated ep */
+ while (ep != ep_end) {
+ if (ep->edesc != NULL)
+ return (ep);
+ ep++;
+ }
+ return (NULL);
+}
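+
+/*
+ * Illustrative sketch, not part of the original sources: the usual
+ * iteration pattern for "usb_endpoint_foreach()", starting from NULL
+ * and stopping when NULL is returned. "udev" is assumed to be
+ * supplied by the caller:
+ */
+#if 0
+struct usb_endpoint *ep = NULL;
+
+while ((ep = usb_endpoint_foreach(udev, ep)) != NULL) {
+ printf("endpoint address 0x%02x\n",
+     ep->edesc->bEndpointAddress);
+}
+#endif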
+
+/*------------------------------------------------------------------------*
+ * usb_unconfigure
+ *
+ * This function will free all USB interfaces and USB endpoints belonging
+ * to a USB device.
+ *
+ * Flag values, see "USB_UNCFG_FLAG_XXX".
+ *------------------------------------------------------------------------*/
+static void
+usb_unconfigure(struct usb_device *udev, uint8_t flag)
+{
+ uint8_t do_unlock;
+
+ /* automatic locking */
+ if (usbd_enum_is_locked(udev)) {
+ do_unlock = 0;
+ } else {
+ do_unlock = 1;
+ usbd_enum_lock(udev);
+ }
+
+ /* detach all interface drivers */
+ usb_detach_device(udev, USB_IFACE_INDEX_ANY, flag);
+
+#if USB_HAVE_UGEN
+ /* free all FIFOs except control endpoint FIFOs */
+ usb_fifo_free_wrap(udev, USB_IFACE_INDEX_ANY, flag);
+
+ /*
+ * Free all cdev's, if any.
+ */
+ usb_cdev_free(udev);
+#endif
+
+#if USB_HAVE_COMPAT_LINUX
+ /* free Linux compat device, if any */
+ if (udev->linux_endpoint_start) {
+ usb_linux_free_device(udev);
+ udev->linux_endpoint_start = NULL;
+ }
+#endif
+
+ usb_config_parse(udev, USB_IFACE_INDEX_ANY, USB_CFG_FREE);
+
+ /* free "cdesc" after "ifaces" and "endpoints", if any */
+ if (udev->cdesc != NULL) {
+ if (udev->flags.usb_mode != USB_MODE_DEVICE)
+ free(udev->cdesc, M_USB);
+ udev->cdesc = NULL;
+ }
+ /* set unconfigured state */
+ udev->curr_config_no = USB_UNCONFIG_NO;
+ udev->curr_config_index = USB_UNCONFIG_INDEX;
+
+ if (do_unlock)
+ usbd_enum_unlock(udev);
+}
+
+/*------------------------------------------------------------------------*
+ * usbd_set_config_index
+ *
+ * This function selects a configuration by index, independent of the
+ * actual configuration number. This function should not be used by
+ * USB drivers.
+ *
+ * Returns:
+ * 0: Success
+ * Else: Failure
+ *------------------------------------------------------------------------*/
+usb_error_t
+usbd_set_config_index(struct usb_device *udev, uint8_t index)
+{
+ struct usb_status ds;
+ struct usb_config_descriptor *cdp;
+ uint16_t power;
+ uint16_t max_power;
+ uint8_t selfpowered;
+ uint8_t do_unlock;
+ usb_error_t err;
+
+ DPRINTFN(6, "udev=%p index=%d\n", udev, index);
+
+ /* automatic locking */
+ if (usbd_enum_is_locked(udev)) {
+ do_unlock = 0;
+ } else {
+ do_unlock = 1;
+ usbd_enum_lock(udev);
+ }
+
+ usb_unconfigure(udev, 0);
+
+ if (index == USB_UNCONFIG_INDEX) {
+ /*
+ * Leave unallocated when unconfiguring the
+ * device. "usb_unconfigure()" will also reset
+ * the current config number and index.
+ */
+ err = usbd_req_set_config(udev, NULL, USB_UNCONFIG_NO);
+ if (udev->state == USB_STATE_CONFIGURED)
+ usb_set_device_state(udev, USB_STATE_ADDRESSED);
+ goto done;
+ }
+ /* get the full config descriptor */
+ if (udev->flags.usb_mode == USB_MODE_DEVICE) {
+ /* save some memory */
+ err = usbd_req_get_descriptor_ptr(udev, &cdp,
+ (UDESC_CONFIG << 8) | index);
+ } else {
+ /* normal request */
+ err = usbd_req_get_config_desc_full(udev,
+ NULL, &cdp, M_USB, index);
+ }
+ if (err) {
+ goto done;
+ }
+ /* set the new config descriptor */
+
+ udev->cdesc = cdp;
+
+ /* Figure out if the device is self or bus powered. */
+ selfpowered = 0;
+ if ((!udev->flags.uq_bus_powered) &&
+ (cdp->bmAttributes & UC_SELF_POWERED) &&
+ (udev->flags.usb_mode == USB_MODE_HOST)) {
+ /* May be self powered. */
+ if (cdp->bmAttributes & UC_BUS_POWERED) {
+ /* Must ask device. */
+ err = usbd_req_get_device_status(udev, NULL, &ds);
+ if (err) {
+ DPRINTFN(0, "could not read "
+ "device status: %s\n",
+ usbd_errstr(err));
+ } else if (UGETW(ds.wStatus) & UDS_SELF_POWERED) {
+ selfpowered = 1;
+ }
+ DPRINTF("status=0x%04x \n",
+ UGETW(ds.wStatus));
+ } else
+ selfpowered = 1;
+ }
+ DPRINTF("udev=%p cdesc=%p (addr %d) cno=%d attr=0x%02x, "
+ "selfpowered=%d, power=%d\n",
+ udev, cdp,
+ udev->address, cdp->bConfigurationValue, cdp->bmAttributes,
+ selfpowered, cdp->bMaxPower * 2);
+
+ /* Check if we have enough power. */
+ power = cdp->bMaxPower * 2;
+
+ if (udev->parent_hub) {
+ max_power = udev->parent_hub->hub->portpower;
+ } else {
+ max_power = USB_MAX_POWER;
+ }
+
+ if (power > max_power) {
+ DPRINTFN(0, "power exceeded %d > %d\n", power, max_power);
+ err = USB_ERR_NO_POWER;
+ goto done;
+ }
+ /* Only update "self_powered" in USB Host Mode */
+ if (udev->flags.usb_mode == USB_MODE_HOST) {
+ udev->flags.self_powered = selfpowered;
+ }
+ udev->power = power;
+ udev->curr_config_no = cdp->bConfigurationValue;
+ udev->curr_config_index = index;
+ usb_set_device_state(udev, USB_STATE_CONFIGURED);
+
+ /* Set the actual configuration value. */
+ err = usbd_req_set_config(udev, NULL, cdp->bConfigurationValue);
+ if (err) {
+ goto done;
+ }
+
+ err = usb_config_parse(udev, USB_IFACE_INDEX_ANY, USB_CFG_ALLOC);
+ if (err) {
+ goto done;
+ }
+
+ err = usb_config_parse(udev, USB_IFACE_INDEX_ANY, USB_CFG_INIT);
+ if (err) {
+ goto done;
+ }
+
+#if USB_HAVE_UGEN
+ /* create device nodes for each endpoint */
+ usb_cdev_create(udev);
+#endif
+
+done:
+ DPRINTF("error=%s\n", usbd_errstr(err));
+ if (err) {
+ usb_unconfigure(udev, 0);
+ }
+ if (do_unlock)
+ usbd_enum_unlock(udev);
+ return (err);
+}
+
+/*------------------------------------------------------------------------*
+ * usb_config_parse
+ *
+ * This function will allocate and free USB interfaces and USB endpoints,
+ * parse the USB configuration structure and initialise the USB endpoints
+ * and interfaces. If "iface_index" is not equal to
+ * "USB_IFACE_INDEX_ANY" then the "cmd" parameter is the
+ * alternate_setting to be selected for the given interface. Else the
+ * "cmd" parameter is defined by "USB_CFG_XXX". "iface_index" can be
+ * "USB_IFACE_INDEX_ANY" or a valid USB interface index. This function
+ * is typically called when setting the configuration or when setting
+ * an alternate interface.
+ *
+ * Returns:
+ * 0: Success
+ * Else: Failure
+ *------------------------------------------------------------------------*/
+static usb_error_t
+usb_config_parse(struct usb_device *udev, uint8_t iface_index, uint8_t cmd)
+{
+ struct usb_idesc_parse_state ips;
+ struct usb_interface_descriptor *id;
+ struct usb_endpoint_descriptor *ed;
+ struct usb_interface *iface;
+ struct usb_endpoint *ep;
+ usb_error_t err;
+ uint8_t ep_curr;
+ uint8_t ep_max;
+ uint8_t temp;
+ uint8_t do_init;
+ uint8_t alt_index;
+
+ if (iface_index != USB_IFACE_INDEX_ANY) {
+ /* parameter overload */
+ alt_index = cmd;
+ cmd = USB_CFG_INIT;
+ } else {
+ /* not used */
+ alt_index = 0;
+ }
+
+ err = 0;
+
+ DPRINTFN(5, "iface_index=%d cmd=%d\n",
+ iface_index, cmd);
+
+ if (cmd == USB_CFG_FREE)
+ goto cleanup;
+
+ if (cmd == USB_CFG_INIT) {
+ sx_assert(&udev->enum_sx, SA_LOCKED);
+
+ /* check for in-use endpoints */
+
+ ep = udev->endpoints;
+ ep_max = udev->endpoints_max;
+ while (ep_max--) {
+ /* look for matching endpoints */
+ if ((iface_index == USB_IFACE_INDEX_ANY) ||
+ (iface_index == ep->iface_index)) {
+ if (ep->refcount_alloc != 0) {
+ /*
+ * This typically indicates a
+ * more serious error.
+ */
+ err = USB_ERR_IN_USE;
+ } else {
+ /* reset endpoint */
+ memset(ep, 0, sizeof(*ep));
+ /* make sure we don't zero the endpoint again */
+ ep->iface_index = USB_IFACE_INDEX_ANY;
+ }
+ }
+ ep++;
+ }
+
+ if (err)
+ return (err);
+ }
+
+ memset(&ips, 0, sizeof(ips));
+
+ ep_curr = 0;
+ ep_max = 0;
+
+ while ((id = usb_idesc_foreach(udev->cdesc, &ips))) {
+
+ /* check for interface overflow */
+ if (ips.iface_index == USB_IFACE_MAX)
+ break; /* crazy */
+
+ iface = udev->ifaces + ips.iface_index;
+
+ /* check for specific interface match */
+
+ if (cmd == USB_CFG_INIT) {
+ if ((iface_index != USB_IFACE_INDEX_ANY) &&
+ (iface_index != ips.iface_index)) {
+ /* wrong interface */
+ do_init = 0;
+ } else if (alt_index != ips.iface_index_alt) {
+ /* wrong alternate setting */
+ do_init = 0;
+ } else {
+ /* initialise interface */
+ do_init = 1;
+ }
+ } else
+ do_init = 0;
+
+ /* check for new interface */
+ if (ips.iface_index_alt == 0) {
+ /* update current number of endpoints */
+ ep_curr = ep_max;
+ }
+ /* check for init */
+ if (do_init) {
+ /* setup the USB interface structure */
+ iface->idesc = id;
+ /* default setting */
+ iface->parent_iface_index = USB_IFACE_INDEX_ANY;
+ /* set alternate index */
+ iface->alt_index = alt_index;
+ }
+
+ DPRINTFN(5, "found idesc nendpt=%d\n", id->bNumEndpoints);
+
+ ed = (struct usb_endpoint_descriptor *)id;
+
+ temp = ep_curr;
+
+ /* iterate all the endpoint descriptors */
+ while ((ed = usb_edesc_foreach(udev->cdesc, ed))) {
+
+ if (temp == USB_EP_MAX)
+ break; /* crazy */
+
+ ep = udev->endpoints + temp;
+
+ if (do_init) {
+ void *ecomp;
+
+ ecomp = usb_ed_comp_foreach(udev->cdesc, (void *)ed);
+ if (ecomp != NULL)
+ DPRINTFN(5, "Found endpoint companion descriptor\n");
+
+ usb_init_endpoint(udev,
+ ips.iface_index, ed, ecomp, ep);
+ }
+
+			temp++;
+
+ /* find maximum number of endpoints */
+ if (ep_max < temp)
+ ep_max = temp;
+
+			/* optimisation */
+ id = (struct usb_interface_descriptor *)ed;
+ }
+ }
+
+ /* NOTE: It is valid to have no interfaces and no endpoints! */
+
+ if (cmd == USB_CFG_ALLOC) {
+ udev->ifaces_max = ips.iface_index;
+ udev->ifaces = NULL;
+ if (udev->ifaces_max != 0) {
+ udev->ifaces = malloc(sizeof(*iface) * udev->ifaces_max,
+ M_USB, M_WAITOK | M_ZERO);
+ if (udev->ifaces == NULL) {
+ err = USB_ERR_NOMEM;
+ goto done;
+ }
+ }
+ if (ep_max != 0) {
+ udev->endpoints = malloc(sizeof(*ep) * ep_max,
+ M_USB, M_WAITOK | M_ZERO);
+ if (udev->endpoints == NULL) {
+ err = USB_ERR_NOMEM;
+ goto done;
+ }
+ } else {
+ udev->endpoints = NULL;
+ }
+ USB_BUS_LOCK(udev->bus);
+ udev->endpoints_max = ep_max;
+ /* reset any ongoing clear-stall */
+ udev->ep_curr = NULL;
+ USB_BUS_UNLOCK(udev->bus);
+ }
+
+done:
+ if (err) {
+ if (cmd == USB_CFG_ALLOC) {
+cleanup:
+ USB_BUS_LOCK(udev->bus);
+ udev->endpoints_max = 0;
+ /* reset any ongoing clear-stall */
+ udev->ep_curr = NULL;
+ USB_BUS_UNLOCK(udev->bus);
+
+ /* cleanup */
+ if (udev->ifaces != NULL)
+ free(udev->ifaces, M_USB);
+ if (udev->endpoints != NULL)
+ free(udev->endpoints, M_USB);
+
+ udev->ifaces = NULL;
+ udev->endpoints = NULL;
+ udev->ifaces_max = 0;
+ }
+ }
+ return (err);
+}
+
+/*------------------------------------------------------------------------*
+ * usbd_set_alt_interface_index
+ *
+ * This function will select an alternate interface index for the
+ * given interface index. The interface should not be in use when this
+ * function is called, meaning there should not be any open USB
+ * transfers; otherwise an error is returned. If the alternate setting
+ * is already set, this function will simply return success. This function
+ * is called in Host mode and Device mode!
+ *
+ * Returns:
+ * 0: Success
+ * Else: Failure
+ *------------------------------------------------------------------------*/
+usb_error_t
+usbd_set_alt_interface_index(struct usb_device *udev,
+ uint8_t iface_index, uint8_t alt_index)
+{
+ struct usb_interface *iface = usbd_get_iface(udev, iface_index);
+ usb_error_t err;
+ uint8_t do_unlock;
+
+ /* automatic locking */
+ if (usbd_enum_is_locked(udev)) {
+ do_unlock = 0;
+ } else {
+ do_unlock = 1;
+ usbd_enum_lock(udev);
+ }
+ if (iface == NULL) {
+ err = USB_ERR_INVAL;
+ goto done;
+ }
+ if (iface->alt_index == alt_index) {
+ /*
+ * Optimise away duplicate setting of
+ * alternate setting in USB Host Mode!
+ */
+ err = 0;
+ goto done;
+ }
+#if USB_HAVE_UGEN
+ /*
+ * Free all generic FIFOs for this interface, except control
+ * endpoint FIFOs:
+ */
+ usb_fifo_free_wrap(udev, iface_index, 0);
+#endif
+
+ err = usb_config_parse(udev, iface_index, alt_index);
+ if (err) {
+ goto done;
+ }
+ if (iface->alt_index != alt_index) {
+ /* the alternate setting does not exist */
+ err = USB_ERR_INVAL;
+ goto done;
+ }
+
+ err = usbd_req_set_alt_interface_no(udev, NULL, iface_index,
+ iface->idesc->bAlternateSetting);
+
+done:
+ if (do_unlock)
+ usbd_enum_unlock(udev);
+
+ return (err);
+}
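+
+/*
+ * Illustrative sketch, not part of the original sources: a driver
+ * selecting alternate setting 1 on its interface. Any open USB
+ * transfers on the interface must have been unsetup first. "udev"
+ * and "iface_index" are assumed to be supplied by the caller:
+ */
+#if 0
+usb_error_t err;
+
+err = usbd_set_alt_interface_index(udev, iface_index, 1);
+if (err != 0)
+ printf("could not select alternate setting: %s\n",
+     usbd_errstr(err));
+#endif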
+
+/*------------------------------------------------------------------------*
+ * usbd_set_endpoint_stall
+ *
+ * This function is used to make a BULK or INTERRUPT endpoint send
+ * STALL tokens in USB device mode.
+ *
+ * Returns:
+ * 0: Success
+ * Else: Failure
+ *------------------------------------------------------------------------*/
+usb_error_t
+usbd_set_endpoint_stall(struct usb_device *udev, struct usb_endpoint *ep,
+ uint8_t do_stall)
+{
+ struct usb_xfer *xfer;
+ uint8_t et;
+ uint8_t was_stalled;
+
+ if (ep == NULL) {
+ /* nothing to do */
+ DPRINTF("Cannot find endpoint\n");
+ /*
+ * Pretend that the clear or set stall request is
+		 * successful; otherwise some USB host stacks can do
+ * strange things, especially when a control endpoint
+ * stalls.
+ */
+ return (0);
+ }
+ et = (ep->edesc->bmAttributes & UE_XFERTYPE);
+
+ if ((et != UE_BULK) &&
+ (et != UE_INTERRUPT)) {
+ /*
+ * Should not stall control
+ * nor isochronous endpoints.
+ */
+ DPRINTF("Invalid endpoint\n");
+ return (0);
+ }
+ USB_BUS_LOCK(udev->bus);
+
+ /* store current stall state */
+ was_stalled = ep->is_stalled;
+
+ /* check for no change */
+ if (was_stalled && do_stall) {
+ /* if the endpoint is already stalled do nothing */
+ USB_BUS_UNLOCK(udev->bus);
+ DPRINTF("No change\n");
+ return (0);
+ }
+ /* set stalled state */
+ ep->is_stalled = 1;
+
+ if (do_stall || (!was_stalled)) {
+ if (!was_stalled) {
+ /* lookup the current USB transfer, if any */
+ xfer = ep->endpoint_q.curr;
+ } else {
+ xfer = NULL;
+ }
+
+ /*
+ * If "xfer" is non-NULL the "set_stall" method will
+ * complete the USB transfer like in case of a timeout
+ * setting the error code "USB_ERR_STALLED".
+ */
+ (udev->bus->methods->set_stall) (udev, xfer, ep, &do_stall);
+ }
+ if (!do_stall) {
+ ep->toggle_next = 0; /* reset data toggle */
+ ep->is_stalled = 0; /* clear stalled state */
+
+ (udev->bus->methods->clear_stall) (udev, ep);
+
+ /* start up the current or next transfer, if any */
+ usb_command_wrapper(&ep->endpoint_q, ep->endpoint_q.curr);
+ }
+ USB_BUS_UNLOCK(udev->bus);
+ return (0);
+}
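+
+/*
+ * Illustrative sketch, not part of the original sources: in USB
+ * device side mode, a function driver could halt and later un-halt
+ * a BULK IN endpoint like this. The example assumes that endpoint
+ * address 0x81 exists on "udev":
+ */
+#if 0
+struct usb_endpoint *ep;
+
+ep = usbd_get_ep_by_addr(udev, UE_DIR_IN | 1);
+
+/* start sending STALL tokens */
+usbd_set_endpoint_stall(udev, ep, 1);
+
+/* ...later: stop stalling and reset the data toggle */
+usbd_set_endpoint_stall(udev, ep, 0);
+#endif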
+
+/*------------------------------------------------------------------------*
+ * usb_reset_iface_endpoints - used in USB device side mode
+ *------------------------------------------------------------------------*/
+usb_error_t
+usb_reset_iface_endpoints(struct usb_device *udev, uint8_t iface_index)
+{
+ struct usb_endpoint *ep;
+ struct usb_endpoint *ep_end;
+
+ ep = udev->endpoints;
+ ep_end = udev->endpoints + udev->endpoints_max;
+
+ for (; ep != ep_end; ep++) {
+
+ if ((ep->edesc == NULL) ||
+ (ep->iface_index != iface_index)) {
+ continue;
+ }
+ /* simulate a clear stall from the peer */
+ usbd_set_endpoint_stall(udev, ep, 0);
+ }
+ return (0);
+}
+
+/*------------------------------------------------------------------------*
+ * usb_detach_device_sub
+ *
+ * This function will try to detach a USB device. If it fails, a panic
+ * will result.
+ *
+ * Flag values, see "USB_UNCFG_FLAG_XXX".
+ *------------------------------------------------------------------------*/
+static void
+usb_detach_device_sub(struct usb_device *udev, device_t *ppdev,
+ uint8_t flag)
+{
+ device_t dev;
+ int err;
+
+ dev = *ppdev;
+ if (dev) {
+ /*
+ * NOTE: It is important to clear "*ppdev" before deleting
+ * the child due to some device methods being called late
+ * during the delete process !
+ */
+ *ppdev = NULL;
+
+ device_printf(dev, "at %s, port %d, addr %d "
+ "(disconnected)\n",
+ device_get_nameunit(udev->parent_dev),
+ udev->port_no, udev->address);
+
+ if (device_is_attached(dev)) {
+ if (udev->flags.peer_suspended) {
+ err = DEVICE_RESUME(dev);
+ if (err) {
+ device_printf(dev, "Resume failed\n");
+ }
+ }
+ if (device_detach(dev)) {
+ goto error;
+ }
+ }
+ if (device_delete_child(udev->parent_dev, dev)) {
+ goto error;
+ }
+ }
+ return;
+
+error:
+ /* Detach is not allowed to fail in the USB world */
+ panic("A USB driver would not detach\n");
+}
+
+/*------------------------------------------------------------------------*
+ * usb_detach_device
+ *
+ * The following function will detach the matching interfaces.
+ * This function is NULL safe.
+ *
+ * Flag values, see "USB_UNCFG_FLAG_XXX".
+ *------------------------------------------------------------------------*/
+void
+usb_detach_device(struct usb_device *udev, uint8_t iface_index,
+ uint8_t flag)
+{
+ struct usb_interface *iface;
+ uint8_t i;
+
+ if (udev == NULL) {
+ /* nothing to do */
+ return;
+ }
+ DPRINTFN(4, "udev=%p\n", udev);
+
+ sx_assert(&udev->enum_sx, SA_LOCKED);
+
+ /*
+ * First detach the child to give the child's detach routine a
+ * chance to detach the sub-devices in the correct order.
+ * Then delete the child using "device_delete_child()" which
+ * will detach all sub-devices from the bottom and upwards!
+ */
+ if (iface_index != USB_IFACE_INDEX_ANY) {
+ i = iface_index;
+ iface_index = i + 1;
+ } else {
+ i = 0;
+ iface_index = USB_IFACE_MAX;
+ }
+
+ /* do the detach */
+
+ for (; i != iface_index; i++) {
+
+ iface = usbd_get_iface(udev, i);
+ if (iface == NULL) {
+ /* looks like the end of the USB interfaces */
+ break;
+ }
+ usb_detach_device_sub(udev, &iface->subdev, flag);
+ }
+}
+
+/*------------------------------------------------------------------------*
+ * usb_probe_and_attach_sub
+ *
+ * Returns:
+ * 0: Success
+ * Else: Failure
+ *------------------------------------------------------------------------*/
+static uint8_t
+usb_probe_and_attach_sub(struct usb_device *udev,
+ struct usb_attach_arg *uaa)
+{
+ struct usb_interface *iface;
+ device_t dev;
+ int err;
+
+ iface = uaa->iface;
+ if (iface->parent_iface_index != USB_IFACE_INDEX_ANY) {
+ /* leave interface alone */
+ return (0);
+ }
+ dev = iface->subdev;
+ if (dev) {
+
+ /* clean up after module unload */
+
+ if (device_is_attached(dev)) {
+ /* already a device there */
+ return (0);
+ }
+ /* clear "iface->subdev" as early as possible */
+
+ iface->subdev = NULL;
+
+ if (device_delete_child(udev->parent_dev, dev)) {
+
+ /*
+ * Panic here, else one can get a double call
+ * to device_detach(). USB devices should
+ * never fail on detach!
+ */
+ panic("device_delete_child() failed\n");
+ }
+ }
+ if (uaa->temp_dev == NULL) {
+
+ /* create a new child */
+ uaa->temp_dev = device_add_child(udev->parent_dev, NULL, -1);
+ if (uaa->temp_dev == NULL) {
+ device_printf(udev->parent_dev,
+ "Device creation failed\n");
+ return (1); /* failure */
+ }
+ device_set_ivars(uaa->temp_dev, uaa);
+ device_quiet(uaa->temp_dev);
+ }
+ /*
+ * Set "subdev" before probe and attach so that "devd" gets
+ * the information it needs.
+ */
+ iface->subdev = uaa->temp_dev;
+
+ if (device_probe_and_attach(iface->subdev) == 0) {
+ /*
+ * The USB attach arguments are only available during probe
+ * and attach !
+ */
+ uaa->temp_dev = NULL;
+ device_set_ivars(iface->subdev, NULL);
+
+ if (udev->flags.peer_suspended) {
+ err = DEVICE_SUSPEND(iface->subdev);
+ if (err)
+ device_printf(iface->subdev, "Suspend failed\n");
+ }
+ return (0); /* success */
+ } else {
+ /* No USB driver found */
+ iface->subdev = NULL;
+ }
+ return (1); /* failure */
+}
+
+/*------------------------------------------------------------------------*
+ * usbd_set_parent_iface
+ *
+ * Using this function will lock the alternate interface setting on an
+ * interface. It is typically used for multi interface drivers. In USB
+ * device side mode it is assumed that the alternate interfaces all
+ * have the same endpoint descriptors. The default parent index value
+ * is "USB_IFACE_INDEX_ANY". Then the alternate setting value is not
+ * locked.
+ *------------------------------------------------------------------------*/
+void
+usbd_set_parent_iface(struct usb_device *udev, uint8_t iface_index,
+ uint8_t parent_index)
+{
+ struct usb_interface *iface;
+
+ iface = usbd_get_iface(udev, iface_index);
+ if (iface) {
+ iface->parent_iface_index = parent_index;
+ }
+}
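+
+/*
+ * Illustrative sketch, not part of the original sources: a
+ * multi-interface driver attached to one interface can claim the
+ * following interface, so that no other driver probes it and so
+ * that its alternate setting stays locked. "uaa" is assumed to be
+ * the driver's "struct usb_attach_arg" pointer:
+ */
+#if 0
+usbd_set_parent_iface(uaa->device, uaa->info.bIfaceIndex + 1,
+    uaa->info.bIfaceIndex);
+#endif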
+
+static void
+usb_init_attach_arg(struct usb_device *udev,
+ struct usb_attach_arg *uaa)
+{
+ bzero(uaa, sizeof(*uaa));
+
+ uaa->device = udev;
+ uaa->usb_mode = udev->flags.usb_mode;
+ uaa->port = udev->port_no;
+ uaa->dev_state = UAA_DEV_READY;
+
+ uaa->info.idVendor = UGETW(udev->ddesc.idVendor);
+ uaa->info.idProduct = UGETW(udev->ddesc.idProduct);
+ uaa->info.bcdDevice = UGETW(udev->ddesc.bcdDevice);
+ uaa->info.bDeviceClass = udev->ddesc.bDeviceClass;
+ uaa->info.bDeviceSubClass = udev->ddesc.bDeviceSubClass;
+ uaa->info.bDeviceProtocol = udev->ddesc.bDeviceProtocol;
+ uaa->info.bConfigIndex = udev->curr_config_index;
+ uaa->info.bConfigNum = udev->curr_config_no;
+}
+
+/*------------------------------------------------------------------------*
+ * usb_probe_and_attach
+ *
+ * This function is called from "uhub_explore_sub()",
+ * "usb_handle_set_config()" and "usb_handle_request()".
+ *
+ * Returns:
+ * 0: Success
+ * Else: A control transfer failed
+ *------------------------------------------------------------------------*/
+usb_error_t
+usb_probe_and_attach(struct usb_device *udev, uint8_t iface_index)
+{
+ struct usb_attach_arg uaa;
+ struct usb_interface *iface;
+ uint8_t i;
+ uint8_t j;
+ uint8_t do_unlock;
+
+ if (udev == NULL) {
+ DPRINTF("udev == NULL\n");
+ return (USB_ERR_INVAL);
+ }
+ /* automatic locking */
+ if (usbd_enum_is_locked(udev)) {
+ do_unlock = 0;
+ } else {
+ do_unlock = 1;
+ usbd_enum_lock(udev);
+ }
+
+ if (udev->curr_config_index == USB_UNCONFIG_INDEX) {
+ /* do nothing - no configuration has been set */
+ goto done;
+ }
+ /* setup USB attach arguments */
+
+ usb_init_attach_arg(udev, &uaa);
+
+ /* Check if only one interface should be probed: */
+ if (iface_index != USB_IFACE_INDEX_ANY) {
+ i = iface_index;
+ j = i + 1;
+ } else {
+ i = 0;
+ j = USB_IFACE_MAX;
+ }
+
+ /* Do the probe and attach */
+ for (; i != j; i++) {
+
+ iface = usbd_get_iface(udev, i);
+ if (iface == NULL) {
+ /*
+ * Looks like the end of the USB
+ * interfaces !
+ */
+ DPRINTFN(2, "end of interfaces "
+ "at %u\n", i);
+ break;
+ }
+ if (iface->idesc == NULL) {
+ /* no interface descriptor */
+ continue;
+ }
+ uaa.iface = iface;
+
+ uaa.info.bInterfaceClass =
+ iface->idesc->bInterfaceClass;
+ uaa.info.bInterfaceSubClass =
+ iface->idesc->bInterfaceSubClass;
+ uaa.info.bInterfaceProtocol =
+ iface->idesc->bInterfaceProtocol;
+ uaa.info.bIfaceIndex = i;
+ uaa.info.bIfaceNum =
+ iface->idesc->bInterfaceNumber;
+ uaa.use_generic = 0;
+ uaa.driver_info = 0; /* reset driver_info */
+
+ DPRINTFN(2, "iclass=%u/%u/%u iindex=%u/%u\n",
+ uaa.info.bInterfaceClass,
+ uaa.info.bInterfaceSubClass,
+ uaa.info.bInterfaceProtocol,
+ uaa.info.bIfaceIndex,
+ uaa.info.bIfaceNum);
+
+ /* try specific interface drivers first */
+
+ if (usb_probe_and_attach_sub(udev, &uaa)) {
+ /* ignore */
+ }
+ /* try generic interface drivers last */
+
+ uaa.use_generic = 1;
+ uaa.driver_info = 0; /* reset driver_info */
+
+ if (usb_probe_and_attach_sub(udev, &uaa)) {
+ /* ignore */
+ }
+ }
+
+ if (uaa.temp_dev) {
+ /* remove the last created child; it is unused */
+
+ if (device_delete_child(udev->parent_dev, uaa.temp_dev)) {
+ DPRINTFN(0, "device delete child failed\n");
+ }
+ }
+done:
+ if (do_unlock)
+ usbd_enum_unlock(udev);
+
+ return (0);
+}
+
+/*------------------------------------------------------------------------*
+ * usb_suspend_resume_sub
+ *
+ * This function is called when the suspend or resume methods should
+ * be executed on a USB device.
+ *------------------------------------------------------------------------*/
+static void
+usb_suspend_resume_sub(struct usb_device *udev, device_t dev, uint8_t do_suspend)
+{
+ int err;
+
+ if (dev == NULL) {
+ return;
+ }
+ if (!device_is_attached(dev)) {
+ return;
+ }
+ if (do_suspend) {
+ err = DEVICE_SUSPEND(dev);
+ } else {
+ err = DEVICE_RESUME(dev);
+ }
+ if (err) {
+ device_printf(dev, "%s failed\n",
+ do_suspend ? "Suspend" : "Resume");
+ }
+}
+
+/*------------------------------------------------------------------------*
+ * usb_suspend_resume
+ *
+ * The following function will suspend or resume the USB device.
+ *
+ * Returns:
+ * 0: Success
+ * Else: Failure
+ *------------------------------------------------------------------------*/
+usb_error_t
+usb_suspend_resume(struct usb_device *udev, uint8_t do_suspend)
+{
+ struct usb_interface *iface;
+ uint8_t i;
+
+ if (udev == NULL) {
+ /* nothing to do */
+ return (0);
+ }
+ DPRINTFN(4, "udev=%p do_suspend=%d\n", udev, do_suspend);
+
+ sx_assert(&udev->sr_sx, SA_LOCKED);
+
+ USB_BUS_LOCK(udev->bus);
+ /* filter the suspend events */
+ if (udev->flags.peer_suspended == do_suspend) {
+ USB_BUS_UNLOCK(udev->bus);
+ /* nothing to do */
+ return (0);
+ }
+ udev->flags.peer_suspended = do_suspend;
+ USB_BUS_UNLOCK(udev->bus);
+
+ /* do the suspend or resume */
+
+ for (i = 0; i != USB_IFACE_MAX; i++) {
+
+ iface = usbd_get_iface(udev, i);
+ if (iface == NULL) {
+ /* looks like the end of the USB interfaces */
+ break;
+ }
+ usb_suspend_resume_sub(udev, iface->subdev, do_suspend);
+ }
+ return (0);
+}
+
+/*------------------------------------------------------------------------*
+ * usbd_clear_stall_proc
+ *
+ * This function performs generic USB clear stall operations.
+ *------------------------------------------------------------------------*/
+static void
+usbd_clear_stall_proc(struct usb_proc_msg *_pm)
+{
+ struct usb_clear_stall_msg *pm = (void *)_pm;
+ struct usb_device *udev = pm->udev;
+
+ /* Change lock */
+ USB_BUS_UNLOCK(udev->bus);
+ mtx_lock(&udev->device_mtx);
+
+ /* Start clear stall callback */
+ usbd_transfer_start(udev->ctrl_xfer[1]);
+
+ /* Change lock */
+ mtx_unlock(&udev->device_mtx);
+ USB_BUS_LOCK(udev->bus);
+}
+
+/*------------------------------------------------------------------------*
+ * usb_alloc_device
+ *
+ * This function allocates a new USB device. This function is called
+ * when a new device has been put in the powered state, but not yet in
+ * the addressed state. Get initial descriptor, set the address, get
+ * full descriptor and get strings.
+ *
+ * Return values:
+ * NULL: Failure
+ * Else: Success
+ *------------------------------------------------------------------------*/
+struct usb_device *
+usb_alloc_device(device_t parent_dev, struct usb_bus *bus,
+ struct usb_device *parent_hub, uint8_t depth, uint8_t port_index,
+ uint8_t port_no, enum usb_dev_speed speed, enum usb_hc_mode mode)
+{
+ struct usb_attach_arg uaa;
+ struct usb_device *udev;
+ struct usb_device *adev;
+ struct usb_device *hub;
+ uint8_t *scratch_ptr;
+ size_t scratch_size;
+ usb_error_t err;
+ uint8_t device_index;
+ uint8_t config_index;
+ uint8_t config_quirk;
+ uint8_t set_config_failed;
+
+ DPRINTF("parent_dev=%p, bus=%p, parent_hub=%p, depth=%u, "
+ "port_index=%u, port_no=%u, speed=%u, usb_mode=%u\n",
+ parent_dev, bus, parent_hub, depth, port_index, port_no,
+ speed, mode);
+
+ /*
+ * Find an unused device index. In USB Host mode this is the
+ * same as the device address.
+ *
+ * Device index zero is not used and device index 1 should
+ * always be the root hub.
+ */
+ for (device_index = USB_ROOT_HUB_ADDR;
+ (device_index != bus->devices_max) &&
+ (bus->devices[device_index] != NULL);
+ device_index++) /* nop */;
+
+ if (device_index == bus->devices_max) {
+ device_printf(bus->bdev,
+ "No free USB device index for new device\n");
+ return (NULL);
+ }
+
+ if (depth > 0x10) {
+ device_printf(bus->bdev,
+ "Invalid device depth\n");
+ return (NULL);
+ }
+ udev = malloc(sizeof(*udev), M_USB, M_WAITOK | M_ZERO);
+ if (udev == NULL) {
+ return (NULL);
+ }
+ /* initialise our SX-lock */
+ sx_init_flags(&udev->ctrl_sx, "USB device SX lock", SX_DUPOK);
+
+ /* initialise our SX-lock */
+ sx_init_flags(&udev->enum_sx, "USB config SX lock", SX_DUPOK);
+ sx_init_flags(&udev->sr_sx, "USB suspend and resume SX lock", SX_DUPOK);
+
+ cv_init(&udev->ctrlreq_cv, "WCTRL");
+ cv_init(&udev->ref_cv, "UGONE");
+
+ /* initialise our mutex */
+ mtx_init(&udev->device_mtx, "USB device mutex", NULL, MTX_DEF);
+
+ /* initialise generic clear stall */
+ udev->cs_msg[0].hdr.pm_callback = &usbd_clear_stall_proc;
+ udev->cs_msg[0].udev = udev;
+ udev->cs_msg[1].hdr.pm_callback = &usbd_clear_stall_proc;
+ udev->cs_msg[1].udev = udev;
+
+ /* initialise some USB device fields */
+ udev->parent_hub = parent_hub;
+ udev->parent_dev = parent_dev;
+ udev->port_index = port_index;
+ udev->port_no = port_no;
+ udev->depth = depth;
+ udev->bus = bus;
+ udev->address = USB_START_ADDR; /* default value */
+ udev->plugtime = (usb_ticks_t)ticks;
+ /*
+ * We need to force the power mode to "on" because there are plenty
+ * of USB devices out there that do not work very well with
+ * automatic suspend and resume!
+ */
+ udev->power_mode = usbd_filter_power_mode(udev, USB_POWER_MODE_ON);
+ udev->pwr_save.last_xfer_time = ticks;
+ /* we are not ready yet */
+ udev->refcount = 1;
+
+ /* set up default endpoint descriptor */
+ udev->ctrl_ep_desc.bLength = sizeof(udev->ctrl_ep_desc);
+ udev->ctrl_ep_desc.bDescriptorType = UDESC_ENDPOINT;
+ udev->ctrl_ep_desc.bEndpointAddress = USB_CONTROL_ENDPOINT;
+ udev->ctrl_ep_desc.bmAttributes = UE_CONTROL;
+ udev->ctrl_ep_desc.wMaxPacketSize[0] = USB_MAX_IPACKET;
+ udev->ctrl_ep_desc.wMaxPacketSize[1] = 0;
+ udev->ctrl_ep_desc.bInterval = 0;
+
+ /* set up default endpoint companion descriptor */
+ udev->ctrl_ep_comp_desc.bLength = sizeof(udev->ctrl_ep_comp_desc);
+ udev->ctrl_ep_comp_desc.bDescriptorType = UDESC_ENDPOINT_SS_COMP;
+
+ udev->ddesc.bMaxPacketSize = USB_MAX_IPACKET;
+
+ udev->speed = speed;
+ udev->flags.usb_mode = mode;
+
+ /* search for our High Speed USB HUB, if any */
+
+ adev = udev;
+ hub = udev->parent_hub;
+
+ while (hub) {
+ if (hub->speed == USB_SPEED_HIGH) {
+ udev->hs_hub_addr = hub->address;
+ udev->parent_hs_hub = hub;
+ udev->hs_port_no = adev->port_no;
+ break;
+ }
+ adev = hub;
+ hub = hub->parent_hub;
+ }
+
+ /* init the default endpoint */
+ usb_init_endpoint(udev, 0,
+ &udev->ctrl_ep_desc,
+ &udev->ctrl_ep_comp_desc,
+ &udev->ctrl_ep);
+
+ /* set device index */
+ udev->device_index = device_index;
+
+#if USB_HAVE_UGEN
+ /* Create ugen name */
+ snprintf(udev->ugen_name, sizeof(udev->ugen_name),
+ USB_GENERIC_NAME "%u.%u", device_get_unit(bus->bdev),
+ device_index);
+ LIST_INIT(&udev->pd_list);
+
+ /* Create the control endpoint device */
+ udev->ctrl_dev = usb_make_dev(udev, 0, FREAD|FWRITE);
+
+ /* Create a link from /dev/ugenX.X to the default endpoint */
+ make_dev_alias(udev->ctrl_dev, "%s", udev->ugen_name);
+#endif
+ /* Initialise device */
+ if (bus->methods->device_init != NULL) {
+ err = (bus->methods->device_init) (udev);
+ if (err != 0) {
+ DPRINTFN(0, "device init %d failed "
+ "(%s, ignored)\n", device_index,
+ usbd_errstr(err));
+ goto done;
+ }
+ }
+ /* set powered device state after device init is complete */
+ usb_set_device_state(udev, USB_STATE_POWERED);
+
+ if (udev->flags.usb_mode == USB_MODE_HOST) {
+
+ err = usbd_req_set_address(udev, NULL, device_index);
+
+ /*
+ * This is the new USB device address from now on, if
+ * the set address request didn't set it already.
+ */
+ if (udev->address == USB_START_ADDR)
+ udev->address = device_index;
+
+ /*
+		 * We ignore any set-address errors, since there are
+ * buggy USB devices out there that actually receive
+ * the SETUP PID, but manage to set the address before
+ * the STATUS stage is ACK'ed. If the device responds
+ * to the subsequent get-descriptor at the new
+ * address, then we know that the set-address command
+ * was successful.
+ */
+ if (err) {
+ DPRINTFN(0, "set address %d failed "
+ "(%s, ignored)\n", udev->address,
+ usbd_errstr(err));
+ }
+ } else {
+ /* We are not self powered */
+ udev->flags.self_powered = 0;
+
+ /* Set unconfigured state */
+ udev->curr_config_no = USB_UNCONFIG_NO;
+ udev->curr_config_index = USB_UNCONFIG_INDEX;
+
+ /* Setup USB descriptors */
+ err = (usb_temp_setup_by_index_p) (udev, usb_template);
+ if (err) {
+ DPRINTFN(0, "setting up USB template failed maybe the USB "
+ "template module has not been loaded\n");
+ goto done;
+ }
+ }
+ usb_set_device_state(udev, USB_STATE_ADDRESSED);
+
+ /* setup the device descriptor and the initial "wMaxPacketSize" */
+ err = usbd_setup_device_desc(udev, NULL);
+
+ if (err != 0) {
+ /* XXX try to re-enumerate the device */
+ err = usbd_req_re_enumerate(udev, NULL);
+ if (err)
+ goto done;
+ }
+
+ /*
+ * Setup temporary USB attach args so that we can figure out some
+ * basic quirks for this device.
+ */
+ usb_init_attach_arg(udev, &uaa);
+
+ if (usb_test_quirk(&uaa, UQ_BUS_POWERED)) {
+ udev->flags.uq_bus_powered = 1;
+ }
+ if (usb_test_quirk(&uaa, UQ_NO_STRINGS)) {
+ udev->flags.no_strings = 1;
+ }
+ /*
+ * Workaround for buggy USB devices.
+ *
+ * It appears that some string-less USB chips will crash and
+ * disappear if any attempts are made to read any string
+ * descriptors.
+ *
+ * Try to detect such chips by checking the strings in the USB
+ * device descriptor. If no strings are present there we
+ * simply disable all USB strings.
+ */
+ scratch_ptr = udev->bus->scratch[0].data;
+ scratch_size = sizeof(udev->bus->scratch[0].data);
+
+ if (udev->ddesc.iManufacturer ||
+ udev->ddesc.iProduct ||
+ udev->ddesc.iSerialNumber) {
+ /* read out the language ID string */
+ err = usbd_req_get_string_desc(udev, NULL,
+ (char *)scratch_ptr, 4, 0, USB_LANGUAGE_TABLE);
+ } else {
+ err = USB_ERR_INVAL;
+ }
+
+ if (err || (scratch_ptr[0] < 4)) {
+ udev->flags.no_strings = 1;
+ } else {
+ uint16_t langid;
+ uint16_t pref;
+ uint16_t mask;
+ uint8_t x;
+
+ /* load preferred value and mask */
+ pref = usb_lang_id;
+ mask = usb_lang_mask;
+
+ /* align length correctly */
+ scratch_ptr[0] &= ~1;
+
+ /* fix compiler warning */
+ langid = 0;
+
+ /* search for preferred language */
+ for (x = 2; (x < scratch_ptr[0]); x += 2) {
+ langid = UGETW(scratch_ptr + x);
+ if ((langid & mask) == pref)
+ break;
+ }
+ if (x >= scratch_ptr[0]) {
+ /* pick the first language as the default */
+ DPRINTFN(1, "Using first language\n");
+ langid = UGETW(scratch_ptr + 2);
+ }
+
+ DPRINTFN(1, "Language selected: 0x%04x\n", langid);
+ udev->langid = langid;
+ }
+
+ /* assume 100mA bus powered for now. Changed when configured. */
+ udev->power = USB_MIN_POWER;
+ /* fetch the vendor and product strings from the device */
+ usbd_set_device_strings(udev);
+
+ if (udev->flags.usb_mode == USB_MODE_DEVICE) {
+ /* USB device mode setup is complete */
+ err = 0;
+ goto config_done;
+ }
+
+ /*
+ * Most USB devices should attach to config index 0 by
+ * default
+ */
+ if (usb_test_quirk(&uaa, UQ_CFG_INDEX_0)) {
+ config_index = 0;
+ config_quirk = 1;
+ } else if (usb_test_quirk(&uaa, UQ_CFG_INDEX_1)) {
+ config_index = 1;
+ config_quirk = 1;
+ } else if (usb_test_quirk(&uaa, UQ_CFG_INDEX_2)) {
+ config_index = 2;
+ config_quirk = 1;
+ } else if (usb_test_quirk(&uaa, UQ_CFG_INDEX_3)) {
+ config_index = 3;
+ config_quirk = 1;
+ } else if (usb_test_quirk(&uaa, UQ_CFG_INDEX_4)) {
+ config_index = 4;
+ config_quirk = 1;
+ } else {
+ config_index = 0;
+ config_quirk = 0;
+ }
+
+ set_config_failed = 0;
+repeat_set_config:
+
+ DPRINTF("setting config %u\n", config_index);
+
+ /* get the USB device configured */
+ err = usbd_set_config_index(udev, config_index);
+ if (err) {
+ if (udev->ddesc.bNumConfigurations != 0) {
+ if (!set_config_failed) {
+ set_config_failed = 1;
+ /* XXX try to re-enumerate the device */
+ err = usbd_req_re_enumerate(udev, NULL);
+ if (err == 0)
+ goto repeat_set_config;
+ }
+ DPRINTFN(0, "Failure selecting configuration index %u:"
+ "%s, port %u, addr %u (ignored)\n",
+ config_index, usbd_errstr(err), udev->port_no,
+ udev->address);
+ }
+ /*
+ * Some USB devices do not have any configurations. Ignore any
+ * set config failures!
+ */
+ err = 0;
+ goto config_done;
+ }
+ if (!config_quirk && config_index + 1 < udev->ddesc.bNumConfigurations) {
+ if ((udev->cdesc->bNumInterface < 2) &&
+ usbd_get_no_descriptors(udev->cdesc, UDESC_ENDPOINT) == 0) {
+ DPRINTFN(0, "Found no endpoints, trying next config\n");
+ config_index++;
+ goto repeat_set_config;
+ }
+ if (config_index == 0) {
+ /*
+ * Try to figure out if we have an
+ * auto-install disk there:
+ */
+ if (usb_iface_is_cdrom(udev, 0)) {
+ DPRINTFN(0, "Found possible auto-install "
+ "disk (trying next config)\n");
+ config_index++;
+ goto repeat_set_config;
+ }
+ }
+ }
+#ifndef __rtems__
+ EVENTHANDLER_INVOKE(usb_dev_configured, udev, &uaa);
+#endif /* __rtems__ */
+ if (uaa.dev_state != UAA_DEV_READY) {
+ /* leave device unconfigured */
+ usb_unconfigure(udev, 0);
+ }
+
+config_done:
+ DPRINTF("new dev (addr %d), udev=%p, parent_hub=%p\n",
+ udev->address, udev, udev->parent_hub);
+
+ /* register our device - we are ready */
+ usb_bus_port_set_device(bus, parent_hub ?
+ parent_hub->hub->ports + port_index : NULL, udev, device_index);
+
+#if USB_HAVE_UGEN
+ /* Symlink the ugen device name */
+ udev->ugen_symlink = usb_alloc_symlink(udev->ugen_name);
+
+ /* Announce device */
+ printf("%s: <%s> at %s\n", udev->ugen_name,
+ usb_get_manufacturer(udev),
+ device_get_nameunit(udev->bus->bdev));
+
+ usb_notify_addq("ATTACH", udev);
+#endif
+done:
+ if (err) {
+ /*
+ * Free USB device and all subdevices, if any.
+ */
+ usb_free_device(udev, 0);
+ udev = NULL;
+ }
+ return (udev);
+}
+
+#if USB_HAVE_UGEN
+static struct cdev *
+usb_make_dev(struct usb_device *udev, int ep, int mode)
+{
+ struct usb_fs_privdata* pd;
+ char devname[20];
+
+ /* Store information to locate ourselves again later */
+ pd = malloc(sizeof(struct usb_fs_privdata), M_USBDEV,
+ M_WAITOK | M_ZERO);
+ pd->bus_index = device_get_unit(udev->bus->bdev);
+ pd->dev_index = udev->device_index;
+ pd->ep_addr = ep;
+ pd->mode = mode;
+
+ /* Now, create the device itself */
+ snprintf(devname, sizeof(devname), "%u.%u.%u",
+ pd->bus_index, pd->dev_index, pd->ep_addr);
+ pd->cdev = make_dev(&usb_devsw, 0, UID_ROOT,
+ GID_OPERATOR, 0600, USB_DEVICE_DIR "/%s", devname);
+ pd->cdev->si_drv1 = pd;
+
+ return (pd->cdev);
+}
+
+static void
+usb_cdev_create(struct usb_device *udev)
+{
+ struct usb_config_descriptor *cd;
+ struct usb_endpoint_descriptor *ed;
+ struct usb_descriptor *desc;
+ struct usb_fs_privdata* pd;
+ struct cdev *dev;
+ int inmode, outmode, inmask, outmask, mode;
+ uint8_t ep;
+
+ KASSERT(LIST_FIRST(&udev->pd_list) == NULL, ("stale cdev entries"));
+
+ DPRINTFN(2, "Creating device nodes\n");
+
+ if (usbd_get_mode(udev) == USB_MODE_DEVICE) {
+ inmode = FWRITE;
+ outmode = FREAD;
+ } else { /* USB_MODE_HOST */
+ inmode = FREAD;
+ outmode = FWRITE;
+ }
+
+ inmask = 0;
+ outmask = 0;
+ desc = NULL;
+
+ /*
+ * Collect all used endpoint numbers instead of just
+ * generating 16 static endpoints.
+ */
+ cd = usbd_get_config_descriptor(udev);
+ while ((desc = usb_desc_foreach(cd, desc))) {
+ /* filter out all endpoint descriptors */
+ if ((desc->bDescriptorType == UDESC_ENDPOINT) &&
+ (desc->bLength >= sizeof(*ed))) {
+ ed = (struct usb_endpoint_descriptor *)desc;
+
+ /* update masks */
+ ep = ed->bEndpointAddress;
+ if (UE_GET_DIR(ep) == UE_DIR_OUT)
+ outmask |= 1 << UE_GET_ADDR(ep);
+ else
+ inmask |= 1 << UE_GET_ADDR(ep);
+ }
+ }
+
+ /* Create all available endpoints except EP0 */
+ for (ep = 1; ep < 16; ep++) {
+ mode = inmask & (1 << ep) ? inmode : 0;
+ mode |= outmask & (1 << ep) ? outmode : 0;
+ if (mode == 0)
+ continue; /* no IN or OUT endpoint */
+
+ dev = usb_make_dev(udev, ep, mode);
+ pd = dev->si_drv1;
+ LIST_INSERT_HEAD(&udev->pd_list, pd, pd_next);
+ }
+}
+
+static void
+usb_cdev_free(struct usb_device *udev)
+{
+ struct usb_fs_privdata* pd;
+ struct cdev* pcdev;
+
+ DPRINTFN(2, "Freeing device nodes\n");
+
+ while ((pd = LIST_FIRST(&udev->pd_list)) != NULL) {
+ KASSERT(pd->cdev->si_drv1 == pd, ("privdata corrupt"));
+
+ pcdev = pd->cdev;
+ pd->cdev = NULL;
+ LIST_REMOVE(pd, pd_next);
+ if (pcdev != NULL)
+ destroy_dev_sched_cb(pcdev, usb_cdev_cleanup, pd);
+ }
+}
+
+static void
+usb_cdev_cleanup(void* arg)
+{
+ free(arg, M_USBDEV);
+}
+#endif
+
+/*------------------------------------------------------------------------*
+ * usb_free_device
+ *
+ * This function is NULL safe and will free a USB device and its
+ * child devices, if any.
+ *
+ * Flag values: Reserved, set to zero.
+ *------------------------------------------------------------------------*/
+void
+usb_free_device(struct usb_device *udev, uint8_t flag)
+{
+ struct usb_bus *bus;
+
+ if (udev == NULL)
+ return; /* already freed */
+
+ DPRINTFN(4, "udev=%p port=%d\n", udev, udev->port_no);
+
+ bus = udev->bus;
+ usb_set_device_state(udev, USB_STATE_DETACHED);
+
+#if USB_HAVE_UGEN
+ usb_notify_addq("DETACH", udev);
+
+ printf("%s: <%s> at %s (disconnected)\n", udev->ugen_name,
+ usb_get_manufacturer(udev), device_get_nameunit(bus->bdev));
+
+ /* Destroy UGEN symlink, if any */
+ if (udev->ugen_symlink) {
+ usb_free_symlink(udev->ugen_symlink);
+ udev->ugen_symlink = NULL;
+ }
+#endif
+ /*
+ * Unregister our device first which will prevent any further
+ * references:
+ */
+ usb_bus_port_set_device(bus, udev->parent_hub ?
+ udev->parent_hub->hub->ports + udev->port_index : NULL,
+ NULL, USB_ROOT_HUB_ADDR);
+
+#if USB_HAVE_UGEN
+ /* wait for all pending references to go away: */
+ mtx_lock(&usb_ref_lock);
+ udev->refcount--;
+ while (udev->refcount != 0) {
+ cv_wait(&udev->ref_cv, &usb_ref_lock);
+ }
+ mtx_unlock(&usb_ref_lock);
+
+ destroy_dev_sched_cb(udev->ctrl_dev, usb_cdev_cleanup,
+ udev->ctrl_dev->si_drv1);
+#endif
+
+ if (udev->flags.usb_mode == USB_MODE_DEVICE) {
+ /* stop receiving any control transfers (Device Side Mode) */
+ usbd_transfer_unsetup(udev->ctrl_xfer, USB_CTRL_XFER_MAX);
+ }
+
+ /* the following will get the device unconfigured in software */
+ usb_unconfigure(udev, USB_UNCFG_FLAG_FREE_EP0);
+
+ /* unsetup any leftover default USB transfers */
+ usbd_transfer_unsetup(udev->ctrl_xfer, USB_CTRL_XFER_MAX);
+
+ /* template unsetup, if any */
+ (usb_temp_unsetup_p) (udev);
+
+ /*
+ * Make sure that our clear-stall messages are not queued
+ * anywhere:
+ */
+ USB_BUS_LOCK(udev->bus);
+ usb_proc_mwait(&udev->bus->non_giant_callback_proc,
+ &udev->cs_msg[0], &udev->cs_msg[1]);
+ USB_BUS_UNLOCK(udev->bus);
+
+ sx_destroy(&udev->ctrl_sx);
+ sx_destroy(&udev->enum_sx);
+ sx_destroy(&udev->sr_sx);
+
+ cv_destroy(&udev->ctrlreq_cv);
+ cv_destroy(&udev->ref_cv);
+
+ mtx_destroy(&udev->device_mtx);
+#if USB_HAVE_UGEN
+ KASSERT(LIST_FIRST(&udev->pd_list) == NULL, ("leaked cdev entries"));
+#endif
+
+ /* Uninitialise device */
+ if (bus->methods->device_uninit != NULL)
+ (bus->methods->device_uninit) (udev);
+
+ /* free device */
+ free(udev->serial, M_USB);
+ free(udev->manufacturer, M_USB);
+ free(udev->product, M_USB);
+ free(udev, M_USB);
+}
+
+/*------------------------------------------------------------------------*
+ * usbd_get_iface
+ *
+ * This function is the safe way to get the USB interface structure
+ * pointer by interface index.
+ *
+ * Return values:
+ * NULL: Interface not present.
+ * Else: Pointer to USB interface structure.
+ *------------------------------------------------------------------------*/
+struct usb_interface *
+usbd_get_iface(struct usb_device *udev, uint8_t iface_index)
+{
+ struct usb_interface *iface = udev->ifaces + iface_index;
+
+ if (iface_index >= udev->ifaces_max)
+ return (NULL);
+ return (iface);
+}
+
+/*------------------------------------------------------------------------*
+ * usbd_find_descriptor
+ *
+ * This function will look up the first descriptor that matches the
+ * criteria given by the arguments "type" and "subtype". Descriptors
+ * will only be searched within the interface having the index
+ * "iface_index". If the "id" argument points to a USB descriptor,
+ * it will be skipped before the search is started. This allows
+ * searching for multiple descriptors using the same criteria. Else
+ * the search is started after the interface descriptor.
+ *
+ * Return values:
+ * NULL: End of descriptors
+ * Else: A descriptor matching the criteria
+ *------------------------------------------------------------------------*/
+void *
+usbd_find_descriptor(struct usb_device *udev, void *id, uint8_t iface_index,
+ uint8_t type, uint8_t type_mask,
+ uint8_t subtype, uint8_t subtype_mask)
+{
+ struct usb_descriptor *desc;
+ struct usb_config_descriptor *cd;
+ struct usb_interface *iface;
+
+ cd = usbd_get_config_descriptor(udev);
+ if (cd == NULL) {
+ return (NULL);
+ }
+ if (id == NULL) {
+ iface = usbd_get_iface(udev, iface_index);
+ if (iface == NULL) {
+ return (NULL);
+ }
+ id = usbd_get_interface_descriptor(iface);
+ if (id == NULL) {
+ return (NULL);
+ }
+ }
+ desc = (void *)id;
+
+ while ((desc = usb_desc_foreach(cd, desc))) {
+
+ if (desc->bDescriptorType == UDESC_INTERFACE) {
+ break;
+ }
+ if (((desc->bDescriptorType & type_mask) == type) &&
+ ((desc->bDescriptorSubtype & subtype_mask) == subtype)) {
+ return (desc);
+ }
+ }
+ return (NULL);
+}
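+
+/*
+ * Illustrative sketch, not part of the original sources: scanning an
+ * interface for a class specific descriptor, here a CDC union
+ * descriptor. "UDESC_CS_INTERFACE" and "UDESC_CDC_UNION" come from
+ * the USB core and CDC headers; the 0xFF masks request an exact
+ * match on both type and subtype. Passing the previous result back
+ * as "id" would continue the search:
+ */
+#if 0
+struct usb_cdc_union_descriptor *ud;
+
+ud = usbd_find_descriptor(udev, NULL, iface_index,
+    UDESC_CS_INTERFACE, 0xFF, UDESC_CDC_UNION, 0xFF);
+if (ud != NULL)
+ printf("master interface is %u\n", ud->bMasterInterface);
+#endif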
+
+/*------------------------------------------------------------------------*
+ * usb_devinfo
+ *
+ * This function will dump information from the device descriptor
+ * belonging to the USB device pointed to by "udev", to the string
+ * pointed to by "dst_ptr" having a maximum length of "dst_len" bytes
+ * including the terminating zero.
+ *------------------------------------------------------------------------*/
+void
+usb_devinfo(struct usb_device *udev, char *dst_ptr, uint16_t dst_len)
+{
+ struct usb_device_descriptor *udd = &udev->ddesc;
+ uint16_t bcdDevice;
+ uint16_t bcdUSB;
+
+ bcdUSB = UGETW(udd->bcdUSB);
+ bcdDevice = UGETW(udd->bcdDevice);
+
+ if (udd->bDeviceClass != 0xFF) {
+ snprintf(dst_ptr, dst_len, "%s %s, class %d/%d, rev %x.%02x/"
+ "%x.%02x, addr %d",
+ usb_get_manufacturer(udev),
+ usb_get_product(udev),
+ udd->bDeviceClass, udd->bDeviceSubClass,
+ (bcdUSB >> 8), bcdUSB & 0xFF,
+ (bcdDevice >> 8), bcdDevice & 0xFF,
+ udev->address);
+ } else {
+ snprintf(dst_ptr, dst_len, "%s %s, rev %x.%02x/"
+ "%x.%02x, addr %d",
+ usb_get_manufacturer(udev),
+ usb_get_product(udev),
+ (bcdUSB >> 8), bcdUSB & 0xFF,
+ (bcdDevice >> 8), bcdDevice & 0xFF,
+ udev->address);
+ }
+}
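+
+/*
+ * Illustrative sketch, not part of the original sources: printing
+ * the one-line device summary produced by "usb_devinfo()". "udev"
+ * is assumed to be supplied by the caller:
+ */
+#if 0
+char buf[128];
+
+usb_devinfo(udev, buf, sizeof(buf));
+printf("%s\n", buf);
+#endif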
+
+#ifdef USB_VERBOSE
+/*
+ * Descriptions of known vendors and devices ("products").
+ */
+struct usb_knowndev {
+ uint16_t vendor;
+ uint16_t product;
+ uint32_t flags;
+ const char *vendorname;
+ const char *productname;
+};
+
+#define USB_KNOWNDEV_NOPROD 0x01 /* match on vendor only */
+
+#include <rtems/freebsd/local/usbdevs.h>
+#include <rtems/freebsd/local/usbdevs_data.h>
+#endif /* USB_VERBOSE */
+
+static void
+usbd_set_device_strings(struct usb_device *udev)
+{
+ struct usb_device_descriptor *udd = &udev->ddesc;
+#ifdef USB_VERBOSE
+ const struct usb_knowndev *kdp;
+#endif
+ char *temp_ptr;
+ size_t temp_size;
+ uint16_t vendor_id;
+ uint16_t product_id;
+
+ temp_ptr = (char *)udev->bus->scratch[0].data;
+ temp_size = sizeof(udev->bus->scratch[0].data);
+
+ vendor_id = UGETW(udd->idVendor);
+ product_id = UGETW(udd->idProduct);
+
+ /* get serial number string */
+ usbd_req_get_string_any(udev, NULL, temp_ptr, temp_size,
+ udev->ddesc.iSerialNumber);
+ udev->serial = strdup(temp_ptr, M_USB);
+
+ /* get manufacturer string */
+ usbd_req_get_string_any(udev, NULL, temp_ptr, temp_size,
+ udev->ddesc.iManufacturer);
+ usb_trim_spaces(temp_ptr);
+ if (temp_ptr[0] != '\0')
+ udev->manufacturer = strdup(temp_ptr, M_USB);
+
+ /* get product string */
+ usbd_req_get_string_any(udev, NULL, temp_ptr, temp_size,
+ udev->ddesc.iProduct);
+ usb_trim_spaces(temp_ptr);
+ if (temp_ptr[0] != '\0')
+ udev->product = strdup(temp_ptr, M_USB);
+
+#ifdef USB_VERBOSE
+ if (udev->manufacturer == NULL || udev->product == NULL) {
+ for (kdp = usb_knowndevs; kdp->vendorname != NULL; kdp++) {
+ if (kdp->vendor == vendor_id &&
+ (kdp->product == product_id ||
+ (kdp->flags & USB_KNOWNDEV_NOPROD) != 0))
+ break;
+ }
+ if (kdp->vendorname != NULL) {
+ /* XXX should use pointer to knowndevs string */
+ if (udev->manufacturer == NULL) {
+ udev->manufacturer = strdup(kdp->vendorname,
+ M_USB);
+ }
+ if (udev->product == NULL &&
+ (kdp->flags & USB_KNOWNDEV_NOPROD) == 0) {
+ udev->product = strdup(kdp->productname,
+ M_USB);
+ }
+ }
+ }
+#endif
+ /* Provide default strings if none were found */
+ if (udev->manufacturer == NULL) {
+ snprintf(temp_ptr, temp_size, "vendor 0x%04x", vendor_id);
+ udev->manufacturer = strdup(temp_ptr, M_USB);
+ }
+ if (udev->product == NULL) {
+ snprintf(temp_ptr, temp_size, "product 0x%04x", product_id);
+ udev->product = strdup(temp_ptr, M_USB);
+ }
+}
+
+/*
+ * Returns:
+ * See: USB_MODE_XXX
+ */
+enum usb_hc_mode
+usbd_get_mode(struct usb_device *udev)
+{
+ return (udev->flags.usb_mode);
+}
+
+/*
+ * Returns:
+ * See: USB_SPEED_XXX
+ */
+enum usb_dev_speed
+usbd_get_speed(struct usb_device *udev)
+{
+ return (udev->speed);
+}
+
+uint32_t
+usbd_get_isoc_fps(struct usb_device *udev)
+{
+ ; /* indent fix */
+ switch (udev->speed) {
+ case USB_SPEED_LOW:
+ case USB_SPEED_FULL:
+ return (1000);
+ default:
+ return (8000);
+ }
+}
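+
+/*
+ * Illustrative sketch, not part of the original sources: the value
+ * returned by "usbd_get_isoc_fps()" is the number of isochronous
+ * frames per second, 1000 for low and full speed and 8000 for high
+ * speed and above, as per the switch above. It can be used to
+ * convert a frame count into a duration:
+ */
+#if 0
+uint32_t fps = usbd_get_isoc_fps(udev);
+uint32_t frames = 64;
+uint32_t duration_ms = (frames * 1000) / fps;
+#endif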
+
+struct usb_device_descriptor *
+usbd_get_device_descriptor(struct usb_device *udev)
+{
+ if (udev == NULL)
+ return (NULL); /* be NULL safe */
+ return (&udev->ddesc);
+}
+
+struct usb_config_descriptor *
+usbd_get_config_descriptor(struct usb_device *udev)
+{
+ if (udev == NULL)
+ return (NULL); /* be NULL safe */
+ return (udev->cdesc);
+}
+
+/*------------------------------------------------------------------------*
+ * usb_test_quirk - test a device for a given quirk
+ *
+ * Return values:
+ * 0: The USB device does not have the given quirk.
+ * Else: The USB device has the given quirk.
+ *------------------------------------------------------------------------*/
+uint8_t
+usb_test_quirk(const struct usb_attach_arg *uaa, uint16_t quirk)
+{
+ uint8_t found;
+
+ found = (usb_test_quirk_p) (&uaa->info, quirk);
+ return (found);
+}
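+
+/*
+ * Example (illustrative): a driver attach routine can test for a
+ * quirk, assuming a quirk code such as UQ_MSC_NO_SYNC_CACHE and a
+ * hypothetical softc flag:
+ *
+ *    if (usb_test_quirk(uaa, UQ_MSC_NO_SYNC_CACHE))
+ *        sc->no_sync_cache = 1;
+ */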
+
+struct usb_interface_descriptor *
+usbd_get_interface_descriptor(struct usb_interface *iface)
+{
+ if (iface == NULL)
+ return (NULL); /* be NULL safe */
+ return (iface->idesc);
+}
+
+uint8_t
+usbd_get_interface_altindex(struct usb_interface *iface)
+{
+ return (iface->alt_index);
+}
+
+uint8_t
+usbd_get_bus_index(struct usb_device *udev)
+{
+ return ((uint8_t)device_get_unit(udev->bus->bdev));
+}
+
+uint8_t
+usbd_get_device_index(struct usb_device *udev)
+{
+ return (udev->device_index);
+}
+
+#if USB_HAVE_UGEN
+/*------------------------------------------------------------------------*
+ * usb_notify_addq
+ *
+ * This function will generate devctl events for the given USB device.
+ *------------------------------------------------------------------------*/
+#ifndef BURN_BRIDGES
+static void
+usb_notify_addq_compat(const char *type, struct usb_device *udev)
+{
+ char *data = NULL;
+ const char *ntype;
+ struct malloc_type *mt;
+ const size_t buf_size = 512;
+
+ /* Convert notify type */
+ if (strcmp(type, "ATTACH") == 0)
+ ntype = "+";
+ else if (strcmp(type, "DETACH") == 0)
+ ntype = "-";
+ else
+ return;
+
+ mtx_lock(&malloc_mtx);
+ mt = malloc_desc2type("bus"); /* XXX M_BUS */
+ mtx_unlock(&malloc_mtx);
+ if (mt == NULL)
+ return;
+
+ data = malloc(buf_size, mt, M_NOWAIT);
+ if (data == NULL)
+ return;
+
+ /* String it all together. */
+ snprintf(data, buf_size,
+ "%s"
+ "%s "
+ "vendor=0x%04x "
+ "product=0x%04x "
+ "devclass=0x%02x "
+ "devsubclass=0x%02x "
+ "sernum=\"%s\" "
+ "release=0x%04x "
+ "at "
+ "port=%u "
+ "on "
+ "%s\n",
+ ntype,
+ udev->ugen_name,
+ UGETW(udev->ddesc.idVendor),
+ UGETW(udev->ddesc.idProduct),
+ udev->ddesc.bDeviceClass,
+ udev->ddesc.bDeviceSubClass,
+ usb_get_serial(udev),
+ UGETW(udev->ddesc.bcdDevice),
+ udev->port_no,
+ udev->parent_hub != NULL ?
+ udev->parent_hub->ugen_name :
+ device_get_nameunit(device_get_parent(udev->bus->bdev)));
+
+ devctl_queue_data(data);
+}
+#endif
+
+static void
+usb_notify_addq(const char *type, struct usb_device *udev)
+{
+ struct usb_interface *iface;
+ struct sbuf *sb;
+ int i;
+
+#ifndef BURN_BRIDGES
+ usb_notify_addq_compat(type, udev);
+#endif
+
+ /* announce the device */
+ sb = sbuf_new_auto();
+ sbuf_printf(sb,
+ "cdev=%s "
+ "vendor=0x%04x "
+ "product=0x%04x "
+ "devclass=0x%02x "
+ "devsubclass=0x%02x "
+ "sernum=\"%s\" "
+ "release=0x%04x "
+ "mode=%s "
+ "port=%u "
+ "parent=%s\n",
+ udev->ugen_name,
+ UGETW(udev->ddesc.idVendor),
+ UGETW(udev->ddesc.idProduct),
+ udev->ddesc.bDeviceClass,
+ udev->ddesc.bDeviceSubClass,
+ usb_get_serial(udev),
+ UGETW(udev->ddesc.bcdDevice),
+ (udev->flags.usb_mode == USB_MODE_HOST) ? "host" : "device",
+ udev->port_no,
+ udev->parent_hub != NULL ?
+ udev->parent_hub->ugen_name :
+ device_get_nameunit(device_get_parent(udev->bus->bdev)));
+ sbuf_finish(sb);
+ devctl_notify("USB", "DEVICE", type, sbuf_data(sb));
+ sbuf_delete(sb);
+
+ /* announce each interface */
+ for (i = 0; i < USB_IFACE_MAX; i++) {
+ iface = usbd_get_iface(udev, i);
+ if (iface == NULL)
+ break; /* end of interfaces */
+ if (iface->idesc == NULL)
+ continue; /* no interface descriptor */
+
+ sb = sbuf_new_auto();
+ sbuf_printf(sb,
+ "cdev=%s "
+ "vendor=0x%04x "
+ "product=0x%04x "
+ "devclass=0x%02x "
+ "devsubclass=0x%02x "
+ "sernum=\"%s\" "
+ "release=0x%04x "
+ "mode=%s "
+ "interface=%d "
+ "endpoints=%d "
+ "intclass=0x%02x "
+ "intsubclass=0x%02x "
+ "intprotocol=0x%02x\n",
+ udev->ugen_name,
+ UGETW(udev->ddesc.idVendor),
+ UGETW(udev->ddesc.idProduct),
+ udev->ddesc.bDeviceClass,
+ udev->ddesc.bDeviceSubClass,
+ usb_get_serial(udev),
+ UGETW(udev->ddesc.bcdDevice),
+ (udev->flags.usb_mode == USB_MODE_HOST) ? "host" : "device",
+ iface->idesc->bInterfaceNumber,
+ iface->idesc->bNumEndpoints,
+ iface->idesc->bInterfaceClass,
+ iface->idesc->bInterfaceSubClass,
+ iface->idesc->bInterfaceProtocol);
+ sbuf_finish(sb);
+ devctl_notify("USB", "INTERFACE", type, sbuf_data(sb));
+ sbuf_delete(sb);
+ }
+}
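+
+/*
+ * Example of the device notification data passed to devctl_notify()
+ * above (illustrative values):
+ *
+ *    cdev=ugen0.2 vendor=0x1234 product=0x5678 devclass=0x00
+ *    devsubclass=0x00 sernum="ABC123" release=0x0100 mode=host
+ *    port=1 parent=ugen0.1
+ */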
+
+/*------------------------------------------------------------------------*
+ * usb_fifo_free_wrap
+ *
+ * This function will free the FIFOs.
+ *
+ * Description of the "flag" argument:
+ *
+ * - USB_UNCFG_FLAG_FREE_EP0 set and "iface_index" equal to
+ *   "USB_IFACE_INDEX_ANY": free all FIFOs.
+ * - USB_UNCFG_FLAG_FREE_EP0 not set and "iface_index" equal to
+ *   "USB_IFACE_INDEX_ANY": free all non-control endpoint FIFOs.
+ * - "iface_index" not equal to "USB_IFACE_INDEX_ANY": the flag has no
+ *   effect.
+ *------------------------------------------------------------------------*/
+static void
+usb_fifo_free_wrap(struct usb_device *udev,
+ uint8_t iface_index, uint8_t flag)
+{
+ struct usb_fifo *f;
+ uint16_t i;
+
+ /*
+ * Free any USB FIFOs on the given interface:
+ */
+ for (i = 0; i != USB_FIFO_MAX; i++) {
+ f = udev->fifo[i];
+ if (f == NULL) {
+ continue;
+ }
+ /* Check if the interface index matches */
+ if (iface_index == f->iface_index) {
+ if (f->methods != &usb_ugen_methods) {
+ /*
+ * Don't free any non-generic FIFOs in
+ * this case.
+ */
+ continue;
+ }
+ if ((f->dev_ep_index == 0) &&
+ (f->fs_xfer == NULL)) {
+ /* no need to free this FIFO */
+ continue;
+ }
+ } else if (iface_index == USB_IFACE_INDEX_ANY) {
+ if ((f->methods == &usb_ugen_methods) &&
+ (f->dev_ep_index == 0) &&
+ (!(flag & USB_UNCFG_FLAG_FREE_EP0)) &&
+ (f->fs_xfer == NULL)) {
+ /* no need to free this FIFO */
+ continue;
+ }
+ } else {
+ /* no need to free this FIFO */
+ continue;
+ }
+ /* free this FIFO */
+ usb_fifo_free(f);
+ }
+}
+#endif
+
+/*------------------------------------------------------------------------*
+ * usb_peer_can_wakeup
+ *
+ * Return values:
+ * 0: Peer cannot do resume signalling.
+ * Else: Peer can do resume signalling.
+ *------------------------------------------------------------------------*/
+uint8_t
+usb_peer_can_wakeup(struct usb_device *udev)
+{
+ const struct usb_config_descriptor *cdp;
+
+ cdp = udev->cdesc;
+ if ((cdp != NULL) && (udev->flags.usb_mode == USB_MODE_HOST)) {
+ return (cdp->bmAttributes & UC_REMOTE_WAKEUP);
+ }
+ return (0); /* not supported */
+}
+
+void
+usb_set_device_state(struct usb_device *udev, enum usb_dev_state state)
+{
+
+ KASSERT(state < USB_STATE_MAX, ("invalid udev state"));
+
+ DPRINTF("udev %p state %s -> %s\n", udev,
+ usb_statestr(udev->state), usb_statestr(state));
+ udev->state = state;
+
+ if (udev->bus->methods->device_state_change != NULL)
+ (udev->bus->methods->device_state_change) (udev);
+}
+
+enum usb_dev_state
+usb_get_device_state(struct usb_device *udev)
+{
+ if (udev == NULL)
+ return (USB_STATE_DETACHED);
+ return (udev->state);
+}
+
+uint8_t
+usbd_device_attached(struct usb_device *udev)
+{
+ return (udev->state > USB_STATE_DETACHED);
+}
+
+/* The following function locks the enumeration of the given USB device. */
+
+void
+usbd_enum_lock(struct usb_device *udev)
+{
+ sx_xlock(&udev->enum_sx);
+ sx_xlock(&udev->sr_sx);
+ /*
+ * NEWBUS LOCK NOTE: We should check if any parent SX locks
+ * are locked before locking Giant. Else the lock can be
+ * locked multiple times.
+ */
+ mtx_lock(&Giant);
+}
+
+/* The following function unlocks the enumeration of the given USB device. */
+
+void
+usbd_enum_unlock(struct usb_device *udev)
+{
+ mtx_unlock(&Giant);
+ sx_xunlock(&udev->enum_sx);
+ sx_xunlock(&udev->sr_sx);
+}
+
+/* The following function locks suspend and resume. */
+
+void
+usbd_sr_lock(struct usb_device *udev)
+{
+ sx_xlock(&udev->sr_sx);
+ /*
+ * NEWBUS LOCK NOTE: We should check if any parent SX locks
+ * are locked before locking Giant. Else the lock can be
+ * locked multiple times.
+ */
+ mtx_lock(&Giant);
+}
+
+/* The following function unlocks suspend and resume. */
+
+void
+usbd_sr_unlock(struct usb_device *udev)
+{
+ mtx_unlock(&Giant);
+ sx_xunlock(&udev->sr_sx);
+}
+
+/*
+ * The following function checks whether the enumeration lock is held
+ * for the given USB device.
+ */
+
+uint8_t
+usbd_enum_is_locked(struct usb_device *udev)
+{
+ return (sx_xlocked(&udev->enum_sx));
+}
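+
+/*
+ * Usage sketch (illustrative): enumeration related requests are
+ * bracketed by the lock functions above:
+ *
+ *    usbd_enum_lock(udev);
+ *    ... issue control requests / re-enumerate ...
+ *    usbd_enum_unlock(udev);
+ */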
diff --git a/rtems/freebsd/dev/usb/usb_device.h b/rtems/freebsd/dev/usb/usb_device.h
new file mode 100644
index 00000000..eb6a3fcb
--- /dev/null
+++ b/rtems/freebsd/dev/usb/usb_device.h
@@ -0,0 +1,227 @@
+/* $FreeBSD$ */
+/*-
+ * Copyright (c) 2008 Hans Petter Selasky. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _USB_DEVICE_HH_
+#define _USB_DEVICE_HH_
+
+struct usb_symlink; /* UGEN */
+struct usb_device; /* linux compat */
+
+#define USB_CTRL_XFER_MAX 2
+
+/* "usb_parse_config()" commands */
+
+#define USB_CFG_ALLOC 0
+#define USB_CFG_FREE 1
+#define USB_CFG_INIT 2
+
+/* "usb_unconfigure()" flags */
+
+#define USB_UNCFG_FLAG_NONE 0x00
+#define USB_UNCFG_FLAG_FREE_EP0 0x02 /* endpoint zero is freed */
+
+struct usb_clear_stall_msg {
+ struct usb_proc_msg hdr;
+ struct usb_device *udev;
+};
+
+/* The following four structures make up a tree, with the leaf
+ * structure, "usb_host_endpoint", first, and the root structure,
+ * "usb_device", last. The four structures below mirror the structure
+ * of the USB descriptors belonging to a USB configuration. Please
+ * refer to the USB specification for a definition of "endpoints" and
+ * "interfaces".
+ */
+struct usb_host_endpoint {
+ struct usb_endpoint_descriptor desc;
+ TAILQ_HEAD(, urb) bsd_urb_list;
+ struct usb_xfer *bsd_xfer[2];
+ uint8_t *extra; /* Extra descriptors */
+ usb_frlength_t fbsd_buf_size;
+ uint16_t extralen;
+ uint8_t bsd_iface_index;
+} __aligned(USB_HOST_ALIGN);
+
+struct usb_host_interface {
+ struct usb_interface_descriptor desc;
+ /* the following array has size "desc.bNumEndpoints" */
+ struct usb_host_endpoint *endpoint;
+ const char *string; /* iInterface string, if present */
+ uint8_t *extra; /* Extra descriptors */
+ uint16_t extralen;
+ uint8_t bsd_iface_index;
+} __aligned(USB_HOST_ALIGN);
+
+/*
+ * The following structure defines the USB device flags.
+ */
+struct usb_device_flags {
+ enum usb_hc_mode usb_mode; /* host or device mode */
+ uint8_t self_powered:1; /* set if USB device is self powered */
+ uint8_t no_strings:1; /* set if USB device does not support
+ * strings */
+ uint8_t remote_wakeup:1; /* set if remote wakeup is enabled */
+ uint8_t uq_bus_powered:1; /* set if BUS powered quirk is present */
+
+ /*
+ * NOTE: Although the flags below will reach the same value
+ * over time, the instantaneous values may differ, and
+ * consequently the flags cannot be merged into one!
+ */
+ uint8_t peer_suspended:1; /* set if peer is suspended */
+ uint8_t self_suspended:1; /* set if self is suspended */
+};
+
+/*
+ * The following structure is used for power-save purposes. The data
+ * in this structure is protected by the USB BUS lock.
+ */
+struct usb_power_save {
+ usb_ticks_t last_xfer_time; /* copy of "ticks" */
+ usb_size_t type_refs[4]; /* transfer reference count */
+ usb_size_t read_refs; /* data read references */
+ usb_size_t write_refs; /* data write references */
+};
+
+/*
+ * The following structure defines a USB device. There exists one of
+ * these structures for every USB device.
+ */
+struct usb_device {
+ struct usb_clear_stall_msg cs_msg[2]; /* generic clear stall
+ * messages */
+ struct sx ctrl_sx;
+ struct sx enum_sx;
+ struct sx sr_sx;
+ struct mtx device_mtx;
+ struct cv ctrlreq_cv;
+ struct cv ref_cv;
+ struct usb_interface *ifaces;
+ struct usb_endpoint ctrl_ep; /* Control Endpoint 0 */
+ struct usb_endpoint *endpoints;
+ struct usb_power_save pwr_save; /* power save data */
+ struct usb_bus *bus; /* our USB BUS */
+ device_t parent_dev; /* parent device */
+ struct usb_device *parent_hub;
+ struct usb_device *parent_hs_hub; /* high-speed parent HUB */
+ struct usb_config_descriptor *cdesc; /* full config descr */
+ struct usb_hub *hub; /* only if this is a hub */
+ struct usb_xfer *ctrl_xfer[USB_CTRL_XFER_MAX];
+ struct usb_temp_data *usb_template_ptr;
+ struct usb_endpoint *ep_curr; /* current clear stall endpoint */
+#if USB_HAVE_UGEN
+ struct usb_fifo *fifo[USB_FIFO_MAX];
+ struct usb_symlink *ugen_symlink; /* our generic symlink */
+ struct cdev *ctrl_dev; /* Control Endpoint 0 device node */
+ LIST_HEAD(,usb_fs_privdata) pd_list;
+ char ugen_name[20]; /* name of ugenX.X device */
+#endif
+ usb_ticks_t plugtime; /* copy of "ticks" */
+
+ enum usb_dev_state state;
+ enum usb_dev_speed speed;
+ uint16_t refcount;
+#define USB_DEV_REF_MAX 0xffff
+
+ uint16_t power; /* mA the device uses */
+ uint16_t langid; /* language for strings */
+
+ uint8_t address; /* device address */
+ uint8_t device_index; /* device index in "bus->devices" */
+ uint8_t controller_slot_id; /* controller specific value */
+ uint8_t curr_config_index; /* current configuration index */
+ uint8_t curr_config_no; /* current configuration number */
+ uint8_t depth; /* distance from root HUB */
+ uint8_t port_index; /* parent HUB port index */
+ uint8_t port_no; /* parent HUB port number */
+ uint8_t hs_hub_addr; /* high-speed HUB address */
+ uint8_t hs_port_no; /* high-speed HUB port number */
+ uint8_t driver_added_refcount; /* our driver added generation count */
+ uint8_t power_mode; /* see USB_POWER_XXX */
+ uint8_t re_enumerate_wait; /* set if re-enum. is in progress */
+ uint8_t ifaces_max; /* number of interfaces present */
+ uint8_t endpoints_max; /* number of endpoints present */
+
+ /* the "flags" field is write-protected by "bus->mtx" */
+
+ struct usb_device_flags flags;
+
+ struct usb_endpoint_descriptor ctrl_ep_desc; /* for endpoint 0 */
+ struct usb_endpoint_ss_comp_descriptor ctrl_ep_comp_desc; /* for endpoint 0 */
+ struct usb_device_descriptor ddesc; /* device descriptor */
+
+ char *serial; /* serial number, can be NULL */
+ char *manufacturer; /* manufacturer string, can be NULL */
+ char *product; /* product string, can be NULL */
+
+#if USB_HAVE_COMPAT_LINUX
+ /* Linux compat */
+ struct usb_device_descriptor descriptor;
+ struct usb_host_endpoint ep0;
+ struct usb_interface *linux_iface_start;
+ struct usb_interface *linux_iface_end;
+ struct usb_host_endpoint *linux_endpoint_start;
+ struct usb_host_endpoint *linux_endpoint_end;
+ uint16_t devnum;
+#endif
+};
+
+/* globals */
+
+extern int usb_template;
+
+/* function prototypes */
+
+const char *usb_statestr(enum usb_dev_state state);
+struct usb_device *usb_alloc_device(device_t parent_dev, struct usb_bus *bus,
+ struct usb_device *parent_hub, uint8_t depth,
+ uint8_t port_index, uint8_t port_no,
+ enum usb_dev_speed speed, enum usb_hc_mode mode);
+usb_error_t usb_probe_and_attach(struct usb_device *udev,
+ uint8_t iface_index);
+void usb_detach_device(struct usb_device *, uint8_t, uint8_t);
+usb_error_t usb_reset_iface_endpoints(struct usb_device *udev,
+ uint8_t iface_index);
+usb_error_t usbd_set_config_index(struct usb_device *udev, uint8_t index);
+usb_error_t usbd_set_endpoint_stall(struct usb_device *udev,
+ struct usb_endpoint *ep, uint8_t do_stall);
+usb_error_t usb_suspend_resume(struct usb_device *udev,
+ uint8_t do_suspend);
+void usb_devinfo(struct usb_device *udev, char *dst_ptr, uint16_t dst_len);
+void usb_free_device(struct usb_device *, uint8_t);
+void usb_linux_free_device(struct usb_device *dev);
+uint8_t usb_peer_can_wakeup(struct usb_device *udev);
+struct usb_endpoint *usb_endpoint_foreach(struct usb_device *udev, struct usb_endpoint *ep);
+void usb_set_device_state(struct usb_device *, enum usb_dev_state);
+enum usb_dev_state usb_get_device_state(struct usb_device *);
+
+void usbd_enum_lock(struct usb_device *);
+void usbd_enum_unlock(struct usb_device *);
+void usbd_sr_lock(struct usb_device *);
+void usbd_sr_unlock(struct usb_device *);
+uint8_t usbd_enum_is_locked(struct usb_device *);
+
+#endif /* _USB_DEVICE_HH_ */
diff --git a/rtems/freebsd/dev/usb/usb_dynamic.c b/rtems/freebsd/dev/usb/usb_dynamic.c
new file mode 100644
index 00000000..79dc30bc
--- /dev/null
+++ b/rtems/freebsd/dev/usb/usb_dynamic.c
@@ -0,0 +1,151 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/* $FreeBSD$ */
+/*-
+ * Copyright (c) 2008 Hans Petter Selasky. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <rtems/freebsd/sys/stdint.h>
+#include <rtems/freebsd/sys/stddef.h>
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/queue.h>
+#include <rtems/freebsd/sys/types.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/bus.h>
+#include <rtems/freebsd/sys/linker_set.h>
+#include <rtems/freebsd/sys/module.h>
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/mutex.h>
+#include <rtems/freebsd/sys/condvar.h>
+#include <rtems/freebsd/sys/sysctl.h>
+#include <rtems/freebsd/sys/sx.h>
+#include <rtems/freebsd/sys/unistd.h>
+#include <rtems/freebsd/sys/callout.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/priv.h>
+
+#include <rtems/freebsd/dev/usb/usb.h>
+#include <rtems/freebsd/dev/usb/usbdi.h>
+
+#include <rtems/freebsd/dev/usb/usb_core.h>
+#include <rtems/freebsd/dev/usb/usb_process.h>
+#include <rtems/freebsd/dev/usb/usb_device.h>
+#include <rtems/freebsd/dev/usb/usb_dynamic.h>
+
+/* function prototypes */
+static usb_handle_req_t usb_temp_get_desc_w;
+static usb_temp_setup_by_index_t usb_temp_setup_by_index_w;
+static usb_temp_unsetup_t usb_temp_unsetup_w;
+static usb_test_quirk_t usb_test_quirk_w;
+static usb_quirk_ioctl_t usb_quirk_ioctl_w;
+
+/* global variables */
+usb_handle_req_t *usb_temp_get_desc_p = &usb_temp_get_desc_w;
+usb_temp_setup_by_index_t *usb_temp_setup_by_index_p = &usb_temp_setup_by_index_w;
+usb_temp_unsetup_t *usb_temp_unsetup_p = &usb_temp_unsetup_w;
+usb_test_quirk_t *usb_test_quirk_p = &usb_test_quirk_w;
+usb_quirk_ioctl_t *usb_quirk_ioctl_p = &usb_quirk_ioctl_w;
+devclass_t usb_devclass_ptr = NULL;
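+
+/*
+ * The pointers above default to the stub implementations below. A
+ * dynamically loaded module overrides a pointer with its own handler,
+ * for example (illustrative, modelled on the FreeBSD quirk module):
+ *
+ *    usb_test_quirk_p = &usb_test_quirk_by_info;
+ *
+ * The unload functions below restore the stubs.
+ */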
+
+static usb_error_t
+usb_temp_setup_by_index_w(struct usb_device *udev, uint16_t index)
+{
+ return (USB_ERR_INVAL);
+}
+
+static uint8_t
+usb_test_quirk_w(const struct usbd_lookup_info *info, uint16_t quirk)
+{
+ return (0); /* no match */
+}
+
+static int
+usb_quirk_ioctl_w(unsigned long cmd, caddr_t data, int fflag, struct thread *td)
+{
+ return (ENOIOCTL);
+}
+
+static usb_error_t
+usb_temp_get_desc_w(struct usb_device *udev, struct usb_device_request *req, const void **pPtr, uint16_t *pLength)
+{
+ /* stall */
+ return (USB_ERR_STALLED);
+}
+
+static void
+usb_temp_unsetup_w(struct usb_device *udev)
+{
+ if (udev->usb_template_ptr) {
+
+ free(udev->usb_template_ptr, M_USB);
+
+ udev->usb_template_ptr = NULL;
+ }
+}
+
+void
+usb_quirk_unload(void *arg)
+{
+ /* reset function pointers */
+
+ usb_test_quirk_p = &usb_test_quirk_w;
+ usb_quirk_ioctl_p = &usb_quirk_ioctl_w;
+
+ /* wait for any CPUs still executing the unloaded functions to exit */
+
+ /* XXX this is a tradeoff */
+
+ pause("WAIT", hz);
+}
+
+void
+usb_temp_unload(void *arg)
+{
+ /* reset function pointers */
+
+ usb_temp_get_desc_p = &usb_temp_get_desc_w;
+ usb_temp_setup_by_index_p = &usb_temp_setup_by_index_w;
+ usb_temp_unsetup_p = &usb_temp_unsetup_w;
+
+ /* wait for any CPUs still executing the unloaded functions to exit */
+
+ /* XXX this is a tradeoff */
+
+ pause("WAIT", hz);
+}
+
+void
+usb_bus_unload(void *arg)
+{
+ /* reset function pointers */
+
+ usb_devclass_ptr = NULL;
+
+ /* wait for any CPUs still executing the unloaded functions to exit */
+
+ /* XXX this is a tradeoff */
+
+ pause("WAIT", hz);
+}
diff --git a/rtems/freebsd/dev/usb/usb_dynamic.h b/rtems/freebsd/dev/usb/usb_dynamic.h
new file mode 100644
index 00000000..32fc8362
--- /dev/null
+++ b/rtems/freebsd/dev/usb/usb_dynamic.h
@@ -0,0 +1,61 @@
+/* $FreeBSD$ */
+/*-
+ * Copyright (c) 2008 Hans Petter Selasky. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _USB_DYNAMIC_HH_
+#define _USB_DYNAMIC_HH_
+
+/* prototypes */
+
+struct usb_device;
+struct usbd_lookup_info;
+struct usb_device_request;
+
+/* typedefs */
+
+typedef usb_error_t (usb_temp_setup_by_index_t)(struct usb_device *udev,
+ uint16_t index);
+typedef uint8_t (usb_test_quirk_t)(const struct usbd_lookup_info *info,
+ uint16_t quirk);
+typedef int (usb_quirk_ioctl_t)(unsigned long cmd, caddr_t data,
+ int fflag, struct thread *td);
+typedef void (usb_temp_unsetup_t)(struct usb_device *udev);
+
+/* global function pointers */
+
+extern usb_handle_req_t *usb_temp_get_desc_p;
+extern usb_temp_setup_by_index_t *usb_temp_setup_by_index_p;
+extern usb_temp_unsetup_t *usb_temp_unsetup_p;
+extern usb_test_quirk_t *usb_test_quirk_p;
+extern usb_quirk_ioctl_t *usb_quirk_ioctl_p;
+extern devclass_t usb_devclass_ptr;
+
+/* function prototypes */
+
+void usb_temp_unload(void *);
+void usb_quirk_unload(void *);
+void usb_bus_unload(void *);
+
+#endif /* _USB_DYNAMIC_HH_ */
diff --git a/rtems/freebsd/dev/usb/usb_endian.h b/rtems/freebsd/dev/usb/usb_endian.h
new file mode 100644
index 00000000..f2906063
--- /dev/null
+++ b/rtems/freebsd/dev/usb/usb_endian.h
@@ -0,0 +1,119 @@
+/* $FreeBSD$ */
+/*
+ * Copyright (c) 2008 Hans Petter Selasky. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _USB_ENDIAN_HH_
+#define _USB_ENDIAN_HH_
+
+#include <rtems/freebsd/sys/stdint.h>
+#include <rtems/freebsd/sys/endian.h>
+
+/*
+ * Declare the basic USB record types. USB records have an alignment
+ * of 1 byte and are always packed.
+ */
+typedef uint8_t uByte;
+typedef uint8_t uWord[2];
+typedef uint8_t uDWord[4];
+typedef uint8_t uQWord[8];
+
+/*
+ * Define a set of macros that can get and set data independent of
+ * CPU endianness and CPU alignment requirements:
+ */
+#define UGETB(w) \
+ ((w)[0])
+
+#define UGETW(w) \
+ ((w)[0] | \
+ (((uint16_t)((w)[1])) << 8))
+
+#define UGETDW(w) \
+ ((w)[0] | \
+ (((uint16_t)((w)[1])) << 8) | \
+ (((uint32_t)((w)[2])) << 16) | \
+ (((uint32_t)((w)[3])) << 24))
+
+#define UGETQW(w) \
+ ((w)[0] | \
+ (((uint16_t)((w)[1])) << 8) | \
+ (((uint32_t)((w)[2])) << 16) | \
+ (((uint32_t)((w)[3])) << 24) | \
+ (((uint64_t)((w)[4])) << 32) | \
+ (((uint64_t)((w)[5])) << 40) | \
+ (((uint64_t)((w)[6])) << 48) | \
+ (((uint64_t)((w)[7])) << 56))
+
+#define USETB(w,v) do { \
+ (w)[0] = (uint8_t)(v); \
+} while (0)
+
+#define USETW(w,v) do { \
+ (w)[0] = (uint8_t)(v); \
+ (w)[1] = (uint8_t)((v) >> 8); \
+} while (0)
+
+#define USETDW(w,v) do { \
+ (w)[0] = (uint8_t)(v); \
+ (w)[1] = (uint8_t)((v) >> 8); \
+ (w)[2] = (uint8_t)((v) >> 16); \
+ (w)[3] = (uint8_t)((v) >> 24); \
+} while (0)
+
+#define USETQW(w,v) do { \
+ (w)[0] = (uint8_t)(v); \
+ (w)[1] = (uint8_t)((v) >> 8); \
+ (w)[2] = (uint8_t)((v) >> 16); \
+ (w)[3] = (uint8_t)((v) >> 24); \
+ (w)[4] = (uint8_t)((v) >> 32); \
+ (w)[5] = (uint8_t)((v) >> 40); \
+ (w)[6] = (uint8_t)((v) >> 48); \
+ (w)[7] = (uint8_t)((v) >> 56); \
+} while (0)
+
+#define USETW2(w,b1,b0) do { \
+ (w)[0] = (uint8_t)(b0); \
+ (w)[1] = (uint8_t)(b1); \
+} while (0)
+
+#define USETW4(w,b3,b2,b1,b0) do { \
+ (w)[0] = (uint8_t)(b0); \
+ (w)[1] = (uint8_t)(b1); \
+ (w)[2] = (uint8_t)(b2); \
+ (w)[3] = (uint8_t)(b3); \
+} while (0)
+
+#define USETW8(w,b7,b6,b5,b4,b3,b2,b1,b0) do { \
+ (w)[0] = (uint8_t)(b0); \
+ (w)[1] = (uint8_t)(b1); \
+ (w)[2] = (uint8_t)(b2); \
+ (w)[3] = (uint8_t)(b3); \
+ (w)[4] = (uint8_t)(b4); \
+ (w)[5] = (uint8_t)(b5); \
+ (w)[6] = (uint8_t)(b6); \
+ (w)[7] = (uint8_t)(b7); \
+} while (0)
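+
+/*
+ * Example usage (illustrative, assuming "ddesc" is a struct
+ * usb_device_descriptor): read and write the 16-bit little-endian
+ * "idVendor" field without CPU endianness or alignment assumptions:
+ *
+ *    uint16_t vid = UGETW(ddesc.idVendor);
+ *    USETW(ddesc.idVendor, 0x1234);
+ */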
+
+#endif /* _USB_ENDIAN_HH_ */
diff --git a/rtems/freebsd/dev/usb/usb_error.c b/rtems/freebsd/dev/usb/usb_error.c
new file mode 100644
index 00000000..c050939e
--- /dev/null
+++ b/rtems/freebsd/dev/usb/usb_error.c
@@ -0,0 +1,93 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/* $FreeBSD$ */
+/*-
+ * Copyright (c) 2008 Hans Petter Selasky. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <rtems/freebsd/sys/stdint.h>
+#include <rtems/freebsd/sys/stddef.h>
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/queue.h>
+#include <rtems/freebsd/sys/types.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/bus.h>
+#include <rtems/freebsd/sys/linker_set.h>
+#include <rtems/freebsd/sys/module.h>
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/mutex.h>
+#include <rtems/freebsd/sys/condvar.h>
+#include <rtems/freebsd/sys/sysctl.h>
+#include <rtems/freebsd/sys/sx.h>
+#include <rtems/freebsd/sys/unistd.h>
+#include <rtems/freebsd/sys/callout.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/priv.h>
+
+#include <rtems/freebsd/dev/usb/usb.h>
+#include <rtems/freebsd/dev/usb/usbdi.h>
+
+static const char* usb_errstr_table[USB_ERR_MAX] = {
+ [USB_ERR_NORMAL_COMPLETION] = "USB_ERR_NORMAL_COMPLETION",
+ [USB_ERR_PENDING_REQUESTS] = "USB_ERR_PENDING_REQUESTS",
+ [USB_ERR_NOT_STARTED] = "USB_ERR_NOT_STARTED",
+ [USB_ERR_INVAL] = "USB_ERR_INVAL",
+ [USB_ERR_NOMEM] = "USB_ERR_NOMEM",
+ [USB_ERR_CANCELLED] = "USB_ERR_CANCELLED",
+ [USB_ERR_BAD_ADDRESS] = "USB_ERR_BAD_ADDRESS",
+ [USB_ERR_BAD_BUFSIZE] = "USB_ERR_BAD_BUFSIZE",
+ [USB_ERR_BAD_FLAG] = "USB_ERR_BAD_FLAG",
+ [USB_ERR_NO_CALLBACK] = "USB_ERR_NO_CALLBACK",
+ [USB_ERR_IN_USE] = "USB_ERR_IN_USE",
+ [USB_ERR_NO_ADDR] = "USB_ERR_NO_ADDR",
+ [USB_ERR_NO_PIPE] = "USB_ERR_NO_PIPE",
+ [USB_ERR_ZERO_NFRAMES] = "USB_ERR_ZERO_NFRAMES",
+ [USB_ERR_ZERO_MAXP] = "USB_ERR_ZERO_MAXP",
+ [USB_ERR_SET_ADDR_FAILED] = "USB_ERR_SET_ADDR_FAILED",
+ [USB_ERR_NO_POWER] = "USB_ERR_NO_POWER",
+ [USB_ERR_TOO_DEEP] = "USB_ERR_TOO_DEEP",
+ [USB_ERR_IOERROR] = "USB_ERR_IOERROR",
+ [USB_ERR_NOT_CONFIGURED] = "USB_ERR_NOT_CONFIGURED",
+ [USB_ERR_TIMEOUT] = "USB_ERR_TIMEOUT",
+ [USB_ERR_SHORT_XFER] = "USB_ERR_SHORT_XFER",
+ [USB_ERR_STALLED] = "USB_ERR_STALLED",
+ [USB_ERR_INTERRUPTED] = "USB_ERR_INTERRUPTED",
+ [USB_ERR_DMA_LOAD_FAILED] = "USB_ERR_DMA_LOAD_FAILED",
+ [USB_ERR_BAD_CONTEXT] = "USB_ERR_BAD_CONTEXT",
+ [USB_ERR_NO_ROOT_HUB] = "USB_ERR_NO_ROOT_HUB",
+ [USB_ERR_NO_INTR_THREAD] = "USB_ERR_NO_INTR_THREAD",
+ [USB_ERR_NOT_LOCKED] = "USB_ERR_NOT_LOCKED",
+};
+
+/*------------------------------------------------------------------------*
+ * usbd_errstr
+ *
+ * This function converts a USB error code into a string.
+ *------------------------------------------------------------------------*/
+const char *
+usbd_errstr(usb_error_t err)
+{
+ return (err < USB_ERR_MAX ? usb_errstr_table[err] : "USB_ERR_UNKNOWN");
+}
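+
+/*
+ * Example (illustrative): converting a request error into a readable
+ * message, assuming "err" holds the result of a control request:
+ *
+ *    if (err != USB_ERR_NORMAL_COMPLETION)
+ *        printf("request failed: %s\n", usbd_errstr(err));
+ */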
diff --git a/rtems/freebsd/dev/usb/usb_freebsd.h b/rtems/freebsd/dev/usb/usb_freebsd.h
new file mode 100644
index 00000000..ed0f0356
--- /dev/null
+++ b/rtems/freebsd/dev/usb/usb_freebsd.h
@@ -0,0 +1,69 @@
+/* $FreeBSD$ */
+/*-
+ * Copyright (c) 2008 Hans Petter Selasky. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * Including this file is mandatory for all USB-related C files in the kernel.
+ */
+
+#ifndef _USB_FREEBSD_HH_
+#define _USB_FREEBSD_HH_
+
+/* Default USB configuration */
+#ifndef __rtems__
+#define USB_HAVE_UGEN 1
+#define USB_HAVE_BUSDMA 1
+#define USB_HAVE_COMPAT_LINUX 1
+#define USB_HAVE_USER_IO 1
+#define USB_HAVE_MBUF 1
+#define USB_HAVE_TT_SUPPORT 1
+#define USB_HAVE_POWERD 1
+#define USB_HAVE_MSCTEST 1
+#endif /* __rtems__ */
+
+#define USB_TD_GET_PROC(td) (td)->td_proc
+#define USB_PROC_GET_GID(td) (td)->p_pgid
+
+#define USB_HOST_ALIGN 8 /* bytes, must be power of two */
+#define USB_FS_ISOC_UFRAME_MAX 4 /* exclusive unit */
+#define USB_BUS_MAX 256 /* units */
+#define USB_MAX_DEVICES 128 /* units */
+#define USB_IFACE_MAX 32 /* units */
+#define USB_FIFO_MAX 128 /* units */
+
+#define USB_MAX_FS_ISOC_FRAMES_PER_XFER (120) /* units */
+#define USB_MAX_HS_ISOC_FRAMES_PER_XFER (8*120) /* units */
+
+#define USB_HUB_MAX_DEPTH 5
+#define USB_EP0_BUFSIZE 1024 /* bytes */
+
+typedef uint32_t usb_timeout_t; /* milliseconds */
+typedef uint32_t usb_frlength_t; /* bytes */
+typedef uint32_t usb_frcount_t; /* units */
+typedef uint32_t usb_size_t; /* bytes */
+typedef uint32_t usb_ticks_t; /* system defined */
+typedef uint16_t usb_power_mask_t; /* see "USB_HW_POWER_XXX" */
+
+#endif /* _USB_FREEBSD_HH_ */
diff --git a/rtems/freebsd/dev/usb/usb_generic.c b/rtems/freebsd/dev/usb/usb_generic.c
new file mode 100644
index 00000000..a487cafb
--- /dev/null
+++ b/rtems/freebsd/dev/usb/usb_generic.c
@@ -0,0 +1,2239 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/* $FreeBSD$ */
+/*-
+ * Copyright (c) 2008 Hans Petter Selasky. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <rtems/freebsd/sys/stdint.h>
+#include <rtems/freebsd/sys/stddef.h>
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/queue.h>
+#include <rtems/freebsd/sys/types.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/bus.h>
+#include <rtems/freebsd/sys/linker_set.h>
+#include <rtems/freebsd/sys/module.h>
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/mutex.h>
+#include <rtems/freebsd/sys/condvar.h>
+#include <rtems/freebsd/sys/sysctl.h>
+#include <rtems/freebsd/sys/sx.h>
+#include <rtems/freebsd/sys/unistd.h>
+#include <rtems/freebsd/sys/callout.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/priv.h>
+#include <rtems/freebsd/sys/conf.h>
+#include <rtems/freebsd/sys/fcntl.h>
+
+#include <rtems/freebsd/dev/usb/usb.h>
+#include <rtems/freebsd/dev/usb/usb_ioctl.h>
+#include <rtems/freebsd/dev/usb/usbdi.h>
+#include <rtems/freebsd/dev/usb/usbdi_util.h>
+
+#define USB_DEBUG_VAR ugen_debug
+
+#include <rtems/freebsd/dev/usb/usb_core.h>
+#include <rtems/freebsd/dev/usb/usb_dev.h>
+#include <rtems/freebsd/dev/usb/usb_mbuf.h>
+#include <rtems/freebsd/dev/usb/usb_process.h>
+#include <rtems/freebsd/dev/usb/usb_device.h>
+#include <rtems/freebsd/dev/usb/usb_debug.h>
+#include <rtems/freebsd/dev/usb/usb_request.h>
+#include <rtems/freebsd/dev/usb/usb_busdma.h>
+#include <rtems/freebsd/dev/usb/usb_util.h>
+#include <rtems/freebsd/dev/usb/usb_hub.h>
+#include <rtems/freebsd/dev/usb/usb_generic.h>
+#include <rtems/freebsd/dev/usb/usb_transfer.h>
+
+#include <rtems/freebsd/dev/usb/usb_controller.h>
+#include <rtems/freebsd/dev/usb/usb_bus.h>
+
+#if USB_HAVE_UGEN
+
+/* defines */
+
+#define UGEN_BULK_FS_BUFFER_SIZE (64*32) /* bytes */
+#define UGEN_BULK_HS_BUFFER_SIZE (1024*32) /* bytes */
+#define UGEN_HW_FRAMES 50 /* number of milliseconds per transfer */
+
+/* function prototypes */
+
+static usb_callback_t ugen_read_clear_stall_callback;
+static usb_callback_t ugen_write_clear_stall_callback;
+static usb_callback_t ugen_ctrl_read_callback;
+static usb_callback_t ugen_ctrl_write_callback;
+static usb_callback_t ugen_isoc_read_callback;
+static usb_callback_t ugen_isoc_write_callback;
+static usb_callback_t ugen_ctrl_fs_callback;
+
+static usb_fifo_open_t ugen_open;
+static usb_fifo_close_t ugen_close;
+static usb_fifo_ioctl_t ugen_ioctl;
+static usb_fifo_ioctl_t ugen_ioctl_post;
+static usb_fifo_cmd_t ugen_start_read;
+static usb_fifo_cmd_t ugen_start_write;
+static usb_fifo_cmd_t ugen_stop_io;
+
+static int ugen_transfer_setup(struct usb_fifo *,
+ const struct usb_config *, uint8_t);
+static int ugen_open_pipe_write(struct usb_fifo *);
+static int ugen_open_pipe_read(struct usb_fifo *);
+static int ugen_set_config(struct usb_fifo *, uint8_t);
+static int ugen_set_interface(struct usb_fifo *, uint8_t, uint8_t);
+static int ugen_get_cdesc(struct usb_fifo *, struct usb_gen_descriptor *);
+static int ugen_get_sdesc(struct usb_fifo *, struct usb_gen_descriptor *);
+static int ugen_get_iface_driver(struct usb_fifo *f, struct usb_gen_descriptor *ugd);
+static int usb_gen_fill_deviceinfo(struct usb_fifo *,
+ struct usb_device_info *);
+static int ugen_re_enumerate(struct usb_fifo *);
+static int ugen_iface_ioctl(struct usb_fifo *, u_long, void *, int);
+static uint8_t ugen_fs_get_complete(struct usb_fifo *, uint8_t *);
+static int ugen_fs_uninit(struct usb_fifo *f);
+
+/* structures */
+
+struct usb_fifo_methods usb_ugen_methods = {
+ .f_open = &ugen_open,
+ .f_close = &ugen_close,
+ .f_ioctl = &ugen_ioctl,
+ .f_ioctl_post = &ugen_ioctl_post,
+ .f_start_read = &ugen_start_read,
+ .f_stop_read = &ugen_stop_io,
+ .f_start_write = &ugen_start_write,
+ .f_stop_write = &ugen_stop_io,
+};
+
+#ifdef USB_DEBUG
+static int ugen_debug = 0;
+
+SYSCTL_NODE(_hw_usb, OID_AUTO, ugen, CTLFLAG_RW, 0, "USB generic");
+SYSCTL_INT(_hw_usb_ugen, OID_AUTO, debug, CTLFLAG_RW, &ugen_debug,
+ 0, "Debug level");
+
+TUNABLE_INT("hw.usb.ugen.debug", &ugen_debug);
+#endif
+
+static int
+ugen_transfer_setup(struct usb_fifo *f,
+ const struct usb_config *setup, uint8_t n_setup)
+{
+ struct usb_endpoint *ep = usb_fifo_softc(f);
+ struct usb_device *udev = f->udev;
+ uint8_t iface_index = ep->iface_index;
+ int error;
+
+ mtx_unlock(f->priv_mtx);
+
+ /*
+ * "usbd_transfer_setup()" can sleep so one needs to make a wrapper,
+ * exiting the mutex and checking things
+ */
+ error = usbd_transfer_setup(udev, &iface_index, f->xfer,
+ setup, n_setup, f, f->priv_mtx);
+ if (error == 0) {
+
+ if (f->xfer[0]->nframes == 1) {
+ error = usb_fifo_alloc_buffer(f,
+ f->xfer[0]->max_data_length, 2);
+ } else {
+ error = usb_fifo_alloc_buffer(f,
+ f->xfer[0]->max_frame_size,
+ 2 * f->xfer[0]->nframes);
+ }
+ if (error) {
+ usbd_transfer_unsetup(f->xfer, n_setup);
+ }
+ }
+ mtx_lock(f->priv_mtx);
+
+ return (error);
+}
+
+static int
+ugen_open(struct usb_fifo *f, int fflags)
+{
+ struct usb_endpoint *ep = usb_fifo_softc(f);
+ struct usb_endpoint_descriptor *ed = ep->edesc;
+ uint8_t type;
+
+ DPRINTFN(6, "flag=0x%x\n", fflags);
+
+ mtx_lock(f->priv_mtx);
+ switch (usbd_get_speed(f->udev)) {
+ case USB_SPEED_LOW:
+ case USB_SPEED_FULL:
+ f->nframes = UGEN_HW_FRAMES;
+ f->bufsize = UGEN_BULK_FS_BUFFER_SIZE;
+ break;
+ default:
+ f->nframes = UGEN_HW_FRAMES * 8;
+ f->bufsize = UGEN_BULK_HS_BUFFER_SIZE;
+ break;
+ }
+
+ type = ed->bmAttributes & UE_XFERTYPE;
+ if (type == UE_INTERRUPT) {
+ f->bufsize = 0; /* use "wMaxPacketSize" */
+ }
+ f->timeout = USB_NO_TIMEOUT;
+ f->flag_short = 0;
+ f->fifo_zlp = 0;
+ mtx_unlock(f->priv_mtx);
+
+ return (0);
+}
+
+static void
+ugen_close(struct usb_fifo *f, int fflags)
+{
+ DPRINTFN(6, "flag=0x%x\n", fflags);
+
+ /* cleanup */
+
+ mtx_lock(f->priv_mtx);
+ usbd_transfer_stop(f->xfer[0]);
+ usbd_transfer_stop(f->xfer[1]);
+ mtx_unlock(f->priv_mtx);
+
+ usbd_transfer_unsetup(f->xfer, 2);
+ usb_fifo_free_buffer(f);
+
+ if (ugen_fs_uninit(f)) {
+ /* ignore any errors - we are closing */
+ DPRINTFN(6, "no FIFOs\n");
+ }
+}
+
+static int
+ugen_open_pipe_write(struct usb_fifo *f)
+{
+ struct usb_config usb_config[2];
+ struct usb_endpoint *ep = usb_fifo_softc(f);
+ struct usb_endpoint_descriptor *ed = ep->edesc;
+
+ mtx_assert(f->priv_mtx, MA_OWNED);
+
+ if (f->xfer[0] || f->xfer[1]) {
+ /* transfers are already opened */
+ return (0);
+ }
+ bzero(usb_config, sizeof(usb_config));
+
+ usb_config[1].type = UE_CONTROL;
+ usb_config[1].endpoint = 0;
+ usb_config[1].direction = UE_DIR_ANY;
+ usb_config[1].timeout = 1000; /* 1 second */
+ usb_config[1].interval = 50; /* 50 milliseconds */
+ usb_config[1].bufsize = sizeof(struct usb_device_request);
+ usb_config[1].callback = &ugen_write_clear_stall_callback;
+ usb_config[1].usb_mode = USB_MODE_HOST;
+
+ usb_config[0].type = ed->bmAttributes & UE_XFERTYPE;
+ usb_config[0].endpoint = ed->bEndpointAddress & UE_ADDR;
+ usb_config[0].direction = UE_DIR_TX;
+ usb_config[0].interval = USB_DEFAULT_INTERVAL;
+ usb_config[0].flags.proxy_buffer = 1;
+ usb_config[0].usb_mode = USB_MODE_DUAL; /* both modes */
+
+ switch (ed->bmAttributes & UE_XFERTYPE) {
+ case UE_INTERRUPT:
+ case UE_BULK:
+ if (f->flag_short) {
+ usb_config[0].flags.force_short_xfer = 1;
+ }
+ usb_config[0].callback = &ugen_ctrl_write_callback;
+ usb_config[0].timeout = f->timeout;
+ usb_config[0].frames = 1;
+ usb_config[0].bufsize = f->bufsize;
+ if (ugen_transfer_setup(f, usb_config, 2)) {
+ return (EIO);
+ }
+ /* first transfer does not clear stall */
+ f->flag_stall = 0;
+ break;
+
+ case UE_ISOCHRONOUS:
+ usb_config[0].flags.short_xfer_ok = 1;
+ usb_config[0].bufsize = 0; /* use default */
+ usb_config[0].frames = f->nframes;
+ usb_config[0].callback = &ugen_isoc_write_callback;
+ usb_config[0].timeout = 0;
+
+ /* clone configuration */
+ usb_config[1] = usb_config[0];
+
+ if (ugen_transfer_setup(f, usb_config, 2)) {
+ return (EIO);
+ }
+ break;
+ default:
+ return (EINVAL);
+ }
+ return (0);
+}
+
+static int
+ugen_open_pipe_read(struct usb_fifo *f)
+{
+ struct usb_config usb_config[2];
+ struct usb_endpoint *ep = usb_fifo_softc(f);
+ struct usb_endpoint_descriptor *ed = ep->edesc;
+
+ mtx_assert(f->priv_mtx, MA_OWNED);
+
+ if (f->xfer[0] || f->xfer[1]) {
+ /* transfers are already opened */
+ return (0);
+ }
+ bzero(usb_config, sizeof(usb_config));
+
+ usb_config[1].type = UE_CONTROL;
+ usb_config[1].endpoint = 0;
+ usb_config[1].direction = UE_DIR_ANY;
+ usb_config[1].timeout = 1000; /* 1 second */
+ usb_config[1].interval = 50; /* 50 milliseconds */
+ usb_config[1].bufsize = sizeof(struct usb_device_request);
+ usb_config[1].callback = &ugen_read_clear_stall_callback;
+ usb_config[1].usb_mode = USB_MODE_HOST;
+
+ usb_config[0].type = ed->bmAttributes & UE_XFERTYPE;
+ usb_config[0].endpoint = ed->bEndpointAddress & UE_ADDR;
+ usb_config[0].direction = UE_DIR_RX;
+ usb_config[0].interval = USB_DEFAULT_INTERVAL;
+ usb_config[0].flags.proxy_buffer = 1;
+ usb_config[0].usb_mode = USB_MODE_DUAL; /* both modes */
+
+ switch (ed->bmAttributes & UE_XFERTYPE) {
+ case UE_INTERRUPT:
+ case UE_BULK:
+ if (f->flag_short) {
+ usb_config[0].flags.short_xfer_ok = 1;
+ }
+ usb_config[0].timeout = f->timeout;
+ usb_config[0].frames = 1;
+ usb_config[0].callback = &ugen_ctrl_read_callback;
+ usb_config[0].bufsize = f->bufsize;
+
+ if (ugen_transfer_setup(f, usb_config, 2)) {
+ return (EIO);
+ }
+ /* first transfer does not clear stall */
+ f->flag_stall = 0;
+ break;
+
+ case UE_ISOCHRONOUS:
+ usb_config[0].flags.short_xfer_ok = 1;
+ usb_config[0].bufsize = 0; /* use default */
+ usb_config[0].frames = f->nframes;
+ usb_config[0].callback = &ugen_isoc_read_callback;
+ usb_config[0].timeout = 0;
+
+ /* clone configuration */
+ usb_config[1] = usb_config[0];
+
+ if (ugen_transfer_setup(f, usb_config, 2)) {
+ return (EIO);
+ }
+ break;
+
+ default:
+ return (EINVAL);
+ }
+ return (0);
+}
+
+static void
+ugen_start_read(struct usb_fifo *f)
+{
+ /* check that pipes are open */
+ if (ugen_open_pipe_read(f)) {
+ /* signal error */
+ usb_fifo_put_data_error(f);
+ }
+ /* start transfers */
+ usbd_transfer_start(f->xfer[0]);
+ usbd_transfer_start(f->xfer[1]);
+}
+
+static void
+ugen_start_write(struct usb_fifo *f)
+{
+ /* check that pipes are open */
+ if (ugen_open_pipe_write(f)) {
+ /* signal error */
+ usb_fifo_get_data_error(f);
+ }
+ /* start transfers */
+ usbd_transfer_start(f->xfer[0]);
+ usbd_transfer_start(f->xfer[1]);
+}
+
+static void
+ugen_stop_io(struct usb_fifo *f)
+{
+ /* stop transfers */
+ usbd_transfer_stop(f->xfer[0]);
+ usbd_transfer_stop(f->xfer[1]);
+}
+
+static void
+ugen_ctrl_read_callback(struct usb_xfer *xfer, usb_error_t error)
+{
+ struct usb_fifo *f = usbd_xfer_softc(xfer);
+ struct usb_mbuf *m;
+
+ DPRINTFN(4, "actlen=%u, aframes=%u\n", xfer->actlen, xfer->aframes);
+
+ switch (USB_GET_STATE(xfer)) {
+ case USB_ST_TRANSFERRED:
+ if (xfer->actlen == 0) {
+ if (f->fifo_zlp != 4) {
+ f->fifo_zlp++;
+ } else {
+ /*
+ * Throttle a little bit: we have
+ * multiple ZLPs in a row!
+ */
+ xfer->interval = 64; /* ms */
+ }
+ } else {
+ /* clear throttle */
+ xfer->interval = 0;
+ f->fifo_zlp = 0;
+ }
+ usb_fifo_put_data(f, xfer->frbuffers, 0,
+ xfer->actlen, 1);
+
+ /* FALLTHROUGH */
+ case USB_ST_SETUP:
+ if (f->flag_stall) {
+ usbd_transfer_start(f->xfer[1]);
+ break;
+ }
+ USB_IF_POLL(&f->free_q, m);
+ if (m) {
+ usbd_xfer_set_frame_len(xfer, 0, usbd_xfer_max_len(xfer));
+ usbd_transfer_submit(xfer);
+ }
+ break;
+
+ default: /* Error */
+ if (xfer->error != USB_ERR_CANCELLED) {
+ /* send a zero length packet to userland */
+ usb_fifo_put_data(f, xfer->frbuffers, 0, 0, 1);
+ f->flag_stall = 1;
+ f->fifo_zlp = 0;
+ usbd_transfer_start(f->xfer[1]);
+ }
+ break;
+ }
+}
+
+static void
+ugen_ctrl_write_callback(struct usb_xfer *xfer, usb_error_t error)
+{
+ struct usb_fifo *f = usbd_xfer_softc(xfer);
+ usb_frlength_t actlen;
+
+ DPRINTFN(4, "actlen=%u, aframes=%u\n", xfer->actlen, xfer->aframes);
+
+ switch (USB_GET_STATE(xfer)) {
+ case USB_ST_SETUP:
+ case USB_ST_TRANSFERRED:
+ /*
+ * If writing is in stall, just jump to clear stall
+ * callback and solve the situation.
+ */
+ if (f->flag_stall) {
+ usbd_transfer_start(f->xfer[1]);
+ break;
+ }
+ /*
+ * Write data, setup and perform hardware transfer.
+ */
+ if (usb_fifo_get_data(f, xfer->frbuffers, 0,
+ xfer->max_data_length, &actlen, 0)) {
+ usbd_xfer_set_frame_len(xfer, 0, actlen);
+ usbd_transfer_submit(xfer);
+ }
+ break;
+
+ default: /* Error */
+ if (xfer->error != USB_ERR_CANCELLED) {
+ f->flag_stall = 1;
+ usbd_transfer_start(f->xfer[1]);
+ }
+ break;
+ }
+}
+
+static void
+ugen_read_clear_stall_callback(struct usb_xfer *xfer, usb_error_t error)
+{
+ struct usb_fifo *f = usbd_xfer_softc(xfer);
+ struct usb_xfer *xfer_other = f->xfer[0];
+
+ if (f->flag_stall == 0) {
+ /* nothing to do */
+ return;
+ }
+ if (usbd_clear_stall_callback(xfer, xfer_other)) {
+ DPRINTFN(5, "f=%p: stall cleared\n", f);
+ f->flag_stall = 0;
+ usbd_transfer_start(xfer_other);
+ }
+}
+
+static void
+ugen_write_clear_stall_callback(struct usb_xfer *xfer, usb_error_t error)
+{
+ struct usb_fifo *f = usbd_xfer_softc(xfer);
+ struct usb_xfer *xfer_other = f->xfer[0];
+
+ if (f->flag_stall == 0) {
+ /* nothing to do */
+ return;
+ }
+ if (usbd_clear_stall_callback(xfer, xfer_other)) {
+ DPRINTFN(5, "f=%p: stall cleared\n", f);
+ f->flag_stall = 0;
+ usbd_transfer_start(xfer_other);
+ }
+}
+
+static void
+ugen_isoc_read_callback(struct usb_xfer *xfer, usb_error_t error)
+{
+ struct usb_fifo *f = usbd_xfer_softc(xfer);
+ usb_frlength_t offset;
+ usb_frcount_t n;
+
+ DPRINTFN(4, "actlen=%u, aframes=%u\n", xfer->actlen, xfer->aframes);
+
+ switch (USB_GET_STATE(xfer)) {
+ case USB_ST_TRANSFERRED:
+
+ DPRINTFN(6, "actlen=%d\n", xfer->actlen);
+
+ offset = 0;
+
+ for (n = 0; n != xfer->aframes; n++) {
+ usb_fifo_put_data(f, xfer->frbuffers, offset,
+ xfer->frlengths[n], 1);
+ offset += xfer->max_frame_size;
+ }
+
+ /* FALLTHROUGH */
+ case USB_ST_SETUP:
+tr_setup:
+ for (n = 0; n != xfer->nframes; n++) {
+ /* setup size for next transfer */
+ usbd_xfer_set_frame_len(xfer, n, xfer->max_frame_size);
+ }
+ usbd_transfer_submit(xfer);
+ break;
+
+ default: /* Error */
+ if (xfer->error == USB_ERR_CANCELLED) {
+ break;
+ }
+ goto tr_setup;
+ }
+}
+
+static void
+ugen_isoc_write_callback(struct usb_xfer *xfer, usb_error_t error)
+{
+ struct usb_fifo *f = usbd_xfer_softc(xfer);
+ usb_frlength_t actlen;
+ usb_frlength_t offset;
+ usb_frcount_t n;
+
+ DPRINTFN(4, "actlen=%u, aframes=%u\n", xfer->actlen, xfer->aframes);
+
+ switch (USB_GET_STATE(xfer)) {
+ case USB_ST_TRANSFERRED:
+ case USB_ST_SETUP:
+tr_setup:
+ offset = 0;
+ for (n = 0; n != xfer->nframes; n++) {
+ if (usb_fifo_get_data(f, xfer->frbuffers, offset,
+ xfer->max_frame_size, &actlen, 1)) {
+ usbd_xfer_set_frame_len(xfer, n, actlen);
+ offset += actlen;
+ } else {
+ break;
+ }
+ }
+
+ for (; n != xfer->nframes; n++) {
+ /* fill in zero frames */
+ usbd_xfer_set_frame_len(xfer, n, 0);
+ }
+ usbd_transfer_submit(xfer);
+ break;
+
+ default: /* Error */
+ if (xfer->error == USB_ERR_CANCELLED) {
+ break;
+ }
+ goto tr_setup;
+ }
+}
+
+static int
+ugen_set_config(struct usb_fifo *f, uint8_t index)
+{
+ DPRINTFN(2, "index %u\n", index);
+
+ if (f->udev->flags.usb_mode != USB_MODE_HOST) {
+ /* not possible in device side mode */
+ return (ENOTTY);
+ }
+ if (f->udev->curr_config_index == index) {
+ /* no change needed */
+ return (0);
+ }
+ /* make sure all FIFOs are gone, else there can be a deadlock */
+ if (ugen_fs_uninit(f)) {
+ /* ignore any errors */
+ DPRINTFN(6, "no FIFOs\n");
+ }
+ /* change setting - will free generic FIFOs, if any */
+ if (usbd_set_config_index(f->udev, index)) {
+ return (EIO);
+ }
+ /* probe and attach */
+ if (usb_probe_and_attach(f->udev, USB_IFACE_INDEX_ANY)) {
+ return (EIO);
+ }
+ return (0);
+}
+
+static int
+ugen_set_interface(struct usb_fifo *f,
+ uint8_t iface_index, uint8_t alt_index)
+{
+ DPRINTFN(2, "%u, %u\n", iface_index, alt_index);
+
+ if (f->udev->flags.usb_mode != USB_MODE_HOST) {
+ /* not possible in device side mode */
+ return (ENOTTY);
+ }
+ /* make sure all FIFOs are gone, else there can be a deadlock */
+ if (ugen_fs_uninit(f)) {
+ /* ignore any errors */
+ DPRINTFN(6, "no FIFOs\n");
+ }
+ /* change setting - will free generic FIFOs, if any */
+ if (usbd_set_alt_interface_index(f->udev, iface_index, alt_index)) {
+ return (EIO);
+ }
+ /* probe and attach */
+ if (usb_probe_and_attach(f->udev, iface_index)) {
+ return (EIO);
+ }
+ return (0);
+}
+
+/*------------------------------------------------------------------------*
+ * ugen_get_cdesc
+ *
+ * This function will retrieve the complete configuration descriptor
+ * at the given index.
+ *------------------------------------------------------------------------*/
+static int
+ugen_get_cdesc(struct usb_fifo *f, struct usb_gen_descriptor *ugd)
+{
+ struct usb_config_descriptor *cdesc;
+ struct usb_device *udev = f->udev;
+ int error;
+ uint16_t len;
+ uint8_t free_data;
+
+ DPRINTFN(6, "\n");
+
+ if (ugd->ugd_data == NULL) {
+ /* userland pointer should not be zero */
+ return (EINVAL);
+ }
+ if ((ugd->ugd_config_index == USB_UNCONFIG_INDEX) ||
+ (ugd->ugd_config_index == udev->curr_config_index)) {
+ cdesc = usbd_get_config_descriptor(udev);
+ if (cdesc == NULL) {
+ return (ENXIO);
+ }
+ free_data = 0;
+
+ } else {
+ if (usbd_req_get_config_desc_full(udev,
+ NULL, &cdesc, M_USBDEV,
+ ugd->ugd_config_index)) {
+ return (ENXIO);
+ }
+ free_data = 1;
+ }
+
+ len = UGETW(cdesc->wTotalLength);
+ if (len > ugd->ugd_maxlen) {
+ len = ugd->ugd_maxlen;
+ }
+ DPRINTFN(6, "len=%u\n", len);
+
+ ugd->ugd_actlen = len;
+ ugd->ugd_offset = 0;
+
+ error = copyout(cdesc, ugd->ugd_data, len);
+
+ if (free_data) {
+ free(cdesc, M_USBDEV);
+ }
+ return (error);
+}
+
+static int
+ugen_get_sdesc(struct usb_fifo *f, struct usb_gen_descriptor *ugd)
+{
+ void *ptr = f->udev->bus->scratch[0].data;
+ uint16_t size = sizeof(f->udev->bus->scratch[0].data);
+ int error;
+
+ if (usbd_req_get_string_desc(f->udev, NULL, ptr,
+ size, ugd->ugd_lang_id, ugd->ugd_string_index)) {
+ error = EINVAL;
+ } else {
+
+ if (size > ((uint8_t *)ptr)[0]) {
+ size = ((uint8_t *)ptr)[0];
+ }
+ if (size > ugd->ugd_maxlen) {
+ size = ugd->ugd_maxlen;
+ }
+ ugd->ugd_actlen = size;
+ ugd->ugd_offset = 0;
+
+ error = copyout(ptr, ugd->ugd_data, size);
+ }
+ return (error);
+}
+
+/*------------------------------------------------------------------------*
+ * ugen_get_iface_driver
+ *
+ * This function generates a USB interface description for userland.
+ *
+ * Returns:
+ * 0: Success
+ * Else: Failure
+ *------------------------------------------------------------------------*/
+static int
+ugen_get_iface_driver(struct usb_fifo *f, struct usb_gen_descriptor *ugd)
+{
+ struct usb_device *udev = f->udev;
+ struct usb_interface *iface;
+ const char *ptr;
+ const char *desc;
+ unsigned int len;
+ unsigned int maxlen;
+ char buf[128];
+ int error;
+
+ DPRINTFN(6, "\n");
+
+ if ((ugd->ugd_data == NULL) || (ugd->ugd_maxlen == 0)) {
+ /* userland pointer should not be zero */
+ return (EINVAL);
+ }
+
+ iface = usbd_get_iface(udev, ugd->ugd_iface_index);
+ if ((iface == NULL) || (iface->idesc == NULL)) {
+ /* invalid interface index */
+ return (EINVAL);
+ }
+
+ /* read out device nameunit string, if any */
+ if ((iface->subdev != NULL) &&
+ device_is_attached(iface->subdev) &&
+ (ptr = device_get_nameunit(iface->subdev)) &&
+ (desc = device_get_desc(iface->subdev))) {
+
+ /* print description */
+ snprintf(buf, sizeof(buf), "%s: <%s>", ptr, desc);
+
+ /* range checks */
+ maxlen = ugd->ugd_maxlen - 1;
+ len = strlen(buf);
+ if (len > maxlen)
+ len = maxlen;
+
+ /* update actual length, including terminating zero */
+ ugd->ugd_actlen = len + 1;
+
+ /* copy out interface description */
+ error = copyout(buf, ugd->ugd_data, ugd->ugd_actlen);
+ } else {
+ /* zero length string is default */
+ error = copyout("", ugd->ugd_data, 1);
+ }
+ return (error);
+}
+
+/*------------------------------------------------------------------------*
+ * usb_gen_fill_deviceinfo
+ *
+ * This function dumps information about a USB device to the
+ * structure pointed to by the "di" argument.
+ *
+ * Returns:
+ * 0: Success
+ * Else: Failure
+ *------------------------------------------------------------------------*/
+static int
+usb_gen_fill_deviceinfo(struct usb_fifo *f, struct usb_device_info *di)
+{
+ struct usb_device *udev;
+ struct usb_device *hub;
+
+ udev = f->udev;
+
+ bzero(di, sizeof(di[0]));
+
+ di->udi_bus = device_get_unit(udev->bus->bdev);
+ di->udi_addr = udev->address;
+ di->udi_index = udev->device_index;
+ strlcpy(di->udi_serial, usb_get_serial(udev), sizeof(di->udi_serial));
+ strlcpy(di->udi_vendor, usb_get_manufacturer(udev), sizeof(di->udi_vendor));
+ strlcpy(di->udi_product, usb_get_product(udev), sizeof(di->udi_product));
+ usb_printbcd(di->udi_release, sizeof(di->udi_release),
+ UGETW(udev->ddesc.bcdDevice));
+ di->udi_vendorNo = UGETW(udev->ddesc.idVendor);
+ di->udi_productNo = UGETW(udev->ddesc.idProduct);
+ di->udi_releaseNo = UGETW(udev->ddesc.bcdDevice);
+ di->udi_class = udev->ddesc.bDeviceClass;
+ di->udi_subclass = udev->ddesc.bDeviceSubClass;
+ di->udi_protocol = udev->ddesc.bDeviceProtocol;
+ di->udi_config_no = udev->curr_config_no;
+ di->udi_config_index = udev->curr_config_index;
+ di->udi_power = udev->flags.self_powered ? 0 : udev->power;
+ di->udi_speed = udev->speed;
+ di->udi_mode = udev->flags.usb_mode;
+ di->udi_power_mode = udev->power_mode;
+ di->udi_suspended = udev->flags.peer_suspended;
+
+ hub = udev->parent_hub;
+ if (hub) {
+ di->udi_hubaddr = hub->address;
+ di->udi_hubindex = hub->device_index;
+ di->udi_hubport = udev->port_no;
+ }
+ return (0);
+}
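+
+/*
+ * Example (illustrative sketch): reading the structure filled in by
+ * usb_gen_fill_deviceinfo() above from userland.  Headers as in the
+ * earlier sketch, plus <stdio.h>.
+ */
+#if 0
+static void
+example_print_deviceinfo(int fd)
+{
+ struct usb_device_info di;
+
+ if (ioctl(fd, USB_GET_DEVICEINFO, &di) < 0)
+ return;
+
+ printf("%s %s (0x%04x:0x%04x) at address %u\n",
+ di.udi_vendor, di.udi_product,
+ di.udi_vendorNo, di.udi_productNo, di.udi_addr);
+}
+#endif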
+
+/*------------------------------------------------------------------------*
+ * ugen_check_request
+ *
+ * Return values:
+ * 0: Access allowed
+ * Else: No access
+ *------------------------------------------------------------------------*/
+static int
+ugen_check_request(struct usb_device *udev, struct usb_device_request *req)
+{
+ struct usb_endpoint *ep;
+ int error;
+
+ /*
+ * Avoid requests that would damage the bus integrity:
+ */
+ if (((req->bmRequestType == UT_WRITE_DEVICE) &&
+ (req->bRequest == UR_SET_ADDRESS)) ||
+ ((req->bmRequestType == UT_WRITE_DEVICE) &&
+ (req->bRequest == UR_SET_CONFIG)) ||
+ ((req->bmRequestType == UT_WRITE_INTERFACE) &&
+ (req->bRequest == UR_SET_INTERFACE))) {
+ /*
+ * Only privileged users may issue these requests,
+ * which can still be useful for testing USB drivers.
+ */
+ error = priv_check(curthread, PRIV_DRIVER);
+ if (error) {
+ return (error);
+ }
+ }
+ /*
+ * Special case - handle clearing of stall
+ */
+ if (req->bmRequestType == UT_WRITE_ENDPOINT) {
+
+ ep = usbd_get_ep_by_addr(udev, req->wIndex[0]);
+ if (ep == NULL) {
+ return (EINVAL);
+ }
+ if ((req->bRequest == UR_CLEAR_FEATURE) &&
+ (UGETW(req->wValue) == UF_ENDPOINT_HALT)) {
+ usbd_clear_data_toggle(udev, ep);
+ }
+ }
+ /* TODO: add more checks to verify the interface index */
+
+ return (0);
+}
+
+int
+ugen_do_request(struct usb_fifo *f, struct usb_ctl_request *ur)
+{
+ int error;
+ uint16_t len;
+ uint16_t actlen;
+
+ if (ugen_check_request(f->udev, &ur->ucr_request)) {
+ return (EPERM);
+ }
+ len = UGETW(ur->ucr_request.wLength);
+
+ /* check if "ucr_data" is valid */
+ if (len != 0) {
+ if (ur->ucr_data == NULL) {
+ return (EFAULT);
+ }
+ }
+ /* do the USB request */
+ error = usbd_do_request_flags
+ (f->udev, NULL, &ur->ucr_request, ur->ucr_data,
+ (ur->ucr_flags & USB_SHORT_XFER_OK) |
+ USB_USER_DATA_PTR, &actlen,
+ USB_DEFAULT_TIMEOUT);
+
+ ur->ucr_actlen = actlen;
+
+ if (error) {
+ error = EIO;
+ }
+ return (error);
+}
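+
+/*
+ * Example (illustrative sketch): issuing a synchronous control
+ * request from userland through USB_DO_REQUEST, which is serviced by
+ * ugen_do_request() above.  This one reads the standard device
+ * descriptor; headers as in the earlier sketch.
+ */
+#if 0
+static int
+example_get_device_desc(int fd, struct usb_device_descriptor *ddesc)
+{
+ struct usb_ctl_request ur;
+
+ memset(&ur, 0, sizeof(ur));
+ ur.ucr_request.bmRequestType = UT_READ_DEVICE;
+ ur.ucr_request.bRequest = UR_GET_DESCRIPTOR;
+ USETW2(ur.ucr_request.wValue, UDESC_DEVICE, 0);
+ USETW(ur.ucr_request.wIndex, 0);
+ USETW(ur.ucr_request.wLength, sizeof(*ddesc));
+ ur.ucr_data = ddesc;
+ ur.ucr_flags = USB_SHORT_XFER_OK;
+
+ if (ioctl(fd, USB_DO_REQUEST, &ur) < 0)
+ return (-1);
+
+ return (ur.ucr_actlen); /* actual transfer length */
+}
+#endif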
+
+/*------------------------------------------------------------------------*
+ * ugen_re_enumerate
+ *------------------------------------------------------------------------*/
+static int
+ugen_re_enumerate(struct usb_fifo *f)
+{
+ struct usb_device *udev = f->udev;
+ int error;
+
+ /*
+ * This request can be useful for testing USB drivers:
+ */
+ error = priv_check(curthread, PRIV_DRIVER);
+ if (error) {
+ return (error);
+ }
+ if (udev->flags.usb_mode != USB_MODE_HOST) {
+ /* not possible in device side mode */
+ return (ENOTTY);
+ }
+ /* make sure all FIFOs are gone */
+ /* else there can be a deadlock */
+ if (ugen_fs_uninit(f)) {
+ /* ignore any errors */
+ DPRINTFN(6, "no FIFOs\n");
+ }
+ if (udev->re_enumerate_wait == 0) {
+ udev->re_enumerate_wait = 1;
+ usb_needs_explore(udev->bus, 0);
+ }
+ return (0);
+}
+
+int
+ugen_fs_uninit(struct usb_fifo *f)
+{
+ if (f->fs_xfer == NULL) {
+ return (EINVAL);
+ }
+ usbd_transfer_unsetup(f->fs_xfer, f->fs_ep_max);
+ free(f->fs_xfer, M_USB);
+ f->fs_xfer = NULL;
+ f->fs_ep_max = 0;
+ f->fs_ep_ptr = NULL;
+ f->flag_iscomplete = 0;
+ usb_fifo_free_buffer(f);
+ return (0);
+}
+
+static uint8_t
+ugen_fs_get_complete(struct usb_fifo *f, uint8_t *pindex)
+{
+ struct usb_mbuf *m;
+
+ USB_IF_DEQUEUE(&f->used_q, m);
+
+ if (m) {
+ *pindex = *((uint8_t *)(m->cur_data_ptr));
+
+ USB_IF_ENQUEUE(&f->free_q, m);
+
+ return (0); /* success */
+ } else {
+
+ *pindex = 0; /* fix compiler warning */
+
+ f->flag_iscomplete = 0;
+ }
+ return (1); /* failure */
+}
+
+static void
+ugen_fs_set_complete(struct usb_fifo *f, uint8_t index)
+{
+ struct usb_mbuf *m;
+
+ USB_IF_DEQUEUE(&f->free_q, m);
+
+ if (m == NULL) {
+ /* can happen during close */
+ DPRINTF("out of buffers\n");
+ return;
+ }
+ USB_MBUF_RESET(m);
+
+ *((uint8_t *)(m->cur_data_ptr)) = index;
+
+ USB_IF_ENQUEUE(&f->used_q, m);
+
+ f->flag_iscomplete = 1;
+
+ usb_fifo_wakeup(f);
+}
+
+static int
+ugen_fs_copy_in(struct usb_fifo *f, uint8_t ep_index)
+{
+ struct usb_device_request *req;
+ struct usb_xfer *xfer;
+ struct usb_fs_endpoint fs_ep;
+ void *uaddr; /* userland pointer */
+ void *kaddr;
+ usb_frlength_t offset;
+ usb_frlength_t rem;
+ usb_frcount_t n;
+ uint32_t length;
+ int error;
+ uint8_t isread;
+
+ if (ep_index >= f->fs_ep_max) {
+ return (EINVAL);
+ }
+ xfer = f->fs_xfer[ep_index];
+ if (xfer == NULL) {
+ return (EINVAL);
+ }
+ mtx_lock(f->priv_mtx);
+ if (usbd_transfer_pending(xfer)) {
+ mtx_unlock(f->priv_mtx);
+ return (EBUSY); /* should not happen */
+ }
+ mtx_unlock(f->priv_mtx);
+
+ error = copyin(f->fs_ep_ptr +
+ ep_index, &fs_ep, sizeof(fs_ep));
+ if (error) {
+ return (error);
+ }
+ /* security checks */
+
+ if (fs_ep.nFrames > xfer->max_frame_count) {
+ xfer->error = USB_ERR_INVAL;
+ goto complete;
+ }
+ if (fs_ep.nFrames == 0) {
+ xfer->error = USB_ERR_INVAL;
+ goto complete;
+ }
+ error = copyin(fs_ep.ppBuffer,
+ &uaddr, sizeof(uaddr));
+ if (error) {
+ return (error);
+ }
+ /* reset first frame */
+ usbd_xfer_set_frame_offset(xfer, 0, 0);
+
+ if (xfer->flags_int.control_xfr) {
+
+ req = xfer->frbuffers[0].buffer;
+
+ error = copyin(fs_ep.pLength,
+ &length, sizeof(length));
+ if (error) {
+ return (error);
+ }
+ if (length != sizeof(*req)) {
+ xfer->error = USB_ERR_INVAL;
+ goto complete;
+ }
+ if (length != 0) {
+ error = copyin(uaddr, req, length);
+ if (error) {
+ return (error);
+ }
+ }
+ if (ugen_check_request(f->udev, req)) {
+ xfer->error = USB_ERR_INVAL;
+ goto complete;
+ }
+ usbd_xfer_set_frame_len(xfer, 0, length);
+
+ /* Host mode only ! */
+ if ((req->bmRequestType &
+ (UT_READ | UT_WRITE)) == UT_READ) {
+ isread = 1;
+ } else {
+ isread = 0;
+ }
+ n = 1;
+ offset = sizeof(*req);
+
+ } else {
+ /* Device and Host mode */
+ if (USB_GET_DATA_ISREAD(xfer)) {
+ isread = 1;
+ } else {
+ isread = 0;
+ }
+ n = 0;
+ offset = 0;
+ }
+
+ rem = usbd_xfer_max_len(xfer);
+ xfer->nframes = fs_ep.nFrames;
+ xfer->timeout = fs_ep.timeout;
+ if (xfer->timeout > 65535) {
+ xfer->timeout = 65535;
+ }
+ if (fs_ep.flags & USB_FS_FLAG_SINGLE_SHORT_OK)
+ xfer->flags.short_xfer_ok = 1;
+ else
+ xfer->flags.short_xfer_ok = 0;
+
+ if (fs_ep.flags & USB_FS_FLAG_MULTI_SHORT_OK)
+ xfer->flags.short_frames_ok = 1;
+ else
+ xfer->flags.short_frames_ok = 0;
+
+ if (fs_ep.flags & USB_FS_FLAG_FORCE_SHORT)
+ xfer->flags.force_short_xfer = 1;
+ else
+ xfer->flags.force_short_xfer = 0;
+
+ if (fs_ep.flags & USB_FS_FLAG_CLEAR_STALL)
+ usbd_xfer_set_stall(xfer);
+ else
+ xfer->flags.stall_pipe = 0;
+
+ for (; n != xfer->nframes; n++) {
+
+ error = copyin(fs_ep.pLength + n,
+ &length, sizeof(length));
+ if (error) {
+ break;
+ }
+ usbd_xfer_set_frame_len(xfer, n, length);
+
+ if (length > rem) {
+ xfer->error = USB_ERR_INVAL;
+ goto complete;
+ }
+ rem -= length;
+
+ if (!isread) {
+
+ /* we need to know the source buffer */
+ error = copyin(fs_ep.ppBuffer + n,
+ &uaddr, sizeof(uaddr));
+ if (error) {
+ break;
+ }
+ if (xfer->flags_int.isochronous_xfr) {
+ /* get kernel buffer address */
+ kaddr = xfer->frbuffers[0].buffer;
+ kaddr = USB_ADD_BYTES(kaddr, offset);
+ } else {
+ /* set current frame offset */
+ usbd_xfer_set_frame_offset(xfer, offset, n);
+
+ /* get kernel buffer address */
+ kaddr = xfer->frbuffers[n].buffer;
+ }
+
+ /* move data */
+ error = copyin(uaddr, kaddr, length);
+ if (error) {
+ break;
+ }
+ }
+ offset += length;
+ }
+ return (error);
+
+complete:
+ mtx_lock(f->priv_mtx);
+ ugen_fs_set_complete(f, ep_index);
+ mtx_unlock(f->priv_mtx);
+ return (0);
+}
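+
+/*
+ * Illustrative sketch of the userland layout consumed by
+ * ugen_fs_copy_in() above: "ppBuffer" and "pLength" point into
+ * arrays with one buffer pointer and one length per frame.  The
+ * frame count below is hypothetical.
+ */
+#if 0
+#define N_FRAMES 8 /* hypothetical, <= max_frames from USB_FS_OPEN */
+
+static void
+example_fill_frames(struct usb_fs_endpoint *fs_ep,
+ void **frame_buffers, uint32_t *frame_lengths)
+{
+ /* one entry per frame in both arrays */
+ fs_ep->ppBuffer = frame_buffers;
+ fs_ep->pLength = frame_lengths;
+ fs_ep->nFrames = N_FRAMES;
+}
+#endif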
+
+static int
+ugen_fs_copy_out(struct usb_fifo *f, uint8_t ep_index)
+{
+ struct usb_device_request *req;
+ struct usb_xfer *xfer;
+ struct usb_fs_endpoint fs_ep;
+ struct usb_fs_endpoint *fs_ep_uptr; /* userland ptr */
+ void *uaddr; /* userland ptr */
+ void *kaddr;
+ usb_frlength_t offset;
+ usb_frlength_t rem;
+ usb_frcount_t n;
+ uint32_t length;
+ uint32_t temp;
+ int error;
+ uint8_t isread;
+
+ if (ep_index >= f->fs_ep_max)
+ return (EINVAL);
+
+ xfer = f->fs_xfer[ep_index];
+ if (xfer == NULL)
+ return (EINVAL);
+
+ mtx_lock(f->priv_mtx);
+ if (usbd_transfer_pending(xfer)) {
+ mtx_unlock(f->priv_mtx);
+ return (EBUSY); /* should not happen */
+ }
+ mtx_unlock(f->priv_mtx);
+
+ fs_ep_uptr = f->fs_ep_ptr + ep_index;
+ error = copyin(fs_ep_uptr, &fs_ep, sizeof(fs_ep));
+ if (error) {
+ return (error);
+ }
+ fs_ep.status = xfer->error;
+ fs_ep.aFrames = xfer->aframes;
+ fs_ep.isoc_time_complete = xfer->isoc_time_complete;
+ if (xfer->error) {
+ goto complete;
+ }
+ if (xfer->flags_int.control_xfr) {
+ req = xfer->frbuffers[0].buffer;
+
+ /* Host mode only ! */
+ if ((req->bmRequestType & (UT_READ | UT_WRITE)) == UT_READ) {
+ isread = 1;
+ } else {
+ isread = 0;
+ }
+ if (xfer->nframes == 0)
+ n = 0; /* should never happen */
+ else
+ n = 1;
+ } else {
+ /* Device and Host mode */
+ if (USB_GET_DATA_ISREAD(xfer)) {
+ isread = 1;
+ } else {
+ isread = 0;
+ }
+ n = 0;
+ }
+
+ /* Update lengths and copy out data */
+
+ rem = usbd_xfer_max_len(xfer);
+ offset = 0;
+
+ for (; n != xfer->nframes; n++) {
+
+ /* get initial length into "temp" */
+ error = copyin(fs_ep.pLength + n,
+ &temp, sizeof(temp));
+ if (error) {
+ return (error);
+ }
+ if (temp > rem) {
+ /* the userland length has been corrupted */
+ DPRINTF("corrupt userland length "
+ "%u > %u\n", temp, rem);
+ fs_ep.status = USB_ERR_INVAL;
+ goto complete;
+ }
+ rem -= temp;
+
+ /* get actual transfer length */
+ length = xfer->frlengths[n];
+ if (length > temp) {
+ /* data overflow */
+ fs_ep.status = USB_ERR_INVAL;
+ DPRINTF("data overflow %u > %u\n",
+ length, temp);
+ goto complete;
+ }
+ if (isread) {
+
+ /* we need to know the destination buffer */
+ error = copyin(fs_ep.ppBuffer + n,
+ &uaddr, sizeof(uaddr));
+ if (error) {
+ return (error);
+ }
+ if (xfer->flags_int.isochronous_xfr) {
+ /* only one frame buffer */
+ kaddr = USB_ADD_BYTES(
+ xfer->frbuffers[0].buffer, offset);
+ } else {
+ /* multiple frame buffers */
+ kaddr = xfer->frbuffers[n].buffer;
+ }
+
+ /* move data */
+ error = copyout(kaddr, uaddr, length);
+ if (error) {
+ return (error);
+ }
+ }
+ /*
+ * Update offset according to initial length, which is
+ * needed by isochronous transfers!
+ */
+ offset += temp;
+
+ /* update length */
+ error = copyout(&length,
+ fs_ep.pLength + n, sizeof(length));
+ if (error) {
+ return (error);
+ }
+ }
+
+complete:
+ /* update "aFrames" */
+ error = copyout(&fs_ep.aFrames, &fs_ep_uptr->aFrames,
+ sizeof(fs_ep.aFrames));
+ if (error)
+ goto done;
+
+ /* update "isoc_time_complete" */
+ error = copyout(&fs_ep.isoc_time_complete,
+ &fs_ep_uptr->isoc_time_complete,
+ sizeof(fs_ep.isoc_time_complete));
+ if (error)
+ goto done;
+ /* update "status" */
+ error = copyout(&fs_ep.status, &fs_ep_uptr->status,
+ sizeof(fs_ep.status));
+done:
+ return (error);
+}
+
+static uint8_t
+ugen_fifo_in_use(struct usb_fifo *f, int fflags)
+{
+ struct usb_fifo *f_rx;
+ struct usb_fifo *f_tx;
+
+ f_rx = f->udev->fifo[(f->fifo_index & ~1) + USB_FIFO_RX];
+ f_tx = f->udev->fifo[(f->fifo_index & ~1) + USB_FIFO_TX];
+
+ if ((fflags & FREAD) && f_rx &&
+ (f_rx->xfer[0] || f_rx->xfer[1])) {
+ return (1); /* RX FIFO in use */
+ }
+ if ((fflags & FWRITE) && f_tx &&
+ (f_tx->xfer[0] || f_tx->xfer[1])) {
+ return (1); /* TX FIFO in use */
+ }
+ return (0); /* not in use */
+}
+
+static int
+ugen_ioctl(struct usb_fifo *f, u_long cmd, void *addr, int fflags)
+{
+ struct usb_config usb_config[1];
+ struct usb_device_request req;
+ union {
+ struct usb_fs_complete *pcomp;
+ struct usb_fs_start *pstart;
+ struct usb_fs_stop *pstop;
+ struct usb_fs_open *popen;
+ struct usb_fs_close *pclose;
+ struct usb_fs_clear_stall_sync *pstall;
+ void *addr;
+ } u;
+ struct usb_endpoint *ep;
+ struct usb_endpoint_descriptor *ed;
+ int error = 0;
+ uint8_t iface_index;
+ uint8_t isread;
+ uint8_t ep_index;
+
+ u.addr = addr;
+
+ DPRINTFN(6, "cmd=0x%08lx\n", cmd);
+
+ switch (cmd) {
+ case USB_FS_COMPLETE:
+ mtx_lock(f->priv_mtx);
+ error = ugen_fs_get_complete(f, &ep_index);
+ mtx_unlock(f->priv_mtx);
+
+ if (error) {
+ error = EBUSY;
+ break;
+ }
+ u.pcomp->ep_index = ep_index;
+ error = ugen_fs_copy_out(f, u.pcomp->ep_index);
+ break;
+
+ case USB_FS_START:
+ error = ugen_fs_copy_in(f, u.pstart->ep_index);
+ if (error) {
+ break;
+ }
+ mtx_lock(f->priv_mtx);
+ usbd_transfer_start(f->fs_xfer[u.pstart->ep_index]);
+ mtx_unlock(f->priv_mtx);
+ break;
+
+ case USB_FS_STOP:
+ if (u.pstop->ep_index >= f->fs_ep_max) {
+ error = EINVAL;
+ break;
+ }
+ mtx_lock(f->priv_mtx);
+ usbd_transfer_stop(f->fs_xfer[u.pstop->ep_index]);
+ mtx_unlock(f->priv_mtx);
+ break;
+
+ case USB_FS_OPEN:
+ if (u.popen->ep_index >= f->fs_ep_max) {
+ error = EINVAL;
+ break;
+ }
+ if (f->fs_xfer[u.popen->ep_index] != NULL) {
+ error = EBUSY;
+ break;
+ }
+ if (u.popen->max_bufsize > USB_FS_MAX_BUFSIZE) {
+ u.popen->max_bufsize = USB_FS_MAX_BUFSIZE;
+ }
+ if (u.popen->max_frames > USB_FS_MAX_FRAMES) {
+ u.popen->max_frames = USB_FS_MAX_FRAMES;
+ break;
+ }
+ if (u.popen->max_frames == 0) {
+ error = EINVAL;
+ break;
+ }
+ ep = usbd_get_ep_by_addr(f->udev, u.popen->ep_no);
+ if (ep == NULL) {
+ error = EINVAL;
+ break;
+ }
+ ed = ep->edesc;
+ if (ed == NULL) {
+ error = ENXIO;
+ break;
+ }
+ iface_index = ep->iface_index;
+
+ bzero(usb_config, sizeof(usb_config));
+
+ usb_config[0].type = ed->bmAttributes & UE_XFERTYPE;
+ usb_config[0].endpoint = ed->bEndpointAddress & UE_ADDR;
+ usb_config[0].direction = ed->bEndpointAddress & (UE_DIR_OUT | UE_DIR_IN);
+ usb_config[0].interval = USB_DEFAULT_INTERVAL;
+ usb_config[0].flags.proxy_buffer = 1;
+ usb_config[0].callback = &ugen_ctrl_fs_callback;
+ usb_config[0].timeout = 0; /* no timeout */
+ usb_config[0].frames = u.popen->max_frames;
+ usb_config[0].bufsize = u.popen->max_bufsize;
+ usb_config[0].usb_mode = USB_MODE_DUAL; /* both modes */
+
+ if (usb_config[0].type == UE_CONTROL) {
+ if (f->udev->flags.usb_mode != USB_MODE_HOST) {
+ error = EINVAL;
+ break;
+ }
+ } else {
+
+ isread = ((usb_config[0].endpoint &
+ (UE_DIR_IN | UE_DIR_OUT)) == UE_DIR_IN);
+
+ if (f->udev->flags.usb_mode != USB_MODE_HOST) {
+ isread = !isread;
+ }
+ /* check permissions */
+ if (isread) {
+ if (!(fflags & FREAD)) {
+ error = EPERM;
+ break;
+ }
+ } else {
+ if (!(fflags & FWRITE)) {
+ error = EPERM;
+ break;
+ }
+ }
+ }
+ error = usbd_transfer_setup(f->udev, &iface_index,
+ f->fs_xfer + u.popen->ep_index, usb_config, 1,
+ f, f->priv_mtx);
+ if (error == 0) {
+ /* update maximums */
+ u.popen->max_packet_length =
+ f->fs_xfer[u.popen->ep_index]->max_frame_size;
+ u.popen->max_bufsize =
+ f->fs_xfer[u.popen->ep_index]->max_data_length;
+ f->fs_xfer[u.popen->ep_index]->priv_fifo =
+ ((uint8_t *)0) + u.popen->ep_index;
+ } else {
+ error = ENOMEM;
+ }
+ break;
+
+ case USB_FS_CLOSE:
+ if (u.pclose->ep_index >= f->fs_ep_max) {
+ error = EINVAL;
+ break;
+ }
+ if (f->fs_xfer[u.pclose->ep_index] == NULL) {
+ error = EINVAL;
+ break;
+ }
+ usbd_transfer_unsetup(f->fs_xfer + u.pclose->ep_index, 1);
+ break;
+
+ case USB_FS_CLEAR_STALL_SYNC:
+ if (u.pstall->ep_index >= f->fs_ep_max) {
+ error = EINVAL;
+ break;
+ }
+ if (f->fs_xfer[u.pstall->ep_index] == NULL) {
+ error = EINVAL;
+ break;
+ }
+ if (f->udev->flags.usb_mode != USB_MODE_HOST) {
+ error = EINVAL;
+ break;
+ }
+ mtx_lock(f->priv_mtx);
+ error = usbd_transfer_pending(f->fs_xfer[u.pstall->ep_index]);
+ mtx_unlock(f->priv_mtx);
+
+ if (error) {
+ return (EBUSY);
+ }
+ ep = f->fs_xfer[u.pstall->ep_index]->endpoint;
+
+ /* setup a clear-stall packet */
+ req.bmRequestType = UT_WRITE_ENDPOINT;
+ req.bRequest = UR_CLEAR_FEATURE;
+ USETW(req.wValue, UF_ENDPOINT_HALT);
+ req.wIndex[0] = ep->edesc->bEndpointAddress;
+ req.wIndex[1] = 0;
+ USETW(req.wLength, 0);
+
+ error = usbd_do_request(f->udev, NULL, &req, NULL);
+ if (error == 0) {
+ usbd_clear_data_toggle(f->udev, ep);
+ } else {
+ error = ENXIO;
+ }
+ break;
+
+ default:
+ error = ENOIOCTL;
+ break;
+ }
+
+ DPRINTFN(6, "error=%d\n", error);
+
+ return (error);
+}
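+
+/*
+ * Example (illustrative sketch): the full fast-stream lifecycle as
+ * driven from userland by the ioctls handled above.  The endpoint
+ * address, buffer size and frame count are hypothetical and error
+ * handling is omitted for brevity; headers as in the earlier sketch.
+ */
+#if 0
+static void
+example_fs_lifecycle(int fd)
+{
+ static struct usb_fs_endpoint fs_ep[1];
+ static uint8_t buf[512];
+ static void *pbuf = buf;
+ static uint32_t plen = sizeof(buf);
+ struct usb_fs_init init;
+ struct usb_fs_open popen;
+ struct usb_fs_start start;
+ struct usb_fs_complete comp;
+ struct usb_fs_uninit uninit;
+
+ /* register the endpoint table */
+ memset(&init, 0, sizeof(init));
+ init.pEndpoints = fs_ep;
+ init.ep_index_max = 1;
+ ioctl(fd, USB_FS_INIT, &init);
+
+ /* open a bulk IN endpoint (address hypothetical) */
+ memset(&popen, 0, sizeof(popen));
+ popen.ep_index = 0;
+ popen.ep_no = 0x81;
+ popen.max_bufsize = sizeof(buf);
+ popen.max_frames = 1;
+ ioctl(fd, USB_FS_OPEN, &popen);
+
+ /* describe a single frame and start the transfer */
+ memset(&fs_ep[0], 0, sizeof(fs_ep[0]));
+ fs_ep[0].ppBuffer = &pbuf;
+ fs_ep[0].pLength = &plen;
+ fs_ep[0].nFrames = 1;
+ fs_ep[0].timeout = 1000; /* milliseconds */
+ start.ep_index = 0;
+ ioctl(fd, USB_FS_START, &start);
+
+ /*
+ * Collect the completion; USB_FS_COMPLETE returns EBUSY while
+ * nothing has completed yet, so real code would poll() first.
+ */
+ memset(&comp, 0, sizeof(comp));
+ while (ioctl(fd, USB_FS_COMPLETE, &comp) < 0)
+ ;
+
+ /* tear everything down; "dummy" must be zero */
+ memset(&uninit, 0, sizeof(uninit));
+ ioctl(fd, USB_FS_UNINIT, &uninit);
+}
+#endif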
+
+static int
+ugen_set_short_xfer(struct usb_fifo *f, void *addr)
+{
+ uint8_t t;
+
+ if (*(int *)addr)
+ t = 1;
+ else
+ t = 0;
+
+ if (f->flag_short == t) {
+ /* same value as before - accept */
+ return (0);
+ }
+ if (f->xfer[0] || f->xfer[1]) {
+ /* cannot change this during transfer */
+ return (EBUSY);
+ }
+ f->flag_short = t;
+ return (0);
+}
+
+static int
+ugen_set_timeout(struct usb_fifo *f, void *addr)
+{
+ f->timeout = *(int *)addr;
+ if (f->timeout > 65535) {
+ /* limit user input */
+ f->timeout = 65535;
+ }
+ return (0);
+}
+
+static int
+ugen_get_frame_size(struct usb_fifo *f, void *addr)
+{
+ if (f->xfer[0]) {
+ *(int *)addr = f->xfer[0]->max_frame_size;
+ } else {
+ return (EINVAL);
+ }
+ return (0);
+}
+
+static int
+ugen_set_buffer_size(struct usb_fifo *f, void *addr)
+{
+ usb_frlength_t t;
+
+ if (*(int *)addr < 0)
+ t = 0; /* use "wMaxPacketSize" */
+ else if (*(int *)addr < (256 * 1024))
+ t = *(int *)addr;
+ else
+ t = 256 * 1024;
+
+ if (f->bufsize == t) {
+ /* same value as before - accept */
+ return (0);
+ }
+ if (f->xfer[0] || f->xfer[1]) {
+ /* cannot change this during transfer */
+ return (EBUSY);
+ }
+ f->bufsize = t;
+ return (0);
+}
+
+static int
+ugen_get_buffer_size(struct usb_fifo *f, void *addr)
+{
+ *(int *)addr = f->bufsize;
+ return (0);
+}
+
+static int
+ugen_get_iface_desc(struct usb_fifo *f,
+ struct usb_interface_descriptor *idesc)
+{
+ struct usb_interface *iface;
+
+ iface = usbd_get_iface(f->udev, f->iface_index);
+ if (iface && iface->idesc) {
+ *idesc = *(iface->idesc);
+ } else {
+ return (EIO);
+ }
+ return (0);
+}
+
+static int
+ugen_get_endpoint_desc(struct usb_fifo *f,
+ struct usb_endpoint_descriptor *ed)
+{
+ struct usb_endpoint *ep;
+
+ ep = usb_fifo_softc(f);
+
+ if (ep && ep->edesc) {
+ *ed = *ep->edesc;
+ } else {
+ return (EINVAL);
+ }
+ return (0);
+}
+
+static int
+ugen_set_power_mode(struct usb_fifo *f, int mode)
+{
+ struct usb_device *udev = f->udev;
+ int err;
+ uint8_t old_mode;
+
+ if ((udev == NULL) ||
+ (udev->parent_hub == NULL)) {
+ return (EINVAL);
+ }
+ err = priv_check(curthread, PRIV_DRIVER);
+ if (err)
+ return (err);
+
+ /* get old power mode */
+ old_mode = udev->power_mode;
+
+ /* if no change, then just return */
+ if (old_mode == mode)
+ return (0);
+
+ switch (mode) {
+ case USB_POWER_MODE_OFF:
+ /* get the device unconfigured */
+ err = ugen_set_config(f, USB_UNCONFIG_INDEX);
+ if (err) {
+ DPRINTFN(0, "Could not unconfigure "
+ "device (ignored)\n");
+ }
+
+ /* clear port enable */
+ err = usbd_req_clear_port_feature(udev->parent_hub,
+ NULL, udev->port_no, UHF_PORT_ENABLE);
+ break;
+
+ case USB_POWER_MODE_ON:
+ case USB_POWER_MODE_SAVE:
+ break;
+
+ case USB_POWER_MODE_RESUME:
+#if USB_HAVE_POWERD
+ /* let USB-powerd handle resume */
+ USB_BUS_LOCK(udev->bus);
+ udev->pwr_save.write_refs++;
+ udev->pwr_save.last_xfer_time = ticks;
+ USB_BUS_UNLOCK(udev->bus);
+
+ /* set new power mode */
+ usbd_set_power_mode(udev, USB_POWER_MODE_SAVE);
+
+ /* wait for resume to complete */
+ usb_pause_mtx(NULL, hz / 4);
+
+ /* clear write reference */
+ USB_BUS_LOCK(udev->bus);
+ udev->pwr_save.write_refs--;
+ USB_BUS_UNLOCK(udev->bus);
+#endif
+ mode = USB_POWER_MODE_SAVE;
+ break;
+
+ case USB_POWER_MODE_SUSPEND:
+#if USB_HAVE_POWERD
+ /* let USB-powerd handle suspend */
+ USB_BUS_LOCK(udev->bus);
+ udev->pwr_save.last_xfer_time = ticks - (256 * hz);
+ USB_BUS_UNLOCK(udev->bus);
+#endif
+ mode = USB_POWER_MODE_SAVE;
+ break;
+
+ default:
+ return (EINVAL);
+ }
+
+ if (err)
+ return (ENXIO); /* I/O failure */
+
+ /* if we are powered off we need to re-enumerate first */
+ if (old_mode == USB_POWER_MODE_OFF) {
+ if (udev->flags.usb_mode == USB_MODE_HOST) {
+ if (udev->re_enumerate_wait == 0)
+ udev->re_enumerate_wait = 1;
+ }
+ /* set power mode will wake up the explore thread */
+ }
+
+ /* set new power mode */
+ usbd_set_power_mode(udev, mode);
+
+ return (0); /* success */
+}
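+
+/*
+ * Example (illustrative sketch): selecting a power mode from
+ * userland; the request is handled by ugen_set_power_mode() above
+ * and requires PRIV_DRIVER privileges.
+ */
+#if 0
+static int
+example_suspend_device(int fd)
+{
+ int mode = USB_POWER_MODE_SUSPEND;
+
+ return (ioctl(fd, USB_SET_POWER_MODE, &mode));
+}
+#endif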
+
+static int
+ugen_get_power_mode(struct usb_fifo *f)
+{
+ struct usb_device *udev = f->udev;
+
+ if (udev == NULL)
+ return (USB_POWER_MODE_ON);
+
+ return (udev->power_mode);
+}
+
+static int
+ugen_do_port_feature(struct usb_fifo *f, uint8_t port_no,
+ uint8_t set, uint16_t feature)
+{
+ struct usb_device *udev = f->udev;
+ struct usb_hub *hub;
+ int err;
+
+ err = priv_check(curthread, PRIV_DRIVER);
+ if (err) {
+ return (err);
+ }
+ if (port_no == 0) {
+ return (EINVAL);
+ }
+ if ((udev == NULL) ||
+ (udev->hub == NULL)) {
+ return (EINVAL);
+ }
+ hub = udev->hub;
+
+ if (port_no > hub->nports) {
+ return (EINVAL);
+ }
+ if (set)
+ err = usbd_req_set_port_feature(udev,
+ NULL, port_no, feature);
+ else
+ err = usbd_req_clear_port_feature(udev,
+ NULL, port_no, feature);
+
+ if (err)
+ return (ENXIO); /* failure */
+
+ return (0); /* success */
+}
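+
+/*
+ * Example (illustrative sketch): disabling a hub port from userland
+ * via the handler above.  "fd" must refer to a hub device and the
+ * caller needs PRIV_DRIVER privileges; the port number is
+ * hypothetical.
+ */
+#if 0
+static int
+example_disable_port(int fd)
+{
+ int port = 1; /* first port, hypothetical */
+
+ return (ioctl(fd, USB_SET_PORT_DISABLE, &port));
+}
+#endif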
+
+static int
+ugen_iface_ioctl(struct usb_fifo *f, u_long cmd, void *addr, int fflags)
+{
+ struct usb_fifo *f_rx;
+ struct usb_fifo *f_tx;
+ int error = 0;
+
+ f_rx = f->udev->fifo[(f->fifo_index & ~1) + USB_FIFO_RX];
+ f_tx = f->udev->fifo[(f->fifo_index & ~1) + USB_FIFO_TX];
+
+ switch (cmd) {
+ case USB_SET_RX_SHORT_XFER:
+ if (fflags & FREAD) {
+ error = ugen_set_short_xfer(f_rx, addr);
+ } else {
+ error = EINVAL;
+ }
+ break;
+
+ case USB_SET_TX_FORCE_SHORT:
+ if (fflags & FWRITE) {
+ error = ugen_set_short_xfer(f_tx, addr);
+ } else {
+ error = EINVAL;
+ }
+ break;
+
+ case USB_SET_RX_TIMEOUT:
+ if (fflags & FREAD) {
+ error = ugen_set_timeout(f_rx, addr);
+ } else {
+ error = EINVAL;
+ }
+ break;
+
+ case USB_SET_TX_TIMEOUT:
+ if (fflags & FWRITE) {
+ error = ugen_set_timeout(f_tx, addr);
+ } else {
+ error = EINVAL;
+ }
+ break;
+
+ case USB_GET_RX_FRAME_SIZE:
+ if (fflags & FREAD) {
+ error = ugen_get_frame_size(f_rx, addr);
+ } else {
+ error = EINVAL;
+ }
+ break;
+
+ case USB_GET_TX_FRAME_SIZE:
+ if (fflags & FWRITE) {
+ error = ugen_get_frame_size(f_tx, addr);
+ } else {
+ error = EINVAL;
+ }
+ break;
+
+ case USB_SET_RX_BUFFER_SIZE:
+ if (fflags & FREAD) {
+ error = ugen_set_buffer_size(f_rx, addr);
+ } else {
+ error = EINVAL;
+ }
+ break;
+
+ case USB_SET_TX_BUFFER_SIZE:
+ if (fflags & FWRITE) {
+ error = ugen_set_buffer_size(f_tx, addr);
+ } else {
+ error = EINVAL;
+ }
+ break;
+
+ case USB_GET_RX_BUFFER_SIZE:
+ if (fflags & FREAD) {
+ error = ugen_get_buffer_size(f_rx, addr);
+ } else {
+ error = EINVAL;
+ }
+ break;
+
+ case USB_GET_TX_BUFFER_SIZE:
+ if (fflags & FWRITE) {
+ error = ugen_get_buffer_size(f_tx, addr);
+ } else {
+ error = EINVAL;
+ }
+ break;
+
+ case USB_GET_RX_INTERFACE_DESC:
+ if (fflags & FREAD) {
+ error = ugen_get_iface_desc(f_rx, addr);
+ } else {
+ error = EINVAL;
+ }
+ break;
+
+ case USB_GET_TX_INTERFACE_DESC:
+ if (fflags & FWRITE) {
+ error = ugen_get_iface_desc(f_tx, addr);
+ } else {
+ error = EINVAL;
+ }
+ break;
+
+ case USB_GET_RX_ENDPOINT_DESC:
+ if (fflags & FREAD) {
+ error = ugen_get_endpoint_desc(f_rx, addr);
+ } else {
+ error = EINVAL;
+ }
+ break;
+
+ case USB_GET_TX_ENDPOINT_DESC:
+ if (fflags & FWRITE) {
+ error = ugen_get_endpoint_desc(f_tx, addr);
+ } else {
+ error = EINVAL;
+ }
+ break;
+
+ case USB_SET_RX_STALL_FLAG:
+ if ((fflags & FREAD) && (*(int *)addr)) {
+ f_rx->flag_stall = 1;
+ }
+ break;
+
+ case USB_SET_TX_STALL_FLAG:
+ if ((fflags & FWRITE) && (*(int *)addr)) {
+ f_tx->flag_stall = 1;
+ }
+ break;
+
+ default:
+ error = ENOIOCTL;
+ break;
+ }
+ return (error);
+}
+
+static int
+ugen_ioctl_post(struct usb_fifo *f, u_long cmd, void *addr, int fflags)
+{
+ union {
+ struct usb_interface_descriptor *idesc;
+ struct usb_alt_interface *ai;
+ struct usb_device_descriptor *ddesc;
+ struct usb_config_descriptor *cdesc;
+ struct usb_device_stats *stat;
+ struct usb_fs_init *pinit;
+ struct usb_fs_uninit *puninit;
+ uint32_t *ptime;
+ void *addr;
+ int *pint;
+ } u;
+ struct usb_device_descriptor *dtemp;
+ struct usb_config_descriptor *ctemp;
+ struct usb_interface *iface;
+ int error = 0;
+ uint8_t n;
+
+ u.addr = addr;
+
+ DPRINTFN(6, "cmd=0x%08lx\n", cmd);
+
+ switch (cmd) {
+ case USB_DISCOVER:
+ usb_needs_explore_all();
+ break;
+
+ case USB_SETDEBUG:
+ if (!(fflags & FWRITE)) {
+ error = EPERM;
+ break;
+ }
+ usb_debug = *(int *)addr;
+ break;
+
+ case USB_GET_CONFIG:
+ *(int *)addr = f->udev->curr_config_index;
+ break;
+
+ case USB_SET_CONFIG:
+ if (!(fflags & FWRITE)) {
+ error = EPERM;
+ break;
+ }
+ error = ugen_set_config(f, *(int *)addr);
+ break;
+
+ case USB_GET_ALTINTERFACE:
+ iface = usbd_get_iface(f->udev,
+ u.ai->uai_interface_index);
+ if (iface && iface->idesc) {
+ u.ai->uai_alt_index = iface->alt_index;
+ } else {
+ error = EINVAL;
+ }
+ break;
+
+ case USB_SET_ALTINTERFACE:
+ if (!(fflags & FWRITE)) {
+ error = EPERM;
+ break;
+ }
+ error = ugen_set_interface(f,
+ u.ai->uai_interface_index, u.ai->uai_alt_index);
+ break;
+
+ case USB_GET_DEVICE_DESC:
+ dtemp = usbd_get_device_descriptor(f->udev);
+ if (!dtemp) {
+ error = EIO;
+ break;
+ }
+ *u.ddesc = *dtemp;
+ break;
+
+ case USB_GET_CONFIG_DESC:
+ ctemp = usbd_get_config_descriptor(f->udev);
+ if (!ctemp) {
+ error = EIO;
+ break;
+ }
+ *u.cdesc = *ctemp;
+ break;
+
+ case USB_GET_FULL_DESC:
+ error = ugen_get_cdesc(f, addr);
+ break;
+
+ case USB_GET_STRING_DESC:
+ error = ugen_get_sdesc(f, addr);
+ break;
+
+ case USB_GET_IFACE_DRIVER:
+ error = ugen_get_iface_driver(f, addr);
+ break;
+
+ case USB_REQUEST:
+ case USB_DO_REQUEST:
+ if (!(fflags & FWRITE)) {
+ error = EPERM;
+ break;
+ }
+ error = ugen_do_request(f, addr);
+ break;
+
+ case USB_DEVICEINFO:
+ case USB_GET_DEVICEINFO:
+ error = usb_gen_fill_deviceinfo(f, addr);
+ break;
+
+ case USB_DEVICESTATS:
+ for (n = 0; n != 4; n++) {
+
+ u.stat->uds_requests_fail[n] =
+ f->udev->bus->stats_err.uds_requests[n];
+
+ u.stat->uds_requests_ok[n] =
+ f->udev->bus->stats_ok.uds_requests[n];
+ }
+ break;
+
+ case USB_DEVICEENUMERATE:
+ error = ugen_re_enumerate(f);
+ break;
+
+ case USB_GET_PLUGTIME:
+ *u.ptime = f->udev->plugtime;
+ break;
+
+ case USB_CLAIM_INTERFACE:
+ case USB_RELEASE_INTERFACE:
+ /* TODO */
+ break;
+
+ case USB_IFACE_DRIVER_ACTIVE:
+
+ n = *u.pint & 0xFF;
+
+ iface = usbd_get_iface(f->udev, n);
+
+ if (iface && iface->subdev)
+ error = 0;
+ else
+ error = ENXIO;
+ break;
+
+ case USB_IFACE_DRIVER_DETACH:
+
+ error = priv_check(curthread, PRIV_DRIVER);
+
+ if (error)
+ break;
+
+ n = *u.pint & 0xFF;
+
+ if (n == USB_IFACE_INDEX_ANY) {
+ error = EINVAL;
+ break;
+ }
+
+ usb_detach_device(f->udev, n, 0);
+ break;
+
+ case USB_SET_POWER_MODE:
+ error = ugen_set_power_mode(f, *u.pint);
+ break;
+
+ case USB_GET_POWER_MODE:
+ *u.pint = ugen_get_power_mode(f);
+ break;
+
+ case USB_SET_PORT_ENABLE:
+ error = ugen_do_port_feature(f,
+ *u.pint, 1, UHF_PORT_ENABLE);
+ break;
+
+ case USB_SET_PORT_DISABLE:
+ error = ugen_do_port_feature(f,
+ *u.pint, 0, UHF_PORT_ENABLE);
+ break;
+
+ case USB_FS_INIT:
+ /* verify input parameters */
+ if (u.pinit->pEndpoints == NULL) {
+ error = EINVAL;
+ break;
+ }
+ if (u.pinit->ep_index_max > 127) {
+ error = EINVAL;
+ break;
+ }
+ if (u.pinit->ep_index_max == 0) {
+ error = EINVAL;
+ break;
+ }
+ if (f->fs_xfer != NULL) {
+ error = EBUSY;
+ break;
+ }
+ if (f->dev_ep_index != 0) {
+ error = EINVAL;
+ break;
+ }
+ if (ugen_fifo_in_use(f, fflags)) {
+ error = EBUSY;
+ break;
+ }
+ error = usb_fifo_alloc_buffer(f, 1, u.pinit->ep_index_max);
+ if (error) {
+ break;
+ }
+ f->fs_xfer = malloc(sizeof(f->fs_xfer[0]) *
+ u.pinit->ep_index_max, M_USB, M_WAITOK | M_ZERO);
+ if (f->fs_xfer == NULL) {
+ usb_fifo_free_buffer(f);
+ error = ENOMEM;
+ break;
+ }
+ f->fs_ep_max = u.pinit->ep_index_max;
+ f->fs_ep_ptr = u.pinit->pEndpoints;
+ break;
+
+ case USB_FS_UNINIT:
+ if (u.puninit->dummy != 0) {
+ error = EINVAL;
+ break;
+ }
+ error = ugen_fs_uninit(f);
+ break;
+
+ default:
+ mtx_lock(f->priv_mtx);
+ error = ugen_iface_ioctl(f, cmd, addr, fflags);
+ mtx_unlock(f->priv_mtx);
+ break;
+ }
+ DPRINTFN(6, "error=%d\n", error);
+ return (error);
+}
+
+static void
+ugen_ctrl_fs_callback(struct usb_xfer *xfer, usb_error_t error)
+{
+ ; /* workaround for a bug in "indent" */
+
+ DPRINTF("st=%u alen=%u aframes=%u\n",
+ USB_GET_STATE(xfer), xfer->actlen, xfer->aframes);
+
+ switch (USB_GET_STATE(xfer)) {
+ case USB_ST_SETUP:
+ usbd_transfer_submit(xfer);
+ break;
+ default:
+ ugen_fs_set_complete(xfer->priv_sc, USB_P2U(xfer->priv_fifo));
+ break;
+ }
+}
+#endif /* USB_HAVE_UGEN */
diff --git a/rtems/freebsd/dev/usb/usb_generic.h b/rtems/freebsd/dev/usb/usb_generic.h
new file mode 100644
index 00000000..17506001
--- /dev/null
+++ b/rtems/freebsd/dev/usb/usb_generic.h
@@ -0,0 +1,33 @@
+/* $FreeBSD$ */
+/*-
+ * Copyright (c) 2008 Hans Petter Selasky. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _USB_GENERIC_HH_
+#define _USB_GENERIC_HH_
+
+extern struct usb_fifo_methods usb_ugen_methods;
+int ugen_do_request(struct usb_fifo *f, struct usb_ctl_request *ur);
+
+#endif /* _USB_GENERIC_HH_ */
diff --git a/rtems/freebsd/dev/usb/usb_handle_request.c b/rtems/freebsd/dev/usb/usb_handle_request.c
new file mode 100644
index 00000000..b2da48df
--- /dev/null
+++ b/rtems/freebsd/dev/usb/usb_handle_request.c
@@ -0,0 +1,807 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/* $FreeBSD$ */
+/*-
+ * Copyright (c) 2008 Hans Petter Selasky. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <rtems/freebsd/sys/stdint.h>
+#include <rtems/freebsd/sys/stddef.h>
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/queue.h>
+#include <rtems/freebsd/sys/types.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/bus.h>
+#include <rtems/freebsd/sys/linker_set.h>
+#include <rtems/freebsd/sys/module.h>
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/mutex.h>
+#include <rtems/freebsd/sys/condvar.h>
+#include <rtems/freebsd/sys/sysctl.h>
+#include <rtems/freebsd/sys/sx.h>
+#include <rtems/freebsd/sys/unistd.h>
+#include <rtems/freebsd/sys/callout.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/priv.h>
+
+#include <rtems/freebsd/dev/usb/usb.h>
+#include <rtems/freebsd/dev/usb/usbdi.h>
+#include <rtems/freebsd/dev/usb/usbdi_util.h>
+#include <rtems/freebsd/local/usb_if.h>
+
+#define USB_DEBUG_VAR usb_debug
+
+#include <rtems/freebsd/dev/usb/usb_core.h>
+#include <rtems/freebsd/dev/usb/usb_process.h>
+#include <rtems/freebsd/dev/usb/usb_busdma.h>
+#include <rtems/freebsd/dev/usb/usb_transfer.h>
+#include <rtems/freebsd/dev/usb/usb_device.h>
+#include <rtems/freebsd/dev/usb/usb_debug.h>
+#include <rtems/freebsd/dev/usb/usb_dynamic.h>
+#include <rtems/freebsd/dev/usb/usb_hub.h>
+
+#include <rtems/freebsd/dev/usb/usb_controller.h>
+#include <rtems/freebsd/dev/usb/usb_bus.h>
+
+/* function prototypes */
+
+static uint8_t usb_handle_get_stall(struct usb_device *, uint8_t);
+static usb_error_t usb_handle_remote_wakeup(struct usb_xfer *, uint8_t);
+static usb_error_t usb_handle_request(struct usb_xfer *);
+static usb_error_t usb_handle_set_config(struct usb_xfer *, uint8_t);
+static usb_error_t usb_handle_set_stall(struct usb_xfer *, uint8_t,
+ uint8_t);
+static usb_error_t usb_handle_iface_request(struct usb_xfer *, void **,
+ uint16_t *, struct usb_device_request, uint16_t,
+ uint8_t);
+
+/*------------------------------------------------------------------------*
+ * usb_handle_request_callback
+ *
+ * This function is the USB callback for generic USB Device control
+ * transfers.
+ *------------------------------------------------------------------------*/
+void
+usb_handle_request_callback(struct usb_xfer *xfer, usb_error_t error)
+{
+ usb_error_t err;
+
+ /* check the current transfer state */
+
+ switch (USB_GET_STATE(xfer)) {
+ case USB_ST_SETUP:
+ case USB_ST_TRANSFERRED:
+
+ /* handle the request */
+ err = usb_handle_request(xfer);
+
+ if (err) {
+
+ if (err == USB_ERR_BAD_CONTEXT) {
+ /* we need to re-setup the control transfer */
+ usb_needs_explore(xfer->xroot->bus, 0);
+ break;
+ }
+ goto tr_restart;
+ }
+ usbd_transfer_submit(xfer);
+ break;
+
+ default:
+ /* check if a control transfer is active */
+ if (xfer->flags_int.control_rem != 0xFFFF) {
+ /* handle the request */
+ err = usb_handle_request(xfer);
+ }
+ if (xfer->error != USB_ERR_CANCELLED) {
+ /* should not happen - try stalling */
+ goto tr_restart;
+ }
+ break;
+ }
+ return;
+
+tr_restart:
+ /*
+ * If a control transfer is active, stall it, and wait for the
+ * next control transfer.
+ */
+ usbd_xfer_set_frame_len(xfer, 0, sizeof(struct usb_device_request));
+ xfer->nframes = 1;
+ xfer->flags.manual_status = 1;
+ xfer->flags.force_short_xfer = 0;
+ usbd_xfer_set_stall(xfer); /* cancel previous transfer, if any */
+ usbd_transfer_submit(xfer);
+}
+
+/*------------------------------------------------------------------------*
+ * usb_handle_set_config
+ *
+ * Returns:
+ * 0: Success
+ * Else: Failure
+ *------------------------------------------------------------------------*/
+static usb_error_t
+usb_handle_set_config(struct usb_xfer *xfer, uint8_t conf_no)
+{
+ struct usb_device *udev = xfer->xroot->udev;
+ usb_error_t err = 0;
+
+ /*
+ * We need to protect against other threads doing probe and
+ * attach:
+ */
+ USB_XFER_UNLOCK(xfer);
+
+ usbd_enum_lock(udev);
+
+ if (conf_no == USB_UNCONFIG_NO) {
+ conf_no = USB_UNCONFIG_INDEX;
+ } else {
+ /*
+ * The relationship between config number and config index
+ * is very simple in our case:
+ */
+ conf_no--;
+ }
+
+ if (usbd_set_config_index(udev, conf_no)) {
+ DPRINTF("set config %d failed\n", conf_no);
+ err = USB_ERR_STALLED;
+ goto done;
+ }
+ if (usb_probe_and_attach(udev, USB_IFACE_INDEX_ANY)) {
+ DPRINTF("probe and attach failed\n");
+ err = USB_ERR_STALLED;
+ goto done;
+ }
+done:
+ usbd_enum_unlock(udev);
+ USB_XFER_LOCK(xfer);
+ return (err);
+}
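+
+/*
+ * Illustration of the configuration number to index mapping used
+ * above:
+ *
+ * bConfigurationValue 0 (USB_UNCONFIG_NO) -> USB_UNCONFIG_INDEX
+ * bConfigurationValue N, N > 0 -> configuration index N - 1
+ */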
+
+static usb_error_t
+usb_check_alt_setting(struct usb_device *udev,
+ struct usb_interface *iface, uint8_t alt_index)
+{
+ uint8_t do_unlock;
+ usb_error_t err = 0;
+
+ /* automatic locking */
+ if (usbd_enum_is_locked(udev)) {
+ do_unlock = 0;
+ } else {
+ do_unlock = 1;
+ usbd_enum_lock(udev);
+ }
+
+ if (alt_index >= usbd_get_no_alts(udev->cdesc, iface->idesc))
+ err = USB_ERR_INVAL;
+
+ if (do_unlock)
+ usbd_enum_unlock(udev);
+
+ return (err);
+}
+
+/*------------------------------------------------------------------------*
+ * usb_handle_iface_request
+ *
+ * Returns:
+ * 0: Success
+ * Else: Failure
+ *------------------------------------------------------------------------*/
+static usb_error_t
+usb_handle_iface_request(struct usb_xfer *xfer,
+ void **ppdata, uint16_t *plen,
+ struct usb_device_request req, uint16_t off, uint8_t state)
+{
+ struct usb_interface *iface;
+ struct usb_interface *iface_parent; /* parent interface */
+ struct usb_device *udev = xfer->xroot->udev;
+ int error;
+ uint8_t iface_index;
+ uint8_t temp_state;
+
+ if ((req.bmRequestType & 0x1F) == UT_INTERFACE) {
+ iface_index = req.wIndex[0]; /* unicast */
+ } else {
+ iface_index = 0; /* broadcast */
+ }
+
+ /*
+ * We need to protect against other threads doing probe and
+ * attach:
+ */
+ USB_XFER_UNLOCK(xfer);
+
+ usbd_enum_lock(udev);
+
+ error = ENXIO;
+
+tr_repeat:
+ iface = usbd_get_iface(udev, iface_index);
+ if ((iface == NULL) ||
+ (iface->idesc == NULL)) {
+ /* end of interfaces or non-existing interface */
+ goto tr_stalled;
+ }
+ /* set initial state */
+
+ temp_state = state;
+
+ /* forward request to interface, if any */
+
+ if ((error != 0) &&
+ (error != ENOTTY) &&
+ (iface->subdev != NULL) &&
+ device_is_attached(iface->subdev)) {
+#if 0
+ DEVMETHOD(usb_handle_request, NULL); /* dummy */
+#endif
+ error = USB_HANDLE_REQUEST(iface->subdev,
+ &req, ppdata, plen,
+ off, &temp_state);
+ }
+ iface_parent = usbd_get_iface(udev, iface->parent_iface_index);
+
+ if ((iface_parent == NULL) ||
+ (iface_parent->idesc == NULL)) {
+ /* non-existing interface */
+ iface_parent = NULL;
+ }
+ /* forward request to parent interface, if any */
+
+ if ((error != 0) &&
+ (error != ENOTTY) &&
+ (iface_parent != NULL) &&
+ (iface_parent->subdev != NULL) &&
+ ((req.bmRequestType & 0x1F) == UT_INTERFACE) &&
+ (iface_parent->subdev != iface->subdev) &&
+ device_is_attached(iface_parent->subdev)) {
+ error = USB_HANDLE_REQUEST(iface_parent->subdev,
+ &req, ppdata, plen, off, &temp_state);
+ }
+ if (error == 0) {
+ /* negatively adjust pointer and length */
+ *ppdata = ((uint8_t *)(*ppdata)) - off;
+ *plen += off;
+
+ if ((state == USB_HR_NOT_COMPLETE) &&
+ (temp_state == USB_HR_COMPLETE_OK))
+ goto tr_short;
+ else
+ goto tr_valid;
+ } else if (error == ENOTTY) {
+ goto tr_stalled;
+ }
+ if ((req.bmRequestType & 0x1F) != UT_INTERFACE) {
+ iface_index++; /* iterate */
+ goto tr_repeat;
+ }
+ if (state != USB_HR_NOT_COMPLETE) {
+ /* we are complete */
+ goto tr_valid;
+ }
+ switch (req.bmRequestType) {
+ case UT_WRITE_INTERFACE:
+ switch (req.bRequest) {
+ case UR_SET_INTERFACE:
+ /*
+ * We assume that the endpoints are the same
+ * across the alternate settings.
+ *
+ * Reset the endpoints, because re-attaching
+ * only a part of the device is not possible.
+ */
+ error = usb_check_alt_setting(udev,
+ iface, req.wValue[0]);
+ if (error) {
+ DPRINTF("alt setting does not exist %s\n",
+ usbd_errstr(error));
+ goto tr_stalled;
+ }
+ error = usb_reset_iface_endpoints(udev, iface_index);
+ if (error) {
+ DPRINTF("alt setting failed %s\n",
+ usbd_errstr(error));
+ goto tr_stalled;
+ }
+ /* update the current alternate setting */
+ iface->alt_index = req.wValue[0];
+ break;
+
+ default:
+ goto tr_stalled;
+ }
+ break;
+
+ case UT_READ_INTERFACE:
+ switch (req.bRequest) {
+ case UR_GET_INTERFACE:
+ *ppdata = &iface->alt_index;
+ *plen = 1;
+ break;
+
+ default:
+ goto tr_stalled;
+ }
+ break;
+ default:
+ goto tr_stalled;
+ }
+tr_valid:
+ usbd_enum_unlock(udev);
+ USB_XFER_LOCK(xfer);
+ return (0);
+
+tr_short:
+ usbd_enum_unlock(udev);
+ USB_XFER_LOCK(xfer);
+ return (USB_ERR_SHORT_XFER);
+
+tr_stalled:
+ usbd_enum_unlock(udev);
+ USB_XFER_LOCK(xfer);
+ return (USB_ERR_STALLED);
+}
+
+/*------------------------------------------------------------------------*
+ * usb_handle_set_stall
+ *
+ * Returns:
+ * 0: Success
+ * Else: Failure
+ *------------------------------------------------------------------------*/
+static usb_error_t
+usb_handle_set_stall(struct usb_xfer *xfer, uint8_t ep, uint8_t do_stall)
+{
+ struct usb_device *udev = xfer->xroot->udev;
+ usb_error_t err;
+
+ USB_XFER_UNLOCK(xfer);
+ err = usbd_set_endpoint_stall(udev,
+ usbd_get_ep_by_addr(udev, ep), do_stall);
+ USB_XFER_LOCK(xfer);
+ return (err);
+}
+
+/*------------------------------------------------------------------------*
+ * usb_handle_get_stall
+ *
+ * Returns:
+ * 0: Success
+ * Else: Failure
+ *------------------------------------------------------------------------*/
+static uint8_t
+usb_handle_get_stall(struct usb_device *udev, uint8_t ea_val)
+{
+ struct usb_endpoint *ep;
+ uint8_t halted;
+
+ ep = usbd_get_ep_by_addr(udev, ea_val);
+ if (ep == NULL) {
+ /* nothing to do */
+ return (0);
+ }
+ USB_BUS_LOCK(udev->bus);
+ halted = ep->is_stalled;
+ USB_BUS_UNLOCK(udev->bus);
+
+ return (halted);
+}
+
+/*------------------------------------------------------------------------*
+ * usb_handle_remote_wakeup
+ *
+ * Returns:
+ * 0: Success
+ * Else: Failure
+ *------------------------------------------------------------------------*/
+static usb_error_t
+usb_handle_remote_wakeup(struct usb_xfer *xfer, uint8_t is_on)
+{
+ struct usb_device *udev;
+ struct usb_bus *bus;
+
+ udev = xfer->xroot->udev;
+ bus = udev->bus;
+
+ USB_BUS_LOCK(bus);
+
+ if (is_on) {
+ udev->flags.remote_wakeup = 1;
+ } else {
+ udev->flags.remote_wakeup = 0;
+ }
+
+ USB_BUS_UNLOCK(bus);
+
+ /* In case we are out of sync, update the power state. */
+ usb_bus_power_update(udev->bus);
+ return (0); /* success */
+}
+
+/*------------------------------------------------------------------------*
+ * usb_handle_request
+ *
+ * Internal state sequence:
+ *
+ * USB_HR_NOT_COMPLETE -> USB_HR_COMPLETE_OK v USB_HR_COMPLETE_ERR
+ *
+ * Returns:
+ * 0: Ready to start hardware
+ * Else: Stall current transfer, if any
+ *------------------------------------------------------------------------*/
+static usb_error_t
+usb_handle_request(struct usb_xfer *xfer)
+{
+ struct usb_device_request req;
+ struct usb_device *udev;
+ const void *src_zcopy; /* zero-copy source pointer */
+ const void *src_mcopy; /* non zero-copy source pointer */
+ uint16_t off; /* data offset */
+ uint16_t rem; /* data remainder */
+ uint16_t max_len; /* max fragment length */
+ uint16_t wValue;
+ uint16_t wIndex;
+ uint8_t state;
+ uint8_t is_complete = 1;
+ usb_error_t err;
+ union {
+ uWord wStatus;
+ uint8_t buf[2];
+ } temp;
+
+ /*
+ * Filter the USB transfer state into
+ * something which we understand:
+ */
+
+ switch (USB_GET_STATE(xfer)) {
+ case USB_ST_SETUP:
+ state = USB_HR_NOT_COMPLETE;
+
+ if (!xfer->flags_int.control_act) {
+ /* nothing to do */
+ goto tr_stalled;
+ }
+ break;
+ case USB_ST_TRANSFERRED:
+ if (!xfer->flags_int.control_act) {
+ state = USB_HR_COMPLETE_OK;
+ } else {
+ state = USB_HR_NOT_COMPLETE;
+ }
+ break;
+ default:
+ state = USB_HR_COMPLETE_ERR;
+ break;
+ }
+
+ /* reset frame stuff */
+
+ usbd_xfer_set_frame_len(xfer, 0, 0);
+
+ usbd_xfer_set_frame_offset(xfer, 0, 0);
+ usbd_xfer_set_frame_offset(xfer, sizeof(req), 1);
+
+ /* get the current request, if any */
+
+ usbd_copy_out(xfer->frbuffers, 0, &req, sizeof(req));
+
+ if (xfer->flags_int.control_rem == 0xFFFF) {
+ /* first time - not initialised */
+ rem = UGETW(req.wLength);
+ off = 0;
+ } else {
+ /* not first time - initialised */
+ rem = xfer->flags_int.control_rem;
+ off = UGETW(req.wLength) - rem;
+ }
+
+ /* set some defaults */
+
+ max_len = 0;
+ src_zcopy = NULL;
+ src_mcopy = NULL;
+ udev = xfer->xroot->udev;
+
+ /* get some request fields decoded */
+
+ wValue = UGETW(req.wValue);
+ wIndex = UGETW(req.wIndex);
+
+ DPRINTF("req 0x%02x 0x%02x 0x%04x 0x%04x "
+ "off=0x%x rem=0x%x, state=%d\n", req.bmRequestType,
+ req.bRequest, wValue, wIndex, off, rem, state);
+
+ /* demultiplex the control request */
+
+ switch (req.bmRequestType) {
+ case UT_READ_DEVICE:
+ if (state != USB_HR_NOT_COMPLETE) {
+ break;
+ }
+ switch (req.bRequest) {
+ case UR_GET_DESCRIPTOR:
+ goto tr_handle_get_descriptor;
+ case UR_GET_CONFIG:
+ goto tr_handle_get_config;
+ case UR_GET_STATUS:
+ goto tr_handle_get_status;
+ default:
+ goto tr_stalled;
+ }
+ break;
+
+ case UT_WRITE_DEVICE:
+ switch (req.bRequest) {
+ case UR_SET_ADDRESS:
+ goto tr_handle_set_address;
+ case UR_SET_CONFIG:
+ goto tr_handle_set_config;
+ case UR_CLEAR_FEATURE:
+ switch (wValue) {
+ case UF_DEVICE_REMOTE_WAKEUP:
+ goto tr_handle_clear_wakeup;
+ default:
+ goto tr_stalled;
+ }
+ break;
+ case UR_SET_FEATURE:
+ switch (wValue) {
+ case UF_DEVICE_REMOTE_WAKEUP:
+ goto tr_handle_set_wakeup;
+ default:
+ goto tr_stalled;
+ }
+ break;
+ default:
+ goto tr_stalled;
+ }
+ break;
+
+ case UT_WRITE_ENDPOINT:
+ switch (req.bRequest) {
+ case UR_CLEAR_FEATURE:
+ switch (wValue) {
+ case UF_ENDPOINT_HALT:
+ goto tr_handle_clear_halt;
+ default:
+ goto tr_stalled;
+ }
+ break;
+ case UR_SET_FEATURE:
+ switch (wValue) {
+ case UF_ENDPOINT_HALT:
+ goto tr_handle_set_halt;
+ default:
+ goto tr_stalled;
+ }
+ break;
+ default:
+ goto tr_stalled;
+ }
+ break;
+
+ case UT_READ_ENDPOINT:
+ switch (req.bRequest) {
+ case UR_GET_STATUS:
+ goto tr_handle_get_ep_status;
+ default:
+ goto tr_stalled;
+ }
+ break;
+ default:
+ /* we use "USB_ADD_BYTES" to de-const the src_zcopy */
+ err = usb_handle_iface_request(xfer,
+ USB_ADD_BYTES(&src_zcopy, 0),
+ &max_len, req, off, state);
+ if (err == 0) {
+ is_complete = 0;
+ goto tr_valid;
+ } else if (err == USB_ERR_SHORT_XFER) {
+ goto tr_valid;
+ }
+ /*
+ * Reset zero-copy pointer and max length
+ * variable in case they were unintentionally
+ * set:
+ */
+ src_zcopy = NULL;
+ max_len = 0;
+
+ /*
+ * Check if we have a vendor specific
+ * descriptor:
+ */
+ goto tr_handle_get_descriptor;
+ }
+ goto tr_valid;
+
+tr_handle_get_descriptor:
+ err = (usb_temp_get_desc_p) (udev, &req, &src_zcopy, &max_len);
+ if (err)
+ goto tr_stalled;
+ if (src_zcopy == NULL)
+ goto tr_stalled;
+ goto tr_valid;
+
+tr_handle_get_config:
+ temp.buf[0] = udev->curr_config_no;
+ src_mcopy = temp.buf;
+ max_len = 1;
+ goto tr_valid;
+
+tr_handle_get_status:
+
+ wValue = 0;
+
+ USB_BUS_LOCK(udev->bus);
+ if (udev->flags.remote_wakeup) {
+ wValue |= UDS_REMOTE_WAKEUP;
+ }
+ if (udev->flags.self_powered) {
+ wValue |= UDS_SELF_POWERED;
+ }
+ USB_BUS_UNLOCK(udev->bus);
+
+ USETW(temp.wStatus, wValue);
+ src_mcopy = temp.wStatus;
+ max_len = sizeof(temp.wStatus);
+ goto tr_valid;
+
+tr_handle_set_address:
+ if (state == USB_HR_NOT_COMPLETE) {
+ if (wValue >= 0x80) {
+ /* invalid value */
+ goto tr_stalled;
+ } else if (udev->curr_config_no != 0) {
+ /* we are configured ! */
+ goto tr_stalled;
+ }
+ } else {
+ udev->address = (wValue & 0x7F);
+ goto tr_bad_context;
+ }
+ goto tr_valid;
+
+tr_handle_set_config:
+ if (state == USB_HR_NOT_COMPLETE) {
+ if (usb_handle_set_config(xfer, req.wValue[0])) {
+ goto tr_stalled;
+ }
+ }
+ goto tr_valid;
+
+tr_handle_clear_halt:
+ if (state == USB_HR_NOT_COMPLETE) {
+ if (usb_handle_set_stall(xfer, req.wIndex[0], 0)) {
+ goto tr_stalled;
+ }
+ }
+ goto tr_valid;
+
+tr_handle_clear_wakeup:
+ if (state == USB_HR_NOT_COMPLETE) {
+ if (usb_handle_remote_wakeup(xfer, 0)) {
+ goto tr_stalled;
+ }
+ }
+ goto tr_valid;
+
+tr_handle_set_halt:
+ if (state == USB_HR_NOT_COMPLETE) {
+ if (usb_handle_set_stall(xfer, req.wIndex[0], 1)) {
+ goto tr_stalled;
+ }
+ }
+ goto tr_valid;
+
+tr_handle_set_wakeup:
+ if (state == USB_HR_NOT_COMPLETE) {
+ if (usb_handle_remote_wakeup(xfer, 1)) {
+ goto tr_stalled;
+ }
+ }
+ goto tr_valid;
+
+tr_handle_get_ep_status:
+ if (state == USB_HR_NOT_COMPLETE) {
+ temp.wStatus[0] =
+ usb_handle_get_stall(udev, req.wIndex[0]);
+ temp.wStatus[1] = 0;
+ src_mcopy = temp.wStatus;
+ max_len = sizeof(temp.wStatus);
+ }
+ goto tr_valid;
+
+tr_valid:
+ if (state != USB_HR_NOT_COMPLETE) {
+ goto tr_stalled;
+ }
+ /* subtract offset from length */
+
+ max_len -= off;
+
+ /* Compute the real maximum data length */
+
+ if (max_len > xfer->max_data_length) {
+ max_len = usbd_xfer_max_len(xfer);
+ }
+ if (max_len > rem) {
+ max_len = rem;
+ }
+ /*
+ * If the remainder is greater than the maximum data length,
+ * we need to truncate the value for the sake of the
+ * comparison below:
+ */
+ if (rem > xfer->max_data_length) {
+ rem = usbd_xfer_max_len(xfer);
+ }
+ if ((rem != max_len) && (is_complete != 0)) {
+ /*
+ * If we don't transfer the data we can transfer, then
+ * the transfer is short !
+ */
+ xfer->flags.force_short_xfer = 1;
+ xfer->nframes = 2;
+ } else {
+ /*
+ * Default case
+ */
+ xfer->flags.force_short_xfer = 0;
+ xfer->nframes = max_len ? 2 : 1;
+ }
+ if (max_len > 0) {
+ if (src_mcopy) {
+ src_mcopy = USB_ADD_BYTES(src_mcopy, off);
+ usbd_copy_in(xfer->frbuffers + 1, 0,
+ src_mcopy, max_len);
+ usbd_xfer_set_frame_len(xfer, 1, max_len);
+ } else {
+ usbd_xfer_set_frame_data(xfer, 1,
+ USB_ADD_BYTES(src_zcopy, off), max_len);
+ }
+ } else {
+ /* the end is reached, send status */
+ xfer->flags.manual_status = 0;
+ usbd_xfer_set_frame_len(xfer, 1, 0);
+ }
+ DPRINTF("success\n");
+ return (0); /* success */
+
+tr_stalled:
+ DPRINTF("%s\n", (state != USB_HR_NOT_COMPLETE) ?
+ "complete" : "stalled");
+ return (USB_ERR_STALLED);
+
+tr_bad_context:
+ DPRINTF("bad context\n");
+ return (USB_ERR_BAD_CONTEXT);
+}
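+
+/*
+ * Worked example of the length clamping in usb_handle_request()
+ * above (illustrative numbers): assume the host requests wLength =
+ * 255 bytes of a descriptor that is only 18 bytes long and the
+ * transfer's max_data_length is 64.  On the first pass off = 0,
+ * rem = 255 and max_len = 18; "rem" is then clamped to 64 and since
+ * 64 != 18 the "force_short_xfer" flag is set, so the device
+ * terminates the data stage with a short packet after 18 bytes.
+ */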
diff --git a/rtems/freebsd/dev/usb/usb_hid.c b/rtems/freebsd/dev/usb/usb_hid.c
new file mode 100644
index 00000000..8e987bd0
--- /dev/null
+++ b/rtems/freebsd/dev/usb/usb_hid.c
@@ -0,0 +1,820 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/* $NetBSD: hid.c,v 1.17 2001/11/13 06:24:53 lukem Exp $ */
+
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+/*-
+ * Copyright (c) 1998 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Lennart Augustsson (lennart@augustsson.net) at
+ * Carlstedt Research & Technology.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the NetBSD
+ * Foundation, Inc. and its contributors.
+ * 4. Neither the name of The NetBSD Foundation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rtems/freebsd/sys/stdint.h>
+#include <rtems/freebsd/sys/stddef.h>
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/queue.h>
+#include <rtems/freebsd/sys/types.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/bus.h>
+#include <rtems/freebsd/sys/linker_set.h>
+#include <rtems/freebsd/sys/module.h>
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/mutex.h>
+#include <rtems/freebsd/sys/condvar.h>
+#include <rtems/freebsd/sys/sysctl.h>
+#include <rtems/freebsd/sys/sx.h>
+#include <rtems/freebsd/sys/unistd.h>
+#include <rtems/freebsd/sys/callout.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/priv.h>
+
+#include <rtems/freebsd/dev/usb/usb.h>
+#include <rtems/freebsd/dev/usb/usbdi.h>
+#include <rtems/freebsd/dev/usb/usbdi_util.h>
+#include <rtems/freebsd/dev/usb/usbhid.h>
+
+#define USB_DEBUG_VAR usb_debug
+
+#include <rtems/freebsd/dev/usb/usb_core.h>
+#include <rtems/freebsd/dev/usb/usb_debug.h>
+#include <rtems/freebsd/dev/usb/usb_process.h>
+#include <rtems/freebsd/dev/usb/usb_device.h>
+#include <rtems/freebsd/dev/usb/usb_request.h>
+
+static void hid_clear_local(struct hid_item *);
+static uint8_t hid_get_byte(struct hid_data *s, const uint16_t wSize);
+
+#define MAXUSAGE 64
+#define MAXPUSH 4
+#define MAXID 16
+
+struct hid_pos_data {
+ int32_t rid;
+ uint32_t pos;
+};
+
+struct hid_data {
+ const uint8_t *start;
+ const uint8_t *end;
+ const uint8_t *p;
+ struct hid_item cur[MAXPUSH];
+ struct hid_pos_data last_pos[MAXID];
+ int32_t usages_min[MAXUSAGE];
+ int32_t usages_max[MAXUSAGE];
+ int32_t usage_last; /* last seen usage */
+ uint32_t loc_size; /* last seen size */
+ uint32_t loc_count; /* last seen count */
+ uint8_t kindset; /* we have 5 kinds so 8 bits are enough */
+ uint8_t pushlevel; /* current pushlevel */
+ uint8_t ncount; /* end usage item count */
+ uint8_t icount; /* current usage item count */
+ uint8_t nusage; /* end "usages_min/max" index */
+ uint8_t iusage; /* current "usages_min/max" index */
+ uint8_t ousage; /* current "usages_min/max" offset */
+ uint8_t susage; /* usage set flags */
+};
+
+/*------------------------------------------------------------------------*
+ * hid_clear_local
+ *------------------------------------------------------------------------*/
+static void
+hid_clear_local(struct hid_item *c)
+{
+
+ c->loc.count = 0;
+ c->loc.size = 0;
+ c->usage = 0;
+ c->usage_minimum = 0;
+ c->usage_maximum = 0;
+ c->designator_index = 0;
+ c->designator_minimum = 0;
+ c->designator_maximum = 0;
+ c->string_index = 0;
+ c->string_minimum = 0;
+ c->string_maximum = 0;
+ c->set_delimiter = 0;
+}
+
+static void
+hid_switch_rid(struct hid_data *s, struct hid_item *c, int32_t next_rID)
+{
+ uint8_t i;
+
+ /* check for same report ID - optimise */
+
+ if (c->report_ID == next_rID)
+ return;
+
+ /* save current position for current rID */
+
+ if (c->report_ID == 0) {
+ i = 0;
+ } else {
+ for (i = 1; i != MAXID; i++) {
+ if (s->last_pos[i].rid == c->report_ID)
+ break;
+ if (s->last_pos[i].rid == 0)
+ break;
+ }
+ }
+ if (i != MAXID) {
+ s->last_pos[i].rid = c->report_ID;
+ s->last_pos[i].pos = c->loc.pos;
+ }
+
+ /* store next report ID */
+
+ c->report_ID = next_rID;
+
+ /* lookup last position for next rID */
+
+ if (next_rID == 0) {
+ i = 0;
+ } else {
+ for (i = 1; i != MAXID; i++) {
+ if (s->last_pos[i].rid == next_rID)
+ break;
+ if (s->last_pos[i].rid == 0)
+ break;
+ }
+ }
+ if (i != MAXID) {
+ s->last_pos[i].rid = next_rID;
+ c->loc.pos = s->last_pos[i].pos;
+ } else {
+ DPRINTF("Out of RID entries, position is set to zero!\n");
+ c->loc.pos = 0;
+ }
+}
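+
+/*
+ * Illustrative note (not part of the original sources): if a report
+ * descriptor interleaves items for report IDs 1 and 2, the bit
+ * position of each report is tracked independently. Switching from
+ * rID 1 to rID 2 saves the current "loc.pos" under rID 1 in
+ * "last_pos[]" and restores whatever position rID 2 had reached, so
+ * each report continues growing from where it left off.
+ */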
+
+/*------------------------------------------------------------------------*
+ * hid_start_parse
+ *------------------------------------------------------------------------*/
+struct hid_data *
+hid_start_parse(const void *d, usb_size_t len, int kindset)
+{
+ struct hid_data *s;
+
+ if ((kindset-1) & kindset) {
+ DPRINTFN(0, "Only one bit can be "
+ "set in the kindset\n");
+ return (NULL);
+ }
+
+ s = malloc(sizeof *s, M_TEMP, M_WAITOK | M_ZERO);
+ s->start = s->p = d;
+ s->end = ((const uint8_t *)d) + len;
+ s->kindset = kindset;
+ return (s);
+}
+
+/*------------------------------------------------------------------------*
+ * hid_end_parse
+ *------------------------------------------------------------------------*/
+void
+hid_end_parse(struct hid_data *s)
+{
+ if (s == NULL)
+ return;
+
+ free(s, M_TEMP);
+}
+
+/*------------------------------------------------------------------------*
+ * get byte from HID descriptor
+ *------------------------------------------------------------------------*/
+static uint8_t
+hid_get_byte(struct hid_data *s, const uint16_t wSize)
+{
+ const uint8_t *ptr;
+ uint8_t retval;
+
+ ptr = s->p;
+
+ /* check if end is reached */
+ if (ptr == s->end)
+ return (0);
+
+ /* read out a byte */
+ retval = *ptr;
+
+ /* check if data pointer can be advanced by "wSize" bytes */
+ if ((s->end - ptr) < wSize)
+ ptr = s->end;
+ else
+ ptr += wSize;
+
+ /* update pointer */
+ s->p = ptr;
+
+ return (retval);
+}
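+
+/*
+ * Example (illustrative): if only two bytes remain before "s->end"
+ * and "wSize" is 4, the current byte is returned and "s->p" is
+ * clamped to "s->end"; every later call then returns 0 instead of
+ * reading past the end of a truncated descriptor.
+ */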
+
+/*------------------------------------------------------------------------*
+ * hid_get_item
+ *------------------------------------------------------------------------*/
+int
+hid_get_item(struct hid_data *s, struct hid_item *h)
+{
+ struct hid_item *c;
+ unsigned int bTag, bType, bSize;
+ uint32_t oldpos;
+ int32_t mask;
+ int32_t dval;
+
+ if (s == NULL)
+ return (0);
+
+ c = &s->cur[s->pushlevel];
+
+ top:
+ /* check if there is an array of items */
+ if (s->icount < s->ncount) {
+ /* get current usage */
+ if (s->iusage < s->nusage) {
+ dval = s->usages_min[s->iusage] + s->ousage;
+ c->usage = dval;
+ s->usage_last = dval;
+ if (dval == s->usages_max[s->iusage]) {
+ s->iusage ++;
+ s->ousage = 0;
+ } else {
+ s->ousage ++;
+ }
+ } else {
+ DPRINTFN(1, "Using last usage\n");
+ dval = s->usage_last;
+ }
+ s->icount ++;
+ /*
+ * Only copy HID item, increment position and return
+ * if correct kindset!
+ */
+ if (s->kindset & (1 << c->kind)) {
+ *h = *c;
+ DPRINTFN(1, "%u,%u,%u\n", h->loc.pos,
+ h->loc.size, h->loc.count);
+ c->loc.pos += c->loc.size * c->loc.count;
+ return (1);
+ }
+ }
+
+ /* reset state variables */
+ s->icount = 0;
+ s->ncount = 0;
+ s->iusage = 0;
+ s->nusage = 0;
+ s->susage = 0;
+ s->ousage = 0;
+ hid_clear_local(c);
+
+ /* get next item */
+ while (s->p != s->end) {
+
+ bSize = hid_get_byte(s, 1);
+ if (bSize == 0xfe) {
+ /* long item */
+ bSize = hid_get_byte(s, 1);
+ bSize |= hid_get_byte(s, 1) << 8;
+ bTag = hid_get_byte(s, 1);
+ bType = 0xff; /* XXX what should it be */
+ } else {
+ /* short item */
+ bTag = bSize >> 4;
+ bType = (bSize >> 2) & 3;
+ bSize &= 3;
+ if (bSize == 3)
+ bSize = 4;
+ }
+ switch (bSize) {
+ case 0:
+ dval = 0;
+ mask = 0;
+ break;
+ case 1:
+ dval = (int8_t)hid_get_byte(s, 1);
+ mask = 0xFF;
+ break;
+ case 2:
+ dval = hid_get_byte(s, 1);
+ dval |= hid_get_byte(s, 1) << 8;
+ dval = (int16_t)dval;
+ mask = 0xFFFF;
+ break;
+ case 4:
+ dval = hid_get_byte(s, 1);
+ dval |= hid_get_byte(s, 1) << 8;
+ dval |= hid_get_byte(s, 1) << 16;
+ dval |= hid_get_byte(s, 1) << 24;
+ mask = 0xFFFFFFFF;
+ break;
+ default:
+ dval = hid_get_byte(s, bSize);
+ DPRINTFN(0, "bad length %u (data=0x%02x)\n",
+ bSize, dval);
+ continue;
+ }
+
+ switch (bType) {
+ case 0: /* Main */
+ switch (bTag) {
+ case 8: /* Input */
+ c->kind = hid_input;
+ c->flags = dval;
+ ret:
+ c->loc.count = s->loc_count;
+ c->loc.size = s->loc_size;
+
+ if (c->flags & HIO_VARIABLE) {
+ /* range check usage count */
+ if (c->loc.count > 255) {
+ DPRINTFN(0, "Number of "
+ "items truncated to 255\n");
+ s->ncount = 255;
+ } else
+ s->ncount = c->loc.count;
+
+				/*
+				 * The "top" loop will return
+				 * one item at a time:
+				 */
+ c->loc.count = 1;
+ } else {
+ s->ncount = 1;
+ }
+ goto top;
+
+ case 9: /* Output */
+ c->kind = hid_output;
+ c->flags = dval;
+ goto ret;
+ case 10: /* Collection */
+ c->kind = hid_collection;
+ c->collection = dval;
+ c->collevel++;
+ c->usage = s->usage_last;
+ *h = *c;
+ return (1);
+ case 11: /* Feature */
+ c->kind = hid_feature;
+ c->flags = dval;
+ goto ret;
+ case 12: /* End collection */
+ c->kind = hid_endcollection;
+ if (c->collevel == 0) {
+ DPRINTFN(0, "invalid end collection\n");
+ return (0);
+ }
+ c->collevel--;
+ *h = *c;
+ return (1);
+ default:
+ DPRINTFN(0, "Main bTag=%d\n", bTag);
+ break;
+ }
+ break;
+ case 1: /* Global */
+ switch (bTag) {
+ case 0:
+ c->_usage_page = dval << 16;
+ break;
+ case 1:
+ c->logical_minimum = dval;
+ break;
+ case 2:
+ c->logical_maximum = dval;
+ break;
+ case 3:
+ c->physical_minimum = dval;
+ break;
+ case 4:
+ c->physical_maximum = dval;
+ break;
+ case 5:
+ c->unit_exponent = dval;
+ break;
+ case 6:
+ c->unit = dval;
+ break;
+ case 7:
+ /* mask because value is unsigned */
+ s->loc_size = dval & mask;
+ break;
+ case 8:
+ hid_switch_rid(s, c, dval);
+ break;
+ case 9:
+ /* mask because value is unsigned */
+ s->loc_count = dval & mask;
+ break;
+ case 10: /* Push */
+ s->pushlevel ++;
+ if (s->pushlevel < MAXPUSH) {
+ s->cur[s->pushlevel] = *c;
+ /* store size and count */
+ c->loc.size = s->loc_size;
+ c->loc.count = s->loc_count;
+ /* update current item pointer */
+ c = &s->cur[s->pushlevel];
+ } else {
+ DPRINTFN(0, "Cannot push "
+ "item @ %d\n", s->pushlevel);
+ }
+ break;
+ case 11: /* Pop */
+ s->pushlevel --;
+ if (s->pushlevel < MAXPUSH) {
+ /* preserve position */
+ oldpos = c->loc.pos;
+ c = &s->cur[s->pushlevel];
+ /* restore size and count */
+ s->loc_size = c->loc.size;
+ s->loc_count = c->loc.count;
+ /* set default item location */
+ c->loc.pos = oldpos;
+ c->loc.size = 0;
+ c->loc.count = 0;
+ } else {
+ DPRINTFN(0, "Cannot pop "
+ "item @ %d\n", s->pushlevel);
+ }
+ break;
+ default:
+ DPRINTFN(0, "Global bTag=%d\n", bTag);
+ break;
+ }
+ break;
+ case 2: /* Local */
+ switch (bTag) {
+ case 0:
+ if (bSize != 4)
+ dval = (dval & mask) | c->_usage_page;
+
+ /* set last usage, in case of a collection */
+ s->usage_last = dval;
+
+ if (s->nusage < MAXUSAGE) {
+ s->usages_min[s->nusage] = dval;
+ s->usages_max[s->nusage] = dval;
+ s->nusage ++;
+ } else {
+ DPRINTFN(0, "max usage reached\n");
+ }
+
+ /* clear any pending usage sets */
+ s->susage = 0;
+ break;
+ case 1:
+ s->susage |= 1;
+
+ if (bSize != 4)
+ dval = (dval & mask) | c->_usage_page;
+ c->usage_minimum = dval;
+
+ goto check_set;
+ case 2:
+ s->susage |= 2;
+
+ if (bSize != 4)
+ dval = (dval & mask) | c->_usage_page;
+ c->usage_maximum = dval;
+
+ check_set:
+ if (s->susage != 3)
+ break;
+
+ /* sanity check */
+ if ((s->nusage < MAXUSAGE) &&
+ (c->usage_minimum <= c->usage_maximum)) {
+ /* add usage range */
+ s->usages_min[s->nusage] =
+ c->usage_minimum;
+ s->usages_max[s->nusage] =
+ c->usage_maximum;
+ s->nusage ++;
+ } else {
+ DPRINTFN(0, "Usage set dropped\n");
+ }
+ s->susage = 0;
+ break;
+ case 3:
+ c->designator_index = dval;
+ break;
+ case 4:
+ c->designator_minimum = dval;
+ break;
+ case 5:
+ c->designator_maximum = dval;
+ break;
+ case 7:
+ c->string_index = dval;
+ break;
+ case 8:
+ c->string_minimum = dval;
+ break;
+ case 9:
+ c->string_maximum = dval;
+ break;
+ case 10:
+ c->set_delimiter = dval;
+ break;
+ default:
+ DPRINTFN(0, "Local bTag=%d\n", bTag);
+ break;
+ }
+ break;
+ default:
+ DPRINTFN(0, "default bType=%d\n", bType);
+ break;
+ }
+ }
+ return (0);
+}
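+
+/*
+ * Worked example of the short item decoding above (illustrative):
+ * the byte sequence 0x95 0x06 is a "Report Count (6)" item. The
+ * prefix 0x95 decodes as bTag = 0x95 >> 4 = 9, bType =
+ * (0x95 >> 2) & 3 = 1 (Global) and bSize = 0x95 & 3 = 1, so exactly
+ * one data byte (0x06) follows and "s->loc_count" becomes 6.
+ */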
+
+/*------------------------------------------------------------------------*
+ * hid_report_size
+ *------------------------------------------------------------------------*/
+int
+hid_report_size(const void *buf, usb_size_t len, enum hid_kind k, uint8_t *id)
+{
+ struct hid_data *d;
+ struct hid_item h;
+ uint32_t temp;
+ uint32_t hpos;
+ uint32_t lpos;
+ uint8_t any_id;
+
+ any_id = 0;
+ hpos = 0;
+ lpos = 0xFFFFFFFF;
+
+ for (d = hid_start_parse(buf, len, 1 << k); hid_get_item(d, &h);) {
+ if (h.kind == k) {
+			/* check for ID-byte presence */
+ if ((h.report_ID != 0) && !any_id) {
+ if (id != NULL)
+ *id = h.report_ID;
+ any_id = 1;
+ }
+ /* compute minimum */
+ if (lpos > h.loc.pos)
+ lpos = h.loc.pos;
+ /* compute end position */
+ temp = h.loc.pos + (h.loc.size * h.loc.count);
+ /* compute maximum */
+ if (hpos < temp)
+ hpos = temp;
+ }
+ }
+ hid_end_parse(d);
+
+	/* safety check - can happen in case of corrupt descriptors */
+ if (lpos > hpos)
+ temp = 0;
+ else
+ temp = hpos - lpos;
+
+ /* check for ID byte */
+ if (any_id)
+ temp += 8;
+ else if (id != NULL)
+ *id = 0;
+
+ /* return length in bytes rounded up */
+ return ((temp + 7) / 8);
+}
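+
+/*
+ * Example (illustrative): if the items of kind "k" span bit
+ * positions 0 through 55 (lpos = 0, hpos = 56) and the report uses a
+ * non-zero report ID, the result is (56 + 8 + 7) / 8 = 8 bytes: the
+ * payload rounded up to whole bytes plus one byte for the ID.
+ */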
+
+/*------------------------------------------------------------------------*
+ * hid_locate
+ *------------------------------------------------------------------------*/
+int
+hid_locate(const void *desc, usb_size_t size, uint32_t u, enum hid_kind k,
+ uint8_t index, struct hid_location *loc, uint32_t *flags, uint8_t *id)
+{
+ struct hid_data *d;
+ struct hid_item h;
+
+ for (d = hid_start_parse(desc, size, 1 << k); hid_get_item(d, &h);) {
+ if (h.kind == k && !(h.flags & HIO_CONST) && h.usage == u) {
+ if (index--)
+ continue;
+ if (loc != NULL)
+ *loc = h.loc;
+ if (flags != NULL)
+ *flags = h.flags;
+ if (id != NULL)
+ *id = h.report_ID;
+ hid_end_parse(d);
+ return (1);
+ }
+ }
+ if (loc != NULL)
+ loc->size = 0;
+ if (flags != NULL)
+ *flags = 0;
+ if (id != NULL)
+ *id = 0;
+ hid_end_parse(d);
+ return (0);
+}
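+
+/*
+ * Usage sketch (illustrative, not from the original sources; the
+ * usage code 0x00010030 is Generic Desktop / X, composed as
+ * (page << 16) | usage, just like the parser composes usages):
+ *
+ *	struct hid_location loc;
+ *	uint32_t flags;
+ *	uint8_t id;
+ *
+ *	if (hid_locate(desc, size, 0x00010030, hid_input, 0,
+ *	    &loc, &flags, &id))
+ *		value = hid_get_data(buf, len, &loc);
+ *
+ * This finds the first non-constant input item matching the usage.
+ */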
+
+/*------------------------------------------------------------------------*
+ * hid_get_data
+ *------------------------------------------------------------------------*/
+static uint32_t
+hid_get_data_sub(const uint8_t *buf, usb_size_t len, struct hid_location *loc,
+ int is_signed)
+{
+ uint32_t hpos = loc->pos;
+ uint32_t hsize = loc->size;
+ uint32_t data;
+ uint32_t rpos;
+ uint8_t n;
+
+ DPRINTFN(11, "hid_get_data: loc %d/%d\n", hpos, hsize);
+
+ /* Range check and limit */
+ if (hsize == 0)
+ return (0);
+ if (hsize > 32)
+ hsize = 32;
+
+ /* Get data in a safe way */
+ data = 0;
+ rpos = (hpos / 8);
+ n = (hsize + 7) / 8;
+ rpos += n;
+ while (n--) {
+ rpos--;
+ if (rpos < len)
+ data |= buf[rpos] << (8 * n);
+ }
+
+ /* Correctly shift down data */
+ data = (data >> (hpos % 8));
+ n = 32 - hsize;
+
+ /* Mask and sign extend in one */
+ if (is_signed != 0)
+ data = (int32_t)((int32_t)data << n) >> n;
+ else
+ data = (uint32_t)((uint32_t)data << n) >> n;
+
+ DPRINTFN(11, "hid_get_data: loc %d/%d = %lu\n",
+ loc->pos, loc->size, (long)data);
+ return (data);
+}
+
+int32_t
+hid_get_data(const uint8_t *buf, usb_size_t len, struct hid_location *loc)
+{
+ return (hid_get_data_sub(buf, len, loc, 1));
+}
+
+uint32_t
+hid_get_data_unsigned(const uint8_t *buf, usb_size_t len, struct hid_location *loc)
+{
+ return (hid_get_data_sub(buf, len, loc, 0));
+}
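+
+/*
+ * Worked example of the mask-and-sign-extend step (illustrative):
+ * for a byte-aligned field with loc->pos = 8 and loc->size = 8 over
+ * the buffer {0x00, 0xF0}, the raw value is 0xF0 and n = 24.
+ * hid_get_data() yields (int32_t)(0xF0 << 24) >> 24 = -16, while
+ * hid_get_data_unsigned() yields 240.
+ */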
+
+/*------------------------------------------------------------------------*
+ * hid_is_collection
+ *------------------------------------------------------------------------*/
+int
+hid_is_collection(const void *desc, usb_size_t size, uint32_t usage)
+{
+ struct hid_data *hd;
+ struct hid_item hi;
+ int err;
+
+ hd = hid_start_parse(desc, size, hid_input);
+ if (hd == NULL)
+ return (0);
+
+ while ((err = hid_get_item(hd, &hi))) {
+ if (hi.kind == hid_collection &&
+ hi.usage == usage)
+ break;
+ }
+ hid_end_parse(hd);
+ return (err);
+}
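+
+/*
+ * Note (an observation, not from the original sources): the kindset
+ * passed above is "hid_input" rather than "1 << hid_input", but
+ * hid_get_item() returns collection items unconditionally, so the
+ * collection lookup still works.
+ */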
+
+/*------------------------------------------------------------------------*
+ * hid_get_descriptor_from_usb
+ *
+ * This function will search for a HID descriptor between two USB
+ * interface descriptors.
+ *
+ * Return values:
+ * NULL: No more HID descriptors.
+ * Else: Pointer to HID descriptor.
+ *------------------------------------------------------------------------*/
+struct usb_hid_descriptor *
+hid_get_descriptor_from_usb(struct usb_config_descriptor *cd,
+ struct usb_interface_descriptor *id)
+{
+ struct usb_descriptor *desc = (void *)id;
+
+ if (desc == NULL) {
+ return (NULL);
+ }
+ while ((desc = usb_desc_foreach(cd, desc))) {
+ if ((desc->bDescriptorType == UDESC_HID) &&
+ (desc->bLength >= USB_HID_DESCRIPTOR_SIZE(0))) {
+ return (void *)desc;
+ }
+ if (desc->bDescriptorType == UDESC_INTERFACE) {
+ break;
+ }
+ }
+ return (NULL);
+}
+
+/*------------------------------------------------------------------------*
+ * usbd_req_get_hid_desc
+ *
+ * This function will read out a USB report descriptor from the USB
+ * device.
+ *
+ * Return values:
+ * NULL: Failure.
+ * Else: Success. The pointer should eventually be passed to free().
+ *------------------------------------------------------------------------*/
+usb_error_t
+usbd_req_get_hid_desc(struct usb_device *udev, struct mtx *mtx,
+ void **descp, uint16_t *sizep,
+ struct malloc_type *mem, uint8_t iface_index)
+{
+ struct usb_interface *iface = usbd_get_iface(udev, iface_index);
+ struct usb_hid_descriptor *hid;
+ usb_error_t err;
+
+ if ((iface == NULL) || (iface->idesc == NULL)) {
+ return (USB_ERR_INVAL);
+ }
+ hid = hid_get_descriptor_from_usb
+ (usbd_get_config_descriptor(udev), iface->idesc);
+
+ if (hid == NULL) {
+ return (USB_ERR_IOERROR);
+ }
+ *sizep = UGETW(hid->descrs[0].wDescriptorLength);
+ if (*sizep == 0) {
+ return (USB_ERR_IOERROR);
+ }
+ if (mtx)
+ mtx_unlock(mtx);
+
+ *descp = malloc(*sizep, mem, M_ZERO | M_WAITOK);
+
+ if (mtx)
+ mtx_lock(mtx);
+
+ if (*descp == NULL) {
+ return (USB_ERR_NOMEM);
+ }
+ err = usbd_req_get_report_descriptor
+ (udev, mtx, *descp, *sizep, iface_index);
+
+ if (err) {
+ free(*descp, mem);
+ *descp = NULL;
+ return (err);
+ }
+ return (USB_ERR_NORMAL_COMPLETION);
+}
diff --git a/rtems/freebsd/dev/usb/usb_hub.c b/rtems/freebsd/dev/usb/usb_hub.c
new file mode 100644
index 00000000..c885c697
--- /dev/null
+++ b/rtems/freebsd/dev/usb/usb_hub.c
@@ -0,0 +1,2474 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/* $FreeBSD$ */
+/*-
+ * Copyright (c) 1998 The NetBSD Foundation, Inc. All rights reserved.
+ * Copyright (c) 1998 Lennart Augustsson. All rights reserved.
+ * Copyright (c) 2008-2010 Hans Petter Selasky. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * USB spec: http://www.usb.org/developers/docs/usbspec.zip
+ */
+
+#include <rtems/freebsd/sys/stdint.h>
+#include <rtems/freebsd/sys/stddef.h>
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/queue.h>
+#include <rtems/freebsd/sys/types.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/bus.h>
+#include <rtems/freebsd/sys/linker_set.h>
+#include <rtems/freebsd/sys/module.h>
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/mutex.h>
+#include <rtems/freebsd/sys/condvar.h>
+#include <rtems/freebsd/sys/sysctl.h>
+#include <rtems/freebsd/sys/sx.h>
+#include <rtems/freebsd/sys/unistd.h>
+#include <rtems/freebsd/sys/callout.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/priv.h>
+
+#include <rtems/freebsd/dev/usb/usb.h>
+#include <rtems/freebsd/dev/usb/usb_ioctl.h>
+#include <rtems/freebsd/dev/usb/usbdi.h>
+#include <rtems/freebsd/dev/usb/usbdi_util.h>
+
+#define USB_DEBUG_VAR uhub_debug
+
+#include <rtems/freebsd/dev/usb/usb_core.h>
+#include <rtems/freebsd/dev/usb/usb_process.h>
+#include <rtems/freebsd/dev/usb/usb_device.h>
+#include <rtems/freebsd/dev/usb/usb_request.h>
+#include <rtems/freebsd/dev/usb/usb_debug.h>
+#include <rtems/freebsd/dev/usb/usb_hub.h>
+#include <rtems/freebsd/dev/usb/usb_util.h>
+#include <rtems/freebsd/dev/usb/usb_busdma.h>
+#include <rtems/freebsd/dev/usb/usb_transfer.h>
+#include <rtems/freebsd/dev/usb/usb_dynamic.h>
+
+#include <rtems/freebsd/dev/usb/usb_controller.h>
+#include <rtems/freebsd/dev/usb/usb_bus.h>
+
+#define UHUB_INTR_INTERVAL 250 /* ms */
+#define UHUB_N_TRANSFER 1
+
+#ifdef USB_DEBUG
+static int uhub_debug = 0;
+
+SYSCTL_NODE(_hw_usb, OID_AUTO, uhub, CTLFLAG_RW, 0, "USB HUB");
+SYSCTL_INT(_hw_usb_uhub, OID_AUTO, debug, CTLFLAG_RW, &uhub_debug, 0,
+ "Debug level");
+
+TUNABLE_INT("hw.usb.uhub.debug", &uhub_debug);
+#endif
+
+#if USB_HAVE_POWERD
+static int usb_power_timeout = 30; /* seconds */
+
+SYSCTL_INT(_hw_usb, OID_AUTO, power_timeout, CTLFLAG_RW,
+ &usb_power_timeout, 0, "USB power timeout");
+#endif
+
+struct uhub_current_state {
+ uint16_t port_change;
+ uint16_t port_status;
+};
+
+struct uhub_softc {
+ struct uhub_current_state sc_st;/* current state */
+ device_t sc_dev; /* base device */
+ struct mtx sc_mtx; /* our mutex */
+ struct usb_device *sc_udev; /* USB device */
+ struct usb_xfer *sc_xfer[UHUB_N_TRANSFER]; /* interrupt xfer */
+ uint8_t sc_flags;
+#define UHUB_FLAG_DID_EXPLORE 0x01
+ char sc_name[32];
+};
+
+#define UHUB_PROTO(sc) ((sc)->sc_udev->ddesc.bDeviceProtocol)
+#define UHUB_IS_HIGH_SPEED(sc) (UHUB_PROTO(sc) != UDPROTO_FSHUB)
+#define UHUB_IS_SINGLE_TT(sc) (UHUB_PROTO(sc) == UDPROTO_HSHUBSTT)
+#define UHUB_IS_SUPER_SPEED(sc) (UHUB_PROTO(sc) == UDPROTO_SSHUB)
+
+/* prototypes for type checking: */
+
+static device_probe_t uhub_probe;
+static device_attach_t uhub_attach;
+static device_detach_t uhub_detach;
+static device_suspend_t uhub_suspend;
+static device_resume_t uhub_resume;
+
+static bus_driver_added_t uhub_driver_added;
+static bus_child_location_str_t uhub_child_location_string;
+static bus_child_pnpinfo_str_t uhub_child_pnpinfo_string;
+
+static usb_callback_t uhub_intr_callback;
+
+static void usb_dev_resume_peer(struct usb_device *udev);
+static void usb_dev_suspend_peer(struct usb_device *udev);
+static uint8_t usb_peer_should_wakeup(struct usb_device *udev);
+
+static const struct usb_config uhub_config[UHUB_N_TRANSFER] = {
+
+ [0] = {
+ .type = UE_INTERRUPT,
+ .endpoint = UE_ADDR_ANY,
+ .direction = UE_DIR_ANY,
+ .timeout = 0,
+ .flags = {.pipe_bof = 1,.short_xfer_ok = 1,},
+ .bufsize = 0, /* use wMaxPacketSize */
+ .callback = &uhub_intr_callback,
+ .interval = UHUB_INTR_INTERVAL,
+ },
+};
+
+/*
+ * driver instance for "hub" connected to "usb"
+ * and "hub" connected to "hub"
+ */
+static devclass_t uhub_devclass;
+
+static device_method_t uhub_methods[] = {
+ DEVMETHOD(device_probe, uhub_probe),
+ DEVMETHOD(device_attach, uhub_attach),
+ DEVMETHOD(device_detach, uhub_detach),
+
+ DEVMETHOD(device_suspend, uhub_suspend),
+ DEVMETHOD(device_resume, uhub_resume),
+
+ DEVMETHOD(bus_child_location_str, uhub_child_location_string),
+ DEVMETHOD(bus_child_pnpinfo_str, uhub_child_pnpinfo_string),
+ DEVMETHOD(bus_driver_added, uhub_driver_added),
+ {0, 0}
+};
+
+static driver_t uhub_driver = {
+ .name = "uhub",
+ .methods = uhub_methods,
+ .size = sizeof(struct uhub_softc)
+};
+
+DRIVER_MODULE(uhub, usbus, uhub_driver, uhub_devclass, 0, 0);
+DRIVER_MODULE(uhub, uhub, uhub_driver, uhub_devclass, NULL, 0);
+MODULE_VERSION(uhub, 1);
+
+static void
+uhub_intr_callback(struct usb_xfer *xfer, usb_error_t error)
+{
+ struct uhub_softc *sc = usbd_xfer_softc(xfer);
+
+ switch (USB_GET_STATE(xfer)) {
+ case USB_ST_TRANSFERRED:
+ DPRINTFN(2, "\n");
+ /*
+ * This is an indication that some port
+ * has changed status. Notify the bus
+ * event handler thread that we need
+ * to be explored again:
+ */
+ usb_needs_explore(sc->sc_udev->bus, 0);
+
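+		/* FALLTHROUGH */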
+ case USB_ST_SETUP:
+ usbd_xfer_set_frame_len(xfer, 0, usbd_xfer_max_len(xfer));
+ usbd_transfer_submit(xfer);
+ break;
+
+ default: /* Error */
+ if (xfer->error != USB_ERR_CANCELLED) {
+ /*
+ * Do a clear-stall. The "stall_pipe" flag
+ * will get cleared before next callback by
+ * the USB stack.
+ */
+ usbd_xfer_set_stall(xfer);
+ usbd_xfer_set_frame_len(xfer, 0, usbd_xfer_max_len(xfer));
+ usbd_transfer_submit(xfer);
+ }
+ break;
+ }
+}
+
+/*------------------------------------------------------------------------*
+ * uhub_explore_sub - subroutine
+ *
+ * Return values:
+ * 0: Success
+ * Else: A control transaction failed
+ *------------------------------------------------------------------------*/
+static usb_error_t
+uhub_explore_sub(struct uhub_softc *sc, struct usb_port *up)
+{
+ struct usb_bus *bus;
+ struct usb_device *child;
+ uint8_t refcount;
+ usb_error_t err;
+
+ bus = sc->sc_udev->bus;
+ err = 0;
+
+ /* get driver added refcount from USB bus */
+ refcount = bus->driver_added_refcount;
+
+	/* get device associated with the given port */
+ child = usb_bus_port_get_device(bus, up);
+ if (child == NULL) {
+ /* nothing to do */
+ goto done;
+ }
+
+ /* check if device should be re-enumerated */
+
+ if (child->flags.usb_mode == USB_MODE_HOST) {
+ usbd_enum_lock(child);
+ if (child->re_enumerate_wait) {
+ err = usbd_set_config_index(child, USB_UNCONFIG_INDEX);
+ if (err == 0)
+ err = usbd_req_re_enumerate(child, NULL);
+ if (err == 0)
+ err = usbd_set_config_index(child, 0);
+ if (err == 0) {
+ err = usb_probe_and_attach(child,
+ USB_IFACE_INDEX_ANY);
+ }
+ child->re_enumerate_wait = 0;
+ err = 0;
+ }
+ usbd_enum_unlock(child);
+ }
+
+ /* check if probe and attach should be done */
+
+ if (child->driver_added_refcount != refcount) {
+ child->driver_added_refcount = refcount;
+ err = usb_probe_and_attach(child,
+ USB_IFACE_INDEX_ANY);
+ if (err) {
+ goto done;
+ }
+ }
+ /* start control transfer, if device mode */
+
+ if (child->flags.usb_mode == USB_MODE_DEVICE)
+ usbd_ctrl_transfer_setup(child);
+
+ /* if a HUB becomes present, do a recursive HUB explore */
+
+ if (child->hub)
+ err = (child->hub->explore) (child);
+
+done:
+ return (err);
+}
+
+/*------------------------------------------------------------------------*
+ * uhub_read_port_status - factored out code
+ *------------------------------------------------------------------------*/
+static usb_error_t
+uhub_read_port_status(struct uhub_softc *sc, uint8_t portno)
+{
+ struct usb_port_status ps;
+ usb_error_t err;
+
+ err = usbd_req_get_port_status(
+ sc->sc_udev, NULL, &ps, portno);
+
+ /* update status regardless of error */
+
+ sc->sc_st.port_status = UGETW(ps.wPortStatus);
+ sc->sc_st.port_change = UGETW(ps.wPortChange);
+
+ /* debugging print */
+
+ DPRINTFN(4, "port %d, wPortStatus=0x%04x, "
+ "wPortChange=0x%04x, err=%s\n",
+ portno, sc->sc_st.port_status,
+ sc->sc_st.port_change, usbd_errstr(err));
+ return (err);
+}
+
+/*------------------------------------------------------------------------*
+ * uhub_reattach_port
+ *
+ * Returns:
+ * 0: Success
+ * Else: A control transaction failed
+ *------------------------------------------------------------------------*/
+static usb_error_t
+uhub_reattach_port(struct uhub_softc *sc, uint8_t portno)
+{
+ struct usb_device *child;
+ struct usb_device *udev;
+ enum usb_dev_speed speed;
+ enum usb_hc_mode mode;
+ usb_error_t err;
+ uint8_t timeout;
+
+ DPRINTF("reattaching port %d\n", portno);
+
+ err = 0;
+ timeout = 0;
+ udev = sc->sc_udev;
+ child = usb_bus_port_get_device(udev->bus,
+ udev->hub->ports + portno - 1);
+
+repeat:
+
+ /* first clear the port connection change bit */
+
+ err = usbd_req_clear_port_feature(udev, NULL,
+ portno, UHF_C_PORT_CONNECTION);
+
+ if (err) {
+ goto error;
+ }
+ /* check if there is a child */
+
+ if (child != NULL) {
+ /*
+ * Free USB device and all subdevices, if any.
+ */
+ usb_free_device(child, 0);
+ child = NULL;
+ }
+ /* get fresh status */
+
+ err = uhub_read_port_status(sc, portno);
+ if (err) {
+ goto error;
+ }
+ /* check if nothing is connected to the port */
+
+ if (!(sc->sc_st.port_status & UPS_CURRENT_CONNECT_STATUS)) {
+ goto error;
+ }
+ /* check if there is no power on the port and print a warning */
+
+ if (!(sc->sc_st.port_status & UPS_PORT_POWER)) {
+ DPRINTF("WARNING: strange, connected port %d "
+ "has no power\n", portno);
+ }
+ /* check if the device is in Host Mode */
+
+ if (!(sc->sc_st.port_status & UPS_PORT_MODE_DEVICE)) {
+
+ DPRINTF("Port %d is in Host Mode\n", portno);
+
+ if (sc->sc_st.port_status & UPS_SUSPEND) {
+ /*
+ * NOTE: Should not get here in SuperSpeed
+ * mode, because the HUB should report this
+ * bit as zero.
+ */
+ DPRINTF("Port %d was still "
+ "suspended, clearing.\n", portno);
+ err = usbd_req_clear_port_feature(udev,
+ NULL, portno, UHF_PORT_SUSPEND);
+ }
+
+ /* USB Host Mode */
+
+ /* wait for maximum device power up time */
+
+ usb_pause_mtx(NULL,
+ USB_MS_TO_TICKS(USB_PORT_POWERUP_DELAY));
+
+ /* reset port, which implies enabling it */
+
+ err = usbd_req_reset_port(udev, NULL, portno);
+
+ if (err) {
+ DPRINTFN(0, "port %d reset "
+ "failed, error=%s\n",
+ portno, usbd_errstr(err));
+ goto error;
+ }
+ /* get port status again, it might have changed during reset */
+
+ err = uhub_read_port_status(sc, portno);
+ if (err) {
+ goto error;
+ }
+ /* check if something changed during port reset */
+
+ if ((sc->sc_st.port_change & UPS_C_CONNECT_STATUS) ||
+ (!(sc->sc_st.port_status & UPS_CURRENT_CONNECT_STATUS))) {
+ if (timeout) {
+ DPRINTFN(0, "giving up port reset "
+ "- device vanished\n");
+ goto error;
+ }
+ timeout = 1;
+ goto repeat;
+ }
+ } else {
+ DPRINTF("Port %d is in Device Mode\n", portno);
+ }
+
+ /*
+ * Figure out the device speed
+ */
+ switch (udev->speed) {
+ case USB_SPEED_HIGH:
+ if (sc->sc_st.port_status & UPS_HIGH_SPEED)
+ speed = USB_SPEED_HIGH;
+ else if (sc->sc_st.port_status & UPS_LOW_SPEED)
+ speed = USB_SPEED_LOW;
+ else
+ speed = USB_SPEED_FULL;
+ break;
+ case USB_SPEED_FULL:
+ if (sc->sc_st.port_status & UPS_LOW_SPEED)
+ speed = USB_SPEED_LOW;
+ else
+ speed = USB_SPEED_FULL;
+ break;
+ case USB_SPEED_LOW:
+ speed = USB_SPEED_LOW;
+ break;
+ case USB_SPEED_SUPER:
+ if (udev->parent_hub == NULL) {
+ /* Root HUB - special case */
+ switch (sc->sc_st.port_status & UPS_OTHER_SPEED) {
+ case 0:
+ speed = USB_SPEED_FULL;
+ break;
+ case UPS_LOW_SPEED:
+ speed = USB_SPEED_LOW;
+ break;
+ case UPS_HIGH_SPEED:
+ speed = USB_SPEED_HIGH;
+ break;
+ default:
+ speed = USB_SPEED_SUPER;
+ break;
+ }
+ } else {
+ speed = USB_SPEED_SUPER;
+ }
+ break;
+ default:
+		/* same speed as parent */
+ speed = udev->speed;
+ break;
+ }
+ if (speed == USB_SPEED_SUPER) {
+ err = usbd_req_set_hub_u1_timeout(udev, NULL,
+ portno, 128 - (2 * udev->depth));
+ if (err) {
+ DPRINTFN(0, "port %d U1 timeout "
+ "failed, error=%s\n",
+ portno, usbd_errstr(err));
+ }
+ err = usbd_req_set_hub_u2_timeout(udev, NULL,
+ portno, 128 - (2 * udev->depth));
+ if (err) {
+ DPRINTFN(0, "port %d U2 timeout "
+ "failed, error=%s\n",
+ portno, usbd_errstr(err));
+ }
+ }
+
+ /*
+ * Figure out the device mode
+ *
+ * NOTE: This part is currently FreeBSD specific.
+ */
+ if (sc->sc_st.port_status & UPS_PORT_MODE_DEVICE)
+ mode = USB_MODE_DEVICE;
+ else
+ mode = USB_MODE_HOST;
+
+ /* need to create a new child */
+ child = usb_alloc_device(sc->sc_dev, udev->bus, udev,
+ udev->depth + 1, portno - 1, portno, speed, mode);
+ if (child == NULL) {
+ DPRINTFN(0, "could not allocate new device\n");
+ goto error;
+ }
+ return (0); /* success */
+
+error:
+ if (child != NULL) {
+ /*
+ * Free USB device and all subdevices, if any.
+ */
+ usb_free_device(child, 0);
+ child = NULL;
+ }
+ if (err == 0) {
+ if (sc->sc_st.port_status & UPS_PORT_ENABLED) {
+ err = usbd_req_clear_port_feature(
+ sc->sc_udev, NULL,
+ portno, UHF_PORT_ENABLE);
+ }
+ }
+ if (err) {
+ DPRINTFN(0, "device problem (%s), "
+ "disabling port %d\n", usbd_errstr(err), portno);
+ }
+ return (err);
+}
+
+/*------------------------------------------------------------------------*
+ * usb_device_20_compatible
+ *
+ * Returns:
+ * 0: HUB does not support suspend and resume
+ * Else: HUB supports suspend and resume
+ *------------------------------------------------------------------------*/
+static uint8_t
+usb_device_20_compatible(struct usb_device *udev)
+{
+ if (udev == NULL)
+ return (0);
+ switch (udev->speed) {
+ case USB_SPEED_LOW:
+ case USB_SPEED_FULL:
+ case USB_SPEED_HIGH:
+ return (1);
+ default:
+ return (0);
+ }
+}
+
+/*------------------------------------------------------------------------*
+ * uhub_suspend_resume_port
+ *
+ * Returns:
+ * 0: Success
+ * Else: A control transaction failed
+ *------------------------------------------------------------------------*/
+static usb_error_t
+uhub_suspend_resume_port(struct uhub_softc *sc, uint8_t portno)
+{
+ struct usb_device *child;
+ struct usb_device *udev;
+ uint8_t is_suspend;
+ usb_error_t err;
+
+ DPRINTF("port %d\n", portno);
+
+ udev = sc->sc_udev;
+ child = usb_bus_port_get_device(udev->bus,
+ udev->hub->ports + portno - 1);
+
+ /* first clear the port suspend change bit */
+
+ if (usb_device_20_compatible(udev)) {
+ err = usbd_req_clear_port_feature(udev, NULL,
+ portno, UHF_C_PORT_SUSPEND);
+ } else {
+ err = usbd_req_clear_port_feature(udev, NULL,
+ portno, UHF_C_PORT_LINK_STATE);
+ }
+
+ if (err) {
+ DPRINTF("clearing suspend failed.\n");
+ goto done;
+ }
+ /* get fresh status */
+
+ err = uhub_read_port_status(sc, portno);
+ if (err) {
+ DPRINTF("reading port status failed.\n");
+ goto done;
+ }
+ /* convert current state */
+
+ if (usb_device_20_compatible(udev)) {
+ if (sc->sc_st.port_status & UPS_SUSPEND) {
+ is_suspend = 1;
+ } else {
+ is_suspend = 0;
+ }
+ } else {
+ switch (UPS_PORT_LINK_STATE_GET(sc->sc_st.port_status)) {
+ case UPS_PORT_LS_U0:
+ case UPS_PORT_LS_U1:
+ is_suspend = 0;
+ break;
+ default:
+ is_suspend = 1;
+ break;
+ }
+ }
+
+ DPRINTF("suspended=%u\n", is_suspend);
+
+ /* do the suspend or resume */
+
+ if (child) {
+ /*
+		 * This code handles two cases: 1) Host Mode - we can only
+		 * receive resume here; 2) Device Mode - we can receive
+		 * both suspend and resume here.
+ */
+ if (is_suspend == 0)
+ usb_dev_resume_peer(child);
+ else if ((child->flags.usb_mode == USB_MODE_DEVICE) ||
+ (usb_device_20_compatible(child) == 0))
+ usb_dev_suspend_peer(child);
+ }
+done:
+ return (err);
+}
+
+/*------------------------------------------------------------------------*
+ * uhub_root_interrupt
+ *
+ * This function is called when a Root HUB interrupt has
+ * happened. "ptr" and "len" make up the Root HUB interrupt
+ * packet. This function is called with the "bus_mtx" locked.
+ *------------------------------------------------------------------------*/
+void
+uhub_root_intr(struct usb_bus *bus, const uint8_t *ptr, uint8_t len)
+{
+ USB_BUS_LOCK_ASSERT(bus, MA_OWNED);
+
+ usb_needs_explore(bus, 0);
+}
+
+static uint8_t
+uhub_is_too_deep(struct usb_device *udev)
+{
+ switch (udev->speed) {
+ case USB_SPEED_FULL:
+ case USB_SPEED_LOW:
+ case USB_SPEED_HIGH:
+ if (udev->depth > USB_HUB_MAX_DEPTH)
+ return (1);
+ break;
+ case USB_SPEED_SUPER:
+ if (udev->depth > USB_SS_HUB_DEPTH_MAX)
+ return (1);
+ break;
+ default:
+ break;
+ }
+ return (0);
+}
+
+/*------------------------------------------------------------------------*
+ * uhub_explore
+ *
+ * Returns:
+ * 0: Success
+ * Else: Failure
+ *------------------------------------------------------------------------*/
+static usb_error_t
+uhub_explore(struct usb_device *udev)
+{
+ struct usb_hub *hub;
+ struct uhub_softc *sc;
+ struct usb_port *up;
+ usb_error_t err;
+ uint8_t portno;
+ uint8_t x;
+
+ hub = udev->hub;
+ sc = hub->hubsoftc;
+
+ DPRINTFN(11, "udev=%p addr=%d\n", udev, udev->address);
+
+ /* ignore devices that are too deep */
+ if (uhub_is_too_deep(udev))
+ return (USB_ERR_TOO_DEEP);
+
+ /* check if device is suspended */
+ if (udev->flags.self_suspended) {
+ /* need to wait until the child signals resume */
+ DPRINTF("Device is suspended!\n");
+ return (0);
+ }
+ for (x = 0; x != hub->nports; x++) {
+ up = hub->ports + x;
+ portno = x + 1;
+
+ err = uhub_read_port_status(sc, portno);
+ if (err) {
+ /* most likely the HUB is gone */
+ break;
+ }
+ if (sc->sc_st.port_change & UPS_C_OVERCURRENT_INDICATOR) {
+ DPRINTF("Overcurrent on port %u.\n", portno);
+ err = usbd_req_clear_port_feature(
+ udev, NULL, portno, UHF_C_PORT_OVER_CURRENT);
+ if (err) {
+ /* most likely the HUB is gone */
+ break;
+ }
+ }
+ if (!(sc->sc_flags & UHUB_FLAG_DID_EXPLORE)) {
+ /*
+ * Fake a connect status change so that the
+ * status gets checked initially!
+ */
+ sc->sc_st.port_change |=
+ UPS_C_CONNECT_STATUS;
+ }
+ if (sc->sc_st.port_change & UPS_C_PORT_ENABLED) {
+ err = usbd_req_clear_port_feature(
+ udev, NULL, portno, UHF_C_PORT_ENABLE);
+ if (err) {
+ /* most likely the HUB is gone */
+ break;
+ }
+ if (sc->sc_st.port_change & UPS_C_CONNECT_STATUS) {
+ /*
+ * Ignore the port error if the device
+				 * has vanished!
+ */
+ } else if (sc->sc_st.port_status & UPS_PORT_ENABLED) {
+ DPRINTFN(0, "illegal enable change, "
+ "port %d\n", portno);
+ } else {
+
+ if (up->restartcnt == USB_RESTART_MAX) {
+ /* XXX could try another speed ? */
+ DPRINTFN(0, "port error, giving up "
+ "port %d\n", portno);
+ } else {
+ sc->sc_st.port_change |=
+ UPS_C_CONNECT_STATUS;
+ up->restartcnt++;
+ }
+ }
+ }
+ if (sc->sc_st.port_change & UPS_C_CONNECT_STATUS) {
+ err = uhub_reattach_port(sc, portno);
+ if (err) {
+ /* most likely the HUB is gone */
+ break;
+ }
+ }
+ if (sc->sc_st.port_change & (UPS_C_SUSPEND | UPS_C_PORT_LINK_STATE)) {
+ err = uhub_suspend_resume_port(sc, portno);
+ if (err) {
+ /* most likely the HUB is gone */
+ break;
+ }
+ }
+ err = uhub_explore_sub(sc, up);
+ if (err) {
+ /* no device(s) present */
+ continue;
+ }
+ /* explore succeeded - reset restart counter */
+ up->restartcnt = 0;
+ }
+
+ /* initial status checked */
+ sc->sc_flags |= UHUB_FLAG_DID_EXPLORE;
+
+ /* return success */
+ return (USB_ERR_NORMAL_COMPLETION);
+}
+
+static int
+uhub_probe(device_t dev)
+{
+ struct usb_attach_arg *uaa = device_get_ivars(dev);
+
+ if (uaa->usb_mode != USB_MODE_HOST)
+ return (ENXIO);
+
+ /*
+ * The subclass for USB HUBs is currently ignored because it
+ * is 0 for some and 1 for others.
+ */
+ if (uaa->info.bConfigIndex == 0 &&
+ uaa->info.bDeviceClass == UDCLASS_HUB)
+ return (0);
+
+ return (ENXIO);
+}
+
+/* NOTE: The information returned by this function can be wrong. */
+usb_error_t
+uhub_query_info(struct usb_device *udev, uint8_t *pnports, uint8_t *ptt)
+{
+ struct usb_hub_descriptor hubdesc20;
+ struct usb_hub_ss_descriptor hubdesc30;
+ usb_error_t err;
+ uint8_t nports;
+ uint8_t tt;
+
+ if (udev->ddesc.bDeviceClass != UDCLASS_HUB)
+ return (USB_ERR_INVAL);
+
+ nports = 0;
+ tt = 0;
+
+ switch (udev->speed) {
+ case USB_SPEED_LOW:
+ case USB_SPEED_FULL:
+ case USB_SPEED_HIGH:
+ /* assuming that there is one port */
+ err = usbd_req_get_hub_descriptor(udev, NULL, &hubdesc20, 1);
+ if (err) {
+			DPRINTFN(0, "getting USB 2.0 HUB descriptor failed, "
+ "error=%s\n", usbd_errstr(err));
+ break;
+ }
+ nports = hubdesc20.bNbrPorts;
+ if (nports > 127)
+ nports = 127;
+
+ if (udev->speed == USB_SPEED_HIGH)
+ tt = (UGETW(hubdesc20.wHubCharacteristics) >> 5) & 3;
+ break;
+
+ case USB_SPEED_SUPER:
+ err = usbd_req_get_ss_hub_descriptor(udev, NULL, &hubdesc30, 1);
+ if (err) {
+			DPRINTFN(0, "Getting USB 3.0 HUB descriptor failed, "
+ "error=%s\n", usbd_errstr(err));
+ break;
+ }
+ nports = hubdesc30.bNbrPorts;
+ if (nports > 16)
+ nports = 16;
+ break;
+
+ default:
+ err = USB_ERR_INVAL;
+ break;
+ }
+
+ if (pnports != NULL)
+ *pnports = nports;
+
+ if (ptt != NULL)
+ *ptt = tt;
+
+ return (err);
+}
+
+static int
+uhub_attach(device_t dev)
+{
+ struct uhub_softc *sc = device_get_softc(dev);
+ struct usb_attach_arg *uaa = device_get_ivars(dev);
+ struct usb_device *udev = uaa->device;
+ struct usb_device *parent_hub = udev->parent_hub;
+ struct usb_hub *hub;
+ struct usb_hub_descriptor hubdesc20;
+ struct usb_hub_ss_descriptor hubdesc30;
+ uint16_t pwrdly;
+ uint8_t x;
+ uint8_t nports;
+ uint8_t portno;
+ uint8_t removable;
+ uint8_t iface_index;
+ usb_error_t err;
+
+ sc->sc_udev = udev;
+ sc->sc_dev = dev;
+
+ mtx_init(&sc->sc_mtx, "USB HUB mutex", NULL, MTX_DEF);
+
+ snprintf(sc->sc_name, sizeof(sc->sc_name), "%s",
+ device_get_nameunit(dev));
+
+ device_set_usb_desc(dev);
+
+ DPRINTFN(2, "depth=%d selfpowered=%d, parent=%p, "
+ "parent->selfpowered=%d\n",
+ udev->depth,
+ udev->flags.self_powered,
+ parent_hub,
+ parent_hub ?
+ parent_hub->flags.self_powered : 0);
+
+ if (uhub_is_too_deep(udev)) {
+ DPRINTFN(0, "HUB at depth %d, "
+ "exceeds maximum. HUB ignored\n", (int)udev->depth);
+ goto error;
+ }
+
+ if (!udev->flags.self_powered && parent_hub &&
+ !parent_hub->flags.self_powered) {
+ DPRINTFN(0, "Bus powered HUB connected to "
+ "bus powered HUB. HUB ignored\n");
+ goto error;
+ }
+ /* get HUB descriptor */
+
+ DPRINTFN(2, "Getting HUB descriptor\n");
+
+ switch (udev->speed) {
+ case USB_SPEED_LOW:
+ case USB_SPEED_FULL:
+ case USB_SPEED_HIGH:
+ /* assuming that there is one port */
+ err = usbd_req_get_hub_descriptor(udev, NULL, &hubdesc20, 1);
+ if (err) {
+			DPRINTFN(0, "getting USB 2.0 HUB descriptor failed, "
+ "error=%s\n", usbd_errstr(err));
+ goto error;
+ }
+ /* get number of ports */
+ nports = hubdesc20.bNbrPorts;
+
+ /* get power delay */
+ pwrdly = ((hubdesc20.bPwrOn2PwrGood * UHD_PWRON_FACTOR) +
+ USB_EXTRA_POWER_UP_TIME);
+
+ /* get complete HUB descriptor */
+ if (nports >= 8) {
+ /* check number of ports */
+ if (nports > 127) {
+				DPRINTFN(0, "Invalid number of USB 2.0 ports, "
+ "error=%s\n", usbd_errstr(err));
+ goto error;
+ }
+ /* get complete HUB descriptor */
+ err = usbd_req_get_hub_descriptor(udev, NULL, &hubdesc20, nports);
+
+ if (err) {
+				DPRINTFN(0, "Getting USB 2.0 HUB descriptor failed, "
+ "error=%s\n", usbd_errstr(err));
+ goto error;
+ }
+ if (hubdesc20.bNbrPorts != nports) {
+ DPRINTFN(0, "Number of ports changed\n");
+ goto error;
+ }
+ }
+ break;
+ case USB_SPEED_SUPER:
+ if (udev->parent_hub != NULL) {
+ err = usbd_req_set_hub_depth(udev, NULL,
+ udev->depth - 1);
+ if (err) {
+				DPRINTFN(0, "Setting USB 3.0 HUB depth failed, "
+ "error=%s\n", usbd_errstr(err));
+ goto error;
+ }
+ }
+ err = usbd_req_get_ss_hub_descriptor(udev, NULL, &hubdesc30, 1);
+ if (err) {
+			DPRINTFN(0, "Getting USB 3.0 HUB descriptor failed, "
+ "error=%s\n", usbd_errstr(err));
+ goto error;
+ }
+ /* get number of ports */
+ nports = hubdesc30.bNbrPorts;
+
+ /* get power delay */
+ pwrdly = ((hubdesc30.bPwrOn2PwrGood * UHD_PWRON_FACTOR) +
+ USB_EXTRA_POWER_UP_TIME);
+
+ /* get complete HUB descriptor */
+ if (nports >= 8) {
+ /* check number of ports */
+ if (nports > ((udev->parent_hub != NULL) ? 15 : 127)) {
+				DPRINTFN(0, "Invalid number of USB 3.0 ports, "
+ "error=%s\n", usbd_errstr(err));
+ goto error;
+ }
+ /* get complete HUB descriptor */
+ err = usbd_req_get_ss_hub_descriptor(udev, NULL, &hubdesc30, nports);
+
+ if (err) {
+				DPRINTFN(0, "Getting USB 3.0 HUB descriptor failed, "
+ "error=%s\n", usbd_errstr(err));
+ goto error;
+ }
+ if (hubdesc30.bNbrPorts != nports) {
+ DPRINTFN(0, "Number of ports changed\n");
+ goto error;
+ }
+ }
+ break;
+ default:
+ DPRINTF("Assuming HUB has only one port\n");
+ /* default number of ports */
+ nports = 1;
+ /* default power delay */
+ pwrdly = ((10 * UHD_PWRON_FACTOR) + USB_EXTRA_POWER_UP_TIME);
+ break;
+ }
+ if (nports == 0) {
+ DPRINTFN(0, "portless HUB\n");
+ goto error;
+ }
+ hub = malloc(sizeof(hub[0]) + (sizeof(hub->ports[0]) * nports),
+ M_USBDEV, M_WAITOK | M_ZERO);
+
+ if (hub == NULL) {
+ goto error;
+ }
+ udev->hub = hub;
+
+#if USB_HAVE_TT_SUPPORT
+ /* init FULL-speed ISOCHRONOUS schedule */
+ usbd_fs_isoc_schedule_init_all(hub->fs_isoc_schedule);
+#endif
+ /* initialize HUB structure */
+ hub->hubsoftc = sc;
+ hub->explore = &uhub_explore;
+ hub->nports = nports;
+ hub->hubudev = udev;
+
+ /* if self powered hub, give ports maximum current */
+ if (udev->flags.self_powered) {
+ hub->portpower = USB_MAX_POWER;
+ } else {
+ hub->portpower = USB_MIN_POWER;
+ }
+
+ /* set up interrupt pipe */
+ iface_index = 0;
+ if (udev->parent_hub == NULL) {
+ /* root HUB is special */
+ err = 0;
+ } else {
+ /* normal HUB */
+ err = usbd_transfer_setup(udev, &iface_index, sc->sc_xfer,
+ uhub_config, UHUB_N_TRANSFER, sc, &sc->sc_mtx);
+ }
+ if (err) {
+ DPRINTFN(0, "cannot setup interrupt transfer, "
+ "errstr=%s\n", usbd_errstr(err));
+ goto error;
+ }
+ /* wait with power off for a while */
+ usb_pause_mtx(NULL, USB_MS_TO_TICKS(USB_POWER_DOWN_TIME));
+
+ /*
+ * To have the best chance of success we do things in the exact same
+ * order as Windoze98. This should not be necessary, but some
+ * devices do not follow the USB specs to the letter.
+ *
+ * These are the events on the bus when a hub is attached:
+ * Get device and config descriptors (see attach code)
+ * Get hub descriptor (see above)
+ * For all ports
+ * turn on power
+ * wait for power to become stable
+ * (all below happens in explore code)
+ * For all ports
+ * clear C_PORT_CONNECTION
+ * For all ports
+ * get port status
+ * if device connected
+ * wait 100 ms
+ * turn on reset
+ * wait
+ * clear C_PORT_RESET
+ * get port status
+ * proceed with device attachment
+ */
+
+ /* XXX should check for none, individual, or ganged power? */
+
+ removable = 0;
+
+ for (x = 0; x != nports; x++) {
+ /* set up data structures */
+ struct usb_port *up = hub->ports + x;
+
+ up->device_index = 0;
+ up->restartcnt = 0;
+ portno = x + 1;
+
+ /* check if port is removable */
+ switch (udev->speed) {
+ case USB_SPEED_LOW:
+ case USB_SPEED_FULL:
+ case USB_SPEED_HIGH:
+ if (!UHD_NOT_REMOV(&hubdesc20, portno))
+ removable++;
+ break;
+ case USB_SPEED_SUPER:
+ if (!UHD_NOT_REMOV(&hubdesc30, portno))
+ removable++;
+ break;
+ default:
+ DPRINTF("Assuming removable port\n");
+ removable++;
+ break;
+ }
+ if (!err) {
+ /* turn the power on */
+ err = usbd_req_set_port_feature(udev, NULL,
+ portno, UHF_PORT_POWER);
+ }
+ if (err) {
+ DPRINTFN(0, "port %d power on failed, %s\n",
+ portno, usbd_errstr(err));
+ }
+ DPRINTF("turn on port %d power\n",
+ portno);
+
+ /* wait for stable power */
+ usb_pause_mtx(NULL, USB_MS_TO_TICKS(pwrdly));
+ }
+
+ device_printf(dev, "%d port%s with %d "
+ "removable, %s powered\n", nports, (nports != 1) ? "s" : "",
+ removable, udev->flags.self_powered ? "self" : "bus");
+
+ /* Start the interrupt endpoint, if any */
+
+ if (sc->sc_xfer[0] != NULL) {
+ mtx_lock(&sc->sc_mtx);
+ usbd_transfer_start(sc->sc_xfer[0]);
+ mtx_unlock(&sc->sc_mtx);
+ }
+
+ /* Enable automatic power save on all USB HUBs */
+
+ usbd_set_power_mode(udev, USB_POWER_MODE_SAVE);
+
+ return (0);
+
+error:
+ usbd_transfer_unsetup(sc->sc_xfer, UHUB_N_TRANSFER);
+
+ if (udev->hub) {
+ free(udev->hub, M_USBDEV);
+ udev->hub = NULL;
+ }
+
+ mtx_destroy(&sc->sc_mtx);
+
+ return (ENXIO);
+}
+
+/*
+ * Called from process context when the hub is gone.
+ * Detach all devices on active ports.
+ */
+static int
+uhub_detach(device_t dev)
+{
+ struct uhub_softc *sc = device_get_softc(dev);
+ struct usb_hub *hub = sc->sc_udev->hub;
+ struct usb_device *child;
+ uint8_t x;
+
+ if (hub == NULL) /* must be partially working */
+ return (0);
+
+ /* Make sure interrupt transfer is gone. */
+ usbd_transfer_unsetup(sc->sc_xfer, UHUB_N_TRANSFER);
+
+ /* Detach all ports */
+ for (x = 0; x != hub->nports; x++) {
+
+ child = usb_bus_port_get_device(sc->sc_udev->bus, hub->ports + x);
+
+ if (child == NULL) {
+ continue;
+ }
+
+ /*
+ * Free USB device and all subdevices, if any.
+ */
+ usb_free_device(child, 0);
+ }
+
+ free(hub, M_USBDEV);
+ sc->sc_udev->hub = NULL;
+
+ mtx_destroy(&sc->sc_mtx);
+
+ return (0);
+}
+
+static int
+uhub_suspend(device_t dev)
+{
+ DPRINTF("\n");
+ /* Sub-devices are not suspended here! */
+ return (0);
+}
+
+static int
+uhub_resume(device_t dev)
+{
+ DPRINTF("\n");
+ /* Sub-devices are not resumed here! */
+ return (0);
+}
+
+static void
+uhub_driver_added(device_t dev, driver_t *driver)
+{
+ usb_needs_explore_all();
+}
+
+struct hub_result {
+ struct usb_device *udev;
+ uint8_t portno;
+ uint8_t iface_index;
+};
+
+static void
+uhub_find_iface_index(struct usb_hub *hub, device_t child,
+ struct hub_result *res)
+{
+ struct usb_interface *iface;
+ struct usb_device *udev;
+ uint8_t nports;
+ uint8_t x;
+ uint8_t i;
+
+ nports = hub->nports;
+ for (x = 0; x != nports; x++) {
+ udev = usb_bus_port_get_device(hub->hubudev->bus,
+ hub->ports + x);
+ if (!udev) {
+ continue;
+ }
+ for (i = 0; i != USB_IFACE_MAX; i++) {
+ iface = usbd_get_iface(udev, i);
+ if (iface &&
+ (iface->subdev == child)) {
+ res->iface_index = i;
+ res->udev = udev;
+ res->portno = x + 1;
+ return;
+ }
+ }
+ }
+ res->iface_index = 0;
+ res->udev = NULL;
+ res->portno = 0;
+}
+
+static int
+uhub_child_location_string(device_t parent, device_t child,
+ char *buf, size_t buflen)
+{
+ struct uhub_softc *sc;
+ struct usb_hub *hub;
+ struct hub_result res;
+
+ if (!device_is_attached(parent)) {
+ if (buflen)
+ buf[0] = 0;
+ return (0);
+ }
+
+ sc = device_get_softc(parent);
+ hub = sc->sc_udev->hub;
+
+ mtx_lock(&Giant);
+ uhub_find_iface_index(hub, child, &res);
+ if (!res.udev) {
+ DPRINTF("device not on hub\n");
+ if (buflen) {
+ buf[0] = '\0';
+ }
+ goto done;
+ }
+ snprintf(buf, buflen, "bus=%u hubaddr=%u port=%u devaddr=%u interface=%u",
+ (res.udev->parent_hub != NULL) ? res.udev->parent_hub->device_index : 0,
+ res.portno, device_get_unit(res.udev->bus->bdev),
+ res.udev->device_index, res.iface_index);
+done:
+ mtx_unlock(&Giant);
+
+ return (0);
+}
+
+static int
+uhub_child_pnpinfo_string(device_t parent, device_t child,
+ char *buf, size_t buflen)
+{
+ struct uhub_softc *sc;
+ struct usb_hub *hub;
+ struct usb_interface *iface;
+ struct hub_result res;
+
+ if (!device_is_attached(parent)) {
+ if (buflen)
+ buf[0] = 0;
+ return (0);
+ }
+
+ sc = device_get_softc(parent);
+ hub = sc->sc_udev->hub;
+
+ mtx_lock(&Giant);
+ uhub_find_iface_index(hub, child, &res);
+ if (!res.udev) {
+ DPRINTF("device not on hub\n");
+ if (buflen) {
+ buf[0] = '\0';
+ }
+ goto done;
+ }
+ iface = usbd_get_iface(res.udev, res.iface_index);
+ if (iface && iface->idesc) {
+ snprintf(buf, buflen, "vendor=0x%04x product=0x%04x "
+ "devclass=0x%02x devsubclass=0x%02x "
+ "sernum=\"%s\" "
+ "release=0x%04x "
+ "intclass=0x%02x intsubclass=0x%02x",
+ UGETW(res.udev->ddesc.idVendor),
+ UGETW(res.udev->ddesc.idProduct),
+ res.udev->ddesc.bDeviceClass,
+ res.udev->ddesc.bDeviceSubClass,
+ usb_get_serial(res.udev),
+ UGETW(res.udev->ddesc.bcdDevice),
+ iface->idesc->bInterfaceClass,
+ iface->idesc->bInterfaceSubClass);
+ } else {
+ if (buflen) {
+ buf[0] = '\0';
+ }
+ goto done;
+ }
+done:
+ mtx_unlock(&Giant);
+
+ return (0);
+}
+
+/*
+ * The USB Transaction Translator:
+ * ===============================
+ *
+ * When doing LOW- and FULL-speed USB transfers across a HIGH-speed
+ * USB HUB, bandwidth must be allocated for ISOCHRONOUS and INTERRUPT
+ * USB transfers. To utilize bandwidth dynamically the "scatter and
+ * gather" principle must be applied. This means that bandwidth must
+ * be divided into equal parts. With regard to USB, all data is
+ * transferred in smaller packets of length "wMaxPacketSize". The
+ * problem, however, is that "wMaxPacketSize" is not a constant!
+ *
+ * The bandwidth scheduler which I have implemented will simply pack
+ * the USB transfers back to back until there is no more space in the
+ * schedule. Out of the 8 microframes which the USB 2.0 standard
+ * provides, only 6 are available for non-HIGH-speed devices. I have
+ * reserved the first 4 microframes for ISOCHRONOUS transfers. The
+ * last 2 microframes I have reserved for INTERRUPT transfers. Without
+ * this division, it is very difficult to allocate and free bandwidth
+ * dynamically.
+ *
+ * NOTE about the Transaction Translator in USB HUBs:
+ *
+ * USB HUBs have a very simple Transaction Translator, that will
+ * simply pipeline all the SPLIT transactions. That means that the
+ * transactions will be executed in the order they are queued!
+ *
+ */
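+
+/*
+ * Illustrative layout implied by the note above (an interpretation,
+ * not authoritative): out of the eight HIGH-speed microframes per
+ * 1 ms frame, microframes 0..3 carry FULL/LOW-speed ISOCHRONOUS
+ * traffic and microframes 4..5 carry INTERRUPT traffic; compare the
+ * "USB_FS_ISOC_UFRAME_MAX" based slot search in
+ * usb_hs_bandwidth_adjust() below.
+ */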
+
+/*------------------------------------------------------------------------*
+ * usb_intr_find_best_slot
+ *
+ * Return value:
+ * The best Transaction Translation slot for an interrupt endpoint.
+ *------------------------------------------------------------------------*/
+static uint8_t
+usb_intr_find_best_slot(usb_size_t *ptr, uint8_t start,
+ uint8_t end, uint8_t mask)
+{
+	usb_size_t min = 0 - 1;	/* unsigned wrap-around: start at maximum */
+ usb_size_t sum;
+ uint8_t x;
+ uint8_t y;
+ uint8_t z;
+
+ y = 0;
+
+ /* find the last slot with lesser used bandwidth */
+
+ for (x = start; x < end; x++) {
+
+ sum = 0;
+
+ /* compute sum of bandwidth */
+ for (z = x; z < end; z++) {
+ if (mask & (1U << (z - x)))
+ sum += ptr[z];
+ }
+
+ /* check if the current multi-slot is more optimal */
+ if (min >= sum) {
+ min = sum;
+ y = x;
+ }
+
+ /* check if the mask is about to be shifted out */
+ if (mask & (1U << (end - 1 - x)))
+ break;
+ }
+ return (y);
+}
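+
+/*
+ * Worked example (illustrative): with ptr = {10, 4, 4}, start = 0,
+ * end = 3 and mask = 0x01, the per-slot sums are 10, 4 and 4. Since
+ * the "min >= sum" comparison lets later ties win, slot 2 is
+ * returned, matching the "last slot with lesser used bandwidth"
+ * comment above.
+ */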
+
+/*------------------------------------------------------------------------*
+ * usb_hs_bandwidth_adjust
+ *
+ * This function will update the bandwidth usage for the microframe
+ * having index "slot" by "len" bytes. "len" can be negative. If the
+ * "slot" argument is greater than or equal to "USB_HS_MICRO_FRAMES_MAX",
+ * the "slot" argument will be replaced by the slot having the least
+ * used bandwidth. The "mask" argument is used for multi-slot allocations.
+ *
+ * Returns:
+ * The slot in which the bandwidth update was done: 0..7
+ *------------------------------------------------------------------------*/
+static uint8_t
+usb_hs_bandwidth_adjust(struct usb_device *udev, int16_t len,
+ uint8_t slot, uint8_t mask)
+{
+ struct usb_bus *bus = udev->bus;
+ struct usb_hub *hub;
+ enum usb_dev_speed speed;
+ uint8_t x;
+
+ USB_BUS_LOCK_ASSERT(bus, MA_OWNED);
+
+ speed = usbd_get_speed(udev);
+
+ switch (speed) {
+ case USB_SPEED_LOW:
+ case USB_SPEED_FULL:
+ if (speed == USB_SPEED_LOW) {
+ len *= 8;
+ }
+ /*
+ * The Host Controller Driver should have
+ * performed checks so that the lookup
+ * below does not result in a NULL pointer
+ * access.
+ */
+
+ hub = udev->parent_hs_hub->hub;
+ if (slot >= USB_HS_MICRO_FRAMES_MAX) {
+ slot = usb_intr_find_best_slot(hub->uframe_usage,
+ USB_FS_ISOC_UFRAME_MAX, 6, mask);
+ }
+ for (x = slot; x < 8; x++) {
+ if (mask & (1U << (x - slot))) {
+ hub->uframe_usage[x] += len;
+ bus->uframe_usage[x] += len;
+ }
+ }
+ break;
+ default:
+ if (slot >= USB_HS_MICRO_FRAMES_MAX) {
+ slot = usb_intr_find_best_slot(bus->uframe_usage, 0,
+ USB_HS_MICRO_FRAMES_MAX, mask);
+ }
+ for (x = slot; x < 8; x++) {
+ if (mask & (1U << (x - slot))) {
+ bus->uframe_usage[x] += len;
+ }
+ }
+ break;
+ }
+ return (slot);
+}
+
+/*------------------------------------------------------------------------*
+ * usb_hs_bandwidth_alloc
+ *
+ * This function is a wrapper function for "usb_hs_bandwidth_adjust()".
+ *------------------------------------------------------------------------*/
+void
+usb_hs_bandwidth_alloc(struct usb_xfer *xfer)
+{
+ struct usb_device *udev;
+ uint8_t slot;
+ uint8_t mask;
+ uint8_t speed;
+
+ udev = xfer->xroot->udev;
+
+ if (udev->flags.usb_mode != USB_MODE_HOST)
+ return; /* not supported */
+
+ xfer->endpoint->refcount_bw++;
+ if (xfer->endpoint->refcount_bw != 1)
+ return; /* already allocated */
+
+ speed = usbd_get_speed(udev);
+
+ switch (xfer->endpoint->edesc->bmAttributes & UE_XFERTYPE) {
+ case UE_INTERRUPT:
+ /* allocate a microframe slot */
+
+ mask = 0x01;
+ slot = usb_hs_bandwidth_adjust(udev,
+ xfer->max_frame_size, USB_HS_MICRO_FRAMES_MAX, mask);
+
+ xfer->endpoint->usb_uframe = slot;
+ xfer->endpoint->usb_smask = mask << slot;
+
+ if ((speed != USB_SPEED_FULL) &&
+ (speed != USB_SPEED_LOW)) {
+ xfer->endpoint->usb_cmask = 0x00 ;
+ } else {
+ xfer->endpoint->usb_cmask = (-(0x04 << slot)) & 0xFE;
+ }
+ break;
+
+ case UE_ISOCHRONOUS:
+ switch (usbd_xfer_get_fps_shift(xfer)) {
+ case 0:
+ mask = 0xFF;
+ break;
+ case 1:
+ mask = 0x55;
+ break;
+ case 2:
+ mask = 0x11;
+ break;
+ default:
+ mask = 0x01;
+ break;
+ }
+
+ /* allocate a microframe multi-slot */
+
+ slot = usb_hs_bandwidth_adjust(udev,
+ xfer->max_frame_size, USB_HS_MICRO_FRAMES_MAX, mask);
+
+ xfer->endpoint->usb_uframe = slot;
+ xfer->endpoint->usb_cmask = 0;
+ xfer->endpoint->usb_smask = mask << slot;
+ break;
+
+ default:
+ xfer->endpoint->usb_uframe = 0;
+ xfer->endpoint->usb_cmask = 0;
+ xfer->endpoint->usb_smask = 0;
+ break;
+ }
+
+ DPRINTFN(11, "slot=%d, mask=0x%02x\n",
+ xfer->endpoint->usb_uframe,
+ xfer->endpoint->usb_smask >> xfer->endpoint->usb_uframe);
+}
+
+/*------------------------------------------------------------------------*
+ * usb_hs_bandwidth_free
+ *
+ * This function is a wrapper function for "usb_hs_bandwidth_adjust()".
+ *------------------------------------------------------------------------*/
+void
+usb_hs_bandwidth_free(struct usb_xfer *xfer)
+{
+ struct usb_device *udev;
+ uint8_t slot;
+ uint8_t mask;
+
+ udev = xfer->xroot->udev;
+
+ if (udev->flags.usb_mode != USB_MODE_HOST)
+ return; /* not supported */
+
+ xfer->endpoint->refcount_bw--;
+ if (xfer->endpoint->refcount_bw != 0)
+ return; /* still allocated */
+
+ switch (xfer->endpoint->edesc->bmAttributes & UE_XFERTYPE) {
+ case UE_INTERRUPT:
+ case UE_ISOCHRONOUS:
+
+ slot = xfer->endpoint->usb_uframe;
+ mask = xfer->endpoint->usb_smask;
+
+ /* free microframe slot(s): */
+ usb_hs_bandwidth_adjust(udev,
+ -xfer->max_frame_size, slot, mask >> slot);
+
+ DPRINTFN(11, "slot=%d, mask=0x%02x\n",
+ slot, mask >> slot);
+
+ xfer->endpoint->usb_uframe = 0;
+ xfer->endpoint->usb_cmask = 0;
+ xfer->endpoint->usb_smask = 0;
+ break;
+
+ default:
+ break;
+ }
+}
+
+/*------------------------------------------------------------------------*
+ * usbd_fs_isoc_schedule_init_sub
+ *
+ * This function initialises a USB FULL speed isochronous schedule
+ * entry.
+ *------------------------------------------------------------------------*/
+#if USB_HAVE_TT_SUPPORT
+static void
+usbd_fs_isoc_schedule_init_sub(struct usb_fs_isoc_schedule *fss)
+{
+ fss->total_bytes = (USB_FS_ISOC_UFRAME_MAX *
+ USB_FS_BYTES_PER_HS_UFRAME);
+ fss->frame_bytes = (USB_FS_BYTES_PER_HS_UFRAME);
+ fss->frame_slot = 0;
+}
+#endif
+
+/*------------------------------------------------------------------------*
+ * usbd_fs_isoc_schedule_init_all
+ *
+ * This function will reset the complete USB FULL speed isochronous
+ * bandwidth schedule.
+ *------------------------------------------------------------------------*/
+#if USB_HAVE_TT_SUPPORT
+void
+usbd_fs_isoc_schedule_init_all(struct usb_fs_isoc_schedule *fss)
+{
+ struct usb_fs_isoc_schedule *fss_end = fss + USB_ISOC_TIME_MAX;
+
+ while (fss != fss_end) {
+ usbd_fs_isoc_schedule_init_sub(fss);
+ fss++;
+ }
+}
+#endif
+
+/*------------------------------------------------------------------------*
+ * usb_isoc_time_expand
+ *
+ * This function will expand the time counter from 7-bit to 16-bit.
+ *
+ * Returns:
+ * 16-bit isochronous time counter.
+ *------------------------------------------------------------------------*/
+uint16_t
+usb_isoc_time_expand(struct usb_bus *bus, uint16_t isoc_time_curr)
+{
+ uint16_t rem;
+
+ USB_BUS_LOCK_ASSERT(bus, MA_OWNED);
+
+ rem = bus->isoc_time_last & (USB_ISOC_TIME_MAX - 1);
+
+ isoc_time_curr &= (USB_ISOC_TIME_MAX - 1);
+
+ if (isoc_time_curr < rem) {
+ /* the time counter wrapped around */
+ bus->isoc_time_last += USB_ISOC_TIME_MAX;
+ }
+ /* update the remainder */
+
+ bus->isoc_time_last &= ~(USB_ISOC_TIME_MAX - 1);
+ bus->isoc_time_last |= isoc_time_curr;
+
+ return (bus->isoc_time_last);
+}
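+
+/*
+ * Editor's note: a worked example, assuming "USB_ISOC_TIME_MAX" is 128
+ * (hypothetical values, not part of the original source). If
+ * "isoc_time_last" is 300, the remainder is 300 & 127 = 44. A current
+ * 7-bit time of 10 is less than 44, so the counter has wrapped:
+ * "isoc_time_last" becomes 300 + 128 = 428, and the returned value is
+ * (428 & ~127) | 10 = 384 + 10 = 394.
+ */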
+
+/*------------------------------------------------------------------------*
+ * usbd_fs_isoc_schedule_isoc_time_expand
+ *
+ * This function does two things. First it expands the passed
+ * isochronous time, which is the return value. Then it stores the
+ * start and the end of the current FULL speed isochronous schedule
+ * in the "pp_start" and "pp_end" arguments.
+ *
+ * Returns:
+ * Expanded version of "isoc_time".
+ *
+ * NOTE: This function depends on being called regularly with
+ * intervals less than "USB_ISOC_TIME_MAX".
+ *------------------------------------------------------------------------*/
+#if USB_HAVE_TT_SUPPORT
+uint16_t
+usbd_fs_isoc_schedule_isoc_time_expand(struct usb_device *udev,
+ struct usb_fs_isoc_schedule **pp_start,
+ struct usb_fs_isoc_schedule **pp_end,
+ uint16_t isoc_time)
+{
+ struct usb_fs_isoc_schedule *fss_end;
+ struct usb_fs_isoc_schedule *fss_a;
+ struct usb_fs_isoc_schedule *fss_b;
+ struct usb_hub *hs_hub;
+
+ isoc_time = usb_isoc_time_expand(udev->bus, isoc_time);
+
+	/* be NULL safe, in case there is no parent HS hub */
+	if (udev->parent_hs_hub != NULL)
+		hs_hub = udev->parent_hs_hub->hub;
+	else
+		hs_hub = NULL;
+
+ if (hs_hub != NULL) {
+
+ fss_a = hs_hub->fs_isoc_schedule +
+ (hs_hub->isoc_last_time % USB_ISOC_TIME_MAX);
+
+ hs_hub->isoc_last_time = isoc_time;
+
+ fss_b = hs_hub->fs_isoc_schedule +
+ (isoc_time % USB_ISOC_TIME_MAX);
+
+ fss_end = hs_hub->fs_isoc_schedule + USB_ISOC_TIME_MAX;
+
+ *pp_start = hs_hub->fs_isoc_schedule;
+ *pp_end = fss_end;
+
+ while (fss_a != fss_b) {
+ if (fss_a == fss_end) {
+ fss_a = hs_hub->fs_isoc_schedule;
+ continue;
+ }
+ usbd_fs_isoc_schedule_init_sub(fss_a);
+ fss_a++;
+ }
+
+ } else {
+
+ *pp_start = NULL;
+ *pp_end = NULL;
+ }
+ return (isoc_time);
+}
+#endif
+
+/*------------------------------------------------------------------------*
+ * usbd_fs_isoc_schedule_alloc
+ *
+ * This function will allocate bandwidth for an isochronous FULL speed
+ * transaction in the FULL speed schedule. The microframe slot where
+ * the transaction should be started is stored in the byte pointed to
+ * by "pstart". The "len" argument specifies the length of the
+ * transaction in bytes.
+ *
+ * Returns:
+ * 0: Success
+ * Else: Error
+ *------------------------------------------------------------------------*/
+#if USB_HAVE_TT_SUPPORT
+uint8_t
+usbd_fs_isoc_schedule_alloc(struct usb_fs_isoc_schedule *fss,
+ uint8_t *pstart, uint16_t len)
+{
+ uint8_t slot = fss->frame_slot;
+
+ /* Compute overhead and bit-stuffing */
+
+ len += 8;
+
+ len *= 7;
+ len /= 6;
+
+ if (len > fss->total_bytes) {
+ *pstart = 0; /* set some dummy value */
+ return (1); /* error */
+ }
+ if (len > 0) {
+
+ fss->total_bytes -= len;
+
+ while (len >= fss->frame_bytes) {
+ len -= fss->frame_bytes;
+ fss->frame_bytes = USB_FS_BYTES_PER_HS_UFRAME;
+ fss->frame_slot++;
+ }
+
+ fss->frame_bytes -= len;
+ }
+ *pstart = slot;
+ return (0); /* success */
+}
+#endif
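+
+/*
+ * Editor's note: a worked example of the overhead computation above
+ * (hypothetical values, not part of the original source). For a
+ * 64-byte transaction the reserved length becomes
+ * (64 + 8) * 7 / 6 = 84 bytes, which accounts for packet overhead
+ * plus worst-case bit-stuffing expansion.
+ */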
+
+/*------------------------------------------------------------------------*
+ * usb_bus_port_get_device
+ *
+ * This function is NULL safe.
+ *------------------------------------------------------------------------*/
+struct usb_device *
+usb_bus_port_get_device(struct usb_bus *bus, struct usb_port *up)
+{
+ if ((bus == NULL) || (up == NULL)) {
+ /* be NULL safe */
+ return (NULL);
+ }
+ if (up->device_index == 0) {
+ /* nothing to do */
+ return (NULL);
+ }
+ return (bus->devices[up->device_index]);
+}
+
+/*------------------------------------------------------------------------*
+ * usb_bus_port_set_device
+ *
+ * This function is NULL safe.
+ *------------------------------------------------------------------------*/
+void
+usb_bus_port_set_device(struct usb_bus *bus, struct usb_port *up,
+ struct usb_device *udev, uint8_t device_index)
+{
+ if (bus == NULL) {
+ /* be NULL safe */
+ return;
+ }
+ /*
+ * There is only one case where we don't
+ * have an USB port, and that is the Root Hub!
+ */
+ if (up) {
+ if (udev) {
+ up->device_index = device_index;
+ } else {
+ device_index = up->device_index;
+ up->device_index = 0;
+ }
+ }
+ /*
+ * Make relationships to our new device
+ */
+ if (device_index != 0) {
+#if USB_HAVE_UGEN
+ mtx_lock(&usb_ref_lock);
+#endif
+ bus->devices[device_index] = udev;
+#if USB_HAVE_UGEN
+ mtx_unlock(&usb_ref_lock);
+#endif
+ }
+ /*
+ * Debug print
+ */
+ DPRINTFN(2, "bus %p devices[%u] = %p\n", bus, device_index, udev);
+}
+
+/*------------------------------------------------------------------------*
+ * usb_needs_explore
+ *
+ * This function is called when the USB event thread needs to run.
+ *------------------------------------------------------------------------*/
+void
+usb_needs_explore(struct usb_bus *bus, uint8_t do_probe)
+{
+ uint8_t do_unlock;
+
+ DPRINTF("\n");
+
+ if (bus == NULL) {
+ DPRINTF("No bus pointer!\n");
+ return;
+ }
+ if ((bus->devices == NULL) ||
+ (bus->devices[USB_ROOT_HUB_ADDR] == NULL)) {
+ DPRINTF("No root HUB\n");
+ return;
+ }
+ if (mtx_owned(&bus->bus_mtx)) {
+ do_unlock = 0;
+ } else {
+ USB_BUS_LOCK(bus);
+ do_unlock = 1;
+ }
+ if (do_probe) {
+ bus->do_probe = 1;
+ }
+ if (usb_proc_msignal(&bus->explore_proc,
+ &bus->explore_msg[0], &bus->explore_msg[1])) {
+ /* ignore */
+ }
+ if (do_unlock) {
+ USB_BUS_UNLOCK(bus);
+ }
+}
+
+/*------------------------------------------------------------------------*
+ * usb_needs_explore_all
+ *
+ * This function is called whenever a new driver is loaded and causes
+ * all USB busses to be re-explored.
+ *------------------------------------------------------------------------*/
+void
+usb_needs_explore_all(void)
+{
+ struct usb_bus *bus;
+ devclass_t dc;
+ device_t dev;
+ int max;
+
+ DPRINTFN(3, "\n");
+
+ dc = usb_devclass_ptr;
+ if (dc == NULL) {
+ DPRINTFN(0, "no devclass\n");
+ return;
+ }
+ /*
+	 * Explore all USB busses in parallel.
+ */
+ max = devclass_get_maxunit(dc);
+ while (max >= 0) {
+ dev = devclass_get_device(dc, max);
+ if (dev) {
+ bus = device_get_softc(dev);
+ if (bus) {
+ usb_needs_explore(bus, 1);
+ }
+ }
+ max--;
+ }
+}
+
+/*------------------------------------------------------------------------*
+ * usb_bus_power_update
+ *
+ * This function will ensure that all USB devices on the given bus are
+ * properly suspended or resumed according to the device transfer
+ * state.
+ *------------------------------------------------------------------------*/
+#if USB_HAVE_POWERD
+void
+usb_bus_power_update(struct usb_bus *bus)
+{
+ usb_needs_explore(bus, 0 /* no probe */ );
+}
+#endif
+
+/*------------------------------------------------------------------------*
+ * usbd_transfer_power_ref
+ *
+ * This function will modify the power save reference counts and
+ * wakeup the USB device associated with the given USB transfer, if
+ * needed.
+ *------------------------------------------------------------------------*/
+#if USB_HAVE_POWERD
+void
+usbd_transfer_power_ref(struct usb_xfer *xfer, int val)
+{
+ static const usb_power_mask_t power_mask[4] = {
+ [UE_CONTROL] = USB_HW_POWER_CONTROL,
+ [UE_BULK] = USB_HW_POWER_BULK,
+ [UE_INTERRUPT] = USB_HW_POWER_INTERRUPT,
+ [UE_ISOCHRONOUS] = USB_HW_POWER_ISOC,
+ };
+ struct usb_device *udev;
+ uint8_t needs_explore;
+ uint8_t needs_hw_power;
+ uint8_t xfer_type;
+
+ udev = xfer->xroot->udev;
+
+ if (udev->device_index == USB_ROOT_HUB_ADDR) {
+ /* no power save for root HUB */
+ return;
+ }
+ USB_BUS_LOCK(udev->bus);
+
+ xfer_type = xfer->endpoint->edesc->bmAttributes & UE_XFERTYPE;
+
+ udev->pwr_save.last_xfer_time = ticks;
+ udev->pwr_save.type_refs[xfer_type] += val;
+
+ if (xfer->flags_int.control_xfr) {
+ udev->pwr_save.read_refs += val;
+ if (xfer->flags_int.usb_mode == USB_MODE_HOST) {
+ /*
+ * It is not allowed to suspend during a
+ * control transfer:
+ */
+ udev->pwr_save.write_refs += val;
+ }
+ } else if (USB_GET_DATA_ISREAD(xfer)) {
+ udev->pwr_save.read_refs += val;
+ } else {
+ udev->pwr_save.write_refs += val;
+ }
+
+ if (val > 0) {
+ if (udev->flags.self_suspended)
+ needs_explore = usb_peer_should_wakeup(udev);
+ else
+ needs_explore = 0;
+
+ if (!(udev->bus->hw_power_state & power_mask[xfer_type])) {
+ DPRINTF("Adding type %u to power state\n", xfer_type);
+ udev->bus->hw_power_state |= power_mask[xfer_type];
+ needs_hw_power = 1;
+ } else {
+ needs_hw_power = 0;
+ }
+ } else {
+ needs_explore = 0;
+ needs_hw_power = 0;
+ }
+
+ USB_BUS_UNLOCK(udev->bus);
+
+ if (needs_explore) {
+ DPRINTF("update\n");
+ usb_bus_power_update(udev->bus);
+ } else if (needs_hw_power) {
+ DPRINTF("needs power\n");
+ if (udev->bus->methods->set_hw_power != NULL) {
+ (udev->bus->methods->set_hw_power) (udev->bus);
+ }
+ }
+}
+#endif
+
+/*------------------------------------------------------------------------*
+ * usb_peer_should_wakeup
+ *
+ * This function returns non-zero if the current device should wake up.
+ *------------------------------------------------------------------------*/
+static uint8_t
+usb_peer_should_wakeup(struct usb_device *udev)
+{
+ return ((udev->power_mode == USB_POWER_MODE_ON) ||
+ (udev->driver_added_refcount != udev->bus->driver_added_refcount) ||
+ (udev->re_enumerate_wait != 0) ||
+ (udev->pwr_save.type_refs[UE_ISOCHRONOUS] != 0) ||
+ (udev->pwr_save.write_refs != 0) ||
+ ((udev->pwr_save.read_refs != 0) &&
+ (udev->flags.usb_mode == USB_MODE_HOST) &&
+ (usb_device_20_compatible(udev) != 0) &&
+ (usb_peer_can_wakeup(udev) == 0)));
+}
+
+/*------------------------------------------------------------------------*
+ * usb_bus_powerd
+ *
+ * This function implements the USB power daemon and is called
+ * regularly from the USB explore thread.
+ *------------------------------------------------------------------------*/
+#if USB_HAVE_POWERD
+void
+usb_bus_powerd(struct usb_bus *bus)
+{
+ struct usb_device *udev;
+ usb_ticks_t temp;
+ usb_ticks_t limit;
+ usb_ticks_t mintime;
+ usb_size_t type_refs[5];
+ uint8_t x;
+
+ limit = usb_power_timeout;
+ if (limit == 0)
+ limit = hz;
+ else if (limit > 255)
+ limit = 255 * hz;
+ else
+ limit = limit * hz;
+
+ DPRINTF("bus=%p\n", bus);
+
+ USB_BUS_LOCK(bus);
+
+ /*
+ * The root HUB device is never suspended
+ * and we simply skip it.
+ */
+ for (x = USB_ROOT_HUB_ADDR + 1;
+ x != bus->devices_max; x++) {
+
+ udev = bus->devices[x];
+ if (udev == NULL)
+ continue;
+
+ temp = ticks - udev->pwr_save.last_xfer_time;
+
+ if (usb_peer_should_wakeup(udev)) {
+ /* check if we are suspended */
+ if (udev->flags.self_suspended != 0) {
+ USB_BUS_UNLOCK(bus);
+ usb_dev_resume_peer(udev);
+ USB_BUS_LOCK(bus);
+ }
+ } else if ((temp >= limit) &&
+ (udev->flags.usb_mode == USB_MODE_HOST) &&
+ (udev->flags.self_suspended == 0)) {
+ /* try to do suspend */
+
+ USB_BUS_UNLOCK(bus);
+ usb_dev_suspend_peer(udev);
+ USB_BUS_LOCK(bus);
+ }
+ }
+
+ /* reset counters */
+
+	mintime = (usb_ticks_t)-1;	/* largest possible value */
+ type_refs[0] = 0;
+ type_refs[1] = 0;
+ type_refs[2] = 0;
+ type_refs[3] = 0;
+ type_refs[4] = 0;
+
+ /* Re-loop all the devices to get the actual state */
+
+ for (x = USB_ROOT_HUB_ADDR + 1;
+ x != bus->devices_max; x++) {
+
+ udev = bus->devices[x];
+ if (udev == NULL)
+ continue;
+
+ /* we found a non-Root-Hub USB device */
+ type_refs[4] += 1;
+
+ /* "last_xfer_time" can be updated by a resume */
+ temp = ticks - udev->pwr_save.last_xfer_time;
+
+ /*
+ * Compute minimum time since last transfer for the complete
+ * bus:
+ */
+ if (temp < mintime)
+ mintime = temp;
+
+ if (udev->flags.self_suspended == 0) {
+ type_refs[0] += udev->pwr_save.type_refs[0];
+ type_refs[1] += udev->pwr_save.type_refs[1];
+ type_refs[2] += udev->pwr_save.type_refs[2];
+ type_refs[3] += udev->pwr_save.type_refs[3];
+ }
+ }
+
+ if (mintime >= (1 * hz)) {
+ /* recompute power masks */
+ DPRINTF("Recomputing power masks\n");
+ bus->hw_power_state = 0;
+ if (type_refs[UE_CONTROL] != 0)
+ bus->hw_power_state |= USB_HW_POWER_CONTROL;
+ if (type_refs[UE_BULK] != 0)
+ bus->hw_power_state |= USB_HW_POWER_BULK;
+ if (type_refs[UE_INTERRUPT] != 0)
+ bus->hw_power_state |= USB_HW_POWER_INTERRUPT;
+ if (type_refs[UE_ISOCHRONOUS] != 0)
+ bus->hw_power_state |= USB_HW_POWER_ISOC;
+ if (type_refs[4] != 0)
+ bus->hw_power_state |= USB_HW_POWER_NON_ROOT_HUB;
+ }
+ USB_BUS_UNLOCK(bus);
+
+ if (bus->methods->set_hw_power != NULL) {
+ /* always update hardware power! */
+ (bus->methods->set_hw_power) (bus);
+ }
+ return;
+}
+#endif
+
+/*------------------------------------------------------------------------*
+ * usb_dev_resume_peer
+ *
+ * This function will resume a USB peer and do the required USB
+ * signalling to get a USB device out of the suspended state.
+ *------------------------------------------------------------------------*/
+static void
+usb_dev_resume_peer(struct usb_device *udev)
+{
+ struct usb_bus *bus;
+ int err;
+
+ /* be NULL safe */
+ if (udev == NULL)
+ return;
+
+ /* check if already resumed */
+ if (udev->flags.self_suspended == 0)
+ return;
+
+ /* we need a parent HUB to do resume */
+ if (udev->parent_hub == NULL)
+ return;
+
+ DPRINTF("udev=%p\n", udev);
+
+ if ((udev->flags.usb_mode == USB_MODE_DEVICE) &&
+ (udev->flags.remote_wakeup == 0)) {
+ /*
+		 * If the host did not set the remote wakeup feature, we
+		 * cannot wake it up either!
+ */
+ DPRINTF("remote wakeup is not set!\n");
+ return;
+ }
+ /* get bus pointer */
+ bus = udev->bus;
+
+ /* resume parent hub first */
+ usb_dev_resume_peer(udev->parent_hub);
+
+ /* reduce chance of instant resume failure by waiting a little bit */
+ usb_pause_mtx(NULL, USB_MS_TO_TICKS(20));
+
+ if (usb_device_20_compatible(udev)) {
+ /* resume current port (Valid in Host and Device Mode) */
+ err = usbd_req_clear_port_feature(udev->parent_hub,
+ NULL, udev->port_no, UHF_PORT_SUSPEND);
+ if (err) {
+ DPRINTFN(0, "Resuming port failed\n");
+ return;
+ }
+ }
+
+ /* resume settle time */
+ usb_pause_mtx(NULL, USB_MS_TO_TICKS(USB_PORT_RESUME_DELAY));
+
+ if (bus->methods->device_resume != NULL) {
+ /* resume USB device on the USB controller */
+ (bus->methods->device_resume) (udev);
+ }
+ USB_BUS_LOCK(bus);
+ /* set that this device is now resumed */
+ udev->flags.self_suspended = 0;
+#if USB_HAVE_POWERD
+ /* make sure that we don't go into suspend right away */
+ udev->pwr_save.last_xfer_time = ticks;
+
+ /* make sure the needed power masks are on */
+ if (udev->pwr_save.type_refs[UE_CONTROL] != 0)
+ bus->hw_power_state |= USB_HW_POWER_CONTROL;
+ if (udev->pwr_save.type_refs[UE_BULK] != 0)
+ bus->hw_power_state |= USB_HW_POWER_BULK;
+ if (udev->pwr_save.type_refs[UE_INTERRUPT] != 0)
+ bus->hw_power_state |= USB_HW_POWER_INTERRUPT;
+ if (udev->pwr_save.type_refs[UE_ISOCHRONOUS] != 0)
+ bus->hw_power_state |= USB_HW_POWER_ISOC;
+#endif
+ USB_BUS_UNLOCK(bus);
+
+ if (bus->methods->set_hw_power != NULL) {
+ /* always update hardware power! */
+ (bus->methods->set_hw_power) (bus);
+ }
+
+ usbd_sr_lock(udev);
+
+ /* notify all sub-devices about resume */
+ err = usb_suspend_resume(udev, 0);
+
+ usbd_sr_unlock(udev);
+
+ /* check if peer has wakeup capability */
+ if (usb_peer_can_wakeup(udev) &&
+ usb_device_20_compatible(udev)) {
+ /* clear remote wakeup */
+ err = usbd_req_clear_device_feature(udev,
+ NULL, UF_DEVICE_REMOTE_WAKEUP);
+ if (err) {
+ DPRINTFN(0, "Clearing device "
+ "remote wakeup failed: %s\n",
+ usbd_errstr(err));
+ }
+ }
+}
+
+/*------------------------------------------------------------------------*
+ * usb_dev_suspend_peer
+ *
+ * This function will suspend a USB peer and do the required USB
+ * signalling to get a USB device into the suspended state.
+ *------------------------------------------------------------------------*/
+static void
+usb_dev_suspend_peer(struct usb_device *udev)
+{
+ struct usb_device *child;
+ int err;
+ uint8_t x;
+ uint8_t nports;
+
+repeat:
+ /* be NULL safe */
+ if (udev == NULL)
+ return;
+
+ /* check if already suspended */
+ if (udev->flags.self_suspended)
+ return;
+
+ /* we need a parent HUB to do suspend */
+ if (udev->parent_hub == NULL)
+ return;
+
+ DPRINTF("udev=%p\n", udev);
+
+ /* check if the current device is a HUB */
+ if (udev->hub != NULL) {
+ nports = udev->hub->nports;
+
+ /* check if all devices on the HUB are suspended */
+ for (x = 0; x != nports; x++) {
+ child = usb_bus_port_get_device(udev->bus,
+ udev->hub->ports + x);
+
+ if (child == NULL)
+ continue;
+
+ if (child->flags.self_suspended)
+ continue;
+
+ DPRINTFN(1, "Port %u is busy on the HUB!\n", x + 1);
+ return;
+ }
+ }
+
+ if (usb_peer_can_wakeup(udev) &&
+ usb_device_20_compatible(udev)) {
+ /*
+ * This request needs to be done before we set
+ * "udev->flags.self_suspended":
+ */
+
+ /* allow device to do remote wakeup */
+ err = usbd_req_set_device_feature(udev,
+ NULL, UF_DEVICE_REMOTE_WAKEUP);
+ if (err) {
+ DPRINTFN(0, "Setting device "
+ "remote wakeup failed\n");
+ }
+ }
+
+ USB_BUS_LOCK(udev->bus);
+ /*
+ * Checking for suspend condition and setting suspended bit
+ * must be atomic!
+ */
+ err = usb_peer_should_wakeup(udev);
+ if (err == 0) {
+ /*
+ * Set that this device is suspended. This variable
+ * must be set before calling USB controller suspend
+ * callbacks.
+ */
+ udev->flags.self_suspended = 1;
+ }
+ USB_BUS_UNLOCK(udev->bus);
+
+ if (err != 0) {
+ if (usb_peer_can_wakeup(udev) &&
+ usb_device_20_compatible(udev)) {
+			/* disallow remote wakeup again */
+ err = usbd_req_clear_device_feature(udev,
+ NULL, UF_DEVICE_REMOTE_WAKEUP);
+ if (err) {
+				DPRINTFN(0, "Clearing device "
+				    "remote wakeup failed\n");
+ }
+ }
+
+ if (udev->flags.usb_mode == USB_MODE_DEVICE) {
+ /* resume parent HUB first */
+ usb_dev_resume_peer(udev->parent_hub);
+
+ /* reduce chance of instant resume failure by waiting a little bit */
+ usb_pause_mtx(NULL, USB_MS_TO_TICKS(20));
+
+ /* resume current port (Valid in Host and Device Mode) */
+ err = usbd_req_clear_port_feature(udev->parent_hub,
+ NULL, udev->port_no, UHF_PORT_SUSPEND);
+
+ /* resume settle time */
+ usb_pause_mtx(NULL, USB_MS_TO_TICKS(USB_PORT_RESUME_DELAY));
+ }
+ DPRINTF("Suspend was cancelled!\n");
+ return;
+ }
+
+ usbd_sr_lock(udev);
+
+ /* notify all sub-devices about suspend */
+ err = usb_suspend_resume(udev, 1);
+
+ usbd_sr_unlock(udev);
+
+ if (udev->bus->methods->device_suspend != NULL) {
+ usb_timeout_t temp;
+
+ /* suspend device on the USB controller */
+ (udev->bus->methods->device_suspend) (udev);
+
+ /* do DMA delay */
+ temp = usbd_get_dma_delay(udev);
+ if (temp != 0)
+ usb_pause_mtx(NULL, USB_MS_TO_TICKS(temp));
+
+ }
+
+ if (usb_device_20_compatible(udev)) {
+ /* suspend current port */
+ err = usbd_req_set_port_feature(udev->parent_hub,
+ NULL, udev->port_no, UHF_PORT_SUSPEND);
+ if (err) {
+ DPRINTFN(0, "Suspending port failed\n");
+ return;
+ }
+ }
+
+ udev = udev->parent_hub;
+ goto repeat;
+}
+
+/*------------------------------------------------------------------------*
+ * usbd_set_power_mode
+ *
+ * This function will set the power mode, see USB_POWER_MODE_XXX, for
+ * a USB device.
+ *------------------------------------------------------------------------*/
+void
+usbd_set_power_mode(struct usb_device *udev, uint8_t power_mode)
+{
+ /* filter input argument */
+ if ((power_mode != USB_POWER_MODE_ON) &&
+ (power_mode != USB_POWER_MODE_OFF))
+ power_mode = USB_POWER_MODE_SAVE;
+
+ power_mode = usbd_filter_power_mode(udev, power_mode);
+
+ udev->power_mode = power_mode; /* update copy of power mode */
+
+#if USB_HAVE_POWERD
+ usb_bus_power_update(udev->bus);
+#endif
+}
+
+/*------------------------------------------------------------------------*
+ * usbd_filter_power_mode
+ *
+ * This function filters the power mode based on hardware requirements.
+ *------------------------------------------------------------------------*/
+uint8_t
+usbd_filter_power_mode(struct usb_device *udev, uint8_t power_mode)
+{
+ struct usb_bus_methods *mtod;
+ int8_t temp;
+
+ mtod = udev->bus->methods;
+ temp = -1;
+
+ if (mtod->get_power_mode != NULL)
+ (mtod->get_power_mode) (udev, &temp);
+
+ /* check if we should not filter */
+ if (temp < 0)
+ return (power_mode);
+
+ /* use fixed power mode given by hardware driver */
+ return (temp);
+}
diff --git a/rtems/freebsd/dev/usb/usb_hub.h b/rtems/freebsd/dev/usb/usb_hub.h
new file mode 100644
index 00000000..6f4637da
--- /dev/null
+++ b/rtems/freebsd/dev/usb/usb_hub.h
@@ -0,0 +1,83 @@
+/* $FreeBSD$ */
+/*-
+ * Copyright (c) 2008 Hans Petter Selasky. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _USB_HUB_HH_
+#define _USB_HUB_HH_
+
+/*
+ * The following structure defines a USB port.
+ */
+struct usb_port {
+ uint8_t restartcnt;
+#define USB_RESTART_MAX 5
+ uint8_t device_index; /* zero means not valid */
+ enum usb_hc_mode usb_mode; /* host or device mode */
+};
+
+/*
+ * The following structure defines how many bytes are
+ * left in a 1 ms USB time slot.
+ */
+struct usb_fs_isoc_schedule {
+ uint16_t total_bytes;
+ uint8_t frame_bytes;
+ uint8_t frame_slot;
+};
+
+/*
+ * The following structure defines a USB HUB.
+ */
+struct usb_hub {
+#if USB_HAVE_TT_SUPPORT
+ struct usb_fs_isoc_schedule fs_isoc_schedule[USB_ISOC_TIME_MAX];
+#endif
+ struct usb_device *hubudev; /* the HUB device */
+ usb_error_t (*explore) (struct usb_device *hub);
+ void *hubsoftc;
+ usb_size_t uframe_usage[USB_HS_MICRO_FRAMES_MAX];
+ uint16_t portpower; /* mA per USB port */
+ uint8_t isoc_last_time;
+ uint8_t nports;
+ struct usb_port ports[0];
+};
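+
+/*
+ * Editor's note: "ports[0]" above is a variable length trailing
+ * array. A hub with "n" ports is typically carved out of a single
+ * allocation, roughly like the following sketch (hypothetical malloc
+ * type, not part of the original source):
+ *
+ *	hub = malloc(sizeof(struct usb_hub) +
+ *	    n * sizeof(struct usb_port), M_USBDEV, M_WAITOK | M_ZERO);
+ */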
+
+/* function prototypes */
+
+void usb_hs_bandwidth_alloc(struct usb_xfer *xfer);
+void usb_hs_bandwidth_free(struct usb_xfer *xfer);
+void usbd_fs_isoc_schedule_init_all(struct usb_fs_isoc_schedule *fss);
+void usb_bus_port_set_device(struct usb_bus *bus, struct usb_port *up,
+ struct usb_device *udev, uint8_t device_index);
+struct usb_device *usb_bus_port_get_device(struct usb_bus *bus,
+ struct usb_port *up);
+void usb_needs_explore(struct usb_bus *bus, uint8_t do_probe);
+void usb_needs_explore_all(void);
+void usb_bus_power_update(struct usb_bus *bus);
+void usb_bus_powerd(struct usb_bus *bus);
+void uhub_root_intr(struct usb_bus *, const uint8_t *, uint8_t);
+usb_error_t uhub_query_info(struct usb_device *, uint8_t *, uint8_t *);
+
+#endif /* _USB_HUB_HH_ */
diff --git a/rtems/freebsd/dev/usb/usb_ioctl.h b/rtems/freebsd/dev/usb/usb_ioctl.h
new file mode 100644
index 00000000..ac014a14
--- /dev/null
+++ b/rtems/freebsd/dev/usb/usb_ioctl.h
@@ -0,0 +1,272 @@
+/* $FreeBSD$ */
+/*-
+ * Copyright (c) 2008 Hans Petter Selasky. All rights reserved.
+ * Copyright (c) 1998 The NetBSD Foundation, Inc. All rights reserved.
+ * Copyright (c) 1998 Lennart Augustsson. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _USB_IOCTL_HH_
+#define _USB_IOCTL_HH_
+
+#include <rtems/freebsd/sys/ioccom.h>
+
+/* Building "kdump" depends on these includes */
+
+#include <rtems/freebsd/dev/usb/usb_endian.h>
+#include <rtems/freebsd/dev/usb/usb.h>
+
+#define USB_DEVICE_NAME "usbctl"
+#define USB_DEVICE_DIR "usb"
+#define USB_GENERIC_NAME "ugen"
+
+struct usb_read_dir {
+ void *urd_data;
+ uint32_t urd_startentry;
+ uint32_t urd_maxlen;
+};
+
+struct usb_ctl_request {
+ void *ucr_data;
+ uint16_t ucr_flags;
+ uint16_t ucr_actlen; /* actual length transferred */
+ uint8_t ucr_addr; /* zero - currently not used */
+ struct usb_device_request ucr_request;
+};
+
+struct usb_alt_interface {
+ uint8_t uai_interface_index;
+ uint8_t uai_alt_index;
+};
+
+struct usb_gen_descriptor {
+ void *ugd_data;
+ uint16_t ugd_lang_id;
+ uint16_t ugd_maxlen;
+ uint16_t ugd_actlen;
+ uint16_t ugd_offset;
+ uint8_t ugd_config_index;
+ uint8_t ugd_string_index;
+ uint8_t ugd_iface_index;
+ uint8_t ugd_altif_index;
+ uint8_t ugd_endpt_index;
+ uint8_t ugd_report_type;
+ uint8_t reserved[8];
+};
+
+struct usb_device_info {
+ uint16_t udi_productNo;
+ uint16_t udi_vendorNo;
+ uint16_t udi_releaseNo;
+ uint16_t udi_power; /* power consumption in mA, 0 if
+					 * self-powered */
+ uint8_t udi_bus;
+ uint8_t udi_addr; /* device address */
+ uint8_t udi_index; /* device index */
+ uint8_t udi_class;
+ uint8_t udi_subclass;
+ uint8_t udi_protocol;
+ uint8_t udi_config_no; /* current config number */
+ uint8_t udi_config_index; /* current config index */
+ uint8_t udi_speed; /* see "USB_SPEED_XXX" */
+ uint8_t udi_mode; /* see "USB_MODE_XXX" */
+ uint8_t udi_nports;
+ uint8_t udi_hubaddr; /* parent HUB address */
+ uint8_t udi_hubindex; /* parent HUB device index */
+ uint8_t udi_hubport; /* parent HUB port */
+ uint8_t udi_power_mode; /* see "USB_POWER_MODE_XXX" */
+ uint8_t udi_suspended; /* set if device is suspended */
+ uint8_t udi_reserved[16]; /* leave space for the future */
+ char udi_product[128];
+ char udi_vendor[128];
+ char udi_serial[64];
+ char udi_release[8];
+};
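+
+/*
+ * Editor's note: a minimal userland sketch showing how this structure
+ * is typically filled in via the "USB_GET_DEVICEINFO" ioctl defined
+ * below (the device node name is hypothetical, not part of the
+ * original source):
+ *
+ *	struct usb_device_info udi;
+ *	int f = open("/dev/ugen0.1", O_RDONLY);
+ *
+ *	if (f > -1 && ioctl(f, USB_GET_DEVICEINFO, &udi) == 0)
+ *		printf("vendor=0x%04x product=0x%04x\n",
+ *		    udi.udi_vendorNo, udi.udi_productNo);
+ */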
+
+struct usb_device_stats {
+ uint32_t uds_requests_ok[4]; /* Indexed by transfer type UE_XXX */
+ uint32_t uds_requests_fail[4]; /* Indexed by transfer type UE_XXX */
+};
+
+struct usb_fs_start {
+ uint8_t ep_index;
+};
+
+struct usb_fs_stop {
+ uint8_t ep_index;
+};
+
+struct usb_fs_complete {
+ uint8_t ep_index;
+};
+
+/* This structure is used for all endpoint types */
+struct usb_fs_endpoint {
+ /*
+	 * NOTE: isochronous USB transfers only use one buffer, but can
+	 * have multiple frame lengths!
+ */
+ void **ppBuffer; /* pointer to userland buffers */
+ uint32_t *pLength; /* pointer to frame lengths, updated
+ * to actual length */
+ uint32_t nFrames; /* number of frames */
+ uint32_t aFrames; /* actual number of frames */
+ uint16_t flags;
+ /* a single short frame will terminate */
+#define USB_FS_FLAG_SINGLE_SHORT_OK 0x0001
+ /* multiple short frames are allowed */
+#define USB_FS_FLAG_MULTI_SHORT_OK 0x0002
+ /* all frame(s) transmitted are short terminated */
+#define USB_FS_FLAG_FORCE_SHORT 0x0004
+ /* will do a clear-stall before xfer */
+#define USB_FS_FLAG_CLEAR_STALL 0x0008
+ uint16_t timeout; /* in milliseconds */
+	/* isochronous completion time in milliseconds - used for echo cancel */
+ uint16_t isoc_time_complete;
+ /* timeout value for no timeout */
+#define USB_FS_TIMEOUT_NONE 0
+ int status; /* see USB_ERR_XXX */
+};
+
+struct usb_fs_init {
+ /* userland pointer to endpoints structure */
+ struct usb_fs_endpoint *pEndpoints;
+ /* maximum number of endpoints */
+ uint8_t ep_index_max;
+};
+
+struct usb_fs_uninit {
+ uint8_t dummy; /* zero */
+};
+
+struct usb_fs_open {
+#define USB_FS_MAX_BUFSIZE (1 << 18)
+ uint32_t max_bufsize;
+#define USB_FS_MAX_FRAMES (1 << 12)
+ uint32_t max_frames;
+ uint16_t max_packet_length; /* read only */
+ uint8_t dev_index; /* currently unused */
+ uint8_t ep_index;
+ uint8_t ep_no; /* bEndpointNumber */
+};
+
+struct usb_fs_close {
+ uint8_t ep_index;
+};
+
+struct usb_fs_clear_stall_sync {
+ uint8_t ep_index;
+};
+
+struct usb_gen_quirk {
+ uint16_t index; /* Quirk Index */
+ uint16_t vid; /* Vendor ID */
+ uint16_t pid; /* Product ID */
+ uint16_t bcdDeviceLow; /* Low Device Revision */
+ uint16_t bcdDeviceHigh; /* High Device Revision */
+ uint16_t reserved[2];
+ /*
+ * String version of quirk including terminating zero. See UQ_XXX in
+ * "usb_quirk.h".
+ */
+ char quirkname[64 - 14];
+};
+
+/* USB controller */
+#define USB_REQUEST _IOWR('U', 1, struct usb_ctl_request)
+#define USB_SETDEBUG _IOW ('U', 2, int)
+#define USB_DISCOVER _IO ('U', 3)
+#define USB_DEVICEINFO _IOWR('U', 4, struct usb_device_info)
+#define USB_DEVICESTATS _IOR ('U', 5, struct usb_device_stats)
+#define USB_DEVICEENUMERATE _IOW ('U', 6, int)
+
+/* Generic HID device */
+#define USB_GET_REPORT_DESC _IOWR('U', 21, struct usb_gen_descriptor)
+#define USB_SET_IMMED _IOW ('U', 22, int)
+#define USB_GET_REPORT _IOWR('U', 23, struct usb_gen_descriptor)
+#define USB_SET_REPORT _IOW ('U', 24, struct usb_gen_descriptor)
+#define USB_GET_REPORT_ID _IOR ('U', 25, int)
+
+/* Generic USB device */
+#define USB_GET_CONFIG _IOR ('U', 100, int)
+#define USB_SET_CONFIG _IOW ('U', 101, int)
+#define USB_GET_ALTINTERFACE _IOWR('U', 102, struct usb_alt_interface)
+#define USB_SET_ALTINTERFACE _IOWR('U', 103, struct usb_alt_interface)
+#define USB_GET_DEVICE_DESC _IOR ('U', 105, struct usb_device_descriptor)
+#define USB_GET_CONFIG_DESC _IOR ('U', 106, struct usb_config_descriptor)
+#define USB_GET_RX_INTERFACE_DESC _IOR ('U', 107, struct usb_interface_descriptor)
+#define USB_GET_RX_ENDPOINT_DESC _IOR ('U', 108, struct usb_endpoint_descriptor)
+#define USB_GET_FULL_DESC _IOWR('U', 109, struct usb_gen_descriptor)
+#define USB_GET_STRING_DESC _IOWR('U', 110, struct usb_gen_descriptor)
+#define USB_DO_REQUEST _IOWR('U', 111, struct usb_ctl_request)
+#define USB_GET_DEVICEINFO _IOR ('U', 112, struct usb_device_info)
+#define USB_SET_RX_SHORT_XFER _IOW ('U', 113, int)
+#define USB_SET_RX_TIMEOUT _IOW ('U', 114, int)
+#define USB_GET_RX_FRAME_SIZE _IOR ('U', 115, int)
+#define USB_GET_RX_BUFFER_SIZE _IOR ('U', 117, int)
+#define USB_SET_RX_BUFFER_SIZE _IOW ('U', 118, int)
+#define USB_SET_RX_STALL_FLAG _IOW ('U', 119, int)
+#define USB_SET_TX_STALL_FLAG _IOW ('U', 120, int)
+#define USB_GET_IFACE_DRIVER _IOWR('U', 121, struct usb_gen_descriptor)
+#define USB_CLAIM_INTERFACE _IOW ('U', 122, int)
+#define USB_RELEASE_INTERFACE _IOW ('U', 123, int)
+#define USB_IFACE_DRIVER_ACTIVE _IOW ('U', 124, int)
+#define USB_IFACE_DRIVER_DETACH _IOW ('U', 125, int)
+#define USB_GET_PLUGTIME _IOR ('U', 126, uint32_t)
+#define USB_READ_DIR _IOW ('U', 127, struct usb_read_dir)
+/* 128 - 135 unused */
+#define USB_SET_TX_FORCE_SHORT _IOW ('U', 136, int)
+#define USB_SET_TX_TIMEOUT _IOW ('U', 137, int)
+#define USB_GET_TX_FRAME_SIZE _IOR ('U', 138, int)
+#define USB_GET_TX_BUFFER_SIZE _IOR ('U', 139, int)
+#define USB_SET_TX_BUFFER_SIZE _IOW ('U', 140, int)
+#define USB_GET_TX_INTERFACE_DESC _IOR ('U', 141, struct usb_interface_descriptor)
+#define USB_GET_TX_ENDPOINT_DESC _IOR ('U', 142, struct usb_endpoint_descriptor)
+#define USB_SET_PORT_ENABLE _IOW ('U', 143, int)
+#define USB_SET_PORT_DISABLE _IOW ('U', 144, int)
+#define USB_SET_POWER_MODE _IOW ('U', 145, int)
+#define USB_GET_POWER_MODE _IOR ('U', 146, int)
+#define USB_SET_TEMPLATE _IOW ('U', 147, int)
+#define USB_GET_TEMPLATE _IOR ('U', 148, int)
+
+/* Modem device */
+#define USB_GET_CM_OVER_DATA _IOR ('U', 180, int)
+#define USB_SET_CM_OVER_DATA _IOW ('U', 181, int)
+
+/* USB file system interface */
+#define USB_FS_START _IOW ('U', 192, struct usb_fs_start)
+#define USB_FS_STOP _IOW ('U', 193, struct usb_fs_stop)
+#define USB_FS_COMPLETE _IOR ('U', 194, struct usb_fs_complete)
+#define USB_FS_INIT _IOW ('U', 195, struct usb_fs_init)
+#define USB_FS_UNINIT _IOW ('U', 196, struct usb_fs_uninit)
+#define USB_FS_OPEN _IOWR('U', 197, struct usb_fs_open)
+#define USB_FS_CLOSE _IOW ('U', 198, struct usb_fs_close)
+#define USB_FS_CLEAR_STALL_SYNC _IOW ('U', 199, struct usb_fs_clear_stall_sync)
+
+/* USB quirk system interface */
+#define USB_DEV_QUIRK_GET _IOWR('Q', 0, struct usb_gen_quirk)
+#define USB_QUIRK_NAME_GET _IOWR('Q', 1, struct usb_gen_quirk)
+#define USB_DEV_QUIRK_ADD _IOW ('Q', 2, struct usb_gen_quirk)
+#define USB_DEV_QUIRK_REMOVE _IOW ('Q', 3, struct usb_gen_quirk)
+
+#endif /* _USB_IOCTL_HH_ */
diff --git a/rtems/freebsd/dev/usb/usb_lookup.c b/rtems/freebsd/dev/usb/usb_lookup.c
new file mode 100644
index 00000000..1eccf0d8
--- /dev/null
+++ b/rtems/freebsd/dev/usb/usb_lookup.c
@@ -0,0 +1,156 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/* $FreeBSD$ */
+/*-
+ * Copyright (c) 2008 Hans Petter Selasky. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <rtems/freebsd/sys/stdint.h>
+#include <rtems/freebsd/sys/stddef.h>
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/queue.h>
+#include <rtems/freebsd/sys/types.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/bus.h>
+#include <rtems/freebsd/sys/linker_set.h>
+#include <rtems/freebsd/sys/module.h>
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/mutex.h>
+#include <rtems/freebsd/sys/condvar.h>
+#include <rtems/freebsd/sys/sysctl.h>
+#include <rtems/freebsd/sys/sx.h>
+#include <rtems/freebsd/sys/unistd.h>
+#include <rtems/freebsd/sys/callout.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/priv.h>
+
+#include <rtems/freebsd/dev/usb/usb.h>
+#include <rtems/freebsd/dev/usb/usbdi.h>
+
+/*------------------------------------------------------------------------*
+ * usbd_lookup_id_by_info
+ *
+ * This function takes an array of "struct usb_device_id" and tries
+ * to match the entries with the information in "struct usbd_lookup_info".
+ *
+ * NOTE: The "sizeof_id" parameter must be a multiple of the
+ * usb_device_id structure size; otherwise the behaviour of this
+ * function is undefined.
+ *
+ * Return values:
+ * NULL: No match found.
+ * Else: Pointer to matching entry.
+ *------------------------------------------------------------------------*/
+const struct usb_device_id *
+usbd_lookup_id_by_info(const struct usb_device_id *id, usb_size_t sizeof_id,
+ const struct usbd_lookup_info *info)
+{
+ const struct usb_device_id *id_end;
+
+ if (id == NULL) {
+ goto done;
+ }
+ id_end = (const void *)(((const uint8_t *)id) + sizeof_id);
+
+ /*
+ * Keep on matching array entries until we find a match or
+ * until we reach the end of the matching array:
+ */
+ for (; id != id_end; id++) {
+
+ if ((id->match_flag_vendor) &&
+ (id->idVendor != info->idVendor)) {
+ continue;
+ }
+ if ((id->match_flag_product) &&
+ (id->idProduct != info->idProduct)) {
+ continue;
+ }
+ if ((id->match_flag_dev_lo) &&
+ (id->bcdDevice_lo > info->bcdDevice)) {
+ continue;
+ }
+ if ((id->match_flag_dev_hi) &&
+ (id->bcdDevice_hi < info->bcdDevice)) {
+ continue;
+ }
+ if ((id->match_flag_dev_class) &&
+ (id->bDeviceClass != info->bDeviceClass)) {
+ continue;
+ }
+ if ((id->match_flag_dev_subclass) &&
+ (id->bDeviceSubClass != info->bDeviceSubClass)) {
+ continue;
+ }
+ if ((id->match_flag_dev_protocol) &&
+ (id->bDeviceProtocol != info->bDeviceProtocol)) {
+ continue;
+ }
+ if ((info->bDeviceClass == 0xFF) &&
+ (!(id->match_flag_vendor)) &&
+ ((id->match_flag_int_class) ||
+ (id->match_flag_int_subclass) ||
+ (id->match_flag_int_protocol))) {
+ continue;
+ }
+ if ((id->match_flag_int_class) &&
+ (id->bInterfaceClass != info->bInterfaceClass)) {
+ continue;
+ }
+ if ((id->match_flag_int_subclass) &&
+ (id->bInterfaceSubClass != info->bInterfaceSubClass)) {
+ continue;
+ }
+ if ((id->match_flag_int_protocol) &&
+ (id->bInterfaceProtocol != info->bInterfaceProtocol)) {
+ continue;
+ }
+ /* We found a match! */
+ return (id);
+ }
+
+done:
+ return (NULL);
+}
+
+/*------------------------------------------------------------------------*
+ * usbd_lookup_id_by_uaa - factored out code
+ *
+ * Return values:
+ * 0: Success
+ * Else: Failure
+ *------------------------------------------------------------------------*/
+int
+usbd_lookup_id_by_uaa(const struct usb_device_id *id, usb_size_t sizeof_id,
+ struct usb_attach_arg *uaa)
+{
+ id = usbd_lookup_id_by_info(id, sizeof_id, &uaa->info);
+ if (id) {
+ /* copy driver info */
+ uaa->driver_info = id->driver_info;
+ return (0);
+ }
+ return (ENXIO);
+}
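+
+/*
+ * Editor's note: a sketch of how a typical driver probe routine uses
+ * the function above, assuming a driver-defined "xyz_devs" match
+ * table (hypothetical names, not part of the original source):
+ *
+ *	static int
+ *	xyz_probe(device_t dev)
+ *	{
+ *		struct usb_attach_arg *uaa = device_get_ivars(dev);
+ *
+ *		if (uaa->usb_mode != USB_MODE_HOST)
+ *			return (ENXIO);
+ *		return (usbd_lookup_id_by_uaa(xyz_devs,
+ *		    sizeof(xyz_devs), uaa));
+ *	}
+ */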
diff --git a/rtems/freebsd/dev/usb/usb_mbuf.c b/rtems/freebsd/dev/usb/usb_mbuf.c
new file mode 100644
index 00000000..51e5ef06
--- /dev/null
+++ b/rtems/freebsd/dev/usb/usb_mbuf.c
@@ -0,0 +1,101 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/* $FreeBSD$ */
+/*-
+ * Copyright (c) 2008 Hans Petter Selasky. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <rtems/freebsd/sys/stdint.h>
+#include <rtems/freebsd/sys/stddef.h>
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/queue.h>
+#include <rtems/freebsd/sys/types.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/bus.h>
+#include <rtems/freebsd/sys/linker_set.h>
+#include <rtems/freebsd/sys/module.h>
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/mutex.h>
+#include <rtems/freebsd/sys/condvar.h>
+#include <rtems/freebsd/sys/sysctl.h>
+#include <rtems/freebsd/sys/sx.h>
+#include <rtems/freebsd/sys/unistd.h>
+#include <rtems/freebsd/sys/callout.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/priv.h>
+
+#include <rtems/freebsd/dev/usb/usb.h>
+#include <rtems/freebsd/dev/usb/usbdi.h>
+#include <rtems/freebsd/dev/usb/usb_dev.h>
+#include <rtems/freebsd/dev/usb/usb_mbuf.h>
+
+/*------------------------------------------------------------------------*
+ * usb_alloc_mbufs - allocate mbufs for a usbd interface queue
+ *
+ * Returns:
+ * A pointer that should be passed to "free()" when the buffer(s)
+ * should be released.
+ *------------------------------------------------------------------------*/
+void *
+usb_alloc_mbufs(struct malloc_type *type, struct usb_ifqueue *ifq,
+ usb_size_t block_size, uint16_t nblocks)
+{
+ struct usb_mbuf *m_ptr;
+ uint8_t *data_ptr;
+ void *free_ptr = NULL;
+ usb_size_t alloc_size;
+
+ /* align data */
+ block_size += ((-block_size) & (USB_HOST_ALIGN - 1));
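+	/*
+	 * Editor's note: the line above rounds "block_size" up to the
+	 * next multiple of USB_HOST_ALIGN. For example, with an 8-byte
+	 * alignment a block size of 13 becomes 13 + ((-13) & 7) = 16.
+	 */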
+
+ if (nblocks && block_size) {
+
+ alloc_size = (block_size + sizeof(struct usb_mbuf)) * nblocks;
+
+ free_ptr = malloc(alloc_size, type, M_WAITOK | M_ZERO);
+
+ if (free_ptr == NULL) {
+ goto done;
+ }
+ m_ptr = free_ptr;
+ data_ptr = (void *)(m_ptr + nblocks);
+
+ while (nblocks--) {
+
+ m_ptr->cur_data_ptr =
+ m_ptr->min_data_ptr = data_ptr;
+
+ m_ptr->cur_data_len =
+ m_ptr->max_data_len = block_size;
+
+ USB_IF_ENQUEUE(ifq, m_ptr);
+
+ m_ptr++;
+ data_ptr += block_size;
+ }
+ }
+done:
+ return (free_ptr);
+}
diff --git a/rtems/freebsd/dev/usb/usb_mbuf.h b/rtems/freebsd/dev/usb/usb_mbuf.h
new file mode 100644
index 00000000..44dba71b
--- /dev/null
+++ b/rtems/freebsd/dev/usb/usb_mbuf.h
@@ -0,0 +1,90 @@
+/* $FreeBSD$ */
+/*-
+ * Copyright (c) 2008 Hans Petter Selasky. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _USB_MBUF_HH_
+#define _USB_MBUF_HH_
+
+/*
+ * The following structure defines a minimum re-implementation of the
+ * mbuf system in the kernel.
+ */
+struct usb_mbuf {
+ uint8_t *cur_data_ptr;
+ uint8_t *min_data_ptr;
+ struct usb_mbuf *usb_nextpkt;
+ struct usb_mbuf *usb_next;
+
+ usb_size_t cur_data_len;
+ usb_size_t max_data_len;
+ uint8_t last_packet:1;
+ uint8_t unused:7;
+};
+
+#define USB_IF_ENQUEUE(ifq, m) do { \
+ (m)->usb_nextpkt = NULL; \
+ if ((ifq)->ifq_tail == NULL) \
+ (ifq)->ifq_head = (m); \
+ else \
+ (ifq)->ifq_tail->usb_nextpkt = (m); \
+ (ifq)->ifq_tail = (m); \
+ (ifq)->ifq_len++; \
+ } while (0)
+
+#define USB_IF_DEQUEUE(ifq, m) do { \
+ (m) = (ifq)->ifq_head; \
+ if (m) { \
+ if (((ifq)->ifq_head = (m)->usb_nextpkt) == NULL) { \
+ (ifq)->ifq_tail = NULL; \
+ } \
+ (m)->usb_nextpkt = NULL; \
+ (ifq)->ifq_len--; \
+ } \
+ } while (0)
+
+#define USB_IF_PREPEND(ifq, m) do { \
+ (m)->usb_nextpkt = (ifq)->ifq_head; \
+ if ((ifq)->ifq_tail == NULL) { \
+ (ifq)->ifq_tail = (m); \
+ } \
+ (ifq)->ifq_head = (m); \
+ (ifq)->ifq_len++; \
+ } while (0)
+
+#define USB_IF_QFULL(ifq) ((ifq)->ifq_len >= (ifq)->ifq_maxlen)
+#define USB_IF_QLEN(ifq) ((ifq)->ifq_len)
+#define USB_IF_POLL(ifq, m) ((m) = (ifq)->ifq_head)
+
+#define USB_MBUF_RESET(m) do { \
+ (m)->cur_data_ptr = (m)->min_data_ptr; \
+ (m)->cur_data_len = (m)->max_data_len; \
+ (m)->last_packet = 0; \
+ } while (0)
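+
+/*
+ * Editor's note: a sketch of typical producer/consumer usage of the
+ * queue macros above, assuming a zero-initialized "struct usb_ifqueue
+ * q" whose "ifq_maxlen" has been set (hypothetical usage, not part of
+ * the original source):
+ *
+ *	struct usb_mbuf *m;
+ *
+ *	if (!USB_IF_QFULL(&q))
+ *		USB_IF_ENQUEUE(&q, m);	(producer side)
+ *
+ *	USB_IF_DEQUEUE(&q, m);		(consumer side)
+ *	if (m != NULL)
+ *		USB_MBUF_RESET(m);	(recycle the buffer)
+ */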
+
+/* prototypes */
+void *usb_alloc_mbufs(struct malloc_type *type, struct usb_ifqueue *ifq,
+ usb_size_t block_size, uint16_t nblocks);
+
+#endif /* _USB_MBUF_HH_ */
diff --git a/rtems/freebsd/dev/usb/usb_msctest.c b/rtems/freebsd/dev/usb/usb_msctest.c
new file mode 100644
index 00000000..13e16021
--- /dev/null
+++ b/rtems/freebsd/dev/usb/usb_msctest.c
@@ -0,0 +1,641 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/* $FreeBSD$ */
+/*-
+ * Copyright (c) 2008 Hans Petter Selasky. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * The following file contains code that will detect USB autoinstall
+ * disks.
+ *
+ * TODO: Potentially we could add code to automatically detect USB
+ * mass storage quirks for not supported SCSI commands!
+ */
+
+#include <rtems/freebsd/sys/stdint.h>
+#include <rtems/freebsd/sys/stddef.h>
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/queue.h>
+#include <rtems/freebsd/sys/types.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/bus.h>
+#include <rtems/freebsd/sys/linker_set.h>
+#include <rtems/freebsd/sys/module.h>
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/mutex.h>
+#include <rtems/freebsd/sys/condvar.h>
+#include <rtems/freebsd/sys/sysctl.h>
+#include <rtems/freebsd/sys/sx.h>
+#include <rtems/freebsd/sys/unistd.h>
+#include <rtems/freebsd/sys/callout.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/priv.h>
+
+#include <rtems/freebsd/dev/usb/usb.h>
+#include <rtems/freebsd/dev/usb/usbdi.h>
+#include <rtems/freebsd/dev/usb/usbdi_util.h>
+
+#define USB_DEBUG_VAR usb_debug
+
+#include <rtems/freebsd/dev/usb/usb_busdma.h>
+#include <rtems/freebsd/dev/usb/usb_process.h>
+#include <rtems/freebsd/dev/usb/usb_transfer.h>
+#include <rtems/freebsd/dev/usb/usb_msctest.h>
+#include <rtems/freebsd/dev/usb/usb_debug.h>
+#include <rtems/freebsd/dev/usb/usb_device.h>
+#include <rtems/freebsd/dev/usb/usb_request.h>
+#include <rtems/freebsd/dev/usb/usb_util.h>
+#include <rtems/freebsd/dev/usb/quirk/usb_quirk.h>
+
+enum {
+ ST_COMMAND,
+ ST_DATA_RD,
+ ST_DATA_RD_CS,
+ ST_DATA_WR,
+ ST_DATA_WR_CS,
+ ST_STATUS,
+ ST_MAX,
+};
+
+enum {
+ DIR_IN,
+ DIR_OUT,
+ DIR_NONE,
+};
+
+#define SCSI_INQ_LEN 0x24
+static uint8_t scsi_test_unit_ready[] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
+static uint8_t scsi_inquiry[] = { 0x12, 0x00, 0x00, 0x00, SCSI_INQ_LEN, 0x00 };
+static uint8_t scsi_rezero_init[] = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };
+static uint8_t scsi_start_stop_unit[] = { 0x1b, 0x00, 0x00, 0x00, 0x02, 0x00 };
+static uint8_t scsi_ztestor_eject[] = { 0x85, 0x01, 0x01, 0x01, 0x18, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x00, 0x00 };
+static uint8_t scsi_cmotech_eject[] = { 0xff, 0x52, 0x44, 0x45, 0x56, 0x43,
+ 0x48, 0x47 };
+static uint8_t scsi_huawei_eject[] = { 0x11, 0x06, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00 };
+static uint8_t scsi_tct_eject[] = { 0x06, 0xf5, 0x04, 0x02, 0x52, 0x70 };
+
+#define BULK_SIZE 64 /* dummy */
+#define ERR_CSW_FAILED -1
+
+/* Command Block Wrapper */
+struct bbb_cbw {
+ uDWord dCBWSignature;
+#define CBWSIGNATURE 0x43425355
+ uDWord dCBWTag;
+ uDWord dCBWDataTransferLength;
+ uByte bCBWFlags;
+#define CBWFLAGS_OUT 0x00
+#define CBWFLAGS_IN 0x80
+ uByte bCBWLUN;
+ uByte bCDBLength;
+#define CBWCDBLENGTH 16
+ uByte CBWCDB[CBWCDBLENGTH];
+} __packed;
+
+/* Command Status Wrapper */
+struct bbb_csw {
+ uDWord dCSWSignature;
+#define CSWSIGNATURE 0x53425355
+ uDWord dCSWTag;
+ uDWord dCSWDataResidue;
+ uByte bCSWStatus;
+#define CSWSTATUS_GOOD 0x0
+#define CSWSTATUS_FAILED 0x1
+#define CSWSTATUS_PHASE 0x2
+} __packed;
+
+struct bbb_transfer {
+ struct mtx mtx;
+ struct cv cv;
+ struct bbb_cbw cbw;
+ struct bbb_csw csw;
+
+ struct usb_xfer *xfer[ST_MAX];
+
+ uint8_t *data_ptr;
+
+ usb_size_t data_len; /* bytes */
+ usb_size_t data_rem; /* bytes */
+ usb_timeout_t data_timeout; /* ms */
+ usb_frlength_t actlen; /* bytes */
+
+ uint8_t cmd_len; /* bytes */
+ uint8_t dir;
+ uint8_t lun;
+ uint8_t state;
+ uint8_t status_try;
+ int error;
+
+ uint8_t buffer[256];
+};
+
+static usb_callback_t bbb_command_callback;
+static usb_callback_t bbb_data_read_callback;
+static usb_callback_t bbb_data_rd_cs_callback;
+static usb_callback_t bbb_data_write_callback;
+static usb_callback_t bbb_data_wr_cs_callback;
+static usb_callback_t bbb_status_callback;
+
+static void bbb_done(struct bbb_transfer *, int);
+static void bbb_transfer_start(struct bbb_transfer *, uint8_t);
+static void bbb_data_clear_stall_callback(struct usb_xfer *, uint8_t,
+ uint8_t);
+static uint8_t bbb_command_start(struct bbb_transfer *, uint8_t, uint8_t,
+ void *, size_t, void *, size_t, usb_timeout_t);
+static struct bbb_transfer *bbb_attach(struct usb_device *, uint8_t);
+static void bbb_detach(struct bbb_transfer *);
+
+static const struct usb_config bbb_config[ST_MAX] = {
+
+ [ST_COMMAND] = {
+ .type = UE_BULK,
+ .endpoint = UE_ADDR_ANY,
+ .direction = UE_DIR_OUT,
+ .bufsize = sizeof(struct bbb_cbw),
+ .callback = &bbb_command_callback,
+ .timeout = 4 * USB_MS_HZ, /* 4 seconds */
+ },
+
+ [ST_DATA_RD] = {
+ .type = UE_BULK,
+ .endpoint = UE_ADDR_ANY,
+ .direction = UE_DIR_IN,
+ .bufsize = BULK_SIZE,
+ .flags = {.proxy_buffer = 1,.short_xfer_ok = 1,},
+ .callback = &bbb_data_read_callback,
+ .timeout = 4 * USB_MS_HZ, /* 4 seconds */
+ },
+
+ [ST_DATA_RD_CS] = {
+ .type = UE_CONTROL,
+ .endpoint = 0x00, /* Control pipe */
+ .direction = UE_DIR_ANY,
+ .bufsize = sizeof(struct usb_device_request),
+ .callback = &bbb_data_rd_cs_callback,
+ .timeout = 1 * USB_MS_HZ, /* 1 second */
+ },
+
+ [ST_DATA_WR] = {
+ .type = UE_BULK,
+ .endpoint = UE_ADDR_ANY,
+ .direction = UE_DIR_OUT,
+ .bufsize = BULK_SIZE,
+ .flags = {.proxy_buffer = 1,},
+ .callback = &bbb_data_write_callback,
+ .timeout = 4 * USB_MS_HZ, /* 4 seconds */
+ },
+
+ [ST_DATA_WR_CS] = {
+ .type = UE_CONTROL,
+ .endpoint = 0x00, /* Control pipe */
+ .direction = UE_DIR_ANY,
+ .bufsize = sizeof(struct usb_device_request),
+ .callback = &bbb_data_wr_cs_callback,
+ .timeout = 1 * USB_MS_HZ, /* 1 second */
+ },
+
+ [ST_STATUS] = {
+ .type = UE_BULK,
+ .endpoint = UE_ADDR_ANY,
+ .direction = UE_DIR_IN,
+ .bufsize = sizeof(struct bbb_csw),
+ .flags = {.short_xfer_ok = 1,},
+ .callback = &bbb_status_callback,
+ .timeout = 1 * USB_MS_HZ, /* 1 second */
+ },
+};
+
+static void
+bbb_done(struct bbb_transfer *sc, int error)
+{
+
+ sc->error = error;
+ sc->state = ST_COMMAND;
+ sc->status_try = 1;
+ cv_signal(&sc->cv);
+}
+
+static void
+bbb_transfer_start(struct bbb_transfer *sc, uint8_t xfer_index)
+{
+ sc->state = xfer_index;
+ usbd_transfer_start(sc->xfer[xfer_index]);
+}
+
+static void
+bbb_data_clear_stall_callback(struct usb_xfer *xfer,
+ uint8_t next_xfer, uint8_t stall_xfer)
+{
+ struct bbb_transfer *sc = usbd_xfer_softc(xfer);
+
+ if (usbd_clear_stall_callback(xfer, sc->xfer[stall_xfer])) {
+ switch (USB_GET_STATE(xfer)) {
+ case USB_ST_SETUP:
+ case USB_ST_TRANSFERRED:
+ bbb_transfer_start(sc, next_xfer);
+ break;
+ default:
+ bbb_done(sc, USB_ERR_STALLED);
+ break;
+ }
+ }
+}
+
+static void
+bbb_command_callback(struct usb_xfer *xfer, usb_error_t error)
+{
+ struct bbb_transfer *sc = usbd_xfer_softc(xfer);
+ uint32_t tag;
+
+ switch (USB_GET_STATE(xfer)) {
+ case USB_ST_TRANSFERRED:
+		bbb_transfer_start(sc,
+		    (sc->dir == DIR_IN) ? ST_DATA_RD :
+		    (sc->dir == DIR_OUT) ? ST_DATA_WR : ST_STATUS);
+ break;
+
+ case USB_ST_SETUP:
+ sc->status_try = 0;
+ tag = UGETDW(sc->cbw.dCBWTag) + 1;
+ USETDW(sc->cbw.dCBWSignature, CBWSIGNATURE);
+ USETDW(sc->cbw.dCBWTag, tag);
+ USETDW(sc->cbw.dCBWDataTransferLength, (uint32_t)sc->data_len);
+ sc->cbw.bCBWFlags = ((sc->dir == DIR_IN) ? CBWFLAGS_IN : CBWFLAGS_OUT);
+ sc->cbw.bCBWLUN = sc->lun;
+ sc->cbw.bCDBLength = sc->cmd_len;
+ if (sc->cbw.bCDBLength > sizeof(sc->cbw.CBWCDB)) {
+ sc->cbw.bCDBLength = sizeof(sc->cbw.CBWCDB);
+ DPRINTFN(0, "Truncating long command\n");
+ }
+ usbd_xfer_set_frame_data(xfer, 0, &sc->cbw, sizeof(sc->cbw));
+ usbd_transfer_submit(xfer);
+ break;
+
+ default: /* Error */
+ bbb_done(sc, error);
+ break;
+ }
+}
+
+static void
+bbb_data_read_callback(struct usb_xfer *xfer, usb_error_t error)
+{
+ struct bbb_transfer *sc = usbd_xfer_softc(xfer);
+ usb_frlength_t max_bulk = usbd_xfer_max_len(xfer);
+ int actlen, sumlen;
+
+ usbd_xfer_status(xfer, &actlen, &sumlen, NULL, NULL);
+
+ switch (USB_GET_STATE(xfer)) {
+ case USB_ST_TRANSFERRED:
+ sc->data_rem -= actlen;
+ sc->data_ptr += actlen;
+ sc->actlen += actlen;
+
+ if (actlen < sumlen) {
+ /* short transfer */
+ sc->data_rem = 0;
+ }
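+		/* FALLTHROUGH */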
+ case USB_ST_SETUP:
+ DPRINTF("max_bulk=%d, data_rem=%d\n",
+ max_bulk, sc->data_rem);
+
+ if (sc->data_rem == 0) {
+ bbb_transfer_start(sc, ST_STATUS);
+ break;
+ }
+ if (max_bulk > sc->data_rem) {
+ max_bulk = sc->data_rem;
+ }
+ usbd_xfer_set_timeout(xfer, sc->data_timeout);
+ usbd_xfer_set_frame_data(xfer, 0, sc->data_ptr, max_bulk);
+ usbd_transfer_submit(xfer);
+ break;
+
+ default: /* Error */
+ if (error == USB_ERR_CANCELLED) {
+ bbb_done(sc, error);
+ } else {
+ bbb_transfer_start(sc, ST_DATA_RD_CS);
+ }
+ break;
+ }
+}
+
+static void
+bbb_data_rd_cs_callback(struct usb_xfer *xfer, usb_error_t error)
+{
+ bbb_data_clear_stall_callback(xfer, ST_STATUS,
+ ST_DATA_RD);
+}
+
+static void
+bbb_data_write_callback(struct usb_xfer *xfer, usb_error_t error)
+{
+ struct bbb_transfer *sc = usbd_xfer_softc(xfer);
+ usb_frlength_t max_bulk = usbd_xfer_max_len(xfer);
+ int actlen, sumlen;
+
+ usbd_xfer_status(xfer, &actlen, &sumlen, NULL, NULL);
+
+ switch (USB_GET_STATE(xfer)) {
+ case USB_ST_TRANSFERRED:
+ sc->data_rem -= actlen;
+ sc->data_ptr += actlen;
+ sc->actlen += actlen;
+
+ if (actlen < sumlen) {
+ /* short transfer */
+ sc->data_rem = 0;
+		}
+		/* FALLTHROUGH */
+	case USB_ST_SETUP:
+ DPRINTF("max_bulk=%d, data_rem=%d\n",
+ max_bulk, sc->data_rem);
+
+ if (sc->data_rem == 0) {
+ bbb_transfer_start(sc, ST_STATUS);
+ return;
+ }
+ if (max_bulk > sc->data_rem) {
+ max_bulk = sc->data_rem;
+ }
+ usbd_xfer_set_timeout(xfer, sc->data_timeout);
+ usbd_xfer_set_frame_data(xfer, 0, sc->data_ptr, max_bulk);
+ usbd_transfer_submit(xfer);
+ return;
+
+ default: /* Error */
+ if (error == USB_ERR_CANCELLED) {
+ bbb_done(sc, error);
+ } else {
+ bbb_transfer_start(sc, ST_DATA_WR_CS);
+ }
+ return;
+
+ }
+}
+
+static void
+bbb_data_wr_cs_callback(struct usb_xfer *xfer, usb_error_t error)
+{
+ bbb_data_clear_stall_callback(xfer, ST_STATUS,
+ ST_DATA_WR);
+}
+
+static void
+bbb_status_callback(struct usb_xfer *xfer, usb_error_t error)
+{
+ struct bbb_transfer *sc = usbd_xfer_softc(xfer);
+ int actlen, sumlen;
+
+ usbd_xfer_status(xfer, &actlen, &sumlen, NULL, NULL);
+
+ switch (USB_GET_STATE(xfer)) {
+ case USB_ST_TRANSFERRED:
+
+ /* very simple status check */
+
+ if (actlen < sizeof(sc->csw)) {
+ bbb_done(sc, USB_ERR_SHORT_XFER);
+ } else if (sc->csw.bCSWStatus == CSWSTATUS_GOOD) {
+ bbb_done(sc, 0); /* success */
+ } else {
+ bbb_done(sc, ERR_CSW_FAILED); /* error */
+ }
+ break;
+
+ case USB_ST_SETUP:
+ usbd_xfer_set_frame_data(xfer, 0, &sc->csw, sizeof(sc->csw));
+ usbd_transfer_submit(xfer);
+ break;
+
+ default:
+ DPRINTF("Failed to read CSW: %s, try %d\n",
+ usbd_errstr(error), sc->status_try);
+
+ if (error == USB_ERR_CANCELLED || sc->status_try) {
+ bbb_done(sc, error);
+ } else {
+ sc->status_try = 1;
+ bbb_transfer_start(sc, ST_DATA_RD_CS);
+ }
+ break;
+ }
+}
+
+/*------------------------------------------------------------------------*
+ * bbb_command_start - execute a SCSI command synchronously
+ *
+ * Return values
+ * 0: Success
+ * Else: Failure
+ *------------------------------------------------------------------------*/
+static uint8_t
+bbb_command_start(struct bbb_transfer *sc, uint8_t dir, uint8_t lun,
+ void *data_ptr, size_t data_len, void *cmd_ptr, size_t cmd_len,
+ usb_timeout_t data_timeout)
+{
+ sc->lun = lun;
+ sc->dir = data_len ? dir : DIR_NONE;
+ sc->data_ptr = data_ptr;
+ sc->data_len = data_len;
+ sc->data_rem = data_len;
+ sc->data_timeout = (data_timeout + USB_MS_HZ);
+ sc->actlen = 0;
+ sc->cmd_len = cmd_len;
+ bzero(&sc->cbw.CBWCDB, sizeof(sc->cbw.CBWCDB));
+ bcopy(cmd_ptr, &sc->cbw.CBWCDB, cmd_len);
+ DPRINTFN(1, "SCSI cmd = %*D\n", (int)cmd_len, &sc->cbw.CBWCDB, ":");
+
+ mtx_lock(&sc->mtx);
+ usbd_transfer_start(sc->xfer[sc->state]);
+
+ while (usbd_transfer_pending(sc->xfer[sc->state])) {
+ cv_wait(&sc->cv, &sc->mtx);
+ }
+ mtx_unlock(&sc->mtx);
+ return (sc->error);
+}
+
+static struct bbb_transfer *
+bbb_attach(struct usb_device *udev, uint8_t iface_index)
+{
+ struct usb_interface *iface;
+ struct usb_interface_descriptor *id;
+ struct bbb_transfer *sc;
+ usb_error_t err;
+
+ iface = usbd_get_iface(udev, iface_index);
+ if (iface == NULL)
+ return (NULL);
+
+ id = iface->idesc;
+ if (id == NULL || id->bInterfaceClass != UICLASS_MASS)
+ return (NULL);
+
+ switch (id->bInterfaceSubClass) {
+ case UISUBCLASS_SCSI:
+ case UISUBCLASS_UFI:
+ case UISUBCLASS_SFF8020I:
+ case UISUBCLASS_SFF8070I:
+ break;
+ default:
+ return (NULL);
+ }
+
+ switch (id->bInterfaceProtocol) {
+ case UIPROTO_MASS_BBB_OLD:
+ case UIPROTO_MASS_BBB:
+ break;
+ default:
+ return (NULL);
+ }
+
+ sc = malloc(sizeof(*sc), M_USB, M_WAITOK | M_ZERO);
+ mtx_init(&sc->mtx, "USB autoinstall", NULL, MTX_DEF);
+ cv_init(&sc->cv, "WBBB");
+
+ err = usbd_transfer_setup(udev, &iface_index, sc->xfer, bbb_config,
+ ST_MAX, sc, &sc->mtx);
+ if (err) {
+ bbb_detach(sc);
+ return (NULL);
+ }
+ return (sc);
+}
+
+static void
+bbb_detach(struct bbb_transfer *sc)
+{
+ usbd_transfer_unsetup(sc->xfer, ST_MAX);
+ mtx_destroy(&sc->mtx);
+ cv_destroy(&sc->cv);
+ free(sc, M_USB);
+}
+
+/*------------------------------------------------------------------------*
+ * usb_iface_is_cdrom
+ *
+ * Return values:
+ * 1: This interface is an auto install disk (CD-ROM)
+ * 0: Not an auto install disk.
+ *------------------------------------------------------------------------*/
+int
+usb_iface_is_cdrom(struct usb_device *udev, uint8_t iface_index)
+{
+ struct bbb_transfer *sc;
+ usb_error_t err;
+ uint8_t timeout, is_cdrom;
+ uint8_t sid_type;
+
+ sc = bbb_attach(udev, iface_index);
+ if (sc == NULL)
+ return (0);
+
+ is_cdrom = 0;
+ timeout = 4; /* tries */
+ while (--timeout) {
+ err = bbb_command_start(sc, DIR_IN, 0, sc->buffer,
+ SCSI_INQ_LEN, &scsi_inquiry, sizeof(scsi_inquiry),
+ USB_MS_HZ);
+
+ if (err == 0 && sc->actlen > 0) {
+ sid_type = sc->buffer[0] & 0x1F;
+ if (sid_type == 0x05)
+ is_cdrom = 1;
+ break;
+ } else if (err != ERR_CSW_FAILED)
+			break;	/* non-retryable error */
+ usb_pause_mtx(NULL, hz);
+ }
+ bbb_detach(sc);
+ return (is_cdrom);
+}
+
+usb_error_t
+usb_msc_eject(struct usb_device *udev, uint8_t iface_index, int method)
+{
+ struct bbb_transfer *sc;
+ usb_error_t err;
+
+ sc = bbb_attach(udev, iface_index);
+ if (sc == NULL)
+ return (USB_ERR_INVAL);
+
+ err = 0;
+ switch (method) {
+ case MSC_EJECT_STOPUNIT:
+ err = bbb_command_start(sc, DIR_IN, 0, NULL, 0,
+ &scsi_test_unit_ready, sizeof(scsi_test_unit_ready),
+ USB_MS_HZ);
+ DPRINTF("Test unit ready status: %s\n", usbd_errstr(err));
+ err = bbb_command_start(sc, DIR_IN, 0, NULL, 0,
+ &scsi_start_stop_unit, sizeof(scsi_start_stop_unit),
+ USB_MS_HZ);
+ break;
+ case MSC_EJECT_REZERO:
+ err = bbb_command_start(sc, DIR_IN, 0, NULL, 0,
+ &scsi_rezero_init, sizeof(scsi_rezero_init),
+ USB_MS_HZ);
+ break;
+ case MSC_EJECT_ZTESTOR:
+ err = bbb_command_start(sc, DIR_IN, 0, NULL, 0,
+ &scsi_ztestor_eject, sizeof(scsi_ztestor_eject),
+ USB_MS_HZ);
+ break;
+ case MSC_EJECT_CMOTECH:
+ err = bbb_command_start(sc, DIR_IN, 0, NULL, 0,
+ &scsi_cmotech_eject, sizeof(scsi_cmotech_eject),
+ USB_MS_HZ);
+ break;
+ case MSC_EJECT_HUAWEI:
+ err = bbb_command_start(sc, DIR_IN, 0, NULL, 0,
+ &scsi_huawei_eject, sizeof(scsi_huawei_eject),
+ USB_MS_HZ);
+ break;
+ case MSC_EJECT_TCT:
+ /*
+		 * TCTMobile needs the DIR_IN flag. To get it, we
+		 * supply dummy data with the command.
+ */
+ err = bbb_command_start(sc, DIR_IN, 0, &sc->buffer,
+ sizeof(sc->buffer), &scsi_tct_eject,
+ sizeof(scsi_tct_eject), USB_MS_HZ);
+ break;
+ default:
+ printf("usb_msc_eject: unknown eject method (%d)\n", method);
+ break;
+ }
+ DPRINTF("Eject CD command status: %s\n", usbd_errstr(err));
+
+ bbb_detach(sc);
+ return (0);
+}
diff --git a/rtems/freebsd/dev/usb/usb_msctest.h b/rtems/freebsd/dev/usb/usb_msctest.h
new file mode 100644
index 00000000..6ba2c3fd
--- /dev/null
+++ b/rtems/freebsd/dev/usb/usb_msctest.h
@@ -0,0 +1,44 @@
+/* $FreeBSD$ */
+/*-
+ * Copyright (c) 2008 Hans Petter Selasky. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _USB_MSCTEST_HH_
+#define _USB_MSCTEST_HH_
+
+enum {
+ MSC_EJECT_STOPUNIT,
+ MSC_EJECT_REZERO,
+ MSC_EJECT_ZTESTOR,
+ MSC_EJECT_CMOTECH,
+ MSC_EJECT_HUAWEI,
+ MSC_EJECT_TCT,
+};
+
+int usb_iface_is_cdrom(struct usb_device *udev,
+ uint8_t iface_index);
+usb_error_t usb_msc_eject(struct usb_device *udev,
+ uint8_t iface_index, int method);
+
+#endif /* _USB_MSCTEST_HH_ */
diff --git a/rtems/freebsd/dev/usb/usb_parse.c b/rtems/freebsd/dev/usb/usb_parse.c
new file mode 100644
index 00000000..0e519ada
--- /dev/null
+++ b/rtems/freebsd/dev/usb/usb_parse.c
@@ -0,0 +1,291 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/* $FreeBSD$ */
+/*-
+ * Copyright (c) 2008 Hans Petter Selasky. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <rtems/freebsd/sys/stdint.h>
+#include <rtems/freebsd/sys/stddef.h>
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/queue.h>
+#include <rtems/freebsd/sys/types.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/bus.h>
+#include <rtems/freebsd/sys/linker_set.h>
+#include <rtems/freebsd/sys/module.h>
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/mutex.h>
+#include <rtems/freebsd/sys/condvar.h>
+#include <rtems/freebsd/sys/sysctl.h>
+#include <rtems/freebsd/sys/sx.h>
+#include <rtems/freebsd/sys/unistd.h>
+#include <rtems/freebsd/sys/callout.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/priv.h>
+
+#include <rtems/freebsd/dev/usb/usb.h>
+#include <rtems/freebsd/dev/usb/usbdi.h>
+#include <rtems/freebsd/dev/usb/usbdi_util.h>
+
+
+/*------------------------------------------------------------------------*
+ * usb_desc_foreach
+ *
+ * This function is the safe way to iterate across the USB config
+ * descriptor. It contains several checks against invalid
+ * descriptors. If the "desc" argument passed to this function is
+ * "NULL" the first descriptor, if any, will be returned.
+ *
+ * Return values:
+ * NULL: End of descriptors
+ * Else: Next descriptor after "desc"
+ *------------------------------------------------------------------------*/
+struct usb_descriptor *
+usb_desc_foreach(struct usb_config_descriptor *cd,
+ struct usb_descriptor *_desc)
+{
+ uint8_t *desc_next;
+ uint8_t *start;
+ uint8_t *end;
+ uint8_t *desc;
+
+ /* be NULL safe */
+ if (cd == NULL)
+ return (NULL);
+
+ /* We assume that the "wTotalLength" has been checked. */
+ start = (uint8_t *)cd;
+ end = start + UGETW(cd->wTotalLength);
+ desc = (uint8_t *)_desc;
+
+ /* Get start of next USB descriptor. */
+ if (desc == NULL)
+ desc = start;
+ else
+ desc = desc + desc[0];
+
+ /* Check that the next USB descriptor is within the range. */
+ if ((desc < start) || (desc >= end))
+ return (NULL); /* out of range, or EOD */
+
+ /* Check that the second next USB descriptor is within range. */
+ desc_next = desc + desc[0];
+ if ((desc_next < start) || (desc_next > end))
+ return (NULL); /* out of range */
+
+ /* Check minimum descriptor length. */
+ if (desc[0] < 3)
+ return (NULL); /* too short descriptor */
+
+ /* Return start of next descriptor. */
+ return ((struct usb_descriptor *)desc);
+}
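+
+/*
+ * Usage sketch (illustrative only, not part of the driver): walk
+ * all raw descriptors of a configuration:
+ *
+ *	struct usb_descriptor *desc = NULL;
+ *
+ *	while ((desc = usb_desc_foreach(cd, desc)) != NULL)
+ *		printf("type=0x%02x length=%u\n",
+ *		    desc->bDescriptorType, desc->bLength);
+ */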
+
+/*------------------------------------------------------------------------*
+ * usb_idesc_foreach
+ *
+ * This function will iterate the interface descriptors in the config
+ * descriptor. The parse state structure should be zeroed before
+ * calling this function the first time.
+ *
+ * Return values:
+ * NULL: End of descriptors
+ * Else: A valid interface descriptor
+ *------------------------------------------------------------------------*/
+struct usb_interface_descriptor *
+usb_idesc_foreach(struct usb_config_descriptor *cd,
+ struct usb_idesc_parse_state *ps)
+{
+ struct usb_interface_descriptor *id;
+ uint8_t new_iface;
+
+ /* retrieve current descriptor */
+ id = (struct usb_interface_descriptor *)ps->desc;
+ /* default is to start a new interface */
+ new_iface = 1;
+
+ while (1) {
+ id = (struct usb_interface_descriptor *)
+ usb_desc_foreach(cd, (struct usb_descriptor *)id);
+ if (id == NULL)
+ break;
+ if ((id->bDescriptorType == UDESC_INTERFACE) &&
+ (id->bLength >= sizeof(*id))) {
+ if (ps->iface_no_last == id->bInterfaceNumber)
+ new_iface = 0;
+ ps->iface_no_last = id->bInterfaceNumber;
+ break;
+ }
+ }
+
+ if (ps->desc == NULL) {
+ /* first time */
+ } else if (new_iface) {
+ /* new interface */
+		ps->iface_index++;
+ ps->iface_index_alt = 0;
+ } else {
+ /* new alternate interface */
+		ps->iface_index_alt++;
+ }
+
+ /* store and return current descriptor */
+ ps->desc = (struct usb_descriptor *)id;
+ return (id);
+}
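+
+/*
+ * Usage sketch (illustrative only). The parse state must be zeroed
+ * before the first call:
+ *
+ *	struct usb_idesc_parse_state ps;
+ *	struct usb_interface_descriptor *id;
+ *
+ *	memset(&ps, 0, sizeof(ps));
+ *	while ((id = usb_idesc_foreach(cd, &ps)) != NULL)
+ *		printf("iface=%u alt=%u number=%u\n",
+ *		    ps.iface_index, ps.iface_index_alt,
+ *		    id->bInterfaceNumber);
+ */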
+
+/*------------------------------------------------------------------------*
+ * usb_edesc_foreach
+ *
+ * This function will iterate all the endpoint descriptors within an
+ * interface descriptor. Starting value for the "ped" argument should
+ * be a valid interface descriptor.
+ *
+ * Return values:
+ * NULL: End of descriptors
+ * Else: A valid endpoint descriptor
+ *------------------------------------------------------------------------*/
+struct usb_endpoint_descriptor *
+usb_edesc_foreach(struct usb_config_descriptor *cd,
+ struct usb_endpoint_descriptor *ped)
+{
+ struct usb_descriptor *desc;
+
+ desc = ((struct usb_descriptor *)ped);
+
+ while ((desc = usb_desc_foreach(cd, desc))) {
+ if (desc->bDescriptorType == UDESC_INTERFACE) {
+ break;
+ }
+ if (desc->bDescriptorType == UDESC_ENDPOINT) {
+ if (desc->bLength < sizeof(*ped)) {
+ /* endpoint descriptor is invalid */
+ break;
+ }
+ return ((struct usb_endpoint_descriptor *)desc);
+ }
+ }
+ return (NULL);
+}
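+
+/*
+ * Usage sketch (illustrative only): list the endpoints belonging to
+ * the interface descriptor "id"; iteration stops at the next
+ * interface descriptor:
+ *
+ *	struct usb_endpoint_descriptor *ed;
+ *
+ *	ed = (struct usb_endpoint_descriptor *)id;
+ *	while ((ed = usb_edesc_foreach(cd, ed)) != NULL)
+ *		printf("endpoint=0x%02x\n", ed->bEndpointAddress);
+ */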
+
+/*------------------------------------------------------------------------*
+ * usb_ed_comp_foreach
+ *
+ * This function will iterate all the endpoint companion descriptors
+ * within an endpoint descriptor in an interface descriptor. Starting
+ * value for the "ped" argument should be a valid endpoint companion
+ * descriptor.
+ *
+ * Return values:
+ * NULL: End of descriptors
+ * Else: A valid endpoint companion descriptor
+ *------------------------------------------------------------------------*/
+struct usb_endpoint_ss_comp_descriptor *
+usb_ed_comp_foreach(struct usb_config_descriptor *cd,
+ struct usb_endpoint_ss_comp_descriptor *ped)
+{
+ struct usb_descriptor *desc;
+
+ desc = ((struct usb_descriptor *)ped);
+
+ while ((desc = usb_desc_foreach(cd, desc))) {
+ if (desc->bDescriptorType == UDESC_INTERFACE)
+ break;
+ if (desc->bDescriptorType == UDESC_ENDPOINT)
+ break;
+ if (desc->bDescriptorType == UDESC_ENDPOINT_SS_COMP) {
+ if (desc->bLength < sizeof(*ped)) {
+ /* endpoint companion descriptor is invalid */
+ break;
+ }
+ return ((struct usb_endpoint_ss_comp_descriptor *)desc);
+ }
+ }
+ return (NULL);
+}
+
+/*------------------------------------------------------------------------*
+ * usbd_get_no_descriptors
+ *
+ * This function will count the total number of descriptors in the
+ * configuration descriptor that are of the given "type".
+ *------------------------------------------------------------------------*/
+uint8_t
+usbd_get_no_descriptors(struct usb_config_descriptor *cd, uint8_t type)
+{
+ struct usb_descriptor *desc = NULL;
+ uint8_t count = 0;
+
+ while ((desc = usb_desc_foreach(cd, desc))) {
+ if (desc->bDescriptorType == type) {
+ count++;
+ if (count == 0xFF)
+				break;	/* sanity limit */
+ }
+ }
+ return (count);
+}
+
+/*------------------------------------------------------------------------*
+ * usbd_get_no_alts
+ *
+ * Return value:
+ * Number of alternate settings for the given interface descriptor
+ * pointer. If the USB descriptor is corrupt, the returned value can
+ * be greater than the actual number of alternate settings.
+ *------------------------------------------------------------------------*/
+uint8_t
+usbd_get_no_alts(struct usb_config_descriptor *cd,
+ struct usb_interface_descriptor *id)
+{
+ struct usb_descriptor *desc;
+ uint8_t n;
+ uint8_t ifaceno;
+
+ /* Reset interface count */
+
+ n = 0;
+
+ /* Get the interface number */
+
+ ifaceno = id->bInterfaceNumber;
+
+ /* Iterate all the USB descriptors */
+
+ desc = NULL;
+ while ((desc = usb_desc_foreach(cd, desc))) {
+ if ((desc->bDescriptorType == UDESC_INTERFACE) &&
+ (desc->bLength >= sizeof(*id))) {
+ id = (struct usb_interface_descriptor *)desc;
+ if (id->bInterfaceNumber == ifaceno) {
+ n++;
+ if (n == 0xFF)
+					break;	/* sanity limit */
+ }
+ }
+ }
+ return (n);
+}
diff --git a/rtems/freebsd/dev/usb/usb_process.c b/rtems/freebsd/dev/usb/usb_process.c
new file mode 100644
index 00000000..8f52b163
--- /dev/null
+++ b/rtems/freebsd/dev/usb/usb_process.c
@@ -0,0 +1,497 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/* $FreeBSD$ */
+/*-
+ * Copyright (c) 2008 Hans Petter Selasky. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#define USB_DEBUG_VAR usb_proc_debug
+
+#include <rtems/freebsd/sys/stdint.h>
+#include <rtems/freebsd/sys/stddef.h>
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/queue.h>
+#include <rtems/freebsd/sys/types.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/bus.h>
+#include <rtems/freebsd/sys/linker_set.h>
+#include <rtems/freebsd/sys/module.h>
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/mutex.h>
+#include <rtems/freebsd/sys/condvar.h>
+#include <rtems/freebsd/sys/sysctl.h>
+#include <rtems/freebsd/sys/sx.h>
+#include <rtems/freebsd/sys/unistd.h>
+#include <rtems/freebsd/sys/callout.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/priv.h>
+
+#include <rtems/freebsd/dev/usb/usb.h>
+#include <rtems/freebsd/dev/usb/usbdi.h>
+#include <rtems/freebsd/dev/usb/usbdi_util.h>
+#include <rtems/freebsd/dev/usb/usb_process.h>
+#include <rtems/freebsd/dev/usb/usb_debug.h>
+#include <rtems/freebsd/dev/usb/usb_util.h>
+
+#include <rtems/freebsd/sys/proc.h>
+#include <rtems/freebsd/sys/kthread.h>
+#include <rtems/freebsd/sys/sched.h>
+
+#if (__FreeBSD_version < 700000)
+#define thread_lock(td) mtx_lock_spin(&sched_lock)
+#define thread_unlock(td) mtx_unlock_spin(&sched_lock)
+#endif
+
+#if (__FreeBSD_version >= 800000)
+static struct proc *usbproc;
+static int usb_pcount;
+#define USB_THREAD_CREATE(f, s, p, ...) \
+ kproc_kthread_add((f), (s), &usbproc, (p), RFHIGHPID, \
+ 0, "usb", __VA_ARGS__)
+#define USB_THREAD_SUSPEND(p) kthread_suspend(p,0)
+#define USB_THREAD_EXIT(err) kthread_exit()
+#else
+#define USB_THREAD_CREATE(f, s, p, ...) \
+ kthread_create((f), (s), (p), RFHIGHPID, 0, __VA_ARGS__)
+#define USB_THREAD_SUSPEND(p) kthread_suspend(p,0)
+#define USB_THREAD_EXIT(err) kthread_exit(err)
+#endif
+
+#ifdef USB_DEBUG
+static int usb_proc_debug;
+
+SYSCTL_NODE(_hw_usb, OID_AUTO, proc, CTLFLAG_RW, 0, "USB process");
+SYSCTL_INT(_hw_usb_proc, OID_AUTO, debug, CTLFLAG_RW, &usb_proc_debug, 0,
+ "Debug level");
+
+TUNABLE_INT("hw.usb.proc.debug", &usb_proc_debug);
+#endif
+
+/*------------------------------------------------------------------------*
+ * usb_process
+ *
+ * This function is the USB process dispatcher.
+ *------------------------------------------------------------------------*/
+static void
+usb_process(void *arg)
+{
+ struct usb_process *up = arg;
+ struct usb_proc_msg *pm;
+#ifndef __rtems__
+ struct thread *td;
+
+ /* adjust priority */
+ td = curthread;
+ thread_lock(td);
+ sched_prio(td, up->up_prio);
+ thread_unlock(td);
+#endif /* __rtems__ */
+
+ mtx_lock(up->up_mtx);
+
+#ifndef __rtems__
+ up->up_curtd = td;
+#endif /* __rtems__ */
+
+ while (1) {
+
+ if (up->up_gone)
+ break;
+
+ /*
+ * NOTE to reimplementors: dequeueing a command from the
+ * "used" queue and executing it must be atomic, with regard
+ * to the "up_mtx" mutex. That means any attempt to queue a
+ * command by another thread must be blocked until either:
+ *
+ * 1) the command sleeps
+ *
+ * 2) the command returns
+ *
+ * Here is a practical example that shows how this helps
+ * solving a problem:
+ *
+ * Assume that you want to set the baud rate on a USB serial
+ * device. During the programming of the device you don't
+		 * want to receive or transmit any data, because it will
+		 * most likely be garbage anyway. The programming of our USB
+ * device takes 20 milliseconds and it needs to call
+ * functions that sleep.
+ *
+ * Non-working solution: Before we queue the programming
+ * command, we stop transmission and reception of data. Then
+ * we queue a programming command. At the end of the
+ * programming command we enable transmission and reception
+ * of data.
+ *
+ * Problem: If a second programming command is queued while the
+ * first one is sleeping, we end up enabling transmission
+ * and reception of data too early.
+ *
+ * Working solution: Before we queue the programming command,
+ * we stop transmission and reception of data. Then we queue
+ * a programming command. Then we queue a second command
+ * that only enables transmission and reception of data.
+ *
+ * Why it works: If a second programming command is queued
+		 * while the first one is sleeping, then queueing a second
+		 * command to enable the data transfers will cause the
+		 * previous enable command, which is still on the queue,
+		 * to be removed and re-inserted after the last baud rate
+		 * programming command, which then gives the desired
+		 * result.
+ */
+ pm = TAILQ_FIRST(&up->up_qhead);
+
+ if (pm) {
+ DPRINTF("Message pm=%p, cb=%p (enter)\n",
+ pm, pm->pm_callback);
+
+ (pm->pm_callback) (pm);
+
+ if (pm == TAILQ_FIRST(&up->up_qhead)) {
+ /* nothing changed */
+ TAILQ_REMOVE(&up->up_qhead, pm, pm_qentry);
+ pm->pm_qentry.tqe_prev = NULL;
+ }
+ DPRINTF("Message pm=%p (leave)\n", pm);
+
+ continue;
+ }
+		/* end of messages - check if anyone is waiting for sync */
+ if (up->up_dsleep) {
+ up->up_dsleep = 0;
+ cv_broadcast(&up->up_drain);
+ }
+ up->up_msleep = 1;
+ cv_wait(&up->up_cv, up->up_mtx);
+ }
+
+ up->up_ptr = NULL;
+ cv_signal(&up->up_cv);
+ mtx_unlock(up->up_mtx);
+#if (__FreeBSD_version >= 800000)
+ /* Clear the proc pointer if this is the last thread. */
+ if (--usb_pcount == 0)
+ usbproc = NULL;
+#endif
+
+ USB_THREAD_EXIT(0);
+}
+
+/*------------------------------------------------------------------------*
+ * usb_proc_create
+ *
+ * This function will create a process using the given "prio" that can
+ * execute callbacks. The mutex pointed to by "p_mtx" will be locked
+ * before each callback is invoked and released after the callback
+ * has returned. The structure pointed to by "up" is assumed to be
+ * zeroed before this function is called.
+ *
+ * Return values:
+ * 0: success
+ * Else: failure
+ *------------------------------------------------------------------------*/
+int
+usb_proc_create(struct usb_process *up, struct mtx *p_mtx,
+ const char *pmesg, uint8_t prio)
+{
+ up->up_mtx = p_mtx;
+ up->up_prio = prio;
+
+ TAILQ_INIT(&up->up_qhead);
+
+ cv_init(&up->up_cv, "-");
+ cv_init(&up->up_drain, "usbdrain");
+
+ if (USB_THREAD_CREATE(&usb_process, up,
+ &up->up_ptr, "%s", pmesg)) {
+		DPRINTFN(0, "Unable to create USB process\n");
+ up->up_ptr = NULL;
+ goto error;
+ }
+#if (__FreeBSD_version >= 800000)
+ usb_pcount++;
+#endif
+ return (0);
+
+error:
+ usb_proc_free(up);
+ return (ENOMEM);
+}
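+
+/*
+ * Lifecycle sketch (illustrative only; "my_mtx" is a hypothetical
+ * driver mutex):
+ *
+ *	struct usb_process up;
+ *
+ *	memset(&up, 0, sizeof(up));
+ *	if (usb_proc_create(&up, &my_mtx, "mydrv", USB_PRI_MED))
+ *		return (ENOMEM);
+ *	...
+ *	usb_proc_free(&up);
+ *
+ * "usb_proc_free()" drains the message queue internally, so no
+ * separate "usb_proc_drain()" call is needed on teardown.
+ */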
+
+/*------------------------------------------------------------------------*
+ * usb_proc_free
+ *
+ * NOTE: If the structure pointed to by "up" is all zero, this
+ * function does nothing.
+ *
+ * NOTE: Messages that are pending on the process queue will not be
+ * removed nor called.
+ *------------------------------------------------------------------------*/
+void
+usb_proc_free(struct usb_process *up)
+{
+ /* check if not initialised */
+ if (up->up_mtx == NULL)
+ return;
+
+ usb_proc_drain(up);
+
+ cv_destroy(&up->up_cv);
+ cv_destroy(&up->up_drain);
+
+ /* make sure that we do not enter here again */
+ up->up_mtx = NULL;
+}
+
+/*------------------------------------------------------------------------*
+ * usb_proc_msignal
+ *
+ * This function will queue one of the passed USB process messages on
+ * the USB process queue. The first message that is not already queued
+ * will get queued. If both messages are already queued the one queued
+ * last will be removed from the queue and re-queued at the end. The USB
+ * process mutex must be locked when calling this function. This
+ * function exploits the fact that a process can only do one callback
+ * at a time. The message that was queued is returned.
+ *------------------------------------------------------------------------*/
+void *
+usb_proc_msignal(struct usb_process *up, void *_pm0, void *_pm1)
+{
+ struct usb_proc_msg *pm0 = _pm0;
+ struct usb_proc_msg *pm1 = _pm1;
+ struct usb_proc_msg *pm2;
+ usb_size_t d;
+ uint8_t t;
+
+ /* check if gone, return dummy value */
+ if (up->up_gone)
+ return (_pm0);
+
+ mtx_assert(up->up_mtx, MA_OWNED);
+
+ t = 0;
+
+ if (pm0->pm_qentry.tqe_prev) {
+ t |= 1;
+ }
+ if (pm1->pm_qentry.tqe_prev) {
+ t |= 2;
+ }
+ if (t == 0) {
+ /*
+ * No entries are queued. Queue "pm0" and use the existing
+ * message number.
+ */
+ pm2 = pm0;
+ } else if (t == 1) {
+ /* Check if we need to increment the message number. */
+ if (pm0->pm_num == up->up_msg_num) {
+ up->up_msg_num++;
+ }
+ pm2 = pm1;
+ } else if (t == 2) {
+ /* Check if we need to increment the message number. */
+ if (pm1->pm_num == up->up_msg_num) {
+ up->up_msg_num++;
+ }
+ pm2 = pm0;
+ } else if (t == 3) {
+ /*
+ * Both entries are queued. Re-queue the entry closest to
+ * the end.
+ */
+ d = (pm1->pm_num - pm0->pm_num);
+
+ /* Check sign after subtraction */
+ if (d & 0x80000000) {
+ pm2 = pm0;
+ } else {
+ pm2 = pm1;
+ }
+
+ TAILQ_REMOVE(&up->up_qhead, pm2, pm_qentry);
+ } else {
+ pm2 = NULL; /* panic - should not happen */
+ }
+
+ DPRINTF(" t=%u, num=%u\n", t, up->up_msg_num);
+
+ /* Put message last on queue */
+
+ pm2->pm_num = up->up_msg_num;
+ TAILQ_INSERT_TAIL(&up->up_qhead, pm2, pm_qentry);
+
+ /* Check if we need to wakeup the USB process. */
+
+ if (up->up_msleep) {
+ up->up_msleep = 0; /* save "cv_signal()" calls */
+ cv_signal(&up->up_cv);
+ }
+ return (pm2);
+}
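+
+/*
+ * Usage sketch (illustrative only; the callback and message pair are
+ * hypothetical, and the field names follow the FreeBSD layout of
+ * "struct usb_proc_msg"). Each command keeps two pre-allocated
+ * messages so that a signal can always be queued, even while the
+ * other copy is executing:
+ *
+ *	static struct usb_proc_msg my_msg[2] = {
+ *		{ .pm_callback = &my_callback },
+ *		{ .pm_callback = &my_callback },
+ *	};
+ *
+ *	mtx_lock(up->up_mtx);
+ *	usb_proc_msignal(up, &my_msg[0], &my_msg[1]);
+ *	mtx_unlock(up->up_mtx);
+ */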
+
+/*------------------------------------------------------------------------*
+ * usb_proc_is_gone
+ *
+ * Return values:
+ * 0: USB process is running
+ * Else: USB process is tearing down
+ *------------------------------------------------------------------------*/
+uint8_t
+usb_proc_is_gone(struct usb_process *up)
+{
+ if (up->up_gone)
+ return (1);
+
+ mtx_assert(up->up_mtx, MA_OWNED);
+ return (0);
+}
+
+/*------------------------------------------------------------------------*
+ * usb_proc_mwait
+ *
+ * This function will return when the USB process message pointed to
+ * by "pm" is no longer on a queue. This function must be called
+ * with "up->up_mtx" locked.
+ *------------------------------------------------------------------------*/
+void
+usb_proc_mwait(struct usb_process *up, void *_pm0, void *_pm1)
+{
+ struct usb_proc_msg *pm0 = _pm0;
+ struct usb_proc_msg *pm1 = _pm1;
+
+ /* check if gone */
+ if (up->up_gone)
+ return;
+
+ mtx_assert(up->up_mtx, MA_OWNED);
+
+#ifndef __rtems__
+ if (up->up_curtd == curthread) {
+#else /* __rtems__ */
+ if (up->up_ptr->td_id == rtems_task_self()) {
+#endif /* __rtems__ */
+ /* Just remove the messages from the queue. */
+ if (pm0->pm_qentry.tqe_prev) {
+ TAILQ_REMOVE(&up->up_qhead, pm0, pm_qentry);
+ pm0->pm_qentry.tqe_prev = NULL;
+ }
+ if (pm1->pm_qentry.tqe_prev) {
+ TAILQ_REMOVE(&up->up_qhead, pm1, pm_qentry);
+ pm1->pm_qentry.tqe_prev = NULL;
+ }
+ } else
+ while (pm0->pm_qentry.tqe_prev ||
+ pm1->pm_qentry.tqe_prev) {
+ /* check if config thread is gone */
+ if (up->up_gone)
+ break;
+ up->up_dsleep = 1;
+ cv_wait(&up->up_drain, up->up_mtx);
+ }
+}
+
+/*------------------------------------------------------------------------*
+ * usb_proc_drain
+ *
+ * This function will tear down a USB process, waiting for the
+ * currently executing command to return.
+ *
+ * NOTE: If the structure pointed to by "up" is all zero,
+ * this function does nothing.
+ *------------------------------------------------------------------------*/
+void
+usb_proc_drain(struct usb_process *up)
+{
+ /* check if not initialised */
+ if (up->up_mtx == NULL)
+ return;
+ /* handle special case with Giant */
+ if (up->up_mtx != &Giant)
+ mtx_assert(up->up_mtx, MA_NOTOWNED);
+
+ mtx_lock(up->up_mtx);
+
+ /* Set the gone flag */
+
+ up->up_gone = 1;
+
+ while (up->up_ptr) {
+
+ /* Check if we need to wakeup the USB process */
+
+ if (up->up_msleep || up->up_csleep) {
+ up->up_msleep = 0;
+ up->up_csleep = 0;
+ cv_signal(&up->up_cv);
+ }
+ /* Check if we are still cold booted */
+
+ if (cold) {
+ USB_THREAD_SUSPEND(up->up_ptr);
+ printf("WARNING: A USB process has "
+ "been left suspended\n");
+ break;
+ }
+ cv_wait(&up->up_cv, up->up_mtx);
+ }
+ /* Check if someone is waiting - should not happen */
+
+ if (up->up_dsleep) {
+ up->up_dsleep = 0;
+ cv_broadcast(&up->up_drain);
+ DPRINTF("WARNING: Someone is waiting "
+ "for USB process drain!\n");
+ }
+ mtx_unlock(up->up_mtx);
+}
+
+/*------------------------------------------------------------------------*
+ * usb_proc_rewakeup
+ *
+ * This function is called to re-wakeup the given USB process. This
+ * usually happens after the USB system has been in polling mode,
+ * like during a panic. This function must be called with
+ * "up->up_mtx" locked.
+ *------------------------------------------------------------------------*/
+void
+usb_proc_rewakeup(struct usb_process *up)
+{
+ /* check if not initialised */
+ if (up->up_mtx == NULL)
+ return;
+ /* check if gone */
+ if (up->up_gone)
+ return;
+
+ mtx_assert(up->up_mtx, MA_OWNED);
+
+ if (up->up_msleep == 0) {
+ /* re-wakeup */
+ cv_signal(&up->up_cv);
+ }
+}
diff --git a/rtems/freebsd/dev/usb/usb_process.h b/rtems/freebsd/dev/usb/usb_process.h
new file mode 100644
index 00000000..c7d6aa44
--- /dev/null
+++ b/rtems/freebsd/dev/usb/usb_process.h
@@ -0,0 +1,84 @@
+/* $FreeBSD$ */
+/*-
+ * Copyright (c) 2008 Hans Petter Selasky. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _USB_PROCESS_HH_
+#define _USB_PROCESS_HH_
+
+#include <rtems/freebsd/sys/priority.h>
+
+/* defines */
+#define USB_PRI_HIGH PI_NET
+#define USB_PRI_MED PI_DISK
+
+#define USB_PROC_WAIT_TIMEOUT 2
+#define USB_PROC_WAIT_DRAIN 1
+#define USB_PROC_WAIT_NORMAL 0
+
+/* structure prototypes */
+
+struct usb_proc_msg;
+
+/*
+ * The following structure defines the USB process.
+ */
+struct usb_process {
+ TAILQ_HEAD(, usb_proc_msg) up_qhead;
+ struct cv up_cv;
+ struct cv up_drain;
+
+#ifndef __rtems__
+#if (__FreeBSD_version >= 800000)
+ struct thread *up_ptr;
+#else
+ struct proc *up_ptr;
+#endif
+ struct thread *up_curtd;
+#else /* __rtems__ */
+ struct thread *up_ptr;
+#endif /* __rtems__ */
+ struct mtx *up_mtx;
+
+ usb_size_t up_msg_num;
+
+ uint8_t up_prio;
+ uint8_t up_gone;
+ uint8_t up_msleep;
+ uint8_t up_csleep;
+ uint8_t up_dsleep;
+};
+
+/* prototypes */
+
+uint8_t usb_proc_is_gone(struct usb_process *up);
+int usb_proc_create(struct usb_process *up, struct mtx *p_mtx,
+ const char *pmesg, uint8_t prio);
+void usb_proc_drain(struct usb_process *up);
+void usb_proc_mwait(struct usb_process *up, void *pm0, void *pm1);
+void usb_proc_free(struct usb_process *up);
+void *usb_proc_msignal(struct usb_process *up, void *pm0, void *pm1);
+void usb_proc_rewakeup(struct usb_process *up);
+
+#endif /* _USB_PROCESS_HH_ */
diff --git a/rtems/freebsd/dev/usb/usb_request.c b/rtems/freebsd/dev/usb/usb_request.c
new file mode 100644
index 00000000..5ec541b6
--- /dev/null
+++ b/rtems/freebsd/dev/usb/usb_request.c
@@ -0,0 +1,2031 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/* $FreeBSD$ */
+/*-
+ * Copyright (c) 1998 The NetBSD Foundation, Inc. All rights reserved.
+ * Copyright (c) 1998 Lennart Augustsson. All rights reserved.
+ * Copyright (c) 2008 Hans Petter Selasky. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <rtems/freebsd/sys/stdint.h>
+#include <rtems/freebsd/sys/stddef.h>
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/queue.h>
+#include <rtems/freebsd/sys/types.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/bus.h>
+#include <rtems/freebsd/sys/linker_set.h>
+#include <rtems/freebsd/sys/module.h>
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/mutex.h>
+#include <rtems/freebsd/sys/condvar.h>
+#include <rtems/freebsd/sys/sysctl.h>
+#include <rtems/freebsd/sys/sx.h>
+#include <rtems/freebsd/sys/unistd.h>
+#include <rtems/freebsd/sys/callout.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/priv.h>
+
+#include <rtems/freebsd/dev/usb/usb.h>
+#include <rtems/freebsd/dev/usb/usbdi.h>
+#include <rtems/freebsd/dev/usb/usbdi_util.h>
+#include <rtems/freebsd/dev/usb/usb_ioctl.h>
+#include <rtems/freebsd/dev/usb/usbhid.h>
+
+#define USB_DEBUG_VAR usb_debug
+
+#include <rtems/freebsd/dev/usb/usb_core.h>
+#include <rtems/freebsd/dev/usb/usb_busdma.h>
+#include <rtems/freebsd/dev/usb/usb_request.h>
+#include <rtems/freebsd/dev/usb/usb_process.h>
+#include <rtems/freebsd/dev/usb/usb_transfer.h>
+#include <rtems/freebsd/dev/usb/usb_debug.h>
+#include <rtems/freebsd/dev/usb/usb_device.h>
+#include <rtems/freebsd/dev/usb/usb_util.h>
+#include <rtems/freebsd/dev/usb/usb_dynamic.h>
+
+#include <rtems/freebsd/dev/usb/usb_controller.h>
+#include <rtems/freebsd/dev/usb/usb_bus.h>
+#include <rtems/freebsd/sys/ctype.h>
+
+#ifdef USB_DEBUG
+static int usb_pr_poll_delay = USB_PORT_RESET_DELAY;
+static int usb_pr_recovery_delay = USB_PORT_RESET_RECOVERY;
+
+SYSCTL_INT(_hw_usb, OID_AUTO, pr_poll_delay, CTLFLAG_RW,
+ &usb_pr_poll_delay, 0, "USB port reset poll delay in ms");
+SYSCTL_INT(_hw_usb, OID_AUTO, pr_recovery_delay, CTLFLAG_RW,
+ &usb_pr_recovery_delay, 0, "USB port reset recovery delay in ms");
+
+#ifdef USB_REQ_DEBUG
+/* The following structures are used in connection to fault injection. */
+struct usb_ctrl_debug {
+ int bus_index; /* target bus */
+ int dev_index; /* target address */
+ int ds_fail; /* fail data stage */
+	int	ss_fail;		/* fail status stage */
+ int ds_delay; /* data stage delay in ms */
+ int ss_delay; /* status stage delay in ms */
+ int bmRequestType_value;
+ int bRequest_value;
+};
+
+struct usb_ctrl_debug_bits {
+ uint16_t ds_delay;
+ uint16_t ss_delay;
+ uint8_t ds_fail:1;
+ uint8_t ss_fail:1;
+ uint8_t enabled:1;
+};
+
+/* The default is to disable fault injection. */
+
+static struct usb_ctrl_debug usb_ctrl_debug = {
+ .bus_index = -1,
+ .dev_index = -1,
+ .bmRequestType_value = -1,
+ .bRequest_value = -1,
+};
+
+SYSCTL_INT(_hw_usb, OID_AUTO, ctrl_bus_fail, CTLFLAG_RW,
+ &usb_ctrl_debug.bus_index, 0, "USB controller index to fail");
+SYSCTL_INT(_hw_usb, OID_AUTO, ctrl_dev_fail, CTLFLAG_RW,
+ &usb_ctrl_debug.dev_index, 0, "USB device address to fail");
+SYSCTL_INT(_hw_usb, OID_AUTO, ctrl_ds_fail, CTLFLAG_RW,
+ &usb_ctrl_debug.ds_fail, 0, "USB fail data stage");
+SYSCTL_INT(_hw_usb, OID_AUTO, ctrl_ss_fail, CTLFLAG_RW,
+ &usb_ctrl_debug.ss_fail, 0, "USB fail status stage");
+SYSCTL_INT(_hw_usb, OID_AUTO, ctrl_ds_delay, CTLFLAG_RW,
+ &usb_ctrl_debug.ds_delay, 0, "USB data stage delay in ms");
+SYSCTL_INT(_hw_usb, OID_AUTO, ctrl_ss_delay, CTLFLAG_RW,
+ &usb_ctrl_debug.ss_delay, 0, "USB status stage delay in ms");
+SYSCTL_INT(_hw_usb, OID_AUTO, ctrl_rt_fail, CTLFLAG_RW,
+ &usb_ctrl_debug.bmRequestType_value, 0, "USB bmRequestType to fail");
+SYSCTL_INT(_hw_usb, OID_AUTO, ctrl_rv_fail, CTLFLAG_RW,
+ &usb_ctrl_debug.bRequest_value, 0, "USB bRequest to fail");
+
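+
+/*
+ * Illustrative fault-injection setup (the values are examples): to
+ * fail the data stage of control transfers to device address 2 on
+ * bus 0, set:
+ *
+ *	sysctl hw.usb.ctrl_bus_fail=0
+ *	sysctl hw.usb.ctrl_dev_fail=2
+ *	sysctl hw.usb.ctrl_ds_fail=1
+ */
+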
+/*------------------------------------------------------------------------*
+ * usbd_get_debug_bits
+ *
+ * This function is only useful in USB host mode.
+ *------------------------------------------------------------------------*/
+static void
+usbd_get_debug_bits(struct usb_device *udev, struct usb_device_request *req,
+ struct usb_ctrl_debug_bits *dbg)
+{
+ int temp;
+
+ memset(dbg, 0, sizeof(*dbg));
+
+ /* Compute data stage delay */
+
+ temp = usb_ctrl_debug.ds_delay;
+ if (temp < 0)
+ temp = 0;
+ else if (temp > (16*1024))
+ temp = (16*1024);
+
+ dbg->ds_delay = temp;
+
+ /* Compute status stage delay */
+
+ temp = usb_ctrl_debug.ss_delay;
+ if (temp < 0)
+ temp = 0;
+ else if (temp > (16*1024))
+ temp = (16*1024);
+
+ dbg->ss_delay = temp;
+
+ /* Check if this control request should be failed */
+
+ if (usbd_get_bus_index(udev) != usb_ctrl_debug.bus_index)
+ return;
+
+ if (usbd_get_device_index(udev) != usb_ctrl_debug.dev_index)
+ return;
+
+ temp = usb_ctrl_debug.bmRequestType_value;
+
+ if ((temp != req->bmRequestType) && (temp >= 0) && (temp <= 255))
+ return;
+
+ temp = usb_ctrl_debug.bRequest_value;
+
+ if ((temp != req->bRequest) && (temp >= 0) && (temp <= 255))
+ return;
+
+ temp = usb_ctrl_debug.ds_fail;
+ if (temp)
+ dbg->ds_fail = 1;
+
+ temp = usb_ctrl_debug.ss_fail;
+ if (temp)
+ dbg->ss_fail = 1;
+
+ dbg->enabled = 1;
+}
+#endif /* USB_REQ_DEBUG */
+#endif /* USB_DEBUG */
+
+/*------------------------------------------------------------------------*
+ * usbd_do_request_callback
+ *
+ * This function is the USB callback for generic USB Host control
+ * transfers.
+ *------------------------------------------------------------------------*/
+void
+usbd_do_request_callback(struct usb_xfer *xfer, usb_error_t error)
+{
+ ; /* workaround for a bug in "indent" */
+
+ DPRINTF("st=%u\n", USB_GET_STATE(xfer));
+
+ switch (USB_GET_STATE(xfer)) {
+ case USB_ST_SETUP:
+ usbd_transfer_submit(xfer);
+ break;
+ default:
+ cv_signal(&xfer->xroot->udev->ctrlreq_cv);
+ break;
+ }
+}
+
+/*------------------------------------------------------------------------*
+ * usb_do_clear_stall_callback
+ *
+ * This function is the USB callback for generic clear stall requests.
+ *------------------------------------------------------------------------*/
+void
+usb_do_clear_stall_callback(struct usb_xfer *xfer, usb_error_t error)
+{
+ struct usb_device_request req;
+ struct usb_device *udev;
+ struct usb_endpoint *ep;
+ struct usb_endpoint *ep_end;
+ struct usb_endpoint *ep_first;
+ uint8_t to;
+
+ udev = xfer->xroot->udev;
+
+ USB_BUS_LOCK(udev->bus);
+
+ /* round robin endpoint clear stall */
+
+ ep = udev->ep_curr;
+ ep_end = udev->endpoints + udev->endpoints_max;
+ ep_first = udev->endpoints;
+ to = udev->endpoints_max;
+
+ switch (USB_GET_STATE(xfer)) {
+ case USB_ST_TRANSFERRED:
+ if (ep == NULL)
+ goto tr_setup; /* device was unconfigured */
+ if (ep->edesc &&
+ ep->is_stalled) {
+ ep->toggle_next = 0;
+ ep->is_stalled = 0;
+ /* some hardware needs a callback to clear the data toggle */
+ usbd_clear_stall_locked(udev, ep);
+ /* start up the current or next transfer, if any */
+ usb_command_wrapper(&ep->endpoint_q,
+ ep->endpoint_q.curr);
+ }
+		ep++;
+		/* FALLTHROUGH */
+	case USB_ST_SETUP:
+tr_setup:
+ if (to == 0)
+ break; /* no endpoints - nothing to do */
+ if ((ep < ep_first) || (ep >= ep_end))
+ ep = ep_first; /* endpoint wrapped around */
+ if (ep->edesc &&
+ ep->is_stalled) {
+
+ /* setup a clear-stall packet */
+
+ req.bmRequestType = UT_WRITE_ENDPOINT;
+ req.bRequest = UR_CLEAR_FEATURE;
+ USETW(req.wValue, UF_ENDPOINT_HALT);
+ req.wIndex[0] = ep->edesc->bEndpointAddress;
+ req.wIndex[1] = 0;
+ USETW(req.wLength, 0);
+
+ /* copy in the transfer */
+
+ usbd_copy_in(xfer->frbuffers, 0, &req, sizeof(req));
+
+ /* set length */
+ usbd_xfer_set_frame_len(xfer, 0, sizeof(req));
+ xfer->nframes = 1;
+ USB_BUS_UNLOCK(udev->bus);
+
+ usbd_transfer_submit(xfer);
+
+ USB_BUS_LOCK(udev->bus);
+ break;
+ }
+ ep++;
+ to--;
+ goto tr_setup;
+
+ default:
+ if (xfer->error == USB_ERR_CANCELLED) {
+ break;
+ }
+ goto tr_setup;
+ }
+
+ /* store current endpoint */
+ udev->ep_curr = ep;
+ USB_BUS_UNLOCK(udev->bus);
+}
+
+static usb_handle_req_t *
+usbd_get_hr_func(struct usb_device *udev)
+{
+ /* figure out if there is a Handle Request function */
+ if (udev->flags.usb_mode == USB_MODE_DEVICE)
+ return (usb_temp_get_desc_p);
+ else if (udev->parent_hub == NULL)
+ return (udev->bus->methods->roothub_exec);
+ else
+ return (NULL);
+}
+
+/*------------------------------------------------------------------------*
+ * usbd_do_request_flags and usbd_do_request
+ *
+ * Description of arguments passed to these functions:
+ *
+ * "udev" - this is the "usb_device" structure pointer on which the
+ * request should be performed. It is possible to call this function
+ * in both Host Side mode and Device Side mode.
+ *
+ * "mtx" - if this argument is non-NULL the mutex pointed to by it
+ * will get dropped and picked up during the execution of this
+ * function, since this function sometimes needs to sleep. If this
+ * argument is NULL it has no effect.
+ *
+ * "req" - this argument must always be non-NULL and points to an
+ * 8-byte structure holding the USB request to be done. The USB
+ * request structure has a bit indicating the direction of the USB
+ * request, that is, whether it is a read or a write.
+ *
+ * "data" - if the "wLength" part of the structure pointed to by "req"
+ * is non-zero this argument must point to a valid kernel buffer which
+ * can hold at least "wLength" bytes. If "wLength" is zero "data" can
+ * be NULL.
+ *
+ * "flags" - here is a list of valid flags:
+ *
+ * o USB_SHORT_XFER_OK: allows the data transfer to be shorter than
+ * specified
+ *
+ * o USB_DELAY_STATUS_STAGE: allows the status stage to be performed
+ * at a later point in time. This is tunable by the "hw.usb.ss_delay"
+ * sysctl. This flag is mostly useful for debugging.
+ *
+ * o USB_USER_DATA_PTR: treat the "data" pointer like a userland
+ * pointer.
+ *
+ * "actlen" - if non-NULL the actual transfer length will be stored in
+ * the 16-bit unsigned integer pointed to by "actlen". This
+ * information is mostly useful when the "USB_SHORT_XFER_OK" flag is
+ * used.
+ *
+ * "timeout" - gives the timeout for the control transfer in
+ * milliseconds. A "timeout" value less than 50 milliseconds is
+ * treated like a 50 millisecond timeout. A "timeout" value greater
+ * than 30 seconds is treated like a 30 second timeout. This USB stack
+ * does not allow control requests without a timeout.
+ *
+ * NOTE: This function is thread safe. All calls to
+ * "usbd_do_request_flags" will be serialised by the use of an
+ * internal "sx_lock".
+ *
+ * Returns:
+ * 0: Success
+ * Else: Failure
+ *------------------------------------------------------------------------*/
+usb_error_t
+usbd_do_request_flags(struct usb_device *udev, struct mtx *mtx,
+ struct usb_device_request *req, void *data, uint16_t flags,
+ uint16_t *actlen, usb_timeout_t timeout)
+{
+#ifdef USB_REQ_DEBUG
+ struct usb_ctrl_debug_bits dbg;
+#endif
+ usb_handle_req_t *hr_func;
+ struct usb_xfer *xfer;
+ const void *desc;
+ int err = 0;
+ usb_ticks_t start_ticks;
+ usb_ticks_t delta_ticks;
+ usb_ticks_t max_ticks;
+ uint16_t length;
+ uint16_t temp;
+ uint16_t acttemp;
+ uint8_t enum_locked;
+
+ if (timeout < 50) {
+ /* timeout is too small */
+ timeout = 50;
+ }
+ if (timeout > 30000) {
+ /* timeout is too big */
+ timeout = 30000;
+ }
+ length = UGETW(req->wLength);
+
+ enum_locked = usbd_enum_is_locked(udev);
+
+ DPRINTFN(5, "udev=%p bmRequestType=0x%02x bRequest=0x%02x "
+ "wValue=0x%02x%02x wIndex=0x%02x%02x wLength=0x%02x%02x\n",
+ udev, req->bmRequestType, req->bRequest,
+ req->wValue[1], req->wValue[0],
+ req->wIndex[1], req->wIndex[0],
+ req->wLength[1], req->wLength[0]);
+
+ /* Check if the device is still alive */
+ if (udev->state < USB_STATE_POWERED) {
+ DPRINTF("usb device has gone\n");
+ return (USB_ERR_NOT_CONFIGURED);
+ }
+
+ /*
+ * Set "actlen" to a known value in case the caller does not
+ * check the return value:
+ */
+ if (actlen)
+ *actlen = 0;
+
+#if (USB_HAVE_USER_IO == 0)
+ if (flags & USB_USER_DATA_PTR)
+ return (USB_ERR_INVAL);
+#endif
+ if ((mtx != NULL) && (mtx != &Giant)) {
+ mtx_unlock(mtx);
+ mtx_assert(mtx, MA_NOTOWNED);
+ }
+
+ /*
+ * We need to allow suspend and resume at this point, else the
+	 * control transfer will time out if the device is suspended!
+ */
+ if (enum_locked)
+ usbd_sr_unlock(udev);
+
+ /*
+ * Grab the default sx-lock so that serialisation
+ * is achieved when multiple threads are involved:
+ */
+ sx_xlock(&udev->ctrl_sx);
+
+ hr_func = usbd_get_hr_func(udev);
+
+ if (hr_func != NULL) {
+ DPRINTF("Handle Request function is set\n");
+
+ desc = NULL;
+ temp = 0;
+
+ if (!(req->bmRequestType & UT_READ)) {
+ if (length != 0) {
+ DPRINTFN(1, "The handle request function "
+ "does not support writing data!\n");
+ err = USB_ERR_INVAL;
+ goto done;
+ }
+ }
+
+	/* The root HUB code needs the BUS lock held */
+
+ USB_BUS_LOCK(udev->bus);
+ err = (hr_func) (udev, req, &desc, &temp);
+ USB_BUS_UNLOCK(udev->bus);
+
+ if (err)
+ goto done;
+
+ if (length > temp) {
+ if (!(flags & USB_SHORT_XFER_OK)) {
+ err = USB_ERR_SHORT_XFER;
+ goto done;
+ }
+ length = temp;
+ }
+ if (actlen)
+ *actlen = length;
+
+ if (length > 0) {
+#if USB_HAVE_USER_IO
+ if (flags & USB_USER_DATA_PTR) {
+ if (copyout(desc, data, length)) {
+ err = USB_ERR_INVAL;
+ goto done;
+ }
+ } else
+#endif
+ bcopy(desc, data, length);
+ }
+ goto done; /* success */
+ }
+
+ /*
+ * Setup a new USB transfer or use the existing one, if any:
+ */
+ usbd_ctrl_transfer_setup(udev);
+
+ xfer = udev->ctrl_xfer[0];
+ if (xfer == NULL) {
+ /* most likely out of memory */
+ err = USB_ERR_NOMEM;
+ goto done;
+ }
+
+#ifdef USB_REQ_DEBUG
+ /* Get debug bits */
+ usbd_get_debug_bits(udev, req, &dbg);
+
+ /* Check for fault injection */
+ if (dbg.enabled)
+ flags |= USB_DELAY_STATUS_STAGE;
+#endif
+ USB_XFER_LOCK(xfer);
+
+ if (flags & USB_DELAY_STATUS_STAGE)
+ xfer->flags.manual_status = 1;
+ else
+ xfer->flags.manual_status = 0;
+
+ if (flags & USB_SHORT_XFER_OK)
+ xfer->flags.short_xfer_ok = 1;
+ else
+ xfer->flags.short_xfer_ok = 0;
+
+ xfer->timeout = timeout;
+
+ start_ticks = ticks;
+
+ max_ticks = USB_MS_TO_TICKS(timeout);
+
+ usbd_copy_in(xfer->frbuffers, 0, req, sizeof(*req));
+
+ usbd_xfer_set_frame_len(xfer, 0, sizeof(*req));
+
+ while (1) {
+ temp = length;
+ if (temp > usbd_xfer_max_len(xfer)) {
+ temp = usbd_xfer_max_len(xfer);
+ }
+#ifdef USB_REQ_DEBUG
+ if (xfer->flags.manual_status) {
+ if (usbd_xfer_frame_len(xfer, 0) != 0) {
+ /* Execute data stage separately */
+ temp = 0;
+ } else if (temp > 0) {
+ if (dbg.ds_fail) {
+ err = USB_ERR_INVAL;
+ break;
+ }
+ if (dbg.ds_delay > 0) {
+ usb_pause_mtx(
+ xfer->xroot->xfer_mtx,
+ USB_MS_TO_TICKS(dbg.ds_delay));
+ /* make sure we don't time out */
+ start_ticks = ticks;
+ }
+ }
+ }
+#endif
+ usbd_xfer_set_frame_len(xfer, 1, temp);
+
+ if (temp > 0) {
+ if (!(req->bmRequestType & UT_READ)) {
+#if USB_HAVE_USER_IO
+ if (flags & USB_USER_DATA_PTR) {
+ USB_XFER_UNLOCK(xfer);
+ err = usbd_copy_in_user(xfer->frbuffers + 1,
+ 0, data, temp);
+ USB_XFER_LOCK(xfer);
+ if (err) {
+ err = USB_ERR_INVAL;
+ break;
+ }
+ } else
+#endif
+ usbd_copy_in(xfer->frbuffers + 1,
+ 0, data, temp);
+ }
+ usbd_xfer_set_frames(xfer, 2);
+ } else {
+ if (usbd_xfer_frame_len(xfer, 0) == 0) {
+ if (xfer->flags.manual_status) {
+#ifdef USB_REQ_DEBUG
+ if (dbg.ss_fail) {
+ err = USB_ERR_INVAL;
+ break;
+ }
+ if (dbg.ss_delay > 0) {
+ usb_pause_mtx(
+ xfer->xroot->xfer_mtx,
+ USB_MS_TO_TICKS(dbg.ss_delay));
+ /* make sure we don't time out */
+ start_ticks = ticks;
+ }
+#endif
+ xfer->flags.manual_status = 0;
+ } else {
+ break;
+ }
+ }
+ usbd_xfer_set_frames(xfer, 1);
+ }
+
+ usbd_transfer_start(xfer);
+
+ while (usbd_transfer_pending(xfer)) {
+ cv_wait(&udev->ctrlreq_cv,
+ xfer->xroot->xfer_mtx);
+ }
+
+ err = xfer->error;
+
+ if (err) {
+ break;
+ }
+
+ /* get actual length of DATA stage */
+
+ if (xfer->aframes < 2) {
+ acttemp = 0;
+ } else {
+ acttemp = usbd_xfer_frame_len(xfer, 1);
+ }
+
+ /* check for short packet */
+
+ if (temp > acttemp) {
+ temp = acttemp;
+ length = temp;
+ }
+ if (temp > 0) {
+ if (req->bmRequestType & UT_READ) {
+#if USB_HAVE_USER_IO
+ if (flags & USB_USER_DATA_PTR) {
+ USB_XFER_UNLOCK(xfer);
+ err = usbd_copy_out_user(xfer->frbuffers + 1,
+ 0, data, temp);
+ USB_XFER_LOCK(xfer);
+ if (err) {
+ err = USB_ERR_INVAL;
+ break;
+ }
+ } else
+#endif
+ usbd_copy_out(xfer->frbuffers + 1,
+ 0, data, temp);
+ }
+ }
+ /*
+ * Clear "frlengths[0]" so that we don't send the setup
+ * packet again:
+ */
+ usbd_xfer_set_frame_len(xfer, 0, 0);
+
+ /* update length and data pointer */
+ length -= temp;
+ data = USB_ADD_BYTES(data, temp);
+
+ if (actlen) {
+ (*actlen) += temp;
+ }
+ /* check for timeout */
+
+ delta_ticks = ticks - start_ticks;
+ if (delta_ticks > max_ticks) {
+ if (!err) {
+ err = USB_ERR_TIMEOUT;
+ }
+ }
+ if (err) {
+ break;
+ }
+ }
+
+ if (err) {
+ /*
+ * Make sure that the control endpoint is no longer
+ * blocked in case of a non-transfer related error:
+ */
+ usbd_transfer_stop(xfer);
+ }
+ USB_XFER_UNLOCK(xfer);
+
+done:
+ sx_xunlock(&udev->ctrl_sx);
+
+ if (enum_locked)
+ usbd_sr_lock(udev);
+
+ if ((mtx != NULL) && (mtx != &Giant))
+ mtx_lock(mtx);
+
+ return ((usb_error_t)err);
+}
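+
+/*
+ * Usage sketch (illustrative only): read the device descriptor with
+ * a plain control request and a one second timeout:
+ *
+ *	struct usb_device_request req;
+ *	struct usb_device_descriptor ddesc;
+ *	uint16_t actlen;
+ *	usb_error_t err;
+ *
+ *	req.bmRequestType = UT_READ_DEVICE;
+ *	req.bRequest = UR_GET_DESCRIPTOR;
+ *	USETW2(req.wValue, UDESC_DEVICE, 0);
+ *	USETW(req.wIndex, 0);
+ *	USETW(req.wLength, sizeof(ddesc));
+ *
+ *	err = usbd_do_request_flags(udev, NULL, &req, &ddesc,
+ *	    0, &actlen, 1000);
+ */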
+
+/*------------------------------------------------------------------------*
+ * usbd_do_request_proc - factored out code
+ *
+ * This function is factored out code. It does basically the same as
+ * usbd_do_request_flags, except it will check the status of the
+ * passed process argument before doing the USB request. If the
+ * process is draining, the USB_ERR_IOERROR code will be returned. It
+ * is assumed that the mutex associated with the process is locked
+ * when calling this function.
+ *------------------------------------------------------------------------*/
+usb_error_t
+usbd_do_request_proc(struct usb_device *udev, struct usb_process *pproc,
+ struct usb_device_request *req, void *data, uint16_t flags,
+ uint16_t *actlen, usb_timeout_t timeout)
+{
+ usb_error_t err;
+ uint16_t len;
+
+ /* get request data length */
+ len = UGETW(req->wLength);
+
+ /* check if the device is being detached */
+ if (usb_proc_is_gone(pproc)) {
+ err = USB_ERR_IOERROR;
+ goto done;
+ }
+
+ /* forward the USB request */
+ err = usbd_do_request_flags(udev, pproc->up_mtx,
+ req, data, flags, actlen, timeout);
+
+done:
+ /* on failure we zero the data */
+ /* on short packet we zero the unused data */
+ if ((len != 0) && (req->bmRequestType & UE_DIR_IN)) {
+ if (err)
+ memset(data, 0, len);
+ else if (actlen && *actlen != len)
+ memset(((uint8_t *)data) + *actlen, 0, len - *actlen);
+ }
+ return (err);
+}
+
+/*------------------------------------------------------------------------*
+ * usbd_req_reset_port
+ *
+ * This function will instruct a USB HUB to perform a reset sequence
+ * on the specified port number.
+ *
+ * Returns:
+ * 0: Success. The USB device should now be at address zero.
+ * Else: Failure. No USB device is present and the USB port should be
+ * disabled.
+ *------------------------------------------------------------------------*/
+usb_error_t
+usbd_req_reset_port(struct usb_device *udev, struct mtx *mtx, uint8_t port)
+{
+ struct usb_port_status ps;
+ usb_error_t err;
+ uint16_t n;
+
+#ifdef USB_DEBUG
+ uint16_t pr_poll_delay;
+ uint16_t pr_recovery_delay;
+
+#endif
+ err = usbd_req_set_port_feature(udev, mtx, port, UHF_PORT_RESET);
+ if (err) {
+ goto done;
+ }
+#ifdef USB_DEBUG
+ /* range check input parameters */
+ pr_poll_delay = usb_pr_poll_delay;
+ if (pr_poll_delay < 1) {
+ pr_poll_delay = 1;
+ } else if (pr_poll_delay > 1000) {
+ pr_poll_delay = 1000;
+ }
+ pr_recovery_delay = usb_pr_recovery_delay;
+ if (pr_recovery_delay > 1000) {
+ pr_recovery_delay = 1000;
+ }
+#endif
+ n = 0;
+ while (1) {
+#ifdef USB_DEBUG
+ /* wait for the device to recover from reset */
+ usb_pause_mtx(mtx, USB_MS_TO_TICKS(pr_poll_delay));
+ n += pr_poll_delay;
+#else
+ /* wait for the device to recover from reset */
+ usb_pause_mtx(mtx, USB_MS_TO_TICKS(USB_PORT_RESET_DELAY));
+ n += USB_PORT_RESET_DELAY;
+#endif
+ err = usbd_req_get_port_status(udev, mtx, &ps, port);
+ if (err) {
+ goto done;
+ }
+ /* if the device disappeared, just give up */
+ if (!(UGETW(ps.wPortStatus) & UPS_CURRENT_CONNECT_STATUS)) {
+ goto done;
+ }
+ /* check if reset is complete */
+ if (UGETW(ps.wPortChange) & UPS_C_PORT_RESET) {
+ break;
+ }
+ /* check for timeout */
+ if (n > 1000) {
+ n = 0;
+ break;
+ }
+ }
+
+ /* clear port reset first */
+ err = usbd_req_clear_port_feature(
+ udev, mtx, port, UHF_C_PORT_RESET);
+ if (err) {
+ goto done;
+ }
+ /* check for timeout */
+ if (n == 0) {
+ err = USB_ERR_TIMEOUT;
+ goto done;
+ }
+#ifdef USB_DEBUG
+ /* wait for the device to recover from reset */
+ usb_pause_mtx(mtx, USB_MS_TO_TICKS(pr_recovery_delay));
+#else
+ /* wait for the device to recover from reset */
+ usb_pause_mtx(mtx, USB_MS_TO_TICKS(USB_PORT_RESET_RECOVERY));
+#endif
+
+done:
+ DPRINTFN(2, "port %d reset returning error=%s\n",
+ port, usbd_errstr(err));
+ return (err);
+}
+
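+/*------------------------------------------------------------------------*
+ * Example: a minimal sketch, not part of the original source, of how
+ * a hub driver could pair the port reset with a port status
+ * read-back. The function name is hypothetical.
+ *------------------------------------------------------------------------*/
+#if 0
+static usb_error_t
+example_reset_and_check(struct usb_device *hub, struct mtx *mtx,
+    uint8_t port)
+{
+	struct usb_port_status ps;
+	usb_error_t err;
+
+	err = usbd_req_reset_port(hub, mtx, port);
+	if (err)
+		return (err);
+	/* on success the attached device answers at address zero */
+	return (usbd_req_get_port_status(hub, mtx, &ps, port));
+}
+#endif
+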
+/*------------------------------------------------------------------------*
+ * usbd_req_warm_reset_port
+ *
+ * This function will instruct a USB HUB to perform a warm reset
+ * sequence on the specified port number. This kind of reset is not
+ * mandatory for LOW-, FULL- and HIGH-speed USB HUBs and is targeted
+ * for SUPER-speed USB HUBs.
+ *
+ * Returns:
+ * 0: Success. The USB device should now be available again.
+ * Else: Failure. No USB device is present and the USB port should be
+ * disabled.
+ *------------------------------------------------------------------------*/
+usb_error_t
+usbd_req_warm_reset_port(struct usb_device *udev, struct mtx *mtx, uint8_t port)
+{
+ struct usb_port_status ps;
+ usb_error_t err;
+ uint16_t n;
+
+#ifdef USB_DEBUG
+ uint16_t pr_poll_delay;
+ uint16_t pr_recovery_delay;
+
+#endif
+ err = usbd_req_set_port_feature(udev, mtx, port, UHF_BH_PORT_RESET);
+ if (err) {
+ goto done;
+ }
+#ifdef USB_DEBUG
+ /* range check input parameters */
+ pr_poll_delay = usb_pr_poll_delay;
+ if (pr_poll_delay < 1) {
+ pr_poll_delay = 1;
+ } else if (pr_poll_delay > 1000) {
+ pr_poll_delay = 1000;
+ }
+ pr_recovery_delay = usb_pr_recovery_delay;
+ if (pr_recovery_delay > 1000) {
+ pr_recovery_delay = 1000;
+ }
+#endif
+ n = 0;
+ while (1) {
+#ifdef USB_DEBUG
+ /* wait for the device to recover from reset */
+ usb_pause_mtx(mtx, USB_MS_TO_TICKS(pr_poll_delay));
+ n += pr_poll_delay;
+#else
+ /* wait for the device to recover from reset */
+ usb_pause_mtx(mtx, USB_MS_TO_TICKS(USB_PORT_RESET_DELAY));
+ n += USB_PORT_RESET_DELAY;
+#endif
+ err = usbd_req_get_port_status(udev, mtx, &ps, port);
+ if (err) {
+ goto done;
+ }
+ /* if the device disappeared, just give up */
+ if (!(UGETW(ps.wPortStatus) & UPS_CURRENT_CONNECT_STATUS)) {
+ goto done;
+ }
+ /* check if reset is complete */
+ if (UGETW(ps.wPortChange) & UPS_C_BH_PORT_RESET) {
+ break;
+ }
+ /* check for timeout */
+ if (n > 1000) {
+ n = 0;
+ break;
+ }
+ }
+
+ /* clear port reset first */
+ err = usbd_req_clear_port_feature(
+ udev, mtx, port, UHF_C_BH_PORT_RESET);
+ if (err) {
+ goto done;
+ }
+ /* check for timeout */
+ if (n == 0) {
+ err = USB_ERR_TIMEOUT;
+ goto done;
+ }
+#ifdef USB_DEBUG
+ /* wait for the device to recover from reset */
+ usb_pause_mtx(mtx, USB_MS_TO_TICKS(pr_recovery_delay));
+#else
+ /* wait for the device to recover from reset */
+ usb_pause_mtx(mtx, USB_MS_TO_TICKS(USB_PORT_RESET_RECOVERY));
+#endif
+
+done:
+ DPRINTFN(2, "port %d warm reset returning error=%s\n",
+ port, usbd_errstr(err));
+ return (err);
+}
+
+/*------------------------------------------------------------------------*
+ * usbd_req_get_desc
+ *
+ * This function can be used to retrieve USB descriptors. It contains
+ * some additional logic like zeroing of missing descriptor bytes and
+ * retrying the USB request in case of failure. The "min_len"
+ * argument specifies the minimum descriptor length. The "max_len"
+ * argument specifies the maximum descriptor length. If the real
+ * descriptor length is less than the minimum length the missing
+ * byte(s) will be zeroed. The type field, the second byte of the USB
+ * descriptor, will get forced to the correct type. If the "actlen"
+ * pointer is non-NULL, the actual length of the transfer will get
+ * stored in the 16-bit unsigned integer which it is pointing to. The
+ * first byte of the descriptor will not get updated. If the "actlen"
+ * pointer is NULL the first byte of the descriptor will get updated
+ * to reflect the actual length instead. If "min_len" is not equal to
+ * "max_len" then this function will try to retrive the beginning of
+ * the descriptor and base the maximum length on the first byte of the
+ * descriptor.
+ *
+ * Returns:
+ * 0: Success
+ * Else: Failure
+ *------------------------------------------------------------------------*/
+usb_error_t
+usbd_req_get_desc(struct usb_device *udev,
+ struct mtx *mtx, uint16_t *actlen, void *desc,
+ uint16_t min_len, uint16_t max_len,
+ uint16_t id, uint8_t type, uint8_t index,
+ uint8_t retries)
+{
+ struct usb_device_request req;
+ uint8_t *buf;
+ usb_error_t err;
+
+ DPRINTFN(4, "id=%d, type=%d, index=%d, max_len=%d\n",
+ id, type, index, max_len);
+
+ req.bmRequestType = UT_READ_DEVICE;
+ req.bRequest = UR_GET_DESCRIPTOR;
+ USETW2(req.wValue, type, index);
+ USETW(req.wIndex, id);
+
+ while (1) {
+
+ if ((min_len < 2) || (max_len < 2)) {
+ err = USB_ERR_INVAL;
+ goto done;
+ }
+ USETW(req.wLength, min_len);
+
+ err = usbd_do_request_flags(udev, mtx, &req,
+ desc, 0, NULL, 1000);
+
+ if (err) {
+ if (!retries) {
+ goto done;
+ }
+ retries--;
+
+ usb_pause_mtx(mtx, hz / 5);
+
+ continue;
+ }
+ buf = desc;
+
+ if (min_len == max_len) {
+
+ /* enforce correct length */
+ if ((buf[0] > min_len) && (actlen == NULL))
+ buf[0] = min_len;
+
+ /* enforce correct type */
+ buf[1] = type;
+
+ goto done;
+ }
+ /* range check */
+
+ if (max_len > buf[0]) {
+ max_len = buf[0];
+ }
+ /* zero minimum data */
+
+ while (min_len > max_len) {
+ min_len--;
+ buf[min_len] = 0;
+ }
+
+ /* set new minimum length */
+
+ min_len = max_len;
+ }
+done:
+ if (actlen != NULL) {
+ if (err)
+ *actlen = 0;
+ else
+ *actlen = min_len;
+ }
+ return (err);
+}
+
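+/*------------------------------------------------------------------------*
+ * Example: a minimal sketch, not part of the original source. Passing
+ * "min_len" != "max_len" makes the read self-sizing: the header is
+ * fetched first and the full read is bounded by the descriptor's own
+ * first byte. The function name is hypothetical.
+ *------------------------------------------------------------------------*/
+#if 0
+static usb_error_t
+example_get_lang_table(struct usb_device *udev, struct mtx *mtx,
+    void *buf, uint16_t buflen, uint16_t *actlen)
+{
+	/* string index zero is the language ID table; retry 3 times */
+	return (usbd_req_get_desc(udev, mtx, actlen, buf, 2, buflen,
+	    0, UDESC_STRING, 0, 3));
+}
+#endif
+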
+/*------------------------------------------------------------------------*
+ * usbd_req_get_string_any
+ *
+ * This function will return the string given by "string_index"
+ * using the first language ID. The maximum length "len" includes
+ * the terminating zero. The "len" argument should be twice the
+ * actual maximum string length, plus 2 bytes!
+ *
+ * Returns:
+ * 0: Success
+ * Else: Failure
+ *------------------------------------------------------------------------*/
+usb_error_t
+usbd_req_get_string_any(struct usb_device *udev, struct mtx *mtx, char *buf,
+ uint16_t len, uint8_t string_index)
+{
+ char *s;
+ uint8_t *temp;
+ uint16_t i;
+ uint16_t n;
+ uint16_t c;
+ uint8_t swap;
+ usb_error_t err;
+
+ if (len == 0) {
+ /* should not happen */
+ return (USB_ERR_NORMAL_COMPLETION);
+ }
+ if (string_index == 0) {
+ /* this is the language table */
+ buf[0] = 0;
+ return (USB_ERR_INVAL);
+ }
+ if (udev->flags.no_strings) {
+ buf[0] = 0;
+ return (USB_ERR_STALLED);
+ }
+ err = usbd_req_get_string_desc
+ (udev, mtx, buf, len, udev->langid, string_index);
+ if (err) {
+ buf[0] = 0;
+ return (err);
+ }
+ temp = (uint8_t *)buf;
+
+ if (temp[0] < 2) {
+ /* string length is too short */
+ buf[0] = 0;
+ return (USB_ERR_INVAL);
+ }
+ /* reserve one byte for terminating zero */
+ len--;
+
+ /* find maximum length */
+ s = buf;
+ n = (temp[0] / 2) - 1;
+ if (n > len) {
+ n = len;
+ }
+ /* skip descriptor header */
+ temp += 2;
+
+ /* reset swap state */
+ swap = 3;
+
+ /* convert and filter */
+ for (i = 0; (i != n); i++) {
+ c = UGETW(temp + (2 * i));
+
+ /* convert from Unicode, handle buggy strings */
+ if (((c & 0xff00) == 0) && (swap & 1)) {
+ /* Little Endian, default */
+ *s = c;
+ swap = 1;
+ } else if (((c & 0x00ff) == 0) && (swap & 2)) {
+ /* Big Endian */
+ *s = c >> 8;
+ swap = 2;
+ } else {
+ /* silently skip bad character */
+ continue;
+ }
+
+ /*
+		 * Filter by default - we don't allow greater-than and
+		 * less-than signs, because they might confuse the dmesg
+		 * printouts!
+ */
+ if ((*s == '<') || (*s == '>') || (!isprint(*s))) {
+ /* silently skip bad character */
+ continue;
+ }
+ s++;
+ }
+ *s = 0; /* zero terminate resulting string */
+ return (USB_ERR_NORMAL_COMPLETION);
+}
+
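+/*------------------------------------------------------------------------*
+ * Example: a minimal sketch, not part of the original source, showing
+ * the buffer sizing rule above for a string of at most 64 characters:
+ * (2 * 64) + 2 bytes. The function name is hypothetical.
+ *------------------------------------------------------------------------*/
+#if 0
+static void
+example_print_product(struct usb_device *udev)
+{
+	char buf[(2 * 64) + 2];
+
+	if (usbd_req_get_string_any(udev, NULL, buf, sizeof(buf),
+	    udev->ddesc.iProduct) == 0)
+		printf("product: %s\n", buf);
+}
+#endif
+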
+/*------------------------------------------------------------------------*
+ * usbd_req_get_string_desc
+ *
+ * If you don't know the language ID, consider using
+ * "usbd_req_get_string_any()".
+ *
+ * Returns:
+ * 0: Success
+ * Else: Failure
+ *------------------------------------------------------------------------*/
+usb_error_t
+usbd_req_get_string_desc(struct usb_device *udev, struct mtx *mtx, void *sdesc,
+ uint16_t max_len, uint16_t lang_id,
+ uint8_t string_index)
+{
+ return (usbd_req_get_desc(udev, mtx, NULL, sdesc, 2, max_len, lang_id,
+ UDESC_STRING, string_index, 0));
+}
+
+/*------------------------------------------------------------------------*
+ * usbd_req_get_descriptor_ptr
+ *
+ * This function is used in device side mode to retrieve the pointer
+ * to the generated config descriptor. This saves allocating space for
+ * an additional config descriptor when setting the configuration.
+ *
+ * Returns:
+ * 0: Success
+ * Else: Failure
+ *------------------------------------------------------------------------*/
+usb_error_t
+usbd_req_get_descriptor_ptr(struct usb_device *udev,
+ struct usb_config_descriptor **ppcd, uint16_t wValue)
+{
+ struct usb_device_request req;
+ usb_handle_req_t *hr_func;
+ const void *ptr;
+ uint16_t len;
+ usb_error_t err;
+
+ req.bmRequestType = UT_READ_DEVICE;
+ req.bRequest = UR_GET_DESCRIPTOR;
+ USETW(req.wValue, wValue);
+ USETW(req.wIndex, 0);
+ USETW(req.wLength, 0);
+
+ ptr = NULL;
+ len = 0;
+
+ hr_func = usbd_get_hr_func(udev);
+
+ if (hr_func == NULL)
+ err = USB_ERR_INVAL;
+ else {
+ USB_BUS_LOCK(udev->bus);
+ err = (hr_func) (udev, &req, &ptr, &len);
+ USB_BUS_UNLOCK(udev->bus);
+ }
+
+ if (err)
+ ptr = NULL;
+ else if (ptr == NULL)
+ err = USB_ERR_INVAL;
+
+ *ppcd = __DECONST(struct usb_config_descriptor *, ptr);
+
+ return (err);
+}
+
+/*------------------------------------------------------------------------*
+ * usbd_req_get_config_desc
+ *
+ * Returns:
+ * 0: Success
+ * Else: Failure
+ *------------------------------------------------------------------------*/
+usb_error_t
+usbd_req_get_config_desc(struct usb_device *udev, struct mtx *mtx,
+ struct usb_config_descriptor *d, uint8_t conf_index)
+{
+ usb_error_t err;
+
+ DPRINTFN(4, "confidx=%d\n", conf_index);
+
+ err = usbd_req_get_desc(udev, mtx, NULL, d, sizeof(*d),
+ sizeof(*d), 0, UDESC_CONFIG, conf_index, 0);
+ if (err) {
+ goto done;
+ }
+ /* Extra sanity checking */
+ if (UGETW(d->wTotalLength) < sizeof(*d)) {
+ err = USB_ERR_INVAL;
+ }
+done:
+ return (err);
+}
+
+/*------------------------------------------------------------------------*
+ * usbd_req_get_config_desc_full
+ *
+ * This function gets the complete USB configuration descriptor and
+ * ensures that "wTotalLength" is correct.
+ *
+ * Returns:
+ * 0: Success
+ * Else: Failure
+ *------------------------------------------------------------------------*/
+usb_error_t
+usbd_req_get_config_desc_full(struct usb_device *udev, struct mtx *mtx,
+ struct usb_config_descriptor **ppcd, struct malloc_type *mtype,
+ uint8_t index)
+{
+ struct usb_config_descriptor cd;
+ struct usb_config_descriptor *cdesc;
+ uint16_t len;
+ usb_error_t err;
+
+ DPRINTFN(4, "index=%d\n", index);
+
+ *ppcd = NULL;
+
+ err = usbd_req_get_config_desc(udev, mtx, &cd, index);
+ if (err) {
+ return (err);
+ }
+ /* get full descriptor */
+ len = UGETW(cd.wTotalLength);
+ if (len < sizeof(*cdesc)) {
+ /* corrupt descriptor */
+ return (USB_ERR_INVAL);
+ }
+ cdesc = malloc(len, mtype, M_WAITOK);
+ if (cdesc == NULL) {
+ return (USB_ERR_NOMEM);
+ }
+ err = usbd_req_get_desc(udev, mtx, NULL, cdesc, len, len, 0,
+ UDESC_CONFIG, index, 3);
+ if (err) {
+ free(cdesc, mtype);
+ return (err);
+ }
+ /* make sure that the device is not fooling us: */
+ USETW(cdesc->wTotalLength, len);
+
+ *ppcd = cdesc;
+
+ return (0); /* success */
+}
+
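+/*------------------------------------------------------------------------*
+ * Example: a minimal sketch, not part of the original source. The
+ * returned descriptor is allocated by this function and must be freed
+ * by the caller using the same malloc type; "M_USB" and the function
+ * name are assumptions.
+ *------------------------------------------------------------------------*/
+#if 0
+static void
+example_dump_config(struct usb_device *udev, struct mtx *mtx)
+{
+	struct usb_config_descriptor *cdp;
+
+	if (usbd_req_get_config_desc_full(udev, mtx, &cdp, M_USB, 0))
+		return;
+	printf("wTotalLength=%d\n", UGETW(cdp->wTotalLength));
+	free(cdp, M_USB);
+}
+#endif
+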
+/*------------------------------------------------------------------------*
+ * usbd_req_get_device_desc
+ *
+ * Returns:
+ * 0: Success
+ * Else: Failure
+ *------------------------------------------------------------------------*/
+usb_error_t
+usbd_req_get_device_desc(struct usb_device *udev, struct mtx *mtx,
+ struct usb_device_descriptor *d)
+{
+ DPRINTFN(4, "\n");
+ return (usbd_req_get_desc(udev, mtx, NULL, d, sizeof(*d),
+ sizeof(*d), 0, UDESC_DEVICE, 0, 3));
+}
+
+/*------------------------------------------------------------------------*
+ * usbd_req_get_alt_interface_no
+ *
+ * Returns:
+ * 0: Success
+ * Else: Failure
+ *------------------------------------------------------------------------*/
+usb_error_t
+usbd_req_get_alt_interface_no(struct usb_device *udev, struct mtx *mtx,
+ uint8_t *alt_iface_no, uint8_t iface_index)
+{
+ struct usb_interface *iface = usbd_get_iface(udev, iface_index);
+ struct usb_device_request req;
+
+ if ((iface == NULL) || (iface->idesc == NULL))
+ return (USB_ERR_INVAL);
+
+ req.bmRequestType = UT_READ_INTERFACE;
+ req.bRequest = UR_GET_INTERFACE;
+ USETW(req.wValue, 0);
+ req.wIndex[0] = iface->idesc->bInterfaceNumber;
+ req.wIndex[1] = 0;
+ USETW(req.wLength, 1);
+ return (usbd_do_request(udev, mtx, &req, alt_iface_no));
+}
+
+/*------------------------------------------------------------------------*
+ * usbd_req_set_alt_interface_no
+ *
+ * Returns:
+ * 0: Success
+ * Else: Failure
+ *------------------------------------------------------------------------*/
+usb_error_t
+usbd_req_set_alt_interface_no(struct usb_device *udev, struct mtx *mtx,
+ uint8_t iface_index, uint8_t alt_no)
+{
+ struct usb_interface *iface = usbd_get_iface(udev, iface_index);
+ struct usb_device_request req;
+
+ if ((iface == NULL) || (iface->idesc == NULL))
+ return (USB_ERR_INVAL);
+
+ req.bmRequestType = UT_WRITE_INTERFACE;
+ req.bRequest = UR_SET_INTERFACE;
+ req.wValue[0] = alt_no;
+ req.wValue[1] = 0;
+ req.wIndex[0] = iface->idesc->bInterfaceNumber;
+ req.wIndex[1] = 0;
+ USETW(req.wLength, 0);
+ return (usbd_do_request(udev, mtx, &req, 0));
+}
+
+/*------------------------------------------------------------------------*
+ * usbd_req_get_device_status
+ *
+ * Returns:
+ * 0: Success
+ * Else: Failure
+ *------------------------------------------------------------------------*/
+usb_error_t
+usbd_req_get_device_status(struct usb_device *udev, struct mtx *mtx,
+ struct usb_status *st)
+{
+ struct usb_device_request req;
+
+ req.bmRequestType = UT_READ_DEVICE;
+ req.bRequest = UR_GET_STATUS;
+ USETW(req.wValue, 0);
+ USETW(req.wIndex, 0);
+ USETW(req.wLength, sizeof(*st));
+ return (usbd_do_request(udev, mtx, &req, st));
+}
+
+/*------------------------------------------------------------------------*
+ * usbd_req_get_hub_descriptor
+ *
+ * Returns:
+ * 0: Success
+ * Else: Failure
+ *------------------------------------------------------------------------*/
+usb_error_t
+usbd_req_get_hub_descriptor(struct usb_device *udev, struct mtx *mtx,
+ struct usb_hub_descriptor *hd, uint8_t nports)
+{
+ struct usb_device_request req;
+ uint16_t len = (nports + 7 + (8 * 8)) / 8;
+
+ req.bmRequestType = UT_READ_CLASS_DEVICE;
+ req.bRequest = UR_GET_DESCRIPTOR;
+ USETW2(req.wValue, UDESC_HUB, 0);
+ USETW(req.wIndex, 0);
+ USETW(req.wLength, len);
+ return (usbd_do_request(udev, mtx, &req, hd));
+}
+
+/*------------------------------------------------------------------------*
+ * usbd_req_get_ss_hub_descriptor
+ *
+ * Returns:
+ * 0: Success
+ * Else: Failure
+ *------------------------------------------------------------------------*/
+usb_error_t
+usbd_req_get_ss_hub_descriptor(struct usb_device *udev, struct mtx *mtx,
+ struct usb_hub_ss_descriptor *hd, uint8_t nports)
+{
+ struct usb_device_request req;
+ uint16_t len = sizeof(*hd) - 32 + 1 + ((nports + 7) / 8);
+
+ req.bmRequestType = UT_READ_CLASS_DEVICE;
+ req.bRequest = UR_GET_DESCRIPTOR;
+ USETW2(req.wValue, UDESC_SS_HUB, 0);
+ USETW(req.wIndex, 0);
+ USETW(req.wLength, len);
+ return (usbd_do_request(udev, mtx, &req, hd));
+}
+
+/*------------------------------------------------------------------------*
+ * usbd_req_get_hub_status
+ *
+ * Returns:
+ * 0: Success
+ * Else: Failure
+ *------------------------------------------------------------------------*/
+usb_error_t
+usbd_req_get_hub_status(struct usb_device *udev, struct mtx *mtx,
+ struct usb_hub_status *st)
+{
+ struct usb_device_request req;
+
+ req.bmRequestType = UT_READ_CLASS_DEVICE;
+ req.bRequest = UR_GET_STATUS;
+ USETW(req.wValue, 0);
+ USETW(req.wIndex, 0);
+ USETW(req.wLength, sizeof(struct usb_hub_status));
+ return (usbd_do_request(udev, mtx, &req, st));
+}
+
+/*------------------------------------------------------------------------*
+ * usbd_req_set_address
+ *
+ * This function is used to set the address for a USB device. After
+ * port reset the USB device will respond at address zero.
+ *
+ * Returns:
+ * 0: Success
+ * Else: Failure
+ *------------------------------------------------------------------------*/
+usb_error_t
+usbd_req_set_address(struct usb_device *udev, struct mtx *mtx, uint16_t addr)
+{
+ struct usb_device_request req;
+ usb_error_t err;
+
+ DPRINTFN(6, "setting device address=%d\n", addr);
+
+ req.bmRequestType = UT_WRITE_DEVICE;
+ req.bRequest = UR_SET_ADDRESS;
+ USETW(req.wValue, addr);
+ USETW(req.wIndex, 0);
+ USETW(req.wLength, 0);
+
+ err = USB_ERR_INVAL;
+
+ /* check if USB controller handles set address */
+ if (udev->bus->methods->set_address != NULL)
+ err = (udev->bus->methods->set_address) (udev, mtx, addr);
+
+ if (err != USB_ERR_INVAL)
+ goto done;
+
+	/* Setting the address should not take more than 1 second! */
+ err = usbd_do_request_flags(udev, mtx, &req, NULL,
+ USB_DELAY_STATUS_STAGE, NULL, 1000);
+
+done:
+ /* allow device time to set new address */
+ usb_pause_mtx(mtx,
+ USB_MS_TO_TICKS(USB_SET_ADDRESS_SETTLE));
+
+ return (err);
+}
+
+/*------------------------------------------------------------------------*
+ * usbd_req_get_port_status
+ *
+ * Returns:
+ * 0: Success
+ * Else: Failure
+ *------------------------------------------------------------------------*/
+usb_error_t
+usbd_req_get_port_status(struct usb_device *udev, struct mtx *mtx,
+ struct usb_port_status *ps, uint8_t port)
+{
+ struct usb_device_request req;
+
+ req.bmRequestType = UT_READ_CLASS_OTHER;
+ req.bRequest = UR_GET_STATUS;
+ USETW(req.wValue, 0);
+ req.wIndex[0] = port;
+ req.wIndex[1] = 0;
+ USETW(req.wLength, sizeof *ps);
+ return (usbd_do_request(udev, mtx, &req, ps));
+}
+
+/*------------------------------------------------------------------------*
+ * usbd_req_clear_hub_feature
+ *
+ * Returns:
+ * 0: Success
+ * Else: Failure
+ *------------------------------------------------------------------------*/
+usb_error_t
+usbd_req_clear_hub_feature(struct usb_device *udev, struct mtx *mtx,
+ uint16_t sel)
+{
+ struct usb_device_request req;
+
+ req.bmRequestType = UT_WRITE_CLASS_DEVICE;
+ req.bRequest = UR_CLEAR_FEATURE;
+ USETW(req.wValue, sel);
+ USETW(req.wIndex, 0);
+ USETW(req.wLength, 0);
+ return (usbd_do_request(udev, mtx, &req, 0));
+}
+
+/*------------------------------------------------------------------------*
+ * usbd_req_set_hub_feature
+ *
+ * Returns:
+ * 0: Success
+ * Else: Failure
+ *------------------------------------------------------------------------*/
+usb_error_t
+usbd_req_set_hub_feature(struct usb_device *udev, struct mtx *mtx,
+ uint16_t sel)
+{
+ struct usb_device_request req;
+
+ req.bmRequestType = UT_WRITE_CLASS_DEVICE;
+ req.bRequest = UR_SET_FEATURE;
+ USETW(req.wValue, sel);
+ USETW(req.wIndex, 0);
+ USETW(req.wLength, 0);
+ return (usbd_do_request(udev, mtx, &req, 0));
+}
+
+/*------------------------------------------------------------------------*
+ * usbd_req_set_hub_u1_timeout
+ *
+ * Returns:
+ * 0: Success
+ * Else: Failure
+ *------------------------------------------------------------------------*/
+usb_error_t
+usbd_req_set_hub_u1_timeout(struct usb_device *udev, struct mtx *mtx,
+ uint8_t port, uint8_t timeout)
+{
+ struct usb_device_request req;
+
+ req.bmRequestType = UT_WRITE_CLASS_OTHER;
+ req.bRequest = UR_SET_FEATURE;
+ USETW(req.wValue, UHF_PORT_U1_TIMEOUT);
+ req.wIndex[0] = port;
+ req.wIndex[1] = timeout;
+ USETW(req.wLength, 0);
+ return (usbd_do_request(udev, mtx, &req, 0));
+}
+
+/*------------------------------------------------------------------------*
+ * usbd_req_set_hub_u2_timeout
+ *
+ * Returns:
+ * 0: Success
+ * Else: Failure
+ *------------------------------------------------------------------------*/
+usb_error_t
+usbd_req_set_hub_u2_timeout(struct usb_device *udev, struct mtx *mtx,
+ uint8_t port, uint8_t timeout)
+{
+ struct usb_device_request req;
+
+ req.bmRequestType = UT_WRITE_CLASS_OTHER;
+ req.bRequest = UR_SET_FEATURE;
+ USETW(req.wValue, UHF_PORT_U2_TIMEOUT);
+ req.wIndex[0] = port;
+ req.wIndex[1] = timeout;
+ USETW(req.wLength, 0);
+ return (usbd_do_request(udev, mtx, &req, 0));
+}
+
+/*------------------------------------------------------------------------*
+ * usbd_req_set_hub_depth
+ *
+ * Returns:
+ * 0: Success
+ * Else: Failure
+ *------------------------------------------------------------------------*/
+usb_error_t
+usbd_req_set_hub_depth(struct usb_device *udev, struct mtx *mtx,
+ uint16_t depth)
+{
+ struct usb_device_request req;
+
+ req.bmRequestType = UT_WRITE_CLASS_DEVICE;
+ req.bRequest = UR_SET_HUB_DEPTH;
+ USETW(req.wValue, depth);
+ USETW(req.wIndex, 0);
+ USETW(req.wLength, 0);
+ return (usbd_do_request(udev, mtx, &req, 0));
+}
+
+/*------------------------------------------------------------------------*
+ * usbd_req_clear_port_feature
+ *
+ * Returns:
+ * 0: Success
+ * Else: Failure
+ *------------------------------------------------------------------------*/
+usb_error_t
+usbd_req_clear_port_feature(struct usb_device *udev, struct mtx *mtx,
+ uint8_t port, uint16_t sel)
+{
+ struct usb_device_request req;
+
+ req.bmRequestType = UT_WRITE_CLASS_OTHER;
+ req.bRequest = UR_CLEAR_FEATURE;
+ USETW(req.wValue, sel);
+ req.wIndex[0] = port;
+ req.wIndex[1] = 0;
+ USETW(req.wLength, 0);
+ return (usbd_do_request(udev, mtx, &req, 0));
+}
+
+/*------------------------------------------------------------------------*
+ * usbd_req_set_port_feature
+ *
+ * Returns:
+ * 0: Success
+ * Else: Failure
+ *------------------------------------------------------------------------*/
+usb_error_t
+usbd_req_set_port_feature(struct usb_device *udev, struct mtx *mtx,
+ uint8_t port, uint16_t sel)
+{
+ struct usb_device_request req;
+
+ req.bmRequestType = UT_WRITE_CLASS_OTHER;
+ req.bRequest = UR_SET_FEATURE;
+ USETW(req.wValue, sel);
+ req.wIndex[0] = port;
+ req.wIndex[1] = 0;
+ USETW(req.wLength, 0);
+ return (usbd_do_request(udev, mtx, &req, 0));
+}
+
+/*------------------------------------------------------------------------*
+ * usbd_req_set_protocol
+ *
+ * Returns:
+ * 0: Success
+ * Else: Failure
+ *------------------------------------------------------------------------*/
+usb_error_t
+usbd_req_set_protocol(struct usb_device *udev, struct mtx *mtx,
+ uint8_t iface_index, uint16_t report)
+{
+ struct usb_interface *iface = usbd_get_iface(udev, iface_index);
+ struct usb_device_request req;
+
+ if ((iface == NULL) || (iface->idesc == NULL)) {
+ return (USB_ERR_INVAL);
+ }
+ DPRINTFN(5, "iface=%p, report=%d, endpt=%d\n",
+ iface, report, iface->idesc->bInterfaceNumber);
+
+ req.bmRequestType = UT_WRITE_CLASS_INTERFACE;
+ req.bRequest = UR_SET_PROTOCOL;
+ USETW(req.wValue, report);
+ req.wIndex[0] = iface->idesc->bInterfaceNumber;
+ req.wIndex[1] = 0;
+ USETW(req.wLength, 0);
+ return (usbd_do_request(udev, mtx, &req, 0));
+}
+
+/*------------------------------------------------------------------------*
+ * usbd_req_set_report
+ *
+ * Returns:
+ * 0: Success
+ * Else: Failure
+ *------------------------------------------------------------------------*/
+usb_error_t
+usbd_req_set_report(struct usb_device *udev, struct mtx *mtx, void *data, uint16_t len,
+ uint8_t iface_index, uint8_t type, uint8_t id)
+{
+ struct usb_interface *iface = usbd_get_iface(udev, iface_index);
+ struct usb_device_request req;
+
+ if ((iface == NULL) || (iface->idesc == NULL)) {
+ return (USB_ERR_INVAL);
+ }
+ DPRINTFN(5, "len=%d\n", len);
+
+ req.bmRequestType = UT_WRITE_CLASS_INTERFACE;
+ req.bRequest = UR_SET_REPORT;
+ USETW2(req.wValue, type, id);
+ req.wIndex[0] = iface->idesc->bInterfaceNumber;
+ req.wIndex[1] = 0;
+ USETW(req.wLength, len);
+ return (usbd_do_request(udev, mtx, &req, data));
+}
+
+/*------------------------------------------------------------------------*
+ * usbd_req_get_report
+ *
+ * Returns:
+ * 0: Success
+ * Else: Failure
+ *------------------------------------------------------------------------*/
+usb_error_t
+usbd_req_get_report(struct usb_device *udev, struct mtx *mtx, void *data,
+ uint16_t len, uint8_t iface_index, uint8_t type, uint8_t id)
+{
+ struct usb_interface *iface = usbd_get_iface(udev, iface_index);
+ struct usb_device_request req;
+
+ if ((iface == NULL) || (iface->idesc == NULL) || (id == 0)) {
+ return (USB_ERR_INVAL);
+ }
+ DPRINTFN(5, "len=%d\n", len);
+
+ req.bmRequestType = UT_READ_CLASS_INTERFACE;
+ req.bRequest = UR_GET_REPORT;
+ USETW2(req.wValue, type, id);
+ req.wIndex[0] = iface->idesc->bInterfaceNumber;
+ req.wIndex[1] = 0;
+ USETW(req.wLength, len);
+ return (usbd_do_request(udev, mtx, &req, data));
+}
+
+/*------------------------------------------------------------------------*
+ * usbd_req_set_idle
+ *
+ * Returns:
+ * 0: Success
+ * Else: Failure
+ *------------------------------------------------------------------------*/
+usb_error_t
+usbd_req_set_idle(struct usb_device *udev, struct mtx *mtx,
+ uint8_t iface_index, uint8_t duration, uint8_t id)
+{
+ struct usb_interface *iface = usbd_get_iface(udev, iface_index);
+ struct usb_device_request req;
+
+ if ((iface == NULL) || (iface->idesc == NULL)) {
+ return (USB_ERR_INVAL);
+ }
+ DPRINTFN(5, "%d %d\n", duration, id);
+
+ req.bmRequestType = UT_WRITE_CLASS_INTERFACE;
+ req.bRequest = UR_SET_IDLE;
+ USETW2(req.wValue, duration, id);
+ req.wIndex[0] = iface->idesc->bInterfaceNumber;
+ req.wIndex[1] = 0;
+ USETW(req.wLength, 0);
+ return (usbd_do_request(udev, mtx, &req, 0));
+}
+
+/*------------------------------------------------------------------------*
+ * usbd_req_get_report_descriptor
+ *
+ * Returns:
+ * 0: Success
+ * Else: Failure
+ *------------------------------------------------------------------------*/
+usb_error_t
+usbd_req_get_report_descriptor(struct usb_device *udev, struct mtx *mtx,
+ void *d, uint16_t size, uint8_t iface_index)
+{
+ struct usb_interface *iface = usbd_get_iface(udev, iface_index);
+ struct usb_device_request req;
+
+ if ((iface == NULL) || (iface->idesc == NULL)) {
+ return (USB_ERR_INVAL);
+ }
+ req.bmRequestType = UT_READ_INTERFACE;
+ req.bRequest = UR_GET_DESCRIPTOR;
+ USETW2(req.wValue, UDESC_REPORT, 0); /* report id should be 0 */
+ req.wIndex[0] = iface->idesc->bInterfaceNumber;
+ req.wIndex[1] = 0;
+ USETW(req.wLength, size);
+ return (usbd_do_request(udev, mtx, &req, d));
+}
+
+/*------------------------------------------------------------------------*
+ * usbd_req_set_config
+ *
+ * This function is used to select the current configuration number in
+ * both USB device side mode and USB host side mode. When setting the
+ * configuration, the function of the interfaces can change.
+ *
+ * Returns:
+ * 0: Success
+ * Else: Failure
+ *------------------------------------------------------------------------*/
+usb_error_t
+usbd_req_set_config(struct usb_device *udev, struct mtx *mtx, uint8_t conf)
+{
+ struct usb_device_request req;
+
+ DPRINTF("setting config %d\n", conf);
+
+ /* do "set configuration" request */
+
+ req.bmRequestType = UT_WRITE_DEVICE;
+ req.bRequest = UR_SET_CONFIG;
+ req.wValue[0] = conf;
+ req.wValue[1] = 0;
+ USETW(req.wIndex, 0);
+ USETW(req.wLength, 0);
+ return (usbd_do_request(udev, mtx, &req, 0));
+}
+
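+/*------------------------------------------------------------------------*
+ * Example: a minimal sketch, not part of the original source, pairing
+ * "set config" with a "get config" read-back to verify that the
+ * device accepted the new configuration number. The function name is
+ * hypothetical.
+ *------------------------------------------------------------------------*/
+#if 0
+static usb_error_t
+example_select_config(struct usb_device *udev, struct mtx *mtx,
+    uint8_t conf_no)
+{
+	uint8_t cur;
+	usb_error_t err;
+
+	err = usbd_req_set_config(udev, mtx, conf_no);
+	if (err == 0)
+		err = usbd_req_get_config(udev, mtx, &cur);
+	if (err == 0 && cur != conf_no)
+		err = USB_ERR_INVAL;
+	return (err);
+}
+#endif
+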
+/*------------------------------------------------------------------------*
+ * usbd_req_get_config
+ *
+ * Returns:
+ * 0: Success
+ * Else: Failure
+ *------------------------------------------------------------------------*/
+usb_error_t
+usbd_req_get_config(struct usb_device *udev, struct mtx *mtx, uint8_t *pconf)
+{
+ struct usb_device_request req;
+
+ req.bmRequestType = UT_READ_DEVICE;
+ req.bRequest = UR_GET_CONFIG;
+ USETW(req.wValue, 0);
+ USETW(req.wIndex, 0);
+ USETW(req.wLength, 1);
+ return (usbd_do_request(udev, mtx, &req, pconf));
+}
+
+/*------------------------------------------------------------------------*
+ * usbd_setup_device_desc
+ *------------------------------------------------------------------------*/
+usb_error_t
+usbd_setup_device_desc(struct usb_device *udev, struct mtx *mtx)
+{
+ usb_error_t err;
+
+ /*
+	 * Get the first 8 bytes of the device descriptor!
+	 *
+	 * NOTE: "usbd_do_request()" will check the device descriptor
+	 * next time we do a request to see if the maximum packet size
+	 * changed! The first 8 bytes of the device descriptor
+	 * contain the maximum packet size to use on control endpoint
+	 * 0. If this value is different from "USB_MAX_IPACKET" a new
+	 * USB control request will be set up!
+ */
+ switch (udev->speed) {
+ case USB_SPEED_FULL:
+ case USB_SPEED_LOW:
+ err = usbd_req_get_desc(udev, mtx, NULL, &udev->ddesc,
+ USB_MAX_IPACKET, USB_MAX_IPACKET, 0, UDESC_DEVICE, 0, 0);
+ if (err != 0) {
+ DPRINTFN(0, "getting device descriptor "
+ "at addr %d failed, %s\n", udev->address,
+ usbd_errstr(err));
+ return (err);
+ }
+ break;
+ default:
+ DPRINTF("Minimum MaxPacketSize is large enough "
+ "to hold the complete device descriptor\n");
+ break;
+ }
+
+ /* get the full device descriptor */
+ err = usbd_req_get_device_desc(udev, mtx, &udev->ddesc);
+
+ /* try one more time, if error */
+ if (err)
+ err = usbd_req_get_device_desc(udev, mtx, &udev->ddesc);
+
+ if (err) {
+ DPRINTF("addr=%d, getting full desc failed\n",
+ udev->address);
+ return (err);
+ }
+
+ DPRINTF("adding unit addr=%d, rev=%02x, class=%d, "
+ "subclass=%d, protocol=%d, maxpacket=%d, len=%d, speed=%d\n",
+ udev->address, UGETW(udev->ddesc.bcdUSB),
+ udev->ddesc.bDeviceClass,
+ udev->ddesc.bDeviceSubClass,
+ udev->ddesc.bDeviceProtocol,
+ udev->ddesc.bMaxPacketSize,
+ udev->ddesc.bLength,
+ udev->speed);
+
+ return (err);
+}
+
+/*------------------------------------------------------------------------*
+ * usbd_req_re_enumerate
+ *
+ * NOTE: After this function returns the hardware is in the
+ * unconfigured state! The application is responsible for setting a
+ * new configuration.
+ *
+ * Returns:
+ * 0: Success
+ * Else: Failure
+ *------------------------------------------------------------------------*/
+usb_error_t
+usbd_req_re_enumerate(struct usb_device *udev, struct mtx *mtx)
+{
+ struct usb_device *parent_hub;
+ usb_error_t err;
+ uint8_t old_addr;
+ uint8_t do_retry = 1;
+
+ if (udev->flags.usb_mode != USB_MODE_HOST) {
+ return (USB_ERR_INVAL);
+ }
+ old_addr = udev->address;
+ parent_hub = udev->parent_hub;
+ if (parent_hub == NULL) {
+ return (USB_ERR_INVAL);
+ }
+retry:
+ err = usbd_req_reset_port(parent_hub, mtx, udev->port_no);
+ if (err) {
+ DPRINTFN(0, "addr=%d, port reset failed, %s\n",
+ old_addr, usbd_errstr(err));
+ goto done;
+ }
+
+ /*
+	 * After the port has been reset, our device should be at
+	 * address zero:
+ */
+ udev->address = USB_START_ADDR;
+
+ /* reset "bMaxPacketSize" */
+ udev->ddesc.bMaxPacketSize = USB_MAX_IPACKET;
+
+ /* reset USB state */
+ usb_set_device_state(udev, USB_STATE_POWERED);
+
+ /*
+ * Restore device address:
+ */
+ err = usbd_req_set_address(udev, mtx, old_addr);
+ if (err) {
+ /* XXX ignore any errors! */
+ DPRINTFN(0, "addr=%d, set address failed! (%s, ignored)\n",
+ old_addr, usbd_errstr(err));
+ }
+ /*
+ * Restore device address, if the controller driver did not
+ * set a new one:
+ */
+ if (udev->address == USB_START_ADDR)
+ udev->address = old_addr;
+
+ /* setup the device descriptor and the initial "wMaxPacketSize" */
+ err = usbd_setup_device_desc(udev, mtx);
+
+done:
+ if (err && do_retry) {
+ /* give the USB firmware some time to load */
+ usb_pause_mtx(mtx, hz / 2);
+ /* no more retries after this retry */
+ do_retry = 0;
+ /* try again */
+ goto retry;
+ }
+ /* restore address */
+ if (udev->address == USB_START_ADDR)
+ udev->address = old_addr;
+ /* update state, if successful */
+ if (err == 0)
+ usb_set_device_state(udev, USB_STATE_ADDRESSED);
+ return (err);
+}
+
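+/*------------------------------------------------------------------------*
+ * Example: a minimal sketch, not part of the original source. Per the
+ * NOTE above, the caller must restore the configuration itself after
+ * a successful re-enumeration. The function name is hypothetical.
+ *------------------------------------------------------------------------*/
+#if 0
+static usb_error_t
+example_reset_device(struct usb_device *udev, struct mtx *mtx,
+    uint8_t conf_no)
+{
+	usb_error_t err;
+
+	err = usbd_req_re_enumerate(udev, mtx);
+	if (err)
+		return (err);
+	/* the device is now addressed, but unconfigured */
+	return (usbd_req_set_config(udev, mtx, conf_no));
+}
+#endif
+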
+/*------------------------------------------------------------------------*
+ * usbd_req_clear_device_feature
+ *
+ * Returns:
+ * 0: Success
+ * Else: Failure
+ *------------------------------------------------------------------------*/
+usb_error_t
+usbd_req_clear_device_feature(struct usb_device *udev, struct mtx *mtx,
+ uint16_t sel)
+{
+ struct usb_device_request req;
+
+ req.bmRequestType = UT_WRITE_DEVICE;
+ req.bRequest = UR_CLEAR_FEATURE;
+ USETW(req.wValue, sel);
+ USETW(req.wIndex, 0);
+ USETW(req.wLength, 0);
+ return (usbd_do_request(udev, mtx, &req, 0));
+}
+
+/*------------------------------------------------------------------------*
+ * usbd_req_set_device_feature
+ *
+ * Returns:
+ * 0: Success
+ * Else: Failure
+ *------------------------------------------------------------------------*/
+usb_error_t
+usbd_req_set_device_feature(struct usb_device *udev, struct mtx *mtx,
+ uint16_t sel)
+{
+ struct usb_device_request req;
+
+ req.bmRequestType = UT_WRITE_DEVICE;
+ req.bRequest = UR_SET_FEATURE;
+ USETW(req.wValue, sel);
+ USETW(req.wIndex, 0);
+ USETW(req.wLength, 0);
+ return (usbd_do_request(udev, mtx, &req, 0));
+}
diff --git a/rtems/freebsd/dev/usb/usb_request.h b/rtems/freebsd/dev/usb/usb_request.h
new file mode 100644
index 00000000..0cf882dc
--- /dev/null
+++ b/rtems/freebsd/dev/usb/usb_request.h
@@ -0,0 +1,89 @@
+/* $FreeBSD$ */
+/*-
+ * Copyright (c) 2008 Hans Petter Selasky. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _USB_REQUEST_HH_
+#define _USB_REQUEST_HH_
+
+struct usb_process;
+
+usb_error_t usbd_req_clear_hub_feature(struct usb_device *udev,
+ struct mtx *mtx, uint16_t sel);
+usb_error_t usbd_req_clear_port_feature(struct usb_device *udev,
+ struct mtx *mtx, uint8_t port, uint16_t sel);
+usb_error_t usbd_req_get_alt_interface_no(struct usb_device *udev,
+ struct mtx *mtx, uint8_t *alt_iface_no,
+ uint8_t iface_index);
+usb_error_t usbd_req_get_config(struct usb_device *udev, struct mtx *mtx,
+ uint8_t *pconf);
+usb_error_t usbd_req_get_descriptor_ptr(struct usb_device *udev,
+ struct usb_config_descriptor **ppcd, uint16_t wValue);
+usb_error_t usbd_req_get_config_desc(struct usb_device *udev, struct mtx *mtx,
+ struct usb_config_descriptor *d, uint8_t conf_index);
+usb_error_t usbd_req_get_config_desc_full(struct usb_device *udev,
+ struct mtx *mtx, struct usb_config_descriptor **ppcd,
+ struct malloc_type *mtype, uint8_t conf_index);
+usb_error_t usbd_req_get_desc(struct usb_device *udev, struct mtx *mtx,
+ uint16_t *actlen, void *desc, uint16_t min_len,
+ uint16_t max_len, uint16_t id, uint8_t type,
+ uint8_t index, uint8_t retries);
+usb_error_t usbd_req_get_device_desc(struct usb_device *udev, struct mtx *mtx,
+ struct usb_device_descriptor *d);
+usb_error_t usbd_req_get_device_status(struct usb_device *udev,
+ struct mtx *mtx, struct usb_status *st);
+usb_error_t usbd_req_get_hub_descriptor(struct usb_device *udev,
+ struct mtx *mtx, struct usb_hub_descriptor *hd,
+ uint8_t nports);
+usb_error_t usbd_req_get_ss_hub_descriptor(struct usb_device *udev,
+ struct mtx *mtx, struct usb_hub_ss_descriptor *hd,
+ uint8_t nports);
+usb_error_t usbd_req_get_hub_status(struct usb_device *udev, struct mtx *mtx,
+ struct usb_hub_status *st);
+usb_error_t usbd_req_get_port_status(struct usb_device *udev, struct mtx *mtx,
+ struct usb_port_status *ps, uint8_t port);
+usb_error_t usbd_req_reset_port(struct usb_device *udev, struct mtx *mtx,
+ uint8_t port);
+usb_error_t usbd_req_warm_reset_port(struct usb_device *udev,
+ struct mtx *mtx, uint8_t port);
+usb_error_t usbd_req_set_address(struct usb_device *udev, struct mtx *mtx,
+ uint16_t addr);
+usb_error_t usbd_req_set_hub_feature(struct usb_device *udev, struct mtx *mtx,
+ uint16_t sel);
+usb_error_t usbd_req_set_port_feature(struct usb_device *udev,
+ struct mtx *mtx, uint8_t port, uint16_t sel);
+usb_error_t usbd_setup_device_desc(struct usb_device *udev, struct mtx *mtx);
+usb_error_t usbd_req_re_enumerate(struct usb_device *udev, struct mtx *mtx);
+usb_error_t usbd_req_clear_device_feature(struct usb_device *udev,
+ struct mtx *mtx, uint16_t sel);
+usb_error_t usbd_req_set_device_feature(struct usb_device *udev,
+ struct mtx *mtx, uint16_t sel);
+usb_error_t usbd_req_set_hub_u1_timeout(struct usb_device *udev,
+ struct mtx *mtx, uint8_t port, uint8_t timeout);
+usb_error_t usbd_req_set_hub_u2_timeout(struct usb_device *udev,
+ struct mtx *mtx, uint8_t port, uint8_t timeout);
+usb_error_t usbd_req_set_hub_depth(struct usb_device *udev,
+ struct mtx *mtx, uint16_t depth);
+
+#endif /* _USB_REQUEST_HH_ */
diff --git a/rtems/freebsd/dev/usb/usb_transfer.c b/rtems/freebsd/dev/usb/usb_transfer.c
new file mode 100644
index 00000000..8f48d08b
--- /dev/null
+++ b/rtems/freebsd/dev/usb/usb_transfer.c
@@ -0,0 +1,3305 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/* $FreeBSD$ */
+/*-
+ * Copyright (c) 2008 Hans Petter Selasky. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <rtems/freebsd/sys/stdint.h>
+#include <rtems/freebsd/sys/stddef.h>
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/queue.h>
+#include <rtems/freebsd/sys/types.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/bus.h>
+#include <rtems/freebsd/sys/linker_set.h>
+#include <rtems/freebsd/sys/module.h>
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/mutex.h>
+#include <rtems/freebsd/sys/condvar.h>
+#include <rtems/freebsd/sys/sysctl.h>
+#include <rtems/freebsd/sys/sx.h>
+#include <rtems/freebsd/sys/unistd.h>
+#include <rtems/freebsd/sys/callout.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/priv.h>
+
+#include <rtems/freebsd/dev/usb/usb.h>
+#include <rtems/freebsd/dev/usb/usbdi.h>
+#include <rtems/freebsd/dev/usb/usbdi_util.h>
+
+#define USB_DEBUG_VAR usb_debug
+
+#include <rtems/freebsd/dev/usb/usb_core.h>
+#include <rtems/freebsd/dev/usb/usb_busdma.h>
+#include <rtems/freebsd/dev/usb/usb_process.h>
+#include <rtems/freebsd/dev/usb/usb_transfer.h>
+#include <rtems/freebsd/dev/usb/usb_device.h>
+#include <rtems/freebsd/dev/usb/usb_debug.h>
+#include <rtems/freebsd/dev/usb/usb_util.h>
+
+#include <rtems/freebsd/dev/usb/usb_controller.h>
+#include <rtems/freebsd/dev/usb/usb_bus.h>
+#ifdef __rtems__
+#include <rtems/freebsd/machine/rtems-bsd-cache.h>
+#endif /* __rtems__ */
+
+struct usb_std_packet_size {
+ struct {
+ uint16_t min; /* inclusive */
+ uint16_t max; /* inclusive */
+ } range;
+
+ uint16_t fixed[4];
+};
+
+static usb_callback_t usb_request_callback;
+
+static const struct usb_config usb_control_ep_cfg[USB_CTRL_XFER_MAX] = {
+
+ /* This transfer is used for generic control endpoint transfers */
+
+ [0] = {
+ .type = UE_CONTROL,
+ .endpoint = 0x00, /* Control endpoint */
+ .direction = UE_DIR_ANY,
+ .bufsize = USB_EP0_BUFSIZE, /* bytes */
+ .flags = {.proxy_buffer = 1,},
+ .callback = &usb_request_callback,
+ .usb_mode = USB_MODE_DUAL, /* both modes */
+ },
+
+ /* This transfer is used for generic clear stall only */
+
+ [1] = {
+ .type = UE_CONTROL,
+ .endpoint = 0x00, /* Control pipe */
+ .direction = UE_DIR_ANY,
+ .bufsize = sizeof(struct usb_device_request),
+ .callback = &usb_do_clear_stall_callback,
+ .timeout = 1000, /* 1 second */
+ .interval = 50, /* 50ms */
+ .usb_mode = USB_MODE_HOST,
+ },
+};
+
+/* function prototypes */
+
+static void usbd_update_max_frame_size(struct usb_xfer *);
+static void usbd_transfer_unsetup_sub(struct usb_xfer_root *, uint8_t);
+static void usbd_control_transfer_init(struct usb_xfer *);
+static int usbd_setup_ctrl_transfer(struct usb_xfer *);
+static void usb_callback_proc(struct usb_proc_msg *);
+static void usbd_callback_ss_done_defer(struct usb_xfer *);
+static void usbd_callback_wrapper(struct usb_xfer_queue *);
+static void usbd_transfer_start_cb(void *);
+static uint8_t usbd_callback_wrapper_sub(struct usb_xfer *);
+static void usbd_get_std_packet_size(struct usb_std_packet_size *ptr,
+ uint8_t type, enum usb_dev_speed speed);
+
+/*------------------------------------------------------------------------*
+ * usb_request_callback
+ *------------------------------------------------------------------------*/
+static void
+usb_request_callback(struct usb_xfer *xfer, usb_error_t error)
+{
+ if (xfer->flags_int.usb_mode == USB_MODE_DEVICE)
+ usb_handle_request_callback(xfer, error);
+ else
+ usbd_do_request_callback(xfer, error);
+}
+
+/*------------------------------------------------------------------------*
+ * usbd_update_max_frame_size
+ *
+ * This function updates the maximum frame size, since high speed USB
+ * can transfer multiple consecutive packets per frame.
+ *------------------------------------------------------------------------*/
+static void
+usbd_update_max_frame_size(struct usb_xfer *xfer)
+{
+ /* compute maximum frame size */
+ /* this computation should not overflow 16-bit */
+ /* max = 15 * 1024 */
+
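+	/* e.g. high-speed isoc: 1024 bytes * 3 packets = 3072 bytes */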
+ xfer->max_frame_size = xfer->max_packet_size * xfer->max_packet_count;
+}
+
+/*------------------------------------------------------------------------*
+ * usbd_get_dma_delay
+ *
+ * The following function is called when we need to
+ * synchronize with DMA hardware.
+ *
+ * Returns:
+ * 0: no DMA delay required
+ * Else: milliseconds of DMA delay
+ *------------------------------------------------------------------------*/
+usb_timeout_t
+usbd_get_dma_delay(struct usb_device *udev)
+{
+ struct usb_bus_methods *mtod;
+ uint32_t temp;
+
+ mtod = udev->bus->methods;
+ temp = 0;
+
+ if (mtod->get_dma_delay) {
+ (mtod->get_dma_delay) (udev, &temp);
+ /*
+		 * Round up and convert to milliseconds. Note that we use
+		 * 1024 milliseconds per second, to save a division.
+ */
+ temp += 0x3FF;
+ temp /= 0x400;
+ }
+ return (temp);
+}
+
+/*------------------------------------------------------------------------*
+ * usbd_transfer_setup_sub_malloc
+ *
+ * This function will allocate one or more DMA'able memory chunks
+ * according to the "size", "align" and "count" arguments. "ppc" will
+ * point to a linear array of USB page caches afterwards.
+ *
+ * Returns:
+ * 0: Success
+ * Else: Failure
+ *------------------------------------------------------------------------*/
+#if USB_HAVE_BUSDMA
+uint8_t
+usbd_transfer_setup_sub_malloc(struct usb_setup_params *parm,
+ struct usb_page_cache **ppc, usb_size_t size, usb_size_t align,
+ usb_size_t count)
+{
+ struct usb_page_cache *pc;
+ struct usb_page *pg;
+ void *buf;
+ usb_size_t n_dma_pc;
+ usb_size_t n_obj;
+ usb_size_t x;
+ usb_size_t y;
+ usb_size_t r;
+ usb_size_t z;
+
+ USB_ASSERT(align > 1, ("Invalid alignment, 0x%08x\n",
+ align));
+ USB_ASSERT(size > 0, ("Invalid size = 0\n"));
+
+ if (count == 0) {
+ return (0); /* nothing to allocate */
+ }
+#ifdef __rtems__
+#ifdef CPU_DATA_CACHE_ALIGNMENT
+ if (align < CPU_DATA_CACHE_ALIGNMENT) {
+ align = CPU_DATA_CACHE_ALIGNMENT;
+ }
+#endif /* CPU_DATA_CACHE_ALIGNMENT */
+#endif /* __rtems__ */
+	/*
+	 * Round "size" up to a multiple of "align"; the alignment must
+	 * be a power of two for the negation trick below to work.
+	 */
+ size = -((-size) & (-align));
+
+ /*
+ * Try multi-allocation chunks to reduce the number of DMA
+	 * allocations, since DMA allocations are slow.
+ */
+ if (size >= PAGE_SIZE) {
+ n_dma_pc = count;
+ n_obj = 1;
+ } else {
+ /* compute number of objects per page */
+ n_obj = (PAGE_SIZE / size);
+ /*
+ * Compute number of DMA chunks, rounded up
+ * to nearest one:
+ */
+ n_dma_pc = ((count + n_obj - 1) / n_obj);
+ }
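+	/*
+	 * For example: size=256, PAGE_SIZE=4096 and count=10 yield
+	 * n_obj=16 objects per chunk and n_dma_pc=1 DMA chunk.
+	 */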
+
+ if (parm->buf == NULL) {
+		/* first pass: only account for the needed memory */
+ parm->dma_page_ptr += n_dma_pc;
+ parm->dma_page_cache_ptr += n_dma_pc;
+ parm->dma_page_ptr += count;
+ parm->xfer_page_cache_ptr += count;
+ return (0);
+ }
+ for (x = 0; x != n_dma_pc; x++) {
+ /* need to initialize the page cache */
+ parm->dma_page_cache_ptr[x].tag_parent =
+ &parm->curr_xfer->xroot->dma_parent_tag;
+ }
+ for (x = 0; x != count; x++) {
+ /* need to initialize the page cache */
+ parm->xfer_page_cache_ptr[x].tag_parent =
+ &parm->curr_xfer->xroot->dma_parent_tag;
+ }
+
+ if (ppc) {
+ *ppc = parm->xfer_page_cache_ptr;
+ }
+ r = count; /* set remainder count */
+ z = n_obj * size; /* set allocation size */
+ pc = parm->xfer_page_cache_ptr;
+ pg = parm->dma_page_ptr;
+
+ for (x = 0; x != n_dma_pc; x++) {
+
+ if (r < n_obj) {
+ /* compute last remainder */
+ z = r * size;
+ n_obj = r;
+ }
+ if (usb_pc_alloc_mem(parm->dma_page_cache_ptr,
+ pg, z, align)) {
+ return (1); /* failure */
+ }
+ /* Set beginning of current buffer */
+ buf = parm->dma_page_cache_ptr->buffer;
+ /* Make room for one DMA page cache and one page */
+ parm->dma_page_cache_ptr++;
+ pg++;
+
+ for (y = 0; (y != n_obj); y++, r--, pc++, pg++) {
+
+ /* Load sub-chunk into DMA */
+ if (usb_pc_dmamap_create(pc, size)) {
+ return (1); /* failure */
+ }
+ pc->buffer = USB_ADD_BYTES(buf, y * size);
+ pc->page_start = pg;
+
+ mtx_lock(pc->tag_parent->mtx);
+ if (usb_pc_load_mem(pc, size, 1 /* synchronous */ )) {
+ mtx_unlock(pc->tag_parent->mtx);
+ return (1); /* failure */
+ }
+ mtx_unlock(pc->tag_parent->mtx);
+ }
+ }
+
+ parm->xfer_page_cache_ptr = pc;
+ parm->dma_page_ptr = pg;
+ return (0);
+}
+#endif
+
+/*------------------------------------------------------------------------*
+ * usbd_transfer_setup_sub - transfer setup subroutine
+ *
+ * This function must be called from the "xfer_setup" callback of the
+ * USB Host or Device controller driver when setting up a USB
+ * transfer. This function will set up correct packet sizes, buffer
+ * sizes, flags and more, that are stored in the "usb_xfer"
+ * structure.
+ *------------------------------------------------------------------------*/
+void
+usbd_transfer_setup_sub(struct usb_setup_params *parm)
+{
+ enum {
+ REQ_SIZE = 8,
+ MIN_PKT = 8,
+ };
+ struct usb_xfer *xfer = parm->curr_xfer;
+ const struct usb_config *setup = parm->curr_setup;
+ struct usb_endpoint_ss_comp_descriptor *ecomp;
+ struct usb_endpoint_descriptor *edesc;
+ struct usb_std_packet_size std_size;
+ usb_frcount_t n_frlengths;
+ usb_frcount_t n_frbuffers;
+ usb_frcount_t x;
+ uint8_t type;
+ uint8_t zmps;
+
+ /*
+ * Sanity check. The following parameters must be initialized before
+ * calling this function.
+ */
+ if ((parm->hc_max_packet_size == 0) ||
+ (parm->hc_max_packet_count == 0) ||
+ (parm->hc_max_frame_size == 0)) {
+ parm->err = USB_ERR_INVAL;
+ goto done;
+ }
+ edesc = xfer->endpoint->edesc;
+ ecomp = xfer->endpoint->ecomp;
+
+ type = (edesc->bmAttributes & UE_XFERTYPE);
+
+ xfer->flags = setup->flags;
+ xfer->nframes = setup->frames;
+ xfer->timeout = setup->timeout;
+ xfer->callback = setup->callback;
+ xfer->interval = setup->interval;
+ xfer->endpointno = edesc->bEndpointAddress;
+ xfer->max_packet_size = UGETW(edesc->wMaxPacketSize);
+ xfer->max_packet_count = 1;
+ /* make a shadow copy: */
+ xfer->flags_int.usb_mode = parm->udev->flags.usb_mode;
+
+ parm->bufsize = setup->bufsize;
+
+ switch (parm->speed) {
+ case USB_SPEED_HIGH:
+ switch (type) {
+ case UE_ISOCHRONOUS:
+ case UE_INTERRUPT:
+ xfer->max_packet_count += (xfer->max_packet_size >> 11) & 3;
+
+ /* check for invalid max packet count */
+ if (xfer->max_packet_count > 3)
+ xfer->max_packet_count = 3;
+ break;
+ default:
+ break;
+ }
+ xfer->max_packet_size &= 0x7FF;
+ break;
+ case USB_SPEED_SUPER:
+ xfer->max_packet_count += (xfer->max_packet_size >> 11) & 3;
+
+ if (ecomp != NULL)
+ xfer->max_packet_count += ecomp->bMaxBurst;
+
+ if ((xfer->max_packet_count == 0) ||
+ (xfer->max_packet_count > 16))
+ xfer->max_packet_count = 16;
+
+ switch (type) {
+ case UE_CONTROL:
+ xfer->max_packet_count = 1;
+ break;
+ case UE_ISOCHRONOUS:
+ if (ecomp != NULL) {
+ uint8_t mult;
+
+ mult = (ecomp->bmAttributes & 3) + 1;
+ if (mult > 3)
+ mult = 3;
+
+ xfer->max_packet_count *= mult;
+ }
+ break;
+ default:
+ break;
+ }
+ xfer->max_packet_size &= 0x7FF;
+ break;
+ default:
+ break;
+ }
+ /* range check "max_packet_count" */
+
+ if (xfer->max_packet_count > parm->hc_max_packet_count) {
+ xfer->max_packet_count = parm->hc_max_packet_count;
+ }
+ /* filter "wMaxPacketSize" according to HC capabilities */
+
+ if ((xfer->max_packet_size > parm->hc_max_packet_size) ||
+ (xfer->max_packet_size == 0)) {
+ xfer->max_packet_size = parm->hc_max_packet_size;
+ }
+ /* filter "wMaxPacketSize" according to standard sizes */
+
+ usbd_get_std_packet_size(&std_size, type, parm->speed);
+
+ if (std_size.range.min || std_size.range.max) {
+
+ if (xfer->max_packet_size < std_size.range.min) {
+ xfer->max_packet_size = std_size.range.min;
+ }
+ if (xfer->max_packet_size > std_size.range.max) {
+ xfer->max_packet_size = std_size.range.max;
+ }
+ } else {
+
+ if (xfer->max_packet_size >= std_size.fixed[3]) {
+ xfer->max_packet_size = std_size.fixed[3];
+ } else if (xfer->max_packet_size >= std_size.fixed[2]) {
+ xfer->max_packet_size = std_size.fixed[2];
+ } else if (xfer->max_packet_size >= std_size.fixed[1]) {
+ xfer->max_packet_size = std_size.fixed[1];
+ } else {
+ /* only one possibility left */
+ xfer->max_packet_size = std_size.fixed[0];
+ }
+ }
+
+ /* compute "max_frame_size" */
+
+ usbd_update_max_frame_size(xfer);
+
+ /* check interrupt interval and transfer pre-delay */
+
+ if (type == UE_ISOCHRONOUS) {
+
+ uint16_t frame_limit;
+
+ xfer->interval = 0; /* not used, must be zero */
+ xfer->flags_int.isochronous_xfr = 1; /* set flag */
+
+ if (xfer->timeout == 0) {
+ /*
+ * set a default timeout in
+ * case something goes wrong!
+ */
+ xfer->timeout = 1000 / 4;
+ }
+ switch (parm->speed) {
+ case USB_SPEED_LOW:
+ case USB_SPEED_FULL:
+ frame_limit = USB_MAX_FS_ISOC_FRAMES_PER_XFER;
+ xfer->fps_shift = 0;
+ break;
+ default:
+ frame_limit = USB_MAX_HS_ISOC_FRAMES_PER_XFER;
+ xfer->fps_shift = edesc->bInterval;
+ if (xfer->fps_shift > 0)
+ xfer->fps_shift--;
+ if (xfer->fps_shift > 3)
+ xfer->fps_shift = 3;
+ break;
+ }
+
+ if (xfer->nframes > frame_limit) {
+ /*
+ * this is not going to work
+ * cross hardware
+			 * across different hardware
+ parm->err = USB_ERR_INVAL;
+ goto done;
+ }
+ if (xfer->nframes == 0) {
+ /*
+ * this is not a valid value
+ */
+ parm->err = USB_ERR_ZERO_NFRAMES;
+ goto done;
+ }
+ } else {
+
+ /*
+		 * If a value is specified, use that; else check the
+		 * endpoint descriptor!
+ */
+ if (type == UE_INTERRUPT) {
+
+ uint32_t temp;
+
+ if (xfer->interval == 0) {
+
+ xfer->interval = edesc->bInterval;
+
+ switch (parm->speed) {
+ case USB_SPEED_LOW:
+ case USB_SPEED_FULL:
+ break;
+ default:
+ /* 125us -> 1ms */
+ if (xfer->interval < 4)
+ xfer->interval = 1;
+ else if (xfer->interval > 16)
+ xfer->interval = (1 << (16 - 4));
+ else
+ xfer->interval =
+ (1 << (xfer->interval - 4));
+ break;
+ }
+ }
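+			/*
+			 * For example: a high-speed "bInterval" of 4
+			 * maps to 1 << (4 - 4) = 1 millisecond.
+			 */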
+
+ if (xfer->interval == 0) {
+ /*
+ * One millisecond is the smallest
+ * interval we support:
+ */
+ xfer->interval = 1;
+ }
+
+ xfer->fps_shift = 0;
+ temp = 1;
+
+ while ((temp != 0) && (temp < xfer->interval)) {
+ xfer->fps_shift++;
+ temp *= 2;
+ }
+
+ switch (parm->speed) {
+ case USB_SPEED_LOW:
+ case USB_SPEED_FULL:
+ break;
+ default:
+ xfer->fps_shift += 3;
+ break;
+ }
+ }
+ }
+
+ /*
+ * NOTE: we do not allow "max_packet_size" or "max_frame_size"
+	 * to be equal to zero when setting up USB transfers, since
+	 * that would lead to a lot of extra code in the USB kernel.
+ */
+
+ if ((xfer->max_frame_size == 0) ||
+ (xfer->max_packet_size == 0)) {
+
+ zmps = 1;
+
+ if ((parm->bufsize <= MIN_PKT) &&
+ (type != UE_CONTROL) &&
+ (type != UE_BULK)) {
+
+ /* workaround */
+ xfer->max_packet_size = MIN_PKT;
+ xfer->max_packet_count = 1;
+ parm->bufsize = 0; /* automatic setup length */
+ usbd_update_max_frame_size(xfer);
+
+ } else {
+ parm->err = USB_ERR_ZERO_MAXP;
+ goto done;
+ }
+
+ } else {
+ zmps = 0;
+ }
+
+ /*
+ * check if we should setup a default
+ * length:
+ */
+
+ if (parm->bufsize == 0) {
+
+ parm->bufsize = xfer->max_frame_size;
+
+ if (type == UE_ISOCHRONOUS) {
+ parm->bufsize *= xfer->nframes;
+ }
+ }
+ /*
+ * check if we are about to setup a proxy
+ * type of buffer:
+ */
+
+ if (xfer->flags.proxy_buffer) {
+
+ /* round bufsize up */
+
+ parm->bufsize += (xfer->max_frame_size - 1);
+
+ if (parm->bufsize < xfer->max_frame_size) {
+ /* length wrapped around */
+ parm->err = USB_ERR_INVAL;
+ goto done;
+ }
+ /* subtract remainder */
+
+ parm->bufsize -= (parm->bufsize % xfer->max_frame_size);
+
+ /* add length of USB device request structure, if any */
+
+ if (type == UE_CONTROL) {
+ parm->bufsize += REQ_SIZE; /* SETUP message */
+ }
+ }
+ xfer->max_data_length = parm->bufsize;
+
+ /* Setup "n_frlengths" and "n_frbuffers" */
+
+ if (type == UE_ISOCHRONOUS) {
+ n_frlengths = xfer->nframes;
+ n_frbuffers = 1;
+ } else {
+
+ if (type == UE_CONTROL) {
+ xfer->flags_int.control_xfr = 1;
+ if (xfer->nframes == 0) {
+ if (parm->bufsize <= REQ_SIZE) {
+ /*
+ * there will never be any data
+ * stage
+ */
+ xfer->nframes = 1;
+ } else {
+ xfer->nframes = 2;
+ }
+ }
+ } else {
+ if (xfer->nframes == 0) {
+ xfer->nframes = 1;
+ }
+ }
+
+ n_frlengths = xfer->nframes;
+ n_frbuffers = xfer->nframes;
+ }
+
+ /*
+ * check if we have room for the
+ * USB device request structure:
+ */
+
+ if (type == UE_CONTROL) {
+
+ if (xfer->max_data_length < REQ_SIZE) {
+			/* length wrapped around or bufsize too small */
+ parm->err = USB_ERR_INVAL;
+ goto done;
+ }
+ xfer->max_data_length -= REQ_SIZE;
+ }
+ /* setup "frlengths" */
+ xfer->frlengths = parm->xfer_length_ptr;
+ parm->xfer_length_ptr += n_frlengths;
+
+ /* setup "frbuffers" */
+ xfer->frbuffers = parm->xfer_page_cache_ptr;
+ parm->xfer_page_cache_ptr += n_frbuffers;
+
+ /* initialize max frame count */
+ xfer->max_frame_count = xfer->nframes;
+
+ /*
+ * check if we need to setup
+ * a local buffer:
+ */
+
+ if (!xfer->flags.ext_buffer) {
+
+ /* align data */
+#ifdef __rtems__
+#ifdef CPU_DATA_CACHE_ALIGNMENT
+ parm->size[0] += CPU_DATA_CACHE_ALIGNMENT;
+#endif /* CPU_DATA_CACHE_ALIGNMENT */
+#else /* __rtems__ */
+ parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
+#endif /* __rtems__ */
+
+ if (parm->buf) {
+
+ xfer->local_buffer =
+ USB_ADD_BYTES(parm->buf, parm->size[0]);
+#ifdef __rtems__
+#ifdef CPU_DATA_CACHE_ALIGNMENT
+ xfer->local_buffer = (char *) xfer->local_buffer
+ + ((-(uintptr_t) xfer->local_buffer)
+ & (CPU_DATA_CACHE_ALIGNMENT - 1));
+#endif /* CPU_DATA_CACHE_ALIGNMENT */
+#endif /* __rtems__ */
+
+ usbd_xfer_set_frame_offset(xfer, 0, 0);
+
+ if ((type == UE_CONTROL) && (n_frbuffers > 1)) {
+ usbd_xfer_set_frame_offset(xfer, REQ_SIZE, 1);
+ }
+ }
+ parm->size[0] += parm->bufsize;
+
+ /* align data again */
+#ifdef __rtems__
+#ifdef CPU_DATA_CACHE_ALIGNMENT
+ parm->size[0] += CPU_DATA_CACHE_ALIGNMENT;
+#endif /* CPU_DATA_CACHE_ALIGNMENT */
+#endif /* __rtems__ */
+ parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
+ }
+ /*
+ * Compute maximum buffer size
+ */
+
+ if (parm->bufsize_max < parm->bufsize) {
+ parm->bufsize_max = parm->bufsize;
+ }
+#if USB_HAVE_BUSDMA
+ if (xfer->flags_int.bdma_enable) {
+ /*
+ * Setup "dma_page_ptr".
+ *
+ * Proof for formula below:
+ *
+ * Assume there are three USB frames having length "a", "b" and
+ * "c". These USB frames will at maximum need "z"
+ * "usb_page" structures. "z" is given by:
+ *
+ * z = ((a / USB_PAGE_SIZE) + 2) + ((b / USB_PAGE_SIZE) + 2) +
+ * ((c / USB_PAGE_SIZE) + 2);
+ *
+ * Constraining "a", "b" and "c" like this:
+ *
+ * (a + b + c) <= parm->bufsize
+ *
+ * We know that:
+ *
+ * z <= ((parm->bufsize / USB_PAGE_SIZE) + (3*2));
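+	 *
+	 * For example, with USB_PAGE_SIZE = 4096 and frames of
+	 * 1000, 5000 and 9000 bytes, "z" is (0+2) + (1+2) + (2+2)
+	 * = 9, and the bound gives (15000 / 4096) + 6 = 9 as well.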
+ *
+ * Here is the general formula:
+ */
+ xfer->dma_page_ptr = parm->dma_page_ptr;
+ parm->dma_page_ptr += (2 * n_frbuffers);
+ parm->dma_page_ptr += (parm->bufsize / USB_PAGE_SIZE);
+ }
+#endif
+ if (zmps) {
+ /* correct maximum data length */
+ xfer->max_data_length = 0;
+ }
+ /* subtract USB frame remainder from "hc_max_frame_size" */
+
+ xfer->max_hc_frame_size =
+ (parm->hc_max_frame_size -
+ (parm->hc_max_frame_size % xfer->max_frame_size));
+
+ if (xfer->max_hc_frame_size == 0) {
+ parm->err = USB_ERR_INVAL;
+ goto done;
+ }
+
+ /* initialize frame buffers */
+
+ if (parm->buf) {
+ for (x = 0; x != n_frbuffers; x++) {
+ xfer->frbuffers[x].tag_parent =
+ &xfer->xroot->dma_parent_tag;
+#if USB_HAVE_BUSDMA
+ if (xfer->flags_int.bdma_enable &&
+ (parm->bufsize_max > 0)) {
+
+ if (usb_pc_dmamap_create(
+ xfer->frbuffers + x,
+ parm->bufsize_max)) {
+ parm->err = USB_ERR_NOMEM;
+ goto done;
+ }
+ }
+#endif
+ }
+ }
+done:
+ if (parm->err) {
+ /*
+ * Set some dummy values so that we avoid division by zero:
+ */
+ xfer->max_hc_frame_size = 1;
+ xfer->max_frame_size = 1;
+ xfer->max_packet_size = 1;
+ xfer->max_data_length = 0;
+ xfer->nframes = 0;
+ xfer->max_frame_count = 0;
+ }
+}
+
+/*------------------------------------------------------------------------*
+ * usbd_transfer_setup - setup an array of USB transfers
+ *
+ * NOTE: You must always call "usbd_transfer_unsetup" after calling
+ * "usbd_transfer_setup" if success was returned.
+ *
+ * The idea is that the USB device driver should pre-allocate all its
+ * transfers by one call to this function.
+ *
+ * Return values:
+ * 0: Success
+ * Else: Failure
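+ *
+ * Usage sketch (hypothetical driver code; "sc", "sc->xfer", "sc->mtx",
+ * "my_config" and the callbacks are placeholders and not part of this
+ * file):
+ *
+ *	static const struct usb_config my_config[2] = {
+ *		[0] = {
+ *			.type = UE_BULK,
+ *			.endpoint = UE_ADDR_ANY,
+ *			.direction = UE_DIR_OUT,
+ *			.bufsize = 512,
+ *			.callback = &my_write_callback,
+ *		},
+ *		[1] = {
+ *			.type = UE_BULK,
+ *			.endpoint = UE_ADDR_ANY,
+ *			.direction = UE_DIR_IN,
+ *			.bufsize = 512,
+ *			.callback = &my_read_callback,
+ *		},
+ *	};
+ *
+ *	uint8_t iface_index = 0;
+ *
+ *	if (usbd_transfer_setup(udev, &iface_index, sc->xfer,
+ *	    my_config, 2, sc, &sc->mtx)) {
+ *		// handle failure
+ *	}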
+ *------------------------------------------------------------------------*/
+usb_error_t
+usbd_transfer_setup(struct usb_device *udev,
+ const uint8_t *ifaces, struct usb_xfer **ppxfer,
+ const struct usb_config *setup_start, uint16_t n_setup,
+ void *priv_sc, struct mtx *xfer_mtx)
+{
+ struct usb_xfer dummy;
+ struct usb_setup_params parm;
+ const struct usb_config *setup_end = setup_start + n_setup;
+ const struct usb_config *setup;
+ struct usb_endpoint *ep;
+ struct usb_xfer_root *info;
+ struct usb_xfer *xfer;
+ void *buf = NULL;
+ uint16_t n;
+ uint16_t refcount;
+
+ parm.err = 0;
+ refcount = 0;
+ info = NULL;
+
+ WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
+ "usbd_transfer_setup can sleep!");
+
+ /* do some checking first */
+
+ if (n_setup == 0) {
+ DPRINTFN(6, "setup array has zero length!\n");
+ return (USB_ERR_INVAL);
+ }
+	if (ifaces == NULL) {
+ DPRINTFN(6, "ifaces array is NULL!\n");
+ return (USB_ERR_INVAL);
+ }
+ if (xfer_mtx == NULL) {
+ DPRINTFN(6, "using global lock\n");
+ xfer_mtx = &Giant;
+ }
+ /* sanity checks */
+ for (setup = setup_start, n = 0;
+ setup != setup_end; setup++, n++) {
+ if (setup->bufsize == (usb_frlength_t)-1) {
+ parm.err = USB_ERR_BAD_BUFSIZE;
+ DPRINTF("invalid bufsize\n");
+ }
+ if (setup->callback == NULL) {
+ parm.err = USB_ERR_NO_CALLBACK;
+ DPRINTF("no callback\n");
+ }
+ ppxfer[n] = NULL;
+ }
+
+ if (parm.err) {
+ goto done;
+ }
+ bzero(&parm, sizeof(parm));
+
+ parm.udev = udev;
+ parm.speed = usbd_get_speed(udev);
+ parm.hc_max_packet_count = 1;
+
+ if (parm.speed >= USB_SPEED_MAX) {
+ parm.err = USB_ERR_INVAL;
+ goto done;
+ }
+ /* setup all transfers */
+
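+	/*
+	 * The loop below runs twice: in the first round "buf" is
+	 * NULL and all sizes are computed using a dummy "usb_xfer"
+	 * structure, then the memory block is allocated and a
+	 * second round initializes the real structures inside it.
+	 */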
+ while (1) {
+
+ if (buf) {
+ /*
+ * Initialize the "usb_xfer_root" structure,
+ * which is common for all our USB transfers.
+ */
+ info = USB_ADD_BYTES(buf, 0);
+
+ info->memory_base = buf;
+ info->memory_size = parm.size[0];
+
+#if USB_HAVE_BUSDMA
+ info->dma_page_cache_start = USB_ADD_BYTES(buf, parm.size[4]);
+ info->dma_page_cache_end = USB_ADD_BYTES(buf, parm.size[5]);
+#endif
+ info->xfer_page_cache_start = USB_ADD_BYTES(buf, parm.size[5]);
+ info->xfer_page_cache_end = USB_ADD_BYTES(buf, parm.size[2]);
+
+ cv_init(&info->cv_drain, "WDRAIN");
+
+ info->xfer_mtx = xfer_mtx;
+#if USB_HAVE_BUSDMA
+ usb_dma_tag_setup(&info->dma_parent_tag,
+ parm.dma_tag_p, udev->bus->dma_parent_tag[0].tag,
+ xfer_mtx, &usb_bdma_done_event, 32, parm.dma_tag_max);
+#endif
+
+ info->bus = udev->bus;
+ info->udev = udev;
+
+ TAILQ_INIT(&info->done_q.head);
+ info->done_q.command = &usbd_callback_wrapper;
+#if USB_HAVE_BUSDMA
+ TAILQ_INIT(&info->dma_q.head);
+ info->dma_q.command = &usb_bdma_work_loop;
+#endif
+ info->done_m[0].hdr.pm_callback = &usb_callback_proc;
+ info->done_m[0].xroot = info;
+ info->done_m[1].hdr.pm_callback = &usb_callback_proc;
+ info->done_m[1].xroot = info;
+
+ /*
+ * In device side mode control endpoint
+ * requests need to run from a separate
+ * context, else there is a chance of
+ * deadlock!
+ */
+ if (setup_start == usb_control_ep_cfg)
+ info->done_p =
+ &udev->bus->control_xfer_proc;
+ else if (xfer_mtx == &Giant)
+ info->done_p =
+ &udev->bus->giant_callback_proc;
+ else
+ info->done_p =
+ &udev->bus->non_giant_callback_proc;
+ }
+ /* reset sizes */
+
+ parm.size[0] = 0;
+ parm.buf = buf;
+ parm.size[0] += sizeof(info[0]);
+
+ for (setup = setup_start, n = 0;
+ setup != setup_end; setup++, n++) {
+
+ /* skip USB transfers without callbacks: */
+ if (setup->callback == NULL) {
+ continue;
+ }
+ /* see if there is a matching endpoint */
+ ep = usbd_get_endpoint(udev,
+ ifaces[setup->if_index], setup);
+
+ if ((ep == NULL) || (ep->methods == NULL)) {
+ if (setup->flags.no_pipe_ok)
+ continue;
+ if ((setup->usb_mode != USB_MODE_DUAL) &&
+ (setup->usb_mode != udev->flags.usb_mode))
+ continue;
+ parm.err = USB_ERR_NO_PIPE;
+ goto done;
+ }
+
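+			/*
+			 * Note: "(-x) & (USB_HOST_ALIGN - 1)" is the
+			 * number of bytes needed to round "x" up to
+			 * the next multiple of USB_HOST_ALIGN, which
+			 * is a power of two.
+			 */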
+ /* align data properly */
+ parm.size[0] += ((-parm.size[0]) & (USB_HOST_ALIGN - 1));
+
+ /* store current setup pointer */
+ parm.curr_setup = setup;
+
+ if (buf) {
+ /*
+ * Common initialization of the
+ * "usb_xfer" structure.
+ */
+ xfer = USB_ADD_BYTES(buf, parm.size[0]);
+ xfer->address = udev->address;
+ xfer->priv_sc = priv_sc;
+ xfer->xroot = info;
+
+ usb_callout_init_mtx(&xfer->timeout_handle,
+ &udev->bus->bus_mtx, 0);
+ } else {
+ /*
+				 * Setup a dummy xfer, since we are
+ * writing to the "usb_xfer"
+ * structure pointed to by "xfer"
+ * before we have allocated any
+ * memory:
+ */
+ xfer = &dummy;
+ bzero(&dummy, sizeof(dummy));
+ refcount++;
+ }
+
+ /* set transfer endpoint pointer */
+ xfer->endpoint = ep;
+
+ parm.size[0] += sizeof(xfer[0]);
+ parm.methods = xfer->endpoint->methods;
+ parm.curr_xfer = xfer;
+
+ /*
+ * Call the Host or Device controller transfer
+ * setup routine:
+ */
+ (udev->bus->methods->xfer_setup) (&parm);
+
+ /* check for error */
+ if (parm.err)
+ goto done;
+
+ if (buf) {
+ /*
+ * Increment the endpoint refcount. This
+ * basically prevents setting a new
+ * configuration and alternate setting
+ * when USB transfers are in use on
+ * the given interface. Search the USB
+ * code for "endpoint->refcount_alloc" if you
+ * want more information.
+ */
+ USB_BUS_LOCK(info->bus);
+ if (xfer->endpoint->refcount_alloc >= USB_EP_REF_MAX)
+ parm.err = USB_ERR_INVAL;
+
+ xfer->endpoint->refcount_alloc++;
+
+ if (xfer->endpoint->refcount_alloc == 0)
+ panic("usbd_transfer_setup(): Refcount wrapped to zero\n");
+ USB_BUS_UNLOCK(info->bus);
+
+ /*
+ * Whenever we set ppxfer[] then we
+ * also need to increment the
+ * "setup_refcount":
+ */
+ info->setup_refcount++;
+
+ /*
+ * Transfer is successfully setup and
+ * can be used:
+ */
+ ppxfer[n] = xfer;
+ }
+
+ /* check for error */
+ if (parm.err)
+ goto done;
+ }
+
+ if (buf || parm.err) {
+ goto done;
+ }
+ if (refcount == 0) {
+ /* no transfers - nothing to do ! */
+ goto done;
+ }
+ /* align data properly */
+ parm.size[0] += ((-parm.size[0]) & (USB_HOST_ALIGN - 1));
+
+ /* store offset temporarily */
+ parm.size[1] = parm.size[0];
+
+ /*
+ * The number of DMA tags required depends on
+ * the number of endpoints. The current estimate
+ * for maximum number of DMA tags per endpoint
+ * is two.
+ */
+ parm.dma_tag_max += 2 * MIN(n_setup, USB_EP_MAX);
+
+ /*
+ * DMA tags for QH, TD, Data and more.
+ */
+ parm.dma_tag_max += 8;
+
+ parm.dma_tag_p += parm.dma_tag_max;
+
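+		/*
+		 * During the sizing round "parm.dma_tag_p" and the
+		 * other area pointers start at NULL and are only
+		 * incremented, so subtracting a NULL pointer below
+		 * yields the number of bytes to reserve for each
+		 * memory area:
+		 */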
+ parm.size[0] += ((uint8_t *)parm.dma_tag_p) -
+ ((uint8_t *)0);
+
+ /* align data properly */
+ parm.size[0] += ((-parm.size[0]) & (USB_HOST_ALIGN - 1));
+
+ /* store offset temporarily */
+ parm.size[3] = parm.size[0];
+
+ parm.size[0] += ((uint8_t *)parm.dma_page_ptr) -
+ ((uint8_t *)0);
+
+ /* align data properly */
+ parm.size[0] += ((-parm.size[0]) & (USB_HOST_ALIGN - 1));
+
+ /* store offset temporarily */
+ parm.size[4] = parm.size[0];
+
+ parm.size[0] += ((uint8_t *)parm.dma_page_cache_ptr) -
+ ((uint8_t *)0);
+
+ /* store end offset temporarily */
+ parm.size[5] = parm.size[0];
+
+ parm.size[0] += ((uint8_t *)parm.xfer_page_cache_ptr) -
+ ((uint8_t *)0);
+
+ /* store end offset temporarily */
+
+ parm.size[2] = parm.size[0];
+
+ /* align data properly */
+ parm.size[0] += ((-parm.size[0]) & (USB_HOST_ALIGN - 1));
+
+ parm.size[6] = parm.size[0];
+
+ parm.size[0] += ((uint8_t *)parm.xfer_length_ptr) -
+ ((uint8_t *)0);
+
+ /* align data properly */
+ parm.size[0] += ((-parm.size[0]) & (USB_HOST_ALIGN - 1));
+
+ /* allocate zeroed memory */
+ buf = malloc(parm.size[0], M_USB, M_WAITOK | M_ZERO);
+
+ if (buf == NULL) {
+ parm.err = USB_ERR_NOMEM;
+ DPRINTFN(0, "cannot allocate memory block for "
+ "configuration (%d bytes)\n",
+ parm.size[0]);
+ goto done;
+ }
+ parm.dma_tag_p = USB_ADD_BYTES(buf, parm.size[1]);
+ parm.dma_page_ptr = USB_ADD_BYTES(buf, parm.size[3]);
+ parm.dma_page_cache_ptr = USB_ADD_BYTES(buf, parm.size[4]);
+ parm.xfer_page_cache_ptr = USB_ADD_BYTES(buf, parm.size[5]);
+ parm.xfer_length_ptr = USB_ADD_BYTES(buf, parm.size[6]);
+ }
+
+done:
+ if (buf) {
+ if (info->setup_refcount == 0) {
+ /*
+ * "usbd_transfer_unsetup_sub" will unlock
+ * the bus mutex before returning !
+ */
+ USB_BUS_LOCK(info->bus);
+
+ /* something went wrong */
+ usbd_transfer_unsetup_sub(info, 0);
+ }
+ }
+ if (parm.err) {
+ usbd_transfer_unsetup(ppxfer, n_setup);
+ }
+ return (parm.err);
+}
+
+/*------------------------------------------------------------------------*
+ * usbd_transfer_unsetup_sub - factored out code
+ *------------------------------------------------------------------------*/
+static void
+usbd_transfer_unsetup_sub(struct usb_xfer_root *info, uint8_t needs_delay)
+{
+ struct usb_page_cache *pc;
+
+ USB_BUS_LOCK_ASSERT(info->bus, MA_OWNED);
+
+ /* wait for any outstanding DMA operations */
+
+ if (needs_delay) {
+ usb_timeout_t temp;
+ temp = usbd_get_dma_delay(info->udev);
+ if (temp != 0) {
+ usb_pause_mtx(&info->bus->bus_mtx,
+ USB_MS_TO_TICKS(temp));
+ }
+ }
+
+ /* make sure that our done messages are not queued anywhere */
+ usb_proc_mwait(info->done_p, &info->done_m[0], &info->done_m[1]);
+
+ USB_BUS_UNLOCK(info->bus);
+
+#if USB_HAVE_BUSDMA
+ /* free DMA'able memory, if any */
+ pc = info->dma_page_cache_start;
+ while (pc != info->dma_page_cache_end) {
+ usb_pc_free_mem(pc);
+ pc++;
+ }
+
+ /* free DMA maps in all "xfer->frbuffers" */
+ pc = info->xfer_page_cache_start;
+ while (pc != info->xfer_page_cache_end) {
+ usb_pc_dmamap_destroy(pc);
+ pc++;
+ }
+
+ /* free all DMA tags */
+ usb_dma_tag_unsetup(&info->dma_parent_tag);
+#endif
+
+ cv_destroy(&info->cv_drain);
+
+ /*
+	 * free the "memory_base" last, since the "info" structure is
+	 * contained within the "memory_base"!
+ */
+ free(info->memory_base, M_USB);
+}
+
+/*------------------------------------------------------------------------*
+ * usbd_transfer_unsetup - unsetup/free an array of USB transfers
+ *
+ * NOTE: All USB transfers in progress will get called back passing
+ * the error code "USB_ERR_CANCELLED" before this function
+ * returns.
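+ *
+ * Usage sketch (hypothetical; assumes "sc->xfer" holds two transfers
+ * allocated by a previous "usbd_transfer_setup()" call):
+ *
+ *	usbd_transfer_unsetup(sc->xfer, 2);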
+ *------------------------------------------------------------------------*/
+void
+usbd_transfer_unsetup(struct usb_xfer **pxfer, uint16_t n_setup)
+{
+ struct usb_xfer *xfer;
+ struct usb_xfer_root *info;
+ uint8_t needs_delay = 0;
+
+ WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
+ "usbd_transfer_unsetup can sleep!");
+
+ while (n_setup--) {
+ xfer = pxfer[n_setup];
+
+ if (xfer == NULL)
+ continue;
+
+ info = xfer->xroot;
+
+ USB_XFER_LOCK(xfer);
+ USB_BUS_LOCK(info->bus);
+
+ /*
+ * HINT: when you start/stop a transfer, it might be a
+ * good idea to directly use the "pxfer[]" structure:
+ *
+ * usbd_transfer_start(sc->pxfer[0]);
+ * usbd_transfer_stop(sc->pxfer[0]);
+ *
+ * That way, if your code has many parts that will not
+ * stop running under the same lock, in other words
+ * "xfer_mtx", the usbd_transfer_start and
+ * usbd_transfer_stop functions will simply return
+ * when they detect a NULL pointer argument.
+ *
+ * To avoid any races we clear the "pxfer[]" pointer
+ * while holding the private mutex of the driver:
+ */
+ pxfer[n_setup] = NULL;
+
+ USB_BUS_UNLOCK(info->bus);
+ USB_XFER_UNLOCK(xfer);
+
+ usbd_transfer_drain(xfer);
+
+#if USB_HAVE_BUSDMA
+ if (xfer->flags_int.bdma_enable)
+ needs_delay = 1;
+#endif
+ /*
+ * NOTE: default endpoint does not have an
+ * interface, even if endpoint->iface_index == 0
+ */
+ USB_BUS_LOCK(info->bus);
+ xfer->endpoint->refcount_alloc--;
+ USB_BUS_UNLOCK(info->bus);
+
+ usb_callout_drain(&xfer->timeout_handle);
+
+ USB_BUS_LOCK(info->bus);
+
+ USB_ASSERT(info->setup_refcount != 0, ("Invalid setup "
+ "reference count\n"));
+
+ info->setup_refcount--;
+
+ if (info->setup_refcount == 0) {
+ usbd_transfer_unsetup_sub(info,
+ needs_delay);
+ } else {
+ USB_BUS_UNLOCK(info->bus);
+ }
+ }
+}
+
+/*------------------------------------------------------------------------*
+ * usbd_control_transfer_init - factored out code
+ *
+ * In USB Device Mode we have to wait for the SETUP packet, which
+ * contains the "struct usb_device_request" structure, before we can
+ * transfer any data. In USB Host Mode we already have the SETUP
+ * packet at the moment the USB transfer is started. This leads us to
+ * having to setup the USB transfer at two different points in
+ * time. This function just contains factored out control transfer
+ * initialisation code, so that we don't duplicate the code.
+ *------------------------------------------------------------------------*/
+static void
+usbd_control_transfer_init(struct usb_xfer *xfer)
+{
+ struct usb_device_request req;
+
+ /* copy out the USB request header */
+
+ usbd_copy_out(xfer->frbuffers, 0, &req, sizeof(req));
+
+ /* setup remainder */
+
+ xfer->flags_int.control_rem = UGETW(req.wLength);
+
+ /* copy direction to endpoint variable */
+
+ xfer->endpointno &= ~(UE_DIR_IN | UE_DIR_OUT);
+ xfer->endpointno |=
+ (req.bmRequestType & UT_READ) ? UE_DIR_IN : UE_DIR_OUT;
+}
+
+/*------------------------------------------------------------------------*
+ * usbd_setup_ctrl_transfer
+ *
+ * This function handles initialisation of control transfers. Control
+ * transfers are special in that regard that they can both transmit
+ * and receive data.
+ *
+ * Return values:
+ * 0: Success
+ * Else: Failure
+ *------------------------------------------------------------------------*/
+static int
+usbd_setup_ctrl_transfer(struct usb_xfer *xfer)
+{
+ usb_frlength_t len;
+
+ /* Check for control endpoint stall */
+ if (xfer->flags.stall_pipe && xfer->flags_int.control_act) {
+ /* the control transfer is no longer active */
+ xfer->flags_int.control_stall = 1;
+ xfer->flags_int.control_act = 0;
+ } else {
+ /* don't stall control transfer by default */
+ xfer->flags_int.control_stall = 0;
+ }
+
+ /* Check for invalid number of frames */
+ if (xfer->nframes > 2) {
+ /*
+ * If you need to split a control transfer, you
+ * have to do one part at a time. Only with
+		 * non-control transfers can you do multiple
+		 * parts at a time.
+ */
+ DPRINTFN(0, "Too many frames: %u\n",
+ (unsigned int)xfer->nframes);
+ goto error;
+ }
+
+ /*
+ * Check if there is a control
+ * transfer in progress:
+ */
+ if (xfer->flags_int.control_act) {
+
+ if (xfer->flags_int.control_hdr) {
+
+ /* clear send header flag */
+
+ xfer->flags_int.control_hdr = 0;
+
+ /* setup control transfer */
+ if (xfer->flags_int.usb_mode == USB_MODE_DEVICE) {
+ usbd_control_transfer_init(xfer);
+ }
+ }
+ /* get data length */
+
+ len = xfer->sumlen;
+
+ } else {
+
+ /* the size of the SETUP structure is hardcoded ! */
+
+ if (xfer->frlengths[0] != sizeof(struct usb_device_request)) {
+ DPRINTFN(0, "Wrong framelength %u != %zu\n",
+ xfer->frlengths[0], sizeof(struct
+ usb_device_request));
+ goto error;
+ }
+ /* check USB mode */
+ if (xfer->flags_int.usb_mode == USB_MODE_DEVICE) {
+
+ /* check number of frames */
+ if (xfer->nframes != 1) {
+ /*
+ * We need to receive the setup
+ * message first so that we know the
+ * data direction!
+ */
+ DPRINTF("Misconfigured transfer\n");
+ goto error;
+ }
+ /*
+ * Set a dummy "control_rem" value. This
+ * variable will be overwritten later by a
+ * call to "usbd_control_transfer_init()" !
+ */
+ xfer->flags_int.control_rem = 0xFFFF;
+ } else {
+
+ /* setup "endpoint" and "control_rem" */
+
+ usbd_control_transfer_init(xfer);
+ }
+
+ /* set transfer-header flag */
+
+ xfer->flags_int.control_hdr = 1;
+
+ /* get data length */
+
+ len = (xfer->sumlen - sizeof(struct usb_device_request));
+ }
+
+ /* check if there is a length mismatch */
+
+ if (len > xfer->flags_int.control_rem) {
+ DPRINTFN(0, "Length (%d) greater than "
+ "remaining length (%d)\n", len,
+ xfer->flags_int.control_rem);
+ goto error;
+ }
+ /* check if we are doing a short transfer */
+
+ if (xfer->flags.force_short_xfer) {
+ xfer->flags_int.control_rem = 0;
+ } else {
+ if ((len != xfer->max_data_length) &&
+ (len != xfer->flags_int.control_rem) &&
+ (xfer->nframes != 1)) {
+ DPRINTFN(0, "Short control transfer without "
+ "force_short_xfer set\n");
+ goto error;
+ }
+ xfer->flags_int.control_rem -= len;
+ }
+
+ /* the status part is executed when "control_act" is 0 */
+
+ if ((xfer->flags_int.control_rem > 0) ||
+ (xfer->flags.manual_status)) {
+ /* don't execute the STATUS stage yet */
+ xfer->flags_int.control_act = 1;
+
+ /* sanity check */
+ if ((!xfer->flags_int.control_hdr) &&
+ (xfer->nframes == 1)) {
+ /*
+ * This is not a valid operation!
+ */
+ DPRINTFN(0, "Invalid parameter "
+ "combination\n");
+ goto error;
+ }
+ } else {
+ /* time to execute the STATUS stage */
+ xfer->flags_int.control_act = 0;
+ }
+ return (0); /* success */
+
+error:
+ return (1); /* failure */
+}
+
+/*------------------------------------------------------------------------*
+ * usbd_transfer_submit - start USB hardware for the given transfer
+ *
+ * This function should only be called from the USB callback.
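+ *
+ * Typical use inside a USB callback (sketch):
+ *
+ *	switch (USB_GET_STATE(xfer)) {
+ *	case USB_ST_SETUP:
+ *		usbd_xfer_set_frame_len(xfer, 0, usbd_xfer_max_len(xfer));
+ *		usbd_transfer_submit(xfer);
+ *		break;
+ *	case USB_ST_TRANSFERRED:
+ *	default:
+ *		break;
+ *	}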
+ *------------------------------------------------------------------------*/
+void
+usbd_transfer_submit(struct usb_xfer *xfer)
+{
+ struct usb_xfer_root *info;
+ struct usb_bus *bus;
+ usb_frcount_t x;
+
+ info = xfer->xroot;
+ bus = info->bus;
+
+ DPRINTF("xfer=%p, endpoint=%p, nframes=%d, dir=%s\n",
+ xfer, xfer->endpoint, xfer->nframes, USB_GET_DATA_ISREAD(xfer) ?
+ "read" : "write");
+
+#ifdef USB_DEBUG
+ if (USB_DEBUG_VAR > 0) {
+ USB_BUS_LOCK(bus);
+
+ usb_dump_endpoint(xfer->endpoint);
+
+ USB_BUS_UNLOCK(bus);
+ }
+#endif
+
+ USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
+ USB_BUS_LOCK_ASSERT(bus, MA_NOTOWNED);
+
+ /* Only open the USB transfer once! */
+ if (!xfer->flags_int.open) {
+ xfer->flags_int.open = 1;
+
+ DPRINTF("open\n");
+
+ USB_BUS_LOCK(bus);
+ (xfer->endpoint->methods->open) (xfer);
+ USB_BUS_UNLOCK(bus);
+ }
+ /* set "transferring" flag */
+ xfer->flags_int.transferring = 1;
+
+#if USB_HAVE_POWERD
+ /* increment power reference */
+ usbd_transfer_power_ref(xfer, 1);
+#endif
+ /*
+ * Check if the transfer is waiting on a queue, most
+ * frequently the "done_q":
+ */
+ if (xfer->wait_queue) {
+ USB_BUS_LOCK(bus);
+ usbd_transfer_dequeue(xfer);
+ USB_BUS_UNLOCK(bus);
+ }
+ /* clear "did_dma_delay" flag */
+ xfer->flags_int.did_dma_delay = 0;
+
+ /* clear "did_close" flag */
+ xfer->flags_int.did_close = 0;
+
+#if USB_HAVE_BUSDMA
+ /* clear "bdma_setup" flag */
+ xfer->flags_int.bdma_setup = 0;
+#endif
+ /* by default we cannot cancel any USB transfer immediately */
+ xfer->flags_int.can_cancel_immed = 0;
+
+ /* clear lengths and frame counts by default */
+ xfer->sumlen = 0;
+ xfer->actlen = 0;
+ xfer->aframes = 0;
+
+ /* clear any previous errors */
+ xfer->error = 0;
+
+ /* Check if the device is still alive */
+ if (info->udev->state < USB_STATE_POWERED) {
+ USB_BUS_LOCK(bus);
+ /*
+		 * Must return cancelled error code, else
+ * device drivers can hang.
+ */
+ usbd_transfer_done(xfer, USB_ERR_CANCELLED);
+ USB_BUS_UNLOCK(bus);
+ return;
+ }
+
+ /* sanity check */
+ if (xfer->nframes == 0) {
+ if (xfer->flags.stall_pipe) {
+ /*
+ * Special case - want to stall without transferring
+ * any data:
+ */
+ DPRINTF("xfer=%p nframes=0: stall "
+ "or clear stall!\n", xfer);
+ USB_BUS_LOCK(bus);
+ xfer->flags_int.can_cancel_immed = 1;
+ /* start the transfer */
+ usb_command_wrapper(&xfer->endpoint->endpoint_q, xfer);
+ USB_BUS_UNLOCK(bus);
+ return;
+ }
+ USB_BUS_LOCK(bus);
+ usbd_transfer_done(xfer, USB_ERR_INVAL);
+ USB_BUS_UNLOCK(bus);
+ return;
+ }
+ /* compute total transfer length */
+
+ for (x = 0; x != xfer->nframes; x++) {
+ xfer->sumlen += xfer->frlengths[x];
+ if (xfer->sumlen < xfer->frlengths[x]) {
+ /* length wrapped around */
+ USB_BUS_LOCK(bus);
+ usbd_transfer_done(xfer, USB_ERR_INVAL);
+ USB_BUS_UNLOCK(bus);
+ return;
+ }
+ }
+
+ /* clear some internal flags */
+
+ xfer->flags_int.short_xfer_ok = 0;
+ xfer->flags_int.short_frames_ok = 0;
+
+ /* check if this is a control transfer */
+
+ if (xfer->flags_int.control_xfr) {
+
+ if (usbd_setup_ctrl_transfer(xfer)) {
+ USB_BUS_LOCK(bus);
+ usbd_transfer_done(xfer, USB_ERR_STALLED);
+ USB_BUS_UNLOCK(bus);
+ return;
+ }
+ }
+ /*
+ * Setup filtered version of some transfer flags,
+ * in case of data read direction
+ */
+ if (USB_GET_DATA_ISREAD(xfer)) {
+
+ if (xfer->flags.short_frames_ok) {
+ xfer->flags_int.short_xfer_ok = 1;
+ xfer->flags_int.short_frames_ok = 1;
+ } else if (xfer->flags.short_xfer_ok) {
+ xfer->flags_int.short_xfer_ok = 1;
+
+ /* check for control transfer */
+ if (xfer->flags_int.control_xfr) {
+ /*
+ * 1) Control transfers do not support
+ * reception of multiple short USB
+				 * frames, neither in host mode nor in
+				 * device side mode, with one exception:
+ *
+ * 2) Due to sometimes buggy device
+ * side firmware we need to do a
+ * STATUS stage in case of short
+ * control transfers in USB host mode.
+ * The STATUS stage then becomes the
+ * "alt_next" to the DATA stage.
+ */
+ xfer->flags_int.short_frames_ok = 1;
+ }
+ }
+ }
+ /*
+ * Check if BUS-DMA support is enabled and try to load virtual
+ * buffers into DMA, if any:
+ */
+#if USB_HAVE_BUSDMA
+ if (xfer->flags_int.bdma_enable) {
+ /* insert the USB transfer last in the BUS-DMA queue */
+ usb_command_wrapper(&xfer->xroot->dma_q, xfer);
+ return;
+ }
+#endif
+ /*
+ * Enter the USB transfer into the Host Controller or
+ * Device Controller schedule:
+ */
+ usbd_pipe_enter(xfer);
+}
+
+/*------------------------------------------------------------------------*
+ * usbd_pipe_enter - factored out code
+ *------------------------------------------------------------------------*/
+void
+usbd_pipe_enter(struct usb_xfer *xfer)
+{
+ struct usb_endpoint *ep;
+
+ USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
+
+ USB_BUS_LOCK(xfer->xroot->bus);
+
+ ep = xfer->endpoint;
+
+ DPRINTF("enter\n");
+
+ /* enter the transfer */
+ (ep->methods->enter) (xfer);
+
+ xfer->flags_int.can_cancel_immed = 1;
+
+ /* check for transfer error */
+ if (xfer->error) {
+ /* some error has happened */
+ usbd_transfer_done(xfer, 0);
+ USB_BUS_UNLOCK(xfer->xroot->bus);
+ return;
+ }
+
+ /* start the transfer */
+ usb_command_wrapper(&ep->endpoint_q, xfer);
+ USB_BUS_UNLOCK(xfer->xroot->bus);
+}
+
+/*------------------------------------------------------------------------*
+ * usbd_transfer_start - start a USB transfer
+ *
+ * NOTE: Calling this function more than one time will only
+ * result in a single transfer start, until the USB transfer
+ * completes.
+ *------------------------------------------------------------------------*/
+void
+usbd_transfer_start(struct usb_xfer *xfer)
+{
+ if (xfer == NULL) {
+ /* transfer is gone */
+ return;
+ }
+ USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
+
+ /* mark the USB transfer started */
+
+ if (!xfer->flags_int.started) {
+ /* lock the BUS lock to avoid races updating flags_int */
+ USB_BUS_LOCK(xfer->xroot->bus);
+ xfer->flags_int.started = 1;
+ USB_BUS_UNLOCK(xfer->xroot->bus);
+ }
+ /* check if the USB transfer callback is already transferring */
+
+ if (xfer->flags_int.transferring) {
+ return;
+ }
+ USB_BUS_LOCK(xfer->xroot->bus);
+ /* call the USB transfer callback */
+ usbd_callback_ss_done_defer(xfer);
+ USB_BUS_UNLOCK(xfer->xroot->bus);
+}
+
+/*------------------------------------------------------------------------*
+ * usbd_transfer_stop - stop a USB transfer
+ *
+ * NOTE: Calling this function more than one time will only
+ * result in a single transfer stop.
+ * NOTE: When this function returns it is not safe to free nor
+ * reuse any DMA buffers. See "usbd_transfer_drain()".
+ *------------------------------------------------------------------------*/
+void
+usbd_transfer_stop(struct usb_xfer *xfer)
+{
+ struct usb_endpoint *ep;
+
+ if (xfer == NULL) {
+ /* transfer is gone */
+ return;
+ }
+ USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
+
+ /* check if the USB transfer was ever opened */
+
+ if (!xfer->flags_int.open) {
+ if (xfer->flags_int.started) {
+ /* nothing to do except clearing the "started" flag */
+ /* lock the BUS lock to avoid races updating flags_int */
+ USB_BUS_LOCK(xfer->xroot->bus);
+ xfer->flags_int.started = 0;
+ USB_BUS_UNLOCK(xfer->xroot->bus);
+ }
+ return;
+ }
+ /* try to stop the current USB transfer */
+
+ USB_BUS_LOCK(xfer->xroot->bus);
+ /* override any previous error */
+ xfer->error = USB_ERR_CANCELLED;
+
+ /*
+ * Clear "open" and "started" when both private and USB lock
+ * is locked so that we don't get a race updating "flags_int"
+ */
+ xfer->flags_int.open = 0;
+ xfer->flags_int.started = 0;
+
+ /*
+ * Check if we can cancel the USB transfer immediately.
+ */
+ if (xfer->flags_int.transferring) {
+ if (xfer->flags_int.can_cancel_immed &&
+ (!xfer->flags_int.did_close)) {
+ DPRINTF("close\n");
+ /*
+ * The following will lead to an USB_ERR_CANCELLED
+ * error code being passed to the USB callback.
+ */
+ (xfer->endpoint->methods->close) (xfer);
+ /* only close once */
+ xfer->flags_int.did_close = 1;
+ } else {
+ /* need to wait for the next done callback */
+ }
+ } else {
+ DPRINTF("close\n");
+
+ /* close here and now */
+ (xfer->endpoint->methods->close) (xfer);
+
+ /*
+ * Any additional DMA delay is done by
+ * "usbd_transfer_unsetup()".
+ */
+
+ /*
+ * Special case. Check if we need to restart a blocked
+ * endpoint.
+ */
+ ep = xfer->endpoint;
+
+ /*
+ * If the current USB transfer is completing we need
+ * to start the next one:
+ */
+ if (ep->endpoint_q.curr == xfer) {
+ usb_command_wrapper(&ep->endpoint_q, NULL);
+ }
+ }
+
+ USB_BUS_UNLOCK(xfer->xroot->bus);
+}
+
+/*------------------------------------------------------------------------*
+ * usbd_transfer_pending
+ *
+ * This function will check whether a USB transfer is pending, which
+ * is a little bit complicated!
+ * Return values:
+ * 0: Not pending
+ * 1: Pending: The USB transfer will receive a callback in the future.
+ *------------------------------------------------------------------------*/
+uint8_t
+usbd_transfer_pending(struct usb_xfer *xfer)
+{
+ struct usb_xfer_root *info;
+ struct usb_xfer_queue *pq;
+
+ if (xfer == NULL) {
+ /* transfer is gone */
+ return (0);
+ }
+ USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
+
+ if (xfer->flags_int.transferring) {
+ /* trivial case */
+ return (1);
+ }
+ USB_BUS_LOCK(xfer->xroot->bus);
+ if (xfer->wait_queue) {
+ /* we are waiting on a queue somewhere */
+ USB_BUS_UNLOCK(xfer->xroot->bus);
+ return (1);
+ }
+ info = xfer->xroot;
+ pq = &info->done_q;
+
+ if (pq->curr == xfer) {
+ /* we are currently scheduled for callback */
+ USB_BUS_UNLOCK(xfer->xroot->bus);
+ return (1);
+ }
+ /* we are not pending */
+ USB_BUS_UNLOCK(xfer->xroot->bus);
+ return (0);
+}
+
+/*------------------------------------------------------------------------*
+ * usbd_transfer_drain
+ *
+ * This function will stop the USB transfer and wait for any
+ * additional BUS-DMA and HW-DMA operations to complete. Buffers that
+ * are loaded into DMA can safely be freed or reused after this
+ * function has returned.
+ *------------------------------------------------------------------------*/
+void
+usbd_transfer_drain(struct usb_xfer *xfer)
+{
+ WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
+ "usbd_transfer_drain can sleep!");
+
+ if (xfer == NULL) {
+ /* transfer is gone */
+ return;
+ }
+ if (xfer->xroot->xfer_mtx != &Giant) {
+ USB_XFER_LOCK_ASSERT(xfer, MA_NOTOWNED);
+ }
+ USB_XFER_LOCK(xfer);
+
+ usbd_transfer_stop(xfer);
+
+ while (usbd_transfer_pending(xfer) ||
+ xfer->flags_int.doing_callback) {
+
+ /*
+ * It is allowed that the callback can drop its
+ * transfer mutex. In that case checking only
+ * "usbd_transfer_pending()" is not enough to tell if
+ * the USB transfer is fully drained. We also need to
+ * check the internal "doing_callback" flag.
+ */
+ xfer->flags_int.draining = 1;
+
+ /*
+ * Wait until the current outstanding USB
+ * transfer is complete !
+ */
+ cv_wait(&xfer->xroot->cv_drain, xfer->xroot->xfer_mtx);
+ }
+ USB_XFER_UNLOCK(xfer);
+}
+
+struct usb_page_cache *
+usbd_xfer_get_frame(struct usb_xfer *xfer, usb_frcount_t frindex)
+{
+ KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
+
+ return (&xfer->frbuffers[frindex]);
+}
+
+/*------------------------------------------------------------------------*
+ * usbd_xfer_get_fps_shift
+ *
+ * The following function is only useful for isochronous transfers. It
+ * returns how many times the frame execution rate has been shifted
+ * down.
+ *
+ * Return value:
+ * Success: 0..3
+ * Failure: 0
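+ *
+ * For example, at high speed a "fps_shift" of 3 means that the
+ * isochronous schedule advances every 8th microframe, i.e. once
+ * per millisecond.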
+ *------------------------------------------------------------------------*/
+uint8_t
+usbd_xfer_get_fps_shift(struct usb_xfer *xfer)
+{
+ return (xfer->fps_shift);
+}
+
+usb_frlength_t
+usbd_xfer_frame_len(struct usb_xfer *xfer, usb_frcount_t frindex)
+{
+ KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
+
+ return (xfer->frlengths[frindex]);
+}
+
+/*------------------------------------------------------------------------*
+ * usbd_xfer_set_frame_data
+ *
+ * This function sets the pointer of the buffer that should be
+ * loaded directly into DMA for the given USB frame. Passing "ptr"
+ * equal to NULL while the corresponding "frlength" is greater
+ * than zero gives undefined results!
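+ *
+ * Typical use in a callback's setup state (sketch; "sc->buf" and
+ * "len" are driver-provided):
+ *
+ *	usbd_xfer_set_frame_data(xfer, 0, sc->buf, len);
+ *	usbd_transfer_submit(xfer);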
+ *------------------------------------------------------------------------*/
+void
+usbd_xfer_set_frame_data(struct usb_xfer *xfer, usb_frcount_t frindex,
+ void *ptr, usb_frlength_t len)
+{
+ KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
+
+ /* set virtual address to load and length */
+ xfer->frbuffers[frindex].buffer = ptr;
+ usbd_xfer_set_frame_len(xfer, frindex, len);
+}
+
+void
+usbd_xfer_frame_data(struct usb_xfer *xfer, usb_frcount_t frindex,
+ void **ptr, int *len)
+{
+ KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
+
+ if (ptr != NULL)
+ *ptr = xfer->frbuffers[frindex].buffer;
+ if (len != NULL)
+ *len = xfer->frlengths[frindex];
+}
+
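+/*------------------------------------------------------------------------*
+ * usbd_xfer_status
+ *
+ * This function returns the current transfer status fields. Any of
+ * the output pointers may be NULL, for example:
+ *
+ *	int actlen;
+ *
+ *	usbd_xfer_status(xfer, &actlen, NULL, NULL, NULL);
+ *------------------------------------------------------------------------*/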
+void
+usbd_xfer_status(struct usb_xfer *xfer, int *actlen, int *sumlen, int *aframes,
+ int *nframes)
+{
+ if (actlen != NULL)
+ *actlen = xfer->actlen;
+ if (sumlen != NULL)
+ *sumlen = xfer->sumlen;
+ if (aframes != NULL)
+ *aframes = xfer->aframes;
+ if (nframes != NULL)
+ *nframes = xfer->nframes;
+}
+
+/*------------------------------------------------------------------------*
+ * usbd_xfer_set_frame_offset
+ *
+ * This function sets the frame data buffer offset relative to the beginning
+ * of the USB DMA buffer allocated for this USB transfer.
+ *------------------------------------------------------------------------*/
+void
+usbd_xfer_set_frame_offset(struct usb_xfer *xfer, usb_frlength_t offset,
+ usb_frcount_t frindex)
+{
+ KASSERT(!xfer->flags.ext_buffer, ("Cannot offset data frame "
+ "when the USB buffer is external\n"));
+ KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
+
+ /* set virtual address to load */
+ xfer->frbuffers[frindex].buffer =
+ USB_ADD_BYTES(xfer->local_buffer, offset);
+}
+
+void
+usbd_xfer_set_interval(struct usb_xfer *xfer, int i)
+{
+ xfer->interval = i;
+}
+
+void
+usbd_xfer_set_timeout(struct usb_xfer *xfer, int t)
+{
+ xfer->timeout = t;
+}
+
+void
+usbd_xfer_set_frames(struct usb_xfer *xfer, usb_frcount_t n)
+{
+ xfer->nframes = n;
+}
+
+usb_frcount_t
+usbd_xfer_max_frames(struct usb_xfer *xfer)
+{
+ return (xfer->max_frame_count);
+}
+
+usb_frlength_t
+usbd_xfer_max_len(struct usb_xfer *xfer)
+{
+ return (xfer->max_data_length);
+}
+
+usb_frlength_t
+usbd_xfer_max_framelen(struct usb_xfer *xfer)
+{
+ return (xfer->max_frame_size);
+}
+
+void
+usbd_xfer_set_frame_len(struct usb_xfer *xfer, usb_frcount_t frindex,
+ usb_frlength_t len)
+{
+ KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
+
+ xfer->frlengths[frindex] = len;
+}
+
+/*------------------------------------------------------------------------*
+ * usb_callback_proc - factored out code
+ *
+ * This function performs USB callbacks.
+ *------------------------------------------------------------------------*/
+static void
+usb_callback_proc(struct usb_proc_msg *_pm)
+{
+ struct usb_done_msg *pm = (void *)_pm;
+ struct usb_xfer_root *info = pm->xroot;
+
+ /* Change locking order */
+ USB_BUS_UNLOCK(info->bus);
+
+ /*
+ * We exploit the fact that the mutex is the same for all
+ * callbacks that will be called from this thread:
+ */
+ mtx_lock(info->xfer_mtx);
+ USB_BUS_LOCK(info->bus);
+
+ /* Continue where we lost track */
+ usb_command_wrapper(&info->done_q,
+ info->done_q.curr);
+
+ mtx_unlock(info->xfer_mtx);
+}
+
+/*------------------------------------------------------------------------*
+ * usbd_callback_ss_done_defer
+ *
+ * This function will defer the start, stop and done callback to the
+ * correct thread.
+ *------------------------------------------------------------------------*/
+static void
+usbd_callback_ss_done_defer(struct usb_xfer *xfer)
+{
+ struct usb_xfer_root *info = xfer->xroot;
+ struct usb_xfer_queue *pq = &info->done_q;
+
+ USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED);
+
+ if (pq->curr != xfer) {
+ usbd_transfer_enqueue(pq, xfer);
+ }
+ if (!pq->recurse_1) {
+
+ /*
+		 * We have to postpone the callback due to the fact that we
+ * will have a Lock Order Reversal, LOR, if we try to
+ * proceed !
+ */
+ if (usb_proc_msignal(info->done_p,
+ &info->done_m[0], &info->done_m[1])) {
+ /* ignore */
+ }
+ } else {
+ /* clear second recurse flag */
+ pq->recurse_2 = 0;
+ }
+	return;
+}
+
+/*------------------------------------------------------------------------*
+ * usbd_callback_wrapper
+ *
+ * This is a wrapper for USB callbacks. This wrapper does some
+ * auto-magic things like figuring out if we can call the callback
+ * directly from the current context or if we need to wakeup the
+ * interrupt process.
+ *------------------------------------------------------------------------*/
+static void
+usbd_callback_wrapper(struct usb_xfer_queue *pq)
+{
+ struct usb_xfer *xfer = pq->curr;
+ struct usb_xfer_root *info = xfer->xroot;
+
+ USB_BUS_LOCK_ASSERT(info->bus, MA_OWNED);
+ if (!mtx_owned(info->xfer_mtx)) {
+ /*
+ * Cases that end up here:
+ *
+ * 5) HW interrupt done callback or other source.
+ */
+ DPRINTFN(3, "case 5\n");
+
+ /*
+		 * We have to postpone the callback due to the fact that we
+ * will have a Lock Order Reversal, LOR, if we try to
+ * proceed !
+ */
+ if (usb_proc_msignal(info->done_p,
+ &info->done_m[0], &info->done_m[1])) {
+ /* ignore */
+ }
+ return;
+ }
+ /*
+ * Cases that end up here:
+ *
+ * 1) We are starting a transfer
+ * 2) We are prematurely calling back a transfer
+ * 3) We are stopping a transfer
+ * 4) We are doing an ordinary callback
+ */
+ DPRINTFN(3, "case 1-4\n");
+ /* get next USB transfer in the queue */
+ info->done_q.curr = NULL;
+
+ /* set flag in case of drain */
+ xfer->flags_int.doing_callback = 1;
+
+ USB_BUS_UNLOCK(info->bus);
+ USB_BUS_LOCK_ASSERT(info->bus, MA_NOTOWNED);
+
+ /* set correct USB state for callback */
+ if (!xfer->flags_int.transferring) {
+ xfer->usb_state = USB_ST_SETUP;
+ if (!xfer->flags_int.started) {
+ /* we got stopped before we even got started */
+ USB_BUS_LOCK(info->bus);
+ goto done;
+ }
+ } else {
+
+ if (usbd_callback_wrapper_sub(xfer)) {
+ /* the callback has been deferred */
+ USB_BUS_LOCK(info->bus);
+ goto done;
+ }
+#if USB_HAVE_POWERD
+ /* decrement power reference */
+ usbd_transfer_power_ref(xfer, -1);
+#endif
+ xfer->flags_int.transferring = 0;
+
+ if (xfer->error) {
+ xfer->usb_state = USB_ST_ERROR;
+ } else {
+ /* set transferred state */
+ xfer->usb_state = USB_ST_TRANSFERRED;
+#if USB_HAVE_BUSDMA
+ /* sync DMA memory, if any */
+ if (xfer->flags_int.bdma_enable &&
+ (!xfer->flags_int.bdma_no_post_sync)) {
+ usb_bdma_post_sync(xfer);
+ }
+#endif
+ }
+ }
+
+ /* call processing routine */
+ (xfer->callback) (xfer, xfer->error);
+
+ /* pickup the USB mutex again */
+ USB_BUS_LOCK(info->bus);
+
+ /*
+	 * Check if we got started after we got cancelled, but
+ * before we managed to do the callback.
+ */
+ if ((!xfer->flags_int.open) &&
+ (xfer->flags_int.started) &&
+ (xfer->usb_state == USB_ST_ERROR)) {
+ /* clear flag in case of drain */
+ xfer->flags_int.doing_callback = 0;
+		/* try to loop, but not recursively */
+ usb_command_wrapper(&info->done_q, xfer);
+ return;
+ }
+
+done:
+ /* clear flag in case of drain */
+ xfer->flags_int.doing_callback = 0;
+
+ /*
+ * Check if we are draining.
+ */
+ if (xfer->flags_int.draining &&
+ (!xfer->flags_int.transferring)) {
+ /* "usbd_transfer_drain()" is waiting for end of transfer */
+ xfer->flags_int.draining = 0;
+ cv_broadcast(&info->cv_drain);
+ }
+
+ /* do the next callback, if any */
+ usb_command_wrapper(&info->done_q,
+ info->done_q.curr);
+}
+
+/*------------------------------------------------------------------------*
+ * usb_dma_delay_done_cb
+ *
+ * This function is called when the DMA delay has been executed, and
+ * will make sure that the callback is called to complete the USB
+ * transfer. This code path is usually only used when there is a USB
+ * error like USB_ERR_CANCELLED.
+ *------------------------------------------------------------------------*/
+void
+usb_dma_delay_done_cb(struct usb_xfer *xfer)
+{
+ USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED);
+
+ DPRINTFN(3, "Completed %p\n", xfer);
+
+ /* queue callback for execution, again */
+ usbd_transfer_done(xfer, 0);
+}
+
+/*------------------------------------------------------------------------*
+ * usbd_transfer_dequeue
+ *
+ * - This function is used to remove a USB transfer from a USB
+ * transfer queue.
+ *
+ * - This function can be called multiple times in a row.
+ *------------------------------------------------------------------------*/
+void
+usbd_transfer_dequeue(struct usb_xfer *xfer)
+{
+ struct usb_xfer_queue *pq;
+
+ pq = xfer->wait_queue;
+ if (pq) {
+ TAILQ_REMOVE(&pq->head, xfer, wait_entry);
+ xfer->wait_queue = NULL;
+ }
+}
+
+/*------------------------------------------------------------------------*
+ * usbd_transfer_enqueue
+ *
+ * - This function is used to insert a USB transfer into a USB
+ *   transfer queue.
+ *
+ * - This function can be called multiple times in a row.
+ *------------------------------------------------------------------------*/
+void
+usbd_transfer_enqueue(struct usb_xfer_queue *pq, struct usb_xfer *xfer)
+{
+ /*
+ * Insert the USB transfer into the queue, if it is not
+ * already on a USB transfer queue:
+ */
+ if (xfer->wait_queue == NULL) {
+ xfer->wait_queue = pq;
+ TAILQ_INSERT_TAIL(&pq->head, xfer, wait_entry);
+ }
+}
+
+/*------------------------------------------------------------------------*
+ * usbd_transfer_done
+ *
+ * - This function is used to remove a USB transfer from the BUS-DMA,
+ * pipe or interrupt queue.
+ *
+ * - This function is used to queue the USB transfer on the done
+ * queue.
+ *
+ * - This function is used to stop any USB transfer timeouts.
+ *------------------------------------------------------------------------*/
+void
+usbd_transfer_done(struct usb_xfer *xfer, usb_error_t error)
+{
+ USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED);
+
+ DPRINTF("err=%s\n", usbd_errstr(error));
+
+ /*
+ * If we are not transferring then just return.
+ * This can happen during transfer cancel.
+ */
+ if (!xfer->flags_int.transferring) {
+ DPRINTF("not transferring\n");
+ /* end of control transfer, if any */
+ xfer->flags_int.control_act = 0;
+ return;
+ }
+ /* only set transfer error if not already set */
+ if (!xfer->error) {
+ xfer->error = error;
+ }
+ /* stop any callouts */
+ usb_callout_stop(&xfer->timeout_handle);
+
+ /*
+ * If we are waiting on a queue, just remove the USB transfer
+ * from the queue, if any. We should have the required locks
+ * locked to do the remove when this function is called.
+ */
+ usbd_transfer_dequeue(xfer);
+
+#if USB_HAVE_BUSDMA
+ if (mtx_owned(xfer->xroot->xfer_mtx)) {
+ struct usb_xfer_queue *pq;
+
+ /*
+ * If the private USB lock is not locked, then we assume
+ * that the BUS-DMA load stage has been passed:
+ */
+ pq = &xfer->xroot->dma_q;
+
+ if (pq->curr == xfer) {
+ /* start the next BUS-DMA load, if any */
+ usb_command_wrapper(pq, NULL);
+ }
+ }
+#endif
+ /* keep some statistics */
+ if (xfer->error) {
+ xfer->xroot->bus->stats_err.uds_requests
+ [xfer->endpoint->edesc->bmAttributes & UE_XFERTYPE]++;
+ } else {
+ xfer->xroot->bus->stats_ok.uds_requests
+ [xfer->endpoint->edesc->bmAttributes & UE_XFERTYPE]++;
+ }
+
+ /* call the USB transfer callback */
+ usbd_callback_ss_done_defer(xfer);
+}
+
+/*------------------------------------------------------------------------*
+ * usbd_transfer_start_cb
+ *
+ * This function is called to start the USB transfer when
+ * "xfer->interval" is greater than zero, and and the endpoint type is
+ * BULK or CONTROL.
+ *------------------------------------------------------------------------*/
+static void
+usbd_transfer_start_cb(void *arg)
+{
+ struct usb_xfer *xfer = arg;
+ struct usb_endpoint *ep = xfer->endpoint;
+
+ USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED);
+
+ DPRINTF("start\n");
+
+ /* start the transfer */
+ (ep->methods->start) (xfer);
+
+ xfer->flags_int.can_cancel_immed = 1;
+
+ /* check for error */
+ if (xfer->error) {
+ /* some error has happened */
+ usbd_transfer_done(xfer, 0);
+ }
+}
+
+/*------------------------------------------------------------------------*
+ * usbd_xfer_set_stall
+ *
+ * This function is used to set the stall flag outside the
+ * callback. This function is NULL safe.
+ *------------------------------------------------------------------------*/
+void
+usbd_xfer_set_stall(struct usb_xfer *xfer)
+{
+ if (xfer == NULL) {
+ /* tearing down */
+ return;
+ }
+ USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
+
+ /* avoid any races by locking the USB mutex */
+ USB_BUS_LOCK(xfer->xroot->bus);
+ xfer->flags.stall_pipe = 1;
+ USB_BUS_UNLOCK(xfer->xroot->bus);
+}
+
+int
+usbd_xfer_is_stalled(struct usb_xfer *xfer)
+{
+ return (xfer->endpoint->is_stalled);
+}
+
+/*------------------------------------------------------------------------*
+ * usbd_transfer_clear_stall
+ *
+ * This function is used to clear the stall flag outside the
+ * callback. This function is NULL safe.
+ *------------------------------------------------------------------------*/
+void
+usbd_transfer_clear_stall(struct usb_xfer *xfer)
+{
+ if (xfer == NULL) {
+ /* tearing down */
+ return;
+ }
+ USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
+
+ /* avoid any races by locking the USB mutex */
+ USB_BUS_LOCK(xfer->xroot->bus);
+
+ xfer->flags.stall_pipe = 0;
+
+ USB_BUS_UNLOCK(xfer->xroot->bus);
+}
+
+/*------------------------------------------------------------------------*
+ * usbd_pipe_start
+ *
+ * This function is used to add a USB transfer to the pipe transfer list.
+ *------------------------------------------------------------------------*/
+void
+usbd_pipe_start(struct usb_xfer_queue *pq)
+{
+ struct usb_endpoint *ep;
+ struct usb_xfer *xfer;
+ uint8_t type;
+
+ xfer = pq->curr;
+ ep = xfer->endpoint;
+
+ USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED);
+
+ /*
+ * If the endpoint is already stalled we do nothing !
+ */
+ if (ep->is_stalled) {
+ return;
+ }
+ /*
+ * Check if we are supposed to stall the endpoint:
+ */
+ if (xfer->flags.stall_pipe) {
+ struct usb_device *udev;
+ struct usb_xfer_root *info;
+
+ /* clear stall command */
+ xfer->flags.stall_pipe = 0;
+
+ /* get pointer to USB device */
+ info = xfer->xroot;
+ udev = info->udev;
+
+ /*
+ * Only stall BULK and INTERRUPT endpoints.
+ */
+ type = (ep->edesc->bmAttributes & UE_XFERTYPE);
+ if ((type == UE_BULK) ||
+ (type == UE_INTERRUPT)) {
+ uint8_t did_stall;
+
+ did_stall = 1;
+
+ if (udev->flags.usb_mode == USB_MODE_DEVICE) {
+ (udev->bus->methods->set_stall) (
+ udev, NULL, ep, &did_stall);
+ } else if (udev->ctrl_xfer[1]) {
+ info = udev->ctrl_xfer[1]->xroot;
+ usb_proc_msignal(
+ &info->bus->non_giant_callback_proc,
+ &udev->cs_msg[0], &udev->cs_msg[1]);
+ } else {
+ /* should not happen */
+ DPRINTFN(0, "No stall handler\n");
+ }
+ /*
+ * Check if we should stall. Some USB hardware
+ * handles set- and clear-stall in hardware.
+ */
+ if (did_stall) {
+ /*
+ * The transfer will be continued when
+ * the clear-stall control endpoint
+ * message is received.
+ */
+ ep->is_stalled = 1;
+ return;
+ }
+ } else if (type == UE_ISOCHRONOUS) {
+
+ /*
+ * Make sure any FIFO overflow or other FIFO
+ * error conditions go away by resetting the
+ * endpoint FIFO through the clear stall
+ * method.
+ */
+ if (udev->flags.usb_mode == USB_MODE_DEVICE) {
+ (udev->bus->methods->clear_stall) (udev, ep);
+ }
+ }
+ }
+ /* Set or clear stall complete - special case */
+ if (xfer->nframes == 0) {
+ /* we are complete */
+ xfer->aframes = 0;
+ usbd_transfer_done(xfer, 0);
+ return;
+ }
+ /*
+ * Handled cases:
+ *
+ * 1) Start the first transfer queued.
+ *
+ * 2) Re-start the current USB transfer.
+ */
+ /*
+ * Check if there should be any
+	 * pre-transfer start delay:
+ */
+ if (xfer->interval > 0) {
+ type = (ep->edesc->bmAttributes & UE_XFERTYPE);
+ if ((type == UE_BULK) ||
+ (type == UE_CONTROL)) {
+ usbd_transfer_timeout_ms(xfer,
+ &usbd_transfer_start_cb,
+ xfer->interval);
+ return;
+ }
+ }
+ DPRINTF("start\n");
+
+ /* start USB transfer */
+ (ep->methods->start) (xfer);
+
+ xfer->flags_int.can_cancel_immed = 1;
+
+ /* check for error */
+ if (xfer->error) {
+ /* some error has happened */
+ usbd_transfer_done(xfer, 0);
+ }
+}
+
+/*------------------------------------------------------------------------*
+ * usbd_transfer_timeout_ms
+ *
+ * This function is used to setup a timeout on the given USB
+ * transfer. If the timeout has been deferred the callback given by
+ * "cb" will get called after "ms" milliseconds.
+ *------------------------------------------------------------------------*/
+void
+usbd_transfer_timeout_ms(struct usb_xfer *xfer,
+ void (*cb) (void *arg), usb_timeout_t ms)
+{
+ USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED);
+
+ /* defer delay */
+ usb_callout_reset(&xfer->timeout_handle,
+ USB_MS_TO_TICKS(ms), cb, xfer);
+}
+
+/*------------------------------------------------------------------------*
+ * usbd_callback_wrapper_sub
+ *
+ * - This function will update variables in a USB transfer after
+ *   the USB transfer is complete.
+ *
+ * - This function is used to start the next USB transfer on the
+ * ep transfer queue, if any.
+ *
+ * NOTE: In some special cases the USB transfer will not be removed from
+ * the pipe queue, but remain first. To enforce USB transfer removal call
+ * this function passing the error code "USB_ERR_CANCELLED".
+ *
+ * Return values:
+ * 0: Success.
+ * Else: The callback has been deferred.
+ *------------------------------------------------------------------------*/
+static uint8_t
+usbd_callback_wrapper_sub(struct usb_xfer *xfer)
+{
+ struct usb_endpoint *ep;
+ struct usb_bus *bus;
+ usb_frcount_t x;
+
+ bus = xfer->xroot->bus;
+
+ if ((!xfer->flags_int.open) &&
+ (!xfer->flags_int.did_close)) {
+ DPRINTF("close\n");
+ USB_BUS_LOCK(bus);
+ (xfer->endpoint->methods->close) (xfer);
+ USB_BUS_UNLOCK(bus);
+ /* only close once */
+ xfer->flags_int.did_close = 1;
+ return (1); /* wait for new callback */
+ }
+ /*
+ * If we have a non-hardware induced error we
+ * need to do the DMA delay!
+ */
+ if (xfer->error != 0 && !xfer->flags_int.did_dma_delay &&
+ (xfer->error == USB_ERR_CANCELLED ||
+ xfer->error == USB_ERR_TIMEOUT ||
+ bus->methods->start_dma_delay != NULL)) {
+
+ usb_timeout_t temp;
+
+ /* only delay once */
+ xfer->flags_int.did_dma_delay = 1;
+
+ /* we can not cancel this delay */
+ xfer->flags_int.can_cancel_immed = 0;
+
+ temp = usbd_get_dma_delay(xfer->xroot->udev);
+
+ DPRINTFN(3, "DMA delay, %u ms, "
+ "on %p\n", temp, xfer);
+
+ if (temp != 0) {
+ USB_BUS_LOCK(bus);
+ /*
+ * Some hardware solutions have dedicated
+ * events when it is safe to free DMA'ed
+ * memory. For the other hardware platforms we
+ * use a static delay.
+ */
+ if (bus->methods->start_dma_delay != NULL) {
+ (bus->methods->start_dma_delay) (xfer);
+ } else {
+ usbd_transfer_timeout_ms(xfer,
+ (void *)&usb_dma_delay_done_cb, temp);
+ }
+ USB_BUS_UNLOCK(bus);
+ return (1); /* wait for new callback */
+ }
+ }
+ /* check actual number of frames */
+ if (xfer->aframes > xfer->nframes) {
+ if (xfer->error == 0) {
+ panic("%s: actual number of frames, %d, is "
+ "greater than initial number of frames, %d\n",
+ __FUNCTION__, xfer->aframes, xfer->nframes);
+ } else {
+ /* just set some valid value */
+ xfer->aframes = xfer->nframes;
+ }
+ }
+ /* compute actual length */
+ xfer->actlen = 0;
+
+ for (x = 0; x != xfer->aframes; x++) {
+ xfer->actlen += xfer->frlengths[x];
+ }
+
+ /*
+ * Frames that were not transferred get zero actual length in
+ * case the USB device driver does not check the actual number
+ * of frames transferred, "xfer->aframes":
+ */
+ for (; x < xfer->nframes; x++) {
+ usbd_xfer_set_frame_len(xfer, x, 0);
+ }
+
+ /* check actual length */
+ if (xfer->actlen > xfer->sumlen) {
+ if (xfer->error == 0) {
+ panic("%s: actual length, %d, is greater than "
+ "initial length, %d\n",
+ __FUNCTION__, xfer->actlen, xfer->sumlen);
+ } else {
+ /* just set some valid value */
+ xfer->actlen = xfer->sumlen;
+ }
+ }
+ DPRINTFN(1, "xfer=%p endpoint=%p sts=%d alen=%d, slen=%d, afrm=%d, nfrm=%d\n",
+ xfer, xfer->endpoint, xfer->error, xfer->actlen, xfer->sumlen,
+ xfer->aframes, xfer->nframes);
+
+ if (xfer->error) {
+ /* end of control transfer, if any */
+ xfer->flags_int.control_act = 0;
+
+ /* check if we should block the execution queue */
+ if ((xfer->error != USB_ERR_CANCELLED) &&
+ (xfer->flags.pipe_bof)) {
+ DPRINTFN(2, "xfer=%p: Block On Failure "
+ "on endpoint=%p\n", xfer, xfer->endpoint);
+ goto done;
+ }
+ } else {
+ /* check for short transfers */
+ if (xfer->actlen < xfer->sumlen) {
+
+ /* end of control transfer, if any */
+ xfer->flags_int.control_act = 0;
+
+ if (!xfer->flags_int.short_xfer_ok) {
+ xfer->error = USB_ERR_SHORT_XFER;
+ if (xfer->flags.pipe_bof) {
+ DPRINTFN(2, "xfer=%p: Block On Failure on "
+ "Short Transfer on endpoint %p.\n",
+ xfer, xfer->endpoint);
+ goto done;
+ }
+ }
+ } else {
+ /*
+ * Check if we are in the middle of a
+ * control transfer:
+ */
+ if (xfer->flags_int.control_act) {
+ DPRINTFN(5, "xfer=%p: Control transfer "
+ "active on endpoint=%p\n", xfer, xfer->endpoint);
+ goto done;
+ }
+ }
+ }
+
+ ep = xfer->endpoint;
+
+ /*
+ * If the current USB transfer is completing we need to start the
+ * next one:
+ */
+ USB_BUS_LOCK(bus);
+ if (ep->endpoint_q.curr == xfer) {
+ usb_command_wrapper(&ep->endpoint_q, NULL);
+
+ if (ep->endpoint_q.curr || TAILQ_FIRST(&ep->endpoint_q.head)) {
+ /* there is another USB transfer waiting */
+ } else {
+ /* this is the last USB transfer */
+ /* clear isochronous sync flag */
+ xfer->endpoint->is_synced = 0;
+ }
+ }
+ USB_BUS_UNLOCK(bus);
+done:
+ return (0);
+}
+
+/*------------------------------------------------------------------------*
+ * usb_command_wrapper
+ *
+ * This function is used to execute commands non-recursively on a
+ * USB transfer queue.
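+ *
+ * If this function is re-entered while a command is executing (the
+ * "recurse_1" flag is set), the new element is only queued and the
+ * "recurse_2" flag is cleared, which causes the outer invocation to
+ * loop one more time instead of recursing.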
+ *------------------------------------------------------------------------*/
+void
+usb_command_wrapper(struct usb_xfer_queue *pq, struct usb_xfer *xfer)
+{
+ if (xfer) {
+ /*
+ * If the transfer is not already processing,
+ * queue it!
+ */
+ if (pq->curr != xfer) {
+ usbd_transfer_enqueue(pq, xfer);
+ if (pq->curr != NULL) {
+ /* something is already processing */
+ DPRINTFN(6, "busy %p\n", pq->curr);
+ return;
+ }
+ }
+ } else {
+ /* Get next element in queue */
+ pq->curr = NULL;
+ }
+
+ if (!pq->recurse_1) {
+
+ do {
+
+ /* set both recurse flags */
+ pq->recurse_1 = 1;
+ pq->recurse_2 = 1;
+
+ if (pq->curr == NULL) {
+ xfer = TAILQ_FIRST(&pq->head);
+ if (xfer) {
+ TAILQ_REMOVE(&pq->head, xfer,
+ wait_entry);
+ xfer->wait_queue = NULL;
+ pq->curr = xfer;
+ } else {
+ break;
+ }
+ }
+ DPRINTFN(6, "cb %p (enter)\n", pq->curr);
+ (pq->command) (pq);
+ DPRINTFN(6, "cb %p (leave)\n", pq->curr);
+
+ } while (!pq->recurse_2);
+
+ /* clear first recurse flag */
+ pq->recurse_1 = 0;
+
+ } else {
+ /* clear second recurse flag */
+ pq->recurse_2 = 0;
+ }
+}
+
+/*------------------------------------------------------------------------*
+ * usbd_ctrl_transfer_setup
+ *
+ * This function is used to setup the default USB control endpoint
+ * transfer.
+ *------------------------------------------------------------------------*/
+void
+usbd_ctrl_transfer_setup(struct usb_device *udev)
+{
+ struct usb_xfer *xfer;
+ uint8_t no_resetup;
+ uint8_t iface_index;
+
+ /* check for root HUB */
+ if (udev->parent_hub == NULL)
+ return;
+repeat:
+
+ xfer = udev->ctrl_xfer[0];
+ if (xfer) {
+ USB_XFER_LOCK(xfer);
+ no_resetup =
+ ((xfer->address == udev->address) &&
+ (udev->ctrl_ep_desc.wMaxPacketSize[0] ==
+ udev->ddesc.bMaxPacketSize));
+ if (udev->flags.usb_mode == USB_MODE_DEVICE) {
+ if (no_resetup) {
+ /*
+ * NOTE: checking "xfer->address" and
+ * starting the USB transfer must be
+ * atomic!
+ */
+ usbd_transfer_start(xfer);
+ }
+ }
+ USB_XFER_UNLOCK(xfer);
+ } else {
+ no_resetup = 0;
+ }
+
+ if (no_resetup) {
+ /*
+ * All parameters are exactly the same like before.
+ * Just return.
+ */
+ return;
+ }
+ /*
+ * Update wMaxPacketSize for the default control endpoint:
+ */
+ udev->ctrl_ep_desc.wMaxPacketSize[0] =
+ udev->ddesc.bMaxPacketSize;
+
+ /*
+ * Unsetup any existing USB transfer:
+ */
+ usbd_transfer_unsetup(udev->ctrl_xfer, USB_CTRL_XFER_MAX);
+
+ /*
+ * Try to setup a new USB transfer for the
+ * default control endpoint:
+ */
+ iface_index = 0;
+ if (usbd_transfer_setup(udev, &iface_index,
+ udev->ctrl_xfer, usb_control_ep_cfg, USB_CTRL_XFER_MAX, NULL,
+ &udev->device_mtx)) {
+ DPRINTFN(0, "could not setup default "
+ "USB transfer\n");
+ } else {
+ goto repeat;
+ }
+}
+
+/*------------------------------------------------------------------------*
+ * usbd_clear_stall_locked - factored out code
+ *
+ * NOTE: the intention of this function is not to reset the hardware
+ * data toggle.
+ *------------------------------------------------------------------------*/
+void
+usbd_clear_stall_locked(struct usb_device *udev, struct usb_endpoint *ep)
+{
+ USB_BUS_LOCK_ASSERT(udev->bus, MA_OWNED);
+
+ /* check that we have a valid case */
+ if (udev->flags.usb_mode == USB_MODE_HOST &&
+ udev->parent_hub != NULL &&
+ udev->bus->methods->clear_stall != NULL &&
+ ep->methods != NULL) {
+ (udev->bus->methods->clear_stall) (udev, ep);
+ }
+}
+
+/*------------------------------------------------------------------------*
+ * usbd_clear_data_toggle - factored out code
+ *
+ * NOTE: the intention of this function is not to reset the hardware
+ * data toggle on the USB device side.
+ *------------------------------------------------------------------------*/
+void
+usbd_clear_data_toggle(struct usb_device *udev, struct usb_endpoint *ep)
+{
+ DPRINTFN(5, "udev=%p endpoint=%p\n", udev, ep);
+
+ USB_BUS_LOCK(udev->bus);
+ ep->toggle_next = 0;
+ /* some hardware needs a callback to clear the data toggle */
+ usbd_clear_stall_locked(udev, ep);
+ USB_BUS_UNLOCK(udev->bus);
+}
+
+/*------------------------------------------------------------------------*
+ * usbd_clear_stall_callback - factored out clear stall callback
+ *
+ * Input parameters:
+ * xfer1: Clear Stall Control Transfer
+ * xfer2: Stalled USB Transfer
+ *
+ * This function is NULL safe.
+ *
+ * Return values:
+ * 0: In progress
+ * Else: Finished
+ *
+ * Clear stall config example:
+ *
+ * static const struct usb_config my_clearstall = {
+ * .type = UE_CONTROL,
+ * .endpoint = 0,
+ * .direction = UE_DIR_ANY,
+ * .interval = 50, //50 milliseconds
+ * .bufsize = sizeof(struct usb_device_request),
+ * .timeout = 1000, //1.000 seconds
+ * .callback = &my_clear_stall_callback, // **
+ * .usb_mode = USB_MODE_HOST,
+ * };
+ *
+ * ** "my_clear_stall_callback" calls "usbd_clear_stall_callback"
+ * passing the correct parameters.
+ *------------------------------------------------------------------------*/
+uint8_t
+usbd_clear_stall_callback(struct usb_xfer *xfer1,
+ struct usb_xfer *xfer2)
+{
+ struct usb_device_request req;
+
+ if (xfer2 == NULL) {
+ /* looks like we are tearing down */
+ DPRINTF("NULL input parameter\n");
+ return (0);
+ }
+ USB_XFER_LOCK_ASSERT(xfer1, MA_OWNED);
+ USB_XFER_LOCK_ASSERT(xfer2, MA_OWNED);
+
+ switch (USB_GET_STATE(xfer1)) {
+ case USB_ST_SETUP:
+
+ /*
+ * pre-clear the data toggle to DATA0 ("umass.c" and
+ * "ata-usb.c" depends on this)
+ */
+
+ usbd_clear_data_toggle(xfer2->xroot->udev, xfer2->endpoint);
+
+ /* setup a clear-stall packet */
+
+ req.bmRequestType = UT_WRITE_ENDPOINT;
+ req.bRequest = UR_CLEAR_FEATURE;
+ USETW(req.wValue, UF_ENDPOINT_HALT);
+ req.wIndex[0] = xfer2->endpoint->edesc->bEndpointAddress;
+ req.wIndex[1] = 0;
+ USETW(req.wLength, 0);
+
+ /*
+ * "usbd_transfer_setup_sub()" will ensure that
+ * we have sufficient room in the buffer for
+ * the request structure!
+ */
+
+ /* copy in the transfer */
+
+ usbd_copy_in(xfer1->frbuffers, 0, &req, sizeof(req));
+
+ /* set length */
+ xfer1->frlengths[0] = sizeof(req);
+ xfer1->nframes = 1;
+
+ usbd_transfer_submit(xfer1);
+ return (0);
+
+ case USB_ST_TRANSFERRED:
+ break;
+
+ default: /* Error */
+ if (xfer1->error == USB_ERR_CANCELLED) {
+ return (0);
+ }
+ break;
+ }
+ return (1); /* Clear Stall Finished */
+}
+
+/*------------------------------------------------------------------------*
+ * usbd_transfer_poll
+ *
+ * The following function gets called from the USB keyboard driver and
+ * UMASS when the system has panicked.
+ *
+ * NOTE: It is currently not possible to resume normal operation on
+ * the USB controller which has been polled, due to clearing of the
+ * "up_dsleep" and "up_msleep" flags.
+ *------------------------------------------------------------------------*/
+void
+usbd_transfer_poll(struct usb_xfer **ppxfer, uint16_t max)
+{
+ struct usb_xfer *xfer;
+ struct usb_xfer_root *xroot;
+ struct usb_device *udev;
+ struct usb_proc_msg *pm;
+ uint16_t n;
+ uint16_t drop_bus;
+ uint16_t drop_xfer;
+
+ for (n = 0; n != max; n++) {
+ /* Extra checks to avoid panic */
+ xfer = ppxfer[n];
+ if (xfer == NULL)
+ continue; /* no USB transfer */
+ xroot = xfer->xroot;
+ if (xroot == NULL)
+ continue; /* no USB root */
+ udev = xroot->udev;
+ if (udev == NULL)
+ continue; /* no USB device */
+ if (udev->bus == NULL)
+ continue; /* no BUS structure */
+ if (udev->bus->methods == NULL)
+ continue; /* no BUS methods */
+ if (udev->bus->methods->xfer_poll == NULL)
+ continue; /* no poll method */
+
+ /* make sure that the BUS mutex is not locked */
+ drop_bus = 0;
+ while (mtx_owned(&xroot->udev->bus->bus_mtx)) {
+ mtx_unlock(&xroot->udev->bus->bus_mtx);
+ drop_bus++;
+ }
+
+ /* make sure that the transfer mutex is not locked */
+ drop_xfer = 0;
+ while (mtx_owned(xroot->xfer_mtx)) {
+ mtx_unlock(xroot->xfer_mtx);
+ drop_xfer++;
+ }
+
+		/* Make sure cv_signal() and cv_broadcast() are not called */
+ udev->bus->control_xfer_proc.up_msleep = 0;
+ udev->bus->explore_proc.up_msleep = 0;
+ udev->bus->giant_callback_proc.up_msleep = 0;
+ udev->bus->non_giant_callback_proc.up_msleep = 0;
+
+ /* poll USB hardware */
+ (udev->bus->methods->xfer_poll) (udev->bus);
+
+ USB_BUS_LOCK(xroot->bus);
+
+ /* check for clear stall */
+ if (udev->ctrl_xfer[1] != NULL) {
+
+ /* poll clear stall start */
+ pm = &udev->cs_msg[0].hdr;
+ (pm->pm_callback) (pm);
+ /* poll clear stall done thread */
+ pm = &udev->ctrl_xfer[1]->
+ xroot->done_m[0].hdr;
+ (pm->pm_callback) (pm);
+ }
+
+ /* poll done thread */
+ pm = &xroot->done_m[0].hdr;
+ (pm->pm_callback) (pm);
+
+ USB_BUS_UNLOCK(xroot->bus);
+
+ /* restore transfer mutex */
+ while (drop_xfer--)
+ mtx_lock(xroot->xfer_mtx);
+
+ /* restore BUS mutex */
+ while (drop_bus--)
+ mtx_lock(&xroot->udev->bus->bus_mtx);
+ }
+}
+
+static void
+usbd_get_std_packet_size(struct usb_std_packet_size *ptr,
+ uint8_t type, enum usb_dev_speed speed)
+{
+ static const uint16_t intr_range_max[USB_SPEED_MAX] = {
+ [USB_SPEED_LOW] = 8,
+ [USB_SPEED_FULL] = 64,
+ [USB_SPEED_HIGH] = 1024,
+ [USB_SPEED_VARIABLE] = 1024,
+ [USB_SPEED_SUPER] = 1024,
+ };
+
+ static const uint16_t isoc_range_max[USB_SPEED_MAX] = {
+ [USB_SPEED_LOW] = 0, /* invalid */
+ [USB_SPEED_FULL] = 1023,
+ [USB_SPEED_HIGH] = 1024,
+ [USB_SPEED_VARIABLE] = 3584,
+ [USB_SPEED_SUPER] = 1024,
+ };
+
+ static const uint16_t control_min[USB_SPEED_MAX] = {
+ [USB_SPEED_LOW] = 8,
+ [USB_SPEED_FULL] = 8,
+ [USB_SPEED_HIGH] = 64,
+ [USB_SPEED_VARIABLE] = 512,
+ [USB_SPEED_SUPER] = 512,
+ };
+
+ static const uint16_t bulk_min[USB_SPEED_MAX] = {
+ [USB_SPEED_LOW] = 8,
+ [USB_SPEED_FULL] = 8,
+ [USB_SPEED_HIGH] = 512,
+ [USB_SPEED_VARIABLE] = 512,
+ [USB_SPEED_SUPER] = 1024,
+ };
+
+ uint16_t temp;
+
+ memset(ptr, 0, sizeof(*ptr));
+
+ switch (type) {
+ case UE_INTERRUPT:
+ ptr->range.max = intr_range_max[speed];
+ break;
+ case UE_ISOCHRONOUS:
+ ptr->range.max = isoc_range_max[speed];
+ break;
+ default:
+ if (type == UE_BULK)
+ temp = bulk_min[speed];
+ else /* UE_CONTROL */
+ temp = control_min[speed];
+
+ /* default is fixed */
+ ptr->fixed[0] = temp;
+ ptr->fixed[1] = temp;
+ ptr->fixed[2] = temp;
+ ptr->fixed[3] = temp;
+
+ if (speed == USB_SPEED_FULL) {
+ /* multiple sizes */
+ ptr->fixed[1] = 16;
+ ptr->fixed[2] = 32;
+ ptr->fixed[3] = 64;
+ }
+ if ((speed == USB_SPEED_VARIABLE) &&
+ (type == UE_BULK)) {
+ /* multiple sizes */
+ ptr->fixed[2] = 1024;
+ ptr->fixed[3] = 1536;
+ }
+ break;
+ }
+}
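+
+/*
+ * Example: for a full-speed bulk endpoint the tables above yield the
+ * fixed packet sizes 8, 16, 32 and 64 bytes, while a high-speed bulk
+ * endpoint is fixed at 512 bytes in all four slots.
+ */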
+
+void *
+usbd_xfer_softc(struct usb_xfer *xfer)
+{
+ return (xfer->priv_sc);
+}
+
+void *
+usbd_xfer_get_priv(struct usb_xfer *xfer)
+{
+ return (xfer->priv_fifo);
+}
+
+void
+usbd_xfer_set_priv(struct usb_xfer *xfer, void *ptr)
+{
+ xfer->priv_fifo = ptr;
+}
+
+uint8_t
+usbd_xfer_state(struct usb_xfer *xfer)
+{
+ return (xfer->usb_state);
+}
+
+void
+usbd_xfer_set_flag(struct usb_xfer *xfer, int flag)
+{
+ switch (flag) {
+ case USB_FORCE_SHORT_XFER:
+ xfer->flags.force_short_xfer = 1;
+ break;
+ case USB_SHORT_XFER_OK:
+ xfer->flags.short_xfer_ok = 1;
+ break;
+ case USB_MULTI_SHORT_OK:
+ xfer->flags.short_frames_ok = 1;
+ break;
+ case USB_MANUAL_STATUS:
+ xfer->flags.manual_status = 1;
+ break;
+ }
+}
+
+void
+usbd_xfer_clr_flag(struct usb_xfer *xfer, int flag)
+{
+ switch (flag) {
+ case USB_FORCE_SHORT_XFER:
+ xfer->flags.force_short_xfer = 0;
+ break;
+ case USB_SHORT_XFER_OK:
+ xfer->flags.short_xfer_ok = 0;
+ break;
+ case USB_MULTI_SHORT_OK:
+ xfer->flags.short_frames_ok = 0;
+ break;
+ case USB_MANUAL_STATUS:
+ xfer->flags.manual_status = 0;
+ break;
+ }
+}
+
+/*
+ * The following function returns the time, in milliseconds, at which
+ * the isochronous transfer was completed by the hardware. The
+ * returned value wraps around every 65536 milliseconds.
+ */
+uint16_t
+usbd_xfer_get_timestamp(struct usb_xfer *xfer)
+{
+ return (xfer->isoc_time_complete);
+}
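+
+/*
+ * Wraparound-safe elapsed time sketch ("start" and "now" are
+ * hypothetical driver variables):
+ *
+ * uint16_t start = usbd_xfer_get_timestamp(xfer);
+ * ...
+ * uint16_t now = usbd_xfer_get_timestamp(xfer);
+ * uint16_t elapsed_ms = now - start;  // correct modulo 65536 ms
+ */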
diff --git a/rtems/freebsd/dev/usb/usb_transfer.h b/rtems/freebsd/dev/usb/usb_transfer.h
new file mode 100644
index 00000000..eb2abd0c
--- /dev/null
+++ b/rtems/freebsd/dev/usb/usb_transfer.h
@@ -0,0 +1,140 @@
+/* $FreeBSD$ */
+/*-
+ * Copyright (c) 2008 Hans Petter Selasky. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _USB_TRANSFER_HH_
+#define _USB_TRANSFER_HH_
+
+/*
+ * The following structure defines the message that is used to signal
+ * the "done_p" USB process.
+ */
+struct usb_done_msg {
+ struct usb_proc_msg hdr;
+ struct usb_xfer_root *xroot;
+};
+
+#define USB_DMATAG_TO_XROOT(dpt) \
+ ((struct usb_xfer_root *)( \
+ ((uint8_t *)(dpt)) - \
+ ((uint8_t *)&((struct usb_xfer_root *)0)->dma_parent_tag)))
+
+/*
+ * The following structure is used to keep information about memory
+ * that should be automatically freed at the moment all USB transfers
+ * have been freed.
+ */
+struct usb_xfer_root {
+ struct usb_dma_parent_tag dma_parent_tag;
+#if USB_HAVE_BUSDMA
+ struct usb_xfer_queue dma_q;
+#endif
+ struct usb_xfer_queue done_q;
+ struct usb_done_msg done_m[2];
+ struct cv cv_drain;
+
+ struct usb_process *done_p; /* pointer to callback process */
+ void *memory_base;
+ struct mtx *xfer_mtx; /* cannot be changed during operation */
+#if USB_HAVE_BUSDMA
+ struct usb_page_cache *dma_page_cache_start;
+ struct usb_page_cache *dma_page_cache_end;
+#endif
+ struct usb_page_cache *xfer_page_cache_start;
+ struct usb_page_cache *xfer_page_cache_end;
+ struct usb_bus *bus; /* pointer to USB bus (cached) */
+ struct usb_device *udev; /* pointer to USB device */
+
+ usb_size_t memory_size;
+ usb_size_t setup_refcount;
+#if USB_HAVE_BUSDMA
+ usb_frcount_t dma_nframes; /* number of page caches to load */
+	usb_frcount_t dma_currframe;	/* current page cache number */
+ usb_frlength_t dma_frlength_0; /* length of page cache zero */
+ uint8_t dma_error; /* set if virtual memory could not be
+ * loaded */
+#endif
+ uint8_t done_sleep; /* set if done thread is sleeping */
+};
+
+/*
+ * The following structure is used when setting up an array of USB
+ * transfers.
+ */
+struct usb_setup_params {
+ struct usb_dma_tag *dma_tag_p;
+ struct usb_page *dma_page_ptr;
+ struct usb_page_cache *dma_page_cache_ptr; /* these will be
+ * auto-freed */
+ struct usb_page_cache *xfer_page_cache_ptr; /* these will not be
+ * auto-freed */
+ struct usb_device *udev;
+ struct usb_xfer *curr_xfer;
+ const struct usb_config *curr_setup;
+ const struct usb_pipe_methods *methods;
+ void *buf;
+ usb_frlength_t *xfer_length_ptr;
+
+ usb_size_t size[7];
+ usb_frlength_t bufsize;
+ usb_frlength_t bufsize_max;
+
+ uint32_t hc_max_frame_size;
+ uint16_t hc_max_packet_size;
+ uint8_t hc_max_packet_count;
+ enum usb_dev_speed speed;
+ uint8_t dma_tag_max;
+ usb_error_t err;
+};
+
+/* function prototypes */
+
+uint8_t usbd_transfer_setup_sub_malloc(struct usb_setup_params *parm,
+ struct usb_page_cache **ppc, usb_size_t size, usb_size_t align,
+ usb_size_t count);
+void usb_dma_delay_done_cb(struct usb_xfer *);
+void usb_command_wrapper(struct usb_xfer_queue *pq,
+ struct usb_xfer *xfer);
+void usbd_pipe_enter(struct usb_xfer *xfer);
+void usbd_pipe_start(struct usb_xfer_queue *pq);
+void usbd_transfer_dequeue(struct usb_xfer *xfer);
+void usbd_transfer_done(struct usb_xfer *xfer, usb_error_t error);
+void usbd_transfer_enqueue(struct usb_xfer_queue *pq,
+ struct usb_xfer *xfer);
+void usbd_transfer_setup_sub(struct usb_setup_params *parm);
+void usbd_ctrl_transfer_setup(struct usb_device *udev);
+void usbd_clear_stall_locked(struct usb_device *udev,
+ struct usb_endpoint *ep);
+void usbd_clear_data_toggle(struct usb_device *udev,
+ struct usb_endpoint *ep);
+usb_callback_t usbd_do_request_callback;
+usb_callback_t usb_handle_request_callback;
+usb_callback_t usb_do_clear_stall_callback;
+void usbd_transfer_timeout_ms(struct usb_xfer *xfer,
+ void (*cb) (void *arg), usb_timeout_t ms);
+usb_timeout_t usbd_get_dma_delay(struct usb_device *udev);
+void usbd_transfer_power_ref(struct usb_xfer *xfer, int val);
+
+#endif /* _USB_TRANSFER_HH_ */
diff --git a/rtems/freebsd/dev/usb/usb_util.c b/rtems/freebsd/dev/usb/usb_util.c
new file mode 100644
index 00000000..f91ca259
--- /dev/null
+++ b/rtems/freebsd/dev/usb/usb_util.c
@@ -0,0 +1,251 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/* $FreeBSD$ */
+/*-
+ * Copyright (c) 2008 Hans Petter Selasky. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <rtems/freebsd/sys/stdint.h>
+#include <rtems/freebsd/sys/stddef.h>
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/queue.h>
+#include <rtems/freebsd/sys/types.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/bus.h>
+#include <rtems/freebsd/sys/linker_set.h>
+#include <rtems/freebsd/sys/module.h>
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/mutex.h>
+#include <rtems/freebsd/sys/condvar.h>
+#include <rtems/freebsd/sys/sysctl.h>
+#include <rtems/freebsd/sys/sx.h>
+#include <rtems/freebsd/sys/unistd.h>
+#include <rtems/freebsd/sys/callout.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/priv.h>
+
+#include <rtems/freebsd/dev/usb/usb.h>
+#include <rtems/freebsd/dev/usb/usbdi.h>
+#include <rtems/freebsd/dev/usb/usbdi_util.h>
+
+#include <rtems/freebsd/dev/usb/usb_core.h>
+#include <rtems/freebsd/dev/usb/usb_util.h>
+#include <rtems/freebsd/dev/usb/usb_process.h>
+#include <rtems/freebsd/dev/usb/usb_device.h>
+#include <rtems/freebsd/dev/usb/usb_request.h>
+#include <rtems/freebsd/dev/usb/usb_busdma.h>
+
+#include <rtems/freebsd/dev/usb/usb_controller.h>
+#include <rtems/freebsd/dev/usb/usb_bus.h>
+
+/*------------------------------------------------------------------------*
+ * device_delete_all_children - delete all children of a device
+ *------------------------------------------------------------------------*/
+#ifndef device_delete_all_children
+int
+device_delete_all_children(device_t dev)
+{
+ device_t *devlist;
+ int devcount;
+ int error;
+
+ error = device_get_children(dev, &devlist, &devcount);
+ if (error == 0) {
+ while (devcount-- > 0) {
+ error = device_delete_child(dev, devlist[devcount]);
+ if (error) {
+ break;
+ }
+ }
+ free(devlist, M_TEMP);
+ }
+ return (error);
+}
+#endif
+
+/*------------------------------------------------------------------------*
+ * device_set_usb_desc
+ *
+ * This function can be called at probe or attach to set the textual
+ * description supplied by the USB device for the given device.
+ *------------------------------------------------------------------------*/
+void
+device_set_usb_desc(device_t dev)
+{
+ struct usb_attach_arg *uaa;
+ struct usb_device *udev;
+ struct usb_interface *iface;
+ char *temp_p;
+ usb_error_t err;
+
+ if (dev == NULL) {
+ /* should not happen */
+ return;
+ }
+ uaa = device_get_ivars(dev);
+ if (uaa == NULL) {
+ /* can happen if called at the wrong time */
+ return;
+ }
+ udev = uaa->device;
+ iface = uaa->iface;
+
+ if ((iface == NULL) ||
+ (iface->idesc == NULL) ||
+ (iface->idesc->iInterface == 0)) {
+ err = USB_ERR_INVAL;
+ } else {
+ err = 0;
+ }
+
+ temp_p = (char *)udev->bus->scratch[0].data;
+
+ if (!err) {
+		/* try to get the interface string! */
+ err = usbd_req_get_string_any
+ (udev, NULL, temp_p,
+ sizeof(udev->bus->scratch), iface->idesc->iInterface);
+ }
+ if (err) {
+ /* use default description */
+ usb_devinfo(udev, temp_p,
+ sizeof(udev->bus->scratch));
+ }
+ device_set_desc_copy(dev, temp_p);
+ device_printf(dev, "<%s> on %s\n", temp_p,
+ device_get_nameunit(udev->bus->bdev));
+}
+
+/*------------------------------------------------------------------------*
+ * usb_pause_mtx - factored out code
+ *
+ * This function will delay execution by the passed number of system
+ * ticks. The passed mutex "mtx" will be dropped while waiting, if
+ * "mtx" is not NULL.
+ *------------------------------------------------------------------------*/
+void
+usb_pause_mtx(struct mtx *mtx, int _ticks)
+{
+ if (mtx != NULL)
+ mtx_unlock(mtx);
+
+ if (cold) {
+ /* convert to milliseconds */
+ _ticks = (_ticks * 1000) / hz;
+ /* convert to microseconds, rounded up */
+ _ticks = (_ticks + 1) * 1000;
+ DELAY(_ticks);
+
+ } else {
+
+ /*
+ * Add one to the number of ticks so that we don't return
+ * too early!
+ */
+ _ticks++;
+
+ if (pause("USBWAIT", _ticks)) {
+ /* ignore */
+ }
+ }
+ if (mtx != NULL)
+ mtx_lock(mtx);
+}
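+
+/*
+ * Example: sleep for roughly 100 milliseconds without holding a lock
+ * (USB_MS_TO_TICKS() is defined in "usbdi.h"):
+ *
+ * usb_pause_mtx(NULL, USB_MS_TO_TICKS(100));
+ */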
+
+/*------------------------------------------------------------------------*
+ * usb_printbcd
+ *
+ * This function will print the version number "bcd" to the string
+ * pointed to by "p" having a maximum length of "p_len" bytes
+ * including the terminating zero.
+ *------------------------------------------------------------------------*/
+void
+usb_printbcd(char *p, uint16_t p_len, uint16_t bcd)
+{
+ if (snprintf(p, p_len, "%x.%02x", bcd >> 8, bcd & 0xff)) {
+ /* ignore any errors */
+ }
+}
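+
+/*
+ * Example: bcd = 0x0210 prints as "2.10".
+ */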
+
+/*------------------------------------------------------------------------*
+ * usb_trim_spaces
+ *
+ * This function removes spaces at the beginning and the end of the string
+ * pointed to by the "p" argument.
+ *------------------------------------------------------------------------*/
+void
+usb_trim_spaces(char *p)
+{
+ char *q;
+ char *e;
+
+ if (p == NULL)
+ return;
+ q = e = p;
+ while (*q == ' ') /* skip leading spaces */
+ q++;
+ while ((*p = *q++)) /* copy string */
+ if (*p++ != ' ') /* remember last non-space */
+ e = p;
+ *e = 0; /* kill trailing spaces */
+}
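+
+/*
+ * Example: "  foo bar " is trimmed in place to "foo bar"; interior
+ * spaces are preserved.
+ */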
+
+/*------------------------------------------------------------------------*
+ * usb_make_str_desc - convert an ASCII string into a UNICODE string
+ *------------------------------------------------------------------------*/
+uint8_t
+usb_make_str_desc(void *ptr, uint16_t max_len, const char *s)
+{
+ struct usb_string_descriptor *p = ptr;
+ uint8_t totlen;
+ int j;
+
+ if (max_len < 2) {
+ /* invalid length */
+ return (0);
+ }
+ max_len = ((max_len / 2) - 1);
+
+ j = strlen(s);
+
+ if (j < 0) {
+ j = 0;
+ }
+ if (j > 126) {
+ j = 126;
+ }
+ if (max_len > j) {
+ max_len = j;
+ }
+ totlen = (max_len + 1) * 2;
+
+ p->bLength = totlen;
+ p->bDescriptorType = UDESC_STRING;
+
+ while (max_len--) {
+ USETW2(p->bString[max_len], 0, s[max_len]);
+ }
+ return (totlen);
+}
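+
+/*
+ * Usage sketch ("buf" is a hypothetical caller buffer):
+ *
+ * uint8_t buf[64];
+ * uint8_t len = usb_make_str_desc(buf, sizeof(buf), "RTEMS");
+ * // len == 12: two header bytes plus 5 UTF-16LE characters
+ */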
diff --git a/rtems/freebsd/dev/usb/usb_util.h b/rtems/freebsd/dev/usb/usb_util.h
new file mode 100644
index 00000000..9e001088
--- /dev/null
+++ b/rtems/freebsd/dev/usb/usb_util.h
@@ -0,0 +1,35 @@
+/* $FreeBSD$ */
+/*-
+ * Copyright (c) 2008 Hans Petter Selasky. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _USB_UTIL_HH_
+#define _USB_UTIL_HH_
+
+int device_delete_all_children(device_t dev);
+uint8_t usb_make_str_desc(void *ptr, uint16_t max_len, const char *s);
+void usb_printbcd(char *p, uint16_t p_len, uint16_t bcd);
+void usb_trim_spaces(char *p);
+
+#endif /* _USB_UTIL_HH_ */
diff --git a/rtems/freebsd/dev/usb/usbdi.h b/rtems/freebsd/dev/usb/usbdi.h
new file mode 100644
index 00000000..43421019
--- /dev/null
+++ b/rtems/freebsd/dev/usb/usbdi.h
@@ -0,0 +1,562 @@
+/*-
+ * Copyright (c) 2009 Andrew Thompson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+#ifndef _USB_USBDI_HH_
+#define _USB_USBDI_HH_
+
+struct usb_fifo;
+struct usb_xfer;
+struct usb_device;
+struct usb_attach_arg;
+struct usb_interface;
+struct usb_endpoint;
+struct usb_page_cache;
+struct usb_page_search;
+struct usb_process;
+struct usb_proc_msg;
+struct usb_mbuf;
+struct mbuf;
+
+typedef enum { /* keep in sync with usb_errstr_table */
+ USB_ERR_NORMAL_COMPLETION = 0,
+ USB_ERR_PENDING_REQUESTS, /* 1 */
+ USB_ERR_NOT_STARTED, /* 2 */
+ USB_ERR_INVAL, /* 3 */
+ USB_ERR_NOMEM, /* 4 */
+ USB_ERR_CANCELLED, /* 5 */
+ USB_ERR_BAD_ADDRESS, /* 6 */
+ USB_ERR_BAD_BUFSIZE, /* 7 */
+ USB_ERR_BAD_FLAG, /* 8 */
+ USB_ERR_NO_CALLBACK, /* 9 */
+ USB_ERR_IN_USE, /* 10 */
+ USB_ERR_NO_ADDR, /* 11 */
+ USB_ERR_NO_PIPE, /* 12 */
+ USB_ERR_ZERO_NFRAMES, /* 13 */
+ USB_ERR_ZERO_MAXP, /* 14 */
+ USB_ERR_SET_ADDR_FAILED, /* 15 */
+ USB_ERR_NO_POWER, /* 16 */
+ USB_ERR_TOO_DEEP, /* 17 */
+ USB_ERR_IOERROR, /* 18 */
+ USB_ERR_NOT_CONFIGURED, /* 19 */
+ USB_ERR_TIMEOUT, /* 20 */
+ USB_ERR_SHORT_XFER, /* 21 */
+ USB_ERR_STALLED, /* 22 */
+ USB_ERR_INTERRUPTED, /* 23 */
+ USB_ERR_DMA_LOAD_FAILED, /* 24 */
+ USB_ERR_BAD_CONTEXT, /* 25 */
+ USB_ERR_NO_ROOT_HUB, /* 26 */
+ USB_ERR_NO_INTR_THREAD, /* 27 */
+ USB_ERR_NOT_LOCKED, /* 28 */
+ USB_ERR_MAX
+} usb_error_t;
+
+/*
+ * Flags for transfers
+ */
+#define USB_FORCE_SHORT_XFER 0x0001 /* force a short transmit last */
+#define USB_SHORT_XFER_OK 0x0004 /* allow short reads */
+#define USB_DELAY_STATUS_STAGE 0x0010 /* insert delay before STATUS stage */
+#define USB_USER_DATA_PTR 0x0020 /* internal flag */
+#define USB_MULTI_SHORT_OK 0x0040 /* allow multiple short frames */
+#define USB_MANUAL_STATUS 0x0080 /* manual ctrl status */
+
+#define USB_NO_TIMEOUT 0
+#define USB_DEFAULT_TIMEOUT 5000 /* 5000 ms = 5 seconds */
+
+#if defined(_KERNEL)
+/* typedefs */
+
+typedef void (usb_callback_t)(struct usb_xfer *, usb_error_t);
+typedef void (usb_proc_callback_t)(struct usb_proc_msg *);
+typedef usb_error_t (usb_handle_req_t)(struct usb_device *,
+ struct usb_device_request *, const void **, uint16_t *);
+
+typedef int (usb_fifo_open_t)(struct usb_fifo *fifo, int fflags);
+typedef void (usb_fifo_close_t)(struct usb_fifo *fifo, int fflags);
+typedef int (usb_fifo_ioctl_t)(struct usb_fifo *fifo, u_long cmd, void *addr, int fflags);
+typedef void (usb_fifo_cmd_t)(struct usb_fifo *fifo);
+typedef void (usb_fifo_filter_t)(struct usb_fifo *fifo, struct usb_mbuf *m);
+
+
+/* USB events */
+#include <rtems/freebsd/sys/eventhandler.h>
+typedef void (*usb_dev_configured_t)(void *, struct usb_device *,
+ struct usb_attach_arg *);
+EVENTHANDLER_DECLARE(usb_dev_configured, usb_dev_configured_t);
+
+/*
+ * The following macros are used to convert milliseconds into system
+ * ticks (HZ). We use 1024 instead of 1000 milliseconds per second to
+ * save a full division.
+ */
+#define USB_MS_HZ 1024
+
+#define USB_MS_TO_TICKS(ms) \
+ (((uint32_t)((((uint32_t)(ms)) * ((uint32_t)(hz))) + USB_MS_HZ - 1)) / USB_MS_HZ)
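+
+/*
+ * Example: with hz = 1000, USB_MS_TO_TICKS(250) =
+ * (250 * 1000 + 1023) / 1024 = 245 ticks, about 2% short of an exact
+ * /1000 conversion; that is the price of avoiding the division.
+ */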
+
+/*
+ * Common queue structure for USB transfers.
+ */
+struct usb_xfer_queue {
+ TAILQ_HEAD(, usb_xfer) head;
+ struct usb_xfer *curr; /* current USB transfer processed */
+ void (*command) (struct usb_xfer_queue *pq);
+ uint8_t recurse_1:1;
+ uint8_t recurse_2:1;
+};
+
+/*
+ * The following structure defines a USB endpoint.
+ */
+struct usb_endpoint {
+ struct usb_xfer_queue endpoint_q; /* queue of USB transfers */
+
+ struct usb_endpoint_descriptor *edesc;
+ struct usb_endpoint_ss_comp_descriptor *ecomp;
+ struct usb_pipe_methods *methods; /* set by HC driver */
+
+ uint16_t isoc_next;
+
+ uint8_t toggle_next:1; /* next data toggle value */
+ uint8_t is_stalled:1; /* set if endpoint is stalled */
+	uint8_t	is_synced:1;	/* set if we are synchronised */
+ uint8_t unused:5;
+ uint8_t iface_index; /* not used by "default endpoint" */
+
+ uint8_t refcount_alloc; /* allocation refcount */
+ uint8_t refcount_bw; /* bandwidth refcount */
+#define USB_EP_REF_MAX 0x3f
+
+ /* High-Speed resource allocation (valid if "refcount_bw" > 0) */
+
+ uint8_t usb_smask; /* USB start mask */
+ uint8_t usb_cmask; /* USB complete mask */
+ uint8_t usb_uframe; /* USB microframe */
+};
+
+/*
+ * The following structure defines a USB interface.
+ */
+struct usb_interface {
+ struct usb_interface_descriptor *idesc;
+ device_t subdev;
+ uint8_t alt_index;
+ uint8_t parent_iface_index;
+
+ /* Linux compat */
+ struct usb_host_interface *altsetting;
+ struct usb_host_interface *cur_altsetting;
+ struct usb_device *linux_udev;
+ void *bsd_priv_sc; /* device specific information */
+ uint8_t num_altsetting; /* number of alternate settings */
+ uint8_t bsd_iface_index;
+};
+
+/*
+ * The following structure defines a set of USB transfer flags.
+ */
+struct usb_xfer_flags {
+ uint8_t force_short_xfer:1; /* force a short transmit transfer
+ * last */
+ uint8_t short_xfer_ok:1; /* allow short receive transfers */
+ uint8_t short_frames_ok:1; /* allow short frames */
+ uint8_t pipe_bof:1; /* block pipe on failure */
+ uint8_t proxy_buffer:1; /* makes buffer size a factor of
+ * "max_frame_size" */
+ uint8_t ext_buffer:1; /* uses external DMA buffer */
+ uint8_t manual_status:1; /* non automatic status stage on
+ * control transfers */
+ uint8_t no_pipe_ok:1; /* set if "USB_ERR_NO_PIPE" error can
+ * be ignored */
+ uint8_t stall_pipe:1; /* set if the endpoint belonging to
+ * this USB transfer should be stalled
+ * before starting this transfer! */
+};
+
+/*
+ * The following structure defines a USB configuration, which is
+ * basically used when setting up a USB transfer.
+ */
+struct usb_config {
+ usb_callback_t *callback; /* USB transfer callback */
+ usb_frlength_t bufsize; /* total pipe buffer size in bytes */
+ usb_frcount_t frames; /* maximum number of USB frames */
+ usb_timeout_t interval; /* interval in milliseconds */
+#define USB_DEFAULT_INTERVAL 0
+ usb_timeout_t timeout; /* transfer timeout in milliseconds */
+ struct usb_xfer_flags flags; /* transfer flags */
+ enum usb_hc_mode usb_mode; /* host or device mode */
+ uint8_t type; /* pipe type */
+ uint8_t endpoint; /* pipe number */
+ uint8_t direction; /* pipe direction */
+ uint8_t ep_index; /* pipe index match to use */
+ uint8_t if_index; /* "ifaces" index to use */
+};
+
+/*
+ * The following structure is used when looking up a USB driver for
+ * a USB device. It is inspired by the Linux structure called
+ * "usb_device_id".
+ */
+struct usb_device_id {
+
+ /* Hook for driver specific information */
+ unsigned long driver_info;
+
+ /* Used for product specific matches; the BCD range is inclusive */
+ uint16_t idVendor;
+ uint16_t idProduct;
+ uint16_t bcdDevice_lo;
+ uint16_t bcdDevice_hi;
+
+ /* Used for device class matches */
+ uint8_t bDeviceClass;
+ uint8_t bDeviceSubClass;
+ uint8_t bDeviceProtocol;
+
+ /* Used for interface class matches */
+ uint8_t bInterfaceClass;
+ uint8_t bInterfaceSubClass;
+ uint8_t bInterfaceProtocol;
+
+ /* Select which fields to match against */
+ uint8_t match_flag_vendor:1;
+ uint8_t match_flag_product:1;
+ uint8_t match_flag_dev_lo:1;
+ uint8_t match_flag_dev_hi:1;
+ uint8_t match_flag_dev_class:1;
+ uint8_t match_flag_dev_subclass:1;
+ uint8_t match_flag_dev_protocol:1;
+ uint8_t match_flag_int_class:1;
+ uint8_t match_flag_int_subclass:1;
+ uint8_t match_flag_int_protocol:1;
+
+#if USB_HAVE_COMPAT_LINUX
+ /* which fields to match against */
+ uint16_t match_flags;
+#define USB_DEVICE_ID_MATCH_VENDOR 0x0001
+#define USB_DEVICE_ID_MATCH_PRODUCT 0x0002
+#define USB_DEVICE_ID_MATCH_DEV_LO 0x0004
+#define USB_DEVICE_ID_MATCH_DEV_HI 0x0008
+#define USB_DEVICE_ID_MATCH_DEV_CLASS 0x0010
+#define USB_DEVICE_ID_MATCH_DEV_SUBCLASS 0x0020
+#define USB_DEVICE_ID_MATCH_DEV_PROTOCOL 0x0040
+#define USB_DEVICE_ID_MATCH_INT_CLASS 0x0080
+#define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100
+#define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
+#endif
+};
+
+#define USB_VENDOR(vend) \
+ .match_flag_vendor = 1, .idVendor = (vend)
+
+#define USB_PRODUCT(prod) \
+ .match_flag_product = 1, .idProduct = (prod)
+
+#define USB_VP(vend,prod) \
+ USB_VENDOR(vend), USB_PRODUCT(prod)
+
+#define USB_VPI(vend,prod,info) \
+ USB_VENDOR(vend), USB_PRODUCT(prod), USB_DRIVER_INFO(info)
+
+#define USB_DEV_BCD_GTEQ(lo) /* greater than or equal */ \
+ .match_flag_dev_lo = 1, .bcdDevice_lo = (lo)
+
+#define USB_DEV_BCD_LTEQ(hi) /* less than or equal */ \
+ .match_flag_dev_hi = 1, .bcdDevice_hi = (hi)
+
+#define USB_DEV_CLASS(dc) \
+ .match_flag_dev_class = 1, .bDeviceClass = (dc)
+
+#define USB_DEV_SUBCLASS(dsc) \
+ .match_flag_dev_subclass = 1, .bDeviceSubClass = (dsc)
+
+#define USB_DEV_PROTOCOL(dp) \
+ .match_flag_dev_protocol = 1, .bDeviceProtocol = (dp)
+
+#define USB_IFACE_CLASS(ic) \
+ .match_flag_int_class = 1, .bInterfaceClass = (ic)
+
+#define USB_IFACE_SUBCLASS(isc) \
+ .match_flag_int_subclass = 1, .bInterfaceSubClass = (isc)
+
+#define USB_IFACE_PROTOCOL(ip) \
+ .match_flag_int_protocol = 1, .bInterfaceProtocol = (ip)
+
+#define USB_IF_CSI(class,subclass,info) \
+ USB_IFACE_CLASS(class), USB_IFACE_SUBCLASS(subclass), USB_DRIVER_INFO(info)
+
+#define USB_DRIVER_INFO(n) \
+ .driver_info = (n)
+
+#define USB_GET_DRIVER_INFO(did) \
+ (did)->driver_info
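+
+/*
+ * Example match table (sketch; the vendor/product IDs and
+ * "MY_DRIVER_FLAG" are hypothetical):
+ *
+ * static const struct usb_device_id my_devs[] = {
+ *     {USB_VPI(0x1234, 0x5678, MY_DRIVER_FLAG)},
+ * };
+ *
+ * A driver's probe routine would then pass this table to
+ * usbd_lookup_id_by_uaa(my_devs, sizeof(my_devs), uaa).
+ */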
+
+/*
+ * The following structure keeps information that is used to match
+ * against an array of "usb_device_id" elements.
+ */
+struct usbd_lookup_info {
+ uint16_t idVendor;
+ uint16_t idProduct;
+ uint16_t bcdDevice;
+ uint8_t bDeviceClass;
+ uint8_t bDeviceSubClass;
+ uint8_t bDeviceProtocol;
+ uint8_t bInterfaceClass;
+ uint8_t bInterfaceSubClass;
+ uint8_t bInterfaceProtocol;
+ uint8_t bIfaceIndex;
+ uint8_t bIfaceNum;
+ uint8_t bConfigIndex;
+ uint8_t bConfigNum;
+};
+
+/* Structure used by probe and attach */
+
+struct usb_attach_arg {
+ struct usbd_lookup_info info;
+ device_t temp_dev; /* for internal use */
+ unsigned long driver_info; /* for internal use */
+ void *driver_ivar;
+ struct usb_device *device; /* current device */
+ struct usb_interface *iface; /* current interface */
+ enum usb_hc_mode usb_mode; /* host or device mode */
+ uint8_t port;
+ uint8_t use_generic; /* hint for generic drivers */
+ uint8_t dev_state;
+#define UAA_DEV_READY 0
+#define UAA_DEV_DISABLED 1
+#define UAA_DEV_EJECTING 2
+};
+
+/*
+ * The following is a wrapper for the callout structure to ease
+ * porting the code to other platforms.
+ */
+struct usb_callout {
+ struct callout co;
+};
+#define usb_callout_init_mtx(c,m,f) callout_init_mtx(&(c)->co,m,f)
+#define usb_callout_reset(c,t,f,d) callout_reset(&(c)->co,t,f,d)
+#define usb_callout_stop(c) callout_stop(&(c)->co)
+#define usb_callout_drain(c) callout_drain(&(c)->co)
+#define usb_callout_pending(c) callout_pending(&(c)->co)
+
+/* USB transfer states */
+
+#define USB_ST_SETUP 0
+#define USB_ST_TRANSFERRED 1
+#define USB_ST_ERROR 2
+
+/* USB handle request states */
+#define USB_HR_NOT_COMPLETE 0
+#define USB_HR_COMPLETE_OK 1
+#define USB_HR_COMPLETE_ERR 2
+
+/*
+ * The following macro will return the current state of a USB
+ * transfer as defined by the "USB_ST_XXX" enums.
+ */
+#define USB_GET_STATE(xfer) (usbd_xfer_state(xfer))
+
+/*
+ * The following structure defines the USB process message header.
+ */
+struct usb_proc_msg {
+ TAILQ_ENTRY(usb_proc_msg) pm_qentry;
+ usb_proc_callback_t *pm_callback;
+ usb_size_t pm_num;
+};
+
+#define USB_FIFO_TX 0
+#define USB_FIFO_RX 1
+
+/*
+ * Locking note for the following functions. All the
+ * "usb_fifo_cmd_t" and "usb_fifo_filter_t" functions are called
+ * locked. The others are called unlocked.
+ */
+struct usb_fifo_methods {
+ usb_fifo_open_t *f_open;
+ usb_fifo_close_t *f_close;
+ usb_fifo_ioctl_t *f_ioctl;
+ /*
+ * NOTE: The post-ioctl callback is called after the USB reference
+ * gets locked in the IOCTL handler:
+ */
+ usb_fifo_ioctl_t *f_ioctl_post;
+ usb_fifo_cmd_t *f_start_read;
+ usb_fifo_cmd_t *f_stop_read;
+ usb_fifo_cmd_t *f_start_write;
+ usb_fifo_cmd_t *f_stop_write;
+ usb_fifo_filter_t *f_filter_read;
+ usb_fifo_filter_t *f_filter_write;
+ const char *basename[4];
+ const char *postfix[4];
+};
+
+struct usb_fifo_sc {
+ struct usb_fifo *fp[2];
+ struct cdev* dev;
+};
+
+const char *usbd_errstr(usb_error_t error);
+void *usbd_find_descriptor(struct usb_device *udev, void *id,
+ uint8_t iface_index, uint8_t type, uint8_t type_mask,
+ uint8_t subtype, uint8_t subtype_mask);
+struct usb_config_descriptor *usbd_get_config_descriptor(
+ struct usb_device *udev);
+struct usb_device_descriptor *usbd_get_device_descriptor(
+ struct usb_device *udev);
+struct usb_interface *usbd_get_iface(struct usb_device *udev,
+ uint8_t iface_index);
+struct usb_interface_descriptor *usbd_get_interface_descriptor(
+ struct usb_interface *iface);
+struct usb_endpoint *usbd_get_endpoint(struct usb_device *udev, uint8_t iface_index,
+ const struct usb_config *setup);
+struct usb_endpoint *usbd_get_ep_by_addr(struct usb_device *udev, uint8_t ea_val);
+usb_error_t usbd_interface_count(struct usb_device *udev, uint8_t *count);
+enum usb_hc_mode usbd_get_mode(struct usb_device *udev);
+enum usb_dev_speed usbd_get_speed(struct usb_device *udev);
+void device_set_usb_desc(device_t dev);
+void usb_pause_mtx(struct mtx *mtx, int _ticks);
+
+const struct usb_device_id *usbd_lookup_id_by_info(
+ const struct usb_device_id *id, usb_size_t sizeof_id,
+ const struct usbd_lookup_info *info);
+int usbd_lookup_id_by_uaa(const struct usb_device_id *id,
+ usb_size_t sizeof_id, struct usb_attach_arg *uaa);
+
+usb_error_t usbd_do_request_flags(struct usb_device *udev, struct mtx *mtx,
+ struct usb_device_request *req, void *data, uint16_t flags,
+ uint16_t *actlen, usb_timeout_t timeout);
+#define usbd_do_request(u,m,r,d) \
+ usbd_do_request_flags(u,m,r,d,0,NULL,USB_DEFAULT_TIMEOUT)
+
+uint8_t usbd_clear_stall_callback(struct usb_xfer *xfer1,
+ struct usb_xfer *xfer2);
+uint8_t usbd_get_interface_altindex(struct usb_interface *iface);
+usb_error_t usbd_set_alt_interface_index(struct usb_device *udev,
+ uint8_t iface_index, uint8_t alt_index);
+uint32_t usbd_get_isoc_fps(struct usb_device *udev);
+usb_error_t usbd_transfer_setup(struct usb_device *udev,
+ const uint8_t *ifaces, struct usb_xfer **pxfer,
+ const struct usb_config *setup_start, uint16_t n_setup,
+ void *priv_sc, struct mtx *priv_mtx);
+void usbd_transfer_submit(struct usb_xfer *xfer);
+void usbd_transfer_clear_stall(struct usb_xfer *xfer);
+void usbd_transfer_drain(struct usb_xfer *xfer);
+uint8_t usbd_transfer_pending(struct usb_xfer *xfer);
+void usbd_transfer_start(struct usb_xfer *xfer);
+void usbd_transfer_stop(struct usb_xfer *xfer);
+void usbd_transfer_unsetup(struct usb_xfer **pxfer, uint16_t n_setup);
+void usbd_transfer_poll(struct usb_xfer **ppxfer, uint16_t max);
+void usbd_set_parent_iface(struct usb_device *udev, uint8_t iface_index,
+ uint8_t parent_index);
+uint8_t usbd_get_bus_index(struct usb_device *udev);
+uint8_t usbd_get_device_index(struct usb_device *udev);
+void usbd_set_power_mode(struct usb_device *udev, uint8_t power_mode);
+uint8_t usbd_filter_power_mode(struct usb_device *udev, uint8_t power_mode);
+uint8_t usbd_device_attached(struct usb_device *udev);
+
+void usbd_xfer_status(struct usb_xfer *xfer, int *actlen, int *sumlen,
+ int *aframes, int *nframes);
+struct usb_page_cache *usbd_xfer_get_frame(struct usb_xfer *xfer,
+ usb_frcount_t frindex);
+void *usbd_xfer_softc(struct usb_xfer *xfer);
+void *usbd_xfer_get_priv(struct usb_xfer *xfer);
+void usbd_xfer_set_priv(struct usb_xfer *xfer, void *);
+void usbd_xfer_set_interval(struct usb_xfer *xfer, int);
+uint8_t usbd_xfer_state(struct usb_xfer *xfer);
+void usbd_xfer_set_frame_data(struct usb_xfer *xfer, usb_frcount_t frindex,
+ void *ptr, usb_frlength_t len);
+void usbd_xfer_frame_data(struct usb_xfer *xfer, usb_frcount_t frindex,
+ void **ptr, int *len);
+void usbd_xfer_set_frame_offset(struct usb_xfer *xfer, usb_frlength_t offset,
+ usb_frcount_t frindex);
+usb_frlength_t usbd_xfer_max_len(struct usb_xfer *xfer);
+usb_frlength_t usbd_xfer_max_framelen(struct usb_xfer *xfer);
+usb_frcount_t usbd_xfer_max_frames(struct usb_xfer *xfer);
+uint8_t usbd_xfer_get_fps_shift(struct usb_xfer *xfer);
+usb_frlength_t usbd_xfer_frame_len(struct usb_xfer *xfer,
+ usb_frcount_t frindex);
+void usbd_xfer_set_frame_len(struct usb_xfer *xfer, usb_frcount_t frindex,
+ usb_frlength_t len);
+void usbd_xfer_set_timeout(struct usb_xfer *xfer, int timeout);
+void usbd_xfer_set_frames(struct usb_xfer *xfer, usb_frcount_t n);
+void usbd_xfer_set_stall(struct usb_xfer *xfer);
+int usbd_xfer_is_stalled(struct usb_xfer *xfer);
+void usbd_xfer_set_flag(struct usb_xfer *xfer, int flag);
+void usbd_xfer_clr_flag(struct usb_xfer *xfer, int flag);
+uint16_t usbd_xfer_get_timestamp(struct usb_xfer *xfer);
+
+void usbd_copy_in(struct usb_page_cache *cache, usb_frlength_t offset,
+ const void *ptr, usb_frlength_t len);
+int usbd_copy_in_user(struct usb_page_cache *cache, usb_frlength_t offset,
+ const void *ptr, usb_frlength_t len);
+void usbd_copy_out(struct usb_page_cache *cache, usb_frlength_t offset,
+ void *ptr, usb_frlength_t len);
+int usbd_copy_out_user(struct usb_page_cache *cache, usb_frlength_t offset,
+ void *ptr, usb_frlength_t len);
+void usbd_get_page(struct usb_page_cache *pc, usb_frlength_t offset,
+ struct usb_page_search *res);
+void usbd_m_copy_in(struct usb_page_cache *cache, usb_frlength_t dst_offset,
+ struct mbuf *m, usb_size_t src_offset, usb_frlength_t src_len);
+void usbd_frame_zero(struct usb_page_cache *cache, usb_frlength_t offset,
+ usb_frlength_t len);
+
+int usb_fifo_attach(struct usb_device *udev, void *priv_sc,
+ struct mtx *priv_mtx, struct usb_fifo_methods *pm,
+ struct usb_fifo_sc *f_sc, uint16_t unit, uint16_t subunit,
+ uint8_t iface_index, uid_t uid, gid_t gid, int mode);
+void usb_fifo_detach(struct usb_fifo_sc *f_sc);
+int usb_fifo_alloc_buffer(struct usb_fifo *f, uint32_t bufsize,
+ uint16_t nbuf);
+void usb_fifo_free_buffer(struct usb_fifo *f);
+uint32_t usb_fifo_put_bytes_max(struct usb_fifo *fifo);
+void usb_fifo_put_data(struct usb_fifo *fifo, struct usb_page_cache *pc,
+ usb_frlength_t offset, usb_frlength_t len, uint8_t what);
+void usb_fifo_put_data_linear(struct usb_fifo *fifo, void *ptr,
+ usb_size_t len, uint8_t what);
+uint8_t usb_fifo_put_data_buffer(struct usb_fifo *f, void *ptr, usb_size_t len);
+void usb_fifo_put_data_error(struct usb_fifo *fifo);
+uint8_t usb_fifo_get_data(struct usb_fifo *fifo, struct usb_page_cache *pc,
+ usb_frlength_t offset, usb_frlength_t len, usb_frlength_t *actlen,
+ uint8_t what);
+uint8_t usb_fifo_get_data_linear(struct usb_fifo *fifo, void *ptr,
+ usb_size_t len, usb_size_t *actlen, uint8_t what);
+uint8_t usb_fifo_get_data_buffer(struct usb_fifo *f, void **pptr,
+ usb_size_t *plen);
+void usb_fifo_reset(struct usb_fifo *f);
+void usb_fifo_wakeup(struct usb_fifo *f);
+void usb_fifo_get_data_error(struct usb_fifo *fifo);
+void *usb_fifo_softc(struct usb_fifo *fifo);
+void usb_fifo_set_close_zlp(struct usb_fifo *, uint8_t);
+void usb_fifo_set_write_defrag(struct usb_fifo *, uint8_t);
+void usb_fifo_free(struct usb_fifo *f);
+#endif /* _KERNEL */
+#endif /* _USB_USBDI_HH_ */
diff --git a/rtems/freebsd/dev/usb/usbdi_util.h b/rtems/freebsd/dev/usb/usbdi_util.h
new file mode 100644
index 00000000..32931e52
--- /dev/null
+++ b/rtems/freebsd/dev/usb/usbdi_util.h
@@ -0,0 +1,91 @@
+/*-
+ * Copyright (c) 2009 Andrew Thompson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+#ifndef _USB_USBDI_UTIL_HH_
+#define _USB_USBDI_UTIL_HH_
+
+struct cv;
+
+/* structures */
+
+struct usb_idesc_parse_state {
+ struct usb_descriptor *desc;
+ uint8_t iface_index; /* current interface index */
+ uint8_t iface_no_last;
+ uint8_t iface_index_alt; /* current alternate setting */
+};
+
+/* prototypes */
+
+usb_error_t usbd_do_request_proc(struct usb_device *udev, struct usb_process *pproc,
+ struct usb_device_request *req, void *data, uint16_t flags,
+ uint16_t *actlen, usb_timeout_t timeout);
+
+struct usb_descriptor *usb_desc_foreach(struct usb_config_descriptor *cd,
+ struct usb_descriptor *desc);
+struct usb_interface_descriptor *usb_idesc_foreach(
+ struct usb_config_descriptor *cd,
+ struct usb_idesc_parse_state *ps);
+struct usb_endpoint_descriptor *usb_edesc_foreach(
+ struct usb_config_descriptor *cd,
+ struct usb_endpoint_descriptor *ped);
+struct usb_endpoint_ss_comp_descriptor *usb_ed_comp_foreach(
+ struct usb_config_descriptor *cd,
+ struct usb_endpoint_ss_comp_descriptor *ped);
+uint8_t usbd_get_no_descriptors(struct usb_config_descriptor *cd,
+ uint8_t type);
+uint8_t usbd_get_no_alts(struct usb_config_descriptor *cd,
+ struct usb_interface_descriptor *id);
+
+usb_error_t usbd_req_get_report(struct usb_device *udev, struct mtx *mtx,
+ void *data, uint16_t len, uint8_t iface_index, uint8_t type,
+ uint8_t id);
+usb_error_t usbd_req_get_report_descriptor(struct usb_device *udev,
+ struct mtx *mtx, void *d, uint16_t size,
+ uint8_t iface_index);
+usb_error_t usbd_req_get_string_any(struct usb_device *udev, struct mtx *mtx,
+ char *buf, uint16_t len, uint8_t string_index);
+usb_error_t usbd_req_get_string_desc(struct usb_device *udev, struct mtx *mtx,
+ void *sdesc, uint16_t max_len, uint16_t lang_id,
+ uint8_t string_index);
+usb_error_t usbd_req_set_config(struct usb_device *udev, struct mtx *mtx,
+ uint8_t conf);
+usb_error_t usbd_req_set_alt_interface_no(struct usb_device *udev,
+ struct mtx *mtx, uint8_t iface_index, uint8_t alt_no);
+usb_error_t usbd_req_set_idle(struct usb_device *udev, struct mtx *mtx,
+ uint8_t iface_index, uint8_t duration, uint8_t id);
+usb_error_t usbd_req_set_protocol(struct usb_device *udev, struct mtx *mtx,
+ uint8_t iface_index, uint16_t report);
+usb_error_t usbd_req_set_report(struct usb_device *udev, struct mtx *mtx,
+ void *data, uint16_t len, uint8_t iface_index,
+ uint8_t type, uint8_t id);
+
+/* The following functions will not return NULL strings. */
+
+const char *usb_get_manufacturer(struct usb_device *);
+const char *usb_get_product(struct usb_device *);
+const char *usb_get_serial(struct usb_device *);
+
+#endif /* _USB_USBDI_UTIL_HH_ */
diff --git a/rtems/freebsd/dev/usb/usbhid.h b/rtems/freebsd/dev/usb/usbhid.h
new file mode 100644
index 00000000..af032322
--- /dev/null
+++ b/rtems/freebsd/dev/usb/usbhid.h
@@ -0,0 +1,244 @@
+/* $FreeBSD$ */
+/*-
+ * Copyright (c) 2008 Hans Petter Selasky. All rights reserved.
+ * Copyright (c) 1998 The NetBSD Foundation, Inc. All rights reserved.
+ * Copyright (c) 1998 Lennart Augustsson. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _USB_HID_HH_
+#define _USB_HID_HH_
+
+#include <rtems/freebsd/dev/usb/usb_endian.h>
+
+#define UR_GET_HID_DESCRIPTOR 0x06
+#define UDESC_HID 0x21
+#define UDESC_REPORT 0x22
+#define UDESC_PHYSICAL 0x23
+#define UR_SET_HID_DESCRIPTOR 0x07
+#define UR_GET_REPORT 0x01
+#define UR_SET_REPORT 0x09
+#define UR_GET_IDLE 0x02
+#define UR_SET_IDLE 0x0a
+#define UR_GET_PROTOCOL 0x03
+#define UR_SET_PROTOCOL 0x0b
+
+struct usb_hid_descriptor {
+ uByte bLength;
+ uByte bDescriptorType;
+ uWord bcdHID;
+ uByte bCountryCode;
+ uByte bNumDescriptors;
+ struct {
+ uByte bDescriptorType;
+ uWord wDescriptorLength;
+ } descrs[1];
+} __packed;
+
+#define USB_HID_DESCRIPTOR_SIZE(n) (9+((n)*3))
+
+/* Usage pages */
+#define HUP_UNDEFINED 0x0000
+#define HUP_GENERIC_DESKTOP 0x0001
+#define HUP_SIMULATION 0x0002
+#define HUP_VR_CONTROLS 0x0003
+#define HUP_SPORTS_CONTROLS 0x0004
+#define HUP_GAMING_CONTROLS 0x0005
+#define HUP_KEYBOARD 0x0007
+#define HUP_LEDS 0x0008
+#define HUP_BUTTON 0x0009
+#define HUP_ORDINALS 0x000a
+#define HUP_TELEPHONY 0x000b
+#define HUP_CONSUMER 0x000c
+#define HUP_DIGITIZERS 0x000d
+#define HUP_PHYSICAL_IFACE 0x000e
+#define HUP_UNICODE 0x0010
+#define HUP_ALPHANUM_DISPLAY 0x0014
+#define HUP_MONITOR 0x0080
+#define HUP_MONITOR_ENUM_VAL 0x0081
+#define HUP_VESA_VC 0x0082
+#define HUP_VESA_CMD 0x0083
+#define HUP_POWER 0x0084
+#define HUP_BATTERY_SYSTEM 0x0085
+#define HUP_BARCODE_SCANNER 0x008b
+#define HUP_SCALE 0x008c
+#define HUP_CAMERA_CONTROL 0x0090
+#define HUP_ARCADE 0x0091
+#define HUP_MICROSOFT 0xff00
+
+/* Usages, generic desktop */
+#define HUG_POINTER 0x0001
+#define HUG_MOUSE 0x0002
+#define HUG_JOYSTICK 0x0004
+#define HUG_GAME_PAD 0x0005
+#define HUG_KEYBOARD 0x0006
+#define HUG_KEYPAD 0x0007
+#define HUG_X 0x0030
+#define HUG_Y 0x0031
+#define HUG_Z 0x0032
+#define HUG_RX 0x0033
+#define HUG_RY 0x0034
+#define HUG_RZ 0x0035
+#define HUG_SLIDER 0x0036
+#define HUG_DIAL 0x0037
+#define HUG_WHEEL 0x0038
+#define HUG_HAT_SWITCH 0x0039
+#define HUG_COUNTED_BUFFER 0x003a
+#define HUG_BYTE_COUNT 0x003b
+#define HUG_MOTION_WAKEUP 0x003c
+#define HUG_VX 0x0040
+#define HUG_VY 0x0041
+#define HUG_VZ 0x0042
+#define HUG_VBRX 0x0043
+#define HUG_VBRY 0x0044
+#define HUG_VBRZ 0x0045
+#define HUG_VNO 0x0046
+#define HUG_TWHEEL 0x0048 /* M$ Wireless Intellimouse Wheel */
+#define HUG_SYSTEM_CONTROL 0x0080
+#define HUG_SYSTEM_POWER_DOWN 0x0081
+#define HUG_SYSTEM_SLEEP 0x0082
+#define HUG_SYSTEM_WAKEUP 0x0083
+#define HUG_SYSTEM_CONTEXT_MENU 0x0084
+#define HUG_SYSTEM_MAIN_MENU 0x0085
+#define HUG_SYSTEM_APP_MENU 0x0086
+#define HUG_SYSTEM_MENU_HELP 0x0087
+#define HUG_SYSTEM_MENU_EXIT 0x0088
+#define HUG_SYSTEM_MENU_SELECT 0x0089
+#define HUG_SYSTEM_MENU_RIGHT 0x008a
+#define HUG_SYSTEM_MENU_LEFT 0x008b
+#define HUG_SYSTEM_MENU_UP 0x008c
+#define HUG_SYSTEM_MENU_DOWN 0x008d
+#define HUG_APPLE_EJECT 0x00b8
+
+/* Usages Digitizers */
+#define HUD_UNDEFINED 0x0000
+#define HUD_TIP_PRESSURE 0x0030
+#define HUD_BARREL_PRESSURE 0x0031
+#define HUD_IN_RANGE 0x0032
+#define HUD_TOUCH 0x0033
+#define HUD_UNTOUCH 0x0034
+#define HUD_TAP 0x0035
+#define HUD_QUALITY 0x0036
+#define HUD_DATA_VALID 0x0037
+#define HUD_TRANSDUCER_INDEX 0x0038
+#define HUD_TABLET_FKEYS 0x0039
+#define HUD_PROGRAM_CHANGE_KEYS 0x003a
+#define HUD_BATTERY_STRENGTH 0x003b
+#define HUD_INVERT 0x003c
+#define HUD_X_TILT 0x003d
+#define HUD_Y_TILT 0x003e
+#define HUD_AZIMUTH 0x003f
+#define HUD_ALTITUDE 0x0040
+#define HUD_TWIST 0x0041
+#define HUD_TIP_SWITCH 0x0042
+#define HUD_SEC_TIP_SWITCH 0x0043
+#define HUD_BARREL_SWITCH 0x0044
+#define HUD_ERASER 0x0045
+#define HUD_TABLET_PICK 0x0046
+
+/* Usages, Consumer */
+#define HUC_AC_PAN 0x0238
+
+#define HID_USAGE2(p,u) (((p) << 16) | (u))
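+
+/*
+ * Example: HID_USAGE2(HUP_GENERIC_DESKTOP, HUG_MOUSE) == 0x00010002.
+ */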
+
+#define UHID_INPUT_REPORT 0x01
+#define UHID_OUTPUT_REPORT 0x02
+#define UHID_FEATURE_REPORT 0x03
+
+/* Bits in the input/output/feature items */
+#define HIO_CONST 0x001
+#define HIO_VARIABLE 0x002
+#define HIO_RELATIVE 0x004
+#define HIO_WRAP 0x008
+#define HIO_NONLINEAR 0x010
+#define HIO_NOPREF 0x020
+#define HIO_NULLSTATE 0x040
+#define HIO_VOLATILE 0x080
+#define HIO_BUFBYTES 0x100
+
+#ifdef _KERNEL
+struct usb_config_descriptor;
+
+enum hid_kind {
+ hid_input, hid_output, hid_feature, hid_collection, hid_endcollection
+};
+
+struct hid_location {
+ uint32_t size;
+ uint32_t count;
+ uint32_t pos;
+};
+
+struct hid_item {
+ /* Global */
+ int32_t _usage_page;
+ int32_t logical_minimum;
+ int32_t logical_maximum;
+ int32_t physical_minimum;
+ int32_t physical_maximum;
+ int32_t unit_exponent;
+ int32_t unit;
+ int32_t report_ID;
+ /* Local */
+ int32_t usage;
+ int32_t usage_minimum;
+ int32_t usage_maximum;
+ int32_t designator_index;
+ int32_t designator_minimum;
+ int32_t designator_maximum;
+ int32_t string_index;
+ int32_t string_minimum;
+ int32_t string_maximum;
+ int32_t set_delimiter;
+ /* Misc */
+ int32_t collection;
+ int collevel;
+ enum hid_kind kind;
+ uint32_t flags;
+ /* Location */
+ struct hid_location loc;
+};
+
+/* prototypes from "usb_hid.c" */
+
+struct hid_data *hid_start_parse(const void *d, usb_size_t len, int kindset);
+void hid_end_parse(struct hid_data *s);
+int hid_get_item(struct hid_data *s, struct hid_item *h);
+int hid_report_size(const void *buf, usb_size_t len, enum hid_kind k,
+ uint8_t *id);
+int hid_locate(const void *desc, usb_size_t size, uint32_t usage,
+ enum hid_kind kind, uint8_t index, struct hid_location *loc,
+ uint32_t *flags, uint8_t *id);
+int32_t hid_get_data(const uint8_t *buf, usb_size_t len,
+ struct hid_location *loc);
+uint32_t hid_get_data_unsigned(const uint8_t *buf, usb_size_t len,
+ struct hid_location *loc);
+int hid_is_collection(const void *desc, usb_size_t size, uint32_t usage);
+struct usb_hid_descriptor *hid_get_descriptor_from_usb(
+ struct usb_config_descriptor *cd,
+ struct usb_interface_descriptor *id);
+usb_error_t usbd_req_get_hid_desc(struct usb_device *udev, struct mtx *mtx,
+ void **descp, uint16_t *sizep, struct malloc_type *mem,
+ uint8_t iface_index);
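+
+/*
+ * Typical report descriptor walk (sketch; "desc" and "size" are
+ * hypothetical caller-provided values):
+ *
+ * struct hid_data *hd = hid_start_parse(desc, size, 1 << hid_input);
+ * struct hid_item hi;
+ *
+ * while (hid_get_item(hd, &hi)) {
+ *     // inspect "hi.usage", "hi.loc", "hi.flags", ...
+ * }
+ * hid_end_parse(hd);
+ */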
+#endif /* _KERNEL */
+#endif /* _USB_HID_HH_ */
diff --git a/rtems/freebsd/fs/devfs/devfs_int.h b/rtems/freebsd/fs/devfs/devfs_int.h
new file mode 100644
index 00000000..66a3863c
--- /dev/null
+++ b/rtems/freebsd/fs/devfs/devfs_int.h
@@ -0,0 +1,90 @@
+/*-
+ * Copyright (c) 2005 Poul-Henning Kamp. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * This file documents a private interface and it SHALL only be used
+ * by kern/kern_conf.c and fs/devfs/...
+ */
+
+#ifndef _FS_DEVFS_DEVFS_INT_HH_
+#define _FS_DEVFS_DEVFS_INT_HH_
+
+#include <rtems/freebsd/sys/queue.h>
+
+#ifdef _KERNEL
+
+struct devfs_dirent;
+
+struct cdev_privdata {
+ struct file *cdpd_fp;
+ void *cdpd_data;
+ void (*cdpd_dtr)(void *);
+ LIST_ENTRY(cdev_privdata) cdpd_list;
+};
+
+struct cdev_priv {
+ struct cdev cdp_c;
+ TAILQ_ENTRY(cdev_priv) cdp_list;
+
+ u_int cdp_inode;
+
+ u_int cdp_flags;
+#define CDP_ACTIVE (1 << 0)
+#define CDP_SCHED_DTR (1 << 1)
+
+#ifndef __rtems__
+ u_int cdp_inuse;
+ u_int cdp_maxdirent;
+ struct devfs_dirent **cdp_dirents;
+ struct devfs_dirent *cdp_dirent0;
+
+ TAILQ_ENTRY(cdev_priv) cdp_dtr_list;
+ void (*cdp_dtr_cb)(void *);
+ void *cdp_dtr_cb_arg;
+
+ LIST_HEAD(, cdev_privdata) cdp_fdpriv;
+#endif /* __rtems__ */
+};
+
+#define cdev2priv(c) member2struct(cdev_priv, cdp_c, c)
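+
+/*
+ * cdev2priv() recovers the enclosing cdev_priv from the embedded cdev in
+ * container-of style, e.g. (illustrative only):
+ *
+ *	struct cdev_priv *cdp = cdev2priv(dev);
+ */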
+
+struct cdev *devfs_alloc(int);
+void devfs_free(struct cdev *);
+void devfs_create(struct cdev *dev);
+void devfs_destroy(struct cdev *dev);
+void devfs_destroy_cdevpriv(struct cdev_privdata *p);
+
+extern struct unrhdr *devfs_inos;
+extern struct mtx devmtx;
+extern struct mtx devfs_de_interlock;
+extern struct sx clone_drain_lock;
+extern struct mtx cdevpriv_mtx;
+extern TAILQ_HEAD(cdev_priv_list, cdev_priv) cdevp_list;
+
+#endif /* _KERNEL */
+
+#endif /* !_FS_DEVFS_DEVFS_INT_HH_ */
diff --git a/rtems/freebsd/geom/geom_disk.h b/rtems/freebsd/geom/geom_disk.h
new file mode 100644
index 00000000..936ffd88
--- /dev/null
+++ b/rtems/freebsd/geom/geom_disk.h
@@ -0,0 +1 @@
+/* EMPTY */
diff --git a/rtems/freebsd/kern/init_main.c b/rtems/freebsd/kern/init_main.c
new file mode 100644
index 00000000..1a855d30
--- /dev/null
+++ b/rtems/freebsd/kern/init_main.c
@@ -0,0 +1,877 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 1995 Terrence R. Lambert
+ * All rights reserved.
+ *
+ * Copyright (c) 1982, 1986, 1989, 1991, 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ * (c) UNIX System Laboratories, Inc.
+ * All or some portions of this file are derived from material licensed
+ * to the University of California by American Telephone and Telegraph
+ * Co. or Unix System Laboratories, Inc. and are reproduced herein with
+ * the permission of UNIX System Laboratories, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)init_main.c 8.9 (Berkeley) 1/21/94
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/freebsd/local/opt_ddb.h>
+#include <rtems/freebsd/local/opt_init_path.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/exec.h>
+#include <rtems/freebsd/sys/file.h>
+#include <rtems/freebsd/sys/filedesc.h>
+#include <rtems/freebsd/sys/jail.h>
+#include <rtems/freebsd/sys/ktr.h>
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/mount.h>
+#include <rtems/freebsd/sys/mutex.h>
+#include <rtems/freebsd/sys/syscallsubr.h>
+#include <rtems/freebsd/sys/sysctl.h>
+#include <rtems/freebsd/sys/proc.h>
+#include <rtems/freebsd/sys/resourcevar.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/signalvar.h>
+#ifndef __rtems__
+#include <rtems/freebsd/sys/vnode.h>
+#endif
+#include <rtems/freebsd/sys/sysent.h>
+#include <rtems/freebsd/sys/reboot.h>
+#include <rtems/freebsd/sys/sched.h>
+#include <rtems/freebsd/sys/sx.h>
+#include <rtems/freebsd/sys/sysproto.h>
+#ifndef __rtems__
+#include <rtems/freebsd/sys/vmmeter.h>
+#endif
+#include <rtems/freebsd/sys/unistd.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/conf.h>
+#include <rtems/freebsd/sys/cpuset.h>
+
+#include <rtems/freebsd/machine/cpu.h>
+
+#include <rtems/freebsd/security/audit/audit.h>
+#include <rtems/freebsd/security/mac/mac_framework.h>
+
+#ifndef __rtems__
+#include <rtems/freebsd/vm/vm.h>
+#include <rtems/freebsd/vm/vm_param.h>
+#include <rtems/freebsd/vm/pmap.h>
+#include <rtems/freebsd/vm/vm_map.h>
+#endif
+#include <rtems/freebsd/sys/copyright.h>
+
+#include <rtems/freebsd/ddb/ddb.h>
+#include <rtems/freebsd/ddb/db_sym.h>
+
+void mi_startup(void); /* Should be elsewhere */
+
+#ifndef __rtems__
+/* Components of the first process -- never freed. */
+static struct session session0;
+static struct pgrp pgrp0;
+struct proc proc0;
+struct thread thread0 __aligned(16);
+struct vmspace vmspace0;
+struct proc *initproc;
+
+int boothowto = 0; /* initialized so that it can be patched */
+SYSCTL_INT(_debug, OID_AUTO, boothowto, CTLFLAG_RD, &boothowto, 0, "");
+int bootverbose;
+SYSCTL_INT(_debug, OID_AUTO, bootverbose, CTLFLAG_RW, &bootverbose, 0, "");
+
+/*
+ * This ensures that there is at least one entry so that the sysinit_set
+ * symbol is not undefined. A subsystem ID of SI_SUB_DUMMY is never
+ * executed.
+ */
+SYSINIT(placeholder, SI_SUB_DUMMY, SI_ORDER_ANY, NULL, NULL);
+
+/*
+ * The sysinit table itself. Items are checked off as they are run.
+ * If we want to register new sysinit types, add them to newsysinit.
+ */
+
+#else /* __rtems__ */
+
+struct proc proc0;
+
+#endif /* __rtems__ */
+SET_DECLARE(sysinit_set, struct sysinit);
+#ifndef __rtems__
+struct sysinit **sysinit, **sysinit_end;
+struct sysinit **newsysinit, **newsysinit_end;
+
+/*
+ * Merge a new sysinit set into the current set, reallocating it if
+ * necessary. This can only be called after malloc is running.
+ */
+void
+sysinit_add(struct sysinit **set, struct sysinit **set_end)
+{
+ struct sysinit **newset;
+ struct sysinit **sipp;
+ struct sysinit **xipp;
+ int count;
+
+ count = set_end - set;
+ if (newsysinit)
+ count += newsysinit_end - newsysinit;
+ else
+ count += sysinit_end - sysinit;
+ newset = malloc(count * sizeof(*sipp), M_TEMP, M_NOWAIT);
+ if (newset == NULL)
+ panic("cannot malloc for sysinit");
+ xipp = newset;
+ if (newsysinit)
+ for (sipp = newsysinit; sipp < newsysinit_end; sipp++)
+ *xipp++ = *sipp;
+ else
+ for (sipp = sysinit; sipp < sysinit_end; sipp++)
+ *xipp++ = *sipp;
+ for (sipp = set; sipp < set_end; sipp++)
+ *xipp++ = *sipp;
+ if (newsysinit)
+ free(newsysinit, M_TEMP);
+ newsysinit = newset;
+ newsysinit_end = newset + count;
+}
+#endif /* __rtems__ */
+
+/*
+ * System startup; initialize the world, create process 0, mount root
+ * filesystem, and fork to create init and pagedaemon. Most of the
+ * hard work is done in the lower-level initialization routines including
+ * startup(), which does memory initialization and autoconfiguration.
+ *
+ * This allows simple addition of new kernel subsystems that require
+ * boot time initialization. It also allows substitution of a subsystem
+ * (for instance, a scheduler, kernel profiler, or VM system) by object
+ * module. Finally, it allows for optional "kernel threads".
+ */
+void
+mi_startup(void)
+{
+
+ register struct sysinit **sipp; /* system initialization*/
+ register struct sysinit **xipp; /* interior loop of sort*/
+ register struct sysinit *save; /* bubble*/
+#ifdef __rtems__
+ struct sysinit **sysinit = NULL;
+ struct sysinit **sysinit_end = NULL;
+#endif /* __rtems__ */
+
+#if defined(VERBOSE_SYSINIT)
+ int last;
+ int verbose;
+#endif
+
+#ifndef __rtems__
+ if (boothowto & RB_VERBOSE)
+ bootverbose++;
+#endif /* __rtems__ */
+
+ if (sysinit == NULL) {
+ sysinit = SET_BEGIN(sysinit_set);
+ sysinit_end = SET_LIMIT(sysinit_set);
+ }
+
+restart:
+ /*
+ * Perform a bubble sort of the system initialization objects by
+ * their subsystem (primary key) and order (secondary key).
+ */
+ for (sipp = sysinit; sipp < sysinit_end; sipp++) {
+ for (xipp = sipp + 1; xipp < sysinit_end; xipp++) {
+ if ((*sipp)->subsystem < (*xipp)->subsystem ||
+ ((*sipp)->subsystem == (*xipp)->subsystem &&
+ (*sipp)->order <= (*xipp)->order))
+ continue; /* skip*/
+ save = *sipp;
+ *sipp = *xipp;
+ *xipp = save;
+ }
+ }
+
+#if defined(VERBOSE_SYSINIT)
+ last = SI_SUB_COPYRIGHT;
+ verbose = 0;
+#if !defined(DDB)
+ printf("VERBOSE_SYSINIT: DDB not enabled, symbol lookups disabled.\n");
+#endif
+#endif
+
+ /*
+ * Traverse the (now) ordered list of system initialization tasks.
+ * Perform each task, and continue on to the next task.
+ *
+ * The last item on the list is expected to be the scheduler,
+ * which will not return.
+ */
+ for (sipp = sysinit; sipp < sysinit_end; sipp++) {
+
+ if ((*sipp)->subsystem == SI_SUB_DUMMY)
+ continue; /* skip dummy task(s)*/
+
+ if ((*sipp)->subsystem == SI_SUB_DONE)
+ continue;
+
+#if defined(VERBOSE_SYSINIT)
+ if ((*sipp)->subsystem > last) {
+ verbose = 1;
+ last = (*sipp)->subsystem;
+ printf("subsystem %x\n", last);
+ }
+ if (verbose) {
+#if defined(DDB)
+ const char *name;
+ c_db_sym_t sym;
+ db_expr_t offset;
+
+ sym = db_search_symbol((vm_offset_t)(*sipp)->func,
+ DB_STGY_PROC, &offset);
+ db_symbol_values(sym, &name, NULL);
+ if (name != NULL)
+ printf(" %s(%p)... ", name, (*sipp)->udata);
+ else
+#endif
+ printf(" %p(%p)... ", (*sipp)->func,
+ (*sipp)->udata);
+ }
+#endif
+
+ /* Call function */
+ (*((*sipp)->func))((*sipp)->udata);
+
+#if defined(VERBOSE_SYSINIT)
+ if (verbose)
+ printf("done.\n");
+#endif
+
+ /* Check off the one we've just done */
+ (*sipp)->subsystem = SI_SUB_DONE;
+
+#ifndef __rtems__
+ /* Check if we've installed more sysinit items via KLD */
+ if (newsysinit != NULL) {
+ if (sysinit != SET_BEGIN(sysinit_set))
+ free(sysinit, M_TEMP);
+ sysinit = newsysinit;
+ sysinit_end = newsysinit_end;
+ newsysinit = NULL;
+ newsysinit_end = NULL;
+ goto restart;
+ }
+#endif /* __rtems__ */
+ }
+
+#ifndef __rtems__
+ panic("Shouldn't get here!");
+ /* NOTREACHED*/
+#endif /* __rtems__ */
+}
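+
+/*
+ * Illustrative only: a subsystem hooks into mi_startup() by declaring a
+ * SYSINIT; the entry is collected into the sysinit_set linker set, sorted
+ * by (subsystem, order) above, and run once.  The "foo_init"/"fooinit"
+ * names below are hypothetical.
+ *
+ *	static void
+ *	foo_init(void *dummy __unused)
+ *	{
+ *		...	(one-time boot initialization)
+ *	}
+ *	SYSINIT(fooinit, SI_SUB_LOCK, SI_ORDER_FIRST, foo_init, NULL);
+ */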
+
+
+#ifndef __rtems__
+/*
+ ***************************************************************************
+ ****
+ **** The following SYSINIT's belong elsewhere, but have not yet
+ **** been moved.
+ ****
+ ***************************************************************************
+ */
+static void
+print_caddr_t(void *data)
+{
+ printf("%s", (char *)data);
+}
+
+static void
+print_version(void *data __unused)
+{
+ int len;
+
+ /* Strip a trailing newline from version. */
+ len = strlen(version);
+ while (len > 0 && version[len - 1] == '\n')
+ len--;
+ printf("%.*s %s\n", len, version, machine);
+}
+
+SYSINIT(announce, SI_SUB_COPYRIGHT, SI_ORDER_FIRST, print_caddr_t,
+ copyright);
+SYSINIT(trademark, SI_SUB_COPYRIGHT, SI_ORDER_SECOND, print_caddr_t,
+ trademark);
+SYSINIT(version, SI_SUB_COPYRIGHT, SI_ORDER_THIRD, print_version, NULL);
+
+#ifdef WITNESS
+static char wit_warn[] =
+ "WARNING: WITNESS option enabled, expect reduced performance.\n";
+SYSINIT(witwarn, SI_SUB_COPYRIGHT, SI_ORDER_THIRD + 1,
+ print_caddr_t, wit_warn);
+SYSINIT(witwarn2, SI_SUB_RUN_SCHEDULER, SI_ORDER_THIRD + 1,
+ print_caddr_t, wit_warn);
+#endif
+
+#ifdef DIAGNOSTIC
+static char diag_warn[] =
+ "WARNING: DIAGNOSTIC option enabled, expect reduced performance.\n";
+SYSINIT(diagwarn, SI_SUB_COPYRIGHT, SI_ORDER_THIRD + 2,
+ print_caddr_t, diag_warn);
+SYSINIT(diagwarn2, SI_SUB_RUN_SCHEDULER, SI_ORDER_THIRD + 2,
+ print_caddr_t, diag_warn);
+#endif
+
+static int
+null_fetch_syscall_args(struct thread *td __unused,
+ struct syscall_args *sa __unused)
+{
+
+ panic("null_fetch_syscall_args");
+}
+
+static void
+null_set_syscall_retval(struct thread *td __unused, int error __unused)
+{
+
+ panic("null_set_syscall_retval");
+}
+
+struct sysentvec null_sysvec = {
+ .sv_size = 0,
+ .sv_table = NULL,
+ .sv_mask = 0,
+ .sv_sigsize = 0,
+ .sv_sigtbl = NULL,
+ .sv_errsize = 0,
+ .sv_errtbl = NULL,
+ .sv_transtrap = NULL,
+ .sv_fixup = NULL,
+ .sv_sendsig = NULL,
+ .sv_sigcode = NULL,
+ .sv_szsigcode = NULL,
+ .sv_prepsyscall = NULL,
+ .sv_name = "null",
+ .sv_coredump = NULL,
+ .sv_imgact_try = NULL,
+ .sv_minsigstksz = 0,
+ .sv_pagesize = PAGE_SIZE,
+ .sv_minuser = VM_MIN_ADDRESS,
+ .sv_maxuser = VM_MAXUSER_ADDRESS,
+ .sv_usrstack = USRSTACK,
+ .sv_psstrings = PS_STRINGS,
+ .sv_stackprot = VM_PROT_ALL,
+ .sv_copyout_strings = NULL,
+ .sv_setregs = NULL,
+ .sv_fixlimit = NULL,
+ .sv_maxssiz = NULL,
+ .sv_flags = 0,
+ .sv_set_syscall_retval = null_set_syscall_retval,
+ .sv_fetch_syscall_args = null_fetch_syscall_args,
+ .sv_syscallnames = NULL,
+};
+
+/*
+ ***************************************************************************
+ ****
+ **** The two following SYSINIT's are proc0 specific glue code. I am not
+ **** convinced that they cannot be safely combined, but their order of
+ **** operation has been kept the same as in the original init_main.c
+ **** for right now.
+ ****
+ **** These probably belong in init_proc.c or kern_proc.c, since they
+ **** deal with proc0 (the fork template process).
+ ****
+ ***************************************************************************
+ */
+/* ARGSUSED*/
+static void
+proc0_init(void *dummy __unused)
+{
+ struct proc *p;
+ unsigned i;
+ struct thread *td;
+
+ GIANT_REQUIRED;
+ p = &proc0;
+ td = &thread0;
+
+ /*
+ * Initialize magic number and osrel.
+ */
+ p->p_magic = P_MAGIC;
+ p->p_osrel = osreldate;
+
+ /*
+ * Initialize thread and process structures.
+ */
+ procinit(); /* set up proc zone */
+ threadinit(); /* set up UMA zones */
+
+ /*
+ * Initialise scheduler resources.
+ * Add scheduler specific parts to proc, thread as needed.
+ */
+ schedinit(); /* scheduler gets its house in order */
+ /*
+ * Initialize sleep queue hash table
+ */
+ sleepinit();
+
+ /*
+ * additional VM structures
+ */
+ vm_init2();
+
+ /*
+ * Create process 0 (the swapper).
+ */
+ LIST_INSERT_HEAD(&allproc, p, p_list);
+ LIST_INSERT_HEAD(PIDHASH(0), p, p_hash);
+ mtx_init(&pgrp0.pg_mtx, "process group", NULL, MTX_DEF | MTX_DUPOK);
+ p->p_pgrp = &pgrp0;
+ LIST_INSERT_HEAD(PGRPHASH(0), &pgrp0, pg_hash);
+ LIST_INIT(&pgrp0.pg_members);
+ LIST_INSERT_HEAD(&pgrp0.pg_members, p, p_pglist);
+
+ pgrp0.pg_session = &session0;
+ mtx_init(&session0.s_mtx, "session", NULL, MTX_DEF);
+ refcount_init(&session0.s_count, 1);
+ session0.s_leader = p;
+
+ p->p_sysent = &null_sysvec;
+ p->p_flag = P_SYSTEM | P_INMEM;
+ p->p_state = PRS_NORMAL;
+ knlist_init_mtx(&p->p_klist, &p->p_mtx);
+ STAILQ_INIT(&p->p_ktr);
+ p->p_nice = NZERO;
+ td->td_tid = PID_MAX + 1;
+ td->td_state = TDS_RUNNING;
+ td->td_pri_class = PRI_TIMESHARE;
+ td->td_user_pri = PUSER;
+ td->td_base_user_pri = PUSER;
+ td->td_priority = PVM;
+ td->td_base_pri = PUSER;
+ td->td_oncpu = 0;
+ td->td_flags = TDF_INMEM|TDP_KTHREAD;
+ td->td_cpuset = cpuset_thread0();
+ prison0.pr_cpuset = cpuset_ref(td->td_cpuset);
+ p->p_peers = 0;
+ p->p_leader = p;
+
+
+ strncpy(p->p_comm, "kernel", sizeof (p->p_comm));
+ strncpy(td->td_name, "swapper", sizeof (td->td_name));
+
+ callout_init(&p->p_itcallout, CALLOUT_MPSAFE);
+ callout_init_mtx(&p->p_limco, &p->p_mtx, 0);
+ callout_init(&td->td_slpcallout, CALLOUT_MPSAFE);
+
+ /* Create credentials. */
+ p->p_ucred = crget();
+ p->p_ucred->cr_ngroups = 1; /* group 0 */
+ p->p_ucred->cr_uidinfo = uifind(0);
+ p->p_ucred->cr_ruidinfo = uifind(0);
+ p->p_ucred->cr_prison = &prison0;
+#ifdef AUDIT
+ audit_cred_kproc0(p->p_ucred);
+#endif
+#ifdef MAC
+ mac_cred_create_swapper(p->p_ucred);
+#endif
+ td->td_ucred = crhold(p->p_ucred);
+
+ /* Create sigacts. */
+ p->p_sigacts = sigacts_alloc();
+
+ /* Initialize signal state for process 0. */
+ siginit(&proc0);
+
+ /* Create the file descriptor table. */
+ p->p_fd = fdinit(NULL);
+ p->p_fdtol = NULL;
+
+ /* Create the limits structures. */
+ p->p_limit = lim_alloc();
+ for (i = 0; i < RLIM_NLIMITS; i++)
+ p->p_limit->pl_rlimit[i].rlim_cur =
+ p->p_limit->pl_rlimit[i].rlim_max = RLIM_INFINITY;
+ p->p_limit->pl_rlimit[RLIMIT_NOFILE].rlim_cur =
+ p->p_limit->pl_rlimit[RLIMIT_NOFILE].rlim_max = maxfiles;
+ p->p_limit->pl_rlimit[RLIMIT_NPROC].rlim_cur =
+ p->p_limit->pl_rlimit[RLIMIT_NPROC].rlim_max = maxproc;
+ i = ptoa(cnt.v_free_count);
+ p->p_limit->pl_rlimit[RLIMIT_RSS].rlim_max = i;
+ p->p_limit->pl_rlimit[RLIMIT_MEMLOCK].rlim_max = i;
+ p->p_limit->pl_rlimit[RLIMIT_MEMLOCK].rlim_cur = i / 3;
+ p->p_cpulimit = RLIM_INFINITY;
+
+ p->p_stats = pstats_alloc();
+
+ /* Allocate a prototype map so we have something to fork. */
+ pmap_pinit0(vmspace_pmap(&vmspace0));
+ p->p_vmspace = &vmspace0;
+ vmspace0.vm_refcnt = 1;
+
+ /*
+ * proc0 is not expected to enter usermode, so there is no special
+ * handling for sv_minuser here, as is done for exec_new_vmspace().
+ */
+ vm_map_init(&vmspace0.vm_map, p->p_sysent->sv_minuser,
+ p->p_sysent->sv_maxuser);
+ vmspace0.vm_map.pmap = vmspace_pmap(&vmspace0);
+
+ /*-
+ * Call the init and ctor for the new thread and proc. We wait
+ * to do this until all other structures are fairly sane.
+ */
+ EVENTHANDLER_INVOKE(process_init, p);
+ EVENTHANDLER_INVOKE(thread_init, td);
+ EVENTHANDLER_INVOKE(process_ctor, p);
+ EVENTHANDLER_INVOKE(thread_ctor, td);
+
+ /*
+ * Charge root for one process.
+ */
+ (void)chgproccnt(p->p_ucred->cr_ruidinfo, 1, 0);
+}
+SYSINIT(p0init, SI_SUB_INTRINSIC, SI_ORDER_FIRST, proc0_init, NULL);
+
+/* ARGSUSED*/
+static void
+proc0_post(void *dummy __unused)
+{
+ struct timespec ts;
+ struct proc *p;
+ struct rusage ru;
+ struct thread *td;
+
+ /*
+ * Now we can look at the time, having had a chance to verify the
+ * time from the filesystem. Pretend that proc0 started now.
+ */
+ sx_slock(&allproc_lock);
+ FOREACH_PROC_IN_SYSTEM(p) {
+ microuptime(&p->p_stats->p_start);
+ PROC_SLOCK(p);
+ rufetch(p, &ru); /* Clears thread stats */
+ PROC_SUNLOCK(p);
+ p->p_rux.rux_runtime = 0;
+ p->p_rux.rux_uticks = 0;
+ p->p_rux.rux_sticks = 0;
+ p->p_rux.rux_iticks = 0;
+ FOREACH_THREAD_IN_PROC(p, td) {
+ td->td_runtime = 0;
+ }
+ }
+ sx_sunlock(&allproc_lock);
+ PCPU_SET(switchtime, cpu_ticks());
+ PCPU_SET(switchticks, ticks);
+
+ /*
+ * Give the ``random'' number generator a thump.
+ */
+ nanotime(&ts);
+ srandom(ts.tv_sec ^ ts.tv_nsec);
+}
+SYSINIT(p0post, SI_SUB_INTRINSIC_POST, SI_ORDER_FIRST, proc0_post, NULL);
+
+static void
+random_init(void *dummy __unused)
+{
+
+ /*
+ * After the CPU has been started we have some randomness on most
+ * platforms via get_cyclecount(). For platforms that don't,
+ * we will reseed random(9) in proc0_post() as well.
+ */
+ srandom(get_cyclecount());
+}
+SYSINIT(random, SI_SUB_RANDOM, SI_ORDER_FIRST, random_init, NULL);
+
+/*
+ ***************************************************************************
+ ****
+ **** The following SYSINIT's and glue code should be moved to the
+ **** respective files on a per subsystem basis.
+ ****
+ ***************************************************************************
+ */
+
+
+/*
+ ***************************************************************************
+ ****
+ **** The following code probably belongs in another file, like
+ **** kern/init_init.c.
+ ****
+ ***************************************************************************
+ */
+
+/*
+ * List of paths to try when searching for "init".
+ */
+static char init_path[MAXPATHLEN] =
+#ifdef INIT_PATH
+ __XSTRING(INIT_PATH);
+#else
+ "/sbin/init:/sbin/oinit:/sbin/init.bak:/rescue/init:/stand/sysinstall";
+#endif
+SYSCTL_STRING(_kern, OID_AUTO, init_path, CTLFLAG_RD, init_path, 0,
+ "Path used to search the init process");
+
+/*
+ * Shutdown timeout of init(8).
+ * Unused within kernel, but used to control init(8), hence do not remove.
+ */
+#ifndef INIT_SHUTDOWN_TIMEOUT
+#define INIT_SHUTDOWN_TIMEOUT 120
+#endif
+static int init_shutdown_timeout = INIT_SHUTDOWN_TIMEOUT;
+SYSCTL_INT(_kern, OID_AUTO, init_shutdown_timeout,
+ CTLFLAG_RW, &init_shutdown_timeout, 0, "");
+
+/*
+ * Start the initial user process; try exec'ing each pathname in init_path.
+ * The program is invoked with one argument containing the boot flags.
+ */
+static void
+start_init(void *dummy)
+{
+ vm_offset_t addr;
+ struct execve_args args;
+ int options, error;
+ char *var, *path, *next, *s;
+ char *ucp, **uap, *arg0, *arg1;
+ struct thread *td;
+ struct proc *p;
+
+ mtx_lock(&Giant);
+
+ GIANT_REQUIRED;
+
+ td = curthread;
+ p = td->td_proc;
+
+ vfs_mountroot();
+
+ /*
+ * Need just enough stack to hold the faked-up "execve()" arguments.
+ */
+ addr = p->p_sysent->sv_usrstack - PAGE_SIZE;
+ if (vm_map_find(&p->p_vmspace->vm_map, NULL, 0, &addr, PAGE_SIZE,
+ FALSE, VM_PROT_ALL, VM_PROT_ALL, 0) != 0)
+ panic("init: couldn't allocate argument space");
+ p->p_vmspace->vm_maxsaddr = (caddr_t)addr;
+ p->p_vmspace->vm_ssize = 1;
+
+ if ((var = getenv("init_path")) != NULL) {
+ strlcpy(init_path, var, sizeof(init_path));
+ freeenv(var);
+ }
+
+ for (path = init_path; *path != '\0'; path = next) {
+ while (*path == ':')
+ path++;
+ if (*path == '\0')
+ break;
+ for (next = path; *next != '\0' && *next != ':'; next++)
+ /* nothing */ ;
+ if (bootverbose)
+ printf("start_init: trying %.*s\n", (int)(next - path),
+ path);
+
+ /*
+ * Move out the boot flag argument.
+ */
+ options = 0;
+ ucp = (char *)p->p_sysent->sv_usrstack;
+ (void)subyte(--ucp, 0); /* trailing zero */
+ if (boothowto & RB_SINGLE) {
+ (void)subyte(--ucp, 's');
+ options = 1;
+ }
+#ifdef notyet
+ if (boothowto & RB_FASTBOOT) {
+ (void)subyte(--ucp, 'f');
+ options = 1;
+ }
+#endif
+
+#ifdef BOOTCDROM
+ (void)subyte(--ucp, 'C');
+ options = 1;
+#endif
+
+ if (options == 0)
+ (void)subyte(--ucp, '-');
+ (void)subyte(--ucp, '-'); /* leading hyphen */
+ arg1 = ucp;
+
+ /*
+ * Move out the file name (also arg 0).
+ */
+ (void)subyte(--ucp, 0);
+ for (s = next - 1; s >= path; s--)
+ (void)subyte(--ucp, *s);
+ arg0 = ucp;
+
+ /*
+ * Move out the arg pointers.
+ */
+ uap = (char **)((intptr_t)ucp & ~(sizeof(intptr_t)-1));
+ (void)suword((caddr_t)--uap, (long)0); /* terminator */
+ (void)suword((caddr_t)--uap, (long)(intptr_t)arg1);
+ (void)suword((caddr_t)--uap, (long)(intptr_t)arg0);
+
+ /*
+ * Point at the arguments.
+ */
+ args.fname = arg0;
+ args.argv = uap;
+ args.envv = NULL;
+
+ /*
+ * Now try to exec the program. If it can't be run for any
+ * reason other than that it doesn't exist, complain.
+ *
+ * Otherwise, return via fork_trampoline() all the way
+ * to user mode as init!
+ */
+ if ((error = execve(td, &args)) == 0) {
+ mtx_unlock(&Giant);
+ return;
+ }
+ if (error != ENOENT)
+ printf("exec %.*s: error %d\n", (int)(next - path),
+ path, error);
+ }
+ printf("init: not found in path %s\n", init_path);
+ panic("no init");
+}
+
+/*
+ * Like kproc_create(), but runs in its own address space.
+ * We do this early to reserve pid 1.
+ *
+ * Note special case - do not make it runnable yet. Other work
+ * in progress will change this more.
+ */
+static void
+create_init(const void *udata __unused)
+{
+ struct ucred *newcred, *oldcred;
+ int error;
+
+ error = fork1(&thread0, RFFDG | RFPROC | RFSTOPPED, 0, &initproc);
+ if (error)
+ panic("cannot fork init: %d\n", error);
+ KASSERT(initproc->p_pid == 1, ("create_init: initproc->p_pid != 1"));
+ /* divorce init's credentials from the kernel's */
+ newcred = crget();
+ PROC_LOCK(initproc);
+ initproc->p_flag |= P_SYSTEM | P_INMEM;
+ oldcred = initproc->p_ucred;
+ crcopy(newcred, oldcred);
+#ifdef MAC
+ mac_cred_create_init(newcred);
+#endif
+#ifdef AUDIT
+ audit_cred_proc1(newcred);
+#endif
+ initproc->p_ucred = newcred;
+ PROC_UNLOCK(initproc);
+ crfree(oldcred);
+ cred_update_thread(FIRST_THREAD_IN_PROC(initproc));
+ cpu_set_fork_handler(FIRST_THREAD_IN_PROC(initproc), start_init, NULL);
+}
+SYSINIT(init, SI_SUB_CREATE_INIT, SI_ORDER_FIRST, create_init, NULL);
+
+/*
+ * Make it runnable now.
+ */
+static void
+kick_init(const void *udata __unused)
+{
+ struct thread *td;
+
+ td = FIRST_THREAD_IN_PROC(initproc);
+ thread_lock(td);
+ TD_SET_CAN_RUN(td);
+ sched_add(td, SRQ_BORING);
+ thread_unlock(td);
+}
+SYSINIT(kickinit, SI_SUB_KTHREAD_INIT, SI_ORDER_FIRST, kick_init, NULL);
+#else /* __rtems__ */
+/*
+ ***************************************************************************
+ ****
+ **** The two following SYSINIT's are proc0 specific glue code. I am not
+ **** convinced that they cannot be safely combined, but their order of
+ **** operation has been kept the same as in the original init_main.c
+ **** for right now.
+ ****
+ **** These probably belong in init_proc.c or kern_proc.c, since they
+ **** deal with proc0 (the fork template process).
+ ****
+ ***************************************************************************
+ */
+/* ARGSUSED*/
+static void
+proc0_init(void *dummy __unused)
+{
+ struct proc *p;
+
+ GIANT_REQUIRED;
+ p = &proc0;
+
+ /* Create credentials. */
+ p->p_ucred = crget();
+ p->p_ucred->cr_ngroups = 1; /* group 0 */
+ p->p_ucred->cr_uidinfo = uifind(0);
+ p->p_ucred->cr_ruidinfo = uifind(0);
+ p->p_ucred->cr_prison = &prison0;
+#ifdef AUDIT
+ audit_cred_kproc0(p->p_ucred);
+#endif
+#ifdef MAC
+ mac_cred_create_swapper(p->p_ucred);
+#endif
+}
+SYSINIT(p0init, SI_SUB_INTRINSIC, SI_ORDER_FIRST, proc0_init, NULL);
+#endif /* __rtems__ */
diff --git a/rtems/freebsd/kern/kern_mbuf.c b/rtems/freebsd/kern/kern_mbuf.c
new file mode 100644
index 00000000..e4ef9a71
--- /dev/null
+++ b/rtems/freebsd/kern/kern_mbuf.c
@@ -0,0 +1,706 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 2004, 2005,
+ * Bosko Milekic <bmilekic@FreeBSD.org>. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/freebsd/local/opt_param.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/domain.h>
+#include <rtems/freebsd/sys/eventhandler.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/protosw.h>
+#include <rtems/freebsd/sys/smp.h>
+#include <rtems/freebsd/sys/sysctl.h>
+
+#include <rtems/freebsd/security/mac/mac_framework.h>
+
+#ifndef __rtems__
+#include <rtems/freebsd/vm/vm.h>
+#include <rtems/freebsd/vm/vm_extern.h>
+#include <rtems/freebsd/vm/vm_kern.h>
+#include <rtems/freebsd/vm/vm_page.h>
+#endif
+#include <rtems/freebsd/vm/uma.h>
+#include <rtems/freebsd/vm/uma_int.h>
+#include <rtems/freebsd/vm/uma_dbg.h>
+
+/*
+ * In FreeBSD, Mbufs and Mbuf Clusters are allocated from UMA
+ * Zones.
+ *
+ * Mbuf Clusters (2K, contiguous) are allocated from the Cluster
+ * Zone. The Zone can be capped at kern.ipc.nmbclusters, if the
+ * administrator so desires.
+ *
+ * Mbufs are allocated from a UMA Master Zone called the Mbuf
+ * Zone.
+ *
+ * Additionally, FreeBSD provides a Packet Zone, which it
+ * configures as a Secondary Zone to the Mbuf Master Zone,
+ * thus sharing backend Slab kegs with the Mbuf Master Zone.
+ *
+ * Thus common-case allocations and locking are simplified:
+ *
+ * m_clget() m_getcl()
+ * | |
+ * | .------------>[(Packet Cache)] m_get(), m_gethdr()
+ * | | [ Packet ] |
+ * [(Cluster Cache)] [ Secondary ] [ (Mbuf Cache) ]
+ * [ Cluster Zone ] [ Zone ] [ Mbuf Master Zone ]
+ * | \________ |
+ * [ Cluster Keg ] \ /
+ * | [ Mbuf Keg ]
+ * [ Cluster Slabs ] |
+ * | [ Mbuf Slabs ]
+ * \____________(VM)_________________/
+ *
+ *
+ * Whenever an object is allocated with uma_zalloc() out of
+ * one of the Zones, its _ctor_ function is executed. Likewise,
+ * on any deallocation through uma_zfree(), the _dtor_ function
+ * is executed.
+ *
+ * Caches are per-CPU and are filled from the Master Zone.
+ *
+ * Whenever an object is allocated from the underlying global
+ * memory pool it gets pre-initialized with the _zinit_ functions.
+ * When the Kegs are overfull, objects get decommissioned with
+ * _zfini_ functions and freed back to the global memory pool.
+ *
+ */
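+
+/*
+ * Illustrative only -- the common allocation paths through these zones
+ * (standard mbuf(9) API, not defined in this file):
+ *
+ *	struct mbuf *m;
+ *
+ *	m = m_getcl(M_WAITOK, MT_DATA, M_PKTHDR);	(Packet Zone: mbuf + cluster)
+ *	...
+ *	m_freem(m);					(back to the per-CPU caches)
+ *
+ *	m = m_gethdr(M_WAITOK, MT_DATA);		(Mbuf Master Zone only)
+ */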
+
+int nmbclusters; /* limits number of mbuf clusters */
+int nmbjumbop; /* limits number of page size jumbo clusters */
+int nmbjumbo9; /* limits number of 9k jumbo clusters */
+int nmbjumbo16; /* limits number of 16k jumbo clusters */
+struct mbstat mbstat;
+
+/*
+ * tunable_mbinit() has to be run before init_maxsockets(), thus
+ * the SYSINIT order below is SI_ORDER_MIDDLE while init_maxsockets()
+ * runs at SI_ORDER_ANY.
+ */
+static void
+tunable_mbinit(void *dummy)
+{
+ TUNABLE_INT_FETCH("kern.ipc.nmbclusters", &nmbclusters);
+
+ /* This has to be done before VM init. */
+ if (nmbclusters == 0)
+ nmbclusters = 1024 + maxusers * 64;
+ nmbjumbop = nmbclusters / 2;
+ nmbjumbo9 = nmbjumbop / 2;
+ nmbjumbo16 = nmbjumbo9 / 2;
+}
+SYSINIT(tunable_mbinit, SI_SUB_TUNABLES, SI_ORDER_MIDDLE, tunable_mbinit, NULL);
+
+static int
+sysctl_nmbclusters(SYSCTL_HANDLER_ARGS)
+{
+ int error, newnmbclusters;
+
+ newnmbclusters = nmbclusters;
+ error = sysctl_handle_int(oidp, &newnmbclusters, 0, req);
+ if (error == 0 && req->newptr) {
+ if (newnmbclusters > nmbclusters) {
+ nmbclusters = newnmbclusters;
+ uma_zone_set_max(zone_clust, nmbclusters);
+#ifndef __rtems__
+ EVENTHANDLER_INVOKE(nmbclusters_change);
+#endif
+ } else
+ error = EINVAL;
+ }
+ return (error);
+}
+SYSCTL_PROC(_kern_ipc, OID_AUTO, nmbclusters, CTLTYPE_INT|CTLFLAG_RW,
+&nmbclusters, 0, sysctl_nmbclusters, "IU",
+ "Maximum number of mbuf clusters allowed");
+
+static int
+sysctl_nmbjumbop(SYSCTL_HANDLER_ARGS)
+{
+ int error, newnmbjumbop;
+
+ newnmbjumbop = nmbjumbop;
+ error = sysctl_handle_int(oidp, &newnmbjumbop, 0, req);
+ if (error == 0 && req->newptr) {
+ if (newnmbjumbop > nmbjumbop) {
+ nmbjumbop = newnmbjumbop;
+ uma_zone_set_max(zone_jumbop, nmbjumbop);
+ } else
+ error = EINVAL;
+ }
+ return (error);
+}
+SYSCTL_PROC(_kern_ipc, OID_AUTO, nmbjumbop, CTLTYPE_INT|CTLFLAG_RW,
+&nmbjumbop, 0, sysctl_nmbjumbop, "IU",
+ "Maximum number of mbuf page size jumbo clusters allowed");
+
+
+static int
+sysctl_nmbjumbo9(SYSCTL_HANDLER_ARGS)
+{
+ int error, newnmbjumbo9;
+
+ newnmbjumbo9 = nmbjumbo9;
+ error = sysctl_handle_int(oidp, &newnmbjumbo9, 0, req);
+ if (error == 0 && req->newptr) {
+ if (newnmbjumbo9 > nmbjumbo9) {
+ nmbjumbo9 = newnmbjumbo9;
+ uma_zone_set_max(zone_jumbo9, nmbjumbo9);
+ } else
+ error = EINVAL;
+ }
+ return (error);
+}
+SYSCTL_PROC(_kern_ipc, OID_AUTO, nmbjumbo9, CTLTYPE_INT|CTLFLAG_RW,
+&nmbjumbo9, 0, sysctl_nmbjumbo9, "IU",
+ "Maximum number of mbuf 9k jumbo clusters allowed");
+
+static int
+sysctl_nmbjumbo16(SYSCTL_HANDLER_ARGS)
+{
+ int error, newnmbjumbo16;
+
+ newnmbjumbo16 = nmbjumbo16;
+ error = sysctl_handle_int(oidp, &newnmbjumbo16, 0, req);
+ if (error == 0 && req->newptr) {
+ if (newnmbjumbo16 > nmbjumbo16) {
+ nmbjumbo16 = newnmbjumbo16;
+ uma_zone_set_max(zone_jumbo16, nmbjumbo16);
+ } else
+ error = EINVAL;
+ }
+ return (error);
+}
+SYSCTL_PROC(_kern_ipc, OID_AUTO, nmbjumbo16, CTLTYPE_INT|CTLFLAG_RW,
+&nmbjumbo16, 0, sysctl_nmbjumbo16, "IU",
+ "Maximum number of mbuf 16k jumbo clusters allowed");
+
+
+
+SYSCTL_STRUCT(_kern_ipc, OID_AUTO, mbstat, CTLFLAG_RD, &mbstat, mbstat,
+ "Mbuf general information and statistics");
+
+/*
+ * Zones from which we allocate.
+ */
+uma_zone_t zone_mbuf;
+uma_zone_t zone_clust;
+uma_zone_t zone_pack;
+uma_zone_t zone_jumbop;
+uma_zone_t zone_jumbo9;
+uma_zone_t zone_jumbo16;
+uma_zone_t zone_ext_refcnt;
+
+/*
+ * Local prototypes.
+ */
+static int mb_ctor_mbuf(void *, int, void *, int);
+static int mb_ctor_clust(void *, int, void *, int);
+static int mb_ctor_pack(void *, int, void *, int);
+static void mb_dtor_mbuf(void *, int, void *);
+static void mb_dtor_clust(void *, int, void *);
+static void mb_dtor_pack(void *, int, void *);
+static int mb_zinit_pack(void *, int, int);
+static void mb_zfini_pack(void *, int);
+
+static void mb_reclaim(void *);
+static void mbuf_init(void *);
+static void *mbuf_jumbo_alloc(uma_zone_t, int, u_int8_t *, int);
+
+/* Ensure that MSIZE doesn't break dtom() - it must be a power of 2 */
+CTASSERT((((MSIZE - 1) ^ MSIZE) + 1) >> 1 == MSIZE);
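+
+/*
+ * Worked example: for MSIZE == 256, (255 ^ 256) == 511 and
+ * (511 + 1) >> 1 == 256 == MSIZE, so the assertion holds; for a
+ * non-power-of-two such as 320, (319 ^ 320) == 127 and
+ * (127 + 1) >> 1 == 64 != 320.
+ */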
+
+/*
+ * Initialize FreeBSD Network buffer allocation.
+ */
+SYSINIT(mbuf, SI_SUB_MBUF, SI_ORDER_FIRST, mbuf_init, NULL);
+static void
+mbuf_init(void *dummy)
+{
+
+ /*
+ * Configure UMA zones for Mbufs, Clusters, and Packets.
+ */
+ zone_mbuf = uma_zcreate(MBUF_MEM_NAME, MSIZE,
+ mb_ctor_mbuf, mb_dtor_mbuf,
+#ifdef INVARIANTS
+ trash_init, trash_fini,
+#else
+ NULL, NULL,
+#endif
+ MSIZE - 1, UMA_ZONE_MAXBUCKET);
+
+ zone_clust = uma_zcreate(MBUF_CLUSTER_MEM_NAME, MCLBYTES,
+ mb_ctor_clust, mb_dtor_clust,
+#ifdef INVARIANTS
+ trash_init, trash_fini,
+#else
+ NULL, NULL,
+#endif
+ UMA_ALIGN_PTR, UMA_ZONE_REFCNT);
+ if (nmbclusters > 0)
+ uma_zone_set_max(zone_clust, nmbclusters);
+
+ zone_pack = uma_zsecond_create(MBUF_PACKET_MEM_NAME, mb_ctor_pack,
+ mb_dtor_pack, mb_zinit_pack, mb_zfini_pack, zone_mbuf);
+
+ /* Make jumbo frame zones too. Page size, 9k and 16k. */
+ zone_jumbop = uma_zcreate(MBUF_JUMBOP_MEM_NAME, MJUMPAGESIZE,
+ mb_ctor_clust, mb_dtor_clust,
+#ifdef INVARIANTS
+ trash_init, trash_fini,
+#else
+ NULL, NULL,
+#endif
+ UMA_ALIGN_PTR, UMA_ZONE_REFCNT);
+ if (nmbjumbop > 0)
+ uma_zone_set_max(zone_jumbop, nmbjumbop);
+
+ zone_jumbo9 = uma_zcreate(MBUF_JUMBO9_MEM_NAME, MJUM9BYTES,
+ mb_ctor_clust, mb_dtor_clust,
+#ifdef INVARIANTS
+ trash_init, trash_fini,
+#else
+ NULL, NULL,
+#endif
+ UMA_ALIGN_PTR, UMA_ZONE_REFCNT);
+ if (nmbjumbo9 > 0)
+ uma_zone_set_max(zone_jumbo9, nmbjumbo9);
+ uma_zone_set_allocf(zone_jumbo9, mbuf_jumbo_alloc);
+
+ zone_jumbo16 = uma_zcreate(MBUF_JUMBO16_MEM_NAME, MJUM16BYTES,
+ mb_ctor_clust, mb_dtor_clust,
+#ifdef INVARIANTS
+ trash_init, trash_fini,
+#else
+ NULL, NULL,
+#endif
+ UMA_ALIGN_PTR, UMA_ZONE_REFCNT);
+ if (nmbjumbo16 > 0)
+ uma_zone_set_max(zone_jumbo16, nmbjumbo16);
+ uma_zone_set_allocf(zone_jumbo16, mbuf_jumbo_alloc);
+
+ zone_ext_refcnt = uma_zcreate(MBUF_EXTREFCNT_MEM_NAME, sizeof(u_int),
+ NULL, NULL,
+ NULL, NULL,
+ UMA_ALIGN_PTR, UMA_ZONE_ZINIT);
+
+ /* uma_prealloc() goes here... */
+
+ /*
+ * Hook event handler for low-memory situation, used to
+ * drain protocols and push data back to the caches (UMA
+ * later pushes it back to VM).
+ */
+#ifndef __rtems__
+ EVENTHANDLER_REGISTER(vm_lowmem, mb_reclaim, NULL,
+ EVENTHANDLER_PRI_FIRST);
+#endif
+
+ /*
+ * [Re]set counters and local statistics knobs.
+ * XXX Some of these should go and be replaced, but UMA stat
+ * gathering needs to be revised.
+ */
+ mbstat.m_mbufs = 0;
+ mbstat.m_mclusts = 0;
+ mbstat.m_drain = 0;
+ mbstat.m_msize = MSIZE;
+ mbstat.m_mclbytes = MCLBYTES;
+ mbstat.m_minclsize = MINCLSIZE;
+ mbstat.m_mlen = MLEN;
+ mbstat.m_mhlen = MHLEN;
+ mbstat.m_numtypes = MT_NTYPES;
+
+ mbstat.m_mcfail = mbstat.m_mpfail = 0;
+ mbstat.sf_iocnt = 0;
+ mbstat.sf_allocwait = mbstat.sf_allocfail = 0;
+}
+
+/*
+ * UMA backend page allocator for the jumbo frame zones.
+ *
+ * Allocates kernel virtual memory that is backed by contiguous physical
+ * pages.
+ */
+static void *
+mbuf_jumbo_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
+{
+
+ /* Inform UMA that this allocator uses kernel_map/object. */
+ *flags = UMA_SLAB_KERNEL;
+#ifndef __rtems__
+ return ((void *)kmem_alloc_contig(kernel_map, bytes, wait,
+ (vm_paddr_t)0, ~(vm_paddr_t)0, 1, 0, VM_MEMATTR_DEFAULT));
+#else
+ return ((void *)malloc(bytes, M_TEMP, wait));
+#endif
+}
+
+/*
+ * Constructor for Mbuf master zone.
+ *
+ * The 'arg' pointer points to a mb_args structure which
+ * contains call-specific information required to support the
+ * mbuf allocation API. See mbuf.h.
+ */
+static int
+mb_ctor_mbuf(void *mem, int size, void *arg, int how)
+{
+ struct mbuf *m;
+ struct mb_args *args;
+#ifdef MAC
+ int error;
+#endif
+ int flags;
+ short type;
+
+#ifdef INVARIANTS
+ trash_ctor(mem, size, arg, how);
+#endif
+ m = (struct mbuf *)mem;
+ args = (struct mb_args *)arg;
+ flags = args->flags;
+ type = args->type;
+
+ /*
+ * The mbuf is initialized later. The caller has the
+ * responsibility to set up any MAC labels too.
+ */
+ if (type == MT_NOINIT)
+ return (0);
+
+ m->m_next = NULL;
+ m->m_nextpkt = NULL;
+ m->m_len = 0;
+ m->m_flags = flags;
+ m->m_type = type;
+ if (flags & M_PKTHDR) {
+ m->m_data = m->m_pktdat;
+ m->m_pkthdr.rcvif = NULL;
+ m->m_pkthdr.header = NULL;
+ m->m_pkthdr.len = 0;
+ m->m_pkthdr.csum_flags = 0;
+ m->m_pkthdr.csum_data = 0;
+ m->m_pkthdr.tso_segsz = 0;
+ m->m_pkthdr.ether_vtag = 0;
+ m->m_pkthdr.flowid = 0;
+ SLIST_INIT(&m->m_pkthdr.tags);
+#ifdef MAC
+ /* If the label init fails, fail the alloc */
+ error = mac_mbuf_init(m, how);
+ if (error)
+ return (error);
+#endif
+ } else
+ m->m_data = m->m_dat;
+ return (0);
+}
+
+/*
+ * The Mbuf master zone destructor.
+ */
+static void
+mb_dtor_mbuf(void *mem, int size, void *arg)
+{
+ struct mbuf *m;
+ unsigned long flags;
+
+ m = (struct mbuf *)mem;
+ flags = (unsigned long)arg;
+
+ if ((flags & MB_NOTAGS) == 0 && (m->m_flags & M_PKTHDR) != 0)
+ m_tag_delete_chain(m, NULL);
+ KASSERT((m->m_flags & M_EXT) == 0, ("%s: M_EXT set", __func__));
+ KASSERT((m->m_flags & M_NOFREE) == 0, ("%s: M_NOFREE set", __func__));
+#ifdef INVARIANTS
+ trash_dtor(mem, size, arg);
+#endif
+}
+
+/*
+ * The Mbuf Packet zone destructor.
+ */
+static void
+mb_dtor_pack(void *mem, int size, void *arg)
+{
+ struct mbuf *m;
+
+ m = (struct mbuf *)mem;
+ if ((m->m_flags & M_PKTHDR) != 0)
+ m_tag_delete_chain(m, NULL);
+
+ /* Make sure we've got a clean cluster back. */
+ KASSERT((m->m_flags & M_EXT) == M_EXT, ("%s: M_EXT not set", __func__));
+ KASSERT(m->m_ext.ext_buf != NULL, ("%s: ext_buf == NULL", __func__));
+ KASSERT(m->m_ext.ext_free == NULL, ("%s: ext_free != NULL", __func__));
+ KASSERT(m->m_ext.ext_arg1 == NULL, ("%s: ext_arg1 != NULL", __func__));
+ KASSERT(m->m_ext.ext_arg2 == NULL, ("%s: ext_arg2 != NULL", __func__));
+ KASSERT(m->m_ext.ext_size == MCLBYTES, ("%s: ext_size != MCLBYTES", __func__));
+ KASSERT(m->m_ext.ext_type == EXT_PACKET, ("%s: ext_type != EXT_PACKET", __func__));
+ KASSERT(*m->m_ext.ref_cnt == 1, ("%s: ref_cnt != 1", __func__));
+#ifdef INVARIANTS
+ trash_dtor(m->m_ext.ext_buf, MCLBYTES, arg);
+#endif
+ /*
+ * If there are processes blocked on zone_clust, waiting for pages
+ * to be freed up, cause them to be woken up by draining the
+ * packet zone. We are exposed to a race here (in the check for
+ * the UMA_ZFLAG_FULL) where we might miss the flag set, but that
+ * is deliberate. We don't want to acquire the zone lock for every
+ * mbuf free.
+ */
+ if (uma_zone_exhausted_nolock(zone_clust))
+ zone_drain(zone_pack);
+}
+
+/*
+ * The Cluster and Jumbo[PAGESIZE|9|16] zone constructor.
+ *
+ * Here the 'arg' pointer points to the Mbuf which we
+ * are configuring cluster storage for. If 'arg' is
+ * NULL we allocate just the cluster without attaching
+ * it to any mbuf. See mbuf.h.
+ */
+static int
+mb_ctor_clust(void *mem, int size, void *arg, int how)
+{
+ struct mbuf *m;
+ u_int *refcnt;
+ int type;
+ uma_zone_t zone;
+
+#ifdef INVARIANTS
+ trash_ctor(mem, size, arg, how);
+#endif
+ switch (size) {
+ case MCLBYTES:
+ type = EXT_CLUSTER;
+ zone = zone_clust;
+ break;
+#if MJUMPAGESIZE != MCLBYTES
+ case MJUMPAGESIZE:
+ type = EXT_JUMBOP;
+ zone = zone_jumbop;
+ break;
+#endif
+ case MJUM9BYTES:
+ type = EXT_JUMBO9;
+ zone = zone_jumbo9;
+ break;
+ case MJUM16BYTES:
+ type = EXT_JUMBO16;
+ zone = zone_jumbo16;
+ break;
+ default:
+ panic("unknown cluster size");
+ break;
+ }
+
+ m = (struct mbuf *)arg;
+ refcnt = uma_find_refcnt(zone, mem);
+ *refcnt = 1;
+ if (m != NULL) {
+ m->m_ext.ext_buf = (caddr_t)mem;
+ m->m_data = m->m_ext.ext_buf;
+ m->m_flags |= M_EXT;
+ m->m_ext.ext_free = NULL;
+ m->m_ext.ext_arg1 = NULL;
+ m->m_ext.ext_arg2 = NULL;
+ m->m_ext.ext_size = size;
+ m->m_ext.ext_type = type;
+ m->m_ext.ref_cnt = refcnt;
+ }
+
+ return (0);
+}
+
+/*
+ * The Mbuf Cluster zone destructor.
+ */
+static void
+mb_dtor_clust(void *mem, int size, void *arg)
+{
+#ifdef INVARIANTS
+ uma_zone_t zone;
+
+ zone = m_getzone(size);
+ KASSERT(*(uma_find_refcnt(zone, mem)) <= 1,
+ ("%s: refcnt incorrect %u", __func__,
+ *(uma_find_refcnt(zone, mem))) );
+
+ trash_dtor(mem, size, arg);
+#endif
+}
+
+/*
+ * The Packet secondary zone's init routine, executed on the
+ * object's transition from mbuf keg slab to zone cache.
+ */
+static int
+mb_zinit_pack(void *mem, int size, int how)
+{
+ struct mbuf *m;
+
+ m = (struct mbuf *)mem; /* m is virgin. */
+ if (uma_zalloc_arg(zone_clust, m, how) == NULL ||
+ m->m_ext.ext_buf == NULL)
+ return (ENOMEM);
+ m->m_ext.ext_type = EXT_PACKET; /* Override. */
+#ifdef INVARIANTS
+ trash_init(m->m_ext.ext_buf, MCLBYTES, how);
+#endif
+ return (0);
+}
+
+/*
+ * The Packet secondary zone's fini routine, executed on the
+ * object's transition from zone cache to keg slab.
+ */
+static void
+mb_zfini_pack(void *mem, int size)
+{
+ struct mbuf *m;
+
+ m = (struct mbuf *)mem;
+#ifdef INVARIANTS
+ trash_fini(m->m_ext.ext_buf, MCLBYTES);
+#endif
+ uma_zfree_arg(zone_clust, m->m_ext.ext_buf, NULL);
+#ifdef INVARIANTS
+ trash_dtor(mem, size, NULL);
+#endif
+}
+
+/*
+ * The "packet" keg constructor.
+ */
+static int
+mb_ctor_pack(void *mem, int size, void *arg, int how)
+{
+ struct mbuf *m;
+ struct mb_args *args;
+#ifdef MAC
+ int error;
+#endif
+ int flags;
+ short type;
+
+ m = (struct mbuf *)mem;
+ args = (struct mb_args *)arg;
+ flags = args->flags;
+ type = args->type;
+
+#ifdef INVARIANTS
+ trash_ctor(m->m_ext.ext_buf, MCLBYTES, arg, how);
+#endif
+ m->m_next = NULL;
+ m->m_nextpkt = NULL;
+ m->m_data = m->m_ext.ext_buf;
+ m->m_len = 0;
+ m->m_flags = (flags | M_EXT);
+ m->m_type = type;
+
+ if (flags & M_PKTHDR) {
+ m->m_pkthdr.rcvif = NULL;
+ m->m_pkthdr.len = 0;
+ m->m_pkthdr.header = NULL;
+ m->m_pkthdr.csum_flags = 0;
+ m->m_pkthdr.csum_data = 0;
+ m->m_pkthdr.tso_segsz = 0;
+ m->m_pkthdr.ether_vtag = 0;
+ m->m_pkthdr.flowid = 0;
+ SLIST_INIT(&m->m_pkthdr.tags);
+#ifdef MAC
+ /* If the label init fails, fail the alloc */
+ error = mac_mbuf_init(m, how);
+ if (error)
+ return (error);
+#endif
+ }
+ /* m_ext is already initialized. */
+
+ return (0);
+}
+
+int
+m_pkthdr_init(struct mbuf *m, int how)
+{
+#ifdef MAC
+ int error;
+#endif
+ m->m_data = m->m_pktdat;
+ SLIST_INIT(&m->m_pkthdr.tags);
+ m->m_pkthdr.rcvif = NULL;
+ m->m_pkthdr.header = NULL;
+ m->m_pkthdr.len = 0;
+ m->m_pkthdr.flowid = 0;
+ m->m_pkthdr.csum_flags = 0;
+ m->m_pkthdr.csum_data = 0;
+ m->m_pkthdr.tso_segsz = 0;
+ m->m_pkthdr.ether_vtag = 0;
+#ifdef MAC
+ /* If the label init fails, fail the alloc */
+ error = mac_mbuf_init(m, how);
+ if (error)
+ return (error);
+#endif
+
+ return (0);
+}
+
+/*
+ * This is the protocol drain routine.
+ *
+ * No locks should be held when this is called. The drain routines
+ * presently have to acquire some locks, which raises the possibility
+ * of a lock-order reversal.
+ */
+static void
+mb_reclaim(void *junk)
+{
+ struct domain *dp;
+ struct protosw *pr;
+
+ WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK | WARN_PANIC, NULL,
+ "mb_reclaim()");
+
+ for (dp = domains; dp != NULL; dp = dp->dom_next)
+ for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++)
+ if (pr->pr_drain != NULL)
+ (*pr->pr_drain)();
+}
diff --git a/rtems/freebsd/kern/kern_module.c b/rtems/freebsd/kern/kern_module.c
new file mode 100644
index 00000000..3dfc761c
--- /dev/null
+++ b/rtems/freebsd/kern/kern_module.c
@@ -0,0 +1,551 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 1997 Doug Rabson
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <rtems/freebsd/local/opt_compat.h>
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/eventhandler.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/sysproto.h>
+#include <rtems/freebsd/sys/sysent.h>
+#include <rtems/freebsd/sys/proc.h>
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/mutex.h>
+#include <rtems/freebsd/sys/reboot.h>
+#include <rtems/freebsd/sys/sx.h>
+#include <rtems/freebsd/sys/module.h>
+#include <rtems/freebsd/sys/linker.h>
+
+static MALLOC_DEFINE(M_MODULE, "module", "module data structures");
+
+struct module {
+ TAILQ_ENTRY(module) link; /* chain together all modules */
+#ifndef __rtems__
+ TAILQ_ENTRY(module) flink; /* all modules in a file */
+ struct linker_file *file; /* file which contains this module */
+ int refs; /* reference count */
+#endif /* __rtems__ */
+ int id; /* unique id number */
+ char *name; /* module name */
+ modeventhand_t handler; /* event handler */
+ void *arg; /* argument for handler */
+ modspecific_t data; /* module specific data */
+};
+
+#define MOD_EVENT(mod, type) (mod)->handler((mod), (type), (mod)->arg)
+
+static TAILQ_HEAD(modulelist, module) modules;
+struct sx modules_sx;
+static int nextid = 1;
+#ifndef __rtems__
+static void module_shutdown(void *, int);
+#endif /* __rtems__ */
+
+static int
+modevent_nop(module_t mod, int what, void *arg)
+{
+
+ switch(what) {
+ case MOD_LOAD:
+ return (0);
+ case MOD_UNLOAD:
+ return (EBUSY);
+ default:
+ return (EOPNOTSUPP);
+ }
+}
+
+static void
+module_init(void *arg)
+{
+
+ sx_init(&modules_sx, "module subsystem sx lock");
+ TAILQ_INIT(&modules);
+#ifndef __rtems__
+ EVENTHANDLER_REGISTER(shutdown_final, module_shutdown, NULL,
+ SHUTDOWN_PRI_DEFAULT);
+#endif /* __rtems__ */
+}
+
+SYSINIT(module, SI_SUB_KLD, SI_ORDER_FIRST, module_init, 0);
+
+#ifndef __rtems__
+static void
+module_shutdown(void *arg1, int arg2)
+{
+ module_t mod;
+
+ if (arg2 & RB_NOSYNC)
+ return;
+ mtx_lock(&Giant);
+ MOD_SLOCK;
+ TAILQ_FOREACH_REVERSE(mod, &modules, modulelist, link)
+ MOD_EVENT(mod, MOD_SHUTDOWN);
+ MOD_SUNLOCK;
+ mtx_unlock(&Giant);
+}
+#endif /* __rtems__ */
+
+void
+module_register_init(const void *arg)
+{
+ const moduledata_t *data = (const moduledata_t *)arg;
+ int error;
+ module_t mod;
+
+ mtx_lock(&Giant);
+ MOD_SLOCK;
+ mod = module_lookupbyname(data->name);
+ if (mod == NULL)
+ panic("module_register_init: module named %s not found\n",
+ data->name);
+ MOD_SUNLOCK;
+ error = MOD_EVENT(mod, MOD_LOAD);
+ if (error) {
+ MOD_EVENT(mod, MOD_UNLOAD);
+ MOD_XLOCK;
+ module_release(mod);
+ MOD_XUNLOCK;
+ printf("module_register_init: MOD_LOAD (%s, %p, %p) error"
+ " %d\n", data->name, (void *)data->evhand, data->priv,
+ error);
+ } else {
+#ifndef __rtems__
+ MOD_XLOCK;
+ if (mod->file) {
+ /*
+ * Once a module is successfully loaded, move
+ * it to the head of the module list for this
+ * linker file. This resorts the list so that
+ * when the kernel linker iterates over the
+ * modules to unload them, it will unload them
+ * in the reverse order they were loaded.
+ */
+ TAILQ_REMOVE(&mod->file->modules, mod, flink);
+ TAILQ_INSERT_HEAD(&mod->file->modules, mod, flink);
+ }
+ MOD_XUNLOCK;
+#endif /* __rtems__ */
+ }
+ mtx_unlock(&Giant);
+}
+
+int
+module_register(const moduledata_t *data, linker_file_t container)
+{
+ size_t namelen;
+ module_t newmod;
+
+ MOD_XLOCK;
+ newmod = module_lookupbyname(data->name);
+ if (newmod != NULL) {
+ MOD_XUNLOCK;
+ printf("module_register: module %s already exists!\n",
+ data->name);
+ return (EEXIST);
+ }
+ namelen = strlen(data->name) + 1;
+ newmod = malloc(sizeof(struct module) + namelen, M_MODULE, M_WAITOK);
+ if (newmod == NULL) {
+ MOD_XUNLOCK;
+ return (ENOMEM);
+ }
+#ifndef __rtems__
+ newmod->refs = 1;
+#endif /* __rtems__ */
+ newmod->id = nextid++;
+ newmod->name = (char *)(newmod + 1);
+ strcpy(newmod->name, data->name);
+ newmod->handler = data->evhand ? data->evhand : modevent_nop;
+ newmod->arg = data->priv;
+ bzero(&newmod->data, sizeof(newmod->data));
+ TAILQ_INSERT_TAIL(&modules, newmod, link);
+
+ if (container)
+#ifndef __rtems__
+ TAILQ_INSERT_TAIL(&container->modules, newmod, flink);
+ newmod->file = container;
+#else /* __rtems__ */
+ BSD_PANIC("not supported");
+#endif /* __rtems__ */
+ MOD_XUNLOCK;
+ return (0);
+}
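+
+/*
+ * Illustrative only: a client normally reaches module_register() through
+ * DECLARE_MODULE(), which also emits the SYSINIT glue that ends up calling
+ * module_register_init().  The "foo" names below are hypothetical.
+ *
+ *	static int
+ *	foo_modevent(module_t mod, int what, void *arg)
+ *	{
+ *		switch (what) {
+ *		case MOD_LOAD:
+ *			return (0);
+ *		case MOD_UNLOAD:
+ *			return (EBUSY);
+ *		default:
+ *			return (EOPNOTSUPP);
+ *		}
+ *	}
+ *
+ *	static moduledata_t foo_mod = {
+ *		"foo",		(module name)
+ *		foo_modevent,	(event handler)
+ *		NULL		(extra data)
+ *	};
+ *	DECLARE_MODULE(foo, foo_mod, SI_SUB_DRIVERS, SI_ORDER_MIDDLE);
+ */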
+
+#ifndef __rtems__
+void
+module_reference(module_t mod)
+{
+
+ MOD_XLOCK_ASSERT;
+
+ MOD_DPF(REFS, ("module_reference: before, refs=%d\n", mod->refs));
+ mod->refs++;
+}
+#endif /* __rtems__ */
+
+void
+module_release(module_t mod)
+{
+
+ MOD_XLOCK_ASSERT;
+
+#ifndef __rtems__
+ if (mod->refs <= 0)
+ panic("module_release: bad reference count");
+
+ MOD_DPF(REFS, ("module_release: before, refs=%d\n", mod->refs));
+
+ mod->refs--;
+ if (mod->refs == 0) {
+#endif /* __rtems__ */
+ TAILQ_REMOVE(&modules, mod, link);
+#ifndef __rtems__
+ if (mod->file)
+ TAILQ_REMOVE(&mod->file->modules, mod, flink);
+#endif /* __rtems__ */
+ free(mod, M_MODULE);
+#ifndef __rtems__
+ }
+#endif /* __rtems__ */
+}
+
+module_t
+module_lookupbyname(const char *name)
+{
+ module_t mod;
+ int err;
+
+ MOD_LOCK_ASSERT;
+
+ TAILQ_FOREACH(mod, &modules, link) {
+ err = strcmp(mod->name, name);
+ if (err == 0)
+ return (mod);
+ }
+ return (NULL);
+}
+
+#ifndef __rtems__
+module_t
+module_lookupbyid(int modid)
+{
+ module_t mod;
+
+ MOD_LOCK_ASSERT;
+
+ TAILQ_FOREACH(mod, &modules, link)
+ if (mod->id == modid)
+ return(mod);
+ return (NULL);
+}
+
+int
+module_quiesce(module_t mod)
+{
+ int error;
+
+ mtx_lock(&Giant);
+ error = MOD_EVENT(mod, MOD_QUIESCE);
+ mtx_unlock(&Giant);
+ if (error == EOPNOTSUPP || error == EINVAL)
+ error = 0;
+ return (error);
+}
+
+int
+module_unload(module_t mod)
+{
+ int error;
+
+ mtx_lock(&Giant);
+ error = MOD_EVENT(mod, MOD_UNLOAD);
+ mtx_unlock(&Giant);
+ return (error);
+}
+
+int
+module_getid(module_t mod)
+{
+
+ MOD_LOCK_ASSERT;
+ return (mod->id);
+}
+
+module_t
+module_getfnext(module_t mod)
+{
+
+ MOD_LOCK_ASSERT;
+ return (TAILQ_NEXT(mod, flink));
+}
+
+const char *
+module_getname(module_t mod)
+{
+
+ MOD_LOCK_ASSERT;
+ return (mod->name);
+}
+
+void
+module_setspecific(module_t mod, modspecific_t *datap)
+{
+
+ MOD_XLOCK_ASSERT;
+ mod->data = *datap;
+}
+
+linker_file_t
+module_file(module_t mod)
+{
+
+ return (mod->file);
+}
+
+/*
+ * Syscalls.
+ */
+int
+modnext(struct thread *td, struct modnext_args *uap)
+{
+ module_t mod;
+ int error = 0;
+
+ td->td_retval[0] = -1;
+
+ MOD_SLOCK;
+ if (uap->modid == 0) {
+ mod = TAILQ_FIRST(&modules);
+ if (mod)
+ td->td_retval[0] = mod->id;
+ else
+ error = ENOENT;
+ goto done2;
+ }
+ mod = module_lookupbyid(uap->modid);
+ if (mod == NULL) {
+ error = ENOENT;
+ goto done2;
+ }
+ if (TAILQ_NEXT(mod, link))
+ td->td_retval[0] = TAILQ_NEXT(mod, link)->id;
+ else
+ td->td_retval[0] = 0;
+done2:
+ MOD_SUNLOCK;
+ return (error);
+}
+
+int
+modfnext(struct thread *td, struct modfnext_args *uap)
+{
+ module_t mod;
+ int error;
+
+ td->td_retval[0] = -1;
+
+ MOD_SLOCK;
+ mod = module_lookupbyid(uap->modid);
+ if (mod == NULL) {
+ error = ENOENT;
+ } else {
+ error = 0;
+ if (TAILQ_NEXT(mod, flink))
+ td->td_retval[0] = TAILQ_NEXT(mod, flink)->id;
+ else
+ td->td_retval[0] = 0;
+ }
+ MOD_SUNLOCK;
+ return (error);
+}
+
+struct module_stat_v1 {
+ int version; /* set to sizeof(struct module_stat) */
+ char name[MAXMODNAME];
+ int refs;
+ int id;
+};
+
+int
+modstat(struct thread *td, struct modstat_args *uap)
+{
+ module_t mod;
+ modspecific_t data;
+ int error = 0;
+ int id, namelen, refs, version;
+ struct module_stat *stat;
+ char *name;
+
+ MOD_SLOCK;
+ mod = module_lookupbyid(uap->modid);
+ if (mod == NULL) {
+ MOD_SUNLOCK;
+ return (ENOENT);
+ }
+ id = mod->id;
+ refs = mod->refs;
+ name = mod->name;
+ data = mod->data;
+ MOD_SUNLOCK;
+ stat = uap->stat;
+
+ /*
+ * Check the version of the user's structure.
+ */
+ if ((error = copyin(&stat->version, &version, sizeof(version))) != 0)
+ return (error);
+ if (version != sizeof(struct module_stat_v1)
+ && version != sizeof(struct module_stat))
+ return (EINVAL);
+ namelen = strlen(mod->name) + 1;
+ if (namelen > MAXMODNAME)
+ namelen = MAXMODNAME;
+ if ((error = copyout(name, &stat->name[0], namelen)) != 0)
+ return (error);
+
+ if ((error = copyout(&refs, &stat->refs, sizeof(int))) != 0)
+ return (error);
+ if ((error = copyout(&id, &stat->id, sizeof(int))) != 0)
+ return (error);
+
+ /*
+ * >v1 stat includes module data.
+ */
+ if (version == sizeof(struct module_stat))
+ if ((error = copyout(&data, &stat->data,
+ sizeof(data))) != 0)
+ return (error);
+ td->td_retval[0] = 0;
+ return (error);
+}
+
+int
+modfind(struct thread *td, struct modfind_args *uap)
+{
+ int error = 0;
+ char name[MAXMODNAME];
+ module_t mod;
+
+ if ((error = copyinstr(uap->name, name, sizeof name, 0)) != 0)
+ return (error);
+
+ MOD_SLOCK;
+ mod = module_lookupbyname(name);
+ if (mod == NULL)
+ error = ENOENT;
+ else
+ td->td_retval[0] = module_getid(mod);
+ MOD_SUNLOCK;
+ return (error);
+}
+#endif /* __rtems__ */
+
+MODULE_VERSION(kernel, __FreeBSD_version);
+
+#ifdef COMPAT_FREEBSD32
+#include <rtems/freebsd/sys/mount.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/compat/freebsd32/freebsd32_util.h>
+#include <rtems/freebsd/compat/freebsd32/freebsd32.h>
+#include <rtems/freebsd/compat/freebsd32/freebsd32_proto.h>
+
+typedef union modspecific32 {
+ int intval;
+ u_int32_t uintval;
+ int longval;
+ u_int32_t ulongval;
+} modspecific32_t;
+
+struct module_stat32 {
+ int version;
+ char name[MAXMODNAME];
+ int refs;
+ int id;
+ modspecific32_t data;
+};
+
+int
+freebsd32_modstat(struct thread *td, struct freebsd32_modstat_args *uap)
+{
+ module_t mod;
+ modspecific32_t data32;
+ int error = 0;
+ int id, namelen, refs, version;
+ struct module_stat32 *stat32;
+ char *name;
+
+ MOD_SLOCK;
+ mod = module_lookupbyid(uap->modid);
+ if (mod == NULL) {
+ MOD_SUNLOCK;
+ return (ENOENT);
+ }
+
+ id = mod->id;
+ refs = mod->refs;
+ name = mod->name;
+ CP(mod->data, data32, intval);
+ CP(mod->data, data32, uintval);
+ CP(mod->data, data32, longval);
+ CP(mod->data, data32, ulongval);
+ MOD_SUNLOCK;
+ stat32 = uap->stat;
+
+ if ((error = copyin(&stat32->version, &version, sizeof(version))) != 0)
+ return (error);
+ if (version != sizeof(struct module_stat_v1)
+ && version != sizeof(struct module_stat32))
+ return (EINVAL);
+ namelen = strlen(mod->name) + 1;
+ if (namelen > MAXMODNAME)
+ namelen = MAXMODNAME;
+ if ((error = copyout(name, &stat32->name[0], namelen)) != 0)
+ return (error);
+
+ if ((error = copyout(&refs, &stat32->refs, sizeof(int))) != 0)
+ return (error);
+ if ((error = copyout(&id, &stat32->id, sizeof(int))) != 0)
+ return (error);
+
+ /*
+ * >v1 stat includes module data.
+ */
+ if (version == sizeof(struct module_stat32))
+ if ((error = copyout(&data32, &stat32->data,
+ sizeof(data32))) != 0)
+ return (error);
+ td->td_retval[0] = 0;
+ return (error);
+}
+#endif
diff --git a/rtems/freebsd/kern/kern_sysctl.c b/rtems/freebsd/kern/kern_sysctl.c
new file mode 100644
index 00000000..3eeaa6c0
--- /dev/null
+++ b/rtems/freebsd/kern/kern_sysctl.c
@@ -0,0 +1,1579 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 1982, 1986, 1989, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Mike Karels at Berkeley Software Design, Inc.
+ *
+ * Quite extensively rewritten by Poul-Henning Kamp of the FreeBSD
+ * project, to make these variables more user-friendly.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)kern_sysctl.c 8.4 (Berkeley) 4/14/94
+ */
+
+#ifdef __rtems__
+/* FIXME */
+#undef sysctl
+#endif /* __rtems__ */
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/freebsd/local/opt_compat.h>
+#include <rtems/freebsd/local/opt_ktrace.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/sysctl.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/priv.h>
+#include <rtems/freebsd/sys/proc.h>
+#include <rtems/freebsd/sys/jail.h>
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/mutex.h>
+#include <rtems/freebsd/sys/sx.h>
+#include <rtems/freebsd/sys/sysproto.h>
+#include <rtems/freebsd/sys/uio.h>
+#ifdef KTRACE
+#include <rtems/freebsd/sys/ktrace.h>
+#endif
+
+#include <rtems/freebsd/net/vnet.h>
+
+#include <rtems/freebsd/security/mac/mac_framework.h>
+#ifndef __rtems__
+#include <rtems/freebsd/vm/vm.h>
+#include <rtems/freebsd/vm/vm_extern.h>
+#endif
+
+#ifdef __rtems__
+/* From FreeBSD file 'sys/kern/kern_mib.c' */
+SYSCTL_NODE(, 0, sysctl, CTLFLAG_RW, 0, "Sysctl internal magic");
+SYSCTL_NODE(, CTL_KERN, kern, CTLFLAG_RW, 0, "High kernel, proc, limits &c");
+SYSCTL_NODE(, CTL_DEBUG, debug, CTLFLAG_RW, 0, "Debugging");
+SYSCTL_NODE(, CTL_HW, hw, CTLFLAG_RW, 0, "hardware");
+#endif /* __rtems__ */
+static MALLOC_DEFINE(M_SYSCTL, "sysctl", "sysctl internal magic");
+static MALLOC_DEFINE(M_SYSCTLOID, "sysctloid", "sysctl dynamic oids");
+static MALLOC_DEFINE(M_SYSCTLTMP, "sysctltmp", "sysctl temp output buffer");
+
+/*
+ * The sysctllock protects the MIB tree. It also protects sysctl
+ * contexts used with dynamic sysctls. The sysctl_register_oid() and
+ * sysctl_unregister_oid() routines require the sysctllock to already
+ * be held, so the sysctl_lock() and sysctl_unlock() routines are
+ * provided for the few places in the kernel which need to use that
+ * API rather than using the dynamic API. Use of the dynamic API is
+ * strongly encouraged for most code.
+ *
+ * The sysctlmemlock is used to limit the amount of user memory wired for
+ * sysctl requests. This is implemented by serializing any userland
+ * sysctl requests larger than a single page via an exclusive lock.
+ */
+static struct sx sysctllock;
+static struct sx sysctlmemlock;
+
+#define SYSCTL_SLOCK() sx_slock(&sysctllock)
+#define SYSCTL_SUNLOCK() sx_sunlock(&sysctllock)
+#define SYSCTL_XLOCK() sx_xlock(&sysctllock)
+#define SYSCTL_XUNLOCK() sx_xunlock(&sysctllock)
+#define SYSCTL_ASSERT_XLOCKED() sx_assert(&sysctllock, SA_XLOCKED)
+#define SYSCTL_ASSERT_LOCKED() sx_assert(&sysctllock, SA_LOCKED)
+#define SYSCTL_INIT() sx_init(&sysctllock, "sysctl lock")
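+
+/*
+ * Illustrative sketch (not part of the original file): code that
+ * registers a statically allocated oid by hand must follow the locking
+ * protocol described above.  The "example" names are hypothetical.
+ */
+#if 0
+static void
+example_register(struct sysctl_oid *oidp)
+{
+
+	sysctl_lock();			/* take sysctllock exclusively */
+	sysctl_register_oid(oidp);	/* requires sysctllock to be held */
+	sysctl_unlock();
+}
+#endif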
+
+static int sysctl_root(SYSCTL_HANDLER_ARGS);
+
+struct sysctl_oid_list sysctl__children; /* root list */
+
+static int sysctl_remove_oid_locked(struct sysctl_oid *oidp, int del,
+ int recurse);
+
+static struct sysctl_oid *
+sysctl_find_oidname(const char *name, struct sysctl_oid_list *list)
+{
+ struct sysctl_oid *oidp;
+
+ SYSCTL_ASSERT_LOCKED();
+ SLIST_FOREACH(oidp, list, oid_link) {
+ if (strcmp(oidp->oid_name, name) == 0) {
+ return (oidp);
+ }
+ }
+ return (NULL);
+}
+
+/*
+ * Initialization of the MIB tree.
+ *
+ * Order by number in each list.
+ */
+void
+sysctl_lock(void)
+{
+
+ SYSCTL_XLOCK();
+}
+
+void
+sysctl_unlock(void)
+{
+
+ SYSCTL_XUNLOCK();
+}
+
+void
+sysctl_register_oid(struct sysctl_oid *oidp)
+{
+ struct sysctl_oid_list *parent = oidp->oid_parent;
+ struct sysctl_oid *p;
+ struct sysctl_oid *q;
+
+ /*
+ * First check if another oid with the same name already
+ * exists in the parent's list.
+ */
+ SYSCTL_ASSERT_XLOCKED();
+ p = sysctl_find_oidname(oidp->oid_name, parent);
+ if (p != NULL) {
+ if ((p->oid_kind & CTLTYPE) == CTLTYPE_NODE) {
+ p->oid_refcnt++;
+ return;
+ } else {
+ printf("can't re-use a leaf (%s)!\n", p->oid_name);
+ return;
+ }
+ }
+ /*
+ * If this oid has a number OID_AUTO, give it a number which
+ * is greater than any current oid.
+ * NOTE: DO NOT change the starting value here, change it in
+ * <sys/sysctl.h>, and make sure it is at least 256 to
+ * accommodate e.g. net.inet.raw as a static sysctl node.
+ */
+ if (oidp->oid_number == OID_AUTO) {
+ static int newoid = CTL_AUTO_START;
+
+ oidp->oid_number = newoid++;
+ if (newoid == 0x7fffffff)
+ panic("out of oids");
+ }
+#if 0
+ else if (oidp->oid_number >= CTL_AUTO_START) {
+ /* do not panic; this happens when unregistering sysctl sets */
+ printf("static sysctl oid too high: %d", oidp->oid_number);
+ }
+#endif
+
+ /*
+ * Insert the oid into the parent's list in order.
+ */
+ q = NULL;
+ SLIST_FOREACH(p, parent, oid_link) {
+ if (oidp->oid_number < p->oid_number)
+ break;
+ q = p;
+ }
+ if (q)
+ SLIST_INSERT_AFTER(q, oidp, oid_link);
+ else
+ SLIST_INSERT_HEAD(parent, oidp, oid_link);
+}
+
+void
+sysctl_unregister_oid(struct sysctl_oid *oidp)
+{
+ struct sysctl_oid *p;
+ int error;
+
+ SYSCTL_ASSERT_XLOCKED();
+ error = ENOENT;
+ if (oidp->oid_number == OID_AUTO) {
+ error = EINVAL;
+ } else {
+ SLIST_FOREACH(p, oidp->oid_parent, oid_link) {
+ if (p == oidp) {
+ SLIST_REMOVE(oidp->oid_parent, oidp,
+ sysctl_oid, oid_link);
+ error = 0;
+ break;
+ }
+ }
+ }
+
+ /*
+ * This can happen when a module fails to register and is
+ * being unloaded afterwards. It should not be a panic()
+ * for normal use.
+ */
+ if (error)
+ printf("%s: failed to unregister sysctl\n", __func__);
+}
+
+/* Initialize a new context to keep track of dynamically added sysctls. */
+int
+sysctl_ctx_init(struct sysctl_ctx_list *c)
+{
+
+ if (c == NULL) {
+ return (EINVAL);
+ }
+
+ /*
+ * No locking here, the caller is responsible for not adding
+ * new nodes to a context until after this function has
+ * returned.
+ */
+ TAILQ_INIT(c);
+ return (0);
+}
+
+/* Free the context, and destroy all dynamic oids registered in this context */
+int
+sysctl_ctx_free(struct sysctl_ctx_list *clist)
+{
+ struct sysctl_ctx_entry *e, *e1;
+ int error;
+
+ error = 0;
+ /*
+ * First perform a "dry run" to check if it's ok to remove oids.
+ * XXX FIXME
+ * XXX This algorithm is a hack. But I don't know any
+ * XXX better solution for now...
+ */
+ SYSCTL_XLOCK();
+ TAILQ_FOREACH(e, clist, link) {
+ error = sysctl_remove_oid_locked(e->entry, 0, 0);
+ if (error)
+ break;
+ }
+ /*
+ * Restore deregistered entries, either from the end,
+ * or from the place where the error occurred.
+ * e contains the entry that was not unregistered.
+ */
+ if (error)
+ e1 = TAILQ_PREV(e, sysctl_ctx_list, link);
+ else
+ e1 = TAILQ_LAST(clist, sysctl_ctx_list);
+ while (e1 != NULL) {
+ sysctl_register_oid(e1->entry);
+ e1 = TAILQ_PREV(e1, sysctl_ctx_list, link);
+ }
+ if (error) {
+ SYSCTL_XUNLOCK();
+ return(EBUSY);
+ }
+ /* Now really delete the entries */
+ e = TAILQ_FIRST(clist);
+ while (e != NULL) {
+ e1 = TAILQ_NEXT(e, link);
+ error = sysctl_remove_oid_locked(e->entry, 1, 0);
+ if (error)
+ panic("sysctl_remove_oid: corrupt tree, entry: %s",
+ e->entry->oid_name);
+ free(e, M_SYSCTLOID);
+ e = e1;
+ }
+ SYSCTL_XUNLOCK();
+ return (error);
+}
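+
+/*
+ * Illustrative sketch (not part of the original file): the typical life
+ * cycle of a sysctl context.  The "example" names are hypothetical.
+ */
+#if 0
+static struct sysctl_ctx_list example_ctx;
+static int example_value;
+
+static void
+example_start(void)
+{
+
+	sysctl_ctx_init(&example_ctx);
+	SYSCTL_ADD_INT(&example_ctx, SYSCTL_STATIC_CHILDREN(_debug),
+	    OID_AUTO, "example", CTLFLAG_RW, &example_value, 0,
+	    "example dynamic sysctl");
+}
+
+static void
+example_stop(void)
+{
+
+	/* Deregister and free every oid created in this context. */
+	(void)sysctl_ctx_free(&example_ctx);
+}
+#endif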
+
+/* Add an entry to the context */
+struct sysctl_ctx_entry *
+sysctl_ctx_entry_add(struct sysctl_ctx_list *clist, struct sysctl_oid *oidp)
+{
+ struct sysctl_ctx_entry *e;
+
+ SYSCTL_ASSERT_XLOCKED();
+ if (clist == NULL || oidp == NULL)
+ return(NULL);
+ e = malloc(sizeof(struct sysctl_ctx_entry), M_SYSCTLOID, M_WAITOK);
+ e->entry = oidp;
+ TAILQ_INSERT_HEAD(clist, e, link);
+ return (e);
+}
+
+/* Find an entry in the context */
+struct sysctl_ctx_entry *
+sysctl_ctx_entry_find(struct sysctl_ctx_list *clist, struct sysctl_oid *oidp)
+{
+ struct sysctl_ctx_entry *e;
+
+ SYSCTL_ASSERT_LOCKED();
+ if (clist == NULL || oidp == NULL)
+ return(NULL);
+ TAILQ_FOREACH(e, clist, link) {
+ if(e->entry == oidp)
+ return(e);
+ }
+ return (e);
+}
+
+/*
+ * Delete an entry from the context.
+ * NOTE: this function doesn't free oidp! You have to remove it
+ * with sysctl_remove_oid().
+ */
+int
+sysctl_ctx_entry_del(struct sysctl_ctx_list *clist, struct sysctl_oid *oidp)
+{
+ struct sysctl_ctx_entry *e;
+
+ if (clist == NULL || oidp == NULL)
+ return (EINVAL);
+ SYSCTL_XLOCK();
+ e = sysctl_ctx_entry_find(clist, oidp);
+ if (e != NULL) {
+ TAILQ_REMOVE(clist, e, link);
+ SYSCTL_XUNLOCK();
+ free(e, M_SYSCTLOID);
+ return (0);
+ } else {
+ SYSCTL_XUNLOCK();
+ return (ENOENT);
+ }
+}
+
+/*
+ * Remove dynamically created sysctl trees.
+ * oidp - top of the tree to be removed
+ * del - if 0, just deregister; otherwise free the entries as well
+ * recurse - if != 0, traverse the subtree to be deleted
+ */
+int
+sysctl_remove_oid(struct sysctl_oid *oidp, int del, int recurse)
+{
+ int error;
+
+ SYSCTL_XLOCK();
+ error = sysctl_remove_oid_locked(oidp, del, recurse);
+ SYSCTL_XUNLOCK();
+ return (error);
+}
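+
+/*
+ * Illustrative sketch (assuming a dynamically created "example_oid"):
+ * deregister the oid, free its memory, and recurse into its children.
+ *
+ *	error = sysctl_remove_oid(example_oid, 1, 1);
+ */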
+
+static int
+sysctl_remove_oid_locked(struct sysctl_oid *oidp, int del, int recurse)
+{
+ struct sysctl_oid *p;
+ int error;
+
+ SYSCTL_ASSERT_XLOCKED();
+ if (oidp == NULL)
+ return(EINVAL);
+ if ((oidp->oid_kind & CTLFLAG_DYN) == 0) {
+ printf("can't remove non-dynamic nodes!\n");
+ return (EINVAL);
+ }
+ /*
+ * WARNING: normal method to do this should be through
+ * sysctl_ctx_free(). Use recursing as the last resort
+ * method to purge your sysctl tree of leftovers...
+ * However, if some other code still references these nodes,
+ * it will panic.
+ */
+ if ((oidp->oid_kind & CTLTYPE) == CTLTYPE_NODE) {
+ if (oidp->oid_refcnt == 1) {
+ SLIST_FOREACH(p, SYSCTL_CHILDREN(oidp), oid_link) {
+ if (!recurse)
+ return (ENOTEMPTY);
+ error = sysctl_remove_oid_locked(p, del,
+ recurse);
+ if (error)
+ return (error);
+ }
+ if (del)
+ free(SYSCTL_CHILDREN(oidp), M_SYSCTLOID);
+ }
+ }
+ if (oidp->oid_refcnt > 1 ) {
+ oidp->oid_refcnt--;
+ } else {
+ if (oidp->oid_refcnt == 0) {
+ printf("Warning: bad oid_refcnt=%u (%s)!\n",
+ oidp->oid_refcnt, oidp->oid_name);
+ return (EINVAL);
+ }
+ sysctl_unregister_oid(oidp);
+ if (del) {
+ if (oidp->oid_descr)
+ free((void *)(uintptr_t)(const void *)oidp->oid_descr, M_SYSCTLOID);
+ free((void *)(uintptr_t)(const void *)oidp->oid_name,
+ M_SYSCTLOID);
+ free(oidp, M_SYSCTLOID);
+ }
+ }
+ return (0);
+}
+
+/*
+ * Create new sysctls at run time.
+ * clist may point to a valid context initialized with sysctl_ctx_init().
+ */
+struct sysctl_oid *
+sysctl_add_oid(struct sysctl_ctx_list *clist, struct sysctl_oid_list *parent,
+ int number, const char *name, int kind, void *arg1, int arg2,
+ int (*handler)(SYSCTL_HANDLER_ARGS), const char *fmt, const char *descr)
+{
+ struct sysctl_oid *oidp;
+ ssize_t len;
+ char *newname;
+
+ /* You have to hook up somewhere.. */
+ if (parent == NULL)
+ return(NULL);
+ /* Check if the node already exists, otherwise create it */
+ SYSCTL_XLOCK();
+ oidp = sysctl_find_oidname(name, parent);
+ if (oidp != NULL) {
+ if ((oidp->oid_kind & CTLTYPE) == CTLTYPE_NODE) {
+ oidp->oid_refcnt++;
+ /* Update the context */
+ if (clist != NULL)
+ sysctl_ctx_entry_add(clist, oidp);
+ SYSCTL_XUNLOCK();
+ return (oidp);
+ } else {
+ SYSCTL_XUNLOCK();
+ printf("can't re-use a leaf (%s)!\n", name);
+ return (NULL);
+ }
+ }
+ oidp = malloc(sizeof(struct sysctl_oid), M_SYSCTLOID, M_WAITOK|M_ZERO);
+ oidp->oid_parent = parent;
+ SLIST_NEXT(oidp, oid_link) = NULL;
+ oidp->oid_number = number;
+ oidp->oid_refcnt = 1;
+ len = strlen(name);
+ newname = malloc(len + 1, M_SYSCTLOID, M_WAITOK);
+ bcopy(name, newname, len + 1);
+ newname[len] = '\0';
+ oidp->oid_name = newname;
+ oidp->oid_handler = handler;
+ oidp->oid_kind = CTLFLAG_DYN | kind;
+ if ((kind & CTLTYPE) == CTLTYPE_NODE) {
+ /* Allocate space for children */
+ SYSCTL_CHILDREN_SET(oidp, malloc(sizeof(struct sysctl_oid_list),
+ M_SYSCTLOID, M_WAITOK));
+ SLIST_INIT(SYSCTL_CHILDREN(oidp));
+ } else {
+ oidp->oid_arg1 = arg1;
+ oidp->oid_arg2 = arg2;
+ }
+ oidp->oid_fmt = fmt;
+ if (descr) {
+ int len = strlen(descr) + 1;
+ oidp->oid_descr = malloc(len, M_SYSCTLOID, M_WAITOK);
+ if (oidp->oid_descr)
+ strcpy((char *)(uintptr_t)(const void *)oidp->oid_descr, descr);
+ }
+ /* Update the context, if used */
+ if (clist != NULL)
+ sysctl_ctx_entry_add(clist, oidp);
+ /* Register this oid */
+ sysctl_register_oid(oidp);
+ SYSCTL_XUNLOCK();
+ return (oidp);
+}
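+
+/*
+ * Illustrative sketch (not part of the original file): creating a leaf
+ * directly with sysctl_add_oid().  The SYSCTL_ADD_*() macros in
+ * <sys/sysctl.h> are the usual front end for this function.
+ */
+#if 0
+static int example_counter;
+
+static void
+example_add(struct sysctl_ctx_list *ctx)
+{
+
+	(void)sysctl_add_oid(ctx, SYSCTL_STATIC_CHILDREN(_debug), OID_AUTO,
+	    "counter", CTLTYPE_INT | CTLFLAG_RD, &example_counter, 0,
+	    sysctl_handle_int, "I", "example read-only counter");
+}
+#endif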
+
+/*
+ * Rename an existing oid.
+ */
+void
+sysctl_rename_oid(struct sysctl_oid *oidp, const char *name)
+{
+ ssize_t len;
+ char *newname;
+ void *oldname;
+
+ len = strlen(name);
+ newname = malloc(len + 1, M_SYSCTLOID, M_WAITOK);
+ bcopy(name, newname, len + 1);
+ newname[len] = '\0';
+ SYSCTL_XLOCK();
+ oldname = (void *)(uintptr_t)(const void *)oidp->oid_name;
+ oidp->oid_name = newname;
+ SYSCTL_XUNLOCK();
+ free(oldname, M_SYSCTLOID);
+}
+
+/*
+ * Reparent an existing oid.
+ */
+int
+sysctl_move_oid(struct sysctl_oid *oid, struct sysctl_oid_list *parent)
+{
+ struct sysctl_oid *oidp;
+
+ SYSCTL_XLOCK();
+ if (oid->oid_parent == parent) {
+ SYSCTL_XUNLOCK();
+ return (0);
+ }
+ oidp = sysctl_find_oidname(oid->oid_name, parent);
+ if (oidp != NULL) {
+ SYSCTL_XUNLOCK();
+ return (EEXIST);
+ }
+ sysctl_unregister_oid(oid);
+ oid->oid_parent = parent;
+ oid->oid_number = OID_AUTO;
+ sysctl_register_oid(oid);
+ SYSCTL_XUNLOCK();
+ return (0);
+}
+
+/*
+ * Register the kernel's oids on startup.
+ */
+SET_DECLARE(sysctl_set, struct sysctl_oid);
+
+static void
+sysctl_register_all(void *arg)
+{
+ struct sysctl_oid **oidp;
+
+ sx_init(&sysctlmemlock, "sysctl mem");
+ SYSCTL_INIT();
+ SYSCTL_XLOCK();
+ SET_FOREACH(oidp, sysctl_set)
+ sysctl_register_oid(*oidp);
+ SYSCTL_XUNLOCK();
+}
+SYSINIT(sysctl, SI_SUB_KMEM, SI_ORDER_ANY, sysctl_register_all, 0);
+
+/*
+ * "Staff-functions"
+ *
+ * These functions implement a presently undocumented interface
+ * used by the sysctl program to walk the tree, and get the type
+ * so it can print the value.
+ * This interface is under work and consideration, and should probably
+ * be killed with a big axe by the first person who can find the time.
+ * (be aware, though, that the proper interface isn't as obvious as it
+ * may seem; there are various conflicting requirements.)
+ *
+ * {0,0} printf the entire MIB-tree.
+ * {0,1,...} return the name of the "..." OID.
+ * {0,2,...} return the next OID.
+ * {0,3} return the OID of the name in "new"
+ * {0,4,...} return the kind & format info for the "..." OID.
+ * {0,5,...} return the description of the "..." OID.
+ */
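+
+/*
+ * Illustrative sketch (not part of the original file): from userland the
+ * {0,3} name2oid node is reached via sysctl(2); this is essentially how
+ * sysctlnametomib(3) is implemented:
+ *
+ *	int name2oid[2] = { 0, 3 };
+ *	int oid[CTL_MAXNAME];
+ *	size_t len = sizeof(oid);
+ *
+ *	sysctl(name2oid, 2, oid, &len, "kern.ostype",
+ *	    strlen("kern.ostype"));
+ */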
+
+#ifdef SYSCTL_DEBUG
+static void
+sysctl_sysctl_debug_dump_node(struct sysctl_oid_list *l, int i)
+{
+ int k;
+ struct sysctl_oid *oidp;
+
+ SYSCTL_ASSERT_LOCKED();
+ SLIST_FOREACH(oidp, l, oid_link) {
+
+ for (k=0; k<i; k++)
+ printf(" ");
+
+ printf("%d %s ", oidp->oid_number, oidp->oid_name);
+
+ printf("%c%c",
+ oidp->oid_kind & CTLFLAG_RD ? 'R':' ',
+ oidp->oid_kind & CTLFLAG_WR ? 'W':' ');
+
+ if (oidp->oid_handler)
+ printf(" *Handler");
+
+ switch (oidp->oid_kind & CTLTYPE) {
+ case CTLTYPE_NODE:
+ printf(" Node\n");
+ if (!oidp->oid_handler) {
+ sysctl_sysctl_debug_dump_node(
+ oidp->oid_arg1, i+2);
+ }
+ break;
+ case CTLTYPE_INT: printf(" Int\n"); break;
+ case CTLTYPE_STRING: printf(" String\n"); break;
+ case CTLTYPE_QUAD: printf(" Quad\n"); break;
+ case CTLTYPE_OPAQUE: printf(" Opaque/struct\n"); break;
+ default: printf("\n");
+ }
+
+ }
+}
+
+static int
+sysctl_sysctl_debug(SYSCTL_HANDLER_ARGS)
+{
+ int error;
+
+ error = priv_check(req->td, PRIV_SYSCTL_DEBUG);
+ if (error)
+ return (error);
+ sysctl_sysctl_debug_dump_node(&sysctl__children, 0);
+ return (ENOENT);
+}
+
+SYSCTL_PROC(_sysctl, 0, debug, CTLTYPE_STRING|CTLFLAG_RD,
+ 0, 0, sysctl_sysctl_debug, "-", "");
+#endif
+
+static int
+sysctl_sysctl_name(SYSCTL_HANDLER_ARGS)
+{
+ int *name = (int *) arg1;
+ u_int namelen = arg2;
+ int error = 0;
+ struct sysctl_oid *oid;
+ struct sysctl_oid_list *lsp = &sysctl__children, *lsp2;
+ char buf[10];
+
+ SYSCTL_ASSERT_LOCKED();
+ while (namelen) {
+ if (!lsp) {
+ snprintf(buf,sizeof(buf),"%d",*name);
+ if (req->oldidx)
+ error = SYSCTL_OUT(req, ".", 1);
+ if (!error)
+ error = SYSCTL_OUT(req, buf, strlen(buf));
+ if (error)
+ return (error);
+ namelen--;
+ name++;
+ continue;
+ }
+ lsp2 = 0;
+ SLIST_FOREACH(oid, lsp, oid_link) {
+ if (oid->oid_number != *name)
+ continue;
+
+ if (req->oldidx)
+ error = SYSCTL_OUT(req, ".", 1);
+ if (!error)
+ error = SYSCTL_OUT(req, oid->oid_name,
+ strlen(oid->oid_name));
+ if (error)
+ return (error);
+
+ namelen--;
+ name++;
+
+ if ((oid->oid_kind & CTLTYPE) != CTLTYPE_NODE)
+ break;
+
+ if (oid->oid_handler)
+ break;
+
+ lsp2 = (struct sysctl_oid_list *)oid->oid_arg1;
+ break;
+ }
+ lsp = lsp2;
+ }
+ return (SYSCTL_OUT(req, "", 1));
+}
+
+static SYSCTL_NODE(_sysctl, 1, name, CTLFLAG_RD, sysctl_sysctl_name, "");
+
+static int
+sysctl_sysctl_next_ls(struct sysctl_oid_list *lsp, int *name, u_int namelen,
+ int *next, int *len, int level, struct sysctl_oid **oidpp)
+{
+ struct sysctl_oid *oidp;
+
+ SYSCTL_ASSERT_LOCKED();
+ *len = level;
+ SLIST_FOREACH(oidp, lsp, oid_link) {
+ *next = oidp->oid_number;
+ *oidpp = oidp;
+
+ if (oidp->oid_kind & CTLFLAG_SKIP)
+ continue;
+
+ if (!namelen) {
+ if ((oidp->oid_kind & CTLTYPE) != CTLTYPE_NODE)
+ return (0);
+ if (oidp->oid_handler)
+ /* We really should call the handler here... */
+ return (0);
+ lsp = (struct sysctl_oid_list *)oidp->oid_arg1;
+ if (!sysctl_sysctl_next_ls(lsp, 0, 0, next+1,
+ len, level+1, oidpp))
+ return (0);
+ goto emptynode;
+ }
+
+ if (oidp->oid_number < *name)
+ continue;
+
+ if (oidp->oid_number > *name) {
+ if ((oidp->oid_kind & CTLTYPE) != CTLTYPE_NODE)
+ return (0);
+ if (oidp->oid_handler)
+ return (0);
+ lsp = (struct sysctl_oid_list *)oidp->oid_arg1;
+ if (!sysctl_sysctl_next_ls(lsp, name+1, namelen-1,
+ next+1, len, level+1, oidpp))
+ return (0);
+ goto next;
+ }
+ if ((oidp->oid_kind & CTLTYPE) != CTLTYPE_NODE)
+ continue;
+
+ if (oidp->oid_handler)
+ continue;
+
+ lsp = (struct sysctl_oid_list *)oidp->oid_arg1;
+ if (!sysctl_sysctl_next_ls(lsp, name+1, namelen-1, next+1,
+ len, level+1, oidpp))
+ return (0);
+ next:
+ namelen = 1;
+ emptynode:
+ *len = level;
+ }
+ return (1);
+}
+
+static int
+sysctl_sysctl_next(SYSCTL_HANDLER_ARGS)
+{
+ int *name = (int *) arg1;
+ u_int namelen = arg2;
+ int i, j, error;
+ struct sysctl_oid *oid;
+ struct sysctl_oid_list *lsp = &sysctl__children;
+ int newoid[CTL_MAXNAME];
+
+ i = sysctl_sysctl_next_ls(lsp, name, namelen, newoid, &j, 1, &oid);
+ if (i)
+ return (ENOENT);
+ error = SYSCTL_OUT(req, newoid, j * sizeof (int));
+ return (error);
+}
+
+static SYSCTL_NODE(_sysctl, 2, next, CTLFLAG_RD, sysctl_sysctl_next, "");
+
+static int
+name2oid(char *name, int *oid, int *len, struct sysctl_oid **oidpp)
+{
+ int i;
+ struct sysctl_oid *oidp;
+ struct sysctl_oid_list *lsp = &sysctl__children;
+ char *p;
+
+ SYSCTL_ASSERT_LOCKED();
+
+ if (!*name)
+ return (ENOENT);
+
+ p = name + strlen(name) - 1 ;
+ if (*p == '.')
+ *p = '\0';
+
+ *len = 0;
+
+ for (p = name; *p && *p != '.'; p++)
+ ;
+ i = *p;
+ if (i == '.')
+ *p = '\0';
+
+ oidp = SLIST_FIRST(lsp);
+
+ while (oidp && *len < CTL_MAXNAME) {
+ if (strcmp(name, oidp->oid_name)) {
+ oidp = SLIST_NEXT(oidp, oid_link);
+ continue;
+ }
+ *oid++ = oidp->oid_number;
+ (*len)++;
+
+ if (!i) {
+ if (oidpp)
+ *oidpp = oidp;
+ return (0);
+ }
+
+ if ((oidp->oid_kind & CTLTYPE) != CTLTYPE_NODE)
+ break;
+
+ if (oidp->oid_handler)
+ break;
+
+ lsp = (struct sysctl_oid_list *)oidp->oid_arg1;
+ oidp = SLIST_FIRST(lsp);
+ name = p+1;
+ for (p = name; *p && *p != '.'; p++)
+ ;
+ i = *p;
+ if (i == '.')
+ *p = '\0';
+ }
+ return (ENOENT);
+}
+
+static int
+sysctl_sysctl_name2oid(SYSCTL_HANDLER_ARGS)
+{
+ char *p;
+ int error, oid[CTL_MAXNAME], len;
+ struct sysctl_oid *op = 0;
+
+ SYSCTL_ASSERT_LOCKED();
+
+ if (!req->newlen)
+ return (ENOENT);
+ if (req->newlen >= MAXPATHLEN) /* XXX arbitrary, undocumented */
+ return (ENAMETOOLONG);
+
+ p = malloc(req->newlen+1, M_SYSCTL, M_WAITOK);
+
+ error = SYSCTL_IN(req, p, req->newlen);
+ if (error) {
+ free(p, M_SYSCTL);
+ return (error);
+ }
+
+ p [req->newlen] = '\0';
+
+ error = name2oid(p, oid, &len, &op);
+
+ free(p, M_SYSCTL);
+
+ if (error)
+ return (error);
+
+ error = SYSCTL_OUT(req, oid, len * sizeof *oid);
+ return (error);
+}
+
+SYSCTL_PROC(_sysctl, 3, name2oid, CTLFLAG_RW|CTLFLAG_ANYBODY|CTLFLAG_MPSAFE,
+ 0, 0, sysctl_sysctl_name2oid, "I", "");
+
+static int
+sysctl_sysctl_oidfmt(SYSCTL_HANDLER_ARGS)
+{
+ struct sysctl_oid *oid;
+ int error;
+
+ error = sysctl_find_oid(arg1, arg2, &oid, NULL, req);
+ if (error)
+ return (error);
+
+ if (!oid->oid_fmt)
+ return (ENOENT);
+ error = SYSCTL_OUT(req, &oid->oid_kind, sizeof(oid->oid_kind));
+ if (error)
+ return (error);
+ error = SYSCTL_OUT(req, oid->oid_fmt, strlen(oid->oid_fmt) + 1);
+ return (error);
+}
+
+
+static SYSCTL_NODE(_sysctl, 4, oidfmt, CTLFLAG_RD|CTLFLAG_MPSAFE,
+ sysctl_sysctl_oidfmt, "");
+
+static int
+sysctl_sysctl_oiddescr(SYSCTL_HANDLER_ARGS)
+{
+ struct sysctl_oid *oid;
+ int error;
+
+ error = sysctl_find_oid(arg1, arg2, &oid, NULL, req);
+ if (error)
+ return (error);
+
+ if (!oid->oid_descr)
+ return (ENOENT);
+ error = SYSCTL_OUT(req, oid->oid_descr, strlen(oid->oid_descr) + 1);
+ return (error);
+}
+
+static SYSCTL_NODE(_sysctl, 5, oiddescr, CTLFLAG_RD, sysctl_sysctl_oiddescr, "");
+
+/*
+ * Default "handler" functions.
+ */
+
+/*
+ * Handle an int, signed or unsigned.
+ * Two cases:
+ * a variable: point arg1 at it.
+ * a constant: pass it in arg2.
+ */
+
+int
+sysctl_handle_int(SYSCTL_HANDLER_ARGS)
+{
+ int tmpout, error = 0;
+
+ /*
+ * Attempt to get a coherent snapshot by making a copy of the data.
+ */
+ if (arg1)
+ tmpout = *(int *)arg1;
+ else
+ tmpout = arg2;
+ error = SYSCTL_OUT(req, &tmpout, sizeof(int));
+
+ if (error || !req->newptr)
+ return (error);
+
+ if (!arg1)
+ error = EPERM;
+ else
+ error = SYSCTL_IN(req, arg1, sizeof(int));
+ return (error);
+}
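+
+/*
+ * Illustrative sketch (not part of the original file): the two cases
+ * handled above, declared with the SYSCTL_INT() macro.  The "example"
+ * names are hypothetical.
+ */
+#if 0
+static int example_var;
+
+/* A variable: arg1 points at it, so writes are allowed. */
+SYSCTL_INT(_debug, OID_AUTO, example_var, CTLFLAG_RW, &example_var, 0,
+    "variable case");
+/* A constant: arg1 is NULL and the value is passed in arg2. */
+SYSCTL_INT(_debug, OID_AUTO, example_const, CTLFLAG_RD, NULL, 42,
+    "constant case");
+#endif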
+
+/*
+ * Based on sysctl_handle_int(), convert milliseconds into ticks.
+ * Note: this is used by TCP.
+ */
+
+int
+sysctl_msec_to_ticks(SYSCTL_HANDLER_ARGS)
+{
+ int error, s, tt;
+
+ tt = *(int *)arg1;
+ s = (int)((int64_t)tt * 1000 / hz);
+
+ error = sysctl_handle_int(oidp, &s, 0, req);
+ if (error || !req->newptr)
+ return (error);
+
+ tt = (int)((int64_t)s * hz / 1000);
+ if (tt < 1)
+ return (EINVAL);
+
+ *(int *)arg1 = tt;
+ return (0);
+}
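+
+/*
+ * Illustrative sketch (hypothetical names): TCP exports its tick-based
+ * timers in milliseconds through this handler, roughly:
+ *
+ *	SYSCTL_PROC(_net_inet_tcp, OID_AUTO, example_msec,
+ *	    CTLTYPE_INT | CTLFLAG_RW, &example_ticks, 0,
+ *	    sysctl_msec_to_ticks, "I", "timer interval in milliseconds");
+ */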
+
+
+/*
+ * Handle a long, signed or unsigned. arg1 points to it.
+ */
+
+int
+sysctl_handle_long(SYSCTL_HANDLER_ARGS)
+{
+ int error = 0;
+ long tmplong;
+#ifdef SCTL_MASK32
+ int tmpint;
+#endif
+
+ /*
+ * Attempt to get a coherent snapshot by making a copy of the data.
+ */
+ if (!arg1)
+ return (EINVAL);
+ tmplong = *(long *)arg1;
+#ifdef SCTL_MASK32
+ if (req->flags & SCTL_MASK32) {
+ tmpint = tmplong;
+ error = SYSCTL_OUT(req, &tmpint, sizeof(int));
+ } else
+#endif
+ error = SYSCTL_OUT(req, &tmplong, sizeof(long));
+
+ if (error || !req->newptr)
+ return (error);
+
+#ifdef SCTL_MASK32
+ if (req->flags & SCTL_MASK32) {
+ error = SYSCTL_IN(req, &tmpint, sizeof(int));
+ *(long *)arg1 = (long)tmpint;
+ } else
+#endif
+ error = SYSCTL_IN(req, arg1, sizeof(long));
+ return (error);
+}
+
+/*
+ * Handle a 64 bit int, signed or unsigned. arg1 points to it.
+ */
+
+int
+sysctl_handle_quad(SYSCTL_HANDLER_ARGS)
+{
+ int error = 0;
+ uint64_t tmpout;
+
+ /*
+ * Attempt to get a coherent snapshot by making a copy of the data.
+ */
+ if (!arg1)
+ return (EINVAL);
+ tmpout = *(uint64_t *)arg1;
+ error = SYSCTL_OUT(req, &tmpout, sizeof(uint64_t));
+
+ if (error || !req->newptr)
+ return (error);
+
+ error = SYSCTL_IN(req, arg1, sizeof(uint64_t));
+ return (error);
+}
+
+/*
+ * Handle our generic '\0' terminated 'C' string.
+ * Two cases:
+ * a variable string: point arg1 at it, arg2 is max length.
+ * a constant string: point arg1 at it, arg2 is zero.
+ */
+
+int
+sysctl_handle_string(SYSCTL_HANDLER_ARGS)
+{
+ int error=0;
+ char *tmparg;
+ size_t outlen;
+
+ /*
+ * Attempt to get a coherent snapshot by copying to a
+ * temporary kernel buffer.
+ */
+retry:
+ outlen = strlen((char *)arg1)+1;
+ tmparg = malloc(outlen, M_SYSCTLTMP, M_WAITOK);
+
+ if (strlcpy(tmparg, (char *)arg1, outlen) >= outlen) {
+ free(tmparg, M_SYSCTLTMP);
+ goto retry;
+ }
+
+ error = SYSCTL_OUT(req, tmparg, outlen);
+ free(tmparg, M_SYSCTLTMP);
+
+ if (error || !req->newptr)
+ return (error);
+
+ if ((req->newlen - req->newidx) >= arg2) {
+ error = EINVAL;
+ } else {
+ arg2 = (req->newlen - req->newidx);
+ error = SYSCTL_IN(req, arg1, arg2);
+ ((char *)arg1)[arg2] = '\0';
+ }
+
+ return (error);
+}
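+
+/*
+ * Illustrative sketch (not part of the original file): the two string
+ * cases, declared with SYSCTL_STRING().  A non-zero arg2 gives the
+ * buffer size; zero marks a constant string.
+ */
+#if 0
+static char example_buf[32] = "default";
+
+SYSCTL_STRING(_debug, OID_AUTO, example_str, CTLFLAG_RW, example_buf,
+    sizeof(example_buf), "variable string");
+SYSCTL_STRING(_debug, OID_AUTO, example_id, CTLFLAG_RD, "fixed value", 0,
+    "constant string");
+#endif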
+
+/*
+ * Handle any kind of opaque data.
+ * arg1 points to it, arg2 is the size.
+ */
+
+int
+sysctl_handle_opaque(SYSCTL_HANDLER_ARGS)
+{
+#ifndef __rtems__
+ int error, tries;
+ u_int generation;
+ struct sysctl_req req2;
+
+ /*
+ * Attempt to get a coherent snapshot, by using the thread
+ * pre-emption counter updated from within mi_switch() to
+ * determine if we were pre-empted during a bcopy() or
+ * copyout(). Make 3 attempts at doing this before giving up.
+ * If we encounter an error, stop immediately.
+ */
+ tries = 0;
+ req2 = *req;
+retry:
+ generation = curthread->td_generation;
+ error = SYSCTL_OUT(req, arg1, arg2);
+ if (error)
+ return (error);
+ tries++;
+ if (generation != curthread->td_generation && tries < 3) {
+ *req = req2;
+ goto retry;
+ }
+
+ error = SYSCTL_IN(req, arg1, arg2);
+
+ return (error);
+#else /* __rtems__ */
+ /* FIXME */
+ return (0);
+#endif /* __rtems__ */
+}
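+
+/*
+ * Illustrative sketch (hypothetical names): exporting a structure
+ * through this handler with SYSCTL_OPAQUE():
+ *
+ *	static struct example_stats stats;
+ *
+ *	SYSCTL_OPAQUE(_debug, OID_AUTO, example_stats, CTLFLAG_RD,
+ *	    &stats, sizeof(stats), "S,example_stats", "example statistics");
+ */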
+
+/*
+ * Transfer functions to/from kernel space.
+ * XXX: rather untested at this point
+ */
+static int
+sysctl_old_kernel(struct sysctl_req *req, const void *p, size_t l)
+{
+ size_t i = 0;
+
+ if (req->oldptr) {
+ i = l;
+ if (req->oldlen <= req->oldidx)
+ i = 0;
+ else
+ if (i > req->oldlen - req->oldidx)
+ i = req->oldlen - req->oldidx;
+ if (i > 0)
+ bcopy(p, (char *)req->oldptr + req->oldidx, i);
+ }
+ req->oldidx += l;
+ if (req->oldptr && i != l)
+ return (ENOMEM);
+ return (0);
+}
+
+static int
+sysctl_new_kernel(struct sysctl_req *req, void *p, size_t l)
+{
+ if (!req->newptr)
+ return (0);
+ if (req->newlen - req->newidx < l)
+ return (EINVAL);
+ bcopy((char *)req->newptr + req->newidx, p, l);
+ req->newidx += l;
+ return (0);
+}
+
+int
+kernel_sysctl(struct thread *td, int *name, u_int namelen, void *old,
+#ifndef __rtems__
+ size_t *oldlenp, void *new, size_t newlen, size_t *retval, int flags)
+#else /* __rtems__ */
+ size_t *oldlenp, const void *new, size_t newlen, size_t *retval, int flags)
+#endif /* __rtems__ */
+{
+ int error = 0;
+ struct sysctl_req req;
+
+ bzero(&req, sizeof req);
+
+#ifndef __rtems__
+ req.td = td;
+#endif /* __rtems__ */
+ req.flags = flags;
+
+ if (oldlenp) {
+ req.oldlen = *oldlenp;
+ }
+ req.validlen = req.oldlen;
+
+ if (old) {
+ req.oldptr= old;
+ }
+
+ if (new != NULL) {
+ req.newlen = newlen;
+ req.newptr = new;
+ }
+
+ req.oldfunc = sysctl_old_kernel;
+ req.newfunc = sysctl_new_kernel;
+ req.lock = REQ_LOCKED;
+
+ SYSCTL_SLOCK();
+ error = sysctl_root(0, name, namelen, &req);
+ SYSCTL_SUNLOCK();
+
+#ifndef __rtems__
+ if (req.lock == REQ_WIRED && req.validlen > 0)
+ vsunlock(req.oldptr, req.validlen);
+#endif /* __rtems__ */
+
+ if (error && error != ENOMEM)
+ return (error);
+
+ if (retval) {
+ if (req.oldptr && req.oldidx > req.validlen)
+ *retval = req.validlen;
+ else
+ *retval = req.oldidx;
+ }
+ return (error);
+}
+
+#ifndef __rtems__
+int
+kernel_sysctlbyname(struct thread *td, char *name, void *old, size_t *oldlenp,
+ void *new, size_t newlen, size_t *retval, int flags)
+{
+ int oid[CTL_MAXNAME];
+ size_t oidlen, plen;
+ int error;
+
+ oid[0] = 0; /* sysctl internal magic */
+ oid[1] = 3; /* name2oid */
+ oidlen = sizeof(oid);
+
+ error = kernel_sysctl(td, oid, 2, oid, &oidlen,
+ (void *)name, strlen(name), &plen, flags);
+ if (error)
+ return (error);
+
+ error = kernel_sysctl(td, oid, plen / sizeof(int), old, oldlenp,
+ new, newlen, retval, flags);
+ return (error);
+}
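+
+/*
+ * Illustrative sketch (not part of the original file): fetching a value
+ * by name from within the kernel:
+ *
+ *	unsigned long mem;
+ *	size_t len = sizeof(mem);
+ *
+ *	error = kernel_sysctlbyname(curthread, "hw.physmem", &mem, &len,
+ *	    NULL, 0, NULL, 0);
+ */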
+
+/*
+ * Transfer functions to/from user space.
+ */
+static int
+sysctl_old_user(struct sysctl_req *req, const void *p, size_t l)
+{
+ int error = 0;
+ size_t i, len, origidx;
+
+ origidx = req->oldidx;
+ req->oldidx += l;
+ if (req->oldptr == NULL)
+ return (0);
+ /*
+ * If we have not wired the user supplied buffer and we are currently
+ * holding locks, drop a witness warning, as it's possible that
+ * write operations to the user page can sleep.
+ */
+ if (req->lock != REQ_WIRED)
+ WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
+ "sysctl_old_user()");
+ i = l;
+ len = req->validlen;
+ if (len <= origidx)
+ i = 0;
+ else {
+ if (i > len - origidx)
+ i = len - origidx;
+ error = copyout(p, (char *)req->oldptr + origidx, i);
+ }
+ if (error)
+ return (error);
+ if (i < l)
+ return (ENOMEM);
+ return (0);
+}
+
+static int
+sysctl_new_user(struct sysctl_req *req, void *p, size_t l)
+{
+ int error;
+
+ if (!req->newptr)
+ return (0);
+ if (req->newlen - req->newidx < l)
+ return (EINVAL);
+ WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
+ "sysctl_new_user()");
+ error = copyin((char *)req->newptr + req->newidx, p, l);
+ req->newidx += l;
+ return (error);
+}
+
+/*
+ * Wire the user space destination buffer. If set to a value greater than
+ * zero, the len parameter limits the maximum amount of wired memory.
+ */
+int
+sysctl_wire_old_buffer(struct sysctl_req *req, size_t len)
+{
+ int ret;
+ size_t wiredlen;
+
+ wiredlen = (len > 0 && len < req->oldlen) ? len : req->oldlen;
+ ret = 0;
+ if (req->lock == REQ_LOCKED && req->oldptr &&
+ req->oldfunc == sysctl_old_user) {
+ if (wiredlen != 0) {
+ ret = vslock(req->oldptr, wiredlen);
+ if (ret != 0) {
+ if (ret != ENOMEM)
+ return (ret);
+ wiredlen = 0;
+ }
+ }
+ req->lock = REQ_WIRED;
+ req->validlen = wiredlen;
+ }
+ return (0);
+}
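+
+/*
+ * Illustrative sketch (not part of the original file): a handler that
+ * must not fault while holding locks typically wires the destination
+ * buffer up front; passing 0 wires up to req->oldlen bytes:
+ *
+ *	error = sysctl_wire_old_buffer(req, 0);
+ *	if (error != 0)
+ *		return (error);
+ *	(acquire locks, SYSCTL_OUT(), release locks)
+ */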
+#endif /* __rtems__ */
+
+int
+sysctl_find_oid(int *name, u_int namelen, struct sysctl_oid **noid,
+ int *nindx, struct sysctl_req *req)
+{
+ struct sysctl_oid *oid;
+ int indx;
+
+ SYSCTL_ASSERT_LOCKED();
+ oid = SLIST_FIRST(&sysctl__children);
+ indx = 0;
+ while (oid && indx < CTL_MAXNAME) {
+ if (oid->oid_number == name[indx]) {
+ indx++;
+ if (oid->oid_kind & CTLFLAG_NOLOCK)
+ req->lock = REQ_UNLOCKED;
+ if ((oid->oid_kind & CTLTYPE) == CTLTYPE_NODE) {
+ if (oid->oid_handler != NULL ||
+ indx == namelen) {
+ *noid = oid;
+ if (nindx != NULL)
+ *nindx = indx;
+ return (0);
+ }
+ oid = SLIST_FIRST(
+ (struct sysctl_oid_list *)oid->oid_arg1);
+ } else if (indx == namelen) {
+ *noid = oid;
+ if (nindx != NULL)
+ *nindx = indx;
+ return (0);
+ } else {
+ return (ENOTDIR);
+ }
+ } else {
+ oid = SLIST_NEXT(oid, oid_link);
+ }
+ }
+ return (ENOENT);
+}
+
+/*
+ * Traverse our tree, and find the right node, execute whatever it points
+ * to, and return the resulting error code.
+ */
+
+static int
+sysctl_root(SYSCTL_HANDLER_ARGS)
+{
+ struct sysctl_oid *oid;
+ int error, indx, lvl;
+
+ SYSCTL_ASSERT_LOCKED();
+
+ error = sysctl_find_oid(arg1, arg2, &oid, &indx, req);
+ if (error)
+ return (error);
+
+ if ((oid->oid_kind & CTLTYPE) == CTLTYPE_NODE) {
+ /*
+ * You can't call a sysctl when it's a node that has
+ * no handler. Inform the user that it's a node.
+ * The indx may or may not be the same as namelen.
+ */
+ if (oid->oid_handler == NULL)
+ return (EISDIR);
+ }
+
+ /* Is this sysctl writable? */
+ if (req->newptr && !(oid->oid_kind & CTLFLAG_WR))
+ return (EPERM);
+
+#ifndef __rtems__
+ KASSERT(req->td != NULL, ("sysctl_root(): req->td == NULL"));
+
+ /* Is this sysctl sensitive to securelevels? */
+ if (req->newptr && (oid->oid_kind & CTLFLAG_SECURE)) {
+ lvl = (oid->oid_kind & CTLMASK_SECURE) >> CTLSHIFT_SECURE;
+ error = securelevel_gt(req->td->td_ucred, lvl);
+ if (error)
+ return (error);
+ }
+
+ /* Is this sysctl writable by only privileged users? */
+ if (req->newptr && !(oid->oid_kind & CTLFLAG_ANYBODY)) {
+ int priv;
+
+ if (oid->oid_kind & CTLFLAG_PRISON)
+ priv = PRIV_SYSCTL_WRITEJAIL;
+#ifdef VIMAGE
+ else if ((oid->oid_kind & CTLFLAG_VNET) &&
+ prison_owns_vnet(req->td->td_ucred))
+ priv = PRIV_SYSCTL_WRITEJAIL;
+#endif
+ else
+ priv = PRIV_SYSCTL_WRITE;
+ error = priv_check(req->td, priv);
+ if (error)
+ return (error);
+ }
+#endif /* __rtems__ */
+
+ if (!oid->oid_handler)
+ return (EINVAL);
+
+ if ((oid->oid_kind & CTLTYPE) == CTLTYPE_NODE) {
+ arg1 = (int *)arg1 + indx;
+ arg2 -= indx;
+ } else {
+ arg1 = oid->oid_arg1;
+ arg2 = oid->oid_arg2;
+ }
+#ifdef MAC
+ error = mac_system_check_sysctl(req->td->td_ucred, oid, arg1, arg2,
+ req);
+ if (error != 0)
+ return (error);
+#endif
+ if (!(oid->oid_kind & CTLFLAG_MPSAFE))
+ mtx_lock(&Giant);
+ error = oid->oid_handler(oid, arg1, arg2, req);
+ if (!(oid->oid_kind & CTLFLAG_MPSAFE))
+ mtx_unlock(&Giant);
+
+ return (error);
+}
+
+#ifndef __rtems__
+#ifndef _SYS_SYSPROTO_HH_
+struct sysctl_args {
+ int *name;
+ u_int namelen;
+ void *old;
+ size_t *oldlenp;
+ void *new;
+ size_t newlen;
+};
+#endif
+int
+__sysctl(struct thread *td, struct sysctl_args *uap)
+{
+ int error, i, name[CTL_MAXNAME];
+ size_t j;
+
+ if (uap->namelen > CTL_MAXNAME || uap->namelen < 2)
+ return (EINVAL);
+
+ error = copyin(uap->name, &name, uap->namelen * sizeof(int));
+ if (error)
+ return (error);
+
+ error = userland_sysctl(td, name, uap->namelen,
+ uap->old, uap->oldlenp, 0,
+ uap->new, uap->newlen, &j, 0);
+ if (error && error != ENOMEM)
+ return (error);
+ if (uap->oldlenp) {
+ i = copyout(&j, uap->oldlenp, sizeof(j));
+ if (i)
+ return (i);
+ }
+ return (error);
+}
+
+/*
+ * This is used from various compatibility syscalls too. That's why name
+ * must be in kernel space.
+ */
+int
+userland_sysctl(struct thread *td, int *name, u_int namelen, void *old,
+ size_t *oldlenp, int inkernel, void *new, size_t newlen, size_t *retval,
+ int flags)
+{
+ int error = 0, memlocked;
+ struct sysctl_req req;
+
+ bzero(&req, sizeof req);
+
+ req.td = td;
+ req.flags = flags;
+
+ if (oldlenp) {
+ if (inkernel) {
+ req.oldlen = *oldlenp;
+ } else {
+ error = copyin(oldlenp, &req.oldlen, sizeof(*oldlenp));
+ if (error)
+ return (error);
+ }
+ }
+ req.validlen = req.oldlen;
+
+ if (old) {
+ if (!useracc(old, req.oldlen, VM_PROT_WRITE))
+ return (EFAULT);
+ req.oldptr= old;
+ }
+
+ if (new != NULL) {
+ if (!useracc(new, newlen, VM_PROT_READ))
+ return (EFAULT);
+ req.newlen = newlen;
+ req.newptr = new;
+ }
+
+ req.oldfunc = sysctl_old_user;
+ req.newfunc = sysctl_new_user;
+ req.lock = REQ_LOCKED;
+
+#ifdef KTRACE
+ if (KTRPOINT(curthread, KTR_SYSCTL))
+ ktrsysctl(name, namelen);
+#endif
+
+ if (req.oldlen > PAGE_SIZE) {
+ memlocked = 1;
+ sx_xlock(&sysctlmemlock);
+ } else
+ memlocked = 0;
+ CURVNET_SET(TD_TO_VNET(td));
+
+ for (;;) {
+ req.oldidx = 0;
+ req.newidx = 0;
+ SYSCTL_SLOCK();
+ error = sysctl_root(0, name, namelen, &req);
+ SYSCTL_SUNLOCK();
+ if (error != EAGAIN)
+ break;
+ uio_yield();
+ }
+
+ CURVNET_RESTORE();
+
+ if (req.lock == REQ_WIRED && req.validlen > 0)
+ vsunlock(req.oldptr, req.validlen);
+ if (memlocked)
+ sx_xunlock(&sysctlmemlock);
+
+ if (error && error != ENOMEM)
+ return (error);
+
+ if (retval) {
+ if (req.oldptr && req.oldidx > req.validlen)
+ *retval = req.validlen;
+ else
+ *retval = req.oldidx;
+ }
+ return (error);
+}
+#endif /* __rtems__ */
diff --git a/rtems/freebsd/kern/subr_bus.c b/rtems/freebsd/kern/subr_bus.c
new file mode 100644
index 00000000..f375fa90
--- /dev/null
+++ b/rtems/freebsd/kern/subr_bus.c
@@ -0,0 +1,4523 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 1997,1998,2003 Doug Rabson
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/freebsd/local/opt_bus.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/conf.h>
+#include <rtems/freebsd/sys/filio.h>
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/kobj.h>
+#include <rtems/freebsd/sys/limits.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/module.h>
+#include <rtems/freebsd/sys/mutex.h>
+#include <rtems/freebsd/sys/poll.h>
+#include <rtems/freebsd/sys/proc.h>
+#include <rtems/freebsd/sys/condvar.h>
+#include <rtems/freebsd/sys/queue.h>
+#include <rtems/freebsd/machine/bus.h>
+#include <rtems/freebsd/sys/rman.h>
+#include <rtems/freebsd/sys/selinfo.h>
+#include <rtems/freebsd/sys/signalvar.h>
+#include <rtems/freebsd/sys/sysctl.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/uio.h>
+#include <rtems/freebsd/sys/bus.h>
+#include <rtems/freebsd/sys/interrupt.h>
+
+#include <rtems/freebsd/machine/stdarg.h>
+
+#include <rtems/freebsd/vm/uma.h>
+
+SYSCTL_NODE(_hw, OID_AUTO, bus, CTLFLAG_RW, NULL, NULL);
+SYSCTL_NODE(, OID_AUTO, dev, CTLFLAG_RW, NULL, NULL);
+
+/*
+ * Used to attach drivers to devclasses.
+ */
+typedef struct driverlink *driverlink_t;
+struct driverlink {
+ kobj_class_t driver;
+ TAILQ_ENTRY(driverlink) link; /* list of drivers in devclass */
+ int pass;
+ TAILQ_ENTRY(driverlink) passlink;
+};
+
+/*
+ * Forward declarations
+ */
+typedef TAILQ_HEAD(devclass_list, devclass) devclass_list_t;
+typedef TAILQ_HEAD(driver_list, driverlink) driver_list_t;
+typedef TAILQ_HEAD(device_list, device) device_list_t;
+
+struct devclass {
+ TAILQ_ENTRY(devclass) link;
+ devclass_t parent; /* parent in devclass hierarchy */
+ driver_list_t drivers; /* bus devclasses store drivers for bus */
+ char *name;
+ device_t *devices; /* array of devices indexed by unit */
+ int maxunit; /* size of devices array */
+ int flags;
+#define DC_HAS_CHILDREN 1
+
+ struct sysctl_ctx_list sysctl_ctx;
+ struct sysctl_oid *sysctl_tree;
+};
+
+/**
+ * @brief Implementation of device.
+ */
+struct device {
+ /*
+ * A device is a kernel object. The first field must be the
+ * current ops table for the object.
+ */
+ KOBJ_FIELDS;
+
+ /*
+ * Device hierarchy.
+ */
+ TAILQ_ENTRY(device) link; /**< list of devices in parent */
+ TAILQ_ENTRY(device) devlink; /**< global device list membership */
+ device_t parent; /**< parent of this device */
+ device_list_t children; /**< list of child devices */
+
+ /*
+ * Details of this device.
+ */
+ driver_t *driver; /**< current driver */
+ devclass_t devclass; /**< current device class */
+ int unit; /**< current unit number */
+ char* nameunit; /**< name+unit e.g. foodev0 */
+ char* desc; /**< driver specific description */
+ int busy; /**< count of calls to device_busy() */
+ device_state_t state; /**< current device state */
+ u_int32_t devflags; /**< api level flags for device_get_flags() */
+ u_int flags; /**< internal device flags */
+#define DF_ENABLED 0x01 /* device should be probed/attached */
+#define DF_FIXEDCLASS 0x02 /* devclass specified at create time */
+#define DF_WILDCARD 0x04 /* unit was originally wildcard */
+#define DF_DESCMALLOCED 0x08 /* description was malloced */
+#define DF_QUIET 0x10 /* don't print verbose attach message */
+#define DF_DONENOMATCH 0x20 /* don't execute DEVICE_NOMATCH again */
+#define DF_EXTERNALSOFTC 0x40 /* softc not allocated by us */
+#define DF_REBID 0x80 /* Can rebid after attach */
+ u_int order; /**< order from device_add_child_ordered() */
+ void *ivars; /**< instance variables */
+ void *softc; /**< current driver's variables */
+
+ struct sysctl_ctx_list sysctl_ctx; /**< state for sysctl variables */
+ struct sysctl_oid *sysctl_tree; /**< state for sysctl variables */
+};
+
+static MALLOC_DEFINE(M_BUS, "bus", "Bus data structures");
+static MALLOC_DEFINE(M_BUS_SC, "bus-sc", "Bus data structures, softc");
+
+#ifdef BUS_DEBUG
+
+static int bus_debug = 1;
+TUNABLE_INT("bus.debug", &bus_debug);
+SYSCTL_INT(_debug, OID_AUTO, bus_debug, CTLFLAG_RW, &bus_debug, 0,
+ "Debug bus code");
+
+#define PDEBUG(a) if (bus_debug) {printf("%s:%d: ", __func__, __LINE__), printf a; printf("\n");}
+#define DEVICENAME(d) ((d)? device_get_name(d): "no device")
+#define DRIVERNAME(d) ((d)? d->name : "no driver")
+#define DEVCLANAME(d) ((d)? d->name : "no devclass")
+
+/**
+ * Produce the indentation: indent*2 spaces, with a '.' ahead of them to
+ * prevent syslog from deleting the initial spaces.
+ */
+#define indentprintf(p) do { int iJ; printf("."); for (iJ=0; iJ<indent; iJ++) printf(" "); printf p ; } while (0)
+
+static void print_device_short(device_t dev, int indent);
+static void print_device(device_t dev, int indent);
+void print_device_tree_short(device_t dev, int indent);
+void print_device_tree(device_t dev, int indent);
+static void print_driver_short(driver_t *driver, int indent);
+static void print_driver(driver_t *driver, int indent);
+static void print_driver_list(driver_list_t drivers, int indent);
+static void print_devclass_short(devclass_t dc, int indent);
+static void print_devclass(devclass_t dc, int indent);
+void print_devclass_list_short(void);
+void print_devclass_list(void);
+
+#else
+/* Make the compiler ignore the function calls */
+#define PDEBUG(a) /* nop */
+#define DEVICENAME(d) /* nop */
+#define DRIVERNAME(d) /* nop */
+#define DEVCLANAME(d) /* nop */
+
+#define print_device_short(d,i) /* nop */
+#define print_device(d,i) /* nop */
+#define print_device_tree_short(d,i) /* nop */
+#define print_device_tree(d,i) /* nop */
+#define print_driver_short(d,i) /* nop */
+#define print_driver(d,i) /* nop */
+#define print_driver_list(d,i) /* nop */
+#define print_devclass_short(d,i) /* nop */
+#define print_devclass(d,i) /* nop */
+#define print_devclass_list_short() /* nop */
+#define print_devclass_list() /* nop */
+#endif
+
+/*
+ * dev sysctl tree
+ */
+
+enum {
+ DEVCLASS_SYSCTL_PARENT,
+};
+
+static int
+devclass_sysctl_handler(SYSCTL_HANDLER_ARGS)
+{
+ devclass_t dc = (devclass_t)arg1;
+ const char *value;
+
+ switch (arg2) {
+ case DEVCLASS_SYSCTL_PARENT:
+ value = dc->parent ? dc->parent->name : "";
+ break;
+ default:
+ return (EINVAL);
+ }
+ return (SYSCTL_OUT(req, value, strlen(value)));
+}
+
+static void
+devclass_sysctl_init(devclass_t dc)
+{
+
+ if (dc->sysctl_tree != NULL)
+ return;
+ sysctl_ctx_init(&dc->sysctl_ctx);
+ dc->sysctl_tree = SYSCTL_ADD_NODE(&dc->sysctl_ctx,
+ SYSCTL_STATIC_CHILDREN(_dev), OID_AUTO, dc->name,
+ CTLFLAG_RD, NULL, "");
+ SYSCTL_ADD_PROC(&dc->sysctl_ctx, SYSCTL_CHILDREN(dc->sysctl_tree),
+ OID_AUTO, "%parent", CTLFLAG_RD,
+ dc, DEVCLASS_SYSCTL_PARENT, devclass_sysctl_handler, "A",
+ "parent class");
+}
+
+enum {
+ DEVICE_SYSCTL_DESC,
+ DEVICE_SYSCTL_DRIVER,
+ DEVICE_SYSCTL_LOCATION,
+ DEVICE_SYSCTL_PNPINFO,
+ DEVICE_SYSCTL_PARENT,
+};
+
+static int
+device_sysctl_handler(SYSCTL_HANDLER_ARGS)
+{
+ device_t dev = (device_t)arg1;
+ const char *value;
+ char *buf;
+ int error;
+
+ buf = NULL;
+ switch (arg2) {
+ case DEVICE_SYSCTL_DESC:
+ value = dev->desc ? dev->desc : "";
+ break;
+ case DEVICE_SYSCTL_DRIVER:
+ value = dev->driver ? dev->driver->name : "";
+ break;
+ case DEVICE_SYSCTL_LOCATION:
+ value = buf = malloc(1024, M_BUS, M_WAITOK | M_ZERO);
+ bus_child_location_str(dev, buf, 1024);
+ break;
+ case DEVICE_SYSCTL_PNPINFO:
+ value = buf = malloc(1024, M_BUS, M_WAITOK | M_ZERO);
+ bus_child_pnpinfo_str(dev, buf, 1024);
+ break;
+ case DEVICE_SYSCTL_PARENT:
+ value = dev->parent ? dev->parent->nameunit : "";
+ break;
+ default:
+ return (EINVAL);
+ }
+ error = SYSCTL_OUT(req, value, strlen(value));
+ if (buf != NULL)
+ free(buf, M_BUS);
+ return (error);
+}
+
+static void
+device_sysctl_init(device_t dev)
+{
+ devclass_t dc = dev->devclass;
+
+ if (dev->sysctl_tree != NULL)
+ return;
+ devclass_sysctl_init(dc);
+ sysctl_ctx_init(&dev->sysctl_ctx);
+ dev->sysctl_tree = SYSCTL_ADD_NODE(&dev->sysctl_ctx,
+ SYSCTL_CHILDREN(dc->sysctl_tree), OID_AUTO,
+ dev->nameunit + strlen(dc->name),
+ CTLFLAG_RD, NULL, "");
+ SYSCTL_ADD_PROC(&dev->sysctl_ctx, SYSCTL_CHILDREN(dev->sysctl_tree),
+ OID_AUTO, "%desc", CTLFLAG_RD,
+ dev, DEVICE_SYSCTL_DESC, device_sysctl_handler, "A",
+ "device description");
+ SYSCTL_ADD_PROC(&dev->sysctl_ctx, SYSCTL_CHILDREN(dev->sysctl_tree),
+ OID_AUTO, "%driver", CTLFLAG_RD,
+ dev, DEVICE_SYSCTL_DRIVER, device_sysctl_handler, "A",
+ "device driver name");
+ SYSCTL_ADD_PROC(&dev->sysctl_ctx, SYSCTL_CHILDREN(dev->sysctl_tree),
+ OID_AUTO, "%location", CTLFLAG_RD,
+ dev, DEVICE_SYSCTL_LOCATION, device_sysctl_handler, "A",
+ "device location relative to parent");
+ SYSCTL_ADD_PROC(&dev->sysctl_ctx, SYSCTL_CHILDREN(dev->sysctl_tree),
+ OID_AUTO, "%pnpinfo", CTLFLAG_RD,
+ dev, DEVICE_SYSCTL_PNPINFO, device_sysctl_handler, "A",
+ "device identification");
+ SYSCTL_ADD_PROC(&dev->sysctl_ctx, SYSCTL_CHILDREN(dev->sysctl_tree),
+ OID_AUTO, "%parent", CTLFLAG_RD,
+ dev, DEVICE_SYSCTL_PARENT, device_sysctl_handler, "A",
+ "parent device");
+}
+
+static void
+device_sysctl_update(device_t dev)
+{
+ devclass_t dc = dev->devclass;
+
+ if (dev->sysctl_tree == NULL)
+ return;
+ sysctl_rename_oid(dev->sysctl_tree, dev->nameunit + strlen(dc->name));
+}
+
+static void
+device_sysctl_fini(device_t dev)
+{
+ if (dev->sysctl_tree == NULL)
+ return;
+ sysctl_ctx_free(&dev->sysctl_ctx);
+ dev->sysctl_tree = NULL;
+}
+
+/*
+ * /dev/devctl implementation
+ */
+
+/*
+ * This design allows only one reader for /dev/devctl. This is not desirable
+ * in the long run, but will get a lot of hair out of this implementation.
+ * Maybe we should make this device a clonable device.
+ *
+ * Also note: we specifically do not attach a device to the device_t tree
+ * to avoid potential chicken and egg problems. One could argue that all
+ * of this belongs to the root node. One could also further argue that the
+ * sysctl interface that we have now might more properly be an ioctl
+ * interface, but at this stage of the game, I'm not inclined to rock that
+ * boat.
+ *
+ * I'm also not sure that the SIGIO support is done correctly or not, as
+ * I copied it from a driver that had SIGIO support that likely hasn't been
+ * tested since 3.4 or 2.2.8!
+ */
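+
+/*
+ * Illustrative sketch (not part of the original file): a minimal
+ * userland consumer in the spirit of devd(8); each read(2) returns one
+ * complete event record:
+ *
+ *	char ev[1024];
+ *	int fd = open("/dev/devctl", O_RDONLY);
+ *	ssize_t n;
+ *
+ *	while ((n = read(fd, ev, sizeof(ev) - 1)) > 0) {
+ *		ev[n] = '\0';
+ *		(parse the "!system=... subsystem=... type=..." line)
+ *	}
+ */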
+
+/* Deprecated way to adjust queue length */
+static int sysctl_devctl_disable(SYSCTL_HANDLER_ARGS);
+/* XXX Need to support the old-style tunable hw.bus.devctl_disable */
+SYSCTL_PROC(_hw_bus, OID_AUTO, devctl_disable, CTLTYPE_INT | CTLFLAG_RW, NULL,
+ 0, sysctl_devctl_disable, "I", "devctl disable -- deprecated");
+
+#define DEVCTL_DEFAULT_QUEUE_LEN 1000
+static int sysctl_devctl_queue(SYSCTL_HANDLER_ARGS);
+static int devctl_queue_length = DEVCTL_DEFAULT_QUEUE_LEN;
+TUNABLE_INT("hw.bus.devctl_queue", &devctl_queue_length);
+SYSCTL_PROC(_hw_bus, OID_AUTO, devctl_queue, CTLTYPE_INT | CTLFLAG_RW, NULL,
+ 0, sysctl_devctl_queue, "I", "devctl queue length");
+
+static d_open_t devopen;
+static d_close_t devclose;
+static d_read_t devread;
+static d_ioctl_t devioctl;
+static d_poll_t devpoll;
+
+static struct cdevsw dev_cdevsw = {
+ .d_version = D_VERSION,
+ .d_flags = D_NEEDGIANT,
+ .d_open = devopen,
+ .d_close = devclose,
+ .d_read = devread,
+ .d_ioctl = devioctl,
+ .d_poll = devpoll,
+ .d_name = "devctl",
+};
+
+struct dev_event_info
+{
+ char *dei_data;
+ TAILQ_ENTRY(dev_event_info) dei_link;
+};
+
+TAILQ_HEAD(devq, dev_event_info);
+
+static struct dev_softc
+{
+ int inuse;
+ int nonblock;
+ int queued;
+ struct mtx mtx;
+ struct cv cv;
+ struct selinfo sel;
+ struct devq devq;
+ struct proc *async_proc;
+} devsoftc;
+
+static struct cdev *devctl_dev;
+
+static void
+devinit(void)
+{
+ devctl_dev = make_dev(&dev_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600,
+ "devctl");
+ mtx_init(&devsoftc.mtx, "dev mtx", "devd", MTX_DEF);
+ cv_init(&devsoftc.cv, "dev cv");
+ TAILQ_INIT(&devsoftc.devq);
+}
+
+static int
+devopen(struct cdev *dev, int oflags, int devtype, struct thread *td)
+{
+ if (devsoftc.inuse)
+ return (EBUSY);
+ /* move to init */
+ devsoftc.inuse = 1;
+ devsoftc.nonblock = 0;
+ devsoftc.async_proc = NULL;
+ return (0);
+}
+
+static int
+devclose(struct cdev *dev, int fflag, int devtype, struct thread *td)
+{
+ devsoftc.inuse = 0;
+ mtx_lock(&devsoftc.mtx);
+ cv_broadcast(&devsoftc.cv);
+ mtx_unlock(&devsoftc.mtx);
+ devsoftc.async_proc = NULL;
+ return (0);
+}
+
+/*
+ * The read channel for this device is used to report changes to
+ * userland in realtime. We are required to free the data as well as
+ * the n1 object because we allocate them separately. Also note that
+ * we return one record at a time. If you try to read this device a
+ * character at a time, you will lose the rest of the data. Listening
+ * programs are expected to cope.
+ */
+static int
+devread(struct cdev *dev, struct uio *uio, int ioflag)
+{
+ struct dev_event_info *n1;
+ int rv;
+
+ mtx_lock(&devsoftc.mtx);
+ while (TAILQ_EMPTY(&devsoftc.devq)) {
+ if (devsoftc.nonblock) {
+ mtx_unlock(&devsoftc.mtx);
+ return (EAGAIN);
+ }
+ rv = cv_wait_sig(&devsoftc.cv, &devsoftc.mtx);
+ if (rv) {
+ /*
+ * Need to translate ERESTART to EINTR here? -- jake
+ */
+ mtx_unlock(&devsoftc.mtx);
+ return (rv);
+ }
+ }
+ n1 = TAILQ_FIRST(&devsoftc.devq);
+ TAILQ_REMOVE(&devsoftc.devq, n1, dei_link);
+ devsoftc.queued--;
+ mtx_unlock(&devsoftc.mtx);
+ rv = uiomove(n1->dei_data, strlen(n1->dei_data), uio);
+ free(n1->dei_data, M_BUS);
+ free(n1, M_BUS);
+ return (rv);
+}
+
+static int
+devioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, struct thread *td)
+{
+ switch (cmd) {
+
+ case FIONBIO:
+ if (*(int*)data)
+ devsoftc.nonblock = 1;
+ else
+ devsoftc.nonblock = 0;
+ return (0);
+ case FIOASYNC:
+ if (*(int*)data)
+ devsoftc.async_proc = td->td_proc;
+ else
+ devsoftc.async_proc = NULL;
+ return (0);
+
+ /* (un)Support for other fcntl() calls. */
+ case FIOCLEX:
+ case FIONCLEX:
+ case FIONREAD:
+ case FIOSETOWN:
+ case FIOGETOWN:
+ default:
+ break;
+ }
+ return (ENOTTY);
+}
+
+static int
+devpoll(struct cdev *dev, int events, struct thread *td)
+{
+ int revents = 0;
+
+ mtx_lock(&devsoftc.mtx);
+ if (events & (POLLIN | POLLRDNORM)) {
+ if (!TAILQ_EMPTY(&devsoftc.devq))
+ revents = events & (POLLIN | POLLRDNORM);
+ else
+ selrecord(td, &devsoftc.sel);
+ }
+ mtx_unlock(&devsoftc.mtx);
+
+ return (revents);
+}
+
+/**
+ * @brief Return whether the userland process is running
+ */
+boolean_t
+devctl_process_running(void)
+{
+ return (devsoftc.inuse == 1);
+}
+
+/**
+ * @brief Queue data to be read from the devctl device
+ *
+ * Generic interface to queue data to the devctl device. It is
+ * assumed that @p data is properly formatted. It is further assumed
+ * that @p data is allocated using the M_BUS malloc type.
+ */
+void
+devctl_queue_data_f(char *data, int flags)
+{
+ struct dev_event_info *n1 = NULL, *n2 = NULL;
+ struct proc *p;
+
+ if (strlen(data) == 0)
+ goto out;
+ if (devctl_queue_length == 0)
+ goto out;
+ n1 = malloc(sizeof(*n1), M_BUS, flags);
+ if (n1 == NULL)
+ goto out;
+ n1->dei_data = data;
+ mtx_lock(&devsoftc.mtx);
+ if (devctl_queue_length == 0) {
+ mtx_unlock(&devsoftc.mtx);
+ free(n1->dei_data, M_BUS);
+ free(n1, M_BUS);
+ return;
+ }
+ /* Leave at least one spot in the queue... */
+ while (devsoftc.queued > devctl_queue_length - 1) {
+ n2 = TAILQ_FIRST(&devsoftc.devq);
+ TAILQ_REMOVE(&devsoftc.devq, n2, dei_link);
+ free(n2->dei_data, M_BUS);
+ free(n2, M_BUS);
+ devsoftc.queued--;
+ }
+ TAILQ_INSERT_TAIL(&devsoftc.devq, n1, dei_link);
+ devsoftc.queued++;
+ cv_broadcast(&devsoftc.cv);
+ mtx_unlock(&devsoftc.mtx);
+ selwakeup(&devsoftc.sel);
+ p = devsoftc.async_proc;
+ if (p != NULL) {
+ PROC_LOCK(p);
+ psignal(p, SIGIO);
+ PROC_UNLOCK(p);
+ }
+ return;
+out:
+ /*
+ * We have to free data on all error paths since the caller
+ * assumes it will be free'd when this item is dequeued.
+ */
+ free(data, M_BUS);
+ return;
+}
+
+void
+devctl_queue_data(char *data)
+{
+
+ devctl_queue_data_f(data, M_NOWAIT);
+}
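+
+/*
+ * Usage sketch (illustrative only): the caller formats a message into
+ * M_BUS-allocated storage and hands ownership to the queue; the string
+ * is freed when dequeued or on error.  The message text is made up.
+ *
+ *	char *msg;
+ *
+ *	msg = malloc(64, M_BUS, M_NOWAIT);
+ *	if (msg != NULL) {
+ *		snprintf(msg, 64, "!system=FOO subsystem=foo0 type=HINT\n");
+ *		devctl_queue_data(msg);	// queue now owns msg
+ *	}
+ */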
+
+/**
+ * @brief Send a 'notification' to userland, using standard ways
+ */
+void
+devctl_notify_f(const char *system, const char *subsystem, const char *type,
+ const char *data, int flags)
+{
+ int len = 0;
+ char *msg;
+
+ if (system == NULL)
+ return; /* BOGUS! Must specify system. */
+ if (subsystem == NULL)
+ return; /* BOGUS! Must specify subsystem. */
+ if (type == NULL)
+ return; /* BOGUS! Must specify type. */
+ len += strlen(" system=") + strlen(system);
+ len += strlen(" subsystem=") + strlen(subsystem);
+ len += strlen(" type=") + strlen(type);
+ /* add in the data message plus newline. */
+ if (data != NULL)
+ len += strlen(data);
+ len += 3; /* '!', '\n', and NUL */
+ msg = malloc(len, M_BUS, flags);
+ if (msg == NULL)
+ return; /* Drop it on the floor */
+ if (data != NULL)
+ snprintf(msg, len, "!system=%s subsystem=%s type=%s %s\n",
+ system, subsystem, type, data);
+ else
+ snprintf(msg, len, "!system=%s subsystem=%s type=%s\n",
+ system, subsystem, type);
+ devctl_queue_data_f(msg, flags);
+}
+
+void
+devctl_notify(const char *system, const char *subsystem, const char *type,
+ const char *data)
+{
+
+ devctl_notify_f(system, subsystem, type, data, M_NOWAIT);
+}
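+
+/*
+ * Usage sketch (illustrative only): a driver reporting an event in the
+ * standard notification format.  The system, type and data strings are
+ * invented for the example.
+ *
+ *	devctl_notify("FOO", device_get_nameunit(dev), "LINK_UP",
+ *	    "speed=1000");
+ */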
+
+/*
+ * Common routine that tries to make sending messages as easy as possible.
+ * We allocate memory for the data, copy strings into that, but do not
+ * free it unless there's an error. The dequeue part of the driver should
+ * free the data. We don't send data when the device is disabled. We do
+ * send data, even when we have no listeners, because we wish to avoid
+ * races relating to startup and restart of listening applications.
+ *
+ * devaddq is designed to string together the type of event, with the
+ * object of that event, plus the plug and play info and location info
+ * for that event. This is likely most useful for devices, but less
+ * useful for other consumers of this interface. Those should use
+ * the devctl_queue_data() interface instead.
+ */
+static void
+devaddq(const char *type, const char *what, device_t dev)
+{
+ char *data = NULL;
+ char *loc = NULL;
+ char *pnp = NULL;
+ const char *parstr;
+
+ if (!devctl_queue_length)/* Rare race, but lost races safely discard */
+ return;
+ data = malloc(1024, M_BUS, M_NOWAIT);
+ if (data == NULL)
+ goto bad;
+
+ /* get the bus specific location of this device */
+ loc = malloc(1024, M_BUS, M_NOWAIT);
+ if (loc == NULL)
+ goto bad;
+ *loc = '\0';
+ bus_child_location_str(dev, loc, 1024);
+
+ /* Get the bus specific pnp info of this device */
+ pnp = malloc(1024, M_BUS, M_NOWAIT);
+ if (pnp == NULL)
+ goto bad;
+ *pnp = '\0';
+ bus_child_pnpinfo_str(dev, pnp, 1024);
+
+ /* Get the parent of this device, or / if high enough in the tree. */
+ if (device_get_parent(dev) == NULL)
+ parstr = "."; /* Or '/' ? */
+ else
+ parstr = device_get_nameunit(device_get_parent(dev));
+ /* String it all together. */
+ snprintf(data, 1024, "%s%s at %s %s on %s\n", type, what, loc, pnp,
+ parstr);
+ free(loc, M_BUS);
+ free(pnp, M_BUS);
+ devctl_queue_data(data);
+ return;
+bad:
+ free(pnp, M_BUS);
+ free(loc, M_BUS);
+ free(data, M_BUS);
+ return;
+}
+
+/*
+ * A device was added to the tree. We are called just after it successfully
+ * attaches (that is, probe and attach success for this device). No call
+ * is made if a device is merely parented into the tree. See devnomatch
+ * if probe fails. If attach fails, no notification is sent (but maybe
+ * we should have a different message for this).
+ */
+static void
+devadded(device_t dev)
+{
+ char *pnp = NULL;
+ char *tmp = NULL;
+
+ pnp = malloc(1024, M_BUS, M_NOWAIT);
+ if (pnp == NULL)
+ goto fail;
+ tmp = malloc(1024, M_BUS, M_NOWAIT);
+ if (tmp == NULL)
+ goto fail;
+ *pnp = '\0';
+ bus_child_pnpinfo_str(dev, pnp, 1024);
+ snprintf(tmp, 1024, "%s %s", device_get_nameunit(dev), pnp);
+ devaddq("+", tmp, dev);
+fail:
+ if (pnp != NULL)
+ free(pnp, M_BUS);
+ if (tmp != NULL)
+ free(tmp, M_BUS);
+ return;
+}
+
+/*
+ * A device was removed from the tree. We are called just before this
+ * happens.
+ */
+static void
+devremoved(device_t dev)
+{
+ char *pnp = NULL;
+ char *tmp = NULL;
+
+ pnp = malloc(1024, M_BUS, M_NOWAIT);
+ if (pnp == NULL)
+ goto fail;
+ tmp = malloc(1024, M_BUS, M_NOWAIT);
+ if (tmp == NULL)
+ goto fail;
+ *pnp = '\0';
+ bus_child_pnpinfo_str(dev, pnp, 1024);
+ snprintf(tmp, 1024, "%s %s", device_get_nameunit(dev), pnp);
+ devaddq("-", tmp, dev);
+fail:
+ if (pnp != NULL)
+ free(pnp, M_BUS);
+ if (tmp != NULL)
+ free(tmp, M_BUS);
+ return;
+}
+
+/*
+ * Called when there's no match for this device. This is only called
+ * the first time that no match happens, so we don't keep getting this
+ * message. Should that prove to be undesirable, we can change it.
+ * This is called when all drivers that can attach to a given bus
+ * decline to accept this device. Other errors may not be detected.
+ */
+static void
+devnomatch(device_t dev)
+{
+ devaddq("?", "", dev);
+}
+
+static int
+sysctl_devctl_disable(SYSCTL_HANDLER_ARGS)
+{
+ struct dev_event_info *n1;
+ int dis, error;
+
+ dis = devctl_queue_length == 0;
+ error = sysctl_handle_int(oidp, &dis, 0, req);
+ if (error || !req->newptr)
+ return (error);
+ mtx_lock(&devsoftc.mtx);
+ if (dis) {
+ while (!TAILQ_EMPTY(&devsoftc.devq)) {
+ n1 = TAILQ_FIRST(&devsoftc.devq);
+ TAILQ_REMOVE(&devsoftc.devq, n1, dei_link);
+ free(n1->dei_data, M_BUS);
+ free(n1, M_BUS);
+ }
+ devsoftc.queued = 0;
+ devctl_queue_length = 0;
+ } else {
+ devctl_queue_length = DEVCTL_DEFAULT_QUEUE_LEN;
+ }
+ mtx_unlock(&devsoftc.mtx);
+ return (0);
+}
+
+static int
+sysctl_devctl_queue(SYSCTL_HANDLER_ARGS)
+{
+ struct dev_event_info *n1;
+ int q, error;
+
+ q = devctl_queue_length;
+ error = sysctl_handle_int(oidp, &q, 0, req);
+ if (error || !req->newptr)
+ return (error);
+ if (q < 0)
+ return (EINVAL);
+ mtx_lock(&devsoftc.mtx);
+ devctl_queue_length = q;
+ while (devsoftc.queued > devctl_queue_length) {
+ n1 = TAILQ_FIRST(&devsoftc.devq);
+ TAILQ_REMOVE(&devsoftc.devq, n1, dei_link);
+ free(n1->dei_data, M_BUS);
+ free(n1, M_BUS);
+ devsoftc.queued--;
+ }
+ mtx_unlock(&devsoftc.mtx);
+ return (0);
+}
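+
+/*
+ * Administrative sketch (assuming these handlers are attached under the
+ * hw.bus sysctl tree, as in stock FreeBSD; the attachment itself is not
+ * part of this hunk):
+ *
+ *	# sysctl hw.bus.devctl_queue=64		(resize the event queue)
+ *	# sysctl hw.bus.devctl_disable=1	(flush the queue and disable)
+ */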
+
+/* End of /dev/devctl code */
+
+static TAILQ_HEAD(,device) bus_data_devices;
+static int bus_data_generation = 1;
+
+static kobj_method_t null_methods[] = {
+ KOBJMETHOD_END
+};
+
+DEFINE_CLASS(null, null_methods, 0);
+
+/*
+ * Bus pass implementation
+ */
+
+static driver_list_t passes = TAILQ_HEAD_INITIALIZER(passes);
+int bus_current_pass = BUS_PASS_ROOT;
+
+/**
+ * @internal
+ * @brief Register the pass level of a new driver attachment
+ *
+ * Register a new driver attachment's pass level. If no driver
+ * attachment with the same pass level has been added, then @p new
+ * will be added to the global passes list.
+ *
+ * @param new the new driver attachment
+ */
+static void
+driver_register_pass(struct driverlink *new)
+{
+ struct driverlink *dl;
+
+ /* We only consider pass numbers during boot. */
+ if (bus_current_pass == BUS_PASS_DEFAULT)
+ return;
+
+ /*
+ * Walk the passes list. If we already know about this pass
+ * then there is nothing to do. If we don't, then insert this
+ * driver link into the list.
+ */
+ TAILQ_FOREACH(dl, &passes, passlink) {
+ if (dl->pass < new->pass)
+ continue;
+ if (dl->pass == new->pass)
+ return;
+ TAILQ_INSERT_BEFORE(dl, new, passlink);
+ return;
+ }
+ TAILQ_INSERT_TAIL(&passes, new, passlink);
+}
+
+/**
+ * @brief Raise the current bus pass
+ *
+ * Raise the current bus pass level to @p pass. Call the BUS_NEW_PASS()
+ * method on the root bus to kick off a new device tree scan for each
+ * new pass level that has at least one driver.
+ */
+void
+bus_set_pass(int pass)
+{
+ struct driverlink *dl;
+
+ if (bus_current_pass > pass)
+ panic("Attempt to lower bus pass level");
+
+ TAILQ_FOREACH(dl, &passes, passlink) {
+ /* Skip pass values below the current pass level. */
+ if (dl->pass <= bus_current_pass)
+ continue;
+
+ /*
+ * Bail once we hit a driver with a pass level that is
+ * too high.
+ */
+ if (dl->pass > pass)
+ break;
+
+ /*
+ * Raise the pass level to the next level and rescan
+ * the tree.
+ */
+ bus_current_pass = dl->pass;
+ BUS_NEW_PASS(root_bus);
+ }
+
+ /*
+ * If there isn't a driver registered for the requested pass,
+ * then bus_current_pass might still be less than 'pass'. Set
+ * it to 'pass' in that case.
+ */
+ if (bus_current_pass < pass)
+ bus_current_pass = pass;
+ KASSERT(bus_current_pass == pass, ("Failed to update bus pass level"));
+}
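+
+/*
+ * Illustrative sketch: a driver that must attach during an early bus
+ * pass registers with an explicit pass level, typically through
+ * EARLY_DRIVER_MODULE().  The "foo" driver and its devclass are
+ * hypothetical.
+ *
+ *	EARLY_DRIVER_MODULE(foo, nexus, foo_driver, foo_devclass,
+ *	    0, 0, BUS_PASS_BUS);
+ */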
+
+/*
+ * Devclass implementation
+ */
+
+static devclass_list_t devclasses = TAILQ_HEAD_INITIALIZER(devclasses);
+
+/**
+ * @internal
+ * @brief Find or create a device class
+ *
+ * If a device class with the name @p classname exists, return it,
+ * otherwise if @p create is non-zero create and return a new device
+ * class.
+ *
+ * If @p parentname is non-NULL, the parent of the devclass is set to
+ * the devclass of that name.
+ *
+ * @param classname the devclass name to find or create
+ * @param parentname the parent devclass name or @c NULL
+ * @param create non-zero to create a devclass
+ */
+static devclass_t
+devclass_find_internal(const char *classname, const char *parentname,
+ int create)
+{
+ devclass_t dc;
+
+ PDEBUG(("looking for %s", classname));
+ if (!classname)
+ return (NULL);
+
+ TAILQ_FOREACH(dc, &devclasses, link) {
+ if (!strcmp(dc->name, classname))
+ break;
+ }
+
+ if (create && !dc) {
+ PDEBUG(("creating %s", classname));
+ dc = malloc(sizeof(struct devclass) + strlen(classname) + 1,
+ M_BUS, M_NOWAIT | M_ZERO);
+ if (!dc)
+ return (NULL);
+ dc->parent = NULL;
+ dc->name = (char*) (dc + 1);
+ strcpy(dc->name, classname);
+ TAILQ_INIT(&dc->drivers);
+ TAILQ_INSERT_TAIL(&devclasses, dc, link);
+
+ bus_data_generation_update();
+ }
+
+ /*
+ * If a parent class is specified, then set that as our parent so
+ * that this devclass will support drivers for the parent class as
+ * well. If the parent class has the same name don't do this though
+ * as it creates a cycle that can trigger an infinite loop in
+ * device_probe_child() if a device exists for which there is no
+ * suitable driver.
+ */
+ if (parentname && dc && !dc->parent &&
+ strcmp(classname, parentname) != 0) {
+ dc->parent = devclass_find_internal(parentname, NULL, TRUE);
+ dc->parent->flags |= DC_HAS_CHILDREN;
+ }
+
+ return (dc);
+}
+
+/**
+ * @brief Create a device class
+ *
+ * If a device class with the name @p classname exists, return it,
+ * otherwise create and return a new device class.
+ *
+ * @param classname the devclass name to find or create
+ */
+devclass_t
+devclass_create(const char *classname)
+{
+ return (devclass_find_internal(classname, NULL, TRUE));
+}
+
+/**
+ * @brief Find a device class
+ *
+ * If a device class with the name @p classname exists, return it,
+ * otherwise return @c NULL.
+ *
+ * @param classname the devclass name to find
+ */
+devclass_t
+devclass_find(const char *classname)
+{
+ return (devclass_find_internal(classname, NULL, FALSE));
+}
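+
+/*
+ * Usage sketch (illustrative): look a devclass up by name without
+ * creating it, then fall back to creation.  "foo" is a made-up name.
+ *
+ *	devclass_t dc;
+ *
+ *	dc = devclass_find("foo");		// NULL if not yet created
+ *	if (dc == NULL)
+ *		dc = devclass_create("foo");	// find-or-create
+ */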
+
+/**
+ * @brief Register that a device driver has been added to a devclass
+ *
+ * Register that a device driver has been added to a devclass. This
+ * is called by devclass_add_driver to accomplish the recursive
+ * notification of all the children classes of dc, as well as dc.
+ * Each layer will have BUS_DRIVER_ADDED() called for all instances of
+ * the devclass. We do a full search here of the devclass list at
+ * each iteration level to save storing children-lists in the devclass
+ * structure. If we ever move beyond a few dozen devices doing this,
+ * we may need to reevaluate...
+ *
+ * @param dc the devclass to edit
+ * @param driver the driver that was just added
+ */
+static void
+devclass_driver_added(devclass_t dc, driver_t *driver)
+{
+ devclass_t parent;
+ int i;
+
+ /*
+ * Call BUS_DRIVER_ADDED for any existing busses in this class.
+ */
+ for (i = 0; i < dc->maxunit; i++)
+ if (dc->devices[i] && device_is_attached(dc->devices[i]))
+ BUS_DRIVER_ADDED(dc->devices[i], driver);
+
+ /*
+ * Walk through the children classes. Since we only keep a
+ * single parent pointer around, we walk the entire list of
+ * devclasses looking for children. We set the
+ * DC_HAS_CHILDREN flag when a child devclass is created on
+ * the parent, so we only walk the list for those devclasses
+ * that have children.
+ */
+ if (!(dc->flags & DC_HAS_CHILDREN))
+ return;
+ parent = dc;
+ TAILQ_FOREACH(dc, &devclasses, link) {
+ if (dc->parent == parent)
+ devclass_driver_added(dc, driver);
+ }
+}
+
+/**
+ * @brief Add a device driver to a device class
+ *
+ * Add a device driver to a devclass. This is normally called
+ * automatically by DRIVER_MODULE(). The BUS_DRIVER_ADDED() method of
+ * all devices in the devclass will be called to allow them to attempt
+ * to re-probe any unmatched children.
+ *
+ * @param dc the devclass to edit
+ * @param driver the driver to register
+ */
+static int
+devclass_add_driver(devclass_t dc, driver_t *driver, int pass, devclass_t *dcp)
+{
+ driverlink_t dl;
+ const char *parentname;
+
+ PDEBUG(("%s", DRIVERNAME(driver)));
+
+ /* Don't allow invalid pass values. */
+ if (pass <= BUS_PASS_ROOT)
+ return (EINVAL);
+
+ dl = malloc(sizeof *dl, M_BUS, M_NOWAIT|M_ZERO);
+ if (!dl)
+ return (ENOMEM);
+
+ /*
+ * Compile the driver's methods. Also increase the reference count
+ * so that the class doesn't get freed when the last instance
+ * goes. This means we can safely use static methods and avoids a
+ * double-free in devclass_delete_driver.
+ */
+ kobj_class_compile((kobj_class_t) driver);
+
+ /*
+ * If the driver has any base classes, make the
+ * devclass inherit from the devclass of the driver's
+ * first base class. This will allow the system to
+ * search for drivers in both devclasses for children
+ * of a device using this driver.
+ */
+ if (driver->baseclasses)
+ parentname = driver->baseclasses[0]->name;
+ else
+ parentname = NULL;
+ *dcp = devclass_find_internal(driver->name, parentname, TRUE);
+
+ dl->driver = driver;
+ TAILQ_INSERT_TAIL(&dc->drivers, dl, link);
+ driver->refs++; /* XXX: kobj_mtx */
+ dl->pass = pass;
+ driver_register_pass(dl);
+
+ devclass_driver_added(dc, driver);
+ bus_data_generation_update();
+ return (0);
+}
+
+/**
+ * @brief Delete a device driver from a device class
+ *
+ * Delete a device driver from a devclass. This is normally called
+ * automatically by DRIVER_MODULE().
+ *
+ * If the driver is currently attached to any devices,
+ * devclass_delete_driver() will first attempt to detach from each
+ * device. If one of the detach calls fails, the driver will not be
+ * deleted.
+ *
+ * @param dc the devclass to edit
+ * @param driver the driver to unregister
+ */
+static int
+devclass_delete_driver(devclass_t busclass, driver_t *driver)
+{
+ devclass_t dc = devclass_find(driver->name);
+ driverlink_t dl;
+ device_t dev;
+ int i;
+ int error;
+
+ PDEBUG(("%s from devclass %s", driver->name, DEVCLANAME(busclass)));
+
+ if (!dc)
+ return (0);
+
+ /*
+ * Find the link structure in the bus' list of drivers.
+ */
+ TAILQ_FOREACH(dl, &busclass->drivers, link) {
+ if (dl->driver == driver)
+ break;
+ }
+
+ if (!dl) {
+ PDEBUG(("%s not found in %s list", driver->name,
+ busclass->name));
+ return (ENOENT);
+ }
+
+ /*
+ * Disassociate from any devices. We iterate through all the
+ * devices in the devclass of the driver and detach any which are
+ * using the driver and which have a parent in the devclass which
+ * we are deleting from.
+ *
+ * Note that since a driver can be in multiple devclasses, we
+ * should not detach devices which are not children of devices in
+ * the affected devclass.
+ */
+ for (i = 0; i < dc->maxunit; i++) {
+ if (dc->devices[i]) {
+ dev = dc->devices[i];
+ if (dev->driver == driver && dev->parent &&
+ dev->parent->devclass == busclass) {
+ if ((error = device_detach(dev)) != 0)
+ return (error);
+ device_set_driver(dev, NULL);
+ }
+ }
+ }
+
+ TAILQ_REMOVE(&busclass->drivers, dl, link);
+ free(dl, M_BUS);
+
+ /* XXX: kobj_mtx */
+ driver->refs--;
+ if (driver->refs == 0)
+ kobj_class_free((kobj_class_t) driver);
+
+ bus_data_generation_update();
+ return (0);
+}
+
+/**
+ * @brief Quiesce a device driver in a device class
+ *
+ * Quiesce a device driver from a devclass. This is normally called
+ * automatically by DRIVER_MODULE().
+ *
+ * If the driver is currently attached to any devices,
+ * devclass_quiesce_driver() will first attempt to quiesce each
+ * device.
+ *
+ * @param dc the devclass to edit
+ * @param driver the driver to unregister
+ */
+static int
+devclass_quiesce_driver(devclass_t busclass, driver_t *driver)
+{
+ devclass_t dc = devclass_find(driver->name);
+ driverlink_t dl;
+ device_t dev;
+ int i;
+ int error;
+
+ PDEBUG(("%s from devclass %s", driver->name, DEVCLANAME(busclass)));
+
+ if (!dc)
+ return (0);
+
+ /*
+ * Find the link structure in the bus' list of drivers.
+ */
+ TAILQ_FOREACH(dl, &busclass->drivers, link) {
+ if (dl->driver == driver)
+ break;
+ }
+
+ if (!dl) {
+ PDEBUG(("%s not found in %s list", driver->name,
+ busclass->name));
+ return (ENOENT);
+ }
+
+ /*
+ * Quiesce all devices. We iterate through all the devices in
+ * the devclass of the driver and quiesce any which are using
+ * the driver and which have a parent in the devclass which we
+ * are quiescing.
+ *
+ * Note that since a driver can be in multiple devclasses, we
+ * should not quiesce devices which are not children of
+ * devices in the affected devclass.
+ */
+ for (i = 0; i < dc->maxunit; i++) {
+ if (dc->devices[i]) {
+ dev = dc->devices[i];
+ if (dev->driver == driver && dev->parent &&
+ dev->parent->devclass == busclass) {
+ if ((error = device_quiesce(dev)) != 0)
+ return (error);
+ }
+ }
+ }
+
+ return (0);
+}
+
+/**
+ * @internal
+ */
+static driverlink_t
+devclass_find_driver_internal(devclass_t dc, const char *classname)
+{
+ driverlink_t dl;
+
+ PDEBUG(("%s in devclass %s", classname, DEVCLANAME(dc)));
+
+ TAILQ_FOREACH(dl, &dc->drivers, link) {
+ if (!strcmp(dl->driver->name, classname))
+ return (dl);
+ }
+
+ PDEBUG(("not found"));
+ return (NULL);
+}
+
+/**
+ * @brief Return the name of the devclass
+ */
+const char *
+devclass_get_name(devclass_t dc)
+{
+ return (dc->name);
+}
+
+/**
+ * @brief Find a device given a unit number
+ *
+ * @param dc the devclass to search
+ * @param unit the unit number to search for
+ *
+ * @returns the device with the given unit number or @c
+ * NULL if there is no such device
+ */
+device_t
+devclass_get_device(devclass_t dc, int unit)
+{
+ if (dc == NULL || unit < 0 || unit >= dc->maxunit)
+ return (NULL);
+ return (dc->devices[unit]);
+}
+
+/**
+ * @brief Find the softc field of a device given a unit number
+ *
+ * @param dc the devclass to search
+ * @param unit the unit number to search for
+ *
+ * @returns the softc field of the device with the given
+ * unit number or @c NULL if there is no such
+ * device
+ */
+void *
+devclass_get_softc(devclass_t dc, int unit)
+{
+ device_t dev;
+
+ dev = devclass_get_device(dc, unit);
+ if (!dev)
+ return (NULL);
+
+ return (device_get_softc(dev));
+}
+
+/**
+ * @brief Get a list of devices in the devclass
+ *
+ * An array containing a list of all the devices in the given devclass
+ * is allocated and returned in @p *devlistp. The number of devices
+ * in the array is returned in @p *devcountp. The caller should free
+ * the array using @c free(p, M_TEMP), even if @p *devcountp is 0.
+ *
+ * @param dc the devclass to examine
+ * @param devlistp points at location for array pointer return
+ * value
+ * @param devcountp points at location for array size return value
+ *
+ * @retval 0 success
+ * @retval ENOMEM the array allocation failed
+ */
+int
+devclass_get_devices(devclass_t dc, device_t **devlistp, int *devcountp)
+{
+ int count, i;
+ device_t *list;
+
+ count = devclass_get_count(dc);
+ list = malloc(count * sizeof(device_t), M_TEMP, M_NOWAIT|M_ZERO);
+ if (!list)
+ return (ENOMEM);
+
+ count = 0;
+ for (i = 0; i < dc->maxunit; i++) {
+ if (dc->devices[i]) {
+ list[count] = dc->devices[i];
+ count++;
+ }
+ }
+
+ *devlistp = list;
+ *devcountp = count;
+
+ return (0);
+}
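+
+/*
+ * Usage sketch (illustrative): walk every device in a devclass, then
+ * release the array with the M_TEMP free documented above.
+ *
+ *	device_t *devs;
+ *	int count, i;
+ *
+ *	if (devclass_get_devices(dc, &devs, &count) == 0) {
+ *		for (i = 0; i < count; i++)
+ *			device_printf(devs[i], "present\n");
+ *		free(devs, M_TEMP);
+ *	}
+ */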
+
+/**
+ * @brief Get a list of drivers in the devclass
+ *
+ * An array containing a list of pointers to all the drivers in the
+ * given devclass is allocated and returned in @p *listp. The number
+ * of drivers in the array is returned in @p *countp. The caller should
+ * free the array using @c free(p, M_TEMP).
+ *
+ * @param dc the devclass to examine
+ * @param listp gives location for array pointer return value
+ * @param countp gives location for number of array elements
+ * return value
+ *
+ * @retval 0 success
+ * @retval ENOMEM the array allocation failed
+ */
+int
+devclass_get_drivers(devclass_t dc, driver_t ***listp, int *countp)
+{
+ driverlink_t dl;
+ driver_t **list;
+ int count;
+
+ count = 0;
+ TAILQ_FOREACH(dl, &dc->drivers, link)
+ count++;
+ list = malloc(count * sizeof(driver_t *), M_TEMP, M_NOWAIT);
+ if (list == NULL)
+ return (ENOMEM);
+
+ count = 0;
+ TAILQ_FOREACH(dl, &dc->drivers, link) {
+ list[count] = dl->driver;
+ count++;
+ }
+ *listp = list;
+ *countp = count;
+
+ return (0);
+}
+
+/**
+ * @brief Get the number of devices in a devclass
+ *
+ * @param dc the devclass to examine
+ */
+int
+devclass_get_count(devclass_t dc)
+{
+ int count, i;
+
+ count = 0;
+ for (i = 0; i < dc->maxunit; i++)
+ if (dc->devices[i])
+ count++;
+ return (count);
+}
+
+/**
+ * @brief Get the maximum unit number used in a devclass
+ *
+ * Note that this is one greater than the highest currently-allocated
+ * unit. If a null devclass_t is passed in, -1 is returned to indicate
+ * that not even the devclass has been allocated yet.
+ *
+ * @param dc the devclass to examine
+ */
+int
+devclass_get_maxunit(devclass_t dc)
+{
+ if (dc == NULL)
+ return (-1);
+ return (dc->maxunit);
+}
+
+/**
+ * @brief Find a free unit number in a devclass
+ *
+ * This function searches for the first unused unit number greater
+ * than or equal to @p unit.
+ *
+ * @param dc the devclass to examine
+ * @param unit the first unit number to check
+ */
+int
+devclass_find_free_unit(devclass_t dc, int unit)
+{
+ if (dc == NULL)
+ return (unit);
+ while (unit < dc->maxunit && dc->devices[unit] != NULL)
+ unit++;
+ return (unit);
+}
+
+/**
+ * @brief Set the parent of a devclass
+ *
+ * The parent class is normally initialised automatically by
+ * DRIVER_MODULE().
+ *
+ * @param dc the devclass to edit
+ * @param pdc the new parent devclass
+ */
+void
+devclass_set_parent(devclass_t dc, devclass_t pdc)
+{
+ dc->parent = pdc;
+}
+
+/**
+ * @brief Get the parent of a devclass
+ *
+ * @param dc the devclass to examine
+ */
+devclass_t
+devclass_get_parent(devclass_t dc)
+{
+ return (dc->parent);
+}
+
+struct sysctl_ctx_list *
+devclass_get_sysctl_ctx(devclass_t dc)
+{
+ return (&dc->sysctl_ctx);
+}
+
+struct sysctl_oid *
+devclass_get_sysctl_tree(devclass_t dc)
+{
+ return (dc->sysctl_tree);
+}
+
+/**
+ * @internal
+ * @brief Allocate a unit number
+ *
+ * On entry, @p *unitp is the desired unit number (or @c -1 if any
+ * will do). The allocated unit number is returned in @p *unitp.
+ *
+ * @param dc the devclass to allocate from
+ * @param unitp points at the location for the allocated unit
+ * number
+ *
+ * @retval 0 success
+ * @retval EEXIST the requested unit number is already allocated
+ * @retval ENOMEM memory allocation failure
+ */
+static int
+devclass_alloc_unit(devclass_t dc, device_t dev, int *unitp)
+{
+ const char *s;
+ int unit = *unitp;
+
+ PDEBUG(("unit %d in devclass %s", unit, DEVCLANAME(dc)));
+
+ /* Ask the parent bus if it wants to wire this device. */
+ if (unit == -1)
+ BUS_HINT_DEVICE_UNIT(device_get_parent(dev), dev, dc->name,
+ &unit);
+
+ /* If we were given a wired unit number, check for existing device */
+ /* XXX imp XXX */
+ if (unit != -1) {
+ if (unit >= 0 && unit < dc->maxunit &&
+ dc->devices[unit] != NULL) {
+ if (bootverbose)
+ printf("%s: %s%d already exists; skipping it\n",
+ dc->name, dc->name, *unitp);
+ return (EEXIST);
+ }
+ } else {
+ /* Unwired device, find the next available slot for it */
+ for (unit = 0;; unit++) {
+ /* If there is an "at" hint for a unit then skip it. */
+ if (resource_string_value(dc->name, unit, "at", &s) ==
+ 0)
+ continue;
+
+ /* If this device slot is already in use, skip it. */
+ if (unit < dc->maxunit && dc->devices[unit] != NULL)
+ continue;
+
+ break;
+ }
+ }
+
+ /*
+ * We've selected a unit beyond the length of the table, so let's
+ * extend the table to make room for all units up to and including
+ * this one.
+ */
+ if (unit >= dc->maxunit) {
+ device_t *newlist, *oldlist;
+ int newsize;
+
+ oldlist = dc->devices;
+ newsize = roundup((unit + 1), MINALLOCSIZE / sizeof(device_t));
+ newlist = malloc(sizeof(device_t) * newsize, M_BUS, M_NOWAIT);
+ if (!newlist)
+ return (ENOMEM);
+ if (oldlist != NULL)
+ bcopy(oldlist, newlist, sizeof(device_t) * dc->maxunit);
+ bzero(newlist + dc->maxunit,
+ sizeof(device_t) * (newsize - dc->maxunit));
+ dc->devices = newlist;
+ dc->maxunit = newsize;
+ if (oldlist != NULL)
+ free(oldlist, M_BUS);
+ }
+ PDEBUG(("now: unit %d in devclass %s", unit, DEVCLANAME(dc)));
+
+ *unitp = unit;
+ return (0);
+}
+
+/**
+ * @internal
+ * @brief Add a device to a devclass
+ *
+ * A unit number is allocated for the device (using the device's
+ * preferred unit number if any) and the device is registered in the
+ * devclass. This allows the device to be looked up by its unit
+ * number, e.g. by decoding a dev_t minor number.
+ *
+ * @param dc the devclass to add to
+ * @param dev the device to add
+ *
+ * @retval 0 success
+ * @retval EEXIST the requested unit number is already allocated
+ * @retval ENOMEM memory allocation failure
+ */
+static int
+devclass_add_device(devclass_t dc, device_t dev)
+{
+ int buflen, error;
+
+ PDEBUG(("%s in devclass %s", DEVICENAME(dev), DEVCLANAME(dc)));
+
+ buflen = snprintf(NULL, 0, "%s%d$", dc->name, INT_MAX);
+ if (buflen < 0)
+ return (ENOMEM);
+ dev->nameunit = malloc(buflen, M_BUS, M_NOWAIT|M_ZERO);
+ if (!dev->nameunit)
+ return (ENOMEM);
+
+ if ((error = devclass_alloc_unit(dc, dev, &dev->unit)) != 0) {
+ free(dev->nameunit, M_BUS);
+ dev->nameunit = NULL;
+ return (error);
+ }
+ dc->devices[dev->unit] = dev;
+ dev->devclass = dc;
+ snprintf(dev->nameunit, buflen, "%s%d", dc->name, dev->unit);
+
+ return (0);
+}
+
+/**
+ * @internal
+ * @brief Delete a device from a devclass
+ *
+ * The device is removed from the devclass's device list and its unit
+ * number is freed.
+ *
+ * @param dc the devclass to delete from
+ * @param dev the device to delete
+ *
+ * @retval 0 success
+ */
+static int
+devclass_delete_device(devclass_t dc, device_t dev)
+{
+ if (!dc || !dev)
+ return (0);
+
+ PDEBUG(("%s in devclass %s", DEVICENAME(dev), DEVCLANAME(dc)));
+
+ if (dev->devclass != dc || dc->devices[dev->unit] != dev)
+ panic("devclass_delete_device: inconsistent device class");
+ dc->devices[dev->unit] = NULL;
+ if (dev->flags & DF_WILDCARD)
+ dev->unit = -1;
+ dev->devclass = NULL;
+ free(dev->nameunit, M_BUS);
+ dev->nameunit = NULL;
+
+ return (0);
+}
+
+/**
+ * @internal
+ * @brief Make a new device and add it as a child of @p parent
+ *
+ * @param parent the parent of the new device
+ * @param name the devclass name of the new device or @c NULL
+ * to leave the devclass unspecified
+ * @param unit the unit number of the new device or @c -1 to
+ * leave the unit number unspecified
+ *
+ * @returns the new device
+ */
+static device_t
+make_device(device_t parent, const char *name, int unit)
+{
+ device_t dev;
+ devclass_t dc;
+
+ PDEBUG(("%s at %s as unit %d", name, DEVICENAME(parent), unit));
+
+ if (name) {
+ dc = devclass_find_internal(name, NULL, TRUE);
+ if (!dc) {
+ printf("make_device: can't find device class %s\n",
+ name);
+ return (NULL);
+ }
+ } else {
+ dc = NULL;
+ }
+
+ dev = malloc(sizeof(struct device), M_BUS, M_NOWAIT|M_ZERO);
+ if (!dev)
+ return (NULL);
+
+ dev->parent = parent;
+ TAILQ_INIT(&dev->children);
+ kobj_init((kobj_t) dev, &null_class);
+ dev->driver = NULL;
+ dev->devclass = NULL;
+ dev->unit = unit;
+ dev->nameunit = NULL;
+ dev->desc = NULL;
+ dev->busy = 0;
+ dev->devflags = 0;
+ dev->flags = DF_ENABLED;
+ dev->order = 0;
+ if (unit == -1)
+ dev->flags |= DF_WILDCARD;
+ if (name) {
+ dev->flags |= DF_FIXEDCLASS;
+ if (devclass_add_device(dc, dev)) {
+ kobj_delete((kobj_t) dev, M_BUS);
+ return (NULL);
+ }
+ }
+ dev->ivars = NULL;
+ dev->softc = NULL;
+
+ dev->state = DS_NOTPRESENT;
+
+ TAILQ_INSERT_TAIL(&bus_data_devices, dev, devlink);
+ bus_data_generation_update();
+
+ return (dev);
+}
+
+/**
+ * @internal
+ * @brief Print a description of a device.
+ */
+static int
+device_print_child(device_t dev, device_t child)
+{
+ int retval = 0;
+
+ if (device_is_alive(child))
+ retval += BUS_PRINT_CHILD(dev, child);
+ else
+ retval += device_printf(child, " not found\n");
+
+ return (retval);
+}
+
+/**
+ * @brief Create a new device
+ *
+ * This creates a new device and adds it as a child of an existing
+ * parent device. The new device will be added after the last existing
+ * child with order zero.
+ *
+ * @param dev the device which will be the parent of the
+ * new child device
+ * @param name devclass name for new device or @c NULL if not
+ * specified
+ * @param unit unit number for new device or @c -1 if not
+ * specified
+ *
+ * @returns the new device
+ */
+device_t
+device_add_child(device_t dev, const char *name, int unit)
+{
+ return (device_add_child_ordered(dev, 0, name, unit));
+}
+
+/**
+ * @brief Create a new device
+ *
+ * This creates a new device and adds it as a child of an existing
+ * parent device. The new device will be added after the last existing
+ * child with the same order.
+ *
+ * @param dev the device which will be the parent of the
+ * new child device
+ * @param order a value which is used to partially sort the
+ * children of @p dev - devices created using
+ * lower values of @p order appear first in @p
+ * dev's list of children
+ * @param name devclass name for new device or @c NULL if not
+ * specified
+ * @param unit unit number for new device or @c -1 if not
+ * specified
+ *
+ * @returns the new device
+ */
+device_t
+device_add_child_ordered(device_t dev, u_int order, const char *name, int unit)
+{
+ device_t child;
+ device_t place;
+
+ PDEBUG(("%s at %s with order %u as unit %d",
+ name, DEVICENAME(dev), order, unit));
+
+ child = make_device(dev, name, unit);
+ if (child == NULL)
+ return (child);
+ child->order = order;
+
+ TAILQ_FOREACH(place, &dev->children, link) {
+ if (place->order > order)
+ break;
+ }
+
+ if (place) {
+ /*
+ * The device 'place' is the first device whose order is
+ * greater than the new child.
+ */
+ TAILQ_INSERT_BEFORE(place, child, link);
+ } else {
+ /*
+ * The new child's order is greater or equal to the order of
+ * any existing device. Add the child to the tail of the list.
+ */
+ TAILQ_INSERT_TAIL(&dev->children, child, link);
+ }
+
+ bus_data_generation_update();
+ return (child);
+}
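+
+/*
+ * Usage sketch (illustrative): a bus attach routine creating ordered
+ * children so that the "foo" child probes before "bar".  The names and
+ * the wildcard unit (-1) are for the example only.
+ *
+ *	device_add_child_ordered(dev, 0, "foo", -1);
+ *	device_add_child_ordered(dev, 1, "bar", -1);
+ */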
+
+/**
+ * @brief Delete a device
+ *
+ * This function deletes a device along with all of its children. If
+ * the device currently has a driver attached to it, the device is
+ * detached first using device_detach().
+ *
+ * @param dev the parent device
+ * @param child the device to delete
+ *
+ * @retval 0 success
+ * @retval non-zero a unix error code describing the error
+ */
+int
+device_delete_child(device_t dev, device_t child)
+{
+ int error;
+ device_t grandchild;
+
+ PDEBUG(("%s from %s", DEVICENAME(child), DEVICENAME(dev)));
+
+ /* remove children first */
+ while ( (grandchild = TAILQ_FIRST(&child->children)) ) {
+ error = device_delete_child(child, grandchild);
+ if (error)
+ return (error);
+ }
+
+ if ((error = device_detach(child)) != 0)
+ return (error);
+ if (child->devclass)
+ devclass_delete_device(child->devclass, child);
+ TAILQ_REMOVE(&dev->children, child, link);
+ TAILQ_REMOVE(&bus_data_devices, child, devlink);
+ kobj_delete((kobj_t) child, M_BUS);
+
+ bus_data_generation_update();
+ return (0);
+}
+
+/**
+ * @brief Find a device given a unit number
+ *
+ * This is similar to devclass_get_devices() but only searches for
+ * devices which have @p dev as a parent.
+ *
+ * @param dev the parent device to search
+ * @param unit the unit number to search for. If the unit is -1,
+ * return the first child of @p dev which has name
+ * @p classname (that is, the one with the lowest unit.)
+ *
+ * @returns the device with the given unit number or @c
+ * NULL if there is no such device
+ */
+device_t
+device_find_child(device_t dev, const char *classname, int unit)
+{
+ devclass_t dc;
+ device_t child;
+
+ dc = devclass_find(classname);
+ if (!dc)
+ return (NULL);
+
+ if (unit != -1) {
+ child = devclass_get_device(dc, unit);
+ if (child && child->parent == dev)
+ return (child);
+ } else {
+ for (unit = 0; unit < devclass_get_maxunit(dc); unit++) {
+ child = devclass_get_device(dc, unit);
+ if (child && child->parent == dev)
+ return (child);
+ }
+ }
+ return (NULL);
+}
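+
+/*
+ * Usage sketch (illustrative): find the lowest-numbered "foo" child of
+ * a bus.  "foo" is a hypothetical devclass name.
+ *
+ *	device_t child;
+ *
+ *	child = device_find_child(bus, "foo", -1);
+ *	if (child != NULL)
+ *		device_printf(child, "found\n");
+ */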
+
+/**
+ * @internal
+ */
+static driverlink_t
+first_matching_driver(devclass_t dc, device_t dev)
+{
+ if (dev->devclass)
+ return (devclass_find_driver_internal(dc, dev->devclass->name));
+ return (TAILQ_FIRST(&dc->drivers));
+}
+
+/**
+ * @internal
+ */
+static driverlink_t
+next_matching_driver(devclass_t dc, device_t dev, driverlink_t last)
+{
+ if (dev->devclass) {
+ driverlink_t dl;
+ for (dl = TAILQ_NEXT(last, link); dl; dl = TAILQ_NEXT(dl, link))
+ if (!strcmp(dev->devclass->name, dl->driver->name))
+ return (dl);
+ return (NULL);
+ }
+ return (TAILQ_NEXT(last, link));
+}
+
+/**
+ * @internal
+ */
+int
+device_probe_child(device_t dev, device_t child)
+{
+ devclass_t dc;
+ driverlink_t best = NULL;
+ driverlink_t dl;
+ int result, pri = 0;
+ int hasclass = (child->devclass != NULL);
+
+ GIANT_REQUIRED;
+
+ dc = dev->devclass;
+ if (!dc)
+ panic("device_probe_child: parent device has no devclass");
+
+ /*
+ * If the state is already probed, then return. However, don't
+ * return if we can rebid this object.
+ */
+ if (child->state == DS_ALIVE && (child->flags & DF_REBID) == 0)
+ return (0);
+
+ for (; dc; dc = dc->parent) {
+ for (dl = first_matching_driver(dc, child);
+ dl;
+ dl = next_matching_driver(dc, child, dl)) {
+
+ /* If this driver's pass is too high, then ignore it. */
+ if (dl->pass > bus_current_pass)
+ continue;
+
+ PDEBUG(("Trying %s", DRIVERNAME(dl->driver)));
+ device_set_driver(child, dl->driver);
+ if (!hasclass) {
+ if (device_set_devclass(child, dl->driver->name)) {
+ printf("driver bug: Unable to set devclass (devname: %s)\n",
+ (child ? device_get_name(child) :
+ "no device"));
+ device_set_driver(child, NULL);
+ continue;
+ }
+ }
+
+ /* Fetch any flags for the device before probing. */
+ resource_int_value(dl->driver->name, child->unit,
+ "flags", &child->devflags);
+
+ result = DEVICE_PROBE(child);
+
+ /* Reset flags and devclass before the next probe. */
+ child->devflags = 0;
+ if (!hasclass)
+ device_set_devclass(child, NULL);
+
+ /*
+ * If the driver returns SUCCESS, there can be
+ * no higher match for this device.
+ */
+ if (result == 0) {
+ best = dl;
+ pri = 0;
+ break;
+ }
+
+ /*
+ * The driver returned an error so it
+ * certainly doesn't match.
+ */
+ if (result > 0) {
+ device_set_driver(child, NULL);
+ continue;
+ }
+
+ /*
+ * The driver returned a priority lower than
+ * SUCCESS; remember the best match so far.
+ * Initialise pri on the first match.
+ */
+ if (best == NULL || result > pri) {
+ /*
+ * Probes that return BUS_PROBE_NOWILDCARD
+ * or lower only match when they are set
+ * in stone by the parent bus.
+ */
+ if (result <= BUS_PROBE_NOWILDCARD &&
+ child->flags & DF_WILDCARD)
+ continue;
+ best = dl;
+ pri = result;
+ continue;
+ }
+ }
+ /*
+ * If we have an unambiguous match in this devclass,
+ * don't look in the parent.
+ */
+ if (best && pri == 0)
+ break;
+ }
+
+ /*
+ * If we found a driver, change state and initialise the devclass.
+ */
+ /* XXX What happens if we rebid and got no best? */
+ if (best) {
+ /*
+ * If this device was attached, and we were asked to
+ * rescan, and it is a different driver, then we have
+ * to detach the old driver and reattach this new one.
+ * Note, we don't have to check for DF_REBID here
+ * because if the state is > DS_ALIVE, we know it must
+ * be.
+ *
+ * This assumes that all DF_REBID drivers can have
+ * their probe routine called at any time and that
+ * they are idempotent as well as completely benign in
+ * normal operations.
+ *
+ * We also have to make sure that the detach
+ * succeeded, otherwise we fail the operation (or
+ * maybe it should just fail silently? I'm torn).
+ */
+ if (child->state > DS_ALIVE && best->driver != child->driver)
+ if ((result = device_detach(child)) != 0)
+ return (result);
+
+ /* Set the winning driver, devclass, and flags. */
+ if (!child->devclass) {
+ result = device_set_devclass(child, best->driver->name);
+ if (result != 0)
+ return (result);
+ }
+ device_set_driver(child, best->driver);
+ resource_int_value(best->driver->name, child->unit,
+ "flags", &child->devflags);
+
+ if (pri < 0) {
+ /*
+ * A bit bogus. Call the probe method again to make
+ * sure that we have the right description.
+ */
+ DEVICE_PROBE(child);
+#if 0
+ child->flags |= DF_REBID;
+#endif
+ } else
+ child->flags &= ~DF_REBID;
+ child->state = DS_ALIVE;
+
+ bus_data_generation_update();
+ return (0);
+ }
+
+ return (ENXIO);
+}
+
+/**
+ * @brief Return the parent of a device
+ */
+device_t
+device_get_parent(device_t dev)
+{
+ return (dev->parent);
+}
+
+/**
+ * @brief Get a list of children of a device
+ *
+ * An array containing a list of all the children of the given device
+ * is allocated and returned in @p *devlistp. The number of devices
+ * in the array is returned in @p *devcountp. The caller should free
+ * the array using @c free(p, M_TEMP).
+ *
+ * @param dev the device to examine
+ * @param devlistp points at location for array pointer return
+ * value
+ * @param devcountp points at location for array size return value
+ *
+ * @retval 0 success
+ * @retval ENOMEM the array allocation failed
+ */
+int
+device_get_children(device_t dev, device_t **devlistp, int *devcountp)
+{
+ int count;
+ device_t child;
+ device_t *list;
+
+ count = 0;
+ TAILQ_FOREACH(child, &dev->children, link) {
+ count++;
+ }
+
+ if (count) {
+ list = malloc(count * sizeof(device_t), M_TEMP,
+ M_NOWAIT|M_ZERO);
+ if (!list)
+ return (ENOMEM);
+
+ count = 0;
+ TAILQ_FOREACH(child, &dev->children, link) {
+ list[count] = child;
+ count++;
+ }
+ } else {
+ list = NULL;
+ }
+
+ *devlistp = list;
+ *devcountp = count;
+
+ return (0);
+}
+
+/**
+ * @brief Return the current driver for the device or @c NULL if there
+ * is no driver currently attached
+ */
+driver_t *
+device_get_driver(device_t dev)
+{
+ return (dev->driver);
+}
+
+/**
+ * @brief Return the current devclass for the device or @c NULL if
+ * there is none.
+ */
+devclass_t
+device_get_devclass(device_t dev)
+{
+ return (dev->devclass);
+}
+
+/**
+ * @brief Return the name of the device's devclass or @c NULL if there
+ * is none.
+ */
+const char *
+device_get_name(device_t dev)
+{
+ if (dev != NULL && dev->devclass)
+ return (devclass_get_name(dev->devclass));
+ return (NULL);
+}
+
+/**
+ * @brief Return a string containing the device's devclass name
+ * followed by an ascii representation of the device's unit number
+ * (e.g. @c "foo2").
+ */
+const char *
+device_get_nameunit(device_t dev)
+{
+ return (dev->nameunit);
+}
+
+/**
+ * @brief Return the device's unit number.
+ */
+int
+device_get_unit(device_t dev)
+{
+ return (dev->unit);
+}
+
+/**
+ * @brief Return the device's description string
+ */
+const char *
+device_get_desc(device_t dev)
+{
+ return (dev->desc);
+}
+
+/**
+ * @brief Return the device's flags
+ */
+u_int32_t
+device_get_flags(device_t dev)
+{
+ return (dev->devflags);
+}
+
+struct sysctl_ctx_list *
+device_get_sysctl_ctx(device_t dev)
+{
+ return (&dev->sysctl_ctx);
+}
+
+struct sysctl_oid *
+device_get_sysctl_tree(device_t dev)
+{
+ return (dev->sysctl_tree);
+}
+
+/**
+ * @brief Print the name of the device followed by a colon and a space
+ *
+ * @returns the number of characters printed
+ */
+int
+device_print_prettyname(device_t dev)
+{
+ const char *name = device_get_name(dev);
+
+ if (name == NULL)
+ return (printf("unknown: "));
+ return (printf("%s%d: ", name, device_get_unit(dev)));
+}
+
+/**
+ * @brief Print the name of the device followed by a colon, a space
+ * and the result of calling vprintf() with the value of @p fmt and
+ * the following arguments.
+ *
+ * @returns the number of characters printed
+ */
+int
+device_printf(device_t dev, const char * fmt, ...)
+{
+ va_list ap;
+ int retval;
+
+ retval = device_print_prettyname(dev);
+ va_start(ap, fmt);
+ retval += vprintf(fmt, ap);
+ va_end(ap);
+ return (retval);
+}
+
+/**
+ * @internal
+ */
+static void
+device_set_desc_internal(device_t dev, const char* desc, int copy)
+{
+ if (dev->desc && (dev->flags & DF_DESCMALLOCED)) {
+ free(dev->desc, M_BUS);
+ dev->flags &= ~DF_DESCMALLOCED;
+ dev->desc = NULL;
+ }
+
+ if (copy && desc) {
+ dev->desc = malloc(strlen(desc) + 1, M_BUS, M_NOWAIT);
+ if (dev->desc) {
+ strcpy(dev->desc, desc);
+ dev->flags |= DF_DESCMALLOCED;
+ }
+ } else {
+ /* Avoid a -Wcast-qual warning */
+ dev->desc = (char *)(uintptr_t) desc;
+ }
+
+ bus_data_generation_update();
+}
+
+/**
+ * @brief Set the device's description
+ *
+ * The value of @c desc should be a string constant that will not
+ * change (at least until the description is changed in a subsequent
+ * call to device_set_desc() or device_set_desc_copy()).
+ */
+void
+device_set_desc(device_t dev, const char* desc)
+{
+ device_set_desc_internal(dev, desc, FALSE);
+}
+
+/**
+ * @brief Set the device's description
+ *
+ * The string pointed to by @c desc is copied. Use this function if
+ * the device description is generated, (e.g. with sprintf()).
+ */
+void
+device_set_desc_copy(device_t dev, const char* desc)
+{
+ device_set_desc_internal(dev, desc, TRUE);
+}
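+
+/*
+ * Usage sketch (illustrative): a constant description can be set
+ * without copying, while a generated one must be copied.  The rev
+ * variable is hypothetical.
+ *
+ *	char buf[32];
+ *
+ *	device_set_desc(dev, "Foo adapter");	// string constant, no copy
+ *	snprintf(buf, sizeof(buf), "Foo adapter rev %d", rev);
+ *	device_set_desc_copy(dev, buf);		// buf is copied
+ */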
+
+/**
+ * @brief Set the device's flags
+ */
+void
+device_set_flags(device_t dev, u_int32_t flags)
+{
+ dev->devflags = flags;
+}
+
+/**
+ * @brief Return the device's softc field
+ *
+ * The softc is allocated and zeroed when a driver is attached, based
+ * on the size field of the driver.
+ */
+void *
+device_get_softc(device_t dev)
+{
+ return (dev->softc);
+}
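+
+/*
+ * Illustrative sketch: the softc is sized from the driver_t, so a
+ * driver normally declares its size there and fetches the zeroed
+ * storage in attach.  struct foo_softc and foo_methods are
+ * hypothetical.
+ *
+ *	static driver_t foo_driver = {
+ *		"foo", foo_methods, sizeof(struct foo_softc)
+ *	};
+ *
+ *	struct foo_softc *sc = device_get_softc(dev);
+ */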
+
+/**
+ * @brief Set the device's softc field
+ *
+ * Most drivers do not need to use this since the softc is allocated
+ * automatically when the driver is attached.
+ */
+void
+device_set_softc(device_t dev, void *softc)
+{
+ if (dev->softc && !(dev->flags & DF_EXTERNALSOFTC))
+ free(dev->softc, M_BUS_SC);
+ dev->softc = softc;
+ if (dev->softc)
+ dev->flags |= DF_EXTERNALSOFTC;
+ else
+ dev->flags &= ~DF_EXTERNALSOFTC;
+}
+
+/**
+ * @brief Get the device's ivars field
+ *
+ * The ivars field is used by the parent device to store per-device
+ * state (e.g. the physical location of the device or a list of
+ * resources).
+ */
+void *
+device_get_ivars(device_t dev)
+{
+
+ KASSERT(dev != NULL, ("device_get_ivars(NULL, ...)"));
+ return (dev->ivars);
+}
+
+/**
+ * @brief Set the device's ivars field
+ */
+void
+device_set_ivars(device_t dev, void * ivars)
+{
+
+ KASSERT(dev != NULL, ("device_set_ivars(NULL, ...)"));
+ dev->ivars = ivars;
+}
+
+/**
+ * @brief Return the device's state
+ */
+device_state_t
+device_get_state(device_t dev)
+{
+ return (dev->state);
+}
+
+/**
+ * @brief Set the DF_ENABLED flag for the device
+ */
+void
+device_enable(device_t dev)
+{
+ dev->flags |= DF_ENABLED;
+}
+
+/**
+ * @brief Clear the DF_ENABLED flag for the device
+ */
+void
+device_disable(device_t dev)
+{
+ dev->flags &= ~DF_ENABLED;
+}
+
+/**
+ * @brief Increment the busy counter for the device
+ */
+void
+device_busy(device_t dev)
+{
+ if (dev->state < DS_ATTACHED)
+ panic("device_busy: called for unattached device");
+ if (dev->busy == 0 && dev->parent)
+ device_busy(dev->parent);
+ dev->busy++;
+ dev->state = DS_BUSY;
+}
+
+/**
+ * @brief Decrement the busy counter for the device
+ */
+void
+device_unbusy(device_t dev)
+{
+ if (dev->state != DS_BUSY)
+ panic("device_unbusy: called for non-busy device %s",
+ device_get_nameunit(dev));
+ dev->busy--;
+ if (dev->busy == 0) {
+ if (dev->parent)
+ device_unbusy(dev->parent);
+ dev->state = DS_ATTACHED;
+ }
+}
+
+/**
+ * @brief Set the DF_QUIET flag for the device
+ */
+void
+device_quiet(device_t dev)
+{
+ dev->flags |= DF_QUIET;
+}
+
+/**
+ * @brief Clear the DF_QUIET flag for the device
+ */
+void
+device_verbose(device_t dev)
+{
+ dev->flags &= ~DF_QUIET;
+}
+
+/**
+ * @brief Return non-zero if the DF_QUIET flag is set on the device
+ */
+int
+device_is_quiet(device_t dev)
+{
+ return ((dev->flags & DF_QUIET) != 0);
+}
+
+/**
+ * @brief Return non-zero if the DF_ENABLED flag is set on the device
+ */
+int
+device_is_enabled(device_t dev)
+{
+ return ((dev->flags & DF_ENABLED) != 0);
+}
+
+/**
+ * @brief Return non-zero if the device was successfully probed
+ */
+int
+device_is_alive(device_t dev)
+{
+ return (dev->state >= DS_ALIVE);
+}
+
+/**
+ * @brief Return non-zero if the device currently has a driver
+ * attached to it
+ */
+int
+device_is_attached(device_t dev)
+{
+ return (dev->state >= DS_ATTACHED);
+}
+
+/**
+ * @brief Set the devclass of a device
+ * @see devclass_add_device().
+ */
+int
+device_set_devclass(device_t dev, const char *classname)
+{
+ devclass_t dc;
+ int error;
+
+ if (!classname) {
+ if (dev->devclass)
+ devclass_delete_device(dev->devclass, dev);
+ return (0);
+ }
+
+ if (dev->devclass) {
+ printf("device_set_devclass: device class already set\n");
+ return (EINVAL);
+ }
+
+ dc = devclass_find_internal(classname, NULL, TRUE);
+ if (!dc)
+ return (ENOMEM);
+
+ error = devclass_add_device(dc, dev);
+
+ bus_data_generation_update();
+ return (error);
+}
+
+/**
+ * @brief Set the driver of a device
+ *
+ * @retval 0 success
+ * @retval EBUSY the device already has a driver attached
+ * @retval ENOMEM a memory allocation failure occurred
+ */
+int
+device_set_driver(device_t dev, driver_t *driver)
+{
+ if (dev->state >= DS_ATTACHED)
+ return (EBUSY);
+
+ if (dev->driver == driver)
+ return (0);
+
+ if (dev->softc && !(dev->flags & DF_EXTERNALSOFTC)) {
+ free(dev->softc, M_BUS_SC);
+ dev->softc = NULL;
+ }
+ kobj_delete((kobj_t) dev, NULL);
+ dev->driver = driver;
+ if (driver) {
+ kobj_init((kobj_t) dev, (kobj_class_t) driver);
+ if (!(dev->flags & DF_EXTERNALSOFTC) && driver->size > 0) {
+ dev->softc = malloc(driver->size, M_BUS_SC,
+ M_NOWAIT | M_ZERO);
+ if (!dev->softc) {
+ kobj_delete((kobj_t) dev, NULL);
+ kobj_init((kobj_t) dev, &null_class);
+ dev->driver = NULL;
+ return (ENOMEM);
+ }
+ }
+ } else {
+ kobj_init((kobj_t) dev, &null_class);
+ }
+
+ bus_data_generation_update();
+ return (0);
+}
+
+/**
+ * @brief Probe a device and return its status.
+ *
+ * This function is the core of the device autoconfiguration
+ * system. Its purpose is to select a suitable driver for a device and
+ * then call that driver to initialise the hardware appropriately. The
+ * driver is selected by calling the DEVICE_PROBE() method of a set of
+ * candidate drivers and then choosing the driver which returned the
+ * best value. This driver is then attached to the device using
+ * device_attach().
+ *
+ * The set of suitable drivers is taken from the list of drivers in
+ * the parent device's devclass. If the device was originally created
+ * with a specific class name (see device_add_child()), only drivers
+ * with that name are probed, otherwise all drivers in the devclass
+ * are probed. If no drivers return successful probe values in the
+ * parent devclass, the search continues in the parent of that
+ * devclass (see devclass_get_parent()) if any.
+ *
+ * @param dev the device to initialise
+ *
+ * @retval 0 success
+ * @retval ENXIO no driver was found
+ * @retval ENOMEM memory allocation failure
+ * @retval non-zero some other unix error code
+ * @retval -1 Device already attached
+ */
+int
+device_probe(device_t dev)
+{
+ int error;
+
+ GIANT_REQUIRED;
+
+ if (dev->state >= DS_ALIVE && (dev->flags & DF_REBID) == 0)
+ return (-1);
+
+ if (!(dev->flags & DF_ENABLED)) {
+ if (bootverbose && device_get_name(dev) != NULL) {
+ device_print_prettyname(dev);
+ printf("not probed (disabled)\n");
+ }
+ return (-1);
+ }
+ if ((error = device_probe_child(dev->parent, dev)) != 0) {
+ if (bus_current_pass == BUS_PASS_DEFAULT &&
+ !(dev->flags & DF_DONENOMATCH)) {
+ BUS_PROBE_NOMATCH(dev->parent, dev);
+ devnomatch(dev);
+ dev->flags |= DF_DONENOMATCH;
+ }
+ return (error);
+ }
+ return (0);
+}
+
+/**
+ * @brief Probe a device and attach a driver if possible
+ *
+ * Calls device_probe() and attaches the driver if the probe succeeded.
+ */
+int
+device_probe_and_attach(device_t dev)
+{
+ int error;
+
+ GIANT_REQUIRED;
+
+ error = device_probe(dev);
+ if (error == -1)
+ return (0);
+ else if (error != 0)
+ return (error);
+ return (device_attach(dev));
+}
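+
+/*
+ * Usage sketch (illustrative): a bus enumerating a discovered child and
+ * letting the probe machinery select a driver.  The wildcard name and
+ * unit are for the example only.
+ *
+ *	device_t child;
+ *
+ *	child = device_add_child(bus, NULL, -1);
+ *	if (child != NULL)
+ *		device_probe_and_attach(child);
+ */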
+
+/**
+ * @brief Attach a device driver to a device
+ *
+ * This function is a wrapper around the DEVICE_ATTACH() driver
+ * method. In addition to calling DEVICE_ATTACH(), it initialises the
+ * device's sysctl tree, optionally prints a description of the device
+ * and queues a notification event for user-based device management
+ * services.
+ *
+ * Normally this function is only called internally from
+ * device_probe_and_attach().
+ *
+ * @param dev the device to initialise
+ *
+ * @retval 0 success
+ * @retval ENXIO no driver was found
+ * @retval ENOMEM memory allocation failure
+ * @retval non-zero some other unix error code
+ */
+int
+device_attach(device_t dev)
+{
+ int error;
+
+ device_sysctl_init(dev);
+ if (!device_is_quiet(dev))
+ device_print_child(dev->parent, dev);
+ if ((error = DEVICE_ATTACH(dev)) != 0) {
+ printf("device_attach: %s%d attach returned %d\n",
+ dev->driver->name, dev->unit, error);
+ /* Unset the class; set in device_probe_child */
+ if (dev->devclass == NULL)
+ device_set_devclass(dev, NULL);
+ device_set_driver(dev, NULL);
+ device_sysctl_fini(dev);
+ dev->state = DS_NOTPRESENT;
+ return (error);
+ }
+ device_sysctl_update(dev);
+ dev->state = DS_ATTACHED;
+ devadded(dev);
+ return (0);
+}
+
+/**
+ * @brief Detach a driver from a device
+ *
+ * This function is a wrapper around the DEVICE_DETACH() driver
+ * method. If the call to DEVICE_DETACH() succeeds, it calls
+ * BUS_CHILD_DETACHED() for the parent of @p dev, queues a
+ * notification event for user-based device management services and
+ * cleans up the device's sysctl tree.
+ *
+ * @param dev the device to un-initialise
+ *
+ * @retval 0 success
+ * @retval ENXIO no driver was found
+ * @retval ENOMEM memory allocation failure
+ * @retval non-zero some other unix error code
+ */
+int
+device_detach(device_t dev)
+{
+ int error;
+
+ GIANT_REQUIRED;
+
+ PDEBUG(("%s", DEVICENAME(dev)));
+ if (dev->state == DS_BUSY)
+ return (EBUSY);
+ if (dev->state != DS_ATTACHED)
+ return (0);
+
+ if ((error = DEVICE_DETACH(dev)) != 0)
+ return (error);
+ devremoved(dev);
+ if (!device_is_quiet(dev))
+ device_printf(dev, "detached\n");
+ if (dev->parent)
+ BUS_CHILD_DETACHED(dev->parent, dev);
+
+ if (!(dev->flags & DF_FIXEDCLASS))
+ devclass_delete_device(dev->devclass, dev);
+
+ dev->state = DS_NOTPRESENT;
+ device_set_driver(dev, NULL);
+ device_set_desc(dev, NULL);
+ device_sysctl_fini(dev);
+
+ return (0);
+}
+
+/**
+ * @brief Tells a driver to quiesce itself.
+ *
+ * This function is a wrapper around the DEVICE_QUIESCE() driver
+ * method and returns the result of that call.
+ *
+ * @param dev the device to quiesce
+ *
+ * @retval 0 success
+ * @retval ENXIO no driver was found
+ * @retval ENOMEM memory allocation failure
+ * @retval non-zero some other unix error code
+ */
+int
+device_quiesce(device_t dev)
+{
+
+ PDEBUG(("%s", DEVICENAME(dev)));
+ if (dev->state == DS_BUSY)
+ return (EBUSY);
+ if (dev->state != DS_ATTACHED)
+ return (0);
+
+ return (DEVICE_QUIESCE(dev));
+}
+
+/**
+ * @brief Notify a device of system shutdown
+ *
+ * This function calls the DEVICE_SHUTDOWN() driver method if the
+ * device currently has an attached driver.
+ *
+ * @returns the value returned by DEVICE_SHUTDOWN()
+ */
+int
+device_shutdown(device_t dev)
+{
+ if (dev->state < DS_ATTACHED)
+ return (0);
+ return (DEVICE_SHUTDOWN(dev));
+}
+
+/**
+ * @brief Set the unit number of a device
+ *
+ * This function can be used to override the unit number used for a
+ * device (e.g. to wire a device to a pre-configured unit number).
+ */
+int
+device_set_unit(device_t dev, int unit)
+{
+ devclass_t dc;
+ int err;
+
+ dc = device_get_devclass(dev);
+ if (unit < dc->maxunit && dc->devices[unit])
+ return (EBUSY);
+ err = devclass_delete_device(dc, dev);
+ if (err)
+ return (err);
+ dev->unit = unit;
+ err = devclass_add_device(dc, dev);
+ if (err)
+ return (err);
+
+ bus_data_generation_update();
+ return (0);
+}
+
+/*======================================*/
+/*
+ * Some useful method implementations to make life easier for bus drivers.
+ */
+
+/**
+ * @brief Initialise a resource list.
+ *
+ * @param rl the resource list to initialise
+ */
+void
+resource_list_init(struct resource_list *rl)
+{
+ STAILQ_INIT(rl);
+}
+
+/**
+ * @brief Reclaim memory used by a resource list.
+ *
+ * This function frees the memory for all resource entries on the list
+ * (if any).
+ *
+ * @param rl the resource list to free
+ */
+void
+resource_list_free(struct resource_list *rl)
+{
+ struct resource_list_entry *rle;
+
+ while ((rle = STAILQ_FIRST(rl)) != NULL) {
+ if (rle->res)
+ panic("resource_list_free: resource entry is busy");
+ STAILQ_REMOVE_HEAD(rl, link);
+ free(rle, M_BUS);
+ }
+}
+
+/**
+ * @brief Add a resource entry.
+ *
+ * This function adds a resource entry using the given @p type, @p
+ * start, @p end and @p count values. A rid value is chosen by
+ * searching sequentially for the first unused rid starting at zero.
+ *
+ * @param rl the resource list to edit
+ * @param type the resource entry type (e.g. SYS_RES_MEMORY)
+ * @param start the start address of the resource
+ * @param end the end address of the resource
+ * @param count XXX end-start+1
+ */
+int
+resource_list_add_next(struct resource_list *rl, int type, u_long start,
+ u_long end, u_long count)
+{
+ int rid;
+
+ rid = 0;
+ while (resource_list_find(rl, type, rid) != NULL)
+ rid++;
+ resource_list_add(rl, type, rid, start, end, count);
+ return (rid);
+}
+
+/**
+ * @brief Add or modify a resource entry.
+ *
+ * If an existing entry exists with the same type and rid, it will be
+ * modified using the given values of @p start, @p end and @p
+ * count. If no entry exists, a new one will be created using the
+ * given values. The resource list entry that matches is then returned.
+ *
+ * @param rl the resource list to edit
+ * @param type the resource entry type (e.g. SYS_RES_MEMORY)
+ * @param rid the resource identifier
+ * @param start the start address of the resource
+ * @param end the end address of the resource
+ * @param count XXX end-start+1
+ */
+struct resource_list_entry *
+resource_list_add(struct resource_list *rl, int type, int rid,
+ u_long start, u_long end, u_long count)
+{
+ struct resource_list_entry *rle;
+
+ rle = resource_list_find(rl, type, rid);
+ if (!rle) {
+ rle = malloc(sizeof(struct resource_list_entry), M_BUS,
+ M_NOWAIT);
+ if (!rle)
+ panic("resource_list_add: can't record entry");
+ STAILQ_INSERT_TAIL(rl, rle, link);
+ rle->type = type;
+ rle->rid = rid;
+ rle->res = NULL;
+ }
+
+ if (rle->res)
+ panic("resource_list_add: resource entry is busy");
+
+ rle->start = start;
+ rle->end = end;
+ rle->count = count;
+ return (rle);
+}
+
+/**
+ * @brief Find a resource entry by type and rid.
+ *
+ * @param rl the resource list to search
+ * @param type the resource entry type (e.g. SYS_RES_MEMORY)
+ * @param rid the resource identifier
+ *
+ * @returns the resource entry pointer or NULL if there is no such
+ * entry.
+ */
+struct resource_list_entry *
+resource_list_find(struct resource_list *rl, int type, int rid)
+{
+ struct resource_list_entry *rle;
+
+ STAILQ_FOREACH(rle, rl, link) {
+ if (rle->type == type && rle->rid == rid)
+ return (rle);
+ }
+ return (NULL);
+}
+
+/**
+ * @brief Delete a resource entry.
+ *
+ * @param rl the resource list to edit
+ * @param type the resource entry type (e.g. SYS_RES_MEMORY)
+ * @param rid the resource identifier
+ */
+void
+resource_list_delete(struct resource_list *rl, int type, int rid)
+{
+ struct resource_list_entry *rle = resource_list_find(rl, type, rid);
+
+ if (rle) {
+ if (rle->res != NULL)
+ panic("resource_list_delete: resource has not been released");
+ STAILQ_REMOVE(rl, rle, resource_list_entry, link);
+ free(rle, M_BUS);
+ }
+}
+
+/**
+ * @brief Helper function for implementing BUS_ALLOC_RESOURCE()
+ *
+ * Implement BUS_ALLOC_RESOURCE() by looking up a resource from the list
+ * and passing the allocation up to the parent of @p bus. This assumes
+ * that the first entry of @c device_get_ivars(child) is a struct
+ * resource_list. This also handles 'passthrough' allocations where a
+ * child is a remote descendant of bus by passing the allocation up to
+ * the parent of bus.
+ *
+ * Typically, a bus driver would store a list of child resources
+ * somewhere in the child device's ivars (see device_get_ivars()) and
+ * its implementation of BUS_ALLOC_RESOURCE() would find that list and
+ * then call resource_list_alloc() to perform the allocation.
+ *
+ * @param rl the resource list to allocate from
+ * @param bus the parent device of @p child
+ * @param child the device which is requesting an allocation
+ * @param type the type of resource to allocate
+ * @param rid a pointer to the resource identifier
+ * @param start hint at the start of the resource range - pass
+ * @c 0UL for any start address
+ * @param end hint at the end of the resource range - pass
+ * @c ~0UL for any end address
+ * @param count hint at the size of range required - pass @c 1
+ * for any size
+ * @param flags any extra flags to control the resource
+ * allocation - see @c RF_XXX flags in
+ * <sys/rman.h> for details
+ *
+ * @returns the resource which was allocated or @c NULL if no
+ * resource could be allocated
+ */
+struct resource *
+resource_list_alloc(struct resource_list *rl, device_t bus, device_t child,
+ int type, int *rid, u_long start, u_long end, u_long count, u_int flags)
+{
+ struct resource_list_entry *rle = NULL;
+ int passthrough = (device_get_parent(child) != bus);
+ int isdefault = (start == 0UL && end == ~0UL);
+
+ if (passthrough) {
+ return (BUS_ALLOC_RESOURCE(device_get_parent(bus), child,
+ type, rid, start, end, count, flags));
+ }
+
+ rle = resource_list_find(rl, type, *rid);
+
+ if (!rle)
+ return (NULL); /* no resource of that type/rid */
+
+ if (rle->res)
+ panic("resource_list_alloc: resource entry is busy");
+
+ if (isdefault) {
+ start = rle->start;
+ count = ulmax(count, rle->count);
+ end = ulmax(rle->end, start + count - 1);
+ }
+
+ rle->res = BUS_ALLOC_RESOURCE(device_get_parent(bus), child,
+ type, rid, start, end, count, flags);
+
+ /*
+ * Record the new range.
+ */
+ if (rle->res) {
+ rle->start = rman_get_start(rle->res);
+ rle->end = rman_get_end(rle->res);
+ rle->count = count;
+ }
+
+ return (rle->res);
+}
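+
+/*
+ * Illustrative sketch (not from the original sources): a minimal
+ * BUS_ALLOC_RESOURCE() method built on resource_list_alloc(), assuming
+ * the convention described above that the child's ivars begin with a
+ * struct resource_list; the function name is hypothetical.
+ *
+ *	static struct resource *
+ *	mybus_alloc_resource(device_t bus, device_t child, int type,
+ *	    int *rid, u_long start, u_long end, u_long count, u_int flags)
+ *	{
+ *		struct resource_list *rl = device_get_ivars(child);
+ *
+ *		return (resource_list_alloc(rl, bus, child, type, rid,
+ *		    start, end, count, flags));
+ *	}
+ */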
+
+/**
+ * @brief Helper function for implementing BUS_RELEASE_RESOURCE()
+ *
+ * Implement BUS_RELEASE_RESOURCE() using a resource list. Normally
+ * used with resource_list_alloc().
+ *
+ * @param rl the resource list which was allocated from
+ * @param bus the parent device of @p child
+ * @param child the device which is requesting a release
+ * @param type the type of resource to allocate
+ * @param rid the resource identifier
+ * @param res the resource to release
+ *
+ * @retval 0 success
+ * @retval non-zero a standard unix error code indicating what
+ * error condition prevented the operation
+ */
+int
+resource_list_release(struct resource_list *rl, device_t bus, device_t child,
+ int type, int rid, struct resource *res)
+{
+ struct resource_list_entry *rle = NULL;
+ int passthrough = (device_get_parent(child) != bus);
+ int error;
+
+ if (passthrough) {
+ return (BUS_RELEASE_RESOURCE(device_get_parent(bus), child,
+ type, rid, res));
+ }
+
+ rle = resource_list_find(rl, type, rid);
+
+ if (!rle)
+ panic("resource_list_release: can't find resource");
+ if (!rle->res)
+ panic("resource_list_release: resource entry is not busy");
+
+ error = BUS_RELEASE_RESOURCE(device_get_parent(bus), child,
+ type, rid, res);
+ if (error)
+ return (error);
+
+ rle->res = NULL;
+ return (0);
+}
+
+/**
+ * @brief Print a description of resources in a resource list
+ *
+ * Print all resources of a specified type, for use in BUS_PRINT_CHILD().
+ * The name is printed if at least one resource of the given type is available.
+ * The format is used to print resource start and end.
+ *
+ * @param rl the resource list to print
+ * @param name the name of @p type, e.g. @c "memory"
+ * @param type	the type of resource entry to print
+ * @param format printf(9) format string to print resource
+ * start and end values
+ *
+ * @returns the number of characters printed
+ */
+int
+resource_list_print_type(struct resource_list *rl, const char *name, int type,
+ const char *format)
+{
+ struct resource_list_entry *rle;
+ int printed, retval;
+
+ printed = 0;
+ retval = 0;
+	/* Walk the whole list, printing only entries of the requested type. */
+ STAILQ_FOREACH(rle, rl, link) {
+ if (rle->type == type) {
+ if (printed == 0)
+ retval += printf(" %s ", name);
+ else
+ retval += printf(",");
+ printed++;
+ retval += printf(format, rle->start);
+ if (rle->count > 1) {
+ retval += printf("-");
+ retval += printf(format, rle->start +
+ rle->count - 1);
+ }
+ }
+ }
+ return (retval);
+}
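+
+/*
+ * Example call (illustrative; the range shown is hypothetical): for a
+ * list holding one memory range,
+ *
+ *	resource_list_print_type(rl, "memory", SYS_RES_MEMORY, "%#lx");
+ *
+ * would print something like " memory 0xd0000000-0xd0000fff".
+ */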
+
+/**
+ * @brief Releases all the resources in a list.
+ *
+ * @param rl The resource list to purge.
+ *
+ * @returns nothing
+ */
+void
+resource_list_purge(struct resource_list *rl)
+{
+ struct resource_list_entry *rle;
+
+ while ((rle = STAILQ_FIRST(rl)) != NULL) {
+ if (rle->res)
+ bus_release_resource(rman_get_device(rle->res),
+ rle->type, rle->rid, rle->res);
+ STAILQ_REMOVE_HEAD(rl, link);
+ free(rle, M_BUS);
+ }
+}
+
+device_t
+bus_generic_add_child(device_t dev, u_int order, const char *name, int unit)
+{
+
+ return (device_add_child_ordered(dev, order, name, unit));
+}
+
+/**
+ * @brief Helper function for implementing DEVICE_PROBE()
+ *
+ * This function can be used to help implement the DEVICE_PROBE() for
+ * a bus (i.e. a device which has other devices attached to it). It
+ * calls the DEVICE_IDENTIFY() method of each driver in the device's
+ * devclass.
+ */
+int
+bus_generic_probe(device_t dev)
+{
+ devclass_t dc = dev->devclass;
+ driverlink_t dl;
+
+ TAILQ_FOREACH(dl, &dc->drivers, link) {
+		/*
+		 * If this driver's pass is too high, then ignore it.
+		 * For most drivers in the default pass, this will
+		 * never be true.  On early-pass buses, only the
+		 * identify routines of eligible drivers are called
+		 * when this routine runs; drivers for later passes
+		 * have their identify routines called on early-pass
+		 * buses during BUS_NEW_PASS().
+		 */
+ if (dl->pass > bus_current_pass)
+ continue;
+ DEVICE_IDENTIFY(dl->driver, dev);
+ }
+
+ return (0);
+}
+
+/**
+ * @brief Helper function for implementing DEVICE_ATTACH()
+ *
+ * This function can be used to help implement the DEVICE_ATTACH() for
+ * a bus. It calls device_probe_and_attach() for each of the device's
+ * children.
+ */
+int
+bus_generic_attach(device_t dev)
+{
+ device_t child;
+
+ TAILQ_FOREACH(child, &dev->children, link) {
+ device_probe_and_attach(child);
+ }
+
+ return (0);
+}
+
+/**
+ * @brief Helper function for implementing DEVICE_DETACH()
+ *
+ * This function can be used to help implement the DEVICE_DETACH() for
+ * a bus. It calls device_detach() for each of the device's
+ * children.
+ */
+int
+bus_generic_detach(device_t dev)
+{
+ device_t child;
+ int error;
+
+ if (dev->state != DS_ATTACHED)
+ return (EBUSY);
+
+ TAILQ_FOREACH(child, &dev->children, link) {
+ if ((error = device_detach(child)) != 0)
+ return (error);
+ }
+
+ return (0);
+}
+
+/**
+ * @brief Helper function for implementing DEVICE_SHUTDOWN()
+ *
+ * This function can be used to help implement the DEVICE_SHUTDOWN()
+ * for a bus. It calls device_shutdown() for each of the device's
+ * children.
+ */
+int
+bus_generic_shutdown(device_t dev)
+{
+ device_t child;
+
+ TAILQ_FOREACH(child, &dev->children, link) {
+ device_shutdown(child);
+ }
+
+ return (0);
+}
+
+/**
+ * @brief Helper function for implementing DEVICE_SUSPEND()
+ *
+ * This function can be used to help implement the DEVICE_SUSPEND()
+ * for a bus. It calls DEVICE_SUSPEND() for each of the device's
+ * children. If any call to DEVICE_SUSPEND() fails, the suspend
+ * operation is aborted and any devices which were suspended are
+ * resumed immediately by calling their DEVICE_RESUME() methods.
+ */
+int
+bus_generic_suspend(device_t dev)
+{
+ int error;
+ device_t child, child2;
+
+ TAILQ_FOREACH(child, &dev->children, link) {
+ error = DEVICE_SUSPEND(child);
+ if (error) {
+ for (child2 = TAILQ_FIRST(&dev->children);
+ child2 && child2 != child;
+ child2 = TAILQ_NEXT(child2, link))
+ DEVICE_RESUME(child2);
+ return (error);
+ }
+ }
+ return (0);
+}
+
+/**
+ * @brief Helper function for implementing DEVICE_RESUME()
+ *
+ * This function can be used to help implement the DEVICE_RESUME() for
+ * a bus. It calls DEVICE_RESUME() on each of the device's children.
+ */
+int
+bus_generic_resume(device_t dev)
+{
+ device_t child;
+
+ TAILQ_FOREACH(child, &dev->children, link) {
+ DEVICE_RESUME(child);
+ /* if resume fails, there's nothing we can usefully do... */
+ }
+ return (0);
+}
+
+/**
+ * @brief Helper function for implementing BUS_PRINT_CHILD().
+ *
+ * This function prints the first part of the ascii representation of
+ * @p child, including its name, unit and description (if any - see
+ * device_set_desc()).
+ *
+ * @returns the number of characters printed
+ */
+int
+bus_print_child_header(device_t dev, device_t child)
+{
+ int retval = 0;
+
+ if (device_get_desc(child)) {
+ retval += device_printf(child, "<%s>", device_get_desc(child));
+ } else {
+ retval += printf("%s", device_get_nameunit(child));
+ }
+
+ return (retval);
+}
+
+/**
+ * @brief Helper function for implementing BUS_PRINT_CHILD().
+ *
+ * This function prints the last part of the ascii representation of
+ * @p child, which consists of the string @c " on " followed by the
+ * name and unit of the @p dev.
+ *
+ * @returns the number of characters printed
+ */
+int
+bus_print_child_footer(device_t dev, device_t child)
+{
+ return (printf(" on %s\n", device_get_nameunit(dev)));
+}
+
+/**
+ * @brief Helper function for implementing BUS_PRINT_CHILD().
+ *
+ * This function simply calls bus_print_child_header() followed by
+ * bus_print_child_footer().
+ *
+ * @returns the number of characters printed
+ */
+int
+bus_generic_print_child(device_t dev, device_t child)
+{
+ int retval = 0;
+
+ retval += bus_print_child_header(dev, child);
+ retval += bus_print_child_footer(dev, child);
+
+ return (retval);
+}
+
+/**
+ * @brief Stub function for implementing BUS_READ_IVAR().
+ *
+ * @returns ENOENT
+ */
+int
+bus_generic_read_ivar(device_t dev, device_t child, int index,
+ uintptr_t * result)
+{
+ return (ENOENT);
+}
+
+/**
+ * @brief Stub function for implementing BUS_WRITE_IVAR().
+ *
+ * @returns ENOENT
+ */
+int
+bus_generic_write_ivar(device_t dev, device_t child, int index,
+ uintptr_t value)
+{
+ return (ENOENT);
+}
+
+/**
+ * @brief Stub function for implementing BUS_GET_RESOURCE_LIST().
+ *
+ * @returns NULL
+ */
+struct resource_list *
+bus_generic_get_resource_list(device_t dev, device_t child)
+{
+ return (NULL);
+}
+
+/**
+ * @brief Helper function for implementing BUS_DRIVER_ADDED().
+ *
+ * This implementation of BUS_DRIVER_ADDED() simply calls the driver's
+ * DEVICE_IDENTIFY() method to allow it to add new children to the bus
+ * and then calls device_probe_and_attach() for each unattached child.
+ */
+void
+bus_generic_driver_added(device_t dev, driver_t *driver)
+{
+ device_t child;
+
+ DEVICE_IDENTIFY(driver, dev);
+ TAILQ_FOREACH(child, &dev->children, link) {
+ if (child->state == DS_NOTPRESENT ||
+ (child->flags & DF_REBID))
+ device_probe_and_attach(child);
+ }
+}
+
+/**
+ * @brief Helper function for implementing BUS_NEW_PASS().
+ *
+ * This implementation of BUS_NEW_PASS() first calls the identify
+ * routines for any drivers that probe at the current pass. Then it
+ * walks the list of devices for this bus. If a device is already
+ * attached, then it calls BUS_NEW_PASS() on that device. If the
+ * device is not already attached, it attempts to attach a driver to
+ * it.
+ */
+void
+bus_generic_new_pass(device_t dev)
+{
+ driverlink_t dl;
+ devclass_t dc;
+ device_t child;
+
+ dc = dev->devclass;
+ TAILQ_FOREACH(dl, &dc->drivers, link) {
+ if (dl->pass == bus_current_pass)
+ DEVICE_IDENTIFY(dl->driver, dev);
+ }
+ TAILQ_FOREACH(child, &dev->children, link) {
+ if (child->state >= DS_ATTACHED)
+ BUS_NEW_PASS(child);
+ else if (child->state == DS_NOTPRESENT)
+ device_probe_and_attach(child);
+ }
+}
+
+/**
+ * @brief Helper function for implementing BUS_SETUP_INTR().
+ *
+ * This simple implementation of BUS_SETUP_INTR() simply calls the
+ * BUS_SETUP_INTR() method of the parent of @p dev.
+ */
+int
+bus_generic_setup_intr(device_t dev, device_t child, struct resource *irq,
+ int flags, driver_filter_t *filter, driver_intr_t *intr, void *arg,
+ void **cookiep)
+{
+ /* Propagate up the bus hierarchy until someone handles it. */
+ if (dev->parent)
+ return (BUS_SETUP_INTR(dev->parent, child, irq, flags,
+ filter, intr, arg, cookiep));
+ return (EINVAL);
+}
+
+/**
+ * @brief Helper function for implementing BUS_TEARDOWN_INTR().
+ *
+ * This simple implementation of BUS_TEARDOWN_INTR() simply calls the
+ * BUS_TEARDOWN_INTR() method of the parent of @p dev.
+ */
+int
+bus_generic_teardown_intr(device_t dev, device_t child, struct resource *irq,
+ void *cookie)
+{
+ /* Propagate up the bus hierarchy until someone handles it. */
+ if (dev->parent)
+ return (BUS_TEARDOWN_INTR(dev->parent, child, irq, cookie));
+ return (EINVAL);
+}
+
+/**
+ * @brief Helper function for implementing BUS_ALLOC_RESOURCE().
+ *
+ * This simple implementation of BUS_ALLOC_RESOURCE() simply calls the
+ * BUS_ALLOC_RESOURCE() method of the parent of @p dev.
+ */
+struct resource *
+bus_generic_alloc_resource(device_t dev, device_t child, int type, int *rid,
+ u_long start, u_long end, u_long count, u_int flags)
+{
+ /* Propagate up the bus hierarchy until someone handles it. */
+ if (dev->parent)
+ return (BUS_ALLOC_RESOURCE(dev->parent, child, type, rid,
+ start, end, count, flags));
+ return (NULL);
+}
+
+/**
+ * @brief Helper function for implementing BUS_RELEASE_RESOURCE().
+ *
+ * This simple implementation of BUS_RELEASE_RESOURCE() simply calls the
+ * BUS_RELEASE_RESOURCE() method of the parent of @p dev.
+ */
+int
+bus_generic_release_resource(device_t dev, device_t child, int type, int rid,
+ struct resource *r)
+{
+ /* Propagate up the bus hierarchy until someone handles it. */
+ if (dev->parent)
+ return (BUS_RELEASE_RESOURCE(dev->parent, child, type, rid,
+ r));
+ return (EINVAL);
+}
+
+/**
+ * @brief Helper function for implementing BUS_ACTIVATE_RESOURCE().
+ *
+ * This simple implementation of BUS_ACTIVATE_RESOURCE() simply calls the
+ * BUS_ACTIVATE_RESOURCE() method of the parent of @p dev.
+ */
+int
+bus_generic_activate_resource(device_t dev, device_t child, int type, int rid,
+ struct resource *r)
+{
+ /* Propagate up the bus hierarchy until someone handles it. */
+ if (dev->parent)
+ return (BUS_ACTIVATE_RESOURCE(dev->parent, child, type, rid,
+ r));
+ return (EINVAL);
+}
+
+/**
+ * @brief Helper function for implementing BUS_DEACTIVATE_RESOURCE().
+ *
+ * This simple implementation of BUS_DEACTIVATE_RESOURCE() simply calls the
+ * BUS_DEACTIVATE_RESOURCE() method of the parent of @p dev.
+ */
+int
+bus_generic_deactivate_resource(device_t dev, device_t child, int type,
+ int rid, struct resource *r)
+{
+ /* Propagate up the bus hierarchy until someone handles it. */
+ if (dev->parent)
+ return (BUS_DEACTIVATE_RESOURCE(dev->parent, child, type, rid,
+ r));
+ return (EINVAL);
+}
+
+/**
+ * @brief Helper function for implementing BUS_BIND_INTR().
+ *
+ * This simple implementation of BUS_BIND_INTR() simply calls the
+ * BUS_BIND_INTR() method of the parent of @p dev.
+ */
+int
+bus_generic_bind_intr(device_t dev, device_t child, struct resource *irq,
+ int cpu)
+{
+
+ /* Propagate up the bus hierarchy until someone handles it. */
+ if (dev->parent)
+ return (BUS_BIND_INTR(dev->parent, child, irq, cpu));
+ return (EINVAL);
+}
+
+/**
+ * @brief Helper function for implementing BUS_CONFIG_INTR().
+ *
+ * This simple implementation of BUS_CONFIG_INTR() simply calls the
+ * BUS_CONFIG_INTR() method of the parent of @p dev.
+ */
+int
+bus_generic_config_intr(device_t dev, int irq, enum intr_trigger trig,
+ enum intr_polarity pol)
+{
+
+ /* Propagate up the bus hierarchy until someone handles it. */
+ if (dev->parent)
+ return (BUS_CONFIG_INTR(dev->parent, irq, trig, pol));
+ return (EINVAL);
+}
+
+/**
+ * @brief Helper function for implementing BUS_DESCRIBE_INTR().
+ *
+ * This simple implementation of BUS_DESCRIBE_INTR() simply calls the
+ * BUS_DESCRIBE_INTR() method of the parent of @p dev.
+ */
+int
+bus_generic_describe_intr(device_t dev, device_t child, struct resource *irq,
+ void *cookie, const char *descr)
+{
+
+ /* Propagate up the bus hierarchy until someone handles it. */
+ if (dev->parent)
+ return (BUS_DESCRIBE_INTR(dev->parent, child, irq, cookie,
+ descr));
+ return (EINVAL);
+}
+
+/**
+ * @brief Helper function for implementing BUS_GET_DMA_TAG().
+ *
+ * This simple implementation of BUS_GET_DMA_TAG() simply calls the
+ * BUS_GET_DMA_TAG() method of the parent of @p dev.
+ */
+bus_dma_tag_t
+bus_generic_get_dma_tag(device_t dev, device_t child)
+{
+
+ /* Propagate up the bus hierarchy until someone handles it. */
+ if (dev->parent != NULL)
+ return (BUS_GET_DMA_TAG(dev->parent, child));
+ return (NULL);
+}
+
+/**
+ * @brief Helper function for implementing BUS_GET_RESOURCE().
+ *
+ * This implementation of BUS_GET_RESOURCE() uses the
+ * resource_list_find() function to do most of the work. It calls
+ * BUS_GET_RESOURCE_LIST() to find a suitable resource list to
+ * search.
+ */
+int
+bus_generic_rl_get_resource(device_t dev, device_t child, int type, int rid,
+ u_long *startp, u_long *countp)
+{
+ struct resource_list * rl = NULL;
+ struct resource_list_entry * rle = NULL;
+
+ rl = BUS_GET_RESOURCE_LIST(dev, child);
+ if (!rl)
+ return (EINVAL);
+
+ rle = resource_list_find(rl, type, rid);
+ if (!rle)
+ return (ENOENT);
+
+ if (startp)
+ *startp = rle->start;
+ if (countp)
+ *countp = rle->count;
+
+ return (0);
+}
+
+/**
+ * @brief Helper function for implementing BUS_SET_RESOURCE().
+ *
+ * This implementation of BUS_SET_RESOURCE() uses the
+ * resource_list_add() function to do most of the work. It calls
+ * BUS_GET_RESOURCE_LIST() to find a suitable resource list to
+ * edit.
+ */
+int
+bus_generic_rl_set_resource(device_t dev, device_t child, int type, int rid,
+ u_long start, u_long count)
+{
+ struct resource_list * rl = NULL;
+
+ rl = BUS_GET_RESOURCE_LIST(dev, child);
+ if (!rl)
+ return (EINVAL);
+
+ resource_list_add(rl, type, rid, start, (start + count - 1), count);
+
+ return (0);
+}
+
+/**
+ * @brief Helper function for implementing BUS_DELETE_RESOURCE().
+ *
+ * This implementation of BUS_DELETE_RESOURCE() uses the
+ * resource_list_delete() function to do most of the work. It calls
+ * BUS_GET_RESOURCE_LIST() to find a suitable resource list to
+ * edit.
+ */
+void
+bus_generic_rl_delete_resource(device_t dev, device_t child, int type, int rid)
+{
+ struct resource_list * rl = NULL;
+
+ rl = BUS_GET_RESOURCE_LIST(dev, child);
+ if (!rl)
+ return;
+
+ resource_list_delete(rl, type, rid);
+
+ return;
+}
+
+/**
+ * @brief Helper function for implementing BUS_RELEASE_RESOURCE().
+ *
+ * This implementation of BUS_RELEASE_RESOURCE() uses the
+ * resource_list_release() function to do most of the work. It calls
+ * BUS_GET_RESOURCE_LIST() to find a suitable resource list.
+ */
+int
+bus_generic_rl_release_resource(device_t dev, device_t child, int type,
+ int rid, struct resource *r)
+{
+ struct resource_list * rl = NULL;
+
+ rl = BUS_GET_RESOURCE_LIST(dev, child);
+ if (!rl)
+ return (EINVAL);
+
+ return (resource_list_release(rl, dev, child, type, rid, r));
+}
+
+/**
+ * @brief Helper function for implementing BUS_ALLOC_RESOURCE().
+ *
+ * This implementation of BUS_ALLOC_RESOURCE() uses the
+ * resource_list_alloc() function to do most of the work. It calls
+ * BUS_GET_RESOURCE_LIST() to find a suitable resource list.
+ */
+struct resource *
+bus_generic_rl_alloc_resource(device_t dev, device_t child, int type,
+ int *rid, u_long start, u_long end, u_long count, u_int flags)
+{
+ struct resource_list * rl = NULL;
+
+ rl = BUS_GET_RESOURCE_LIST(dev, child);
+ if (!rl)
+ return (NULL);
+
+ return (resource_list_alloc(rl, dev, child, type, rid,
+ start, end, count, flags));
+}
+
+/**
+ * @brief Helper function for implementing BUS_CHILD_PRESENT().
+ *
+ * This simple implementation of BUS_CHILD_PRESENT() simply calls the
+ * BUS_CHILD_PRESENT() method of the parent of @p dev.
+ */
+int
+bus_generic_child_present(device_t dev, device_t child)
+{
+ return (BUS_CHILD_PRESENT(device_get_parent(dev), dev));
+}
+
+/*
+ * Some convenience functions to make it easier for drivers to use the
+ * resource-management functions. All these really do is hide the
+ * indirection through the parent's method table, making for slightly
+ * less-wordy code. In the future, it might make sense for this code
+ * to maintain some sort of list of resources allocated by each device.
+ */
+
+int
+bus_alloc_resources(device_t dev, struct resource_spec *rs,
+ struct resource **res)
+{
+ int i;
+
+ for (i = 0; rs[i].type != -1; i++)
+ res[i] = NULL;
+ for (i = 0; rs[i].type != -1; i++) {
+ res[i] = bus_alloc_resource_any(dev,
+ rs[i].type, &rs[i].rid, rs[i].flags);
+ if (res[i] == NULL && !(rs[i].flags & RF_OPTIONAL)) {
+ bus_release_resources(dev, rs, res);
+ return (ENXIO);
+ }
+ }
+ return (0);
+}
+
+void
+bus_release_resources(device_t dev, const struct resource_spec *rs,
+ struct resource **res)
+{
+ int i;
+
+ for (i = 0; rs[i].type != -1; i++)
+ if (res[i] != NULL) {
+ bus_release_resource(
+ dev, rs[i].type, rs[i].rid, res[i]);
+ res[i] = NULL;
+ }
+}
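+
+/*
+ * Usage sketch (illustrative, not from the original sources): drivers
+ * commonly pair these two functions with a terminated resource_spec
+ * array; the array and softc names below are hypothetical.
+ *
+ *	static struct resource_spec mydev_res_spec[] = {
+ *		{ SYS_RES_MEMORY, 0, RF_ACTIVE },
+ *		{ SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
+ *		{ -1, 0, 0 }
+ *	};
+ *
+ *	error = bus_alloc_resources(dev, mydev_res_spec, sc->res);
+ *	...
+ *	bus_release_resources(dev, mydev_res_spec, sc->res);
+ */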
+
+/**
+ * @brief Wrapper function for BUS_ALLOC_RESOURCE().
+ *
+ * This function simply calls the BUS_ALLOC_RESOURCE() method of the
+ * parent of @p dev.
+ */
+struct resource *
+bus_alloc_resource(device_t dev, int type, int *rid, u_long start, u_long end,
+ u_long count, u_int flags)
+{
+ if (dev->parent == NULL)
+ return (NULL);
+ return (BUS_ALLOC_RESOURCE(dev->parent, dev, type, rid, start, end,
+ count, flags));
+}
+
+/**
+ * @brief Wrapper function for BUS_ACTIVATE_RESOURCE().
+ *
+ * This function simply calls the BUS_ACTIVATE_RESOURCE() method of the
+ * parent of @p dev.
+ */
+int
+bus_activate_resource(device_t dev, int type, int rid, struct resource *r)
+{
+ if (dev->parent == NULL)
+ return (EINVAL);
+ return (BUS_ACTIVATE_RESOURCE(dev->parent, dev, type, rid, r));
+}
+
+/**
+ * @brief Wrapper function for BUS_DEACTIVATE_RESOURCE().
+ *
+ * This function simply calls the BUS_DEACTIVATE_RESOURCE() method of the
+ * parent of @p dev.
+ */
+int
+bus_deactivate_resource(device_t dev, int type, int rid, struct resource *r)
+{
+ if (dev->parent == NULL)
+ return (EINVAL);
+ return (BUS_DEACTIVATE_RESOURCE(dev->parent, dev, type, rid, r));
+}
+
+/**
+ * @brief Wrapper function for BUS_RELEASE_RESOURCE().
+ *
+ * This function simply calls the BUS_RELEASE_RESOURCE() method of the
+ * parent of @p dev.
+ */
+int
+bus_release_resource(device_t dev, int type, int rid, struct resource *r)
+{
+ if (dev->parent == NULL)
+ return (EINVAL);
+ return (BUS_RELEASE_RESOURCE(dev->parent, dev, type, rid, r));
+}
+
+/**
+ * @brief Wrapper function for BUS_SETUP_INTR().
+ *
+ * This function simply calls the BUS_SETUP_INTR() method of the
+ * parent of @p dev.
+ */
+int
+bus_setup_intr(device_t dev, struct resource *r, int flags,
+ driver_filter_t filter, driver_intr_t handler, void *arg, void **cookiep)
+{
+ int error;
+
+ if (dev->parent == NULL)
+ return (EINVAL);
+ error = BUS_SETUP_INTR(dev->parent, dev, r, flags, filter, handler,
+ arg, cookiep);
+ if (error != 0)
+ return (error);
+ if (handler != NULL && !(flags & INTR_MPSAFE))
+ device_printf(dev, "[GIANT-LOCKED]\n");
+ if (bootverbose && (flags & INTR_MPSAFE))
+ device_printf(dev, "[MPSAFE]\n");
+ if (filter != NULL) {
+ if (handler == NULL)
+ device_printf(dev, "[FILTER]\n");
+ else
+ device_printf(dev, "[FILTER+ITHREAD]\n");
+ } else
+ device_printf(dev, "[ITHREAD]\n");
+ return (0);
+}
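+
+/*
+ * Typical call (illustrative sketch; the softc layout and handler name
+ * are hypothetical): an MPSAFE network driver registering an ithread
+ * handler with no filter.
+ *
+ *	error = bus_setup_intr(dev, sc->irq_res,
+ *	    INTR_TYPE_NET | INTR_MPSAFE, NULL, mydev_intr, sc,
+ *	    &sc->intr_cookie);
+ */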
+
+/**
+ * @brief Wrapper function for BUS_TEARDOWN_INTR().
+ *
+ * This function simply calls the BUS_TEARDOWN_INTR() method of the
+ * parent of @p dev.
+ */
+int
+bus_teardown_intr(device_t dev, struct resource *r, void *cookie)
+{
+ if (dev->parent == NULL)
+ return (EINVAL);
+ return (BUS_TEARDOWN_INTR(dev->parent, dev, r, cookie));
+}
+
+/**
+ * @brief Wrapper function for BUS_BIND_INTR().
+ *
+ * This function simply calls the BUS_BIND_INTR() method of the
+ * parent of @p dev.
+ */
+int
+bus_bind_intr(device_t dev, struct resource *r, int cpu)
+{
+ if (dev->parent == NULL)
+ return (EINVAL);
+ return (BUS_BIND_INTR(dev->parent, dev, r, cpu));
+}
+
+/**
+ * @brief Wrapper function for BUS_DESCRIBE_INTR().
+ *
+ * This function first formats the requested description into a
+ * temporary buffer and then calls the BUS_DESCRIBE_INTR() method of
+ * the parent of @p dev.
+ */
+int
+bus_describe_intr(device_t dev, struct resource *irq, void *cookie,
+ const char *fmt, ...)
+{
+ va_list ap;
+ char descr[MAXCOMLEN + 1];
+
+ if (dev->parent == NULL)
+ return (EINVAL);
+ va_start(ap, fmt);
+ vsnprintf(descr, sizeof(descr), fmt, ap);
+ va_end(ap);
+ return (BUS_DESCRIBE_INTR(dev->parent, dev, irq, cookie, descr));
+}
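+
+/*
+ * Example (illustrative): bus_describe_intr(dev, irq, cookie, "rx%d", i)
+ * labels the handler "rx0", "rx1", ... in interrupt listings such as
+ * the output of vmstat -i.
+ */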
+
+/**
+ * @brief Wrapper function for BUS_SET_RESOURCE().
+ *
+ * This function simply calls the BUS_SET_RESOURCE() method of the
+ * parent of @p dev.
+ */
+int
+bus_set_resource(device_t dev, int type, int rid,
+ u_long start, u_long count)
+{
+ return (BUS_SET_RESOURCE(device_get_parent(dev), dev, type, rid,
+ start, count));
+}
+
+/**
+ * @brief Wrapper function for BUS_GET_RESOURCE().
+ *
+ * This function simply calls the BUS_GET_RESOURCE() method of the
+ * parent of @p dev.
+ */
+int
+bus_get_resource(device_t dev, int type, int rid,
+ u_long *startp, u_long *countp)
+{
+ return (BUS_GET_RESOURCE(device_get_parent(dev), dev, type, rid,
+ startp, countp));
+}
+
+/**
+ * @brief Wrapper function for BUS_GET_RESOURCE().
+ *
+ * This function simply calls the BUS_GET_RESOURCE() method of the
+ * parent of @p dev and returns the start value.
+ */
+u_long
+bus_get_resource_start(device_t dev, int type, int rid)
+{
+ u_long start, count;
+ int error;
+
+ error = BUS_GET_RESOURCE(device_get_parent(dev), dev, type, rid,
+ &start, &count);
+ if (error)
+ return (0);
+ return (start);
+}
+
+/**
+ * @brief Wrapper function for BUS_GET_RESOURCE().
+ *
+ * This function simply calls the BUS_GET_RESOURCE() method of the
+ * parent of @p dev and returns the count value.
+ */
+u_long
+bus_get_resource_count(device_t dev, int type, int rid)
+{
+ u_long start, count;
+ int error;
+
+ error = BUS_GET_RESOURCE(device_get_parent(dev), dev, type, rid,
+ &start, &count);
+ if (error)
+ return (0);
+ return (count);
+}
+
+/**
+ * @brief Wrapper function for BUS_DELETE_RESOURCE().
+ *
+ * This function simply calls the BUS_DELETE_RESOURCE() method of the
+ * parent of @p dev.
+ */
+void
+bus_delete_resource(device_t dev, int type, int rid)
+{
+ BUS_DELETE_RESOURCE(device_get_parent(dev), dev, type, rid);
+}
+
+/**
+ * @brief Wrapper function for BUS_CHILD_PRESENT().
+ *
+ * This function simply calls the BUS_CHILD_PRESENT() method of the
+ * parent of @p dev.
+ */
+int
+bus_child_present(device_t child)
+{
+ return (BUS_CHILD_PRESENT(device_get_parent(child), child));
+}
+
+/**
+ * @brief Wrapper function for BUS_CHILD_PNPINFO_STR().
+ *
+ * This function simply calls the BUS_CHILD_PNPINFO_STR() method of the
+ * parent of @p dev.
+ */
+int
+bus_child_pnpinfo_str(device_t child, char *buf, size_t buflen)
+{
+ device_t parent;
+
+ parent = device_get_parent(child);
+ if (parent == NULL) {
+ *buf = '\0';
+ return (0);
+ }
+ return (BUS_CHILD_PNPINFO_STR(parent, child, buf, buflen));
+}
+
+/**
+ * @brief Wrapper function for BUS_CHILD_LOCATION_STR().
+ *
+ * This function simply calls the BUS_CHILD_LOCATION_STR() method of the
+ * parent of @p dev.
+ */
+int
+bus_child_location_str(device_t child, char *buf, size_t buflen)
+{
+ device_t parent;
+
+ parent = device_get_parent(child);
+ if (parent == NULL) {
+ *buf = '\0';
+ return (0);
+ }
+ return (BUS_CHILD_LOCATION_STR(parent, child, buf, buflen));
+}
+
+/**
+ * @brief Wrapper function for BUS_GET_DMA_TAG().
+ *
+ * This function simply calls the BUS_GET_DMA_TAG() method of the
+ * parent of @p dev.
+ */
+bus_dma_tag_t
+bus_get_dma_tag(device_t dev)
+{
+ device_t parent;
+
+ parent = device_get_parent(dev);
+ if (parent == NULL)
+ return (NULL);
+ return (BUS_GET_DMA_TAG(parent, dev));
+}
+
+/* Resume all devices and then notify userland that we're up again. */
+static int
+root_resume(device_t dev)
+{
+ int error;
+
+ error = bus_generic_resume(dev);
+ if (error == 0)
+ devctl_notify("kern", "power", "resume", NULL);
+ return (error);
+}
+
+static int
+root_print_child(device_t dev, device_t child)
+{
+ int retval = 0;
+
+ retval += bus_print_child_header(dev, child);
+ retval += printf("\n");
+
+ return (retval);
+}
+
+static int
+root_setup_intr(device_t dev, device_t child, struct resource *irq, int flags,
+ driver_filter_t *filter, driver_intr_t *intr, void *arg, void **cookiep)
+{
+ /*
+ * If an interrupt mapping gets to here something bad has happened.
+ */
+ panic("root_setup_intr");
+}
+
+/*
+ * If we get here, assume that the device is permanent and really is
+ * present in the system.  Removable bus drivers are expected to intercept
+ * this call long before it gets here.  We return -1 so that drivers that
+ * really care can check against -1 or some errno returned higher in the
+ * food chain.
+ */
+static int
+root_child_present(device_t dev, device_t child)
+{
+ return (-1);
+}
+
+static kobj_method_t root_methods[] = {
+ /* Device interface */
+ KOBJMETHOD(device_shutdown, bus_generic_shutdown),
+ KOBJMETHOD(device_suspend, bus_generic_suspend),
+ KOBJMETHOD(device_resume, root_resume),
+
+ /* Bus interface */
+ KOBJMETHOD(bus_print_child, root_print_child),
+ KOBJMETHOD(bus_read_ivar, bus_generic_read_ivar),
+ KOBJMETHOD(bus_write_ivar, bus_generic_write_ivar),
+ KOBJMETHOD(bus_setup_intr, root_setup_intr),
+ KOBJMETHOD(bus_child_present, root_child_present),
+
+ KOBJMETHOD_END
+};
+
+static driver_t root_driver = {
+ "root",
+ root_methods,
+ 1, /* no softc */
+};
+
+device_t root_bus;
+devclass_t root_devclass;
+
+static int
+root_bus_module_handler(module_t mod, int what, void* arg)
+{
+ switch (what) {
+ case MOD_LOAD:
+ TAILQ_INIT(&bus_data_devices);
+ kobj_class_compile((kobj_class_t) &root_driver);
+ root_bus = make_device(NULL, "root", 0);
+ root_bus->desc = "System root bus";
+ kobj_init((kobj_t) root_bus, (kobj_class_t) &root_driver);
+ root_bus->driver = &root_driver;
+ root_bus->state = DS_ATTACHED;
+ root_devclass = devclass_find_internal("root", NULL, FALSE);
+ devinit();
+ return (0);
+
+ case MOD_SHUTDOWN:
+ device_shutdown(root_bus);
+ return (0);
+ default:
+ return (EOPNOTSUPP);
+ }
+
+ return (0);
+}
+
+static moduledata_t root_bus_mod = {
+ "rootbus",
+ root_bus_module_handler,
+ NULL
+};
+DECLARE_MODULE(rootbus, root_bus_mod, SI_SUB_DRIVERS, SI_ORDER_FIRST);
+
+/**
+ * @brief Automatically configure devices
+ *
+ * This function begins the autoconfiguration process by calling
+ * device_probe_and_attach() for each child of the @c root0 device.
+ */
+void
+root_bus_configure(void)
+{
+
+ PDEBUG(("."));
+
+ /* Eventually this will be split up, but this is sufficient for now. */
+ bus_set_pass(BUS_PASS_DEFAULT);
+}
+
+/**
+ * @brief Module handler for registering device drivers
+ *
+ * This module handler is used to automatically register device
+ * drivers when modules are loaded. If @p what is MOD_LOAD, it calls
+ * devclass_add_driver() for the driver described by the
+ * driver_module_data structure pointed to by @p arg
+ */
+int
+driver_module_handler(module_t mod, int what, void *arg)
+{
+ struct driver_module_data *dmd;
+ devclass_t bus_devclass;
+ kobj_class_t driver;
+ int error, pass;
+
+ dmd = (struct driver_module_data *)arg;
+ bus_devclass = devclass_find_internal(dmd->dmd_busname, NULL, TRUE);
+ error = 0;
+
+ switch (what) {
+ case MOD_LOAD:
+ if (dmd->dmd_chainevh)
+ error = dmd->dmd_chainevh(mod,what,dmd->dmd_chainarg);
+
+ pass = dmd->dmd_pass;
+ driver = dmd->dmd_driver;
+ PDEBUG(("Loading module: driver %s on bus %s (pass %d)",
+ DRIVERNAME(driver), dmd->dmd_busname, pass));
+ error = devclass_add_driver(bus_devclass, driver, pass,
+ dmd->dmd_devclass);
+ break;
+
+ case MOD_UNLOAD:
+ PDEBUG(("Unloading module: driver %s from bus %s",
+ DRIVERNAME(dmd->dmd_driver),
+ dmd->dmd_busname));
+ error = devclass_delete_driver(bus_devclass,
+ dmd->dmd_driver);
+
+ if (!error && dmd->dmd_chainevh)
+ error = dmd->dmd_chainevh(mod,what,dmd->dmd_chainarg);
+ break;
+ case MOD_QUIESCE:
+ PDEBUG(("Quiesce module: driver %s from bus %s",
+ DRIVERNAME(dmd->dmd_driver),
+ dmd->dmd_busname));
+ error = devclass_quiesce_driver(bus_devclass,
+ dmd->dmd_driver);
+
+ if (!error && dmd->dmd_chainevh)
+ error = dmd->dmd_chainevh(mod,what,dmd->dmd_chainarg);
+ break;
+ default:
+ error = EOPNOTSUPP;
+ break;
+ }
+
+ return (error);
+}
+
+/**
+ * @brief Enumerate all hinted devices for this bus.
+ *
+ * Walks through the hints for this bus and calls the BUS_HINTED_CHILD()
+ * method for each one it finds. It searches first for the specific
+ * bus that's being probed for hinted children (e.g. isa0), and then for
+ * generic children (e.g. isa).
+ *
+ * @param bus	the bus device to enumerate
+ */
+void
+bus_enumerate_hinted_children(device_t bus)
+{
+ int i;
+ const char *dname, *busname;
+ int dunit;
+
+ /*
+ * enumerate all devices on the specific bus
+ */
+ busname = device_get_nameunit(bus);
+ i = 0;
+ while (resource_find_match(&i, &dname, &dunit, "at", busname) == 0)
+ BUS_HINTED_CHILD(bus, dname, dunit);
+
+ /*
+ * and all the generic ones.
+ */
+ busname = device_get_name(bus);
+ i = 0;
+ while (resource_find_match(&i, &dname, &dunit, "at", busname) == 0)
+ BUS_HINTED_CHILD(bus, dname, dunit);
+}
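+
+/*
+ * Example hints (illustrative; the device name and values are
+ * hypothetical): given
+ *
+ *	hint.sio.0.at="isa"
+ *	hint.sio.0.port="0x3F8"
+ *
+ * probing isa0 would result in BUS_HINTED_CHILD(isa0, "sio", 0).
+ */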
+
+#ifdef BUS_DEBUG
+
+/* The _short versions avoid iteration by not calling anything that prints
+ * more than one-liners. I love one-liners.
+ */
+
+static void
+print_device_short(device_t dev, int indent)
+{
+ if (!dev)
+ return;
+
+ indentprintf(("device %d: <%s> %sparent,%schildren,%s%s%s%s%s,%sivars,%ssoftc,busy=%d\n",
+ dev->unit, dev->desc,
+ (dev->parent? "":"no "),
+ (TAILQ_EMPTY(&dev->children)? "no ":""),
+ (dev->flags&DF_ENABLED? "enabled,":"disabled,"),
+ (dev->flags&DF_FIXEDCLASS? "fixed,":""),
+ (dev->flags&DF_WILDCARD? "wildcard,":""),
+ (dev->flags&DF_DESCMALLOCED? "descmalloced,":""),
+ (dev->flags&DF_REBID? "rebiddable,":""),
+ (dev->ivars? "":"no "),
+ (dev->softc? "":"no "),
+ dev->busy));
+}
+
+static void
+print_device(device_t dev, int indent)
+{
+ if (!dev)
+ return;
+
+ print_device_short(dev, indent);
+
+ indentprintf(("Parent:\n"));
+ print_device_short(dev->parent, indent+1);
+ indentprintf(("Driver:\n"));
+ print_driver_short(dev->driver, indent+1);
+ indentprintf(("Devclass:\n"));
+ print_devclass_short(dev->devclass, indent+1);
+}
+
+void
+print_device_tree_short(device_t dev, int indent)
+/* print the device and all its children (indented) */
+{
+ device_t child;
+
+ if (!dev)
+ return;
+
+ print_device_short(dev, indent);
+
+ TAILQ_FOREACH(child, &dev->children, link) {
+ print_device_tree_short(child, indent+1);
+ }
+}
+
+void
+print_device_tree(device_t dev, int indent)
+/* print the device and all its children (indented) */
+{
+ device_t child;
+
+ if (!dev)
+ return;
+
+ print_device(dev, indent);
+
+ TAILQ_FOREACH(child, &dev->children, link) {
+ print_device_tree(child, indent+1);
+ }
+}
+
+static void
+print_driver_short(driver_t *driver, int indent)
+{
+ if (!driver)
+ return;
+
+ indentprintf(("driver %s: softc size = %zd\n",
+ driver->name, driver->size));
+}
+
+static void
+print_driver(driver_t *driver, int indent)
+{
+ if (!driver)
+ return;
+
+ print_driver_short(driver, indent);
+}
+
+
+static void
+print_driver_list(driver_list_t drivers, int indent)
+{
+ driverlink_t driver;
+
+ TAILQ_FOREACH(driver, &drivers, link) {
+ print_driver(driver->driver, indent);
+ }
+}
+
+static void
+print_devclass_short(devclass_t dc, int indent)
+{
+ if ( !dc )
+ return;
+
+ indentprintf(("devclass %s: max units = %d\n", dc->name, dc->maxunit));
+}
+
+static void
+print_devclass(devclass_t dc, int indent)
+{
+ int i;
+
+ if ( !dc )
+ return;
+
+ print_devclass_short(dc, indent);
+ indentprintf(("Drivers:\n"));
+ print_driver_list(dc->drivers, indent+1);
+
+ indentprintf(("Devices:\n"));
+ for (i = 0; i < dc->maxunit; i++)
+ if (dc->devices[i])
+ print_device(dc->devices[i], indent+1);
+}
+
+void
+print_devclass_list_short(void)
+{
+ devclass_t dc;
+
+ printf("Short listing of devclasses, drivers & devices:\n");
+ TAILQ_FOREACH(dc, &devclasses, link) {
+ print_devclass_short(dc, 0);
+ }
+}
+
+void
+print_devclass_list(void)
+{
+ devclass_t dc;
+
+ printf("Full listing of devclasses, drivers & devices:\n");
+ TAILQ_FOREACH(dc, &devclasses, link) {
+ print_devclass(dc, 0);
+ }
+}
+
+#endif
+
+/*
+ * User-space access to the device tree.
+ *
+ * We implement a small set of nodes:
+ *
+ * hw.bus Single integer read method to obtain the
+ * current generation count.
+ * hw.bus.devices Reads the entire device tree in flat space.
+ * hw.bus.rman Resource manager interface
+ *
+ * We might like to add the ability to scan devclasses and/or drivers to
+ * determine what else is currently loaded/available.
+ */
+
+static int
+sysctl_bus(SYSCTL_HANDLER_ARGS)
+{
+ struct u_businfo ubus;
+
+ ubus.ub_version = BUS_USER_VERSION;
+ ubus.ub_generation = bus_data_generation;
+
+ return (SYSCTL_OUT(req, &ubus, sizeof(ubus)));
+}
+SYSCTL_NODE(_hw_bus, OID_AUTO, info, CTLFLAG_RW, sysctl_bus,
+ "bus-related data");
+
+static int
+sysctl_devices(SYSCTL_HANDLER_ARGS)
+{
+ int *name = (int *)arg1;
+ u_int namelen = arg2;
+ int index;
+ struct device *dev;
+ struct u_device udev; /* XXX this is a bit big */
+ int error;
+
+ if (namelen != 2)
+ return (EINVAL);
+
+ if (bus_data_generation_check(name[0]))
+ return (EINVAL);
+
+ index = name[1];
+
+ /*
+ * Scan the list of devices, looking for the requested index.
+ */
+ TAILQ_FOREACH(dev, &bus_data_devices, devlink) {
+ if (index-- == 0)
+ break;
+ }
+ if (dev == NULL)
+ return (ENOENT);
+
+ /*
+ * Populate the return array.
+ */
+ bzero(&udev, sizeof(udev));
+ udev.dv_handle = (uintptr_t)dev;
+ udev.dv_parent = (uintptr_t)dev->parent;
+ if (dev->nameunit != NULL)
+ strlcpy(udev.dv_name, dev->nameunit, sizeof(udev.dv_name));
+ if (dev->desc != NULL)
+ strlcpy(udev.dv_desc, dev->desc, sizeof(udev.dv_desc));
+ if (dev->driver != NULL && dev->driver->name != NULL)
+ strlcpy(udev.dv_drivername, dev->driver->name,
+ sizeof(udev.dv_drivername));
+ bus_child_pnpinfo_str(dev, udev.dv_pnpinfo, sizeof(udev.dv_pnpinfo));
+ bus_child_location_str(dev, udev.dv_location, sizeof(udev.dv_location));
+ udev.dv_devflags = dev->devflags;
+ udev.dv_flags = dev->flags;
+ udev.dv_state = dev->state;
+ error = SYSCTL_OUT(req, &udev, sizeof(udev));
+ return (error);
+}
+
+SYSCTL_NODE(_hw_bus, OID_AUTO, devices, CTLFLAG_RD, sysctl_devices,
+ "system device tree");
+
+int
+bus_data_generation_check(int generation)
+{
+ if (generation != bus_data_generation)
+ return (1);
+
+ /* XXX generate optimised lists here? */
+ return (0);
+}
+
+void
+bus_data_generation_update(void)
+{
+ bus_data_generation++;
+}
+
+int
+bus_free_resource(device_t dev, int type, struct resource *r)
+{
+ if (r == NULL)
+ return (0);
+ return (bus_release_resource(dev, type, rman_get_rid(r), r));
+}
diff --git a/rtems/freebsd/kern/subr_kobj.c b/rtems/freebsd/kern/subr_kobj.c
new file mode 100644
index 00000000..65f778fd
--- /dev/null
+++ b/rtems/freebsd/kern/subr_kobj.c
@@ -0,0 +1,363 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 2000,2003 Doug Rabson
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/kobj.h>
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/mutex.h>
+#include <rtems/freebsd/sys/sysctl.h>
+#ifndef TEST
+#include <rtems/freebsd/sys/systm.h>
+#endif
+
+#ifdef TEST
+#include <rtems/freebsd/local/usertest.h>
+#endif
+
+static MALLOC_DEFINE(M_KOBJ, "kobj", "Kernel object structures");
+
+#ifdef KOBJ_STATS
+
+u_int kobj_lookup_hits;
+u_int kobj_lookup_misses;
+
+SYSCTL_UINT(_kern, OID_AUTO, kobj_hits, CTLFLAG_RD,
+ &kobj_lookup_hits, 0, "");
+SYSCTL_UINT(_kern, OID_AUTO, kobj_misses, CTLFLAG_RD,
+ &kobj_lookup_misses, 0, "");
+
+#endif
+
+static struct mtx kobj_mtx;
+static int kobj_mutex_inited;
+static int kobj_next_id = 1;
+
+/*
+ * In the event that kobj_mtx has not been initialized yet,
+ * we will ignore it, and run without locks in order to support
+ * use of KOBJ before mutexes are available. This early in the boot
+ * process, everything is single threaded and so races should not
+ * happen. This is used to provide the PMAP layer on PowerPC, as well
+ * as board support.
+ */
+
+#define KOBJ_LOCK() if (kobj_mutex_inited) mtx_lock(&kobj_mtx);
+#define KOBJ_UNLOCK() if (kobj_mutex_inited) mtx_unlock(&kobj_mtx);
+#define KOBJ_ASSERT(what) if (kobj_mutex_inited) mtx_assert(&kobj_mtx,what);
+
+SYSCTL_UINT(_kern, OID_AUTO, kobj_methodcount, CTLFLAG_RD,
+ &kobj_next_id, 0, "");
+
+static void
+kobj_init_mutex(void *arg)
+{
+ if (!kobj_mutex_inited) {
+ mtx_init(&kobj_mtx, "kobj", NULL, MTX_DEF);
+ kobj_mutex_inited = 1;
+ }
+}
+
+SYSINIT(kobj, SI_SUB_LOCK, SI_ORDER_ANY, kobj_init_mutex, NULL);
+
+/*
+ * This method structure is used to initialise new caches. Since the
+ * desc pointer is NULL, it is guaranteed never to match any real
+ * descriptors.
+ */
+static struct kobj_method null_method = {
+ 0, 0,
+};
+
+int
+kobj_error_method(void)
+{
+
+ return ENXIO;
+}
+
+static void
+kobj_register_method(struct kobjop_desc *desc)
+{
+ KOBJ_ASSERT(MA_OWNED);
+
+ if (desc->id == 0) {
+ desc->id = kobj_next_id++;
+ }
+}
+
+static void
+kobj_unregister_method(struct kobjop_desc *desc)
+{
+}
+
+static void
+kobj_class_compile_common(kobj_class_t cls, kobj_ops_t ops)
+{
+ kobj_method_t *m;
+ int i;
+
+ KOBJ_ASSERT(MA_OWNED);
+
+ /*
+ * Don't do anything if we are already compiled.
+ */
+ if (cls->ops)
+ return;
+
+ /*
+ * First register any methods which need it.
+ */
+ for (i = 0, m = cls->methods; m->desc; i++, m++)
+ kobj_register_method(m->desc);
+
+ /*
+ * Then initialise the ops table.
+ */
+ for (i = 0; i < KOBJ_CACHE_SIZE; i++)
+ ops->cache[i] = &null_method;
+ ops->cls = cls;
+ cls->ops = ops;
+}
+
+void
+kobj_class_compile(kobj_class_t cls)
+{
+ kobj_ops_t ops;
+
+ KOBJ_ASSERT(MA_NOTOWNED);
+
+ /*
+ * Allocate space for the compiled ops table.
+ */
+ ops = malloc(sizeof(struct kobj_ops), M_KOBJ, M_NOWAIT);
+ if (!ops)
+ panic("kobj_compile_methods: out of memory");
+
+ KOBJ_LOCK();
+
+ /*
+ * We may have lost a race for kobj_class_compile here - check
+ * to make sure someone else hasn't already compiled this
+ * class.
+ */
+ if (cls->ops) {
+ KOBJ_UNLOCK();
+ free(ops, M_KOBJ);
+ return;
+ }
+
+ kobj_class_compile_common(cls, ops);
+ KOBJ_UNLOCK();
+}
+
+void
+kobj_class_compile_static(kobj_class_t cls, kobj_ops_t ops)
+{
+
+ KOBJ_ASSERT(MA_NOTOWNED);
+
+ /*
+ * Increment refs to make sure that the ops table is not freed.
+ */
+ KOBJ_LOCK();
+
+ cls->refs++;
+ kobj_class_compile_common(cls, ops);
+
+ KOBJ_UNLOCK();
+}
+
+static kobj_method_t*
+kobj_lookup_method_class(kobj_class_t cls, kobjop_desc_t desc)
+{
+ kobj_method_t *methods = cls->methods;
+ kobj_method_t *ce;
+
+ for (ce = methods; ce && ce->desc; ce++) {
+ if (ce->desc == desc) {
+ return ce;
+ }
+ }
+
+ return NULL;
+}
+
+static kobj_method_t*
+kobj_lookup_method_mi(kobj_class_t cls,
+ kobjop_desc_t desc)
+{
+ kobj_method_t *ce;
+ kobj_class_t *basep;
+
+ ce = kobj_lookup_method_class(cls, desc);
+ if (ce)
+ return ce;
+
+ basep = cls->baseclasses;
+ if (basep) {
+ for (; *basep; basep++) {
+ ce = kobj_lookup_method_mi(*basep, desc);
+ if (ce)
+ return ce;
+ }
+ }
+
+ return NULL;
+}
+
+kobj_method_t*
+kobj_lookup_method(kobj_class_t cls,
+ kobj_method_t **cep,
+ kobjop_desc_t desc)
+{
+ kobj_method_t *ce;
+
+#ifdef KOBJ_STATS
+ /*
+ * Correct for the 'hit' assumption in KOBJOPLOOKUP and record
+ * a 'miss'.
+ */
+ kobj_lookup_hits--;
+ kobj_lookup_misses++;
+#endif
+
+ ce = kobj_lookup_method_mi(cls, desc);
+ if (!ce)
+ ce = desc->deflt;
+ *cep = ce;
+ return ce;
+}
+
+void
+kobj_class_free(kobj_class_t cls)
+{
+ int i;
+ kobj_method_t *m;
+ void* ops = NULL;
+
+ KOBJ_ASSERT(MA_NOTOWNED);
+ KOBJ_LOCK();
+
+ /*
+ * Protect against a race between kobj_create and
+ * kobj_delete.
+ */
+ if (cls->refs == 0) {
+ /*
+ * Unregister any methods which are no longer used.
+ */
+ for (i = 0, m = cls->methods; m->desc; i++, m++)
+ kobj_unregister_method(m->desc);
+
+ /*
+ * Free memory and clean up.
+ */
+ ops = cls->ops;
+ cls->ops = NULL;
+ }
+
+ KOBJ_UNLOCK();
+
+ if (ops)
+ free(ops, M_KOBJ);
+}
+
+kobj_t
+kobj_create(kobj_class_t cls,
+ struct malloc_type *mtype,
+ int mflags)
+{
+ kobj_t obj;
+
+ /*
+ * Allocate and initialise the new object.
+ */
+ obj = malloc(cls->size, mtype, mflags | M_ZERO);
+ if (!obj)
+ return NULL;
+ kobj_init(obj, cls);
+
+ return obj;
+}
+
+void
+kobj_init(kobj_t obj, kobj_class_t cls)
+{
+ KOBJ_ASSERT(MA_NOTOWNED);
+ retry:
+ KOBJ_LOCK();
+
+ /*
+ * Consider compiling the class' method table.
+ */
+ if (!cls->ops) {
+ /*
+ * kobj_class_compile doesn't want the lock held
+ * because of the call to malloc - we drop the lock
+ * and re-try.
+ */
+ KOBJ_UNLOCK();
+ kobj_class_compile(cls);
+ goto retry;
+ }
+
+ obj->ops = cls->ops;
+ cls->refs++;
+
+ KOBJ_UNLOCK();
+}
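+
+/*
+ * Illustrative sketch (not from the original sources): classes are
+ * normally declared with the DEFINE_CLASS() macro from kobj.h and
+ * compiled lazily on first use; the "foo" names here are hypothetical.
+ *
+ *	static kobj_method_t foo_methods[] = {
+ *		KOBJMETHOD(device_probe, foo_probe),
+ *		KOBJMETHOD_END
+ *	};
+ *	DEFINE_CLASS(foo, foo_methods, sizeof(struct foo_softc));
+ *
+ *	kobj_t obj = kobj_create(&foo_class, M_DEVBUF, M_NOWAIT);
+ */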
+
+void
+kobj_delete(kobj_t obj, struct malloc_type *mtype)
+{
+ kobj_class_t cls = obj->ops->cls;
+ int refs;
+
+ /*
+ * Consider freeing the compiled method table for the class
+ * after its last instance is deleted. As an optimisation, we
+ * should defer this for a short while to avoid thrashing.
+ */
+ KOBJ_ASSERT(MA_NOTOWNED);
+ KOBJ_LOCK();
+ cls->refs--;
+ refs = cls->refs;
+ KOBJ_UNLOCK();
+
+ if (!refs)
+ kobj_class_free(cls);
+
+ obj->ops = NULL;
+ if (mtype)
+ free(obj, mtype);
+}
diff --git a/rtems/freebsd/kern/uipc_mbuf.c b/rtems/freebsd/kern/uipc_mbuf.c
new file mode 100644
index 00000000..3da23117
--- /dev/null
+++ b/rtems/freebsd/kern/uipc_mbuf.c
@@ -0,0 +1,2123 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 1982, 1986, 1988, 1991, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)uipc_mbuf.c 8.2 (Berkeley) 1/4/94
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/freebsd/local/opt_param.h>
+#include <rtems/freebsd/local/opt_mbuf_stress_test.h>
+#include <rtems/freebsd/local/opt_mbuf_profiling.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/limits.h>
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/sysctl.h>
+#include <rtems/freebsd/sys/domain.h>
+#include <rtems/freebsd/sys/protosw.h>
+#include <rtems/freebsd/sys/uio.h>
+
+int max_linkhdr;
+int max_protohdr;
+int max_hdr;
+int max_datalen;
+#ifdef MBUF_STRESS_TEST
+int m_defragpackets;
+int m_defragbytes;
+int m_defraguseless;
+int m_defragfailure;
+int m_defragrandomfailures;
+#endif
+
+/*
+ * sysctl(8) exported objects
+ */
+SYSCTL_INT(_kern_ipc, KIPC_MAX_LINKHDR, max_linkhdr, CTLFLAG_RD,
+ &max_linkhdr, 0, "Size of largest link layer header");
+SYSCTL_INT(_kern_ipc, KIPC_MAX_PROTOHDR, max_protohdr, CTLFLAG_RD,
+ &max_protohdr, 0, "Size of largest protocol layer header");
+SYSCTL_INT(_kern_ipc, KIPC_MAX_HDR, max_hdr, CTLFLAG_RD,
+ &max_hdr, 0, "Size of largest link plus protocol header");
+SYSCTL_INT(_kern_ipc, KIPC_MAX_DATALEN, max_datalen, CTLFLAG_RD,
+ &max_datalen, 0, "Minimum space left in mbuf after max_hdr");
+#ifdef MBUF_STRESS_TEST
+SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragpackets, CTLFLAG_RD,
+ &m_defragpackets, 0, "");
+SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragbytes, CTLFLAG_RD,
+ &m_defragbytes, 0, "");
+SYSCTL_INT(_kern_ipc, OID_AUTO, m_defraguseless, CTLFLAG_RD,
+ &m_defraguseless, 0, "");
+SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragfailure, CTLFLAG_RD,
+ &m_defragfailure, 0, "");
+SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragrandomfailures, CTLFLAG_RW,
+ &m_defragrandomfailures, 0, "");
+#endif
+
+/*
+ * Allocate a given length worth of mbufs and/or clusters (whatever fits
+ * best) and return a pointer to the top of the allocated chain. If an
+ * existing mbuf chain is provided, then we will append the new chain
+ * to the existing one but still return the top of the newly allocated
+ * chain.
+ */
+struct mbuf *
+m_getm2(struct mbuf *m, int len, int how, short type, int flags)
+{
+ struct mbuf *mb, *nm = NULL, *mtail = NULL;
+
+ KASSERT(len >= 0, ("%s: len is < 0", __func__));
+
+ /* Validate flags. */
+ flags &= (M_PKTHDR | M_EOR);
+
+ /* Packet header mbuf must be first in chain. */
+ if ((flags & M_PKTHDR) && m != NULL)
+ flags &= ~M_PKTHDR;
+
+ /* Loop and append maximum sized mbufs to the chain tail. */
+ while (len > 0) {
+ if (len > MCLBYTES)
+ mb = m_getjcl(how, type, (flags & M_PKTHDR),
+ MJUMPAGESIZE);
+ else if (len >= MINCLSIZE)
+ mb = m_getcl(how, type, (flags & M_PKTHDR));
+ else if (flags & M_PKTHDR)
+ mb = m_gethdr(how, type);
+ else
+ mb = m_get(how, type);
+
+ /* Fail the whole operation if one mbuf can't be allocated. */
+ if (mb == NULL) {
+ if (nm != NULL)
+ m_freem(nm);
+ return (NULL);
+ }
+
+ /* Book keeping. */
+ len -= (mb->m_flags & M_EXT) ? mb->m_ext.ext_size :
+ ((mb->m_flags & M_PKTHDR) ? MHLEN : MLEN);
+ if (mtail != NULL)
+ mtail->m_next = mb;
+ else
+ nm = mb;
+ mtail = mb;
+ flags &= ~M_PKTHDR; /* Only valid on the first mbuf. */
+ }
+ if (flags & M_EOR)
+ mtail->m_flags |= M_EOR; /* Only valid on the last mbuf. */
+
+ /* If mbuf was supplied, append new chain to the end of it. */
+ if (m != NULL) {
+ for (mtail = m; mtail->m_next != NULL; mtail = mtail->m_next)
+ ;
+ mtail->m_next = nm;
+ mtail->m_flags &= ~M_EOR;
+ } else
+ m = nm;
+
+ return (m);
+}
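+
+/*
+ * Example (illustrative): allocate a fresh packet-header chain large
+ * enough for 4000 bytes, failing rather than sleeping.
+ *
+ *	struct mbuf *m;
+ *
+ *	m = m_getm2(NULL, 4000, M_DONTWAIT, MT_DATA, M_PKTHDR);
+ *	if (m == NULL)
+ *		return (ENOBUFS);
+ */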
+
+/*
+ * Free an entire chain of mbufs and associated external buffers, if
+ * applicable.
+ */
+void
+m_freem(struct mbuf *mb)
+{
+
+ while (mb != NULL)
+ mb = m_free(mb);
+}
+
+/*-
+ * Configure a provided mbuf to refer to the provided external storage
+ * buffer and set up a reference count for said buffer.  If the setting
+ * up of the reference count fails, the M_EXT bit will not be set.  If
+ * successful, the M_EXT bit is set in the mbuf's flags.
+ *
+ * Arguments:
+ * mb The existing mbuf to which to attach the provided buffer.
+ * buf The address of the provided external storage buffer.
+ * size The size of the provided buffer.
+ * freef A pointer to a routine that is responsible for freeing the
+ * provided external storage buffer.
+ * args A pointer to an argument structure (of any type) to be passed
+ * to the provided freef routine (may be NULL).
+ * flags Any other flags to be passed to the provided mbuf.
+ * type The type that the external storage buffer should be
+ * labeled with.
+ *
+ * Returns:
+ * Nothing.
+ */
+void
+m_extadd(struct mbuf *mb, caddr_t buf, u_int size,
+ void (*freef)(void *, void *), void *arg1, void *arg2, int flags, int type)
+{
+ KASSERT(type != EXT_CLUSTER, ("%s: EXT_CLUSTER not allowed", __func__));
+
+ if (type != EXT_EXTREF)
+ mb->m_ext.ref_cnt = (u_int *)uma_zalloc(zone_ext_refcnt, M_NOWAIT);
+ if (mb->m_ext.ref_cnt != NULL) {
+ *(mb->m_ext.ref_cnt) = 1;
+ mb->m_flags |= (M_EXT | flags);
+ mb->m_ext.ext_buf = buf;
+ mb->m_data = mb->m_ext.ext_buf;
+ mb->m_ext.ext_size = size;
+ mb->m_ext.ext_free = freef;
+ mb->m_ext.ext_arg1 = arg1;
+ mb->m_ext.ext_arg2 = arg2;
+ mb->m_ext.ext_type = type;
+ }
+}
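+
+/*
+ * Usage sketch (illustrative; the buffer, size and free routine are
+ * hypothetical): attach driver-owned storage to an mbuf and check
+ * that the attach took, since a refcount allocation failure leaves
+ * M_EXT clear.
+ *
+ *	m_extadd(m, buf, MYBUFSIZE, my_ext_free, arg, NULL, 0,
+ *	    EXT_NET_DRV);
+ *	if ((m->m_flags & M_EXT) == 0)
+ *		return (ENOBUFS);
+ */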
+
+/*
+ * Non-directly-exported function to clean up after mbufs with M_EXT
+ * storage attached to them if the reference count hits 1.
+ */
+void
+mb_free_ext(struct mbuf *m)
+{
+ int skipmbuf;
+
+ KASSERT((m->m_flags & M_EXT) == M_EXT, ("%s: M_EXT not set", __func__));
+ KASSERT(m->m_ext.ref_cnt != NULL, ("%s: ref_cnt not set", __func__));
+
+	/*
+	 * Check if the header is embedded in the cluster.
+	 */
+ skipmbuf = (m->m_flags & M_NOFREE);
+
+ /* Free attached storage if this mbuf is the only reference to it. */
+ if (*(m->m_ext.ref_cnt) == 1 ||
+ atomic_fetchadd_int(m->m_ext.ref_cnt, -1) == 1) {
+ switch (m->m_ext.ext_type) {
+ case EXT_PACKET: /* The packet zone is special. */
+ if (*(m->m_ext.ref_cnt) == 0)
+ *(m->m_ext.ref_cnt) = 1;
+ uma_zfree(zone_pack, m);
+ return; /* Job done. */
+ case EXT_CLUSTER:
+ uma_zfree(zone_clust, m->m_ext.ext_buf);
+ break;
+ case EXT_JUMBOP:
+ uma_zfree(zone_jumbop, m->m_ext.ext_buf);
+ break;
+ case EXT_JUMBO9:
+ uma_zfree(zone_jumbo9, m->m_ext.ext_buf);
+ break;
+ case EXT_JUMBO16:
+ uma_zfree(zone_jumbo16, m->m_ext.ext_buf);
+ break;
+ case EXT_SFBUF:
+ case EXT_NET_DRV:
+ case EXT_MOD_TYPE:
+ case EXT_DISPOSABLE:
+ *(m->m_ext.ref_cnt) = 0;
+ uma_zfree(zone_ext_refcnt, __DEVOLATILE(u_int *,
+ m->m_ext.ref_cnt));
+ /* FALLTHROUGH */
+ case EXT_EXTREF:
+ KASSERT(m->m_ext.ext_free != NULL,
+ ("%s: ext_free not set", __func__));
+ (*(m->m_ext.ext_free))(m->m_ext.ext_arg1,
+ m->m_ext.ext_arg2);
+ break;
+ default:
+ KASSERT(m->m_ext.ext_type == 0,
+ ("%s: unknown ext_type", __func__));
+ }
+ }
+ if (skipmbuf)
+ return;
+
+ /*
+ * Free this mbuf back to the mbuf zone with all m_ext
+ * information purged.
+ */
+ m->m_ext.ext_buf = NULL;
+ m->m_ext.ext_free = NULL;
+ m->m_ext.ext_arg1 = NULL;
+ m->m_ext.ext_arg2 = NULL;
+ m->m_ext.ref_cnt = NULL;
+ m->m_ext.ext_size = 0;
+ m->m_ext.ext_type = 0;
+ m->m_flags &= ~M_EXT;
+ uma_zfree(zone_mbuf, m);
+}
+
+/*
+ * Attach the cluster from *m to *n, set up m_ext in *n
+ * and bump the refcount of the cluster.
+ */
+static void
+mb_dupcl(struct mbuf *n, struct mbuf *m)
+{
+ KASSERT((m->m_flags & M_EXT) == M_EXT, ("%s: M_EXT not set", __func__));
+ KASSERT(m->m_ext.ref_cnt != NULL, ("%s: ref_cnt not set", __func__));
+ KASSERT((n->m_flags & M_EXT) == 0, ("%s: M_EXT set", __func__));
+
+ if (*(m->m_ext.ref_cnt) == 1)
+ *(m->m_ext.ref_cnt) += 1;
+ else
+ atomic_add_int(m->m_ext.ref_cnt, 1);
+ n->m_ext.ext_buf = m->m_ext.ext_buf;
+ n->m_ext.ext_free = m->m_ext.ext_free;
+ n->m_ext.ext_arg1 = m->m_ext.ext_arg1;
+ n->m_ext.ext_arg2 = m->m_ext.ext_arg2;
+ n->m_ext.ext_size = m->m_ext.ext_size;
+ n->m_ext.ref_cnt = m->m_ext.ref_cnt;
+ n->m_ext.ext_type = m->m_ext.ext_type;
+ n->m_flags |= M_EXT;
+ n->m_flags |= m->m_flags & M_RDONLY;
+}
+
+/*
+ * Clean up mbuf (chain) from any tags and packet headers.
+ * If "all" is set then the first mbuf in the chain will be
+ * cleaned too.
+ */
+void
+m_demote(struct mbuf *m0, int all)
+{
+ struct mbuf *m;
+
+ for (m = all ? m0 : m0->m_next; m != NULL; m = m->m_next) {
+ if (m->m_flags & M_PKTHDR) {
+ m_tag_delete_chain(m, NULL);
+ m->m_flags &= ~M_PKTHDR;
+ bzero(&m->m_pkthdr, sizeof(struct pkthdr));
+ }
+ if (m != m0 && m->m_nextpkt != NULL) {
+ KASSERT(m->m_nextpkt == NULL,
+ ("%s: m_nextpkt not NULL", __func__));
+ m_freem(m->m_nextpkt);
+ m->m_nextpkt = NULL;
+ }
+ m->m_flags = m->m_flags & (M_EXT|M_RDONLY|M_FREELIST|M_NOFREE);
+ }
+}
+
+/*
+ * Sanity checks on an mbuf (chain) for use in KASSERT() and general
+ * debugging.
+ * Returns 1 when all tests pass; on a failed test it panics (with
+ * INVARIANTS) or prints a diagnostic via M_SANITY_ACTION.
+ * sanitize: 0 to just run M_SANITY_ACTION on failure, 1 to repair the
+ * chain in place, deliberately garbling stale pointers so that later
+ * misuse blows up visibly.
+ */
+int
+m_sanity(struct mbuf *m0, int sanitize)
+{
+ struct mbuf *m;
+ caddr_t a, b;
+ int pktlen = 0;
+
+#ifdef INVARIANTS
+#define M_SANITY_ACTION(s) panic("mbuf %p: " s, m)
+#else
+#define M_SANITY_ACTION(s) printf("mbuf %p: " s, m)
+#endif
+
+ for (m = m0; m != NULL; m = m->m_next) {
+ /*
+ * Basic pointer checks. If any of these fails then some
+ * unrelated kernel memory before or after us is trashed.
+ * No way to recover from that.
+ */
+ a = ((m->m_flags & M_EXT) ? m->m_ext.ext_buf :
+ ((m->m_flags & M_PKTHDR) ? (caddr_t)(&m->m_pktdat) :
+ (caddr_t)(&m->m_dat)) );
+ b = (caddr_t)(a + (m->m_flags & M_EXT ? m->m_ext.ext_size :
+ ((m->m_flags & M_PKTHDR) ? MHLEN : MLEN)));
+ if ((caddr_t)m->m_data < a)
+ M_SANITY_ACTION("m_data outside mbuf data range left");
+ if ((caddr_t)m->m_data > b)
+ M_SANITY_ACTION("m_data outside mbuf data range right");
+ if ((caddr_t)m->m_data + m->m_len > b)
+			M_SANITY_ACTION("m_data + m_len exceeds mbuf space");
+ if ((m->m_flags & M_PKTHDR) && m->m_pkthdr.header) {
+ if ((caddr_t)m->m_pkthdr.header < a ||
+ (caddr_t)m->m_pkthdr.header > b)
+ M_SANITY_ACTION("m_pkthdr.header outside mbuf data range");
+ }
+
+ /* m->m_nextpkt may only be set on first mbuf in chain. */
+ if (m != m0 && m->m_nextpkt != NULL) {
+ if (sanitize) {
+ m_freem(m->m_nextpkt);
+ m->m_nextpkt = (struct mbuf *)0xDEADC0DE;
+ } else
+ M_SANITY_ACTION("m->m_nextpkt on in-chain mbuf");
+ }
+
+ /* packet length (not mbuf length!) calculation */
+ if (m0->m_flags & M_PKTHDR)
+ pktlen += m->m_len;
+
+ /* m_tags may only be attached to first mbuf in chain. */
+ if (m != m0 && m->m_flags & M_PKTHDR &&
+ !SLIST_EMPTY(&m->m_pkthdr.tags)) {
+ if (sanitize) {
+ m_tag_delete_chain(m, NULL);
+ /* put in 0xDEADC0DE perhaps? */
+ } else
+ M_SANITY_ACTION("m_tags on in-chain mbuf");
+ }
+
+ /* M_PKTHDR may only be set on first mbuf in chain */
+ if (m != m0 && m->m_flags & M_PKTHDR) {
+ if (sanitize) {
+ bzero(&m->m_pkthdr, sizeof(m->m_pkthdr));
+ m->m_flags &= ~M_PKTHDR;
+				/* put in 0xDEADC0DE and leave hdr flag in */
+ } else
+ M_SANITY_ACTION("M_PKTHDR on in-chain mbuf");
+ }
+ }
+ m = m0;
+ if (pktlen && pktlen != m->m_pkthdr.len) {
+ if (sanitize)
+ m->m_pkthdr.len = 0;
+ else
+ M_SANITY_ACTION("m_pkthdr.len != mbuf chain length");
+ }
+ return 1;
+
+#undef M_SANITY_ACTION
+}
+
+/*
+ * "Move" mbuf pkthdr from "from" to "to".
+ * "from" must have M_PKTHDR set, and "to" must be empty.
+ */
+void
+m_move_pkthdr(struct mbuf *to, struct mbuf *from)
+{
+
+#if 0
+ /* see below for why these are not enabled */
+ M_ASSERTPKTHDR(to);
+ /* Note: with MAC, this may not be a good assertion. */
+ KASSERT(SLIST_EMPTY(&to->m_pkthdr.tags),
+ ("m_move_pkthdr: to has tags"));
+#endif
+#ifdef MAC
+ /*
+ * XXXMAC: It could be this should also occur for non-MAC?
+ */
+ if (to->m_flags & M_PKTHDR)
+ m_tag_delete_chain(to, NULL);
+#endif
+ to->m_flags = (from->m_flags & M_COPYFLAGS) | (to->m_flags & M_EXT);
+ if ((to->m_flags & M_EXT) == 0)
+ to->m_data = to->m_pktdat;
+ to->m_pkthdr = from->m_pkthdr; /* especially tags */
+ SLIST_INIT(&from->m_pkthdr.tags); /* purge tags from src */
+ from->m_flags &= ~M_PKTHDR;
+}
+
+/*
+ * Duplicate "from"'s mbuf pkthdr in "to".
+ * "from" must have M_PKTHDR set, and "to" must be empty.
+ * In particular, this does a deep copy of the packet tags.
+ */
+int
+m_dup_pkthdr(struct mbuf *to, struct mbuf *from, int how)
+{
+
+#if 0
+ /*
+ * The mbuf allocator only initializes the pkthdr
+ * when the mbuf is allocated with MGETHDR. Many users
+ * (e.g. m_copy*, m_prepend) use MGET and then
+ * smash the pkthdr as needed causing these
+ * assertions to trip. For now just disable them.
+ */
+ M_ASSERTPKTHDR(to);
+ /* Note: with MAC, this may not be a good assertion. */
+ KASSERT(SLIST_EMPTY(&to->m_pkthdr.tags), ("m_dup_pkthdr: to has tags"));
+#endif
+ MBUF_CHECKSLEEP(how);
+#ifdef MAC
+ if (to->m_flags & M_PKTHDR)
+ m_tag_delete_chain(to, NULL);
+#endif
+ to->m_flags = (from->m_flags & M_COPYFLAGS) | (to->m_flags & M_EXT);
+ if ((to->m_flags & M_EXT) == 0)
+ to->m_data = to->m_pktdat;
+ to->m_pkthdr = from->m_pkthdr;
+ SLIST_INIT(&to->m_pkthdr.tags);
+ return (m_tag_copy_chain(to, from, MBTOM(how)));
+}
+
+/*
+ * Lesser-used path for M_PREPEND:
+ * allocate new mbuf to prepend to chain,
+ * copy junk along.
+ */
+struct mbuf *
+m_prepend(struct mbuf *m, int len, int how)
+{
+ struct mbuf *mn;
+
+ if (m->m_flags & M_PKTHDR)
+ MGETHDR(mn, how, m->m_type);
+ else
+ MGET(mn, how, m->m_type);
+ if (mn == NULL) {
+ m_freem(m);
+ return (NULL);
+ }
+ if (m->m_flags & M_PKTHDR)
+ M_MOVE_PKTHDR(mn, m);
+ mn->m_next = m;
+ m = mn;
+	if (m->m_flags & M_PKTHDR) {
+ if (len < MHLEN)
+ MH_ALIGN(m, len);
+ } else {
+ if (len < MLEN)
+ M_ALIGN(m, len);
+ }
+ m->m_len = len;
+ return (m);
+}
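+
+/*
+ * Editor's usage sketch (not part of the original source): reserving
+ * room for a hypothetical 8-byte protocol header.  Callers normally
+ * use the M_PREPEND() macro, which falls back to this function only
+ * when the first mbuf lacks leading space.
+ */
+#if 0	/* illustrative only */
+static struct mbuf *
+example_prepend8(struct mbuf *m)
+{
+
+	/* A NULL return means the old chain was already freed. */
+	return (m_prepend(m, 8, M_DONTWAIT));
+}
+#endif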
+
+/*
+ * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
+ * continuing for "len" bytes. If len is M_COPYALL, copy to end of mbuf.
+ * The wait parameter is a choice of M_WAIT/M_DONTWAIT from caller.
+ * Note that the copy is read-only, because clusters are not copied,
+ * only their reference counts are incremented.
+ */
+struct mbuf *
+m_copym(struct mbuf *m, int off0, int len, int wait)
+{
+ struct mbuf *n, **np;
+ int off = off0;
+ struct mbuf *top;
+ int copyhdr = 0;
+
+ KASSERT(off >= 0, ("m_copym, negative off %d", off));
+ KASSERT(len >= 0, ("m_copym, negative len %d", len));
+ MBUF_CHECKSLEEP(wait);
+ if (off == 0 && m->m_flags & M_PKTHDR)
+ copyhdr = 1;
+ while (off > 0) {
+ KASSERT(m != NULL, ("m_copym, offset > size of mbuf chain"));
+ if (off < m->m_len)
+ break;
+ off -= m->m_len;
+ m = m->m_next;
+ }
+ np = &top;
+ top = 0;
+ while (len > 0) {
+ if (m == NULL) {
+ KASSERT(len == M_COPYALL,
+ ("m_copym, length > size of mbuf chain"));
+ break;
+ }
+ if (copyhdr)
+ MGETHDR(n, wait, m->m_type);
+ else
+ MGET(n, wait, m->m_type);
+ *np = n;
+ if (n == NULL)
+ goto nospace;
+ if (copyhdr) {
+ if (!m_dup_pkthdr(n, m, wait))
+ goto nospace;
+ if (len == M_COPYALL)
+ n->m_pkthdr.len -= off0;
+ else
+ n->m_pkthdr.len = len;
+ copyhdr = 0;
+ }
+ n->m_len = min(len, m->m_len - off);
+ if (m->m_flags & M_EXT) {
+ n->m_data = m->m_data + off;
+ mb_dupcl(n, m);
+ } else
+ bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
+ (u_int)n->m_len);
+ if (len != M_COPYALL)
+ len -= n->m_len;
+ off = 0;
+ m = m->m_next;
+ np = &n->m_next;
+ }
+ if (top == NULL)
+ mbstat.m_mcfail++; /* XXX: No consistency. */
+
+ return (top);
+nospace:
+ m_freem(top);
+ mbstat.m_mcfail++; /* XXX: No consistency. */
+ return (NULL);
+}
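+
+/*
+ * Editor's usage sketch (not part of the original source): taking a
+ * zero-copy, read-only reference to a whole packet, e.g. for a tap.
+ */
+#if 0	/* illustrative only */
+static struct mbuf *
+example_tap_copy(struct mbuf *m)
+{
+
+	/* NULL on failure; "m" itself is left untouched either way. */
+	return (m_copym(m, 0, M_COPYALL, M_DONTWAIT));
+}
+#endif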
+
+/*
+ * Returns the mbuf chain, with a new head in the prepending case.
+ * Copies len bytes starting at off from mbuf (chain) n to mbuf
+ * (chain) m, either appending or prepending the data.
+ * The resulting mbuf (chain) m is fully writeable.
+ * m is the destination (made writeable); n is the source.
+ * prep: 0 to append, 1 to prepend.
+ * how: M_WAIT or M_DONTWAIT.
+ */
+
+static int
+m_bcopyxxx(void *s, void *t, u_int len)
+{
+ bcopy(s, t, (size_t)len);
+ return 0;
+}
+
+struct mbuf *
+m_copymdata(struct mbuf *m, struct mbuf *n, int off, int len,
+ int prep, int how)
+{
+ struct mbuf *mm, *x, *z, *prev = NULL;
+ caddr_t p;
+ int i, nlen = 0;
+	char buf[MLEN];
+
+ KASSERT(m != NULL && n != NULL, ("m_copymdata, no target or source"));
+ KASSERT(off >= 0, ("m_copymdata, negative off %d", off));
+ KASSERT(len >= 0, ("m_copymdata, negative len %d", len));
+ KASSERT(prep == 0 || prep == 1, ("m_copymdata, unknown direction %d", prep));
+
+ mm = m;
+ if (!prep) {
+ while(mm->m_next) {
+ prev = mm;
+ mm = mm->m_next;
+ }
+ }
+ for (z = n; z != NULL; z = z->m_next)
+ nlen += z->m_len;
+ if (len == M_COPYALL)
+ len = nlen - off;
+ if (off + len > nlen || len < 1)
+ return NULL;
+
+ if (!M_WRITABLE(mm)) {
+ /* XXX: Use proper m_xxx function instead. */
+ x = m_getcl(how, MT_DATA, mm->m_flags);
+ if (x == NULL)
+ return NULL;
+ bcopy(mm->m_ext.ext_buf, x->m_ext.ext_buf, x->m_ext.ext_size);
+ p = x->m_ext.ext_buf + (mm->m_data - mm->m_ext.ext_buf);
+ x->m_data = p;
+ mm->m_next = NULL;
+ if (mm != m)
+ prev->m_next = x;
+ m_free(mm);
+ mm = x;
+ }
+
+ /*
+	 * Append/prepend the data, allocating mbufs as necessary.
+ */
+ /* Shortcut if enough free space in first/last mbuf. */
+ if (!prep && M_TRAILINGSPACE(mm) >= len) {
+ m_apply(n, off, len, m_bcopyxxx, mtod(mm, caddr_t) +
+ mm->m_len);
+ mm->m_len += len;
+ mm->m_pkthdr.len += len;
+ return m;
+ }
+ if (prep && M_LEADINGSPACE(mm) >= len) {
+ mm->m_data = mtod(mm, caddr_t) - len;
+ m_apply(n, off, len, m_bcopyxxx, mtod(mm, caddr_t));
+ mm->m_len += len;
+ mm->m_pkthdr.len += len;
+ return mm;
+ }
+
+ /* Expand first/last mbuf to cluster if possible. */
+ if (!prep && !(mm->m_flags & M_EXT) && len > M_TRAILINGSPACE(mm)) {
+ bcopy(mm->m_data, &buf, mm->m_len);
+ m_clget(mm, how);
+ if (!(mm->m_flags & M_EXT))
+ return NULL;
+ bcopy(&buf, mm->m_ext.ext_buf, mm->m_len);
+ mm->m_data = mm->m_ext.ext_buf;
+ mm->m_pkthdr.header = NULL;
+ }
+ if (prep && !(mm->m_flags & M_EXT) && len > M_LEADINGSPACE(mm)) {
+ bcopy(mm->m_data, &buf, mm->m_len);
+ m_clget(mm, how);
+ if (!(mm->m_flags & M_EXT))
+ return NULL;
+ bcopy(&buf, (caddr_t *)mm->m_ext.ext_buf +
+ mm->m_ext.ext_size - mm->m_len, mm->m_len);
+ mm->m_data = (caddr_t)mm->m_ext.ext_buf +
+ mm->m_ext.ext_size - mm->m_len;
+ mm->m_pkthdr.header = NULL;
+ }
+
+ /* Append/prepend as many mbuf (clusters) as necessary to fit len. */
+ if (!prep && len > M_TRAILINGSPACE(mm)) {
+ if (!m_getm(mm, len - M_TRAILINGSPACE(mm), how, MT_DATA))
+ return NULL;
+ }
+ if (prep && len > M_LEADINGSPACE(mm)) {
+ if (!(z = m_getm(NULL, len - M_LEADINGSPACE(mm), how, MT_DATA)))
+ return NULL;
+ i = 0;
+ for (x = z; x != NULL; x = x->m_next) {
+ i += x->m_flags & M_EXT ? x->m_ext.ext_size :
+ (x->m_flags & M_PKTHDR ? MHLEN : MLEN);
+ if (!x->m_next)
+ break;
+ }
+ z->m_data += i - len;
+ m_move_pkthdr(mm, z);
+ x->m_next = mm;
+ mm = z;
+ }
+
+ /* Seek to start position in source mbuf. Optimization for long chains. */
+ while (off > 0) {
+ if (off < n->m_len)
+ break;
+ off -= n->m_len;
+ n = n->m_next;
+ }
+
+ /* Copy data into target mbuf. */
+ z = mm;
+ while (len > 0) {
+ KASSERT(z != NULL, ("m_copymdata, falling off target edge"));
+ i = M_TRAILINGSPACE(z);
+ m_apply(n, off, i, m_bcopyxxx, mtod(z, caddr_t) + z->m_len);
+ z->m_len += i;
+ /* fixup pkthdr.len if necessary */
+ if ((prep ? mm : m)->m_flags & M_PKTHDR)
+ (prep ? mm : m)->m_pkthdr.len += i;
+ off += i;
+ len -= i;
+ z = z->m_next;
+ }
+ return (prep ? mm : m);
+}
+
+/*
+ * Copy an entire packet, including header (which must be present).
+ * An optimization of the common case `m_copym(m, 0, M_COPYALL, how)'.
+ * Note that the copy is read-only, because clusters are not copied,
+ * only their reference counts are incremented.
+ * Preserve alignment of the first mbuf so if the creator has left
+ * some room at the beginning (e.g. for inserting protocol headers)
+ * the copies still have the room available.
+ */
+struct mbuf *
+m_copypacket(struct mbuf *m, int how)
+{
+ struct mbuf *top, *n, *o;
+
+ MBUF_CHECKSLEEP(how);
+ MGET(n, how, m->m_type);
+ top = n;
+ if (n == NULL)
+ goto nospace;
+
+ if (!m_dup_pkthdr(n, m, how))
+ goto nospace;
+ n->m_len = m->m_len;
+ if (m->m_flags & M_EXT) {
+ n->m_data = m->m_data;
+ mb_dupcl(n, m);
+ } else {
+ n->m_data = n->m_pktdat + (m->m_data - m->m_pktdat );
+ bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
+ }
+
+ m = m->m_next;
+ while (m) {
+ MGET(o, how, m->m_type);
+ if (o == NULL)
+ goto nospace;
+
+ n->m_next = o;
+ n = n->m_next;
+
+ n->m_len = m->m_len;
+ if (m->m_flags & M_EXT) {
+ n->m_data = m->m_data;
+ mb_dupcl(n, m);
+ } else {
+ bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
+ }
+
+ m = m->m_next;
+ }
+ return top;
+nospace:
+ m_freem(top);
+ mbstat.m_mcfail++; /* XXX: No consistency. */
+ return (NULL);
+}
+
+/*
+ * Copy data from an mbuf chain starting "off" bytes from the beginning,
+ * continuing for "len" bytes, into the indicated buffer.
+ */
+void
+m_copydata(const struct mbuf *m, int off, int len, caddr_t cp)
+{
+ u_int count;
+
+ KASSERT(off >= 0, ("m_copydata, negative off %d", off));
+ KASSERT(len >= 0, ("m_copydata, negative len %d", len));
+ while (off > 0) {
+ KASSERT(m != NULL, ("m_copydata, offset > size of mbuf chain"));
+ if (off < m->m_len)
+ break;
+ off -= m->m_len;
+ m = m->m_next;
+ }
+ while (len > 0) {
+ KASSERT(m != NULL, ("m_copydata, length > size of mbuf chain"));
+ count = min(m->m_len - off, len);
+ bcopy(mtod(m, caddr_t) + off, cp, count);
+ len -= count;
+ cp += count;
+ off = 0;
+ m = m->m_next;
+ }
+}
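+
+/*
+ * Editor's usage sketch (not part of the original source): linearizing
+ * the first hdrlen bytes of a chain into a caller-supplied buffer
+ * without modifying the chain itself.
+ */
+#if 0	/* illustrative only */
+static void
+example_peek(const struct mbuf *m, void *dst, int hdrlen)
+{
+
+	/* The chain must hold at least hdrlen bytes from offset 0. */
+	m_copydata(m, 0, hdrlen, (caddr_t)dst);
+}
+#endif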
+
+/*
+ * Copy a packet header mbuf chain into a completely new chain, including
+ * copying any mbuf clusters. Use this instead of m_copypacket() when
+ * you need a writable copy of an mbuf chain.
+ */
+struct mbuf *
+m_dup(struct mbuf *m, int how)
+{
+ struct mbuf **p, *top = NULL;
+ int remain, moff, nsize;
+
+ MBUF_CHECKSLEEP(how);
+ /* Sanity check */
+ if (m == NULL)
+ return (NULL);
+ M_ASSERTPKTHDR(m);
+
+ /* While there's more data, get a new mbuf, tack it on, and fill it */
+ remain = m->m_pkthdr.len;
+ moff = 0;
+ p = &top;
+ while (remain > 0 || top == NULL) { /* allow m->m_pkthdr.len == 0 */
+ struct mbuf *n;
+
+ /* Get the next new mbuf */
+ if (remain >= MINCLSIZE) {
+ n = m_getcl(how, m->m_type, 0);
+ nsize = MCLBYTES;
+ } else {
+ n = m_get(how, m->m_type);
+ nsize = MLEN;
+ }
+ if (n == NULL)
+ goto nospace;
+
+ if (top == NULL) { /* First one, must be PKTHDR */
+ if (!m_dup_pkthdr(n, m, how)) {
+ m_free(n);
+ goto nospace;
+ }
+ if ((n->m_flags & M_EXT) == 0)
+ nsize = MHLEN;
+ }
+ n->m_len = 0;
+
+ /* Link it into the new chain */
+ *p = n;
+ p = &n->m_next;
+
+ /* Copy data from original mbuf(s) into new mbuf */
+ while (n->m_len < nsize && m != NULL) {
+ int chunk = min(nsize - n->m_len, m->m_len - moff);
+
+ bcopy(m->m_data + moff, n->m_data + n->m_len, chunk);
+ moff += chunk;
+ n->m_len += chunk;
+ remain -= chunk;
+ if (moff == m->m_len) {
+ m = m->m_next;
+ moff = 0;
+ }
+ }
+
+ /* Check correct total mbuf length */
+ KASSERT((remain > 0 && m != NULL) || (remain == 0 && m == NULL),
+ ("%s: bogus m_pkthdr.len", __func__));
+ }
+ return (top);
+
+nospace:
+ m_freem(top);
+ mbstat.m_mcfail++; /* XXX: No consistency. */
+ return (NULL);
+}
+
+/*
+ * Concatenate mbuf chain n to m.
+ * Both chains must be of the same type (e.g. MT_DATA).
+ * Any m_pkthdr is not updated.
+ */
+void
+m_cat(struct mbuf *m, struct mbuf *n)
+{
+ while (m->m_next)
+ m = m->m_next;
+ while (n) {
+ if (m->m_flags & M_EXT ||
+ m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) {
+ /* just join the two chains */
+ m->m_next = n;
+ return;
+ }
+ /* splat the data from one into the other */
+ bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
+ (u_int)n->m_len);
+ m->m_len += n->m_len;
+ n = m_free(n);
+ }
+}
+
+void
+m_adj(struct mbuf *mp, int req_len)
+{
+ int len = req_len;
+ struct mbuf *m;
+ int count;
+
+ if ((m = mp) == NULL)
+ return;
+ if (len >= 0) {
+ /*
+ * Trim from head.
+ */
+ while (m != NULL && len > 0) {
+ if (m->m_len <= len) {
+ len -= m->m_len;
+ m->m_len = 0;
+ m = m->m_next;
+ } else {
+ m->m_len -= len;
+ m->m_data += len;
+ len = 0;
+ }
+ }
+ if (mp->m_flags & M_PKTHDR)
+ mp->m_pkthdr.len -= (req_len - len);
+ } else {
+ /*
+ * Trim from tail. Scan the mbuf chain,
+ * calculating its length and finding the last mbuf.
+ * If the adjustment only affects this mbuf, then just
+ * adjust and return. Otherwise, rescan and truncate
+ * after the remaining size.
+ */
+ len = -len;
+ count = 0;
+ for (;;) {
+ count += m->m_len;
+ if (m->m_next == (struct mbuf *)0)
+ break;
+ m = m->m_next;
+ }
+ if (m->m_len >= len) {
+ m->m_len -= len;
+ if (mp->m_flags & M_PKTHDR)
+ mp->m_pkthdr.len -= len;
+ return;
+ }
+ count -= len;
+ if (count < 0)
+ count = 0;
+ /*
+ * Correct length for chain is "count".
+ * Find the mbuf with last data, adjust its length,
+ * and toss data from remaining mbufs on chain.
+ */
+ m = mp;
+ if (m->m_flags & M_PKTHDR)
+ m->m_pkthdr.len = count;
+ for (; m; m = m->m_next) {
+ if (m->m_len >= count) {
+ m->m_len = count;
+ if (m->m_next != NULL) {
+ m_freem(m->m_next);
+ m->m_next = NULL;
+ }
+ break;
+ }
+ count -= m->m_len;
+ }
+ }
+}
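+
+/*
+ * Editor's usage sketch (not part of the original source): a positive
+ * length trims from the head of the chain, a negative one from the
+ * tail; the 14/4 byte counts below are arbitrary examples.
+ */
+#if 0	/* illustrative only */
+static void
+example_trim(struct mbuf *m)
+{
+
+	m_adj(m, 14);	/* positive length: trim from the head */
+	m_adj(m, -4);	/* negative length: trim from the tail */
+}
+#endif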
+
+/*
+ * Rearrange an mbuf chain so that len bytes are contiguous
+ * and in the data area of an mbuf (so that mtod and dtom
+ * will work for a structure of size len). Returns the resulting
+ * mbuf chain on success, frees it and returns null on failure.
+ * If there is room, it will add up to max_protohdr-len extra bytes to the
+ * contiguous region in an attempt to avoid being called next time.
+ */
+struct mbuf *
+m_pullup(struct mbuf *n, int len)
+{
+ struct mbuf *m;
+ int count;
+ int space;
+
+ /*
+ * If first mbuf has no cluster, and has room for len bytes
+ * without shifting current data, pullup into it,
+ * otherwise allocate a new mbuf to prepend to the chain.
+ */
+ if ((n->m_flags & M_EXT) == 0 &&
+ n->m_data + len < &n->m_dat[MLEN] && n->m_next) {
+ if (n->m_len >= len)
+ return (n);
+ m = n;
+ n = n->m_next;
+ len -= m->m_len;
+ } else {
+ if (len > MHLEN)
+ goto bad;
+ MGET(m, M_DONTWAIT, n->m_type);
+ if (m == NULL)
+ goto bad;
+ m->m_len = 0;
+ if (n->m_flags & M_PKTHDR)
+ M_MOVE_PKTHDR(m, n);
+ }
+ space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
+ do {
+ count = min(min(max(len, max_protohdr), space), n->m_len);
+ bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
+ (u_int)count);
+ len -= count;
+ m->m_len += count;
+ n->m_len -= count;
+ space -= count;
+ if (n->m_len)
+ n->m_data += count;
+ else
+ n = m_free(n);
+ } while (len > 0 && n);
+ if (len > 0) {
+ (void) m_free(m);
+ goto bad;
+ }
+ m->m_next = n;
+ return (m);
+bad:
+ m_freem(n);
+ mbstat.m_mpfail++; /* XXX: No consistency. */
+ return (NULL);
+}
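+
+/*
+ * Editor's usage sketch (not part of the original source): the classic
+ * header-parsing idiom.  Make the first hdrlen bytes contiguous before
+ * casting via mtod(); on failure the chain has already been freed.
+ */
+#if 0	/* illustrative only */
+static struct mbuf *
+example_pullup(struct mbuf *m, int hdrlen)
+{
+
+	if (m->m_len < hdrlen && (m = m_pullup(m, hdrlen)) == NULL)
+		return (NULL);	/* chain freed by m_pullup() */
+	/* The first hdrlen bytes are now contiguous at mtod(m, ...). */
+	return (m);
+}
+#endif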
+
+/*
+ * Like m_pullup(), except a new mbuf is always allocated, and we allow
+ * the amount of empty space before the data in the new mbuf to be specified
+ * (in the event that the caller expects to prepend later).
+ */
+int MSFail;
+
+struct mbuf *
+m_copyup(struct mbuf *n, int len, int dstoff)
+{
+ struct mbuf *m;
+ int count, space;
+
+ if (len > (MHLEN - dstoff))
+ goto bad;
+ MGET(m, M_DONTWAIT, n->m_type);
+ if (m == NULL)
+ goto bad;
+ m->m_len = 0;
+ if (n->m_flags & M_PKTHDR)
+ M_MOVE_PKTHDR(m, n);
+ m->m_data += dstoff;
+ space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
+ do {
+ count = min(min(max(len, max_protohdr), space), n->m_len);
+ memcpy(mtod(m, caddr_t) + m->m_len, mtod(n, caddr_t),
+ (unsigned)count);
+ len -= count;
+ m->m_len += count;
+ n->m_len -= count;
+ space -= count;
+ if (n->m_len)
+ n->m_data += count;
+ else
+ n = m_free(n);
+ } while (len > 0 && n);
+ if (len > 0) {
+ (void) m_free(m);
+ goto bad;
+ }
+ m->m_next = n;
+ return (m);
+ bad:
+ m_freem(n);
+ MSFail++;
+ return (NULL);
+}
+
+/*
+ * Partition an mbuf chain in two pieces, returning the tail --
+ * all but the first len0 bytes. In case of failure, it returns NULL and
+ * attempts to restore the chain to its original state.
+ *
+ * Note that the resulting mbufs might be read-only, because the new
+ * mbuf can end up sharing an mbuf cluster with the original mbuf if
+ * the "breaking point" happens to lie within a cluster mbuf. Use the
+ * M_WRITABLE() macro to check for this case.
+ */
+struct mbuf *
+m_split(struct mbuf *m0, int len0, int wait)
+{
+ struct mbuf *m, *n;
+ u_int len = len0, remain;
+
+ MBUF_CHECKSLEEP(wait);
+ for (m = m0; m && len > m->m_len; m = m->m_next)
+ len -= m->m_len;
+ if (m == NULL)
+ return (NULL);
+ remain = m->m_len - len;
+ if (m0->m_flags & M_PKTHDR) {
+ MGETHDR(n, wait, m0->m_type);
+ if (n == NULL)
+ return (NULL);
+ n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
+ n->m_pkthdr.len = m0->m_pkthdr.len - len0;
+ m0->m_pkthdr.len = len0;
+ if (m->m_flags & M_EXT)
+ goto extpacket;
+ if (remain > MHLEN) {
+ /* m can't be the lead packet */
+ MH_ALIGN(n, 0);
+ n->m_next = m_split(m, len, wait);
+ if (n->m_next == NULL) {
+ (void) m_free(n);
+ return (NULL);
+ } else {
+ n->m_len = 0;
+ return (n);
+ }
+ } else
+ MH_ALIGN(n, remain);
+ } else if (remain == 0) {
+ n = m->m_next;
+ m->m_next = NULL;
+ return (n);
+ } else {
+ MGET(n, wait, m->m_type);
+ if (n == NULL)
+ return (NULL);
+ M_ALIGN(n, remain);
+ }
+extpacket:
+ if (m->m_flags & M_EXT) {
+ n->m_data = m->m_data + len;
+ mb_dupcl(n, m);
+ } else {
+ bcopy(mtod(m, caddr_t) + len, mtod(n, caddr_t), remain);
+ }
+ n->m_len = remain;
+ m->m_len = len;
+ n->m_next = m->m_next;
+ m->m_next = NULL;
+ return (n);
+}
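+
+/*
+ * Editor's usage sketch (not part of the original source): splitting
+ * off everything past a caller-chosen boundary, e.g. when fragmenting
+ * a packet.
+ */
+#if 0	/* illustrative only */
+static struct mbuf *
+example_split(struct mbuf *m, int boundary)
+{
+
+	/* Returns the tail; NULL leaves "m" restored to its old state. */
+	return (m_split(m, boundary, M_DONTWAIT));
+}
+#endif
+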
+/*
+ * Routine to copy from device local memory into mbufs.
+ * Note that the `off' argument is an offset into the first mbuf of
+ * the target chain, at which the copied data is placed.
+ */
+struct mbuf *
+m_devget(char *buf, int totlen, int off, struct ifnet *ifp,
+ void (*copy)(char *from, caddr_t to, u_int len))
+{
+ struct mbuf *m;
+ struct mbuf *top = NULL, **mp = &top;
+ int len;
+
+ if (off < 0 || off > MHLEN)
+ return (NULL);
+
+ while (totlen > 0) {
+ if (top == NULL) { /* First one, must be PKTHDR */
+ if (totlen + off >= MINCLSIZE) {
+ m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
+ len = MCLBYTES;
+ } else {
+ m = m_gethdr(M_DONTWAIT, MT_DATA);
+ len = MHLEN;
+
+ /* Place initial small packet/header at end of mbuf */
+ if (m && totlen + off + max_linkhdr <= MLEN) {
+ m->m_data += max_linkhdr;
+ len -= max_linkhdr;
+ }
+ }
+ if (m == NULL)
+ return NULL;
+ m->m_pkthdr.rcvif = ifp;
+ m->m_pkthdr.len = totlen;
+ } else {
+ if (totlen + off >= MINCLSIZE) {
+ m = m_getcl(M_DONTWAIT, MT_DATA, 0);
+ len = MCLBYTES;
+ } else {
+ m = m_get(M_DONTWAIT, MT_DATA);
+ len = MLEN;
+ }
+ if (m == NULL) {
+ m_freem(top);
+ return NULL;
+ }
+ }
+ if (off) {
+ m->m_data += off;
+ len -= off;
+ off = 0;
+ }
+ m->m_len = len = min(totlen, len);
+ if (copy)
+ copy(buf, mtod(m, caddr_t), (u_int)len);
+ else
+ bcopy(buf, mtod(m, caddr_t), (u_int)len);
+ buf += len;
+ *mp = m;
+ mp = &m->m_next;
+ totlen -= len;
+ }
+ return (top);
+}
+
+/*
+ * Copy data from a buffer back into the indicated mbuf chain,
+ * starting "off" bytes from the beginning, extending the mbuf
+ * chain if necessary.
+ */
+void
+m_copyback(struct mbuf *m0, int off, int len, c_caddr_t cp)
+{
+ int mlen;
+ struct mbuf *m = m0, *n;
+ int totlen = 0;
+
+ if (m0 == NULL)
+ return;
+ while (off > (mlen = m->m_len)) {
+ off -= mlen;
+ totlen += mlen;
+ if (m->m_next == NULL) {
+ n = m_get(M_DONTWAIT, m->m_type);
+ if (n == NULL)
+ goto out;
+ bzero(mtod(n, caddr_t), MLEN);
+ n->m_len = min(MLEN, len + off);
+ m->m_next = n;
+ }
+ m = m->m_next;
+ }
+ while (len > 0) {
+ if (m->m_next == NULL && (len > m->m_len - off)) {
+ m->m_len += min(len - (m->m_len - off),
+ M_TRAILINGSPACE(m));
+ }
+ mlen = min (m->m_len - off, len);
+ bcopy(cp, off + mtod(m, caddr_t), (u_int)mlen);
+ cp += mlen;
+ len -= mlen;
+ mlen += off;
+ off = 0;
+ totlen += mlen;
+ if (len == 0)
+ break;
+ if (m->m_next == NULL) {
+ n = m_get(M_DONTWAIT, m->m_type);
+ if (n == NULL)
+ break;
+ n->m_len = min(MLEN, len);
+ m->m_next = n;
+ }
+ m = m->m_next;
+ }
+out: if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen))
+ m->m_pkthdr.len = totlen;
+}
+
+/*
+ * Append the specified data to the indicated mbuf chain,
+ * extending the mbuf chain if the new data does not fit in
+ * existing space.
+ *
+ * Return 1 if able to complete the job; otherwise 0.
+ */
+int
+m_append(struct mbuf *m0, int len, c_caddr_t cp)
+{
+ struct mbuf *m, *n;
+ int remainder, space;
+
+ for (m = m0; m->m_next != NULL; m = m->m_next)
+ ;
+ remainder = len;
+ space = M_TRAILINGSPACE(m);
+ if (space > 0) {
+ /*
+ * Copy into available space.
+ */
+ if (space > remainder)
+ space = remainder;
+ bcopy(cp, mtod(m, caddr_t) + m->m_len, space);
+ m->m_len += space;
+ cp += space, remainder -= space;
+ }
+ while (remainder > 0) {
+ /*
+ * Allocate a new mbuf; could check space
+ * and allocate a cluster instead.
+ */
+ n = m_get(M_DONTWAIT, m->m_type);
+ if (n == NULL)
+ break;
+ n->m_len = min(MLEN, remainder);
+ bcopy(cp, mtod(n, caddr_t), n->m_len);
+ cp += n->m_len, remainder -= n->m_len;
+ m->m_next = n;
+ m = n;
+ }
+ if (m0->m_flags & M_PKTHDR)
+ m0->m_pkthdr.len += len - remainder;
+ return (remainder == 0);
+}
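+
+/*
+ * Editor's usage sketch (not part of the original source): appending a
+ * small trailer, letting the routine grow the chain when the last mbuf
+ * has no free space.
+ */
+#if 0	/* illustrative only */
+static int
+example_add_trailer(struct mbuf *m)
+{
+	static const char trailer[4];
+
+	return (m_append(m, sizeof(trailer), trailer) ? 0 : ENOBUFS);
+}
+#endif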
+
+/*
+ * Apply function f to the data in an mbuf chain starting "off" bytes from
+ * the beginning, continuing for "len" bytes.
+ */
+int
+m_apply(struct mbuf *m, int off, int len,
+ int (*f)(void *, void *, u_int), void *arg)
+{
+ u_int count;
+ int rval;
+
+ KASSERT(off >= 0, ("m_apply, negative off %d", off));
+ KASSERT(len >= 0, ("m_apply, negative len %d", len));
+ while (off > 0) {
+ KASSERT(m != NULL, ("m_apply, offset > size of mbuf chain"));
+ if (off < m->m_len)
+ break;
+ off -= m->m_len;
+ m = m->m_next;
+ }
+ while (len > 0) {
+		KASSERT(m != NULL, ("m_apply, length > size of mbuf chain"));
+ count = min(m->m_len - off, len);
+ rval = (*f)(arg, mtod(m, caddr_t) + off, count);
+ if (rval)
+ return (rval);
+ len -= count;
+ off = 0;
+ m = m->m_next;
+ }
+ return (0);
+}
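+
+/*
+ * Editor's usage sketch (not part of the original source): visiting a
+ * region of a chain with a callback.  The XOR "digest" below is a
+ * made-up example of the (*f)(arg, data, len) contract.
+ */
+#if 0	/* illustrative only */
+static int
+example_xor(void *arg, void *data, u_int len)
+{
+	u_char *sum = arg, *p = data;
+
+	while (len-- > 0)
+		*sum ^= *p++;
+	return (0);		/* non-zero would abort the walk */
+}
+
+static u_char
+example_digest(struct mbuf *m)
+{
+	u_char sum = 0;
+
+	m_apply(m, 0, m_length(m, NULL), example_xor, &sum);
+	return (sum);
+}
+#endif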
+
+/*
+ * Return a pointer to mbuf/offset of location in mbuf chain.
+ */
+struct mbuf *
+m_getptr(struct mbuf *m, int loc, int *off)
+{
+
+ while (loc >= 0) {
+ /* Normal end of search. */
+ if (m->m_len > loc) {
+ *off = loc;
+ return (m);
+ } else {
+ loc -= m->m_len;
+ if (m->m_next == NULL) {
+ if (loc == 0) {
+ /* Point at the end of valid data. */
+ *off = m->m_len;
+ return (m);
+ }
+ return (NULL);
+ }
+ m = m->m_next;
+ }
+ }
+ return (NULL);
+}
+
+void
+m_print(const struct mbuf *m, int maxlen)
+{
+ int len;
+ int pdata;
+ const struct mbuf *m2;
+
+ if (m->m_flags & M_PKTHDR)
+ len = m->m_pkthdr.len;
+ else
+ len = -1;
+ m2 = m;
+ while (m2 != NULL && (len == -1 || len)) {
+ pdata = m2->m_len;
+ if (maxlen != -1 && pdata > maxlen)
+ pdata = maxlen;
+ printf("mbuf: %p len: %d, next: %p, %b%s", m2, m2->m_len,
+ m2->m_next, m2->m_flags, "\20\20freelist\17skipfw"
+ "\11proto5\10proto4\7proto3\6proto2\5proto1\4rdonly"
+ "\3eor\2pkthdr\1ext", pdata ? "" : "\n");
+ if (pdata)
+ printf(", %*D\n", pdata, (u_char *)m2->m_data, "-");
+ if (len != -1)
+ len -= m2->m_len;
+ m2 = m2->m_next;
+ }
+ if (len > 0)
+ printf("%d bytes unaccounted for.\n", len);
+ return;
+}
+
+u_int
+m_fixhdr(struct mbuf *m0)
+{
+ u_int len;
+
+ len = m_length(m0, NULL);
+ m0->m_pkthdr.len = len;
+ return (len);
+}
+
+u_int
+m_length(struct mbuf *m0, struct mbuf **last)
+{
+ struct mbuf *m;
+ u_int len;
+
+ len = 0;
+ for (m = m0; m != NULL; m = m->m_next) {
+ len += m->m_len;
+ if (m->m_next == NULL)
+ break;
+ }
+ if (last != NULL)
+ *last = m;
+ return (len);
+}
+
+/*
+ * Defragment an mbuf chain, returning the shortest possible
+ * chain of mbufs and clusters.  If allocation fails and
+ * this cannot be completed, NULL will be returned, but
+ * the passed in chain will be unchanged.  Upon success,
+ * the original chain will be freed, and the new chain
+ * will be returned.
+ *
+ * If an mbuf without a packet header is passed in, the
+ * original mbuf (chain) will be returned unharmed.
+ */
+struct mbuf *
+m_defrag(struct mbuf *m0, int how)
+{
+ struct mbuf *m_new = NULL, *m_final = NULL;
+ int progress = 0, length;
+
+ MBUF_CHECKSLEEP(how);
+ if (!(m0->m_flags & M_PKTHDR))
+ return (m0);
+
+ m_fixhdr(m0); /* Needed sanity check */
+
+#ifdef MBUF_STRESS_TEST
+ if (m_defragrandomfailures) {
+ int temp = arc4random() & 0xff;
+ if (temp == 0xba)
+ goto nospace;
+ }
+#endif
+
+ if (m0->m_pkthdr.len > MHLEN)
+ m_final = m_getcl(how, MT_DATA, M_PKTHDR);
+ else
+ m_final = m_gethdr(how, MT_DATA);
+
+ if (m_final == NULL)
+ goto nospace;
+
+ if (m_dup_pkthdr(m_final, m0, how) == 0)
+ goto nospace;
+
+ m_new = m_final;
+
+ while (progress < m0->m_pkthdr.len) {
+ length = m0->m_pkthdr.len - progress;
+ if (length > MCLBYTES)
+ length = MCLBYTES;
+
+ if (m_new == NULL) {
+ if (length > MLEN)
+ m_new = m_getcl(how, MT_DATA, 0);
+ else
+ m_new = m_get(how, MT_DATA);
+ if (m_new == NULL)
+ goto nospace;
+ }
+
+ m_copydata(m0, progress, length, mtod(m_new, caddr_t));
+ progress += length;
+ m_new->m_len = length;
+ if (m_new != m_final)
+ m_cat(m_final, m_new);
+ m_new = NULL;
+ }
+#ifdef MBUF_STRESS_TEST
+ if (m0->m_next == NULL)
+ m_defraguseless++;
+#endif
+ m_freem(m0);
+ m0 = m_final;
+#ifdef MBUF_STRESS_TEST
+ m_defragpackets++;
+ m_defragbytes += m0->m_pkthdr.len;
+#endif
+ return (m0);
+nospace:
+#ifdef MBUF_STRESS_TEST
+ m_defragfailure++;
+#endif
+ if (m_final)
+ m_freem(m_final);
+ return (NULL);
+}
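+
+/*
+ * Editor's usage sketch (not part of the original source): the common
+ * driver transmit-path pattern when a chain has more segments than the
+ * DMA engine can handle.
+ */
+#if 0	/* illustrative only */
+static struct mbuf *
+example_tx_defrag(struct mbuf *m)
+{
+	struct mbuf *d;
+
+	d = m_defrag(m, M_DONTWAIT);
+	if (d == NULL) {
+		m_freem(m);	/* give up; m_defrag left "m" intact */
+		return (NULL);
+	}
+	return (d);		/* the original chain was freed */
+}
+#endif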
+
+/*
+ * Defragment an mbuf chain, returning at most maxfrags separate
+ * mbufs+clusters.  If this is not possible NULL is returned and
+ * the original mbuf chain is left in its present (potentially
+ * modified) state. We use two techniques: collapsing consecutive
+ * mbufs and replacing consecutive mbufs by a cluster.
+ *
+ * NB: this should really be named m_defrag but that name is taken
+ */
+struct mbuf *
+m_collapse(struct mbuf *m0, int how, int maxfrags)
+{
+ struct mbuf *m, *n, *n2, **prev;
+ u_int curfrags;
+
+ /*
+ * Calculate the current number of frags.
+ */
+ curfrags = 0;
+ for (m = m0; m != NULL; m = m->m_next)
+ curfrags++;
+ /*
+ * First, try to collapse mbufs. Note that we always collapse
+ * towards the front so we don't need to deal with moving the
+ * pkthdr. This may be suboptimal if the first mbuf has much
+ * less data than the following.
+ */
+ m = m0;
+again:
+ for (;;) {
+ n = m->m_next;
+ if (n == NULL)
+ break;
+ if ((m->m_flags & M_RDONLY) == 0 &&
+ n->m_len < M_TRAILINGSPACE(m)) {
+ bcopy(mtod(n, void *), mtod(m, char *) + m->m_len,
+ n->m_len);
+ m->m_len += n->m_len;
+ m->m_next = n->m_next;
+ m_free(n);
+ if (--curfrags <= maxfrags)
+ return m0;
+ } else
+ m = n;
+ }
+ KASSERT(maxfrags > 1,
+ ("maxfrags %u, but normal collapse failed", maxfrags));
+ /*
+ * Collapse consecutive mbufs to a cluster.
+ */
+ prev = &m0->m_next; /* NB: not the first mbuf */
+ while ((n = *prev) != NULL) {
+ if ((n2 = n->m_next) != NULL &&
+ n->m_len + n2->m_len < MCLBYTES) {
+ m = m_getcl(how, MT_DATA, 0);
+ if (m == NULL)
+ goto bad;
+ bcopy(mtod(n, void *), mtod(m, void *), n->m_len);
+ bcopy(mtod(n2, void *), mtod(m, char *) + n->m_len,
+ n2->m_len);
+ m->m_len = n->m_len + n2->m_len;
+ m->m_next = n2->m_next;
+ *prev = m;
+ m_free(n);
+ m_free(n2);
+ if (--curfrags <= maxfrags) /* +1 cl -2 mbufs */
+ return m0;
+ /*
+ * Still not there, try the normal collapse
+ * again before we allocate another cluster.
+ */
+ goto again;
+ }
+ prev = &n->m_next;
+ }
+ /*
+ * No place where we can collapse to a cluster; punt.
+ * This can occur if, for example, you request 2 frags
+ * but the packet requires that both be clusters (we
+ * never reallocate the first mbuf to avoid moving the
+ * packet header).
+ */
+bad:
+ return NULL;
+}
+
+#ifdef MBUF_STRESS_TEST
+
+/*
+ * Fragment an mbuf chain. There's no reason you'd ever want to do
+ * this in normal usage, but it's great for stress testing various
+ * mbuf consumers.
+ *
+ * If fragmentation is not possible, the original chain will be
+ * returned.
+ *
+ * Possible length values:
+ * 0 no fragmentation will occur
+ * > 0 each fragment will be of the specified length
+ * -1 each fragment will be the same random value in length
+ * -2 each fragment's length will be entirely random
+ * (Random values range from 1 to 256)
+ */
+struct mbuf *
+m_fragment(struct mbuf *m0, int how, int length)
+{
+ struct mbuf *m_new = NULL, *m_final = NULL;
+ int progress = 0;
+
+ if (!(m0->m_flags & M_PKTHDR))
+ return (m0);
+
+ if ((length == 0) || (length < -2))
+ return (m0);
+
+ m_fixhdr(m0); /* Needed sanity check */
+
+ m_final = m_getcl(how, MT_DATA, M_PKTHDR);
+
+ if (m_final == NULL)
+ goto nospace;
+
+ if (m_dup_pkthdr(m_final, m0, how) == 0)
+ goto nospace;
+
+ m_new = m_final;
+
+ if (length == -1)
+ length = 1 + (arc4random() & 255);
+
+ while (progress < m0->m_pkthdr.len) {
+ int fraglen;
+
+ if (length > 0)
+ fraglen = length;
+ else
+ fraglen = 1 + (arc4random() & 255);
+ if (fraglen > m0->m_pkthdr.len - progress)
+ fraglen = m0->m_pkthdr.len - progress;
+
+ if (fraglen > MCLBYTES)
+ fraglen = MCLBYTES;
+
+ if (m_new == NULL) {
+ m_new = m_getcl(how, MT_DATA, 0);
+ if (m_new == NULL)
+ goto nospace;
+ }
+
+ m_copydata(m0, progress, fraglen, mtod(m_new, caddr_t));
+ progress += fraglen;
+ m_new->m_len = fraglen;
+ if (m_new != m_final)
+ m_cat(m_final, m_new);
+ m_new = NULL;
+ }
+ m_freem(m0);
+ m0 = m_final;
+ return (m0);
+nospace:
+ if (m_final)
+ m_freem(m_final);
+ /* Return the original chain on failure */
+ return (m0);
+}
+
+#endif
+
+/*
+ * Copy the contents of uio into a properly sized mbuf chain.
+ */
+struct mbuf *
+m_uiotombuf(struct uio *uio, int how, int len, int align, int flags)
+{
+ struct mbuf *m, *mb;
+ int error, length, total;
+ int progress = 0;
+
+ /*
+	 * len can be zero or an arbitrarily large value bounded by
+	 * the total data supplied by the uio.
+ */
+ if (len > 0)
+ total = min(uio->uio_resid, len);
+ else
+ total = uio->uio_resid;
+
+ /*
+ * The smallest unit returned by m_getm2() is a single mbuf
+ * with pkthdr. We can't align past it.
+ */
+ if (align >= MHLEN)
+ return (NULL);
+
+ /*
+ * Give us the full allocation or nothing.
+ * If len is zero return the smallest empty mbuf.
+ */
+ m = m_getm2(NULL, max(total + align, 1), how, MT_DATA, flags);
+ if (m == NULL)
+ return (NULL);
+ m->m_data += align;
+
+ /* Fill all mbufs with uio data and update header information. */
+ for (mb = m; mb != NULL; mb = mb->m_next) {
+ length = min(M_TRAILINGSPACE(mb), total - progress);
+
+ error = uiomove(mtod(mb, void *), length, uio);
+ if (error) {
+ m_freem(m);
+ return (NULL);
+ }
+
+ mb->m_len = length;
+ progress += length;
+ if (flags & M_PKTHDR)
+ m->m_pkthdr.len += length;
+ }
+ KASSERT(progress == total, ("%s: progress != total", __func__));
+
+ return (m);
+}
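+
+/*
+ * Editor's usage sketch (not part of the original source): turning a
+ * write(2)-style uio into a packet chain while reserving 14 bytes of
+ * leading space for a link header (the align value must stay < MHLEN).
+ */
+#if 0	/* illustrative only */
+static struct mbuf *
+example_uio_packet(struct uio *uio)
+{
+
+	return (m_uiotombuf(uio, M_WAITOK, 0, 14, M_PKTHDR));
+}
+#endif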
+
+/*
+ * Copy an mbuf chain into a uio limited by len if set.
+ */
+int
+m_mbuftouio(struct uio *uio, struct mbuf *m, int len)
+{
+ int error, length, total;
+ int progress = 0;
+
+ if (len > 0)
+ total = min(uio->uio_resid, len);
+ else
+ total = uio->uio_resid;
+
+ /* Fill the uio with data from the mbufs. */
+ for (; m != NULL; m = m->m_next) {
+ length = min(m->m_len, total - progress);
+
+ error = uiomove(mtod(m, void *), length, uio);
+ if (error)
+ return (error);
+
+ progress += length;
+ }
+
+ return (0);
+}
+
+/*
+ * Set the m_data pointer of a newly-allocated mbuf
+ * to place an object of the specified size at the
+ * end of the mbuf, longword aligned.
+ */
+void
+m_align(struct mbuf *m, int len)
+{
+ int adjust;
+
+ if (m->m_flags & M_EXT)
+ adjust = m->m_ext.ext_size - len;
+ else if (m->m_flags & M_PKTHDR)
+ adjust = MHLEN - len;
+ else
+ adjust = MLEN - len;
+ m->m_data += adjust &~ (sizeof(long)-1);
+}
+
+/*
+ * Create a writable copy of the mbuf chain. While doing this
+ * we compact the chain with a goal of producing a chain with
+ * at most two mbufs. The second mbuf in this chain is likely
+ * to be a cluster. The primary purpose of this work is to create
+ * a writable packet for encryption, compression, etc. The
+ * secondary goal is to linearize the data so the data can be
+ * passed to crypto hardware in the most efficient manner possible.
+ */
+struct mbuf *
+m_unshare(struct mbuf *m0, int how)
+{
+ struct mbuf *m, *mprev;
+ struct mbuf *n, *mfirst, *mlast;
+ int len, off;
+
+ mprev = NULL;
+ for (m = m0; m != NULL; m = mprev->m_next) {
+ /*
+ * Regular mbufs are ignored unless there's a cluster
+ * in front of it that we can use to coalesce. We do
+ * the latter mainly so later clusters can be coalesced
+ * also w/o having to handle them specially (i.e. convert
+ * mbuf+cluster -> cluster). This optimization is heavily
+ * influenced by the assumption that we're running over
+ * Ethernet where MCLBYTES is large enough that the max
+ * packet size will permit lots of coalescing into a
+ * single cluster. This in turn permits efficient
+ * crypto operations, especially when using hardware.
+ */
+ if ((m->m_flags & M_EXT) == 0) {
+ if (mprev && (mprev->m_flags & M_EXT) &&
+ m->m_len <= M_TRAILINGSPACE(mprev)) {
+ /* XXX: this ignores mbuf types */
+ memcpy(mtod(mprev, caddr_t) + mprev->m_len,
+ mtod(m, caddr_t), m->m_len);
+ mprev->m_len += m->m_len;
+ mprev->m_next = m->m_next; /* unlink from chain */
+ m_free(m); /* reclaim mbuf */
+#if 0
+ newipsecstat.ips_mbcoalesced++;
+#endif
+ } else {
+ mprev = m;
+ }
+ continue;
+ }
+ /*
+ * Writable mbufs are left alone (for now).
+ */
+ if (M_WRITABLE(m)) {
+ mprev = m;
+ continue;
+ }
+
+ /*
+ * Not writable, replace with a copy or coalesce with
+ * the previous mbuf if possible (since we have to copy
+ * it anyway, we try to reduce the number of mbufs and
+ * clusters so that future work is easier).
+ */
+ KASSERT(m->m_flags & M_EXT, ("m_flags 0x%x", m->m_flags));
+ /* NB: we only coalesce into a cluster or larger */
+ if (mprev != NULL && (mprev->m_flags & M_EXT) &&
+ m->m_len <= M_TRAILINGSPACE(mprev)) {
+ /* XXX: this ignores mbuf types */
+ memcpy(mtod(mprev, caddr_t) + mprev->m_len,
+ mtod(m, caddr_t), m->m_len);
+ mprev->m_len += m->m_len;
+ mprev->m_next = m->m_next; /* unlink from chain */
+ m_free(m); /* reclaim mbuf */
+#if 0
+ newipsecstat.ips_clcoalesced++;
+#endif
+ continue;
+ }
+
+ /*
+ * Allocate new space to hold the copy...
+ */
+ /* XXX why can M_PKTHDR be set past the first mbuf? */
+ if (mprev == NULL && (m->m_flags & M_PKTHDR)) {
+ /*
+ * NB: if a packet header is present we must
+ * allocate the mbuf separately from any cluster
+ * because M_MOVE_PKTHDR will smash the data
+ * pointer and drop the M_EXT marker.
+ */
+ MGETHDR(n, how, m->m_type);
+ if (n == NULL) {
+ m_freem(m0);
+ return (NULL);
+ }
+ M_MOVE_PKTHDR(n, m);
+ MCLGET(n, how);
+ if ((n->m_flags & M_EXT) == 0) {
+ m_free(n);
+ m_freem(m0);
+ return (NULL);
+ }
+ } else {
+ n = m_getcl(how, m->m_type, m->m_flags);
+ if (n == NULL) {
+ m_freem(m0);
+ return (NULL);
+ }
+ }
+ /*
+ * ... and copy the data. We deal with jumbo mbufs
+ * (i.e. m_len > MCLBYTES) by splitting them into
+ * clusters. We could just malloc a buffer and make
+ * it external but too many device drivers don't know
+ * how to break up the non-contiguous memory when
+ * doing DMA.
+ */
+ len = m->m_len;
+ off = 0;
+ mfirst = n;
+ mlast = NULL;
+ for (;;) {
+ int cc = min(len, MCLBYTES);
+ memcpy(mtod(n, caddr_t), mtod(m, caddr_t) + off, cc);
+ n->m_len = cc;
+ if (mlast != NULL)
+ mlast->m_next = n;
+ mlast = n;
+#if 0
+ newipsecstat.ips_clcopied++;
+#endif
+
+ len -= cc;
+ if (len <= 0)
+ break;
+ off += cc;
+
+ n = m_getcl(how, m->m_type, m->m_flags);
+ if (n == NULL) {
+ m_freem(mfirst);
+ m_freem(m0);
+ return (NULL);
+ }
+ }
+ n->m_next = m->m_next;
+ if (mprev == NULL)
+ m0 = mfirst; /* new head of chain */
+ else
+ mprev->m_next = mfirst; /* replace old mbuf */
+ m_free(m); /* release old mbuf */
+ mprev = mfirst;
+ }
+ return (m0);
+}
+
+#ifdef MBUF_PROFILING
+
+#define MP_BUCKETS 32 /* don't just change this as things may overflow.*/
+struct mbufprofile {
+ uintmax_t wasted[MP_BUCKETS];
+ uintmax_t used[MP_BUCKETS];
+ uintmax_t segments[MP_BUCKETS];
+} mbprof;
+
+#define MP_MAXDIGITS 21	/* a printed 64-bit value needs at most 20 digits, +1 for NUL */
+#define MP_NUMLINES 6
+#define MP_NUMSPERLINE 16
+#define MP_EXTRABYTES 64 /* > strlen("used:\nwasted:\nsegments:\n") */
+/* work out max space needed and add a bit of spare space too */
+#define MP_MAXLINE ((MP_MAXDIGITS+1) * MP_NUMSPERLINE)
+#define MP_BUFSIZE ((MP_MAXLINE * MP_NUMLINES) + 1 + MP_EXTRABYTES)
+
+char mbprofbuf[MP_BUFSIZE];
+
+void
+m_profile(struct mbuf *m)
+{
+ int segments = 0;
+ int used = 0;
+ int wasted = 0;
+
+ while (m) {
+ segments++;
+ used += m->m_len;
+ if (m->m_flags & M_EXT) {
+ wasted += MHLEN - sizeof(m->m_ext) +
+ m->m_ext.ext_size - m->m_len;
+ } else {
+ if (m->m_flags & M_PKTHDR)
+ wasted += MHLEN - m->m_len;
+ else
+ wasted += MLEN - m->m_len;
+ }
+ m = m->m_next;
+ }
+ /* be paranoid.. it helps */
+ if (segments > MP_BUCKETS - 1)
+ segments = MP_BUCKETS - 1;
+ if (used > 100000)
+ used = 100000;
+ if (wasted > 100000)
+ wasted = 100000;
+ /* store in the appropriate bucket */
+ /* don't bother locking. if it's slightly off, so what? */
+ mbprof.segments[segments]++;
+ mbprof.used[fls(used)]++;
+ mbprof.wasted[fls(wasted)]++;
+}
+
+static void
+mbprof_textify(void)
+{
+ int offset;
+ char *c;
+	uintmax_t *p;
+
+ p = &mbprof.wasted[0];
+ c = mbprofbuf;
+ offset = snprintf(c, MP_MAXLINE + 10,
+ "wasted:\n"
+ "%ju %ju %ju %ju %ju %ju %ju %ju "
+ "%ju %ju %ju %ju %ju %ju %ju %ju\n",
+ p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7],
+ p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]);
+#ifdef BIG_ARRAY
+ p = &mbprof.wasted[16];
+ c += offset;
+ offset = snprintf(c, MP_MAXLINE,
+ "%ju %ju %ju %ju %ju %ju %ju %ju "
+ "%ju %ju %ju %ju %ju %ju %ju %ju\n",
+ p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7],
+ p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]);
+#endif
+ p = &mbprof.used[0];
+ c += offset;
+ offset = snprintf(c, MP_MAXLINE + 10,
+ "used:\n"
+ "%ju %ju %ju %ju %ju %ju %ju %ju "
+ "%ju %ju %ju %ju %ju %ju %ju %ju\n",
+ p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7],
+ p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]);
+#ifdef BIG_ARRAY
+ p = &mbprof.used[16];
+ c += offset;
+ offset = snprintf(c, MP_MAXLINE,
+ "%ju %ju %ju %ju %ju %ju %ju %ju "
+ "%ju %ju %ju %ju %ju %ju %ju %ju\n",
+ p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7],
+ p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]);
+#endif
+ p = &mbprof.segments[0];
+ c += offset;
+ offset = snprintf(c, MP_MAXLINE + 10,
+ "segments:\n"
+ "%ju %ju %ju %ju %ju %ju %ju %ju "
+ "%ju %ju %ju %ju %ju %ju %ju %ju\n",
+ p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7],
+ p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]);
+#ifdef BIG_ARRAY
+ p = &mbprof.segments[16];
+ c += offset;
+ offset = snprintf(c, MP_MAXLINE,
+ "%ju %ju %ju %ju %ju %ju %ju %ju "
+	    "%ju %ju %ju %ju %ju %ju %ju %ju\n",
+ p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7],
+ p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]);
+#endif
+}
+
+static int
+mbprof_handler(SYSCTL_HANDLER_ARGS)
+{
+ int error;
+
+ mbprof_textify();
+ error = SYSCTL_OUT(req, mbprofbuf, strlen(mbprofbuf) + 1);
+ return (error);
+}
+
+static int
+mbprof_clr_handler(SYSCTL_HANDLER_ARGS)
+{
+ int clear, error;
+
+ clear = 0;
+ error = sysctl_handle_int(oidp, &clear, 0, req);
+ if (error || !req->newptr)
+ return (error);
+
+ if (clear) {
+ bzero(&mbprof, sizeof(mbprof));
+ }
+
+ return (error);
+}
+
+SYSCTL_PROC(_kern_ipc, OID_AUTO, mbufprofile, CTLTYPE_STRING|CTLFLAG_RD,
+ NULL, 0, mbprof_handler, "A", "mbuf profiling statistics");
+
+SYSCTL_PROC(_kern_ipc, OID_AUTO, mbufprofileclr, CTLTYPE_INT|CTLFLAG_RW,
+ NULL, 0, mbprof_clr_handler, "I", "clear mbuf profiling statistics");
+#endif
+
diff --git a/rtems/freebsd/kern/uipc_mbuf2.c b/rtems/freebsd/kern/uipc_mbuf2.c
new file mode 100644
index 00000000..210a2476
--- /dev/null
+++ b/rtems/freebsd/kern/uipc_mbuf2.c
@@ -0,0 +1,455 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/* $KAME: uipc_mbuf2.c,v 1.31 2001/11/28 11:08:53 itojun Exp $ */
+/* $NetBSD: uipc_mbuf.c,v 1.40 1999/04/01 00:23:25 thorpej Exp $ */
+
+/*-
+ * Copyright (C) 1999 WIDE Project.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the project nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+/*-
+ * Copyright (c) 1982, 1986, 1988, 1991, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)uipc_mbuf.c 8.4 (Berkeley) 2/14/95
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/*#define PULLDOWN_DEBUG*/
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/mutex.h>
+
+#include <rtems/freebsd/security/mac/mac_framework.h>
+
+static MALLOC_DEFINE(M_PACKET_TAGS, MBUF_TAG_MEM_NAME,
+ "packet-attached information");
+
+/* can't call it m_dup(), as freebsd[34] uses m_dup() with different arg */
+static struct mbuf *m_dup1(struct mbuf *, int, int, int);
+
+/*
+ * ensure that [off, off + len) is contiguous on the mbuf chain "m".
+ * packet chain before "off" is kept untouched.
+ * if offp == NULL, the target will start at <retval, 0> on resulting chain.
+ * if offp != NULL, the target will start at <retval, *offp> on resulting chain.
+ *
+ * on error return (NULL return value), original "m" will be freed.
+ *
+ * XXX: M_TRAILINGSPACE/M_LEADINGSPACE only permitted on writable ext_buf.
+ */
+struct mbuf *
+m_pulldown(struct mbuf *m, int off, int len, int *offp)
+{
+ struct mbuf *n, *o;
+ int hlen, tlen, olen;
+ int writable;
+
+ /* check invalid arguments. */
+ if (m == NULL)
+ panic("m == NULL in m_pulldown()");
+ if (len > MCLBYTES) {
+ m_freem(m);
+ return NULL; /* impossible */
+ }
+
+#ifdef PULLDOWN_DEBUG
+ {
+ struct mbuf *t;
+ printf("before:");
+ for (t = m; t; t = t->m_next)
+ printf(" %d", t->m_len);
+ printf("\n");
+ }
+#endif
+ n = m;
+ while (n != NULL && off > 0) {
+ if (n->m_len > off)
+ break;
+ off -= n->m_len;
+ n = n->m_next;
+ }
+	/* be sure to point at a non-empty mbuf */
+ while (n != NULL && n->m_len == 0)
+ n = n->m_next;
+ if (!n) {
+ m_freem(m);
+ return NULL; /* mbuf chain too short */
+ }
+
+ /*
+ * XXX: This code is flawed because it considers a "writable" mbuf
+ * data region to require all of the following:
+ * (i) mbuf _has_ to have M_EXT set; if it is just a regular
+ * mbuf, it is still not considered "writable."
+ * (ii) since mbuf has M_EXT, the ext_type _has_ to be
+ * EXT_CLUSTER. Anything else makes it non-writable.
+ * (iii) M_WRITABLE() must evaluate true.
+ * Ideally, the requirement should only be (iii).
+ *
+ * If we're writable, we're sure we're writable, because the ref. count
+	 * cannot increase from 1, as that would require possession of mbuf
+	 * n by someone else (which is impossible). However, if we're _not_
+	 * writable, we may eventually become writable (if the ref. count drops
+	 * to 1), but we'll fail to notice it unless we re-evaluate
+ * M_WRITABLE(). For now, we only evaluate once at the beginning and
+ * live with this.
+ */
+ /*
+ * XXX: This is dumb. If we're just a regular mbuf with no M_EXT,
+ * then we're not "writable," according to this code.
+ */
+ writable = 0;
+ if ((n->m_flags & M_EXT) == 0 ||
+ (n->m_ext.ext_type == EXT_CLUSTER && M_WRITABLE(n)))
+ writable = 1;
+
+ /*
+ * the target data is on <n, off>.
+ * if we got enough data on the mbuf "n", we're done.
+ */
+ if ((off == 0 || offp) && len <= n->m_len - off && writable)
+ goto ok;
+
+ /*
+ * when len <= n->m_len - off and off != 0, it is a special case.
+ * len bytes from <n, off> sit in a single mbuf, but the caller does
+ * not like the starting position (off).
+ * chop the current mbuf into two pieces, set off to 0.
+ */
+ if (len <= n->m_len - off) {
+ o = m_dup1(n, off, n->m_len - off, M_DONTWAIT);
+ if (o == NULL) {
+ m_freem(m);
+ return NULL; /* ENOBUFS */
+ }
+ n->m_len = off;
+ o->m_next = n->m_next;
+ n->m_next = o;
+ n = n->m_next;
+ off = 0;
+ goto ok;
+ }
+
+ /*
+ * we need to take hlen from <n, off> and tlen from <n->m_next, 0>,
+ * and construct contiguous mbuf with m_len == len.
+ * note that hlen + tlen == len, and tlen > 0.
+ */
+ hlen = n->m_len - off;
+ tlen = len - hlen;
+
+ /*
+ * ensure that we have enough trailing data on mbuf chain.
+ * if not, we can do nothing about the chain.
+ */
+ olen = 0;
+ for (o = n->m_next; o != NULL; o = o->m_next)
+ olen += o->m_len;
+ if (hlen + olen < len) {
+ m_freem(m);
+ return NULL; /* mbuf chain too short */
+ }
+
+ /*
+ * easy cases first.
+ * we need to use m_copydata() to get data from <n->m_next, 0>.
+ */
+ if ((off == 0 || offp) && M_TRAILINGSPACE(n) >= tlen
+ && writable) {
+ m_copydata(n->m_next, 0, tlen, mtod(n, caddr_t) + n->m_len);
+ n->m_len += tlen;
+ m_adj(n->m_next, tlen);
+ goto ok;
+ }
+ if ((off == 0 || offp) && M_LEADINGSPACE(n->m_next) >= hlen
+ && writable) {
+ n->m_next->m_data -= hlen;
+ n->m_next->m_len += hlen;
+ bcopy(mtod(n, caddr_t) + off, mtod(n->m_next, caddr_t), hlen);
+ n->m_len -= hlen;
+ n = n->m_next;
+ off = 0;
+ goto ok;
+ }
+
+ /*
+	 * now we need to do it the hard way.  don't m_copy as there's no
+	 * room on either end.
+ */
+ if (len > MLEN)
+ o = m_getcl(M_DONTWAIT, m->m_type, 0);
+ else
+ o = m_get(M_DONTWAIT, m->m_type);
+ if (!o) {
+ m_freem(m);
+ return NULL; /* ENOBUFS */
+ }
+ /* get hlen from <n, off> into <o, 0> */
+ o->m_len = hlen;
+ bcopy(mtod(n, caddr_t) + off, mtod(o, caddr_t), hlen);
+ n->m_len -= hlen;
+ /* get tlen from <n->m_next, 0> into <o, hlen> */
+ m_copydata(n->m_next, 0, tlen, mtod(o, caddr_t) + o->m_len);
+ o->m_len += tlen;
+ m_adj(n->m_next, tlen);
+ o->m_next = n->m_next;
+ n->m_next = o;
+ n = o;
+ off = 0;
+
+ok:
+#ifdef PULLDOWN_DEBUG
+ {
+ struct mbuf *t;
+ printf("after:");
+ for (t = m; t; t = t->m_next)
+ printf("%c%d", t == n ? '*' : ' ', t->m_len);
+ printf(" (off=%d)\n", off);
+ }
+#endif
+ if (offp)
+ *offp = off;
+ return n;
+}
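+
+/*
+ * Editor's usage sketch (not part of the original source): the KAME
+ * idiom for reading an option header of optlen bytes that may straddle
+ * mbuf boundaries; the function name is hypothetical.
+ */
+#if 0	/* illustrative only */
+static void *
+example_opt(struct mbuf *m, int off, int optlen)
+{
+	struct mbuf *n;
+	int noff;
+
+	n = m_pulldown(m, off, optlen, &noff);
+	if (n == NULL)
+		return (NULL);		/* "m" has already been freed */
+	return (mtod(n, caddr_t) + noff);
+}
+#endif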
+
+static struct mbuf *
+m_dup1(struct mbuf *m, int off, int len, int wait)
+{
+ struct mbuf *n;
+ int copyhdr;
+
+ if (len > MCLBYTES)
+ return NULL;
+ if (off == 0 && (m->m_flags & M_PKTHDR) != 0)
+ copyhdr = 1;
+ else
+ copyhdr = 0;
+ if (len >= MINCLSIZE) {
+ if (copyhdr == 1)
+ n = m_getcl(wait, m->m_type, M_PKTHDR);
+ else
+ n = m_getcl(wait, m->m_type, 0);
+ } else {
+ if (copyhdr == 1)
+ n = m_gethdr(wait, m->m_type);
+ else
+ n = m_get(wait, m->m_type);
+ }
+ if (!n)
+ return NULL; /* ENOBUFS */
+
+ if (copyhdr && !m_dup_pkthdr(n, m, wait)) {
+ m_free(n);
+ return NULL;
+ }
+ m_copydata(m, off, len, mtod(n, caddr_t));
+ n->m_len = len;
+ return n;
+}
+
+/* Free a packet tag. */
+void
+m_tag_free_default(struct m_tag *t)
+{
+#ifdef MAC
+ if (t->m_tag_id == PACKET_TAG_MACLABEL)
+ mac_mbuf_tag_destroy(t);
+#endif
+ free(t, M_PACKET_TAGS);
+}
+
+/* Get a packet tag structure along with specified data following. */
+struct m_tag *
+m_tag_alloc(u_int32_t cookie, int type, int len, int wait)
+{
+ struct m_tag *t;
+
+ MBUF_CHECKSLEEP(wait);
+ if (len < 0)
+ return NULL;
+ t = malloc(len + sizeof(struct m_tag), M_PACKET_TAGS, wait);
+ if (t == NULL)
+ return NULL;
+ m_tag_setup(t, cookie, type, len);
+ t->m_tag_free = m_tag_free_default;
+ return t;
+}
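+
+/*
+ * Editor's usage sketch (not part of the original source): allocating a
+ * tag with four bytes of payload and attaching it to a packet header
+ * mbuf; m_tag_prepend() and the MTAG_ABI_COMPAT/PACKET_TAG_* constants
+ * come from sys/mbuf.h.
+ */
+#if 0	/* illustrative only */
+static void
+example_tag(struct mbuf *m)
+{
+	struct m_tag *t;
+
+	t = m_tag_alloc(MTAG_ABI_COMPAT, PACKET_TAG_NONE,
+	    sizeof(uint32_t), M_NOWAIT);
+	if (t != NULL) {
+		*(uint32_t *)(t + 1) = 42;	/* payload follows the tag */
+		m_tag_prepend(m, t);
+	}
+}
+#endif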
+
+/* Unlink and free a packet tag. */
+void
+m_tag_delete(struct mbuf *m, struct m_tag *t)
+{
+
+ KASSERT(m && t, ("m_tag_delete: null argument, m %p t %p", m, t));
+ m_tag_unlink(m, t);
+ m_tag_free(t);
+}
+
+/* Unlink and free a packet tag chain, starting from given tag. */
+void
+m_tag_delete_chain(struct mbuf *m, struct m_tag *t)
+{
+ struct m_tag *p, *q;
+
+ KASSERT(m, ("m_tag_delete_chain: null mbuf"));
+ if (t != NULL)
+ p = t;
+ else
+ p = SLIST_FIRST(&m->m_pkthdr.tags);
+ if (p == NULL)
+ return;
+ while ((q = SLIST_NEXT(p, m_tag_link)) != NULL)
+ m_tag_delete(m, q);
+ m_tag_delete(m, p);
+}
+
+/*
+ * Strip off all tags that would normally vanish when
+ * passing through a network interface. Only persistent
+ * tags will exist after this; these are expected to remain
+ * so long as the mbuf chain exists, regardless of the
+ * path the mbufs take.
+ */
+void
+m_tag_delete_nonpersistent(struct mbuf *m)
+{
+ struct m_tag *p, *q;
+
+ SLIST_FOREACH_SAFE(p, &m->m_pkthdr.tags, m_tag_link, q)
+ if ((p->m_tag_id & MTAG_PERSISTENT) == 0)
+ m_tag_delete(m, p);
+}
+
+/* Find a tag, starting from a given position. */
+struct m_tag *
+m_tag_locate(struct mbuf *m, u_int32_t cookie, int type, struct m_tag *t)
+{
+ struct m_tag *p;
+
+ KASSERT(m, ("m_tag_locate: null mbuf"));
+ if (t == NULL)
+ p = SLIST_FIRST(&m->m_pkthdr.tags);
+ else
+ p = SLIST_NEXT(t, m_tag_link);
+ while (p != NULL) {
+ if (p->m_tag_cookie == cookie && p->m_tag_id == type)
+ return p;
+ p = SLIST_NEXT(p, m_tag_link);
+ }
+ return NULL;
+}
+
+/* Copy a single tag. */
+struct m_tag *
+m_tag_copy(struct m_tag *t, int how)
+{
+ struct m_tag *p;
+
+ MBUF_CHECKSLEEP(how);
+ KASSERT(t, ("m_tag_copy: null tag"));
+ p = m_tag_alloc(t->m_tag_cookie, t->m_tag_id, t->m_tag_len, how);
+ if (p == NULL)
+ return (NULL);
+#ifdef MAC
+ /*
+ * XXXMAC: we should probably pass off the initialization, and
+ * copying here? can we hide that PACKET_TAG_MACLABEL is
+ * special from the mbuf code?
+ */
+ if (t->m_tag_id == PACKET_TAG_MACLABEL) {
+ if (mac_mbuf_tag_init(p, how) != 0) {
+ m_tag_free(p);
+ return (NULL);
+ }
+ mac_mbuf_tag_copy(t, p);
+ } else
+#endif
+ bcopy(t + 1, p + 1, t->m_tag_len); /* Copy the data */
+ return p;
+}
+
+/*
+ * Copy a packet tag chain from one mbuf to another. The destination
+ * mbuf (to) loses any attached tags even if the operation fails. This
+ * should not be a problem, as
+ * m_tag_copy_chain() is typically called with a newly-allocated
+ * destination mbuf.
+ */
+int
+m_tag_copy_chain(struct mbuf *to, struct mbuf *from, int how)
+{
+ struct m_tag *p, *t, *tprev = NULL;
+
+ MBUF_CHECKSLEEP(how);
+ KASSERT(to && from,
+ ("m_tag_copy_chain: null argument, to %p from %p", to, from));
+ m_tag_delete_chain(to, NULL);
+ SLIST_FOREACH(p, &from->m_pkthdr.tags, m_tag_link) {
+ t = m_tag_copy(p, how);
+ if (t == NULL) {
+ m_tag_delete_chain(to, NULL);
+ return 0;
+ }
+ if (tprev == NULL)
+ SLIST_INSERT_HEAD(&to->m_pkthdr.tags, t, m_tag_link);
+ else
+ SLIST_INSERT_AFTER(tprev, t, m_tag_link);
+ tprev = t;
+ }
+ return 1;
+}
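+
+/*
+ * An illustrative sketch of the tag API above (the cookie value and the
+ * uint32_t payload layout are assumptions of the example, not part of
+ * this file; tag data is stored immediately after the m_tag header):
+ *
+ *	struct m_tag *t;
+ *
+ *	t = m_tag_alloc(0xdeadbeef, 0, sizeof(uint32_t), M_NOWAIT);
+ *	if (t != NULL) {
+ *		*(uint32_t *)(t + 1) = 42;
+ *		m_tag_prepend(m, t);
+ *	}
+ *	...
+ *	t = m_tag_locate(m, 0xdeadbeef, 0, NULL);
+ *	if (t != NULL)
+ *		m_tag_delete(m, t);
+ */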
diff --git a/rtems/freebsd/kern/uipc_socket.c b/rtems/freebsd/kern/uipc_socket.c
new file mode 100644
index 00000000..ff104127
--- /dev/null
+++ b/rtems/freebsd/kern/uipc_socket.c
@@ -0,0 +1,3575 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 1982, 1986, 1988, 1990, 1993
+ * The Regents of the University of California.
+ * Copyright (c) 2004 The FreeBSD Foundation
+ * Copyright (c) 2004-2008 Robert N. M. Watson
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)uipc_socket.c 8.3 (Berkeley) 4/15/94
+ */
+
+/*
+ * Comments on the socket life cycle:
+ *
+ * soalloc() sets up socket layer state for a socket, called only by
+ * socreate() and sonewconn(). Socket layer private.
+ *
+ * sodealloc() tears down socket layer state for a socket, called only by
+ * sofree() and sonewconn(). Socket layer private.
+ *
+ * pru_attach() associates protocol layer state with an allocated socket;
+ * called only once, may fail, aborting socket allocation. This is called
+ * from socreate() and sonewconn(). Socket layer private.
+ *
+ * pru_detach() disassociates protocol layer state from an attached socket,
+ * and will be called exactly once for sockets in which pru_attach() has
+ * been successfully called. If pru_attach() returned an error,
+ * pru_detach() will not be called. Socket layer private.
+ *
+ * pru_abort() and pru_close() notify the protocol layer that the last
+ * consumer of a socket is starting to tear down the socket, and that the
+ * protocol should terminate the connection. Historically, pru_abort() also
+ * detached protocol state from the socket state, but this is no longer the
+ * case.
+ *
+ * socreate() creates a socket and attaches protocol state. This is a public
+ * interface that may be used by socket layer consumers to create new
+ * sockets.
+ *
+ * sonewconn() creates a socket and attaches protocol state. This is a
+ * public interface that may be used by protocols to create new sockets when
+ * a new connection is received and will be available for accept() on a
+ * listen socket.
+ *
+ * soclose() destroys a socket after possibly waiting for it to disconnect.
+ * This is a public interface that socket consumers should use to close and
+ * release a socket when done with it.
+ *
+ * soabort() destroys a socket without waiting for it to disconnect (used
+ * only for incoming connections that are already partially or fully
+ * connected). This is used internally by the socket layer when clearing
+ * listen socket queues (due to overflow or close on the listen socket), but
+ * is also a public interface protocols may use to abort connections in
+ * their incomplete listen queues should they no longer be required. Sockets
+ * placed in completed connection listen queues should not be aborted for
+ * reasons described in the comment above the soclose() implementation. This
+ * is not a general purpose close routine, and except in the specific
+ * circumstances described here, should not be used.
+ *
+ * sofree() will free a socket and its protocol state if all references on
+ * the socket have been released, and is the public interface to attempt to
+ * free a socket when a reference is removed. This is a socket layer private
+ * interface.
+ *
+ * NOTE: In addition to socreate() and soclose(), which provide a single
+ * socket reference to the consumer to be managed as required, there are two
+ * calls to explicitly manage socket references, soref(), and sorele().
+ * Currently, these are generally required only when transitioning a socket
+ * from a listen queue to a file descriptor, in order to prevent garbage
+ * collection of the socket at an untimely moment. For a number of reasons,
+ * these interfaces are not preferred, and should be avoided.
+ */
+
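+/*
+ * An illustrative sketch of the life cycle above for an in-kernel
+ * consumer creating a TCP listen socket ("sin" and "td" are assumed to
+ * be set up by the caller):
+ *
+ *	struct socket *so;
+ *	int error;
+ *
+ *	error = socreate(AF_INET, &so, SOCK_STREAM, IPPROTO_TCP,
+ *	    td->td_ucred, td);
+ *	if (error != 0)
+ *		return (error);
+ *	error = sobind(so, (struct sockaddr *)&sin, td);
+ *	if (error == 0)
+ *		error = solisten(so, SOMAXCONN, td);
+ *	soclose(so);
+ *	return (error);
+ */
+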
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/freebsd/local/opt_inet.h>
+#include <rtems/freebsd/local/opt_inet6.h>
+#include <rtems/freebsd/local/opt_zero.h>
+#include <rtems/freebsd/local/opt_compat.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/fcntl.h>
+#include <rtems/freebsd/sys/limits.h>
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/mac.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/mutex.h>
+#include <rtems/freebsd/sys/domain.h>
+#include <rtems/freebsd/sys/file.h> /* for struct knote */
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/event.h>
+#include <rtems/freebsd/sys/eventhandler.h>
+#include <rtems/freebsd/sys/poll.h>
+#include <rtems/freebsd/sys/proc.h>
+#include <rtems/freebsd/sys/protosw.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/socketvar.h>
+#include <rtems/freebsd/sys/resourcevar.h>
+#include <rtems/freebsd/net/route.h>
+#include <rtems/freebsd/sys/signalvar.h>
+#include <rtems/freebsd/sys/stat.h>
+#include <rtems/freebsd/sys/sx.h>
+#include <rtems/freebsd/sys/sysctl.h>
+#include <rtems/freebsd/sys/uio.h>
+#include <rtems/freebsd/sys/jail.h>
+
+#include <rtems/freebsd/net/vnet.h>
+
+#include <rtems/freebsd/security/mac/mac_framework.h>
+
+#include <rtems/freebsd/vm/uma.h>
+
+#ifdef COMPAT_FREEBSD32
+#include <rtems/freebsd/sys/mount.h>
+#include <rtems/freebsd/sys/sysent.h>
+#include <rtems/freebsd/compat/freebsd32/freebsd32.h>
+#endif
+
+static int soreceive_rcvoob(struct socket *so, struct uio *uio,
+ int flags);
+
+static void filt_sordetach(struct knote *kn);
+static int filt_soread(struct knote *kn, long hint);
+static void filt_sowdetach(struct knote *kn);
+static int filt_sowrite(struct knote *kn, long hint);
+static int filt_solisten(struct knote *kn, long hint);
+
+static struct filterops solisten_filtops =
+ { 1, NULL, filt_sordetach, filt_solisten };
+static struct filterops soread_filtops =
+ { 1, NULL, filt_sordetach, filt_soread };
+static struct filterops sowrite_filtops =
+ { 1, NULL, filt_sowdetach, filt_sowrite };
+
+uma_zone_t socket_zone;
+so_gen_t so_gencnt; /* generation count for sockets */
+
+int maxsockets;
+
+MALLOC_DEFINE(M_SONAME, "soname", "socket name");
+MALLOC_DEFINE(M_PCB, "pcb", "protocol control block");
+
+static int somaxconn = SOMAXCONN;
+static int sysctl_somaxconn(SYSCTL_HANDLER_ARGS);
+/* XXX: we don't have SYSCTL_USHORT */
+SYSCTL_PROC(_kern_ipc, KIPC_SOMAXCONN, somaxconn, CTLTYPE_UINT | CTLFLAG_RW,
+ 0, sizeof(int), sysctl_somaxconn, "I", "Maximum pending socket connection "
+ "queue size");
+static int numopensockets;
+SYSCTL_INT(_kern_ipc, OID_AUTO, numopensockets, CTLFLAG_RD,
+ &numopensockets, 0, "Number of open sockets");
+#ifdef ZERO_COPY_SOCKETS
+/* These aren't static because they're used in other files. */
+int so_zero_copy_send = 1;
+int so_zero_copy_receive = 1;
+SYSCTL_NODE(_kern_ipc, OID_AUTO, zero_copy, CTLFLAG_RD, 0,
+ "Zero copy controls");
+SYSCTL_INT(_kern_ipc_zero_copy, OID_AUTO, receive, CTLFLAG_RW,
+ &so_zero_copy_receive, 0, "Enable zero copy receive");
+SYSCTL_INT(_kern_ipc_zero_copy, OID_AUTO, send, CTLFLAG_RW,
+ &so_zero_copy_send, 0, "Enable zero copy send");
+#endif /* ZERO_COPY_SOCKETS */
+
+/*
+ * accept_mtx locks down per-socket fields relating to accept queues. See
+ * socketvar.h for an annotation of the protected fields of struct socket.
+ */
+struct mtx accept_mtx;
+MTX_SYSINIT(accept_mtx, &accept_mtx, "accept", MTX_DEF);
+
+/*
+ * so_global_mtx protects so_gencnt, numopensockets, and the per-socket
+ * so_gencnt field.
+ */
+static struct mtx so_global_mtx;
+MTX_SYSINIT(so_global_mtx, &so_global_mtx, "so_glabel", MTX_DEF);
+
+/*
+ * General IPC sysctl name space, used by sockets and a variety of other IPC
+ * types.
+ */
+SYSCTL_NODE(_kern, KERN_IPC, ipc, CTLFLAG_RW, 0, "IPC");
+
+/*
+ * Sysctl to get and set the maximum global sockets limit. Notify protocols
+ * of the change so that they can update their dependent limits as required.
+ */
+static int
+sysctl_maxsockets(SYSCTL_HANDLER_ARGS)
+{
+ int error, newmaxsockets;
+
+ newmaxsockets = maxsockets;
+ error = sysctl_handle_int(oidp, &newmaxsockets, 0, req);
+ if (error == 0 && req->newptr) {
+ if (newmaxsockets > maxsockets) {
+ maxsockets = newmaxsockets;
+ if (maxsockets > ((maxfiles / 4) * 3)) {
+ maxfiles = (maxsockets * 5) / 4;
+ maxfilesperproc = (maxfiles * 9) / 10;
+ }
+ EVENTHANDLER_INVOKE(maxsockets_change);
+ } else
+ error = EINVAL;
+ }
+ return (error);
+}
+
+SYSCTL_PROC(_kern_ipc, OID_AUTO, maxsockets, CTLTYPE_INT|CTLFLAG_RW,
+ &maxsockets, 0, sysctl_maxsockets, "IU",
+    "Maximum number of sockets available");
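+
+/*
+ * For example, "sysctl kern.ipc.maxsockets=65536" grows the limit at run
+ * time (shrinking it is rejected with EINVAL by the handler above); when
+ * the new value exceeds 3/4 of maxfiles, maxfiles is raised to 5/4 of
+ * maxsockets and maxfilesperproc to 9/10 of the new maxfiles.
+ */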
+
+/*
+ * Initialise maxsockets. This SYSINIT must be run after
+ * tunable_mbinit().
+ */
+static void
+init_maxsockets(void *ignored)
+{
+
+ TUNABLE_INT_FETCH("kern.ipc.maxsockets", &maxsockets);
+ maxsockets = imax(maxsockets, imax(maxfiles, nmbclusters));
+}
+SYSINIT(param, SI_SUB_TUNABLES, SI_ORDER_ANY, init_maxsockets, NULL);
+
+/*
+ * Socket operation routines. These routines are called by the routines in
+ * sys_socket.c or from a system process, and implement the semantics of
+ * socket operations by switching out to the protocol specific routines.
+ */
+
+/*
+ * Get a socket structure from our zone, and initialize it. Note that it
+ * would probably be better to allocate socket and PCB at the same time, but
+ * I'm not convinced that all the protocols can be easily modified to do
+ * this.
+ *
+ * soalloc() returns a socket with a ref count of 0.
+ */
+static struct socket *
+soalloc(struct vnet *vnet)
+{
+ struct socket *so;
+
+ so = uma_zalloc(socket_zone, M_NOWAIT | M_ZERO);
+ if (so == NULL)
+ return (NULL);
+#ifdef MAC
+ if (mac_socket_init(so, M_NOWAIT) != 0) {
+ uma_zfree(socket_zone, so);
+ return (NULL);
+ }
+#endif
+ SOCKBUF_LOCK_INIT(&so->so_snd, "so_snd");
+ SOCKBUF_LOCK_INIT(&so->so_rcv, "so_rcv");
+ sx_init(&so->so_snd.sb_sx, "so_snd_sx");
+ sx_init(&so->so_rcv.sb_sx, "so_rcv_sx");
+ TAILQ_INIT(&so->so_aiojobq);
+ mtx_lock(&so_global_mtx);
+ so->so_gencnt = ++so_gencnt;
+ ++numopensockets;
+#ifdef VIMAGE
+ vnet->vnet_sockcnt++;
+ so->so_vnet = vnet;
+#endif
+ mtx_unlock(&so_global_mtx);
+ return (so);
+}
+
+/*
+ * Free the storage associated with a socket at the socket layer, tear down
+ * locks, labels, etc. All protocol state is assumed already to have been
+ * torn down (and possibly never set up) by the caller.
+ */
+static void
+sodealloc(struct socket *so)
+{
+
+ KASSERT(so->so_count == 0, ("sodealloc(): so_count %d", so->so_count));
+ KASSERT(so->so_pcb == NULL, ("sodealloc(): so_pcb != NULL"));
+
+ mtx_lock(&so_global_mtx);
+ so->so_gencnt = ++so_gencnt;
+ --numopensockets; /* Could be below, but faster here. */
+#ifdef VIMAGE
+ so->so_vnet->vnet_sockcnt--;
+#endif
+ mtx_unlock(&so_global_mtx);
+ if (so->so_rcv.sb_hiwat)
+ (void)chgsbsize(so->so_cred->cr_uidinfo,
+ &so->so_rcv.sb_hiwat, 0, RLIM_INFINITY);
+ if (so->so_snd.sb_hiwat)
+ (void)chgsbsize(so->so_cred->cr_uidinfo,
+ &so->so_snd.sb_hiwat, 0, RLIM_INFINITY);
+#ifdef INET
+	/* remove accept filter if one is present. */
+ if (so->so_accf != NULL)
+ do_setopt_accept_filter(so, NULL);
+#endif
+#ifdef MAC
+ mac_socket_destroy(so);
+#endif
+ crfree(so->so_cred);
+ sx_destroy(&so->so_snd.sb_sx);
+ sx_destroy(&so->so_rcv.sb_sx);
+ SOCKBUF_LOCK_DESTROY(&so->so_snd);
+ SOCKBUF_LOCK_DESTROY(&so->so_rcv);
+ uma_zfree(socket_zone, so);
+}
+
+/*
+ * socreate returns a socket with a ref count of 1. The socket should be
+ * closed with soclose().
+ */
+int
+socreate(int dom, struct socket **aso, int type, int proto,
+ struct ucred *cred, struct thread *td)
+{
+ struct protosw *prp;
+ struct socket *so;
+ int error;
+
+ if (proto)
+ prp = pffindproto(dom, proto, type);
+ else
+ prp = pffindtype(dom, type);
+
+ if (prp == NULL || prp->pr_usrreqs->pru_attach == NULL ||
+ prp->pr_usrreqs->pru_attach == pru_attach_notsupp)
+ return (EPROTONOSUPPORT);
+
+ if (prison_check_af(cred, prp->pr_domain->dom_family) != 0)
+ return (EPROTONOSUPPORT);
+
+ if (prp->pr_type != type)
+ return (EPROTOTYPE);
+ so = soalloc(CRED_TO_VNET(cred));
+ if (so == NULL)
+ return (ENOBUFS);
+
+ TAILQ_INIT(&so->so_incomp);
+ TAILQ_INIT(&so->so_comp);
+ so->so_type = type;
+ so->so_cred = crhold(cred);
+ if ((prp->pr_domain->dom_family == PF_INET) ||
+ (prp->pr_domain->dom_family == PF_ROUTE))
+ so->so_fibnum = td->td_proc->p_fibnum;
+ else
+ so->so_fibnum = 0;
+ so->so_proto = prp;
+#ifdef MAC
+ mac_socket_create(cred, so);
+#endif
+ knlist_init_mtx(&so->so_rcv.sb_sel.si_note, SOCKBUF_MTX(&so->so_rcv));
+ knlist_init_mtx(&so->so_snd.sb_sel.si_note, SOCKBUF_MTX(&so->so_snd));
+ so->so_count = 1;
+ /*
+ * Auto-sizing of socket buffers is managed by the protocols and
+ * the appropriate flags must be set in the pru_attach function.
+ */
+ CURVNET_SET(so->so_vnet);
+ error = (*prp->pr_usrreqs->pru_attach)(so, proto, td);
+ CURVNET_RESTORE();
+ if (error) {
+ KASSERT(so->so_count == 1, ("socreate: so_count %d",
+ so->so_count));
+ so->so_count = 0;
+ sodealloc(so);
+ return (error);
+ }
+ *aso = so;
+ return (0);
+}
+
+#ifdef REGRESSION
+static int regression_sonewconn_earlytest = 1;
+SYSCTL_INT(_regression, OID_AUTO, sonewconn_earlytest, CTLFLAG_RW,
+ &regression_sonewconn_earlytest, 0, "Perform early sonewconn limit test");
+#endif
+
+/*
+ * When an attempt at a new connection is noted on a socket which accepts
+ * connections, sonewconn is called. If the connection is possible (subject
+ * to space constraints, etc.) then we allocate a new structure, properly
+ * linked into the data structure of the original socket, and return this.
+ * Connstatus may be 0, SS_ISCONFIRMING, or SS_ISCONNECTED.
+ *
+ * Note: the ref count on the socket is 0 on return.
+ */
+struct socket *
+sonewconn(struct socket *head, int connstatus)
+{
+ struct socket *so;
+ int over;
+
+ ACCEPT_LOCK();
+ over = (head->so_qlen > 3 * head->so_qlimit / 2);
+ ACCEPT_UNLOCK();
+#ifdef REGRESSION
+ if (regression_sonewconn_earlytest && over)
+#else
+ if (over)
+#endif
+ return (NULL);
+ VNET_ASSERT(head->so_vnet);
+ so = soalloc(head->so_vnet);
+ if (so == NULL)
+ return (NULL);
+ if ((head->so_options & SO_ACCEPTFILTER) != 0)
+ connstatus = 0;
+ so->so_head = head;
+ so->so_type = head->so_type;
+ so->so_options = head->so_options &~ SO_ACCEPTCONN;
+ so->so_linger = head->so_linger;
+ so->so_state = head->so_state | SS_NOFDREF;
+ so->so_fibnum = head->so_fibnum;
+ so->so_proto = head->so_proto;
+ so->so_cred = crhold(head->so_cred);
+#ifdef MAC
+ mac_socket_newconn(head, so);
+#endif
+ knlist_init_mtx(&so->so_rcv.sb_sel.si_note, SOCKBUF_MTX(&so->so_rcv));
+ knlist_init_mtx(&so->so_snd.sb_sel.si_note, SOCKBUF_MTX(&so->so_snd));
+ if (soreserve(so, head->so_snd.sb_hiwat, head->so_rcv.sb_hiwat) ||
+ (*so->so_proto->pr_usrreqs->pru_attach)(so, 0, NULL)) {
+ sodealloc(so);
+ return (NULL);
+ }
+ so->so_rcv.sb_lowat = head->so_rcv.sb_lowat;
+ so->so_snd.sb_lowat = head->so_snd.sb_lowat;
+ so->so_rcv.sb_timeo = head->so_rcv.sb_timeo;
+ so->so_snd.sb_timeo = head->so_snd.sb_timeo;
+ so->so_rcv.sb_flags |= head->so_rcv.sb_flags & SB_AUTOSIZE;
+ so->so_snd.sb_flags |= head->so_snd.sb_flags & SB_AUTOSIZE;
+ so->so_state |= connstatus;
+ ACCEPT_LOCK();
+ if (connstatus) {
+ TAILQ_INSERT_TAIL(&head->so_comp, so, so_list);
+ so->so_qstate |= SQ_COMP;
+ head->so_qlen++;
+ } else {
+ /*
+ * Keep removing sockets from the head until there's room for
+ * us to insert on the tail. In pre-locking revisions, this
+ * was a simple if(), but as we could be racing with other
+ * threads and soabort() requires dropping locks, we must
+ * loop waiting for the condition to be true.
+ */
+ while (head->so_incqlen > head->so_qlimit) {
+ struct socket *sp;
+ sp = TAILQ_FIRST(&head->so_incomp);
+ TAILQ_REMOVE(&head->so_incomp, sp, so_list);
+ head->so_incqlen--;
+ sp->so_qstate &= ~SQ_INCOMP;
+ sp->so_head = NULL;
+ ACCEPT_UNLOCK();
+ soabort(sp);
+ ACCEPT_LOCK();
+ }
+ TAILQ_INSERT_TAIL(&head->so_incomp, so, so_list);
+ so->so_qstate |= SQ_INCOMP;
+ head->so_incqlen++;
+ }
+ ACCEPT_UNLOCK();
+ if (connstatus) {
+ sorwakeup(head);
+ wakeup_one(&head->so_timeo);
+ }
+ return (so);
+}
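+
+/*
+ * A sketch of the expected protocol-side usage (illustrative, not taken
+ * from this file): on an incoming connection request the protocol calls
+ * sonewconn(head, 0), which queues the new socket on head's incomplete
+ * queue; once the handshake finishes it calls soisconnected(so), moving
+ * the socket to the completed queue where accept() will find it.
+ */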
+
+int
+sobind(struct socket *so, struct sockaddr *nam, struct thread *td)
+{
+ int error;
+
+ CURVNET_SET(so->so_vnet);
+ error = (*so->so_proto->pr_usrreqs->pru_bind)(so, nam, td);
+ CURVNET_RESTORE();
+ return error;
+}
+
+/*
+ * solisten() transitions a socket from a non-listening state to a listening
+ * state, but can also be used to update the listen queue depth on an
+ * existing listen socket. The protocol will call back into the sockets
+ * layer using solisten_proto_check() and solisten_proto() to check and set
+ * socket-layer listen state. Call backs are used so that the protocol can
+ * acquire both protocol and socket layer locks in whatever order is required
+ * by the protocol.
+ *
+ * Protocol implementors are advised to hold the socket lock across the
+ * socket-layer test and set to avoid races at the socket layer.
+ */
+int
+solisten(struct socket *so, int backlog, struct thread *td)
+{
+
+ return ((*so->so_proto->pr_usrreqs->pru_listen)(so, backlog, td));
+}
+
+int
+solisten_proto_check(struct socket *so)
+{
+
+ SOCK_LOCK_ASSERT(so);
+
+ if (so->so_state & (SS_ISCONNECTED | SS_ISCONNECTING |
+ SS_ISDISCONNECTING))
+ return (EINVAL);
+ return (0);
+}
+
+void
+solisten_proto(struct socket *so, int backlog)
+{
+
+ SOCK_LOCK_ASSERT(so);
+
+ if (backlog < 0 || backlog > somaxconn)
+ backlog = somaxconn;
+ so->so_qlimit = backlog;
+ so->so_options |= SO_ACCEPTCONN;
+}
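+
+/*
+ * An illustrative sketch of how a protocol's pru_listen method might use
+ * the two callbacks above ("example_pru_listen" is a made-up name, and a
+ * real protocol would take its own lock before the socket lock):
+ *
+ *	static int
+ *	example_pru_listen(struct socket *so, int backlog, struct thread *td)
+ *	{
+ *		int error;
+ *
+ *		SOCK_LOCK(so);
+ *		error = solisten_proto_check(so);
+ *		if (error == 0)
+ *			solisten_proto(so, backlog);
+ *		SOCK_UNLOCK(so);
+ *		return (error);
+ *	}
+ */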
+
+/*
+ * Attempt to free a socket. This should really be sotryfree().
+ *
+ * sofree() will succeed if:
+ *
+ * - There are no outstanding file descriptor references or related consumers
+ * (so_count == 0).
+ *
+ * - The socket has been closed by user space, if ever open (SS_NOFDREF).
+ *
+ * - The protocol does not have an outstanding strong reference on the socket
+ * (SS_PROTOREF).
+ *
+ * - The socket is not in a completed connection queue, where a process has
+ *   already been notified that it is present. If it were removed there,
+ *   the user process could block in accept() despite select() saying the
+ *   socket was ready.
+ *
+ * Otherwise, it will quietly abort so that a future call to sofree(), when
+ * conditions are right, can succeed.
+ */
+void
+sofree(struct socket *so)
+{
+ struct protosw *pr = so->so_proto;
+ struct socket *head;
+
+ ACCEPT_LOCK_ASSERT();
+ SOCK_LOCK_ASSERT(so);
+
+ if ((so->so_state & SS_NOFDREF) == 0 || so->so_count != 0 ||
+ (so->so_state & SS_PROTOREF) || (so->so_qstate & SQ_COMP)) {
+ SOCK_UNLOCK(so);
+ ACCEPT_UNLOCK();
+ return;
+ }
+
+ head = so->so_head;
+ if (head != NULL) {
+ KASSERT((so->so_qstate & SQ_COMP) != 0 ||
+ (so->so_qstate & SQ_INCOMP) != 0,
+ ("sofree: so_head != NULL, but neither SQ_COMP nor "
+ "SQ_INCOMP"));
+ KASSERT((so->so_qstate & SQ_COMP) == 0 ||
+ (so->so_qstate & SQ_INCOMP) == 0,
+ ("sofree: so->so_qstate is SQ_COMP and also SQ_INCOMP"));
+ TAILQ_REMOVE(&head->so_incomp, so, so_list);
+ head->so_incqlen--;
+ so->so_qstate &= ~SQ_INCOMP;
+ so->so_head = NULL;
+ }
+ KASSERT((so->so_qstate & SQ_COMP) == 0 &&
+ (so->so_qstate & SQ_INCOMP) == 0,
+ ("sofree: so_head == NULL, but still SQ_COMP(%d) or SQ_INCOMP(%d)",
+ so->so_qstate & SQ_COMP, so->so_qstate & SQ_INCOMP));
+ if (so->so_options & SO_ACCEPTCONN) {
+ KASSERT((TAILQ_EMPTY(&so->so_comp)), ("sofree: so_comp populated"));
+		KASSERT((TAILQ_EMPTY(&so->so_incomp)), ("sofree: so_incomp populated"));
+ }
+ SOCK_UNLOCK(so);
+ ACCEPT_UNLOCK();
+
+ if (pr->pr_flags & PR_RIGHTS && pr->pr_domain->dom_dispose != NULL)
+ (*pr->pr_domain->dom_dispose)(so->so_rcv.sb_mb);
+ if (pr->pr_usrreqs->pru_detach != NULL)
+ (*pr->pr_usrreqs->pru_detach)(so);
+
+ /*
+ * From this point on, we assume that no other references to this
+ * socket exist anywhere else in the stack. Therefore, no locks need
+ * to be acquired or held.
+ *
+ * We used to do a lot of socket buffer and socket locking here, as
+ * well as invoke sorflush() and perform wakeups. The direct call to
+ * dom_dispose() and sbrelease_internal() are an inlining of what was
+ * necessary from sorflush().
+ *
+ * Notice that the socket buffer and kqueue state are torn down
+	 * before calling pru_detach. This means that protocols should not
+ * assume they can perform socket wakeups, etc, in their detach code.
+ */
+ sbdestroy(&so->so_snd, so);
+ sbdestroy(&so->so_rcv, so);
+ knlist_destroy(&so->so_rcv.sb_sel.si_note);
+ knlist_destroy(&so->so_snd.sb_sel.si_note);
+ sodealloc(so);
+}
+
+/*
+ * Close a socket on last file table reference removal. Initiate disconnect
+ * if connected. Free socket when disconnect complete.
+ *
+ * This function will sorele() the socket. Note that soclose() may be called
+ * prior to the ref count reaching zero. The actual socket structure will
+ * not be freed until the ref count reaches zero.
+ */
+int
+soclose(struct socket *so)
+{
+ int error = 0;
+
+ KASSERT(!(so->so_state & SS_NOFDREF), ("soclose: SS_NOFDREF on enter"));
+
+ CURVNET_SET(so->so_vnet);
+ funsetown(&so->so_sigio);
+ if (so->so_state & SS_ISCONNECTED) {
+ if ((so->so_state & SS_ISDISCONNECTING) == 0) {
+ error = sodisconnect(so);
+ if (error) {
+ if (error == ENOTCONN)
+ error = 0;
+ goto drop;
+ }
+ }
+ if (so->so_options & SO_LINGER) {
+ if ((so->so_state & SS_ISDISCONNECTING) &&
+ (so->so_state & SS_NBIO))
+ goto drop;
+ while (so->so_state & SS_ISCONNECTED) {
+ error = tsleep(&so->so_timeo,
+ PSOCK | PCATCH, "soclos", so->so_linger * hz);
+ if (error)
+ break;
+ }
+ }
+ }
+
+drop:
+ if (so->so_proto->pr_usrreqs->pru_close != NULL)
+ (*so->so_proto->pr_usrreqs->pru_close)(so);
+ if (so->so_options & SO_ACCEPTCONN) {
+ struct socket *sp;
+ ACCEPT_LOCK();
+ while ((sp = TAILQ_FIRST(&so->so_incomp)) != NULL) {
+ TAILQ_REMOVE(&so->so_incomp, sp, so_list);
+ so->so_incqlen--;
+ sp->so_qstate &= ~SQ_INCOMP;
+ sp->so_head = NULL;
+ ACCEPT_UNLOCK();
+ soabort(sp);
+ ACCEPT_LOCK();
+ }
+ while ((sp = TAILQ_FIRST(&so->so_comp)) != NULL) {
+ TAILQ_REMOVE(&so->so_comp, sp, so_list);
+ so->so_qlen--;
+ sp->so_qstate &= ~SQ_COMP;
+ sp->so_head = NULL;
+ ACCEPT_UNLOCK();
+ soabort(sp);
+ ACCEPT_LOCK();
+ }
+ ACCEPT_UNLOCK();
+ }
+ ACCEPT_LOCK();
+ SOCK_LOCK(so);
+ KASSERT((so->so_state & SS_NOFDREF) == 0, ("soclose: NOFDREF"));
+ so->so_state |= SS_NOFDREF;
+ sorele(so);
+ CURVNET_RESTORE();
+ return (error);
+}
+
+/*
+ * soabort() is used to abruptly tear down a connection, such as when a
+ * resource limit is reached (listen queue depth exceeded), or if a listen
+ * socket is closed while there are sockets waiting to be accepted.
+ *
+ * This interface is tricky, because it is called on an unreferenced socket,
+ * and must be called only by a thread that has actually removed the socket
+ * from the listen queue it was on, or races with other threads are risked.
+ *
+ * This interface will call into the protocol code, so must not be called
+ * with any socket locks held. Protocols do call it while holding their own
+ * recursible protocol mutexes, but this is something that should be subject
+ * to review in the future.
+ */
+void
+soabort(struct socket *so)
+{
+
+ /*
+	 * As far as possible, assert that no references to this
+ * socket are held. This is not quite the same as asserting that the
+ * current thread is responsible for arranging for no references, but
+ * is as close as we can get for now.
+ */
+ KASSERT(so->so_count == 0, ("soabort: so_count"));
+ KASSERT((so->so_state & SS_PROTOREF) == 0, ("soabort: SS_PROTOREF"));
+ KASSERT(so->so_state & SS_NOFDREF, ("soabort: !SS_NOFDREF"));
+	KASSERT((so->so_qstate & SQ_COMP) == 0, ("soabort: SQ_COMP"));
+	KASSERT((so->so_qstate & SQ_INCOMP) == 0, ("soabort: SQ_INCOMP"));
+
+ if (so->so_proto->pr_usrreqs->pru_abort != NULL)
+ (*so->so_proto->pr_usrreqs->pru_abort)(so);
+ ACCEPT_LOCK();
+ SOCK_LOCK(so);
+ sofree(so);
+}
+
+int
+soaccept(struct socket *so, struct sockaddr **nam)
+{
+ int error;
+
+ SOCK_LOCK(so);
+ KASSERT((so->so_state & SS_NOFDREF) != 0, ("soaccept: !NOFDREF"));
+ so->so_state &= ~SS_NOFDREF;
+ SOCK_UNLOCK(so);
+ error = (*so->so_proto->pr_usrreqs->pru_accept)(so, nam);
+ return (error);
+}
+
+int
+soconnect(struct socket *so, struct sockaddr *nam, struct thread *td)
+{
+ int error;
+
+ if (so->so_options & SO_ACCEPTCONN)
+ return (EOPNOTSUPP);
+
+ CURVNET_SET(so->so_vnet);
+ /*
+ * If protocol is connection-based, can only connect once.
+ * Otherwise, if connected, try to disconnect first. This allows
+ * user to disconnect by connecting to, e.g., a null address.
+ */
+ if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING) &&
+ ((so->so_proto->pr_flags & PR_CONNREQUIRED) ||
+ (error = sodisconnect(so)))) {
+ error = EISCONN;
+ } else {
+ /*
+ * Prevent accumulated error from previous connection from
+ * biting us.
+ */
+ so->so_error = 0;
+ error = (*so->so_proto->pr_usrreqs->pru_connect)(so, nam, td);
+ }
+ CURVNET_RESTORE();
+
+ return (error);
+}
+
+int
+soconnect2(struct socket *so1, struct socket *so2)
+{
+
+ return ((*so1->so_proto->pr_usrreqs->pru_connect2)(so1, so2));
+}
+
+int
+sodisconnect(struct socket *so)
+{
+ int error;
+
+ if ((so->so_state & SS_ISCONNECTED) == 0)
+ return (ENOTCONN);
+ if (so->so_state & SS_ISDISCONNECTING)
+ return (EALREADY);
+ error = (*so->so_proto->pr_usrreqs->pru_disconnect)(so);
+ return (error);
+}
+
+#ifdef ZERO_COPY_SOCKETS
+struct so_zerocopy_stats{
+ int size_ok;
+ int align_ok;
+ int found_ifp;
+};
+struct so_zerocopy_stats so_zerocp_stats = {0,0,0};
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/net/route.h>
+#include <rtems/freebsd/netinet/in_pcb.h>
+#include <rtems/freebsd/vm/vm.h>
+#include <rtems/freebsd/vm/vm_page.h>
+#include <rtems/freebsd/vm/vm_object.h>
+
+/*
+ * sosend_copyin() is only used if zero copy sockets are enabled. Otherwise
+ * sosend_dgram() and sosend_generic() use m_uiotombuf().
+ *
+ * sosend_copyin() accepts a uio and prepares an mbuf chain holding part or
+ * all of the data referenced by the uio. If desired, it uses zero-copy.
+ * *space will be updated to reflect data copied in.
+ *
+ * NB: If atomic I/O is requested, the caller must already have checked that
+ * space can hold resid bytes.
+ *
+ * NB: In the event of an error, the caller may need to free the partial
+ * chain pointed to by *mpp. The contents of both *uio and *space may be
+ * modified even in the case of an error.
+ */
+static int
+sosend_copyin(struct uio *uio, struct mbuf **retmp, int atomic, long *space,
+ int flags)
+{
+ struct mbuf *m, **mp, *top;
+ long len, resid;
+ int error;
+#ifdef ZERO_COPY_SOCKETS
+ int cow_send;
+#endif
+
+ *retmp = top = NULL;
+ mp = &top;
+ len = 0;
+ resid = uio->uio_resid;
+ error = 0;
+ do {
+#ifdef ZERO_COPY_SOCKETS
+ cow_send = 0;
+#endif /* ZERO_COPY_SOCKETS */
+ if (resid >= MINCLSIZE) {
+#ifdef ZERO_COPY_SOCKETS
+ if (top == NULL) {
+ m = m_gethdr(M_WAITOK, MT_DATA);
+ m->m_pkthdr.len = 0;
+ m->m_pkthdr.rcvif = NULL;
+ } else
+ m = m_get(M_WAITOK, MT_DATA);
+ if (so_zero_copy_send &&
+ resid>=PAGE_SIZE &&
+ *space>=PAGE_SIZE &&
+ uio->uio_iov->iov_len>=PAGE_SIZE) {
+ so_zerocp_stats.size_ok++;
+ so_zerocp_stats.align_ok++;
+ cow_send = socow_setup(m, uio);
+ len = cow_send;
+ }
+ if (!cow_send) {
+ m_clget(m, M_WAITOK);
+ len = min(min(MCLBYTES, resid), *space);
+ }
+#else /* ZERO_COPY_SOCKETS */
+ if (top == NULL) {
+ m = m_getcl(M_WAIT, MT_DATA, M_PKTHDR);
+ m->m_pkthdr.len = 0;
+ m->m_pkthdr.rcvif = NULL;
+ } else
+ m = m_getcl(M_WAIT, MT_DATA, 0);
+ len = min(min(MCLBYTES, resid), *space);
+#endif /* ZERO_COPY_SOCKETS */
+ } else {
+ if (top == NULL) {
+ m = m_gethdr(M_WAIT, MT_DATA);
+ m->m_pkthdr.len = 0;
+ m->m_pkthdr.rcvif = NULL;
+
+ len = min(min(MHLEN, resid), *space);
+ /*
+ * For datagram protocols, leave room
+ * for protocol headers in first mbuf.
+ */
+ if (atomic && m && len < MHLEN)
+ MH_ALIGN(m, len);
+ } else {
+ m = m_get(M_WAIT, MT_DATA);
+ len = min(min(MLEN, resid), *space);
+ }
+ }
+ if (m == NULL) {
+ error = ENOBUFS;
+ goto out;
+ }
+
+ *space -= len;
+#ifdef ZERO_COPY_SOCKETS
+ if (cow_send)
+ error = 0;
+ else
+#endif /* ZERO_COPY_SOCKETS */
+ error = uiomove(mtod(m, void *), (int)len, uio);
+ resid = uio->uio_resid;
+ m->m_len = len;
+ *mp = m;
+ top->m_pkthdr.len += len;
+ if (error)
+ goto out;
+ mp = &m->m_next;
+ if (resid <= 0) {
+ if (flags & MSG_EOR)
+ top->m_flags |= M_EOR;
+ break;
+ }
+ } while (*space > 0 && atomic);
+out:
+ *retmp = top;
+ return (error);
+}
+#endif /*ZERO_COPY_SOCKETS*/
+
+#define SBLOCKWAIT(f) (((f) & MSG_DONTWAIT) ? 0 : SBL_WAIT)
+
+int
+sosend_dgram(struct socket *so, struct sockaddr *addr, struct uio *uio,
+ struct mbuf *top, struct mbuf *control, int flags, struct thread *td)
+{
+ long space, resid;
+ int clen = 0, error, dontroute;
+#ifdef ZERO_COPY_SOCKETS
+ int atomic = sosendallatonce(so) || top;
+#endif
+
+	KASSERT(so->so_type == SOCK_DGRAM, ("sosend_dgram: !SOCK_DGRAM"));
+	KASSERT(so->so_proto->pr_flags & PR_ATOMIC,
+	    ("sosend_dgram: !PR_ATOMIC"));
+
+ if (uio != NULL)
+ resid = uio->uio_resid;
+ else
+ resid = top->m_pkthdr.len;
+ /*
+ * In theory resid should be unsigned. However, space must be
+ * signed, as it might be less than 0 if we over-committed, and we
+ * must use a signed comparison of space and resid. On the other
+ * hand, a negative resid causes us to loop sending 0-length
+ * segments to the protocol.
+ */
+ if (resid < 0) {
+ error = EINVAL;
+ goto out;
+ }
+
+ dontroute =
+ (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0;
+ if (td != NULL)
+ td->td_ru.ru_msgsnd++;
+ if (control != NULL)
+ clen = control->m_len;
+
+ SOCKBUF_LOCK(&so->so_snd);
+ if (so->so_snd.sb_state & SBS_CANTSENDMORE) {
+ SOCKBUF_UNLOCK(&so->so_snd);
+ error = EPIPE;
+ goto out;
+ }
+ if (so->so_error) {
+ error = so->so_error;
+ so->so_error = 0;
+ SOCKBUF_UNLOCK(&so->so_snd);
+ goto out;
+ }
+ if ((so->so_state & SS_ISCONNECTED) == 0) {
+ /*
+		 * `sendto' and `sendmsg' are allowed on a connection-based
+ * socket if it supports implied connect. Return ENOTCONN if
+ * not connected and no address is supplied.
+ */
+ if ((so->so_proto->pr_flags & PR_CONNREQUIRED) &&
+ (so->so_proto->pr_flags & PR_IMPLOPCL) == 0) {
+ if ((so->so_state & SS_ISCONFIRMING) == 0 &&
+ !(resid == 0 && clen != 0)) {
+ SOCKBUF_UNLOCK(&so->so_snd);
+ error = ENOTCONN;
+ goto out;
+ }
+ } else if (addr == NULL) {
+ if (so->so_proto->pr_flags & PR_CONNREQUIRED)
+ error = ENOTCONN;
+ else
+ error = EDESTADDRREQ;
+ SOCKBUF_UNLOCK(&so->so_snd);
+ goto out;
+ }
+ }
+
+	/*
+	 * Do we need MSG_OOB support in SOCK_DGRAM? The signedness of the
+	 * space calculation here may be a problem and need fixing.
+	 */
+ space = sbspace(&so->so_snd);
+ if (flags & MSG_OOB)
+ space += 1024;
+ space -= clen;
+ SOCKBUF_UNLOCK(&so->so_snd);
+ if (resid > space) {
+ error = EMSGSIZE;
+ goto out;
+ }
+ if (uio == NULL) {
+ resid = 0;
+ if (flags & MSG_EOR)
+ top->m_flags |= M_EOR;
+ } else {
+#ifdef ZERO_COPY_SOCKETS
+ error = sosend_copyin(uio, &top, atomic, &space, flags);
+ if (error)
+ goto out;
+#else
+ /*
+ * Copy the data from userland into a mbuf chain.
+ * If no data is to be copied in, a single empty mbuf
+ * is returned.
+ */
+ top = m_uiotombuf(uio, M_WAITOK, space, max_hdr,
+ (M_PKTHDR | ((flags & MSG_EOR) ? M_EOR : 0)));
+ if (top == NULL) {
+ error = EFAULT; /* only possible error */
+ goto out;
+ }
+ space -= resid - uio->uio_resid;
+#endif
+ resid = uio->uio_resid;
+ }
+ KASSERT(resid == 0, ("sosend_dgram: resid != 0"));
+ /*
+ * XXXRW: Frobbing SO_DONTROUTE here is even worse without sblock
+ * than with.
+ */
+ if (dontroute) {
+ SOCK_LOCK(so);
+ so->so_options |= SO_DONTROUTE;
+ SOCK_UNLOCK(so);
+ }
+ /*
+ * XXX all the SBS_CANTSENDMORE checks previously done could be out
+	 * of date. We could have received a reset packet in an interrupt or
+ * maybe we slept while doing page faults in uiomove() etc. We could
+ * probably recheck again inside the locking protection here, but
+ * there are probably other places that this also happens. We must
+ * rethink this.
+ */
+ error = (*so->so_proto->pr_usrreqs->pru_send)(so,
+ (flags & MSG_OOB) ? PRUS_OOB :
+ /*
+ * If the user set MSG_EOF, the protocol understands this flag and
+ * nothing left to send then use PRU_SEND_EOF instead of PRU_SEND.
+ */
+ ((flags & MSG_EOF) &&
+ (so->so_proto->pr_flags & PR_IMPLOPCL) &&
+ (resid <= 0)) ?
+ PRUS_EOF :
+ /* If there is more to send set PRUS_MORETOCOME */
+ (resid > 0 && space > 0) ? PRUS_MORETOCOME : 0,
+ top, addr, control, td);
+ if (dontroute) {
+ SOCK_LOCK(so);
+ so->so_options &= ~SO_DONTROUTE;
+ SOCK_UNLOCK(so);
+ }
+ clen = 0;
+ control = NULL;
+ top = NULL;
+out:
+ if (top != NULL)
+ m_freem(top);
+ if (control != NULL)
+ m_freem(control);
+ return (error);
+}
+
+/*
+ * Send on a socket. If send must go all at once and message is larger than
+ * send buffering, then hard error. Lock against other senders. If must go
+ * all at once and not enough room now, then inform user that this would
+ * block and do nothing. Otherwise, if nonblocking, send as much as
+ * possible. The data to be sent is described by "uio" if nonzero, otherwise
+ * by the mbuf chain "top" (which must be null if uio is not). Data provided
+ * in mbuf chain must be small enough to send all at once.
+ *
+ * Returns nonzero on error, timeout or signal; callers must check for short
+ * counts if EINTR/ERESTART are returned. Data and control buffers are freed
+ * on return.
+ */
+int
+sosend_generic(struct socket *so, struct sockaddr *addr, struct uio *uio,
+ struct mbuf *top, struct mbuf *control, int flags, struct thread *td)
+{
+ long space, resid;
+ int clen = 0, error, dontroute;
+ int atomic = sosendallatonce(so) || top;
+
+ if (uio != NULL)
+ resid = uio->uio_resid;
+ else
+ resid = top->m_pkthdr.len;
+ /*
+ * In theory resid should be unsigned. However, space must be
+ * signed, as it might be less than 0 if we over-committed, and we
+ * must use a signed comparison of space and resid. On the other
+ * hand, a negative resid causes us to loop sending 0-length
+ * segments to the protocol.
+ *
+ * Also check to make sure that MSG_EOR isn't used on SOCK_STREAM
+ * type sockets since that's an error.
+ */
+ if (resid < 0 || (so->so_type == SOCK_STREAM && (flags & MSG_EOR))) {
+ error = EINVAL;
+ goto out;
+ }
+
+ dontroute =
+ (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0 &&
+ (so->so_proto->pr_flags & PR_ATOMIC);
+ if (td != NULL)
+ td->td_ru.ru_msgsnd++;
+ if (control != NULL)
+ clen = control->m_len;
+
+ error = sblock(&so->so_snd, SBLOCKWAIT(flags));
+ if (error)
+ goto out;
+
+restart:
+ do {
+ SOCKBUF_LOCK(&so->so_snd);
+ if (so->so_snd.sb_state & SBS_CANTSENDMORE) {
+ SOCKBUF_UNLOCK(&so->so_snd);
+ error = EPIPE;
+ goto release;
+ }
+ if (so->so_error) {
+ error = so->so_error;
+ so->so_error = 0;
+ SOCKBUF_UNLOCK(&so->so_snd);
+ goto release;
+ }
+ if ((so->so_state & SS_ISCONNECTED) == 0) {
+ /*
+			 * `sendto' and `sendmsg' are allowed on a connection-
+ * based socket if it supports implied connect.
+ * Return ENOTCONN if not connected and no address is
+ * supplied.
+ */
+ if ((so->so_proto->pr_flags & PR_CONNREQUIRED) &&
+ (so->so_proto->pr_flags & PR_IMPLOPCL) == 0) {
+ if ((so->so_state & SS_ISCONFIRMING) == 0 &&
+ !(resid == 0 && clen != 0)) {
+ SOCKBUF_UNLOCK(&so->so_snd);
+ error = ENOTCONN;
+ goto release;
+ }
+ } else if (addr == NULL) {
+ SOCKBUF_UNLOCK(&so->so_snd);
+ if (so->so_proto->pr_flags & PR_CONNREQUIRED)
+ error = ENOTCONN;
+ else
+ error = EDESTADDRREQ;
+ goto release;
+ }
+ }
+ space = sbspace(&so->so_snd);
+ if (flags & MSG_OOB)
+ space += 1024;
+ if ((atomic && resid > so->so_snd.sb_hiwat) ||
+ clen > so->so_snd.sb_hiwat) {
+ SOCKBUF_UNLOCK(&so->so_snd);
+ error = EMSGSIZE;
+ goto release;
+ }
+ if (space < resid + clen &&
+ (atomic || space < so->so_snd.sb_lowat || space < clen)) {
+ if ((so->so_state & SS_NBIO) || (flags & MSG_NBIO)) {
+ SOCKBUF_UNLOCK(&so->so_snd);
+ error = EWOULDBLOCK;
+ goto release;
+ }
+ error = sbwait(&so->so_snd);
+ SOCKBUF_UNLOCK(&so->so_snd);
+ if (error)
+ goto release;
+ goto restart;
+ }
+ SOCKBUF_UNLOCK(&so->so_snd);
+ space -= clen;
+ do {
+ if (uio == NULL) {
+ resid = 0;
+ if (flags & MSG_EOR)
+ top->m_flags |= M_EOR;
+ } else {
+#ifdef ZERO_COPY_SOCKETS
+ error = sosend_copyin(uio, &top, atomic,
+ &space, flags);
+ if (error != 0)
+ goto release;
+#else
+ /*
+ * Copy the data from userland into a mbuf
+ * chain. If no data is to be copied in,
+ * a single empty mbuf is returned.
+ */
+ top = m_uiotombuf(uio, M_WAITOK, space,
+ (atomic ? max_hdr : 0),
+ (atomic ? M_PKTHDR : 0) |
+ ((flags & MSG_EOR) ? M_EOR : 0));
+ if (top == NULL) {
+ error = EFAULT; /* only possible error */
+ goto release;
+ }
+ space -= resid - uio->uio_resid;
+#endif
+ resid = uio->uio_resid;
+ }
+ if (dontroute) {
+ SOCK_LOCK(so);
+ so->so_options |= SO_DONTROUTE;
+ SOCK_UNLOCK(so);
+ }
+ /*
+ * XXX all the SBS_CANTSENDMORE checks previously
+			 * done could be out of date. We could have received
+ * a reset packet in an interrupt or maybe we slept
+ * while doing page faults in uiomove() etc. We
+ * could probably recheck again inside the locking
+ * protection here, but there are probably other
+ * places that this also happens. We must rethink
+ * this.
+ */
+ error = (*so->so_proto->pr_usrreqs->pru_send)(so,
+ (flags & MSG_OOB) ? PRUS_OOB :
+ /*
+ * If the user set MSG_EOF, the protocol understands
+ * this flag and nothing left to send then use
+ * PRU_SEND_EOF instead of PRU_SEND.
+ */
+ ((flags & MSG_EOF) &&
+ (so->so_proto->pr_flags & PR_IMPLOPCL) &&
+ (resid <= 0)) ?
+ PRUS_EOF :
+ /* If there is more to send set PRUS_MORETOCOME. */
+ (resid > 0 && space > 0) ? PRUS_MORETOCOME : 0,
+ top, addr, control, td);
+ if (dontroute) {
+ SOCK_LOCK(so);
+ so->so_options &= ~SO_DONTROUTE;
+ SOCK_UNLOCK(so);
+ }
+ clen = 0;
+ control = NULL;
+ top = NULL;
+ if (error)
+ goto release;
+ } while (resid && space > 0);
+ } while (resid);
+
+release:
+ sbunlock(&so->so_snd);
+out:
+ if (top != NULL)
+ m_freem(top);
+ if (control != NULL)
+ m_freem(control);
+ return (error);
+}
+
+int
+sosend(struct socket *so, struct sockaddr *addr, struct uio *uio,
+ struct mbuf *top, struct mbuf *control, int flags, struct thread *td)
+{
+ int error;
+
+ CURVNET_SET(so->so_vnet);
+ error = so->so_proto->pr_usrreqs->pru_sosend(so, addr, uio, top,
+ control, flags, td);
+ CURVNET_RESTORE();
+ return (error);
+}
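+
+/*
+ * A minimal, illustrative caller handing a pre-built mbuf chain to
+ * sosend(); per the contract above, the data and control buffers are
+ * freed on return, so the caller must not m_freem() them afterwards
+ * ("so" and the chain contents are assumptions of the sketch):
+ *
+ *	struct mbuf *top;
+ *	int error;
+ *
+ *	top = m_gethdr(M_WAITOK, MT_DATA);
+ *	top->m_len = top->m_pkthdr.len = 0;
+ *	error = sosend(so, NULL, NULL, top, NULL, 0, curthread);
+ */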
+
+/*
+ * The part of soreceive() that implements reading non-inline out-of-band
+ * data from a socket. For more complete comments, see soreceive(), from
+ * which this code originated.
+ *
+ * Note that soreceive_rcvoob(), unlike the remainder of soreceive(), is
+ * unable to return an mbuf chain to the caller.
+ */
+static int
+soreceive_rcvoob(struct socket *so, struct uio *uio, int flags)
+{
+ struct protosw *pr = so->so_proto;
+ struct mbuf *m;
+ int error;
+
+ KASSERT(flags & MSG_OOB, ("soreceive_rcvoob: (flags & MSG_OOB) == 0"));
+
+ m = m_get(M_WAIT, MT_DATA);
+ error = (*pr->pr_usrreqs->pru_rcvoob)(so, m, flags & MSG_PEEK);
+ if (error)
+ goto bad;
+ do {
+#ifdef ZERO_COPY_SOCKETS
+ if (so_zero_copy_receive) {
+ int disposable;
+
+ if ((m->m_flags & M_EXT)
+ && (m->m_ext.ext_type == EXT_DISPOSABLE))
+ disposable = 1;
+ else
+ disposable = 0;
+
+ error = uiomoveco(mtod(m, void *),
+ min(uio->uio_resid, m->m_len),
+ uio, disposable);
+ } else
+#endif /* ZERO_COPY_SOCKETS */
+ error = uiomove(mtod(m, void *),
+ (int) min(uio->uio_resid, m->m_len), uio);
+ m = m_free(m);
+ } while (uio->uio_resid && error == 0 && m);
+bad:
+ if (m != NULL)
+ m_freem(m);
+ return (error);
+}
+
+/*
+ * Following replacement or removal of the first mbuf on the first mbuf chain
+ * of a socket buffer, push necessary state changes back into the socket
+ * buffer so that other consumers see the values consistently. 'nextrecord'
+ * is the callers locally stored value of the original value of
+ * sb->sb_mb->m_nextpkt which must be restored when the lead mbuf changes.
+ * NOTE: 'nextrecord' may be NULL.
+ */
+static __inline void
+sockbuf_pushsync(struct sockbuf *sb, struct mbuf *nextrecord)
+{
+
+ SOCKBUF_LOCK_ASSERT(sb);
+ /*
+ * First, update for the new value of nextrecord. If necessary, make
+ * it the first record.
+ */
+ if (sb->sb_mb != NULL)
+ sb->sb_mb->m_nextpkt = nextrecord;
+ else
+ sb->sb_mb = nextrecord;
+
+ /*
+ * Now update any dependent socket buffer fields to reflect the new
+ * state. This is an expanded inline of SB_EMPTY_FIXUP(), with the
+ * addition of a second clause that takes care of the case where
+ * sb_mb has been updated, but remains the last record.
+ */
+ if (sb->sb_mb == NULL) {
+ sb->sb_mbtail = NULL;
+ sb->sb_lastrecord = NULL;
+ } else if (sb->sb_mb->m_nextpkt == NULL)
+ sb->sb_lastrecord = sb->sb_mb;
+}
+
+
+/*
+ * Implement receive operations on a socket. We depend on the way that
+ * records are added to the sockbuf by sbappend. In particular, each record
+ * (mbufs linked through m_next) must begin with an address if the protocol
+ * so specifies, followed by an optional mbuf or mbufs containing ancillary
+ * data, and then zero or more mbufs of data. In order to allow parallelism
+ * between network receive and copying to user space, as well as avoid
+ * sleeping with a mutex held, we release the socket buffer mutex during the
+ * user space copy. Although the sockbuf is locked, new data may still be
+ * appended, and thus we must maintain consistency of the sockbuf during that
+ * time.
+ *
+ * The caller may receive the data as a single mbuf chain by supplying an
+ * mbuf **mp0 for use in returning the chain. The uio is then used only for
+ * the count in uio_resid.
+ */
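+
+/*
+ * Schematically, one record in the receive buffer of a PR_ADDR protocol
+ * (mbufs within a record are linked via m_next, records via m_nextpkt):
+ *
+ *	MT_SONAME -> [ MT_CONTROL ... ] -> MT_DATA -> MT_DATA -> ...
+ */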
+int
+soreceive_generic(struct socket *so, struct sockaddr **psa, struct uio *uio,
+ struct mbuf **mp0, struct mbuf **controlp, int *flagsp)
+{
+ struct mbuf *m, **mp;
+ int flags, len, error, offset;
+ struct protosw *pr = so->so_proto;
+ struct mbuf *nextrecord;
+ int moff, type = 0;
+ int orig_resid = uio->uio_resid;
+
+ mp = mp0;
+ if (psa != NULL)
+ *psa = NULL;
+ if (controlp != NULL)
+ *controlp = NULL;
+ if (flagsp != NULL)
+ flags = *flagsp &~ MSG_EOR;
+ else
+ flags = 0;
+ if (flags & MSG_OOB)
+ return (soreceive_rcvoob(so, uio, flags));
+ if (mp != NULL)
+ *mp = NULL;
+ if ((pr->pr_flags & PR_WANTRCVD) && (so->so_state & SS_ISCONFIRMING)
+ && uio->uio_resid)
+ (*pr->pr_usrreqs->pru_rcvd)(so, 0);
+
+ error = sblock(&so->so_rcv, SBLOCKWAIT(flags));
+ if (error)
+ return (error);
+
+restart:
+ SOCKBUF_LOCK(&so->so_rcv);
+ m = so->so_rcv.sb_mb;
+ /*
+ * If we have less data than requested, block awaiting more (subject
+ * to any timeout) if:
+ * 1. the current count is less than the low water mark, or
+ * 2. MSG_WAITALL is set, and it is possible to do the entire
+	 *	receive operation at once if we block (resid <= hiwat),
+	 * and, in either case,
+	 *   3. MSG_DONTWAIT is not set.
+ * If MSG_WAITALL is set but resid is larger than the receive buffer,
+ * we have to do the receive in sections, and thus risk returning a
+ * short count if a timeout or signal occurs after we start.
+ */
+ if (m == NULL || (((flags & MSG_DONTWAIT) == 0 &&
+ so->so_rcv.sb_cc < uio->uio_resid) &&
+ (so->so_rcv.sb_cc < so->so_rcv.sb_lowat ||
+ ((flags & MSG_WAITALL) && uio->uio_resid <= so->so_rcv.sb_hiwat)) &&
+ m->m_nextpkt == NULL && (pr->pr_flags & PR_ATOMIC) == 0)) {
+ KASSERT(m != NULL || !so->so_rcv.sb_cc,
+ ("receive: m == %p so->so_rcv.sb_cc == %u",
+ m, so->so_rcv.sb_cc));
+ if (so->so_error) {
+ if (m != NULL)
+ goto dontblock;
+ error = so->so_error;
+ if ((flags & MSG_PEEK) == 0)
+ so->so_error = 0;
+ SOCKBUF_UNLOCK(&so->so_rcv);
+ goto release;
+ }
+ SOCKBUF_LOCK_ASSERT(&so->so_rcv);
+ if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
+ if (m == NULL) {
+ SOCKBUF_UNLOCK(&so->so_rcv);
+ goto release;
+ } else
+ goto dontblock;
+ }
+ for (; m != NULL; m = m->m_next)
+ if (m->m_type == MT_OOBDATA || (m->m_flags & M_EOR)) {
+ m = so->so_rcv.sb_mb;
+ goto dontblock;
+ }
+ if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 &&
+ (so->so_proto->pr_flags & PR_CONNREQUIRED)) {
+ SOCKBUF_UNLOCK(&so->so_rcv);
+ error = ENOTCONN;
+ goto release;
+ }
+ if (uio->uio_resid == 0) {
+ SOCKBUF_UNLOCK(&so->so_rcv);
+ goto release;
+ }
+ if ((so->so_state & SS_NBIO) ||
+ (flags & (MSG_DONTWAIT|MSG_NBIO))) {
+ SOCKBUF_UNLOCK(&so->so_rcv);
+ error = EWOULDBLOCK;
+ goto release;
+ }
+ SBLASTRECORDCHK(&so->so_rcv);
+ SBLASTMBUFCHK(&so->so_rcv);
+ error = sbwait(&so->so_rcv);
+ SOCKBUF_UNLOCK(&so->so_rcv);
+ if (error)
+ goto release;
+ goto restart;
+ }
+dontblock:
+ /*
+ * From this point onward, we maintain 'nextrecord' as a cache of the
+ * pointer to the next record in the socket buffer. We must keep the
+ * various socket buffer pointers and local stack versions of the
+ * pointers in sync, pushing out modifications before dropping the
+ * socket buffer mutex, and re-reading them when picking it up.
+ *
+ * Otherwise, we will race with the network stack appending new data
+ * or records onto the socket buffer by using inconsistent/stale
+ * versions of the field, possibly resulting in socket buffer
+ * corruption.
+ *
+ * By holding the high-level sblock(), we prevent simultaneous
+ * readers from pulling off the front of the socket buffer.
+ */
+ SOCKBUF_LOCK_ASSERT(&so->so_rcv);
+ if (uio->uio_td)
+ uio->uio_td->td_ru.ru_msgrcv++;
+ KASSERT(m == so->so_rcv.sb_mb, ("soreceive: m != so->so_rcv.sb_mb"));
+ SBLASTRECORDCHK(&so->so_rcv);
+ SBLASTMBUFCHK(&so->so_rcv);
+ nextrecord = m->m_nextpkt;
+ if (pr->pr_flags & PR_ADDR) {
+ KASSERT(m->m_type == MT_SONAME,
+ ("m->m_type == %d", m->m_type));
+ orig_resid = 0;
+ if (psa != NULL)
+ *psa = sodupsockaddr(mtod(m, struct sockaddr *),
+ M_NOWAIT);
+ if (flags & MSG_PEEK) {
+ m = m->m_next;
+ } else {
+ sbfree(&so->so_rcv, m);
+ so->so_rcv.sb_mb = m_free(m);
+ m = so->so_rcv.sb_mb;
+ sockbuf_pushsync(&so->so_rcv, nextrecord);
+ }
+ }
+
+ /*
+ * Process one or more MT_CONTROL mbufs present before any data mbufs
+ * in the first mbuf chain on the socket buffer. If MSG_PEEK, we
+ * just copy the data; if !MSG_PEEK, we call into the protocol to
+ * perform externalization (or freeing if controlp == NULL).
+ */
+ if (m != NULL && m->m_type == MT_CONTROL) {
+ struct mbuf *cm = NULL, *cmn;
+ struct mbuf **cme = &cm;
+
+ do {
+ if (flags & MSG_PEEK) {
+ if (controlp != NULL) {
+ *controlp = m_copy(m, 0, m->m_len);
+ controlp = &(*controlp)->m_next;
+ }
+ m = m->m_next;
+ } else {
+ sbfree(&so->so_rcv, m);
+ so->so_rcv.sb_mb = m->m_next;
+ m->m_next = NULL;
+ *cme = m;
+ cme = &(*cme)->m_next;
+ m = so->so_rcv.sb_mb;
+ }
+ } while (m != NULL && m->m_type == MT_CONTROL);
+ if ((flags & MSG_PEEK) == 0)
+ sockbuf_pushsync(&so->so_rcv, nextrecord);
+ while (cm != NULL) {
+ cmn = cm->m_next;
+ cm->m_next = NULL;
+ if (pr->pr_domain->dom_externalize != NULL) {
+ SOCKBUF_UNLOCK(&so->so_rcv);
+ error = (*pr->pr_domain->dom_externalize)
+ (cm, controlp);
+ SOCKBUF_LOCK(&so->so_rcv);
+ } else if (controlp != NULL)
+ *controlp = cm;
+ else
+ m_freem(cm);
+ if (controlp != NULL) {
+ orig_resid = 0;
+ while (*controlp != NULL)
+ controlp = &(*controlp)->m_next;
+ }
+ cm = cmn;
+ }
+ if (m != NULL)
+ nextrecord = so->so_rcv.sb_mb->m_nextpkt;
+ else
+ nextrecord = so->so_rcv.sb_mb;
+ orig_resid = 0;
+ }
+ if (m != NULL) {
+ if ((flags & MSG_PEEK) == 0) {
+ KASSERT(m->m_nextpkt == nextrecord,
+ ("soreceive: post-control, nextrecord !sync"));
+ if (nextrecord == NULL) {
+ KASSERT(so->so_rcv.sb_mb == m,
+ ("soreceive: post-control, sb_mb!=m"));
+ KASSERT(so->so_rcv.sb_lastrecord == m,
+ ("soreceive: post-control, lastrecord!=m"));
+ }
+ }
+ type = m->m_type;
+ if (type == MT_OOBDATA)
+ flags |= MSG_OOB;
+ } else {
+ if ((flags & MSG_PEEK) == 0) {
+ KASSERT(so->so_rcv.sb_mb == nextrecord,
+ ("soreceive: sb_mb != nextrecord"));
+ if (so->so_rcv.sb_mb == NULL) {
+ KASSERT(so->so_rcv.sb_lastrecord == NULL,
+				    ("soreceive: sb_lastrecord != NULL"));
+ }
+ }
+ }
+ SOCKBUF_LOCK_ASSERT(&so->so_rcv);
+ SBLASTRECORDCHK(&so->so_rcv);
+ SBLASTMBUFCHK(&so->so_rcv);
+
+ /*
+ * Now continue to read any data mbufs off of the head of the socket
+ * buffer until the read request is satisfied. Note that 'type' is
+ * used to store the type of any mbuf reads that have happened so far
+ * such that soreceive() can stop reading if the type changes, which
+ * causes soreceive() to return only one of regular data and inline
+ * out-of-band data in a single socket receive operation.
+ */
+ moff = 0;
+ offset = 0;
+ while (m != NULL && uio->uio_resid > 0 && error == 0) {
+ /*
+ * If the type of mbuf has changed since the last mbuf
+ * examined ('type'), end the receive operation.
+ */
+ SOCKBUF_LOCK_ASSERT(&so->so_rcv);
+ if (m->m_type == MT_OOBDATA) {
+ if (type != MT_OOBDATA)
+ break;
+ } else if (type == MT_OOBDATA)
+ break;
+ else
+ KASSERT(m->m_type == MT_DATA,
+ ("m->m_type == %d", m->m_type));
+ so->so_rcv.sb_state &= ~SBS_RCVATMARK;
+ len = uio->uio_resid;
+ if (so->so_oobmark && len > so->so_oobmark - offset)
+ len = so->so_oobmark - offset;
+ if (len > m->m_len - moff)
+ len = m->m_len - moff;
+ /*
+		 * If mp is set, just pass back the mbufs. Otherwise copy
+		 * them out via the uio, then free. The sockbuf must be
+		 * consistent here (sb_mb points to the current mbuf and its
+		 * m_nextpkt to the next record) before we drop the lock; we
+		 * must note any additions made to the sockbuf while it was
+		 * unlocked.
+ */
+ if (mp == NULL) {
+ SOCKBUF_LOCK_ASSERT(&so->so_rcv);
+ SBLASTRECORDCHK(&so->so_rcv);
+ SBLASTMBUFCHK(&so->so_rcv);
+ SOCKBUF_UNLOCK(&so->so_rcv);
+#ifdef ZERO_COPY_SOCKETS
+ if (so_zero_copy_receive) {
+ int disposable;
+
+ if ((m->m_flags & M_EXT)
+ && (m->m_ext.ext_type == EXT_DISPOSABLE))
+ disposable = 1;
+ else
+ disposable = 0;
+
+ error = uiomoveco(mtod(m, char *) + moff,
+ (int)len, uio,
+ disposable);
+ } else
+#endif /* ZERO_COPY_SOCKETS */
+ error = uiomove(mtod(m, char *) + moff, (int)len, uio);
+ SOCKBUF_LOCK(&so->so_rcv);
+ if (error) {
+ /*
+ * The MT_SONAME mbuf has already been removed
+ * from the record, so it is necessary to
+ * remove the data mbufs, if any, to preserve
+ * the invariant in the case of PR_ADDR that
+ * requires MT_SONAME mbufs at the head of
+ * each record.
+ */
+ if (m && pr->pr_flags & PR_ATOMIC &&
+ ((flags & MSG_PEEK) == 0))
+ (void)sbdroprecord_locked(&so->so_rcv);
+ SOCKBUF_UNLOCK(&so->so_rcv);
+ goto release;
+ }
+ } else
+ uio->uio_resid -= len;
+ SOCKBUF_LOCK_ASSERT(&so->so_rcv);
+ if (len == m->m_len - moff) {
+ if (m->m_flags & M_EOR)
+ flags |= MSG_EOR;
+ if (flags & MSG_PEEK) {
+ m = m->m_next;
+ moff = 0;
+ } else {
+ nextrecord = m->m_nextpkt;
+ sbfree(&so->so_rcv, m);
+ if (mp != NULL) {
+ *mp = m;
+ mp = &m->m_next;
+ so->so_rcv.sb_mb = m = m->m_next;
+ *mp = NULL;
+ } else {
+ so->so_rcv.sb_mb = m_free(m);
+ m = so->so_rcv.sb_mb;
+ }
+ sockbuf_pushsync(&so->so_rcv, nextrecord);
+ SBLASTRECORDCHK(&so->so_rcv);
+ SBLASTMBUFCHK(&so->so_rcv);
+ }
+ } else {
+ if (flags & MSG_PEEK)
+ moff += len;
+ else {
+ if (mp != NULL) {
+ int copy_flag;
+
+ if (flags & MSG_DONTWAIT)
+ copy_flag = M_DONTWAIT;
+ else
+ copy_flag = M_WAIT;
+ if (copy_flag == M_WAIT)
+ SOCKBUF_UNLOCK(&so->so_rcv);
+ *mp = m_copym(m, 0, len, copy_flag);
+ if (copy_flag == M_WAIT)
+ SOCKBUF_LOCK(&so->so_rcv);
+ if (*mp == NULL) {
+ /*
+ * m_copym() couldn't
+ * allocate an mbuf. Adjust
+ * uio_resid back (it was
+ * adjusted down by len
+ * bytes, which we didn't end
+ * up "copying" over).
+ */
+ uio->uio_resid += len;
+ break;
+ }
+ }
+ m->m_data += len;
+ m->m_len -= len;
+ so->so_rcv.sb_cc -= len;
+ }
+ }
+ SOCKBUF_LOCK_ASSERT(&so->so_rcv);
+ if (so->so_oobmark) {
+ if ((flags & MSG_PEEK) == 0) {
+ so->so_oobmark -= len;
+ if (so->so_oobmark == 0) {
+ so->so_rcv.sb_state |= SBS_RCVATMARK;
+ break;
+ }
+ } else {
+ offset += len;
+ if (offset == so->so_oobmark)
+ break;
+ }
+ }
+ if (flags & MSG_EOR)
+ break;
+ /*
+ * If the MSG_WAITALL flag is set (for non-atomic socket), we
+ * must not quit until "uio->uio_resid == 0" or an error
+ * termination. If a signal/timeout occurs, return with a
+ * short count but without error. Keep sockbuf locked
+ * against other readers.
+ */
+ while (flags & MSG_WAITALL && m == NULL && uio->uio_resid > 0 &&
+ !sosendallatonce(so) && nextrecord == NULL) {
+ SOCKBUF_LOCK_ASSERT(&so->so_rcv);
+ if (so->so_error || so->so_rcv.sb_state & SBS_CANTRCVMORE)
+ break;
+ /*
+ * Notify the protocol that some data has been
+ * drained before blocking.
+ */
+ if (pr->pr_flags & PR_WANTRCVD) {
+ SOCKBUF_UNLOCK(&so->so_rcv);
+ (*pr->pr_usrreqs->pru_rcvd)(so, flags);
+ SOCKBUF_LOCK(&so->so_rcv);
+ }
+ SBLASTRECORDCHK(&so->so_rcv);
+ SBLASTMBUFCHK(&so->so_rcv);
+ error = sbwait(&so->so_rcv);
+ if (error) {
+ SOCKBUF_UNLOCK(&so->so_rcv);
+ goto release;
+ }
+ m = so->so_rcv.sb_mb;
+ if (m != NULL)
+ nextrecord = m->m_nextpkt;
+ }
+ }
+
+ SOCKBUF_LOCK_ASSERT(&so->so_rcv);
+ if (m != NULL && pr->pr_flags & PR_ATOMIC) {
+ flags |= MSG_TRUNC;
+ if ((flags & MSG_PEEK) == 0)
+ (void) sbdroprecord_locked(&so->so_rcv);
+ }
+ if ((flags & MSG_PEEK) == 0) {
+ if (m == NULL) {
+ /*
+ * First part is an inline SB_EMPTY_FIXUP(). Second
+ * part makes sure sb_lastrecord is up-to-date if
+ * there is still data in the socket buffer.
+ */
+ so->so_rcv.sb_mb = nextrecord;
+ if (so->so_rcv.sb_mb == NULL) {
+ so->so_rcv.sb_mbtail = NULL;
+ so->so_rcv.sb_lastrecord = NULL;
+ } else if (nextrecord->m_nextpkt == NULL)
+ so->so_rcv.sb_lastrecord = nextrecord;
+ }
+ SBLASTRECORDCHK(&so->so_rcv);
+ SBLASTMBUFCHK(&so->so_rcv);
+ /*
+ * If soreceive() is being done from the socket callback,
+ * then don't need to generate ACK to peer to update window,
+ * since ACK will be generated on return to TCP.
+ */
+ if (!(flags & MSG_SOCALLBCK) &&
+ (pr->pr_flags & PR_WANTRCVD)) {
+ SOCKBUF_UNLOCK(&so->so_rcv);
+ (*pr->pr_usrreqs->pru_rcvd)(so, flags);
+ SOCKBUF_LOCK(&so->so_rcv);
+ }
+ }
+ SOCKBUF_LOCK_ASSERT(&so->so_rcv);
+ if (orig_resid == uio->uio_resid && orig_resid &&
+ (flags & MSG_EOR) == 0 && (so->so_rcv.sb_state & SBS_CANTRCVMORE) == 0) {
+ SOCKBUF_UNLOCK(&so->so_rcv);
+ goto restart;
+ }
+ SOCKBUF_UNLOCK(&so->so_rcv);
+
+ if (flagsp != NULL)
+ *flagsp |= flags;
+release:
+ sbunlock(&so->so_rcv);
+ return (error);
+}
+
+/*
+ * Optimized version of soreceive() for stream (TCP) sockets.
+ */
+int
+soreceive_stream(struct socket *so, struct sockaddr **psa, struct uio *uio,
+ struct mbuf **mp0, struct mbuf **controlp, int *flagsp)
+{
+ int len = 0, error = 0, flags, oresid;
+ struct sockbuf *sb;
+ struct mbuf *m, *n = NULL;
+
+ /* We only do stream sockets. */
+ if (so->so_type != SOCK_STREAM)
+ return (EINVAL);
+ if (psa != NULL)
+ *psa = NULL;
+ if (controlp != NULL)
+ return (EINVAL);
+ if (flagsp != NULL)
+ flags = *flagsp &~ MSG_EOR;
+ else
+ flags = 0;
+ if (flags & MSG_OOB)
+ return (soreceive_rcvoob(so, uio, flags));
+ if (mp0 != NULL)
+ *mp0 = NULL;
+
+ sb = &so->so_rcv;
+
+ /* Prevent other readers from entering the socket. */
+ error = sblock(sb, SBLOCKWAIT(flags));
+ if (error)
+ goto out;
+ SOCKBUF_LOCK(sb);
+
+ /* Easy one, no space to copyout anything. */
+ if (uio->uio_resid == 0) {
+ error = EINVAL;
+ goto out;
+ }
+ oresid = uio->uio_resid;
+
+ /* We will never ever get anything unless we are connected. */
+ if (!(so->so_state & (SS_ISCONNECTED|SS_ISDISCONNECTED))) {
+ /* When disconnecting there may still be some data left. */
+ if (sb->sb_cc > 0)
+ goto deliver;
+ if (!(so->so_state & SS_ISDISCONNECTED))
+ error = ENOTCONN;
+ goto out;
+ }
+
+ /* Socket buffer is empty and we shall not block. */
+ if (sb->sb_cc == 0 &&
+ ((so->so_state & SS_NBIO) || (flags & (MSG_DONTWAIT|MSG_NBIO)))) {
+ error = EAGAIN;
+ goto out;
+ }
+
+restart:
+ SOCKBUF_LOCK_ASSERT(&so->so_rcv);
+
+ /* Abort if socket has reported problems. */
+ if (so->so_error) {
+ if (sb->sb_cc > 0)
+ goto deliver;
+ if (oresid > uio->uio_resid)
+ goto out;
+ error = so->so_error;
+ if (!(flags & MSG_PEEK))
+ so->so_error = 0;
+ goto out;
+ }
+
+ /* Door is closed. Deliver what is left, if any. */
+ if (sb->sb_state & SBS_CANTRCVMORE) {
+ if (sb->sb_cc > 0)
+ goto deliver;
+ else
+ goto out;
+ }
+
+ /* Socket buffer got some data that we shall deliver now. */
+ if (sb->sb_cc > 0 && !(flags & MSG_WAITALL) &&
+ ((so->so_state & SS_NBIO) ||
+ (flags & (MSG_DONTWAIT|MSG_NBIO)) ||
+ sb->sb_cc >= sb->sb_lowat ||
+ sb->sb_cc >= uio->uio_resid ||
+ sb->sb_cc >= sb->sb_hiwat)) {
+ goto deliver;
+ }
+
+ /* On MSG_WAITALL we must wait until all data or error arrives. */
+ if ((flags & MSG_WAITALL) &&
+ (sb->sb_cc >= uio->uio_resid || sb->sb_cc >= sb->sb_lowat))
+ goto deliver;
+
+ /*
+ * Wait and block until (more) data comes in.
+ * NB: Drops the sockbuf lock during wait.
+ */
+ error = sbwait(sb);
+ if (error)
+ goto out;
+ goto restart;
+
+deliver:
+ SOCKBUF_LOCK_ASSERT(&so->so_rcv);
+ KASSERT(sb->sb_cc > 0, ("%s: sockbuf empty", __func__));
+ KASSERT(sb->sb_mb != NULL, ("%s: sb_mb == NULL", __func__));
+
+ /* Statistics. */
+ if (uio->uio_td)
+ uio->uio_td->td_ru.ru_msgrcv++;
+
+ /* Fill uio until full or current end of socket buffer is reached. */
+ len = min(uio->uio_resid, sb->sb_cc);
+ if (mp0 != NULL) {
+ /* Dequeue as many mbufs as possible. */
+ if (!(flags & MSG_PEEK) && len >= sb->sb_mb->m_len) {
+ for (*mp0 = m = sb->sb_mb;
+ m != NULL && m->m_len <= len;
+ m = m->m_next) {
+ len -= m->m_len;
+ uio->uio_resid -= m->m_len;
+ sbfree(sb, m);
+ n = m;
+ }
+ sb->sb_mb = m;
+ if (sb->sb_mb == NULL)
+ SB_EMPTY_FIXUP(sb);
+ n->m_next = NULL;
+ }
+ /* Copy the remainder. */
+ if (len > 0) {
+ KASSERT(sb->sb_mb != NULL,
+ ("%s: len > 0 && sb->sb_mb empty", __func__));
+
+ m = m_copym(sb->sb_mb, 0, len, M_DONTWAIT);
+ if (m == NULL)
+ len = 0; /* Don't flush data from sockbuf. */
+ else
+ uio->uio_resid -= m->m_len;
+ if (*mp0 != NULL)
+ n->m_next = m;
+ else
+ *mp0 = m;
+ if (*mp0 == NULL) {
+ error = ENOBUFS;
+ goto out;
+ }
+ }
+ } else {
+ /* NB: Must unlock socket buffer as uiomove may sleep. */
+ SOCKBUF_UNLOCK(sb);
+ error = m_mbuftouio(uio, sb->sb_mb, len);
+ SOCKBUF_LOCK(sb);
+ if (error)
+ goto out;
+ }
+ SBLASTRECORDCHK(sb);
+ SBLASTMBUFCHK(sb);
+
+ /*
+ * Remove the delivered data from the socket buffer unless we
+ * were only peeking.
+ */
+ if (!(flags & MSG_PEEK)) {
+ if (len > 0)
+ sbdrop_locked(sb, len);
+
+ /* Notify protocol that we drained some data. */
+ if ((so->so_proto->pr_flags & PR_WANTRCVD) &&
+ (((flags & MSG_WAITALL) && uio->uio_resid > 0) ||
+ !(flags & MSG_SOCALLBCK))) {
+ SOCKBUF_UNLOCK(sb);
+ (*so->so_proto->pr_usrreqs->pru_rcvd)(so, flags);
+ SOCKBUF_LOCK(sb);
+ }
+ }
+
+ /*
+ * For MSG_WAITALL we may have to loop again and wait for
+ * more data to come in.
+ */
+ if ((flags & MSG_WAITALL) && uio->uio_resid > 0)
+ goto restart;
+out:
+ SOCKBUF_LOCK_ASSERT(sb);
+ SBLASTRECORDCHK(sb);
+ SBLASTMBUFCHK(sb);
+ SOCKBUF_UNLOCK(sb);
+ sbunlock(sb);
+ return (error);
+}
+
+/*
+ * Optimized version of soreceive() for simple datagram cases from userspace.
+ * Unlike in the stream case, we're able to drop a datagram if copyout()
+ * fails, and because we handle datagrams atomically, we don't need to use a
+ * sleep lock to prevent I/O interlacing.
+ */
+int
+soreceive_dgram(struct socket *so, struct sockaddr **psa, struct uio *uio,
+ struct mbuf **mp0, struct mbuf **controlp, int *flagsp)
+{
+ struct mbuf *m, *m2;
+ int flags, len, error;
+ struct protosw *pr = so->so_proto;
+ struct mbuf *nextrecord;
+
+ if (psa != NULL)
+ *psa = NULL;
+ if (controlp != NULL)
+ *controlp = NULL;
+ if (flagsp != NULL)
+ flags = *flagsp &~ MSG_EOR;
+ else
+ flags = 0;
+
+ /*
+ * For any complicated cases, fall back to the full
+ * soreceive_generic().
+ */
+ if (mp0 != NULL || (flags & MSG_PEEK) || (flags & MSG_OOB))
+ return (soreceive_generic(so, psa, uio, mp0, controlp,
+ flagsp));
+
+ /*
+ * Enforce restrictions on use.
+ */
+ KASSERT((pr->pr_flags & PR_WANTRCVD) == 0,
+ ("soreceive_dgram: wantrcvd"));
+ KASSERT(pr->pr_flags & PR_ATOMIC, ("soreceive_dgram: !atomic"));
+ KASSERT((so->so_rcv.sb_state & SBS_RCVATMARK) == 0,
+ ("soreceive_dgram: SBS_RCVATMARK"));
+ KASSERT((so->so_proto->pr_flags & PR_CONNREQUIRED) == 0,
+ ("soreceive_dgram: P_CONNREQUIRED"));
+
+ /*
+ * Loop blocking while waiting for a datagram.
+ */
+ SOCKBUF_LOCK(&so->so_rcv);
+ while ((m = so->so_rcv.sb_mb) == NULL) {
+ KASSERT(so->so_rcv.sb_cc == 0,
+ ("soreceive_dgram: sb_mb NULL but sb_cc %u",
+ so->so_rcv.sb_cc));
+ if (so->so_error) {
+ error = so->so_error;
+ so->so_error = 0;
+ SOCKBUF_UNLOCK(&so->so_rcv);
+ return (error);
+ }
+ if (so->so_rcv.sb_state & SBS_CANTRCVMORE ||
+ uio->uio_resid == 0) {
+ SOCKBUF_UNLOCK(&so->so_rcv);
+ return (0);
+ }
+ if ((so->so_state & SS_NBIO) ||
+ (flags & (MSG_DONTWAIT|MSG_NBIO))) {
+ SOCKBUF_UNLOCK(&so->so_rcv);
+ return (EWOULDBLOCK);
+ }
+ SBLASTRECORDCHK(&so->so_rcv);
+ SBLASTMBUFCHK(&so->so_rcv);
+ error = sbwait(&so->so_rcv);
+ if (error) {
+ SOCKBUF_UNLOCK(&so->so_rcv);
+ return (error);
+ }
+ }
+ SOCKBUF_LOCK_ASSERT(&so->so_rcv);
+
+ if (uio->uio_td)
+ uio->uio_td->td_ru.ru_msgrcv++;
+ SBLASTRECORDCHK(&so->so_rcv);
+ SBLASTMBUFCHK(&so->so_rcv);
+ nextrecord = m->m_nextpkt;
+ if (nextrecord == NULL) {
+ KASSERT(so->so_rcv.sb_lastrecord == m,
+ ("soreceive_dgram: lastrecord != m"));
+ }
+
+ KASSERT(so->so_rcv.sb_mb->m_nextpkt == nextrecord,
+ ("soreceive_dgram: m_nextpkt != nextrecord"));
+
+ /*
+ * Pull 'm' and its chain off the front of the packet queue.
+ */
+ so->so_rcv.sb_mb = NULL;
+ sockbuf_pushsync(&so->so_rcv, nextrecord);
+
+ /*
+ * Walk 'm's chain and free that many bytes from the socket buffer.
+ */
+ for (m2 = m; m2 != NULL; m2 = m2->m_next)
+ sbfree(&so->so_rcv, m2);
+
+ /*
+ * Do a few last checks before we let go of the lock.
+ */
+ SBLASTRECORDCHK(&so->so_rcv);
+ SBLASTMBUFCHK(&so->so_rcv);
+ SOCKBUF_UNLOCK(&so->so_rcv);
+
+ if (pr->pr_flags & PR_ADDR) {
+ KASSERT(m->m_type == MT_SONAME,
+ ("m->m_type == %d", m->m_type));
+ if (psa != NULL)
+ *psa = sodupsockaddr(mtod(m, struct sockaddr *),
+ M_NOWAIT);
+ m = m_free(m);
+ }
+ if (m == NULL) {
+ /* XXXRW: Can this happen? */
+ return (0);
+ }
+
+ /*
+ * Packet to copyout() is now in 'm' and it is disconnected from the
+ * queue.
+ *
+ * Process one or more MT_CONTROL mbufs present before any data mbufs
+ * in the first mbuf chain on the socket buffer. We call into the
+ * protocol to perform externalization (or freeing if controlp ==
+ * NULL).
+ */
+ if (m->m_type == MT_CONTROL) {
+ struct mbuf *cm = NULL, *cmn;
+ struct mbuf **cme = &cm;
+
+ do {
+ m2 = m->m_next;
+ m->m_next = NULL;
+ *cme = m;
+ cme = &(*cme)->m_next;
+ m = m2;
+ } while (m != NULL && m->m_type == MT_CONTROL);
+ while (cm != NULL) {
+ cmn = cm->m_next;
+ cm->m_next = NULL;
+ if (pr->pr_domain->dom_externalize != NULL) {
+ error = (*pr->pr_domain->dom_externalize)
+ (cm, controlp);
+ } else if (controlp != NULL)
+ *controlp = cm;
+ else
+ m_freem(cm);
+ if (controlp != NULL) {
+ while (*controlp != NULL)
+ controlp = &(*controlp)->m_next;
+ }
+ cm = cmn;
+ }
+ }
+ KASSERT(m->m_type == MT_DATA, ("soreceive_dgram: !data"));
+
+ while (m != NULL && uio->uio_resid > 0) {
+ len = uio->uio_resid;
+ if (len > m->m_len)
+ len = m->m_len;
+ error = uiomove(mtod(m, char *), (int)len, uio);
+ if (error) {
+ m_freem(m);
+ return (error);
+ }
+ if (len == m->m_len)
+ m = m_free(m);
+ else {
+ m->m_data += len;
+ m->m_len -= len;
+ }
+ }
+ if (m != NULL)
+ flags |= MSG_TRUNC;
+ m_freem(m);
+ if (flagsp != NULL)
+ *flagsp |= flags;
+ return (0);
+}
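+
+/*
+ * Example (illustrative sketch): a datagram protocol opts into this
+ * optimized path by pointing pru_soreceive at soreceive_dgram in its
+ * struct pr_usrreqs, e.g.:
+ *
+ *	.pru_soreceive = soreceive_dgram,
+ */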
+
+int
+soreceive(struct socket *so, struct sockaddr **psa, struct uio *uio,
+ struct mbuf **mp0, struct mbuf **controlp, int *flagsp)
+{
+
+ return (so->so_proto->pr_usrreqs->pru_soreceive(so, psa, uio, mp0,
+ controlp, flagsp));
+}
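+
+/*
+ * Example (illustrative sketch): a kernel consumer drives soreceive()
+ * with a UIO_SYSSPACE uio wrapped around a local buffer; "buf" below is
+ * a hypothetical caller-owned buffer.
+ *
+ *	struct iovec iov;
+ *	struct uio auio;
+ *	int error;
+ *
+ *	iov.iov_base = buf;
+ *	iov.iov_len = sizeof(buf);
+ *	auio.uio_iov = &iov;
+ *	auio.uio_iovcnt = 1;
+ *	auio.uio_offset = 0;
+ *	auio.uio_resid = sizeof(buf);
+ *	auio.uio_segflg = UIO_SYSSPACE;
+ *	auio.uio_rw = UIO_READ;
+ *	auio.uio_td = curthread;
+ *	error = soreceive(so, NULL, &auio, NULL, NULL, NULL);
+ */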
+
+int
+soshutdown(struct socket *so, int how)
+{
+ struct protosw *pr = so->so_proto;
+ int error;
+
+ if (!(how == SHUT_RD || how == SHUT_WR || how == SHUT_RDWR))
+ return (EINVAL);
+ if (pr->pr_usrreqs->pru_flush != NULL) {
+ (*pr->pr_usrreqs->pru_flush)(so, how);
+ }
+ if (how != SHUT_WR)
+ sorflush(so);
+ if (how != SHUT_RD) {
+ CURVNET_SET(so->so_vnet);
+ error = (*pr->pr_usrreqs->pru_shutdown)(so);
+ CURVNET_RESTORE();
+ return (error);
+ }
+ return (0);
+}
+
+void
+sorflush(struct socket *so)
+{
+ struct sockbuf *sb = &so->so_rcv;
+ struct protosw *pr = so->so_proto;
+ struct sockbuf asb;
+
+ /*
+ * In order to avoid calling dom_dispose with the socket buffer mutex
+ * held, and in order to generally avoid holding the lock for a long
+ * time, we make a copy of the socket buffer and clear the original
+ * (except locks, state). The new socket buffer copy won't have
+ * initialized locks so we can only call routines that won't use or
+ * assert those locks.
+ *
+ * Dislodge threads currently blocked in receive and wait to acquire
+ * a lock against other simultaneous readers before clearing the
+ * socket buffer. Don't let our acquire be interrupted by a signal
+ * despite any existing socket disposition on interruptible waiting.
+ */
+ CURVNET_SET(so->so_vnet);
+ socantrcvmore(so);
+ (void) sblock(sb, SBL_WAIT | SBL_NOINTR);
+
+ /*
+ * Invalidate/clear most of the sockbuf structure, but leave selinfo
+ * and mutex data unchanged.
+ */
+ SOCKBUF_LOCK(sb);
+ bzero(&asb, offsetof(struct sockbuf, sb_startzero));
+ bcopy(&sb->sb_startzero, &asb.sb_startzero,
+ sizeof(*sb) - offsetof(struct sockbuf, sb_startzero));
+ bzero(&sb->sb_startzero,
+ sizeof(*sb) - offsetof(struct sockbuf, sb_startzero));
+ SOCKBUF_UNLOCK(sb);
+ sbunlock(sb);
+
+ /*
+ * Dispose of special rights and flush the socket buffer. Don't call
+ * any unsafe routines (that rely on locks being initialized) on asb.
+ */
+ if (pr->pr_flags & PR_RIGHTS && pr->pr_domain->dom_dispose != NULL)
+ (*pr->pr_domain->dom_dispose)(asb.sb_mb);
+ sbrelease_internal(&asb, so);
+ CURVNET_RESTORE();
+}
+
+/*
+ * Perhaps this routine, and sooptcopyout(), below, ought to come in an
+ * additional variant to handle the case where the option value needs to be
+ * some kind of integer, but not a specific size. In addition to their use
+ * here, these functions are also called by the protocol-level pr_ctloutput()
+ * routines.
+ */
+int
+sooptcopyin(struct sockopt *sopt, void *buf, size_t len, size_t minlen)
+{
+ size_t valsize;
+
+ /*
+ * If the user gives us more than we wanted, we ignore it, but if we
+ * don't get the minimum length the caller wants, we return EINVAL.
+ * On success, sopt->sopt_valsize is set to however much we actually
+ * retrieved.
+ */
+ if ((valsize = sopt->sopt_valsize) < minlen)
+ return EINVAL;
+ if (valsize > len)
+ sopt->sopt_valsize = valsize = len;
+
+ if (sopt->sopt_td != NULL)
+ return (copyin(sopt->sopt_val, buf, valsize));
+
+ bcopy(sopt->sopt_val, buf, valsize);
+ return (0);
+}
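+
+/*
+ * Example (illustrative sketch): a protocol-level pr_ctloutput() handler
+ * pulls in a fixed-size option value before acting on it:
+ *
+ *	int optval;
+ *
+ *	error = sooptcopyin(sopt, &optval, sizeof(optval), sizeof(optval));
+ *	if (error)
+ *		return (error);
+ */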
+
+/*
+ * Kernel version of setsockopt(2).
+ *
+ * XXX: optlen is size_t, not socklen_t
+ */
+int
+so_setsockopt(struct socket *so, int level, int optname, void *optval,
+ size_t optlen)
+{
+ struct sockopt sopt;
+
+ sopt.sopt_level = level;
+ sopt.sopt_name = optname;
+ sopt.sopt_dir = SOPT_SET;
+ sopt.sopt_val = optval;
+ sopt.sopt_valsize = optlen;
+ sopt.sopt_td = NULL;
+ return (sosetopt(so, &sopt));
+}
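+
+/*
+ * Example (illustrative sketch): enabling SO_REUSEADDR on a kernel-owned
+ * socket via the wrapper above:
+ *
+ *	int on = 1;
+ *
+ *	error = so_setsockopt(so, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on));
+ */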
+
+int
+sosetopt(struct socket *so, struct sockopt *sopt)
+{
+ int error, optval;
+ struct linger l;
+ struct timeval tv;
+ u_long val;
+#ifdef MAC
+ struct mac extmac;
+#endif
+
+ error = 0;
+ if (sopt->sopt_level != SOL_SOCKET) {
+ if (so->so_proto && so->so_proto->pr_ctloutput)
+ return ((*so->so_proto->pr_ctloutput)
+ (so, sopt));
+ error = ENOPROTOOPT;
+ } else {
+ switch (sopt->sopt_name) {
+#ifdef INET
+ case SO_ACCEPTFILTER:
+ error = do_setopt_accept_filter(so, sopt);
+ if (error)
+ goto bad;
+ break;
+#endif
+ case SO_LINGER:
+ error = sooptcopyin(sopt, &l, sizeof l, sizeof l);
+ if (error)
+ goto bad;
+
+ SOCK_LOCK(so);
+ so->so_linger = l.l_linger;
+ if (l.l_onoff)
+ so->so_options |= SO_LINGER;
+ else
+ so->so_options &= ~SO_LINGER;
+ SOCK_UNLOCK(so);
+ break;
+
+ case SO_DEBUG:
+ case SO_KEEPALIVE:
+ case SO_DONTROUTE:
+ case SO_USELOOPBACK:
+ case SO_BROADCAST:
+ case SO_REUSEADDR:
+ case SO_REUSEPORT:
+ case SO_OOBINLINE:
+ case SO_TIMESTAMP:
+ case SO_BINTIME:
+ case SO_NOSIGPIPE:
+ case SO_NO_DDP:
+ case SO_NO_OFFLOAD:
+ error = sooptcopyin(sopt, &optval, sizeof optval,
+ sizeof optval);
+ if (error)
+ goto bad;
+ SOCK_LOCK(so);
+ if (optval)
+ so->so_options |= sopt->sopt_name;
+ else
+ so->so_options &= ~sopt->sopt_name;
+ SOCK_UNLOCK(so);
+ break;
+
+ case SO_SETFIB:
+ error = sooptcopyin(sopt, &optval, sizeof optval,
+ sizeof optval);
+ if (error)
+ goto bad;
+ if (optval < 1 || optval > rt_numfibs) {
+ error = EINVAL;
+ goto bad;
+ }
+ if ((so->so_proto->pr_domain->dom_family == PF_INET) ||
+ (so->so_proto->pr_domain->dom_family == PF_ROUTE)) {
+ so->so_fibnum = optval;
+ /* Note: ignore error */
+ if (so->so_proto && so->so_proto->pr_ctloutput)
+ (*so->so_proto->pr_ctloutput)(so, sopt);
+ } else {
+ so->so_fibnum = 0;
+ }
+ break;
+ case SO_SNDBUF:
+ case SO_RCVBUF:
+ case SO_SNDLOWAT:
+ case SO_RCVLOWAT:
+ error = sooptcopyin(sopt, &optval, sizeof optval,
+ sizeof optval);
+ if (error)
+ goto bad;
+
+ /*
+ * Values < 1 make no sense for any of these options,
+ * so disallow them.
+ */
+ if (optval < 1) {
+ error = EINVAL;
+ goto bad;
+ }
+
+ switch (sopt->sopt_name) {
+ case SO_SNDBUF:
+ case SO_RCVBUF:
+ if (sbreserve(sopt->sopt_name == SO_SNDBUF ?
+ &so->so_snd : &so->so_rcv, (u_long)optval,
+ so, curthread) == 0) {
+ error = ENOBUFS;
+ goto bad;
+ }
+ (sopt->sopt_name == SO_SNDBUF ? &so->so_snd :
+ &so->so_rcv)->sb_flags &= ~SB_AUTOSIZE;
+ break;
+
+ /*
+ * Make sure the low-water is never greater than the
+ * high-water.
+ */
+ case SO_SNDLOWAT:
+ SOCKBUF_LOCK(&so->so_snd);
+ so->so_snd.sb_lowat =
+ (optval > so->so_snd.sb_hiwat) ?
+ so->so_snd.sb_hiwat : optval;
+ SOCKBUF_UNLOCK(&so->so_snd);
+ break;
+ case SO_RCVLOWAT:
+ SOCKBUF_LOCK(&so->so_rcv);
+ so->so_rcv.sb_lowat =
+ (optval > so->so_rcv.sb_hiwat) ?
+ so->so_rcv.sb_hiwat : optval;
+ SOCKBUF_UNLOCK(&so->so_rcv);
+ break;
+ }
+ break;
+
+ case SO_SNDTIMEO:
+ case SO_RCVTIMEO:
+#ifdef COMPAT_FREEBSD32
+ if (SV_CURPROC_FLAG(SV_ILP32)) {
+ struct timeval32 tv32;
+
+ error = sooptcopyin(sopt, &tv32, sizeof tv32,
+ sizeof tv32);
+ CP(tv32, tv, tv_sec);
+ CP(tv32, tv, tv_usec);
+ } else
+#endif
+ error = sooptcopyin(sopt, &tv, sizeof tv,
+ sizeof tv);
+ if (error)
+ goto bad;
+
+ /* assert(hz > 0); */
+ if (tv.tv_sec < 0 || tv.tv_sec > INT_MAX / hz ||
+ tv.tv_usec < 0 || tv.tv_usec >= 1000000) {
+ error = EDOM;
+ goto bad;
+ }
+ /* assert(tick > 0); */
+ /* assert(ULONG_MAX - INT_MAX >= 1000000); */
+ val = (u_long)(tv.tv_sec * hz) + tv.tv_usec / tick;
+ if (val > INT_MAX) {
+ error = EDOM;
+ goto bad;
+ }
+ if (val == 0 && tv.tv_usec != 0)
+ val = 1;
+
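+ /*
+ * Worked example (illustrative note): with hz = 1000, tick is
+ * 1000 microseconds, so a timeout of tv = {1, 500000} (1.5 s)
+ * yields val = 1 * 1000 + 500000 / 1000 = 1500 ticks.
+ */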
+ switch (sopt->sopt_name) {
+ case SO_SNDTIMEO:
+ so->so_snd.sb_timeo = val;
+ break;
+ case SO_RCVTIMEO:
+ so->so_rcv.sb_timeo = val;
+ break;
+ }
+ break;
+
+ case SO_LABEL:
+#ifdef MAC
+ error = sooptcopyin(sopt, &extmac, sizeof extmac,
+ sizeof extmac);
+ if (error)
+ goto bad;
+ error = mac_setsockopt_label(sopt->sopt_td->td_ucred,
+ so, &extmac);
+#else
+ error = EOPNOTSUPP;
+#endif
+ break;
+
+ default:
+ error = ENOPROTOOPT;
+ break;
+ }
+ if (error == 0 && so->so_proto != NULL &&
+ so->so_proto->pr_ctloutput != NULL) {
+ (void) ((*so->so_proto->pr_ctloutput)
+ (so, sopt));
+ }
+ }
+bad:
+ return (error);
+}
+
+/*
+ * Helper routine for getsockopt.
+ */
+int
+sooptcopyout(struct sockopt *sopt, const void *buf, size_t len)
+{
+ int error;
+ size_t valsize;
+
+ error = 0;
+
+ /*
+ * Documented get behavior is that we always return a value, possibly
+ * truncated to fit in the user's buffer. Traditional behavior is
+ * that we always tell the user precisely how much we copied, rather
+ * than something useful like the total amount we had available for
+ * her. Note that this interface is not idempotent; the entire
+ * answer must be generated ahead of time.
+ */
+ valsize = min(len, sopt->sopt_valsize);
+ sopt->sopt_valsize = valsize;
+ if (sopt->sopt_val != NULL) {
+ if (sopt->sopt_td != NULL)
+ error = copyout(buf, sopt->sopt_val, valsize);
+ else
+ bcopy(buf, sopt->sopt_val, valsize);
+ }
+ return (error);
+}
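+
+/*
+ * Example (illustrative sketch): the matching read side of a
+ * pr_ctloutput() handler returns a fixed-size value; "optval" holds a
+ * hypothetical protocol setting.
+ *
+ *	error = sooptcopyout(sopt, &optval, sizeof(optval));
+ */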
+
+int
+sogetopt(struct socket *so, struct sockopt *sopt)
+{
+ int error, optval;
+ struct linger l;
+ struct timeval tv;
+#ifdef MAC
+ struct mac extmac;
+#endif
+
+ error = 0;
+ if (sopt->sopt_level != SOL_SOCKET) {
+ if (so->so_proto && so->so_proto->pr_ctloutput) {
+ return ((*so->so_proto->pr_ctloutput)
+ (so, sopt));
+ } else
+ return (ENOPROTOOPT);
+ } else {
+ switch (sopt->sopt_name) {
+#ifdef INET
+ case SO_ACCEPTFILTER:
+ error = do_getopt_accept_filter(so, sopt);
+ break;
+#endif
+ case SO_LINGER:
+ SOCK_LOCK(so);
+ l.l_onoff = so->so_options & SO_LINGER;
+ l.l_linger = so->so_linger;
+ SOCK_UNLOCK(so);
+ error = sooptcopyout(sopt, &l, sizeof l);
+ break;
+
+ case SO_USELOOPBACK:
+ case SO_DONTROUTE:
+ case SO_DEBUG:
+ case SO_KEEPALIVE:
+ case SO_REUSEADDR:
+ case SO_REUSEPORT:
+ case SO_BROADCAST:
+ case SO_OOBINLINE:
+ case SO_ACCEPTCONN:
+ case SO_TIMESTAMP:
+ case SO_BINTIME:
+ case SO_NOSIGPIPE:
+ optval = so->so_options & sopt->sopt_name;
+integer:
+ error = sooptcopyout(sopt, &optval, sizeof optval);
+ break;
+
+ case SO_TYPE:
+ optval = so->so_type;
+ goto integer;
+
+ case SO_ERROR:
+ SOCK_LOCK(so);
+ optval = so->so_error;
+ so->so_error = 0;
+ SOCK_UNLOCK(so);
+ goto integer;
+
+ case SO_SNDBUF:
+ optval = so->so_snd.sb_hiwat;
+ goto integer;
+
+ case SO_RCVBUF:
+ optval = so->so_rcv.sb_hiwat;
+ goto integer;
+
+ case SO_SNDLOWAT:
+ optval = so->so_snd.sb_lowat;
+ goto integer;
+
+ case SO_RCVLOWAT:
+ optval = so->so_rcv.sb_lowat;
+ goto integer;
+
+ case SO_SNDTIMEO:
+ case SO_RCVTIMEO:
+ optval = (sopt->sopt_name == SO_SNDTIMEO ?
+ so->so_snd.sb_timeo : so->so_rcv.sb_timeo);
+
+ tv.tv_sec = optval / hz;
+ tv.tv_usec = (optval % hz) * tick;
+#ifdef COMPAT_FREEBSD32
+ if (SV_CURPROC_FLAG(SV_ILP32)) {
+ struct timeval32 tv32;
+
+ CP(tv, tv32, tv_sec);
+ CP(tv, tv32, tv_usec);
+ error = sooptcopyout(sopt, &tv32, sizeof tv32);
+ } else
+#endif
+ error = sooptcopyout(sopt, &tv, sizeof tv);
+ break;
+
+ case SO_LABEL:
+#ifdef MAC
+ error = sooptcopyin(sopt, &extmac, sizeof(extmac),
+ sizeof(extmac));
+ if (error)
+ return (error);
+ error = mac_getsockopt_label(sopt->sopt_td->td_ucred,
+ so, &extmac);
+ if (error)
+ return (error);
+ error = sooptcopyout(sopt, &extmac, sizeof extmac);
+#else
+ error = EOPNOTSUPP;
+#endif
+ break;
+
+ case SO_PEERLABEL:
+#ifdef MAC
+ error = sooptcopyin(sopt, &extmac, sizeof(extmac),
+ sizeof(extmac));
+ if (error)
+ return (error);
+ error = mac_getsockopt_peerlabel(
+ sopt->sopt_td->td_ucred, so, &extmac);
+ if (error)
+ return (error);
+ error = sooptcopyout(sopt, &extmac, sizeof extmac);
+#else
+ error = EOPNOTSUPP;
+#endif
+ break;
+
+ case SO_LISTENQLIMIT:
+ optval = so->so_qlimit;
+ goto integer;
+
+ case SO_LISTENQLEN:
+ optval = so->so_qlen;
+ goto integer;
+
+ case SO_LISTENINCQLEN:
+ optval = so->so_incqlen;
+ goto integer;
+
+ default:
+ error = ENOPROTOOPT;
+ break;
+ }
+ return (error);
+ }
+}
+
+/* XXX; prepare mbuf for (__FreeBSD__ < 3) routines. */
+int
+soopt_getm(struct sockopt *sopt, struct mbuf **mp)
+{
+ struct mbuf *m, *m_prev;
+ int sopt_size = sopt->sopt_valsize;
+
+ MGET(m, sopt->sopt_td ? M_WAIT : M_DONTWAIT, MT_DATA);
+ if (m == NULL)
+ return ENOBUFS;
+ if (sopt_size > MLEN) {
+ MCLGET(m, sopt->sopt_td ? M_WAIT : M_DONTWAIT);
+ if ((m->m_flags & M_EXT) == 0) {
+ m_free(m);
+ return ENOBUFS;
+ }
+ m->m_len = min(MCLBYTES, sopt_size);
+ } else {
+ m->m_len = min(MLEN, sopt_size);
+ }
+ sopt_size -= m->m_len;
+ *mp = m;
+ m_prev = m;
+
+ while (sopt_size) {
+ MGET(m, sopt->sopt_td ? M_WAIT : M_DONTWAIT, MT_DATA);
+ if (m == NULL) {
+ m_freem(*mp);
+ return ENOBUFS;
+ }
+ if (sopt_size > MLEN) {
+ MCLGET(m, sopt->sopt_td != NULL ? M_WAIT :
+ M_DONTWAIT);
+ if ((m->m_flags & M_EXT) == 0) {
+ m_freem(m);
+ m_freem(*mp);
+ return ENOBUFS;
+ }
+ m->m_len = min(MCLBYTES, sopt_size);
+ } else {
+ m->m_len = min(MLEN, sopt_size);
+ }
+ sopt_size -= m->m_len;
+ m_prev->m_next = m;
+ m_prev = m;
+ }
+ return (0);
+}
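+
+/*
+ * Example (illustrative sketch): soopt_getm() and soopt_mcopyin() are
+ * normally used as a pair to stage option data in an mbuf chain; note
+ * that soopt_mcopyin() consumes sopt->sopt_valsize as it copies.
+ *
+ *	struct mbuf *m;
+ *
+ *	error = soopt_getm(sopt, &m);
+ *	if (error == 0)
+ *		error = soopt_mcopyin(sopt, m);
+ */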
+
+/* XXX; copyin sopt data into mbuf chain for (__FreeBSD__ < 3) routines. */
+int
+soopt_mcopyin(struct sockopt *sopt, struct mbuf *m)
+{
+ struct mbuf *m0 = m;
+
+ if (sopt->sopt_val == NULL)
+ return (0);
+ while (m != NULL && sopt->sopt_valsize >= m->m_len) {
+ if (sopt->sopt_td != NULL) {
+ int error;
+
+ error = copyin(sopt->sopt_val, mtod(m, char *),
+ m->m_len);
+ if (error != 0) {
+ m_freem(m0);
+ return(error);
+ }
+ } else
+ bcopy(sopt->sopt_val, mtod(m, char *), m->m_len);
+ sopt->sopt_valsize -= m->m_len;
+ sopt->sopt_val = (char *)sopt->sopt_val + m->m_len;
+ m = m->m_next;
+ }
+ if (m != NULL) /* should have been allocated with enough space at ip6_sooptmcopyin() */
+ panic("ip6_sooptmcopyin");
+ return (0);
+}
+
+/* XXX; copyout mbuf chain data into soopt for (__FreeBSD__ < 3) routines. */
+int
+soopt_mcopyout(struct sockopt *sopt, struct mbuf *m)
+{
+ struct mbuf *m0 = m;
+ size_t valsize = 0;
+
+ if (sopt->sopt_val == NULL)
+ return (0);
+ while (m != NULL && sopt->sopt_valsize >= m->m_len) {
+ if (sopt->sopt_td != NULL) {
+ int error;
+
+ error = copyout(mtod(m, char *), sopt->sopt_val,
+ m->m_len);
+ if (error != 0) {
+ m_freem(m0);
+ return(error);
+ }
+ } else
+ bcopy(mtod(m, char *), sopt->sopt_val, m->m_len);
+ sopt->sopt_valsize -= m->m_len;
+ sopt->sopt_val = (char *)sopt->sopt_val + m->m_len;
+ valsize += m->m_len;
+ m = m->m_next;
+ }
+ if (m != NULL) {
+ /* A large enough sopt buffer should have been supplied from user-land. */
+ m_freem(m0);
+ return(EINVAL);
+ }
+ sopt->sopt_valsize = valsize;
+ return (0);
+}
+
+/*
+ * sohasoutofband(): protocol notifies socket layer of the arrival of new
+ * out-of-band data, which will then notify socket consumers.
+ */
+void
+sohasoutofband(struct socket *so)
+{
+
+ if (so->so_sigio != NULL)
+ pgsigio(&so->so_sigio, SIGURG, 0);
+ selwakeuppri(&so->so_rcv.sb_sel, PSOCK);
+}
+
+int
+sopoll(struct socket *so, int events, struct ucred *active_cred,
+ struct thread *td)
+{
+
+ return (so->so_proto->pr_usrreqs->pru_sopoll(so, events, active_cred,
+ td));
+}
+
+int
+sopoll_generic(struct socket *so, int events, struct ucred *active_cred,
+ struct thread *td)
+{
+ int revents = 0;
+
+ SOCKBUF_LOCK(&so->so_snd);
+ SOCKBUF_LOCK(&so->so_rcv);
+ if (events & (POLLIN | POLLRDNORM))
+ if (soreadabledata(so))
+ revents |= events & (POLLIN | POLLRDNORM);
+
+ if (events & (POLLOUT | POLLWRNORM))
+ if (sowriteable(so))
+ revents |= events & (POLLOUT | POLLWRNORM);
+
+ if (events & (POLLPRI | POLLRDBAND))
+ if (so->so_oobmark || (so->so_rcv.sb_state & SBS_RCVATMARK))
+ revents |= events & (POLLPRI | POLLRDBAND);
+
+ if ((events & POLLINIGNEOF) == 0) {
+ if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
+ revents |= events & (POLLIN | POLLRDNORM);
+ if (so->so_snd.sb_state & SBS_CANTSENDMORE)
+ revents |= POLLHUP;
+ }
+ }
+
+ if (revents == 0) {
+ if (events & (POLLIN | POLLPRI | POLLRDNORM | POLLRDBAND)) {
+ selrecord(td, &so->so_rcv.sb_sel);
+ so->so_rcv.sb_flags |= SB_SEL;
+ }
+
+ if (events & (POLLOUT | POLLWRNORM)) {
+ selrecord(td, &so->so_snd.sb_sel);
+ so->so_snd.sb_flags |= SB_SEL;
+ }
+ }
+
+ SOCKBUF_UNLOCK(&so->so_rcv);
+ SOCKBUF_UNLOCK(&so->so_snd);
+ return (revents);
+}
+
+int
+soo_kqfilter(struct file *fp, struct knote *kn)
+{
+ struct socket *so = kn->kn_fp->f_data;
+ struct sockbuf *sb;
+
+ switch (kn->kn_filter) {
+ case EVFILT_READ:
+ if (so->so_options & SO_ACCEPTCONN)
+ kn->kn_fop = &solisten_filtops;
+ else
+ kn->kn_fop = &soread_filtops;
+ sb = &so->so_rcv;
+ break;
+ case EVFILT_WRITE:
+ kn->kn_fop = &sowrite_filtops;
+ sb = &so->so_snd;
+ break;
+ default:
+ return (EINVAL);
+ }
+
+ SOCKBUF_LOCK(sb);
+ knlist_add(&sb->sb_sel.si_note, kn, 1);
+ sb->sb_flags |= SB_KNOTE;
+ SOCKBUF_UNLOCK(sb);
+ return (0);
+}
+
+/*
+ * Some routines that return EOPNOTSUPP for entry points that are not
+ * supported by a protocol. Fill in as needed.
+ */
+int
+pru_accept_notsupp(struct socket *so, struct sockaddr **nam)
+{
+
+ return EOPNOTSUPP;
+}
+
+int
+pru_attach_notsupp(struct socket *so, int proto, struct thread *td)
+{
+
+ return EOPNOTSUPP;
+}
+
+int
+pru_bind_notsupp(struct socket *so, struct sockaddr *nam, struct thread *td)
+{
+
+ return EOPNOTSUPP;
+}
+
+int
+pru_connect_notsupp(struct socket *so, struct sockaddr *nam, struct thread *td)
+{
+
+ return EOPNOTSUPP;
+}
+
+int
+pru_connect2_notsupp(struct socket *so1, struct socket *so2)
+{
+
+ return EOPNOTSUPP;
+}
+
+int
+pru_control_notsupp(struct socket *so, u_long cmd, caddr_t data,
+ struct ifnet *ifp, struct thread *td)
+{
+
+ return EOPNOTSUPP;
+}
+
+int
+pru_disconnect_notsupp(struct socket *so)
+{
+
+ return EOPNOTSUPP;
+}
+
+int
+pru_listen_notsupp(struct socket *so, int backlog, struct thread *td)
+{
+
+ return EOPNOTSUPP;
+}
+
+int
+pru_peeraddr_notsupp(struct socket *so, struct sockaddr **nam)
+{
+
+ return EOPNOTSUPP;
+}
+
+int
+pru_rcvd_notsupp(struct socket *so, int flags)
+{
+
+ return EOPNOTSUPP;
+}
+
+int
+pru_rcvoob_notsupp(struct socket *so, struct mbuf *m, int flags)
+{
+
+ return EOPNOTSUPP;
+}
+
+int
+pru_send_notsupp(struct socket *so, int flags, struct mbuf *m,
+ struct sockaddr *addr, struct mbuf *control, struct thread *td)
+{
+
+ return EOPNOTSUPP;
+}
+
+/*
+ * This isn't really a ``null'' operation, but it's the default one and
+ * doesn't do anything destructive.
+ */
+int
+pru_sense_null(struct socket *so, struct stat *sb)
+{
+
+ sb->st_blksize = so->so_snd.sb_hiwat;
+ return 0;
+}
+
+int
+pru_shutdown_notsupp(struct socket *so)
+{
+
+ return EOPNOTSUPP;
+}
+
+int
+pru_sockaddr_notsupp(struct socket *so, struct sockaddr **nam)
+{
+
+ return EOPNOTSUPP;
+}
+
+int
+pru_sosend_notsupp(struct socket *so, struct sockaddr *addr, struct uio *uio,
+ struct mbuf *top, struct mbuf *control, int flags, struct thread *td)
+{
+
+ return EOPNOTSUPP;
+}
+
+int
+pru_soreceive_notsupp(struct socket *so, struct sockaddr **paddr,
+ struct uio *uio, struct mbuf **mp0, struct mbuf **controlp, int *flagsp)
+{
+
+ return EOPNOTSUPP;
+}
+
+int
+pru_sopoll_notsupp(struct socket *so, int events, struct ucred *cred,
+ struct thread *td)
+{
+
+ return EOPNOTSUPP;
+}
+
+static void
+filt_sordetach(struct knote *kn)
+{
+ struct socket *so = kn->kn_fp->f_data;
+
+ SOCKBUF_LOCK(&so->so_rcv);
+ knlist_remove(&so->so_rcv.sb_sel.si_note, kn, 1);
+ if (knlist_empty(&so->so_rcv.sb_sel.si_note))
+ so->so_rcv.sb_flags &= ~SB_KNOTE;
+ SOCKBUF_UNLOCK(&so->so_rcv);
+}
+
+/*ARGSUSED*/
+static int
+filt_soread(struct knote *kn, long hint)
+{
+ struct socket *so;
+
+ so = kn->kn_fp->f_data;
+ SOCKBUF_LOCK_ASSERT(&so->so_rcv);
+
+ kn->kn_data = so->so_rcv.sb_cc - so->so_rcv.sb_ctl;
+ if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
+ kn->kn_flags |= EV_EOF;
+ kn->kn_fflags = so->so_error;
+ return (1);
+ } else if (so->so_error) /* temporary udp error */
+ return (1);
+ else if (kn->kn_sfflags & NOTE_LOWAT)
+ return (kn->kn_data >= kn->kn_sdata);
+ else
+ return (so->so_rcv.sb_cc >= so->so_rcv.sb_lowat);
+}
+
+static void
+filt_sowdetach(struct knote *kn)
+{
+ struct socket *so = kn->kn_fp->f_data;
+
+ SOCKBUF_LOCK(&so->so_snd);
+ knlist_remove(&so->so_snd.sb_sel.si_note, kn, 1);
+ if (knlist_empty(&so->so_snd.sb_sel.si_note))
+ so->so_snd.sb_flags &= ~SB_KNOTE;
+ SOCKBUF_UNLOCK(&so->so_snd);
+}
+
+/*ARGSUSED*/
+static int
+filt_sowrite(struct knote *kn, long hint)
+{
+ struct socket *so;
+
+ so = kn->kn_fp->f_data;
+ SOCKBUF_LOCK_ASSERT(&so->so_snd);
+ kn->kn_data = sbspace(&so->so_snd);
+ if (so->so_snd.sb_state & SBS_CANTSENDMORE) {
+ kn->kn_flags |= EV_EOF;
+ kn->kn_fflags = so->so_error;
+ return (1);
+ } else if (so->so_error) /* temporary udp error */
+ return (1);
+ else if (((so->so_state & SS_ISCONNECTED) == 0) &&
+ (so->so_proto->pr_flags & PR_CONNREQUIRED))
+ return (0);
+ else if (kn->kn_sfflags & NOTE_LOWAT)
+ return (kn->kn_data >= kn->kn_sdata);
+ else
+ return (kn->kn_data >= so->so_snd.sb_lowat);
+}
+
+/*ARGSUSED*/
+static int
+filt_solisten(struct knote *kn, long hint)
+{
+ struct socket *so = kn->kn_fp->f_data;
+
+ kn->kn_data = so->so_qlen;
+ return (! TAILQ_EMPTY(&so->so_comp));
+}
+
+int
+socheckuid(struct socket *so, uid_t uid)
+{
+
+ if (so == NULL)
+ return (EPERM);
+ if (so->so_cred->cr_uid != uid)
+ return (EPERM);
+ return (0);
+}
+
+static int
+sysctl_somaxconn(SYSCTL_HANDLER_ARGS)
+{
+ int error;
+ int val;
+
+ val = somaxconn;
+ error = sysctl_handle_int(oidp, &val, 0, req);
+ if (error || !req->newptr)
+ return (error);
+
+ if (val < 1 || val > USHRT_MAX)
+ return (EINVAL);
+
+ somaxconn = val;
+ return (0);
+}
+
+/*
+ * These functions are used by protocols to notify the socket layer (and its
+ * consumers) of state changes in the sockets driven by protocol-side events.
+ */
+
+/*
+ * Procedures to manipulate state flags of socket and do appropriate wakeups.
+ *
+ * Normal sequence from the active (originating) side is that
+ * soisconnecting() is called during processing of connect() call, resulting
+ * in an eventual call to soisconnected() if/when the connection is
+ * established. When the connection is torn down soisdisconnecting() is
+ * called during processing of disconnect() call, and soisdisconnected() is
+ * called when the connection to the peer is totally severed. The semantics
+ * of these routines are such that connectionless protocols can call
+ * soisconnected() and soisdisconnected() only, bypassing the in-progress
+ * calls when setting up a ``connection'' takes no time.
+ *
+ * From the passive side, a socket is created with two queues of sockets:
+ * so_incomp for connections in progress and so_comp for connections already
+ * made and awaiting user acceptance. As a protocol is preparing incoming
+ * connections, it creates a socket structure queued on so_incomp by calling
+ * sonewconn(). When the connection is established, soisconnected() is
+ * called, and transfers the socket structure to so_comp, making it available
+ * to accept().
+ *
+ * If a socket is closed with sockets on either so_incomp or so_comp, these
+ * sockets are dropped.
+ *
+ * If higher-level protocols are implemented in the kernel, the wakeups done
+ * here will sometimes cause software-interrupt process scheduling.
+ */
+void
+soisconnecting(struct socket *so)
+{
+
+ SOCK_LOCK(so);
+ so->so_state &= ~(SS_ISCONNECTED|SS_ISDISCONNECTING);
+ so->so_state |= SS_ISCONNECTING;
+ SOCK_UNLOCK(so);
+}
+
+void
+soisconnected(struct socket *so)
+{
+ struct socket *head;
+ int ret;
+
+restart:
+ ACCEPT_LOCK();
+ SOCK_LOCK(so);
+ so->so_state &= ~(SS_ISCONNECTING|SS_ISDISCONNECTING|SS_ISCONFIRMING);
+ so->so_state |= SS_ISCONNECTED;
+ head = so->so_head;
+ if (head != NULL && (so->so_qstate & SQ_INCOMP)) {
+ if ((so->so_options & SO_ACCEPTFILTER) == 0) {
+ SOCK_UNLOCK(so);
+ TAILQ_REMOVE(&head->so_incomp, so, so_list);
+ head->so_incqlen--;
+ so->so_qstate &= ~SQ_INCOMP;
+ TAILQ_INSERT_TAIL(&head->so_comp, so, so_list);
+ head->so_qlen++;
+ so->so_qstate |= SQ_COMP;
+ ACCEPT_UNLOCK();
+ sorwakeup(head);
+ wakeup_one(&head->so_timeo);
+ } else {
+ ACCEPT_UNLOCK();
+ soupcall_set(so, SO_RCV,
+ head->so_accf->so_accept_filter->accf_callback,
+ head->so_accf->so_accept_filter_arg);
+ so->so_options &= ~SO_ACCEPTFILTER;
+ ret = head->so_accf->so_accept_filter->accf_callback(so,
+ head->so_accf->so_accept_filter_arg, M_DONTWAIT);
+ if (ret == SU_ISCONNECTED)
+ soupcall_clear(so, SO_RCV);
+ SOCK_UNLOCK(so);
+ if (ret == SU_ISCONNECTED)
+ goto restart;
+ }
+ return;
+ }
+ SOCK_UNLOCK(so);
+ ACCEPT_UNLOCK();
+ wakeup(&so->so_timeo);
+ sorwakeup(so);
+ sowwakeup(so);
+}
+
+void
+soisdisconnecting(struct socket *so)
+{
+
+ /*
+ * Note: This code assumes that SOCK_LOCK(so) and
+ * SOCKBUF_LOCK(&so->so_rcv) are the same.
+ */
+ SOCKBUF_LOCK(&so->so_rcv);
+ so->so_state &= ~SS_ISCONNECTING;
+ so->so_state |= SS_ISDISCONNECTING;
+ so->so_rcv.sb_state |= SBS_CANTRCVMORE;
+ sorwakeup_locked(so);
+ SOCKBUF_LOCK(&so->so_snd);
+ so->so_snd.sb_state |= SBS_CANTSENDMORE;
+ sowwakeup_locked(so);
+ wakeup(&so->so_timeo);
+}
+
+void
+soisdisconnected(struct socket *so)
+{
+
+ /*
+ * Note: This code assumes that SOCK_LOCK(so) and
+ * SOCKBUF_LOCK(&so->so_rcv) are the same.
+ */
+ SOCKBUF_LOCK(&so->so_rcv);
+ so->so_state &= ~(SS_ISCONNECTING|SS_ISCONNECTED|SS_ISDISCONNECTING);
+ so->so_state |= SS_ISDISCONNECTED;
+ so->so_rcv.sb_state |= SBS_CANTRCVMORE;
+ sorwakeup_locked(so);
+ SOCKBUF_LOCK(&so->so_snd);
+ so->so_snd.sb_state |= SBS_CANTSENDMORE;
+ sbdrop_locked(&so->so_snd, so->so_snd.sb_cc);
+ sowwakeup_locked(so);
+ wakeup(&so->so_timeo);
+}
+
+/*
+ * Make a copy of a sockaddr in a malloced buffer of type M_SONAME.
+ */
+struct sockaddr *
+sodupsockaddr(const struct sockaddr *sa, int mflags)
+{
+ struct sockaddr *sa2;
+
+ sa2 = malloc(sa->sa_len, M_SONAME, mflags);
+ if (sa2)
+ bcopy(sa, sa2, sa->sa_len);
+ return sa2;
+}
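+
+/*
+ * Example (illustrative sketch): duplicating a peer address for return
+ * to a caller, tolerating allocation failure under M_NOWAIT:
+ *
+ *	sa2 = sodupsockaddr(sa, M_NOWAIT);
+ *	if (sa2 == NULL)
+ *		return (ENOMEM);
+ */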
+
+/*
+ * Register per-socket buffer upcalls.
+ */
+void
+soupcall_set(struct socket *so, int which,
+ int (*func)(struct socket *, void *, int), void *arg)
+{
+ struct sockbuf *sb;
+
+ switch (which) {
+ case SO_RCV:
+ sb = &so->so_rcv;
+ break;
+ case SO_SND:
+ sb = &so->so_snd;
+ break;
+ default:
+ panic("soupcall_set: bad which");
+ }
+ SOCKBUF_LOCK_ASSERT(sb);
+#if 0
+ /* XXX: accf_http actually wants to do this on purpose. */
+ KASSERT(sb->sb_upcall == NULL, ("soupcall_set: overwriting upcall"));
+#endif
+ sb->sb_upcall = func;
+ sb->sb_upcallarg = arg;
+ sb->sb_flags |= SB_UPCALL;
+}
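+
+/*
+ * Example (illustrative sketch): registering a receive upcall with the
+ * sockbuf lock held; "my_upcall" is a hypothetical function with the
+ * int (*)(struct socket *, void *, int) signature that returns SU_OK.
+ *
+ *	SOCKBUF_LOCK(&so->so_rcv);
+ *	soupcall_set(so, SO_RCV, my_upcall, arg);
+ *	SOCKBUF_UNLOCK(&so->so_rcv);
+ */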
+
+void
+soupcall_clear(struct socket *so, int which)
+{
+ struct sockbuf *sb;
+
+ switch (which) {
+ case SO_RCV:
+ sb = &so->so_rcv;
+ break;
+ case SO_SND:
+ sb = &so->so_snd;
+ break;
+ default:
+ panic("soupcall_clear: bad which");
+ }
+ SOCKBUF_LOCK_ASSERT(sb);
+ KASSERT(sb->sb_upcall != NULL, ("soupcall_clear: no upcall to clear"));
+ sb->sb_upcall = NULL;
+ sb->sb_upcallarg = NULL;
+ sb->sb_flags &= ~SB_UPCALL;
+}
+
+/*
+ * Create an external-format (``xsocket'') structure using the information in
+ * the kernel-format socket structure pointed to by so. This is done to
+ * reduce the spew of irrelevant information over this interface, to isolate
+ * user code from changes in the kernel structure, and potentially to provide
+ * information-hiding if we decide that some of this information should be
+ * hidden from users.
+ */
+void
+sotoxsocket(struct socket *so, struct xsocket *xso)
+{
+
+ xso->xso_len = sizeof *xso;
+ xso->xso_so = so;
+ xso->so_type = so->so_type;
+ xso->so_options = so->so_options;
+ xso->so_linger = so->so_linger;
+ xso->so_state = so->so_state;
+ xso->so_pcb = so->so_pcb;
+ xso->xso_protocol = so->so_proto->pr_protocol;
+ xso->xso_family = so->so_proto->pr_domain->dom_family;
+ xso->so_qlen = so->so_qlen;
+ xso->so_incqlen = so->so_incqlen;
+ xso->so_qlimit = so->so_qlimit;
+ xso->so_timeo = so->so_timeo;
+ xso->so_error = so->so_error;
+ xso->so_pgid = so->so_sigio ? so->so_sigio->sio_pgid : 0;
+ xso->so_oobmark = so->so_oobmark;
+ sbtoxsockbuf(&so->so_snd, &xso->so_snd);
+ sbtoxsockbuf(&so->so_rcv, &xso->so_rcv);
+ xso->so_uid = so->so_cred->cr_uid;
+}
+
+/*
+ * Socket accessor functions to provide external consumers with
+ * a safe interface to socket state.
+ */
+
+void
+so_listeners_apply_all(struct socket *so, void (*func)(struct socket *, void *), void *arg)
+{
+
+ TAILQ_FOREACH(so, &so->so_comp, so_list)
+ func(so, arg);
+}
+
+struct sockbuf *
+so_sockbuf_rcv(struct socket *so)
+{
+
+ return (&so->so_rcv);
+}
+
+struct sockbuf *
+so_sockbuf_snd(struct socket *so)
+{
+
+ return (&so->so_snd);
+}
+
+int
+so_state_get(const struct socket *so)
+{
+
+ return (so->so_state);
+}
+
+void
+so_state_set(struct socket *so, int val)
+{
+
+ so->so_state = val;
+}
+
+int
+so_options_get(const struct socket *so)
+{
+
+ return (so->so_options);
+}
+
+void
+so_options_set(struct socket *so, int val)
+{
+
+ so->so_options = val;
+}
+
+int
+so_error_get(const struct socket *so)
+{
+
+ return (so->so_error);
+}
+
+void
+so_error_set(struct socket *so, int val)
+{
+
+ so->so_error = val;
+}
+
+int
+so_linger_get(const struct socket *so)
+{
+
+ return (so->so_linger);
+}
+
+void
+so_linger_set(struct socket *so, int val)
+{
+
+ so->so_linger = val;
+}
+
+struct protosw *
+so_protosw_get(const struct socket *so)
+{
+
+ return (so->so_proto);
+}
+
+void
+so_protosw_set(struct socket *so, struct protosw *val)
+{
+
+ so->so_proto = val;
+}
+
+void
+so_sorwakeup(struct socket *so)
+{
+
+ sorwakeup(so);
+}
+
+void
+so_sowwakeup(struct socket *so)
+{
+
+ sowwakeup(so);
+}
+
+void
+so_sorwakeup_locked(struct socket *so)
+{
+
+ sorwakeup_locked(so);
+}
+
+void
+so_sowwakeup_locked(struct socket *so)
+{
+
+ sowwakeup_locked(so);
+}
+
+void
+so_lock(struct socket *so)
+{
+ SOCK_LOCK(so);
+}
+
+void
+so_unlock(struct socket *so)
+{
+ SOCK_UNLOCK(so);
+}
diff --git a/rtems/freebsd/local/bus_if.c b/rtems/freebsd/local/bus_if.c
new file mode 100644
index 00000000..63eff0a6
--- /dev/null
+++ b/rtems/freebsd/local/bus_if.c
@@ -0,0 +1,273 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*
+ * This file is produced automatically.
+ * Do not modify anything in here by hand.
+ *
+ * Created from source file
+ * kern/bus_if.m
+ * with
+ * makeobjops.awk
+ *
+ * See the source file for legal information
+ */
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/queue.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/kobj.h>
+#include <rtems/freebsd/sys/types.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/bus.h>
+#include <rtems/freebsd/local/bus_if.h>
+
+
+static struct resource *
+null_alloc_resource(device_t dev, device_t child,
+ int type, int *rid, u_long start, u_long end,
+ u_long count, u_int flags)
+{
+ return (0);
+}
+
+static int
+null_remap_intr(device_t bus, device_t dev, u_int irq)
+{
+
+ if (dev != NULL)
+ return (BUS_REMAP_INTR(dev, NULL, irq));
+ return (ENXIO);
+}
+
+static device_t
+null_add_child(device_t bus, int order, const char *name,
+ int unit)
+{
+
+ panic("bus_add_child is not implemented");
+}
+
+struct kobj_method bus_print_child_method_default = {
+ &bus_print_child_desc, (kobjop_t) bus_generic_print_child
+};
+
+struct kobjop_desc bus_print_child_desc = {
+ 0, &bus_print_child_method_default
+};
+
+struct kobj_method bus_probe_nomatch_method_default = {
+ &bus_probe_nomatch_desc, (kobjop_t) kobj_error_method
+};
+
+struct kobjop_desc bus_probe_nomatch_desc = {
+ 0, &bus_probe_nomatch_method_default
+};
+
+struct kobj_method bus_read_ivar_method_default = {
+ &bus_read_ivar_desc, (kobjop_t) kobj_error_method
+};
+
+struct kobjop_desc bus_read_ivar_desc = {
+ 0, &bus_read_ivar_method_default
+};
+
+struct kobj_method bus_write_ivar_method_default = {
+ &bus_write_ivar_desc, (kobjop_t) kobj_error_method
+};
+
+struct kobjop_desc bus_write_ivar_desc = {
+ 0, &bus_write_ivar_method_default
+};
+
+struct kobj_method bus_child_detached_method_default = {
+ &bus_child_detached_desc, (kobjop_t) kobj_error_method
+};
+
+struct kobjop_desc bus_child_detached_desc = {
+ 0, &bus_child_detached_method_default
+};
+
+struct kobj_method bus_driver_added_method_default = {
+ &bus_driver_added_desc, (kobjop_t) bus_generic_driver_added
+};
+
+struct kobjop_desc bus_driver_added_desc = {
+ 0, &bus_driver_added_method_default
+};
+
+struct kobj_method bus_add_child_method_default = {
+ &bus_add_child_desc, (kobjop_t) null_add_child
+};
+
+struct kobjop_desc bus_add_child_desc = {
+ 0, &bus_add_child_method_default
+};
+
+struct kobj_method bus_alloc_resource_method_default = {
+ &bus_alloc_resource_desc, (kobjop_t) null_alloc_resource
+};
+
+struct kobjop_desc bus_alloc_resource_desc = {
+ 0, &bus_alloc_resource_method_default
+};
+
+struct kobj_method bus_activate_resource_method_default = {
+ &bus_activate_resource_desc, (kobjop_t) kobj_error_method
+};
+
+struct kobjop_desc bus_activate_resource_desc = {
+ 0, &bus_activate_resource_method_default
+};
+
+struct kobj_method bus_deactivate_resource_method_default = {
+ &bus_deactivate_resource_desc, (kobjop_t) kobj_error_method
+};
+
+struct kobjop_desc bus_deactivate_resource_desc = {
+ 0, &bus_deactivate_resource_method_default
+};
+
+struct kobj_method bus_release_resource_method_default = {
+ &bus_release_resource_desc, (kobjop_t) kobj_error_method
+};
+
+struct kobjop_desc bus_release_resource_desc = {
+ 0, &bus_release_resource_method_default
+};
+
+struct kobj_method bus_setup_intr_method_default = {
+ &bus_setup_intr_desc, (kobjop_t) kobj_error_method
+};
+
+struct kobjop_desc bus_setup_intr_desc = {
+ 0, &bus_setup_intr_method_default
+};
+
+struct kobj_method bus_teardown_intr_method_default = {
+ &bus_teardown_intr_desc, (kobjop_t) kobj_error_method
+};
+
+struct kobjop_desc bus_teardown_intr_desc = {
+ 0, &bus_teardown_intr_method_default
+};
+
+struct kobj_method bus_set_resource_method_default = {
+ &bus_set_resource_desc, (kobjop_t) kobj_error_method
+};
+
+struct kobjop_desc bus_set_resource_desc = {
+ 0, &bus_set_resource_method_default
+};
+
+struct kobj_method bus_get_resource_method_default = {
+ &bus_get_resource_desc, (kobjop_t) kobj_error_method
+};
+
+struct kobjop_desc bus_get_resource_desc = {
+ 0, &bus_get_resource_method_default
+};
+
+struct kobj_method bus_delete_resource_method_default = {
+ &bus_delete_resource_desc, (kobjop_t) kobj_error_method
+};
+
+struct kobjop_desc bus_delete_resource_desc = {
+ 0, &bus_delete_resource_method_default
+};
+
+struct kobj_method bus_get_resource_list_method_default = {
+ &bus_get_resource_list_desc, (kobjop_t) bus_generic_get_resource_list
+};
+
+struct kobjop_desc bus_get_resource_list_desc = {
+ 0, &bus_get_resource_list_method_default
+};
+
+struct kobj_method bus_child_present_method_default = {
+ &bus_child_present_desc, (kobjop_t) bus_generic_child_present
+};
+
+struct kobjop_desc bus_child_present_desc = {
+ 0, &bus_child_present_method_default
+};
+
+struct kobj_method bus_child_pnpinfo_str_method_default = {
+ &bus_child_pnpinfo_str_desc, (kobjop_t) kobj_error_method
+};
+
+struct kobjop_desc bus_child_pnpinfo_str_desc = {
+ 0, &bus_child_pnpinfo_str_method_default
+};
+
+struct kobj_method bus_child_location_str_method_default = {
+ &bus_child_location_str_desc, (kobjop_t) kobj_error_method
+};
+
+struct kobjop_desc bus_child_location_str_desc = {
+ 0, &bus_child_location_str_method_default
+};
+
+struct kobj_method bus_bind_intr_method_default = {
+ &bus_bind_intr_desc, (kobjop_t) bus_generic_bind_intr
+};
+
+struct kobjop_desc bus_bind_intr_desc = {
+ 0, &bus_bind_intr_method_default
+};
+
+struct kobj_method bus_config_intr_method_default = {
+ &bus_config_intr_desc, (kobjop_t) bus_generic_config_intr
+};
+
+struct kobjop_desc bus_config_intr_desc = {
+ 0, &bus_config_intr_method_default
+};
+
+struct kobj_method bus_describe_intr_method_default = {
+ &bus_describe_intr_desc, (kobjop_t) bus_generic_describe_intr
+};
+
+struct kobjop_desc bus_describe_intr_desc = {
+ 0, &bus_describe_intr_method_default
+};
+
+struct kobj_method bus_hinted_child_method_default = {
+ &bus_hinted_child_desc, (kobjop_t) kobj_error_method
+};
+
+struct kobjop_desc bus_hinted_child_desc = {
+ 0, &bus_hinted_child_method_default
+};
+
+struct kobj_method bus_get_dma_tag_method_default = {
+ &bus_get_dma_tag_desc, (kobjop_t) bus_generic_get_dma_tag
+};
+
+struct kobjop_desc bus_get_dma_tag_desc = {
+ 0, &bus_get_dma_tag_method_default
+};
+
+struct kobj_method bus_hint_device_unit_method_default = {
+ &bus_hint_device_unit_desc, (kobjop_t) kobj_error_method
+};
+
+struct kobjop_desc bus_hint_device_unit_desc = {
+ 0, &bus_hint_device_unit_method_default
+};
+
+struct kobj_method bus_new_pass_method_default = {
+ &bus_new_pass_desc, (kobjop_t) bus_generic_new_pass
+};
+
+struct kobjop_desc bus_new_pass_desc = {
+ 0, &bus_new_pass_method_default
+};
+
+struct kobj_method bus_remap_intr_method_default = {
+ &bus_remap_intr_desc, (kobjop_t) null_remap_intr
+};
+
+struct kobjop_desc bus_remap_intr_desc = {
+ 0, &bus_remap_intr_method_default
+};
+
diff --git a/rtems/freebsd/local/bus_if.h b/rtems/freebsd/local/bus_if.h
new file mode 100644
index 00000000..6a54d605
--- /dev/null
+++ b/rtems/freebsd/local/bus_if.h
@@ -0,0 +1,786 @@
+/*
+ * This file is produced automatically.
+ * Do not modify anything in here by hand.
+ *
+ * Created from source file
+ * kern/bus_if.m
+ * with
+ * makeobjops.awk
+ *
+ * See the source file for legal information
+ */
+
+/**
+ * @defgroup BUS bus - KObj methods for drivers of devices with children
+ * @brief A set of methods required by device drivers that support
+ * child devices.
+ * @{
+ */
+
+#ifndef _bus_if_h_
+#define _bus_if_h_
+
+/** @brief Unique descriptor for the BUS_PRINT_CHILD() method */
+extern struct kobjop_desc bus_print_child_desc;
+/** @brief A function implementing the BUS_PRINT_CHILD() method */
+typedef int bus_print_child_t(device_t _dev, device_t _child);
+/**
+ * @brief Print a description of a child device
+ *
+ * This is called from system code which prints out a description of a
+ * device. It should describe the attachment that the child has with
+ * the parent. For instance the TurboLaser bus prints which node the
+ * device is attached to. See bus_generic_print_child() for more
+ * information.
+ *
+ * @param _dev the device whose child is being printed
+ * @param _child the child device to describe
+ *
+ * @returns the number of characters output.
+ */
+
+static __inline int BUS_PRINT_CHILD(device_t _dev, device_t _child)
+{
+ kobjop_t _m;
+ KOBJOPLOOKUP(((kobj_t)_dev)->ops,bus_print_child);
+ return ((bus_print_child_t *) _m)(_dev, _child);
+}
+
+/** @brief Unique descriptor for the BUS_PROBE_NOMATCH() method */
+extern struct kobjop_desc bus_probe_nomatch_desc;
+/** @brief A function implementing the BUS_PROBE_NOMATCH() method */
+typedef void bus_probe_nomatch_t(device_t _dev, device_t _child);
+/**
+ * @brief Print a notification about an unprobed child device.
+ *
+ * Called for each child device that did not succeed in probing for a
+ * driver.
+ *
+ * @param _dev the device whose child was being probed
+ * @param _child the child device which failed to probe
+ */
+
+static __inline void BUS_PROBE_NOMATCH(device_t _dev, device_t _child)
+{
+ kobjop_t _m;
+ KOBJOPLOOKUP(((kobj_t)_dev)->ops,bus_probe_nomatch);
+ ((bus_probe_nomatch_t *) _m)(_dev, _child);
+}
+
+/** @brief Unique descriptor for the BUS_READ_IVAR() method */
+extern struct kobjop_desc bus_read_ivar_desc;
+/** @brief A function implementing the BUS_READ_IVAR() method */
+typedef int bus_read_ivar_t(device_t _dev, device_t _child, int _index,
+ uintptr_t *_result);
+/**
+ * @brief Read the value of a bus-specific attribute of a device
+ *
+ * This method, along with BUS_WRITE_IVAR() manages a bus-specific set
+ * of instance variables of a child device. The intention is that
+ * each different type of bus defines a set of appropriate instance
+ * variables (such as ports and irqs for ISA bus etc.)
+ *
+ * This information could be given to the child device as a struct but
+ * that makes it hard for a bus to add or remove variables without
+ * forcing an edit and recompile for all drivers, which may not be
+ * possible for vendor-supplied binary drivers.
+ *
+ * This method copies the value of an instance variable to the
+ * location specified by @p *_result.
+ *
+ * @param _dev the device whose child was being examined
+ * @param _child the child device whose instance variable is
+ * being read
+ * @param _index the instance variable to read
+ * @param _result a location to receive the instance variable
+ * value
+ *
+ * @retval 0 success
+ * @retval ENOENT no such instance variable is supported by @p
+ * _dev
+ */
+
+static __inline int BUS_READ_IVAR(device_t _dev, device_t _child, int _index,
+ uintptr_t *_result)
+{
+ kobjop_t _m;
+ KOBJOPLOOKUP(((kobj_t)_dev)->ops,bus_read_ivar);
+ return ((bus_read_ivar_t *) _m)(_dev, _child, _index, _result);
+}
+
+/** @brief Unique descriptor for the BUS_WRITE_IVAR() method */
+extern struct kobjop_desc bus_write_ivar_desc;
+/** @brief A function implementing the BUS_WRITE_IVAR() method */
+typedef int bus_write_ivar_t(device_t _dev, device_t _child, int _indx,
+ uintptr_t _value);
+/**
+ * @brief Write the value of a bus-specific attribute of a device
+ *
+ * This method sets the value of an instance variable to @p _value.
+ *
+ * @param _dev the device whose child was being updated
+ * @param _child the child device whose instance variable is
+ * being written
+ * @param _indx the instance variable to write
+ * @param _value the value to write to that instance variable
+ *
+ * @retval 0 success
+ * @retval ENOENT no such instance variable is supported by @p
+ * _dev
+ * @retval EINVAL the instance variable was recognised but
+ * contains a read-only value
+ */
+
+static __inline int BUS_WRITE_IVAR(device_t _dev, device_t _child, int _indx,
+ uintptr_t _value)
+{
+ kobjop_t _m;
+ KOBJOPLOOKUP(((kobj_t)_dev)->ops,bus_write_ivar);
+ return ((bus_write_ivar_t *) _m)(_dev, _child, _indx, _value);
+}
+
+/** @brief Unique descriptor for the BUS_CHILD_DETACHED() method */
+extern struct kobjop_desc bus_child_detached_desc;
+/** @brief A function implementing the BUS_CHILD_DETACHED() method */
+typedef void bus_child_detached_t(device_t _dev, device_t _child);
+/**
+ * @brief Notify a bus that a child was detached
+ *
+ * Called after the child's DEVICE_DETACH() method to allow the parent
+ * to reclaim any resources allocated on behalf of the child.
+ *
+ * @param _dev the device whose child changed state
+ * @param _child the child device which changed state
+ */
+
+static __inline void BUS_CHILD_DETACHED(device_t _dev, device_t _child)
+{
+ kobjop_t _m;
+ KOBJOPLOOKUP(((kobj_t)_dev)->ops,bus_child_detached);
+ ((bus_child_detached_t *) _m)(_dev, _child);
+}
+
+/** @brief Unique descriptor for the BUS_DRIVER_ADDED() method */
+extern struct kobjop_desc bus_driver_added_desc;
+/** @brief A function implementing the BUS_DRIVER_ADDED() method */
+typedef void bus_driver_added_t(device_t _dev, driver_t *_driver);
+/**
+ * @brief Notify a bus that a new driver was added
+ *
+ * Called when a new driver is added to the devclass which owns this
+ * bus. The generic implementation of this method attempts to probe and
+ * attach any un-matched children of the bus.
+ *
+ * @param _dev the device whose devclass had a new driver
+ * added to it
+ * @param _driver the new driver which was added
+ */
+
+static __inline void BUS_DRIVER_ADDED(device_t _dev, driver_t *_driver)
+{
+ kobjop_t _m;
+ KOBJOPLOOKUP(((kobj_t)_dev)->ops,bus_driver_added);
+ ((bus_driver_added_t *) _m)(_dev, _driver);
+}
+
+/** @brief Unique descriptor for the BUS_ADD_CHILD() method */
+extern struct kobjop_desc bus_add_child_desc;
+/** @brief A function implementing the BUS_ADD_CHILD() method */
+typedef device_t bus_add_child_t(device_t _dev, u_int _order, const char *_name,
+ int _unit);
+/**
+ * @brief Create a new child device
+ *
+ * For busses which use drivers supporting DEVICE_IDENTIFY() to
+ * enumerate their devices, this method is used to create new
+ * device instances. The new device will be added after the last
+ * existing child with the same order.
+ *
+ * @param _dev the bus device which will be the parent of the
+ * new child device
+ * @param _order a value which is used to partially sort the
+ * children of @p _dev - devices created using
+ * lower values of @p _order appear first in @p
+ * _dev's list of children
+ * @param _name devclass name for new device or @c NULL if not
+ * specified
+ * @param _unit unit number for new device or @c -1 if not
+ * specified
+ */
+
+static __inline device_t BUS_ADD_CHILD(device_t _dev, u_int _order,
+ const char *_name, int _unit)
+{
+ kobjop_t _m;
+ KOBJOPLOOKUP(((kobj_t)_dev)->ops,bus_add_child);
+ return ((bus_add_child_t *) _m)(_dev, _order, _name, _unit);
+}
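+
+/*
+ * Example (illustrative sketch): DEVICE_IDENTIFY() routines normally
+ * reach this method through device_add_child(), letting the bus pick
+ * the unit number; "mydev" is a hypothetical devclass name.
+ *
+ *	child = device_add_child(parent, "mydev", -1);
+ */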
+
+/** @brief Unique descriptor for the BUS_ALLOC_RESOURCE() method */
+extern struct kobjop_desc bus_alloc_resource_desc;
+/** @brief A function implementing the BUS_ALLOC_RESOURCE() method */
+typedef struct resource * bus_alloc_resource_t(device_t _dev, device_t _child,
+ int _type, int *_rid,
+ u_long _start, u_long _end,
+ u_long _count, u_int _flags);
+/**
+ * @brief Allocate a system resource
+ *
+ * This method is called by child devices of a bus to allocate resources.
+ * The types are defined in <machine/resource.h>; the meaning of the
+ * resource-ID field varies from bus to bus (but @p *rid == 0 is always
+ * valid if the resource type is). If a resource was allocated and the
+ * caller did not use RF_ACTIVE to specify that it should be
+ * activated immediately, the caller is responsible for calling
+ * BUS_ACTIVATE_RESOURCE() when it actually uses the resource.
+ *
+ * @param _dev the parent device of @p _child
+ * @param _child the device which is requesting an allocation
+ * @param _type the type of resource to allocate
+ * @param _rid a pointer to the resource identifier
+ * @param _start hint at the start of the resource range - pass
+ * @c 0UL for any start address
+ * @param _end hint at the end of the resource range - pass
+ * @c ~0UL for any end address
+ * @param _count hint at the size of range required - pass @c 1
+ * for any size
+ * @param _flags any extra flags to control the resource
+ * allocation - see @c RF_XXX flags in
+ * <sys/rman.h> for details
+ *
+ * @returns the resource which was allocated or @c NULL if no
+ * resource could be allocated
+ */
+
+static __inline struct resource * BUS_ALLOC_RESOURCE(device_t _dev,
+ device_t _child, int _type,
+ int *_rid, u_long _start,
+ u_long _end, u_long _count,
+ u_int _flags)
+{
+ kobjop_t _m;
+ KOBJOPLOOKUP(((kobj_t)_dev)->ops,bus_alloc_resource);
+ return ((bus_alloc_resource_t *) _m)(_dev, _child, _type, _rid, _start, _end, _count, _flags);
+}
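+
+/*
+ * Example (illustrative sketch): a child driver normally reaches this
+ * method through convenience wrappers such as bus_alloc_resource_any(),
+ * e.g. to obtain an active, shareable interrupt line:
+ *
+ *	int rid = 0;
+ *	struct resource *irq;
+ *
+ *	irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
+ *	    RF_ACTIVE | RF_SHAREABLE);
+ */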
+
+/** @brief Unique descriptor for the BUS_ACTIVATE_RESOURCE() method */
+extern struct kobjop_desc bus_activate_resource_desc;
+/** @brief A function implementing the BUS_ACTIVATE_RESOURCE() method */
+typedef int bus_activate_resource_t(device_t _dev, device_t _child, int _type,
+ int _rid, struct resource *_r);
+/**
+ * @brief Activate a resource
+ *
+ * Activate a resource previously allocated with
+ * BUS_ALLOC_RESOURCE(). This may for instance map a memory region
+ * into the kernel's virtual address space.
+ *
+ * @param _dev the parent device of @p _child
+ * @param _child the device which allocated the resource
+ * @param _type the type of resource
+ * @param _rid the resource identifier
+ * @param _r the resource to activate
+ */
+
+static __inline int BUS_ACTIVATE_RESOURCE(device_t _dev, device_t _child,
+ int _type, int _rid,
+ struct resource *_r)
+{
+ kobjop_t _m;
+ KOBJOPLOOKUP(((kobj_t)_dev)->ops,bus_activate_resource);
+ return ((bus_activate_resource_t *) _m)(_dev, _child, _type, _rid, _r);
+}
+
+/** @brief Unique descriptor for the BUS_DEACTIVATE_RESOURCE() method */
+extern struct kobjop_desc bus_deactivate_resource_desc;
+/** @brief A function implementing the BUS_DEACTIVATE_RESOURCE() method */
+typedef int bus_deactivate_resource_t(device_t _dev, device_t _child, int _type,
+ int _rid, struct resource *_r);
+/**
+ * @brief Deactivate a resource
+ *
+ * Deactivate a resource previously allocated with
+ * BUS_ALLOC_RESOURCE(). This may for instance unmap a memory region
+ * from the kernel's virtual address space.
+ *
+ * @param _dev the parent device of @p _child
+ * @param _child the device which allocated the resource
+ * @param _type the type of resource
+ * @param _rid the resource identifier
+ * @param _r the resource to deactivate
+ */
+
+static __inline int BUS_DEACTIVATE_RESOURCE(device_t _dev, device_t _child,
+ int _type, int _rid,
+ struct resource *_r)
+{
+ kobjop_t _m;
+ KOBJOPLOOKUP(((kobj_t)_dev)->ops,bus_deactivate_resource);
+ return ((bus_deactivate_resource_t *) _m)(_dev, _child, _type, _rid, _r);
+}
+
+/** @brief Unique descriptor for the BUS_RELEASE_RESOURCE() method */
+extern struct kobjop_desc bus_release_resource_desc;
+/** @brief A function implementing the BUS_RELEASE_RESOURCE() method */
+typedef int bus_release_resource_t(device_t _dev, device_t _child, int _type,
+ int _rid, struct resource *_res);
+/**
+ * @brief Release a resource
+ *
+ * Free a resource allocated by BUS_ALLOC_RESOURCE(). The @p _rid
+ * value must be the same as the one returned by BUS_ALLOC_RESOURCE()
+ * (which is not necessarily the same as the one the client passed).
+ *
+ * @param _dev the parent device of @p _child
+ * @param _child the device which allocated the resource
+ * @param _type the type of resource
+ * @param _rid the resource identifier
+ * @param _res the resource to release
+ */
+
+static __inline int BUS_RELEASE_RESOURCE(device_t _dev, device_t _child,
+ int _type, int _rid,
+ struct resource *_res)
+{
+ kobjop_t _m;
+ KOBJOPLOOKUP(((kobj_t)_dev)->ops,bus_release_resource);
+ return ((bus_release_resource_t *) _m)(_dev, _child, _type, _rid, _res);
+}
+
+/** @brief Unique descriptor for the BUS_SETUP_INTR() method */
+extern struct kobjop_desc bus_setup_intr_desc;
+/** @brief A function implementing the BUS_SETUP_INTR() method */
+typedef int bus_setup_intr_t(device_t _dev, device_t _child,
+ struct resource *_irq, int _flags,
+ driver_filter_t *_filter, driver_intr_t *_intr,
+ void *_arg, void **_cookiep);
+/**
+ * @brief Install an interrupt handler
+ *
+ * This method is used to associate an interrupt handler function with
+ * an irq resource. When the interrupt triggers, the function @p _intr
+ * will be called with the value of @p _arg as its single
+ * argument. The value returned in @p *_cookiep is used to cancel the
+ * interrupt handler - the caller should save this value to use in a
+ * future call to BUS_TEARDOWN_INTR().
+ *
+ * @param _dev the parent device of @p _child
+ * @param _child the device which allocated the resource
+ * @param _irq the resource representing the interrupt
+ * @param _flags a set of bits from enum intr_type specifying
+ * the class of interrupt
+ * @param _filter an optional filter function which runs in
+ * primary interrupt context, or @c NULL if none
+ * @param _intr the function to call when the interrupt
+ * triggers
+ * @param _arg a value to use as the single argument in calls
+ * to @p _intr
+ * @param _cookiep a pointer to a location to receive a cookie
+ * value that may be used to remove the interrupt
+ * handler
+ */
+
+static __inline int BUS_SETUP_INTR(device_t _dev, device_t _child,
+ struct resource *_irq, int _flags,
+ driver_filter_t *_filter,
+ driver_intr_t *_intr, void *_arg,
+ void **_cookiep)
+{
+ kobjop_t _m;
+ KOBJOPLOOKUP(((kobj_t)_dev)->ops,bus_setup_intr);
+ return ((bus_setup_intr_t *) _m)(_dev, _child, _irq, _flags, _filter, _intr, _arg, _cookiep);
+}
+
+/** @brief Unique descriptor for the BUS_TEARDOWN_INTR() method */
+extern struct kobjop_desc bus_teardown_intr_desc;
+/** @brief A function implementing the BUS_TEARDOWN_INTR() method */
+typedef int bus_teardown_intr_t(device_t _dev, device_t _child,
+ struct resource *_irq, void *_cookie);
+/**
+ * @brief Uninstall an interrupt handler
+ *
+ * This method is used to disassociate an interrupt handler function
+ * from an irq resource. The value of @p _cookie must be the value
+ * returned from a previous call to BUS_SETUP_INTR().
+ *
+ * @param _dev the parent device of @p _child
+ * @param _child the device which allocated the resource
+ * @param _irq the resource representing the interrupt
+ * @param _cookie the cookie value returned when the interrupt
+ * was originally registered
+ */
+
+static __inline int BUS_TEARDOWN_INTR(device_t _dev, device_t _child,
+ struct resource *_irq, void *_cookie)
+{
+ kobjop_t _m;
+ KOBJOPLOOKUP(((kobj_t)_dev)->ops,bus_teardown_intr);
+ return ((bus_teardown_intr_t *) _m)(_dev, _child, _irq, _cookie);
+}
+
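+/*
+ * Illustrative sketch (the foo_intr handler and the sc_irq/sc_ih
+ * softc members are invented for this example): a driver installs a
+ * handler on its allocated IRQ resource and saves the cookie so that
+ * it can cancel the handler later.
+ *
+ * @code
+ * error = BUS_SETUP_INTR(device_get_parent(dev), dev, sc->sc_irq,
+ *     INTR_TYPE_NET | INTR_MPSAFE, NULL, foo_intr, sc, &sc->sc_ih);
+ * if (error != 0)
+ *     return (error);
+ *
+ * // ... and on detach, cancel it with the saved cookie:
+ * BUS_TEARDOWN_INTR(device_get_parent(dev), dev, sc->sc_irq, sc->sc_ih);
+ * @endcode
+ */
+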
+/** @brief Unique descriptor for the BUS_SET_RESOURCE() method */
+extern struct kobjop_desc bus_set_resource_desc;
+/** @brief A function implementing the BUS_SET_RESOURCE() method */
+typedef int bus_set_resource_t(device_t _dev, device_t _child, int _type,
+ int _rid, u_long _start, u_long _count);
+/**
+ * @brief Define a resource which can be allocated with
+ * BUS_ALLOC_RESOURCE().
+ *
+ * This method is used by some busses (typically ISA) to allow a
+ * driver to describe a resource range that it would like to
+ * allocate. The resource defined by @p _type and @p _rid is defined
+ * to start at @p _start and to include @p _count indices in its
+ * range.
+ *
+ * @param _dev the parent device of @p _child
+ * @param _child the device which owns the resource
+ * @param _type the type of resource
+ * @param _rid the resource identifier
+ * @param _start the start of the resource range
+ * @param _count the size of the resource range
+ */
+
+static __inline int BUS_SET_RESOURCE(device_t _dev, device_t _child, int _type,
+ int _rid, u_long _start, u_long _count)
+{
+ kobjop_t _m;
+ KOBJOPLOOKUP(((kobj_t)_dev)->ops,bus_set_resource);
+ return ((bus_set_resource_t *) _m)(_dev, _child, _type, _rid, _start, _count);
+}
+
+/** @brief Unique descriptor for the BUS_GET_RESOURCE() method */
+extern struct kobjop_desc bus_get_resource_desc;
+/** @brief A function implementing the BUS_GET_RESOURCE() method */
+typedef int bus_get_resource_t(device_t _dev, device_t _child, int _type,
+ int _rid, u_long *_startp, u_long *_countp);
+/**
+ * @brief Describe a resource
+ *
+ * This method allows a driver to examine the range used for a given
+ * resource without actually allocating it.
+ *
+ * @param _dev the parent device of @p _child
+ * @param _child the device which owns the resource
+ * @param _type the type of resource
+ * @param _rid the resource identifier
+ * @param _startp the address of a location to receive the start
+ * index of the resource range
+ * @param _countp the address of a location to receive the size
+ * of the resource range
+ */
+
+static __inline int BUS_GET_RESOURCE(device_t _dev, device_t _child, int _type,
+ int _rid, u_long *_startp, u_long *_countp)
+{
+ kobjop_t _m;
+ KOBJOPLOOKUP(((kobj_t)_dev)->ops,bus_get_resource);
+ return ((bus_get_resource_t *) _m)(_dev, _child, _type, _rid, _startp, _countp);
+}
+
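+/*
+ * Illustrative sketch (the port range values are invented): an
+ * ISA-style bus can publish a resource range for a child, and a
+ * driver can read it back without allocating it.
+ *
+ * @code
+ * u_long start, count;
+ *
+ * BUS_SET_RESOURCE(bus, child, SYS_RES_IOPORT, 0, 0x300, 8);
+ * BUS_GET_RESOURCE(bus, child, SYS_RES_IOPORT, 0, &start, &count);
+ * // now start == 0x300 and count == 8
+ * @endcode
+ */
+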
+/** @brief Unique descriptor for the BUS_DELETE_RESOURCE() method */
+extern struct kobjop_desc bus_delete_resource_desc;
+/** @brief A function implementing the BUS_DELETE_RESOURCE() method */
+typedef void bus_delete_resource_t(device_t _dev, device_t _child, int _type,
+ int _rid);
+/**
+ * @brief Delete a resource.
+ *
+ * Use this to delete a resource (possibly one previously added with
+ * BUS_SET_RESOURCE()).
+ *
+ * @param _dev the parent device of @p _child
+ * @param _child the device which owns the resource
+ * @param _type the type of resource
+ * @param _rid the resource identifier
+ */
+
+static __inline void BUS_DELETE_RESOURCE(device_t _dev, device_t _child,
+ int _type, int _rid)
+{
+ kobjop_t _m;
+ KOBJOPLOOKUP(((kobj_t)_dev)->ops,bus_delete_resource);
+ ((bus_delete_resource_t *) _m)(_dev, _child, _type, _rid);
+}
+
+/** @brief Unique descriptor for the BUS_GET_RESOURCE_LIST() method */
+extern struct kobjop_desc bus_get_resource_list_desc;
+/** @brief A function implementing the BUS_GET_RESOURCE_LIST() method */
+typedef struct resource_list * bus_get_resource_list_t(device_t _dev,
+ device_t _child);
+/**
+ * @brief Return a struct resource_list.
+ *
+ * Used by drivers which use bus_generic_rl_alloc_resource() etc. to
+ * implement their resource handling. It should return the resource
+ * list of the given child device.
+ *
+ * @param _dev the parent device of @p _child
+ * @param _child the device which owns the resource list
+ */
+
+static __inline struct resource_list * BUS_GET_RESOURCE_LIST(device_t _dev,
+ device_t _child)
+{
+ kobjop_t _m;
+ KOBJOPLOOKUP(((kobj_t)_dev)->ops,bus_get_resource_list);
+ return ((bus_get_resource_list_t *) _m)(_dev, _child);
+}
+
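+/*
+ * Hypothetical implementation sketch (the foo_devinfo ivars layout is
+ * an assumption): a bus that keeps a per-child resource_list in its
+ * instance variables can implement this method trivially.
+ *
+ * @code
+ * static struct resource_list *
+ * foo_get_resource_list(device_t dev, device_t child)
+ * {
+ *     struct foo_devinfo *dinfo = device_get_ivars(child);
+ *
+ *     return (&dinfo->resources);
+ * }
+ * @endcode
+ */
+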
+/** @brief Unique descriptor for the BUS_CHILD_PRESENT() method */
+extern struct kobjop_desc bus_child_present_desc;
+/** @brief A function implementing the BUS_CHILD_PRESENT() method */
+typedef int bus_child_present_t(device_t _dev, device_t _child);
+/**
+ * @brief Is the hardware described by @p _child still attached to the
+ * system?
+ *
+ * This method should return 0 if the device is not present and -1 if
+ * it is present. Any error encountered while making that
+ * determination should be returned as a normal errno value. Client
+ * drivers should assume that the device is present even if an error
+ * occurs while determining whether it is there. Busses should try to
+ * avoid returning errors, but newcard will return an error if the
+ * device fails to implement this method.
+ *
+ * @param _dev the parent device of @p _child
+ * @param _child the device which is being examined
+ */
+
+static __inline int BUS_CHILD_PRESENT(device_t _dev, device_t _child)
+{
+ kobjop_t _m;
+ KOBJOPLOOKUP(((kobj_t)_dev)->ops,bus_child_present);
+ return ((bus_child_present_t *) _m)(_dev, _child);
+}
+
+/** @brief Unique descriptor for the BUS_CHILD_PNPINFO_STR() method */
+extern struct kobjop_desc bus_child_pnpinfo_str_desc;
+/** @brief A function implementing the BUS_CHILD_PNPINFO_STR() method */
+typedef int bus_child_pnpinfo_str_t(device_t _dev, device_t _child, char *_buf,
+ size_t _buflen);
+/**
+ * @brief Returns the pnp info for this device.
+ *
+ * Return it as a string in @p _buf. If the buffer is too small to
+ * hold the string, return EOVERFLOW.
+ *
+ * @param _dev the parent device of @p _child
+ * @param _child the device which is being examined
+ * @param _buf the address of a buffer to receive the pnp
+ * string
+ * @param _buflen the size of the buffer pointed to by @p _buf
+ */
+
+static __inline int BUS_CHILD_PNPINFO_STR(device_t _dev, device_t _child,
+ char *_buf, size_t _buflen)
+{
+ kobjop_t _m;
+ KOBJOPLOOKUP(((kobj_t)_dev)->ops,bus_child_pnpinfo_str);
+ return ((bus_child_pnpinfo_str_t *) _m)(_dev, _child, _buf, _buflen);
+}
+
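+/*
+ * Hypothetical implementation sketch (the ivars layout and its
+ * vendor/device fields are assumptions): a bus driver could format
+ * the pnp string with snprintf() and report EOVERFLOW on truncation.
+ *
+ * @code
+ * static int
+ * foo_child_pnpinfo_str(device_t dev, device_t child, char *buf,
+ *     size_t buflen)
+ * {
+ *     struct foo_ivars *ivars = device_get_ivars(child);
+ *
+ *     if (snprintf(buf, buflen, "vendor=0x%04x device=0x%04x",
+ *         ivars->vendor, ivars->device) >= (int)buflen)
+ *         return (EOVERFLOW);
+ *     return (0);
+ * }
+ * @endcode
+ */
+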
+/** @brief Unique descriptor for the BUS_CHILD_LOCATION_STR() method */
+extern struct kobjop_desc bus_child_location_str_desc;
+/** @brief A function implementing the BUS_CHILD_LOCATION_STR() method */
+typedef int bus_child_location_str_t(device_t _dev, device_t _child, char *_buf,
+ size_t _buflen);
+/**
+ * @brief Returns the location for this device.
+ *
+ * Return it as a string in @p _buf. If the buffer is too small to
+ * hold the string, return EOVERFLOW.
+ *
+ * @param _dev the parent device of @p _child
+ * @param _child the device which is being examined
+ * @param _buf the address of a buffer to receive the location
+ * string
+ * @param _buflen the size of the buffer pointed to by @p _buf
+ */
+
+static __inline int BUS_CHILD_LOCATION_STR(device_t _dev, device_t _child,
+ char *_buf, size_t _buflen)
+{
+ kobjop_t _m;
+ KOBJOPLOOKUP(((kobj_t)_dev)->ops,bus_child_location_str);
+ return ((bus_child_location_str_t *) _m)(_dev, _child, _buf, _buflen);
+}
+
+/** @brief Unique descriptor for the BUS_BIND_INTR() method */
+extern struct kobjop_desc bus_bind_intr_desc;
+/** @brief A function implementing the BUS_BIND_INTR() method */
+typedef int bus_bind_intr_t(device_t _dev, device_t _child,
+ struct resource *_irq, int _cpu);
+/**
+ * @brief Allow drivers to request that an interrupt be bound to a specific
+ * CPU.
+ *
+ * @param _dev the parent device of @p _child
+ * @param _child the device which allocated the resource
+ * @param _irq the resource representing the interrupt
+ * @param _cpu the CPU to bind the interrupt to
+ */
+
+static __inline int BUS_BIND_INTR(device_t _dev, device_t _child,
+ struct resource *_irq, int _cpu)
+{
+ kobjop_t _m;
+ KOBJOPLOOKUP(((kobj_t)_dev)->ops,bus_bind_intr);
+ return ((bus_bind_intr_t *) _m)(_dev, _child, _irq, _cpu);
+}
+
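+/*
+ * Illustrative use (sc->sc_irq is an invented softc member): pin a
+ * previously set up interrupt to CPU 0.
+ *
+ * @code
+ * BUS_BIND_INTR(device_get_parent(dev), dev, sc->sc_irq, 0);
+ * @endcode
+ */
+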
+/** @brief Unique descriptor for the BUS_CONFIG_INTR() method */
+extern struct kobjop_desc bus_config_intr_desc;
+/** @brief A function implementing the BUS_CONFIG_INTR() method */
+typedef int bus_config_intr_t(device_t _dev, int _irq, enum intr_trigger _trig,
+ enum intr_polarity _pol);
+/**
+ * @brief Allow (bus) drivers to specify the trigger mode and polarity
+ * of the specified interrupt.
+ *
+ * @param _dev the bus device
+ * @param _irq the interrupt number to modify
+ * @param _trig the trigger mode required
+ * @param _pol the interrupt polarity required
+ */
+
+static __inline int BUS_CONFIG_INTR(device_t _dev, int _irq,
+ enum intr_trigger _trig,
+ enum intr_polarity _pol)
+{
+ kobjop_t _m;
+ KOBJOPLOOKUP(((kobj_t)_dev)->ops,bus_config_intr);
+ return ((bus_config_intr_t *) _m)(_dev, _irq, _trig, _pol);
+}
+
+/** @brief Unique descriptor for the BUS_DESCRIBE_INTR() method */
+extern struct kobjop_desc bus_describe_intr_desc;
+/** @brief A function implementing the BUS_DESCRIBE_INTR() method */
+typedef int bus_describe_intr_t(device_t _dev, device_t _child,
+ struct resource *_irq, void *_cookie,
+ const char *_descr);
+/**
+ * @brief Allow drivers to associate a description with an active
+ * interrupt handler.
+ *
+ * @param _dev the parent device of @p _child
+ * @param _child the device which allocated the resource
+ * @param _irq the resource representing the interrupt
+ * @param _cookie the cookie value returned when the interrupt
+ * was originally registered
+ * @param _descr the description to associate with the interrupt
+ */
+
+static __inline int BUS_DESCRIBE_INTR(device_t _dev, device_t _child,
+ struct resource *_irq, void *_cookie,
+ const char *_descr)
+{
+ kobjop_t _m;
+ KOBJOPLOOKUP(((kobj_t)_dev)->ops,bus_describe_intr);
+ return ((bus_describe_intr_t *) _m)(_dev, _child, _irq, _cookie, _descr);
+}
+
+/** @brief Unique descriptor for the BUS_HINTED_CHILD() method */
+extern struct kobjop_desc bus_hinted_child_desc;
+/** @brief A function implementing the BUS_HINTED_CHILD() method */
+typedef void bus_hinted_child_t(device_t _dev, const char *_dname, int _dunit);
+/**
+ * @brief Notify a (bus) driver about a child that the hints mechanism
+ * believes it has discovered.
+ *
+ * The bus is responsible for then adding the child in the right order
+ * and discovering other things about the child. The bus driver is
+ * free to ignore this hint, to do special things, etc. It is all up
+ * to the bus driver to interpret.
+ *
+ * This method is only called in response to the parent bus asking for
+ * hinted devices to be enumerated.
+ *
+ * @param _dev the bus device
+ * @param _dname the name of the device without the unit number
+ * @param _dunit the unit number of the device
+ */
+
+static __inline void BUS_HINTED_CHILD(device_t _dev, const char *_dname,
+ int _dunit)
+{
+ kobjop_t _m;
+ KOBJOPLOOKUP(((kobj_t)_dev)->ops,bus_hinted_child);
+ ((bus_hinted_child_t *) _m)(_dev, _dname, _dunit);
+}
+
+/** @brief Unique descriptor for the BUS_GET_DMA_TAG() method */
+extern struct kobjop_desc bus_get_dma_tag_desc;
+/** @brief A function implementing the BUS_GET_DMA_TAG() method */
+typedef bus_dma_tag_t bus_get_dma_tag_t(device_t _dev, device_t _child);
+/**
+ * @brief Returns the bus_dma_tag_t to use for devices on the bus.
+ *
+ * @param _dev the parent device of @p _child
+ * @param _child the device to which the tag will belong
+ */
+
+static __inline bus_dma_tag_t BUS_GET_DMA_TAG(device_t _dev, device_t _child)
+{
+ kobjop_t _m;
+ KOBJOPLOOKUP(((kobj_t)_dev)->ops,bus_get_dma_tag);
+ return ((bus_get_dma_tag_t *) _m)(_dev, _child);
+}
+
+/** @brief Unique descriptor for the BUS_HINT_DEVICE_UNIT() method */
+extern struct kobjop_desc bus_hint_device_unit_desc;
+/** @brief A function implementing the BUS_HINT_DEVICE_UNIT() method */
+typedef void bus_hint_device_unit_t(device_t _dev, device_t _child,
+ const char *_name, int *_unitp);
+/**
+ * @brief Allow the bus to determine the unit number of a device.
+ *
+ * @param _dev the parent device of @p _child
+ * @param _child the device whose unit is to be wired
+ * @param _name the name of the device's new devclass
+ * @param _unitp a pointer to the device's new unit value
+ */
+
+static __inline void BUS_HINT_DEVICE_UNIT(device_t _dev, device_t _child,
+ const char *_name, int *_unitp)
+{
+ kobjop_t _m;
+ KOBJOPLOOKUP(((kobj_t)_dev)->ops,bus_hint_device_unit);
+ ((bus_hint_device_unit_t *) _m)(_dev, _child, _name, _unitp);
+}
+
+/** @brief Unique descriptor for the BUS_NEW_PASS() method */
+extern struct kobjop_desc bus_new_pass_desc;
+/** @brief A function implementing the BUS_NEW_PASS() method */
+typedef void bus_new_pass_t(device_t _dev);
+/**
+ * @brief Notify a bus that the bus pass level has been changed
+ *
+ * @param _dev the bus device
+ */
+
+static __inline void BUS_NEW_PASS(device_t _dev)
+{
+ kobjop_t _m;
+ KOBJOPLOOKUP(((kobj_t)_dev)->ops,bus_new_pass);
+ ((bus_new_pass_t *) _m)(_dev);
+}
+
+/** @brief Unique descriptor for the BUS_REMAP_INTR() method */
+extern struct kobjop_desc bus_remap_intr_desc;
+/** @brief A function implementing the BUS_REMAP_INTR() method */
+typedef int bus_remap_intr_t(device_t _dev, device_t _child, u_int _irq);
+/**
+ * @brief Notify a bus that specified child's IRQ should be remapped.
+ *
+ * @param _dev the bus device
+ * @param _child the child device
+ * @param _irq the irq number
+ */
+
+static __inline int BUS_REMAP_INTR(device_t _dev, device_t _child, u_int _irq)
+{
+ kobjop_t _m;
+ KOBJOPLOOKUP(((kobj_t)_dev)->ops,bus_remap_intr);
+ return ((bus_remap_intr_t *) _m)(_dev, _child, _irq);
+}
+
+#endif /* _bus_if_h_ */
diff --git a/rtems/freebsd/local/cryptodev_if.c b/rtems/freebsd/local/cryptodev_if.c
new file mode 100644
index 00000000..242c64c3
--- /dev/null
+++ b/rtems/freebsd/local/cryptodev_if.c
@@ -0,0 +1,54 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*
+ * This file is produced automatically.
+ * Do not modify anything in here by hand.
+ *
+ * Created from source file
+ * opencrypto/cryptodev_if.m
+ * with
+ * makeobjops.awk
+ *
+ * See the source file for legal information
+ */
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/queue.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/kobj.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/opencrypto/cryptodev.h>
+#include <rtems/freebsd/local/cryptodev_if.h>
+
+struct kobj_method cryptodev_newsession_method_default = {
+ &cryptodev_newsession_desc, (kobjop_t) kobj_error_method
+};
+
+struct kobjop_desc cryptodev_newsession_desc = {
+ 0, &cryptodev_newsession_method_default
+};
+
+struct kobj_method cryptodev_freesession_method_default = {
+ &cryptodev_freesession_desc, (kobjop_t) kobj_error_method
+};
+
+struct kobjop_desc cryptodev_freesession_desc = {
+ 0, &cryptodev_freesession_method_default
+};
+
+struct kobj_method cryptodev_process_method_default = {
+ &cryptodev_process_desc, (kobjop_t) kobj_error_method
+};
+
+struct kobjop_desc cryptodev_process_desc = {
+ 0, &cryptodev_process_method_default
+};
+
+struct kobj_method cryptodev_kprocess_method_default = {
+ &cryptodev_kprocess_desc, (kobjop_t) kobj_error_method
+};
+
+struct kobjop_desc cryptodev_kprocess_desc = {
+ 0, &cryptodev_kprocess_method_default
+};
+
diff --git a/rtems/freebsd/local/cryptodev_if.h b/rtems/freebsd/local/cryptodev_if.h
new file mode 100644
index 00000000..752527ed
--- /dev/null
+++ b/rtems/freebsd/local/cryptodev_if.h
@@ -0,0 +1,69 @@
+/*
+ * This file is produced automatically.
+ * Do not modify anything in here by hand.
+ *
+ * Created from source file
+ * opencrypto/cryptodev_if.m
+ * with
+ * makeobjops.awk
+ *
+ * See the source file for legal information
+ */
+
+
+#ifndef _cryptodev_if_h_
+#define _cryptodev_if_h_
+
+/** @brief Unique descriptor for the CRYPTODEV_NEWSESSION() method */
+extern struct kobjop_desc cryptodev_newsession_desc;
+/** @brief A function implementing the CRYPTODEV_NEWSESSION() method */
+typedef int cryptodev_newsession_t(device_t dev, uint32_t *sid,
+ struct cryptoini *cri);
+
+static __inline int CRYPTODEV_NEWSESSION(device_t dev, uint32_t *sid,
+ struct cryptoini *cri)
+{
+ kobjop_t _m;
+ KOBJOPLOOKUP(((kobj_t)dev)->ops,cryptodev_newsession);
+ return ((cryptodev_newsession_t *) _m)(dev, sid, cri);
+}
+
+/** @brief Unique descriptor for the CRYPTODEV_FREESESSION() method */
+extern struct kobjop_desc cryptodev_freesession_desc;
+/** @brief A function implementing the CRYPTODEV_FREESESSION() method */
+typedef int cryptodev_freesession_t(device_t dev, uint64_t sid);
+
+static __inline int CRYPTODEV_FREESESSION(device_t dev, uint64_t sid)
+{
+ kobjop_t _m;
+ KOBJOPLOOKUP(((kobj_t)dev)->ops,cryptodev_freesession);
+ return ((cryptodev_freesession_t *) _m)(dev, sid);
+}
+
+/** @brief Unique descriptor for the CRYPTODEV_PROCESS() method */
+extern struct kobjop_desc cryptodev_process_desc;
+/** @brief A function implementing the CRYPTODEV_PROCESS() method */
+typedef int cryptodev_process_t(device_t dev, struct cryptop *op, int flags);
+
+static __inline int CRYPTODEV_PROCESS(device_t dev, struct cryptop *op,
+ int flags)
+{
+ kobjop_t _m;
+ KOBJOPLOOKUP(((kobj_t)dev)->ops,cryptodev_process);
+ return ((cryptodev_process_t *) _m)(dev, op, flags);
+}
+
+/** @brief Unique descriptor for the CRYPTODEV_KPROCESS() method */
+extern struct kobjop_desc cryptodev_kprocess_desc;
+/** @brief A function implementing the CRYPTODEV_KPROCESS() method */
+typedef int cryptodev_kprocess_t(device_t dev, struct cryptkop *op, int flags);
+
+static __inline int CRYPTODEV_KPROCESS(device_t dev, struct cryptkop *op,
+ int flags)
+{
+ kobjop_t _m;
+ KOBJOPLOOKUP(((kobj_t)dev)->ops,cryptodev_kprocess);
+ return ((cryptodev_kprocess_t *) _m)(dev, op, flags);
+}
+
+#endif /* _cryptodev_if_h_ */
diff --git a/rtems/freebsd/local/device_if.c b/rtems/freebsd/local/device_if.c
new file mode 100644
index 00000000..0b7ea6e8
--- /dev/null
+++ b/rtems/freebsd/local/device_if.c
@@ -0,0 +1,106 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*
+ * This file is produced automatically.
+ * Do not modify anything in here by hand.
+ *
+ * Created from source file
+ * kern/device_if.m
+ * with
+ * makeobjops.awk
+ *
+ * See the source file for legal information
+ */
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/queue.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/kobj.h>
+#include <rtems/freebsd/sys/bus.h>
+#include <rtems/freebsd/local/device_if.h>
+
+
+static int null_shutdown(device_t dev)
+{
+ return 0;
+}
+
+static int null_suspend(device_t dev)
+{
+ return 0;
+}
+
+static int null_resume(device_t dev)
+{
+ return 0;
+}
+
+static int null_quiesce(device_t dev)
+{
+ return EOPNOTSUPP;
+}
+
+struct kobj_method device_probe_method_default = {
+ &device_probe_desc, (kobjop_t) kobj_error_method
+};
+
+struct kobjop_desc device_probe_desc = {
+ 0, &device_probe_method_default
+};
+
+struct kobj_method device_identify_method_default = {
+ &device_identify_desc, (kobjop_t) kobj_error_method
+};
+
+struct kobjop_desc device_identify_desc = {
+ 0, &device_identify_method_default
+};
+
+struct kobj_method device_attach_method_default = {
+ &device_attach_desc, (kobjop_t) kobj_error_method
+};
+
+struct kobjop_desc device_attach_desc = {
+ 0, &device_attach_method_default
+};
+
+struct kobj_method device_detach_method_default = {
+ &device_detach_desc, (kobjop_t) kobj_error_method
+};
+
+struct kobjop_desc device_detach_desc = {
+ 0, &device_detach_method_default
+};
+
+struct kobj_method device_shutdown_method_default = {
+ &device_shutdown_desc, (kobjop_t) null_shutdown
+};
+
+struct kobjop_desc device_shutdown_desc = {
+ 0, &device_shutdown_method_default
+};
+
+struct kobj_method device_suspend_method_default = {
+ &device_suspend_desc, (kobjop_t) null_suspend
+};
+
+struct kobjop_desc device_suspend_desc = {
+ 0, &device_suspend_method_default
+};
+
+struct kobj_method device_resume_method_default = {
+ &device_resume_desc, (kobjop_t) null_resume
+};
+
+struct kobjop_desc device_resume_desc = {
+ 0, &device_resume_method_default
+};
+
+struct kobj_method device_quiesce_method_default = {
+ &device_quiesce_desc, (kobjop_t) null_quiesce
+};
+
+struct kobjop_desc device_quiesce_desc = {
+ 0, &device_quiesce_method_default
+};
+
diff --git a/rtems/freebsd/local/device_if.h b/rtems/freebsd/local/device_if.h
new file mode 100644
index 00000000..47339eb3
--- /dev/null
+++ b/rtems/freebsd/local/device_if.h
@@ -0,0 +1,340 @@
+/*
+ * This file is produced automatically.
+ * Do not modify anything in here by hand.
+ *
+ * Created from source file
+ * kern/device_if.m
+ * with
+ * makeobjops.awk
+ *
+ * See the source file for legal information
+ */
+
+/**
+ * @defgroup DEVICE device - KObj methods for all device drivers
+ * @brief A basic set of methods required for all device drivers.
+ *
+ * The device interface is used to match devices to drivers during
+ * autoconfiguration and provides methods to allow drivers to handle
+ * system-wide events such as suspend, resume or shutdown.
+ * @{
+ */
+
+#ifndef _device_if_h_
+#define _device_if_h_
+
+/** @brief Unique descriptor for the DEVICE_PROBE() method */
+extern struct kobjop_desc device_probe_desc;
+/** @brief A function implementing the DEVICE_PROBE() method */
+typedef int device_probe_t(device_t dev);
+/**
+ * @brief Probe to see if a device matches a driver.
+ *
+ * Users should not call this method directly. Normally, it
+ * is called via device_probe_and_attach(), which selects a driver
+ * by calling the DEVICE_PROBE() of all candidate drivers and
+ * attaching the winning driver (if any) to the device.
+ *
+ * This function is used to match devices to device drivers.
+ * Typically, the driver will examine the device to see if
+ * it is suitable for this driver. This might include checking
+ * the values of various device instance variables or reading
+ * hardware registers.
+ *
+ * In some cases, there may be more than one driver available
+ * which can be used for a device (for instance there might
+ * be a generic driver which works for a set of many types of
+ * device and a more specific driver which works for a subset
+ * of devices). Because of this, a driver should not assume
+ * that it will be the driver that attaches to the device even
+ * if it returns a success status from DEVICE_PROBE(). In particular,
+ * a driver must free any resources which it allocated during
+ * the probe before returning. The return value of DEVICE_PROBE()
+ * is used to elect which driver is used - the driver which returns
+ * the largest non-error value wins the election and attaches to
+ * the device.
+ *
+ * If a driver matches the hardware, it should set the device
+ * description string using device_set_desc() or
+ * device_set_desc_copy(). This string is
+ * used to generate an informative message when DEVICE_ATTACH()
+ * is called.
+ *
+ * As a special case, if a driver returns zero, the driver election
+ * is cut short and that driver will attach to the device
+ * immediately.
+ *
+ * For example, a probe method for a pci device driver might look
+ * like this:
+ *
+ * @code
+ * int foo_probe(device_t dev)
+ * {
+ * if (pci_get_vendor(dev) == FOOVENDOR &&
+ * pci_get_device(dev) == FOODEVICE) {
+ * device_set_desc(dev, "Foo device");
+ * return (0);
+ * }
+ * return (ENXIO);
+ * }
+ * @endcode
+ *
+ * To include this method in a device driver, use a line like this
+ * in the driver's method list:
+ *
+ * @code
+ * KOBJMETHOD(device_probe, foo_probe)
+ * @endcode
+ *
+ * @param dev the device to probe
+ *
+ * @retval 0 if the driver strongly matches this device
+ * @retval negative if the driver can match this device - the
+ * least negative value is used to select the
+ * driver
+ * @retval ENXIO if the driver does not match the device
+ * @retval positive if some kind of error was detected during
+ * the probe, a regular unix error code should
+ * be returned to indicate the type of error
+ * @see DEVICE_ATTACH(), pci_get_vendor(), pci_get_device()
+ */
+
+static __inline int DEVICE_PROBE(device_t dev)
+{
+ kobjop_t _m;
+ KOBJOPLOOKUP(((kobj_t)dev)->ops,device_probe);
+ return ((device_probe_t *) _m)(dev);
+}
+
+/** @brief Unique descriptor for the DEVICE_IDENTIFY() method */
+extern struct kobjop_desc device_identify_desc;
+/** @brief A function implementing the DEVICE_IDENTIFY() method */
+typedef void device_identify_t(driver_t *driver, device_t parent);
+/**
+ * @brief Allow a device driver to detect devices not otherwise enumerated.
+ *
+ * The DEVICE_IDENTIFY() method is used by some drivers (e.g. the ISA
+ * bus driver) to help populate the bus device with a useful set of
+ * child devices, normally by calling the BUS_ADD_CHILD() method of
+ * the parent device. For instance, the ISA bus driver uses several
+ * special drivers, including the isahint driver and the pnp driver to
+ * create child devices based on configuration hints and PnP bus
+ * probes respectively.
+ *
+ * Many bus drivers which support true plug-and-play do not need to
+ * use this method at all since child devices can be discovered
+ * automatically without help from child drivers.
+ *
+ * To include this method in a device driver, use a line like this
+ * in the driver's method list:
+ *
+ * @code
+ * KOBJMETHOD(device_identify, foo_identify)
+ * @endcode
+ *
+ * @param driver the driver whose identify method is being called
+ * @param parent the parent device to use when adding new children
+ */
+
+static __inline void DEVICE_IDENTIFY(driver_t *driver, device_t parent)
+{
+ kobjop_t _m;
+ KOBJOPLOOKUP(driver->ops,device_identify);
+ ((device_identify_t *) _m)(driver, parent);
+}
+
+/** @brief Unique descriptor for the DEVICE_ATTACH() method */
+extern struct kobjop_desc device_attach_desc;
+/** @brief A function implementing the DEVICE_ATTACH() method */
+typedef int device_attach_t(device_t dev);
+/**
+ * @brief Attach a device to a device driver
+ *
+ * Normally only called via device_probe_and_attach(), this is called
+ * when a driver has succeeded in probing against a device.
+ * This method should initialise the hardware and allocate other
+ * system resources (e.g. devfs entries) as required.
+ *
+ * To include this method in a device driver, use a line like this
+ * in the driver's method list:
+ *
+ * @code
+ * KOBJMETHOD(device_attach, foo_attach)
+ * @endcode
+ *
+ * @param dev the device to attach
+ *
+ * @retval 0 success
+ * @retval non-zero if some kind of error was detected during
+ * the attach, a regular unix error code should
+ * be returned to indicate the type of error
+ * @see DEVICE_PROBE()
+ */
+
+static __inline int DEVICE_ATTACH(device_t dev)
+{
+ kobjop_t _m;
+ KOBJOPLOOKUP(((kobj_t)dev)->ops,device_attach);
+ return ((device_attach_t *) _m)(dev);
+}
+
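+/*
+ * Continuing the hypothetical foo driver from the DEVICE_PROBE()
+ * example above (the softc layout and resource id are assumptions),
+ * an attach method might look like this:
+ *
+ * @code
+ * static int
+ * foo_attach(device_t dev)
+ * {
+ *     struct foo_softc *sc = device_get_softc(dev);
+ *     int rid = 0;
+ *
+ *     sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
+ *         RF_ACTIVE);
+ *     if (sc->sc_mem == NULL)
+ *         return (ENXIO);
+ *     return (0);
+ * }
+ * @endcode
+ */
+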
+/** @brief Unique descriptor for the DEVICE_DETACH() method */
+extern struct kobjop_desc device_detach_desc;
+/** @brief A function implementing the DEVICE_DETACH() method */
+typedef int device_detach_t(device_t dev);
+/**
+ * @brief Detach a driver from a device.
+ *
+ * This can be called if the user is replacing the
+ * driver software or if a device is about to be physically removed
+ * from the system (e.g. for removable hardware such as USB or PCCARD).
+ *
+ * To include this method in a device driver, use a line like this
+ * in the driver's method list:
+ *
+ * @code
+ * KOBJMETHOD(device_detach, foo_detach)
+ * @endcode
+ *
+ * @param dev the device to detach
+ *
+ * @retval 0 success
+ * @retval non-zero the detach could not be performed, e.g. if the
+ * driver does not support detaching.
+ *
+ * @see DEVICE_ATTACH()
+ */
+
+static __inline int DEVICE_DETACH(device_t dev)
+{
+ kobjop_t _m;
+ KOBJOPLOOKUP(((kobj_t)dev)->ops,device_detach);
+ return ((device_detach_t *) _m)(dev);
+}
+
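+/*
+ * Sketch of a matching detach for the hypothetical foo driver above
+ * (the softc members are assumptions): undo what attach did by
+ * tearing down the interrupt and releasing the resource.
+ *
+ * @code
+ * static int
+ * foo_detach(device_t dev)
+ * {
+ *     struct foo_softc *sc = device_get_softc(dev);
+ *
+ *     bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
+ *     bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->sc_mem);
+ *     return (0);
+ * }
+ * @endcode
+ */
+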
+/** @brief Unique descriptor for the DEVICE_SHUTDOWN() method */
+extern struct kobjop_desc device_shutdown_desc;
+/** @brief A function implementing the DEVICE_SHUTDOWN() method */
+typedef int device_shutdown_t(device_t dev);
+/**
+ * @brief Called during system shutdown.
+ *
+ * This method allows drivers to detect when the system is being shut down.
+ * Some drivers need to use this to place their hardware in a consistent
+ * state before rebooting the computer.
+ *
+ * To include this method in a device driver, use a line like this
+ * in the driver's method list:
+ *
+ * @code
+ * KOBJMETHOD(device_shutdown, foo_shutdown)
+ * @endcode
+ *
+ * @param dev the device being shut down
+ */
+
+static __inline int DEVICE_SHUTDOWN(device_t dev)
+{
+ kobjop_t _m;
+ KOBJOPLOOKUP(((kobj_t)dev)->ops,device_shutdown);
+ return ((device_shutdown_t *) _m)(dev);
+}
+
+/** @brief Unique descriptor for the DEVICE_SUSPEND() method */
+extern struct kobjop_desc device_suspend_desc;
+/** @brief A function implementing the DEVICE_SUSPEND() method */
+typedef int device_suspend_t(device_t dev);
+/**
+ * @brief This is called by the power-management subsystem when a
+ * suspend has been requested by the user or by some automatic
+ * mechanism.
+ *
+ * This gives drivers a chance to veto the suspend or save their
+ * configuration before power is removed.
+ *
+ * To include this method in a device driver, use a line like this in
+ * the driver's method list:
+ *
+ * @code
+ * KOBJMETHOD(device_suspend, foo_suspend)
+ * @endcode
+ *
+ * @param dev the device being suspended
+ *
+ * @retval 0 success
+ * @retval non-zero an error occurred while attempting to prepare the
+ * device for suspension
+ *
+ * @see DEVICE_RESUME()
+ */
+
+static __inline int DEVICE_SUSPEND(device_t dev)
+{
+ kobjop_t _m;
+ KOBJOPLOOKUP(((kobj_t)dev)->ops,device_suspend);
+ return ((device_suspend_t *) _m)(dev);
+}
+
+/** @brief Unique descriptor for the DEVICE_RESUME() method */
+extern struct kobjop_desc device_resume_desc;
+/** @brief A function implementing the DEVICE_RESUME() method */
+typedef int device_resume_t(device_t dev);
+/**
+ * @brief This is called when the system resumes after a suspend.
+ *
+ * To include this method in a device driver, use a line like this
+ * in the driver's method list:
+ *
+ * @code
+ * KOBJMETHOD(device_resume, foo_resume)
+ * @endcode
+ *
+ * @param dev the device being resumed
+ *
+ * @retval 0 success
+ * @retval non-zero an error occurred while attempting to restore the
+ * device from suspension
+ *
+ * @see DEVICE_SUSPEND()
+ */
+
+static __inline int DEVICE_RESUME(device_t dev)
+{
+ kobjop_t _m;
+ KOBJOPLOOKUP(((kobj_t)dev)->ops,device_resume);
+ return ((device_resume_t *) _m)(dev);
+}
+
+/** @brief Unique descriptor for the DEVICE_QUIESCE() method */
+extern struct kobjop_desc device_quiesce_desc;
+/** @brief A function implementing the DEVICE_QUIESCE() method */
+typedef int device_quiesce_t(device_t dev);
+/**
+ * @brief This is called when the driver is asked to quiesce itself.
+ *
+ * The driver should arrange for the orderly shutdown of this device.
+ * All further access to the device should be curtailed. A detach
+ * request will usually follow soon, but is not guaranteed.
+ *
+ * To include this method in a device driver, use a line like this
+ * in the driver's method list:
+ *
+ * @code
+ * KOBJMETHOD(device_quiesce, foo_quiesce)
+ * @endcode
+ *
+ * @param dev the device being quiesced
+ *
+ * @retval 0 success
+ * @retval non-zero an error occurred while attempting to quiesce the
+ * device
+ *
+ * @see DEVICE_DETACH()
+ */
+
+static __inline int DEVICE_QUIESCE(device_t dev)
+{
+ kobjop_t _m;
+ KOBJOPLOOKUP(((kobj_t)dev)->ops,device_quiesce);
+ return ((device_quiesce_t *) _m)(dev);
+}
+
+#endif /* _device_if_h_ */
diff --git a/rtems/freebsd/local/miibus_if.c b/rtems/freebsd/local/miibus_if.c
new file mode 100644
index 00000000..fcf76f10
--- /dev/null
+++ b/rtems/freebsd/local/miibus_if.c
@@ -0,0 +1,61 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*
+ * This file is produced automatically.
+ * Do not modify anything in here by hand.
+ *
+ * Created from source file
+ * dev/mii/miibus_if.m
+ * with
+ * makeobjops.awk
+ *
+ * See the source file for legal information
+ */
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/queue.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/kobj.h>
+#include <rtems/freebsd/sys/bus.h>
+#include <rtems/freebsd/local/miibus_if.h>
+
+struct kobj_method miibus_readreg_method_default = {
+ &miibus_readreg_desc, (kobjop_t) kobj_error_method
+};
+
+struct kobjop_desc miibus_readreg_desc = {
+ 0, &miibus_readreg_method_default
+};
+
+struct kobj_method miibus_writereg_method_default = {
+ &miibus_writereg_desc, (kobjop_t) kobj_error_method
+};
+
+struct kobjop_desc miibus_writereg_desc = {
+ 0, &miibus_writereg_method_default
+};
+
+struct kobj_method miibus_statchg_method_default = {
+ &miibus_statchg_desc, (kobjop_t) kobj_error_method
+};
+
+struct kobjop_desc miibus_statchg_desc = {
+ 0, &miibus_statchg_method_default
+};
+
+struct kobj_method miibus_linkchg_method_default = {
+ &miibus_linkchg_desc, (kobjop_t) kobj_error_method
+};
+
+struct kobjop_desc miibus_linkchg_desc = {
+ 0, &miibus_linkchg_method_default
+};
+
+struct kobj_method miibus_mediainit_method_default = {
+ &miibus_mediainit_desc, (kobjop_t) kobj_error_method
+};
+
+struct kobjop_desc miibus_mediainit_desc = {
+ 0, &miibus_mediainit_method_default
+};
+
diff --git a/rtems/freebsd/local/miibus_if.h b/rtems/freebsd/local/miibus_if.h
new file mode 100644
index 00000000..ec35ca19
--- /dev/null
+++ b/rtems/freebsd/local/miibus_if.h
@@ -0,0 +1,77 @@
+/*
+ * This file is produced automatically.
+ * Do not modify anything in here by hand.
+ *
+ * Created from source file
+ * dev/mii/miibus_if.m
+ * with
+ * makeobjops.awk
+ *
+ * See the source file for legal information
+ */
+
+
+#ifndef _miibus_if_h_
+#define _miibus_if_h_
+
+/** @brief Unique descriptor for the MIIBUS_READREG() method */
+extern struct kobjop_desc miibus_readreg_desc;
+/** @brief A function implementing the MIIBUS_READREG() method */
+typedef int miibus_readreg_t(device_t dev, int phy, int reg);
+
+static __inline int MIIBUS_READREG(device_t dev, int phy, int reg)
+{
+ kobjop_t _m;
+ KOBJOPLOOKUP(((kobj_t)dev)->ops,miibus_readreg);
+ return ((miibus_readreg_t *) _m)(dev, phy, reg);
+}
+
+/** @brief Unique descriptor for the MIIBUS_WRITEREG() method */
+extern struct kobjop_desc miibus_writereg_desc;
+/** @brief A function implementing the MIIBUS_WRITEREG() method */
+typedef int miibus_writereg_t(device_t dev, int phy, int reg, int val);
+
+static __inline int MIIBUS_WRITEREG(device_t dev, int phy, int reg, int val)
+{
+ kobjop_t _m;
+ KOBJOPLOOKUP(((kobj_t)dev)->ops,miibus_writereg);
+ return ((miibus_writereg_t *) _m)(dev, phy, reg, val);
+}
+
+/** @brief Unique descriptor for the MIIBUS_STATCHG() method */
+extern struct kobjop_desc miibus_statchg_desc;
+/** @brief A function implementing the MIIBUS_STATCHG() method */
+typedef void miibus_statchg_t(device_t dev);
+
+static __inline void MIIBUS_STATCHG(device_t dev)
+{
+ kobjop_t _m;
+ KOBJOPLOOKUP(((kobj_t)dev)->ops,miibus_statchg);
+ ((miibus_statchg_t *) _m)(dev);
+}
+
+/** @brief Unique descriptor for the MIIBUS_LINKCHG() method */
+extern struct kobjop_desc miibus_linkchg_desc;
+/** @brief A function implementing the MIIBUS_LINKCHG() method */
+typedef void miibus_linkchg_t(device_t dev);
+
+static __inline void MIIBUS_LINKCHG(device_t dev)
+{
+ kobjop_t _m;
+ KOBJOPLOOKUP(((kobj_t)dev)->ops,miibus_linkchg);
+ ((miibus_linkchg_t *) _m)(dev);
+}
+
+/** @brief Unique descriptor for the MIIBUS_MEDIAINIT() method */
+extern struct kobjop_desc miibus_mediainit_desc;
+/** @brief A function implementing the MIIBUS_MEDIAINIT() method */
+typedef void miibus_mediainit_t(device_t dev);
+
+static __inline void MIIBUS_MEDIAINIT(device_t dev)
+{
+ kobjop_t _m;
+ KOBJOPLOOKUP(((kobj_t)dev)->ops,miibus_mediainit);
+ ((miibus_mediainit_t *) _m)(dev);
+}
+
+#endif /* _miibus_if_h_ */
diff --git a/rtems/freebsd/local/miidevs.h b/rtems/freebsd/local/miidevs.h
new file mode 100644
index 00000000..ee8f4327
--- /dev/null
+++ b/rtems/freebsd/local/miidevs.h
@@ -0,0 +1,376 @@
+/* $FreeBSD$ */
+
+/*
+ * THIS FILE AUTOMATICALLY GENERATED. DO NOT EDIT.
+ *
+ * generated from:
+ * FreeBSD
+ */
+/*$NetBSD: miidevs,v 1.6 1999/05/14 11:37:30 drochner Exp $*/
+
+/*-
+ * Copyright (c) 1998, 1999 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
+ * NASA Ames Research Center.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * List of known MII OUIs.
+ * For a complete list see http://standards.ieee.org/regauth/oui/
+ *
+ * XXX Vendors obviously do not agree on how OUIs (18 bit) are mapped
+ * to the 16 bits available in the id registers. The MII_OUI() macro
+ * in "mii.h" reflects the most obvious way. If a vendor uses a
+ * different mapping, an "xx" prefixed OUI is defined here which is
+ * mangled accordingly to compensate.
+ */
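+
+/*
+ * For reference (illustrative, not generated content): mii.h
+ * reassembles the OUI bits from the two id registers roughly as
+ *
+ *	MII_OUI(id1, id2) == (((id1) << 6) | ((id2) >> 10))
+ *
+ * so each "xx" entry below is the constant that this expression
+ * yields for a vendor with a nonstandard mapping.
+ */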
+
+#define MII_OUI_AGERE 0x00a0bc /* Agere Systems */
+#define MII_OUI_ALTIMA 0x0010a9 /* Altima Communications */
+#define MII_OUI_AMD 0x00001a /* Advanced Micro Devices */
+#define MII_OUI_ASIX 0x00602e /* Asix Semiconductor */
+#define MII_OUI_ATHEROS 0x001374 /* Atheros Communications */
+#define MII_OUI_BROADCOM 0x001018 /* Broadcom Corporation */
+#define MII_OUI_BROADCOM2 0x000af7 /* Broadcom Corporation */
+#define MII_OUI_CICADA 0x0003F1 /* Cicada Semiconductor */
+#define MII_OUI_DAVICOM 0x00606e /* Davicom Semiconductor */
+#define MII_OUI_ICPLUS 0x0090c3 /* IC Plus Corp. */
+#define MII_OUI_ICS 0x00a0be /* Integrated Circuit Systems */
+#define MII_OUI_INTEL 0x00aa00 /* Intel */
+#define MII_OUI_JATO 0x00e083 /* Jato Technologies */
+#define MII_OUI_JMICRON 0x001b8c /* JMicron Technologies */
+#define MII_OUI_LEVEL1 0x00207b /* Level 1 */
+#define MII_OUI_NATSEMI 0x080017 /* National Semiconductor */
+#define MII_OUI_QUALSEMI 0x006051 /* Quality Semiconductor */
+#define MII_OUI_REALTEK 0x000020 /* RealTek Semiconductor */
+#define MII_OUI_SEEQ 0x00a07d /* Seeq */
+#define MII_OUI_SIS 0x00e006 /* Silicon Integrated Systems */
+#define MII_OUI_SMSC 0x0005be /* SMSC */
+#define MII_OUI_TDK 0x00c039 /* TDK */
+#define MII_OUI_TI 0x080028 /* Texas Instruments */
+#define MII_OUI_VITESSE 0x0001c1 /* Vitesse Semiconductor */
+#define MII_OUI_XAQTI 0x00e0ae /* XaQti Corp. */
+#define MII_OUI_MARVELL 0x005043 /* Marvell Semiconductor */
+#define MII_OUI_xxMARVELL 0x000ac2 /* Marvell Semiconductor */
+
+/* in the 79c873, AMD uses another OUI (which matches Davicom!) */
+#define MII_OUI_xxAMD 0x00606e /* Advanced Micro Devices */
+
+/* Intel 82553 A/B steppings */
+#define MII_OUI_xxINTEL 0x00f800 /* Intel */
+
+/* some vendors have the bits swapped within bytes
+ (i.e., ordered as on the wire) */
+#define MII_OUI_xxALTIMA 0x000895 /* Altima Communications */
+#define MII_OUI_xxBROADCOM 0x000818 /* Broadcom Corporation */
+#define MII_OUI_xxBROADCOM_ALT1 0x0050ef /* Broadcom Corporation */
+#define MII_OUI_xxBROADCOM_ALT2 0x00d897 /* Broadcom Corporation */
+#define MII_OUI_xxICS 0x00057d /* Integrated Circuit Systems */
+#define MII_OUI_xxSEEQ 0x0005be /* Seeq */
+#define MII_OUI_xxSIS 0x000760 /* Silicon Integrated Systems */
+#define MII_OUI_xxTI 0x100014 /* Texas Instruments */
+#define MII_OUI_xxXAQTI 0x350700 /* XaQti Corp. */
+
+/* Level 1 is completely different - from right to left.
+ (Two bits get lost in the third OUI byte.) */
+#define MII_OUI_xxLEVEL1 0x1e0400 /* Level 1 */
+
+/* Don't know what's going on here. */
+#define MII_OUI_xxDAVICOM 0x006040 /* Davicom Semiconductor */
+
+/* This is the OUI of the gigE PHY in the RealTek 8169S/8110S/8211B chips */
+#define MII_OUI_xxREALTEK 0x000732 /* */
+
+/*
+ * List of known models. Grouped by oui.
+ */
+
+/* Agere Systems PHYs */
+#define MII_MODEL_AGERE_ET1011 0x0001
+#define MII_STR_AGERE_ET1011 "ET1011 10/100/1000baseT PHY"
+#define MII_MODEL_AGERE_ET1011C 0x0004
+#define MII_STR_AGERE_ET1011C "ET1011C 10/100/1000baseT PHY"
+
+/* Altima Communications PHYs */
+#define MII_MODEL_xxALTIMA_AC101 0x0021
+#define MII_STR_xxALTIMA_AC101 "AC101 10/100 media interface"
+#define MII_MODEL_xxALTIMA_AC101L 0x0012
+#define MII_STR_xxALTIMA_AC101L "AC101L 10/100 media interface"
+#define MII_MODEL_xxALTIMA_ACXXX 0x0001
+#define MII_STR_xxALTIMA_ACXXX "ACXXX 10/100 media interface"
+
+/* Advanced Micro Devices PHYs */
+#define MII_MODEL_AMD_79c973phy 0x0036
+#define MII_STR_AMD_79c973phy "Am79c973 internal PHY"
+#define MII_MODEL_AMD_79c978 0x0039
+#define MII_STR_AMD_79c978 "Am79c978 HomePNA PHY"
+#define MII_MODEL_xxAMD_79C873 0x0000
+#define MII_STR_xxAMD_79C873 "Am79C873/DM9101 10/100 media interface"
+
+/* Asix semiconductor PHYs. */
+#define MII_MODEL_ASIX_AX88X9X 0x0031
+#define MII_STR_ASIX_AX88X9X "Ax88x9x internal PHY"
+
+/* Atheros Communications/Attansic PHYs. */
+#define MII_MODEL_ATHEROS_F1 0x0001
+#define MII_STR_ATHEROS_F1 "Atheros F1 10/100/1000 PHY"
+#define MII_MODEL_ATHEROS_F2 0x0002
+#define MII_STR_ATHEROS_F2 "Atheros F2 10/100 PHY"
+#define MII_MODEL_ATHEROS_F1_7 0x0007
+#define MII_STR_ATHEROS_F1_7 "Atheros F1 10/100/1000 PHY"
+
+/* Broadcom Corp. PHYs. */
+#define MII_MODEL_BROADCOM_3C905B 0x0012
+#define MII_STR_BROADCOM_3C905B "3c905B 10/100 internal PHY"
+#define MII_MODEL_BROADCOM_3C905C 0x0017
+#define MII_STR_BROADCOM_3C905C "3c905C 10/100 internal PHY"
+#define MII_MODEL_BROADCOM_BCM5201 0x0021
+#define MII_STR_BROADCOM_BCM5201 "BCM5201 10/100baseTX PHY"
+#define MII_MODEL_BROADCOM_BCM5214 0x0028
+#define MII_STR_BROADCOM_BCM5214 "BCM5214 Quad 10/100 PHY"
+#define MII_MODEL_BROADCOM_BCM5221 0x001e
+#define MII_STR_BROADCOM_BCM5221 "BCM5221 10/100baseTX PHY"
+#define MII_MODEL_BROADCOM_BCM5222 0x0032
+#define MII_STR_BROADCOM_BCM5222 "BCM5222 Dual 10/100 PHY"
+#define MII_MODEL_BROADCOM_BCM4401 0x0036
+#define MII_STR_BROADCOM_BCM4401 "BCM4401 10/100baseTX PHY"
+#define MII_MODEL_xxBROADCOM_BCM5400 0x0004
+#define MII_STR_xxBROADCOM_BCM5400 "Broadcom 1000baseTX PHY"
+#define MII_MODEL_xxBROADCOM_BCM5401 0x0005
+#define MII_STR_xxBROADCOM_BCM5401 "BCM5401 10/100/1000baseTX PHY"
+#define MII_MODEL_xxBROADCOM_BCM5411 0x0007
+#define MII_STR_xxBROADCOM_BCM5411 "BCM5411 10/100/1000baseTX PHY"
+#define MII_MODEL_xxBROADCOM_BCM5754 0x000e
+#define MII_STR_xxBROADCOM_BCM5754 "BCM5754 10/100/1000baseTX PHY"
+#define MII_MODEL_xxBROADCOM_BCM5752 0x0010
+#define MII_STR_xxBROADCOM_BCM5752 "BCM5752 10/100/1000baseTX PHY"
+#define MII_MODEL_xxBROADCOM_BCM5701 0x0011
+#define MII_STR_xxBROADCOM_BCM5701 "BCM5701 10/100/1000baseTX PHY"
+#define MII_MODEL_xxBROADCOM_BCM5706 0x0015
+#define MII_STR_xxBROADCOM_BCM5706 "BCM5706 10/100/1000baseTX/SX PHY"
+#define MII_MODEL_xxBROADCOM_BCM5703 0x0016
+#define MII_STR_xxBROADCOM_BCM5703 "BCM5703 10/100/1000baseTX PHY"
+#define MII_MODEL_xxBROADCOM_BCM5704 0x0019
+#define MII_STR_xxBROADCOM_BCM5704 "BCM5704 10/100/1000baseTX PHY"
+#define MII_MODEL_xxBROADCOM_BCM5705 0x001a
+#define MII_STR_xxBROADCOM_BCM5705 "BCM5705 10/100/1000baseTX PHY"
+#define MII_MODEL_xxBROADCOM_BCM5750 0x0018
+#define MII_STR_xxBROADCOM_BCM5750 "BCM5750 10/100/1000baseTX PHY"
+#define MII_MODEL_xxBROADCOM_BCM54K2 0x002e
+#define MII_STR_xxBROADCOM_BCM54K2 "BCM54K2 10/100/1000baseTX PHY"
+#define MII_MODEL_xxBROADCOM_BCM5714 0x0034
+#define MII_STR_xxBROADCOM_BCM5714 "BCM5714 10/100/1000baseTX PHY"
+#define MII_MODEL_xxBROADCOM_BCM5780 0x0035
+#define MII_STR_xxBROADCOM_BCM5780 "BCM5780 10/100/1000baseTX PHY"
+#define MII_MODEL_xxBROADCOM_BCM5708C 0x0036
+#define MII_STR_xxBROADCOM_BCM5708C "BCM5708C 10/100/1000baseTX PHY"
+#define MII_MODEL_xxBROADCOM_ALT1_BCM5755 0x000c
+#define MII_STR_xxBROADCOM_ALT1_BCM5755 "BCM5755 10/100/1000baseTX PHY"
+#define MII_MODEL_xxBROADCOM_ALT1_BCM5787 0x000e
+#define MII_STR_xxBROADCOM_ALT1_BCM5787 "BCM5787 10/100/1000baseTX PHY"
+#define MII_MODEL_xxBROADCOM_ALT1_BCM5708S 0x0015
+#define MII_STR_xxBROADCOM_ALT1_BCM5708S "BCM5708S 1000/2500BaseSX PHY"
+#define MII_MODEL_xxBROADCOM_ALT1_BCM5709CAX 0x002c
+#define MII_STR_xxBROADCOM_ALT1_BCM5709CAX "BCM5709C(AX) 10/100/1000baseTX PHY"
+#define MII_MODEL_xxBROADCOM_ALT1_BCM5722 0x002d
+#define MII_STR_xxBROADCOM_ALT1_BCM5722 "BCM5722 10/100/1000baseTX PHY"
+#define MII_MODEL_xxBROADCOM_ALT1_BCM5784 0x003a
+#define MII_STR_xxBROADCOM_ALT1_BCM5784 "BCM5784 10/100/1000baseTX PHY"
+#define MII_MODEL_xxBROADCOM_ALT1_BCM5709C 0x003c
+#define MII_STR_xxBROADCOM_ALT1_BCM5709C "BCM5709C 10/100/1000baseTX PHY"
+#define MII_MODEL_xxBROADCOM_ALT1_BCM5761 0x003d
+#define MII_STR_xxBROADCOM_ALT1_BCM5761 "BCM5761 10/100/1000baseTX PHY"
+#define MII_MODEL_xxBROADCOM_ALT1_BCM5709S 0x003f
+#define MII_STR_xxBROADCOM_ALT1_BCM5709S "BCM5709S 1000/2500baseSX PHY"
+#define MII_MODEL_xxBROADCOM_ALT2_BCM5717C 0x0020
+#define MII_STR_xxBROADCOM_ALT2_BCM5717C "BCM5717C 10/100/1000baseTX PHY"
+#define MII_MODEL_BROADCOM2_BCM5906 0x0004
+#define MII_STR_BROADCOM2_BCM5906 "BCM5906 10/100baseTX PHY"
+
+/* Cicada Semiconductor PHYs (now owned by Vitesse?) */
+#define MII_MODEL_CICADA_CS8201 0x0001
+#define MII_STR_CICADA_CS8201 "Cicada CS8201 10/100/1000TX PHY"
+#define MII_MODEL_CICADA_CS8204 0x0004
+#define MII_STR_CICADA_CS8204 "Cicada CS8204 10/100/1000TX PHY"
+#define MII_MODEL_CICADA_VSC8211 0x000b
+#define MII_STR_CICADA_VSC8211 "Cicada VSC8211 10/100/1000TX PHY"
+#define MII_MODEL_CICADA_CS8201A 0x0020
+#define MII_STR_CICADA_CS8201A "Cicada CS8201 10/100/1000TX PHY"
+#define MII_MODEL_CICADA_CS8201B 0x0021
+#define MII_STR_CICADA_CS8201B "Cicada CS8201 10/100/1000TX PHY"
+#define MII_MODEL_CICADA_CS8244 0x002c
+#define MII_STR_CICADA_CS8244 "Cicada CS8244 10/100/1000TX PHY"
+#define MII_MODEL_VITESSE_VSC8601 0x0002
+#define MII_STR_VITESSE_VSC8601 "Vitesse VSC8601 10/100/1000TX PHY"
+
+/* Davicom Semiconductor PHYs */
+#define MII_MODEL_DAVICOM_DM9102 0x0004
+#define MII_STR_DAVICOM_DM9102 "DM9102 10/100 media interface"
+#define MII_MODEL_xxDAVICOM_DM9101 0x0000
+#define MII_STR_xxDAVICOM_DM9101 "DM9101 10/100 media interface"
+
+/* Integrated Circuit Systems PHYs */
+#define MII_MODEL_xxICS_1889 0x0001
+#define MII_STR_xxICS_1889 "ICS1889 10/100 media interface"
+#define MII_MODEL_xxICS_1890 0x0002
+#define MII_STR_xxICS_1890 "ICS1890 10/100 media interface"
+#define MII_MODEL_xxICS_1892 0x0003
+#define MII_STR_xxICS_1892 "ICS1892 10/100 media interface"
+#define MII_MODEL_xxICS_1893 0x0004
+#define MII_STR_xxICS_1893 "ICS1893 10/100 media interface"
+
+/* IC Plus Corp. PHYs */
+#define MII_MODEL_ICPLUS_IP101 0x0005
+#define MII_STR_ICPLUS_IP101 "IC Plus 10/100 PHY"
+#define MII_MODEL_ICPLUS_IP1000A 0x0008
+#define MII_STR_ICPLUS_IP1000A "IC Plus 10/100/1000 media interface"
+#define MII_MODEL_ICPLUS_IP1001 0x0019
+#define MII_STR_ICPLUS_IP1001 "IC Plus IP1001 10/100/1000 media interface"
+
+/* Intel PHYs */
+#define MII_MODEL_xxINTEL_I82553AB 0x0000
+#define MII_STR_xxINTEL_I82553AB "i83553 10/100 media interface"
+#define MII_MODEL_INTEL_I82555 0x0015
+#define MII_STR_INTEL_I82555 "i82555 10/100 media interface"
+#define MII_MODEL_INTEL_I82562EM 0x0032
+#define MII_STR_INTEL_I82562EM "i82562EM 10/100 media interface"
+#define MII_MODEL_INTEL_I82562ET 0x0033
+#define MII_STR_INTEL_I82562ET "i82562ET 10/100 media interface"
+#define MII_MODEL_INTEL_I82553C 0x0035
+#define MII_STR_INTEL_I82553C "i82553 10/100 media interface"
+
+/* Jato Technologies PHYs */
+#define MII_MODEL_JATO_BASEX 0x0000
+#define MII_STR_JATO_BASEX "Jato 1000baseX media interface"
+
+/* JMicron Technologies PHYs */
+#define MII_MODEL_JMICRON_JMP211 0x0021
+#define MII_STR_JMICRON_JMP211 "JMP211 10/100/1000 media interface"
+#define MII_MODEL_JMICRON_JMP202 0x0022
+#define MII_STR_JMICRON_JMP202 "JMP202 10/100 media interface"
+
+/* Level 1 PHYs */
+#define MII_MODEL_xxLEVEL1_LXT970 0x0000
+#define MII_STR_xxLEVEL1_LXT970 "LXT970 10/100 media interface"
+
+/* National Semiconductor PHYs */
+#define MII_MODEL_NATSEMI_DP83840 0x0000
+#define MII_STR_NATSEMI_DP83840 "DP83840 10/100 media interface"
+#define MII_MODEL_NATSEMI_DP83843 0x0001
+#define MII_STR_NATSEMI_DP83843 "DP83843 10/100 media interface"
+#define MII_MODEL_NATSEMI_DP83815 0x0002
+#define MII_STR_NATSEMI_DP83815 "DP83815 10/100 media interface"
+#define MII_MODEL_NATSEMI_DP83847 0x0003
+#define MII_STR_NATSEMI_DP83847 "DP83847 10/100 media interface"
+#define MII_MODEL_NATSEMI_DP83891 0x0005
+#define MII_STR_NATSEMI_DP83891 "DP83891 10/100/1000 media interface"
+#define MII_MODEL_NATSEMI_DP83861 0x0006
+#define MII_STR_NATSEMI_DP83861 "DP83861 10/100/1000 media interface"
+#define MII_MODEL_NATSEMI_DP83865 0x0007
+#define MII_STR_NATSEMI_DP83865 "DP83865 10/100/1000 media interface"
+
+/* Quality Semiconductor PHYs */
+#define MII_MODEL_QUALSEMI_QS6612 0x0000
+#define MII_STR_QUALSEMI_QS6612 "QS6612 10/100 media interface"
+
+/* RealTek Semiconductor PHYs */
+#define MII_MODEL_REALTEK_RTL8201L 0x0020
+#define MII_STR_REALTEK_RTL8201L "RTL8201L 10/100 media interface"
+#define MII_MODEL_xxREALTEK_RTL8305SC 0x0005
+#define MII_STR_xxREALTEK_RTL8305SC "RTL8305SC 10/100 802.1q switch"
+#define MII_MODEL_xxREALTEK_RTL8169S 0x0011
+#define MII_STR_xxREALTEK_RTL8169S "RTL8169S/8110S/8211B media interface"
+
+/* Seeq PHYs */
+#define MII_MODEL_xxSEEQ_80220 0x0003
+#define MII_STR_xxSEEQ_80220 "Seeq 80220 10/100 media interface"
+#define MII_MODEL_xxSEEQ_84220 0x0004
+#define MII_STR_xxSEEQ_84220 "Seeq 84220 10/100 media interface"
+
+/* Silicon Integrated Systems PHYs */
+#define MII_MODEL_xxSIS_900 0x0000
+#define MII_STR_xxSIS_900 "SiS 900 10/100 media interface"
+
+/* SMSC PHYs */
+#define MII_MODEL_SMSC_LAN83C183 0x0004
+#define MII_STR_SMSC_LAN83C183 "SMSC LAN83C183 10/100 media interface"
+
+/* TDK */
+#define MII_MODEL_TDK_78Q2120 0x0014
+#define MII_STR_TDK_78Q2120 "TDK 78Q2120 media interface"
+
+/* Texas Instruments PHYs */
+#define MII_MODEL_xxTI_TLAN10T 0x0001
+#define MII_STR_xxTI_TLAN10T "ThunderLAN 10baseT media interface"
+#define MII_MODEL_xxTI_100VGPMI 0x0002
+#define MII_STR_xxTI_100VGPMI "ThunderLAN 100VG-AnyLan media interface"
+
+/* XaQti Corp. PHYs. */
+#define MII_MODEL_XAQTI_XMACII 0x0000
+#define MII_STR_XAQTI_XMACII "XaQti Corp. XMAC II gigabit interface"
+
+/* Marvell Semiconductor PHYs */
+#define MII_MODEL_MARVELL_E1000 0x0000
+#define MII_STR_MARVELL_E1000 "Marvell 88E1000 Gigabit PHY"
+#define MII_MODEL_MARVELL_E1011 0x0002
+#define MII_STR_MARVELL_E1011 "Marvell 88E1011 Gigabit PHY"
+#define MII_MODEL_MARVELL_E1000_3 0x0003
+#define MII_STR_MARVELL_E1000_3 "Marvell 88E1000 Gigabit PHY"
+#define MII_MODEL_MARVELL_E1000S 0x0004
+#define MII_STR_MARVELL_E1000S "Marvell 88E1000S Gigabit PHY"
+#define MII_MODEL_MARVELL_E1000_5 0x0005
+#define MII_STR_MARVELL_E1000_5 "Marvell 88E1000 Gigabit PHY"
+#define MII_MODEL_MARVELL_E1101 0x0006
+#define MII_STR_MARVELL_E1101 "Marvell 88E1101 Gigabit PHY"
+#define MII_MODEL_MARVELL_E3082 0x0008
+#define MII_STR_MARVELL_E3082 "Marvell 88E3082 10/100 Fast Ethernet PHY"
+#define MII_MODEL_MARVELL_E1112 0x0009
+#define MII_STR_MARVELL_E1112 "Marvell 88E1112 Gigabit PHY"
+#define MII_MODEL_MARVELL_E1149 0x000b
+#define MII_STR_MARVELL_E1149 "Marvell 88E1149 Gigabit PHY"
+#define MII_MODEL_MARVELL_E1111 0x000c
+#define MII_STR_MARVELL_E1111 "Marvell 88E1111 Gigabit PHY"
+#define MII_MODEL_MARVELL_E1116 0x0021
+#define MII_STR_MARVELL_E1116 "Marvell 88E1116 Gigabit PHY"
+#define MII_MODEL_MARVELL_E1118 0x0022
+#define MII_STR_MARVELL_E1118 "Marvell 88E1118 Gigabit PHY"
+#define MII_MODEL_MARVELL_E1116R 0x0024
+#define MII_STR_MARVELL_E1116R "Marvell 88E1116R Gigabit PHY"
+#define MII_MODEL_MARVELL_E3016 0x0026
+#define MII_STR_MARVELL_E3016 "Marvell 88E3016 10/100 Fast Ethernet PHY"
+#define MII_MODEL_MARVELL_PHYG65G 0x0027
+#define MII_STR_MARVELL_PHYG65G "Marvell PHYG65G Gigabit PHY"
+#define MII_MODEL_xxMARVELL_E1000 0x0000
+#define MII_STR_xxMARVELL_E1000 "Marvell 88E1000 Gigabit PHY"
+#define MII_MODEL_xxMARVELL_E1011 0x0002
+#define MII_STR_xxMARVELL_E1011 "Marvell 88E1011 Gigabit PHY"
+#define MII_MODEL_xxMARVELL_E1000_3 0x0003
+#define MII_STR_xxMARVELL_E1000_3 "Marvell 88E1000 Gigabit PHY"
+#define MII_MODEL_xxMARVELL_E1000_5 0x0005
+#define MII_STR_xxMARVELL_E1000_5 "Marvell 88E1000 Gigabit PHY"
+#define MII_MODEL_xxMARVELL_E1111 0x000c
+#define MII_STR_xxMARVELL_E1111 "Marvell 88E1111 Gigabit PHY"
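
The MII_MODEL_*/MII_STR_* pairs above exist so that a PHY driver can match the
model read from the PHY's ID registers and print a human-readable name. A
minimal sketch of the usual consumer, assuming the stock struct mii_phydesc
helpers from FreeBSD's miivar.h (the table and probe function names here are
illustrative, not taken from this patch):

    /* Hypothetical match table built from the Marvell entries defined above. */
    static const struct mii_phydesc e1000phys[] = {
        MII_PHY_DESC(MARVELL, E1000),   /* expands to { OUI, MII_MODEL_..., MII_STR_... } */
        MII_PHY_DESC(xxMARVELL, E1000),
        MII_PHY_END                     /* { 0, 0, NULL } terminator */
    };

    static int
    e1000phy_probe(device_t dev)
    {
        /* mii_phy_dev_probe() walks the table against the PHY ID registers. */
        return (mii_phy_dev_probe(dev, e1000phys, BUS_PROBE_DEFAULT));
    }
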
diff --git a/rtems/freebsd/local/opt_altq.h b/rtems/freebsd/local/opt_altq.h
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/rtems/freebsd/local/opt_altq.h
diff --git a/rtems/freebsd/local/opt_atalk.h b/rtems/freebsd/local/opt_atalk.h
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/rtems/freebsd/local/opt_atalk.h
diff --git a/rtems/freebsd/local/opt_bootp.h b/rtems/freebsd/local/opt_bootp.h
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/rtems/freebsd/local/opt_bootp.h
diff --git a/rtems/freebsd/local/opt_bpf.h b/rtems/freebsd/local/opt_bpf.h
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/rtems/freebsd/local/opt_bpf.h
diff --git a/rtems/freebsd/local/opt_bus.h b/rtems/freebsd/local/opt_bus.h
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/rtems/freebsd/local/opt_bus.h
diff --git a/rtems/freebsd/local/opt_cam.h b/rtems/freebsd/local/opt_cam.h
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/rtems/freebsd/local/opt_cam.h
diff --git a/rtems/freebsd/local/opt_carp.h b/rtems/freebsd/local/opt_carp.h
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/rtems/freebsd/local/opt_carp.h
diff --git a/rtems/freebsd/local/opt_compat.h b/rtems/freebsd/local/opt_compat.h
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/rtems/freebsd/local/opt_compat.h
diff --git a/rtems/freebsd/local/opt_config.h b/rtems/freebsd/local/opt_config.h
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/rtems/freebsd/local/opt_config.h
diff --git a/rtems/freebsd/local/opt_cpu.h b/rtems/freebsd/local/opt_cpu.h
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/rtems/freebsd/local/opt_cpu.h
diff --git a/rtems/freebsd/local/opt_ddb.h b/rtems/freebsd/local/opt_ddb.h
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/rtems/freebsd/local/opt_ddb.h
diff --git a/rtems/freebsd/local/opt_device_polling.h b/rtems/freebsd/local/opt_device_polling.h
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/rtems/freebsd/local/opt_device_polling.h
diff --git a/rtems/freebsd/local/opt_ef.h b/rtems/freebsd/local/opt_ef.h
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/rtems/freebsd/local/opt_ef.h
diff --git a/rtems/freebsd/local/opt_enc.h b/rtems/freebsd/local/opt_enc.h
new file mode 100644
index 00000000..a06aab6c
--- /dev/null
+++ b/rtems/freebsd/local/opt_enc.h
@@ -0,0 +1 @@
+#define DEV_ENC 1
diff --git a/rtems/freebsd/local/opt_hwpmc_hooks.h b/rtems/freebsd/local/opt_hwpmc_hooks.h
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/rtems/freebsd/local/opt_hwpmc_hooks.h
diff --git a/rtems/freebsd/local/opt_inet.h b/rtems/freebsd/local/opt_inet.h
new file mode 100644
index 00000000..fdf70095
--- /dev/null
+++ b/rtems/freebsd/local/opt_inet.h
@@ -0,0 +1 @@
+#define INET 1
diff --git a/rtems/freebsd/local/opt_inet6.h b/rtems/freebsd/local/opt_inet6.h
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/rtems/freebsd/local/opt_inet6.h
diff --git a/rtems/freebsd/local/opt_init_path.h b/rtems/freebsd/local/opt_init_path.h
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/rtems/freebsd/local/opt_init_path.h
diff --git a/rtems/freebsd/local/opt_ipdivert.h b/rtems/freebsd/local/opt_ipdivert.h
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/rtems/freebsd/local/opt_ipdivert.h
diff --git a/rtems/freebsd/local/opt_ipdn.h b/rtems/freebsd/local/opt_ipdn.h
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/rtems/freebsd/local/opt_ipdn.h
diff --git a/rtems/freebsd/local/opt_ipfw.h b/rtems/freebsd/local/opt_ipfw.h
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/rtems/freebsd/local/opt_ipfw.h
diff --git a/rtems/freebsd/local/opt_ipsec.h b/rtems/freebsd/local/opt_ipsec.h
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/rtems/freebsd/local/opt_ipsec.h
diff --git a/rtems/freebsd/local/opt_ipstealth.h b/rtems/freebsd/local/opt_ipstealth.h
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/rtems/freebsd/local/opt_ipstealth.h
diff --git a/rtems/freebsd/local/opt_ipx.h b/rtems/freebsd/local/opt_ipx.h
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/rtems/freebsd/local/opt_ipx.h
diff --git a/rtems/freebsd/local/opt_kdb.h b/rtems/freebsd/local/opt_kdb.h
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/rtems/freebsd/local/opt_kdb.h
diff --git a/rtems/freebsd/local/opt_kdtrace.h b/rtems/freebsd/local/opt_kdtrace.h
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/rtems/freebsd/local/opt_kdtrace.h
diff --git a/rtems/freebsd/local/opt_ktrace.h b/rtems/freebsd/local/opt_ktrace.h
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/rtems/freebsd/local/opt_ktrace.h
diff --git a/rtems/freebsd/local/opt_mbuf_profiling.h b/rtems/freebsd/local/opt_mbuf_profiling.h
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/rtems/freebsd/local/opt_mbuf_profiling.h
diff --git a/rtems/freebsd/local/opt_mbuf_stress_test.h b/rtems/freebsd/local/opt_mbuf_stress_test.h
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/rtems/freebsd/local/opt_mbuf_stress_test.h
diff --git a/rtems/freebsd/local/opt_mpath.h b/rtems/freebsd/local/opt_mpath.h
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/rtems/freebsd/local/opt_mpath.h
diff --git a/rtems/freebsd/local/opt_mrouting.h b/rtems/freebsd/local/opt_mrouting.h
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/rtems/freebsd/local/opt_mrouting.h
diff --git a/rtems/freebsd/local/opt_natm.h b/rtems/freebsd/local/opt_natm.h
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/rtems/freebsd/local/opt_natm.h
diff --git a/rtems/freebsd/local/opt_netgraph.h b/rtems/freebsd/local/opt_netgraph.h
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/rtems/freebsd/local/opt_netgraph.h
diff --git a/rtems/freebsd/local/opt_param.h b/rtems/freebsd/local/opt_param.h
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/rtems/freebsd/local/opt_param.h
diff --git a/rtems/freebsd/local/opt_pf.h b/rtems/freebsd/local/opt_pf.h
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/rtems/freebsd/local/opt_pf.h
diff --git a/rtems/freebsd/local/opt_posix.h b/rtems/freebsd/local/opt_posix.h
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/rtems/freebsd/local/opt_posix.h
diff --git a/rtems/freebsd/local/opt_printf.h b/rtems/freebsd/local/opt_printf.h
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/rtems/freebsd/local/opt_printf.h
diff --git a/rtems/freebsd/local/opt_route.h b/rtems/freebsd/local/opt_route.h
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/rtems/freebsd/local/opt_route.h
diff --git a/rtems/freebsd/local/opt_scsi.h b/rtems/freebsd/local/opt_scsi.h
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/rtems/freebsd/local/opt_scsi.h
diff --git a/rtems/freebsd/local/opt_sctp.h b/rtems/freebsd/local/opt_sctp.h
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/rtems/freebsd/local/opt_sctp.h
diff --git a/rtems/freebsd/local/opt_tcpdebug.h b/rtems/freebsd/local/opt_tcpdebug.h
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/rtems/freebsd/local/opt_tcpdebug.h
diff --git a/rtems/freebsd/local/opt_tdma.h b/rtems/freebsd/local/opt_tdma.h
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/rtems/freebsd/local/opt_tdma.h
diff --git a/rtems/freebsd/local/opt_usb.h b/rtems/freebsd/local/opt_usb.h
new file mode 100644
index 00000000..f9643174
--- /dev/null
+++ b/rtems/freebsd/local/opt_usb.h
@@ -0,0 +1,19 @@
+#define USB_HAVE_CONDVAR 1
+
+#define USB_HAVE_UGEN 0
+
+#define USB_HAVE_BUSDMA 1
+
+#define USB_HAVE_COMPAT_LINUX 0
+
+#define USB_HAVE_USER_IO 0
+
+#define USB_HAVE_MBUF 0
+
+#undef USB_VERBOSE
+
+#define USB_DEBUG 0
+
+#define USB_HAVE_TT_SUPPORT 1
+
+#define USB_HAVE_POWERD 1
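
These USB_HAVE_* macros gate conditional compilation throughout the imported
USB stack: with this generated opt_usb.h, /dev/ugen nodes, Linux-compat and
user I/O paths compile away, while bus_dma-backed transfers and transaction
translator support stay enabled. A hedged sketch of the consuming pattern
(the declarations are made up for illustration):

    #include <rtems/freebsd/local/opt_usb.h>

    #if USB_HAVE_BUSDMA
    /* bus_dma(9)-backed transfer buffers: enabled in this configuration. */
    static void example_alloc_dma_buffers(void);
    #endif

    #if USB_HAVE_UGEN
    /* Generic /dev/ugen character devices: compiled out here (flag is 0). */
    static void example_create_ugen_nodes(void);
    #endif
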
diff --git a/rtems/freebsd/local/opt_vlan.h b/rtems/freebsd/local/opt_vlan.h
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/rtems/freebsd/local/opt_vlan.h
diff --git a/rtems/freebsd/local/opt_wlan.h b/rtems/freebsd/local/opt_wlan.h
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/rtems/freebsd/local/opt_wlan.h
diff --git a/rtems/freebsd/local/opt_zero.h b/rtems/freebsd/local/opt_zero.h
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/rtems/freebsd/local/opt_zero.h
diff --git a/rtems/freebsd/local/pmap.h b/rtems/freebsd/local/pmap.h
new file mode 100644
index 00000000..f6d2924c
--- /dev/null
+++ b/rtems/freebsd/local/pmap.h
@@ -0,0 +1,29 @@
+/**
+ * @file
+ *
+ * @ingroup rtems_bsd_machine
+ *
+ * @brief TODO.
+ */
+
+/*
+ * Copyright (c) 2009, 2010 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Obere Lagerstr. 30
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.com/license/LICENSE.
+ */
+
+#ifndef _RTEMS_BSD_MACHINE_PMAP_H
+#define _RTEMS_BSD_MACHINE_PMAP_H
+
+struct md_page {
+};
+
+#endif /* !_RTEMS_BSD_MACHINE_PMAP_H */
diff --git a/rtems/freebsd/local/usb_if.c b/rtems/freebsd/local/usb_if.c
new file mode 100644
index 00000000..e0ec0ece
--- /dev/null
+++ b/rtems/freebsd/local/usb_if.c
@@ -0,0 +1,29 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*
+ * This file is produced automatically.
+ * Do not modify anything in here by hand.
+ *
+ * Created from source file
+ * dev/usb/usb_if.m
+ * with
+ * makeobjops.awk
+ *
+ * See the source file for legal information
+ */
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/queue.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/kobj.h>
+#include <rtems/freebsd/sys/bus.h>
+#include <rtems/freebsd/local/usb_if.h>
+
+struct kobj_method usb_handle_request_method_default = {
+ &usb_handle_request_desc, (kobjop_t) kobj_error_method
+};
+
+struct kobjop_desc usb_handle_request_desc = {
+ 0, &usb_handle_request_method_default
+};
+
diff --git a/rtems/freebsd/local/usb_if.h b/rtems/freebsd/local/usb_if.h
new file mode 100644
index 00000000..6f4a15a3
--- /dev/null
+++ b/rtems/freebsd/local/usb_if.h
@@ -0,0 +1,31 @@
+/*
+ * This file is produced automatically.
+ * Do not modify anything in here by hand.
+ *
+ * Created from source file
+ * dev/usb/usb_if.m
+ * with
+ * makeobjops.awk
+ *
+ * See the source file for legal information
+ */
+
+
+#ifndef _usb_if_h_
+#define _usb_if_h_
+
+/** @brief Unique descriptor for the USB_HANDLE_REQUEST() method */
+extern struct kobjop_desc usb_handle_request_desc;
+/** @brief A function implementing the USB_HANDLE_REQUEST() method */
+typedef int usb_handle_request_t(device_t dev,
+    const void *req,	/* pointer to the device request */
+    void **pptr,	/* data pointer */
+    uint16_t *plen,	/* maximum transfer length */
+    uint16_t offset,	/* data offset */
+    uint8_t *pstate);
+
+static __inline int USB_HANDLE_REQUEST(device_t dev,
+    const void *req,	/* pointer to the device request */
+    void **pptr,	/* data pointer */
+    uint16_t *plen,	/* maximum transfer length */
+    uint16_t offset,	/* data offset */
+    uint8_t *pstate)
+{
+ kobjop_t _m;
+ KOBJOPLOOKUP(((kobj_t)dev)->ops,usb_handle_request);
+ return ((usb_handle_request_t *) _m)(dev, req, pptr, plen, offset, pstate);
+}
+
+#endif /* _usb_if_h_ */
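
A driver plugs into this kobj interface by naming an implementation in its
device method table; the KOBJOPLOOKUP() inside USB_HANDLE_REQUEST() above then
dispatches to that function, falling back to kobj_error_method (installed as
the default by usb_if.c) when the driver provides none. A minimal sketch with
hypothetical driver names:

    static int
    example_handle_request(device_t dev, const void *req, void **pptr,
        uint16_t *plen, uint16_t offset, uint8_t *pstate)
    {
        /* A real handler would decode *req; this stub rejects everything. */
        return (ENXIO);
    }

    static device_method_t example_methods[] = {
        DEVMETHOD(usb_handle_request, example_handle_request),
        { 0, 0 }        /* terminator */
    };
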
diff --git a/rtems/freebsd/local/usbdevs.h b/rtems/freebsd/local/usbdevs.h
new file mode 100644
index 00000000..79bee891
--- /dev/null
+++ b/rtems/freebsd/local/usbdevs.h
@@ -0,0 +1,3433 @@
+/* ??? */
+
+/*
+ * THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+ *
+ * generated from:
+ * FreeBSD
+ */
+/* $NetBSD: usbdevs,v 1.392 2004/12/29 08:38:44 imp Exp $ */
+
+/*-
+ * Copyright (c) 1998-2004 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Lennart Augustsson (lennart@augustsson.net) at
+ * Carlstedt Research & Technology.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the NetBSD
+ * Foundation, Inc. and its contributors.
+ * 4. Neither the name of The NetBSD Foundation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * List of known USB vendors
+ *
+ * USB.org publishes a VID list of USB-IF member companies at
+ * http://www.usb.org/developers/tools
+ * Note that it does not show companies that have obtained a Vendor ID
+ * without becoming full members.
+ *
+ * Please note that these IDs do not do anything. Adding an ID here and
+ * regenerating the usbdevs.h and usbdevs_data.h only makes a symbolic name
+ * available to the source code and does not change any functionality, nor
+ * does it make your device available to a specific driver.
+ * It will however make the descriptive string available if a device does not
+ * provide the string itself.
+ *
+ * After adding a vendor ID VNDR and a product ID PRDCT you will have the
+ * following extra defines:
+ * #define USB_VENDOR_VNDR 0x????
+ * #define USB_PRODUCT_VNDR_PRDCT 0x????
+ *
+ * You may have to add these defines to the respective probe routines to
+ * make the device recognised by the appropriate device driver.
+ */
+
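
As the comment above says, these constants are purely symbolic; a device is
only claimed when a driver's probe routine compares them against the attached
device's descriptor. A hedged sketch against the FreeBSD 8-era
struct usb_attach_arg (the driver name is hypothetical; the IDs are real
entries from this file):

    static int
    example_probe(device_t dev)
    {
        struct usb_attach_arg *uaa = device_get_ivars(dev);

        /* Claim the 3Com 3C19250 Ethernet adapter listed below. */
        if (uaa->info.idVendor == USB_VENDOR_3COM &&
            uaa->info.idProduct == USB_PRODUCT_3COM_3C19250)
                return (BUS_PROBE_DEFAULT);
        return (ENXIO);
    }
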
+#define USB_VENDOR_UNKNOWN1 0x0053 /* Unknown vendor */
+#define USB_VENDOR_UNKNOWN2 0x0105 /* Unknown vendor */
+#define USB_VENDOR_EGALAX2 0x0123 /* eGalax, Inc. */
+#define USB_VENDOR_CHIPSBANK 0x0204 /* Chipsbank Microelectronics Co. */
+#define USB_VENDOR_HUMAX 0x02ad /* HUMAX */
+#define USB_VENDOR_LTS 0x0386 /* LTS */
+#define USB_VENDOR_BWCT 0x03da /* Bernd Walter Computer Technology */
+#define USB_VENDOR_AOX 0x03e8 /* AOX */
+#define USB_VENDOR_THESYS 0x03e9 /* Thesys */
+#define USB_VENDOR_DATABROADCAST 0x03ea /* Data Broadcasting */
+#define USB_VENDOR_ATMEL 0x03eb /* Atmel */
+#define USB_VENDOR_IWATSU 0x03ec /* Iwatsu America */
+#define USB_VENDOR_MITSUMI 0x03ee /* Mitsumi */
+#define USB_VENDOR_HP 0x03f0 /* Hewlett Packard */
+#define USB_VENDOR_GENOA 0x03f1 /* Genoa */
+#define USB_VENDOR_OAK 0x03f2 /* Oak */
+#define USB_VENDOR_ADAPTEC 0x03f3 /* Adaptec */
+#define USB_VENDOR_DIEBOLD 0x03f4 /* Diebold */
+#define USB_VENDOR_SIEMENSELECTRO 0x03f5 /* Siemens Electromechanical */
+#define USB_VENDOR_EPSONIMAGING 0x03f8 /* Epson Imaging */
+#define USB_VENDOR_KEYTRONIC 0x03f9 /* KeyTronic */
+#define USB_VENDOR_OPTI 0x03fb /* OPTi */
+#define USB_VENDOR_ELITEGROUP 0x03fc /* Elitegroup */
+#define USB_VENDOR_XILINX 0x03fd /* Xilinx */
+#define USB_VENDOR_FARALLON 0x03fe /* Farallon Communications */
+#define USB_VENDOR_NATIONAL 0x0400 /* National Semiconductor */
+#define USB_VENDOR_NATIONALREG 0x0401 /* National Registry */
+#define USB_VENDOR_ACERLABS 0x0402 /* Acer Labs */
+#define USB_VENDOR_FTDI 0x0403 /* Future Technology Devices */
+#define USB_VENDOR_NCR 0x0404 /* NCR */
+#define USB_VENDOR_SYNOPSYS2 0x0405 /* Synopsys */
+#define USB_VENDOR_FUJITSUICL 0x0406 /* Fujitsu-ICL */
+#define USB_VENDOR_FUJITSU2 0x0407 /* Fujitsu Personal Systems */
+#define USB_VENDOR_QUANTA 0x0408 /* Quanta */
+#define USB_VENDOR_NEC 0x0409 /* NEC */
+#define USB_VENDOR_KODAK 0x040a /* Eastman Kodak */
+#define USB_VENDOR_WELTREND 0x040b /* Weltrend */
+#define USB_VENDOR_VIA 0x040d /* VIA */
+#define USB_VENDOR_MCCI 0x040e /* MCCI */
+#define USB_VENDOR_MELCO 0x0411 /* Melco */
+#define USB_VENDOR_LEADTEK 0x0413 /* Leadtek */
+#define USB_VENDOR_WINBOND 0x0416 /* Winbond */
+#define USB_VENDOR_PHOENIX 0x041a /* Phoenix */
+#define USB_VENDOR_CREATIVE 0x041e /* Creative Labs */
+#define USB_VENDOR_NOKIA 0x0421 /* Nokia */
+#define USB_VENDOR_ADI 0x0422 /* ADI Systems */
+#define USB_VENDOR_CATC 0x0423 /* Computer Access Technology */
+#define USB_VENDOR_SMC2 0x0424 /* Standard Microsystems */
+#define USB_VENDOR_MOTOROLA_HK 0x0425 /* Motorola HK */
+#define USB_VENDOR_GRAVIS 0x0428 /* Advanced Gravis Computer */
+#define USB_VENDOR_CIRRUSLOGIC 0x0429 /* Cirrus Logic */
+#define USB_VENDOR_INNOVATIVE 0x042c /* Innovative Semiconductors */
+#define USB_VENDOR_MOLEX 0x042f /* Molex */
+#define USB_VENDOR_SUN 0x0430 /* Sun Microsystems */
+#define USB_VENDOR_UNISYS 0x0432 /* Unisys */
+#define USB_VENDOR_TAUGA 0x0436 /* Taugagreining HF */
+#define USB_VENDOR_AMD 0x0438 /* Advanced Micro Devices */
+#define USB_VENDOR_LEXMARK 0x043d /* Lexmark International */
+#define USB_VENDOR_LG 0x043e /* LG Electronics */
+#define USB_VENDOR_NANAO 0x0440 /* NANAO */
+#define USB_VENDOR_GATEWAY 0x0443 /* Gateway 2000 */
+#define USB_VENDOR_NMB 0x0446 /* NMB */
+#define USB_VENDOR_ALPS 0x044e /* Alps Electric */
+#define USB_VENDOR_THRUST 0x044f /* Thrustmaster */
+#define USB_VENDOR_TI 0x0451 /* Texas Instruments */
+#define USB_VENDOR_ANALOGDEVICES 0x0456 /* Analog Devices */
+#define USB_VENDOR_SIS 0x0457 /* Silicon Integrated Systems Corp. */
+#define USB_VENDOR_KYE 0x0458 /* KYE Systems */
+#define USB_VENDOR_DIAMOND2 0x045a /* Diamond (Supra) */
+#define USB_VENDOR_RENESAS 0x045b /* Renesas */
+#define USB_VENDOR_MICROSOFT 0x045e /* Microsoft */
+#define USB_VENDOR_PRIMAX 0x0461 /* Primax Electronics */
+#define USB_VENDOR_MGE 0x0463 /* MGE UPS Systems */
+#define USB_VENDOR_AMP 0x0464 /* AMP */
+#define USB_VENDOR_CHERRY 0x046a /* Cherry Mikroschalter */
+#define USB_VENDOR_MEGATRENDS 0x046b /* American Megatrends */
+#define USB_VENDOR_LOGITECH 0x046d /* Logitech */
+#define USB_VENDOR_BTC 0x046e /* Behavior Tech. Computer */
+#define USB_VENDOR_PHILIPS 0x0471 /* Philips */
+#define USB_VENDOR_SUN2 0x0472 /* Sun Microsystems (official) */
+#define USB_VENDOR_SANYO 0x0474 /* Sanyo Electric */
+#define USB_VENDOR_SEAGATE 0x0477 /* Seagate */
+#define USB_VENDOR_CONNECTIX 0x0478 /* Connectix */
+#define USB_VENDOR_SEMTECH 0x047a /* Semtech */
+#define USB_VENDOR_KENSINGTON 0x047d /* Kensington */
+#define USB_VENDOR_LUCENT 0x047e /* Lucent */
+#define USB_VENDOR_PLANTRONICS 0x047f /* Plantronics */
+#define USB_VENDOR_KYOCERA 0x0482 /* Kyocera Wireless Corp. */
+#define USB_VENDOR_STMICRO 0x0483 /* STMicroelectronics */
+#define USB_VENDOR_FOXCONN 0x0489 /* Foxconn */
+#define USB_VENDOR_MEIZU 0x0492 /* Meizu Electronics */
+#define USB_VENDOR_YAMAHA 0x0499 /* YAMAHA */
+#define USB_VENDOR_COMPAQ 0x049f /* Compaq */
+#define USB_VENDOR_HITACHI 0x04a4 /* Hitachi */
+#define USB_VENDOR_ACERP 0x04a5 /* Acer Peripherals */
+#define USB_VENDOR_DAVICOM 0x04a6 /* Davicom */
+#define USB_VENDOR_VISIONEER 0x04a7 /* Visioneer */
+#define USB_VENDOR_CANON 0x04a9 /* Canon */
+#define USB_VENDOR_NIKON 0x04b0 /* Nikon */
+#define USB_VENDOR_PAN 0x04b1 /* Pan International */
+#define USB_VENDOR_IBM 0x04b3 /* IBM */
+#define USB_VENDOR_CYPRESS 0x04b4 /* Cypress Semiconductor */
+#define USB_VENDOR_ROHM 0x04b5 /* ROHM */
+#define USB_VENDOR_COMPAL 0x04b7 /* Compal */
+#define USB_VENDOR_EPSON 0x04b8 /* Seiko Epson */
+#define USB_VENDOR_RAINBOW 0x04b9 /* Rainbow Technologies */
+#define USB_VENDOR_IODATA 0x04bb /* I-O Data */
+#define USB_VENDOR_TDK 0x04bf /* TDK */
+#define USB_VENDOR_3COMUSR 0x04c1 /* U.S. Robotics */
+#define USB_VENDOR_METHODE 0x04c2 /* Methode Electronics Far East */
+#define USB_VENDOR_MAXISWITCH 0x04c3 /* Maxi Switch */
+#define USB_VENDOR_LOCKHEEDMER 0x04c4 /* Lockheed Martin Energy Research */
+#define USB_VENDOR_FUJITSU 0x04c5 /* Fujitsu */
+#define USB_VENDOR_TOSHIBAAM 0x04c6 /* Toshiba America */
+#define USB_VENDOR_MICROMACRO 0x04c7 /* Micro Macro Technologies */
+#define USB_VENDOR_KONICA 0x04c8 /* Konica */
+#define USB_VENDOR_LITEON 0x04ca /* Lite-On Technology */
+#define USB_VENDOR_FUJIPHOTO 0x04cb /* Fuji Photo Film */
+#define USB_VENDOR_PHILIPSSEMI 0x04cc /* Philips Semiconductors */
+#define USB_VENDOR_TATUNG 0x04cd /* Tatung Co. of America */
+#define USB_VENDOR_SCANLOGIC 0x04ce /* ScanLogic */
+#define USB_VENDOR_MYSON 0x04cf /* Myson Technology */
+#define USB_VENDOR_DIGI2 0x04d0 /* Digi */
+#define USB_VENDOR_ITTCANON 0x04d1 /* ITT Canon */
+#define USB_VENDOR_ALTEC 0x04d2 /* Altec Lansing */
+#define USB_VENDOR_LSI 0x04d4 /* LSI */
+#define USB_VENDOR_MENTORGRAPHICS 0x04d6 /* Mentor Graphics */
+#define USB_VENDOR_ITUNERNET 0x04d8 /* I-Tuner Networks */
+#define USB_VENDOR_HOLTEK 0x04d9 /* Holtek Semiconductor, Inc. */
+#define USB_VENDOR_PANASONIC 0x04da /* Panasonic (Matsushita) */
+#define USB_VENDOR_HUANHSIN 0x04dc /* Huan Hsin */
+#define USB_VENDOR_SHARP 0x04dd /* Sharp */
+#define USB_VENDOR_IIYAMA 0x04e1 /* Iiyama */
+#define USB_VENDOR_SHUTTLE 0x04e6 /* Shuttle Technology */
+#define USB_VENDOR_ELO 0x04e7 /* Elo TouchSystems */
+#define USB_VENDOR_SAMSUNG 0x04e8 /* Samsung Electronics */
+#define USB_VENDOR_NORTHSTAR 0x04eb /* Northstar */
+#define USB_VENDOR_TOKYOELECTRON 0x04ec /* Tokyo Electron */
+#define USB_VENDOR_ANNABOOKS 0x04ed /* Annabooks */
+#define USB_VENDOR_JVC 0x04f1 /* JVC */
+#define USB_VENDOR_CHICONY 0x04f2 /* Chicony Electronics */
+#define USB_VENDOR_ELAN 0x04f3 /* Elan */
+#define USB_VENDOR_NEWNEX 0x04f7 /* Newnex */
+#define USB_VENDOR_BROTHER 0x04f9 /* Brother Industries */
+#define USB_VENDOR_DALLAS 0x04fa /* Dallas Semiconductor */
+#define USB_VENDOR_AIPTEK2 0x04fc /* AIPTEK International */
+#define USB_VENDOR_PFU 0x04fe /* PFU */
+#define USB_VENDOR_FUJIKURA 0x0501 /* Fujikura/DDK */
+#define USB_VENDOR_ACER 0x0502 /* Acer */
+#define USB_VENDOR_3COM 0x0506 /* 3Com */
+#define USB_VENDOR_HOSIDEN 0x0507 /* Hosiden Corporation */
+#define USB_VENDOR_AZTECH 0x0509 /* Aztech Systems */
+#define USB_VENDOR_BELKIN 0x050d /* Belkin Components */
+#define USB_VENDOR_KAWATSU 0x050f /* Kawatsu Semiconductor */
+#define USB_VENDOR_FCI 0x0514 /* FCI */
+#define USB_VENDOR_LONGWELL 0x0516 /* Longwell */
+#define USB_VENDOR_COMPOSITE 0x0518 /* Composite */
+#define USB_VENDOR_STAR 0x0519 /* Star Micronics */
+#define USB_VENDOR_APC 0x051d /* American Power Conversion */
+#define USB_VENDOR_SCIATLANTA 0x051e /* Scientific Atlanta */
+#define USB_VENDOR_TSM 0x0520 /* TSM */
+#define USB_VENDOR_CONNECTEK 0x0522 /* Advanced Connectek USA */
+#define USB_VENDOR_NETCHIP 0x0525 /* NetChip Technology */
+#define USB_VENDOR_ALTRA 0x0527 /* ALTRA */
+#define USB_VENDOR_ATI 0x0528 /* ATI Technologies */
+#define USB_VENDOR_AKS 0x0529 /* Aladdin Knowledge Systems */
+#define USB_VENDOR_TEKOM 0x052b /* Tekom */
+#define USB_VENDOR_CANONDEV 0x052c /* Canon */
+#define USB_VENDOR_WACOMTECH 0x0531 /* Wacom */
+#define USB_VENDOR_INVENTEC 0x0537 /* Inventec */
+#define USB_VENDOR_SHYHSHIUN 0x0539 /* Shyh Shiun Terminals */
+#define USB_VENDOR_PREHWERKE 0x053a /* Preh Werke Gmbh & Co. KG */
+#define USB_VENDOR_SYNOPSYS 0x053f /* Synopsys */
+#define USB_VENDOR_UNIACCESS 0x0540 /* Universal Access */
+#define USB_VENDOR_VIEWSONIC 0x0543 /* ViewSonic */
+#define USB_VENDOR_XIRLINK 0x0545 /* Xirlink */
+#define USB_VENDOR_ANCHOR 0x0547 /* Anchor Chips */
+#define USB_VENDOR_SONY 0x054c /* Sony */
+#define USB_VENDOR_FUJIXEROX 0x0550 /* Fuji Xerox */
+#define USB_VENDOR_VISION 0x0553 /* VLSI Vision */
+#define USB_VENDOR_ASAHIKASEI 0x0556 /* Asahi Kasei Microsystems */
+#define USB_VENDOR_ATEN 0x0557 /* ATEN International */
+#define USB_VENDOR_SAMSUNG2 0x055d /* Samsung Electronics */
+#define USB_VENDOR_MUSTEK 0x055f /* Mustek Systems */
+#define USB_VENDOR_TELEX 0x0562 /* Telex Communications */
+#define USB_VENDOR_CHINON 0x0564 /* Chinon */
+#define USB_VENDOR_PERACOM 0x0565 /* Peracom Networks */
+#define USB_VENDOR_ALCOR2 0x0566 /* Alcor Micro */
+#define USB_VENDOR_XYRATEX 0x0567 /* Xyratex */
+#define USB_VENDOR_WACOM 0x056a /* WACOM */
+#define USB_VENDOR_ETEK 0x056c /* e-TEK Labs */
+#define USB_VENDOR_EIZO 0x056d /* EIZO */
+#define USB_VENDOR_ELECOM 0x056e /* Elecom */
+#define USB_VENDOR_CONEXANT 0x0572 /* Conexant */
+#define USB_VENDOR_HAUPPAUGE 0x0573 /* Hauppauge Computer Works */
+#define USB_VENDOR_BAFO 0x0576 /* BAFO/Quality Computer Accessories */
+#define USB_VENDOR_YEDATA 0x057b /* Y-E Data */
+#define USB_VENDOR_AVM 0x057c /* AVM */
+#define USB_VENDOR_QUICKSHOT 0x057f /* Quickshot */
+#define USB_VENDOR_ROLAND 0x0582 /* Roland */
+#define USB_VENDOR_ROCKFIRE 0x0583 /* Rockfire */
+#define USB_VENDOR_RATOC 0x0584 /* RATOC Systems */
+#define USB_VENDOR_ZYXEL 0x0586 /* ZyXEL Communication */
+#define USB_VENDOR_INFINEON 0x058b /* Infineon */
+#define USB_VENDOR_MICREL 0x058d /* Micrel */
+#define USB_VENDOR_ALCOR 0x058f /* Alcor Micro */
+#define USB_VENDOR_OMRON 0x0590 /* OMRON */
+#define USB_VENDOR_ZORAN 0x0595 /* Zoran Microelectronics */
+#define USB_VENDOR_NIIGATA 0x0598 /* Niigata */
+#define USB_VENDOR_IOMEGA 0x059b /* Iomega */
+#define USB_VENDOR_ATREND 0x059c /* A-Trend Technology */
+#define USB_VENDOR_AID 0x059d /* Advanced Input Devices */
+#define USB_VENDOR_LACIE 0x059f /* LaCie */
+#define USB_VENDOR_FUJIFILM 0x05a2 /* Fuji Film */
+#define USB_VENDOR_ARC 0x05a3 /* ARC */
+#define USB_VENDOR_ORTEK 0x05a4 /* Ortek */
+#define USB_VENDOR_CISCOLINKSYS3 0x05a6 /* Cisco-Linksys */
+#define USB_VENDOR_BOSE 0x05a7 /* Bose */
+#define USB_VENDOR_OMNIVISION 0x05a9 /* OmniVision */
+#define USB_VENDOR_INSYSTEM 0x05ab /* In-System Design */
+#define USB_VENDOR_APPLE 0x05ac /* Apple Computer */
+#define USB_VENDOR_YCCABLE 0x05ad /* Y.C. Cable */
+#define USB_VENDOR_DIGITALPERSONA 0x05ba /* DigitalPersona */
+#define USB_VENDOR_3G 0x05bc /* 3G Green Green Globe */
+#define USB_VENDOR_RAFI 0x05bd /* RAFI */
+#define USB_VENDOR_TYCO 0x05be /* Tyco */
+#define USB_VENDOR_KAWASAKI 0x05c1 /* Kawasaki */
+#define USB_VENDOR_DIGI 0x05c5 /* Digi International */
+#define USB_VENDOR_QUALCOMM2 0x05c6 /* Qualcomm */
+#define USB_VENDOR_QTRONIX 0x05c7 /* Qtronix */
+#define USB_VENDOR_FOXLINK 0x05c8 /* Foxlink */
+#define USB_VENDOR_RICOH 0x05ca /* Ricoh */
+#define USB_VENDOR_ELSA 0x05cc /* ELSA */
+#define USB_VENDOR_SCIWORX 0x05ce /* sci-worx */
+#define USB_VENDOR_BRAINBOXES 0x05d1 /* Brainboxes Limited */
+#define USB_VENDOR_ULTIMA 0x05d8 /* Ultima */
+#define USB_VENDOR_AXIOHM 0x05d9 /* Axiohm Transaction Solutions */
+#define USB_VENDOR_MICROTEK 0x05da /* Microtek */
+#define USB_VENDOR_SUNTAC 0x05db /* SUN Corporation */
+#define USB_VENDOR_LEXAR 0x05dc /* Lexar Media */
+#define USB_VENDOR_ADDTRON 0x05dd /* Addtron */
+#define USB_VENDOR_SYMBOL 0x05e0 /* Symbol Technologies */
+#define USB_VENDOR_SYNTEK 0x05e1 /* Syntek */
+#define USB_VENDOR_GENESYS 0x05e3 /* Genesys Logic */
+#define USB_VENDOR_FUJI 0x05e5 /* Fuji Electric */
+#define USB_VENDOR_KEITHLEY 0x05e6 /* Keithley Instruments */
+#define USB_VENDOR_EIZONANAO 0x05e7 /* EIZO Nanao */
+#define USB_VENDOR_KLSI 0x05e9 /* Kawasaki LSI */
+#define USB_VENDOR_FFC 0x05eb /* FFC */
+#define USB_VENDOR_ANKO 0x05ef /* Anko Electronic */
+#define USB_VENDOR_PIENGINEERING 0x05f3 /* P.I. Engineering */
+#define USB_VENDOR_AOC 0x05f6 /* AOC International */
+#define USB_VENDOR_CHIC 0x05fe /* Chic Technology */
+#define USB_VENDOR_BARCO 0x0600 /* Barco Display Systems */
+#define USB_VENDOR_BRIDGE 0x0607 /* Bridge Information */
+#define USB_VENDOR_SOLIDYEAR 0x060b /* Solid Year */
+#define USB_VENDOR_BIORAD 0x0614 /* Bio-Rad Laboratories */
+#define USB_VENDOR_MACALLY 0x0618 /* Macally */
+#define USB_VENDOR_ACTLABS 0x061c /* Act Labs */
+#define USB_VENDOR_ALARIS 0x0620 /* Alaris */
+#define USB_VENDOR_APEX 0x0624 /* Apex */
+#define USB_VENDOR_CREATIVE3 0x062a /* Creative Labs */
+#define USB_VENDOR_VIVITAR 0x0636 /* Vivitar */
+#define USB_VENDOR_GUNZE 0x0637 /* Gunze Electronics USA */
+#define USB_VENDOR_AVISION 0x0638 /* Avision */
+#define USB_VENDOR_TEAC 0x0644 /* TEAC */
+#define USB_VENDOR_SGI 0x065e /* Silicon Graphics */
+#define USB_VENDOR_SANWASUPPLY 0x0663 /* Sanwa Supply */
+#define USB_VENDOR_MEGATEC 0x0665 /* Megatec */
+#define USB_VENDOR_LINKSYS 0x066b /* Linksys */
+#define USB_VENDOR_ACERSA 0x066e /* Acer Semiconductor America */
+#define USB_VENDOR_SIGMATEL 0x066f /* Sigmatel */
+#define USB_VENDOR_DRAYTEK 0x0675 /* DrayTek */
+#define USB_VENDOR_AIWA 0x0677 /* Aiwa */
+#define USB_VENDOR_ACARD 0x0678 /* ACARD Technology */
+#define USB_VENDOR_PROLIFIC 0x067b /* Prolific Technology */
+#define USB_VENDOR_SIEMENS 0x067c /* Siemens */
+#define USB_VENDOR_AVANCELOGIC 0x0680 /* Avance Logic */
+#define USB_VENDOR_SIEMENS2 0x0681 /* Siemens */
+#define USB_VENDOR_MINOLTA 0x0686 /* Minolta */
+#define USB_VENDOR_CHPRODUCTS 0x068e /* CH Products */
+#define USB_VENDOR_HAGIWARA 0x0693 /* Hagiwara Sys-Com */
+#define USB_VENDOR_CTX 0x0698 /* Chuntex */
+#define USB_VENDOR_ASKEY 0x069a /* Askey Computer */
+#define USB_VENDOR_SAITEK 0x06a3 /* Saitek */
+#define USB_VENDOR_ALCATELT 0x06b9 /* Alcatel Telecom */
+#define USB_VENDOR_AGFA 0x06bd /* AGFA-Gevaert */
+#define USB_VENDOR_ASIAMD 0x06be /* Asia Microelectronic Development */
+#define USB_VENDOR_BIZLINK 0x06c4 /* Bizlink International */
+#define USB_VENDOR_KEYSPAN 0x06cd /* Keyspan / InnoSys Inc. */
+#define USB_VENDOR_AASHIMA 0x06d6 /* Aashima Technology */
+#define USB_VENDOR_LIEBERT 0x06da /* Liebert */
+#define USB_VENDOR_MULTITECH 0x06e0 /* MultiTech */
+#define USB_VENDOR_ADS 0x06e1 /* ADS Technologies */
+#define USB_VENDOR_ALCATELM 0x06e4 /* Alcatel Microelectronics */
+#define USB_VENDOR_SIRIUS 0x06ea /* Sirius Technologies */
+#define USB_VENDOR_GUILLEMOT 0x06f8 /* Guillemot */
+#define USB_VENDOR_BOSTON 0x06fd /* Boston Acoustics */
+#define USB_VENDOR_SMC 0x0707 /* Standard Microsystems */
+#define USB_VENDOR_PUTERCOM 0x0708 /* Putercom */
+#define USB_VENDOR_MCT 0x0711 /* MCT */
+#define USB_VENDOR_IMATION 0x0718 /* Imation */
+#define USB_VENDOR_TECLAST 0x071b /* Teclast */
+#define USB_VENDOR_SONYERICSSON 0x0731 /* Sony Ericsson */
+#define USB_VENDOR_EICON 0x0734 /* Eicon Networks */
+#define USB_VENDOR_SYNTECH 0x0745 /* Syntech Information */
+#define USB_VENDOR_DIGITALSTREAM 0x074e /* Digital Stream */
+#define USB_VENDOR_AUREAL 0x0755 /* Aureal Semiconductor */
+#define USB_VENDOR_MIDIMAN 0x0763 /* Midiman */
+#define USB_VENDOR_CYBERPOWER 0x0764 /* Cyber Power Systems, Inc. */
+#define USB_VENDOR_SURECOM 0x0769 /* Surecom Technology */
+#define USB_VENDOR_HIDGLOBAL 0x076b /* HID Global */
+#define USB_VENDOR_LINKSYS2 0x077b /* Linksys */
+#define USB_VENDOR_GRIFFIN 0x077d /* Griffin Technology */
+#define USB_VENDOR_SANDISK 0x0781 /* SanDisk */
+#define USB_VENDOR_JENOPTIK 0x0784 /* Jenoptik */
+#define USB_VENDOR_LOGITEC 0x0789 /* Logitec */
+#define USB_VENDOR_NOKIA2 0x078b /* Nokia */
+#define USB_VENDOR_BRIMAX 0x078e /* Brimax */
+#define USB_VENDOR_AXIS 0x0792 /* Axis Communications */
+#define USB_VENDOR_ABL 0x0794 /* ABL Electronics */
+#define USB_VENDOR_SAGEM 0x079b /* Sagem */
+#define USB_VENDOR_SUNCOMM 0x079c /* Sun Communications, Inc. */
+#define USB_VENDOR_ALFADATA 0x079d /* Alfadata Computer */
+#define USB_VENDOR_NATIONALTECH 0x07a2 /* National Technical Systems */
+#define USB_VENDOR_ONNTO 0x07a3 /* Onnto */
+#define USB_VENDOR_BE 0x07a4 /* Be */
+#define USB_VENDOR_ADMTEK 0x07a6 /* ADMtek */
+#define USB_VENDOR_COREGA 0x07aa /* Corega */
+#define USB_VENDOR_FREECOM 0x07ab /* Freecom */
+#define USB_VENDOR_MICROTECH 0x07af /* Microtech */
+#define USB_VENDOR_GENERALINSTMNTS 0x07b2 /* General Instruments (Motorola) */
+#define USB_VENDOR_OLYMPUS 0x07b4 /* Olympus */
+#define USB_VENDOR_ABOCOM 0x07b8 /* AboCom Systems */
+#define USB_VENDOR_KEISOKUGIKEN 0x07c1 /* Keisokugiken */
+#define USB_VENDOR_ONSPEC 0x07c4 /* OnSpec */
+#define USB_VENDOR_APG 0x07c5 /* APG Cash Drawer */
+#define USB_VENDOR_BUG 0x07c8 /* B.U.G. */
+#define USB_VENDOR_ALLIEDTELESYN 0x07c9 /* Allied Telesyn International */
+#define USB_VENDOR_AVERMEDIA 0x07ca /* AVerMedia Technologies */
+#define USB_VENDOR_SIIG 0x07cc /* SIIG */
+#define USB_VENDOR_CASIO 0x07cf /* CASIO */
+#define USB_VENDOR_DLINK2 0x07d1 /* D-Link */
+#define USB_VENDOR_APTIO 0x07d2 /* Aptio Products */
+#define USB_VENDOR_ARASAN 0x07da /* Arasan Chip Systems */
+#define USB_VENDOR_ALLIEDCABLE 0x07e6 /* Allied Cable */
+#define USB_VENDOR_STSN 0x07ef /* STSN */
+#define USB_VENDOR_CENTURY 0x07f7 /* Century Corp */
+#define USB_VENDOR_NEWLINK 0x07ff /* NEWlink */
+#define USB_VENDOR_ZOOM 0x0803 /* Zoom Telephonics */
+#define USB_VENDOR_PCS 0x0810 /* Personal Communication Systems */
+#define USB_VENDOR_ALPHASMART 0x081e /* AlphaSmart, Inc. */
+#define USB_VENDOR_BROADLOGIC 0x0827 /* BroadLogic */
+#define USB_VENDOR_HANDSPRING 0x082d /* Handspring */
+#define USB_VENDOR_PALM 0x0830 /* Palm Computing */
+#define USB_VENDOR_SOURCENEXT 0x0833 /* SOURCENEXT */
+#define USB_VENDOR_ACTIONSTAR 0x0835 /* Action Star Enterprise */
+#define USB_VENDOR_SAMSUNG_TECHWIN 0x0839 /* Samsung Techwin */
+#define USB_VENDOR_ACCTON 0x083a /* Accton Technology */
+#define USB_VENDOR_DIAMOND 0x0841 /* Diamond */
+#define USB_VENDOR_NETGEAR 0x0846 /* BayNETGEAR */
+#define USB_VENDOR_TOPRE 0x0853 /* Topre Corporation */
+#define USB_VENDOR_ACTIVEWIRE 0x0854 /* ActiveWire */
+#define USB_VENDOR_BBELECTRONICS 0x0856 /* B&B Electronics */
+#define USB_VENDOR_PORTGEAR 0x085a /* PortGear */
+#define USB_VENDOR_NETGEAR2 0x0864 /* Netgear */
+#define USB_VENDOR_SYSTEMTALKS 0x086e /* System Talks */
+#define USB_VENDOR_METRICOM 0x0870 /* Metricom */
+#define USB_VENDOR_ADESSOKBTEK 0x087c /* ADESSO/Kbtek America */
+#define USB_VENDOR_JATON 0x087d /* Jaton */
+#define USB_VENDOR_APT 0x0880 /* APT Technologies */
+#define USB_VENDOR_BOCARESEARCH 0x0885 /* Boca Research */
+#define USB_VENDOR_ANDREA 0x08a8 /* Andrea Electronics */
+#define USB_VENDOR_BURRBROWN 0x08bb /* Burr-Brown Japan */
+#define USB_VENDOR_2WIRE 0x08c8 /* 2Wire */
+#define USB_VENDOR_AIPTEK 0x08ca /* AIPTEK International */
+#define USB_VENDOR_SMARTBRIDGES 0x08d1 /* SmartBridges */
+#define USB_VENDOR_FUJITSUSIEMENS 0x08d4 /* Fujitsu-Siemens */
+#define USB_VENDOR_BILLIONTON 0x08dd /* Billionton Systems */
+#define USB_VENDOR_GEMALTO 0x08e6 /* Gemalto SA */
+#define USB_VENDOR_EXTENDED 0x08e9 /* Extended Systems */
+#define USB_VENDOR_MSYSTEMS 0x08ec /* M-Systems */
+#define USB_VENDOR_DIGIANSWER 0x08fd /* Digianswer */
+#define USB_VENDOR_AUTHENTEC 0x08ff /* AuthenTec */
+#define USB_VENDOR_AUDIOTECHNICA 0x0909 /* Audio-Technica */
+#define USB_VENDOR_TRUMPION 0x090a /* Trumpion Microelectronics */
+#define USB_VENDOR_FEIYA 0x090c /* Feiya */
+#define USB_VENDOR_ALATION 0x0910 /* Alation Systems */
+#define USB_VENDOR_GLOBESPAN 0x0915 /* Globespan */
+#define USB_VENDOR_CONCORDCAMERA 0x0919 /* Concord Camera */
+#define USB_VENDOR_GARMIN 0x091e /* Garmin International */
+#define USB_VENDOR_GOHUBS 0x0921 /* GoHubs */
+#define USB_VENDOR_XEROX 0x0924 /* Xerox */
+#define USB_VENDOR_BIOMETRIC 0x0929 /* American Biometric Company */
+#define USB_VENDOR_TOSHIBA 0x0930 /* Toshiba */
+#define USB_VENDOR_PLEXTOR 0x093b /* Plextor */
+#define USB_VENDOR_INTREPIDCS 0x093c /* Intrepid */
+#define USB_VENDOR_YANO 0x094f /* Yano */
+#define USB_VENDOR_KINGSTON 0x0951 /* Kingston Technology */
+#define USB_VENDOR_BLUEWATER 0x0956 /* BlueWater Systems */
+#define USB_VENDOR_AGILENT 0x0957 /* Agilent Technologies */
+#define USB_VENDOR_GUDE 0x0959 /* Gude ADS */
+#define USB_VENDOR_PORTSMITH 0x095a /* Portsmith */
+#define USB_VENDOR_ACERW 0x0967 /* Acer */
+#define USB_VENDOR_ADIRONDACK 0x0976 /* Adirondack Wire & Cable */
+#define USB_VENDOR_BECKHOFF 0x0978 /* Beckhoff */
+#define USB_VENDOR_MINDSATWORK 0x097a /* Minds At Work */
+#define USB_VENDOR_POINTCHIPS 0x09a6 /* PointChips */
+#define USB_VENDOR_INTERSIL 0x09aa /* Intersil */
+#define USB_VENDOR_ALTIUS 0x09b3 /* Altius Solutions */
+#define USB_VENDOR_ARRIS 0x09c1 /* Arris Interactive */
+#define USB_VENDOR_ACTIVCARD 0x09c3 /* ACTIVCARD */
+#define USB_VENDOR_ACTISYS 0x09c4 /* ACTiSYS */
+#define USB_VENDOR_NOVATEL2 0x09d7 /* Novatel Wireless */
+#define USB_VENDOR_AFOURTECH 0x09da /* A-FOUR TECH */
+#define USB_VENDOR_AIMEX 0x09dc /* AIMEX */
+#define USB_VENDOR_ADDONICS 0x09df /* Addonics Technologies */
+#define USB_VENDOR_AKAI 0x09e8 /* AKAI professional M.I. */
+#define USB_VENDOR_ARESCOM 0x09f5 /* ARESCOM */
+#define USB_VENDOR_BAY 0x09f9 /* Bay Associates */
+#define USB_VENDOR_ALTERA 0x09fb /* Altera */
+#define USB_VENDOR_CSR 0x0a12 /* Cambridge Silicon Radio */
+#define USB_VENDOR_TREK 0x0a16 /* Trek Technology */
+#define USB_VENDOR_ASAHIOPTICAL 0x0a17 /* Asahi Optical */
+#define USB_VENDOR_BOCASYSTEMS 0x0a43 /* Boca Systems */
+#define USB_VENDOR_SHANTOU 0x0a46 /* ShanTou */
+#define USB_VENDOR_MEDIAGEAR 0x0a48 /* MediaGear */
+#define USB_VENDOR_BROADCOM 0x0a5c /* Broadcom */
+#define USB_VENDOR_GREENHOUSE 0x0a6b /* GREENHOUSE */
+#define USB_VENDOR_GEOCAST 0x0a79 /* Geocast Network Systems */
+#define USB_VENDOR_IDQUANTIQUE 0x0aba /* id Quantique */
+#define USB_VENDOR_ZYDAS 0x0ace /* Zydas Technology Corporation */
+#define USB_VENDOR_NEODIO 0x0aec /* Neodio */
+#define USB_VENDOR_OPTION 0x0af0 /* Option N.V. */
+#define USB_VENDOR_ASUS 0x0b05 /* ASUSTeK Computer */
+#define USB_VENDOR_TODOS 0x0b0c /* Todos Data System */
+#define USB_VENDOR_SIIG2 0x0b39 /* SIIG */
+#define USB_VENDOR_TEKRAM 0x0b3b /* Tekram Technology */
+#define USB_VENDOR_HAL 0x0b41 /* HAL Corporation */
+#define USB_VENDOR_EMS 0x0b43 /* EMS Production */
+#define USB_VENDOR_NEC2 0x0b62 /* NEC */
+#define USB_VENDOR_ADLINK 0x0b63 /* ADLINK Technology, Inc. */
+#define USB_VENDOR_ATI2 0x0b6f /* ATI */
+#define USB_VENDOR_ZEEVO 0x0b7a /* Zeevo, Inc. */
+#define USB_VENDOR_KURUSUGAWA 0x0b7e /* Kurusugawa Electronics, Inc. */
+#define USB_VENDOR_SMART 0x0b8c /* Smart Technologies */
+#define USB_VENDOR_ASIX 0x0b95 /* ASIX Electronics */
+#define USB_VENDOR_O2MICRO 0x0b97 /* O2 Micro, Inc. */
+#define USB_VENDOR_USR 0x0baf /* U.S. Robotics */
+#define USB_VENDOR_AMBIT 0x0bb2 /* Ambit Microsystems */
+#define USB_VENDOR_HTC 0x0bb4 /* HTC */
+#define USB_VENDOR_REALTEK 0x0bda /* Realtek */
+#define USB_VENDOR_MEI 0x0bed /* MEI */
+#define USB_VENDOR_ADDONICS2 0x0bf6 /* Addonics Technology */
+#define USB_VENDOR_FSC 0x0bf8 /* Fujitsu Siemens Computers */
+#define USB_VENDOR_AGATE 0x0c08 /* Agate Technologies */
+#define USB_VENDOR_DMI 0x0c0b /* DMI */
+#define USB_VENDOR_CHICONY2 0x0c45 /* Chicony */
+#define USB_VENDOR_REINERSCT 0x0c4b /* Reiner-SCT */
+#define USB_VENDOR_SEALEVEL 0x0c52 /* Sealevel System */
+#define USB_VENDOR_LUWEN 0x0c76 /* Luwen */
+#define USB_VENDOR_KYOCERA2 0x0c88 /* Kyocera Wireless Corp. */
+#define USB_VENDOR_ZCOM 0x0cde /* Z-Com */
+#define USB_VENDOR_ATHEROS2 0x0cf3 /* Atheros Communications */
+#define USB_VENDOR_TANGTOP 0x0d3d /* Tangtop */
+#define USB_VENDOR_SMC3 0x0d5c /* Standard Microsystems */
+#define USB_VENDOR_ADDON 0x0d7d /* Add-on Technology */
+#define USB_VENDOR_ACDC 0x0d7e /* American Computer & Digital Components */
+#define USB_VENDOR_CMEDIA 0x0d8c /* CMEDIA */
+#define USB_VENDOR_CONCEPTRONIC 0x0d8e /* Conceptronic */
+#define USB_VENDOR_SKANHEX 0x0d96 /* Skanhex Technology, Inc. */
+#define USB_VENDOR_MSI 0x0db0 /* Micro Star International */
+#define USB_VENDOR_ELCON 0x0db7 /* ELCON Systemtechnik */
+#define USB_VENDOR_NETAC 0x0dd8 /* Netac */
+#define USB_VENDOR_SITECOMEU 0x0df6 /* Sitecom Europe */
+#define USB_VENDOR_MOBILEACTION 0x0df7 /* Mobile Action */
+#define USB_VENDOR_AMIGO 0x0e0b /* Amigo Technology */
+#define USB_VENDOR_SPEEDDRAGON 0x0e55 /* Speed Dragon Multimedia */
+#define USB_VENDOR_HAWKING 0x0e66 /* Hawking */
+#define USB_VENDOR_FOSSIL 0x0e67 /* Fossil, Inc */
+#define USB_VENDOR_GMATE 0x0e7e /* G.Mate, Inc */
+#define USB_VENDOR_OTI 0x0ea0 /* Ours Technology */
+#define USB_VENDOR_YISO 0x0eab /* Yiso Wireless Co. */
+#define USB_VENDOR_PILOTECH 0x0eaf /* Pilotech */
+#define USB_VENDOR_NOVATECH 0x0eb0 /* NovaTech */
+#define USB_VENDOR_ITEGNO 0x0eba /* iTegno */
+#define USB_VENDOR_WINMAXGROUP 0x0ed1 /* WinMaxGroup */
+#define USB_VENDOR_TOD 0x0ede /* TOD */
+#define USB_VENDOR_EGALAX 0x0eef /* eGalax, Inc. */
+#define USB_VENDOR_AIRPRIME 0x0f3d /* AirPrime, Inc. */
+#define USB_VENDOR_MICROTUNE 0x0f4d /* Microtune */
+#define USB_VENDOR_VTECH 0x0f88 /* VTech */
+#define USB_VENDOR_FALCOM 0x0f94 /* Falcom Wireless Communications GmbH */
+#define USB_VENDOR_RIM 0x0fca /* Research In Motion */
+#define USB_VENDOR_DYNASTREAM 0x0fcf /* Dynastream Innovations */
+#define USB_VENDOR_QUALCOMM 0x1004 /* Qualcomm */
+#define USB_VENDOR_APACER 0x1005 /* Apacer */
+#define USB_VENDOR_MOTOROLA4 0x100d /* Motorola */
+#define USB_VENDOR_AIRPLUS 0x1011 /* Airplus */
+#define USB_VENDOR_DESKNOTE 0x1019 /* Desknote */
+#define USB_VENDOR_GIGABYTE 0x1044 /* GIGABYTE */
+#define USB_VENDOR_WESTERN 0x1058 /* Western Digital */
+#define USB_VENDOR_MOTOROLA 0x1063 /* Motorola */
+#define USB_VENDOR_CCYU 0x1065 /* CCYU Technology */
+#define USB_VENDOR_CURITEL 0x106c /* Curitel Communications Inc */
+#define USB_VENDOR_SILABS2 0x10a6 /* SILABS2 */
+#define USB_VENDOR_USI 0x10ab /* USI */
+#define USB_VENDOR_PLX 0x10b5 /* PLX */
+#define USB_VENDOR_ASANTE 0x10bd /* Asante */
+#define USB_VENDOR_SILABS 0x10c4 /* Silicon Labs */
+#define USB_VENDOR_SILABS3 0x10c5 /* Silicon Labs */
+#define USB_VENDOR_SILABS4 0x10ce /* Silicon Labs */
+#define USB_VENDOR_ACTIONS 0x10d6 /* Actions */
+#define USB_VENDOR_ANALOG 0x1110 /* Analog Devices */
+#define USB_VENDOR_TENX 0x1130 /* Ten X Technology, Inc. */
+#define USB_VENDOR_ISSC 0x1131 /* Integrated System Solution Corp. */
+#define USB_VENDOR_JRC 0x1145 /* Japan Radio Company */
+#define USB_VENDOR_SPHAIRON 0x114b /* Sphairon Access Systems GmbH */
+#define USB_VENDOR_DELORME 0x1163 /* DeLorme */
+#define USB_VENDOR_SERVERWORKS 0x1166 /* ServerWorks */
+#define USB_VENDOR_DLINK3 0x1186 /* Dlink */
+#define USB_VENDOR_ACERCM 0x1189 /* Acer Communications & Multimedia */
+#define USB_VENDOR_SIERRA 0x1199 /* Sierra Wireless */
+#define USB_VENDOR_SANWA 0x11ad /* Sanwa Electric Instrument Co., Ltd. */
+#define USB_VENDOR_TOPFIELD 0x11db /* Topfield Co., Ltd */
+#define USB_VENDOR_SIEMENS3 0x11f5 /* Siemens */
+#define USB_VENDOR_NETINDEX 0x11f6 /* NetIndex */
+#define USB_VENDOR_ALCATEL 0x11f7 /* Alcatel */
+#define USB_VENDOR_UNKNOWN3 0x1233 /* Unknown vendor */
+#define USB_VENDOR_TSUNAMI 0x1241 /* Tsunami */
+#define USB_VENDOR_PHEENET 0x124a /* Pheenet */
+#define USB_VENDOR_TARGUS 0x1267 /* Targus */
+#define USB_VENDOR_TWINMOS 0x126f /* TwinMOS */
+#define USB_VENDOR_TENDA 0x1286 /* Tenda */
+#define USB_VENDOR_CREATIVE2 0x1292 /* Creative Labs */
+#define USB_VENDOR_BELKIN2 0x1293 /* Belkin Components */
+#define USB_VENDOR_CYBERTAN 0x129b /* CyberTAN Technology */
+#define USB_VENDOR_HUAWEI 0x12d1 /* Huawei Technologies */
+#define USB_VENDOR_ARANEUS 0x12d8 /* Araneus Information Systems */
+#define USB_VENDOR_TAPWAVE 0x12ef /* Tapwave */
+#define USB_VENDOR_AINCOMM 0x12fd /* Aincomm */
+#define USB_VENDOR_MOBILITY 0x1342 /* Mobility */
+#define USB_VENDOR_DICKSMITH 0x1371 /* Dick Smith Electronics */
+#define USB_VENDOR_NETGEAR3 0x1385 /* Netgear */
+#define USB_VENDOR_BALTECH 0x13ad /* Baltech */
+#define USB_VENDOR_CISCOLINKSYS 0x13b1 /* Cisco-Linksys */
+#define USB_VENDOR_SHARK 0x13d2 /* Shark */
+#define USB_VENDOR_AZUREWAVE 0x13d3 /* AzureWave */
+#define USB_VENDOR_EMTEC 0x13fe /* Emtec */
+#define USB_VENDOR_NOVATEL 0x1410 /* Novatel Wireless */
+#define USB_VENDOR_MERLIN 0x1416 /* Merlin */
+#define USB_VENDOR_WISTRONNEWEB 0x1435 /* Wistron NeWeb */
+#define USB_VENDOR_RADIOSHACK 0x1453 /* Radio Shack */
+#define USB_VENDOR_HUAWEI3COM 0x1472 /* Huawei-3Com */
+#define USB_VENDOR_ABOCOM2 0x1482 /* AboCom Systems */
+#define USB_VENDOR_SILICOM 0x1485 /* Silicom */
+#define USB_VENDOR_RALINK 0x148f /* Ralink Technology */
+#define USB_VENDOR_IMAGINATION 0x149a /* Imagination Technologies */
+#define USB_VENDOR_CONCEPTRONIC2 0x14b2 /* Conceptronic */
+#define USB_VENDOR_SUPERTOP 0x14cd /* Super Top */
+#define USB_VENDOR_PLANEX3 0x14ea /* Planex Communications */
+#define USB_VENDOR_SILICONPORTALS 0x1527 /* Silicon Portals */
+#define USB_VENDOR_UBIQUAM 0x1529 /* UBIQUAM Co., Ltd. */
+#define USB_VENDOR_JMICRON 0x152d /* JMicron */
+#define USB_VENDOR_UBLOX 0x1546 /* U-blox */
+#define USB_VENDOR_PNY 0x154b /* PNY */
+#define USB_VENDOR_OWEN 0x1555 /* Owen */
+#define USB_VENDOR_OQO 0x1557 /* OQO */
+#define USB_VENDOR_UMEDIA 0x157e /* U-MEDIA Communications */
+#define USB_VENDOR_FIBERLINE 0x1582 /* Fiberline */
+#define USB_VENDOR_SPARKLAN 0x15a9 /* SparkLAN */
+#define USB_VENDOR_AMIT2 0x15c5 /* AMIT */
+#define USB_VENDOR_SOHOWARE 0x15e8 /* SOHOware */
+#define USB_VENDOR_UMAX 0x1606 /* UMAX Data Systems */
+#define USB_VENDOR_INSIDEOUT 0x1608 /* Inside Out Networks */
+#define USB_VENDOR_AMOI 0x1614 /* Amoi Electronics */
+#define USB_VENDOR_GOODWAY 0x1631 /* Good Way Technology */
+#define USB_VENDOR_ENTREGA 0x1645 /* Entrega */
+#define USB_VENDOR_ACTIONTEC 0x1668 /* Actiontec Electronics */
+#define USB_VENDOR_CLIPSAL 0x166a /* Clipsal */
+#define USB_VENDOR_CISCOLINKSYS2 0x167b /* Cisco-Linksys */
+#define USB_VENDOR_ATHEROS 0x168c /* Atheros Communications */
+#define USB_VENDOR_GIGASET 0x1690 /* Gigaset */
+#define USB_VENDOR_GLOBALSUN 0x16ab /* Global Sun Technology */
+#define USB_VENDOR_ANYDATA 0x16d5 /* AnyDATA Corporation */
+#define USB_VENDOR_JABLOTRON 0x16d6 /* Jablotron */
+#define USB_VENDOR_CMOTECH 0x16d8 /* C-motech */
+#define USB_VENDOR_AXESSTEL 0x1726 /* Axesstel Co., Ltd. */
+#define USB_VENDOR_LINKSYS4 0x1737 /* Linksys */
+#define USB_VENDOR_SENAO 0x1740 /* Senao */
+#define USB_VENDOR_ASUS2 0x1761 /* ASUS */
+#define USB_VENDOR_SWEEX2 0x177f /* Sweex */
+#define USB_VENDOR_METAGEEK 0x1781 /* MetaGeek */
+#define USB_VENDOR_WAVESENSE 0x17f4 /* WaveSense */
+#define USB_VENDOR_VAISALA 0x1843 /* Vaisala */
+#define USB_VENDOR_AMIT 0x18c5 /* AMIT */
+#define USB_VENDOR_GOOGLE 0x18d1 /* Google */
+#define USB_VENDOR_QCOM 0x18e8 /* Qcom */
+#define USB_VENDOR_ELV 0x18ef /* ELV */
+#define USB_VENDOR_LINKSYS3 0x1915 /* Linksys */
+#define USB_VENDOR_QUALCOMMINC 0x19d2 /* Qualcomm, Incorporated */
+#define USB_VENDOR_WCH2 0x1a86 /* QinHeng Electronics */
+#define USB_VENDOR_STELERA 0x1a8d /* Stelera Wireless */
+#define USB_VENDOR_MATRIXORBITAL 0x1b3d /* Matrix Orbital */
+#define USB_VENDOR_OVISLINK 0x1b75 /* OvisLink */
+#define USB_VENDOR_TCTMOBILE 0x1bbb /* TCT Mobile */
+#define USB_VENDOR_TELIT 0x1bc7 /* Telit */
+#define USB_VENDOR_LONGCHEER 0x1c9e /* Longcheer Holdings, Ltd. */
+#define USB_VENDOR_MPMAN 0x1cae /* MpMan */
+#define USB_VENDOR_DRESDENELEKTRONIK 0x1cf1 /* dresden elektronik */
+#define USB_VENDOR_NEOTEL 0x1d09 /* Neotel */
+#define USB_VENDOR_PEGATRON 0x1d4d /* Pegatron */
+#define USB_VENDOR_QISDA 0x1da5 /* Qisda */
+#define USB_VENDOR_METAGEEK2 0x1dd5 /* MetaGeek */
+#define USB_VENDOR_ALINK 0x1e0e /* Alink */
+#define USB_VENDOR_AIRTIES 0x1eda /* AirTies */
+#define USB_VENDOR_DLINK 0x2001 /* D-Link */
+#define USB_VENDOR_PLANEX2 0x2019 /* Planex Communications */
+#define USB_VENDOR_ENCORE 0x203d /* Encore */
+#define USB_VENDOR_HAUPPAUGE2 0x2040 /* Hauppauge Computer Works */
+#define USB_VENDOR_PARA 0x20b8 /* PARA Industrial */
+#define USB_VENDOR_TLAYTECH 0x20b9 /* Tlay Tech */
+#define USB_VENDOR_ERICSSON 0x2282 /* Ericsson */
+#define USB_VENDOR_MOTOROLA2 0x22b8 /* Motorola */
+#define USB_VENDOR_TRIPPLITE 0x2478 /* Tripp-Lite */
+#define USB_VENDOR_HIROSE 0x2631 /* Hirose Electric */
+#define USB_VENDOR_NHJ 0x2770 /* NHJ */
+#define USB_VENDOR_PLANEX 0x2c02 /* Planex Communications */
+#define USB_VENDOR_VIDZMEDIA 0x3275 /* VidzMedia Pte Ltd */
+#define USB_VENDOR_AEI 0x3334 /* AEI */
+#define USB_VENDOR_HANK 0x3353 /* Hank Connection */
+#define USB_VENDOR_PQI 0x3538 /* PQI */
+#define USB_VENDOR_DAISY 0x3579 /* Daisy Technology */
+#define USB_VENDOR_NI 0x3923 /* National Instruments */
+#define USB_VENDOR_MICRONET 0x3980 /* Micronet Communications */
+#define USB_VENDOR_IODATA2 0x40bb /* I-O Data */
+#define USB_VENDOR_IRIVER 0x4102 /* iRiver */
+#define USB_VENDOR_DELL 0x413c /* Dell */
+#define USB_VENDOR_WCH 0x4348 /* QinHeng Electronics */
+#define USB_VENDOR_ACEECA 0x4766 /* Aceeca */
+#define USB_VENDOR_AVERATEC 0x50c2 /* Averatec */
+#define USB_VENDOR_SWEEX 0x5173 /* Sweex */
+#define USB_VENDOR_PROLIFIC2 0x5372 /* Prolific Technologies */
+#define USB_VENDOR_ONSPEC2 0x55aa /* OnSpec Electronic Inc. */
+#define USB_VENDOR_ZINWELL 0x5a57 /* Zinwell */
+#define USB_VENDOR_SITECOM 0x6189 /* Sitecom */
+#define USB_VENDOR_ARKMICRO 0x6547 /* Arkmicro Technologies Inc. */
+#define USB_VENDOR_3COM2 0x6891 /* 3Com */
+#define USB_VENDOR_EDIMAX 0x7392 /* Edimax */
+#define USB_VENDOR_INTEL 0x8086 /* Intel */
+#define USB_VENDOR_INTEL2 0x8087 /* Intel */
+#define USB_VENDOR_ALLWIN 0x8516 /* ALLWIN Tech */
+#define USB_VENDOR_SITECOM2 0x9016 /* Sitecom */
+#define USB_VENDOR_MOSCHIP 0x9710 /* MosChip Semiconductor */
+#define USB_VENDOR_MARVELL 0x9e88 /* Marvell Technology Group Ltd. */
+#define USB_VENDOR_3COM3 0xa727 /* 3Com */
+#define USB_VENDOR_DATAAPEX 0xdaae /* DataApex */
+#define USB_VENDOR_HP2 0xf003 /* Hewlett Packard */
+#define USB_VENDOR_USRP 0xfffe /* GNU Radio USRP */
+
+/*
+ * List of known products. Grouped by vendor.
+ */
+
+/* 3Com products */
+#define USB_PRODUCT_3COM_HOMECONN 0x009d /* HomeConnect Camera */
+#define USB_PRODUCT_3COM_3CREB96 0x00a0 /* Bluetooth USB Adapter */
+#define USB_PRODUCT_3COM_3C19250 0x03e8 /* 3C19250 Ethernet Adapter */
+#define USB_PRODUCT_3COM_3CRSHEW696 0x0a01 /* 3CRSHEW696 Wireless Adapter */
+#define USB_PRODUCT_3COM_3C460 0x11f8 /* HomeConnect 3C460 */
+#define USB_PRODUCT_3COM_USR56K 0x3021 /* U.S. Robotics 56000 Voice FaxModem Pro */
+#define USB_PRODUCT_3COM_3C460B 0x4601 /* HomeConnect 3C460B */
+#define USB_PRODUCT_3COM2_3CRUSB10075 0xa727 /* 3CRUSB10075 */
+#define USB_PRODUCT_3COM3_AR5523_1 0x6893 /* AR5523 */
+#define USB_PRODUCT_3COM3_AR5523_2 0x6895 /* AR5523 */
+#define USB_PRODUCT_3COM3_AR5523_3 0x6897 /* AR5523 */
+
+#define USB_PRODUCT_3COMUSR_OFFICECONN 0x0082 /* 3Com OfficeConnect Analog Modem */
+#define USB_PRODUCT_3COMUSR_USRISDN 0x008f /* 3Com U.S. Robotics Pro ISDN TA */
+#define USB_PRODUCT_3COMUSR_HOMECONN 0x009d /* 3Com HomeConnect Camera */
+#define USB_PRODUCT_3COMUSR_USR56K 0x3021 /* U.S. Robotics 56000 Voice FaxModem Pro */
+
+/* AboCom products */
+#define USB_PRODUCT_ABOCOM_XX1 0x110c /* XX1 */
+#define USB_PRODUCT_ABOCOM_XX2 0x200c /* XX2 */
+#define USB_PRODUCT_ABOCOM_RT2770 0x2770 /* RT2770 */
+#define USB_PRODUCT_ABOCOM_RT2870 0x2870 /* RT2870 */
+#define USB_PRODUCT_ABOCOM_RT3070 0x3070 /* RT3070 */
+#define USB_PRODUCT_ABOCOM_RT3071 0x3071 /* RT3071 */
+#define USB_PRODUCT_ABOCOM_RT3072 0x3072 /* RT3072 */
+#define USB_PRODUCT_ABOCOM2_RT2870_1 0x3c09 /* RT2870 */
+#define USB_PRODUCT_ABOCOM_URE450 0x4000 /* URE450 Ethernet Adapter */
+#define USB_PRODUCT_ABOCOM_UFE1000 0x4002 /* UFE1000 Fast Ethernet Adapter */
+#define USB_PRODUCT_ABOCOM_DSB650TX_PNA 0x4003 /* 1/10/100 Ethernet Adapter */
+#define USB_PRODUCT_ABOCOM_XX4 0x4004 /* XX4 */
+#define USB_PRODUCT_ABOCOM_XX5 0x4007 /* XX5 */
+#define USB_PRODUCT_ABOCOM_XX6 0x400b /* XX6 */
+#define USB_PRODUCT_ABOCOM_XX7 0x400c /* XX7 */
+#define USB_PRODUCT_ABOCOM_RTL8151 0x401a /* RTL8151 */
+#define USB_PRODUCT_ABOCOM_XX8 0x4102 /* XX8 */
+#define USB_PRODUCT_ABOCOM_XX9 0x4104 /* XX9 */
+#define USB_PRODUCT_ABOCOM_UF200 0x420a /* UF200 Ethernet */
+#define USB_PRODUCT_ABOCOM_WL54 0x6001 /* WL54 */
+#define USB_PRODUCT_ABOCOM_XX10 0xabc1 /* XX10 */
+#define USB_PRODUCT_ABOCOM_BWU613 0xb000 /* BWU613 */
+#define USB_PRODUCT_ABOCOM_HWU54DM 0xb21b /* HWU54DM */
+#define USB_PRODUCT_ABOCOM_RT2573_2 0xb21c /* RT2573 */
+#define USB_PRODUCT_ABOCOM_RT2573_3 0xb21d /* RT2573 */
+#define USB_PRODUCT_ABOCOM_RT2573_4 0xb21e /* RT2573 */
+#define USB_PRODUCT_ABOCOM_WUG2700 0xb21f /* WUG2700 */
+
+/* Accton products */
+#define USB_PRODUCT_ACCTON_USB320_EC 0x1046 /* USB320-EC Ethernet Adapter */
+#define USB_PRODUCT_ACCTON_2664W 0x3501 /* 2664W */
+#define USB_PRODUCT_ACCTON_111 0x3503 /* T-Sinus 111 Wireless Adapter */
+#define USB_PRODUCT_ACCTON_SMCWUSBG_NF 0x4505 /* SMCWUSB-G (no firmware) */
+#define USB_PRODUCT_ACCTON_SMCWUSBG 0x4506 /* SMCWUSB-G */
+#define USB_PRODUCT_ACCTON_SMCWUSBTG2_NF 0x4507 /* SMCWUSBT-G2 (no firmware) */
+#define USB_PRODUCT_ACCTON_SMCWUSBTG2 0x4508 /* SMCWUSBT-G2 */
+#define USB_PRODUCT_ACCTON_PRISM_GT 0x4521 /* PrismGT USB 2.0 WLAN */
+#define USB_PRODUCT_ACCTON_SS1001 0x5046 /* SpeedStream Ethernet Adapter */
+#define USB_PRODUCT_ACCTON_RT2870_2 0x6618 /* RT2870 */
+#define USB_PRODUCT_ACCTON_RT3070 0x7511 /* RT3070 */
+#define USB_PRODUCT_ACCTON_RT2770 0x7512 /* RT2770 */
+#define USB_PRODUCT_ACCTON_RT2870_3 0x7522 /* RT2870 */
+#define USB_PRODUCT_ACCTON_RT2870_5 0x8522 /* RT2870 */
+#define USB_PRODUCT_ACCTON_RT3070_4 0xa512 /* RT3070 */
+#define USB_PRODUCT_ACCTON_RT2870_4 0xa618 /* RT2870 */
+#define USB_PRODUCT_ACCTON_RT3070_1 0xa701 /* RT3070 */
+#define USB_PRODUCT_ACCTON_RT3070_2 0xa702 /* RT3070 */
+#define USB_PRODUCT_ACCTON_RT2870_1 0xb522 /* RT2870 */
+#define USB_PRODUCT_ACCTON_RT3070_3 0xc522 /* RT3070 */
+#define USB_PRODUCT_ACCTON_RT3070_5 0xd522 /* RT3070 */
+#define USB_PRODUCT_ACCTON_ZD1211B 0xe501 /* ZD1211B */
+
+/* Aceeca products */
+#define USB_PRODUCT_ACEECA_MEZ1000 0x0001 /* MEZ1000 RDA */
+
+/* Acer Communications & Multimedia (OEMed by Surecom) */
+#define USB_PRODUCT_ACERCM_EP1427X2 0x0893 /* EP-1427X-2 Ethernet Adapter */
+
+/* Acer Labs products */
+#define USB_PRODUCT_ACERLABS_M5632 0x5632 /* USB 2.0 Data Link */
+
+/* Acer Peripherals, Inc. products */
+#define USB_PRODUCT_ACERP_ACERSCAN_C310U 0x12a6 /* Acerscan C310U */
+#define USB_PRODUCT_ACERP_ACERSCAN_320U 0x2022 /* Acerscan 320U */
+#define USB_PRODUCT_ACERP_ACERSCAN_640U 0x2040 /* Acerscan 640U */
+#define USB_PRODUCT_ACERP_ACERSCAN_620U 0x2060 /* Acerscan 620U */
+#define USB_PRODUCT_ACERP_ACERSCAN_4300U 0x20b0 /* BenQ 3300U/4300U */
+#define USB_PRODUCT_ACERP_ACERSCAN_640BT 0x20be /* Acerscan 640BT */
+#define USB_PRODUCT_ACERP_ACERSCAN_1240U 0x20c0 /* Acerscan 1240U */
+#define USB_PRODUCT_ACERP_S81 0x4027 /* BenQ S81 phone */
+#define USB_PRODUCT_ACERP_H10 0x4068 /* AWL400 Wireless Adapter */
+#define USB_PRODUCT_ACERP_ATAPI 0x6003 /* ATA/ATAPI Adapter */
+#define USB_PRODUCT_ACERP_AWL300 0x9000 /* AWL300 Wireless Adapter */
+#define USB_PRODUCT_ACERP_AWL400 0x9001 /* AWL400 Wireless Adapter */
+
+/* Acer Warp products */
+#define USB_PRODUCT_ACERW_WARPLINK 0x0204 /* Warplink */
+
+/* Actions products */
+#define USB_PRODUCT_ACTIONS_MP4 0x1101 /* Actions MP4 Player */
+
+/* Actiontec, Inc. products */
+#define USB_PRODUCT_ACTIONTEC_PRISM_25 0x0408 /* Prism2.5 Wireless Adapter */
+#define USB_PRODUCT_ACTIONTEC_PRISM_25A 0x0421 /* Prism2.5 Wireless Adapter A */
+#define USB_PRODUCT_ACTIONTEC_FREELAN 0x6106 /* ROPEX FreeLan 802.11b */
+#define USB_PRODUCT_ACTIONTEC_UAT1 0x7605 /* UAT1 Wireless Ethernet Adapter */
+
+/* ACTiSYS products */
+#define USB_PRODUCT_ACTISYS_IR2000U 0x0011 /* ACT-IR2000U FIR */
+
+/* ActiveWire, Inc. products */
+#define USB_PRODUCT_ACTIVEWIRE_IOBOARD 0x0100 /* I/O Board */
+#define USB_PRODUCT_ACTIVEWIRE_IOBOARD_FW1 0x0101 /* I/O Board, rev. 1 firmware */
+
+/* Adaptec products */
+#define USB_PRODUCT_ADAPTEC_AWN8020 0x0020 /* AWN-8020 WLAN */
+
+/* Addtron products */
+#define USB_PRODUCT_ADDTRON_AWU120 0xff31 /* AWU-120 */
+
+/* ADLINK Technology products */
+#define USB_PRODUCT_ADLINK_ND6530 0x6530 /* ND-6530 USB-Serial */
+
+/* ADMtek products */
+#define USB_PRODUCT_ADMTEK_PEGASUSII_4 0x07c2 /* AN986A Ethernet */
+#define USB_PRODUCT_ADMTEK_PEGASUS 0x0986 /* AN986 Ethernet */
+#define USB_PRODUCT_ADMTEK_PEGASUSII 0x8511 /* AN8511 Ethernet */
+#define USB_PRODUCT_ADMTEK_PEGASUSII_2 0x8513 /* AN8513 Ethernet */
+#define USB_PRODUCT_ADMTEK_PEGASUSII_3 0x8515 /* AN8515 Ethernet */
+
+/* ADDON products */
+/* PNY OEMs these */
+#define USB_PRODUCT_ADDON_ATTACHE 0x1300 /* USB 2.0 Flash Drive */
+#define USB_PRODUCT_ADDON_A256MB 0x1400 /* Attache 256MB USB 2.0 Flash Drive */
+#define USB_PRODUCT_ADDON_DISKPRO512 0x1420 /* USB 2.0 Flash Drive (DANE-ELEC zMate 512MB USB flash drive) */
+
+/* Addonics products */
+#define USB_PRODUCT_ADDONICS2_CABLE_205 0xa001 /* Cable 205 */
+
+/* ADS products */
+#define USB_PRODUCT_ADS_UBS10BT 0x0008 /* UBS-10BT Ethernet */
+#define USB_PRODUCT_ADS_UBS10BTX 0x0009 /* UBS-10BT Ethernet */
+
+/* AEI products */
+#define USB_PRODUCT_AEI_FASTETHERNET 0x1701 /* Fast Ethernet */
+
+/* Agate Technologies products */
+#define USB_PRODUCT_AGATE_QDRIVE 0x0378 /* Q-Drive */
+
+/* AGFA products */
+#define USB_PRODUCT_AGFA_SNAPSCAN1212U 0x0001 /* SnapScan 1212U */
+#define USB_PRODUCT_AGFA_SNAPSCAN1236U 0x0002 /* SnapScan 1236U */
+#define USB_PRODUCT_AGFA_SNAPSCANTOUCH 0x0100 /* SnapScan Touch */
+#define USB_PRODUCT_AGFA_SNAPSCAN1212U2 0x2061 /* SnapScan 1212U */
+#define USB_PRODUCT_AGFA_SNAPSCANE40 0x208d /* SnapScan e40 */
+#define USB_PRODUCT_AGFA_SNAPSCANE50 0x208f /* SnapScan e50 */
+#define USB_PRODUCT_AGFA_SNAPSCANE20 0x2091 /* SnapScan e20 */
+#define USB_PRODUCT_AGFA_SNAPSCANE25 0x2095 /* SnapScan e25 */
+#define USB_PRODUCT_AGFA_SNAPSCANE26 0x2097 /* SnapScan e26 */
+#define USB_PRODUCT_AGFA_SNAPSCANE52 0x20fd /* SnapScan e52 */
+
+/* Ain Communication Technology products */
+#define USB_PRODUCT_AINCOMM_AWU2000B 0x1001 /* AWU2000B Wireless Adapter */
+
+/* AIPTEK products */
+#define USB_PRODUCT_AIPTEK_POCKETCAM3M 0x2011 /* PocketCAM 3Mega */
+#define USB_PRODUCT_AIPTEK2_PENCAM_MEGA_1_3 0x504a /* PenCam Mega 1.3 */
+#define USB_PRODUCT_AIPTEK2_SUNPLUS_TECH 0x0c15 /* Sunplus Technology Inc. */
+
+/* AirPlus products */
+#define USB_PRODUCT_AIRPLUS_MCD650 0x3198 /* MCD650 modem */
+
+/* AirPrime products */
+#define USB_PRODUCT_AIRPRIME_PC5220 0x0112 /* CDMA Wireless PC Card */
+
+/* AirTies products */
+#define USB_PRODUCT_AIRTIES_RT3070 0x2310 /* RT3070 */
+
+/* AKS products */
+#define USB_PRODUCT_AKS_USBHASP 0x0001 /* USB-HASP 0.06 */
+
+/* Alcatel products */
+#define USB_PRODUCT_ALCATEL_OT535 0x02df /* One Touch 535/735 */
+
+/* Alcor Micro, Inc. products */
+#define USB_PRODUCT_ALCOR2_KBD_HUB 0x2802 /* Kbd Hub */
+
+#define USB_PRODUCT_ALCOR_SDCR_6335 0x6335 /* SD/MMC Card Reader */
+#define USB_PRODUCT_ALCOR_SDCR_6362 0x6362 /* SD/MMC Card Reader */
+#define USB_PRODUCT_ALCOR_TRANSCEND 0x6387 /* Transcend JetFlash Drive */
+#define USB_PRODUCT_ALCOR_MA_KBD_HUB 0x9213 /* MacAlly Kbd Hub */
+#define USB_PRODUCT_ALCOR_AU9814 0x9215 /* AU9814 Hub */
+#define USB_PRODUCT_ALCOR_UMCR_9361 0x9361 /* USB Multimedia Card Reader */
+#define USB_PRODUCT_ALCOR_SM_KBD 0x9410 /* MicroConnectors/StrongMan Keyboard */
+#define USB_PRODUCT_ALCOR_NEC_KBD_HUB 0x9472 /* NEC Kbd Hub */
+#define USB_PRODUCT_ALCOR_AU9720 0x9720 /* USB2 - RS-232 */
+#define USB_PRODUCT_ALCOR_AU6390 0x6390 /* AU6390 USB-IDE converter */
+
+/* Alink products */
+#define USB_PRODUCT_ALINK_DWM652U5 0xce16 /* DWM-652 */
+#define USB_PRODUCT_ALINK_3G 0x9000 /* 3G modem */
+#define USB_PRODUCT_ALINK_3GU 0x9200 /* 3G modem */
+
+/* Altec Lansing products */
+#define USB_PRODUCT_ALTEC_ADA70 0x0070 /* ADA70 Speakers */
+#define USB_PRODUCT_ALTEC_ASC495 0xff05 /* ASC495 Speakers */
+
+/* Allied Telesyn International products */
+#define USB_PRODUCT_ALLIEDTELESYN_ATUSB100 0xb100 /* AT-USB100 */
+
+/* ALLWIN Tech products */
+#define USB_PRODUCT_ALLWIN_RT2070 0x2070 /* RT2070 */
+#define USB_PRODUCT_ALLWIN_RT2770 0x2770 /* RT2770 */
+#define USB_PRODUCT_ALLWIN_RT2870 0x2870 /* RT2870 */
+#define USB_PRODUCT_ALLWIN_RT3070 0x3070 /* RT3070 */
+#define USB_PRODUCT_ALLWIN_RT3071 0x3071 /* RT3071 */
+#define USB_PRODUCT_ALLWIN_RT3072 0x3072 /* RT3072 */
+#define USB_PRODUCT_ALLWIN_RT3572 0x3572 /* RT3572 */
+
+/* AlphaSmart, Inc. products */
+#define USB_PRODUCT_ALPHASMART_DANA_KB 0xdbac /* AlphaSmart Dana Keyboard */
+#define USB_PRODUCT_ALPHASMART_DANA_SYNC 0xdf00 /* AlphaSmart Dana HotSync */
+
+/* Amoi products */
+#define USB_PRODUCT_AMOI_H01 0x0800 /* H01 3G modem */
+#define USB_PRODUCT_AMOI_H01A 0x7002 /* H01A 3G modem */
+#define USB_PRODUCT_AMOI_H02 0x0802 /* H02 3G modem */
+
+/* American Power Conversion products */
+#define USB_PRODUCT_APC_UPS 0x0002 /* Uninterruptible Power Supply */
+
+/* Ambit Microsystems products */
+#define USB_PRODUCT_AMBIT_WLAN 0x0302 /* WLAN */
+#define USB_PRODUCT_AMBIT_NTL_250 0x6098 /* NTL 250 cable modem */
+
+/* Apacer products */
+#define USB_PRODUCT_APACER_HT202 0xb113 /* USB 2.0 Flash Drive */
+
+/* Amigo Technology products */
+#define USB_PRODUCT_AMIGO_RT2870_1 0x9031 /* RT2870 */
+#define USB_PRODUCT_AMIGO_RT2870_2 0x9041 /* RT2870 */
+
+/* AMIT products */
+#define USB_PRODUCT_AMIT_CGWLUSB2GO 0x0002 /* CG-WLUSB2GO */
+#define USB_PRODUCT_AMIT_CGWLUSB2GNR 0x0008 /* CG-WLUSB2GNR */
+#define USB_PRODUCT_AMIT_RT2870_1 0x0012 /* RT2870 */
+
+/* AMIT(2) products */
+#define USB_PRODUCT_AMIT2_RT2870 0x0008 /* RT2870 */
+
+/* Anchor products */
+#define USB_PRODUCT_ANCHOR_SERIAL 0x2008 /* Serial */
+#define USB_PRODUCT_ANCHOR_EZUSB 0x2131 /* EZUSB */
+#define USB_PRODUCT_ANCHOR_EZLINK 0x2720 /* EZLINK */
+
+/* AnyData products */
+#define USB_PRODUCT_ANYDATA_ADU_620UW 0x6202 /* CDMA 2000 EV-DO USB Modem */
+#define USB_PRODUCT_ANYDATA_ADU_E100X 0x6501 /* CDMA 2000 1xRTT/EV-DO USB Modem */
+#define USB_PRODUCT_ANYDATA_ADU_500A 0x6502 /* CDMA 2000 EV-DO USB Modem */
+
+/* AOX, Inc. products */
+#define USB_PRODUCT_AOX_USB101 0x0008 /* Ethernet */
+
+/* Apple Computer products */
+#define USB_PRODUCT_APPLE_IMAC_KBD 0x0201 /* USB iMac Keyboard */
+#define USB_PRODUCT_APPLE_KBD 0x0202 /* USB Keyboard M2452 */
+#define USB_PRODUCT_APPLE_EXT_KBD 0x020c /* Apple Extended USB Keyboard */
+#define USB_PRODUCT_APPLE_KBD_TP_ANSI 0x0223 /* Apple Internal Keyboard/Trackpad (Wellspring/ANSI) */
+#define USB_PRODUCT_APPLE_KBD_TP_ISO 0x0224 /* Apple Internal Keyboard/Trackpad (Wellspring/ISO) */
+#define USB_PRODUCT_APPLE_KBD_TP_JIS 0x0225 /* Apple Internal Keyboard/Trackpad (Wellspring/JIS) */
+#define USB_PRODUCT_APPLE_KBD_TP_ANSI2 0x0230 /* Apple Internal Keyboard/Trackpad (Wellspring2/ANSI) */
+#define USB_PRODUCT_APPLE_KBD_TP_ISO2 0x0231 /* Apple Internal Keyboard/Trackpad (Wellspring2/ISO) */
+#define USB_PRODUCT_APPLE_KBD_TP_JIS2 0x0232 /* Apple Internal Keyboard/Trackpad (Wellspring2/JIS) */
+#define USB_PRODUCT_APPLE_MOUSE 0x0301 /* Mouse M4848 */
+#define USB_PRODUCT_APPLE_OPTMOUSE 0x0302 /* Optical mouse */
+#define USB_PRODUCT_APPLE_MIGHTYMOUSE 0x0304 /* Mighty Mouse */
+#define USB_PRODUCT_APPLE_KBD_HUB 0x1001 /* Hub in Apple USB Keyboard */
+#define USB_PRODUCT_APPLE_EXT_KBD_HUB 0x1003 /* Hub in Apple Extended USB Keyboard */
+#define USB_PRODUCT_APPLE_SPEAKERS 0x1101 /* Speakers */
+#define USB_PRODUCT_APPLE_IPOD 0x1201 /* iPod */
+#define USB_PRODUCT_APPLE_IPOD2G 0x1202 /* iPod 2G */
+#define USB_PRODUCT_APPLE_IPOD3G 0x1203 /* iPod 3G */
+#define USB_PRODUCT_APPLE_IPOD_04 0x1204 /* iPod '04' */
+#define USB_PRODUCT_APPLE_IPODMINI 0x1205 /* iPod Mini */
+#define USB_PRODUCT_APPLE_IPOD_06 0x1206 /* iPod '06' */
+#define USB_PRODUCT_APPLE_IPOD_07 0x1207 /* iPod '07' */
+#define USB_PRODUCT_APPLE_IPOD_08 0x1208 /* iPod '08' */
+#define USB_PRODUCT_APPLE_IPODVIDEO 0x1209 /* iPod Video */
+#define USB_PRODUCT_APPLE_IPODNANO 0x120a /* iPod Nano */
+#define USB_PRODUCT_APPLE_IPHONE 0x1290 /* iPhone */
+#define USB_PRODUCT_APPLE_IPOD_TOUCH 0x1291 /* iPod Touch */
+#define USB_PRODUCT_APPLE_IPHONE_3G 0x1292 /* iPhone 3G */
+#define USB_PRODUCT_APPLE_IPHONE_3GS 0x1294 /* iPhone 3GS */
+#define USB_PRODUCT_APPLE_IPHONE_4 0x1297 /* iPhone 4 */
+#define USB_PRODUCT_APPLE_IPAD 0x129a /* iPad */
+#define USB_PRODUCT_APPLE_ETHERNET 0x1402 /* Ethernet A1277 */
+
+/* Arkmicro Technologies */
+#define USB_PRODUCT_ARKMICRO_ARK3116 0x0232 /* ARK3116 Serial */
+
+/* Asahi Optical products */
+#define USB_PRODUCT_ASAHIOPTICAL_OPTIO230 0x0004 /* Digital camera */
+#define USB_PRODUCT_ASAHIOPTICAL_OPTIO330 0x0006 /* Digital camera */
+
+/* Asante products */
+#define USB_PRODUCT_ASANTE_EA 0x1427 /* Ethernet */
+
+/* ASIX Electronics products */
+#define USB_PRODUCT_ASIX_AX88172 0x1720 /* 10/100 Ethernet */
+#define USB_PRODUCT_ASIX_AX88178 0x1780 /* AX88178 */
+#define USB_PRODUCT_ASIX_AX88772 0x7720 /* AX88772 */
+#define USB_PRODUCT_ASIX_AX88772A 0x772a /* AX88772A USB 2.0 10/100 Ethernet */
+
+/* ASUS products */
+#define USB_PRODUCT_ASUS2_USBN11 0x0b05 /* USB-N11 */
+#define USB_PRODUCT_ASUS_WL167G 0x1707 /* WL-167g Wireless Adapter */
+#define USB_PRODUCT_ASUS_WL159G 0x170c /* WL-159g */
+#define USB_PRODUCT_ASUS_A9T_WIFI 0x171b /* A9T wireless */
+#define USB_PRODUCT_ASUS_P5B_WIFI 0x171d /* P5B wireless */
+#define USB_PRODUCT_ASUS_RT2573_1 0x1723 /* RT2573 */
+#define USB_PRODUCT_ASUS_RT2573_2 0x1724 /* RT2573 */
+#define USB_PRODUCT_ASUS_LCM 0x1726 /* LCM display */
+#define USB_PRODUCT_ASUS_RT2870_1 0x1731 /* RT2870 */
+#define USB_PRODUCT_ASUS_RT2870_2 0x1732 /* RT2870 */
+#define USB_PRODUCT_ASUS_RT2870_3 0x1742 /* RT2870 */
+#define USB_PRODUCT_ASUS_RT2870_4 0x1760 /* RT2870 */
+#define USB_PRODUCT_ASUS_RT2870_5 0x1761 /* RT2870 */
+#define USB_PRODUCT_ASUS_USBN13 0x1784 /* USB-N13 */
+#define USB_PRODUCT_ASUS_RT3070_1 0x1790 /* RT3070 */
+#define USB_PRODUCT_ASUS_A730W 0x4202 /* ASUS MyPal A730W */
+#define USB_PRODUCT_ASUS_P535 0x420f /* ASUS P535 PDA */
+#define USB_PRODUCT_ASUS_GMSC 0x422f /* ASUS Generic Mass Storage */
+#define USB_PRODUCT_ASUS_RT2570 0x1706 /* RT2500USB Wireless Adapter */
+
+/* ATen products */
+#define USB_PRODUCT_ATEN_UC1284 0x2001 /* Parallel printer */
+#define USB_PRODUCT_ATEN_UC10T 0x2002 /* 10Mbps Ethernet */
+#define USB_PRODUCT_ATEN_UC110T 0x2007 /* UC-110T Ethernet */
+#define USB_PRODUCT_ATEN_UC232A 0x2008 /* Serial */
+#define USB_PRODUCT_ATEN_UC210T 0x2009 /* UC-210T Ethernet */
+#define USB_PRODUCT_ATEN_DSB650C 0x4000 /* DSB-650C */
+
+/* Atheros Communications products */
+#define USB_PRODUCT_ATHEROS_AR5523 0x0001 /* AR5523 */
+#define USB_PRODUCT_ATHEROS_AR5523_NF 0x0002 /* AR5523 (no firmware) */
+#define USB_PRODUCT_ATHEROS2_AR5523_1 0x0001 /* AR5523 */
+#define USB_PRODUCT_ATHEROS2_AR5523_1_NF 0x0002 /* AR5523 (no firmware) */
+#define USB_PRODUCT_ATHEROS2_AR5523_2 0x0003 /* AR5523 */
+#define USB_PRODUCT_ATHEROS2_AR5523_2_NF 0x0004 /* AR5523 (no firmware) */
+#define USB_PRODUCT_ATHEROS2_AR5523_3 0x0005 /* AR5523 */
+#define USB_PRODUCT_ATHEROS2_AR5523_3_NF 0x0006 /* AR5523 (no firmware) */
+
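Many entries in this table come in pairs, e.g. "AR5523" and "AR5523 (no
firmware)": the bare controller first enumerates under the _NF product ID,
the host uploads firmware, and the device then re-enumerates under the
regular ID. A minimal attach-time sketch of that distinction, assuming the
USB_VENDOR_ATHEROS/USB_VENDOR_ATHEROS2 macros from the vendor section
earlier in this header and a caller that supplies the enumerated IDs:

#include <stdint.h>

/*
 * Sketch only: decide whether an AR5523-based device still needs its
 * firmware uploaded.  The USB_VENDOR_* values are assumed from the
 * vendor half of this header; uath_needs_firmware() is a hypothetical
 * helper, not an API of any particular USB stack.
 */
static int
uath_needs_firmware(uint16_t vendor, uint16_t product)
{
    if (vendor == USB_VENDOR_ATHEROS)
        return (product == USB_PRODUCT_ATHEROS_AR5523_NF);
    if (vendor == USB_VENDOR_ATHEROS2)
        return (product == USB_PRODUCT_ATHEROS2_AR5523_1_NF ||
            product == USB_PRODUCT_ATHEROS2_AR5523_2_NF ||
            product == USB_PRODUCT_ATHEROS2_AR5523_3_NF);
    return (0);  /* already running firmware; attach normally */
}
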
+/* Atmel Comp. products */
+#define USB_PRODUCT_ATMEL_STK541 0x2109 /* Zigbee Controller */
+#define USB_PRODUCT_ATMEL_UHB124 0x3301 /* UHB124 hub */
+#define USB_PRODUCT_ATMEL_DWL120 0x7603 /* DWL-120 Wireless Adapter */
+#define USB_PRODUCT_ATMEL_BW002 0x7605 /* BW002 Wireless Adapter */
+#define USB_PRODUCT_ATMEL_WL1130USB 0x7613 /* WL-1130 USB */
+#define USB_PRODUCT_ATMEL_AT76C505A 0x7614 /* AT76c505a Wireless Adapter */
+
+/* AuthenTec products */
+#define USB_PRODUCT_AUTHENTEC_AES1610 0x1600 /* AES1610 Fingerprint Sensor */
+
+/* Avision products */
+#define USB_PRODUCT_AVISION_1200U 0x0268 /* 1200U scanner */
+
+/* Axesstel products */
+#define USB_PRODUCT_AXESSTEL_DATAMODEM 0x1000 /* Data Modem */
+
+/* AzureWave products */
+#define USB_PRODUCT_AZUREWAVE_RT2870_1 0x3247 /* RT2870 */
+#define USB_PRODUCT_AZUREWAVE_RT2870_2 0x3262 /* RT2870 */
+#define USB_PRODUCT_AZUREWAVE_RT3070_1 0x3273 /* RT3070 */
+#define USB_PRODUCT_AZUREWAVE_RT3070_2 0x3284 /* RT3070 */
+#define USB_PRODUCT_AZUREWAVE_RT3070_3 0x3305 /* RT3070 */
+
+/* Baltech products */
+#define USB_PRODUCT_BALTECH_CARDREADER 0x9999 /* Card reader */
+
+/* B&B Electronics products */
+#define USB_PRODUCT_BBELECTRONICS_USOTL4 0xac01 /* RS-422/485 */
+
+/* Belkin products */
+/*product BELKIN F5U111 0x???? F5U111 Ethernet*/
+#define USB_PRODUCT_BELKIN_F5D6050 0x0050 /* F5D6050 802.11b Wireless Adapter */
+#define USB_PRODUCT_BELKIN_FBT001V 0x0081 /* FBT001v2 Bluetooth */
+#define USB_PRODUCT_BELKIN_FBT003V 0x0084 /* FBT003v2 Bluetooth */
+#define USB_PRODUCT_BELKIN_F5U103 0x0103 /* F5U103 Serial */
+#define USB_PRODUCT_BELKIN_F5U109 0x0109 /* F5U109 Serial */
+#define USB_PRODUCT_BELKIN_USB2SCSI 0x0115 /* USB to SCSI */
+#define USB_PRODUCT_BELKIN_F8T012 0x0121 /* F8T012xx1 Bluetooth USB Adapter */
+#define USB_PRODUCT_BELKIN_USB2LAN 0x0121 /* USB to LAN */
+#define USB_PRODUCT_BELKIN_F5U208 0x0208 /* F5U208 VideoBus II */
+#define USB_PRODUCT_BELKIN_F5U237 0x0237 /* F5U237 USB 2.0 7-Port Hub */
+#define USB_PRODUCT_BELKIN_F5U257 0x0257 /* F5U257 Serial */
+#define USB_PRODUCT_BELKIN_F5U409 0x0409 /* F5U409 Serial */
+#define USB_PRODUCT_BELKIN_F6C550AVR 0x0551 /* F6C550-AVR UPS */
+#define USB_PRODUCT_BELKIN_F5U120 0x1203 /* F5U120-PC Hub */
+#define USB_PRODUCT_BELKIN_ZD1211B 0x4050 /* ZD1211B */
+#define USB_PRODUCT_BELKIN_F5D5055 0x5055 /* F5D5055 */
+#define USB_PRODUCT_BELKIN_F5D7050 0x7050 /* F5D7050 Wireless Adapter */
+#define USB_PRODUCT_BELKIN_F5D7051 0x7051 /* F5D7051 54g USB Network Adapter */
+#define USB_PRODUCT_BELKIN_F5D7050A 0x705a /* F5D7050A Wireless Adapter */
+/* Also sold as 'Ativa 802.11g wireless card' */
+#define USB_PRODUCT_BELKIN_F5D7050_V4000 0x705c /* F5D7050 v4000 Wireless Adapter */
+#define USB_PRODUCT_BELKIN_F5D7050E 0x705e /* F5D7050E Wireless Adapter */
+#define USB_PRODUCT_BELKIN_RT2870_1 0x8053 /* RT2870 */
+#define USB_PRODUCT_BELKIN_RT2870_2 0x805c /* RT2870 */
+#define USB_PRODUCT_BELKIN_F5D8053V3 0x815c /* F5D8053 v3 */
+#define USB_PRODUCT_BELKIN_F5D8055 0x825a /* F5D8055 */
+#define USB_PRODUCT_BELKIN_F5D9050V3 0x905b /* F5D9050 ver 3 Wireless Adapter */
+#define USB_PRODUCT_BELKIN2_F5U002 0x0002 /* F5U002 Parallel printer */
+#define USB_PRODUCT_BELKIN_F6D4050V1 0x935a /* F6D4050 v1 */
+
+/* Billionton products */
+#define USB_PRODUCT_BILLIONTON_USB100 0x0986 /* USB100N 10/100 FastEthernet */
+#define USB_PRODUCT_BILLIONTON_USBLP100 0x0987 /* USB100LP */
+#define USB_PRODUCT_BILLIONTON_USBEL100 0x0988 /* USB100EL */
+#define USB_PRODUCT_BILLIONTON_USBE100 0x8511 /* USBE100 */
+#define USB_PRODUCT_BILLIONTON_USB2AR 0x90ff /* USB2AR Ethernet */
+
+/* Broadcom products */
+#define USB_PRODUCT_BROADCOM_BCM2033 0x2033 /* BCM2033 Bluetooth USB dongle */
+
+/* Brother Industries products */
+#define USB_PRODUCT_BROTHER_HL1050 0x0002 /* HL-1050 laser printer */
+#define USB_PRODUCT_BROTHER_MFC8600_9650 0x0100 /* MFC8600/9650 multifunction device */
+
+/* Behavior Technology Computer products */
+#define USB_PRODUCT_BTC_BTC6100 0x5550 /* 6100C Keyboard */
+#define USB_PRODUCT_BTC_BTC7932 0x6782 /* Keyboard with mouse port */
+
+/* Canon, Inc. products */
+#define USB_PRODUCT_CANON_N656U 0x2206 /* CanoScan N656U */
+#define USB_PRODUCT_CANON_N1220U 0x2207 /* CanoScan N1220U */
+#define USB_PRODUCT_CANON_D660U 0x2208 /* CanoScan D660U */
+#define USB_PRODUCT_CANON_N676U 0x220d /* CanoScan N676U */
+#define USB_PRODUCT_CANON_N1240U 0x220e /* CanoScan N1240U */
+#define USB_PRODUCT_CANON_LIDE25 0x2220 /* CanoScan LIDE 25 */
+#define USB_PRODUCT_CANON_S10 0x3041 /* PowerShot S10 */
+#define USB_PRODUCT_CANON_S100 0x3045 /* PowerShot S100 */
+#define USB_PRODUCT_CANON_S200 0x3065 /* PowerShot S200 */
+#define USB_PRODUCT_CANON_REBELXT 0x30ef /* Digital Rebel XT */
+
+/* CATC products */
+#define USB_PRODUCT_CATC_NETMATE 0x000a /* Netmate Ethernet */
+#define USB_PRODUCT_CATC_NETMATE2 0x000c /* Netmate2 Ethernet */
+#define USB_PRODUCT_CATC_CHIEF 0x000d /* USB Chief Bus & Protocol Analyzer */
+#define USB_PRODUCT_CATC_ANDROMEDA 0x1237 /* Andromeda hub */
+
+/* CASIO products */
+#define USB_PRODUCT_CASIO_QV_DIGICAM 0x1001 /* QV DigiCam */
+#define USB_PRODUCT_CASIO_EXS880 0x1105 /* Exilim EX-S880 */
+#define USB_PRODUCT_CASIO_BE300 0x2002 /* BE-300 PDA */
+#define USB_PRODUCT_CASIO_NAMELAND 0x4001 /* CASIO Nameland EZ-USB */
+
+/* CCYU products */
+#define USB_PRODUCT_CCYU_ED1064 0x2136 /* EasyDisk ED1064 */
+
+/* Century products */
+#define USB_PRODUCT_CENTURY_EX35QUAT 0x011e /* Century USB Disk Enclosure */
+#define USB_PRODUCT_CENTURY_EX35SW4_SB4 0x011f /* Century USB Disk Enclosure */
+
+/* Cherry products */
+#define USB_PRODUCT_CHERRY_MY3000KBD 0x0001 /* My3000 keyboard */
+#define USB_PRODUCT_CHERRY_MY3000HUB 0x0003 /* My3000 hub */
+#define USB_PRODUCT_CHERRY_CYBOARD 0x0004 /* CyBoard Keyboard */
+
+/* Chic Technology products */
+#define USB_PRODUCT_CHIC_MOUSE1 0x0001 /* mouse */
+#define USB_PRODUCT_CHIC_CYPRESS 0x0003 /* Cypress USB Mouse */
+
+/* Chicony products */
+#define USB_PRODUCT_CHICONY_KB8933 0x0001 /* KB-8933 keyboard */
+#define USB_PRODUCT_CHICONY_KU0325 0x0116 /* KU-0325 keyboard */
+#define USB_PRODUCT_CHICONY_CNF7129 0xb071 /* Notebook Web Camera */
+#define USB_PRODUCT_CHICONY2_TWINKLECAM 0x600d /* TwinkleCam USB camera */
+
+/* CH Products */
+#define USB_PRODUCT_CHPRODUCTS_PROTHROTTLE 0x00f1 /* Pro Throttle */
+#define USB_PRODUCT_CHPRODUCTS_PROPEDALS 0x00f2 /* Pro Pedals */
+#define USB_PRODUCT_CHPRODUCTS_FIGHTERSTICK 0x00f3 /* Fighterstick */
+#define USB_PRODUCT_CHPRODUCTS_FLIGHTYOKE 0x00ff /* Flight Sim Yoke */
+
+/* Cisco-Linksys products */
+#define USB_PRODUCT_CISCOLINKSYS_WUSB54AG 0x000c /* WUSB54AG Wireless Adapter */
+#define USB_PRODUCT_CISCOLINKSYS_WUSB54G 0x000d /* WUSB54G Wireless Adapter */
+#define USB_PRODUCT_CISCOLINKSYS_WUSB54GP 0x0011 /* WUSB54GP Wireless Adapter */
+#define USB_PRODUCT_CISCOLINKSYS_USB200MV2 0x0018 /* USB200M v2 */
+#define USB_PRODUCT_CISCOLINKSYS_HU200TS 0x001a /* HU200TS Wireless Adapter */
+#define USB_PRODUCT_CISCOLINKSYS_WUSB54GC 0x0020 /* WUSB54GC */
+#define USB_PRODUCT_CISCOLINKSYS_WUSB54GR 0x0023 /* WUSB54GR */
+#define USB_PRODUCT_CISCOLINKSYS_WUSBF54G 0x0024 /* WUSBF54G */
+#define USB_PRODUCT_CISCOLINKSYS2_RT3070 0x4001 /* RT3070 */
+#define USB_PRODUCT_CISCOLINKSYS3_RT3070 0x0101 /* RT3070 */
+
+/* Clipsal products */
+#define USB_PRODUCT_CLIPSAL_5500PCU 0x0303 /* 5500PCU C-Bus */
+
+/* CMOTECH products */
+#define USB_PRODUCT_CMOTECH_CNU510 0x5141 /* CDMA Technologies USB modem */
+#define USB_PRODUCT_CMOTECH_CNU550 0x5543 /* CDMA 2000 1xRTT/1xEVDO USB modem */
+#define USB_PRODUCT_CMOTECH_CGU628 0x6006 /* CGU-628 */
+#define USB_PRODUCT_CMOTECH_CDMA_MODEM1 0x6280 /* CDMA Technologies USB modem */
+#define USB_PRODUCT_CMOTECH_DISK 0xf000 /* disk mode */
+
+/* Compaq products */
+#define USB_PRODUCT_COMPAQ_IPAQPOCKETPC 0x0003 /* iPAQ PocketPC */
+#define USB_PRODUCT_COMPAQ_PJB100 0x504a /* Personal Jukebox PJB100 */
+#define USB_PRODUCT_COMPAQ_IPAQLINUX 0x505a /* iPAQ Linux */
+
+/* Composite Corp products (look the same as "TANGTOP") */
+#define USB_PRODUCT_COMPOSITE_USBPS2 0x0001 /* USB to PS2 Adaptor */
+
+/* Conceptronic products */
+#define USB_PRODUCT_CONCEPTRONIC_PRISM_GT 0x3762 /* PrismGT USB 2.0 WLAN */
+#define USB_PRODUCT_CONCEPTRONIC_C11U 0x7100 /* C11U */
+#define USB_PRODUCT_CONCEPTRONIC_WL210 0x7110 /* WL-210 */
+#define USB_PRODUCT_CONCEPTRONIC_AR5523_1 0x7801 /* AR5523 */
+#define USB_PRODUCT_CONCEPTRONIC_AR5523_1_NF 0x7802 /* AR5523 (no firmware) */
+#define USB_PRODUCT_CONCEPTRONIC_AR5523_2 0x7811 /* AR5523 */
+#define USB_PRODUCT_CONCEPTRONIC_AR5523_2_NF 0x7812 /* AR5523 (no firmware) */
+#define USB_PRODUCT_CONCEPTRONIC2_C54RU 0x3c02 /* C54RU WLAN */
+#define USB_PRODUCT_CONCEPTRONIC2_C54RU2 0x3c22 /* C54RU */
+#define USB_PRODUCT_CONCEPTRONIC2_RT3070_1 0x3c08 /* RT3070 */
+#define USB_PRODUCT_CONCEPTRONIC2_RT3070_2 0x3c11 /* RT3070 */
+#define USB_PRODUCT_CONCEPTRONIC2_VIGORN61 0x3c25 /* VIGORN61 */
+#define USB_PRODUCT_CONCEPTRONIC2_RT2870_1 0x3c06 /* RT2870 */
+#define USB_PRODUCT_CONCEPTRONIC2_RT2870_2 0x3c07 /* RT2870 */
+#define USB_PRODUCT_CONCEPTRONIC2_RT2870_7 0x3c09 /* RT2870 */
+#define USB_PRODUCT_CONCEPTRONIC2_RT2870_8 0x3c12 /* RT2870 */
+#define USB_PRODUCT_CONCEPTRONIC2_RT2870_3 0x3c23 /* RT2870 */
+#define USB_PRODUCT_CONCEPTRONIC2_RT2870_4 0x3c25 /* RT2870 */
+#define USB_PRODUCT_CONCEPTRONIC2_RT2870_5 0x3c27 /* RT2870 */
+#define USB_PRODUCT_CONCEPTRONIC2_RT2870_6 0x3c28 /* RT2870 */
+
+/* Connectix products */
+#define USB_PRODUCT_CONNECTIX_QUICKCAM 0x0001 /* QuickCam */
+
+/* Corega products */
+#define USB_PRODUCT_COREGA_ETHER_USB_T 0x0001 /* Ether USB-T */
+#define USB_PRODUCT_COREGA_FETHER_USB_TX 0x0004 /* FEther USB-TX */
+#define USB_PRODUCT_COREGA_WLAN_USB_USB_11 0x000c /* WirelessLAN USB-11 */
+#define USB_PRODUCT_COREGA_FETHER_USB_TXS 0x000d /* FEther USB-TXS */
+#define USB_PRODUCT_COREGA_WLANUSB 0x0012 /* Wireless LAN Stick-11 */
+#define USB_PRODUCT_COREGA_FETHER_USB2_TX 0x0017 /* FEther USB2-TX */
+#define USB_PRODUCT_COREGA_WLUSB_11_KEY 0x001a /* ULUSB-11 Key */
+#define USB_PRODUCT_COREGA_CGUSBRS232R 0x002a /* CG-USBRS232R */
+#define USB_PRODUCT_COREGA_CGWLUSB2GL 0x002d /* CG-WLUSB2GL */
+#define USB_PRODUCT_COREGA_CGWLUSB2GPX 0x002e /* CG-WLUSB2GPX */
+#define USB_PRODUCT_COREGA_RT2870_1 0x002f /* RT2870 */
+#define USB_PRODUCT_COREGA_RT2870_2 0x003c /* RT2870 */
+#define USB_PRODUCT_COREGA_RT2870_3 0x003f /* RT2870 */
+#define USB_PRODUCT_COREGA_RT3070 0x0041 /* RT3070 */
+#define USB_PRODUCT_COREGA_CGWLUSB300GNM 0x0042 /* CG-WLUSB300GNM */
+
+#define USB_PRODUCT_COREGA_WLUSB_11_STICK 0x7613 /* WLAN USB Stick 11 */
+#define USB_PRODUCT_COREGA_FETHER_USB_TXC 0x9601 /* FEther USB-TXC */
+
+/* Creative products */
+#define USB_PRODUCT_CREATIVE_NOMAD_II 0x1002 /* Nomad II MP3 player */
+#define USB_PRODUCT_CREATIVE_NOMAD_IIMG 0x4004 /* Nomad II MG */
+#define USB_PRODUCT_CREATIVE_NOMAD 0x4106 /* Nomad */
+#define USB_PRODUCT_CREATIVE2_VOIP_BLASTER 0x0258 /* Voip Blaster */
+#define USB_PRODUCT_CREATIVE3_OPTICAL_MOUSE 0x0001 /* Notebook Optical Mouse */
+
+/* Cambridge Silicon Radio Ltd. products */
+#define USB_PRODUCT_CSR_BT_DONGLE 0x0001 /* Bluetooth USB dongle */
+#define USB_PRODUCT_CSR_CSRDFU 0xffff /* USB Bluetooth Device in DFU State */
+
+/* Chipsbank Microelectronics Co., Ltd */
+#define USB_PRODUCT_CHIPSBANK_USBMEMSTICK 0x6025 /* CBM2080 Flash drive controller */
+#define USB_PRODUCT_CHIPSBANK_USBMEMSTICK1 0x6026 /* CBM1180 Flash drive controller */
+
+/* CTX products */
+#define USB_PRODUCT_CTX_EX1300 0x9999 /* Ex1300 hub */
+
+/* Curitel products */
+#define USB_PRODUCT_CURITEL_HX550C 0x1101 /* CDMA 2000 1xRTT USB modem (HX-550C) */
+#define USB_PRODUCT_CURITEL_HX57XB 0x2101 /* CDMA 2000 1xRTT USB modem (HX-570/575B/PR-600) */
+#define USB_PRODUCT_CURITEL_PC5740 0x3701 /* Broadband Wireless modem */
+#define USB_PRODUCT_CURITEL_UM175 0x3714 /* EVDO modem */
+
+/* CyberPower products */
+#define USB_PRODUCT_CYBERPOWER_1500CAVRLCD 0x0501 /* 1500CAVRLCD */
+
+/* CyberTAN Technology products */
+#define USB_PRODUCT_CYBERTAN_TG54USB 0x1666 /* TG54USB */
+#define USB_PRODUCT_CYBERTAN_RT2870 0x1828 /* RT2870 */
+
+/* Cypress Semiconductor products */
+#define USB_PRODUCT_CYPRESS_MOUSE 0x0001 /* mouse */
+#define USB_PRODUCT_CYPRESS_THERMO 0x0002 /* thermometer */
+#define USB_PRODUCT_CYPRESS_WISPY1A 0x0bad /* MetaGeek Wi-Spy */
+#define USB_PRODUCT_CYPRESS_KBDHUB 0x0101 /* Keyboard/Hub */
+#define USB_PRODUCT_CYPRESS_FMRADIO 0x1002 /* FM Radio */
+#define USB_PRODUCT_CYPRESS_IKARILASER 0x121f /* Ikari Laser SteelSeries ApS */
+
+#define USB_PRODUCT_CYPRESS_USBRS232 0x5500 /* USB-RS232 Interface */
+#define USB_PRODUCT_CYPRESS_SLIM_HUB 0x6560 /* Slim Hub */
+#define USB_PRODUCT_CYPRESS_XX6830XX 0x6830 /* PATA Storage Device */
+#define USB_PRODUCT_CYPRESS_SILVERSHIELD 0xfd13 /* Gembird Silver Shield PM */
+
+/* Daisy Technology products */
+#define USB_PRODUCT_DAISY_DMC 0x6901 /* USB MultiMedia Reader */
+
+/* Dallas Semiconductor products */
+#define USB_PRODUCT_DALLAS_J6502 0x4201 /* J-6502 speakers */
+
+/* DataApex products */
+#define USB_PRODUCT_DATAAPEX_MULTICOM 0xead6 /* MultiCom */
+
+/* Dell products */
+#define USB_PRODUCT_DELL_PORT 0x0058 /* Port Replicator */
+#define USB_PRODUCT_DELL_AIO926 0x5115 /* Photo AIO Printer 926 */
+#define USB_PRODUCT_DELL_BC02 0x8000 /* BC02 Bluetooth USB Adapter */
+#define USB_PRODUCT_DELL_PRISM_GT_1 0x8102 /* PrismGT USB 2.0 WLAN */
+#define USB_PRODUCT_DELL_TM350 0x8103 /* TrueMobile 350 Bluetooth USB Adapter */
+#define USB_PRODUCT_DELL_PRISM_GT_2 0x8104 /* PrismGT USB 2.0 WLAN */
+#define USB_PRODUCT_DELL_U5700 0x8114 /* Dell 5700 3G */
+#define USB_PRODUCT_DELL_U5500 0x8115 /* Dell 5500 3G */
+#define USB_PRODUCT_DELL_U5505 0x8116 /* Dell 5505 3G */
+#define USB_PRODUCT_DELL_U5700_2 0x8117 /* Dell 5700 3G */
+#define USB_PRODUCT_DELL_U5510 0x8118 /* Dell 5510 3G */
+#define USB_PRODUCT_DELL_U5700_3 0x8128 /* Dell 5700 3G */
+#define USB_PRODUCT_DELL_U5700_4 0x8129 /* Dell 5700 3G */
+#define USB_PRODUCT_DELL_U5720 0x8133 /* Dell 5720 3G */
+#define USB_PRODUCT_DELL_U5720_2 0x8134 /* Dell 5720 3G */
+#define USB_PRODUCT_DELL_U740 0x8135 /* Dell U740 CDMA */
+#define USB_PRODUCT_DELL_U5520 0x8136 /* Dell 5520 3G */
+#define USB_PRODUCT_DELL_U5520_2 0x8137 /* Dell 5520 3G */
+#define USB_PRODUCT_DELL_U5520_3 0x8138 /* Dell 5520 3G */
+#define USB_PRODUCT_DELL_U5730 0x8180 /* Dell 5730 3G */
+#define USB_PRODUCT_DELL_U5730_2 0x8181 /* Dell 5730 3G */
+#define USB_PRODUCT_DELL_U5730_3 0x8182 /* Dell 5730 3G */
+#define USB_PRODUCT_DELL_DW700 0x9500 /* Dell DW700 GPS */
+
+/* DeLorme Publishing products */
+#define USB_PRODUCT_DELORME_EARTHMATE 0x0100 /* Earthmate GPS */
+
+/* Desknote products */
+#define USB_PRODUCT_DESKNOTE_UCR_61S2B 0x0c55 /* UCR-61S2B */
+
+/* Diamond products */
+#define USB_PRODUCT_DIAMOND_RIO500USB 0x0001 /* Rio 500 USB */
+
+/* Dick Smith Electronics (really C-Net) products */
+#define USB_PRODUCT_DICKSMITH_RT2573 0x9022 /* RT2573 */
+#define USB_PRODUCT_DICKSMITH_CWD854F 0x9032 /* C-Net CWD-854 rev F */
+
+/* Digi International products */
+#define USB_PRODUCT_DIGI_ACCELEPORT2 0x0002 /* AccelePort USB 2 */
+#define USB_PRODUCT_DIGI_ACCELEPORT4 0x0004 /* AccelePort USB 4 */
+#define USB_PRODUCT_DIGI_ACCELEPORT8 0x0008 /* AccelePort USB 8 */
+
+/* Digianswer A/S products */
+#define USB_PRODUCT_DIGIANSWER_ZIGBEE802154 0x000a /* ZigBee/802.15.4 MAC */
+
+/* D-Link products */
+/*product DLINK DSBS25 0x0100 DSB-S25 serial*/
+#define USB_PRODUCT_DLINK_DUBE100 0x1a00 /* 10/100 Ethernet */
+#define USB_PRODUCT_DLINK_DSB650TX4 0x200c /* 10/100 Ethernet */
+#define USB_PRODUCT_DLINK_DWL120E 0x3200 /* DWL-120 rev E */
+#define USB_PRODUCT_DLINK_DWL122 0x3700 /* DWL-122 */
+#define USB_PRODUCT_DLINK_DWLG120 0x3701 /* DWL-G120 */
+#define USB_PRODUCT_DLINK_DWL120F 0x3702 /* DWL-120 rev F */
+#define USB_PRODUCT_DLINK_DWLAG132 0x3a00 /* DWL-AG132 */
+#define USB_PRODUCT_DLINK_DWLAG132_NF 0x3a01 /* DWL-AG132 (no firmware) */
+#define USB_PRODUCT_DLINK_DWLG132 0x3a02 /* DWL-G132 */
+#define USB_PRODUCT_DLINK_DWLG132_NF 0x3a03 /* DWL-G132 (no firmware) */
+#define USB_PRODUCT_DLINK_DWLAG122 0x3a04 /* DWL-AG122 */
+#define USB_PRODUCT_DLINK_DWLAG122_NF 0x3a05 /* DWL-AG122 (no firmware) */
+#define USB_PRODUCT_DLINK_DWLG122 0x3c00 /* DWL-G122 b1 Wireless Adapter */
+#define USB_PRODUCT_DLINK_DUBE100B1 0x3c05 /* DUB-E100 rev B1 */
+#define USB_PRODUCT_DLINK_RT2870 0x3c09 /* RT2870 */
+#define USB_PRODUCT_DLINK_RT3072 0x3c0a /* RT3072 */
+#define USB_PRODUCT_DLINK_DSB650C 0x4000 /* 10Mbps Ethernet */
+#define USB_PRODUCT_DLINK_DSB650TX1 0x4001 /* 10/100 Ethernet */
+#define USB_PRODUCT_DLINK_DSB650TX 0x4002 /* 10/100 Ethernet */
+#define USB_PRODUCT_DLINK_DSB650TX_PNA 0x4003 /* 1/10/100 Ethernet */
+#define USB_PRODUCT_DLINK_DSB650TX3 0x400b /* 10/100 Ethernet */
+#define USB_PRODUCT_DLINK_DSB650TX2 0x4102 /* 10/100 Ethernet */
+#define USB_PRODUCT_DLINK_DSB650 0xabc1 /* 10/100 Ethernet */
+#define USB_PRODUCT_DLINK_DUBH7 0xf103 /* DUB-H7 USB 2.0 7-Port Hub */
+#define USB_PRODUCT_DLINK2_DWA120 0x3a0c /* DWA-120 */
+#define USB_PRODUCT_DLINK2_DWA120_NF 0x3a0d /* DWA-120 (no firmware) */
+#define USB_PRODUCT_DLINK2_DWLG122C1 0x3c03 /* DWL-G122 c1 */
+#define USB_PRODUCT_DLINK2_WUA1340 0x3c04 /* WUA-1340 */
+#define USB_PRODUCT_DLINK2_DWA111 0x3c06 /* DWA-111 */
+#define USB_PRODUCT_DLINK2_RT2870_1 0x3c09 /* RT2870 */
+#define USB_PRODUCT_DLINK2_DWA110 0x3c07 /* DWA-110 */
+#define USB_PRODUCT_DLINK2_RT3072 0x3c0a /* RT3072 */
+#define USB_PRODUCT_DLINK2_RT3072_1 0x3c0b /* RT3072 */
+#define USB_PRODUCT_DLINK2_RT3070_1 0x3c0d /* RT3070 */
+#define USB_PRODUCT_DLINK2_RT3070_2 0x3c0e /* RT3070 */
+#define USB_PRODUCT_DLINK2_RT3070_3 0x3c0f /* RT3070 */
+#define USB_PRODUCT_DLINK2_RT2870_2 0x3c11 /* RT2870 */
+#define USB_PRODUCT_DLINK2_DWA130 0x3c13 /* DWA-130 */
+#define USB_PRODUCT_DLINK2_RT3070_4 0x3c15 /* RT3070 */
+#define USB_PRODUCT_DLINK2_RT3070_5 0x3c16 /* RT3070 */
+#define USB_PRODUCT_DLINK3_DWM652 0x3e04 /* DWM-652 */
+
+/* DMI products */
+#define USB_PRODUCT_DMI_CFSM_RW 0xa109 /* CF/SM Reader/Writer */
+#define USB_PRODUCT_DMI_DISK 0x2bcf /* Generic Disk */
+
+/* DrayTek products */
+#define USB_PRODUCT_DRAYTEK_VIGOR550 0x0550 /* Vigor550 */
+
+/* dresden elektronik products */
+#define USB_PRODUCT_DRESDENELEKTRONIK_SENSORTERMINALBOARD 0x0001 /* SensorTerminalBoard */
+#define USB_PRODUCT_DRESDENELEKTRONIK_WIRELESSHANDHELDTERMINAL 0x0004 /* Wireless Handheld Terminal */
+
+/* Dynastream Innovations */
+#define USB_PRODUCT_DYNASTREAM_ANTDEVBOARD 0x1003 /* ANT dev board */
+#define USB_PRODUCT_DYNASTREAM_ANT2USB 0x1004 /* ANT2USB */
+#define USB_PRODUCT_DYNASTREAM_ANTDEVBOARD2 0x1006 /* ANT dev board */
+
+/* Edimax products */
+#define USB_PRODUCT_EDIMAX_EW7318USG 0x7318 /* USB Wireless dongle */
+#define USB_PRODUCT_EDIMAX_RT2870_1 0x7711 /* RT2870 */
+#define USB_PRODUCT_EDIMAX_EW7717 0x7717 /* EW-7717 */
+#define USB_PRODUCT_EDIMAX_EW7718 0x7718 /* EW-7718 */
+
+/* eGalax Products */
+#define USB_PRODUCT_EGALAX_TPANEL 0x0001 /* Touch Panel */
+#define USB_PRODUCT_EGALAX_TPANEL2 0x0002 /* Touch Panel */
+#define USB_PRODUCT_EGALAX2_TPANEL 0x0001 /* Touch Panel */
+
+/* Eicon Networks */
+#define USB_PRODUCT_EICON_DIVA852 0x4905 /* Diva 852 ISDN TA */
+
+/* EIZO products */
+#define USB_PRODUCT_EIZO_HUB 0x0000 /* hub */
+#define USB_PRODUCT_EIZO_MONITOR 0x0001 /* monitor */
+
+/* ELCON Systemtechnik products */
+#define USB_PRODUCT_ELCON_PLAN 0x0002 /* Goldpfeil P-LAN */
+
+/* Elecom products */
+#define USB_PRODUCT_ELECOM_MOUSE29UO 0x0002 /* mouse 29UO */
+#define USB_PRODUCT_ELECOM_LDUSBTX0 0x200c /* LD-USB/TX */
+#define USB_PRODUCT_ELECOM_LDUSBTX1 0x4002 /* LD-USB/TX */
+#define USB_PRODUCT_ELECOM_LDUSBLTX 0x4005 /* LD-USBL/TX */
+#define USB_PRODUCT_ELECOM_LDUSBTX2 0x400b /* LD-USB/TX */
+#define USB_PRODUCT_ELECOM_LDUSB20 0x4010 /* LD-USB20 */
+#define USB_PRODUCT_ELECOM_UCSGT 0x5003 /* UC-SGT */
+#define USB_PRODUCT_ELECOM_UCSGT0 0x5004 /* UC-SGT */
+#define USB_PRODUCT_ELECOM_LDUSBTX3 0xabc1 /* LD-USB/TX */
+
+/* Elsa products */
+#define USB_PRODUCT_ELSA_MODEM1 0x2265 /* ELSA Modem Board */
+#define USB_PRODUCT_ELSA_USB2ETHERNET 0x3000 /* Microlink USB2Ethernet */
+
+/* ELV products */
+#define USB_PRODUCT_ELV_USBI2C 0xe00f /* USB-I2C interface */
+
+/* EMS products */
+#define USB_PRODUCT_EMS_DUAL_SHOOTER 0x0003 /* PSX gun controller converter */
+
+/* Encore products */
+#define USB_PRODUCT_ENCORE_RT3070_1 0x1480 /* RT3070 */
+#define USB_PRODUCT_ENCORE_RT3070_2 0x14a1 /* RT3070 */
+#define USB_PRODUCT_ENCORE_RT3070_3 0x14a9 /* RT3070 */
+
+/* Entrega products */
+#define USB_PRODUCT_ENTREGA_1S 0x0001 /* 1S serial */
+#define USB_PRODUCT_ENTREGA_2S 0x0002 /* 2S serial */
+#define USB_PRODUCT_ENTREGA_1S25 0x0003 /* 1S25 serial */
+#define USB_PRODUCT_ENTREGA_4S 0x0004 /* 4S serial */
+#define USB_PRODUCT_ENTREGA_E45 0x0005 /* E45 Ethernet */
+#define USB_PRODUCT_ENTREGA_CENTRONICS 0x0006 /* Parallel Port */
+#define USB_PRODUCT_ENTREGA_XX1 0x0008 /* Ethernet */
+#define USB_PRODUCT_ENTREGA_1S9 0x0093 /* 1S9 serial */
+#define USB_PRODUCT_ENTREGA_EZUSB 0x8000 /* EZ-USB */
+/*product ENTREGA SERIAL 0x8001 DB25 Serial*/
+#define USB_PRODUCT_ENTREGA_2U4S 0x8004 /* 2U4S serial/usb hub */
+#define USB_PRODUCT_ENTREGA_XX2 0x8005 /* Ethernet */
+/*product ENTREGA SERIAL_DB9 0x8093 DB9 Serial*/
+
+/* Epson products */
+#define USB_PRODUCT_EPSON_PRINTER1 0x0001 /* USB Printer */
+#define USB_PRODUCT_EPSON_PRINTER2 0x0002 /* ISD USB Smart Cable for Mac */
+#define USB_PRODUCT_EPSON_PRINTER3 0x0003 /* ISD USB Smart Cable */
+#define USB_PRODUCT_EPSON_PRINTER5 0x0005 /* USB Printer */
+#define USB_PRODUCT_EPSON_636 0x0101 /* Perfection 636U / 636Photo scanner */
+#define USB_PRODUCT_EPSON_610 0x0103 /* Perfection 610 scanner */
+#define USB_PRODUCT_EPSON_1200 0x0104 /* Perfection 1200U / 1200Photo scanner */
+#define USB_PRODUCT_EPSON_1600 0x0107 /* Expression 1600 scanner */
+#define USB_PRODUCT_EPSON_1640 0x010a /* Perfection 1640SU scanner */
+#define USB_PRODUCT_EPSON_1240 0x010b /* Perfection 1240U / 1240Photo scanner */
+#define USB_PRODUCT_EPSON_640U 0x010c /* Perfection 640U scanner */
+#define USB_PRODUCT_EPSON_1250 0x010f /* Perfection 1250U / 1250Photo scanner */
+#define USB_PRODUCT_EPSON_1650 0x0110 /* Perfection 1650 scanner */
+#define USB_PRODUCT_EPSON_GT9700F 0x0112 /* GT-9700F scanner */
+#define USB_PRODUCT_EPSON_GT9300UF 0x011b /* GT-9300UF scanner */
+#define USB_PRODUCT_EPSON_3200 0x011c /* Perfection 3200 scanner */
+#define USB_PRODUCT_EPSON_1260 0x011d /* Perfection 1260 scanner */
+#define USB_PRODUCT_EPSON_1660 0x011e /* Perfection 1660 scanner */
+#define USB_PRODUCT_EPSON_1670 0x011f /* Perfection 1670 scanner */
+#define USB_PRODUCT_EPSON_1270 0x0120 /* Perfection 1270 scanner */
+#define USB_PRODUCT_EPSON_2480 0x0121 /* Perfection 2480 scanner */
+#define USB_PRODUCT_EPSON_3590 0x0122 /* Perfection 3590 scanner */
+#define USB_PRODUCT_EPSON_4990 0x012a /* Perfection 4990 Photo scanner */
+#define USB_PRODUCT_EPSON_CRESSI_EDY 0x0521 /* Cressi Edy diving computer */
+#define USB_PRODUCT_EPSON_STYLUS_875DC 0x0601 /* Stylus Photo 875DC Card Reader */
+#define USB_PRODUCT_EPSON_STYLUS_895 0x0602 /* Stylus Photo 895 Card Reader */
+#define USB_PRODUCT_EPSON_CX5400 0x0808 /* CX5400 scanner */
+#define USB_PRODUCT_EPSON_3500 0x080e /* CX-3500/3600/3650 MFP */
+#define USB_PRODUCT_EPSON_RX425 0x080f /* Stylus Photo RX425 scanner */
+#define USB_PRODUCT_EPSON_DX3800 0x0818 /* CX3700/CX3800/DX38x0 MFP scanner */
+#define USB_PRODUCT_EPSON_4800 0x0819 /* CX4700/CX4800/DX48x0 MFP scanner */
+#define USB_PRODUCT_EPSON_4200 0x0820 /* CX4100/CX4200/DX4200 MFP scanner */
+#define USB_PRODUCT_EPSON_5000 0x082b /* CX4900/CX5000/DX50x0 MFP scanner */
+#define USB_PRODUCT_EPSON_6000 0x082e /* CX5900/CX6000/DX60x0 MFP scanner */
+#define USB_PRODUCT_EPSON_DX4000 0x082f /* DX4000 MFP scanner */
+#define USB_PRODUCT_EPSON_DX7400 0x0838 /* CX7300/CX7400/DX7400 MFP scanner */
+#define USB_PRODUCT_EPSON_DX8400 0x0839 /* CX8300/CX8400/DX8400 MFP scanner */
+#define USB_PRODUCT_EPSON_SX100 0x0841 /* SX100/NX100 MFP scanner */
+#define USB_PRODUCT_EPSON_NX300 0x0848 /* NX300 MFP scanner */
+#define USB_PRODUCT_EPSON_SX200 0x0849 /* SX200/SX205 MFP scanner */
+#define USB_PRODUCT_EPSON_SX400 0x084a /* SX400/NX400/TX400 MFP scanner */
+
+/* e-TEK Labs products */
+#define USB_PRODUCT_ETEK_1COM 0x8007 /* Serial */
+
+/* Extended Systems products */
+#define USB_PRODUCT_EXTENDED_XTNDACCESS 0x0100 /* XTNDAccess IrDA */
+
+/* FEIYA products */
+#define USB_PRODUCT_FEIYA_5IN1 0x1132 /* 5-in-1 Card Reader */
+
+/* Fiberline */
+#define USB_PRODUCT_FIBERLINE_WL430U 0x6003 /* WL-430U */
+
+/* Fossil, Inc products */
+#define USB_PRODUCT_FOSSIL_WRISTPDA 0x0002 /* Wrist PDA */
+
+/* Foxconn products */
+#define USB_PRODUCT_FOXCONN_PIRELLI_DP_L10 0xe000 /* Pirelli DP-L10 */
+
+/* Freecom products */
+#define USB_PRODUCT_FREECOM_DVD 0xfc01 /* DVD drive */
+#define USB_PRODUCT_FREECOM_HDD 0xfc05 /* Classic SL Hard Drive */
+
+/* Fujitsu Siemens Computers products */
+#define USB_PRODUCT_FSC_E5400 0x1009 /* PrismGT USB 2.0 WLAN */
+
+/* Future Technology Devices products */
+#define USB_PRODUCT_FTDI_SERIAL_8U100AX 0x8372 /* 8U100AX Serial */
+#define USB_PRODUCT_FTDI_SERIAL_8U232AM 0x6001 /* 8U232AM Serial */
+#define USB_PRODUCT_FTDI_SERIAL_8U232AM4 0x6004 /* 8U232AM Serial */
+#define USB_PRODUCT_FTDI_SERIAL_2232C 0x6010 /* FT2232C Dual port Serial */
+#define USB_PRODUCT_FTDI_SERIAL_2232D 0x9e90 /* FT2232D Dual port Serial */
+#define USB_PRODUCT_FTDI_SERIAL_4232H 0x6011 /* FT4232H Quad port Serial */
+/* Gude Analog- und Digitalsysteme products also use FTDI's ID: */
+#define USB_PRODUCT_FTDI_TACTRIX_OPENPORT_13M 0xcc48 /* OpenPort 1.3 Mitsubishi */
+#define USB_PRODUCT_FTDI_TACTRIX_OPENPORT_13S 0xcc49 /* OpenPort 1.3 Subaru */
+#define USB_PRODUCT_FTDI_TACTRIX_OPENPORT_13U 0xcc4a /* OpenPort 1.3 Universal */
+#define USB_PRODUCT_FTDI_GAMMASCOUT 0xd678 /* Gamma-Scout */
+#define USB_PRODUCT_FTDI_KBS 0xe6c8 /* Pyramid KBS USB LCD */
+#define USB_PRODUCT_FTDI_EISCOU 0xe888 /* Expert ISDN Control USB */
+#define USB_PRODUCT_FTDI_UOPTBR 0xe889 /* USB-RS232 OptoBridge */
+#define USB_PRODUCT_FTDI_EMCU2D 0xe88a /* Expert mouseCLOCK USB II */
+#define USB_PRODUCT_FTDI_PCMSFU 0xe88b /* Precision Clock MSF USB */
+#define USB_PRODUCT_FTDI_EMCU2H 0xe88c /* Expert mouseCLOCK USB II HBG */
+#define USB_PRODUCT_FTDI_MAXSTREAM 0xee18 /* Maxstream PKG-U */
+#define USB_PRODUCT_FTDI_USB_UIRT 0xf850 /* USB-UIRT */
+#define USB_PRODUCT_FTDI_USBSERIAL 0xfa00 /* Matrix Orbital USB Serial */
+#define USB_PRODUCT_FTDI_MX2_3 0xfa01 /* Matrix Orbital MX2 or MX3 */
+#define USB_PRODUCT_FTDI_MX4_5 0xfa02 /* Matrix Orbital MX4 or MX5 */
+#define USB_PRODUCT_FTDI_LK202 0xfa03 /* Matrix Orbital VK/LK202 Family */
+#define USB_PRODUCT_FTDI_LK204 0xfa04 /* Matrix Orbital VK/LK204 Family */
+#define USB_PRODUCT_FTDI_CFA_632 0xfc08 /* Crystalfontz CFA-632 USB LCD */
+#define USB_PRODUCT_FTDI_CFA_634 0xfc09 /* Crystalfontz CFA-634 USB LCD */
+#define USB_PRODUCT_FTDI_CFA_633 0xfc0b /* Crystalfontz CFA-633 USB LCD */
+#define USB_PRODUCT_FTDI_CFA_631 0xfc0c /* Crystalfontz CFA-631 USB LCD */
+#define USB_PRODUCT_FTDI_CFA_635 0xfc0d /* Crystalfontz CFA-635 USB LCD */
+#define USB_PRODUCT_FTDI_SEMC_DSS20 0xfc82 /* SEMC DSS-20 SyncStation */
+/* Commerzielle und Technische Informationssysteme GmbH products */
+#define USB_PRODUCT_FTDI_CTI_USB_NANO_485 0xf60b /* CTI USB-Nano 485 */
+#define USB_PRODUCT_FTDI_CTI_USB_MINI_485 0xf608 /* CTI USB-Mini 485 */
+
+/* Fuji Photo products */
+#define USB_PRODUCT_FUJIPHOTO_MASS0100 0x0100 /* Mass Storage */
+
+/* Fujitsu products */
+#define USB_PRODUCT_FUJITSU_AH_F401U 0x105b /* AH-F401U Air H device */
+
+/* Fujitsu-Siemens products */
+#define USB_PRODUCT_FUJITSUSIEMENS_SCR 0x0009 /* Fujitsu-Siemens SCR USB Reader */
+
+/* Garmin products */
+#define USB_PRODUCT_GARMIN_IQUE_3600 0x0004 /* iQue 3600 */
+
+/* Gemalto products */
+#define USB_PRODUCT_GEMALTO_PROXPU 0x5501 /* Prox-PU/CU */
+
+/* General Instruments (Motorola) products */
+#define USB_PRODUCT_GENERALINSTMNTS_SB5100 0x5100 /* SURFboard SB5100 Cable modem */
+
+/* Genesys Logic products */
+#define USB_PRODUCT_GENESYS_GL620USB 0x0501 /* GL620USB Host-Host interface */
+#define USB_PRODUCT_GENESYS_GL650 0x0604 /* GL650 HUB */
+#define USB_PRODUCT_GENESYS_GL606 0x0606 /* USB 2.0 HUB */
+#define USB_PRODUCT_GENESYS_GL641USB 0x0700 /* GL641USB CompactFlash Card Reader */
+#define USB_PRODUCT_GENESYS_GL641USB2IDE_2 0x0701 /* GL641USB USB-IDE Bridge No 2 */
+#define USB_PRODUCT_GENESYS_GL641USB2IDE 0x0702 /* GL641USB USB-IDE Bridge */
+#define USB_PRODUCT_GENESYS_GL641USB_2 0x0760 /* GL641USB 6-in-1 Card Reader */
+
+/* GIGABYTE products */
+#define USB_PRODUCT_GIGABYTE_GN54G 0x8001 /* GN-54G */
+#define USB_PRODUCT_GIGABYTE_GNBR402W 0x8002 /* GN-BR402W */
+#define USB_PRODUCT_GIGABYTE_GNWLBM101 0x8003 /* GN-WLBM101 */
+#define USB_PRODUCT_GIGABYTE_GNWBKG 0x8007 /* GN-WBKG */
+#define USB_PRODUCT_GIGABYTE_GNWB01GS 0x8008 /* GN-WB01GS */
+#define USB_PRODUCT_GIGABYTE_GNWI05GS 0x800a /* GN-WI05GS */
+#define USB_PRODUCT_GIGABYTE_RT2870_1 0x800b /* RT2870 */
+#define USB_PRODUCT_GIGABYTE_GNWB31N 0x800c /* GN-WB31N */
+#define USB_PRODUCT_GIGABYTE_GNWB32L 0x800d /* GN-WB32L */
+
+/* Gigaset products */
+#define USB_PRODUCT_GIGASET_WLAN 0x0701 /* WLAN */
+#define USB_PRODUCT_GIGASET_SMCWUSBTG 0x0710 /* SMCWUSBT-G */
+#define USB_PRODUCT_GIGASET_SMCWUSBTG_NF 0x0711 /* SMCWUSBT-G (no firmware) */
+#define USB_PRODUCT_GIGASET_AR5523 0x0712 /* AR5523 */
+#define USB_PRODUCT_GIGASET_AR5523_NF 0x0713 /* AR5523 (no firmware) */
+#define USB_PRODUCT_GIGASET_RT2573 0x0722 /* RT2573 */
+#define USB_PRODUCT_GIGASET_RT3070_1 0x0740 /* RT3070 */
+#define USB_PRODUCT_GIGASET_RT3070_2 0x0744 /* RT3070 */
+
+/* Global Sun Technology products */
+#define USB_PRODUCT_GLOBALSUN_AR5523_1 0x7801 /* AR5523 */
+#define USB_PRODUCT_GLOBALSUN_AR5523_1_NF 0x7802 /* AR5523 (no firmware) */
+#define USB_PRODUCT_GLOBALSUN_AR5523_2 0x7811 /* AR5523 */
+#define USB_PRODUCT_GLOBALSUN_AR5523_2_NF 0x7812 /* AR5523 (no firmware) */
+
+/* Globespan products */
+#define USB_PRODUCT_GLOBESPAN_PRISM_GT_1 0x2000 /* PrismGT USB 2.0 WLAN */
+#define USB_PRODUCT_GLOBESPAN_PRISM_GT_2 0x2002 /* PrismGT USB 2.0 WLAN */
+
+/* G.Mate, Inc products */
+#define USB_PRODUCT_GMATE_YP3X00 0x1001 /* YP3X00 PDA */
+
+/* GoHubs products */
+#define USB_PRODUCT_GOHUBS_GOCOM232 0x1001 /* GoCOM232 Serial */
+
+/* Good Way Technology products */
+#define USB_PRODUCT_GOODWAY_GWUSB2E 0x6200 /* GWUSB2E */
+#define USB_PRODUCT_GOODWAY_RT2573 0xc019 /* RT2573 */
+
+/* Google products */
+#define USB_PRODUCT_GOOGLE_NEXUSONE 0x4e11 /* Nexus One */
+
+/* Gravis products */
+#define USB_PRODUCT_GRAVIS_GAMEPADPRO 0x4001 /* GamePad Pro */
+
+/* GREENHOUSE products */
+#define USB_PRODUCT_GREENHOUSE_KANA21 0x0001 /* CF-writer with MP3 */
+
+/* Griffin Technology */
+#define USB_PRODUCT_GRIFFIN_IMATE 0x0405 /* iMate, ADB Adapter */
+
+/* Guillemot Corporation */
+#define USB_PRODUCT_GUILLEMOT_DALEADER 0xa300 /* DA Leader */
+#define USB_PRODUCT_GUILLEMOT_HWGUSB254 0xe000 /* HWGUSB2-54 WLAN */
+#define USB_PRODUCT_GUILLEMOT_HWGUSB254LB 0xe010 /* HWGUSB2-54-LB */
+#define USB_PRODUCT_GUILLEMOT_HWGUSB254V2AP 0xe020 /* HWGUSB2-54V2-AP */
+#define USB_PRODUCT_GUILLEMOT_HWNU300 0xe030 /* HWNU-300 */
+
+/* Hagiwara products */
+#define USB_PRODUCT_HAGIWARA_FGSM 0x0002 /* FlashGate SmartMedia Card Reader */
+#define USB_PRODUCT_HAGIWARA_FGCF 0x0003 /* FlashGate CompactFlash Card Reader */
+#define USB_PRODUCT_HAGIWARA_FG 0x0005 /* FlashGate */
+
+/* HAL Corporation products */
+#define USB_PRODUCT_HAL_IMR001 0x0011 /* Crossam2+USB IR commander */
+
+/* Handspring, Inc. */
+#define USB_PRODUCT_HANDSPRING_VISOR 0x0100 /* Handspring Visor */
+#define USB_PRODUCT_HANDSPRING_TREO 0x0200 /* Handspring Treo */
+#define USB_PRODUCT_HANDSPRING_TREO600 0x0300 /* Handspring Treo 600 */
+
+/* Hauppauge Computer Works */
+#define USB_PRODUCT_HAUPPAUGE_WINTV_USB_FM 0x4d12 /* WinTV USB FM */
+#define USB_PRODUCT_HAUPPAUGE2_NOVAT500 0x9580 /* NovaT 500Stick */
+
+/* Hawking Technologies products */
+#define USB_PRODUCT_HAWKING_RT2870_1 0x0001 /* RT2870 */
+#define USB_PRODUCT_HAWKING_RT2870_2 0x0003 /* RT2870 */
+#define USB_PRODUCT_HAWKING_HWUN2 0x0009 /* HWUN2 */
+#define USB_PRODUCT_HAWKING_RT3070 0x000b /* RT3070 */
+#define USB_PRODUCT_HAWKING_UF100 0x400c /* 10/100 USB Ethernet */
+
+/* HID Global GmbH products */
+#define USB_PRODUCT_HIDGLOBAL_CM2020 0x0596 /* Omnikey Cardman 2020 */
+#define USB_PRODUCT_HIDGLOBAL_CM6020 0x1784 /* Omnikey Cardman 6020 */
+
+/* Hitachi, Ltd. products */
+#define USB_PRODUCT_HITACHI_DVDCAM_DZ_MV100A 0x0004 /* DVD-CAM DZ-MV100A Camcorder */
+#define USB_PRODUCT_HITACHI_DVDCAM_USB 0x001e /* DVDCAM USB HS Interface */
+
+/* HP products */
+#define USB_PRODUCT_HP_895C 0x0004 /* DeskJet 895C */
+#define USB_PRODUCT_HP_4100C 0x0101 /* Scanjet 4100C */
+#define USB_PRODUCT_HP_S20 0x0102 /* Photosmart S20 */
+#define USB_PRODUCT_HP_880C 0x0104 /* DeskJet 880C */
+#define USB_PRODUCT_HP_4200C 0x0105 /* ScanJet 4200C */
+#define USB_PRODUCT_HP_CDWRITERPLUS 0x0107 /* CD-Writer Plus */
+#define USB_PRODUCT_HP_KBDHUB 0x010c /* Multimedia Keyboard Hub */
+#define USB_PRODUCT_HP_G55XI 0x0111 /* OfficeJet G55xi */
+#define USB_PRODUCT_HP_HN210W 0x011c /* HN210W 802.11b WLAN */
+#define USB_PRODUCT_HP_49GPLUS 0x0121 /* 49g+ graphing calculator */
+#define USB_PRODUCT_HP_6200C 0x0201 /* ScanJet 6200C */
+#define USB_PRODUCT_HP_S20b 0x0202 /* PhotoSmart S20 */
+#define USB_PRODUCT_HP_815C 0x0204 /* DeskJet 815C */
+#define USB_PRODUCT_HP_3300C 0x0205 /* ScanJet 3300C */
+#define USB_PRODUCT_HP_CDW8200 0x0207 /* CD-Writer Plus 8200e */
+#define USB_PRODUCT_HP_MMKEYB 0x020c /* Multimedia keyboard */
+#define USB_PRODUCT_HP_1220C 0x0212 /* DeskJet 1220C */
+#define USB_PRODUCT_HP_810C 0x0304 /* DeskJet 810C/812C */
+#define USB_PRODUCT_HP_4300C 0x0305 /* Scanjet 4300C */
+#define USB_PRODUCT_HP_CDW4E 0x0307 /* CD-Writer+ CD-4e */
+#define USB_PRODUCT_HP_G85XI 0x0311 /* OfficeJet G85xi */
+#define USB_PRODUCT_HP_1200 0x0317 /* LaserJet 1200 */
+#define USB_PRODUCT_HP_5200C 0x0401 /* Scanjet 5200C */
+#define USB_PRODUCT_HP_830C 0x0404 /* DeskJet 830C */
+#define USB_PRODUCT_HP_3400CSE 0x0405 /* ScanJet 3400cse */
+#define USB_PRODUCT_HP_6300C 0x0601 /* Scanjet 6300C */
+#define USB_PRODUCT_HP_840C 0x0604 /* DeskJet 840c */
+#define USB_PRODUCT_HP_2200C 0x0605 /* ScanJet 2200C */
+#define USB_PRODUCT_HP_5300C 0x0701 /* Scanjet 5300C */
+#define USB_PRODUCT_HP_4400C 0x0705 /* Scanjet 4400C */
+#define USB_PRODUCT_HP_4470C 0x0805 /* Scanjet 4470C */
+#define USB_PRODUCT_HP_82x0C 0x0b01 /* Scanjet 82x0C */
+#define USB_PRODUCT_HP_2300D 0x0b17 /* Laserjet 2300d */
+#define USB_PRODUCT_HP_970CSE 0x1004 /* Deskjet 970Cse */
+#define USB_PRODUCT_HP_5400C 0x1005 /* Scanjet 5400C */
+#define USB_PRODUCT_HP_2215 0x1016 /* iPAQ 22xx/Jornada 548 */
+#define USB_PRODUCT_HP_568J 0x1116 /* Jornada 568 */
+#define USB_PRODUCT_HP_930C 0x1204 /* DeskJet 930c */
+#define USB_PRODUCT_HP_P2000U 0x1801 /* Inkjet P-2000U */
+#define USB_PRODUCT_HP_HS2300 0x1e1d /* HS2300 HSDPA (aka MC8775) */
+#define USB_PRODUCT_HP_640C 0x2004 /* DeskJet 640c */
+#define USB_PRODUCT_HP_4670V 0x3005 /* ScanJet 4670v */
+#define USB_PRODUCT_HP_P1100 0x3102 /* Photosmart P1100 */
+#define USB_PRODUCT_HP_LD220 0x3524 /* LD220 POS Display */
+#define USB_PRODUCT_HP_OJ4215 0x3d11 /* OfficeJet 4215 */
+#define USB_PRODUCT_HP_HN210E 0x811c /* Ethernet HN210E */
+#define USB_PRODUCT_HP2_C500 0x6002 /* PhotoSmart C500 */
+#define USB_PRODUCT_HP_EV2200 0x1b1d /* ev2200 HSDPA (aka MC5720) */
+
+/* HTC products */
+#define USB_PRODUCT_HTC_WINMOBILE 0x00ce /* HTC USB Sync */
+#define USB_PRODUCT_HTC_PPC6700MODEM 0x00cf /* PPC6700 Modem */
+#define USB_PRODUCT_HTC_SMARTPHONE 0x0a51 /* SmartPhone USB Sync */
+#define USB_PRODUCT_HTC_WIZARD 0x0bce /* HTC Wizard USB Sync */
+#define USB_PRODUCT_HTC_LEGENDSYNC 0x0c97 /* HTC Legend USB Sync */
+#define USB_PRODUCT_HTC_LEGEND 0x0ff9 /* HTC Legend */
+#define USB_PRODUCT_HTC_LEGENDINTERNET 0x0ffe /* HTC Legend Internet Sharing */
+
+/* HUAWEI products */
+#define USB_PRODUCT_HUAWEI_MOBILE 0x1001 /* Huawei Mobile */
+#define USB_PRODUCT_HUAWEI_E220 0x1003 /* HSDPA modem */
+#define USB_PRODUCT_HUAWEI_E220BIS 0x1004 /* HSDPA modem */
+#define USB_PRODUCT_HUAWEI_E1401 0x1401 /* 3G modem */
+#define USB_PRODUCT_HUAWEI_E1402 0x1402 /* 3G modem */
+#define USB_PRODUCT_HUAWEI_E1403 0x1403 /* 3G modem */
+#define USB_PRODUCT_HUAWEI_E1404 0x1404 /* 3G modem */
+#define USB_PRODUCT_HUAWEI_E1405 0x1405 /* 3G modem */
+#define USB_PRODUCT_HUAWEI_E1406 0x1406 /* 3G modem */
+#define USB_PRODUCT_HUAWEI_E1407 0x1407 /* 3G modem */
+#define USB_PRODUCT_HUAWEI_E1408 0x1408 /* 3G modem */
+#define USB_PRODUCT_HUAWEI_E1409 0x1409 /* 3G modem */
+#define USB_PRODUCT_HUAWEI_E140A 0x140a /* 3G modem */
+#define USB_PRODUCT_HUAWEI_E140B 0x140b /* 3G modem */
+#define USB_PRODUCT_HUAWEI_E180V 0x140c /* E180V */
+#define USB_PRODUCT_HUAWEI_E140D 0x140d /* 3G modem */
+#define USB_PRODUCT_HUAWEI_E140E 0x140e /* 3G modem */
+#define USB_PRODUCT_HUAWEI_E140F 0x140f /* 3G modem */
+#define USB_PRODUCT_HUAWEI_E1410 0x1410 /* 3G modem */
+#define USB_PRODUCT_HUAWEI_E1411 0x1411 /* 3G modem */
+#define USB_PRODUCT_HUAWEI_E1412 0x1412 /* 3G modem */
+#define USB_PRODUCT_HUAWEI_E1413 0x1413 /* 3G modem */
+#define USB_PRODUCT_HUAWEI_E1414 0x1414 /* 3G modem */
+#define USB_PRODUCT_HUAWEI_E1415 0x1415 /* 3G modem */
+#define USB_PRODUCT_HUAWEI_E1416 0x1416 /* 3G modem */
+#define USB_PRODUCT_HUAWEI_E1417 0x1417 /* 3G modem */
+#define USB_PRODUCT_HUAWEI_E1418 0x1418 /* 3G modem */
+#define USB_PRODUCT_HUAWEI_E1419 0x1419 /* 3G modem */
+#define USB_PRODUCT_HUAWEI_E141A 0x141a /* 3G modem */
+#define USB_PRODUCT_HUAWEI_E141B 0x141b /* 3G modem */
+#define USB_PRODUCT_HUAWEI_E141C 0x141c /* 3G modem */
+#define USB_PRODUCT_HUAWEI_E141D 0x141d /* 3G modem */
+#define USB_PRODUCT_HUAWEI_E141E 0x141e /* 3G modem */
+#define USB_PRODUCT_HUAWEI_E141F 0x141f /* 3G modem */
+#define USB_PRODUCT_HUAWEI_E1420 0x1420 /* 3G modem */
+#define USB_PRODUCT_HUAWEI_E1421 0x1421 /* 3G modem */
+#define USB_PRODUCT_HUAWEI_E1422 0x1422 /* 3G modem */
+#define USB_PRODUCT_HUAWEI_E1423 0x1423 /* 3G modem */
+#define USB_PRODUCT_HUAWEI_E1424 0x1424 /* 3G modem */
+#define USB_PRODUCT_HUAWEI_E1425 0x1425 /* 3G modem */
+#define USB_PRODUCT_HUAWEI_E1426 0x1426 /* 3G modem */
+#define USB_PRODUCT_HUAWEI_E1427 0x1427 /* 3G modem */
+#define USB_PRODUCT_HUAWEI_E1428 0x1428 /* 3G modem */
+#define USB_PRODUCT_HUAWEI_E1429 0x1429 /* 3G modem */
+#define USB_PRODUCT_HUAWEI_E142A 0x142a /* 3G modem */
+#define USB_PRODUCT_HUAWEI_E142B 0x142b /* 3G modem */
+#define USB_PRODUCT_HUAWEI_E142C 0x142c /* 3G modem */
+#define USB_PRODUCT_HUAWEI_E142D 0x142d /* 3G modem */
+#define USB_PRODUCT_HUAWEI_E142E 0x142e /* 3G modem */
+#define USB_PRODUCT_HUAWEI_E142F 0x142f /* 3G modem */
+#define USB_PRODUCT_HUAWEI_E1430 0x1430 /* 3G modem */
+#define USB_PRODUCT_HUAWEI_E1431 0x1431 /* 3G modem */
+#define USB_PRODUCT_HUAWEI_E1432 0x1432 /* 3G modem */
+#define USB_PRODUCT_HUAWEI_E1433 0x1433 /* 3G modem */
+#define USB_PRODUCT_HUAWEI_E1434 0x1434 /* 3G modem */
+#define USB_PRODUCT_HUAWEI_E1435 0x1435 /* 3G modem */
+#define USB_PRODUCT_HUAWEI_E1436 0x1436 /* 3G modem */
+#define USB_PRODUCT_HUAWEI_E1437 0x1437 /* 3G modem */
+#define USB_PRODUCT_HUAWEI_E1438 0x1438 /* 3G modem */
+#define USB_PRODUCT_HUAWEI_E1439 0x1439 /* 3G modem */
+#define USB_PRODUCT_HUAWEI_E143A 0x143a /* 3G modem */
+#define USB_PRODUCT_HUAWEI_E143B 0x143b /* 3G modem */
+#define USB_PRODUCT_HUAWEI_E143C 0x143c /* 3G modem */
+#define USB_PRODUCT_HUAWEI_E143D 0x143d /* 3G modem */
+#define USB_PRODUCT_HUAWEI_E143E 0x143e /* 3G modem */
+#define USB_PRODUCT_HUAWEI_E143F 0x143f /* 3G modem */
+#define USB_PRODUCT_HUAWEI_E1752 0x1446 /* 3G modem */
+#define USB_PRODUCT_HUAWEI_K3765 0x1465 /* 3G modem */
+#define USB_PRODUCT_HUAWEI_E1820 0x14ac /* E1820 HSPA+ USB Slider */
+#define USB_PRODUCT_HUAWEI_K3765_INIT 0x1520 /* K3765 Initial */
+
+/* HUAWEI 3com products */
+#define USB_PRODUCT_HUAWEI3COM_WUB320G 0x0009 /* Aolynk WUB320g */
+
+/* IBM Corporation */
+#define USB_PRODUCT_IBM_USBCDROMDRIVE 0x4427 /* USB CD-ROM Drive */
+
+/* Imagination Technologies products */
+#define USB_PRODUCT_IMAGINATION_DBX1 0x2107 /* DBX1 DSP core */
+
+/* Inside Out Networks products */
+#define USB_PRODUCT_INSIDEOUT_EDGEPORT4 0x0001 /* EdgePort/4 serial ports */
+
+/* In-System products */
+#define USB_PRODUCT_INSYSTEM_F5U002 0x0002 /* Parallel printer */
+#define USB_PRODUCT_INSYSTEM_ATAPI 0x0031 /* ATAPI Adapter */
+#define USB_PRODUCT_INSYSTEM_ISD110 0x0200 /* IDE Adapter ISD110 */
+#define USB_PRODUCT_INSYSTEM_ISD105 0x0202 /* IDE Adapter ISD105 */
+#define USB_PRODUCT_INSYSTEM_USBCABLE 0x081a /* USB cable */
+#define USB_PRODUCT_INSYSTEM_STORAGE_V2 0x5701 /* USB Storage Adapter V2 */
+
+/* Intel products */
+#define USB_PRODUCT_INTEL_EASYPC_CAMERA 0x0110 /* Easy PC Camera */
+#define USB_PRODUCT_INTEL_TESTBOARD 0x9890 /* 82930 test board */
+#define USB_PRODUCT_INTEL2_IRMH 0x0020 /* Integrated Rate Matching Hub */
+
+/* Intersil products */
+#define USB_PRODUCT_INTERSIL_PRISM_GT 0x1000 /* PrismGT USB 2.0 WLAN */
+#define USB_PRODUCT_INTERSIL_PRISM_2X 0x3642 /* Prism2.x or Atmel WLAN */
+
+/* Intrepid Control Systems products */
+#define USB_PRODUCT_INTREPIDCS_VALUECAN 0x0601 /* ValueCAN CAN bus interface */
+#define USB_PRODUCT_INTREPIDCS_NEOVI 0x0701 /* NeoVI Blue vehicle bus interface */
+
+/* I/O DATA products */
+#define USB_PRODUCT_IODATA_IU_CD2 0x0204 /* DVD Multi-plus unit iU-CD2 */
+#define USB_PRODUCT_IODATA_DVR_UEH8 0x0206 /* DVD Multi-plus unit DVR-UEH8 */
+#define USB_PRODUCT_IODATA_USBSSMRW 0x0314 /* USB-SSMRW SD-card */
+#define USB_PRODUCT_IODATA_USBSDRW 0x031e /* USB-SDRW SD-card */
+#define USB_PRODUCT_IODATA_USBETT 0x0901 /* USB ETT */
+#define USB_PRODUCT_IODATA_USBETTX 0x0904 /* USB ETTX */
+#define USB_PRODUCT_IODATA_USBETTXS 0x0913 /* USB ETTX */
+#define USB_PRODUCT_IODATA_USBWNB11A 0x0919 /* USB WN-B11 */
+#define USB_PRODUCT_IODATA_USBWNB11 0x0922 /* USB Airport WN-B11 */
+#define USB_PRODUCT_IODATA_ETGUS2 0x0930 /* ETG-US2 */
+#define USB_PRODUCT_IODATA_RT3072_1 0x0944 /* RT3072 */
+#define USB_PRODUCT_IODATA_RT3072_2 0x0945 /* RT3072 */
+#define USB_PRODUCT_IODATA_RT3072_3 0x0947 /* RT3072 */
+#define USB_PRODUCT_IODATA_RT3072_4 0x0948 /* RT3072 */
+#define USB_PRODUCT_IODATA_USBRSAQ 0x0a03 /* Serial USB-RSAQ1 */
+#define USB_PRODUCT_IODATA_USBRSAQ5 0x0a0e /* Serial USB-RSAQ5 */
+#define USB_PRODUCT_IODATA2_USB2SC 0x0a09 /* USB2.0-SCSI Bridge USB2-SC */
+
+/* Iomega products */
+#define USB_PRODUCT_IOMEGA_ZIP100 0x0001 /* Zip 100 */
+#define USB_PRODUCT_IOMEGA_ZIP250 0x0030 /* Zip 250 */
+
+/* Integrated System Solution Corp. products */
+#define USB_PRODUCT_ISSC_ISSCBTA 0x1001 /* Bluetooth USB Adapter */
+
+/* iTegno products */
+#define USB_PRODUCT_ITEGNO_WM1080A 0x1080 /* WM1080A GSM/GPRS modem */
+#define USB_PRODUCT_ITEGNO_WM2080A 0x2080 /* WM2080A CDMA modem */
+
+/* Ituner networks products */
+#define USB_PRODUCT_ITUNERNET_USBLCD2X20 0x0002 /* USB-LCD 2x20 */
+#define USB_PRODUCT_ITUNERNET_USBLCD4X20 0xc001 /* USB-LCD 4x20 */
+
+/* Jablotron products */
+#define USB_PRODUCT_JABLOTRON_PC60B 0x0001 /* PC-60B */
+
+/* Jaton products */
+#define USB_PRODUCT_JATON_EDA 0x5704 /* Ethernet */
+
+/* JMicron products */
+#define USB_PRODUCT_JMICRON_JM20336 0x2336 /* USB to SATA Bridge */
+#define USB_PRODUCT_JMICRON_JM20337 0x2338 /* USB to ATA/ATAPI Bridge */
+
+/* JVC products */
+#define USB_PRODUCT_JVC_GR_DX95 0x000a /* GR-DX95 */
+#define USB_PRODUCT_JVC_MP_PRX1 0x3008 /* MP-PRX1 Ethernet */
+
+/* JRC products */
+#define USB_PRODUCT_JRC_AH_J3001V_J3002V 0x0001 /* AirH PHONE AH-J3001V/J3002V */
+
+/* Kawatsu products */
+#define USB_PRODUCT_KAWATSU_MH4000P 0x0003 /* MiniHub 4000P */
+
+/* Keisokugiken Corp. products */
+#define USB_PRODUCT_KEISOKUGIKEN_USBDAQ 0x0068 /* HKS-0200 USBDAQ */
+
+/* Kensington products */
+#define USB_PRODUCT_KENSINGTON_ORBIT 0x1003 /* Orbit USB/PS2 trackball */
+#define USB_PRODUCT_KENSINGTON_TURBOBALL 0x1005 /* TurboBall */
+
+/* Keyspan products */
+#define USB_PRODUCT_KEYSPAN_USA28_NF 0x0101 /* USA-28 serial Adapter (no firmware) */
+#define USB_PRODUCT_KEYSPAN_USA28X_NF 0x0102 /* USA-28X serial Adapter (no firmware) */
+#define USB_PRODUCT_KEYSPAN_USA19_NF 0x0103 /* USA-19 serial Adapter (no firmware) */
+#define USB_PRODUCT_KEYSPAN_USA18_NF 0x0104 /* USA-18 serial Adapter (no firmware) */
+#define USB_PRODUCT_KEYSPAN_USA18X_NF 0x0105 /* USA-18X serial Adapter (no firmware) */
+#define USB_PRODUCT_KEYSPAN_USA19W_NF 0x0106 /* USA-19W serial Adapter (no firmware) */
+#define USB_PRODUCT_KEYSPAN_USA19 0x0107 /* USA-19 serial Adapter */
+#define USB_PRODUCT_KEYSPAN_USA19W 0x0108 /* USA-19W serial Adapter */
+#define USB_PRODUCT_KEYSPAN_USA49W_NF 0x0109 /* USA-49W serial Adapter (no firmware) */
+#define USB_PRODUCT_KEYSPAN_USA49W 0x010a /* USA-49W serial Adapter */
+#define USB_PRODUCT_KEYSPAN_USA19QI_NF 0x010b /* USA-19QI serial Adapter (no firmware) */
+#define USB_PRODUCT_KEYSPAN_USA19QI 0x010c /* USA-19QI serial Adapter */
+#define USB_PRODUCT_KEYSPAN_USA19Q_NF 0x010d /* USA-19Q serial Adapter (no firmware) */
+#define USB_PRODUCT_KEYSPAN_USA19Q 0x010e /* USA-19Q serial Adapter */
+#define USB_PRODUCT_KEYSPAN_USA28 0x010f /* USA-28 serial Adapter */
+#define USB_PRODUCT_KEYSPAN_USA28XXB 0x0110 /* USA-28X/XB serial Adapter */
+#define USB_PRODUCT_KEYSPAN_USA18 0x0111 /* USA-18 serial Adapter */
+#define USB_PRODUCT_KEYSPAN_USA18X 0x0112 /* USA-18X serial Adapter */
+#define USB_PRODUCT_KEYSPAN_USA28XB_NF 0x0113 /* USA-28XB serial Adapter (no firmware) */
+#define USB_PRODUCT_KEYSPAN_USA28XA_NF 0x0114 /* USA-28XA serial Adapter (no firmware) */
+#define USB_PRODUCT_KEYSPAN_USA28XA 0x0115 /* USA-28XA serial Adapter */
+#define USB_PRODUCT_KEYSPAN_USA18XA_NF 0x0116 /* USA-18XA serial Adapter (no firmware) */
+#define USB_PRODUCT_KEYSPAN_USA18XA 0x0117 /* USA-18XA serial Adapter */
+#define USB_PRODUCT_KEYSPAN_USA19QW_NF 0x0118 /* USA-19QW serial Adapter (no firmware) */
+#define USB_PRODUCT_KEYSPAN_USA19QW 0x0119 /* USA-19QW serial Adapter */
+#define USB_PRODUCT_KEYSPAN_USA19HA 0x0121 /* USA-19HS serial Adapter */
+#define USB_PRODUCT_KEYSPAN_UIA10 0x0201 /* UIA-10 remote control */
+#define USB_PRODUCT_KEYSPAN_UIA11 0x0202 /* UIA-11 remote control */
+
+/* Kingston products */
+#define USB_PRODUCT_KINGSTON_XX1 0x0008 /* Ethernet */
+#define USB_PRODUCT_KINGSTON_KNU101TX 0x000a /* KNU101TX USB Ethernet */
+
+/* Kawasaki LSI products */
+#define USB_PRODUCT_KLSI_DUH3E10BT 0x0008 /* USB Ethernet */
+#define USB_PRODUCT_KLSI_DUH3E10BTN 0x0009 /* USB Ethernet */
+
+/* Kodak products */
+#define USB_PRODUCT_KODAK_DC220 0x0100 /* Digital Science DC220 */
+#define USB_PRODUCT_KODAK_DC260 0x0110 /* Digital Science DC260 */
+#define USB_PRODUCT_KODAK_DC265 0x0111 /* Digital Science DC265 */
+#define USB_PRODUCT_KODAK_DC290 0x0112 /* Digital Science DC290 */
+#define USB_PRODUCT_KODAK_DC240 0x0120 /* Digital Science DC240 */
+#define USB_PRODUCT_KODAK_DC280 0x0130 /* Digital Science DC280 */
+
+/* Konica Corp. products */
+#define USB_PRODUCT_KONICA_CAMERA 0x0720 /* Digital Color Camera */
+
+/* KYE products */
+#define USB_PRODUCT_KYE_NICHE 0x0001 /* Niche mouse */
+#define USB_PRODUCT_KYE_NETSCROLL 0x0003 /* Genius NetScroll mouse */
+#define USB_PRODUCT_KYE_FLIGHT2000 0x1004 /* Flight 2000 joystick */
+#define USB_PRODUCT_KYE_VIVIDPRO 0x2001 /* ColorPage Vivid-Pro scanner */
+
+/* Kyocera products */
+#define USB_PRODUCT_KYOCERA_FINECAM_S3X 0x0100 /* Finecam S3x */
+#define USB_PRODUCT_KYOCERA_FINECAM_S4 0x0101 /* Finecam S4 */
+#define USB_PRODUCT_KYOCERA_FINECAM_S5 0x0103 /* Finecam S5 */
+#define USB_PRODUCT_KYOCERA_FINECAM_L3 0x0105 /* Finecam L3 */
+#define USB_PRODUCT_KYOCERA_AHK3001V 0x0203 /* AH-K3001V */
+#define USB_PRODUCT_KYOCERA2_CDMA_MSM_K 0x17da /* Qualcomm Kyocera CDMA Technologies MSM */
+#define USB_PRODUCT_KYOCERA2_KPC680 0x180a /* Qualcomm Kyocera CDMA Technologies MSM */
+
+/* LaCie products */
+#define USB_PRODUCT_LACIE_HD 0xa601 /* Hard Disk */
+#define USB_PRODUCT_LACIE_CDRW 0xa602 /* CD R/W */
+
+/* Leadtek products */
+#define USB_PRODUCT_LEADTEK_9531 0x2101 /* 9531 GPS */
+
+/* Lexar products */
+#define USB_PRODUCT_LEXAR_JUMPSHOT 0x0001 /* jumpSHOT CompactFlash Reader */
+#define USB_PRODUCT_LEXAR_CF_READER 0xb002 /* USB CF Reader */
+
+/* Lexmark products */
+#define USB_PRODUCT_LEXMARK_S2450 0x0009 /* Optra S 2450 */
+
+/* Liebert products */
+#define USB_PRODUCT_LIEBERT_POWERSURE_PXT 0xffff /* PowerSure Personal XT */
+
+/* Linksys products */
+#define USB_PRODUCT_LINKSYS_MAUSB2 0x0105 /* Camedia MAUSB-2 */
+#define USB_PRODUCT_LINKSYS_USB10TX1 0x200c /* USB10TX */
+#define USB_PRODUCT_LINKSYS_USB10T 0x2202 /* USB10T Ethernet */
+#define USB_PRODUCT_LINKSYS_USB100TX 0x2203 /* USB100TX Ethernet */
+#define USB_PRODUCT_LINKSYS_USB100H1 0x2204 /* USB100H1 Ethernet/HPNA */
+#define USB_PRODUCT_LINKSYS_USB10TA 0x2206 /* USB10TA Ethernet */
+#define USB_PRODUCT_LINKSYS_USB10TX2 0x400b /* USB10TX */
+#define USB_PRODUCT_LINKSYS2_WUSB11 0x2219 /* WUSB11 Wireless Adapter */
+#define USB_PRODUCT_LINKSYS2_USB200M 0x2226 /* USB 2.0 10/100 Ethernet */
+#define USB_PRODUCT_LINKSYS3_WUSB11v28 0x2233 /* WUSB11 v2.8 Wireless Adapter */
+#define USB_PRODUCT_LINKSYS4_USB1000 0x0039 /* USB1000 */
+#define USB_PRODUCT_LINKSYS4_WUSB100 0x0070 /* WUSB100 */
+#define USB_PRODUCT_LINKSYS4_WUSB600N 0x0071 /* WUSB600N */
+#define USB_PRODUCT_LINKSYS4_WUSB54GCV2 0x0073 /* WUSB54GC v2 */
+#define USB_PRODUCT_LINKSYS4_WUSB54GCV3 0x0077 /* WUSB54GC v3 */
+#define USB_PRODUCT_LINKSYS4_RT3070 0x0078 /* RT3070 */
+#define USB_PRODUCT_LINKSYS4_WUSB600NV2 0x0079 /* WUSB600N v2 */
+
+/* Logitech products */
+#define USB_PRODUCT_LOGITECH_M2452 0x0203 /* M2452 keyboard */
+#define USB_PRODUCT_LOGITECH_M4848 0x0301 /* M4848 mouse */
+#define USB_PRODUCT_LOGITECH_PAGESCAN 0x040f /* PageScan */
+#define USB_PRODUCT_LOGITECH_QUICKCAMWEB 0x0801 /* QuickCam Web */
+#define USB_PRODUCT_LOGITECH_QUICKCAMPRO 0x0810 /* QuickCam Pro */
+#define USB_PRODUCT_LOGITECH_QUICKCAMEXP 0x0840 /* QuickCam Express */
+#define USB_PRODUCT_LOGITECH_QUICKCAM 0x0850 /* QuickCam */
+#define USB_PRODUCT_LOGITECH_QUICKCAMPRO3 0x0990 /* QuickCam Pro 9000 */
+#define USB_PRODUCT_LOGITECH_N43 0xc000 /* N43 */
+#define USB_PRODUCT_LOGITECH_N48 0xc001 /* N48 mouse */
+#define USB_PRODUCT_LOGITECH_MBA47 0xc002 /* M-BA47 mouse */
+#define USB_PRODUCT_LOGITECH_WMMOUSE 0xc004 /* WingMan Gaming Mouse */
+#define USB_PRODUCT_LOGITECH_BD58 0xc00c /* BD58 mouse */
+#define USB_PRODUCT_LOGITECH_UN58A 0xc030 /* iFeel Mouse */
+#define USB_PRODUCT_LOGITECH_UN53B 0xc032 /* iFeel MouseMan */
+#define USB_PRODUCT_LOGITECH_WMPAD 0xc208 /* WingMan GamePad Extreme */
+#define USB_PRODUCT_LOGITECH_WMRPAD 0xc20a /* WingMan RumblePad */
+#define USB_PRODUCT_LOGITECH_WMJOY 0xc281 /* WingMan Force joystick */
+#define USB_PRODUCT_LOGITECH_BB13 0xc401 /* USB-PS/2 Trackball */
+#define USB_PRODUCT_LOGITECH_RK53 0xc501 /* Cordless mouse */
+#define USB_PRODUCT_LOGITECH_RB6 0xc503 /* Cordless keyboard */
+#define USB_PRODUCT_LOGITECH_MX700 0xc506 /* Cordless optical mouse */
+#define USB_PRODUCT_LOGITECH_QUICKCAMPRO2 0xd001 /* QuickCam Pro */
+
+/* Logitec Corp. products */
+#define USB_PRODUCT_LOGITEC_LDR_H443SU2 0x0033 /* DVD Multi-plus unit LDR-H443SU2 */
+#define USB_PRODUCT_LOGITEC_LDR_H443U2 0x00b3 /* DVD Multi-plus unit LDR-H443U2 */
+#define USB_PRODUCT_LOGITEC_LAN_GTJU2A 0x0160 /* LAN-GTJ/U2A Ethernet */
+#define USB_PRODUCT_LOGITEC_RT2870_1 0x0162 /* RT2870 */
+#define USB_PRODUCT_LOGITEC_RT2870_2 0x0163 /* RT2870 */
+#define USB_PRODUCT_LOGITEC_RT2870_3 0x0164 /* RT2870 */
+
+/* Longcheer Holdings, Ltd. products */
+#define USB_PRODUCT_LONGCHEER_WM66 0x6061 /* Longcheer WM66 HSDPA */
+#define USB_PRODUCT_LONGCHEER_W14 0x9603 /* Mobilcom W14 */
+#define USB_PRODUCT_LONGCHEER_DISK 0xf000 /* Driver disk */
+
+/* Lucent products */
+#define USB_PRODUCT_LUCENT_EVALKIT 0x1001 /* USS-720 evaluation kit */
+
+/* Luwen products */
+#define USB_PRODUCT_LUWEN_EASYDISK 0x0005 /* EasyDisk */
+
+/* Macally products */
+#define USB_PRODUCT_MACALLY_MOUSE1 0x0101 /* mouse */
+
+/* Marvell Technology Group, Ltd. products */
+#define USB_PRODUCT_MARVELL_SHEEVAPLUG 0x9e8f /* SheevaPlug serial interface */
+
+/* Matrix Orbital products */
+#define USB_PRODUCT_MATRIXORBITAL_MOUA 0x0153 /* Matrix Orbital MOU-Axxxx LCD displays */
+
+/* MCT Corp. */
+#define USB_PRODUCT_MCT_HUB0100 0x0100 /* Hub */
+#define USB_PRODUCT_MCT_DU_H3SP_USB232 0x0200 /* D-Link DU-H3SP USB BAY Hub */
+#define USB_PRODUCT_MCT_USB232 0x0210 /* USB-232 Interface */
+#define USB_PRODUCT_MCT_SITECOM_USB232 0x0230 /* Sitecom USB-232 Products */
+
+/* Meizu Electronics */
+#define USB_PRODUCT_MEIZU_M6_SL 0x0140 /* MiniPlayer M6 (SL) */
+
+/* Melco, Inc. products */
+#define USB_PRODUCT_MELCO_LUATX1 0x0001 /* LUA-TX Ethernet */
+#define USB_PRODUCT_MELCO_LUATX5 0x0005 /* LUA-TX Ethernet */
+#define USB_PRODUCT_MELCO_LUA2TX5 0x0009 /* LUA2-TX Ethernet */
+#define USB_PRODUCT_MELCO_LUAKTX 0x0012 /* LUA-KTX Ethernet */
+#define USB_PRODUCT_MELCO_DUBPXXG 0x001c /* DUB-PxxG */
+#define USB_PRODUCT_MELCO_LUAU2KTX 0x003d /* LUA-U2-KTX Ethernet */
+#define USB_PRODUCT_MELCO_KG54YB 0x005e /* WLI-U2-KG54-YB WLAN */
+#define USB_PRODUCT_MELCO_KG54 0x0066 /* WLI-U2-KG54 WLAN */
+#define USB_PRODUCT_MELCO_KG54AI 0x0067 /* WLI-U2-KG54-AI WLAN */
+#define USB_PRODUCT_MELCO_LUA3U2AGT 0x006e /* LUA3-U2-AGT */
+#define USB_PRODUCT_MELCO_NINWIFI 0x008b /* Nintendo Wi-Fi */
+#define USB_PRODUCT_MELCO_PCOPRS1 0x00b3 /* PC-OP-RS1 RemoteStation */
+#define USB_PRODUCT_MELCO_SG54HP 0x00d8 /* WLI-U2-SG54HP */
+#define USB_PRODUCT_MELCO_G54HP 0x00d9 /* WLI-U2-G54HP */
+#define USB_PRODUCT_MELCO_KG54L 0x00da /* WLI-U2-KG54L */
+#define USB_PRODUCT_MELCO_WLIUCG300N 0x00e8 /* WLI-UC-G300N */
+#define USB_PRODUCT_MELCO_SG54HG 0x00f4 /* WLI-U2-SG54HG */
+#define USB_PRODUCT_MELCO_WLRUCG 0x0116 /* WLR-UC-G */
+#define USB_PRODUCT_MELCO_WLRUCGAOSS 0x0119 /* WLR-UC-G-AOSS */
+#define USB_PRODUCT_MELCO_WLIUCAG300N 0x012e /* WLI-UC-AG300N */
+#define USB_PRODUCT_MELCO_RT2870_1 0x0148 /* RT2870 */
+#define USB_PRODUCT_MELCO_RT2870_2 0x0150 /* RT2870 */
+#define USB_PRODUCT_MELCO_WLIUCGN 0x015d /* WLI-UC-GN */
+
+/* Merlin products */
+#define USB_PRODUCT_MERLIN_V620 0x1110 /* Merlin V620 */
+
+/* MetaGeek products */
+#define USB_PRODUCT_METAGEEK_WISPY1B 0x083e /* MetaGeek Wi-Spy */
+#define USB_PRODUCT_METAGEEK_WISPY24X 0x083f /* MetaGeek Wi-Spy 2.4x */
+#define USB_PRODUCT_METAGEEK2_WISPYDBX 0x5000 /* MetaGeek Wi-Spy DBx */
+
+/* Metricom products */
+#define USB_PRODUCT_METRICOM_RICOCHET_GS 0x0001 /* Ricochet GS */
+
+/* MGE UPS Systems */
+#define USB_PRODUCT_MGE_UPS1 0x0001 /* MGE UPS SYSTEMS PROTECTIONCENTER 1 */
+#define USB_PRODUCT_MGE_UPS2 0xffff /* MGE UPS SYSTEMS PROTECTIONCENTER 2 */
+
+/* MEI products */
+#define USB_PRODUCT_MEI_CASHFLOW_SC 0x1100 /* Cashflow-SC Cash Acceptor */
+#define USB_PRODUCT_MEI_S2000 0x1101 /* Series 2000 Combo Acceptor */
+
+/* Micro Star International products */
+#define USB_PRODUCT_MSI_BT_DONGLE 0x1967 /* Bluetooth USB dongle */
+#define USB_PRODUCT_MSI_RT3070_1 0x3820 /* RT3070 */
+#define USB_PRODUCT_MSI_RT3070_2 0x3821 /* RT3070 */
+#define USB_PRODUCT_MSI_RT3070_8 0x3822 /* RT3070 */
+#define USB_PRODUCT_MSI_RT3070_3 0x3870 /* RT3070 */
+#define USB_PRODUCT_MSI_RT3070_9 0x3871 /* RT3070 */
+#define USB_PRODUCT_MSI_UB11B 0x6823 /* UB11B */
+#define USB_PRODUCT_MSI_RT2570 0x6861 /* RT2570 */
+#define USB_PRODUCT_MSI_RT2570_2 0x6865 /* RT2570 */
+#define USB_PRODUCT_MSI_RT2570_3 0x6869 /* RT2570 */
+#define USB_PRODUCT_MSI_RT2573_1 0x6874 /* RT2573 */
+#define USB_PRODUCT_MSI_RT2573_2 0x6877 /* RT2573 */
+#define USB_PRODUCT_MSI_RT3070_4 0x6899 /* RT3070 */
+#define USB_PRODUCT_MSI_RT3070_5 0x821a /* RT3070 */
+#define USB_PRODUCT_MSI_RT3070_10 0x822a /* RT3070 */
+#define USB_PRODUCT_MSI_RT3070_6 0x870a /* RT3070 */
+#define USB_PRODUCT_MSI_RT3070_11 0x871a /* RT3070 */
+#define USB_PRODUCT_MSI_RT3070_7 0x899a /* RT3070 */
+#define USB_PRODUCT_MSI_RT2573_3 0xa861 /* RT2573 */
+#define USB_PRODUCT_MSI_RT2573_4 0xa874 /* RT2573 */
+
+/* Microsoft products */
+#define USB_PRODUCT_MICROSOFT_SIDEPREC 0x0008 /* SideWinder Precision Pro */
+#define USB_PRODUCT_MICROSOFT_INTELLIMOUSE 0x0009 /* IntelliMouse */
+#define USB_PRODUCT_MICROSOFT_NATURALKBD 0x000b /* Natural Keyboard Elite */
+#define USB_PRODUCT_MICROSOFT_DDS80 0x0014 /* Digital Sound System 80 */
+#define USB_PRODUCT_MICROSOFT_SIDEWINDER 0x001a /* Sidewinder Precision Racing Wheel */
+#define USB_PRODUCT_MICROSOFT_INETPRO 0x001c /* Internet Keyboard Pro */
+#define USB_PRODUCT_MICROSOFT_TBEXPLORER 0x0024 /* Trackball Explorer */
+#define USB_PRODUCT_MICROSOFT_INTELLIEYE 0x0025 /* IntelliEye mouse */
+#define USB_PRODUCT_MICROSOFT_INETPRO2 0x002b /* Internet Keyboard Pro */
+#define USB_PRODUCT_MICROSOFT_INTELLIMOUSE5 0x0039 /* IntelliMouse 1.1 5-Button Mouse */
+#define USB_PRODUCT_MICROSOFT_WHEELMOUSE 0x0040 /* Wheel Mouse Optical */
+#define USB_PRODUCT_MICROSOFT_MN510 0x006e /* MN510 Wireless */
+#define USB_PRODUCT_MICROSOFT_700WX 0x0079 /* Palm 700WX */
+#define USB_PRODUCT_MICROSOFT_MN110 0x007a /* 10/100 USB NIC */
+#define USB_PRODUCT_MICROSOFT_WLINTELLIMOUSE 0x008c /* Wireless Optical IntelliMouse */
+#define USB_PRODUCT_MICROSOFT_WLNOTEBOOK 0x00b9 /* Wireless Optical Mouse (Model 1023) */
+#define USB_PRODUCT_MICROSOFT_COMFORT3000 0x00d1 /* Comfort Optical Mouse 3000 (Model 1043) */
+#define USB_PRODUCT_MICROSOFT_WLNOTEBOOK3 0x00d2 /* Wireless Optical Mouse 3000 (Model 1049) */
+#define USB_PRODUCT_MICROSOFT_NATURAL4000 0x00db /* Natural Ergonomic Keyboard 4000 */
+#define USB_PRODUCT_MICROSOFT_WLNOTEBOOK2 0x00e1 /* Wireless Optical Mouse 3000 (Model 1056) */
+#define USB_PRODUCT_MICROSOFT_XBOX360 0x0292 /* XBOX 360 WLAN */
+
+/* Microtech products */
+#define USB_PRODUCT_MICROTECH_SCSIDB25 0x0004 /* USB-SCSI-DB25 */
+#define USB_PRODUCT_MICROTECH_SCSIHD50 0x0005 /* USB-SCSI-HD50 */
+#define USB_PRODUCT_MICROTECH_DPCM 0x0006 /* USB CameraMate */
+#define USB_PRODUCT_MICROTECH_FREECOM 0xfc01 /* Freecom USB-IDE */
+
+/* Microtek products */
+#define USB_PRODUCT_MICROTEK_336CX 0x0094 /* Phantom 336CX - C3 scanner */
+#define USB_PRODUCT_MICROTEK_X6U 0x0099 /* ScanMaker X6 - X6U */
+#define USB_PRODUCT_MICROTEK_C6 0x009a /* Phantom C6 scanner */
+#define USB_PRODUCT_MICROTEK_336CX2 0x00a0 /* Phantom 336CX - C3 scanner */
+#define USB_PRODUCT_MICROTEK_V6USL 0x00a3 /* ScanMaker V6USL */
+#define USB_PRODUCT_MICROTEK_V6USL2 0x80a3 /* ScanMaker V6USL */
+#define USB_PRODUCT_MICROTEK_V6UL 0x80ac /* ScanMaker V6UL */
+
+/* Microtune, Inc. products */
+#define USB_PRODUCT_MICROTUNE_BT_DONGLE 0x1000 /* Bluetooth USB dongle */
+
+/* Midiman products */
+#define USB_PRODUCT_MIDIMAN_MIDISPORT2X2 0x1001 /* Midisport 2x2 */
+
+/* MindsAtWork products */
+#define USB_PRODUCT_MINDSATWORK_WALLET 0x0001 /* Digital Wallet */
+
+/* Minolta Co., Ltd. */
+#define USB_PRODUCT_MINOLTA_2300 0x4001 /* Dimage 2300 */
+#define USB_PRODUCT_MINOLTA_S304 0x4007 /* Dimage S304 */
+#define USB_PRODUCT_MINOLTA_X 0x4009 /* Dimage X */
+#define USB_PRODUCT_MINOLTA_5400 0x400e /* Dimage 5400 */
+#define USB_PRODUCT_MINOLTA_F300 0x4011 /* Dimage F300 */
+#define USB_PRODUCT_MINOLTA_E223 0x4017 /* Dimage E223 */
+
+/* Mitsumi products */
+#define USB_PRODUCT_MITSUMI_CDRRW 0x0000 /* CD-R/RW Drive */
+#define USB_PRODUCT_MITSUMI_BT_DONGLE 0x641f /* Bluetooth USB dongle */
+#define USB_PRODUCT_MITSUMI_FDD 0x6901 /* USB FDD */
+
+/* Mobile Action products */
+#define USB_PRODUCT_MOBILEACTION_MA620 0x0620 /* MA-620 Infrared Adapter */
+
+/* Mobility products */
+#define USB_PRODUCT_MOBILITY_EA 0x0204 /* Ethernet */
+#define USB_PRODUCT_MOBILITY_EASIDOCK 0x0304 /* EasiDock Ethernet */
+
+/* MosChip products */
+#define USB_PRODUCT_MOSCHIP_MCS7703 0x7703 /* MCS7703 Serial Port Adapter */
+#define USB_PRODUCT_MOSCHIP_MCS7830 0x7830 /* MCS7830 Ethernet */
+
+/* Motorola products */
+#define USB_PRODUCT_MOTOROLA_MC141555 0x1555 /* MC141555 hub controller */
+#define USB_PRODUCT_MOTOROLA_SB4100 0x4100 /* SB4100 USB Cable Modem */
+#define USB_PRODUCT_MOTOROLA2_T720C 0x2822 /* T720c */
+#define USB_PRODUCT_MOTOROLA2_A41XV32X 0x2a22 /* A41x/V32x Mobile Phones */
+#define USB_PRODUCT_MOTOROLA2_E398 0x4810 /* E398 Mobile Phone */
+#define USB_PRODUCT_MOTOROLA2_USBLAN 0x600c /* USBLAN */
+#define USB_PRODUCT_MOTOROLA2_USBLAN2 0x6027 /* USBLAN */
+#define USB_PRODUCT_MOTOROLA4_RT2770 0x9031 /* RT2770 */
+#define USB_PRODUCT_MOTOROLA4_RT3070 0x9032 /* RT3070 */
+
+/* MultiTech products */
+#define USB_PRODUCT_MULTITECH_ATLAS 0xf101 /* MT5634ZBA-USB modem */
+
+/* Mustek products */
+#define USB_PRODUCT_MUSTEK_1200CU 0x0001 /* 1200 CU scanner */
+#define USB_PRODUCT_MUSTEK_600CU 0x0002 /* 600 CU scanner */
+#define USB_PRODUCT_MUSTEK_1200USB 0x0003 /* 1200 USB scanner */
+#define USB_PRODUCT_MUSTEK_1200UB 0x0006 /* 1200 UB scanner */
+#define USB_PRODUCT_MUSTEK_1200USBPLUS 0x0007 /* 1200 USB Plus scanner */
+#define USB_PRODUCT_MUSTEK_1200CUPLUS 0x0008 /* 1200 CU Plus scanner */
+#define USB_PRODUCT_MUSTEK_BEARPAW1200F 0x0010 /* BearPaw 1200F scanner */
+#define USB_PRODUCT_MUSTEK_BEARPAW2400TA 0x0218 /* BearPaw 2400TA scanner */
+#define USB_PRODUCT_MUSTEK_BEARPAW1200TA 0x021e /* BearPaw 1200TA scanner */
+#define USB_PRODUCT_MUSTEK_600USB 0x0873 /* 600 USB scanner */
+#define USB_PRODUCT_MUSTEK_MDC800 0xa800 /* MDC-800 digital camera */
+
+/* M-Systems products */
+#define USB_PRODUCT_MSYSTEMS_DISKONKEY 0x0010 /* DiskOnKey */
+#define USB_PRODUCT_MSYSTEMS_DISKONKEY2 0x0011 /* DiskOnKey */
+
+/* Myson products */
+#define USB_PRODUCT_MYSON_HEDEN_8813 0x8813 /* USB-IDE */
+#define USB_PRODUCT_MYSON_HEDEN 0x8818 /* USB-IDE */
+#define USB_PRODUCT_MYSON_HUBREADER 0x8819 /* COMBO Card reader with USB HUB */
+#define USB_PRODUCT_MYSON_STARREADER 0x9920 /* USB flash card adapter */
+
+/* National Semiconductor */
+#define USB_PRODUCT_NATIONAL_BEARPAW1200 0x1000 /* BearPaw 1200 */
+#define USB_PRODUCT_NATIONAL_BEARPAW2400 0x1001 /* BearPaw 2400 */
+
+/* NEC products */
+#define USB_PRODUCT_NEC_HUB_0050 0x0050 /* USB 2.0 7-Port Hub */
+#define USB_PRODUCT_NEC_HUB_005A 0x005a /* USB 2.0 4-Port Hub */
+#define USB_PRODUCT_NEC_HUB 0x55aa /* hub */
+#define USB_PRODUCT_NEC_HUB_B 0x55ab /* hub */
+
+/* NEODIO products */
+#define USB_PRODUCT_NEODIO_ND3260 0x3260 /* 8-in-1 Multi-format Flash Controller */
+#define USB_PRODUCT_NEODIO_ND5010 0x5010 /* Multi-format Flash Controller */
+
+/* Neotel products */
+#define USB_PRODUCT_NEOTEL_PRIME 0x4000 /* Prime USB modem */
+
+/* Netac products */
+#define USB_PRODUCT_NETAC_CF_CARD 0x1060 /* USB-CF-Card */
+#define USB_PRODUCT_NETAC_ONLYDISK 0x0003 /* OnlyDisk */
+
+/* NetChip Technology Products */
+#define USB_PRODUCT_NETCHIP_TURBOCONNECT 0x1080 /* Turbo-Connect */
+#define USB_PRODUCT_NETCHIP_CLIK_40 0xa140 /* USB Clik! 40 */
+#define USB_PRODUCT_NETCHIP_ETHERNETGADGET 0xa4a2 /* Linux Ethernet/RNDIS gadget on pxa210/25x/26x */
+
+/* Netgear products */
+#define USB_PRODUCT_NETGEAR_EA101 0x1001 /* Ethernet */
+#define USB_PRODUCT_NETGEAR_EA101X 0x1002 /* Ethernet */
+#define USB_PRODUCT_NETGEAR_FA101 0x1020 /* Ethernet 10/100, USB1.1 */
+#define USB_PRODUCT_NETGEAR_FA120 0x1040 /* USB 2.0 Ethernet */
+#define USB_PRODUCT_NETGEAR_WG111V2_2 0x4240 /* PrismGT USB 2.0 WLAN */
+#define USB_PRODUCT_NETGEAR_WG111V3 0x4260 /* WG111v3 */
+#define USB_PRODUCT_NETGEAR_WG111U 0x4300 /* WG111U */
+#define USB_PRODUCT_NETGEAR_WG111U_NF 0x4301 /* WG111U (no firmware) */
+#define USB_PRODUCT_NETGEAR_WG111V2 0x6a00 /* WG111V2 */
+#define USB_PRODUCT_NETGEAR2_MA101 0x4100 /* MA101 */
+#define USB_PRODUCT_NETGEAR2_MA101B 0x4102 /* MA101 Rev B */
+#define USB_PRODUCT_NETGEAR3_WG111T 0x4250 /* WG111T */
+#define USB_PRODUCT_NETGEAR3_WG111T_NF 0x4251 /* WG111T (no firmware) */
+#define USB_PRODUCT_NETGEAR3_WPN111 0x5f00 /* WPN111 */
+#define USB_PRODUCT_NETGEAR3_WPN111_NF 0x5f01 /* WPN111 (no firmware) */
+#define USB_PRODUCT_NETGEAR3_WPN111_2 0x5f02 /* WPN111 */
+
+/* NetIndex products */
+#define USB_PRODUCT_NETINDEX_WS002IN 0x2001 /* Willcom WS002IN */
+
+/* NEWlink */
+#define USB_PRODUCT_NEWLINK_USB2IDEBRIDGE 0x00ff /* USB 2.0 Hard Drive Enclosure */
+
+/* Nikon products */
+#define USB_PRODUCT_NIKON_E990 0x0102 /* Digital Camera E990 */
+#define USB_PRODUCT_NIKON_LS40 0x4000 /* CoolScan LS40 ED */
+#define USB_PRODUCT_NIKON_D300 0x041a /* Digital Camera D300 */
+
+/* NovaTech Products */
+#define USB_PRODUCT_NOVATECH_NV902 0x9020 /* NovaTech NV-902W */
+#define USB_PRODUCT_NOVATECH_RT2573 0x9021 /* RT2573 */
+
+/* Nokia products */
+#define USB_PRODUCT_NOKIA_N958GB 0x0070 /* Nokia N95 8GB */
+#define USB_PRODUCT_NOKIA2_CA42 0x1234 /* CA-42 cable */
+
+/* Novatel Wireless products */
+#define USB_PRODUCT_NOVATEL_V640 0x1100 /* Merlin V640 */
+#define USB_PRODUCT_NOVATEL_CDMA_MODEM 0x1110 /* Novatel Wireless Merlin CDMA */
+#define USB_PRODUCT_NOVATEL_V620 0x1110 /* Merlin V620 */
+#define USB_PRODUCT_NOVATEL_V740 0x1120 /* Merlin V740 */
+#define USB_PRODUCT_NOVATEL_V720 0x1130 /* Merlin V720 */
+#define USB_PRODUCT_NOVATEL_U740 0x1400 /* Merlin U740 */
+#define USB_PRODUCT_NOVATEL_U740_2 0x1410 /* Merlin U740 */
+#define USB_PRODUCT_NOVATEL_U870 0x1420 /* Merlin U870 */
+#define USB_PRODUCT_NOVATEL_XU870 0x1430 /* Merlin XU870 */
+#define USB_PRODUCT_NOVATEL_X950D 0x1450 /* Merlin X950D */
+#define USB_PRODUCT_NOVATEL_ES620 0x2100 /* Expedite ES620 */
+#define USB_PRODUCT_NOVATEL_E725 0x2120 /* Expedite E725 */
+#define USB_PRODUCT_NOVATEL_ES620_2 0x2130 /* Expedite ES620 */
+#define USB_PRODUCT_NOVATEL_U720 0x2110 /* Merlin U720 */
+#define USB_PRODUCT_NOVATEL_EU730 0x2400 /* Expedite EU730 */
+#define USB_PRODUCT_NOVATEL_EU740 0x2410 /* Expedite EU740 */
+#define USB_PRODUCT_NOVATEL_EU870D 0x2420 /* Expedite EU870D */
+#define USB_PRODUCT_NOVATEL_U727 0x4100 /* Merlin U727 CDMA */
+#define USB_PRODUCT_NOVATEL_MC950D 0x4400 /* Novatel MC950D HSUPA */
+#define USB_PRODUCT_NOVATEL_ZEROCD 0x5010 /* Novatel ZeroCD */
+#define USB_PRODUCT_NOVATEL_ZEROCD2 0x5030 /* Novatel ZeroCD */
+#define USB_PRODUCT_NOVATEL_U727_2 0x5100 /* Merlin U727 CDMA */
+#define USB_PRODUCT_NOVATEL_U760 0x6000 /* Novatel U760 */
+#define USB_PRODUCT_NOVATEL_MC760 0x6002 /* Novatel MC760 */
+#define USB_PRODUCT_NOVATEL2_FLEXPACKGPS 0x0100 /* NovAtel FlexPack GPS receiver */
+
+/* O2Micro products */
+#define USB_PRODUCT_O2MICRO_OZ776_HUB 0x7761 /* OZ776 hub */
+#define USB_PRODUCT_O2MICRO_OZ776_CCID_SC 0x7772 /* OZ776 CCID SC Reader */
+
+/* Olympus products */
+#define USB_PRODUCT_OLYMPUS_C1 0x0102 /* C-1 Digital Camera */
+#define USB_PRODUCT_OLYMPUS_C700 0x0105 /* C-700 Ultra Zoom */
+
+/* OmniVision Technologies, Inc. products */
+#define USB_PRODUCT_OMNIVISION_OV511 0x0511 /* OV511 Camera */
+#define USB_PRODUCT_OMNIVISION_OV511PLUS 0xa511 /* OV511+ Camera */
+
+/* OnSpec Electronic, Inc. */
+#define USB_PRODUCT_ONSPEC_SDS_HOTFIND_D 0x0400 /* SDS-infrared.com Hotfind-D Infrared Camera */
+#define USB_PRODUCT_ONSPEC_MDCFE_B_CF_READER 0xa000 /* MDCFE-B USB CF Reader */
+#define USB_PRODUCT_ONSPEC_CFMS_RW 0xa001 /* SIIG/Datafab Memory Stick+CF Reader/Writer */
+#define USB_PRODUCT_ONSPEC_READER 0xa003 /* Datafab-based Reader */
+#define USB_PRODUCT_ONSPEC_CFSM_READER 0xa005 /* PNY/Datafab CF+SM Reader */
+#define USB_PRODUCT_ONSPEC_CFSM_READER2 0xa006 /* Simple Tech/Datafab CF+SM Reader */
+#define USB_PRODUCT_ONSPEC_MDSM_B_READER 0xa103 /* MDSM-B reader */
+#define USB_PRODUCT_ONSPEC_CFSM_COMBO 0xa109 /* USB to CF + SM Combo (LC1) */
+#define USB_PRODUCT_ONSPEC_UCF100 0xa400 /* FlashLink UCF-100 CompactFlash Reader */
+#define USB_PRODUCT_ONSPEC2_IMAGEMATE_SDDR55 0xa103 /* ImageMate SDDR55 */
+
+/* Option products */
+#define USB_PRODUCT_OPTION_VODAFONEMC3G 0x5000 /* Vodafone Mobile Connect 3G datacard */
+#define USB_PRODUCT_OPTION_GT3G 0x6000 /* GlobeTrotter 3G datacard */
+#define USB_PRODUCT_OPTION_GT3GQUAD 0x6300 /* GlobeTrotter 3G QUAD datacard */
+#define USB_PRODUCT_OPTION_GT3GPLUS 0x6600 /* GlobeTrotter 3G+ datacard */
+#define USB_PRODUCT_OPTION_GTICON322 0xd033 /* GlobeTrotter Icon322 storage */
+#define USB_PRODUCT_OPTION_GTMAX36 0x6701 /* GlobeTrotter Max 3.6 Modem */
+#define USB_PRODUCT_OPTION_GTHSDPA 0x6971 /* GlobeTrotter HSDPA */
+#define USB_PRODUCT_OPTION_GTMAXHSUPA 0x7001 /* GlobeTrotter HSUPA */
+#define USB_PRODUCT_OPTION_GTMAXHSUPAE 0x6901 /* GlobeTrotter HSUPA PCIe */
+#define USB_PRODUCT_OPTION_GTMAX380HSUPAE 0x7211 /* GlobeTrotter 380HSUPA PCIe */
+#define USB_PRODUCT_OPTION_GT3G_1 0x6050 /* 3G modem */
+#define USB_PRODUCT_OPTION_GT3G_2 0x6100 /* 3G modem */
+#define USB_PRODUCT_OPTION_GT3G_3 0x6150 /* 3G modem */
+#define USB_PRODUCT_OPTION_GT3G_4 0x6200 /* 3G modem */
+#define USB_PRODUCT_OPTION_GT3G_5 0x6250 /* 3G modem */
+#define USB_PRODUCT_OPTION_GT3G_6 0x6350 /* 3G modem */
+#define USB_PRODUCT_OPTION_E6500 0x6500 /* 3G modem */
+#define USB_PRODUCT_OPTION_E6501 0x6501 /* 3G modem */
+#define USB_PRODUCT_OPTION_E6601 0x6601 /* 3G modem */
+#define USB_PRODUCT_OPTION_E6721 0x6721 /* 3G modem */
+#define USB_PRODUCT_OPTION_E6741 0x6741 /* 3G modem */
+#define USB_PRODUCT_OPTION_E6761 0x6761 /* 3G modem */
+#define USB_PRODUCT_OPTION_E6800 0x6800 /* 3G modem */
+#define USB_PRODUCT_OPTION_E7021 0x7021 /* 3G modem */
+#define USB_PRODUCT_OPTION_E7041 0x7041 /* 3G modem */
+#define USB_PRODUCT_OPTION_E7061 0x7061 /* 3G modem */
+#define USB_PRODUCT_OPTION_E7100 0x7100 /* 3G modem */
+#define USB_PRODUCT_OPTION_GTM380 0x7201 /* 3G modem */
+#define USB_PRODUCT_OPTION_GE40X 0x7601 /* Globetrotter HSUPA */
+#define USB_PRODUCT_OPTION_GSICON72 0x6911 /* GlobeSurfer iCON */
+#define USB_PRODUCT_OPTION_GSICONHSUPA 0x7251 /* Globetrotter HSUPA */
+#define USB_PRODUCT_OPTION_ICON401 0x7401 /* GlobeSurfer iCON 401 */
+#define USB_PRODUCT_OPTION_GTHSUPA 0x7011 /* Globetrotter HSUPA */
+#define USB_PRODUCT_OPTION_GMT382 0x7501 /* Globetrotter HSUPA */
+#define USB_PRODUCT_OPTION_GE40X_1 0x7301 /* Globetrotter HSUPA */
+#define USB_PRODUCT_OPTION_GE40X_2 0x7361 /* Globetrotter HSUPA */
+#define USB_PRODUCT_OPTION_GE40X_3 0x7381 /* Globetrotter HSUPA */
+#define USB_PRODUCT_OPTION_ICONEDGE 0xc031 /* GlobeSurfer iCON EDGE */
+#define USB_PRODUCT_OPTION_MODHSXPA 0xd013 /* Globetrotter HSUPA */
+#define USB_PRODUCT_OPTION_ICON321 0xd031 /* Globetrotter HSUPA */
+#define USB_PRODUCT_OPTION_ICON505 0xd055 /* Globetrotter iCON 505 */
+#define USB_PRODUCT_OPTION_ICON452 0x7901 /* Globetrotter iCON 452 */
+
+/* OvisLink product */
+#define USB_PRODUCT_OVISLINK_RT3072 0x3072 /* RT3072 */
+
+/* OQO */
+#define USB_PRODUCT_OQO_WIFI01 0x0002 /* model 01 WiFi interface */
+#define USB_PRODUCT_OQO_BT01 0x0003 /* model 01 Bluetooth interface */
+#define USB_PRODUCT_OQO_ETHER01PLUS 0x7720 /* model 01+ Ethernet */
+#define USB_PRODUCT_OQO_ETHER01 0x8150 /* model 01 Ethernet interface */
+
+/* Ours Technology Inc. */
+#define USB_PRODUCT_OTI_DKU5 0x6858 /* DKU-5 Serial */
+
+/* Owen.ru products */
+#define USB_PRODUCT_OWEN_AC4 0x0004 /* AC4 USB-RS485 converter */
+
+/* Palm Computing, Inc. products */
+#define USB_PRODUCT_PALM_SERIAL 0x0080 /* USB Serial */
+#define USB_PRODUCT_PALM_M500 0x0001 /* Palm m500 */
+#define USB_PRODUCT_PALM_M505 0x0002 /* Palm m505 */
+#define USB_PRODUCT_PALM_M515 0x0003 /* Palm m515 */
+#define USB_PRODUCT_PALM_I705 0x0020 /* Palm i705 */
+#define USB_PRODUCT_PALM_TUNGSTEN_Z 0x0031 /* Palm Tungsten Z */
+#define USB_PRODUCT_PALM_M125 0x0040 /* Palm m125 */
+#define USB_PRODUCT_PALM_M130 0x0050 /* Palm m130 */
+#define USB_PRODUCT_PALM_TUNGSTEN_T 0x0060 /* Palm Tungsten T */
+#define USB_PRODUCT_PALM_ZIRE31 0x0061 /* Palm Zire 31 */
+#define USB_PRODUCT_PALM_ZIRE 0x0070 /* Palm Zire */
+
+/* Panasonic products */
+#define USB_PRODUCT_PANASONIC_LS120CAM 0x0901 /* LS-120 Camera */
+#define USB_PRODUCT_PANASONIC_KXL840AN 0x0d01 /* CD-R Drive KXL-840AN */
+#define USB_PRODUCT_PANASONIC_KXLRW32AN 0x0d09 /* CD-R Drive KXL-RW32AN */
+#define USB_PRODUCT_PANASONIC_KXLCB20AN 0x0d0a /* CD-R Drive KXL-CB20AN */
+#define USB_PRODUCT_PANASONIC_KXLCB35AN 0x0d0e /* DVD-ROM & CD-R/RW */
+#define USB_PRODUCT_PANASONIC_SDCAAE 0x1b00 /* MultiMediaCard */
+#define USB_PRODUCT_PANASONIC_TYTP50P6S 0x3900 /* TY-TP50P6-S 50in Touch Panel */
+
+/* PARA Industrial products */
+#define USB_PRODUCT_PARA_RT3070 0x8888 /* RT3070 */
+
+/* Pegatron products */
+#define USB_PRODUCT_PEGATRON_RT2870 0x0002 /* RT2870 */
+#define USB_PRODUCT_PEGATRON_RT3070 0x000c /* RT3070 */
+#define USB_PRODUCT_PEGATRON_RT3070_2 0x000e /* RT3070 */
+#define USB_PRODUCT_PEGATRON_RT3070_3 0x0010 /* RT3070 */
+
+/* Peracom products */
+#define USB_PRODUCT_PERACOM_SERIAL1 0x0001 /* Serial */
+#define USB_PRODUCT_PERACOM_ENET 0x0002 /* Ethernet */
+#define USB_PRODUCT_PERACOM_ENET3 0x0003 /* At Home Ethernet */
+#define USB_PRODUCT_PERACOM_ENET2 0x0005 /* Ethernet */
+
+/* Philips products */
+#define USB_PRODUCT_PHILIPS_DSS350 0x0101 /* DSS 350 Digital Speaker System */
+#define USB_PRODUCT_PHILIPS_DSS 0x0104 /* DSS XXX Digital Speaker System */
+#define USB_PRODUCT_PHILIPS_HUB 0x0201 /* hub */
+#define USB_PRODUCT_PHILIPS_PCA646VC 0x0303 /* PCA646VC PC Camera */
+#define USB_PRODUCT_PHILIPS_PCVC680K 0x0308 /* PCVC680K Vesta Pro PC Camera */
+#define USB_PRODUCT_PHILIPS_DSS150 0x0471 /* DSS 150 Digital Speaker System */
+#define USB_PRODUCT_PHILIPS_ACE1001 0x066a /* AKTAKOM ACE-1001 cable */
+#define USB_PRODUCT_PHILIPS_SPE3030CC 0x083a /* USB 2.0 External Disk */
+#define USB_PRODUCT_PHILIPS_SNU5600 0x1236 /* SNU5600 */
+#define USB_PRODUCT_PHILIPS_UM10016 0x1552 /* ISP 1581 Hi-Speed USB MPEG2 Encoder Reference Kit */
+#define USB_PRODUCT_PHILIPS_DIVAUSB 0x1801 /* DIVA USB mp3 player */
+#define USB_PRODUCT_PHILIPS_RT2870 0x200f /* RT2870 */
+
+/* Philips Semiconductor products */
+#define USB_PRODUCT_PHILIPSSEMI_HUB1122 0x1122 /* HUB */
+
+/* Megatec */
+#define USB_PRODUCT_MEGATEC_UPS 0x5161 /* Phoenixtec protocol based UPS */
+
+/* P.I. Engineering products */
+#define USB_PRODUCT_PIENGINEERING_PS2USB 0x020b /* PS2 to Mac USB Adapter */
+
+/* Planex Communications products */
+#define USB_PRODUCT_PLANEX_GW_US11H 0x14ea /* GW-US11H WLAN */
+#define USB_PRODUCT_PLANEX2_GW_US11S 0x3220 /* GW-US11S WLAN */
+#define USB_PRODUCT_PLANEX2_GW_US54GXS 0x5303 /* GW-US54GXS WLAN */
+#define USB_PRODUCT_PLANEX2_GWUS54HP 0xab01 /* GW-US54HP */
+#define USB_PRODUCT_PLANEX2_GWUS300MINIS 0xab24 /* GW-US300MiniS */
+#define USB_PRODUCT_PLANEX2_RT3070 0xab25 /* RT3070 */
+#define USB_PRODUCT_PLANEX2_GWUS54MINI2 0xab50 /* GW-US54Mini2 */
+#define USB_PRODUCT_PLANEX2_GWUS54SG 0xc002 /* GW-US54SG */
+#define USB_PRODUCT_PLANEX2_GWUS54GZL 0xc007 /* GW-US54GZL */
+#define USB_PRODUCT_PLANEX2_GWUS54GD 0xed01 /* GW-US54GD */
+#define USB_PRODUCT_PLANEX2_GWUSMM 0xed02 /* GW-USMM */
+#define USB_PRODUCT_PLANEX2_RT2870 0xed06 /* RT2870 */
+#define USB_PRODUCT_PLANEX2_GWUSMICRON 0xed14 /* GW-USMicroN */
+#define USB_PRODUCT_PLANEX3_GWUS54GZ 0xab10 /* GW-US54GZ */
+#define USB_PRODUCT_PLANEX3_GU1000T 0xab11 /* GU-1000T */
+#define USB_PRODUCT_PLANEX3_GWUS54MINI 0xab13 /* GW-US54Mini */
+
+/* Plextor Corp. */
+#define USB_PRODUCT_PLEXTOR_40_12_40U 0x0011 /* PlexWriter 40/12/40U */
+
+/* PLX products */
+#define USB_PRODUCT_PLX_TESTBOARD 0x9060 /* test board */
+#define USB_PRODUCT_PLX_CA42 0xac70 /* CA-42 */
+
+/* PNY products */
+#define USB_PRODUCT_PNY_ATTACHE2 0x0010 /* USB 2.0 Flash Drive */
+
+/* PortGear products */
+#define USB_PRODUCT_PORTGEAR_EA8 0x0008 /* Ethernet */
+#define USB_PRODUCT_PORTGEAR_EA9 0x0009 /* Ethernet */
+
+/* Portsmith products */
+#define USB_PRODUCT_PORTSMITH_EEA 0x3003 /* Express Ethernet */
+
+/* Primax products */
+#define USB_PRODUCT_PRIMAX_G2X300 0x0300 /* G2-200 scanner */
+#define USB_PRODUCT_PRIMAX_G2E300 0x0301 /* G2E-300 scanner */
+#define USB_PRODUCT_PRIMAX_G2300 0x0302 /* G2-300 scanner */
+#define USB_PRODUCT_PRIMAX_G2E3002 0x0303 /* G2E-300 scanner */
+#define USB_PRODUCT_PRIMAX_9600 0x0340 /* Colorado USB 9600 scanner */
+#define USB_PRODUCT_PRIMAX_600U 0x0341 /* Colorado 600u scanner */
+#define USB_PRODUCT_PRIMAX_6200 0x0345 /* Visioneer 6200 scanner */
+#define USB_PRODUCT_PRIMAX_19200 0x0360 /* Colorado USB 19200 scanner */
+#define USB_PRODUCT_PRIMAX_1200U 0x0361 /* Colorado 1200u scanner */
+#define USB_PRODUCT_PRIMAX_G600 0x0380 /* G2-600 scanner */
+#define USB_PRODUCT_PRIMAX_636I 0x0381 /* ReadyScan 636i */
+#define USB_PRODUCT_PRIMAX_G2600 0x0382 /* G2-600 scanner */
+#define USB_PRODUCT_PRIMAX_G2E600 0x0383 /* G2E-600 scanner */
+#define USB_PRODUCT_PRIMAX_COMFORT 0x4d01 /* Comfort */
+#define USB_PRODUCT_PRIMAX_MOUSEINABOX 0x4d02 /* Mouse-in-a-Box */
+#define USB_PRODUCT_PRIMAX_PCGAUMS1 0x4d04 /* Sony PCGA-UMS1 */
+#define USB_PRODUCT_PRIMAX_HP_RH304AA 0x4d17 /* HP RH304AA mouse */
+
+/* Prolific products */
+#define USB_PRODUCT_PROLIFIC_PL2301 0x0000 /* PL2301 Host-Host interface */
+#define USB_PRODUCT_PROLIFIC_PL2302 0x0001 /* PL2302 Host-Host interface */
+#define USB_PRODUCT_PROLIFIC_RSAQ2 0x04bb /* PL2303 Serial (IODATA USB-RSAQ2) */
+#define USB_PRODUCT_PROLIFIC_ALLTRONIX_GPRS 0x0609 /* Alltronix ACM003U00 modem */
+#define USB_PRODUCT_PROLIFIC_ALDIGA_AL11U 0x0611 /* AlDiga AL-11U modem */
+#define USB_PRODUCT_PROLIFIC_MICROMAX_610U 0x0612 /* Micromax 610U */
+#define USB_PRODUCT_PROLIFIC_DCU11 0x1234 /* DCU-11 Phone Cable */
+#define USB_PRODUCT_PROLIFIC_PL2303 0x2303 /* PL2303 Serial (ATEN/IOGEAR UC232A) */
+#define USB_PRODUCT_PROLIFIC_PL2305 0x2305 /* Parallel printer */
+#define USB_PRODUCT_PROLIFIC_ATAPI4 0x2307 /* ATAPI-4 Controller */
+#define USB_PRODUCT_PROLIFIC_PL2501 0x2501 /* PL2501 Host-Host interface */
+#define USB_PRODUCT_PROLIFIC_PL2506 0x2506 /* PL2506 USB to IDE Bridge */
+#define USB_PRODUCT_PROLIFIC_HCR331 0x331a /* HCR331 Hybrid Card Reader */
+#define USB_PRODUCT_PROLIFIC_PHAROS 0xaaa0 /* Prolific Pharos */
+#define USB_PRODUCT_PROLIFIC_RSAQ3 0xaaa2 /* PL2303 Serial Adapter (IODATA USB-RSAQ3) */
+#define USB_PRODUCT_PROLIFIC2_PL2303 0x2303 /* PL2303 Serial Adapter */
+
+/* Putercom products */
+#define USB_PRODUCT_PUTERCOM_UPA100 0x047e /* USB-1284 BRIDGE */
+
+/* Qcom products */
+#define USB_PRODUCT_QCOM_RT2573 0x6196 /* RT2573 */
+#define USB_PRODUCT_QCOM_RT2573_2 0x6229 /* RT2573 */
+#define USB_PRODUCT_QCOM_RT2573_3 0x6238 /* RT2573 */
+#define USB_PRODUCT_QCOM_RT2870 0x6259 /* RT2870 */
+
+/* Qisda products */
+#define USB_PRODUCT_QISDA_H21_1 0x4512 /* 3G modem */
+#define USB_PRODUCT_QISDA_H21_2 0x4523 /* 3G modem */
+#define USB_PRODUCT_QISDA_H20_1 0x4515 /* 3G modem */
+#define USB_PRODUCT_QISDA_H20_2 0x4519 /* 3G modem */
+
+/* Qualcomm products */
+#define USB_PRODUCT_QUALCOMM_CDMA_MSM 0x6000 /* CDMA Technologies MSM phone */
+#define USB_PRODUCT_QUALCOMM2_MF330 0x6613 /* MF330 */
+#define USB_PRODUCT_QUALCOMM2_RWT_FCT 0x3100 /* RWT FCT-CDMA 2000 1xRTT modem */
+#define USB_PRODUCT_QUALCOMM2_CDMA_MSM 0x3196 /* CDMA Technologies MSM modem */
+#define USB_PRODUCT_QUALCOMM2_AC8700 0x6000 /* AC8700 */
+#define USB_PRODUCT_QUALCOMMINC_CDMA_MSM 0x0001 /* CDMA Technologies MSM modem */
+#define USB_PRODUCT_QUALCOMMINC_E0002 0x0002 /* 3G modem */
+#define USB_PRODUCT_QUALCOMMINC_E0003 0x0003 /* 3G modem */
+#define USB_PRODUCT_QUALCOMMINC_E0004 0x0004 /* 3G modem */
+#define USB_PRODUCT_QUALCOMMINC_E0005 0x0005 /* 3G modem */
+#define USB_PRODUCT_QUALCOMMINC_E0006 0x0006 /* 3G modem */
+#define USB_PRODUCT_QUALCOMMINC_E0007 0x0007 /* 3G modem */
+#define USB_PRODUCT_QUALCOMMINC_E0008 0x0008 /* 3G modem */
+#define USB_PRODUCT_QUALCOMMINC_E0009 0x0009 /* 3G modem */
+#define USB_PRODUCT_QUALCOMMINC_E000A 0x000a /* 3G modem */
+#define USB_PRODUCT_QUALCOMMINC_E000B 0x000b /* 3G modem */
+#define USB_PRODUCT_QUALCOMMINC_E000C 0x000c /* 3G modem */
+#define USB_PRODUCT_QUALCOMMINC_E000D 0x000d /* 3G modem */
+#define USB_PRODUCT_QUALCOMMINC_E000E 0x000e /* 3G modem */
+#define USB_PRODUCT_QUALCOMMINC_E000F 0x000f /* 3G modem */
+#define USB_PRODUCT_QUALCOMMINC_E0010 0x0010 /* 3G modem */
+#define USB_PRODUCT_QUALCOMMINC_E0011 0x0011 /* 3G modem */
+#define USB_PRODUCT_QUALCOMMINC_E0012 0x0012 /* 3G modem */
+#define USB_PRODUCT_QUALCOMMINC_E0013 0x0013 /* 3G modem */
+#define USB_PRODUCT_QUALCOMMINC_E0014 0x0014 /* 3G modem */
+#define USB_PRODUCT_QUALCOMMINC_MF628 0x0015 /* 3G modem */
+#define USB_PRODUCT_QUALCOMMINC_MF633R 0x0016 /* ZTE WCDMA modem */
+#define USB_PRODUCT_QUALCOMMINC_E0017 0x0017 /* 3G modem */
+#define USB_PRODUCT_QUALCOMMINC_E0018 0x0018 /* 3G modem */
+#define USB_PRODUCT_QUALCOMMINC_E0019 0x0019 /* 3G modem */
+#define USB_PRODUCT_QUALCOMMINC_E0020 0x0020 /* 3G modem */
+#define USB_PRODUCT_QUALCOMMINC_E0021 0x0021 /* 3G modem */
+#define USB_PRODUCT_QUALCOMMINC_E0022 0x0022 /* 3G modem */
+#define USB_PRODUCT_QUALCOMMINC_E0023 0x0023 /* 3G modem */
+#define USB_PRODUCT_QUALCOMMINC_E0024 0x0024 /* 3G modem */
+#define USB_PRODUCT_QUALCOMMINC_E0025 0x0025 /* 3G modem */
+#define USB_PRODUCT_QUALCOMMINC_E0026 0x0026 /* 3G modem */
+#define USB_PRODUCT_QUALCOMMINC_E0027 0x0027 /* 3G modem */
+#define USB_PRODUCT_QUALCOMMINC_E0028 0x0028 /* 3G modem */
+#define USB_PRODUCT_QUALCOMMINC_E0029 0x0029 /* 3G modem */
+#define USB_PRODUCT_QUALCOMMINC_E0030 0x0030 /* 3G modem */
+#define USB_PRODUCT_QUALCOMMINC_MF626 0x0031 /* 3G modem */
+#define USB_PRODUCT_QUALCOMMINC_E0032 0x0032 /* 3G modem */
+#define USB_PRODUCT_QUALCOMMINC_E0033 0x0033 /* 3G modem */
+#define USB_PRODUCT_QUALCOMMINC_E0037 0x0037 /* 3G modem */
+#define USB_PRODUCT_QUALCOMMINC_E0039 0x0039 /* 3G modem */
+#define USB_PRODUCT_QUALCOMMINC_E0042 0x0042 /* 3G modem */
+#define USB_PRODUCT_QUALCOMMINC_E0043 0x0043 /* 3G modem */
+#define USB_PRODUCT_QUALCOMMINC_E0048 0x0048 /* 3G modem */
+#define USB_PRODUCT_QUALCOMMINC_E0049 0x0049 /* 3G modem */
+#define USB_PRODUCT_QUALCOMMINC_E0051 0x0051 /* 3G modem */
+#define USB_PRODUCT_QUALCOMMINC_E0052 0x0052 /* 3G modem */
+#define USB_PRODUCT_QUALCOMMINC_ZTE_STOR2 0x0053 /* USB ZTE Storage */
+#define USB_PRODUCT_QUALCOMMINC_E0054 0x0054 /* 3G modem */
+#define USB_PRODUCT_QUALCOMMINC_E0055 0x0055 /* 3G modem */
+#define USB_PRODUCT_QUALCOMMINC_E0057 0x0057 /* 3G modem */
+#define USB_PRODUCT_QUALCOMMINC_E0058 0x0058 /* 3G modem */
+#define USB_PRODUCT_QUALCOMMINC_E0059 0x0059 /* 3G modem */
+#define USB_PRODUCT_QUALCOMMINC_E0060 0x0060 /* 3G modem */
+#define USB_PRODUCT_QUALCOMMINC_E0061 0x0061 /* 3G modem */
+#define USB_PRODUCT_QUALCOMMINC_E0062 0x0062 /* 3G modem */
+#define USB_PRODUCT_QUALCOMMINC_E0063 0x0063 /* 3G modem */
+#define USB_PRODUCT_QUALCOMMINC_E0064 0x0064 /* 3G modem */
+#define USB_PRODUCT_QUALCOMMINC_E0066 0x0066 /* 3G modem */
+#define USB_PRODUCT_QUALCOMMINC_E0069 0x0069 /* 3G modem */
+#define USB_PRODUCT_QUALCOMMINC_E0070 0x0070 /* 3G modem */
+#define USB_PRODUCT_QUALCOMMINC_E0073 0x0073 /* 3G modem */
+#define USB_PRODUCT_QUALCOMMINC_E0076 0x0076 /* 3G modem */
+#define USB_PRODUCT_QUALCOMMINC_E0078 0x0078 /* 3G modem */
+#define USB_PRODUCT_QUALCOMMINC_E0082 0x0082 /* 3G modem */
+#define USB_PRODUCT_QUALCOMMINC_E0086 0x0086 /* 3G modem */
+#define USB_PRODUCT_QUALCOMMINC_ZTE_STOR 0x2000 /* USB ZTE Storage */
+#define USB_PRODUCT_QUALCOMMINC_E2002 0x2002 /* 3G modem */
+#define USB_PRODUCT_QUALCOMMINC_E2003 0x2003 /* 3G modem */
+#define USB_PRODUCT_QUALCOMMINC_AC8710 0xfff1 /* 3G modem */
+#define USB_PRODUCT_QUALCOMMINC_AC2726 0xfff5 /* 3G modem */
+#define USB_PRODUCT_QUALCOMMINC_AC8700 0xfffe /* CDMA 1xEVDO USB modem */
+
+/* Quanta products */
+#define USB_PRODUCT_QUANTA_RW6815_1 0x00ce /* HP iPAQ rw6815 */
+#define USB_PRODUCT_QUANTA_RT3070 0x0304 /* RT3070 */
+#define USB_PRODUCT_QUANTA_Q101_STOR 0x1000 /* USB Q101 Storage */
+#define USB_PRODUCT_QUANTA_Q101 0xea02 /* HSDPA modem */
+#define USB_PRODUCT_QUANTA_Q111 0xea03 /* HSDPA modem */
+#define USB_PRODUCT_QUANTA_GLX 0xea04 /* HSDPA modem */
+#define USB_PRODUCT_QUANTA_GKE 0xea05 /* HSDPA modem */
+#define USB_PRODUCT_QUANTA_GLE 0xea06 /* HSDPA modem */
+#define USB_PRODUCT_QUANTA_RW6815R 0xf003 /* HP iPAQ rw6815 RNDIS */
+
+/* Qtronix products */
+#define USB_PRODUCT_QTRONIX_980N 0x2011 /* Scorpion-980N keyboard */
+
+/* Quickshot products */
+#define USB_PRODUCT_QUICKSHOT_STRIKEPAD 0x6238 /* USB StrikePad */
+
+/* Radio Shack */
+#define USB_PRODUCT_RADIOSHACK_USBCABLE 0x4026 /* USB to Serial Cable */
+
+/* Rainbow Technologies products */
+#define USB_PRODUCT_RAINBOW_IKEY2000 0x1200 /* i-Key 2000 */
+
+/* Ralink Technology products */
+#define USB_PRODUCT_RALINK_RT2570 0x1706 /* RT2500USB Wireless Adapter */
+#define USB_PRODUCT_RALINK_RT2070 0x2070 /* RT2070 */
+#define USB_PRODUCT_RALINK_RT2570_2 0x2570 /* RT2500USB Wireless Adapter */
+#define USB_PRODUCT_RALINK_RT2573 0x2573 /* RT2501USB Wireless Adapter */
+#define USB_PRODUCT_RALINK_RT2671 0x2671 /* RT2601USB Wireless Adapter */
+#define USB_PRODUCT_RALINK_RT2770 0x2770 /* RT2770 */
+#define USB_PRODUCT_RALINK_RT2870 0x2870 /* RT2870 */
+#define USB_PRODUCT_RALINK_RT3070 0x3070 /* RT3070 */
+#define USB_PRODUCT_RALINK_RT3071 0x3071 /* RT3071 */
+#define USB_PRODUCT_RALINK_RT3072 0x3072 /* RT3072 */
+#define USB_PRODUCT_RALINK_RT3370 0x3370 /* RT3370 */
+#define USB_PRODUCT_RALINK_RT3572 0x3572 /* RT3572 */
+#define USB_PRODUCT_RALINK_RT8070 0x8070 /* RT8070 */
+#define USB_PRODUCT_RALINK_RT2570_3 0x9020 /* RT2500USB Wireless Adapter */
+#define USB_PRODUCT_RALINK_RT2573_2 0x9021 /* RT2501USB Wireless Adapter */
+
+/* RATOC Systems products */
+#define USB_PRODUCT_RATOC_REXUSB60 0xb000 /* USB serial adapter REX-USB60 */
+#define USB_PRODUCT_RATOC_REXUSB60F 0xb020 /* USB serial adapter REX-USB60F */
+
+/* Realtek products */
+/* Green House and CompUSA OEM this part */
+#define USB_PRODUCT_REALTEK_USB20CRW 0x0158 /* USB20CRW Card Reader */
+#define USB_PRODUCT_REALTEK_USBKR100 0x8150 /* USBKR100 USB Ethernet */
+#define USB_PRODUCT_REALTEK_RTL8187 0x8187 /* RTL8187 Wireless Adapter */
+#define USB_PRODUCT_REALTEK_RTL8187B_0 0x8189 /* RTL8187B Wireless Adapter */
+#define USB_PRODUCT_REALTEK_RTL8187B_1 0x8197 /* RTL8187B Wireless Adapter */
+#define USB_PRODUCT_REALTEK_RTL8187B_2 0x8198 /* RTL8187B Wireless Adapter */
+
+/* Ricoh products */
+#define USB_PRODUCT_RICOH_VGPVCC2 0x1830 /* VGP-VCC2 Camera */
+#define USB_PRODUCT_RICOH_VGPVCC3 0x1832 /* VGP-VCC3 Camera */
+#define USB_PRODUCT_RICOH_VGPVCC2_2 0x1833 /* VGP-VCC2 Camera */
+#define USB_PRODUCT_RICOH_VGPVCC2_3 0x1834 /* VGP-VCC2 Camera */
+#define USB_PRODUCT_RICOH_VGPVCC7 0x183a /* VGP-VCC7 Camera */
+#define USB_PRODUCT_RICOH_VGPVCC8 0x183b /* VGP-VCC8 Camera */
+
+/* Reiner-SCT products */
+#define USB_PRODUCT_REINERSCT_CYBERJACK_ECOM 0x0100 /* e-com cyberJack */
+
+/* Roland products */
+#define USB_PRODUCT_ROLAND_UM1 0x0009 /* UM-1 MIDI I/F */
+#define USB_PRODUCT_ROLAND_UM880N 0x0014 /* EDIROL UM-880 MIDI I/F (native) */
+#define USB_PRODUCT_ROLAND_UM880G 0x0015 /* EDIROL UM-880 MIDI I/F (generic) */
+
+/* Rockfire products */
+#define USB_PRODUCT_ROCKFIRE_GAMEPAD 0x2033 /* gamepad 203USB */
+
+/* Sagem products */
+#define USB_PRODUCT_SAGEM_USBSERIAL 0x0027 /* USB-Serial Controller */
+#define USB_PRODUCT_SAGEM_XG760A 0x004a /* XG-760A */
+#define USB_PRODUCT_SAGEM_XG76NA 0x0062 /* XG-76NA */
+
+/* Samsung products */
+#define USB_PRODUCT_SAMSUNG_ML6060 0x3008 /* ML-6060 laser printer */
+#define USB_PRODUCT_SAMSUNG_YP_U2 0x5050 /* YP-U2 MP3 Player */
+#define USB_PRODUCT_SAMSUNG_YP_U4 0x5092 /* YP-U4 MP3 Player */
+#define USB_PRODUCT_SAMSUNG_I500 0x6601 /* I500 Palm USB Phone */
+#define USB_PRODUCT_SAMSUNG_I330 0x8001 /* I330 phone cradle */
+#define USB_PRODUCT_SAMSUNG2_RT2870_1 0x2018 /* RT2870 */
+
+/* Samsung Techwin products */
+#define USB_PRODUCT_SAMSUNG_TECHWIN_DIGIMAX_410 0x000a /* Digimax 410 */
+
+/* SanDisk products */
+#define USB_PRODUCT_SANDISK_SDDR05A 0x0001 /* ImageMate SDDR-05a */
+#define USB_PRODUCT_SANDISK_SDDR31 0x0002 /* ImageMate SDDR-31 */
+#define USB_PRODUCT_SANDISK_SDDR05 0x0005 /* ImageMate SDDR-05 */
+#define USB_PRODUCT_SANDISK_SDDR12 0x0100 /* ImageMate SDDR-12 */
+#define USB_PRODUCT_SANDISK_SDDR09 0x0200 /* ImageMate SDDR-09 */
+#define USB_PRODUCT_SANDISK_SDDR75 0x0810 /* ImageMate SDDR-75 */
+#define USB_PRODUCT_SANDISK_SDCZ2_256 0x7104 /* Cruzer Mini 256MB */
+#define USB_PRODUCT_SANDISK_SDCZ4_128 0x7112 /* Cruzer Micro 128MB */
+#define USB_PRODUCT_SANDISK_SDCZ4_256 0x7113 /* Cruzer Micro 256MB */
+
+/* Sanwa Electric Instrument Co., Ltd. products */
+#define USB_PRODUCT_SANWA_KB_USB2 0x0701 /* KB-USB2 multimeter cable */
+
+/* Sanyo Electric products */
+#define USB_PRODUCT_SANYO_SCP4900 0x0701 /* Sanyo SCP-4900 USB Phone */
+
+/* ScanLogic products */
+#define USB_PRODUCT_SCANLOGIC_SL11R 0x0002 /* SL11R IDE Adapter */
+#define USB_PRODUCT_SCANLOGIC_336CX 0x0300 /* Phantom 336CX - C3 scanner */
+
+/* Senao products */
+#define USB_PRODUCT_SENAO_RT2870_3 0x0605 /* RT2870 */
+#define USB_PRODUCT_SENAO_RT2870_4 0x0615 /* RT2870 */
+#define USB_PRODUCT_SENAO_NUB8301 0x2000 /* NUB-8301 */
+#define USB_PRODUCT_SENAO_RT2870_1 0x9701 /* RT2870 */
+#define USB_PRODUCT_SENAO_RT2870_2 0x9702 /* RT2870 */
+#define USB_PRODUCT_SENAO_RT3070 0x9703 /* RT3070 */
+#define USB_PRODUCT_SENAO_RT3071 0x9705 /* RT3071 */
+#define USB_PRODUCT_SENAO_RT3072_1 0x9706 /* RT3072 */
+#define USB_PRODUCT_SENAO_RT3072_2 0x9707 /* RT3072 */
+#define USB_PRODUCT_SENAO_RT3072_3 0x9708 /* RT3072 */
+#define USB_PRODUCT_SENAO_RT3072_4 0x9709 /* RT3072 */
+#define USB_PRODUCT_SENAO_RT3072_5 0x9801 /* RT3072 */
+
+/* ShanTou products */
+#define USB_PRODUCT_SHANTOU_ST268 0x0268 /* ST268 */
+#define USB_PRODUCT_SHANTOU_DM9601 0x9601 /* DM 9601 */
+
+/* Shark products */
+#define USB_PRODUCT_SHARK_PA 0x0400 /* Pocket Adapter */
+
+/* Sharp products */
+#define USB_PRODUCT_SHARP_SL5500 0x8004 /* Zaurus SL-5500 PDA */
+#define USB_PRODUCT_SHARP_SLA300 0x8005 /* Zaurus SL-A300 PDA */
+#define USB_PRODUCT_SHARP_SL5600 0x8006 /* Zaurus SL-5600 PDA */
+#define USB_PRODUCT_SHARP_SLC700 0x8007 /* Zaurus SL-C700 PDA */
+#define USB_PRODUCT_SHARP_SLC750 0x9031 /* Zaurus SL-C750 PDA */
+#define USB_PRODUCT_SHARP_WZERO3ES 0x9123 /* W-ZERO3 ES Smartphone */
+#define USB_PRODUCT_SHARP_WZERO3ADES 0x91ac /* Advanced W-ZERO3 ES Smartphone */
+#define USB_PRODUCT_SHARP_WILLCOM03 0x9242 /* WILLCOM03 */
+
+/* Shuttle Technology products */
+#define USB_PRODUCT_SHUTTLE_EUSB 0x0001 /* E-USB Bridge */
+#define USB_PRODUCT_SHUTTLE_EUSCSI 0x0002 /* eUSCSI Bridge */
+#define USB_PRODUCT_SHUTTLE_SDDR09 0x0003 /* ImageMate SDDR09 */
+#define USB_PRODUCT_SHUTTLE_EUSBCFSM 0x0005 /* eUSB SmartMedia / CompactFlash Adapter */
+#define USB_PRODUCT_SHUTTLE_ZIOMMC 0x0006 /* eUSB MultiMediaCard Adapter */
+#define USB_PRODUCT_SHUTTLE_HIFD 0x0007 /* Sony HiFD */
+#define USB_PRODUCT_SHUTTLE_EUSBATAPI 0x0009 /* eUSB ATA/ATAPI Adapter */
+#define USB_PRODUCT_SHUTTLE_CF 0x000a /* eUSB CompactFlash Adapter */
+#define USB_PRODUCT_SHUTTLE_EUSCSI_B 0x000b /* eUSCSI Bridge */
+#define USB_PRODUCT_SHUTTLE_EUSCSI_C 0x000c /* eUSCSI Bridge */
+#define USB_PRODUCT_SHUTTLE_CDRW 0x0101 /* CD-RW Device */
+#define USB_PRODUCT_SHUTTLE_EUSBORCA 0x0325 /* eUSB ORCA Quad Reader */
+
+/* Siemens products */
+#define USB_PRODUCT_SIEMENS_SPEEDSTREAM 0x1001 /* SpeedStream */
+#define USB_PRODUCT_SIEMENS_SPEEDSTREAM22 0x1022 /* SpeedStream 1022 */
+#define USB_PRODUCT_SIEMENS2_WLL013 0x001b /* WLL013 */
+#define USB_PRODUCT_SIEMENS2_ES75 0x0034 /* GSM module MC35 */
+#define USB_PRODUCT_SIEMENS2_WL54G 0x3c06 /* 54g USB Network Adapter */
+#define USB_PRODUCT_SIEMENS3_SX1 0x0001 /* SX1 */
+#define USB_PRODUCT_SIEMENS3_X65 0x0003 /* X65 */
+#define USB_PRODUCT_SIEMENS3_X75 0x0004 /* X75 */
+#define USB_PRODUCT_SIEMENS3_EF81 0x0005 /* EF81 */
+
+/* Sierra Wireless products */
+#define USB_PRODUCT_SIERRA_EM5625 0x0017 /* EM5625 */
+#define USB_PRODUCT_SIERRA_MC5720_2 0x0018 /* MC5720 */
+#define USB_PRODUCT_SIERRA_MC5725 0x0020 /* MC5725 */
+#define USB_PRODUCT_SIERRA_AIRCARD580 0x0112 /* Sierra Wireless AirCard 580 */
+#define USB_PRODUCT_SIERRA_AIRCARD595 0x0019 /* Sierra Wireless AirCard 595 */
+#define USB_PRODUCT_SIERRA_AC595U 0x0120 /* Sierra Wireless AirCard 595U */
+#define USB_PRODUCT_SIERRA_AC597E 0x0021 /* Sierra Wireless AirCard 597E */
+#define USB_PRODUCT_SIERRA_EM5725 0x0022 /* EM5725 */
+#define USB_PRODUCT_SIERRA_C597 0x0023 /* Sierra Wireless Compass 597 */
+#define USB_PRODUCT_SIERRA_MC5727 0x0024 /* MC5727 */
+#define USB_PRODUCT_SIERRA_T598 0x0025 /* T598 */
+#define USB_PRODUCT_SIERRA_T11 0x0026 /* T11 */
+#define USB_PRODUCT_SIERRA_AC402 0x0027 /* AC402 */
+#define USB_PRODUCT_SIERRA_MC5728 0x0028 /* MC5728 */
+#define USB_PRODUCT_SIERRA_E0029 0x0029 /* E0029 */
+#define USB_PRODUCT_SIERRA_MC5720 0x0218 /* MC5720 Wireless Modem */
+#define USB_PRODUCT_SIERRA_MINI5725 0x0220 /* Sierra Wireless miniPCI 5725 */
+#define USB_PRODUCT_SIERRA_MC5727_2 0x0224 /* MC5727 */
+#define USB_PRODUCT_SIERRA_MC8755_2 0x6802 /* MC8755 */
+#define USB_PRODUCT_SIERRA_MC8765 0x6803 /* MC8765 */
+#define USB_PRODUCT_SIERRA_MC8755 0x6804 /* MC8755 */
+#define USB_PRODUCT_SIERRA_MC8765_2 0x6805 /* MC8765 */
+#define USB_PRODUCT_SIERRA_MC8755_4 0x6808 /* MC8755 */
+#define USB_PRODUCT_SIERRA_MC8765_3 0x6809 /* MC8765 */
+#define USB_PRODUCT_SIERRA_AC875U 0x6812 /* AC875U HSDPA USB Modem */
+#define USB_PRODUCT_SIERRA_MC8755_3 0x6813 /* MC8755 HSDPA */
+#define USB_PRODUCT_SIERRA_MC8775_2 0x6815 /* MC8775 */
+#define USB_PRODUCT_SIERRA_MC8775 0x6816 /* MC8775 */
+#define USB_PRODUCT_SIERRA_AC875 0x6820 /* Sierra Wireless AirCard 875 */
+#define USB_PRODUCT_SIERRA_AC875U_2 0x6821 /* AC875U */
+#define USB_PRODUCT_SIERRA_AC875E 0x6822 /* AC875E */
+#define USB_PRODUCT_SIERRA_MC8780 0x6832 /* MC8780 */
+#define USB_PRODUCT_SIERRA_MC8781 0x6833 /* MC8781 */
+#define USB_PRODUCT_SIERRA_MC8780_2 0x6834 /* MC8780 */
+#define USB_PRODUCT_SIERRA_MC8781_2 0x6835 /* MC8781 */
+#define USB_PRODUCT_SIERRA_MC8780_3 0x6838 /* MC8780 */
+#define USB_PRODUCT_SIERRA_MC8781_3 0x6839 /* MC8781 */
+#define USB_PRODUCT_SIERRA_MC8785 0x683a /* MC8785 */
+#define USB_PRODUCT_SIERRA_MC8785_2 0x683b /* MC8785 */
+#define USB_PRODUCT_SIERRA_MC8790 0x683c /* MC8790 */
+#define USB_PRODUCT_SIERRA_MC8791 0x683d /* MC8791 */
+#define USB_PRODUCT_SIERRA_MC8792 0x683e /* MC8792 */
+#define USB_PRODUCT_SIERRA_AC880 0x6850 /* Sierra Wireless AirCard 880 */
+#define USB_PRODUCT_SIERRA_AC881 0x6851 /* Sierra Wireless AirCard 881 */
+#define USB_PRODUCT_SIERRA_AC880E 0x6852 /* Sierra Wireless AirCard 880E */
+#define USB_PRODUCT_SIERRA_AC881E 0x6853 /* Sierra Wireless AirCard 881E */
+#define USB_PRODUCT_SIERRA_AC880U 0x6855 /* Sierra Wireless AirCard 880U */
+#define USB_PRODUCT_SIERRA_AC881U 0x6856 /* Sierra Wireless AirCard 881U */
+#define USB_PRODUCT_SIERRA_AC885E 0x6859 /* AC885E */
+#define USB_PRODUCT_SIERRA_AC885E_2 0x685a /* AC885E */
+#define USB_PRODUCT_SIERRA_AC885U 0x6880 /* Sierra Wireless AirCard 885U */
+#define USB_PRODUCT_SIERRA_C888 0x6890 /* C888 */
+#define USB_PRODUCT_SIERRA_C22 0x6891 /* C22 */
+#define USB_PRODUCT_SIERRA_E6892 0x6892 /* E6892 */
+#define USB_PRODUCT_SIERRA_E6893 0x6893 /* E6893 */
+#define USB_PRODUCT_SIERRA_MC8700 0x68a3 /* MC8700 */
+#define USB_PRODUCT_SIERRA_AIRCARD875 0x6820 /* Aircard 875 HSDPA */
+#define USB_PRODUCT_SIERRA_TRUINSTALL 0x0fff /* Aircard Tru Installer */
+
+/* Sigmatel products */
+#define USB_PRODUCT_SIGMATEL_WBT_3052 0x4200 /* WBT-3052 IrDA/USB Bridge */
+#define USB_PRODUCT_SIGMATEL_I_BEAD100 0x8008 /* i-Bead 100 MP3 Player */
+
+/* SIIG products */
+/* Also: Omnidirectional Control Technology products */
+#define USB_PRODUCT_SIIG_DIGIFILMREADER 0x0004 /* DigiFilm-Combo Reader */
+#define USB_PRODUCT_SIIG_WINTERREADER 0x0330 /* WINTERREADER Reader */
+#define USB_PRODUCT_SIIG2_USBTOETHER 0x0109 /* USB to Ethernet */
+#define USB_PRODUCT_SIIG2_US2308 0x0421 /* Serial */
+
+/* Silicom products */
+#define USB_PRODUCT_SILICOM_U2E 0x0001 /* U2E */
+#define USB_PRODUCT_SILICOM_GPE 0x0002 /* Psion Gold Port Ethernet */
+
+/* SI Labs */
+#define USB_PRODUCT_SILABS_VSTABI 0x0f91 /* Vstabi */
+#define USB_PRODUCT_SILABS_ARKHAM_DS101_M 0x1101 /* Arkham DS101 Monitor */
+#define USB_PRODUCT_SILABS_ARKHAM_DS101_A 0x1601 /* Arkham DS101 Adapter */
+#define USB_PRODUCT_SILABS_BSM7DUSB 0x800a /* BSM7-D-USB */
+#define USB_PRODUCT_SILABS_POLOLU 0x803b /* Pololu Serial */
+#define USB_PRODUCT_SILABS_CYGNAL_DEBUG 0x8044 /* Cygnal Debug Adapter */
+#define USB_PRODUCT_SILABS_SB_PARAMOUNT_ME 0x8043 /* Software Bisque Paramount ME */
+#define USB_PRODUCT_SILABS_SAEL 0x8053 /* SA-EL USB */
+#define USB_PRODUCT_SILABS_GSM2228 0x8054 /* Enfora GSM2228 USB */
+#define USB_PRODUCT_SILABS_ARGUSISP 0x8066 /* Argussoft ISP */
+#define USB_PRODUCT_SILABS_IMS_USB_RS422 0x806f /* IMS USB-RS422 */
+#define USB_PRODUCT_SILABS_CRUMB128 0x807a /* Crumb128 board */
+#define USB_PRODUCT_SILABS_DEGREE 0x80ca /* Degree Controls Inc. */
+#define USB_PRODUCT_SILABS_TRACIENT 0x80dd /* Tracient RFID */
+#define USB_PRODUCT_SILABS_TRAQMATE 0x80ed /* Track Systems Traqmate */
+#define USB_PRODUCT_SILABS_SUUNTO 0x80f6 /* Suunto Sports Instrument */
+#define USB_PRODUCT_SILABS_ARYGON_MIFARE 0x8115 /* Arygon Mifare RFID reader */
+#define USB_PRODUCT_SILABS_BURNSIDE 0x813d /* Burnside Telecon Deskmobile */
+#define USB_PRODUCT_SILABS_TAMSMASTER 0x813f /* Tams Master Easy Control */
+#define USB_PRODUCT_SILABS_WMRBATT 0x814a /* WMR RIGblaster Plug&Play */
+#define USB_PRODUCT_SILABS_WMRRIGBLASTER 0x814a /* WMR RIGblaster Plug&Play */
+#define USB_PRODUCT_SILABS_WMRRIGTALK 0x814b /* WMR RIGtalk RT1 */
+#define USB_PRODUCT_SILABS_HELICOM 0x815e /* Helicomm IP-Link 1220-DVM */
+#define USB_PRODUCT_SILABS_AVIT_USB_TTL 0x818b /* AVIT Research USB-TTL */
+#define USB_PRODUCT_SILABS_MJS_TOSLINK 0x819f /* MJS USB-TOSLINK */
+#define USB_PRODUCT_SILABS_WAVIT 0x81a6 /* ThinkOptics WavIt */
+#define USB_PRODUCT_SILABS_MSD_DASHHAWK 0x81ac /* MSD DashHawk */
+#define USB_PRODUCT_SILABS_INSYS_MODEM 0x81ad /* INSYS Modem */
+#define USB_PRODUCT_SILABS_LIPOWSKY_JTAG 0x81c8 /* Lipowsky Baby-JTAG */
+#define USB_PRODUCT_SILABS_LIPOWSKY_LIN 0x81e2 /* Lipowsky Baby-LIN */
+#define USB_PRODUCT_SILABS_AEROCOMM 0x81e7 /* Aerocomm Radio */
+#define USB_PRODUCT_SILABS_ZEPHYR_BIO 0x81e8 /* Zephyr Bioharness */
+#define USB_PRODUCT_SILABS_EMS_C1007 0x81f2 /* EMS C1007 HF RFID controller */
+#define USB_PRODUCT_SILABS_LIPOWSKY_HARP 0x8218 /* Lipowsky HARP-1 */
+#define USB_PRODUCT_SILABS_C2_EDGE_MODEM 0x822b /* Commander 2 EDGE(GSM) Modem */
+#define USB_PRODUCT_SILABS_CYGNAL_GPS 0x826b /* Cygnal Fasttrax GPS */
+#define USB_PRODUCT_SILABS_TELEGESYS_ETRX2 0x8293 /* Telegesys ETRX2USB */
+#define USB_PRODUCT_SILABS_PROCYON_AVS 0x82f9 /* Procyon AVS */
+#define USB_PRODUCT_SILABS_MC35PU 0x8341 /* MC35pu */
+#define USB_PRODUCT_SILABS_CYGNAL 0x8382 /* Cygnal */
+#define USB_PRODUCT_SILABS_AMBER_AMB2560 0x83a8 /* Amber Wireless AMB2560 */
+#define USB_PRODUCT_SILABS_KYOCERA_GPS 0x8411 /* Kyocera GPS */
+#define USB_PRODUCT_SILABS_BEI_VCP 0x846e /* BEI USB Sensor (VCP) */
+#define USB_PRODUCT_SILABS_CP2102 0xea60 /* SILABS USB UART */
+#define USB_PRODUCT_SILABS_CP210X_2 0xea61 /* CP210x Serial */
+#define USB_PRODUCT_SILABS_INFINITY_MIC 0xea71 /* Infinity GPS-MIC-1 Radio Monophone */
+#define USB_PRODUCT_SILABS_USBSCOPE50 0xf001 /* USBscope50 */
+#define USB_PRODUCT_SILABS_USBWAVE12 0xf002 /* USBwave12 */
+#define USB_PRODUCT_SILABS_USBPULSE100 0xf003 /* USBpulse100 */
+#define USB_PRODUCT_SILABS_USBCOUNT50 0xf004 /* USBcount50 */
+#define USB_PRODUCT_SILABS2_DCU11CLONE 0xaa26 /* DCU-11 clone */
+#define USB_PRODUCT_SILABS3_GPRS_MODEM 0xea61 /* GPRS Modem */
+#define USB_PRODUCT_SILABS4_100EU_MODEM 0xea61 /* GPRS Modem 100EU */
+
+/* Silicon Portals Inc. */
+#define USB_PRODUCT_SILICONPORTALS_YAPPH_NF 0x0200 /* YAP Phone (no firmware) */
+#define USB_PRODUCT_SILICONPORTALS_YAPPHONE 0x0201 /* YAP Phone */
+
+/* Sirius Technologies products */
+#define USB_PRODUCT_SIRIUS_ROADSTER 0x0001 /* NetComm Roadster II 56 USB */
+
+/* Sitecom products */
+#define USB_PRODUCT_SITECOM_LN029 0x182d /* USB 2.0 Ethernet */
+#define USB_PRODUCT_SITECOM_SERIAL 0x2068 /* USB to serial cable (v2) */
+#define USB_PRODUCT_SITECOM2_WL022 0x182d /* WL-022 */
+
+/* Sitecom Europe products */
+#define USB_PRODUCT_SITECOMEU_RT2870_1 0x0017 /* RT2870 */
+#define USB_PRODUCT_SITECOMEU_WL168V1 0x000d /* WL-168 v1 */
+#define USB_PRODUCT_SITECOMEU_WL168V4 0x0028 /* WL-168 v4 */
+#define USB_PRODUCT_SITECOMEU_RT2870_2 0x002b /* RT2870 */
+#define USB_PRODUCT_SITECOMEU_RT2870_3 0x002c /* RT2870 */
+#define USB_PRODUCT_SITECOMEU_RT2870_4 0x002d /* RT2870 */
+#define USB_PRODUCT_SITECOMEU_RT2770 0x0039 /* RT2770 */
+#define USB_PRODUCT_SITECOMEU_RT3070_2 0x003b /* RT3070 */
+#define USB_PRODUCT_SITECOMEU_RT3070_3 0x003c /* RT3070 */
+#define USB_PRODUCT_SITECOMEU_RT3070_4 0x003d /* RT3070 */
+#define USB_PRODUCT_SITECOMEU_RT3070 0x003e /* RT3070 */
+#define USB_PRODUCT_SITECOMEU_WL608 0x003f /* WL-608 */
+#define USB_PRODUCT_SITECOMEU_RT3071 0x0040 /* RT3071 */
+#define USB_PRODUCT_SITECOMEU_RT3072_1 0x0041 /* RT3072 */
+#define USB_PRODUCT_SITECOMEU_RT3072_2 0x0042 /* RT3072 */
+#define USB_PRODUCT_SITECOMEU_RT3072_3 0x0047 /* RT3072 */
+#define USB_PRODUCT_SITECOMEU_RT3072_4 0x0048 /* RT3072 */
+#define USB_PRODUCT_SITECOMEU_RT3072_5 0x004a /* RT3072 */
+#define USB_PRODUCT_SITECOMEU_RT3072_6 0x004d /* RT3072 */
+#define USB_PRODUCT_SITECOMEU_LN028 0x061c /* LN-028 */
+#define USB_PRODUCT_SITECOMEU_WL113 0x9071 /* WL-113 */
+#define USB_PRODUCT_SITECOMEU_ZD1211B 0x9075 /* ZD1211B */
+#define USB_PRODUCT_SITECOMEU_WL172 0x90ac /* WL-172 */
+#define USB_PRODUCT_SITECOMEU_WL113R2 0x9712 /* WL-113 rev 2 */
+
+/* Skanhex Technology products */
+#define USB_PRODUCT_SKANHEX_MD_7425 0x410a /* MD 7425 Camera */
+#define USB_PRODUCT_SKANHEX_SX_520Z 0x5200 /* SX 520z Camera */
+
+/* Smart Technologies products */
+#define USB_PRODUCT_SMART_PL2303 0x2303 /* Serial adapter */
+
+/* SmartBridges products */
+#define USB_PRODUCT_SMARTBRIDGES_SMARTLINK 0x0001 /* SmartLink USB Ethernet */
+#define USB_PRODUCT_SMARTBRIDGES_SMARTNIC 0x0003 /* smartNIC 2 PnP Ethernet */
+
+/* SMC products */
+#define USB_PRODUCT_SMC_2102USB 0x0100 /* 10Mbps Ethernet */
+#define USB_PRODUCT_SMC_2202USB 0x0200 /* 10/100 Ethernet */
+#define USB_PRODUCT_SMC_2206USB 0x0201 /* EZ Connect USB Ethernet */
+#define USB_PRODUCT_SMC_2862WG 0xee13 /* EZ Connect Wireless Adapter */
+#define USB_PRODUCT_SMC2_2020HUB 0x2020 /* USB Hub */
+#define USB_PRODUCT_SMC2_2514HUB 0x2514 /* USB Hub */
+#define USB_PRODUCT_SMC3_2662WUSB 0xa002 /* 2662W-AR Wireless */
+
+/* SOHOware products */
+#define USB_PRODUCT_SOHOWARE_NUB100 0x9100 /* 10/100 USB Ethernet */
+#define USB_PRODUCT_SOHOWARE_NUB110 0x9110 /* 10/100 USB Ethernet */
+
+/* SOLID YEAR products */
+#define USB_PRODUCT_SOLIDYEAR_KEYBOARD 0x2101 /* Solid Year USB keyboard */
+
+/* SONY products */
+#define USB_PRODUCT_SONY_DSC 0x0010 /* DSC cameras */
+#define USB_PRODUCT_SONY_MS_NW_MS7 0x0025 /* Memorystick NW-MS7 */
+#define USB_PRODUCT_SONY_PORTABLE_HDD_V2 0x002b /* Portable USB Harddrive V2 */
+#define USB_PRODUCT_SONY_MSACUS1 0x002d /* Memorystick MSAC-US1 */
+#define USB_PRODUCT_SONY_HANDYCAM 0x002e /* Handycam */
+#define USB_PRODUCT_SONY_MSC 0x0032 /* MSC memory stick slot */
+#define USB_PRODUCT_SONY_CLIE_35 0x0038 /* Sony Clie v3.5 */
+#define USB_PRODUCT_SONY_MS_PEG_N760C 0x0058 /* PEG N760c Memorystick */
+#define USB_PRODUCT_SONY_CLIE_40 0x0066 /* Sony Clie v4.0 */
+#define USB_PRODUCT_SONY_MS_MSC_U03 0x0069 /* Memorystick MSC-U03 */
+#define USB_PRODUCT_SONY_CLIE_40_MS 0x006d /* Sony Clie v4.0 Memory Stick slot */
+#define USB_PRODUCT_SONY_CLIE_S360 0x0095 /* Sony Clie s360 */
+#define USB_PRODUCT_SONY_CLIE_41_MS 0x0099 /* Sony Clie v4.1 Memory Stick slot */
+#define USB_PRODUCT_SONY_CLIE_41 0x009a /* Sony Clie v4.1 */
+#define USB_PRODUCT_SONY_CLIE_NX60 0x00da /* Sony Clie nx60 */
+#define USB_PRODUCT_SONY_CLIE_TH55 0x0144 /* Sony Clie th55 */
+#define USB_PRODUCT_SONY_CLIE_TJ37 0x0169 /* Sony Clie tj37 */
+#define USB_PRODUCT_SONY_RF_RECEIVER 0x01db /* Sony RF mouse/kbd Receiver VGP-WRC1 */
+#define USB_PRODUCT_SONY_QN3 0x0437 /* Sony QN3 CMD-Jxx phone cable */
+
+/* Sony Ericsson products */
+#define USB_PRODUCT_SONYERICSSON_DCU10 0x0528 /* DCU-10 Phone Data Cable */
+#define USB_PRODUCT_SONYERICSSON_DATAPILOT 0x2003 /* Datapilot Phone Cable */
+
+/* SOURCENEXT products */
+#define USB_PRODUCT_SOURCENEXT_KEIKAI8 0x039f /* KeikaiDenwa 8 */
+#define USB_PRODUCT_SOURCENEXT_KEIKAI8_CHG 0x012e /* KeikaiDenwa 8 with charger */
+
+/* SparkLAN products */
+#define USB_PRODUCT_SPARKLAN_RT2573 0x0004 /* RT2573 */
+#define USB_PRODUCT_SPARKLAN_RT2870_1 0x0006 /* RT2870 */
+#define USB_PRODUCT_SPARKLAN_RT3070 0x0010 /* RT3070 */
+
+/* Speed Dragon Multimedia products */
+#define USB_PRODUCT_SPEEDDRAGON_MS3303H 0x110b /* MS3303H Serial */
+
+/* Sphairon Access Systems GmbH products */
+#define USB_PRODUCT_SPHAIRON_UB801R 0x0110 /* UB801R */
+
+/* Stelera Wireless products */
+#define USB_PRODUCT_STELERA_ZEROCD 0x1000 /* Zerocd Installer */
+#define USB_PRODUCT_STELERA_C105 0x1002 /* Stelera/Bandrish C105 USB */
+#define USB_PRODUCT_STELERA_E1003 0x1003 /* 3G modem */
+#define USB_PRODUCT_STELERA_E1004 0x1004 /* 3G modem */
+#define USB_PRODUCT_STELERA_E1005 0x1005 /* 3G modem */
+#define USB_PRODUCT_STELERA_E1006 0x1006 /* 3G modem */
+#define USB_PRODUCT_STELERA_E1007 0x1007 /* 3G modem */
+#define USB_PRODUCT_STELERA_E1008 0x1008 /* 3G modem */
+#define USB_PRODUCT_STELERA_E1009 0x1009 /* 3G modem */
+#define USB_PRODUCT_STELERA_E100A 0x100a /* 3G modem */
+#define USB_PRODUCT_STELERA_E100B 0x100b /* 3G modem */
+#define USB_PRODUCT_STELERA_E100C 0x100c /* 3G modem */
+#define USB_PRODUCT_STELERA_E100D 0x100d /* 3G modem */
+#define USB_PRODUCT_STELERA_E100E 0x100e /* 3G modem */
+#define USB_PRODUCT_STELERA_E100F 0x100f /* 3G modem */
+#define USB_PRODUCT_STELERA_E1010 0x1010 /* 3G modem */
+#define USB_PRODUCT_STELERA_E1011 0x1011 /* 3G modem */
+#define USB_PRODUCT_STELERA_E1012 0x1012 /* 3G modem */
+
+/* MpMan products */
+#define USB_PRODUCT_MPMAN_MPF400_1 0x36d0 /* MPF400 Music Player 1GB */
+#define USB_PRODUCT_MPMAN_MPF400_2 0x25a8 /* MPF400 Music Player 2GB */
+
+/* STMicroelectronics products */
+#define USB_PRODUCT_STMICRO_BIOCPU 0x2016 /* Biometric Coprocessor */
+#define USB_PRODUCT_STMICRO_COMMUNICATOR 0x7554 /* USB Communicator */
+
+/* STSN products */
+#define USB_PRODUCT_STSN_STSN0001 0x0001 /* Internet Access Device */
+
+/* SUN Corporation products */
+#define USB_PRODUCT_SUNTAC_DS96L 0x0003 /* SUNTAC U-Cable type D2 */
+#define USB_PRODUCT_SUNTAC_PS64P1 0x0005 /* SUNTAC U-Cable type P1 */
+#define USB_PRODUCT_SUNTAC_VS10U 0x0009 /* SUNTAC Slipper U */
+#define USB_PRODUCT_SUNTAC_IS96U 0x000a /* SUNTAC Ir-Trinity */
+#define USB_PRODUCT_SUNTAC_AS64LX 0x000b /* SUNTAC U-Cable type A3 */
+#define USB_PRODUCT_SUNTAC_AS144L4 0x0011 /* SUNTAC U-Cable type A4 */
+
+/* Sun Microsystems products */
+#define USB_PRODUCT_SUN_KEYBOARD_TYPE_6 0x0005 /* Type 6 USB keyboard */
+#define USB_PRODUCT_SUN_KEYBOARD_TYPE_7 0x00a2 /* Type 7 USB keyboard */
+/* XXX The above is possibly a North American PC-style keyboard */
+#define USB_PRODUCT_SUN_MOUSE 0x0100 /* Type 6 USB mouse */
+#define USB_PRODUCT_SUN_KBD_HUB 0x100e /* Kbd Hub */
+
+/* Super Top products */
+#define USB_PRODUCT_SUPERTOP_IDE 0x6600 /* USB-IDE */
+
+/* Syntech products */
+#define USB_PRODUCT_SYNTECH_CPT8001C 0x0001 /* CPT-8001C Barcode scanner */
+#define USB_PRODUCT_SYNTECH_CYPHERLAB100 0x1000 /* CipherLab USB Barcode Scanner */
+
+/* Teclast products */
+#define USB_PRODUCT_TECLAST_TLC300 0x3203 /* USB Media Player */
+
+/* Supra products */
+#define USB_PRODUCT_DIAMOND2_SUPRAEXPRESS56K 0x07da /* Supra Express 56K modem */
+#define USB_PRODUCT_DIAMOND2_SUPRA2890 0x0b4a /* SupraMax 2890 56K Modem */
+#define USB_PRODUCT_DIAMOND2_RIO600USB 0x5001 /* Rio 600 USB */
+#define USB_PRODUCT_DIAMOND2_RIO800USB 0x5002 /* Rio 800 USB */
+
+/* Surecom Technology products */
+#define USB_PRODUCT_SURECOM_EP9001G2A 0x11f2 /* EP-9001-G rev 2A */
+#define USB_PRODUCT_SURECOM_RT2570 0x11f3 /* RT2570 */
+#define USB_PRODUCT_SURECOM_RT2573 0x31f3 /* RT2573 */
+
+/* Sweex products */
+#define USB_PRODUCT_SWEEX_ZD1211 0x1809 /* ZD1211 */
+#define USB_PRODUCT_SWEEX2_LW153 0x0153 /* LW153 */
+#define USB_PRODUCT_SWEEX2_LW303 0x0302 /* LW303 */
+#define USB_PRODUCT_SWEEX2_LW313 0x0313 /* LW313 */
+
+/* System TALKS, Inc. */
+#define USB_PRODUCT_SYSTEMTALKS_SGCX2UL 0x1920 /* SGC-X2UL */
+
+/* Tapwave products */
+#define USB_PRODUCT_TAPWAVE_ZODIAC 0x0100 /* Zodiac */
+
+/* Taugagreining products */
+#define USB_PRODUCT_TAUGA_CAMERAMATE 0x0005 /* CameraMate (DPCM_USB) */
+
+/* TCTMobile products */
+#define USB_PRODUCT_TCTMOBILE_X060S 0x0000 /* X060S 3G modem */
+#define USB_PRODUCT_TCTMOBILE_X080S 0xf000 /* X080S 3G modem */
+
+/* TDK products */
+#define USB_PRODUCT_TDK_UPA9664 0x0115 /* USB-PDC Adapter UPA9664 */
+#define USB_PRODUCT_TDK_UCA1464 0x0116 /* USB-cdmaOne Adapter UCA1464 */
+#define USB_PRODUCT_TDK_UHA6400 0x0117 /* USB-PHS Adapter UHA6400 */
+#define USB_PRODUCT_TDK_UPA6400 0x0118 /* USB-PHS Adapter UPA6400 */
+#define USB_PRODUCT_TDK_BT_DONGLE 0x0309 /* Bluetooth USB dongle */
+
+/* TEAC products */
+#define USB_PRODUCT_TEAC_FD05PUB 0x0000 /* FD-05PUB floppy */
+
+/* Tekram Technology products */
+#define USB_PRODUCT_TEKRAM_QUICKWLAN 0x1630 /* QuickWLAN */
+#define USB_PRODUCT_TEKRAM_ZD1211_1 0x5630 /* ZD1211 */
+#define USB_PRODUCT_TEKRAM_ZD1211_2 0x6630 /* ZD1211 */
+
+/* Telex Communications products */
+#define USB_PRODUCT_TELEX_MIC1 0x0001 /* Enhanced USB Microphone */
+
+/* Telit products */
+#define USB_PRODUCT_TELIT_UC864E 0x1003 /* UC864E 3G modem */
+#define USB_PRODUCT_TELIT_UC864G 0x1004 /* UC864G 3G modem */
+
+/* Ten X Technology, Inc. */
+#define USB_PRODUCT_TENX_UAUDIO0 0xf211 /* USB audio headset */
+
+/* Texas Instruments products */
+#define USB_PRODUCT_TI_UTUSB41 0x1446 /* UT-USB41 hub */
+#define USB_PRODUCT_TI_TUSB2046 0x2046 /* TUSB2046 hub */
+
+/* Thrustmaster products */
+#define USB_PRODUCT_THRUST_FUSION_PAD 0xa0a3 /* Fusion Digital Gamepad */
+
+/* TLayTech products */
+#define USB_PRODUCT_TLAYTECH_TEU800 0x1682 /* TEU800 3G modem */
+
+/* Topre Corporation products */
+#define USB_PRODUCT_TOPRE_HHKB 0x0100 /* HHKB Professional */
+
+/* Toshiba Corporation products */
+#define USB_PRODUCT_TOSHIBA_POCKETPC_E740 0x0706 /* PocketPC e740 */
+#define USB_PRODUCT_TOSHIBA_RT3070 0x0a07 /* RT3070 */
+#define USB_PRODUCT_TOSHIBA_G450 0x0d45 /* G450 modem */
+#define USB_PRODUCT_TOSHIBA_HSDPA 0x1302 /* G450 modem */
+
+/* Trek Technology products */
+#define USB_PRODUCT_TREK_THUMBDRIVE 0x1111 /* ThumbDrive */
+#define USB_PRODUCT_TREK_MEMKEY 0x8888 /* IBM USB Memory Key */
+#define USB_PRODUCT_TREK_THUMBDRIVE_8MB 0x9988 /* ThumbDrive 8MB */
+
+/* Tripp-Lite products */
+#define USB_PRODUCT_TRIPPLITE_U209 0x2008 /* Serial */
+
+/* Trumpion products */
+#define USB_PRODUCT_TRUMPION_T33520 0x1001 /* T33520 USB Flash Card Controller */
+#define USB_PRODUCT_TRUMPION_C3310 0x1100 /* Comotron C3310 MP3 player */
+#define USB_PRODUCT_TRUMPION_MP3 0x1200 /* MP3 player */
+
+/* TwinMOS */
+#define USB_PRODUCT_TWINMOS_G240 0xa006 /* G240 */
+#define USB_PRODUCT_TWINMOS_MDIV 0x1325 /* Memory Disk IV */
+
+/* Ubiquam products */
+#define USB_PRODUCT_UBIQUAM_UALL 0x3100 /* CDMA 1xRTT USB Modem (U-100/105/200/300/520) */
+
+/* Ultima products */
+#define USB_PRODUCT_ULTIMA_1200UBPLUS 0x4002 /* 1200 UB Plus scanner */
+
+/* UMAX products */
+#define USB_PRODUCT_UMAX_ASTRA1236U 0x0002 /* Astra 1236U Scanner */
+#define USB_PRODUCT_UMAX_ASTRA1220U 0x0010 /* Astra 1220U Scanner */
+#define USB_PRODUCT_UMAX_ASTRA2000U 0x0030 /* Astra 2000U Scanner */
+#define USB_PRODUCT_UMAX_ASTRA2100U 0x0130 /* Astra 2100U Scanner */
+#define USB_PRODUCT_UMAX_ASTRA2200U 0x0230 /* Astra 2200U Scanner */
+#define USB_PRODUCT_UMAX_ASTRA3400 0x0060 /* Astra 3400 Scanner */
+
+/* U-MEDIA Communications products */
+#define USB_PRODUCT_UMEDIA_TEW444UBEU 0x3006 /* TEW-444UB EU */
+#define USB_PRODUCT_UMEDIA_TEW444UBEU_NF 0x3007 /* TEW-444UB EU (no firmware) */
+#define USB_PRODUCT_UMEDIA_TEW429UB_A 0x300a /* TEW-429UB_A */
+#define USB_PRODUCT_UMEDIA_TEW429UB 0x300b /* TEW-429UB */
+#define USB_PRODUCT_UMEDIA_TEW429UBC1 0x300d /* TEW-429UB C1 */
+#define USB_PRODUCT_UMEDIA_RT2870_1 0x300e /* RT2870 */
+#define USB_PRODUCT_UMEDIA_ALL0298V2 0x3204 /* ALL0298 v2 */
+#define USB_PRODUCT_UMEDIA_AR5523_2 0x3205 /* AR5523 */
+#define USB_PRODUCT_UMEDIA_AR5523_2_NF 0x3206 /* AR5523 (no firmware) */
+
+/* Universal Access products */
+#define USB_PRODUCT_UNIACCESS_PANACHE 0x0101 /* Panache Surf USB ISDN Adapter */
+
+/* USI products */
+#define USB_PRODUCT_USI_MC60 0x10c5 /* MC60 Serial */
+
+/* U.S. Robotics products */
+#define USB_PRODUCT_USR_USR5422 0x0118 /* USR5422 WLAN */
+#define USB_PRODUCT_USR_USR5423 0x0121 /* USR5423 WLAN */
+
+/* VIA Technologies products */
+#define USB_PRODUCT_VIA_USB2IDEBRIDGE 0x6204 /* USB 2.0 IDE Bridge */
+
+/* Vaisala products */
+#define USB_PRODUCT_VAISALA_CABLE 0x0200 /* USB Interface cable */
+
+/* VidzMedia products */
+#define USB_PRODUCT_VIDZMEDIA_MONSTERTV 0x4fb1 /* MonsterTV P2H */
+
+/* Vision products */
+#define USB_PRODUCT_VISION_VC6452V002 0x0002 /* CPiA Camera */
+
+/* Visioneer products */
+#define USB_PRODUCT_VISIONEER_7600 0x0211 /* OneTouch 7600 */
+#define USB_PRODUCT_VISIONEER_5300 0x0221 /* OneTouch 5300 */
+#define USB_PRODUCT_VISIONEER_3000 0x0224 /* Scanport 3000 */
+#define USB_PRODUCT_VISIONEER_6100 0x0231 /* OneTouch 6100 */
+#define USB_PRODUCT_VISIONEER_6200 0x0311 /* OneTouch 6200 */
+#define USB_PRODUCT_VISIONEER_8100 0x0321 /* OneTouch 8100 */
+#define USB_PRODUCT_VISIONEER_8600 0x0331 /* OneTouch 8600 */
+
+/* Vivitar products */
+#define USB_PRODUCT_VIVITAR_35XX 0x0003 /* Vivicam 35Xx */
+
+/* VTech products */
+#define USB_PRODUCT_VTECH_RT2570 0x3012 /* RT2570 */
+#define USB_PRODUCT_VTECH_ZD1211B 0x3014 /* ZD1211B */
+
+/* Wacom products */
+#define USB_PRODUCT_WACOM_CT0405U 0x0000 /* CT-0405-U Tablet */
+#define USB_PRODUCT_WACOM_GRAPHIRE 0x0010 /* Graphire */
+#define USB_PRODUCT_WACOM_GRAPHIRE3_4X5 0x0013 /* Graphire 3 4x5 */
+#define USB_PRODUCT_WACOM_INTUOSA5 0x0021 /* Intuos A5 */
+#define USB_PRODUCT_WACOM_GD0912U 0x0022 /* Intuos 9x12 Graphics Tablet */
+
+/* WaveSense products */
+#define USB_PRODUCT_WAVESENSE_JAZZ 0xaaaa /* Jazz blood glucose meter */
+
+/* WCH products */
+#define USB_PRODUCT_WCH_CH341SER 0x5523 /* CH341/CH340 USB-Serial Bridge */
+#define USB_PRODUCT_WCH2_CH341SER 0x7523 /* CH341/CH340 USB-Serial Bridge */
+
+/* Western Digital products */
+#define USB_PRODUCT_WESTERN_COMBO 0x0200 /* Firewire USB Combo */
+#define USB_PRODUCT_WESTERN_EXTHDD 0x0400 /* External HDD */
+#define USB_PRODUCT_WESTERN_HUB 0x0500 /* USB HUB */
+#define USB_PRODUCT_WESTERN_MYBOOK 0x0901 /* MyBook External HDD */
+#define USB_PRODUCT_WESTERN_MYPASSWORD 0x0704 /* MyPassword External HDD */
+
+/* Winbond Electronics */
+#define USB_PRODUCT_WINBOND_UH104 0x5518 /* 4-port USB Hub */
+
+/* WinMaxGroup products */
+#define USB_PRODUCT_WINMAXGROUP_FLASH64MC 0x6660 /* USB Flash Disk 64M-C */
+
+/* Wistron NeWeb products */
+#define USB_PRODUCT_WISTRONNEWEB_UR045G 0x0427 /* PrismGT USB 2.0 WLAN */
+#define USB_PRODUCT_WISTRONNEWEB_UR055G 0x0711 /* UR055G */
+#define USB_PRODUCT_WISTRONNEWEB_AR5523_1 0x0826 /* AR5523 */
+#define USB_PRODUCT_WISTRONNEWEB_AR5523_1_NF 0x0827 /* AR5523 (no firmware) */
+#define USB_PRODUCT_WISTRONNEWEB_AR5523_2 0x082a /* AR5523 */
+#define USB_PRODUCT_WISTRONNEWEB_AR5523_2_NF 0x0829 /* AR5523 (no firmware) */
+
+/* Xerox products */
+#define USB_PRODUCT_XEROX_WCM15 0xffef /* WorkCenter M15 */
+
+/* Xirlink products */
+#define USB_PRODUCT_XIRLINK_PCCAM 0x8080 /* IBM PC Camera */
+
+/* Xyratex products */
+#define USB_PRODUCT_XYRATEX_PRISM_GT_1 0x2000 /* PrismGT USB 2.0 WLAN */
+#define USB_PRODUCT_XYRATEX_PRISM_GT_2 0x2002 /* PrismGT USB 2.0 WLAN */
+
+/* Yamaha products */
+#define USB_PRODUCT_YAMAHA_UX256 0x1000 /* UX256 MIDI I/F */
+#define USB_PRODUCT_YAMAHA_UX96 0x1008 /* UX96 MIDI I/F */
+#define USB_PRODUCT_YAMAHA_RTA54I 0x4000 /* NetVolante RTA54i Broadband&ISDN Router */
+#define USB_PRODUCT_YAMAHA_RTA55I 0x4004 /* NetVolante RTA55i Broadband VoIP Router */
+#define USB_PRODUCT_YAMAHA_RTW65B 0x4001 /* NetVolante RTW65b Broadband Wireless Router */
+#define USB_PRODUCT_YAMAHA_RTW65I 0x4002 /* NetVolante RTW65i Broadband&ISDN Wireless Router */
+
+/* Yano products */
+#define USB_PRODUCT_YANO_U640MO 0x0101 /* U640MO-03 */
+#define USB_PRODUCT_YANO_FW800HD 0x05fc /* METALWEAR-HDD */
+
+/* Y.C. Cable products */
+#define USB_PRODUCT_YCCABLE_PL2303 0x0fba /* PL2303 Serial */
+
+/* Y-E Data products */
+#define USB_PRODUCT_YEDATA_FLASHBUSTERU 0x0000 /* Flashbuster-U */
+
+/* Yiso Wireless Co. products */
+#define USB_PRODUCT_YISO_C893 0xc893 /* CDMA 2000 1xEVDO PC Card */
+
+/* Z-Com products */
+#define USB_PRODUCT_ZCOM_M4Y750 0x0001 /* M4Y-750 */
+#define USB_PRODUCT_ZCOM_XI725 0x0002 /* XI-725/726 */
+#define USB_PRODUCT_ZCOM_XI735 0x0005 /* XI-735 */
+#define USB_PRODUCT_ZCOM_XG703A 0x0008 /* PrismGT USB 2.0 WLAN */
+#define USB_PRODUCT_ZCOM_ZD1211 0x0011 /* ZD1211 */
+#define USB_PRODUCT_ZCOM_AR5523 0x0012 /* AR5523 */
+#define USB_PRODUCT_ZCOM_AR5523_NF 0x0013 /* AR5523 driver (no firmware) */
+#define USB_PRODUCT_ZCOM_XM142 0x0015 /* XM-142 */
+#define USB_PRODUCT_ZCOM_ZD1211B 0x001a /* ZD1211B */
+#define USB_PRODUCT_ZCOM_RT2870_1 0x0022 /* RT2870 */
+#define USB_PRODUCT_ZCOM_RT2870_2 0x0025 /* RT2870 */
+
+/* Zinwell products */
+#define USB_PRODUCT_ZINWELL_RT2570 0x0260 /* RT2570 */
+#define USB_PRODUCT_ZINWELL_RT2870_1 0x0280 /* RT2870 */
+#define USB_PRODUCT_ZINWELL_RT2870_2 0x0282 /* RT2870 */
+#define USB_PRODUCT_ZINWELL_RT3072_1 0x0283 /* RT3072 */
+#define USB_PRODUCT_ZINWELL_RT3072_2 0x0284 /* RT3072 */
+#define USB_PRODUCT_ZINWELL_RT3070 0x5257 /* RT3070 */
+
+/* Zoom Telephonics, Inc. products */
+#define USB_PRODUCT_ZOOM_2986L 0x9700 /* 2986L Fax modem */
+
+/* Zoran Microelectronics products */
+#define USB_PRODUCT_ZORAN_EX20DSC 0x4343 /* Digital Camera EX-20 DSC */
+
+/* Zydas Technology Corporation products */
+#define USB_PRODUCT_ZYDAS_ZD1211 0x1211 /* ZD1211 WLAN abg */
+#define USB_PRODUCT_ZYDAS_ZD1211B 0x1215 /* ZD1211B */
+
+/* ZyXEL Communication Co. products */
+#define USB_PRODUCT_ZYXEL_OMNI56K 0x1500 /* Omni 56K Plus */
+#define USB_PRODUCT_ZYXEL_980N 0x2011 /* Scorpion-980N keyboard */
+#define USB_PRODUCT_ZYXEL_ZYAIRG220 0x3401 /* ZyAIR G-220 */
+#define USB_PRODUCT_ZYXEL_G200V2 0x3407 /* G-200 v2 */
+#define USB_PRODUCT_ZYXEL_AG225H 0x3409 /* AG-225H */
+#define USB_PRODUCT_ZYXEL_M202 0x340a /* M-202 */
+#define USB_PRODUCT_ZYXEL_G220V2 0x340f /* G-220 v2 */
+#define USB_PRODUCT_ZYXEL_G202 0x3410 /* G-202 */
+#define USB_PRODUCT_ZYXEL_RT2870_1 0x3416 /* RT2870 */
+#define USB_PRODUCT_ZYXEL_RT2870_2 0x341a /* RT2870 */
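
The macros above only name vendor/product ID pairs; the human-readable strings live in the generated usb_knowndevs table in usbdevs_data.h, added below. As a minimal sketch of how such a table can be consumed — assuming the field layout visible in the initializers (vendor ID, product ID, flags word, vendor name, product name), a NULL-terminated sentinel entry as in the FreeBSD original, and a hypothetical find_usb_device() helper that is not part of the imported sources — a lookup reduces to a linear scan:

#include <stddef.h>
#include <stdint.h>

/*
 * Field layout assumed from the initializers in usbdevs_data.h:
 * numeric vendor/product IDs, a flags word (0 in the entries shown),
 * and the two display strings.
 */
struct usb_knowndev {
	uint16_t	vendor;
	uint16_t	product;
	int		flags;
	const char	*vendorname;
	const char	*productname;
};

extern const struct usb_knowndev usb_knowndevs[];	/* usbdevs_data.h */

/*
 * Hypothetical helper: walk the generated table until the sentinel
 * entry (assumed to carry a NULL vendorname) and return the first
 * exact vendor/product match, or NULL for an unknown device.
 */
static const struct usb_knowndev *
find_usb_device(uint16_t vendor, uint16_t product)
{
	const struct usb_knowndev *kd;

	for (kd = usb_knowndevs; kd->vendorname != NULL; kd++) {
		if (kd->vendor == vendor && kd->product == product)
			return (kd);
	}
	return (NULL);
}

Under those assumptions, find_usb_device(USB_VENDOR_SILABS, USB_PRODUCT_SILABS_CP2102) would return the entry whose product string matches the CP2102 comment above ("SILABS USB UART").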
diff --git a/rtems/freebsd/local/usbdevs_data.h b/rtems/freebsd/local/usbdevs_data.h
new file mode 100644
index 00000000..6bd20f55
--- /dev/null
+++ b/rtems/freebsd/local/usbdevs_data.h
@@ -0,0 +1,15530 @@
+/* ??? */
+
+/*
+ * THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+ *
+ * generated from:
+ * FreeBSD
+ */
+/* $NetBSD: usbdevs,v 1.392 2004/12/29 08:38:44 imp Exp $ */
+
+/*-
+ * Copyright (c) 1998-2004 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Lennart Augustsson (lennart@augustsson.net) at
+ * Carlstedt Research & Technology.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the NetBSD
+ * Foundation, Inc. and its contributors.
+ * 4. Neither the name of The NetBSD Foundation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+const struct usb_knowndev usb_knowndevs[] = {
+ {
+ USB_VENDOR_3COM, USB_PRODUCT_3COM_HOMECONN,
+ 0,
+ "3Com",
+ "HomeConnect Camera",
+ },
+ {
+ USB_VENDOR_3COM, USB_PRODUCT_3COM_3CREB96,
+ 0,
+ "3Com",
+ "Bluetooth USB Adapter",
+ },
+ {
+ USB_VENDOR_3COM, USB_PRODUCT_3COM_3C19250,
+ 0,
+ "3Com",
+ "3C19250 Ethernet Adapter",
+ },
+ {
+ USB_VENDOR_3COM, USB_PRODUCT_3COM_3CRSHEW696,
+ 0,
+ "3Com",
+ "3CRSHEW696 Wireless Adapter",
+ },
+ {
+ USB_VENDOR_3COM, USB_PRODUCT_3COM_3C460,
+ 0,
+ "3Com",
+ "HomeConnect 3C460",
+ },
+ {
+ USB_VENDOR_3COM, USB_PRODUCT_3COM_USR56K,
+ 0,
+ "3Com",
+ "U.S.Robotics 56000 Voice FaxModem Pro",
+ },
+ {
+ USB_VENDOR_3COM, USB_PRODUCT_3COM_3C460B,
+ 0,
+ "3Com",
+ "HomeConnect 3C460B",
+ },
+ {
+ USB_VENDOR_3COM2, USB_PRODUCT_3COM2_3CRUSB10075,
+ 0,
+ "3Com",
+ "3CRUSB10075",
+ },
+ {
+ USB_VENDOR_3COM3, USB_PRODUCT_3COM3_AR5523_1,
+ 0,
+ "3Com",
+ "AR5523",
+ },
+ {
+ USB_VENDOR_3COM3, USB_PRODUCT_3COM3_AR5523_2,
+ 0,
+ "3Com",
+ "AR5523",
+ },
+ {
+ USB_VENDOR_3COM3, USB_PRODUCT_3COM3_AR5523_3,
+ 0,
+ "3Com",
+ "AR5523",
+ },
+ {
+ USB_VENDOR_3COMUSR, USB_PRODUCT_3COMUSR_OFFICECONN,
+ 0,
+ "U.S. Robotics",
+ "3Com OfficeConnect Analog Modem",
+ },
+ {
+ USB_VENDOR_3COMUSR, USB_PRODUCT_3COMUSR_USRISDN,
+ 0,
+ "U.S. Robotics",
+ "3Com U.S. Robotics Pro ISDN TA",
+ },
+ {
+ USB_VENDOR_3COMUSR, USB_PRODUCT_3COMUSR_HOMECONN,
+ 0,
+ "U.S. Robotics",
+ "3Com HomeConnect Camera",
+ },
+ {
+ USB_VENDOR_3COMUSR, USB_PRODUCT_3COMUSR_USR56K,
+ 0,
+ "U.S. Robotics",
+ "U.S. Robotics 56000 Voice FaxModem Pro",
+ },
+ {
+ USB_VENDOR_ABOCOM, USB_PRODUCT_ABOCOM_XX1,
+ 0,
+ "AboCom Systems",
+ "XX1",
+ },
+ {
+ USB_VENDOR_ABOCOM, USB_PRODUCT_ABOCOM_XX2,
+ 0,
+ "AboCom Systems",
+ "XX2",
+ },
+ {
+ USB_VENDOR_ABOCOM, USB_PRODUCT_ABOCOM_RT2770,
+ 0,
+ "AboCom Systems",
+ "RT2770",
+ },
+ {
+ USB_VENDOR_ABOCOM, USB_PRODUCT_ABOCOM_RT2870,
+ 0,
+ "AboCom Systems",
+ "RT2870",
+ },
+ {
+ USB_VENDOR_ABOCOM, USB_PRODUCT_ABOCOM_RT3070,
+ 0,
+ "AboCom Systems",
+ "RT3070",
+ },
+ {
+ USB_VENDOR_ABOCOM, USB_PRODUCT_ABOCOM_RT3071,
+ 0,
+ "AboCom Systems",
+ "RT3071",
+ },
+ {
+ USB_VENDOR_ABOCOM, USB_PRODUCT_ABOCOM_RT3072,
+ 0,
+ "AboCom Systems",
+ "RT3072",
+ },
+ {
+ USB_VENDOR_ABOCOM2, USB_PRODUCT_ABOCOM2_RT2870_1,
+ 0,
+ "AboCom Systems",
+ "RT2870",
+ },
+ {
+ USB_VENDOR_ABOCOM, USB_PRODUCT_ABOCOM_URE450,
+ 0,
+ "AboCom Systems",
+ "URE450 Ethernet Adapter",
+ },
+ {
+ USB_VENDOR_ABOCOM, USB_PRODUCT_ABOCOM_UFE1000,
+ 0,
+ "AboCom Systems",
+ "UFE1000 Fast Ethernet Adapter",
+ },
+ {
+ USB_VENDOR_ABOCOM, USB_PRODUCT_ABOCOM_DSB650TX_PNA,
+ 0,
+ "AboCom Systems",
+ "1/10/100 Ethernet Adapter",
+ },
+ {
+ USB_VENDOR_ABOCOM, USB_PRODUCT_ABOCOM_XX4,
+ 0,
+ "AboCom Systems",
+ "XX4",
+ },
+ {
+ USB_VENDOR_ABOCOM, USB_PRODUCT_ABOCOM_XX5,
+ 0,
+ "AboCom Systems",
+ "XX5",
+ },
+ {
+ USB_VENDOR_ABOCOM, USB_PRODUCT_ABOCOM_XX6,
+ 0,
+ "AboCom Systems",
+ "XX6",
+ },
+ {
+ USB_VENDOR_ABOCOM, USB_PRODUCT_ABOCOM_XX7,
+ 0,
+ "AboCom Systems",
+ "XX7",
+ },
+ {
+ USB_VENDOR_ABOCOM, USB_PRODUCT_ABOCOM_RTL8151,
+ 0,
+ "AboCom Systems",
+ "RTL8151",
+ },
+ {
+ USB_VENDOR_ABOCOM, USB_PRODUCT_ABOCOM_XX8,
+ 0,
+ "AboCom Systems",
+ "XX8",
+ },
+ {
+ USB_VENDOR_ABOCOM, USB_PRODUCT_ABOCOM_XX9,
+ 0,
+ "AboCom Systems",
+ "XX9",
+ },
+ {
+ USB_VENDOR_ABOCOM, USB_PRODUCT_ABOCOM_UF200,
+ 0,
+ "AboCom Systems",
+ "UF200 Ethernet",
+ },
+ {
+ USB_VENDOR_ABOCOM, USB_PRODUCT_ABOCOM_WL54,
+ 0,
+ "AboCom Systems",
+ "WL54",
+ },
+ {
+ USB_VENDOR_ABOCOM, USB_PRODUCT_ABOCOM_XX10,
+ 0,
+ "AboCom Systems",
+ "XX10",
+ },
+ {
+ USB_VENDOR_ABOCOM, USB_PRODUCT_ABOCOM_BWU613,
+ 0,
+ "AboCom Systems",
+ "BWU613",
+ },
+ {
+ USB_VENDOR_ABOCOM, USB_PRODUCT_ABOCOM_HWU54DM,
+ 0,
+ "AboCom Systems",
+ "HWU54DM",
+ },
+ {
+ USB_VENDOR_ABOCOM, USB_PRODUCT_ABOCOM_RT2573_2,
+ 0,
+ "AboCom Systems",
+ "RT2573",
+ },
+ {
+ USB_VENDOR_ABOCOM, USB_PRODUCT_ABOCOM_RT2573_3,
+ 0,
+ "AboCom Systems",
+ "RT2573",
+ },
+ {
+ USB_VENDOR_ABOCOM, USB_PRODUCT_ABOCOM_RT2573_4,
+ 0,
+ "AboCom Systems",
+ "RT2573",
+ },
+ {
+ USB_VENDOR_ABOCOM, USB_PRODUCT_ABOCOM_WUG2700,
+ 0,
+ "AboCom Systems",
+ "WUG2700",
+ },
+ {
+ USB_VENDOR_ACCTON, USB_PRODUCT_ACCTON_USB320_EC,
+ 0,
+ "Accton Technology",
+ "USB320-EC Ethernet Adapter",
+ },
+ {
+ USB_VENDOR_ACCTON, USB_PRODUCT_ACCTON_2664W,
+ 0,
+ "Accton Technology",
+ "2664W",
+ },
+ {
+ USB_VENDOR_ACCTON, USB_PRODUCT_ACCTON_111,
+ 0,
+ "Accton Technology",
+ "T-Sinus 111 Wireless Adapter",
+ },
+ {
+ USB_VENDOR_ACCTON, USB_PRODUCT_ACCTON_SMCWUSBG_NF,
+ 0,
+ "Accton Technology",
+ "SMCWUSB-G (no firmware)",
+ },
+ {
+ USB_VENDOR_ACCTON, USB_PRODUCT_ACCTON_SMCWUSBG,
+ 0,
+ "Accton Technology",
+ "SMCWUSB-G",
+ },
+ {
+ USB_VENDOR_ACCTON, USB_PRODUCT_ACCTON_SMCWUSBTG2_NF,
+ 0,
+ "Accton Technology",
+ "SMCWUSBT-G2 (no firmware)",
+ },
+ {
+ USB_VENDOR_ACCTON, USB_PRODUCT_ACCTON_SMCWUSBTG2,
+ 0,
+ "Accton Technology",
+ "SMCWUSBT-G2",
+ },
+ {
+ USB_VENDOR_ACCTON, USB_PRODUCT_ACCTON_PRISM_GT,
+ 0,
+ "Accton Technology",
+ "PrismGT USB 2.0 WLAN",
+ },
+ {
+ USB_VENDOR_ACCTON, USB_PRODUCT_ACCTON_SS1001,
+ 0,
+ "Accton Technology",
+ "SpeedStream Ethernet Adapter",
+ },
+ {
+ USB_VENDOR_ACCTON, USB_PRODUCT_ACCTON_RT2870_2,
+ 0,
+ "Accton Technology",
+ "RT2870",
+ },
+ {
+ USB_VENDOR_ACCTON, USB_PRODUCT_ACCTON_RT3070,
+ 0,
+ "Accton Technology",
+ "RT3070",
+ },
+ {
+ USB_VENDOR_ACCTON, USB_PRODUCT_ACCTON_RT2770,
+ 0,
+ "Accton Technology",
+ "RT2770",
+ },
+ {
+ USB_VENDOR_ACCTON, USB_PRODUCT_ACCTON_RT2870_3,
+ 0,
+ "Accton Technology",
+ "RT2870",
+ },
+ {
+ USB_VENDOR_ACCTON, USB_PRODUCT_ACCTON_RT2870_5,
+ 0,
+ "Accton Technology",
+ "RT2870",
+ },
+ {
+ USB_VENDOR_ACCTON, USB_PRODUCT_ACCTON_RT3070_4,
+ 0,
+ "Accton Technology",
+ "RT3070",
+ },
+ {
+ USB_VENDOR_ACCTON, USB_PRODUCT_ACCTON_RT2870_4,
+ 0,
+ "Accton Technology",
+ "RT2870",
+ },
+ {
+ USB_VENDOR_ACCTON, USB_PRODUCT_ACCTON_RT3070_1,
+ 0,
+ "Accton Technology",
+ "RT3070",
+ },
+ {
+ USB_VENDOR_ACCTON, USB_PRODUCT_ACCTON_RT3070_2,
+ 0,
+ "Accton Technology",
+ "RT3070",
+ },
+ {
+ USB_VENDOR_ACCTON, USB_PRODUCT_ACCTON_RT2870_1,
+ 0,
+ "Accton Technology",
+ "RT2870",
+ },
+ {
+ USB_VENDOR_ACCTON, USB_PRODUCT_ACCTON_RT3070_3,
+ 0,
+ "Accton Technology",
+ "RT3070",
+ },
+ {
+ USB_VENDOR_ACCTON, USB_PRODUCT_ACCTON_RT3070_5,
+ 0,
+ "Accton Technology",
+ "RT3070",
+ },
+ {
+ USB_VENDOR_ACCTON, USB_PRODUCT_ACCTON_ZD1211B,
+ 0,
+ "Accton Technology",
+ "ZD1211B",
+ },
+ {
+ USB_VENDOR_ACEECA, USB_PRODUCT_ACEECA_MEZ1000,
+ 0,
+ "Aceeca",
+ "MEZ1000 RDA",
+ },
+ {
+ USB_VENDOR_ACERCM, USB_PRODUCT_ACERCM_EP1427X2,
+ 0,
+ "Acer Communications & Multimedia",
+ "EP-1427X-2 Ethernet Adapter",
+ },
+ {
+ USB_VENDOR_ACERLABS, USB_PRODUCT_ACERLABS_M5632,
+ 0,
+ "Acer Labs",
+ "USB 2.0 Data Link",
+ },
+ {
+ USB_VENDOR_ACERP, USB_PRODUCT_ACERP_ACERSCAN_C310U,
+ 0,
+ "Acer Peripherals",
+ "Acerscan C310U",
+ },
+ {
+ USB_VENDOR_ACERP, USB_PRODUCT_ACERP_ACERSCAN_320U,
+ 0,
+ "Acer Peripherals",
+ "Acerscan 320U",
+ },
+ {
+ USB_VENDOR_ACERP, USB_PRODUCT_ACERP_ACERSCAN_640U,
+ 0,
+ "Acer Peripherals",
+ "Acerscan 640U",
+ },
+ {
+ USB_VENDOR_ACERP, USB_PRODUCT_ACERP_ACERSCAN_620U,
+ 0,
+ "Acer Peripherals",
+ "Acerscan 620U",
+ },
+ {
+ USB_VENDOR_ACERP, USB_PRODUCT_ACERP_ACERSCAN_4300U,
+ 0,
+ "Acer Peripherals",
+ "Benq 3300U/4300U",
+ },
+ {
+ USB_VENDOR_ACERP, USB_PRODUCT_ACERP_ACERSCAN_640BT,
+ 0,
+ "Acer Peripherals",
+ "Acerscan 640BT",
+ },
+ {
+ USB_VENDOR_ACERP, USB_PRODUCT_ACERP_ACERSCAN_1240U,
+ 0,
+ "Acer Peripherals",
+ "Acerscan 1240U",
+ },
+ {
+ USB_VENDOR_ACERP, USB_PRODUCT_ACERP_S81,
+ 0,
+ "Acer Peripherals",
+ "BenQ S81 phone",
+ },
+ {
+ USB_VENDOR_ACERP, USB_PRODUCT_ACERP_H10,
+ 0,
+ "Acer Peripherals",
+ "AWL400 Wireless Adapter",
+ },
+ {
+ USB_VENDOR_ACERP, USB_PRODUCT_ACERP_ATAPI,
+ 0,
+ "Acer Peripherals",
+ "ATA/ATAPI Adapter",
+ },
+ {
+ USB_VENDOR_ACERP, USB_PRODUCT_ACERP_AWL300,
+ 0,
+ "Acer Peripherals",
+ "AWL300 Wireless Adapter",
+ },
+ {
+ USB_VENDOR_ACERP, USB_PRODUCT_ACERP_AWL400,
+ 0,
+ "Acer Peripherals",
+ "AWL400 Wireless Adapter",
+ },
+ {
+ USB_VENDOR_ACERW, USB_PRODUCT_ACERW_WARPLINK,
+ 0,
+ "Acer",
+ "Warplink",
+ },
+ {
+ USB_VENDOR_ACTIONS, USB_PRODUCT_ACTIONS_MP4,
+ 0,
+ "Actions",
+ "Actions MP4 Player",
+ },
+ {
+ USB_VENDOR_ACTIONTEC, USB_PRODUCT_ACTIONTEC_PRISM_25,
+ 0,
+ "Actiontec Electronics",
+ "Prism2.5 Wireless Adapter",
+ },
+ {
+ USB_VENDOR_ACTIONTEC, USB_PRODUCT_ACTIONTEC_PRISM_25A,
+ 0,
+ "Actiontec Electronics",
+ "Prism2.5 Wireless Adapter A",
+ },
+ {
+ USB_VENDOR_ACTIONTEC, USB_PRODUCT_ACTIONTEC_FREELAN,
+ 0,
+ "Actiontec Electronics",
+ "ROPEX FreeLan 802.11b",
+ },
+ {
+ USB_VENDOR_ACTIONTEC, USB_PRODUCT_ACTIONTEC_UAT1,
+ 0,
+ "Actiontec Electronics",
+ "UAT1 Wireless Ethernet Adapter",
+ },
+ {
+ USB_VENDOR_ACTISYS, USB_PRODUCT_ACTISYS_IR2000U,
+ 0,
+ "ACTiSYS",
+ "ACT-IR2000U FIR",
+ },
+ {
+ USB_VENDOR_ACTIVEWIRE, USB_PRODUCT_ACTIVEWIRE_IOBOARD,
+ 0,
+ "ActiveWire",
+ "I/O Board",
+ },
+ {
+ USB_VENDOR_ACTIVEWIRE, USB_PRODUCT_ACTIVEWIRE_IOBOARD_FW1,
+ 0,
+ "ActiveWire",
+ "I/O Board, rev. 1 firmware",
+ },
+ {
+ USB_VENDOR_ADAPTEC, USB_PRODUCT_ADAPTEC_AWN8020,
+ 0,
+ "Adaptec",
+ "AWN-8020 WLAN",
+ },
+ {
+ USB_VENDOR_ADDTRON, USB_PRODUCT_ADDTRON_AWU120,
+ 0,
+ "Addtron",
+ "AWU-120",
+ },
+ {
+ USB_VENDOR_ADLINK, USB_PRODUCT_ADLINK_ND6530,
+ 0,
+ "ADLINK Technoligy, Inc.",
+ "ND-6530 USB-Serial",
+ },
+ {
+ USB_VENDOR_ADMTEK, USB_PRODUCT_ADMTEK_PEGASUSII_4,
+ 0,
+ "ADMtek",
+ "AN986A Ethernet",
+ },
+ {
+ USB_VENDOR_ADMTEK, USB_PRODUCT_ADMTEK_PEGASUS,
+ 0,
+ "ADMtek",
+ "AN986 Ethernet",
+ },
+ {
+ USB_VENDOR_ADMTEK, USB_PRODUCT_ADMTEK_PEGASUSII,
+ 0,
+ "ADMtek",
+ "AN8511 Ethernet",
+ },
+ {
+ USB_VENDOR_ADMTEK, USB_PRODUCT_ADMTEK_PEGASUSII_2,
+ 0,
+ "ADMtek",
+ "AN8513 Ethernet",
+ },
+ {
+ USB_VENDOR_ADMTEK, USB_PRODUCT_ADMTEK_PEGASUSII_3,
+ 0,
+ "ADMtek",
+ "AN8515 Ethernet",
+ },
+ {
+ USB_VENDOR_ADDON, USB_PRODUCT_ADDON_ATTACHE,
+ 0,
+ "Add-on Technology",
+ "USB 2.0 Flash Drive",
+ },
+ {
+ USB_VENDOR_ADDON, USB_PRODUCT_ADDON_A256MB,
+ 0,
+ "Add-on Technology",
+ "Attache 256MB USB 2.0 Flash Drive",
+ },
+ {
+ USB_VENDOR_ADDON, USB_PRODUCT_ADDON_DISKPRO512,
+ 0,
+ "Add-on Technology",
+ "USB 2.0 Flash Drive (DANE-ELEC zMate 512MB USB flash drive)",
+ },
+ {
+ USB_VENDOR_ADDONICS2, USB_PRODUCT_ADDONICS2_CABLE_205,
+ 0,
+ "Addonics Technology",
+ "Cable 205",
+ },
+ {
+ USB_VENDOR_ADS, USB_PRODUCT_ADS_UBS10BT,
+ 0,
+ "ADS Technologies",
+ "UBS-10BT Ethernet",
+ },
+ {
+ USB_VENDOR_ADS, USB_PRODUCT_ADS_UBS10BTX,
+ 0,
+ "ADS Technologies",
+ "UBS-10BT Ethernet",
+ },
+ {
+ USB_VENDOR_AEI, USB_PRODUCT_AEI_FASTETHERNET,
+ 0,
+ "AEI",
+ "Fast Ethernet",
+ },
+ {
+ USB_VENDOR_AGATE, USB_PRODUCT_AGATE_QDRIVE,
+ 0,
+ "Agate Technologies",
+ "Q-Drive",
+ },
+ {
+ USB_VENDOR_AGFA, USB_PRODUCT_AGFA_SNAPSCAN1212U,
+ 0,
+ "AGFA-Gevaert",
+ "SnapScan 1212U",
+ },
+ {
+ USB_VENDOR_AGFA, USB_PRODUCT_AGFA_SNAPSCAN1236U,
+ 0,
+ "AGFA-Gevaert",
+ "SnapScan 1236U",
+ },
+ {
+ USB_VENDOR_AGFA, USB_PRODUCT_AGFA_SNAPSCANTOUCH,
+ 0,
+ "AGFA-Gevaert",
+ "SnapScan Touch",
+ },
+ {
+ USB_VENDOR_AGFA, USB_PRODUCT_AGFA_SNAPSCAN1212U2,
+ 0,
+ "AGFA-Gevaert",
+ "SnapScan 1212U",
+ },
+ {
+ USB_VENDOR_AGFA, USB_PRODUCT_AGFA_SNAPSCANE40,
+ 0,
+ "AGFA-Gevaert",
+ "SnapScan e40",
+ },
+ {
+ USB_VENDOR_AGFA, USB_PRODUCT_AGFA_SNAPSCANE50,
+ 0,
+ "AGFA-Gevaert",
+ "SnapScan e50",
+ },
+ {
+ USB_VENDOR_AGFA, USB_PRODUCT_AGFA_SNAPSCANE20,
+ 0,
+ "AGFA-Gevaert",
+ "SnapScan e20",
+ },
+ {
+ USB_VENDOR_AGFA, USB_PRODUCT_AGFA_SNAPSCANE25,
+ 0,
+ "AGFA-Gevaert",
+ "SnapScan e25",
+ },
+ {
+ USB_VENDOR_AGFA, USB_PRODUCT_AGFA_SNAPSCANE26,
+ 0,
+ "AGFA-Gevaert",
+ "SnapScan e26",
+ },
+ {
+ USB_VENDOR_AGFA, USB_PRODUCT_AGFA_SNAPSCANE52,
+ 0,
+ "AGFA-Gevaert",
+ "SnapScan e52",
+ },
+ {
+ USB_VENDOR_AINCOMM, USB_PRODUCT_AINCOMM_AWU2000B,
+ 0,
+ "Aincomm",
+ "AWU2000B Wireless Adapter",
+ },
+ {
+ USB_VENDOR_AIPTEK, USB_PRODUCT_AIPTEK_POCKETCAM3M,
+ 0,
+ "AIPTEK International",
+ "PocketCAM 3Mega",
+ },
+ {
+ USB_VENDOR_AIPTEK2, USB_PRODUCT_AIPTEK2_PENCAM_MEGA_1_3,
+ 0,
+ "AIPTEK International",
+ "PenCam Mega 1.3",
+ },
+ {
+ USB_VENDOR_AIPTEK2, USB_PRODUCT_AIPTEK2_SUNPLUS_TECH,
+ 0,
+ "AIPTEK International",
+ "Sunplus Technology Inc.",
+ },
+ {
+ USB_VENDOR_AIRPLUS, USB_PRODUCT_AIRPLUS_MCD650,
+ 0,
+ "Airplus",
+ "MCD650 modem",
+ },
+ {
+ USB_VENDOR_AIRPRIME, USB_PRODUCT_AIRPRIME_PC5220,
+ 0,
+ "AirPrime, Inc.",
+ "CDMA Wireless PC Card",
+ },
+ {
+ USB_VENDOR_AIRTIES, USB_PRODUCT_AIRTIES_RT3070,
+ 0,
+ "AirTies",
+ "RT3070",
+ },
+ {
+ USB_VENDOR_AKS, USB_PRODUCT_AKS_USBHASP,
+ 0,
+ "Aladdin Knowledge Systems",
+ "USB-HASP 0.06",
+ },
+ {
+ USB_VENDOR_ALCATEL, USB_PRODUCT_ALCATEL_OT535,
+ 0,
+ "Alcatel",
+ "One Touch 535/735",
+ },
+ {
+ USB_VENDOR_ALCOR2, USB_PRODUCT_ALCOR2_KBD_HUB,
+ 0,
+ "Alcor Micro",
+ "Kbd Hub",
+ },
+ {
+ USB_VENDOR_ALCOR, USB_PRODUCT_ALCOR_SDCR_6335,
+ 0,
+ "Alcor Micro",
+ "SD/MMC Card Reader",
+ },
+ {
+ USB_VENDOR_ALCOR, USB_PRODUCT_ALCOR_SDCR_6362,
+ 0,
+ "Alcor Micro",
+ "SD/MMC Card Reader",
+ },
+ {
+ USB_VENDOR_ALCOR, USB_PRODUCT_ALCOR_TRANSCEND,
+ 0,
+ "Alcor Micro",
+ "Transcend JetFlash Drive",
+ },
+ {
+ USB_VENDOR_ALCOR, USB_PRODUCT_ALCOR_MA_KBD_HUB,
+ 0,
+ "Alcor Micro",
+ "MacAlly Kbd Hub",
+ },
+ {
+ USB_VENDOR_ALCOR, USB_PRODUCT_ALCOR_AU9814,
+ 0,
+ "Alcor Micro",
+ "AU9814 Hub",
+ },
+ {
+ USB_VENDOR_ALCOR, USB_PRODUCT_ALCOR_UMCR_9361,
+ 0,
+ "Alcor Micro",
+ "USB Multimedia Card Reader",
+ },
+ {
+ USB_VENDOR_ALCOR, USB_PRODUCT_ALCOR_SM_KBD,
+ 0,
+ "Alcor Micro",
+ "MicroConnectors/StrongMan Keyboard",
+ },
+ {
+ USB_VENDOR_ALCOR, USB_PRODUCT_ALCOR_NEC_KBD_HUB,
+ 0,
+ "Alcor Micro",
+ "NEC Kbd Hub",
+ },
+ {
+ USB_VENDOR_ALCOR, USB_PRODUCT_ALCOR_AU9720,
+ 0,
+ "Alcor Micro",
+ "USB2 - RS-232",
+ },
+ {
+ USB_VENDOR_ALCOR, USB_PRODUCT_ALCOR_AU6390,
+ 0,
+ "Alcor Micro",
+ "AU6390 USB-IDE converter",
+ },
+ {
+ USB_VENDOR_ALINK, USB_PRODUCT_ALINK_DWM652U5,
+ 0,
+ "Alink",
+ "DWM-652",
+ },
+ {
+ USB_VENDOR_ALINK, USB_PRODUCT_ALINK_3G,
+ 0,
+ "Alink",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_ALINK, USB_PRODUCT_ALINK_3GU,
+ 0,
+ "Alink",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_ALTEC, USB_PRODUCT_ALTEC_ADA70,
+ 0,
+ "Altec Lansing",
+ "ADA70 Speakers",
+ },
+ {
+ USB_VENDOR_ALTEC, USB_PRODUCT_ALTEC_ASC495,
+ 0,
+ "Altec Lansing",
+ "ASC495 Speakers",
+ },
+ {
+ USB_VENDOR_ALLIEDTELESYN, USB_PRODUCT_ALLIEDTELESYN_ATUSB100,
+ 0,
+ "Allied Telesyn International",
+ "AT-USB100",
+ },
+ {
+ USB_VENDOR_ALLWIN, USB_PRODUCT_ALLWIN_RT2070,
+ 0,
+ "ALLWIN Tech",
+ "RT2070",
+ },
+ {
+ USB_VENDOR_ALLWIN, USB_PRODUCT_ALLWIN_RT2770,
+ 0,
+ "ALLWIN Tech",
+ "RT2770",
+ },
+ {
+ USB_VENDOR_ALLWIN, USB_PRODUCT_ALLWIN_RT2870,
+ 0,
+ "ALLWIN Tech",
+ "RT2870",
+ },
+ {
+ USB_VENDOR_ALLWIN, USB_PRODUCT_ALLWIN_RT3070,
+ 0,
+ "ALLWIN Tech",
+ "RT3070",
+ },
+ {
+ USB_VENDOR_ALLWIN, USB_PRODUCT_ALLWIN_RT3071,
+ 0,
+ "ALLWIN Tech",
+ "RT3071",
+ },
+ {
+ USB_VENDOR_ALLWIN, USB_PRODUCT_ALLWIN_RT3072,
+ 0,
+ "ALLWIN Tech",
+ "RT3072",
+ },
+ {
+ USB_VENDOR_ALLWIN, USB_PRODUCT_ALLWIN_RT3572,
+ 0,
+ "ALLWIN Tech",
+ "RT3572",
+ },
+ {
+ USB_VENDOR_ALPHASMART, USB_PRODUCT_ALPHASMART_DANA_KB,
+ 0,
+ "AlphaSmart, Inc.",
+ "AlphaSmart Dana Keyboard",
+ },
+ {
+ USB_VENDOR_ALPHASMART, USB_PRODUCT_ALPHASMART_DANA_SYNC,
+ 0,
+ "AlphaSmart, Inc.",
+ "AlphaSmart Dana HotSync",
+ },
+ {
+ USB_VENDOR_AMOI, USB_PRODUCT_AMOI_H01,
+ 0,
+ "Amoi Electronics",
+ "H01 3G modem",
+ },
+ {
+ USB_VENDOR_AMOI, USB_PRODUCT_AMOI_H01A,
+ 0,
+ "Amoi Electronics",
+ "H01A 3G modem",
+ },
+ {
+ USB_VENDOR_AMOI, USB_PRODUCT_AMOI_H02,
+ 0,
+ "Amoi Electronics",
+ "H02 3G modem",
+ },
+ {
+ USB_VENDOR_APC, USB_PRODUCT_APC_UPS,
+ 0,
+ "American Power Conversion",
+ "Uninterruptible Power Supply",
+ },
+ {
+ USB_VENDOR_AMBIT, USB_PRODUCT_AMBIT_WLAN,
+ 0,
+ "Ambit Microsystems",
+ "WLAN",
+ },
+ {
+ USB_VENDOR_AMBIT, USB_PRODUCT_AMBIT_NTL_250,
+ 0,
+ "Ambit Microsystems",
+ "NTL 250 cable modem",
+ },
+ {
+ USB_VENDOR_APACER, USB_PRODUCT_APACER_HT202,
+ 0,
+ "Apacer",
+ "USB 2.0 Flash Drive",
+ },
+ {
+ USB_VENDOR_APC, USB_PRODUCT_APC_UPS,
+ 0,
+ "American Power Conversion",
+ "Uninterruptible Power Supply",
+ },
+ {
+ USB_VENDOR_AMIGO, USB_PRODUCT_AMIGO_RT2870_1,
+ 0,
+ "Amigo Technology",
+ "RT2870",
+ },
+ {
+ USB_VENDOR_AMIGO, USB_PRODUCT_AMIGO_RT2870_2,
+ 0,
+ "Amigo Technology",
+ "RT2870",
+ },
+ {
+ USB_VENDOR_AMIT, USB_PRODUCT_AMIT_CGWLUSB2GO,
+ 0,
+ "AMIT",
+ "CG-WLUSB2GO",
+ },
+ {
+ USB_VENDOR_AMIT, USB_PRODUCT_AMIT_CGWLUSB2GNR,
+ 0,
+ "AMIT",
+ "CG-WLUSB2GNR",
+ },
+ {
+ USB_VENDOR_AMIT, USB_PRODUCT_AMIT_RT2870_1,
+ 0,
+ "AMIT",
+ "RT2870",
+ },
+ {
+ USB_VENDOR_AMIT2, USB_PRODUCT_AMIT2_RT2870,
+ 0,
+ "AMIT",
+ "RT2870",
+ },
+ {
+ USB_VENDOR_ANCHOR, USB_PRODUCT_ANCHOR_SERIAL,
+ 0,
+ "Anchor Chips",
+ "Serial",
+ },
+ {
+ USB_VENDOR_ANCHOR, USB_PRODUCT_ANCHOR_EZUSB,
+ 0,
+ "Anchor Chips",
+ "EZUSB",
+ },
+ {
+ USB_VENDOR_ANCHOR, USB_PRODUCT_ANCHOR_EZLINK,
+ 0,
+ "Anchor Chips",
+ "EZLINK",
+ },
+ {
+ USB_VENDOR_ANYDATA, USB_PRODUCT_ANYDATA_ADU_620UW,
+ 0,
+ "AnyDATA Corporation",
+ "CDMA 2000 EV-DO USB Modem",
+ },
+ {
+ USB_VENDOR_ANYDATA, USB_PRODUCT_ANYDATA_ADU_E100X,
+ 0,
+ "AnyDATA Corporation",
+ "CDMA 2000 1xRTT/EV-DO USB Modem",
+ },
+ {
+ USB_VENDOR_ANYDATA, USB_PRODUCT_ANYDATA_ADU_500A,
+ 0,
+ "AnyDATA Corporation",
+ "CDMA 2000 EV-DO USB Modem",
+ },
+ {
+ USB_VENDOR_AOX, USB_PRODUCT_AOX_USB101,
+ 0,
+ "AOX",
+ "Ethernet",
+ },
+ {
+ USB_VENDOR_APC, USB_PRODUCT_APC_UPS,
+ 0,
+ "American Power Conversion",
+ "Uninterruptible Power Supply",
+ },
+ {
+ USB_VENDOR_APPLE, USB_PRODUCT_APPLE_IMAC_KBD,
+ 0,
+ "Apple Computer",
+ "USB iMac Keyboard",
+ },
+ {
+ USB_VENDOR_APPLE, USB_PRODUCT_APPLE_KBD,
+ 0,
+ "Apple Computer",
+ "USB Keyboard M2452",
+ },
+ {
+ USB_VENDOR_APPLE, USB_PRODUCT_APPLE_EXT_KBD,
+ 0,
+ "Apple Computer",
+ "Apple Extended USB Keyboard",
+ },
+ {
+ USB_VENDOR_APPLE, USB_PRODUCT_APPLE_KBD_TP_ANSI,
+ 0,
+ "Apple Computer",
+ "Apple Internal Keyboard/Trackpad (Wellspring/ANSI)",
+ },
+ {
+ USB_VENDOR_APPLE, USB_PRODUCT_APPLE_KBD_TP_ISO,
+ 0,
+ "Apple Computer",
+ "Apple Internal Keyboard/Trackpad (Wellspring/ISO)",
+ },
+ {
+ USB_VENDOR_APPLE, USB_PRODUCT_APPLE_KBD_TP_JIS,
+ 0,
+ "Apple Computer",
+ "Apple Internal Keyboard/Trackpad (Wellspring/JIS)",
+ },
+ {
+ USB_VENDOR_APPLE, USB_PRODUCT_APPLE_KBD_TP_ANSI2,
+ 0,
+ "Apple Computer",
+ "Apple Internal Keyboard/Trackpad (Wellspring2/ANSI)",
+ },
+ {
+ USB_VENDOR_APPLE, USB_PRODUCT_APPLE_KBD_TP_ISO2,
+ 0,
+ "Apple Computer",
+ "Apple Internal Keyboard/Trackpad (Wellspring2/ISO)",
+ },
+ {
+ USB_VENDOR_APPLE, USB_PRODUCT_APPLE_KBD_TP_JIS2,
+ 0,
+ "Apple Computer",
+ "Apple Internal Keyboard/Trackpad (Wellspring2/JIS)",
+ },
+ {
+ USB_VENDOR_APPLE, USB_PRODUCT_APPLE_MOUSE,
+ 0,
+ "Apple Computer",
+ "Mouse M4848",
+ },
+ {
+ USB_VENDOR_APPLE, USB_PRODUCT_APPLE_OPTMOUSE,
+ 0,
+ "Apple Computer",
+ "Optical mouse",
+ },
+ {
+ USB_VENDOR_APPLE, USB_PRODUCT_APPLE_MIGHTYMOUSE,
+ 0,
+ "Apple Computer",
+ "Mighty Mouse",
+ },
+ {
+ USB_VENDOR_APPLE, USB_PRODUCT_APPLE_KBD_HUB,
+ 0,
+ "Apple Computer",
+ "Hub in Apple USB Keyboard",
+ },
+ {
+ USB_VENDOR_APPLE, USB_PRODUCT_APPLE_EXT_KBD_HUB,
+ 0,
+ "Apple Computer",
+ "Hub in Apple Extended USB Keyboard",
+ },
+ {
+ USB_VENDOR_APPLE, USB_PRODUCT_APPLE_SPEAKERS,
+ 0,
+ "Apple Computer",
+ "Speakers",
+ },
+ {
+ USB_VENDOR_APPLE, USB_PRODUCT_APPLE_IPOD,
+ 0,
+ "Apple Computer",
+ "iPod",
+ },
+ {
+ USB_VENDOR_APPLE, USB_PRODUCT_APPLE_IPOD2G,
+ 0,
+ "Apple Computer",
+ "iPod 2G",
+ },
+ {
+ USB_VENDOR_APPLE, USB_PRODUCT_APPLE_IPOD3G,
+ 0,
+ "Apple Computer",
+ "iPod 3G",
+ },
+ {
+ USB_VENDOR_APPLE, USB_PRODUCT_APPLE_IPOD_04,
+ 0,
+ "Apple Computer",
+ "iPod '04'",
+ },
+ {
+ USB_VENDOR_APPLE, USB_PRODUCT_APPLE_IPODMINI,
+ 0,
+ "Apple Computer",
+ "iPod Mini",
+ },
+ {
+ USB_VENDOR_APPLE, USB_PRODUCT_APPLE_IPOD_06,
+ 0,
+ "Apple Computer",
+ "iPod '06'",
+ },
+ {
+ USB_VENDOR_APPLE, USB_PRODUCT_APPLE_IPOD_07,
+ 0,
+ "Apple Computer",
+ "iPod '07'",
+ },
+ {
+ USB_VENDOR_APPLE, USB_PRODUCT_APPLE_IPOD_08,
+ 0,
+ "Apple Computer",
+ "iPod '08'",
+ },
+ {
+ USB_VENDOR_APPLE, USB_PRODUCT_APPLE_IPODVIDEO,
+ 0,
+ "Apple Computer",
+ "iPod Video",
+ },
+ {
+ USB_VENDOR_APPLE, USB_PRODUCT_APPLE_IPODNANO,
+ 0,
+ "Apple Computer",
+ "iPod Nano",
+ },
+ {
+ USB_VENDOR_APPLE, USB_PRODUCT_APPLE_IPHONE,
+ 0,
+ "Apple Computer",
+ "iPhone",
+ },
+ {
+ USB_VENDOR_APPLE, USB_PRODUCT_APPLE_IPOD_TOUCH,
+ 0,
+ "Apple Computer",
+ "iPod Touch",
+ },
+ {
+ USB_VENDOR_APPLE, USB_PRODUCT_APPLE_IPHONE_3G,
+ 0,
+ "Apple Computer",
+ "iPhone 3G",
+ },
+ {
+ USB_VENDOR_APPLE, USB_PRODUCT_APPLE_IPHONE_3GS,
+ 0,
+ "Apple Computer",
+ "iPhone 3GS",
+ },
+ {
+ USB_VENDOR_APPLE, USB_PRODUCT_APPLE_IPHONE_4,
+ 0,
+ "Apple Computer",
+ "iPhone 4",
+ },
+ {
+ USB_VENDOR_APPLE, USB_PRODUCT_APPLE_IPAD,
+ 0,
+ "Apple Computer",
+ "iPad",
+ },
+ {
+ USB_VENDOR_APPLE, USB_PRODUCT_APPLE_ETHERNET,
+ 0,
+ "Apple Computer",
+ "Ethernet A1277",
+ },
+ {
+ USB_VENDOR_ARKMICRO, USB_PRODUCT_ARKMICRO_ARK3116,
+ 0,
+ "Arkmicro Technologies Inc.",
+ "ARK3116 Serial",
+ },
+ {
+ USB_VENDOR_ASAHIOPTICAL, USB_PRODUCT_ASAHIOPTICAL_OPTIO230,
+ 0,
+ "Asahi Optical",
+ "Digital camera",
+ },
+ {
+ USB_VENDOR_ASAHIOPTICAL, USB_PRODUCT_ASAHIOPTICAL_OPTIO330,
+ 0,
+ "Asahi Optical",
+ "Digital camera",
+ },
+ {
+ USB_VENDOR_ASANTE, USB_PRODUCT_ASANTE_EA,
+ 0,
+ "Asante",
+ "Ethernet",
+ },
+ {
+ USB_VENDOR_ASIX, USB_PRODUCT_ASIX_AX88172,
+ 0,
+ "ASIX Electronics",
+ "10/100 Ethernet",
+ },
+ {
+ USB_VENDOR_ASIX, USB_PRODUCT_ASIX_AX88178,
+ 0,
+ "ASIX Electronics",
+ "AX88178",
+ },
+ {
+ USB_VENDOR_ASIX, USB_PRODUCT_ASIX_AX88772,
+ 0,
+ "ASIX Electronics",
+ "AX88772",
+ },
+ {
+ USB_VENDOR_ASIX, USB_PRODUCT_ASIX_AX88772A,
+ 0,
+ "ASIX Electronics",
+ "AX88772A USB 2.0 10/100 Ethernet",
+ },
+ {
+ USB_VENDOR_ASUS2, USB_PRODUCT_ASUS2_USBN11,
+ 0,
+ "ASUS",
+ "USB-N11",
+ },
+ {
+ USB_VENDOR_ASUS, USB_PRODUCT_ASUS_WL167G,
+ 0,
+ "ASUSTeK Computer",
+ "WL-167g Wireless Adapter",
+ },
+ {
+ USB_VENDOR_ASUS, USB_PRODUCT_ASUS_WL159G,
+ 0,
+ "ASUSTeK Computer",
+ "WL-159g",
+ },
+ {
+ USB_VENDOR_ASUS, USB_PRODUCT_ASUS_A9T_WIFI,
+ 0,
+ "ASUSTeK Computer",
+ "A9T wireless",
+ },
+ {
+ USB_VENDOR_ASUS, USB_PRODUCT_ASUS_P5B_WIFI,
+ 0,
+ "ASUSTeK Computer",
+ "P5B wireless",
+ },
+ {
+ USB_VENDOR_ASUS, USB_PRODUCT_ASUS_RT2573_1,
+ 0,
+ "ASUSTeK Computer",
+ "RT2573",
+ },
+ {
+ USB_VENDOR_ASUS, USB_PRODUCT_ASUS_RT2573_2,
+ 0,
+ "ASUSTeK Computer",
+ "RT2573",
+ },
+ {
+ USB_VENDOR_ASUS, USB_PRODUCT_ASUS_LCM,
+ 0,
+ "ASUSTeK Computer",
+ "LCM display",
+ },
+ {
+ USB_VENDOR_ASUS, USB_PRODUCT_ASUS_RT2870_1,
+ 0,
+ "ASUSTeK Computer",
+ "RT2870",
+ },
+ {
+ USB_VENDOR_ASUS, USB_PRODUCT_ASUS_RT2870_2,
+ 0,
+ "ASUSTeK Computer",
+ "RT2870",
+ },
+ {
+ USB_VENDOR_ASUS, USB_PRODUCT_ASUS_RT2870_3,
+ 0,
+ "ASUSTeK Computer",
+ "RT2870",
+ },
+ {
+ USB_VENDOR_ASUS, USB_PRODUCT_ASUS_RT2870_4,
+ 0,
+ "ASUSTeK Computer",
+ "RT2870",
+ },
+ {
+ USB_VENDOR_ASUS, USB_PRODUCT_ASUS_RT2870_5,
+ 0,
+ "ASUSTeK Computer",
+ "RT2870",
+ },
+ {
+ USB_VENDOR_ASUS, USB_PRODUCT_ASUS_USBN13,
+ 0,
+ "ASUSTeK Computer",
+ "USB-N13",
+ },
+ {
+ USB_VENDOR_ASUS, USB_PRODUCT_ASUS_RT3070_1,
+ 0,
+ "ASUSTeK Computer",
+ "RT3070",
+ },
+ {
+ USB_VENDOR_ASUS, USB_PRODUCT_ASUS_A730W,
+ 0,
+ "ASUSTeK Computer",
+ "ASUS MyPal A730W",
+ },
+ {
+ USB_VENDOR_ASUS, USB_PRODUCT_ASUS_P535,
+ 0,
+ "ASUSTeK Computer",
+ "ASUS P535 PDA",
+ },
+ {
+ USB_VENDOR_ASUS, USB_PRODUCT_ASUS_GMSC,
+ 0,
+ "ASUSTeK Computer",
+ "ASUS Generic Mass Storage",
+ },
+ {
+ USB_VENDOR_ASUS, USB_PRODUCT_ASUS_RT2570,
+ 0,
+ "ASUSTeK Computer",
+ "RT2500USB Wireless Adapter",
+ },
+ {
+ USB_VENDOR_ATEN, USB_PRODUCT_ATEN_UC1284,
+ 0,
+ "ATEN International",
+ "Parallel printer",
+ },
+ {
+ USB_VENDOR_ATEN, USB_PRODUCT_ATEN_UC10T,
+ 0,
+ "ATEN International",
+ "10Mbps Ethernet",
+ },
+ {
+ USB_VENDOR_ATEN, USB_PRODUCT_ATEN_UC110T,
+ 0,
+ "ATEN International",
+ "UC-110T Ethernet",
+ },
+ {
+ USB_VENDOR_ATEN, USB_PRODUCT_ATEN_UC232A,
+ 0,
+ "ATEN International",
+ "Serial",
+ },
+ {
+ USB_VENDOR_ATEN, USB_PRODUCT_ATEN_UC210T,
+ 0,
+ "ATEN International",
+ "UC-210T Ethernet",
+ },
+ {
+ USB_VENDOR_ATEN, USB_PRODUCT_ATEN_DSB650C,
+ 0,
+ "ATEN International",
+ "DSB-650C",
+ },
+ {
+ USB_VENDOR_ATHEROS, USB_PRODUCT_ATHEROS_AR5523,
+ 0,
+ "Atheros Communications",
+ "AR5523",
+ },
+ {
+ USB_VENDOR_ATHEROS, USB_PRODUCT_ATHEROS_AR5523_NF,
+ 0,
+ "Atheros Communications",
+ "AR5523 (no firmware)",
+ },
+ {
+ USB_VENDOR_ATHEROS2, USB_PRODUCT_ATHEROS2_AR5523_1,
+ 0,
+ "Atheros Communications",
+ "AR5523",
+ },
+ {
+ USB_VENDOR_ATHEROS2, USB_PRODUCT_ATHEROS2_AR5523_1_NF,
+ 0,
+ "Atheros Communications",
+ "AR5523 (no firmware)",
+ },
+ {
+ USB_VENDOR_ATHEROS2, USB_PRODUCT_ATHEROS2_AR5523_2,
+ 0,
+ "Atheros Communications",
+ "AR5523",
+ },
+ {
+ USB_VENDOR_ATHEROS2, USB_PRODUCT_ATHEROS2_AR5523_2_NF,
+ 0,
+ "Atheros Communications",
+ "AR5523 (no firmware)",
+ },
+ {
+ USB_VENDOR_ATHEROS2, USB_PRODUCT_ATHEROS2_AR5523_3,
+ 0,
+ "Atheros Communications",
+ "AR5523",
+ },
+ {
+ USB_VENDOR_ATHEROS2, USB_PRODUCT_ATHEROS2_AR5523_3_NF,
+ 0,
+ "Atheros Communications",
+ "AR5523 (no firmware)",
+ },
+ {
+ USB_VENDOR_ATMEL, USB_PRODUCT_ATMEL_STK541,
+ 0,
+ "Atmel",
+ "Zigbee Controller",
+ },
+ {
+ USB_VENDOR_ATMEL, USB_PRODUCT_ATMEL_UHB124,
+ 0,
+ "Atmel",
+ "UHB124 hub",
+ },
+ {
+ USB_VENDOR_ATMEL, USB_PRODUCT_ATMEL_DWL120,
+ 0,
+ "Atmel",
+ "DWL-120 Wireless Adapter",
+ },
+ {
+ USB_VENDOR_ATMEL, USB_PRODUCT_ATMEL_BW002,
+ 0,
+ "Atmel",
+ "BW002 Wireless Adapter",
+ },
+ {
+ USB_VENDOR_ATMEL, USB_PRODUCT_ATMEL_WL1130USB,
+ 0,
+ "Atmel",
+ "WL-1130 USB",
+ },
+ {
+ USB_VENDOR_ATMEL, USB_PRODUCT_ATMEL_AT76C505A,
+ 0,
+ "Atmel",
+ "AT76c505a Wireless Adapter",
+ },
+ {
+ USB_VENDOR_AUTHENTEC, USB_PRODUCT_AUTHENTEC_AES1610,
+ 0,
+ "AuthenTec",
+ "AES1610 Fingerprint Sensor",
+ },
+ {
+ USB_VENDOR_AVISION, USB_PRODUCT_AVISION_1200U,
+ 0,
+ "Avision",
+ "1200U scanner",
+ },
+ {
+ USB_VENDOR_AXESSTEL, USB_PRODUCT_AXESSTEL_DATAMODEM,
+ 0,
+ "Axesstel Co., Ltd.",
+ "Data Modem",
+ },
+ {
+ USB_VENDOR_AZUREWAVE, USB_PRODUCT_AZUREWAVE_RT2870_1,
+ 0,
+ "AsureWave",
+ "RT2870",
+ },
+ {
+ USB_VENDOR_AZUREWAVE, USB_PRODUCT_AZUREWAVE_RT2870_2,
+ 0,
+ "AsureWave",
+ "RT2870",
+ },
+ {
+ USB_VENDOR_AZUREWAVE, USB_PRODUCT_AZUREWAVE_RT3070_1,
+ 0,
+ "AsureWave",
+ "RT3070",
+ },
+ {
+ USB_VENDOR_AZUREWAVE, USB_PRODUCT_AZUREWAVE_RT3070_2,
+ 0,
+ "AsureWave",
+ "RT3070",
+ },
+ {
+ USB_VENDOR_AZUREWAVE, USB_PRODUCT_AZUREWAVE_RT3070_3,
+ 0,
+ "AsureWave",
+ "RT3070",
+ },
+ {
+ USB_VENDOR_BALTECH, USB_PRODUCT_BALTECH_CARDREADER,
+ 0,
+ "Baltech",
+ "Card reader",
+ },
+ {
+ USB_VENDOR_BBELECTRONICS, USB_PRODUCT_BBELECTRONICS_USOTL4,
+ 0,
+ "B&B Electronics",
+ "RS-422/485",
+ },
+ {
+ USB_VENDOR_BELKIN, USB_PRODUCT_BELKIN_F5D6050,
+ 0,
+ "Belkin Components",
+ "F5D6050 802.11b Wireless Adapter",
+ },
+ {
+ USB_VENDOR_BELKIN, USB_PRODUCT_BELKIN_FBT001V,
+ 0,
+ "Belkin Components",
+ "FBT001v2 Bluetooth",
+ },
+ {
+ USB_VENDOR_BELKIN, USB_PRODUCT_BELKIN_FBT003V,
+ 0,
+ "Belkin Components",
+ "FBT003v2 Bluetooth",
+ },
+ {
+ USB_VENDOR_BELKIN, USB_PRODUCT_BELKIN_F5U103,
+ 0,
+ "Belkin Components",
+ "F5U103 Serial",
+ },
+ {
+ USB_VENDOR_BELKIN, USB_PRODUCT_BELKIN_F5U109,
+ 0,
+ "Belkin Components",
+ "F5U109 Serial",
+ },
+ {
+ USB_VENDOR_BELKIN, USB_PRODUCT_BELKIN_USB2SCSI,
+ 0,
+ "Belkin Components",
+ "USB to SCSI",
+ },
+ {
+ USB_VENDOR_BELKIN, USB_PRODUCT_BELKIN_F8T012,
+ 0,
+ "Belkin Components",
+ "F8T012xx1 Bluetooth USB Adapter",
+ },
+ {
+ USB_VENDOR_BELKIN, USB_PRODUCT_BELKIN_USB2LAN,
+ 0,
+ "Belkin Components",
+ "USB to LAN",
+ },
+ {
+ USB_VENDOR_BELKIN, USB_PRODUCT_BELKIN_F5U208,
+ 0,
+ "Belkin Components",
+ "F5U208 VideoBus II",
+ },
+ {
+ USB_VENDOR_BELKIN, USB_PRODUCT_BELKIN_F5U237,
+ 0,
+ "Belkin Components",
+ "F5U237 USB 2.0 7-Port Hub",
+ },
+ {
+ USB_VENDOR_BELKIN, USB_PRODUCT_BELKIN_F5U257,
+ 0,
+ "Belkin Components",
+ "F5U257 Serial",
+ },
+ {
+ USB_VENDOR_BELKIN, USB_PRODUCT_BELKIN_F5U409,
+ 0,
+ "Belkin Components",
+ "F5U409 Serial",
+ },
+ {
+ USB_VENDOR_BELKIN, USB_PRODUCT_BELKIN_F6C550AVR,
+ 0,
+ "Belkin Components",
+ "F6C550-AVR UPS",
+ },
+ {
+ USB_VENDOR_BELKIN, USB_PRODUCT_BELKIN_F5U120,
+ 0,
+ "Belkin Components",
+ "F5U120-PC Hub",
+ },
+ {
+ USB_VENDOR_BELKIN, USB_PRODUCT_BELKIN_ZD1211B,
+ 0,
+ "Belkin Components",
+ "ZD1211B",
+ },
+ {
+ USB_VENDOR_BELKIN, USB_PRODUCT_BELKIN_F5D5055,
+ 0,
+ "Belkin Components",
+ "F5D5055",
+ },
+ {
+ USB_VENDOR_BELKIN, USB_PRODUCT_BELKIN_F5D7050,
+ 0,
+ "Belkin Components",
+ "F5D7050 Wireless Adapter",
+ },
+ {
+ USB_VENDOR_BELKIN, USB_PRODUCT_BELKIN_F5D7051,
+ 0,
+ "Belkin Components",
+ "F5D7051 54g USB Network Adapter",
+ },
+ {
+ USB_VENDOR_BELKIN, USB_PRODUCT_BELKIN_F5D7050A,
+ 0,
+ "Belkin Components",
+ "F5D7050A Wireless Adapter",
+ },
+ {
+ USB_VENDOR_BELKIN, USB_PRODUCT_BELKIN_F5D7050_V4000,
+ 0,
+ "Belkin Components",
+ "F5D7050 v4000 Wireless Adapter",
+ },
+ {
+ USB_VENDOR_BELKIN, USB_PRODUCT_BELKIN_F5D7050E,
+ 0,
+ "Belkin Components",
+ "F5D7050E Wireless Adapter",
+ },
+ {
+ USB_VENDOR_BELKIN, USB_PRODUCT_BELKIN_RT2870_1,
+ 0,
+ "Belkin Components",
+ "RT2870",
+ },
+ {
+ USB_VENDOR_BELKIN, USB_PRODUCT_BELKIN_RT2870_2,
+ 0,
+ "Belkin Components",
+ "RT2870",
+ },
+ {
+ USB_VENDOR_BELKIN, USB_PRODUCT_BELKIN_F5D8053V3,
+ 0,
+ "Belkin Components",
+ "F5D8053 v3",
+ },
+ {
+ USB_VENDOR_BELKIN, USB_PRODUCT_BELKIN_F5D8055,
+ 0,
+ "Belkin Components",
+ "F5D8055",
+ },
+ {
+ USB_VENDOR_BELKIN, USB_PRODUCT_BELKIN_F5D9050V3,
+ 0,
+ "Belkin Components",
+ "F5D9050 ver 3 Wireless Adapter",
+ },
+ {
+ USB_VENDOR_BELKIN2, USB_PRODUCT_BELKIN2_F5U002,
+ 0,
+ "Belkin Components",
+ "F5U002 Parallel printer",
+ },
+ {
+ USB_VENDOR_BELKIN, USB_PRODUCT_BELKIN_F6D4050V1,
+ 0,
+ "Belkin Components",
+ "F6D4050 v1",
+ },
+ {
+ USB_VENDOR_BILLIONTON, USB_PRODUCT_BILLIONTON_USB100,
+ 0,
+ "Billionton Systems",
+ "USB100N 10/100 FastEthernet",
+ },
+ {
+ USB_VENDOR_BILLIONTON, USB_PRODUCT_BILLIONTON_USBLP100,
+ 0,
+ "Billionton Systems",
+ "USB100LP",
+ },
+ {
+ USB_VENDOR_BILLIONTON, USB_PRODUCT_BILLIONTON_USBEL100,
+ 0,
+ "Billionton Systems",
+ "USB100EL",
+ },
+ {
+ USB_VENDOR_BILLIONTON, USB_PRODUCT_BILLIONTON_USBE100,
+ 0,
+ "Billionton Systems",
+ "USBE100",
+ },
+ {
+ USB_VENDOR_BILLIONTON, USB_PRODUCT_BILLIONTON_USB2AR,
+ 0,
+ "Billionton Systems",
+ "USB2AR Ethernet",
+ },
+ {
+ USB_VENDOR_BROADCOM, USB_PRODUCT_BROADCOM_BCM2033,
+ 0,
+ "Broadcom",
+ "BCM2033 Bluetooth USB dongle",
+ },
+ {
+ USB_VENDOR_BROTHER, USB_PRODUCT_BROTHER_HL1050,
+ 0,
+ "Brother Industries",
+ "HL-1050 laser printer",
+ },
+ {
+ USB_VENDOR_BROTHER, USB_PRODUCT_BROTHER_MFC8600_9650,
+ 0,
+ "Brother Industries",
+ "MFC8600/9650 multifunction device",
+ },
+ {
+ USB_VENDOR_BTC, USB_PRODUCT_BTC_BTC6100,
+ 0,
+ "Behavior Tech. Computer",
+ "6100C Keyboard",
+ },
+ {
+ USB_VENDOR_BTC, USB_PRODUCT_BTC_BTC7932,
+ 0,
+ "Behavior Tech. Computer",
+ "Keyboard with mouse port",
+ },
+ {
+ USB_VENDOR_CANON, USB_PRODUCT_CANON_N656U,
+ 0,
+ "Canon",
+ "CanoScan N656U",
+ },
+ {
+ USB_VENDOR_CANON, USB_PRODUCT_CANON_N1220U,
+ 0,
+ "Canon",
+ "CanoScan N1220U",
+ },
+ {
+ USB_VENDOR_CANON, USB_PRODUCT_CANON_D660U,
+ 0,
+ "Canon",
+ "CanoScan D660U",
+ },
+ {
+ USB_VENDOR_CANON, USB_PRODUCT_CANON_N676U,
+ 0,
+ "Canon",
+ "CanoScan N676U",
+ },
+ {
+ USB_VENDOR_CANON, USB_PRODUCT_CANON_N1240U,
+ 0,
+ "Canon",
+ "CanoScan N1240U",
+ },
+ {
+ USB_VENDOR_CANON, USB_PRODUCT_CANON_LIDE25,
+ 0,
+ "Canon",
+ "CanoScan LIDE 25",
+ },
+ {
+ USB_VENDOR_CANON, USB_PRODUCT_CANON_S10,
+ 0,
+ "Canon",
+ "PowerShot S10",
+ },
+ {
+ USB_VENDOR_CANON, USB_PRODUCT_CANON_S100,
+ 0,
+ "Canon",
+ "PowerShot S100",
+ },
+ {
+ USB_VENDOR_CANON, USB_PRODUCT_CANON_S200,
+ 0,
+ "Canon",
+ "PowerShot S200",
+ },
+ {
+ USB_VENDOR_CANON, USB_PRODUCT_CANON_REBELXT,
+ 0,
+ "Canon",
+ "Digital Rebel XT",
+ },
+ {
+ USB_VENDOR_CATC, USB_PRODUCT_CATC_NETMATE,
+ 0,
+ "Computer Access Technology",
+ "Netmate Ethernet",
+ },
+ {
+ USB_VENDOR_CATC, USB_PRODUCT_CATC_NETMATE2,
+ 0,
+ "Computer Access Technology",
+ "Netmate2 Ethernet",
+ },
+ {
+ USB_VENDOR_CATC, USB_PRODUCT_CATC_CHIEF,
+ 0,
+ "Computer Access Technology",
+ "USB Chief Bus & Protocol Analyzer",
+ },
+ {
+ USB_VENDOR_CATC, USB_PRODUCT_CATC_ANDROMEDA,
+ 0,
+ "Computer Access Technology",
+ "Andromeda hub",
+ },
+ {
+ USB_VENDOR_CASIO, USB_PRODUCT_CASIO_QV_DIGICAM,
+ 0,
+ "CASIO",
+ "QV DigiCam",
+ },
+ {
+ USB_VENDOR_CASIO, USB_PRODUCT_CASIO_EXS880,
+ 0,
+ "CASIO",
+ "Exilim EX-S880",
+ },
+ {
+ USB_VENDOR_CASIO, USB_PRODUCT_CASIO_BE300,
+ 0,
+ "CASIO",
+ "BE-300 PDA",
+ },
+ {
+ USB_VENDOR_CASIO, USB_PRODUCT_CASIO_NAMELAND,
+ 0,
+ "CASIO",
+ "CASIO Nameland EZ-USB",
+ },
+ {
+ USB_VENDOR_CCYU, USB_PRODUCT_CCYU_ED1064,
+ 0,
+ "CCYU Technology",
+ "EasyDisk ED1064",
+ },
+ {
+ USB_VENDOR_CENTURY, USB_PRODUCT_CENTURY_EX35QUAT,
+ 0,
+ "Century Corp",
+ "Century USB Disk Enclosure",
+ },
+ {
+ USB_VENDOR_CENTURY, USB_PRODUCT_CENTURY_EX35SW4_SB4,
+ 0,
+ "Century Corp",
+ "Century USB Disk Enclosure",
+ },
+ {
+ USB_VENDOR_CHERRY, USB_PRODUCT_CHERRY_MY3000KBD,
+ 0,
+ "Cherry Mikroschalter",
+ "My3000 keyboard",
+ },
+ {
+ USB_VENDOR_CHERRY, USB_PRODUCT_CHERRY_MY3000HUB,
+ 0,
+ "Cherry Mikroschalter",
+ "My3000 hub",
+ },
+ {
+ USB_VENDOR_CHERRY, USB_PRODUCT_CHERRY_CYBOARD,
+ 0,
+ "Cherry Mikroschalter",
+ "CyBoard Keyboard",
+ },
+ {
+ USB_VENDOR_CHIC, USB_PRODUCT_CHIC_MOUSE1,
+ 0,
+ "Chic Technology",
+ "mouse",
+ },
+ {
+ USB_VENDOR_CHIC, USB_PRODUCT_CHIC_CYPRESS,
+ 0,
+ "Chic Technology",
+ "Cypress USB Mouse",
+ },
+ {
+ USB_VENDOR_CHICONY, USB_PRODUCT_CHICONY_KB8933,
+ 0,
+ "Chicony Electronics",
+ "KB-8933 keyboard",
+ },
+ {
+ USB_VENDOR_CHICONY, USB_PRODUCT_CHICONY_KU0325,
+ 0,
+ "Chicony Electronics",
+ "KU-0325 keyboard",
+ },
+ {
+ USB_VENDOR_CHICONY, USB_PRODUCT_CHICONY_CNF7129,
+ 0,
+ "Chicony Electronics",
+ "Notebook Web Camera",
+ },
+ {
+ USB_VENDOR_CHICONY2, USB_PRODUCT_CHICONY2_TWINKLECAM,
+ 0,
+ "Chicony",
+ "TwinkleCam USB camera",
+ },
+ {
+ USB_VENDOR_CHPRODUCTS, USB_PRODUCT_CHPRODUCTS_PROTHROTTLE,
+ 0,
+ "CH Products",
+ "Pro Throttle",
+ },
+ {
+ USB_VENDOR_CHPRODUCTS, USB_PRODUCT_CHPRODUCTS_PROPEDALS,
+ 0,
+ "CH Products",
+ "Pro Pedals",
+ },
+ {
+ USB_VENDOR_CHPRODUCTS, USB_PRODUCT_CHPRODUCTS_FIGHTERSTICK,
+ 0,
+ "CH Products",
+ "Fighterstick",
+ },
+ {
+ USB_VENDOR_CHPRODUCTS, USB_PRODUCT_CHPRODUCTS_FLIGHTYOKE,
+ 0,
+ "CH Products",
+ "Flight Sim Yoke",
+ },
+ {
+ USB_VENDOR_CISCOLINKSYS, USB_PRODUCT_CISCOLINKSYS_WUSB54AG,
+ 0,
+ "Cisco-Linksys",
+ "WUSB54AG Wireless Adapter",
+ },
+ {
+ USB_VENDOR_CISCOLINKSYS, USB_PRODUCT_CISCOLINKSYS_WUSB54G,
+ 0,
+ "Cisco-Linksys",
+ "WUSB54G Wireless Adapter",
+ },
+ {
+ USB_VENDOR_CISCOLINKSYS, USB_PRODUCT_CISCOLINKSYS_WUSB54GP,
+ 0,
+ "Cisco-Linksys",
+ "WUSB54GP Wireless Adapter",
+ },
+ {
+ USB_VENDOR_CISCOLINKSYS, USB_PRODUCT_CISCOLINKSYS_USB200MV2,
+ 0,
+ "Cisco-Linksys",
+ "USB200M v2",
+ },
+ {
+ USB_VENDOR_CISCOLINKSYS, USB_PRODUCT_CISCOLINKSYS_HU200TS,
+ 0,
+ "Cisco-Linksys",
+ "HU200TS Wireless Adapter",
+ },
+ {
+ USB_VENDOR_CISCOLINKSYS, USB_PRODUCT_CISCOLINKSYS_WUSB54GC,
+ 0,
+ "Cisco-Linksys",
+ "WUSB54GC",
+ },
+ {
+ USB_VENDOR_CISCOLINKSYS, USB_PRODUCT_CISCOLINKSYS_WUSB54GR,
+ 0,
+ "Cisco-Linksys",
+ "WUSB54GR",
+ },
+ {
+ USB_VENDOR_CISCOLINKSYS, USB_PRODUCT_CISCOLINKSYS_WUSBF54G,
+ 0,
+ "Cisco-Linksys",
+ "WUSBF54G",
+ },
+ {
+ USB_VENDOR_CISCOLINKSYS2, USB_PRODUCT_CISCOLINKSYS2_RT3070,
+ 0,
+ "Cisco-Linksys",
+ "RT3070",
+ },
+ {
+ USB_VENDOR_CISCOLINKSYS3, USB_PRODUCT_CISCOLINKSYS3_RT3070,
+ 0,
+ "Cisco-Linksys",
+ "RT3070",
+ },
+ {
+ USB_VENDOR_CLIPSAL, USB_PRODUCT_CLIPSAL_5500PCU,
+ 0,
+ "Clipsal",
+ "5500PCU C-Bus",
+ },
+ {
+ USB_VENDOR_CMOTECH, USB_PRODUCT_CMOTECH_CNU510,
+ 0,
+ "C-motech",
+ "CDMA Technologies USB modem",
+ },
+ {
+ USB_VENDOR_CMOTECH, USB_PRODUCT_CMOTECH_CNU550,
+ 0,
+ "C-motech",
+ "CDMA 2000 1xRTT/1xEVDO USB modem",
+ },
+ {
+ USB_VENDOR_CMOTECH, USB_PRODUCT_CMOTECH_CGU628,
+ 0,
+ "C-motech",
+ "CGU-628",
+ },
+ {
+ USB_VENDOR_CMOTECH, USB_PRODUCT_CMOTECH_CDMA_MODEM1,
+ 0,
+ "C-motech",
+ "CDMA Technologies USB modem",
+ },
+ {
+ USB_VENDOR_CMOTECH, USB_PRODUCT_CMOTECH_DISK,
+ 0,
+ "C-motech",
+ "disk mode",
+ },
+ {
+ USB_VENDOR_COMPAQ, USB_PRODUCT_COMPAQ_IPAQPOCKETPC,
+ 0,
+ "Compaq",
+ "iPAQ PocketPC",
+ },
+ {
+ USB_VENDOR_COMPAQ, USB_PRODUCT_COMPAQ_PJB100,
+ 0,
+ "Compaq",
+ "Personal Jukebox PJB100",
+ },
+ {
+ USB_VENDOR_COMPAQ, USB_PRODUCT_COMPAQ_IPAQLINUX,
+ 0,
+ "Compaq",
+ "iPAQ Linux",
+ },
+ {
+ USB_VENDOR_COMPOSITE, USB_PRODUCT_COMPOSITE_USBPS2,
+ 0,
+ "Composite",
+ "USB to PS2 Adaptor",
+ },
+ {
+ USB_VENDOR_CONCEPTRONIC, USB_PRODUCT_CONCEPTRONIC_PRISM_GT,
+ 0,
+ "Conceptronic",
+ "PrismGT USB 2.0 WLAN",
+ },
+ {
+ USB_VENDOR_CONCEPTRONIC, USB_PRODUCT_CONCEPTRONIC_C11U,
+ 0,
+ "Conceptronic",
+ "C11U",
+ },
+ {
+ USB_VENDOR_CONCEPTRONIC, USB_PRODUCT_CONCEPTRONIC_WL210,
+ 0,
+ "Conceptronic",
+ "WL-210",
+ },
+ {
+ USB_VENDOR_CONCEPTRONIC, USB_PRODUCT_CONCEPTRONIC_AR5523_1,
+ 0,
+ "Conceptronic",
+ "AR5523",
+ },
+ {
+ USB_VENDOR_CONCEPTRONIC, USB_PRODUCT_CONCEPTRONIC_AR5523_1_NF,
+ 0,
+ "Conceptronic",
+ "AR5523 (no firmware)",
+ },
+ {
+ USB_VENDOR_CONCEPTRONIC, USB_PRODUCT_CONCEPTRONIC_AR5523_2,
+ 0,
+ "Conceptronic",
+ "AR5523",
+ },
+ {
+ USB_VENDOR_CONCEPTRONIC, USB_PRODUCT_CONCEPTRONIC_AR5523_2_NF,
+ 0,
+ "Conceptronic",
+ "AR5523 (no firmware)",
+ },
+ {
+ USB_VENDOR_CONCEPTRONIC2, USB_PRODUCT_CONCEPTRONIC2_C54RU,
+ 0,
+ "Conceptronic",
+ "C54RU WLAN",
+ },
+ {
+ USB_VENDOR_CONCEPTRONIC2, USB_PRODUCT_CONCEPTRONIC2_C54RU2,
+ 0,
+ "Conceptronic",
+ "C54RU",
+ },
+ {
+ USB_VENDOR_CONCEPTRONIC2, USB_PRODUCT_CONCEPTRONIC2_RT3070_1,
+ 0,
+ "Conceptronic",
+ "RT3070",
+ },
+ {
+ USB_VENDOR_CONCEPTRONIC2, USB_PRODUCT_CONCEPTRONIC2_RT3070_2,
+ 0,
+ "Conceptronic",
+ "RT3070",
+ },
+ {
+ USB_VENDOR_CONCEPTRONIC2, USB_PRODUCT_CONCEPTRONIC2_VIGORN61,
+ 0,
+ "Conceptronic",
+ "VIGORN61",
+ },
+ {
+ USB_VENDOR_CONCEPTRONIC2, USB_PRODUCT_CONCEPTRONIC2_RT2870_1,
+ 0,
+ "Conceptronic",
+ "RT2870",
+ },
+ {
+ USB_VENDOR_CONCEPTRONIC2, USB_PRODUCT_CONCEPTRONIC2_RT2870_2,
+ 0,
+ "Conceptronic",
+ "RT2870",
+ },
+ {
+ USB_VENDOR_CONCEPTRONIC2, USB_PRODUCT_CONCEPTRONIC2_RT2870_7,
+ 0,
+ "Conceptronic",
+ "RT2870",
+ },
+ {
+ USB_VENDOR_CONCEPTRONIC2, USB_PRODUCT_CONCEPTRONIC2_RT2870_8,
+ 0,
+ "Conceptronic",
+ "RT2870",
+ },
+ {
+ USB_VENDOR_CONCEPTRONIC2, USB_PRODUCT_CONCEPTRONIC2_RT2870_3,
+ 0,
+ "Conceptronic",
+ "RT2870",
+ },
+ {
+ USB_VENDOR_CONCEPTRONIC2, USB_PRODUCT_CONCEPTRONIC2_RT2870_4,
+ 0,
+ "Conceptronic",
+ "RT2870",
+ },
+ {
+ USB_VENDOR_CONCEPTRONIC2, USB_PRODUCT_CONCEPTRONIC2_RT2870_5,
+ 0,
+ "Conceptronic",
+ "RT2870",
+ },
+ {
+ USB_VENDOR_CONCEPTRONIC2, USB_PRODUCT_CONCEPTRONIC2_RT2870_6,
+ 0,
+ "Conceptronic",
+ "RT2870",
+ },
+ {
+ USB_VENDOR_CONNECTIX, USB_PRODUCT_CONNECTIX_QUICKCAM,
+ 0,
+ "Connectix",
+ "QuickCam",
+ },
+ {
+ USB_VENDOR_COREGA, USB_PRODUCT_COREGA_ETHER_USB_T,
+ 0,
+ "Corega",
+ "Ether USB-T",
+ },
+ {
+ USB_VENDOR_COREGA, USB_PRODUCT_COREGA_FETHER_USB_TX,
+ 0,
+ "Corega",
+ "FEther USB-TX",
+ },
+ {
+ USB_VENDOR_COREGA, USB_PRODUCT_COREGA_WLAN_USB_USB_11,
+ 0,
+ "Corega",
+ "WirelessLAN USB-11",
+ },
+ {
+ USB_VENDOR_COREGA, USB_PRODUCT_COREGA_FETHER_USB_TXS,
+ 0,
+ "Corega",
+ "FEther USB-TXS",
+ },
+ {
+ USB_VENDOR_COREGA, USB_PRODUCT_COREGA_WLANUSB,
+ 0,
+ "Corega",
+ "Wireless LAN Stick-11",
+ },
+ {
+ USB_VENDOR_COREGA, USB_PRODUCT_COREGA_FETHER_USB2_TX,
+ 0,
+ "Corega",
+ "FEther USB2-TX",
+ },
+ {
+ USB_VENDOR_COREGA, USB_PRODUCT_COREGA_WLUSB_11_KEY,
+ 0,
+ "Corega",
+ "ULUSB-11 Key",
+ },
+ {
+ USB_VENDOR_COREGA, USB_PRODUCT_COREGA_CGUSBRS232R,
+ 0,
+ "Corega",
+ "CG-USBRS232R",
+ },
+ {
+ USB_VENDOR_COREGA, USB_PRODUCT_COREGA_CGWLUSB2GL,
+ 0,
+ "Corega",
+ "CG-WLUSB2GL",
+ },
+ {
+ USB_VENDOR_COREGA, USB_PRODUCT_COREGA_CGWLUSB2GPX,
+ 0,
+ "Corega",
+ "CG-WLUSB2GPX",
+ },
+ {
+ USB_VENDOR_COREGA, USB_PRODUCT_COREGA_RT2870_1,
+ 0,
+ "Corega",
+ "RT2870",
+ },
+ {
+ USB_VENDOR_COREGA, USB_PRODUCT_COREGA_RT2870_2,
+ 0,
+ "Corega",
+ "RT2870",
+ },
+ {
+ USB_VENDOR_COREGA, USB_PRODUCT_COREGA_RT2870_3,
+ 0,
+ "Corega",
+ "RT2870",
+ },
+ {
+ USB_VENDOR_COREGA, USB_PRODUCT_COREGA_RT3070,
+ 0,
+ "Corega",
+ "RT3070",
+ },
+ {
+ USB_VENDOR_COREGA, USB_PRODUCT_COREGA_CGWLUSB300GNM,
+ 0,
+ "Corega",
+ "CG-WLUSB300GNM",
+ },
+ {
+ USB_VENDOR_COREGA, USB_PRODUCT_COREGA_WLUSB_11_STICK,
+ 0,
+ "Corega",
+ "WLAN USB Stick 11",
+ },
+ {
+ USB_VENDOR_COREGA, USB_PRODUCT_COREGA_FETHER_USB_TXC,
+ 0,
+ "Corega",
+ "FEther USB-TXC",
+ },
+ {
+ USB_VENDOR_CREATIVE, USB_PRODUCT_CREATIVE_NOMAD_II,
+ 0,
+ "Creative Labs",
+ "Nomad II MP3 player",
+ },
+ {
+ USB_VENDOR_CREATIVE, USB_PRODUCT_CREATIVE_NOMAD_IIMG,
+ 0,
+ "Creative Labs",
+ "Nomad II MG",
+ },
+ {
+ USB_VENDOR_CREATIVE, USB_PRODUCT_CREATIVE_NOMAD,
+ 0,
+ "Creative Labs",
+ "Nomad",
+ },
+ {
+ USB_VENDOR_CREATIVE2, USB_PRODUCT_CREATIVE2_VOIP_BLASTER,
+ 0,
+ "Creative Labs",
+ "Voip Blaster",
+ },
+ {
+ USB_VENDOR_CREATIVE3, USB_PRODUCT_CREATIVE3_OPTICAL_MOUSE,
+ 0,
+ "Creative Labs",
+ "Notebook Optical Mouse",
+ },
+ {
+ USB_VENDOR_CSR, USB_PRODUCT_CSR_BT_DONGLE,
+ 0,
+ "Cambridge Silicon Radio",
+ "Bluetooth USB dongle",
+ },
+ {
+ USB_VENDOR_CSR, USB_PRODUCT_CSR_CSRDFU,
+ 0,
+ "Cambridge Silicon Radio",
+ "USB Bluetooth Device in DFU State",
+ },
+ {
+ USB_VENDOR_CHIPSBANK, USB_PRODUCT_CHIPSBANK_USBMEMSTICK,
+ 0,
+ "Chipsbank Microelectronics Co.",
+ "CBM2080 Flash drive controller",
+ },
+ {
+ USB_VENDOR_CHIPSBANK, USB_PRODUCT_CHIPSBANK_USBMEMSTICK1,
+ 0,
+ "Chipsbank Microelectronics Co.",
+ "CBM1180 Flash drive controller",
+ },
+ {
+ USB_VENDOR_CTX, USB_PRODUCT_CTX_EX1300,
+ 0,
+ "Chuntex",
+ "Ex1300 hub",
+ },
+ {
+ USB_VENDOR_CURITEL, USB_PRODUCT_CURITEL_HX550C,
+ 0,
+ "Curitel Communications Inc",
+ "CDMA 2000 1xRTT USB modem (HX-550C)",
+ },
+ {
+ USB_VENDOR_CURITEL, USB_PRODUCT_CURITEL_HX57XB,
+ 0,
+ "Curitel Communications Inc",
+ "CDMA 2000 1xRTT USB modem (HX-570/575B/PR-600)",
+ },
+ {
+ USB_VENDOR_CURITEL, USB_PRODUCT_CURITEL_PC5740,
+ 0,
+ "Curitel Communications Inc",
+ "Broadband Wireless modem",
+ },
+ {
+ USB_VENDOR_CURITEL, USB_PRODUCT_CURITEL_UM175,
+ 0,
+ "Curitel Communications Inc",
+ "EVDO modem",
+ },
+ {
+ USB_VENDOR_CYBERPOWER, USB_PRODUCT_CYBERPOWER_1500CAVRLCD,
+ 0,
+ "Cyber Power Systems, Inc.",
+ "1500CAVRLCD",
+ },
+ {
+ USB_VENDOR_CYBERTAN, USB_PRODUCT_CYBERTAN_TG54USB,
+ 0,
+ "CyberTAN Technology",
+ "TG54USB",
+ },
+ {
+ USB_VENDOR_CYBERTAN, USB_PRODUCT_CYBERTAN_RT2870,
+ 0,
+ "CyberTAN Technology",
+ "RT2870",
+ },
+ {
+ USB_VENDOR_CYPRESS, USB_PRODUCT_CYPRESS_MOUSE,
+ 0,
+ "Cypress Semiconductor",
+ "mouse",
+ },
+ {
+ USB_VENDOR_CYPRESS, USB_PRODUCT_CYPRESS_THERMO,
+ 0,
+ "Cypress Semiconductor",
+ "thermometer",
+ },
+ {
+ USB_VENDOR_CYPRESS, USB_PRODUCT_CYPRESS_WISPY1A,
+ 0,
+ "Cypress Semiconductor",
+ "MetaGeek Wi-Spy",
+ },
+ {
+ USB_VENDOR_CYPRESS, USB_PRODUCT_CYPRESS_KBDHUB,
+ 0,
+ "Cypress Semiconductor",
+ "Keyboard/Hub",
+ },
+ {
+ USB_VENDOR_CYPRESS, USB_PRODUCT_CYPRESS_FMRADIO,
+ 0,
+ "Cypress Semiconductor",
+ "FM Radio",
+ },
+ {
+ USB_VENDOR_CYPRESS, USB_PRODUCT_CYPRESS_IKARILASER,
+ 0,
+ "Cypress Semiconductor",
+ "Ikari Laser SteelSeries ApS",
+ },
+ {
+ USB_VENDOR_CYPRESS, USB_PRODUCT_CYPRESS_USBRS232,
+ 0,
+ "Cypress Semiconductor",
+ "USB-RS232 Interface",
+ },
+ {
+ USB_VENDOR_CYPRESS, USB_PRODUCT_CYPRESS_SLIM_HUB,
+ 0,
+ "Cypress Semiconductor",
+ "Slim Hub",
+ },
+ {
+ USB_VENDOR_CYPRESS, USB_PRODUCT_CYPRESS_XX6830XX,
+ 0,
+ "Cypress Semiconductor",
+ "PATA Storage Device",
+ },
+ {
+ USB_VENDOR_CYPRESS, USB_PRODUCT_CYPRESS_SILVERSHIELD,
+ 0,
+ "Cypress Semiconductor",
+ "Gembird Silver Shield PM",
+ },
+ {
+ USB_VENDOR_DAISY, USB_PRODUCT_DAISY_DMC,
+ 0,
+ "Daisy Technology",
+ "USB MultiMedia Reader",
+ },
+ {
+ USB_VENDOR_DALLAS, USB_PRODUCT_DALLAS_J6502,
+ 0,
+ "Dallas Semiconductor",
+ "J-6502 speakers",
+ },
+ {
+ USB_VENDOR_DATAAPEX, USB_PRODUCT_DATAAPEX_MULTICOM,
+ 0,
+ "DataApex",
+ "MultiCom",
+ },
+ {
+ USB_VENDOR_DELL, USB_PRODUCT_DELL_PORT,
+ 0,
+ "Dell",
+ "Port Replicator",
+ },
+ {
+ USB_VENDOR_DELL, USB_PRODUCT_DELL_AIO926,
+ 0,
+ "Dell",
+ "Photo AIO Printer 926",
+ },
+ {
+ USB_VENDOR_DELL, USB_PRODUCT_DELL_BC02,
+ 0,
+ "Dell",
+ "BC02 Bluetooth USB Adapter",
+ },
+ {
+ USB_VENDOR_DELL, USB_PRODUCT_DELL_PRISM_GT_1,
+ 0,
+ "Dell",
+ "PrismGT USB 2.0 WLAN",
+ },
+ {
+ USB_VENDOR_DELL, USB_PRODUCT_DELL_TM350,
+ 0,
+ "Dell",
+ "TrueMobile 350 Bluetooth USB Adapter",
+ },
+ {
+ USB_VENDOR_DELL, USB_PRODUCT_DELL_PRISM_GT_2,
+ 0,
+ "Dell",
+ "PrismGT USB 2.0 WLAN",
+ },
+ {
+ USB_VENDOR_DELL, USB_PRODUCT_DELL_U5700,
+ 0,
+ "Dell",
+ "Dell 5700 3G",
+ },
+ {
+ USB_VENDOR_DELL, USB_PRODUCT_DELL_U5500,
+ 0,
+ "Dell",
+ "Dell 5500 3G",
+ },
+ {
+ USB_VENDOR_DELL, USB_PRODUCT_DELL_U5505,
+ 0,
+ "Dell",
+ "Dell 5505 3G",
+ },
+ {
+ USB_VENDOR_DELL, USB_PRODUCT_DELL_U5700_2,
+ 0,
+ "Dell",
+ "Dell 5700 3G",
+ },
+ {
+ USB_VENDOR_DELL, USB_PRODUCT_DELL_U5510,
+ 0,
+ "Dell",
+ "Dell 5510 3G",
+ },
+ {
+ USB_VENDOR_DELL, USB_PRODUCT_DELL_U5700_3,
+ 0,
+ "Dell",
+ "Dell 5700 3G",
+ },
+ {
+ USB_VENDOR_DELL, USB_PRODUCT_DELL_U5700_4,
+ 0,
+ "Dell",
+ "Dell 5700 3G",
+ },
+ {
+ USB_VENDOR_DELL, USB_PRODUCT_DELL_U5720,
+ 0,
+ "Dell",
+ "Dell 5720 3G",
+ },
+ {
+ USB_VENDOR_DELL, USB_PRODUCT_DELL_U5720_2,
+ 0,
+ "Dell",
+ "Dell 5720 3G",
+ },
+ {
+ USB_VENDOR_DELL, USB_PRODUCT_DELL_U740,
+ 0,
+ "Dell",
+ "Dell U740 CDMA",
+ },
+ {
+ USB_VENDOR_DELL, USB_PRODUCT_DELL_U5520,
+ 0,
+ "Dell",
+ "Dell 5520 3G",
+ },
+ {
+ USB_VENDOR_DELL, USB_PRODUCT_DELL_U5520_2,
+ 0,
+ "Dell",
+ "Dell 5520 3G",
+ },
+ {
+ USB_VENDOR_DELL, USB_PRODUCT_DELL_U5520_3,
+ 0,
+ "Dell",
+ "Dell 5520 3G",
+ },
+ {
+ USB_VENDOR_DELL, USB_PRODUCT_DELL_U5730,
+ 0,
+ "Dell",
+ "Dell 5730 3G",
+ },
+ {
+ USB_VENDOR_DELL, USB_PRODUCT_DELL_U5730_2,
+ 0,
+ "Dell",
+ "Dell 5730 3G",
+ },
+ {
+ USB_VENDOR_DELL, USB_PRODUCT_DELL_U5730_3,
+ 0,
+ "Dell",
+ "Dell 5730 3G",
+ },
+ {
+ USB_VENDOR_DELL, USB_PRODUCT_DELL_DW700,
+ 0,
+ "Dell",
+ "Dell DW700 GPS",
+ },
+ {
+ USB_VENDOR_DELORME, USB_PRODUCT_DELORME_EARTHMATE,
+ 0,
+ "DeLorme",
+ "Earthmate GPS",
+ },
+ {
+ USB_VENDOR_DESKNOTE, USB_PRODUCT_DESKNOTE_UCR_61S2B,
+ 0,
+ "Desknote",
+ "UCR-61S2B",
+ },
+ {
+ USB_VENDOR_DIAMOND, USB_PRODUCT_DIAMOND_RIO500USB,
+ 0,
+ "Diamond",
+ "Rio 500 USB",
+ },
+ {
+ USB_VENDOR_DICKSMITH, USB_PRODUCT_DICKSMITH_RT2573,
+ 0,
+ "Dick Smith Electronics",
+ "RT2573",
+ },
+ {
+ USB_VENDOR_DICKSMITH, USB_PRODUCT_DICKSMITH_CWD854F,
+ 0,
+ "Dick Smith Electronics",
+ "C-Net CWD-854 rev F",
+ },
+ {
+ USB_VENDOR_DIGI, USB_PRODUCT_DIGI_ACCELEPORT2,
+ 0,
+ "Digi International",
+ "AccelePort USB 2",
+ },
+ {
+ USB_VENDOR_DIGI, USB_PRODUCT_DIGI_ACCELEPORT4,
+ 0,
+ "Digi International",
+ "AccelePort USB 4",
+ },
+ {
+ USB_VENDOR_DIGI, USB_PRODUCT_DIGI_ACCELEPORT8,
+ 0,
+ "Digi International",
+ "AccelePort USB 8",
+ },
+ {
+ USB_VENDOR_DIGIANSWER, USB_PRODUCT_DIGIANSWER_ZIGBEE802154,
+ 0,
+ "Digianswer",
+ "ZigBee/802.15.4 MAC",
+ },
+ {
+ USB_VENDOR_DLINK, USB_PRODUCT_DLINK_DUBE100,
+ 0,
+ "D-Link",
+ "10/100 Ethernet",
+ },
+ {
+ USB_VENDOR_DLINK, USB_PRODUCT_DLINK_DSB650TX4,
+ 0,
+ "D-Link",
+ "10/100 Ethernet",
+ },
+ {
+ USB_VENDOR_DLINK, USB_PRODUCT_DLINK_DWL120E,
+ 0,
+ "D-Link",
+ "DWL-120 rev E",
+ },
+ {
+ USB_VENDOR_DLINK, USB_PRODUCT_DLINK_DWL122,
+ 0,
+ "D-Link",
+ "DWL-122",
+ },
+ {
+ USB_VENDOR_DLINK, USB_PRODUCT_DLINK_DWLG120,
+ 0,
+ "D-Link",
+ "DWL-G120",
+ },
+ {
+ USB_VENDOR_DLINK, USB_PRODUCT_DLINK_DWL120F,
+ 0,
+ "D-Link",
+ "DWL-120 rev F",
+ },
+ {
+ USB_VENDOR_DLINK, USB_PRODUCT_DLINK_DWLAG132,
+ 0,
+ "D-Link",
+ "DWL-AG132",
+ },
+ {
+ USB_VENDOR_DLINK, USB_PRODUCT_DLINK_DWLAG132_NF,
+ 0,
+ "D-Link",
+ "DWL-AG132 (no firmware)",
+ },
+ {
+ USB_VENDOR_DLINK, USB_PRODUCT_DLINK_DWLG132,
+ 0,
+ "D-Link",
+ "DWL-G132",
+ },
+ {
+ USB_VENDOR_DLINK, USB_PRODUCT_DLINK_DWLG132_NF,
+ 0,
+ "D-Link",
+ "DWL-G132 (no firmware)",
+ },
+ {
+ USB_VENDOR_DLINK, USB_PRODUCT_DLINK_DWLAG122,
+ 0,
+ "D-Link",
+ "DWL-AG122",
+ },
+ {
+ USB_VENDOR_DLINK, USB_PRODUCT_DLINK_DWLAG122_NF,
+ 0,
+ "D-Link",
+ "DWL-AG122 (no firmware)",
+ },
+ {
+ USB_VENDOR_DLINK, USB_PRODUCT_DLINK_DWLG122,
+ 0,
+ "D-Link",
+ "DWL-G122 b1 Wireless Adapter",
+ },
+ {
+ USB_VENDOR_DLINK, USB_PRODUCT_DLINK_DUBE100B1,
+ 0,
+ "D-Link",
+ "DUB-E100 rev B1",
+ },
+ {
+ USB_VENDOR_DLINK, USB_PRODUCT_DLINK_RT2870,
+ 0,
+ "D-Link",
+ "RT2870",
+ },
+ {
+ USB_VENDOR_DLINK, USB_PRODUCT_DLINK_RT3072,
+ 0,
+ "D-Link",
+ "RT3072",
+ },
+ {
+ USB_VENDOR_DLINK, USB_PRODUCT_DLINK_DSB650C,
+ 0,
+ "D-Link",
+ "10Mbps Ethernet",
+ },
+ {
+ USB_VENDOR_DLINK, USB_PRODUCT_DLINK_DSB650TX1,
+ 0,
+ "D-Link",
+ "10/100 Ethernet",
+ },
+ {
+ USB_VENDOR_DLINK, USB_PRODUCT_DLINK_DSB650TX,
+ 0,
+ "D-Link",
+ "10/100 Ethernet",
+ },
+ {
+ USB_VENDOR_DLINK, USB_PRODUCT_DLINK_DSB650TX_PNA,
+ 0,
+ "D-Link",
+ "1/10/100 Ethernet",
+ },
+ {
+ USB_VENDOR_DLINK, USB_PRODUCT_DLINK_DSB650TX3,
+ 0,
+ "D-Link",
+ "10/100 Ethernet",
+ },
+ {
+ USB_VENDOR_DLINK, USB_PRODUCT_DLINK_DSB650TX2,
+ 0,
+ "D-Link",
+ "10/100 Ethernet",
+ },
+ {
+ USB_VENDOR_DLINK, USB_PRODUCT_DLINK_DSB650,
+ 0,
+ "D-Link",
+ "10/100 Ethernet",
+ },
+ {
+ USB_VENDOR_DLINK, USB_PRODUCT_DLINK_DUBH7,
+ 0,
+ "D-Link",
+ "DUB-H7 USB 2.0 7-Port Hub",
+ },
+ {
+ USB_VENDOR_DLINK2, USB_PRODUCT_DLINK2_DWA120,
+ 0,
+ "D-Link",
+ "DWA-120",
+ },
+ {
+ USB_VENDOR_DLINK2, USB_PRODUCT_DLINK2_DWA120_NF,
+ 0,
+ "D-Link",
+ "DWA-120 (no firmware)",
+ },
+ {
+ USB_VENDOR_DLINK2, USB_PRODUCT_DLINK2_DWLG122C1,
+ 0,
+ "D-Link",
+ "DWL-G122 c1",
+ },
+ {
+ USB_VENDOR_DLINK2, USB_PRODUCT_DLINK2_WUA1340,
+ 0,
+ "D-Link",
+ "WUA-1340",
+ },
+ {
+ USB_VENDOR_DLINK2, USB_PRODUCT_DLINK2_DWA111,
+ 0,
+ "D-Link",
+ "DWA-111",
+ },
+ {
+ USB_VENDOR_DLINK2, USB_PRODUCT_DLINK2_RT2870_1,
+ 0,
+ "D-Link",
+ "RT2870",
+ },
+ {
+ USB_VENDOR_DLINK2, USB_PRODUCT_DLINK2_DWA110,
+ 0,
+ "D-Link",
+ "DWA-110",
+ },
+ {
+ USB_VENDOR_DLINK2, USB_PRODUCT_DLINK2_RT3072,
+ 0,
+ "D-Link",
+ "RT3072",
+ },
+ {
+ USB_VENDOR_DLINK2, USB_PRODUCT_DLINK2_RT3072_1,
+ 0,
+ "D-Link",
+ "RT3072",
+ },
+ {
+ USB_VENDOR_DLINK2, USB_PRODUCT_DLINK2_RT3070_1,
+ 0,
+ "D-Link",
+ "RT3070",
+ },
+ {
+ USB_VENDOR_DLINK2, USB_PRODUCT_DLINK2_RT3070_2,
+ 0,
+ "D-Link",
+ "RT3070",
+ },
+ {
+ USB_VENDOR_DLINK2, USB_PRODUCT_DLINK2_RT3070_3,
+ 0,
+ "D-Link",
+ "RT3070",
+ },
+ {
+ USB_VENDOR_DLINK2, USB_PRODUCT_DLINK2_RT2870_2,
+ 0,
+ "D-Link",
+ "RT2870",
+ },
+ {
+ USB_VENDOR_DLINK2, USB_PRODUCT_DLINK2_DWA130,
+ 0,
+ "D-Link",
+ "DWA-130",
+ },
+ {
+ USB_VENDOR_DLINK2, USB_PRODUCT_DLINK2_RT3070_4,
+ 0,
+ "D-Link",
+ "RT3070",
+ },
+ {
+ USB_VENDOR_DLINK2, USB_PRODUCT_DLINK2_RT3070_5,
+ 0,
+ "D-Link",
+ "RT3070",
+ },
+ {
+ USB_VENDOR_DLINK3, USB_PRODUCT_DLINK3_DWM652,
+ 0,
+ "Dlink",
+ "DWM-652",
+ },
+ {
+ USB_VENDOR_DMI, USB_PRODUCT_DMI_CFSM_RW,
+ 0,
+ "DMI",
+ "CF/SM Reader/Writer",
+ },
+ {
+ USB_VENDOR_DMI, USB_PRODUCT_DMI_DISK,
+ 0,
+ "DMI",
+ "Generic Disk",
+ },
+ {
+ USB_VENDOR_DRAYTEK, USB_PRODUCT_DRAYTEK_VIGOR550,
+ 0,
+ "DrayTek",
+ "Vigor550",
+ },
+ {
+ USB_VENDOR_DRESDENELEKTRONIK, USB_PRODUCT_DRESDENELEKTRONIK_SENSORTERMINALBOARD,
+ 0,
+ "dresden elektronik",
+ "SensorTerminalBoard",
+ },
+ {
+ USB_VENDOR_DRESDENELEKTRONIK, USB_PRODUCT_DRESDENELEKTRONIK_WIRELESSHANDHELDTERMINAL,
+ 0,
+ "dresden elektronik",
+ "Wireless Handheld Terminal",
+ },
+ {
+ USB_VENDOR_DYNASTREAM, USB_PRODUCT_DYNASTREAM_ANTDEVBOARD,
+ 0,
+ "Dynastream Innovations",
+ "ANT dev board",
+ },
+ {
+ USB_VENDOR_DYNASTREAM, USB_PRODUCT_DYNASTREAM_ANT2USB,
+ 0,
+ "Dynastream Innovations",
+ "ANT2USB",
+ },
+ {
+ USB_VENDOR_DYNASTREAM, USB_PRODUCT_DYNASTREAM_ANTDEVBOARD2,
+ 0,
+ "Dynastream Innovations",
+ "ANT dev board",
+ },
+ {
+ USB_VENDOR_EDIMAX, USB_PRODUCT_EDIMAX_EW7318USG,
+ 0,
+ "Edimax",
+ "USB Wireless dongle",
+ },
+ {
+ USB_VENDOR_EDIMAX, USB_PRODUCT_EDIMAX_RT2870_1,
+ 0,
+ "Edimax",
+ "RT2870",
+ },
+ {
+ USB_VENDOR_EDIMAX, USB_PRODUCT_EDIMAX_EW7717,
+ 0,
+ "Edimax",
+ "EW-7717",
+ },
+ {
+ USB_VENDOR_EDIMAX, USB_PRODUCT_EDIMAX_EW7718,
+ 0,
+ "Edimax",
+ "EW-7718",
+ },
+ {
+ USB_VENDOR_EGALAX, USB_PRODUCT_EGALAX_TPANEL,
+ 0,
+ "eGalax, Inc.",
+ "Touch Panel",
+ },
+ {
+ USB_VENDOR_EGALAX, USB_PRODUCT_EGALAX_TPANEL2,
+ 0,
+ "eGalax, Inc.",
+ "Touch Panel",
+ },
+ {
+ USB_VENDOR_EGALAX2, USB_PRODUCT_EGALAX2_TPANEL,
+ 0,
+ "eGalax, Inc.",
+ "Touch Panel",
+ },
+ {
+ USB_VENDOR_EICON, USB_PRODUCT_EICON_DIVA852,
+ 0,
+ "Eicon Networks",
+ "Diva 852 ISDN TA",
+ },
+ {
+ USB_VENDOR_EIZO, USB_PRODUCT_EIZO_HUB,
+ 0,
+ "EIZO",
+ "hub",
+ },
+ {
+ USB_VENDOR_EIZO, USB_PRODUCT_EIZO_MONITOR,
+ 0,
+ "EIZO",
+ "monitor",
+ },
+ {
+ USB_VENDOR_ELCON, USB_PRODUCT_ELCON_PLAN,
+ 0,
+ "ELCON Systemtechnik",
+ "Goldpfeil P-LAN",
+ },
+ {
+ USB_VENDOR_ELECOM, USB_PRODUCT_ELECOM_MOUSE29UO,
+ 0,
+ "Elecom",
+ "mouse 29UO",
+ },
+ {
+ USB_VENDOR_ELECOM, USB_PRODUCT_ELECOM_LDUSBTX0,
+ 0,
+ "Elecom",
+ "LD-USB/TX",
+ },
+ {
+ USB_VENDOR_ELECOM, USB_PRODUCT_ELECOM_LDUSBTX1,
+ 0,
+ "Elecom",
+ "LD-USB/TX",
+ },
+ {
+ USB_VENDOR_ELECOM, USB_PRODUCT_ELECOM_LDUSBLTX,
+ 0,
+ "Elecom",
+ "LD-USBL/TX",
+ },
+ {
+ USB_VENDOR_ELECOM, USB_PRODUCT_ELECOM_LDUSBTX2,
+ 0,
+ "Elecom",
+ "LD-USB/TX",
+ },
+ {
+ USB_VENDOR_ELECOM, USB_PRODUCT_ELECOM_LDUSB20,
+ 0,
+ "Elecom",
+ "LD-USB20",
+ },
+ {
+ USB_VENDOR_ELECOM, USB_PRODUCT_ELECOM_UCSGT,
+ 0,
+ "Elecom",
+ "UC-SGT",
+ },
+ {
+ USB_VENDOR_ELECOM, USB_PRODUCT_ELECOM_UCSGT0,
+ 0,
+ "Elecom",
+ "UC-SGT",
+ },
+ {
+ USB_VENDOR_ELECOM, USB_PRODUCT_ELECOM_LDUSBTX3,
+ 0,
+ "Elecom",
+ "LD-USB/TX",
+ },
+ {
+ USB_VENDOR_ELSA, USB_PRODUCT_ELSA_MODEM1,
+ 0,
+ "ELSA",
+ "ELSA Modem Board",
+ },
+ {
+ USB_VENDOR_ELSA, USB_PRODUCT_ELSA_USB2ETHERNET,
+ 0,
+ "ELSA",
+ "Microlink USB2Ethernet",
+ },
+ {
+ USB_VENDOR_ELV, USB_PRODUCT_ELV_USBI2C,
+ 0,
+ "ELV",
+ "USB-I2C interface",
+ },
+ {
+ USB_VENDOR_EMS, USB_PRODUCT_EMS_DUAL_SHOOTER,
+ 0,
+ "EMS Production",
+ "PSX gun controller converter",
+ },
+ {
+ USB_VENDOR_ENCORE, USB_PRODUCT_ENCORE_RT3070_1,
+ 0,
+ "Encore",
+ "RT3070",
+ },
+ {
+ USB_VENDOR_ENCORE, USB_PRODUCT_ENCORE_RT3070_2,
+ 0,
+ "Encore",
+ "RT3070",
+ },
+ {
+ USB_VENDOR_ENCORE, USB_PRODUCT_ENCORE_RT3070_3,
+ 0,
+ "Encore",
+ "RT3070",
+ },
+ {
+ USB_VENDOR_ENTREGA, USB_PRODUCT_ENTREGA_1S,
+ 0,
+ "Entrega",
+ "1S serial",
+ },
+ {
+ USB_VENDOR_ENTREGA, USB_PRODUCT_ENTREGA_2S,
+ 0,
+ "Entrega",
+ "2S serial",
+ },
+ {
+ USB_VENDOR_ENTREGA, USB_PRODUCT_ENTREGA_1S25,
+ 0,
+ "Entrega",
+ "1S25 serial",
+ },
+ {
+ USB_VENDOR_ENTREGA, USB_PRODUCT_ENTREGA_4S,
+ 0,
+ "Entrega",
+ "4S serial",
+ },
+ {
+ USB_VENDOR_ENTREGA, USB_PRODUCT_ENTREGA_E45,
+ 0,
+ "Entrega",
+ "E45 Ethernet",
+ },
+ {
+ USB_VENDOR_ENTREGA, USB_PRODUCT_ENTREGA_CENTRONICS,
+ 0,
+ "Entrega",
+ "Parallel Port",
+ },
+ {
+ USB_VENDOR_ENTREGA, USB_PRODUCT_ENTREGA_XX1,
+ 0,
+ "Entrega",
+ "Ethernet",
+ },
+ {
+ USB_VENDOR_ENTREGA, USB_PRODUCT_ENTREGA_1S9,
+ 0,
+ "Entrega",
+ "1S9 serial",
+ },
+ {
+ USB_VENDOR_ENTREGA, USB_PRODUCT_ENTREGA_EZUSB,
+ 0,
+ "Entrega",
+ "EZ-USB",
+ },
+ {
+ USB_VENDOR_ENTREGA, USB_PRODUCT_ENTREGA_2U4S,
+ 0,
+ "Entrega",
+ "2U4S serial/usb hub",
+ },
+ {
+ USB_VENDOR_ENTREGA, USB_PRODUCT_ENTREGA_XX2,
+ 0,
+ "Entrega",
+ "Ethernet",
+ },
+ {
+ USB_VENDOR_EPSON, USB_PRODUCT_EPSON_PRINTER1,
+ 0,
+ "Seiko Epson",
+ "USB Printer",
+ },
+ {
+ USB_VENDOR_EPSON, USB_PRODUCT_EPSON_PRINTER2,
+ 0,
+ "Seiko Epson",
+ "ISD USB Smart Cable for Mac",
+ },
+ {
+ USB_VENDOR_EPSON, USB_PRODUCT_EPSON_PRINTER3,
+ 0,
+ "Seiko Epson",
+ "ISD USB Smart Cable",
+ },
+ {
+ USB_VENDOR_EPSON, USB_PRODUCT_EPSON_PRINTER5,
+ 0,
+ "Seiko Epson",
+ "USB Printer",
+ },
+ {
+ USB_VENDOR_EPSON, USB_PRODUCT_EPSON_636,
+ 0,
+ "Seiko Epson",
+ "Perfection 636U / 636Photo scanner",
+ },
+ {
+ USB_VENDOR_EPSON, USB_PRODUCT_EPSON_610,
+ 0,
+ "Seiko Epson",
+ "Perfection 610 scanner",
+ },
+ {
+ USB_VENDOR_EPSON, USB_PRODUCT_EPSON_1200,
+ 0,
+ "Seiko Epson",
+ "Perfection 1200U / 1200Photo scanner",
+ },
+ {
+ USB_VENDOR_EPSON, USB_PRODUCT_EPSON_1600,
+ 0,
+ "Seiko Epson",
+ "Expression 1600 scanner",
+ },
+ {
+ USB_VENDOR_EPSON, USB_PRODUCT_EPSON_1640,
+ 0,
+ "Seiko Epson",
+ "Perfection 1640SU scanner",
+ },
+ {
+ USB_VENDOR_EPSON, USB_PRODUCT_EPSON_1240,
+ 0,
+ "Seiko Epson",
+ "Perfection 1240U / 1240Photo scanner",
+ },
+ {
+ USB_VENDOR_EPSON, USB_PRODUCT_EPSON_640U,
+ 0,
+ "Seiko Epson",
+ "Perfection 640U scanner",
+ },
+ {
+ USB_VENDOR_EPSON, USB_PRODUCT_EPSON_1250,
+ 0,
+ "Seiko Epson",
+ "Perfection 1250U / 1250Photo scanner",
+ },
+ {
+ USB_VENDOR_EPSON, USB_PRODUCT_EPSON_1650,
+ 0,
+ "Seiko Epson",
+ "Perfection 1650 scanner",
+ },
+ {
+ USB_VENDOR_EPSON, USB_PRODUCT_EPSON_GT9700F,
+ 0,
+ "Seiko Epson",
+ "GT-9700F scanner",
+ },
+ {
+ USB_VENDOR_EPSON, USB_PRODUCT_EPSON_GT9300UF,
+ 0,
+ "Seiko Epson",
+ "GT-9300UF scanner",
+ },
+ {
+ USB_VENDOR_EPSON, USB_PRODUCT_EPSON_3200,
+ 0,
+ "Seiko Epson",
+ "Perfection 3200 scanner",
+ },
+ {
+ USB_VENDOR_EPSON, USB_PRODUCT_EPSON_1260,
+ 0,
+ "Seiko Epson",
+ "Perfection 1260 scanner",
+ },
+ {
+ USB_VENDOR_EPSON, USB_PRODUCT_EPSON_1660,
+ 0,
+ "Seiko Epson",
+ "Perfection 1660 scanner",
+ },
+ {
+ USB_VENDOR_EPSON, USB_PRODUCT_EPSON_1670,
+ 0,
+ "Seiko Epson",
+ "Perfection 1670 scanner",
+ },
+ {
+ USB_VENDOR_EPSON, USB_PRODUCT_EPSON_1270,
+ 0,
+ "Seiko Epson",
+ "Perfection 1270 scanner",
+ },
+ {
+ USB_VENDOR_EPSON, USB_PRODUCT_EPSON_2480,
+ 0,
+ "Seiko Epson",
+ "Perfection 2480 scanner",
+ },
+ {
+ USB_VENDOR_EPSON, USB_PRODUCT_EPSON_3590,
+ 0,
+ "Seiko Epson",
+ "Perfection 3590 scanner",
+ },
+ {
+ USB_VENDOR_EPSON, USB_PRODUCT_EPSON_4990,
+ 0,
+ "Seiko Epson",
+ "Perfection 4990 Photo scanner",
+ },
+ {
+ USB_VENDOR_EPSON, USB_PRODUCT_EPSON_CRESSI_EDY,
+ 0,
+ "Seiko Epson",
+ "Cressi Edy diving computer",
+ },
+ {
+ USB_VENDOR_EPSON, USB_PRODUCT_EPSON_STYLUS_875DC,
+ 0,
+ "Seiko Epson",
+ "Stylus Photo 875DC Card Reader",
+ },
+ {
+ USB_VENDOR_EPSON, USB_PRODUCT_EPSON_STYLUS_895,
+ 0,
+ "Seiko Epson",
+ "Stylus Photo 895 Card Reader",
+ },
+ {
+ USB_VENDOR_EPSON, USB_PRODUCT_EPSON_CX5400,
+ 0,
+ "Seiko Epson",
+ "CX5400 scanner",
+ },
+ {
+ USB_VENDOR_EPSON, USB_PRODUCT_EPSON_3500,
+ 0,
+ "Seiko Epson",
+ "CX-3500/3600/3650 MFP",
+ },
+ {
+ USB_VENDOR_EPSON, USB_PRODUCT_EPSON_RX425,
+ 0,
+ "Seiko Epson",
+ "Stylus Photo RX425 scanner",
+ },
+ {
+ USB_VENDOR_EPSON, USB_PRODUCT_EPSON_DX3800,
+ 0,
+ "Seiko Epson",
+ "CX3700/CX3800/DX38x0 MFP scanner",
+ },
+ {
+ USB_VENDOR_EPSON, USB_PRODUCT_EPSON_4800,
+ 0,
+ "Seiko Epson",
+ "CX4700/CX4800/DX48x0 MFP scanner",
+ },
+ {
+ USB_VENDOR_EPSON, USB_PRODUCT_EPSON_4200,
+ 0,
+ "Seiko Epson",
+ "CX4100/CX4200/DX4200 MFP scanner",
+ },
+ {
+ USB_VENDOR_EPSON, USB_PRODUCT_EPSON_5000,
+ 0,
+ "Seiko Epson",
+ "CX4900/CX5000/DX50x0 MFP scanner",
+ },
+ {
+ USB_VENDOR_EPSON, USB_PRODUCT_EPSON_6000,
+ 0,
+ "Seiko Epson",
+ "CX5900/CX6000/DX60x0 MFP scanner",
+ },
+ {
+ USB_VENDOR_EPSON, USB_PRODUCT_EPSON_DX4000,
+ 0,
+ "Seiko Epson",
+ "DX4000 MFP scanner",
+ },
+ {
+ USB_VENDOR_EPSON, USB_PRODUCT_EPSON_DX7400,
+ 0,
+ "Seiko Epson",
+ "CX7300/CX7400/DX7400 MFP scanner",
+ },
+ {
+ USB_VENDOR_EPSON, USB_PRODUCT_EPSON_DX8400,
+ 0,
+ "Seiko Epson",
+ "CX8300/CX8400/DX8400 MFP scanner",
+ },
+ {
+ USB_VENDOR_EPSON, USB_PRODUCT_EPSON_SX100,
+ 0,
+ "Seiko Epson",
+ "SX100/NX100 MFP scanner",
+ },
+ {
+ USB_VENDOR_EPSON, USB_PRODUCT_EPSON_NX300,
+ 0,
+ "Seiko Epson",
+ "NX300 MFP scanner",
+ },
+ {
+ USB_VENDOR_EPSON, USB_PRODUCT_EPSON_SX200,
+ 0,
+ "Seiko Epson",
+ "SX200/SX205 MFP scanner",
+ },
+ {
+ USB_VENDOR_EPSON, USB_PRODUCT_EPSON_SX400,
+ 0,
+ "Seiko Epson",
+ "SX400/NX400/TX400 MFP scanner",
+ },
+ {
+ USB_VENDOR_ETEK, USB_PRODUCT_ETEK_1COM,
+ 0,
+ "e-TEK Labs",
+ "Serial",
+ },
+ {
+ USB_VENDOR_EXTENDED, USB_PRODUCT_EXTENDED_XTNDACCESS,
+ 0,
+ "Extended Systems",
+ "XTNDAccess IrDA",
+ },
+ {
+ USB_VENDOR_FEIYA, USB_PRODUCT_FEIYA_5IN1,
+ 0,
+ "Feiya",
+ "5-in-1 Card Reader",
+ },
+ {
+ USB_VENDOR_FIBERLINE, USB_PRODUCT_FIBERLINE_WL430U,
+ 0,
+ "Fiberline",
+ "WL-430U",
+ },
+ {
+ USB_VENDOR_FOSSIL, USB_PRODUCT_FOSSIL_WRISTPDA,
+ 0,
+ "Fossil, Inc",
+ "Wrist PDA",
+ },
+ {
+ USB_VENDOR_FOXCONN, USB_PRODUCT_FOXCONN_PIRELLI_DP_L10,
+ 0,
+ "Foxconn",
+ "Pirelli DP-L10",
+ },
+ {
+ USB_VENDOR_FREECOM, USB_PRODUCT_FREECOM_DVD,
+ 0,
+ "Freecom",
+ "DVD drive",
+ },
+ {
+ USB_VENDOR_FREECOM, USB_PRODUCT_FREECOM_HDD,
+ 0,
+ "Freecom",
+ "Classic SL Hard Drive",
+ },
+ {
+ USB_VENDOR_FSC, USB_PRODUCT_FSC_E5400,
+ 0,
+ "Fujitsu Siemens Computers",
+ "PrismGT USB 2.0 WLAN",
+ },
+ {
+ USB_VENDOR_FTDI, USB_PRODUCT_FTDI_SERIAL_8U100AX,
+ 0,
+ "Future Technology Devices",
+ "8U100AX Serial",
+ },
+ {
+ USB_VENDOR_FTDI, USB_PRODUCT_FTDI_SERIAL_8U232AM,
+ 0,
+ "Future Technology Devices",
+ "8U232AM Serial",
+ },
+ {
+ USB_VENDOR_FTDI, USB_PRODUCT_FTDI_SERIAL_8U232AM4,
+ 0,
+ "Future Technology Devices",
+ "8U232AM Serial",
+ },
+ {
+ USB_VENDOR_FTDI, USB_PRODUCT_FTDI_SERIAL_2232C,
+ 0,
+ "Future Technology Devices",
+ "FT2232C Dual port Serial",
+ },
+ {
+ USB_VENDOR_FTDI, USB_PRODUCT_FTDI_SERIAL_2232D,
+ 0,
+ "Future Technology Devices",
+ "FT2232D Dual port Serial",
+ },
+ {
+ USB_VENDOR_FTDI, USB_PRODUCT_FTDI_SERIAL_4232H,
+ 0,
+ "Future Technology Devices",
+ "FT4232H Quad port Serial",
+ },
+ {
+ USB_VENDOR_FTDI, USB_PRODUCT_FTDI_TACTRIX_OPENPORT_13M,
+ 0,
+ "Future Technology Devices",
+ "OpenPort 1.3 Mitsubishi",
+ },
+ {
+ USB_VENDOR_FTDI, USB_PRODUCT_FTDI_TACTRIX_OPENPORT_13S,
+ 0,
+ "Future Technology Devices",
+ "OpenPort 1.3 Subaru",
+ },
+ {
+ USB_VENDOR_FTDI, USB_PRODUCT_FTDI_TACTRIX_OPENPORT_13U,
+ 0,
+ "Future Technology Devices",
+ "OpenPort 1.3 Universal",
+ },
+ {
+ USB_VENDOR_FTDI, USB_PRODUCT_FTDI_GAMMASCOUT,
+ 0,
+ "Future Technology Devices",
+ "Gamma-Scout",
+ },
+ {
+ USB_VENDOR_FTDI, USB_PRODUCT_FTDI_KBS,
+ 0,
+ "Future Technology Devices",
+ "Pyramid KBS USB LCD",
+ },
+ {
+ USB_VENDOR_FTDI, USB_PRODUCT_FTDI_EISCOU,
+ 0,
+ "Future Technology Devices",
+ "Expert ISDN Control USB",
+ },
+ {
+ USB_VENDOR_FTDI, USB_PRODUCT_FTDI_UOPTBR,
+ 0,
+ "Future Technology Devices",
+ "USB-RS232 OptoBridge",
+ },
+ {
+ USB_VENDOR_FTDI, USB_PRODUCT_FTDI_EMCU2D,
+ 0,
+ "Future Technology Devices",
+ "Expert mouseCLOCK USB II",
+ },
+ {
+ USB_VENDOR_FTDI, USB_PRODUCT_FTDI_PCMSFU,
+ 0,
+ "Future Technology Devices",
+ "Precision Clock MSF USB",
+ },
+ {
+ USB_VENDOR_FTDI, USB_PRODUCT_FTDI_EMCU2H,
+ 0,
+ "Future Technology Devices",
+ "Expert mouseCLOCK USB II HBG",
+ },
+ {
+ USB_VENDOR_FTDI, USB_PRODUCT_FTDI_MAXSTREAM,
+ 0,
+ "Future Technology Devices",
+ "Maxstream PKG-U",
+ },
+ {
+ USB_VENDOR_FTDI, USB_PRODUCT_FTDI_USB_UIRT,
+ 0,
+ "Future Technology Devices",
+ "USB-UIRT",
+ },
+ {
+ USB_VENDOR_FTDI, USB_PRODUCT_FTDI_USBSERIAL,
+ 0,
+ "Future Technology Devices",
+ "Matrix Orbital USB Serial",
+ },
+ {
+ USB_VENDOR_FTDI, USB_PRODUCT_FTDI_MX2_3,
+ 0,
+ "Future Technology Devices",
+ "Matrix Orbital MX2 or MX3",
+ },
+ {
+ USB_VENDOR_FTDI, USB_PRODUCT_FTDI_MX4_5,
+ 0,
+ "Future Technology Devices",
+ "Matrix Orbital MX4 or MX5",
+ },
+ {
+ USB_VENDOR_FTDI, USB_PRODUCT_FTDI_LK202,
+ 0,
+ "Future Technology Devices",
+ "Matrix Orbital VK/LK202 Family",
+ },
+ {
+ USB_VENDOR_FTDI, USB_PRODUCT_FTDI_LK204,
+ 0,
+ "Future Technology Devices",
+ "Matrix Orbital VK/LK204 Family",
+ },
+ {
+ USB_VENDOR_FTDI, USB_PRODUCT_FTDI_CFA_632,
+ 0,
+ "Future Technology Devices",
+ "Crystalfontz CFA-632 USB LCD",
+ },
+ {
+ USB_VENDOR_FTDI, USB_PRODUCT_FTDI_CFA_634,
+ 0,
+ "Future Technology Devices",
+ "Crystalfontz CFA-634 USB LCD",
+ },
+ {
+ USB_VENDOR_FTDI, USB_PRODUCT_FTDI_CFA_633,
+ 0,
+ "Future Technology Devices",
+ "Crystalfontz CFA-633 USB LCD",
+ },
+ {
+ USB_VENDOR_FTDI, USB_PRODUCT_FTDI_CFA_631,
+ 0,
+ "Future Technology Devices",
+ "Crystalfontz CFA-631 USB LCD",
+ },
+ {
+ USB_VENDOR_FTDI, USB_PRODUCT_FTDI_CFA_635,
+ 0,
+ "Future Technology Devices",
+ "Crystalfontz CFA-635 USB LCD",
+ },
+ {
+ USB_VENDOR_FTDI, USB_PRODUCT_FTDI_SEMC_DSS20,
+ 0,
+ "Future Technology Devices",
+ "SEMC DSS-20 SyncStation",
+ },
+ {
+ USB_VENDOR_FTDI, USB_PRODUCT_FTDI_CTI_USB_NANO_485,
+ 0,
+ "Future Technology Devices",
+ "CTI USB-Nano 485",
+ },
+ {
+ USB_VENDOR_FTDI, USB_PRODUCT_FTDI_CTI_USB_MINI_485,
+ 0,
+ "Future Technology Devices",
+ "CTI USB-Mini 485",
+ },
+ {
+ USB_VENDOR_FUJIPHOTO, USB_PRODUCT_FUJIPHOTO_MASS0100,
+ 0,
+ "Fuji Photo Film",
+ "Mass Storage",
+ },
+ {
+ USB_VENDOR_FUJITSU, USB_PRODUCT_FUJITSU_AH_F401U,
+ 0,
+ "Fujitsu",
+ "AH-F401U Air H device",
+ },
+ {
+ USB_VENDOR_FUJITSUSIEMENS, USB_PRODUCT_FUJITSUSIEMENS_SCR,
+ 0,
+ "Fujitsu-Siemens",
+ "Fujitsu-Siemens SCR USB Reader",
+ },
+ {
+ USB_VENDOR_GARMIN, USB_PRODUCT_GARMIN_IQUE_3600,
+ 0,
+ "Garmin International",
+ "iQue 3600",
+ },
+ {
+ USB_VENDOR_GEMALTO, USB_PRODUCT_GEMALTO_PROXPU,
+ 0,
+ "Gemalto SA",
+ "Prox-PU/CU",
+ },
+ {
+ USB_VENDOR_GENERALINSTMNTS, USB_PRODUCT_GENERALINSTMNTS_SB5100,
+ 0,
+ "General Instruments (Motorola)",
+ "SURFboard SB5100 Cable modem",
+ },
+ {
+ USB_VENDOR_GENESYS, USB_PRODUCT_GENESYS_GL620USB,
+ 0,
+ "Genesys Logic",
+ "GL620USB Host-Host interface",
+ },
+ {
+ USB_VENDOR_GENESYS, USB_PRODUCT_GENESYS_GL650,
+ 0,
+ "Genesys Logic",
+ "GL650 HUB",
+ },
+ {
+ USB_VENDOR_GENESYS, USB_PRODUCT_GENESYS_GL606,
+ 0,
+ "Genesys Logic",
+ "USB 2.0 HUB",
+ },
+ {
+ USB_VENDOR_GENESYS, USB_PRODUCT_GENESYS_GL641USB,
+ 0,
+ "Genesys Logic",
+ "GL641USB CompactFlash Card Reader",
+ },
+ {
+ USB_VENDOR_GENESYS, USB_PRODUCT_GENESYS_GL641USB2IDE_2,
+ 0,
+ "Genesys Logic",
+ "GL641USB USB-IDE Bridge No 2",
+ },
+ {
+ USB_VENDOR_GENESYS, USB_PRODUCT_GENESYS_GL641USB2IDE,
+ 0,
+ "Genesys Logic",
+ "GL641USB USB-IDE Bridge",
+ },
+ {
+ USB_VENDOR_GENESYS, USB_PRODUCT_GENESYS_GL641USB_2,
+ 0,
+ "Genesys Logic",
+ "GL641USB 6-in-1 Card Reader",
+ },
+ {
+ USB_VENDOR_GIGABYTE, USB_PRODUCT_GIGABYTE_GN54G,
+ 0,
+ "GIGABYTE",
+ "GN-54G",
+ },
+ {
+ USB_VENDOR_GIGABYTE, USB_PRODUCT_GIGABYTE_GNBR402W,
+ 0,
+ "GIGABYTE",
+ "GN-BR402W",
+ },
+ {
+ USB_VENDOR_GIGABYTE, USB_PRODUCT_GIGABYTE_GNWLBM101,
+ 0,
+ "GIGABYTE",
+ "GN-WLBM101",
+ },
+ {
+ USB_VENDOR_GIGABYTE, USB_PRODUCT_GIGABYTE_GNWBKG,
+ 0,
+ "GIGABYTE",
+ "GN-WBKG",
+ },
+ {
+ USB_VENDOR_GIGABYTE, USB_PRODUCT_GIGABYTE_GNWB01GS,
+ 0,
+ "GIGABYTE",
+ "GN-WB01GS",
+ },
+ {
+ USB_VENDOR_GIGABYTE, USB_PRODUCT_GIGABYTE_GNWI05GS,
+ 0,
+ "GIGABYTE",
+ "GN-WI05GS",
+ },
+ {
+ USB_VENDOR_GIGASET, USB_PRODUCT_GIGASET_WLAN,
+ 0,
+ "Gigaset",
+ "WLAN",
+ },
+ {
+ USB_VENDOR_GIGASET, USB_PRODUCT_GIGASET_SMCWUSBTG,
+ 0,
+ "Gigaset",
+ "SMCWUSBT-G",
+ },
+ {
+ USB_VENDOR_GIGASET, USB_PRODUCT_GIGASET_SMCWUSBTG_NF,
+ 0,
+ "Gigaset",
+ "SMCWUSBT-G (no firmware)",
+ },
+ {
+ USB_VENDOR_GIGASET, USB_PRODUCT_GIGASET_AR5523,
+ 0,
+ "Gigaset",
+ "AR5523",
+ },
+ {
+ USB_VENDOR_GIGASET, USB_PRODUCT_GIGASET_AR5523_NF,
+ 0,
+ "Gigaset",
+ "AR5523 (no firmware)",
+ },
+ {
+ USB_VENDOR_GIGASET, USB_PRODUCT_GIGASET_RT2573,
+ 0,
+ "Gigaset",
+ "RT2573",
+ },
+ {
+ USB_VENDOR_GIGASET, USB_PRODUCT_GIGASET_RT3070_1,
+ 0,
+ "Gigaset",
+ "RT3070",
+ },
+ {
+ USB_VENDOR_GIGASET, USB_PRODUCT_GIGASET_RT3070_2,
+ 0,
+ "Gigaset",
+ "RT3070",
+ },
+ {
+ USB_VENDOR_GIGABYTE, USB_PRODUCT_GIGABYTE_RT2870_1,
+ 0,
+ "GIGABYTE",
+ "RT2870",
+ },
+ {
+ USB_VENDOR_GIGABYTE, USB_PRODUCT_GIGABYTE_GNWB31N,
+ 0,
+ "GIGABYTE",
+ "GN-WB31N",
+ },
+ {
+ USB_VENDOR_GIGABYTE, USB_PRODUCT_GIGABYTE_GNWB32L,
+ 0,
+ "GIGABYTE",
+ "GN-WB32L",
+ },
+ {
+ USB_VENDOR_GLOBALSUN, USB_PRODUCT_GLOBALSUN_AR5523_1,
+ 0,
+ "Global Sun Technology",
+ "AR5523",
+ },
+ {
+ USB_VENDOR_GLOBALSUN, USB_PRODUCT_GLOBALSUN_AR5523_1_NF,
+ 0,
+ "Global Sun Technology",
+ "AR5523 (no firmware)",
+ },
+ {
+ USB_VENDOR_GLOBALSUN, USB_PRODUCT_GLOBALSUN_AR5523_2,
+ 0,
+ "Global Sun Technology",
+ "AR5523",
+ },
+ {
+ USB_VENDOR_GLOBALSUN, USB_PRODUCT_GLOBALSUN_AR5523_2_NF,
+ 0,
+ "Global Sun Technology",
+ "AR5523 (no firmware)",
+ },
+ {
+ USB_VENDOR_GLOBESPAN, USB_PRODUCT_GLOBESPAN_PRISM_GT_1,
+ 0,
+ "Globespan",
+ "PrismGT USB 2.0 WLAN",
+ },
+ {
+ USB_VENDOR_GLOBESPAN, USB_PRODUCT_GLOBESPAN_PRISM_GT_2,
+ 0,
+ "Globespan",
+ "PrismGT USB 2.0 WLAN",
+ },
+ {
+ USB_VENDOR_GMATE, USB_PRODUCT_GMATE_YP3X00,
+ 0,
+ "G.Mate, Inc",
+ "YP3X00 PDA",
+ },
+ {
+ USB_VENDOR_GOHUBS, USB_PRODUCT_GOHUBS_GOCOM232,
+ 0,
+ "GoHubs",
+ "GoCOM232 Serial",
+ },
+ {
+ USB_VENDOR_GOODWAY, USB_PRODUCT_GOODWAY_GWUSB2E,
+ 0,
+ "Good Way Technology",
+ "GWUSB2E",
+ },
+ {
+ USB_VENDOR_GOODWAY, USB_PRODUCT_GOODWAY_RT2573,
+ 0,
+ "Good Way Technology",
+ "RT2573",
+ },
+ {
+ USB_VENDOR_GOOGLE, USB_PRODUCT_GOOGLE_NEXUSONE,
+ 0,
+ "Google",
+ "Nexus One",
+ },
+ {
+ USB_VENDOR_GRAVIS, USB_PRODUCT_GRAVIS_GAMEPADPRO,
+ 0,
+ "Advanced Gravis Computer",
+ "GamePad Pro",
+ },
+ {
+ USB_VENDOR_GREENHOUSE, USB_PRODUCT_GREENHOUSE_KANA21,
+ 0,
+ "GREENHOUSE",
+ "CF-writer with MP3",
+ },
+ {
+ USB_VENDOR_GRIFFIN, USB_PRODUCT_GRIFFIN_IMATE,
+ 0,
+ "Griffin Technology",
+ "iMate, ADB Adapter",
+ },
+ {
+ USB_VENDOR_GUILLEMOT, USB_PRODUCT_GUILLEMOT_DALEADER,
+ 0,
+ "Guillemot",
+ "DA Leader",
+ },
+ {
+ USB_VENDOR_GUILLEMOT, USB_PRODUCT_GUILLEMOT_HWGUSB254,
+ 0,
+ "Guillemot",
+ "HWGUSB2-54 WLAN",
+ },
+ {
+ USB_VENDOR_GUILLEMOT, USB_PRODUCT_GUILLEMOT_HWGUSB254LB,
+ 0,
+ "Guillemot",
+ "HWGUSB2-54-LB",
+ },
+ {
+ USB_VENDOR_GUILLEMOT, USB_PRODUCT_GUILLEMOT_HWGUSB254V2AP,
+ 0,
+ "Guillemot",
+ "HWGUSB2-54V2-AP",
+ },
+ {
+ USB_VENDOR_GUILLEMOT, USB_PRODUCT_GUILLEMOT_HWNU300,
+ 0,
+ "Guillemot",
+ "HWNU-300",
+ },
+ {
+ USB_VENDOR_HAGIWARA, USB_PRODUCT_HAGIWARA_FGSM,
+ 0,
+ "Hagiwara Sys-Com",
+ "FlashGate SmartMedia Card Reader",
+ },
+ {
+ USB_VENDOR_HAGIWARA, USB_PRODUCT_HAGIWARA_FGCF,
+ 0,
+ "Hagiwara Sys-Com",
+ "FlashGate CompactFlash Card Reader",
+ },
+ {
+ USB_VENDOR_HAGIWARA, USB_PRODUCT_HAGIWARA_FG,
+ 0,
+ "Hagiwara Sys-Com",
+ "FlashGate",
+ },
+ {
+ USB_VENDOR_HAL, USB_PRODUCT_HAL_IMR001,
+ 0,
+ "HAL Corporation",
+ "Crossam2+USB IR commander",
+ },
+ {
+ USB_VENDOR_HANDSPRING, USB_PRODUCT_HANDSPRING_VISOR,
+ 0,
+ "Handspring",
+ "Handspring Visor",
+ },
+ {
+ USB_VENDOR_HANDSPRING, USB_PRODUCT_HANDSPRING_TREO,
+ 0,
+ "Handspring",
+ "Handspring Treo",
+ },
+ {
+ USB_VENDOR_HANDSPRING, USB_PRODUCT_HANDSPRING_TREO600,
+ 0,
+ "Handspring",
+ "Handspring Treo 600",
+ },
+ {
+ USB_VENDOR_HAUPPAUGE, USB_PRODUCT_HAUPPAUGE_WINTV_USB_FM,
+ 0,
+ "Hauppauge Computer Works",
+ "WinTV USB FM",
+ },
+ {
+ USB_VENDOR_HAUPPAUGE2, USB_PRODUCT_HAUPPAUGE2_NOVAT500,
+ 0,
+ "Hauppauge Computer Works",
+ "NovaT 500Stick",
+ },
+ {
+ USB_VENDOR_HAWKING, USB_PRODUCT_HAWKING_RT2870_1,
+ 0,
+ "Hawking",
+ "RT2870",
+ },
+ {
+ USB_VENDOR_HAWKING, USB_PRODUCT_HAWKING_RT2870_2,
+ 0,
+ "Hawking",
+ "RT2870",
+ },
+ {
+ USB_VENDOR_HAWKING, USB_PRODUCT_HAWKING_HWUN2,
+ 0,
+ "Hawking",
+ "HWUN2",
+ },
+ {
+ USB_VENDOR_HAWKING, USB_PRODUCT_HAWKING_RT3070,
+ 0,
+ "Hawking",
+ "RT3070",
+ },
+ {
+ USB_VENDOR_HAWKING, USB_PRODUCT_HAWKING_UF100,
+ 0,
+ "Hawking",
+ "10/100 USB Ethernet",
+ },
+ {
+ USB_VENDOR_HIDGLOBAL, USB_PRODUCT_HIDGLOBAL_CM2020,
+ 0,
+ "HID Global",
+ "Omnikey Cardman 2020",
+ },
+ {
+ USB_VENDOR_HIDGLOBAL, USB_PRODUCT_HIDGLOBAL_CM6020,
+ 0,
+ "HID Global",
+ "Omnikey Cardman 6020",
+ },
+ {
+ USB_VENDOR_HITACHI, USB_PRODUCT_HITACHI_DVDCAM_DZ_MV100A,
+ 0,
+ "Hitachi",
+ "DVD-CAM DZ-MV100A Camcorder",
+ },
+ {
+ USB_VENDOR_HITACHI, USB_PRODUCT_HITACHI_DVDCAM_USB,
+ 0,
+ "Hitachi",
+ "DVDCAM USB HS Interface",
+ },
+ {
+ USB_VENDOR_HP, USB_PRODUCT_HP_895C,
+ 0,
+ "Hewlett Packard",
+ "DeskJet 895C",
+ },
+ {
+ USB_VENDOR_HP, USB_PRODUCT_HP_4100C,
+ 0,
+ "Hewlett Packard",
+ "Scanjet 4100C",
+ },
+ {
+ USB_VENDOR_HP, USB_PRODUCT_HP_S20,
+ 0,
+ "Hewlett Packard",
+ "Photosmart S20",
+ },
+ {
+ USB_VENDOR_HP, USB_PRODUCT_HP_880C,
+ 0,
+ "Hewlett Packard",
+ "DeskJet 880C",
+ },
+ {
+ USB_VENDOR_HP, USB_PRODUCT_HP_4200C,
+ 0,
+ "Hewlett Packard",
+ "ScanJet 4200C",
+ },
+ {
+ USB_VENDOR_HP, USB_PRODUCT_HP_CDWRITERPLUS,
+ 0,
+ "Hewlett Packard",
+ "CD-Writer Plus",
+ },
+ {
+ USB_VENDOR_HP, USB_PRODUCT_HP_KBDHUB,
+ 0,
+ "Hewlett Packard",
+ "Multimedia Keyboard Hub",
+ },
+ {
+ USB_VENDOR_HP, USB_PRODUCT_HP_G55XI,
+ 0,
+ "Hewlett Packard",
+ "OfficeJet G55xi",
+ },
+ {
+ USB_VENDOR_HP, USB_PRODUCT_HP_HN210W,
+ 0,
+ "Hewlett Packard",
+ "HN210W 802.11b WLAN",
+ },
+ {
+ USB_VENDOR_HP, USB_PRODUCT_HP_49GPLUS,
+ 0,
+ "Hewlett Packard",
+ "49g+ graphing calculator",
+ },
+ {
+ USB_VENDOR_HP, USB_PRODUCT_HP_6200C,
+ 0,
+ "Hewlett Packard",
+ "ScanJet 6200C",
+ },
+ {
+ USB_VENDOR_HP, USB_PRODUCT_HP_S20b,
+ 0,
+ "Hewlett Packard",
+ "PhotoSmart S20",
+ },
+ {
+ USB_VENDOR_HP, USB_PRODUCT_HP_815C,
+ 0,
+ "Hewlett Packard",
+ "DeskJet 815C",
+ },
+ {
+ USB_VENDOR_HP, USB_PRODUCT_HP_3300C,
+ 0,
+ "Hewlett Packard",
+ "ScanJet 3300C",
+ },
+ {
+ USB_VENDOR_HP, USB_PRODUCT_HP_CDW8200,
+ 0,
+ "Hewlett Packard",
+ "CD-Writer Plus 8200e",
+ },
+ {
+ USB_VENDOR_HP, USB_PRODUCT_HP_MMKEYB,
+ 0,
+ "Hewlett Packard",
+ "Multimedia keyboard",
+ },
+ {
+ USB_VENDOR_HP, USB_PRODUCT_HP_1220C,
+ 0,
+ "Hewlett Packard",
+ "DeskJet 1220C",
+ },
+ {
+ USB_VENDOR_HP, USB_PRODUCT_HP_810C,
+ 0,
+ "Hewlett Packard",
+ "DeskJet 810C/812C",
+ },
+ {
+ USB_VENDOR_HP, USB_PRODUCT_HP_4300C,
+ 0,
+ "Hewlett Packard",
+ "Scanjet 4300C",
+ },
+ {
+ USB_VENDOR_HP, USB_PRODUCT_HP_CDW4E,
+ 0,
+ "Hewlett Packard",
+ "CD-Writer+ CD-4e",
+ },
+ {
+ USB_VENDOR_HP, USB_PRODUCT_HP_G85XI,
+ 0,
+ "Hewlett Packard",
+ "OfficeJet G85xi",
+ },
+ {
+ USB_VENDOR_HP, USB_PRODUCT_HP_1200,
+ 0,
+ "Hewlett Packard",
+ "LaserJet 1200",
+ },
+ {
+ USB_VENDOR_HP, USB_PRODUCT_HP_5200C,
+ 0,
+ "Hewlett Packard",
+ "Scanjet 5200C",
+ },
+ {
+ USB_VENDOR_HP, USB_PRODUCT_HP_830C,
+ 0,
+ "Hewlett Packard",
+ "DeskJet 830C",
+ },
+ {
+ USB_VENDOR_HP, USB_PRODUCT_HP_3400CSE,
+ 0,
+ "Hewlett Packard",
+ "ScanJet 3400cse",
+ },
+ {
+ USB_VENDOR_HP, USB_PRODUCT_HP_6300C,
+ 0,
+ "Hewlett Packard",
+ "Scanjet 6300C",
+ },
+ {
+ USB_VENDOR_HP, USB_PRODUCT_HP_840C,
+ 0,
+ "Hewlett Packard",
+ "DeskJet 840c",
+ },
+ {
+ USB_VENDOR_HP, USB_PRODUCT_HP_2200C,
+ 0,
+ "Hewlett Packard",
+ "ScanJet 2200C",
+ },
+ {
+ USB_VENDOR_HP, USB_PRODUCT_HP_5300C,
+ 0,
+ "Hewlett Packard",
+ "Scanjet 5300C",
+ },
+ {
+ USB_VENDOR_HP, USB_PRODUCT_HP_4400C,
+ 0,
+ "Hewlett Packard",
+ "Scanjet 4400C",
+ },
+ {
+ USB_VENDOR_HP, USB_PRODUCT_HP_4470C,
+ 0,
+ "Hewlett Packard",
+ "Scanjet 4470C",
+ },
+ {
+ USB_VENDOR_HP, USB_PRODUCT_HP_82x0C,
+ 0,
+ "Hewlett Packard",
+ "Scanjet 82x0C",
+ },
+ {
+ USB_VENDOR_HP, USB_PRODUCT_HP_2300D,
+ 0,
+ "Hewlett Packard",
+ "Laserjet 2300d",
+ },
+ {
+ USB_VENDOR_HP, USB_PRODUCT_HP_970CSE,
+ 0,
+ "Hewlett Packard",
+ "Deskjet 970Cse",
+ },
+ {
+ USB_VENDOR_HP, USB_PRODUCT_HP_5400C,
+ 0,
+ "Hewlett Packard",
+ "Scanjet 5400C",
+ },
+ {
+ USB_VENDOR_HP, USB_PRODUCT_HP_2215,
+ 0,
+ "Hewlett Packard",
+ "iPAQ 22xx/Jornada 548",
+ },
+ {
+ USB_VENDOR_HP, USB_PRODUCT_HP_568J,
+ 0,
+ "Hewlett Packard",
+ "Jornada 568",
+ },
+ {
+ USB_VENDOR_HP, USB_PRODUCT_HP_930C,
+ 0,
+ "Hewlett Packard",
+ "DeskJet 930c",
+ },
+ {
+ USB_VENDOR_HP, USB_PRODUCT_HP_P2000U,
+ 0,
+ "Hewlett Packard",
+ "Inkjet P-2000U",
+ },
+ {
+ USB_VENDOR_HP, USB_PRODUCT_HP_HS2300,
+ 0,
+ "Hewlett Packard",
+ "HS2300 HSDPA (aka MC8775)",
+ },
+ {
+ USB_VENDOR_HP, USB_PRODUCT_HP_640C,
+ 0,
+ "Hewlett Packard",
+ "DeskJet 640c",
+ },
+ {
+ USB_VENDOR_HP, USB_PRODUCT_HP_4670V,
+ 0,
+ "Hewlett Packard",
+ "ScanJet 4670v",
+ },
+ {
+ USB_VENDOR_HP, USB_PRODUCT_HP_P1100,
+ 0,
+ "Hewlett Packard",
+ "Photosmart P1100",
+ },
+ {
+ USB_VENDOR_HP, USB_PRODUCT_HP_LD220,
+ 0,
+ "Hewlett Packard",
+ "LD220 POS Display",
+ },
+ {
+ USB_VENDOR_HP, USB_PRODUCT_HP_OJ4215,
+ 0,
+ "Hewlett Packard",
+ "OfficeJet 4215",
+ },
+ {
+ USB_VENDOR_HP, USB_PRODUCT_HP_HN210E,
+ 0,
+ "Hewlett Packard",
+ "Ethernet HN210E",
+ },
+ {
+ USB_VENDOR_HP2, USB_PRODUCT_HP2_C500,
+ 0,
+ "Hewlett Packard",
+ "PhotoSmart C500",
+ },
+ {
+ USB_VENDOR_HP, USB_PRODUCT_HP_EV2200,
+ 0,
+ "Hewlett Packard",
+ "ev2200 HSDPA (aka MC5720)",
+ },
+ {
+ USB_VENDOR_HTC, USB_PRODUCT_HTC_WINMOBILE,
+ 0,
+ "HTC",
+ "HTC USB Sync",
+ },
+ {
+ USB_VENDOR_HTC, USB_PRODUCT_HTC_PPC6700MODEM,
+ 0,
+ "HTC",
+ "PPC6700 Modem",
+ },
+ {
+ USB_VENDOR_HTC, USB_PRODUCT_HTC_SMARTPHONE,
+ 0,
+ "HTC",
+ "SmartPhone USB Sync",
+ },
+ {
+ USB_VENDOR_HTC, USB_PRODUCT_HTC_WIZARD,
+ 0,
+ "HTC",
+ "HTC Wizard USB Sync",
+ },
+ {
+ USB_VENDOR_HTC, USB_PRODUCT_HTC_LEGENDSYNC,
+ 0,
+ "HTC",
+ "HTC Legend USB Sync",
+ },
+ {
+ USB_VENDOR_HTC, USB_PRODUCT_HTC_LEGEND,
+ 0,
+ "HTC",
+ "HTC Legend",
+ },
+ {
+ USB_VENDOR_HTC, USB_PRODUCT_HTC_LEGENDINTERNET,
+ 0,
+ "HTC",
+ "HTC Legend Internet Sharing",
+ },
+ {
+ USB_VENDOR_HUAWEI, USB_PRODUCT_HUAWEI_MOBILE,
+ 0,
+ "Huawei Technologies",
+ "Huawei Mobile",
+ },
+ {
+ USB_VENDOR_HUAWEI, USB_PRODUCT_HUAWEI_E220,
+ 0,
+ "Huawei Technologies",
+ "HSDPA modem",
+ },
+ {
+ USB_VENDOR_HUAWEI, USB_PRODUCT_HUAWEI_E220BIS,
+ 0,
+ "Huawei Technologies",
+ "HSDPA modem",
+ },
+ {
+ USB_VENDOR_HUAWEI, USB_PRODUCT_HUAWEI_E1401,
+ 0,
+ "Huawei Technologies",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_HUAWEI, USB_PRODUCT_HUAWEI_E1402,
+ 0,
+ "Huawei Technologies",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_HUAWEI, USB_PRODUCT_HUAWEI_E1403,
+ 0,
+ "Huawei Technologies",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_HUAWEI, USB_PRODUCT_HUAWEI_E1404,
+ 0,
+ "Huawei Technologies",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_HUAWEI, USB_PRODUCT_HUAWEI_E1405,
+ 0,
+ "Huawei Technologies",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_HUAWEI, USB_PRODUCT_HUAWEI_E1406,
+ 0,
+ "Huawei Technologies",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_HUAWEI, USB_PRODUCT_HUAWEI_E1407,
+ 0,
+ "Huawei Technologies",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_HUAWEI, USB_PRODUCT_HUAWEI_E1408,
+ 0,
+ "Huawei Technologies",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_HUAWEI, USB_PRODUCT_HUAWEI_E1409,
+ 0,
+ "Huawei Technologies",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_HUAWEI, USB_PRODUCT_HUAWEI_E140A,
+ 0,
+ "Huawei Technologies",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_HUAWEI, USB_PRODUCT_HUAWEI_E140B,
+ 0,
+ "Huawei Technologies",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_HUAWEI, USB_PRODUCT_HUAWEI_E180V,
+ 0,
+ "Huawei Technologies",
+ "E180V",
+ },
+ {
+ USB_VENDOR_HUAWEI, USB_PRODUCT_HUAWEI_E140D,
+ 0,
+ "Huawei Technologies",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_HUAWEI, USB_PRODUCT_HUAWEI_E140E,
+ 0,
+ "Huawei Technologies",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_HUAWEI, USB_PRODUCT_HUAWEI_E140F,
+ 0,
+ "Huawei Technologies",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_HUAWEI, USB_PRODUCT_HUAWEI_E1410,
+ 0,
+ "Huawei Technologies",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_HUAWEI, USB_PRODUCT_HUAWEI_E1411,
+ 0,
+ "Huawei Technologies",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_HUAWEI, USB_PRODUCT_HUAWEI_E1412,
+ 0,
+ "Huawei Technologies",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_HUAWEI, USB_PRODUCT_HUAWEI_E1413,
+ 0,
+ "Huawei Technologies",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_HUAWEI, USB_PRODUCT_HUAWEI_E1414,
+ 0,
+ "Huawei Technologies",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_HUAWEI, USB_PRODUCT_HUAWEI_E1415,
+ 0,
+ "Huawei Technologies",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_HUAWEI, USB_PRODUCT_HUAWEI_E1416,
+ 0,
+ "Huawei Technologies",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_HUAWEI, USB_PRODUCT_HUAWEI_E1417,
+ 0,
+ "Huawei Technologies",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_HUAWEI, USB_PRODUCT_HUAWEI_E1418,
+ 0,
+ "Huawei Technologies",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_HUAWEI, USB_PRODUCT_HUAWEI_E1419,
+ 0,
+ "Huawei Technologies",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_HUAWEI, USB_PRODUCT_HUAWEI_E141A,
+ 0,
+ "Huawei Technologies",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_HUAWEI, USB_PRODUCT_HUAWEI_E141B,
+ 0,
+ "Huawei Technologies",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_HUAWEI, USB_PRODUCT_HUAWEI_E141C,
+ 0,
+ "Huawei Technologies",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_HUAWEI, USB_PRODUCT_HUAWEI_E141D,
+ 0,
+ "Huawei Technologies",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_HUAWEI, USB_PRODUCT_HUAWEI_E141E,
+ 0,
+ "Huawei Technologies",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_HUAWEI, USB_PRODUCT_HUAWEI_E141F,
+ 0,
+ "Huawei Technologies",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_HUAWEI, USB_PRODUCT_HUAWEI_E1420,
+ 0,
+ "Huawei Technologies",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_HUAWEI, USB_PRODUCT_HUAWEI_E1421,
+ 0,
+ "Huawei Technologies",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_HUAWEI, USB_PRODUCT_HUAWEI_E1422,
+ 0,
+ "Huawei Technologies",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_HUAWEI, USB_PRODUCT_HUAWEI_E1423,
+ 0,
+ "Huawei Technologies",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_HUAWEI, USB_PRODUCT_HUAWEI_E1424,
+ 0,
+ "Huawei Technologies",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_HUAWEI, USB_PRODUCT_HUAWEI_E1425,
+ 0,
+ "Huawei Technologies",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_HUAWEI, USB_PRODUCT_HUAWEI_E1426,
+ 0,
+ "Huawei Technologies",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_HUAWEI, USB_PRODUCT_HUAWEI_E1427,
+ 0,
+ "Huawei Technologies",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_HUAWEI, USB_PRODUCT_HUAWEI_E1428,
+ 0,
+ "Huawei Technologies",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_HUAWEI, USB_PRODUCT_HUAWEI_E1429,
+ 0,
+ "Huawei Technologies",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_HUAWEI, USB_PRODUCT_HUAWEI_E142A,
+ 0,
+ "Huawei Technologies",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_HUAWEI, USB_PRODUCT_HUAWEI_E142B,
+ 0,
+ "Huawei Technologies",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_HUAWEI, USB_PRODUCT_HUAWEI_E142C,
+ 0,
+ "Huawei Technologies",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_HUAWEI, USB_PRODUCT_HUAWEI_E142D,
+ 0,
+ "Huawei Technologies",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_HUAWEI, USB_PRODUCT_HUAWEI_E142E,
+ 0,
+ "Huawei Technologies",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_HUAWEI, USB_PRODUCT_HUAWEI_E142F,
+ 0,
+ "Huawei Technologies",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_HUAWEI, USB_PRODUCT_HUAWEI_E1430,
+ 0,
+ "Huawei Technologies",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_HUAWEI, USB_PRODUCT_HUAWEI_E1431,
+ 0,
+ "Huawei Technologies",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_HUAWEI, USB_PRODUCT_HUAWEI_E1432,
+ 0,
+ "Huawei Technologies",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_HUAWEI, USB_PRODUCT_HUAWEI_E1433,
+ 0,
+ "Huawei Technologies",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_HUAWEI, USB_PRODUCT_HUAWEI_E1434,
+ 0,
+ "Huawei Technologies",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_HUAWEI, USB_PRODUCT_HUAWEI_E1435,
+ 0,
+ "Huawei Technologies",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_HUAWEI, USB_PRODUCT_HUAWEI_E1436,
+ 0,
+ "Huawei Technologies",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_HUAWEI, USB_PRODUCT_HUAWEI_E1437,
+ 0,
+ "Huawei Technologies",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_HUAWEI, USB_PRODUCT_HUAWEI_E1438,
+ 0,
+ "Huawei Technologies",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_HUAWEI, USB_PRODUCT_HUAWEI_E1439,
+ 0,
+ "Huawei Technologies",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_HUAWEI, USB_PRODUCT_HUAWEI_E143A,
+ 0,
+ "Huawei Technologies",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_HUAWEI, USB_PRODUCT_HUAWEI_E143B,
+ 0,
+ "Huawei Technologies",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_HUAWEI, USB_PRODUCT_HUAWEI_E143C,
+ 0,
+ "Huawei Technologies",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_HUAWEI, USB_PRODUCT_HUAWEI_E143D,
+ 0,
+ "Huawei Technologies",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_HUAWEI, USB_PRODUCT_HUAWEI_E143E,
+ 0,
+ "Huawei Technologies",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_HUAWEI, USB_PRODUCT_HUAWEI_E143F,
+ 0,
+ "Huawei Technologies",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_HUAWEI, USB_PRODUCT_HUAWEI_E1752,
+ 0,
+ "Huawei Technologies",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_HUAWEI, USB_PRODUCT_HUAWEI_K3765,
+ 0,
+ "Huawei Technologies",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_HUAWEI, USB_PRODUCT_HUAWEI_E1820,
+ 0,
+ "Huawei Technologies",
+ "E1820 HSPA+ USB Slider",
+ },
+ {
+ USB_VENDOR_HUAWEI, USB_PRODUCT_HUAWEI_K3765_INIT,
+ 0,
+ "Huawei Technologies",
+ "K3765 Initial",
+ },
+ {
+ USB_VENDOR_HUAWEI3COM, USB_PRODUCT_HUAWEI3COM_WUB320G,
+ 0,
+ "Huawei-3Com",
+ "Aolynk WUB320g",
+ },
+ {
+ USB_VENDOR_IBM, USB_PRODUCT_IBM_USBCDROMDRIVE,
+ 0,
+ "IBM",
+ "USB CD-ROM Drive",
+ },
+ {
+ USB_VENDOR_IMAGINATION, USB_PRODUCT_IMAGINATION_DBX1,
+ 0,
+ "Imagination Technologies",
+ "DBX1 DSP core",
+ },
+ {
+ USB_VENDOR_INSIDEOUT, USB_PRODUCT_INSIDEOUT_EDGEPORT4,
+ 0,
+ "Inside Out Networks",
+ "EdgePort/4 serial ports",
+ },
+ {
+ USB_VENDOR_INSYSTEM, USB_PRODUCT_INSYSTEM_F5U002,
+ 0,
+ "In-System Design",
+ "Parallel printer",
+ },
+ {
+ USB_VENDOR_INSYSTEM, USB_PRODUCT_INSYSTEM_ATAPI,
+ 0,
+ "In-System Design",
+ "ATAPI Adapter",
+ },
+ {
+ USB_VENDOR_INSYSTEM, USB_PRODUCT_INSYSTEM_ISD110,
+ 0,
+ "In-System Design",
+ "IDE Adapter ISD110",
+ },
+ {
+ USB_VENDOR_INSYSTEM, USB_PRODUCT_INSYSTEM_ISD105,
+ 0,
+ "In-System Design",
+ "IDE Adapter ISD105",
+ },
+ {
+ USB_VENDOR_INSYSTEM, USB_PRODUCT_INSYSTEM_USBCABLE,
+ 0,
+ "In-System Design",
+ "USB cable",
+ },
+ {
+ USB_VENDOR_INSYSTEM, USB_PRODUCT_INSYSTEM_STORAGE_V2,
+ 0,
+ "In-System Design",
+ "USB Storage Adapter V2",
+ },
+ {
+ USB_VENDOR_INTEL, USB_PRODUCT_INTEL_EASYPC_CAMERA,
+ 0,
+ "Intel",
+ "Easy PC Camera",
+ },
+ {
+ USB_VENDOR_INTEL, USB_PRODUCT_INTEL_TESTBOARD,
+ 0,
+ "Intel",
+ "82930 test board",
+ },
+ {
+ USB_VENDOR_INTEL2, USB_PRODUCT_INTEL2_IRMH,
+ 0,
+ "Intel",
+ "Integrated Rate Matching Hub",
+ },
+ {
+ USB_VENDOR_INTERSIL, USB_PRODUCT_INTERSIL_PRISM_GT,
+ 0,
+ "Intersil",
+ "PrismGT USB 2.0 WLAN",
+ },
+ {
+ USB_VENDOR_INTERSIL, USB_PRODUCT_INTERSIL_PRISM_2X,
+ 0,
+ "Intersil",
+ "Prism2.x or Atmel WLAN",
+ },
+ {
+ USB_VENDOR_INTREPIDCS, USB_PRODUCT_INTREPIDCS_VALUECAN,
+ 0,
+ "Intrepid",
+ "ValueCAN CAN bus interface",
+ },
+ {
+ USB_VENDOR_INTREPIDCS, USB_PRODUCT_INTREPIDCS_NEOVI,
+ 0,
+ "Intrepid",
+ "NeoVI Blue vehicle bus interface",
+ },
+ {
+ USB_VENDOR_IODATA, USB_PRODUCT_IODATA_IU_CD2,
+ 0,
+ "I-O Data",
+ "DVD Multi-plus unit iU-CD2",
+ },
+ {
+ USB_VENDOR_IODATA, USB_PRODUCT_IODATA_DVR_UEH8,
+ 0,
+ "I-O Data",
+ "DVD Multi-plus unit DVR-UEH8",
+ },
+ {
+ USB_VENDOR_IODATA, USB_PRODUCT_IODATA_USBSSMRW,
+ 0,
+ "I-O Data",
+ "USB-SSMRW SD-card",
+ },
+ {
+ USB_VENDOR_IODATA, USB_PRODUCT_IODATA_USBSDRW,
+ 0,
+ "I-O Data",
+ "USB-SDRW SD-card",
+ },
+ {
+ USB_VENDOR_IODATA, USB_PRODUCT_IODATA_USBETT,
+ 0,
+ "I-O Data",
+ "USB ETT",
+ },
+ {
+ USB_VENDOR_IODATA, USB_PRODUCT_IODATA_USBETTX,
+ 0,
+ "I-O Data",
+ "USB ETTX",
+ },
+ {
+ USB_VENDOR_IODATA, USB_PRODUCT_IODATA_USBETTXS,
+ 0,
+ "I-O Data",
+ "USB ETTX",
+ },
+ {
+ USB_VENDOR_IODATA, USB_PRODUCT_IODATA_USBWNB11A,
+ 0,
+ "I-O Data",
+ "USB WN-B11",
+ },
+ {
+ USB_VENDOR_IODATA, USB_PRODUCT_IODATA_USBWNB11,
+ 0,
+ "I-O Data",
+ "USB Airport WN-B11",
+ },
+ {
+ USB_VENDOR_IODATA, USB_PRODUCT_IODATA_ETGUS2,
+ 0,
+ "I-O Data",
+ "ETG-US2",
+ },
+ {
+ USB_VENDOR_IODATA, USB_PRODUCT_IODATA_RT3072_1,
+ 0,
+ "I-O Data",
+ "RT3072",
+ },
+ {
+ USB_VENDOR_IODATA, USB_PRODUCT_IODATA_RT3072_2,
+ 0,
+ "I-O Data",
+ "RT3072",
+ },
+ {
+ USB_VENDOR_IODATA, USB_PRODUCT_IODATA_RT3072_3,
+ 0,
+ "I-O Data",
+ "RT3072",
+ },
+ {
+ USB_VENDOR_IODATA, USB_PRODUCT_IODATA_RT3072_4,
+ 0,
+ "I-O Data",
+ "RT3072",
+ },
+ {
+ USB_VENDOR_IODATA, USB_PRODUCT_IODATA_USBRSAQ,
+ 0,
+ "I-O Data",
+ "Serial USB-RSAQ1",
+ },
+ {
+ USB_VENDOR_IODATA, USB_PRODUCT_IODATA_USBRSAQ5,
+ 0,
+ "I-O Data",
+ "Serial USB-RSAQ5",
+ },
+ {
+ USB_VENDOR_IODATA2, USB_PRODUCT_IODATA2_USB2SC,
+ 0,
+ "I-O Data",
+ "USB2.0-SCSI Bridge USB2-SC",
+ },
+ {
+ USB_VENDOR_IOMEGA, USB_PRODUCT_IOMEGA_ZIP100,
+ 0,
+ "Iomega",
+ "Zip 100",
+ },
+ {
+ USB_VENDOR_IOMEGA, USB_PRODUCT_IOMEGA_ZIP250,
+ 0,
+ "Iomega",
+ "Zip 250",
+ },
+ {
+ USB_VENDOR_ISSC, USB_PRODUCT_ISSC_ISSCBTA,
+ 0,
+ "Integrated System Solution Corp.",
+ "Bluetooth USB Adapter",
+ },
+ {
+ USB_VENDOR_ITEGNO, USB_PRODUCT_ITEGNO_WM1080A,
+ 0,
+ "iTegno",
+ "WM1080A GSM/GPRS modem",
+ },
+ {
+ USB_VENDOR_ITEGNO, USB_PRODUCT_ITEGNO_WM2080A,
+ 0,
+ "iTegno",
+ "WM2080A CDMA modem",
+ },
+ {
+ USB_VENDOR_ITUNERNET, USB_PRODUCT_ITUNERNET_USBLCD2X20,
+ 0,
+ "I-Tuner Networks",
+ "USB-LCD 2x20",
+ },
+ {
+ USB_VENDOR_ITUNERNET, USB_PRODUCT_ITUNERNET_USBLCD4X20,
+ 0,
+ "I-Tuner Networks",
+ "USB-LCD 4x20",
+ },
+ {
+ USB_VENDOR_JABLOTRON, USB_PRODUCT_JABLOTRON_PC60B,
+ 0,
+ "Jablotron",
+ "PC-60B",
+ },
+ {
+ USB_VENDOR_JATON, USB_PRODUCT_JATON_EDA,
+ 0,
+ "Jaton",
+ "Ethernet",
+ },
+ {
+ USB_VENDOR_JMICRON, USB_PRODUCT_JMICRON_JM20336,
+ 0,
+ "JMicron",
+ "USB to SATA Bridge",
+ },
+ {
+ USB_VENDOR_JMICRON, USB_PRODUCT_JMICRON_JM20337,
+ 0,
+ "JMicron",
+ "USB to ATA/ATAPI Bridge",
+ },
+ {
+ USB_VENDOR_JVC, USB_PRODUCT_JVC_GR_DX95,
+ 0,
+ "JVC",
+ "GR-DX95",
+ },
+ {
+ USB_VENDOR_JVC, USB_PRODUCT_JVC_MP_PRX1,
+ 0,
+ "JVC",
+ "MP-PRX1 Ethernet",
+ },
+ {
+ USB_VENDOR_JRC, USB_PRODUCT_JRC_AH_J3001V_J3002V,
+ 0,
+ "Japan Radio Company",
+ "AirH PHONE AH-J3001V/J3002V",
+ },
+ {
+ USB_VENDOR_KAWATSU, USB_PRODUCT_KAWATSU_MH4000P,
+ 0,
+ "Kawatsu Semiconductor",
+ "MiniHub 4000P",
+ },
+ {
+ USB_VENDOR_KEISOKUGIKEN, USB_PRODUCT_KEISOKUGIKEN_USBDAQ,
+ 0,
+ "Keisokugiken",
+ "HKS-0200 USBDAQ",
+ },
+ {
+ USB_VENDOR_KENSINGTON, USB_PRODUCT_KENSINGTON_ORBIT,
+ 0,
+ "Kensington",
+ "Orbit USB/PS2 trackball",
+ },
+ {
+ USB_VENDOR_KENSINGTON, USB_PRODUCT_KENSINGTON_TURBOBALL,
+ 0,
+ "Kensington",
+ "TurboBall",
+ },
+ {
+ USB_VENDOR_KEYSPAN, USB_PRODUCT_KEYSPAN_USA28_NF,
+ 0,
+ "Keyspan / InnoSys Inc.",
+ "USA-28 serial Adapter (no firmware)",
+ },
+ {
+ USB_VENDOR_KEYSPAN, USB_PRODUCT_KEYSPAN_USA28X_NF,
+ 0,
+ "Keyspan / InnoSys Inc.",
+ "USA-28X serial Adapter (no firmware)",
+ },
+ {
+ USB_VENDOR_KEYSPAN, USB_PRODUCT_KEYSPAN_USA19_NF,
+ 0,
+ "Keyspan / InnoSys Inc.",
+ "USA-19 serial Adapter (no firmware)",
+ },
+ {
+ USB_VENDOR_KEYSPAN, USB_PRODUCT_KEYSPAN_USA18_NF,
+ 0,
+ "Keyspan / InnoSys Inc.",
+ "USA-18 serial Adapter (no firmware)",
+ },
+ {
+ USB_VENDOR_KEYSPAN, USB_PRODUCT_KEYSPAN_USA18X_NF,
+ 0,
+ "Keyspan / InnoSys Inc.",
+ "USA-18X serial Adapter (no firmware)",
+ },
+ {
+ USB_VENDOR_KEYSPAN, USB_PRODUCT_KEYSPAN_USA19W_NF,
+ 0,
+ "Keyspan / InnoSys Inc.",
+ "USA-19W serial Adapter (no firmware)",
+ },
+ {
+ USB_VENDOR_KEYSPAN, USB_PRODUCT_KEYSPAN_USA19,
+ 0,
+ "Keyspan / InnoSys Inc.",
+ "USA-19 serial Adapter",
+ },
+ {
+ USB_VENDOR_KEYSPAN, USB_PRODUCT_KEYSPAN_USA19W,
+ 0,
+ "Keyspan / InnoSys Inc.",
+ "USA-19W serial Adapter",
+ },
+ {
+ USB_VENDOR_KEYSPAN, USB_PRODUCT_KEYSPAN_USA49W_NF,
+ 0,
+ "Keyspan / InnoSys Inc.",
+ "USA-49W serial Adapter (no firmware)",
+ },
+ {
+ USB_VENDOR_KEYSPAN, USB_PRODUCT_KEYSPAN_USA49W,
+ 0,
+ "Keyspan / InnoSys Inc.",
+ "USA-49W serial Adapter",
+ },
+ {
+ USB_VENDOR_KEYSPAN, USB_PRODUCT_KEYSPAN_USA19QI_NF,
+ 0,
+ "Keyspan / InnoSys Inc.",
+ "USA-19QI serial Adapter (no firmware)",
+ },
+ {
+ USB_VENDOR_KEYSPAN, USB_PRODUCT_KEYSPAN_USA19QI,
+ 0,
+ "Keyspan / InnoSys Inc.",
+ "USA-19QI serial Adapter",
+ },
+ {
+ USB_VENDOR_KEYSPAN, USB_PRODUCT_KEYSPAN_USA19Q_NF,
+ 0,
+ "Keyspan / InnoSys Inc.",
+ "USA-19Q serial Adapter (no firmware)",
+ },
+ {
+ USB_VENDOR_KEYSPAN, USB_PRODUCT_KEYSPAN_USA19Q,
+ 0,
+ "Keyspan / InnoSys Inc.",
+ "USA-19Q serial Adapter",
+ },
+ {
+ USB_VENDOR_KEYSPAN, USB_PRODUCT_KEYSPAN_USA28,
+ 0,
+ "Keyspan / InnoSys Inc.",
+ "USA-28 serial Adapter",
+ },
+ {
+ USB_VENDOR_KEYSPAN, USB_PRODUCT_KEYSPAN_USA28XXB,
+ 0,
+ "Keyspan / InnoSys Inc.",
+ "USA-28X/XB serial Adapter",
+ },
+ {
+ USB_VENDOR_KEYSPAN, USB_PRODUCT_KEYSPAN_USA18,
+ 0,
+ "Keyspan / InnoSys Inc.",
+ "USA-18 serial Adapter",
+ },
+ {
+ USB_VENDOR_KEYSPAN, USB_PRODUCT_KEYSPAN_USA18X,
+ 0,
+ "Keyspan / InnoSys Inc.",
+ "USA-18X serial Adapter",
+ },
+ {
+ USB_VENDOR_KEYSPAN, USB_PRODUCT_KEYSPAN_USA28XB_NF,
+ 0,
+ "Keyspan / InnoSys Inc.",
+ "USA-28XB serial Adapter (no firmware)",
+ },
+ {
+ USB_VENDOR_KEYSPAN, USB_PRODUCT_KEYSPAN_USA28XA_NF,
+ 0,
+ "Keyspan / InnoSys Inc.",
+ "USA-28XB serial Adapter (no firmware)",
+ },
+ {
+ USB_VENDOR_KEYSPAN, USB_PRODUCT_KEYSPAN_USA28XA,
+ 0,
+ "Keyspan / InnoSys Inc.",
+ "USA-28XA serial Adapter",
+ },
+ {
+ USB_VENDOR_KEYSPAN, USB_PRODUCT_KEYSPAN_USA18XA_NF,
+ 0,
+ "Keyspan / InnoSys Inc.",
+ "USA-18XA serial Adapter (no firmware)",
+ },
+ {
+ USB_VENDOR_KEYSPAN, USB_PRODUCT_KEYSPAN_USA18XA,
+ 0,
+ "Keyspan / InnoSys Inc.",
+ "USA-18XA serial Adapter",
+ },
+ {
+ USB_VENDOR_KEYSPAN, USB_PRODUCT_KEYSPAN_USA19QW_NF,
+ 0,
+ "Keyspan / InnoSys Inc.",
+ "USA-19WQ serial Adapter (no firmware)",
+ },
+ {
+ USB_VENDOR_KEYSPAN, USB_PRODUCT_KEYSPAN_USA19QW,
+ 0,
+ "Keyspan / InnoSys Inc.",
+ "USA-19WQ serial Adapter",
+ },
+ {
+ USB_VENDOR_KEYSPAN, USB_PRODUCT_KEYSPAN_USA19HA,
+ 0,
+ "Keyspan / InnoSys Inc.",
+ "USA-19HS serial Adapter",
+ },
+ {
+ USB_VENDOR_KEYSPAN, USB_PRODUCT_KEYSPAN_UIA10,
+ 0,
+ "Keyspan / InnoSys Inc.",
+ "UIA-10 remote control",
+ },
+ {
+ USB_VENDOR_KEYSPAN, USB_PRODUCT_KEYSPAN_UIA11,
+ 0,
+ "Keyspan / InnoSys Inc.",
+ "UIA-11 remote control",
+ },
+ {
+ USB_VENDOR_KINGSTON, USB_PRODUCT_KINGSTON_XX1,
+ 0,
+ "Kingston Technology",
+ "Ethernet",
+ },
+ {
+ USB_VENDOR_KINGSTON, USB_PRODUCT_KINGSTON_KNU101TX,
+ 0,
+ "Kingston Technology",
+ "KNU101TX USB Ethernet",
+ },
+ {
+ USB_VENDOR_KLSI, USB_PRODUCT_KLSI_DUH3E10BT,
+ 0,
+ "Kawasaki LSI",
+ "USB Ethernet",
+ },
+ {
+ USB_VENDOR_KLSI, USB_PRODUCT_KLSI_DUH3E10BTN,
+ 0,
+ "Kawasaki LSI",
+ "USB Ethernet",
+ },
+ {
+ USB_VENDOR_KODAK, USB_PRODUCT_KODAK_DC220,
+ 0,
+ "Eastman Kodak",
+ "Digital Science DC220",
+ },
+ {
+ USB_VENDOR_KODAK, USB_PRODUCT_KODAK_DC260,
+ 0,
+ "Eastman Kodak",
+ "Digital Science DC260",
+ },
+ {
+ USB_VENDOR_KODAK, USB_PRODUCT_KODAK_DC265,
+ 0,
+ "Eastman Kodak",
+ "Digital Science DC265",
+ },
+ {
+ USB_VENDOR_KODAK, USB_PRODUCT_KODAK_DC290,
+ 0,
+ "Eastman Kodak",
+ "Digital Science DC290",
+ },
+ {
+ USB_VENDOR_KODAK, USB_PRODUCT_KODAK_DC240,
+ 0,
+ "Eastman Kodak",
+ "Digital Science DC240",
+ },
+ {
+ USB_VENDOR_KODAK, USB_PRODUCT_KODAK_DC280,
+ 0,
+ "Eastman Kodak",
+ "Digital Science DC280",
+ },
+ {
+ USB_VENDOR_KONICA, USB_PRODUCT_KONICA_CAMERA,
+ 0,
+ "Konica",
+ "Digital Color Camera",
+ },
+ {
+ USB_VENDOR_KYE, USB_PRODUCT_KYE_NICHE,
+ 0,
+ "KYE Systems",
+ "Niche mouse",
+ },
+ {
+ USB_VENDOR_KYE, USB_PRODUCT_KYE_NETSCROLL,
+ 0,
+ "KYE Systems",
+ "Genius NetScroll mouse",
+ },
+ {
+ USB_VENDOR_KYE, USB_PRODUCT_KYE_FLIGHT2000,
+ 0,
+ "KYE Systems",
+ "Flight 2000 joystick",
+ },
+ {
+ USB_VENDOR_KYE, USB_PRODUCT_KYE_VIVIDPRO,
+ 0,
+ "KYE Systems",
+ "ColorPage Vivid-Pro scanner",
+ },
+ {
+ USB_VENDOR_KYOCERA, USB_PRODUCT_KYOCERA_FINECAM_S3X,
+ 0,
+ "Kyocera Wireless Corp.",
+ "Finecam S3x",
+ },
+ {
+ USB_VENDOR_KYOCERA, USB_PRODUCT_KYOCERA_FINECAM_S4,
+ 0,
+ "Kyocera Wireless Corp.",
+ "Finecam S4",
+ },
+ {
+ USB_VENDOR_KYOCERA, USB_PRODUCT_KYOCERA_FINECAM_S5,
+ 0,
+ "Kyocera Wireless Corp.",
+ "Finecam S5",
+ },
+ {
+ USB_VENDOR_KYOCERA, USB_PRODUCT_KYOCERA_FINECAM_L3,
+ 0,
+ "Kyocera Wireless Corp.",
+ "Finecam L3",
+ },
+ {
+ USB_VENDOR_KYOCERA, USB_PRODUCT_KYOCERA_AHK3001V,
+ 0,
+ "Kyocera Wireless Corp.",
+ "AH-K3001V",
+ },
+ {
+ USB_VENDOR_KYOCERA2, USB_PRODUCT_KYOCERA2_CDMA_MSM_K,
+ 0,
+ "Kyocera Wireless Corp.",
+ "Qualcomm Kyocera CDMA Technologies MSM",
+ },
+ {
+ USB_VENDOR_KYOCERA2, USB_PRODUCT_KYOCERA2_KPC680,
+ 0,
+ "Kyocera Wireless Corp.",
+ "Qualcomm Kyocera CDMA Technologies MSM",
+ },
+ {
+ USB_VENDOR_LACIE, USB_PRODUCT_LACIE_HD,
+ 0,
+ "LaCie",
+ "Hard Disk",
+ },
+ {
+ USB_VENDOR_LACIE, USB_PRODUCT_LACIE_CDRW,
+ 0,
+ "LaCie",
+ "CD R/W",
+ },
+ {
+ USB_VENDOR_LEADTEK, USB_PRODUCT_LEADTEK_9531,
+ 0,
+ "Leadtek",
+ "9531 GPS",
+ },
+ {
+ USB_VENDOR_LEXAR, USB_PRODUCT_LEXAR_JUMPSHOT,
+ 0,
+ "Lexar Media",
+ "jumpSHOT CompactFlash Reader",
+ },
+ {
+ USB_VENDOR_LEXAR, USB_PRODUCT_LEXAR_CF_READER,
+ 0,
+ "Lexar Media",
+ "USB CF Reader",
+ },
+ {
+ USB_VENDOR_LEXMARK, USB_PRODUCT_LEXMARK_S2450,
+ 0,
+ "Lexmark International",
+ "Optra S 2450",
+ },
+ {
+ USB_VENDOR_LIEBERT, USB_PRODUCT_LIEBERT_POWERSURE_PXT,
+ 0,
+ "Liebert",
+ "PowerSure Personal XT",
+ },
+ {
+ USB_VENDOR_LINKSYS, USB_PRODUCT_LINKSYS_MAUSB2,
+ 0,
+ "Linksys",
+ "Camedia MAUSB-2",
+ },
+ {
+ USB_VENDOR_LINKSYS, USB_PRODUCT_LINKSYS_USB10TX1,
+ 0,
+ "Linksys",
+ "USB10TX",
+ },
+ {
+ USB_VENDOR_LINKSYS, USB_PRODUCT_LINKSYS_USB10T,
+ 0,
+ "Linksys",
+ "USB10T Ethernet",
+ },
+ {
+ USB_VENDOR_LINKSYS, USB_PRODUCT_LINKSYS_USB100TX,
+ 0,
+ "Linksys",
+ "USB100TX Ethernet",
+ },
+ {
+ USB_VENDOR_LINKSYS, USB_PRODUCT_LINKSYS_USB100H1,
+ 0,
+ "Linksys",
+ "USB100H1 Ethernet/HPNA",
+ },
+ {
+ USB_VENDOR_LINKSYS, USB_PRODUCT_LINKSYS_USB10TA,
+ 0,
+ "Linksys",
+ "USB10TA Ethernet",
+ },
+ {
+ USB_VENDOR_LINKSYS, USB_PRODUCT_LINKSYS_USB10TX2,
+ 0,
+ "Linksys",
+ "USB10TX",
+ },
+ {
+ USB_VENDOR_LINKSYS2, USB_PRODUCT_LINKSYS2_WUSB11,
+ 0,
+ "Linksys",
+ "WUSB11 Wireless Adapter",
+ },
+ {
+ USB_VENDOR_LINKSYS2, USB_PRODUCT_LINKSYS2_USB200M,
+ 0,
+ "Linksys",
+ "USB 2.0 10/100 Ethernet",
+ },
+ {
+ USB_VENDOR_LINKSYS3, USB_PRODUCT_LINKSYS3_WUSB11v28,
+ 0,
+ "Linksys",
+ "WUSB11 v2.8 Wireless Adapter",
+ },
+ {
+ USB_VENDOR_LINKSYS4, USB_PRODUCT_LINKSYS4_USB1000,
+ 0,
+ "Linksys",
+ "USB1000",
+ },
+ {
+ USB_VENDOR_LINKSYS4, USB_PRODUCT_LINKSYS4_WUSB100,
+ 0,
+ "Linksys",
+ "WUSB100",
+ },
+ {
+ USB_VENDOR_LINKSYS4, USB_PRODUCT_LINKSYS4_WUSB600N,
+ 0,
+ "Linksys",
+ "WUSB600N",
+ },
+ {
+ USB_VENDOR_LINKSYS4, USB_PRODUCT_LINKSYS4_WUSB54GCV2,
+ 0,
+ "Linksys",
+ "WUSB54GC v2",
+ },
+ {
+ USB_VENDOR_LINKSYS4, USB_PRODUCT_LINKSYS4_WUSB54GCV3,
+ 0,
+ "Linksys",
+ "WUSB54GC v3",
+ },
+ {
+ USB_VENDOR_LINKSYS4, USB_PRODUCT_LINKSYS4_RT3070,
+ 0,
+ "Linksys",
+ "RT3070",
+ },
+ {
+ USB_VENDOR_LINKSYS4, USB_PRODUCT_LINKSYS4_WUSB600NV2,
+ 0,
+ "Linksys",
+ "WUSB600N v2",
+ },
+ {
+ USB_VENDOR_LOGITECH, USB_PRODUCT_LOGITECH_M2452,
+ 0,
+ "Logitech",
+ "M2452 keyboard",
+ },
+ {
+ USB_VENDOR_LOGITECH, USB_PRODUCT_LOGITECH_M4848,
+ 0,
+ "Logitech",
+ "M4848 mouse",
+ },
+ {
+ USB_VENDOR_LOGITECH, USB_PRODUCT_LOGITECH_PAGESCAN,
+ 0,
+ "Logitech",
+ "PageScan",
+ },
+ {
+ USB_VENDOR_LOGITECH, USB_PRODUCT_LOGITECH_QUICKCAMWEB,
+ 0,
+ "Logitech",
+ "QuickCam Web",
+ },
+ {
+ USB_VENDOR_LOGITECH, USB_PRODUCT_LOGITECH_QUICKCAMPRO,
+ 0,
+ "Logitech",
+ "QuickCam Pro",
+ },
+ {
+ USB_VENDOR_LOGITECH, USB_PRODUCT_LOGITECH_QUICKCAMEXP,
+ 0,
+ "Logitech",
+ "QuickCam Express",
+ },
+ {
+ USB_VENDOR_LOGITECH, USB_PRODUCT_LOGITECH_QUICKCAM,
+ 0,
+ "Logitech",
+ "QuickCam",
+ },
+ {
+ USB_VENDOR_LOGITECH, USB_PRODUCT_LOGITECH_QUICKCAMPRO3,
+ 0,
+ "Logitech",
+ "QuickCam Pro 9000",
+ },
+ {
+ USB_VENDOR_LOGITECH, USB_PRODUCT_LOGITECH_N43,
+ 0,
+ "Logitech",
+ "N43",
+ },
+ {
+ USB_VENDOR_LOGITECH, USB_PRODUCT_LOGITECH_N48,
+ 0,
+ "Logitech",
+ "N48 mouse",
+ },
+ {
+ USB_VENDOR_LOGITECH, USB_PRODUCT_LOGITECH_MBA47,
+ 0,
+ "Logitech",
+ "M-BA47 mouse",
+ },
+ {
+ USB_VENDOR_LOGITECH, USB_PRODUCT_LOGITECH_WMMOUSE,
+ 0,
+ "Logitech",
+ "WingMan Gaming Mouse",
+ },
+ {
+ USB_VENDOR_LOGITECH, USB_PRODUCT_LOGITECH_BD58,
+ 0,
+ "Logitech",
+ "BD58 mouse",
+ },
+ {
+ USB_VENDOR_LOGITECH, USB_PRODUCT_LOGITECH_UN58A,
+ 0,
+ "Logitech",
+ "iFeel Mouse",
+ },
+ {
+ USB_VENDOR_LOGITECH, USB_PRODUCT_LOGITECH_UN53B,
+ 0,
+ "Logitech",
+ "iFeel MouseMan",
+ },
+ {
+ USB_VENDOR_LOGITECH, USB_PRODUCT_LOGITECH_WMPAD,
+ 0,
+ "Logitech",
+ "WingMan GamePad Extreme",
+ },
+ {
+ USB_VENDOR_LOGITECH, USB_PRODUCT_LOGITECH_WMRPAD,
+ 0,
+ "Logitech",
+ "WingMan RumblePad",
+ },
+ {
+ USB_VENDOR_LOGITECH, USB_PRODUCT_LOGITECH_WMJOY,
+ 0,
+ "Logitech",
+ "WingMan Force joystick",
+ },
+ {
+ USB_VENDOR_LOGITECH, USB_PRODUCT_LOGITECH_BB13,
+ 0,
+ "Logitech",
+ "USB-PS/2 Trackball",
+ },
+ {
+ USB_VENDOR_LOGITECH, USB_PRODUCT_LOGITECH_RK53,
+ 0,
+ "Logitech",
+ "Cordless mouse",
+ },
+ {
+ USB_VENDOR_LOGITECH, USB_PRODUCT_LOGITECH_RB6,
+ 0,
+ "Logitech",
+ "Cordless keyboard",
+ },
+ {
+ USB_VENDOR_LOGITECH, USB_PRODUCT_LOGITECH_MX700,
+ 0,
+ "Logitech",
+ "Cordless optical mouse",
+ },
+ {
+ USB_VENDOR_LOGITECH, USB_PRODUCT_LOGITECH_QUICKCAMPRO2,
+ 0,
+ "Logitech",
+ "QuickCam Pro",
+ },
+ {
+ USB_VENDOR_LOGITEC, USB_PRODUCT_LOGITEC_LDR_H443SU2,
+ 0,
+ "Logitec",
+ "DVD Multi-plus unit LDR-H443SU2",
+ },
+ {
+ USB_VENDOR_LOGITEC, USB_PRODUCT_LOGITEC_LDR_H443U2,
+ 0,
+ "Logitec",
+ "DVD Multi-plus unit LDR-H443U2",
+ },
+ {
+ USB_VENDOR_LOGITEC, USB_PRODUCT_LOGITEC_LAN_GTJU2A,
+ 0,
+ "Logitec",
+ "LAN-GTJ/U2A Ethernet",
+ },
+ {
+ USB_VENDOR_LOGITEC, USB_PRODUCT_LOGITEC_RT2870_1,
+ 0,
+ "Logitec",
+ "RT2870",
+ },
+ {
+ USB_VENDOR_LOGITEC, USB_PRODUCT_LOGITEC_RT2870_2,
+ 0,
+ "Logitec",
+ "RT2870",
+ },
+ {
+ USB_VENDOR_LOGITEC, USB_PRODUCT_LOGITEC_RT2870_3,
+ 0,
+ "Logitec",
+ "RT2870",
+ },
+ {
+ USB_VENDOR_LONGCHEER, USB_PRODUCT_LONGCHEER_WM66,
+ 0,
+ "Longcheer Holdings, Ltd.",
+ "Longcheer WM66 HSDPA",
+ },
+ {
+ USB_VENDOR_LONGCHEER, USB_PRODUCT_LONGCHEER_W14,
+ 0,
+ "Longcheer Holdings, Ltd.",
+ "Mobilcom W14",
+ },
+ {
+ USB_VENDOR_LONGCHEER, USB_PRODUCT_LONGCHEER_DISK,
+ 0,
+ "Longcheer Holdings, Ltd.",
+ "Driver disk",
+ },
+ {
+ USB_VENDOR_LUCENT, USB_PRODUCT_LUCENT_EVALKIT,
+ 0,
+ "Lucent",
+ "USS-720 evaluation kit",
+ },
+ {
+ USB_VENDOR_LUWEN, USB_PRODUCT_LUWEN_EASYDISK,
+ 0,
+ "Luwen",
+ "EasyDisc",
+ },
+ {
+ USB_VENDOR_MACALLY, USB_PRODUCT_MACALLY_MOUSE1,
+ 0,
+ "Macally",
+ "mouse",
+ },
+ {
+ USB_VENDOR_MARVELL, USB_PRODUCT_MARVELL_SHEEVAPLUG,
+ 0,
+ "Marvell Technology Group Ltd.",
+ "SheevaPlug serial interface",
+ },
+ {
+ USB_VENDOR_MATRIXORBITAL, USB_PRODUCT_MATRIXORBITAL_MOUA,
+ 0,
+ "Matrix Orbital",
+ "Martrix Orbital MOU-Axxxx LCD displays",
+ },
+ {
+ USB_VENDOR_MCT, USB_PRODUCT_MCT_HUB0100,
+ 0,
+ "MCT",
+ "Hub",
+ },
+ {
+ USB_VENDOR_MCT, USB_PRODUCT_MCT_DU_H3SP_USB232,
+ 0,
+ "MCT",
+ "D-Link DU-H3SP USB BAY Hub",
+ },
+ {
+ USB_VENDOR_MCT, USB_PRODUCT_MCT_USB232,
+ 0,
+ "MCT",
+ "USB-232 Interface",
+ },
+ {
+ USB_VENDOR_MCT, USB_PRODUCT_MCT_SITECOM_USB232,
+ 0,
+ "MCT",
+ "Sitecom USB-232 Products",
+ },
+ {
+ USB_VENDOR_MEIZU, USB_PRODUCT_MEIZU_M6_SL,
+ 0,
+ "Meizu Electronics",
+ "MiniPlayer M6 (SL)",
+ },
+ {
+ USB_VENDOR_MELCO, USB_PRODUCT_MELCO_LUATX1,
+ 0,
+ "Melco",
+ "LUA-TX Ethernet",
+ },
+ {
+ USB_VENDOR_MELCO, USB_PRODUCT_MELCO_LUATX5,
+ 0,
+ "Melco",
+ "LUA-TX Ethernet",
+ },
+ {
+ USB_VENDOR_MELCO, USB_PRODUCT_MELCO_LUA2TX5,
+ 0,
+ "Melco",
+ "LUA2-TX Ethernet",
+ },
+ {
+ USB_VENDOR_MELCO, USB_PRODUCT_MELCO_LUAKTX,
+ 0,
+ "Melco",
+ "LUA-KTX Ethernet",
+ },
+ {
+ USB_VENDOR_MELCO, USB_PRODUCT_MELCO_DUBPXXG,
+ 0,
+ "Melco",
+ "DUB-PxxG",
+ },
+ {
+ USB_VENDOR_MELCO, USB_PRODUCT_MELCO_LUAU2KTX,
+ 0,
+ "Melco",
+ "LUA-U2-KTX Ethernet",
+ },
+ {
+ USB_VENDOR_MELCO, USB_PRODUCT_MELCO_KG54YB,
+ 0,
+ "Melco",
+ "WLI-U2-KG54-YB WLAN",
+ },
+ {
+ USB_VENDOR_MELCO, USB_PRODUCT_MELCO_KG54,
+ 0,
+ "Melco",
+ "WLI-U2-KG54 WLAN",
+ },
+ {
+ USB_VENDOR_MELCO, USB_PRODUCT_MELCO_KG54AI,
+ 0,
+ "Melco",
+ "WLI-U2-KG54-AI WLAN",
+ },
+ {
+ USB_VENDOR_MELCO, USB_PRODUCT_MELCO_LUA3U2AGT,
+ 0,
+ "Melco",
+ "LUA3-U2-AGT",
+ },
+ {
+ USB_VENDOR_MELCO, USB_PRODUCT_MELCO_NINWIFI,
+ 0,
+ "Melco",
+ "Nintendo Wi-Fi",
+ },
+ {
+ USB_VENDOR_MELCO, USB_PRODUCT_MELCO_PCOPRS1,
+ 0,
+ "Melco",
+ "PC-OP-RS1 RemoteStation",
+ },
+ {
+ USB_VENDOR_MELCO, USB_PRODUCT_MELCO_SG54HP,
+ 0,
+ "Melco",
+ "WLI-U2-SG54HP",
+ },
+ {
+ USB_VENDOR_MELCO, USB_PRODUCT_MELCO_G54HP,
+ 0,
+ "Melco",
+ "WLI-U2-G54HP",
+ },
+ {
+ USB_VENDOR_MELCO, USB_PRODUCT_MELCO_KG54L,
+ 0,
+ "Melco",
+ "WLI-U2-KG54L",
+ },
+ {
+ USB_VENDOR_MELCO, USB_PRODUCT_MELCO_WLIUCG300N,
+ 0,
+ "Melco",
+ "WLI-UC-G300N",
+ },
+ {
+ USB_VENDOR_MELCO, USB_PRODUCT_MELCO_SG54HG,
+ 0,
+ "Melco",
+ "WLI-U2-SG54HG",
+ },
+ {
+ USB_VENDOR_MELCO, USB_PRODUCT_MELCO_WLRUCG,
+ 0,
+ "Melco",
+ "WLR-UC-G",
+ },
+ {
+ USB_VENDOR_MELCO, USB_PRODUCT_MELCO_WLRUCGAOSS,
+ 0,
+ "Melco",
+ "WLR-UC-G-AOSS",
+ },
+ {
+ USB_VENDOR_MELCO, USB_PRODUCT_MELCO_WLIUCAG300N,
+ 0,
+ "Melco",
+ "WLI-UC-AG300N",
+ },
+ {
+ USB_VENDOR_MELCO, USB_PRODUCT_MELCO_RT2870_1,
+ 0,
+ "Melco",
+ "RT2870",
+ },
+ {
+ USB_VENDOR_MELCO, USB_PRODUCT_MELCO_RT2870_2,
+ 0,
+ "Melco",
+ "RT2870",
+ },
+ {
+ USB_VENDOR_MELCO, USB_PRODUCT_MELCO_WLIUCGN,
+ 0,
+ "Melco",
+ "WLI-UC-GN",
+ },
+ {
+ USB_VENDOR_MERLIN, USB_PRODUCT_MERLIN_V620,
+ 0,
+ "Merlin",
+ "Merlin V620",
+ },
+ {
+ USB_VENDOR_METAGEEK, USB_PRODUCT_METAGEEK_WISPY1B,
+ 0,
+ "MetaGeek",
+ "MetaGeek Wi-Spy",
+ },
+ {
+ USB_VENDOR_METAGEEK, USB_PRODUCT_METAGEEK_WISPY24X,
+ 0,
+ "MetaGeek",
+ "MetaGeek Wi-Spy 2.4x",
+ },
+ {
+ USB_VENDOR_METAGEEK2, USB_PRODUCT_METAGEEK2_WISPYDBX,
+ 0,
+ "MetaGeek",
+ "MetaGeek Wi-Spy DBx",
+ },
+ {
+ USB_VENDOR_METRICOM, USB_PRODUCT_METRICOM_RICOCHET_GS,
+ 0,
+ "Metricom",
+ "Ricochet GS",
+ },
+ {
+ USB_VENDOR_MGE, USB_PRODUCT_MGE_UPS1,
+ 0,
+ "MGE UPS Systems",
+ "MGE UPS SYSTEMS PROTECTIONCENTER 1",
+ },
+ {
+ USB_VENDOR_MGE, USB_PRODUCT_MGE_UPS2,
+ 0,
+ "MGE UPS Systems",
+ "MGE UPS SYSTEMS PROTECTIONCENTER 2",
+ },
+ {
+ USB_VENDOR_MEI, USB_PRODUCT_MEI_CASHFLOW_SC,
+ 0,
+ "MEI",
+ "Cashflow-SC Cash Acceptor",
+ },
+ {
+ USB_VENDOR_MEI, USB_PRODUCT_MEI_S2000,
+ 0,
+ "MEI",
+ "Seies 2000 Combo Acceptor",
+ },
+ {
+ USB_VENDOR_MSI, USB_PRODUCT_MSI_BT_DONGLE,
+ 0,
+ "Micro Star International",
+ "Bluetooth USB dongle",
+ },
+ {
+ USB_VENDOR_MSI, USB_PRODUCT_MSI_RT3070_1,
+ 0,
+ "Micro Star International",
+ "RT3070",
+ },
+ {
+ USB_VENDOR_MSI, USB_PRODUCT_MSI_RT3070_2,
+ 0,
+ "Micro Star International",
+ "RT3070",
+ },
+ {
+ USB_VENDOR_MSI, USB_PRODUCT_MSI_RT3070_8,
+ 0,
+ "Micro Star International",
+ "RT3070",
+ },
+ {
+ USB_VENDOR_MSI, USB_PRODUCT_MSI_RT3070_3,
+ 0,
+ "Micro Star International",
+ "RT3070",
+ },
+ {
+ USB_VENDOR_MSI, USB_PRODUCT_MSI_RT3070_9,
+ 0,
+ "Micro Star International",
+ "RT3070",
+ },
+ {
+ USB_VENDOR_MSI, USB_PRODUCT_MSI_UB11B,
+ 0,
+ "Micro Star International",
+ "UB11B",
+ },
+ {
+ USB_VENDOR_MSI, USB_PRODUCT_MSI_RT2570,
+ 0,
+ "Micro Star International",
+ "RT2570",
+ },
+ {
+ USB_VENDOR_MSI, USB_PRODUCT_MSI_RT2570_2,
+ 0,
+ "Micro Star International",
+ "RT2570",
+ },
+ {
+ USB_VENDOR_MSI, USB_PRODUCT_MSI_RT2570_3,
+ 0,
+ "Micro Star International",
+ "RT2570",
+ },
+ {
+ USB_VENDOR_MSI, USB_PRODUCT_MSI_RT2573_1,
+ 0,
+ "Micro Star International",
+ "RT2573",
+ },
+ {
+ USB_VENDOR_MSI, USB_PRODUCT_MSI_RT2573_2,
+ 0,
+ "Micro Star International",
+ "RT2573",
+ },
+ {
+ USB_VENDOR_MSI, USB_PRODUCT_MSI_RT3070_4,
+ 0,
+ "Micro Star International",
+ "RT3070",
+ },
+ {
+ USB_VENDOR_MSI, USB_PRODUCT_MSI_RT3070_5,
+ 0,
+ "Micro Star International",
+ "RT3070",
+ },
+ {
+ USB_VENDOR_MSI, USB_PRODUCT_MSI_RT3070_10,
+ 0,
+ "Micro Star International",
+ "RT3070",
+ },
+ {
+ USB_VENDOR_MSI, USB_PRODUCT_MSI_RT3070_6,
+ 0,
+ "Micro Star International",
+ "RT3070",
+ },
+ {
+ USB_VENDOR_MSI, USB_PRODUCT_MSI_RT3070_11,
+ 0,
+ "Micro Star International",
+ "RT3070",
+ },
+ {
+ USB_VENDOR_MSI, USB_PRODUCT_MSI_RT3070_7,
+ 0,
+ "Micro Star International",
+ "RT3070",
+ },
+ {
+ USB_VENDOR_MSI, USB_PRODUCT_MSI_RT2573_3,
+ 0,
+ "Micro Star International",
+ "RT2573",
+ },
+ {
+ USB_VENDOR_MSI, USB_PRODUCT_MSI_RT2573_4,
+ 0,
+ "Micro Star International",
+ "RT2573",
+ },
+ {
+ USB_VENDOR_MICROSOFT, USB_PRODUCT_MICROSOFT_SIDEPREC,
+ 0,
+ "Microsoft",
+ "SideWinder Precision Pro",
+ },
+ {
+ USB_VENDOR_MICROSOFT, USB_PRODUCT_MICROSOFT_INTELLIMOUSE,
+ 0,
+ "Microsoft",
+ "IntelliMouse",
+ },
+ {
+ USB_VENDOR_MICROSOFT, USB_PRODUCT_MICROSOFT_NATURALKBD,
+ 0,
+ "Microsoft",
+ "Natural Keyboard Elite",
+ },
+ {
+ USB_VENDOR_MICROSOFT, USB_PRODUCT_MICROSOFT_DDS80,
+ 0,
+ "Microsoft",
+ "Digital Sound System 80",
+ },
+ {
+ USB_VENDOR_MICROSOFT, USB_PRODUCT_MICROSOFT_SIDEWINDER,
+ 0,
+ "Microsoft",
+ "Sidewinder Precision Racing Wheel",
+ },
+ {
+ USB_VENDOR_MICROSOFT, USB_PRODUCT_MICROSOFT_INETPRO,
+ 0,
+ "Microsoft",
+ "Internet Keyboard Pro",
+ },
+ {
+ USB_VENDOR_MICROSOFT, USB_PRODUCT_MICROSOFT_TBEXPLORER,
+ 0,
+ "Microsoft",
+ "Trackball Explorer",
+ },
+ {
+ USB_VENDOR_MICROSOFT, USB_PRODUCT_MICROSOFT_INTELLIEYE,
+ 0,
+ "Microsoft",
+ "IntelliEye mouse",
+ },
+ {
+ USB_VENDOR_MICROSOFT, USB_PRODUCT_MICROSOFT_INETPRO2,
+ 0,
+ "Microsoft",
+ "Internet Keyboard Pro",
+ },
+ {
+ USB_VENDOR_MICROSOFT, USB_PRODUCT_MICROSOFT_INTELLIMOUSE5,
+ 0,
+ "Microsoft",
+ "IntelliMouse 1.1 5-Button Mouse",
+ },
+ {
+ USB_VENDOR_MICROSOFT, USB_PRODUCT_MICROSOFT_WHEELMOUSE,
+ 0,
+ "Microsoft",
+ "Wheel Mouse Optical",
+ },
+ {
+ USB_VENDOR_MICROSOFT, USB_PRODUCT_MICROSOFT_MN510,
+ 0,
+ "Microsoft",
+ "MN510 Wireless",
+ },
+ {
+ USB_VENDOR_MICROSOFT, USB_PRODUCT_MICROSOFT_700WX,
+ 0,
+ "Microsoft",
+ "Palm 700WX",
+ },
+ {
+ USB_VENDOR_MICROSOFT, USB_PRODUCT_MICROSOFT_MN110,
+ 0,
+ "Microsoft",
+ "10/100 USB NIC",
+ },
+ {
+ USB_VENDOR_MICROSOFT, USB_PRODUCT_MICROSOFT_WLINTELLIMOUSE,
+ 0,
+ "Microsoft",
+ "Wireless Optical IntelliMouse",
+ },
+ {
+ USB_VENDOR_MICROSOFT, USB_PRODUCT_MICROSOFT_WLNOTEBOOK,
+ 0,
+ "Microsoft",
+ "Wireless Optical Mouse (Model 1023)",
+ },
+ {
+ USB_VENDOR_MICROSOFT, USB_PRODUCT_MICROSOFT_COMFORT3000,
+ 0,
+ "Microsoft",
+ "Comfort Optical Mouse 3000 (Model 1043)",
+ },
+ {
+ USB_VENDOR_MICROSOFT, USB_PRODUCT_MICROSOFT_WLNOTEBOOK3,
+ 0,
+ "Microsoft",
+ "Wireless Optical Mouse 3000 (Model 1049)",
+ },
+ {
+ USB_VENDOR_MICROSOFT, USB_PRODUCT_MICROSOFT_NATURAL4000,
+ 0,
+ "Microsoft",
+ "Natural Ergonomic Keyboard 4000",
+ },
+ {
+ USB_VENDOR_MICROSOFT, USB_PRODUCT_MICROSOFT_WLNOTEBOOK2,
+ 0,
+ "Microsoft",
+ "Wireless Optical Mouse 3000 (Model 1056)",
+ },
+ {
+ USB_VENDOR_MICROSOFT, USB_PRODUCT_MICROSOFT_XBOX360,
+ 0,
+ "Microsoft",
+ "XBOX 360 WLAN",
+ },
+ {
+ USB_VENDOR_MICROTECH, USB_PRODUCT_MICROTECH_SCSIDB25,
+ 0,
+ "Microtech",
+ "USB-SCSI-DB25",
+ },
+ {
+ USB_VENDOR_MICROTECH, USB_PRODUCT_MICROTECH_SCSIHD50,
+ 0,
+ "Microtech",
+ "USB-SCSI-HD50",
+ },
+ {
+ USB_VENDOR_MICROTECH, USB_PRODUCT_MICROTECH_DPCM,
+ 0,
+ "Microtech",
+ "USB CameraMate",
+ },
+ {
+ USB_VENDOR_MICROTECH, USB_PRODUCT_MICROTECH_FREECOM,
+ 0,
+ "Microtech",
+ "Freecom USB-IDE",
+ },
+ {
+ USB_VENDOR_MICROTEK, USB_PRODUCT_MICROTEK_336CX,
+ 0,
+ "Microtek",
+ "Phantom 336CX - C3 scanner",
+ },
+ {
+ USB_VENDOR_MICROTEK, USB_PRODUCT_MICROTEK_X6U,
+ 0,
+ "Microtek",
+ "ScanMaker X6 - X6U",
+ },
+ {
+ USB_VENDOR_MICROTEK, USB_PRODUCT_MICROTEK_C6,
+ 0,
+ "Microtek",
+ "Phantom C6 scanner",
+ },
+ {
+ USB_VENDOR_MICROTEK, USB_PRODUCT_MICROTEK_336CX2,
+ 0,
+ "Microtek",
+ "Phantom 336CX - C3 scanner",
+ },
+ {
+ USB_VENDOR_MICROTEK, USB_PRODUCT_MICROTEK_V6USL,
+ 0,
+ "Microtek",
+ "ScanMaker V6USL",
+ },
+ {
+ USB_VENDOR_MICROTEK, USB_PRODUCT_MICROTEK_V6USL2,
+ 0,
+ "Microtek",
+ "ScanMaker V6USL",
+ },
+ {
+ USB_VENDOR_MICROTEK, USB_PRODUCT_MICROTEK_V6UL,
+ 0,
+ "Microtek",
+ "ScanMaker V6UL",
+ },
+ {
+ USB_VENDOR_MICROTUNE, USB_PRODUCT_MICROTUNE_BT_DONGLE,
+ 0,
+ "Microtune",
+ "Bluetooth USB dongle",
+ },
+ {
+ USB_VENDOR_MIDIMAN, USB_PRODUCT_MIDIMAN_MIDISPORT2X2,
+ 0,
+ "Midiman",
+ "Midisport 2x2",
+ },
+ {
+ USB_VENDOR_MINDSATWORK, USB_PRODUCT_MINDSATWORK_WALLET,
+ 0,
+ "Minds At Work",
+ "Digital Wallet",
+ },
+ {
+ USB_VENDOR_MINOLTA, USB_PRODUCT_MINOLTA_2300,
+ 0,
+ "Minolta",
+ "Dimage 2300",
+ },
+ {
+ USB_VENDOR_MINOLTA, USB_PRODUCT_MINOLTA_S304,
+ 0,
+ "Minolta",
+ "Dimage S304",
+ },
+ {
+ USB_VENDOR_MINOLTA, USB_PRODUCT_MINOLTA_X,
+ 0,
+ "Minolta",
+ "Dimage X",
+ },
+ {
+ USB_VENDOR_MINOLTA, USB_PRODUCT_MINOLTA_5400,
+ 0,
+ "Minolta",
+ "Dimage 5400",
+ },
+ {
+ USB_VENDOR_MINOLTA, USB_PRODUCT_MINOLTA_F300,
+ 0,
+ "Minolta",
+ "Dimage F300",
+ },
+ {
+ USB_VENDOR_MINOLTA, USB_PRODUCT_MINOLTA_E223,
+ 0,
+ "Minolta",
+ "Dimage E223",
+ },
+ {
+ USB_VENDOR_MITSUMI, USB_PRODUCT_MITSUMI_CDRRW,
+ 0,
+ "Mitsumi",
+ "CD-R/RW Drive",
+ },
+ {
+ USB_VENDOR_MITSUMI, USB_PRODUCT_MITSUMI_BT_DONGLE,
+ 0,
+ "Mitsumi",
+ "Bluetooth USB dongle",
+ },
+ {
+ USB_VENDOR_MITSUMI, USB_PRODUCT_MITSUMI_FDD,
+ 0,
+ "Mitsumi",
+ "USB FDD",
+ },
+ {
+ USB_VENDOR_MOBILEACTION, USB_PRODUCT_MOBILEACTION_MA620,
+ 0,
+ "Mobile Action",
+ "MA-620 Infrared Adapter",
+ },
+ {
+ USB_VENDOR_MOBILITY, USB_PRODUCT_MOBILITY_EA,
+ 0,
+ "Mobility",
+ "Ethernet",
+ },
+ {
+ USB_VENDOR_MOBILITY, USB_PRODUCT_MOBILITY_EASIDOCK,
+ 0,
+ "Mobility",
+ "EasiDock Ethernet",
+ },
+ {
+ USB_VENDOR_MOSCHIP, USB_PRODUCT_MOSCHIP_MCS7703,
+ 0,
+ "MosChip Semiconductor",
+ "MCS7703 Serial Port Adapter",
+ },
+ {
+ USB_VENDOR_MOSCHIP, USB_PRODUCT_MOSCHIP_MCS7830,
+ 0,
+ "MosChip Semiconductor",
+ "MCS7830 Ethernet",
+ },
+ {
+ USB_VENDOR_MOTOROLA, USB_PRODUCT_MOTOROLA_MC141555,
+ 0,
+ "Motorola",
+ "MC141555 hub controller",
+ },
+ {
+ USB_VENDOR_MOTOROLA, USB_PRODUCT_MOTOROLA_SB4100,
+ 0,
+ "Motorola",
+ "SB4100 USB Cable Modem",
+ },
+ {
+ USB_VENDOR_MOTOROLA2, USB_PRODUCT_MOTOROLA2_T720C,
+ 0,
+ "Motorola",
+ "T720c",
+ },
+ {
+ USB_VENDOR_MOTOROLA2, USB_PRODUCT_MOTOROLA2_A41XV32X,
+ 0,
+ "Motorola",
+ "A41x/V32x Mobile Phones",
+ },
+ {
+ USB_VENDOR_MOTOROLA2, USB_PRODUCT_MOTOROLA2_E398,
+ 0,
+ "Motorola",
+ "E398 Mobile Phone",
+ },
+ {
+ USB_VENDOR_MOTOROLA2, USB_PRODUCT_MOTOROLA2_USBLAN,
+ 0,
+ "Motorola",
+ "USBLAN",
+ },
+ {
+ USB_VENDOR_MOTOROLA2, USB_PRODUCT_MOTOROLA2_USBLAN2,
+ 0,
+ "Motorola",
+ "USBLAN",
+ },
+ {
+ USB_VENDOR_MOTOROLA4, USB_PRODUCT_MOTOROLA4_RT2770,
+ 0,
+ "Motorola",
+ "RT2770",
+ },
+ {
+ USB_VENDOR_MOTOROLA4, USB_PRODUCT_MOTOROLA4_RT3070,
+ 0,
+ "Motorola",
+ "RT3070",
+ },
+ {
+ USB_VENDOR_MULTITECH, USB_PRODUCT_MULTITECH_ATLAS,
+ 0,
+ "MultiTech",
+ "MT5634ZBA-USB modem",
+ },
+ {
+ USB_VENDOR_MUSTEK, USB_PRODUCT_MUSTEK_1200CU,
+ 0,
+ "Mustek Systems",
+ "1200 CU scanner",
+ },
+ {
+ USB_VENDOR_MUSTEK, USB_PRODUCT_MUSTEK_600CU,
+ 0,
+ "Mustek Systems",
+ "600 CU scanner",
+ },
+ {
+ USB_VENDOR_MUSTEK, USB_PRODUCT_MUSTEK_1200USB,
+ 0,
+ "Mustek Systems",
+ "1200 USB scanner",
+ },
+ {
+ USB_VENDOR_MUSTEK, USB_PRODUCT_MUSTEK_1200UB,
+ 0,
+ "Mustek Systems",
+ "1200 UB scanner",
+ },
+ {
+ USB_VENDOR_MUSTEK, USB_PRODUCT_MUSTEK_1200USBPLUS,
+ 0,
+ "Mustek Systems",
+ "1200 USB Plus scanner",
+ },
+ {
+ USB_VENDOR_MUSTEK, USB_PRODUCT_MUSTEK_1200CUPLUS,
+ 0,
+ "Mustek Systems",
+ "1200 CU Plus scanner",
+ },
+ {
+ USB_VENDOR_MUSTEK, USB_PRODUCT_MUSTEK_BEARPAW1200F,
+ 0,
+ "Mustek Systems",
+ "BearPaw 1200F scanner",
+ },
+ {
+ USB_VENDOR_MUSTEK, USB_PRODUCT_MUSTEK_BEARPAW2400TA,
+ 0,
+ "Mustek Systems",
+ "BearPaw 2400TA scanner",
+ },
+ {
+ USB_VENDOR_MUSTEK, USB_PRODUCT_MUSTEK_BEARPAW1200TA,
+ 0,
+ "Mustek Systems",
+ "BearPaw 1200TA scanner",
+ },
+ {
+ USB_VENDOR_MUSTEK, USB_PRODUCT_MUSTEK_600USB,
+ 0,
+ "Mustek Systems",
+ "600 USB scanner",
+ },
+ {
+ USB_VENDOR_MUSTEK, USB_PRODUCT_MUSTEK_MDC800,
+ 0,
+ "Mustek Systems",
+ "MDC-800 digital camera",
+ },
+ {
+ USB_VENDOR_MSYSTEMS, USB_PRODUCT_MSYSTEMS_DISKONKEY,
+ 0,
+ "M-Systems",
+ "DiskOnKey",
+ },
+ {
+ USB_VENDOR_MSYSTEMS, USB_PRODUCT_MSYSTEMS_DISKONKEY2,
+ 0,
+ "M-Systems",
+ "DiskOnKey",
+ },
+ {
+ USB_VENDOR_MYSON, USB_PRODUCT_MYSON_HEDEN_8813,
+ 0,
+ "Myson Technology",
+ "USB-IDE",
+ },
+ {
+ USB_VENDOR_MYSON, USB_PRODUCT_MYSON_HEDEN,
+ 0,
+ "Myson Technology",
+ "USB-IDE",
+ },
+ {
+ USB_VENDOR_MYSON, USB_PRODUCT_MYSON_HUBREADER,
+ 0,
+ "Myson Technology",
+ "COMBO Card reader with USB HUB",
+ },
+ {
+ USB_VENDOR_MYSON, USB_PRODUCT_MYSON_STARREADER,
+ 0,
+ "Myson Technology",
+ "USB flash card adapter",
+ },
+ {
+ USB_VENDOR_NATIONAL, USB_PRODUCT_NATIONAL_BEARPAW1200,
+ 0,
+ "National Semiconductor",
+ "BearPaw 1200",
+ },
+ {
+ USB_VENDOR_NATIONAL, USB_PRODUCT_NATIONAL_BEARPAW2400,
+ 0,
+ "National Semiconductor",
+ "BearPaw 2400",
+ },
+ {
+ USB_VENDOR_NEC, USB_PRODUCT_NEC_HUB_0050,
+ 0,
+ "NEC",
+ "USB 2.0 7-Port Hub",
+ },
+ {
+ USB_VENDOR_NEC, USB_PRODUCT_NEC_HUB_005A,
+ 0,
+ "NEC",
+ "USB 2.0 4-Port Hub",
+ },
+ {
+ USB_VENDOR_NEC, USB_PRODUCT_NEC_HUB,
+ 0,
+ "NEC",
+ "hub",
+ },
+ {
+ USB_VENDOR_NEC, USB_PRODUCT_NEC_HUB_B,
+ 0,
+ "NEC",
+ "hub",
+ },
+ {
+ USB_VENDOR_NEODIO, USB_PRODUCT_NEODIO_ND3260,
+ 0,
+ "Neodio",
+ "8-in-1 Multi-format Flash Controller",
+ },
+ {
+ USB_VENDOR_NEODIO, USB_PRODUCT_NEODIO_ND5010,
+ 0,
+ "Neodio",
+ "Multi-format Flash Controller",
+ },
+ {
+ USB_VENDOR_NEOTEL, USB_PRODUCT_NEOTEL_PRIME,
+ 0,
+ "Neotel",
+ "Prime USB modem",
+ },
+ {
+ USB_VENDOR_NETAC, USB_PRODUCT_NETAC_CF_CARD,
+ 0,
+ "Netac",
+ "USB-CF-Card",
+ },
+ {
+ USB_VENDOR_NETAC, USB_PRODUCT_NETAC_ONLYDISK,
+ 0,
+ "Netac",
+ "OnlyDisk",
+ },
+ {
+ USB_VENDOR_NETCHIP, USB_PRODUCT_NETCHIP_TURBOCONNECT,
+ 0,
+ "NetChip Technology",
+ "Turbo-Connect",
+ },
+ {
+ USB_VENDOR_NETCHIP, USB_PRODUCT_NETCHIP_CLIK_40,
+ 0,
+ "NetChip Technology",
+ "USB Clik! 40",
+ },
+ {
+ USB_VENDOR_NETCHIP, USB_PRODUCT_NETCHIP_ETHERNETGADGET,
+ 0,
+ "NetChip Technology",
+ "Linux Ethernet/RNDIS gadget on pxa210/25x/26x",
+ },
+ {
+ USB_VENDOR_NETGEAR, USB_PRODUCT_NETGEAR_EA101,
+ 0,
+ "BayNETGEAR",
+ "Ethernet",
+ },
+ {
+ USB_VENDOR_NETGEAR, USB_PRODUCT_NETGEAR_EA101X,
+ 0,
+ "BayNETGEAR",
+ "Ethernet",
+ },
+ {
+ USB_VENDOR_NETGEAR, USB_PRODUCT_NETGEAR_FA101,
+ 0,
+ "BayNETGEAR",
+ "Ethernet 10/100, USB1.1",
+ },
+ {
+ USB_VENDOR_NETGEAR, USB_PRODUCT_NETGEAR_FA120,
+ 0,
+ "BayNETGEAR",
+ "USB 2.0 Ethernet",
+ },
+ {
+ USB_VENDOR_NETGEAR, USB_PRODUCT_NETGEAR_WG111V2_2,
+ 0,
+ "BayNETGEAR",
+ "PrismGT USB 2.0 WLAN",
+ },
+ {
+ USB_VENDOR_NETGEAR, USB_PRODUCT_NETGEAR_WG111V3,
+ 0,
+ "BayNETGEAR",
+ "WG111v3",
+ },
+ {
+ USB_VENDOR_NETGEAR, USB_PRODUCT_NETGEAR_WG111U,
+ 0,
+ "BayNETGEAR",
+ "WG111U",
+ },
+ {
+ USB_VENDOR_NETGEAR, USB_PRODUCT_NETGEAR_WG111U_NF,
+ 0,
+ "BayNETGEAR",
+ "WG111U (no firmware)",
+ },
+ {
+ USB_VENDOR_NETGEAR, USB_PRODUCT_NETGEAR_WG111V2,
+ 0,
+ "BayNETGEAR",
+ "WG111V2",
+ },
+ {
+ USB_VENDOR_NETGEAR2, USB_PRODUCT_NETGEAR2_MA101,
+ 0,
+ "Netgear",
+ "MA101",
+ },
+ {
+ USB_VENDOR_NETGEAR2, USB_PRODUCT_NETGEAR2_MA101B,
+ 0,
+ "Netgear",
+ "MA101 Rev B",
+ },
+ {
+ USB_VENDOR_NETGEAR3, USB_PRODUCT_NETGEAR3_WG111T,
+ 0,
+ "Netgear",
+ "WG111T",
+ },
+ {
+ USB_VENDOR_NETGEAR3, USB_PRODUCT_NETGEAR3_WG111T_NF,
+ 0,
+ "Netgear",
+ "WG111T (no firmware)",
+ },
+ {
+ USB_VENDOR_NETGEAR3, USB_PRODUCT_NETGEAR3_WPN111,
+ 0,
+ "Netgear",
+ "WPN111",
+ },
+ {
+ USB_VENDOR_NETGEAR3, USB_PRODUCT_NETGEAR3_WPN111_NF,
+ 0,
+ "Netgear",
+ "WPN111 (no firmware)",
+ },
+ {
+ USB_VENDOR_NETGEAR3, USB_PRODUCT_NETGEAR3_WPN111_2,
+ 0,
+ "Netgear",
+ "WPN111",
+ },
+ {
+ USB_VENDOR_NETINDEX, USB_PRODUCT_NETINDEX_WS002IN,
+ 0,
+ "NetIndex",
+ "Willcom WS002IN",
+ },
+ {
+ USB_VENDOR_NEWLINK, USB_PRODUCT_NEWLINK_USB2IDEBRIDGE,
+ 0,
+ "NEWlink",
+ "USB 2.0 Hard Drive Enclosure",
+ },
+ {
+ USB_VENDOR_NIKON, USB_PRODUCT_NIKON_E990,
+ 0,
+ "Nikon",
+ "Digital Camera E990",
+ },
+ {
+ USB_VENDOR_NIKON, USB_PRODUCT_NIKON_LS40,
+ 0,
+ "Nikon",
+ "CoolScan LS40 ED",
+ },
+ {
+ USB_VENDOR_NIKON, USB_PRODUCT_NIKON_D300,
+ 0,
+ "Nikon",
+ "Digital Camera D300",
+ },
+ {
+ USB_VENDOR_NOVATECH, USB_PRODUCT_NOVATECH_NV902,
+ 0,
+ "NovaTech",
+ "NovaTech NV-902W",
+ },
+ {
+ USB_VENDOR_NOVATECH, USB_PRODUCT_NOVATECH_RT2573,
+ 0,
+ "NovaTech",
+ "RT2573",
+ },
+ {
+ USB_VENDOR_NOKIA, USB_PRODUCT_NOKIA_N958GB,
+ 0,
+ "Nokia",
+ "Nokia N95 8GBc",
+ },
+ {
+ USB_VENDOR_NOKIA2, USB_PRODUCT_NOKIA2_CA42,
+ 0,
+ "Nokia",
+ "CA-42 cable",
+ },
+ {
+ USB_VENDOR_NOVATEL, USB_PRODUCT_NOVATEL_V640,
+ 0,
+ "Novatel Wireless",
+ "Merlin V620",
+ },
+ {
+ USB_VENDOR_NOVATEL, USB_PRODUCT_NOVATEL_CDMA_MODEM,
+ 0,
+ "Novatel Wireless",
+ "Novatel Wireless Merlin CDMA",
+ },
+ {
+ USB_VENDOR_NOVATEL, USB_PRODUCT_NOVATEL_V620,
+ 0,
+ "Novatel Wireless",
+ "Merlin V620",
+ },
+ {
+ USB_VENDOR_NOVATEL, USB_PRODUCT_NOVATEL_V740,
+ 0,
+ "Novatel Wireless",
+ "Merlin V740",
+ },
+ {
+ USB_VENDOR_NOVATEL, USB_PRODUCT_NOVATEL_V720,
+ 0,
+ "Novatel Wireless",
+ "Merlin V720",
+ },
+ {
+ USB_VENDOR_NOVATEL, USB_PRODUCT_NOVATEL_U740,
+ 0,
+ "Novatel Wireless",
+ "Merlin U740",
+ },
+ {
+ USB_VENDOR_NOVATEL, USB_PRODUCT_NOVATEL_U740_2,
+ 0,
+ "Novatel Wireless",
+ "Merlin U740",
+ },
+ {
+ USB_VENDOR_NOVATEL, USB_PRODUCT_NOVATEL_U870,
+ 0,
+ "Novatel Wireless",
+ "Merlin U870",
+ },
+ {
+ USB_VENDOR_NOVATEL, USB_PRODUCT_NOVATEL_XU870,
+ 0,
+ "Novatel Wireless",
+ "Merlin XU870",
+ },
+ {
+ USB_VENDOR_NOVATEL, USB_PRODUCT_NOVATEL_X950D,
+ 0,
+ "Novatel Wireless",
+ "Merlin X950D",
+ },
+ {
+ USB_VENDOR_NOVATEL, USB_PRODUCT_NOVATEL_ES620,
+ 0,
+ "Novatel Wireless",
+ "Expedite ES620",
+ },
+ {
+ USB_VENDOR_NOVATEL, USB_PRODUCT_NOVATEL_E725,
+ 0,
+ "Novatel Wireless",
+ "Expedite E725",
+ },
+ {
+ USB_VENDOR_NOVATEL, USB_PRODUCT_NOVATEL_ES620_2,
+ 0,
+ "Novatel Wireless",
+ "Expedite ES620",
+ },
+ {
+ USB_VENDOR_NOVATEL, USB_PRODUCT_NOVATEL_ES620,
+ 0,
+ "Novatel Wireless",
+ "ES620 CDMA",
+ },
+ {
+ USB_VENDOR_NOVATEL, USB_PRODUCT_NOVATEL_U720,
+ 0,
+ "Novatel Wireless",
+ "Merlin U720",
+ },
+ {
+ USB_VENDOR_NOVATEL, USB_PRODUCT_NOVATEL_EU730,
+ 0,
+ "Novatel Wireless",
+ "Expedite EU730",
+ },
+ {
+ USB_VENDOR_NOVATEL, USB_PRODUCT_NOVATEL_EU740,
+ 0,
+ "Novatel Wireless",
+ "Expedite EU740",
+ },
+ {
+ USB_VENDOR_NOVATEL, USB_PRODUCT_NOVATEL_EU870D,
+ 0,
+ "Novatel Wireless",
+ "Expedite EU870D",
+ },
+ {
+ USB_VENDOR_NOVATEL, USB_PRODUCT_NOVATEL_U727,
+ 0,
+ "Novatel Wireless",
+ "Merlin U727 CDMA",
+ },
+ {
+ USB_VENDOR_NOVATEL, USB_PRODUCT_NOVATEL_MC950D,
+ 0,
+ "Novatel Wireless",
+ "Novatel MC950D HSUPA",
+ },
+ {
+ USB_VENDOR_NOVATEL, USB_PRODUCT_NOVATEL_ZEROCD,
+ 0,
+ "Novatel Wireless",
+ "Novatel ZeroCD",
+ },
+ {
+ USB_VENDOR_NOVATEL, USB_PRODUCT_NOVATEL_ZEROCD2,
+ 0,
+ "Novatel Wireless",
+ "Novatel ZeroCD",
+ },
+ {
+ USB_VENDOR_NOVATEL, USB_PRODUCT_NOVATEL_U727_2,
+ 0,
+ "Novatel Wireless",
+ "Merlin U727 CDMA",
+ },
+ {
+ USB_VENDOR_NOVATEL, USB_PRODUCT_NOVATEL_U760,
+ 0,
+ "Novatel Wireless",
+ "Novatel U760",
+ },
+ {
+ USB_VENDOR_NOVATEL, USB_PRODUCT_NOVATEL_MC760,
+ 0,
+ "Novatel Wireless",
+ "Novatel MC760",
+ },
+ {
+ USB_VENDOR_NOVATEL2, USB_PRODUCT_NOVATEL2_FLEXPACKGPS,
+ 0,
+ "Novatel Wireless",
+ "NovAtel FlexPack GPS receiver",
+ },
+ {
+ USB_VENDOR_O2MICRO, USB_PRODUCT_O2MICRO_OZ776_HUB,
+ 0,
+ "O2 Micro, Inc.",
+ "OZ776 hub",
+ },
+ {
+ USB_VENDOR_O2MICRO, USB_PRODUCT_O2MICRO_OZ776_CCID_SC,
+ 0,
+ "O2 Micro, Inc.",
+ "OZ776 CCID SC Reader",
+ },
+ {
+ USB_VENDOR_OLYMPUS, USB_PRODUCT_OLYMPUS_C1,
+ 0,
+ "Olympus",
+ "C-1 Digital Camera",
+ },
+ {
+ USB_VENDOR_OLYMPUS, USB_PRODUCT_OLYMPUS_C700,
+ 0,
+ "Olympus",
+ "C-700 Ultra Zoom",
+ },
+ {
+ USB_VENDOR_OMNIVISION, USB_PRODUCT_OMNIVISION_OV511,
+ 0,
+ "OmniVision",
+ "OV511 Camera",
+ },
+ {
+ USB_VENDOR_OMNIVISION, USB_PRODUCT_OMNIVISION_OV511PLUS,
+ 0,
+ "OmniVision",
+ "OV511+ Camera",
+ },
+ {
+ USB_VENDOR_ONSPEC, USB_PRODUCT_ONSPEC_SDS_HOTFIND_D,
+ 0,
+ "OnSpec",
+ "SDS-infrared.com Hotfind-D Infrared Camera",
+ },
+ {
+ USB_VENDOR_ONSPEC, USB_PRODUCT_ONSPEC_MDCFE_B_CF_READER,
+ 0,
+ "OnSpec",
+ "MDCFE-B USB CF Reader",
+ },
+ {
+ USB_VENDOR_ONSPEC, USB_PRODUCT_ONSPEC_CFMS_RW,
+ 0,
+ "OnSpec",
+ "SIIG/Datafab Memory Stick+CF Reader/Writer",
+ },
+ {
+ USB_VENDOR_ONSPEC, USB_PRODUCT_ONSPEC_READER,
+ 0,
+ "OnSpec",
+ "Datafab-based Reader",
+ },
+ {
+ USB_VENDOR_ONSPEC, USB_PRODUCT_ONSPEC_CFSM_READER,
+ 0,
+ "OnSpec",
+ "PNY/Datafab CF+SM Reader",
+ },
+ {
+ USB_VENDOR_ONSPEC, USB_PRODUCT_ONSPEC_CFSM_READER2,
+ 0,
+ "OnSpec",
+ "Simple Tech/Datafab CF+SM Reader",
+ },
+ {
+ USB_VENDOR_ONSPEC, USB_PRODUCT_ONSPEC_MDSM_B_READER,
+ 0,
+ "OnSpec",
+ "MDSM-B reader",
+ },
+ {
+ USB_VENDOR_ONSPEC, USB_PRODUCT_ONSPEC_CFSM_COMBO,
+ 0,
+ "OnSpec",
+ "USB to CF + SM Combo (LC1)",
+ },
+ {
+ USB_VENDOR_ONSPEC, USB_PRODUCT_ONSPEC_UCF100,
+ 0,
+ "OnSpec",
+ "FlashLink UCF-100 CompactFlash Reader",
+ },
+ {
+ USB_VENDOR_ONSPEC2, USB_PRODUCT_ONSPEC2_IMAGEMATE_SDDR55,
+ 0,
+ "OnSpec Electronic Inc.",
+ "ImageMate SDDR55",
+ },
+ {
+ USB_VENDOR_OPTION, USB_PRODUCT_OPTION_VODAFONEMC3G,
+ 0,
+ "Option N.V.",
+ "Vodafone Mobile Connect 3G datacard",
+ },
+ {
+ USB_VENDOR_OPTION, USB_PRODUCT_OPTION_GT3G,
+ 0,
+ "Option N.V.",
+ "GlobeTrotter 3G datacard",
+ },
+ {
+ USB_VENDOR_OPTION, USB_PRODUCT_OPTION_GT3GQUAD,
+ 0,
+ "Option N.V.",
+ "GlobeTrotter 3G QUAD datacard",
+ },
+ {
+ USB_VENDOR_OPTION, USB_PRODUCT_OPTION_GT3GPLUS,
+ 0,
+ "Option N.V.",
+ "GlobeTrotter 3G+ datacard",
+ },
+ {
+ USB_VENDOR_OPTION, USB_PRODUCT_OPTION_GTICON322,
+ 0,
+ "Option N.V.",
+ "GlobeTrotter Icon322 storage",
+ },
+ {
+ USB_VENDOR_OPTION, USB_PRODUCT_OPTION_GTMAX36,
+ 0,
+ "Option N.V.",
+ "GlobeTrotter Max 3.6 Modem",
+ },
+ {
+ USB_VENDOR_OPTION, USB_PRODUCT_OPTION_GTHSDPA,
+ 0,
+ "Option N.V.",
+ "GlobeTrotter HSDPA",
+ },
+ {
+ USB_VENDOR_OPTION, USB_PRODUCT_OPTION_GTMAXHSUPA,
+ 0,
+ "Option N.V.",
+ "GlobeTrotter HSUPA",
+ },
+ {
+ USB_VENDOR_OPTION, USB_PRODUCT_OPTION_GTMAXHSUPAE,
+ 0,
+ "Option N.V.",
+ "GlobeTrotter HSUPA PCIe",
+ },
+ {
+ USB_VENDOR_OPTION, USB_PRODUCT_OPTION_GTMAX380HSUPAE,
+ 0,
+ "Option N.V.",
+ "GlobeTrotter 380HSUPA PCIe",
+ },
+ {
+ USB_VENDOR_OPTION, USB_PRODUCT_OPTION_GT3G_1,
+ 0,
+ "Option N.V.",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_OPTION, USB_PRODUCT_OPTION_GT3G_2,
+ 0,
+ "Option N.V.",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_OPTION, USB_PRODUCT_OPTION_GT3G_3,
+ 0,
+ "Option N.V.",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_OPTION, USB_PRODUCT_OPTION_GT3G_4,
+ 0,
+ "Option N.V.",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_OPTION, USB_PRODUCT_OPTION_GT3G_5,
+ 0,
+ "Option N.V.",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_OPTION, USB_PRODUCT_OPTION_GT3G_6,
+ 0,
+ "Option N.V.",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_OPTION, USB_PRODUCT_OPTION_E6500,
+ 0,
+ "Option N.V.",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_OPTION, USB_PRODUCT_OPTION_E6501,
+ 0,
+ "Option N.V.",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_OPTION, USB_PRODUCT_OPTION_E6601,
+ 0,
+ "Option N.V.",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_OPTION, USB_PRODUCT_OPTION_E6721,
+ 0,
+ "Option N.V.",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_OPTION, USB_PRODUCT_OPTION_E6741,
+ 0,
+ "Option N.V.",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_OPTION, USB_PRODUCT_OPTION_E6761,
+ 0,
+ "Option N.V.",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_OPTION, USB_PRODUCT_OPTION_E6800,
+ 0,
+ "Option N.V.",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_OPTION, USB_PRODUCT_OPTION_E7021,
+ 0,
+ "Option N.V.",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_OPTION, USB_PRODUCT_OPTION_E7041,
+ 0,
+ "Option N.V.",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_OPTION, USB_PRODUCT_OPTION_E7061,
+ 0,
+ "Option N.V.",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_OPTION, USB_PRODUCT_OPTION_E7100,
+ 0,
+ "Option N.V.",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_OPTION, USB_PRODUCT_OPTION_GTM380,
+ 0,
+ "Option N.V.",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_OPTION, USB_PRODUCT_OPTION_GE40X,
+ 0,
+ "Option N.V.",
+ "Globetrotter HSUPA",
+ },
+ {
+ USB_VENDOR_OPTION, USB_PRODUCT_OPTION_GSICON72,
+ 0,
+ "Option N.V.",
+ "GlobeSurfer iCON",
+ },
+ {
+ USB_VENDOR_OPTION, USB_PRODUCT_OPTION_GSICONHSUPA,
+ 0,
+ "Option N.V.",
+ "Globetrotter HSUPA",
+ },
+ {
+ USB_VENDOR_OPTION, USB_PRODUCT_OPTION_ICON401,
+ 0,
+ "Option N.V.",
+ "GlobeSurfer iCON 401",
+ },
+ {
+ USB_VENDOR_OPTION, USB_PRODUCT_OPTION_GTHSUPA,
+ 0,
+ "Option N.V.",
+ "Globetrotter HSUPA",
+ },
+ {
+ USB_VENDOR_OPTION, USB_PRODUCT_OPTION_GMT382,
+ 0,
+ "Option N.V.",
+ "Globetrotter HSUPA",
+ },
+ {
+ USB_VENDOR_OPTION, USB_PRODUCT_OPTION_GE40X_1,
+ 0,
+ "Option N.V.",
+ "Globetrotter HSUPA",
+ },
+ {
+ USB_VENDOR_OPTION, USB_PRODUCT_OPTION_GE40X_2,
+ 0,
+ "Option N.V.",
+ "Globetrotter HSUPA",
+ },
+ {
+ USB_VENDOR_OPTION, USB_PRODUCT_OPTION_GE40X_3,
+ 0,
+ "Option N.V.",
+ "Globetrotter HSUPA",
+ },
+ {
+ USB_VENDOR_OPTION, USB_PRODUCT_OPTION_ICONEDGE,
+ 0,
+ "Option N.V.",
+ "GlobeSurfer iCON EDGE",
+ },
+ {
+ USB_VENDOR_OPTION, USB_PRODUCT_OPTION_MODHSXPA,
+ 0,
+ "Option N.V.",
+ "Globetrotter HSUPA",
+ },
+ {
+ USB_VENDOR_OPTION, USB_PRODUCT_OPTION_ICON321,
+ 0,
+ "Option N.V.",
+ "Globetrotter HSUPA",
+ },
+ {
+ USB_VENDOR_OPTION, USB_PRODUCT_OPTION_ICON505,
+ 0,
+ "Option N.V.",
+ "Globetrotter iCON 505",
+ },
+ {
+ USB_VENDOR_OPTION, USB_PRODUCT_OPTION_ICON452,
+ 0,
+ "Option N.V.",
+ "Globetrotter iCON 452",
+ },
+ {
+ USB_VENDOR_OVISLINK, USB_PRODUCT_OVISLINK_RT3072,
+ 0,
+ "OvisLink",
+ "RT3072",
+ },
+ {
+ USB_VENDOR_OQO, USB_PRODUCT_OQO_WIFI01,
+ 0,
+ "OQO",
+ "model 01 WiFi interface",
+ },
+ {
+ USB_VENDOR_OQO, USB_PRODUCT_OQO_BT01,
+ 0,
+ "OQO",
+ "model 01 Bluetooth interface",
+ },
+ {
+ USB_VENDOR_OQO, USB_PRODUCT_OQO_ETHER01PLUS,
+ 0,
+ "OQO",
+ "model 01+ Ethernet",
+ },
+ {
+ USB_VENDOR_OQO, USB_PRODUCT_OQO_ETHER01,
+ 0,
+ "OQO",
+ "model 01 Ethernet interface",
+ },
+ {
+ USB_VENDOR_OTI, USB_PRODUCT_OTI_DKU5,
+ 0,
+ "Ours Technology",
+ "DKU-5 Serial",
+ },
+ {
+ USB_VENDOR_OWEN, USB_PRODUCT_OWEN_AC4,
+ 0,
+ "Owen",
+ "AC4 USB-RS485 converter",
+ },
+ {
+ USB_VENDOR_PALM, USB_PRODUCT_PALM_SERIAL,
+ 0,
+ "Palm Computing",
+ "USB Serial",
+ },
+ {
+ USB_VENDOR_PALM, USB_PRODUCT_PALM_M500,
+ 0,
+ "Palm Computing",
+ "Palm m500",
+ },
+ {
+ USB_VENDOR_PALM, USB_PRODUCT_PALM_M505,
+ 0,
+ "Palm Computing",
+ "Palm m505",
+ },
+ {
+ USB_VENDOR_PALM, USB_PRODUCT_PALM_M515,
+ 0,
+ "Palm Computing",
+ "Palm m515",
+ },
+ {
+ USB_VENDOR_PALM, USB_PRODUCT_PALM_I705,
+ 0,
+ "Palm Computing",
+ "Palm i705",
+ },
+ {
+ USB_VENDOR_PALM, USB_PRODUCT_PALM_TUNGSTEN_Z,
+ 0,
+ "Palm Computing",
+ "Palm Tungsten Z",
+ },
+ {
+ USB_VENDOR_PALM, USB_PRODUCT_PALM_M125,
+ 0,
+ "Palm Computing",
+ "Palm m125",
+ },
+ {
+ USB_VENDOR_PALM, USB_PRODUCT_PALM_M130,
+ 0,
+ "Palm Computing",
+ "Palm m130",
+ },
+ {
+ USB_VENDOR_PALM, USB_PRODUCT_PALM_TUNGSTEN_T,
+ 0,
+ "Palm Computing",
+ "Palm Tungsten T",
+ },
+ {
+ USB_VENDOR_PALM, USB_PRODUCT_PALM_ZIRE31,
+ 0,
+ "Palm Computing",
+ "Palm Zire 31",
+ },
+ {
+ USB_VENDOR_PALM, USB_PRODUCT_PALM_ZIRE,
+ 0,
+ "Palm Computing",
+ "Palm Zire",
+ },
+ {
+ USB_VENDOR_PANASONIC, USB_PRODUCT_PANASONIC_LS120CAM,
+ 0,
+ "Panasonic (Matsushita)",
+ "LS-120 Camera",
+ },
+ {
+ USB_VENDOR_PANASONIC, USB_PRODUCT_PANASONIC_KXL840AN,
+ 0,
+ "Panasonic (Matsushita)",
+ "CD-R Drive KXL-840AN",
+ },
+ {
+ USB_VENDOR_PANASONIC, USB_PRODUCT_PANASONIC_KXLRW32AN,
+ 0,
+ "Panasonic (Matsushita)",
+ "CD-R Drive KXL-RW32AN",
+ },
+ {
+ USB_VENDOR_PANASONIC, USB_PRODUCT_PANASONIC_KXLCB20AN,
+ 0,
+ "Panasonic (Matsushita)",
+ "CD-R Drive KXL-CB20AN",
+ },
+ {
+ USB_VENDOR_PANASONIC, USB_PRODUCT_PANASONIC_KXLCB35AN,
+ 0,
+ "Panasonic (Matsushita)",
+ "DVD-ROM & CD-R/RW",
+ },
+ {
+ USB_VENDOR_PANASONIC, USB_PRODUCT_PANASONIC_SDCAAE,
+ 0,
+ "Panasonic (Matsushita)",
+ "MultiMediaCard",
+ },
+ {
+ USB_VENDOR_PANASONIC, USB_PRODUCT_PANASONIC_TYTP50P6S,
+ 0,
+ "Panasonic (Matsushita)",
+ "TY-TP50P6-S 50in Touch Panel",
+ },
+ {
+ USB_VENDOR_PARA, USB_PRODUCT_PARA_RT3070,
+ 0,
+ "PARA Industrial",
+ "RT3070",
+ },
+ {
+ USB_VENDOR_PEGATRON, USB_PRODUCT_PEGATRON_RT2870,
+ 0,
+ "Pegatron",
+ "RT2870",
+ },
+ {
+ USB_VENDOR_PEGATRON, USB_PRODUCT_PEGATRON_RT3070,
+ 0,
+ "Pegatron",
+ "RT3070",
+ },
+ {
+ USB_VENDOR_PEGATRON, USB_PRODUCT_PEGATRON_RT3070_2,
+ 0,
+ "Pegatron",
+ "RT3070",
+ },
+ {
+ USB_VENDOR_PEGATRON, USB_PRODUCT_PEGATRON_RT3070_3,
+ 0,
+ "Pegatron",
+ "RT3070",
+ },
+ {
+ USB_VENDOR_PERACOM, USB_PRODUCT_PERACOM_SERIAL1,
+ 0,
+ "Peracom Networks",
+ "Serial",
+ },
+ {
+ USB_VENDOR_PERACOM, USB_PRODUCT_PERACOM_ENET,
+ 0,
+ "Peracom Networks",
+ "Ethernet",
+ },
+ {
+ USB_VENDOR_PERACOM, USB_PRODUCT_PERACOM_ENET3,
+ 0,
+ "Peracom Networks",
+ "At Home Ethernet",
+ },
+ {
+ USB_VENDOR_PERACOM, USB_PRODUCT_PERACOM_ENET2,
+ 0,
+ "Peracom Networks",
+ "Ethernet",
+ },
+ {
+ USB_VENDOR_PHILIPS, USB_PRODUCT_PHILIPS_DSS350,
+ 0,
+ "Philips",
+ "DSS 350 Digital Speaker System",
+ },
+ {
+ USB_VENDOR_PHILIPS, USB_PRODUCT_PHILIPS_DSS,
+ 0,
+ "Philips",
+ "DSS XXX Digital Speaker System",
+ },
+ {
+ USB_VENDOR_PHILIPS, USB_PRODUCT_PHILIPS_HUB,
+ 0,
+ "Philips",
+ "hub",
+ },
+ {
+ USB_VENDOR_PHILIPS, USB_PRODUCT_PHILIPS_PCA646VC,
+ 0,
+ "Philips",
+ "PCA646VC PC Camera",
+ },
+ {
+ USB_VENDOR_PHILIPS, USB_PRODUCT_PHILIPS_PCVC680K,
+ 0,
+ "Philips",
+ "PCVC680K Vesta Pro PC Camera",
+ },
+ {
+ USB_VENDOR_PHILIPS, USB_PRODUCT_PHILIPS_DSS150,
+ 0,
+ "Philips",
+ "DSS 150 Digital Speaker System",
+ },
+ {
+ USB_VENDOR_PHILIPS, USB_PRODUCT_PHILIPS_ACE1001,
+ 0,
+ "Philips",
+ "AKTAKOM ACE-1001 cable",
+ },
+ {
+ USB_VENDOR_PHILIPS, USB_PRODUCT_PHILIPS_SPE3030CC,
+ 0,
+ "Philips",
+ "USB 2.0 External Disk",
+ },
+ {
+ USB_VENDOR_PHILIPS, USB_PRODUCT_PHILIPS_SNU5600,
+ 0,
+ "Philips",
+ "SNU5600",
+ },
+ {
+ USB_VENDOR_PHILIPS, USB_PRODUCT_PHILIPS_UM10016,
+ 0,
+ "Philips",
+ "ISP 1581 Hi-Speed USB MPEG2 Encoder Reference Kit",
+ },
+ {
+ USB_VENDOR_PHILIPS, USB_PRODUCT_PHILIPS_DIVAUSB,
+ 0,
+ "Philips",
+ "DIVA USB mp3 player",
+ },
+ {
+ USB_VENDOR_PHILIPS, USB_PRODUCT_PHILIPS_RT2870,
+ 0,
+ "Philips",
+ "RT2870",
+ },
+ {
+ USB_VENDOR_PHILIPSSEMI, USB_PRODUCT_PHILIPSSEMI_HUB1122,
+ 0,
+ "Philips Semiconductors",
+ "HUB",
+ },
+ {
+ USB_VENDOR_MEGATEC, USB_PRODUCT_MEGATEC_UPS,
+ 0,
+ "Megatec",
+ "Phoenixtec protocol based UPS",
+ },
+ {
+ USB_VENDOR_PIENGINEERING, USB_PRODUCT_PIENGINEERING_PS2USB,
+ 0,
+ "P.I. Engineering",
+ "PS2 to Mac USB Adapter",
+ },
+ {
+ USB_VENDOR_PLANEX, USB_PRODUCT_PLANEX_GW_US11H,
+ 0,
+ "Planex Communications",
+ "GW-US11H WLAN",
+ },
+ {
+ USB_VENDOR_PLANEX2, USB_PRODUCT_PLANEX2_GW_US11S,
+ 0,
+ "Planex Communications",
+ "GW-US11S WLAN",
+ },
+ {
+ USB_VENDOR_PLANEX2, USB_PRODUCT_PLANEX2_GW_US54GXS,
+ 0,
+ "Planex Communications",
+ "GW-US54GXS WLAN",
+ },
+ {
+ USB_VENDOR_PLANEX2, USB_PRODUCT_PLANEX2_GWUS54HP,
+ 0,
+ "Planex Communications",
+ "GW-US54HP",
+ },
+ {
+ USB_VENDOR_PLANEX2, USB_PRODUCT_PLANEX2_GWUS300MINIS,
+ 0,
+ "Planex Communications",
+ "GW-US300MiniS",
+ },
+ {
+ USB_VENDOR_PLANEX2, USB_PRODUCT_PLANEX2_RT3070,
+ 0,
+ "Planex Communications",
+ "RT3070",
+ },
+ {
+ USB_VENDOR_PLANEX2, USB_PRODUCT_PLANEX2_GWUS54MINI2,
+ 0,
+ "Planex Communications",
+ "GW-US54Mini2",
+ },
+ {
+ USB_VENDOR_PLANEX2, USB_PRODUCT_PLANEX2_GWUS54SG,
+ 0,
+ "Planex Communications",
+ "GW-US54SG",
+ },
+ {
+ USB_VENDOR_PLANEX2, USB_PRODUCT_PLANEX2_GWUS54GZL,
+ 0,
+ "Planex Communications",
+ "GW-US54GZL",
+ },
+ {
+ USB_VENDOR_PLANEX2, USB_PRODUCT_PLANEX2_GWUS54GD,
+ 0,
+ "Planex Communications",
+ "GW-US54GD",
+ },
+ {
+ USB_VENDOR_PLANEX2, USB_PRODUCT_PLANEX2_GWUSMM,
+ 0,
+ "Planex Communications",
+ "GW-USMM",
+ },
+ {
+ USB_VENDOR_PLANEX2, USB_PRODUCT_PLANEX2_RT2870,
+ 0,
+ "Planex Communications",
+ "RT2870",
+ },
+ {
+ USB_VENDOR_PLANEX2, USB_PRODUCT_PLANEX2_GWUSMICRON,
+ 0,
+ "Planex Communications",
+ "GW-USMicroN",
+ },
+ {
+ USB_VENDOR_PLANEX3, USB_PRODUCT_PLANEX3_GWUS54GZ,
+ 0,
+ "Planex Communications",
+ "GW-US54GZ",
+ },
+ {
+ USB_VENDOR_PLANEX3, USB_PRODUCT_PLANEX3_GU1000T,
+ 0,
+ "Planex Communications",
+ "GU-1000T",
+ },
+ {
+ USB_VENDOR_PLANEX3, USB_PRODUCT_PLANEX3_GWUS54MINI,
+ 0,
+ "Planex Communications",
+ "GW-US54Mini",
+ },
+ {
+ USB_VENDOR_PLEXTOR, USB_PRODUCT_PLEXTOR_40_12_40U,
+ 0,
+ "Plextor",
+ "PlexWriter 40/12/40U",
+ },
+ {
+ USB_VENDOR_PLX, USB_PRODUCT_PLX_TESTBOARD,
+ 0,
+ "PLX",
+ "test board",
+ },
+ {
+ USB_VENDOR_PLX, USB_PRODUCT_PLX_CA42,
+ 0,
+ "PLX",
+ "CA-42",
+ },
+ {
+ USB_VENDOR_PNY, USB_PRODUCT_PNY_ATTACHE2,
+ 0,
+ "PNY",
+ "USB 2.0 Flash Drive",
+ },
+ {
+ USB_VENDOR_PORTGEAR, USB_PRODUCT_PORTGEAR_EA8,
+ 0,
+ "PortGear",
+ "Ethernet",
+ },
+ {
+ USB_VENDOR_PORTGEAR, USB_PRODUCT_PORTGEAR_EA9,
+ 0,
+ "PortGear",
+ "Ethernet",
+ },
+ {
+ USB_VENDOR_PORTSMITH, USB_PRODUCT_PORTSMITH_EEA,
+ 0,
+ "Portsmith",
+ "Express Ethernet",
+ },
+ {
+ USB_VENDOR_PRIMAX, USB_PRODUCT_PRIMAX_G2X300,
+ 0,
+ "Primax Electronics",
+ "G2-200 scanner",
+ },
+ {
+ USB_VENDOR_PRIMAX, USB_PRODUCT_PRIMAX_G2E300,
+ 0,
+ "Primax Electronics",
+ "G2E-300 scanner",
+ },
+ {
+ USB_VENDOR_PRIMAX, USB_PRODUCT_PRIMAX_G2300,
+ 0,
+ "Primax Electronics",
+ "G2-300 scanner",
+ },
+ {
+ USB_VENDOR_PRIMAX, USB_PRODUCT_PRIMAX_G2E3002,
+ 0,
+ "Primax Electronics",
+ "G2E-300 scanner",
+ },
+ {
+ USB_VENDOR_PRIMAX, USB_PRODUCT_PRIMAX_9600,
+ 0,
+ "Primax Electronics",
+ "Colorado USB 9600 scanner",
+ },
+ {
+ USB_VENDOR_PRIMAX, USB_PRODUCT_PRIMAX_600U,
+ 0,
+ "Primax Electronics",
+ "Colorado 600u scanner",
+ },
+ {
+ USB_VENDOR_PRIMAX, USB_PRODUCT_PRIMAX_6200,
+ 0,
+ "Primax Electronics",
+ "Visioneer 6200 scanner",
+ },
+ {
+ USB_VENDOR_PRIMAX, USB_PRODUCT_PRIMAX_19200,
+ 0,
+ "Primax Electronics",
+ "Colorado USB 19200 scanner",
+ },
+ {
+ USB_VENDOR_PRIMAX, USB_PRODUCT_PRIMAX_1200U,
+ 0,
+ "Primax Electronics",
+ "Colorado 1200u scanner",
+ },
+ {
+ USB_VENDOR_PRIMAX, USB_PRODUCT_PRIMAX_G600,
+ 0,
+ "Primax Electronics",
+ "G2-600 scanner",
+ },
+ {
+ USB_VENDOR_PRIMAX, USB_PRODUCT_PRIMAX_636I,
+ 0,
+ "Primax Electronics",
+ "ReadyScan 636i",
+ },
+ {
+ USB_VENDOR_PRIMAX, USB_PRODUCT_PRIMAX_G2600,
+ 0,
+ "Primax Electronics",
+ "G2-600 scanner",
+ },
+ {
+ USB_VENDOR_PRIMAX, USB_PRODUCT_PRIMAX_G2E600,
+ 0,
+ "Primax Electronics",
+ "G2E-600 scanner",
+ },
+ {
+ USB_VENDOR_PRIMAX, USB_PRODUCT_PRIMAX_COMFORT,
+ 0,
+ "Primax Electronics",
+ "Comfort",
+ },
+ {
+ USB_VENDOR_PRIMAX, USB_PRODUCT_PRIMAX_MOUSEINABOX,
+ 0,
+ "Primax Electronics",
+ "Mouse-in-a-Box",
+ },
+ {
+ USB_VENDOR_PRIMAX, USB_PRODUCT_PRIMAX_PCGAUMS1,
+ 0,
+ "Primax Electronics",
+ "Sony PCGA-UMS1",
+ },
+ {
+ USB_VENDOR_PRIMAX, USB_PRODUCT_PRIMAX_HP_RH304AA,
+ 0,
+ "Primax Electronics",
+ "HP RH304AA mouse",
+ },
+ {
+ USB_VENDOR_PROLIFIC, USB_PRODUCT_PROLIFIC_PL2301,
+ 0,
+ "Prolific Technology",
+ "PL2301 Host-Host interface",
+ },
+ {
+ USB_VENDOR_PROLIFIC, USB_PRODUCT_PROLIFIC_PL2302,
+ 0,
+ "Prolific Technology",
+ "PL2302 Host-Host interface",
+ },
+ {
+ USB_VENDOR_PROLIFIC, USB_PRODUCT_PROLIFIC_RSAQ2,
+ 0,
+ "Prolific Technology",
+ "PL2303 Serial (IODATA USB-RSAQ2)",
+ },
+ {
+ USB_VENDOR_PROLIFIC, USB_PRODUCT_PROLIFIC_ALLTRONIX_GPRS,
+ 0,
+ "Prolific Technology",
+ "Alltronix ACM003U00 modem",
+ },
+ {
+ USB_VENDOR_PROLIFIC, USB_PRODUCT_PROLIFIC_ALDIGA_AL11U,
+ 0,
+ "Prolific Technology",
+ "AlDiga AL-11U modem",
+ },
+ {
+ USB_VENDOR_PROLIFIC, USB_PRODUCT_PROLIFIC_MICROMAX_610U,
+ 0,
+ "Prolific Technology",
+ "Micromax 610U",
+ },
+ {
+ USB_VENDOR_PROLIFIC, USB_PRODUCT_PROLIFIC_DCU11,
+ 0,
+ "Prolific Technology",
+ "DCU-11 Phone Cable",
+ },
+ {
+ USB_VENDOR_PROLIFIC, USB_PRODUCT_PROLIFIC_PL2303,
+ 0,
+ "Prolific Technology",
+ "PL2303 Serial (ATEN/IOGEAR UC232A)",
+ },
+ {
+ USB_VENDOR_PROLIFIC, USB_PRODUCT_PROLIFIC_PL2305,
+ 0,
+ "Prolific Technology",
+ "Parallel printer",
+ },
+ {
+ USB_VENDOR_PROLIFIC, USB_PRODUCT_PROLIFIC_ATAPI4,
+ 0,
+ "Prolific Technology",
+ "ATAPI-4 Controller",
+ },
+ {
+ USB_VENDOR_PROLIFIC, USB_PRODUCT_PROLIFIC_PL2501,
+ 0,
+ "Prolific Technology",
+ "PL2501 Host-Host interface",
+ },
+ {
+ USB_VENDOR_PROLIFIC, USB_PRODUCT_PROLIFIC_PL2506,
+ 0,
+ "Prolific Technology",
+ "PL2506 USB to IDE Bridge",
+ },
+ {
+ USB_VENDOR_PROLIFIC, USB_PRODUCT_PROLIFIC_HCR331,
+ 0,
+ "Prolific Technology",
+ "HCR331 Hybrid Card Reader",
+ },
+ {
+ USB_VENDOR_PROLIFIC, USB_PRODUCT_PROLIFIC_PHAROS,
+ 0,
+ "Prolific Technology",
+ "Prolific Pharos",
+ },
+ {
+ USB_VENDOR_PROLIFIC, USB_PRODUCT_PROLIFIC_RSAQ3,
+ 0,
+ "Prolific Technology",
+ "PL2303 Serial Adapter (IODATA USB-RSAQ3)",
+ },
+ {
+ USB_VENDOR_PROLIFIC2, USB_PRODUCT_PROLIFIC2_PL2303,
+ 0,
+ "Prolific Technologies",
+ "PL2303 Serial Adapter",
+ },
+ {
+ USB_VENDOR_PUTERCOM, USB_PRODUCT_PUTERCOM_UPA100,
+ 0,
+ "Putercom",
+ "USB-1284 BRIDGE",
+ },
+ {
+ USB_VENDOR_QCOM, USB_PRODUCT_QCOM_RT2573,
+ 0,
+ "Qcom",
+ "RT2573",
+ },
+ {
+ USB_VENDOR_QCOM, USB_PRODUCT_QCOM_RT2573_2,
+ 0,
+ "Qcom",
+ "RT2573",
+ },
+ {
+ USB_VENDOR_QCOM, USB_PRODUCT_QCOM_RT2573_3,
+ 0,
+ "Qcom",
+ "RT2573",
+ },
+ {
+ USB_VENDOR_QCOM, USB_PRODUCT_QCOM_RT2870,
+ 0,
+ "Qcom",
+ "RT2870",
+ },
+ {
+ USB_VENDOR_QISDA, USB_PRODUCT_QISDA_H21_1,
+ 0,
+ "Qisda",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_QISDA, USB_PRODUCT_QISDA_H21_2,
+ 0,
+ "Qisda",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_QISDA, USB_PRODUCT_QISDA_H20_1,
+ 0,
+ "Qisda",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_QISDA, USB_PRODUCT_QISDA_H20_2,
+ 0,
+ "Qisda",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_QUALCOMM, USB_PRODUCT_QUALCOMM_CDMA_MSM,
+ 0,
+ "Qualcomm",
+ "CDMA Technologies MSM phone",
+ },
+ {
+ USB_VENDOR_QUALCOMM2, USB_PRODUCT_QUALCOMM2_MF330,
+ 0,
+ "Qualcomm",
+ "MF330",
+ },
+ {
+ USB_VENDOR_QUALCOMM2, USB_PRODUCT_QUALCOMM2_RWT_FCT,
+ 0,
+ "Qualcomm",
+ "RWT FCT-CDMA 2000 1xRTT modem",
+ },
+ {
+ USB_VENDOR_QUALCOMM2, USB_PRODUCT_QUALCOMM2_CDMA_MSM,
+ 0,
+ "Qualcomm",
+ "CDMA Technologies MSM modem",
+ },
+ {
+ USB_VENDOR_QUALCOMM2, USB_PRODUCT_QUALCOMM2_AC8700,
+ 0,
+ "Qualcomm",
+ "AC8700",
+ },
+ {
+ USB_VENDOR_QUALCOMMINC, USB_PRODUCT_QUALCOMMINC_CDMA_MSM,
+ 0,
+ "Qualcomm, Incorporated",
+ "CDMA Technologies MSM modem",
+ },
+ {
+ USB_VENDOR_QUALCOMMINC, USB_PRODUCT_QUALCOMMINC_E0002,
+ 0,
+ "Qualcomm, Incorporated",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_QUALCOMMINC, USB_PRODUCT_QUALCOMMINC_E0003,
+ 0,
+ "Qualcomm, Incorporated",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_QUALCOMMINC, USB_PRODUCT_QUALCOMMINC_E0004,
+ 0,
+ "Qualcomm, Incorporated",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_QUALCOMMINC, USB_PRODUCT_QUALCOMMINC_E0005,
+ 0,
+ "Qualcomm, Incorporated",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_QUALCOMMINC, USB_PRODUCT_QUALCOMMINC_E0006,
+ 0,
+ "Qualcomm, Incorporated",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_QUALCOMMINC, USB_PRODUCT_QUALCOMMINC_E0007,
+ 0,
+ "Qualcomm, Incorporated",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_QUALCOMMINC, USB_PRODUCT_QUALCOMMINC_E0008,
+ 0,
+ "Qualcomm, Incorporated",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_QUALCOMMINC, USB_PRODUCT_QUALCOMMINC_E0009,
+ 0,
+ "Qualcomm, Incorporated",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_QUALCOMMINC, USB_PRODUCT_QUALCOMMINC_E000A,
+ 0,
+ "Qualcomm, Incorporated",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_QUALCOMMINC, USB_PRODUCT_QUALCOMMINC_E000B,
+ 0,
+ "Qualcomm, Incorporated",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_QUALCOMMINC, USB_PRODUCT_QUALCOMMINC_E000C,
+ 0,
+ "Qualcomm, Incorporated",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_QUALCOMMINC, USB_PRODUCT_QUALCOMMINC_E000D,
+ 0,
+ "Qualcomm, Incorporated",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_QUALCOMMINC, USB_PRODUCT_QUALCOMMINC_E000E,
+ 0,
+ "Qualcomm, Incorporated",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_QUALCOMMINC, USB_PRODUCT_QUALCOMMINC_E000F,
+ 0,
+ "Qualcomm, Incorporated",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_QUALCOMMINC, USB_PRODUCT_QUALCOMMINC_E0010,
+ 0,
+ "Qualcomm, Incorporated",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_QUALCOMMINC, USB_PRODUCT_QUALCOMMINC_E0011,
+ 0,
+ "Qualcomm, Incorporated",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_QUALCOMMINC, USB_PRODUCT_QUALCOMMINC_E0012,
+ 0,
+ "Qualcomm, Incorporated",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_QUALCOMMINC, USB_PRODUCT_QUALCOMMINC_E0013,
+ 0,
+ "Qualcomm, Incorporated",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_QUALCOMMINC, USB_PRODUCT_QUALCOMMINC_E0014,
+ 0,
+ "Qualcomm, Incorporated",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_QUALCOMMINC, USB_PRODUCT_QUALCOMMINC_MF628,
+ 0,
+ "Qualcomm, Incorporated",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_QUALCOMMINC, USB_PRODUCT_QUALCOMMINC_MF633R,
+ 0,
+ "Qualcomm, Incorporated",
+ "ZTE WCDMA modem",
+ },
+ {
+ USB_VENDOR_QUALCOMMINC, USB_PRODUCT_QUALCOMMINC_E0017,
+ 0,
+ "Qualcomm, Incorporated",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_QUALCOMMINC, USB_PRODUCT_QUALCOMMINC_E0018,
+ 0,
+ "Qualcomm, Incorporated",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_QUALCOMMINC, USB_PRODUCT_QUALCOMMINC_E0019,
+ 0,
+ "Qualcomm, Incorporated",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_QUALCOMMINC, USB_PRODUCT_QUALCOMMINC_E0020,
+ 0,
+ "Qualcomm, Incorporated",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_QUALCOMMINC, USB_PRODUCT_QUALCOMMINC_E0021,
+ 0,
+ "Qualcomm, Incorporated",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_QUALCOMMINC, USB_PRODUCT_QUALCOMMINC_E0022,
+ 0,
+ "Qualcomm, Incorporated",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_QUALCOMMINC, USB_PRODUCT_QUALCOMMINC_E0023,
+ 0,
+ "Qualcomm, Incorporated",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_QUALCOMMINC, USB_PRODUCT_QUALCOMMINC_E0024,
+ 0,
+ "Qualcomm, Incorporated",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_QUALCOMMINC, USB_PRODUCT_QUALCOMMINC_E0025,
+ 0,
+ "Qualcomm, Incorporated",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_QUALCOMMINC, USB_PRODUCT_QUALCOMMINC_E0026,
+ 0,
+ "Qualcomm, Incorporated",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_QUALCOMMINC, USB_PRODUCT_QUALCOMMINC_E0027,
+ 0,
+ "Qualcomm, Incorporated",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_QUALCOMMINC, USB_PRODUCT_QUALCOMMINC_E0028,
+ 0,
+ "Qualcomm, Incorporated",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_QUALCOMMINC, USB_PRODUCT_QUALCOMMINC_E0029,
+ 0,
+ "Qualcomm, Incorporated",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_QUALCOMMINC, USB_PRODUCT_QUALCOMMINC_E0030,
+ 0,
+ "Qualcomm, Incorporated",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_QUALCOMMINC, USB_PRODUCT_QUALCOMMINC_MF626,
+ 0,
+ "Qualcomm, Incorporated",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_QUALCOMMINC, USB_PRODUCT_QUALCOMMINC_E0032,
+ 0,
+ "Qualcomm, Incorporated",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_QUALCOMMINC, USB_PRODUCT_QUALCOMMINC_E0033,
+ 0,
+ "Qualcomm, Incorporated",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_QUALCOMMINC, USB_PRODUCT_QUALCOMMINC_E0037,
+ 0,
+ "Qualcomm, Incorporated",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_QUALCOMMINC, USB_PRODUCT_QUALCOMMINC_E0039,
+ 0,
+ "Qualcomm, Incorporated",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_QUALCOMMINC, USB_PRODUCT_QUALCOMMINC_E0042,
+ 0,
+ "Qualcomm, Incorporated",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_QUALCOMMINC, USB_PRODUCT_QUALCOMMINC_E0043,
+ 0,
+ "Qualcomm, Incorporated",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_QUALCOMMINC, USB_PRODUCT_QUALCOMMINC_E0048,
+ 0,
+ "Qualcomm, Incorporated",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_QUALCOMMINC, USB_PRODUCT_QUALCOMMINC_E0049,
+ 0,
+ "Qualcomm, Incorporated",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_QUALCOMMINC, USB_PRODUCT_QUALCOMMINC_E0051,
+ 0,
+ "Qualcomm, Incorporated",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_QUALCOMMINC, USB_PRODUCT_QUALCOMMINC_E0052,
+ 0,
+ "Qualcomm, Incorporated",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_QUALCOMMINC, USB_PRODUCT_QUALCOMMINC_ZTE_STOR2,
+ 0,
+ "Qualcomm, Incorporated",
+ "USB ZTE Storage",
+ },
+ {
+ USB_VENDOR_QUALCOMMINC, USB_PRODUCT_QUALCOMMINC_E0054,
+ 0,
+ "Qualcomm, Incorporated",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_QUALCOMMINC, USB_PRODUCT_QUALCOMMINC_E0055,
+ 0,
+ "Qualcomm, Incorporated",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_QUALCOMMINC, USB_PRODUCT_QUALCOMMINC_E0057,
+ 0,
+ "Qualcomm, Incorporated",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_QUALCOMMINC, USB_PRODUCT_QUALCOMMINC_E0058,
+ 0,
+ "Qualcomm, Incorporated",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_QUALCOMMINC, USB_PRODUCT_QUALCOMMINC_E0059,
+ 0,
+ "Qualcomm, Incorporated",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_QUALCOMMINC, USB_PRODUCT_QUALCOMMINC_E0060,
+ 0,
+ "Qualcomm, Incorporated",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_QUALCOMMINC, USB_PRODUCT_QUALCOMMINC_E0061,
+ 0,
+ "Qualcomm, Incorporated",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_QUALCOMMINC, USB_PRODUCT_QUALCOMMINC_E0062,
+ 0,
+ "Qualcomm, Incorporated",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_QUALCOMMINC, USB_PRODUCT_QUALCOMMINC_E0063,
+ 0,
+ "Qualcomm, Incorporated",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_QUALCOMMINC, USB_PRODUCT_QUALCOMMINC_E0064,
+ 0,
+ "Qualcomm, Incorporated",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_QUALCOMMINC, USB_PRODUCT_QUALCOMMINC_E0066,
+ 0,
+ "Qualcomm, Incorporated",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_QUALCOMMINC, USB_PRODUCT_QUALCOMMINC_E0069,
+ 0,
+ "Qualcomm, Incorporated",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_QUALCOMMINC, USB_PRODUCT_QUALCOMMINC_E0070,
+ 0,
+ "Qualcomm, Incorporated",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_QUALCOMMINC, USB_PRODUCT_QUALCOMMINC_E0073,
+ 0,
+ "Qualcomm, Incorporated",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_QUALCOMMINC, USB_PRODUCT_QUALCOMMINC_E0076,
+ 0,
+ "Qualcomm, Incorporated",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_QUALCOMMINC, USB_PRODUCT_QUALCOMMINC_E0078,
+ 0,
+ "Qualcomm, Incorporated",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_QUALCOMMINC, USB_PRODUCT_QUALCOMMINC_E0082,
+ 0,
+ "Qualcomm, Incorporated",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_QUALCOMMINC, USB_PRODUCT_QUALCOMMINC_E0086,
+ 0,
+ "Qualcomm, Incorporated",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_QUALCOMMINC, USB_PRODUCT_QUALCOMMINC_ZTE_STOR,
+ 0,
+ "Qualcomm, Incorporated",
+ "USB ZTE Storage",
+ },
+ {
+ USB_VENDOR_QUALCOMMINC, USB_PRODUCT_QUALCOMMINC_E2002,
+ 0,
+ "Qualcomm, Incorporated",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_QUALCOMMINC, USB_PRODUCT_QUALCOMMINC_E2003,
+ 0,
+ "Qualcomm, Incorporated",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_QUALCOMMINC, USB_PRODUCT_QUALCOMMINC_AC8710,
+ 0,
+ "Qualcomm, Incorporated",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_QUALCOMMINC, USB_PRODUCT_QUALCOMMINC_AC2726,
+ 0,
+ "Qualcomm, Incorporated",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_QUALCOMMINC, USB_PRODUCT_QUALCOMMINC_AC8700,
+ 0,
+ "Qualcomm, Incorporated",
+ "CDMA 1xEVDO USB modem",
+ },
+ {
+ USB_VENDOR_QUANTA, USB_PRODUCT_QUANTA_RW6815_1,
+ 0,
+ "Quanta",
+ "HP iPAQ rw6815",
+ },
+ {
+ USB_VENDOR_QUANTA, USB_PRODUCT_QUANTA_RT3070,
+ 0,
+ "Quanta",
+ "RT3070",
+ },
+ {
+ USB_VENDOR_QUANTA, USB_PRODUCT_QUANTA_Q101_STOR,
+ 0,
+ "Quanta",
+ "USB Q101 Storage",
+ },
+ {
+ USB_VENDOR_QUANTA, USB_PRODUCT_QUANTA_Q101,
+ 0,
+ "Quanta",
+ "HSDPA modem",
+ },
+ {
+ USB_VENDOR_QUANTA, USB_PRODUCT_QUANTA_Q111,
+ 0,
+ "Quanta",
+ "HSDPA modem",
+ },
+ {
+ USB_VENDOR_QUANTA, USB_PRODUCT_QUANTA_GLX,
+ 0,
+ "Quanta",
+ "HSDPA modem",
+ },
+ {
+ USB_VENDOR_QUANTA, USB_PRODUCT_QUANTA_GKE,
+ 0,
+ "Quanta",
+ "HSDPA modem",
+ },
+ {
+ USB_VENDOR_QUANTA, USB_PRODUCT_QUANTA_GLE,
+ 0,
+ "Quanta",
+ "HSDPA modem",
+ },
+ {
+ USB_VENDOR_QUANTA, USB_PRODUCT_QUANTA_RW6815R,
+ 0,
+ "Quanta",
+ "HP iPAQ rw6815 RNDIS",
+ },
+ {
+ USB_VENDOR_QTRONIX, USB_PRODUCT_QTRONIX_980N,
+ 0,
+ "Qtronix",
+ "Scorpion-980N keyboard",
+ },
+ {
+ USB_VENDOR_QUICKSHOT, USB_PRODUCT_QUICKSHOT_STRIKEPAD,
+ 0,
+ "Quickshot",
+ "USB StrikePad",
+ },
+ {
+ USB_VENDOR_RADIOSHACK, USB_PRODUCT_RADIOSHACK_USBCABLE,
+ 0,
+ "Radio Shack",
+ "USB to Serial Cable",
+ },
+ {
+ USB_VENDOR_RAINBOW, USB_PRODUCT_RAINBOW_IKEY2000,
+ 0,
+ "Rainbow Technologies",
+ "i-Key 2000",
+ },
+ {
+ USB_VENDOR_RALINK, USB_PRODUCT_RALINK_RT2570,
+ 0,
+ "Ralink Technology",
+ "RT2500USB Wireless Adapter",
+ },
+ {
+ USB_VENDOR_RALINK, USB_PRODUCT_RALINK_RT2070,
+ 0,
+ "Ralink Technology",
+ "RT2070",
+ },
+ {
+ USB_VENDOR_RALINK, USB_PRODUCT_RALINK_RT2570_2,
+ 0,
+ "Ralink Technology",
+ "RT2500USB Wireless Adapter",
+ },
+ {
+ USB_VENDOR_RALINK, USB_PRODUCT_RALINK_RT2573,
+ 0,
+ "Ralink Technology",
+ "RT2501USB Wireless Adapter",
+ },
+ {
+ USB_VENDOR_RALINK, USB_PRODUCT_RALINK_RT2671,
+ 0,
+ "Ralink Technology",
+ "RT2601USB Wireless Adapter",
+ },
+ {
+ USB_VENDOR_RALINK, USB_PRODUCT_RALINK_RT2770,
+ 0,
+ "Ralink Technology",
+ "RT2770",
+ },
+ {
+ USB_VENDOR_RALINK, USB_PRODUCT_RALINK_RT2870,
+ 0,
+ "Ralink Technology",
+ "RT2870",
+ },
+ {
+ USB_VENDOR_RALINK, USB_PRODUCT_RALINK_RT3070,
+ 0,
+ "Ralink Technology",
+ "RT3070",
+ },
+ {
+ USB_VENDOR_RALINK, USB_PRODUCT_RALINK_RT3071,
+ 0,
+ "Ralink Technology",
+ "RT3071",
+ },
+ {
+ USB_VENDOR_RALINK, USB_PRODUCT_RALINK_RT3072,
+ 0,
+ "Ralink Technology",
+ "RT3072",
+ },
+ {
+ USB_VENDOR_RALINK, USB_PRODUCT_RALINK_RT3370,
+ 0,
+ "Ralink Technology",
+ "RT3370",
+ },
+ {
+ USB_VENDOR_RALINK, USB_PRODUCT_RALINK_RT3572,
+ 0,
+ "Ralink Technology",
+ "RT3572",
+ },
+ {
+ USB_VENDOR_RALINK, USB_PRODUCT_RALINK_RT8070,
+ 0,
+ "Ralink Technology",
+ "RT8070",
+ },
+ {
+ USB_VENDOR_RALINK, USB_PRODUCT_RALINK_RT2570_3,
+ 0,
+ "Ralink Technology",
+ "RT2500USB Wireless Adapter",
+ },
+ {
+ USB_VENDOR_RALINK, USB_PRODUCT_RALINK_RT2573_2,
+ 0,
+ "Ralink Technology",
+ "RT2501USB Wireless Adapter",
+ },
+ {
+ USB_VENDOR_RATOC, USB_PRODUCT_RATOC_REXUSB60,
+ 0,
+ "RATOC Systems",
+ "USB serial adapter REX-USB60",
+ },
+ {
+ USB_VENDOR_RATOC, USB_PRODUCT_RATOC_REXUSB60F,
+ 0,
+ "RATOC Systems",
+ "USB serial adapter REX-USB60F",
+ },
+ {
+ USB_VENDOR_REALTEK, USB_PRODUCT_REALTEK_USB20CRW,
+ 0,
+ "Realtek",
+ "USB20CRW Card Reader",
+ },
+ {
+ USB_VENDOR_REALTEK, USB_PRODUCT_REALTEK_USBKR100,
+ 0,
+ "Realtek",
+ "USBKR100 USB Ethernet",
+ },
+ {
+ USB_VENDOR_REALTEK, USB_PRODUCT_REALTEK_RTL8187,
+ 0,
+ "Realtek",
+ "RTL8187 Wireless Adapter",
+ },
+ {
+ USB_VENDOR_REALTEK, USB_PRODUCT_REALTEK_RTL8187B_0,
+ 0,
+ "Realtek",
+ "RTL8187B Wireless Adapter",
+ },
+ {
+ USB_VENDOR_REALTEK, USB_PRODUCT_REALTEK_RTL8187B_1,
+ 0,
+ "Realtek",
+ "RTL8187B Wireless Adapter",
+ },
+ {
+ USB_VENDOR_REALTEK, USB_PRODUCT_REALTEK_RTL8187B_2,
+ 0,
+ "Realtek",
+ "RTL8187B Wireless Adapter",
+ },
+ {
+ USB_VENDOR_RICOH, USB_PRODUCT_RICOH_VGPVCC2,
+ 0,
+ "Ricoh",
+ "VGP-VCC2 Camera",
+ },
+ {
+ USB_VENDOR_RICOH, USB_PRODUCT_RICOH_VGPVCC3,
+ 0,
+ "Ricoh",
+ "VGP-VCC3 Camera",
+ },
+ {
+ USB_VENDOR_RICOH, USB_PRODUCT_RICOH_VGPVCC2_2,
+ 0,
+ "Ricoh",
+ "VGP-VCC2 Camera",
+ },
+ {
+ USB_VENDOR_RICOH, USB_PRODUCT_RICOH_VGPVCC2_3,
+ 0,
+ "Ricoh",
+ "VGP-VCC2 Camera",
+ },
+ {
+ USB_VENDOR_RICOH, USB_PRODUCT_RICOH_VGPVCC7,
+ 0,
+ "Ricoh",
+ "VGP-VCC7 Camera",
+ },
+ {
+ USB_VENDOR_RICOH, USB_PRODUCT_RICOH_VGPVCC8,
+ 0,
+ "Ricoh",
+ "VGP-VCC8 Camera",
+ },
+ {
+ USB_VENDOR_REINERSCT, USB_PRODUCT_REINERSCT_CYBERJACK_ECOM,
+ 0,
+ "Reiner-SCT",
+ "e-com cyberJack",
+ },
+ {
+ USB_VENDOR_ROLAND, USB_PRODUCT_ROLAND_UM1,
+ 0,
+ "Roland",
+ "UM-1 MIDI I/F",
+ },
+ {
+ USB_VENDOR_ROLAND, USB_PRODUCT_ROLAND_UM880N,
+ 0,
+ "Roland",
+ "EDIROL UM-880 MIDI I/F (native)",
+ },
+ {
+ USB_VENDOR_ROLAND, USB_PRODUCT_ROLAND_UM880G,
+ 0,
+ "Roland",
+ "EDIROL UM-880 MIDI I/F (generic)",
+ },
+ {
+ USB_VENDOR_ROCKFIRE, USB_PRODUCT_ROCKFIRE_GAMEPAD,
+ 0,
+ "Rockfire",
+ "gamepad 203USB",
+ },
+ {
+ USB_VENDOR_RATOC, USB_PRODUCT_RATOC_REXUSB60,
+ 0,
+ "RATOC Systems",
+ "REX-USB60",
+ },
+ {
+ USB_VENDOR_RATOC, USB_PRODUCT_RATOC_REXUSB60F,
+ 0,
+ "RATOC Systems",
+ "REX-USB60F",
+ },
+ {
+ USB_VENDOR_SAGEM, USB_PRODUCT_SAGEM_USBSERIAL,
+ 0,
+ "Sagem",
+ "USB-Serial Controller",
+ },
+ {
+ USB_VENDOR_SAGEM, USB_PRODUCT_SAGEM_XG760A,
+ 0,
+ "Sagem",
+ "XG-760A",
+ },
+ {
+ USB_VENDOR_SAGEM, USB_PRODUCT_SAGEM_XG76NA,
+ 0,
+ "Sagem",
+ "XG-76NA",
+ },
+ {
+ USB_VENDOR_SAMSUNG, USB_PRODUCT_SAMSUNG_ML6060,
+ 0,
+ "Samsung Electronics",
+ "ML-6060 laser printer",
+ },
+ {
+ USB_VENDOR_SAMSUNG, USB_PRODUCT_SAMSUNG_YP_U2,
+ 0,
+ "Samsung Electronics",
+ "YP-U2 MP3 Player",
+ },
+ {
+ USB_VENDOR_SAMSUNG, USB_PRODUCT_SAMSUNG_YP_U4,
+ 0,
+ "Samsung Electronics",
+ "YP-U4 MP3 Player",
+ },
+ {
+ USB_VENDOR_SAMSUNG, USB_PRODUCT_SAMSUNG_I500,
+ 0,
+ "Samsung Electronics",
+ "I500 Palm USB Phone",
+ },
+ {
+ USB_VENDOR_SAMSUNG, USB_PRODUCT_SAMSUNG_I330,
+ 0,
+ "Samsung Electronics",
+ "I330 phone cradle",
+ },
+ {
+ USB_VENDOR_SAMSUNG2, USB_PRODUCT_SAMSUNG2_RT2870_1,
+ 0,
+ "Samsung Electronics",
+ "RT2870",
+ },
+ {
+ USB_VENDOR_SAMSUNG_TECHWIN, USB_PRODUCT_SAMSUNG_TECHWIN_DIGIMAX_410,
+ 0,
+ "Samsung Techwin",
+ "Digimax 410",
+ },
+ {
+ USB_VENDOR_SANDISK, USB_PRODUCT_SANDISK_SDDR05A,
+ 0,
+ "SanDisk",
+ "ImageMate SDDR-05a",
+ },
+ {
+ USB_VENDOR_SANDISK, USB_PRODUCT_SANDISK_SDDR31,
+ 0,
+ "SanDisk",
+ "ImageMate SDDR-31",
+ },
+ {
+ USB_VENDOR_SANDISK, USB_PRODUCT_SANDISK_SDDR05,
+ 0,
+ "SanDisk",
+ "ImageMate SDDR-05",
+ },
+ {
+ USB_VENDOR_SANDISK, USB_PRODUCT_SANDISK_SDDR12,
+ 0,
+ "SanDisk",
+ "ImageMate SDDR-12",
+ },
+ {
+ USB_VENDOR_SANDISK, USB_PRODUCT_SANDISK_SDDR09,
+ 0,
+ "SanDisk",
+ "ImageMate SDDR-09",
+ },
+ {
+ USB_VENDOR_SANDISK, USB_PRODUCT_SANDISK_SDDR75,
+ 0,
+ "SanDisk",
+ "ImageMate SDDR-75",
+ },
+ {
+ USB_VENDOR_SANDISK, USB_PRODUCT_SANDISK_SDCZ2_256,
+ 0,
+ "SanDisk",
+ "Cruzer Mini 256MB",
+ },
+ {
+ USB_VENDOR_SANDISK, USB_PRODUCT_SANDISK_SDCZ4_128,
+ 0,
+ "SanDisk",
+ "Cruzer Micro 128MB",
+ },
+ {
+ USB_VENDOR_SANDISK, USB_PRODUCT_SANDISK_SDCZ4_256,
+ 0,
+ "SanDisk",
+ "Cruzer Micro 256MB",
+ },
+ {
+ USB_VENDOR_SANWA, USB_PRODUCT_SANWA_KB_USB2,
+ 0,
+ "Sanwa Electric Instrument Co., Ltd.",
+ "KB-USB2 multimeter cable",
+ },
+ {
+ USB_VENDOR_SANYO, USB_PRODUCT_SANYO_SCP4900,
+ 0,
+ "Sanyo Electric",
+ "Sanyo SCP-4900 USB Phone",
+ },
+ {
+ USB_VENDOR_SCANLOGIC, USB_PRODUCT_SCANLOGIC_SL11R,
+ 0,
+ "ScanLogic",
+ "SL11R IDE Adapter",
+ },
+ {
+ USB_VENDOR_SCANLOGIC, USB_PRODUCT_SCANLOGIC_336CX,
+ 0,
+ "ScanLogic",
+ "Phantom 336CX - C3 scanner",
+ },
+ {
+ USB_VENDOR_SENAO, USB_PRODUCT_SENAO_RT2870_3,
+ 0,
+ "Senao",
+ "RT2870",
+ },
+ {
+ USB_VENDOR_SENAO, USB_PRODUCT_SENAO_RT2870_4,
+ 0,
+ "Senao",
+ "RT2870",
+ },
+ {
+ USB_VENDOR_SENAO, USB_PRODUCT_SENAO_NUB8301,
+ 0,
+ "Senao",
+ "NUB-8301",
+ },
+ {
+ USB_VENDOR_SENAO, USB_PRODUCT_SENAO_RT2870_1,
+ 0,
+ "Senao",
+ "RT2870",
+ },
+ {
+ USB_VENDOR_SENAO, USB_PRODUCT_SENAO_RT2870_2,
+ 0,
+ "Senao",
+ "RT2870",
+ },
+ {
+ USB_VENDOR_SENAO, USB_PRODUCT_SENAO_RT3070,
+ 0,
+ "Senao",
+ "RT3070",
+ },
+ {
+ USB_VENDOR_SENAO, USB_PRODUCT_SENAO_RT3071,
+ 0,
+ "Senao",
+ "RT3071",
+ },
+ {
+ USB_VENDOR_SENAO, USB_PRODUCT_SENAO_RT3072_1,
+ 0,
+ "Senao",
+ "RT3072",
+ },
+ {
+ USB_VENDOR_SENAO, USB_PRODUCT_SENAO_RT3072_2,
+ 0,
+ "Senao",
+ "RT3072",
+ },
+ {
+ USB_VENDOR_SENAO, USB_PRODUCT_SENAO_RT3072_3,
+ 0,
+ "Senao",
+ "RT3072",
+ },
+ {
+ USB_VENDOR_SENAO, USB_PRODUCT_SENAO_RT3072_4,
+ 0,
+ "Senao",
+ "RT3072",
+ },
+ {
+ USB_VENDOR_SENAO, USB_PRODUCT_SENAO_RT3072_5,
+ 0,
+ "Senao",
+ "RT3072",
+ },
+ {
+ USB_VENDOR_SHANTOU, USB_PRODUCT_SHANTOU_ST268,
+ 0,
+ "ShanTou",
+ "ST268",
+ },
+ {
+ USB_VENDOR_SHANTOU, USB_PRODUCT_SHANTOU_DM9601,
+ 0,
+ "ShanTou",
+ "DM 9601",
+ },
+ {
+ USB_VENDOR_SHARK, USB_PRODUCT_SHARK_PA,
+ 0,
+ "Shark",
+ "Pocket Adapter",
+ },
+ {
+ USB_VENDOR_SHARP, USB_PRODUCT_SHARP_SL5500,
+ 0,
+ "Sharp",
+ "Zaurus SL-5500 PDA",
+ },
+ {
+ USB_VENDOR_SHARP, USB_PRODUCT_SHARP_SLA300,
+ 0,
+ "Sharp",
+ "Zaurus SL-A300 PDA",
+ },
+ {
+ USB_VENDOR_SHARP, USB_PRODUCT_SHARP_SL5600,
+ 0,
+ "Sharp",
+ "Zaurus SL-5600 PDA",
+ },
+ {
+ USB_VENDOR_SHARP, USB_PRODUCT_SHARP_SLC700,
+ 0,
+ "Sharp",
+ "Zaurus SL-C700 PDA",
+ },
+ {
+ USB_VENDOR_SHARP, USB_PRODUCT_SHARP_SLC750,
+ 0,
+ "Sharp",
+ "Zaurus SL-C750 PDA",
+ },
+ {
+ USB_VENDOR_SHARP, USB_PRODUCT_SHARP_WZERO3ES,
+ 0,
+ "Sharp",
+ "W-ZERO3 ES Smartphone",
+ },
+ {
+ USB_VENDOR_SHARP, USB_PRODUCT_SHARP_WZERO3ADES,
+ 0,
+ "Sharp",
+ "Advanced W-ZERO3 ES Smartphone",
+ },
+ {
+ USB_VENDOR_SHARP, USB_PRODUCT_SHARP_WILLCOM03,
+ 0,
+ "Sharp",
+ "WILLCOM03",
+ },
+ {
+ USB_VENDOR_SHUTTLE, USB_PRODUCT_SHUTTLE_EUSB,
+ 0,
+ "Shuttle Technology",
+ "E-USB Bridge",
+ },
+ {
+ USB_VENDOR_SHUTTLE, USB_PRODUCT_SHUTTLE_EUSCSI,
+ 0,
+ "Shuttle Technology",
+ "eUSCSI Bridge",
+ },
+ {
+ USB_VENDOR_SHUTTLE, USB_PRODUCT_SHUTTLE_SDDR09,
+ 0,
+ "Shuttle Technology",
+ "ImageMate SDDR09",
+ },
+ {
+ USB_VENDOR_SHUTTLE, USB_PRODUCT_SHUTTLE_EUSBCFSM,
+ 0,
+ "Shuttle Technology",
+ "eUSB SmartMedia / CompactFlash Adapter",
+ },
+ {
+ USB_VENDOR_SHUTTLE, USB_PRODUCT_SHUTTLE_ZIOMMC,
+ 0,
+ "Shuttle Technology",
+ "eUSB MultiMediaCard Adapter",
+ },
+ {
+ USB_VENDOR_SHUTTLE, USB_PRODUCT_SHUTTLE_HIFD,
+ 0,
+ "Shuttle Technology",
+ "Sony Hifd",
+ },
+ {
+ USB_VENDOR_SHUTTLE, USB_PRODUCT_SHUTTLE_EUSBATAPI,
+ 0,
+ "Shuttle Technology",
+ "eUSB ATA/ATAPI Adapter",
+ },
+ {
+ USB_VENDOR_SHUTTLE, USB_PRODUCT_SHUTTLE_CF,
+ 0,
+ "Shuttle Technology",
+ "eUSB CompactFlash Adapter",
+ },
+ {
+ USB_VENDOR_SHUTTLE, USB_PRODUCT_SHUTTLE_EUSCSI_B,
+ 0,
+ "Shuttle Technology",
+ "eUSCSI Bridge",
+ },
+ {
+ USB_VENDOR_SHUTTLE, USB_PRODUCT_SHUTTLE_EUSCSI_C,
+ 0,
+ "Shuttle Technology",
+ "eUSCSI Bridge",
+ },
+ {
+ USB_VENDOR_SHUTTLE, USB_PRODUCT_SHUTTLE_CDRW,
+ 0,
+ "Shuttle Technology",
+ "CD-RW Device",
+ },
+ {
+ USB_VENDOR_SHUTTLE, USB_PRODUCT_SHUTTLE_EUSBORCA,
+ 0,
+ "Shuttle Technology",
+ "eUSB ORCA Quad Reader",
+ },
+ {
+ USB_VENDOR_SIEMENS, USB_PRODUCT_SIEMENS_SPEEDSTREAM,
+ 0,
+ "Siemens",
+ "SpeedStream",
+ },
+ {
+ USB_VENDOR_SIEMENS, USB_PRODUCT_SIEMENS_SPEEDSTREAM22,
+ 0,
+ "Siemens",
+ "SpeedStream 1022",
+ },
+ {
+ USB_VENDOR_SIEMENS2, USB_PRODUCT_SIEMENS2_WLL013,
+ 0,
+ "Siemens",
+ "WLL013",
+ },
+ {
+ USB_VENDOR_SIEMENS2, USB_PRODUCT_SIEMENS2_ES75,
+ 0,
+ "Siemens",
+ "GSM module MC35",
+ },
+ {
+ USB_VENDOR_SIEMENS2, USB_PRODUCT_SIEMENS2_WL54G,
+ 0,
+ "Siemens",
+ "54g USB Network Adapter",
+ },
+ {
+ USB_VENDOR_SIEMENS3, USB_PRODUCT_SIEMENS3_SX1,
+ 0,
+ "Siemens",
+ "SX1",
+ },
+ {
+ USB_VENDOR_SIEMENS3, USB_PRODUCT_SIEMENS3_X65,
+ 0,
+ "Siemens",
+ "X65",
+ },
+ {
+ USB_VENDOR_SIEMENS3, USB_PRODUCT_SIEMENS3_X75,
+ 0,
+ "Siemens",
+ "X75",
+ },
+ {
+ USB_VENDOR_SIEMENS3, USB_PRODUCT_SIEMENS3_EF81,
+ 0,
+ "Siemens",
+ "EF81",
+ },
+ {
+ USB_VENDOR_SIERRA, USB_PRODUCT_SIERRA_EM5625,
+ 0,
+ "Sierra Wireless",
+ "EM5625",
+ },
+ {
+ USB_VENDOR_SIERRA, USB_PRODUCT_SIERRA_MC5720_2,
+ 0,
+ "Sierra Wireless",
+ "MC5720",
+ },
+ {
+ USB_VENDOR_SIERRA, USB_PRODUCT_SIERRA_MC5725,
+ 0,
+ "Sierra Wireless",
+ "MC5725",
+ },
+ {
+ USB_VENDOR_SIERRA, USB_PRODUCT_SIERRA_AIRCARD580,
+ 0,
+ "Sierra Wireless",
+ "Sierra Wireless AirCard 580",
+ },
+ {
+ USB_VENDOR_SIERRA, USB_PRODUCT_SIERRA_AIRCARD595,
+ 0,
+ "Sierra Wireless",
+ "Sierra Wireless AirCard 595",
+ },
+ {
+ USB_VENDOR_SIERRA, USB_PRODUCT_SIERRA_AC595U,
+ 0,
+ "Sierra Wireless",
+ "Sierra Wireless AirCard 595U",
+ },
+ {
+ USB_VENDOR_SIERRA, USB_PRODUCT_SIERRA_AC597E,
+ 0,
+ "Sierra Wireless",
+ "Sierra Wireless AirCard 597E",
+ },
+ {
+ USB_VENDOR_SIERRA, USB_PRODUCT_SIERRA_EM5725,
+ 0,
+ "Sierra Wireless",
+ "EM5725",
+ },
+ {
+ USB_VENDOR_SIERRA, USB_PRODUCT_SIERRA_C597,
+ 0,
+ "Sierra Wireless",
+ "Sierra Wireless Compass 597",
+ },
+ {
+ USB_VENDOR_SIERRA, USB_PRODUCT_SIERRA_MC5727,
+ 0,
+ "Sierra Wireless",
+ "MC5727",
+ },
+ {
+ USB_VENDOR_SIERRA, USB_PRODUCT_SIERRA_T598,
+ 0,
+ "Sierra Wireless",
+ "T598",
+ },
+ {
+ USB_VENDOR_SIERRA, USB_PRODUCT_SIERRA_T11,
+ 0,
+ "Sierra Wireless",
+ "T11",
+ },
+ {
+ USB_VENDOR_SIERRA, USB_PRODUCT_SIERRA_AC402,
+ 0,
+ "Sierra Wireless",
+ "AC402",
+ },
+ {
+ USB_VENDOR_SIERRA, USB_PRODUCT_SIERRA_MC5728,
+ 0,
+ "Sierra Wireless",
+ "MC5728",
+ },
+ {
+ USB_VENDOR_SIERRA, USB_PRODUCT_SIERRA_E0029,
+ 0,
+ "Sierra Wireless",
+ "E0029",
+ },
+ {
+ USB_VENDOR_SIERRA, USB_PRODUCT_SIERRA_AIRCARD580,
+ 0,
+ "Sierra Wireless",
+ "Sierra Wireless AirCard 580",
+ },
+ {
+ USB_VENDOR_SIERRA, USB_PRODUCT_SIERRA_AC595U,
+ 0,
+ "Sierra Wireless",
+ "Sierra Wireless AirCard 595U",
+ },
+ {
+ USB_VENDOR_SIERRA, USB_PRODUCT_SIERRA_MC5720,
+ 0,
+ "Sierra Wireless",
+ "MC5720 Wireless Modem",
+ },
+ {
+ USB_VENDOR_SIERRA, USB_PRODUCT_SIERRA_MINI5725,
+ 0,
+ "Sierra Wireless",
+ "Sierra Wireless miniPCI 5275",
+ },
+ {
+ USB_VENDOR_SIERRA, USB_PRODUCT_SIERRA_MC5727_2,
+ 0,
+ "Sierra Wireless",
+ "MC5727",
+ },
+ {
+ USB_VENDOR_SIERRA, USB_PRODUCT_SIERRA_MC8755_2,
+ 0,
+ "Sierra Wireless",
+ "MC8755",
+ },
+ {
+ USB_VENDOR_SIERRA, USB_PRODUCT_SIERRA_MC8765,
+ 0,
+ "Sierra Wireless",
+ "MC8765",
+ },
+ {
+ USB_VENDOR_SIERRA, USB_PRODUCT_SIERRA_MC8755,
+ 0,
+ "Sierra Wireless",
+ "MC8755",
+ },
+ {
+ USB_VENDOR_SIERRA, USB_PRODUCT_SIERRA_MC8765_2,
+ 0,
+ "Sierra Wireless",
+ "MC8765",
+ },
+ {
+ USB_VENDOR_SIERRA, USB_PRODUCT_SIERRA_MC8755_4,
+ 0,
+ "Sierra Wireless",
+ "MC8755",
+ },
+ {
+ USB_VENDOR_SIERRA, USB_PRODUCT_SIERRA_MC8765_3,
+ 0,
+ "Sierra Wireless",
+ "MC8765",
+ },
+ {
+ USB_VENDOR_SIERRA, USB_PRODUCT_SIERRA_AC875U,
+ 0,
+ "Sierra Wireless",
+ "AC875U HSDPA USB Modem",
+ },
+ {
+ USB_VENDOR_SIERRA, USB_PRODUCT_SIERRA_MC8755_3,
+ 0,
+ "Sierra Wireless",
+ "MC8755 HSDPA",
+ },
+ {
+ USB_VENDOR_SIERRA, USB_PRODUCT_SIERRA_MC8775_2,
+ 0,
+ "Sierra Wireless",
+ "MC8775",
+ },
+ {
+ USB_VENDOR_SIERRA, USB_PRODUCT_SIERRA_MC8775,
+ 0,
+ "Sierra Wireless",
+ "MC8775",
+ },
+ {
+ USB_VENDOR_SIERRA, USB_PRODUCT_SIERRA_AC875,
+ 0,
+ "Sierra Wireless",
+ "Sierra Wireless AirCard 875",
+ },
+ {
+ USB_VENDOR_SIERRA, USB_PRODUCT_SIERRA_AC875U_2,
+ 0,
+ "Sierra Wireless",
+ "AC875U",
+ },
+ {
+ USB_VENDOR_SIERRA, USB_PRODUCT_SIERRA_AC875E,
+ 0,
+ "Sierra Wireless",
+ "AC875E",
+ },
+ {
+ USB_VENDOR_SIERRA, USB_PRODUCT_SIERRA_MC8780,
+ 0,
+ "Sierra Wireless",
+ "MC8780",
+ },
+ {
+ USB_VENDOR_SIERRA, USB_PRODUCT_SIERRA_MC8781,
+ 0,
+ "Sierra Wireless",
+ "MC8781",
+ },
+ {
+ USB_VENDOR_SIERRA, USB_PRODUCT_SIERRA_MC8780_2,
+ 0,
+ "Sierra Wireless",
+ "MC8780",
+ },
+ {
+ USB_VENDOR_SIERRA, USB_PRODUCT_SIERRA_MC8781_2,
+ 0,
+ "Sierra Wireless",
+ "MC8781",
+ },
+ {
+ USB_VENDOR_SIERRA, USB_PRODUCT_SIERRA_MC8780_3,
+ 0,
+ "Sierra Wireless",
+ "MC8780",
+ },
+ {
+ USB_VENDOR_SIERRA, USB_PRODUCT_SIERRA_MC8781_3,
+ 0,
+ "Sierra Wireless",
+ "MC8781",
+ },
+ {
+ USB_VENDOR_SIERRA, USB_PRODUCT_SIERRA_MC8785,
+ 0,
+ "Sierra Wireless",
+ "MC8785",
+ },
+ {
+ USB_VENDOR_SIERRA, USB_PRODUCT_SIERRA_MC8785_2,
+ 0,
+ "Sierra Wireless",
+ "MC8785",
+ },
+ {
+ USB_VENDOR_SIERRA, USB_PRODUCT_SIERRA_MC8790,
+ 0,
+ "Sierra Wireless",
+ "MC8790",
+ },
+ {
+ USB_VENDOR_SIERRA, USB_PRODUCT_SIERRA_MC8791,
+ 0,
+ "Sierra Wireless",
+ "MC8791",
+ },
+ {
+ USB_VENDOR_SIERRA, USB_PRODUCT_SIERRA_MC8792,
+ 0,
+ "Sierra Wireless",
+ "MC8792",
+ },
+ {
+ USB_VENDOR_SIERRA, USB_PRODUCT_SIERRA_AC880,
+ 0,
+ "Sierra Wireless",
+ "Sierra Wireless AirCard 880",
+ },
+ {
+ USB_VENDOR_SIERRA, USB_PRODUCT_SIERRA_AC881,
+ 0,
+ "Sierra Wireless",
+ "Sierra Wireless AirCard 881",
+ },
+ {
+ USB_VENDOR_SIERRA, USB_PRODUCT_SIERRA_AC880E,
+ 0,
+ "Sierra Wireless",
+ "Sierra Wireless AirCard 880E",
+ },
+ {
+ USB_VENDOR_SIERRA, USB_PRODUCT_SIERRA_AC881E,
+ 0,
+ "Sierra Wireless",
+ "Sierra Wireless AirCard 881E",
+ },
+ {
+ USB_VENDOR_SIERRA, USB_PRODUCT_SIERRA_AC880U,
+ 0,
+ "Sierra Wireless",
+ "Sierra Wireless AirCard 880U",
+ },
+ {
+ USB_VENDOR_SIERRA, USB_PRODUCT_SIERRA_AC881U,
+ 0,
+ "Sierra Wireless",
+ "Sierra Wireless AirCard 881U",
+ },
+ {
+ USB_VENDOR_SIERRA, USB_PRODUCT_SIERRA_AC885E,
+ 0,
+ "Sierra Wireless",
+ "AC885E",
+ },
+ {
+ USB_VENDOR_SIERRA, USB_PRODUCT_SIERRA_AC885E_2,
+ 0,
+ "Sierra Wireless",
+ "AC885E",
+ },
+ {
+ USB_VENDOR_SIERRA, USB_PRODUCT_SIERRA_AC885U,
+ 0,
+ "Sierra Wireless",
+ "Sierra Wireless AirCard 885U",
+ },
+ {
+ USB_VENDOR_SIERRA, USB_PRODUCT_SIERRA_C888,
+ 0,
+ "Sierra Wireless",
+ "C888",
+ },
+ {
+ USB_VENDOR_SIERRA, USB_PRODUCT_SIERRA_C22,
+ 0,
+ "Sierra Wireless",
+ "C22",
+ },
+ {
+ USB_VENDOR_SIERRA, USB_PRODUCT_SIERRA_E6892,
+ 0,
+ "Sierra Wireless",
+ "E6892",
+ },
+ {
+ USB_VENDOR_SIERRA, USB_PRODUCT_SIERRA_E6893,
+ 0,
+ "Sierra Wireless",
+ "E6893",
+ },
+ {
+ USB_VENDOR_SIERRA, USB_PRODUCT_SIERRA_MC8700,
+ 0,
+ "Sierra Wireless",
+ "MC8700",
+ },
+ {
+ USB_VENDOR_SIERRA, USB_PRODUCT_SIERRA_AIRCARD875,
+ 0,
+ "Sierra Wireless",
+ "Aircard 875 HSDPA",
+ },
+ {
+ USB_VENDOR_SIERRA, USB_PRODUCT_SIERRA_TRUINSTALL,
+ 0,
+ "Sierra Wireless",
+ "Aircard Tru Installer",
+ },
+ {
+ USB_VENDOR_SIGMATEL, USB_PRODUCT_SIGMATEL_WBT_3052,
+ 0,
+ "Sigmatel",
+ "WBT-3052 IrDA/USB Bridge",
+ },
+ {
+ USB_VENDOR_SIGMATEL, USB_PRODUCT_SIGMATEL_I_BEAD100,
+ 0,
+ "Sigmatel",
+ "i-Bead 100 MP3 Player",
+ },
+ {
+ USB_VENDOR_SIIG, USB_PRODUCT_SIIG_DIGIFILMREADER,
+ 0,
+ "SIIG",
+ "DigiFilm-Combo Reader",
+ },
+ {
+ USB_VENDOR_SIIG, USB_PRODUCT_SIIG_WINTERREADER,
+ 0,
+ "SIIG",
+ "WINTERREADER Reader",
+ },
+ {
+ USB_VENDOR_SIIG2, USB_PRODUCT_SIIG2_USBTOETHER,
+ 0,
+ "SIIG",
+ "USB TO Ethernet",
+ },
+ {
+ USB_VENDOR_SIIG2, USB_PRODUCT_SIIG2_US2308,
+ 0,
+ "SIIG",
+ "Serial",
+ },
+ {
+ USB_VENDOR_SILICOM, USB_PRODUCT_SILICOM_U2E,
+ 0,
+ "Silicom",
+ "U2E",
+ },
+ {
+ USB_VENDOR_SILICOM, USB_PRODUCT_SILICOM_GPE,
+ 0,
+ "Silicom",
+ "Psion Gold Port Ethernet",
+ },
+ {
+ USB_VENDOR_SILABS, USB_PRODUCT_SILABS_VSTABI,
+ 0,
+ "Silicon Labs",
+ "Vstabi",
+ },
+ {
+ USB_VENDOR_SILABS, USB_PRODUCT_SILABS_ARKHAM_DS101_M,
+ 0,
+ "Silicon Labs",
+ "Arkham DS101 Monitor",
+ },
+ {
+ USB_VENDOR_SILABS, USB_PRODUCT_SILABS_ARKHAM_DS101_A,
+ 0,
+ "Silicon Labs",
+ "Arkham DS101 Adapter",
+ },
+ {
+ USB_VENDOR_SILABS, USB_PRODUCT_SILABS_BSM7DUSB,
+ 0,
+ "Silicon Labs",
+ "BSM7-D-USB",
+ },
+ {
+ USB_VENDOR_SILABS, USB_PRODUCT_SILABS_POLOLU,
+ 0,
+ "Silicon Labs",
+ "Pololu Serial",
+ },
+ {
+ USB_VENDOR_SILABS, USB_PRODUCT_SILABS_CYGNAL_DEBUG,
+ 0,
+ "Silicon Labs",
+ "Cygnal Debug Adapter",
+ },
+ {
+ USB_VENDOR_SILABS, USB_PRODUCT_SILABS_SB_PARAMOUNT_ME,
+ 0,
+ "Silicon Labs",
+ "Software Bisque Paramount ME",
+ },
+ {
+ USB_VENDOR_SILABS, USB_PRODUCT_SILABS_SAEL,
+ 0,
+ "Silicon Labs",
+ "SA-EL USB",
+ },
+ {
+ USB_VENDOR_SILABS, USB_PRODUCT_SILABS_GSM2228,
+ 0,
+ "Silicon Labs",
+ "Enfora GSM2228 USB",
+ },
+ {
+ USB_VENDOR_SILABS, USB_PRODUCT_SILABS_ARGUSISP,
+ 0,
+ "Silicon Labs",
+ "Argussoft ISP",
+ },
+ {
+ USB_VENDOR_SILABS, USB_PRODUCT_SILABS_IMS_USB_RS422,
+ 0,
+ "Silicon Labs",
+ "IMS USB-RS422",
+ },
+ {
+ USB_VENDOR_SILABS, USB_PRODUCT_SILABS_CRUMB128,
+ 0,
+ "Silicon Labs",
+ "Crumb128 board",
+ },
+ {
+ USB_VENDOR_SILABS, USB_PRODUCT_SILABS_DEGREE,
+ 0,
+ "Silicon Labs",
+ "Degree Controls Inc",
+ },
+ {
+ USB_VENDOR_SILABS, USB_PRODUCT_SILABS_TRACIENT,
+ 0,
+ "Silicon Labs",
+ "Tracient RFID",
+ },
+ {
+ USB_VENDOR_SILABS, USB_PRODUCT_SILABS_TRAQMATE,
+ 0,
+ "Silicon Labs",
+ "Track Systems Traqmate",
+ },
+ {
+ USB_VENDOR_SILABS, USB_PRODUCT_SILABS_SUUNTO,
+ 0,
+ "Silicon Labs",
+ "Suunto Sports Instrument",
+ },
+ {
+ USB_VENDOR_SILABS, USB_PRODUCT_SILABS_ARYGON_MIFARE,
+ 0,
+ "Silicon Labs",
+ "Arygon Mifare RFID reader",
+ },
+ {
+ USB_VENDOR_SILABS, USB_PRODUCT_SILABS_BURNSIDE,
+ 0,
+ "Silicon Labs",
+ "Burnside Telecon Deskmobile",
+ },
+ {
+ USB_VENDOR_SILABS, USB_PRODUCT_SILABS_TAMSMASTER,
+ 0,
+ "Silicon Labs",
+ "Tams Master Easy Control",
+ },
+ {
+ USB_VENDOR_SILABS, USB_PRODUCT_SILABS_WMRBATT,
+ 0,
+ "Silicon Labs",
+ "WMR RIGblaster Plug&Play",
+ },
+ {
+ USB_VENDOR_SILABS, USB_PRODUCT_SILABS_WMRRIGBLASTER,
+ 0,
+ "Silicon Labs",
+ "WMR RIGblaster Plug&Play",
+ },
+ {
+ USB_VENDOR_SILABS, USB_PRODUCT_SILABS_WMRRIGTALK,
+ 0,
+ "Silicon Labs",
+ "WMR RIGtalk RT1",
+ },
+ {
+ USB_VENDOR_SILABS, USB_PRODUCT_SILABS_HELICOM,
+ 0,
+ "Silicon Labs",
+ "Helicomm IP-Link 1220-DVM",
+ },
+ {
+ USB_VENDOR_SILABS, USB_PRODUCT_SILABS_AVIT_USB_TTL,
+ 0,
+ "Silicon Labs",
+ "AVIT Research USB-TTL",
+ },
+ {
+ USB_VENDOR_SILABS, USB_PRODUCT_SILABS_MJS_TOSLINK,
+ 0,
+ "Silicon Labs",
+ "MJS USB-TOSLINk",
+ },
+ {
+ USB_VENDOR_SILABS, USB_PRODUCT_SILABS_WAVIT,
+ 0,
+ "Silicon Labs",
+ "ThinkOptics WavIt",
+ },
+ {
+ USB_VENDOR_SILABS, USB_PRODUCT_SILABS_MSD_DASHHAWK,
+ 0,
+ "Silicon Labs",
+ "MSD DashHawk",
+ },
+ {
+ USB_VENDOR_SILABS, USB_PRODUCT_SILABS_INSYS_MODEM,
+ 0,
+ "Silicon Labs",
+ "INSYS Modem",
+ },
+ {
+ USB_VENDOR_SILABS, USB_PRODUCT_SILABS_LIPOWSKY_JTAG,
+ 0,
+ "Silicon Labs",
+ "Lipowsky Baby-JTAG",
+ },
+ {
+ USB_VENDOR_SILABS, USB_PRODUCT_SILABS_LIPOWSKY_LIN,
+ 0,
+ "Silicon Labs",
+ "Lipowsky Baby-LIN",
+ },
+ {
+ USB_VENDOR_SILABS, USB_PRODUCT_SILABS_AEROCOMM,
+ 0,
+ "Silicon Labs",
+ "Aerocomm Radio",
+ },
+ {
+ USB_VENDOR_SILABS, USB_PRODUCT_SILABS_ZEPHYR_BIO,
+ 0,
+ "Silicon Labs",
+ "Zephyr Bioharness",
+ },
+ {
+ USB_VENDOR_SILABS, USB_PRODUCT_SILABS_EMS_C1007,
+ 0,
+ "Silicon Labs",
+ "EMS C1007 HF RFID controller",
+ },
+ {
+ USB_VENDOR_SILABS, USB_PRODUCT_SILABS_LIPOWSKY_HARP,
+ 0,
+ "Silicon Labs",
+ "Lipowsky HARP-1",
+ },
+ {
+ USB_VENDOR_SILABS, USB_PRODUCT_SILABS_C2_EDGE_MODEM,
+ 0,
+ "Silicon Labs",
+ "Commander 2 EDGE(GSM) Modem",
+ },
+ {
+ USB_VENDOR_SILABS, USB_PRODUCT_SILABS_CYGNAL_GPS,
+ 0,
+ "Silicon Labs",
+ "Cygnal Fasttrax GPS",
+ },
+ {
+ USB_VENDOR_SILABS, USB_PRODUCT_SILABS_TELEGESYS_ETRX2,
+ 0,
+ "Silicon Labs",
+ "Telegesys ETRX2USB",
+ },
+ {
+ USB_VENDOR_SILABS, USB_PRODUCT_SILABS_PROCYON_AVS,
+ 0,
+ "Silicon Labs",
+ "Procyon AVS",
+ },
+ {
+ USB_VENDOR_SILABS, USB_PRODUCT_SILABS_MC35PU,
+ 0,
+ "Silicon Labs",
+ "MC35pu",
+ },
+ {
+ USB_VENDOR_SILABS, USB_PRODUCT_SILABS_CYGNAL,
+ 0,
+ "Silicon Labs",
+ "Cygnal",
+ },
+ {
+ USB_VENDOR_SILABS, USB_PRODUCT_SILABS_AMBER_AMB2560,
+ 0,
+ "Silicon Labs",
+ "Amber Wireless AMB2560",
+ },
+ {
+ USB_VENDOR_SILABS, USB_PRODUCT_SILABS_KYOCERA_GPS,
+ 0,
+ "Silicon Labs",
+ "Kyocera GPS",
+ },
+ {
+ USB_VENDOR_SILABS, USB_PRODUCT_SILABS_BEI_VCP,
+ 0,
+ "Silicon Labs",
+ "BEI USB Sensor (VCP)",
+ },
+ {
+ USB_VENDOR_SILABS, USB_PRODUCT_SILABS_CP2102,
+ 0,
+ "Silicon Labs",
+ "SILABS USB UART",
+ },
+ {
+ USB_VENDOR_SILABS, USB_PRODUCT_SILABS_CP210X_2,
+ 0,
+ "Silicon Labs",
+ "CP210x Serial",
+ },
+ {
+ USB_VENDOR_SILABS, USB_PRODUCT_SILABS_INFINITY_MIC,
+ 0,
+ "Silicon Labs",
+ "Infinity GPS-MIC-1 Radio Monophone",
+ },
+ {
+ USB_VENDOR_SILABS, USB_PRODUCT_SILABS_USBSCOPE50,
+ 0,
+ "Silicon Labs",
+ "USBscope50",
+ },
+ {
+ USB_VENDOR_SILABS, USB_PRODUCT_SILABS_USBWAVE12,
+ 0,
+ "Silicon Labs",
+ "USBwave12",
+ },
+ {
+ USB_VENDOR_SILABS, USB_PRODUCT_SILABS_USBPULSE100,
+ 0,
+ "Silicon Labs",
+ "USBpulse100",
+ },
+ {
+ USB_VENDOR_SILABS, USB_PRODUCT_SILABS_USBCOUNT50,
+ 0,
+ "Silicon Labs",
+ "USBcount50",
+ },
+ {
+ USB_VENDOR_SILABS2, USB_PRODUCT_SILABS2_DCU11CLONE,
+ 0,
+ "SILABS2",
+ "DCU-11 clone",
+ },
+ {
+ USB_VENDOR_SILABS3, USB_PRODUCT_SILABS3_GPRS_MODEM,
+ 0,
+ "Silicon Labs",
+ "GPRS Modem",
+ },
+ {
+ USB_VENDOR_SILABS4, USB_PRODUCT_SILABS4_100EU_MODEM,
+ 0,
+ "Silicon Labs",
+ "GPRS Modem 100EU",
+ },
+ {
+ USB_VENDOR_SILICONPORTALS, USB_PRODUCT_SILICONPORTALS_YAPPH_NF,
+ 0,
+ "Silicon Portals",
+ "YAP Phone (no firmware)",
+ },
+ {
+ USB_VENDOR_SILICONPORTALS, USB_PRODUCT_SILICONPORTALS_YAPPHONE,
+ 0,
+ "Silicon Portals",
+ "YAP Phone",
+ },
+ {
+ USB_VENDOR_SIRIUS, USB_PRODUCT_SIRIUS_ROADSTER,
+ 0,
+ "Sirius Technologies",
+ "NetComm Roadster II 56 USB",
+ },
+ {
+ USB_VENDOR_SITECOM, USB_PRODUCT_SITECOM_LN029,
+ 0,
+ "Sitecom",
+ "USB 2.0 Ethernet",
+ },
+ {
+ USB_VENDOR_SITECOM, USB_PRODUCT_SITECOM_SERIAL,
+ 0,
+ "Sitecom",
+ "USB to serial cable (v2)",
+ },
+ {
+ USB_VENDOR_SITECOM2, USB_PRODUCT_SITECOM2_WL022,
+ 0,
+ "Sitecom",
+ "WL-022",
+ },
+ {
+ USB_VENDOR_SITECOMEU, USB_PRODUCT_SITECOMEU_RT2870_1,
+ 0,
+ "Sitecom Europe",
+ "RT2870",
+ },
+ {
+ USB_VENDOR_SITECOMEU, USB_PRODUCT_SITECOMEU_WL168V1,
+ 0,
+ "Sitecom Europe",
+ "WL-168 v1",
+ },
+ {
+ USB_VENDOR_SITECOMEU, USB_PRODUCT_SITECOMEU_WL168V4,
+ 0,
+ "Sitecom Europe",
+ "WL-168 v4",
+ },
+ {
+ USB_VENDOR_SITECOMEU, USB_PRODUCT_SITECOMEU_RT2870_2,
+ 0,
+ "Sitecom Europe",
+ "RT2870",
+ },
+ {
+ USB_VENDOR_SITECOMEU, USB_PRODUCT_SITECOMEU_RT2870_3,
+ 0,
+ "Sitecom Europe",
+ "RT2870",
+ },
+ {
+ USB_VENDOR_SITECOMEU, USB_PRODUCT_SITECOMEU_RT2870_4,
+ 0,
+ "Sitecom Europe",
+ "RT2870",
+ },
+ {
+ USB_VENDOR_SITECOMEU, USB_PRODUCT_SITECOMEU_RT2770,
+ 0,
+ "Sitecom Europe",
+ "RT2770",
+ },
+ {
+ USB_VENDOR_SITECOMEU, USB_PRODUCT_SITECOMEU_RT3070_2,
+ 0,
+ "Sitecom Europe",
+ "RT3070",
+ },
+ {
+ USB_VENDOR_SITECOMEU, USB_PRODUCT_SITECOMEU_RT3070_3,
+ 0,
+ "Sitecom Europe",
+ "RT3070",
+ },
+ {
+ USB_VENDOR_SITECOMEU, USB_PRODUCT_SITECOMEU_RT3070_4,
+ 0,
+ "Sitecom Europe",
+ "RT3070",
+ },
+ {
+ USB_VENDOR_SITECOMEU, USB_PRODUCT_SITECOMEU_RT3070,
+ 0,
+ "Sitecom Europe",
+ "RT3070",
+ },
+ {
+ USB_VENDOR_SITECOMEU, USB_PRODUCT_SITECOMEU_WL608,
+ 0,
+ "Sitecom Europe",
+ "WL-608",
+ },
+ {
+ USB_VENDOR_SITECOMEU, USB_PRODUCT_SITECOMEU_RT3071,
+ 0,
+ "Sitecom Europe",
+ "RT3071",
+ },
+ {
+ USB_VENDOR_SITECOMEU, USB_PRODUCT_SITECOMEU_RT3072_1,
+ 0,
+ "Sitecom Europe",
+ "RT3072",
+ },
+ {
+ USB_VENDOR_SITECOMEU, USB_PRODUCT_SITECOMEU_RT3072_2,
+ 0,
+ "Sitecom Europe",
+ "RT3072",
+ },
+ {
+ USB_VENDOR_SITECOMEU, USB_PRODUCT_SITECOMEU_RT3072_3,
+ 0,
+ "Sitecom Europe",
+ "RT3072",
+ },
+ {
+ USB_VENDOR_SITECOMEU, USB_PRODUCT_SITECOMEU_RT3072_4,
+ 0,
+ "Sitecom Europe",
+ "RT3072",
+ },
+ {
+ USB_VENDOR_SITECOMEU, USB_PRODUCT_SITECOMEU_RT3072_5,
+ 0,
+ "Sitecom Europe",
+ "RT3072",
+ },
+ {
+ USB_VENDOR_SITECOMEU, USB_PRODUCT_SITECOMEU_RT3072_6,
+ 0,
+ "Sitecom Europe",
+ "RT3072",
+ },
+ {
+ USB_VENDOR_SITECOMEU, USB_PRODUCT_SITECOMEU_LN028,
+ 0,
+ "Sitecom Europe",
+ "LN-028",
+ },
+ {
+ USB_VENDOR_SITECOMEU, USB_PRODUCT_SITECOMEU_WL113,
+ 0,
+ "Sitecom Europe",
+ "WL-113",
+ },
+ {
+ USB_VENDOR_SITECOMEU, USB_PRODUCT_SITECOMEU_ZD1211B,
+ 0,
+ "Sitecom Europe",
+ "ZD1211B",
+ },
+ {
+ USB_VENDOR_SITECOMEU, USB_PRODUCT_SITECOMEU_WL172,
+ 0,
+ "Sitecom Europe",
+ "WL-172",
+ },
+ {
+ USB_VENDOR_SITECOMEU, USB_PRODUCT_SITECOMEU_WL113R2,
+ 0,
+ "Sitecom Europe",
+ "WL-113 rev 2",
+ },
+ {
+ USB_VENDOR_SKANHEX, USB_PRODUCT_SKANHEX_MD_7425,
+ 0,
+ "Skanhex Technology, Inc.",
+ "MD 7425 Camera",
+ },
+ {
+ USB_VENDOR_SKANHEX, USB_PRODUCT_SKANHEX_SX_520Z,
+ 0,
+ "Skanhex Technology, Inc.",
+ "SX 520z Camera",
+ },
+ {
+ USB_VENDOR_SMART, USB_PRODUCT_SMART_PL2303,
+ 0,
+ "Smart Technologies",
+ "Serial adapter",
+ },
+ {
+ USB_VENDOR_SMARTBRIDGES, USB_PRODUCT_SMARTBRIDGES_SMARTLINK,
+ 0,
+ "SmartBridges",
+ "SmartLink USB Ethernet",
+ },
+ {
+ USB_VENDOR_SMARTBRIDGES, USB_PRODUCT_SMARTBRIDGES_SMARTNIC,
+ 0,
+ "SmartBridges",
+ "smartNIC 2 PnP Ethernet",
+ },
+ {
+ USB_VENDOR_SMC, USB_PRODUCT_SMC_2102USB,
+ 0,
+ "Standard Microsystems",
+ "10Mbps Ethernet",
+ },
+ {
+ USB_VENDOR_SMC, USB_PRODUCT_SMC_2202USB,
+ 0,
+ "Standard Microsystems",
+ "10/100 Ethernet",
+ },
+ {
+ USB_VENDOR_SMC, USB_PRODUCT_SMC_2206USB,
+ 0,
+ "Standard Microsystems",
+ "EZ Connect USB Ethernet",
+ },
+ {
+ USB_VENDOR_SMC, USB_PRODUCT_SMC_2862WG,
+ 0,
+ "Standard Microsystems",
+ "EZ Connect Wireless Adapter",
+ },
+ {
+ USB_VENDOR_SMC2, USB_PRODUCT_SMC2_2020HUB,
+ 0,
+ "Standard Microsystems",
+ "USB Hub",
+ },
+ {
+ USB_VENDOR_SMC2, USB_PRODUCT_SMC2_2514HUB,
+ 0,
+ "Standard Microsystems",
+ "USB Hub",
+ },
+ {
+ USB_VENDOR_SMC3, USB_PRODUCT_SMC3_2662WUSB,
+ 0,
+ "Standard Microsystems",
+ "2662W-AR Wireless",
+ },
+ {
+ USB_VENDOR_SOHOWARE, USB_PRODUCT_SOHOWARE_NUB100,
+ 0,
+ "SOHOware",
+ "10/100 USB Ethernet",
+ },
+ {
+ USB_VENDOR_SOHOWARE, USB_PRODUCT_SOHOWARE_NUB110,
+ 0,
+ "SOHOware",
+ "10/100 USB Ethernet",
+ },
+ {
+ USB_VENDOR_SOLIDYEAR, USB_PRODUCT_SOLIDYEAR_KEYBOARD,
+ 0,
+ "Solid Year",
+ "Solid Year USB keyboard",
+ },
+ {
+ USB_VENDOR_SONY, USB_PRODUCT_SONY_DSC,
+ 0,
+ "Sony",
+ "DSC cameras",
+ },
+ {
+ USB_VENDOR_SONY, USB_PRODUCT_SONY_MS_NW_MS7,
+ 0,
+ "Sony",
+ "Memorystick NW-MS7",
+ },
+ {
+ USB_VENDOR_SONY, USB_PRODUCT_SONY_PORTABLE_HDD_V2,
+ 0,
+ "Sony",
+ "Portable USB Harddrive V2",
+ },
+ {
+ USB_VENDOR_SONY, USB_PRODUCT_SONY_MSACUS1,
+ 0,
+ "Sony",
+ "Memorystick MSAC-US1",
+ },
+ {
+ USB_VENDOR_SONY, USB_PRODUCT_SONY_HANDYCAM,
+ 0,
+ "Sony",
+ "Handycam",
+ },
+ {
+ USB_VENDOR_SONY, USB_PRODUCT_SONY_MSC,
+ 0,
+ "Sony",
+ "MSC memory stick slot",
+ },
+ {
+ USB_VENDOR_SONY, USB_PRODUCT_SONY_CLIE_35,
+ 0,
+ "Sony",
+ "Sony Clie v3.5",
+ },
+ {
+ USB_VENDOR_SONY, USB_PRODUCT_SONY_MS_PEG_N760C,
+ 0,
+ "Sony",
+ "PEG N760c Memorystick",
+ },
+ {
+ USB_VENDOR_SONY, USB_PRODUCT_SONY_CLIE_40,
+ 0,
+ "Sony",
+ "Sony Clie v4.0",
+ },
+ {
+ USB_VENDOR_SONY, USB_PRODUCT_SONY_MS_MSC_U03,
+ 0,
+ "Sony",
+ "Memorystick MSC-U03",
+ },
+ {
+ USB_VENDOR_SONY, USB_PRODUCT_SONY_CLIE_40_MS,
+ 0,
+ "Sony",
+ "Sony Clie v4.0 Memory Stick slot",
+ },
+ {
+ USB_VENDOR_SONY, USB_PRODUCT_SONY_CLIE_S360,
+ 0,
+ "Sony",
+ "Sony Clie s360",
+ },
+ {
+ USB_VENDOR_SONY, USB_PRODUCT_SONY_CLIE_41_MS,
+ 0,
+ "Sony",
+ "Sony Clie v4.1 Memory Stick slot",
+ },
+ {
+ USB_VENDOR_SONY, USB_PRODUCT_SONY_CLIE_41,
+ 0,
+ "Sony",
+ "Sony Clie v4.1",
+ },
+ {
+ USB_VENDOR_SONY, USB_PRODUCT_SONY_CLIE_NX60,
+ 0,
+ "Sony",
+ "Sony Clie nx60",
+ },
+ {
+ USB_VENDOR_SONY, USB_PRODUCT_SONY_CLIE_TH55,
+ 0,
+ "Sony",
+ "Sony Clie th55",
+ },
+ {
+ USB_VENDOR_SONY, USB_PRODUCT_SONY_CLIE_TJ37,
+ 0,
+ "Sony",
+ "Sony Clie tj37",
+ },
+ {
+ USB_VENDOR_SONY, USB_PRODUCT_SONY_RF_RECEIVER,
+ 0,
+ "Sony",
+ "Sony RF mouse/kbd Receiver VGP-WRC1",
+ },
+ {
+ USB_VENDOR_SONY, USB_PRODUCT_SONY_QN3,
+ 0,
+ "Sony",
+ "Sony QN3 CMD-Jxx phone cable",
+ },
+ {
+ USB_VENDOR_SONYERICSSON, USB_PRODUCT_SONYERICSSON_DCU10,
+ 0,
+ "Sony Ericsson",
+ "DCU-10 Phone Data Cable",
+ },
+ {
+ USB_VENDOR_SONYERICSSON, USB_PRODUCT_SONYERICSSON_DATAPILOT,
+ 0,
+ "Sony Ericsson",
+ "Datapilot Phone Cable",
+ },
+ {
+ USB_VENDOR_SOURCENEXT, USB_PRODUCT_SOURCENEXT_KEIKAI8,
+ 0,
+ "SOURCENEXT",
+ "KeikaiDenwa 8",
+ },
+ {
+ USB_VENDOR_SOURCENEXT, USB_PRODUCT_SOURCENEXT_KEIKAI8_CHG,
+ 0,
+ "SOURCENEXT",
+ "KeikaiDenwa 8 with charger",
+ },
+ {
+ USB_VENDOR_SPARKLAN, USB_PRODUCT_SPARKLAN_RT2573,
+ 0,
+ "SparkLAN",
+ "RT2573",
+ },
+ {
+ USB_VENDOR_SPARKLAN, USB_PRODUCT_SPARKLAN_RT2870_1,
+ 0,
+ "SparkLAN",
+ "RT2870",
+ },
+ {
+ USB_VENDOR_SPARKLAN, USB_PRODUCT_SPARKLAN_RT3070,
+ 0,
+ "SparkLAN",
+ "RT3070",
+ },
+ {
+ USB_VENDOR_SPEEDDRAGON, USB_PRODUCT_SPEEDDRAGON_MS3303H,
+ 0,
+ "Speed Dragon Multimedia",
+ "MS3303H Serial",
+ },
+ {
+ USB_VENDOR_SPHAIRON, USB_PRODUCT_SPHAIRON_UB801R,
+ 0,
+ "Sphairon Access Systems GmbH",
+ "UB801R",
+ },
+ {
+ USB_VENDOR_STELERA, USB_PRODUCT_STELERA_ZEROCD,
+ 0,
+ "Stelera Wireless",
+ "Zerocd Installer",
+ },
+ {
+ USB_VENDOR_STELERA, USB_PRODUCT_STELERA_C105,
+ 0,
+ "Stelera Wireless",
+ "Stelera/Bandrish C105 USB",
+ },
+ {
+ USB_VENDOR_STELERA, USB_PRODUCT_STELERA_E1003,
+ 0,
+ "Stelera Wireless",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_STELERA, USB_PRODUCT_STELERA_E1004,
+ 0,
+ "Stelera Wireless",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_STELERA, USB_PRODUCT_STELERA_E1005,
+ 0,
+ "Stelera Wireless",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_STELERA, USB_PRODUCT_STELERA_E1006,
+ 0,
+ "Stelera Wireless",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_STELERA, USB_PRODUCT_STELERA_E1007,
+ 0,
+ "Stelera Wireless",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_STELERA, USB_PRODUCT_STELERA_E1008,
+ 0,
+ "Stelera Wireless",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_STELERA, USB_PRODUCT_STELERA_E1009,
+ 0,
+ "Stelera Wireless",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_STELERA, USB_PRODUCT_STELERA_E100A,
+ 0,
+ "Stelera Wireless",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_STELERA, USB_PRODUCT_STELERA_E100B,
+ 0,
+ "Stelera Wireless",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_STELERA, USB_PRODUCT_STELERA_E100C,
+ 0,
+ "Stelera Wireless",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_STELERA, USB_PRODUCT_STELERA_E100D,
+ 0,
+ "Stelera Wireless",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_STELERA, USB_PRODUCT_STELERA_E100E,
+ 0,
+ "Stelera Wireless",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_STELERA, USB_PRODUCT_STELERA_E100F,
+ 0,
+ "Stelera Wireless",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_STELERA, USB_PRODUCT_STELERA_E1010,
+ 0,
+ "Stelera Wireless",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_STELERA, USB_PRODUCT_STELERA_E1011,
+ 0,
+ "Stelera Wireless",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_STELERA, USB_PRODUCT_STELERA_E1012,
+ 0,
+ "Stelera Wireless",
+ "3G modem",
+ },
+ {
+ USB_VENDOR_MPMAN, USB_PRODUCT_MPMAN_MPF400_1,
+ 0,
+ "MpMan",
+ "MPF400 Music Player 1Go",
+ },
+ {
+ USB_VENDOR_MPMAN, USB_PRODUCT_MPMAN_MPF400_2,
+ 0,
+ "MpMan",
+ "MPF400 Music Player 2Go",
+ },
+ {
+ USB_VENDOR_STMICRO, USB_PRODUCT_STMICRO_BIOCPU,
+ 0,
+ "STMicroelectronics",
+ "Biometric Coprocessor",
+ },
+ {
+ USB_VENDOR_STMICRO, USB_PRODUCT_STMICRO_COMMUNICATOR,
+ 0,
+ "STMicroelectronics",
+ "USB Communicator",
+ },
+ {
+ USB_VENDOR_STSN, USB_PRODUCT_STSN_STSN0001,
+ 0,
+ "STSN",
+ "Internet Access Device",
+ },
+ {
+ USB_VENDOR_SUNTAC, USB_PRODUCT_SUNTAC_DS96L,
+ 0,
+ "SUN Corporation",
+ "SUNTAC U-Cable type D2",
+ },
+ {
+ USB_VENDOR_SUNTAC, USB_PRODUCT_SUNTAC_PS64P1,
+ 0,
+ "SUN Corporation",
+ "SUNTAC U-Cable type P1",
+ },
+ {
+ USB_VENDOR_SUNTAC, USB_PRODUCT_SUNTAC_VS10U,
+ 0,
+ "SUN Corporation",
+ "SUNTAC Slipper U",
+ },
+ {
+ USB_VENDOR_SUNTAC, USB_PRODUCT_SUNTAC_IS96U,
+ 0,
+ "SUN Corporation",
+ "SUNTAC Ir-Trinity",
+ },
+ {
+ USB_VENDOR_SUNTAC, USB_PRODUCT_SUNTAC_AS64LX,
+ 0,
+ "SUN Corporation",
+ "SUNTAC U-Cable type A3",
+ },
+ {
+ USB_VENDOR_SUNTAC, USB_PRODUCT_SUNTAC_AS144L4,
+ 0,
+ "SUN Corporation",
+ "SUNTAC U-Cable type A4",
+ },
+ {
+ USB_VENDOR_SUN, USB_PRODUCT_SUN_KEYBOARD_TYPE_6,
+ 0,
+ "Sun Microsystems",
+ "Type 6 USB keyboard",
+ },
+ {
+ USB_VENDOR_SUN, USB_PRODUCT_SUN_KEYBOARD_TYPE_7,
+ 0,
+ "Sun Microsystems",
+ "Type 7 USB keyboard",
+ },
+ {
+ USB_VENDOR_SUN, USB_PRODUCT_SUN_MOUSE,
+ 0,
+ "Sun Microsystems",
+ "Type 6 USB mouse",
+ },
+ {
+ USB_VENDOR_SUN, USB_PRODUCT_SUN_KBD_HUB,
+ 0,
+ "Sun Microsystems",
+ "Kbd Hub",
+ },
+ {
+ USB_VENDOR_SUPERTOP, USB_PRODUCT_SUPERTOP_IDE,
+ 0,
+ "Super Top",
+ "USB-IDE",
+ },
+ {
+ USB_VENDOR_SYNTECH, USB_PRODUCT_SYNTECH_CPT8001C,
+ 0,
+ "Syntech Information",
+ "CPT-8001C Barcode scanner",
+ },
+ {
+ USB_VENDOR_SYNTECH, USB_PRODUCT_SYNTECH_CYPHERLAB100,
+ 0,
+ "Syntech Information",
+ "CipherLab USB Barcode Scanner",
+ },
+ {
+ USB_VENDOR_TECLAST, USB_PRODUCT_TECLAST_TLC300,
+ 0,
+ "Teclast",
+ "USB Media Player",
+ },
+ {
+ USB_VENDOR_DIAMOND2, USB_PRODUCT_DIAMOND2_SUPRAEXPRESS56K,
+ 0,
+ "Diamond (Supra)",
+ "Supra Express 56K modem",
+ },
+ {
+ USB_VENDOR_DIAMOND2, USB_PRODUCT_DIAMOND2_SUPRA2890,
+ 0,
+ "Diamond (Supra)",
+ "SupraMax 2890 56K Modem",
+ },
+ {
+ USB_VENDOR_DIAMOND2, USB_PRODUCT_DIAMOND2_RIO600USB,
+ 0,
+ "Diamond (Supra)",
+ "Rio 600 USB",
+ },
+ {
+ USB_VENDOR_DIAMOND2, USB_PRODUCT_DIAMOND2_RIO800USB,
+ 0,
+ "Diamond (Supra)",
+ "Rio 800 USB",
+ },
+ {
+ USB_VENDOR_SURECOM, USB_PRODUCT_SURECOM_EP9001G2A,
+ 0,
+ "Surecom Technology",
+ "EP-9001-G rev 2A",
+ },
+ {
+ USB_VENDOR_SURECOM, USB_PRODUCT_SURECOM_RT2570,
+ 0,
+ "Surecom Technology",
+ "RT2570",
+ },
+ {
+ USB_VENDOR_SURECOM, USB_PRODUCT_SURECOM_RT2573,
+ 0,
+ "Surecom Technology",
+ "RT2573",
+ },
+ {
+ USB_VENDOR_SWEEX, USB_PRODUCT_SWEEX_ZD1211,
+ 0,
+ "Sweex",
+ "ZD1211",
+ },
+ {
+ USB_VENDOR_SWEEX2, USB_PRODUCT_SWEEX2_LW153,
+ 0,
+ "Sweex",
+ "LW153",
+ },
+ {
+ USB_VENDOR_SWEEX2, USB_PRODUCT_SWEEX2_LW303,
+ 0,
+ "Sweex",
+ "LW303",
+ },
+ {
+ USB_VENDOR_SWEEX2, USB_PRODUCT_SWEEX2_LW313,
+ 0,
+ "Sweex",
+ "LW313",
+ },
+ {
+ USB_VENDOR_SYSTEMTALKS, USB_PRODUCT_SYSTEMTALKS_SGCX2UL,
+ 0,
+ "System Talks",
+ "SGC-X2UL",
+ },
+ {
+ USB_VENDOR_TAPWAVE, USB_PRODUCT_TAPWAVE_ZODIAC,
+ 0,
+ "Tapwave",
+ "Zodiac",
+ },
+ {
+ USB_VENDOR_TAUGA, USB_PRODUCT_TAUGA_CAMERAMATE,
+ 0,
+ "Taugagreining HF",
+ "CameraMate (DPCM_USB)",
+ },
+ {
+ USB_VENDOR_TCTMOBILE, USB_PRODUCT_TCTMOBILE_X060S,
+ 0,
+ "TCT Mobile",
+ "X060S 3G modem",
+ },
+ {
+ USB_VENDOR_TCTMOBILE, USB_PRODUCT_TCTMOBILE_X080S,
+ 0,
+ "TCT Mobile",
+ "X080S 3G modem",
+ },
+ {
+ USB_VENDOR_TDK, USB_PRODUCT_TDK_UPA9664,
+ 0,
+ "TDK",
+ "USB-PDC Adapter UPA9664",
+ },
+ {
+ USB_VENDOR_TDK, USB_PRODUCT_TDK_UCA1464,
+ 0,
+ "TDK",
+ "USB-cdmaOne Adapter UCA1464",
+ },
+ {
+ USB_VENDOR_TDK, USB_PRODUCT_TDK_UHA6400,
+ 0,
+ "TDK",
+ "USB-PHS Adapter UHA6400",
+ },
+ {
+ USB_VENDOR_TDK, USB_PRODUCT_TDK_UPA6400,
+ 0,
+ "TDK",
+ "USB-PHS Adapter UPA6400",
+ },
+ {
+ USB_VENDOR_TDK, USB_PRODUCT_TDK_BT_DONGLE,
+ 0,
+ "TDK",
+ "Bluetooth USB dongle",
+ },
+ {
+ USB_VENDOR_TEAC, USB_PRODUCT_TEAC_FD05PUB,
+ 0,
+ "TEAC",
+ "FD-05PUB floppy",
+ },
+ {
+ USB_VENDOR_TEKRAM, USB_PRODUCT_TEKRAM_QUICKWLAN,
+ 0,
+ "Tekram Technology",
+ "QuickWLAN",
+ },
+ {
+ USB_VENDOR_TEKRAM, USB_PRODUCT_TEKRAM_ZD1211_1,
+ 0,
+ "Tekram Technology",
+ "ZD1211",
+ },
+ {
+ USB_VENDOR_TEKRAM, USB_PRODUCT_TEKRAM_ZD1211_2,
+ 0,
+ "Tekram Technology",
+ "ZD1211",
+ },
+ {
+ USB_VENDOR_TELEX, USB_PRODUCT_TELEX_MIC1,
+ 0,
+ "Telex Communications",
+ "Enhanced USB Microphone",
+ },
+ {
+ USB_VENDOR_TELIT, USB_PRODUCT_TELIT_UC864E,
+ 0,
+ "Telit",
+ "UC864E 3G modem",
+ },
+ {
+ USB_VENDOR_TELIT, USB_PRODUCT_TELIT_UC864G,
+ 0,
+ "Telit",
+ "UC864G 3G modem",
+ },
+ {
+ USB_VENDOR_TENX, USB_PRODUCT_TENX_UAUDIO0,
+ 0,
+ "Ten X Technology, Inc.",
+ "USB audio headset",
+ },
+ {
+ USB_VENDOR_TI, USB_PRODUCT_TI_UTUSB41,
+ 0,
+ "Texas Instruments",
+ "UT-USB41 hub",
+ },
+ {
+ USB_VENDOR_TI, USB_PRODUCT_TI_TUSB2046,
+ 0,
+ "Texas Instruments",
+ "TUSB2046 hub",
+ },
+ {
+ USB_VENDOR_THRUST, USB_PRODUCT_THRUST_FUSION_PAD,
+ 0,
+ "Thrustmaster",
+ "Fusion Digital Gamepad",
+ },
+ {
+ USB_VENDOR_TLAYTECH, USB_PRODUCT_TLAYTECH_TEU800,
+ 0,
+ "Tlay Tech",
+ "TEU800 3G modem",
+ },
+ {
+ USB_VENDOR_TOPRE, USB_PRODUCT_TOPRE_HHKB,
+ 0,
+ "Topre Corporation",
+ "HHKB Professional",
+ },
+ {
+ USB_VENDOR_TOSHIBA, USB_PRODUCT_TOSHIBA_POCKETPC_E740,
+ 0,
+ "Toshiba",
+ "PocketPC e740",
+ },
+ {
+ USB_VENDOR_TOSHIBA, USB_PRODUCT_TOSHIBA_RT3070,
+ 0,
+ "Toshiba",
+ "RT3070",
+ },
+ {
+ USB_VENDOR_TOSHIBA, USB_PRODUCT_TOSHIBA_G450,
+ 0,
+ "Toshiba",
+ "G450 modem",
+ },
+ {
+ USB_VENDOR_TOSHIBA, USB_PRODUCT_TOSHIBA_HSDPA,
+ 0,
+ "Toshiba",
+ "G450 modem",
+ },
+ {
+ USB_VENDOR_TREK, USB_PRODUCT_TREK_THUMBDRIVE,
+ 0,
+ "Trek Technology",
+ "ThumbDrive",
+ },
+ {
+ USB_VENDOR_TREK, USB_PRODUCT_TREK_MEMKEY,
+ 0,
+ "Trek Technology",
+ "IBM USB Memory Key",
+ },
+ {
+ USB_VENDOR_TREK, USB_PRODUCT_TREK_THUMBDRIVE_8MB,
+ 0,
+ "Trek Technology",
+ "ThumbDrive_8MB",
+ },
+ {
+ USB_VENDOR_TRIPPLITE, USB_PRODUCT_TRIPPLITE_U209,
+ 0,
+ "Tripp-Lite",
+ "Serial",
+ },
+ {
+ USB_VENDOR_TRUMPION, USB_PRODUCT_TRUMPION_T33520,
+ 0,
+ "Trumpion Microelectronics",
+ "T33520 USB Flash Card Controller",
+ },
+ {
+ USB_VENDOR_TRUMPION, USB_PRODUCT_TRUMPION_C3310,
+ 0,
+ "Trumpion Microelectronics",
+ "Comotron C3310 MP3 player",
+ },
+ {
+ USB_VENDOR_TRUMPION, USB_PRODUCT_TRUMPION_MP3,
+ 0,
+ "Trumpion Microelectronics",
+ "MP3 player",
+ },
+ {
+ USB_VENDOR_TWINMOS, USB_PRODUCT_TWINMOS_G240,
+ 0,
+ "TwinMOS",
+ "G240",
+ },
+ {
+ USB_VENDOR_TWINMOS, USB_PRODUCT_TWINMOS_MDIV,
+ 0,
+ "TwinMOS",
+ "Memory Disk IV",
+ },
+ {
+ USB_VENDOR_UBIQUAM, USB_PRODUCT_UBIQUAM_UALL,
+ 0,
+ "UBIQUAM Co., Ltd.",
+ "CDMA 1xRTT USB Modem (U-100/105/200/300/520)",
+ },
+ {
+ USB_VENDOR_ULTIMA, USB_PRODUCT_ULTIMA_1200UBPLUS,
+ 0,
+ "Ultima",
+ "1200 UB Plus scanner",
+ },
+ {
+ USB_VENDOR_UMAX, USB_PRODUCT_UMAX_ASTRA1236U,
+ 0,
+ "UMAX Data Systems",
+ "Astra 1236U Scanner",
+ },
+ {
+ USB_VENDOR_UMAX, USB_PRODUCT_UMAX_ASTRA1220U,
+ 0,
+ "UMAX Data Systems",
+ "Astra 1220U Scanner",
+ },
+ {
+ USB_VENDOR_UMAX, USB_PRODUCT_UMAX_ASTRA2000U,
+ 0,
+ "UMAX Data Systems",
+ "Astra 2000U Scanner",
+ },
+ {
+ USB_VENDOR_UMAX, USB_PRODUCT_UMAX_ASTRA2100U,
+ 0,
+ "UMAX Data Systems",
+ "Astra 2100U Scanner",
+ },
+ {
+ USB_VENDOR_UMAX, USB_PRODUCT_UMAX_ASTRA2200U,
+ 0,
+ "UMAX Data Systems",
+ "Astra 2200U Scanner",
+ },
+ {
+ USB_VENDOR_UMAX, USB_PRODUCT_UMAX_ASTRA3400,
+ 0,
+ "UMAX Data Systems",
+ "Astra 3400 Scanner",
+ },
+ {
+ USB_VENDOR_UMEDIA, USB_PRODUCT_UMEDIA_TEW444UBEU,
+ 0,
+ "U-MEDIA Communications",
+ "TEW-444UB EU",
+ },
+ {
+ USB_VENDOR_UMEDIA, USB_PRODUCT_UMEDIA_TEW444UBEU_NF,
+ 0,
+ "U-MEDIA Communications",
+ "TEW-444UB EU (no firmware)",
+ },
+ {
+ USB_VENDOR_UMEDIA, USB_PRODUCT_UMEDIA_TEW429UB_A,
+ 0,
+ "U-MEDIA Communications",
+ "TEW-429UB_A",
+ },
+ {
+ USB_VENDOR_UMEDIA, USB_PRODUCT_UMEDIA_TEW429UB,
+ 0,
+ "U-MEDIA Communications",
+ "TEW-429UB",
+ },
+ {
+ USB_VENDOR_UMEDIA, USB_PRODUCT_UMEDIA_TEW429UBC1,
+ 0,
+ "U-MEDIA Communications",
+ "TEW-429UB C1",
+ },
+ {
+ USB_VENDOR_UMEDIA, USB_PRODUCT_UMEDIA_RT2870_1,
+ 0,
+ "U-MEDIA Communications",
+ "RT2870",
+ },
+ {
+ USB_VENDOR_UMEDIA, USB_PRODUCT_UMEDIA_ALL0298V2,
+ 0,
+ "U-MEDIA Communications",
+ "ALL0298 v2",
+ },
+ {
+ USB_VENDOR_UMEDIA, USB_PRODUCT_UMEDIA_AR5523_2,
+ 0,
+ "U-MEDIA Communications",
+ "AR5523",
+ },
+ {
+ USB_VENDOR_UMEDIA, USB_PRODUCT_UMEDIA_AR5523_2_NF,
+ 0,
+ "U-MEDIA Communications",
+ "AR5523 (no firmware)",
+ },
+ {
+ USB_VENDOR_UNIACCESS, USB_PRODUCT_UNIACCESS_PANACHE,
+ 0,
+ "Universal Access",
+ "Panache Surf USB ISDN Adapter",
+ },
+ {
+ USB_VENDOR_USI, USB_PRODUCT_USI_MC60,
+ 0,
+ "USI",
+ "MC60 Serial",
+ },
+ {
+ USB_VENDOR_USR, USB_PRODUCT_USR_USR5422,
+ 0,
+ "U.S. Robotics",
+ "USR5422 WLAN",
+ },
+ {
+ USB_VENDOR_USR, USB_PRODUCT_USR_USR5423,
+ 0,
+ "U.S. Robotics",
+ "USR5423 WLAN",
+ },
+ {
+ USB_VENDOR_VIA, USB_PRODUCT_VIA_USB2IDEBRIDGE,
+ 0,
+ "VIA",
+ "USB 2.0 IDE Bridge",
+ },
+ {
+ USB_VENDOR_VAISALA, USB_PRODUCT_VAISALA_CABLE,
+ 0,
+ "Vaisala",
+ "USB Interface cable",
+ },
+ {
+ USB_VENDOR_VIDZMEDIA, USB_PRODUCT_VIDZMEDIA_MONSTERTV,
+ 0,
+ "VidzMedia Pte Ltd",
+ "MonsterTV P2H",
+ },
+ {
+ USB_VENDOR_VISION, USB_PRODUCT_VISION_VC6452V002,
+ 0,
+ "VLSI Vision",
+ "CPiA Camera",
+ },
+ {
+ USB_VENDOR_VISIONEER, USB_PRODUCT_VISIONEER_7600,
+ 0,
+ "Visioneer",
+ "OneTouch 7600",
+ },
+ {
+ USB_VENDOR_VISIONEER, USB_PRODUCT_VISIONEER_5300,
+ 0,
+ "Visioneer",
+ "OneTouch 5300",
+ },
+ {
+ USB_VENDOR_VISIONEER, USB_PRODUCT_VISIONEER_3000,
+ 0,
+ "Visioneer",
+ "Scanport 3000",
+ },
+ {
+ USB_VENDOR_VISIONEER, USB_PRODUCT_VISIONEER_6100,
+ 0,
+ "Visioneer",
+ "OneTouch 6100",
+ },
+ {
+ USB_VENDOR_VISIONEER, USB_PRODUCT_VISIONEER_6200,
+ 0,
+ "Visioneer",
+ "OneTouch 6200",
+ },
+ {
+ USB_VENDOR_VISIONEER, USB_PRODUCT_VISIONEER_8100,
+ 0,
+ "Visioneer",
+ "OneTouch 8100",
+ },
+ {
+ USB_VENDOR_VISIONEER, USB_PRODUCT_VISIONEER_8600,
+ 0,
+ "Visioneer",
+ "OneTouch 8600",
+ },
+ {
+ USB_VENDOR_VIVITAR, USB_PRODUCT_VIVITAR_35XX,
+ 0,
+ "Vivitar",
+ "Vivicam 35Xx",
+ },
+ {
+ USB_VENDOR_VTECH, USB_PRODUCT_VTECH_RT2570,
+ 0,
+ "VTech",
+ "RT2570",
+ },
+ {
+ USB_VENDOR_VTECH, USB_PRODUCT_VTECH_ZD1211B,
+ 0,
+ "VTech",
+ "ZD1211B",
+ },
+ {
+ USB_VENDOR_WACOM, USB_PRODUCT_WACOM_CT0405U,
+ 0,
+ "WACOM",
+ "CT-0405-U Tablet",
+ },
+ {
+ USB_VENDOR_WACOM, USB_PRODUCT_WACOM_GRAPHIRE,
+ 0,
+ "WACOM",
+ "Graphire",
+ },
+ {
+ USB_VENDOR_WACOM, USB_PRODUCT_WACOM_GRAPHIRE3_4X5,
+ 0,
+ "WACOM",
+ "Graphire 3 4x5",
+ },
+ {
+ USB_VENDOR_WACOM, USB_PRODUCT_WACOM_INTUOSA5,
+ 0,
+ "WACOM",
+ "Intuos A5",
+ },
+ {
+ USB_VENDOR_WACOM, USB_PRODUCT_WACOM_GD0912U,
+ 0,
+ "WACOM",
+ "Intuos 9x12 Graphics Tablet",
+ },
+ {
+ USB_VENDOR_WAVESENSE, USB_PRODUCT_WAVESENSE_JAZZ,
+ 0,
+ "WaveSense",
+ "Jazz blood glucose meter",
+ },
+ {
+ USB_VENDOR_WCH, USB_PRODUCT_WCH_CH341SER,
+ 0,
+ "QinHeng Electronics",
+ "CH341/CH340 USB-Serial Bridge",
+ },
+ {
+ USB_VENDOR_WCH2, USB_PRODUCT_WCH2_CH341SER,
+ 0,
+ "QinHeng Electronics",
+ "CH341/CH340 USB-Serial Bridge",
+ },
+ {
+ USB_VENDOR_WESTERN, USB_PRODUCT_WESTERN_COMBO,
+ 0,
+ "Western Digital",
+ "Firewire USB Combo",
+ },
+ {
+ USB_VENDOR_WESTERN, USB_PRODUCT_WESTERN_EXTHDD,
+ 0,
+ "Western Digital",
+ "External HDD",
+ },
+ {
+ USB_VENDOR_WESTERN, USB_PRODUCT_WESTERN_HUB,
+ 0,
+ "Western Digital",
+ "USB HUB",
+ },
+ {
+ USB_VENDOR_WESTERN, USB_PRODUCT_WESTERN_MYBOOK,
+ 0,
+ "Western Digital",
+ "MyBook External HDD",
+ },
+ {
+ USB_VENDOR_WESTERN, USB_PRODUCT_WESTERN_MYPASSWORD,
+ 0,
+ "Western Digital",
+ "MyPassword External HDD",
+ },
+ {
+ USB_VENDOR_WINBOND, USB_PRODUCT_WINBOND_UH104,
+ 0,
+ "Winbond",
+ "4-port USB Hub",
+ },
+ {
+ USB_VENDOR_WINMAXGROUP, USB_PRODUCT_WINMAXGROUP_FLASH64MC,
+ 0,
+ "WinMaxGroup",
+ "USB Flash Disk 64M-C",
+ },
+ {
+ USB_VENDOR_WISTRONNEWEB, USB_PRODUCT_WISTRONNEWEB_UR045G,
+ 0,
+ "Wistron NeWeb",
+ "PrismGT USB 2.0 WLAN",
+ },
+ {
+ USB_VENDOR_WISTRONNEWEB, USB_PRODUCT_WISTRONNEWEB_UR055G,
+ 0,
+ "Wistron NeWeb",
+ "UR055G",
+ },
+ {
+ USB_VENDOR_WISTRONNEWEB, USB_PRODUCT_WISTRONNEWEB_AR5523_1,
+ 0,
+ "Wistron NeWeb",
+ "AR5523",
+ },
+ {
+ USB_VENDOR_WISTRONNEWEB, USB_PRODUCT_WISTRONNEWEB_AR5523_1_NF,
+ 0,
+ "Wistron NeWeb",
+ "AR5523 (no firmware)",
+ },
+ {
+ USB_VENDOR_WISTRONNEWEB, USB_PRODUCT_WISTRONNEWEB_AR5523_2,
+ 0,
+ "Wistron NeWeb",
+ "AR5523",
+ },
+ {
+ USB_VENDOR_WISTRONNEWEB, USB_PRODUCT_WISTRONNEWEB_AR5523_2_NF,
+ 0,
+ "Wistron NeWeb",
+ "AR5523 (no firmware)",
+ },
+ {
+ USB_VENDOR_XEROX, USB_PRODUCT_XEROX_WCM15,
+ 0,
+ "Xerox",
+ "WorkCenter M15",
+ },
+ {
+ USB_VENDOR_XIRLINK, USB_PRODUCT_XIRLINK_PCCAM,
+ 0,
+ "Xirlink",
+ "IBM PC Camera",
+ },
+ {
+ USB_VENDOR_XYRATEX, USB_PRODUCT_XYRATEX_PRISM_GT_1,
+ 0,
+ "Xyratex",
+ "PrismGT USB 2.0 WLAN",
+ },
+ {
+ USB_VENDOR_XYRATEX, USB_PRODUCT_XYRATEX_PRISM_GT_2,
+ 0,
+ "Xyratex",
+ "PrismGT USB 2.0 WLAN",
+ },
+ {
+ USB_VENDOR_YAMAHA, USB_PRODUCT_YAMAHA_UX256,
+ 0,
+ "YAMAHA",
+ "UX256 MIDI I/F",
+ },
+ {
+ USB_VENDOR_YAMAHA, USB_PRODUCT_YAMAHA_UX96,
+ 0,
+ "YAMAHA",
+ "UX96 MIDI I/F",
+ },
+ {
+ USB_VENDOR_YAMAHA, USB_PRODUCT_YAMAHA_RTA54I,
+ 0,
+ "YAMAHA",
+ "NetVolante RTA54i Broadband&ISDN Router",
+ },
+ {
+ USB_VENDOR_YAMAHA, USB_PRODUCT_YAMAHA_RTA55I,
+ 0,
+ "YAMAHA",
+ "NetVolante RTA55i Broadband VoIP Router",
+ },
+ {
+ USB_VENDOR_YAMAHA, USB_PRODUCT_YAMAHA_RTW65B,
+ 0,
+ "YAMAHA",
+ "NetVolante RTW65b Broadband Wireless Router",
+ },
+ {
+ USB_VENDOR_YAMAHA, USB_PRODUCT_YAMAHA_RTW65I,
+ 0,
+ "YAMAHA",
+ "NetVolante RTW65i Broadband&ISDN Wireless Router",
+ },
+ {
+ USB_VENDOR_YANO, USB_PRODUCT_YANO_U640MO,
+ 0,
+ "Yano",
+ "U640MO-03",
+ },
+ {
+ USB_VENDOR_YANO, USB_PRODUCT_YANO_FW800HD,
+ 0,
+ "Yano",
+ "METALWEAR-HDD",
+ },
+ {
+ USB_VENDOR_YCCABLE, USB_PRODUCT_YCCABLE_PL2303,
+ 0,
+ "Y.C. Cable",
+ "PL2303 Serial",
+ },
+ {
+ USB_VENDOR_YEDATA, USB_PRODUCT_YEDATA_FLASHBUSTERU,
+ 0,
+ "Y-E Data",
+ "Flashbuster-U",
+ },
+ {
+ USB_VENDOR_YISO, USB_PRODUCT_YISO_C893,
+ 0,
+ "Yiso Wireless Co.",
+ "CDMA 2000 1xEVDO PC Card",
+ },
+ {
+ USB_VENDOR_ZCOM, USB_PRODUCT_ZCOM_M4Y750,
+ 0,
+ "Z-Com",
+ "M4Y-750",
+ },
+ {
+ USB_VENDOR_ZCOM, USB_PRODUCT_ZCOM_XI725,
+ 0,
+ "Z-Com",
+ "XI-725/726",
+ },
+ {
+ USB_VENDOR_ZCOM, USB_PRODUCT_ZCOM_XI735,
+ 0,
+ "Z-Com",
+ "XI-735",
+ },
+ {
+ USB_VENDOR_ZCOM, USB_PRODUCT_ZCOM_XG703A,
+ 0,
+ "Z-Com",
+ "PrismGT USB 2.0 WLAN",
+ },
+ {
+ USB_VENDOR_ZCOM, USB_PRODUCT_ZCOM_ZD1211,
+ 0,
+ "Z-Com",
+ "ZD1211",
+ },
+ {
+ USB_VENDOR_ZCOM, USB_PRODUCT_ZCOM_AR5523,
+ 0,
+ "Z-Com",
+ "AR5523",
+ },
+ {
+ USB_VENDOR_ZCOM, USB_PRODUCT_ZCOM_AR5523_NF,
+ 0,
+ "Z-Com",
+ "AR5523 driver (no firmware)",
+ },
+ {
+ USB_VENDOR_ZCOM, USB_PRODUCT_ZCOM_XM142,
+ 0,
+ "Z-Com",
+ "XM-142",
+ },
+ {
+ USB_VENDOR_ZCOM, USB_PRODUCT_ZCOM_ZD1211B,
+ 0,
+ "Z-Com",
+ "ZD1211B",
+ },
+ {
+ USB_VENDOR_ZCOM, USB_PRODUCT_ZCOM_RT2870_1,
+ 0,
+ "Z-Com",
+ "RT2870",
+ },
+ {
+ USB_VENDOR_ZCOM, USB_PRODUCT_ZCOM_RT2870_2,
+ 0,
+ "Z-Com",
+ "RT2870",
+ },
+ {
+ USB_VENDOR_ZINWELL, USB_PRODUCT_ZINWELL_RT2570,
+ 0,
+ "Zinwell",
+ "RT2570",
+ },
+ {
+ USB_VENDOR_ZINWELL, USB_PRODUCT_ZINWELL_RT2870_1,
+ 0,
+ "Zinwell",
+ "RT2870",
+ },
+ {
+ USB_VENDOR_ZINWELL, USB_PRODUCT_ZINWELL_RT2870_2,
+ 0,
+ "Zinwell",
+ "RT2870",
+ },
+ {
+ USB_VENDOR_ZINWELL, USB_PRODUCT_ZINWELL_RT3072_1,
+ 0,
+ "Zinwell",
+ "RT3072",
+ },
+ {
+ USB_VENDOR_ZINWELL, USB_PRODUCT_ZINWELL_RT3072_2,
+ 0,
+ "Zinwell",
+ "RT3072",
+ },
+ {
+ USB_VENDOR_ZINWELL, USB_PRODUCT_ZINWELL_RT3070,
+ 0,
+ "Zinwell",
+ "RT3070",
+ },
+ {
+ USB_VENDOR_ZOOM, USB_PRODUCT_ZOOM_2986L,
+ 0,
+ "Zoom Telephonics",
+ "2986L Fax modem",
+ },
+ {
+ USB_VENDOR_ZORAN, USB_PRODUCT_ZORAN_EX20DSC,
+ 0,
+ "Zoran Microelectronics",
+ "Digital Camera EX-20 DSC",
+ },
+ {
+ USB_VENDOR_ZYDAS, USB_PRODUCT_ZYDAS_ZD1211,
+ 0,
+ "Zydas Technology Corporation",
+ "ZD1211 WLAN abg",
+ },
+ {
+ USB_VENDOR_ZYDAS, USB_PRODUCT_ZYDAS_ZD1211B,
+ 0,
+ "Zydas Technology Corporation",
+ "ZD1211B",
+ },
+ {
+ USB_VENDOR_ZYXEL, USB_PRODUCT_ZYXEL_OMNI56K,
+ 0,
+ "ZyXEL Communication",
+ "Omni 56K Plus",
+ },
+ {
+ USB_VENDOR_ZYXEL, USB_PRODUCT_ZYXEL_980N,
+ 0,
+ "ZyXEL Communication",
+ "Scorpion-980N keyboard",
+ },
+ {
+ USB_VENDOR_ZYXEL, USB_PRODUCT_ZYXEL_ZYAIRG220,
+ 0,
+ "ZyXEL Communication",
+ "ZyAIR G-220",
+ },
+ {
+ USB_VENDOR_ZYXEL, USB_PRODUCT_ZYXEL_G200V2,
+ 0,
+ "ZyXEL Communication",
+ "G-200 v2",
+ },
+ {
+ USB_VENDOR_ZYXEL, USB_PRODUCT_ZYXEL_AG225H,
+ 0,
+ "ZyXEL Communication",
+ "AG-225H",
+ },
+ {
+ USB_VENDOR_ZYXEL, USB_PRODUCT_ZYXEL_M202,
+ 0,
+ "ZyXEL Communication",
+ "M-202",
+ },
+ {
+ USB_VENDOR_ZYXEL, USB_PRODUCT_ZYXEL_G220V2,
+ 0,
+ "ZyXEL Communication",
+ "G-220 v2",
+ },
+ {
+ USB_VENDOR_ZYXEL, USB_PRODUCT_ZYXEL_G202,
+ 0,
+ "ZyXEL Communication",
+ "G-202",
+ },
+ {
+ USB_VENDOR_ZYXEL, USB_PRODUCT_ZYXEL_RT2870_1,
+ 0,
+ "ZyXEL Communication",
+ "RT2870",
+ },
+ {
+ USB_VENDOR_ZYXEL, USB_PRODUCT_ZYXEL_RT2870_2,
+ 0,
+ "ZyXEL Communication",
+ "RT2870",
+ },
+ {
+ USB_VENDOR_UNKNOWN1, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Unknown vendor",
+ NULL,
+ },
+ {
+ USB_VENDOR_UNKNOWN2, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Unknown vendor",
+ NULL,
+ },
+ {
+ USB_VENDOR_EGALAX2, 0,
+ USB_KNOWNDEV_NOPROD,
+ "eGalax, Inc.",
+ NULL,
+ },
+ {
+ USB_VENDOR_CHIPSBANK, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Chipsbank Microelectronics Co.",
+ NULL,
+ },
+ {
+ USB_VENDOR_HUMAX, 0,
+ USB_KNOWNDEV_NOPROD,
+ "HUMAX",
+ NULL,
+ },
+ {
+ USB_VENDOR_LTS, 0,
+ USB_KNOWNDEV_NOPROD,
+ "LTS",
+ NULL,
+ },
+ {
+ USB_VENDOR_BWCT, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Bernd Walter Computer Technology",
+ NULL,
+ },
+ {
+ USB_VENDOR_AOX, 0,
+ USB_KNOWNDEV_NOPROD,
+ "AOX",
+ NULL,
+ },
+ {
+ USB_VENDOR_THESYS, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Thesys",
+ NULL,
+ },
+ {
+ USB_VENDOR_DATABROADCAST, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Data Broadcasting",
+ NULL,
+ },
+ {
+ USB_VENDOR_ATMEL, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Atmel",
+ NULL,
+ },
+ {
+ USB_VENDOR_IWATSU, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Iwatsu America",
+ NULL,
+ },
+ {
+ USB_VENDOR_MITSUMI, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Mitsumi",
+ NULL,
+ },
+ {
+ USB_VENDOR_HP, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Hewlett Packard",
+ NULL,
+ },
+ {
+ USB_VENDOR_GENOA, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Genoa",
+ NULL,
+ },
+ {
+ USB_VENDOR_OAK, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Oak",
+ NULL,
+ },
+ {
+ USB_VENDOR_ADAPTEC, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Adaptec",
+ NULL,
+ },
+ {
+ USB_VENDOR_DIEBOLD, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Diebold",
+ NULL,
+ },
+ {
+ USB_VENDOR_SIEMENSELECTRO, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Siemens Electromechanical",
+ NULL,
+ },
+ {
+ USB_VENDOR_EPSONIMAGING, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Epson Imaging",
+ NULL,
+ },
+ {
+ USB_VENDOR_KEYTRONIC, 0,
+ USB_KNOWNDEV_NOPROD,
+ "KeyTronic",
+ NULL,
+ },
+ {
+ USB_VENDOR_OPTI, 0,
+ USB_KNOWNDEV_NOPROD,
+ "OPTi",
+ NULL,
+ },
+ {
+ USB_VENDOR_ELITEGROUP, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Elitegroup",
+ NULL,
+ },
+ {
+ USB_VENDOR_XILINX, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Xilinx",
+ NULL,
+ },
+ {
+ USB_VENDOR_FARALLON, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Farallon Communications",
+ NULL,
+ },
+ {
+ USB_VENDOR_NATIONAL, 0,
+ USB_KNOWNDEV_NOPROD,
+ "National Semiconductor",
+ NULL,
+ },
+ {
+ USB_VENDOR_NATIONALREG, 0,
+ USB_KNOWNDEV_NOPROD,
+ "National Registry",
+ NULL,
+ },
+ {
+ USB_VENDOR_ACERLABS, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Acer Labs",
+ NULL,
+ },
+ {
+ USB_VENDOR_FTDI, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Future Technology Devices",
+ NULL,
+ },
+ {
+ USB_VENDOR_NCR, 0,
+ USB_KNOWNDEV_NOPROD,
+ "NCR",
+ NULL,
+ },
+ {
+ USB_VENDOR_SYNOPSYS2, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Synopsys",
+ NULL,
+ },
+ {
+ USB_VENDOR_FUJITSUICL, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Fujitsu-ICL",
+ NULL,
+ },
+ {
+ USB_VENDOR_FUJITSU2, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Fujitsu Personal Systems",
+ NULL,
+ },
+ {
+ USB_VENDOR_QUANTA, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Quanta",
+ NULL,
+ },
+ {
+ USB_VENDOR_NEC, 0,
+ USB_KNOWNDEV_NOPROD,
+ "NEC",
+ NULL,
+ },
+ {
+ USB_VENDOR_KODAK, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Eastman Kodak",
+ NULL,
+ },
+ {
+ USB_VENDOR_WELTREND, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Weltrend",
+ NULL,
+ },
+ {
+ USB_VENDOR_VIA, 0,
+ USB_KNOWNDEV_NOPROD,
+ "VIA",
+ NULL,
+ },
+ {
+ USB_VENDOR_MCCI, 0,
+ USB_KNOWNDEV_NOPROD,
+ "MCCI",
+ NULL,
+ },
+ {
+ USB_VENDOR_MELCO, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Melco",
+ NULL,
+ },
+ {
+ USB_VENDOR_LEADTEK, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Leadtek",
+ NULL,
+ },
+ {
+ USB_VENDOR_WINBOND, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Winbond",
+ NULL,
+ },
+ {
+ USB_VENDOR_PHOENIX, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Phoenix",
+ NULL,
+ },
+ {
+ USB_VENDOR_CREATIVE, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Creative Labs",
+ NULL,
+ },
+ {
+ USB_VENDOR_NOKIA, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Nokia",
+ NULL,
+ },
+ {
+ USB_VENDOR_ADI, 0,
+ USB_KNOWNDEV_NOPROD,
+ "ADI Systems",
+ NULL,
+ },
+ {
+ USB_VENDOR_CATC, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Computer Access Technology",
+ NULL,
+ },
+ {
+ USB_VENDOR_SMC2, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Standard Microsystems",
+ NULL,
+ },
+ {
+ USB_VENDOR_MOTOROLA_HK, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Motorola HK",
+ NULL,
+ },
+ {
+ USB_VENDOR_GRAVIS, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Advanced Gravis Computer",
+ NULL,
+ },
+ {
+ USB_VENDOR_CIRRUSLOGIC, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Cirrus Logic",
+ NULL,
+ },
+ {
+ USB_VENDOR_INNOVATIVE, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Innovative Semiconductors",
+ NULL,
+ },
+ {
+ USB_VENDOR_MOLEX, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Molex",
+ NULL,
+ },
+ {
+ USB_VENDOR_SUN, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Sun Microsystems",
+ NULL,
+ },
+ {
+ USB_VENDOR_UNISYS, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Unisys",
+ NULL,
+ },
+ {
+ USB_VENDOR_TAUGA, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Taugagreining HF",
+ NULL,
+ },
+ {
+ USB_VENDOR_AMD, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Advanced Micro Devices",
+ NULL,
+ },
+ {
+ USB_VENDOR_LEXMARK, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Lexmark International",
+ NULL,
+ },
+ {
+ USB_VENDOR_LG, 0,
+ USB_KNOWNDEV_NOPROD,
+ "LG Electronics",
+ NULL,
+ },
+ {
+ USB_VENDOR_NANAO, 0,
+ USB_KNOWNDEV_NOPROD,
+ "NANAO",
+ NULL,
+ },
+ {
+ USB_VENDOR_GATEWAY, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Gateway 2000",
+ NULL,
+ },
+ {
+ USB_VENDOR_NMB, 0,
+ USB_KNOWNDEV_NOPROD,
+ "NMB",
+ NULL,
+ },
+ {
+ USB_VENDOR_ALPS, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Alps Electric",
+ NULL,
+ },
+ {
+ USB_VENDOR_THRUST, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Thrustmaster",
+ NULL,
+ },
+ {
+ USB_VENDOR_TI, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Texas Instruments",
+ NULL,
+ },
+ {
+ USB_VENDOR_ANALOGDEVICES, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Analog Devices",
+ NULL,
+ },
+ {
+ USB_VENDOR_SIS, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Silicon Integrated Systems Corp.",
+ NULL,
+ },
+ {
+ USB_VENDOR_KYE, 0,
+ USB_KNOWNDEV_NOPROD,
+ "KYE Systems",
+ NULL,
+ },
+ {
+ USB_VENDOR_DIAMOND2, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Diamond (Supra)",
+ NULL,
+ },
+ {
+ USB_VENDOR_RENESAS, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Renesas",
+ NULL,
+ },
+ {
+ USB_VENDOR_MICROSOFT, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Microsoft",
+ NULL,
+ },
+ {
+ USB_VENDOR_PRIMAX, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Primax Electronics",
+ NULL,
+ },
+ {
+ USB_VENDOR_MGE, 0,
+ USB_KNOWNDEV_NOPROD,
+ "MGE UPS Systems",
+ NULL,
+ },
+ {
+ USB_VENDOR_AMP, 0,
+ USB_KNOWNDEV_NOPROD,
+ "AMP",
+ NULL,
+ },
+ {
+ USB_VENDOR_CHERRY, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Cherry Mikroschalter",
+ NULL,
+ },
+ {
+ USB_VENDOR_MEGATRENDS, 0,
+ USB_KNOWNDEV_NOPROD,
+ "American Megatrends",
+ NULL,
+ },
+ {
+ USB_VENDOR_LOGITECH, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Logitech",
+ NULL,
+ },
+ {
+ USB_VENDOR_BTC, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Behavior Tech. Computer",
+ NULL,
+ },
+ {
+ USB_VENDOR_PHILIPS, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Philips",
+ NULL,
+ },
+ {
+ USB_VENDOR_SUN2, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Sun Microsystems (offical)",
+ NULL,
+ },
+ {
+ USB_VENDOR_SANYO, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Sanyo Electric",
+ NULL,
+ },
+ {
+ USB_VENDOR_SEAGATE, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Seagate",
+ NULL,
+ },
+ {
+ USB_VENDOR_CONNECTIX, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Connectix",
+ NULL,
+ },
+ {
+ USB_VENDOR_SEMTECH, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Semtech",
+ NULL,
+ },
+ {
+ USB_VENDOR_KENSINGTON, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Kensington",
+ NULL,
+ },
+ {
+ USB_VENDOR_LUCENT, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Lucent",
+ NULL,
+ },
+ {
+ USB_VENDOR_PLANTRONICS, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Plantronics",
+ NULL,
+ },
+ {
+ USB_VENDOR_KYOCERA, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Kyocera Wireless Corp.",
+ NULL,
+ },
+ {
+ USB_VENDOR_STMICRO, 0,
+ USB_KNOWNDEV_NOPROD,
+ "STMicroelectronics",
+ NULL,
+ },
+ {
+ USB_VENDOR_FOXCONN, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Foxconn",
+ NULL,
+ },
+ {
+ USB_VENDOR_MEIZU, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Meizu Electronics",
+ NULL,
+ },
+ {
+ USB_VENDOR_YAMAHA, 0,
+ USB_KNOWNDEV_NOPROD,
+ "YAMAHA",
+ NULL,
+ },
+ {
+ USB_VENDOR_COMPAQ, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Compaq",
+ NULL,
+ },
+ {
+ USB_VENDOR_HITACHI, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Hitachi",
+ NULL,
+ },
+ {
+ USB_VENDOR_ACERP, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Acer Peripherals",
+ NULL,
+ },
+ {
+ USB_VENDOR_DAVICOM, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Davicom",
+ NULL,
+ },
+ {
+ USB_VENDOR_VISIONEER, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Visioneer",
+ NULL,
+ },
+ {
+ USB_VENDOR_CANON, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Canon",
+ NULL,
+ },
+ {
+ USB_VENDOR_NIKON, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Nikon",
+ NULL,
+ },
+ {
+ USB_VENDOR_PAN, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Pan International",
+ NULL,
+ },
+ {
+ USB_VENDOR_IBM, 0,
+ USB_KNOWNDEV_NOPROD,
+ "IBM",
+ NULL,
+ },
+ {
+ USB_VENDOR_CYPRESS, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Cypress Semiconductor",
+ NULL,
+ },
+ {
+ USB_VENDOR_ROHM, 0,
+ USB_KNOWNDEV_NOPROD,
+ "ROHM",
+ NULL,
+ },
+ {
+ USB_VENDOR_COMPAL, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Compal",
+ NULL,
+ },
+ {
+ USB_VENDOR_EPSON, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Seiko Epson",
+ NULL,
+ },
+ {
+ USB_VENDOR_RAINBOW, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Rainbow Technologies",
+ NULL,
+ },
+ {
+ USB_VENDOR_IODATA, 0,
+ USB_KNOWNDEV_NOPROD,
+ "I-O Data",
+ NULL,
+ },
+ {
+ USB_VENDOR_TDK, 0,
+ USB_KNOWNDEV_NOPROD,
+ "TDK",
+ NULL,
+ },
+ {
+ USB_VENDOR_3COMUSR, 0,
+ USB_KNOWNDEV_NOPROD,
+ "U.S. Robotics",
+ NULL,
+ },
+ {
+ USB_VENDOR_METHODE, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Methode Electronics Far East",
+ NULL,
+ },
+ {
+ USB_VENDOR_MAXISWITCH, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Maxi Switch",
+ NULL,
+ },
+ {
+ USB_VENDOR_LOCKHEEDMER, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Lockheed Martin Energy Research",
+ NULL,
+ },
+ {
+ USB_VENDOR_FUJITSU, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Fujitsu",
+ NULL,
+ },
+ {
+ USB_VENDOR_TOSHIBAAM, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Toshiba America",
+ NULL,
+ },
+ {
+ USB_VENDOR_MICROMACRO, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Micro Macro Technologies",
+ NULL,
+ },
+ {
+ USB_VENDOR_KONICA, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Konica",
+ NULL,
+ },
+ {
+ USB_VENDOR_LITEON, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Lite-On Technology",
+ NULL,
+ },
+ {
+ USB_VENDOR_FUJIPHOTO, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Fuji Photo Film",
+ NULL,
+ },
+ {
+ USB_VENDOR_PHILIPSSEMI, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Philips Semiconductors",
+ NULL,
+ },
+ {
+ USB_VENDOR_TATUNG, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Tatung Co. Of America",
+ NULL,
+ },
+ {
+ USB_VENDOR_SCANLOGIC, 0,
+ USB_KNOWNDEV_NOPROD,
+ "ScanLogic",
+ NULL,
+ },
+ {
+ USB_VENDOR_MYSON, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Myson Technology",
+ NULL,
+ },
+ {
+ USB_VENDOR_DIGI2, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Digi",
+ NULL,
+ },
+ {
+ USB_VENDOR_ITTCANON, 0,
+ USB_KNOWNDEV_NOPROD,
+ "ITT Canon",
+ NULL,
+ },
+ {
+ USB_VENDOR_ALTEC, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Altec Lansing",
+ NULL,
+ },
+ {
+ USB_VENDOR_LSI, 0,
+ USB_KNOWNDEV_NOPROD,
+ "LSI",
+ NULL,
+ },
+ {
+ USB_VENDOR_MENTORGRAPHICS, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Mentor Graphics",
+ NULL,
+ },
+ {
+ USB_VENDOR_ITUNERNET, 0,
+ USB_KNOWNDEV_NOPROD,
+ "I-Tuner Networks",
+ NULL,
+ },
+ {
+ USB_VENDOR_HOLTEK, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Holtek Semiconductor, Inc.",
+ NULL,
+ },
+ {
+ USB_VENDOR_PANASONIC, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Panasonic (Matsushita)",
+ NULL,
+ },
+ {
+ USB_VENDOR_HUANHSIN, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Huan Hsin",
+ NULL,
+ },
+ {
+ USB_VENDOR_SHARP, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Sharp",
+ NULL,
+ },
+ {
+ USB_VENDOR_IIYAMA, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Iiyama",
+ NULL,
+ },
+ {
+ USB_VENDOR_SHUTTLE, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Shuttle Technology",
+ NULL,
+ },
+ {
+ USB_VENDOR_ELO, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Elo TouchSystems",
+ NULL,
+ },
+ {
+ USB_VENDOR_SAMSUNG, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Samsung Electronics",
+ NULL,
+ },
+ {
+ USB_VENDOR_NORTHSTAR, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Northstar",
+ NULL,
+ },
+ {
+ USB_VENDOR_TOKYOELECTRON, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Tokyo Electron",
+ NULL,
+ },
+ {
+ USB_VENDOR_ANNABOOKS, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Annabooks",
+ NULL,
+ },
+ {
+ USB_VENDOR_JVC, 0,
+ USB_KNOWNDEV_NOPROD,
+ "JVC",
+ NULL,
+ },
+ {
+ USB_VENDOR_CHICONY, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Chicony Electronics",
+ NULL,
+ },
+ {
+ USB_VENDOR_ELAN, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Elan",
+ NULL,
+ },
+ {
+ USB_VENDOR_NEWNEX, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Newnex",
+ NULL,
+ },
+ {
+ USB_VENDOR_BROTHER, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Brother Industries",
+ NULL,
+ },
+ {
+ USB_VENDOR_DALLAS, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Dallas Semiconductor",
+ NULL,
+ },
+ {
+ USB_VENDOR_AIPTEK2, 0,
+ USB_KNOWNDEV_NOPROD,
+ "AIPTEK International",
+ NULL,
+ },
+ {
+ USB_VENDOR_PFU, 0,
+ USB_KNOWNDEV_NOPROD,
+ "PFU",
+ NULL,
+ },
+ {
+ USB_VENDOR_FUJIKURA, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Fujikura/DDK",
+ NULL,
+ },
+ {
+ USB_VENDOR_ACER, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Acer",
+ NULL,
+ },
+ {
+ USB_VENDOR_3COM, 0,
+ USB_KNOWNDEV_NOPROD,
+ "3Com",
+ NULL,
+ },
+ {
+ USB_VENDOR_HOSIDEN, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Hosiden Corporation",
+ NULL,
+ },
+ {
+ USB_VENDOR_AZTECH, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Aztech Systems",
+ NULL,
+ },
+ {
+ USB_VENDOR_BELKIN, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Belkin Components",
+ NULL,
+ },
+ {
+ USB_VENDOR_KAWATSU, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Kawatsu Semiconductor",
+ NULL,
+ },
+ {
+ USB_VENDOR_FCI, 0,
+ USB_KNOWNDEV_NOPROD,
+ "FCI",
+ NULL,
+ },
+ {
+ USB_VENDOR_LONGWELL, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Longwell",
+ NULL,
+ },
+ {
+ USB_VENDOR_COMPOSITE, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Composite",
+ NULL,
+ },
+ {
+ USB_VENDOR_STAR, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Star Micronics",
+ NULL,
+ },
+ {
+ USB_VENDOR_APC, 0,
+ USB_KNOWNDEV_NOPROD,
+ "American Power Conversion",
+ NULL,
+ },
+ {
+ USB_VENDOR_SCIATLANTA, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Scientific Atlanta",
+ NULL,
+ },
+ {
+ USB_VENDOR_TSM, 0,
+ USB_KNOWNDEV_NOPROD,
+ "TSM",
+ NULL,
+ },
+ {
+ USB_VENDOR_CONNECTEK, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Advanced Connectek USA",
+ NULL,
+ },
+ {
+ USB_VENDOR_NETCHIP, 0,
+ USB_KNOWNDEV_NOPROD,
+ "NetChip Technology",
+ NULL,
+ },
+ {
+ USB_VENDOR_ALTRA, 0,
+ USB_KNOWNDEV_NOPROD,
+ "ALTRA",
+ NULL,
+ },
+ {
+ USB_VENDOR_ATI, 0,
+ USB_KNOWNDEV_NOPROD,
+ "ATI Technologies",
+ NULL,
+ },
+ {
+ USB_VENDOR_AKS, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Aladdin Knowledge Systems",
+ NULL,
+ },
+ {
+ USB_VENDOR_TEKOM, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Tekom",
+ NULL,
+ },
+ {
+ USB_VENDOR_CANONDEV, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Canon",
+ NULL,
+ },
+ {
+ USB_VENDOR_WACOMTECH, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Wacom",
+ NULL,
+ },
+ {
+ USB_VENDOR_INVENTEC, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Inventec",
+ NULL,
+ },
+ {
+ USB_VENDOR_SHYHSHIUN, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Shyh Shiun Terminals",
+ NULL,
+ },
+ {
+ USB_VENDOR_PREHWERKE, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Preh Werke Gmbh & Co. KG",
+ NULL,
+ },
+ {
+ USB_VENDOR_SYNOPSYS, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Synopsys",
+ NULL,
+ },
+ {
+ USB_VENDOR_UNIACCESS, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Universal Access",
+ NULL,
+ },
+ {
+ USB_VENDOR_VIEWSONIC, 0,
+ USB_KNOWNDEV_NOPROD,
+ "ViewSonic",
+ NULL,
+ },
+ {
+ USB_VENDOR_XIRLINK, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Xirlink",
+ NULL,
+ },
+ {
+ USB_VENDOR_ANCHOR, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Anchor Chips",
+ NULL,
+ },
+ {
+ USB_VENDOR_SONY, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Sony",
+ NULL,
+ },
+ {
+ USB_VENDOR_FUJIXEROX, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Fuji Xerox",
+ NULL,
+ },
+ {
+ USB_VENDOR_VISION, 0,
+ USB_KNOWNDEV_NOPROD,
+ "VLSI Vision",
+ NULL,
+ },
+ {
+ USB_VENDOR_ASAHIKASEI, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Asahi Kasei Microsystems",
+ NULL,
+ },
+ {
+ USB_VENDOR_ATEN, 0,
+ USB_KNOWNDEV_NOPROD,
+ "ATEN International",
+ NULL,
+ },
+ {
+ USB_VENDOR_SAMSUNG2, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Samsung Electronics",
+ NULL,
+ },
+ {
+ USB_VENDOR_MUSTEK, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Mustek Systems",
+ NULL,
+ },
+ {
+ USB_VENDOR_TELEX, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Telex Communications",
+ NULL,
+ },
+ {
+ USB_VENDOR_CHINON, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Chinon",
+ NULL,
+ },
+ {
+ USB_VENDOR_PERACOM, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Peracom Networks",
+ NULL,
+ },
+ {
+ USB_VENDOR_ALCOR2, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Alcor Micro",
+ NULL,
+ },
+ {
+ USB_VENDOR_XYRATEX, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Xyratex",
+ NULL,
+ },
+ {
+ USB_VENDOR_WACOM, 0,
+ USB_KNOWNDEV_NOPROD,
+ "WACOM",
+ NULL,
+ },
+ {
+ USB_VENDOR_ETEK, 0,
+ USB_KNOWNDEV_NOPROD,
+ "e-TEK Labs",
+ NULL,
+ },
+ {
+ USB_VENDOR_EIZO, 0,
+ USB_KNOWNDEV_NOPROD,
+ "EIZO",
+ NULL,
+ },
+ {
+ USB_VENDOR_ELECOM, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Elecom",
+ NULL,
+ },
+ {
+ USB_VENDOR_CONEXANT, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Conexant",
+ NULL,
+ },
+ {
+ USB_VENDOR_HAUPPAUGE, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Hauppauge Computer Works",
+ NULL,
+ },
+ {
+ USB_VENDOR_BAFO, 0,
+ USB_KNOWNDEV_NOPROD,
+ "BAFO/Quality Computer Accessories",
+ NULL,
+ },
+ {
+ USB_VENDOR_YEDATA, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Y-E Data",
+ NULL,
+ },
+ {
+ USB_VENDOR_AVM, 0,
+ USB_KNOWNDEV_NOPROD,
+ "AVM",
+ NULL,
+ },
+ {
+ USB_VENDOR_QUICKSHOT, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Quickshot",
+ NULL,
+ },
+ {
+ USB_VENDOR_ROLAND, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Roland",
+ NULL,
+ },
+ {
+ USB_VENDOR_ROCKFIRE, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Rockfire",
+ NULL,
+ },
+ {
+ USB_VENDOR_RATOC, 0,
+ USB_KNOWNDEV_NOPROD,
+ "RATOC Systems",
+ NULL,
+ },
+ {
+ USB_VENDOR_ZYXEL, 0,
+ USB_KNOWNDEV_NOPROD,
+ "ZyXEL Communication",
+ NULL,
+ },
+ {
+ USB_VENDOR_INFINEON, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Infineon",
+ NULL,
+ },
+ {
+ USB_VENDOR_MICREL, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Micrel",
+ NULL,
+ },
+ {
+ USB_VENDOR_ALCOR, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Alcor Micro",
+ NULL,
+ },
+ {
+ USB_VENDOR_OMRON, 0,
+ USB_KNOWNDEV_NOPROD,
+ "OMRON",
+ NULL,
+ },
+ {
+ USB_VENDOR_ZORAN, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Zoran Microelectronics",
+ NULL,
+ },
+ {
+ USB_VENDOR_NIIGATA, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Niigata",
+ NULL,
+ },
+ {
+ USB_VENDOR_IOMEGA, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Iomega",
+ NULL,
+ },
+ {
+ USB_VENDOR_ATREND, 0,
+ USB_KNOWNDEV_NOPROD,
+ "A-Trend Technology",
+ NULL,
+ },
+ {
+ USB_VENDOR_AID, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Advanced Input Devices",
+ NULL,
+ },
+ {
+ USB_VENDOR_LACIE, 0,
+ USB_KNOWNDEV_NOPROD,
+ "LaCie",
+ NULL,
+ },
+ {
+ USB_VENDOR_FUJIFILM, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Fuji Film",
+ NULL,
+ },
+ {
+ USB_VENDOR_ARC, 0,
+ USB_KNOWNDEV_NOPROD,
+ "ARC",
+ NULL,
+ },
+ {
+ USB_VENDOR_ORTEK, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Ortek",
+ NULL,
+ },
+ {
+ USB_VENDOR_CISCOLINKSYS3, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Cisco-Linksys",
+ NULL,
+ },
+ {
+ USB_VENDOR_BOSE, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Bose",
+ NULL,
+ },
+ {
+ USB_VENDOR_OMNIVISION, 0,
+ USB_KNOWNDEV_NOPROD,
+ "OmniVision",
+ NULL,
+ },
+ {
+ USB_VENDOR_INSYSTEM, 0,
+ USB_KNOWNDEV_NOPROD,
+ "In-System Design",
+ NULL,
+ },
+ {
+ USB_VENDOR_APPLE, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Apple Computer",
+ NULL,
+ },
+ {
+ USB_VENDOR_YCCABLE, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Y.C. Cable",
+ NULL,
+ },
+ {
+ USB_VENDOR_DIGITALPERSONA, 0,
+ USB_KNOWNDEV_NOPROD,
+ "DigitalPersona",
+ NULL,
+ },
+ {
+ USB_VENDOR_3G, 0,
+ USB_KNOWNDEV_NOPROD,
+ "3G Green Green Globe",
+ NULL,
+ },
+ {
+ USB_VENDOR_RAFI, 0,
+ USB_KNOWNDEV_NOPROD,
+ "RAFI",
+ NULL,
+ },
+ {
+ USB_VENDOR_TYCO, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Tyco",
+ NULL,
+ },
+ {
+ USB_VENDOR_KAWASAKI, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Kawasaki",
+ NULL,
+ },
+ {
+ USB_VENDOR_DIGI, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Digi International",
+ NULL,
+ },
+ {
+ USB_VENDOR_QUALCOMM2, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Qualcomm",
+ NULL,
+ },
+ {
+ USB_VENDOR_QTRONIX, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Qtronix",
+ NULL,
+ },
+ {
+ USB_VENDOR_FOXLINK, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Foxlink",
+ NULL,
+ },
+ {
+ USB_VENDOR_RICOH, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Ricoh",
+ NULL,
+ },
+ {
+ USB_VENDOR_ELSA, 0,
+ USB_KNOWNDEV_NOPROD,
+ "ELSA",
+ NULL,
+ },
+ {
+ USB_VENDOR_SCIWORX, 0,
+ USB_KNOWNDEV_NOPROD,
+ "sci-worx",
+ NULL,
+ },
+ {
+ USB_VENDOR_BRAINBOXES, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Brainboxes Limited",
+ NULL,
+ },
+ {
+ USB_VENDOR_ULTIMA, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Ultima",
+ NULL,
+ },
+ {
+ USB_VENDOR_AXIOHM, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Axiohm Transaction Solutions",
+ NULL,
+ },
+ {
+ USB_VENDOR_MICROTEK, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Microtek",
+ NULL,
+ },
+ {
+ USB_VENDOR_SUNTAC, 0,
+ USB_KNOWNDEV_NOPROD,
+ "SUN Corporation",
+ NULL,
+ },
+ {
+ USB_VENDOR_LEXAR, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Lexar Media",
+ NULL,
+ },
+ {
+ USB_VENDOR_ADDTRON, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Addtron",
+ NULL,
+ },
+ {
+ USB_VENDOR_SYMBOL, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Symbol Technologies",
+ NULL,
+ },
+ {
+ USB_VENDOR_SYNTEK, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Syntek",
+ NULL,
+ },
+ {
+ USB_VENDOR_GENESYS, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Genesys Logic",
+ NULL,
+ },
+ {
+ USB_VENDOR_FUJI, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Fuji Electric",
+ NULL,
+ },
+ {
+ USB_VENDOR_KEITHLEY, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Keithley Instruments",
+ NULL,
+ },
+ {
+ USB_VENDOR_EIZONANAO, 0,
+ USB_KNOWNDEV_NOPROD,
+ "EIZO Nanao",
+ NULL,
+ },
+ {
+ USB_VENDOR_KLSI, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Kawasaki LSI",
+ NULL,
+ },
+ {
+ USB_VENDOR_FFC, 0,
+ USB_KNOWNDEV_NOPROD,
+ "FFC",
+ NULL,
+ },
+ {
+ USB_VENDOR_ANKO, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Anko Electronic",
+ NULL,
+ },
+ {
+ USB_VENDOR_PIENGINEERING, 0,
+ USB_KNOWNDEV_NOPROD,
+ "P.I. Engineering",
+ NULL,
+ },
+ {
+ USB_VENDOR_AOC, 0,
+ USB_KNOWNDEV_NOPROD,
+ "AOC International",
+ NULL,
+ },
+ {
+ USB_VENDOR_CHIC, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Chic Technology",
+ NULL,
+ },
+ {
+ USB_VENDOR_BARCO, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Barco Display Systems",
+ NULL,
+ },
+ {
+ USB_VENDOR_BRIDGE, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Bridge Information",
+ NULL,
+ },
+ {
+ USB_VENDOR_SOLIDYEAR, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Solid Year",
+ NULL,
+ },
+ {
+ USB_VENDOR_BIORAD, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Bio-Rad Laboratories",
+ NULL,
+ },
+ {
+ USB_VENDOR_MACALLY, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Macally",
+ NULL,
+ },
+ {
+ USB_VENDOR_ACTLABS, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Act Labs",
+ NULL,
+ },
+ {
+ USB_VENDOR_ALARIS, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Alaris",
+ NULL,
+ },
+ {
+ USB_VENDOR_APEX, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Apex",
+ NULL,
+ },
+ {
+ USB_VENDOR_CREATIVE3, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Creative Labs",
+ NULL,
+ },
+ {
+ USB_VENDOR_VIVITAR, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Vivitar",
+ NULL,
+ },
+ {
+ USB_VENDOR_GUNZE, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Gunze Electronics USA",
+ NULL,
+ },
+ {
+ USB_VENDOR_AVISION, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Avision",
+ NULL,
+ },
+ {
+ USB_VENDOR_TEAC, 0,
+ USB_KNOWNDEV_NOPROD,
+ "TEAC",
+ NULL,
+ },
+ {
+ USB_VENDOR_SGI, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Silicon Graphics",
+ NULL,
+ },
+ {
+ USB_VENDOR_SANWASUPPLY, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Sanwa Supply",
+ NULL,
+ },
+ {
+ USB_VENDOR_MEGATEC, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Megatec",
+ NULL,
+ },
+ {
+ USB_VENDOR_LINKSYS, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Linksys",
+ NULL,
+ },
+ {
+ USB_VENDOR_ACERSA, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Acer Semiconductor America",
+ NULL,
+ },
+ {
+ USB_VENDOR_SIGMATEL, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Sigmatel",
+ NULL,
+ },
+ {
+ USB_VENDOR_DRAYTEK, 0,
+ USB_KNOWNDEV_NOPROD,
+ "DrayTek",
+ NULL,
+ },
+ {
+ USB_VENDOR_AIWA, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Aiwa",
+ NULL,
+ },
+ {
+ USB_VENDOR_ACARD, 0,
+ USB_KNOWNDEV_NOPROD,
+ "ACARD Technology",
+ NULL,
+ },
+ {
+ USB_VENDOR_PROLIFIC, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Prolific Technology",
+ NULL,
+ },
+ {
+ USB_VENDOR_SIEMENS, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Siemens",
+ NULL,
+ },
+ {
+ USB_VENDOR_AVANCELOGIC, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Avance Logic",
+ NULL,
+ },
+ {
+ USB_VENDOR_SIEMENS2, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Siemens",
+ NULL,
+ },
+ {
+ USB_VENDOR_MINOLTA, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Minolta",
+ NULL,
+ },
+ {
+ USB_VENDOR_CHPRODUCTS, 0,
+ USB_KNOWNDEV_NOPROD,
+ "CH Products",
+ NULL,
+ },
+ {
+ USB_VENDOR_HAGIWARA, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Hagiwara Sys-Com",
+ NULL,
+ },
+ {
+ USB_VENDOR_CTX, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Chuntex",
+ NULL,
+ },
+ {
+ USB_VENDOR_ASKEY, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Askey Computer",
+ NULL,
+ },
+ {
+ USB_VENDOR_SAITEK, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Saitek",
+ NULL,
+ },
+ {
+ USB_VENDOR_ALCATELT, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Alcatel Telecom",
+ NULL,
+ },
+ {
+ USB_VENDOR_AGFA, 0,
+ USB_KNOWNDEV_NOPROD,
+ "AGFA-Gevaert",
+ NULL,
+ },
+ {
+ USB_VENDOR_ASIAMD, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Asia Microelectronic Development",
+ NULL,
+ },
+ {
+ USB_VENDOR_BIZLINK, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Bizlink International",
+ NULL,
+ },
+ {
+ USB_VENDOR_KEYSPAN, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Keyspan / InnoSys Inc.",
+ NULL,
+ },
+ {
+ USB_VENDOR_AASHIMA, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Aashima Technology",
+ NULL,
+ },
+ {
+ USB_VENDOR_LIEBERT, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Liebert",
+ NULL,
+ },
+ {
+ USB_VENDOR_MULTITECH, 0,
+ USB_KNOWNDEV_NOPROD,
+ "MultiTech",
+ NULL,
+ },
+ {
+ USB_VENDOR_ADS, 0,
+ USB_KNOWNDEV_NOPROD,
+ "ADS Technologies",
+ NULL,
+ },
+ {
+ USB_VENDOR_ALCATELM, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Alcatel Microelectronics",
+ NULL,
+ },
+ {
+ USB_VENDOR_SIRIUS, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Sirius Technologies",
+ NULL,
+ },
+ {
+ USB_VENDOR_GUILLEMOT, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Guillemot",
+ NULL,
+ },
+ {
+ USB_VENDOR_BOSTON, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Boston Acoustics",
+ NULL,
+ },
+ {
+ USB_VENDOR_SMC, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Standard Microsystems",
+ NULL,
+ },
+ {
+ USB_VENDOR_PUTERCOM, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Putercom",
+ NULL,
+ },
+ {
+ USB_VENDOR_MCT, 0,
+ USB_KNOWNDEV_NOPROD,
+ "MCT",
+ NULL,
+ },
+ {
+ USB_VENDOR_IMATION, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Imation",
+ NULL,
+ },
+ {
+ USB_VENDOR_TECLAST, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Teclast",
+ NULL,
+ },
+ {
+ USB_VENDOR_SONYERICSSON, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Sony Ericsson",
+ NULL,
+ },
+ {
+ USB_VENDOR_EICON, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Eicon Networks",
+ NULL,
+ },
+ {
+ USB_VENDOR_SYNTECH, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Syntech Information",
+ NULL,
+ },
+ {
+ USB_VENDOR_DIGITALSTREAM, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Digital Stream",
+ NULL,
+ },
+ {
+ USB_VENDOR_AUREAL, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Aureal Semiconductor",
+ NULL,
+ },
+ {
+ USB_VENDOR_MIDIMAN, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Midiman",
+ NULL,
+ },
+ {
+ USB_VENDOR_CYBERPOWER, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Cyber Power Systems, Inc.",
+ NULL,
+ },
+ {
+ USB_VENDOR_SURECOM, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Surecom Technology",
+ NULL,
+ },
+ {
+ USB_VENDOR_HIDGLOBAL, 0,
+ USB_KNOWNDEV_NOPROD,
+ "HID Global",
+ NULL,
+ },
+ {
+ USB_VENDOR_LINKSYS2, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Linksys",
+ NULL,
+ },
+ {
+ USB_VENDOR_GRIFFIN, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Griffin Technology",
+ NULL,
+ },
+ {
+ USB_VENDOR_SANDISK, 0,
+ USB_KNOWNDEV_NOPROD,
+ "SanDisk",
+ NULL,
+ },
+ {
+ USB_VENDOR_JENOPTIK, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Jenoptik",
+ NULL,
+ },
+ {
+ USB_VENDOR_LOGITEC, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Logitec",
+ NULL,
+ },
+ {
+ USB_VENDOR_NOKIA2, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Nokia",
+ NULL,
+ },
+ {
+ USB_VENDOR_BRIMAX, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Brimax",
+ NULL,
+ },
+ {
+ USB_VENDOR_AXIS, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Axis Communications",
+ NULL,
+ },
+ {
+ USB_VENDOR_ABL, 0,
+ USB_KNOWNDEV_NOPROD,
+ "ABL Electronics",
+ NULL,
+ },
+ {
+ USB_VENDOR_SAGEM, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Sagem",
+ NULL,
+ },
+ {
+ USB_VENDOR_SUNCOMM, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Sun Communications, Inc.",
+ NULL,
+ },
+ {
+ USB_VENDOR_ALFADATA, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Alfadata Computer",
+ NULL,
+ },
+ {
+ USB_VENDOR_NATIONALTECH, 0,
+ USB_KNOWNDEV_NOPROD,
+ "National Technical Systems",
+ NULL,
+ },
+ {
+ USB_VENDOR_ONNTO, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Onnto",
+ NULL,
+ },
+ {
+ USB_VENDOR_BE, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Be",
+ NULL,
+ },
+ {
+ USB_VENDOR_ADMTEK, 0,
+ USB_KNOWNDEV_NOPROD,
+ "ADMtek",
+ NULL,
+ },
+ {
+ USB_VENDOR_COREGA, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Corega",
+ NULL,
+ },
+ {
+ USB_VENDOR_FREECOM, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Freecom",
+ NULL,
+ },
+ {
+ USB_VENDOR_MICROTECH, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Microtech",
+ NULL,
+ },
+ {
+ USB_VENDOR_GENERALINSTMNTS, 0,
+ USB_KNOWNDEV_NOPROD,
+ "General Instruments (Motorola)",
+ NULL,
+ },
+ {
+ USB_VENDOR_OLYMPUS, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Olympus",
+ NULL,
+ },
+ {
+ USB_VENDOR_ABOCOM, 0,
+ USB_KNOWNDEV_NOPROD,
+ "AboCom Systems",
+ NULL,
+ },
+ {
+ USB_VENDOR_KEISOKUGIKEN, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Keisokugiken",
+ NULL,
+ },
+ {
+ USB_VENDOR_ONSPEC, 0,
+ USB_KNOWNDEV_NOPROD,
+ "OnSpec",
+ NULL,
+ },
+ {
+ USB_VENDOR_APG, 0,
+ USB_KNOWNDEV_NOPROD,
+ "APG Cash Drawer",
+ NULL,
+ },
+ {
+ USB_VENDOR_BUG, 0,
+ USB_KNOWNDEV_NOPROD,
+ "B.U.G.",
+ NULL,
+ },
+ {
+ USB_VENDOR_ALLIEDTELESYN, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Allied Telesyn International",
+ NULL,
+ },
+ {
+ USB_VENDOR_AVERMEDIA, 0,
+ USB_KNOWNDEV_NOPROD,
+ "AVerMedia Technologies",
+ NULL,
+ },
+ {
+ USB_VENDOR_SIIG, 0,
+ USB_KNOWNDEV_NOPROD,
+ "SIIG",
+ NULL,
+ },
+ {
+ USB_VENDOR_CASIO, 0,
+ USB_KNOWNDEV_NOPROD,
+ "CASIO",
+ NULL,
+ },
+ {
+ USB_VENDOR_DLINK2, 0,
+ USB_KNOWNDEV_NOPROD,
+ "D-Link",
+ NULL,
+ },
+ {
+ USB_VENDOR_APTIO, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Aptio Products",
+ NULL,
+ },
+ {
+ USB_VENDOR_ARASAN, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Arasan Chip Systems",
+ NULL,
+ },
+ {
+ USB_VENDOR_ALLIEDCABLE, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Allied Cable",
+ NULL,
+ },
+ {
+ USB_VENDOR_STSN, 0,
+ USB_KNOWNDEV_NOPROD,
+ "STSN",
+ NULL,
+ },
+ {
+ USB_VENDOR_CENTURY, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Century Corp",
+ NULL,
+ },
+ {
+ USB_VENDOR_NEWLINK, 0,
+ USB_KNOWNDEV_NOPROD,
+ "NEWlink",
+ NULL,
+ },
+ {
+ USB_VENDOR_ZOOM, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Zoom Telephonics",
+ NULL,
+ },
+ {
+ USB_VENDOR_PCS, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Personal Communication Systems",
+ NULL,
+ },
+ {
+ USB_VENDOR_ALPHASMART, 0,
+ USB_KNOWNDEV_NOPROD,
+ "AlphaSmart, Inc.",
+ NULL,
+ },
+ {
+ USB_VENDOR_BROADLOGIC, 0,
+ USB_KNOWNDEV_NOPROD,
+ "BroadLogic",
+ NULL,
+ },
+ {
+ USB_VENDOR_HANDSPRING, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Handspring",
+ NULL,
+ },
+ {
+ USB_VENDOR_PALM, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Palm Computing",
+ NULL,
+ },
+ {
+ USB_VENDOR_SOURCENEXT, 0,
+ USB_KNOWNDEV_NOPROD,
+ "SOURCENEXT",
+ NULL,
+ },
+ {
+ USB_VENDOR_ACTIONSTAR, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Action Star Enterprise",
+ NULL,
+ },
+ {
+ USB_VENDOR_SAMSUNG_TECHWIN, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Samsung Techwin",
+ NULL,
+ },
+ {
+ USB_VENDOR_ACCTON, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Accton Technology",
+ NULL,
+ },
+ {
+ USB_VENDOR_DIAMOND, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Diamond",
+ NULL,
+ },
+ {
+ USB_VENDOR_NETGEAR, 0,
+ USB_KNOWNDEV_NOPROD,
+ "BayNETGEAR",
+ NULL,
+ },
+ {
+ USB_VENDOR_TOPRE, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Topre Corporation",
+ NULL,
+ },
+ {
+ USB_VENDOR_ACTIVEWIRE, 0,
+ USB_KNOWNDEV_NOPROD,
+ "ActiveWire",
+ NULL,
+ },
+ {
+ USB_VENDOR_BBELECTRONICS, 0,
+ USB_KNOWNDEV_NOPROD,
+ "B&B Electronics",
+ NULL,
+ },
+ {
+ USB_VENDOR_PORTGEAR, 0,
+ USB_KNOWNDEV_NOPROD,
+ "PortGear",
+ NULL,
+ },
+ {
+ USB_VENDOR_NETGEAR2, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Netgear",
+ NULL,
+ },
+ {
+ USB_VENDOR_SYSTEMTALKS, 0,
+ USB_KNOWNDEV_NOPROD,
+ "System Talks",
+ NULL,
+ },
+ {
+ USB_VENDOR_METRICOM, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Metricom",
+ NULL,
+ },
+ {
+ USB_VENDOR_ADESSOKBTEK, 0,
+ USB_KNOWNDEV_NOPROD,
+ "ADESSO/Kbtek America",
+ NULL,
+ },
+ {
+ USB_VENDOR_JATON, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Jaton",
+ NULL,
+ },
+ {
+ USB_VENDOR_APT, 0,
+ USB_KNOWNDEV_NOPROD,
+ "APT Technologies",
+ NULL,
+ },
+ {
+ USB_VENDOR_BOCARESEARCH, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Boca Research",
+ NULL,
+ },
+ {
+ USB_VENDOR_ANDREA, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Andrea Electronics",
+ NULL,
+ },
+ {
+ USB_VENDOR_BURRBROWN, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Burr-Brown Japan",
+ NULL,
+ },
+ {
+ USB_VENDOR_2WIRE, 0,
+ USB_KNOWNDEV_NOPROD,
+ "2Wire",
+ NULL,
+ },
+ {
+ USB_VENDOR_AIPTEK, 0,
+ USB_KNOWNDEV_NOPROD,
+ "AIPTEK International",
+ NULL,
+ },
+ {
+ USB_VENDOR_SMARTBRIDGES, 0,
+ USB_KNOWNDEV_NOPROD,
+ "SmartBridges",
+ NULL,
+ },
+ {
+ USB_VENDOR_FUJITSUSIEMENS, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Fujitsu-Siemens",
+ NULL,
+ },
+ {
+ USB_VENDOR_BILLIONTON, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Billionton Systems",
+ NULL,
+ },
+ {
+ USB_VENDOR_GEMALTO, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Gemalto SA",
+ NULL,
+ },
+ {
+ USB_VENDOR_EXTENDED, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Extended Systems",
+ NULL,
+ },
+ {
+ USB_VENDOR_MSYSTEMS, 0,
+ USB_KNOWNDEV_NOPROD,
+ "M-Systems",
+ NULL,
+ },
+ {
+ USB_VENDOR_DIGIANSWER, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Digianswer",
+ NULL,
+ },
+ {
+ USB_VENDOR_AUTHENTEC, 0,
+ USB_KNOWNDEV_NOPROD,
+ "AuthenTec",
+ NULL,
+ },
+ {
+ USB_VENDOR_AUDIOTECHNICA, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Audio-Technica",
+ NULL,
+ },
+ {
+ USB_VENDOR_TRUMPION, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Trumpion Microelectronics",
+ NULL,
+ },
+ {
+ USB_VENDOR_FEIYA, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Feiya",
+ NULL,
+ },
+ {
+ USB_VENDOR_ALATION, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Alation Systems",
+ NULL,
+ },
+ {
+ USB_VENDOR_GLOBESPAN, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Globespan",
+ NULL,
+ },
+ {
+ USB_VENDOR_CONCORDCAMERA, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Concord Camera",
+ NULL,
+ },
+ {
+ USB_VENDOR_GARMIN, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Garmin International",
+ NULL,
+ },
+ {
+ USB_VENDOR_GOHUBS, 0,
+ USB_KNOWNDEV_NOPROD,
+ "GoHubs",
+ NULL,
+ },
+ {
+ USB_VENDOR_XEROX, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Xerox",
+ NULL,
+ },
+ {
+ USB_VENDOR_BIOMETRIC, 0,
+ USB_KNOWNDEV_NOPROD,
+ "American Biometric Company",
+ NULL,
+ },
+ {
+ USB_VENDOR_TOSHIBA, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Toshiba",
+ NULL,
+ },
+ {
+ USB_VENDOR_PLEXTOR, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Plextor",
+ NULL,
+ },
+ {
+ USB_VENDOR_INTREPIDCS, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Intrepid",
+ NULL,
+ },
+ {
+ USB_VENDOR_YANO, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Yano",
+ NULL,
+ },
+ {
+ USB_VENDOR_KINGSTON, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Kingston Technology",
+ NULL,
+ },
+ {
+ USB_VENDOR_BLUEWATER, 0,
+ USB_KNOWNDEV_NOPROD,
+ "BlueWater Systems",
+ NULL,
+ },
+ {
+ USB_VENDOR_AGILENT, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Agilent Technologies",
+ NULL,
+ },
+ {
+ USB_VENDOR_GUDE, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Gude ADS",
+ NULL,
+ },
+ {
+ USB_VENDOR_PORTSMITH, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Portsmith",
+ NULL,
+ },
+ {
+ USB_VENDOR_ACERW, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Acer",
+ NULL,
+ },
+ {
+ USB_VENDOR_ADIRONDACK, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Adirondack Wire & Cable",
+ NULL,
+ },
+ {
+ USB_VENDOR_BECKHOFF, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Beckhoff",
+ NULL,
+ },
+ {
+ USB_VENDOR_MINDSATWORK, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Minds At Work",
+ NULL,
+ },
+ {
+ USB_VENDOR_POINTCHIPS, 0,
+ USB_KNOWNDEV_NOPROD,
+ "PointChips",
+ NULL,
+ },
+ {
+ USB_VENDOR_INTERSIL, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Intersil",
+ NULL,
+ },
+ {
+ USB_VENDOR_ALTIUS, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Altius Solutions",
+ NULL,
+ },
+ {
+ USB_VENDOR_ARRIS, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Arris Interactive",
+ NULL,
+ },
+ {
+ USB_VENDOR_ACTIVCARD, 0,
+ USB_KNOWNDEV_NOPROD,
+ "ACTIVCARD",
+ NULL,
+ },
+ {
+ USB_VENDOR_ACTISYS, 0,
+ USB_KNOWNDEV_NOPROD,
+ "ACTiSYS",
+ NULL,
+ },
+ {
+ USB_VENDOR_NOVATEL2, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Novatel Wireless",
+ NULL,
+ },
+ {
+ USB_VENDOR_AFOURTECH, 0,
+ USB_KNOWNDEV_NOPROD,
+ "A-FOUR TECH",
+ NULL,
+ },
+ {
+ USB_VENDOR_AIMEX, 0,
+ USB_KNOWNDEV_NOPROD,
+ "AIMEX",
+ NULL,
+ },
+ {
+ USB_VENDOR_ADDONICS, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Addonics Technologies",
+ NULL,
+ },
+ {
+ USB_VENDOR_AKAI, 0,
+ USB_KNOWNDEV_NOPROD,
+ "AKAI professional M.I.",
+ NULL,
+ },
+ {
+ USB_VENDOR_ARESCOM, 0,
+ USB_KNOWNDEV_NOPROD,
+ "ARESCOM",
+ NULL,
+ },
+ {
+ USB_VENDOR_BAY, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Bay Associates",
+ NULL,
+ },
+ {
+ USB_VENDOR_ALTERA, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Altera",
+ NULL,
+ },
+ {
+ USB_VENDOR_CSR, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Cambridge Silicon Radio",
+ NULL,
+ },
+ {
+ USB_VENDOR_TREK, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Trek Technology",
+ NULL,
+ },
+ {
+ USB_VENDOR_ASAHIOPTICAL, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Asahi Optical",
+ NULL,
+ },
+ {
+ USB_VENDOR_BOCASYSTEMS, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Boca Systems",
+ NULL,
+ },
+ {
+ USB_VENDOR_SHANTOU, 0,
+ USB_KNOWNDEV_NOPROD,
+ "ShanTou",
+ NULL,
+ },
+ {
+ USB_VENDOR_MEDIAGEAR, 0,
+ USB_KNOWNDEV_NOPROD,
+ "MediaGear",
+ NULL,
+ },
+ {
+ USB_VENDOR_BROADCOM, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Broadcom",
+ NULL,
+ },
+ {
+ USB_VENDOR_GREENHOUSE, 0,
+ USB_KNOWNDEV_NOPROD,
+ "GREENHOUSE",
+ NULL,
+ },
+ {
+ USB_VENDOR_GEOCAST, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Geocast Network Systems",
+ NULL,
+ },
+ {
+ USB_VENDOR_IDQUANTIQUE, 0,
+ USB_KNOWNDEV_NOPROD,
+ "id Quantique",
+ NULL,
+ },
+ {
+ USB_VENDOR_ZYDAS, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Zydas Technology Corporation",
+ NULL,
+ },
+ {
+ USB_VENDOR_NEODIO, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Neodio",
+ NULL,
+ },
+ {
+ USB_VENDOR_OPTION, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Option N.V.",
+ NULL,
+ },
+ {
+ USB_VENDOR_ASUS, 0,
+ USB_KNOWNDEV_NOPROD,
+ "ASUSTeK Computer",
+ NULL,
+ },
+ {
+ USB_VENDOR_TODOS, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Todos Data System",
+ NULL,
+ },
+ {
+ USB_VENDOR_SIIG2, 0,
+ USB_KNOWNDEV_NOPROD,
+ "SIIG",
+ NULL,
+ },
+ {
+ USB_VENDOR_TEKRAM, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Tekram Technology",
+ NULL,
+ },
+ {
+ USB_VENDOR_HAL, 0,
+ USB_KNOWNDEV_NOPROD,
+ "HAL Corporation",
+ NULL,
+ },
+ {
+ USB_VENDOR_EMS, 0,
+ USB_KNOWNDEV_NOPROD,
+ "EMS Production",
+ NULL,
+ },
+ {
+ USB_VENDOR_NEC2, 0,
+ USB_KNOWNDEV_NOPROD,
+ "NEC",
+ NULL,
+ },
+ {
+ USB_VENDOR_ADLINK, 0,
+ USB_KNOWNDEV_NOPROD,
+ "ADLINK Technoligy, Inc.",
+ NULL,
+ },
+ {
+ USB_VENDOR_ATI2, 0,
+ USB_KNOWNDEV_NOPROD,
+ "ATI",
+ NULL,
+ },
+ {
+ USB_VENDOR_ZEEVO, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Zeevo, Inc.",
+ NULL,
+ },
+ {
+ USB_VENDOR_KURUSUGAWA, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Kurusugawa Electronics, Inc.",
+ NULL,
+ },
+ {
+ USB_VENDOR_SMART, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Smart Technologies",
+ NULL,
+ },
+ {
+ USB_VENDOR_ASIX, 0,
+ USB_KNOWNDEV_NOPROD,
+ "ASIX Electronics",
+ NULL,
+ },
+ {
+ USB_VENDOR_O2MICRO, 0,
+ USB_KNOWNDEV_NOPROD,
+ "O2 Micro, Inc.",
+ NULL,
+ },
+ {
+ USB_VENDOR_USR, 0,
+ USB_KNOWNDEV_NOPROD,
+ "U.S. Robotics",
+ NULL,
+ },
+ {
+ USB_VENDOR_AMBIT, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Ambit Microsystems",
+ NULL,
+ },
+ {
+ USB_VENDOR_HTC, 0,
+ USB_KNOWNDEV_NOPROD,
+ "HTC",
+ NULL,
+ },
+ {
+ USB_VENDOR_REALTEK, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Realtek",
+ NULL,
+ },
+ {
+ USB_VENDOR_MEI, 0,
+ USB_KNOWNDEV_NOPROD,
+ "MEI",
+ NULL,
+ },
+ {
+ USB_VENDOR_ADDONICS2, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Addonics Technology",
+ NULL,
+ },
+ {
+ USB_VENDOR_FSC, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Fujitsu Siemens Computers",
+ NULL,
+ },
+ {
+ USB_VENDOR_AGATE, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Agate Technologies",
+ NULL,
+ },
+ {
+ USB_VENDOR_DMI, 0,
+ USB_KNOWNDEV_NOPROD,
+ "DMI",
+ NULL,
+ },
+ {
+ USB_VENDOR_CHICONY2, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Chicony",
+ NULL,
+ },
+ {
+ USB_VENDOR_REINERSCT, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Reiner-SCT",
+ NULL,
+ },
+ {
+ USB_VENDOR_SEALEVEL, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Sealevel System",
+ NULL,
+ },
+ {
+ USB_VENDOR_LUWEN, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Luwen",
+ NULL,
+ },
+ {
+ USB_VENDOR_KYOCERA2, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Kyocera Wireless Corp.",
+ NULL,
+ },
+ {
+ USB_VENDOR_ZCOM, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Z-Com",
+ NULL,
+ },
+ {
+ USB_VENDOR_ATHEROS2, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Atheros Communications",
+ NULL,
+ },
+ {
+ USB_VENDOR_TANGTOP, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Tangtop",
+ NULL,
+ },
+ {
+ USB_VENDOR_SMC3, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Standard Microsystems",
+ NULL,
+ },
+ {
+ USB_VENDOR_ADDON, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Add-on Technology",
+ NULL,
+ },
+ {
+ USB_VENDOR_ACDC, 0,
+ USB_KNOWNDEV_NOPROD,
+ "American Computer & Digital Components",
+ NULL,
+ },
+ {
+ USB_VENDOR_CMEDIA, 0,
+ USB_KNOWNDEV_NOPROD,
+ "CMEDIA",
+ NULL,
+ },
+ {
+ USB_VENDOR_CONCEPTRONIC, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Conceptronic",
+ NULL,
+ },
+ {
+ USB_VENDOR_SKANHEX, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Skanhex Technology, Inc.",
+ NULL,
+ },
+ {
+ USB_VENDOR_MSI, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Micro Star International",
+ NULL,
+ },
+ {
+ USB_VENDOR_ELCON, 0,
+ USB_KNOWNDEV_NOPROD,
+ "ELCON Systemtechnik",
+ NULL,
+ },
+ {
+ USB_VENDOR_NETAC, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Netac",
+ NULL,
+ },
+ {
+ USB_VENDOR_SITECOMEU, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Sitecom Europe",
+ NULL,
+ },
+ {
+ USB_VENDOR_MOBILEACTION, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Mobile Action",
+ NULL,
+ },
+ {
+ USB_VENDOR_AMIGO, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Amigo Technology",
+ NULL,
+ },
+ {
+ USB_VENDOR_SPEEDDRAGON, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Speed Dragon Multimedia",
+ NULL,
+ },
+ {
+ USB_VENDOR_HAWKING, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Hawking",
+ NULL,
+ },
+ {
+ USB_VENDOR_FOSSIL, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Fossil, Inc",
+ NULL,
+ },
+ {
+ USB_VENDOR_GMATE, 0,
+ USB_KNOWNDEV_NOPROD,
+ "G.Mate, Inc",
+ NULL,
+ },
+ {
+ USB_VENDOR_OTI, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Ours Technology",
+ NULL,
+ },
+ {
+ USB_VENDOR_YISO, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Yiso Wireless Co.",
+ NULL,
+ },
+ {
+ USB_VENDOR_PILOTECH, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Pilotech",
+ NULL,
+ },
+ {
+ USB_VENDOR_NOVATECH, 0,
+ USB_KNOWNDEV_NOPROD,
+ "NovaTech",
+ NULL,
+ },
+ {
+ USB_VENDOR_ITEGNO, 0,
+ USB_KNOWNDEV_NOPROD,
+ "iTegno",
+ NULL,
+ },
+ {
+ USB_VENDOR_WINMAXGROUP, 0,
+ USB_KNOWNDEV_NOPROD,
+ "WinMaxGroup",
+ NULL,
+ },
+ {
+ USB_VENDOR_TOD, 0,
+ USB_KNOWNDEV_NOPROD,
+ "TOD",
+ NULL,
+ },
+ {
+ USB_VENDOR_EGALAX, 0,
+ USB_KNOWNDEV_NOPROD,
+ "eGalax, Inc.",
+ NULL,
+ },
+ {
+ USB_VENDOR_AIRPRIME, 0,
+ USB_KNOWNDEV_NOPROD,
+ "AirPrime, Inc.",
+ NULL,
+ },
+ {
+ USB_VENDOR_MICROTUNE, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Microtune",
+ NULL,
+ },
+ {
+ USB_VENDOR_VTECH, 0,
+ USB_KNOWNDEV_NOPROD,
+ "VTech",
+ NULL,
+ },
+ {
+ USB_VENDOR_FALCOM, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Falcom Wireless Communications GmbH",
+ NULL,
+ },
+ {
+ USB_VENDOR_RIM, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Research In Motion",
+ NULL,
+ },
+ {
+ USB_VENDOR_DYNASTREAM, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Dynastream Innovations",
+ NULL,
+ },
+ {
+ USB_VENDOR_QUALCOMM, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Qualcomm",
+ NULL,
+ },
+ {
+ USB_VENDOR_APACER, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Apacer",
+ NULL,
+ },
+ {
+ USB_VENDOR_MOTOROLA4, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Motorola",
+ NULL,
+ },
+ {
+ USB_VENDOR_AIRPLUS, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Airplus",
+ NULL,
+ },
+ {
+ USB_VENDOR_DESKNOTE, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Desknote",
+ NULL,
+ },
+ {
+ USB_VENDOR_GIGABYTE, 0,
+ USB_KNOWNDEV_NOPROD,
+ "GIGABYTE",
+ NULL,
+ },
+ {
+ USB_VENDOR_WESTERN, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Western Digital",
+ NULL,
+ },
+ {
+ USB_VENDOR_MOTOROLA, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Motorola",
+ NULL,
+ },
+ {
+ USB_VENDOR_CCYU, 0,
+ USB_KNOWNDEV_NOPROD,
+ "CCYU Technology",
+ NULL,
+ },
+ {
+ USB_VENDOR_CURITEL, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Curitel Communications Inc",
+ NULL,
+ },
+ {
+ USB_VENDOR_SILABS2, 0,
+ USB_KNOWNDEV_NOPROD,
+ "SILABS2",
+ NULL,
+ },
+ {
+ USB_VENDOR_USI, 0,
+ USB_KNOWNDEV_NOPROD,
+ "USI",
+ NULL,
+ },
+ {
+ USB_VENDOR_PLX, 0,
+ USB_KNOWNDEV_NOPROD,
+ "PLX",
+ NULL,
+ },
+ {
+ USB_VENDOR_ASANTE, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Asante",
+ NULL,
+ },
+ {
+ USB_VENDOR_SILABS, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Silicon Labs",
+ NULL,
+ },
+ {
+ USB_VENDOR_SILABS3, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Silicon Labs",
+ NULL,
+ },
+ {
+ USB_VENDOR_SILABS4, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Silicon Labs",
+ NULL,
+ },
+ {
+ USB_VENDOR_ACTIONS, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Actions",
+ NULL,
+ },
+ {
+ USB_VENDOR_ANALOG, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Analog Devices",
+ NULL,
+ },
+ {
+ USB_VENDOR_TENX, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Ten X Technology, Inc.",
+ NULL,
+ },
+ {
+ USB_VENDOR_ISSC, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Integrated System Solution Corp.",
+ NULL,
+ },
+ {
+ USB_VENDOR_JRC, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Japan Radio Company",
+ NULL,
+ },
+ {
+ USB_VENDOR_SPHAIRON, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Sphairon Access Systems GmbH",
+ NULL,
+ },
+ {
+ USB_VENDOR_DELORME, 0,
+ USB_KNOWNDEV_NOPROD,
+ "DeLorme",
+ NULL,
+ },
+ {
+ USB_VENDOR_SERVERWORKS, 0,
+ USB_KNOWNDEV_NOPROD,
+ "ServerWorks",
+ NULL,
+ },
+ {
+ USB_VENDOR_DLINK3, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Dlink",
+ NULL,
+ },
+ {
+ USB_VENDOR_ACERCM, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Acer Communications & Multimedia",
+ NULL,
+ },
+ {
+ USB_VENDOR_SIERRA, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Sierra Wireless",
+ NULL,
+ },
+ {
+ USB_VENDOR_SANWA, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Sanwa Electric Instrument Co., Ltd.",
+ NULL,
+ },
+ {
+ USB_VENDOR_TOPFIELD, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Topfield Co., Ltd",
+ NULL,
+ },
+ {
+ USB_VENDOR_SIEMENS3, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Siemens",
+ NULL,
+ },
+ {
+ USB_VENDOR_NETINDEX, 0,
+ USB_KNOWNDEV_NOPROD,
+ "NetIndex",
+ NULL,
+ },
+ {
+ USB_VENDOR_ALCATEL, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Alcatel",
+ NULL,
+ },
+ {
+ USB_VENDOR_UNKNOWN3, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Unknown vendor",
+ NULL,
+ },
+ {
+ USB_VENDOR_TSUNAMI, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Tsunami",
+ NULL,
+ },
+ {
+ USB_VENDOR_PHEENET, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Pheenet",
+ NULL,
+ },
+ {
+ USB_VENDOR_TARGUS, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Targus",
+ NULL,
+ },
+ {
+ USB_VENDOR_TWINMOS, 0,
+ USB_KNOWNDEV_NOPROD,
+ "TwinMOS",
+ NULL,
+ },
+ {
+ USB_VENDOR_TENDA, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Tenda",
+ NULL,
+ },
+ {
+ USB_VENDOR_CREATIVE2, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Creative Labs",
+ NULL,
+ },
+ {
+ USB_VENDOR_BELKIN2, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Belkin Components",
+ NULL,
+ },
+ {
+ USB_VENDOR_CYBERTAN, 0,
+ USB_KNOWNDEV_NOPROD,
+ "CyberTAN Technology",
+ NULL,
+ },
+ {
+ USB_VENDOR_HUAWEI, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Huawei Technologies",
+ NULL,
+ },
+ {
+ USB_VENDOR_ARANEUS, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Araneus Information Systems",
+ NULL,
+ },
+ {
+ USB_VENDOR_TAPWAVE, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Tapwave",
+ NULL,
+ },
+ {
+ USB_VENDOR_AINCOMM, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Aincomm",
+ NULL,
+ },
+ {
+ USB_VENDOR_MOBILITY, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Mobility",
+ NULL,
+ },
+ {
+ USB_VENDOR_DICKSMITH, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Dick Smith Electronics",
+ NULL,
+ },
+ {
+ USB_VENDOR_NETGEAR3, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Netgear",
+ NULL,
+ },
+ {
+ USB_VENDOR_BALTECH, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Baltech",
+ NULL,
+ },
+ {
+ USB_VENDOR_CISCOLINKSYS, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Cisco-Linksys",
+ NULL,
+ },
+ {
+ USB_VENDOR_SHARK, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Shark",
+ NULL,
+ },
+ {
+ USB_VENDOR_AZUREWAVE, 0,
+ USB_KNOWNDEV_NOPROD,
+ "AsureWave",
+ NULL,
+ },
+ {
+ USB_VENDOR_EMTEC, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Emtec",
+ NULL,
+ },
+ {
+ USB_VENDOR_NOVATEL, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Novatel Wireless",
+ NULL,
+ },
+ {
+ USB_VENDOR_MERLIN, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Merlin",
+ NULL,
+ },
+ {
+ USB_VENDOR_WISTRONNEWEB, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Wistron NeWeb",
+ NULL,
+ },
+ {
+ USB_VENDOR_RADIOSHACK, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Radio Shack",
+ NULL,
+ },
+ {
+ USB_VENDOR_HUAWEI3COM, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Huawei-3Com",
+ NULL,
+ },
+ {
+ USB_VENDOR_ABOCOM2, 0,
+ USB_KNOWNDEV_NOPROD,
+ "AboCom Systems",
+ NULL,
+ },
+ {
+ USB_VENDOR_SILICOM, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Silicom",
+ NULL,
+ },
+ {
+ USB_VENDOR_RALINK, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Ralink Technology",
+ NULL,
+ },
+ {
+ USB_VENDOR_IMAGINATION, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Imagination Technologies",
+ NULL,
+ },
+ {
+ USB_VENDOR_CONCEPTRONIC2, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Conceptronic",
+ NULL,
+ },
+ {
+ USB_VENDOR_SUPERTOP, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Super Top",
+ NULL,
+ },
+ {
+ USB_VENDOR_PLANEX3, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Planex Communications",
+ NULL,
+ },
+ {
+ USB_VENDOR_SILICONPORTALS, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Silicon Portals",
+ NULL,
+ },
+ {
+ USB_VENDOR_UBIQUAM, 0,
+ USB_KNOWNDEV_NOPROD,
+ "UBIQUAM Co., Ltd.",
+ NULL,
+ },
+ {
+ USB_VENDOR_JMICRON, 0,
+ USB_KNOWNDEV_NOPROD,
+ "JMicron",
+ NULL,
+ },
+ {
+ USB_VENDOR_UBLOX, 0,
+ USB_KNOWNDEV_NOPROD,
+ "U-blox",
+ NULL,
+ },
+ {
+ USB_VENDOR_PNY, 0,
+ USB_KNOWNDEV_NOPROD,
+ "PNY",
+ NULL,
+ },
+ {
+ USB_VENDOR_OWEN, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Owen",
+ NULL,
+ },
+ {
+ USB_VENDOR_OQO, 0,
+ USB_KNOWNDEV_NOPROD,
+ "OQO",
+ NULL,
+ },
+ {
+ USB_VENDOR_UMEDIA, 0,
+ USB_KNOWNDEV_NOPROD,
+ "U-MEDIA Communications",
+ NULL,
+ },
+ {
+ USB_VENDOR_FIBERLINE, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Fiberline",
+ NULL,
+ },
+ {
+ USB_VENDOR_SPARKLAN, 0,
+ USB_KNOWNDEV_NOPROD,
+ "SparkLAN",
+ NULL,
+ },
+ {
+ USB_VENDOR_AMIT2, 0,
+ USB_KNOWNDEV_NOPROD,
+ "AMIT",
+ NULL,
+ },
+ {
+ USB_VENDOR_SOHOWARE, 0,
+ USB_KNOWNDEV_NOPROD,
+ "SOHOware",
+ NULL,
+ },
+ {
+ USB_VENDOR_UMAX, 0,
+ USB_KNOWNDEV_NOPROD,
+ "UMAX Data Systems",
+ NULL,
+ },
+ {
+ USB_VENDOR_INSIDEOUT, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Inside Out Networks",
+ NULL,
+ },
+ {
+ USB_VENDOR_AMOI, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Amoi Electronics",
+ NULL,
+ },
+ {
+ USB_VENDOR_GOODWAY, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Good Way Technology",
+ NULL,
+ },
+ {
+ USB_VENDOR_ENTREGA, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Entrega",
+ NULL,
+ },
+ {
+ USB_VENDOR_ACTIONTEC, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Actiontec Electronics",
+ NULL,
+ },
+ {
+ USB_VENDOR_CLIPSAL, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Clipsal",
+ NULL,
+ },
+ {
+ USB_VENDOR_CISCOLINKSYS2, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Cisco-Linksys",
+ NULL,
+ },
+ {
+ USB_VENDOR_ATHEROS, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Atheros Communications",
+ NULL,
+ },
+ {
+ USB_VENDOR_GIGASET, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Gigaset",
+ NULL,
+ },
+ {
+ USB_VENDOR_GLOBALSUN, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Global Sun Technology",
+ NULL,
+ },
+ {
+ USB_VENDOR_ANYDATA, 0,
+ USB_KNOWNDEV_NOPROD,
+ "AnyDATA Corporation",
+ NULL,
+ },
+ {
+ USB_VENDOR_JABLOTRON, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Jablotron",
+ NULL,
+ },
+ {
+ USB_VENDOR_CMOTECH, 0,
+ USB_KNOWNDEV_NOPROD,
+ "C-motech",
+ NULL,
+ },
+ {
+ USB_VENDOR_AXESSTEL, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Axesstel Co., Ltd.",
+ NULL,
+ },
+ {
+ USB_VENDOR_LINKSYS4, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Linksys",
+ NULL,
+ },
+ {
+ USB_VENDOR_SENAO, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Senao",
+ NULL,
+ },
+ {
+ USB_VENDOR_ASUS2, 0,
+ USB_KNOWNDEV_NOPROD,
+ "ASUS",
+ NULL,
+ },
+ {
+ USB_VENDOR_SWEEX2, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Sweex",
+ NULL,
+ },
+ {
+ USB_VENDOR_METAGEEK, 0,
+ USB_KNOWNDEV_NOPROD,
+ "MetaGeek",
+ NULL,
+ },
+ {
+ USB_VENDOR_WAVESENSE, 0,
+ USB_KNOWNDEV_NOPROD,
+ "WaveSense",
+ NULL,
+ },
+ {
+ USB_VENDOR_VAISALA, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Vaisala",
+ NULL,
+ },
+ {
+ USB_VENDOR_AMIT, 0,
+ USB_KNOWNDEV_NOPROD,
+ "AMIT",
+ NULL,
+ },
+ {
+ USB_VENDOR_GOOGLE, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Google",
+ NULL,
+ },
+ {
+ USB_VENDOR_QCOM, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Qcom",
+ NULL,
+ },
+ {
+ USB_VENDOR_ELV, 0,
+ USB_KNOWNDEV_NOPROD,
+ "ELV",
+ NULL,
+ },
+ {
+ USB_VENDOR_LINKSYS3, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Linksys",
+ NULL,
+ },
+ {
+ USB_VENDOR_QUALCOMMINC, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Qualcomm, Incorporated",
+ NULL,
+ },
+ {
+ USB_VENDOR_WCH2, 0,
+ USB_KNOWNDEV_NOPROD,
+ "QinHeng Electronics",
+ NULL,
+ },
+ {
+ USB_VENDOR_STELERA, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Stelera Wireless",
+ NULL,
+ },
+ {
+ USB_VENDOR_MATRIXORBITAL, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Matrix Orbital",
+ NULL,
+ },
+ {
+ USB_VENDOR_OVISLINK, 0,
+ USB_KNOWNDEV_NOPROD,
+ "OvisLink",
+ NULL,
+ },
+ {
+ USB_VENDOR_TCTMOBILE, 0,
+ USB_KNOWNDEV_NOPROD,
+ "TCT Mobile",
+ NULL,
+ },
+ {
+ USB_VENDOR_TELIT, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Telit",
+ NULL,
+ },
+ {
+ USB_VENDOR_LONGCHEER, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Longcheer Holdings, Ltd.",
+ NULL,
+ },
+ {
+ USB_VENDOR_MPMAN, 0,
+ USB_KNOWNDEV_NOPROD,
+ "MpMan",
+ NULL,
+ },
+ {
+ USB_VENDOR_DRESDENELEKTRONIK, 0,
+ USB_KNOWNDEV_NOPROD,
+ "dresden elektronik",
+ NULL,
+ },
+ {
+ USB_VENDOR_NEOTEL, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Neotel",
+ NULL,
+ },
+ {
+ USB_VENDOR_PEGATRON, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Pegatron",
+ NULL,
+ },
+ {
+ USB_VENDOR_QISDA, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Qisda",
+ NULL,
+ },
+ {
+ USB_VENDOR_METAGEEK2, 0,
+ USB_KNOWNDEV_NOPROD,
+ "MetaGeek",
+ NULL,
+ },
+ {
+ USB_VENDOR_ALINK, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Alink",
+ NULL,
+ },
+ {
+ USB_VENDOR_AIRTIES, 0,
+ USB_KNOWNDEV_NOPROD,
+ "AirTies",
+ NULL,
+ },
+ {
+ USB_VENDOR_DLINK, 0,
+ USB_KNOWNDEV_NOPROD,
+ "D-Link",
+ NULL,
+ },
+ {
+ USB_VENDOR_PLANEX2, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Planex Communications",
+ NULL,
+ },
+ {
+ USB_VENDOR_HAUPPAUGE2, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Hauppauge Computer Works",
+ NULL,
+ },
+ {
+ USB_VENDOR_TLAYTECH, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Tlay Tech",
+ NULL,
+ },
+ {
+ USB_VENDOR_ENCORE, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Encore",
+ NULL,
+ },
+ {
+ USB_VENDOR_PARA, 0,
+ USB_KNOWNDEV_NOPROD,
+ "PARA Industrial",
+ NULL,
+ },
+ {
+ USB_VENDOR_ERICSSON, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Ericsson",
+ NULL,
+ },
+ {
+ USB_VENDOR_MOTOROLA2, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Motorola",
+ NULL,
+ },
+ {
+ USB_VENDOR_TRIPPLITE, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Tripp-Lite",
+ NULL,
+ },
+ {
+ USB_VENDOR_HIROSE, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Hirose Electric",
+ NULL,
+ },
+ {
+ USB_VENDOR_NHJ, 0,
+ USB_KNOWNDEV_NOPROD,
+ "NHJ",
+ NULL,
+ },
+ {
+ USB_VENDOR_PLANEX, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Planex Communications",
+ NULL,
+ },
+ {
+ USB_VENDOR_VIDZMEDIA, 0,
+ USB_KNOWNDEV_NOPROD,
+ "VidzMedia Pte Ltd",
+ NULL,
+ },
+ {
+ USB_VENDOR_AEI, 0,
+ USB_KNOWNDEV_NOPROD,
+ "AEI",
+ NULL,
+ },
+ {
+ USB_VENDOR_HANK, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Hank Connection",
+ NULL,
+ },
+ {
+ USB_VENDOR_PQI, 0,
+ USB_KNOWNDEV_NOPROD,
+ "PQI",
+ NULL,
+ },
+ {
+ USB_VENDOR_DAISY, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Daisy Technology",
+ NULL,
+ },
+ {
+ USB_VENDOR_NI, 0,
+ USB_KNOWNDEV_NOPROD,
+ "National Instruments",
+ NULL,
+ },
+ {
+ USB_VENDOR_MICRONET, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Micronet Communications",
+ NULL,
+ },
+ {
+ USB_VENDOR_IODATA2, 0,
+ USB_KNOWNDEV_NOPROD,
+ "I-O Data",
+ NULL,
+ },
+ {
+ USB_VENDOR_IRIVER, 0,
+ USB_KNOWNDEV_NOPROD,
+ "iRiver",
+ NULL,
+ },
+ {
+ USB_VENDOR_DELL, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Dell",
+ NULL,
+ },
+ {
+ USB_VENDOR_WCH, 0,
+ USB_KNOWNDEV_NOPROD,
+ "QinHeng Electronics",
+ NULL,
+ },
+ {
+ USB_VENDOR_ACEECA, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Aceeca",
+ NULL,
+ },
+ {
+ USB_VENDOR_AVERATEC, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Averatec",
+ NULL,
+ },
+ {
+ USB_VENDOR_SWEEX, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Sweex",
+ NULL,
+ },
+ {
+ USB_VENDOR_PROLIFIC2, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Prolific Technologies",
+ NULL,
+ },
+ {
+ USB_VENDOR_ONSPEC2, 0,
+ USB_KNOWNDEV_NOPROD,
+ "OnSpec Electronic Inc.",
+ NULL,
+ },
+ {
+ USB_VENDOR_ZINWELL, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Zinwell",
+ NULL,
+ },
+ {
+ USB_VENDOR_SITECOM, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Sitecom",
+ NULL,
+ },
+ {
+ USB_VENDOR_ARKMICRO, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Arkmicro Technologies Inc.",
+ NULL,
+ },
+ {
+ USB_VENDOR_3COM2, 0,
+ USB_KNOWNDEV_NOPROD,
+ "3Com",
+ NULL,
+ },
+ {
+ USB_VENDOR_EDIMAX, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Edimax",
+ NULL,
+ },
+ {
+ USB_VENDOR_INTEL, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Intel",
+ NULL,
+ },
+ {
+ USB_VENDOR_INTEL2, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Intel",
+ NULL,
+ },
+ {
+ USB_VENDOR_ALLWIN, 0,
+ USB_KNOWNDEV_NOPROD,
+ "ALLWIN Tech",
+ NULL,
+ },
+ {
+ USB_VENDOR_SITECOM2, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Sitecom",
+ NULL,
+ },
+ {
+ USB_VENDOR_MOSCHIP, 0,
+ USB_KNOWNDEV_NOPROD,
+ "MosChip Semiconductor",
+ NULL,
+ },
+ {
+ USB_VENDOR_MARVELL, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Marvell Technology Group Ltd.",
+ NULL,
+ },
+ {
+ USB_VENDOR_3COM3, 0,
+ USB_KNOWNDEV_NOPROD,
+ "3Com",
+ NULL,
+ },
+ {
+ USB_VENDOR_DATAAPEX, 0,
+ USB_KNOWNDEV_NOPROD,
+ "DataApex",
+ NULL,
+ },
+ {
+ USB_VENDOR_HP2, 0,
+ USB_KNOWNDEV_NOPROD,
+ "Hewlett Packard",
+ NULL,
+ },
+ {
+ USB_VENDOR_USRP, 0,
+ USB_KNOWNDEV_NOPROD,
+ "GNU Radio USRP",
+ NULL,
+ },
+ { 0, 0, 0, NULL, NULL, }
+};
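
The known-device table above is terminated by an all-zero/NULL sentinel entry rather than a stored length, and rows flagged USB_KNOWNDEV_NOPROD name only the vendor. A minimal sketch of how such a table can be scanned under those assumptions; the struct layout, the helper name, and the placeholder flag value are illustrative, not code from this tree:

#include <stddef.h>
#include <stdint.h>

#ifndef USB_KNOWNDEV_NOPROD
#define USB_KNOWNDEV_NOPROD 0x01   /* placeholder; real value is in the tree's usbdevs header */
#endif

struct usb_knowndev {
        uint16_t vendor;            /* vendor ID to match */
        uint16_t product;           /* product ID, 0 when unused */
        int flags;                  /* e.g. USB_KNOWNDEV_NOPROD */
        const char *vendorname;
        const char *productname;    /* NULL for vendor-only rows */
};

/*
 * Scan until the NULL-vendorname sentinel.  Vendor-only rows
 * (USB_KNOWNDEV_NOPROD) match any product of that vendor, so exact
 * product rows are expected to precede them in the table.
 */
static const struct usb_knowndev *
usb_lookup_knowndev(const struct usb_knowndev *tbl, uint16_t vendor,
    uint16_t product)
{
        const struct usb_knowndev *kdp;

        for (kdp = tbl; kdp->vendorname != NULL; kdp++) {
                if (kdp->vendor != vendor)
                        continue;
                if ((kdp->flags & USB_KNOWNDEV_NOPROD) != 0 ||
                    kdp->product == product)
                        return (kdp);
        }
        return (NULL);
}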
diff --git a/rtems/freebsd/local/vnode_if.h b/rtems/freebsd/local/vnode_if.h
new file mode 100644
index 00000000..de1005b9
--- /dev/null
+++ b/rtems/freebsd/local/vnode_if.h
@@ -0,0 +1,1546 @@
+/*
+ * This file is produced automatically.
+ * Do not modify anything in here by hand.
+ *
+ * Created from $FreeBSD$
+ */
+
+extern struct vnodeop_desc vop_default_desc;
+#include <rtems/freebsd/local/vnode_if_typedef.h>
+#include <rtems/freebsd/local/vnode_if_newproto.h>
+struct vop_islocked_args {
+ struct vop_generic_args a_gen;
+ struct vnode *a_vp;
+};
+
+extern struct vnodeop_desc vop_islocked_desc;
+
+int VOP_ISLOCKED_AP(struct vop_islocked_args *);
+int VOP_ISLOCKED_APV(struct vop_vector *vop, struct vop_islocked_args *);
+
+static __inline int VOP_ISLOCKED(
+ struct vnode *vp)
+{
+ struct vop_islocked_args a;
+
+ a.a_gen.a_desc = &vop_islocked_desc;
+ a.a_vp = vp;
+ return (VOP_ISLOCKED_APV(vp->v_op, &a));
+}
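
Every operation in this generated header follows the pattern just shown for VOP_ISLOCKED: a vop_X_args struct packs the arguments, and the VOP_X() inline fills it and dispatches through the vnode's vop_vector via VOP_X_APV(). (The _AP variant takes an already-packed args struct; the _APV variant additionally names the vector to start from, which lets stacking layers re-dispatch.) A hedged sketch of the consumer side, a filesystem supplying one of these operations; the vop_vector field names follow the usual FreeBSD convention and are assumptions here, not quoted from this tree:

/* Hypothetical filesystem hook: receives the args struct that the
 * VOP_ISLOCKED() inline above packs before dispatching. */
static int
myfs_islocked(struct vop_islocked_args *ap)
{
        /* Assumes the standard FreeBSD vnode lock fields. */
        return (lockstatus(ap->a_vp->v_vnlock));
}

/* Slots left NULL fall through to vop_default. */
struct vop_vector myfs_vnodeops = {
        .vop_default = &default_vnodeops,
        .vop_islocked = myfs_islocked,
};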
+
+struct vop_lookup_args {
+ struct vop_generic_args a_gen;
+ struct vnode *a_dvp;
+ struct vnode **a_vpp;
+ struct componentname *a_cnp;
+};
+
+extern struct vnodeop_desc vop_lookup_desc;
+
+int VOP_LOOKUP_AP(struct vop_lookup_args *);
+int VOP_LOOKUP_APV(struct vop_vector *vop, struct vop_lookup_args *);
+
+static __inline int VOP_LOOKUP(
+ struct vnode *dvp,
+ struct vnode **vpp,
+ struct componentname *cnp)
+{
+ struct vop_lookup_args a;
+
+ a.a_gen.a_desc = &vop_lookup_desc;
+ a.a_dvp = dvp;
+ a.a_vpp = vpp;
+ a.a_cnp = cnp;
+ return (VOP_LOOKUP_APV(dvp->v_op, &a));
+}
+
+struct vop_cachedlookup_args {
+ struct vop_generic_args a_gen;
+ struct vnode *a_dvp;
+ struct vnode **a_vpp;
+ struct componentname *a_cnp;
+};
+
+extern struct vnodeop_desc vop_cachedlookup_desc;
+
+int VOP_CACHEDLOOKUP_AP(struct vop_cachedlookup_args *);
+int VOP_CACHEDLOOKUP_APV(struct vop_vector *vop, struct vop_cachedlookup_args *);
+
+static __inline int VOP_CACHEDLOOKUP(
+ struct vnode *dvp,
+ struct vnode **vpp,
+ struct componentname *cnp)
+{
+ struct vop_cachedlookup_args a;
+
+ a.a_gen.a_desc = &vop_cachedlookup_desc;
+ a.a_dvp = dvp;
+ a.a_vpp = vpp;
+ a.a_cnp = cnp;
+ return (VOP_CACHEDLOOKUP_APV(dvp->v_op, &a));
+}
+
+struct vop_create_args {
+ struct vop_generic_args a_gen;
+ struct vnode *a_dvp;
+ struct vnode **a_vpp;
+ struct componentname *a_cnp;
+ struct vattr *a_vap;
+};
+
+extern struct vnodeop_desc vop_create_desc;
+
+int VOP_CREATE_AP(struct vop_create_args *);
+int VOP_CREATE_APV(struct vop_vector *vop, struct vop_create_args *);
+
+static __inline int VOP_CREATE(
+ struct vnode *dvp,
+ struct vnode **vpp,
+ struct componentname *cnp,
+ struct vattr *vap)
+{
+ struct vop_create_args a;
+
+ a.a_gen.a_desc = &vop_create_desc;
+ a.a_dvp = dvp;
+ a.a_vpp = vpp;
+ a.a_cnp = cnp;
+ a.a_vap = vap;
+ return (VOP_CREATE_APV(dvp->v_op, &a));
+}
+
+struct vop_whiteout_args {
+ struct vop_generic_args a_gen;
+ struct vnode *a_dvp;
+ struct componentname *a_cnp;
+ int a_flags;
+};
+
+extern struct vnodeop_desc vop_whiteout_desc;
+
+int VOP_WHITEOUT_AP(struct vop_whiteout_args *);
+int VOP_WHITEOUT_APV(struct vop_vector *vop, struct vop_whiteout_args *);
+
+static __inline int VOP_WHITEOUT(
+ struct vnode *dvp,
+ struct componentname *cnp,
+ int flags)
+{
+ struct vop_whiteout_args a;
+
+ a.a_gen.a_desc = &vop_whiteout_desc;
+ a.a_dvp = dvp;
+ a.a_cnp = cnp;
+ a.a_flags = flags;
+ return (VOP_WHITEOUT_APV(dvp->v_op, &a));
+}
+
+struct vop_mknod_args {
+ struct vop_generic_args a_gen;
+ struct vnode *a_dvp;
+ struct vnode **a_vpp;
+ struct componentname *a_cnp;
+ struct vattr *a_vap;
+};
+
+extern struct vnodeop_desc vop_mknod_desc;
+
+int VOP_MKNOD_AP(struct vop_mknod_args *);
+int VOP_MKNOD_APV(struct vop_vector *vop, struct vop_mknod_args *);
+
+static __inline int VOP_MKNOD(
+ struct vnode *dvp,
+ struct vnode **vpp,
+ struct componentname *cnp,
+ struct vattr *vap)
+{
+ struct vop_mknod_args a;
+
+ a.a_gen.a_desc = &vop_mknod_desc;
+ a.a_dvp = dvp;
+ a.a_vpp = vpp;
+ a.a_cnp = cnp;
+ a.a_vap = vap;
+ return (VOP_MKNOD_APV(dvp->v_op, &a));
+}
+
+struct vop_open_args {
+ struct vop_generic_args a_gen;
+ struct vnode *a_vp;
+ int a_mode;
+ struct ucred *a_cred;
+ struct thread *a_td;
+ struct file *a_fp;
+};
+
+extern struct vnodeop_desc vop_open_desc;
+
+int VOP_OPEN_AP(struct vop_open_args *);
+int VOP_OPEN_APV(struct vop_vector *vop, struct vop_open_args *);
+
+static __inline int VOP_OPEN(
+ struct vnode *vp,
+ int mode,
+ struct ucred *cred,
+ struct thread *td,
+ struct file *fp)
+{
+ struct vop_open_args a;
+
+ a.a_gen.a_desc = &vop_open_desc;
+ a.a_vp = vp;
+ a.a_mode = mode;
+ a.a_cred = cred;
+ a.a_td = td;
+ a.a_fp = fp;
+ return (VOP_OPEN_APV(vp->v_op, &a));
+}
+
+struct vop_close_args {
+ struct vop_generic_args a_gen;
+ struct vnode *a_vp;
+ int a_fflag;
+ struct ucred *a_cred;
+ struct thread *a_td;
+};
+
+extern struct vnodeop_desc vop_close_desc;
+
+int VOP_CLOSE_AP(struct vop_close_args *);
+int VOP_CLOSE_APV(struct vop_vector *vop, struct vop_close_args *);
+
+static __inline int VOP_CLOSE(
+ struct vnode *vp,
+ int fflag,
+ struct ucred *cred,
+ struct thread *td)
+{
+ struct vop_close_args a;
+
+ a.a_gen.a_desc = &vop_close_desc;
+ a.a_vp = vp;
+ a.a_fflag = fflag;
+ a.a_cred = cred;
+ a.a_td = td;
+ return (VOP_CLOSE_APV(vp->v_op, &a));
+}
+
+struct vop_access_args {
+ struct vop_generic_args a_gen;
+ struct vnode *a_vp;
+ accmode_t a_accmode;
+ struct ucred *a_cred;
+ struct thread *a_td;
+};
+
+extern struct vnodeop_desc vop_access_desc;
+
+int VOP_ACCESS_AP(struct vop_access_args *);
+int VOP_ACCESS_APV(struct vop_vector *vop, struct vop_access_args *);
+
+static __inline int VOP_ACCESS(
+ struct vnode *vp,
+ accmode_t accmode,
+ struct ucred *cred,
+ struct thread *td)
+{
+ struct vop_access_args a;
+
+ a.a_gen.a_desc = &vop_access_desc;
+ a.a_vp = vp;
+ a.a_accmode = accmode;
+ a.a_cred = cred;
+ a.a_td = td;
+ return (VOP_ACCESS_APV(vp->v_op, &a));
+}
+
+struct vop_accessx_args {
+ struct vop_generic_args a_gen;
+ struct vnode *a_vp;
+ accmode_t a_accmode;
+ struct ucred *a_cred;
+ struct thread *a_td;
+};
+
+extern struct vnodeop_desc vop_accessx_desc;
+
+int VOP_ACCESSX_AP(struct vop_accessx_args *);
+int VOP_ACCESSX_APV(struct vop_vector *vop, struct vop_accessx_args *);
+
+static __inline int VOP_ACCESSX(
+ struct vnode *vp,
+ accmode_t accmode,
+ struct ucred *cred,
+ struct thread *td)
+{
+ struct vop_accessx_args a;
+
+ a.a_gen.a_desc = &vop_accessx_desc;
+ a.a_vp = vp;
+ a.a_accmode = accmode;
+ a.a_cred = cred;
+ a.a_td = td;
+ return (VOP_ACCESSX_APV(vp->v_op, &a));
+}
+
+struct vop_getattr_args {
+ struct vop_generic_args a_gen;
+ struct vnode *a_vp;
+ struct vattr *a_vap;
+ struct ucred *a_cred;
+};
+
+extern struct vnodeop_desc vop_getattr_desc;
+
+int VOP_GETATTR_AP(struct vop_getattr_args *);
+int VOP_GETATTR_APV(struct vop_vector *vop, struct vop_getattr_args *);
+
+static __inline int VOP_GETATTR(
+ struct vnode *vp,
+ struct vattr *vap,
+ struct ucred *cred)
+{
+ struct vop_getattr_args a;
+
+ a.a_gen.a_desc = &vop_getattr_desc;
+ a.a_vp = vp;
+ a.a_vap = vap;
+ a.a_cred = cred;
+ return (VOP_GETATTR_APV(vp->v_op, &a));
+}
+
+struct vop_setattr_args {
+ struct vop_generic_args a_gen;
+ struct vnode *a_vp;
+ struct vattr *a_vap;
+ struct ucred *a_cred;
+};
+
+extern struct vnodeop_desc vop_setattr_desc;
+
+int VOP_SETATTR_AP(struct vop_setattr_args *);
+int VOP_SETATTR_APV(struct vop_vector *vop, struct vop_setattr_args *);
+
+static __inline int VOP_SETATTR(
+ struct vnode *vp,
+ struct vattr *vap,
+ struct ucred *cred)
+{
+ struct vop_setattr_args a;
+
+ a.a_gen.a_desc = &vop_setattr_desc;
+ a.a_vp = vp;
+ a.a_vap = vap;
+ a.a_cred = cred;
+ return (VOP_SETATTR_APV(vp->v_op, &a));
+}
+
+struct vop_markatime_args {
+ struct vop_generic_args a_gen;
+ struct vnode *a_vp;
+};
+
+extern struct vnodeop_desc vop_markatime_desc;
+
+int VOP_MARKATIME_AP(struct vop_markatime_args *);
+int VOP_MARKATIME_APV(struct vop_vector *vop, struct vop_markatime_args *);
+
+static __inline int VOP_MARKATIME(
+ struct vnode *vp)
+{
+ struct vop_markatime_args a;
+
+ a.a_gen.a_desc = &vop_markatime_desc;
+ a.a_vp = vp;
+ return (VOP_MARKATIME_APV(vp->v_op, &a));
+}
+
+struct vop_read_args {
+ struct vop_generic_args a_gen;
+ struct vnode *a_vp;
+ struct uio *a_uio;
+ int a_ioflag;
+ struct ucred *a_cred;
+};
+
+extern struct vnodeop_desc vop_read_desc;
+
+int VOP_READ_AP(struct vop_read_args *);
+int VOP_READ_APV(struct vop_vector *vop, struct vop_read_args *);
+
+static __inline int VOP_READ(
+ struct vnode *vp,
+ struct uio *uio,
+ int ioflag,
+ struct ucred *cred)
+{
+ struct vop_read_args a;
+
+ a.a_gen.a_desc = &vop_read_desc;
+ a.a_vp = vp;
+ a.a_uio = uio;
+ a.a_ioflag = ioflag;
+ a.a_cred = cred;
+ return (VOP_READ_APV(vp->v_op, &a));
+}
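
On the caller side, the generated wrappers are used with an on-stack uio describing the transfer. A sketch of a kernel-space read through the VOP_READ() wrapper above, assuming the vnode is already opened and locked; the helper name is illustrative only:

/* Hypothetical helper: read len bytes at offset off from a locked vnode
 * into a kernel buffer via the generated VOP_READ() inline. */
static int
read_block(struct vnode *vp, void *buf, size_t len, off_t off,
    struct ucred *cred)
{
        struct iovec iov;
        struct uio uio;

        iov.iov_base = buf;
        iov.iov_len = len;
        uio.uio_iov = &iov;
        uio.uio_iovcnt = 1;
        uio.uio_offset = off;
        uio.uio_resid = len;
        uio.uio_segflg = UIO_SYSSPACE;  /* buffer is kernel memory */
        uio.uio_rw = UIO_READ;
        uio.uio_td = curthread;
        return (VOP_READ(vp, &uio, IO_NODELOCKED, cred));
}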
+
+struct vop_write_args {
+ struct vop_generic_args a_gen;
+ struct vnode *a_vp;
+ struct uio *a_uio;
+ int a_ioflag;
+ struct ucred *a_cred;
+};
+
+extern struct vnodeop_desc vop_write_desc;
+
+int VOP_WRITE_AP(struct vop_write_args *);
+int VOP_WRITE_APV(struct vop_vector *vop, struct vop_write_args *);
+
+static __inline int VOP_WRITE(
+ struct vnode *vp,
+ struct uio *uio,
+ int ioflag,
+ struct ucred *cred)
+{
+ struct vop_write_args a;
+
+ a.a_gen.a_desc = &vop_write_desc;
+ a.a_vp = vp;
+ a.a_uio = uio;
+ a.a_ioflag = ioflag;
+ a.a_cred = cred;
+ return (VOP_WRITE_APV(vp->v_op, &a));
+}
+
+struct vop_ioctl_args {
+ struct vop_generic_args a_gen;
+ struct vnode *a_vp;
+ u_long a_command;
+ void *a_data;
+ int a_fflag;
+ struct ucred *a_cred;
+ struct thread *a_td;
+};
+
+extern struct vnodeop_desc vop_ioctl_desc;
+
+int VOP_IOCTL_AP(struct vop_ioctl_args *);
+int VOP_IOCTL_APV(struct vop_vector *vop, struct vop_ioctl_args *);
+
+static __inline int VOP_IOCTL(
+ struct vnode *vp,
+ u_long command,
+ void *data,
+ int fflag,
+ struct ucred *cred,
+ struct thread *td)
+{
+ struct vop_ioctl_args a;
+
+ a.a_gen.a_desc = &vop_ioctl_desc;
+ a.a_vp = vp;
+ a.a_command = command;
+ a.a_data = data;
+ a.a_fflag = fflag;
+ a.a_cred = cred;
+ a.a_td = td;
+ return (VOP_IOCTL_APV(vp->v_op, &a));
+}
+
+struct vop_poll_args {
+ struct vop_generic_args a_gen;
+ struct vnode *a_vp;
+ int a_events;
+ struct ucred *a_cred;
+ struct thread *a_td;
+};
+
+extern struct vnodeop_desc vop_poll_desc;
+
+int VOP_POLL_AP(struct vop_poll_args *);
+int VOP_POLL_APV(struct vop_vector *vop, struct vop_poll_args *);
+
+static __inline int VOP_POLL(
+ struct vnode *vp,
+ int events,
+ struct ucred *cred,
+ struct thread *td)
+{
+ struct vop_poll_args a;
+
+ a.a_gen.a_desc = &vop_poll_desc;
+ a.a_vp = vp;
+ a.a_events = events;
+ a.a_cred = cred;
+ a.a_td = td;
+ return (VOP_POLL_APV(vp->v_op, &a));
+}
+
+struct vop_kqfilter_args {
+ struct vop_generic_args a_gen;
+ struct vnode *a_vp;
+ struct knote *a_kn;
+};
+
+extern struct vnodeop_desc vop_kqfilter_desc;
+
+int VOP_KQFILTER_AP(struct vop_kqfilter_args *);
+int VOP_KQFILTER_APV(struct vop_vector *vop, struct vop_kqfilter_args *);
+
+static __inline int VOP_KQFILTER(
+ struct vnode *vp,
+ struct knote *kn)
+{
+ struct vop_kqfilter_args a;
+
+ a.a_gen.a_desc = &vop_kqfilter_desc;
+ a.a_vp = vp;
+ a.a_kn = kn;
+ return (VOP_KQFILTER_APV(vp->v_op, &a));
+}
+
+struct vop_revoke_args {
+ struct vop_generic_args a_gen;
+ struct vnode *a_vp;
+ int a_flags;
+};
+
+extern struct vnodeop_desc vop_revoke_desc;
+
+int VOP_REVOKE_AP(struct vop_revoke_args *);
+int VOP_REVOKE_APV(struct vop_vector *vop, struct vop_revoke_args *);
+
+static __inline int VOP_REVOKE(
+ struct vnode *vp,
+ int flags)
+{
+ struct vop_revoke_args a;
+
+ a.a_gen.a_desc = &vop_revoke_desc;
+ a.a_vp = vp;
+ a.a_flags = flags;
+ return (VOP_REVOKE_APV(vp->v_op, &a));
+}
+
+struct vop_fsync_args {
+ struct vop_generic_args a_gen;
+ struct vnode *a_vp;
+ int a_waitfor;
+ struct thread *a_td;
+};
+
+extern struct vnodeop_desc vop_fsync_desc;
+
+int VOP_FSYNC_AP(struct vop_fsync_args *);
+int VOP_FSYNC_APV(struct vop_vector *vop, struct vop_fsync_args *);
+
+static __inline int VOP_FSYNC(
+ struct vnode *vp,
+ int waitfor,
+ struct thread *td)
+{
+ struct vop_fsync_args a;
+
+ a.a_gen.a_desc = &vop_fsync_desc;
+ a.a_vp = vp;
+ a.a_waitfor = waitfor;
+ a.a_td = td;
+ return (VOP_FSYNC_APV(vp->v_op, &a));
+}
+
+struct vop_remove_args {
+ struct vop_generic_args a_gen;
+ struct vnode *a_dvp;
+ struct vnode *a_vp;
+ struct componentname *a_cnp;
+};
+
+extern struct vnodeop_desc vop_remove_desc;
+
+int VOP_REMOVE_AP(struct vop_remove_args *);
+int VOP_REMOVE_APV(struct vop_vector *vop, struct vop_remove_args *);
+
+static __inline int VOP_REMOVE(
+ struct vnode *dvp,
+ struct vnode *vp,
+ struct componentname *cnp)
+{
+ struct vop_remove_args a;
+
+ a.a_gen.a_desc = &vop_remove_desc;
+ a.a_dvp = dvp;
+ a.a_vp = vp;
+ a.a_cnp = cnp;
+ return (VOP_REMOVE_APV(dvp->v_op, &a));
+}
+
+struct vop_link_args {
+ struct vop_generic_args a_gen;
+ struct vnode *a_tdvp;
+ struct vnode *a_vp;
+ struct componentname *a_cnp;
+};
+
+extern struct vnodeop_desc vop_link_desc;
+
+int VOP_LINK_AP(struct vop_link_args *);
+int VOP_LINK_APV(struct vop_vector *vop, struct vop_link_args *);
+
+static __inline int VOP_LINK(
+ struct vnode *tdvp,
+ struct vnode *vp,
+ struct componentname *cnp)
+{
+ struct vop_link_args a;
+
+ a.a_gen.a_desc = &vop_link_desc;
+ a.a_tdvp = tdvp;
+ a.a_vp = vp;
+ a.a_cnp = cnp;
+ return (VOP_LINK_APV(tdvp->v_op, &a));
+}
+
+struct vop_rename_args {
+ struct vop_generic_args a_gen;
+ struct vnode *a_fdvp;
+ struct vnode *a_fvp;
+ struct componentname *a_fcnp;
+ struct vnode *a_tdvp;
+ struct vnode *a_tvp;
+ struct componentname *a_tcnp;
+};
+
+extern struct vnodeop_desc vop_rename_desc;
+
+int VOP_RENAME_AP(struct vop_rename_args *);
+int VOP_RENAME_APV(struct vop_vector *vop, struct vop_rename_args *);
+
+static __inline int VOP_RENAME(
+ struct vnode *fdvp,
+ struct vnode *fvp,
+ struct componentname *fcnp,
+ struct vnode *tdvp,
+ struct vnode *tvp,
+ struct componentname *tcnp)
+{
+ struct vop_rename_args a;
+
+ a.a_gen.a_desc = &vop_rename_desc;
+ a.a_fdvp = fdvp;
+ a.a_fvp = fvp;
+ a.a_fcnp = fcnp;
+ a.a_tdvp = tdvp;
+ a.a_tvp = tvp;
+ a.a_tcnp = tcnp;
+ return (VOP_RENAME_APV(fdvp->v_op, &a));
+}
+
+struct vop_mkdir_args {
+ struct vop_generic_args a_gen;
+ struct vnode *a_dvp;
+ struct vnode **a_vpp;
+ struct componentname *a_cnp;
+ struct vattr *a_vap;
+};
+
+extern struct vnodeop_desc vop_mkdir_desc;
+
+int VOP_MKDIR_AP(struct vop_mkdir_args *);
+int VOP_MKDIR_APV(struct vop_vector *vop, struct vop_mkdir_args *);
+
+static __inline int VOP_MKDIR(
+ struct vnode *dvp,
+ struct vnode **vpp,
+ struct componentname *cnp,
+ struct vattr *vap)
+{
+ struct vop_mkdir_args a;
+
+ a.a_gen.a_desc = &vop_mkdir_desc;
+ a.a_dvp = dvp;
+ a.a_vpp = vpp;
+ a.a_cnp = cnp;
+ a.a_vap = vap;
+ return (VOP_MKDIR_APV(dvp->v_op, &a));
+}
+
+struct vop_rmdir_args {
+ struct vop_generic_args a_gen;
+ struct vnode *a_dvp;
+ struct vnode *a_vp;
+ struct componentname *a_cnp;
+};
+
+extern struct vnodeop_desc vop_rmdir_desc;
+
+int VOP_RMDIR_AP(struct vop_rmdir_args *);
+int VOP_RMDIR_APV(struct vop_vector *vop, struct vop_rmdir_args *);
+
+static __inline int VOP_RMDIR(
+ struct vnode *dvp,
+ struct vnode *vp,
+ struct componentname *cnp)
+{
+ struct vop_rmdir_args a;
+
+ a.a_gen.a_desc = &vop_rmdir_desc;
+ a.a_dvp = dvp;
+ a.a_vp = vp;
+ a.a_cnp = cnp;
+ return (VOP_RMDIR_APV(dvp->v_op, &a));
+}
+
+struct vop_symlink_args {
+ struct vop_generic_args a_gen;
+ struct vnode *a_dvp;
+ struct vnode **a_vpp;
+ struct componentname *a_cnp;
+ struct vattr *a_vap;
+ char *a_target;
+};
+
+extern struct vnodeop_desc vop_symlink_desc;
+
+int VOP_SYMLINK_AP(struct vop_symlink_args *);
+int VOP_SYMLINK_APV(struct vop_vector *vop, struct vop_symlink_args *);
+
+static __inline int VOP_SYMLINK(
+ struct vnode *dvp,
+ struct vnode **vpp,
+ struct componentname *cnp,
+ struct vattr *vap,
+ char *target)
+{
+ struct vop_symlink_args a;
+
+ a.a_gen.a_desc = &vop_symlink_desc;
+ a.a_dvp = dvp;
+ a.a_vpp = vpp;
+ a.a_cnp = cnp;
+ a.a_vap = vap;
+ a.a_target = target;
+ return (VOP_SYMLINK_APV(dvp->v_op, &a));
+}
+
+struct vop_readdir_args {
+ struct vop_generic_args a_gen;
+ struct vnode *a_vp;
+ struct uio *a_uio;
+ struct ucred *a_cred;
+ int *a_eofflag;
+ int *a_ncookies;
+ u_long **a_cookies;
+};
+
+extern struct vnodeop_desc vop_readdir_desc;
+
+int VOP_READDIR_AP(struct vop_readdir_args *);
+int VOP_READDIR_APV(struct vop_vector *vop, struct vop_readdir_args *);
+
+static __inline int VOP_READDIR(
+ struct vnode *vp,
+ struct uio *uio,
+ struct ucred *cred,
+ int *eofflag,
+ int *ncookies,
+ u_long **cookies)
+{
+ struct vop_readdir_args a;
+
+ a.a_gen.a_desc = &vop_readdir_desc;
+ a.a_vp = vp;
+ a.a_uio = uio;
+ a.a_cred = cred;
+ a.a_eofflag = eofflag;
+ a.a_ncookies = ncookies;
+ a.a_cookies = cookies;
+ return (VOP_READDIR_APV(vp->v_op, &a));
+}
+
+struct vop_readlink_args {
+ struct vop_generic_args a_gen;
+ struct vnode *a_vp;
+ struct uio *a_uio;
+ struct ucred *a_cred;
+};
+
+extern struct vnodeop_desc vop_readlink_desc;
+
+int VOP_READLINK_AP(struct vop_readlink_args *);
+int VOP_READLINK_APV(struct vop_vector *vop, struct vop_readlink_args *);
+
+static __inline int VOP_READLINK(
+ struct vnode *vp,
+ struct uio *uio,
+ struct ucred *cred)
+{
+ struct vop_readlink_args a;
+
+ a.a_gen.a_desc = &vop_readlink_desc;
+ a.a_vp = vp;
+ a.a_uio = uio;
+ a.a_cred = cred;
+ return (VOP_READLINK_APV(vp->v_op, &a));
+}
+
+struct vop_inactive_args {
+ struct vop_generic_args a_gen;
+ struct vnode *a_vp;
+ struct thread *a_td;
+};
+
+extern struct vnodeop_desc vop_inactive_desc;
+
+int VOP_INACTIVE_AP(struct vop_inactive_args *);
+int VOP_INACTIVE_APV(struct vop_vector *vop, struct vop_inactive_args *);
+
+static __inline int VOP_INACTIVE(
+ struct vnode *vp,
+ struct thread *td)
+{
+ struct vop_inactive_args a;
+
+ a.a_gen.a_desc = &vop_inactive_desc;
+ a.a_vp = vp;
+ a.a_td = td;
+ return (VOP_INACTIVE_APV(vp->v_op, &a));
+}
+
+struct vop_reclaim_args {
+ struct vop_generic_args a_gen;
+ struct vnode *a_vp;
+ struct thread *a_td;
+};
+
+extern struct vnodeop_desc vop_reclaim_desc;
+
+int VOP_RECLAIM_AP(struct vop_reclaim_args *);
+int VOP_RECLAIM_APV(struct vop_vector *vop, struct vop_reclaim_args *);
+
+static __inline int VOP_RECLAIM(
+ struct vnode *vp,
+ struct thread *td)
+{
+ struct vop_reclaim_args a;
+
+ a.a_gen.a_desc = &vop_reclaim_desc;
+ a.a_vp = vp;
+ a.a_td = td;
+ return (VOP_RECLAIM_APV(vp->v_op, &a));
+}
+
+struct vop_lock1_args {
+ struct vop_generic_args a_gen;
+ struct vnode *a_vp;
+ int a_flags;
+ char *a_file;
+ int a_line;
+};
+
+extern struct vnodeop_desc vop_lock1_desc;
+
+int VOP_LOCK1_AP(struct vop_lock1_args *);
+int VOP_LOCK1_APV(struct vop_vector *vop, struct vop_lock1_args *);
+
+static __inline int VOP_LOCK1(
+ struct vnode *vp,
+ int flags,
+ char *file,
+ int line)
+{
+ struct vop_lock1_args a;
+
+ a.a_gen.a_desc = &vop_lock1_desc;
+ a.a_vp = vp;
+ a.a_flags = flags;
+ a.a_file = file;
+ a.a_line = line;
+ return (VOP_LOCK1_APV(vp->v_op, &a));
+}
+
+struct vop_unlock_args {
+ struct vop_generic_args a_gen;
+ struct vnode *a_vp;
+ int a_flags;
+};
+
+extern struct vnodeop_desc vop_unlock_desc;
+
+int VOP_UNLOCK_AP(struct vop_unlock_args *);
+int VOP_UNLOCK_APV(struct vop_vector *vop, struct vop_unlock_args *);
+
+static __inline int VOP_UNLOCK(
+ struct vnode *vp,
+ int flags)
+{
+ struct vop_unlock_args a;
+
+ a.a_gen.a_desc = &vop_unlock_desc;
+ a.a_vp = vp;
+ a.a_flags = flags;
+ return (VOP_UNLOCK_APV(vp->v_op, &a));
+}
+
+struct vop_bmap_args {
+ struct vop_generic_args a_gen;
+ struct vnode *a_vp;
+ daddr_t a_bn;
+ struct bufobj **a_bop;
+ daddr_t *a_bnp;
+ int *a_runp;
+ int *a_runb;
+};
+
+extern struct vnodeop_desc vop_bmap_desc;
+
+int VOP_BMAP_AP(struct vop_bmap_args *);
+int VOP_BMAP_APV(struct vop_vector *vop, struct vop_bmap_args *);
+
+static __inline int VOP_BMAP(
+ struct vnode *vp,
+ daddr_t bn,
+ struct bufobj **bop,
+ daddr_t *bnp,
+ int *runp,
+ int *runb)
+{
+ struct vop_bmap_args a;
+
+ a.a_gen.a_desc = &vop_bmap_desc;
+ a.a_vp = vp;
+ a.a_bn = bn;
+ a.a_bop = bop;
+ a.a_bnp = bnp;
+ a.a_runp = runp;
+ a.a_runb = runb;
+ return (VOP_BMAP_APV(vp->v_op, &a));
+}
+
+struct vop_strategy_args {
+ struct vop_generic_args a_gen;
+ struct vnode *a_vp;
+ struct buf *a_bp;
+};
+
+extern struct vnodeop_desc vop_strategy_desc;
+
+int VOP_STRATEGY_AP(struct vop_strategy_args *);
+int VOP_STRATEGY_APV(struct vop_vector *vop, struct vop_strategy_args *);
+
+static __inline int VOP_STRATEGY(
+ struct vnode *vp,
+ struct buf *bp)
+{
+ struct vop_strategy_args a;
+
+ a.a_gen.a_desc = &vop_strategy_desc;
+ a.a_vp = vp;
+ a.a_bp = bp;
+ return (VOP_STRATEGY_APV(vp->v_op, &a));
+}
+
+struct vop_getwritemount_args {
+ struct vop_generic_args a_gen;
+ struct vnode *a_vp;
+ struct mount **a_mpp;
+};
+
+extern struct vnodeop_desc vop_getwritemount_desc;
+
+int VOP_GETWRITEMOUNT_AP(struct vop_getwritemount_args *);
+int VOP_GETWRITEMOUNT_APV(struct vop_vector *vop, struct vop_getwritemount_args *);
+
+static __inline int VOP_GETWRITEMOUNT(
+ struct vnode *vp,
+ struct mount **mpp)
+{
+ struct vop_getwritemount_args a;
+
+ a.a_gen.a_desc = &vop_getwritemount_desc;
+ a.a_vp = vp;
+ a.a_mpp = mpp;
+ return (VOP_GETWRITEMOUNT_APV(vp->v_op, &a));
+}
+
+struct vop_print_args {
+ struct vop_generic_args a_gen;
+ struct vnode *a_vp;
+};
+
+extern struct vnodeop_desc vop_print_desc;
+
+int VOP_PRINT_AP(struct vop_print_args *);
+int VOP_PRINT_APV(struct vop_vector *vop, struct vop_print_args *);
+
+static __inline int VOP_PRINT(
+ struct vnode *vp)
+{
+ struct vop_print_args a;
+
+ a.a_gen.a_desc = &vop_print_desc;
+ a.a_vp = vp;
+ return (VOP_PRINT_APV(vp->v_op, &a));
+}
+
+struct vop_pathconf_args {
+ struct vop_generic_args a_gen;
+ struct vnode *a_vp;
+ int a_name;
+ register_t *a_retval;
+};
+
+extern struct vnodeop_desc vop_pathconf_desc;
+
+int VOP_PATHCONF_AP(struct vop_pathconf_args *);
+int VOP_PATHCONF_APV(struct vop_vector *vop, struct vop_pathconf_args *);
+
+static __inline int VOP_PATHCONF(
+ struct vnode *vp,
+ int name,
+ register_t *retval)
+{
+ struct vop_pathconf_args a;
+
+ a.a_gen.a_desc = &vop_pathconf_desc;
+ a.a_vp = vp;
+ a.a_name = name;
+ a.a_retval = retval;
+ return (VOP_PATHCONF_APV(vp->v_op, &a));
+}
+
+struct vop_advlock_args {
+ struct vop_generic_args a_gen;
+ struct vnode *a_vp;
+ void *a_id;
+ int a_op;
+ struct flock *a_fl;
+ int a_flags;
+};
+
+extern struct vnodeop_desc vop_advlock_desc;
+
+int VOP_ADVLOCK_AP(struct vop_advlock_args *);
+int VOP_ADVLOCK_APV(struct vop_vector *vop, struct vop_advlock_args *);
+
+static __inline int VOP_ADVLOCK(
+ struct vnode *vp,
+ void *id,
+ int op,
+ struct flock *fl,
+ int flags)
+{
+ struct vop_advlock_args a;
+
+ a.a_gen.a_desc = &vop_advlock_desc;
+ a.a_vp = vp;
+ a.a_id = id;
+ a.a_op = op;
+ a.a_fl = fl;
+ a.a_flags = flags;
+ return (VOP_ADVLOCK_APV(vp->v_op, &a));
+}
+
+struct vop_advlockasync_args {
+ struct vop_generic_args a_gen;
+ struct vnode *a_vp;
+ void *a_id;
+ int a_op;
+ struct flock *a_fl;
+ int a_flags;
+ struct task *a_task;
+ void **a_cookiep;
+};
+
+extern struct vnodeop_desc vop_advlockasync_desc;
+
+int VOP_ADVLOCKASYNC_AP(struct vop_advlockasync_args *);
+int VOP_ADVLOCKASYNC_APV(struct vop_vector *vop, struct vop_advlockasync_args *);
+
+static __inline int VOP_ADVLOCKASYNC(
+ struct vnode *vp,
+ void *id,
+ int op,
+ struct flock *fl,
+ int flags,
+ struct task *task,
+ void **cookiep)
+{
+ struct vop_advlockasync_args a;
+
+ a.a_gen.a_desc = &vop_advlockasync_desc;
+ a.a_vp = vp;
+ a.a_id = id;
+ a.a_op = op;
+ a.a_fl = fl;
+ a.a_flags = flags;
+ a.a_task = task;
+ a.a_cookiep = cookiep;
+ return (VOP_ADVLOCKASYNC_APV(vp->v_op, &a));
+}
+
+struct vop_reallocblks_args {
+ struct vop_generic_args a_gen;
+ struct vnode *a_vp;
+ struct cluster_save *a_buflist;
+};
+
+extern struct vnodeop_desc vop_reallocblks_desc;
+
+int VOP_REALLOCBLKS_AP(struct vop_reallocblks_args *);
+int VOP_REALLOCBLKS_APV(struct vop_vector *vop, struct vop_reallocblks_args *);
+
+static __inline int VOP_REALLOCBLKS(
+ struct vnode *vp,
+ struct cluster_save *buflist)
+{
+ struct vop_reallocblks_args a;
+
+ a.a_gen.a_desc = &vop_reallocblks_desc;
+ a.a_vp = vp;
+ a.a_buflist = buflist;
+ return (VOP_REALLOCBLKS_APV(vp->v_op, &a));
+}
+
+struct vop_getpages_args {
+ struct vop_generic_args a_gen;
+ struct vnode *a_vp;
+ vm_page_t *a_m;
+ int a_count;
+ int a_reqpage;
+ vm_ooffset_t a_offset;
+};
+
+extern struct vnodeop_desc vop_getpages_desc;
+
+int VOP_GETPAGES_AP(struct vop_getpages_args *);
+int VOP_GETPAGES_APV(struct vop_vector *vop, struct vop_getpages_args *);
+
+static __inline int VOP_GETPAGES(
+ struct vnode *vp,
+ vm_page_t *m,
+ int count,
+ int reqpage,
+ vm_ooffset_t offset)
+{
+ struct vop_getpages_args a;
+
+ a.a_gen.a_desc = &vop_getpages_desc;
+ a.a_vp = vp;
+ a.a_m = m;
+ a.a_count = count;
+ a.a_reqpage = reqpage;
+ a.a_offset = offset;
+ return (VOP_GETPAGES_APV(vp->v_op, &a));
+}
+
+struct vop_putpages_args {
+ struct vop_generic_args a_gen;
+ struct vnode *a_vp;
+ vm_page_t *a_m;
+ int a_count;
+ int a_sync;
+ int *a_rtvals;
+ vm_ooffset_t a_offset;
+};
+
+extern struct vnodeop_desc vop_putpages_desc;
+
+int VOP_PUTPAGES_AP(struct vop_putpages_args *);
+int VOP_PUTPAGES_APV(struct vop_vector *vop, struct vop_putpages_args *);
+
+static __inline int VOP_PUTPAGES(
+ struct vnode *vp,
+ vm_page_t *m,
+ int count,
+ int sync,
+ int *rtvals,
+ vm_ooffset_t offset)
+{
+ struct vop_putpages_args a;
+
+ a.a_gen.a_desc = &vop_putpages_desc;
+ a.a_vp = vp;
+ a.a_m = m;
+ a.a_count = count;
+ a.a_sync = sync;
+ a.a_rtvals = rtvals;
+ a.a_offset = offset;
+ return (VOP_PUTPAGES_APV(vp->v_op, &a));
+}
+
+struct vop_getacl_args {
+ struct vop_generic_args a_gen;
+ struct vnode *a_vp;
+ acl_type_t a_type;
+ struct acl *a_aclp;
+ struct ucred *a_cred;
+ struct thread *a_td;
+};
+
+extern struct vnodeop_desc vop_getacl_desc;
+
+int VOP_GETACL_AP(struct vop_getacl_args *);
+int VOP_GETACL_APV(struct vop_vector *vop, struct vop_getacl_args *);
+
+static __inline int VOP_GETACL(
+ struct vnode *vp,
+ acl_type_t type,
+ struct acl *aclp,
+ struct ucred *cred,
+ struct thread *td)
+{
+ struct vop_getacl_args a;
+
+ a.a_gen.a_desc = &vop_getacl_desc;
+ a.a_vp = vp;
+ a.a_type = type;
+ a.a_aclp = aclp;
+ a.a_cred = cred;
+ a.a_td = td;
+ return (VOP_GETACL_APV(vp->v_op, &a));
+}
+
+struct vop_setacl_args {
+ struct vop_generic_args a_gen;
+ struct vnode *a_vp;
+ acl_type_t a_type;
+ struct acl *a_aclp;
+ struct ucred *a_cred;
+ struct thread *a_td;
+};
+
+extern struct vnodeop_desc vop_setacl_desc;
+
+int VOP_SETACL_AP(struct vop_setacl_args *);
+int VOP_SETACL_APV(struct vop_vector *vop, struct vop_setacl_args *);
+
+static __inline int VOP_SETACL(
+ struct vnode *vp,
+ acl_type_t type,
+ struct acl *aclp,
+ struct ucred *cred,
+ struct thread *td)
+{
+ struct vop_setacl_args a;
+
+ a.a_gen.a_desc = &vop_setacl_desc;
+ a.a_vp = vp;
+ a.a_type = type;
+ a.a_aclp = aclp;
+ a.a_cred = cred;
+ a.a_td = td;
+ return (VOP_SETACL_APV(vp->v_op, &a));
+}
+
+struct vop_aclcheck_args {
+ struct vop_generic_args a_gen;
+ struct vnode *a_vp;
+ acl_type_t a_type;
+ struct acl *a_aclp;
+ struct ucred *a_cred;
+ struct thread *a_td;
+};
+
+extern struct vnodeop_desc vop_aclcheck_desc;
+
+int VOP_ACLCHECK_AP(struct vop_aclcheck_args *);
+int VOP_ACLCHECK_APV(struct vop_vector *vop, struct vop_aclcheck_args *);
+
+static __inline int VOP_ACLCHECK(
+ struct vnode *vp,
+ acl_type_t type,
+ struct acl *aclp,
+ struct ucred *cred,
+ struct thread *td)
+{
+ struct vop_aclcheck_args a;
+
+ a.a_gen.a_desc = &vop_aclcheck_desc;
+ a.a_vp = vp;
+ a.a_type = type;
+ a.a_aclp = aclp;
+ a.a_cred = cred;
+ a.a_td = td;
+ return (VOP_ACLCHECK_APV(vp->v_op, &a));
+}
+
+struct vop_closeextattr_args {
+ struct vop_generic_args a_gen;
+ struct vnode *a_vp;
+ int a_commit;
+ struct ucred *a_cred;
+ struct thread *a_td;
+};
+
+extern struct vnodeop_desc vop_closeextattr_desc;
+
+int VOP_CLOSEEXTATTR_AP(struct vop_closeextattr_args *);
+int VOP_CLOSEEXTATTR_APV(struct vop_vector *vop, struct vop_closeextattr_args *);
+
+static __inline int VOP_CLOSEEXTATTR(
+ struct vnode *vp,
+ int commit,
+ struct ucred *cred,
+ struct thread *td)
+{
+ struct vop_closeextattr_args a;
+
+ a.a_gen.a_desc = &vop_closeextattr_desc;
+ a.a_vp = vp;
+ a.a_commit = commit;
+ a.a_cred = cred;
+ a.a_td = td;
+ return (VOP_CLOSEEXTATTR_APV(vp->v_op, &a));
+}
+
+struct vop_getextattr_args {
+ struct vop_generic_args a_gen;
+ struct vnode *a_vp;
+ int a_attrnamespace;
+ const char *a_name;
+ struct uio *a_uio;
+ size_t *a_size;
+ struct ucred *a_cred;
+ struct thread *a_td;
+};
+
+extern struct vnodeop_desc vop_getextattr_desc;
+
+int VOP_GETEXTATTR_AP(struct vop_getextattr_args *);
+int VOP_GETEXTATTR_APV(struct vop_vector *vop, struct vop_getextattr_args *);
+
+static __inline int VOP_GETEXTATTR(
+ struct vnode *vp,
+ int attrnamespace,
+ const char *name,
+ struct uio *uio,
+ size_t *size,
+ struct ucred *cred,
+ struct thread *td)
+{
+ struct vop_getextattr_args a;
+
+ a.a_gen.a_desc = &vop_getextattr_desc;
+ a.a_vp = vp;
+ a.a_attrnamespace = attrnamespace;
+ a.a_name = name;
+ a.a_uio = uio;
+ a.a_size = size;
+ a.a_cred = cred;
+ a.a_td = td;
+ return (VOP_GETEXTATTR_APV(vp->v_op, &a));
+}
+
+struct vop_listextattr_args {
+ struct vop_generic_args a_gen;
+ struct vnode *a_vp;
+ int a_attrnamespace;
+ struct uio *a_uio;
+ size_t *a_size;
+ struct ucred *a_cred;
+ struct thread *a_td;
+};
+
+extern struct vnodeop_desc vop_listextattr_desc;
+
+int VOP_LISTEXTATTR_AP(struct vop_listextattr_args *);
+int VOP_LISTEXTATTR_APV(struct vop_vector *vop, struct vop_listextattr_args *);
+
+static __inline int VOP_LISTEXTATTR(
+ struct vnode *vp,
+ int attrnamespace,
+ struct uio *uio,
+ size_t *size,
+ struct ucred *cred,
+ struct thread *td)
+{
+ struct vop_listextattr_args a;
+
+ a.a_gen.a_desc = &vop_listextattr_desc;
+ a.a_vp = vp;
+ a.a_attrnamespace = attrnamespace;
+ a.a_uio = uio;
+ a.a_size = size;
+ a.a_cred = cred;
+ a.a_td = td;
+ return (VOP_LISTEXTATTR_APV(vp->v_op, &a));
+}
+
+struct vop_openextattr_args {
+ struct vop_generic_args a_gen;
+ struct vnode *a_vp;
+ struct ucred *a_cred;
+ struct thread *a_td;
+};
+
+extern struct vnodeop_desc vop_openextattr_desc;
+
+int VOP_OPENEXTATTR_AP(struct vop_openextattr_args *);
+int VOP_OPENEXTATTR_APV(struct vop_vector *vop, struct vop_openextattr_args *);
+
+static __inline int VOP_OPENEXTATTR(
+ struct vnode *vp,
+ struct ucred *cred,
+ struct thread *td)
+{
+ struct vop_openextattr_args a;
+
+ a.a_gen.a_desc = &vop_openextattr_desc;
+ a.a_vp = vp;
+ a.a_cred = cred;
+ a.a_td = td;
+ return (VOP_OPENEXTATTR_APV(vp->v_op, &a));
+}
+
+struct vop_deleteextattr_args {
+ struct vop_generic_args a_gen;
+ struct vnode *a_vp;
+ int a_attrnamespace;
+ const char *a_name;
+ struct ucred *a_cred;
+ struct thread *a_td;
+};
+
+extern struct vnodeop_desc vop_deleteextattr_desc;
+
+int VOP_DELETEEXTATTR_AP(struct vop_deleteextattr_args *);
+int VOP_DELETEEXTATTR_APV(struct vop_vector *vop, struct vop_deleteextattr_args *);
+
+static __inline int VOP_DELETEEXTATTR(
+ struct vnode *vp,
+ int attrnamespace,
+ const char *name,
+ struct ucred *cred,
+ struct thread *td)
+{
+ struct vop_deleteextattr_args a;
+
+ a.a_gen.a_desc = &vop_deleteextattr_desc;
+ a.a_vp = vp;
+ a.a_attrnamespace = attrnamespace;
+ a.a_name = name;
+ a.a_cred = cred;
+ a.a_td = td;
+ return (VOP_DELETEEXTATTR_APV(vp->v_op, &a));
+}
+
+struct vop_setextattr_args {
+ struct vop_generic_args a_gen;
+ struct vnode *a_vp;
+ int a_attrnamespace;
+ const char *a_name;
+ struct uio *a_uio;
+ struct ucred *a_cred;
+ struct thread *a_td;
+};
+
+extern struct vnodeop_desc vop_setextattr_desc;
+
+int VOP_SETEXTATTR_AP(struct vop_setextattr_args *);
+int VOP_SETEXTATTR_APV(struct vop_vector *vop, struct vop_setextattr_args *);
+
+static __inline int VOP_SETEXTATTR(
+ struct vnode *vp,
+ int attrnamespace,
+ const char *name,
+ struct uio *uio,
+ struct ucred *cred,
+ struct thread *td)
+{
+ struct vop_setextattr_args a;
+
+ a.a_gen.a_desc = &vop_setextattr_desc;
+ a.a_vp = vp;
+ a.a_attrnamespace = attrnamespace;
+ a.a_name = name;
+ a.a_uio = uio;
+ a.a_cred = cred;
+ a.a_td = td;
+ return (VOP_SETEXTATTR_APV(vp->v_op, &a));
+}
+
+struct vop_setlabel_args {
+ struct vop_generic_args a_gen;
+ struct vnode *a_vp;
+ struct label *a_label;
+ struct ucred *a_cred;
+ struct thread *a_td;
+};
+
+extern struct vnodeop_desc vop_setlabel_desc;
+
+int VOP_SETLABEL_AP(struct vop_setlabel_args *);
+int VOP_SETLABEL_APV(struct vop_vector *vop, struct vop_setlabel_args *);
+
+static __inline int VOP_SETLABEL(
+ struct vnode *vp,
+ struct label *label,
+ struct ucred *cred,
+ struct thread *td)
+{
+ struct vop_setlabel_args a;
+
+ a.a_gen.a_desc = &vop_setlabel_desc;
+ a.a_vp = vp;
+ a.a_label = label;
+ a.a_cred = cred;
+ a.a_td = td;
+ return (VOP_SETLABEL_APV(vp->v_op, &a));
+}
+
+struct vop_vptofh_args {
+ struct vop_generic_args a_gen;
+ struct vnode *a_vp;
+ struct fid *a_fhp;
+};
+
+extern struct vnodeop_desc vop_vptofh_desc;
+
+int VOP_VPTOFH_AP(struct vop_vptofh_args *);
+int VOP_VPTOFH_APV(struct vop_vector *vop, struct vop_vptofh_args *);
+
+static __inline int VOP_VPTOFH(
+ struct vnode *vp,
+ struct fid *fhp)
+{
+ struct vop_vptofh_args a;
+
+ a.a_gen.a_desc = &vop_vptofh_desc;
+ a.a_vp = vp;
+ a.a_fhp = fhp;
+ return (VOP_VPTOFH_APV(vp->v_op, &a));
+}
+
+struct vop_vptocnp_args {
+ struct vop_generic_args a_gen;
+ struct vnode *a_vp;
+ struct vnode **a_vpp;
+ struct ucred *a_cred;
+ char *a_buf;
+ int *a_buflen;
+};
+
+extern struct vnodeop_desc vop_vptocnp_desc;
+
+int VOP_VPTOCNP_AP(struct vop_vptocnp_args *);
+int VOP_VPTOCNP_APV(struct vop_vector *vop, struct vop_vptocnp_args *);
+
+static __inline int VOP_VPTOCNP(
+ struct vnode *vp,
+ struct vnode **vpp,
+ struct ucred *cred,
+ char *buf,
+ int *buflen)
+{
+ struct vop_vptocnp_args a;
+
+ a.a_gen.a_desc = &vop_vptocnp_desc;
+ a.a_vp = vp;
+ a.a_vpp = vpp;
+ a.a_cred = cred;
+ a.a_buf = buf;
+ a.a_buflen = buflen;
+ return (VOP_VPTOCNP_APV(vp->v_op, &a));
+}
+
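+/*
+ * Usage sketch (illustration, not part of the generated interface): every
+ * VOP_*() inline above packs its arguments into the matching vop_*_args
+ * structure, records the operation descriptor and dispatches through the
+ * vnode's operation vector via VOP_*_APV().  A typical caller holds the
+ * vnode lock across the call, e.g.:
+ *
+ *     struct vattr va;
+ *     int error;
+ *
+ *     vn_lock(vp, LK_SHARED | LK_RETRY);
+ *     error = VOP_GETATTR(vp, &va, td->td_ucred);
+ *     VOP_UNLOCK(vp, 0);
+ *
+ * vn_lock() and the two-argument VOP_UNLOCK() follow the FreeBSD 8-era
+ * conventions this header was generated against.
+ */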
diff --git a/rtems/freebsd/local/vnode_if_newproto.h b/rtems/freebsd/local/vnode_if_newproto.h
new file mode 100644
index 00000000..4b888acd
--- /dev/null
+++ b/rtems/freebsd/local/vnode_if_newproto.h
@@ -0,0 +1,66 @@
+/*
+ * This file is produced automatically.
+ * Do not modify anything in here by hand.
+ *
+ * Created from $FreeBSD$
+ */
+
+
+struct vop_vector {
+ struct vop_vector *vop_default;
+ vop_bypass_t *vop_bypass;
+ vop_islocked_t *vop_islocked;
+ vop_lookup_t *vop_lookup;
+ vop_cachedlookup_t *vop_cachedlookup;
+ vop_create_t *vop_create;
+ vop_whiteout_t *vop_whiteout;
+ vop_mknod_t *vop_mknod;
+ vop_open_t *vop_open;
+ vop_close_t *vop_close;
+ vop_access_t *vop_access;
+ vop_accessx_t *vop_accessx;
+ vop_getattr_t *vop_getattr;
+ vop_setattr_t *vop_setattr;
+ vop_markatime_t *vop_markatime;
+ vop_read_t *vop_read;
+ vop_write_t *vop_write;
+ vop_ioctl_t *vop_ioctl;
+ vop_poll_t *vop_poll;
+ vop_kqfilter_t *vop_kqfilter;
+ vop_revoke_t *vop_revoke;
+ vop_fsync_t *vop_fsync;
+ vop_remove_t *vop_remove;
+ vop_link_t *vop_link;
+ vop_rename_t *vop_rename;
+ vop_mkdir_t *vop_mkdir;
+ vop_rmdir_t *vop_rmdir;
+ vop_symlink_t *vop_symlink;
+ vop_readdir_t *vop_readdir;
+ vop_readlink_t *vop_readlink;
+ vop_inactive_t *vop_inactive;
+ vop_reclaim_t *vop_reclaim;
+ vop_lock1_t *vop_lock1;
+ vop_unlock_t *vop_unlock;
+ vop_bmap_t *vop_bmap;
+ vop_strategy_t *vop_strategy;
+ vop_getwritemount_t *vop_getwritemount;
+ vop_print_t *vop_print;
+ vop_pathconf_t *vop_pathconf;
+ vop_advlock_t *vop_advlock;
+ vop_advlockasync_t *vop_advlockasync;
+ vop_reallocblks_t *vop_reallocblks;
+ vop_getpages_t *vop_getpages;
+ vop_putpages_t *vop_putpages;
+ vop_getacl_t *vop_getacl;
+ vop_setacl_t *vop_setacl;
+ vop_aclcheck_t *vop_aclcheck;
+ vop_closeextattr_t *vop_closeextattr;
+ vop_getextattr_t *vop_getextattr;
+ vop_listextattr_t *vop_listextattr;
+ vop_openextattr_t *vop_openextattr;
+ vop_deleteextattr_t *vop_deleteextattr;
+ vop_setextattr_t *vop_setextattr;
+ vop_setlabel_t *vop_setlabel;
+ vop_vptofh_t *vop_vptofh;
+ vop_vptocnp_t *vop_vptocnp;
+};
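+
+/*
+ * Sketch of how a filesystem typically fills this table (the usual FreeBSD
+ * convention, not something this generated header enforces): slots left
+ * NULL are resolved through the vop_default chain, normally terminating in
+ * the kernel's default vector.  The "hypofs" names below are hypothetical:
+ *
+ *     static struct vop_vector hypofs_vnodeops = {
+ *             .vop_default    = &default_vnodeops,
+ *             .vop_getattr    = hypofs_getattr,
+ *             .vop_read       = hypofs_read,
+ *     };
+ */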
diff --git a/rtems/freebsd/local/vnode_if_typedef.h b/rtems/freebsd/local/vnode_if_typedef.h
new file mode 100644
index 00000000..b7157011
--- /dev/null
+++ b/rtems/freebsd/local/vnode_if_typedef.h
@@ -0,0 +1,170 @@
+/*
+ * This file is produced automatically.
+ * Do not modify anything in here by hand.
+ *
+ * Created from $FreeBSD$
+ */
+
+
+struct vop_islocked_args;
+typedef int vop_islocked_t(struct vop_islocked_args *);
+
+struct vop_lookup_args;
+typedef int vop_lookup_t(struct vop_lookup_args *);
+
+struct vop_cachedlookup_args;
+typedef int vop_cachedlookup_t(struct vop_cachedlookup_args *);
+
+struct vop_create_args;
+typedef int vop_create_t(struct vop_create_args *);
+
+struct vop_whiteout_args;
+typedef int vop_whiteout_t(struct vop_whiteout_args *);
+
+struct vop_mknod_args;
+typedef int vop_mknod_t(struct vop_mknod_args *);
+
+struct vop_open_args;
+typedef int vop_open_t(struct vop_open_args *);
+
+struct vop_close_args;
+typedef int vop_close_t(struct vop_close_args *);
+
+struct vop_access_args;
+typedef int vop_access_t(struct vop_access_args *);
+
+struct vop_accessx_args;
+typedef int vop_accessx_t(struct vop_accessx_args *);
+
+struct vop_getattr_args;
+typedef int vop_getattr_t(struct vop_getattr_args *);
+
+struct vop_setattr_args;
+typedef int vop_setattr_t(struct vop_setattr_args *);
+
+struct vop_markatime_args;
+typedef int vop_markatime_t(struct vop_markatime_args *);
+
+struct vop_read_args;
+typedef int vop_read_t(struct vop_read_args *);
+
+struct vop_write_args;
+typedef int vop_write_t(struct vop_write_args *);
+
+struct vop_ioctl_args;
+typedef int vop_ioctl_t(struct vop_ioctl_args *);
+
+struct vop_poll_args;
+typedef int vop_poll_t(struct vop_poll_args *);
+
+struct vop_kqfilter_args;
+typedef int vop_kqfilter_t(struct vop_kqfilter_args *);
+
+struct vop_revoke_args;
+typedef int vop_revoke_t(struct vop_revoke_args *);
+
+struct vop_fsync_args;
+typedef int vop_fsync_t(struct vop_fsync_args *);
+
+struct vop_remove_args;
+typedef int vop_remove_t(struct vop_remove_args *);
+
+struct vop_link_args;
+typedef int vop_link_t(struct vop_link_args *);
+
+struct vop_rename_args;
+typedef int vop_rename_t(struct vop_rename_args *);
+
+struct vop_mkdir_args;
+typedef int vop_mkdir_t(struct vop_mkdir_args *);
+
+struct vop_rmdir_args;
+typedef int vop_rmdir_t(struct vop_rmdir_args *);
+
+struct vop_symlink_args;
+typedef int vop_symlink_t(struct vop_symlink_args *);
+
+struct vop_readdir_args;
+typedef int vop_readdir_t(struct vop_readdir_args *);
+
+struct vop_readlink_args;
+typedef int vop_readlink_t(struct vop_readlink_args *);
+
+struct vop_inactive_args;
+typedef int vop_inactive_t(struct vop_inactive_args *);
+
+struct vop_reclaim_args;
+typedef int vop_reclaim_t(struct vop_reclaim_args *);
+
+struct vop_lock1_args;
+typedef int vop_lock1_t(struct vop_lock1_args *);
+
+struct vop_unlock_args;
+typedef int vop_unlock_t(struct vop_unlock_args *);
+
+struct vop_bmap_args;
+typedef int vop_bmap_t(struct vop_bmap_args *);
+
+struct vop_strategy_args;
+typedef int vop_strategy_t(struct vop_strategy_args *);
+
+struct vop_getwritemount_args;
+typedef int vop_getwritemount_t(struct vop_getwritemount_args *);
+
+struct vop_print_args;
+typedef int vop_print_t(struct vop_print_args *);
+
+struct vop_pathconf_args;
+typedef int vop_pathconf_t(struct vop_pathconf_args *);
+
+struct vop_advlock_args;
+typedef int vop_advlock_t(struct vop_advlock_args *);
+
+struct vop_advlockasync_args;
+typedef int vop_advlockasync_t(struct vop_advlockasync_args *);
+
+struct vop_reallocblks_args;
+typedef int vop_reallocblks_t(struct vop_reallocblks_args *);
+
+struct vop_getpages_args;
+typedef int vop_getpages_t(struct vop_getpages_args *);
+
+struct vop_putpages_args;
+typedef int vop_putpages_t(struct vop_putpages_args *);
+
+struct vop_getacl_args;
+typedef int vop_getacl_t(struct vop_getacl_args *);
+
+struct vop_setacl_args;
+typedef int vop_setacl_t(struct vop_setacl_args *);
+
+struct vop_aclcheck_args;
+typedef int vop_aclcheck_t(struct vop_aclcheck_args *);
+
+struct vop_closeextattr_args;
+typedef int vop_closeextattr_t(struct vop_closeextattr_args *);
+
+struct vop_getextattr_args;
+typedef int vop_getextattr_t(struct vop_getextattr_args *);
+
+struct vop_listextattr_args;
+typedef int vop_listextattr_t(struct vop_listextattr_args *);
+
+struct vop_openextattr_args;
+typedef int vop_openextattr_t(struct vop_openextattr_args *);
+
+struct vop_deleteextattr_args;
+typedef int vop_deleteextattr_t(struct vop_deleteextattr_args *);
+
+struct vop_setextattr_args;
+typedef int vop_setextattr_t(struct vop_setextattr_args *);
+
+struct vop_setlabel_args;
+typedef int vop_setlabel_t(struct vop_setlabel_args *);
+
+struct vop_vptofh_args;
+typedef int vop_vptofh_t(struct vop_vptofh_args *);
+
+struct vop_vptocnp_args;
+typedef int vop_vptocnp_t(struct vop_vptocnp_args *);
+
diff --git a/rtems/freebsd/machine/_align.h b/rtems/freebsd/machine/_align.h
new file mode 100644
index 00000000..d28f51eb
--- /dev/null
+++ b/rtems/freebsd/machine/_align.h
@@ -0,0 +1,33 @@
+/**
+ * @file
+ *
+ * @ingroup rtems_bsd_machine
+ *
+ * @brief TODO.
+ */
+
+/*
+ * Copyright (c) 2009, 2010 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Obere Lagerstr. 30
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.com/license/LICENSE.
+ */
+
+#ifndef _RTEMS_BSD_MACHINE__ALIGN_H_
+#define _RTEMS_BSD_MACHINE__ALIGN_H_
+
+#ifndef _RTEMS_BSD_MACHINE_RTEMS_BSD_CONFIG_H_
+#error "the header file <rtems/freebsd/machine/rtems-bsd-config.h> must be included first"
+#endif
+
+#define _ALIGNBYTES ((uintptr_t) (CPU_ALIGNMENT - 1))
+#define _ALIGN(p) (((uintptr_t) (p) + _ALIGNBYTES) & ~_ALIGNBYTES)
+
+#endif /* _RTEMS_BSD_MACHINE__ALIGN_H_ */
diff --git a/rtems/freebsd/machine/_bus.h b/rtems/freebsd/machine/_bus.h
new file mode 100644
index 00000000..b3025ccb
--- /dev/null
+++ b/rtems/freebsd/machine/_bus.h
@@ -0,0 +1 @@
+#include <rtems/freebsd/machine/bus.h>
diff --git a/rtems/freebsd/machine/_limits.h b/rtems/freebsd/machine/_limits.h
new file mode 100644
index 00000000..5408c470
--- /dev/null
+++ b/rtems/freebsd/machine/_limits.h
@@ -0,0 +1,30 @@
+/**
+ * @file
+ *
+ * @ingroup rtems_bsd_machine
+ *
+ * @brief TODO.
+ */
+
+/*
+ * Copyright (c) 2009, 2010 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Obere Lagerstr. 30
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.com/license/LICENSE.
+ */
+
+#ifndef _RTEMS_BSD_MACHINE__LIMITS_H_
+#define _RTEMS_BSD_MACHINE__LIMITS_H_
+
+#ifndef _RTEMS_BSD_MACHINE_RTEMS_BSD_CONFIG_H_
+#error "the header file <rtems/freebsd/machine/rtems-bsd-config.h> must be included first"
+#endif
+
+#endif /* _RTEMS_BSD_MACHINE__LIMITS_H_ */
diff --git a/rtems/freebsd/machine/_stdint.h b/rtems/freebsd/machine/_stdint.h
new file mode 100644
index 00000000..0dd26f90
--- /dev/null
+++ b/rtems/freebsd/machine/_stdint.h
@@ -0,0 +1,30 @@
+/**
+ * @file
+ *
+ * @ingroup rtems_bsd_machine
+ *
+ * @brief TODO.
+ */
+
+/*
+ * Copyright (c) 2009, 2010 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Obere Lagerstr. 30
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.com/license/LICENSE.
+ */
+
+#ifndef _RTEMS_BSD_MACHINE__STDINT_H_
+#define _RTEMS_BSD_MACHINE__STDINT_H_
+
+#ifndef _RTEMS_BSD_MACHINE_RTEMS_BSD_CONFIG_H_
+#error "the header file <rtems/freebsd/machine/rtems-bsd-config.h> must be included first"
+#endif
+
+#endif /* _RTEMS_BSD_MACHINE__STDINT_H_ */
diff --git a/rtems/freebsd/machine/_types.h b/rtems/freebsd/machine/_types.h
new file mode 100644
index 00000000..cb4f3e84
--- /dev/null
+++ b/rtems/freebsd/machine/_types.h
@@ -0,0 +1,30 @@
+/**
+ * @file
+ *
+ * @ingroup rtems_bsd_machine
+ *
+ * @brief TODO.
+ */
+
+/*
+ * Copyright (c) 2009, 2010 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Obere Lagerstr. 30
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.com/license/LICENSE.
+ */
+
+#ifndef _RTEMS_BSD_MACHINE__TYPES_H_
+#define _RTEMS_BSD_MACHINE__TYPES_H_
+
+#ifndef _RTEMS_BSD_MACHINE_RTEMS_BSD_CONFIG_H_
+#error "the header file <rtems/freebsd/machine/rtems-bsd-config.h> must be included first"
+#endif
+
+#endif /* _RTEMS_BSD_MACHINE__TYPES_H_ */
diff --git a/rtems/freebsd/machine/atomic.h b/rtems/freebsd/machine/atomic.h
new file mode 100644
index 00000000..44c61842
--- /dev/null
+++ b/rtems/freebsd/machine/atomic.h
@@ -0,0 +1,367 @@
+/**
+ * @file
+ *
+ * @ingroup rtems_bsd_machine
+ *
+ * @brief TODO.
+ */
+
+/*
+ * Copyright (c) 2009, 2010 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Obere Lagerstr. 30
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.com/license/LICENSE.
+ */
+
+#ifndef _RTEMS_BSD_MACHINE_ATOMIC_H_
+#define _RTEMS_BSD_MACHINE_ATOMIC_H_
+
+#ifndef _RTEMS_BSD_MACHINE_RTEMS_BSD_CONFIG_H_
+#error "the header file <rtems/freebsd/machine/rtems-bsd-config.h> must be included first"
+#endif
+
+#define mb() RTEMS_COMPILER_MEMORY_BARRIER()
+#define wmb() RTEMS_COMPILER_MEMORY_BARRIER()
+#define rmb() RTEMS_COMPILER_MEMORY_BARRIER()
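+
+/*
+ * Note: reducing mb()/wmb()/rmb() to a plain compiler barrier assumes a
+ * uniprocessor configuration; the interrupt-disable sections below then
+ * provide all of the ordering that is required.
+ */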
+
+static inline void
+atomic_add_int(volatile int *p, int v)
+{
+ rtems_interrupt_level level;
+
+ rtems_interrupt_disable(level);
+ *p += v;
+ rtems_interrupt_enable(level);
+}
+
+#define atomic_add_acq_int atomic_add_int
+#define atomic_add_rel_int atomic_add_int
+
+static inline void
+atomic_subtract_int(volatile int *p, int v)
+{
+ rtems_interrupt_level level;
+
+ rtems_interrupt_disable(level);
+ *p -= v;
+ rtems_interrupt_enable(level);
+}
+
+#define atomic_subtract_acq_int atomic_subtract_int
+#define atomic_subtract_rel_int atomic_subtract_int
+
+static inline void
+atomic_set_int(volatile int *p, int v)
+{
+ rtems_interrupt_level level;
+
+ rtems_interrupt_disable(level);
+ *p |= v;
+ rtems_interrupt_enable(level);
+}
+
+#define atomic_set_acq_int atomic_set_int
+#define atomic_set_rel_int atomic_set_int
+
+static inline void
+atomic_clear_int(volatile int *p, int v)
+{
+ rtems_interrupt_level level;
+
+ rtems_interrupt_disable(level);
+ *p &= ~v;
+ rtems_interrupt_enable(level);
+}
+
+#define atomic_clear_acq_int atomic_clear_int
+#define atomic_clear_rel_int atomic_clear_int
+
+static inline int
+atomic_cmpset_int(volatile int *p, int cmp, int set)
+{
+ rtems_interrupt_level level;
+ int rv;
+
+ rtems_interrupt_disable(level);
+ rv = *p == cmp;
+ if (rv) {
+ *p = set;
+ }
+ rtems_interrupt_enable(level);
+
+ return rv;
+}
+
+#define atomic_cmpset_acq_int atomic_cmpset_int
+#define atomic_cmpset_rel_int atomic_cmpset_int
+
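+/*
+ * Usage sketch (illustration only): atomic_cmpset_int() returns non-zero
+ * when *p matched cmp and was atomically replaced by set, so a simple
+ * test-and-set lock can be built on it; spin until the 0 -> 1 transition
+ * succeeds, then release by storing 0:
+ *
+ *     static volatile int lock;
+ *
+ *     while (!atomic_cmpset_acq_int(&lock, 0, 1))
+ *             ;
+ *     ... critical section ...
+ *     atomic_store_rel_int(&lock, 0);
+ */
+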
+static inline int
+atomic_fetchadd_int(volatile int *p, int v)
+{
+ rtems_interrupt_level level;
+ int tmp;
+
+ rtems_interrupt_disable(level);
+ tmp = *p;
+ *p += v;
+ rtems_interrupt_enable(level);
+
+ return tmp;
+}
+
+static inline int
+atomic_readandclear_int(volatile int *p)
+{
+ rtems_interrupt_level level;
+ int tmp;
+
+ rtems_interrupt_disable(level);
+ tmp = *p;
+ *p = 0;
+ rtems_interrupt_enable(level);
+
+ return tmp;
+}
+
+static inline int
+atomic_load_acq_int(volatile int *p)
+{
+ return *p;
+}
+
+static inline void
+atomic_store_rel_int(volatile int *p, int v)
+{
+ *p = v;
+}
+
+static inline void
+atomic_add_32(volatile uint32_t *p, uint32_t v)
+{
+ rtems_interrupt_level level;
+
+ rtems_interrupt_disable(level);
+ *p += v;
+ rtems_interrupt_enable(level);
+}
+
+#define atomic_add_acq_32 atomic_add_32
+#define atomic_add_rel_32 atomic_add_32
+
+static inline void
+atomic_subtract_32(volatile uint32_t *p, uint32_t v)
+{
+ rtems_interrupt_level level;
+
+ rtems_interrupt_disable(level);
+ *p -= v;
+ rtems_interrupt_enable(level);
+}
+
+#define atomic_subtract_acq_32 atomic_subtract_32
+#define atomic_subtract_rel_32 atomic_subtract_32
+
+static inline void
+atomic_set_32(volatile uint32_t *p, uint32_t v)
+{
+ rtems_interrupt_level level;
+
+ rtems_interrupt_disable(level);
+ *p |= v;
+ rtems_interrupt_enable(level);
+}
+
+#define atomic_set_acq_32 atomic_set_32
+#define atomic_set_rel_32 atomic_set_32
+
+static inline void
+atomic_clear_32(volatile uint32_t *p, uint32_t v)
+{
+ rtems_interrupt_level level;
+
+ rtems_interrupt_disable(level);
+ *p &= ~v;
+ rtems_interrupt_enable(level);
+}
+
+#define atomic_clear_acq_32 atomic_clear_32
+#define atomic_clear_rel_32 atomic_clear_32
+
+static inline int
+atomic_cmpset_32(volatile uint32_t *p, uint32_t cmp, uint32_t set)
+{
+ rtems_interrupt_level level;
+ int rv;
+
+ rtems_interrupt_disable(level);
+ rv = *p == cmp;
+ if (rv) {
+ *p = set;
+ }
+ rtems_interrupt_enable(level);
+
+ return rv;
+}
+
+#define atomic_cmpset_acq_32 atomic_cmpset_32
+#define atomic_cmpset_rel_32 atomic_cmpset_32
+
+static inline uint32_t
+atomic_fetchadd_32(volatile uint32_t *p, uint32_t v)
+{
+ rtems_interrupt_level level;
+ uint32_t tmp;
+
+ rtems_interrupt_disable(level);
+ tmp = *p;
+ *p += v;
+ rtems_interrupt_enable(level);
+
+ return tmp;
+}
+
+static inline uint32_t
+atomic_readandclear_32(volatile uint32_t *p)
+{
+ rtems_interrupt_level level;
+ uint32_t tmp;
+
+ rtems_interrupt_disable(level);
+ tmp = *p;
+ *p = 0;
+ rtems_interrupt_enable(level);
+
+ return tmp;
+}
+
+static inline uint32_t
+atomic_load_acq_32(volatile uint32_t *p)
+{
+ return *p;
+}
+
+static inline void
+atomic_store_rel_32(volatile uint32_t *p, uint32_t v)
+{
+ *p = v;
+}
+
+static inline void
+atomic_add_long(volatile long *p, long v)
+{
+ rtems_interrupt_level level;
+
+ rtems_interrupt_disable(level);
+ *p += v;
+ rtems_interrupt_enable(level);
+}
+
+#define atomic_add_acq_long atomic_add_long
+#define atomic_add_rel_long atomic_add_long
+
+static inline void
+atomic_subtract_long(volatile long *p, long v)
+{
+ rtems_interrupt_level level;
+
+ rtems_interrupt_disable(level);
+ *p -= v;
+ rtems_interrupt_enable(level);
+}
+
+#define atomic_subtract_acq_long atomic_subtract_long
+#define atomic_subtract_rel_long atomic_subtract_long
+
+static inline void
+atomic_set_long(volatile long *p, long v)
+{
+ rtems_interrupt_level level;
+
+ rtems_interrupt_disable(level);
+ *p |= v;
+ rtems_interrupt_enable(level);
+}
+
+#define atomic_set_acq_long atomic_set_long
+#define atomic_set_rel_long atomic_set_long
+
+static inline void
+atomic_clear_long(volatile long *p, long v)
+{
+ rtems_interrupt_level level;
+
+ rtems_interrupt_disable(level);
+ *p &= ~v;
+ rtems_interrupt_enable(level);
+}
+
+#define atomic_clear_acq_long atomic_clear_long
+#define atomic_clear_rel_long atomic_clear_long
+
+static inline int
+atomic_cmpset_long(volatile long *p, long cmp, long set)
+{
+ rtems_interrupt_level level;
+ int rv;
+
+ rtems_interrupt_disable(level);
+ rv = *p == cmp;
+ if (rv) {
+ *p = set;
+ }
+ rtems_interrupt_enable(level);
+
+ return rv;
+}
+
+#define atomic_cmpset_acq_long atomic_cmpset_long
+#define atomic_cmpset_rel_long atomic_cmpset_long
+
+static inline long
+atomic_fetchadd_long(volatile long *p, long v)
+{
+ rtems_interrupt_level level;
+ long tmp;
+
+ rtems_interrupt_disable(level);
+ tmp = *p;
+ *p += v;
+ rtems_interrupt_enable(level);
+
+ return tmp;
+}
+
+static inline long
+atomic_readandclear_long(volatile long *p)
+{
+ rtems_interrupt_level level;
+ long tmp;
+
+ rtems_interrupt_disable(level);
+ tmp = *p;
+ *p = 0;
+ rtems_interrupt_enable(level);
+
+ return tmp;
+}
+
+static inline long
+atomic_load_acq_long(volatile long *p)
+{
+ return *p;
+}
+
+static inline void
+atomic_store_rel_long(volatile long *p, long v)
+{
+ *p = v;
+}
+
+#endif /* _RTEMS_BSD_MACHINE_ATOMIC_H_ */
diff --git a/rtems/freebsd/machine/bus.h b/rtems/freebsd/machine/bus.h
new file mode 100644
index 00000000..3e01ce4a
--- /dev/null
+++ b/rtems/freebsd/machine/bus.h
@@ -0,0 +1,781 @@
+/**
+ * @file
+ *
+ * @ingroup rtems_bsd_machine
+ *
+ * @brief TODO.
+ *
+ * File origin from FreeBSD 'sys/amd64/include/bus.h'.
+ */
+
+/*-
+ * Copyright (c) 2009, 2010 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Obere Lagerstr. 30
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * Copyright (c) KATO Takenori, 1999.
+ *
+ * All rights reserved. Unpublished rights reserved under the copyright
+ * laws of Japan.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer as
+ * the first lines of this file unmodified.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*-
+ * Copyright (c) 1996, 1997 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
+ * NASA Ames Research Center.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the NetBSD
+ * Foundation, Inc. and its contributors.
+ * 4. Neither the name of The NetBSD Foundation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*-
+ * Copyright (c) 1996 Charles M. Hannum. All rights reserved.
+ * Copyright (c) 1996 Christopher G. Demetriou. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Christopher G. Demetriou
+ * for the NetBSD Project.
+ * 4. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTEMS_BSD_MACHINE_BUS_H_
+#define _RTEMS_BSD_MACHINE_BUS_H_
+
+#ifndef _RTEMS_BSD_MACHINE_RTEMS_BSD_CONFIG_H_
+#error "the header file <rtems/freebsd/machine/rtems-bsd-config.h> must be included first"
+#endif
+
+/*
+ * Bus address alignment.
+ */
+#define BUS_SPACE_ALIGNED_POINTER(p, t) ALIGNED_POINTER(p, t)
+
+/*
+ * Bus address maxima.
+ */
+#define BUS_SPACE_MAXADDR_24BIT 0xffffffU
+#define BUS_SPACE_MAXADDR_32BIT 0xffffffffU
+#define BUS_SPACE_MAXADDR 0xffffffffU
+#define BUS_SPACE_MAXSIZE_24BIT 0xffffffU
+#define BUS_SPACE_MAXSIZE_32BIT 0xffffffffU
+#define BUS_SPACE_MAXSIZE 0xffffffffU
+
+/*
+ * Bus access.
+ */
+#define BUS_SPACE_UNRESTRICTED (~0U)
+
+/*
+ * Bus read/write barrier method.
+ */
+#define BUS_SPACE_BARRIER_READ 0x01 /* force read barrier */
+#define BUS_SPACE_BARRIER_WRITE 0x02 /* force write barrier */
+
+/*
+ * Bus address and size types.
+ */
+typedef unsigned int bus_addr_t;
+typedef unsigned int bus_size_t;
+
+/*
+ * Access methods for bus resources and address space.
+ */
+typedef int bus_space_tag_t;
+typedef unsigned int bus_space_handle_t;
+
+/*
+ * Map a region of device bus space into CPU virtual address space.
+ */
+
+static __inline int
+bus_space_map(bus_space_tag_t t __unused, bus_addr_t addr,
+ bus_size_t size __unused, int flags __unused,
+ bus_space_handle_t *bshp)
+{
+ *bshp = addr;
+ return (0);
+}
+
+/*
+ * Unmap a region of device bus space.
+ */
+static __inline void
+bus_space_unmap(bus_space_tag_t bst __unused, bus_space_handle_t bsh __unused,
+ bus_size_t size __unused)
+{
+ /* Do nothing */
+}
+
+
+/*
+ * Get a new handle for a subregion of an already-mapped area of bus space.
+ */
+static __inline int
+bus_space_subregion(bus_space_tag_t bst __unused, bus_space_handle_t bsh,
+ bus_size_t ofs, bus_size_t size, bus_space_handle_t *nbshp)
+{
+ *nbshp = bsh + ofs;
+ return (0);
+}
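+
+/*
+ * Usage sketch (illustration only; "regs" and the addresses are made up):
+ * in this port a mapping is the identity, so the handle is simply the bus
+ * address and a subregion is a plain offset from it:
+ *
+ *     bus_space_tag_t t = 0;
+ *     bus_space_handle_t regs;
+ *
+ *     if (bus_space_map(t, 0x80000000, 0x100, 0, &regs) == 0) {
+ *             bus_space_handle_t sub;
+ *             bus_space_subregion(t, regs, 0x40, 0x10, &sub);
+ *     }
+ */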
+
+
+/*
+ * Allocate a region of memory that is accessible to devices in bus space.
+ */
+int
+bus_space_alloc(bus_space_tag_t bst __unused, bus_addr_t rstart, bus_addr_t rend,
+ bus_size_t size, bus_size_t align, bus_size_t boundary, int flags,
+ bus_addr_t *addrp, bus_space_handle_t *bshp);
+
+
+/*
+ * Free a region of bus space accessible memory.
+ */
+void
+bus_space_free(bus_space_tag_t bst __unused, bus_space_handle_t bsh, bus_size_t size);
+
+
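+/*
+ * Enforce ordering of bus space accesses.  A no-op in this port, on the
+ * assumption that the read/write methods below access memory directly
+ * through volatile pointers and no posted writes need to be flushed.
+ */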
+static __inline void
+bus_space_barrier(bus_space_tag_t bst __unused, bus_space_handle_t bsh, bus_size_t ofs,
+ bus_size_t size, int flags)
+{
+ /* Do nothing */
+}
+
+
+/*
+ * Read 1 unit of data from bus space described by the tag, handle and ofs
+ * tuple. A unit of data can be 1 byte, 2 bytes, 4 bytes or 8 bytes. The
+ * data is returned.
+ */
+static __inline uint8_t
+bus_space_read_1(bus_space_tag_t bst __unused, bus_space_handle_t bsh, bus_size_t ofs)
+{
+ uint8_t __volatile *bsp = (uint8_t __volatile *)(bsh + ofs);
+ return (*bsp);
+}
+
+static __inline uint16_t
+bus_space_read_2(bus_space_tag_t bst __unused, bus_space_handle_t bsh, bus_size_t ofs)
+{
+ uint16_t __volatile *bsp = (uint16_t __volatile *)(bsh + ofs);
+ return (*bsp);
+}
+
+static __inline uint32_t
+bus_space_read_4(bus_space_tag_t bst __unused, bus_space_handle_t bsh, bus_size_t ofs)
+{
+ uint32_t __volatile *bsp = (uint32_t __volatile *)(bsh + ofs);
+ return (*bsp);
+}
+
+static __inline uint64_t
+bus_space_read_8(bus_space_tag_t bst __unused, bus_space_handle_t bsh, bus_size_t ofs)
+{
+ uint64_t __volatile *bsp = (uint64_t __volatile *)(bsh + ofs);
+ return (*bsp);
+}
+
+
+/*
+ * Write 1 unit of data to bus space described by the tag, handle and ofs
+ * tuple. A unit of data can be 1 byte, 2 bytes, 4 bytes or 8 bytes. The
+ * data is passed by value.
+ */
+static __inline void
+bus_space_write_1(bus_space_tag_t bst __unused, bus_space_handle_t bsh, bus_size_t ofs,
+ uint8_t val)
+{
+ uint8_t __volatile *bsp = (uint8_t __volatile *)(bsh + ofs);
+ *bsp = val;
+}
+
+static __inline void
+bus_space_write_2(bus_space_tag_t bst __unused, bus_space_handle_t bsh, bus_size_t ofs,
+ uint16_t val)
+{
+ uint16_t __volatile *bsp = (uint16_t __volatile *)(bsh + ofs);
+ *bsp = val;
+}
+
+static __inline void
+bus_space_write_4(bus_space_tag_t bst __unused, bus_space_handle_t bsh, bus_size_t ofs,
+ uint32_t val)
+{
+ uint32_t __volatile *bsp = (uint32_t __volatile *)(bsh + ofs);
+ *bsp = val;
+}
+
+static __inline void
+bus_space_write_8(bus_space_tag_t bst __unused, bus_space_handle_t bsh, bus_size_t ofs,
+ uint64_t val)
+{
+ uint64_t __volatile *bsp = (uint64_t __volatile *)(bsh + ofs);
+ *bsp = val;
+}
+
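+/*
+ * Usage sketch (illustration only; the register offsets are made up): a
+ * status register read followed by a control register write looks like
+ *
+ *     uint32_t status = bus_space_read_4(t, regs, 0x00);
+ *     bus_space_write_4(t, regs, 0x04, status | 0x1);
+ */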
+
+/*
+ * Read count units of data from bus space described by the tag, handle and
+ * ofs tuple. A unit of data can be 1 byte, 2 bytes, 4 bytes or 8 bytes. The
+ * data is returned in the buffer passed by reference.
+ */
+static __inline void
+bus_space_read_multi_1(bus_space_tag_t bst __unused, bus_space_handle_t bsh,
+ bus_size_t ofs, uint8_t *bufp, bus_size_t count)
+{
+ uint8_t __volatile *bsp = (uint8_t __volatile *)(bsh + ofs);
+ while (count-- > 0) {
+ *bufp++ = *bsp;
+ }
+}
+
+static __inline void
+bus_space_read_multi_2(bus_space_tag_t bst __unused, bus_space_handle_t bsh,
+ bus_size_t ofs, uint16_t *bufp, bus_size_t count)
+{
+ uint16_t __volatile *bsp = (uint16_t __volatile *)(bsh + ofs);
+ while (count-- > 0) {
+ *bufp++ = *bsp;
+ }
+}
+
+static __inline void
+bus_space_read_multi_4(bus_space_tag_t bst __unused, bus_space_handle_t bsh,
+ bus_size_t ofs, uint32_t *bufp, bus_size_t count)
+{
+ uint32_t __volatile *bsp = (uint32_t __volatile *)(bsh + ofs);
+ while (count-- > 0) {
+ *bufp++ = *bsp;
+ }
+}
+
+static __inline void
+bus_space_read_multi_8(bus_space_tag_t bst __unused, bus_space_handle_t bsh,
+ bus_size_t ofs, uint64_t *bufp, bus_size_t count)
+{
+ uint64_t __volatile *bsp = (uint64_t __volatile *)(bsh + ofs);
+ while (count-- > 0) {
+ *bufp++ = *bsp;
+ }
+}
+
+
+/*
+ * Write count units of data to bus space described by the tag, handle and
+ * ofs tuple. A unit of data can be 1 byte, 2 bytes, 4 bytes or 8 bytes. The
+ * data is read from the buffer passed by reference.
+ */
+static __inline void
+bus_space_write_multi_1(bus_space_tag_t bst __unused, bus_space_handle_t bsh,
+ bus_size_t ofs, const uint8_t *bufp, bus_size_t count)
+{
+ uint8_t __volatile *bsp = (uint8_t __volatile *)(bsh + ofs);
+ while (count-- > 0) {
+ *bsp = *bufp++;
+ }
+}
+
+static __inline void
+bus_space_write_multi_2(bus_space_tag_t bst __unused, bus_space_handle_t bsh,
+ bus_size_t ofs, const uint16_t *bufp, bus_size_t count)
+{
+ uint16_t __volatile *bsp = (uint16_t __volatile *)(bsh + ofs);
+ while (count-- > 0) {
+ *bsp = *bufp++;
+ }
+}
+
+static __inline void
+bus_space_write_multi_4(bus_space_tag_t bst __unused, bus_space_handle_t bsh,
+ bus_size_t ofs, const uint32_t *bufp, bus_size_t count)
+{
+ uint32_t __volatile *bsp = (uint32_t __volatile *)(bsh + ofs);
+ while (count-- > 0) {
+ *bsp = *bufp++;
+ }
+}
+
+static __inline void
+bus_space_write_multi_8(bus_space_tag_t bst __unused, bus_space_handle_t bsh,
+ bus_size_t ofs, const uint64_t *bufp, bus_size_t count)
+{
+ uint64_t __volatile *bsp = (uint64_t __volatile *)(bsh + ofs);
+ while (count-- > 0) {
+ *bsp = *bufp++;
+ }
+}
+
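+/*
+ * Note the multi/region distinction: the *_multi_* methods above transfer
+ * count units through the single bus space address at ofs (e.g. draining
+ * a FIFO register), while the *_region_* methods below step through count
+ * successive bus space addresses.
+ */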
+
+/*
+ * Read count units of data from bus space described by the tag, handle and
+ * ofs tuple. A unit of data can be 1 byte, 2 bytes, 4 bytes or 8 bytes. The
+ * data is written to the buffer passed by reference and read from successive
+ * bus space addresses. Access is unordered.
+ */
+static __inline void
+bus_space_read_region_1(bus_space_tag_t bst __unused, bus_space_handle_t bsh,
+ bus_size_t ofs, uint8_t *bufp, bus_size_t count)
+{
+ while (count-- > 0) {
+ uint8_t __volatile *bsp = (uint8_t __volatile *)(bsh + ofs);
+ *bufp++ = *bsp;
+ ofs += 1;
+ }
+}
+
+static __inline void
+bus_space_read_region_2(bus_space_tag_t bst __unused, bus_space_handle_t bsh,
+ bus_size_t ofs, uint16_t *bufp, bus_size_t count)
+{
+ while (count-- > 0) {
+ uint16_t __volatile *bsp = (uint16_t __volatile *)(bsh + ofs);
+ *bufp++ = *bsp;
+ ofs += 2;
+ }
+}
+
+static __inline void
+bus_space_read_region_4(bus_space_tag_t bst __unused, bus_space_handle_t bsh,
+ bus_size_t ofs, uint32_t *bufp, bus_size_t count)
+{
+ while (count-- > 0) {
+ uint32_t __volatile *bsp = (uint32_t __volatile *)(bsh + ofs);
+ *bufp++ = *bsp;
+ ofs += 4;
+ }
+}
+
+static __inline void
+bus_space_read_region_8(bus_space_tag_t bst __unused, bus_space_handle_t bsh,
+ bus_size_t ofs, uint64_t *bufp, bus_size_t count)
+{
+ while (count-- > 0) {
+ uint64_t __volatile *bsp = (uint64_t __volatile *)(bsh + ofs);
+ *bufp++ = *bsp;
+ ofs += 8;
+ }
+}
+
+
+/*
+ * Write count units of data to bus space described by the tag, handle and
+ * ofs tuple. A unit of data can be 1 byte, 2 bytes, 4 bytes or 8 bytes. The
+ * data is read from the buffer passed by reference and written to successive
+ * bus space addresses. Access is unordered.
+ */
+static __inline void
+bus_space_write_region_1(bus_space_tag_t bst __unused, bus_space_handle_t bsh,
+ bus_size_t ofs, const uint8_t *bufp, bus_size_t count)
+{
+ while (count-- > 0) {
+ uint8_t __volatile *bsp = (uint8_t __volatile *)(bsh + ofs);
+ *bsp = *bufp++;
+ ofs += 1;
+ }
+}
+
+static __inline void
+bus_space_write_region_2(bus_space_tag_t bst __unused, bus_space_handle_t bsh,
+ bus_size_t ofs, const uint16_t *bufp, bus_size_t count)
+{
+ while (count-- > 0) {
+ uint16_t __volatile *bsp = (uint16_t __volatile *)(bsh + ofs);
+ *bsp = *bufp++;
+ ofs += 2;
+ }
+}
+
+static __inline void
+bus_space_write_region_4(bus_space_tag_t bst __unused, bus_space_handle_t bsh,
+ bus_size_t ofs, const uint32_t *bufp, bus_size_t count)
+{
+ while (count-- > 0) {
+ uint32_t __volatile *bsp = (uint32_t __volatile *)(bsh + ofs);
+ *bsp = *bufp++;
+ ofs += 4;
+ }
+}
+
+static __inline void
+bus_space_write_region_8(bus_space_tag_t bst __unused, bus_space_handle_t bsh,
+ bus_size_t ofs, const uint64_t *bufp, bus_size_t count)
+{
+ while (count-- > 0) {
+ uint64_t __volatile *bsp = (uint64_t __volatile *)(bsh + ofs);
+ *bsp = *bufp++;
+ ofs += 8;
+ }
+}
+
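The region accessors, by contrast, step through successive bus addresses, which fits devices that expose buffer RAM as a memory window. A minimal sketch under the same hypothetical-device assumptions as above:

#include <rtems/freebsd/machine/bus.h>

#define DEV_PKT_WINDOW 0x100	/* hypothetical packet buffer window */

/* Write a frame into the buffer window and read it back; every access
   goes to the next bus address, unlike the multi variants above. */
static void
dev_pkt_roundtrip(bus_space_tag_t bst, bus_space_handle_t bsh,
    const uint8_t *out, uint8_t *in, bus_size_t len)
{
	bus_space_write_region_1(bst, bsh, DEV_PKT_WINDOW, out, len);
	bus_space_read_region_1(bst, bsh, DEV_PKT_WINDOW, in, len);
}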
+
+/*
+ * Write count units of data to bus space described by the tag, handle and
+ * ofs tuple. A unit of data can be 1 byte, 2 bytes, 4 bytes or 8 bytes. The
+ * data is passed by value and written repeatedly to a single bus space
+ * address. Writes are unordered.
+ */
+static __inline void
+bus_space_set_multi_1(bus_space_tag_t bst __unused, bus_space_handle_t bsh,
+ bus_size_t ofs, uint8_t val, bus_size_t count)
+{
+ uint8_t __volatile *bsp = (uint8_t __volatile *)(bsh + ofs);
+ while (count-- > 0) {
+ *bsp = val;
+ }
+}
+
+static __inline void
+bus_space_set_multi_2(bus_space_tag_t bst __unused, bus_space_handle_t bsh,
+ bus_size_t ofs, uint16_t val, bus_size_t count)
+{
+ uint16_t __volatile *bsp = (uint16_t __volatile *)(bsh + ofs);
+ while (count-- > 0) {
+ *bsp = val;
+ }
+}
+
+static __inline void
+bus_space_set_multi_4(bus_space_tag_t bst __unused, bus_space_handle_t bsh,
+ bus_size_t ofs, uint32_t val, bus_size_t count)
+{
+ uint32_t __volatile *bsp = (uint32_t __volatile *)(bsh + ofs);
+ while (count-- > 0) {
+ *bsp = val;
+ }
+}
+
+static __inline void
+bus_space_set_multi_8(bus_space_tag_t bst __unused, bus_space_handle_t bsh,
+ bus_size_t ofs, uint64_t val, bus_size_t count)
+{
+ uint64_t __volatile *bsp = (uint64_t __volatile *)(bsh + ofs);
+ while (count-- > 0) {
+ *bsp = val;
+ }
+}
+
+
+/*
+ * Write count units of data to bus space described by the tag, handle and
+ * ofs tuple. A unit of data can be 1 byte, 2 bytes, 4 bytes or 8 bytes. The
+ * data is passed by value and written to successive bus space addresses.
+ * Writes are unordered.
+ */
+static __inline void
+bus_space_set_region_1(bus_space_tag_t bst __unused, bus_space_handle_t bsh,
+ bus_size_t ofs, uint8_t val, bus_size_t count)
+{
+ while (count-- > 0) {
+ uint8_t __volatile *bsp = (uint8_t __volatile *)(bsh + ofs);
+ *bsp = val;
+ ofs += 1;
+ }
+}
+
+static __inline void
+bus_space_set_region_2(bus_space_tag_t bst __unused, bus_space_handle_t bsh,
+ bus_size_t ofs, uint16_t val, bus_size_t count)
+{
+ while (count-- > 0) {
+ uint16_t __volatile *bsp = (uint16_t __volatile *)(bsh + ofs);
+ *bsp = val;
+ ofs += 2;
+ }
+}
+
+static __inline void
+bus_space_set_region_4(bus_space_tag_t bst __unused, bus_space_handle_t bsh,
+ bus_size_t ofs, uint32_t val, bus_size_t count)
+{
+ while (count-- > 0) {
+ uint32_t __volatile *bsp = (uint32_t __volatile *)(bsh + ofs);
+ *bsp = val;
+ ofs += 4;
+ }
+}
+
+static __inline void
+bus_space_set_region_8(bus_space_tag_t bst __unused, bus_space_handle_t bsh,
+ bus_size_t ofs, uint64_t val, bus_size_t count)
+{
+ while (count-- > 0) {
+ uint64_t __volatile *bsp = (uint64_t __volatile *)(bsh + ofs);
+ *bsp = val;
+ ofs += 8;
+ }
+}
+
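The set variants take the value by argument rather than through a buffer: set_multi writes it count times to one address, set_region fills a range. A sketch, zeroing the hypothetical buffer window from the previous example:

/* Zero len bytes of device buffer RAM starting at DEV_PKT_WINDOW;
   bus_space_set_multi_1() would instead write the value len times
   to the single address DEV_PKT_WINDOW. */
static void
dev_pkt_clear(bus_space_tag_t bst, bus_space_handle_t bsh, bus_size_t len)
{
	bus_space_set_region_1(bst, bsh, DEV_PKT_WINDOW, 0, len);
}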
+
+/*
+ * Copy count units of data from bus space described by the tag and the first
+ * handle and ofs pair to bus space described by the tag and the second handle
+ * and ofs pair. A unit of data can be 1 byte, 2 bytes, 4 bytes or 8 bytes.
+ * The data is read from successive bus space addresses and also written to
+ * successive bus space addresses. Overlapping regions are copied in the
+ * safe direction. Both reads and writes are unordered.
+ */
+static __inline void
+bus_space_copy_region_1(bus_space_tag_t bst __unused, bus_space_handle_t bsh1,
+ bus_size_t ofs1, bus_space_handle_t bsh2, bus_size_t ofs2, bus_size_t count)
+{
+ uint8_t __volatile *srcp = (uint8_t __volatile *)(bsh1 + ofs1);
+ uint8_t __volatile *dstp = (uint8_t __volatile *)(bsh2 + ofs2);
+ if (dstp > srcp) {
+ /* copy backwards so that overlapping regions are handled correctly */
+ srcp += count - 1;
+ dstp += count - 1;
+ while (count-- > 0) {
+ *dstp-- = *srcp--;
+ }
+ } else {
+ while (count-- > 0) {
+ *dstp++ = *srcp++;
+ }
+ }
+}
+
+static __inline void
+bus_space_copy_region_2(bus_space_tag_t bst __unused, bus_space_handle_t bsh1,
+ bus_size_t ofs1, bus_space_handle_t bsh2, bus_size_t ofs2, bus_size_t count)
+{
+ uint16_t __volatile *srcp = (uint16_t __volatile *)(bsh1 + ofs1);
+ uint16_t __volatile *dstp = (uint16_t __volatile *)(bsh2 + ofs2);
+ if (dstp > srcp) {
+ /* copy backwards so that overlapping regions are handled correctly */
+ srcp += count - 1;
+ dstp += count - 1;
+ while (count-- > 0) {
+ *dstp-- = *srcp--;
+ }
+ } else {
+ while (count-- > 0) {
+ *dstp++ = *srcp++;
+ }
+ }
+}
+
+static __inline void
+bus_space_copy_region_4(bus_space_tag_t bst __unused, bus_space_handle_t bsh1,
+ bus_size_t ofs1, bus_space_handle_t bsh2, bus_size_t ofs2, bus_size_t count)
+{
+ uint32_t __volatile *srcp = (uint32_t __volatile *)(bsh1 + ofs1);
+ uint32_t __volatile *dstp = (uint32_t __volatile *)(bsh2 + ofs2);
+ if (dstp > srcp) {
+ /* copy backwards so that overlapping regions are handled correctly */
+ srcp += count - 1;
+ dstp += count - 1;
+ while (count-- > 0) {
+ *dstp-- = *srcp--;
+ }
+ } else {
+ while (count-- > 0) {
+ *dstp++ = *srcp++;
+ }
+ }
+}
+
+static __inline void
+bus_space_copy_region_8(bus_space_tag_t bst __unused, bus_space_handle_t bsh1,
+ bus_size_t ofs1, bus_space_handle_t bsh2, bus_size_t ofs2, bus_size_t count)
+{
+ uint64_t __volatile *srcp = (uint64_t __volatile *)(bsh1 + ofs1);
+ uint64_t __volatile *dstp = (uint64_t __volatile *)(bsh2 + ofs2);
+ if (dstp > srcp) {
+ /* copy backwards so that overlapping regions are handled correctly */
+ srcp += count - 1;
+ dstp += count - 1;
+ while (count-- > 0) {
+ *dstp-- = *srcp--;
+ }
+ } else {
+ while (count-- > 0) {
+ *dstp++ = *srcp++;
+ }
+ }
+}
+
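Because the copy direction is chosen from the pointer order, copy_region can also shift data within a single window, memmove()-style. A sketch with hypothetical offsets:

/* Move `words` 32-bit values from offset `from` to offset `to` within
   the same handle; overlapping ranges are safe because the backward
   copy is used when needed. */
static void
dev_buf_shift(bus_space_tag_t bst, bus_space_handle_t bsh,
    bus_size_t from, bus_size_t to, bus_size_t words)
{
	bus_space_copy_region_4(bst, bsh, from, bsh, to, words);
}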
+
+/*
+ * Stream accesses are the same as normal accesses on RTEMS; there are no
+ * supported bus systems with an endianness different from that of the host.
+ */
+#define bus_space_read_stream_1(t, h, o) \
+ bus_space_read_1(t, h, o)
+#define bus_space_read_stream_2(t, h, o) \
+ bus_space_read_2(t, h, o)
+#define bus_space_read_stream_4(t, h, o) \
+ bus_space_read_4(t, h, o)
+#define bus_space_read_stream_8(t, h, o) \
+ bus_space_read_8(t, h, o)
+
+#define bus_space_read_multi_stream_1(t, h, o, a, c) \
+ bus_space_read_multi_1(t, h, o, a, c)
+#define bus_space_read_multi_stream_2(t, h, o, a, c) \
+ bus_space_read_multi_2(t, h, o, a, c)
+#define bus_space_read_multi_stream_4(t, h, o, a, c) \
+ bus_space_read_multi_4(t, h, o, a, c)
+#define bus_space_read_multi_stream_8(t, h, o, a, c) \
+ bus_space_read_multi_8(t, h, o, a, c)
+
+#define bus_space_write_stream_1(t, h, o, v) \
+ bus_space_write_1(t, h, o, v)
+#define bus_space_write_stream_2(t, h, o, v) \
+ bus_space_write_2(t, h, o, v)
+#define bus_space_write_stream_4(t, h, o, v) \
+ bus_space_write_4(t, h, o, v)
+#define bus_space_write_stream_8(t, h, o, v) \
+ bus_space_write_8(t, h, o, v)
+
+#define bus_space_write_multi_stream_1(t, h, o, a, c) \
+ bus_space_write_multi_1(t, h, o, a, c)
+#define bus_space_write_multi_stream_2(t, h, o, a, c) \
+ bus_space_write_multi_2(t, h, o, a, c)
+#define bus_space_write_multi_stream_4(t, h, o, a, c) \
+ bus_space_write_multi_4(t, h, o, a, c)
+#define bus_space_write_multi_stream_8(t, h, o, a, c) \
+ bus_space_write_multi_8(t, h, o, a, c)
+
+#define bus_space_set_multi_stream_1(t, h, o, v, c) \
+ bus_space_set_multi_1(t, h, o, v, c)
+#define bus_space_set_multi_stream_2(t, h, o, v, c) \
+ bus_space_set_multi_2(t, h, o, v, c)
+#define bus_space_set_multi_stream_4(t, h, o, v, c) \
+ bus_space_set_multi_4(t, h, o, v, c)
+#define bus_space_set_multi_stream_8(t, h, o, v, c) \
+ bus_space_set_multi_8(t, h, o, v, c)
+
+#define bus_space_read_region_stream_1(t, h, o, a, c) \
+ bus_space_read_region_1(t, h, o, a, c)
+#define bus_space_read_region_stream_2(t, h, o, a, c) \
+ bus_space_read_region_2(t, h, o, a, c)
+#define bus_space_read_region_stream_4(t, h, o, a, c) \
+ bus_space_read_region_4(t, h, o, a, c)
+#define bus_space_read_region_stream_8(t, h, o, a, c) \
+ bus_space_read_region_8(t, h, o, a, c)
+
+#define bus_space_write_region_stream_1(t, h, o, a, c) \
+ bus_space_write_region_1(t, h, o, a, c)
+#define bus_space_write_region_stream_2(t, h, o, a, c) \
+ bus_space_write_region_2(t, h, o, a, c)
+#define bus_space_write_region_stream_4(t, h, o, a, c) \
+ bus_space_write_region_4(t, h, o, a, c)
+#define bus_space_write_region_stream_8(t, h, o, a, c) \
+ bus_space_write_region_8(t, h, o, a, c)
+
+#define bus_space_set_region_stream_1(t, h, o, v, c) \
+ bus_space_set_region_1(t, h, o, v, c)
+#define bus_space_set_region_stream_2(t, h, o, v, c) \
+ bus_space_set_region_2(t, h, o, v, c)
+#define bus_space_set_region_stream_4(t, h, o, v, c) \
+ bus_space_set_region_4(t, h, o, v, c)
+#define bus_space_set_region_stream_8(t, h, o, v, c) \
+ bus_space_set_region_8(t, h, o, v, c)
+
+#define bus_space_copy_region_stream_1(t, h1, o1, h2, o2, c) \
+ bus_space_copy_region_1(t, h1, o1, h2, o2, c)
+#define bus_space_copy_region_stream_2(t, h1, o1, h2, o2, c) \
+ bus_space_copy_region_2(t, h1, o1, h2, o2, c)
+#define bus_space_copy_region_stream_4(t, h1, o1, h2, o2, c) \
+ bus_space_copy_region_4(t, h1, o1, h2, o2, c)
+#define bus_space_copy_region_stream_8(t, h1, o1, h2, o2, c) \
+ bus_space_copy_region_8(t, h1, o1, h2, o2, c)
+
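A driver written against the stream API therefore compiles unchanged on RTEMS and simply performs native-order accesses. A one-line sketch, assuming bst, bsh and the register offset 0x10 belong to a hypothetical device:

/* Expands to a plain bus_space_read_4() on RTEMS, since bus and host
   byte order always agree here. */
uint32_t w = bus_space_read_stream_4(bst, bsh, 0x10);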
+#include <rtems/freebsd/machine/bus_dma.h>
+
+#endif /* _RTEMS_BSD_MACHINE_BUS_H_ */
diff --git a/rtems/freebsd/machine/bus_dma.h b/rtems/freebsd/machine/bus_dma.h
new file mode 100644
index 00000000..b2629b5a
--- /dev/null
+++ b/rtems/freebsd/machine/bus_dma.h
@@ -0,0 +1,32 @@
+/**
+ * @file
+ *
+ * @ingroup rtems_bsd_machine
+ *
+ * @brief TODO.
+ */
+
+/*
+ * Copyright (c) 2009, 2010 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Obere Lagerstr. 30
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.com/license/LICENSE.
+ */
+
+#ifndef _RTEMS_BSD_MACHINE_BUS_DMA_H_
+#define _RTEMS_BSD_MACHINE_BUS_DMA_H_
+
+#ifndef _RTEMS_BSD_MACHINE_RTEMS_BSD_CONFIG_H_
+#error "the header file <rtems/freebsd/machine/rtems-bsd-config.h> must be included first"
+#endif
+
+#include <rtems/freebsd/sys/bus_dma.h>
+
+#endif /* _RTEMS_BSD_MACHINE_BUS_DMA_H_ */
diff --git a/rtems/freebsd/machine/clock.h b/rtems/freebsd/machine/clock.h
new file mode 100644
index 00000000..4b12f3b7
--- /dev/null
+++ b/rtems/freebsd/machine/clock.h
@@ -0,0 +1,35 @@
+/**
+ * @file
+ *
+ * @ingroup rtems_bsd_machine
+ *
+ * @brief TODO.
+ */
+
+/*
+ * Copyright (c) 2009, 2010 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Obere Lagerstr. 30
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.com/license/LICENSE.
+ */
+
+#ifndef _RTEMS_BSD_MACHINE_CLOCK_H_
+#define _RTEMS_BSD_MACHINE_CLOCK_H_
+
+#ifndef _RTEMS_BSD_MACHINE_RTEMS_BSD_CONFIG_H_
+#error "the header file <rtems/freebsd/machine/rtems-bsd-config.h> must be included first"
+#endif
+
+#ifdef _KERNEL
+
+
+#endif /* _KERNEL */
+
+#endif /* _RTEMS_BSD_MACHINE_CLOCK_H_ */
diff --git a/rtems/freebsd/machine/cpu.h b/rtems/freebsd/machine/cpu.h
new file mode 100644
index 00000000..936ffd88
--- /dev/null
+++ b/rtems/freebsd/machine/cpu.h
@@ -0,0 +1 @@
+/* EMPTY */
diff --git a/rtems/freebsd/machine/cpufunc.h b/rtems/freebsd/machine/cpufunc.h
new file mode 100644
index 00000000..087a9404
--- /dev/null
+++ b/rtems/freebsd/machine/cpufunc.h
@@ -0,0 +1,30 @@
+/**
+ * @file
+ *
+ * @ingroup rtems_bsd_machine
+ *
+ * @brief TODO.
+ */
+
+/*
+ * Copyright (c) 2009, 2010 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Obere Lagerstr. 30
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.com/license/LICENSE.
+ */
+
+#ifndef _RTEMS_BSD_MACHINE_CPUFUNC_H_
+#define _RTEMS_BSD_MACHINE_CPUFUNC_H_
+
+#ifndef _RTEMS_BSD_MACHINE_RTEMS_BSD_CONFIG_H_
+#error "the header file <rtems/freebsd/machine/rtems-bsd-config.h> must be included first"
+#endif
+
+#endif /* _RTEMS_BSD_MACHINE_CPUFUNC_H_ */
diff --git a/rtems/freebsd/machine/elf.h b/rtems/freebsd/machine/elf.h
new file mode 100644
index 00000000..936ffd88
--- /dev/null
+++ b/rtems/freebsd/machine/elf.h
@@ -0,0 +1 @@
+/* EMPTY */
diff --git a/rtems/freebsd/machine/endian.h b/rtems/freebsd/machine/endian.h
new file mode 100644
index 00000000..89cc9930
--- /dev/null
+++ b/rtems/freebsd/machine/endian.h
@@ -0,0 +1,48 @@
+/**
+ * @file
+ *
+ * @ingroup rtems_bsd_machine
+ *
+ * @brief TODO.
+ */
+
+/*
+ * Copyright (c) 2009, 2010 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Obere Lagerstr. 30
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.com/license/LICENSE.
+ */
+
+#ifndef _RTEMS_BSD_MACHINE_ENDIAN_H
+#define _RTEMS_BSD_MACHINE_ENDIAN_H
+
+#ifndef _RTEMS_BSD_MACHINE_RTEMS_BSD_CONFIG_H_
+#error "the header file <rtems/freebsd/machine/rtems-bsd-config.h> must be included first"
+#endif
+
+#include <rtems/endian.h>
+
+#if CPU_BIG_ENDIAN
+# define _BYTE_ORDER _BIG_ENDIAN
+#elif CPU_LITTLE_ENDIAN
+# define _BYTE_ORDER _LITTLE_ENDIAN
+#else
+# error "undefined endian"
+#endif
+
+#define __bswap16(x) CPU_swap_u16(x)
+#define __bswap32(x) CPU_swap_u32(x)
+
+#if CPU_LITTLE_ENDIAN
+#define __htonl(x) __bswap32(x)
+#define __htons(x) __bswap16(x)
+#define __ntohl(x) __bswap32(x)
+#define __ntohs(x) __bswap16(x)
+#else
+/* Network byte order is big-endian, so these are identity operations */
+#define __htonl(x) (x)
+#define __htons(x) (x)
+#define __ntohl(x) (x)
+#define __ntohs(x) (x)
+#endif
+
+#endif /* _RTEMS_BSD_MACHINE_ENDIAN_H */
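A brief sketch of these macros in use; the value is illustrative:

/* On a little-endian CPU __htonl() expands to CPU_swap_u32() and
   yields 0x78563412, whose in-memory byte sequence 12 34 56 78 is
   the big-endian wire order; on a big-endian CPU it is the identity. */
uint32_t wire = __htonl(0x12345678);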
diff --git a/rtems/freebsd/machine/in_cksum.h b/rtems/freebsd/machine/in_cksum.h
new file mode 100644
index 00000000..37d88e2e
--- /dev/null
+++ b/rtems/freebsd/machine/in_cksum.h
@@ -0,0 +1,77 @@
+/*-
+ * Copyright (c) 1990 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from tahoe: in_cksum.c 1.2 86/01/05
+ * from: @(#)in_cksum.c 1.3 (Berkeley) 1/19/91
+ * from: Id: in_cksum.c,v 1.8 1995/12/03 18:35:19 bde Exp
+ * from: src/sys/alpha/include/in_cksum.h,v 1.7 2005/03/02 21:33:20 joerg
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_IN_CKSUM_H_
+#define _MACHINE_IN_CKSUM_H_ 1
+
+#include <sys/cdefs.h>
+
+#define in_cksum(m, len) in_cksum_skip(m, len, 0)
+
+/*
+ * It is useful to have an Internet checksum routine which is inlineable
+ * and optimized specifically for the task of computing IP header checksums
+ * in the normal case (where there are no options and the header length is
+ * therefore always exactly five 32-bit words).
+ */
+#ifdef __CC_SUPPORTS___INLINE
+
+static __inline void
+in_cksum_update(struct ip *ip)
+{
+ int __tmpsum;
+ __tmpsum = (int)ntohs(ip->ip_sum) + 256;
+ ip->ip_sum = htons(__tmpsum + (__tmpsum >> 16));
+}
+
+#else
+
+#define in_cksum_update(ip) \
+ do { \
+ int __tmpsum; \
+ __tmpsum = (int)ntohs(ip->ip_sum) + 256; \
+ ip->ip_sum = htons(__tmpsum + (__tmpsum >> 16)); \
+ } while(0)
+
+#endif
+
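The constant 256 (0x0100) implements the classic incremental checksum update for a TTL decrement: ip_ttl occupies the high byte of a 16-bit header word, so decrementing it lowers the header sum by 0x0100, and the stored one's-complement checksum must grow by the same amount, with any carry folded back into the low 16 bits. A worked example, assuming an incoming ip_sum of 0xfffe in host order:

__tmpsum = 0xfffe + 0x0100              /* = 0x100fe (carry into bit 16) */
ip_sum   = 0x100fe + (0x100fe >> 16)    /* = 0x100ff                     */
         = 0x00ff                       /* truncated to 16 bits by htons */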
+#ifdef _KERNEL
+u_int in_cksum_hdr(const struct ip *ip);
+u_short in_addword(u_short sum, u_short b);
+u_short in_pseudo(u_int sum, u_int b, u_int c);
+u_short in_cksum_skip(struct mbuf *m, int len, int skip);
+#endif
+
+#endif /* _MACHINE_IN_CKSUM_H_ */
diff --git a/rtems/freebsd/machine/mutex.h b/rtems/freebsd/machine/mutex.h
new file mode 100644
index 00000000..45d2fc47
--- /dev/null
+++ b/rtems/freebsd/machine/mutex.h
@@ -0,0 +1,30 @@
+/**
+ * @file
+ *
+ * @ingroup rtems_bsd_machine
+ *
+ * @brief TODO.
+ */
+
+/*
+ * Copyright (c) 2009, 2010 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Obere Lagerstr. 30
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.com/license/LICENSE.
+ */
+
+#ifndef _RTEMS_BSD_MACHINE_MUTEX_H_
+#define _RTEMS_BSD_MACHINE_MUTEX_H_
+
+#ifndef _RTEMS_BSD_MACHINE_RTEMS_BSD_CONFIG_H_
+#error "the header file <rtems/freebsd/machine/rtems-bsd-config.h> must be included first"
+#endif
+
+#endif /* _RTEMS_BSD_MACHINE_MUTEX_H_ */
diff --git a/rtems/freebsd/machine/param.h b/rtems/freebsd/machine/param.h
new file mode 100644
index 00000000..cf621e6d
--- /dev/null
+++ b/rtems/freebsd/machine/param.h
@@ -0,0 +1,42 @@
+/**
+ * @file
+ *
+ * @ingroup rtems_bsd_machine
+ *
+ * @brief TODO.
+ */
+
+/*
+ * Copyright (c) 2009, 2010 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Obere Lagerstr. 30
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.com/license/LICENSE.
+ */
+
+#ifndef _RTEMS_BSD_MACHINE_PARAM_H_
+#define _RTEMS_BSD_MACHINE_PARAM_H_
+
+#ifndef _RTEMS_BSD_MACHINE_RTEMS_BSD_CONFIG_H_
+#error "the header file <rtems/freebsd/machine/rtems-bsd-config.h> must be included first"
+#endif
+
+#include <machine/param.h>
+
+#define MAXCPU 1
+
+#define CACHE_LINE_SHIFT 7
+
+#define CACHE_LINE_SIZE (1 << CACHE_LINE_SHIFT)
+
+#define MAXPAGESIZES 1 /* maximum number of supported page sizes */
+
+#define MACHINE_ARCH "rtems"
+
+#endif /* _RTEMS_BSD_MACHINE_PARAM_H_ */
diff --git a/rtems/freebsd/machine/pcpu.h b/rtems/freebsd/machine/pcpu.h
new file mode 100644
index 00000000..42c9de84
--- /dev/null
+++ b/rtems/freebsd/machine/pcpu.h
@@ -0,0 +1,41 @@
+/**
+ * @file
+ *
+ * @ingroup rtems_bsd_machine
+ *
+ * @brief TODO.
+ */
+
+/*
+ * Copyright (c) 2009, 2010 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Obere Lagerstr. 30
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.com/license/LICENSE.
+ */
+
+#ifndef _RTEMS_BSD_MACHINE_PCPU_H_
+#define _RTEMS_BSD_MACHINE_PCPU_H_
+
+#ifndef _RTEMS_BSD_MACHINE_RTEMS_BSD_CONFIG_H_
+#error "the header file <rtems/freebsd/machine/rtems-bsd-config.h> must be included first"
+#endif
+
+#define curthread ((struct thread *)((RTEMS_API_Control *)_Thread_Executing->API_Extensions[THREAD_API_RTEMS])->Notepads[RTEMS_NOTEPAD_0])
+
+extern struct pcpu *pcpup;
+
+#define PCPU_MD_FIELDS
+#define PCPU_GET(member) (0)
+#define PCPU_SET(member, val)
+
+//#define PCPU_GET(member) (pcpup->pc_ ## member)
+//#define PCPU_SET(member, val) (pcpup->pc_ ## member = (val))
+
+#endif /* _RTEMS_BSD_MACHINE_PCPU_H_ */
diff --git a/rtems/freebsd/machine/proc.h b/rtems/freebsd/machine/proc.h
new file mode 100644
index 00000000..d1332ac6
--- /dev/null
+++ b/rtems/freebsd/machine/proc.h
@@ -0,0 +1,38 @@
+/**
+ * @file
+ *
+ * @ingroup rtems_bsd_machine
+ *
+ * @brief TODO.
+ */
+
+/*
+ * Copyright (c) 2009, 2010 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Obere Lagerstr. 30
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.com/license/LICENSE.
+ */
+
+#ifndef _RTEMS_BSD_MACHINE_PROC_H_
+#define _RTEMS_BSD_MACHINE_PROC_H_
+
+#ifndef _RTEMS_BSD_MACHINE_RTEMS_BSD_CONFIG_H_
+#error "the header file <rtems/freebsd/machine/rtems-bsd-config.h> must be included first"
+#endif
+
+struct mdthread {
+ int dummy;
+};
+
+struct mdproc {
+ int dummy;
+};
+
+#endif /* _RTEMS_BSD_MACHINE_PROC_H_ */
diff --git a/rtems/freebsd/machine/resource.h b/rtems/freebsd/machine/resource.h
new file mode 100644
index 00000000..93220dcf
--- /dev/null
+++ b/rtems/freebsd/machine/resource.h
@@ -0,0 +1,30 @@
+/**
+ * @file
+ *
+ * @ingroup rtems_bsd_machine
+ *
+ * @brief TODO.
+ */
+
+/*
+ * Copyright (c) 2009, 2010 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Obere Lagerstr. 30
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.com/license/LICENSE.
+ */
+
+#ifndef _RTEMS_BSD_MACHINE_RESOURCE_H_
+#define _RTEMS_BSD_MACHINE_RESOURCE_H_
+
+#ifndef _RTEMS_BSD_MACHINE_RTEMS_BSD_CONFIG_H_
+#error "the header file <rtems/freebsd/machine/rtems-bsd-config.h> must be included first"
+#endif
+
+#endif /* _RTEMS_BSD_MACHINE_RESOURCE_H_ */
diff --git a/rtems/freebsd/machine/rtems-bsd-cache.h b/rtems/freebsd/machine/rtems-bsd-cache.h
new file mode 100644
index 00000000..77cc0794
--- /dev/null
+++ b/rtems/freebsd/machine/rtems-bsd-cache.h
@@ -0,0 +1,37 @@
+/**
+ * @file
+ *
+ * @ingroup rtems_bsd_machine
+ *
+ * @brief TODO.
+ */
+
+/*
+ * Copyright (c) 2010 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Obere Lagerstr. 30
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.com/license/LICENSE.
+ */
+
+#ifndef _RTEMS_BSD_MACHINE_RTEMS_BSD_CACHE_H_
+#define _RTEMS_BSD_MACHINE_RTEMS_BSD_CACHE_H_
+
+#include <bsp.h>
+
+#if defined(LIBBSP_ARM_LPC24XX_BSP_H)
+ /* No cache */
+#elif defined(LIBBSP_ARM_LPC32XX_BSP_H)
+ /* With cache, no coherency support in hardware */
+ #include <libcpu/cache.h>
+#elif defined(__GEN83xx_BSP_h)
+ /* With cache, coherency support in hardware */
+#endif
+
+#endif /* _RTEMS_BSD_MACHINE_RTEMS_BSD_CACHE_H_ */
diff --git a/rtems/freebsd/machine/rtems-bsd-config.h b/rtems/freebsd/machine/rtems-bsd-config.h
new file mode 100644
index 00000000..34562b11
--- /dev/null
+++ b/rtems/freebsd/machine/rtems-bsd-config.h
@@ -0,0 +1,255 @@
+/**
+ * @file
+ *
+ * @ingroup rtems_bsd_machine
+ *
+ * @brief TODO.
+ */
+
+/*
+ * Copyright (c) 2009, 2010 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Obere Lagerstr. 30
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.com/license/LICENSE.
+ */
+
+#ifndef _RTEMS_BSD_MACHINE_RTEMS_BSD_CONFIG_H_
+#define _RTEMS_BSD_MACHINE_RTEMS_BSD_CONFIG_H_
+
+/* We compile for RTEMS and FreeBSD */
+#define __rtems__ 1
+#define __FreeBSD__ 1
+#define __BSD_VISIBLE 1
+
+/* Disable procedure call definitions */
+#undef __P
+#undef __strong_reference
+
+#ifndef _RTEMS_BSD_BSD_HH_
+/* General defines to activate BSD kernel parts */
+#define _KERNEL 1
+
+/* Disable standard system headers */
+#undef _SYS_UNISTD_H
+#define _SYS_UNISTD_H 1
+#undef _SYS_FEATURES_H
+#define _SYS_FEATURES_H 1
+#undef _SYS_TTYCOM_H_
+#define _SYS_TTYCOM_H_ 1
+
+/* Disable procedure call definitions */
+#undef __P
+#undef __strong_reference
+
+/* Disable some quirks in the standard headers */
+#define _POSIX_SOURCE 1
+
+/* We need POSIX threads */
+#define _POSIX_THREADS 1
+
+#endif /* !_RTEMS_BSD_BSD_HH_ */
+
+/* Types and declarations from the standard C library */
+#include <stdarg.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <limits.h>
+#include <string.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <sys/types.h>
+
+/* Ensure that we are RTEMS compatible and can use RTEMS */
+#include <rtems.h>
+#include <rtems/error.h>
+#include <rtems/chain.h>
+#include <rtems/libio.h>
+
+#ifndef _RTEMS_BSD_BSD_HH_
+/* Ensure that we can use POSIX threads */
+#include <pthread.h>
+
+/* The BSD kernel is not a POSIX source */
+#undef _POSIX_SOURCE
+
+/* Symbol undefines */
+#undef MAXPATHLEN
+
+#endif /* !_RTEMS_BSD_BSD_HH_ */
+
+/* Networking */
+#define IPSEC 1
+#define INET 1
+#define INET6 1
+#define TCP_SIGNATURE 1
+
+/* Integer type definitions */
+
+#define __INT_MAX INT_MAX
+#define GID_MAX UINT_MAX /* max value for a gid_t */
+#define UID_MAX UINT_MAX /* max value for a uid_t */
+
+#define __int8_t int8_t
+#define __int16_t int16_t
+#define __int32_t int32_t
+#define __int64_t int64_t
+
+#define __int_least8_t int_least8_t
+#define __int_least16_t int_least16_t
+#define __int_least32_t int_least32_t
+#define __int_least64_t int_least64_t
+
+#define __int_fast8_t int_fast8_t
+#define __int_fast16_t int_fast16_t
+#define __int_fast32_t int_fast32_t
+#define __int_fast64_t int_fast64_t
+
+#define __uint8_t uint8_t
+#define __uint16_t uint16_t
+#define __uint32_t uint32_t
+#define __uint64_t uint64_t
+
+#define __uint_least8_t uint_least8_t
+#define __uint_least16_t uint_least16_t
+#define __uint_least32_t uint_least32_t
+#define __uint_least64_t uint_least64_t
+
+#define __uint_fast8_t uint_fast8_t
+#define __uint_fast16_t uint_fast16_t
+#define __uint_fast32_t uint_fast32_t
+#define __uint_fast64_t uint_fast64_t
+
+#define __intfptr_t intptr_t
+#define __uintfptr_t uintptr_t
+
+#define __intptr_t intptr_t
+#define __uintptr_t uintptr_t
+
+#define __intmax_t intmax_t
+#define __uintmax_t uintmax_t
+
+#define __register_t int
+#define __u_register_t unsigned int
+
+#define __float_t float
+#define __double_t double
+
+#define __vm_offset_t uintptr_t
+#define __vm_ooffset_t uint64_t
+#define __vm_paddr_t uintptr_t
+#define __vm_pindex_t uint64_t
+#define __vm_size_t uintptr_t
+
+#define __clock_t clock_t
+#define __cpumask_t unsigned int
+#define __critical_t intptr_t
+#define __ptrdiff_t ptrdiff_t
+#define __segsz_t intptr_t
+#define __time_t time_t
+
+#define __va_list va_list
+
+#undef __size_t
+#define __size_t size_t
+
+#define _CLOCKID_T_DECLARED 1
+#define _DEV_T_DECLARED 1
+#define _GID_T_DECLARED 1
+#define _MODE_T_DECLARED 1
+#define _OFF_T_DECLARED 1
+#define _PID_T_DECLARED 1
+#define _SSIZE_T_DECLARED 1
+#define _TIMER_T_DECLARED 1
+#define _TIME_T_DECLARED 1
+#define _UID_T_DECLARED 1
+#define _USECONDS_T_DECLARED 1
+#define _FSBLKCNT_T_DECLARED 1
+#define _BLKSIZE_T_DECLARED 1
+#define _BLKCNT_T_DECLARED 1
+
+#define __dev_t _bsd_dev_t
+#define __gid_t _bsd_gid_t
+#define __off_t _bsd_off_t
+#define __pid_t _bsd_pid_t
+#define __uid_t _bsd_uid_t
+
+/* Missing error number */
+//#define ENOIOCTL EINVAL
+
+#ifndef _RTEMS_BSD_BSD_HH_
+/* Symbol rename */
+
+#include <rtems/freebsd/machine/rtems-bsd-symbols.h>
+
+#define gets _bsd_gets
+#define realloc _bsd_realloc
+#define reallocf _bsd_reallocf
+#define setenv _bsd_setenv
+#define abs _bsd_abs
+#define labs _bsd_labs
+
+#define ticks _Watchdog_Ticks_since_boot
+
+/* Debug */
+
+void rtems_bsd_assert_func(const char *file, int line, const char *func, const char *expr);
+
+#define BSD_PRINTF(fmt, ...) printf("%s: " fmt, __func__, ##__VA_ARGS__)
+
+#define BSD_PANIC(fmt, ...) panic("%s: " fmt "\n", __func__, ##__VA_ARGS__)
+
+#ifdef NDEBUG
+# define BSD_ASSERT(expr) ((void) 0)
+#else
+# define BSD_ASSERT(expr) ((expr) ? (void) 0 : rtems_bsd_assert_func(__FILE__, __LINE__, __func__, #expr))
+#endif
+
+#define BSD_ASSERT_SC(sc) BSD_ASSERT((sc) == RTEMS_SUCCESSFUL)
+
+#define BSD_ASSERT_RV(rv) BSD_ASSERT((rv) == 0)
+
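A minimal usage sketch for the assertion helpers; sem_id and mtx are hypothetical objects assumed to exist in the caller:

/* Status-code style (RTEMS classic API) */
rtems_status_code sc = rtems_semaphore_release(sem_id);
BSD_ASSERT_SC(sc);

/* Zero-on-success style (POSIX) */
int rv = pthread_mutex_unlock(&mtx);
BSD_ASSERT_RV(rv);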
+/* General definitions */
+
+#define BSD_TASK_PRIORITY_NORMAL 120
+
+#define BSD_TASK_PRIORITY_TIMER 110
+
+#define BSD_TASK_PRIORITY_INTERRUPT 100
+
+#define BSD_TASK_PRIORITY_RESOURCE_OWNER 100
+
+/* FIXME */
+#define BSD_MINIMUM_TASK_STACK_SIZE ((size_t) 32 * 1024)
+
+#define M_RTEMS_HEAP 0
+
+#define BSD_MAXIMUM_SLEEP_QUEUES 32
+
+extern rtems_chain_control rtems_bsd_lock_chain;
+
+extern rtems_chain_control rtems_bsd_mtx_chain;
+
+extern rtems_chain_control rtems_bsd_sx_chain;
+
+extern rtems_chain_control rtems_bsd_condvar_chain;
+
+extern rtems_chain_control rtems_bsd_callout_chain;
+
+extern rtems_chain_control rtems_bsd_thread_chain;
+
+extern rtems_chain_control rtems_bsd_malloc_chain;
+
+/* CPU definitions */
+#define cpu_spinwait() /* nothing */
+
+#endif /* !_RTEMS_BSD_BSD_HH_ */
+
+#endif /* _RTEMS_BSD_MACHINE_RTEMS_BSD_CONFIG_H_ */
diff --git a/rtems/freebsd/machine/rtems-bsd-select.h b/rtems/freebsd/machine/rtems-bsd-select.h
new file mode 100644
index 00000000..368c285b
--- /dev/null
+++ b/rtems/freebsd/machine/rtems-bsd-select.h
@@ -0,0 +1,76 @@
+/*-
+ * Copyright (c) 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)select.h 8.2 (Berkeley) 1/4/94
+ * $Id: select.h,v 1.7 2010/04/02 07:39:34 ralf Exp $
+ */
+
+#ifndef _RTEMS_BSD_MACHINE_RTEMS_BSD_SELECT_H_
+#define _RTEMS_BSD_MACHINE_RTEMS_BSD_SELECT_H_
+
+#ifndef _KERNEL
+
+#include <sys/time.h> /* struct timeval */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * Used to maintain information about processes that wish to be
+ * notified when I/O becomes possible.
+ */
+#if 0
+struct selinfo {
+ pid_t si_pid; /* process to be notified */
+ short si_flags; /* see below */
+};
+#endif
+#define SI_COLL 0x0001 /* collision occurred */
+
+/* Check the first NFDS descriptors each in READFDS (if not NULL) for read
+ readiness, in WRITEFDS (if not NULL) for write readiness, and in EXCEPTFDS
+ (if not NULL) for exceptional conditions. If TIMEOUT is not NULL, time out
+ after waiting the interval specified therein. Returns the number of ready
+ descriptors, or -1 for errors. */
+extern int select (int __nfds, fd_set *__readfds,
+ fd_set *__writefds,
+ fd_set *__exceptfds,
+ struct timeval *__timeout);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* !_KERNEL */
+
+#endif /* !_RTEMS_BSD_MACHINE_RTEMS_BSD_SELECT_H_ */
diff --git a/rtems/freebsd/machine/rtems-bsd-symbols.h b/rtems/freebsd/machine/rtems-bsd-symbols.h
new file mode 100644
index 00000000..9cf8da52
--- /dev/null
+++ b/rtems/freebsd/machine/rtems-bsd-symbols.h
@@ -0,0 +1,561 @@
+/**
+ * @file
+ *
+ * @ingroup rtems_bsd_machine
+ *
+ * @brief TODO.
+ */
+
+/*
+ * Copyright (c) 2009, 2010 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Obere Lagerstr. 30
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.com/license/LICENSE.
+ */
+
+#ifndef _RTEMS_BSD_MACHINE_RTEMS_BSD_SYMBOLS_H_
+#define _RTEMS_BSD_MACHINE_RTEMS_BSD_SYMBOLS_H_
+
+#define bus_activate_resource _bsd_bus_activate_resource
+#define bus_activate_resource_desc _bsd_bus_activate_resource_desc
+#define bus_activate_resource_method_default _bsd_bus_activate_resource_method_default
+#define bus_add_child_desc _bsd_bus_add_child_desc
+#define bus_add_child_method_default _bsd_bus_add_child_method_default
+#define bus_alloc_resource _bsd_bus_alloc_resource
+#define bus_alloc_resource_desc _bsd_bus_alloc_resource_desc
+#define bus_alloc_resource_method_default _bsd_bus_alloc_resource_method_default
+#define bus_alloc_resources _bsd_bus_alloc_resources
+#define bus_bind_intr _bsd_bus_bind_intr
+#define bus_bind_intr_desc _bsd_bus_bind_intr_desc
+#define bus_bind_intr_method_default _bsd_bus_bind_intr_method_default
+#define bus_child_detached_desc _bsd_bus_child_detached_desc
+#define bus_child_detached_method_default _bsd_bus_child_detached_method_default
+#define bus_child_location_str _bsd_bus_child_location_str
+#define bus_child_location_str_desc _bsd_bus_child_location_str_desc
+#define bus_child_location_str_method_default _bsd_bus_child_location_str_method_default
+#define bus_child_pnpinfo_str _bsd_bus_child_pnpinfo_str
+#define bus_child_pnpinfo_str_desc _bsd_bus_child_pnpinfo_str_desc
+#define bus_child_pnpinfo_str_method_default _bsd_bus_child_pnpinfo_str_method_default
+#define bus_child_present _bsd_bus_child_present
+#define bus_child_present_desc _bsd_bus_child_present_desc
+#define bus_child_present_method_default _bsd_bus_child_present_method_default
+#define bus_config_intr_desc _bsd_bus_config_intr_desc
+#define bus_config_intr_method_default _bsd_bus_config_intr_method_default
+#define bus_current_pass _bsd_bus_current_pass
+#define bus_data_generation_check _bsd_bus_data_generation_check
+#define bus_data_generation_update _bsd_bus_data_generation_update
+#define bus_deactivate_resource _bsd_bus_deactivate_resource
+#define bus_deactivate_resource_desc _bsd_bus_deactivate_resource_desc
+#define bus_deactivate_resource_method_default _bsd_bus_deactivate_resource_method_default
+#define bus_delete_resource _bsd_bus_delete_resource
+#define bus_delete_resource_desc _bsd_bus_delete_resource_desc
+#define bus_delete_resource_method_default _bsd_bus_delete_resource_method_default
+#define bus_describe_intr _bsd_bus_describe_intr
+#define bus_describe_intr_desc _bsd_bus_describe_intr_desc
+#define bus_describe_intr_method_default _bsd_bus_describe_intr_method_default
+#define busdma_lock_mutex _bsd_busdma_lock_mutex
+#define bus_dmamap_create _bsd_bus_dmamap_create
+#define bus_dmamap_destroy _bsd_bus_dmamap_destroy
+#define bus_dmamap_load _bsd_bus_dmamap_load
+#define _bus_dmamap_sync _bsd__bus_dmamap_sync
+#define _bus_dmamap_unload _bsd__bus_dmamap_unload
+#define bus_dmamem_alloc _bsd_bus_dmamem_alloc
+#define bus_dmamem_free _bsd_bus_dmamem_free
+#define bus_dma_tag_create _bsd_bus_dma_tag_create
+#define bus_dma_tag_destroy _bsd_bus_dma_tag_destroy
+#define bus_driver_added_desc _bsd_bus_driver_added_desc
+#define bus_driver_added_method_default _bsd_bus_driver_added_method_default
+#define bus_generic_add_child _bsd_bus_generic_add_child
+#define bus_generic_attach _bsd_bus_generic_attach
+#define bus_generic_bind_intr _bsd_bus_generic_bind_intr
+#define bus_generic_child_present _bsd_bus_generic_child_present
+#define bus_generic_config_intr _bsd_bus_generic_config_intr
+#define bus_generic_describe_intr _bsd_bus_generic_describe_intr
+#define bus_generic_detach _bsd_bus_generic_detach
+#define bus_generic_driver_added _bsd_bus_generic_driver_added
+#define bus_generic_get_dma_tag _bsd_bus_generic_get_dma_tag
+#define bus_generic_get_resource_list _bsd_bus_generic_get_resource_list
+#define bus_generic_new_pass _bsd_bus_generic_new_pass
+#define bus_generic_print_child _bsd_bus_generic_print_child
+#define bus_generic_probe _bsd_bus_generic_probe
+#define bus_generic_read_ivar _bsd_bus_generic_read_ivar
+#define bus_generic_resume _bsd_bus_generic_resume
+#define bus_generic_setup_intr _bsd_bus_generic_setup_intr
+#define bus_generic_shutdown _bsd_bus_generic_shutdown
+#define bus_generic_suspend _bsd_bus_generic_suspend
+#define bus_generic_teardown_intr _bsd_bus_generic_teardown_intr
+#define bus_generic_write_ivar _bsd_bus_generic_write_ivar
+#define bus_get_dma_tag _bsd_bus_get_dma_tag
+#define bus_get_dma_tag_desc _bsd_bus_get_dma_tag_desc
+#define bus_get_dma_tag_method_default _bsd_bus_get_dma_tag_method_default
+#define bus_get_resource _bsd_bus_get_resource
+#define bus_get_resource_count _bsd_bus_get_resource_count
+#define bus_get_resource_desc _bsd_bus_get_resource_desc
+#define bus_get_resource_list_desc _bsd_bus_get_resource_list_desc
+#define bus_get_resource_list_method_default _bsd_bus_get_resource_list_method_default
+#define bus_get_resource_method_default _bsd_bus_get_resource_method_default
+#define bus_get_resource_start _bsd_bus_get_resource_start
+#define bus_hint_device_unit_desc _bsd_bus_hint_device_unit_desc
+#define bus_hint_device_unit_method_default _bsd_bus_hint_device_unit_method_default
+#define bus_hinted_child_desc _bsd_bus_hinted_child_desc
+#define bus_hinted_child_method_default _bsd_bus_hinted_child_method_default
+#define bus_new_pass_desc _bsd_bus_new_pass_desc
+#define bus_new_pass_method_default _bsd_bus_new_pass_method_default
+#define bus_print_child_desc _bsd_bus_print_child_desc
+#define bus_print_child_footer _bsd_bus_print_child_footer
+#define bus_print_child_header _bsd_bus_print_child_header
+#define bus_print_child_method_default _bsd_bus_print_child_method_default
+#define bus_probe_nomatch_desc _bsd_bus_probe_nomatch_desc
+#define bus_probe_nomatch_method_default _bsd_bus_probe_nomatch_method_default
+#define bus_read_ivar_desc _bsd_bus_read_ivar_desc
+#define bus_read_ivar_method_default _bsd_bus_read_ivar_method_default
+#define bus_release_resource _bsd_bus_release_resource
+#define bus_release_resource_desc _bsd_bus_release_resource_desc
+#define bus_release_resource_method_default _bsd_bus_release_resource_method_default
+#define bus_release_resources _bsd_bus_release_resources
+#define bus_remap_intr_desc _bsd_bus_remap_intr_desc
+#define bus_remap_intr_method_default _bsd_bus_remap_intr_method_default
+#define bus_set_pass _bsd_bus_set_pass
+#define bus_set_resource _bsd_bus_set_resource
+#define bus_set_resource_desc _bsd_bus_set_resource_desc
+#define bus_set_resource_method_default _bsd_bus_set_resource_method_default
+#define bus_setup_intr _bsd_bus_setup_intr
+#define bus_setup_intr_desc _bsd_bus_setup_intr_desc
+#define bus_setup_intr_method_default _bsd_bus_setup_intr_method_default
+#define bus_teardown_intr _bsd_bus_teardown_intr
+#define bus_teardown_intr_desc _bsd_bus_teardown_intr_desc
+#define bus_teardown_intr_method_default _bsd_bus_teardown_intr_method_default
+#define bus_write_ivar_desc _bsd_bus_write_ivar_desc
+#define bus_write_ivar_method_default _bsd_bus_write_ivar_method_default
+#define callout_init _bsd_callout_init
+#define _callout_init_lock _bsd__callout_init_lock
+#define callout_reset _bsd_callout_reset
+#define callout_schedule _bsd_callout_schedule
+#define _callout_stop_safe _bsd__callout_stop_safe
+#define cam_fetch_status_entry _bsd_cam_fetch_status_entry
+#define cam_quirkmatch _bsd_cam_quirkmatch
+#define cam_sim_alloc _bsd_cam_sim_alloc
+#define cam_sim_free _bsd_cam_sim_free
+#define cam_simq_alloc _bsd_cam_simq_alloc
+#define cam_simq_free _bsd_cam_simq_free
+#define cam_status_table _bsd_cam_status_table
+#define cam_strmatch _bsd_cam_strmatch
+#define cam_strvis _bsd_cam_strvis
+#define cv_broadcastpri _bsd_cv_broadcastpri
+#define cv_destroy _bsd_cv_destroy
+#define cv_init _bsd_cv_init
+#define cv_signal _bsd_cv_signal
+#define _cv_timedwait _bsd__cv_timedwait
+#define _cv_wait _bsd__cv_wait
+#define _cv_wait_unlock _bsd__cv_wait_unlock
+#define DELAY _bsd_DELAY
+#define devclass_create _bsd_devclass_create
+#define devclass_find _bsd_devclass_find
+#define devclass_find_free_unit _bsd_devclass_find_free_unit
+#define devclass_get_count _bsd_devclass_get_count
+#define devclass_get_device _bsd_devclass_get_device
+#define devclass_get_devices _bsd_devclass_get_devices
+#define devclass_get_drivers _bsd_devclass_get_drivers
+#define devclass_get_maxunit _bsd_devclass_get_maxunit
+#define devclass_get_name _bsd_devclass_get_name
+#define devclass_get_parent _bsd_devclass_get_parent
+#define devclass_get_softc _bsd_devclass_get_softc
+#define devclass_set_parent _bsd_devclass_set_parent
+#define devctl_notify _bsd_devctl_notify
+#define devctl_notify_f _bsd_devctl_notify_f
+#define devctl_queue_data _bsd_devctl_queue_data
+#define devctl_queue_data_f _bsd_devctl_queue_data_f
+#define device_add_child _bsd_device_add_child
+#define device_add_child_ordered _bsd_device_add_child_ordered
+#define device_attach _bsd_device_attach
+#define device_attach_desc _bsd_device_attach_desc
+#define device_attach_method_default _bsd_device_attach_method_default
+#define device_busy _bsd_device_busy
+#define device_delete_all_children _bsd_device_delete_all_children
+#define device_delete_child _bsd_device_delete_child
+#define device_detach _bsd_device_detach
+#define device_detach_desc _bsd_device_detach_desc
+#define device_detach_method_default _bsd_device_detach_method_default
+#define device_disable _bsd_device_disable
+#define device_enable _bsd_device_enable
+#define device_find_child _bsd_device_find_child
+#define device_get_children _bsd_device_get_children
+#define device_get_desc _bsd_device_get_desc
+#define device_get_devclass _bsd_device_get_devclass
+#define device_get_driver _bsd_device_get_driver
+#define device_get_flags _bsd_device_get_flags
+#define device_get_ivars _bsd_device_get_ivars
+#define device_get_name _bsd_device_get_name
+#define device_get_nameunit _bsd_device_get_nameunit
+#define device_get_parent _bsd_device_get_parent
+#define device_get_softc _bsd_device_get_softc
+#define device_get_state _bsd_device_get_state
+#define device_get_unit _bsd_device_get_unit
+#define device_identify_desc _bsd_device_identify_desc
+#define device_identify_method_default _bsd_device_identify_method_default
+#define device_is_alive _bsd_device_is_alive
+#define device_is_attached _bsd_device_is_attached
+#define device_is_enabled _bsd_device_is_enabled
+#define device_is_quiet _bsd_device_is_quiet
+#define device_printf _bsd_device_printf
+#define device_print_prettyname _bsd_device_print_prettyname
+#define device_probe _bsd_device_probe
+#define device_probe_and_attach _bsd_device_probe_and_attach
+#define device_probe_child _bsd_device_probe_child
+#define device_probe_desc _bsd_device_probe_desc
+#define device_probe_method_default _bsd_device_probe_method_default
+#define device_quiesce _bsd_device_quiesce
+#define device_quiesce_desc _bsd_device_quiesce_desc
+#define device_quiesce_method_default _bsd_device_quiesce_method_default
+#define device_quiet _bsd_device_quiet
+#define device_resume_desc _bsd_device_resume_desc
+#define device_resume_method_default _bsd_device_resume_method_default
+#define device_set_desc _bsd_device_set_desc
+#define device_set_desc_copy _bsd_device_set_desc_copy
+#define device_set_devclass _bsd_device_set_devclass
+#define device_set_driver _bsd_device_set_driver
+#define device_set_flags _bsd_device_set_flags
+#define device_set_ivars _bsd_device_set_ivars
+#define device_set_softc _bsd_device_set_softc
+#define device_set_unit _bsd_device_set_unit
+#define device_set_usb_desc _bsd_device_set_usb_desc
+#define device_shutdown _bsd_device_shutdown
+#define device_shutdown_desc _bsd_device_shutdown_desc
+#define device_shutdown_method_default _bsd_device_shutdown_method_default
+#define device_suspend_desc _bsd_device_suspend_desc
+#define device_suspend_method_default _bsd_device_suspend_method_default
+#define device_unbusy _bsd_device_unbusy
+#define device_verbose _bsd_device_verbose
+#define driver_module_handler _bsd_driver_module_handler
+#define ehci_bus_methods _bsd_ehci_bus_methods
+#define ehci_detach _bsd_ehci_detach
+#define ehci_device_bulk_methods _bsd_ehci_device_bulk_methods
+#define ehci_device_ctrl_methods _bsd_ehci_device_ctrl_methods
+#define ehci_device_intr_methods _bsd_ehci_device_intr_methods
+#define ehci_device_isoc_fs_methods _bsd_ehci_device_isoc_fs_methods
+#define ehci_device_isoc_hs_methods _bsd_ehci_device_isoc_hs_methods
+#define ehci_init _bsd_ehci_init
+#define ehci_interrupt _bsd_ehci_interrupt
+#define ehci_iterate_hw_softc _bsd_ehci_iterate_hw_softc
+#define ehci_reset _bsd_ehci_reset
+#define ehci_resume _bsd_ehci_resume
+#define ehci_shutdown _bsd_ehci_shutdown
+#define ehci_suspend _bsd_ehci_suspend
+#define free _bsd_free
+#define hid_end_parse _bsd_hid_end_parse
+#define hid_get_data _bsd_hid_get_data
+#define hid_get_data_unsigned _bsd_hid_get_data_unsigned
+#define hid_get_descriptor_from_usb _bsd_hid_get_descriptor_from_usb
+#define hid_get_item _bsd_hid_get_item
+#define hid_is_collection _bsd_hid_is_collection
+#define hid_locate _bsd_hid_locate
+#define hid_report_size _bsd_hid_report_size
+#define hid_start_parse _bsd_hid_start_parse
+#define kernel_sysctl _bsd_kernel_sysctl
+#define kobj_class_compile _bsd_kobj_class_compile
+#define kobj_class_compile_static _bsd_kobj_class_compile_static
+#define kobj_class_free _bsd_kobj_class_free
+#define kobj_create _bsd_kobj_create
+#define kobj_delete _bsd_kobj_delete
+#define kobj_error_method _bsd_kobj_error_method
+#define kobj_init _bsd_kobj_init
+#define kobj_lookup_method _bsd_kobj_lookup_method
+#define kproc_create _bsd_kproc_create
+#define kproc_exit _bsd_kproc_exit
+#define kproc_kthread_add _bsd_kproc_kthread_add
+#define kproc_start _bsd_kproc_start
+#define kthread_add _bsd_kthread_add
+#define kthread_exit _bsd_kthread_exit
+#define kthread_start _bsd_kthread_start
+#define malloc _bsd_malloc
+#define malloc_init _bsd_malloc_init
+#define malloc_uninit _bsd_malloc_uninit
+#define M_CAMSIM _bsd_M_CAMSIM
+#define M_DEVBUF _bsd_M_DEVBUF
+#define M_SOCKET _bsd_M_SOCKET
+#define mi_startup _bsd_mi_startup
+#define module_lookupbyname _bsd_module_lookupbyname
+#define module_register _bsd_module_register
+#define module_register_init _bsd_module_register_init
+#define module_release _bsd_module_release
+#define M_TEMP _bsd_M_TEMP
+#define mtx_destroy _bsd_mtx_destroy
+#define mtx_init _bsd_mtx_init
+#define _mtx_lock_flags _bsd__mtx_lock_flags
+#define mtx_owned _bsd_mtx_owned
+#define mtx_recursed _bsd_mtx_recursed
+#define mtx_sysinit _bsd_mtx_sysinit
+#define _mtx_trylock _bsd__mtx_trylock
+#define _mtx_unlock_flags _bsd__mtx_unlock_flags
+#define M_USB _bsd_M_USB
+#define M_USBDEV _bsd_M_USBDEV
+#define M_USBHC _bsd_M_USBHC
+#define mutex_init _bsd_mutex_init
+#define null_class _bsd_null_class
+#define num_cam_status_entries _bsd_num_cam_status_entries
+#define ohci_bus_methods _bsd_ohci_bus_methods
+#define ohci_detach _bsd_ohci_detach
+#define ohci_device_bulk_methods _bsd_ohci_device_bulk_methods
+#define ohci_device_ctrl_methods _bsd_ohci_device_ctrl_methods
+#define ohci_device_intr_methods _bsd_ohci_device_intr_methods
+#define ohci_device_isoc_methods _bsd_ohci_device_isoc_methods
+#define ohci_init _bsd_ohci_init
+#define ohci_interrupt _bsd_ohci_interrupt
+#define ohci_iterate_hw_softc _bsd_ohci_iterate_hw_softc
+#define ohci_resume _bsd_ohci_resume
+#define ohci_suspend _bsd_ohci_suspend
+#define panic _bsd_panic
+#define pause _bsd_pause
+#define psignal _bsd_psignal
+#define root_bus_configure _bsd_root_bus_configure
+#define scsi_inquiry _bsd_scsi_inquiry
+#define scsi_print_inquiry _bsd_scsi_print_inquiry
+#define scsi_read_capacity _bsd_scsi_read_capacity
+#define scsi_read_write _bsd_scsi_read_write
+#define scsi_test_unit_ready _bsd_scsi_test_unit_ready
+#define selrecord _bsd_selrecord
+#define seltdfini _bsd_seltdfini
+#define selwakeup _bsd_selwakeup
+#define selwakeuppri _bsd_selwakeuppri
+#define strdup _bsd_strdup
+#define sx_destroy _bsd_sx_destroy
+#define _sx_downgrade _bsd__sx_downgrade
+#define sx_init_flags _bsd_sx_init_flags
+#define sx_sysinit _bsd_sx_sysinit
+#define _sx_try_upgrade _bsd__sx_try_upgrade
+#define _sx_try_xlock _bsd__sx_try_xlock
+#define _sx_xlock _bsd__sx_xlock
+#define sx_xlocked _bsd_sx_xlocked
+#define _sx_xunlock _bsd__sx_xunlock
+#define sysctl _bsd_sysctl
+#define sysctl_add_oid _bsd_sysctl_add_oid
+#define sysctlbyname _bsd_sysctlbyname
+#define sysctl_ctx_entry_add _bsd_sysctl_ctx_entry_add
+#define sysctl_ctx_entry_del _bsd_sysctl_ctx_entry_del
+#define sysctl_ctx_entry_find _bsd_sysctl_ctx_entry_find
+#define sysctl_ctx_free _bsd_sysctl_ctx_free
+#define sysctl_ctx_init _bsd_sysctl_ctx_init
+#define sysctl_find_oid _bsd_sysctl_find_oid
+#define sysctl_handle_int _bsd_sysctl_handle_int
+#define sysctl_handle_long _bsd_sysctl_handle_long
+#define sysctl_handle_opaque _bsd_sysctl_handle_opaque
+#define sysctl_handle_quad _bsd_sysctl_handle_quad
+#define sysctl_handle_string _bsd_sysctl_handle_string
+#define sysctl_lock _bsd_sysctl_lock
+#define sysctl_move_oid _bsd_sysctl_move_oid
+#define sysctl_msec_to_ticks _bsd_sysctl_msec_to_ticks
+#define sysctlnametomib _bsd_sysctlnametomib
+#define sysctl_register_oid _bsd_sysctl_register_oid
+#define sysctl_remove_oid _bsd_sysctl_remove_oid
+#define sysctl_rename_oid _bsd_sysctl_rename_oid
+#define sysctl_unlock _bsd_sysctl_unlock
+#define sysctl_unregister_oid _bsd_sysctl_unregister_oid
+#define uhub_root_intr _bsd_uhub_root_intr
+#define usb_alloc_device _bsd_usb_alloc_device
+#define usb_alloc_mbufs _bsd_usb_alloc_mbufs
+#define usb_bdma_done_event _bsd_usb_bdma_done_event
+#define usb_bdma_post_sync _bsd_usb_bdma_post_sync
+#define usb_bdma_pre_sync _bsd_usb_bdma_pre_sync
+#define usb_bdma_work_loop _bsd_usb_bdma_work_loop
+#define usb_bus_mem_alloc_all _bsd_usb_bus_mem_alloc_all
+#define usb_bus_mem_flush_all _bsd_usb_bus_mem_flush_all
+#define usb_bus_mem_free_all _bsd_usb_bus_mem_free_all
+#define usb_bus_port_get_device _bsd_usb_bus_port_get_device
+#define usb_bus_port_set_device _bsd_usb_bus_port_set_device
+#define usb_bus_powerd _bsd_usb_bus_powerd
+#define usb_bus_power_update _bsd_usb_bus_power_update
+#define usb_bus_unload _bsd_usb_bus_unload
+#define usb_command_wrapper _bsd_usb_command_wrapper
+#define usb_config_parse _bsd_usb_config_parse
+#define usbd_clear_data_toggle _bsd_usbd_clear_data_toggle
+#define usbd_clear_stall_callback _bsd_usbd_clear_stall_callback
+#define usbd_copy_in _bsd_usbd_copy_in
+#define usbd_copy_out _bsd_usbd_copy_out
+#define usbd_ctrl_transfer_setup _bsd_usbd_ctrl_transfer_setup
+#define usbd_device_attached _bsd_usbd_device_attached
+#define usbd_do_request_callback _bsd_usbd_do_request_callback
+#define usbd_do_request_flags _bsd_usbd_do_request_flags
+#define usbd_do_request_proc _bsd_usbd_do_request_proc
+#define usb_debug _bsd_usb_debug
+#define usbd_enum_is_locked _bsd_usbd_enum_is_locked
+#define usbd_enum_lock _bsd_usbd_enum_lock
+#define usbd_enum_unlock _bsd_usbd_enum_unlock
+#define usbd_errstr _bsd_usbd_errstr
+#define usb_desc_foreach _bsd_usb_desc_foreach
+#define usb_detach_device _bsd_usb_detach_device
+#define usb_devclass_ptr _bsd_usb_devclass_ptr
+#define usb_devinfo _bsd_usb_devinfo
+#define usbd_filter_power_mode _bsd_usbd_filter_power_mode
+#define usbd_find_descriptor _bsd_usbd_find_descriptor
+#define usbd_frame_zero _bsd_usbd_frame_zero
+#define usbd_fs_isoc_schedule_alloc _bsd_usbd_fs_isoc_schedule_alloc
+#define usbd_fs_isoc_schedule_init_all _bsd_usbd_fs_isoc_schedule_init_all
+#define usbd_fs_isoc_schedule_isoc_time_expand _bsd_usbd_fs_isoc_schedule_isoc_time_expand
+#define usbd_get_bus_index _bsd_usbd_get_bus_index
+#define usbd_get_config_descriptor _bsd_usbd_get_config_descriptor
+#define usbd_get_device_descriptor _bsd_usbd_get_device_descriptor
+#define usbd_get_device_index _bsd_usbd_get_device_index
+#define usbd_get_dma_delay _bsd_usbd_get_dma_delay
+#define usbd_get_endpoint _bsd_usbd_get_endpoint
+#define usbd_get_ep_by_addr _bsd_usbd_get_ep_by_addr
+#define usbd_get_iface _bsd_usbd_get_iface
+#define usbd_get_interface_altindex _bsd_usbd_get_interface_altindex
+#define usbd_get_interface_descriptor _bsd_usbd_get_interface_descriptor
+#define usbd_get_isoc_fps _bsd_usbd_get_isoc_fps
+#define usbd_get_mode _bsd_usbd_get_mode
+#define usbd_get_no_alts _bsd_usbd_get_no_alts
+#define usbd_get_no_descriptors _bsd_usbd_get_no_descriptors
+#define usbd_get_page _bsd_usbd_get_page
+#define usbd_get_speed _bsd_usbd_get_speed
+#define usbd_interface_count _bsd_usbd_interface_count
+#define usbd_lookup_id_by_info _bsd_usbd_lookup_id_by_info
+#define usbd_lookup_id_by_uaa _bsd_usbd_lookup_id_by_uaa
+#define usb_dma_tag_find _bsd_usb_dma_tag_find
+#define usb_dma_tag_setup _bsd_usb_dma_tag_setup
+#define usb_dma_tag_unsetup _bsd_usb_dma_tag_unsetup
+#define usb_do_clear_stall_callback _bsd_usb_do_clear_stall_callback
+#define usbd_pipe_enter _bsd_usbd_pipe_enter
+#define usbd_pipe_start _bsd_usbd_pipe_start
+#define usbd_req_clear_device_feature _bsd_usbd_req_clear_device_feature
+#define usbd_req_clear_hub_feature _bsd_usbd_req_clear_hub_feature
+#define usbd_req_clear_port_feature _bsd_usbd_req_clear_port_feature
+#define usbd_req_get_alt_interface_no _bsd_usbd_req_get_alt_interface_no
+#define usbd_req_get_config _bsd_usbd_req_get_config
+#define usbd_req_get_config_desc _bsd_usbd_req_get_config_desc
+#define usbd_req_get_config_desc_full _bsd_usbd_req_get_config_desc_full
+#define usbd_req_get_desc _bsd_usbd_req_get_desc
+#define usbd_req_get_descriptor_ptr _bsd_usbd_req_get_descriptor_ptr
+#define usbd_req_get_device_desc _bsd_usbd_req_get_device_desc
+#define usbd_req_get_device_status _bsd_usbd_req_get_device_status
+#define usbd_req_get_hid_desc _bsd_usbd_req_get_hid_desc
+#define usbd_req_get_hub_descriptor _bsd_usbd_req_get_hub_descriptor
+#define usbd_req_get_hub_status _bsd_usbd_req_get_hub_status
+#define usbd_req_get_port_status _bsd_usbd_req_get_port_status
+#define usbd_req_get_report _bsd_usbd_req_get_report
+#define usbd_req_get_report_descriptor _bsd_usbd_req_get_report_descriptor
+#define usbd_req_get_string_any _bsd_usbd_req_get_string_any
+#define usbd_req_get_string_desc _bsd_usbd_req_get_string_desc
+#define usbd_req_re_enumerate _bsd_usbd_req_re_enumerate
+#define usbd_req_reset_port _bsd_usbd_req_reset_port
+#define usbd_req_set_address _bsd_usbd_req_set_address
+#define usbd_req_set_alt_interface_no _bsd_usbd_req_set_alt_interface_no
+#define usbd_req_set_config _bsd_usbd_req_set_config
+#define usbd_req_set_device_feature _bsd_usbd_req_set_device_feature
+#define usbd_req_set_hub_feature _bsd_usbd_req_set_hub_feature
+#define usbd_req_set_idle _bsd_usbd_req_set_idle
+#define usbd_req_set_port_feature _bsd_usbd_req_set_port_feature
+#define usbd_req_set_protocol _bsd_usbd_req_set_protocol
+#define usbd_req_set_report _bsd_usbd_req_set_report
+#define usbd_set_alt_interface_index _bsd_usbd_set_alt_interface_index
+#define usbd_set_config_index _bsd_usbd_set_config_index
+#define usbd_set_endpoint_stall _bsd_usbd_set_endpoint_stall
+#define usbd_set_parent_iface _bsd_usbd_set_parent_iface
+#define usbd_set_power_mode _bsd_usbd_set_power_mode
+#define usbd_sr_lock _bsd_usbd_sr_lock
+#define usbd_sr_unlock _bsd_usbd_sr_unlock
+#define usbd_transfer_clear_stall _bsd_usbd_transfer_clear_stall
+#define usbd_transfer_dequeue _bsd_usbd_transfer_dequeue
+#define usbd_transfer_done _bsd_usbd_transfer_done
+#define usbd_transfer_drain _bsd_usbd_transfer_drain
+#define usbd_transfer_enqueue _bsd_usbd_transfer_enqueue
+#define usbd_transfer_pending _bsd_usbd_transfer_pending
+#define usbd_transfer_poll _bsd_usbd_transfer_poll
+#define usbd_transfer_power_ref _bsd_usbd_transfer_power_ref
+#define usbd_transfer_setup _bsd_usbd_transfer_setup
+#define usbd_transfer_setup_sub _bsd_usbd_transfer_setup_sub
+#define usbd_transfer_setup_sub_malloc _bsd_usbd_transfer_setup_sub_malloc
+#define usbd_transfer_start _bsd_usbd_transfer_start
+#define usbd_transfer_stop _bsd_usbd_transfer_stop
+#define usbd_transfer_submit _bsd_usbd_transfer_submit
+#define usbd_transfer_timeout_ms _bsd_usbd_transfer_timeout_ms
+#define usbd_transfer_unsetup _bsd_usbd_transfer_unsetup
+#define usb_dump_device _bsd_usb_dump_device
+#define usb_dump_endpoint _bsd_usb_dump_endpoint
+#define usb_dump_iface _bsd_usb_dump_iface
+#define usb_dump_queue _bsd_usb_dump_queue
+#define usb_dump_xfer _bsd_usb_dump_xfer
+#define usbd_xfer_clr_flag _bsd_usbd_xfer_clr_flag
+#define usbd_xfer_frame_data _bsd_usbd_xfer_frame_data
+#define usbd_xfer_frame_len _bsd_usbd_xfer_frame_len
+#define usbd_xfer_get_fps_shift _bsd_usbd_xfer_get_fps_shift
+#define usbd_xfer_get_frame _bsd_usbd_xfer_get_frame
+#define usbd_xfer_get_priv _bsd_usbd_xfer_get_priv
+#define usbd_xfer_get_timestamp _bsd_usbd_xfer_get_timestamp
+#define usbd_xfer_is_stalled _bsd_usbd_xfer_is_stalled
+#define usbd_xfer_max_framelen _bsd_usbd_xfer_max_framelen
+#define usbd_xfer_max_frames _bsd_usbd_xfer_max_frames
+#define usbd_xfer_max_len _bsd_usbd_xfer_max_len
+#define usbd_xfer_set_flag _bsd_usbd_xfer_set_flag
+#define usbd_xfer_set_frame_data _bsd_usbd_xfer_set_frame_data
+#define usbd_xfer_set_frame_len _bsd_usbd_xfer_set_frame_len
+#define usbd_xfer_set_frame_offset _bsd_usbd_xfer_set_frame_offset
+#define usbd_xfer_set_frames _bsd_usbd_xfer_set_frames
+#define usbd_xfer_set_interval _bsd_usbd_xfer_set_interval
+#define usbd_xfer_set_priv _bsd_usbd_xfer_set_priv
+#define usbd_xfer_set_stall _bsd_usbd_xfer_set_stall
+#define usbd_xfer_set_timeout _bsd_usbd_xfer_set_timeout
+#define usbd_xfer_softc _bsd_usbd_xfer_softc
+#define usbd_xfer_state _bsd_usbd_xfer_state
+#define usbd_xfer_status _bsd_usbd_xfer_status
+#define usb_edesc_foreach _bsd_usb_edesc_foreach
+#define usb_endpoint_foreach _bsd_usb_endpoint_foreach
+#define usb_free_device _bsd_usb_free_device
+#define usb_get_manufacturer _bsd_usb_get_manufacturer
+#define usb_get_product _bsd_usb_get_product
+#define usb_get_serial _bsd_usb_get_serial
+#define usb_handle_request_callback _bsd_usb_handle_request_callback
+#define usb_handle_request_desc _bsd_usb_handle_request_desc
+#define usb_handle_request_method_default _bsd_usb_handle_request_method_default
+#define usb_hs_bandwidth_alloc _bsd_usb_hs_bandwidth_alloc
+#define usb_hs_bandwidth_free _bsd_usb_hs_bandwidth_free
+#define usb_idesc_foreach _bsd_usb_idesc_foreach
+#define usb_iface_is_cdrom _bsd_usb_iface_is_cdrom
+#define usb_isoc_time_expand _bsd_usb_isoc_time_expand
+#define usb_make_str_desc _bsd_usb_make_str_desc
+#define usb_msc_eject _bsd_usb_msc_eject
+#define usb_needs_explore _bsd_usb_needs_explore
+#define usb_needs_explore_all _bsd_usb_needs_explore_all
+#define usb_pause_mtx _bsd_usb_pause_mtx
+#define usb_pc_alloc_mem _bsd_usb_pc_alloc_mem
+#define usb_pc_cpu_flush _bsd_usb_pc_cpu_flush
+#define usb_pc_cpu_invalidate _bsd_usb_pc_cpu_invalidate
+#define usb_pc_dmamap_create _bsd_usb_pc_dmamap_create
+#define usb_pc_dmamap_destroy _bsd_usb_pc_dmamap_destroy
+#define usb_pc_free_mem _bsd_usb_pc_free_mem
+#define usb_pc_load_mem _bsd_usb_pc_load_mem
+#define usb_peer_can_wakeup _bsd_usb_peer_can_wakeup
+#define usb_printbcd _bsd_usb_printbcd
+#define usb_probe_and_attach _bsd_usb_probe_and_attach
+#define usb_proc_create _bsd_usb_proc_create
+#define usb_proc_drain _bsd_usb_proc_drain
+#define usb_proc_free _bsd_usb_proc_free
+#define usb_proc_is_gone _bsd_usb_proc_is_gone
+#define usb_proc_msignal _bsd_usb_proc_msignal
+#define usb_proc_mwait _bsd_usb_proc_mwait
+#define usb_proc_rewakeup _bsd_usb_proc_rewakeup
+#define usb_quirk_ioctl_p _bsd_usb_quirk_ioctl_p
+#define usb_quirk_unload _bsd_usb_quirk_unload
+#define usb_reset_iface_endpoints _bsd_usb_reset_iface_endpoints
+#define usb_set_device_state _bsd_usb_set_device_state
+#define usb_statestr _bsd_usb_statestr
+#define usb_suspend_resume _bsd_usb_suspend_resume
+#define usb_temp_get_desc_p _bsd_usb_temp_get_desc_p
+#define usb_template _bsd_usb_template
+#define usb_temp_setup_by_index_p _bsd_usb_temp_setup_by_index_p
+#define usb_temp_unload _bsd_usb_temp_unload
+#define usb_temp_unsetup_p _bsd_usb_temp_unsetup_p
+#define usb_test_quirk _bsd_usb_test_quirk
+#define usb_test_quirk_p _bsd_usb_test_quirk_p
+#define usb_trim_spaces _bsd_usb_trim_spaces
+#define xpt_bus_deregister _bsd_xpt_bus_deregister
+#define xpt_bus_register _bsd_xpt_bus_register
+#define xpt_done _bsd_xpt_done
+
+#endif /* _RTEMS_BSD_MACHINE_RTEMS_BSD_SYMBOLS_H_ */
diff --git a/rtems/freebsd/machine/rtems-bsd-sysinit.h b/rtems/freebsd/machine/rtems-bsd-sysinit.h
new file mode 100644
index 00000000..98de2b80
--- /dev/null
+++ b/rtems/freebsd/machine/rtems-bsd-sysinit.h
@@ -0,0 +1,67 @@
+/**
+ * @file
+ *
+ * @ingroup rtems_bsd_machine
+ *
+ * @brief TODO.
+ */
+
+/*
+ * Copyright (c) 2009, 2010 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Obere Lagerstr. 30
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.com/license/LICENSE.
+ */
+
+#ifndef _RTEMS_BSD_MACHINE_RTEMS_BSD_SYSINIT_H_
+#define _RTEMS_BSD_MACHINE_RTEMS_BSD_SYSINIT_H_
+
+#include <rtems/freebsd/sys/cdefs.h>
+#include <rtems/freebsd/sys/queue.h>
+#include <rtems/freebsd/sys/kernel.h>
+
+#define SYSINIT_NEED_FREEBSD_CORE \
+ SYSINIT_REFERENCE(configure1); \
+ SYSINIT_REFERENCE(module); \
+ SYSINIT_REFERENCE(kobj); \
+ SYSINIT_REFERENCE(linker_kernel); \
+ SYSINIT_MODULE_REFERENCE(rootbus); \
+ SYSINIT_DRIVER_REFERENCE(nexus, root)
+
+#define SYSINIT_NEED_USB_CORE \
+ SYSINIT_REFERENCE(usb_quirk_init); \
+ SYSINIT_DRIVER_REFERENCE(uhub, usbus)
+
+#define SYSINIT_NEED_USB_OHCI \
+ SYSINIT_DRIVER_REFERENCE(ohci, nexus); \
+ SYSINIT_DRIVER_REFERENCE(usbus, ohci)
+
+#define SYSINIT_NEED_USB_EHCI \
+ SYSINIT_DRIVER_REFERENCE(ehci, nexus); \
+ SYSINIT_DRIVER_REFERENCE(usbus, ehci)
+
+#define SYSINIT_NEED_USB_MASS_STORAGE \
+ SYSINIT_DRIVER_REFERENCE(umass, uhub)
+
+#define SYSINIT_NEED_USB_MOUSE \
+  SYSINIT_DRIVER_REFERENCE(ums, uhub)
+
+#define SYSINIT_NEED_SDHC \
+ SYSINIT_DRIVER_REFERENCE(sdhci, nexus); \
+ SYSINIT_DRIVER_REFERENCE(mmc, sdhci); \
+ SYSINIT_DRIVER_REFERENCE(mmcsd, mmc)
+
+#define SYSINIT_NEED_NET_MII \
+ SYSINIT_DRIVER_REFERENCE(icsphy, miibus);
+
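+/*
+ * Usage sketch (hypothetical application code, not part of this header):
+ * exactly one compilation unit in the application expands the macros
+ * above to pull in the required FreeBSD subsystems, e.g.
+ *
+ *	#include <rtems/freebsd/machine/rtems-bsd-sysinit.h>
+ *
+ *	SYSINIT_NEED_FREEBSD_CORE;
+ *	SYSINIT_NEED_USB_CORE;
+ *	SYSINIT_NEED_USB_OHCI;
+ *	SYSINIT_NEED_USB_MASS_STORAGE;
+ *
+ *	const char *const _bsd_nexus_devices [] = {
+ *		"ohci",
+ *		NULL
+ *	};
+ *
+ * The device names in _bsd_nexus_devices are board specific; the entry
+ * and the NULL termination shown here are assumptions for illustration
+ * only.
+ */
+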
+/* FIXME */
+extern const char *const _bsd_nexus_devices [];
+
+#endif /* _RTEMS_BSD_MACHINE_RTEMS_BSD_SYSINIT_H_ */
diff --git a/rtems/freebsd/machine/runq.h b/rtems/freebsd/machine/runq.h
new file mode 100644
index 00000000..c03945a2
--- /dev/null
+++ b/rtems/freebsd/machine/runq.h
@@ -0,0 +1,41 @@
+/**
+ * @file
+ *
+ * @ingroup rtems_bsd_machine
+ *
+ * @brief TODO.
+ */
+
+/*
+ * Copyright (c) 2009, 2010 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Obere Lagerstr. 30
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.com/license/LICENSE.
+ */
+
+#ifndef _RTEMS_BSD_MACHINE_RUNQ_H_
+#define _RTEMS_BSD_MACHINE_RUNQ_H_
+
+#ifndef _RTEMS_BSD_MACHINE_RTEMS_BSD_CONFIG_H_
+#error "the header file <rtems/freebsd/machine/rtems-bsd-config.h> must be included first"
+#endif
+
+#define RQB_LEN 0
+#define RQB_L2BPW 0
+#define RQB_BPW 0
+
+#define RQB_BIT(pri) 0
+#define RQB_WORD(pri) 0
+
+#define RQB_FFS(word) 0
+
+typedef uintptr_t rqb_word_t;
+
+#endif /* _RTEMS_BSD_MACHINE_RUNQ_H_ */
diff --git a/rtems/freebsd/machine/sf_buf.h b/rtems/freebsd/machine/sf_buf.h
new file mode 100644
index 00000000..936ffd88
--- /dev/null
+++ b/rtems/freebsd/machine/sf_buf.h
@@ -0,0 +1 @@
+/* EMPTY */
diff --git a/rtems/freebsd/machine/signal.h b/rtems/freebsd/machine/signal.h
new file mode 100644
index 00000000..72aebe22
--- /dev/null
+++ b/rtems/freebsd/machine/signal.h
@@ -0,0 +1,30 @@
+/**
+ * @file
+ *
+ * @ingroup rtems_bsd_machine
+ *
+ * @brief TODO.
+ */
+
+/*
+ * Copyright (c) 2009, 2010 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Obere Lagerstr. 30
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.com/license/LICENSE.
+ */
+
+#ifndef _RTEMS_BSD_MACHINE_SIGNAL_H_
+#define _RTEMS_BSD_MACHINE_SIGNAL_H_
+
+#ifndef _RTEMS_BSD_MACHINE_RTEMS_BSD_CONFIG_H_
+#error "the header file <rtems/freebsd/machine/rtems-bsd-config.h> must be included first"
+#endif
+
+#endif /* _RTEMS_BSD_MACHINE_SIGNAL_H_ */
diff --git a/rtems/freebsd/machine/stdarg.h b/rtems/freebsd/machine/stdarg.h
new file mode 100644
index 00000000..08fc1fb7
--- /dev/null
+++ b/rtems/freebsd/machine/stdarg.h
@@ -0,0 +1,30 @@
+/**
+ * @file
+ *
+ * @ingroup rtems_bsd_machine
+ *
+ * @brief TODO.
+ */
+
+/*
+ * Copyright (c) 2009, 2010 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Obere Lagerstr. 30
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.com/license/LICENSE.
+ */
+
+#ifndef _RTEMS_BSD_MACHINE_STDARG_H_
+#define _RTEMS_BSD_MACHINE_STDARG_H_
+
+#ifndef _RTEMS_BSD_MACHINE_RTEMS_BSD_CONFIG_H_
+#error "the header file <rtems/freebsd/machine/rtems-bsd-config.h> must be included first"
+#endif
+
+#endif /* _RTEMS_BSD_MACHINE_STDARG_H_ */
diff --git a/rtems/freebsd/machine/ucontext.h b/rtems/freebsd/machine/ucontext.h
new file mode 100644
index 00000000..9a46840c
--- /dev/null
+++ b/rtems/freebsd/machine/ucontext.h
@@ -0,0 +1,32 @@
+/**
+ * @file
+ *
+ * @ingroup rtems_bsd_machine
+ *
+ * @brief TODO.
+ */
+
+/*
+ * Copyright (c) 2009, 2010 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Obere Lagerstr. 30
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.com/license/LICENSE.
+ */
+
+#ifndef _RTEMS_BSD_MACHINE_UCONTEXT_H_
+#define _RTEMS_BSD_MACHINE_UCONTEXT_H_
+
+#ifndef _RTEMS_BSD_MACHINE_RTEMS_BSD_CONFIG_H_
+#error "the header file <rtems/freebsd/machine/rtems-bsd-config.h> must be included first"
+#endif
+
+typedef int mcontext_t;
+
+#endif /* _RTEMS_BSD_MACHINE_UCONTEXT_H_ */
diff --git a/rtems/freebsd/net/bpf.c b/rtems/freebsd/net/bpf.c
new file mode 100644
index 00000000..844367fe
--- /dev/null
+++ b/rtems/freebsd/net/bpf.c
@@ -0,0 +1,2398 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 1990, 1991, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from the Stanford/CMU enet packet filter,
+ * (net/enet.c) distributed as part of 4.3BSD, and code contributed
+ * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence
+ * Berkeley Laboratory.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)bpf.c 8.4 (Berkeley) 1/9/95
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/freebsd/local/opt_bpf.h>
+#include <rtems/freebsd/local/opt_compat.h>
+#include <rtems/freebsd/local/opt_netgraph.h>
+
+#include <rtems/freebsd/sys/types.h>
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/conf.h>
+#include <rtems/freebsd/sys/fcntl.h>
+#include <rtems/freebsd/sys/jail.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/time.h>
+#include <rtems/freebsd/sys/priv.h>
+#include <rtems/freebsd/sys/proc.h>
+#include <rtems/freebsd/sys/signalvar.h>
+#include <rtems/freebsd/sys/filio.h>
+#include <rtems/freebsd/sys/sockio.h>
+#include <rtems/freebsd/sys/ttycom.h>
+#include <rtems/freebsd/sys/uio.h>
+
+#include <rtems/freebsd/sys/event.h>
+#include <rtems/freebsd/sys/file.h>
+#include <rtems/freebsd/sys/poll.h>
+#include <rtems/freebsd/sys/proc.h>
+
+#include <rtems/freebsd/sys/socket.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/bpf.h>
+#include <rtems/freebsd/net/bpf_buffer.h>
+#ifdef BPF_JITTER
+#include <rtems/freebsd/net/bpf_jitter.h>
+#endif
+#include <rtems/freebsd/net/bpf_zerocopy.h>
+#include <rtems/freebsd/net/bpfdesc.h>
+#include <rtems/freebsd/net/vnet.h>
+
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/if_ether.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/sysctl.h>
+
+#include <rtems/freebsd/net80211/ieee80211_freebsd.h>
+
+#include <rtems/freebsd/security/mac/mac_framework.h>
+
+MALLOC_DEFINE(M_BPF, "BPF", "BPF data");
+
+#if defined(DEV_BPF) || defined(NETGRAPH_BPF)
+
+#define PRINET 26 /* interruptible */
+
+#ifdef COMPAT_FREEBSD32
+#include <rtems/freebsd/sys/mount.h>
+#include <rtems/freebsd/compat/freebsd32/freebsd32.h>
+#define BPF_ALIGNMENT32 sizeof(int32_t)
+#define BPF_WORDALIGN32(x) (((x)+(BPF_ALIGNMENT32-1))&~(BPF_ALIGNMENT32-1))
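+
+/*
+ * BPF_WORDALIGN32() rounds a length up to the next multiple of
+ * sizeof(int32_t); for example, BPF_WORDALIGN32(5) == 8 and
+ * BPF_WORDALIGN32(8) == 8.
+ */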
+
+/*
+ * 32-bit version of structure prepended to each packet. We use this header
+ * instead of the standard one for 32-bit streams. We mark a stream as
+ * 32-bit the first time we see a 32-bit compat ioctl request.
+ */
+struct bpf_hdr32 {
+ struct timeval32 bh_tstamp; /* time stamp */
+ uint32_t bh_caplen; /* length of captured portion */
+ uint32_t bh_datalen; /* original length of packet */
+ uint16_t bh_hdrlen; /* length of bpf header (this struct
+ plus alignment padding) */
+};
+
+struct bpf_program32 {
+ u_int bf_len;
+ uint32_t bf_insns;
+};
+
+struct bpf_dltlist32 {
+ u_int bfl_len;
+ u_int bfl_list;
+};
+
+#define BIOCSETF32	_IOW('B', 103, struct bpf_program32)
+#define BIOCSRTIMEOUT32	_IOW('B', 109, struct timeval32)
+#define BIOCGRTIMEOUT32	_IOR('B', 110, struct timeval32)
+#define BIOCGDLTLIST32	_IOWR('B', 121, struct bpf_dltlist32)
+#define BIOCSETWF32	_IOW('B', 123, struct bpf_program32)
+#define BIOCSETFNR32	_IOW('B', 130, struct bpf_program32)
+#endif
+
+/*
+ * bpf_iflist is a list of BPF interface structures, each corresponding to a
+ * specific DLT. The same network interface might have several BPF interface
+ * structures registered by different layers in the stack (e.g., 802.11
+ * frames, Ethernet frames, etc.).
+ */
+static LIST_HEAD(, bpf_if) bpf_iflist;
+static struct mtx bpf_mtx; /* bpf global lock */
+static int bpf_bpfd_cnt;
+
+static void bpf_attachd(struct bpf_d *, struct bpf_if *);
+static void bpf_detachd(struct bpf_d *);
+static void bpf_freed(struct bpf_d *);
+static int bpf_movein(struct uio *, int, struct ifnet *, struct mbuf **,
+ struct sockaddr *, int *, struct bpf_insn *);
+static int bpf_setif(struct bpf_d *, struct ifreq *);
+static void bpf_timed_out(void *);
+static __inline void
+ bpf_wakeup(struct bpf_d *);
+static void catchpacket(struct bpf_d *, u_char *, u_int, u_int,
+ void (*)(struct bpf_d *, caddr_t, u_int, void *, u_int),
+ struct timeval *);
+static void reset_d(struct bpf_d *);
+static int bpf_setf(struct bpf_d *, struct bpf_program *, u_long cmd);
+static int bpf_getdltlist(struct bpf_d *, struct bpf_dltlist *);
+static int bpf_setdlt(struct bpf_d *, u_int);
+static void filt_bpfdetach(struct knote *);
+static int filt_bpfread(struct knote *, long);
+static void bpf_drvinit(void *);
+static int bpf_stats_sysctl(SYSCTL_HANDLER_ARGS);
+
+SYSCTL_NODE(_net, OID_AUTO, bpf, CTLFLAG_RW, 0, "bpf sysctl");
+int bpf_maxinsns = BPF_MAXINSNS;
+SYSCTL_INT(_net_bpf, OID_AUTO, maxinsns, CTLFLAG_RW,
+ &bpf_maxinsns, 0, "Maximum bpf program instructions");
+static int bpf_zerocopy_enable = 0;
+SYSCTL_INT(_net_bpf, OID_AUTO, zerocopy_enable, CTLFLAG_RW,
+ &bpf_zerocopy_enable, 0, "Enable new zero-copy BPF buffer sessions");
+SYSCTL_NODE(_net_bpf, OID_AUTO, stats, CTLFLAG_MPSAFE | CTLFLAG_RW,
+ bpf_stats_sysctl, "bpf statistics portal");
+
+static d_open_t bpfopen;
+static d_read_t bpfread;
+static d_write_t bpfwrite;
+static d_ioctl_t bpfioctl;
+static d_poll_t bpfpoll;
+static d_kqfilter_t bpfkqfilter;
+
+static struct cdevsw bpf_cdevsw = {
+ .d_version = D_VERSION,
+ .d_open = bpfopen,
+ .d_read = bpfread,
+ .d_write = bpfwrite,
+ .d_ioctl = bpfioctl,
+ .d_poll = bpfpoll,
+ .d_name = "bpf",
+ .d_kqfilter = bpfkqfilter,
+};
+
+static struct filterops bpfread_filtops =
+ { 1, NULL, filt_bpfdetach, filt_bpfread };
+
+/*
+ * Wrapper functions for various buffering methods. If the set of buffer
+ * modes expands, we will probably want to introduce a switch data structure
+ * similar to protosw, et al.
+ */
+static void
+bpf_append_bytes(struct bpf_d *d, caddr_t buf, u_int offset, void *src,
+ u_int len)
+{
+
+ BPFD_LOCK_ASSERT(d);
+
+ switch (d->bd_bufmode) {
+ case BPF_BUFMODE_BUFFER:
+ return (bpf_buffer_append_bytes(d, buf, offset, src, len));
+
+ case BPF_BUFMODE_ZBUF:
+ d->bd_zcopy++;
+ return (bpf_zerocopy_append_bytes(d, buf, offset, src, len));
+
+ default:
+ panic("bpf_buf_append_bytes");
+ }
+}
+
+static void
+bpf_append_mbuf(struct bpf_d *d, caddr_t buf, u_int offset, void *src,
+ u_int len)
+{
+
+ BPFD_LOCK_ASSERT(d);
+
+ switch (d->bd_bufmode) {
+ case BPF_BUFMODE_BUFFER:
+ return (bpf_buffer_append_mbuf(d, buf, offset, src, len));
+
+ case BPF_BUFMODE_ZBUF:
+ d->bd_zcopy++;
+ return (bpf_zerocopy_append_mbuf(d, buf, offset, src, len));
+
+ default:
+ panic("bpf_buf_append_mbuf");
+ }
+}
+
+/*
+ * This function gets called when the free buffer is re-assigned.
+ */
+static void
+bpf_buf_reclaimed(struct bpf_d *d)
+{
+
+ BPFD_LOCK_ASSERT(d);
+
+ switch (d->bd_bufmode) {
+ case BPF_BUFMODE_BUFFER:
+ return;
+
+ case BPF_BUFMODE_ZBUF:
+ bpf_zerocopy_buf_reclaimed(d);
+ return;
+
+ default:
+ panic("bpf_buf_reclaimed");
+ }
+}
+
+/*
+ * If the buffer mechanism has a way to decide that a held buffer can be made
+ * free, then it is exposed via the bpf_canfreebuf() interface. (1) is
+ * returned if the buffer can be discarded, (0) is returned if it cannot.
+ */
+static int
+bpf_canfreebuf(struct bpf_d *d)
+{
+
+ BPFD_LOCK_ASSERT(d);
+
+ switch (d->bd_bufmode) {
+ case BPF_BUFMODE_ZBUF:
+ return (bpf_zerocopy_canfreebuf(d));
+ }
+ return (0);
+}
+
+/*
+ * Allow the buffer model to indicate that the current store buffer is
+ * immutable, regardless of the appearance of space. Return (1) if the
+ * buffer is writable, and (0) if not.
+ */
+static int
+bpf_canwritebuf(struct bpf_d *d)
+{
+
+ BPFD_LOCK_ASSERT(d);
+
+ switch (d->bd_bufmode) {
+ case BPF_BUFMODE_ZBUF:
+ return (bpf_zerocopy_canwritebuf(d));
+ }
+ return (1);
+}
+
+/*
+ * Notify buffer model that an attempt to write to the store buffer has
+ * resulted in a dropped packet, in which case the buffer may be considered
+ * full.
+ */
+static void
+bpf_buffull(struct bpf_d *d)
+{
+
+ BPFD_LOCK_ASSERT(d);
+
+ switch (d->bd_bufmode) {
+ case BPF_BUFMODE_ZBUF:
+ bpf_zerocopy_buffull(d);
+ break;
+ }
+}
+
+/*
+ * Notify the buffer model that a buffer has moved into the hold position.
+ */
+void
+bpf_bufheld(struct bpf_d *d)
+{
+
+ BPFD_LOCK_ASSERT(d);
+
+ switch (d->bd_bufmode) {
+ case BPF_BUFMODE_ZBUF:
+ bpf_zerocopy_bufheld(d);
+ break;
+ }
+}
+
+static void
+bpf_free(struct bpf_d *d)
+{
+
+ switch (d->bd_bufmode) {
+ case BPF_BUFMODE_BUFFER:
+ return (bpf_buffer_free(d));
+
+ case BPF_BUFMODE_ZBUF:
+ return (bpf_zerocopy_free(d));
+
+ default:
+ panic("bpf_buf_free");
+ }
+}
+
+static int
+bpf_uiomove(struct bpf_d *d, caddr_t buf, u_int len, struct uio *uio)
+{
+
+ if (d->bd_bufmode != BPF_BUFMODE_BUFFER)
+ return (EOPNOTSUPP);
+ return (bpf_buffer_uiomove(d, buf, len, uio));
+}
+
+static int
+bpf_ioctl_sblen(struct bpf_d *d, u_int *i)
+{
+
+ if (d->bd_bufmode != BPF_BUFMODE_BUFFER)
+ return (EOPNOTSUPP);
+ return (bpf_buffer_ioctl_sblen(d, i));
+}
+
+static int
+bpf_ioctl_getzmax(struct thread *td, struct bpf_d *d, size_t *i)
+{
+
+ if (d->bd_bufmode != BPF_BUFMODE_ZBUF)
+ return (EOPNOTSUPP);
+ return (bpf_zerocopy_ioctl_getzmax(td, d, i));
+}
+
+static int
+bpf_ioctl_rotzbuf(struct thread *td, struct bpf_d *d, struct bpf_zbuf *bz)
+{
+
+ if (d->bd_bufmode != BPF_BUFMODE_ZBUF)
+ return (EOPNOTSUPP);
+ return (bpf_zerocopy_ioctl_rotzbuf(td, d, bz));
+}
+
+static int
+bpf_ioctl_setzbuf(struct thread *td, struct bpf_d *d, struct bpf_zbuf *bz)
+{
+
+ if (d->bd_bufmode != BPF_BUFMODE_ZBUF)
+ return (EOPNOTSUPP);
+ return (bpf_zerocopy_ioctl_setzbuf(td, d, bz));
+}
+
+/*
+ * General BPF functions.
+ */
+static int
+bpf_movein(struct uio *uio, int linktype, struct ifnet *ifp, struct mbuf **mp,
+ struct sockaddr *sockp, int *hdrlen, struct bpf_insn *wfilter)
+{
+ const struct ieee80211_bpf_params *p;
+ struct ether_header *eh;
+ struct mbuf *m;
+ int error;
+ int len;
+ int hlen;
+ int slen;
+
+ /*
+ * Build a sockaddr based on the data link layer type.
+ * We do this at this level because the ethernet header
+ * is copied directly into the data field of the sockaddr.
+ * In the case of SLIP, there is no header and the packet
+ * is forwarded as is.
+ * Also, we are careful to leave room at the front of the mbuf
+ * for the link level header.
+ */
+ switch (linktype) {
+
+ case DLT_SLIP:
+ sockp->sa_family = AF_INET;
+ hlen = 0;
+ break;
+
+ case DLT_EN10MB:
+ sockp->sa_family = AF_UNSPEC;
+ /* XXX Would MAXLINKHDR be better? */
+ hlen = ETHER_HDR_LEN;
+ break;
+
+ case DLT_FDDI:
+ sockp->sa_family = AF_IMPLINK;
+ hlen = 0;
+ break;
+
+ case DLT_RAW:
+ sockp->sa_family = AF_UNSPEC;
+ hlen = 0;
+ break;
+
+ case DLT_NULL:
+ /*
+ * null interface types require a 4 byte pseudo header which
+ * corresponds to the address family of the packet.
+ */
+ sockp->sa_family = AF_UNSPEC;
+ hlen = 4;
+ break;
+
+ case DLT_ATM_RFC1483:
+ /*
+		 * The en atm driver requires a 4-byte ATM pseudo header.
+		 * Though it isn't standard, the vpi:vci needs to be
+		 * specified anyway.
+ */
+ sockp->sa_family = AF_UNSPEC;
+ hlen = 12; /* XXX 4(ATM_PH) + 3(LLC) + 5(SNAP) */
+ break;
+
+ case DLT_PPP:
+ sockp->sa_family = AF_UNSPEC;
+ hlen = 4; /* This should match PPP_HDRLEN */
+ break;
+
+ case DLT_IEEE802_11: /* IEEE 802.11 wireless */
+ sockp->sa_family = AF_IEEE80211;
+ hlen = 0;
+ break;
+
+ case DLT_IEEE802_11_RADIO: /* IEEE 802.11 wireless w/ phy params */
+ sockp->sa_family = AF_IEEE80211;
+ sockp->sa_len = 12; /* XXX != 0 */
+ hlen = sizeof(struct ieee80211_bpf_params);
+ break;
+
+ default:
+ return (EIO);
+ }
+
+ len = uio->uio_resid;
+
+ if (len - hlen > ifp->if_mtu)
+ return (EMSGSIZE);
+
+ if ((unsigned)len > MJUM16BYTES)
+ return (EIO);
+
+ if (len <= MHLEN)
+ MGETHDR(m, M_WAIT, MT_DATA);
+ else if (len <= MCLBYTES)
+ m = m_getcl(M_WAIT, MT_DATA, M_PKTHDR);
+ else
+ m = m_getjcl(M_WAIT, MT_DATA, M_PKTHDR,
+#if (MJUMPAGESIZE > MCLBYTES)
+ len <= MJUMPAGESIZE ? MJUMPAGESIZE :
+#endif
+ (len <= MJUM9BYTES ? MJUM9BYTES : MJUM16BYTES));
+ m->m_pkthdr.len = m->m_len = len;
+ m->m_pkthdr.rcvif = NULL;
+ *mp = m;
+
+ if (m->m_len < hlen) {
+ error = EPERM;
+ goto bad;
+ }
+
+ error = uiomove(mtod(m, u_char *), len, uio);
+ if (error)
+ goto bad;
+
+ slen = bpf_filter(wfilter, mtod(m, u_char *), len, len);
+ if (slen == 0) {
+ error = EPERM;
+ goto bad;
+ }
+
+ /* Check for multicast destination */
+ switch (linktype) {
+ case DLT_EN10MB:
+ eh = mtod(m, struct ether_header *);
+ if (ETHER_IS_MULTICAST(eh->ether_dhost)) {
+ if (bcmp(ifp->if_broadcastaddr, eh->ether_dhost,
+ ETHER_ADDR_LEN) == 0)
+ m->m_flags |= M_BCAST;
+ else
+ m->m_flags |= M_MCAST;
+ }
+ break;
+ }
+
+ /*
+ * Make room for link header, and copy it to sockaddr
+ */
+ if (hlen != 0) {
+ if (sockp->sa_family == AF_IEEE80211) {
+ /*
+ * Collect true length from the parameter header
+ * NB: sockp is known to be zero'd so if we do a
+ * short copy unspecified parameters will be
+ * zero.
+ * NB: packet may not be aligned after stripping
+ * bpf params
+ * XXX check ibp_vers
+ */
+ p = mtod(m, const struct ieee80211_bpf_params *);
+ hlen = p->ibp_len;
+ if (hlen > sizeof(sockp->sa_data)) {
+ error = EINVAL;
+ goto bad;
+ }
+ }
+ bcopy(m->m_data, sockp->sa_data, hlen);
+ }
+ *hdrlen = hlen;
+
+ return (0);
+bad:
+ m_freem(m);
+ return (error);
+}
+
+/*
+ * Attach file to the bpf interface, i.e. make d listen on bp.
+ */
+static void
+bpf_attachd(struct bpf_d *d, struct bpf_if *bp)
+{
+ /*
+ * Point d at bp, and add d to the interface's list of listeners.
+ * Finally, point the driver's bpf cookie at the interface so
+ * it will divert packets to bpf.
+ */
+ BPFIF_LOCK(bp);
+ d->bd_bif = bp;
+ LIST_INSERT_HEAD(&bp->bif_dlist, d, bd_next);
+
+ bpf_bpfd_cnt++;
+ BPFIF_UNLOCK(bp);
+
+ EVENTHANDLER_INVOKE(bpf_track, bp->bif_ifp, bp->bif_dlt, 1);
+}
+
+/*
+ * Detach a file from its interface.
+ */
+static void
+bpf_detachd(struct bpf_d *d)
+{
+ int error;
+ struct bpf_if *bp;
+ struct ifnet *ifp;
+
+ bp = d->bd_bif;
+ BPFIF_LOCK(bp);
+ BPFD_LOCK(d);
+ ifp = d->bd_bif->bif_ifp;
+
+ /*
+ * Remove d from the interface's descriptor list.
+ */
+ LIST_REMOVE(d, bd_next);
+
+ bpf_bpfd_cnt--;
+ d->bd_bif = NULL;
+ BPFD_UNLOCK(d);
+ BPFIF_UNLOCK(bp);
+
+ EVENTHANDLER_INVOKE(bpf_track, ifp, bp->bif_dlt, 0);
+
+ /*
+ * Check if this descriptor had requested promiscuous mode.
+ * If so, turn it off.
+ */
+ if (d->bd_promisc) {
+ d->bd_promisc = 0;
+ CURVNET_SET(ifp->if_vnet);
+ error = ifpromisc(ifp, 0);
+ CURVNET_RESTORE();
+ if (error != 0 && error != ENXIO) {
+ /*
+			 * ENXIO can happen if a pccard is unplugged.
+ * Something is really wrong if we were able to put
+ * the driver into promiscuous mode, but can't
+ * take it out.
+ */
+ if_printf(bp->bif_ifp,
+ "bpf_detach: ifpromisc failed (%d)\n", error);
+ }
+ }
+}
+
+/*
+ * Close the descriptor by detaching it from its interface,
+ * deallocating its buffers, and marking it free.
+ */
+static void
+bpf_dtor(void *data)
+{
+ struct bpf_d *d = data;
+
+ BPFD_LOCK(d);
+ if (d->bd_state == BPF_WAITING)
+ callout_stop(&d->bd_callout);
+ d->bd_state = BPF_IDLE;
+ BPFD_UNLOCK(d);
+ funsetown(&d->bd_sigio);
+ mtx_lock(&bpf_mtx);
+ if (d->bd_bif)
+ bpf_detachd(d);
+ mtx_unlock(&bpf_mtx);
+ selwakeuppri(&d->bd_sel, PRINET);
+#ifdef MAC
+ mac_bpfdesc_destroy(d);
+#endif /* MAC */
+ knlist_destroy(&d->bd_sel.si_note);
+ callout_drain(&d->bd_callout);
+ bpf_freed(d);
+ free(d, M_BPF);
+}
+
+/*
+ * Open ethernet device. Returns ENXIO for illegal minor device number,
+ * EBUSY if file is open by another process.
+ */
+/* ARGSUSED */
+static int
+bpfopen(struct cdev *dev, int flags, int fmt, struct thread *td)
+{
+ struct bpf_d *d;
+ int error;
+
+ d = malloc(sizeof(*d), M_BPF, M_WAITOK | M_ZERO);
+ error = devfs_set_cdevpriv(d, bpf_dtor);
+ if (error != 0) {
+ free(d, M_BPF);
+ return (error);
+ }
+
+ /*
+ * For historical reasons, perform a one-time initialization call to
+ * the buffer routines, even though we're not yet committed to a
+ * particular buffer method.
+ */
+ bpf_buffer_init(d);
+ d->bd_bufmode = BPF_BUFMODE_BUFFER;
+ d->bd_sig = SIGIO;
+ d->bd_direction = BPF_D_INOUT;
+ d->bd_pid = td->td_proc->p_pid;
+#ifdef MAC
+ mac_bpfdesc_init(d);
+ mac_bpfdesc_create(td->td_ucred, d);
+#endif
+ mtx_init(&d->bd_mtx, devtoname(dev), "bpf cdev lock", MTX_DEF);
+ callout_init_mtx(&d->bd_callout, &d->bd_mtx, 0);
+ knlist_init_mtx(&d->bd_sel.si_note, &d->bd_mtx);
+
+ return (0);
+}
+
+/*
+ * bpfread - read next chunk of packets from buffers
+ */
+static int
+bpfread(struct cdev *dev, struct uio *uio, int ioflag)
+{
+ struct bpf_d *d;
+ int error;
+ int non_block;
+ int timed_out;
+
+ error = devfs_get_cdevpriv((void **)&d);
+ if (error != 0)
+ return (error);
+
+ /*
+	 * Restrict application to use a buffer the same size as
+	 * the kernel buffers.
+ */
+ if (uio->uio_resid != d->bd_bufsize)
+ return (EINVAL);
+
+ non_block = ((ioflag & O_NONBLOCK) != 0);
+
+ BPFD_LOCK(d);
+ d->bd_pid = curthread->td_proc->p_pid;
+ if (d->bd_bufmode != BPF_BUFMODE_BUFFER) {
+ BPFD_UNLOCK(d);
+ return (EOPNOTSUPP);
+ }
+ if (d->bd_state == BPF_WAITING)
+ callout_stop(&d->bd_callout);
+ timed_out = (d->bd_state == BPF_TIMED_OUT);
+ d->bd_state = BPF_IDLE;
+ /*
+ * If the hold buffer is empty, then do a timed sleep, which
+ * ends when the timeout expires or when enough packets
+ * have arrived to fill the store buffer.
+ */
+ while (d->bd_hbuf == NULL) {
+ if (d->bd_slen != 0) {
+ /*
+			 * One or more packets arrived since the previous
+			 * read or while we were asleep.
+ */
+ if (d->bd_immediate || non_block || timed_out) {
+ /*
+ * Rotate the buffers and return what's here
+ * if we are in immediate mode, non-blocking
+ * flag is set, or this descriptor timed out.
+ */
+ ROTATE_BUFFERS(d);
+ break;
+ }
+ }
+
+ /*
+ * No data is available, check to see if the bpf device
+ * is still pointed at a real interface. If not, return
+ * ENXIO so that the userland process knows to rebind
+ * it before using it again.
+ */
+ if (d->bd_bif == NULL) {
+ BPFD_UNLOCK(d);
+ return (ENXIO);
+ }
+
+ if (non_block) {
+ BPFD_UNLOCK(d);
+ return (EWOULDBLOCK);
+ }
+ error = msleep(d, &d->bd_mtx, PRINET|PCATCH,
+ "bpf", d->bd_rtout);
+ if (error == EINTR || error == ERESTART) {
+ BPFD_UNLOCK(d);
+ return (error);
+ }
+ if (error == EWOULDBLOCK) {
+ /*
+ * On a timeout, return what's in the buffer,
+ * which may be nothing. If there is something
+ * in the store buffer, we can rotate the buffers.
+ */
+ if (d->bd_hbuf)
+ /*
+ * We filled up the buffer in between
+ * getting the timeout and arriving
+ * here, so we don't need to rotate.
+ */
+ break;
+
+ if (d->bd_slen == 0) {
+ BPFD_UNLOCK(d);
+ return (0);
+ }
+ ROTATE_BUFFERS(d);
+ break;
+ }
+ }
+ /*
+ * At this point, we know we have something in the hold slot.
+ */
+ BPFD_UNLOCK(d);
+
+ /*
+ * Move data from hold buffer into user space.
+ * We know the entire buffer is transferred since
+ * we checked above that the read buffer is bpf_bufsize bytes.
+ *
+ * XXXRW: More synchronization needed here: what if a second thread
+ * issues a read on the same fd at the same time? Don't want this
+ * getting invalidated.
+ */
+ error = bpf_uiomove(d, d->bd_hbuf, d->bd_hlen, uio);
+
+ BPFD_LOCK(d);
+ d->bd_fbuf = d->bd_hbuf;
+ d->bd_hbuf = NULL;
+ d->bd_hlen = 0;
+ bpf_buf_reclaimed(d);
+ BPFD_UNLOCK(d);
+
+ return (error);
+}
+
+/*
+ * If there are processes sleeping on this descriptor, wake them up.
+ */
+static __inline void
+bpf_wakeup(struct bpf_d *d)
+{
+
+ BPFD_LOCK_ASSERT(d);
+ if (d->bd_state == BPF_WAITING) {
+ callout_stop(&d->bd_callout);
+ d->bd_state = BPF_IDLE;
+ }
+ wakeup(d);
+ if (d->bd_async && d->bd_sig && d->bd_sigio)
+ pgsigio(&d->bd_sigio, d->bd_sig, 0);
+
+ selwakeuppri(&d->bd_sel, PRINET);
+ KNOTE_LOCKED(&d->bd_sel.si_note, 0);
+}
+
+static void
+bpf_timed_out(void *arg)
+{
+ struct bpf_d *d = (struct bpf_d *)arg;
+
+ BPFD_LOCK_ASSERT(d);
+
+ if (callout_pending(&d->bd_callout) || !callout_active(&d->bd_callout))
+ return;
+ if (d->bd_state == BPF_WAITING) {
+ d->bd_state = BPF_TIMED_OUT;
+ if (d->bd_slen != 0)
+ bpf_wakeup(d);
+ }
+}
+
+static int
+bpf_ready(struct bpf_d *d)
+{
+
+ BPFD_LOCK_ASSERT(d);
+
+ if (!bpf_canfreebuf(d) && d->bd_hlen != 0)
+ return (1);
+ if ((d->bd_immediate || d->bd_state == BPF_TIMED_OUT) &&
+ d->bd_slen != 0)
+ return (1);
+ return (0);
+}
+
+static int
+bpfwrite(struct cdev *dev, struct uio *uio, int ioflag)
+{
+ struct bpf_d *d;
+ struct ifnet *ifp;
+ struct mbuf *m, *mc;
+ struct sockaddr dst;
+ int error, hlen;
+
+ error = devfs_get_cdevpriv((void **)&d);
+ if (error != 0)
+ return (error);
+
+ d->bd_pid = curthread->td_proc->p_pid;
+ d->bd_wcount++;
+ if (d->bd_bif == NULL) {
+ d->bd_wdcount++;
+ return (ENXIO);
+ }
+
+ ifp = d->bd_bif->bif_ifp;
+
+ if ((ifp->if_flags & IFF_UP) == 0) {
+ d->bd_wdcount++;
+ return (ENETDOWN);
+ }
+
+ if (uio->uio_resid == 0) {
+ d->bd_wdcount++;
+ return (0);
+ }
+
+ bzero(&dst, sizeof(dst));
+ m = NULL;
+ hlen = 0;
+ error = bpf_movein(uio, (int)d->bd_bif->bif_dlt, ifp,
+ &m, &dst, &hlen, d->bd_wfilter);
+ if (error) {
+ d->bd_wdcount++;
+ return (error);
+ }
+ d->bd_wfcount++;
+ if (d->bd_hdrcmplt)
+ dst.sa_family = pseudo_AF_HDRCMPLT;
+
+ if (d->bd_feedback) {
+ mc = m_dup(m, M_DONTWAIT);
+ if (mc != NULL)
+ mc->m_pkthdr.rcvif = ifp;
+ /* Set M_PROMISC for outgoing packets to be discarded. */
+ if (d->bd_direction == BPF_D_INOUT)
+ m->m_flags |= M_PROMISC;
+ } else
+ mc = NULL;
+
+ m->m_pkthdr.len -= hlen;
+ m->m_len -= hlen;
+ m->m_data += hlen; /* XXX */
+
+ CURVNET_SET(ifp->if_vnet);
+#ifdef MAC
+ BPFD_LOCK(d);
+ mac_bpfdesc_create_mbuf(d, m);
+ if (mc != NULL)
+ mac_bpfdesc_create_mbuf(d, mc);
+ BPFD_UNLOCK(d);
+#endif
+
+ error = (*ifp->if_output)(ifp, m, &dst, NULL);
+ if (error)
+ d->bd_wdcount++;
+
+ if (mc != NULL) {
+ if (error == 0)
+ (*ifp->if_input)(ifp, mc);
+ else
+ m_freem(mc);
+ }
+ CURVNET_RESTORE();
+
+ return (error);
+}
+
+/*
+ * Reset a descriptor by flushing its packet buffer and clearing the receive
+ * and drop counts. This is doable for kernel-only buffers, but with
+ * zero-copy buffers, we can't write to (or rotate) buffers that are
+ * currently owned by userspace. It would be nice if we could encapsulate
+ * this logic in the buffer code rather than here.
+ */
+static void
+reset_d(struct bpf_d *d)
+{
+
+ mtx_assert(&d->bd_mtx, MA_OWNED);
+
+ if ((d->bd_hbuf != NULL) &&
+ (d->bd_bufmode != BPF_BUFMODE_ZBUF || bpf_canfreebuf(d))) {
+ /* Free the hold buffer. */
+ d->bd_fbuf = d->bd_hbuf;
+ d->bd_hbuf = NULL;
+ d->bd_hlen = 0;
+ bpf_buf_reclaimed(d);
+ }
+ if (bpf_canwritebuf(d))
+ d->bd_slen = 0;
+ d->bd_rcount = 0;
+ d->bd_dcount = 0;
+ d->bd_fcount = 0;
+ d->bd_wcount = 0;
+ d->bd_wfcount = 0;
+ d->bd_wdcount = 0;
+ d->bd_zcopy = 0;
+}
+
+/*
+ * FIONREAD Check for read packet available.
+ * SIOCGIFADDR Get interface address - convenient hook to driver.
+ * BIOCGBLEN Get buffer len [for read()].
+ * BIOCSETF Set read filter.
+ * BIOCSETFNR Set read filter without resetting descriptor.
+ * BIOCSETWF Set write filter.
+ * BIOCFLUSH Flush read packet buffer.
+ * BIOCPROMISC Put interface into promiscuous mode.
+ * BIOCGDLT Get link layer type.
+ * BIOCGETIF Get interface name.
+ * BIOCSETIF Set interface.
+ * BIOCSRTIMEOUT Set read timeout.
+ * BIOCGRTIMEOUT Get read timeout.
+ * BIOCGSTATS Get packet stats.
+ * BIOCIMMEDIATE Set immediate mode.
+ * BIOCVERSION Get filter language version.
+ * BIOCGHDRCMPLT Get "header already complete" flag
+ * BIOCSHDRCMPLT Set "header already complete" flag
+ * BIOCGDIRECTION Get packet direction flag
+ * BIOCSDIRECTION Set packet direction flag
+ * BIOCLOCK Set "locked" flag
+ * BIOCFEEDBACK Set packet feedback mode.
+ * BIOCSETZBUF Set current zero-copy buffer locations.
+ * BIOCGETZMAX Get maximum zero-copy buffer size.
+ * BIOCROTZBUF Force rotation of zero-copy buffer
+ * BIOCSETBUFMODE Set buffer mode.
+ * BIOCGETBUFMODE Get current buffer mode.
+ */
+/* ARGSUSED */
+static int
+bpfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags,
+ struct thread *td)
+{
+ struct bpf_d *d;
+ int error;
+
+ error = devfs_get_cdevpriv((void **)&d);
+ if (error != 0)
+ return (error);
+
+ /*
+ * Refresh PID associated with this descriptor.
+ */
+ BPFD_LOCK(d);
+ d->bd_pid = td->td_proc->p_pid;
+ if (d->bd_state == BPF_WAITING)
+ callout_stop(&d->bd_callout);
+ d->bd_state = BPF_IDLE;
+ BPFD_UNLOCK(d);
+
+ if (d->bd_locked == 1) {
+ switch (cmd) {
+ case BIOCGBLEN:
+ case BIOCFLUSH:
+ case BIOCGDLT:
+ case BIOCGDLTLIST:
+#ifdef COMPAT_FREEBSD32
+ case BIOCGDLTLIST32:
+#endif
+ case BIOCGETIF:
+ case BIOCGRTIMEOUT:
+#ifdef COMPAT_FREEBSD32
+ case BIOCGRTIMEOUT32:
+#endif
+ case BIOCGSTATS:
+ case BIOCVERSION:
+ case BIOCGRSIG:
+ case BIOCGHDRCMPLT:
+ case BIOCFEEDBACK:
+ case FIONREAD:
+ case BIOCLOCK:
+ case BIOCSRTIMEOUT:
+#ifdef COMPAT_FREEBSD32
+ case BIOCSRTIMEOUT32:
+#endif
+ case BIOCIMMEDIATE:
+ case TIOCGPGRP:
+ case BIOCROTZBUF:
+ break;
+ default:
+ return (EPERM);
+ }
+ }
+#ifdef COMPAT_FREEBSD32
+ /*
+ * If we see a 32-bit compat ioctl, mark the stream as 32-bit so
+ * that it will get 32-bit packet headers.
+ */
+ switch (cmd) {
+ case BIOCSETF32:
+ case BIOCSETFNR32:
+ case BIOCSETWF32:
+ case BIOCGDLTLIST32:
+ case BIOCGRTIMEOUT32:
+ case BIOCSRTIMEOUT32:
+ d->bd_compat32 = 1;
+ }
+#endif
+
+ CURVNET_SET(TD_TO_VNET(td));
+ switch (cmd) {
+
+ default:
+ error = EINVAL;
+ break;
+
+ /*
+ * Check for read packet available.
+ */
+ case FIONREAD:
+ {
+ int n;
+
+ BPFD_LOCK(d);
+ n = d->bd_slen;
+ if (d->bd_hbuf)
+ n += d->bd_hlen;
+ BPFD_UNLOCK(d);
+
+ *(int *)addr = n;
+ break;
+ }
+
+ case SIOCGIFADDR:
+ {
+ struct ifnet *ifp;
+
+ if (d->bd_bif == NULL)
+ error = EINVAL;
+ else {
+ ifp = d->bd_bif->bif_ifp;
+ error = (*ifp->if_ioctl)(ifp, cmd, addr);
+ }
+ break;
+ }
+
+ /*
+ * Get buffer len [for read()].
+ */
+ case BIOCGBLEN:
+ *(u_int *)addr = d->bd_bufsize;
+ break;
+
+ /*
+ * Set buffer length.
+ */
+ case BIOCSBLEN:
+ error = bpf_ioctl_sblen(d, (u_int *)addr);
+ break;
+
+ /*
+ * Set link layer read filter.
+ */
+ case BIOCSETF:
+ case BIOCSETFNR:
+ case BIOCSETWF:
+#ifdef COMPAT_FREEBSD32
+ case BIOCSETF32:
+ case BIOCSETFNR32:
+ case BIOCSETWF32:
+#endif
+ error = bpf_setf(d, (struct bpf_program *)addr, cmd);
+ break;
+
+ /*
+ * Flush read packet buffer.
+ */
+ case BIOCFLUSH:
+ BPFD_LOCK(d);
+ reset_d(d);
+ BPFD_UNLOCK(d);
+ break;
+
+ /*
+ * Put interface into promiscuous mode.
+ */
+ case BIOCPROMISC:
+ if (d->bd_bif == NULL) {
+ /*
+ * No interface attached yet.
+ */
+ error = EINVAL;
+ break;
+ }
+ if (d->bd_promisc == 0) {
+ error = ifpromisc(d->bd_bif->bif_ifp, 1);
+ if (error == 0)
+ d->bd_promisc = 1;
+ }
+ break;
+
+ /*
+ * Get current data link type.
+ */
+ case BIOCGDLT:
+ if (d->bd_bif == NULL)
+ error = EINVAL;
+ else
+ *(u_int *)addr = d->bd_bif->bif_dlt;
+ break;
+
+ /*
+ * Get a list of supported data link types.
+ */
+#ifdef COMPAT_FREEBSD32
+ case BIOCGDLTLIST32:
+ {
+ struct bpf_dltlist32 *list32;
+ struct bpf_dltlist dltlist;
+
+ list32 = (struct bpf_dltlist32 *)addr;
+ dltlist.bfl_len = list32->bfl_len;
+ dltlist.bfl_list = PTRIN(list32->bfl_list);
+ if (d->bd_bif == NULL)
+ error = EINVAL;
+ else {
+ error = bpf_getdltlist(d, &dltlist);
+ if (error == 0)
+ list32->bfl_len = dltlist.bfl_len;
+ }
+ break;
+ }
+#endif
+
+ case BIOCGDLTLIST:
+ if (d->bd_bif == NULL)
+ error = EINVAL;
+ else
+ error = bpf_getdltlist(d, (struct bpf_dltlist *)addr);
+ break;
+
+ /*
+ * Set data link type.
+ */
+ case BIOCSDLT:
+ if (d->bd_bif == NULL)
+ error = EINVAL;
+ else
+ error = bpf_setdlt(d, *(u_int *)addr);
+ break;
+
+ /*
+ * Get interface name.
+ */
+ case BIOCGETIF:
+ if (d->bd_bif == NULL)
+ error = EINVAL;
+ else {
+ struct ifnet *const ifp = d->bd_bif->bif_ifp;
+ struct ifreq *const ifr = (struct ifreq *)addr;
+
+ strlcpy(ifr->ifr_name, ifp->if_xname,
+ sizeof(ifr->ifr_name));
+ }
+ break;
+
+ /*
+ * Set interface.
+ */
+ case BIOCSETIF:
+ error = bpf_setif(d, (struct ifreq *)addr);
+ break;
+
+ /*
+ * Set read timeout.
+ */
+ case BIOCSRTIMEOUT:
+#ifdef COMPAT_FREEBSD32
+ case BIOCSRTIMEOUT32:
+#endif
+ {
+ struct timeval *tv = (struct timeval *)addr;
+#ifdef COMPAT_FREEBSD32
+ struct timeval32 *tv32;
+ struct timeval tv64;
+
+ if (cmd == BIOCSRTIMEOUT32) {
+ tv32 = (struct timeval32 *)addr;
+ tv = &tv64;
+ tv->tv_sec = tv32->tv_sec;
+ tv->tv_usec = tv32->tv_usec;
+ } else
+#endif
+ tv = (struct timeval *)addr;
+
+ /*
+ * Subtract 1 tick from tvtohz() since this isn't
+ * a one-shot timer.
+ */
+ if ((error = itimerfix(tv)) == 0)
+ d->bd_rtout = tvtohz(tv) - 1;
+ break;
+ }
+
+ /*
+ * Get read timeout.
+ */
+ case BIOCGRTIMEOUT:
+#ifdef COMPAT_FREEBSD32
+ case BIOCGRTIMEOUT32:
+#endif
+ {
+ struct timeval *tv;
+#ifdef COMPAT_FREEBSD32
+ struct timeval32 *tv32;
+ struct timeval tv64;
+
+ if (cmd == BIOCGRTIMEOUT32)
+ tv = &tv64;
+ else
+#endif
+ tv = (struct timeval *)addr;
+
+ tv->tv_sec = d->bd_rtout / hz;
+ tv->tv_usec = (d->bd_rtout % hz) * tick;
+#ifdef COMPAT_FREEBSD32
+ if (cmd == BIOCGRTIMEOUT32) {
+ tv32 = (struct timeval32 *)addr;
+ tv32->tv_sec = tv->tv_sec;
+ tv32->tv_usec = tv->tv_usec;
+ }
+#endif
+
+ break;
+ }
+
+ /*
+ * Get packet stats.
+ */
+ case BIOCGSTATS:
+ {
+ struct bpf_stat *bs = (struct bpf_stat *)addr;
+
+ /* XXXCSJP overflow */
+ bs->bs_recv = d->bd_rcount;
+ bs->bs_drop = d->bd_dcount;
+ break;
+ }
+
+ /*
+ * Set immediate mode.
+ */
+ case BIOCIMMEDIATE:
+ d->bd_immediate = *(u_int *)addr;
+ break;
+
+ case BIOCVERSION:
+ {
+ struct bpf_version *bv = (struct bpf_version *)addr;
+
+ bv->bv_major = BPF_MAJOR_VERSION;
+ bv->bv_minor = BPF_MINOR_VERSION;
+ break;
+ }
+
+ /*
+ * Get "header already complete" flag
+ */
+ case BIOCGHDRCMPLT:
+ *(u_int *)addr = d->bd_hdrcmplt;
+ break;
+
+ /*
+ * Set "header already complete" flag
+ */
+ case BIOCSHDRCMPLT:
+ d->bd_hdrcmplt = *(u_int *)addr ? 1 : 0;
+ break;
+
+ /*
+ * Get packet direction flag
+ */
+ case BIOCGDIRECTION:
+ *(u_int *)addr = d->bd_direction;
+ break;
+
+ /*
+ * Set packet direction flag
+ */
+ case BIOCSDIRECTION:
+ {
+ u_int direction;
+
+ direction = *(u_int *)addr;
+ switch (direction) {
+ case BPF_D_IN:
+ case BPF_D_INOUT:
+ case BPF_D_OUT:
+ d->bd_direction = direction;
+ break;
+ default:
+ error = EINVAL;
+ }
+ }
+ break;
+
+ case BIOCFEEDBACK:
+ d->bd_feedback = *(u_int *)addr;
+ break;
+
+ case BIOCLOCK:
+ d->bd_locked = 1;
+ break;
+
+ case FIONBIO: /* Non-blocking I/O */
+ break;
+
+ case FIOASYNC: /* Send signal on receive packets */
+ d->bd_async = *(int *)addr;
+ break;
+
+ case FIOSETOWN:
+ error = fsetown(*(int *)addr, &d->bd_sigio);
+ break;
+
+ case FIOGETOWN:
+ *(int *)addr = fgetown(&d->bd_sigio);
+ break;
+
+ /* This is deprecated, FIOSETOWN should be used instead. */
+ case TIOCSPGRP:
+ error = fsetown(-(*(int *)addr), &d->bd_sigio);
+ break;
+
+ /* This is deprecated, FIOGETOWN should be used instead. */
+ case TIOCGPGRP:
+ *(int *)addr = -fgetown(&d->bd_sigio);
+ break;
+
+ case BIOCSRSIG: /* Set receive signal */
+ {
+ u_int sig;
+
+ sig = *(u_int *)addr;
+
+ if (sig >= NSIG)
+ error = EINVAL;
+ else
+ d->bd_sig = sig;
+ break;
+ }
+ case BIOCGRSIG:
+ *(u_int *)addr = d->bd_sig;
+ break;
+
+ case BIOCGETBUFMODE:
+ *(u_int *)addr = d->bd_bufmode;
+ break;
+
+ case BIOCSETBUFMODE:
+ /*
+ * Allow the buffering mode to be changed as long as we
+ * haven't yet committed to a particular mode. Our
+ * definition of commitment, for now, is whether or not a
+ * buffer has been allocated or an interface attached, since
+ * that's the point where things get tricky.
+ */
+ switch (*(u_int *)addr) {
+ case BPF_BUFMODE_BUFFER:
+ break;
+
+ case BPF_BUFMODE_ZBUF:
+ if (bpf_zerocopy_enable)
+ break;
+			/* FALLTHROUGH */
+
+ default:
+ CURVNET_RESTORE();
+ return (EINVAL);
+ }
+
+ BPFD_LOCK(d);
+ if (d->bd_sbuf != NULL || d->bd_hbuf != NULL ||
+ d->bd_fbuf != NULL || d->bd_bif != NULL) {
+ BPFD_UNLOCK(d);
+ CURVNET_RESTORE();
+ return (EBUSY);
+ }
+ d->bd_bufmode = *(u_int *)addr;
+ BPFD_UNLOCK(d);
+ break;
+
+ case BIOCGETZMAX:
+ error = bpf_ioctl_getzmax(td, d, (size_t *)addr);
+ break;
+
+ case BIOCSETZBUF:
+ error = bpf_ioctl_setzbuf(td, d, (struct bpf_zbuf *)addr);
+ break;
+
+ case BIOCROTZBUF:
+ error = bpf_ioctl_rotzbuf(td, d, (struct bpf_zbuf *)addr);
+ break;
+ }
+ CURVNET_RESTORE();
+ return (error);
+}
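+
+/*
+ * A minimal userspace sketch of the ioctl protocol above (hypothetical
+ * application code, not part of this file; the device path and the
+ * interface name are assumptions for illustration).  Note that bpfread()
+ * rejects reads whose size differs from the buffer size, so the value
+ * returned by BIOCGBLEN must be used verbatim:
+ *
+ *	int fd = open("/dev/bpf0", O_RDWR);
+ *	struct ifreq ifr;
+ *	u_int imm = 1, blen;
+ *
+ *	strlcpy(ifr.ifr_name, "em0", sizeof(ifr.ifr_name));
+ *	ioctl(fd, BIOCSETIF, &ifr);
+ *	ioctl(fd, BIOCIMMEDIATE, &imm);
+ *	ioctl(fd, BIOCGBLEN, &blen);
+ *
+ *	char *buf = malloc(blen);
+ *	ssize_t n = read(fd, buf, blen);
+ *
+ * Each captured packet in buf is prefixed by a struct bpf_hdr; the next
+ * packet starts at the current offset plus
+ * BPF_WORDALIGN(bh_hdrlen + bh_caplen).
+ */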
+
+/*
+ * Set d's packet filter program to fp. If this file already has a filter,
+ * free it and replace it. Returns EINVAL for bogus requests.
+ */
+static int
+bpf_setf(struct bpf_d *d, struct bpf_program *fp, u_long cmd)
+{
+ struct bpf_insn *fcode, *old;
+ u_int wfilter, flen, size;
+#ifdef BPF_JITTER
+ bpf_jit_filter *ofunc;
+#endif
+#ifdef COMPAT_FREEBSD32
+ struct bpf_program32 *fp32;
+ struct bpf_program fp_swab;
+
+ if (cmd == BIOCSETWF32 || cmd == BIOCSETF32 || cmd == BIOCSETFNR32) {
+ fp32 = (struct bpf_program32 *)fp;
+ fp_swab.bf_len = fp32->bf_len;
+ fp_swab.bf_insns = (struct bpf_insn *)(uintptr_t)fp32->bf_insns;
+ fp = &fp_swab;
+ if (cmd == BIOCSETWF32)
+ cmd = BIOCSETWF;
+ }
+#endif
+ if (cmd == BIOCSETWF) {
+ old = d->bd_wfilter;
+ wfilter = 1;
+#ifdef BPF_JITTER
+ ofunc = NULL;
+#endif
+ } else {
+ wfilter = 0;
+ old = d->bd_rfilter;
+#ifdef BPF_JITTER
+ ofunc = d->bd_bfilter;
+#endif
+ }
+ if (fp->bf_insns == NULL) {
+ if (fp->bf_len != 0)
+ return (EINVAL);
+ BPFD_LOCK(d);
+ if (wfilter)
+ d->bd_wfilter = NULL;
+ else {
+ d->bd_rfilter = NULL;
+#ifdef BPF_JITTER
+ d->bd_bfilter = NULL;
+#endif
+ if (cmd == BIOCSETF)
+ reset_d(d);
+ }
+ BPFD_UNLOCK(d);
+ if (old != NULL)
+ free((caddr_t)old, M_BPF);
+#ifdef BPF_JITTER
+ if (ofunc != NULL)
+ bpf_destroy_jit_filter(ofunc);
+#endif
+ return (0);
+ }
+ flen = fp->bf_len;
+ if (flen > bpf_maxinsns)
+ return (EINVAL);
+
+ size = flen * sizeof(*fp->bf_insns);
+ fcode = (struct bpf_insn *)malloc(size, M_BPF, M_WAITOK);
+ if (copyin((caddr_t)fp->bf_insns, (caddr_t)fcode, size) == 0 &&
+ bpf_validate(fcode, (int)flen)) {
+ BPFD_LOCK(d);
+ if (wfilter)
+ d->bd_wfilter = fcode;
+ else {
+ d->bd_rfilter = fcode;
+#ifdef BPF_JITTER
+ d->bd_bfilter = bpf_jitter(fcode, flen);
+#endif
+ if (cmd == BIOCSETF)
+ reset_d(d);
+ }
+ BPFD_UNLOCK(d);
+ if (old != NULL)
+ free((caddr_t)old, M_BPF);
+#ifdef BPF_JITTER
+ if (ofunc != NULL)
+ bpf_destroy_jit_filter(ofunc);
+#endif
+
+ return (0);
+ }
+ free((caddr_t)fcode, M_BPF);
+ return (EINVAL);
+}
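+
+/*
+ * A sketch of the smallest filter userland could install through
+ * BIOCSETF (hypothetical example, not used by this file): a single
+ * BPF_RET instruction whose constant operand is the snapshot length,
+ * so a non-zero value accepts every packet:
+ *
+ *	struct bpf_insn accept_all[] = {
+ *		BPF_STMT(BPF_RET | BPF_K, (u_int)-1)
+ *	};
+ *	struct bpf_program prog = { 1, accept_all };
+ *
+ *	ioctl(fd, BIOCSETF, &prog);
+ *
+ * bpf_setf() copies the instructions in with copyin() and runs
+ * bpf_validate() on them before installing the program.
+ */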
+
+/*
+ * Detach a file from its current interface (if attached at all) and attach
+ * to the interface indicated by the name stored in ifr.
+ * Return an errno or 0.
+ */
+static int
+bpf_setif(struct bpf_d *d, struct ifreq *ifr)
+{
+ struct bpf_if *bp;
+ struct ifnet *theywant;
+
+ theywant = ifunit(ifr->ifr_name);
+ if (theywant == NULL || theywant->if_bpf == NULL)
+ return (ENXIO);
+
+ bp = theywant->if_bpf;
+
+ /*
+ * Behavior here depends on the buffering model. If we're using
+ * kernel memory buffers, then we can allocate them here. If we're
+ * using zero-copy, then the user process must have registered
+ * buffers by the time we get here. If not, return an error.
+ *
+ * XXXRW: There are locking issues here with multi-threaded use: what
+ * if two threads try to set the interface at once?
+ */
+ switch (d->bd_bufmode) {
+ case BPF_BUFMODE_BUFFER:
+ if (d->bd_sbuf == NULL)
+ bpf_buffer_alloc(d);
+ KASSERT(d->bd_sbuf != NULL, ("bpf_setif: bd_sbuf NULL"));
+ break;
+
+ case BPF_BUFMODE_ZBUF:
+ if (d->bd_sbuf == NULL)
+ return (EINVAL);
+ break;
+
+ default:
+ panic("bpf_setif: bufmode %d", d->bd_bufmode);
+ }
+ if (bp != d->bd_bif) {
+ if (d->bd_bif)
+ /*
+ * Detach if attached to something else.
+ */
+ bpf_detachd(d);
+
+ bpf_attachd(d, bp);
+ }
+ BPFD_LOCK(d);
+ reset_d(d);
+ BPFD_UNLOCK(d);
+ return (0);
+}
+
+/*
+ * Support for select() and poll() system calls
+ *
+ * Return true iff the specific operation will not block indefinitely.
+ * Otherwise, return false but make a note that a selwakeup() must be done.
+ */
+static int
+bpfpoll(struct cdev *dev, int events, struct thread *td)
+{
+ struct bpf_d *d;
+ int revents;
+
+ if (devfs_get_cdevpriv((void **)&d) != 0 || d->bd_bif == NULL)
+ return (events &
+ (POLLHUP|POLLIN|POLLRDNORM|POLLOUT|POLLWRNORM));
+
+ /*
+ * Refresh PID associated with this descriptor.
+ */
+ revents = events & (POLLOUT | POLLWRNORM);
+ BPFD_LOCK(d);
+ d->bd_pid = td->td_proc->p_pid;
+ if (events & (POLLIN | POLLRDNORM)) {
+ if (bpf_ready(d))
+ revents |= events & (POLLIN | POLLRDNORM);
+ else {
+ selrecord(td, &d->bd_sel);
+ /* Start the read timeout if necessary. */
+ if (d->bd_rtout > 0 && d->bd_state == BPF_IDLE) {
+ callout_reset(&d->bd_callout, d->bd_rtout,
+ bpf_timed_out, d);
+ d->bd_state = BPF_WAITING;
+ }
+ }
+ }
+ BPFD_UNLOCK(d);
+ return (revents);
+}
+
+/*
+ * Support for kevent() system call. Register EVFILT_READ filters and
+ * reject all others.
+ */
+int
+bpfkqfilter(struct cdev *dev, struct knote *kn)
+{
+ struct bpf_d *d;
+
+ if (devfs_get_cdevpriv((void **)&d) != 0 ||
+ kn->kn_filter != EVFILT_READ)
+ return (1);
+
+ /*
+ * Refresh PID associated with this descriptor.
+ */
+ BPFD_LOCK(d);
+ d->bd_pid = curthread->td_proc->p_pid;
+ kn->kn_fop = &bpfread_filtops;
+ kn->kn_hook = d;
+ knlist_add(&d->bd_sel.si_note, kn, 1);
+ BPFD_UNLOCK(d);
+
+ return (0);
+}
+
+static void
+filt_bpfdetach(struct knote *kn)
+{
+ struct bpf_d *d = (struct bpf_d *)kn->kn_hook;
+
+ knlist_remove(&d->bd_sel.si_note, kn, 0);
+}
+
+static int
+filt_bpfread(struct knote *kn, long hint)
+{
+ struct bpf_d *d = (struct bpf_d *)kn->kn_hook;
+ int ready;
+
+ BPFD_LOCK_ASSERT(d);
+ ready = bpf_ready(d);
+ if (ready) {
+ kn->kn_data = d->bd_slen;
+ if (d->bd_hbuf)
+ kn->kn_data += d->bd_hlen;
+ } else if (d->bd_rtout > 0 && d->bd_state == BPF_IDLE) {
+ callout_reset(&d->bd_callout, d->bd_rtout,
+ bpf_timed_out, d);
+ d->bd_state = BPF_WAITING;
+ }
+
+ return (ready);
+}
+
+/*
+ * Incoming linkage from device drivers. Process the packet pkt, of length
+ * pktlen, which is stored in a contiguous buffer. The packet is parsed
+ * by each process' filter, and if accepted, stashed into the corresponding
+ * buffer.
+ */
+void
+bpf_tap(struct bpf_if *bp, u_char *pkt, u_int pktlen)
+{
+ struct bpf_d *d;
+#ifdef BPF_JITTER
+ bpf_jit_filter *bf;
+#endif
+ u_int slen;
+ int gottime;
+ struct timeval tv;
+
+ gottime = 0;
+ BPFIF_LOCK(bp);
+ LIST_FOREACH(d, &bp->bif_dlist, bd_next) {
+ BPFD_LOCK(d);
+ ++d->bd_rcount;
+ /*
+		 * NB: We don't call BPF_CHECK_DIRECTION() here since there is no
+		 * way for the caller to indicate to us whether this packet
+		 * is inbound or outbound. In the bpf_mtap() routines, we use
+ * the interface pointers on the mbuf to figure it out.
+ */
+#ifdef BPF_JITTER
+ bf = bpf_jitter_enable != 0 ? d->bd_bfilter : NULL;
+ if (bf != NULL)
+ slen = (*(bf->func))(pkt, pktlen, pktlen);
+ else
+#endif
+ slen = bpf_filter(d->bd_rfilter, pkt, pktlen, pktlen);
+ if (slen != 0) {
+ d->bd_fcount++;
+ if (!gottime) {
+ microtime(&tv);
+ gottime = 1;
+ }
+#ifdef MAC
+ if (mac_bpfdesc_check_receive(d, bp->bif_ifp) == 0)
+#endif
+ catchpacket(d, pkt, pktlen, slen,
+ bpf_append_bytes, &tv);
+ }
+ BPFD_UNLOCK(d);
+ }
+ BPFIF_UNLOCK(bp);
+}
+
+#define BPF_CHECK_DIRECTION(d, r, i) \
+ (((d)->bd_direction == BPF_D_IN && (r) != (i)) || \
+ ((d)->bd_direction == BPF_D_OUT && (r) == (i)))
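+
+/*
+ * BPF_CHECK_DIRECTION() is true when a packet has to be skipped: in
+ * BPF_D_IN mode, packets whose receive interface (r) differs from the
+ * tap interface (i) are outbound and are dropped; in BPF_D_OUT mode,
+ * packets received on the tap interface are inbound and are dropped.
+ * A BPF_D_INOUT descriptor matches neither test and sees both
+ * directions.
+ */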
+
+/*
+ * Incoming linkage from device drivers, when packet is in an mbuf chain.
+ */
+void
+bpf_mtap(struct bpf_if *bp, struct mbuf *m)
+{
+ struct bpf_d *d;
+#ifdef BPF_JITTER
+ bpf_jit_filter *bf;
+#endif
+ u_int pktlen, slen;
+ int gottime;
+ struct timeval tv;
+
+ /* Skip outgoing duplicate packets. */
+ if ((m->m_flags & M_PROMISC) != 0 && m->m_pkthdr.rcvif == NULL) {
+ m->m_flags &= ~M_PROMISC;
+ return;
+ }
+
+ gottime = 0;
+
+ pktlen = m_length(m, NULL);
+
+ BPFIF_LOCK(bp);
+ LIST_FOREACH(d, &bp->bif_dlist, bd_next) {
+ if (BPF_CHECK_DIRECTION(d, m->m_pkthdr.rcvif, bp->bif_ifp))
+ continue;
+ BPFD_LOCK(d);
+ ++d->bd_rcount;
+#ifdef BPF_JITTER
+ bf = bpf_jitter_enable != 0 ? d->bd_bfilter : NULL;
+ /* XXX We cannot handle multiple mbufs. */
+ if (bf != NULL && m->m_next == NULL)
+ slen = (*(bf->func))(mtod(m, u_char *), pktlen, pktlen);
+ else
+#endif
+ slen = bpf_filter(d->bd_rfilter, (u_char *)m, pktlen, 0);
+ if (slen != 0) {
+ d->bd_fcount++;
+ if (!gottime) {
+ microtime(&tv);
+ gottime = 1;
+ }
+#ifdef MAC
+ if (mac_bpfdesc_check_receive(d, bp->bif_ifp) == 0)
+#endif
+ catchpacket(d, (u_char *)m, pktlen, slen,
+ bpf_append_mbuf, &tv);
+ }
+ BPFD_UNLOCK(d);
+ }
+ BPFIF_UNLOCK(bp);
+}
+
+/*
+ * Incoming linkage from device drivers, when packet is in
+ * an mbuf chain and to be prepended by a contiguous header.
+ */
+void
+bpf_mtap2(struct bpf_if *bp, void *data, u_int dlen, struct mbuf *m)
+{
+ struct mbuf mb;
+ struct bpf_d *d;
+ u_int pktlen, slen;
+ int gottime;
+ struct timeval tv;
+
+ /* Skip outgoing duplicate packets. */
+ if ((m->m_flags & M_PROMISC) != 0 && m->m_pkthdr.rcvif == NULL) {
+ m->m_flags &= ~M_PROMISC;
+ return;
+ }
+
+ gottime = 0;
+
+ pktlen = m_length(m, NULL);
+ /*
+ * Craft on-stack mbuf suitable for passing to bpf_filter.
+	 * Note that we cut corners here; we only set up what's
+ * absolutely needed--this mbuf should never go anywhere else.
+ */
+ mb.m_next = m;
+ mb.m_data = data;
+ mb.m_len = dlen;
+ pktlen += dlen;
+
+ BPFIF_LOCK(bp);
+ LIST_FOREACH(d, &bp->bif_dlist, bd_next) {
+ if (BPF_CHECK_DIRECTION(d, m->m_pkthdr.rcvif, bp->bif_ifp))
+ continue;
+ BPFD_LOCK(d);
+ ++d->bd_rcount;
+ slen = bpf_filter(d->bd_rfilter, (u_char *)&mb, pktlen, 0);
+ if (slen != 0) {
+ d->bd_fcount++;
+ if (!gottime) {
+ microtime(&tv);
+ gottime = 1;
+ }
+#ifdef MAC
+ if (mac_bpfdesc_check_receive(d, bp->bif_ifp) == 0)
+#endif
+ catchpacket(d, (u_char *)&mb, pktlen, slen,
+ bpf_append_mbuf, &tv);
+ }
+ BPFD_UNLOCK(d);
+ }
+ BPFIF_UNLOCK(bp);
+}
+
+#undef BPF_CHECK_DIRECTION
+
+/*
+ * Move the packet data from interface memory (pkt) into the
+ * store buffer. "cpfn" is the routine called to do the actual data
+ * transfer. bcopy is passed in to copy contiguous chunks, while
+ * bpf_append_mbuf is passed in to copy mbuf chains. In the latter case,
+ * pkt is really an mbuf.
+ */
+static void
+catchpacket(struct bpf_d *d, u_char *pkt, u_int pktlen, u_int snaplen,
+ void (*cpfn)(struct bpf_d *, caddr_t, u_int, void *, u_int),
+ struct timeval *tv)
+{
+ struct bpf_hdr hdr;
+#ifdef COMPAT_FREEBSD32
+ struct bpf_hdr32 hdr32;
+#endif
+ int totlen, curlen;
+ int hdrlen = d->bd_bif->bif_hdrlen;
+ int do_wakeup = 0;
+
+ BPFD_LOCK_ASSERT(d);
+
+ /*
+ * Detect whether user space has released a buffer back to us, and if
+ * so, move it from being a hold buffer to a free buffer. This may
+ * not be the best place to do it (for example, we might only want to
+ * run this check if we need the space), but for now it's a reliable
+ * spot to do it.
+ */
+ if (d->bd_fbuf == NULL && bpf_canfreebuf(d)) {
+ d->bd_fbuf = d->bd_hbuf;
+ d->bd_hbuf = NULL;
+ d->bd_hlen = 0;
+ bpf_buf_reclaimed(d);
+ }
+
+ /*
+ * Figure out how many bytes to move. If the packet is
+ * greater or equal to the snapshot length, transfer that
+ * much. Otherwise, transfer the whole packet (unless
+ * we hit the buffer size limit).
+ */
+ totlen = hdrlen + min(snaplen, pktlen);
+ if (totlen > d->bd_bufsize)
+ totlen = d->bd_bufsize;
+
+ /*
+ * Round up the end of the previous packet to the next longword.
+ *
+ * Drop the packet if there's no room and no hope of room.
+ * If the packet would overflow the storage buffer or the storage
+ * buffer is considered immutable by the buffer model, try to rotate
+ * the buffer and wake up pending processes.
+ */
+#ifdef COMPAT_FREEBSD32
+ if (d->bd_compat32)
+ curlen = BPF_WORDALIGN32(d->bd_slen);
+ else
+#endif
+ curlen = BPF_WORDALIGN(d->bd_slen);
+ if (curlen + totlen > d->bd_bufsize || !bpf_canwritebuf(d)) {
+ if (d->bd_fbuf == NULL) {
+ /*
+ * There's no room in the store buffer, and no
+ * prospect of room, so drop the packet. Notify the
+ * buffer model.
+ */
+ bpf_buffull(d);
+ ++d->bd_dcount;
+ return;
+ }
+ ROTATE_BUFFERS(d);
+ do_wakeup = 1;
+ curlen = 0;
+ } else if (d->bd_immediate || d->bd_state == BPF_TIMED_OUT)
+ /*
+ * Immediate mode is set, or the read timeout has already
+ * expired during a select call. A packet arrived, so the
+ * reader should be woken up.
+ */
+ do_wakeup = 1;
+#ifdef COMPAT_FREEBSD32
+ /*
+ * If this is a 32-bit stream, then stick a 32-bit header at the
+ * front and copy the data into the buffer.
+ */
+ if (d->bd_compat32) {
+ bzero(&hdr32, sizeof(hdr32));
+ hdr32.bh_tstamp.tv_sec = tv->tv_sec;
+ hdr32.bh_tstamp.tv_usec = tv->tv_usec;
+ hdr32.bh_datalen = pktlen;
+ hdr32.bh_hdrlen = hdrlen;
+ hdr.bh_caplen = hdr32.bh_caplen = totlen - hdrlen;
+ bpf_append_bytes(d, d->bd_sbuf, curlen, &hdr32, sizeof(hdr32));
+ goto copy;
+ }
+#endif
+
+ /*
+ * Append the bpf header. Note we append the actual header size, but
+ * move forward the length of the header plus padding.
+ */
+ bzero(&hdr, sizeof(hdr));
+ hdr.bh_tstamp = *tv;
+ hdr.bh_datalen = pktlen;
+ hdr.bh_hdrlen = hdrlen;
+ hdr.bh_caplen = totlen - hdrlen;
+ bpf_append_bytes(d, d->bd_sbuf, curlen, &hdr, sizeof(hdr));
+
+ /*
+ * Copy the packet data into the store buffer and update its length.
+ */
+#ifdef COMPAT_FREEBSD32
+ copy:
+#endif
+ (*cpfn)(d, d->bd_sbuf, curlen + hdrlen, pkt, hdr.bh_caplen);
+ d->bd_slen = curlen + totlen;
+
+ if (do_wakeup)
+ bpf_wakeup(d);
+}
+
+/*
+ * Free buffers currently in use by a descriptor.
+ * Called on close.
+ */
+static void
+bpf_freed(struct bpf_d *d)
+{
+
+ /*
+ * We don't need to lock out interrupts since this descriptor has
+ * been detached from its interface and it hasn't yet been marked
+ * free.
+ */
+ bpf_free(d);
+ if (d->bd_rfilter != NULL) {
+ free((caddr_t)d->bd_rfilter, M_BPF);
+#ifdef BPF_JITTER
+ if (d->bd_bfilter != NULL)
+ bpf_destroy_jit_filter(d->bd_bfilter);
+#endif
+ }
+ if (d->bd_wfilter != NULL)
+ free((caddr_t)d->bd_wfilter, M_BPF);
+ mtx_destroy(&d->bd_mtx);
+}
+
+/*
+ * Attach an interface to bpf. dlt is the link layer type; hdrlen is the
+ * fixed size of the link header (variable length headers not yet supported).
+ */
+void
+bpfattach(struct ifnet *ifp, u_int dlt, u_int hdrlen)
+{
+
+ bpfattach2(ifp, dlt, hdrlen, &ifp->if_bpf);
+}
+
+/*
+ * Attach an interface to bpf. ifp is a pointer to the structure
+ * defining the interface to be attached, dlt is the link layer type,
+ * and hdrlen is the fixed size of the link header (variable length
+ * headers are not yet supported).
+ */
+void
+bpfattach2(struct ifnet *ifp, u_int dlt, u_int hdrlen, struct bpf_if **driverp)
+{
+ struct bpf_if *bp;
+
+ bp = malloc(sizeof(*bp), M_BPF, M_NOWAIT | M_ZERO);
+ if (bp == NULL)
+ panic("bpfattach");
+
+ LIST_INIT(&bp->bif_dlist);
+ bp->bif_ifp = ifp;
+ bp->bif_dlt = dlt;
+ mtx_init(&bp->bif_mtx, "bpf interface lock", NULL, MTX_DEF);
+ KASSERT(*driverp == NULL, ("bpfattach2: driverp already initialized"));
+ *driverp = bp;
+
+ mtx_lock(&bpf_mtx);
+ LIST_INSERT_HEAD(&bpf_iflist, bp, bif_next);
+ mtx_unlock(&bpf_mtx);
+
+ /*
+ * Compute the length of the bpf header. This is not necessarily
+ * equal to SIZEOF_BPF_HDR because we want to insert spacing such
+ * that the network layer header begins on a longword boundary (for
+ * performance reasons and to alleviate alignment restrictions).
+ */
+ bp->bif_hdrlen = BPF_WORDALIGN(hdrlen + SIZEOF_BPF_HDR) - hdrlen;
+
+ if (bootverbose)
+ if_printf(ifp, "bpf attached\n");
+}
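The last line is the only subtle part: the capture header is padded so that the network-layer data following it lands on a longword boundary. A small host-side sketch of the same arithmetic for an Ethernet link header (hdrlen = 14); sizeof(struct bpf_hdr) stands in for the kernel's SIZEOF_BPF_HDR, which is close enough to show the rounding:

    #include <sys/types.h>
    #include <net/bpf.h>
    #include <stdio.h>

    int
    main(void)
    {
            size_t hdrlen = 14;     /* DLT_EN10MB link header */

            /* Same formula as bpfattach2(): pad so the payload is aligned. */
            printf("bif_hdrlen = %zu\n",
                (size_t)BPF_WORDALIGN(hdrlen + sizeof(struct bpf_hdr)) - hdrlen);
            return (0);
    }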
+
+/*
+ * Detach bpf from an interface. This involves detaching each descriptor
+ * associated with the interface, and leaving bd_bif NULL. Notify each
+ * descriptor as it's detached so that any sleepers wake up and get
+ * ENXIO.
+ */
+void
+bpfdetach(struct ifnet *ifp)
+{
+ struct bpf_if *bp;
+ struct bpf_d *d;
+
+ /* Locate BPF interface information */
+ mtx_lock(&bpf_mtx);
+ LIST_FOREACH(bp, &bpf_iflist, bif_next) {
+ if (ifp == bp->bif_ifp)
+ break;
+ }
+
+ /* Interface wasn't attached */
+ if ((bp == NULL) || (bp->bif_ifp == NULL)) {
+ mtx_unlock(&bpf_mtx);
+ printf("bpfdetach: %s was not attached\n", ifp->if_xname);
+ return;
+ }
+
+ LIST_REMOVE(bp, bif_next);
+ mtx_unlock(&bpf_mtx);
+
+ while ((d = LIST_FIRST(&bp->bif_dlist)) != NULL) {
+ bpf_detachd(d);
+ BPFD_LOCK(d);
+ bpf_wakeup(d);
+ BPFD_UNLOCK(d);
+ }
+
+ mtx_destroy(&bp->bif_mtx);
+ free(bp, M_BPF);
+}
+
+/*
+ * Get the list of available data link types for the interface.
+ */
+static int
+bpf_getdltlist(struct bpf_d *d, struct bpf_dltlist *bfl)
+{
+ int n, error;
+ struct ifnet *ifp;
+ struct bpf_if *bp;
+
+ ifp = d->bd_bif->bif_ifp;
+ n = 0;
+ error = 0;
+ mtx_lock(&bpf_mtx);
+ LIST_FOREACH(bp, &bpf_iflist, bif_next) {
+ if (bp->bif_ifp != ifp)
+ continue;
+ if (bfl->bfl_list != NULL) {
+ if (n >= bfl->bfl_len) {
+ mtx_unlock(&bpf_mtx);
+ return (ENOMEM);
+ }
+ error = copyout(&bp->bif_dlt,
+ bfl->bfl_list + n, sizeof(u_int));
+ }
+ n++;
+ }
+ mtx_unlock(&bpf_mtx);
+ bfl->bfl_len = n;
+ return (error);
+}
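From userspace this handler is driven in two passes: once with bfl_list == NULL so only the count comes back, then again with an allocated array. A hedged sketch (error paths trimmed; fd is an already-open bpf descriptor bound to an interface):

    #include <sys/types.h>
    #include <sys/ioctl.h>
    #include <net/bpf.h>
    #include <stdlib.h>
    #include <string.h>

    static u_int *
    fetch_dlts(int fd, u_int *count)
    {
            struct bpf_dltlist bfl;
            u_int *list;

            memset(&bfl, 0, sizeof(bfl));       /* pass 1: size query only */
            if (ioctl(fd, BIOCGDLTLIST, &bfl) < 0)
                    return (NULL);
            list = malloc(bfl.bfl_len * sizeof(u_int));
            if (list == NULL)
                    return (NULL);
            bfl.bfl_list = list;                /* pass 2: copy out the DLTs */
            if (ioctl(fd, BIOCGDLTLIST, &bfl) < 0) {
                    free(list);
                    return (NULL);
            }
            *count = bfl.bfl_len;
            return (list);
    }

A matching ioctl(fd, BIOCSDLT, &dlt) then selects one of the returned types, which lands in bpf_setdlt() below.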
+
+/*
+ * Set the data link type of a BPF instance.
+ */
+static int
+bpf_setdlt(struct bpf_d *d, u_int dlt)
+{
+ int error, opromisc;
+ struct ifnet *ifp;
+ struct bpf_if *bp;
+
+ if (d->bd_bif->bif_dlt == dlt)
+ return (0);
+ ifp = d->bd_bif->bif_ifp;
+ mtx_lock(&bpf_mtx);
+ LIST_FOREACH(bp, &bpf_iflist, bif_next) {
+ if (bp->bif_ifp == ifp && bp->bif_dlt == dlt)
+ break;
+ }
+ mtx_unlock(&bpf_mtx);
+ if (bp != NULL) {
+ opromisc = d->bd_promisc;
+ bpf_detachd(d);
+ bpf_attachd(d, bp);
+ BPFD_LOCK(d);
+ reset_d(d);
+ BPFD_UNLOCK(d);
+ if (opromisc) {
+ error = ifpromisc(bp->bif_ifp, 1);
+ if (error)
+ if_printf(bp->bif_ifp,
+ "bpf_setdlt: ifpromisc failed (%d)\n",
+ error);
+ else
+ d->bd_promisc = 1;
+ }
+ }
+ return (bp == NULL ? EINVAL : 0);
+}
+
+static void
+bpf_drvinit(void *unused)
+{
+ struct cdev *dev;
+
+ mtx_init(&bpf_mtx, "bpf global lock", NULL, MTX_DEF);
+ LIST_INIT(&bpf_iflist);
+
+ dev = make_dev(&bpf_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600, "bpf");
+ /* For compatibility */
+ make_dev_alias(dev, "bpf0");
+}
+
+/*
+ * Zero out the various packet counters associated with all of the bpf
+ * descriptors. At some point, we will probably want to get a bit more
+ * granular and allow the user to specify descriptors to be zeroed.
+ */
+static void
+bpf_zero_counters(void)
+{
+ struct bpf_if *bp;
+ struct bpf_d *bd;
+
+ mtx_lock(&bpf_mtx);
+ LIST_FOREACH(bp, &bpf_iflist, bif_next) {
+ BPFIF_LOCK(bp);
+ LIST_FOREACH(bd, &bp->bif_dlist, bd_next) {
+ BPFD_LOCK(bd);
+ bd->bd_rcount = 0;
+ bd->bd_dcount = 0;
+ bd->bd_fcount = 0;
+ bd->bd_wcount = 0;
+ bd->bd_wfcount = 0;
+ bd->bd_zcopy = 0;
+ BPFD_UNLOCK(bd);
+ }
+ BPFIF_UNLOCK(bp);
+ }
+ mtx_unlock(&bpf_mtx);
+}
+
+static void
+bpfstats_fill_xbpf(struct xbpf_d *d, struct bpf_d *bd)
+{
+
+ bzero(d, sizeof(*d));
+ BPFD_LOCK_ASSERT(bd);
+ d->bd_structsize = sizeof(*d);
+ d->bd_immediate = bd->bd_immediate;
+ d->bd_promisc = bd->bd_promisc;
+ d->bd_hdrcmplt = bd->bd_hdrcmplt;
+ d->bd_direction = bd->bd_direction;
+ d->bd_feedback = bd->bd_feedback;
+ d->bd_async = bd->bd_async;
+ d->bd_rcount = bd->bd_rcount;
+ d->bd_dcount = bd->bd_dcount;
+ d->bd_fcount = bd->bd_fcount;
+ d->bd_sig = bd->bd_sig;
+ d->bd_slen = bd->bd_slen;
+ d->bd_hlen = bd->bd_hlen;
+ d->bd_bufsize = bd->bd_bufsize;
+ d->bd_pid = bd->bd_pid;
+ strlcpy(d->bd_ifname,
+ bd->bd_bif->bif_ifp->if_xname, IFNAMSIZ);
+ d->bd_locked = bd->bd_locked;
+ d->bd_wcount = bd->bd_wcount;
+ d->bd_wdcount = bd->bd_wdcount;
+ d->bd_wfcount = bd->bd_wfcount;
+ d->bd_zcopy = bd->bd_zcopy;
+ d->bd_bufmode = bd->bd_bufmode;
+}
+
+static int
+bpf_stats_sysctl(SYSCTL_HANDLER_ARGS)
+{
+ struct xbpf_d *xbdbuf, *xbd, zerostats;
+ int index, error;
+ struct bpf_if *bp;
+ struct bpf_d *bd;
+
+ /*
+ * XXX This is not technically correct. It is possible for non-
+ * privileged users to open bpf devices. It would make sense
+ * if the users who opened the devices were able to retrieve
+ * the statistics for them, too.
+ */
+ error = priv_check(req->td, PRIV_NET_BPF);
+ if (error)
+ return (error);
+ /*
+ * Check to see if the user is requesting that the counters be
+ * zeroed out. Explicitly check that the supplied data is zeroed,
+ * as we aren't allowing the user to set the counters currently.
+ */
+ if (req->newptr != NULL) {
+ if (req->newlen != sizeof(zerostats))
+ return (EINVAL);
+ bzero(&zerostats, sizeof(zerostats));
+ xbd = req->newptr;
+ if (bcmp(xbd, &zerostats, sizeof(*xbd)) != 0)
+ return (EINVAL);
+ bpf_zero_counters();
+ return (0);
+ }
+ if (req->oldptr == NULL)
+ return (SYSCTL_OUT(req, 0, bpf_bpfd_cnt * sizeof(*xbd)));
+ if (bpf_bpfd_cnt == 0)
+ return (SYSCTL_OUT(req, 0, 0));
+ xbdbuf = malloc(req->oldlen, M_BPF, M_WAITOK);
+ mtx_lock(&bpf_mtx);
+ if (req->oldlen < (bpf_bpfd_cnt * sizeof(*xbd))) {
+ mtx_unlock(&bpf_mtx);
+ free(xbdbuf, M_BPF);
+ return (ENOMEM);
+ }
+ index = 0;
+ LIST_FOREACH(bp, &bpf_iflist, bif_next) {
+ BPFIF_LOCK(bp);
+ LIST_FOREACH(bd, &bp->bif_dlist, bd_next) {
+ xbd = &xbdbuf[index++];
+ BPFD_LOCK(bd);
+ bpfstats_fill_xbpf(xbd, bd);
+ BPFD_UNLOCK(bd);
+ }
+ BPFIF_UNLOCK(bp);
+ }
+ mtx_unlock(&bpf_mtx);
+ error = SYSCTL_OUT(req, xbdbuf, index * sizeof(*xbd));
+ free(xbdbuf, M_BPF);
+ return (error);
+}
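The same handler serves read requests from userspace; a hedged sketch using sysctlbyname() (the MIB name "net.bpf.stats" follows the usual naming for the _net_bpf node but is declared elsewhere, so treat it as an assumption; the two-call sizing is inherently racy, as with most sysctl consumers):

    #include <sys/types.h>
    #include <sys/sysctl.h>
    #include <stdio.h>
    #include <stdlib.h>

    int
    main(void)
    {
            size_t len = 0;
            void *buf;

            /* First call sizes the buffer (the oldptr == NULL path above). */
            if (sysctlbyname("net.bpf.stats", NULL, &len, NULL, 0) < 0)
                    return (1);
            buf = malloc(len);
            if (buf == NULL)
                    return (1);
            if (sysctlbyname("net.bpf.stats", buf, &len, NULL, 0) < 0)
                    return (1);
            printf("%zu bytes of struct xbpf_d records\n", len);
            free(buf);
            return (0);
    }

Writing a zeroed struct xbpf_d through the same node triggers the bpf_zero_counters() path instead.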
+
+SYSINIT(bpfdev, SI_SUB_DRIVERS, SI_ORDER_MIDDLE, bpf_drvinit, NULL);
+
+#else /* !DEV_BPF && !NETGRAPH_BPF */
+/*
+ * NOP stubs to allow bpf-using drivers to load and function.
+ *
+ * A 'better' implementation would allow the core bpf functionality
+ * to be loaded at runtime.
+ */
+static struct bpf_if bp_null;
+
+void
+bpf_tap(struct bpf_if *bp, u_char *pkt, u_int pktlen)
+{
+}
+
+void
+bpf_mtap(struct bpf_if *bp, struct mbuf *m)
+{
+}
+
+void
+bpf_mtap2(struct bpf_if *bp, void *d, u_int l, struct mbuf *m)
+{
+}
+
+void
+bpfattach(struct ifnet *ifp, u_int dlt, u_int hdrlen)
+{
+
+ bpfattach2(ifp, dlt, hdrlen, &ifp->if_bpf);
+}
+
+void
+bpfattach2(struct ifnet *ifp, u_int dlt, u_int hdrlen, struct bpf_if **driverp)
+{
+
+ *driverp = &bp_null;
+}
+
+void
+bpfdetach(struct ifnet *ifp)
+{
+}
+
+u_int
+bpf_filter(const struct bpf_insn *pc, u_char *p, u_int wirelen, u_int buflen)
+{
+ return -1; /* "no filter" behaviour */
+}
+
+int
+bpf_validate(const struct bpf_insn *f, int len)
+{
+ return 0; /* false */
+}
+
+#endif /* !DEV_BPF && !NETGRAPH_BPF */
diff --git a/rtems/freebsd/net/bpf.h b/rtems/freebsd/net/bpf.h
new file mode 100644
index 00000000..d9dd4289
--- /dev/null
+++ b/rtems/freebsd/net/bpf.h
@@ -0,0 +1,974 @@
+/*-
+ * Copyright (c) 1990, 1991, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from the Stanford/CMU enet packet filter,
+ * (net/enet.c) distributed as part of 4.3BSD, and code contributed
+ * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence
+ * Berkeley Laboratory.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)bpf.h 8.1 (Berkeley) 6/10/93
+ * @(#)bpf.h 1.34 (LBL) 6/16/96
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _NET_BPF_HH_
+#define _NET_BPF_HH_
+
+/* BSD style release date */
+#define BPF_RELEASE 199606
+
+typedef int32_t bpf_int32;
+typedef u_int32_t bpf_u_int32;
+
+/*
+ * Alignment macros. BPF_WORDALIGN rounds up to the next
+ * even multiple of BPF_ALIGNMENT.
+ */
+#define BPF_ALIGNMENT sizeof(long)
+#define BPF_WORDALIGN(x) (((x)+(BPF_ALIGNMENT-1))&~(BPF_ALIGNMENT-1))
+
+#define BPF_MAXINSNS 512
+#define BPF_MAXBUFSIZE 0x80000
+#define BPF_MINBUFSIZE 32
+
+/*
+ * Structure for BIOCSETF.
+ */
+struct bpf_program {
+ u_int bf_len;
+ struct bpf_insn *bf_insns;
+};
+
+/*
+ * Struct returned by BIOCGSTATS.
+ */
+struct bpf_stat {
+ u_int bs_recv; /* number of packets received */
+ u_int bs_drop; /* number of packets dropped */
+};
+
+/*
+ * Struct returned by BIOCVERSION. This represents the version number of
+ * the filter language described by the instruction encodings below.
+ * bpf understands a program iff kernel_major == filter_major &&
+ * kernel_minor >= filter_minor, that is, if the value returned by the
+ * running kernel has the same major number and a minor number equal
+ * to or less than the filter being downloaded. Otherwise, the
+ * results are undefined, meaning an error may be returned or packets
+ * may be accepted haphazardly.
+ * It has nothing to do with the source code version.
+ */
+struct bpf_version {
+ u_short bv_major;
+ u_short bv_minor;
+};
+/* Current version number of filter architecture. */
+#define BPF_MAJOR_VERSION 1
+#define BPF_MINOR_VERSION 1
+
+/*
+ * Historically, BPF has supported a single buffering model, first using mbuf
+ * clusters in kernel, and later using malloc(9) buffers in kernel. We now
+ * support multiple buffering modes, which may be queried and set using
+ * BIOCGETBUFMODE and BIOCSETBUFMODE. So as to avoid handling the complexity
+ * of changing modes while sniffing packets, the mode becomes fixed once an
+ * interface has been attached to the BPF descriptor.
+ */
+#define BPF_BUFMODE_BUFFER 1 /* Kernel buffers with read(). */
+#define BPF_BUFMODE_ZBUF 2 /* Zero-copy buffers. */
+
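Because the mode freezes at attach time, a zero-copy consumer must switch modes on a freshly opened descriptor, before BIOCSETIF. A minimal sketch using the BIOCSETBUFMODE ioctl defined below:

    #include <sys/types.h>
    #include <sys/ioctl.h>
    #include <net/bpf.h>

    /* Request zero-copy buffering; must precede interface binding. */
    static int
    use_zbuf_mode(int fd)
    {
            u_int mode = BPF_BUFMODE_ZBUF;

            return (ioctl(fd, BIOCSETBUFMODE, &mode));
    }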
+/*-
+ * Struct used by BIOCSETZBUF, BIOCROTZBUF: describes up to two zero-copy
+ * buffers as used by BPF.
+ */
+struct bpf_zbuf {
+ void *bz_bufa; /* Location of 'a' zero-copy buffer. */
+ void *bz_bufb; /* Location of 'b' zero-copy buffer. */
+ size_t bz_buflen; /* Size of zero-copy buffers. */
+};
+
+#define BIOCGBLEN _IOR('B',102, u_int)
+#define BIOCSBLEN _IOWR('B',102, u_int)
+#define BIOCSETF _IOW('B',103, struct bpf_program)
+#define BIOCFLUSH _IO('B',104)
+#define BIOCPROMISC _IO('B',105)
+#define BIOCGDLT _IOR('B',106, u_int)
+#define BIOCGETIF _IOR('B',107, struct ifreq)
+#define BIOCSETIF _IOW('B',108, struct ifreq)
+#define BIOCSRTIMEOUT _IOW('B',109, struct timeval)
+#define BIOCGRTIMEOUT _IOR('B',110, struct timeval)
+#define BIOCGSTATS _IOR('B',111, struct bpf_stat)
+#define BIOCIMMEDIATE _IOW('B',112, u_int)
+#define BIOCVERSION _IOR('B',113, struct bpf_version)
+#define BIOCGRSIG _IOR('B',114, u_int)
+#define BIOCSRSIG _IOW('B',115, u_int)
+#define BIOCGHDRCMPLT _IOR('B',116, u_int)
+#define BIOCSHDRCMPLT _IOW('B',117, u_int)
+#define BIOCGDIRECTION _IOR('B',118, u_int)
+#define BIOCSDIRECTION _IOW('B',119, u_int)
+#define BIOCSDLT _IOW('B',120, u_int)
+#define BIOCGDLTLIST _IOWR('B',121, struct bpf_dltlist)
+#define BIOCLOCK _IO('B', 122)
+#define BIOCSETWF _IOW('B',123, struct bpf_program)
+#define BIOCFEEDBACK _IOW('B',124, u_int)
+#define BIOCGETBUFMODE _IOR('B',125, u_int)
+#define BIOCSETBUFMODE _IOW('B',126, u_int)
+#define BIOCGETZMAX _IOR('B',127, size_t)
+#define BIOCROTZBUF _IOR('B',128, struct bpf_zbuf)
+#define BIOCSETZBUF _IOW('B',129, struct bpf_zbuf)
+#define BIOCSETFNR _IOW('B',130, struct bpf_program)
+
+/* Obsolete */
+#define BIOCGSEESENT BIOCGDIRECTION
+#define BIOCSSEESENT BIOCSDIRECTION
+
+/* Packet directions */
+enum bpf_direction {
+ BPF_D_IN, /* See incoming packets */
+ BPF_D_INOUT, /* See incoming and outgoing packets */
+ BPF_D_OUT /* See outgoing packets */
+};
+
+/*
+ * Structure prepended to each packet.
+ */
+struct bpf_hdr {
+ struct timeval bh_tstamp; /* time stamp */
+ bpf_u_int32 bh_caplen; /* length of captured portion */
+ bpf_u_int32 bh_datalen; /* original length of packet */
+ u_short bh_hdrlen; /* length of bpf header (this struct
+ plus alignment padding) */
+};
+/*
+ * Because the structure above is not a multiple of 4 bytes, some compilers
+ * will insist on inserting padding; hence, sizeof(struct bpf_hdr) won't work.
+ * Only the kernel needs to know about it; applications use bh_hdrlen.
+ */
+#ifdef _KERNEL
+#define SIZEOF_BPF_HDR (sizeof(struct bpf_hdr) <= 20 ? 18 : \
+ sizeof(struct bpf_hdr))
+#endif
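A read(2) buffer therefore contains a sequence of records, each a bpf_hdr followed by bh_caplen bytes of packet, with the next record starting at the following word boundary. A hedged sketch of the standard walk (no handling of truncated captures where bh_caplen < bh_datalen):

    #include <sys/types.h>
    #include <net/bpf.h>
    #include <stddef.h>

    static void
    walk_records(const unsigned char *buf, size_t buflen,
        void (*cb)(const unsigned char *pkt, u_int caplen))
    {
            const unsigned char *p = buf;

            while (p < buf + buflen) {
                    const struct bpf_hdr *bh = (const struct bpf_hdr *)p;

                    cb(p + bh->bh_hdrlen, bh->bh_caplen);
                    /* Records are laid down on BPF_WORDALIGN boundaries. */
                    p += BPF_WORDALIGN(bh->bh_hdrlen + bh->bh_caplen);
            }
    }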
+
+/*
+ * When using zero-copy BPF buffers, a shared memory header is present
+ * allowing the kernel BPF implementation and user process to synchronize
+ * without using system calls. This structure defines that header. When
+ * accessing these fields, appropriate atomic operations and memory barriers
+ * are required in order not to see stale or out-of-order data; see bpf(4)
+ * for reference code to access these fields from userspace.
+ *
+ * The layout of this structure is critical, and it must not be changed; it must
+ * fit in a single page on all architectures.
+ */
+struct bpf_zbuf_header {
+ volatile u_int bzh_kernel_gen; /* Kernel generation number. */
+ volatile u_int bzh_kernel_len; /* Length of data in the buffer. */
+ volatile u_int bzh_user_gen; /* User generation number. */
+ u_int _bzh_pad[5];
+};
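A sketch of the userspace half of that handshake, modeled on the bpf(4) reference code cited above; the generation-number protocol is shown, but the required acquire/release barriers (and wraparound handling) are deliberately omitted:

    #include <sys/types.h>
    #include <net/bpf.h>

    /* Kernel advances bzh_kernel_gen past ours when a buffer is complete. */
    static int
    zbuf_kernel_done(const struct bpf_zbuf_header *bzh)
    {
            return (bzh->bzh_kernel_gen > bzh->bzh_user_gen);
    }

    /* Hand the buffer back by catching our generation number up. */
    static void
    zbuf_ack(struct bpf_zbuf_header *bzh)
    {
            bzh->bzh_user_gen = bzh->bzh_kernel_gen;
    }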
+
+/*
+ * Data-link level type codes.
+ */
+#define DLT_NULL 0 /* BSD loopback encapsulation */
+#define DLT_EN10MB 1 /* Ethernet (10Mb) */
+#define DLT_EN3MB 2 /* Experimental Ethernet (3Mb) */
+#define DLT_AX25 3 /* Amateur Radio AX.25 */
+#define DLT_PRONET 4 /* Proteon ProNET Token Ring */
+#define DLT_CHAOS 5 /* Chaos */
+#define DLT_IEEE802 6 /* IEEE 802 Networks */
+#define DLT_ARCNET 7 /* ARCNET */
+#define DLT_SLIP 8 /* Serial Line IP */
+#define DLT_PPP 9 /* Point-to-point Protocol */
+#define DLT_FDDI 10 /* FDDI */
+#define DLT_ATM_RFC1483 11 /* LLC/SNAP encapsulated atm */
+#define DLT_RAW 12 /* raw IP */
+
+/*
+ * These are values from BSD/OS's "bpf.h".
+ * These are not the same as the values from the traditional libpcap
+ * "bpf.h"; however, these values shouldn't be generated by any
+ * OS other than BSD/OS, so the correct values to use here are the
+ * BSD/OS values.
+ *
+ * Platforms that have already assigned these values to other
+ * DLT_ codes, however, should give these codes the values
+ * from that platform, so that programs that use these codes will
+ * continue to compile - even though they won't correctly read
+ * files of these types.
+ */
+#define DLT_SLIP_BSDOS 15 /* BSD/OS Serial Line IP */
+#define DLT_PPP_BSDOS 16 /* BSD/OS Point-to-point Protocol */
+
+#define DLT_ATM_CLIP 19 /* Linux Classical-IP over ATM */
+
+/*
+ * These values are defined by NetBSD; other platforms should refrain from
+ * using them for other purposes, so that NetBSD savefiles with link
+ * types of 50 or 51 can be read as this type on all platforms.
+ */
+#define DLT_PPP_SERIAL 50 /* PPP over serial with HDLC encapsulation */
+#define DLT_PPP_ETHER 51 /* PPP over Ethernet */
+
+/*
+ * Reserved for the Symantec Enterprise Firewall.
+ */
+#define DLT_SYMANTEC_FIREWALL 99
+
+
+/*
+ * This value was defined by libpcap 0.5; platforms that have defined
+ * it with a different value should define it here with that value -
+ * a link type of 104 in a save file will be mapped to DLT_C_HDLC,
+ * whatever value that happens to be, so programs will correctly
+ * handle files with that link type regardless of the value of
+ * DLT_C_HDLC.
+ *
+ * The name DLT_C_HDLC was used by BSD/OS; we use that name for source
+ * compatibility with programs written for BSD/OS.
+ *
+ * libpcap 0.5 defined it as DLT_CHDLC; we define DLT_CHDLC as well,
+ * for source compatibility with programs written for libpcap 0.5.
+ */
+#define DLT_C_HDLC 104 /* Cisco HDLC */
+#define DLT_CHDLC DLT_C_HDLC
+
+#define DLT_IEEE802_11 105 /* IEEE 802.11 wireless */
+
+/*
+ * Values between 106 and 107 are used in capture file headers as
+ * link-layer types corresponding to DLT_ types that might differ
+ * between platforms; don't use those values for new DLT_ types.
+ */
+
+/*
+ * Frame Relay; BSD/OS has a DLT_FR with a value of 11, but that collides
+ * with other values.
+ * DLT_FR and DLT_FRELAY packets start with the Q.922 Frame Relay header
+ * (DLCI, etc.).
+ */
+#define DLT_FRELAY 107
+
+/*
+ * OpenBSD DLT_LOOP, for loopback devices; it's like DLT_NULL, except
+ * that the AF_ type in the link-layer header is in network byte order.
+ *
+ * OpenBSD defines it as 12, but that collides with DLT_RAW, so we
+ * define it as 108 here. If OpenBSD picks up this file, it should
+ * define DLT_LOOP as 12 in its version, as per the comment above -
+ * and should not use 108 as a DLT_ value.
+ */
+#define DLT_LOOP 108
+
+/*
+ * Values between 109 and 112 are used in capture file headers as
+ * link-layer types corresponding to DLT_ types that might differ
+ * between platforms; don't use those values for new DLT_ types.
+ */
+
+/*
+ * Encapsulated packets for IPsec; DLT_ENC is 13 in OpenBSD, but that's
+ * DLT_SLIP_BSDOS in NetBSD, so we don't use 13 for it in OSes other
+ * than OpenBSD.
+ */
+#define DLT_ENC 109
+
+/*
+ * This is for Linux cooked sockets.
+ */
+#define DLT_LINUX_SLL 113
+
+/*
+ * Apple LocalTalk hardware.
+ */
+#define DLT_LTALK 114
+
+/*
+ * Acorn Econet.
+ */
+#define DLT_ECONET 115
+
+/*
+ * Reserved for use with OpenBSD ipfilter.
+ */
+#define DLT_IPFILTER 116
+
+/*
+ * Reserved for use in capture-file headers as a link-layer type
+ * corresponding to OpenBSD DLT_PFLOG; DLT_PFLOG is 17 in OpenBSD,
+ * but that's DLT_LANE8023 in SuSE 6.3, so we can't use 17 for it
+ * in capture-file headers.
+ */
+#define DLT_PFLOG 117
+
+/*
+ * Registered for Cisco-internal use.
+ */
+#define DLT_CISCO_IOS 118
+
+/*
+ * Reserved for 802.11 cards using the Prism II chips, with a link-layer
+ * header including Prism monitor mode information plus an 802.11
+ * header.
+ */
+#define DLT_PRISM_HEADER 119
+
+/*
+ * Reserved for Aironet 802.11 cards, with an Aironet link-layer header
+ * (see Doug Ambrisko's FreeBSD patches).
+ */
+#define DLT_AIRONET_HEADER 120
+
+/*
+ * Reserved for use by OpenBSD's pfsync device.
+ */
+#define DLT_PFSYNC 121
+
+/*
+ * Reserved for Siemens HiPath HDLC. XXX
+ */
+#define DLT_HHDLC 121
+
+/*
+ * Reserved for RFC 2625 IP-over-Fibre Channel.
+ */
+#define DLT_IP_OVER_FC 122
+
+/*
+ * Reserved for Full Frontal ATM on Solaris.
+ */
+#define DLT_SUNATM 123
+
+/*
+ * Reserved as per request from Kent Dahlgren <kent@praesum.com>
+ * for private use.
+ */
+#define DLT_RIO 124 /* RapidIO */
+#define DLT_PCI_EXP 125 /* PCI Express */
+#define DLT_AURORA 126 /* Xilinx Aurora link layer */
+
+/*
+ * BSD header for 802.11 plus a number of bits of link-layer information
+ * including radio information.
+ */
+#ifndef DLT_IEEE802_11_RADIO
+#define DLT_IEEE802_11_RADIO 127
+#endif
+
+/*
+ * Reserved for TZSP encapsulation.
+ */
+#define DLT_TZSP 128 /* Tazmen Sniffer Protocol */
+
+/*
+ * Reserved for Linux ARCNET.
+ */
+#define DLT_ARCNET_LINUX 129
+
+/*
+ * Juniper-private data link types.
+ */
+#define DLT_JUNIPER_MLPPP 130
+#define DLT_JUNIPER_MLFR 131
+#define DLT_JUNIPER_ES 132
+#define DLT_JUNIPER_GGSN 133
+#define DLT_JUNIPER_MFR 134
+#define DLT_JUNIPER_ATM2 135
+#define DLT_JUNIPER_SERVICES 136
+#define DLT_JUNIPER_ATM1 137
+
+/*
+ * Apple IP-over-IEEE 1394, as per a request from Dieter Siegmund
+ * <dieter@apple.com>. The header that's presented is an Ethernet-like
+ * header:
+ *
+ * #define FIREWIRE_EUI64_LEN 8
+ * struct firewire_header {
+ * u_char firewire_dhost[FIREWIRE_EUI64_LEN];
+ * u_char firewire_shost[FIREWIRE_EUI64_LEN];
+ * u_short firewire_type;
+ * };
+ *
+ * with "firewire_type" being an Ethernet type value, rather than,
+ * for example, raw GASP frames being handed up.
+ */
+#define DLT_APPLE_IP_OVER_IEEE1394 138
+
+/*
+ * Various SS7 encapsulations, as per a request from Jeff Morriss
+ * <jeff.morriss[AT]ulticom.com> and subsequent discussions.
+ */
+#define DLT_MTP2_WITH_PHDR 139 /* pseudo-header with various info, followed by MTP2 */
+#define DLT_MTP2 140 /* MTP2, without pseudo-header */
+#define DLT_MTP3 141 /* MTP3, without pseudo-header or MTP2 */
+#define DLT_SCCP 142 /* SCCP, without pseudo-header or MTP2 or MTP3 */
+
+/*
+ * Reserved for DOCSIS.
+ */
+#define DLT_DOCSIS 143
+
+/*
+ * Reserved for Linux IrDA.
+ */
+#define DLT_LINUX_IRDA 144
+
+/*
+ * Reserved for IBM SP switch and IBM Next Federation switch.
+ */
+#define DLT_IBM_SP 145
+#define DLT_IBM_SN 146
+
+/*
+ * Reserved for private use. If you have some link-layer header type
+ * that you want to use within your organization, with the capture files
+ * using that link-layer header type not ever be sent outside your
+ * organization, you can use these values.
+ *
+ * No libpcap release will use these for any purpose, nor will any
+ * tcpdump release use them, either.
+ *
+ * Do *NOT* use these in capture files that you expect anybody not using
+ * your private versions of capture-file-reading tools to read; in
+ * particular, do *NOT* use them in products, otherwise you may find that
+ * people won't be able to use tcpdump, or snort, or Ethereal, or... to
+ * read capture files from your firewall/intrusion detection/traffic
+ * monitoring/etc. appliance, or whatever product uses that DLT_ value,
+ * and you may also find that the developers of those applications will
+ * not accept patches to let them read those files.
+ *
+ * Also, do not use them if somebody might send you a capture using them
+ * for *their* private type and tools using them for *your* private type
+ * would have to read them.
+ *
+ * Instead, ask "tcpdump-workers@tcpdump.org" for a new DLT_ value,
+ * as per the comment above, and use the type you're given.
+ */
+#define DLT_USER0 147
+#define DLT_USER1 148
+#define DLT_USER2 149
+#define DLT_USER3 150
+#define DLT_USER4 151
+#define DLT_USER5 152
+#define DLT_USER6 153
+#define DLT_USER7 154
+#define DLT_USER8 155
+#define DLT_USER9 156
+#define DLT_USER10 157
+#define DLT_USER11 158
+#define DLT_USER12 159
+#define DLT_USER13 160
+#define DLT_USER14 161
+#define DLT_USER15 162
+
+/*
+ * For future use with 802.11 captures - defined by AbsoluteValue
+ * Systems to store a number of bits of link-layer information
+ * including radio information:
+ *
+ * http://www.shaftnet.org/~pizza/software/capturefrm.txt
+ *
+ * but it might be used by some non-AVS drivers now or in the
+ * future.
+ */
+#define DLT_IEEE802_11_RADIO_AVS 163 /* 802.11 plus AVS radio header */
+
+/*
+ * Juniper-private data link type, as per request from
+ * Hannes Gredler <hannes@juniper.net>. The DLT_s are used
+ * for passing on chassis-internal metainformation such as
+ * QOS profiles, etc..
+ */
+#define DLT_JUNIPER_MONITOR 164
+
+/*
+ * Reserved for BACnet MS/TP.
+ */
+#define DLT_BACNET_MS_TP 165
+
+/*
+ * Another PPP variant as per request from Karsten Keil <kkeil@suse.de>.
+ *
+ * This is used in some OSes to allow a kernel socket filter to distinguish
+ * between incoming and outgoing packets, on a socket intended to
+ * supply pppd with outgoing packets so it can do dial-on-demand and
+ * hangup-on-lack-of-demand; incoming packets are filtered out so they
+ * don't cause pppd to hold the connection up (you don't want random
+ * input packets such as port scans, packets from old lost connections,
+ * etc. to force the connection to stay up).
+ *
+ * The first byte of the PPP header (0xff03) is modified to accommodate
+ * the direction - 0x00 = IN, 0x01 = OUT.
+ */
+#define DLT_PPP_PPPD 166
+
+/*
+ * Names for backwards compatibility with older versions of some PPP
+ * software; new software should use DLT_PPP_PPPD.
+ */
+#define DLT_PPP_WITH_DIRECTION DLT_PPP_PPPD
+#define DLT_LINUX_PPP_WITHDIRECTION DLT_PPP_PPPD
+
+/*
+ * Juniper-private data link type, as per request from
+ * Hannes Gredler <hannes@juniper.net>. The DLT_s are used
+ * for passing on chassis-internal metainformation such as
+ * QOS profiles, cookies, etc..
+ */
+#define DLT_JUNIPER_PPPOE 167
+#define DLT_JUNIPER_PPPOE_ATM 168
+
+#define DLT_GPRS_LLC 169 /* GPRS LLC */
+#define DLT_GPF_T 170 /* GPF-T (ITU-T G.7041/Y.1303) */
+#define DLT_GPF_F 171 /* GPF-F (ITU-T G.7041/Y.1303) */
+
+/*
+ * Requested by Oolan Zimmer <oz@gcom.com> for use in Gcom's T1/E1 line
+ * monitoring equipment.
+ */
+#define DLT_GCOM_T1E1 172
+#define DLT_GCOM_SERIAL 173
+
+/*
+ * Juniper-private data link type, as per request from
+ * Hannes Gredler <hannes@juniper.net>. The DLT_ is used
+ * for internal communication to Physical Interface Cards (PIC)
+ */
+#define DLT_JUNIPER_PIC_PEER 174
+
+/*
+ * Link types requested by Gregor Maier <gregor@endace.com> of Endace
+ * Measurement Systems. They add an ERF header (see
+ * http://www.endace.com/support/EndaceRecordFormat.pdf) in front of
+ * the link-layer header.
+ */
+#define DLT_ERF_ETH 175 /* Ethernet */
+#define DLT_ERF_POS 176 /* Packet-over-SONET */
+
+/*
+ * Requested by Daniele Orlandi <daniele@orlandi.com> for raw LAPD
+ * for vISDN (http://www.orlandi.com/visdn/). Its link-layer header
+ * includes additional information before the LAPD header, so it's
+ * not necessarily a generic LAPD header.
+ */
+#define DLT_LINUX_LAPD 177
+
+/*
+ * Juniper-private data link type, as per request from
+ * Hannes Gredler <hannes@juniper.net>.
+ * The DLT_s are used for prepending meta-information
+ * like interface index, interface name
+ * before standard Ethernet, PPP, Frame Relay & C-HDLC frames.
+ */
+#define DLT_JUNIPER_ETHER 178
+#define DLT_JUNIPER_PPP 179
+#define DLT_JUNIPER_FRELAY 180
+#define DLT_JUNIPER_CHDLC 181
+
+/*
+ * Multi Link Frame Relay (FRF.16)
+ */
+#define DLT_MFR 182
+
+/*
+ * Juniper-private data link type, as per request from
+ * Hannes Gredler <hannes@juniper.net>.
+ * The DLT_ is used for internal communication with a
+ * voice Adapter Card (PIC)
+ */
+#define DLT_JUNIPER_VP 183
+
+/*
+ * Arinc 429 frames.
+ * DLT_ requested by Gianluca Varenni <gianluca.varenni@cacetech.com>.
+ * Every frame contains a 32bit A429 label.
+ * More documentation on Arinc 429 can be found at
+ * http://www.condoreng.com/support/downloads/tutorials/ARINCTutorial.pdf
+ */
+#define DLT_A429 184
+
+/*
+ * Arinc 653 Interpartition Communication messages.
+ * DLT_ requested by Gianluca Varenni <gianluca.varenni@cacetech.com>.
+ * Please refer to the A653-1 standard for more information.
+ */
+#define DLT_A653_ICM 185
+
+/*
+ * USB packets, beginning with a USB setup header; requested by
+ * Paolo Abeni <paolo.abeni@email.it>.
+ */
+#define DLT_USB 186
+
+/*
+ * Bluetooth HCI UART transport layer (part H:4); requested by
+ * Paolo Abeni.
+ */
+#define DLT_BLUETOOTH_HCI_H4 187
+
+/*
+ * IEEE 802.16 MAC Common Part Sublayer; requested by Maria Cruz
+ * <cruz_petagay@bah.com>.
+ */
+#define DLT_IEEE802_16_MAC_CPS 188
+
+/*
+ * USB packets, beginning with a Linux USB header; requested by
+ * Paolo Abeni <paolo.abeni@email.it>.
+ */
+#define DLT_USB_LINUX 189
+
+/*
+ * Controller Area Network (CAN) v. 2.0B packets.
+ * DLT_ requested by Gianluca Varenni <gianluca.varenni@cacetech.com>.
+ * Used to dump CAN packets coming from a CAN Vector board.
+ * More documentation on the CAN v2.0B frames can be found at
+ * http://www.can-cia.org/downloads/?269
+ */
+#define DLT_CAN20B 190
+
+/*
+ * IEEE 802.15.4, with address fields padded, as is done by Linux
+ * drivers; requested by Juergen Schimmer.
+ */
+#define DLT_IEEE802_15_4_LINUX 191
+
+/*
+ * Per Packet Information encapsulated packets.
+ * DLT_ requested by Gianluca Varenni <gianluca.varenni@cacetech.com>.
+ */
+#define DLT_PPI 192
+
+/*
+ * Header for 802.16 MAC Common Part Sublayer plus a radiotap radio header;
+ * requested by Charles Clancy.
+ */
+#define DLT_IEEE802_16_MAC_CPS_RADIO 193
+
+/*
+ * Juniper-private data link type, as per request from
+ * Hannes Gredler <hannes@juniper.net>.
+ * The DLT_ is used for internal communication with an
+ * integrated service module (ISM).
+ */
+#define DLT_JUNIPER_ISM 194
+
+/*
+ * IEEE 802.15.4, exactly as it appears in the spec (no padding, no
+ * nothing); requested by Mikko Saarnivala <mikko.saarnivala@sensinode.com>.
+ */
+#define DLT_IEEE802_15_4 195
+
+/*
+ * Various link-layer types, with a pseudo-header, for SITA
+ * (http://www.sita.aero/); requested by Fulko Hew (fulko.hew@gmail.com).
+ */
+#define DLT_SITA 196
+
+/*
+ * Various link-layer types, with a pseudo-header, for Endace DAG cards;
+ * encapsulates Endace ERF records. Requested by Stephen Donnelly
+ * <stephen@endace.com>.
+ */
+#define DLT_ERF 197
+
+/*
+ * Special header prepended to Ethernet packets when capturing from a
+ * u10 Networks board. Requested by Phil Mulholland
+ * <phil@u10networks.com>.
+ */
+#define DLT_RAIF1 198
+
+/*
+ * IPMB packet for IPMI, beginning with the I2C slave address, followed
+ * by the netFn and LUN, etc.. Requested by Chanthy Toeung
+ * <chanthy.toeung@ca.kontron.com>.
+ */
+#define DLT_IPMB 199
+
+/*
+ * Juniper-private data link type, as per request from
+ * Hannes Gredler <hannes@juniper.net>.
+ * The DLT_ is used for capturing data on a secure tunnel interface.
+ */
+#define DLT_JUNIPER_ST 200
+
+/*
+ * Bluetooth HCI UART transport layer (part H:4), with pseudo-header
+ * that includes direction information; requested by Paolo Abeni.
+ */
+#define DLT_BLUETOOTH_HCI_H4_WITH_PHDR 201
+
+/*
+ * AX.25 packet with a 1-byte KISS header; see
+ *
+ * http://www.ax25.net/kiss.htm
+ *
+ * as per Richard Stearn <richard@rns-stearn.demon.co.uk>.
+ */
+#define DLT_AX25_KISS 202
+
+/*
+ * LAPD packets from an ISDN channel, starting with the address field,
+ * with no pseudo-header.
+ * Requested by Varuna De Silva <varunax@gmail.com>.
+ */
+#define DLT_LAPD 203
+
+/*
+ * Variants of various link-layer headers, with a one-byte direction
+ * pseudo-header prepended - zero means "received by this host",
+ * non-zero (any non-zero value) means "sent by this host" - as per
+ * Will Barker <w.barker@zen.co.uk>.
+ */
+#define DLT_PPP_WITH_DIR 204 /* PPP - don't confuse with DLT_PPP_WITH_DIRECTION */
+#define DLT_C_HDLC_WITH_DIR 205 /* Cisco HDLC */
+#define DLT_FRELAY_WITH_DIR 206 /* Frame Relay */
+#define DLT_LAPB_WITH_DIR 207 /* LAPB */
+
+/*
+ * 208 is reserved for an as-yet-unspecified proprietary link-layer
+ * type, as requested by Will Barker.
+ */
+
+/*
+ * IPMB with a Linux-specific pseudo-header; as requested by Alexey Neyman
+ * <avn@pigeonpoint.com>.
+ */
+#define DLT_IPMB_LINUX 209
+
+/*
+ * FlexRay automotive bus - http://www.flexray.com/ - as requested
+ * by Hannes Kaelber <hannes.kaelber@x2e.de>.
+ */
+#define DLT_FLEXRAY 210
+
+/*
+ * Media Oriented Systems Transport (MOST) bus for multimedia
+ * transport - http://www.mostcooperation.com/ - as requested
+ * by Hannes Kaelber <hannes.kaelber@x2e.de>.
+ */
+#define DLT_MOST 211
+
+/*
+ * Local Interconnect Network (LIN) bus for vehicle networks -
+ * http://www.lin-subbus.org/ - as requested by Hannes Kaelber
+ * <hannes.kaelber@x2e.de>.
+ */
+#define DLT_LIN 212
+
+/*
+ * X2E-private data link type used for serial line capture,
+ * as requested by Hannes Kaelber <hannes.kaelber@x2e.de>.
+ */
+#define DLT_X2E_SERIAL 213
+
+/*
+ * X2E-private data link type used for the Xoraya data logger
+ * family, as requested by Hannes Kaelber <hannes.kaelber@x2e.de>.
+ */
+#define DLT_X2E_XORAYA 214
+
+/*
+ * IEEE 802.15.4, exactly as it appears in the spec (no padding, no
+ * nothing), but with the PHY-level data for non-ASK PHYs (4 octets
+ * of 0 as preamble, one octet of SFD, one octet of frame length +
+ * reserved bit, and then the MAC-layer data, starting with the
+ * frame control field).
+ *
+ * Requested by Max Filippov <jcmvbkbc@gmail.com>.
+ */
+#define DLT_IEEE802_15_4_NONASK_PHY 215
+
+/*
+ * DLT and savefile link type values are split into a class and
+ * a member of that class. A class value of 0 indicates a regular
+ * DLT_/LINKTYPE_ value.
+ */
+#define DLT_CLASS(x) ((x) & 0x03ff0000)
+
+/*
+ * The instruction encodings.
+ */
+/* instruction classes */
+#define BPF_CLASS(code) ((code) & 0x07)
+#define BPF_LD 0x00
+#define BPF_LDX 0x01
+#define BPF_ST 0x02
+#define BPF_STX 0x03
+#define BPF_ALU 0x04
+#define BPF_JMP 0x05
+#define BPF_RET 0x06
+#define BPF_MISC 0x07
+
+/* ld/ldx fields */
+#define BPF_SIZE(code) ((code) & 0x18)
+#define BPF_W 0x00
+#define BPF_H 0x08
+#define BPF_B 0x10
+#define BPF_MODE(code) ((code) & 0xe0)
+#define BPF_IMM 0x00
+#define BPF_ABS 0x20
+#define BPF_IND 0x40
+#define BPF_MEM 0x60
+#define BPF_LEN 0x80
+#define BPF_MSH 0xa0
+
+/* alu/jmp fields */
+#define BPF_OP(code) ((code) & 0xf0)
+#define BPF_ADD 0x00
+#define BPF_SUB 0x10
+#define BPF_MUL 0x20
+#define BPF_DIV 0x30
+#define BPF_OR 0x40
+#define BPF_AND 0x50
+#define BPF_LSH 0x60
+#define BPF_RSH 0x70
+#define BPF_NEG 0x80
+#define BPF_JA 0x00
+#define BPF_JEQ 0x10
+#define BPF_JGT 0x20
+#define BPF_JGE 0x30
+#define BPF_JSET 0x40
+#define BPF_SRC(code) ((code) & 0x08)
+#define BPF_K 0x00
+#define BPF_X 0x08
+
+/* ret - BPF_K and BPF_X also apply */
+#define BPF_RVAL(code) ((code) & 0x18)
+#define BPF_A 0x10
+
+/* misc */
+#define BPF_MISCOP(code) ((code) & 0xf8)
+#define BPF_TAX 0x00
+#define BPF_TXA 0x80
+
+/*
+ * The instruction data structure.
+ */
+struct bpf_insn {
+ u_short code;
+ u_char jt;
+ u_char jf;
+ bpf_u_int32 k;
+};
+
+/*
+ * Macros for insn array initializers.
+ */
+#define BPF_STMT(code, k) { (u_short)(code), 0, 0, k }
+#define BPF_JUMP(code, k, jt, jf) { (u_short)(code), jt, jf, k }
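As an illustration of these initializers, the classic accept-only-IPv4-over-Ethernet filter: load the ethertype halfword at offset 12, compare against 0x0800, and return either "whole packet" ((u_int)-1) or "drop" (0):

    #include <sys/types.h>
    #include <net/bpf.h>

    static struct bpf_insn ipv4_insns[] = {
            /* A <- half-word at byte offset 12 (the ethertype) */
            BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 12),
            /* if A == ETHERTYPE_IP fall through, else skip the accept */
            BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x0800, 0, 1),
            BPF_STMT(BPF_RET | BPF_K, (u_int)-1),   /* accept whole packet */
            BPF_STMT(BPF_RET | BPF_K, 0),           /* reject */
    };

    static struct bpf_program ipv4_prog = {
            sizeof(ipv4_insns) / sizeof(ipv4_insns[0]),
            ipv4_insns,
    };

Installing it is then a single ioctl(fd, BIOCSETF, &ipv4_prog).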
+
+/*
+ * Structure to retrieve available DLTs for the interface.
+ */
+struct bpf_dltlist {
+ u_int bfl_len; /* number of entries in bfl_list array */
+ u_int *bfl_list; /* array of DLTs */
+};
+
+#ifdef _KERNEL
+#ifdef MALLOC_DECLARE
+MALLOC_DECLARE(M_BPF);
+#endif
+#ifdef SYSCTL_DECL
+SYSCTL_DECL(_net_bpf);
+#endif
+
+/*
+ * Rotate the packet buffers in descriptor d. Move the store buffer into the
+ * hold slot, and the free buffer into the store slot. Zero the length of the
+ * new store buffer. Descriptor lock should be held.
+ */
+#define ROTATE_BUFFERS(d) do { \
+ (d)->bd_hbuf = (d)->bd_sbuf; \
+ (d)->bd_hlen = (d)->bd_slen; \
+ (d)->bd_sbuf = (d)->bd_fbuf; \
+ (d)->bd_slen = 0; \
+ (d)->bd_fbuf = NULL; \
+ bpf_bufheld(d); \
+} while (0)
+
+/*
+ * Descriptor associated with each attached hardware interface.
+ */
+struct bpf_if {
+ LIST_ENTRY(bpf_if) bif_next; /* list of all interfaces */
+ LIST_HEAD(, bpf_d) bif_dlist; /* descriptor list */
+ u_int bif_dlt; /* link layer type */
+ u_int bif_hdrlen; /* length of header (with padding) */
+ struct ifnet *bif_ifp; /* corresponding interface */
+ struct mtx bif_mtx; /* mutex for interface */
+};
+
+void bpf_bufheld(struct bpf_d *d);
+int bpf_validate(const struct bpf_insn *, int);
+void bpf_tap(struct bpf_if *, u_char *, u_int);
+void bpf_mtap(struct bpf_if *, struct mbuf *);
+void bpf_mtap2(struct bpf_if *, void *, u_int, struct mbuf *);
+void bpfattach(struct ifnet *, u_int, u_int);
+void bpfattach2(struct ifnet *, u_int, u_int, struct bpf_if **);
+void bpfdetach(struct ifnet *);
+
+void bpfilterattach(int);
+u_int bpf_filter(const struct bpf_insn *, u_char *, u_int, u_int);
+
+static __inline int
+bpf_peers_present(struct bpf_if *bpf)
+{
+
+ if (!LIST_EMPTY(&bpf->bif_dlist))
+ return (1);
+ return (0);
+}
+
+#define BPF_TAP(_ifp,_pkt,_pktlen) do { \
+ if (bpf_peers_present((_ifp)->if_bpf)) \
+ bpf_tap((_ifp)->if_bpf, (_pkt), (_pktlen)); \
+} while (0)
+#define BPF_MTAP(_ifp,_m) do { \
+ if (bpf_peers_present((_ifp)->if_bpf)) { \
+ M_ASSERTVALID(_m); \
+ bpf_mtap((_ifp)->if_bpf, (_m)); \
+ } \
+} while (0)
+#define BPF_MTAP2(_ifp,_data,_dlen,_m) do { \
+ if (bpf_peers_present((_ifp)->if_bpf)) { \
+ M_ASSERTVALID(_m); \
+ bpf_mtap2((_ifp)->if_bpf,(_data),(_dlen),(_m)); \
+ } \
+} while (0)
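Drivers invoke these wrappers on both the receive and transmit paths; the bpf_peers_present() test keeps the no-listener case to a single list check. A hedged kernel-side sketch of a transmit routine for a hypothetical driver (names and queue handling are illustrative):

    /* Hypothetical foo(4) transmit path, assuming the usual ifnet setup. */
    static void
    foo_start(struct ifnet *ifp)
    {
            struct mbuf *m;

            for (;;) {
                    IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
                    if (m == NULL)
                            break;
                    BPF_MTAP(ifp, m);       /* cheap when nobody listens */
                    /* ... hand m to the hardware ... */
            }
    }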
+#endif
+
+/*
+ * Number of scratch memory words (for BPF_LD|BPF_MEM and BPF_ST).
+ */
+#define BPF_MEMWORDS 16
+
+#endif /* _NET_BPF_HH_ */
diff --git a/rtems/freebsd/net/bpf_buffer.c b/rtems/freebsd/net/bpf_buffer.c
new file mode 100644
index 00000000..deb54652
--- /dev/null
+++ b/rtems/freebsd/net/bpf_buffer.c
@@ -0,0 +1,212 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 2007 Seccuris Inc.
+ * All rights reserved.
+ *
+ * This software was developed by Robert N. M. Watson under contract to
+ * Seccuris Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Copyright (c) 1990, 1991, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from the Stanford/CMU enet packet filter,
+ * (net/enet.c) distributed as part of 4.3BSD, and code contributed
+ * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence
+ * Berkeley Laboratory.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)bpf.c 8.4 (Berkeley) 1/9/95
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/freebsd/local/opt_bpf.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/uio.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/sysctl.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/bpf.h>
+#include <rtems/freebsd/net/bpf_buffer.h>
+#include <rtems/freebsd/net/bpfdesc.h>
+
+/*
+ * Implement historical kernel memory buffering model for BPF: two malloc(9)
+ * kernel buffers are hung off of the descriptor. The size is fixed prior to
+ * attaching to an ifnet, and cannot be changed after that. read(2) simply
+ * copies the data to user space using uiomove(9).
+ */
+
+static int bpf_bufsize = 4096;
+SYSCTL_INT(_net_bpf, OID_AUTO, bufsize, CTLFLAG_RW,
+ &bpf_bufsize, 0, "Default capture buffer size in bytes");
+static int bpf_maxbufsize = BPF_MAXBUFSIZE;
+SYSCTL_INT(_net_bpf, OID_AUTO, maxbufsize, CTLFLAG_RW,
+ &bpf_maxbufsize, 0, "Maximum capture buffer size in bytes");
+
+void
+bpf_buffer_alloc(struct bpf_d *d)
+{
+
+ KASSERT(d->bd_fbuf == NULL, ("bpf_buffer_alloc: bd_fbuf != NULL"));
+ KASSERT(d->bd_sbuf == NULL, ("bpf_buffer_alloc: bd_sbuf != NULL"));
+ KASSERT(d->bd_hbuf == NULL, ("bpf_buffer_alloc: bd_hbuf != NULL"));
+
+ d->bd_fbuf = (caddr_t)malloc(d->bd_bufsize, M_BPF, M_WAITOK);
+ d->bd_sbuf = (caddr_t)malloc(d->bd_bufsize, M_BPF, M_WAITOK);
+ d->bd_hbuf = NULL;
+ d->bd_slen = 0;
+ d->bd_hlen = 0;
+}
+
+/*
+ * Simple data copy to the current kernel buffer.
+ */
+void
+bpf_buffer_append_bytes(struct bpf_d *d, caddr_t buf, u_int offset,
+ void *src, u_int len)
+{
+ u_char *src_bytes;
+
+ src_bytes = (u_char *)src;
+ bcopy(src_bytes, buf + offset, len);
+}
+
+/*
+ * Scatter-gather data copy from an mbuf chain to the current kernel buffer.
+ */
+void
+bpf_buffer_append_mbuf(struct bpf_d *d, caddr_t buf, u_int offset, void *src,
+ u_int len)
+{
+ const struct mbuf *m;
+ u_char *dst;
+ u_int count;
+
+ m = (struct mbuf *)src;
+ dst = (u_char *)buf + offset;
+ while (len > 0) {
+ if (m == NULL)
+ panic("bpf_mcopy");
+ count = min(m->m_len, len);
+ bcopy(mtod(m, void *), dst, count);
+ m = m->m_next;
+ dst += count;
+ len -= count;
+ }
+}
+
+/*
+ * Free BPF kernel buffers on device close.
+ */
+void
+bpf_buffer_free(struct bpf_d *d)
+{
+
+ if (d->bd_sbuf != NULL)
+ free(d->bd_sbuf, M_BPF);
+ if (d->bd_hbuf != NULL)
+ free(d->bd_hbuf, M_BPF);
+ if (d->bd_fbuf != NULL)
+ free(d->bd_fbuf, M_BPF);
+
+#ifdef INVARIANTS
+ d->bd_sbuf = d->bd_hbuf = d->bd_fbuf = (caddr_t)~0;
+#endif
+}
+
+/*
+ * This is a historical initialization that occurs when the BPF descriptor is
+ * first opened. It does not imply selection of a buffer mode, so we don't
+ * allocate buffers here.
+ */
+void
+bpf_buffer_init(struct bpf_d *d)
+{
+
+ d->bd_bufsize = bpf_bufsize;
+}
+
+/*
+ * Allocate or resize buffers.
+ */
+int
+bpf_buffer_ioctl_sblen(struct bpf_d *d, u_int *i)
+{
+ u_int size;
+
+ BPFD_LOCK(d);
+ if (d->bd_bif != NULL) {
+ BPFD_UNLOCK(d);
+ return (EINVAL);
+ }
+ size = *i;
+ if (size > bpf_maxbufsize)
+ *i = size = bpf_maxbufsize;
+ else if (size < BPF_MINBUFSIZE)
+ *i = size = BPF_MINBUFSIZE;
+ d->bd_bufsize = size;
+ BPFD_UNLOCK(d);
+ return (0);
+}
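Since the resize is refused once bd_bif is set, userspace has to order its ioctls: grow the buffer first, then bind the interface. A hedged sketch of the canonical open sequence (the /dev/bpf node is created by bpf_drvinit() above):

    #include <sys/types.h>
    #include <sys/ioctl.h>
    #include <net/if.h>
    #include <net/bpf.h>
    #include <fcntl.h>
    #include <string.h>
    #include <unistd.h>

    static int
    open_bpf(const char *ifname, u_int bufsize)
    {
            struct ifreq ifr;
            int fd;

            fd = open("/dev/bpf", O_RDWR);
            if (fd < 0)
                    return (-1);
            /* Best effort: the kernel clamps to net.bpf.maxbufsize anyway. */
            (void)ioctl(fd, BIOCSBLEN, &bufsize);
            memset(&ifr, 0, sizeof(ifr));
            strlcpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name));
            if (ioctl(fd, BIOCSETIF, &ifr) < 0) {
                    close(fd);
                    return (-1);
            }
            return (fd);
    }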
+
+/*
+ * Copy buffer storage to user space in read().
+ */
+int
+bpf_buffer_uiomove(struct bpf_d *d, caddr_t buf, u_int len, struct uio *uio)
+{
+
+ return (uiomove(buf, len, uio));
+}
diff --git a/rtems/freebsd/net/bpf_buffer.h b/rtems/freebsd/net/bpf_buffer.h
new file mode 100644
index 00000000..545ddb22
--- /dev/null
+++ b/rtems/freebsd/net/bpf_buffer.h
@@ -0,0 +1,50 @@
+/*-
+ * Copyright (c) 2007 Seccuris Inc.
+ * All rights reserved.
+ *
+ * This software was developed by Robert N. M. Watson under contract to
+ * Seccuris Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _NET_BPF_BUFFER_HH_
+#define _NET_BPF_BUFFER_HH_
+
+#ifndef _KERNEL
+#error "no user-serviceable parts inside"
+#endif
+
+void bpf_buffer_alloc(struct bpf_d *d);
+void bpf_buffer_append_bytes(struct bpf_d *d, caddr_t buf, u_int offset,
+ void *src, u_int len);
+void bpf_buffer_append_mbuf(struct bpf_d *d, caddr_t buf, u_int offset,
+ void *src, u_int len);
+void bpf_buffer_free(struct bpf_d *d);
+void bpf_buffer_init(struct bpf_d *d);
+int bpf_buffer_ioctl_sblen(struct bpf_d *d, u_int *i);
+int bpf_buffer_uiomove(struct bpf_d *d, caddr_t buf, u_int len,
+ struct uio *uio);
+
+#endif /* !_NET_BPF_BUFFER_HH_ */
diff --git a/rtems/freebsd/net/bpf_filter.c b/rtems/freebsd/net/bpf_filter.c
new file mode 100644
index 00000000..e819b496
--- /dev/null
+++ b/rtems/freebsd/net/bpf_filter.c
@@ -0,0 +1,582 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 1990, 1991, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from the Stanford/CMU enet packet filter,
+ * (net/enet.c) distributed as part of 4.3BSD, and code contributed
+ * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence
+ * Berkeley Laboratory.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)bpf_filter.c 8.1 (Berkeley) 6/10/93
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/freebsd/sys/param.h>
+
+#if !defined(_KERNEL) || defined(sun)
+#include <rtems/freebsd/netinet/in.h>
+#endif
+
+#ifndef __i386__
+#define BPF_ALIGN
+#endif
+
+#ifndef BPF_ALIGN
+#define EXTRACT_SHORT(p) ((u_int16_t)ntohs(*(u_int16_t *)p))
+#define EXTRACT_LONG(p) (ntohl(*(u_int32_t *)p))
+#else
+#define EXTRACT_SHORT(p)\
+ ((u_int16_t)\
+ ((u_int16_t)*((u_char *)p+0)<<8|\
+ (u_int16_t)*((u_char *)p+1)<<0))
+#define EXTRACT_LONG(p)\
+ ((u_int32_t)*((u_char *)p+0)<<24|\
+ (u_int32_t)*((u_char *)p+1)<<16|\
+ (u_int32_t)*((u_char *)p+2)<<8|\
+ (u_int32_t)*((u_char *)p+3)<<0)
+#endif
+
+#ifdef _KERNEL
+#include <rtems/freebsd/sys/mbuf.h>
+#else
+#include <rtems/freebsd/stdlib.h>
+#endif
+#include <rtems/freebsd/net/bpf.h>
+#ifdef _KERNEL
+#define MINDEX(m, k) \
+{ \
+ register int len = m->m_len; \
+ \
+ while (k >= len) { \
+ k -= len; \
+ m = m->m_next; \
+ if (m == 0) \
+ return (0); \
+ len = m->m_len; \
+ } \
+}
+
+static u_int16_t m_xhalf(struct mbuf *m, bpf_u_int32 k, int *err);
+static u_int32_t m_xword(struct mbuf *m, bpf_u_int32 k, int *err);
+
+static u_int32_t
+m_xword(struct mbuf *m, bpf_u_int32 k, int *err)
+{
+ size_t len;
+ u_char *cp, *np;
+ struct mbuf *m0;
+
+ len = m->m_len;
+ while (k >= len) {
+ k -= len;
+ m = m->m_next;
+ if (m == 0)
+ goto bad;
+ len = m->m_len;
+ }
+ cp = mtod(m, u_char *) + k;
+ if (len - k >= 4) {
+ *err = 0;
+ return (EXTRACT_LONG(cp));
+ }
+ m0 = m->m_next;
+ if (m0 == 0 || m0->m_len + len - k < 4)
+ goto bad;
+ *err = 0;
+ np = mtod(m0, u_char *);
+ switch (len - k) {
+ case 1:
+ return (((u_int32_t)cp[0] << 24) |
+ ((u_int32_t)np[0] << 16) |
+ ((u_int32_t)np[1] << 8) |
+ (u_int32_t)np[2]);
+
+ case 2:
+ return (((u_int32_t)cp[0] << 24) |
+ ((u_int32_t)cp[1] << 16) |
+ ((u_int32_t)np[0] << 8) |
+ (u_int32_t)np[1]);
+
+ default:
+ return (((u_int32_t)cp[0] << 24) |
+ ((u_int32_t)cp[1] << 16) |
+ ((u_int32_t)cp[2] << 8) |
+ (u_int32_t)np[0]);
+ }
+ bad:
+ *err = 1;
+ return (0);
+}
+
+static u_int16_t
+m_xhalf(struct mbuf *m, bpf_u_int32 k, int *err)
+{
+ size_t len;
+ u_char *cp;
+ struct mbuf *m0;
+
+ len = m->m_len;
+ while (k >= len) {
+ k -= len;
+ m = m->m_next;
+ if (m == 0)
+ goto bad;
+ len = m->m_len;
+ }
+ cp = mtod(m, u_char *) + k;
+ if (len - k >= 2) {
+ *err = 0;
+ return (EXTRACT_SHORT(cp));
+ }
+ m0 = m->m_next;
+ if (m0 == 0)
+ goto bad;
+ *err = 0;
+ return ((cp[0] << 8) | mtod(m0, u_char *)[0]);
+ bad:
+ *err = 1;
+ return (0);
+}
+#endif
+
+/*
+ * Execute the filter program starting at pc on the packet p.
+ * wirelen is the length of the original packet;
+ * buflen is the amount of data present.
+ */
+u_int
+bpf_filter(const struct bpf_insn *pc, u_char *p, u_int wirelen, u_int buflen)
+{
+ u_int32_t A = 0, X = 0;
+ bpf_u_int32 k;
+ u_int32_t mem[BPF_MEMWORDS];
+
+ if (pc == NULL)
+ /*
+ * No filter means accept all.
+ */
+ return ((u_int)-1);
+
+ --pc;
+ while (1) {
+ ++pc;
+ switch (pc->code) {
+ default:
+#ifdef _KERNEL
+ return (0);
+#else
+ abort();
+#endif
+
+ case BPF_RET|BPF_K:
+ return ((u_int)pc->k);
+
+ case BPF_RET|BPF_A:
+ return ((u_int)A);
+
+ case BPF_LD|BPF_W|BPF_ABS:
+ k = pc->k;
+ if (k > buflen || sizeof(int32_t) > buflen - k) {
+#ifdef _KERNEL
+ int merr;
+
+ if (buflen != 0)
+ return (0);
+ A = m_xword((struct mbuf *)p, k, &merr);
+ if (merr != 0)
+ return (0);
+ continue;
+#else
+ return (0);
+#endif
+ }
+#ifdef BPF_ALIGN
+ if (((intptr_t)(p + k) & 3) != 0)
+ A = EXTRACT_LONG(&p[k]);
+ else
+#endif
+ A = ntohl(*(int32_t *)(p + k));
+ continue;
+
+ case BPF_LD|BPF_H|BPF_ABS:
+ k = pc->k;
+ if (k > buflen || sizeof(int16_t) > buflen - k) {
+#ifdef _KERNEL
+ int merr;
+
+ if (buflen != 0)
+ return (0);
+ A = m_xhalf((struct mbuf *)p, k, &merr);
+ if (merr != 0)
+ return (0);
+ continue;
+#else
+ return (0);
+#endif
+ }
+ A = EXTRACT_SHORT(&p[k]);
+ continue;
+
+ case BPF_LD|BPF_B|BPF_ABS:
+ k = pc->k;
+ if (k >= buflen) {
+#ifdef _KERNEL
+ struct mbuf *m;
+
+ if (buflen != 0)
+ return (0);
+ m = (struct mbuf *)p;
+ MINDEX(m, k);
+ A = mtod(m, u_char *)[k];
+ continue;
+#else
+ return (0);
+#endif
+ }
+ A = p[k];
+ continue;
+
+ case BPF_LD|BPF_W|BPF_LEN:
+ A = wirelen;
+ continue;
+
+ case BPF_LDX|BPF_W|BPF_LEN:
+ X = wirelen;
+ continue;
+
+ case BPF_LD|BPF_W|BPF_IND:
+ k = X + pc->k;
+ if (pc->k > buflen || X > buflen - pc->k ||
+ sizeof(int32_t) > buflen - k) {
+#ifdef _KERNEL
+ int merr;
+
+ if (buflen != 0)
+ return (0);
+ A = m_xword((struct mbuf *)p, k, &merr);
+ if (merr != 0)
+ return (0);
+ continue;
+#else
+ return (0);
+#endif
+ }
+#ifdef BPF_ALIGN
+ if (((intptr_t)(p + k) & 3) != 0)
+ A = EXTRACT_LONG(&p[k]);
+ else
+#endif
+ A = ntohl(*(int32_t *)(p + k));
+ continue;
+
+ case BPF_LD|BPF_H|BPF_IND:
+ k = X + pc->k;
+ if (X > buflen || pc->k > buflen - X ||
+ sizeof(int16_t) > buflen - k) {
+#ifdef _KERNEL
+ int merr;
+
+ if (buflen != 0)
+ return (0);
+ A = m_xhalf((struct mbuf *)p, k, &merr);
+ if (merr != 0)
+ return (0);
+ continue;
+#else
+ return (0);
+#endif
+ }
+ A = EXTRACT_SHORT(&p[k]);
+ continue;
+
+ case BPF_LD|BPF_B|BPF_IND:
+ k = X + pc->k;
+ if (pc->k >= buflen || X >= buflen - pc->k) {
+#ifdef _KERNEL
+ struct mbuf *m;
+
+ if (buflen != 0)
+ return (0);
+ m = (struct mbuf *)p;
+ MINDEX(m, k);
+ A = mtod(m, u_char *)[k];
+ continue;
+#else
+ return (0);
+#endif
+ }
+ A = p[k];
+ continue;
+
+ case BPF_LDX|BPF_MSH|BPF_B:
+ k = pc->k;
+ if (k >= buflen) {
+#ifdef _KERNEL
+ register struct mbuf *m;
+
+ if (buflen != 0)
+ return (0);
+ m = (struct mbuf *)p;
+ MINDEX(m, k);
+ X = (mtod(m, u_char *)[k] & 0xf) << 2;
+ continue;
+#else
+ return (0);
+#endif
+ }
+ X = (p[pc->k] & 0xf) << 2;
+ continue;
+
+ case BPF_LD|BPF_IMM:
+ A = pc->k;
+ continue;
+
+ case BPF_LDX|BPF_IMM:
+ X = pc->k;
+ continue;
+
+ case BPF_LD|BPF_MEM:
+ A = mem[pc->k];
+ continue;
+
+ case BPF_LDX|BPF_MEM:
+ X = mem[pc->k];
+ continue;
+
+ case BPF_ST:
+ mem[pc->k] = A;
+ continue;
+
+ case BPF_STX:
+ mem[pc->k] = X;
+ continue;
+
+ case BPF_JMP|BPF_JA:
+ pc += pc->k;
+ continue;
+
+ case BPF_JMP|BPF_JGT|BPF_K:
+ pc += (A > pc->k) ? pc->jt : pc->jf;
+ continue;
+
+ case BPF_JMP|BPF_JGE|BPF_K:
+ pc += (A >= pc->k) ? pc->jt : pc->jf;
+ continue;
+
+ case BPF_JMP|BPF_JEQ|BPF_K:
+ pc += (A == pc->k) ? pc->jt : pc->jf;
+ continue;
+
+ case BPF_JMP|BPF_JSET|BPF_K:
+ pc += (A & pc->k) ? pc->jt : pc->jf;
+ continue;
+
+ case BPF_JMP|BPF_JGT|BPF_X:
+ pc += (A > X) ? pc->jt : pc->jf;
+ continue;
+
+ case BPF_JMP|BPF_JGE|BPF_X:
+ pc += (A >= X) ? pc->jt : pc->jf;
+ continue;
+
+ case BPF_JMP|BPF_JEQ|BPF_X:
+ pc += (A == X) ? pc->jt : pc->jf;
+ continue;
+
+ case BPF_JMP|BPF_JSET|BPF_X:
+ pc += (A & X) ? pc->jt : pc->jf;
+ continue;
+
+ case BPF_ALU|BPF_ADD|BPF_X:
+ A += X;
+ continue;
+
+ case BPF_ALU|BPF_SUB|BPF_X:
+ A -= X;
+ continue;
+
+ case BPF_ALU|BPF_MUL|BPF_X:
+ A *= X;
+ continue;
+
+ case BPF_ALU|BPF_DIV|BPF_X:
+ if (X == 0)
+ return (0);
+ A /= X;
+ continue;
+
+ case BPF_ALU|BPF_AND|BPF_X:
+ A &= X;
+ continue;
+
+ case BPF_ALU|BPF_OR|BPF_X:
+ A |= X;
+ continue;
+
+ case BPF_ALU|BPF_LSH|BPF_X:
+ A <<= X;
+ continue;
+
+ case BPF_ALU|BPF_RSH|BPF_X:
+ A >>= X;
+ continue;
+
+ case BPF_ALU|BPF_ADD|BPF_K:
+ A += pc->k;
+ continue;
+
+ case BPF_ALU|BPF_SUB|BPF_K:
+ A -= pc->k;
+ continue;
+
+ case BPF_ALU|BPF_MUL|BPF_K:
+ A *= pc->k;
+ continue;
+
+ case BPF_ALU|BPF_DIV|BPF_K:
+ A /= pc->k;
+ continue;
+
+ case BPF_ALU|BPF_AND|BPF_K:
+ A &= pc->k;
+ continue;
+
+ case BPF_ALU|BPF_OR|BPF_K:
+ A |= pc->k;
+ continue;
+
+ case BPF_ALU|BPF_LSH|BPF_K:
+ A <<= pc->k;
+ continue;
+
+ case BPF_ALU|BPF_RSH|BPF_K:
+ A >>= pc->k;
+ continue;
+
+ case BPF_ALU|BPF_NEG:
+ A = -A;
+ continue;
+
+ case BPF_MISC|BPF_TAX:
+ X = A;
+ continue;
+
+ case BPF_MISC|BPF_TXA:
+ A = X;
+ continue;
+ }
+ }
+}
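+
+/*
+ * Illustrative sketch (not part of the FreeBSD sources): a minimal
+ * userland use of bpf_filter().  The program accepts IPv4 frames in
+ * full and rejects everything else; BPF_STMT()/BPF_JUMP() are the
+ * standard instruction macros from net/bpf.h.
+ */
+#if 0
+static const struct bpf_insn ip_only[] = {
+ BPF_STMT(BPF_LD|BPF_H|BPF_ABS, 12), /* A <- ethertype */
+ BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, 0x0800, 0, 1), /* IPv4? */
+ BPF_STMT(BPF_RET|BPF_K, (u_int)-1), /* accept whole packet */
+ BPF_STMT(BPF_RET|BPF_K, 0), /* reject */
+};
+
+static u_int
+match_ip(u_char *pkt, u_int len)
+{
+
+ return (bpf_filter(ip_only, pkt, len, len));
+}
+#endif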
+
+#ifdef _KERNEL
+static const u_short bpf_code_map[] = {
+ 0x10ff, /* 0x00-0x0f: 1111111100001000 */
+ 0x3070, /* 0x10-0x1f: 0000111000001100 */
+ 0x3131, /* 0x20-0x2f: 1000110010001100 */
+ 0x3031, /* 0x30-0x3f: 1000110000001100 */
+ 0x3131, /* 0x40-0x4f: 1000110010001100 */
+ 0x1011, /* 0x50-0x5f: 1000100000001000 */
+ 0x1013, /* 0x60-0x6f: 1100100000001000 */
+ 0x1010, /* 0x70-0x7f: 0000100000001000 */
+ 0x0093, /* 0x80-0x8f: 1100100100000000 */
+ 0x0000, /* 0x90-0x9f: 0000000000000000 */
+ 0x0000, /* 0xa0-0xaf: 0000000000000000 */
+ 0x0002, /* 0xb0-0xbf: 0100000000000000 */
+ 0x0000, /* 0xc0-0xcf: 0000000000000000 */
+ 0x0000, /* 0xd0-0xdf: 0000000000000000 */
+ 0x0000, /* 0xe0-0xef: 0000000000000000 */
+ 0x0000 /* 0xf0-0xff: 0000000000000000 */
+};
+
+#define BPF_VALIDATE_CODE(c) \
+ ((c) <= 0xff && (bpf_code_map[(c) >> 4] & (1 << ((c) & 0xf))) != 0)
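+
+/*
+ * Worked example: BPF_RET|BPF_K is opcode 0x06, so the lookup uses
+ * bpf_code_map[0] == 0x10ff; bit 6 (0x0040) is set and the opcode is
+ * accepted.  The binary strings in the table comments list bit 0 first.
+ */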
+
+/*
+ * Return true if the 'fcode' is a valid filter program.
+ * The constraints are that each jump be forward and to a valid
+ * code. The code must terminate with either an accept or reject.
+ *
+ * The kernel needs to be able to verify an application's filter code.
+ * Otherwise, a bogus program could easily crash the system.
+ */
+int
+bpf_validate(const struct bpf_insn *f, int len)
+{
+ register int i;
+ register const struct bpf_insn *p;
+
+ /* Do not accept negative length filter. */
+ if (len < 0)
+ return (0);
+
+ /* An empty filter means accept all. */
+ if (len == 0)
+ return (1);
+
+ for (i = 0; i < len; ++i) {
+ p = &f[i];
+ /*
+ * Check that the code is valid.
+ */
+ if (!BPF_VALIDATE_CODE(p->code))
+ return (0);
+ /*
+ * Check that the jumps are forward, and within
+ * the code block.
+ */
+ if (BPF_CLASS(p->code) == BPF_JMP) {
+ register u_int offset;
+
+ if (p->code == (BPF_JMP|BPF_JA))
+ offset = p->k;
+ else
+ offset = p->jt > p->jf ? p->jt : p->jf;
+ if (offset >= (u_int)(len - i) - 1)
+ return (0);
+ continue;
+ }
+ /*
+ * Check that memory operations use valid addresses.
+ */
+ if (p->code == BPF_ST || p->code == BPF_STX ||
+ p->code == (BPF_LD|BPF_MEM) ||
+ p->code == (BPF_LDX|BPF_MEM)) {
+ if (p->k >= BPF_MEMWORDS)
+ return (0);
+ continue;
+ }
+ /*
+ * Check for constant division by 0.
+ */
+ if (p->code == (BPF_ALU|BPF_DIV|BPF_K) && p->k == 0)
+ return (0);
+ }
+ return (BPF_CLASS(f[len - 1].code) == BPF_RET);
+}
+#endif
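+
+/*
+ * Illustrative sketch (not part of the FreeBSD sources): a program
+ * bpf_validate() rejects because the jump target lies beyond the end
+ * of the filter; bpf_validate(bad_prog, 2) returns 0.
+ */
+#if 0
+static const struct bpf_insn bad_prog[] = {
+ BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, 0, 5, 5), /* offset 5 > remaining insns */
+ BPF_STMT(BPF_RET|BPF_K, 0),
+};
+#endif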
diff --git a/rtems/freebsd/net/bpf_jitter.c b/rtems/freebsd/net/bpf_jitter.c
new file mode 100644
index 00000000..df7c5848
--- /dev/null
+++ b/rtems/freebsd/net/bpf_jitter.c
@@ -0,0 +1,143 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (C) 2002-2003 NetGroup, Politecnico di Torino (Italy)
+ * Copyright (C) 2005-2008 Jung-uk Kim <jkim@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the Politecnico di Torino nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#ifdef _KERNEL
+#include <rtems/freebsd/local/opt_bpf.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/sysctl.h>
+#else
+#include <rtems/freebsd/stdlib.h>
+#include <rtems/freebsd/string.h>
+#include <rtems/freebsd/sys/types.h>
+#endif
+
+#include <rtems/freebsd/net/bpf.h>
+#include <rtems/freebsd/net/bpf_jitter.h>
+
+bpf_filter_func bpf_jit_compile(struct bpf_insn *, u_int, int *);
+
+static u_int bpf_jit_accept_all(u_char *, u_int, u_int);
+
+#ifdef _KERNEL
+MALLOC_DEFINE(M_BPFJIT, "BPF_JIT", "BPF JIT compiler");
+
+SYSCTL_NODE(_net, OID_AUTO, bpf_jitter, CTLFLAG_RW, 0, "BPF JIT compiler");
+int bpf_jitter_enable = 1;
+SYSCTL_INT(_net_bpf_jitter, OID_AUTO, enable, CTLFLAG_RW,
+ &bpf_jitter_enable, 0, "enable BPF JIT compiler");
+
+bpf_jit_filter *
+bpf_jitter(struct bpf_insn *fp, int nins)
+{
+ bpf_jit_filter *filter;
+
+ /* Allocate the filter structure */
+ filter = (struct bpf_jit_filter *)malloc(sizeof(*filter),
+ M_BPFJIT, M_NOWAIT | M_ZERO);
+ if (filter == NULL)
+ return (NULL);
+
+ /* No filter means accept all */
+ if (fp == NULL || nins == 0) {
+ filter->func = bpf_jit_accept_all;
+ return (filter);
+ }
+
+ /* Create the binary */
+ if ((filter->func = bpf_jit_compile(fp, nins, filter->mem)) == NULL) {
+ free(filter, M_BPFJIT);
+ return (NULL);
+ }
+
+ return (filter);
+}
+
+void
+bpf_destroy_jit_filter(bpf_jit_filter *filter)
+{
+
+ if (filter->func != bpf_jit_accept_all)
+ free(filter->func, M_BPFJIT);
+ free(filter, M_BPFJIT);
+}
+#else
+bpf_jit_filter *
+bpf_jitter(struct bpf_insn *fp, int nins)
+{
+ bpf_jit_filter *filter;
+
+ /* Allocate the filter structure */
+ filter = (struct bpf_jit_filter *)malloc(sizeof(*filter));
+ if (filter == NULL)
+ return (NULL);
+ memset(filter, 0, sizeof(*filter));
+
+ /* No filter means accept all */
+ if (fp == NULL || nins == 0) {
+ filter->func = bpf_jit_accept_all;
+ return (filter);
+ }
+
+ /* Create the binary */
+ if ((filter->func = bpf_jit_compile(fp, nins, filter->mem)) == NULL) {
+ free(filter);
+ return (NULL);
+ }
+
+ return (filter);
+}
+
+void
+bpf_destroy_jit_filter(bpf_jit_filter *filter)
+{
+
+ if (filter->func != bpf_jit_accept_all)
+ free(filter->func);
+ free(filter);
+}
+#endif
+
+static u_int
+bpf_jit_accept_all(__unused u_char *p, __unused u_int wirelen,
+ __unused u_int buflen)
+{
+
+ return ((u_int)-1);
+}
diff --git a/rtems/freebsd/net/bpf_jitter.h b/rtems/freebsd/net/bpf_jitter.h
new file mode 100644
index 00000000..c0dd7e04
--- /dev/null
+++ b/rtems/freebsd/net/bpf_jitter.h
@@ -0,0 +1,84 @@
+/*-
+ * Copyright (C) 2002-2003 NetGroup, Politecnico di Torino (Italy)
+ * Copyright (C) 2005-2008 Jung-uk Kim <jkim@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the Politecnico di Torino nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _NET_BPF_JITTER_HH_
+#define _NET_BPF_JITTER_HH_
+
+#ifdef _KERNEL
+MALLOC_DECLARE(M_BPFJIT);
+#endif
+
+extern int bpf_jitter_enable;
+
+/*
+ * Prototype of a filtering function created by the jitter.
+ *
+ * The syntax and the meaning of the parameters are analogous to those of
+ * bpf_filter(). Notice that the filter is not among the parameters because
+ * it is hardwired in the function.
+ */
+typedef u_int (*bpf_filter_func)(u_char *, u_int, u_int);
+
+/* Structure describing a native filtering program created by the jitter. */
+typedef struct bpf_jit_filter {
+ /* The native filtering binary, in the form of a bpf_filter_func. */
+ bpf_filter_func func;
+
+ int mem[BPF_MEMWORDS]; /* Scratch memory */
+} bpf_jit_filter;
+
+/*
+ * BPF jitter, builds a machine function from a BPF program.
+ *
+ * param fp The BPF pseudo-assembly filter that will be translated
+ * into native code.
+ * param nins Number of instructions of the input filter.
+ * return The bpf_jit_filter structure containing the native filtering
+ * binary.
+ *
+ * bpf_jitter allocates the buffers for the new native filter and
+ * then translates the program pointed by fp calling bpf_jit_compile().
+ */
+bpf_jit_filter *bpf_jitter(struct bpf_insn *fp, int nins);
+
+/*
+ * Deletes a filtering function that was previously created by bpf_jitter().
+ *
+ * param filter The filter to destroy.
+ *
+ * This function frees the various buffers (code, memory, etc.) associated
+ * with a filtering function.
+ */
+void bpf_destroy_jit_filter(bpf_jit_filter *filter);
+
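+/*
+ * Illustrative sketch (not part of the FreeBSD sources): the typical
+ * life cycle of a JITed filter; 'insns'/'nins' stand for a previously
+ * validated BPF program.
+ */
+#if 0
+static u_int
+run_once(struct bpf_insn *insns, int nins, u_char *pkt, u_int pktlen)
+{
+ bpf_jit_filter *jf;
+ u_int slen = 0;
+
+ jf = bpf_jitter(insns, nins); /* translate to native code */
+ if (jf != NULL) {
+ slen = jf->func(pkt, pktlen, pktlen); /* 0 means drop */
+ bpf_destroy_jit_filter(jf); /* frees code and struct */
+ }
+ return (slen);
+}
+#endif
+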
+#endif /* _NET_BPF_JITTER_HH_ */
diff --git a/rtems/freebsd/net/bpf_zerocopy.h b/rtems/freebsd/net/bpf_zerocopy.h
new file mode 100644
index 00000000..455bd41c
--- /dev/null
+++ b/rtems/freebsd/net/bpf_zerocopy.h
@@ -0,0 +1,56 @@
+/*-
+ * Copyright (c) 2007 Seccuris Inc.
+ * All rights reserved.
+ *
+ * This software was developed by Robert N. M. Watson under contract to
+ * Seccuris Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _NET_BPF_ZEROCOPY_HH_
+#define _NET_BPF_ZEROCOPY_HH_
+
+#ifndef _KERNEL
+#error "no user-serviceable parts inside"
+#endif
+
+void bpf_zerocopy_append_bytes(struct bpf_d *d, caddr_t buf, u_int offset,
+ void *src, u_int len);
+void bpf_zerocopy_append_mbuf(struct bpf_d *d, caddr_t buf, u_int offset,
+ void *src, u_int len);
+void bpf_zerocopy_buffull(struct bpf_d *);
+void bpf_zerocopy_bufheld(struct bpf_d *);
+void bpf_zerocopy_buf_reclaimed(struct bpf_d *);
+int bpf_zerocopy_canfreebuf(struct bpf_d *);
+int bpf_zerocopy_canwritebuf(struct bpf_d *);
+void bpf_zerocopy_free(struct bpf_d *d);
+int bpf_zerocopy_ioctl_getzmax(struct thread *td, struct bpf_d *d,
+ size_t *i);
+int bpf_zerocopy_ioctl_rotzbuf(struct thread *td, struct bpf_d *d,
+ struct bpf_zbuf *bz);
+int bpf_zerocopy_ioctl_setzbuf(struct thread *td, struct bpf_d *d,
+ struct bpf_zbuf *bz);
+
+#endif /* !_NET_BPF_ZEROCOPY_HH_ */
diff --git a/rtems/freebsd/net/bpfdesc.h b/rtems/freebsd/net/bpfdesc.h
new file mode 100644
index 00000000..ae4e0124
--- /dev/null
+++ b/rtems/freebsd/net/bpfdesc.h
@@ -0,0 +1,149 @@
+/*-
+ * Copyright (c) 1990, 1991, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from the Stanford/CMU enet packet filter,
+ * (net/enet.c) distributed as part of 4.3BSD, and code contributed
+ * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence
+ * Berkeley Laboratory.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)bpfdesc.h 8.1 (Berkeley) 6/10/93
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _NET_BPFDESC_HH_
+#define _NET_BPFDESC_HH_
+
+#include <rtems/freebsd/sys/callout.h>
+#include <rtems/freebsd/sys/selinfo.h>
+#include <rtems/freebsd/sys/queue.h>
+#include <rtems/freebsd/sys/conf.h>
+#include <rtems/freebsd/net/if.h>
+
+/*
+ * Descriptor associated with each open bpf file.
+ */
+struct zbuf;
+struct bpf_d {
+ LIST_ENTRY(bpf_d) bd_next; /* Linked list of descriptors */
+ /*
+ * Buffer slots: two memory buffers store the incoming packets and
+ * rotate through three slots; sbuf is always occupied.
+ * sbuf (store) - Receive interrupt puts packets here.
+ * hbuf (hold) - When sbuf is full, put buffer here and
+ * wakeup read (replace sbuf with fbuf).
+ * fbuf (free) - When read is done, put buffer here.
+ * On receiving, if sbuf is full and fbuf is 0, packet is dropped.
+ */
+ caddr_t bd_sbuf; /* store slot */
+ caddr_t bd_hbuf; /* hold slot */
+ caddr_t bd_fbuf; /* free slot */
+ int bd_slen; /* current length of store buffer */
+ int bd_hlen; /* current length of hold buffer */
+
+ int bd_bufsize; /* absolute length of buffers */
+
+ struct bpf_if * bd_bif; /* interface descriptor */
+ u_long bd_rtout; /* Read timeout in 'ticks' */
+ struct bpf_insn *bd_rfilter; /* read filter code */
+ struct bpf_insn *bd_wfilter; /* write filter code */
+ void *bd_bfilter; /* binary filter code */
+ u_int64_t bd_rcount; /* number of packets received */
+ u_int64_t bd_dcount; /* number of packets dropped */
+
+ u_char bd_promisc; /* true if listening promiscuously */
+ u_char bd_state; /* idle, waiting, or timed out */
+ u_char bd_immediate; /* true to return on packet arrival */
+ int bd_hdrcmplt; /* false to fill in src lladdr automatically */
+ int bd_direction; /* select packet direction */
+ int bd_feedback; /* true to feed back sent packets */
+ int bd_async; /* non-zero if packet reception should generate signal */
+ int bd_sig; /* signal to send upon packet reception */
+ struct sigio * bd_sigio; /* information for async I/O */
+ struct selinfo bd_sel; /* bsd select info */
+ struct mtx bd_mtx; /* mutex for this descriptor */
+ struct callout bd_callout; /* for BPF timeouts with select */
+ struct label *bd_label; /* MAC label for descriptor */
+ u_int64_t bd_fcount; /* number of packets which matched filter */
+ pid_t bd_pid; /* PID which created descriptor */
+ int bd_locked; /* true if descriptor is locked */
+ u_int bd_bufmode; /* Current buffer mode. */
+ u_int64_t bd_wcount; /* number of packets written */
+ u_int64_t bd_wfcount; /* number of packets that matched write filter */
+ u_int64_t bd_wdcount; /* number of packets dropped during a write */
+ u_int64_t bd_zcopy; /* number of zero copy operations */
+ u_char bd_compat32; /* 32-bit stream on LP64 system */
+};
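+
+/*
+ * Illustrative sketch (not part of this header): the rotation the
+ * comment above describes, in the shape bpf(4) implementations use.
+ * When sbuf fills it becomes the hold buffer and the free buffer
+ * takes its place; read(2) later returns hbuf to the free slot.
+ */
+#if 0
+static void
+rotate_buffers(struct bpf_d *d)
+{
+
+ d->bd_hbuf = d->bd_sbuf; /* store becomes hold */
+ d->bd_hlen = d->bd_slen;
+ d->bd_sbuf = d->bd_fbuf; /* free becomes store */
+ d->bd_slen = 0;
+ d->bd_fbuf = NULL; /* free slot now empty */
+}
+#endif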
+
+/* Values for bd_state */
+#define BPF_IDLE 0 /* no select in progress */
+#define BPF_WAITING 1 /* waiting for read timeout in select */
+#define BPF_TIMED_OUT 2 /* read timeout has expired in select */
+
+#define BPFD_LOCK(bd) mtx_lock(&(bd)->bd_mtx)
+#define BPFD_UNLOCK(bd) mtx_unlock(&(bd)->bd_mtx)
+#define BPFD_LOCK_ASSERT(bd) mtx_assert(&(bd)->bd_mtx, MA_OWNED)
+
+/*
+ * External representation of the bpf descriptor
+ */
+struct xbpf_d {
+ u_int bd_structsize; /* Size of this structure. */
+ u_char bd_promisc;
+ u_char bd_immediate;
+ u_char __bd_pad[6];
+ int bd_hdrcmplt;
+ int bd_direction;
+ int bd_feedback;
+ int bd_async;
+ u_int64_t bd_rcount;
+ u_int64_t bd_dcount;
+ u_int64_t bd_fcount;
+ int bd_sig;
+ int bd_slen;
+ int bd_hlen;
+ int bd_bufsize;
+ pid_t bd_pid;
+ char bd_ifname[IFNAMSIZ];
+ int bd_locked;
+ u_int64_t bd_wcount;
+ u_int64_t bd_wfcount;
+ u_int64_t bd_wdcount;
+ u_int64_t bd_zcopy;
+ int bd_bufmode;
+ /*
+ * Reserve four 64-bit unsigned integers for future expansion so we do
+ * not have to worry about breaking the ABI.
+ */
+ u_int64_t bd_spare[4];
+};
+
+#define BPFIF_LOCK(bif) mtx_lock(&(bif)->bif_mtx)
+#define BPFIF_UNLOCK(bif) mtx_unlock(&(bif)->bif_mtx)
+
+#endif
diff --git a/rtems/freebsd/net/bridgestp.c b/rtems/freebsd/net/bridgestp.c
new file mode 100644
index 00000000..b4b0219f
--- /dev/null
+++ b/rtems/freebsd/net/bridgestp.c
@@ -0,0 +1,2250 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/* $NetBSD: bridgestp.c,v 1.5 2003/11/28 08:56:48 keihan Exp $ */
+
+/*
+ * Copyright (c) 2000 Jason L. Wright (jason@thought.net)
+ * Copyright (c) 2006 Andrew Thompson (thompsa@FreeBSD.org)
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * OpenBSD: bridgestp.c,v 1.5 2001/03/22 03:48:29 jason Exp
+ */
+
+/*
+ * Implementation of the spanning tree protocol as defined in
+ * ISO/IEC 802.1D-2004, June 9, 2004.
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/sockio.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/callout.h>
+#include <rtems/freebsd/sys/module.h>
+#include <rtems/freebsd/sys/proc.h>
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/mutex.h>
+#include <rtems/freebsd/sys/taskqueue.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/if_dl.h>
+#include <rtems/freebsd/net/if_types.h>
+#include <rtems/freebsd/net/if_llc.h>
+#include <rtems/freebsd/net/if_media.h>
+#include <rtems/freebsd/net/vnet.h>
+
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/in_systm.h>
+#include <rtems/freebsd/netinet/in_var.h>
+#include <rtems/freebsd/netinet/if_ether.h>
+#include <rtems/freebsd/net/bridgestp.h>
+
+#ifdef BRIDGESTP_DEBUG
+#define DPRINTF(fmt, arg...) printf("bstp: " fmt, ##arg)
+#else
+#define DPRINTF(fmt, arg...) (void)0
+#endif
+
+#define PV2ADDR(pv, eaddr) do { \
+ eaddr[0] = pv >> 40; \
+ eaddr[1] = pv >> 32; \
+ eaddr[2] = pv >> 24; \
+ eaddr[3] = pv >> 16; \
+ eaddr[4] = pv >> 8; \
+ eaddr[5] = pv >> 0; \
+} while (0)
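+
+/*
+ * PV2ADDR() unpacks the low 48 bits of a 64-bit bridge identifier into
+ * a 6-byte Ethernet address; the high 16 bits hold the bridge priority,
+ * which the callers transmit separately via htons(pv >> 48).
+ */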
+
+#define INFO_BETTER 1
+#define INFO_SAME 0
+#define INFO_WORSE -1
+
+const uint8_t bstp_etheraddr[] = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
+
+LIST_HEAD(, bstp_state) bstp_list;
+static struct mtx bstp_list_mtx;
+
+static void bstp_transmit(struct bstp_state *, struct bstp_port *);
+static void bstp_transmit_bpdu(struct bstp_state *, struct bstp_port *);
+static void bstp_transmit_tcn(struct bstp_state *, struct bstp_port *);
+static void bstp_decode_bpdu(struct bstp_port *, struct bstp_cbpdu *,
+ struct bstp_config_unit *);
+static void bstp_send_bpdu(struct bstp_state *, struct bstp_port *,
+ struct bstp_cbpdu *);
+static int bstp_pdu_flags(struct bstp_port *);
+static void bstp_received_stp(struct bstp_state *, struct bstp_port *,
+ struct mbuf **, struct bstp_tbpdu *);
+static void bstp_received_rstp(struct bstp_state *, struct bstp_port *,
+ struct mbuf **, struct bstp_tbpdu *);
+static void bstp_received_tcn(struct bstp_state *, struct bstp_port *,
+ struct bstp_tcn_unit *);
+static void bstp_received_bpdu(struct bstp_state *, struct bstp_port *,
+ struct bstp_config_unit *);
+static int bstp_pdu_rcvtype(struct bstp_port *, struct bstp_config_unit *);
+static int bstp_pdu_bettersame(struct bstp_port *, int);
+static int bstp_info_cmp(struct bstp_pri_vector *,
+ struct bstp_pri_vector *);
+static int bstp_info_superior(struct bstp_pri_vector *,
+ struct bstp_pri_vector *);
+static void bstp_assign_roles(struct bstp_state *);
+static void bstp_update_roles(struct bstp_state *, struct bstp_port *);
+static void bstp_update_state(struct bstp_state *, struct bstp_port *);
+static void bstp_update_tc(struct bstp_port *);
+static void bstp_update_info(struct bstp_port *);
+static void bstp_set_other_tcprop(struct bstp_port *);
+static void bstp_set_all_reroot(struct bstp_state *);
+static void bstp_set_all_sync(struct bstp_state *);
+static void bstp_set_port_state(struct bstp_port *, int);
+static void bstp_set_port_role(struct bstp_port *, int);
+static void bstp_set_port_proto(struct bstp_port *, int);
+static void bstp_set_port_tc(struct bstp_port *, int);
+static void bstp_set_timer_tc(struct bstp_port *);
+static void bstp_set_timer_msgage(struct bstp_port *);
+static int bstp_rerooted(struct bstp_state *, struct bstp_port *);
+static uint32_t bstp_calc_path_cost(struct bstp_port *);
+static void bstp_notify_state(void *, int);
+static void bstp_notify_rtage(void *, int);
+static void bstp_ifupdstatus(struct bstp_state *, struct bstp_port *);
+static void bstp_enable_port(struct bstp_state *, struct bstp_port *);
+static void bstp_disable_port(struct bstp_state *, struct bstp_port *);
+static void bstp_tick(void *);
+static void bstp_timer_start(struct bstp_timer *, uint16_t);
+static void bstp_timer_stop(struct bstp_timer *);
+static void bstp_timer_latch(struct bstp_timer *);
+static int bstp_timer_expired(struct bstp_timer *);
+static void bstp_hello_timer_expiry(struct bstp_state *,
+ struct bstp_port *);
+static void bstp_message_age_expiry(struct bstp_state *,
+ struct bstp_port *);
+static void bstp_migrate_delay_expiry(struct bstp_state *,
+ struct bstp_port *);
+static void bstp_edge_delay_expiry(struct bstp_state *,
+ struct bstp_port *);
+static int bstp_addr_cmp(const uint8_t *, const uint8_t *);
+static int bstp_same_bridgeid(uint64_t, uint64_t);
+static void bstp_reinit(struct bstp_state *);
+
+static void
+bstp_transmit(struct bstp_state *bs, struct bstp_port *bp)
+{
+ if (bs->bs_running == 0)
+ return;
+
+ /*
+ * A PDU can only be sent if we have tx quota left and the
+ * hello timer is running.
+ */
+ if (bp->bp_hello_timer.active == 0) {
+ /* Test if it needs to be reset */
+ bstp_hello_timer_expiry(bs, bp);
+ return;
+ }
+ if (bp->bp_txcount > bs->bs_txholdcount)
+ /* Ran out of karma */
+ return;
+
+ if (bp->bp_protover == BSTP_PROTO_RSTP) {
+ bstp_transmit_bpdu(bs, bp);
+ bp->bp_tc_ack = 0;
+ } else { /* STP */
+ switch (bp->bp_role) {
+ case BSTP_ROLE_DESIGNATED:
+ bstp_transmit_bpdu(bs, bp);
+ bp->bp_tc_ack = 0;
+ break;
+
+ case BSTP_ROLE_ROOT:
+ bstp_transmit_tcn(bs, bp);
+ break;
+ }
+ }
+ bstp_timer_start(&bp->bp_hello_timer, bp->bp_desg_htime);
+ bp->bp_flags &= ~BSTP_PORT_NEWINFO;
+}
+
+static void
+bstp_transmit_bpdu(struct bstp_state *bs, struct bstp_port *bp)
+{
+ struct bstp_cbpdu bpdu;
+
+ BSTP_LOCK_ASSERT(bs);
+
+ bpdu.cbu_rootpri = htons(bp->bp_desg_pv.pv_root_id >> 48);
+ PV2ADDR(bp->bp_desg_pv.pv_root_id, bpdu.cbu_rootaddr);
+
+ bpdu.cbu_rootpathcost = htonl(bp->bp_desg_pv.pv_cost);
+
+ bpdu.cbu_bridgepri = htons(bp->bp_desg_pv.pv_dbridge_id >> 48);
+ PV2ADDR(bp->bp_desg_pv.pv_dbridge_id, bpdu.cbu_bridgeaddr);
+
+ bpdu.cbu_portid = htons(bp->bp_port_id);
+ bpdu.cbu_messageage = htons(bp->bp_desg_msg_age);
+ bpdu.cbu_maxage = htons(bp->bp_desg_max_age);
+ bpdu.cbu_hellotime = htons(bp->bp_desg_htime);
+ bpdu.cbu_forwarddelay = htons(bp->bp_desg_fdelay);
+
+ bpdu.cbu_flags = bstp_pdu_flags(bp);
+
+ switch (bp->bp_protover) {
+ case BSTP_PROTO_STP:
+ bpdu.cbu_bpdutype = BSTP_MSGTYPE_CFG;
+ break;
+
+ case BSTP_PROTO_RSTP:
+ bpdu.cbu_bpdutype = BSTP_MSGTYPE_RSTP;
+ break;
+ }
+
+ bstp_send_bpdu(bs, bp, &bpdu);
+}
+
+static void
+bstp_transmit_tcn(struct bstp_state *bs, struct bstp_port *bp)
+{
+ struct bstp_tbpdu bpdu;
+ struct ifnet *ifp = bp->bp_ifp;
+ struct ether_header *eh;
+ struct mbuf *m;
+
+ KASSERT(bp == bs->bs_root_port, ("%s: bad root port\n", __func__));
+
+ if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
+ return;
+
+ MGETHDR(m, M_DONTWAIT, MT_DATA);
+ if (m == NULL)
+ return;
+
+ m->m_pkthdr.rcvif = ifp;
+ m->m_pkthdr.len = sizeof(*eh) + sizeof(bpdu);
+ m->m_len = m->m_pkthdr.len;
+
+ eh = mtod(m, struct ether_header *);
+
+ memcpy(eh->ether_shost, IF_LLADDR(ifp), ETHER_ADDR_LEN);
+ memcpy(eh->ether_dhost, bstp_etheraddr, ETHER_ADDR_LEN);
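+ /* 802.3 framing: the length/type field carries the LLC payload length */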
+ eh->ether_type = htons(sizeof(bpdu));
+
+ bpdu.tbu_ssap = bpdu.tbu_dsap = LLC_8021D_LSAP;
+ bpdu.tbu_ctl = LLC_UI;
+ bpdu.tbu_protoid = 0;
+ bpdu.tbu_protover = 0;
+ bpdu.tbu_bpdutype = BSTP_MSGTYPE_TCN;
+
+ memcpy(mtod(m, caddr_t) + sizeof(*eh), &bpdu, sizeof(bpdu));
+
+ bp->bp_txcount++;
+ ifp->if_transmit(ifp, m);
+}
+
+static void
+bstp_decode_bpdu(struct bstp_port *bp, struct bstp_cbpdu *cpdu,
+ struct bstp_config_unit *cu)
+{
+ int flags;
+
+ cu->cu_pv.pv_root_id =
+ (((uint64_t)ntohs(cpdu->cbu_rootpri)) << 48) |
+ (((uint64_t)cpdu->cbu_rootaddr[0]) << 40) |
+ (((uint64_t)cpdu->cbu_rootaddr[1]) << 32) |
+ (((uint64_t)cpdu->cbu_rootaddr[2]) << 24) |
+ (((uint64_t)cpdu->cbu_rootaddr[3]) << 16) |
+ (((uint64_t)cpdu->cbu_rootaddr[4]) << 8) |
+ (((uint64_t)cpdu->cbu_rootaddr[5]) << 0);
+
+ cu->cu_pv.pv_dbridge_id =
+ (((uint64_t)ntohs(cpdu->cbu_bridgepri)) << 48) |
+ (((uint64_t)cpdu->cbu_bridgeaddr[0]) << 40) |
+ (((uint64_t)cpdu->cbu_bridgeaddr[1]) << 32) |
+ (((uint64_t)cpdu->cbu_bridgeaddr[2]) << 24) |
+ (((uint64_t)cpdu->cbu_bridgeaddr[3]) << 16) |
+ (((uint64_t)cpdu->cbu_bridgeaddr[4]) << 8) |
+ (((uint64_t)cpdu->cbu_bridgeaddr[5]) << 0);
+
+ cu->cu_pv.pv_cost = ntohl(cpdu->cbu_rootpathcost);
+ cu->cu_message_age = ntohs(cpdu->cbu_messageage);
+ cu->cu_max_age = ntohs(cpdu->cbu_maxage);
+ cu->cu_hello_time = ntohs(cpdu->cbu_hellotime);
+ cu->cu_forward_delay = ntohs(cpdu->cbu_forwarddelay);
+ cu->cu_pv.pv_dport_id = ntohs(cpdu->cbu_portid);
+ cu->cu_pv.pv_port_id = bp->bp_port_id;
+ cu->cu_message_type = cpdu->cbu_bpdutype;
+
+ /* Strip off unused flags in STP mode */
+ flags = cpdu->cbu_flags;
+ switch (cpdu->cbu_protover) {
+ case BSTP_PROTO_STP:
+ flags &= BSTP_PDU_STPMASK;
+ /* A STP BPDU explicitly conveys a Designated Port */
+ cu->cu_role = BSTP_ROLE_DESIGNATED;
+ break;
+
+ case BSTP_PROTO_RSTP:
+ flags &= BSTP_PDU_RSTPMASK;
+ break;
+ }
+
+ cu->cu_topology_change_ack =
+ (flags & BSTP_PDU_F_TCA) ? 1 : 0;
+ cu->cu_proposal =
+ (flags & BSTP_PDU_F_P) ? 1 : 0;
+ cu->cu_agree =
+ (flags & BSTP_PDU_F_A) ? 1 : 0;
+ cu->cu_learning =
+ (flags & BSTP_PDU_F_L) ? 1 : 0;
+ cu->cu_forwarding =
+ (flags & BSTP_PDU_F_F) ? 1 : 0;
+ cu->cu_topology_change =
+ (flags & BSTP_PDU_F_TC) ? 1 : 0;
+
+ switch ((flags & BSTP_PDU_PRMASK) >> BSTP_PDU_PRSHIFT) {
+ case BSTP_PDU_F_ROOT:
+ cu->cu_role = BSTP_ROLE_ROOT;
+ break;
+ case BSTP_PDU_F_ALT:
+ cu->cu_role = BSTP_ROLE_ALTERNATE;
+ break;
+ case BSTP_PDU_F_DESG:
+ cu->cu_role = BSTP_ROLE_DESIGNATED;
+ break;
+ }
+}
+
+static void
+bstp_send_bpdu(struct bstp_state *bs, struct bstp_port *bp,
+ struct bstp_cbpdu *bpdu)
+{
+ struct ifnet *ifp;
+ struct mbuf *m;
+ struct ether_header *eh;
+
+ BSTP_LOCK_ASSERT(bs);
+
+ ifp = bp->bp_ifp;
+
+ if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
+ return;
+
+ MGETHDR(m, M_DONTWAIT, MT_DATA);
+ if (m == NULL)
+ return;
+
+ eh = mtod(m, struct ether_header *);
+
+ bpdu->cbu_ssap = bpdu->cbu_dsap = LLC_8021D_LSAP;
+ bpdu->cbu_ctl = LLC_UI;
+ bpdu->cbu_protoid = htons(BSTP_PROTO_ID);
+
+ memcpy(eh->ether_shost, IF_LLADDR(ifp), ETHER_ADDR_LEN);
+ memcpy(eh->ether_dhost, bstp_etheraddr, ETHER_ADDR_LEN);
+
+ switch (bpdu->cbu_bpdutype) {
+ case BSTP_MSGTYPE_CFG:
+ bpdu->cbu_protover = BSTP_PROTO_STP;
+ m->m_pkthdr.len = sizeof(*eh) + BSTP_BPDU_STP_LEN;
+ eh->ether_type = htons(BSTP_BPDU_STP_LEN);
+ memcpy(mtod(m, caddr_t) + sizeof(*eh), bpdu,
+ BSTP_BPDU_STP_LEN);
+ break;
+
+ case BSTP_MSGTYPE_RSTP:
+ bpdu->cbu_protover = BSTP_PROTO_RSTP;
+ bpdu->cbu_versionlen = htons(0);
+ m->m_pkthdr.len = sizeof(*eh) + BSTP_BPDU_RSTP_LEN;
+ eh->ether_type = htons(BSTP_BPDU_RSTP_LEN);
+ memcpy(mtod(m, caddr_t) + sizeof(*eh), bpdu,
+ BSTP_BPDU_RSTP_LEN);
+ break;
+
+ default:
+ panic("not implemented");
+ }
+ m->m_pkthdr.rcvif = ifp;
+ m->m_len = m->m_pkthdr.len;
+
+ bp->bp_txcount++;
+ ifp->if_transmit(ifp, m);
+}
+
+static int
+bstp_pdu_flags(struct bstp_port *bp)
+{
+ int flags = 0;
+
+ if (bp->bp_proposing && bp->bp_state != BSTP_IFSTATE_FORWARDING)
+ flags |= BSTP_PDU_F_P;
+
+ if (bp->bp_agree)
+ flags |= BSTP_PDU_F_A;
+
+ if (bp->bp_tc_timer.active)
+ flags |= BSTP_PDU_F_TC;
+
+ if (bp->bp_tc_ack)
+ flags |= BSTP_PDU_F_TCA;
+
+ switch (bp->bp_state) {
+ case BSTP_IFSTATE_LEARNING:
+ flags |= BSTP_PDU_F_L;
+ break;
+
+ case BSTP_IFSTATE_FORWARDING:
+ flags |= (BSTP_PDU_F_L | BSTP_PDU_F_F);
+ break;
+ }
+
+ switch (bp->bp_role) {
+ case BSTP_ROLE_ROOT:
+ flags |=
+ (BSTP_PDU_F_ROOT << BSTP_PDU_PRSHIFT);
+ break;
+
+ case BSTP_ROLE_ALTERNATE:
+ case BSTP_ROLE_BACKUP: /* fall through */
+ flags |=
+ (BSTP_PDU_F_ALT << BSTP_PDU_PRSHIFT);
+ break;
+
+ case BSTP_ROLE_DESIGNATED:
+ flags |=
+ (BSTP_PDU_F_DESG << BSTP_PDU_PRSHIFT);
+ break;
+ }
+
+ /* Strip off unused flags in either mode */
+ switch (bp->bp_protover) {
+ case BSTP_PROTO_STP:
+ flags &= BSTP_PDU_STPMASK;
+ break;
+ case BSTP_PROTO_RSTP:
+ flags &= BSTP_PDU_RSTPMASK;
+ break;
+ }
+ return (flags);
+}
+
+struct mbuf *
+bstp_input(struct bstp_port *bp, struct ifnet *ifp, struct mbuf *m)
+{
+ struct bstp_state *bs = bp->bp_bs;
+ struct ether_header *eh;
+ struct bstp_tbpdu tpdu;
+ uint16_t len;
+
+ if (bp->bp_active == 0) {
+ m_freem(m);
+ return (NULL);
+ }
+
+ BSTP_LOCK(bs);
+
+ eh = mtod(m, struct ether_header *);
+
+ len = ntohs(eh->ether_type);
+ if (len < sizeof(tpdu))
+ goto out;
+
+ m_adj(m, ETHER_HDR_LEN);
+
+ if (m->m_pkthdr.len > len)
+ m_adj(m, len - m->m_pkthdr.len);
+ if (m->m_len < sizeof(tpdu) &&
+ (m = m_pullup(m, sizeof(tpdu))) == NULL)
+ goto out;
+
+ memcpy(&tpdu, mtod(m, caddr_t), sizeof(tpdu));
+
+ /* basic packet checks */
+ if (tpdu.tbu_dsap != LLC_8021D_LSAP ||
+ tpdu.tbu_ssap != LLC_8021D_LSAP ||
+ tpdu.tbu_ctl != LLC_UI)
+ goto out;
+ if (tpdu.tbu_protoid != BSTP_PROTO_ID)
+ goto out;
+
+ /*
+ * We can treat later versions of the PDU as the same as the maximum
+ * version we implement. All additional parameters/flags are ignored.
+ */
+ if (tpdu.tbu_protover > BSTP_PROTO_MAX)
+ tpdu.tbu_protover = BSTP_PROTO_MAX;
+
+ if (tpdu.tbu_protover != bp->bp_protover) {
+ /*
+ * Wait for the migration delay timer to expire before changing
+ * protocol version to avoid flip-flops.
+ */
+ if (bp->bp_flags & BSTP_PORT_CANMIGRATE)
+ bstp_set_port_proto(bp, tpdu.tbu_protover);
+ else
+ goto out;
+ }
+
+ /* Clear operedge upon receiving a PDU on the port */
+ bp->bp_operedge = 0;
+ bstp_timer_start(&bp->bp_edge_delay_timer,
+ BSTP_DEFAULT_MIGRATE_DELAY);
+
+ switch (tpdu.tbu_protover) {
+ case BSTP_PROTO_STP:
+ bstp_received_stp(bs, bp, &m, &tpdu);
+ break;
+
+ case BSTP_PROTO_RSTP:
+ bstp_received_rstp(bs, bp, &m, &tpdu);
+ break;
+ }
+out:
+ BSTP_UNLOCK(bs);
+ if (m)
+ m_freem(m);
+ return (NULL);
+}
+
+static void
+bstp_received_stp(struct bstp_state *bs, struct bstp_port *bp,
+ struct mbuf **mp, struct bstp_tbpdu *tpdu)
+{
+ struct bstp_cbpdu cpdu;
+ struct bstp_config_unit *cu = &bp->bp_msg_cu;
+ struct bstp_tcn_unit tu;
+
+ switch (tpdu->tbu_bpdutype) {
+ case BSTP_MSGTYPE_TCN:
+ tu.tu_message_type = tpdu->tbu_bpdutype;
+ bstp_received_tcn(bs, bp, &tu);
+ break;
+ case BSTP_MSGTYPE_CFG:
+ if ((*mp)->m_len < BSTP_BPDU_STP_LEN &&
+ (*mp = m_pullup(*mp, BSTP_BPDU_STP_LEN)) == NULL)
+ return;
+ memcpy(&cpdu, mtod(*mp, caddr_t), BSTP_BPDU_STP_LEN);
+
+ bstp_decode_bpdu(bp, &cpdu, cu);
+ bstp_received_bpdu(bs, bp, cu);
+ break;
+ }
+}
+
+static void
+bstp_received_rstp(struct bstp_state *bs, struct bstp_port *bp,
+ struct mbuf **mp, struct bstp_tbpdu *tpdu)
+{
+ struct bstp_cbpdu cpdu;
+ struct bstp_config_unit *cu = &bp->bp_msg_cu;
+
+ if (tpdu->tbu_bpdutype != BSTP_MSGTYPE_RSTP)
+ return;
+
+ if ((*mp)->m_len < BSTP_BPDU_RSTP_LEN &&
+ (*mp = m_pullup(*mp, BSTP_BPDU_RSTP_LEN)) == NULL)
+ return;
+ memcpy(&cpdu, mtod(*mp, caddr_t), BSTP_BPDU_RSTP_LEN);
+
+ bstp_decode_bpdu(bp, &cpdu, cu);
+ bstp_received_bpdu(bs, bp, cu);
+}
+
+static void
+bstp_received_tcn(struct bstp_state *bs, struct bstp_port *bp,
+ struct bstp_tcn_unit *tcn)
+{
+ bp->bp_rcvdtcn = 1;
+ bstp_update_tc(bp);
+}
+
+static void
+bstp_received_bpdu(struct bstp_state *bs, struct bstp_port *bp,
+ struct bstp_config_unit *cu)
+{
+ int type;
+
+ BSTP_LOCK_ASSERT(bs);
+
+ /* We need to have transitioned to INFO_MINE before proceeding */
+ switch (bp->bp_infois) {
+ case BSTP_INFO_DISABLED:
+ case BSTP_INFO_AGED:
+ return;
+ }
+
+ type = bstp_pdu_rcvtype(bp, cu);
+
+ switch (type) {
+ case BSTP_PDU_SUPERIOR:
+ bs->bs_allsynced = 0;
+ bp->bp_agreed = 0;
+ bp->bp_proposing = 0;
+
+ if (cu->cu_proposal && cu->cu_forwarding == 0)
+ bp->bp_proposed = 1;
+ if (cu->cu_topology_change)
+ bp->bp_rcvdtc = 1;
+ if (cu->cu_topology_change_ack)
+ bp->bp_rcvdtca = 1;
+
+ if (bp->bp_agree &&
+ !bstp_pdu_bettersame(bp, BSTP_INFO_RECEIVED))
+ bp->bp_agree = 0;
+
+ /* copy the received priority and timers to the port */
+ bp->bp_port_pv = cu->cu_pv;
+ bp->bp_port_msg_age = cu->cu_message_age;
+ bp->bp_port_max_age = cu->cu_max_age;
+ bp->bp_port_fdelay = cu->cu_forward_delay;
+ bp->bp_port_htime =
+ (cu->cu_hello_time > BSTP_MIN_HELLO_TIME ?
+ cu->cu_hello_time : BSTP_MIN_HELLO_TIME);
+
+ /* set expiry for the new info */
+ bstp_set_timer_msgage(bp);
+
+ bp->bp_infois = BSTP_INFO_RECEIVED;
+ bstp_assign_roles(bs);
+ break;
+
+ case BSTP_PDU_REPEATED:
+ if (cu->cu_proposal && cu->cu_forwarding == 0)
+ bp->bp_proposed = 1;
+ if (cu->cu_topology_change)
+ bp->bp_rcvdtc = 1;
+ if (cu->cu_topology_change_ack)
+ bp->bp_rcvdtca = 1;
+
+ /* rearm the age timer */
+ bstp_set_timer_msgage(bp);
+ break;
+
+ case BSTP_PDU_INFERIOR:
+ if (cu->cu_learning) {
+ bp->bp_agreed = 1;
+ bp->bp_proposing = 0;
+ }
+ break;
+
+ case BSTP_PDU_INFERIORALT:
+ /*
+ * Only point-to-point links are allowed fast
+ * transitions to forwarding.
+ */
+ if (cu->cu_agree && bp->bp_ptp_link) {
+ bp->bp_agreed = 1;
+ bp->bp_proposing = 0;
+ } else
+ bp->bp_agreed = 0;
+
+ if (cu->cu_topology_change)
+ bp->bp_rcvdtc = 1;
+ if (cu->cu_topology_change_ack)
+ bp->bp_rcvdtca = 1;
+ break;
+
+ case BSTP_PDU_OTHER:
+ return; /* do nothing */
+ }
+ /* update the state machines with the new data */
+ bstp_update_state(bs, bp);
+}
+
+static int
+bstp_pdu_rcvtype(struct bstp_port *bp, struct bstp_config_unit *cu)
+{
+ int type;
+
+ /* default return type */
+ type = BSTP_PDU_OTHER;
+
+ switch (cu->cu_role) {
+ case BSTP_ROLE_DESIGNATED:
+ if (bstp_info_superior(&bp->bp_port_pv, &cu->cu_pv))
+ /* bpdu priority is superior */
+ type = BSTP_PDU_SUPERIOR;
+ else if (bstp_info_cmp(&bp->bp_port_pv, &cu->cu_pv) ==
+ INFO_SAME) {
+ if (bp->bp_port_msg_age != cu->cu_message_age ||
+ bp->bp_port_max_age != cu->cu_max_age ||
+ bp->bp_port_fdelay != cu->cu_forward_delay ||
+ bp->bp_port_htime != cu->cu_hello_time)
+ /* bpdu priority is equal and timers differ */
+ type = BSTP_PDU_SUPERIOR;
+ else
+ /* bpdu is equal */
+ type = BSTP_PDU_REPEATED;
+ } else
+ /* bpdu priority is worse */
+ type = BSTP_PDU_INFERIOR;
+
+ break;
+
+ case BSTP_ROLE_ROOT:
+ case BSTP_ROLE_ALTERNATE:
+ case BSTP_ROLE_BACKUP:
+ if (bstp_info_cmp(&bp->bp_port_pv, &cu->cu_pv) <= INFO_SAME)
+ /*
+ * not a designated port and priority is the same or
+ * worse
+ */
+ type = BSTP_PDU_INFERIORALT;
+ break;
+ }
+
+ return (type);
+}
+
+static int
+bstp_pdu_bettersame(struct bstp_port *bp, int newinfo)
+{
+ if (newinfo == BSTP_INFO_RECEIVED &&
+ bp->bp_infois == BSTP_INFO_RECEIVED &&
+ bstp_info_cmp(&bp->bp_port_pv, &bp->bp_msg_cu.cu_pv) >= INFO_SAME)
+ return (1);
+
+ if (newinfo == BSTP_INFO_MINE &&
+ bp->bp_infois == BSTP_INFO_MINE &&
+ bstp_info_cmp(&bp->bp_port_pv, &bp->bp_desg_pv) >= INFO_SAME)
+ return (1);
+
+ return (0);
+}
+
+static int
+bstp_info_cmp(struct bstp_pri_vector *pv,
+ struct bstp_pri_vector *cpv)
+{
+ if (cpv->pv_root_id < pv->pv_root_id)
+ return (INFO_BETTER);
+ if (cpv->pv_root_id > pv->pv_root_id)
+ return (INFO_WORSE);
+
+ if (cpv->pv_cost < pv->pv_cost)
+ return (INFO_BETTER);
+ if (cpv->pv_cost > pv->pv_cost)
+ return (INFO_WORSE);
+
+ if (cpv->pv_dbridge_id < pv->pv_dbridge_id)
+ return (INFO_BETTER);
+ if (cpv->pv_dbridge_id > pv->pv_dbridge_id)
+ return (INFO_WORSE);
+
+ if (cpv->pv_dport_id < pv->pv_dport_id)
+ return (INFO_BETTER);
+ if (cpv->pv_dport_id > pv->pv_dport_id)
+ return (INFO_WORSE);
+
+ return (INFO_SAME);
+}
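+
+/*
+ * Worked example: two vectors that differ only in root path cost
+ * compare by pv_cost, so the numerically smaller cost is INFO_BETTER;
+ * lower values win at every step, per 802.1D-2004 priority vector
+ * ordering.
+ */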
+
+/*
+ * This message priority vector is superior to the port priority vector and
+ * will replace it if, and only if, the message priority vector is better than
+ * the port priority vector, or the message has been transmitted from the same
+ * designated bridge and designated port as the port priority vector.
+ */
+static int
+bstp_info_superior(struct bstp_pri_vector *pv,
+ struct bstp_pri_vector *cpv)
+{
+ if (bstp_info_cmp(pv, cpv) == INFO_BETTER ||
+ (bstp_same_bridgeid(pv->pv_dbridge_id, cpv->pv_dbridge_id) &&
+ (cpv->pv_dport_id & 0xfff) == (pv->pv_dport_id & 0xfff)))
+ return (1);
+ return (0);
+}
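+
+/*
+ * The 0xfff mask above compares only the 12-bit port number within the
+ * designated port identifier, ignoring the 4-bit port priority field.
+ */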
+
+static void
+bstp_assign_roles(struct bstp_state *bs)
+{
+ struct bstp_port *bp, *rbp = NULL;
+ struct bstp_pri_vector pv;
+
+ /* default to our priority vector */
+ bs->bs_root_pv = bs->bs_bridge_pv;
+ bs->bs_root_msg_age = 0;
+ bs->bs_root_max_age = bs->bs_bridge_max_age;
+ bs->bs_root_fdelay = bs->bs_bridge_fdelay;
+ bs->bs_root_htime = bs->bs_bridge_htime;
+ bs->bs_root_port = NULL;
+
+ /* check if any received info supersedes us */
+ LIST_FOREACH(bp, &bs->bs_bplist, bp_next) {
+ if (bp->bp_infois != BSTP_INFO_RECEIVED)
+ continue;
+
+ pv = bp->bp_port_pv;
+ pv.pv_cost += bp->bp_path_cost;
+
+ /*
+ * The root priority vector is the best of the set comprising
+ * the bridge priority vector plus all root path priority
+ * vectors whose bridge address is not equal to us.
+ */
+ if (bstp_same_bridgeid(pv.pv_dbridge_id,
+ bs->bs_bridge_pv.pv_dbridge_id) == 0 &&
+ bstp_info_cmp(&bs->bs_root_pv, &pv) == INFO_BETTER) {
+ /* the port vector replaces the root */
+ bs->bs_root_pv = pv;
+ bs->bs_root_msg_age = bp->bp_port_msg_age +
+ BSTP_MESSAGE_AGE_INCR;
+ bs->bs_root_max_age = bp->bp_port_max_age;
+ bs->bs_root_fdelay = bp->bp_port_fdelay;
+ bs->bs_root_htime = bp->bp_port_htime;
+ rbp = bp;
+ }
+ }
+
+ LIST_FOREACH(bp, &bs->bs_bplist, bp_next) {
+ /* calculate the port designated vector */
+ bp->bp_desg_pv.pv_root_id = bs->bs_root_pv.pv_root_id;
+ bp->bp_desg_pv.pv_cost = bs->bs_root_pv.pv_cost;
+ bp->bp_desg_pv.pv_dbridge_id = bs->bs_bridge_pv.pv_dbridge_id;
+ bp->bp_desg_pv.pv_dport_id = bp->bp_port_id;
+ bp->bp_desg_pv.pv_port_id = bp->bp_port_id;
+
+ /* calculate designated times */
+ bp->bp_desg_msg_age = bs->bs_root_msg_age;
+ bp->bp_desg_max_age = bs->bs_root_max_age;
+ bp->bp_desg_fdelay = bs->bs_root_fdelay;
+ bp->bp_desg_htime = bs->bs_bridge_htime;
+
+ switch (bp->bp_infois) {
+ case BSTP_INFO_DISABLED:
+ bstp_set_port_role(bp, BSTP_ROLE_DISABLED);
+ break;
+
+ case BSTP_INFO_AGED:
+ bstp_set_port_role(bp, BSTP_ROLE_DESIGNATED);
+ bstp_update_info(bp);
+ break;
+
+ case BSTP_INFO_MINE:
+ bstp_set_port_role(bp, BSTP_ROLE_DESIGNATED);
+ /* update the port info if stale */
+ if (bstp_info_cmp(&bp->bp_port_pv,
+ &bp->bp_desg_pv) != INFO_SAME ||
+ (rbp != NULL &&
+ (bp->bp_port_msg_age != rbp->bp_port_msg_age ||
+ bp->bp_port_max_age != rbp->bp_port_max_age ||
+ bp->bp_port_fdelay != rbp->bp_port_fdelay ||
+ bp->bp_port_htime != rbp->bp_port_htime)))
+ bstp_update_info(bp);
+ break;
+
+ case BSTP_INFO_RECEIVED:
+ if (bp == rbp) {
+ /*
+ * root priority is derived from this
+ * port, make it the root port.
+ */
+ bstp_set_port_role(bp, BSTP_ROLE_ROOT);
+ bs->bs_root_port = bp;
+ } else if (bstp_info_cmp(&bp->bp_port_pv,
+ &bp->bp_desg_pv) == INFO_BETTER) {
+ /*
+ * the port priority is lower than the root
+ * port.
+ */
+ bstp_set_port_role(bp, BSTP_ROLE_DESIGNATED);
+ bstp_update_info(bp);
+ } else {
+ if (bstp_same_bridgeid(
+ bp->bp_port_pv.pv_dbridge_id,
+ bs->bs_bridge_pv.pv_dbridge_id)) {
+ /*
+ * the designated bridge refers to
+ * another port on this bridge.
+ */
+ bstp_set_port_role(bp,
+ BSTP_ROLE_BACKUP);
+ } else {
+ /*
+ * the port is an inferior path to the
+ * root bridge.
+ */
+ bstp_set_port_role(bp,
+ BSTP_ROLE_ALTERNATE);
+ }
+ }
+ break;
+ }
+ }
+}
+
+static void
+bstp_update_state(struct bstp_state *bs, struct bstp_port *bp)
+{
+ struct bstp_port *bp2;
+ int synced;
+
+ BSTP_LOCK_ASSERT(bs);
+
+ /* check if all the ports have synchronised again */
+ if (!bs->bs_allsynced) {
+ synced = 1;
+ LIST_FOREACH(bp2, &bs->bs_bplist, bp_next) {
+ if (!(bp2->bp_synced ||
+ bp2->bp_role == BSTP_ROLE_ROOT)) {
+ synced = 0;
+ break;
+ }
+ }
+ bs->bs_allsynced = synced;
+ }
+
+ bstp_update_roles(bs, bp);
+ bstp_update_tc(bp);
+}
+
+static void
+bstp_update_roles(struct bstp_state *bs, struct bstp_port *bp)
+{
+ switch (bp->bp_role) {
+ case BSTP_ROLE_DISABLED:
+ /* Clear any flags if set */
+ if (bp->bp_sync || !bp->bp_synced || bp->bp_reroot) {
+ bp->bp_sync = 0;
+ bp->bp_synced = 1;
+ bp->bp_reroot = 0;
+ }
+ break;
+
+ case BSTP_ROLE_ALTERNATE:
+ case BSTP_ROLE_BACKUP:
+ if ((bs->bs_allsynced && !bp->bp_agree) ||
+ (bp->bp_proposed && bp->bp_agree)) {
+ bp->bp_proposed = 0;
+ bp->bp_agree = 1;
+ bp->bp_flags |= BSTP_PORT_NEWINFO;
+ DPRINTF("%s -> ALTERNATE_AGREED\n",
+ bp->bp_ifp->if_xname);
+ }
+
+ if (bp->bp_proposed && !bp->bp_agree) {
+ bstp_set_all_sync(bs);
+ bp->bp_proposed = 0;
+ DPRINTF("%s -> ALTERNATE_PROPOSED\n",
+ bp->bp_ifp->if_xname);
+ }
+
+ /* Clear any flags if set */
+ if (bp->bp_sync || !bp->bp_synced || bp->bp_reroot) {
+ bp->bp_sync = 0;
+ bp->bp_synced = 1;
+ bp->bp_reroot = 0;
+ DPRINTF("%s -> ALTERNATE_PORT\n", bp->bp_ifp->if_xname);
+ }
+ break;
+
+ case BSTP_ROLE_ROOT:
+ if (bp->bp_state != BSTP_IFSTATE_FORWARDING && !bp->bp_reroot) {
+ bstp_set_all_reroot(bs);
+ DPRINTF("%s -> ROOT_REROOT\n", bp->bp_ifp->if_xname);
+ }
+
+ if ((bs->bs_allsynced && !bp->bp_agree) ||
+ (bp->bp_proposed && bp->bp_agree)) {
+ bp->bp_proposed = 0;
+ bp->bp_sync = 0;
+ bp->bp_agree = 1;
+ bp->bp_flags |= BSTP_PORT_NEWINFO;
+ DPRINTF("%s -> ROOT_AGREED\n", bp->bp_ifp->if_xname);
+ }
+
+ if (bp->bp_proposed && !bp->bp_agree) {
+ bstp_set_all_sync(bs);
+ bp->bp_proposed = 0;
+ DPRINTF("%s -> ROOT_PROPOSED\n", bp->bp_ifp->if_xname);
+ }
+
+ if (bp->bp_state != BSTP_IFSTATE_FORWARDING &&
+ (bp->bp_forward_delay_timer.active == 0 ||
+ (bstp_rerooted(bs, bp) &&
+ bp->bp_recent_backup_timer.active == 0 &&
+ bp->bp_protover == BSTP_PROTO_RSTP))) {
+ switch (bp->bp_state) {
+ case BSTP_IFSTATE_DISCARDING:
+ bstp_set_port_state(bp, BSTP_IFSTATE_LEARNING);
+ break;
+ case BSTP_IFSTATE_LEARNING:
+ bstp_set_port_state(bp,
+ BSTP_IFSTATE_FORWARDING);
+ break;
+ }
+ }
+
+ if (bp->bp_state == BSTP_IFSTATE_FORWARDING && bp->bp_reroot) {
+ bp->bp_reroot = 0;
+ DPRINTF("%s -> ROOT_REROOTED\n", bp->bp_ifp->if_xname);
+ }
+ break;
+
+ case BSTP_ROLE_DESIGNATED:
+ if (bp->bp_recent_root_timer.active == 0 && bp->bp_reroot) {
+ bp->bp_reroot = 0;
+ DPRINTF("%s -> DESIGNATED_RETIRED\n",
+ bp->bp_ifp->if_xname);
+ }
+
+ if ((bp->bp_state == BSTP_IFSTATE_DISCARDING &&
+ !bp->bp_synced) || (bp->bp_agreed && !bp->bp_synced) ||
+ (bp->bp_operedge && !bp->bp_synced) ||
+ (bp->bp_sync && bp->bp_synced)) {
+ bstp_timer_stop(&bp->bp_recent_root_timer);
+ bp->bp_synced = 1;
+ bp->bp_sync = 0;
+ DPRINTF("%s -> DESIGNATED_SYNCED\n",
+ bp->bp_ifp->if_xname);
+ }
+
+ if (bp->bp_state != BSTP_IFSTATE_FORWARDING &&
+ !bp->bp_agreed && !bp->bp_proposing &&
+ !bp->bp_operedge) {
+ bp->bp_proposing = 1;
+ bp->bp_flags |= BSTP_PORT_NEWINFO;
+ bstp_timer_start(&bp->bp_edge_delay_timer,
+ (bp->bp_ptp_link ? BSTP_DEFAULT_MIGRATE_DELAY :
+ bp->bp_desg_max_age));
+ DPRINTF("%s -> DESIGNATED_PROPOSE\n",
+ bp->bp_ifp->if_xname);
+ }
+
+ if (bp->bp_state != BSTP_IFSTATE_FORWARDING &&
+ (bp->bp_forward_delay_timer.active == 0 || bp->bp_agreed ||
+ bp->bp_operedge) &&
+ (bp->bp_recent_root_timer.active == 0 || !bp->bp_reroot) &&
+ !bp->bp_sync) {
+ if (bp->bp_agreed)
+ DPRINTF("%s -> AGREED\n", bp->bp_ifp->if_xname);
+ /*
+ * If agreed|operedge then go straight to forwarding,
+ * otherwise follow discard -> learn -> forward.
+ */
+ if (bp->bp_agreed || bp->bp_operedge ||
+ bp->bp_state == BSTP_IFSTATE_LEARNING) {
+ bstp_set_port_state(bp,
+ BSTP_IFSTATE_FORWARDING);
+ bp->bp_agreed = bp->bp_protover;
+ } else if (bp->bp_state == BSTP_IFSTATE_DISCARDING)
+ bstp_set_port_state(bp, BSTP_IFSTATE_LEARNING);
+ }
+
+ if (((bp->bp_sync && !bp->bp_synced) ||
+ (bp->bp_reroot && bp->bp_recent_root_timer.active) ||
+ (bp->bp_flags & BSTP_PORT_DISPUTED)) && !bp->bp_operedge &&
+ bp->bp_state != BSTP_IFSTATE_DISCARDING) {
+ bstp_set_port_state(bp, BSTP_IFSTATE_DISCARDING);
+ bp->bp_flags &= ~BSTP_PORT_DISPUTED;
+ bstp_timer_start(&bp->bp_forward_delay_timer,
+ bp->bp_protover == BSTP_PROTO_RSTP ?
+ bp->bp_desg_htime : bp->bp_desg_fdelay);
+ DPRINTF("%s -> DESIGNATED_DISCARD\n",
+ bp->bp_ifp->if_xname);
+ }
+ break;
+ }
+
+ if (bp->bp_flags & BSTP_PORT_NEWINFO)
+ bstp_transmit(bs, bp);
+}
+
+static void
+bstp_update_tc(struct bstp_port *bp)
+{
+ switch (bp->bp_tcstate) {
+ case BSTP_TCSTATE_ACTIVE:
+ if ((bp->bp_role != BSTP_ROLE_DESIGNATED &&
+ bp->bp_role != BSTP_ROLE_ROOT) || bp->bp_operedge)
+ bstp_set_port_tc(bp, BSTP_TCSTATE_LEARNING);
+
+ if (bp->bp_rcvdtcn)
+ bstp_set_port_tc(bp, BSTP_TCSTATE_TCN);
+ if (bp->bp_rcvdtc)
+ bstp_set_port_tc(bp, BSTP_TCSTATE_TC);
+
+ if (bp->bp_tc_prop && !bp->bp_operedge)
+ bstp_set_port_tc(bp, BSTP_TCSTATE_PROPAG);
+
+ if (bp->bp_rcvdtca)
+ bstp_set_port_tc(bp, BSTP_TCSTATE_ACK);
+ break;
+
+ case BSTP_TCSTATE_INACTIVE:
+ if ((bp->bp_state == BSTP_IFSTATE_LEARNING ||
+ bp->bp_state == BSTP_IFSTATE_FORWARDING) &&
+ bp->bp_fdbflush == 0)
+ bstp_set_port_tc(bp, BSTP_TCSTATE_LEARNING);
+ break;
+
+ case BSTP_TCSTATE_LEARNING:
+ if (bp->bp_rcvdtc || bp->bp_rcvdtcn || bp->bp_rcvdtca ||
+ bp->bp_tc_prop)
+ bstp_set_port_tc(bp, BSTP_TCSTATE_LEARNING);
+ else if (bp->bp_role != BSTP_ROLE_DESIGNATED &&
+ bp->bp_role != BSTP_ROLE_ROOT &&
+ bp->bp_state == BSTP_IFSTATE_DISCARDING)
+ bstp_set_port_tc(bp, BSTP_TCSTATE_INACTIVE);
+
+ if ((bp->bp_role == BSTP_ROLE_DESIGNATED ||
+ bp->bp_role == BSTP_ROLE_ROOT) &&
+ bp->bp_state == BSTP_IFSTATE_FORWARDING &&
+ !bp->bp_operedge)
+ bstp_set_port_tc(bp, BSTP_TCSTATE_DETECTED);
+ break;
+
+ /* these are transient states and go straight back to ACTIVE */
+ case BSTP_TCSTATE_DETECTED:
+ case BSTP_TCSTATE_TCN:
+ case BSTP_TCSTATE_TC:
+ case BSTP_TCSTATE_PROPAG:
+ case BSTP_TCSTATE_ACK:
+ DPRINTF("Invalid TC state for %s\n",
+ bp->bp_ifp->if_xname);
+ break;
+ }
+}
+
+static void
+bstp_update_info(struct bstp_port *bp)
+{
+ struct bstp_state *bs = bp->bp_bs;
+
+ bp->bp_proposing = 0;
+ bp->bp_proposed = 0;
+
+ if (bp->bp_agreed && !bstp_pdu_bettersame(bp, BSTP_INFO_MINE))
+ bp->bp_agreed = 0;
+
+ if (bp->bp_synced && !bp->bp_agreed) {
+ bp->bp_synced = 0;
+ bs->bs_allsynced = 0;
+ }
+
+ /* copy the designated pv to the port */
+ bp->bp_port_pv = bp->bp_desg_pv;
+ bp->bp_port_msg_age = bp->bp_desg_msg_age;
+ bp->bp_port_max_age = bp->bp_desg_max_age;
+ bp->bp_port_fdelay = bp->bp_desg_fdelay;
+ bp->bp_port_htime = bp->bp_desg_htime;
+ bp->bp_infois = BSTP_INFO_MINE;
+
+ /* Set transmit flag but do not immediately send */
+ bp->bp_flags |= BSTP_PORT_NEWINFO;
+}
+
+/* set tcprop on every port other than the caller */
+static void
+bstp_set_other_tcprop(struct bstp_port *bp)
+{
+ struct bstp_state *bs = bp->bp_bs;
+ struct bstp_port *bp2;
+
+ BSTP_LOCK_ASSERT(bs);
+
+ LIST_FOREACH(bp2, &bs->bs_bplist, bp_next) {
+ if (bp2 == bp)
+ continue;
+ bp2->bp_tc_prop = 1;
+ }
+}
+
+static void
+bstp_set_all_reroot(struct bstp_state *bs)
+{
+ struct bstp_port *bp;
+
+ BSTP_LOCK_ASSERT(bs);
+
+ LIST_FOREACH(bp, &bs->bs_bplist, bp_next)
+ bp->bp_reroot = 1;
+}
+
+static void
+bstp_set_all_sync(struct bstp_state *bs)
+{
+ struct bstp_port *bp;
+
+ BSTP_LOCK_ASSERT(bs);
+
+ LIST_FOREACH(bp, &bs->bs_bplist, bp_next) {
+ bp->bp_sync = 1;
+ bp->bp_synced = 0; /* Not explicit in spec */
+ }
+
+ bs->bs_allsynced = 0;
+}
+
+static void
+bstp_set_port_state(struct bstp_port *bp, int state)
+{
+ if (bp->bp_state == state)
+ return;
+
+ bp->bp_state = state;
+
+ switch (bp->bp_state) {
+ case BSTP_IFSTATE_DISCARDING:
+ DPRINTF("state changed to DISCARDING on %s\n",
+ bp->bp_ifp->if_xname);
+ break;
+
+ case BSTP_IFSTATE_LEARNING:
+ DPRINTF("state changed to LEARNING on %s\n",
+ bp->bp_ifp->if_xname);
+
+ bstp_timer_start(&bp->bp_forward_delay_timer,
+ bp->bp_protover == BSTP_PROTO_RSTP ?
+ bp->bp_desg_htime : bp->bp_desg_fdelay);
+ break;
+
+ case BSTP_IFSTATE_FORWARDING:
+ DPRINTF("state changed to FORWARDING on %s\n",
+ bp->bp_ifp->if_xname);
+
+ bstp_timer_stop(&bp->bp_forward_delay_timer);
+ /* Record that we enabled forwarding */
+ bp->bp_forward_transitions++;
+ break;
+ }
+
+ /* notify the parent bridge */
+ taskqueue_enqueue(taskqueue_swi, &bp->bp_statetask);
+}
+
+static void
+bstp_set_port_role(struct bstp_port *bp, int role)
+{
+ struct bstp_state *bs = bp->bp_bs;
+
+ if (bp->bp_role == role)
+ return;
+
+ /* perform pre-change tasks */
+ switch (bp->bp_role) {
+ case BSTP_ROLE_DISABLED:
+ bstp_timer_start(&bp->bp_forward_delay_timer,
+ bp->bp_desg_max_age);
+ break;
+
+ case BSTP_ROLE_BACKUP:
+ bstp_timer_start(&bp->bp_recent_backup_timer,
+ bp->bp_desg_htime * 2);
+ /* fall through */
+ case BSTP_ROLE_ALTERNATE:
+ bstp_timer_start(&bp->bp_forward_delay_timer,
+ bp->bp_desg_fdelay);
+ bp->bp_sync = 0;
+ bp->bp_synced = 1;
+ bp->bp_reroot = 0;
+ break;
+
+ case BSTP_ROLE_ROOT:
+ bstp_timer_start(&bp->bp_recent_root_timer,
+ BSTP_DEFAULT_FORWARD_DELAY);
+ break;
+ }
+
+ bp->bp_role = role;
+ /* clear values not carried between roles */
+ bp->bp_proposing = 0;
+ bs->bs_allsynced = 0;
+
+ /* initialise the new role */
+ switch (bp->bp_role) {
+ case BSTP_ROLE_DISABLED:
+ case BSTP_ROLE_ALTERNATE:
+ case BSTP_ROLE_BACKUP:
+ DPRINTF("%s role -> ALT/BACK/DISABLED\n",
+ bp->bp_ifp->if_xname);
+ bstp_set_port_state(bp, BSTP_IFSTATE_DISCARDING);
+ bstp_timer_stop(&bp->bp_recent_root_timer);
+ bstp_timer_latch(&bp->bp_forward_delay_timer);
+ bp->bp_sync = 0;
+ bp->bp_synced = 1;
+ bp->bp_reroot = 0;
+ break;
+
+ case BSTP_ROLE_ROOT:
+ DPRINTF("%s role -> ROOT\n",
+ bp->bp_ifp->if_xname);
+ bstp_set_port_state(bp, BSTP_IFSTATE_DISCARDING);
+ bstp_timer_latch(&bp->bp_recent_root_timer);
+ bp->bp_proposing = 0;
+ break;
+
+ case BSTP_ROLE_DESIGNATED:
+ DPRINTF("%s role -> DESIGNATED\n",
+ bp->bp_ifp->if_xname);
+ bstp_timer_start(&bp->bp_hello_timer,
+ bp->bp_desg_htime);
+ bp->bp_agree = 0;
+ break;
+ }
+
+ /* let the TC state know that the role changed */
+ bstp_update_tc(bp);
+}
+
+static void
+bstp_set_port_proto(struct bstp_port *bp, int proto)
+{
+ struct bstp_state *bs = bp->bp_bs;
+
+ /* supported protocol versions */
+ switch (proto) {
+ case BSTP_PROTO_STP:
+ /* we can downgrade protocols only */
+ bstp_timer_stop(&bp->bp_migrate_delay_timer);
+ /* clear unsupported features */
+ bp->bp_operedge = 0;
+ /* STP compat mode only uses 16 bits of the 32 */
+ if (bp->bp_path_cost > 65535)
+ bp->bp_path_cost = 65535;
+ break;
+
+ case BSTP_PROTO_RSTP:
+ bstp_timer_start(&bp->bp_migrate_delay_timer,
+ bs->bs_migration_delay);
+ break;
+
+ default:
+ DPRINTF("Unsupported STP version %d\n", proto);
+ return;
+ }
+
+ bp->bp_protover = proto;
+ bp->bp_flags &= ~BSTP_PORT_CANMIGRATE;
+}
+
+static void
+bstp_set_port_tc(struct bstp_port *bp, int state)
+{
+ struct bstp_state *bs = bp->bp_bs;
+
+ bp->bp_tcstate = state;
+
+ /* initialise the new state */
+ switch (bp->bp_tcstate) {
+ case BSTP_TCSTATE_ACTIVE:
+ DPRINTF("%s -> TC_ACTIVE\n", bp->bp_ifp->if_xname);
+ /* nothing to do */
+ break;
+
+ case BSTP_TCSTATE_INACTIVE:
+ bstp_timer_stop(&bp->bp_tc_timer);
+ /* flush routes on the parent bridge */
+ bp->bp_fdbflush = 1;
+ taskqueue_enqueue(taskqueue_swi, &bp->bp_rtagetask);
+ bp->bp_tc_ack = 0;
+ DPRINTF("%s -> TC_INACTIVE\n", bp->bp_ifp->if_xname);
+ break;
+
+ case BSTP_TCSTATE_LEARNING:
+ bp->bp_rcvdtc = 0;
+ bp->bp_rcvdtcn = 0;
+ bp->bp_rcvdtca = 0;
+ bp->bp_tc_prop = 0;
+ DPRINTF("%s -> TC_LEARNING\n", bp->bp_ifp->if_xname);
+ break;
+
+ case BSTP_TCSTATE_DETECTED:
+ bstp_set_timer_tc(bp);
+ bstp_set_other_tcprop(bp);
+ /* send out notification */
+ bp->bp_flags |= BSTP_PORT_NEWINFO;
+ bstp_transmit(bs, bp);
+ getmicrotime(&bs->bs_last_tc_time);
+ DPRINTF("%s -> TC_DETECTED\n", bp->bp_ifp->if_xname);
+ bp->bp_tcstate = BSTP_TCSTATE_ACTIVE; /* UCT */
+ break;
+
+ case BSTP_TCSTATE_TCN:
+ bstp_set_timer_tc(bp);
+ DPRINTF("%s -> TC_TCN\n", bp->bp_ifp->if_xname);
+ /* fall through */
+ case BSTP_TCSTATE_TC:
+ bp->bp_rcvdtc = 0;
+ bp->bp_rcvdtcn = 0;
+ if (bp->bp_role == BSTP_ROLE_DESIGNATED)
+ bp->bp_tc_ack = 1;
+
+ bstp_set_other_tcprop(bp);
+ DPRINTF("%s -> TC_TC\n", bp->bp_ifp->if_xname);
+ bp->bp_tcstate = BSTP_TCSTATE_ACTIVE; /* UCT */
+ break;
+
+ case BSTP_TCSTATE_PROPAG:
+ /* flush routes on the parent bridge */
+ bp->bp_fdbflush = 1;
+ taskqueue_enqueue(taskqueue_swi, &bp->bp_rtagetask);
+ bp->bp_tc_prop = 0;
+ bstp_set_timer_tc(bp);
+ DPRINTF("%s -> TC_PROPAG\n", bp->bp_ifp->if_xname);
+ bp->bp_tcstate = BSTP_TCSTATE_ACTIVE; /* UCT */
+ break;
+
+ case BSTP_TCSTATE_ACK:
+ bstp_timer_stop(&bp->bp_tc_timer);
+ bp->bp_rcvdtca = 0;
+ DPRINTF("%s -> TC_ACK\n", bp->bp_ifp->if_xname);
+ bp->bp_tcstate = BSTP_TCSTATE_ACTIVE; /* UCT */
+ break;
+ }
+}
+
+static void
+bstp_set_timer_tc(struct bstp_port *bp)
+{
+ struct bstp_state *bs = bp->bp_bs;
+
+ if (bp->bp_tc_timer.active)
+ return;
+
+ switch (bp->bp_protover) {
+ case BSTP_PROTO_RSTP:
+ bstp_timer_start(&bp->bp_tc_timer,
+ bp->bp_desg_htime + BSTP_TICK_VAL);
+ bp->bp_flags |= BSTP_PORT_NEWINFO;
+ break;
+
+ case BSTP_PROTO_STP:
+ bstp_timer_start(&bp->bp_tc_timer,
+ bs->bs_root_max_age + bs->bs_root_fdelay);
+ break;
+ }
+}
+
+static void
+bstp_set_timer_msgage(struct bstp_port *bp)
+{
+ if (bp->bp_port_msg_age + BSTP_MESSAGE_AGE_INCR <=
+ bp->bp_port_max_age) {
+ bstp_timer_start(&bp->bp_message_age_timer,
+ bp->bp_port_htime * 3);
+ } else
+ /* expires immediately */
+ bstp_timer_start(&bp->bp_message_age_timer, 0);
+}
+
+static int
+bstp_rerooted(struct bstp_state *bs, struct bstp_port *bp)
+{
+ struct bstp_port *bp2;
+ int rr_set = 0;
+
+ LIST_FOREACH(bp2, &bs->bs_bplist, bp_next) {
+ if (bp2 == bp)
+ continue;
+ if (bp2->bp_recent_root_timer.active) {
+ rr_set = 1;
+ break;
+ }
+ }
+ return (!rr_set);
+}
+
+int
+bstp_set_htime(struct bstp_state *bs, int t)
+{
+ /* convert seconds to ticks */
+ t *= BSTP_TICK_VAL;
+
+ /* value can only be changed in legacy STP mode */
+ if (bs->bs_protover != BSTP_PROTO_STP)
+ return (EPERM);
+
+ if (t < BSTP_MIN_HELLO_TIME || t > BSTP_MAX_HELLO_TIME)
+ return (EINVAL);
+
+ BSTP_LOCK(bs);
+ bs->bs_bridge_htime = t;
+ bstp_reinit(bs);
+ BSTP_UNLOCK(bs);
+ return (0);
+}
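+
+/*
+ * Illustrative example (editor's sketch, not in the original source): all
+ * timer values are stored in 1/256ths of a second, so bstp_set_htime(bs, 2)
+ * stores 2 * BSTP_TICK_VAL = 512 in bs_bridge_htime, which is exactly
+ * BSTP_DEFAULT_HELLO_TIME.  The call fails with EPERM unless the bridge is
+ * running in legacy STP mode.
+ */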
+
+int
+bstp_set_fdelay(struct bstp_state *bs, int t)
+{
+ /* convert seconds to ticks */
+ t *= BSTP_TICK_VAL;
+
+ if (t < BSTP_MIN_FORWARD_DELAY || t > BSTP_MAX_FORWARD_DELAY)
+ return (EINVAL);
+
+ BSTP_LOCK(bs);
+ bs->bs_bridge_fdelay = t;
+ bstp_reinit(bs);
+ BSTP_UNLOCK(bs);
+ return (0);
+}
+
+int
+bstp_set_maxage(struct bstp_state *bs, int t)
+{
+ /* convert seconds to ticks */
+ t *= BSTP_TICK_VAL;
+
+ if (t < BSTP_MIN_MAX_AGE || t > BSTP_MAX_MAX_AGE)
+ return (EINVAL);
+
+ BSTP_LOCK(bs);
+ bs->bs_bridge_max_age = t;
+ bstp_reinit(bs);
+ BSTP_UNLOCK(bs);
+ return (0);
+}
+
+int
+bstp_set_holdcount(struct bstp_state *bs, int count)
+{
+ struct bstp_port *bp;
+
+ if (count < BSTP_MIN_HOLD_COUNT ||
+ count > BSTP_MAX_HOLD_COUNT)
+ return (EINVAL);
+
+ BSTP_LOCK(bs);
+ bs->bs_txholdcount = count;
+ LIST_FOREACH(bp, &bs->bs_bplist, bp_next)
+ bp->bp_txcount = 0;
+ BSTP_UNLOCK(bs);
+ return (0);
+}
+
+int
+bstp_set_protocol(struct bstp_state *bs, int proto)
+{
+ struct bstp_port *bp;
+
+ switch (proto) {
+ /* Supported protocol versions */
+ case BSTP_PROTO_STP:
+ case BSTP_PROTO_RSTP:
+ break;
+
+ default:
+ return (EINVAL);
+ }
+
+ BSTP_LOCK(bs);
+ bs->bs_protover = proto;
+ bs->bs_bridge_htime = BSTP_DEFAULT_HELLO_TIME;
+ LIST_FOREACH(bp, &bs->bs_bplist, bp_next) {
+ /* reinit state */
+ bp->bp_infois = BSTP_INFO_DISABLED;
+ bp->bp_txcount = 0;
+ bstp_set_port_proto(bp, bs->bs_protover);
+ bstp_set_port_role(bp, BSTP_ROLE_DISABLED);
+ bstp_set_port_tc(bp, BSTP_TCSTATE_INACTIVE);
+ bstp_timer_stop(&bp->bp_recent_backup_timer);
+ }
+ bstp_reinit(bs);
+ BSTP_UNLOCK(bs);
+ return (0);
+}
+
+int
+bstp_set_priority(struct bstp_state *bs, int pri)
+{
+ if (pri < 0 || pri > BSTP_MAX_PRIORITY)
+ return (EINVAL);
+
+ /* Limit to steps of 4096 */
+ pri -= pri % 4096;
+
+ BSTP_LOCK(bs);
+ bs->bs_bridge_priority = pri;
+ bstp_reinit(bs);
+ BSTP_UNLOCK(bs);
+ return (0);
+}
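+
+/*
+ * Illustrative example (not in the original source): priorities are rounded
+ * down to the nearest multiple of 4096, so bstp_set_priority(bs, 32769)
+ * stores 32768, while bstp_set_priority(bs, 61441) fails with EINVAL
+ * because it exceeds BSTP_MAX_PRIORITY (61440).
+ */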
+
+int
+bstp_set_port_priority(struct bstp_port *bp, int pri)
+{
+ struct bstp_state *bs = bp->bp_bs;
+
+ if (pri < 0 || pri > BSTP_MAX_PORT_PRIORITY)
+ return (EINVAL);
+
+ /* Limit to steps of 16 */
+ pri -= pri % 16;
+
+ BSTP_LOCK(bs);
+ bp->bp_priority = pri;
+ bstp_reinit(bs);
+ BSTP_UNLOCK(bs);
+ return (0);
+}
+
+int
+bstp_set_path_cost(struct bstp_port *bp, uint32_t path_cost)
+{
+ struct bstp_state *bs = bp->bp_bs;
+
+ if (path_cost > BSTP_MAX_PATH_COST)
+ return (EINVAL);
+
+ /* STP compat mode only uses 16 bits of the 32 */
+ if (bp->bp_protover == BSTP_PROTO_STP && path_cost > 65535)
+ path_cost = 65535;
+
+ BSTP_LOCK(bs);
+
+ if (path_cost == 0) { /* use auto */
+ bp->bp_flags &= ~BSTP_PORT_ADMCOST;
+ bp->bp_path_cost = bstp_calc_path_cost(bp);
+ } else {
+ bp->bp_path_cost = path_cost;
+ bp->bp_flags |= BSTP_PORT_ADMCOST;
+ }
+ bstp_reinit(bs);
+ BSTP_UNLOCK(bs);
+ return (0);
+}
+
+int
+bstp_set_edge(struct bstp_port *bp, int set)
+{
+ struct bstp_state *bs = bp->bp_bs;
+
+ BSTP_LOCK(bs);
+ if ((bp->bp_operedge = set) == 0)
+ bp->bp_flags &= ~BSTP_PORT_ADMEDGE;
+ else
+ bp->bp_flags |= BSTP_PORT_ADMEDGE;
+ BSTP_UNLOCK(bs);
+ return (0);
+}
+
+int
+bstp_set_autoedge(struct bstp_port *bp, int set)
+{
+ struct bstp_state *bs = bp->bp_bs;
+
+ BSTP_LOCK(bs);
+ if (set) {
+ bp->bp_flags |= BSTP_PORT_AUTOEDGE;
+ /* we may be able to transition straight to edge */
+ if (bp->bp_edge_delay_timer.active == 0)
+ bstp_edge_delay_expiry(bs, bp);
+ } else
+ bp->bp_flags &= ~BSTP_PORT_AUTOEDGE;
+ BSTP_UNLOCK(bs);
+ return (0);
+}
+
+int
+bstp_set_ptp(struct bstp_port *bp, int set)
+{
+ struct bstp_state *bs = bp->bp_bs;
+
+ BSTP_LOCK(bs);
+ bp->bp_ptp_link = set;
+ BSTP_UNLOCK(bs);
+ return (0);
+}
+
+int
+bstp_set_autoptp(struct bstp_port *bp, int set)
+{
+ struct bstp_state *bs = bp->bp_bs;
+
+ BSTP_LOCK(bs);
+ if (set) {
+ bp->bp_flags |= BSTP_PORT_AUTOPTP;
+ if (bp->bp_role != BSTP_ROLE_DISABLED)
+ bstp_ifupdstatus(bs, bp);
+ } else
+ bp->bp_flags &= ~BSTP_PORT_AUTOPTP;
+ BSTP_UNLOCK(bs);
+ return (0);
+}
+
+/*
+ * Calculate the path cost according to the link speed.
+ */
+static uint32_t
+bstp_calc_path_cost(struct bstp_port *bp)
+{
+ struct ifnet *ifp = bp->bp_ifp;
+ uint32_t path_cost;
+
+ /* If the path cost has been manually set then retain that value */
+ if (bp->bp_flags & BSTP_PORT_ADMCOST)
+ return bp->bp_path_cost;
+
+ if (ifp->if_link_state == LINK_STATE_DOWN) {
+ /* Recalc when the link comes up again */
+ bp->bp_flags |= BSTP_PORT_PNDCOST;
+ return (BSTP_DEFAULT_PATH_COST);
+ }
+
+ if (ifp->if_baudrate < 1000)
+ return (BSTP_DEFAULT_PATH_COST);
+
+ /* formula from section 17.14, IEEE Std 802.1D-2004 */
+ path_cost = 20000000000ULL / (ifp->if_baudrate / 1000);
+
+ if (path_cost > BSTP_MAX_PATH_COST)
+ path_cost = BSTP_MAX_PATH_COST;
+
+ /* STP compat mode only uses 16 bits of the 32 */
+ if (bp->bp_protover == BSTP_PROTO_STP && path_cost > 65535)
+ path_cost = 65535;
+
+ return (path_cost);
+}
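+
+/*
+ * Illustrative values (editor's note, not in the original source): with
+ * if_baudrate in bits per second, the formula above yields
+ *	 10 Mb/s -> 20000000000 / 10000    = 2000000
+ *	100 Mb/s -> 20000000000 / 100000   = 200000
+ *	  1 Gb/s -> 20000000000 / 1000000  = 20000
+ *	 10 Gb/s -> 20000000000 / 10000000 = 2000
+ * matching the path costs recommended by IEEE Std 802.1D-2004.
+ */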
+
+/*
+ * Notify the bridge that a port state has changed; we need to do this from a
+ * taskqueue to avoid a lock order reversal (LOR).
+ */
+static void
+bstp_notify_state(void *arg, int pending)
+{
+ struct bstp_port *bp = (struct bstp_port *)arg;
+ struct bstp_state *bs = bp->bp_bs;
+
+ if (bp->bp_active == 1 && bs->bs_state_cb != NULL)
+ (*bs->bs_state_cb)(bp->bp_ifp, bp->bp_state);
+}
+
+/*
+ * Flush the routes on the bridge port; we need to do this from a
+ * taskqueue to avoid a lock order reversal (LOR).
+ */
+static void
+bstp_notify_rtage(void *arg, int pending)
+{
+ struct bstp_port *bp = (struct bstp_port *)arg;
+ struct bstp_state *bs = bp->bp_bs;
+ int age = 0;
+
+ BSTP_LOCK(bs);
+ switch (bp->bp_protover) {
+ case BSTP_PROTO_STP:
+ /* convert to seconds */
+ age = bp->bp_desg_fdelay / BSTP_TICK_VAL;
+ break;
+
+ case BSTP_PROTO_RSTP:
+ age = 0;
+ break;
+ }
+ BSTP_UNLOCK(bs);
+
+ if (bp->bp_active == 1 && bs->bs_rtage_cb != NULL)
+ (*bs->bs_rtage_cb)(bp->bp_ifp, age);
+
+ /* flush is complete */
+ BSTP_LOCK(bs);
+ bp->bp_fdbflush = 0;
+ BSTP_UNLOCK(bs);
+}
+
+void
+bstp_linkstate(struct ifnet *ifp, int state)
+{
+ struct bstp_state *bs;
+ struct bstp_port *bp;
+
+ /* search for the stp port */
+ mtx_lock(&bstp_list_mtx);
+ LIST_FOREACH(bs, &bstp_list, bs_list) {
+ BSTP_LOCK(bs);
+ LIST_FOREACH(bp, &bs->bs_bplist, bp_next) {
+ if (bp->bp_ifp == ifp) {
+ bstp_ifupdstatus(bs, bp);
+ bstp_update_state(bs, bp);
+ /* the port is attached to only one bridge, so stop searching */
+ BSTP_UNLOCK(bs);
+ mtx_unlock(&bstp_list_mtx);
+ return;
+ }
+ }
+ BSTP_UNLOCK(bs);
+ }
+ mtx_unlock(&bstp_list_mtx);
+}
+
+static void
+bstp_ifupdstatus(struct bstp_state *bs, struct bstp_port *bp)
+{
+ struct ifnet *ifp = bp->bp_ifp;
+ struct ifmediareq ifmr;
+ int error = 0;
+
+ BSTP_LOCK_ASSERT(bs);
+
+ bzero((char *)&ifmr, sizeof(ifmr));
+ error = (*ifp->if_ioctl)(ifp, SIOCGIFMEDIA, (caddr_t)&ifmr);
+
+ if ((error == 0) && (ifp->if_flags & IFF_UP)) {
+ if (ifmr.ifm_status & IFM_ACTIVE) {
+ /* A full-duplex link is assumed to be point-to-point */
+ if (bp->bp_flags & BSTP_PORT_AUTOPTP) {
+ bp->bp_ptp_link =
+ ifmr.ifm_active & IFM_FDX ? 1 : 0;
+ }
+
+ /* Calc the cost if the link was down previously */
+ if (bp->bp_flags & BSTP_PORT_PNDCOST) {
+ bp->bp_path_cost = bstp_calc_path_cost(bp);
+ bp->bp_flags &= ~BSTP_PORT_PNDCOST;
+ }
+
+ if (bp->bp_role == BSTP_ROLE_DISABLED)
+ bstp_enable_port(bs, bp);
+ } else {
+ if (bp->bp_role != BSTP_ROLE_DISABLED) {
+ bstp_disable_port(bs, bp);
+ if ((bp->bp_flags & BSTP_PORT_ADMEDGE) &&
+ bp->bp_protover == BSTP_PROTO_RSTP)
+ bp->bp_operedge = 1;
+ }
+ }
+ return;
+ }
+
+ if (bp->bp_infois != BSTP_INFO_DISABLED)
+ bstp_disable_port(bs, bp);
+}
+
+static void
+bstp_enable_port(struct bstp_state *bs, struct bstp_port *bp)
+{
+ bp->bp_infois = BSTP_INFO_AGED;
+ bstp_assign_roles(bs);
+}
+
+static void
+bstp_disable_port(struct bstp_state *bs, struct bstp_port *bp)
+{
+ bp->bp_infois = BSTP_INFO_DISABLED;
+ bstp_assign_roles(bs);
+}
+
+static void
+bstp_tick(void *arg)
+{
+ struct bstp_state *bs = arg;
+ struct bstp_port *bp;
+
+ BSTP_LOCK_ASSERT(bs);
+
+ if (bs->bs_running == 0)
+ return;
+
+ /* slow timer to catch missed link events */
+ if (bstp_timer_expired(&bs->bs_link_timer)) {
+ LIST_FOREACH(bp, &bs->bs_bplist, bp_next)
+ bstp_ifupdstatus(bs, bp);
+ bstp_timer_start(&bs->bs_link_timer, BSTP_LINK_TIMER);
+ }
+
+ LIST_FOREACH(bp, &bs->bs_bplist, bp_next) {
+ /* no events need to happen for these */
+ bstp_timer_expired(&bp->bp_tc_timer);
+ bstp_timer_expired(&bp->bp_recent_root_timer);
+ bstp_timer_expired(&bp->bp_forward_delay_timer);
+ bstp_timer_expired(&bp->bp_recent_backup_timer);
+
+ if (bstp_timer_expired(&bp->bp_hello_timer))
+ bstp_hello_timer_expiry(bs, bp);
+
+ if (bstp_timer_expired(&bp->bp_message_age_timer))
+ bstp_message_age_expiry(bs, bp);
+
+ if (bstp_timer_expired(&bp->bp_migrate_delay_timer))
+ bstp_migrate_delay_expiry(bs, bp);
+
+ if (bstp_timer_expired(&bp->bp_edge_delay_timer))
+ bstp_edge_delay_expiry(bs, bp);
+
+ /* update the various state machines for the port */
+ bstp_update_state(bs, bp);
+
+ if (bp->bp_txcount > 0)
+ bp->bp_txcount--;
+ }
+
+ callout_reset(&bs->bs_bstpcallout, hz, bstp_tick, bs);
+}
+
+static void
+bstp_timer_start(struct bstp_timer *t, uint16_t v)
+{
+ t->value = v;
+ t->active = 1;
+ t->latched = 0;
+}
+
+static void
+bstp_timer_stop(struct bstp_timer *t)
+{
+ t->value = 0;
+ t->active = 0;
+ t->latched = 0;
+}
+
+static void
+bstp_timer_latch(struct bstp_timer *t)
+{
+ t->latched = 1;
+ t->active = 1;
+}
+
+static int
+bstp_timer_expired(struct bstp_timer *t)
+{
+ if (t->active == 0 || t->latched)
+ return (0);
+ t->value -= BSTP_TICK_VAL;
+ if (t->value <= 0) {
+ bstp_timer_stop(t);
+ return (1);
+ }
+ return (0);
+}
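+
+/*
+ * Illustrative note (not in the original source): bstp_tick() runs once per
+ * second, and every call to bstp_timer_expired() subtracts one BSTP_TICK_VAL
+ * (one second expressed in 1/256ths).  A forward delay timer started with
+ * the default 15 * 256 therefore fires on the fifteenth tick.  A latched
+ * timer never counts down until bstp_timer_start() clears the latch.
+ */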
+
+static void
+bstp_hello_timer_expiry(struct bstp_state *bs, struct bstp_port *bp)
+{
+ if ((bp->bp_flags & BSTP_PORT_NEWINFO) ||
+ bp->bp_role == BSTP_ROLE_DESIGNATED ||
+ (bp->bp_role == BSTP_ROLE_ROOT &&
+ bp->bp_tc_timer.active == 1)) {
+ bstp_timer_start(&bp->bp_hello_timer, bp->bp_desg_htime);
+ bp->bp_flags |= BSTP_PORT_NEWINFO;
+ bstp_transmit(bs, bp);
+ }
+}
+
+static void
+bstp_message_age_expiry(struct bstp_state *bs, struct bstp_port *bp)
+{
+ if (bp->bp_infois == BSTP_INFO_RECEIVED) {
+ bp->bp_infois = BSTP_INFO_AGED;
+ bstp_assign_roles(bs);
+ DPRINTF("aged info on %s\n", bp->bp_ifp->if_xname);
+ }
+}
+
+static void
+bstp_migrate_delay_expiry(struct bstp_state *bs, struct bstp_port *bp)
+{
+ bp->bp_flags |= BSTP_PORT_CANMIGRATE;
+}
+
+static void
+bstp_edge_delay_expiry(struct bstp_state *bs, struct bstp_port *bp)
+{
+ if ((bp->bp_flags & BSTP_PORT_AUTOEDGE) &&
+ bp->bp_protover == BSTP_PROTO_RSTP && bp->bp_proposing &&
+ bp->bp_role == BSTP_ROLE_DESIGNATED) {
+ bp->bp_operedge = 1;
+ DPRINTF("%s -> edge port\n", bp->bp_ifp->if_xname);
+ }
+}
+
+static int
+bstp_addr_cmp(const uint8_t *a, const uint8_t *b)
+{
+ int i, d;
+
+ for (i = 0, d = 0; i < ETHER_ADDR_LEN && d == 0; i++) {
+ d = ((int)a[i]) - ((int)b[i]);
+ }
+
+ return (d);
+}
+
+/*
+ * compare the bridge address component of the bridgeid
+ */
+static int
+bstp_same_bridgeid(uint64_t id1, uint64_t id2)
+{
+ u_char addr1[ETHER_ADDR_LEN];
+ u_char addr2[ETHER_ADDR_LEN];
+
+ PV2ADDR(id1, addr1);
+ PV2ADDR(id2, addr2);
+
+ if (bstp_addr_cmp(addr1, addr2) == 0)
+ return (1);
+
+ return (0);
+}
+
+void
+bstp_reinit(struct bstp_state *bs)
+{
+ struct bstp_port *bp;
+ struct ifnet *ifp, *mif;
+ u_char *e_addr;
+ static const u_char llzero[ETHER_ADDR_LEN]; /* 00:00:00:00:00:00 */
+
+ BSTP_LOCK_ASSERT(bs);
+
+ mif = NULL;
+ /*
+ * Search through the Ethernet adapters and find the one with the
+ * lowest MAC address. The adapter we take the MAC address from does
+ * not need to be part of the bridge; it just needs to provide a
+ * unique value.
+ */
+ IFNET_RLOCK_NOSLEEP();
+ TAILQ_FOREACH(ifp, &V_ifnet, if_link) {
+ if (ifp->if_type != IFT_ETHER)
+ continue;
+
+ if (bstp_addr_cmp(IF_LLADDR(ifp), llzero) == 0)
+ continue;
+
+ if (mif == NULL) {
+ mif = ifp;
+ continue;
+ }
+ if (bstp_addr_cmp(IF_LLADDR(ifp), IF_LLADDR(mif)) < 0) {
+ mif = ifp;
+ continue;
+ }
+ }
+ IFNET_RUNLOCK_NOSLEEP();
+
+ if (LIST_EMPTY(&bs->bs_bplist) || mif == NULL) {
+ /* Set the bridge and root id (lower bits) to zero */
+ bs->bs_bridge_pv.pv_dbridge_id =
+ ((uint64_t)bs->bs_bridge_priority) << 48;
+ bs->bs_bridge_pv.pv_root_id = bs->bs_bridge_pv.pv_dbridge_id;
+ bs->bs_root_pv = bs->bs_bridge_pv;
+ /* Disable any remaining ports, they will have no MAC address */
+ LIST_FOREACH(bp, &bs->bs_bplist, bp_next) {
+ bp->bp_infois = BSTP_INFO_DISABLED;
+ bstp_set_port_role(bp, BSTP_ROLE_DISABLED);
+ }
+ callout_stop(&bs->bs_bstpcallout);
+ return;
+ }
+
+ e_addr = IF_LLADDR(mif);
+ bs->bs_bridge_pv.pv_dbridge_id =
+ (((uint64_t)bs->bs_bridge_priority) << 48) |
+ (((uint64_t)e_addr[0]) << 40) |
+ (((uint64_t)e_addr[1]) << 32) |
+ (((uint64_t)e_addr[2]) << 24) |
+ (((uint64_t)e_addr[3]) << 16) |
+ (((uint64_t)e_addr[4]) << 8) |
+ (((uint64_t)e_addr[5]));
+
+ bs->bs_bridge_pv.pv_root_id = bs->bs_bridge_pv.pv_dbridge_id;
+ bs->bs_bridge_pv.pv_cost = 0;
+ bs->bs_bridge_pv.pv_dport_id = 0;
+ bs->bs_bridge_pv.pv_port_id = 0;
+
+ if (bs->bs_running && callout_pending(&bs->bs_bstpcallout) == 0)
+ callout_reset(&bs->bs_bstpcallout, hz, bstp_tick, bs);
+
+ LIST_FOREACH(bp, &bs->bs_bplist, bp_next) {
+ bp->bp_port_id = (bp->bp_priority << 8) |
+ (bp->bp_ifp->if_index & 0xfff);
+ bstp_ifupdstatus(bs, bp);
+ }
+
+ bstp_assign_roles(bs);
+ bstp_timer_start(&bs->bs_link_timer, BSTP_LINK_TIMER);
+}
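+
+/*
+ * Illustrative example (not in the original source): the bridge id packs the
+ * 16-bit bridge priority above the 48-bit MAC address, so priority 0x8000
+ * with MAC 00:11:22:33:44:55 yields the id 0x8000001122334455.  The port id
+ * above combines (bp_priority << 8) with the low 12 bits of the interface
+ * index, so the default priority 0x80 on if_index 5 gives 0x8005.
+ */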
+
+static int
+bstp_modevent(module_t mod, int type, void *data)
+{
+ switch (type) {
+ case MOD_LOAD:
+ mtx_init(&bstp_list_mtx, "bridgestp list", NULL, MTX_DEF);
+ LIST_INIT(&bstp_list);
+ bstp_linkstate_p = bstp_linkstate;
+ break;
+ case MOD_UNLOAD:
+ bstp_linkstate_p = NULL;
+ mtx_destroy(&bstp_list_mtx);
+ break;
+ default:
+ return (EOPNOTSUPP);
+ }
+ return (0);
+}
+
+static moduledata_t bstp_mod = {
+ "bridgestp",
+ bstp_modevent,
+ 0
+};
+
+DECLARE_MODULE(bridgestp, bstp_mod, SI_SUB_PSEUDO, SI_ORDER_ANY);
+MODULE_VERSION(bridgestp, 1);
+
+void
+bstp_attach(struct bstp_state *bs, struct bstp_cb_ops *cb)
+{
+ BSTP_LOCK_INIT(bs);
+ callout_init_mtx(&bs->bs_bstpcallout, &bs->bs_mtx, 0);
+ LIST_INIT(&bs->bs_bplist);
+
+ bs->bs_bridge_max_age = BSTP_DEFAULT_MAX_AGE;
+ bs->bs_bridge_htime = BSTP_DEFAULT_HELLO_TIME;
+ bs->bs_bridge_fdelay = BSTP_DEFAULT_FORWARD_DELAY;
+ bs->bs_bridge_priority = BSTP_DEFAULT_BRIDGE_PRIORITY;
+ bs->bs_hold_time = BSTP_DEFAULT_HOLD_TIME;
+ bs->bs_migration_delay = BSTP_DEFAULT_MIGRATE_DELAY;
+ bs->bs_txholdcount = BSTP_DEFAULT_HOLD_COUNT;
+ bs->bs_protover = BSTP_PROTO_RSTP;
+ bs->bs_state_cb = cb->bcb_state;
+ bs->bs_rtage_cb = cb->bcb_rtage;
+
+ getmicrotime(&bs->bs_last_tc_time);
+
+ mtx_lock(&bstp_list_mtx);
+ LIST_INSERT_HEAD(&bstp_list, bs, bs_list);
+ mtx_unlock(&bstp_list_mtx);
+}
+
+void
+bstp_detach(struct bstp_state *bs)
+{
+ KASSERT(LIST_EMPTY(&bs->bs_bplist), ("bstp still active"));
+
+ mtx_lock(&bstp_list_mtx);
+ LIST_REMOVE(bs, bs_list);
+ mtx_unlock(&bstp_list_mtx);
+ callout_drain(&bs->bs_bstpcallout);
+ BSTP_LOCK_DESTROY(bs);
+}
+
+void
+bstp_init(struct bstp_state *bs)
+{
+ BSTP_LOCK(bs);
+ callout_reset(&bs->bs_bstpcallout, hz, bstp_tick, bs);
+ bs->bs_running = 1;
+ bstp_reinit(bs);
+ BSTP_UNLOCK(bs);
+}
+
+void
+bstp_stop(struct bstp_state *bs)
+{
+ struct bstp_port *bp;
+
+ BSTP_LOCK(bs);
+
+ LIST_FOREACH(bp, &bs->bs_bplist, bp_next)
+ bstp_set_port_state(bp, BSTP_IFSTATE_DISCARDING);
+
+ bs->bs_running = 0;
+ callout_stop(&bs->bs_bstpcallout);
+ BSTP_UNLOCK(bs);
+}
+
+int
+bstp_create(struct bstp_state *bs, struct bstp_port *bp, struct ifnet *ifp)
+{
+ bzero(bp, sizeof(struct bstp_port));
+
+ BSTP_LOCK(bs);
+ bp->bp_ifp = ifp;
+ bp->bp_bs = bs;
+ bp->bp_priority = BSTP_DEFAULT_PORT_PRIORITY;
+ TASK_INIT(&bp->bp_statetask, 0, bstp_notify_state, bp);
+ TASK_INIT(&bp->bp_rtagetask, 0, bstp_notify_rtage, bp);
+
+ /* Init state */
+ bp->bp_infois = BSTP_INFO_DISABLED;
+ bp->bp_flags = BSTP_PORT_AUTOEDGE|BSTP_PORT_AUTOPTP;
+ bstp_set_port_state(bp, BSTP_IFSTATE_DISCARDING);
+ bstp_set_port_proto(bp, bs->bs_protover);
+ bstp_set_port_role(bp, BSTP_ROLE_DISABLED);
+ bstp_set_port_tc(bp, BSTP_TCSTATE_INACTIVE);
+ bp->bp_path_cost = bstp_calc_path_cost(bp);
+ BSTP_UNLOCK(bs);
+ return (0);
+}
+
+int
+bstp_enable(struct bstp_port *bp)
+{
+ struct bstp_state *bs = bp->bp_bs;
+ struct ifnet *ifp = bp->bp_ifp;
+
+ KASSERT(bp->bp_active == 0, ("already a bstp member"));
+
+ switch (ifp->if_type) {
+ case IFT_ETHER: /* These can do spanning tree. */
+ break;
+ default:
+ /* Nothing else can. */
+ return (EINVAL);
+ }
+
+ BSTP_LOCK(bs);
+ LIST_INSERT_HEAD(&bs->bs_bplist, bp, bp_next);
+ bp->bp_active = 1;
+ bp->bp_flags |= BSTP_PORT_NEWINFO;
+ bstp_reinit(bs);
+ bstp_update_roles(bs, bp);
+ BSTP_UNLOCK(bs);
+ return (0);
+}
+
+void
+bstp_disable(struct bstp_port *bp)
+{
+ struct bstp_state *bs = bp->bp_bs;
+
+ KASSERT(bp->bp_active == 1, ("not a bstp member"));
+
+ BSTP_LOCK(bs);
+ bstp_disable_port(bs, bp);
+ LIST_REMOVE(bp, bp_next);
+ bp->bp_active = 0;
+ bstp_reinit(bs);
+ BSTP_UNLOCK(bs);
+}
+
+/*
+ * The bstp_port structure is about to be freed by the parent bridge.
+ */
+void
+bstp_destroy(struct bstp_port *bp)
+{
+ KASSERT(bp->bp_active == 0, ("port is still attached"));
+ taskqueue_drain(taskqueue_swi, &bp->bp_statetask);
+ taskqueue_drain(taskqueue_swi, &bp->bp_rtagetask);
+}
diff --git a/rtems/freebsd/net/bridgestp.h b/rtems/freebsd/net/bridgestp.h
new file mode 100644
index 00000000..0ab42ebd
--- /dev/null
+++ b/rtems/freebsd/net/bridgestp.h
@@ -0,0 +1,396 @@
+/* $NetBSD: if_bridgevar.h,v 1.4 2003/07/08 07:13:50 itojun Exp $ */
+
+/*
+ * Copyright 2001 Wasabi Systems, Inc.
+ * All rights reserved.
+ *
+ * Written by Jason R. Thorpe for Wasabi Systems, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed for the NetBSD Project by
+ * Wasabi Systems, Inc.
+ * 4. The name of Wasabi Systems, Inc. may not be used to endorse
+ * or promote products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Copyright (c) 1999, 2000 Jason L. Wright (jason@thought.net)
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Jason L. Wright
+ * 4. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * OpenBSD: if_bridge.h,v 1.14 2001/03/22 03:48:29 jason Exp
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * Data structure and control definitions for STP interfaces.
+ */
+
+#include <rtems/freebsd/sys/callout.h>
+#include <rtems/freebsd/sys/queue.h>
+
+/* STP port states */
+#define BSTP_IFSTATE_DISABLED 0
+#define BSTP_IFSTATE_LISTENING 1
+#define BSTP_IFSTATE_LEARNING 2
+#define BSTP_IFSTATE_FORWARDING 3
+#define BSTP_IFSTATE_BLOCKING 4
+#define BSTP_IFSTATE_DISCARDING 5
+
+#define BSTP_TCSTATE_ACTIVE 1
+#define BSTP_TCSTATE_DETECTED 2
+#define BSTP_TCSTATE_INACTIVE 3
+#define BSTP_TCSTATE_LEARNING 4
+#define BSTP_TCSTATE_PROPAG 5
+#define BSTP_TCSTATE_ACK 6
+#define BSTP_TCSTATE_TC 7
+#define BSTP_TCSTATE_TCN 8
+
+#define BSTP_ROLE_DISABLED 0
+#define BSTP_ROLE_ROOT 1
+#define BSTP_ROLE_DESIGNATED 2
+#define BSTP_ROLE_ALTERNATE 3
+#define BSTP_ROLE_BACKUP 4
+
+#ifdef _KERNEL
+
+/* STP port flags */
+#define BSTP_PORT_CANMIGRATE 0x0001
+#define BSTP_PORT_NEWINFO 0x0002
+#define BSTP_PORT_DISPUTED 0x0004
+#define BSTP_PORT_ADMCOST 0x0008
+#define BSTP_PORT_AUTOEDGE 0x0010
+#define BSTP_PORT_AUTOPTP 0x0020
+#define BSTP_PORT_ADMEDGE 0x0040
+#define BSTP_PORT_PNDCOST 0x0080
+
+/* BPDU priority */
+#define BSTP_PDU_SUPERIOR 1
+#define BSTP_PDU_REPEATED 2
+#define BSTP_PDU_INFERIOR 3
+#define BSTP_PDU_INFERIORALT 4
+#define BSTP_PDU_OTHER 5
+
+/* BPDU flags */
+#define BSTP_PDU_PRMASK 0x0c /* Port Role */
+#define BSTP_PDU_PRSHIFT 2 /* Port Role offset */
+#define BSTP_PDU_F_UNKN 0x00 /* Unknown port (00) */
+#define BSTP_PDU_F_ALT 0x01 /* Alt/Backup port (01) */
+#define BSTP_PDU_F_ROOT 0x02 /* Root port (10) */
+#define BSTP_PDU_F_DESG 0x03 /* Designated port (11) */
+
+#define BSTP_PDU_STPMASK 0x81 /* strip unused STP flags */
+#define BSTP_PDU_RSTPMASK 0x7f /* strip unused RSTP flags */
+#define BSTP_PDU_F_TC 0x01 /* Topology change */
+#define BSTP_PDU_F_P 0x02 /* Proposal flag */
+#define BSTP_PDU_F_L 0x10 /* Learning flag */
+#define BSTP_PDU_F_F 0x20 /* Forwarding flag */
+#define BSTP_PDU_F_A 0x40 /* Agreement flag */
+#define BSTP_PDU_F_TCA 0x80 /* Topology change ack */
+
+/*
+ * Spanning tree defaults.
+ */
+#define BSTP_DEFAULT_MAX_AGE (20 * 256)
+#define BSTP_DEFAULT_HELLO_TIME (2 * 256)
+#define BSTP_DEFAULT_FORWARD_DELAY (15 * 256)
+#define BSTP_DEFAULT_HOLD_TIME (1 * 256)
+#define BSTP_DEFAULT_MIGRATE_DELAY (3 * 256)
+#define BSTP_DEFAULT_HOLD_COUNT 6
+#define BSTP_DEFAULT_BRIDGE_PRIORITY 0x8000
+#define BSTP_DEFAULT_PORT_PRIORITY 0x80
+#define BSTP_DEFAULT_PATH_COST 55
+#define BSTP_MIN_HELLO_TIME (1 * 256)
+#define BSTP_MIN_MAX_AGE (6 * 256)
+#define BSTP_MIN_FORWARD_DELAY (4 * 256)
+#define BSTP_MIN_HOLD_COUNT 1
+#define BSTP_MAX_HELLO_TIME (2 * 256)
+#define BSTP_MAX_MAX_AGE (40 * 256)
+#define BSTP_MAX_FORWARD_DELAY (30 * 256)
+#define BSTP_MAX_HOLD_COUNT 10
+#define BSTP_MAX_PRIORITY 61440
+#define BSTP_MAX_PORT_PRIORITY 240
+#define BSTP_MAX_PATH_COST 200000000
+
+/* BPDU message types */
+#define BSTP_MSGTYPE_CFG 0x00 /* Configuration */
+#define BSTP_MSGTYPE_RSTP 0x02 /* Rapid STP */
+#define BSTP_MSGTYPE_TCN 0x80 /* Topology chg notification */
+
+/* Protocol versions */
+#define BSTP_PROTO_ID 0x00
+#define BSTP_PROTO_STP 0x00
+#define BSTP_PROTO_RSTP 0x02
+#define BSTP_PROTO_MAX BSTP_PROTO_RSTP
+
+#define BSTP_INFO_RECEIVED 1
+#define BSTP_INFO_MINE 2
+#define BSTP_INFO_AGED 3
+#define BSTP_INFO_DISABLED 4
+
+
+#define BSTP_MESSAGE_AGE_INCR (1 * 256) /* in 256ths of a second */
+#define BSTP_TICK_VAL (1 * 256) /* in 256ths of a second */
+#define BSTP_LINK_TIMER (BSTP_TICK_VAL * 15)
+
+/*
+ * Driver callbacks for STP state changes
+ */
+typedef void (*bstp_state_cb_t)(struct ifnet *, int);
+typedef void (*bstp_rtage_cb_t)(struct ifnet *, int);
+struct bstp_cb_ops {
+ bstp_state_cb_t bcb_state;
+ bstp_rtage_cb_t bcb_rtage;
+};
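+
+/*
+ * Illustrative sketch (hypothetical driver code, not part of this header):
+ * the parent bridge supplies both callbacks when attaching its STP state.
+ *
+ *	static void
+ *	mybr_stp_state(struct ifnet *ifp, int state)
+ *	{
+ *		... push the new STP state into the bridge member ...
+ *	}
+ *
+ *	static void
+ *	mybr_stp_rtage(struct ifnet *ifp, int age)
+ *	{
+ *		... age out routes learned on ifp ...
+ *	}
+ *
+ *	static struct bstp_cb_ops mybr_cb = {
+ *		.bcb_state = mybr_stp_state,
+ *		.bcb_rtage = mybr_stp_rtage,
+ *	};
+ *
+ *	bstp_attach(&sc->sc_stp, &mybr_cb);
+ */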
+
+/*
+ * Because BPDU's do not make nicely aligned structures, two different
+ * declarations are used: bstp_?bpdu (wire representation, packed) and
+ * bstp_*_unit (internal, nicely aligned version).
+ */
+
+/* configuration bridge protocol data unit */
+struct bstp_cbpdu {
+ uint8_t cbu_dsap; /* LLC: destination sap */
+ uint8_t cbu_ssap; /* LLC: source sap */
+ uint8_t cbu_ctl; /* LLC: control */
+ uint16_t cbu_protoid; /* protocol id */
+ uint8_t cbu_protover; /* protocol version */
+ uint8_t cbu_bpdutype; /* message type */
+ uint8_t cbu_flags; /* flags (below) */
+
+ /* root id */
+ uint16_t cbu_rootpri; /* root priority */
+ uint8_t cbu_rootaddr[6]; /* root address */
+
+ uint32_t cbu_rootpathcost; /* root path cost */
+
+ /* bridge id */
+ uint16_t cbu_bridgepri; /* bridge priority */
+ uint8_t cbu_bridgeaddr[6]; /* bridge address */
+
+ uint16_t cbu_portid; /* port id */
+ uint16_t cbu_messageage; /* current message age */
+ uint16_t cbu_maxage; /* maximum age */
+ uint16_t cbu_hellotime; /* hello time */
+ uint16_t cbu_forwarddelay; /* forwarding delay */
+ uint8_t cbu_versionlen; /* version 1 length */
+} __packed;
+#define BSTP_BPDU_STP_LEN (3 + 35) /* LLC + STP pdu */
+#define BSTP_BPDU_RSTP_LEN (3 + 36) /* LLC + RSTP pdu */
+
+/* topology change notification bridge protocol data unit */
+struct bstp_tbpdu {
+ uint8_t tbu_dsap; /* LLC: destination sap */
+ uint8_t tbu_ssap; /* LLC: source sap */
+ uint8_t tbu_ctl; /* LLC: control */
+ uint16_t tbu_protoid; /* protocol id */
+ uint8_t tbu_protover; /* protocol version */
+ uint8_t tbu_bpdutype; /* message type */
+} __packed;
+
+/*
+ * Timekeeping structure used in spanning tree code.
+ */
+struct bstp_timer {
+ int active;
+ int latched;
+ int value;
+};
+
+struct bstp_pri_vector {
+ uint64_t pv_root_id;
+ uint32_t pv_cost;
+ uint64_t pv_dbridge_id;
+ uint16_t pv_dport_id;
+ uint16_t pv_port_id;
+};
+
+struct bstp_config_unit {
+ struct bstp_pri_vector cu_pv;
+ uint16_t cu_message_age;
+ uint16_t cu_max_age;
+ uint16_t cu_forward_delay;
+ uint16_t cu_hello_time;
+ uint8_t cu_message_type;
+ uint8_t cu_topology_change_ack;
+ uint8_t cu_topology_change;
+ uint8_t cu_proposal;
+ uint8_t cu_agree;
+ uint8_t cu_learning;
+ uint8_t cu_forwarding;
+ uint8_t cu_role;
+};
+
+struct bstp_tcn_unit {
+ uint8_t tu_message_type;
+};
+
+struct bstp_port {
+ LIST_ENTRY(bstp_port) bp_next;
+ struct ifnet *bp_ifp; /* parent if */
+ struct bstp_state *bp_bs;
+ uint8_t bp_active;
+ uint8_t bp_protover;
+ uint32_t bp_flags;
+ uint32_t bp_path_cost;
+ uint16_t bp_port_msg_age;
+ uint16_t bp_port_max_age;
+ uint16_t bp_port_fdelay;
+ uint16_t bp_port_htime;
+ uint16_t bp_desg_msg_age;
+ uint16_t bp_desg_max_age;
+ uint16_t bp_desg_fdelay;
+ uint16_t bp_desg_htime;
+ struct bstp_timer bp_edge_delay_timer;
+ struct bstp_timer bp_forward_delay_timer;
+ struct bstp_timer bp_hello_timer;
+ struct bstp_timer bp_message_age_timer;
+ struct bstp_timer bp_migrate_delay_timer;
+ struct bstp_timer bp_recent_backup_timer;
+ struct bstp_timer bp_recent_root_timer;
+ struct bstp_timer bp_tc_timer;
+ struct bstp_config_unit bp_msg_cu;
+ struct bstp_pri_vector bp_desg_pv;
+ struct bstp_pri_vector bp_port_pv;
+ uint16_t bp_port_id;
+ uint8_t bp_state;
+ uint8_t bp_tcstate;
+ uint8_t bp_role;
+ uint8_t bp_infois;
+ uint8_t bp_tc_ack;
+ uint8_t bp_tc_prop;
+ uint8_t bp_fdbflush;
+ uint8_t bp_priority;
+ uint8_t bp_ptp_link;
+ uint8_t bp_agree;
+ uint8_t bp_agreed;
+ uint8_t bp_sync;
+ uint8_t bp_synced;
+ uint8_t bp_proposing;
+ uint8_t bp_proposed;
+ uint8_t bp_operedge;
+ uint8_t bp_reroot;
+ uint8_t bp_rcvdtc;
+ uint8_t bp_rcvdtca;
+ uint8_t bp_rcvdtcn;
+ uint32_t bp_forward_transitions;
+ uint8_t bp_txcount;
+ struct task bp_statetask;
+ struct task bp_rtagetask;
+};
+
+/*
+ * Software state for each bridge STP.
+ */
+struct bstp_state {
+ LIST_ENTRY(bstp_state) bs_list;
+ uint8_t bs_running;
+ struct mtx bs_mtx;
+ struct bstp_pri_vector bs_bridge_pv;
+ struct bstp_pri_vector bs_root_pv;
+ struct bstp_port *bs_root_port;
+ uint8_t bs_protover;
+ uint16_t bs_migration_delay;
+ uint16_t bs_edge_delay;
+ uint16_t bs_bridge_max_age;
+ uint16_t bs_bridge_fdelay;
+ uint16_t bs_bridge_htime;
+ uint16_t bs_root_msg_age;
+ uint16_t bs_root_max_age;
+ uint16_t bs_root_fdelay;
+ uint16_t bs_root_htime;
+ uint16_t bs_hold_time;
+ uint16_t bs_bridge_priority;
+ uint8_t bs_txholdcount;
+ uint8_t bs_allsynced;
+ struct callout bs_bstpcallout; /* STP callout */
+ struct bstp_timer bs_link_timer;
+ struct timeval bs_last_tc_time;
+ LIST_HEAD(, bstp_port) bs_bplist;
+ bstp_state_cb_t bs_state_cb;
+ bstp_rtage_cb_t bs_rtage_cb;
+};
+
+#define BSTP_LOCK_INIT(_bs) mtx_init(&(_bs)->bs_mtx, "bstp", NULL, MTX_DEF)
+#define BSTP_LOCK_DESTROY(_bs) mtx_destroy(&(_bs)->bs_mtx)
+#define BSTP_LOCK(_bs) mtx_lock(&(_bs)->bs_mtx)
+#define BSTP_UNLOCK(_bs) mtx_unlock(&(_bs)->bs_mtx)
+#define BSTP_LOCK_ASSERT(_bs) mtx_assert(&(_bs)->bs_mtx, MA_OWNED)
+
+extern const uint8_t bstp_etheraddr[];
+
+extern void (*bstp_linkstate_p)(struct ifnet *ifp, int state);
+
+void bstp_attach(struct bstp_state *, struct bstp_cb_ops *);
+void bstp_detach(struct bstp_state *);
+void bstp_init(struct bstp_state *);
+void bstp_stop(struct bstp_state *);
+int bstp_create(struct bstp_state *, struct bstp_port *, struct ifnet *);
+int bstp_enable(struct bstp_port *);
+void bstp_disable(struct bstp_port *);
+void bstp_destroy(struct bstp_port *);
+void bstp_linkstate(struct ifnet *, int);
+int bstp_set_htime(struct bstp_state *, int);
+int bstp_set_fdelay(struct bstp_state *, int);
+int bstp_set_maxage(struct bstp_state *, int);
+int bstp_set_holdcount(struct bstp_state *, int);
+int bstp_set_protocol(struct bstp_state *, int);
+int bstp_set_priority(struct bstp_state *, int);
+int bstp_set_port_priority(struct bstp_port *, int);
+int bstp_set_path_cost(struct bstp_port *, uint32_t);
+int bstp_set_edge(struct bstp_port *, int);
+int bstp_set_autoedge(struct bstp_port *, int);
+int bstp_set_ptp(struct bstp_port *, int);
+int bstp_set_autoptp(struct bstp_port *, int);
+struct mbuf *bstp_input(struct bstp_port *, struct ifnet *, struct mbuf *);
+
+#endif /* _KERNEL */
diff --git a/rtems/freebsd/net/ethernet.h b/rtems/freebsd/net/ethernet.h
new file mode 100644
index 00000000..4eb34811
--- /dev/null
+++ b/rtems/freebsd/net/ethernet.h
@@ -0,0 +1,405 @@
+/*
+ * Fundamental constants relating to Ethernet.
+ *
+ * $FreeBSD$
+ *
+ */
+
+#ifndef _NET_ETHERNET_HH_
+#define _NET_ETHERNET_HH_
+
+/*
+ * Some basic Ethernet constants.
+ */
+#define ETHER_ADDR_LEN 6 /* length of an Ethernet address */
+#define ETHER_TYPE_LEN 2 /* length of the Ethernet type field */
+#define ETHER_CRC_LEN 4 /* length of the Ethernet CRC */
+#define ETHER_HDR_LEN (ETHER_ADDR_LEN*2+ETHER_TYPE_LEN)
+#define ETHER_MIN_LEN 64 /* minimum frame len, including CRC */
+#define ETHER_MAX_LEN 1518 /* maximum frame len, including CRC */
+#define ETHER_MAX_LEN_JUMBO 9018 /* max jumbo frame len, including CRC */
+
+#define ETHER_VLAN_ENCAP_LEN 4 /* len of 802.1Q VLAN encapsulation */
+/*
+ * Mbuf adjust factor to force 32-bit alignment of IP header.
+ * Drivers should do m_adj(m, ETHER_ALIGN) when setting up a
+ * receive so the upper layers get the IP header properly aligned
+ * past the 14-byte Ethernet header.
+ */
+#define ETHER_ALIGN 2 /* driver adjust for IP hdr alignment */
+
+/*
+ * Compute the maximum frame size based on ethertype (i.e. possible
+ * encapsulation) and whether or not an FCS is present.
+ */
+#define ETHER_MAX_FRAME(ifp, etype, hasfcs) \
+ ((ifp)->if_mtu + ETHER_HDR_LEN + \
+ ((hasfcs) ? ETHER_CRC_LEN : 0) + \
+ (((etype) == ETHERTYPE_VLAN) ? ETHER_VLAN_ENCAP_LEN : 0))
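+
+/*
+ * Illustrative example (not in the original header): for a 1500-byte MTU,
+ * ETHER_MAX_FRAME(ifp, ETHERTYPE_VLAN, 1) evaluates to
+ * 1500 + 14 + 4 + 4 = 1522 bytes (Ethernet header, 802.1Q tag and FCS).
+ */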
+
+/*
+ * Ethernet-specific mbuf flags.
+ */
+#define M_HASFCS M_PROTO5 /* FCS included at end of frame */
+
+/*
+ * Ethernet CRC32 polynomials (big- and little-endian versions).
+ */
+#define ETHER_CRC_POLY_LE 0xedb88320
+#define ETHER_CRC_POLY_BE 0x04c11db6
+
+/*
+ * A macro to validate an Ethernet frame length.
+ */
+#define ETHER_IS_VALID_LEN(foo) \
+ ((foo) >= ETHER_MIN_LEN && (foo) <= ETHER_MAX_LEN)
+
+/*
+ * Structure of a 10Mb/s Ethernet header.
+ */
+struct ether_header {
+ u_char ether_dhost[ETHER_ADDR_LEN];
+ u_char ether_shost[ETHER_ADDR_LEN];
+ u_short ether_type;
+} __packed;
+
+/*
+ * Structure of a 48-bit Ethernet address.
+ */
+struct ether_addr {
+ u_char octet[ETHER_ADDR_LEN];
+} __packed;
+
+#define ETHER_IS_MULTICAST(addr) (*(addr) & 0x01) /* is address mcast/bcast? */
+
+/*
+ * NOTE: 0x0000-0x05DC (0..1500) are generally IEEE 802.3 length fields.
+ * However, there are some conflicts.
+ */
+
+#define ETHERTYPE_8023 0x0004 /* IEEE 802.3 packet */
+ /* 0x0101 .. 0x1FF Experimental */
+#define ETHERTYPE_PUP 0x0200 /* Xerox PUP protocol - see 0A00 */
+#define ETHERTYPE_PUPAT 0x0200 /* PUP Address Translation - see 0A01 */
+#define ETHERTYPE_SPRITE 0x0500 /* ??? */
+ /* 0x0400 Nixdorf */
+#define ETHERTYPE_NS 0x0600 /* XNS */
+#define ETHERTYPE_NSAT 0x0601 /* XNS Address Translation (3Mb only) */
+#define ETHERTYPE_DLOG1 0x0660 /* DLOG (?) */
+#define ETHERTYPE_DLOG2 0x0661 /* DLOG (?) */
+#define ETHERTYPE_IP 0x0800 /* IP protocol */
+#define ETHERTYPE_X75 0x0801 /* X.75 Internet */
+#define ETHERTYPE_NBS 0x0802 /* NBS Internet */
+#define ETHERTYPE_ECMA 0x0803 /* ECMA Internet */
+#define ETHERTYPE_CHAOS 0x0804 /* CHAOSnet */
+#define ETHERTYPE_X25 0x0805 /* X.25 Level 3 */
+#define ETHERTYPE_ARP 0x0806 /* Address resolution protocol */
+#define ETHERTYPE_NSCOMPAT 0x0807 /* XNS Compatibility */
+#define ETHERTYPE_FRARP 0x0808 /* Frame Relay ARP (RFC1701) */
+ /* 0x081C Symbolics Private */
+ /* 0x0888 - 0x088A Xyplex */
+#define ETHERTYPE_UBDEBUG 0x0900 /* Ungermann-Bass network debugger */
+#define ETHERTYPE_IEEEPUP 0x0A00 /* Xerox IEEE802.3 PUP */
+#define ETHERTYPE_IEEEPUPAT 0x0A01 /* Xerox IEEE802.3 PUP Address Translation */
+#define ETHERTYPE_VINES 0x0BAD /* Banyan VINES */
+#define ETHERTYPE_VINESLOOP 0x0BAE /* Banyan VINES Loopback */
+#define ETHERTYPE_VINESECHO 0x0BAF /* Banyan VINES Echo */
+
+/* 0x1000 - 0x100F Berkeley Trailer */
+/*
+ * The ETHERTYPE_NTRAILER packet types starting at ETHERTYPE_TRAIL have
+ * (type-ETHERTYPE_TRAIL)*512 bytes of data followed
+ * by an ETHER type (as given above) and then the (variable-length) header.
+ */
+#define ETHERTYPE_TRAIL 0x1000 /* Trailer packet */
+#define ETHERTYPE_NTRAILER 16
+
+#define ETHERTYPE_DCA 0x1234 /* DCA - Multicast */
+#define ETHERTYPE_VALID 0x1600 /* VALID system protocol */
+#define ETHERTYPE_DOGFIGHT 0x1989 /* Artificial Horizons ("Aviator" dogfight simulator [on Sun]) */
+#define ETHERTYPE_RCL 0x1995 /* Datapoint Corporation (RCL lan protocol) */
+
+ /* The following 3C0x types
+ are unregistered: */
+#define ETHERTYPE_NBPVCD 0x3C00 /* 3Com NBP virtual circuit datagram (like XNS SPP) not registered */
+#define ETHERTYPE_NBPSCD 0x3C01 /* 3Com NBP System control datagram not registered */
+#define ETHERTYPE_NBPCREQ 0x3C02 /* 3Com NBP Connect request (virtual cct) not registered */
+#define ETHERTYPE_NBPCRSP 0x3C03 /* 3Com NBP Connect response not registered */
+#define ETHERTYPE_NBPCC 0x3C04 /* 3Com NBP Connect complete not registered */
+#define ETHERTYPE_NBPCLREQ 0x3C05 /* 3Com NBP Close request (virtual cct) not registered */
+#define ETHERTYPE_NBPCLRSP 0x3C06 /* 3Com NBP Close response not registered */
+#define ETHERTYPE_NBPDG 0x3C07 /* 3Com NBP Datagram (like XNS IDP) not registered */
+#define ETHERTYPE_NBPDGB 0x3C08 /* 3Com NBP Datagram broadcast not registered */
+#define ETHERTYPE_NBPCLAIM 0x3C09 /* 3Com NBP Claim NetBIOS name not registered */
+#define ETHERTYPE_NBPDLTE 0x3C0A /* 3Com NBP Delete NetBIOS name not registered */
+#define ETHERTYPE_NBPRAS 0x3C0B /* 3Com NBP Remote adaptor status request not registered */
+#define ETHERTYPE_NBPRAR 0x3C0C /* 3Com NBP Remote adaptor response not registered */
+#define ETHERTYPE_NBPRST 0x3C0D /* 3Com NBP Reset not registered */
+
+#define ETHERTYPE_PCS 0x4242 /* PCS Basic Block Protocol */
+#define ETHERTYPE_IMLBLDIAG 0x424C /* Information Modes Little Big LAN diagnostic */
+#define ETHERTYPE_DIDDLE 0x4321 /* THD - Diddle */
+#define ETHERTYPE_IMLBL 0x4C42 /* Information Modes Little Big LAN */
+#define ETHERTYPE_SIMNET 0x5208 /* BBN Simnet Private */
+#define ETHERTYPE_DECEXPER 0x6000 /* DEC Unassigned, experimental */
+#define ETHERTYPE_MOPDL 0x6001 /* DEC MOP dump/load */
+#define ETHERTYPE_MOPRC 0x6002 /* DEC MOP remote console */
+#define ETHERTYPE_DECnet 0x6003 /* DEC DECNET Phase IV route */
+#define ETHERTYPE_DN ETHERTYPE_DECnet /* libpcap, tcpdump */
+#define ETHERTYPE_LAT 0x6004 /* DEC LAT */
+#define ETHERTYPE_DECDIAG 0x6005 /* DEC diagnostic protocol (at interface initialization?) */
+#define ETHERTYPE_DECCUST 0x6006 /* DEC customer protocol */
+#define ETHERTYPE_SCA 0x6007 /* DEC LAVC, SCA */
+#define ETHERTYPE_AMBER 0x6008 /* DEC AMBER */
+#define ETHERTYPE_DECMUMPS 0x6009 /* DEC MUMPS */
+ /* 0x6010 - 0x6014 3Com Corporation */
+#define ETHERTYPE_TRANSETHER 0x6558 /* Trans Ether Bridging (RFC1701)*/
+#define ETHERTYPE_RAWFR 0x6559 /* Raw Frame Relay (RFC1701) */
+#define ETHERTYPE_UBDL 0x7000 /* Ungermann-Bass download */
+#define ETHERTYPE_UBNIU 0x7001 /* Ungermann-Bass NIUs */
+#define ETHERTYPE_UBDIAGLOOP 0x7002 /* Ungermann-Bass diagnostic/loopback */
+#define ETHERTYPE_UBNMC 0x7003 /* Ungermann-Bass ??? (NMC to/from UB Bridge) */
+#define ETHERTYPE_UBBST 0x7005 /* Ungermann-Bass Bridge Spanning Tree */
+#define ETHERTYPE_OS9 0x7007 /* OS/9 Microware */
+#define ETHERTYPE_OS9NET 0x7009 /* OS/9 Net? */
+ /* 0x7020 - 0x7029 LRT (England) (now Sintrom) */
+#define ETHERTYPE_RACAL 0x7030 /* Racal-Interlan */
+#define ETHERTYPE_PRIMENTS 0x7031 /* Prime NTS (Network Terminal Service) */
+#define ETHERTYPE_CABLETRON 0x7034 /* Cabletron */
+#define ETHERTYPE_CRONUSVLN 0x8003 /* Cronus VLN */
+#define ETHERTYPE_CRONUS 0x8004 /* Cronus Direct */
+#define ETHERTYPE_HP 0x8005 /* HP Probe */
+#define ETHERTYPE_NESTAR 0x8006 /* Nestar */
+#define ETHERTYPE_ATTSTANFORD 0x8008 /* AT&T/Stanford (local use) */
+#define ETHERTYPE_EXCELAN 0x8010 /* Excelan */
+#define ETHERTYPE_SG_DIAG 0x8013 /* SGI diagnostic type */
+#define ETHERTYPE_SG_NETGAMES 0x8014 /* SGI network games */
+#define ETHERTYPE_SG_RESV 0x8015 /* SGI reserved type */
+#define ETHERTYPE_SG_BOUNCE 0x8016 /* SGI bounce server */
+#define ETHERTYPE_APOLLODOMAIN 0x8019 /* Apollo DOMAIN */
+#define ETHERTYPE_TYMSHARE 0x802E /* Tymeshare */
+#define ETHERTYPE_TIGAN 0x802F /* Tigan, Inc. */
+#define ETHERTYPE_REVARP 0x8035 /* Reverse addr resolution protocol */
+#define ETHERTYPE_AEONIC 0x8036 /* Aeonic Systems */
+#define ETHERTYPE_IPXNEW 0x8037 /* IPX (Novell Netware?) */
+#define ETHERTYPE_LANBRIDGE 0x8038 /* DEC LANBridge */
+#define ETHERTYPE_DSMD 0x8039 /* DEC DSM/DDP */
+#define ETHERTYPE_ARGONAUT 0x803A /* DEC Argonaut Console */
+#define ETHERTYPE_VAXELN 0x803B /* DEC VAXELN */
+#define ETHERTYPE_DECDNS 0x803C /* DEC DNS Naming Service */
+#define ETHERTYPE_ENCRYPT 0x803D /* DEC Ethernet Encryption */
+#define ETHERTYPE_DECDTS 0x803E /* DEC Distributed Time Service */
+#define ETHERTYPE_DECLTM 0x803F /* DEC LAN Traffic Monitor */
+#define ETHERTYPE_DECNETBIOS 0x8040 /* DEC PATHWORKS DECnet NETBIOS Emulation */
+#define ETHERTYPE_DECLAST 0x8041 /* DEC Local Area System Transport */
+ /* 0x8042 DEC Unassigned */
+#define ETHERTYPE_PLANNING 0x8044 /* Planning Research Corp. */
+ /* 0x8046 - 0x8047 AT&T */
+#define ETHERTYPE_DECAM 0x8048 /* DEC Availability Manager for Distributed Systems DECamds (but someone at DEC says not) */
+#define ETHERTYPE_EXPERDATA 0x8049 /* ExperData */
+#define ETHERTYPE_VEXP 0x805B /* Stanford V Kernel exp. */
+#define ETHERTYPE_VPROD 0x805C /* Stanford V Kernel prod. */
+#define ETHERTYPE_ES 0x805D /* Evans & Sutherland */
+#define ETHERTYPE_LITTLE 0x8060 /* Little Machines */
+#define ETHERTYPE_COUNTERPOINT 0x8062 /* Counterpoint Computers */
+ /* 0x8065 - 0x8066 Univ. of Mass @ Amherst */
+#define ETHERTYPE_VEECO 0x8067 /* Veeco Integrated Auto. */
+#define ETHERTYPE_GENDYN 0x8068 /* General Dynamics */
+#define ETHERTYPE_ATT 0x8069 /* AT&T */
+#define ETHERTYPE_AUTOPHON 0x806A /* Autophon */
+#define ETHERTYPE_COMDESIGN 0x806C /* ComDesign */
+#define ETHERTYPE_COMPUGRAPHIC 0x806D /* Compugraphic Corporation */
+ /* 0x806E - 0x8077 Landmark Graphics Corp. */
+#define ETHERTYPE_MATRA 0x807A /* Matra */
+#define ETHERTYPE_DDE 0x807B /* Dansk Data Elektronik */
+#define ETHERTYPE_MERIT 0x807C /* Merit Internodal (or Univ of Michigan?) */
+ /* 0x807D - 0x807F Vitalink Communications */
+#define ETHERTYPE_VLTLMAN 0x8080 /* Vitalink TransLAN III Management */
+ /* 0x8081 - 0x8083 Counterpoint Computers */
+ /* 0x8088 - 0x808A Xyplex */
+#define ETHERTYPE_ATALK 0x809B /* AppleTalk */
+#define ETHERTYPE_AT ETHERTYPE_ATALK /* old NetBSD */
+#define ETHERTYPE_APPLETALK ETHERTYPE_ATALK /* HP-UX */
+ /* 0x809C - 0x809E Datability */
+#define ETHERTYPE_SPIDER 0x809F /* Spider Systems Ltd. */
+ /* 0x80A3 Nixdorf */
+ /* 0x80A4 - 0x80B3 Siemens Gammasonics Inc. */
+ /* 0x80C0 - 0x80C3 DCA (Digital Comm. Assoc.) Data Exchange Cluster */
+ /* 0x80C4 - 0x80C5 Banyan Systems */
+#define ETHERTYPE_PACER 0x80C6 /* Pacer Software */
+#define ETHERTYPE_APPLITEK 0x80C7 /* Applitek Corporation */
+ /* 0x80C8 - 0x80CC Intergraph Corporation */
+ /* 0x80CD - 0x80CE Harris Corporation */
+ /* 0x80CF - 0x80D2 Taylor Instrument */
+ /* 0x80D3 - 0x80D4 Rosemount Corporation */
+#define ETHERTYPE_SNA 0x80D5 /* IBM SNA Services over Ethernet */
+#define ETHERTYPE_VARIAN 0x80DD /* Varian Associates */
+ /* 0x80DE - 0x80DF TRFS (Integrated Solutions Transparent Remote File System) */
+ /* 0x80E0 - 0x80E3 Allen-Bradley */
+ /* 0x80E4 - 0x80F0 Datability */
+#define ETHERTYPE_RETIX 0x80F2 /* Retix */
+#define ETHERTYPE_AARP 0x80F3 /* AppleTalk AARP */
+ /* 0x80F4 - 0x80F5 Kinetics */
+#define ETHERTYPE_APOLLO 0x80F7 /* Apollo Computer */
+#define ETHERTYPE_VLAN 0x8100 /* IEEE 802.1Q VLAN tagging (XXX conflicts) */
+ /* 0x80FF - 0x8101 Wellfleet Communications (XXX conflicts) */
+#define ETHERTYPE_BOFL 0x8102 /* Wellfleet; BOFL (Breath OF Life) pkts [every 5-10 secs.] */
+#define ETHERTYPE_WELLFLEET 0x8103 /* Wellfleet Communications */
+ /* 0x8107 - 0x8109 Symbolics Private */
+#define ETHERTYPE_TALARIS 0x812B /* Talaris */
+#define ETHERTYPE_WATERLOO 0x8130 /* Waterloo Microsystems Inc. (XXX which?) */
+#define ETHERTYPE_HAYES 0x8130 /* Hayes Microcomputers (XXX which?) */
+#define ETHERTYPE_VGLAB 0x8131 /* VG Laboratory Systems */
+ /* 0x8132 - 0x8137 Bridge Communications */
+#define ETHERTYPE_IPX 0x8137 /* Novell (old) NetWare IPX (ECONFIG E option) */
+#define ETHERTYPE_NOVELL 0x8138 /* Novell, Inc. */
+ /* 0x8139 - 0x813D KTI */
+#define ETHERTYPE_MUMPS 0x813F /* M/MUMPS data sharing */
+#define ETHERTYPE_AMOEBA 0x8145 /* Vrije Universiteit (NL) Amoeba 4 RPC (obsolete) */
+#define ETHERTYPE_FLIP 0x8146 /* Vrije Universiteit (NL) FLIP (Fast Local Internet Protocol) */
+#define ETHERTYPE_VURESERVED 0x8147 /* Vrije Universiteit (NL) [reserved] */
+#define ETHERTYPE_LOGICRAFT 0x8148 /* Logicraft */
+#define ETHERTYPE_NCD 0x8149 /* Network Computing Devices */
+#define ETHERTYPE_ALPHA 0x814A /* Alpha Micro */
+#define ETHERTYPE_SNMP 0x814C /* SNMP over Ethernet (see RFC1089) */
+ /* 0x814D - 0x814E BIIN */
+#define ETHERTYPE_TEC 0x814F /* Technically Elite Concepts */
+#define ETHERTYPE_RATIONAL 0x8150 /* Rational Corp */
+ /* 0x8151 - 0x8153 Qualcomm */
+ /* 0x815C - 0x815E Computer Protocol Pty Ltd */
+ /* 0x8164 - 0x8166 Charles River Data Systems */
+#define ETHERTYPE_XTP 0x817D /* Protocol Engines XTP */
+#define ETHERTYPE_SGITW 0x817E /* SGI/Time Warner prop. */
+#define ETHERTYPE_HIPPI_FP 0x8180 /* HIPPI-FP encapsulation */
+#define ETHERTYPE_STP 0x8181 /* Scheduled Transfer STP, HIPPI-ST */
+ /* 0x8182 - 0x8183 Reserved for HIPPI-6400 */
+ /* 0x8184 - 0x818C SGI prop. */
+#define ETHERTYPE_MOTOROLA 0x818D /* Motorola */
+#define ETHERTYPE_NETBEUI 0x8191 /* PowerLAN NetBIOS/NetBEUI (PC) */
+ /* 0x819A - 0x81A3 RAD Network Devices */
+ /* 0x81B7 - 0x81B9 Xyplex */
+ /* 0x81CC - 0x81D5 Apricot Computers */
+ /* 0x81D6 - 0x81DD Artisoft Lantastic */
+ /* 0x81E6 - 0x81EF Polygon */
+ /* 0x81F0 - 0x81F2 Comsat Labs */
+ /* 0x81F3 - 0x81F5 SAIC */
+ /* 0x81F6 - 0x81F8 VG Analytical */
+ /* 0x8203 - 0x8205 QNX Software Systems Ltd. */
+ /* 0x8221 - 0x8222 Ascom Banking Systems */
+ /* 0x823E - 0x8240 Advanced Encryption Systems */
+ /* 0x8263 - 0x826A Charles River Data Systems */
+ /* 0x827F - 0x8282 Athena Programming */
+ /* 0x829A - 0x829B Inst Ind Info Tech */
+ /* 0x829C - 0x82AB Taurus Controls */
+ /* 0x82AC - 0x8693 Walker Richer & Quinn */
+#define ETHERTYPE_ACCTON 0x8390 /* Accton Technologies (unregistered) */
+#define ETHERTYPE_TALARISMC 0x852B /* Talaris multicast */
+#define ETHERTYPE_KALPANA 0x8582 /* Kalpana */
+ /* 0x8694 - 0x869D Idea Courier */
+ /* 0x869E - 0x86A1 Computer Network Tech */
+ /* 0x86A3 - 0x86AC Gateway Communications */
+#define ETHERTYPE_SECTRA 0x86DB /* SECTRA */
+#define ETHERTYPE_IPV6 0x86DD /* IP protocol version 6 */
+#define ETHERTYPE_DELTACON 0x86DE /* Delta Controls */
+#define ETHERTYPE_ATOMIC 0x86DF /* ATOMIC */
+ /* 0x86E0 - 0x86EF Landis & Gyr Powers */
+ /* 0x8700 - 0x8710 Motorola */
+#define ETHERTYPE_RDP 0x8739 /* Control Technology Inc. RDP Without IP */
+#define ETHERTYPE_MICP 0x873A /* Control Technology Inc. Mcast Industrial Ctrl Proto. */
+ /* 0x873B - 0x873C Control Technology Inc. Proprietary */
+#define ETHERTYPE_TCPCOMP 0x876B /* TCP/IP Compression (RFC1701) */
+#define ETHERTYPE_IPAS 0x876C /* IP Autonomous Systems (RFC1701) */
+#define ETHERTYPE_SECUREDATA 0x876D /* Secure Data (RFC1701) */
+#define ETHERTYPE_FLOWCONTROL 0x8808 /* 802.3x flow control packet */
+#define ETHERTYPE_SLOW 0x8809 /* 802.3ad link aggregation (LACP) */
+#define ETHERTYPE_PPP 0x880B /* PPP (obsolete by PPPoE) */
+#define ETHERTYPE_HITACHI 0x8820 /* Hitachi Cable (Optoelectronic Systems Laboratory) */
+#define ETHERTYPE_MPLS 0x8847 /* MPLS Unicast */
+#define ETHERTYPE_MPLS_MCAST 0x8848 /* MPLS Multicast */
+#define ETHERTYPE_AXIS 0x8856 /* Axis Communications AB proprietary bootstrap/config */
+#define ETHERTYPE_PPPOEDISC 0x8863 /* PPP Over Ethernet Discovery Stage */
+#define ETHERTYPE_PPPOE 0x8864 /* PPP Over Ethernet Session Stage */
+#define ETHERTYPE_LANPROBE 0x8888 /* HP LanProbe test? */
+#define ETHERTYPE_PAE 0x888e /* EAPOL PAE/802.1x */
+#define ETHERTYPE_LOOPBACK 0x9000 /* Loopback: used to test interfaces */
+#define ETHERTYPE_LBACK ETHERTYPE_LOOPBACK /* DEC MOP loopback */
+#define ETHERTYPE_XNSSM 0x9001 /* 3Com (Formerly Bridge Communications), XNS Systems Management */
+#define ETHERTYPE_TCPSM 0x9002 /* 3Com (Formerly Bridge Communications), TCP/IP Systems Management */
+#define ETHERTYPE_BCLOOP 0x9003 /* 3Com (Formerly Bridge Communications), loopback detection */
+#define ETHERTYPE_DEBNI 0xAAAA /* DECNET? Used by VAX 6220 DEBNI */
+#define ETHERTYPE_SONIX 0xFAF5 /* Sonix Arpeggio */
+#define ETHERTYPE_VITAL 0xFF00 /* BBN VITAL-LanBridge cache wakeups */
+ /* 0xFF00 - 0xFF0F ISC Bunker Ramo */
+
+#define ETHERTYPE_MAX 0xFFFF /* Maximum valid ethernet type, reserved */
+
+#define ETHERMTU (ETHER_MAX_LEN-ETHER_HDR_LEN-ETHER_CRC_LEN)
+#define ETHERMIN (ETHER_MIN_LEN-ETHER_HDR_LEN-ETHER_CRC_LEN)
+#define ETHERMTU_JUMBO (ETHER_MAX_LEN_JUMBO - ETHER_HDR_LEN - ETHER_CRC_LEN)
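+
+/*
+ * Illustrative arithmetic (not in the original header):
+ * ETHERMTU = 1518 - 14 - 4 = 1500 and ETHERMIN = 64 - 14 - 4 = 46, the
+ * familiar Ethernet payload bounds; ETHERMTU_JUMBO works out to 9000.
+ */
+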
+/*
+ * The ETHER_BPF_MTAP macro should be used by drivers which support hardware
+ * offload for VLAN tag processing. It will check the mbuf to see if it has
+ * M_VLANTAG set, and if it does, will pass the packet along to
+ * ether_vlan_mtap. This function will re-insert VLAN tags for the duration
+ * of the tap, so they show up properly for network analyzers.
+ */
+#define ETHER_BPF_MTAP(_ifp, _m) do { \
+ if (bpf_peers_present((_ifp)->if_bpf)) { \
+ M_ASSERTVALID(_m); \
+ if (((_m)->m_flags & M_VLANTAG) != 0) \
+ ether_vlan_mtap((_ifp)->if_bpf, (_m), NULL, 0); \
+ else \
+ bpf_mtap((_ifp)->if_bpf, (_m)); \
+ } \
+} while (0)
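+
+/*
+ * Illustrative usage (hypothetical driver receive path, not part of this
+ * header): tap the frame before handing it up the stack so BPF listeners
+ * see the VLAN tag even when the hardware stripped it into the mbuf
+ * packet header:
+ *
+ *	ETHER_BPF_MTAP(ifp, m);
+ *	(*ifp->if_input)(ifp, m);
+ */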
+
+#ifdef _KERNEL
+
+struct ifnet;
+struct mbuf;
+struct route;
+struct sockaddr;
+struct bpf_if;
+
+extern uint32_t ether_crc32_le(const uint8_t *, size_t);
+extern uint32_t ether_crc32_be(const uint8_t *, size_t);
+extern void ether_demux(struct ifnet *, struct mbuf *);
+extern void ether_ifattach(struct ifnet *, const u_int8_t *);
+extern void ether_ifdetach(struct ifnet *);
+extern int ether_ioctl(struct ifnet *, u_long, caddr_t);
+extern int ether_output(struct ifnet *,
+ struct mbuf *, struct sockaddr *, struct route *);
+extern int ether_output_frame(struct ifnet *, struct mbuf *);
+extern char *ether_sprintf(const u_int8_t *);
+void ether_vlan_mtap(struct bpf_if *, struct mbuf *,
+ void *, u_int);
+struct mbuf *ether_vlanencap(struct mbuf *, uint16_t);
+
+#else /* _KERNEL */
+
+#include <rtems/freebsd/sys/cdefs.h>
+
+/*
+ * Ethernet address conversion/parsing routines.
+ */
+__BEGIN_DECLS
+struct ether_addr *ether_aton(const char *);
+struct ether_addr *ether_aton_r(const char *, struct ether_addr *);
+int ether_hostton(const char *, struct ether_addr *);
+int ether_line(const char *, struct ether_addr *, char *);
+char *ether_ntoa(const struct ether_addr *);
+char *ether_ntoa_r(const struct ether_addr *, char *);
+int ether_ntohost(char *, const struct ether_addr *);
+__END_DECLS
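+
+/*
+ * Illustrative userland usage (not part of the original header):
+ *
+ *	struct ether_addr *ea = ether_aton("00:11:22:33:44:55");
+ *	if (ea != NULL)
+ *		printf("%s\n", ether_ntoa(ea));
+ *
+ * ether_aton() and ether_ntoa() return pointers to static storage; the
+ * _r variants take a caller-supplied buffer for thread safety.
+ */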
+
+#endif /* !_KERNEL */
+
+#endif /* !_NET_ETHERNET_HH_ */
diff --git a/rtems/freebsd/net/fddi.h b/rtems/freebsd/net/fddi.h
new file mode 100644
index 00000000..03deabff
--- /dev/null
+++ b/rtems/freebsd/net/fddi.h
@@ -0,0 +1,105 @@
+/*-
+ * Copyright (c) 1982, 1986, 1993
+ * The Regents of the University of California. All rights reserved.
+ * Copyright (c) 1995 Matt Thomas (thomas@lkg.dec.com)
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)if_fddi.h 8.1 (Berkeley) 6/10/93
+ * $FreeBSD$
+ */
+
+#ifndef _NETINET_IF_FDDI_HH_
+#define _NETINET_IF_FDDI_HH_
+
+#define FDDIIPMTU 4352
+#define FDDIMTU 4470
+#define FDDIMIN 3
+
+#define FDDIFC_C 0x80 /* 0b10000000 */
+#define FDDIFC_L 0x40 /* 0b01000000 */
+#define FDDIFC_F 0x30 /* 0b00110000 */
+#define FDDIFC_Z 0x0F /* 0b00001111 */
+#define FDDIFC_CLFF 0xF0 /* Class/Length/Format bits */
+#define FDDIFC_ZZZZ 0x0F /* Control bits */
+
+/*
+ * FDDI Frame Control values. (48-bit addressing only).
+ */
+#define FDDIFC_VOID 0x40 /* Void frame */
+#define FDDIFC_NRT 0x80 /* Nonrestricted token */
+#define FDDIFC_RT 0xc0 /* Restricted token */
+#define FDDIFC_MAC_BEACON 0xc2 /* MAC Beacon frame */
+#define FDDIFC_MAC_CLAIM 0xc3 /* MAC Claim frame */
+#define FDDIFC_LLC_ASYNC 0x50
+#define FDDIFC_LLC_PRIO0 0
+#define FDDIFC_LLC_PRIO1 1
+#define FDDIFC_LLC_PRIO2 2
+#define FDDIFC_LLC_PRIO3 3
+#define FDDIFC_LLC_PRIO4 4
+#define FDDIFC_LLC_PRIO5 5
+#define FDDIFC_LLC_PRIO6 6
+#define FDDIFC_LLC_PRIO7 7
+#define FDDIFC_LLC_SYNC 0xd0
+#define FDDIFC_IMP_ASYNC 0x60 /* Implementor Async. */
+#define FDDIFC_IMP_SYNC 0xe0 /* Implementor Synch. */
+#define FDDIFC_SMT 0x40
+#define FDDIFC_SMT_INFO 0x41 /* SMT Info */
+#define FDDIFC_SMT_NSA 0x4F /* SMT Next station adrs */
+#define FDDIFC_MAC 0xc0 /* MAC frame */
+
+#define FDDI_ADDR_LEN 6
+#define FDDI_HDR_LEN (sizeof(struct fddi_header))
+
+/*
+ * Structure of a 100Mb/s FDDI header.
+ */
+struct fddi_header {
+ u_char fddi_fc;
+ u_char fddi_dhost[FDDI_ADDR_LEN];
+ u_char fddi_shost[FDDI_ADDR_LEN];
+};
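+
+/*
+ * Editor's illustration (not part of the original header): classifying
+ * a received frame by its frame control byte with the FDDIFC_* masks
+ * above; a minimal sketch.
+ */
+#if 0
+static int
+example_is_llc_async(const struct fddi_header *fh)
+{
+	/* LLC asynchronous frames carry 0101 in the CLFF bits. */
+	return ((fh->fddi_fc & FDDIFC_CLFF) == FDDIFC_LLC_ASYNC);
+}
+#endif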
+
+#if defined(_KERNEL)
+#define fddi_ipmulticast_min ether_ipmulticast_min
+#define fddi_ipmulticast_max ether_ipmulticast_max
+#define fddi_addmulti ether_addmulti
+#define fddi_delmulti ether_delmulti
+#define fddi_sprintf ether_sprintf
+
+#define FDDI_BPF_UNSUPPORTED 0
+#define FDDI_BPF_SUPPORTED 1
+
+void fddi_ifattach(struct ifnet *, const u_int8_t *, int);
+void fddi_ifdetach(struct ifnet *, int);
+int fddi_ioctl(struct ifnet *, u_long, caddr_t);
+
+#endif /* _KERNEL */
+#endif /* _NETINET_IF_FDDI_HH_ */
diff --git a/rtems/freebsd/net/firewire.h b/rtems/freebsd/net/firewire.h
new file mode 100644
index 00000000..5411dbf8
--- /dev/null
+++ b/rtems/freebsd/net/firewire.h
@@ -0,0 +1,142 @@
+/*-
+ * Copyright (c) 2004 Doug Rabson
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _NET_FIREWIRE_HH_
+#define _NET_FIREWIRE_HH_
+
+#define FW_ENCAP_UNFRAG 0
+#define FW_ENCAP_FIRST 1
+#define FW_ENCAP_LAST 2
+#define FW_ENCAP_NEXT 3
+
+union fw_encap {
+ uint32_t ul[2];
+ struct {
+#if BYTE_ORDER == BIG_ENDIAN
+ uint32_t lf :2;
+ uint32_t reserved :14;
+ uint32_t ether_type :16;
+#else
+ uint32_t ether_type :16;
+ uint32_t reserved :14;
+ uint32_t lf :2;
+#endif
+ } unfrag;
+ struct {
+#if BYTE_ORDER == BIG_ENDIAN
+ uint32_t lf :2;
+ uint32_t reserved1 :2;
+ uint32_t datagram_size :12;
+ uint32_t ether_type :16;
+ uint32_t dgl :16;
+ uint32_t reserved2 :16;
+#else
+ uint32_t ether_type :16;
+ uint32_t datagram_size :12;
+ uint32_t reserved1 :2;
+ uint32_t lf :2;
+ uint32_t reserved2 :16;
+ uint32_t dgl :16;
+#endif
+ } firstfrag;
+ struct {
+#if BYTE_ORDER == BIG_ENDIAN
+ uint32_t lf :2;
+ uint32_t reserved1 :2;
+ uint32_t datagram_size :12;
+ uint32_t reserved2 :4;
+ uint32_t fragment_offset :12;
+ uint32_t dgl :16;
+ uint32_t reserved3 :16;
+#else
+ uint32_t fragment_offset :12;
+ uint32_t reserved2 :4;
+ uint32_t datagram_size :12;
+ uint32_t reserved1 :2;
+ uint32_t lf :2;
+ uint32_t reserved3 :16;
+ uint32_t dgl :16;
+#endif
+ } nextfrag;
+};
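+
+/*
+ * Editor's illustration (not part of the original header): filling in
+ * an unfragmented encapsulation header before prepending it to a
+ * datagram; a minimal sketch.
+ */
+#if 0
+static void
+example_fill_unfrag(union fw_encap *enc, uint16_t etype)
+{
+	enc->ul[0] = 0;
+	enc->unfrag.lf = FW_ENCAP_UNFRAG;
+	enc->unfrag.ether_type = etype;
+	/* The header travels big-endian on the wire. */
+	enc->ul[0] = htonl(enc->ul[0]);
+}
+#endif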
+
+#define MTAG_FIREWIRE 1394
+#define MTAG_FIREWIRE_HWADDR 0
+#define MTAG_FIREWIRE_SENDER_EUID 1
+
+struct fw_hwaddr {
+ uint32_t sender_unique_ID_hi;
+ uint32_t sender_unique_ID_lo;
+ uint8_t sender_max_rec;
+ uint8_t sspd;
+ uint16_t sender_unicast_FIFO_hi;
+ uint32_t sender_unicast_FIFO_lo;
+};
+
+/*
+ * BPF wants to see one of these.
+ */
+struct fw_bpfhdr {
+ uint8_t firewire_dhost[8];
+ uint8_t firewire_shost[8];
+ uint16_t firewire_type;
+};
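+
+/*
+ * Editor's note: FireWire has no 6-byte MAC addresses, so the pseudo
+ * header above carries 8-byte EUI-64 node unique IDs instead.
+ */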
+
+#ifdef _KERNEL
+
+/*
+ * A structure to track the reassembly of a link-level fragmented
+ * datagram.
+ */
+struct fw_reass {
+ STAILQ_ENTRY(fw_reass) fr_link;
+ uint32_t fr_id; /* host+dgl */
+ struct mbuf *fr_frags; /* chain of frags */
+};
+STAILQ_HEAD(fw_reass_list, fw_reass);
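+
+/*
+ * Editor's illustration (not part of the original header): scanning a
+ * reassembly list for the entry matching a datagram's id (source node
+ * plus dgl); a minimal sketch.
+ */
+#if 0
+static struct fw_reass *
+example_reass_find(struct fw_reass_list *list, uint32_t id)
+{
+	struct fw_reass *r;
+
+	STAILQ_FOREACH(r, list, fr_link)
+		if (r->fr_id == id)
+			return (r);
+	return (NULL);
+}
+#endif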
+
+struct fw_com {
+ struct ifnet *fc_ifp;
+ struct fw_hwaddr fc_hwaddr;
+ struct firewire_comm *fc_fc;
+ uint8_t fc_broadcast_channel;
+ uint8_t fc_speed; /* our speed */
+ uint16_t fc_node; /* our nodeid */
+ struct fw_reass_list fc_frags; /* partial datagrams */
+};
+#define IFP2FWC(ifp) ((struct fw_com *)(ifp)->if_l2com)
+
+extern void firewire_input(struct ifnet *ifp, struct mbuf *m, uint16_t src);
+extern void firewire_ifattach(struct ifnet *, struct fw_hwaddr *);
+extern void firewire_ifdetach(struct ifnet *);
+extern void firewire_busreset(struct ifnet *);
+extern int firewire_ioctl(struct ifnet *, u_long, caddr_t);
+
+#endif /* _KERNEL */
+
+#endif /* !_NET_FIREWIRE_HH_ */
diff --git a/rtems/freebsd/net/flowtable.h b/rtems/freebsd/net/flowtable.h
new file mode 100644
index 00000000..c4a09659
--- /dev/null
+++ b/rtems/freebsd/net/flowtable.h
@@ -0,0 +1,82 @@
+/**************************************************************************
+
+Copyright (c) 2008-2010, BitGravity Inc.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Neither the name of the BitGravity Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+$FreeBSD$
+
+***************************************************************************/
+
+#ifndef _NET_FLOWTABLE_HH_
+#define _NET_FLOWTABLE_HH_
+
+#ifdef _KERNEL
+
+#define FL_HASH_ALL (1<<0) /* hash 4-tuple + protocol */
+#define FL_PCPU (1<<1) /* pcpu cache */
+#define FL_NOAUTO (1<<2) /* don't automatically add flentry on miss */
+
+#define FL_TCP (1<<11)
+#define FL_SCTP (1<<12)
+#define FL_UDP (1<<13)
+#define FL_DEBUG (1<<14)
+#define FL_DEBUG_ALL (1<<15)
+
+struct flowtable;
+struct flentry;
+struct route;
+struct route_in6;
+
+VNET_DECLARE(struct flowtable *, ip_ft);
+#define V_ip_ft VNET(ip_ft)
+
+VNET_DECLARE(struct flowtable *, ip6_ft);
+#define V_ip6_ft VNET(ip6_ft)
+
+struct flowtable *flowtable_alloc(char *name, int nentry, int flags);
+
+/*
+ * Given a flow table, look up the L3 and L2 information and
+ * return it in the route.
+ */
+struct flentry *flowtable_lookup_mbuf(struct flowtable *ft, struct mbuf *m, int af);
+
+struct flentry *flowtable_lookup(struct flowtable *ft, struct sockaddr_storage *ssa,
+ struct sockaddr_storage *dsa, uint32_t fibnum, int flags);
+
+int kern_flowtable_insert(struct flowtable *ft, struct sockaddr_storage *ssa,
+ struct sockaddr_storage *dsa, struct route *ro, uint32_t fibnum, int flags);
+
+void flow_invalidate(struct flentry *fl);
+void flowtable_route_flush(struct flowtable *ft, struct rtentry *rt);
+
+void flow_to_route(struct flentry *fl, struct route *ro);
+
+void flow_to_route_in6(struct flentry *fl, struct route_in6 *ro);
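+
+/*
+ * Editor's illustration (not part of the original header): the
+ * intended call sequence in an IPv4 output path, looking up the flow
+ * for an outbound mbuf and recovering the cached route from the
+ * entry; a minimal sketch.
+ */
+#if 0
+static int
+example_flow_route(struct mbuf *m, struct route *ro)
+{
+	struct flentry *fle;
+
+	fle = flowtable_lookup_mbuf(V_ip_ft, m, AF_INET);
+	if (fle == NULL)
+		return (ENOENT);
+	flow_to_route(fle, ro);
+	return (0);
+}
+#endif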
+
+
+#endif /* _KERNEL */
+#endif /* !_NET_FLOWTABLE_HH_ */
diff --git a/rtems/freebsd/net/ieee8023ad_lacp.c b/rtems/freebsd/net/ieee8023ad_lacp.c
new file mode 100644
index 00000000..a4a020e0
--- /dev/null
+++ b/rtems/freebsd/net/ieee8023ad_lacp.c
@@ -0,0 +1,1947 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/* $NetBSD: ieee8023ad_lacp.c,v 1.3 2005/12/11 12:24:54 christos Exp $ */
+
+/*-
+ * Copyright (c)2005 YAMAMOTO Takashi,
+ * Copyright (c)2008 Andrew Thompson <thompsa@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/callout.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/kernel.h> /* hz */
+#include <rtems/freebsd/sys/socket.h> /* for net/if.h */
+#include <rtems/freebsd/sys/sockio.h>
+#include <rtems/freebsd/machine/stdarg.h>
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/rwlock.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/if_dl.h>
+#include <rtems/freebsd/net/ethernet.h>
+#include <rtems/freebsd/net/if_media.h>
+#include <rtems/freebsd/net/if_types.h>
+
+#include <rtems/freebsd/net/if_lagg.h>
+#include <rtems/freebsd/net/ieee8023ad_lacp.h>
+
+/*
+ * actor system priority and port priority.
+ * XXX should be configurable.
+ */
+
+#define LACP_SYSTEM_PRIO 0x8000
+#define LACP_PORT_PRIO 0x8000
+
+const uint8_t ethermulticastaddr_slowprotocols[ETHER_ADDR_LEN] =
+ { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x02 };
+
+static const struct tlv_template lacp_info_tlv_template[] = {
+ { LACP_TYPE_ACTORINFO,
+ sizeof(struct tlvhdr) + sizeof(struct lacp_peerinfo) },
+ { LACP_TYPE_PARTNERINFO,
+ sizeof(struct tlvhdr) + sizeof(struct lacp_peerinfo) },
+ { LACP_TYPE_COLLECTORINFO,
+ sizeof(struct tlvhdr) + sizeof(struct lacp_collectorinfo) },
+ { 0, 0 },
+};
+
+static const struct tlv_template marker_info_tlv_template[] = {
+ { MARKER_TYPE_INFO,
+ sizeof(struct tlvhdr) + sizeof(struct lacp_markerinfo) },
+ { 0, 0 },
+};
+
+static const struct tlv_template marker_response_tlv_template[] = {
+ { MARKER_TYPE_RESPONSE,
+ sizeof(struct tlvhdr) + sizeof(struct lacp_markerinfo) },
+ { 0, 0 },
+};
+
+typedef void (*lacp_timer_func_t)(struct lacp_port *);
+
+static void lacp_fill_actorinfo(struct lacp_port *, struct lacp_peerinfo *);
+static void lacp_fill_markerinfo(struct lacp_port *,
+ struct lacp_markerinfo *);
+
+static uint64_t lacp_aggregator_bandwidth(struct lacp_aggregator *);
+static void lacp_suppress_distributing(struct lacp_softc *,
+ struct lacp_aggregator *);
+static void lacp_transit_expire(void *);
+static void lacp_update_portmap(struct lacp_softc *);
+static void lacp_select_active_aggregator(struct lacp_softc *);
+static uint16_t lacp_compose_key(struct lacp_port *);
+static int tlv_check(const void *, size_t, const struct tlvhdr *,
+ const struct tlv_template *, boolean_t);
+static void lacp_tick(void *);
+
+static void lacp_fill_aggregator_id(struct lacp_aggregator *,
+ const struct lacp_port *);
+static void lacp_fill_aggregator_id_peer(struct lacp_peerinfo *,
+ const struct lacp_peerinfo *);
+static int lacp_aggregator_is_compatible(const struct lacp_aggregator *,
+ const struct lacp_port *);
+static int lacp_peerinfo_is_compatible(const struct lacp_peerinfo *,
+ const struct lacp_peerinfo *);
+
+static struct lacp_aggregator *lacp_aggregator_get(struct lacp_softc *,
+ struct lacp_port *);
+static void lacp_aggregator_addref(struct lacp_softc *,
+ struct lacp_aggregator *);
+static void lacp_aggregator_delref(struct lacp_softc *,
+ struct lacp_aggregator *);
+
+/* receive machine */
+
+static int lacp_pdu_input(struct lacp_port *, struct mbuf *);
+static int lacp_marker_input(struct lacp_port *, struct mbuf *);
+static void lacp_sm_rx(struct lacp_port *, const struct lacpdu *);
+static void lacp_sm_rx_timer(struct lacp_port *);
+static void lacp_sm_rx_set_expired(struct lacp_port *);
+static void lacp_sm_rx_update_ntt(struct lacp_port *,
+ const struct lacpdu *);
+static void lacp_sm_rx_record_pdu(struct lacp_port *,
+ const struct lacpdu *);
+static void lacp_sm_rx_update_selected(struct lacp_port *,
+ const struct lacpdu *);
+static void lacp_sm_rx_record_default(struct lacp_port *);
+static void lacp_sm_rx_update_default_selected(struct lacp_port *);
+static void lacp_sm_rx_update_selected_from_peerinfo(struct lacp_port *,
+ const struct lacp_peerinfo *);
+
+/* mux machine */
+
+static void lacp_sm_mux(struct lacp_port *);
+static void lacp_set_mux(struct lacp_port *, enum lacp_mux_state);
+static void lacp_sm_mux_timer(struct lacp_port *);
+
+/* periodic transmit machine */
+
+static void lacp_sm_ptx_update_timeout(struct lacp_port *, uint8_t);
+static void lacp_sm_ptx_tx_schedule(struct lacp_port *);
+static void lacp_sm_ptx_timer(struct lacp_port *);
+
+/* transmit machine */
+
+static void lacp_sm_tx(struct lacp_port *);
+static void lacp_sm_assert_ntt(struct lacp_port *);
+
+static void lacp_run_timers(struct lacp_port *);
+static int lacp_compare_peerinfo(const struct lacp_peerinfo *,
+ const struct lacp_peerinfo *);
+static int lacp_compare_systemid(const struct lacp_systemid *,
+ const struct lacp_systemid *);
+static void lacp_port_enable(struct lacp_port *);
+static void lacp_port_disable(struct lacp_port *);
+static void lacp_select(struct lacp_port *);
+static void lacp_unselect(struct lacp_port *);
+static void lacp_disable_collecting(struct lacp_port *);
+static void lacp_enable_collecting(struct lacp_port *);
+static void lacp_disable_distributing(struct lacp_port *);
+static void lacp_enable_distributing(struct lacp_port *);
+static int lacp_xmit_lacpdu(struct lacp_port *);
+static int lacp_xmit_marker(struct lacp_port *);
+
+#if defined(LACP_DEBUG)
+static void lacp_dump_lacpdu(const struct lacpdu *);
+static const char *lacp_format_partner(const struct lacp_peerinfo *, char *,
+ size_t);
+static const char *lacp_format_lagid(const struct lacp_peerinfo *,
+ const struct lacp_peerinfo *, char *, size_t);
+static const char *lacp_format_lagid_aggregator(const struct lacp_aggregator *,
+ char *, size_t);
+static const char *lacp_format_state(uint8_t, char *, size_t);
+static const char *lacp_format_mac(const uint8_t *, char *, size_t);
+static const char *lacp_format_systemid(const struct lacp_systemid *, char *,
+ size_t);
+static const char *lacp_format_portid(const struct lacp_portid *, char *,
+ size_t);
+static void lacp_dprintf(const struct lacp_port *, const char *, ...)
+ __attribute__((__format__(__printf__, 2, 3)));
+#define LACP_DPRINTF(a) lacp_dprintf a
+#else
+#define LACP_DPRINTF(a) /* nothing */
+#endif
+
+/*
+ * partner administration variables.
+ * XXX should be configurable.
+ */
+
+static const struct lacp_peerinfo lacp_partner_admin = {
+ .lip_systemid = { .lsi_prio = 0xffff },
+ .lip_portid = { .lpi_prio = 0xffff },
+#if 1
+ /* optimistic */
+ .lip_state = LACP_STATE_SYNC | LACP_STATE_AGGREGATION |
+ LACP_STATE_COLLECTING | LACP_STATE_DISTRIBUTING,
+#else
+ /* pessimistic */
+ .lip_state = 0,
+#endif
+};
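+
+/*
+ * Editor's note: the optimistic defaults above matter when the peer
+ * speaks no LACP at all; lacp_sm_rx_record_default() copies them into
+ * lp_partner, which lets the mux machine reach DISTRIBUTING and pass
+ * traffic anyway.
+ */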
+
+static const lacp_timer_func_t lacp_timer_funcs[LACP_NTIMER] = {
+ [LACP_TIMER_CURRENT_WHILE] = lacp_sm_rx_timer,
+ [LACP_TIMER_PERIODIC] = lacp_sm_ptx_timer,
+ [LACP_TIMER_WAIT_WHILE] = lacp_sm_mux_timer,
+};
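+
+/*
+ * Editor's note: these handlers are dispatched by lacp_run_timers(),
+ * which lacp_tick() invokes once per second; each lp_timer[] slot is
+ * a countdown in ticks of that one-second clock.
+ */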
+
+struct mbuf *
+lacp_input(struct lagg_port *lgp, struct mbuf *m)
+{
+ struct lacp_port *lp = LACP_PORT(lgp);
+ uint8_t subtype;
+
+ if (m->m_pkthdr.len < sizeof(struct ether_header) + sizeof(subtype)) {
+ m_freem(m);
+ return (NULL);
+ }
+
+ m_copydata(m, sizeof(struct ether_header), sizeof(subtype), &subtype);
+ switch (subtype) {
+ case SLOWPROTOCOLS_SUBTYPE_LACP:
+ lacp_pdu_input(lp, m);
+ return (NULL);
+
+ case SLOWPROTOCOLS_SUBTYPE_MARKER:
+ lacp_marker_input(lp, m);
+ return (NULL);
+ }
+
+ /* Not a subtype we are interested in */
+ return (m);
+}
+
+/*
+ * lacp_pdu_input: process lacpdu
+ */
+static int
+lacp_pdu_input(struct lacp_port *lp, struct mbuf *m)
+{
+ struct lacp_softc *lsc = lp->lp_lsc;
+ struct lacpdu *du;
+ int error = 0;
+
+ if (m->m_pkthdr.len != sizeof(*du)) {
+ goto bad;
+ }
+
+ if ((m->m_flags & M_MCAST) == 0) {
+ goto bad;
+ }
+
+ if (m->m_len < sizeof(*du)) {
+ m = m_pullup(m, sizeof(*du));
+ if (m == NULL) {
+ return (ENOMEM);
+ }
+ }
+
+ du = mtod(m, struct lacpdu *);
+
+ if (memcmp(&du->ldu_eh.ether_dhost,
+ &ethermulticastaddr_slowprotocols, ETHER_ADDR_LEN)) {
+ goto bad;
+ }
+
+ /*
+	 * ignore the version for compatibility with
+	 * future protocol revisions.
+ */
+#if 0
+ if (du->ldu_sph.sph_version != 1) {
+ goto bad;
+ }
+#endif
+
+ /*
+	 * ignore tlv types for compatibility with
+	 * future protocol revisions.
+ */
+ if (tlv_check(du, sizeof(*du), &du->ldu_tlv_actor,
+ lacp_info_tlv_template, FALSE)) {
+ goto bad;
+ }
+
+#if defined(LACP_DEBUG)
+ LACP_DPRINTF((lp, "lacpdu receive\n"));
+ lacp_dump_lacpdu(du);
+#endif /* defined(LACP_DEBUG) */
+
+ LACP_LOCK(lsc);
+ lacp_sm_rx(lp, du);
+ LACP_UNLOCK(lsc);
+
+ m_freem(m);
+ return (error);
+
+bad:
+ m_freem(m);
+ return (EINVAL);
+}
+
+static void
+lacp_fill_actorinfo(struct lacp_port *lp, struct lacp_peerinfo *info)
+{
+ struct lagg_port *lgp = lp->lp_lagg;
+ struct lagg_softc *sc = lgp->lp_softc;
+
+ info->lip_systemid.lsi_prio = htons(LACP_SYSTEM_PRIO);
+ memcpy(&info->lip_systemid.lsi_mac,
+ IF_LLADDR(sc->sc_ifp), ETHER_ADDR_LEN);
+ info->lip_portid.lpi_prio = htons(LACP_PORT_PRIO);
+ info->lip_portid.lpi_portno = htons(lp->lp_ifp->if_index);
+ info->lip_state = lp->lp_state;
+}
+
+static void
+lacp_fill_markerinfo(struct lacp_port *lp, struct lacp_markerinfo *info)
+{
+ struct ifnet *ifp = lp->lp_ifp;
+
+ /* Fill in the port index and system id (encoded as the MAC) */
+ info->mi_rq_port = htons(ifp->if_index);
+ memcpy(&info->mi_rq_system, lp->lp_systemid.lsi_mac, ETHER_ADDR_LEN);
+ info->mi_rq_xid = htonl(0);
+}
+
+static int
+lacp_xmit_lacpdu(struct lacp_port *lp)
+{
+ struct lagg_port *lgp = lp->lp_lagg;
+ struct mbuf *m;
+ struct lacpdu *du;
+ int error;
+
+ LACP_LOCK_ASSERT(lp->lp_lsc);
+
+ m = m_gethdr(M_DONTWAIT, MT_DATA);
+ if (m == NULL) {
+ return (ENOMEM);
+ }
+ m->m_len = m->m_pkthdr.len = sizeof(*du);
+
+ du = mtod(m, struct lacpdu *);
+ memset(du, 0, sizeof(*du));
+
+ memcpy(&du->ldu_eh.ether_dhost, ethermulticastaddr_slowprotocols,
+ ETHER_ADDR_LEN);
+ memcpy(&du->ldu_eh.ether_shost, lgp->lp_lladdr, ETHER_ADDR_LEN);
+ du->ldu_eh.ether_type = htons(ETHERTYPE_SLOW);
+
+ du->ldu_sph.sph_subtype = SLOWPROTOCOLS_SUBTYPE_LACP;
+ du->ldu_sph.sph_version = 1;
+
+ TLV_SET(&du->ldu_tlv_actor, LACP_TYPE_ACTORINFO, sizeof(du->ldu_actor));
+ du->ldu_actor = lp->lp_actor;
+
+ TLV_SET(&du->ldu_tlv_partner, LACP_TYPE_PARTNERINFO,
+ sizeof(du->ldu_partner));
+ du->ldu_partner = lp->lp_partner;
+
+ TLV_SET(&du->ldu_tlv_collector, LACP_TYPE_COLLECTORINFO,
+ sizeof(du->ldu_collector));
+ du->ldu_collector.lci_maxdelay = 0;
+
+#if defined(LACP_DEBUG)
+ LACP_DPRINTF((lp, "lacpdu transmit\n"));
+ lacp_dump_lacpdu(du);
+#endif /* defined(LACP_DEBUG) */
+
+ m->m_flags |= M_MCAST;
+
+ /*
+	 * XXX should use a higher priority queue.
+	 * otherwise network congestion can break aggregation.
+ */
+
+ error = lagg_enqueue(lp->lp_ifp, m);
+ return (error);
+}
+
+static int
+lacp_xmit_marker(struct lacp_port *lp)
+{
+ struct lagg_port *lgp = lp->lp_lagg;
+ struct mbuf *m;
+ struct markerdu *mdu;
+ int error;
+
+ LACP_LOCK_ASSERT(lp->lp_lsc);
+
+ m = m_gethdr(M_DONTWAIT, MT_DATA);
+ if (m == NULL) {
+ return (ENOMEM);
+ }
+ m->m_len = m->m_pkthdr.len = sizeof(*mdu);
+
+ mdu = mtod(m, struct markerdu *);
+ memset(mdu, 0, sizeof(*mdu));
+
+ memcpy(&mdu->mdu_eh.ether_dhost, ethermulticastaddr_slowprotocols,
+ ETHER_ADDR_LEN);
+ memcpy(&mdu->mdu_eh.ether_shost, lgp->lp_lladdr, ETHER_ADDR_LEN);
+ mdu->mdu_eh.ether_type = htons(ETHERTYPE_SLOW);
+
+ mdu->mdu_sph.sph_subtype = SLOWPROTOCOLS_SUBTYPE_MARKER;
+ mdu->mdu_sph.sph_version = 1;
+
+ /* Bump the transaction id and copy over the marker info */
+ lp->lp_marker.mi_rq_xid = htonl(ntohl(lp->lp_marker.mi_rq_xid) + 1);
+ TLV_SET(&mdu->mdu_tlv, MARKER_TYPE_INFO, sizeof(mdu->mdu_info));
+ mdu->mdu_info = lp->lp_marker;
+
+ LACP_DPRINTF((lp, "marker transmit, port=%u, sys=%6D, id=%u\n",
+ ntohs(mdu->mdu_info.mi_rq_port), mdu->mdu_info.mi_rq_system, ":",
+ ntohl(mdu->mdu_info.mi_rq_xid)));
+
+ m->m_flags |= M_MCAST;
+ error = lagg_enqueue(lp->lp_ifp, m);
+ return (error);
+}
+
+void
+lacp_linkstate(struct lagg_port *lgp)
+{
+ struct lacp_port *lp = LACP_PORT(lgp);
+ struct lacp_softc *lsc = lp->lp_lsc;
+ struct ifnet *ifp = lgp->lp_ifp;
+ struct ifmediareq ifmr;
+ int error = 0;
+ u_int media;
+ uint8_t old_state;
+ uint16_t old_key;
+
+ bzero((char *)&ifmr, sizeof(ifmr));
+ error = (*ifp->if_ioctl)(ifp, SIOCGIFMEDIA, (caddr_t)&ifmr);
+ if (error != 0)
+ return;
+
+ LACP_LOCK(lsc);
+ media = ifmr.ifm_active;
+ LACP_DPRINTF((lp, "media changed 0x%x -> 0x%x, ether = %d, fdx = %d, "
+ "link = %d\n", lp->lp_media, media, IFM_TYPE(media) == IFM_ETHER,
+ (media & IFM_FDX) != 0, ifp->if_link_state == LINK_STATE_UP));
+ old_state = lp->lp_state;
+ old_key = lp->lp_key;
+
+ lp->lp_media = media;
+ /*
+	 * If the port is not an active full duplex Ethernet link then it
+	 * cannot be aggregated.
+ */
+ if (IFM_TYPE(media) != IFM_ETHER || (media & IFM_FDX) == 0 ||
+ ifp->if_link_state != LINK_STATE_UP) {
+ lacp_port_disable(lp);
+ } else {
+ lacp_port_enable(lp);
+ }
+ lp->lp_key = lacp_compose_key(lp);
+
+ if (old_state != lp->lp_state || old_key != lp->lp_key) {
+ LACP_DPRINTF((lp, "-> UNSELECTED\n"));
+ lp->lp_selected = LACP_UNSELECTED;
+ }
+ LACP_UNLOCK(lsc);
+}
+
+static void
+lacp_tick(void *arg)
+{
+ struct lacp_softc *lsc = arg;
+ struct lacp_port *lp;
+
+ LIST_FOREACH(lp, &lsc->lsc_ports, lp_next) {
+ if ((lp->lp_state & LACP_STATE_AGGREGATION) == 0)
+ continue;
+
+ lacp_run_timers(lp);
+
+ lacp_select(lp);
+ lacp_sm_mux(lp);
+ lacp_sm_tx(lp);
+ lacp_sm_ptx_tx_schedule(lp);
+ }
+ callout_reset(&lsc->lsc_callout, hz, lacp_tick, lsc);
+}
+
+int
+lacp_port_create(struct lagg_port *lgp)
+{
+ struct lagg_softc *sc = lgp->lp_softc;
+ struct lacp_softc *lsc = LACP_SOFTC(sc);
+ struct lacp_port *lp;
+ struct ifnet *ifp = lgp->lp_ifp;
+ struct sockaddr_dl sdl;
+ struct ifmultiaddr *rifma = NULL;
+ int error;
+
+ boolean_t active = TRUE; /* XXX should be configurable */
+ boolean_t fast = FALSE; /* XXX should be configurable */
+
+ bzero((char *)&sdl, sizeof(sdl));
+ sdl.sdl_len = sizeof(sdl);
+ sdl.sdl_family = AF_LINK;
+ sdl.sdl_index = ifp->if_index;
+ sdl.sdl_type = IFT_ETHER;
+ sdl.sdl_alen = ETHER_ADDR_LEN;
+
+ bcopy(&ethermulticastaddr_slowprotocols,
+ LLADDR(&sdl), ETHER_ADDR_LEN);
+ error = if_addmulti(ifp, (struct sockaddr *)&sdl, &rifma);
+ if (error) {
+ printf("%s: ADDMULTI failed on %s\n", __func__, lgp->lp_ifname);
+ return (error);
+ }
+
+ lp = malloc(sizeof(struct lacp_port),
+ M_DEVBUF, M_NOWAIT|M_ZERO);
+ if (lp == NULL)
+ return (ENOMEM);
+
+ LACP_LOCK(lsc);
+ lgp->lp_psc = (caddr_t)lp;
+ lp->lp_ifp = ifp;
+ lp->lp_lagg = lgp;
+ lp->lp_lsc = lsc;
+ lp->lp_ifma = rifma;
+
+ LIST_INSERT_HEAD(&lsc->lsc_ports, lp, lp_next);
+
+ lacp_fill_actorinfo(lp, &lp->lp_actor);
+ lacp_fill_markerinfo(lp, &lp->lp_marker);
+ lp->lp_state =
+ (active ? LACP_STATE_ACTIVITY : 0) |
+ (fast ? LACP_STATE_TIMEOUT : 0);
+ lp->lp_aggregator = NULL;
+ lacp_sm_rx_set_expired(lp);
+ LACP_UNLOCK(lsc);
+ lacp_linkstate(lgp);
+
+ return (0);
+}
+
+void
+lacp_port_destroy(struct lagg_port *lgp)
+{
+ struct lacp_port *lp = LACP_PORT(lgp);
+ struct lacp_softc *lsc = lp->lp_lsc;
+ int i;
+
+ LACP_LOCK(lsc);
+ for (i = 0; i < LACP_NTIMER; i++) {
+ LACP_TIMER_DISARM(lp, i);
+ }
+
+ lacp_disable_collecting(lp);
+ lacp_disable_distributing(lp);
+ lacp_unselect(lp);
+
+ /* The address may have already been removed by if_purgemaddrs() */
+ if (!lgp->lp_detaching)
+ if_delmulti_ifma(lp->lp_ifma);
+
+ LIST_REMOVE(lp, lp_next);
+ LACP_UNLOCK(lsc);
+ free(lp, M_DEVBUF);
+}
+
+void
+lacp_req(struct lagg_softc *sc, caddr_t data)
+{
+ struct lacp_opreq *req = (struct lacp_opreq *)data;
+ struct lacp_softc *lsc = LACP_SOFTC(sc);
+ struct lacp_aggregator *la = lsc->lsc_active_aggregator;
+
+ LACP_LOCK(lsc);
+ bzero(req, sizeof(struct lacp_opreq));
+ if (la != NULL) {
+ req->actor_prio = ntohs(la->la_actor.lip_systemid.lsi_prio);
+ memcpy(&req->actor_mac, &la->la_actor.lip_systemid.lsi_mac,
+ ETHER_ADDR_LEN);
+ req->actor_key = ntohs(la->la_actor.lip_key);
+ req->actor_portprio = ntohs(la->la_actor.lip_portid.lpi_prio);
+ req->actor_portno = ntohs(la->la_actor.lip_portid.lpi_portno);
+ req->actor_state = la->la_actor.lip_state;
+
+ req->partner_prio = ntohs(la->la_partner.lip_systemid.lsi_prio);
+ memcpy(&req->partner_mac, &la->la_partner.lip_systemid.lsi_mac,
+ ETHER_ADDR_LEN);
+ req->partner_key = ntohs(la->la_partner.lip_key);
+ req->partner_portprio = ntohs(la->la_partner.lip_portid.lpi_prio);
+ req->partner_portno = ntohs(la->la_partner.lip_portid.lpi_portno);
+ req->partner_state = la->la_partner.lip_state;
+ }
+ LACP_UNLOCK(lsc);
+}
+
+void
+lacp_portreq(struct lagg_port *lgp, caddr_t data)
+{
+ struct lacp_opreq *req = (struct lacp_opreq *)data;
+ struct lacp_port *lp = LACP_PORT(lgp);
+ struct lacp_softc *lsc = lp->lp_lsc;
+
+ LACP_LOCK(lsc);
+ req->actor_prio = ntohs(lp->lp_actor.lip_systemid.lsi_prio);
+ memcpy(&req->actor_mac, &lp->lp_actor.lip_systemid.lsi_mac,
+ ETHER_ADDR_LEN);
+ req->actor_key = ntohs(lp->lp_actor.lip_key);
+ req->actor_portprio = ntohs(lp->lp_actor.lip_portid.lpi_prio);
+ req->actor_portno = ntohs(lp->lp_actor.lip_portid.lpi_portno);
+ req->actor_state = lp->lp_actor.lip_state;
+
+ req->partner_prio = ntohs(lp->lp_partner.lip_systemid.lsi_prio);
+ memcpy(&req->partner_mac, &lp->lp_partner.lip_systemid.lsi_mac,
+ ETHER_ADDR_LEN);
+ req->partner_key = ntohs(lp->lp_partner.lip_key);
+ req->partner_portprio = ntohs(lp->lp_partner.lip_portid.lpi_prio);
+ req->partner_portno = ntohs(lp->lp_partner.lip_portid.lpi_portno);
+ req->partner_state = lp->lp_partner.lip_state;
+ LACP_UNLOCK(lsc);
+}
+
+static void
+lacp_disable_collecting(struct lacp_port *lp)
+{
+ LACP_DPRINTF((lp, "collecting disabled\n"));
+ lp->lp_state &= ~LACP_STATE_COLLECTING;
+}
+
+static void
+lacp_enable_collecting(struct lacp_port *lp)
+{
+ LACP_DPRINTF((lp, "collecting enabled\n"));
+ lp->lp_state |= LACP_STATE_COLLECTING;
+}
+
+static void
+lacp_disable_distributing(struct lacp_port *lp)
+{
+ struct lacp_aggregator *la = lp->lp_aggregator;
+ struct lacp_softc *lsc = lp->lp_lsc;
+#if defined(LACP_DEBUG)
+ char buf[LACP_LAGIDSTR_MAX+1];
+#endif /* defined(LACP_DEBUG) */
+
+ LACP_LOCK_ASSERT(lsc);
+
+ if (la == NULL || (lp->lp_state & LACP_STATE_DISTRIBUTING) == 0) {
+ return;
+ }
+
+ KASSERT(!TAILQ_EMPTY(&la->la_ports), ("no aggregator ports"));
+ KASSERT(la->la_nports > 0, ("nports invalid (%d)", la->la_nports));
+ KASSERT(la->la_refcnt >= la->la_nports, ("aggregator refcnt invalid"));
+
+ LACP_DPRINTF((lp, "disable distributing on aggregator %s, "
+ "nports %d -> %d\n",
+ lacp_format_lagid_aggregator(la, buf, sizeof(buf)),
+ la->la_nports, la->la_nports - 1));
+
+ TAILQ_REMOVE(&la->la_ports, lp, lp_dist_q);
+ la->la_nports--;
+
+ if (lsc->lsc_active_aggregator == la) {
+ lacp_suppress_distributing(lsc, la);
+ lacp_select_active_aggregator(lsc);
+ /* regenerate the port map, the active aggregator has changed */
+ lacp_update_portmap(lsc);
+ }
+
+ lp->lp_state &= ~LACP_STATE_DISTRIBUTING;
+}
+
+static void
+lacp_enable_distributing(struct lacp_port *lp)
+{
+ struct lacp_aggregator *la = lp->lp_aggregator;
+ struct lacp_softc *lsc = lp->lp_lsc;
+#if defined(LACP_DEBUG)
+ char buf[LACP_LAGIDSTR_MAX+1];
+#endif /* defined(LACP_DEBUG) */
+
+ LACP_LOCK_ASSERT(lsc);
+
+ if ((lp->lp_state & LACP_STATE_DISTRIBUTING) != 0) {
+ return;
+ }
+
+ LACP_DPRINTF((lp, "enable distributing on aggregator %s, "
+ "nports %d -> %d\n",
+ lacp_format_lagid_aggregator(la, buf, sizeof(buf)),
+ la->la_nports, la->la_nports + 1));
+
+ KASSERT(la->la_refcnt > la->la_nports, ("aggregator refcnt invalid"));
+ TAILQ_INSERT_HEAD(&la->la_ports, lp, lp_dist_q);
+ la->la_nports++;
+
+ lp->lp_state |= LACP_STATE_DISTRIBUTING;
+
+ if (lsc->lsc_active_aggregator == la) {
+ lacp_suppress_distributing(lsc, la);
+ lacp_update_portmap(lsc);
+ } else
+ /* try to become the active aggregator */
+ lacp_select_active_aggregator(lsc);
+}
+
+static void
+lacp_transit_expire(void *vp)
+{
+ struct lacp_softc *lsc = vp;
+
+ LACP_LOCK_ASSERT(lsc);
+
+ LACP_DPRINTF((NULL, "%s\n", __func__));
+ lsc->lsc_suppress_distributing = FALSE;
+}
+
+int
+lacp_attach(struct lagg_softc *sc)
+{
+ struct lacp_softc *lsc;
+
+ lsc = malloc(sizeof(struct lacp_softc),
+ M_DEVBUF, M_NOWAIT|M_ZERO);
+ if (lsc == NULL)
+ return (ENOMEM);
+
+ sc->sc_psc = (caddr_t)lsc;
+ lsc->lsc_softc = sc;
+
+ lsc->lsc_hashkey = arc4random();
+ lsc->lsc_active_aggregator = NULL;
+ LACP_LOCK_INIT(lsc);
+ TAILQ_INIT(&lsc->lsc_aggregators);
+ LIST_INIT(&lsc->lsc_ports);
+
+ callout_init_mtx(&lsc->lsc_transit_callout, &lsc->lsc_mtx, 0);
+ callout_init_mtx(&lsc->lsc_callout, &lsc->lsc_mtx, 0);
+
+	/* if the lagg is already up then start the tick callout now */
+ if (sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING)
+ lacp_init(sc);
+
+ return (0);
+}
+
+int
+lacp_detach(struct lagg_softc *sc)
+{
+ struct lacp_softc *lsc = LACP_SOFTC(sc);
+
+ KASSERT(TAILQ_EMPTY(&lsc->lsc_aggregators),
+ ("aggregators still active"));
+ KASSERT(lsc->lsc_active_aggregator == NULL,
+ ("aggregator still attached"));
+
+ sc->sc_psc = NULL;
+ callout_drain(&lsc->lsc_transit_callout);
+ callout_drain(&lsc->lsc_callout);
+
+ LACP_LOCK_DESTROY(lsc);
+ free(lsc, M_DEVBUF);
+ return (0);
+}
+
+void
+lacp_init(struct lagg_softc *sc)
+{
+ struct lacp_softc *lsc = LACP_SOFTC(sc);
+
+ LACP_LOCK(lsc);
+ callout_reset(&lsc->lsc_callout, hz, lacp_tick, lsc);
+ LACP_UNLOCK(lsc);
+}
+
+void
+lacp_stop(struct lagg_softc *sc)
+{
+ struct lacp_softc *lsc = LACP_SOFTC(sc);
+
+ LACP_LOCK(lsc);
+ callout_stop(&lsc->lsc_transit_callout);
+ callout_stop(&lsc->lsc_callout);
+ LACP_UNLOCK(lsc);
+}
+
+struct lagg_port *
+lacp_select_tx_port(struct lagg_softc *sc, struct mbuf *m)
+{
+ struct lacp_softc *lsc = LACP_SOFTC(sc);
+ struct lacp_portmap *pm;
+ struct lacp_port *lp;
+ uint32_t hash;
+
+ if (__predict_false(lsc->lsc_suppress_distributing)) {
+ LACP_DPRINTF((NULL, "%s: waiting transit\n", __func__));
+ return (NULL);
+ }
+
+ pm = &lsc->lsc_pmap[lsc->lsc_activemap];
+ if (pm->pm_count == 0) {
+ LACP_DPRINTF((NULL, "%s: no active aggregator\n", __func__));
+ return (NULL);
+ }
+
+ if (m->m_flags & M_FLOWID)
+ hash = m->m_pkthdr.flowid;
+ else
+ hash = lagg_hashmbuf(m, lsc->lsc_hashkey);
+ hash %= pm->pm_count;
+ lp = pm->pm_map[hash];
+
+ KASSERT((lp->lp_state & LACP_STATE_DISTRIBUTING) != 0,
+ ("aggregated port is not distributing"));
+
+ return (lp->lp_lagg);
+}
+/*
+ * lacp_suppress_distributing: drop transmit packets for a while
+ * to preserve packet ordering.
+ */
+
+static void
+lacp_suppress_distributing(struct lacp_softc *lsc, struct lacp_aggregator *la)
+{
+ struct lacp_port *lp;
+
+ if (lsc->lsc_active_aggregator != la) {
+ return;
+ }
+
+ LACP_DPRINTF((NULL, "%s\n", __func__));
+ lsc->lsc_suppress_distributing = TRUE;
+
+ /* send a marker frame down each port to verify the queues are empty */
+ LIST_FOREACH(lp, &lsc->lsc_ports, lp_next) {
+ lp->lp_flags |= LACP_PORT_MARK;
+ lacp_xmit_marker(lp);
+ }
+
+ /* set a timeout for the marker frames */
+ callout_reset(&lsc->lsc_transit_callout,
+ LACP_TRANSIT_DELAY * hz / 1000, lacp_transit_expire, lsc);
+}
+
+static int
+lacp_compare_peerinfo(const struct lacp_peerinfo *a,
+ const struct lacp_peerinfo *b)
+{
+ return (memcmp(a, b, offsetof(struct lacp_peerinfo, lip_state)));
+}
+
+static int
+lacp_compare_systemid(const struct lacp_systemid *a,
+ const struct lacp_systemid *b)
+{
+ return (memcmp(a, b, sizeof(*a)));
+}
+
+#if 0 /* unused */
+static int
+lacp_compare_portid(const struct lacp_portid *a,
+ const struct lacp_portid *b)
+{
+ return (memcmp(a, b, sizeof(*a)));
+}
+#endif
+
+static uint64_t
+lacp_aggregator_bandwidth(struct lacp_aggregator *la)
+{
+ struct lacp_port *lp;
+ uint64_t speed;
+
+ lp = TAILQ_FIRST(&la->la_ports);
+ if (lp == NULL) {
+ return (0);
+ }
+
+ speed = ifmedia_baudrate(lp->lp_media);
+ speed *= la->la_nports;
+ if (speed == 0) {
+ LACP_DPRINTF((lp, "speed 0? media=0x%x nports=%d\n",
+ lp->lp_media, la->la_nports));
+ }
+
+ return (speed);
+}
+
+/*
+ * lacp_select_active_aggregator: select an aggregator to be used to transmit
+ * packets from lagg(4) interface.
+ */
+
+static void
+lacp_select_active_aggregator(struct lacp_softc *lsc)
+{
+ struct lagg_softc *sc = lsc->lsc_softc;
+ struct lacp_aggregator *la;
+ struct lacp_aggregator *best_la = NULL;
+ uint64_t best_speed = 0;
+#if defined(LACP_DEBUG)
+ char buf[LACP_LAGIDSTR_MAX+1];
+#endif /* defined(LACP_DEBUG) */
+
+ LACP_DPRINTF((NULL, "%s:\n", __func__));
+
+ TAILQ_FOREACH(la, &lsc->lsc_aggregators, la_q) {
+ uint64_t speed;
+
+ if (la->la_nports == 0) {
+ continue;
+ }
+
+ speed = lacp_aggregator_bandwidth(la);
+ LACP_DPRINTF((NULL, "%s, speed=%jd, nports=%d\n",
+ lacp_format_lagid_aggregator(la, buf, sizeof(buf)),
+ speed, la->la_nports));
+
+		/*
+		 * This aggregator is chosen if:
+		 * - the partner has a better (lower) system priority,
+		 * - the total aggregated speed is higher, or
+		 * - at equal speed, it is already the active aggregator.
+		 */
+ if ((best_la != NULL && LACP_SYS_PRI(la->la_partner) <
+ LACP_SYS_PRI(best_la->la_partner)) ||
+ speed > best_speed ||
+ (speed == best_speed &&
+ la == lsc->lsc_active_aggregator)) {
+ best_la = la;
+ best_speed = speed;
+ }
+ }
+
+ KASSERT(best_la == NULL || best_la->la_nports > 0,
+ ("invalid aggregator refcnt"));
+ KASSERT(best_la == NULL || !TAILQ_EMPTY(&best_la->la_ports),
+ ("invalid aggregator list"));
+
+#if defined(LACP_DEBUG)
+ if (lsc->lsc_active_aggregator != best_la) {
+ LACP_DPRINTF((NULL, "active aggregator changed\n"));
+ LACP_DPRINTF((NULL, "old %s\n",
+ lacp_format_lagid_aggregator(lsc->lsc_active_aggregator,
+ buf, sizeof(buf))));
+ } else {
+ LACP_DPRINTF((NULL, "active aggregator not changed\n"));
+ }
+ LACP_DPRINTF((NULL, "new %s\n",
+ lacp_format_lagid_aggregator(best_la, buf, sizeof(buf))));
+#endif /* defined(LACP_DEBUG) */
+
+ if (lsc->lsc_active_aggregator != best_la) {
+ sc->sc_ifp->if_baudrate = best_speed;
+ lsc->lsc_active_aggregator = best_la;
+ lacp_update_portmap(lsc);
+ if (best_la) {
+ lacp_suppress_distributing(lsc, best_la);
+ }
+ }
+}
+
+/*
+ * Update the inactive portmap array with the new list of ports and
+ * make it live.
+ */
+static void
+lacp_update_portmap(struct lacp_softc *lsc)
+{
+ struct lacp_aggregator *la;
+ struct lacp_portmap *p;
+ struct lacp_port *lp;
+ u_int newmap;
+ int i;
+
+ newmap = lsc->lsc_activemap == 0 ? 1 : 0;
+ p = &lsc->lsc_pmap[newmap];
+ la = lsc->lsc_active_aggregator;
+ bzero(p, sizeof(struct lacp_portmap));
+
+ if (la != NULL && la->la_nports > 0) {
+ p->pm_count = la->la_nports;
+ i = 0;
+ TAILQ_FOREACH(lp, &la->la_ports, lp_dist_q)
+ p->pm_map[i++] = lp;
+ KASSERT(i == p->pm_count, ("Invalid port count"));
+ }
+
+ /* switch the active portmap over */
+ atomic_store_rel_int(&lsc->lsc_activemap, newmap);
+ LACP_DPRINTF((NULL, "Set table %d with %d ports\n",
+ lsc->lsc_activemap,
+ lsc->lsc_pmap[lsc->lsc_activemap].pm_count));
+}
+
+static uint16_t
+lacp_compose_key(struct lacp_port *lp)
+{
+ struct lagg_port *lgp = lp->lp_lagg;
+ struct lagg_softc *sc = lgp->lp_softc;
+ u_int media = lp->lp_media;
+ uint16_t key;
+
+ if ((lp->lp_state & LACP_STATE_AGGREGATION) == 0) {
+
+ /*
+ * non-aggregatable links should have unique keys.
+ *
+ * XXX this isn't really unique as if_index is 16 bit.
+ */
+
+ /* bit 0..14: (some bits of) if_index of this port */
+ key = lp->lp_ifp->if_index;
+ /* bit 15: 1 */
+ key |= 0x8000;
+ } else {
+ u_int subtype = IFM_SUBTYPE(media);
+
+ KASSERT(IFM_TYPE(media) == IFM_ETHER, ("invalid media type"));
+ KASSERT((media & IFM_FDX) != 0, ("aggregating HDX interface"));
+
+ /* bit 0..4: IFM_SUBTYPE */
+ key = subtype;
+ /* bit 5..14: (some bits of) if_index of lagg device */
+ key |= 0x7fe0 & ((sc->sc_ifp->if_index) << 5);
+ /* bit 15: 0 */
+ }
+ return (htons(key));
+}
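+
+/*
+ * Editor's note: a worked example with hypothetical numbers: an
+ * aggregatable port with media subtype 4 on a lagg device whose
+ * if_index is 2 yields key = 4 | (0x7fe0 & (2 << 5)) = 0x0044,
+ * sent in network byte order.
+ */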
+
+static void
+lacp_aggregator_addref(struct lacp_softc *lsc, struct lacp_aggregator *la)
+{
+#if defined(LACP_DEBUG)
+ char buf[LACP_LAGIDSTR_MAX+1];
+#endif
+
+ LACP_DPRINTF((NULL, "%s: lagid=%s, refcnt %d -> %d\n",
+ __func__,
+ lacp_format_lagid(&la->la_actor, &la->la_partner,
+ buf, sizeof(buf)),
+ la->la_refcnt, la->la_refcnt + 1));
+
+ KASSERT(la->la_refcnt > 0, ("refcount <= 0"));
+ la->la_refcnt++;
+ KASSERT(la->la_refcnt > la->la_nports, ("invalid refcount"));
+}
+
+static void
+lacp_aggregator_delref(struct lacp_softc *lsc, struct lacp_aggregator *la)
+{
+#if defined(LACP_DEBUG)
+ char buf[LACP_LAGIDSTR_MAX+1];
+#endif
+
+ LACP_DPRINTF((NULL, "%s: lagid=%s, refcnt %d -> %d\n",
+ __func__,
+ lacp_format_lagid(&la->la_actor, &la->la_partner,
+ buf, sizeof(buf)),
+ la->la_refcnt, la->la_refcnt - 1));
+
+ KASSERT(la->la_refcnt > la->la_nports, ("invalid refcnt"));
+ la->la_refcnt--;
+ if (la->la_refcnt > 0) {
+ return;
+ }
+
+ KASSERT(la->la_refcnt == 0, ("refcount not zero"));
+ KASSERT(lsc->lsc_active_aggregator != la, ("aggregator active"));
+
+ TAILQ_REMOVE(&lsc->lsc_aggregators, la, la_q);
+
+ free(la, M_DEVBUF);
+}
+
+/*
+ * lacp_aggregator_get: allocate an aggregator.
+ */
+
+static struct lacp_aggregator *
+lacp_aggregator_get(struct lacp_softc *lsc, struct lacp_port *lp)
+{
+ struct lacp_aggregator *la;
+
+ la = malloc(sizeof(*la), M_DEVBUF, M_NOWAIT);
+ if (la) {
+ la->la_refcnt = 1;
+ la->la_nports = 0;
+ TAILQ_INIT(&la->la_ports);
+ la->la_pending = 0;
+ TAILQ_INSERT_TAIL(&lsc->lsc_aggregators, la, la_q);
+ }
+
+ return (la);
+}
+
+/*
+ * lacp_fill_aggregator_id: setup a newly allocated aggregator from a port.
+ */
+
+static void
+lacp_fill_aggregator_id(struct lacp_aggregator *la, const struct lacp_port *lp)
+{
+ lacp_fill_aggregator_id_peer(&la->la_partner, &lp->lp_partner);
+ lacp_fill_aggregator_id_peer(&la->la_actor, &lp->lp_actor);
+
+ la->la_actor.lip_state = lp->lp_state & LACP_STATE_AGGREGATION;
+}
+
+static void
+lacp_fill_aggregator_id_peer(struct lacp_peerinfo *lpi_aggr,
+ const struct lacp_peerinfo *lpi_port)
+{
+ memset(lpi_aggr, 0, sizeof(*lpi_aggr));
+ lpi_aggr->lip_systemid = lpi_port->lip_systemid;
+ lpi_aggr->lip_key = lpi_port->lip_key;
+}
+
+/*
+ * lacp_aggregator_is_compatible: check if a port can join to an aggregator.
+ */
+
+static int
+lacp_aggregator_is_compatible(const struct lacp_aggregator *la,
+ const struct lacp_port *lp)
+{
+ if (!(lp->lp_state & LACP_STATE_AGGREGATION) ||
+ !(lp->lp_partner.lip_state & LACP_STATE_AGGREGATION)) {
+ return (0);
+ }
+
+ if (!(la->la_actor.lip_state & LACP_STATE_AGGREGATION)) {
+ return (0);
+ }
+
+ if (!lacp_peerinfo_is_compatible(&la->la_partner, &lp->lp_partner)) {
+ return (0);
+ }
+
+ if (!lacp_peerinfo_is_compatible(&la->la_actor, &lp->lp_actor)) {
+ return (0);
+ }
+
+ return (1);
+}
+
+static int
+lacp_peerinfo_is_compatible(const struct lacp_peerinfo *a,
+ const struct lacp_peerinfo *b)
+{
+ if (memcmp(&a->lip_systemid, &b->lip_systemid,
+ sizeof(a->lip_systemid))) {
+ return (0);
+ }
+
+ if (memcmp(&a->lip_key, &b->lip_key, sizeof(a->lip_key))) {
+ return (0);
+ }
+
+ return (1);
+}
+
+static void
+lacp_port_enable(struct lacp_port *lp)
+{
+ lp->lp_state |= LACP_STATE_AGGREGATION;
+}
+
+static void
+lacp_port_disable(struct lacp_port *lp)
+{
+ lacp_set_mux(lp, LACP_MUX_DETACHED);
+
+ lp->lp_state &= ~LACP_STATE_AGGREGATION;
+ lp->lp_selected = LACP_UNSELECTED;
+ lacp_sm_rx_record_default(lp);
+ lp->lp_partner.lip_state &= ~LACP_STATE_AGGREGATION;
+ lp->lp_state &= ~LACP_STATE_EXPIRED;
+}
+
+/*
+ * lacp_select: select an aggregator; create one if necessary.
+ */
+static void
+lacp_select(struct lacp_port *lp)
+{
+ struct lacp_softc *lsc = lp->lp_lsc;
+ struct lacp_aggregator *la;
+#if defined(LACP_DEBUG)
+ char buf[LACP_LAGIDSTR_MAX+1];
+#endif
+
+ if (lp->lp_aggregator) {
+ return;
+ }
+
+ KASSERT(!LACP_TIMER_ISARMED(lp, LACP_TIMER_WAIT_WHILE),
+ ("timer_wait_while still active"));
+
+ LACP_DPRINTF((lp, "port lagid=%s\n",
+ lacp_format_lagid(&lp->lp_actor, &lp->lp_partner,
+ buf, sizeof(buf))));
+
+ TAILQ_FOREACH(la, &lsc->lsc_aggregators, la_q) {
+ if (lacp_aggregator_is_compatible(la, lp)) {
+ break;
+ }
+ }
+
+ if (la == NULL) {
+ la = lacp_aggregator_get(lsc, lp);
+ if (la == NULL) {
+ LACP_DPRINTF((lp, "aggregator creation failed\n"));
+
+ /*
+ * will retry on the next tick.
+ */
+
+ return;
+ }
+ lacp_fill_aggregator_id(la, lp);
+ LACP_DPRINTF((lp, "aggregator created\n"));
+ } else {
+ LACP_DPRINTF((lp, "compatible aggregator found\n"));
+ if (la->la_refcnt == LACP_MAX_PORTS)
+ return;
+ lacp_aggregator_addref(lsc, la);
+ }
+
+ LACP_DPRINTF((lp, "aggregator lagid=%s\n",
+ lacp_format_lagid(&la->la_actor, &la->la_partner,
+ buf, sizeof(buf))));
+
+ lp->lp_aggregator = la;
+ lp->lp_selected = LACP_SELECTED;
+}
+
+/*
+ * lacp_unselect: finish unselect/detach process.
+ */
+
+static void
+lacp_unselect(struct lacp_port *lp)
+{
+ struct lacp_softc *lsc = lp->lp_lsc;
+ struct lacp_aggregator *la = lp->lp_aggregator;
+
+ KASSERT(!LACP_TIMER_ISARMED(lp, LACP_TIMER_WAIT_WHILE),
+ ("timer_wait_while still active"));
+
+ if (la == NULL) {
+ return;
+ }
+
+ lp->lp_aggregator = NULL;
+ lacp_aggregator_delref(lsc, la);
+}
+
+/* mux machine */
+
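+/*
+ * Editor's note: lacp_sm_mux() below advances a port DETACHED ->
+ * WAITING -> ATTACHED -> COLLECTING -> DISTRIBUTING as selection and
+ * the partner's SYNC/COLLECTING bits allow, and falls back when they
+ * are lost; the re_eval loop applies one transition at a time until
+ * the state is stable.
+ */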
+static void
+lacp_sm_mux(struct lacp_port *lp)
+{
+ enum lacp_mux_state new_state;
+ boolean_t p_sync =
+ (lp->lp_partner.lip_state & LACP_STATE_SYNC) != 0;
+ boolean_t p_collecting =
+ (lp->lp_partner.lip_state & LACP_STATE_COLLECTING) != 0;
+ enum lacp_selected selected = lp->lp_selected;
+ struct lacp_aggregator *la;
+
+ /* LACP_DPRINTF((lp, "%s: state %d\n", __func__, lp->lp_mux_state)); */
+
+re_eval:
+ la = lp->lp_aggregator;
+ KASSERT(lp->lp_mux_state == LACP_MUX_DETACHED || la != NULL,
+ ("MUX not detached"));
+ new_state = lp->lp_mux_state;
+ switch (lp->lp_mux_state) {
+ case LACP_MUX_DETACHED:
+ if (selected != LACP_UNSELECTED) {
+ new_state = LACP_MUX_WAITING;
+ }
+ break;
+ case LACP_MUX_WAITING:
+ KASSERT(la->la_pending > 0 ||
+ !LACP_TIMER_ISARMED(lp, LACP_TIMER_WAIT_WHILE),
+ ("timer_wait_while still active"));
+ if (selected == LACP_SELECTED && la->la_pending == 0) {
+ new_state = LACP_MUX_ATTACHED;
+ } else if (selected == LACP_UNSELECTED) {
+ new_state = LACP_MUX_DETACHED;
+ }
+ break;
+ case LACP_MUX_ATTACHED:
+ if (selected == LACP_SELECTED && p_sync) {
+ new_state = LACP_MUX_COLLECTING;
+ } else if (selected != LACP_SELECTED) {
+ new_state = LACP_MUX_DETACHED;
+ }
+ break;
+ case LACP_MUX_COLLECTING:
+ if (selected == LACP_SELECTED && p_sync && p_collecting) {
+ new_state = LACP_MUX_DISTRIBUTING;
+ } else if (selected != LACP_SELECTED || !p_sync) {
+ new_state = LACP_MUX_ATTACHED;
+ }
+ break;
+ case LACP_MUX_DISTRIBUTING:
+ if (selected != LACP_SELECTED || !p_sync || !p_collecting) {
+ new_state = LACP_MUX_COLLECTING;
+ }
+ break;
+ default:
+ panic("%s: unknown state", __func__);
+ }
+
+ if (lp->lp_mux_state == new_state) {
+ return;
+ }
+
+ lacp_set_mux(lp, new_state);
+ goto re_eval;
+}
+
+static void
+lacp_set_mux(struct lacp_port *lp, enum lacp_mux_state new_state)
+{
+ struct lacp_aggregator *la = lp->lp_aggregator;
+
+ if (lp->lp_mux_state == new_state) {
+ return;
+ }
+
+ switch (new_state) {
+ case LACP_MUX_DETACHED:
+ lp->lp_state &= ~LACP_STATE_SYNC;
+ lacp_disable_distributing(lp);
+ lacp_disable_collecting(lp);
+ lacp_sm_assert_ntt(lp);
+ /* cancel timer */
+ if (LACP_TIMER_ISARMED(lp, LACP_TIMER_WAIT_WHILE)) {
+ KASSERT(la->la_pending > 0,
+ ("timer_wait_while not active"));
+ la->la_pending--;
+ }
+ LACP_TIMER_DISARM(lp, LACP_TIMER_WAIT_WHILE);
+ lacp_unselect(lp);
+ break;
+ case LACP_MUX_WAITING:
+ LACP_TIMER_ARM(lp, LACP_TIMER_WAIT_WHILE,
+ LACP_AGGREGATE_WAIT_TIME);
+ la->la_pending++;
+ break;
+ case LACP_MUX_ATTACHED:
+ lp->lp_state |= LACP_STATE_SYNC;
+ lacp_disable_collecting(lp);
+ lacp_sm_assert_ntt(lp);
+ break;
+ case LACP_MUX_COLLECTING:
+ lacp_enable_collecting(lp);
+ lacp_disable_distributing(lp);
+ lacp_sm_assert_ntt(lp);
+ break;
+ case LACP_MUX_DISTRIBUTING:
+ lacp_enable_distributing(lp);
+ break;
+ default:
+ panic("%s: unknown state", __func__);
+ }
+
+ LACP_DPRINTF((lp, "mux_state %d -> %d\n", lp->lp_mux_state, new_state));
+
+ lp->lp_mux_state = new_state;
+}
+
+static void
+lacp_sm_mux_timer(struct lacp_port *lp)
+{
+ struct lacp_aggregator *la = lp->lp_aggregator;
+#if defined(LACP_DEBUG)
+ char buf[LACP_LAGIDSTR_MAX+1];
+#endif
+
+ KASSERT(la->la_pending > 0, ("no pending event"));
+
+ LACP_DPRINTF((lp, "%s: aggregator %s, pending %d -> %d\n", __func__,
+ lacp_format_lagid(&la->la_actor, &la->la_partner,
+ buf, sizeof(buf)),
+ la->la_pending, la->la_pending - 1));
+
+ la->la_pending--;
+}
+
+/* periodic transmit machine */
+
+static void
+lacp_sm_ptx_update_timeout(struct lacp_port *lp, uint8_t oldpstate)
+{
+ if (LACP_STATE_EQ(oldpstate, lp->lp_partner.lip_state,
+ LACP_STATE_TIMEOUT)) {
+ return;
+ }
+
+ LACP_DPRINTF((lp, "partner timeout changed\n"));
+
+ /*
+ * FAST_PERIODIC -> SLOW_PERIODIC
+ * or
+ * SLOW_PERIODIC (-> PERIODIC_TX) -> FAST_PERIODIC
+ *
+ * let lacp_sm_ptx_tx_schedule to update timeout.
+ */
+
+ LACP_TIMER_DISARM(lp, LACP_TIMER_PERIODIC);
+
+ /*
+ * if timeout has been shortened, assert NTT.
+ */
+
+ if ((lp->lp_partner.lip_state & LACP_STATE_TIMEOUT)) {
+ lacp_sm_assert_ntt(lp);
+ }
+}
+
+static void
+lacp_sm_ptx_tx_schedule(struct lacp_port *lp)
+{
+ int timeout;
+
+ if (!(lp->lp_state & LACP_STATE_ACTIVITY) &&
+ !(lp->lp_partner.lip_state & LACP_STATE_ACTIVITY)) {
+
+ /*
+ * NO_PERIODIC
+ */
+
+ LACP_TIMER_DISARM(lp, LACP_TIMER_PERIODIC);
+ return;
+ }
+
+ if (LACP_TIMER_ISARMED(lp, LACP_TIMER_PERIODIC)) {
+ return;
+ }
+
+ timeout = (lp->lp_partner.lip_state & LACP_STATE_TIMEOUT) ?
+ LACP_FAST_PERIODIC_TIME : LACP_SLOW_PERIODIC_TIME;
+
+ LACP_TIMER_ARM(lp, LACP_TIMER_PERIODIC, timeout);
+}
+
+static void
+lacp_sm_ptx_timer(struct lacp_port *lp)
+{
+ lacp_sm_assert_ntt(lp);
+}
+
+static void
+lacp_sm_rx(struct lacp_port *lp, const struct lacpdu *du)
+{
+ int timeout;
+
+ /*
+ * check LACP_DISABLED first
+ */
+
+ if (!(lp->lp_state & LACP_STATE_AGGREGATION)) {
+ return;
+ }
+
+ /*
+ * check loopback condition.
+ */
+
+ if (!lacp_compare_systemid(&du->ldu_actor.lip_systemid,
+ &lp->lp_actor.lip_systemid)) {
+ return;
+ }
+
+ /*
+ * EXPIRED, DEFAULTED, CURRENT -> CURRENT
+ */
+
+ lacp_sm_rx_update_selected(lp, du);
+ lacp_sm_rx_update_ntt(lp, du);
+ lacp_sm_rx_record_pdu(lp, du);
+
+ timeout = (lp->lp_state & LACP_STATE_TIMEOUT) ?
+ LACP_SHORT_TIMEOUT_TIME : LACP_LONG_TIMEOUT_TIME;
+ LACP_TIMER_ARM(lp, LACP_TIMER_CURRENT_WHILE, timeout);
+
+ lp->lp_state &= ~LACP_STATE_EXPIRED;
+
+ /*
+	 * kick transmit machine without waiting for the next tick.
+ */
+
+ lacp_sm_tx(lp);
+}
+
+static void
+lacp_sm_rx_set_expired(struct lacp_port *lp)
+{
+ lp->lp_partner.lip_state &= ~LACP_STATE_SYNC;
+ lp->lp_partner.lip_state |= LACP_STATE_TIMEOUT;
+ LACP_TIMER_ARM(lp, LACP_TIMER_CURRENT_WHILE, LACP_SHORT_TIMEOUT_TIME);
+ lp->lp_state |= LACP_STATE_EXPIRED;
+}
+
+static void
+lacp_sm_rx_timer(struct lacp_port *lp)
+{
+ if ((lp->lp_state & LACP_STATE_EXPIRED) == 0) {
+ /* CURRENT -> EXPIRED */
+ LACP_DPRINTF((lp, "%s: CURRENT -> EXPIRED\n", __func__));
+ lacp_sm_rx_set_expired(lp);
+ } else {
+ /* EXPIRED -> DEFAULTED */
+ LACP_DPRINTF((lp, "%s: EXPIRED -> DEFAULTED\n", __func__));
+ lacp_sm_rx_update_default_selected(lp);
+ lacp_sm_rx_record_default(lp);
+ lp->lp_state &= ~LACP_STATE_EXPIRED;
+ }
+}
+
+static void
+lacp_sm_rx_record_pdu(struct lacp_port *lp, const struct lacpdu *du)
+{
+ boolean_t active;
+ uint8_t oldpstate;
+#if defined(LACP_DEBUG)
+ char buf[LACP_STATESTR_MAX+1];
+#endif
+
+ /* LACP_DPRINTF((lp, "%s\n", __func__)); */
+
+ oldpstate = lp->lp_partner.lip_state;
+
+ active = (du->ldu_actor.lip_state & LACP_STATE_ACTIVITY)
+ || ((lp->lp_state & LACP_STATE_ACTIVITY) &&
+ (du->ldu_partner.lip_state & LACP_STATE_ACTIVITY));
+
+ lp->lp_partner = du->ldu_actor;
+ if (active &&
+ ((LACP_STATE_EQ(lp->lp_state, du->ldu_partner.lip_state,
+ LACP_STATE_AGGREGATION) &&
+ !lacp_compare_peerinfo(&lp->lp_actor, &du->ldu_partner))
+ || (du->ldu_partner.lip_state & LACP_STATE_AGGREGATION) == 0)) {
+ /* XXX nothing? */
+ } else {
+ lp->lp_partner.lip_state &= ~LACP_STATE_SYNC;
+ }
+
+ lp->lp_state &= ~LACP_STATE_DEFAULTED;
+
+ if (oldpstate != lp->lp_partner.lip_state) {
+ LACP_DPRINTF((lp, "old pstate %s\n",
+ lacp_format_state(oldpstate, buf, sizeof(buf))));
+ LACP_DPRINTF((lp, "new pstate %s\n",
+ lacp_format_state(lp->lp_partner.lip_state, buf,
+ sizeof(buf))));
+ }
+
+ lacp_sm_ptx_update_timeout(lp, oldpstate);
+}
+
+static void
+lacp_sm_rx_update_ntt(struct lacp_port *lp, const struct lacpdu *du)
+{
+ /* LACP_DPRINTF((lp, "%s\n", __func__)); */
+
+ if (lacp_compare_peerinfo(&lp->lp_actor, &du->ldu_partner) ||
+ !LACP_STATE_EQ(lp->lp_state, du->ldu_partner.lip_state,
+ LACP_STATE_ACTIVITY | LACP_STATE_SYNC | LACP_STATE_AGGREGATION)) {
+ LACP_DPRINTF((lp, "%s: assert ntt\n", __func__));
+ lacp_sm_assert_ntt(lp);
+ }
+}
+
+static void
+lacp_sm_rx_record_default(struct lacp_port *lp)
+{
+ uint8_t oldpstate;
+
+ /* LACP_DPRINTF((lp, "%s\n", __func__)); */
+
+ oldpstate = lp->lp_partner.lip_state;
+ lp->lp_partner = lacp_partner_admin;
+ lp->lp_state |= LACP_STATE_DEFAULTED;
+ lacp_sm_ptx_update_timeout(lp, oldpstate);
+}
+
+static void
+lacp_sm_rx_update_selected_from_peerinfo(struct lacp_port *lp,
+ const struct lacp_peerinfo *info)
+{
+ /* LACP_DPRINTF((lp, "%s\n", __func__)); */
+
+ if (lacp_compare_peerinfo(&lp->lp_partner, info) ||
+ !LACP_STATE_EQ(lp->lp_partner.lip_state, info->lip_state,
+ LACP_STATE_AGGREGATION)) {
+ lp->lp_selected = LACP_UNSELECTED;
+ /* mux machine will clean up lp->lp_aggregator */
+ }
+}
+
+static void
+lacp_sm_rx_update_selected(struct lacp_port *lp, const struct lacpdu *du)
+{
+ /* LACP_DPRINTF((lp, "%s\n", __func__)); */
+
+ lacp_sm_rx_update_selected_from_peerinfo(lp, &du->ldu_actor);
+}
+
+static void
+lacp_sm_rx_update_default_selected(struct lacp_port *lp)
+{
+ /* LACP_DPRINTF((lp, "%s\n", __func__)); */
+
+ lacp_sm_rx_update_selected_from_peerinfo(lp, &lacp_partner_admin);
+}
+
+/* transmit machine */
+
+static void
+lacp_sm_tx(struct lacp_port *lp)
+{
+ int error;
+
+ if (!(lp->lp_state & LACP_STATE_AGGREGATION)
+#if 1
+ || (!(lp->lp_state & LACP_STATE_ACTIVITY)
+ && !(lp->lp_partner.lip_state & LACP_STATE_ACTIVITY))
+#endif
+ ) {
+ lp->lp_flags &= ~LACP_PORT_NTT;
+ }
+
+ if (!(lp->lp_flags & LACP_PORT_NTT)) {
+ return;
+ }
+
+ /* Rate limit to 3 PDUs per LACP_FAST_PERIODIC_TIME */
+ if (ppsratecheck(&lp->lp_last_lacpdu, &lp->lp_lacpdu_sent,
+ (3 / LACP_FAST_PERIODIC_TIME)) == 0) {
+ LACP_DPRINTF((lp, "rate limited pdu\n"));
+ return;
+ }
+
+ error = lacp_xmit_lacpdu(lp);
+
+ if (error == 0) {
+ lp->lp_flags &= ~LACP_PORT_NTT;
+ } else {
+ LACP_DPRINTF((lp, "lacpdu transmit failure, error %d\n",
+ error));
+ }
+}
+
+static void
+lacp_sm_assert_ntt(struct lacp_port *lp)
+{
+
+ lp->lp_flags |= LACP_PORT_NTT;
+}
+
+static void
+lacp_run_timers(struct lacp_port *lp)
+{
+ int i;
+
+ for (i = 0; i < LACP_NTIMER; i++) {
+ KASSERT(lp->lp_timer[i] >= 0,
+ ("invalid timer value %d", lp->lp_timer[i]));
+ if (lp->lp_timer[i] == 0) {
+ continue;
+ } else if (--lp->lp_timer[i] <= 0) {
+ if (lacp_timer_funcs[i]) {
+ (*lacp_timer_funcs[i])(lp);
+ }
+ }
+ }
+}
+
+int
+lacp_marker_input(struct lacp_port *lp, struct mbuf *m)
+{
+ struct lacp_softc *lsc = lp->lp_lsc;
+ struct lagg_port *lgp = lp->lp_lagg;
+ struct lacp_port *lp2;
+ struct markerdu *mdu;
+ int error = 0;
+ int pending = 0;
+
+ if (m->m_pkthdr.len != sizeof(*mdu)) {
+ goto bad;
+ }
+
+ if ((m->m_flags & M_MCAST) == 0) {
+ goto bad;
+ }
+
+ if (m->m_len < sizeof(*mdu)) {
+ m = m_pullup(m, sizeof(*mdu));
+ if (m == NULL) {
+ return (ENOMEM);
+ }
+ }
+
+ mdu = mtod(m, struct markerdu *);
+
+ if (memcmp(&mdu->mdu_eh.ether_dhost,
+ &ethermulticastaddr_slowprotocols, ETHER_ADDR_LEN)) {
+ goto bad;
+ }
+
+ if (mdu->mdu_sph.sph_version != 1) {
+ goto bad;
+ }
+
+ switch (mdu->mdu_tlv.tlv_type) {
+ case MARKER_TYPE_INFO:
+ if (tlv_check(mdu, sizeof(*mdu), &mdu->mdu_tlv,
+ marker_info_tlv_template, TRUE)) {
+ goto bad;
+ }
+ mdu->mdu_tlv.tlv_type = MARKER_TYPE_RESPONSE;
+ memcpy(&mdu->mdu_eh.ether_dhost,
+ &ethermulticastaddr_slowprotocols, ETHER_ADDR_LEN);
+ memcpy(&mdu->mdu_eh.ether_shost,
+ lgp->lp_lladdr, ETHER_ADDR_LEN);
+ error = lagg_enqueue(lp->lp_ifp, m);
+ break;
+
+ case MARKER_TYPE_RESPONSE:
+ if (tlv_check(mdu, sizeof(*mdu), &mdu->mdu_tlv,
+ marker_response_tlv_template, TRUE)) {
+ goto bad;
+ }
+ LACP_DPRINTF((lp, "marker response, port=%u, sys=%6D, id=%u\n",
+ ntohs(mdu->mdu_info.mi_rq_port), mdu->mdu_info.mi_rq_system,
+ ":", ntohl(mdu->mdu_info.mi_rq_xid)));
+
+ /* Verify that it is the last marker we sent out */
+ if (memcmp(&mdu->mdu_info, &lp->lp_marker,
+ sizeof(struct lacp_markerinfo)))
+ goto bad;
+
+ LACP_LOCK(lsc);
+ lp->lp_flags &= ~LACP_PORT_MARK;
+
+ if (lsc->lsc_suppress_distributing) {
+ /* Check if any ports are waiting for a response */
+ LIST_FOREACH(lp2, &lsc->lsc_ports, lp_next) {
+ if (lp2->lp_flags & LACP_PORT_MARK) {
+ pending = 1;
+ break;
+ }
+ }
+
+ if (pending == 0) {
+ /* All interface queues are clear */
+ LACP_DPRINTF((NULL, "queue flush complete\n"));
+ lsc->lsc_suppress_distributing = FALSE;
+ }
+ }
+ LACP_UNLOCK(lsc);
+ m_freem(m);
+ break;
+
+ default:
+ goto bad;
+ }
+
+ return (error);
+
+bad:
+ LACP_DPRINTF((lp, "bad marker frame\n"));
+ m_freem(m);
+ return (EINVAL);
+}
+
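+/*
+ * Walk a chain of TLVs against a template list, stopping at the
+ * terminator template (type 0).  Returns EINVAL if a TLV would run
+ * past the end of the PDU, if its length differs from the template,
+ * or (when check_type is set) if its type does not match.
+ */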
+static int
+tlv_check(const void *p, size_t size, const struct tlvhdr *tlv,
+ const struct tlv_template *tmpl, boolean_t check_type)
+{
+ while (/* CONSTCOND */ 1) {
+ if ((const char *)tlv - (const char *)p + sizeof(*tlv) > size) {
+ return (EINVAL);
+ }
+ if ((check_type && tlv->tlv_type != tmpl->tmpl_type) ||
+ tlv->tlv_length != tmpl->tmpl_length) {
+ return (EINVAL);
+ }
+ if (tmpl->tmpl_type == 0) {
+ break;
+ }
+ tlv = (const struct tlvhdr *)
+ ((const char *)tlv + tlv->tlv_length);
+ tmpl++;
+ }
+
+ return (0);
+}
+
+#if defined(LACP_DEBUG)
+const char *
+lacp_format_mac(const uint8_t *mac, char *buf, size_t buflen)
+{
+ snprintf(buf, buflen, "%02X-%02X-%02X-%02X-%02X-%02X",
+ (int)mac[0],
+ (int)mac[1],
+ (int)mac[2],
+ (int)mac[3],
+ (int)mac[4],
+ (int)mac[5]);
+
+ return (buf);
+}
+
+const char *
+lacp_format_systemid(const struct lacp_systemid *sysid,
+ char *buf, size_t buflen)
+{
+ char macbuf[LACP_MACSTR_MAX+1];
+
+ snprintf(buf, buflen, "%04X,%s",
+ ntohs(sysid->lsi_prio),
+ lacp_format_mac(sysid->lsi_mac, macbuf, sizeof(macbuf)));
+
+ return (buf);
+}
+
+const char *
+lacp_format_portid(const struct lacp_portid *portid, char *buf, size_t buflen)
+{
+ snprintf(buf, buflen, "%04X,%04X",
+ ntohs(portid->lpi_prio),
+ ntohs(portid->lpi_portno));
+
+ return (buf);
+}
+
+const char *
+lacp_format_partner(const struct lacp_peerinfo *peer, char *buf, size_t buflen)
+{
+ char sysid[LACP_SYSTEMIDSTR_MAX+1];
+ char portid[LACP_PORTIDSTR_MAX+1];
+
+ snprintf(buf, buflen, "(%s,%04X,%s)",
+ lacp_format_systemid(&peer->lip_systemid, sysid, sizeof(sysid)),
+ ntohs(peer->lip_key),
+ lacp_format_portid(&peer->lip_portid, portid, sizeof(portid)));
+
+ return (buf);
+}
+
+const char *
+lacp_format_lagid(const struct lacp_peerinfo *a,
+ const struct lacp_peerinfo *b, char *buf, size_t buflen)
+{
+ char astr[LACP_PARTNERSTR_MAX+1];
+ char bstr[LACP_PARTNERSTR_MAX+1];
+
+#if 0
+ /*
+	 * There is a convention to display the smaller-numbered
+	 * peer on the left.

+ */
+
+ if (lacp_compare_peerinfo(a, b) > 0) {
+ const struct lacp_peerinfo *t;
+
+ t = a;
+ a = b;
+ b = t;
+ }
+#endif
+
+ snprintf(buf, buflen, "[%s,%s]",
+ lacp_format_partner(a, astr, sizeof(astr)),
+ lacp_format_partner(b, bstr, sizeof(bstr)));
+
+ return (buf);
+}
+
+const char *
+lacp_format_lagid_aggregator(const struct lacp_aggregator *la,
+ char *buf, size_t buflen)
+{
+ if (la == NULL) {
+ return ("(none)");
+ }
+
+ return (lacp_format_lagid(&la->la_actor, &la->la_partner, buf, buflen));
+}
+
+const char *
+lacp_format_state(uint8_t state, char *buf, size_t buflen)
+{
+ snprintf(buf, buflen, "%b", state, LACP_STATE_BITS);
+ return (buf);
+}
+
+static void
+lacp_dump_lacpdu(const struct lacpdu *du)
+{
+ char buf[LACP_PARTNERSTR_MAX+1];
+ char buf2[LACP_STATESTR_MAX+1];
+
+ printf("actor=%s\n",
+ lacp_format_partner(&du->ldu_actor, buf, sizeof(buf)));
+ printf("actor.state=%s\n",
+ lacp_format_state(du->ldu_actor.lip_state, buf2, sizeof(buf2)));
+ printf("partner=%s\n",
+ lacp_format_partner(&du->ldu_partner, buf, sizeof(buf)));
+ printf("partner.state=%s\n",
+ lacp_format_state(du->ldu_partner.lip_state, buf2, sizeof(buf2)));
+
+ printf("maxdelay=%d\n", ntohs(du->ldu_collector.lci_maxdelay));
+}
+
+static void
+lacp_dprintf(const struct lacp_port *lp, const char *fmt, ...)
+{
+ va_list va;
+
+ if (lp) {
+ printf("%s: ", lp->lp_ifp->if_xname);
+ }
+
+ va_start(va, fmt);
+ vprintf(fmt, va);
+ va_end(va);
+}
+#endif
diff --git a/rtems/freebsd/net/ieee8023ad_lacp.h b/rtems/freebsd/net/ieee8023ad_lacp.h
new file mode 100644
index 00000000..9cebc591
--- /dev/null
+++ b/rtems/freebsd/net/ieee8023ad_lacp.h
@@ -0,0 +1,333 @@
+/* $NetBSD: ieee8023ad_impl.h,v 1.2 2005/12/10 23:21:39 elad Exp $ */
+
+/*-
+ * Copyright (c)2005 YAMAMOTO Takashi,
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * IEEE802.3ad LACP
+ *
+ * implementation details.
+ */
+
+#define LACP_TIMER_CURRENT_WHILE 0
+#define LACP_TIMER_PERIODIC 1
+#define LACP_TIMER_WAIT_WHILE 2
+#define LACP_NTIMER 3
+
+#define LACP_TIMER_ARM(port, timer, val) \
+ (port)->lp_timer[(timer)] = (val)
+#define LACP_TIMER_DISARM(port, timer) \
+ (port)->lp_timer[(timer)] = 0
+#define LACP_TIMER_ISARMED(port, timer) \
+ ((port)->lp_timer[(timer)] > 0)
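+
+/*
+ * Timer values count in seconds; lacp_run_timers() decrements each armed
+ * timer once per invocation (presumably driven from a one-second LACP
+ * tick) and fires the matching lacp_timer_funcs[] handler on expiry.
+ */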
+
+/*
+ * IEEE802.3ad LACP
+ *
+ * protocol definitions.
+ */
+
+#define LACP_STATE_ACTIVITY (1<<0)
+#define LACP_STATE_TIMEOUT (1<<1)
+#define LACP_STATE_AGGREGATION (1<<2)
+#define LACP_STATE_SYNC (1<<3)
+#define LACP_STATE_COLLECTING (1<<4)
+#define LACP_STATE_DISTRIBUTING (1<<5)
+#define LACP_STATE_DEFAULTED (1<<6)
+#define LACP_STATE_EXPIRED (1<<7)
+
+#define LACP_PORT_NTT 0x00000001
+#define LACP_PORT_MARK 0x00000002
+
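+/*
+ * Bit names for the kernel "%b" format used by lacp_format_state():
+ * the leading "\020" selects hexadecimal output, and each "\0NN<NAME>"
+ * entry pairs the (1-based) bit number NN with its printed name.
+ */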
+#define LACP_STATE_BITS \
+ "\020" \
+ "\001ACTIVITY" \
+ "\002TIMEOUT" \
+ "\003AGGREGATION" \
+ "\004SYNC" \
+ "\005COLLECTING" \
+ "\006DISTRIBUTING" \
+ "\007DEFAULTED" \
+ "\010EXPIRED"
+
+/*
+ * IEEE802.3 slow protocols
+ *
+ * protocol (on-wire) definitions.
+ *
+ * XXX should be elsewhere.
+ */
+
+#define SLOWPROTOCOLS_SUBTYPE_LACP 1
+#define SLOWPROTOCOLS_SUBTYPE_MARKER 2
+
+struct slowprothdr {
+ uint8_t sph_subtype;
+ uint8_t sph_version;
+} __packed;
+
+/*
+ * TLV on-wire structure.
+ */
+
+struct tlvhdr {
+ uint8_t tlv_type;
+ uint8_t tlv_length;
+ /* uint8_t tlv_value[]; */
+} __packed;
+
+/*
+ * ... and our implementation.
+ */
+
+#define TLV_SET(tlv, type, length) \
+ do { \
+ (tlv)->tlv_type = (type); \
+ (tlv)->tlv_length = sizeof(*tlv) + (length); \
+ } while (/*CONSTCOND*/0)
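+/* Note: tlv_length covers the two-byte TLV header as well as the value. */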
+
+struct tlv_template {
+ uint8_t tmpl_type;
+ uint8_t tmpl_length;
+};
+
+struct lacp_systemid {
+ uint16_t lsi_prio;
+ uint8_t lsi_mac[6];
+} __packed;
+
+struct lacp_portid {
+ uint16_t lpi_prio;
+ uint16_t lpi_portno;
+} __packed;
+
+struct lacp_peerinfo {
+ struct lacp_systemid lip_systemid;
+ uint16_t lip_key;
+ struct lacp_portid lip_portid;
+ uint8_t lip_state;
+ uint8_t lip_resv[3];
+} __packed;
+
+struct lacp_collectorinfo {
+ uint16_t lci_maxdelay;
+ uint8_t lci_resv[12];
+} __packed;
+
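+/*
+ * A complete LACPDU as it appears on the wire: Ethernet and slow
+ * protocols headers followed by the actor, partner, and collector
+ * information TLVs and a terminator; ldu_resv pads the PDU to its
+ * fixed on-wire size.
+ */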
+struct lacpdu {
+ struct ether_header ldu_eh;
+ struct slowprothdr ldu_sph;
+
+ struct tlvhdr ldu_tlv_actor;
+ struct lacp_peerinfo ldu_actor;
+ struct tlvhdr ldu_tlv_partner;
+ struct lacp_peerinfo ldu_partner;
+ struct tlvhdr ldu_tlv_collector;
+ struct lacp_collectorinfo ldu_collector;
+ struct tlvhdr ldu_tlv_term;
+ uint8_t ldu_resv[50];
+} __packed;
+
+/*
+ * IEEE802.3ad marker protocol
+ *
+ * protocol (on-wire) definitions.
+ */
+struct lacp_markerinfo {
+ uint16_t mi_rq_port;
+ uint8_t mi_rq_system[ETHER_ADDR_LEN];
+ uint32_t mi_rq_xid;
+ uint8_t mi_pad[2];
+} __packed;
+
+struct markerdu {
+ struct ether_header mdu_eh;
+ struct slowprothdr mdu_sph;
+
+ struct tlvhdr mdu_tlv;
+ struct lacp_markerinfo mdu_info;
+ struct tlvhdr mdu_tlv_term;
+ uint8_t mdu_resv[90];
+} __packed;
+
+#define MARKER_TYPE_INFO 0x01
+#define MARKER_TYPE_RESPONSE 0x02
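+
+/*
+ * Markers are used to flush frames in flight when traffic must be
+ * redistributed: lacp_marker_input() keeps lsc_suppress_distributing
+ * set until every port flagged LACP_PORT_MARK has received its marker
+ * response.
+ */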
+
+enum lacp_selected {
+ LACP_UNSELECTED,
+ LACP_STANDBY, /* not used in this implementation */
+ LACP_SELECTED,
+};
+
+enum lacp_mux_state {
+ LACP_MUX_DETACHED,
+ LACP_MUX_WAITING,
+ LACP_MUX_ATTACHED,
+ LACP_MUX_COLLECTING,
+ LACP_MUX_DISTRIBUTING,
+};
+
+#define LACP_MAX_PORTS 32
+
+struct lacp_portmap {
+ int pm_count;
+ struct lacp_port *pm_map[LACP_MAX_PORTS];
+};
+
+struct lacp_port {
+ TAILQ_ENTRY(lacp_port) lp_dist_q;
+ LIST_ENTRY(lacp_port) lp_next;
+ struct lacp_softc *lp_lsc;
+ struct lagg_port *lp_lagg;
+ struct ifnet *lp_ifp;
+ struct lacp_peerinfo lp_partner;
+ struct lacp_peerinfo lp_actor;
+ struct lacp_markerinfo lp_marker;
+#define lp_state lp_actor.lip_state
+#define lp_key lp_actor.lip_key
+#define lp_systemid lp_actor.lip_systemid
+ struct timeval lp_last_lacpdu;
+ int lp_lacpdu_sent;
+ enum lacp_mux_state lp_mux_state;
+ enum lacp_selected lp_selected;
+ int lp_flags;
+ u_int lp_media; /* XXX redundant */
+ int lp_timer[LACP_NTIMER];
+ struct ifmultiaddr *lp_ifma;
+
+ struct lacp_aggregator *lp_aggregator;
+};
+
+struct lacp_aggregator {
+ TAILQ_ENTRY(lacp_aggregator) la_q;
+ int la_refcnt; /* num of ports which selected us */
+ int la_nports; /* num of distributing ports */
+ TAILQ_HEAD(, lacp_port) la_ports; /* distributing ports */
+ struct lacp_peerinfo la_partner;
+ struct lacp_peerinfo la_actor;
+ int la_pending; /* number of ports in wait_while */
+};
+
+struct lacp_softc {
+ struct lagg_softc *lsc_softc;
+ struct mtx lsc_mtx;
+ struct lacp_aggregator *lsc_active_aggregator;
+ TAILQ_HEAD(, lacp_aggregator) lsc_aggregators;
+ boolean_t lsc_suppress_distributing;
+ struct callout lsc_transit_callout;
+ struct callout lsc_callout;
+ LIST_HEAD(, lacp_port) lsc_ports;
+ struct lacp_portmap lsc_pmap[2];
+ volatile u_int lsc_activemap;
+ u_int32_t lsc_hashkey;
+};
+
+#define LACP_TYPE_ACTORINFO 1
+#define LACP_TYPE_PARTNERINFO 2
+#define LACP_TYPE_COLLECTORINFO 3
+
+/* timeout values (in sec) */
+#define LACP_FAST_PERIODIC_TIME (1)
+#define LACP_SLOW_PERIODIC_TIME (30)
+#define LACP_SHORT_TIMEOUT_TIME (3 * LACP_FAST_PERIODIC_TIME)
+#define LACP_LONG_TIMEOUT_TIME (3 * LACP_SLOW_PERIODIC_TIME)
+#define LACP_CHURN_DETECTION_TIME (60)
+#define LACP_AGGREGATE_WAIT_TIME (2)
+#define LACP_TRANSIT_DELAY 3000 /* in msec */
+
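+/* True iff s1 and s2 agree on every bit selected by mask. */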
+#define LACP_STATE_EQ(s1, s2, mask) \
+ ((((s1) ^ (s2)) & (mask)) == 0)
+
+#define LACP_SYS_PRI(peer) (peer).lip_systemid.lsi_prio
+
+#define LACP_PORT(_lp) ((struct lacp_port *)(_lp)->lp_psc)
+#define LACP_SOFTC(_sc) ((struct lacp_softc *)(_sc)->sc_psc)
+
+#define LACP_LOCK_INIT(_lsc) mtx_init(&(_lsc)->lsc_mtx, \
+ "lacp mtx", NULL, MTX_DEF)
+#define LACP_LOCK_DESTROY(_lsc) mtx_destroy(&(_lsc)->lsc_mtx)
+#define LACP_LOCK(_lsc) mtx_lock(&(_lsc)->lsc_mtx)
+#define LACP_UNLOCK(_lsc) mtx_unlock(&(_lsc)->lsc_mtx)
+#define LACP_LOCK_ASSERT(_lsc) mtx_assert(&(_lsc)->lsc_mtx, MA_OWNED)
+
+struct mbuf *lacp_input(struct lagg_port *, struct mbuf *);
+struct lagg_port *lacp_select_tx_port(struct lagg_softc *, struct mbuf *);
+int lacp_attach(struct lagg_softc *);
+int lacp_detach(struct lagg_softc *);
+void lacp_init(struct lagg_softc *);
+void lacp_stop(struct lagg_softc *);
+int lacp_port_create(struct lagg_port *);
+void lacp_port_destroy(struct lagg_port *);
+void lacp_linkstate(struct lagg_port *);
+void lacp_req(struct lagg_softc *, caddr_t);
+void lacp_portreq(struct lagg_port *, caddr_t);
+
+static __inline int
+lacp_isactive(struct lagg_port *lgp)
+{
+ struct lacp_port *lp = LACP_PORT(lgp);
+ struct lacp_softc *lsc = lp->lp_lsc;
+ struct lacp_aggregator *la = lp->lp_aggregator;
+
+ /* This port is joined to the active aggregator */
+ if (la != NULL && la == lsc->lsc_active_aggregator)
+ return (1);
+
+ return (0);
+}
+
+static __inline int
+lacp_iscollecting(struct lagg_port *lgp)
+{
+ struct lacp_port *lp = LACP_PORT(lgp);
+
+ return ((lp->lp_state & LACP_STATE_COLLECTING) != 0);
+}
+
+static __inline int
+lacp_isdistributing(struct lagg_port *lgp)
+{
+ struct lacp_port *lp = LACP_PORT(lgp);
+
+ return ((lp->lp_state & LACP_STATE_DISTRIBUTING) != 0);
+}
+
+/* following constants don't include terminating NUL */
+#define LACP_MACSTR_MAX (2*6 + 5)
+#define LACP_SYSTEMPRIOSTR_MAX (4)
+#define LACP_SYSTEMIDSTR_MAX (LACP_SYSTEMPRIOSTR_MAX + 1 + LACP_MACSTR_MAX)
+#define LACP_PORTPRIOSTR_MAX (4)
+#define LACP_PORTNOSTR_MAX (4)
+#define LACP_PORTIDSTR_MAX (LACP_PORTPRIOSTR_MAX + 1 + LACP_PORTNOSTR_MAX)
+#define LACP_KEYSTR_MAX (4)
+#define LACP_PARTNERSTR_MAX \
+ (1 + LACP_SYSTEMIDSTR_MAX + 1 + LACP_KEYSTR_MAX + 1 \
+ + LACP_PORTIDSTR_MAX + 1)
+#define LACP_LAGIDSTR_MAX \
+ (1 + LACP_PARTNERSTR_MAX + 1 + LACP_PARTNERSTR_MAX + 1)
+#define LACP_STATESTR_MAX (255) /* XXX */
diff --git a/rtems/freebsd/net/if.c b/rtems/freebsd/net/if.c
new file mode 100644
index 00000000..51e1511f
--- /dev/null
+++ b/rtems/freebsd/net/if.c
@@ -0,0 +1,3431 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 1980, 1986, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)if.c 8.5 (Berkeley) 1/9/95
+ * $FreeBSD$
+ */
+
+#include <rtems/freebsd/local/opt_compat.h>
+#include <rtems/freebsd/local/opt_inet6.h>
+#include <rtems/freebsd/local/opt_inet.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/types.h>
+#include <rtems/freebsd/sys/conf.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/sbuf.h>
+#include <rtems/freebsd/sys/bus.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/priv.h>
+#include <rtems/freebsd/sys/proc.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/socketvar.h>
+#include <rtems/freebsd/sys/protosw.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/refcount.h>
+#include <rtems/freebsd/sys/module.h>
+#include <rtems/freebsd/sys/rwlock.h>
+#include <rtems/freebsd/sys/sockio.h>
+#include <rtems/freebsd/sys/syslog.h>
+#include <rtems/freebsd/sys/sysctl.h>
+#include <rtems/freebsd/sys/taskqueue.h>
+#include <rtems/freebsd/sys/domain.h>
+#include <rtems/freebsd/sys/jail.h>
+#include <rtems/freebsd/machine/stdarg.h>
+#include <rtems/freebsd/vm/uma.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/if_arp.h>
+#include <rtems/freebsd/net/if_clone.h>
+#include <rtems/freebsd/net/if_dl.h>
+#include <rtems/freebsd/net/if_types.h>
+#include <rtems/freebsd/net/if_var.h>
+#include <rtems/freebsd/net/radix.h>
+#include <rtems/freebsd/net/route.h>
+#include <rtems/freebsd/net/vnet.h>
+
+#if defined(INET) || defined(INET6)
+/*XXX*/
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/in_var.h>
+#include <rtems/freebsd/netinet/ip_carp.h>
+#ifdef INET6
+#include <rtems/freebsd/netinet6/in6_var.h>
+#include <rtems/freebsd/netinet6/in6_ifattach.h>
+#endif
+#endif
+#ifdef INET
+#include <rtems/freebsd/netinet/if_ether.h>
+#endif
+
+#include <rtems/freebsd/security/mac/mac_framework.h>
+
+#ifdef COMPAT_FREEBSD32
+#include <rtems/freebsd/sys/mount.h>
+#include <rtems/freebsd/compat/freebsd32/freebsd32.h>
+#endif
+
+struct ifindex_entry {
+ struct ifnet *ife_ifnet;
+};
+
+static int slowtimo_started;
+
+SYSCTL_NODE(_net, PF_LINK, link, CTLFLAG_RW, 0, "Link layers");
+SYSCTL_NODE(_net_link, 0, generic, CTLFLAG_RW, 0, "Generic link-management");
+
+TUNABLE_INT("net.link.ifqmaxlen", &ifqmaxlen);
+SYSCTL_UINT(_net_link, OID_AUTO, ifqmaxlen, CTLFLAG_RDTUN,
+ &ifqmaxlen, 0, "max send queue size");
+
+/* Log link state change events */
+static int log_link_state_change = 1;
+
+SYSCTL_INT(_net_link, OID_AUTO, log_link_state_change, CTLFLAG_RW,
+ &log_link_state_change, 0,
+ "log interface link state change events");
+
+/* Interface description */
+static unsigned int ifdescr_maxlen = 1024;
+SYSCTL_UINT(_net, OID_AUTO, ifdescr_maxlen, CTLFLAG_RW,
+ &ifdescr_maxlen, 0,
+ "administrative maximum length for interface description");
+
+MALLOC_DEFINE(M_IFDESCR, "ifdescr", "ifnet descriptions");
+
+/* global sx for non-critical path ifdescr */
+static struct sx ifdescr_sx;
+SX_SYSINIT(ifdescr_sx, &ifdescr_sx, "ifnet descr");
+
+void (*bstp_linkstate_p)(struct ifnet *ifp, int state);
+void (*ng_ether_link_state_p)(struct ifnet *ifp, int state);
+void (*lagg_linkstate_p)(struct ifnet *ifp, int state);
+/* These are external hooks for CARP. */
+void (*carp_linkstate_p)(struct ifnet *ifp);
+#if defined(INET) || defined(INET6)
+struct ifnet *(*carp_forus_p)(struct ifnet *ifp, u_char *dhost);
+int (*carp_output_p)(struct ifnet *ifp, struct mbuf *m,
+ struct sockaddr *sa, struct rtentry *rt);
+#endif
+#ifdef INET
+int (*carp_iamatch_p)(struct ifnet *, struct in_ifaddr *, struct in_addr *,
+ u_int8_t **);
+#endif
+#ifdef INET6
+struct ifaddr *(*carp_iamatch6_p)(struct ifnet *ifp, struct in6_addr *taddr6);
+caddr_t (*carp_macmatch6_p)(struct ifnet *ifp, struct mbuf *m,
+ const struct in6_addr *taddr);
+#endif
+
+struct mbuf *(*tbr_dequeue_ptr)(struct ifaltq *, int) = NULL;
+
+/*
+ * XXX: Style; these should be sorted alphabetically, and unprototyped
+ * static functions should be prototyped. Currently they are sorted by
+ * declaration order.
+ */
+static void if_attachdomain(void *);
+static void if_attachdomain1(struct ifnet *);
+static int ifconf(u_long, caddr_t);
+static void if_freemulti(struct ifmultiaddr *);
+static void if_init(void *);
+static void if_grow(void);
+static void if_check(void *);
+static void if_route(struct ifnet *, int flag, int fam);
+static int if_setflag(struct ifnet *, int, int, int *, int);
+static void if_slowtimo(void *);
+static int if_transmit(struct ifnet *ifp, struct mbuf *m);
+static void if_unroute(struct ifnet *, int flag, int fam);
+static void link_rtrequest(int, struct rtentry *, struct rt_addrinfo *);
+static int if_rtdel(struct radix_node *, void *);
+static int ifhwioctl(u_long, struct ifnet *, caddr_t, struct thread *);
+static int if_delmulti_locked(struct ifnet *, struct ifmultiaddr *, int);
+static void do_link_state_change(void *, int);
+static int if_getgroup(struct ifgroupreq *, struct ifnet *);
+static int if_getgroupmembers(struct ifgroupreq *);
+static void if_delgroups(struct ifnet *);
+static void if_attach_internal(struct ifnet *, int);
+static void if_detach_internal(struct ifnet *, int);
+
+#ifdef INET6
+/*
+ * XXX: declared here to avoid including many inet6-related files;
+ * should probably be generalized.
+ */
+extern void nd6_setmtu(struct ifnet *);
+#endif
+
+VNET_DEFINE(int, if_index);
+int ifqmaxlen = IFQ_MAXLEN;
+VNET_DEFINE(struct ifnethead, ifnet); /* depend on static init XXX */
+VNET_DEFINE(struct ifgrouphead, ifg_head);
+
+static VNET_DEFINE(int, if_indexlim) = 8;
+
+/* Table of ifnet by index. */
+VNET_DEFINE(struct ifindex_entry *, ifindex_table);
+
+#define V_if_indexlim VNET(if_indexlim)
+#define V_ifindex_table VNET(ifindex_table)
+
+/*
+ * The global network interface list (V_ifnet) and related state (such as
+ * if_index, if_indexlim, and ifindex_table) are protected by an sxlock and
+ * an rwlock.  Either may be acquired shared to stabilize the list, but both
+ * must be acquired writable to modify the list.  This model allows us both
+ * to stabilize the interface list during interrupt thread processing and to
+ * stabilize it over long-running ioctls, without introducing priority
+ * inversions and deadlocks.
+ */
+struct rwlock ifnet_rwlock;
+struct sx ifnet_sxlock;
+
+/*
+ * The allocation of network interfaces is a rather non-atomic affair; we
+ * need to select an index before we are ready to expose the interface for
+ * use, so we use this pointer value to indicate that the slot is reserved.
+ */
+#define IFNET_HOLD (void *)(uintptr_t)(-1)
+
+static if_com_alloc_t *if_com_alloc[256];
+static if_com_free_t *if_com_free[256];
+
+/*
+ * System initialization
+ */
+SYSINIT(interface_check, SI_SUB_PROTO_IF, SI_ORDER_FIRST, if_check, NULL);
+
+MALLOC_DEFINE(M_IFNET, "ifnet", "interface internals");
+MALLOC_DEFINE(M_IFADDR, "ifaddr", "interface address");
+MALLOC_DEFINE(M_IFMADDR, "ether_multi", "link-level multicast address");
+
+struct ifnet *
+ifnet_byindex_locked(u_short idx)
+{
+
+ if (idx > V_if_index)
+ return (NULL);
+ if (V_ifindex_table[idx].ife_ifnet == IFNET_HOLD)
+ return (NULL);
+ return (V_ifindex_table[idx].ife_ifnet);
+}
+
+struct ifnet *
+ifnet_byindex(u_short idx)
+{
+ struct ifnet *ifp;
+
+ IFNET_RLOCK_NOSLEEP();
+ ifp = ifnet_byindex_locked(idx);
+ IFNET_RUNLOCK_NOSLEEP();
+ return (ifp);
+}
+
+struct ifnet *
+ifnet_byindex_ref(u_short idx)
+{
+ struct ifnet *ifp;
+
+ IFNET_RLOCK_NOSLEEP();
+ ifp = ifnet_byindex_locked(idx);
+ if (ifp == NULL || (ifp->if_flags & IFF_DYING)) {
+ IFNET_RUNLOCK_NOSLEEP();
+ return (NULL);
+ }
+ if_ref(ifp);
+ IFNET_RUNLOCK_NOSLEEP();
+ return (ifp);
+}
+
+/*
+ * Allocate an ifindex array entry; return 0 on success or an error on
+ * failure.
+ */
+static int
+ifindex_alloc_locked(u_short *idxp)
+{
+ u_short idx;
+
+ IFNET_WLOCK_ASSERT();
+
+ /*
+ * Try to find an empty slot below V_if_index. If we fail, take the
+ * next slot.
+ */
+ for (idx = 1; idx <= V_if_index; idx++) {
+ if (V_ifindex_table[idx].ife_ifnet == NULL)
+ break;
+ }
+
+ /* Catch if_index overflow. */
+ if (idx < 1)
+ return (ENOSPC);
+ if (idx > V_if_index)
+ V_if_index = idx;
+ if (V_if_index >= V_if_indexlim)
+ if_grow();
+ *idxp = idx;
+ return (0);
+}
+
+static void
+ifindex_free_locked(u_short idx)
+{
+
+ IFNET_WLOCK_ASSERT();
+
+ V_ifindex_table[idx].ife_ifnet = NULL;
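+	/* Shrink V_if_index while the highest slots are unused. */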
+ while (V_if_index > 0 &&
+ V_ifindex_table[V_if_index].ife_ifnet == NULL)
+ V_if_index--;
+}
+
+static void
+ifindex_free(u_short idx)
+{
+
+ IFNET_WLOCK();
+ ifindex_free_locked(idx);
+ IFNET_WUNLOCK();
+}
+
+static void
+ifnet_setbyindex_locked(u_short idx, struct ifnet *ifp)
+{
+
+ IFNET_WLOCK_ASSERT();
+
+ V_ifindex_table[idx].ife_ifnet = ifp;
+}
+
+static void
+ifnet_setbyindex(u_short idx, struct ifnet *ifp)
+{
+
+ IFNET_WLOCK();
+ ifnet_setbyindex_locked(idx, ifp);
+ IFNET_WUNLOCK();
+}
+
+struct ifaddr *
+ifaddr_byindex(u_short idx)
+{
+	struct ifnet *ifp;
+	struct ifaddr *ifa = NULL;
+
+	IFNET_RLOCK_NOSLEEP();
+	/* A stale index may yield no ifnet; guard before dereferencing. */
+	ifp = ifnet_byindex_locked(idx);
+	if (ifp != NULL)
+		ifa = ifp->if_addr;
+ if (ifa != NULL)
+ ifa_ref(ifa);
+ IFNET_RUNLOCK_NOSLEEP();
+ return (ifa);
+}
+
+/*
+ * Network interface utility routines.
+ *
+ * Routines with ifa_ifwith* names take sockaddr *'s as
+ * parameters.
+ */
+
+static void
+vnet_if_init(const void *unused __unused)
+{
+
+ TAILQ_INIT(&V_ifnet);
+ TAILQ_INIT(&V_ifg_head);
+ if_grow(); /* create initial table */
+ vnet_if_clone_init();
+}
+VNET_SYSINIT(vnet_if_init, SI_SUB_INIT_IF, SI_ORDER_FIRST, vnet_if_init,
+ NULL);
+
+/* ARGSUSED*/
+static void
+if_init(void *dummy __unused)
+{
+
+ IFNET_LOCK_INIT();
+ if_clone_init();
+}
+SYSINIT(interfaces, SI_SUB_INIT_IF, SI_ORDER_SECOND, if_init, NULL);
+
+
+#ifdef VIMAGE
+static void
+vnet_if_uninit(const void *unused __unused)
+{
+
+ VNET_ASSERT(TAILQ_EMPTY(&V_ifnet));
+ VNET_ASSERT(TAILQ_EMPTY(&V_ifg_head));
+
+ free((caddr_t)V_ifindex_table, M_IFNET);
+}
+VNET_SYSUNINIT(vnet_if_uninit, SI_SUB_INIT_IF, SI_ORDER_FIRST,
+ vnet_if_uninit, NULL);
+#endif
+
+static void
+if_grow(void)
+{
+ u_int n;
+ struct ifindex_entry *e;
+
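+	/* Double the table; n/2 below is exactly the old table's size. */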
+ V_if_indexlim <<= 1;
+ n = V_if_indexlim * sizeof(*e);
+ e = malloc(n, M_IFNET, M_WAITOK | M_ZERO);
+ if (V_ifindex_table != NULL) {
+ memcpy((caddr_t)e, (caddr_t)V_ifindex_table, n/2);
+ free((caddr_t)V_ifindex_table, M_IFNET);
+ }
+ V_ifindex_table = e;
+}
+
+static void
+if_check(void *dummy __unused)
+{
+
+ /*
+ * If at least one interface added during boot uses
+ * if_watchdog then start the timer.
+ */
+ if (slowtimo_started)
+ if_slowtimo(0);
+}
+
+/*
+ * Allocate a struct ifnet and an index for an interface. A layer 2
+ * common structure will also be allocated if an allocation routine is
+ * registered for the passed type.
+ */
+struct ifnet *
+if_alloc(u_char type)
+{
+ struct ifnet *ifp;
+ u_short idx;
+
+ ifp = malloc(sizeof(struct ifnet), M_IFNET, M_WAITOK|M_ZERO);
+ IFNET_WLOCK();
+ if (ifindex_alloc_locked(&idx) != 0) {
+ IFNET_WUNLOCK();
+ free(ifp, M_IFNET);
+ return (NULL);
+ }
+ ifnet_setbyindex_locked(idx, IFNET_HOLD);
+ IFNET_WUNLOCK();
+ ifp->if_index = idx;
+ ifp->if_type = type;
+ ifp->if_alloctype = type;
+ if (if_com_alloc[type] != NULL) {
+ ifp->if_l2com = if_com_alloc[type](type, ifp);
+ if (ifp->if_l2com == NULL) {
+ free(ifp, M_IFNET);
+ ifindex_free(idx);
+ return (NULL);
+ }
+ }
+
+ IF_ADDR_LOCK_INIT(ifp);
+ TASK_INIT(&ifp->if_linktask, 0, do_link_state_change, ifp);
+ ifp->if_afdata_initialized = 0;
+ IF_AFDATA_LOCK_INIT(ifp);
+ TAILQ_INIT(&ifp->if_addrhead);
+ TAILQ_INIT(&ifp->if_prefixhead);
+ TAILQ_INIT(&ifp->if_multiaddrs);
+ TAILQ_INIT(&ifp->if_groups);
+#ifdef MAC
+ mac_ifnet_init(ifp);
+#endif
+ ifq_init(&ifp->if_snd, ifp);
+
+ refcount_init(&ifp->if_refcount, 1); /* Index reference. */
+ ifnet_setbyindex(ifp->if_index, ifp);
+ return (ifp);
+}
+
+/*
+ * Do the actual work of freeing a struct ifnet, associated index, and layer
+ * 2 common structure. This call is made when the last reference to an
+ * interface is released.
+ */
+static void
+if_free_internal(struct ifnet *ifp)
+{
+
+ KASSERT((ifp->if_flags & IFF_DYING),
+ ("if_free_internal: interface not dying"));
+
+ IFNET_WLOCK();
+ KASSERT(ifp == ifnet_byindex_locked(ifp->if_index),
+ ("%s: freeing unallocated ifnet", ifp->if_xname));
+
+ ifindex_free_locked(ifp->if_index);
+ IFNET_WUNLOCK();
+
+ if (if_com_free[ifp->if_alloctype] != NULL)
+ if_com_free[ifp->if_alloctype](ifp->if_l2com,
+ ifp->if_alloctype);
+
+#ifdef MAC
+ mac_ifnet_destroy(ifp);
+#endif /* MAC */
+ if (ifp->if_description != NULL)
+ free(ifp->if_description, M_IFDESCR);
+ IF_AFDATA_DESTROY(ifp);
+ IF_ADDR_LOCK_DESTROY(ifp);
+ ifq_delete(&ifp->if_snd);
+ free(ifp, M_IFNET);
+}
+
+/*
+ * This version should only be called by interfaces that switch their type
+ * after calling if_alloc(). if_free_type() will go away again now that we
+ * have if_alloctype to cache the original allocation type. For now, assert
+ * that they match, since we require that in practice.
+ */
+void
+if_free_type(struct ifnet *ifp, u_char type)
+{
+
+ KASSERT(ifp->if_alloctype == type,
+ ("if_free_type: type (%d) != alloctype (%d)", type,
+ ifp->if_alloctype));
+
+ ifp->if_flags |= IFF_DYING; /* XXX: Locking */
+ if (!refcount_release(&ifp->if_refcount))
+ return;
+ if_free_internal(ifp);
+}
+
+/*
+ * This is the normal version of if_free(), used by device drivers to free a
+ * detached network interface. The contents of if_free_type() will move into
+ * here when if_free_type() goes away.
+ */
+void
+if_free(struct ifnet *ifp)
+{
+
+ if_free_type(ifp, ifp->if_alloctype);
+}
+
+/*
+ * Interfaces to keep an ifnet type-stable despite the possibility of the
+ * driver calling if_free(). If there are additional references, we defer
+ * freeing the underlying data structure.
+ */
+void
+if_ref(struct ifnet *ifp)
+{
+
+ /* We don't assert the ifnet list lock here, but arguably should. */
+ refcount_acquire(&ifp->if_refcount);
+}
+
+void
+if_rele(struct ifnet *ifp)
+{
+
+ if (!refcount_release(&ifp->if_refcount))
+ return;
+ if_free_internal(ifp);
+}
+
+void
+ifq_init(struct ifaltq *ifq, struct ifnet *ifp)
+{
+
+ mtx_init(&ifq->ifq_mtx, ifp->if_xname, "if send queue", MTX_DEF);
+
+ if (ifq->ifq_maxlen == 0)
+ ifq->ifq_maxlen = ifqmaxlen;
+
+ ifq->altq_type = 0;
+ ifq->altq_disc = NULL;
+ ifq->altq_flags &= ALTQF_CANTCHANGE;
+ ifq->altq_tbr = NULL;
+ ifq->altq_ifp = ifp;
+}
+
+void
+ifq_delete(struct ifaltq *ifq)
+{
+ mtx_destroy(&ifq->ifq_mtx);
+}
+
+/*
+ * Perform generic interface initialization tasks and attach the interface
+ * to the list of "active" interfaces.  If the vmove flag is set on entry
+ * to if_attach_internal(), perform only a limited subset of initialization
+ * tasks, given that we are moving an ifnet that has already been fully
+ * initialized from one vnet to another.
+ *
+ * XXX:
+ * - The decision to return void and thus require this function to
+ * succeed is questionable.
+ * - We should probably do more sanity checking. For instance we don't
+ * do anything to insure if_xname is unique or non-empty.
+ */
+void
+if_attach(struct ifnet *ifp)
+{
+
+ if_attach_internal(ifp, 0);
+}
+
+static void
+if_attach_internal(struct ifnet *ifp, int vmove)
+{
+ unsigned socksize, ifasize;
+ int namelen, masklen;
+ struct sockaddr_dl *sdl;
+ struct ifaddr *ifa;
+
+ if (ifp->if_index == 0 || ifp != ifnet_byindex(ifp->if_index))
+ panic ("%s: BUG: if_attach called without if_alloc'd input()\n",
+ ifp->if_xname);
+
+#ifdef VIMAGE
+ ifp->if_vnet = curvnet;
+ if (ifp->if_home_vnet == NULL)
+ ifp->if_home_vnet = curvnet;
+#endif
+
+ if_addgroup(ifp, IFG_ALL);
+
+ getmicrotime(&ifp->if_lastchange);
+ ifp->if_data.ifi_epoch = time_uptime;
+ ifp->if_data.ifi_datalen = sizeof(struct if_data);
+
+ KASSERT((ifp->if_transmit == NULL && ifp->if_qflush == NULL) ||
+ (ifp->if_transmit != NULL && ifp->if_qflush != NULL),
+ ("transmit and qflush must both either be set or both be NULL"));
+ if (ifp->if_transmit == NULL) {
+ ifp->if_transmit = if_transmit;
+ ifp->if_qflush = if_qflush;
+ }
+
+ if (!vmove) {
+#ifdef MAC
+ mac_ifnet_create(ifp);
+#endif
+
+ /*
+ * Create a Link Level name for this device.
+ */
+ namelen = strlen(ifp->if_xname);
+ /*
+		 * Always save enough space for any possible name so we
+ * can do a rename in place later.
+ */
+ masklen = offsetof(struct sockaddr_dl, sdl_data[0]) + IFNAMSIZ;
+ socksize = masklen + ifp->if_addrlen;
+ if (socksize < sizeof(*sdl))
+ socksize = sizeof(*sdl);
+ socksize = roundup2(socksize, sizeof(long));
+ ifasize = sizeof(*ifa) + 2 * socksize;
+ ifa = malloc(ifasize, M_IFADDR, M_WAITOK | M_ZERO);
+ ifa_init(ifa);
+ sdl = (struct sockaddr_dl *)(ifa + 1);
+ sdl->sdl_len = socksize;
+ sdl->sdl_family = AF_LINK;
+ bcopy(ifp->if_xname, sdl->sdl_data, namelen);
+ sdl->sdl_nlen = namelen;
+ sdl->sdl_index = ifp->if_index;
+ sdl->sdl_type = ifp->if_type;
+ ifp->if_addr = ifa;
+ ifa->ifa_ifp = ifp;
+ ifa->ifa_rtrequest = link_rtrequest;
+ ifa->ifa_addr = (struct sockaddr *)sdl;
+ sdl = (struct sockaddr_dl *)(socksize + (caddr_t)sdl);
+ ifa->ifa_netmask = (struct sockaddr *)sdl;
+ sdl->sdl_len = masklen;
+ while (namelen != 0)
+ sdl->sdl_data[--namelen] = 0xff;
+ TAILQ_INSERT_HEAD(&ifp->if_addrhead, ifa, ifa_link);
+ /* Reliably crash if used uninitialized. */
+ ifp->if_broadcastaddr = NULL;
+ }
+#ifdef VIMAGE
+ else {
+ /*
+ * Update the interface index in the link layer address
+ * of the interface.
+ */
+ for (ifa = ifp->if_addr; ifa != NULL;
+ ifa = TAILQ_NEXT(ifa, ifa_link)) {
+ if (ifa->ifa_addr->sa_family == AF_LINK) {
+ sdl = (struct sockaddr_dl *)ifa->ifa_addr;
+ sdl->sdl_index = ifp->if_index;
+ }
+ }
+ }
+#endif
+
+ IFNET_WLOCK();
+ TAILQ_INSERT_TAIL(&V_ifnet, ifp, if_link);
+#ifdef VIMAGE
+ curvnet->vnet_ifcnt++;
+#endif
+ IFNET_WUNLOCK();
+
+ if (domain_init_status >= 2)
+ if_attachdomain1(ifp);
+
+ EVENTHANDLER_INVOKE(ifnet_arrival_event, ifp);
+ if (IS_DEFAULT_VNET(curvnet))
+ devctl_notify("IFNET", ifp->if_xname, "ATTACH", NULL);
+
+ /* Announce the interface. */
+ rt_ifannouncemsg(ifp, IFAN_ARRIVAL);
+
+ if (!vmove && ifp->if_watchdog != NULL) {
+ if_printf(ifp,
+ "WARNING: using obsoleted if_watchdog interface\n");
+
+ /*
+ * Note that we need if_slowtimo(). If this happens after
+ * boot, then call if_slowtimo() directly.
+ */
+ if (atomic_cmpset_int(&slowtimo_started, 0, 1) && !cold)
+ if_slowtimo(0);
+ }
+}
+
+static void
+if_attachdomain(void *dummy)
+{
+ struct ifnet *ifp;
+ int s;
+
+ s = splnet();
+ TAILQ_FOREACH(ifp, &V_ifnet, if_link)
+ if_attachdomain1(ifp);
+ splx(s);
+}
+SYSINIT(domainifattach, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_SECOND,
+ if_attachdomain, NULL);
+
+static void
+if_attachdomain1(struct ifnet *ifp)
+{
+ struct domain *dp;
+ int s;
+
+ s = splnet();
+
+ /*
+	 * Since dp->dom_ifattach calls malloc() with M_WAITOK, we
+	 * cannot fully serialize ifp->if_afdata initialization.
+ */
+ if (IF_AFDATA_TRYLOCK(ifp) == 0) {
+ splx(s);
+ return;
+ }
+ if (ifp->if_afdata_initialized >= domain_init_status) {
+ IF_AFDATA_UNLOCK(ifp);
+ splx(s);
+ printf("if_attachdomain called more than once on %s\n",
+ ifp->if_xname);
+ return;
+ }
+ ifp->if_afdata_initialized = domain_init_status;
+ IF_AFDATA_UNLOCK(ifp);
+
+ /* address family dependent data region */
+ bzero(ifp->if_afdata, sizeof(ifp->if_afdata));
+ for (dp = domains; dp; dp = dp->dom_next) {
+ if (dp->dom_ifattach)
+ ifp->if_afdata[dp->dom_family] =
+ (*dp->dom_ifattach)(ifp);
+ }
+
+ splx(s);
+}
+
+/*
+ * Remove any unicast or broadcast network addresses from an interface.
+ */
+void
+if_purgeaddrs(struct ifnet *ifp)
+{
+ struct ifaddr *ifa, *next;
+
+ TAILQ_FOREACH_SAFE(ifa, &ifp->if_addrhead, ifa_link, next) {
+ if (ifa->ifa_addr->sa_family == AF_LINK)
+ continue;
+#ifdef INET
+ /* XXX: Ugly!! ad hoc just for INET */
+ if (ifa->ifa_addr->sa_family == AF_INET) {
+ struct ifaliasreq ifr;
+
+ bzero(&ifr, sizeof(ifr));
+ ifr.ifra_addr = *ifa->ifa_addr;
+ if (ifa->ifa_dstaddr)
+ ifr.ifra_broadaddr = *ifa->ifa_dstaddr;
+ if (in_control(NULL, SIOCDIFADDR, (caddr_t)&ifr, ifp,
+ NULL) == 0)
+ continue;
+ }
+#endif /* INET */
+#ifdef INET6
+ if (ifa->ifa_addr->sa_family == AF_INET6) {
+ in6_purgeaddr(ifa);
+ /* ifp_addrhead is already updated */
+ continue;
+ }
+#endif /* INET6 */
+ TAILQ_REMOVE(&ifp->if_addrhead, ifa, ifa_link);
+ ifa_free(ifa);
+ }
+}
+
+/*
+ * Remove any multicast network addresses from an interface when an ifnet
+ * is going away.
+ */
+static void
+if_purgemaddrs(struct ifnet *ifp)
+{
+ struct ifmultiaddr *ifma;
+ struct ifmultiaddr *next;
+
+ IF_ADDR_LOCK(ifp);
+ TAILQ_FOREACH_SAFE(ifma, &ifp->if_multiaddrs, ifma_link, next)
+ if_delmulti_locked(ifp, ifma, 1);
+ IF_ADDR_UNLOCK(ifp);
+}
+
+/*
+ * Detach an interface, removing it from the list of "active" interfaces.
+ * If vmove flag is set on entry to if_detach_internal(), perform only a
+ * limited subset of cleanup tasks, given that we are moving an ifnet from
+ * one vnet to another, where it must be fully operational.
+ *
+ * XXXRW: There are some significant questions about event ordering, and
+ * how to prevent things from starting to use the interface during detach.
+ */
+void
+if_detach(struct ifnet *ifp)
+{
+
+ if_detach_internal(ifp, 0);
+}
+
+static void
+if_detach_internal(struct ifnet *ifp, int vmove)
+{
+ struct ifaddr *ifa;
+ struct radix_node_head *rnh;
+ int i, j;
+ struct domain *dp;
+ struct ifnet *iter;
+ int found = 0;
+
+ IFNET_WLOCK();
+ TAILQ_FOREACH(iter, &V_ifnet, if_link)
+ if (iter == ifp) {
+ TAILQ_REMOVE(&V_ifnet, ifp, if_link);
+ found = 1;
+ break;
+ }
+#ifdef VIMAGE
+ if (found)
+ curvnet->vnet_ifcnt--;
+#endif
+ IFNET_WUNLOCK();
+ if (!found) {
+ if (vmove)
+ panic("%s: ifp=%p not on the ifnet tailq %p",
+ __func__, ifp, &V_ifnet);
+ else
+ return; /* XXX this should panic as well? */
+ }
+
+ /*
+ * Remove/wait for pending events.
+ */
+ taskqueue_drain(taskqueue_swi, &ifp->if_linktask);
+
+ /*
+ * Remove routes and flush queues.
+ */
+ if_down(ifp);
+#ifdef ALTQ
+ if (ALTQ_IS_ENABLED(&ifp->if_snd))
+ altq_disable(&ifp->if_snd);
+ if (ALTQ_IS_ATTACHED(&ifp->if_snd))
+ altq_detach(&ifp->if_snd);
+#endif
+
+ if_purgeaddrs(ifp);
+
+#ifdef INET
+ in_ifdetach(ifp);
+#endif
+
+#ifdef INET6
+ /*
+ * Remove all IPv6 kernel structs related to ifp. This should be done
+ * before removing routing entries below, since IPv6 interface direct
+ * routes are expected to be removed by the IPv6-specific kernel API.
+	 * Otherwise, the kernel will detect the inconsistency and complain.
+ */
+ in6_ifdetach(ifp);
+#endif
+ if_purgemaddrs(ifp);
+
+ if (!vmove) {
+ /*
+ * Prevent further calls into the device driver via ifnet.
+ */
+ if_dead(ifp);
+
+ /*
+ * Remove link ifaddr pointer and maybe decrement if_index.
+ * Clean up all addresses.
+ */
+ ifp->if_addr = NULL;
+
+ /* We can now free link ifaddr. */
+ if (!TAILQ_EMPTY(&ifp->if_addrhead)) {
+ ifa = TAILQ_FIRST(&ifp->if_addrhead);
+ TAILQ_REMOVE(&ifp->if_addrhead, ifa, ifa_link);
+ ifa_free(ifa);
+ }
+ }
+
+ /*
+	 * Delete all remaining routes using this interface.
+	 * Unfortunately, the only way to do this is to slog through
+ * the entire routing table looking for routes which point
+ * to this interface...oh well...
+ */
+ for (i = 1; i <= AF_MAX; i++) {
+ for (j = 0; j < rt_numfibs; j++) {
+ rnh = rt_tables_get_rnh(j, i);
+ if (rnh == NULL)
+ continue;
+ RADIX_NODE_HEAD_LOCK(rnh);
+ (void) rnh->rnh_walktree(rnh, if_rtdel, ifp);
+ RADIX_NODE_HEAD_UNLOCK(rnh);
+ }
+ }
+
+ /* Announce that the interface is gone. */
+ rt_ifannouncemsg(ifp, IFAN_DEPARTURE);
+ EVENTHANDLER_INVOKE(ifnet_departure_event, ifp);
+ if (IS_DEFAULT_VNET(curvnet))
+ devctl_notify("IFNET", ifp->if_xname, "DETACH", NULL);
+ if_delgroups(ifp);
+
+ /*
+ * We cannot hold the lock over dom_ifdetach calls as they might
+ * sleep, for example trying to drain a callout, thus open up the
+ * theoretical race with re-attaching.
+ */
+ IF_AFDATA_LOCK(ifp);
+ i = ifp->if_afdata_initialized;
+ ifp->if_afdata_initialized = 0;
+ IF_AFDATA_UNLOCK(ifp);
+ for (dp = domains; i > 0 && dp; dp = dp->dom_next) {
+ if (dp->dom_ifdetach && ifp->if_afdata[dp->dom_family])
+ (*dp->dom_ifdetach)(ifp,
+ ifp->if_afdata[dp->dom_family]);
+ }
+}
+
+#ifdef VIMAGE
+/*
+ * if_vmove() performs a limited version of if_detach() in current
+ * vnet and if_attach()es the ifnet to the vnet specified as 2nd arg.
+ * An attempt is made to shrink if_index in the current vnet, to find
+ * an unused if_index in the target vnet, calling if_grow() if necessary,
+ * and finally to find an unused if_xname for the target vnet.
+ */
+void
+if_vmove(struct ifnet *ifp, struct vnet *new_vnet)
+{
+ u_short idx;
+
+ /*
+ * Detach from current vnet, but preserve LLADDR info, do not
+ * mark as dead etc. so that the ifnet can be reattached later.
+ */
+ if_detach_internal(ifp, 1);
+
+ /*
+ * Unlink the ifnet from ifindex_table[] in current vnet, and shrink
+ * the if_index for that vnet if possible.
+ *
+ * NOTE: IFNET_WLOCK/IFNET_WUNLOCK() are assumed to be unvirtualized,
+ * or we'd lock on one vnet and unlock on another.
+ */
+ IFNET_WLOCK();
+ ifindex_free_locked(ifp->if_index);
+ IFNET_WUNLOCK();
+
+ /*
+ * Perform interface-specific reassignment tasks, if provided by
+ * the driver.
+ */
+ if (ifp->if_reassign != NULL)
+ ifp->if_reassign(ifp, new_vnet, NULL);
+
+ /*
+ * Switch to the context of the target vnet.
+ */
+ CURVNET_SET_QUIET(new_vnet);
+
+ IFNET_WLOCK();
+ if (ifindex_alloc_locked(&idx) != 0) {
+ IFNET_WUNLOCK();
+ panic("if_index overflow");
+ }
+ ifp->if_index = idx;
+ ifnet_setbyindex_locked(ifp->if_index, ifp);
+ IFNET_WUNLOCK();
+
+ if_attach_internal(ifp, 1);
+
+ CURVNET_RESTORE();
+}
+
+/*
+ * Move an ifnet to or from another child prison/vnet, specified by the jail id.
+ */
+static int
+if_vmove_loan(struct thread *td, struct ifnet *ifp, char *ifname, int jid)
+{
+ struct prison *pr;
+ struct ifnet *difp;
+
+ /* Try to find the prison within our visibility. */
+ sx_slock(&allprison_lock);
+ pr = prison_find_child(td->td_ucred->cr_prison, jid);
+ sx_sunlock(&allprison_lock);
+ if (pr == NULL)
+ return (ENXIO);
+ prison_hold_locked(pr);
+ mtx_unlock(&pr->pr_mtx);
+
+ /* Do not try to move the iface from and to the same prison. */
+ if (pr->pr_vnet == ifp->if_vnet) {
+ prison_free(pr);
+ return (EEXIST);
+ }
+
+	/* Make sure the named iface does not exist in the dst. prison/vnet. */
+ /* XXX Lock interfaces to avoid races. */
+ CURVNET_SET_QUIET(pr->pr_vnet);
+ difp = ifunit(ifname);
+ CURVNET_RESTORE();
+ if (difp != NULL) {
+ prison_free(pr);
+ return (EEXIST);
+ }
+
+ /* Move the interface into the child jail/vnet. */
+ if_vmove(ifp, pr->pr_vnet);
+
+ /* Report the new if_xname back to the userland. */
+ sprintf(ifname, "%s", ifp->if_xname);
+
+ prison_free(pr);
+ return (0);
+}
+
+static int
+if_vmove_reclaim(struct thread *td, char *ifname, int jid)
+{
+ struct prison *pr;
+ struct vnet *vnet_dst;
+ struct ifnet *ifp;
+
+ /* Try to find the prison within our visibility. */
+ sx_slock(&allprison_lock);
+ pr = prison_find_child(td->td_ucred->cr_prison, jid);
+ sx_sunlock(&allprison_lock);
+ if (pr == NULL)
+ return (ENXIO);
+ prison_hold_locked(pr);
+ mtx_unlock(&pr->pr_mtx);
+
+ /* Make sure the named iface exists in the source prison/vnet. */
+ CURVNET_SET(pr->pr_vnet);
+ ifp = ifunit(ifname); /* XXX Lock to avoid races. */
+ if (ifp == NULL) {
+ CURVNET_RESTORE();
+ prison_free(pr);
+ return (ENXIO);
+ }
+
+ /* Do not try to move the iface from and to the same prison. */
+ vnet_dst = TD_TO_VNET(td);
+ if (vnet_dst == ifp->if_vnet) {
+ CURVNET_RESTORE();
+ prison_free(pr);
+ return (EEXIST);
+ }
+
+ /* Get interface back from child jail/vnet. */
+ if_vmove(ifp, vnet_dst);
+ CURVNET_RESTORE();
+
+ /* Report the new if_xname back to the userland. */
+ sprintf(ifname, "%s", ifp->if_xname);
+
+ prison_free(pr);
+ return (0);
+}
+#endif /* VIMAGE */
+
+/*
+ * Add a group to an interface
+ */
+int
+if_addgroup(struct ifnet *ifp, const char *groupname)
+{
+ struct ifg_list *ifgl;
+ struct ifg_group *ifg = NULL;
+ struct ifg_member *ifgm;
+
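+	/*
+	 * Reject group names ending in a digit, as they would be
+	 * ambiguous with interface unit names.
+	 */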
+ if (groupname[0] && groupname[strlen(groupname) - 1] >= '0' &&
+ groupname[strlen(groupname) - 1] <= '9')
+ return (EINVAL);
+
+ IFNET_WLOCK();
+ TAILQ_FOREACH(ifgl, &ifp->if_groups, ifgl_next)
+ if (!strcmp(ifgl->ifgl_group->ifg_group, groupname)) {
+ IFNET_WUNLOCK();
+ return (EEXIST);
+ }
+
+ if ((ifgl = (struct ifg_list *)malloc(sizeof(struct ifg_list), M_TEMP,
+ M_NOWAIT)) == NULL) {
+ IFNET_WUNLOCK();
+ return (ENOMEM);
+ }
+
+ if ((ifgm = (struct ifg_member *)malloc(sizeof(struct ifg_member),
+ M_TEMP, M_NOWAIT)) == NULL) {
+ free(ifgl, M_TEMP);
+ IFNET_WUNLOCK();
+ return (ENOMEM);
+ }
+
+ TAILQ_FOREACH(ifg, &V_ifg_head, ifg_next)
+ if (!strcmp(ifg->ifg_group, groupname))
+ break;
+
+ if (ifg == NULL) {
+ if ((ifg = (struct ifg_group *)malloc(sizeof(struct ifg_group),
+ M_TEMP, M_NOWAIT)) == NULL) {
+ free(ifgl, M_TEMP);
+ free(ifgm, M_TEMP);
+ IFNET_WUNLOCK();
+ return (ENOMEM);
+ }
+ strlcpy(ifg->ifg_group, groupname, sizeof(ifg->ifg_group));
+ ifg->ifg_refcnt = 0;
+ TAILQ_INIT(&ifg->ifg_members);
+ EVENTHANDLER_INVOKE(group_attach_event, ifg);
+ TAILQ_INSERT_TAIL(&V_ifg_head, ifg, ifg_next);
+ }
+
+ ifg->ifg_refcnt++;
+ ifgl->ifgl_group = ifg;
+ ifgm->ifgm_ifp = ifp;
+
+ IF_ADDR_LOCK(ifp);
+ TAILQ_INSERT_TAIL(&ifg->ifg_members, ifgm, ifgm_next);
+ TAILQ_INSERT_TAIL(&ifp->if_groups, ifgl, ifgl_next);
+ IF_ADDR_UNLOCK(ifp);
+
+ IFNET_WUNLOCK();
+
+ EVENTHANDLER_INVOKE(group_change_event, groupname);
+
+ return (0);
+}
+
+/*
+ * Remove a group from an interface
+ */
+int
+if_delgroup(struct ifnet *ifp, const char *groupname)
+{
+ struct ifg_list *ifgl;
+ struct ifg_member *ifgm;
+
+ IFNET_WLOCK();
+ TAILQ_FOREACH(ifgl, &ifp->if_groups, ifgl_next)
+ if (!strcmp(ifgl->ifgl_group->ifg_group, groupname))
+ break;
+ if (ifgl == NULL) {
+ IFNET_WUNLOCK();
+ return (ENOENT);
+ }
+
+ IF_ADDR_LOCK(ifp);
+ TAILQ_REMOVE(&ifp->if_groups, ifgl, ifgl_next);
+ IF_ADDR_UNLOCK(ifp);
+
+ TAILQ_FOREACH(ifgm, &ifgl->ifgl_group->ifg_members, ifgm_next)
+ if (ifgm->ifgm_ifp == ifp)
+ break;
+
+ if (ifgm != NULL) {
+ TAILQ_REMOVE(&ifgl->ifgl_group->ifg_members, ifgm, ifgm_next);
+ free(ifgm, M_TEMP);
+ }
+
+ if (--ifgl->ifgl_group->ifg_refcnt == 0) {
+ TAILQ_REMOVE(&V_ifg_head, ifgl->ifgl_group, ifg_next);
+ EVENTHANDLER_INVOKE(group_detach_event, ifgl->ifgl_group);
+ free(ifgl->ifgl_group, M_TEMP);
+ }
+ IFNET_WUNLOCK();
+
+ free(ifgl, M_TEMP);
+
+ EVENTHANDLER_INVOKE(group_change_event, groupname);
+
+ return (0);
+}
+
+/*
+ * Remove an interface from all groups
+ */
+static void
+if_delgroups(struct ifnet *ifp)
+{
+ struct ifg_list *ifgl;
+ struct ifg_member *ifgm;
+ char groupname[IFNAMSIZ];
+
+ IFNET_WLOCK();
+ while (!TAILQ_EMPTY(&ifp->if_groups)) {
+ ifgl = TAILQ_FIRST(&ifp->if_groups);
+
+ strlcpy(groupname, ifgl->ifgl_group->ifg_group, IFNAMSIZ);
+
+ IF_ADDR_LOCK(ifp);
+ TAILQ_REMOVE(&ifp->if_groups, ifgl, ifgl_next);
+ IF_ADDR_UNLOCK(ifp);
+
+ TAILQ_FOREACH(ifgm, &ifgl->ifgl_group->ifg_members, ifgm_next)
+ if (ifgm->ifgm_ifp == ifp)
+ break;
+
+ if (ifgm != NULL) {
+ TAILQ_REMOVE(&ifgl->ifgl_group->ifg_members, ifgm,
+ ifgm_next);
+ free(ifgm, M_TEMP);
+ }
+
+ if (--ifgl->ifgl_group->ifg_refcnt == 0) {
+ TAILQ_REMOVE(&V_ifg_head, ifgl->ifgl_group, ifg_next);
+ EVENTHANDLER_INVOKE(group_detach_event,
+ ifgl->ifgl_group);
+ free(ifgl->ifgl_group, M_TEMP);
+ }
+ IFNET_WUNLOCK();
+
+ free(ifgl, M_TEMP);
+
+ EVENTHANDLER_INVOKE(group_change_event, groupname);
+
+ IFNET_WLOCK();
+ }
+ IFNET_WUNLOCK();
+}
+
+/*
+ * Stores all groups from an interface in memory pointed
+ * to by data
+ */
+static int
+if_getgroup(struct ifgroupreq *data, struct ifnet *ifp)
+{
+ int len, error;
+ struct ifg_list *ifgl;
+ struct ifg_req ifgrq, *ifgp;
+ struct ifgroupreq *ifgr = data;
+
+ if (ifgr->ifgr_len == 0) {
+ IF_ADDR_LOCK(ifp);
+ TAILQ_FOREACH(ifgl, &ifp->if_groups, ifgl_next)
+ ifgr->ifgr_len += sizeof(struct ifg_req);
+ IF_ADDR_UNLOCK(ifp);
+ return (0);
+ }
+
+ len = ifgr->ifgr_len;
+ ifgp = ifgr->ifgr_groups;
+ /* XXX: wire */
+ IF_ADDR_LOCK(ifp);
+ TAILQ_FOREACH(ifgl, &ifp->if_groups, ifgl_next) {
+ if (len < sizeof(ifgrq)) {
+ IF_ADDR_UNLOCK(ifp);
+ return (EINVAL);
+ }
+ bzero(&ifgrq, sizeof ifgrq);
+ strlcpy(ifgrq.ifgrq_group, ifgl->ifgl_group->ifg_group,
+ sizeof(ifgrq.ifgrq_group));
+ if ((error = copyout(&ifgrq, ifgp, sizeof(struct ifg_req)))) {
+ IF_ADDR_UNLOCK(ifp);
+ return (error);
+ }
+ len -= sizeof(ifgrq);
+ ifgp++;
+ }
+ IF_ADDR_UNLOCK(ifp);
+
+ return (0);
+}
+
+/*
+ * Stores all members of a group in memory pointed to by data
+ */
+static int
+if_getgroupmembers(struct ifgroupreq *data)
+{
+ struct ifgroupreq *ifgr = data;
+ struct ifg_group *ifg;
+ struct ifg_member *ifgm;
+ struct ifg_req ifgrq, *ifgp;
+ int len, error;
+
+ IFNET_RLOCK();
+ TAILQ_FOREACH(ifg, &V_ifg_head, ifg_next)
+ if (!strcmp(ifg->ifg_group, ifgr->ifgr_name))
+ break;
+ if (ifg == NULL) {
+ IFNET_RUNLOCK();
+ return (ENOENT);
+ }
+
+ if (ifgr->ifgr_len == 0) {
+ TAILQ_FOREACH(ifgm, &ifg->ifg_members, ifgm_next)
+ ifgr->ifgr_len += sizeof(ifgrq);
+ IFNET_RUNLOCK();
+ return (0);
+ }
+
+ len = ifgr->ifgr_len;
+ ifgp = ifgr->ifgr_groups;
+ TAILQ_FOREACH(ifgm, &ifg->ifg_members, ifgm_next) {
+ if (len < sizeof(ifgrq)) {
+ IFNET_RUNLOCK();
+ return (EINVAL);
+ }
+ bzero(&ifgrq, sizeof ifgrq);
+ strlcpy(ifgrq.ifgrq_member, ifgm->ifgm_ifp->if_xname,
+ sizeof(ifgrq.ifgrq_member));
+ if ((error = copyout(&ifgrq, ifgp, sizeof(struct ifg_req)))) {
+ IFNET_RUNLOCK();
+ return (error);
+ }
+ len -= sizeof(ifgrq);
+ ifgp++;
+ }
+ IFNET_RUNLOCK();
+
+ return (0);
+}
+
+/*
+ * Delete Routes for a Network Interface
+ *
+ * Called for each routing entry via the rnh->rnh_walktree() call above
+ * to delete all route entries referencing a detaching network interface.
+ *
+ * Arguments:
+ * rn pointer to node in the routing table
+ * arg argument passed to rnh->rnh_walktree() - detaching interface
+ *
+ * Returns:
+ * 0 successful
+ * errno failed - reason indicated
+ *
+ */
+static int
+if_rtdel(struct radix_node *rn, void *arg)
+{
+ struct rtentry *rt = (struct rtentry *)rn;
+ struct ifnet *ifp = arg;
+ int err;
+
+ if (rt->rt_ifp == ifp) {
+
+ /*
+ * Protect (sorta) against walktree recursion problems
+ * with cloned routes
+ */
+ if ((rt->rt_flags & RTF_UP) == 0)
+ return (0);
+
+ err = rtrequest_fib(RTM_DELETE, rt_key(rt), rt->rt_gateway,
+ rt_mask(rt), rt->rt_flags|RTF_RNH_LOCKED,
+ (struct rtentry **) NULL, rt->rt_fibnum);
+ if (err) {
+ log(LOG_WARNING, "if_rtdel: error %d\n", err);
+ }
+ }
+
+ return (0);
+}
+
+/*
+ * Wrapper functions for struct ifnet address list locking macros. These are
+ * used by kernel modules to avoid encoding programming interface or binary
+ * interface assumptions that may be violated when kernel-internal locking
+ * approaches change.
+ */
+void
+if_addr_rlock(struct ifnet *ifp)
+{
+
+ IF_ADDR_LOCK(ifp);
+}
+
+void
+if_addr_runlock(struct ifnet *ifp)
+{
+
+ IF_ADDR_UNLOCK(ifp);
+}
+
+void
+if_maddr_rlock(struct ifnet *ifp)
+{
+
+ IF_ADDR_LOCK(ifp);
+}
+
+void
+if_maddr_runlock(struct ifnet *ifp)
+{
+
+ IF_ADDR_UNLOCK(ifp);
+}
+
+/*
+ * Reference count functions for ifaddrs.
+ */
+void
+ifa_init(struct ifaddr *ifa)
+{
+
+ mtx_init(&ifa->ifa_mtx, "ifaddr", NULL, MTX_DEF);
+ refcount_init(&ifa->ifa_refcnt, 1);
+}
+
+void
+ifa_ref(struct ifaddr *ifa)
+{
+
+ refcount_acquire(&ifa->ifa_refcnt);
+}
+
+void
+ifa_free(struct ifaddr *ifa)
+{
+
+ if (refcount_release(&ifa->ifa_refcnt)) {
+ mtx_destroy(&ifa->ifa_mtx);
+ free(ifa, M_IFADDR);
+ }
+}
+
+int
+ifa_add_loopback_route(struct ifaddr *ifa, struct sockaddr *ia)
+{
+ int error = 0;
+ struct rtentry *rt = NULL;
+ struct rt_addrinfo info;
+ static struct sockaddr_dl null_sdl = {sizeof(null_sdl), AF_LINK};
+
+ bzero(&info, sizeof(info));
+ info.rti_ifp = V_loif;
+ info.rti_flags = ifa->ifa_flags | RTF_HOST | RTF_STATIC;
+ info.rti_info[RTAX_DST] = ia;
+ info.rti_info[RTAX_GATEWAY] = (struct sockaddr *)&null_sdl;
+ error = rtrequest1_fib(RTM_ADD, &info, &rt, 0);
+
+ if (error == 0 && rt != NULL) {
+ RT_LOCK(rt);
+ ((struct sockaddr_dl *)rt->rt_gateway)->sdl_type =
+ ifa->ifa_ifp->if_type;
+ ((struct sockaddr_dl *)rt->rt_gateway)->sdl_index =
+ ifa->ifa_ifp->if_index;
+ RT_REMREF(rt);
+ RT_UNLOCK(rt);
+ } else if (error != 0)
+ log(LOG_INFO, "ifa_add_loopback_route: insertion failed\n");
+
+ return (error);
+}
+
+int
+ifa_del_loopback_route(struct ifaddr *ifa, struct sockaddr *ia)
+{
+ int error = 0;
+ struct rt_addrinfo info;
+ struct sockaddr_dl null_sdl;
+
+ bzero(&null_sdl, sizeof(null_sdl));
+ null_sdl.sdl_len = sizeof(null_sdl);
+ null_sdl.sdl_family = AF_LINK;
+ null_sdl.sdl_type = ifa->ifa_ifp->if_type;
+ null_sdl.sdl_index = ifa->ifa_ifp->if_index;
+ bzero(&info, sizeof(info));
+ info.rti_flags = ifa->ifa_flags | RTF_HOST | RTF_STATIC;
+ info.rti_info[RTAX_DST] = ia;
+ info.rti_info[RTAX_GATEWAY] = (struct sockaddr *)&null_sdl;
+ error = rtrequest1_fib(RTM_DELETE, &info, NULL, 0);
+
+ if (error != 0)
+ log(LOG_INFO, "ifa_del_loopback_route: deletion failed\n");
+
+ return (error);
+}
+
+/*
+ * XXX: Because sockaddr_dl has deeper structure than the sockaddr
+ * structs used to represent other address families, it is necessary
+ * to perform a different comparison.
+ */
+
+#define sa_equal(a1, a2) \
+ (bcmp((a1), (a2), ((a1))->sa_len) == 0)
+
+#define sa_dl_equal(a1, a2) \
+ ((((struct sockaddr_dl *)(a1))->sdl_len == \
+ ((struct sockaddr_dl *)(a2))->sdl_len) && \
+ (bcmp(LLADDR((struct sockaddr_dl *)(a1)), \
+ LLADDR((struct sockaddr_dl *)(a2)), \
+ ((struct sockaddr_dl *)(a1))->sdl_alen) == 0))
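+
+/*
+ * Note that sa_dl_equal() compares the total length and the link-level
+ * address bytes (via LLADDR() and sdl_alen) but ignores the interface
+ * name and index embedded in the sockaddr_dl header.
+ */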
+
+/*
+ * Locate an interface based on a complete address.
+ */
+/*ARGSUSED*/
+static struct ifaddr *
+ifa_ifwithaddr_internal(struct sockaddr *addr, int getref)
+{
+ struct ifnet *ifp;
+ struct ifaddr *ifa;
+
+ IFNET_RLOCK_NOSLEEP();
+ TAILQ_FOREACH(ifp, &V_ifnet, if_link) {
+ IF_ADDR_LOCK(ifp);
+ TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
+ if (ifa->ifa_addr->sa_family != addr->sa_family)
+ continue;
+ if (sa_equal(addr, ifa->ifa_addr)) {
+ if (getref)
+ ifa_ref(ifa);
+ IF_ADDR_UNLOCK(ifp);
+ goto done;
+ }
+ /* IP6 doesn't have broadcast */
+ if ((ifp->if_flags & IFF_BROADCAST) &&
+ ifa->ifa_broadaddr &&
+ ifa->ifa_broadaddr->sa_len != 0 &&
+ sa_equal(ifa->ifa_broadaddr, addr)) {
+ if (getref)
+ ifa_ref(ifa);
+ IF_ADDR_UNLOCK(ifp);
+ goto done;
+ }
+ }
+ IF_ADDR_UNLOCK(ifp);
+ }
+ ifa = NULL;
+done:
+ IFNET_RUNLOCK_NOSLEEP();
+ return (ifa);
+}
+
+struct ifaddr *
+ifa_ifwithaddr(struct sockaddr *addr)
+{
+
+ return (ifa_ifwithaddr_internal(addr, 1));
+}
+
+int
+ifa_ifwithaddr_check(struct sockaddr *addr)
+{
+
+ return (ifa_ifwithaddr_internal(addr, 0) != NULL);
+}
+
+/*
+ * Locate an interface based on the broadcast address.
+ */
+/* ARGSUSED */
+struct ifaddr *
+ifa_ifwithbroadaddr(struct sockaddr *addr)
+{
+ struct ifnet *ifp;
+ struct ifaddr *ifa;
+
+ IFNET_RLOCK_NOSLEEP();
+ TAILQ_FOREACH(ifp, &V_ifnet, if_link) {
+ IF_ADDR_LOCK(ifp);
+ TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
+ if (ifa->ifa_addr->sa_family != addr->sa_family)
+ continue;
+ if ((ifp->if_flags & IFF_BROADCAST) &&
+ ifa->ifa_broadaddr &&
+ ifa->ifa_broadaddr->sa_len != 0 &&
+ sa_equal(ifa->ifa_broadaddr, addr)) {
+ ifa_ref(ifa);
+ IF_ADDR_UNLOCK(ifp);
+ goto done;
+ }
+ }
+ IF_ADDR_UNLOCK(ifp);
+ }
+ ifa = NULL;
+done:
+ IFNET_RUNLOCK_NOSLEEP();
+ return (ifa);
+}
+
+/*
+ * Locate the point to point interface with a given destination address.
+ */
+/*ARGSUSED*/
+struct ifaddr *
+ifa_ifwithdstaddr(struct sockaddr *addr)
+{
+ struct ifnet *ifp;
+ struct ifaddr *ifa;
+
+ IFNET_RLOCK_NOSLEEP();
+ TAILQ_FOREACH(ifp, &V_ifnet, if_link) {
+ if ((ifp->if_flags & IFF_POINTOPOINT) == 0)
+ continue;
+ IF_ADDR_LOCK(ifp);
+ TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
+ if (ifa->ifa_addr->sa_family != addr->sa_family)
+ continue;
+ if (ifa->ifa_dstaddr != NULL &&
+ sa_equal(addr, ifa->ifa_dstaddr)) {
+ ifa_ref(ifa);
+ IF_ADDR_UNLOCK(ifp);
+ goto done;
+ }
+ }
+ IF_ADDR_UNLOCK(ifp);
+ }
+ ifa = NULL;
+done:
+ IFNET_RUNLOCK_NOSLEEP();
+ return (ifa);
+}
+
+/*
+ * Find an interface on a specific network.  If several match,
+ * the most specific one found is chosen.
+ */
+struct ifaddr *
+ifa_ifwithnet(struct sockaddr *addr, int ignore_ptp)
+{
+ struct ifnet *ifp;
+ struct ifaddr *ifa;
+ struct ifaddr *ifa_maybe = NULL;
+ u_int af = addr->sa_family;
+ char *addr_data = addr->sa_data, *cplim;
+
+ /*
+ * AF_LINK addresses can be looked up directly by their index number,
+ * so do that if we can.
+ */
+ if (af == AF_LINK) {
+ struct sockaddr_dl *sdl = (struct sockaddr_dl *)addr;
+ if (sdl->sdl_index && sdl->sdl_index <= V_if_index)
+ return (ifaddr_byindex(sdl->sdl_index));
+ }
+
+ /*
+	 * Scan through each interface, looking for ones that have addresses
+ * in this address family. Maintain a reference on ifa_maybe once
+ * we find one, as we release the IF_ADDR_LOCK() that kept it stable
+ * when we move onto the next interface.
+ */
+ IFNET_RLOCK_NOSLEEP();
+ TAILQ_FOREACH(ifp, &V_ifnet, if_link) {
+ IF_ADDR_LOCK(ifp);
+ TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
+ char *cp, *cp2, *cp3;
+
+ if (ifa->ifa_addr->sa_family != af)
+next: continue;
+ if (af == AF_INET &&
+ ifp->if_flags & IFF_POINTOPOINT && !ignore_ptp) {
+ /*
+ * This is a bit broken as it doesn't
+ * take into account that the remote end may
+ * be a single node in the network we are
+ * looking for.
+ * The trouble is that we don't know the
+ * netmask for the remote end.
+ */
+ if (ifa->ifa_dstaddr != NULL &&
+ sa_equal(addr, ifa->ifa_dstaddr)) {
+ ifa_ref(ifa);
+ IF_ADDR_UNLOCK(ifp);
+ goto done;
+ }
+ } else {
+ /*
+ * if we have a special address handler,
+ * then use it instead of the generic one.
+ */
+ if (ifa->ifa_claim_addr) {
+ if ((*ifa->ifa_claim_addr)(ifa, addr)) {
+ ifa_ref(ifa);
+ IF_ADDR_UNLOCK(ifp);
+ goto done;
+ }
+ continue;
+ }
+
+ /*
+ * Scan all the bits in the ifa's address.
+				 * If a bit disagrees with what we are
+ * looking for, mask it with the netmask
+ * to see if it really matters.
+ * (A byte at a time)
+ */
+ if (ifa->ifa_netmask == 0)
+ continue;
+ cp = addr_data;
+ cp2 = ifa->ifa_addr->sa_data;
+ cp3 = ifa->ifa_netmask->sa_data;
+ cplim = ifa->ifa_netmask->sa_len
+ + (char *)ifa->ifa_netmask;
+ while (cp3 < cplim)
+ if ((*cp++ ^ *cp2++) & *cp3++)
+ goto next; /* next address! */
+ /*
+ * If the netmask of what we just found
+ * is more specific than what we had before
+ * (if we had one) then remember the new one
+ * before continuing to search
+ * for an even better one.
+ */
+ if (ifa_maybe == NULL ||
+ rn_refines((caddr_t)ifa->ifa_netmask,
+ (caddr_t)ifa_maybe->ifa_netmask)) {
+ if (ifa_maybe != NULL)
+ ifa_free(ifa_maybe);
+ ifa_maybe = ifa;
+ ifa_ref(ifa_maybe);
+ }
+ }
+ }
+ IF_ADDR_UNLOCK(ifp);
+ }
+ ifa = ifa_maybe;
+ ifa_maybe = NULL;
+done:
+ IFNET_RUNLOCK_NOSLEEP();
+ if (ifa_maybe != NULL)
+ ifa_free(ifa_maybe);
+ return (ifa);
+}
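+
+/*
+ * Editor's illustrative sketch (not part of the imported sources): the
+ * byte-wise masked comparison performed inline by ifa_ifwithnet() above,
+ * extracted into a standalone predicate.  It assumes both sockaddrs have
+ * the same family and layout; the helper name is hypothetical.
+ */
+static int
+sa_match_under_mask(const struct sockaddr *addr, const struct sockaddr *ifaddr,
+    const struct sockaddr *mask)
+{
+	const char *cp = addr->sa_data;
+	const char *cp2 = ifaddr->sa_data;
+	const char *cp3 = mask->sa_data;
+	const char *cplim = (const char *)mask + mask->sa_len;
+
+	/* A differing bit only matters where the netmask has a one bit. */
+	while (cp3 < cplim)
+		if ((*cp++ ^ *cp2++) & *cp3++)
+			return (0);
+	return (1);
+}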
+
+/*
+ * Find an interface address specific to an interface best matching
+ * a given address.
+ */
+struct ifaddr *
+ifaof_ifpforaddr(struct sockaddr *addr, struct ifnet *ifp)
+{
+ struct ifaddr *ifa;
+ char *cp, *cp2, *cp3;
+ char *cplim;
+ struct ifaddr *ifa_maybe = NULL;
+ u_int af = addr->sa_family;
+
+ if (af >= AF_MAX)
+ return (NULL);
+ IF_ADDR_LOCK(ifp);
+ TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
+ if (ifa->ifa_addr->sa_family != af)
+ continue;
+ if (ifa_maybe == NULL)
+ ifa_maybe = ifa;
+ if (ifa->ifa_netmask == 0) {
+ if (sa_equal(addr, ifa->ifa_addr) ||
+ (ifa->ifa_dstaddr &&
+ sa_equal(addr, ifa->ifa_dstaddr)))
+ goto done;
+ continue;
+ }
+ if (ifp->if_flags & IFF_POINTOPOINT) {
+ if (sa_equal(addr, ifa->ifa_dstaddr))
+ goto done;
+ } else {
+ cp = addr->sa_data;
+ cp2 = ifa->ifa_addr->sa_data;
+ cp3 = ifa->ifa_netmask->sa_data;
+ cplim = ifa->ifa_netmask->sa_len + (char *)ifa->ifa_netmask;
+ for (; cp3 < cplim; cp3++)
+ if ((*cp++ ^ *cp2++) & *cp3)
+ break;
+ if (cp3 == cplim)
+ goto done;
+ }
+ }
+ ifa = ifa_maybe;
+done:
+ if (ifa != NULL)
+ ifa_ref(ifa);
+ IF_ADDR_UNLOCK(ifp);
+ return (ifa);
+}
+
+#include <rtems/freebsd/net/if_llatbl.h>
+
+/*
+ * Default action when installing a route with a Link Level gateway.
+ * Lookup an appropriate real ifa to point to.
+ * This should be moved to /sys/net/link.c eventually.
+ */
+static void
+link_rtrequest(int cmd, struct rtentry *rt, struct rt_addrinfo *info)
+{
+ struct ifaddr *ifa, *oifa;
+ struct sockaddr *dst;
+ struct ifnet *ifp;
+
+ RT_LOCK_ASSERT(rt);
+
+ if (cmd != RTM_ADD || ((ifa = rt->rt_ifa) == 0) ||
+ ((ifp = ifa->ifa_ifp) == 0) || ((dst = rt_key(rt)) == 0))
+ return;
+ ifa = ifaof_ifpforaddr(dst, ifp);
+ if (ifa) {
+ oifa = rt->rt_ifa;
+ rt->rt_ifa = ifa;
+ ifa_free(oifa);
+ if (ifa->ifa_rtrequest && ifa->ifa_rtrequest != link_rtrequest)
+ ifa->ifa_rtrequest(cmd, rt, info);
+ }
+}
+
+/*
+ * Mark an interface down and notify protocols of
+ * the transition.
+ * NOTE: must be called at splnet or equivalent.
+ */
+static void
+if_unroute(struct ifnet *ifp, int flag, int fam)
+{
+ struct ifaddr *ifa;
+
+ KASSERT(flag == IFF_UP, ("if_unroute: flag != IFF_UP"));
+
+ ifp->if_flags &= ~flag;
+ getmicrotime(&ifp->if_lastchange);
+ TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link)
+ if (fam == PF_UNSPEC || (fam == ifa->ifa_addr->sa_family))
+ pfctlinput(PRC_IFDOWN, ifa->ifa_addr);
+ ifp->if_qflush(ifp);
+
+ if (ifp->if_carp)
+ (*carp_linkstate_p)(ifp);
+ rt_ifmsg(ifp);
+}
+
+/*
+ * Mark an interface up and notify protocols of
+ * the transition.
+ * NOTE: must be called at splnet or equivalent.
+ */
+static void
+if_route(struct ifnet *ifp, int flag, int fam)
+{
+ struct ifaddr *ifa;
+
+ KASSERT(flag == IFF_UP, ("if_route: flag != IFF_UP"));
+
+ ifp->if_flags |= flag;
+ getmicrotime(&ifp->if_lastchange);
+ TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link)
+ if (fam == PF_UNSPEC || (fam == ifa->ifa_addr->sa_family))
+ pfctlinput(PRC_IFUP, ifa->ifa_addr);
+ if (ifp->if_carp)
+ (*carp_linkstate_p)(ifp);
+ rt_ifmsg(ifp);
+#ifdef INET6
+ in6_if_up(ifp);
+#endif
+}
+
+void (*vlan_link_state_p)(struct ifnet *, int); /* XXX: private from if_vlan */
+void (*vlan_trunk_cap_p)(struct ifnet *); /* XXX: private from if_vlan */
+
+/*
+ * Handle a change in the interface link state.  To avoid LORs
+ * between the driver lock and upper layer locks, as well as possible
+ * recursions, we post the event to a taskqueue where all the work
+ * is done in the static do_link_state_change().
+ */
+void
+if_link_state_change(struct ifnet *ifp, int link_state)
+{
+ /* Return if state hasn't changed. */
+ if (ifp->if_link_state == link_state)
+ return;
+
+ ifp->if_link_state = link_state;
+
+ taskqueue_enqueue(taskqueue_swi, &ifp->if_linktask);
+}
+
+static void
+do_link_state_change(void *arg, int pending)
+{
+ struct ifnet *ifp = (struct ifnet *)arg;
+ int link_state = ifp->if_link_state;
+ CURVNET_SET(ifp->if_vnet);
+
+ /* Notify that the link state has changed. */
+ rt_ifmsg(ifp);
+ if (ifp->if_vlantrunk != NULL)
+ (*vlan_link_state_p)(ifp, 0);
+
+ if ((ifp->if_type == IFT_ETHER || ifp->if_type == IFT_L2VLAN) &&
+ IFP2AC(ifp)->ac_netgraph != NULL)
+ (*ng_ether_link_state_p)(ifp, link_state);
+ if (ifp->if_carp)
+ (*carp_linkstate_p)(ifp);
+ if (ifp->if_bridge) {
+ KASSERT(bstp_linkstate_p != NULL,("if_bridge bstp not loaded!"));
+ (*bstp_linkstate_p)(ifp, link_state);
+ }
+ if (ifp->if_lagg) {
+ KASSERT(lagg_linkstate_p != NULL,("if_lagg not loaded!"));
+ (*lagg_linkstate_p)(ifp, link_state);
+ }
+
+ if (IS_DEFAULT_VNET(curvnet))
+ devctl_notify("IFNET", ifp->if_xname,
+ (link_state == LINK_STATE_UP) ? "LINK_UP" : "LINK_DOWN",
+ NULL);
+ if (pending > 1)
+ if_printf(ifp, "%d link states coalesced\n", pending);
+ if (log_link_state_change)
+ log(LOG_NOTICE, "%s: link state changed to %s\n", ifp->if_xname,
+ (link_state == LINK_STATE_UP) ? "UP" : "DOWN" );
+ CURVNET_RESTORE();
+}
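+
+/*
+ * Editor's note: a minimal sketch of how a NIC driver reports a link
+ * transition through the KPI above; the function name is hypothetical.
+ * The call is cheap, since the real work is deferred to
+ * do_link_state_change() on the taskqueue.
+ */
+static void
+xx_report_link(struct ifnet *ifp, int link_up)
+{
+	if_link_state_change(ifp,
+	    link_up ? LINK_STATE_UP : LINK_STATE_DOWN);
+}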
+
+/*
+ * Mark an interface down and notify protocols of
+ * the transition.
+ * NOTE: must be called at splnet or equivalent.
+ */
+void
+if_down(struct ifnet *ifp)
+{
+
+ if_unroute(ifp, IFF_UP, AF_UNSPEC);
+}
+
+/*
+ * Mark an interface up and notify protocols of
+ * the transition.
+ * NOTE: must be called at splnet or equivalent.
+ */
+void
+if_up(struct ifnet *ifp)
+{
+
+ if_route(ifp, IFF_UP, AF_UNSPEC);
+}
+
+/*
+ * Flush an interface queue.
+ */
+void
+if_qflush(struct ifnet *ifp)
+{
+ struct mbuf *m, *n;
+ struct ifaltq *ifq;
+
+ ifq = &ifp->if_snd;
+ IFQ_LOCK(ifq);
+#ifdef ALTQ
+ if (ALTQ_IS_ENABLED(ifq))
+ ALTQ_PURGE(ifq);
+#endif
+ n = ifq->ifq_head;
+ while ((m = n) != 0) {
+ n = m->m_act;
+ m_freem(m);
+ }
+ ifq->ifq_head = 0;
+ ifq->ifq_tail = 0;
+ ifq->ifq_len = 0;
+ IFQ_UNLOCK(ifq);
+}
+
+/*
+ * Handle interface watchdog timer routines. Called
+ * from softclock, we decrement timers (if set) and
+ * call the appropriate interface routine on expiration.
+ *
+ * XXXRW: Note that because timeouts run with Giant, if_watchdog() is called
+ * holding Giant.
+ */
+static void
+if_slowtimo(void *arg)
+{
+ VNET_ITERATOR_DECL(vnet_iter);
+ struct ifnet *ifp;
+ int s = splimp();
+
+ VNET_LIST_RLOCK_NOSLEEP();
+ IFNET_RLOCK_NOSLEEP();
+ VNET_FOREACH(vnet_iter) {
+ CURVNET_SET(vnet_iter);
+ TAILQ_FOREACH(ifp, &V_ifnet, if_link) {
+ if (ifp->if_timer == 0 || --ifp->if_timer)
+ continue;
+ if (ifp->if_watchdog)
+ (*ifp->if_watchdog)(ifp);
+ }
+ CURVNET_RESTORE();
+ }
+ IFNET_RUNLOCK_NOSLEEP();
+ VNET_LIST_RUNLOCK_NOSLEEP();
+ splx(s);
+ timeout(if_slowtimo, (void *)0, hz / IFNET_SLOWHZ);
+}
+
+/*
+ * Map interface name to interface structure pointer, with or without
+ * returning a reference.
+ */
+struct ifnet *
+ifunit_ref(const char *name)
+{
+ struct ifnet *ifp;
+
+ IFNET_RLOCK_NOSLEEP();
+ TAILQ_FOREACH(ifp, &V_ifnet, if_link) {
+ if (strncmp(name, ifp->if_xname, IFNAMSIZ) == 0 &&
+ !(ifp->if_flags & IFF_DYING))
+ break;
+ }
+ if (ifp != NULL)
+ if_ref(ifp);
+ IFNET_RUNLOCK_NOSLEEP();
+ return (ifp);
+}
+
+struct ifnet *
+ifunit(const char *name)
+{
+ struct ifnet *ifp;
+
+ IFNET_RLOCK_NOSLEEP();
+ TAILQ_FOREACH(ifp, &V_ifnet, if_link) {
+ if (strncmp(name, ifp->if_xname, IFNAMSIZ) == 0)
+ break;
+ }
+ IFNET_RUNLOCK_NOSLEEP();
+ return (ifp);
+}
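+
+/*
+ * Editor's note: illustrative use of the two lookup variants above; the
+ * helper name is hypothetical.  ifunit_ref() is the safer choice when
+ * the ifnet could otherwise be torn down concurrently; the reference it
+ * returns must be dropped with if_rele().
+ */
+static int
+example_get_mtu(const char *name, u_long *mtup)
+{
+	struct ifnet *ifp;
+
+	ifp = ifunit_ref(name);
+	if (ifp == NULL)
+		return (ENXIO);
+	*mtup = ifp->if_mtu;
+	if_rele(ifp);
+	return (0);
+}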
+
+/*
+ * Hardware specific interface ioctls.
+ */
+static int
+ifhwioctl(u_long cmd, struct ifnet *ifp, caddr_t data, struct thread *td)
+{
+ struct ifreq *ifr;
+ struct ifstat *ifs;
+ int error = 0;
+ int new_flags, temp_flags;
+ size_t namelen, onamelen;
+ size_t descrlen;
+ char *descrbuf, *odescrbuf;
+ char new_name[IFNAMSIZ];
+ struct ifaddr *ifa;
+ struct sockaddr_dl *sdl;
+
+ ifr = (struct ifreq *)data;
+ switch (cmd) {
+ case SIOCGIFINDEX:
+ ifr->ifr_index = ifp->if_index;
+ break;
+
+ case SIOCGIFFLAGS:
+ temp_flags = ifp->if_flags | ifp->if_drv_flags;
+ ifr->ifr_flags = temp_flags & 0xffff;
+ ifr->ifr_flagshigh = temp_flags >> 16;
+ break;
+
+ case SIOCGIFCAP:
+ ifr->ifr_reqcap = ifp->if_capabilities;
+ ifr->ifr_curcap = ifp->if_capenable;
+ break;
+
+#ifdef MAC
+ case SIOCGIFMAC:
+ error = mac_ifnet_ioctl_get(td->td_ucred, ifr, ifp);
+ break;
+#endif
+
+ case SIOCGIFMETRIC:
+ ifr->ifr_metric = ifp->if_metric;
+ break;
+
+ case SIOCGIFMTU:
+ ifr->ifr_mtu = ifp->if_mtu;
+ break;
+
+ case SIOCGIFPHYS:
+ ifr->ifr_phys = ifp->if_physical;
+ break;
+
+ case SIOCGIFDESCR:
+ error = 0;
+ sx_slock(&ifdescr_sx);
+ if (ifp->if_description == NULL)
+ error = ENOMSG;
+ else {
+ /* space for terminating nul */
+ descrlen = strlen(ifp->if_description) + 1;
+ if (ifr->ifr_buffer.length < descrlen)
+ ifr->ifr_buffer.buffer = NULL;
+ else
+ error = copyout(ifp->if_description,
+ ifr->ifr_buffer.buffer, descrlen);
+ ifr->ifr_buffer.length = descrlen;
+ }
+ sx_sunlock(&ifdescr_sx);
+ break;
+
+ case SIOCSIFDESCR:
+ error = priv_check(td, PRIV_NET_SETIFDESCR);
+ if (error)
+ return (error);
+
+ /*
+ * Copy only (length-1) bytes to make sure that
+ * if_description is always nul terminated. The
+		 * length parameter is supposed to include the
+		 * terminating nul.
+ */
+ if (ifr->ifr_buffer.length > ifdescr_maxlen)
+ return (ENAMETOOLONG);
+ else if (ifr->ifr_buffer.length == 0)
+ descrbuf = NULL;
+ else {
+ descrbuf = malloc(ifr->ifr_buffer.length, M_IFDESCR,
+ M_WAITOK | M_ZERO);
+ error = copyin(ifr->ifr_buffer.buffer, descrbuf,
+ ifr->ifr_buffer.length - 1);
+ if (error) {
+ free(descrbuf, M_IFDESCR);
+ break;
+ }
+ }
+
+ sx_xlock(&ifdescr_sx);
+ odescrbuf = ifp->if_description;
+ ifp->if_description = descrbuf;
+ sx_xunlock(&ifdescr_sx);
+
+ getmicrotime(&ifp->if_lastchange);
+ free(odescrbuf, M_IFDESCR);
+ break;
+
+ case SIOCSIFFLAGS:
+ error = priv_check(td, PRIV_NET_SETIFFLAGS);
+ if (error)
+ return (error);
+ /*
+ * Currently, no driver owned flags pass the IFF_CANTCHANGE
+ * check, so we don't need special handling here yet.
+ */
+ new_flags = (ifr->ifr_flags & 0xffff) |
+ (ifr->ifr_flagshigh << 16);
+ if (ifp->if_flags & IFF_SMART) {
+ /* Smart drivers twiddle their own routes */
+ } else if (ifp->if_flags & IFF_UP &&
+ (new_flags & IFF_UP) == 0) {
+ int s = splimp();
+ if_down(ifp);
+ splx(s);
+ } else if (new_flags & IFF_UP &&
+ (ifp->if_flags & IFF_UP) == 0) {
+ int s = splimp();
+ if_up(ifp);
+ splx(s);
+ }
+ /* See if permanently promiscuous mode bit is about to flip */
+ if ((ifp->if_flags ^ new_flags) & IFF_PPROMISC) {
+ if (new_flags & IFF_PPROMISC)
+ ifp->if_flags |= IFF_PROMISC;
+ else if (ifp->if_pcount == 0)
+ ifp->if_flags &= ~IFF_PROMISC;
+ log(LOG_INFO, "%s: permanently promiscuous mode %s\n",
+ ifp->if_xname,
+ (new_flags & IFF_PPROMISC) ? "enabled" : "disabled");
+ }
+ ifp->if_flags = (ifp->if_flags & IFF_CANTCHANGE) |
+ (new_flags &~ IFF_CANTCHANGE);
+ if (ifp->if_ioctl) {
+ (void) (*ifp->if_ioctl)(ifp, cmd, data);
+ }
+ getmicrotime(&ifp->if_lastchange);
+ break;
+
+ case SIOCSIFCAP:
+ error = priv_check(td, PRIV_NET_SETIFCAP);
+ if (error)
+ return (error);
+ if (ifp->if_ioctl == NULL)
+ return (EOPNOTSUPP);
+ if (ifr->ifr_reqcap & ~ifp->if_capabilities)
+ return (EINVAL);
+ error = (*ifp->if_ioctl)(ifp, cmd, data);
+ if (error == 0)
+ getmicrotime(&ifp->if_lastchange);
+ break;
+
+#ifdef MAC
+ case SIOCSIFMAC:
+ error = mac_ifnet_ioctl_set(td->td_ucred, ifr, ifp);
+ break;
+#endif
+
+ case SIOCSIFNAME:
+ error = priv_check(td, PRIV_NET_SETIFNAME);
+ if (error)
+ return (error);
+ error = copyinstr(ifr->ifr_data, new_name, IFNAMSIZ, NULL);
+ if (error != 0)
+ return (error);
+ if (new_name[0] == '\0')
+ return (EINVAL);
+ if (ifunit(new_name) != NULL)
+ return (EEXIST);
+
+ /*
+ * XXX: Locking. Nothing else seems to lock if_flags,
+ * and there are numerous other races with the
+ * ifunit() checks not being atomic with namespace
+ * changes (renames, vmoves, if_attach, etc).
+ */
+ ifp->if_flags |= IFF_RENAMING;
+
+ /* Announce the departure of the interface. */
+ rt_ifannouncemsg(ifp, IFAN_DEPARTURE);
+ EVENTHANDLER_INVOKE(ifnet_departure_event, ifp);
+
+ log(LOG_INFO, "%s: changing name to '%s'\n",
+ ifp->if_xname, new_name);
+
+ strlcpy(ifp->if_xname, new_name, sizeof(ifp->if_xname));
+ ifa = ifp->if_addr;
+ IFA_LOCK(ifa);
+ sdl = (struct sockaddr_dl *)ifa->ifa_addr;
+ namelen = strlen(new_name);
+ onamelen = sdl->sdl_nlen;
+ /*
+ * Move the address if needed. This is safe because we
+ * allocate space for a name of length IFNAMSIZ when we
+ * create this in if_attach().
+ */
+ if (namelen != onamelen) {
+ bcopy(sdl->sdl_data + onamelen,
+ sdl->sdl_data + namelen, sdl->sdl_alen);
+ }
+ bcopy(new_name, sdl->sdl_data, namelen);
+ sdl->sdl_nlen = namelen;
+ sdl = (struct sockaddr_dl *)ifa->ifa_netmask;
+ bzero(sdl->sdl_data, onamelen);
+ while (namelen != 0)
+ sdl->sdl_data[--namelen] = 0xff;
+ IFA_UNLOCK(ifa);
+
+ EVENTHANDLER_INVOKE(ifnet_arrival_event, ifp);
+ /* Announce the return of the interface. */
+ rt_ifannouncemsg(ifp, IFAN_ARRIVAL);
+
+ ifp->if_flags &= ~IFF_RENAMING;
+ break;
+
+#ifdef VIMAGE
+ case SIOCSIFVNET:
+ error = priv_check(td, PRIV_NET_SETIFVNET);
+ if (error)
+ return (error);
+ error = if_vmove_loan(td, ifp, ifr->ifr_name, ifr->ifr_jid);
+ break;
+#endif
+
+ case SIOCSIFMETRIC:
+ error = priv_check(td, PRIV_NET_SETIFMETRIC);
+ if (error)
+ return (error);
+ ifp->if_metric = ifr->ifr_metric;
+ getmicrotime(&ifp->if_lastchange);
+ break;
+
+ case SIOCSIFPHYS:
+ error = priv_check(td, PRIV_NET_SETIFPHYS);
+ if (error)
+ return (error);
+ if (ifp->if_ioctl == NULL)
+ return (EOPNOTSUPP);
+ error = (*ifp->if_ioctl)(ifp, cmd, data);
+ if (error == 0)
+ getmicrotime(&ifp->if_lastchange);
+ break;
+
+ case SIOCSIFMTU:
+ {
+ u_long oldmtu = ifp->if_mtu;
+
+ error = priv_check(td, PRIV_NET_SETIFMTU);
+ if (error)
+ return (error);
+ if (ifr->ifr_mtu < IF_MINMTU || ifr->ifr_mtu > IF_MAXMTU)
+ return (EINVAL);
+ if (ifp->if_ioctl == NULL)
+ return (EOPNOTSUPP);
+ error = (*ifp->if_ioctl)(ifp, cmd, data);
+ if (error == 0) {
+ getmicrotime(&ifp->if_lastchange);
+ rt_ifmsg(ifp);
+ }
+ /*
+		 * If the link MTU changed, perform network layer specific procedures.
+ */
+ if (ifp->if_mtu != oldmtu) {
+#ifdef INET6
+ nd6_setmtu(ifp);
+#endif
+ }
+ break;
+ }
+
+ case SIOCADDMULTI:
+ case SIOCDELMULTI:
+ if (cmd == SIOCADDMULTI)
+ error = priv_check(td, PRIV_NET_ADDMULTI);
+ else
+ error = priv_check(td, PRIV_NET_DELMULTI);
+ if (error)
+ return (error);
+
+ /* Don't allow group membership on non-multicast interfaces. */
+ if ((ifp->if_flags & IFF_MULTICAST) == 0)
+ return (EOPNOTSUPP);
+
+ /* Don't let users screw up protocols' entries. */
+ if (ifr->ifr_addr.sa_family != AF_LINK)
+ return (EINVAL);
+
+ if (cmd == SIOCADDMULTI) {
+ struct ifmultiaddr *ifma;
+
+ /*
+ * Userland is only permitted to join groups once
+ * via the if_addmulti() KPI, because it cannot hold
+ * struct ifmultiaddr * between calls. It may also
+ * lose a race while we check if the membership
+ * already exists.
+ */
+ IF_ADDR_LOCK(ifp);
+ ifma = if_findmulti(ifp, &ifr->ifr_addr);
+ IF_ADDR_UNLOCK(ifp);
+ if (ifma != NULL)
+ error = EADDRINUSE;
+ else
+ error = if_addmulti(ifp, &ifr->ifr_addr, &ifma);
+ } else {
+ error = if_delmulti(ifp, &ifr->ifr_addr);
+ }
+ if (error == 0)
+ getmicrotime(&ifp->if_lastchange);
+ break;
+
+ case SIOCSIFPHYADDR:
+ case SIOCDIFPHYADDR:
+#ifdef INET6
+ case SIOCSIFPHYADDR_IN6:
+#endif
+ case SIOCSLIFPHYADDR:
+ case SIOCSIFMEDIA:
+ case SIOCSIFGENERIC:
+ error = priv_check(td, PRIV_NET_HWIOCTL);
+ if (error)
+ return (error);
+ if (ifp->if_ioctl == NULL)
+ return (EOPNOTSUPP);
+ error = (*ifp->if_ioctl)(ifp, cmd, data);
+ if (error == 0)
+ getmicrotime(&ifp->if_lastchange);
+ break;
+
+ case SIOCGIFSTATUS:
+ ifs = (struct ifstat *)data;
+ ifs->ascii[0] = '\0';
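+		/* FALLTHROUGH */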
+
+ case SIOCGIFPSRCADDR:
+ case SIOCGIFPDSTADDR:
+ case SIOCGLIFPHYADDR:
+ case SIOCGIFMEDIA:
+ case SIOCGIFGENERIC:
+ if (ifp->if_ioctl == NULL)
+ return (EOPNOTSUPP);
+ error = (*ifp->if_ioctl)(ifp, cmd, data);
+ break;
+
+ case SIOCSIFLLADDR:
+ error = priv_check(td, PRIV_NET_SETLLADDR);
+ if (error)
+ return (error);
+ error = if_setlladdr(ifp,
+ ifr->ifr_addr.sa_data, ifr->ifr_addr.sa_len);
+ EVENTHANDLER_INVOKE(iflladdr_event, ifp);
+ break;
+
+ case SIOCAIFGROUP:
+ {
+ struct ifgroupreq *ifgr = (struct ifgroupreq *)ifr;
+
+ error = priv_check(td, PRIV_NET_ADDIFGROUP);
+ if (error)
+ return (error);
+ if ((error = if_addgroup(ifp, ifgr->ifgr_group)))
+ return (error);
+ break;
+ }
+
+ case SIOCGIFGROUP:
+ if ((error = if_getgroup((struct ifgroupreq *)ifr, ifp)))
+ return (error);
+ break;
+
+ case SIOCDIFGROUP:
+ {
+ struct ifgroupreq *ifgr = (struct ifgroupreq *)ifr;
+
+ error = priv_check(td, PRIV_NET_DELIFGROUP);
+ if (error)
+ return (error);
+ if ((error = if_delgroup(ifp, ifgr->ifgr_group)))
+ return (error);
+ break;
+ }
+
+ default:
+ error = ENOIOCTL;
+ break;
+ }
+ return (error);
+}
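+
+/*
+ * Editor's note: hedged userland sketch of the SIOCGIFDESCR size-probe
+ * protocol implemented above: when the supplied buffer is too small the
+ * kernel NULLs ifr_buffer.buffer and reports the required length, so a
+ * caller can probe once and retry.  The function name is hypothetical;
+ * it is userland code needing <sys/ioctl.h>, <net/if.h>, <stdlib.h> and
+ * <string.h>, with error handling trimmed for brevity.
+ */
+static char *
+get_ifdescr(int s, const char *name)
+{
+	struct ifreq ifr;
+
+	memset(&ifr, 0, sizeof(ifr));
+	strlcpy(ifr.ifr_name, name, sizeof(ifr.ifr_name));
+	ifr.ifr_buffer.buffer = NULL;
+	ifr.ifr_buffer.length = 0;
+	if (ioctl(s, SIOCGIFDESCR, &ifr) < 0)	/* probe for the size */
+		return (NULL);
+	ifr.ifr_buffer.buffer = malloc(ifr.ifr_buffer.length);
+	if (ifr.ifr_buffer.buffer == NULL ||
+	    ioctl(s, SIOCGIFDESCR, &ifr) < 0)	/* fetch the text */
+		return (NULL);
+	return (ifr.ifr_buffer.buffer);
+}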
+
+#ifdef COMPAT_FREEBSD32
+struct ifconf32 {
+ int32_t ifc_len;
+ union {
+ uint32_t ifcu_buf;
+ uint32_t ifcu_req;
+ } ifc_ifcu;
+};
+#define SIOCGIFCONF32 _IOWR('i', 36, struct ifconf32)
+#endif
+
+/*
+ * Interface ioctls.
+ */
+int
+ifioctl(struct socket *so, u_long cmd, caddr_t data, struct thread *td)
+{
+ struct ifnet *ifp;
+ struct ifreq *ifr;
+ int error;
+ int oif_flags;
+
+ switch (cmd) {
+ case SIOCGIFCONF:
+ case OSIOCGIFCONF:
+ return (ifconf(cmd, data));
+
+#ifdef COMPAT_FREEBSD32
+ case SIOCGIFCONF32:
+ {
+ struct ifconf32 *ifc32;
+ struct ifconf ifc;
+
+ ifc32 = (struct ifconf32 *)data;
+ ifc.ifc_len = ifc32->ifc_len;
+ ifc.ifc_buf = PTRIN(ifc32->ifc_buf);
+
+ return (ifconf(SIOCGIFCONF, (void *)&ifc));
+ }
+#endif
+ }
+ ifr = (struct ifreq *)data;
+
+ switch (cmd) {
+#ifdef VIMAGE
+ case SIOCSIFRVNET:
+ error = priv_check(td, PRIV_NET_SETIFVNET);
+ if (error)
+ return (error);
+ return (if_vmove_reclaim(td, ifr->ifr_name, ifr->ifr_jid));
+#endif
+ case SIOCIFCREATE:
+ case SIOCIFCREATE2:
+ error = priv_check(td, PRIV_NET_IFCREATE);
+ if (error)
+ return (error);
+ return (if_clone_create(ifr->ifr_name, sizeof(ifr->ifr_name),
+ cmd == SIOCIFCREATE2 ? ifr->ifr_data : NULL));
+ case SIOCIFDESTROY:
+ error = priv_check(td, PRIV_NET_IFDESTROY);
+ if (error)
+ return (error);
+ return if_clone_destroy(ifr->ifr_name);
+
+ case SIOCIFGCLONERS:
+ return (if_clone_list((struct if_clonereq *)data));
+ case SIOCGIFGMEMB:
+ return (if_getgroupmembers((struct ifgroupreq *)data));
+ }
+
+ ifp = ifunit_ref(ifr->ifr_name);
+ if (ifp == NULL)
+ return (ENXIO);
+
+ error = ifhwioctl(cmd, ifp, data, td);
+ if (error != ENOIOCTL) {
+ if_rele(ifp);
+ return (error);
+ }
+
+ oif_flags = ifp->if_flags;
+ if (so->so_proto == NULL) {
+ if_rele(ifp);
+ return (EOPNOTSUPP);
+ }
+#ifndef COMPAT_43
+ error = ((*so->so_proto->pr_usrreqs->pru_control)(so, cmd,
+ data,
+ ifp, td));
+ if (error == EOPNOTSUPP && ifp != NULL && ifp->if_ioctl != NULL)
+ error = (*ifp->if_ioctl)(ifp, cmd, data);
+#else
+ {
+ u_long ocmd = cmd;
+
+ switch (cmd) {
+
+ case SIOCSIFDSTADDR:
+ case SIOCSIFADDR:
+ case SIOCSIFBRDADDR:
+ case SIOCSIFNETMASK:
+#if BYTE_ORDER != BIG_ENDIAN
+ if (ifr->ifr_addr.sa_family == 0 &&
+ ifr->ifr_addr.sa_len < 16) {
+ ifr->ifr_addr.sa_family = ifr->ifr_addr.sa_len;
+ ifr->ifr_addr.sa_len = 16;
+ }
+#else
+ if (ifr->ifr_addr.sa_len == 0)
+ ifr->ifr_addr.sa_len = 16;
+#endif
+ break;
+
+ case OSIOCGIFADDR:
+ cmd = SIOCGIFADDR;
+ break;
+
+ case OSIOCGIFDSTADDR:
+ cmd = SIOCGIFDSTADDR;
+ break;
+
+ case OSIOCGIFBRDADDR:
+ cmd = SIOCGIFBRDADDR;
+ break;
+
+ case OSIOCGIFNETMASK:
+ cmd = SIOCGIFNETMASK;
+ }
+ error = ((*so->so_proto->pr_usrreqs->pru_control)(so,
+ cmd,
+ data,
+ ifp, td));
+ if (error == EOPNOTSUPP && ifp != NULL &&
+ ifp->if_ioctl != NULL)
+ error = (*ifp->if_ioctl)(ifp, cmd, data);
+ switch (ocmd) {
+
+ case OSIOCGIFADDR:
+ case OSIOCGIFDSTADDR:
+ case OSIOCGIFBRDADDR:
+ case OSIOCGIFNETMASK:
+ *(u_short *)&ifr->ifr_addr = ifr->ifr_addr.sa_family;
+
+ }
+ }
+#endif /* COMPAT_43 */
+
+ if ((oif_flags ^ ifp->if_flags) & IFF_UP) {
+#ifdef INET6
+ if (ifp->if_flags & IFF_UP) {
+ int s = splimp();
+ in6_if_up(ifp);
+ splx(s);
+ }
+#endif
+ }
+ if_rele(ifp);
+ return (error);
+}
+
+/*
+ * The code common to handling reference counted flags,
+ * e.g., in ifpromisc() and if_allmulti().
+ * The "pflag" argument can specify a permanent mode flag to check,
+ * such as IFF_PPROMISC for promiscuous mode; should be 0 if none.
+ *
+ * Only to be used on stack-owned flags, not driver-owned flags.
+ */
+static int
+if_setflag(struct ifnet *ifp, int flag, int pflag, int *refcount, int onswitch)
+{
+ struct ifreq ifr;
+ int error;
+ int oldflags, oldcount;
+
+ /* Sanity checks to catch programming errors */
+ KASSERT((flag & (IFF_DRV_OACTIVE|IFF_DRV_RUNNING)) == 0,
+ ("%s: setting driver-owned flag %d", __func__, flag));
+
+ if (onswitch)
+ KASSERT(*refcount >= 0,
+ ("%s: increment negative refcount %d for flag %d",
+ __func__, *refcount, flag));
+ else
+ KASSERT(*refcount > 0,
+ ("%s: decrement non-positive refcount %d for flag %d",
+ __func__, *refcount, flag));
+
+ /* In case this mode is permanent, just touch refcount */
+ if (ifp->if_flags & pflag) {
+ *refcount += onswitch ? 1 : -1;
+ return (0);
+ }
+
+	/* Save ifnet parameters, as if_ioctl() may fail and we must recover */
+ oldcount = *refcount;
+ oldflags = ifp->if_flags;
+
+ /*
+	 * See if we are not the only one, in which case adjusting the
+	 * refcount is enough.  Actually toggle the interface flag only if
+	 * we are the first or the last.
+ */
+ if (onswitch) {
+ if ((*refcount)++)
+ return (0);
+ ifp->if_flags |= flag;
+ } else {
+ if (--(*refcount))
+ return (0);
+ ifp->if_flags &= ~flag;
+ }
+
+ /* Call down the driver since we've changed interface flags */
+ if (ifp->if_ioctl == NULL) {
+ error = EOPNOTSUPP;
+ goto recover;
+ }
+ ifr.ifr_flags = ifp->if_flags & 0xffff;
+ ifr.ifr_flagshigh = ifp->if_flags >> 16;
+ error = (*ifp->if_ioctl)(ifp, SIOCSIFFLAGS, (caddr_t)&ifr);
+ if (error)
+ goto recover;
+ /* Notify userland that interface flags have changed */
+ rt_ifmsg(ifp);
+ return (0);
+
+recover:
+ /* Recover after driver error */
+ *refcount = oldcount;
+ ifp->if_flags = oldflags;
+ return (error);
+}
+
+/*
+ * Set/clear promiscuous mode on interface ifp based on the truth value
+ * of pswitch. The calls are reference counted so that only the first
+ * "on" request actually has an effect, as does the final "off" request.
+ * Results are undefined if the "off" and "on" requests are not matched.
+ */
+int
+ifpromisc(struct ifnet *ifp, int pswitch)
+{
+ int error;
+ int oldflags = ifp->if_flags;
+
+ error = if_setflag(ifp, IFF_PROMISC, IFF_PPROMISC,
+ &ifp->if_pcount, pswitch);
+ /* If promiscuous mode status has changed, log a message */
+ if (error == 0 && ((ifp->if_flags ^ oldflags) & IFF_PROMISC))
+ log(LOG_INFO, "%s: promiscuous mode %s\n",
+ ifp->if_xname,
+ (ifp->if_flags & IFF_PROMISC) ? "enabled" : "disabled");
+ return (error);
+}
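+
+/*
+ * Editor's note: matched on/off usage of the refcounted toggle above, as
+ * a packet-capture consumer would issue it; the function names are
+ * hypothetical.  Only the first "on" and the last "off" actually reach
+ * the driver via SIOCSIFFLAGS.
+ */
+static int
+capture_start(struct ifnet *ifp)
+{
+	return (ifpromisc(ifp, 1));
+}
+
+static void
+capture_stop(struct ifnet *ifp)
+{
+	(void)ifpromisc(ifp, 0);
+}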
+
+/*
+ * Return the interface configuration of the system.  The list may
+ * be used in later ioctls (above) to get other per-interface
+ * information.
+ */
+/*ARGSUSED*/
+static int
+ifconf(u_long cmd, caddr_t data)
+{
+ struct ifconf *ifc = (struct ifconf *)data;
+ struct ifnet *ifp;
+ struct ifaddr *ifa;
+ struct ifreq ifr;
+ struct sbuf *sb;
+ int error, full = 0, valid_len, max_len;
+
+ /* Limit initial buffer size to MAXPHYS to avoid DoS from userspace. */
+ max_len = MAXPHYS - 1;
+
+ /* Prevent hostile input from being able to crash the system */
+ if (ifc->ifc_len <= 0)
+ return (EINVAL);
+
+again:
+ if (ifc->ifc_len <= max_len) {
+ max_len = ifc->ifc_len;
+ full = 1;
+ }
+ sb = sbuf_new(NULL, NULL, max_len + 1, SBUF_FIXEDLEN);
+ max_len = 0;
+ valid_len = 0;
+
+ IFNET_RLOCK();
+ TAILQ_FOREACH(ifp, &V_ifnet, if_link) {
+ int addrs;
+
+ /*
+ * Zero the ifr_name buffer to make sure we don't
+ * disclose the contents of the stack.
+ */
+ memset(ifr.ifr_name, 0, sizeof(ifr.ifr_name));
+
+ if (strlcpy(ifr.ifr_name, ifp->if_xname, sizeof(ifr.ifr_name))
+ >= sizeof(ifr.ifr_name)) {
+ sbuf_delete(sb);
+ IFNET_RUNLOCK();
+ return (ENAMETOOLONG);
+ }
+
+ addrs = 0;
+ IF_ADDR_LOCK(ifp);
+ TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
+ struct sockaddr *sa = ifa->ifa_addr;
+
+ if (prison_if(curthread->td_ucred, sa) != 0)
+ continue;
+ addrs++;
+#ifdef COMPAT_43
+ if (cmd == OSIOCGIFCONF) {
+ struct osockaddr *osa =
+ (struct osockaddr *)&ifr.ifr_addr;
+ ifr.ifr_addr = *sa;
+ osa->sa_family = sa->sa_family;
+ sbuf_bcat(sb, &ifr, sizeof(ifr));
+ max_len += sizeof(ifr);
+ } else
+#endif
+ if (sa->sa_len <= sizeof(*sa)) {
+ ifr.ifr_addr = *sa;
+ sbuf_bcat(sb, &ifr, sizeof(ifr));
+ max_len += sizeof(ifr);
+ } else {
+ sbuf_bcat(sb, &ifr,
+ offsetof(struct ifreq, ifr_addr));
+ max_len += offsetof(struct ifreq, ifr_addr);
+ sbuf_bcat(sb, sa, sa->sa_len);
+ max_len += sa->sa_len;
+ }
+
+ if (!sbuf_overflowed(sb))
+ valid_len = sbuf_len(sb);
+ }
+ IF_ADDR_UNLOCK(ifp);
+ if (addrs == 0) {
+ bzero((caddr_t)&ifr.ifr_addr, sizeof(ifr.ifr_addr));
+ sbuf_bcat(sb, &ifr, sizeof(ifr));
+ max_len += sizeof(ifr);
+
+ if (!sbuf_overflowed(sb))
+ valid_len = sbuf_len(sb);
+ }
+ }
+ IFNET_RUNLOCK();
+
+ /*
+ * If we didn't allocate enough space (uncommon), try again. If
+ * we have already allocated as much space as we are allowed,
+ * return what we've got.
+ */
+ if (valid_len != max_len && !full) {
+ sbuf_delete(sb);
+ goto again;
+ }
+
+ ifc->ifc_len = valid_len;
+ sbuf_finish(sb);
+ error = copyout(sbuf_data(sb), ifc->ifc_req, ifc->ifc_len);
+ sbuf_delete(sb);
+ return (error);
+}
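+
+/*
+ * Editor's note: hedged userland sketch consuming the SIOCGIFCONF output
+ * built above.  Records are variable length, so the walk must advance by
+ * _SIZEOF_ADDR_IFREQ() (from <net/if.h>) rather than by
+ * sizeof(struct ifreq).  A fixed 64 KiB buffer is assumed for brevity;
+ * <stdio.h> and <stdlib.h> are also needed.
+ */
+static void
+print_ifconf(int s)
+{
+	struct ifconf ifc;
+	struct ifreq *ifr;
+	char *buf, *cp;
+
+	ifc.ifc_len = 64 * 1024;
+	if ((buf = malloc(ifc.ifc_len)) == NULL)
+		return;
+	ifc.ifc_buf = buf;
+	if (ioctl(s, SIOCGIFCONF, &ifc) == 0) {
+		cp = buf;
+		while (cp < buf + ifc.ifc_len) {
+			ifr = (struct ifreq *)cp;
+			printf("%s: af %d\n", ifr->ifr_name,
+			    ifr->ifr_addr.sa_family);
+			cp += _SIZEOF_ADDR_IFREQ(*ifr);
+		}
+	}
+	free(buf);
+}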
+
+/*
+ * Just like ifpromisc(), but for all-multicast-reception mode.
+ */
+int
+if_allmulti(struct ifnet *ifp, int onswitch)
+{
+
+ return (if_setflag(ifp, IFF_ALLMULTI, 0, &ifp->if_amcount, onswitch));
+}
+
+struct ifmultiaddr *
+if_findmulti(struct ifnet *ifp, struct sockaddr *sa)
+{
+ struct ifmultiaddr *ifma;
+
+ IF_ADDR_LOCK_ASSERT(ifp);
+
+ TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
+ if (sa->sa_family == AF_LINK) {
+ if (sa_dl_equal(ifma->ifma_addr, sa))
+ break;
+ } else {
+ if (sa_equal(ifma->ifma_addr, sa))
+ break;
+ }
+ }
+
+ return ifma;
+}
+
+/*
+ * Allocate a new ifmultiaddr and initialize based on passed arguments. We
+ * make copies of passed sockaddrs. The ifmultiaddr will not be added to
+ * the ifnet multicast address list here, so the caller must do that and
+ * other setup work (such as notifying the device driver). The reference
+ * count is initialized to 1.
+ */
+static struct ifmultiaddr *
+if_allocmulti(struct ifnet *ifp, struct sockaddr *sa, struct sockaddr *llsa,
+ int mflags)
+{
+ struct ifmultiaddr *ifma;
+ struct sockaddr *dupsa;
+
+ ifma = malloc(sizeof *ifma, M_IFMADDR, mflags |
+ M_ZERO);
+ if (ifma == NULL)
+ return (NULL);
+
+ dupsa = malloc(sa->sa_len, M_IFMADDR, mflags);
+ if (dupsa == NULL) {
+ free(ifma, M_IFMADDR);
+ return (NULL);
+ }
+ bcopy(sa, dupsa, sa->sa_len);
+ ifma->ifma_addr = dupsa;
+
+ ifma->ifma_ifp = ifp;
+ ifma->ifma_refcount = 1;
+ ifma->ifma_protospec = NULL;
+
+ if (llsa == NULL) {
+ ifma->ifma_lladdr = NULL;
+ return (ifma);
+ }
+
+ dupsa = malloc(llsa->sa_len, M_IFMADDR, mflags);
+ if (dupsa == NULL) {
+ free(ifma->ifma_addr, M_IFMADDR);
+ free(ifma, M_IFMADDR);
+ return (NULL);
+ }
+ bcopy(llsa, dupsa, llsa->sa_len);
+ ifma->ifma_lladdr = dupsa;
+
+ return (ifma);
+}
+
+/*
+ * if_freemulti: free ifmultiaddr structure and possibly attached related
+ * addresses. The caller is responsible for implementing reference
+ * counting, notifying the driver, handling routing messages, and releasing
+ * any dependent link layer state.
+ */
+static void
+if_freemulti(struct ifmultiaddr *ifma)
+{
+
+ KASSERT(ifma->ifma_refcount == 0, ("if_freemulti: refcount %d",
+ ifma->ifma_refcount));
+ KASSERT(ifma->ifma_protospec == NULL,
+ ("if_freemulti: protospec not NULL"));
+
+ if (ifma->ifma_lladdr != NULL)
+ free(ifma->ifma_lladdr, M_IFMADDR);
+ free(ifma->ifma_addr, M_IFMADDR);
+ free(ifma, M_IFMADDR);
+}
+
+/*
+ * Register an additional multicast address with a network interface.
+ *
+ * - If the address is already present, bump the reference count on the
+ * address and return.
+ * - If the address is not link-layer, look up a link layer address.
+ * - Allocate address structures for one or both addresses, and attach to the
+ * multicast address list on the interface. If automatically adding a link
+ * layer address, the protocol address will own a reference to the link
+ * layer address, to be freed when it is freed.
+ * - Notify the network device driver of an addition to the multicast address
+ * list.
+ *
+ * 'sa' points to caller-owned memory with the desired multicast address.
+ *
+ * 'retifma' will be used to return a pointer to the resulting multicast
+ * address reference, if desired.
+ */
+int
+if_addmulti(struct ifnet *ifp, struct sockaddr *sa,
+ struct ifmultiaddr **retifma)
+{
+ struct ifmultiaddr *ifma, *ll_ifma;
+ struct sockaddr *llsa;
+ int error;
+
+ /*
+ * If the address is already present, return a new reference to it;
+ * otherwise, allocate storage and set up a new address.
+ */
+ IF_ADDR_LOCK(ifp);
+ ifma = if_findmulti(ifp, sa);
+ if (ifma != NULL) {
+ ifma->ifma_refcount++;
+ if (retifma != NULL)
+ *retifma = ifma;
+ IF_ADDR_UNLOCK(ifp);
+ return (0);
+ }
+
+ /*
+ * The address isn't already present; resolve the protocol address
+ * into a link layer address, and then look that up, bump its
+ * refcount or allocate an ifma for that also. If 'llsa' was
+ * returned, we will need to free it later.
+ */
+ llsa = NULL;
+ ll_ifma = NULL;
+ if (ifp->if_resolvemulti != NULL) {
+ error = ifp->if_resolvemulti(ifp, &llsa, sa);
+ if (error)
+ goto unlock_out;
+ }
+
+ /*
+ * Allocate the new address. Don't hook it up yet, as we may also
+ * need to allocate a link layer multicast address.
+ */
+ ifma = if_allocmulti(ifp, sa, llsa, M_NOWAIT);
+ if (ifma == NULL) {
+ error = ENOMEM;
+ goto free_llsa_out;
+ }
+
+ /*
+ * If a link layer address is found, we'll need to see if it's
+	 * already present in the address list, or allocate it as well.
+ * When this block finishes, the link layer address will be on the
+ * list.
+ */
+ if (llsa != NULL) {
+ ll_ifma = if_findmulti(ifp, llsa);
+ if (ll_ifma == NULL) {
+ ll_ifma = if_allocmulti(ifp, llsa, NULL, M_NOWAIT);
+ if (ll_ifma == NULL) {
+ --ifma->ifma_refcount;
+ if_freemulti(ifma);
+ error = ENOMEM;
+ goto free_llsa_out;
+ }
+ TAILQ_INSERT_HEAD(&ifp->if_multiaddrs, ll_ifma,
+ ifma_link);
+ } else
+ ll_ifma->ifma_refcount++;
+ ifma->ifma_llifma = ll_ifma;
+ }
+
+ /*
+ * We now have a new multicast address, ifma, and possibly a new or
+ * referenced link layer address. Add the primary address to the
+ * ifnet address list.
+ */
+ TAILQ_INSERT_HEAD(&ifp->if_multiaddrs, ifma, ifma_link);
+
+ if (retifma != NULL)
+ *retifma = ifma;
+
+ /*
+ * Must generate the message while holding the lock so that 'ifma'
+ * pointer is still valid.
+ */
+ rt_newmaddrmsg(RTM_NEWMADDR, ifma);
+ IF_ADDR_UNLOCK(ifp);
+
+ /*
+ * We are certain we have added something, so call down to the
+	 * interface to let it know about it.
+ */
+ if (ifp->if_ioctl != NULL) {
+ (void) (*ifp->if_ioctl)(ifp, SIOCADDMULTI, 0);
+ }
+
+ if (llsa != NULL)
+ free(llsa, M_IFMADDR);
+
+ return (0);
+
+free_llsa_out:
+ if (llsa != NULL)
+ free(llsa, M_IFMADDR);
+
+unlock_out:
+ IF_ADDR_UNLOCK(ifp);
+ return (error);
+}
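+
+/*
+ * Editor's note: a minimal sketch of a network-layer caller of the KPI
+ * above; the function name is hypothetical and <netinet/in.h> is assumed
+ * for struct sockaddr_in.  The group sockaddr may live on the stack
+ * because if_addmulti() copies it; the returned ifma reference is what
+ * the caller later hands to if_delmulti_ifma() on leave.
+ */
+static int
+proto_join_group(struct ifnet *ifp, struct in_addr group,
+    struct ifmultiaddr **ifmap)
+{
+	struct sockaddr_in sin;
+
+	bzero(&sin, sizeof(sin));
+	sin.sin_family = AF_INET;
+	sin.sin_len = sizeof(sin);
+	sin.sin_addr = group;
+	return (if_addmulti(ifp, (struct sockaddr *)&sin, ifmap));
+}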
+
+/*
+ * Delete a multicast group membership by network-layer group address.
+ *
+ * Returns ENOENT if the entry could not be found. If ifp no longer
+ * exists, results are undefined. This entry point should only be used
+ * from subsystems which do appropriate locking to hold ifp for the
+ * duration of the call.
+ * Network-layer protocol domains must use if_delmulti_ifma().
+ */
+int
+if_delmulti(struct ifnet *ifp, struct sockaddr *sa)
+{
+ struct ifmultiaddr *ifma;
+ int lastref;
+#ifdef INVARIANTS
+ struct ifnet *oifp;
+
+ IFNET_RLOCK_NOSLEEP();
+ TAILQ_FOREACH(oifp, &V_ifnet, if_link)
+ if (ifp == oifp)
+ break;
+ if (ifp != oifp)
+ ifp = NULL;
+ IFNET_RUNLOCK_NOSLEEP();
+
+ KASSERT(ifp != NULL, ("%s: ifnet went away", __func__));
+#endif
+ if (ifp == NULL)
+ return (ENOENT);
+
+ IF_ADDR_LOCK(ifp);
+ lastref = 0;
+ ifma = if_findmulti(ifp, sa);
+ if (ifma != NULL)
+ lastref = if_delmulti_locked(ifp, ifma, 0);
+ IF_ADDR_UNLOCK(ifp);
+
+ if (ifma == NULL)
+ return (ENOENT);
+
+ if (lastref && ifp->if_ioctl != NULL) {
+ (void)(*ifp->if_ioctl)(ifp, SIOCDELMULTI, 0);
+ }
+
+ return (0);
+}
+
+/*
+ * Delete all multicast group memberships for an interface.
+ * Should be used to quickly flush all multicast filters.
+ */
+void
+if_delallmulti(struct ifnet *ifp)
+{
+ struct ifmultiaddr *ifma;
+ struct ifmultiaddr *next;
+
+ IF_ADDR_LOCK(ifp);
+ TAILQ_FOREACH_SAFE(ifma, &ifp->if_multiaddrs, ifma_link, next)
+ if_delmulti_locked(ifp, ifma, 0);
+ IF_ADDR_UNLOCK(ifp);
+}
+
+/*
+ * Delete a multicast group membership by group membership pointer.
+ * Network-layer protocol domains must use this routine.
+ *
+ * It is safe to call this routine if the ifp disappeared.
+ */
+void
+if_delmulti_ifma(struct ifmultiaddr *ifma)
+{
+ struct ifnet *ifp;
+ int lastref;
+
+ ifp = ifma->ifma_ifp;
+#ifdef DIAGNOSTIC
+ if (ifp == NULL) {
+ printf("%s: ifma_ifp seems to be detached\n", __func__);
+ } else {
+ struct ifnet *oifp;
+
+ IFNET_RLOCK_NOSLEEP();
+ TAILQ_FOREACH(oifp, &V_ifnet, if_link)
+ if (ifp == oifp)
+ break;
+ if (ifp != oifp) {
+ printf("%s: ifnet %p disappeared\n", __func__, ifp);
+ ifp = NULL;
+ }
+ IFNET_RUNLOCK_NOSLEEP();
+ }
+#endif
+ /*
+ * If and only if the ifnet instance exists: Acquire the address lock.
+ */
+ if (ifp != NULL)
+ IF_ADDR_LOCK(ifp);
+
+ lastref = if_delmulti_locked(ifp, ifma, 0);
+
+ if (ifp != NULL) {
+ /*
+ * If and only if the ifnet instance exists:
+ * Release the address lock.
+ * If the group was left: update the hardware hash filter.
+ */
+ IF_ADDR_UNLOCK(ifp);
+ if (lastref && ifp->if_ioctl != NULL) {
+ (void)(*ifp->if_ioctl)(ifp, SIOCDELMULTI, 0);
+ }
+ }
+}
+
+/*
+ * Perform deletion of network-layer and/or link-layer multicast address.
+ *
+ * Return 0 if the reference count was decremented.
+ * Return 1 if the final reference was released, indicating that the
+ * hardware hash filter should be reprogrammed.
+ */
+static int
+if_delmulti_locked(struct ifnet *ifp, struct ifmultiaddr *ifma, int detaching)
+{
+ struct ifmultiaddr *ll_ifma;
+
+ if (ifp != NULL && ifma->ifma_ifp != NULL) {
+ KASSERT(ifma->ifma_ifp == ifp,
+ ("%s: inconsistent ifp %p", __func__, ifp));
+ IF_ADDR_LOCK_ASSERT(ifp);
+ }
+
+ ifp = ifma->ifma_ifp;
+
+ /*
+ * If the ifnet is detaching, null out references to ifnet,
+ * so that upper protocol layers will notice, and not attempt
+ * to obtain locks for an ifnet which no longer exists. The
+ * routing socket announcement must happen before the ifnet
+ * instance is detached from the system.
+ */
+ if (detaching) {
+#ifdef DIAGNOSTIC
+ printf("%s: detaching ifnet instance %p\n", __func__, ifp);
+#endif
+ /*
+ * ifp may already be nulled out if we are being reentered
+ * to delete the ll_ifma.
+ */
+ if (ifp != NULL) {
+ rt_newmaddrmsg(RTM_DELMADDR, ifma);
+ ifma->ifma_ifp = NULL;
+ }
+ }
+
+ if (--ifma->ifma_refcount > 0)
+ return 0;
+
+ /*
+ * If this ifma is a network-layer ifma, a link-layer ifma may
+ * have been associated with it. Release it first if so.
+ */
+ ll_ifma = ifma->ifma_llifma;
+ if (ll_ifma != NULL) {
+ KASSERT(ifma->ifma_lladdr != NULL,
+ ("%s: llifma w/o lladdr", __func__));
+ if (detaching)
+ ll_ifma->ifma_ifp = NULL; /* XXX */
+ if (--ll_ifma->ifma_refcount == 0) {
+ if (ifp != NULL) {
+ TAILQ_REMOVE(&ifp->if_multiaddrs, ll_ifma,
+ ifma_link);
+ }
+ if_freemulti(ll_ifma);
+ }
+ }
+
+ if (ifp != NULL)
+ TAILQ_REMOVE(&ifp->if_multiaddrs, ifma, ifma_link);
+
+ if_freemulti(ifma);
+
+ /*
+ * The last reference to this instance of struct ifmultiaddr
+ * was released; the hardware should be notified of this change.
+ */
+ return 1;
+}
+
+/*
+ * Set the link layer address on an interface.
+ *
+ * At this time we only support certain types of interfaces,
+ * and we don't allow the length of the address to change.
+ */
+int
+if_setlladdr(struct ifnet *ifp, const u_char *lladdr, int len)
+{
+ struct sockaddr_dl *sdl;
+ struct ifaddr *ifa;
+ struct ifreq ifr;
+
+ IF_ADDR_LOCK(ifp);
+ ifa = ifp->if_addr;
+ if (ifa == NULL) {
+ IF_ADDR_UNLOCK(ifp);
+ return (EINVAL);
+ }
+ ifa_ref(ifa);
+ IF_ADDR_UNLOCK(ifp);
+ sdl = (struct sockaddr_dl *)ifa->ifa_addr;
+ if (sdl == NULL) {
+ ifa_free(ifa);
+ return (EINVAL);
+ }
+ if (len != sdl->sdl_alen) { /* don't allow length to change */
+ ifa_free(ifa);
+ return (EINVAL);
+ }
+ switch (ifp->if_type) {
+ case IFT_ETHER:
+ case IFT_FDDI:
+ case IFT_XETHER:
+ case IFT_ISO88025:
+ case IFT_L2VLAN:
+ case IFT_BRIDGE:
+ case IFT_ARCNET:
+ case IFT_IEEE8023ADLAG:
+ case IFT_IEEE80211:
+ bcopy(lladdr, LLADDR(sdl), len);
+ ifa_free(ifa);
+ break;
+ default:
+ ifa_free(ifa);
+ return (ENODEV);
+ }
+
+ /*
+ * If the interface is already up, we need
+ * to re-init it in order to reprogram its
+ * address filter.
+ */
+ if ((ifp->if_flags & IFF_UP) != 0) {
+ if (ifp->if_ioctl) {
+ ifp->if_flags &= ~IFF_UP;
+ ifr.ifr_flags = ifp->if_flags & 0xffff;
+ ifr.ifr_flagshigh = ifp->if_flags >> 16;
+ (*ifp->if_ioctl)(ifp, SIOCSIFFLAGS, (caddr_t)&ifr);
+ ifp->if_flags |= IFF_UP;
+ ifr.ifr_flags = ifp->if_flags & 0xffff;
+ ifr.ifr_flagshigh = ifp->if_flags >> 16;
+ (*ifp->if_ioctl)(ifp, SIOCSIFFLAGS, (caddr_t)&ifr);
+ }
+#ifdef INET
+ /*
+ * Also send gratuitous ARPs to notify other nodes about
+ * the address change.
+ */
+ TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
+ if (ifa->ifa_addr->sa_family == AF_INET)
+ arp_ifinit(ifp, ifa);
+ }
+#endif
+ }
+ return (0);
+}
+
+/*
+ * The name argument must be a pointer to storage which will last as
+ * long as the interface does. For physical devices, the result of
+ * device_get_name(dev) is a good choice and for pseudo-devices a
+ * static string works well.
+ */
+void
+if_initname(struct ifnet *ifp, const char *name, int unit)
+{
+ ifp->if_dname = name;
+ ifp->if_dunit = unit;
+ if (unit != IF_DUNIT_NONE)
+ snprintf(ifp->if_xname, IFNAMSIZ, "%s%d", name, unit);
+ else
+ strlcpy(ifp->if_xname, name, IFNAMSIZ);
+}
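+
+/*
+ * Editor's note: the canonical attach-time use of if_initname(); with a
+ * newbus device both the name string and the unit outlive the ifnet,
+ * satisfying the lifetime rule above.  "xx_attach" is hypothetical and
+ * the remaining driver initialization is elided.
+ */
+static int
+xx_attach(device_t dev)
+{
+	struct ifnet *ifp;
+
+	ifp = if_alloc(IFT_ETHER);
+	if (ifp == NULL)
+		return (ENOSPC);
+	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
+	/* ... driver setup and ether_ifattach() would follow ... */
+	return (0);
+}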
+
+int
+if_printf(struct ifnet *ifp, const char * fmt, ...)
+{
+ va_list ap;
+ int retval;
+
+ retval = printf("%s: ", ifp->if_xname);
+ va_start(ap, fmt);
+ retval += vprintf(fmt, ap);
+ va_end(ap);
+ return (retval);
+}
+
+void
+if_start(struct ifnet *ifp)
+{
+
+ (*(ifp)->if_start)(ifp);
+}
+
+/*
+ * Backwards compatibility interface for drivers
+ * that have not implemented if_transmit.
+ */
+static int
+if_transmit(struct ifnet *ifp, struct mbuf *m)
+{
+ int error;
+
+ IFQ_HANDOFF(ifp, m, error);
+ return (error);
+}
+
+int
+if_handoff(struct ifqueue *ifq, struct mbuf *m, struct ifnet *ifp, int adjust)
+{
+ int active = 0;
+
+ IF_LOCK(ifq);
+ if (_IF_QFULL(ifq)) {
+ _IF_DROP(ifq);
+ IF_UNLOCK(ifq);
+ m_freem(m);
+ return (0);
+ }
+ if (ifp != NULL) {
+ ifp->if_obytes += m->m_pkthdr.len + adjust;
+ if (m->m_flags & (M_BCAST|M_MCAST))
+ ifp->if_omcasts++;
+ active = ifp->if_drv_flags & IFF_DRV_OACTIVE;
+ }
+ _IF_ENQUEUE(ifq, m);
+ IF_UNLOCK(ifq);
+ if (ifp != NULL && !active)
+ (*(ifp)->if_start)(ifp);
+ return (1);
+}
+
+void
+if_register_com_alloc(u_char type,
+ if_com_alloc_t *a, if_com_free_t *f)
+{
+
+ KASSERT(if_com_alloc[type] == NULL,
+ ("if_register_com_alloc: %d already registered", type));
+ KASSERT(if_com_free[type] == NULL,
+ ("if_register_com_alloc: %d free already registered", type));
+
+ if_com_alloc[type] = a;
+ if_com_free[type] = f;
+}
+
+void
+if_deregister_com_alloc(u_char type)
+{
+
+ KASSERT(if_com_alloc[type] != NULL,
+ ("if_deregister_com_alloc: %d not registered", type));
+ KASSERT(if_com_free[type] != NULL,
+ ("if_deregister_com_alloc: %d free not registered", type));
+ if_com_alloc[type] = NULL;
+ if_com_free[type] = NULL;
+}
diff --git a/rtems/freebsd/net/if.h b/rtems/freebsd/net/if.h
new file mode 100644
index 00000000..02489aab
--- /dev/null
+++ b/rtems/freebsd/net/if.h
@@ -0,0 +1,470 @@
+/*-
+ * Copyright (c) 1982, 1986, 1989, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)if.h 8.1 (Berkeley) 6/10/93
+ * $FreeBSD$
+ */
+
+#ifndef _NET_IF_HH_
+#define _NET_IF_HH_
+
+#include <rtems/freebsd/sys/cdefs.h>
+
+#ifdef _KERNEL
+#include <rtems/freebsd/sys/queue.h>
+#endif
+
+#if __BSD_VISIBLE
+
+#ifdef __rtems__
+#include <rtems/freebsd/sys/time.h>
+#else
+#ifndef _KERNEL
+/*
+ * <net/if.h> does not depend on <sys/time.h> on most other systems. This
+ * helps userland compatibility. (struct timeval ifi_lastchange)
+ */
+#include <rtems/freebsd/sys/time.h>
+#endif
+#endif /* __rtems__ */
+
+struct ifnet;
+#endif
+
+/*
+ * Length of interface external name, including terminating '\0'.
+ * Note: this is the same size as a generic device's external name.
+ */
+#define IF_NAMESIZE 16
+#if __BSD_VISIBLE
+#define IFNAMSIZ IF_NAMESIZE
+#define IF_MAXUNIT 0x7fff /* historical value */
+#endif
+#if __BSD_VISIBLE
+
+/*
+ * Structure used to query names of interface cloners.
+ */
+
+struct if_clonereq {
+ int ifcr_total; /* total cloners (out) */
+ int ifcr_count; /* room for this many in user buffer */
+ char *ifcr_buffer; /* buffer for cloner names */
+};
+
+/*
+ * Structure describing information about an interface
+ * which may be of interest to management entities.
+ */
+struct if_data {
+ /* generic interface information */
+ u_char ifi_type; /* ethernet, tokenring, etc */
+ u_char ifi_physical; /* e.g., AUI, Thinnet, 10base-T, etc */
+ u_char ifi_addrlen; /* media address length */
+ u_char ifi_hdrlen; /* media header length */
+ u_char ifi_link_state; /* current link state */
+ u_char ifi_spare_char1; /* spare byte */
+ u_char ifi_spare_char2; /* spare byte */
+ u_char ifi_datalen; /* length of this data struct */
+ u_long ifi_mtu; /* maximum transmission unit */
+ u_long ifi_metric; /* routing metric (external only) */
+ u_long ifi_baudrate; /* linespeed */
+ /* volatile statistics */
+ u_long ifi_ipackets; /* packets received on interface */
+ u_long ifi_ierrors; /* input errors on interface */
+ u_long ifi_opackets; /* packets sent on interface */
+ u_long ifi_oerrors; /* output errors on interface */
+ u_long ifi_collisions; /* collisions on csma interfaces */
+ u_long ifi_ibytes; /* total number of octets received */
+ u_long ifi_obytes; /* total number of octets sent */
+ u_long ifi_imcasts; /* packets received via multicast */
+ u_long ifi_omcasts; /* packets sent via multicast */
+ u_long ifi_iqdrops; /* dropped on input, this interface */
+ u_long ifi_noproto; /* destined for unsupported protocol */
+ u_long ifi_hwassist; /* HW offload capabilities, see IFCAP */
+ time_t ifi_epoch; /* uptime at attach or stat reset */
+ struct timeval ifi_lastchange; /* time of last administrative change */
+};
+
+/*-
+ * Interface flags are of two types: network stack owned flags, and driver
+ * owned flags. Historically, these values were stored in the same ifnet
+ * flags field, but with the advent of fine-grained locking, they have been
+ * broken out such that the network stack is responsible for synchronizing
+ * the stack-owned fields, and the device driver the device-owned fields.
+ * Both halves can perform lockless reads of the other half's field, subject
+ * to accepting the involved races.
+ *
+ * Both sets of flags come from the same number space, and should not be
+ * permitted to conflict, as they are exposed to user space via a single
+ * field.
+ *
+ * The following symbols identify read and write requirements for fields:
+ *
+ * (i) if_flags field set by device driver before attach, read-only
+ * thereafter.
+ * (n) if_flags field written only by the network stack, read by either the
+ * stack or driver.
+ * (d) if_drv_flags field written only by the device driver, read by either
+ * the stack or driver.
+ */
+#define IFF_UP 0x1 /* (n) interface is up */
+#define IFF_BROADCAST 0x2 /* (i) broadcast address valid */
+#define IFF_DEBUG 0x4 /* (n) turn on debugging */
+#define IFF_LOOPBACK 0x8 /* (i) is a loopback net */
+#define IFF_POINTOPOINT 0x10 /* (i) is a point-to-point link */
+#define IFF_SMART 0x20 /* (i) interface manages own routes */
+#define IFF_DRV_RUNNING 0x40 /* (d) resources allocated */
+#define IFF_NOARP 0x80 /* (n) no address resolution protocol */
+#define IFF_PROMISC 0x100 /* (n) receive all packets */
+#define IFF_ALLMULTI 0x200 /* (n) receive all multicast packets */
+#define IFF_DRV_OACTIVE 0x400 /* (d) tx hardware queue is full */
+#define IFF_SIMPLEX 0x800 /* (i) can't hear own transmissions */
+#define IFF_LINK0 0x1000 /* per link layer defined bit */
+#define IFF_LINK1 0x2000 /* per link layer defined bit */
+#define IFF_LINK2 0x4000 /* per link layer defined bit */
+#define IFF_ALTPHYS IFF_LINK2 /* use alternate physical connection */
+#define IFF_MULTICAST 0x8000 /* (i) supports multicast */
+/* 0x10000 */
+#define IFF_PPROMISC 0x20000 /* (n) user-requested promisc mode */
+#define IFF_MONITOR 0x40000 /* (n) user-requested monitor mode */
+#define IFF_STATICARP 0x80000 /* (n) static ARP */
+#define IFF_DYING 0x200000 /* (n) interface is winding down */
+#define IFF_RENAMING 0x400000 /* (n) interface is being renamed */
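+
+/*
+ * Editor's note: illustrative driver-side use of the split described
+ * above; the function name is hypothetical.  The driver writes only the
+ * (d) bits in if_drv_flags, under its own lock, and never if_flags.
+ */
+#ifdef _KERNEL
+static __inline void
+xx_mark_running(struct ifnet *ifp)
+{
+	ifp->if_drv_flags |= IFF_DRV_RUNNING;	/* (d) resources ready */
+	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;	/* (d) tx queue has room */
+}
+#endif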
+
+/*
+ * Old names for driver flags so that user space tools can continue to use
+ * the old (portable) names.
+ */
+#ifndef _KERNEL
+#define IFF_RUNNING IFF_DRV_RUNNING
+#define IFF_OACTIVE IFF_DRV_OACTIVE
+#endif
+
+/* flags set internally only: */
+#define IFF_CANTCHANGE \
+ (IFF_BROADCAST|IFF_POINTOPOINT|IFF_DRV_RUNNING|IFF_DRV_OACTIVE|\
+ IFF_SIMPLEX|IFF_MULTICAST|IFF_ALLMULTI|IFF_SMART|IFF_PROMISC|\
+ IFF_DYING)
+
+/*
+ * Values for if_link_state.
+ */
+#define LINK_STATE_UNKNOWN 0 /* link invalid/unknown */
+#define LINK_STATE_DOWN 1 /* link is down */
+#define LINK_STATE_UP 2 /* link is up */
+
+/*
+ * Some convenience macros used for setting ifi_baudrate.
+ * XXX 1000 vs. 1024? --thorpej@netbsd.org
+ */
+#define IF_Kbps(x) ((x) * 1000) /* kilobits/sec. */
+#define IF_Mbps(x) (IF_Kbps((x) * 1000)) /* megabits/sec. */
+#define IF_Gbps(x) (IF_Mbps((x) * 1000)) /* gigabits/sec. */
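+
+/*
+ * Editor's note: worked examples of the macros above.  A fast-ethernet
+ * driver would set if_baudrate to IF_Mbps(100), which expands to
+ * 100 * 1000 * 1000 = 100000000 bits/sec; IF_Gbps(1) likewise yields
+ * 1000000000.
+ */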
+
+/*
+ * Capabilities that interfaces can advertise.
+ *
+ * struct ifnet.if_capabilities
+ * contains the optional features & capabilities a particular interface
+ * supports (not only the driver but also the detected hw revision).
+ * Capabilities are defined by IFCAP_* below.
+ * struct ifnet.if_capenable
+ * contains the enabled (either by default or through ifconfig) optional
+ * features & capabilities on this interface.
+ * Capabilities are defined by IFCAP_* below.
+ * struct if_data.ifi_hwassist in mbuf CSUM_ flag form, controlled by above
+ *	contains the enabled optional features & capabilities that can be used
+ *	individually per packet and are specified in the mbuf pkthdr.csum_flags
+ *	field.  IFCAP_* and CSUM_* do not match one to one and CSUM_* may be
+ *	more detailed or differentiated than IFCAP_*.
+ *	Hwassist features are defined as CSUM_* in sys/mbuf.h.
+ */
+#define IFCAP_RXCSUM 0x00001 /* can offload checksum on RX */
+#define IFCAP_TXCSUM 0x00002 /* can offload checksum on TX */
+#define IFCAP_NETCONS 0x00004 /* can be a network console */
+#define IFCAP_VLAN_MTU 0x00008 /* VLAN-compatible MTU */
+#define IFCAP_VLAN_HWTAGGING 0x00010 /* hardware VLAN tag support */
+#define IFCAP_JUMBO_MTU 0x00020 /* 9000 byte MTU supported */
+#define IFCAP_POLLING 0x00040 /* driver supports polling */
+#define IFCAP_VLAN_HWCSUM 0x00080 /* can do IFCAP_HWCSUM on VLANs */
+#define IFCAP_TSO4 0x00100 /* can do TCP Segmentation Offload */
+#define IFCAP_TSO6 0x00200 /* can do TCP6 Segmentation Offload */
+#define IFCAP_LRO 0x00400 /* can do Large Receive Offload */
+#define IFCAP_WOL_UCAST 0x00800 /* wake on any unicast frame */
+#define IFCAP_WOL_MCAST 0x01000 /* wake on any multicast frame */
+#define IFCAP_WOL_MAGIC 0x02000 /* wake on any Magic Packet */
+#define IFCAP_TOE4 0x04000 /* interface can offload TCP */
+#define IFCAP_TOE6 0x08000 /* interface can offload TCP6 */
+#define IFCAP_VLAN_HWFILTER 0x10000 /* interface hw can filter vlan tag */
+#define IFCAP_POLLING_NOCOUNT 0x20000 /* polling ticks cannot be fragmented */
+#define IFCAP_VLAN_HWTSO 0x40000 /* can do IFCAP_TSO on VLANs */
+#define IFCAP_LINKSTATE 0x80000 /* the runtime link state is dynamic */
+
+#define IFCAP_HWCSUM (IFCAP_RXCSUM | IFCAP_TXCSUM)
+#define IFCAP_TSO (IFCAP_TSO4 | IFCAP_TSO6)
+#define IFCAP_WOL (IFCAP_WOL_UCAST | IFCAP_WOL_MCAST | IFCAP_WOL_MAGIC)
+#define IFCAP_TOE (IFCAP_TOE4 | IFCAP_TOE6)
+
+#define IFQ_MAXLEN 50
+#define IFNET_SLOWHZ 1 /* granularity is 1 second */
+
+/*
+ * Message format for use in obtaining information about interfaces
+ * from getkerninfo and the routing socket
+ */
+struct if_msghdr {
+ u_short ifm_msglen; /* to skip over non-understood messages */
+ u_char ifm_version; /* future binary compatibility */
+ u_char ifm_type; /* message type */
+ int ifm_addrs; /* like rtm_addrs */
+ int ifm_flags; /* value of if_flags */
+ u_short ifm_index; /* index for associated ifp */
+ struct if_data ifm_data;/* statistics and other data about if */
+};
+
+/*
+ * Message format for use in obtaining information about interface addresses
+ * from getkerninfo and the routing socket
+ */
+struct ifa_msghdr {
+ u_short ifam_msglen; /* to skip over non-understood messages */
+ u_char ifam_version; /* future binary compatibility */
+ u_char ifam_type; /* message type */
+ int ifam_addrs; /* like rtm_addrs */
+ int ifam_flags; /* value of ifa_flags */
+ u_short ifam_index; /* index for associated ifp */
+ int ifam_metric; /* value of ifa_metric */
+};
+
+/*
+ * Message format for use in obtaining information about multicast addresses
+ * from the routing socket
+ */
+struct ifma_msghdr {
+ u_short ifmam_msglen; /* to skip over non-understood messages */
+ u_char ifmam_version; /* future binary compatibility */
+ u_char ifmam_type; /* message type */
+ int ifmam_addrs; /* like rtm_addrs */
+ int ifmam_flags; /* value of ifa_flags */
+ u_short ifmam_index; /* index for associated ifp */
+};
+
+/*
+ * Message format announcing the arrival or departure of a network interface.
+ */
+struct if_announcemsghdr {
+ u_short ifan_msglen; /* to skip over non-understood messages */
+ u_char ifan_version; /* future binary compatibility */
+ u_char ifan_type; /* message type */
+ u_short ifan_index; /* index for associated ifp */
+ char ifan_name[IFNAMSIZ]; /* if name, e.g. "en0" */
+ u_short ifan_what; /* what type of announcement */
+};
+
+#define IFAN_ARRIVAL 0 /* interface arrival */
+#define IFAN_DEPARTURE 1 /* interface departure */
+
+/*
+ * Buffer with length to be used in SIOCGIFDESCR/SIOCSIFDESCR requests
+ */
+struct ifreq_buffer {
+ size_t length;
+ void *buffer;
+};
+
+/*
+ * Interface request structure used for socket
+ * ioctl's. All interface ioctl's must have parameter
+ * definitions which begin with ifr_name. The
+ * remainder may be interface specific.
+ */
+struct ifreq {
+ char ifr_name[IFNAMSIZ]; /* if name, e.g. "en0" */
+ union {
+ struct sockaddr ifru_addr;
+ struct sockaddr ifru_dstaddr;
+ struct sockaddr ifru_broadaddr;
+ struct ifreq_buffer ifru_buffer;
+ short ifru_flags[2];
+ short ifru_index;
+ int ifru_jid;
+ int ifru_metric;
+ int ifru_mtu;
+ int ifru_phys;
+ int ifru_media;
+ caddr_t ifru_data;
+ int ifru_cap[2];
+ } ifr_ifru;
+#define ifr_addr ifr_ifru.ifru_addr /* address */
+#define ifr_dstaddr ifr_ifru.ifru_dstaddr /* other end of p-to-p link */
+#define ifr_broadaddr ifr_ifru.ifru_broadaddr /* broadcast address */
+#define ifr_buffer ifr_ifru.ifru_buffer /* user supplied buffer with its length */
+#define ifr_flags ifr_ifru.ifru_flags[0] /* flags (low 16 bits) */
+#define ifr_flagshigh ifr_ifru.ifru_flags[1] /* flags (high 16 bits) */
+#define ifr_jid ifr_ifru.ifru_jid /* jail/vnet */
+#define ifr_metric ifr_ifru.ifru_metric /* metric */
+#define ifr_mtu ifr_ifru.ifru_mtu /* mtu */
+#define ifr_phys ifr_ifru.ifru_phys /* physical wire */
+#define ifr_media ifr_ifru.ifru_media /* physical media */
+#define ifr_data ifr_ifru.ifru_data /* for use by interface */
+#define ifr_reqcap ifr_ifru.ifru_cap[0] /* requested capabilities */
+#define ifr_curcap ifr_ifru.ifru_cap[1] /* current capabilities */
+#define ifr_index ifr_ifru.ifru_index /* interface index */
+};
+
+#define _SIZEOF_ADDR_IFREQ(ifr) \
+ ((ifr).ifr_addr.sa_len > sizeof(struct sockaddr) ? \
+ (sizeof(struct ifreq) - sizeof(struct sockaddr) + \
+ (ifr).ifr_addr.sa_len) : sizeof(struct ifreq))
+
+struct ifaliasreq {
+ char ifra_name[IFNAMSIZ]; /* if name, e.g. "en0" */
+ struct sockaddr ifra_addr;
+ struct sockaddr ifra_broadaddr;
+ struct sockaddr ifra_mask;
+};
+
+struct ifmediareq {
+ char ifm_name[IFNAMSIZ]; /* if name, e.g. "en0" */
+ int ifm_current; /* current media options */
+ int ifm_mask; /* don't care mask */
+ int ifm_status; /* media status */
+ int ifm_active; /* active options */
+ int ifm_count; /* # entries in ifm_ulist array */
+ int *ifm_ulist; /* media words */
+};
+
+struct ifdrv {
+ char ifd_name[IFNAMSIZ]; /* if name, e.g. "en0" */
+ unsigned long ifd_cmd;
+ size_t ifd_len;
+ void *ifd_data;
+};
+
+/*
+ * Structure used to retrieve aux status data from interfaces.
+ * Kernel code supplying this data should respect the formatting
+ * needed by ifconfig(8): each line starts with a TAB and ends with
+ * a newline. The canonical example to copy and paste is in if_tun.c.
+ */
+
+#define IFSTATMAX 800 /* 10 lines of text */
+struct ifstat {
+ char ifs_name[IFNAMSIZ]; /* if name, e.g. "en0" */
+ char ascii[IFSTATMAX + 1];
+};
+
+/*
+ * Structure used in SIOCGIFCONF request.
+ * Used to retrieve interface configuration
+ * for machine (useful for programs which
+ * must know all networks accessible).
+ */
+struct ifconf {
+ int ifc_len; /* size of associated buffer */
+ union {
+ caddr_t ifcu_buf;
+ struct ifreq *ifcu_req;
+ } ifc_ifcu;
+#define ifc_buf ifc_ifcu.ifcu_buf /* buffer address */
+#define ifc_req ifc_ifcu.ifcu_req /* array of structures returned */
+};
+
+/*
+ * interface groups
+ */
+
+#define IFG_ALL "all" /* group contains all interfaces */
+/* XXX: will we implement this? */
+#define IFG_EGRESS "egress" /* if(s) default route(s) point to */
+
+struct ifg_req {
+ union {
+ char ifgrqu_group[IFNAMSIZ];
+ char ifgrqu_member[IFNAMSIZ];
+ } ifgrq_ifgrqu;
+#define ifgrq_group ifgrq_ifgrqu.ifgrqu_group
+#define ifgrq_member ifgrq_ifgrqu.ifgrqu_member
+};
+
+/*
+ * Used to lookup groups for an interface
+ */
+struct ifgroupreq {
+ char ifgr_name[IFNAMSIZ];
+ u_int ifgr_len;
+ union {
+ char ifgru_group[IFNAMSIZ];
+ struct ifg_req *ifgru_groups;
+ } ifgr_ifgru;
+#define ifgr_group ifgr_ifgru.ifgru_group
+#define ifgr_groups ifgr_ifgru.ifgru_groups
+};
+
+/*
+ * Structure for SIOC[AGD]LIFADDR
+ */
+struct if_laddrreq {
+ char iflr_name[IFNAMSIZ];
+ u_int flags;
+#define IFLR_PREFIX 0x8000 /* in: prefix given out: kernel fills id */
+ u_int prefixlen; /* in/out */
+ struct sockaddr_storage addr; /* in/out */
+ struct sockaddr_storage dstaddr; /* out */
+};
+
+#endif /* __BSD_VISIBLE */
+
+#ifdef _KERNEL
+#ifdef MALLOC_DECLARE
+MALLOC_DECLARE(M_IFADDR);
+MALLOC_DECLARE(M_IFMADDR);
+#endif
+#endif
+
+#ifndef _KERNEL
+struct if_nameindex {
+ unsigned int if_index; /* 1, 2, ... */
+ char *if_name; /* null terminated name: "le0", ... */
+};
+
+__BEGIN_DECLS
+void if_freenameindex(struct if_nameindex *);
+char *if_indextoname(unsigned int, char *);
+struct if_nameindex *if_nameindex(void);
+unsigned int if_nametoindex(const char *);
+__END_DECLS
+#endif
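+
+/*
+ * Usage sketch for the RFC 3493 name/index API above (illustrative):
+ * the returned array is terminated by an entry with if_index == 0 and
+ * if_name == NULL, and must be released with if_freenameindex().
+ *
+ *	struct if_nameindex *ni, *p;
+ *
+ *	if ((ni = if_nameindex()) != NULL) {
+ *		for (p = ni; p->if_index != 0 && p->if_name != NULL; p++)
+ *			printf("%u: %s\n", p->if_index, p->if_name);
+ *		if_freenameindex(ni);
+ *	}
+ */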
+
+#ifdef _KERNEL
+/* XXX - this should go away soon. */
+#include <rtems/freebsd/net/if_var.h>
+#endif
+
+#endif /* !_NET_IF_HH_ */
diff --git a/rtems/freebsd/net/if_arc.h b/rtems/freebsd/net/if_arc.h
new file mode 100644
index 00000000..6be5d4e1
--- /dev/null
+++ b/rtems/freebsd/net/if_arc.h
@@ -0,0 +1,143 @@
+/* $NetBSD: if_arc.h,v 1.13 1999/11/19 20:41:19 thorpej Exp $ */
+/* $FreeBSD$ */
+
+/*-
+ * Copyright (c) 1982, 1986, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: NetBSD: if_ether.h,v 1.10 1994/06/29 06:37:55 cgd Exp
+ * @(#)if_ether.h 8.1 (Berkeley) 6/10/93
+ */
+
+#ifndef _NET_IF_ARC_HH_
+#define _NET_IF_ARC_HH_
+
+/*
+ * ARCnet address - 1 octet.
+ * It is unclear who uses this.
+ */
+struct arc_addr {
+ u_int8_t arc_addr_octet[1];
+} __packed;
+
+/*
+ * Structure of a 2.5 Mb/s ARCnet header,
+ * as given to the interface code.
+ */
+struct arc_header {
+ u_int8_t arc_shost;
+ u_int8_t arc_dhost;
+ u_int8_t arc_type;
+ /*
+	 * Only present for new-style encoding with LL fragmentation.
+	 * Don't use sizeof(anything); use ARC_HDR{,NEW}LEN instead.
+ */
+ u_int8_t arc_flag;
+ u_int16_t arc_seqid;
+
+ /*
+ * only present in exception packets (arc_flag == 0xff)
+ */
+ u_int8_t arc_type2; /* same as arc_type */
+ u_int8_t arc_flag2; /* real flag value */
+ u_int16_t arc_seqid2; /* real seqid value */
+} __packed;
+
+#define ARC_ADDR_LEN 1
+
+#define ARC_HDRLEN 3
+#define ARC_HDRNEWLEN 6
+#define ARC_HDRNEWLEN_EXC 10
+
+/* these lengths are data link layer length - 2 * ARC_ADDR_LEN */
+#define ARC_MIN_LEN 1
+#define ARC_MIN_FORBID_LEN 254
+#define ARC_MAX_FORBID_LEN 256
+#define ARC_MAX_LEN 508
+#define ARC_MAX_DATA 504
+
+/* RFC 1051 */
+#define ARCTYPE_IP_OLD 240 /* IP protocol */
+#define ARCTYPE_ARP_OLD 241 /* address resolution protocol */
+
+/* RFC 1201 */
+#define ARCTYPE_IP 212 /* IP protocol */
+#define ARCTYPE_ARP 213 /* address resolution protocol */
+#define ARCTYPE_REVARP 214 /* reverse addr resolution protocol */
+
+#define ARCTYPE_ATALK 221 /* Appletalk */
+#define ARCTYPE_BANIAN 247 /* Banyan Vines */
+#define ARCTYPE_IPX 250 /* Novell IPX */
+
+#define ARCTYPE_INET6 0xc4 /* IPng */
+#define ARCTYPE_DIAGNOSE 0x80 /* as per ANSI/ATA 878.1 */
+
+#define ARCMTU 507
+#define ARCMIN 0
+
+#define ARC_PHDS_MAXMTU 60480
+
+struct arccom {
+ struct ifnet *ac_ifp; /* network-visible interface */
+
+ u_int16_t ac_seqid; /* seq. id used by PHDS encap. */
+
+ u_int8_t arc_shost;
+ u_int8_t arc_dhost;
+ u_int8_t arc_type;
+
+ u_int8_t dummy0;
+ u_int16_t dummy1;
+ int sflag, fsflag, rsflag;
+ struct mbuf *curr_frag;
+
+ struct ac_frag {
+ u_int8_t af_maxflag; /* from first packet */
+ u_int8_t af_lastseen; /* last split flag seen */
+ u_int16_t af_seqid;
+ struct mbuf *af_packet;
+ } ac_fragtab[256]; /* indexed by sender ll address */
+};
+
+#ifdef _KERNEL
+extern u_int8_t arcbroadcastaddr;
+extern int arc_ipmtu; /* XXX new ip only, no RFC 1051! */
+
+void arc_ifattach(struct ifnet *, u_int8_t);
+void arc_ifdetach(struct ifnet *);
+void arc_storelladdr(struct ifnet *, u_int8_t);
+int arc_isphds(u_int8_t);
+void arc_input(struct ifnet *, struct mbuf *);
+int arc_output(struct ifnet *, struct mbuf *,
+ struct sockaddr *, struct route *);
+int arc_ioctl(struct ifnet *, u_long, caddr_t);
+
+void arc_frag_init(struct ifnet *);
+struct mbuf * arc_frag_next(struct ifnet *);
+#endif
+
+#endif /* _NET_IF_ARC_HH_ */
diff --git a/rtems/freebsd/net/if_arcsubr.c b/rtems/freebsd/net/if_arcsubr.c
new file mode 100644
index 00000000..415e6487
--- /dev/null
+++ b/rtems/freebsd/net/if_arcsubr.c
@@ -0,0 +1,886 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/* $NetBSD: if_arcsubr.c,v 1.36 2001/06/14 05:44:23 itojun Exp $ */
+/* $FreeBSD$ */
+
+/*-
+ * Copyright (c) 1994, 1995 Ignatios Souvatzis
+ * Copyright (c) 1982, 1989, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: NetBSD: if_ethersubr.c,v 1.9 1994/06/29 06:36:11 cgd Exp
+ * @(#)if_ethersubr.c 8.1 (Berkeley) 6/10/93
+ *
+ */
+#include <rtems/freebsd/local/opt_inet.h>
+#include <rtems/freebsd/local/opt_inet6.h>
+#include <rtems/freebsd/local/opt_ipx.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/module.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/protosw.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/sockio.h>
+#include <rtems/freebsd/sys/errno.h>
+#include <rtems/freebsd/sys/syslog.h>
+
+#include <rtems/freebsd/machine/cpu.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/netisr.h>
+#include <rtems/freebsd/net/route.h>
+#include <rtems/freebsd/net/if_dl.h>
+#include <rtems/freebsd/net/if_types.h>
+#include <rtems/freebsd/net/if_arc.h>
+#include <rtems/freebsd/net/if_arp.h>
+#include <rtems/freebsd/net/bpf.h>
+#include <rtems/freebsd/net/if_llatbl.h>
+
+#if defined(INET) || defined(INET6)
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/in_var.h>
+#include <rtems/freebsd/netinet/if_ether.h>
+#endif
+
+#ifdef INET6
+#include <rtems/freebsd/netinet6/nd6.h>
+#endif
+
+#ifdef IPX
+#include <rtems/freebsd/netipx/ipx.h>
+#include <rtems/freebsd/netipx/ipx_if.h>
+#endif
+
+#define ARCNET_ALLOW_BROKEN_ARP
+
+static struct mbuf *arc_defrag(struct ifnet *, struct mbuf *);
+static int arc_resolvemulti(struct ifnet *, struct sockaddr **,
+ struct sockaddr *);
+
+u_int8_t arcbroadcastaddr = 0;
+
+#define ARC_LLADDR(ifp) (*(u_int8_t *)IF_LLADDR(ifp))
+
+#define senderr(e) { error = (e); goto bad;}
+#define SIN(s) ((struct sockaddr_in *)s)
+#define SIPX(s) ((struct sockaddr_ipx *)s)
+
+/*
+ * ARCnet output routine.
+ * Encapsulate a packet of type family for the local net.
+ * Assumes that ifp is actually a pointer to an arccom structure.
+ */
+int
+arc_output(struct ifnet *ifp, struct mbuf *m, struct sockaddr *dst,
+ struct route *ro)
+{
+ struct arc_header *ah;
+ int error;
+ u_int8_t atype, adst;
+ int loop_copy = 0;
+ int isphds;
+#if defined(INET) || defined(INET6)
+ struct llentry *lle;
+#endif
+
+ if (!((ifp->if_flags & IFF_UP) &&
+ (ifp->if_drv_flags & IFF_DRV_RUNNING)))
+ return(ENETDOWN); /* m, m1 aren't initialized yet */
+
+ error = 0;
+
+ switch (dst->sa_family) {
+#ifdef INET
+ case AF_INET:
+
+ /*
+ * For now, use the simple IP addr -> ARCnet addr mapping
+ */
+ if (m->m_flags & (M_BCAST|M_MCAST))
+ adst = arcbroadcastaddr; /* ARCnet broadcast address */
+ else if (ifp->if_flags & IFF_NOARP)
+ adst = ntohl(SIN(dst)->sin_addr.s_addr) & 0xFF;
+ else {
+ error = arpresolve(ifp, ro ? ro->ro_rt : NULL,
+ m, dst, &adst, &lle);
+ if (error)
+ return (error == EWOULDBLOCK ? 0 : error);
+ }
+
+ atype = (ifp->if_flags & IFF_LINK0) ?
+ ARCTYPE_IP_OLD : ARCTYPE_IP;
+ break;
+ case AF_ARP:
+ {
+ struct arphdr *ah;
+ ah = mtod(m, struct arphdr *);
+ ah->ar_hrd = htons(ARPHRD_ARCNET);
+
+ loop_copy = -1; /* if this is for us, don't do it */
+
+ switch(ntohs(ah->ar_op)) {
+ case ARPOP_REVREQUEST:
+ case ARPOP_REVREPLY:
+ atype = ARCTYPE_REVARP;
+ break;
+ case ARPOP_REQUEST:
+ case ARPOP_REPLY:
+ default:
+ atype = ARCTYPE_ARP;
+ break;
+ }
+
+ if (m->m_flags & M_BCAST)
+ bcopy(ifp->if_broadcastaddr, &adst, ARC_ADDR_LEN);
+ else
+ bcopy(ar_tha(ah), &adst, ARC_ADDR_LEN);
+
+ }
+ break;
+#endif
+#ifdef INET6
+ case AF_INET6:
+ error = nd6_storelladdr(ifp, m, dst, (u_char *)&adst, &lle);
+ if (error)
+ return (error);
+ atype = ARCTYPE_INET6;
+ break;
+#endif
+#ifdef IPX
+ case AF_IPX:
+ adst = SIPX(dst)->sipx_addr.x_host.c_host[5];
+ atype = ARCTYPE_IPX;
+ if (adst == 0xff)
+ adst = arcbroadcastaddr;
+ break;
+#endif
+
+ case AF_UNSPEC:
+ loop_copy = -1;
+ ah = (struct arc_header *)dst->sa_data;
+ adst = ah->arc_dhost;
+ atype = ah->arc_type;
+
+ if (atype == ARCTYPE_ARP) {
+ atype = (ifp->if_flags & IFF_LINK0) ?
+ ARCTYPE_ARP_OLD: ARCTYPE_ARP;
+
+#ifdef ARCNET_ALLOW_BROKEN_ARP
+ /*
+ * XXX It's not clear per RFC826 if this is needed, but
+ * "assigned numbers" say this is wrong.
+ * However, e.g., AmiTCP 3.0Beta used it... we make this
+ * switchable for emergency cases. Not perfect, but...
+ */
+ if (ifp->if_flags & IFF_LINK2)
+ mtod(m, struct arphdr *)->ar_pro = atype - 1;
+#endif
+ }
+ break;
+
+ default:
+ if_printf(ifp, "can't handle af%d\n", dst->sa_family);
+ senderr(EAFNOSUPPORT);
+ }
+
+ isphds = arc_isphds(atype);
+ M_PREPEND(m, isphds ? ARC_HDRNEWLEN : ARC_HDRLEN, M_DONTWAIT);
+ if (m == 0)
+ senderr(ENOBUFS);
+ ah = mtod(m, struct arc_header *);
+ ah->arc_type = atype;
+ ah->arc_dhost = adst;
+ ah->arc_shost = ARC_LLADDR(ifp);
+ if (isphds) {
+ ah->arc_flag = 0;
+ ah->arc_seqid = 0;
+ }
+
+ if ((ifp->if_flags & IFF_SIMPLEX) && (loop_copy != -1)) {
+ if ((m->m_flags & M_BCAST) || (loop_copy > 0)) {
+ struct mbuf *n = m_copy(m, 0, (int)M_COPYALL);
+
+ (void) if_simloop(ifp, n, dst->sa_family, ARC_HDRLEN);
+ } else if (ah->arc_dhost == ah->arc_shost) {
+ (void) if_simloop(ifp, m, dst->sa_family, ARC_HDRLEN);
+ return (0); /* XXX */
+ }
+ }
+
+ BPF_MTAP(ifp, m);
+
+ error = ifp->if_transmit(ifp, m);
+
+ return (error);
+
+bad:
+ if (m)
+ m_freem(m);
+ return (error);
+}
+
+void
+arc_frag_init(struct ifnet *ifp)
+{
+ struct arccom *ac;
+
+ ac = (struct arccom *)ifp->if_l2com;
+ ac->curr_frag = 0;
+}
+
+struct mbuf *
+arc_frag_next(struct ifnet *ifp)
+{
+ struct arccom *ac;
+ struct mbuf *m;
+ struct arc_header *ah;
+
+ ac = (struct arccom *)ifp->if_l2com;
+ if ((m = ac->curr_frag) == 0) {
+ int tfrags;
+
+ /* dequeue new packet */
+ IF_DEQUEUE(&ifp->if_snd, m);
+ if (m == 0)
+ return 0;
+
+ ah = mtod(m, struct arc_header *);
+ if (!arc_isphds(ah->arc_type))
+ return m;
+
+ ++ac->ac_seqid; /* make the seqid unique */
+ tfrags = (m->m_pkthdr.len + ARC_MAX_DATA - 1) / ARC_MAX_DATA;
+ ac->fsflag = 2 * tfrags - 3;
+ ac->sflag = 0;
+ ac->rsflag = ac->fsflag;
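+		/*
+		 * Worked example (added comment): tfrags = 3 gives
+		 * fsflag = 3. The fragments are then sent with split
+		 * flags 3 (odd, marking the first fragment), 2, and 4;
+		 * arc_defrag() below accepts each subsequent fragment
+		 * only if its flag equals lastseen + 2, and reassembly
+		 * completes once the flag exceeds the first fragment's
+		 * value (af_maxflag).
+		 */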
+ ac->arc_dhost = ah->arc_dhost;
+ ac->arc_shost = ah->arc_shost;
+ ac->arc_type = ah->arc_type;
+
+ m_adj(m, ARC_HDRNEWLEN);
+ ac->curr_frag = m;
+ }
+
+ /* split out next fragment and return it */
+ if (ac->sflag < ac->fsflag) {
+ /* we CAN'T have short packets here */
+ ac->curr_frag = m_split(m, ARC_MAX_DATA, M_DONTWAIT);
+ if (ac->curr_frag == 0) {
+ m_freem(m);
+ return 0;
+ }
+
+ M_PREPEND(m, ARC_HDRNEWLEN, M_DONTWAIT);
+ if (m == 0) {
+ m_freem(ac->curr_frag);
+ ac->curr_frag = 0;
+ return 0;
+ }
+
+ ah = mtod(m, struct arc_header *);
+ ah->arc_flag = ac->rsflag;
+ ah->arc_seqid = ac->ac_seqid;
+
+ ac->sflag += 2;
+ ac->rsflag = ac->sflag;
+ } else if ((m->m_pkthdr.len >=
+ ARC_MIN_FORBID_LEN - ARC_HDRNEWLEN + 2) &&
+ (m->m_pkthdr.len <=
+ ARC_MAX_FORBID_LEN - ARC_HDRNEWLEN + 2)) {
+ ac->curr_frag = 0;
+
+ M_PREPEND(m, ARC_HDRNEWLEN_EXC, M_DONTWAIT);
+ if (m == 0)
+ return 0;
+
+ ah = mtod(m, struct arc_header *);
+ ah->arc_flag = 0xFF;
+ ah->arc_seqid = 0xFFFF;
+ ah->arc_type2 = ac->arc_type;
+ ah->arc_flag2 = ac->sflag;
+ ah->arc_seqid2 = ac->ac_seqid;
+ } else {
+ ac->curr_frag = 0;
+
+ M_PREPEND(m, ARC_HDRNEWLEN, M_DONTWAIT);
+ if (m == 0)
+ return 0;
+
+ ah = mtod(m, struct arc_header *);
+ ah->arc_flag = ac->sflag;
+ ah->arc_seqid = ac->ac_seqid;
+ }
+
+ ah->arc_dhost = ac->arc_dhost;
+ ah->arc_shost = ac->arc_shost;
+ ah->arc_type = ac->arc_type;
+
+ return m;
+}
+
+/*
+ * Defragmenter. Returns the reassembled mbuf if the last fragment has
+ * been found, else NULL. Frees the incoming mbuf as necessary.
+ */
+
+static __inline struct mbuf *
+arc_defrag(struct ifnet *ifp, struct mbuf *m)
+{
+ struct arc_header *ah, *ah1;
+ struct arccom *ac;
+ struct ac_frag *af;
+ struct mbuf *m1;
+ char *s;
+ int newflen;
+ u_char src,dst,typ;
+
+ ac = (struct arccom *)ifp->if_l2com;
+
+ if (m->m_len < ARC_HDRNEWLEN) {
+ m = m_pullup(m, ARC_HDRNEWLEN);
+ if (m == NULL) {
+ ++ifp->if_ierrors;
+ return NULL;
+ }
+ }
+
+ ah = mtod(m, struct arc_header *);
+ typ = ah->arc_type;
+
+ if (!arc_isphds(typ))
+ return m;
+
+ src = ah->arc_shost;
+ dst = ah->arc_dhost;
+
+ if (ah->arc_flag == 0xff) {
+ m_adj(m, 4);
+
+ if (m->m_len < ARC_HDRNEWLEN) {
+ m = m_pullup(m, ARC_HDRNEWLEN);
+ if (m == NULL) {
+ ++ifp->if_ierrors;
+ return NULL;
+ }
+ }
+
+ ah = mtod(m, struct arc_header *);
+ }
+
+ af = &ac->ac_fragtab[src];
+ m1 = af->af_packet;
+ s = "debug code error";
+
+ if (ah->arc_flag & 1) {
+ /*
+ * first fragment. We always initialize, which is
+ * about the right thing to do, as we only want to
+ * accept one fragmented packet per src at a time.
+ */
+ if (m1 != NULL)
+ m_freem(m1);
+
+ af->af_packet = m;
+ m1 = m;
+ af->af_maxflag = ah->arc_flag;
+ af->af_lastseen = 0;
+ af->af_seqid = ah->arc_seqid;
+
+ return NULL;
+ /* notreached */
+ } else {
+ /* check for unfragmented packet */
+ if (ah->arc_flag == 0)
+ return m;
+
+ /* do we have a first packet from that src? */
+ if (m1 == NULL) {
+ s = "no first frag";
+ goto outofseq;
+ }
+
+ ah1 = mtod(m1, struct arc_header *);
+
+ if (ah->arc_seqid != ah1->arc_seqid) {
+ s = "seqid differs";
+ goto outofseq;
+ }
+
+ if (typ != ah1->arc_type) {
+ s = "type differs";
+ goto outofseq;
+ }
+
+ if (dst != ah1->arc_dhost) {
+ s = "dest host differs";
+ goto outofseq;
+ }
+
+ /* typ, seqid and dst are ok here. */
+
+ if (ah->arc_flag == af->af_lastseen) {
+ m_freem(m);
+ return NULL;
+ }
+
+ if (ah->arc_flag == af->af_lastseen + 2) {
+ /* ok, this is next fragment */
+ af->af_lastseen = ah->arc_flag;
+ m_adj(m,ARC_HDRNEWLEN);
+
+ /*
+ * m_cat might free the first mbuf (with pkthdr)
+ * in 2nd chain; therefore:
+ */
+
+ newflen = m->m_pkthdr.len;
+
+ m_cat(m1,m);
+
+ m1->m_pkthdr.len += newflen;
+
+ /* is it the last one? */
+ if (af->af_lastseen > af->af_maxflag) {
+ af->af_packet = NULL;
+ return(m1);
+ } else
+ return NULL;
+ }
+ s = "other reason";
+ /* if all else fails, it is out of sequence, too */
+ }
+outofseq:
+ if (m1) {
+ m_freem(m1);
+ af->af_packet = NULL;
+ }
+
+ if (m)
+ m_freem(m);
+
+ log(LOG_INFO,"%s: got out of seq. packet: %s\n",
+ ifp->if_xname, s);
+
+ return NULL;
+}
+
+/*
+ * Return 1 if the type uses the Packet Header Definition Standard
+ * (PHDS), else 0. Old IP and old ARP obviously do not. Lacking
+ * authoritative information, we guess that besides new IP and new ARP,
+ * IPX and APPLETALK are PHDS as well (Apple and Novell, among others,
+ * were involved in the PHDS work). Easiest is to assume that everybody
+ * else uses it, too.
+ */
+int
+arc_isphds(u_int8_t type)
+{
+ return (type != ARCTYPE_IP_OLD &&
+ type != ARCTYPE_ARP_OLD &&
+ type != ARCTYPE_DIAGNOSE);
+}
+
+/*
+ * Process a received Arcnet packet;
+ * the packet is in the mbuf chain m with
+ * the ARCnet header.
+ */
+void
+arc_input(struct ifnet *ifp, struct mbuf *m)
+{
+ struct arc_header *ah;
+ int isr;
+ u_int8_t atype;
+
+ if ((ifp->if_flags & IFF_UP) == 0) {
+ m_freem(m);
+ return;
+ }
+
+ /* possibly defragment: */
+ m = arc_defrag(ifp, m);
+ if (m == NULL)
+ return;
+
+ BPF_MTAP(ifp, m);
+
+ ah = mtod(m, struct arc_header *);
+ /* does this belong to us? */
+ if ((ifp->if_flags & IFF_PROMISC) == 0
+ && ah->arc_dhost != arcbroadcastaddr
+ && ah->arc_dhost != ARC_LLADDR(ifp)) {
+ m_freem(m);
+ return;
+ }
+
+ ifp->if_ibytes += m->m_pkthdr.len;
+
+ if (ah->arc_dhost == arcbroadcastaddr) {
+ m->m_flags |= M_BCAST|M_MCAST;
+ ifp->if_imcasts++;
+ }
+
+ atype = ah->arc_type;
+ switch (atype) {
+#ifdef INET
+ case ARCTYPE_IP:
+ m_adj(m, ARC_HDRNEWLEN);
+ if ((m = ip_fastforward(m)) == NULL)
+ return;
+ isr = NETISR_IP;
+ break;
+
+ case ARCTYPE_IP_OLD:
+ m_adj(m, ARC_HDRLEN);
+ if ((m = ip_fastforward(m)) == NULL)
+ return;
+ isr = NETISR_IP;
+ break;
+
+ case ARCTYPE_ARP:
+ if (ifp->if_flags & IFF_NOARP) {
+ /* Discard packet if ARP is disabled on interface */
+ m_freem(m);
+ return;
+ }
+ m_adj(m, ARC_HDRNEWLEN);
+ isr = NETISR_ARP;
+#ifdef ARCNET_ALLOW_BROKEN_ARP
+ mtod(m, struct arphdr *)->ar_pro = htons(ETHERTYPE_IP);
+#endif
+ break;
+
+ case ARCTYPE_ARP_OLD:
+ if (ifp->if_flags & IFF_NOARP) {
+ /* Discard packet if ARP is disabled on interface */
+ m_freem(m);
+ return;
+ }
+ m_adj(m, ARC_HDRLEN);
+ isr = NETISR_ARP;
+#ifdef ARCNET_ALLOW_BROKEN_ARP
+ mtod(m, struct arphdr *)->ar_pro = htons(ETHERTYPE_IP);
+#endif
+ break;
+#endif
+#ifdef INET6
+ case ARCTYPE_INET6:
+ m_adj(m, ARC_HDRNEWLEN);
+ isr = NETISR_IPV6;
+ break;
+#endif
+#ifdef IPX
+ case ARCTYPE_IPX:
+ m_adj(m, ARC_HDRNEWLEN);
+ isr = NETISR_IPX;
+ break;
+#endif
+ default:
+ m_freem(m);
+ return;
+ }
+ netisr_dispatch(isr, m);
+}
+
+/*
+ * Register (new) link level address.
+ */
+void
+arc_storelladdr(struct ifnet *ifp, u_int8_t lla)
+{
+ ARC_LLADDR(ifp) = lla;
+}
+
+/*
+ * Perform common duties while attaching to interface list
+ */
+void
+arc_ifattach(struct ifnet *ifp, u_int8_t lla)
+{
+ struct ifaddr *ifa;
+ struct sockaddr_dl *sdl;
+ struct arccom *ac;
+
+ if_attach(ifp);
+ ifp->if_addrlen = 1;
+ ifp->if_hdrlen = ARC_HDRLEN;
+ ifp->if_mtu = 1500;
+ ifp->if_resolvemulti = arc_resolvemulti;
+ if (ifp->if_baudrate == 0)
+ ifp->if_baudrate = 2500000;
+#if __FreeBSD_version < 500000
+ ifa = ifnet_addrs[ifp->if_index - 1];
+#else
+ ifa = ifp->if_addr;
+#endif
+ KASSERT(ifa != NULL, ("%s: no lladdr!\n", __func__));
+ sdl = (struct sockaddr_dl *)ifa->ifa_addr;
+ sdl->sdl_type = IFT_ARCNET;
+ sdl->sdl_alen = ifp->if_addrlen;
+
+ if (ifp->if_flags & IFF_BROADCAST)
+ ifp->if_flags |= IFF_MULTICAST|IFF_ALLMULTI;
+
+ ac = (struct arccom *)ifp->if_l2com;
+ ac->ac_seqid = (time_second) & 0xFFFF; /* try to make seqid unique */
+ if (lla == 0) {
+ /* XXX this message isn't entirely clear, to me -- cgd */
+ log(LOG_ERR,"%s: link address 0 reserved for broadcasts. Please change it and ifconfig %s down up\n",
+ ifp->if_xname, ifp->if_xname);
+ }
+ arc_storelladdr(ifp, lla);
+
+ ifp->if_broadcastaddr = &arcbroadcastaddr;
+
+ bpfattach(ifp, DLT_ARCNET, ARC_HDRLEN);
+}
+
+void
+arc_ifdetach(struct ifnet *ifp)
+{
+ bpfdetach(ifp);
+ if_detach(ifp);
+}
+
+int
+arc_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
+{
+ struct ifaddr *ifa = (struct ifaddr *) data;
+ struct ifreq *ifr = (struct ifreq *) data;
+ int error = 0;
+
+ switch (command) {
+ case SIOCSIFADDR:
+ ifp->if_flags |= IFF_UP;
+ switch (ifa->ifa_addr->sa_family) {
+#ifdef INET
+ case AF_INET:
+ ifp->if_init(ifp->if_softc); /* before arpwhohas */
+ arp_ifinit(ifp, ifa);
+ break;
+#endif
+#ifdef IPX
+ /*
+ * XXX This code is probably wrong
+ */
+ case AF_IPX:
+ {
+ struct ipx_addr *ina = &(IA_SIPX(ifa)->sipx_addr);
+
+ if (ipx_nullhost(*ina))
+ ina->x_host.c_host[5] = ARC_LLADDR(ifp);
+ else
+ arc_storelladdr(ifp, ina->x_host.c_host[5]);
+
+ /*
+ * Set new address
+ */
+ ifp->if_init(ifp->if_softc);
+ break;
+ }
+#endif
+ default:
+ ifp->if_init(ifp->if_softc);
+ break;
+ }
+ break;
+
+ case SIOCGIFADDR:
+ {
+ struct sockaddr *sa;
+
+ sa = (struct sockaddr *) &ifr->ifr_data;
+ *(u_int8_t *)sa->sa_data = ARC_LLADDR(ifp);
+ }
+ break;
+
+ case SIOCADDMULTI:
+ case SIOCDELMULTI:
+ if (ifr == NULL)
+ error = EAFNOSUPPORT;
+ else {
+ switch (ifr->ifr_addr.sa_family) {
+ case AF_INET:
+ case AF_INET6:
+ error = 0;
+ break;
+ default:
+ error = EAFNOSUPPORT;
+ break;
+ }
+ }
+ break;
+
+ case SIOCSIFMTU:
+ /*
+ * Set the interface MTU.
+		 * The MTU can't be larger than ARCMTU for RFC 1051
+		 * encapsulation, nor larger than ARC_PHDS_MAXMTU for PHDS.
+ */
+ if (((ifp->if_flags & IFF_LINK0) && ifr->ifr_mtu > ARCMTU) ||
+ ifr->ifr_mtu > ARC_PHDS_MAXMTU)
+ error = EINVAL;
+ else
+ ifp->if_mtu = ifr->ifr_mtu;
+ break;
+ }
+
+ return (error);
+}
+
+/* based on ether_resolvemulti() */
+int
+arc_resolvemulti(struct ifnet *ifp, struct sockaddr **llsa,
+ struct sockaddr *sa)
+{
+ struct sockaddr_dl *sdl;
+#ifdef INET
+ struct sockaddr_in *sin;
+#endif
+#ifdef INET6
+ struct sockaddr_in6 *sin6;
+#endif
+
+ switch(sa->sa_family) {
+ case AF_LINK:
+ /*
+ * No mapping needed. Just check that it's a valid MC address.
+ */
+ sdl = (struct sockaddr_dl *)sa;
+ if (*LLADDR(sdl) != arcbroadcastaddr)
+ return EADDRNOTAVAIL;
+ *llsa = 0;
+ return 0;
+#ifdef INET
+ case AF_INET:
+ sin = (struct sockaddr_in *)sa;
+ if (!IN_MULTICAST(ntohl(sin->sin_addr.s_addr)))
+ return EADDRNOTAVAIL;
+ sdl = malloc(sizeof *sdl, M_IFMADDR,
+ M_NOWAIT | M_ZERO);
+ if (sdl == NULL)
+ return ENOMEM;
+ sdl->sdl_len = sizeof *sdl;
+ sdl->sdl_family = AF_LINK;
+ sdl->sdl_index = ifp->if_index;
+ sdl->sdl_type = IFT_ARCNET;
+ sdl->sdl_alen = ARC_ADDR_LEN;
+ *LLADDR(sdl) = 0;
+ *llsa = (struct sockaddr *)sdl;
+ return 0;
+#endif
+#ifdef INET6
+ case AF_INET6:
+ sin6 = (struct sockaddr_in6 *)sa;
+ if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
+ /*
+ * An IP6 address of 0 means listen to all
+			 * of the link-layer multicast addresses used for IP6.
+ * (This is used for multicast routers.)
+ */
+ ifp->if_flags |= IFF_ALLMULTI;
+ *llsa = 0;
+ return 0;
+ }
+ if (!IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr))
+ return EADDRNOTAVAIL;
+ sdl = malloc(sizeof *sdl, M_IFMADDR,
+ M_NOWAIT | M_ZERO);
+ if (sdl == NULL)
+ return ENOMEM;
+ sdl->sdl_len = sizeof *sdl;
+ sdl->sdl_family = AF_LINK;
+ sdl->sdl_index = ifp->if_index;
+ sdl->sdl_type = IFT_ARCNET;
+ sdl->sdl_alen = ARC_ADDR_LEN;
+ *LLADDR(sdl) = 0;
+ *llsa = (struct sockaddr *)sdl;
+ return 0;
+#endif
+
+ default:
+ /*
+		 * EAFNOSUPPORT: the error text isn't quite right for
+		 * this case, but it's the name that counts...
+ */
+ return EAFNOSUPPORT;
+ }
+}
+
+MALLOC_DEFINE(M_ARCCOM, "arccom", "ARCNET interface internals");
+
+static void*
+arc_alloc(u_char type, struct ifnet *ifp)
+{
+ struct arccom *ac;
+
+ ac = malloc(sizeof(struct arccom), M_ARCCOM, M_WAITOK | M_ZERO);
+ ac->ac_ifp = ifp;
+
+ return (ac);
+}
+
+static void
+arc_free(void *com, u_char type)
+{
+
+ free(com, M_ARCCOM);
+}
+
+static int
+arc_modevent(module_t mod, int type, void *data)
+{
+
+ switch (type) {
+ case MOD_LOAD:
+ if_register_com_alloc(IFT_ARCNET, arc_alloc, arc_free);
+ break;
+ case MOD_UNLOAD:
+ if_deregister_com_alloc(IFT_ARCNET);
+ break;
+ default:
+ return EOPNOTSUPP;
+ }
+
+ return (0);
+}
+
+static moduledata_t arc_mod = {
+ "arcnet",
+ arc_modevent,
+ 0
+};
+
+DECLARE_MODULE(arcnet, arc_mod, SI_SUB_INIT_IF, SI_ORDER_ANY);
+MODULE_VERSION(arcnet, 1);
diff --git a/rtems/freebsd/net/if_arp.h b/rtems/freebsd/net/if_arp.h
new file mode 100644
index 00000000..d2938fbd
--- /dev/null
+++ b/rtems/freebsd/net/if_arp.h
@@ -0,0 +1,138 @@
+/*-
+ * Copyright (c) 1986, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)if_arp.h 8.1 (Berkeley) 6/10/93
+ * $FreeBSD$
+ */
+
+#ifndef _NET_IF_ARP_HH_
+#define _NET_IF_ARP_HH_
+
+/*
+ * Address Resolution Protocol.
+ *
+ * See RFC 826 for protocol description. ARP packets are variable
+ * in size; the arphdr structure defines the fixed-length portion.
+ * Protocol type values are the same as those for 10 Mb/s Ethernet.
+ * It is followed by the variable-sized fields ar_sha, ar_spa,
+ * ar_tha and ar_tpa, in that order, according to the lengths
+ * specified. Field names used correspond to RFC 826.
+ */
+struct arphdr {
+ u_short ar_hrd; /* format of hardware address */
+#define ARPHRD_ETHER 1 /* ethernet hardware format */
+#define ARPHRD_IEEE802 6 /* token-ring hardware format */
+#define ARPHRD_ARCNET 7 /* arcnet hardware format */
+#define ARPHRD_FRELAY 15 /* frame relay hardware format */
+#define ARPHRD_IEEE1394 24 /* firewire hardware format */
+ u_short ar_pro; /* format of protocol address */
+ u_char ar_hln; /* length of hardware address */
+ u_char ar_pln; /* length of protocol address */
+ u_short ar_op; /* one of: */
+#define ARPOP_REQUEST 1 /* request to resolve address */
+#define ARPOP_REPLY 2 /* response to previous request */
+#define ARPOP_REVREQUEST 3 /* request protocol address given hardware */
+#define ARPOP_REVREPLY 4 /* response giving protocol address */
+#define ARPOP_INVREQUEST 8 /* request to identify peer */
+#define ARPOP_INVREPLY 9 /* response identifying peer */
+/*
+ * The remaining fields are variable in size,
+ * according to the sizes above.
+ */
+#ifdef COMMENT_ONLY
+ u_char ar_sha[]; /* sender hardware address */
+ u_char ar_spa[]; /* sender protocol address */
+ u_char ar_tha[]; /* target hardware address */
+ u_char ar_tpa[]; /* target protocol address */
+#endif
+};
+
+#define ar_sha(ap) (((caddr_t)((ap)+1)) + 0)
+#define ar_spa(ap) (((caddr_t)((ap)+1)) + (ap)->ar_hln)
+#define ar_tha(ap) (((caddr_t)((ap)+1)) + (ap)->ar_hln + (ap)->ar_pln)
+#define ar_tpa(ap) (((caddr_t)((ap)+1)) + 2*(ap)->ar_hln + (ap)->ar_pln)
+
+#define arphdr_len2(ar_hln, ar_pln) \
+ (sizeof(struct arphdr) + 2*(ar_hln) + 2*(ar_pln))
+#define arphdr_len(ap) (arphdr_len2((ap)->ar_hln, (ap)->ar_pln))
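+
+/*
+ * Construction sketch (illustrative, not part of the original header):
+ * building an ARP request for Ethernet/IPv4 with the accessors above.
+ * ETHER_ADDR_LEN and ETHERTYPE_IP come from net/ethernet.h; "enaddr"
+ * and "target_ip" are assumed local variables.
+ *
+ *	struct arphdr *ah;
+ *	size_t len = arphdr_len2(ETHER_ADDR_LEN, sizeof(struct in_addr));
+ *
+ *	ah = malloc(len, M_TEMP, M_WAITOK | M_ZERO);
+ *	ah->ar_hrd = htons(ARPHRD_ETHER);
+ *	ah->ar_pro = htons(ETHERTYPE_IP);
+ *	ah->ar_hln = ETHER_ADDR_LEN;
+ *	ah->ar_pln = sizeof(struct in_addr);
+ *	ah->ar_op = htons(ARPOP_REQUEST);
+ *	bcopy(enaddr, ar_sha(ah), ah->ar_hln);
+ *	bcopy(&target_ip, ar_tpa(ah), ah->ar_pln);
+ */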
+
+/*
+ * ARP ioctl request
+ */
+struct arpreq {
+ struct sockaddr arp_pa; /* protocol address */
+ struct sockaddr arp_ha; /* hardware address */
+ int arp_flags; /* flags */
+};
+/* arp_flags and at_flags field values */
+#define ATF_INUSE 0x01 /* entry in use */
+#define ATF_COM 0x02 /* completed entry (enaddr valid) */
+#define ATF_PERM 0x04 /* permanent entry */
+#define ATF_PUBL 0x08 /* publish entry (respond for other host) */
+#define ATF_USETRAILERS 0x10 /* has requested trailers */
+
+#ifdef _KERNEL
+/*
+ * Structure shared between the ethernet driver modules and
+ * the address resolution code.
+ */
+struct arpcom {
+ struct ifnet *ac_ifp; /* network-visible interface */
+ void *ac_netgraph; /* ng_ether(4) netgraph node info */
+};
+#define IFP2AC(ifp) ((struct arpcom *)(ifp->if_l2com))
+#define AC2IFP(ac) ((ac)->ac_ifp)
+
+#endif /* _KERNEL */
+
+struct arpstat {
+ /* Normal things that happen: */
+ u_long txrequests; /* # of ARP requests sent by this host. */
+ u_long txreplies; /* # of ARP replies sent by this host. */
+ u_long rxrequests; /* # of ARP requests received by this host. */
+ u_long rxreplies; /* # of ARP replies received by this host. */
+ u_long received; /* # of ARP packets received by this host. */
+
+ u_long arp_spares[4]; /* For either the upper or lower half. */
+ /* Abnormal event and error counting: */
+ u_long dropped; /* # of packets dropped waiting for a reply. */
+ u_long timeouts; /* # of times with entries removed */
+ /* due to timeout. */
+ u_long dupips; /* # of duplicate IPs detected. */
+};
+
+/*
+ * In-kernel consumers can use these accessor macros directly to update
+ * stats.
+ */
+#define ARPSTAT_ADD(name, val) V_arpstat.name += (val)
+#define ARPSTAT_SUB(name, val) V_arpstat.name -= (val)
+#define ARPSTAT_INC(name) ARPSTAT_ADD(name, 1)
+#define ARPSTAT_DEC(name) ARPSTAT_SUB(name, 1)
+
+#endif /* !_NET_IF_ARP_HH_ */
diff --git a/rtems/freebsd/net/if_atm.h b/rtems/freebsd/net/if_atm.h
new file mode 100644
index 00000000..e8f69da0
--- /dev/null
+++ b/rtems/freebsd/net/if_atm.h
@@ -0,0 +1,337 @@
+/* $NetBSD: if_atm.h,v 1.7 1996/11/09 23:02:27 chuck Exp $ */
+/* $FreeBSD$ */
+
+/*-
+ *
+ * Copyright (c) 1996 Charles D. Cranor and Washington University.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Charles D. Cranor and
+ * Washington University.
+ * 4. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * net/if_atm.h
+ */
+
+/*
+ * Classification of ATM cards.
+ */
+#define ATM_DEVICE_UNKNOWN 0
+#define ATM_DEVICE_PCA200E 1 /* Fore/Marconi PCA200-E */
+#define ATM_DEVICE_HE155 2 /* Fore/Marconi HE155 */
+#define ATM_DEVICE_HE622 3 /* Fore/Marconi HE622 */
+#define ATM_DEVICE_ENI155P 4 /* Efficient networks 155p */
+#define ATM_DEVICE_ADP155P 5 /* Adaptec 155p */
+#define ATM_DEVICE_FORELE25 6 /* ForeRunnerLE 25 */
+#define ATM_DEVICE_FORELE155 7 /* ForeRunnerLE 155 */
+#define ATM_DEVICE_NICSTAR25 8 /* other 77211 25.6MBit */
+#define ATM_DEVICE_NICSTAR155 9 /* other 77211 155MBit */
+#define ATM_DEVICE_IDTABR25 10 /* 77252 based card 25MBit */
+#define ATM_DEVICE_IDTABR155 11 /* 77252 based card 155MBit */
+#define ATM_DEVICE_PROATM25 12 /* 77252 based ProSum card 25MBit */
+#define ATM_DEVICE_PROATM155 13 /* 77252 based ProSum card 155MBit */
+#define ATM_DEVICE_VIRTUAL 14 /* virtual ATM device (netgraph) */
+
+/* map to strings and vendors */
+#define ATM_DEVICE_NAMES \
+ { "Unknown", "Unknown" }, \
+ { "PCA200-E", "Fore/Marconi" }, \
+ { "HE155", "Fore/Marconi" }, \
+ { "HE622", "Fore/Marconi" }, \
+ { "ENI155p", "Efficient Networks" }, \
+ { "ADP155p", "Adaptec" }, \
+ { "ForeRunnerLE25", "Fore/Marconi" }, \
+ { "ForeRunnerLE155", "Fore/Marconi" }, \
+ { "IDT77211/25", "IDT" }, \
+ { "IDT77211/155", "IDT" }, \
+ { "IDT77252/25", "IDT" }, \
+ { "IDT77252/155", "IDT" }, \
+ { "ProATM/25", "ProSum" }, \
+ { "ProATM/155", "ProSum" }, \
+ { "Virtual", "NetGraph" },
+
+/*
+ * This is the common link layer MIB for all ATM interfaces. Much of the
+ * information here is needed for ILMI. This will be augmented by statistics
+ * at some point.
+ */
+struct ifatm_mib {
+ /* configuration data */
+ uint8_t device; /* type of card */
+ u_char esi[6]; /* end system identifier (MAC) */
+ uint32_t serial; /* card serial number */
+ uint32_t hw_version; /* card version */
+ uint32_t sw_version; /* firmware version (if any) */
+ uint32_t pcr; /* supported peak cell rate */
+ uint32_t media; /* physical media */
+ uint8_t vpi_bits; /* number of used bits in VPI field */
+ uint8_t vci_bits; /* number of used bits in VCI field */
+ uint16_t max_vpcs; /* maximum number of VPCs */
+ uint32_t max_vccs; /* maximum number of VCCs */
+};
+
+/*
+ * Traffic parameters for ATM connections. This contains all parameters
+ * to accommodate UBR, UBR+MCR, CBR, VBR and ABR connections.
+ *
+ * Keep in sync with ng_atm.h
+ */
+struct atmio_tparam {
+ uint32_t pcr; /* 24bit: Peak Cell Rate */
+ uint32_t scr; /* 24bit: VBR Sustainable Cell Rate */
+ uint32_t mbs; /* 24bit: VBR Maximum burst size */
+ uint32_t mcr; /* 24bit: ABR/VBR/UBR+MCR MCR */
+ uint32_t icr; /* 24bit: ABR ICR */
+ uint32_t tbe; /* 24bit: ABR TBE (1...2^24-1) */
+ uint8_t nrm; /* 3bit: ABR Nrm */
+ uint8_t trm; /* 3bit: ABR Trm */
+ uint16_t adtf; /* 10bit: ABR ADTF */
+ uint8_t rif; /* 4bit: ABR RIF */
+ uint8_t rdf; /* 4bit: ABR RDF */
+ uint8_t cdf; /* 3bit: ABR CDF */
+};
+
+/*
+ * VCC parameters
+ *
+ * Keep in sync with ng_atm.h
+ */
+struct atmio_vcc {
+ uint16_t flags; /* VCC flags */
+ uint16_t vpi;
+ uint16_t vci;
+ uint16_t rmtu; /* maximum receive PDU */
+ uint16_t tmtu; /* maximum transmit PDU */
+ uint8_t aal; /* aal type */
+ uint8_t traffic; /* traffic type */
+ struct atmio_tparam tparam; /* traffic parameters */
+};
+
+/* VCC flags */
+#define ATMIO_FLAG_LLCSNAP 0x0002 /* same as ATM_PH_LLCSNAP */
+#define ATMIO_FLAG_NG 0x0010 /* owned by netgraph */
+#define ATMIO_FLAG_HARP 0x0020 /* owned by HARP */
+#define ATMIO_FLAG_NORX 0x0100 /* not receiving on this VCC */
+#define ATMIO_FLAG_NOTX 0x0200 /* not transmitting on this VCC */
+#define ATMIO_FLAG_PVC 0x0400 /* this is a PVC */
+#define ATMIO_FLAG_ASYNC 0x0800 /* async open/close */
+#define ATMIO_FLAGS "\020\2LLCSNAP\5NG\6HARP\11NORX\12NOTX\13PVC\14ASYNC"
+
+#define ATMIO_AAL_0 0 /* pure cells */
+#define ATMIO_AAL_34 4 /* AAL3 and 4 */
+#define ATMIO_AAL_5 5 /* AAL5 */
+#define ATMIO_AAL_RAW 10 /* whatever the card does */
+
+#define ATMIO_TRAFFIC_UBR 0
+#define ATMIO_TRAFFIC_CBR 1
+#define ATMIO_TRAFFIC_ABR 2
+#define ATMIO_TRAFFIC_VBR 3
+
+/*
+ * VCC table
+ *
+ * Keep in sync with ng_atm.h
+ */
+struct atmio_vcctable {
+ uint32_t count; /* number of vccs */
+ struct atmio_vcc vccs[0]; /* array of VCCs */
+};
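+
+/*
+ * Allocation sketch (added for illustration): vccs[0] is a
+ * variable-length trailing array, so a table holding "n" entries is
+ * sized as
+ *
+ *	sizeof(struct atmio_vcctable) + n * sizeof(struct atmio_vcc)
+ *
+ * which is exactly what atm_getvccs() in if_atmsubr.c does.
+ */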
+
+/*
+ * Peak cell rates for various physical media. Note, that there are
+ * different opinions on what the correct values are.
+ */
+#define ATM_RATE_25_6M 59259
+#define ATM_RATE_155M 353208
+#define ATM_RATE_622M 1412830
+#define ATM_RATE_2_4G 5651320
+
+#ifdef _KERNEL
+/*
+ * Common fields for all ATM interfaces. Each driver's softc must start with
+ * this structure.
+ */
+struct ifatm {
+ struct ifnet *ifp;
+ struct ifatm_mib mib; /* exported data */
+ void *phy; /* usually SUNI */
+ void *ngpriv; /* netgraph link */
+};
+#define IFP2IFATM(ifp) ((struct ifatm *)(ifp)->if_l2com)
+#endif
+
+/*
+ * Keep structures in sync with ng_atm.h
+ *
+ * These are used by netgraph/harp to call the driver
+ * NATM uses the atm_pseudoioctl instead.
+ */
+struct atmio_openvcc {
+ void *rxhand; /* handle argument */
+ struct atmio_vcc param; /* parameters */
+};
+
+struct atmio_closevcc {
+ uint16_t vpi;
+ uint16_t vci;
+};
+
+#if defined(__NetBSD__) || defined(__OpenBSD__) || defined(__bsdi__)
+#define RTALLOC1(A,B) rtalloc1((A),(B))
+#elif defined(__FreeBSD__)
+#define RTALLOC1(A,B) rtalloc1((A),(B),0UL)
+#endif
+
+/*
+ * pseudo header for packet transmission
+ */
+struct atm_pseudohdr {
+ uint8_t atm_ph[4]; /* flags+VPI+VCI1(msb)+VCI2(lsb) */
+};
+
+#define ATM_PH_FLAGS(X) ((X)->atm_ph[0])
+#define ATM_PH_VPI(X) ((X)->atm_ph[1])
+#define ATM_PH_VCI(X) ((((X)->atm_ph[2]) << 8) | ((X)->atm_ph[3]))
+#define ATM_PH_SETVCI(X,V) { \
+ (X)->atm_ph[2] = ((V) >> 8) & 0xff; \
+ (X)->atm_ph[3] = ((V) & 0xff); \
+}
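+
+/*
+ * Usage sketch (illustrative): preparing a pseudo header for an
+ * AAL5/LLC-SNAP stream on VPI 0, VCI 42. ATM_PH_FLAGS() and
+ * ATM_PH_VPI() expand to lvalues, so assignment through them is valid:
+ *
+ *	struct atm_pseudohdr aph;
+ *
+ *	ATM_PH_FLAGS(&aph) = ATM_PH_AAL5 | ATM_PH_LLCSNAP;
+ *	ATM_PH_VPI(&aph) = 0;
+ *	ATM_PH_SETVCI(&aph, 42);
+ */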
+
+/* use AAL5? (0 == aal0) */
+#define ATM_PH_AAL5 0x01
+/* use the LLC SNAP encoding (iff aal5) */
+#define ATM_PH_LLCSNAP ATMIO_FLAG_LLCSNAP
+
+#define ATM_PH_DRIVER7 0x40 /* reserve for driver's use */
+#define ATM_PH_DRIVER8 0x80 /* reserve for driver's use */
+
+#define ATMMTU 9180 /* ATM MTU size for IP */
+ /* XXX: could be 9188 with LLC/SNAP according
+					   to Comer */
+
+#define SIOCATMGETVCCS _IOW('a', 125, struct atmio_vcctable)
+#define SIOCATMOPENVCC _IOR('a', 126, struct atmio_openvcc)
+#define SIOCATMCLOSEVCC _IOR('a', 127, struct atmio_closevcc)
+
+#define SIOCATMGVCCS _IOWR('i', 230, struct ifreq)
+
+/*
+ * XXX forget all the garbage in if_llc.h and do it the easy way
+ */
+#define ATMLLC_HDR "\252\252\3\0\0\0"
+struct atmllc {
+ uint8_t llchdr[6]; /* aa.aa.03.00.00.00 */
+ uint8_t type[2]; /* "ethernet" type */
+};
+
+/* ATM_LLC macros: note type code in host byte order */
+#define ATM_LLC_TYPE(X) (((X)->type[0] << 8) | ((X)->type[1]))
+#define ATM_LLC_SETTYPE(X, V) do { \
+ (X)->type[0] = ((V) >> 8) & 0xff; \
+ (X)->type[1] = ((V) & 0xff); \
+ } while (0)
+
+/*
+ * Events that are emitted by the driver. Currently the only consumer
+ * of this is the netgraph node.
+ */
+#define ATMEV_FLOW_CONTROL 0x0001 /* channel busy state changed */
+#define ATMEV_IFSTATE_CHANGED 0x0002 /* up/down or carrier */
+#define ATMEV_VCC_CHANGED 0x0003 /* PVC created/deleted */
+#define ATMEV_ACR_CHANGED 0x0004 /* ABR ACR has changed */
+
+struct atmev_flow_control {
+ uint16_t vpi; /* channel that is changed */
+ uint16_t vci;
+ u_int busy : 1; /* != 0 -> ATM layer busy */
+};
+
+struct atmev_ifstate_changed {
+ u_int running : 1; /* interface is running now */
+ u_int carrier : 1; /* carrier detected (or not) */
+};
+
+struct atmev_vcc_changed {
+ uint16_t vpi; /* channel that is changed */
+ uint16_t vci;
+ u_int up : 1; /* 1 - created, 0 - deleted */
+};
+
+struct atmev_acr_changed {
+ uint16_t vpi; /* channel that is changed */
+ uint16_t vci;
+ uint32_t acr; /* new ACR */
+};
+
+#ifdef _KERNEL
+void atm_ifattach(struct ifnet *);
+void atm_ifdetach(struct ifnet *);
+void atm_input(struct ifnet *, struct atm_pseudohdr *,
+ struct mbuf *, void *);
+int atm_output(struct ifnet *, struct mbuf *, struct sockaddr *,
+ struct route *);
+struct atmio_vcctable *atm_getvccs(struct atmio_vcc **, u_int, u_int,
+ struct mtx *, int);
+
+void atm_event(struct ifnet *, u_int, void *);
+
+#define ATMEV_SEND_FLOW_CONTROL(ATMIF, VPI, VCI, BUSY) \
+ do { \
+ struct atmev_flow_control _arg; \
+ _arg.vpi = (VPI); \
+ _arg.vci = (VCI); \
+ _arg.busy = (BUSY); \
+ atm_event((ATMIF)->ifp, ATMEV_FLOW_CONTROL, &_arg); \
+ } while (0)
+
+#define ATMEV_SEND_VCC_CHANGED(ATMIF, VPI, VCI, UP) \
+ do { \
+ struct atmev_vcc_changed _arg; \
+ _arg.vpi = (VPI); \
+ _arg.vci = (VCI); \
+ _arg.up = (UP); \
+ atm_event((ATMIF)->ifp, ATMEV_VCC_CHANGED, &_arg); \
+ } while (0)
+
+#define ATMEV_SEND_IFSTATE_CHANGED(ATMIF, CARRIER) \
+ do { \
+ struct atmev_ifstate_changed _arg; \
+ _arg.running = (((ATMIF)->ifp->if_drv_flags & \
+ IFF_DRV_RUNNING) != 0); \
+ _arg.carrier = ((CARRIER) != 0); \
+ atm_event((ATMIF)->ifp, ATMEV_IFSTATE_CHANGED, &_arg); \
+ } while (0)
+
+#define ATMEV_SEND_ACR_CHANGED(ATMIF, VPI, VCI, ACR) \
+ do { \
+ struct atmev_acr_changed _arg; \
+ _arg.vpi = (VPI); \
+ _arg.vci = (VCI); \
+ _arg.acr= (ACR); \
+ atm_event((ATMIF)->ifp, ATMEV_ACR_CHANGED, &_arg); \
+ } while (0)
+#endif
diff --git a/rtems/freebsd/net/if_atmsubr.c b/rtems/freebsd/net/if_atmsubr.c
new file mode 100644
index 00000000..b053aaaf
--- /dev/null
+++ b/rtems/freebsd/net/if_atmsubr.c
@@ -0,0 +1,504 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/* $NetBSD: if_atmsubr.c,v 1.10 1997/03/11 23:19:51 chuck Exp $ */
+
+/*-
+ *
+ * Copyright (c) 1996 Charles D. Cranor and Washington University.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Charles D. Cranor and
+ * Washington University.
+ * 4. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * if_atmsubr.c
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/freebsd/local/opt_inet.h>
+#include <rtems/freebsd/local/opt_inet6.h>
+#include <rtems/freebsd/local/opt_natm.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/module.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/sockio.h>
+#include <rtems/freebsd/sys/errno.h>
+#include <rtems/freebsd/sys/sysctl.h>
+#include <rtems/freebsd/sys/malloc.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/netisr.h>
+#include <rtems/freebsd/net/route.h>
+#include <rtems/freebsd/net/if_dl.h>
+#include <rtems/freebsd/net/if_types.h>
+#include <rtems/freebsd/net/if_atm.h>
+
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/if_atm.h>
+#include <rtems/freebsd/netinet/if_ether.h> /* XXX: for ETHERTYPE_* */
+#if defined(INET) || defined(INET6)
+#include <rtems/freebsd/netinet/in_var.h>
+#endif
+#ifdef NATM
+#include <rtems/freebsd/netnatm/natm.h>
+#endif
+
+#include <rtems/freebsd/security/mac/mac_framework.h>
+
+/*
+ * Netgraph interface functions.
+ * These need not be protected by a lock, because ng_atm nodes are persistent.
+ * The ng_atm module can be unloaded only if all ATM interfaces have been
+ * unloaded, so nobody should be in the code paths accessing these function
+ * pointers.
+ */
+void (*ng_atm_attach_p)(struct ifnet *);
+void (*ng_atm_detach_p)(struct ifnet *);
+int (*ng_atm_output_p)(struct ifnet *, struct mbuf **);
+void (*ng_atm_input_p)(struct ifnet *, struct mbuf **,
+ struct atm_pseudohdr *, void *);
+void (*ng_atm_input_orphan_p)(struct ifnet *, struct mbuf *,
+ struct atm_pseudohdr *, void *);
+void (*ng_atm_event_p)(struct ifnet *, uint32_t, void *);
+
+/*
+ * Harp pseudo interface hooks
+ */
+void (*atm_harp_input_p)(struct ifnet *ifp, struct mbuf **m,
+ struct atm_pseudohdr *ah, void *rxhand);
+void (*atm_harp_attach_p)(struct ifnet *);
+void (*atm_harp_detach_p)(struct ifnet *);
+void (*atm_harp_event_p)(struct ifnet *, uint32_t, void *);
+
+SYSCTL_NODE(_hw, OID_AUTO, atm, CTLFLAG_RW, 0, "ATM hardware");
+
+MALLOC_DEFINE(M_IFATM, "ifatm", "atm interface internals");
+
+#ifndef ETHERTYPE_IPV6
+#define ETHERTYPE_IPV6 0x86dd
+#endif
+
+#define senderr(e) do { error = (e); goto bad; } while (0)
+
+/*
+ * atm_output: ATM output routine
+ * inputs:
+ * "ifp" = ATM interface to output to
+ * "m0" = the packet to output
+ * "dst" = the sockaddr to send to (either IP addr, or raw VPI/VCI)
+ * "ro" = the route to use
+ * returns: error code [0 == ok]
+ *
+ * note: special semantics: if (dst == NULL) then we assume "m" already
+ * has an atm_pseudohdr on it and just send it directly
+ * [for native mode ATM output]. If dst is NULL, then
+ * ro->ro_rt must also be NULL.
+ */
+int
+atm_output(struct ifnet *ifp, struct mbuf *m0, struct sockaddr *dst,
+ struct route *ro)
+{
+ u_int16_t etype = 0; /* if using LLC/SNAP */
+ int error = 0, sz;
+ struct atm_pseudohdr atmdst, *ad;
+ struct mbuf *m = m0;
+ struct atmllc *atmllc;
+ struct atmllc *llc_hdr = NULL;
+ u_int32_t atm_flags;
+
+#ifdef MAC
+ error = mac_ifnet_check_transmit(ifp, m);
+ if (error)
+ senderr(error);
+#endif
+
+ if (!((ifp->if_flags & IFF_UP) &&
+ (ifp->if_drv_flags & IFF_DRV_RUNNING)))
+ senderr(ENETDOWN);
+
+ /*
+ * check for non-native ATM traffic (dst != NULL)
+ */
+ if (dst) {
+ switch (dst->sa_family) {
+
+#if defined(INET) || defined(INET6)
+ case AF_INET:
+ case AF_INET6:
+ {
+ if (dst->sa_family == AF_INET6)
+ etype = ETHERTYPE_IPV6;
+ else
+ etype = ETHERTYPE_IP;
+ if (!atmresolve(ro->ro_rt, m, dst, &atmdst)) {
+ m = NULL;
+ /* XXX: atmresolve already free'd it */
+ senderr(EHOSTUNREACH);
+ /* XXX: put ATMARP stuff here */
+ /* XXX: watch who frees m on failure */
+ }
+ }
+ break;
+#endif /* INET || INET6 */
+
+ case AF_UNSPEC:
+ /*
+ * XXX: bpfwrite. assuming dst contains 12 bytes
+ * (atm pseudo header (4) + LLC/SNAP (8))
+ */
+ bcopy(dst->sa_data, &atmdst, sizeof(atmdst));
+ llc_hdr = (struct atmllc *)(dst->sa_data +
+ sizeof(atmdst));
+ break;
+
+ default:
+ printf("%s: can't handle af%d\n", ifp->if_xname,
+ dst->sa_family);
+ senderr(EAFNOSUPPORT);
+ }
+
+ /*
+ * must add atm_pseudohdr to data
+ */
+ sz = sizeof(atmdst);
+ atm_flags = ATM_PH_FLAGS(&atmdst);
+ if (atm_flags & ATM_PH_LLCSNAP)
+ sz += 8; /* sizeof snap == 8 */
+ M_PREPEND(m, sz, M_DONTWAIT);
+ if (m == 0)
+ senderr(ENOBUFS);
+ ad = mtod(m, struct atm_pseudohdr *);
+ *ad = atmdst;
+ if (atm_flags & ATM_PH_LLCSNAP) {
+ atmllc = (struct atmllc *)(ad + 1);
+ if (llc_hdr == NULL) {
+ bcopy(ATMLLC_HDR, atmllc->llchdr,
+ sizeof(atmllc->llchdr));
+ /* note: in host order */
+ ATM_LLC_SETTYPE(atmllc, etype);
+ }
+ else
+ bcopy(llc_hdr, atmllc, sizeof(struct atmllc));
+ }
+ }
+
+ if (ng_atm_output_p != NULL) {
+ if ((error = (*ng_atm_output_p)(ifp, &m)) != 0) {
+ if (m != NULL)
+ m_freem(m);
+ return (error);
+ }
+ if (m == NULL)
+ return (0);
+ }
+
+ /*
+ * Queue message on interface, and start output if interface
+ * not yet active.
+ */
+ if (!IF_HANDOFF_ADJ(&ifp->if_snd, m, ifp,
+ -(int)sizeof(struct atm_pseudohdr)))
+ return (ENOBUFS);
+ return (error);
+
+bad:
+ if (m)
+ m_freem(m);
+ return (error);
+}
+
+/*
+ * Process a received ATM packet;
+ * the packet is in the mbuf chain m.
+ */
+void
+atm_input(struct ifnet *ifp, struct atm_pseudohdr *ah, struct mbuf *m,
+ void *rxhand)
+{
+ int isr;
+ u_int16_t etype = ETHERTYPE_IP; /* default */
+
+ if ((ifp->if_flags & IFF_UP) == 0) {
+ m_freem(m);
+ return;
+ }
+#ifdef MAC
+ mac_ifnet_create_mbuf(ifp, m);
+#endif
+ ifp->if_ibytes += m->m_pkthdr.len;
+
+ if (ng_atm_input_p != NULL) {
+ (*ng_atm_input_p)(ifp, &m, ah, rxhand);
+ if (m == NULL)
+ return;
+ }
+
+ /* not eaten by ng_atm. Maybe it's a pseudo-harp PDU? */
+ if (atm_harp_input_p != NULL) {
+ (*atm_harp_input_p)(ifp, &m, ah, rxhand);
+ if (m == NULL)
+ return;
+ }
+
+ if (rxhand) {
+#ifdef NATM
+ struct natmpcb *npcb;
+
+ /*
+ * XXXRW: this use of 'rxhand' is not a very good idea, and
+ * was subject to races even before SMPng due to the release
+ * of spl here.
+ */
+ NATM_LOCK();
+ npcb = rxhand;
+ npcb->npcb_inq++; /* count # in queue */
+ isr = NETISR_NATM;
+ m->m_pkthdr.rcvif = rxhand; /* XXX: overload */
+ NATM_UNLOCK();
+#else
+ printf("atm_input: NATM detected but not "
+ "configured in kernel\n");
+ goto dropit;
+#endif
+ } else {
+ /*
+ * handle LLC/SNAP header, if present
+ */
+ if (ATM_PH_FLAGS(ah) & ATM_PH_LLCSNAP) {
+ struct atmllc *alc;
+
+ if (m->m_len < sizeof(*alc) &&
+ (m = m_pullup(m, sizeof(*alc))) == 0)
+ return; /* failed */
+ alc = mtod(m, struct atmllc *);
+ if (bcmp(alc, ATMLLC_HDR, 6)) {
+ printf("%s: recv'd invalid LLC/SNAP frame "
+ "[vp=%d,vc=%d]\n", ifp->if_xname,
+ ATM_PH_VPI(ah), ATM_PH_VCI(ah));
+ m_freem(m);
+ return;
+ }
+ etype = ATM_LLC_TYPE(alc);
+ m_adj(m, sizeof(*alc));
+ }
+
+ switch (etype) {
+
+#ifdef INET
+ case ETHERTYPE_IP:
+ isr = NETISR_IP;
+ break;
+#endif
+
+#ifdef INET6
+ case ETHERTYPE_IPV6:
+ isr = NETISR_IPV6;
+ break;
+#endif
+ default:
+#ifndef NATM
+ dropit:
+#endif
+ if (ng_atm_input_orphan_p != NULL)
+ (*ng_atm_input_orphan_p)(ifp, m, ah, rxhand);
+ else
+ m_freem(m);
+ return;
+ }
+ }
+ netisr_dispatch(isr, m);
+}
+
+/*
+ * Perform common duties while attaching to interface list.
+ */
+void
+atm_ifattach(struct ifnet *ifp)
+{
+ struct ifaddr *ifa;
+ struct sockaddr_dl *sdl;
+ struct ifatm *ifatm = ifp->if_l2com;
+
+ ifp->if_addrlen = 0;
+ ifp->if_hdrlen = 0;
+ if_attach(ifp);
+ ifp->if_mtu = ATMMTU;
+ ifp->if_output = atm_output;
+#if 0
+ ifp->if_input = atm_input;
+#endif
+ ifp->if_snd.ifq_maxlen = 50; /* dummy */
+
+ TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link)
+ if (ifa->ifa_addr->sa_family == AF_LINK) {
+ sdl = (struct sockaddr_dl *)ifa->ifa_addr;
+ sdl->sdl_type = IFT_ATM;
+ sdl->sdl_alen = ifp->if_addrlen;
+#ifdef notyet /* if using ATMARP, store hardware address using the next line */
+ bcopy(ifp->hw_addr, LLADDR(sdl), ifp->if_addrlen);
+#endif
+ break;
+ }
+
+ ifp->if_linkmib = &ifatm->mib;
+ ifp->if_linkmiblen = sizeof(ifatm->mib);
+
+ if(ng_atm_attach_p)
+ (*ng_atm_attach_p)(ifp);
+ if (atm_harp_attach_p)
+ (*atm_harp_attach_p)(ifp);
+}
+
+/*
+ * Common stuff for detaching an ATM interface
+ */
+void
+atm_ifdetach(struct ifnet *ifp)
+{
+ if (atm_harp_detach_p)
+ (*atm_harp_detach_p)(ifp);
+ if(ng_atm_detach_p)
+ (*ng_atm_detach_p)(ifp);
+ if_detach(ifp);
+}
+
+/*
+ * Support routine for the SIOCATMGVCCS ioctl().
+ *
+ * This routine assumes that the private VCC structures used by the driver
+ * begin with a struct atmio_vcc.
+ *
+ * Return a table of VCCs in a freshly allocated memory area.
+ * Here we have a problem: we first count how many VCCs we need
+ * to return. Then we allocate the memory and finally fill it in.
+ * Because we cannot hold the lock while calling malloc, the number of
+ * active VCCs may change while we're in malloc. So we allocate a couple
+ * of extra VCC slots and, if the space still isn't enough, re-iterate.
+ *
+ * We could use an sx lock for the vcc tables.
+ */
+struct atmio_vcctable *
+atm_getvccs(struct atmio_vcc **table, u_int size, u_int start,
+ struct mtx *lock, int waitok)
+{
+ u_int cid, alloc;
+ size_t len;
+ struct atmio_vcctable *vccs;
+ struct atmio_vcc *v;
+
+ alloc = start + 10;
+ vccs = NULL;
+
+ for (;;) {
+ len = sizeof(*vccs) + alloc * sizeof(vccs->vccs[0]);
+ vccs = reallocf(vccs, len, M_TEMP,
+ waitok ? M_WAITOK : M_NOWAIT);
+ if (vccs == NULL)
+ return (NULL);
+ bzero(vccs, len);
+
+ vccs->count = 0;
+ v = vccs->vccs;
+
+ mtx_lock(lock);
+ for (cid = 0; cid < size; cid++)
+ if (table[cid] != NULL) {
+ if (++vccs->count == alloc)
+ /* too many - try again */
+ break;
+ *v++ = *table[cid];
+ }
+ mtx_unlock(lock);
+
+ if (cid == size)
+ break;
+
+ alloc *= 2;
+ }
+ return (vccs);
+}
+
+/*
+ * Driver or channel state has changed. Inform whoever is interested
+ * in these events.
+ */
+void
+atm_event(struct ifnet *ifp, u_int event, void *arg)
+{
+ if (ng_atm_event_p != NULL)
+ (*ng_atm_event_p)(ifp, event, arg);
+ if (atm_harp_event_p != NULL)
+ (*atm_harp_event_p)(ifp, event, arg);
+}
+
+static void *
+atm_alloc(u_char type, struct ifnet *ifp)
+{
+ struct ifatm *ifatm;
+
+ ifatm = malloc(sizeof(struct ifatm), M_IFATM, M_WAITOK | M_ZERO);
+ ifatm->ifp = ifp;
+
+ return (ifatm);
+}
+
+static void
+atm_free(void *com, u_char type)
+{
+
+ free(com, M_IFATM);
+}
+
+static int
+atm_modevent(module_t mod, int type, void *data)
+{
+ switch (type) {
+ case MOD_LOAD:
+ if_register_com_alloc(IFT_ATM, atm_alloc, atm_free);
+ break;
+ case MOD_UNLOAD:
+ if_deregister_com_alloc(IFT_ATM);
+ break;
+ default:
+ return (EOPNOTSUPP);
+ }
+
+ return (0);
+}
+
+static moduledata_t atm_mod = {
+ "atm",
+ atm_modevent,
+ 0
+};
+
+DECLARE_MODULE(atm, atm_mod, SI_SUB_INIT_IF, SI_ORDER_ANY);
+MODULE_VERSION(atm, 1);
diff --git a/rtems/freebsd/net/if_bridge.c b/rtems/freebsd/net/if_bridge.c
new file mode 100644
index 00000000..88d73f87
--- /dev/null
+++ b/rtems/freebsd/net/if_bridge.c
@@ -0,0 +1,3458 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/* $NetBSD: if_bridge.c,v 1.31 2005/06/01 19:45:34 jdc Exp $ */
+
+/*
+ * Copyright 2001 Wasabi Systems, Inc.
+ * All rights reserved.
+ *
+ * Written by Jason R. Thorpe for Wasabi Systems, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed for the NetBSD Project by
+ * Wasabi Systems, Inc.
+ * 4. The name of Wasabi Systems, Inc. may not be used to endorse
+ * or promote products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Copyright (c) 1999, 2000 Jason L. Wright (jason@thought.net)
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * OpenBSD: if_bridge.c,v 1.60 2001/06/15 03:38:33 itojun Exp
+ */
+
+/*
+ * Network interface bridge support.
+ *
+ * TODO:
+ *
+ * - Currently only supports Ethernet-like interfaces (Ethernet,
+ * 802.11, VLANs on Ethernet, etc.) Figure out a nice way
+ * to bridge other types of interfaces (FDDI-FDDI, and maybe
+ * consider heterogeneous bridges).
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/freebsd/local/opt_inet.h>
+#include <rtems/freebsd/local/opt_inet6.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/protosw.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/time.h>
+#include <rtems/freebsd/sys/socket.h> /* for net/if.h */
+#include <rtems/freebsd/sys/sockio.h>
+#include <rtems/freebsd/sys/ctype.h> /* string functions */
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/random.h>
+#include <rtems/freebsd/sys/syslog.h>
+#include <rtems/freebsd/sys/sysctl.h>
+#include <rtems/freebsd/vm/uma.h>
+#include <rtems/freebsd/sys/module.h>
+#include <rtems/freebsd/sys/priv.h>
+#include <rtems/freebsd/sys/proc.h>
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/mutex.h>
+#include <rtems/freebsd/sys/rwlock.h>
+
+#include <rtems/freebsd/net/bpf.h>
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/if_clone.h>
+#include <rtems/freebsd/net/if_dl.h>
+#include <rtems/freebsd/net/if_types.h>
+#include <rtems/freebsd/net/if_var.h>
+#include <rtems/freebsd/net/pfil.h>
+#include <rtems/freebsd/net/vnet.h>
+
+#include <rtems/freebsd/netinet/in.h> /* for struct arpcom */
+#include <rtems/freebsd/netinet/in_systm.h>
+#include <rtems/freebsd/netinet/in_var.h>
+#include <rtems/freebsd/netinet/ip.h>
+#include <rtems/freebsd/netinet/ip_var.h>
+#ifdef INET6
+#include <rtems/freebsd/netinet/ip6.h>
+#include <rtems/freebsd/netinet6/ip6_var.h>
+#endif
+#if defined(INET) || defined(INET6)
+#include <rtems/freebsd/netinet/ip_carp.h>
+#endif
+#include <rtems/freebsd/machine/in_cksum.h>
+#include <rtems/freebsd/netinet/if_ether.h> /* for struct arpcom */
+#include <rtems/freebsd/net/bridgestp.h>
+#include <rtems/freebsd/net/if_bridgevar.h>
+#include <rtems/freebsd/net/if_llc.h>
+#include <rtems/freebsd/net/if_vlan_var.h>
+
+#include <rtems/freebsd/net/route.h>
+#include <rtems/freebsd/netinet/ip_fw.h>
+#include <rtems/freebsd/netinet/ipfw/ip_fw_private.h>
+
+/*
+ * Size of the route hash table. Must be a power of two.
+ */
+#ifndef BRIDGE_RTHASH_SIZE
+#define BRIDGE_RTHASH_SIZE 1024
+#endif
+
+#define BRIDGE_RTHASH_MASK (BRIDGE_RTHASH_SIZE - 1)
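+
+/*
+ * The power-of-two requirement lets a hash value be reduced to a bucket
+ * index with a cheap mask instead of a modulo (illustrative):
+ *
+ *	idx = hash & BRIDGE_RTHASH_MASK;  (same as hash % BRIDGE_RTHASH_SIZE)
+ */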
+
+/*
+ * Maximum number of addresses to cache.
+ */
+#ifndef BRIDGE_RTABLE_MAX
+#define BRIDGE_RTABLE_MAX 100
+#endif
+
+/*
+ * Timeout (in seconds) for entries learned dynamically.
+ */
+#ifndef BRIDGE_RTABLE_TIMEOUT
+#define BRIDGE_RTABLE_TIMEOUT (20 * 60) /* same as ARP */
+#endif
+
+/*
+ * Number of seconds between walks of the route list.
+ */
+#ifndef BRIDGE_RTABLE_PRUNE_PERIOD
+#define BRIDGE_RTABLE_PRUNE_PERIOD (5 * 60)
+#endif
+
+/*
+ * List of capabilities to possibly mask on the member interface.
+ */
+#define BRIDGE_IFCAPS_MASK (IFCAP_TOE|IFCAP_TSO|IFCAP_TXCSUM)
+
+/*
+ * List of capabilities to strip
+ */
+#define BRIDGE_IFCAPS_STRIP IFCAP_LRO
+
+/*
+ * Bridge interface list entry.
+ */
+struct bridge_iflist {
+ LIST_ENTRY(bridge_iflist) bif_next;
+ struct ifnet *bif_ifp; /* member if */
+ struct bstp_port bif_stp; /* STP state */
+ uint32_t bif_flags; /* member if flags */
+ int bif_savedcaps; /* saved capabilities */
+ uint32_t bif_addrmax; /* max # of addresses */
+ uint32_t bif_addrcnt; /* cur. # of addresses */
+ uint32_t bif_addrexceeded;/* # of address violations */
+};
+
+/*
+ * Bridge route node.
+ */
+struct bridge_rtnode {
+ LIST_ENTRY(bridge_rtnode) brt_hash; /* hash table linkage */
+ LIST_ENTRY(bridge_rtnode) brt_list; /* list linkage */
+ struct bridge_iflist *brt_dst; /* destination if */
+ unsigned long brt_expire; /* expiration time */
+ uint8_t brt_flags; /* address flags */
+ uint8_t brt_addr[ETHER_ADDR_LEN];
+ uint16_t brt_vlan; /* vlan id */
+};
+#define brt_ifp brt_dst->bif_ifp
+
+/*
+ * Software state for each bridge.
+ */
+struct bridge_softc {
+ struct ifnet *sc_ifp; /* make this an interface */
+ LIST_ENTRY(bridge_softc) sc_list;
+ struct mtx sc_mtx;
+ struct cv sc_cv;
+ uint32_t sc_brtmax; /* max # of addresses */
+ uint32_t sc_brtcnt; /* cur. # of addresses */
+ uint32_t sc_brttimeout; /* rt timeout in seconds */
+ struct callout sc_brcallout; /* bridge callout */
+ uint32_t sc_iflist_ref; /* refcount for sc_iflist */
+ uint32_t sc_iflist_xcnt; /* refcount for sc_iflist */
+ LIST_HEAD(, bridge_iflist) sc_iflist; /* member interface list */
+ LIST_HEAD(, bridge_rtnode) *sc_rthash; /* our forwarding table */
+ LIST_HEAD(, bridge_rtnode) sc_rtlist; /* list version of above */
+ uint32_t sc_rthash_key; /* key for hash */
+ LIST_HEAD(, bridge_iflist) sc_spanlist; /* span ports list */
+ struct bstp_state sc_stp; /* STP state */
+ uint32_t sc_brtexceeded; /* # of cache drops */
+ struct ifnet *sc_ifaddr; /* member mac copied from */
+ u_char sc_defaddr[6]; /* Default MAC address */
+};
+
+static struct mtx bridge_list_mtx;
+eventhandler_tag bridge_detach_cookie = NULL;
+
+int bridge_rtable_prune_period = BRIDGE_RTABLE_PRUNE_PERIOD;
+
+uma_zone_t bridge_rtnode_zone;
+
+static int bridge_clone_create(struct if_clone *, int, caddr_t);
+static void bridge_clone_destroy(struct ifnet *);
+
+static int bridge_ioctl(struct ifnet *, u_long, caddr_t);
+static void bridge_mutecaps(struct bridge_softc *);
+static void bridge_set_ifcap(struct bridge_softc *, struct bridge_iflist *,
+ int);
+static void bridge_ifdetach(void *arg __unused, struct ifnet *);
+static void bridge_init(void *);
+static void bridge_dummynet(struct mbuf *, struct ifnet *);
+static void bridge_stop(struct ifnet *, int);
+static void bridge_start(struct ifnet *);
+static struct mbuf *bridge_input(struct ifnet *, struct mbuf *);
+static int bridge_output(struct ifnet *, struct mbuf *, struct sockaddr *,
+ struct rtentry *);
+static void bridge_enqueue(struct bridge_softc *, struct ifnet *,
+ struct mbuf *);
+static void bridge_rtdelete(struct bridge_softc *, struct ifnet *ifp, int);
+
+static void bridge_forward(struct bridge_softc *, struct bridge_iflist *,
+ struct mbuf *m);
+
+static void bridge_timer(void *);
+
+static void bridge_broadcast(struct bridge_softc *, struct ifnet *,
+ struct mbuf *, int);
+static void bridge_span(struct bridge_softc *, struct mbuf *);
+
+static int bridge_rtupdate(struct bridge_softc *, const uint8_t *,
+ uint16_t, struct bridge_iflist *, int, uint8_t);
+static struct ifnet *bridge_rtlookup(struct bridge_softc *, const uint8_t *,
+ uint16_t);
+static void bridge_rttrim(struct bridge_softc *);
+static void bridge_rtage(struct bridge_softc *);
+static void bridge_rtflush(struct bridge_softc *, int);
+static int bridge_rtdaddr(struct bridge_softc *, const uint8_t *,
+ uint16_t);
+
+static int bridge_rtable_init(struct bridge_softc *);
+static void bridge_rtable_fini(struct bridge_softc *);
+
+static int bridge_rtnode_addr_cmp(const uint8_t *, const uint8_t *);
+static struct bridge_rtnode *bridge_rtnode_lookup(struct bridge_softc *,
+ const uint8_t *, uint16_t);
+static int bridge_rtnode_insert(struct bridge_softc *,
+ struct bridge_rtnode *);
+static void bridge_rtnode_destroy(struct bridge_softc *,
+ struct bridge_rtnode *);
+static void bridge_rtable_expire(struct ifnet *, int);
+static void bridge_state_change(struct ifnet *, int);
+
+static struct bridge_iflist *bridge_lookup_member(struct bridge_softc *,
+ const char *name);
+static struct bridge_iflist *bridge_lookup_member_if(struct bridge_softc *,
+ struct ifnet *ifp);
+static void bridge_delete_member(struct bridge_softc *,
+ struct bridge_iflist *, int);
+static void bridge_delete_span(struct bridge_softc *,
+ struct bridge_iflist *);
+
+static int bridge_ioctl_add(struct bridge_softc *, void *);
+static int bridge_ioctl_del(struct bridge_softc *, void *);
+static int bridge_ioctl_gifflags(struct bridge_softc *, void *);
+static int bridge_ioctl_sifflags(struct bridge_softc *, void *);
+static int bridge_ioctl_scache(struct bridge_softc *, void *);
+static int bridge_ioctl_gcache(struct bridge_softc *, void *);
+static int bridge_ioctl_gifs(struct bridge_softc *, void *);
+static int bridge_ioctl_rts(struct bridge_softc *, void *);
+static int bridge_ioctl_saddr(struct bridge_softc *, void *);
+static int bridge_ioctl_sto(struct bridge_softc *, void *);
+static int bridge_ioctl_gto(struct bridge_softc *, void *);
+static int bridge_ioctl_daddr(struct bridge_softc *, void *);
+static int bridge_ioctl_flush(struct bridge_softc *, void *);
+static int bridge_ioctl_gpri(struct bridge_softc *, void *);
+static int bridge_ioctl_spri(struct bridge_softc *, void *);
+static int bridge_ioctl_ght(struct bridge_softc *, void *);
+static int bridge_ioctl_sht(struct bridge_softc *, void *);
+static int bridge_ioctl_gfd(struct bridge_softc *, void *);
+static int bridge_ioctl_sfd(struct bridge_softc *, void *);
+static int bridge_ioctl_gma(struct bridge_softc *, void *);
+static int bridge_ioctl_sma(struct bridge_softc *, void *);
+static int bridge_ioctl_sifprio(struct bridge_softc *, void *);
+static int bridge_ioctl_sifcost(struct bridge_softc *, void *);
+static int bridge_ioctl_sifmaxaddr(struct bridge_softc *, void *);
+static int bridge_ioctl_addspan(struct bridge_softc *, void *);
+static int bridge_ioctl_delspan(struct bridge_softc *, void *);
+static int bridge_ioctl_gbparam(struct bridge_softc *, void *);
+static int bridge_ioctl_grte(struct bridge_softc *, void *);
+static int bridge_ioctl_gifsstp(struct bridge_softc *, void *);
+static int bridge_ioctl_sproto(struct bridge_softc *, void *);
+static int bridge_ioctl_stxhc(struct bridge_softc *, void *);
+static int bridge_pfil(struct mbuf **, struct ifnet *, struct ifnet *,
+ int);
+static int bridge_ip_checkbasic(struct mbuf **mp);
+#ifdef INET6
+static int bridge_ip6_checkbasic(struct mbuf **mp);
+#endif /* INET6 */
+static int bridge_fragment(struct ifnet *, struct mbuf *,
+ struct ether_header *, int, struct llc *);
+
+/* The default bridge vlan is 1 (IEEE 802.1Q-2003 Table 9-2) */
+#define VLANTAGOF(_m) \
+ (((_m)->m_flags & M_VLANTAG) ? EVL_VLANOFTAG((_m)->m_pkthdr.ether_vtag) : 1)
+
+static struct bstp_cb_ops bridge_ops = {
+ .bcb_state = bridge_state_change,
+ .bcb_rtage = bridge_rtable_expire
+};
+
+SYSCTL_DECL(_net_link);
+SYSCTL_NODE(_net_link, IFT_BRIDGE, bridge, CTLFLAG_RW, 0, "Bridge");
+
+static int pfil_onlyip = 1; /* only pass IP[46] packets when pfil is enabled */
+static int pfil_bridge = 1; /* run pfil hooks on the bridge interface */
+static int pfil_member = 1; /* run pfil hooks on the member interface */
+static int pfil_ipfw = 0; /* layer2 filter with ipfw */
+static int pfil_ipfw_arp = 0; /* layer2 filter with ipfw */
+static int pfil_local_phys = 0; /* run pfil hooks on the physical interface for
+ locally destined packets */
+static int log_stp = 0; /* log STP state changes */
+static int bridge_inherit_mac = 0; /* share MAC with first bridge member */
+SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_onlyip, CTLFLAG_RW,
+ &pfil_onlyip, 0, "Only pass IP packets when pfil is enabled");
+SYSCTL_INT(_net_link_bridge, OID_AUTO, ipfw_arp, CTLFLAG_RW,
+ &pfil_ipfw_arp, 0, "Filter ARP packets through IPFW layer2");
+SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_bridge, CTLFLAG_RW,
+ &pfil_bridge, 0, "Packet filter on the bridge interface");
+SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_member, CTLFLAG_RW,
+ &pfil_member, 0, "Packet filter on the member interface");
+SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_local_phys, CTLFLAG_RW,
+ &pfil_local_phys, 0,
+ "Packet filter on the physical interface for locally destined packets");
+SYSCTL_INT(_net_link_bridge, OID_AUTO, log_stp, CTLFLAG_RW,
+ &log_stp, 0, "Log STP state changes");
+SYSCTL_INT(_net_link_bridge, OID_AUTO, inherit_mac, CTLFLAG_RW,
+ &bridge_inherit_mac, 0,
+ "Inherit MAC address from the first bridge member");
+
+struct bridge_control {
+ int (*bc_func)(struct bridge_softc *, void *);
+ int bc_argsize;
+ int bc_flags;
+};
+
+#define BC_F_COPYIN 0x01 /* copy arguments in */
+#define BC_F_COPYOUT 0x02 /* copy arguments out */
+#define BC_F_SUSER 0x04 /* do super-user check */
+
+const struct bridge_control bridge_control_table[] = {
+ { bridge_ioctl_add, sizeof(struct ifbreq),
+ BC_F_COPYIN|BC_F_SUSER },
+ { bridge_ioctl_del, sizeof(struct ifbreq),
+ BC_F_COPYIN|BC_F_SUSER },
+
+ { bridge_ioctl_gifflags, sizeof(struct ifbreq),
+ BC_F_COPYIN|BC_F_COPYOUT },
+ { bridge_ioctl_sifflags, sizeof(struct ifbreq),
+ BC_F_COPYIN|BC_F_SUSER },
+
+ { bridge_ioctl_scache, sizeof(struct ifbrparam),
+ BC_F_COPYIN|BC_F_SUSER },
+ { bridge_ioctl_gcache, sizeof(struct ifbrparam),
+ BC_F_COPYOUT },
+
+ { bridge_ioctl_gifs, sizeof(struct ifbifconf),
+ BC_F_COPYIN|BC_F_COPYOUT },
+ { bridge_ioctl_rts, sizeof(struct ifbaconf),
+ BC_F_COPYIN|BC_F_COPYOUT },
+
+ { bridge_ioctl_saddr, sizeof(struct ifbareq),
+ BC_F_COPYIN|BC_F_SUSER },
+
+ { bridge_ioctl_sto, sizeof(struct ifbrparam),
+ BC_F_COPYIN|BC_F_SUSER },
+ { bridge_ioctl_gto, sizeof(struct ifbrparam),
+ BC_F_COPYOUT },
+
+ { bridge_ioctl_daddr, sizeof(struct ifbareq),
+ BC_F_COPYIN|BC_F_SUSER },
+
+ { bridge_ioctl_flush, sizeof(struct ifbreq),
+ BC_F_COPYIN|BC_F_SUSER },
+
+ { bridge_ioctl_gpri, sizeof(struct ifbrparam),
+ BC_F_COPYOUT },
+ { bridge_ioctl_spri, sizeof(struct ifbrparam),
+ BC_F_COPYIN|BC_F_SUSER },
+
+ { bridge_ioctl_ght, sizeof(struct ifbrparam),
+ BC_F_COPYOUT },
+ { bridge_ioctl_sht, sizeof(struct ifbrparam),
+ BC_F_COPYIN|BC_F_SUSER },
+
+ { bridge_ioctl_gfd, sizeof(struct ifbrparam),
+ BC_F_COPYOUT },
+ { bridge_ioctl_sfd, sizeof(struct ifbrparam),
+ BC_F_COPYIN|BC_F_SUSER },
+
+ { bridge_ioctl_gma, sizeof(struct ifbrparam),
+ BC_F_COPYOUT },
+ { bridge_ioctl_sma, sizeof(struct ifbrparam),
+ BC_F_COPYIN|BC_F_SUSER },
+
+ { bridge_ioctl_sifprio, sizeof(struct ifbreq),
+ BC_F_COPYIN|BC_F_SUSER },
+
+ { bridge_ioctl_sifcost, sizeof(struct ifbreq),
+ BC_F_COPYIN|BC_F_SUSER },
+
+ { bridge_ioctl_addspan, sizeof(struct ifbreq),
+ BC_F_COPYIN|BC_F_SUSER },
+ { bridge_ioctl_delspan, sizeof(struct ifbreq),
+ BC_F_COPYIN|BC_F_SUSER },
+
+ { bridge_ioctl_gbparam, sizeof(struct ifbropreq),
+ BC_F_COPYOUT },
+
+ { bridge_ioctl_grte, sizeof(struct ifbrparam),
+ BC_F_COPYOUT },
+
+ { bridge_ioctl_gifsstp, sizeof(struct ifbpstpconf),
+ BC_F_COPYIN|BC_F_COPYOUT },
+
+ { bridge_ioctl_sproto, sizeof(struct ifbrparam),
+ BC_F_COPYIN|BC_F_SUSER },
+
+ { bridge_ioctl_stxhc, sizeof(struct ifbrparam),
+ BC_F_COPYIN|BC_F_SUSER },
+
+ { bridge_ioctl_sifmaxaddr, sizeof(struct ifbreq),
+ BC_F_COPYIN|BC_F_SUSER },
+
+};
+const int bridge_control_table_size =
+ sizeof(bridge_control_table) / sizeof(bridge_control_table[0]);
+
+LIST_HEAD(, bridge_softc) bridge_list;
+
+IFC_SIMPLE_DECLARE(bridge, 0);
+
+static int
+bridge_modevent(module_t mod, int type, void *data)
+{
+
+ switch (type) {
+ case MOD_LOAD:
+ mtx_init(&bridge_list_mtx, "if_bridge list", NULL, MTX_DEF);
+ if_clone_attach(&bridge_cloner);
+ bridge_rtnode_zone = uma_zcreate("bridge_rtnode",
+ sizeof(struct bridge_rtnode), NULL, NULL, NULL, NULL,
+ UMA_ALIGN_PTR, 0);
+ LIST_INIT(&bridge_list);
+ bridge_input_p = bridge_input;
+ bridge_output_p = bridge_output;
+ bridge_dn_p = bridge_dummynet;
+ bridge_detach_cookie = EVENTHANDLER_REGISTER(
+ ifnet_departure_event, bridge_ifdetach, NULL,
+ EVENTHANDLER_PRI_ANY);
+ break;
+ case MOD_UNLOAD:
+ EVENTHANDLER_DEREGISTER(ifnet_departure_event,
+ bridge_detach_cookie);
+ if_clone_detach(&bridge_cloner);
+ uma_zdestroy(bridge_rtnode_zone);
+ bridge_input_p = NULL;
+ bridge_output_p = NULL;
+ bridge_dn_p = NULL;
+ mtx_destroy(&bridge_list_mtx);
+ break;
+ default:
+ return (EOPNOTSUPP);
+ }
+ return (0);
+}
+
+static moduledata_t bridge_mod = {
+ "if_bridge",
+ bridge_modevent,
+ 0
+};
+
+DECLARE_MODULE(if_bridge, bridge_mod, SI_SUB_PSEUDO, SI_ORDER_ANY);
+MODULE_DEPEND(if_bridge, bridgestp, 1, 1, 1);
+
+/*
+ * Handler for the net.link.bridge.ipfw sysctl (backs pfil_ipfw).
+ */
+static int
+sysctl_pfil_ipfw(SYSCTL_HANDLER_ARGS)
+{
+ int enable = pfil_ipfw;
+ int error;
+
+ error = sysctl_handle_int(oidp, &enable, 0, req);
+ enable = (enable) ? 1 : 0;
+
+ if (enable != pfil_ipfw) {
+ pfil_ipfw = enable;
+
+ /*
+ * Disable pfil so that ipfw doesn't run twice; if the user
+ * really wants both, they can re-enable pfil_bridge and/or
+ * pfil_member. Also allow non-IP packets, as ipfw can filter by
+ * layer2 type.
+ */
+ if (pfil_ipfw) {
+ pfil_onlyip = 0;
+ pfil_bridge = 0;
+ pfil_member = 0;
+ }
+ }
+
+ return (error);
+}
+SYSCTL_PROC(_net_link_bridge, OID_AUTO, ipfw, CTLTYPE_INT|CTLFLAG_RW,
+ &pfil_ipfw, 0, &sysctl_pfil_ipfw, "I", "Layer2 filter with IPFW");
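+
+/*
+ * For example, enabling layer2 filtering with `sysctl
+ * net.link.bridge.ipfw=1' also clears pfil_onlyip, pfil_bridge and
+ * pfil_member, as described in the handler above.
+ */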
+
+/*
+ * bridge_clone_create:
+ *
+ * Create a new bridge instance.
+ */
+static int
+bridge_clone_create(struct if_clone *ifc, int unit, caddr_t params)
+{
+ struct bridge_softc *sc, *sc2;
+ struct ifnet *bifp, *ifp;
+ int retry;
+
+ sc = malloc(sizeof(*sc), M_DEVBUF, M_WAITOK|M_ZERO);
+ ifp = sc->sc_ifp = if_alloc(IFT_ETHER);
+ if (ifp == NULL) {
+ free(sc, M_DEVBUF);
+ return (ENOSPC);
+ }
+
+ BRIDGE_LOCK_INIT(sc);
+ sc->sc_brtmax = BRIDGE_RTABLE_MAX;
+ sc->sc_brttimeout = BRIDGE_RTABLE_TIMEOUT;
+
+ /* Initialize our routing table. */
+ bridge_rtable_init(sc);
+
+ callout_init_mtx(&sc->sc_brcallout, &sc->sc_mtx, 0);
+
+ LIST_INIT(&sc->sc_iflist);
+ LIST_INIT(&sc->sc_spanlist);
+
+ ifp->if_softc = sc;
+ if_initname(ifp, ifc->ifc_name, unit);
+ ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
+ ifp->if_ioctl = bridge_ioctl;
+ ifp->if_start = bridge_start;
+ ifp->if_init = bridge_init;
+ ifp->if_type = IFT_BRIDGE;
+ IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
+ ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
+ IFQ_SET_READY(&ifp->if_snd);
+
+ /*
+ * Generate a random Ethernet address in the locally administered
+ * range.
+ *
+ * Since we are using random Ethernet addresses for the bridge, it is
+ * possible that we might have address collisions, so make sure that
+ * this hardware address isn't already in use on another bridge.
+ */
+ for (retry = 1; retry != 0;) {
+ arc4rand(sc->sc_defaddr, ETHER_ADDR_LEN, 1);
+ sc->sc_defaddr[0] &= ~1; /* clear multicast bit */
+ sc->sc_defaddr[0] |= 2; /* set the LAA bit */
+ retry = 0;
+ mtx_lock(&bridge_list_mtx);
+ LIST_FOREACH(sc2, &bridge_list, sc_list) {
+ bifp = sc2->sc_ifp;
+ if (memcmp(sc->sc_defaddr,
+ IF_LLADDR(bifp), ETHER_ADDR_LEN) == 0)
+ retry = 1;
+ }
+ mtx_unlock(&bridge_list_mtx);
+ }
+
+ bstp_attach(&sc->sc_stp, &bridge_ops);
+ ether_ifattach(ifp, sc->sc_defaddr);
+ /* Now undo some of the damage... */
+ ifp->if_baudrate = 0;
+ ifp->if_type = IFT_BRIDGE;
+
+ mtx_lock(&bridge_list_mtx);
+ LIST_INSERT_HEAD(&bridge_list, sc, sc_list);
+ mtx_unlock(&bridge_list_mtx);
+
+ return (0);
+}
+
+/*
+ * bridge_clone_destroy:
+ *
+ * Destroy a bridge instance.
+ */
+static void
+bridge_clone_destroy(struct ifnet *ifp)
+{
+ struct bridge_softc *sc = ifp->if_softc;
+ struct bridge_iflist *bif;
+
+ BRIDGE_LOCK(sc);
+
+ bridge_stop(ifp, 1);
+ ifp->if_flags &= ~IFF_UP;
+
+ while ((bif = LIST_FIRST(&sc->sc_iflist)) != NULL)
+ bridge_delete_member(sc, bif, 0);
+
+ while ((bif = LIST_FIRST(&sc->sc_spanlist)) != NULL) {
+ bridge_delete_span(sc, bif);
+ }
+
+ BRIDGE_UNLOCK(sc);
+
+ callout_drain(&sc->sc_brcallout);
+
+ mtx_lock(&bridge_list_mtx);
+ LIST_REMOVE(sc, sc_list);
+ mtx_unlock(&bridge_list_mtx);
+
+ bstp_detach(&sc->sc_stp);
+ ether_ifdetach(ifp);
+ if_free_type(ifp, IFT_ETHER);
+
+ /* Tear down the routing table. */
+ bridge_rtable_fini(sc);
+
+ BRIDGE_LOCK_DESTROY(sc);
+ free(sc, M_DEVBUF);
+}
+
+/*
+ * bridge_ioctl:
+ *
+ * Handle a control request from the operator.
+ */
+static int
+bridge_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
+{
+ struct bridge_softc *sc = ifp->if_softc;
+ struct ifreq *ifr = (struct ifreq *)data;
+ struct bridge_iflist *bif;
+ struct thread *td = curthread;
+ union {
+ struct ifbreq ifbreq;
+ struct ifbifconf ifbifconf;
+ struct ifbareq ifbareq;
+ struct ifbaconf ifbaconf;
+ struct ifbrparam ifbrparam;
+ struct ifbropreq ifbropreq;
+ } args;
+ struct ifdrv *ifd = (struct ifdrv *) data;
+ const struct bridge_control *bc;
+ int error = 0;
+
+ switch (cmd) {
+
+ case SIOCADDMULTI:
+ case SIOCDELMULTI:
+ break;
+
+ case SIOCGDRVSPEC:
+ case SIOCSDRVSPEC:
+ if (ifd->ifd_cmd >= bridge_control_table_size) {
+ error = EINVAL;
+ break;
+ }
+ bc = &bridge_control_table[ifd->ifd_cmd];
+
+ if (cmd == SIOCGDRVSPEC &&
+ (bc->bc_flags & BC_F_COPYOUT) == 0) {
+ error = EINVAL;
+ break;
+ }
+ else if (cmd == SIOCSDRVSPEC &&
+ (bc->bc_flags & BC_F_COPYOUT) != 0) {
+ error = EINVAL;
+ break;
+ }
+
+ if (bc->bc_flags & BC_F_SUSER) {
+ error = priv_check(td, PRIV_NET_BRIDGE);
+ if (error)
+ break;
+ }
+
+ if (ifd->ifd_len != bc->bc_argsize ||
+ ifd->ifd_len > sizeof(args)) {
+ error = EINVAL;
+ break;
+ }
+
+ bzero(&args, sizeof(args));
+ if (bc->bc_flags & BC_F_COPYIN) {
+ error = copyin(ifd->ifd_data, &args, ifd->ifd_len);
+ if (error)
+ break;
+ }
+
+ BRIDGE_LOCK(sc);
+ error = (*bc->bc_func)(sc, &args);
+ BRIDGE_UNLOCK(sc);
+ if (error)
+ break;
+
+ if (bc->bc_flags & BC_F_COPYOUT)
+ error = copyout(&args, ifd->ifd_data, ifd->ifd_len);
+
+ break;
+
+ case SIOCSIFFLAGS:
+ if (!(ifp->if_flags & IFF_UP) &&
+ (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
+ /*
+ * If interface is marked down and it is running,
+ * then stop and disable it.
+ */
+ BRIDGE_LOCK(sc);
+ bridge_stop(ifp, 1);
+ BRIDGE_UNLOCK(sc);
+ } else if ((ifp->if_flags & IFF_UP) &&
+ !(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
+ /*
+ * If interface is marked up and it is stopped, then
+ * start it.
+ */
+ (*ifp->if_init)(sc);
+ }
+ break;
+
+ case SIOCSIFMTU:
+ if (ifr->ifr_mtu < 576) {
+ error = EINVAL;
+ break;
+ }
+ if (LIST_EMPTY(&sc->sc_iflist)) {
+ sc->sc_ifp->if_mtu = ifr->ifr_mtu;
+ break;
+ }
+ BRIDGE_LOCK(sc);
+ LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
+ if (bif->bif_ifp->if_mtu != ifr->ifr_mtu) {
+ log(LOG_NOTICE, "%s: invalid MTU: %lu(%s)"
+ " != %d\n", sc->sc_ifp->if_xname,
+ bif->bif_ifp->if_mtu,
+ bif->bif_ifp->if_xname, ifr->ifr_mtu);
+ error = EINVAL;
+ break;
+ }
+ }
+ if (!error)
+ sc->sc_ifp->if_mtu = ifr->ifr_mtu;
+ BRIDGE_UNLOCK(sc);
+ break;
+ default:
+ /*
+ * drop the lock as ether_ioctl() will call bridge_start() and
+ * cause the lock to be recursed.
+ */
+ error = ether_ioctl(ifp, cmd, data);
+ break;
+ }
+
+ return (error);
+}
+
+/*
+ * bridge_mutecaps:
+ *
+ * Clear or restore unwanted capabilities on the member interface
+ */
+static void
+bridge_mutecaps(struct bridge_softc *sc)
+{
+ struct bridge_iflist *bif;
+ int enabled, mask;
+
+ /* Initial bitmask of capabilities to test */
+ mask = BRIDGE_IFCAPS_MASK;
+
+ LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
+ /* Every member must support it or it is disabled */
+ mask &= bif->bif_savedcaps;
+ }
+
+ LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
+ enabled = bif->bif_ifp->if_capenable;
+ enabled &= ~BRIDGE_IFCAPS_STRIP;
+ /* strip off mask bits and enable them again if allowed */
+ enabled &= ~BRIDGE_IFCAPS_MASK;
+ enabled |= mask;
+ bridge_set_ifcap(sc, bif, enabled);
+ }
+
+}
+
+static void
+bridge_set_ifcap(struct bridge_softc *sc, struct bridge_iflist *bif, int set)
+{
+ struct ifnet *ifp = bif->bif_ifp;
+ struct ifreq ifr;
+ int error;
+
+ bzero(&ifr, sizeof(ifr));
+ ifr.ifr_reqcap = set;
+
+ if (ifp->if_capenable != set) {
+ error = (*ifp->if_ioctl)(ifp, SIOCSIFCAP, (caddr_t)&ifr);
+ if (error)
+ if_printf(sc->sc_ifp,
+ "error setting interface capabilities on %s\n",
+ ifp->if_xname);
+ }
+}
+
+/*
+ * bridge_lookup_member:
+ *
+ * Lookup a bridge member interface.
+ */
+static struct bridge_iflist *
+bridge_lookup_member(struct bridge_softc *sc, const char *name)
+{
+ struct bridge_iflist *bif;
+ struct ifnet *ifp;
+
+ BRIDGE_LOCK_ASSERT(sc);
+
+ LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
+ ifp = bif->bif_ifp;
+ if (strcmp(ifp->if_xname, name) == 0)
+ return (bif);
+ }
+
+ return (NULL);
+}
+
+/*
+ * bridge_lookup_member_if:
+ *
+ * Lookup a bridge member interface by ifnet*.
+ */
+static struct bridge_iflist *
+bridge_lookup_member_if(struct bridge_softc *sc, struct ifnet *member_ifp)
+{
+ struct bridge_iflist *bif;
+
+ BRIDGE_LOCK_ASSERT(sc);
+
+ LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
+ if (bif->bif_ifp == member_ifp)
+ return (bif);
+ }
+
+ return (NULL);
+}
+
+/*
+ * bridge_delete_member:
+ *
+ * Delete the specified member interface.
+ */
+static void
+bridge_delete_member(struct bridge_softc *sc, struct bridge_iflist *bif,
+ int gone)
+{
+ struct ifnet *ifs = bif->bif_ifp;
+ struct ifnet *fif = NULL;
+
+ BRIDGE_LOCK_ASSERT(sc);
+
+ if (bif->bif_flags & IFBIF_STP)
+ bstp_disable(&bif->bif_stp);
+
+ ifs->if_bridge = NULL;
+ BRIDGE_XLOCK(sc);
+ LIST_REMOVE(bif, bif_next);
+ BRIDGE_XDROP(sc);
+
+ /*
+ * If removing the interface that gave the bridge its mac address, set
+ * the mac address of the bridge to the address of the next member, or
+ * to its default address if no members are left.
+ */
+ if (bridge_inherit_mac && sc->sc_ifaddr == ifs) {
+ if (LIST_EMPTY(&sc->sc_iflist)) {
+ bcopy(sc->sc_defaddr,
+ IF_LLADDR(sc->sc_ifp), ETHER_ADDR_LEN);
+ sc->sc_ifaddr = NULL;
+ } else {
+ fif = LIST_FIRST(&sc->sc_iflist)->bif_ifp;
+ bcopy(IF_LLADDR(fif),
+ IF_LLADDR(sc->sc_ifp), ETHER_ADDR_LEN);
+ sc->sc_ifaddr = fif;
+ }
+ EVENTHANDLER_INVOKE(iflladdr_event, sc->sc_ifp);
+ }
+
+ bridge_mutecaps(sc); /* recalculate now that this interface is removed */
+ bridge_rtdelete(sc, ifs, IFBF_FLUSHALL);
+ KASSERT(bif->bif_addrcnt == 0,
+ ("%s: %d bridge routes referenced", __func__, bif->bif_addrcnt));
+
+ BRIDGE_UNLOCK(sc);
+ if (!gone) {
+ switch (ifs->if_type) {
+ case IFT_ETHER:
+ case IFT_L2VLAN:
+ /*
+ * Take the interface out of promiscuous mode.
+ */
+ (void) ifpromisc(ifs, 0);
+ break;
+
+ case IFT_GIF:
+ break;
+
+ default:
+#ifdef DIAGNOSTIC
+ panic("bridge_delete_member: impossible");
+#endif
+ break;
+ }
+ /* re-enable any interface capabilities */
+ bridge_set_ifcap(sc, bif, bif->bif_savedcaps);
+ }
+ bstp_destroy(&bif->bif_stp); /* prepare to free */
+ BRIDGE_LOCK(sc);
+ free(bif, M_DEVBUF);
+}
+
+/*
+ * bridge_delete_span:
+ *
+ * Delete the specified span interface.
+ */
+static void
+bridge_delete_span(struct bridge_softc *sc, struct bridge_iflist *bif)
+{
+ BRIDGE_LOCK_ASSERT(sc);
+
+ KASSERT(bif->bif_ifp->if_bridge == NULL,
+ ("%s: not a span interface", __func__));
+
+ LIST_REMOVE(bif, bif_next);
+ free(bif, M_DEVBUF);
+}
+
+static int
+bridge_ioctl_add(struct bridge_softc *sc, void *arg)
+{
+ struct ifbreq *req = arg;
+ struct bridge_iflist *bif = NULL;
+ struct ifnet *ifs;
+ int error = 0;
+
+ ifs = ifunit(req->ifbr_ifsname);
+ if (ifs == NULL)
+ return (ENOENT);
+ if (ifs->if_ioctl == NULL) /* must be supported */
+ return (EINVAL);
+
+ /* If it's in the span list, it can't be a member. */
+ LIST_FOREACH(bif, &sc->sc_spanlist, bif_next)
+ if (ifs == bif->bif_ifp)
+ return (EBUSY);
+
+ if (ifs->if_bridge == sc)
+ return (EEXIST);
+
+ if (ifs->if_bridge != NULL)
+ return (EBUSY);
+
+ bif = malloc(sizeof(*bif), M_DEVBUF, M_NOWAIT|M_ZERO);
+ if (bif == NULL)
+ return (ENOMEM);
+
+ bif->bif_ifp = ifs;
+ bif->bif_flags = IFBIF_LEARNING | IFBIF_DISCOVER;
+ bif->bif_savedcaps = ifs->if_capenable;
+
+ switch (ifs->if_type) {
+ case IFT_ETHER:
+ case IFT_L2VLAN:
+ case IFT_GIF:
+ /* permitted interface types */
+ break;
+ default:
+ error = EINVAL;
+ goto out;
+ }
+
+ /* Allow the first Ethernet member to define the MTU */
+ if (LIST_EMPTY(&sc->sc_iflist))
+ sc->sc_ifp->if_mtu = ifs->if_mtu;
+ else if (sc->sc_ifp->if_mtu != ifs->if_mtu) {
+ if_printf(sc->sc_ifp, "invalid MTU: %lu(%s) != %lu\n",
+ ifs->if_mtu, ifs->if_xname, sc->sc_ifp->if_mtu);
+ error = EINVAL;
+ goto out;
+ }
+
+ /*
+ * Assign the interface's MAC address to the bridge if it's the first
+ * member and the MAC address of the bridge has not been changed from
+ * the default randomly generated one.
+ */
+ if (bridge_inherit_mac && LIST_EMPTY(&sc->sc_iflist) &&
+ !memcmp(IF_LLADDR(sc->sc_ifp), sc->sc_defaddr, ETHER_ADDR_LEN)) {
+ bcopy(IF_LLADDR(ifs), IF_LLADDR(sc->sc_ifp), ETHER_ADDR_LEN);
+ sc->sc_ifaddr = ifs;
+ EVENTHANDLER_INVOKE(iflladdr_event, sc->sc_ifp);
+ }
+
+ ifs->if_bridge = sc;
+ bstp_create(&sc->sc_stp, &bif->bif_stp, bif->bif_ifp);
+ /*
+ * XXX: XLOCK HERE!?!
+ *
+ * NOTE: insert_***HEAD*** should be safe for the traversals.
+ */
+ LIST_INSERT_HEAD(&sc->sc_iflist, bif, bif_next);
+
+ /* Set interface capabilities to the intersection set of all members */
+ bridge_mutecaps(sc);
+
+ switch (ifs->if_type) {
+ case IFT_ETHER:
+ case IFT_L2VLAN:
+ /*
+ * Place the interface into promiscuous mode.
+ */
+ BRIDGE_UNLOCK(sc);
+ error = ifpromisc(ifs, 1);
+ BRIDGE_LOCK(sc);
+ break;
+ }
+ if (error)
+ bridge_delete_member(sc, bif, 0);
+out:
+ if (error) {
+ if (bif != NULL)
+ free(bif, M_DEVBUF);
+ }
+ return (error);
+}
+
+static int
+bridge_ioctl_del(struct bridge_softc *sc, void *arg)
+{
+ struct ifbreq *req = arg;
+ struct bridge_iflist *bif;
+
+ bif = bridge_lookup_member(sc, req->ifbr_ifsname);
+ if (bif == NULL)
+ return (ENOENT);
+
+ bridge_delete_member(sc, bif, 0);
+
+ return (0);
+}
+
+static int
+bridge_ioctl_gifflags(struct bridge_softc *sc, void *arg)
+{
+ struct ifbreq *req = arg;
+ struct bridge_iflist *bif;
+ struct bstp_port *bp;
+
+ bif = bridge_lookup_member(sc, req->ifbr_ifsname);
+ if (bif == NULL)
+ return (ENOENT);
+
+ bp = &bif->bif_stp;
+ req->ifbr_ifsflags = bif->bif_flags;
+ req->ifbr_state = bp->bp_state;
+ req->ifbr_priority = bp->bp_priority;
+ req->ifbr_path_cost = bp->bp_path_cost;
+ req->ifbr_portno = bif->bif_ifp->if_index & 0xfff;
+ req->ifbr_proto = bp->bp_protover;
+ req->ifbr_role = bp->bp_role;
+ req->ifbr_stpflags = bp->bp_flags;
+ req->ifbr_addrcnt = bif->bif_addrcnt;
+ req->ifbr_addrmax = bif->bif_addrmax;
+ req->ifbr_addrexceeded = bif->bif_addrexceeded;
+
+ /* Copy STP state options as flags */
+ if (bp->bp_operedge)
+ req->ifbr_ifsflags |= IFBIF_BSTP_EDGE;
+ if (bp->bp_flags & BSTP_PORT_AUTOEDGE)
+ req->ifbr_ifsflags |= IFBIF_BSTP_AUTOEDGE;
+ if (bp->bp_ptp_link)
+ req->ifbr_ifsflags |= IFBIF_BSTP_PTP;
+ if (bp->bp_flags & BSTP_PORT_AUTOPTP)
+ req->ifbr_ifsflags |= IFBIF_BSTP_AUTOPTP;
+ if (bp->bp_flags & BSTP_PORT_ADMEDGE)
+ req->ifbr_ifsflags |= IFBIF_BSTP_ADMEDGE;
+ if (bp->bp_flags & BSTP_PORT_ADMCOST)
+ req->ifbr_ifsflags |= IFBIF_BSTP_ADMCOST;
+ return (0);
+}
+
+static int
+bridge_ioctl_sifflags(struct bridge_softc *sc, void *arg)
+{
+ struct ifbreq *req = arg;
+ struct bridge_iflist *bif;
+ struct bstp_port *bp;
+ int error;
+
+ bif = bridge_lookup_member(sc, req->ifbr_ifsname);
+ if (bif == NULL)
+ return (ENOENT);
+ bp = &bif->bif_stp;
+
+ if (req->ifbr_ifsflags & IFBIF_SPAN)
+ /* SPAN is read-only */
+ return (EINVAL);
+
+ if (req->ifbr_ifsflags & IFBIF_STP) {
+ if ((bif->bif_flags & IFBIF_STP) == 0) {
+ error = bstp_enable(&bif->bif_stp);
+ if (error)
+ return (error);
+ }
+ } else {
+ if ((bif->bif_flags & IFBIF_STP) != 0)
+ bstp_disable(&bif->bif_stp);
+ }
+
+ /* Pass on STP flags */
+ bstp_set_edge(bp, req->ifbr_ifsflags & IFBIF_BSTP_EDGE ? 1 : 0);
+ bstp_set_autoedge(bp, req->ifbr_ifsflags & IFBIF_BSTP_AUTOEDGE ? 1 : 0);
+ bstp_set_ptp(bp, req->ifbr_ifsflags & IFBIF_BSTP_PTP ? 1 : 0);
+ bstp_set_autoptp(bp, req->ifbr_ifsflags & IFBIF_BSTP_AUTOPTP ? 1 : 0);
+
+ /* Save the bits relating to the bridge */
+ bif->bif_flags = req->ifbr_ifsflags & IFBIFMASK;
+
+ return (0);
+}
+
+static int
+bridge_ioctl_scache(struct bridge_softc *sc, void *arg)
+{
+ struct ifbrparam *param = arg;
+
+ sc->sc_brtmax = param->ifbrp_csize;
+ bridge_rttrim(sc);
+
+ return (0);
+}
+
+static int
+bridge_ioctl_gcache(struct bridge_softc *sc, void *arg)
+{
+ struct ifbrparam *param = arg;
+
+ param->ifbrp_csize = sc->sc_brtmax;
+
+ return (0);
+}
+
+static int
+bridge_ioctl_gifs(struct bridge_softc *sc, void *arg)
+{
+ struct ifbifconf *bifc = arg;
+ struct bridge_iflist *bif;
+ struct ifbreq breq;
+ char *buf, *outbuf;
+ int count, buflen, len, error = 0;
+
+ count = 0;
+ LIST_FOREACH(bif, &sc->sc_iflist, bif_next)
+ count++;
+ LIST_FOREACH(bif, &sc->sc_spanlist, bif_next)
+ count++;
+
+ buflen = sizeof(breq) * count;
+ if (bifc->ifbic_len == 0) {
+ bifc->ifbic_len = buflen;
+ return (0);
+ }
+ BRIDGE_UNLOCK(sc);
+ outbuf = malloc(buflen, M_TEMP, M_WAITOK | M_ZERO);
+ BRIDGE_LOCK(sc);
+
+ count = 0;
+ buf = outbuf;
+ len = min(bifc->ifbic_len, buflen);
+ bzero(&breq, sizeof(breq));
+ LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
+ if (len < sizeof(breq))
+ break;
+
+ strlcpy(breq.ifbr_ifsname, bif->bif_ifp->if_xname,
+ sizeof(breq.ifbr_ifsname));
+ /* Fill in the ifbreq structure */
+ error = bridge_ioctl_gifflags(sc, &breq);
+ if (error)
+ break;
+ memcpy(buf, &breq, sizeof(breq));
+ count++;
+ buf += sizeof(breq);
+ len -= sizeof(breq);
+ }
+ LIST_FOREACH(bif, &sc->sc_spanlist, bif_next) {
+ if (len < sizeof(breq))
+ break;
+
+ strlcpy(breq.ifbr_ifsname, bif->bif_ifp->if_xname,
+ sizeof(breq.ifbr_ifsname));
+ breq.ifbr_ifsflags = bif->bif_flags;
+ breq.ifbr_portno = bif->bif_ifp->if_index & 0xfff;
+ memcpy(buf, &breq, sizeof(breq));
+ count++;
+ buf += sizeof(breq);
+ len -= sizeof(breq);
+ }
+
+ BRIDGE_UNLOCK(sc);
+ bifc->ifbic_len = sizeof(breq) * count;
+ error = copyout(outbuf, bifc->ifbic_req, bifc->ifbic_len);
+ BRIDGE_LOCK(sc);
+ free(outbuf, M_TEMP);
+ return (error);
+}
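+
+/*
+ * Illustrative sketch (not part of this file): like the other "get"
+ * requests, BRDGGIFS (assumed from net/if_bridgevar.h) uses a two-call
+ * sizing protocol; reusing the struct ifdrv setup shown earlier, a
+ * caller first passes ifbic_len == 0 to learn the required size, then
+ * calls again with a buffer:
+ *
+ *	struct ifbifconf bifc;
+ *
+ *	memset(&bifc, 0, sizeof(bifc));
+ *	ifd.ifd_cmd = BRDGGIFS;
+ *	ifd.ifd_len = sizeof(bifc);
+ *	ifd.ifd_data = &bifc;
+ *	ioctl(s, SIOCGDRVSPEC, &ifd);	(ifbic_len now holds the size)
+ *	bifc.ifbic_req = malloc(bifc.ifbic_len);
+ *	ioctl(s, SIOCGDRVSPEC, &ifd);	(second call fills the buffer)
+ */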
+
+static int
+bridge_ioctl_rts(struct bridge_softc *sc, void *arg)
+{
+ struct ifbaconf *bac = arg;
+ struct bridge_rtnode *brt;
+ struct ifbareq bareq;
+ char *buf, *outbuf;
+ int count, buflen, len, error = 0;
+
+ if (bac->ifbac_len == 0)
+ return (0);
+
+ count = 0;
+ LIST_FOREACH(brt, &sc->sc_rtlist, brt_list)
+ count++;
+ buflen = sizeof(bareq) * count;
+
+ BRIDGE_UNLOCK(sc);
+ outbuf = malloc(buflen, M_TEMP, M_WAITOK | M_ZERO);
+ BRIDGE_LOCK(sc);
+
+ count = 0;
+ buf = outbuf;
+ len = min(bac->ifbac_len, buflen);
+ bzero(&bareq, sizeof(bareq));
+ LIST_FOREACH(brt, &sc->sc_rtlist, brt_list) {
+ if (len < sizeof(bareq))
+ goto out;
+ strlcpy(bareq.ifba_ifsname, brt->brt_ifp->if_xname,
+ sizeof(bareq.ifba_ifsname));
+ memcpy(bareq.ifba_dst, brt->brt_addr, sizeof(brt->brt_addr));
+ bareq.ifba_vlan = brt->brt_vlan;
+ if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC &&
+ time_uptime < brt->brt_expire)
+ bareq.ifba_expire = brt->brt_expire - time_uptime;
+ else
+ bareq.ifba_expire = 0;
+ bareq.ifba_flags = brt->brt_flags;
+
+ memcpy(buf, &bareq, sizeof(bareq));
+ count++;
+ buf += sizeof(bareq);
+ len -= sizeof(bareq);
+ }
+out:
+ BRIDGE_UNLOCK(sc);
+ bac->ifbac_len = sizeof(bareq) * count;
+ error = copyout(outbuf, bac->ifbac_req, bac->ifbac_len);
+ BRIDGE_LOCK(sc);
+ free(outbuf, M_TEMP);
+ return (error);
+}
+
+static int
+bridge_ioctl_saddr(struct bridge_softc *sc, void *arg)
+{
+ struct ifbareq *req = arg;
+ struct bridge_iflist *bif;
+ int error;
+
+ bif = bridge_lookup_member(sc, req->ifba_ifsname);
+ if (bif == NULL)
+ return (ENOENT);
+
+ error = bridge_rtupdate(sc, req->ifba_dst, req->ifba_vlan, bif, 1,
+ req->ifba_flags);
+
+ return (error);
+}
+
+static int
+bridge_ioctl_sto(struct bridge_softc *sc, void *arg)
+{
+ struct ifbrparam *param = arg;
+
+ sc->sc_brttimeout = param->ifbrp_ctime;
+ return (0);
+}
+
+static int
+bridge_ioctl_gto(struct bridge_softc *sc, void *arg)
+{
+ struct ifbrparam *param = arg;
+
+ param->ifbrp_ctime = sc->sc_brttimeout;
+ return (0);
+}
+
+static int
+bridge_ioctl_daddr(struct bridge_softc *sc, void *arg)
+{
+ struct ifbareq *req = arg;
+
+ return (bridge_rtdaddr(sc, req->ifba_dst, req->ifba_vlan));
+}
+
+static int
+bridge_ioctl_flush(struct bridge_softc *sc, void *arg)
+{
+ struct ifbreq *req = arg;
+
+ bridge_rtflush(sc, req->ifbr_ifsflags);
+ return (0);
+}
+
+static int
+bridge_ioctl_gpri(struct bridge_softc *sc, void *arg)
+{
+ struct ifbrparam *param = arg;
+ struct bstp_state *bs = &sc->sc_stp;
+
+ param->ifbrp_prio = bs->bs_bridge_priority;
+ return (0);
+}
+
+static int
+bridge_ioctl_spri(struct bridge_softc *sc, void *arg)
+{
+ struct ifbrparam *param = arg;
+
+ return (bstp_set_priority(&sc->sc_stp, param->ifbrp_prio));
+}
+
+static int
+bridge_ioctl_ght(struct bridge_softc *sc, void *arg)
+{
+ struct ifbrparam *param = arg;
+ struct bstp_state *bs = &sc->sc_stp;
+
+ param->ifbrp_hellotime = bs->bs_bridge_htime >> 8;
+ return (0);
+}
+
+static int
+bridge_ioctl_sht(struct bridge_softc *sc, void *arg)
+{
+ struct ifbrparam *param = arg;
+
+ return (bstp_set_htime(&sc->sc_stp, param->ifbrp_hellotime));
+}
+
+static int
+bridge_ioctl_gfd(struct bridge_softc *sc, void *arg)
+{
+ struct ifbrparam *param = arg;
+ struct bstp_state *bs = &sc->sc_stp;
+
+ param->ifbrp_fwddelay = bs->bs_bridge_fdelay >> 8;
+ return (0);
+}
+
+static int
+bridge_ioctl_sfd(struct bridge_softc *sc, void *arg)
+{
+ struct ifbrparam *param = arg;
+
+ return (bstp_set_fdelay(&sc->sc_stp, param->ifbrp_fwddelay));
+}
+
+static int
+bridge_ioctl_gma(struct bridge_softc *sc, void *arg)
+{
+ struct ifbrparam *param = arg;
+ struct bstp_state *bs = &sc->sc_stp;
+
+ param->ifbrp_maxage = bs->bs_bridge_max_age >> 8;
+ return (0);
+}
+
+static int
+bridge_ioctl_sma(struct bridge_softc *sc, void *arg)
+{
+ struct ifbrparam *param = arg;
+
+ return (bstp_set_maxage(&sc->sc_stp, param->ifbrp_maxage));
+}
+
+static int
+bridge_ioctl_sifprio(struct bridge_softc *sc, void *arg)
+{
+ struct ifbreq *req = arg;
+ struct bridge_iflist *bif;
+
+ bif = bridge_lookup_member(sc, req->ifbr_ifsname);
+ if (bif == NULL)
+ return (ENOENT);
+
+ return (bstp_set_port_priority(&bif->bif_stp, req->ifbr_priority));
+}
+
+static int
+bridge_ioctl_sifcost(struct bridge_softc *sc, void *arg)
+{
+ struct ifbreq *req = arg;
+ struct bridge_iflist *bif;
+
+ bif = bridge_lookup_member(sc, req->ifbr_ifsname);
+ if (bif == NULL)
+ return (ENOENT);
+
+ return (bstp_set_path_cost(&bif->bif_stp, req->ifbr_path_cost));
+}
+
+static int
+bridge_ioctl_sifmaxaddr(struct bridge_softc *sc, void *arg)
+{
+ struct ifbreq *req = arg;
+ struct bridge_iflist *bif;
+
+ bif = bridge_lookup_member(sc, req->ifbr_ifsname);
+ if (bif == NULL)
+ return (ENOENT);
+
+ bif->bif_addrmax = req->ifbr_addrmax;
+ return (0);
+}
+
+static int
+bridge_ioctl_addspan(struct bridge_softc *sc, void *arg)
+{
+ struct ifbreq *req = arg;
+ struct bridge_iflist *bif = NULL;
+ struct ifnet *ifs;
+
+ ifs = ifunit(req->ifbr_ifsname);
+ if (ifs == NULL)
+ return (ENOENT);
+
+ LIST_FOREACH(bif, &sc->sc_spanlist, bif_next)
+ if (ifs == bif->bif_ifp)
+ return (EBUSY);
+
+ if (ifs->if_bridge != NULL)
+ return (EBUSY);
+
+ switch (ifs->if_type) {
+ case IFT_ETHER:
+ case IFT_GIF:
+ case IFT_L2VLAN:
+ break;
+ default:
+ return (EINVAL);
+ }
+
+ bif = malloc(sizeof(*bif), M_DEVBUF, M_NOWAIT|M_ZERO);
+ if (bif == NULL)
+ return (ENOMEM);
+
+ bif->bif_ifp = ifs;
+ bif->bif_flags = IFBIF_SPAN;
+
+ LIST_INSERT_HEAD(&sc->sc_spanlist, bif, bif_next);
+
+ return (0);
+}
+
+static int
+bridge_ioctl_delspan(struct bridge_softc *sc, void *arg)
+{
+ struct ifbreq *req = arg;
+ struct bridge_iflist *bif;
+ struct ifnet *ifs;
+
+ ifs = ifunit(req->ifbr_ifsname);
+ if (ifs == NULL)
+ return (ENOENT);
+
+ LIST_FOREACH(bif, &sc->sc_spanlist, bif_next)
+ if (ifs == bif->bif_ifp)
+ break;
+
+ if (bif == NULL)
+ return (ENOENT);
+
+ bridge_delete_span(sc, bif);
+
+ return (0);
+}
+
+static int
+bridge_ioctl_gbparam(struct bridge_softc *sc, void *arg)
+{
+ struct ifbropreq *req = arg;
+ struct bstp_state *bs = &sc->sc_stp;
+ struct bstp_port *root_port;
+
+ req->ifbop_maxage = bs->bs_bridge_max_age >> 8;
+ req->ifbop_hellotime = bs->bs_bridge_htime >> 8;
+ req->ifbop_fwddelay = bs->bs_bridge_fdelay >> 8;
+
+ root_port = bs->bs_root_port;
+ if (root_port == NULL)
+ req->ifbop_root_port = 0;
+ else
+ req->ifbop_root_port = root_port->bp_ifp->if_index;
+
+ req->ifbop_holdcount = bs->bs_txholdcount;
+ req->ifbop_priority = bs->bs_bridge_priority;
+ req->ifbop_protocol = bs->bs_protover;
+ req->ifbop_root_path_cost = bs->bs_root_pv.pv_cost;
+ req->ifbop_bridgeid = bs->bs_bridge_pv.pv_dbridge_id;
+ req->ifbop_designated_root = bs->bs_root_pv.pv_root_id;
+ req->ifbop_designated_bridge = bs->bs_root_pv.pv_dbridge_id;
+ req->ifbop_last_tc_time.tv_sec = bs->bs_last_tc_time.tv_sec;
+ req->ifbop_last_tc_time.tv_usec = bs->bs_last_tc_time.tv_usec;
+
+ return (0);
+}
+
+static int
+bridge_ioctl_grte(struct bridge_softc *sc, void *arg)
+{
+ struct ifbrparam *param = arg;
+
+ param->ifbrp_cexceeded = sc->sc_brtexceeded;
+ return (0);
+}
+
+static int
+bridge_ioctl_gifsstp(struct bridge_softc *sc, void *arg)
+{
+ struct ifbpstpconf *bifstp = arg;
+ struct bridge_iflist *bif;
+ struct bstp_port *bp;
+ struct ifbpstpreq bpreq;
+ char *buf, *outbuf;
+ int count, buflen, len, error = 0;
+
+ count = 0;
+ LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
+ if ((bif->bif_flags & IFBIF_STP) != 0)
+ count++;
+ }
+
+ buflen = sizeof(bpreq) * count;
+ if (bifstp->ifbpstp_len == 0) {
+ bifstp->ifbpstp_len = buflen;
+ return (0);
+ }
+
+ BRIDGE_UNLOCK(sc);
+ outbuf = malloc(buflen, M_TEMP, M_WAITOK | M_ZERO);
+ BRIDGE_LOCK(sc);
+
+ count = 0;
+ buf = outbuf;
+ len = min(bifstp->ifbpstp_len, buflen);
+ bzero(&bpreq, sizeof(bpreq));
+ LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
+ if (len < sizeof(bpreq))
+ break;
+
+ if ((bif->bif_flags & IFBIF_STP) == 0)
+ continue;
+
+ bp = &bif->bif_stp;
+ bpreq.ifbp_portno = bif->bif_ifp->if_index & 0xfff;
+ bpreq.ifbp_fwd_trans = bp->bp_forward_transitions;
+ bpreq.ifbp_design_cost = bp->bp_desg_pv.pv_cost;
+ bpreq.ifbp_design_port = bp->bp_desg_pv.pv_port_id;
+ bpreq.ifbp_design_bridge = bp->bp_desg_pv.pv_dbridge_id;
+ bpreq.ifbp_design_root = bp->bp_desg_pv.pv_root_id;
+
+ memcpy(buf, &bpreq, sizeof(bpreq));
+ count++;
+ buf += sizeof(bpreq);
+ len -= sizeof(bpreq);
+ }
+
+ BRIDGE_UNLOCK(sc);
+ bifstp->ifbpstp_len = sizeof(bpreq) * count;
+ error = copyout(outbuf, bifstp->ifbpstp_req, bifstp->ifbpstp_len);
+ BRIDGE_LOCK(sc);
+ free(outbuf, M_TEMP);
+ return (error);
+}
+
+static int
+bridge_ioctl_sproto(struct bridge_softc *sc, void *arg)
+{
+ struct ifbrparam *param = arg;
+
+ return (bstp_set_protocol(&sc->sc_stp, param->ifbrp_proto));
+}
+
+static int
+bridge_ioctl_stxhc(struct bridge_softc *sc, void *arg)
+{
+ struct ifbrparam *param = arg;
+
+ return (bstp_set_holdcount(&sc->sc_stp, param->ifbrp_txhc));
+}
+
+/*
+ * bridge_ifdetach:
+ *
+ * Detach an interface from a bridge. Called when a member
+ * interface is detaching.
+ */
+static void
+bridge_ifdetach(void *arg __unused, struct ifnet *ifp)
+{
+ struct bridge_softc *sc = ifp->if_bridge;
+ struct bridge_iflist *bif;
+
+ /* Check if the interface is a bridge member */
+ if (sc != NULL) {
+ BRIDGE_LOCK(sc);
+
+ bif = bridge_lookup_member_if(sc, ifp);
+ if (bif != NULL)
+ bridge_delete_member(sc, bif, 1);
+
+ BRIDGE_UNLOCK(sc);
+ return;
+ }
+
+ /* Check if the interface is a span port */
+ mtx_lock(&bridge_list_mtx);
+ LIST_FOREACH(sc, &bridge_list, sc_list) {
+ BRIDGE_LOCK(sc);
+ LIST_FOREACH(bif, &sc->sc_spanlist, bif_next)
+ if (ifp == bif->bif_ifp) {
+ bridge_delete_span(sc, bif);
+ break;
+ }
+
+ BRIDGE_UNLOCK(sc);
+ }
+ mtx_unlock(&bridge_list_mtx);
+}
+
+/*
+ * bridge_init:
+ *
+ * Initialize a bridge interface.
+ */
+static void
+bridge_init(void *xsc)
+{
+ struct bridge_softc *sc = (struct bridge_softc *)xsc;
+ struct ifnet *ifp = sc->sc_ifp;
+
+ if (ifp->if_drv_flags & IFF_DRV_RUNNING)
+ return;
+
+ BRIDGE_LOCK(sc);
+ callout_reset(&sc->sc_brcallout, bridge_rtable_prune_period * hz,
+ bridge_timer, sc);
+
+ ifp->if_drv_flags |= IFF_DRV_RUNNING;
+ bstp_init(&sc->sc_stp); /* Initialize Spanning Tree */
+
+ BRIDGE_UNLOCK(sc);
+}
+
+/*
+ * bridge_stop:
+ *
+ * Stop the bridge interface.
+ */
+static void
+bridge_stop(struct ifnet *ifp, int disable)
+{
+ struct bridge_softc *sc = ifp->if_softc;
+
+ BRIDGE_LOCK_ASSERT(sc);
+
+ if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
+ return;
+
+ callout_stop(&sc->sc_brcallout);
+ bstp_stop(&sc->sc_stp);
+
+ bridge_rtflush(sc, IFBF_FLUSHDYN);
+
+ ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
+}
+
+/*
+ * bridge_enqueue:
+ *
+ * Enqueue a packet on a bridge member interface.
+ *
+ */
+static void
+bridge_enqueue(struct bridge_softc *sc, struct ifnet *dst_ifp, struct mbuf *m)
+{
+ int len, err = 0;
+ short mflags;
+ struct mbuf *m0;
+
+ len = m->m_pkthdr.len;
+ mflags = m->m_flags;
+
+ /* We may be sending a fragment so traverse the mbuf */
+ for (; m; m = m0) {
+ m0 = m->m_nextpkt;
+ m->m_nextpkt = NULL;
+
+ /*
+ * If underlying interface can not do VLAN tag insertion itself
+ * then attach a packet tag that holds it.
+ */
+ if ((m->m_flags & M_VLANTAG) &&
+ (dst_ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0) {
+ m = ether_vlanencap(m, m->m_pkthdr.ether_vtag);
+ if (m == NULL) {
+ if_printf(dst_ifp,
+ "unable to prepend VLAN header\n");
+ dst_ifp->if_oerrors++;
+ continue;
+ }
+ m->m_flags &= ~M_VLANTAG;
+ }
+
+ /* if_transmit() consumes the mbuf; stop on the first error */
+ err = dst_ifp->if_transmit(dst_ifp, m);
+ if (err) {
+ m_freem(m0);
+ break;
+ }
+ }
+
+ if (err == 0) {
+ sc->sc_ifp->if_opackets++;
+ sc->sc_ifp->if_obytes += len;
+ if (mflags & M_MCAST)
+ sc->sc_ifp->if_omcasts++;
+ }
+}
+
+/*
+ * bridge_dummynet:
+ *
+ * Receive a queued packet from dummynet and pass it on to the output
+ * interface.
+ *
+ * The mbuf has the Ethernet header already attached.
+ */
+static void
+bridge_dummynet(struct mbuf *m, struct ifnet *ifp)
+{
+ struct bridge_softc *sc;
+
+ sc = ifp->if_bridge;
+
+ /*
+ * The packet didn't originate from a member interface. This should only
+ * ever happen if a member interface is removed while packets are
+ * queued for it.
+ */
+ if (sc == NULL) {
+ m_freem(m);
+ return;
+ }
+
+ if (PFIL_HOOKED(&V_inet_pfil_hook)
+#ifdef INET6
+ || PFIL_HOOKED(&V_inet6_pfil_hook)
+#endif
+ ) {
+ if (bridge_pfil(&m, sc->sc_ifp, ifp, PFIL_OUT) != 0)
+ return;
+ if (m == NULL)
+ return;
+ }
+
+ bridge_enqueue(sc, ifp, m);
+}
+
+/*
+ * bridge_output:
+ *
+ * Send output from a bridge member interface. This
+ * performs the bridging function for locally originated
+ * packets.
+ *
+ * The mbuf has the Ethernet header already attached. We must
+ * enqueue or free the mbuf before returning.
+ */
+static int
+bridge_output(struct ifnet *ifp, struct mbuf *m, struct sockaddr *sa,
+ struct rtentry *rt)
+{
+ struct ether_header *eh;
+ struct ifnet *dst_if;
+ struct bridge_softc *sc;
+ uint16_t vlan;
+
+ if (m->m_len < ETHER_HDR_LEN) {
+ m = m_pullup(m, ETHER_HDR_LEN);
+ if (m == NULL)
+ return (0);
+ }
+
+ eh = mtod(m, struct ether_header *);
+ sc = ifp->if_bridge;
+ vlan = VLANTAGOF(m);
+
+ BRIDGE_LOCK(sc);
+
+ /*
+ * If bridge is down, but the original output interface is up,
+ * go ahead and send out that interface. Otherwise, the packet
+ * is dropped below.
+ */
+ if ((sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
+ dst_if = ifp;
+ goto sendunicast;
+ }
+
+ /*
+ * If the packet is a multicast, or we don't know a better way to
+ * get there, send to all interfaces.
+ */
+ if (ETHER_IS_MULTICAST(eh->ether_dhost))
+ dst_if = NULL;
+ else
+ dst_if = bridge_rtlookup(sc, eh->ether_dhost, vlan);
+ if (dst_if == NULL) {
+ struct bridge_iflist *bif;
+ struct mbuf *mc;
+ int error = 0, used = 0;
+
+ bridge_span(sc, m);
+
+ BRIDGE_LOCK2REF(sc, error);
+ if (error) {
+ m_freem(m);
+ return (0);
+ }
+
+ LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
+ dst_if = bif->bif_ifp;
+
+ if (dst_if->if_type == IFT_GIF)
+ continue;
+ if ((dst_if->if_drv_flags & IFF_DRV_RUNNING) == 0)
+ continue;
+
+ /*
+ * If this is not the original output interface,
+ * and the interface is participating in spanning
+ * tree, make sure the port is in a state that
+ * allows forwarding.
+ */
+ if (dst_if != ifp && (bif->bif_flags & IFBIF_STP) &&
+ bif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING)
+ continue;
+
+ if (LIST_NEXT(bif, bif_next) == NULL) {
+ used = 1;
+ mc = m;
+ } else {
+ mc = m_copypacket(m, M_DONTWAIT);
+ if (mc == NULL) {
+ sc->sc_ifp->if_oerrors++;
+ continue;
+ }
+ }
+
+ bridge_enqueue(sc, dst_if, mc);
+ }
+ if (used == 0)
+ m_freem(m);
+ BRIDGE_UNREF(sc);
+ return (0);
+ }
+
+sendunicast:
+ /*
+ * XXX Spanning tree consideration here?
+ */
+
+ bridge_span(sc, m);
+ if ((dst_if->if_drv_flags & IFF_DRV_RUNNING) == 0) {
+ m_freem(m);
+ BRIDGE_UNLOCK(sc);
+ return (0);
+ }
+
+ BRIDGE_UNLOCK(sc);
+ bridge_enqueue(sc, dst_if, m);
+ return (0);
+}
+
+/*
+ * bridge_start:
+ *
+ * Start output on a bridge.
+ *
+ */
+static void
+bridge_start(struct ifnet *ifp)
+{
+ struct bridge_softc *sc;
+ struct mbuf *m;
+ struct ether_header *eh;
+ struct ifnet *dst_if;
+
+ sc = ifp->if_softc;
+
+ ifp->if_drv_flags |= IFF_DRV_OACTIVE;
+ for (;;) {
+ IFQ_DEQUEUE(&ifp->if_snd, m);
+ if (m == NULL)
+ break;
+ ETHER_BPF_MTAP(ifp, m);
+
+ eh = mtod(m, struct ether_header *);
+ dst_if = NULL;
+
+ BRIDGE_LOCK(sc);
+ if ((m->m_flags & (M_BCAST|M_MCAST)) == 0) {
+ dst_if = bridge_rtlookup(sc, eh->ether_dhost, 1);
+ }
+
+ if (dst_if == NULL)
+ bridge_broadcast(sc, ifp, m, 0);
+ else {
+ BRIDGE_UNLOCK(sc);
+ bridge_enqueue(sc, dst_if, m);
+ }
+ }
+ ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
+}
+
+/*
+ * bridge_forward:
+ *
+ * The forwarding function of the bridge.
+ *
+ * NOTE: Releases the lock on return.
+ */
+static void
+bridge_forward(struct bridge_softc *sc, struct bridge_iflist *sbif,
+ struct mbuf *m)
+{
+ struct bridge_iflist *dbif;
+ struct ifnet *src_if, *dst_if, *ifp;
+ struct ether_header *eh;
+ uint16_t vlan;
+ uint8_t *dst;
+ int error;
+
+ src_if = m->m_pkthdr.rcvif;
+ ifp = sc->sc_ifp;
+
+ ifp->if_ipackets++;
+ ifp->if_ibytes += m->m_pkthdr.len;
+ vlan = VLANTAGOF(m);
+
+ if ((sbif->bif_flags & IFBIF_STP) &&
+ sbif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING)
+ goto drop;
+
+ eh = mtod(m, struct ether_header *);
+ dst = eh->ether_dhost;
+
+ /* If the interface is learning, record the address. */
+ if (sbif->bif_flags & IFBIF_LEARNING) {
+ error = bridge_rtupdate(sc, eh->ether_shost, vlan,
+ sbif, 0, IFBAF_DYNAMIC);
+ /*
+ * If the interface has an address limit, deny any source
+ * that is not in the cache.
+ */
+ if (error && sbif->bif_addrmax)
+ goto drop;
+ }
+
+ if ((sbif->bif_flags & IFBIF_STP) != 0 &&
+ sbif->bif_stp.bp_state == BSTP_IFSTATE_LEARNING)
+ goto drop;
+
+ /*
+ * At this point, the port either doesn't participate
+ * in spanning tree or it is in the forwarding state.
+ */
+
+ /*
+ * If the packet is unicast, destined for someone on
+ * "this" side of the bridge, drop it.
+ */
+ if ((m->m_flags & (M_BCAST|M_MCAST)) == 0) {
+ dst_if = bridge_rtlookup(sc, dst, vlan);
+ if (src_if == dst_if)
+ goto drop;
+ } else {
+ /*
+ * Check if it's a reserved multicast address; any address
+ * listed in 802.1D section 7.12.6 may not be forwarded by the
+ * bridge.
+ * This is currently 01-80-C2-00-00-00 to 01-80-C2-00-00-0F.
+ */
+ if (dst[0] == 0x01 && dst[1] == 0x80 &&
+ dst[2] == 0xc2 && dst[3] == 0x00 &&
+ dst[4] == 0x00 && dst[5] <= 0x0f)
+ goto drop;
+
+ /* ...forward it to all interfaces. */
+ ifp->if_imcasts++;
+ dst_if = NULL;
+ }
+
+ /*
+ * If we have a destination interface which is a member of our bridge,
+ * OR this is a unicast packet, push it through the bpf(4) machinery.
+ * For broadcast or multicast packets, don't bother because it will
+ * be reinjected into ether_input. We do this before we pass the packets
+ * through the pfil(9) framework, as it is possible that pfil(9) will
+ * drop the packet, or possibly modify it, making it difficult to debug
+ * firewall issues on the bridge.
+ */
+ if (dst_if != NULL || (m->m_flags & (M_BCAST | M_MCAST)) == 0)
+ ETHER_BPF_MTAP(ifp, m);
+
+ /* run the packet filter */
+ if (PFIL_HOOKED(&V_inet_pfil_hook)
+#ifdef INET6
+ || PFIL_HOOKED(&V_inet6_pfil_hook)
+#endif
+ ) {
+ BRIDGE_UNLOCK(sc);
+ if (bridge_pfil(&m, ifp, src_if, PFIL_IN) != 0)
+ return;
+ if (m == NULL)
+ return;
+ BRIDGE_LOCK(sc);
+ }
+
+ if (dst_if == NULL) {
+ bridge_broadcast(sc, src_if, m, 1);
+ return;
+ }
+
+ /*
+ * At this point, we're dealing with a unicast frame
+ * going to a different interface.
+ */
+ if ((dst_if->if_drv_flags & IFF_DRV_RUNNING) == 0)
+ goto drop;
+
+ dbif = bridge_lookup_member_if(sc, dst_if);
+ if (dbif == NULL)
+ /* Not a member of the bridge (anymore?) */
+ goto drop;
+
+ /* Private segments can not talk to each other */
+ if (sbif->bif_flags & dbif->bif_flags & IFBIF_PRIVATE)
+ goto drop;
+
+ if ((dbif->bif_flags & IFBIF_STP) &&
+ dbif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING)
+ goto drop;
+
+ BRIDGE_UNLOCK(sc);
+
+ if (PFIL_HOOKED(&V_inet_pfil_hook)
+#ifdef INET6
+ || PFIL_HOOKED(&V_inet6_pfil_hook)
+#endif
+ ) {
+ if (bridge_pfil(&m, ifp, dst_if, PFIL_OUT) != 0)
+ return;
+ if (m == NULL)
+ return;
+ }
+
+ bridge_enqueue(sc, dst_if, m);
+ return;
+
+drop:
+ BRIDGE_UNLOCK(sc);
+ m_freem(m);
+}
+
+/*
+ * bridge_input:
+ *
+ * Receive input from a member interface. Queue the packet for
+ * bridging if it is not for us.
+ */
+static struct mbuf *
+bridge_input(struct ifnet *ifp, struct mbuf *m)
+{
+ struct bridge_softc *sc = ifp->if_bridge;
+ struct bridge_iflist *bif, *bif2;
+ struct ifnet *bifp;
+ struct ether_header *eh;
+ struct mbuf *mc, *mc2;
+ uint16_t vlan;
+ int error;
+
+ if ((sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
+ return (m);
+
+ bifp = sc->sc_ifp;
+ vlan = VLANTAGOF(m);
+
+ /*
+ * Implement support for bridge monitoring. If this flag has been
+ * set on this interface, discard the packet once we push it through
+ * the bpf(4) machinery, but before we do, increment the byte and
+ * packet counters associated with this interface.
+ */
+ if ((bifp->if_flags & IFF_MONITOR) != 0) {
+ m->m_pkthdr.rcvif = bifp;
+ ETHER_BPF_MTAP(bifp, m);
+ bifp->if_ipackets++;
+ bifp->if_ibytes += m->m_pkthdr.len;
+ m_freem(m);
+ return (NULL);
+ }
+ BRIDGE_LOCK(sc);
+ bif = bridge_lookup_member_if(sc, ifp);
+ if (bif == NULL) {
+ BRIDGE_UNLOCK(sc);
+ return (m);
+ }
+
+ eh = mtod(m, struct ether_header *);
+
+ bridge_span(sc, m);
+
+ if (m->m_flags & (M_BCAST|M_MCAST)) {
+ /* Tap off 802.1D packets; they do not get forwarded. */
+ if (memcmp(eh->ether_dhost, bstp_etheraddr,
+ ETHER_ADDR_LEN) == 0) {
+ m = bstp_input(&bif->bif_stp, ifp, m);
+ if (m == NULL) {
+ BRIDGE_UNLOCK(sc);
+ return (NULL);
+ }
+ }
+
+ if ((bif->bif_flags & IFBIF_STP) &&
+ bif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING) {
+ BRIDGE_UNLOCK(sc);
+ return (m);
+ }
+
+ /*
+ * Make a deep copy of the packet and enqueue the copy
+ * for bridge processing; return the original packet for
+ * local processing.
+ */
+ mc = m_dup(m, M_DONTWAIT);
+ if (mc == NULL) {
+ BRIDGE_UNLOCK(sc);
+ return (m);
+ }
+
+ /* Perform the bridge forwarding function with the copy. */
+ bridge_forward(sc, bif, mc);
+
+ /*
+ * Reinject the mbuf as arriving on the bridge so we have a
+ * chance at claiming multicast packets. We cannot loop back
+ * here from ether_input as a bridge is never a member of a
+ * bridge.
+ */
+ KASSERT(bifp->if_bridge == NULL,
+ ("loop created in bridge_input"));
+ mc2 = m_dup(m, M_DONTWAIT);
+ if (mc2 != NULL) {
+ /* Keep the layer3 header aligned */
+ int i = min(mc2->m_pkthdr.len, max_protohdr);
+ mc2 = m_copyup(mc2, i, ETHER_ALIGN);
+ }
+ if (mc2 != NULL) {
+ mc2->m_pkthdr.rcvif = bifp;
+ (*bifp->if_input)(bifp, mc2);
+ }
+
+ /* Return the original packet for local processing. */
+ return (m);
+ }
+
+ if ((bif->bif_flags & IFBIF_STP) &&
+ bif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING) {
+ BRIDGE_UNLOCK(sc);
+ return (m);
+ }
+
+#if (defined(INET) || defined(INET6))
+# define OR_CARP_CHECK_WE_ARE_DST(iface) \
+ || ((iface)->if_carp \
+ && (*carp_forus_p)((iface), eh->ether_dhost))
+# define OR_CARP_CHECK_WE_ARE_SRC(iface) \
+ || ((iface)->if_carp \
+ && (*carp_forus_p)((iface), eh->ether_shost))
+#else
+# define OR_CARP_CHECK_WE_ARE_DST(iface)
+# define OR_CARP_CHECK_WE_ARE_SRC(iface)
+#endif
+
+#ifdef INET6
+# define OR_PFIL_HOOKED_INET6 \
+ || PFIL_HOOKED(&V_inet6_pfil_hook)
+#else
+# define OR_PFIL_HOOKED_INET6
+#endif
+
+#define GRAB_OUR_PACKETS(iface) \
+ if ((iface)->if_type == IFT_GIF) \
+ continue; \
+ /* It is destined for us. */ \
+ if (memcmp(IF_LLADDR((iface)), eh->ether_dhost, ETHER_ADDR_LEN) == 0 \
+ OR_CARP_CHECK_WE_ARE_DST((iface)) \
+ ) { \
+ if ((iface)->if_type == IFT_BRIDGE) { \
+ ETHER_BPF_MTAP(iface, m); \
+ iface->if_ipackets++; \
+ /* Filter on the physical interface. */ \
+ if (pfil_local_phys && \
+ (PFIL_HOOKED(&V_inet_pfil_hook) \
+ OR_PFIL_HOOKED_INET6)) { \
+ if (bridge_pfil(&m, NULL, ifp, \
+ PFIL_IN) != 0 || m == NULL) { \
+ BRIDGE_UNLOCK(sc); \
+ return (NULL); \
+ } \
+ } \
+ } \
+ if (bif->bif_flags & IFBIF_LEARNING) { \
+ error = bridge_rtupdate(sc, eh->ether_shost, \
+ vlan, bif, 0, IFBAF_DYNAMIC); \
+ if (error && bif->bif_addrmax) { \
+ BRIDGE_UNLOCK(sc); \
+ m_freem(m); \
+ return (NULL); \
+ } \
+ } \
+ m->m_pkthdr.rcvif = iface; \
+ BRIDGE_UNLOCK(sc); \
+ return (m); \
+ } \
+ \
+ /* We just received a packet that we sent out. */ \
+ if (memcmp(IF_LLADDR((iface)), eh->ether_shost, ETHER_ADDR_LEN) == 0 \
+ OR_CARP_CHECK_WE_ARE_SRC((iface)) \
+ ) { \
+ BRIDGE_UNLOCK(sc); \
+ m_freem(m); \
+ return (NULL); \
+ }
+
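+ /*
+ * A note on the GRAB_OUR_PACKETS() expansions below: the stand-alone
+ * uses are wrapped in do { ... } while (0) so that the macro body
+ * acts as a single statement, and so that the leading "continue" for
+ * IFT_GIF members exits the do/while rather than an enclosing loop.
+ * Roughly:
+ *
+ * do {
+ * if ((iface)->if_type == IFT_GIF)
+ * continue; (leaves the do/while)
+ * (destination/source address checks on iface)
+ * } while (0);
+ */
+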
+ /*
+ * Unicast. Make sure it's not for the bridge.
+ */
+ do { GRAB_OUR_PACKETS(bifp) } while (0);
+
+ /*
+ * Give ifp a chance at first priority. This helps when the packet
+ * comes in through an interface such as a vlan that shares a MAC
+ * address with other interfaces on the same bridge. It also saves
+ * some CPU cycles when the destination interface and the input
+ * interface (ifp) are the same.
+ */
+ do { GRAB_OUR_PACKETS(ifp) } while (0);
+
+ /* Now check all the bridge members. */
+ LIST_FOREACH(bif2, &sc->sc_iflist, bif_next) {
+ GRAB_OUR_PACKETS(bif2->bif_ifp)
+ }
+
+#undef OR_CARP_CHECK_WE_ARE_DST
+#undef OR_CARP_CHECK_WE_ARE_SRC
+#undef OR_PFIL_HOOKED_INET6
+#undef GRAB_OUR_PACKETS
+
+ /* Perform the bridge forwarding function. */
+ bridge_forward(sc, bif, m);
+
+ return (NULL);
+}
+
+/*
+ * bridge_broadcast:
+ *
+ * Send a frame to all interfaces that are members of
+ * the bridge, except for the one on which the packet
+ * arrived.
+ *
+ * NOTE: Releases the lock on return.
+ */
+static void
+bridge_broadcast(struct bridge_softc *sc, struct ifnet *src_if,
+ struct mbuf *m, int runfilt)
+{
+ struct bridge_iflist *dbif, *sbif;
+ struct mbuf *mc;
+ struct ifnet *dst_if;
+ int error = 0, used = 0, i;
+
+ sbif = bridge_lookup_member_if(sc, src_if);
+
+ BRIDGE_LOCK2REF(sc, error);
+ if (error) {
+ m_freem(m);
+ return;
+ }
+
+ /* Filter on the bridge interface before broadcasting */
+ if (runfilt && (PFIL_HOOKED(&V_inet_pfil_hook)
+#ifdef INET6
+ || PFIL_HOOKED(&V_inet6_pfil_hook)
+#endif
+ )) {
+ if (bridge_pfil(&m, sc->sc_ifp, NULL, PFIL_OUT) != 0)
+ goto out;
+ if (m == NULL)
+ goto out;
+ }
+
+ LIST_FOREACH(dbif, &sc->sc_iflist, bif_next) {
+ dst_if = dbif->bif_ifp;
+ if (dst_if == src_if)
+ continue;
+
+ /* Private segments cannot talk to each other */
+ if (sbif && (sbif->bif_flags & dbif->bif_flags & IFBIF_PRIVATE))
+ continue;
+
+ if ((dbif->bif_flags & IFBIF_STP) &&
+ dbif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING)
+ continue;
+
+ if ((dbif->bif_flags & IFBIF_DISCOVER) == 0 &&
+ (m->m_flags & (M_BCAST|M_MCAST)) == 0)
+ continue;
+
+ if ((dst_if->if_drv_flags & IFF_DRV_RUNNING) == 0)
+ continue;
+
+ if (LIST_NEXT(dbif, bif_next) == NULL) {
+ mc = m;
+ used = 1;
+ } else {
+ mc = m_dup(m, M_DONTWAIT);
+ if (mc == NULL) {
+ sc->sc_ifp->if_oerrors++;
+ continue;
+ }
+ }
+
+ /*
+ * Filter on the output interface. Pass a NULL bridge interface
+ * pointer so we do not redundantly filter on the bridge for
+ * each interface we broadcast on.
+ */
+ if (runfilt && (PFIL_HOOKED(&V_inet_pfil_hook)
+#ifdef INET6
+ || PFIL_HOOKED(&V_inet6_pfil_hook)
+#endif
+ )) {
+ if (used == 0) {
+ /* Keep the layer3 header aligned */
+ i = min(mc->m_pkthdr.len, max_protohdr);
+ mc = m_copyup(mc, i, ETHER_ALIGN);
+ if (mc == NULL) {
+ sc->sc_ifp->if_oerrors++;
+ continue;
+ }
+ }
+ if (bridge_pfil(&mc, NULL, dst_if, PFIL_OUT) != 0)
+ continue;
+ if (mc == NULL)
+ continue;
+ }
+
+ bridge_enqueue(sc, dst_if, mc);
+ }
+ if (used == 0)
+ m_freem(m);
+
+out:
+ BRIDGE_UNREF(sc);
+}
+
+/*
+ * bridge_span:
+ *
+ * Duplicate a packet out one or more interfaces that are in span
+ * mode; the original mbuf is left unmodified.
+ */
+static void
+bridge_span(struct bridge_softc *sc, struct mbuf *m)
+{
+ struct bridge_iflist *bif;
+ struct ifnet *dst_if;
+ struct mbuf *mc;
+
+ if (LIST_EMPTY(&sc->sc_spanlist))
+ return;
+
+ LIST_FOREACH(bif, &sc->sc_spanlist, bif_next) {
+ dst_if = bif->bif_ifp;
+
+ if ((dst_if->if_drv_flags & IFF_DRV_RUNNING) == 0)
+ continue;
+
+ mc = m_copypacket(m, M_DONTWAIT);
+ if (mc == NULL) {
+ sc->sc_ifp->if_oerrors++;
+ continue;
+ }
+
+ bridge_enqueue(sc, dst_if, mc);
+ }
+}
+
+/*
+ * bridge_rtupdate:
+ *
+ * Add a bridge routing entry.
+ */
+static int
+bridge_rtupdate(struct bridge_softc *sc, const uint8_t *dst, uint16_t vlan,
+ struct bridge_iflist *bif, int setflags, uint8_t flags)
+{
+ struct bridge_rtnode *brt;
+ int error;
+
+ BRIDGE_LOCK_ASSERT(sc);
+
+ /* Check that the source address is valid and not multicast. */
+ if (ETHER_IS_MULTICAST(dst) ||
+ (dst[0] == 0 && dst[1] == 0 && dst[2] == 0 &&
+ dst[3] == 0 && dst[4] == 0 && dst[5] == 0))
+ return (EINVAL);
+
+ /* 802.1p frames map to vlan 1 */
+ if (vlan == 0)
+ vlan = 1;
+
+ /*
+ * A route for this destination might already exist. If so,
+ * update it, otherwise create a new one.
+ */
+ if ((brt = bridge_rtnode_lookup(sc, dst, vlan)) == NULL) {
+ if (sc->sc_brtcnt >= sc->sc_brtmax) {
+ sc->sc_brtexceeded++;
+ return (ENOSPC);
+ }
+ /* Check per interface address limits (if enabled) */
+ if (bif->bif_addrmax && bif->bif_addrcnt >= bif->bif_addrmax) {
+ bif->bif_addrexceeded++;
+ return (ENOSPC);
+ }
+
+ /*
+ * Allocate a new bridge forwarding node, and
+ * initialize the expiration time and Ethernet
+ * address.
+ */
+ brt = uma_zalloc(bridge_rtnode_zone, M_NOWAIT | M_ZERO);
+ if (brt == NULL)
+ return (ENOMEM);
+
+ if (bif->bif_flags & IFBIF_STICKY)
+ brt->brt_flags = IFBAF_STICKY;
+ else
+ brt->brt_flags = IFBAF_DYNAMIC;
+
+ memcpy(brt->brt_addr, dst, ETHER_ADDR_LEN);
+ brt->brt_vlan = vlan;
+
+ if ((error = bridge_rtnode_insert(sc, brt)) != 0) {
+ uma_zfree(bridge_rtnode_zone, brt);
+ return (error);
+ }
+ brt->brt_dst = bif;
+ bif->bif_addrcnt++;
+ }
+
+ if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC &&
+ brt->brt_dst != bif) {
+ brt->brt_dst->bif_addrcnt--;
+ brt->brt_dst = bif;
+ brt->brt_dst->bif_addrcnt++;
+ }
+
+ if ((flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC)
+ brt->brt_expire = time_uptime + sc->sc_brttimeout;
+ if (setflags)
+ brt->brt_flags = flags;
+
+ return (0);
+}
+
+/*
+ * bridge_rtlookup:
+ *
+ * Lookup the destination interface for an address.
+ */
+static struct ifnet *
+bridge_rtlookup(struct bridge_softc *sc, const uint8_t *addr, uint16_t vlan)
+{
+ struct bridge_rtnode *brt;
+
+ BRIDGE_LOCK_ASSERT(sc);
+
+ if ((brt = bridge_rtnode_lookup(sc, addr, vlan)) == NULL)
+ return (NULL);
+
+ return (brt->brt_ifp);
+}
+
+/*
+ * bridge_rttrim:
+ *
+ * Trim the routing table so that the number of routing entries
+ * is less than or equal to the maximum.
+ */
+static void
+bridge_rttrim(struct bridge_softc *sc)
+{
+ struct bridge_rtnode *brt, *nbrt;
+
+ BRIDGE_LOCK_ASSERT(sc);
+
+ /* Make sure we actually need to do this. */
+ if (sc->sc_brtcnt <= sc->sc_brtmax)
+ return;
+
+ /* Force an aging cycle; this might trim enough addresses. */
+ bridge_rtage(sc);
+ if (sc->sc_brtcnt <= sc->sc_brtmax)
+ return;
+
+ LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) {
+ if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {
+ bridge_rtnode_destroy(sc, brt);
+ if (sc->sc_brtcnt <= sc->sc_brtmax)
+ return;
+ }
+ }
+}
+
+/*
+ * bridge_timer:
+ *
+ * Aging timer for the bridge.
+ */
+static void
+bridge_timer(void *arg)
+{
+ struct bridge_softc *sc = arg;
+
+ BRIDGE_LOCK_ASSERT(sc);
+
+ bridge_rtage(sc);
+
+ if (sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING)
+ callout_reset(&sc->sc_brcallout,
+ bridge_rtable_prune_period * hz, bridge_timer, sc);
+}
+
+/*
+ * bridge_rtage:
+ *
+ * Perform an aging cycle.
+ */
+static void
+bridge_rtage(struct bridge_softc *sc)
+{
+ struct bridge_rtnode *brt, *nbrt;
+
+ BRIDGE_LOCK_ASSERT(sc);
+
+ LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) {
+ if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {
+ if (time_uptime >= brt->brt_expire)
+ bridge_rtnode_destroy(sc, brt);
+ }
+ }
+}
+
+/*
+ * bridge_rtflush:
+ *
+ * Remove all dynamic addresses from the bridge.
+ */
+static void
+bridge_rtflush(struct bridge_softc *sc, int full)
+{
+ struct bridge_rtnode *brt, *nbrt;
+
+ BRIDGE_LOCK_ASSERT(sc);
+
+ LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) {
+ if (full || (brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC)
+ bridge_rtnode_destroy(sc, brt);
+ }
+}
+
+/*
+ * bridge_rtdaddr:
+ *
+ * Remove an address from the table.
+ */
+static int
+bridge_rtdaddr(struct bridge_softc *sc, const uint8_t *addr, uint16_t vlan)
+{
+ struct bridge_rtnode *brt;
+ int found = 0;
+
+ BRIDGE_LOCK_ASSERT(sc);
+
+ /*
+ * If vlan is zero then we want to delete the entry for all vlans,
+ * so the lookup may return more than one match.
+ */
+ while ((brt = bridge_rtnode_lookup(sc, addr, vlan)) != NULL) {
+ bridge_rtnode_destroy(sc, brt);
+ found = 1;
+ }
+
+ return (found ? 0 : ENOENT);
+}
+
+/*
+ * bridge_rtdelete:
+ *
+ * Delete routes to a specific member interface.
+ */
+static void
+bridge_rtdelete(struct bridge_softc *sc, struct ifnet *ifp, int full)
+{
+ struct bridge_rtnode *brt, *nbrt;
+
+ BRIDGE_LOCK_ASSERT(sc);
+
+ LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) {
+ if (brt->brt_ifp == ifp && (full ||
+ (brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC))
+ bridge_rtnode_destroy(sc, brt);
+ }
+}
+
+/*
+ * bridge_rtable_init:
+ *
+ * Initialize the route table for this bridge.
+ */
+static int
+bridge_rtable_init(struct bridge_softc *sc)
+{
+ int i;
+
+ sc->sc_rthash = malloc(sizeof(*sc->sc_rthash) * BRIDGE_RTHASH_SIZE,
+ M_DEVBUF, M_NOWAIT);
+ if (sc->sc_rthash == NULL)
+ return (ENOMEM);
+
+ for (i = 0; i < BRIDGE_RTHASH_SIZE; i++)
+ LIST_INIT(&sc->sc_rthash[i]);
+
+ sc->sc_rthash_key = arc4random();
+
+ LIST_INIT(&sc->sc_rtlist);
+
+ return (0);
+}
+
+/*
+ * bridge_rtable_fini:
+ *
+ * Deconstruct the route table for this bridge.
+ */
+static void
+bridge_rtable_fini(struct bridge_softc *sc)
+{
+
+ KASSERT(sc->sc_brtcnt == 0,
+ ("%s: %d bridge routes referenced", __func__, sc->sc_brtcnt));
+ free(sc->sc_rthash, M_DEVBUF);
+}
+
+/*
+ * The following hash function is adapted from "Hash Functions" by Bob Jenkins
+ * ("Algorithm Alley", Dr. Dobbs Journal, September 1997).
+ */
+#define mix(a, b, c) \
+do { \
+ a -= b; a -= c; a ^= (c >> 13); \
+ b -= c; b -= a; b ^= (a << 8); \
+ c -= a; c -= b; c ^= (b >> 13); \
+ a -= b; a -= c; a ^= (c >> 12); \
+ b -= c; b -= a; b ^= (a << 16); \
+ c -= a; c -= b; c ^= (b >> 5); \
+ a -= b; a -= c; a ^= (c >> 3); \
+ b -= c; b -= a; b ^= (a << 10); \
+ c -= a; c -= b; c ^= (b >> 15); \
+} while (/*CONSTCOND*/0)
+
+static __inline uint32_t
+bridge_rthash(struct bridge_softc *sc, const uint8_t *addr)
+{
+ uint32_t a = 0x9e3779b9, b = 0x9e3779b9, c = sc->sc_rthash_key;
+
+ b += addr[5] << 8;
+ b += addr[4];
+ a += addr[3] << 24;
+ a += addr[2] << 16;
+ a += addr[1] << 8;
+ a += addr[0];
+
+ mix(a, b, c);
+
+ return (c & BRIDGE_RTHASH_MASK);
+}
+
+#undef mix
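+
+/*
+ * bridge_rthash() above folds the six address bytes and the per-bridge
+ * random key (sc_rthash_key, seeded from arc4random() in
+ * bridge_rtable_init()) into the Jenkins mix, so bucket placement is
+ * not predictable to outside hosts; BRIDGE_RTHASH_MASK then reduces
+ * the mixed value to a table index.
+ */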
+
+static int
+bridge_rtnode_addr_cmp(const uint8_t *a, const uint8_t *b)
+{
+ int i, d;
+
+ for (i = 0, d = 0; i < ETHER_ADDR_LEN && d == 0; i++) {
+ d = ((int)a[i]) - ((int)b[i]);
+ }
+
+ return (d);
+}
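+
+/*
+ * The hash chains are kept sorted, in descending address order as
+ * established by bridge_rtnode_insert() below, so that
+ * bridge_rtnode_lookup() can stop walking a chain as soon as the
+ * search key compares greater than the current entry (dir > 0).
+ * For example, on a chain 08:00:... -> 00:25:... -> 00:11:..., a
+ * lookup of 00:20:... fails at 00:11:... without walking further.
+ */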
+
+/*
+ * bridge_rtnode_lookup:
+ *
+ * Look up a bridge route node for the specified destination. Compare
+ * the vlan id; if it is zero, just return the first match.
+ */
+static struct bridge_rtnode *
+bridge_rtnode_lookup(struct bridge_softc *sc, const uint8_t *addr, uint16_t vlan)
+{
+ struct bridge_rtnode *brt;
+ uint32_t hash;
+ int dir;
+
+ BRIDGE_LOCK_ASSERT(sc);
+
+ hash = bridge_rthash(sc, addr);
+ LIST_FOREACH(brt, &sc->sc_rthash[hash], brt_hash) {
+ dir = bridge_rtnode_addr_cmp(addr, brt->brt_addr);
+ if (dir == 0 && (brt->brt_vlan == vlan || vlan == 0))
+ return (brt);
+ if (dir > 0)
+ return (NULL);
+ }
+
+ return (NULL);
+}
+
+/*
+ * bridge_rtnode_insert:
+ *
+ * Insert the specified bridge node into the route table. We
+ * assume the entry is not already in the table.
+ */
+static int
+bridge_rtnode_insert(struct bridge_softc *sc, struct bridge_rtnode *brt)
+{
+ struct bridge_rtnode *lbrt;
+ uint32_t hash;
+ int dir;
+
+ BRIDGE_LOCK_ASSERT(sc);
+
+ hash = bridge_rthash(sc, brt->brt_addr);
+
+ lbrt = LIST_FIRST(&sc->sc_rthash[hash]);
+ if (lbrt == NULL) {
+ LIST_INSERT_HEAD(&sc->sc_rthash[hash], brt, brt_hash);
+ goto out;
+ }
+
+ do {
+ dir = bridge_rtnode_addr_cmp(brt->brt_addr, lbrt->brt_addr);
+ if (dir == 0 && brt->brt_vlan == lbrt->brt_vlan)
+ return (EEXIST);
+ if (dir > 0) {
+ LIST_INSERT_BEFORE(lbrt, brt, brt_hash);
+ goto out;
+ }
+ if (LIST_NEXT(lbrt, brt_hash) == NULL) {
+ LIST_INSERT_AFTER(lbrt, brt, brt_hash);
+ goto out;
+ }
+ lbrt = LIST_NEXT(lbrt, brt_hash);
+ } while (lbrt != NULL);
+
+#ifdef DIAGNOSTIC
+ panic("bridge_rtnode_insert: impossible");
+#endif
+
+out:
+ LIST_INSERT_HEAD(&sc->sc_rtlist, brt, brt_list);
+ sc->sc_brtcnt++;
+
+ return (0);
+}
+
+/*
+ * bridge_rtnode_destroy:
+ *
+ * Destroy a bridge rtnode.
+ */
+static void
+bridge_rtnode_destroy(struct bridge_softc *sc, struct bridge_rtnode *brt)
+{
+ BRIDGE_LOCK_ASSERT(sc);
+
+ LIST_REMOVE(brt, brt_hash);
+
+ LIST_REMOVE(brt, brt_list);
+ sc->sc_brtcnt--;
+ brt->brt_dst->bif_addrcnt--;
+ uma_zfree(bridge_rtnode_zone, brt);
+}
+
+/*
+ * bridge_rtable_expire:
+ *
+ * Set the expiry time for all routes on an interface.
+ */
+static void
+bridge_rtable_expire(struct ifnet *ifp, int age)
+{
+ struct bridge_softc *sc = ifp->if_bridge;
+ struct bridge_rtnode *brt;
+
+ BRIDGE_LOCK(sc);
+
+ /*
+ * If the age is zero then flush; otherwise cap the expiry times for
+ * the interface at 'age' seconds from now.
+ */
+ if (age == 0)
+ bridge_rtdelete(sc, ifp, IFBF_FLUSHDYN);
+ else {
+ LIST_FOREACH(brt, &sc->sc_rtlist, brt_list) {
+ /* Cap the expiry time to 'age' */
+ if (brt->brt_ifp == ifp &&
+ brt->brt_expire > time_uptime + age &&
+ (brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC)
+ brt->brt_expire = time_uptime + age;
+ }
+ }
+ BRIDGE_UNLOCK(sc);
+}
+
+/*
+ * bridge_state_change:
+ *
+ * Callback from the bridgestp code when a port changes states.
+ */
+static void
+bridge_state_change(struct ifnet *ifp, int state)
+{
+ struct bridge_softc *sc = ifp->if_bridge;
+ static const char *stpstates[] = {
+ "disabled",
+ "listening",
+ "learning",
+ "forwarding",
+ "blocking",
+ "discarding"
+ };
+
+ if (log_stp)
+ log(LOG_NOTICE, "%s: state changed to %s on %s\n",
+ sc->sc_ifp->if_xname, stpstates[state], ifp->if_xname);
+}
+
+/*
+ * Send bridge packets through pfil if they are one of the types pfil can deal
+ * with, or if they are ARP or REVARP. (pfil will pass ARP and REVARP without
+ * question.) If *bifp or *ifp is NULL then packet filtering is skipped for
+ * that interface.
+ */
+static int
+bridge_pfil(struct mbuf **mp, struct ifnet *bifp, struct ifnet *ifp, int dir)
+{
+ int snap, error, i, hlen;
+ struct ether_header *eh1, eh2;
+ struct ip_fw_args args;
+ struct ip *ip;
+ struct llc llc1;
+ u_int16_t ether_type;
+
+ snap = 0;
+ error = -1; /* Default to an error unless set to 0 below */
+
+#if 0
+ /* we may return with the IP fields swapped, ensure it's not shared */
+ KASSERT(M_WRITABLE(*mp), ("%s: modifying a shared mbuf", __func__));
+#endif
+
+ if (pfil_bridge == 0 && pfil_member == 0 && pfil_ipfw == 0)
+ return (0); /* filtering is disabled */
+
+ i = min((*mp)->m_pkthdr.len, max_protohdr);
+ if ((*mp)->m_len < i) {
+ *mp = m_pullup(*mp, i);
+ if (*mp == NULL) {
+ printf("%s: m_pullup failed\n", __func__);
+ return (-1);
+ }
+ }
+
+ eh1 = mtod(*mp, struct ether_header *);
+ ether_type = ntohs(eh1->ether_type);
+
+ /*
+ * Check for SNAP/LLC.
+ */
+ if (ether_type < ETHERMTU) {
+ struct llc *llc2 = (struct llc *)(eh1 + 1);
+
+ if ((*mp)->m_len >= ETHER_HDR_LEN + 8 &&
+ llc2->llc_dsap == LLC_SNAP_LSAP &&
+ llc2->llc_ssap == LLC_SNAP_LSAP &&
+ llc2->llc_control == LLC_UI) {
+ ether_type = htons(llc2->llc_un.type_snap.ether_type);
+ snap = 1;
+ }
+ }
+
+ /*
+ * If we're trying to filter bridge traffic, don't look at anything
+ * other than IP and ARP traffic. If the filter doesn't understand
+ * IPv6, don't allow IPv6 through the bridge either. This is lame
+ * since if we really wanted, say, an AppleTalk filter, we are hosed,
+ * but of course we don't have an AppleTalk filter to begin with.
+ * (Note that since pfil doesn't understand ARP it will pass *ALL*
+ * ARP traffic.)
+ */
+ switch (ether_type) {
+ case ETHERTYPE_ARP:
+ case ETHERTYPE_REVARP:
+ if (pfil_ipfw_arp == 0)
+ return (0); /* Automatically pass */
+ break;
+
+ case ETHERTYPE_IP:
+#ifdef INET6
+ case ETHERTYPE_IPV6:
+#endif /* INET6 */
+ break;
+ default:
+ /*
+ * Check to see if the user wants to pass non-IP
+ * packets; these will not be checked by pfil(9) and
+ * would be passed unconditionally, so the default is to drop.
+ */
+ if (pfil_onlyip)
+ goto bad;
+ }
+
+ /* Strip off the Ethernet header and keep a copy. */
+ m_copydata(*mp, 0, ETHER_HDR_LEN, (caddr_t) &eh2);
+ m_adj(*mp, ETHER_HDR_LEN);
+
+ /* Strip off snap header, if present */
+ if (snap) {
+ m_copydata(*mp, 0, sizeof(struct llc), (caddr_t) &llc1);
+ m_adj(*mp, sizeof(struct llc));
+ }
+
+ /*
+ * Check the IP header for alignment and errors
+ */
+ if (dir == PFIL_IN) {
+ switch (ether_type) {
+ case ETHERTYPE_IP:
+ error = bridge_ip_checkbasic(mp);
+ break;
+#ifdef INET6
+ case ETHERTYPE_IPV6:
+ error = bridge_ip6_checkbasic(mp);
+ break;
+#endif /* INET6 */
+ default:
+ error = 0;
+ }
+ if (error)
+ goto bad;
+ }
+
+ /* XXX this section is also in if_ethersubr.c */
+ /* XXX: PFIL_OUT or DIR_OUT? */
+ if (V_ip_fw_chk_ptr && pfil_ipfw != 0 &&
+ dir == PFIL_OUT && ifp != NULL) {
+ struct m_tag *mtag;
+
+ error = -1;
+ /* fetch the start point from existing tags, if any */
+ mtag = m_tag_locate(*mp, MTAG_IPFW_RULE, 0, NULL);
+ if (mtag == NULL) {
+ args.rule.slot = 0;
+ } else {
+ struct ipfw_rule_ref *r;
+
+ /* XXX can we free the tag after use ? */
+ mtag->m_tag_id = PACKET_TAG_NONE;
+ r = (struct ipfw_rule_ref *)(mtag + 1);
+ /* packet already partially processed ? */
+ if (r->info & IPFW_ONEPASS)
+ goto ipfwpass;
+ args.rule = *r;
+ }
+
+ args.m = *mp;
+ args.oif = ifp;
+ args.next_hop = NULL;
+ args.eh = &eh2;
+ args.inp = NULL; /* used by ipfw uid/gid/jail rules */
+ i = V_ip_fw_chk_ptr(&args);
+ *mp = args.m;
+
+ if (*mp == NULL)
+ return (error);
+
+ if (ip_dn_io_ptr && (i == IP_FW_DUMMYNET)) {
+
+ /* put the Ethernet header back on */
+ M_PREPEND(*mp, ETHER_HDR_LEN, M_DONTWAIT);
+ if (*mp == NULL)
+ return (error);
+ bcopy(&eh2, mtod(*mp, caddr_t), ETHER_HDR_LEN);
+
+ /*
+ * Pass the pkt to dummynet, which consumes it. The
+ * packet will return to us via bridge_dummynet().
+ */
+ args.oif = ifp;
+ ip_dn_io_ptr(mp, DIR_FWD | PROTO_IFB, &args);
+ return (error);
+ }
+
+ if (i != IP_FW_PASS) /* drop */
+ goto bad;
+ }
+
+ipfwpass:
+ error = 0;
+
+ /*
+ * Run the packet through pfil
+ */
+ switch (ether_type) {
+ case ETHERTYPE_IP:
+ /*
+ * Before calling the firewall, swap fields the same way the
+ * IP stack does. Here we assume the header is contiguous.
+ */
+ ip = mtod(*mp, struct ip *);
+
+ ip->ip_len = ntohs(ip->ip_len);
+ ip->ip_off = ntohs(ip->ip_off);
+
+ /*
+ * Run pfil on the member interface and the bridge, both can
+ * be skipped by clearing pfil_member or pfil_bridge.
+ *
+ * Keep the order:
+ * in_if -> bridge_if -> out_if
+ */
+ if (pfil_bridge && dir == PFIL_OUT && bifp != NULL)
+ error = pfil_run_hooks(&V_inet_pfil_hook, mp, bifp,
+ dir, NULL);
+
+ if (*mp == NULL || error != 0) /* filter may consume */
+ break;
+
+ if (pfil_member && ifp != NULL)
+ error = pfil_run_hooks(&V_inet_pfil_hook, mp, ifp,
+ dir, NULL);
+
+ if (*mp == NULL || error != 0) /* filter may consume */
+ break;
+
+ if (pfil_bridge && dir == PFIL_IN && bifp != NULL)
+ error = pfil_run_hooks(&V_inet_pfil_hook, mp, bifp,
+ dir, NULL);
+
+ if (*mp == NULL || error != 0) /* filter may consume */
+ break;
+
+ /* check if we need to fragment the packet */
+ if (pfil_member && ifp != NULL && dir == PFIL_OUT) {
+ i = (*mp)->m_pkthdr.len;
+ if (i > ifp->if_mtu) {
+ error = bridge_fragment(ifp, *mp, &eh2, snap,
+ &llc1);
+ return (error);
+ }
+ }
+
+ /* Recalculate the ip checksum and restore byte ordering */
+ ip = mtod(*mp, struct ip *);
+ hlen = ip->ip_hl << 2;
+ if (hlen < sizeof(struct ip))
+ goto bad;
+ if (hlen > (*mp)->m_len) {
+ if ((*mp = m_pullup(*mp, hlen)) == 0)
+ goto bad;
+ ip = mtod(*mp, struct ip *);
+ if (ip == NULL)
+ goto bad;
+ }
+ ip->ip_len = htons(ip->ip_len);
+ ip->ip_off = htons(ip->ip_off);
+ ip->ip_sum = 0;
+ if (hlen == sizeof(struct ip))
+ ip->ip_sum = in_cksum_hdr(ip);
+ else
+ ip->ip_sum = in_cksum(*mp, hlen);
+
+ break;
+#ifdef INET6
+ case ETHERTYPE_IPV6:
+ if (pfil_bridge && dir == PFIL_OUT && bifp != NULL)
+ error = pfil_run_hooks(&V_inet6_pfil_hook, mp, bifp,
+ dir, NULL);
+
+ if (*mp == NULL || error != 0) /* filter may consume */
+ break;
+
+ if (pfil_member && ifp != NULL)
+ error = pfil_run_hooks(&V_inet6_pfil_hook, mp, ifp,
+ dir, NULL);
+
+ if (*mp == NULL || error != 0) /* filter may consume */
+ break;
+
+ if (pfil_bridge && dir == PFIL_IN && bifp != NULL)
+ error = pfil_run_hooks(&V_inet6_pfil_hook, mp, bifp,
+ dir, NULL);
+ break;
+#endif
+ default:
+ error = 0;
+ break;
+ }
+
+ if (*mp == NULL)
+ return (error);
+ if (error != 0)
+ goto bad;
+
+ error = -1;
+
+ /*
+ * Finally, put everything back the way it was and return
+ */
+ if (snap) {
+ M_PREPEND(*mp, sizeof(struct llc), M_DONTWAIT);
+ if (*mp == NULL)
+ return (error);
+ bcopy(&llc1, mtod(*mp, caddr_t), sizeof(struct llc));
+ }
+
+ M_PREPEND(*mp, ETHER_HDR_LEN, M_DONTWAIT);
+ if (*mp == NULL)
+ return (error);
+ bcopy(&eh2, mtod(*mp, caddr_t), ETHER_HDR_LEN);
+
+ return (0);
+
+bad:
+ m_freem(*mp);
+ *mp = NULL;
+ return (error);
+}
+
+/*
+ * Perform basic checks on the header size, since pfil
+ * assumes ip_input has already done them.
+ * Cut-and-pasted from ip_input.c.
+ * Given how simple the IPv6 version is,
+ * does the IPv4 version really need to be
+ * this complicated?
+ *
+ * XXX Should we update ipstat here, or not?
+ * XXX Right now we update ipstat but not
+ * XXX csum_counter.
+ */
+static int
+bridge_ip_checkbasic(struct mbuf **mp)
+{
+ struct mbuf *m = *mp;
+ struct ip *ip;
+ int len, hlen;
+ u_short sum;
+
+ if (*mp == NULL)
+ return (-1);
+
+ if (IP_HDR_ALIGNED_P(mtod(m, caddr_t)) == 0) {
+ if ((m = m_copyup(m, sizeof(struct ip),
+ (max_linkhdr + 3) & ~3)) == NULL) {
+ /* XXXJRT new stat, please */
+ KMOD_IPSTAT_INC(ips_toosmall);
+ goto bad;
+ }
+ } else if (__predict_false(m->m_len < sizeof (struct ip))) {
+ if ((m = m_pullup(m, sizeof (struct ip))) == NULL) {
+ KMOD_IPSTAT_INC(ips_toosmall);
+ goto bad;
+ }
+ }
+ ip = mtod(m, struct ip *);
+ if (ip == NULL) goto bad;
+
+ if (ip->ip_v != IPVERSION) {
+ KMOD_IPSTAT_INC(ips_badvers);
+ goto bad;
+ }
+ hlen = ip->ip_hl << 2;
+ if (hlen < sizeof(struct ip)) { /* minimum header length */
+ KMOD_IPSTAT_INC(ips_badhlen);
+ goto bad;
+ }
+ if (hlen > m->m_len) {
+ if ((m = m_pullup(m, hlen)) == 0) {
+ KMOD_IPSTAT_INC(ips_badhlen);
+ goto bad;
+ }
+ ip = mtod(m, struct ip *);
+ if (ip == NULL) goto bad;
+ }
+
+ if (m->m_pkthdr.csum_flags & CSUM_IP_CHECKED) {
+ sum = !(m->m_pkthdr.csum_flags & CSUM_IP_VALID);
+ } else {
+ if (hlen == sizeof(struct ip)) {
+ sum = in_cksum_hdr(ip);
+ } else {
+ sum = in_cksum(m, hlen);
+ }
+ }
+ if (sum) {
+ KMOD_IPSTAT_INC(ips_badsum);
+ goto bad;
+ }
+
+ /* Retrieve the packet length. */
+ len = ntohs(ip->ip_len);
+
+ /*
+ * Check for additional length bogosity
+ */
+ if (len < hlen) {
+ KMOD_IPSTAT_INC(ips_badlen);
+ goto bad;
+ }
+
+ /*
+ * Check that the amount of data in the buffers
+ * is at least as much as the IP header would have us expect.
+ * Drop packet if shorter than we expect.
+ */
+ if (m->m_pkthdr.len < len) {
+ KMOD_IPSTAT_INC(ips_tooshort);
+ goto bad;
+ }
+
+ /* Checks out, proceed */
+ *mp = m;
+ return (0);
+
+bad:
+ *mp = m;
+ return (-1);
+}
+
+#ifdef INET6
+/*
+ * Same as above, but for IPv6.
+ * Cut-and-pasted from ip6_input.c.
+ * XXX Should we update ip6stat, or not?
+ */
+static int
+bridge_ip6_checkbasic(struct mbuf **mp)
+{
+ struct mbuf *m = *mp;
+ struct ip6_hdr *ip6;
+
+ /*
+ * If the IPv6 header is not aligned, slurp it up into a new
+ * mbuf with space for link headers, in the event we forward
+ * it. Otherwise, if it is aligned, make sure the entire base
+ * IPv6 header is in the first mbuf of the chain.
+ */
+ if (IP6_HDR_ALIGNED_P(mtod(m, caddr_t)) == 0) {
+ struct ifnet *inifp = m->m_pkthdr.rcvif;
+ if ((m = m_copyup(m, sizeof(struct ip6_hdr),
+ (max_linkhdr + 3) & ~3)) == NULL) {
+ /* XXXJRT new stat, please */
+ V_ip6stat.ip6s_toosmall++;
+ in6_ifstat_inc(inifp, ifs6_in_hdrerr);
+ goto bad;
+ }
+ } else if (__predict_false(m->m_len < sizeof(struct ip6_hdr))) {
+ struct ifnet *inifp = m->m_pkthdr.rcvif;
+ if ((m = m_pullup(m, sizeof(struct ip6_hdr))) == NULL) {
+ V_ip6stat.ip6s_toosmall++;
+ in6_ifstat_inc(inifp, ifs6_in_hdrerr);
+ goto bad;
+ }
+ }
+
+ ip6 = mtod(m, struct ip6_hdr *);
+
+ if ((ip6->ip6_vfc & IPV6_VERSION_MASK) != IPV6_VERSION) {
+ V_ip6stat.ip6s_badvers++;
+ in6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_hdrerr);
+ goto bad;
+ }
+
+ /* Checks out, proceed */
+ *mp = m;
+ return (0);
+
+bad:
+ *mp = m;
+ return (-1);
+}
+#endif /* INET6 */
+
+/*
+ * bridge_fragment:
+ *
+ * Return a fragmented mbuf chain.
+ */
+static int
+bridge_fragment(struct ifnet *ifp, struct mbuf *m, struct ether_header *eh,
+ int snap, struct llc *llc)
+{
+ struct mbuf *m0;
+ struct ip *ip;
+ int error = -1;
+
+ if (m->m_len < sizeof(struct ip) &&
+ (m = m_pullup(m, sizeof(struct ip))) == NULL)
+ goto out;
+ ip = mtod(m, struct ip *);
+
+ error = ip_fragment(ip, &m, ifp->if_mtu, ifp->if_hwassist,
+ CSUM_DELAY_IP);
+ if (error)
+ goto out;
+
+ /* walk the chain and re-add the Ethernet header */
+ for (m0 = m; m0; m0 = m0->m_nextpkt) {
+ if (error == 0) {
+ if (snap) {
+ M_PREPEND(m0, sizeof(struct llc), M_DONTWAIT);
+ if (m0 == NULL) {
+ error = ENOBUFS;
+ continue;
+ }
+ bcopy(llc, mtod(m0, caddr_t),
+ sizeof(struct llc));
+ }
+ M_PREPEND(m0, ETHER_HDR_LEN, M_DONTWAIT);
+ if (m0 == NULL) {
+ error = ENOBUFS;
+ continue;
+ }
+ bcopy(eh, mtod(m0, caddr_t), ETHER_HDR_LEN);
+ } else
+ m_freem(m);
+ }
+
+ if (error == 0)
+ KMOD_IPSTAT_INC(ips_fragmented);
+
+ return (error);
+
+out:
+ if (m != NULL)
+ m_freem(m);
+ return (error);
+}
diff --git a/rtems/freebsd/net/if_bridgevar.h b/rtems/freebsd/net/if_bridgevar.h
new file mode 100644
index 00000000..f3473212
--- /dev/null
+++ b/rtems/freebsd/net/if_bridgevar.h
@@ -0,0 +1,328 @@
+/* $NetBSD: if_bridgevar.h,v 1.4 2003/07/08 07:13:50 itojun Exp $ */
+
+/*
+ * Copyright 2001 Wasabi Systems, Inc.
+ * All rights reserved.
+ *
+ * Written by Jason R. Thorpe for Wasabi Systems, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed for the NetBSD Project by
+ * Wasabi Systems, Inc.
+ * 4. The name of Wasabi Systems, Inc. may not be used to endorse
+ * or promote products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Copyright (c) 1999, 2000 Jason L. Wright (jason@thought.net)
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Jason L. Wright
+ * 4. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * OpenBSD: if_bridge.h,v 1.14 2001/03/22 03:48:29 jason Exp
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * Data structure and control definitions for bridge interfaces.
+ */
+
+#include <rtems/freebsd/sys/callout.h>
+#include <rtems/freebsd/sys/queue.h>
+#include <rtems/freebsd/sys/condvar.h>
+
+/*
+ * Commands used in the SIOCSDRVSPEC ioctl. Note the lookup of the
+ * bridge interface itself is keyed off the ifdrv structure.
+ */
+#define BRDGADD 0 /* add bridge member (ifbreq) */
+#define BRDGDEL 1 /* delete bridge member (ifbreq) */
+#define BRDGGIFFLGS 2 /* get member if flags (ifbreq) */
+#define BRDGSIFFLGS 3 /* set member if flags (ifbreq) */
+#define BRDGSCACHE 4 /* set cache size (ifbrparam) */
+#define BRDGGCACHE 5 /* get cache size (ifbrparam) */
+#define BRDGGIFS 6 /* get member list (ifbifconf) */
+#define BRDGRTS 7 /* get address list (ifbaconf) */
+#define BRDGSADDR 8 /* set static address (ifbareq) */
+#define BRDGSTO 9 /* set cache timeout (ifbrparam) */
+#define BRDGGTO 10 /* get cache timeout (ifbrparam) */
+#define BRDGDADDR 11 /* delete address (ifbareq) */
+#define BRDGFLUSH 12 /* flush address cache (ifbreq) */
+
+#define BRDGGPRI 13 /* get priority (ifbrparam) */
+#define BRDGSPRI 14 /* set priority (ifbrparam) */
+#define BRDGGHT 15 /* get hello time (ifbrparam) */
+#define BRDGSHT 16 /* set hello time (ifbrparam) */
+#define BRDGGFD 17 /* get forward delay (ifbrparam) */
+#define BRDGSFD 18 /* set forward delay (ifbrparam) */
+#define BRDGGMA 19 /* get max age (ifbrparam) */
+#define BRDGSMA 20 /* set max age (ifbrparam) */
+#define BRDGSIFPRIO 21 /* set if priority (ifbreq) */
+#define BRDGSIFCOST 22 /* set if path cost (ifbreq) */
+#define BRDGADDS 23 /* add bridge span member (ifbreq) */
+#define BRDGDELS 24 /* delete bridge span member (ifbreq) */
+#define BRDGPARAM 25 /* get bridge STP params (ifbropreq) */
+#define BRDGGRTE 26 /* get cache drops (ifbrparam) */
+#define BRDGGIFSSTP 27 /* get member STP params list
+ * (ifbpstpconf) */
+#define BRDGSPROTO 28 /* set protocol (ifbrparam) */
+#define BRDGSTXHC 29 /* set tx hold count (ifbrparam) */
+#define BRDGSIFAMAX 30 /* set max interface addrs (ifbreq) */
+
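+/*
+ * A minimal userland sketch (illustrative only; assumes an open
+ * socket s and <sys/sockio.h>, error handling omitted) of driving
+ * one of the commands above through the SIOCSDRVSPEC ioctl, here
+ * adding member em0 to bridge0:
+ *
+ * struct ifbreq req;
+ * struct ifdrv ifd;
+ *
+ * memset(&req, 0, sizeof(req));
+ * strlcpy(req.ifbr_ifsname, "em0", sizeof(req.ifbr_ifsname));
+ * memset(&ifd, 0, sizeof(ifd));
+ * strlcpy(ifd.ifd_name, "bridge0", sizeof(ifd.ifd_name));
+ * ifd.ifd_cmd = BRDGADD;
+ * ifd.ifd_len = sizeof(req);
+ * ifd.ifd_data = &req;
+ * ioctl(s, SIOCSDRVSPEC, &ifd);
+ */
+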
+/*
+ * Generic bridge control request.
+ */
+struct ifbreq {
+ char ifbr_ifsname[IFNAMSIZ]; /* member if name */
+ uint32_t ifbr_ifsflags; /* member if flags */
+ uint32_t ifbr_stpflags; /* member if STP flags */
+ uint32_t ifbr_path_cost; /* member if STP cost */
+ uint8_t ifbr_portno; /* member if port number */
+ uint8_t ifbr_priority; /* member if STP priority */
+ uint8_t ifbr_proto; /* member if STP protocol */
+ uint8_t ifbr_role; /* member if STP role */
+ uint8_t ifbr_state; /* member if STP state */
+ uint32_t ifbr_addrcnt; /* member if addr number */
+ uint32_t ifbr_addrmax; /* member if addr max */
+ uint32_t ifbr_addrexceeded; /* member if addr violations */
+ uint8_t pad[32];
+};
+
+/* BRDGGIFFLAGS, BRDGSIFFLAGS */
+#define IFBIF_LEARNING 0x0001 /* if can learn */
+#define IFBIF_DISCOVER 0x0002 /* if sends packets w/ unknown dest. */
+#define IFBIF_STP 0x0004 /* if participates in spanning tree */
+#define IFBIF_SPAN 0x0008 /* if is a span port */
+#define IFBIF_STICKY 0x0010 /* if learned addresses stick */
+#define IFBIF_BSTP_EDGE 0x0020 /* member stp edge port */
+#define IFBIF_BSTP_AUTOEDGE 0x0040 /* member stp autoedge enabled */
+#define IFBIF_BSTP_PTP 0x0080 /* member stp point to point */
+#define IFBIF_BSTP_AUTOPTP 0x0100 /* member stp autoptp enabled */
+#define IFBIF_BSTP_ADMEDGE 0x0200 /* member stp admin edge enabled */
+#define IFBIF_BSTP_ADMCOST 0x0400 /* member stp admin path cost */
+#define IFBIF_PRIVATE 0x0800 /* if is a private segment */
+
+#define IFBIFBITS "\020\001LEARNING\002DISCOVER\003STP\004SPAN" \
+ "\005STICKY\014PRIVATE\006EDGE\007AUTOEDGE\010PTP" \
+ "\011AUTOPTP"
+#define IFBIFMASK ~(IFBIF_BSTP_EDGE|IFBIF_BSTP_AUTOEDGE|IFBIF_BSTP_PTP| \
+ IFBIF_BSTP_AUTOPTP|IFBIF_BSTP_ADMEDGE| \
+ IFBIF_BSTP_ADMCOST) /* not saved */
+
+/* BRDGFLUSH */
+#define IFBF_FLUSHDYN 0x00 /* flush learned addresses only */
+#define IFBF_FLUSHALL 0x01 /* flush all addresses */
+
+/*
+ * Interface list structure.
+ */
+struct ifbifconf {
+ uint32_t ifbic_len; /* buffer size */
+ union {
+ caddr_t ifbicu_buf;
+ struct ifbreq *ifbicu_req;
+ } ifbic_ifbicu;
+#define ifbic_buf ifbic_ifbicu.ifbicu_buf
+#define ifbic_req ifbic_ifbicu.ifbicu_req
+};
+
+/*
+ * Bridge address request.
+ */
+struct ifbareq {
+ char ifba_ifsname[IFNAMSIZ]; /* member if name */
+ unsigned long ifba_expire; /* address expire time */
+ uint8_t ifba_flags; /* address flags */
+ uint8_t ifba_dst[ETHER_ADDR_LEN];/* destination address */
+ uint16_t ifba_vlan; /* vlan id */
+};
+
+#define IFBAF_TYPEMASK 0x03 /* address type mask */
+#define IFBAF_DYNAMIC 0x00 /* dynamically learned address */
+#define IFBAF_STATIC 0x01 /* static address */
+#define IFBAF_STICKY 0x02 /* sticky address */
+
+#define IFBAFBITS "\020\1STATIC\2STICKY"
+
+/*
+ * Address list structure.
+ */
+struct ifbaconf {
+ uint32_t ifbac_len; /* buffer size */
+ union {
+ caddr_t ifbacu_buf;
+ struct ifbareq *ifbacu_req;
+ } ifbac_ifbacu;
+#define ifbac_buf ifbac_ifbacu.ifbacu_buf
+#define ifbac_req ifbac_ifbacu.ifbacu_req
+};
+
+/*
+ * Bridge parameter structure.
+ */
+struct ifbrparam {
+ union {
+ uint32_t ifbrpu_int32;
+ uint16_t ifbrpu_int16;
+ uint8_t ifbrpu_int8;
+ } ifbrp_ifbrpu;
+};
+#define ifbrp_csize ifbrp_ifbrpu.ifbrpu_int32 /* cache size */
+#define ifbrp_ctime ifbrp_ifbrpu.ifbrpu_int32 /* cache time (sec) */
+#define ifbrp_prio ifbrp_ifbrpu.ifbrpu_int16 /* bridge priority */
+#define ifbrp_proto ifbrp_ifbrpu.ifbrpu_int8 /* bridge protocol */
+#define ifbrp_txhc ifbrp_ifbrpu.ifbrpu_int8 /* bpdu tx holdcount */
+#define ifbrp_hellotime ifbrp_ifbrpu.ifbrpu_int8 /* hello time (sec) */
+#define ifbrp_fwddelay ifbrp_ifbrpu.ifbrpu_int8 /* fwd time (sec) */
+#define ifbrp_maxage ifbrp_ifbrpu.ifbrpu_int8 /* max age (sec) */
+#define ifbrp_cexceeded ifbrp_ifbrpu.ifbrpu_int32 /* # of cache dropped
+ * addresses */
+/*
+ * Bridge current operational parameters structure.
+ */
+struct ifbropreq {
+ uint8_t ifbop_holdcount;
+ uint8_t ifbop_maxage;
+ uint8_t ifbop_hellotime;
+ uint8_t ifbop_fwddelay;
+ uint8_t ifbop_protocol;
+ uint16_t ifbop_priority;
+ uint16_t ifbop_root_port;
+ uint32_t ifbop_root_path_cost;
+ uint64_t ifbop_bridgeid;
+ uint64_t ifbop_designated_root;
+ uint64_t ifbop_designated_bridge;
+ struct timeval ifbop_last_tc_time;
+};
+
+/*
+ * Bridge member operational STP params structure.
+ */
+struct ifbpstpreq {
+ uint8_t ifbp_portno; /* bp STP port number */
+ uint32_t ifbp_fwd_trans; /* bp STP fwd transitions */
+ uint32_t ifbp_design_cost; /* bp STP designated cost */
+ uint32_t ifbp_design_port; /* bp STP designated port */
+ uint64_t ifbp_design_bridge; /* bp STP designated bridge */
+ uint64_t ifbp_design_root; /* bp STP designated root */
+};
+
+/*
+ * Bridge STP ports list structure.
+ */
+struct ifbpstpconf {
+ uint32_t ifbpstp_len; /* buffer size */
+ union {
+ caddr_t ifbpstpu_buf;
+ struct ifbpstpreq *ifbpstpu_req;
+ } ifbpstp_ifbpstpu;
+#define ifbpstp_buf ifbpstp_ifbpstpu.ifbpstpu_buf
+#define ifbpstp_req ifbpstp_ifbpstpu.ifbpstpu_req
+};
+
+#ifdef _KERNEL
+
+#define BRIDGE_LOCK_INIT(_sc) do { \
+ mtx_init(&(_sc)->sc_mtx, "if_bridge", NULL, MTX_DEF); \
+ cv_init(&(_sc)->sc_cv, "if_bridge_cv"); \
+} while (0)
+#define BRIDGE_LOCK_DESTROY(_sc) do { \
+ mtx_destroy(&(_sc)->sc_mtx); \
+ cv_destroy(&(_sc)->sc_cv); \
+} while (0)
+#define BRIDGE_LOCK(_sc) mtx_lock(&(_sc)->sc_mtx)
+#define BRIDGE_UNLOCK(_sc) mtx_unlock(&(_sc)->sc_mtx)
+#define BRIDGE_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->sc_mtx, MA_OWNED)
+#define BRIDGE_LOCK2REF(_sc, _err) do { \
+ mtx_assert(&(_sc)->sc_mtx, MA_OWNED); \
+ if ((_sc)->sc_iflist_xcnt > 0) \
+ (_err) = EBUSY; \
+ else \
+ (_sc)->sc_iflist_ref++; \
+ mtx_unlock(&(_sc)->sc_mtx); \
+} while (0)
+#define BRIDGE_UNREF(_sc) do { \
+ mtx_lock(&(_sc)->sc_mtx); \
+ (_sc)->sc_iflist_ref--; \
+ if (((_sc)->sc_iflist_xcnt > 0) && ((_sc)->sc_iflist_ref == 0)) \
+ cv_broadcast(&(_sc)->sc_cv); \
+ mtx_unlock(&(_sc)->sc_mtx); \
+} while (0)
+#define BRIDGE_XLOCK(_sc) do { \
+ mtx_assert(&(_sc)->sc_mtx, MA_OWNED); \
+ (_sc)->sc_iflist_xcnt++; \
+ while ((_sc)->sc_iflist_ref > 0) \
+ cv_wait(&(_sc)->sc_cv, &(_sc)->sc_mtx); \
+} while (0)
+#define BRIDGE_XDROP(_sc) do { \
+ mtx_assert(&(_sc)->sc_mtx, MA_OWNED); \
+ (_sc)->sc_iflist_xcnt--; \
+} while (0)
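+
+/*
+ * Sketch of how the forwarding path uses the scheme above (compare
+ * bridge_broadcast() in if_bridge.c): the mutex is converted into a
+ * reference so the member list stays stable during transmission
+ * without holding sc_mtx:
+ *
+ * BRIDGE_LOCK(sc);
+ * ...
+ * BRIDGE_LOCK2REF(sc, error); (drops sc_mtx, takes a reference)
+ * if (error) ...
+ * ... iterate sc_iflist and transmit ...
+ * BRIDGE_UNREF(sc); (wakes any BRIDGE_XLOCK() waiter)
+ */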
+
+#define BRIDGE_INPUT(_ifp, _m) do { \
+ KASSERT(bridge_input_p != NULL, \
+ ("%s: if_bridge not loaded!", __func__)); \
+ _m = (*bridge_input_p)(_ifp, _m); \
+ if (_m != NULL) \
+ _ifp = _m->m_pkthdr.rcvif; \
+} while (0)
+
+#define BRIDGE_OUTPUT(_ifp, _m, _err) do { \
+ KASSERT(bridge_output_p != NULL, \
+ ("%s: if_bridge not loaded!", __func__)); \
+ _err = (*bridge_output_p)(_ifp, _m, NULL, NULL); \
+} while (0)
+
+extern struct mbuf *(*bridge_input_p)(struct ifnet *, struct mbuf *);
+extern int (*bridge_output_p)(struct ifnet *, struct mbuf *,
+ struct sockaddr *, struct rtentry *);
+extern void (*bridge_dn_p)(struct mbuf *, struct ifnet *);
+
+#endif /* _KERNEL */
diff --git a/rtems/freebsd/net/if_clone.c b/rtems/freebsd/net/if_clone.c
new file mode 100644
index 00000000..19cf2113
--- /dev/null
+++ b/rtems/freebsd/net/if_clone.c
@@ -0,0 +1,617 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 1980, 1986, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)if.c 8.5 (Berkeley) 1/9/95
+ * $FreeBSD$
+ */
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/limits.h>
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/mutex.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/types.h>
+#include <rtems/freebsd/sys/socket.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/if_clone.h>
+#if 0
+#include <rtems/freebsd/net/if_dl.h>
+#endif
+#include <rtems/freebsd/net/if_types.h>
+#include <rtems/freebsd/net/if_var.h>
+#include <rtems/freebsd/net/radix.h>
+#include <rtems/freebsd/net/route.h>
+#include <rtems/freebsd/net/vnet.h>
+
+static void if_clone_free(struct if_clone *ifc);
+static int if_clone_createif(struct if_clone *ifc, char *name, size_t len,
+ caddr_t params);
+
+static struct mtx if_cloners_mtx;
+static VNET_DEFINE(int, if_cloners_count);
+VNET_DEFINE(LIST_HEAD(, if_clone), if_cloners);
+
+#define V_if_cloners_count VNET(if_cloners_count)
+#define V_if_cloners VNET(if_cloners)
+
+#define IF_CLONERS_LOCK_INIT() \
+ mtx_init(&if_cloners_mtx, "if_cloners lock", NULL, MTX_DEF)
+#define IF_CLONERS_LOCK_ASSERT() mtx_assert(&if_cloners_mtx, MA_OWNED)
+#define IF_CLONERS_LOCK() mtx_lock(&if_cloners_mtx)
+#define IF_CLONERS_UNLOCK() mtx_unlock(&if_cloners_mtx)
+
+#define IF_CLONE_LOCK_INIT(ifc) \
+ mtx_init(&(ifc)->ifc_mtx, "if_clone lock", NULL, MTX_DEF)
+#define IF_CLONE_LOCK_DESTROY(ifc) mtx_destroy(&(ifc)->ifc_mtx)
+#define IF_CLONE_LOCK_ASSERT(ifc) mtx_assert(&(ifc)->ifc_mtx, MA_OWNED)
+#define IF_CLONE_LOCK(ifc) mtx_lock(&(ifc)->ifc_mtx)
+#define IF_CLONE_UNLOCK(ifc) mtx_unlock(&(ifc)->ifc_mtx)
+
+#define IF_CLONE_ADDREF(ifc) \
+ do { \
+ IF_CLONE_LOCK(ifc); \
+ IF_CLONE_ADDREF_LOCKED(ifc); \
+ IF_CLONE_UNLOCK(ifc); \
+ } while (0)
+#define IF_CLONE_ADDREF_LOCKED(ifc) \
+ do { \
+ IF_CLONE_LOCK_ASSERT(ifc); \
+ KASSERT((ifc)->ifc_refcnt >= 0, \
+ ("negative refcnt %ld", (ifc)->ifc_refcnt)); \
+ (ifc)->ifc_refcnt++; \
+ } while (0)
+#define IF_CLONE_REMREF(ifc) \
+ do { \
+ IF_CLONE_LOCK(ifc); \
+ IF_CLONE_REMREF_LOCKED(ifc); \
+ } while (0)
+#define IF_CLONE_REMREF_LOCKED(ifc) \
+ do { \
+ IF_CLONE_LOCK_ASSERT(ifc); \
+ KASSERT((ifc)->ifc_refcnt > 0, \
+ ("bogus refcnt %ld", (ifc)->ifc_refcnt)); \
+ if (--(ifc)->ifc_refcnt == 0) { \
+ IF_CLONE_UNLOCK(ifc); \
+ if_clone_free(ifc); \
+ } else { \
+ /* silently release the lock */ \
+ IF_CLONE_UNLOCK(ifc); \
+ } \
+ } while (0)
+
+#define IFC_IFLIST_INSERT(_ifc, _ifp) \
+ LIST_INSERT_HEAD(&_ifc->ifc_iflist, _ifp, if_clones)
+#define IFC_IFLIST_REMOVE(_ifc, _ifp) \
+ LIST_REMOVE(_ifp, if_clones)
+
+static MALLOC_DEFINE(M_CLONE, "clone", "interface cloning framework");
+
+void
+vnet_if_clone_init(void)
+{
+
+ LIST_INIT(&V_if_cloners);
+}
+
+void
+if_clone_init(void)
+{
+
+ IF_CLONERS_LOCK_INIT();
+}
+
+/*
+ * Lookup and create a clone network interface.
+ */
+int
+if_clone_create(char *name, size_t len, caddr_t params)
+{
+ struct if_clone *ifc;
+
+ /* Try to find an applicable cloner for this request */
+ IF_CLONERS_LOCK();
+ LIST_FOREACH(ifc, &V_if_cloners, ifc_list) {
+ if (ifc->ifc_match(ifc, name)) {
+ break;
+ }
+ }
+#ifdef VIMAGE
+ if (ifc == NULL && !IS_DEFAULT_VNET(curvnet)) {
+ CURVNET_SET_QUIET(vnet0);
+ LIST_FOREACH(ifc, &V_if_cloners, ifc_list) {
+ if (ifc->ifc_match(ifc, name))
+ break;
+ }
+ CURVNET_RESTORE();
+ }
+#endif
+ IF_CLONERS_UNLOCK();
+
+ if (ifc == NULL)
+ return (EINVAL);
+
+ return (if_clone_createif(ifc, name, len, params));
+}
+
+/*
+ * Create a clone network interface.
+ */
+static int
+if_clone_createif(struct if_clone *ifc, char *name, size_t len, caddr_t params)
+{
+ int err;
+ struct ifnet *ifp;
+
+ if (ifunit(name) != NULL)
+ return (EEXIST);
+
+ err = (*ifc->ifc_create)(ifc, name, len, params);
+
+ if (!err) {
+ ifp = ifunit(name);
+ if (ifp == NULL)
+ panic("%s: lookup failed for %s", __func__, name);
+
+ if_addgroup(ifp, ifc->ifc_name);
+
+ IF_CLONE_LOCK(ifc);
+ IFC_IFLIST_INSERT(ifc, ifp);
+ IF_CLONE_UNLOCK(ifc);
+ }
+
+ return (err);
+}
+
+/*
+ * Lookup and destroy a clone network interface.
+ */
+int
+if_clone_destroy(const char *name)
+{
+ int err;
+ struct if_clone *ifc;
+ struct ifnet *ifp;
+
+ ifp = ifunit_ref(name);
+ if (ifp == NULL)
+ return (ENXIO);
+
+ /* Find the cloner for this interface */
+ IF_CLONERS_LOCK();
+ LIST_FOREACH(ifc, &V_if_cloners, ifc_list) {
+ if (strcmp(ifc->ifc_name, ifp->if_dname) == 0) {
+ break;
+ }
+ }
+#ifdef VIMAGE
+ if (ifc == NULL && !IS_DEFAULT_VNET(curvnet)) {
+ CURVNET_SET_QUIET(vnet0);
+ LIST_FOREACH(ifc, &V_if_cloners, ifc_list) {
+ if (ifc->ifc_match(ifc, name))
+ break;
+ }
+ CURVNET_RESTORE();
+ }
+#endif
+ IF_CLONERS_UNLOCK();
+ if (ifc == NULL) {
+ if_rele(ifp);
+ return (EINVAL);
+ }
+
+ err = if_clone_destroyif(ifc, ifp);
+ if_rele(ifp);
+ return (err);
+}
+
+/*
+ * Destroy a clone network interface.
+ */
+int
+if_clone_destroyif(struct if_clone *ifc, struct ifnet *ifp)
+{
+ int err;
+ struct ifnet *ifcifp;
+
+ if (ifc->ifc_destroy == NULL)
+ return (EOPNOTSUPP);
+
+ /*
+ * Given that the cloned ifnet might be attached to a different
+ * vnet from where its cloner was registered, we have to
+ * switch to the vnet context of the target vnet.
+ */
+ CURVNET_SET_QUIET(ifp->if_vnet);
+
+ IF_CLONE_LOCK(ifc);
+ LIST_FOREACH(ifcifp, &ifc->ifc_iflist, if_clones) {
+ if (ifcifp == ifp) {
+ IFC_IFLIST_REMOVE(ifc, ifp);
+ break;
+ }
+ }
+ IF_CLONE_UNLOCK(ifc);
+ if (ifcifp == NULL) {
+ CURVNET_RESTORE();
+ return (ENXIO); /* ifp is not on the list. */
+ }
+
+ if_delgroup(ifp, ifc->ifc_name);
+
+ err = (*ifc->ifc_destroy)(ifc, ifp);
+
+ if (err != 0) {
+ if_addgroup(ifp, ifc->ifc_name);
+
+ IF_CLONE_LOCK(ifc);
+ IFC_IFLIST_INSERT(ifc, ifp);
+ IF_CLONE_UNLOCK(ifc);
+ }
+ CURVNET_RESTORE();
+ return (err);
+}
+
+/*
+ * Register a network interface cloner.
+ */
+void
+if_clone_attach(struct if_clone *ifc)
+{
+ int len, maxclone;
+
+ /*
+ * Compute bitmap size and allocate it.
+ */
+ maxclone = ifc->ifc_maxunit + 1;
+ len = maxclone >> 3;
+ if ((len << 3) < maxclone)
+ len++;
+ ifc->ifc_units = malloc(len, M_CLONE, M_WAITOK | M_ZERO);
+ ifc->ifc_bmlen = len;
+ IF_CLONE_LOCK_INIT(ifc);
+ IF_CLONE_ADDREF(ifc);
+
+ IF_CLONERS_LOCK();
+ LIST_INSERT_HEAD(&V_if_cloners, ifc, ifc_list);
+ V_if_cloners_count++;
+ IF_CLONERS_UNLOCK();
+
+ LIST_INIT(&ifc->ifc_iflist);
+
+ if (ifc->ifc_attach != NULL)
+ (*ifc->ifc_attach)(ifc);
+ EVENTHANDLER_INVOKE(if_clone_event, ifc);
+}
+
+/*
+ * Unregister a network interface cloner.
+ */
+void
+if_clone_detach(struct if_clone *ifc)
+{
+ struct ifc_simple_data *ifcs = ifc->ifc_data;
+
+ IF_CLONERS_LOCK();
+ LIST_REMOVE(ifc, ifc_list);
+ V_if_cloners_count--;
+ IF_CLONERS_UNLOCK();
+
+ /* Allow all simples to be destroyed */
+ if (ifc->ifc_attach == ifc_simple_attach)
+ ifcs->ifcs_minifs = 0;
+
+ /* destroy all interfaces for this cloner */
+ while (!LIST_EMPTY(&ifc->ifc_iflist))
+ if_clone_destroyif(ifc, LIST_FIRST(&ifc->ifc_iflist));
+
+ IF_CLONE_REMREF(ifc);
+}
+
+static void
+if_clone_free(struct if_clone *ifc)
+{
+ for (int bytoff = 0; bytoff < ifc->ifc_bmlen; bytoff++) {
+ KASSERT(ifc->ifc_units[bytoff] == 0x00,
+ ("ifc_units[%d] is not empty", bytoff));
+ }
+
+ KASSERT(LIST_EMPTY(&ifc->ifc_iflist),
+ ("%s: ifc_iflist not empty", __func__));
+
+ IF_CLONE_LOCK_DESTROY(ifc);
+ free(ifc->ifc_units, M_CLONE);
+}
+
+/*
+ * Provide list of interface cloners to userspace.
+ */
+int
+if_clone_list(struct if_clonereq *ifcr)
+{
+ char *buf, *dst, *outbuf = NULL;
+ struct if_clone *ifc;
+ int buf_count, count, err = 0;
+
+ if (ifcr->ifcr_count < 0)
+ return (EINVAL);
+
+ IF_CLONERS_LOCK();
+ /*
+ * Set our internal output buffer size. We could end up not
+ * reporting a cloner that is added between the unlock and lock
+ * below, but that's not a major problem. We cap our allocation
+ * at the number of cloners actually in the system because not
+ * doing so would let arbitrary users cause us to allocate
+ * arbitrary amounts of kernel memory.
+ */
+ buf_count = (V_if_cloners_count < ifcr->ifcr_count) ?
+ V_if_cloners_count : ifcr->ifcr_count;
+ IF_CLONERS_UNLOCK();
+
+ outbuf = malloc(IFNAMSIZ*buf_count, M_CLONE, M_WAITOK | M_ZERO);
+
+ IF_CLONERS_LOCK();
+
+ ifcr->ifcr_total = V_if_cloners_count;
+ if ((dst = ifcr->ifcr_buffer) == NULL) {
+ /* Just asking how many there are. */
+ goto done;
+ }
+ count = (V_if_cloners_count < buf_count) ?
+ V_if_cloners_count : buf_count;
+
+ for (ifc = LIST_FIRST(&V_if_cloners), buf = outbuf;
+ ifc != NULL && count != 0;
+ ifc = LIST_NEXT(ifc, ifc_list), count--, buf += IFNAMSIZ) {
+ strlcpy(buf, ifc->ifc_name, IFNAMSIZ);
+ }
+
+done:
+ IF_CLONERS_UNLOCK();
+ if (err == 0)
+ err = copyout(outbuf, dst, buf_count*IFNAMSIZ);
+ if (outbuf != NULL)
+ free(outbuf, M_CLONE);
+ return (err);
+}
+
+/*
+ * A utility function to extract unit numbers from interface names of
+ * the form name###.
+ *
+ * Returns 0 on success and an error on failure.
+ */
+int
+ifc_name2unit(const char *name, int *unit)
+{
+ const char *cp;
+ int cutoff = INT_MAX / 10;
+ int cutlim = INT_MAX % 10;
+
+ for (cp = name; *cp != '\0' && (*cp < '0' || *cp > '9'); cp++);
+ if (*cp == '\0') {
+ *unit = -1;
+ } else if (cp[0] == '0' && cp[1] != '\0') {
+ /* Disallow leading zeroes. */
+ return (EINVAL);
+ } else {
+ for (*unit = 0; *cp != '\0'; cp++) {
+ if (*cp < '0' || *cp > '9') {
+ /* Bogus unit number. */
+ return (EINVAL);
+ }
+ if (*unit > cutoff ||
+ (*unit == cutoff && *cp - '0' > cutlim))
+ return (EINVAL);
+ *unit = (*unit * 10) + (*cp - '0');
+ }
+ }
+
+ return (0);
+}
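+
+/*
+ * For illustration: ifc_name2unit("bridge5", &unit) returns 0 with
+ * unit == 5; ifc_name2unit("bridge", &unit) returns 0 with unit == -1
+ * (a wildcard, resolved later by ifc_alloc_unit()); "bridge05" and
+ * "bridge9999999999" are rejected with EINVAL (leading zero and
+ * overflow, respectively).
+ */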
+
+int
+ifc_alloc_unit(struct if_clone *ifc, int *unit)
+{
+ int wildcard, bytoff, bitoff;
+ int err = 0;
+
+ IF_CLONE_LOCK(ifc);
+
+ bytoff = bitoff = 0;
+ wildcard = (*unit < 0);
+ /*
+ * Find a free unit if none was given.
+ */
+ if (wildcard) {
+ while ((bytoff < ifc->ifc_bmlen)
+ && (ifc->ifc_units[bytoff] == 0xff))
+ bytoff++;
+ if (bytoff >= ifc->ifc_bmlen) {
+ err = ENOSPC;
+ goto done;
+ }
+ while ((ifc->ifc_units[bytoff] & (1 << bitoff)) != 0)
+ bitoff++;
+ *unit = (bytoff << 3) + bitoff;
+ }
+
+ if (*unit > ifc->ifc_maxunit) {
+ err = ENOSPC;
+ goto done;
+ }
+
+ if (!wildcard) {
+ bytoff = *unit >> 3;
+ bitoff = *unit - (bytoff << 3);
+ }
+
+ if ((ifc->ifc_units[bytoff] & (1 << bitoff)) != 0) {
+ err = EEXIST;
+ goto done;
+ }
+ /*
+ * Allocate the unit in the bitmap.
+ */
+ KASSERT((ifc->ifc_units[bytoff] & (1 << bitoff)) == 0,
+ ("%s: bit is already set", __func__));
+ ifc->ifc_units[bytoff] |= (1 << bitoff);
+ IF_CLONE_ADDREF_LOCKED(ifc);
+
+done:
+ IF_CLONE_UNLOCK(ifc);
+ return (err);
+}
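+
+/*
+ * Bitmap layout, for illustration: unit N occupies bit (N & 7) of
+ * byte (N >> 3); e.g. unit 10 maps to bytoff = 1, bitoff = 2, i.e.
+ * bit 2 of ifc_units[1]. ifc_free_unit() below clears the same bit.
+ */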
+
+void
+ifc_free_unit(struct if_clone *ifc, int unit)
+{
+ int bytoff, bitoff;
+
+ /*
+ * Compute offset in the bitmap and deallocate the unit.
+ */
+ bytoff = unit >> 3;
+ bitoff = unit - (bytoff << 3);
+
+ IF_CLONE_LOCK(ifc);
+ KASSERT((ifc->ifc_units[bytoff] & (1 << bitoff)) != 0,
+ ("%s: bit is already cleared", __func__));
+ ifc->ifc_units[bytoff] &= ~(1 << bitoff);
+ IF_CLONE_REMREF_LOCKED(ifc); /* releases lock */
+}
+
+void
+ifc_simple_attach(struct if_clone *ifc)
+{
+ int err;
+ int unit;
+ char name[IFNAMSIZ];
+ struct ifc_simple_data *ifcs = ifc->ifc_data;
+
+ KASSERT(ifcs->ifcs_minifs - 1 <= ifc->ifc_maxunit,
+ ("%s: %s requested more units than allowed (%d > %d)",
+ __func__, ifc->ifc_name, ifcs->ifcs_minifs,
+ ifc->ifc_maxunit + 1));
+
+ for (unit = 0; unit < ifcs->ifcs_minifs; unit++) {
+ snprintf(name, IFNAMSIZ, "%s%d", ifc->ifc_name, unit);
+ err = if_clone_createif(ifc, name, IFNAMSIZ, NULL);
+ KASSERT(err == 0,
+ ("%s: failed to create required interface %s",
+ __func__, name));
+ }
+}
+
+int
+ifc_simple_match(struct if_clone *ifc, const char *name)
+{
+ const char *cp;
+ int i;
+
+ /* Match the name */
+ for (cp = name, i = 0; i < strlen(ifc->ifc_name); i++, cp++) {
+ if (ifc->ifc_name[i] != *cp)
+ return (0);
+ }
+
+ /* Make sure there's a unit number or nothing after the name */
+ for (; *cp != '\0'; cp++) {
+ if (*cp < '0' || *cp > '9')
+ return (0);
+ }
+
+ return (1);
+}
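+
+/*
+ * With ifc_name "bridge", for example, ifc_simple_match() accepts
+ * "bridge" and "bridge3" but rejects "bridgex" and "br0".
+ */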
+
+int
+ifc_simple_create(struct if_clone *ifc, char *name, size_t len, caddr_t params)
+{
+ char *dp;
+ int wildcard;
+ int unit;
+ int err;
+ struct ifc_simple_data *ifcs = ifc->ifc_data;
+
+ err = ifc_name2unit(name, &unit);
+ if (err != 0)
+ return (err);
+
+ wildcard = (unit < 0);
+
+ err = ifc_alloc_unit(ifc, &unit);
+ if (err != 0)
+ return (err);
+
+ err = ifcs->ifcs_create(ifc, unit, params);
+ if (err != 0) {
+ ifc_free_unit(ifc, unit);
+ return (err);
+ }
+
+ /* In the wildcard case, we need to update the name. */
+ if (wildcard) {
+ for (dp = name; *dp != '\0'; dp++);
+ if (snprintf(dp, len - (dp-name), "%d", unit) >
+ len - (dp-name) - 1) {
+ /*
+ * This can only be a programmer error and
+ * there's no straightforward way to recover if
+ * it happens.
+ */
+ panic("if_clone_create(): interface name too long");
+ }
+
+ }
+
+ return (0);
+}
+
+int
+ifc_simple_destroy(struct if_clone *ifc, struct ifnet *ifp)
+{
+ int unit;
+ struct ifc_simple_data *ifcs = ifc->ifc_data;
+
+ unit = ifp->if_dunit;
+
+ if (unit < ifcs->ifcs_minifs)
+ return (EINVAL);
+
+ ifcs->ifcs_destroy(ifp);
+
+ ifc_free_unit(ifc, unit);
+
+ return (0);
+}
diff --git a/rtems/freebsd/net/if_clone.h b/rtems/freebsd/net/if_clone.h
new file mode 100644
index 00000000..67de320b
--- /dev/null
+++ b/rtems/freebsd/net/if_clone.h
@@ -0,0 +1,116 @@
+/*-
+ * Copyright (c) 1982, 1986, 1989, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * From: @(#)if.h 8.1 (Berkeley) 6/10/93
+ * $FreeBSD$
+ */
+
+#ifndef _NET_IF_CLONE_HH_
+#define _NET_IF_CLONE_HH_
+
+#ifdef _KERNEL
+
+#define IFC_CLONE_INITIALIZER(name, data, maxunit, \
+ attach, match, create, destroy) \
+ { { 0 }, name, maxunit, NULL, 0, data, attach, match, create, destroy }
+
+/*
+ * Structure describing a `cloning' interface.
+ *
+ * List of locks
+ * (c) const until freeing
+ * (d) driver specific data, may need external protection.
+ * (e) locked by if_cloners_mtx
+ * (i) locked by ifc_mtx mtx
+ */
+struct if_clone {
+ LIST_ENTRY(if_clone) ifc_list; /* (e) On list of cloners */
+ const char *ifc_name; /* (c) Name of device, e.g. `gif' */
+ int ifc_maxunit; /* (c) Maximum unit number */
+ unsigned char *ifc_units; /* (i) Bitmap to handle units. */
+ /* Considered private, access */
+ /* via ifc_(alloc|free)_unit(). */
+ int ifc_bmlen; /* (c) Bitmap length. */
+	void *ifc_data;			/* (d) Data for ifc_* functions. */
+
+ /* (c) Driver specific cloning functions. Called with no locks held. */
+ void (*ifc_attach)(struct if_clone *);
+ int (*ifc_match)(struct if_clone *, const char *);
+ int (*ifc_create)(struct if_clone *, char *, size_t, caddr_t);
+ int (*ifc_destroy)(struct if_clone *, struct ifnet *);
+
+	long ifc_refcnt;		/* (i) Reference count. */
+	struct mtx ifc_mtx;		/* Mutex to protect members. */
+ LIST_HEAD(, ifnet) ifc_iflist; /* (i) List of cloned interfaces */
+};
+
+void if_clone_init(void);
+void if_clone_attach(struct if_clone *);
+void if_clone_detach(struct if_clone *);
+void vnet_if_clone_init(void);
+
+int if_clone_create(char *, size_t, caddr_t);
+int if_clone_destroy(const char *);
+int if_clone_destroyif(struct if_clone *, struct ifnet *);
+int if_clone_list(struct if_clonereq *);
+
+int ifc_name2unit(const char *name, int *unit);
+int ifc_alloc_unit(struct if_clone *, int *);
+void ifc_free_unit(struct if_clone *, int);
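+
+/*
+ * Usage sketch (illustrative only; "foo" is a made-up cloner name):
+ * ifc_name2unit("foo3", &unit) parses unit 3 out of the name, while a
+ * bare "foo" leaves the unit negative to request a wildcard.
+ * ifc_alloc_unit() then either claims the requested unit in the bitmap
+ * or picks the first free one, and ifc_free_unit() releases it again.
+ */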
+
+/*
+ * The ifc_simple functions, structures, and macros implement basic
+ * cloning as in 5.[012].
+ */
+
+struct ifc_simple_data {
+ int ifcs_minifs; /* minimum number of interfaces */
+
+ int (*ifcs_create)(struct if_clone *, int, caddr_t);
+ void (*ifcs_destroy)(struct ifnet *);
+};
+
+/* interface clone event */
+typedef void (*if_clone_event_handler_t)(void *, struct if_clone *);
+EVENTHANDLER_DECLARE(if_clone_event, if_clone_event_handler_t);
+
+#define IFC_SIMPLE_DECLARE(name, minifs) \
+struct ifc_simple_data name##_cloner_data = \
+ {minifs, name##_clone_create, name##_clone_destroy}; \
+struct if_clone name##_cloner = \
+ IFC_CLONE_INITIALIZER(#name, &name##_cloner_data, IF_MAXUNIT, \
+ ifc_simple_attach, ifc_simple_match, ifc_simple_create, ifc_simple_destroy)
+
+void ifc_simple_attach(struct if_clone *);
+int ifc_simple_match(struct if_clone *, const char *);
+int ifc_simple_create(struct if_clone *, char *, size_t, caddr_t);
+int ifc_simple_destroy(struct if_clone *, struct ifnet *);
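+
+/*
+ * Usage sketch (illustrative only; "foo" is a made-up driver name): a
+ * simple cloner provides the two methods expected by IFC_SIMPLE_DECLARE()
+ * and attaches itself at module load time:
+ *
+ *	static int  foo_clone_create(struct if_clone *, int, caddr_t);
+ *	static void foo_clone_destroy(struct ifnet *);
+ *	IFC_SIMPLE_DECLARE(foo, 0);
+ *	...
+ *	if_clone_attach(&foo_cloner);
+ */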
+
+#endif /* _KERNEL */
+
+#endif /* !_NET_IF_CLONE_HH_ */
diff --git a/rtems/freebsd/net/if_dead.c b/rtems/freebsd/net/if_dead.c
new file mode 100644
index 00000000..d991083d
--- /dev/null
+++ b/rtems/freebsd/net/if_dead.c
@@ -0,0 +1,116 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 2009 Robert N. M. Watson
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * When an interface has been detached but not yet freed, we set the various
+ * ifnet function pointers to "ifdead" versions. This prevents unexpected
+ * calls from the network stack into the device driver after if_detach() has
+ * returned.
+ */
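+
+/*
+ * Sketch of the intended call sequence (illustrative only):
+ *
+ *	if_detach(ifp);
+ *	if_dead(ifp);	(further stack calls hit the harmless stubs below)
+ *	...
+ *	if_free(ifp);
+ */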
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/socket.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/if_var.h>
+
+static int
+ifdead_output(struct ifnet *ifp, struct mbuf *m, struct sockaddr *sa,
+ struct route *ro)
+{
+
+ m_freem(m);
+ return (ENXIO);
+}
+
+static void
+ifdead_input(struct ifnet *ifp, struct mbuf *m)
+{
+
+ m_freem(m);
+}
+
+static void
+ifdead_start(struct ifnet *ifp)
+{
+
+}
+
+static int
+ifdead_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
+{
+
+ return (ENXIO);
+}
+
+static void
+ifdead_watchdog(struct ifnet *ifp)
+{
+
+}
+
+static int
+ifdead_resolvemulti(struct ifnet *ifp, struct sockaddr **llsa,
+ struct sockaddr *sa)
+{
+
+ *llsa = NULL;
+ return (ENXIO);
+}
+
+static void
+ifdead_qflush(struct ifnet *ifp)
+{
+
+}
+
+static int
+ifdead_transmit(struct ifnet *ifp, struct mbuf *m)
+{
+
+ m_freem(m);
+ return (ENXIO);
+}
+
+void
+if_dead(struct ifnet *ifp)
+{
+
+ ifp->if_output = ifdead_output;
+ ifp->if_input = ifdead_input;
+ ifp->if_start = ifdead_start;
+ ifp->if_ioctl = ifdead_ioctl;
+ ifp->if_watchdog = ifdead_watchdog;
+ ifp->if_resolvemulti = ifdead_resolvemulti;
+ ifp->if_qflush = ifdead_qflush;
+ ifp->if_transmit = ifdead_transmit;
+}
diff --git a/rtems/freebsd/net/if_disc.c b/rtems/freebsd/net/if_disc.c
new file mode 100644
index 00000000..30f419dc
--- /dev/null
+++ b/rtems/freebsd/net/if_disc.c
@@ -0,0 +1,247 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 1982, 1986, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * From: @(#)if_loop.c 8.1 (Berkeley) 6/10/93
+ * $FreeBSD$
+ */
+
+/*
+ * Discard interface driver for protocol testing and timing.
+ * (Based on the loopback.)
+ */
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/module.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/sockio.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/if_clone.h>
+#include <rtems/freebsd/net/if_types.h>
+#include <rtems/freebsd/net/route.h>
+#include <rtems/freebsd/net/bpf.h>
+
+#include <rtems/freebsd/local/opt_inet.h>
+#include <rtems/freebsd/local/opt_inet6.h>
+
+#ifdef TINY_DSMTU
+#define DSMTU (1024+512)
+#else
+#define DSMTU 65532
+#endif
+
+#define DISCNAME "disc"
+
+struct disc_softc {
+ struct ifnet *sc_ifp;
+};
+
+static int discoutput(struct ifnet *, struct mbuf *,
+ struct sockaddr *, struct route *);
+static void discrtrequest(int, struct rtentry *, struct rt_addrinfo *);
+static int discioctl(struct ifnet *, u_long, caddr_t);
+static int disc_clone_create(struct if_clone *, int, caddr_t);
+static void disc_clone_destroy(struct ifnet *);
+
+static MALLOC_DEFINE(M_DISC, DISCNAME, "Discard interface");
+
+IFC_SIMPLE_DECLARE(disc, 0);
+
+static int
+disc_clone_create(struct if_clone *ifc, int unit, caddr_t params)
+{
+ struct ifnet *ifp;
+ struct disc_softc *sc;
+
+ sc = malloc(sizeof(struct disc_softc), M_DISC, M_WAITOK | M_ZERO);
+ ifp = sc->sc_ifp = if_alloc(IFT_LOOP);
+ if (ifp == NULL) {
+ free(sc, M_DISC);
+ return (ENOSPC);
+ }
+
+ ifp->if_softc = sc;
+ if_initname(ifp, ifc->ifc_name, unit);
+ ifp->if_mtu = DSMTU;
+ /*
+ * IFF_LOOPBACK should not be removed from disc's flags because
+ * it controls what PF-specific routes are magically added when
+ * a network address is assigned to the interface. Things just
+ * won't work as intended w/o such routes because the output
+ * interface selection for a packet is totally route-driven.
+ * A valid alternative to IFF_LOOPBACK can be IFF_BROADCAST or
+ * IFF_POINTOPOINT, but it would result in different properties
+ * of the interface.
+ */
+ ifp->if_flags = IFF_LOOPBACK | IFF_MULTICAST;
+ ifp->if_drv_flags = IFF_DRV_RUNNING;
+ ifp->if_ioctl = discioctl;
+ ifp->if_output = discoutput;
+ ifp->if_hdrlen = 0;
+ ifp->if_addrlen = 0;
+ ifp->if_snd.ifq_maxlen = 20;
+ if_attach(ifp);
+ bpfattach(ifp, DLT_NULL, sizeof(u_int32_t));
+
+ return (0);
+}
+
+static void
+disc_clone_destroy(struct ifnet *ifp)
+{
+ struct disc_softc *sc;
+
+ sc = ifp->if_softc;
+
+ bpfdetach(ifp);
+ if_detach(ifp);
+ if_free(ifp);
+
+ free(sc, M_DISC);
+}
+
+static int
+disc_modevent(module_t mod, int type, void *data)
+{
+
+ switch (type) {
+ case MOD_LOAD:
+ if_clone_attach(&disc_cloner);
+ break;
+ case MOD_UNLOAD:
+ if_clone_detach(&disc_cloner);
+ break;
+ default:
+ return (EOPNOTSUPP);
+ }
+ return (0);
+}
+
+static moduledata_t disc_mod = {
+ "if_disc",
+ disc_modevent,
+ NULL
+};
+
+DECLARE_MODULE(if_disc, disc_mod, SI_SUB_PSEUDO, SI_ORDER_ANY);
+
+static int
+discoutput(struct ifnet *ifp, struct mbuf *m, struct sockaddr *dst,
+ struct route *ro)
+{
+ u_int32_t af;
+
+ M_ASSERTPKTHDR(m);
+
+ /* BPF writes need to be handled specially. */
+ if (dst->sa_family == AF_UNSPEC) {
+ bcopy(dst->sa_data, &af, sizeof(af));
+ dst->sa_family = af;
+ }
+
+ if (bpf_peers_present(ifp->if_bpf)) {
+ u_int af = dst->sa_family;
+ bpf_mtap2(ifp->if_bpf, &af, sizeof(af), m);
+ }
+ m->m_pkthdr.rcvif = ifp;
+
+ ifp->if_opackets++;
+ ifp->if_obytes += m->m_pkthdr.len;
+
+ m_freem(m);
+ return (0);
+}
+
+/* ARGSUSED */
+static void
+discrtrequest(int cmd, struct rtentry *rt, struct rt_addrinfo *info)
+{
+ RT_LOCK_ASSERT(rt);
+ rt->rt_rmx.rmx_mtu = DSMTU;
+}
+
+/*
+ * Process an ioctl request.
+ */
+static int
+discioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
+{
+ struct ifaddr *ifa;
+ struct ifreq *ifr = (struct ifreq *)data;
+ int error = 0;
+
+ switch (cmd) {
+
+ case SIOCSIFADDR:
+ ifp->if_flags |= IFF_UP;
+ ifa = (struct ifaddr *)data;
+ if (ifa != 0)
+ ifa->ifa_rtrequest = discrtrequest;
+ /*
+ * Everything else is done at a higher level.
+ */
+ break;
+
+ case SIOCADDMULTI:
+ case SIOCDELMULTI:
+ if (ifr == 0) {
+ error = EAFNOSUPPORT; /* XXX */
+ break;
+ }
+ switch (ifr->ifr_addr.sa_family) {
+
+#ifdef INET
+ case AF_INET:
+ break;
+#endif
+#ifdef INET6
+ case AF_INET6:
+ break;
+#endif
+
+ default:
+ error = EAFNOSUPPORT;
+ break;
+ }
+ break;
+
+ case SIOCSIFMTU:
+ ifp->if_mtu = ifr->ifr_mtu;
+ break;
+
+ default:
+ error = EINVAL;
+ }
+ return (error);
+}
diff --git a/rtems/freebsd/net/if_dl.h b/rtems/freebsd/net/if_dl.h
new file mode 100644
index 00000000..425eaf4a
--- /dev/null
+++ b/rtems/freebsd/net/if_dl.h
@@ -0,0 +1,82 @@
+/*-
+ * Copyright (c) 1990, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)if_dl.h 8.1 (Berkeley) 6/10/93
+ * $FreeBSD$
+ */
+
+#ifndef _NET_IF_DL_HH_
+#define _NET_IF_DL_HH_
+
+/*
+ * A Link-Level Sockaddr may specify the interface in one of two
+ * ways: either by means of a system-provided index number (computed
+ * anew and possibly differently on every reboot), or by a human-readable
+ * string such as "il0" (for managerial convenience).
+ *
+ * Census-taking actions, such as something akin to SIOCGCONF, would return
+ * both the index and the human name.
+ *
+ * High volume transactions (such as giving a link-level ``from'' address
+ * in a recvfrom or recvmsg call) are likely to provide only the indexed
+ * form (which requires fewer copy operations and less space).
+ *
+ * The form and interpretation of the link-level address is purely a matter
+ * of convention between the device driver and its consumers; however, it is
+ * expected that all drivers for an interface of a given if_type will agree.
+ */
+
+/*
+ * Structure of a Link-Level sockaddr:
+ */
+struct sockaddr_dl {
+ u_char sdl_len; /* Total length of sockaddr */
+ u_char sdl_family; /* AF_LINK */
+ u_short sdl_index; /* if != 0, system given index for interface */
+ u_char sdl_type; /* interface type */
+ u_char sdl_nlen; /* interface name length, no trailing 0 reqd. */
+ u_char sdl_alen; /* link level address length */
+ u_char sdl_slen; /* link layer selector length */
+ char sdl_data[46]; /* minimum work area, can be larger;
+ contains both if name and ll address */
+};
+
+#define LLADDR(s) ((caddr_t)((s)->sdl_data + (s)->sdl_nlen))
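+
+/*
+ * Example (illustrative only): the interface name and the link-level
+ * address are packed back to back into sdl_data, so given a filled-in
+ * "struct sockaddr_dl sdl" one could fetch them as
+ *
+ *	char name[IFNAMSIZ];
+ *	u_char *lladdr;
+ *
+ *	memcpy(name, sdl.sdl_data, sdl.sdl_nlen);
+ *	name[sdl.sdl_nlen] = '\0';
+ *	lladdr = (u_char *)LLADDR(&sdl);	(sdl_alen bytes long)
+ */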
+
+#ifndef _KERNEL
+
+#include <rtems/freebsd/sys/cdefs.h>
+
+__BEGIN_DECLS
+void link_addr(const char *, struct sockaddr_dl *);
+char *link_ntoa(const struct sockaddr_dl *);
+__END_DECLS
+
+#endif /* !_KERNEL */
+
+#endif
diff --git a/rtems/freebsd/net/if_edsc.c b/rtems/freebsd/net/if_edsc.c
new file mode 100644
index 00000000..d876731a
--- /dev/null
+++ b/rtems/freebsd/net/if_edsc.c
@@ -0,0 +1,356 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 1982, 1986, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * From: @(#)if_loop.c 8.1 (Berkeley) 6/10/93
+ * $FreeBSD$
+ */
+
+/*
+ * Discard interface driver for protocol testing and timing.
+ * Mimics an Ethernet device so that VLANs can be attached to it etc.
+ */
+
+#include <rtems/freebsd/sys/param.h> /* types, important constants */
+#include <rtems/freebsd/sys/kernel.h> /* SYSINIT for load-time initializations */
+#include <rtems/freebsd/sys/malloc.h> /* malloc(9) */
+#include <rtems/freebsd/sys/module.h> /* module(9) */
+#include <rtems/freebsd/sys/mbuf.h> /* mbuf(9) */
+#include <rtems/freebsd/sys/socket.h> /* struct ifreq */
+#include <rtems/freebsd/sys/sockio.h> /* socket ioctl's */
+/* #include <rtems/freebsd/sys/systm.h> if you need printf(9) or other all-purpose globals */
+
+#include <rtems/freebsd/net/bpf.h> /* bpf(9) */
+#include <rtems/freebsd/net/ethernet.h> /* Ethernet related constants and types */
+#include <rtems/freebsd/net/if.h> /* basic part of ifnet(9) */
+#include <rtems/freebsd/net/if_clone.h> /* network interface cloning */
+#include <rtems/freebsd/net/if_types.h> /* IFT_ETHER and friends */
+#include <rtems/freebsd/net/if_var.h> /* kernel-only part of ifnet(9) */
+
+/*
+ * Software configuration of an interface specific to this device type.
+ */
+struct edsc_softc {
+ struct ifnet *sc_ifp; /* ptr to generic interface configuration */
+
+ /*
+	 * A driver for real hardware could keep various things here, for
+	 * instance, the hardware revision, cached values of write-only
+	 * registers, etc.
+ */
+};
+
+/*
+ * Simple cloning methods.
+ * IFC_SIMPLE_DECLARE() expects precisely these names.
+ */
+static int edsc_clone_create(struct if_clone *, int, caddr_t);
+static void edsc_clone_destroy(struct ifnet *);
+
+/*
+ * Interface driver methods.
+ */
+static void edsc_init(void *dummy);
+/* static void edsc_input(struct ifnet *ifp, struct mbuf *m); would be here */
+static int edsc_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data);
+static void edsc_start(struct ifnet *ifp);
+
+/*
+ * We'll allocate softc instances from this.
+ */
+static MALLOC_DEFINE(M_EDSC, "edsc", "Ethernet discard interface");
+
+/*
+ * Attach to the interface cloning framework under the name of "edsc".
+ * The second argument is the number of units to be created from
+ * the outset. It's also the minimum number of units allowed.
+ * We don't want any units created as soon as the driver is loaded.
+ */
+IFC_SIMPLE_DECLARE(edsc, 0);
+
+/*
+ * Create an interface instance.
+ */
+static int
+edsc_clone_create(struct if_clone *ifc, int unit, caddr_t params)
+{
+ struct edsc_softc *sc;
+ struct ifnet *ifp;
+ static u_char eaddr[ETHER_ADDR_LEN]; /* 0:0:0:0:0:0 */
+
+ /*
+ * Allocate soft and ifnet structures. Link each to the other.
+ */
+ sc = malloc(sizeof(struct edsc_softc), M_EDSC, M_WAITOK | M_ZERO);
+ ifp = sc->sc_ifp = if_alloc(IFT_ETHER);
+ if (ifp == NULL) {
+ free(sc, M_EDSC);
+ return (ENOSPC);
+ }
+
+ ifp->if_softc = sc;
+
+ /*
+ * Get a name for this particular interface in its ifnet structure.
+ */
+ if_initname(ifp, ifc->ifc_name, unit);
+
+ /*
+ * Typical Ethernet interface flags: we can do broadcast and
+ * multicast but can't hear our own broadcasts or multicasts.
+ */
+ ifp->if_flags = IFF_BROADCAST | IFF_MULTICAST | IFF_SIMPLEX;
+
+ /*
+	 * We can pretend we have the whole set of hardware features
+ * because we just discard all packets we get from the upper layer.
+ * However, the features are disabled initially. They can be
+ * enabled via edsc_ioctl() when needed.
+ */
+ ifp->if_capabilities =
+ IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM |
+ IFCAP_HWCSUM | IFCAP_TSO |
+ IFCAP_JUMBO_MTU;
+ ifp->if_capenable = 0;
+
+ /*
+ * Set the interface driver methods.
+ */
+ ifp->if_init = edsc_init;
+ /* ifp->if_input = edsc_input; */
+ ifp->if_ioctl = edsc_ioctl;
+ ifp->if_start = edsc_start;
+
+ /*
+ * Set the maximum output queue length from the global parameter.
+ */
+ ifp->if_snd.ifq_maxlen = ifqmaxlen;
+
+ /*
+ * Do ifnet initializations common to all Ethernet drivers
+ * and attach to the network interface framework.
+ * TODO: Pick a non-zero link level address.
+ */
+ ether_ifattach(ifp, eaddr);
+
+ /*
+ * Now we can mark the interface as running, i.e., ready
+ * for operation.
+ */
+ ifp->if_drv_flags |= IFF_DRV_RUNNING;
+
+ return (0);
+}
+
+/*
+ * Destroy an interface instance.
+ */
+static void
+edsc_clone_destroy(struct ifnet *ifp)
+{
+ struct edsc_softc *sc = ifp->if_softc;
+
+ /*
+ * Detach from the network interface framework.
+ */
+ ether_ifdetach(ifp);
+
+ /*
+ * Free memory occupied by ifnet and softc.
+ */
+ if_free(ifp);
+ free(sc, M_EDSC);
+}
+
+/*
+ * This method is invoked from ether_ioctl() when it's time
+ * to bring up the hardware.
+ */
+static void
+edsc_init(void *dummy)
+{
+#if 0 /* what a hardware driver would do here... */
+	struct edsc_softc *sc = (struct edsc_softc *)dummy;
+ struct ifnet *ifp = sc->sc_ifp;
+
+ /* blah-blah-blah */
+#endif
+}
+
+/*
+ * Network interfaces are controlled via the ioctl(2) syscall.
+ */
+static int
+edsc_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
+{
+ struct ifreq *ifr = (struct ifreq *)data;
+
+ switch (cmd) {
+ case SIOCSIFCAP:
+#if 1
+ /*
+ * Just turn on any capabilities requested.
+ * The generic ifioctl() function has already made sure
+ * that they are supported, i.e., set in if_capabilities.
+ */
+ ifp->if_capenable = ifr->ifr_reqcap;
+#else
+ /*
+ * A h/w driver would need to analyze the requested
+ * bits and program the hardware, e.g.:
+ */
+ mask = ifp->if_capenable ^ ifr->ifr_reqcap;
+
+ if (mask & IFCAP_VLAN_HWTAGGING) {
+ ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
+
+ if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
+ /* blah-blah-blah */
+ else
+ /* etc-etc-etc */
+ }
+#endif
+ break;
+
+ default:
+ /*
+ * Offload the rest onto the common Ethernet handler.
+ */
+ return (ether_ioctl(ifp, cmd, data));
+ }
+
+ return (0);
+}
+
+/*
+ * Process the output queue.
+ */
+static void
+edsc_start(struct ifnet *ifp)
+{
+ struct mbuf *m;
+
+ /*
+ * A hardware interface driver can set IFF_DRV_OACTIVE
+ * in ifp->if_drv_flags:
+ *
+ * ifp->if_drv_flags |= IFF_DRV_OACTIVE;
+ *
+ * to prevent if_start from being invoked again while the
+ * transmission is under way. The flag is to protect the
+ * device's transmitter, not the method itself. The output
+ * queue is locked and several threads can process it in
+ * parallel safely, so the driver can use other means to
+ * serialize access to the transmitter.
+ *
+ * If using IFF_DRV_OACTIVE, the driver should clear the flag
+ * not earlier than the current transmission is complete, e.g.,
+ * upon an interrupt from the device, not just before returning
+ * from if_start. This method merely starts the transmission,
+ * which may proceed asynchronously.
+ */
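+
+	/*
+	 * Illustrative sketch (not used by this discard driver): with
+	 * IFF_DRV_OACTIVE, a hardware driver's start method would
+	 * typically begin with
+	 *
+	 *	if (ifp->if_drv_flags & IFF_DRV_OACTIVE)
+	 *		return;
+	 *	ifp->if_drv_flags |= IFF_DRV_OACTIVE;
+	 *
+	 * and the flag would be cleared from the TX-complete interrupt
+	 * handler once the device can accept more data.
+	 */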
+
+ /*
+ * We loop getting packets from the queue until it's empty.
+ * A h/w driver would loop until the device can accept more
+ * data into its buffer, or while there are free transmit
+ * descriptors, or whatever.
+ */
+ for (;;) {
+ /*
+ * Try to dequeue one packet. Stop if the queue is empty.
+ * Use IF_DEQUEUE() here if ALTQ(9) support is unneeded.
+ */
+ IFQ_DEQUEUE(&ifp->if_snd, m);
+ if (m == NULL)
+ break;
+
+ /*
+ * Let bpf(9) at the packet.
+ */
+ BPF_MTAP(ifp, m);
+
+ /*
+ * Update the interface counters.
+ */
+ ifp->if_obytes += m->m_pkthdr.len;
+ ifp->if_opackets++;
+
+ /*
+ * Finally, just drop the packet.
+ * TODO: Reply to ARP requests unless IFF_NOARP is set.
+ */
+ m_freem(m);
+ }
+
+ /*
+ * ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
+ * would be here only if the transmission were synchronous.
+ */
+}
+
+/*
+ * This function provides handlers for module events, namely load and unload.
+ */
+static int
+edsc_modevent(module_t mod, int type, void *data)
+{
+
+ switch (type) {
+ case MOD_LOAD:
+ /*
+ * Connect to the network interface cloning framework.
+ */
+ if_clone_attach(&edsc_cloner);
+ break;
+
+ case MOD_UNLOAD:
+ /*
+ * Disconnect from the cloning framework.
+ * Existing interfaces will be disposed of properly.
+ */
+ if_clone_detach(&edsc_cloner);
+ break;
+
+ default:
+ /*
+ * There are other event types, but we don't handle them.
+ * See module(9).
+ */
+ return (EOPNOTSUPP);
+ }
+ return (0);
+}
+
+static moduledata_t edsc_mod = {
+ "if_edsc", /* name */
+ edsc_modevent, /* event handler */
+ NULL /* additional data */
+};
+
+DECLARE_MODULE(if_edsc, edsc_mod, SI_SUB_PSEUDO, SI_ORDER_ANY);
diff --git a/rtems/freebsd/net/if_ef.c b/rtems/freebsd/net/if_ef.c
new file mode 100644
index 00000000..db215c84
--- /dev/null
+++ b/rtems/freebsd/net/if_ef.c
@@ -0,0 +1,610 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 1999, 2000 Boris Popov
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#include <rtems/freebsd/local/opt_inet.h>
+#include <rtems/freebsd/local/opt_ipx.h>
+#include <rtems/freebsd/local/opt_ef.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/sockio.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/syslog.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/module.h>
+
+#include <rtems/freebsd/net/ethernet.h>
+#include <rtems/freebsd/net/if_llc.h>
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/if_arp.h>
+#include <rtems/freebsd/net/if_dl.h>
+#include <rtems/freebsd/net/if_types.h>
+#include <rtems/freebsd/net/netisr.h>
+#include <rtems/freebsd/net/bpf.h>
+#include <rtems/freebsd/net/vnet.h>
+
+#ifdef INET
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/in_var.h>
+#include <rtems/freebsd/netinet/if_ether.h>
+#endif
+
+#ifdef IPX
+#include <rtems/freebsd/netipx/ipx.h>
+#include <rtems/freebsd/netipx/ipx_if.h>
+#endif
+
+/* If none of the supported layers is enabled explicitly enable them all */
+#if !defined(ETHER_II) && !defined(ETHER_8023) && !defined(ETHER_8022) && \
+ !defined(ETHER_SNAP)
+#define ETHER_II 1
+#define ETHER_8023 1
+#define ETHER_8022 1
+#define ETHER_SNAP 1
+#endif
+
+/* internal frame types */
+#define ETHER_FT_EII 0 /* Ethernet_II - default */
+#define ETHER_FT_8023 1 /* 802.3 (Novell) */
+#define ETHER_FT_8022 2 /* 802.2 */
+#define ETHER_FT_SNAP 3 /* SNAP */
+#define EF_NFT 4 /* total number of frame types */
+
+#ifdef EF_DEBUG
+#define EFDEBUG(format, args...) printf("%s: "format, __func__ ,## args)
+#else
+#define EFDEBUG(format, args...)
+#endif
+
+#define EFERROR(format, args...) printf("%s: "format, __func__ ,## args)
+
+struct efnet {
+ struct ifnet *ef_ifp;
+ struct ifnet *ef_pifp;
+ int ef_frametype;
+};
+
+struct ef_link {
+ SLIST_ENTRY(ef_link) el_next;
+	struct ifnet	*el_ifp;	/* raw device for these clones */
+ struct efnet *el_units[EF_NFT]; /* our clones */
+};
+
+static SLIST_HEAD(ef_link_head, ef_link) efdev = {NULL};
+static int efcount;
+
+extern int (*ef_inputp)(struct ifnet*, struct ether_header *eh, struct mbuf *m);
+extern int (*ef_outputp)(struct ifnet *ifp, struct mbuf **mp,
+ struct sockaddr *dst, short *tp, int *hlen);
+
+/*
+static void ef_reset (struct ifnet *);
+*/
+static int ef_attach(struct efnet *sc);
+static int ef_detach(struct efnet *sc);
+static void ef_init(void *);
+static int ef_ioctl(struct ifnet *, u_long, caddr_t);
+static void ef_start(struct ifnet *);
+static int ef_input(struct ifnet*, struct ether_header *, struct mbuf *);
+static int ef_output(struct ifnet *ifp, struct mbuf **mp,
+ struct sockaddr *dst, short *tp, int *hlen);
+
+static int ef_load(void);
+static int ef_unload(void);
+
+/*
+ * Install the interface; most of the structure initialization is done
+ * in ef_clone().
+ */
+static int
+ef_attach(struct efnet *sc)
+{
+ struct ifnet *ifp = sc->ef_ifp;
+
+ ifp->if_start = ef_start;
+ ifp->if_init = ef_init;
+ ifp->if_snd.ifq_maxlen = ifqmaxlen;
+ ifp->if_flags = (IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
+ /*
+ * Attach the interface
+ */
+ ether_ifattach(ifp, IF_LLADDR(sc->ef_pifp));
+
+ ifp->if_resolvemulti = 0;
+ ifp->if_type = IFT_XETHER;
+ ifp->if_drv_flags |= IFF_DRV_RUNNING;
+
+ EFDEBUG("%s: attached\n", ifp->if_xname);
+ return 1;
+}
+
+/*
+ * This is for _testing_only_; it just removes the interface from the
+ * interface list.
+ */
+static int
+ef_detach(struct efnet *sc)
+{
+ struct ifnet *ifp = sc->ef_ifp;
+ int s;
+
+ s = splimp();
+
+ ether_ifdetach(ifp);
+ if_free(ifp);
+
+ splx(s);
+ return 0;
+}
+
+static void
+ef_init(void *foo)
+{
+	return;
+}
+
+static int
+ef_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
+{
+ struct efnet *sc = ifp->if_softc;
+ struct ifaddr *ifa = (struct ifaddr*)data;
+ int s, error;
+
+	EFDEBUG("IOCTL %lu for %s\n", cmd, ifp->if_xname);
+ error = 0;
+ s = splimp();
+ switch (cmd) {
+ case SIOCSIFFLAGS:
+ error = 0;
+ break;
+ case SIOCSIFADDR:
+ if (sc->ef_frametype == ETHER_FT_8023 &&
+ ifa->ifa_addr->sa_family != AF_IPX) {
+ error = EAFNOSUPPORT;
+ break;
+ }
+ ifp->if_flags |= IFF_UP;
+ /* FALL THROUGH */
+ default:
+ error = ether_ioctl(ifp, cmd, data);
+ break;
+ }
+ splx(s);
+ return error;
+}
+
+/*
+ * Currently the packet is prepared in ether_output(), but this could be
+ * a better place to do it.
+ */
+static void
+ef_start(struct ifnet *ifp)
+{
+ struct efnet *sc = (struct efnet*)ifp->if_softc;
+ struct ifnet *p;
+ struct mbuf *m;
+ int error;
+
+ ifp->if_drv_flags |= IFF_DRV_OACTIVE;
+ p = sc->ef_pifp;
+
+ EFDEBUG("\n");
+ for (;;) {
+ IF_DEQUEUE(&ifp->if_snd, m);
+ if (m == 0)
+ break;
+ BPF_MTAP(ifp, m);
+ error = p->if_transmit(p, m);
+ if (error) {
+ ifp->if_oerrors++;
+ continue;
+ }
+ ifp->if_opackets++;
+ }
+ ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
+ return;
+}
+
+/*
+ * Inline functions add no procedure-call or parameter-passing overhead
+ * but simplify the code.
+ */
+static int __inline
+ef_inputEII(struct mbuf *m, struct ether_header *eh, u_short ether_type)
+{
+ int isr;
+
+ switch(ether_type) {
+#ifdef IPX
+ case ETHERTYPE_IPX:
+ isr = NETISR_IPX;
+ break;
+#endif
+#ifdef INET
+ case ETHERTYPE_IP:
+ if ((m = ip_fastforward(m)) == NULL)
+ return (0);
+ isr = NETISR_IP;
+ break;
+
+ case ETHERTYPE_ARP:
+ isr = NETISR_ARP;
+ break;
+#endif
+ default:
+ return (EPROTONOSUPPORT);
+ }
+ netisr_dispatch(isr, m);
+ return (0);
+}
+
+static int __inline
+ef_inputSNAP(struct mbuf *m, struct ether_header *eh, struct llc* l,
+ u_short ether_type)
+{
+ int isr;
+
+ switch(ether_type) {
+#ifdef IPX
+ case ETHERTYPE_IPX:
+ m_adj(m, 8);
+ isr = NETISR_IPX;
+ break;
+#endif
+ default:
+ return (EPROTONOSUPPORT);
+ }
+ netisr_dispatch(isr, m);
+ return (0);
+}
+
+static int __inline
+ef_input8022(struct mbuf *m, struct ether_header *eh, struct llc* l,
+ u_short ether_type)
+{
+ int isr;
+
+ switch(ether_type) {
+#ifdef IPX
+ case 0xe0:
+ m_adj(m, 3);
+ isr = NETISR_IPX;
+ break;
+#endif
+ default:
+ return (EPROTONOSUPPORT);
+ }
+ netisr_dispatch(isr, m);
+ return (0);
+}
+
+/*
+ * Called from ether_input()
+ */
+static int
+ef_input(struct ifnet *ifp, struct ether_header *eh, struct mbuf *m)
+{
+ u_short ether_type;
+ int ft = -1;
+ struct efnet *efp;
+ struct ifnet *eifp;
+ struct llc *l;
+ struct ef_link *efl;
+ int isr;
+
+ ether_type = ntohs(eh->ether_type);
+ l = NULL;
+ if (ether_type < ETHERMTU) {
+ l = mtod(m, struct llc*);
+ if (l->llc_dsap == 0xff && l->llc_ssap == 0xff) {
+ /*
+ * Novell's "802.3" frame
+ */
+ ft = ETHER_FT_8023;
+ } else if (l->llc_dsap == 0xaa && l->llc_ssap == 0xaa) {
+ /*
+ * 802.2/SNAP
+ */
+ ft = ETHER_FT_SNAP;
+ ether_type = ntohs(l->llc_un.type_snap.ether_type);
+ } else if (l->llc_dsap == l->llc_ssap) {
+ /*
+ * 802.3/802.2
+ */
+ ft = ETHER_FT_8022;
+ ether_type = l->llc_ssap;
+ }
+ } else
+ ft = ETHER_FT_EII;
+
+ if (ft == -1) {
+ EFDEBUG("Unrecognised ether_type %x\n", ether_type);
+ return EPROTONOSUPPORT;
+ }
+
+ /*
+	 * Check if an interface is configured for the given frame type
+ */
+ efp = NULL;
+ SLIST_FOREACH(efl, &efdev, el_next) {
+ if (efl->el_ifp == ifp) {
+ efp = efl->el_units[ft];
+ break;
+ }
+ }
+ if (efp == NULL) {
+ EFDEBUG("Can't find if for %d\n", ft);
+ return EPROTONOSUPPORT;
+ }
+ eifp = efp->ef_ifp;
+ if ((eifp->if_flags & IFF_UP) == 0)
+ return EPROTONOSUPPORT;
+ eifp->if_ibytes += m->m_pkthdr.len + sizeof (*eh);
+ m->m_pkthdr.rcvif = eifp;
+
+ BPF_MTAP2(eifp, eh, ETHER_HDR_LEN, m);
+ /*
+	 * Now we are ready to adjust the mbufs and pass them to the
+	 * protocol interrupt handlers.
+ */
+ switch(ft) {
+ case ETHER_FT_EII:
+ return (ef_inputEII(m, eh, ether_type));
+#ifdef IPX
+ case ETHER_FT_8023: /* only IPX can be here */
+ isr = NETISR_IPX;
+ break;
+#endif
+ case ETHER_FT_SNAP:
+ return (ef_inputSNAP(m, eh, l, ether_type));
+ case ETHER_FT_8022:
+ return (ef_input8022(m, eh, l, ether_type));
+ default:
+ EFDEBUG("No support for frame %d and proto %04x\n",
+ ft, ether_type);
+ return (EPROTONOSUPPORT);
+ }
+ netisr_dispatch(isr, m);
+ return (0);
+}
+
+static int
+ef_output(struct ifnet *ifp, struct mbuf **mp, struct sockaddr *dst, short *tp,
+ int *hlen)
+{
+ struct efnet *sc = (struct efnet*)ifp->if_softc;
+ struct mbuf *m = *mp;
+ u_char *cp;
+ short type;
+
+ if (ifp->if_type != IFT_XETHER)
+ return ENETDOWN;
+ switch (sc->ef_frametype) {
+ case ETHER_FT_EII:
+#ifdef IPX
+ type = htons(ETHERTYPE_IPX);
+#else
+ return EPFNOSUPPORT;
+#endif
+ break;
+ case ETHER_FT_8023:
+ type = htons(m->m_pkthdr.len);
+ break;
+ case ETHER_FT_8022:
+ M_PREPEND(m, ETHER_HDR_LEN + 3, M_WAIT);
+ /*
+		 * Ensure that the Ethernet header and the next three bytes
+		 * fit into a single mbuf
+ */
+ m = m_pullup(m, ETHER_HDR_LEN + 3);
+ if (m == NULL) {
+ *mp = NULL;
+ return ENOBUFS;
+ }
+ m_adj(m, ETHER_HDR_LEN);
+ type = htons(m->m_pkthdr.len);
+ cp = mtod(m, u_char *);
+ *cp++ = 0xE0;
+ *cp++ = 0xE0;
+ *cp++ = 0x03;
+ *hlen += 3;
+ break;
+ case ETHER_FT_SNAP:
+ M_PREPEND(m, 8, M_WAIT);
+ type = htons(m->m_pkthdr.len);
+ cp = mtod(m, u_char *);
+ bcopy("\xAA\xAA\x03\x00\x00\x00\x81\x37", cp, 8);
+ *hlen += 8;
+ break;
+ default:
+ return EPFNOSUPPORT;
+ }
+ *mp = m;
+ *tp = type;
+ return 0;
+}
+
+/*
+ * Create clone from the given interface
+ */
+static int
+ef_clone(struct ef_link *efl, int ft)
+{
+ struct efnet *efp;
+ struct ifnet *eifp;
+ struct ifnet *ifp = efl->el_ifp;
+
+ efp = (struct efnet*)malloc(sizeof(struct efnet), M_IFADDR,
+ M_WAITOK | M_ZERO);
+ if (efp == NULL)
+ return ENOMEM;
+ efp->ef_pifp = ifp;
+ efp->ef_frametype = ft;
+ eifp = efp->ef_ifp = if_alloc(IFT_ETHER);
+ if (eifp == NULL) {
+ free(efp, M_IFADDR);
+ return (ENOSPC);
+ }
+ snprintf(eifp->if_xname, IFNAMSIZ,
+ "%sf%d", ifp->if_xname, efp->ef_frametype);
+ eifp->if_dname = "ef";
+ eifp->if_dunit = IF_DUNIT_NONE;
+ eifp->if_softc = efp;
+ if (ifp->if_ioctl)
+ eifp->if_ioctl = ef_ioctl;
+ efl->el_units[ft] = efp;
+ return 0;
+}
+
+static int
+ef_load(void)
+{
+ VNET_ITERATOR_DECL(vnet_iter);
+ struct ifnet *ifp;
+ struct efnet *efp;
+ struct ef_link *efl = NULL, *efl_temp;
+ int error = 0, d;
+
+ VNET_LIST_RLOCK();
+ VNET_FOREACH(vnet_iter) {
+ CURVNET_SET(vnet_iter);
+
+ /*
+ * XXXRW: The following loop walks the ifnet list while
+ * modifying it, something not well-supported by ifnet
+ * locking. To avoid lock upgrade/recursion issues, manually
+ * acquire a write lock of ifnet_sxlock here, rather than a
+ * read lock, so that when if_alloc() recurses the lock, we
+ * don't panic. This structure, in which if_ef automatically
+ * attaches to all ethernet interfaces, should be replaced
+ * with a model like that found in if_vlan, in which
+ * interfaces are explicitly configured, which would avoid
+ * this (and other) problems.
+ */
+ sx_xlock(&ifnet_sxlock);
+ TAILQ_FOREACH(ifp, &V_ifnet, if_link) {
+ if (ifp->if_type != IFT_ETHER) continue;
+ EFDEBUG("Found interface %s\n", ifp->if_xname);
+ efl = (struct ef_link*)malloc(sizeof(struct ef_link),
+ M_IFADDR, M_WAITOK | M_ZERO);
+ if (efl == NULL) {
+ error = ENOMEM;
+ break;
+ }
+
+ efl->el_ifp = ifp;
+#ifdef ETHER_II
+ error = ef_clone(efl, ETHER_FT_EII);
+ if (error) break;
+#endif
+#ifdef ETHER_8023
+ error = ef_clone(efl, ETHER_FT_8023);
+ if (error) break;
+#endif
+#ifdef ETHER_8022
+ error = ef_clone(efl, ETHER_FT_8022);
+ if (error) break;
+#endif
+#ifdef ETHER_SNAP
+ error = ef_clone(efl, ETHER_FT_SNAP);
+ if (error) break;
+#endif
+ efcount++;
+ SLIST_INSERT_HEAD(&efdev, efl, el_next);
+ }
+ sx_xunlock(&ifnet_sxlock);
+ CURVNET_RESTORE();
+ }
+ VNET_LIST_RUNLOCK();
+ if (error) {
+ if (efl)
+ SLIST_INSERT_HEAD(&efdev, efl, el_next);
+ SLIST_FOREACH_SAFE(efl, &efdev, el_next, efl_temp) {
+ for (d = 0; d < EF_NFT; d++)
+ if (efl->el_units[d]) {
+ if (efl->el_units[d]->ef_pifp != NULL)
+ if_free(efl->el_units[d]->ef_pifp);
+ free(efl->el_units[d], M_IFADDR);
+ }
+ free(efl, M_IFADDR);
+ }
+ return error;
+ }
+ SLIST_FOREACH(efl, &efdev, el_next) {
+ for (d = 0; d < EF_NFT; d++) {
+ efp = efl->el_units[d];
+ if (efp)
+ ef_attach(efp);
+ }
+ }
+ ef_inputp = ef_input;
+ ef_outputp = ef_output;
+ EFDEBUG("Loaded\n");
+ return 0;
+}
+
+static int
+ef_unload(void)
+{
+ struct efnet *efp;
+ struct ef_link *efl;
+ int d;
+
+ ef_inputp = NULL;
+ ef_outputp = NULL;
+ SLIST_FOREACH(efl, &efdev, el_next) {
+ for (d = 0; d < EF_NFT; d++) {
+ efp = efl->el_units[d];
+ if (efp) {
+ ef_detach(efp);
+ }
+ }
+ }
+ EFDEBUG("Unloaded\n");
+ return 0;
+}
+
+static int
+if_ef_modevent(module_t mod, int type, void *data)
+{
+ switch ((modeventtype_t)type) {
+ case MOD_LOAD:
+ return ef_load();
+ case MOD_UNLOAD:
+ return ef_unload();
+ default:
+ return EOPNOTSUPP;
+ }
+ return 0;
+}
+
+static moduledata_t if_ef_mod = {
+ "if_ef", if_ef_modevent, NULL
+};
+
+DECLARE_MODULE(if_ef, if_ef_mod, SI_SUB_PSEUDO, SI_ORDER_MIDDLE);
diff --git a/rtems/freebsd/net/if_enc.c b/rtems/freebsd/net/if_enc.c
new file mode 100644
index 00000000..ca777781
--- /dev/null
+++ b/rtems/freebsd/net/if_enc.c
@@ -0,0 +1,375 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 2006 The FreeBSD Project.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/module.h>
+#include <rtems/freebsd/machine/bus.h>
+#include <rtems/freebsd/sys/rman.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/sockio.h>
+#include <rtems/freebsd/sys/sysctl.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/if_clone.h>
+#include <rtems/freebsd/net/if_types.h>
+#include <rtems/freebsd/net/pfil.h>
+#include <rtems/freebsd/net/route.h>
+#include <rtems/freebsd/net/netisr.h>
+#include <rtems/freebsd/net/bpf.h>
+#include <rtems/freebsd/net/vnet.h>
+
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/in_systm.h>
+#include <rtems/freebsd/netinet/ip.h>
+#include <rtems/freebsd/netinet/ip_var.h>
+#include <rtems/freebsd/netinet/in_var.h>
+#include <rtems/freebsd/local/opt_inet6.h>
+
+#ifdef INET6
+#include <rtems/freebsd/netinet/ip6.h>
+#include <rtems/freebsd/netinet6/ip6_var.h>
+#endif
+
+#include <rtems/freebsd/local/opt_enc.h>
+#include <rtems/freebsd/netipsec/ipsec.h>
+#include <rtems/freebsd/netipsec/xform.h>
+
+#define ENCMTU (1024+512)
+
+/* XXX this define must have the same value as in OpenBSD */
+#define M_CONF 0x0400 /* payload was encrypted (ESP-transport) */
+#define M_AUTH 0x0800 /* payload was authenticated (AH or ESP auth) */
+#define M_AUTH_AH 0x2000 /* header was authenticated (AH) */
+
+struct enchdr {
+ u_int32_t af;
+ u_int32_t spi;
+ u_int32_t flags;
+};
+
+struct ifnet *encif;
+static struct mtx enc_mtx;
+
+struct enc_softc {
+ struct ifnet *sc_ifp;
+};
+
+static int enc_ioctl(struct ifnet *, u_long, caddr_t);
+static int enc_output(struct ifnet *ifp, struct mbuf *m,
+ struct sockaddr *dst, struct route *ro);
+static int enc_clone_create(struct if_clone *, int, caddr_t);
+static void enc_clone_destroy(struct ifnet *);
+
+IFC_SIMPLE_DECLARE(enc, 1);
+
+/*
+ * Sysctls.
+ */
+
+/*
+ * Before and after are relative to when we are stripping the
+ * outer IP header.
+ */
+SYSCTL_NODE(_net, OID_AUTO, enc, CTLFLAG_RW, 0, "enc sysctl");
+
+SYSCTL_NODE(_net_enc, OID_AUTO, in, CTLFLAG_RW, 0, "enc input sysctl");
+static int ipsec_filter_mask_in = ENC_BEFORE;
+SYSCTL_XINT(_net_enc_in, OID_AUTO, ipsec_filter_mask, CTLFLAG_RW,
+ &ipsec_filter_mask_in, 0, "IPsec input firewall filter mask");
+static int ipsec_bpf_mask_in = ENC_BEFORE;
+SYSCTL_XINT(_net_enc_in, OID_AUTO, ipsec_bpf_mask, CTLFLAG_RW,
+ &ipsec_bpf_mask_in, 0, "IPsec input bpf mask");
+
+SYSCTL_NODE(_net_enc, OID_AUTO, out, CTLFLAG_RW, 0, "enc output sysctl");
+static int ipsec_filter_mask_out = ENC_BEFORE;
+SYSCTL_XINT(_net_enc_out, OID_AUTO, ipsec_filter_mask, CTLFLAG_RW,
+ &ipsec_filter_mask_out, 0, "IPsec output firewall filter mask");
+static int ipsec_bpf_mask_out = ENC_BEFORE|ENC_AFTER;
+SYSCTL_XINT(_net_enc_out, OID_AUTO, ipsec_bpf_mask, CTLFLAG_RW,
+ &ipsec_bpf_mask_out, 0, "IPsec output bpf mask");
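+
+/*
+ * Example (illustrative only; assumes ENC_BEFORE and ENC_AFTER are the
+ * single bits 1 and 2): to run outbound packets through the packet
+ * filter both before and after the outer header is prepended, one
+ * could set
+ *
+ *	sysctl net.enc.out.ipsec_filter_mask=3
+ *
+ * i.e. the numeric OR of the two mask bits.
+ */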
+
+static void
+enc_clone_destroy(struct ifnet *ifp)
+{
+ KASSERT(ifp != encif, ("%s: destroying encif", __func__));
+
+ bpfdetach(ifp);
+ if_detach(ifp);
+ if_free(ifp);
+}
+
+static int
+enc_clone_create(struct if_clone *ifc, int unit, caddr_t params)
+{
+ struct ifnet *ifp;
+ struct enc_softc *sc;
+
+ sc = malloc(sizeof(*sc), M_DEVBUF, M_WAITOK|M_ZERO);
+ ifp = sc->sc_ifp = if_alloc(IFT_ENC);
+ if (ifp == NULL) {
+ free(sc, M_DEVBUF);
+ return (ENOSPC);
+ }
+
+ if_initname(ifp, ifc->ifc_name, unit);
+ ifp->if_mtu = ENCMTU;
+ ifp->if_ioctl = enc_ioctl;
+ ifp->if_output = enc_output;
+ ifp->if_snd.ifq_maxlen = ifqmaxlen;
+ ifp->if_softc = sc;
+ if_attach(ifp);
+ bpfattach(ifp, DLT_ENC, sizeof(struct enchdr));
+
+ mtx_lock(&enc_mtx);
+ /* grab a pointer to enc0, ignore the rest */
+ if (encif == NULL)
+ encif = ifp;
+ mtx_unlock(&enc_mtx);
+
+ return (0);
+}
+
+static int
+enc_modevent(module_t mod, int type, void *data)
+{
+ switch (type) {
+ case MOD_LOAD:
+ mtx_init(&enc_mtx, "enc mtx", NULL, MTX_DEF);
+ if_clone_attach(&enc_cloner);
+ break;
+ case MOD_UNLOAD:
+ printf("enc module unload - not possible for this module\n");
+ return (EINVAL);
+ default:
+ return (EOPNOTSUPP);
+ }
+ return (0);
+}
+
+static moduledata_t enc_mod = {
+ "enc",
+ enc_modevent,
+ 0
+};
+
+DECLARE_MODULE(enc, enc_mod, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_ANY);
+
+static int
+enc_output(struct ifnet *ifp, struct mbuf *m, struct sockaddr *dst,
+ struct route *ro)
+{
+ m_freem(m);
+ return (0);
+}
+
+/*
+ * Process an ioctl request.
+ */
+/* ARGSUSED */
+static int
+enc_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
+{
+ int error = 0;
+
+ mtx_lock(&enc_mtx);
+
+ switch (cmd) {
+
+ case SIOCSIFFLAGS:
+ if (ifp->if_flags & IFF_UP)
+ ifp->if_drv_flags |= IFF_DRV_RUNNING;
+ else
+ ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
+
+ break;
+
+ default:
+ error = EINVAL;
+ }
+
+ mtx_unlock(&enc_mtx);
+ return (error);
+}
+
+int
+ipsec_filter(struct mbuf **mp, int dir, int flags)
+{
+ int error, i;
+ struct ip *ip;
+
+ KASSERT(encif != NULL, ("%s: encif is null", __func__));
+ KASSERT(flags & (ENC_IN|ENC_OUT),
+ ("%s: invalid flags: %04x", __func__, flags));
+
+ if ((encif->if_drv_flags & IFF_DRV_RUNNING) == 0)
+ return (0);
+
+ if (flags & ENC_IN) {
+ if ((flags & ipsec_filter_mask_in) == 0)
+ return (0);
+ } else {
+ if ((flags & ipsec_filter_mask_out) == 0)
+ return (0);
+ }
+
+ /* Skip pfil(9) if no filters are loaded */
+ if (!(PFIL_HOOKED(&V_inet_pfil_hook)
+#ifdef INET6
+ || PFIL_HOOKED(&V_inet6_pfil_hook)
+#endif
+ )) {
+ return (0);
+ }
+
+ i = min((*mp)->m_pkthdr.len, max_protohdr);
+ if ((*mp)->m_len < i) {
+ *mp = m_pullup(*mp, i);
+ if (*mp == NULL) {
+ printf("%s: m_pullup failed\n", __func__);
+ return (-1);
+ }
+ }
+
+ error = 0;
+ ip = mtod(*mp, struct ip *);
+ switch (ip->ip_v) {
+ case 4:
+ /*
+		 * Before calling the firewall, swap the fields the same way
+		 * IP does.  Here we assume the header is contiguous.
+ */
+ ip->ip_len = ntohs(ip->ip_len);
+ ip->ip_off = ntohs(ip->ip_off);
+
+ error = pfil_run_hooks(&V_inet_pfil_hook, mp,
+ encif, dir, NULL);
+
+ if (*mp == NULL || error != 0)
+ break;
+
+ /* restore byte ordering */
+ ip = mtod(*mp, struct ip *);
+ ip->ip_len = htons(ip->ip_len);
+ ip->ip_off = htons(ip->ip_off);
+ break;
+
+#ifdef INET6
+ case 6:
+ error = pfil_run_hooks(&V_inet6_pfil_hook, mp,
+ encif, dir, NULL);
+ break;
+#endif
+ default:
+ printf("%s: unknown IP version\n", __func__);
+ }
+
+ /*
+	 * If the mbuf was consumed by the filter for requeueing (dummynet,
+	 * etc.), then error will be zero but we still want to return an
+	 * error to our caller so the null mbuf isn't forwarded further.
+ */
+ if (*mp == NULL && error == 0)
+ return (-1); /* Consumed by the filter */
+ if (*mp == NULL)
+ return (error);
+ if (error != 0)
+ goto bad;
+
+ return (error);
+
+bad:
+ m_freem(*mp);
+ *mp = NULL;
+ return (error);
+}
+
+void
+ipsec_bpf(struct mbuf *m, struct secasvar *sav, int af, int flags)
+{
+ int mflags;
+ struct enchdr hdr;
+
+ KASSERT(encif != NULL, ("%s: encif is null", __func__));
+ KASSERT(flags & (ENC_IN|ENC_OUT),
+ ("%s: invalid flags: %04x", __func__, flags));
+
+ if ((encif->if_drv_flags & IFF_DRV_RUNNING) == 0)
+ return;
+
+ if (flags & ENC_IN) {
+ if ((flags & ipsec_bpf_mask_in) == 0)
+ return;
+ } else {
+ if ((flags & ipsec_bpf_mask_out) == 0)
+ return;
+ }
+
+ if (bpf_peers_present(encif->if_bpf)) {
+ mflags = 0;
+ hdr.spi = 0;
+ if (!sav) {
+ struct m_tag *mtag;
+ mtag = m_tag_find(m, PACKET_TAG_IPSEC_IN_DONE, NULL);
+ if (mtag != NULL) {
+ struct tdb_ident *tdbi;
+ tdbi = (struct tdb_ident *) (mtag + 1);
+ if (tdbi->alg_enc != SADB_EALG_NONE)
+ mflags |= M_CONF;
+ if (tdbi->alg_auth != SADB_AALG_NONE)
+ mflags |= M_AUTH;
+ hdr.spi = tdbi->spi;
+ }
+ } else {
+ if (sav->alg_enc != SADB_EALG_NONE)
+ mflags |= M_CONF;
+ if (sav->alg_auth != SADB_AALG_NONE)
+ mflags |= M_AUTH;
+ hdr.spi = sav->spi;
+ }
+
+ /*
+		 * We need to prepend the address family as a four-byte
+		 * field.  Cons up a dummy header to pacify bpf.  This is
+		 * safe because bpf will only read from the mbuf (i.e., it
+		 * won't try to free it or keep a pointer to it).
+ */
+ hdr.af = af;
+ /* hdr.spi already set above */
+ hdr.flags = mflags;
+
+ bpf_mtap2(encif->if_bpf, &hdr, sizeof(hdr), m);
+ }
+}
diff --git a/rtems/freebsd/net/if_enc.h b/rtems/freebsd/net/if_enc.h
new file mode 100644
index 00000000..59a55fcf
--- /dev/null
+++ b/rtems/freebsd/net/if_enc.h
@@ -0,0 +1,35 @@
+/*-
+ * Copyright (c) 2008 The FreeBSD Project.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _NET_IF_ENC_H
+#define _NET_IF_ENC_H
+
+extern struct ifnet *encif;
+
+#endif /* _NET_IF_ENC_H */
diff --git a/rtems/freebsd/net/if_epair.c b/rtems/freebsd/net/if_epair.c
new file mode 100644
index 00000000..8c12efc6
--- /dev/null
+++ b/rtems/freebsd/net/if_epair.c
@@ -0,0 +1,955 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 2008 The FreeBSD Foundation
+ * Copyright (c) 2009-2010 Bjoern A. Zeeb <bz@FreeBSD.org>
+ * All rights reserved.
+ *
+ * This software was developed by CK Software GmbH under sponsorship
+ * from the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * A pair of virtual back-to-back connected Ethernet-like interfaces
+ * (``two interfaces with a virtual cross-over cable'').
+ *
+ * This is mostly intended to be used to provide connectivity between
+ * different virtual network stack instances.
+ */
+/*
+ * Things to re-think once we have more experience:
+ * - ifp->if_reassign function once we can test with vimage. Depending on
+ * how if_vmove() is going to be improved.
+ * - Real random etheraddrs that are checked to be uniquish; we would need
+ * to re-do them in case we move the interface between network stacks
+ * in a private if_reassign function.
+ *   In case we bridge to a real interface/network or between independent
+ * epairs on multiple stacks/machines, we may need this.
+ * For now let the user handle that case.
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/module.h>
+#include <rtems/freebsd/sys/refcount.h>
+#include <rtems/freebsd/sys/queue.h>
+#include <rtems/freebsd/sys/smp.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/sockio.h>
+#include <rtems/freebsd/sys/sysctl.h>
+#include <rtems/freebsd/sys/types.h>
+
+#include <rtems/freebsd/net/bpf.h>
+#include <rtems/freebsd/net/ethernet.h>
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/if_clone.h>
+#include <rtems/freebsd/net/if_var.h>
+#include <rtems/freebsd/net/if_types.h>
+#include <rtems/freebsd/net/netisr.h>
+#include <rtems/freebsd/net/vnet.h>
+
+#define EPAIRNAME "epair"
+
+SYSCTL_DECL(_net_link);
+SYSCTL_NODE(_net_link, OID_AUTO, epair, CTLFLAG_RW, 0, "epair sysctl");
+
+#ifdef EPAIR_DEBUG
+static int epair_debug = 0;
+SYSCTL_INT(_net_link_epair, OID_AUTO, epair_debug, CTLFLAG_RW,
+ &epair_debug, 0, "if_epair(4) debugging.");
+#define DPRINTF(fmt, arg...) \
+ if (epair_debug) \
+ printf("[%s:%d] " fmt, __func__, __LINE__, ##arg)
+#else
+#define DPRINTF(fmt, arg...)
+#endif
+
+static void epair_nh_sintr(struct mbuf *);
+static struct mbuf *epair_nh_m2cpuid(struct mbuf *, uintptr_t, u_int *);
+static void epair_nh_drainedcpu(u_int);
+
+static void epair_start_locked(struct ifnet *);
+
+static int epair_clone_match(struct if_clone *, const char *);
+static int epair_clone_create(struct if_clone *, char *, size_t, caddr_t);
+static int epair_clone_destroy(struct if_clone *, struct ifnet *);
+
+/* Netisr-related definitions and sysctl. */
+static struct netisr_handler epair_nh = {
+ .nh_name = EPAIRNAME,
+ .nh_proto = NETISR_EPAIR,
+ .nh_policy = NETISR_POLICY_CPU,
+ .nh_handler = epair_nh_sintr,
+ .nh_m2cpuid = epair_nh_m2cpuid,
+ .nh_drainedcpu = epair_nh_drainedcpu,
+};
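+
+/*
+ * Rough sketch of the resulting packet path (an editorial illustration
+ * derived from the handlers above, not additional code):
+ *
+ *	epair_transmit(ifp, m)			packet leaves one half
+ *	  -> netisr_queue(NETISR_EPAIR, m)	m->m_pkthdr.rcvif = other half
+ *	       -> epair_nh_m2cpuid()		queue picked by sc->cpuid
+ *	       -> epair_nh_sintr(m)		calls (*rcvif->if_input)()
+ *	epair_nh_drainedcpu()			restarts stalled ifqs later
+ */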
+
+static int
+sysctl_epair_netisr_maxqlen(SYSCTL_HANDLER_ARGS)
+{
+ int error, qlimit;
+
+ netisr_getqlimit(&epair_nh, &qlimit);
+ error = sysctl_handle_int(oidp, &qlimit, 0, req);
+ if (error || !req->newptr)
+ return (error);
+ if (qlimit < 1)
+ return (EINVAL);
+ return (netisr_setqlimit(&epair_nh, qlimit));
+}
+SYSCTL_PROC(_net_link_epair, OID_AUTO, netisr_maxqlen, CTLTYPE_INT|CTLFLAG_RW,
+ 0, 0, sysctl_epair_netisr_maxqlen, "I",
+ "Maximum if_epair(4) netisr \"hw\" queue length");
+
+struct epair_softc {
+ struct ifnet *ifp; /* This ifp. */
+ struct ifnet *oifp; /* other ifp of pair. */
+ u_int refcount; /* # of mbufs in flight. */
+ u_int cpuid; /* CPU ID assigned upon creation. */
+ void (*if_qflush)(struct ifnet *);
+ /* Original if_qflush routine. */
+};
+
+/*
+ * Per-CPU list of ifps with data in the ifq that needs to be flushed
+ * to the netisr ``hw'' queue before we allow any further direct queuing
+ * to the ``hw'' queue.
+ */
+struct epair_ifp_drain {
+ STAILQ_ENTRY(epair_ifp_drain) ifp_next;
+ struct ifnet *ifp;
+};
+STAILQ_HEAD(eid_list, epair_ifp_drain);
+
+#define EPAIR_LOCK_INIT(dpcpu) mtx_init(&(dpcpu)->if_epair_mtx, \
+ "if_epair", NULL, MTX_DEF)
+#define EPAIR_LOCK_DESTROY(dpcpu) mtx_destroy(&(dpcpu)->if_epair_mtx)
+#define EPAIR_LOCK_ASSERT(dpcpu) mtx_assert(&(dpcpu)->if_epair_mtx, \
+ MA_OWNED)
+#define EPAIR_LOCK(dpcpu) mtx_lock(&(dpcpu)->if_epair_mtx)
+#define EPAIR_UNLOCK(dpcpu) mtx_unlock(&(dpcpu)->if_epair_mtx)
+
+#ifdef INVARIANTS
+#define EPAIR_REFCOUNT_INIT(r, v) refcount_init((r), (v))
+#define EPAIR_REFCOUNT_ACQUIRE(r) refcount_acquire((r))
+#define EPAIR_REFCOUNT_RELEASE(r) refcount_release((r))
+#define EPAIR_REFCOUNT_ASSERT(a, p) KASSERT(a, p)
+#else
+#define EPAIR_REFCOUNT_INIT(r, v)
+#define EPAIR_REFCOUNT_ACQUIRE(r)
+#define EPAIR_REFCOUNT_RELEASE(r)
+#define EPAIR_REFCOUNT_ASSERT(a, p)
+#endif
+
+static MALLOC_DEFINE(M_EPAIR, EPAIRNAME,
+ "Pair of virtual cross-over connected Ethernet-like interfaces");
+
+static struct if_clone epair_cloner = IFC_CLONE_INITIALIZER(
+ EPAIRNAME, NULL, IF_MAXUNIT,
+ NULL, epair_clone_match, epair_clone_create, epair_clone_destroy);
+
+/*
+ * DPCPU area and functions.
+ */
+struct epair_dpcpu {
+ struct mtx if_epair_mtx; /* Per-CPU locking. */
+ int epair_drv_flags; /* Per-CPU ``hw'' drv flags. */
+ struct eid_list epair_ifp_drain_list; /* Per-CPU list of ifps with
+ * data in the ifq. */
+};
+DPCPU_DEFINE(struct epair_dpcpu, epair_dpcpu);
+
+static void
+epair_dpcpu_init(void)
+{
+ struct epair_dpcpu *epair_dpcpu;
+ struct eid_list *s;
+ u_int cpuid;
+
+ for (cpuid = 0; cpuid <= mp_maxid; cpuid++) {
+ if (CPU_ABSENT(cpuid))
+ continue;
+
+ epair_dpcpu = DPCPU_ID_PTR(cpuid, epair_dpcpu);
+
+ /* Initialize per-cpu lock. */
+ EPAIR_LOCK_INIT(epair_dpcpu);
+
+ /* Driver flags are per-cpu as are our netisr "hw" queues. */
+ epair_dpcpu->epair_drv_flags = 0;
+
+ /*
+ * Initialize per-cpu drain list.
+ * Manually do what STAILQ_HEAD_INITIALIZER would do.
+ */
+ s = &epair_dpcpu->epair_ifp_drain_list;
+ s->stqh_first = NULL;
+ s->stqh_last = &s->stqh_first;
+ }
+}
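+
+/*
+ * For reference, the manual list setup above is exactly what the queue(3)
+ * runtime initializer expands to; an equivalent one-liner would be:
+ *
+ *	STAILQ_INIT(&epair_dpcpu->epair_ifp_drain_list);
+ */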
+
+static void
+epair_dpcpu_detach(void)
+{
+ struct epair_dpcpu *epair_dpcpu;
+ u_int cpuid;
+
+ for (cpuid = 0; cpuid <= mp_maxid; cpuid++) {
+ if (CPU_ABSENT(cpuid))
+ continue;
+
+ epair_dpcpu = DPCPU_ID_PTR(cpuid, epair_dpcpu);
+
+ /* Destroy per-cpu lock. */
+ EPAIR_LOCK_DESTROY(epair_dpcpu);
+ }
+}
+
+/*
+ * Helper functions.
+ */
+static u_int
+cpuid_from_ifp(struct ifnet *ifp)
+{
+ struct epair_softc *sc;
+
+ if (ifp == NULL)
+ return (0);
+ sc = ifp->if_softc;
+
+ return (sc->cpuid);
+}
+
+/*
+ * Netisr handler functions.
+ */
+static void
+epair_nh_sintr(struct mbuf *m)
+{
+ struct ifnet *ifp;
+ struct epair_softc *sc;
+
+ ifp = m->m_pkthdr.rcvif;
+ (*ifp->if_input)(ifp, m);
+ sc = ifp->if_softc;
+ EPAIR_REFCOUNT_RELEASE(&sc->refcount);
+ EPAIR_REFCOUNT_ASSERT((int)sc->refcount >= 1,
+ ("%s: ifp=%p sc->refcount not >= 1: %d",
+ __func__, ifp, sc->refcount));
+ DPRINTF("ifp=%p refcount=%u\n", ifp, sc->refcount);
+}
+
+static struct mbuf *
+epair_nh_m2cpuid(struct mbuf *m, uintptr_t source, u_int *cpuid)
+{
+
+ *cpuid = cpuid_from_ifp(m->m_pkthdr.rcvif);
+
+ return (m);
+}
+
+static void
+epair_nh_drainedcpu(u_int cpuid)
+{
+ struct epair_dpcpu *epair_dpcpu;
+ struct epair_ifp_drain *elm, *tvar;
+ struct ifnet *ifp;
+
+ epair_dpcpu = DPCPU_ID_PTR(cpuid, epair_dpcpu);
+ EPAIR_LOCK(epair_dpcpu);
+ /*
+ * Assume our "hw" queue and possibly ifq will be emptied
+ * again. In case we will overflow the "hw" queue while
+ * draining, epair_start_locked will set IFF_DRV_OACTIVE
+ * again and we will stop and return.
+ */
+ STAILQ_FOREACH_SAFE(elm, &epair_dpcpu->epair_ifp_drain_list,
+ ifp_next, tvar) {
+ ifp = elm->ifp;
+ epair_dpcpu->epair_drv_flags &= ~IFF_DRV_OACTIVE;
+ ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
+ epair_start_locked(ifp);
+
+ IFQ_LOCK(&ifp->if_snd);
+ if (IFQ_IS_EMPTY(&ifp->if_snd)) {
+ struct epair_softc *sc;
+
+ STAILQ_REMOVE(&epair_dpcpu->epair_ifp_drain_list,
+ elm, epair_ifp_drain, ifp_next);
+ /* The cached ifp goes off the list. */
+ sc = ifp->if_softc;
+ EPAIR_REFCOUNT_RELEASE(&sc->refcount);
+ EPAIR_REFCOUNT_ASSERT((int)sc->refcount >= 1,
+ ("%s: ifp=%p sc->refcount not >= 1: %d",
+ __func__, ifp, sc->refcount));
+ free(elm, M_EPAIR);
+ }
+ IFQ_UNLOCK(&ifp->if_snd);
+
+ if ((ifp->if_drv_flags & IFF_DRV_OACTIVE) != 0) {
+ /* Our "hw" queue overflowed again. */
+ epair_dpcpu->epair_drv_flags |= IFF_DRV_OACTIVE;
+ DPRINTF("hw queue length overflow at %u\n",
+ epair_nh.nh_qlimit);
+ break;
+ }
+ }
+ EPAIR_UNLOCK(epair_dpcpu);
+}
+
+/*
+ * Network interface (`if') related functions.
+ */
+static void
+epair_remove_ifp_from_draining(struct ifnet *ifp)
+{
+ struct epair_dpcpu *epair_dpcpu;
+ struct epair_ifp_drain *elm, *tvar;
+ u_int cpuid;
+
+ for (cpuid = 0; cpuid <= mp_maxid; cpuid++) {
+ if (CPU_ABSENT(cpuid))
+ continue;
+
+ epair_dpcpu = DPCPU_ID_PTR(cpuid, epair_dpcpu);
+ EPAIR_LOCK(epair_dpcpu);
+ STAILQ_FOREACH_SAFE(elm, &epair_dpcpu->epair_ifp_drain_list,
+ ifp_next, tvar) {
+ if (ifp == elm->ifp) {
+ struct epair_softc *sc;
+
+ STAILQ_REMOVE(
+ &epair_dpcpu->epair_ifp_drain_list, elm,
+ epair_ifp_drain, ifp_next);
+ /* The cached ifp goes off the list. */
+ sc = ifp->if_softc;
+ EPAIR_REFCOUNT_RELEASE(&sc->refcount);
+ EPAIR_REFCOUNT_ASSERT((int)sc->refcount >= 1,
+ ("%s: ifp=%p sc->refcount not >= 1: %d",
+ __func__, ifp, sc->refcount));
+ free(elm, M_EPAIR);
+ }
+ }
+ EPAIR_UNLOCK(epair_dpcpu);
+ }
+}
+
+static int
+epair_add_ifp_for_draining(struct ifnet *ifp)
+{
+ struct epair_dpcpu *epair_dpcpu;
+ struct epair_softc *sc;
+ struct epair_ifp_drain *elm = NULL;
+
+ sc = ifp->if_softc;
+ epair_dpcpu = DPCPU_ID_PTR(sc->cpuid, epair_dpcpu);
+ EPAIR_LOCK_ASSERT(epair_dpcpu);
+ STAILQ_FOREACH(elm, &epair_dpcpu->epair_ifp_drain_list, ifp_next)
+ if (elm->ifp == ifp)
+ break;
+ /* If the ifp is there already, return success. */
+ if (elm != NULL)
+ return (0);
+
+ elm = malloc(sizeof(struct epair_ifp_drain), M_EPAIR, M_NOWAIT|M_ZERO);
+ if (elm == NULL)
+ return (ENOMEM);
+
+ elm->ifp = ifp;
+ /* Add a reference for the ifp pointer on the list. */
+ EPAIR_REFCOUNT_ACQUIRE(&sc->refcount);
+ STAILQ_INSERT_TAIL(&epair_dpcpu->epair_ifp_drain_list, elm, ifp_next);
+
+ return (0);
+}
+
+static void
+epair_start_locked(struct ifnet *ifp)
+{
+ struct epair_dpcpu *epair_dpcpu;
+ struct mbuf *m;
+ struct epair_softc *sc;
+ struct ifnet *oifp;
+ int error;
+
+ DPRINTF("ifp=%p\n", ifp);
+ sc = ifp->if_softc;
+ epair_dpcpu = DPCPU_ID_PTR(sc->cpuid, epair_dpcpu);
+ EPAIR_LOCK_ASSERT(epair_dpcpu);
+
+ if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
+ return;
+ if ((ifp->if_flags & IFF_UP) == 0)
+ return;
+
+ /*
+ * We get packets here from ether_output via if_handoff()
+ * and need to put them into the input queue of the oifp
+ * and call oifp->if_input() via netisr/epair_nh_sintr().
+ */
+ oifp = sc->oifp;
+ sc = oifp->if_softc;
+ for (;;) {
+ IFQ_DEQUEUE(&ifp->if_snd, m);
+ if (m == NULL)
+ break;
+ BPF_MTAP(ifp, m);
+
+ /*
+ * In case the outgoing interface is not usable,
+ * drop the packet.
+ */
+ if ((oifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
+ (oifp->if_flags & IFF_UP) == 0) {
+ ifp->if_oerrors++;
+ m_freem(m);
+ continue;
+ }
+ DPRINTF("packet %s -> %s\n", ifp->if_xname, oifp->if_xname);
+
+ /*
+ * Add a reference so the interface cannot go away while the
+ * packet is in transit, as we rely on rcvif to stay valid.
+ */
+ EPAIR_REFCOUNT_ACQUIRE(&sc->refcount);
+ m->m_pkthdr.rcvif = oifp;
+ CURVNET_SET_QUIET(oifp->if_vnet);
+ error = netisr_queue(NETISR_EPAIR, m);
+ CURVNET_RESTORE();
+ if (!error) {
+ ifp->if_opackets++;
+ /* Someone else received the packet. */
+ oifp->if_ipackets++;
+ } else {
+ /* The packet was freed already. */
+ epair_dpcpu->epair_drv_flags |= IFF_DRV_OACTIVE;
+ ifp->if_drv_flags |= IFF_DRV_OACTIVE;
+ (void) epair_add_ifp_for_draining(ifp);
+ ifp->if_oerrors++;
+ EPAIR_REFCOUNT_RELEASE(&sc->refcount);
+ EPAIR_REFCOUNT_ASSERT((int)sc->refcount >= 1,
+ ("%s: ifp=%p sc->refcount not >= 1: %d",
+ __func__, oifp, sc->refcount));
+ }
+ }
+}
+
+static void
+epair_start(struct ifnet *ifp)
+{
+ struct epair_dpcpu *epair_dpcpu;
+
+ epair_dpcpu = DPCPU_ID_PTR(cpuid_from_ifp(ifp), epair_dpcpu);
+ EPAIR_LOCK(epair_dpcpu);
+ epair_start_locked(ifp);
+ EPAIR_UNLOCK(epair_dpcpu);
+}
+
+static int
+epair_transmit_locked(struct ifnet *ifp, struct mbuf *m)
+{
+ struct epair_dpcpu *epair_dpcpu;
+ struct epair_softc *sc;
+ struct ifnet *oifp;
+ int error, len;
+ short mflags;
+
+ DPRINTF("ifp=%p m=%p\n", ifp, m);
+ sc = ifp->if_softc;
+ epair_dpcpu = DPCPU_ID_PTR(sc->cpuid, epair_dpcpu);
+ EPAIR_LOCK_ASSERT(epair_dpcpu);
+
+ if (m == NULL)
+ return (0);
+
+ /*
+ * We are not going to use the interface en/dequeue mechanism
+ * on the TX side. We are called from ether_output_frame()
+ * and will put the packet into the incoming queue of the
+ * other interface of our pair via the netisr.
+ */
+ if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
+ m_freem(m);
+ return (ENXIO);
+ }
+ if ((ifp->if_flags & IFF_UP) == 0) {
+ m_freem(m);
+ return (ENETDOWN);
+ }
+
+ BPF_MTAP(ifp, m);
+
+ /*
+ * In case the outgoing interface is not usable,
+ * drop the packet.
+ */
+ oifp = sc->oifp;
+ if ((oifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
+ (oifp->if_flags & IFF_UP) == 0) {
+ ifp->if_oerrors++;
+ m_freem(m);
+ return (0);
+ }
+ len = m->m_pkthdr.len;
+ mflags = m->m_flags;
+ DPRINTF("packet %s -> %s\n", ifp->if_xname, oifp->if_xname);
+
+#ifdef ALTQ
+ /* Support ALTQ via the classic if_start() path. */
+ IF_LOCK(&ifp->if_snd);
+ if (ALTQ_IS_ENABLED(&ifp->if_snd)) {
+ ALTQ_ENQUEUE(&ifp->if_snd, m, NULL, error);
+ if (error)
+ ifp->if_snd.ifq_drops++;
+ IF_UNLOCK(&ifp->if_snd);
+ if (!error) {
+ ifp->if_obytes += len;
+ if (mflags & (M_BCAST|M_MCAST))
+ ifp->if_omcasts++;
+
+ if ((ifp->if_drv_flags & IFF_DRV_OACTIVE) == 0)
+ epair_start_locked(ifp);
+ else
+ (void)epair_add_ifp_for_draining(ifp);
+ }
+ return (error);
+ }
+ IF_UNLOCK(&ifp->if_snd);
+#endif
+
+ if ((epair_dpcpu->epair_drv_flags & IFF_DRV_OACTIVE) != 0) {
+ /*
+ * Our hardware queue is full, try to fall back to
+ * queuing to the ifq but do not call ifp->if_start.
+ * Either we are lucky or the packet is gone.
+ */
+ IFQ_ENQUEUE(&ifp->if_snd, m, error);
+ if (!error)
+ (void)epair_add_ifp_for_draining(ifp);
+ return (error);
+ }
+ sc = oifp->if_softc;
+ /*
+ * Add a reference so the interface cannot go away while the
+ * packet is in transit, as we rely on rcvif to stay valid.
+ */
+ EPAIR_REFCOUNT_ACQUIRE(&sc->refcount);
+ m->m_pkthdr.rcvif = oifp;
+ CURVNET_SET_QUIET(oifp->if_vnet);
+ error = netisr_queue(NETISR_EPAIR, m);
+ CURVNET_RESTORE();
+ if (!error) {
+ ifp->if_opackets++;
+ /*
+ * IFQ_HANDOFF_ADJ()/if_handoff() update statistics,
+ * but as we bypass all this we have to duplicate
+ * the logic another time.
+ */
+ ifp->if_obytes += len;
+ if (mflags & (M_BCAST|M_MCAST))
+ ifp->if_omcasts++;
+ /* Someone else received the packet. */
+ oifp->if_ipackets++;
+ } else {
+ /* The packet was freed already. */
+ epair_dpcpu->epair_drv_flags |= IFF_DRV_OACTIVE;
+ ifp->if_drv_flags |= IFF_DRV_OACTIVE;
+ ifp->if_oerrors++;
+ EPAIR_REFCOUNT_RELEASE(&sc->refcount);
+ EPAIR_REFCOUNT_ASSERT((int)sc->refcount >= 1,
+ ("%s: ifp=%p sc->refcount not >= 1: %d",
+ __func__, oifp, sc->refcount));
+ }
+
+ return (error);
+}
+
+static int
+epair_transmit(struct ifnet *ifp, struct mbuf *m)
+{
+ struct epair_dpcpu *epair_dpcpu;
+ int error;
+
+ epair_dpcpu = DPCPU_ID_PTR(cpuid_from_ifp(ifp), epair_dpcpu);
+ EPAIR_LOCK(epair_dpcpu);
+ error = epair_transmit_locked(ifp, m);
+ EPAIR_UNLOCK(epair_dpcpu);
+ return (error);
+}
+
+static void
+epair_qflush(struct ifnet *ifp)
+{
+ struct epair_softc *sc;
+
+ sc = ifp->if_softc;
+ KASSERT(sc != NULL, ("%s: ifp=%p, epair_softc gone? sc=%p\n",
+ __func__, ifp, sc));
+ /*
+ * Remove this ifp from all backpointer lists. The interface will
+ * not be usable for flushing anyway, nor should it have anything
+ * to flush after if_qflush().
+ */
+ epair_remove_ifp_from_draining(ifp);
+
+ if (sc->if_qflush)
+ sc->if_qflush(ifp);
+}
+
+static int
+epair_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
+{
+ struct ifreq *ifr;
+ int error;
+
+ ifr = (struct ifreq *)data;
+ switch (cmd) {
+ case SIOCSIFFLAGS:
+ case SIOCADDMULTI:
+ case SIOCDELMULTI:
+ error = 0;
+ break;
+
+ case SIOCSIFMTU:
+ /* We basically allow all kinds of MTUs. */
+ ifp->if_mtu = ifr->ifr_mtu;
+ error = 0;
+ break;
+
+ default:
+ /* Let the common ethernet handler process this. */
+ error = ether_ioctl(ifp, cmd, data);
+ break;
+ }
+
+ return (error);
+}
+
+static void
+epair_init(void *dummy __unused)
+{
+}
+
+
+/*
+ * Interface cloning functions.
+ * We use our private ones so that we can create/destroy our secondary
+ * device along with the primary one.
+ */
+static int
+epair_clone_match(struct if_clone *ifc, const char *name)
+{
+ const char *cp;
+
+ DPRINTF("name='%s'\n", name);
+
+ /*
+ * Our base name is epair.
+ * Our interfaces will be named epair<n>[ab].
+ * So accept anything of the following list:
+ * - epair
+ * - epair<n>
+ * but not the epair<n>[ab] versions.
+ */
+ if (strncmp(EPAIRNAME, name, sizeof(EPAIRNAME)-1) != 0)
+ return (0);
+
+ for (cp = name + sizeof(EPAIRNAME) - 1; *cp != '\0'; cp++) {
+ if (*cp < '0' || *cp > '9')
+ return (0);
+ }
+
+ return (1);
+}
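+
+/*
+ * A few illustrative cases for the match rules above (editorial
+ * examples, not from the original sources):
+ *
+ *	"epair"		matches (base name, wildcard unit)
+ *	"epair7"	matches (explicit unit)
+ *	"epair7a"	no match ('a' fails the digits-only check)
+ *	"epairx"	no match
+ */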
+
+static int
+epair_clone_create(struct if_clone *ifc, char *name, size_t len, caddr_t params)
+{
+ struct epair_softc *sca, *scb;
+ struct ifnet *ifp;
+ char *dp;
+ int error, unit, wildcard;
+ uint8_t eaddr[ETHER_ADDR_LEN]; /* 00:00:00:00:00:00 */
+
+ /*
+ * We are abusing params to create our second interface.
+ * Actually we already created it and called if_clone_createif()
+ * for it to do the official insertion procedure the moment we knew
+ * it cannot fail anymore. So just attach it here.
+ */
+ if (params) {
+ scb = (struct epair_softc *)params;
+ ifp = scb->ifp;
+ /* Assign a hopefully unique, locally administered etheraddr. */
+ eaddr[0] = 0x02;
+ eaddr[3] = (ifp->if_index >> 8) & 0xff;
+ eaddr[4] = ifp->if_index & 0xff;
+ eaddr[5] = 0x0b;
+ ether_ifattach(ifp, eaddr);
+ /* Correctly set the name for the cloner list. */
+ strlcpy(name, scb->ifp->if_xname, len);
+ return (0);
+ }
+
+ /* Try to see if a special unit was requested. */
+ error = ifc_name2unit(name, &unit);
+ if (error != 0)
+ return (error);
+ wildcard = (unit < 0);
+
+ error = ifc_alloc_unit(ifc, &unit);
+ if (error != 0)
+ return (error);
+
+ /*
+ * If no unit had been given, we need to adjust the ifName.
+ * Also make sure there is space for our extra [ab] suffix.
+ */
+ for (dp = name; *dp != '\0'; dp++);
+ if (wildcard) {
+ error = snprintf(dp, len - (dp - name), "%d", unit);
+ if (error > len - (dp - name) - 1) {
+ /* ifName too long. */
+ ifc_free_unit(ifc, unit);
+ return (ENOSPC);
+ }
+ dp += error;
+ }
+ if (len - (dp - name) - 1 < 1) {
+ /* No space left for our [ab] suffix. */
+ ifc_free_unit(ifc, unit);
+ return (ENOSPC);
+ }
+ *dp = 'a';
+ /* Must not change dp so we can replace 'a' by 'b' later. */
+ *(dp+1) = '\0';
+
+ /* Allocate memory for both [ab] interfaces */
+ sca = malloc(sizeof(struct epair_softc), M_EPAIR, M_WAITOK | M_ZERO);
+ EPAIR_REFCOUNT_INIT(&sca->refcount, 1);
+ sca->ifp = if_alloc(IFT_ETHER);
+ if (sca->ifp == NULL) {
+ free(sca, M_EPAIR);
+ ifc_free_unit(ifc, unit);
+ return (ENOSPC);
+ }
+
+ scb = malloc(sizeof(struct epair_softc), M_EPAIR, M_WAITOK | M_ZERO);
+ EPAIR_REFCOUNT_INIT(&scb->refcount, 1);
+ scb->ifp = if_alloc(IFT_ETHER);
+ if (scb->ifp == NULL) {
+ free(scb, M_EPAIR);
+ if_free(sca->ifp);
+ free(sca, M_EPAIR);
+ ifc_free_unit(ifc, unit);
+ return (ENOSPC);
+ }
+
+ /*
+ * Cross-reference the interfaces so we will be able to free both.
+ */
+ sca->oifp = scb->ifp;
+ scb->oifp = sca->ifp;
+
+ /*
+ * Calculate the cpuid for netisr queueing based on the
+ * ifIndex of the interfaces. As long as we cannot configure
+ * this or use cpuset information easily we cannot guarantee
+ * cache locality but we can at least allow parallelism.
+ */
+ sca->cpuid =
+ netisr_get_cpuid(sca->ifp->if_index % netisr_get_cpucount());
+ scb->cpuid =
+ netisr_get_cpuid(scb->ifp->if_index % netisr_get_cpucount());
+
+ /* Finish initialization of interface <n>a. */
+ ifp = sca->ifp;
+ ifp->if_softc = sca;
+ strlcpy(ifp->if_xname, name, IFNAMSIZ);
+ ifp->if_dname = ifc->ifc_name;
+ ifp->if_dunit = unit;
+ ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
+ ifp->if_start = epair_start;
+ ifp->if_ioctl = epair_ioctl;
+ ifp->if_init = epair_init;
+ ifp->if_snd.ifq_maxlen = ifqmaxlen;
+ /* Assign a hopefully unique, locally administered etheraddr. */
+ eaddr[0] = 0x02;
+ eaddr[3] = (ifp->if_index >> 8) & 0xff;
+ eaddr[4] = ifp->if_index & 0xff;
+ eaddr[5] = 0x0a;
+ ether_ifattach(ifp, eaddr);
+ sca->if_qflush = ifp->if_qflush;
+ ifp->if_qflush = epair_qflush;
+ ifp->if_transmit = epair_transmit;
+ ifp->if_baudrate = IF_Gbps(10UL); /* arbitrary maximum */
+
+ /* Swap the name and finish initialization of interface <n>b. */
+ *dp = 'b';
+
+ ifp = scb->ifp;
+ ifp->if_softc = scb;
+ strlcpy(ifp->if_xname, name, IFNAMSIZ);
+ ifp->if_dname = ifc->ifc_name;
+ ifp->if_dunit = unit;
+ ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
+ ifp->if_start = epair_start;
+ ifp->if_ioctl = epair_ioctl;
+ ifp->if_init = epair_init;
+ ifp->if_snd.ifq_maxlen = ifqmaxlen;
+ /* We need to play some tricks here for the second interface. */
+ strlcpy(name, EPAIRNAME, len);
+ error = if_clone_create(name, len, (caddr_t)scb);
+ if (error)
+ panic("%s: if_clone_createif() for our 2nd iface failed: %d",
+ __func__, error);
+ scb->if_qflush = ifp->if_qflush;
+ ifp->if_qflush = epair_qflush;
+ ifp->if_transmit = epair_transmit;
+ ifp->if_baudrate = IF_Gbps(10UL); /* arbitrary maximum */
+
+ /*
+ * Restore name to <n>a as the ifp for this will go into the
+ * cloner list for the initial call.
+ */
+ strlcpy(name, sca->ifp->if_xname, len);
+ DPRINTF("name='%s/%db' created sca=%p scb=%p\n", name, unit, sca, scb);
+
+ /* Tell the world, that we are ready to rock. */
+ sca->ifp->if_drv_flags |= IFF_DRV_RUNNING;
+ scb->ifp->if_drv_flags |= IFF_DRV_RUNNING;
+ if_link_state_change(sca->ifp, LINK_STATE_UP);
+ if_link_state_change(scb->ifp, LINK_STATE_UP);
+
+ return (0);
+}
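+
+/*
+ * Illustrative usage (standard interface-cloning semantics, not part of
+ * this file): one create request yields both halves of the pair, e.g.
+ *
+ *	# ifconfig epair0 create	creates epair0a and epair0b
+ *	# ifconfig epair0a destroy	tears down both interfaces
+ */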
+
+static int
+epair_clone_destroy(struct if_clone *ifc, struct ifnet *ifp)
+{
+ struct ifnet *oifp;
+ struct epair_softc *sca, *scb;
+ int unit, error;
+
+ DPRINTF("ifp=%p\n", ifp);
+
+ /*
+ * In case we called into if_clone_destroyif() ourselves
+ * again to remove the second interface, the softc will be
+ * NULL. In that case do not do anything but return success.
+ */
+ if (ifp->if_softc == NULL)
+ return (0);
+
+ unit = ifp->if_dunit;
+ sca = ifp->if_softc;
+ oifp = sca->oifp;
+ scb = oifp->if_softc;
+
+ DPRINTF("ifp=%p oifp=%p\n", ifp, oifp);
+ if_link_state_change(ifp, LINK_STATE_DOWN);
+ if_link_state_change(oifp, LINK_STATE_DOWN);
+ ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
+ oifp->if_drv_flags &= ~IFF_DRV_RUNNING;
+ ether_ifdetach(oifp);
+ ether_ifdetach(ifp);
+ /*
+ * Wait for all packets to be dispatched to if_input.
+ * The numbers can only go down as the interfaces are
+ * detached so there is no need to use atomics.
+ */
+ DPRINTF("sca refcnt=%u scb refcnt=%u\n", sca->refcount, scb->refcount);
+ EPAIR_REFCOUNT_ASSERT(sca->refcount == 1 && scb->refcount == 1,
+ ("%s: ifp=%p sca->refcount!=1: %d || ifp=%p scb->refcount!=1: %d",
+ __func__, ifp, sca->refcount, oifp, scb->refcount));
+
+ /*
+ * Get rid of our second half.
+ */
+ oifp->if_softc = NULL;
+ error = if_clone_destroyif(ifc, oifp);
+ if (error)
+ panic("%s: if_clone_destroyif() for our 2nd iface failed: %d",
+ __func__, error);
+
+ /*
+ * Finish cleaning up. Free them and release the unit.
+ * As the other of the two interfaces may reside in a different vnet,
+ * we need to switch before freeing them.
+ */
+ CURVNET_SET_QUIET(oifp->if_vnet);
+ if_free(oifp);
+ CURVNET_RESTORE();
+ if_free(ifp);
+ free(scb, M_EPAIR);
+ free(sca, M_EPAIR);
+ ifc_free_unit(ifc, unit);
+
+ return (0);
+}
+
+static int
+epair_modevent(module_t mod, int type, void *data)
+{
+ int qlimit;
+
+ switch (type) {
+ case MOD_LOAD:
+ /* For now limit us to one global mutex and one inq. */
+ epair_dpcpu_init();
+ epair_nh.nh_qlimit = 42 * ifqmaxlen; /* 42 shall be the number. */
+#ifndef __rtems__
+ if (TUNABLE_INT_FETCH("net.link.epair.netisr_maxqlen", &qlimit))
+ epair_nh.nh_qlimit = qlimit;
+#endif
+ netisr_register(&epair_nh);
+ if_clone_attach(&epair_cloner);
+ if (bootverbose)
+ printf("%s initialized.\n", EPAIRNAME);
+ break;
+ case MOD_UNLOAD:
+ if_clone_detach(&epair_cloner);
+ netisr_unregister(&epair_nh);
+ epair_dpcpu_detach();
+ if (bootverbose)
+ printf("%s unloaded.\n", EPAIRNAME);
+ break;
+ default:
+ return (EOPNOTSUPP);
+ }
+ return (0);
+}
+
+static moduledata_t epair_mod = {
+ "if_epair",
+ epair_modevent,
+ 0
+};
+
+DECLARE_MODULE(if_epair, epair_mod, SI_SUB_PSEUDO, SI_ORDER_ANY);
+MODULE_VERSION(if_epair, 1);
diff --git a/rtems/freebsd/net/if_ethersubr.c b/rtems/freebsd/net/if_ethersubr.c
new file mode 100644
index 00000000..cf5492cd
--- /dev/null
+++ b/rtems/freebsd/net/if_ethersubr.c
@@ -0,0 +1,1364 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 1982, 1989, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)if_ethersubr.c 8.1 (Berkeley) 6/10/93
+ * $FreeBSD$
+ */
+
+#include <rtems/freebsd/local/opt_atalk.h>
+#include <rtems/freebsd/local/opt_inet.h>
+#include <rtems/freebsd/local/opt_inet6.h>
+#include <rtems/freebsd/local/opt_ipx.h>
+#include <rtems/freebsd/local/opt_netgraph.h>
+#include <rtems/freebsd/local/opt_mbuf_profiling.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/module.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/random.h>
+#include <rtems/freebsd/sys/rwlock.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/sockio.h>
+#include <rtems/freebsd/sys/sysctl.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/if_arp.h>
+#include <rtems/freebsd/net/netisr.h>
+#include <rtems/freebsd/net/route.h>
+#include <rtems/freebsd/net/if_llc.h>
+#include <rtems/freebsd/net/if_dl.h>
+#include <rtems/freebsd/net/if_types.h>
+#include <rtems/freebsd/net/bpf.h>
+#include <rtems/freebsd/net/ethernet.h>
+#include <rtems/freebsd/net/if_bridgevar.h>
+#include <rtems/freebsd/net/if_vlan_var.h>
+#include <rtems/freebsd/net/if_llatbl.h>
+#include <rtems/freebsd/net/pf_mtag.h>
+#include <rtems/freebsd/net/vnet.h>
+
+#if defined(INET) || defined(INET6)
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/in_var.h>
+#include <rtems/freebsd/netinet/if_ether.h>
+#include <rtems/freebsd/netinet/ip_carp.h>
+#include <rtems/freebsd/netinet/ip_var.h>
+#include <rtems/freebsd/netinet/ip_fw.h>
+#include <rtems/freebsd/netinet/ipfw/ip_fw_private.h>
+#endif
+#ifdef INET6
+#include <rtems/freebsd/netinet6/nd6.h>
+#endif
+
+#ifdef IPX
+#include <rtems/freebsd/netipx/ipx.h>
+#include <rtems/freebsd/netipx/ipx_if.h>
+#endif
+
+int (*ef_inputp)(struct ifnet*, struct ether_header *eh, struct mbuf *m);
+int (*ef_outputp)(struct ifnet *ifp, struct mbuf **mp,
+ struct sockaddr *dst, short *tp, int *hlen);
+
+#ifdef NETATALK
+#include <rtems/freebsd/netatalk/at.h>
+#include <rtems/freebsd/netatalk/at_var.h>
+#include <rtems/freebsd/netatalk/at_extern.h>
+
+#define llc_snap_org_code llc_un.type_snap.org_code
+#define llc_snap_ether_type llc_un.type_snap.ether_type
+
+extern u_char at_org_code[3];
+extern u_char aarp_org_code[3];
+#endif /* NETATALK */
+
+#include <rtems/freebsd/security/mac/mac_framework.h>
+
+#ifdef CTASSERT
+CTASSERT(sizeof (struct ether_header) == ETHER_ADDR_LEN * 2 + 2);
+CTASSERT(sizeof (struct ether_addr) == ETHER_ADDR_LEN);
+#endif
+
+/* netgraph node hooks for ng_ether(4) */
+void (*ng_ether_input_p)(struct ifnet *ifp, struct mbuf **mp);
+void (*ng_ether_input_orphan_p)(struct ifnet *ifp, struct mbuf *m);
+int (*ng_ether_output_p)(struct ifnet *ifp, struct mbuf **mp);
+void (*ng_ether_attach_p)(struct ifnet *ifp);
+void (*ng_ether_detach_p)(struct ifnet *ifp);
+
+void (*vlan_input_p)(struct ifnet *, struct mbuf *);
+
+/* if_bridge(4) support */
+struct mbuf *(*bridge_input_p)(struct ifnet *, struct mbuf *);
+int (*bridge_output_p)(struct ifnet *, struct mbuf *,
+ struct sockaddr *, struct rtentry *);
+void (*bridge_dn_p)(struct mbuf *, struct ifnet *);
+
+/* if_lagg(4) support */
+struct mbuf *(*lagg_input_p)(struct ifnet *, struct mbuf *);
+
+static const u_char etherbroadcastaddr[ETHER_ADDR_LEN] =
+ { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
+
+static int ether_resolvemulti(struct ifnet *, struct sockaddr **,
+ struct sockaddr *);
+#ifdef VIMAGE
+static void ether_reassign(struct ifnet *, struct vnet *, char *);
+#endif
+
+/* XXX: should be in an arp support file, not here */
+MALLOC_DEFINE(M_ARPCOM, "arpcom", "802.* interface internals");
+
+#define ETHER_IS_BROADCAST(addr) \
+ (bcmp(etherbroadcastaddr, (addr), ETHER_ADDR_LEN) == 0)
+
+#define senderr(e) do { error = (e); goto bad;} while (0)
+
+#if defined(INET) || defined(INET6)
+int
+ether_ipfw_chk(struct mbuf **m0, struct ifnet *dst, int shared);
+static VNET_DEFINE(int, ether_ipfw);
+#define V_ether_ipfw VNET(ether_ipfw)
+#endif
+
+
+/*
+ * Ethernet output routine.
+ * Encapsulate a packet of type family for the local net.
+ * Use trailer local net encapsulation if enough data in first
+ * packet leaves a multiple of 512 bytes of data in remainder.
+ */
+int
+ether_output(struct ifnet *ifp, struct mbuf *m,
+ struct sockaddr *dst, struct route *ro)
+{
+ short type;
+ int error = 0, hdrcmplt = 0;
+ u_char esrc[ETHER_ADDR_LEN], edst[ETHER_ADDR_LEN];
+ struct llentry *lle = NULL;
+ struct rtentry *rt0 = NULL;
+ struct ether_header *eh;
+ struct pf_mtag *t;
+ int loop_copy = 1;
+ int hlen; /* link layer header length */
+
+ if (ro != NULL) {
+ if (!(m->m_flags & (M_BCAST | M_MCAST)))
+ lle = ro->ro_lle;
+ rt0 = ro->ro_rt;
+ }
+#ifdef MAC
+ error = mac_ifnet_check_transmit(ifp, m);
+ if (error)
+ senderr(error);
+#endif
+
+ M_PROFILE(m);
+ if (ifp->if_flags & IFF_MONITOR)
+ senderr(ENETDOWN);
+ if (!((ifp->if_flags & IFF_UP) &&
+ (ifp->if_drv_flags & IFF_DRV_RUNNING)))
+ senderr(ENETDOWN);
+
+ hlen = ETHER_HDR_LEN;
+ switch (dst->sa_family) {
+#ifdef INET
+ case AF_INET:
+ if (lle != NULL && (lle->la_flags & LLE_VALID))
+ memcpy(edst, &lle->ll_addr.mac16, sizeof(edst));
+ else
+ error = arpresolve(ifp, rt0, m, dst, edst, &lle);
+ if (error)
+ return (error == EWOULDBLOCK ? 0 : error);
+ type = htons(ETHERTYPE_IP);
+ break;
+ case AF_ARP:
+ {
+ struct arphdr *ah;
+ ah = mtod(m, struct arphdr *);
+ ah->ar_hrd = htons(ARPHRD_ETHER);
+
+ loop_copy = 0; /* if this is for us, don't do it */
+
+ switch(ntohs(ah->ar_op)) {
+ case ARPOP_REVREQUEST:
+ case ARPOP_REVREPLY:
+ type = htons(ETHERTYPE_REVARP);
+ break;
+ case ARPOP_REQUEST:
+ case ARPOP_REPLY:
+ default:
+ type = htons(ETHERTYPE_ARP);
+ break;
+ }
+
+ if (m->m_flags & M_BCAST)
+ bcopy(ifp->if_broadcastaddr, edst, ETHER_ADDR_LEN);
+ else
+ bcopy(ar_tha(ah), edst, ETHER_ADDR_LEN);
+
+ }
+ break;
+#endif
+#ifdef INET6
+ case AF_INET6:
+ if (lle != NULL && (lle->la_flags & LLE_VALID))
+ memcpy(edst, &lle->ll_addr.mac16, sizeof(edst));
+ else
+ error = nd6_storelladdr(ifp, m, dst, (u_char *)edst, &lle);
+ if (error)
+ return error;
+ type = htons(ETHERTYPE_IPV6);
+ break;
+#endif
+#ifdef IPX
+ case AF_IPX:
+ if (ef_outputp) {
+ error = ef_outputp(ifp, &m, dst, &type, &hlen);
+ if (error)
+ goto bad;
+ } else
+ type = htons(ETHERTYPE_IPX);
+ bcopy((caddr_t)&(((struct sockaddr_ipx *)dst)->sipx_addr.x_host),
+ (caddr_t)edst, sizeof (edst));
+ break;
+#endif
+#ifdef NETATALK
+ case AF_APPLETALK:
+ {
+ struct at_ifaddr *aa;
+
+ if ((aa = at_ifawithnet((struct sockaddr_at *)dst)) == NULL)
+ senderr(EHOSTUNREACH); /* XXX */
+ if (!aarpresolve(ifp, m, (struct sockaddr_at *)dst, edst)) {
+ ifa_free(&aa->aa_ifa);
+ return (0);
+ }
+ /*
+ * In the phase 2 case, need to prepend an mbuf for the llc header.
+ */
+ if (aa->aa_flags & AFA_PHASE2) {
+ struct llc llc;
+
+ ifa_free(&aa->aa_ifa);
+ M_PREPEND(m, LLC_SNAPFRAMELEN, M_DONTWAIT);
+ if (m == NULL)
+ senderr(ENOBUFS);
+ llc.llc_dsap = llc.llc_ssap = LLC_SNAP_LSAP;
+ llc.llc_control = LLC_UI;
+ bcopy(at_org_code, llc.llc_snap_org_code, sizeof(at_org_code));
+ llc.llc_snap_ether_type = htons(ETHERTYPE_AT);
+ bcopy(&llc, mtod(m, caddr_t), LLC_SNAPFRAMELEN);
+ type = htons(m->m_pkthdr.len);
+ hlen = LLC_SNAPFRAMELEN + ETHER_HDR_LEN;
+ } else {
+ ifa_free(&aa->aa_ifa);
+ type = htons(ETHERTYPE_AT);
+ }
+ break;
+ }
+#endif /* NETATALK */
+
+ case pseudo_AF_HDRCMPLT:
+ hdrcmplt = 1;
+ eh = (struct ether_header *)dst->sa_data;
+ (void)memcpy(esrc, eh->ether_shost, sizeof (esrc));
+ /* FALLTHROUGH */
+
+ case AF_UNSPEC:
+ loop_copy = 0; /* if this is for us, don't do it */
+ eh = (struct ether_header *)dst->sa_data;
+ (void)memcpy(edst, eh->ether_dhost, sizeof (edst));
+ type = eh->ether_type;
+ break;
+
+ default:
+ if_printf(ifp, "can't handle af%d\n", dst->sa_family);
+ senderr(EAFNOSUPPORT);
+ }
+
+ if (lle != NULL && (lle->la_flags & LLE_IFADDR)) {
+ int csum_flags = 0;
+ if (m->m_pkthdr.csum_flags & CSUM_IP)
+ csum_flags |= (CSUM_IP_CHECKED|CSUM_IP_VALID);
+ if (m->m_pkthdr.csum_flags & CSUM_DELAY_DATA)
+ csum_flags |= (CSUM_DATA_VALID|CSUM_PSEUDO_HDR);
+ if (m->m_pkthdr.csum_flags & CSUM_SCTP)
+ csum_flags |= CSUM_SCTP_VALID;
+ m->m_pkthdr.csum_flags |= csum_flags;
+ m->m_pkthdr.csum_data = 0xffff;
+ return (if_simloop(ifp, m, dst->sa_family, 0));
+ }
+
+ /*
+ * Add local net header. If no space in first mbuf,
+ * allocate another.
+ */
+ M_PREPEND(m, ETHER_HDR_LEN, M_DONTWAIT);
+ if (m == NULL)
+ senderr(ENOBUFS);
+ eh = mtod(m, struct ether_header *);
+ (void)memcpy(&eh->ether_type, &type,
+ sizeof(eh->ether_type));
+ (void)memcpy(eh->ether_dhost, edst, sizeof (edst));
+ if (hdrcmplt)
+ (void)memcpy(eh->ether_shost, esrc,
+ sizeof(eh->ether_shost));
+ else
+ (void)memcpy(eh->ether_shost, IF_LLADDR(ifp),
+ sizeof(eh->ether_shost));
+
+ /*
+ * If a simplex interface, and the packet is being sent to our
+ * Ethernet address or a broadcast address, loopback a copy.
+ * XXX To make a simplex device behave exactly like a duplex
+ * device, we should copy in the case of sending to our own
+ * ethernet address (thus letting the original actually appear
+ * on the wire). However, we don't do that here for security
+ * reasons and compatibility with the original behavior.
+ */
+ if ((ifp->if_flags & IFF_SIMPLEX) && loop_copy &&
+ ((t = pf_find_mtag(m)) == NULL || !t->routed)) {
+ int csum_flags = 0;
+
+ if (m->m_pkthdr.csum_flags & CSUM_IP)
+ csum_flags |= (CSUM_IP_CHECKED|CSUM_IP_VALID);
+ if (m->m_pkthdr.csum_flags & CSUM_DELAY_DATA)
+ csum_flags |= (CSUM_DATA_VALID|CSUM_PSEUDO_HDR);
+ if (m->m_pkthdr.csum_flags & CSUM_SCTP)
+ csum_flags |= CSUM_SCTP_VALID;
+
+ if (m->m_flags & M_BCAST) {
+ struct mbuf *n;
+
+ /*
+ * Because if_simloop() modifies the packet, we need a
+ * writable copy through m_dup() instead of a readonly
+ * one as m_copy[m] would give us. The alternative would
+ * be to modify if_simloop() to handle the readonly mbuf,
+ * but performancewise it is mostly equivalent (trading
+ * extra data copying vs. extra locking).
+ *
+ * XXX This is a local workaround. A number of less
+ * often used kernel parts suffer from the same bug.
+ * See PR kern/105943 for a proposed general solution.
+ */
+ if ((n = m_dup(m, M_DONTWAIT)) != NULL) {
+ n->m_pkthdr.csum_flags |= csum_flags;
+ if (csum_flags & CSUM_DATA_VALID)
+ n->m_pkthdr.csum_data = 0xffff;
+ (void)if_simloop(ifp, n, dst->sa_family, hlen);
+ } else
+ ifp->if_iqdrops++;
+ } else if (bcmp(eh->ether_dhost, eh->ether_shost,
+ ETHER_ADDR_LEN) == 0) {
+ m->m_pkthdr.csum_flags |= csum_flags;
+ if (csum_flags & CSUM_DATA_VALID)
+ m->m_pkthdr.csum_data = 0xffff;
+ (void) if_simloop(ifp, m, dst->sa_family, hlen);
+ return (0); /* XXX */
+ }
+ }
+
+ /*
+ * Bridges require special output handling.
+ */
+ if (ifp->if_bridge) {
+ BRIDGE_OUTPUT(ifp, m, error);
+ return (error);
+ }
+
+#if defined(INET) || defined(INET6)
+ if (ifp->if_carp &&
+ (error = (*carp_output_p)(ifp, m, dst, NULL)))
+ goto bad;
+#endif
+
+ /* Handle ng_ether(4) processing, if any */
+ if (IFP2AC(ifp)->ac_netgraph != NULL) {
+ KASSERT(ng_ether_output_p != NULL,
+ ("ng_ether_output_p is NULL"));
+ if ((error = (*ng_ether_output_p)(ifp, &m)) != 0) {
+bad: if (m != NULL)
+ m_freem(m);
+ return (error);
+ }
+ if (m == NULL)
+ return (0);
+ }
+
+ /* Continue with link-layer output */
+ return ether_output_frame(ifp, m);
+}
+
+/*
+ * Ethernet link layer output routine to send a raw frame to the device.
+ *
+ * This assumes that the 14 byte Ethernet header is present and contiguous
+ * in the first mbuf (if BRIDGE'ing).
+ */
+int
+ether_output_frame(struct ifnet *ifp, struct mbuf *m)
+{
+#if defined(INET) || defined(INET6)
+
+ if (V_ip_fw_chk_ptr && V_ether_ipfw != 0) {
+ if (ether_ipfw_chk(&m, ifp, 0) == 0) {
+ if (m) {
+ m_freem(m);
+ return EACCES; /* pkt dropped */
+ } else
+ return 0; /* consumed e.g. in a pipe */
+ }
+ }
+#endif
+
+ /*
+ * Queue message on interface, update output statistics if
+ * successful, and start output if interface not yet active.
+ */
+ return ((ifp->if_transmit)(ifp, m));
+}
+
+#if defined(INET) || defined(INET6)
+/*
+ * ipfw processing for ethernet packets (in and out).
+ * The second parameter is NULL from ether_demux, and ifp from
+ * ether_output_frame.
+ */
+int
+ether_ipfw_chk(struct mbuf **m0, struct ifnet *dst, int shared)
+{
+ struct ether_header *eh;
+ struct ether_header save_eh;
+ struct mbuf *m;
+ int i;
+ struct ip_fw_args args;
+ struct m_tag *mtag;
+
+ /* fetch start point from rule, if any */
+ mtag = m_tag_locate(*m0, MTAG_IPFW_RULE, 0, NULL);
+ if (mtag == NULL) {
+ args.rule.slot = 0;
+ } else {
+ /* dummynet packet, already partially processed */
+ struct ipfw_rule_ref *r;
+
+ /* XXX can we free it after use ? */
+ mtag->m_tag_id = PACKET_TAG_NONE;
+ r = (struct ipfw_rule_ref *)(mtag + 1);
+ if (r->info & IPFW_ONEPASS)
+ return (1);
+ args.rule = *r;
+ }
+
+ /*
+ * We need some amount of data to be contiguous, and in case others
+ * need the packet (shared == 1) it had also better be in the first mbuf.
+ */
+ m = *m0;
+ i = min(m->m_pkthdr.len, max_protohdr);
+ if (shared || m->m_len < i) {
+ m = m_pullup(m, i);
+ if (m == NULL) {
+ *m0 = m;
+ return 0;
+ }
+ }
+ eh = mtod(m, struct ether_header *);
+ save_eh = *eh; /* save copy for restore below */
+ m_adj(m, ETHER_HDR_LEN); /* strip ethernet header */
+
+ args.m = m; /* the packet we are looking at */
+ args.oif = dst; /* destination, if any */
+ args.next_hop = NULL; /* we do not support forward yet */
+ args.eh = &save_eh; /* MAC header for bridged/MAC packets */
+ args.inp = NULL; /* used by ipfw uid/gid/jail rules */
+ i = V_ip_fw_chk_ptr(&args);
+ m = args.m;
+ if (m != NULL) {
+ /*
+ * Restore Ethernet header, as needed, in case the
+ * mbuf chain was replaced by ipfw.
+ */
+ M_PREPEND(m, ETHER_HDR_LEN, M_DONTWAIT);
+ if (m == NULL) {
+ *m0 = m;
+ return 0;
+ }
+ if (eh != mtod(m, struct ether_header *))
+ bcopy(&save_eh, mtod(m, struct ether_header *),
+ ETHER_HDR_LEN);
+ }
+ *m0 = m;
+
+ if (i == IP_FW_DENY) /* drop */
+ return 0;
+
+ KASSERT(m != NULL, ("ether_ipfw_chk: m is NULL"));
+
+ if (i == IP_FW_PASS) /* a PASS rule. */
+ return 1;
+
+ if (ip_dn_io_ptr && (i == IP_FW_DUMMYNET)) {
+ int dir;
+ /*
+ * Pass the pkt to dummynet, which consumes it.
+ * If shared, make a copy and keep the original.
+ */
+ if (shared) {
+ m = m_copypacket(m, M_DONTWAIT);
+ if (m == NULL)
+ return 0;
+ } else {
+ /*
+ * Pass the original to dummynet and
+ * nothing back to the caller
+ */
+ *m0 = NULL ;
+ }
+ dir = PROTO_LAYER2 | (dst ? DIR_OUT : DIR_IN);
+ ip_dn_io_ptr(&m, dir, &args);
+ return 0;
+ }
+ /*
+ * XXX at some point add support for divert/forward actions.
+ * If none of the above matches, we have to drop the pkt.
+ */
+ return 0;
+}
+#endif
+
+/*
+ * Process a received Ethernet packet; the packet is in the
+ * mbuf chain m with the ethernet header at the front.
+ */
+static void
+ether_input(struct ifnet *ifp, struct mbuf *m)
+{
+ struct ether_header *eh;
+ u_short etype;
+
+ if ((ifp->if_flags & IFF_UP) == 0) {
+ m_freem(m);
+ return;
+ }
+#ifdef DIAGNOSTIC
+ if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
+ if_printf(ifp, "discard frame at !IFF_DRV_RUNNING\n");
+ m_freem(m);
+ return;
+ }
+#endif
+ /*
+ * Do consistency checks to verify assumptions
+ * made by code past this point.
+ */
+ if ((m->m_flags & M_PKTHDR) == 0) {
+ if_printf(ifp, "discard frame w/o packet header\n");
+ ifp->if_ierrors++;
+ m_freem(m);
+ return;
+ }
+ if (m->m_len < ETHER_HDR_LEN) {
+ /* XXX maybe should pullup? */
+ if_printf(ifp, "discard frame w/o leading ethernet "
+ "header (len %u pkt len %u)\n",
+ m->m_len, m->m_pkthdr.len);
+ ifp->if_ierrors++;
+ m_freem(m);
+ return;
+ }
+ eh = mtod(m, struct ether_header *);
+ etype = ntohs(eh->ether_type);
+ if (m->m_pkthdr.rcvif == NULL) {
+ if_printf(ifp, "discard frame w/o interface pointer\n");
+ ifp->if_ierrors++;
+ m_freem(m);
+ return;
+ }
+#ifdef DIAGNOSTIC
+ if (m->m_pkthdr.rcvif != ifp) {
+ if_printf(ifp, "Warning, frame marked as received on %s\n",
+ m->m_pkthdr.rcvif->if_xname);
+ }
+#endif
+
+ CURVNET_SET_QUIET(ifp->if_vnet);
+
+ if (ETHER_IS_MULTICAST(eh->ether_dhost)) {
+ if (ETHER_IS_BROADCAST(eh->ether_dhost))
+ m->m_flags |= M_BCAST;
+ else
+ m->m_flags |= M_MCAST;
+ ifp->if_imcasts++;
+ }
+
+#ifdef MAC
+ /*
+ * Tag the mbuf with an appropriate MAC label before any other
+ * consumers can get to it.
+ */
+ mac_ifnet_create_mbuf(ifp, m);
+#endif
+
+ /*
+ * Give bpf a chance at the packet.
+ */
+ ETHER_BPF_MTAP(ifp, m);
+
+ /*
+ * If the CRC is still on the packet, trim it off. We do this once
+ * and once only in case we are re-entered. Nothing else on the
+ * Ethernet receive path expects to see the FCS.
+ */
+ if (m->m_flags & M_HASFCS) {
+ m_adj(m, -ETHER_CRC_LEN);
+ m->m_flags &= ~M_HASFCS;
+ }
+
+ ifp->if_ibytes += m->m_pkthdr.len;
+
+ /* Allow monitor mode to claim this frame, after stats are updated. */
+ if (ifp->if_flags & IFF_MONITOR) {
+ m_freem(m);
+ CURVNET_RESTORE();
+ return;
+ }
+
+ /* Handle input from a lagg(4) port */
+ if (ifp->if_type == IFT_IEEE8023ADLAG) {
+ KASSERT(lagg_input_p != NULL,
+ ("%s: if_lagg not loaded!", __func__));
+ m = (*lagg_input_p)(ifp, m);
+ if (m != NULL)
+ ifp = m->m_pkthdr.rcvif;
+ else
+ return;
+ }
+
+ /*
+ * If the hardware did not process an 802.1Q tag, do this now,
+ * to allow 802.1P priority frames to be passed to the main input
+ * path correctly.
+ * TODO: Deal with Q-in-Q frames, but not arbitrary nesting levels.
+ */
+ if ((m->m_flags & M_VLANTAG) == 0 && etype == ETHERTYPE_VLAN) {
+ struct ether_vlan_header *evl;
+
+ if (m->m_len < sizeof(*evl) &&
+ (m = m_pullup(m, sizeof(*evl))) == NULL) {
+#ifdef DIAGNOSTIC
+ if_printf(ifp, "cannot pullup VLAN header\n");
+#endif
+ ifp->if_ierrors++;
+ m_freem(m);
+ return;
+ }
+
+ evl = mtod(m, struct ether_vlan_header *);
+ m->m_pkthdr.ether_vtag = ntohs(evl->evl_tag);
+ m->m_flags |= M_VLANTAG;
+
+ bcopy((char *)evl, (char *)evl + ETHER_VLAN_ENCAP_LEN,
+ ETHER_HDR_LEN - ETHER_TYPE_LEN);
+ m_adj(m, ETHER_VLAN_ENCAP_LEN);
+ }
+
+ /* Allow ng_ether(4) to claim this frame. */
+ if (IFP2AC(ifp)->ac_netgraph != NULL) {
+ KASSERT(ng_ether_input_p != NULL,
+ ("%s: ng_ether_input_p is NULL", __func__));
+ m->m_flags &= ~M_PROMISC;
+ (*ng_ether_input_p)(ifp, &m);
+ if (m == NULL) {
+ CURVNET_RESTORE();
+ return;
+ }
+ }
+
+ /*
+ * Allow if_bridge(4) to claim this frame.
+ * The BRIDGE_INPUT() macro will update ifp if the bridge changed it
+ * and the frame should be delivered locally.
+ */
+ if (ifp->if_bridge != NULL) {
+ m->m_flags &= ~M_PROMISC;
+ BRIDGE_INPUT(ifp, m);
+ if (m == NULL) {
+ CURVNET_RESTORE();
+ return;
+ }
+ }
+
+#if defined(INET) || defined(INET6)
+ /*
+ * Clear M_PROMISC on frame so that carp(4) will see it when the
+ * mbuf flows up to Layer 3.
+ * FreeBSD's implementation of carp(4) uses the inprotosw
+ * to dispatch IPPROTO_CARP. carp(4) also allocates its own
+ * Ethernet addresses of the form 00:00:5e:00:01:xx, which
+ * is outside the scope of the M_PROMISC test below.
+ * TODO: Maintain a hash table of ethernet addresses other than
+ * ether_dhost which may be active on this ifp.
+ */
+ if (ifp->if_carp && (*carp_forus_p)(ifp, eh->ether_dhost)) {
+ m->m_flags &= ~M_PROMISC;
+ } else
+#endif
+ {
+ /*
+ * If the frame received was not for our MAC address, set the
+ * M_PROMISC flag on the mbuf chain. The frame may need to
+ * be seen by the rest of the Ethernet input path in case of
+ * re-entry (e.g. bridge, vlan, netgraph) but should not be
+ * seen by upper protocol layers.
+ */
+ if (!ETHER_IS_MULTICAST(eh->ether_dhost) &&
+ bcmp(IF_LLADDR(ifp), eh->ether_dhost, ETHER_ADDR_LEN) != 0)
+ m->m_flags |= M_PROMISC;
+ }
+
+ /* First chunk of an mbuf contains good entropy */
+ if (harvest.ethernet)
+ random_harvest(m, 16, 3, 0, RANDOM_NET);
+
+ ether_demux(ifp, m);
+ CURVNET_RESTORE();
+}
+
+/*
+ * Upper layer processing for a received Ethernet packet.
+ */
+void
+ether_demux(struct ifnet *ifp, struct mbuf *m)
+{
+ struct ether_header *eh;
+ int isr;
+ u_short ether_type;
+#if defined(NETATALK)
+ struct llc *l;
+#endif
+
+ KASSERT(ifp != NULL, ("%s: NULL interface pointer", __func__));
+
+#if defined(INET) || defined(INET6)
+ /*
+ * Allow dummynet and/or ipfw to claim the frame.
+ * Do not do this for PROMISC frames in case we are re-entered.
+ */
+ if (V_ip_fw_chk_ptr && V_ether_ipfw != 0 && !(m->m_flags & M_PROMISC)) {
+ if (ether_ipfw_chk(&m, NULL, 0) == 0) {
+ if (m)
+ m_freem(m); /* dropped; free mbuf chain */
+ return; /* consumed */
+ }
+ }
+#endif
+ eh = mtod(m, struct ether_header *);
+ ether_type = ntohs(eh->ether_type);
+
+ /*
+ * If this frame has a VLAN tag other than 0, call vlan_input()
+ * if its module is loaded. Otherwise, drop.
+ */
+ if ((m->m_flags & M_VLANTAG) &&
+ EVL_VLANOFTAG(m->m_pkthdr.ether_vtag) != 0) {
+ if (ifp->if_vlantrunk == NULL) {
+ ifp->if_noproto++;
+ m_freem(m);
+ return;
+ }
+ KASSERT(vlan_input_p != NULL,("%s: VLAN not loaded!",
+ __func__));
+ /* Clear before possibly re-entering ether_input(). */
+ m->m_flags &= ~M_PROMISC;
+ (*vlan_input_p)(ifp, m);
+ return;
+ }
+
+ /*
+ * Pass promiscuously received frames to the upper layer if the user
+ * requested this by setting IFF_PPROMISC. Otherwise, drop them.
+ */
+ if ((ifp->if_flags & IFF_PPROMISC) == 0 && (m->m_flags & M_PROMISC)) {
+ m_freem(m);
+ return;
+ }
+
+ /*
+ * Reset layer specific mbuf flags to avoid confusing upper layers.
+ * Strip off Ethernet header.
+ */
+ m->m_flags &= ~M_VLANTAG;
+ m->m_flags &= ~(M_PROTOFLAGS);
+ m_adj(m, ETHER_HDR_LEN);
+
+ /*
+ * Dispatch frame to upper layer.
+ */
+ switch (ether_type) {
+#ifdef INET
+ case ETHERTYPE_IP:
+ if ((m = ip_fastforward(m)) == NULL)
+ return;
+ isr = NETISR_IP;
+ break;
+
+ case ETHERTYPE_ARP:
+ if (ifp->if_flags & IFF_NOARP) {
+ /* Discard packet if ARP is disabled on interface */
+ m_freem(m);
+ return;
+ }
+ isr = NETISR_ARP;
+ break;
+#endif
+#ifdef IPX
+ case ETHERTYPE_IPX:
+ if (ef_inputp && ef_inputp(ifp, eh, m) == 0)
+ return;
+ isr = NETISR_IPX;
+ break;
+#endif
+#ifdef INET6
+ case ETHERTYPE_IPV6:
+ isr = NETISR_IPV6;
+ break;
+#endif
+#ifdef NETATALK
+ case ETHERTYPE_AT:
+ isr = NETISR_ATALK1;
+ break;
+ case ETHERTYPE_AARP:
+ isr = NETISR_AARP;
+ break;
+#endif /* NETATALK */
+ default:
+#ifdef IPX
+ if (ef_inputp && ef_inputp(ifp, eh, m) == 0)
+ return;
+#endif /* IPX */
+#if defined(NETATALK)
+ if (ether_type > ETHERMTU)
+ goto discard;
+ l = mtod(m, struct llc *);
+ if (l->llc_dsap == LLC_SNAP_LSAP &&
+ l->llc_ssap == LLC_SNAP_LSAP &&
+ l->llc_control == LLC_UI) {
+ if (bcmp(&(l->llc_snap_org_code)[0], at_org_code,
+ sizeof(at_org_code)) == 0 &&
+ ntohs(l->llc_snap_ether_type) == ETHERTYPE_AT) {
+ m_adj(m, LLC_SNAPFRAMELEN);
+ isr = NETISR_ATALK2;
+ break;
+ }
+ if (bcmp(&(l->llc_snap_org_code)[0], aarp_org_code,
+ sizeof(aarp_org_code)) == 0 &&
+ ntohs(l->llc_snap_ether_type) == ETHERTYPE_AARP) {
+ m_adj(m, LLC_SNAPFRAMELEN);
+ isr = NETISR_AARP;
+ break;
+ }
+ }
+#endif /* NETATALK */
+ goto discard;
+ }
+ netisr_dispatch(isr, m);
+ return;
+
+discard:
+ /*
+ * Packet is to be discarded. If netgraph is present,
+ * hand the packet to it for last chance processing;
+ * otherwise dispose of it.
+ */
+ if (IFP2AC(ifp)->ac_netgraph != NULL) {
+ KASSERT(ng_ether_input_orphan_p != NULL,
+ ("ng_ether_input_orphan_p is NULL"));
+ /*
+ * Put back the ethernet header so netgraph has a
+ * consistent view of inbound packets.
+ */
+ M_PREPEND(m, ETHER_HDR_LEN, M_DONTWAIT);
+ (*ng_ether_input_orphan_p)(ifp, m);
+ return;
+ }
+ m_freem(m);
+}
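+
+/*
+ * Putting the receive path together (an editorial summary of the code
+ * above, not additional functionality):
+ *
+ *	driver -> (*ifp->if_input)(ifp, m), i.e. ether_input()
+ *	  -> optional claims: lagg(4), ng_ether(4), if_bridge(4), carp
+ *	  -> ether_demux()
+ *	       -> vlan(4) input for tagged frames, or
+ *	       -> netisr_dispatch(NETISR_IP / NETISR_IPV6 / ..., m)
+ */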
+
+/*
+ * Convert Ethernet address to printable (loggable) representation.
+ * This routine is for compatibility; it's better to just use
+ *
+ * printf("%6D", <pointer to address>, ":");
+ *
+ * since there's no static buffer involved.
+ */
+char *
+ether_sprintf(const u_char *ap)
+{
+ static char etherbuf[18];
+ snprintf(etherbuf, sizeof (etherbuf), "%6D", ap, ":");
+ return (etherbuf);
+}
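+
+/*
+ * For example, ether_sprintf() applied to the broadcast address returns
+ * the string "ff:ff:ff:ff:ff:ff": at most 17 characters plus the
+ * terminating NUL, hence the 18-byte static buffer.
+ */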
+
+/*
+ * Perform common duties while attaching to interface list
+ */
+void
+ether_ifattach(struct ifnet *ifp, const u_int8_t *lla)
+{
+ int i;
+ struct ifaddr *ifa;
+ struct sockaddr_dl *sdl;
+
+ ifp->if_addrlen = ETHER_ADDR_LEN;
+ ifp->if_hdrlen = ETHER_HDR_LEN;
+ if_attach(ifp);
+ ifp->if_mtu = ETHERMTU;
+ ifp->if_output = ether_output;
+ ifp->if_input = ether_input;
+ ifp->if_resolvemulti = ether_resolvemulti;
+#ifdef VIMAGE
+ ifp->if_reassign = ether_reassign;
+#endif
+ if (ifp->if_baudrate == 0)
+ ifp->if_baudrate = IF_Mbps(10); /* just a default */
+ ifp->if_broadcastaddr = etherbroadcastaddr;
+
+ ifa = ifp->if_addr;
+ KASSERT(ifa != NULL, ("%s: no lladdr!\n", __func__));
+ sdl = (struct sockaddr_dl *)ifa->ifa_addr;
+ sdl->sdl_type = IFT_ETHER;
+ sdl->sdl_alen = ifp->if_addrlen;
+ bcopy(lla, LLADDR(sdl), ifp->if_addrlen);
+
+ bpfattach(ifp, DLT_EN10MB, ETHER_HDR_LEN);
+ if (ng_ether_attach_p != NULL)
+ (*ng_ether_attach_p)(ifp);
+
+ /* Announce Ethernet MAC address if non-zero. */
+ for (i = 0; i < ifp->if_addrlen; i++)
+ if (lla[i] != 0)
+ break;
+ if (i != ifp->if_addrlen)
+ if_printf(ifp, "Ethernet address: %6D\n", lla, ":");
+}
+
+/*
+ * Perform common duties while detaching an Ethernet interface
+ */
+void
+ether_ifdetach(struct ifnet *ifp)
+{
+ if (IFP2AC(ifp)->ac_netgraph != NULL) {
+ KASSERT(ng_ether_detach_p != NULL,
+ ("ng_ether_detach_p is NULL"));
+ (*ng_ether_detach_p)(ifp);
+ }
+
+ bpfdetach(ifp);
+ if_detach(ifp);
+}
+
+#ifdef VIMAGE
+void
+ether_reassign(struct ifnet *ifp, struct vnet *new_vnet, char *unused __unused)
+{
+
+ if (IFP2AC(ifp)->ac_netgraph != NULL) {
+ KASSERT(ng_ether_detach_p != NULL,
+ ("ng_ether_detach_p is NULL"));
+ (*ng_ether_detach_p)(ifp);
+ }
+
+ if (ng_ether_attach_p != NULL) {
+ CURVNET_SET_QUIET(new_vnet);
+ (*ng_ether_attach_p)(ifp);
+ CURVNET_RESTORE();
+ }
+}
+#endif
+
+SYSCTL_DECL(_net_link);
+SYSCTL_NODE(_net_link, IFT_ETHER, ether, CTLFLAG_RW, 0, "Ethernet");
+#if defined(INET) || defined(INET6)
+SYSCTL_VNET_INT(_net_link_ether, OID_AUTO, ipfw, CTLFLAG_RW,
+ &VNET_NAME(ether_ipfw), 0, "Pass ether pkts through firewall");
+#endif
+
+#if 0
+/*
+ * This is for reference. We have a table-driven version
+ * of the little-endian crc32 generator, which is faster
+ * than the double-loop.
+ */
+uint32_t
+ether_crc32_le(const uint8_t *buf, size_t len)
+{
+ size_t i;
+ uint32_t crc, carry;
+ int bit;
+ uint8_t data;
+
+ crc = 0xffffffff; /* initial value */
+
+ for (i = 0; i < len; i++) {
+ for (data = *buf++, bit = 0; bit < 8; bit++, data >>= 1) {
+ carry = (crc ^ data) & 1;
+ crc >>= 1;
+ if (carry)
+ crc = (crc ^ ETHER_CRC_POLY_LE);
+ }
+ }
+
+ return (crc);
+}
+#else
+uint32_t
+ether_crc32_le(const uint8_t *buf, size_t len)
+{
+ static const uint32_t crctab[] = {
+ 0x00000000, 0x1db71064, 0x3b6e20c8, 0x26d930ac,
+ 0x76dc4190, 0x6b6b51f4, 0x4db26158, 0x5005713c,
+ 0xedb88320, 0xf00f9344, 0xd6d6a3e8, 0xcb61b38c,
+ 0x9b64c2b0, 0x86d3d2d4, 0xa00ae278, 0xbdbdf21c
+ };
+ size_t i;
+ uint32_t crc;
+
+ crc = 0xffffffff; /* initial value */
+
+ for (i = 0; i < len; i++) {
+ crc ^= buf[i];
+ crc = (crc >> 4) ^ crctab[crc & 0xf];
+ crc = (crc >> 4) ^ crctab[crc & 0xf];
+ }
+
+ return (crc);
+}
+#endif
+
+uint32_t
+ether_crc32_be(const uint8_t *buf, size_t len)
+{
+ size_t i;
+ uint32_t crc, carry;
+ int bit;
+ uint8_t data;
+
+ crc = 0xffffffff; /* initial value */
+
+ for (i = 0; i < len; i++) {
+ for (data = *buf++, bit = 0; bit < 8; bit++, data >>= 1) {
+ carry = ((crc & 0x80000000) ? 1 : 0) ^ (data & 0x01);
+ crc <<= 1;
+ if (carry)
+ crc = (crc ^ ETHER_CRC_POLY_BE) | carry;
+ }
+ }
+
+ return (crc);
+}
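+
+/*
+ * A typical consumer of these routines (an illustrative sketch, not code
+ * from this file) is a NIC driver deriving its 64-bin multicast hash
+ * filter from the upper bits of the big-endian CRC:
+ *
+ *	static void
+ *	example_hash_maddr(uint64_t *hash, const uint8_t *enaddr)
+ *	{
+ *		uint32_t crc;
+ *
+ *		crc = ether_crc32_be(enaddr, ETHER_ADDR_LEN);
+ *		(the top 6 bits of the CRC pick one of 64 bins)
+ *		*hash |= 1ULL << (crc >> 26);
+ *	}
+ */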
+
+int
+ether_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
+{
+ struct ifaddr *ifa = (struct ifaddr *) data;
+ struct ifreq *ifr = (struct ifreq *) data;
+ int error = 0;
+
+ switch (command) {
+ case SIOCSIFADDR:
+ ifp->if_flags |= IFF_UP;
+
+ switch (ifa->ifa_addr->sa_family) {
+#ifdef INET
+ case AF_INET:
+ ifp->if_init(ifp->if_softc); /* before arpwhohas */
+ arp_ifinit(ifp, ifa);
+ break;
+#endif
+#ifdef IPX
+ /*
+ * XXX - This code is probably wrong
+ */
+ case AF_IPX:
+ {
+ struct ipx_addr *ina = &(IA_SIPX(ifa)->sipx_addr);
+
+ if (ipx_nullhost(*ina))
+ ina->x_host =
+ *(union ipx_host *)
+ IF_LLADDR(ifp);
+ else {
+ bcopy((caddr_t) ina->x_host.c_host,
+ (caddr_t) IF_LLADDR(ifp),
+ ETHER_ADDR_LEN);
+ }
+
+ /*
+ * Set new address
+ */
+ ifp->if_init(ifp->if_softc);
+ break;
+ }
+#endif
+ default:
+ ifp->if_init(ifp->if_softc);
+ break;
+ }
+ break;
+
+ case SIOCGIFADDR:
+ {
+ struct sockaddr *sa;
+
+ sa = (struct sockaddr *) & ifr->ifr_data;
+ bcopy(IF_LLADDR(ifp),
+ (caddr_t) sa->sa_data, ETHER_ADDR_LEN);
+ }
+ break;
+
+ case SIOCSIFMTU:
+ /*
+ * Set the interface MTU.
+ */
+ if (ifr->ifr_mtu > ETHERMTU) {
+ error = EINVAL;
+ } else {
+ ifp->if_mtu = ifr->ifr_mtu;
+ }
+ break;
+ default:
+ error = EINVAL; /* XXX netbsd has ENOTTY??? */
+ break;
+ }
+ return (error);
+}
+
+static int
+ether_resolvemulti(struct ifnet *ifp, struct sockaddr **llsa,
+ struct sockaddr *sa)
+{
+ struct sockaddr_dl *sdl;
+#ifdef INET
+ struct sockaddr_in *sin;
+#endif
+#ifdef INET6
+ struct sockaddr_in6 *sin6;
+#endif
+ u_char *e_addr;
+
+ switch(sa->sa_family) {
+ case AF_LINK:
+ /*
+ * No mapping needed. Just check that it's a valid MC address.
+ */
+ sdl = (struct sockaddr_dl *)sa;
+ e_addr = LLADDR(sdl);
+ if (!ETHER_IS_MULTICAST(e_addr))
+ return EADDRNOTAVAIL;
+ *llsa = 0;
+ return 0;
+
+#ifdef INET
+ case AF_INET:
+ sin = (struct sockaddr_in *)sa;
+ if (!IN_MULTICAST(ntohl(sin->sin_addr.s_addr)))
+ return EADDRNOTAVAIL;
+ sdl = malloc(sizeof *sdl, M_IFMADDR,
+ M_NOWAIT|M_ZERO);
+ if (sdl == NULL)
+ return ENOMEM;
+ sdl->sdl_len = sizeof *sdl;
+ sdl->sdl_family = AF_LINK;
+ sdl->sdl_index = ifp->if_index;
+ sdl->sdl_type = IFT_ETHER;
+ sdl->sdl_alen = ETHER_ADDR_LEN;
+ e_addr = LLADDR(sdl);
+ ETHER_MAP_IP_MULTICAST(&sin->sin_addr, e_addr);
+ *llsa = (struct sockaddr *)sdl;
+ return 0;
+#endif
+#ifdef INET6
+ case AF_INET6:
+ sin6 = (struct sockaddr_in6 *)sa;
+ if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
+ /*
+ * An IP6 address of 0 means listen to all
+ * of the Ethernet multicast addresses used for IP6.
+ * (This is used for multicast routers.)
+ */
+ ifp->if_flags |= IFF_ALLMULTI;
+ *llsa = 0;
+ return 0;
+ }
+ if (!IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr))
+ return EADDRNOTAVAIL;
+ sdl = malloc(sizeof *sdl, M_IFMADDR,
+ M_NOWAIT|M_ZERO);
+ if (sdl == NULL)
+ return (ENOMEM);
+ sdl->sdl_len = sizeof *sdl;
+ sdl->sdl_family = AF_LINK;
+ sdl->sdl_index = ifp->if_index;
+ sdl->sdl_type = IFT_ETHER;
+ sdl->sdl_alen = ETHER_ADDR_LEN;
+ e_addr = LLADDR(sdl);
+ ETHER_MAP_IPV6_MULTICAST(&sin6->sin6_addr, e_addr);
+ *llsa = (struct sockaddr *)sdl;
+ return 0;
+#endif
+
+ default:
+ /*
+ * Well, the text isn't quite right, but it's the name
+ * that counts...
+ */
+ return EAFNOSUPPORT;
+ }
+}
+
+static void*
+ether_alloc(u_char type, struct ifnet *ifp)
+{
+ struct arpcom *ac;
+
+ ac = malloc(sizeof(struct arpcom), M_ARPCOM, M_WAITOK | M_ZERO);
+ ac->ac_ifp = ifp;
+
+ return (ac);
+}
+
+static void
+ether_free(void *com, u_char type)
+{
+
+ free(com, M_ARPCOM);
+}
+
+static int
+ether_modevent(module_t mod, int type, void *data)
+{
+
+ switch (type) {
+ case MOD_LOAD:
+ if_register_com_alloc(IFT_ETHER, ether_alloc, ether_free);
+ break;
+ case MOD_UNLOAD:
+ if_deregister_com_alloc(IFT_ETHER);
+ break;
+ default:
+ return EOPNOTSUPP;
+ }
+
+ return (0);
+}
+
+static moduledata_t ether_mod = {
+ "ether",
+ ether_modevent,
+ 0
+};
+
+void
+ether_vlan_mtap(struct bpf_if *bp, struct mbuf *m, void *data, u_int dlen)
+{
+ struct ether_vlan_header vlan;
+ struct mbuf mv, mb;
+
+ KASSERT((m->m_flags & M_VLANTAG) != 0,
+ ("%s: vlan information not present", __func__));
+ KASSERT(m->m_len >= sizeof(struct ether_header),
+ ("%s: mbuf not large enough for header", __func__));
+ bcopy(mtod(m, char *), &vlan, sizeof(struct ether_header));
+ vlan.evl_proto = vlan.evl_encap_proto;
+ vlan.evl_encap_proto = htons(ETHERTYPE_VLAN);
+ vlan.evl_tag = htons(m->m_pkthdr.ether_vtag);
+ m->m_len -= sizeof(struct ether_header);
+ m->m_data += sizeof(struct ether_header);
+ /*
+ * If a data link has been supplied by the caller, then we will need to
+	 * re-create a stack-allocated mbuf chain with the following structure:
+ *
+ * (1) mbuf #1 will contain the supplied data link
+ * (2) mbuf #2 will contain the vlan header
+ * (3) mbuf #3 will contain the original mbuf's packet data
+ *
+ * Otherwise, submit the packet and vlan header via bpf_mtap2().
+ */
+ if (data != NULL) {
+ mv.m_next = m;
+ mv.m_data = (caddr_t)&vlan;
+ mv.m_len = sizeof(vlan);
+ mb.m_next = &mv;
+ mb.m_data = data;
+ mb.m_len = dlen;
+ bpf_mtap(bp, &mb);
+ } else
+ bpf_mtap2(bp, &vlan, sizeof(vlan), m);
+ m->m_len += sizeof(struct ether_header);
+ m->m_data -= sizeof(struct ether_header);
+}
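+
+/*
+ * A minimal sketch of the calling side (illustrative; the fragment is
+ * hypothetical driver code): a transmit path taps outgoing frames for
+ * BPF, using ether_vlan_mtap() when the VLAN tag exists only
+ * out-of-band in the packet header.
+ */
+#ifdef EXAMPLE_SKETCH
+static void
+example_tx_tap(struct ifnet *ifp, struct mbuf *m)
+{
+
+ if (bpf_peers_present(ifp->if_bpf)) {
+ if (m->m_flags & M_VLANTAG)
+ ether_vlan_mtap(ifp->if_bpf, m, NULL, 0);
+ else
+ bpf_mtap(ifp->if_bpf, m);
+ }
+}
+#endif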
+
+struct mbuf *
+ether_vlanencap(struct mbuf *m, uint16_t tag)
+{
+ struct ether_vlan_header *evl;
+
+ M_PREPEND(m, ETHER_VLAN_ENCAP_LEN, M_DONTWAIT);
+ if (m == NULL)
+ return (NULL);
+ /* M_PREPEND takes care of m_len, m_pkthdr.len for us */
+
+ if (m->m_len < sizeof(*evl)) {
+ m = m_pullup(m, sizeof(*evl));
+ if (m == NULL)
+ return (NULL);
+ }
+
+ /*
+ * Transform the Ethernet header into an Ethernet header
+ * with 802.1Q encapsulation.
+ */
+ evl = mtod(m, struct ether_vlan_header *);
+ bcopy((char *)evl + ETHER_VLAN_ENCAP_LEN,
+ (char *)evl, ETHER_HDR_LEN - ETHER_TYPE_LEN);
+ evl->evl_encap_proto = htons(ETHERTYPE_VLAN);
+ evl->evl_tag = htons(tag);
+ return (m);
+}
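+
+/*
+ * A minimal sketch (illustrative; the helper is hypothetical): when
+ * the hardware cannot insert 802.1Q tags itself, a driver can fall
+ * back to software encapsulation before queueing the frame.
+ */
+#ifdef EXAMPLE_SKETCH
+static struct mbuf *
+example_vlan_fixup(struct mbuf *m)
+{
+
+ if ((m->m_flags & M_VLANTAG) != 0) {
+ m = ether_vlanencap(m, m->m_pkthdr.ether_vtag);
+ if (m == NULL)
+ return (NULL); /* chain was freed on failure */
+ m->m_flags &= ~M_VLANTAG;
+ }
+ return (m);
+}
+#endif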
+
+DECLARE_MODULE(ether, ether_mod, SI_SUB_INIT_IF, SI_ORDER_ANY);
+MODULE_VERSION(ether, 1);
diff --git a/rtems/freebsd/net/if_faith.c b/rtems/freebsd/net/if_faith.c
new file mode 100644
index 00000000..7ccfb4d3
--- /dev/null
+++ b/rtems/freebsd/net/if_faith.c
@@ -0,0 +1,353 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/* $KAME: if_faith.c,v 1.23 2001/12/17 13:55:29 sumikawa Exp $ */
+
+/*-
+ * Copyright (c) 1982, 1986, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+/*
+ * derived from
+ * @(#)if_loop.c 8.1 (Berkeley) 6/10/93
+ * Id: if_loop.c,v 1.22 1996/06/19 16:24:10 wollman Exp
+ */
+
+/*
+ * Loopback interface driver for protocol testing and timing.
+ */
+#include <rtems/freebsd/local/opt_inet.h>
+#include <rtems/freebsd/local/opt_inet6.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/module.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/errno.h>
+#include <rtems/freebsd/sys/sockio.h>
+#include <rtems/freebsd/sys/time.h>
+#include <rtems/freebsd/sys/queue.h>
+#include <rtems/freebsd/sys/types.h>
+#include <rtems/freebsd/sys/malloc.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/if_clone.h>
+#include <rtems/freebsd/net/if_types.h>
+#include <rtems/freebsd/net/netisr.h>
+#include <rtems/freebsd/net/route.h>
+#include <rtems/freebsd/net/bpf.h>
+#include <rtems/freebsd/net/vnet.h>
+
+#ifdef INET
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/in_systm.h>
+#include <rtems/freebsd/netinet/in_var.h>
+#include <rtems/freebsd/netinet/ip.h>
+#endif
+
+#ifdef INET6
+#ifndef INET
+#include <rtems/freebsd/netinet/in.h>
+#endif
+#include <rtems/freebsd/netinet6/in6_var.h>
+#include <rtems/freebsd/netinet/ip6.h>
+#include <rtems/freebsd/netinet6/ip6_var.h>
+#endif
+
+#define FAITHNAME "faith"
+
+struct faith_softc {
+ struct ifnet *sc_ifp;
+};
+
+static int faithioctl(struct ifnet *, u_long, caddr_t);
+int faithoutput(struct ifnet *, struct mbuf *, struct sockaddr *,
+ struct route *);
+static void faithrtrequest(int, struct rtentry *, struct rt_addrinfo *);
+#ifdef INET6
+static int faithprefix(struct in6_addr *);
+#endif
+
+static int faithmodevent(module_t, int, void *);
+
+static MALLOC_DEFINE(M_FAITH, FAITHNAME, "Firewall Assisted Tunnel Interface");
+
+static int faith_clone_create(struct if_clone *, int, caddr_t);
+static void faith_clone_destroy(struct ifnet *);
+
+IFC_SIMPLE_DECLARE(faith, 0);
+
+#define FAITHMTU 1500
+
+static int
+faithmodevent(mod, type, data)
+ module_t mod;
+ int type;
+ void *data;
+{
+
+ switch (type) {
+ case MOD_LOAD:
+ if_clone_attach(&faith_cloner);
+
+#ifdef INET6
+ faithprefix_p = faithprefix;
+#endif
+
+ break;
+ case MOD_UNLOAD:
+#ifdef INET6
+ faithprefix_p = NULL;
+#endif
+
+ if_clone_detach(&faith_cloner);
+ break;
+ default:
+ return EOPNOTSUPP;
+ }
+ return 0;
+}
+
+static moduledata_t faith_mod = {
+ "if_faith",
+ faithmodevent,
+ 0
+};
+
+DECLARE_MODULE(if_faith, faith_mod, SI_SUB_PSEUDO, SI_ORDER_ANY);
+MODULE_VERSION(if_faith, 1);
+
+static int
+faith_clone_create(ifc, unit, params)
+ struct if_clone *ifc;
+ int unit;
+ caddr_t params;
+{
+ struct ifnet *ifp;
+ struct faith_softc *sc;
+
+ sc = malloc(sizeof(struct faith_softc), M_FAITH, M_WAITOK | M_ZERO);
+ ifp = sc->sc_ifp = if_alloc(IFT_FAITH);
+ if (ifp == NULL) {
+ free(sc, M_FAITH);
+ return (ENOSPC);
+ }
+
+ ifp->if_softc = sc;
+ if_initname(sc->sc_ifp, ifc->ifc_name, unit);
+
+ ifp->if_mtu = FAITHMTU;
+	/* Change to BROADCAST experimentally to announce its prefix. */
+ ifp->if_flags = /* IFF_LOOPBACK */ IFF_BROADCAST | IFF_MULTICAST;
+ ifp->if_ioctl = faithioctl;
+ ifp->if_output = faithoutput;
+ ifp->if_hdrlen = 0;
+ ifp->if_addrlen = 0;
+ ifp->if_snd.ifq_maxlen = ifqmaxlen;
+ if_attach(ifp);
+ bpfattach(ifp, DLT_NULL, sizeof(u_int32_t));
+ return (0);
+}
+
+static void
+faith_clone_destroy(ifp)
+ struct ifnet *ifp;
+{
+ struct faith_softc *sc = ifp->if_softc;
+
+ bpfdetach(ifp);
+ if_detach(ifp);
+ if_free(ifp);
+ free(sc, M_FAITH);
+}
+
+int
+faithoutput(ifp, m, dst, ro)
+ struct ifnet *ifp;
+ struct mbuf *m;
+ struct sockaddr *dst;
+ struct route *ro;
+{
+ int isr;
+ u_int32_t af;
+ struct rtentry *rt = NULL;
+
+ M_ASSERTPKTHDR(m);
+
+ if (ro != NULL)
+ rt = ro->ro_rt;
+ /* BPF writes need to be handled specially. */
+ if (dst->sa_family == AF_UNSPEC) {
+ bcopy(dst->sa_data, &af, sizeof(af));
+ dst->sa_family = af;
+ }
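+	/*
+	 * (Illustrative note: a write(2) on /dev/bpf attached to this
+	 * DLT_NULL interface arrives here with sa_family set to
+	 * AF_UNSPEC and the real address family in the first bytes of
+	 * sa_data, which the copy above restores.)
+	 */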
+
+ if (bpf_peers_present(ifp->if_bpf)) {
+ af = dst->sa_family;
+ bpf_mtap2(ifp->if_bpf, &af, sizeof(af), m);
+ }
+
+ if (rt && rt->rt_flags & (RTF_REJECT|RTF_BLACKHOLE)) {
+ m_freem(m);
+ return (rt->rt_flags & RTF_BLACKHOLE ? 0 :
+ rt->rt_flags & RTF_HOST ? EHOSTUNREACH : ENETUNREACH);
+ }
+ ifp->if_opackets++;
+ ifp->if_obytes += m->m_pkthdr.len;
+ switch (dst->sa_family) {
+#ifdef INET
+ case AF_INET:
+ isr = NETISR_IP;
+ break;
+#endif
+#ifdef INET6
+ case AF_INET6:
+ isr = NETISR_IPV6;
+ break;
+#endif
+ default:
+ m_freem(m);
+ return EAFNOSUPPORT;
+ }
+
+ /* XXX do we need more sanity checks? */
+
+ m->m_pkthdr.rcvif = ifp;
+ ifp->if_ipackets++;
+ ifp->if_ibytes += m->m_pkthdr.len;
+ netisr_dispatch(isr, m);
+ return (0);
+}
+
+/* ARGSUSED */
+static void
+faithrtrequest(cmd, rt, info)
+ int cmd;
+ struct rtentry *rt;
+ struct rt_addrinfo *info;
+{
+ RT_LOCK_ASSERT(rt);
+ rt->rt_rmx.rmx_mtu = rt->rt_ifp->if_mtu;
+}
+
+/*
+ * Process an ioctl request.
+ */
+/* ARGSUSED */
+static int
+faithioctl(ifp, cmd, data)
+ struct ifnet *ifp;
+ u_long cmd;
+ caddr_t data;
+{
+ struct ifaddr *ifa;
+ struct ifreq *ifr = (struct ifreq *)data;
+ int error = 0;
+
+ switch (cmd) {
+
+ case SIOCSIFADDR:
+ ifp->if_flags |= IFF_UP;
+ ifp->if_drv_flags |= IFF_DRV_RUNNING;
+ ifa = (struct ifaddr *)data;
+ ifa->ifa_rtrequest = faithrtrequest;
+ /*
+ * Everything else is done at a higher level.
+ */
+ break;
+
+ case SIOCADDMULTI:
+ case SIOCDELMULTI:
+ if (ifr == 0) {
+ error = EAFNOSUPPORT; /* XXX */
+ break;
+ }
+ switch (ifr->ifr_addr.sa_family) {
+#ifdef INET
+ case AF_INET:
+ break;
+#endif
+#ifdef INET6
+ case AF_INET6:
+ break;
+#endif
+
+ default:
+ error = EAFNOSUPPORT;
+ break;
+ }
+ break;
+
+#ifdef SIOCSIFMTU
+ case SIOCSIFMTU:
+ ifp->if_mtu = ifr->ifr_mtu;
+ break;
+#endif
+
+ case SIOCSIFFLAGS:
+ break;
+
+ default:
+ error = EINVAL;
+ }
+ return (error);
+}
+
+#ifdef INET6
+/*
+ * XXX could be slow
+ * XXX could be layer violation to call sys/net from sys/netinet6
+ */
+static int
+faithprefix(in6)
+ struct in6_addr *in6;
+{
+ struct rtentry *rt;
+ struct sockaddr_in6 sin6;
+ int ret;
+
+ if (V_ip6_keepfaith == 0)
+ return 0;
+
+ bzero(&sin6, sizeof(sin6));
+ sin6.sin6_family = AF_INET6;
+ sin6.sin6_len = sizeof(struct sockaddr_in6);
+ sin6.sin6_addr = *in6;
+ rt = rtalloc1((struct sockaddr *)&sin6, 0, 0UL);
+ if (rt && rt->rt_ifp && rt->rt_ifp->if_type == IFT_FAITH &&
+ (rt->rt_ifp->if_flags & IFF_UP) != 0)
+ ret = 1;
+ else
+ ret = 0;
+ if (rt)
+ RTFREE_LOCKED(rt);
+ return ret;
+}
+#endif
diff --git a/rtems/freebsd/net/if_fddisubr.c b/rtems/freebsd/net/if_fddisubr.c
new file mode 100644
index 00000000..f989ed98
--- /dev/null
+++ b/rtems/freebsd/net/if_fddisubr.c
@@ -0,0 +1,800 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 1995, 1996
+ * Matt Thomas <matt@3am-software.com>. All rights reserved.
+ * Copyright (c) 1982, 1989, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: if_ethersubr.c,v 1.5 1994/12/13 22:31:45 wollman Exp
+ * $FreeBSD$
+ */
+
+#include <rtems/freebsd/local/opt_atalk.h>
+#include <rtems/freebsd/local/opt_inet.h>
+#include <rtems/freebsd/local/opt_inet6.h>
+#include <rtems/freebsd/local/opt_ipx.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/module.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/sockio.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/if_dl.h>
+#include <rtems/freebsd/net/if_llc.h>
+#include <rtems/freebsd/net/if_types.h>
+#include <rtems/freebsd/net/if_llatbl.h>
+
+#include <rtems/freebsd/net/ethernet.h>
+#include <rtems/freebsd/net/netisr.h>
+#include <rtems/freebsd/net/route.h>
+#include <rtems/freebsd/net/bpf.h>
+#include <rtems/freebsd/net/fddi.h>
+
+#if defined(INET) || defined(INET6)
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/in_var.h>
+#include <rtems/freebsd/netinet/if_ether.h>
+#endif
+#ifdef INET6
+#include <rtems/freebsd/netinet6/nd6.h>
+#endif
+
+#ifdef IPX
+#include <rtems/freebsd/netipx/ipx.h>
+#include <rtems/freebsd/netipx/ipx_if.h>
+#endif
+
+#ifdef DECNET
+#include <rtems/freebsd/netdnet/dn.h>
+#endif
+
+#ifdef NETATALK
+#include <rtems/freebsd/netatalk/at.h>
+#include <rtems/freebsd/netatalk/at_var.h>
+#include <rtems/freebsd/netatalk/at_extern.h>
+
+extern u_char at_org_code[ 3 ];
+extern u_char aarp_org_code[ 3 ];
+#endif /* NETATALK */
+
+#include <rtems/freebsd/security/mac/mac_framework.h>
+
+static const u_char fddibroadcastaddr[FDDI_ADDR_LEN] =
+ { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
+
+static int fddi_resolvemulti(struct ifnet *, struct sockaddr **,
+ struct sockaddr *);
+static int fddi_output(struct ifnet *, struct mbuf *, struct sockaddr *,
+ struct route *);
+static void fddi_input(struct ifnet *ifp, struct mbuf *m);
+
+#define senderr(e) do { error = (e); goto bad; } while (0)
+
+/*
+ * FDDI output routine.
+ * Encapsulate a packet of type family for the local net.
+ * Use trailer local net encapsulation if enough data in first
+ * packet leaves a multiple of 512 bytes of data in remainder.
+ * Assumes that ifp is actually pointer to arpcom structure.
+ */
+static int
+fddi_output(ifp, m, dst, ro)
+ struct ifnet *ifp;
+ struct mbuf *m;
+ struct sockaddr *dst;
+ struct route *ro;
+{
+ u_int16_t type;
+ int loop_copy = 0, error = 0, hdrcmplt = 0;
+ u_char esrc[FDDI_ADDR_LEN], edst[FDDI_ADDR_LEN];
+ struct fddi_header *fh;
+#if defined(INET) || defined(INET6)
+ struct llentry *lle;
+#endif
+
+#ifdef MAC
+ error = mac_ifnet_check_transmit(ifp, m);
+ if (error)
+ senderr(error);
+#endif
+
+ if (ifp->if_flags & IFF_MONITOR)
+ senderr(ENETDOWN);
+ if (!((ifp->if_flags & IFF_UP) &&
+ (ifp->if_drv_flags & IFF_DRV_RUNNING)))
+ senderr(ENETDOWN);
+ getmicrotime(&ifp->if_lastchange);
+
+ switch (dst->sa_family) {
+#ifdef INET
+ case AF_INET: {
+ struct rtentry *rt0 = NULL;
+
+ if (ro != NULL)
+ rt0 = ro->ro_rt;
+ error = arpresolve(ifp, rt0, m, dst, edst, &lle);
+ if (error)
+ return (error == EWOULDBLOCK ? 0 : error);
+ type = htons(ETHERTYPE_IP);
+ break;
+ }
+ case AF_ARP:
+ {
+ struct arphdr *ah;
+ ah = mtod(m, struct arphdr *);
+ ah->ar_hrd = htons(ARPHRD_ETHER);
+
+ loop_copy = -1; /* if this is for us, don't do it */
+
+ switch (ntohs(ah->ar_op)) {
+ case ARPOP_REVREQUEST:
+ case ARPOP_REVREPLY:
+ type = htons(ETHERTYPE_REVARP);
+ break;
+ case ARPOP_REQUEST:
+ case ARPOP_REPLY:
+ default:
+ type = htons(ETHERTYPE_ARP);
+ break;
+ }
+
+ if (m->m_flags & M_BCAST)
+ bcopy(ifp->if_broadcastaddr, edst, FDDI_ADDR_LEN);
+ else
+ bcopy(ar_tha(ah), edst, FDDI_ADDR_LEN);
+
+ }
+ break;
+#endif /* INET */
+#ifdef INET6
+ case AF_INET6:
+ error = nd6_storelladdr(ifp, m, dst, (u_char *)edst, &lle);
+ if (error)
+ return (error); /* Something bad happened */
+ type = htons(ETHERTYPE_IPV6);
+ break;
+#endif /* INET6 */
+#ifdef IPX
+ case AF_IPX:
+ type = htons(ETHERTYPE_IPX);
+ bcopy((caddr_t)&(((struct sockaddr_ipx *)dst)->sipx_addr.x_host),
+ (caddr_t)edst, FDDI_ADDR_LEN);
+ break;
+#endif /* IPX */
+#ifdef NETATALK
+ case AF_APPLETALK: {
+ struct at_ifaddr *aa;
+ if (!aarpresolve(ifp, m, (struct sockaddr_at *)dst, edst))
+ return (0);
+ /*
+ * ifaddr is the first thing in at_ifaddr
+ */
+ if ((aa = at_ifawithnet( (struct sockaddr_at *)dst)) == 0)
+ goto bad;
+
+ /*
+		 * In the phase 2 case, we need to prepend an mbuf for the llc
+		 * header; M_PREPEND() below extends the chain (allocating a
+		 * new mbuf if there is no leading space) and the header is
+		 * then copied into it.
+ */
+ if (aa->aa_flags & AFA_PHASE2) {
+ struct llc llc;
+
+ M_PREPEND(m, LLC_SNAPFRAMELEN, M_WAIT);
+ llc.llc_dsap = llc.llc_ssap = LLC_SNAP_LSAP;
+ llc.llc_control = LLC_UI;
+ bcopy(at_org_code, llc.llc_snap.org_code, sizeof(at_org_code));
+ llc.llc_snap.ether_type = htons(ETHERTYPE_AT);
+ bcopy(&llc, mtod(m, caddr_t), LLC_SNAPFRAMELEN);
+ type = 0;
+ } else {
+ type = htons(ETHERTYPE_AT);
+ }
+ ifa_free(&aa->aa_ifa);
+ break;
+ }
+#endif /* NETATALK */
+
+ case pseudo_AF_HDRCMPLT:
+ {
+ struct ether_header *eh;
+ hdrcmplt = 1;
+ eh = (struct ether_header *)dst->sa_data;
+ bcopy((caddr_t)eh->ether_shost, (caddr_t)esrc, FDDI_ADDR_LEN);
+ /* FALLTHROUGH */
+ }
+
+ case AF_UNSPEC:
+ {
+ struct ether_header *eh;
+ loop_copy = -1;
+ eh = (struct ether_header *)dst->sa_data;
+ bcopy((caddr_t)eh->ether_dhost, (caddr_t)edst, FDDI_ADDR_LEN);
+ if (*edst & 1)
+ m->m_flags |= (M_BCAST|M_MCAST);
+ type = eh->ether_type;
+ break;
+ }
+
+ case AF_IMPLINK:
+ {
+ fh = mtod(m, struct fddi_header *);
+ error = EPROTONOSUPPORT;
+ switch (fh->fddi_fc & (FDDIFC_C|FDDIFC_L|FDDIFC_F)) {
+ case FDDIFC_LLC_ASYNC: {
+ /* legal priorities are 0 through 7 */
+ if ((fh->fddi_fc & FDDIFC_Z) > 7)
+ goto bad;
+ break;
+ }
+ case FDDIFC_LLC_SYNC: {
+ /* FDDIFC_Z bits reserved, must be zero */
+ if (fh->fddi_fc & FDDIFC_Z)
+ goto bad;
+ break;
+ }
+ case FDDIFC_SMT: {
+ /* FDDIFC_Z bits must be non zero */
+ if ((fh->fddi_fc & FDDIFC_Z) == 0)
+ goto bad;
+ break;
+ }
+ default: {
+ /* anything else is too dangerous */
+ goto bad;
+ }
+ }
+ error = 0;
+ if (fh->fddi_dhost[0] & 1)
+ m->m_flags |= (M_BCAST|M_MCAST);
+ goto queue_it;
+ }
+ default:
+ if_printf(ifp, "can't handle af%d\n", dst->sa_family);
+ senderr(EAFNOSUPPORT);
+ }
+
+ /*
+ * Add LLC header.
+ */
+ if (type != 0) {
+ struct llc *l;
+ M_PREPEND(m, LLC_SNAPFRAMELEN, M_DONTWAIT);
+ if (m == 0)
+ senderr(ENOBUFS);
+ l = mtod(m, struct llc *);
+ l->llc_control = LLC_UI;
+ l->llc_dsap = l->llc_ssap = LLC_SNAP_LSAP;
+ l->llc_snap.org_code[0] =
+ l->llc_snap.org_code[1] =
+ l->llc_snap.org_code[2] = 0;
+ l->llc_snap.ether_type = htons(type);
+ }
+
+ /*
+ * Add local net header. If no space in first mbuf,
+ * allocate another.
+ */
+ M_PREPEND(m, FDDI_HDR_LEN, M_DONTWAIT);
+ if (m == 0)
+ senderr(ENOBUFS);
+ fh = mtod(m, struct fddi_header *);
+ fh->fddi_fc = FDDIFC_LLC_ASYNC|FDDIFC_LLC_PRIO4;
+ bcopy((caddr_t)edst, (caddr_t)fh->fddi_dhost, FDDI_ADDR_LEN);
+ queue_it:
+ if (hdrcmplt)
+ bcopy((caddr_t)esrc, (caddr_t)fh->fddi_shost, FDDI_ADDR_LEN);
+ else
+ bcopy(IF_LLADDR(ifp), (caddr_t)fh->fddi_shost,
+ FDDI_ADDR_LEN);
+
+ /*
+ * If a simplex interface, and the packet is being sent to our
+ * Ethernet address or a broadcast address, loopback a copy.
+ * XXX To make a simplex device behave exactly like a duplex
+ * device, we should copy in the case of sending to our own
+ * ethernet address (thus letting the original actually appear
+ * on the wire). However, we don't do that here for security
+ * reasons and compatibility with the original behavior.
+ */
+ if ((ifp->if_flags & IFF_SIMPLEX) && (loop_copy != -1)) {
+ if ((m->m_flags & M_BCAST) || (loop_copy > 0)) {
+ struct mbuf *n;
+ n = m_copy(m, 0, (int)M_COPYALL);
+ (void) if_simloop(ifp, n, dst->sa_family,
+ FDDI_HDR_LEN);
+ } else if (bcmp(fh->fddi_dhost, fh->fddi_shost,
+ FDDI_ADDR_LEN) == 0) {
+ (void) if_simloop(ifp, m, dst->sa_family,
+ FDDI_HDR_LEN);
+ return (0); /* XXX */
+ }
+ }
+
+ error = (ifp->if_transmit)(ifp, m);
+ if (error)
+ ifp->if_oerrors++;
+
+ return (error);
+
+bad:
+ ifp->if_oerrors++;
+ if (m)
+ m_freem(m);
+ return (error);
+}
+
+/*
+ * Process a received FDDI packet.
+ */
+static void
+fddi_input(ifp, m)
+ struct ifnet *ifp;
+ struct mbuf *m;
+{
+ int isr;
+ struct llc *l;
+ struct fddi_header *fh;
+
+ /*
+ * Do consistency checks to verify assumptions
+ * made by code past this point.
+ */
+ if ((m->m_flags & M_PKTHDR) == 0) {
+ if_printf(ifp, "discard frame w/o packet header\n");
+ ifp->if_ierrors++;
+ m_freem(m);
+ return;
+ }
+ if (m->m_pkthdr.rcvif == NULL) {
+ if_printf(ifp, "discard frame w/o interface pointer\n");
+ ifp->if_ierrors++;
+ m_freem(m);
+ return;
+ }
+
+ m = m_pullup(m, FDDI_HDR_LEN);
+ if (m == NULL) {
+ ifp->if_ierrors++;
+ goto dropanyway;
+ }
+ fh = mtod(m, struct fddi_header *);
+ m->m_pkthdr.header = (void *)fh;
+
+ /*
+ * Discard packet if interface is not up.
+ */
+ if (!((ifp->if_flags & IFF_UP) &&
+ (ifp->if_drv_flags & IFF_DRV_RUNNING)))
+ goto dropanyway;
+
+ /*
+ * Give bpf a chance at the packet.
+ */
+ BPF_MTAP(ifp, m);
+
+ /*
+ * Interface marked for monitoring; discard packet.
+ */
+ if (ifp->if_flags & IFF_MONITOR) {
+ m_freem(m);
+ return;
+ }
+
+#ifdef MAC
+ mac_ifnet_create_mbuf(ifp, m);
+#endif
+
+ /*
+ * Update interface statistics.
+ */
+ ifp->if_ibytes += m->m_pkthdr.len;
+ getmicrotime(&ifp->if_lastchange);
+
+ /*
+ * Discard non local unicast packets when interface
+ * is in promiscuous mode.
+ */
+ if ((ifp->if_flags & IFF_PROMISC) && ((fh->fddi_dhost[0] & 1) == 0) &&
+ (bcmp(IF_LLADDR(ifp), (caddr_t)fh->fddi_dhost,
+ FDDI_ADDR_LEN) != 0))
+ goto dropanyway;
+
+ /*
+ * Set mbuf flags for bcast/mcast.
+ */
+ if (fh->fddi_dhost[0] & 1) {
+ if (bcmp(ifp->if_broadcastaddr, fh->fddi_dhost,
+ FDDI_ADDR_LEN) == 0)
+ m->m_flags |= M_BCAST;
+ else
+ m->m_flags |= M_MCAST;
+ ifp->if_imcasts++;
+ }
+
+#ifdef M_LINK0
+ /*
+ * If this has a LLC priority of 0, then mark it so upper
+ * layers have a hint that it really came via a FDDI/Ethernet
+ * bridge.
+ */
+ if ((fh->fddi_fc & FDDIFC_LLC_PRIO7) == FDDIFC_LLC_PRIO0)
+ m->m_flags |= M_LINK0;
+#endif
+
+ /* Strip off FDDI header. */
+ m_adj(m, FDDI_HDR_LEN);
+
+ m = m_pullup(m, LLC_SNAPFRAMELEN);
+ if (m == 0) {
+ ifp->if_ierrors++;
+ goto dropanyway;
+ }
+ l = mtod(m, struct llc *);
+
+ switch (l->llc_dsap) {
+ case LLC_SNAP_LSAP:
+ {
+ u_int16_t type;
+ if ((l->llc_control != LLC_UI) ||
+ (l->llc_ssap != LLC_SNAP_LSAP)) {
+ ifp->if_noproto++;
+ goto dropanyway;
+ }
+#ifdef NETATALK
+ if (bcmp(&(l->llc_snap.org_code)[0], at_org_code,
+ sizeof(at_org_code)) == 0 &&
+ ntohs(l->llc_snap.ether_type) == ETHERTYPE_AT) {
+ isr = NETISR_ATALK2;
+ m_adj(m, LLC_SNAPFRAMELEN);
+ break;
+ }
+
+ if (bcmp(&(l->llc_snap.org_code)[0], aarp_org_code,
+ sizeof(aarp_org_code)) == 0 &&
+ ntohs(l->llc_snap.ether_type) == ETHERTYPE_AARP) {
+ m_adj(m, LLC_SNAPFRAMELEN);
+ isr = NETISR_AARP;
+ break;
+ }
+#endif /* NETATALK */
+ if (l->llc_snap.org_code[0] != 0 ||
+ l->llc_snap.org_code[1] != 0 ||
+ l->llc_snap.org_code[2] != 0) {
+ ifp->if_noproto++;
+ goto dropanyway;
+ }
+
+ type = ntohs(l->llc_snap.ether_type);
+ m_adj(m, LLC_SNAPFRAMELEN);
+
+ switch (type) {
+#ifdef INET
+ case ETHERTYPE_IP:
+ if ((m = ip_fastforward(m)) == NULL)
+ return;
+ isr = NETISR_IP;
+ break;
+
+ case ETHERTYPE_ARP:
+ if (ifp->if_flags & IFF_NOARP)
+ goto dropanyway;
+ isr = NETISR_ARP;
+ break;
+#endif
+#ifdef INET6
+ case ETHERTYPE_IPV6:
+ isr = NETISR_IPV6;
+ break;
+#endif
+#ifdef IPX
+ case ETHERTYPE_IPX:
+ isr = NETISR_IPX;
+ break;
+#endif
+#ifdef DECNET
+ case ETHERTYPE_DECNET:
+ isr = NETISR_DECNET;
+ break;
+#endif
+#ifdef NETATALK
+ case ETHERTYPE_AT:
+ isr = NETISR_ATALK1;
+ break;
+ case ETHERTYPE_AARP:
+ isr = NETISR_AARP;
+ break;
+#endif /* NETATALK */
+ default:
+ /* printf("fddi_input: unknown protocol 0x%x\n", type); */
+ ifp->if_noproto++;
+ goto dropanyway;
+ }
+ break;
+ }
+
+ default:
+ /* printf("fddi_input: unknown dsap 0x%x\n", l->llc_dsap); */
+ ifp->if_noproto++;
+ goto dropanyway;
+ }
+ netisr_dispatch(isr, m);
+ return;
+
+dropanyway:
+ ifp->if_iqdrops++;
+ if (m)
+ m_freem(m);
+ return;
+}
+
+/*
+ * Perform common duties while attaching to interface list
+ */
+void
+fddi_ifattach(ifp, lla, bpf)
+ struct ifnet *ifp;
+ const u_int8_t *lla;
+ int bpf;
+{
+ struct ifaddr *ifa;
+ struct sockaddr_dl *sdl;
+
+ ifp->if_type = IFT_FDDI;
+ ifp->if_addrlen = FDDI_ADDR_LEN;
+ ifp->if_hdrlen = 21;
+
+ if_attach(ifp); /* Must be called before additional assignments */
+
+ ifp->if_mtu = FDDIMTU;
+ ifp->if_output = fddi_output;
+ ifp->if_input = fddi_input;
+ ifp->if_resolvemulti = fddi_resolvemulti;
+ ifp->if_broadcastaddr = fddibroadcastaddr;
+ ifp->if_baudrate = 100000000;
+#ifdef IFF_NOTRAILERS
+ ifp->if_flags |= IFF_NOTRAILERS;
+#endif
+ ifa = ifp->if_addr;
+ KASSERT(ifa != NULL, ("%s: no lladdr!\n", __func__));
+
+ sdl = (struct sockaddr_dl *)ifa->ifa_addr;
+ sdl->sdl_type = IFT_FDDI;
+ sdl->sdl_alen = ifp->if_addrlen;
+ bcopy(lla, LLADDR(sdl), ifp->if_addrlen);
+
+ if (bpf)
+ bpfattach(ifp, DLT_FDDI, FDDI_HDR_LEN);
+
+ return;
+}
+
+void
+fddi_ifdetach(ifp, bpf)
+ struct ifnet *ifp;
+ int bpf;
+{
+
+ if (bpf)
+ bpfdetach(ifp);
+
+ if_detach(ifp);
+
+ return;
+}
+
+int
+fddi_ioctl (ifp, command, data)
+ struct ifnet *ifp;
+ u_long command;
+ caddr_t data;
+{
+ struct ifaddr *ifa;
+ struct ifreq *ifr;
+ int error;
+
+ ifa = (struct ifaddr *) data;
+ ifr = (struct ifreq *) data;
+ error = 0;
+
+ switch (command) {
+ case SIOCSIFADDR:
+ ifp->if_flags |= IFF_UP;
+
+ switch (ifa->ifa_addr->sa_family) {
+#ifdef INET
+ case AF_INET: /* before arpwhohas */
+ ifp->if_init(ifp->if_softc);
+ arp_ifinit(ifp, ifa);
+ break;
+#endif
+#ifdef IPX
+ /*
+ * XXX - This code is probably wrong
+ */
+ case AF_IPX: {
+ struct ipx_addr *ina;
+
+ ina = &(IA_SIPX(ifa)->sipx_addr);
+
+ if (ipx_nullhost(*ina)) {
+ ina->x_host = *(union ipx_host *)
+ IF_LLADDR(ifp);
+ } else {
+ bcopy((caddr_t) ina->x_host.c_host,
+ (caddr_t) IF_LLADDR(ifp),
+ ETHER_ADDR_LEN);
+ }
+
+ /*
+ * Set new address
+ */
+ ifp->if_init(ifp->if_softc);
+ }
+ break;
+#endif
+ default:
+ ifp->if_init(ifp->if_softc);
+ break;
+ }
+ break;
+ case SIOCGIFADDR: {
+ struct sockaddr *sa;
+
+ sa = (struct sockaddr *) & ifr->ifr_data;
+ bcopy(IF_LLADDR(ifp),
+ (caddr_t) sa->sa_data, FDDI_ADDR_LEN);
+
+ }
+ break;
+ case SIOCSIFMTU:
+ /*
+ * Set the interface MTU.
+ */
+ if (ifr->ifr_mtu > FDDIMTU) {
+ error = EINVAL;
+ } else {
+ ifp->if_mtu = ifr->ifr_mtu;
+ }
+ break;
+ default:
+ error = EINVAL;
+ break;
+ }
+
+ return (error);
+}
+
+static int
+fddi_resolvemulti(ifp, llsa, sa)
+ struct ifnet *ifp;
+ struct sockaddr **llsa;
+ struct sockaddr *sa;
+{
+ struct sockaddr_dl *sdl;
+#ifdef INET
+ struct sockaddr_in *sin;
+#endif
+#ifdef INET6
+ struct sockaddr_in6 *sin6;
+#endif
+ u_char *e_addr;
+
+ switch(sa->sa_family) {
+ case AF_LINK:
+ /*
+ * No mapping needed. Just check that it's a valid MC address.
+ */
+ sdl = (struct sockaddr_dl *)sa;
+ e_addr = LLADDR(sdl);
+ if ((e_addr[0] & 1) != 1)
+ return (EADDRNOTAVAIL);
+ *llsa = 0;
+ return (0);
+
+#ifdef INET
+ case AF_INET:
+ sin = (struct sockaddr_in *)sa;
+ if (!IN_MULTICAST(ntohl(sin->sin_addr.s_addr)))
+ return (EADDRNOTAVAIL);
+ sdl = malloc(sizeof *sdl, M_IFMADDR,
+ M_NOWAIT | M_ZERO);
+ if (sdl == NULL)
+ return (ENOMEM);
+ sdl->sdl_len = sizeof *sdl;
+ sdl->sdl_family = AF_LINK;
+ sdl->sdl_index = ifp->if_index;
+ sdl->sdl_type = IFT_FDDI;
+ sdl->sdl_nlen = 0;
+ sdl->sdl_alen = FDDI_ADDR_LEN;
+ sdl->sdl_slen = 0;
+ e_addr = LLADDR(sdl);
+ ETHER_MAP_IP_MULTICAST(&sin->sin_addr, e_addr);
+ *llsa = (struct sockaddr *)sdl;
+ return (0);
+#endif
+#ifdef INET6
+ case AF_INET6:
+ sin6 = (struct sockaddr_in6 *)sa;
+ if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
+ /*
+ * An IP6 address of 0 means listen to all
+			 * of the Ethernet multicast addresses used for IP6.
+ * (This is used for multicast routers.)
+ */
+ ifp->if_flags |= IFF_ALLMULTI;
+ *llsa = 0;
+ return (0);
+ }
+ if (!IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr))
+ return (EADDRNOTAVAIL);
+ sdl = malloc(sizeof *sdl, M_IFMADDR,
+ M_NOWAIT | M_ZERO);
+ if (sdl == NULL)
+ return (ENOMEM);
+ sdl->sdl_len = sizeof *sdl;
+ sdl->sdl_family = AF_LINK;
+ sdl->sdl_index = ifp->if_index;
+ sdl->sdl_type = IFT_FDDI;
+ sdl->sdl_nlen = 0;
+ sdl->sdl_alen = FDDI_ADDR_LEN;
+ sdl->sdl_slen = 0;
+ e_addr = LLADDR(sdl);
+ ETHER_MAP_IPV6_MULTICAST(&sin6->sin6_addr, e_addr);
+ *llsa = (struct sockaddr *)sdl;
+ return (0);
+#endif
+
+ default:
+ /*
+ * Well, the text isn't quite right, but it's the name
+ * that counts...
+ */
+ return (EAFNOSUPPORT);
+ }
+
+ return (0);
+}
+
+static moduledata_t fddi_mod = {
+ "fddi", /* module name */
+ NULL, /* event handler */
+ 0 /* extra data */
+};
+
+DECLARE_MODULE(fddi, fddi_mod, SI_SUB_PSEUDO, SI_ORDER_ANY);
+MODULE_VERSION(fddi, 1);
diff --git a/rtems/freebsd/net/if_fwsubr.c b/rtems/freebsd/net/if_fwsubr.c
new file mode 100644
index 00000000..5d0e904d
--- /dev/null
+++ b/rtems/freebsd/net/if_fwsubr.c
@@ -0,0 +1,853 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 2004 Doug Rabson
+ * Copyright (c) 1982, 1989, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#include <rtems/freebsd/local/opt_inet.h>
+#include <rtems/freebsd/local/opt_inet6.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/module.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/sockio.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/netisr.h>
+#include <rtems/freebsd/net/route.h>
+#include <rtems/freebsd/net/if_llc.h>
+#include <rtems/freebsd/net/if_dl.h>
+#include <rtems/freebsd/net/if_types.h>
+#include <rtems/freebsd/net/bpf.h>
+#include <rtems/freebsd/net/firewire.h>
+#include <rtems/freebsd/net/if_llatbl.h>
+
+#if defined(INET) || defined(INET6)
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/in_var.h>
+#include <rtems/freebsd/netinet/if_ether.h>
+#endif
+#ifdef INET6
+#include <rtems/freebsd/netinet6/nd6.h>
+#endif
+
+#include <rtems/freebsd/security/mac/mac_framework.h>
+
+MALLOC_DEFINE(M_FWCOM, "fw_com", "firewire interface internals");
+
+struct fw_hwaddr firewire_broadcastaddr = {
+ 0xffffffff,
+ 0xffffffff,
+ 0xff,
+ 0xff,
+ 0xffff,
+ 0xffffffff
+};
+
+static int
+firewire_output(struct ifnet *ifp, struct mbuf *m, struct sockaddr *dst,
+ struct route *ro)
+{
+ struct fw_com *fc = IFP2FWC(ifp);
+ int error, type;
+ struct m_tag *mtag;
+ union fw_encap *enc;
+ struct fw_hwaddr *destfw;
+ uint8_t speed;
+ uint16_t psize, fsize, dsize;
+ struct mbuf *mtail;
+ int unicast, dgl, foff;
+ static int next_dgl;
+#if defined(INET) || defined(INET6)
+ struct llentry *lle;
+#endif
+
+#ifdef MAC
+ error = mac_ifnet_check_transmit(ifp, m);
+ if (error)
+ goto bad;
+#endif
+
+ if (!((ifp->if_flags & IFF_UP) &&
+ (ifp->if_drv_flags & IFF_DRV_RUNNING))) {
+ error = ENETDOWN;
+ goto bad;
+ }
+
+ /*
+ * For unicast, we make a tag to store the lladdr of the
+ * destination. This might not be the first time we have seen
+ * the packet (for instance, the arp code might be trying to
+ * re-send it after receiving an arp reply) so we only
+ * allocate a tag if there isn't one there already. For
+ * multicast, we will eventually use a different tag to store
+ * the channel number.
+ */
+ unicast = !(m->m_flags & (M_BCAST | M_MCAST));
+ if (unicast) {
+ mtag = m_tag_locate(m, MTAG_FIREWIRE, MTAG_FIREWIRE_HWADDR, NULL);
+ if (!mtag) {
+ mtag = m_tag_alloc(MTAG_FIREWIRE, MTAG_FIREWIRE_HWADDR,
+ sizeof (struct fw_hwaddr), M_NOWAIT);
+ if (!mtag) {
+ error = ENOMEM;
+ goto bad;
+ }
+ m_tag_prepend(m, mtag);
+ }
+ destfw = (struct fw_hwaddr *)(mtag + 1);
+ } else {
+ destfw = 0;
+ }
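+	/*
+	 * (Illustrative note: the consuming side is the link driver's
+	 * transmit path, which recovers the destination with
+	 * m_tag_locate(m, MTAG_FIREWIRE, MTAG_FIREWIRE_HWADDR, NULL)
+	 * and reads the struct fw_hwaddr stored behind the tag header,
+	 * exactly as written here.)
+	 */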
+
+ switch (dst->sa_family) {
+#ifdef INET
+ case AF_INET:
+ /*
+ * Only bother with arp for unicast. Allocation of
+ * channels etc. for firewire is quite different and
+ * doesn't fit into the arp model.
+ */
+ if (unicast) {
+ error = arpresolve(ifp, ro ? ro->ro_rt : NULL, m, dst, (u_char *) destfw, &lle);
+ if (error)
+ return (error == EWOULDBLOCK ? 0 : error);
+ }
+ type = ETHERTYPE_IP;
+ break;
+
+ case AF_ARP:
+ {
+ struct arphdr *ah;
+ ah = mtod(m, struct arphdr *);
+ ah->ar_hrd = htons(ARPHRD_IEEE1394);
+ type = ETHERTYPE_ARP;
+ if (unicast)
+ *destfw = *(struct fw_hwaddr *) ar_tha(ah);
+
+ /*
+ * The standard arp code leaves a hole for the target
+ * hardware address which we need to close up.
+ */
+ bcopy(ar_tpa(ah), ar_tha(ah), ah->ar_pln);
+ m_adj(m, -ah->ar_hln);
+ break;
+ }
+#endif
+
+#ifdef INET6
+ case AF_INET6:
+ if (unicast) {
+ error = nd6_storelladdr(fc->fc_ifp, m, dst,
+ (u_char *) destfw, &lle);
+ if (error)
+ return (error);
+ }
+ type = ETHERTYPE_IPV6;
+ break;
+#endif
+
+ default:
+ if_printf(ifp, "can't handle af%d\n", dst->sa_family);
+ error = EAFNOSUPPORT;
+ goto bad;
+ }
+
+ /*
+ * Let BPF tap off a copy before we encapsulate.
+ */
+ if (bpf_peers_present(ifp->if_bpf)) {
+ struct fw_bpfhdr h;
+ if (unicast)
+ bcopy(destfw, h.firewire_dhost, 8);
+ else
+ bcopy(&firewire_broadcastaddr, h.firewire_dhost, 8);
+ bcopy(&fc->fc_hwaddr, h.firewire_shost, 8);
+ h.firewire_type = htons(type);
+ bpf_mtap2(ifp->if_bpf, &h, sizeof(h), m);
+ }
+
+ /*
+ * Punt on MCAP for now and send all multicast packets on the
+ * broadcast channel.
+ */
+ if (m->m_flags & M_MCAST)
+ m->m_flags |= M_BCAST;
+
+ /*
+ * Figure out what speed to use and what the largest supported
+ * packet size is. For unicast, this is the minimum of what we
+	 * can speak and what they can hear. For broadcast, let's be
+ * conservative and use S100. We could possibly improve that
+ * by examining the bus manager's speed map or similar. We
+ * also reduce the packet size for broadcast to account for
+ * the GASP header.
+ */
+ if (unicast) {
+ speed = min(fc->fc_speed, destfw->sspd);
+ psize = min(512 << speed, 2 << destfw->sender_max_rec);
+ } else {
+ speed = 0;
+ psize = 512 - 2*sizeof(uint32_t);
+ }
+
+ /*
+ * Next, we encapsulate, possibly fragmenting the original
+ * datagram if it won't fit into a single packet.
+ */
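+	/*
+	 * Worked example (illustrative): an S400 unicast peer (speed 2)
+	 * advertising sender_max_rec 8 yields psize = min(512 << 2,
+	 * 2 << 8) = 512 bytes; on the broadcast channel psize is
+	 * 512 - 8 = 504 after the GASP header. A 1500 byte datagram
+	 * then exceeds psize - 4, so with fsize = psize - 8 = 496 the
+	 * loop below emits fragments at offsets 0, 496, 992 and 1488,
+	 * each storing datagram_size = 1499 (i.e. dsize - 1).
+	 */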
+ if (m->m_pkthdr.len <= psize - sizeof(uint32_t)) {
+ /*
+ * No fragmentation is necessary.
+ */
+ M_PREPEND(m, sizeof(uint32_t), M_DONTWAIT);
+ if (!m) {
+ error = ENOBUFS;
+ goto bad;
+ }
+ enc = mtod(m, union fw_encap *);
+ enc->unfrag.ether_type = type;
+ enc->unfrag.lf = FW_ENCAP_UNFRAG;
+ enc->unfrag.reserved = 0;
+
+ /*
+ * Byte swap the encapsulation header manually.
+ */
+ enc->ul[0] = htonl(enc->ul[0]);
+
+ error = (ifp->if_transmit)(ifp, m);
+ return (error);
+ } else {
+ /*
+ * Fragment the datagram, making sure to leave enough
+ * space for the encapsulation header in each packet.
+ */
+ fsize = psize - 2*sizeof(uint32_t);
+ dgl = next_dgl++;
+ dsize = m->m_pkthdr.len;
+ foff = 0;
+ while (m) {
+ if (m->m_pkthdr.len > fsize) {
+ /*
+ * Split off the tail segment from the
+ * datagram, copying our tags over.
+ */
+ mtail = m_split(m, fsize, M_DONTWAIT);
+ m_tag_copy_chain(mtail, m, M_NOWAIT);
+ } else {
+ mtail = 0;
+ }
+
+ /*
+ * Add our encapsulation header to this
+ * fragment and hand it off to the link.
+ */
+ M_PREPEND(m, 2*sizeof(uint32_t), M_DONTWAIT);
+ if (!m) {
+ error = ENOBUFS;
+ goto bad;
+ }
+ enc = mtod(m, union fw_encap *);
+ if (foff == 0) {
+ enc->firstfrag.lf = FW_ENCAP_FIRST;
+ enc->firstfrag.reserved1 = 0;
+ enc->firstfrag.reserved2 = 0;
+ enc->firstfrag.datagram_size = dsize - 1;
+ enc->firstfrag.ether_type = type;
+ enc->firstfrag.dgl = dgl;
+ } else {
+ if (mtail)
+ enc->nextfrag.lf = FW_ENCAP_NEXT;
+ else
+ enc->nextfrag.lf = FW_ENCAP_LAST;
+ enc->nextfrag.reserved1 = 0;
+ enc->nextfrag.reserved2 = 0;
+ enc->nextfrag.reserved3 = 0;
+ enc->nextfrag.datagram_size = dsize - 1;
+ enc->nextfrag.fragment_offset = foff;
+ enc->nextfrag.dgl = dgl;
+ }
+ foff += m->m_pkthdr.len - 2*sizeof(uint32_t);
+
+ /*
+ * Byte swap the encapsulation header manually.
+ */
+ enc->ul[0] = htonl(enc->ul[0]);
+ enc->ul[1] = htonl(enc->ul[1]);
+
+ error = (ifp->if_transmit)(ifp, m);
+ if (error) {
+ if (mtail)
+ m_freem(mtail);
+ return (ENOBUFS);
+ }
+
+ m = mtail;
+ }
+
+ return (0);
+ }
+
+bad:
+ if (m)
+ m_freem(m);
+ return (error);
+}
+
+static struct mbuf *
+firewire_input_fragment(struct fw_com *fc, struct mbuf *m, int src)
+{
+ union fw_encap *enc;
+ struct fw_reass *r;
+ struct mbuf *mf, *mprev;
+ int dsize;
+ int fstart, fend, start, end, islast;
+ uint32_t id;
+
+ /*
+ * Find an existing reassembly buffer or create a new one.
+ */
+ enc = mtod(m, union fw_encap *);
+ id = enc->firstfrag.dgl | (src << 16);
+ STAILQ_FOREACH(r, &fc->fc_frags, fr_link)
+ if (r->fr_id == id)
+ break;
+ if (!r) {
+ r = malloc(sizeof(struct fw_reass), M_TEMP, M_NOWAIT);
+ if (!r) {
+ m_freem(m);
+ return 0;
+ }
+ r->fr_id = id;
+ r->fr_frags = 0;
+ STAILQ_INSERT_HEAD(&fc->fc_frags, r, fr_link);
+ }
+
+ /*
+ * If this fragment overlaps any other fragment, we must discard
+ * the partial reassembly and start again.
+ */
+ if (enc->firstfrag.lf == FW_ENCAP_FIRST)
+ fstart = 0;
+ else
+ fstart = enc->nextfrag.fragment_offset;
+ fend = fstart + m->m_pkthdr.len - 2*sizeof(uint32_t);
+ dsize = enc->nextfrag.datagram_size;
+ islast = (enc->nextfrag.lf == FW_ENCAP_LAST);
+
+ for (mf = r->fr_frags; mf; mf = mf->m_nextpkt) {
+ enc = mtod(mf, union fw_encap *);
+ if (enc->nextfrag.datagram_size != dsize) {
+ /*
+ * This fragment must be from a different
+ * packet.
+ */
+ goto bad;
+ }
+ if (enc->firstfrag.lf == FW_ENCAP_FIRST)
+ start = 0;
+ else
+ start = enc->nextfrag.fragment_offset;
+ end = start + mf->m_pkthdr.len - 2*sizeof(uint32_t);
+ if ((fstart < end && fend > start) ||
+ (islast && enc->nextfrag.lf == FW_ENCAP_LAST)) {
+ /*
+ * Overlap - discard reassembly buffer and start
+ * again with this fragment.
+ */
+ goto bad;
+ }
+ }
+
+ /*
+ * Find where to put this fragment in the list.
+ */
+ for (mf = r->fr_frags, mprev = NULL; mf;
+ mprev = mf, mf = mf->m_nextpkt) {
+ enc = mtod(mf, union fw_encap *);
+ if (enc->firstfrag.lf == FW_ENCAP_FIRST)
+ start = 0;
+ else
+ start = enc->nextfrag.fragment_offset;
+ if (start >= fend)
+ break;
+ }
+
+ /*
+ * If this is a last fragment and we are not adding at the end
+ * of the list, discard the buffer.
+ */
+ if (islast && mprev && mprev->m_nextpkt)
+ goto bad;
+
+ if (mprev) {
+ m->m_nextpkt = mprev->m_nextpkt;
+ mprev->m_nextpkt = m;
+
+ /*
+ * Coalesce forwards and see if we can make a whole
+ * datagram.
+ */
+ enc = mtod(mprev, union fw_encap *);
+ if (enc->firstfrag.lf == FW_ENCAP_FIRST)
+ start = 0;
+ else
+ start = enc->nextfrag.fragment_offset;
+ end = start + mprev->m_pkthdr.len - 2*sizeof(uint32_t);
+ while (end == fstart) {
+ /*
+ * Strip off the encap header from m and
+ * append it to mprev, freeing m.
+ */
+ m_adj(m, 2*sizeof(uint32_t));
+ mprev->m_nextpkt = m->m_nextpkt;
+ mprev->m_pkthdr.len += m->m_pkthdr.len;
+ m_cat(mprev, m);
+
+ if (mprev->m_pkthdr.len == dsize + 1 + 2*sizeof(uint32_t)) {
+ /*
+				 * We have assembled a complete packet;
+				 * we must be finished. Make sure we have
+ * merged the whole chain.
+ */
+ STAILQ_REMOVE(&fc->fc_frags, r, fw_reass, fr_link);
+ free(r, M_TEMP);
+ m = mprev->m_nextpkt;
+ while (m) {
+ mf = m->m_nextpkt;
+ m_freem(m);
+ m = mf;
+ }
+ mprev->m_nextpkt = NULL;
+
+ return (mprev);
+ }
+
+ /*
+ * See if we can continue merging forwards.
+ */
+ end = fend;
+ m = mprev->m_nextpkt;
+ if (m) {
+ enc = mtod(m, union fw_encap *);
+ if (enc->firstfrag.lf == FW_ENCAP_FIRST)
+ fstart = 0;
+ else
+ fstart = enc->nextfrag.fragment_offset;
+ fend = fstart + m->m_pkthdr.len
+ - 2*sizeof(uint32_t);
+ } else {
+ break;
+ }
+ }
+ } else {
+ m->m_nextpkt = 0;
+ r->fr_frags = m;
+ }
+
+ return (0);
+
+bad:
+ while (r->fr_frags) {
+ mf = r->fr_frags;
+ r->fr_frags = mf->m_nextpkt;
+ m_freem(mf);
+ }
+ m->m_nextpkt = 0;
+ r->fr_frags = m;
+
+ return (0);
+}
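+
+/*
+ * (Illustrative trace, continuing the fragmentation example in
+ * firewire_output(): the four fragments of a 1500 byte datagram
+ * arrive with offsets 0/496/992/1488 and are kept on r->fr_frags in
+ * offset order; each time a merge above finds end == fstart the
+ * payloads are concatenated, and reassembly completes when the length
+ * reaches 1508 bytes, i.e. the 1500 byte datagram plus the first
+ * fragment's 8 byte encapsulation header, matching the
+ * dsize + 1 + 2*sizeof(uint32_t) test since datagram_size stores
+ * length - 1.)
+ */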
+
+void
+firewire_input(struct ifnet *ifp, struct mbuf *m, uint16_t src)
+{
+ struct fw_com *fc = IFP2FWC(ifp);
+ union fw_encap *enc;
+ int type, isr;
+
+ /*
+ * The caller has already stripped off the packet header
+ * (stream or wreqb) and marked the mbuf's M_BCAST flag
+ * appropriately. We de-encapsulate the IP packet and pass it
+ * up the line after handling link-level fragmentation.
+ */
+ if (m->m_pkthdr.len < sizeof(uint32_t)) {
+ if_printf(ifp, "discarding frame without "
+ "encapsulation header (len %u pkt len %u)\n",
+ m->m_len, m->m_pkthdr.len);
+ }
+
+ m = m_pullup(m, sizeof(uint32_t));
+ if (m == NULL)
+ return;
+ enc = mtod(m, union fw_encap *);
+
+ /*
+ * Byte swap the encapsulation header manually.
+ */
+ enc->ul[0] = ntohl(enc->ul[0]);
+
+ if (enc->unfrag.lf != 0) {
+ m = m_pullup(m, 2*sizeof(uint32_t));
+ if (!m)
+ return;
+ enc = mtod(m, union fw_encap *);
+ enc->ul[1] = ntohl(enc->ul[1]);
+ m = firewire_input_fragment(fc, m, src);
+ if (!m)
+ return;
+ enc = mtod(m, union fw_encap *);
+ type = enc->firstfrag.ether_type;
+ m_adj(m, 2*sizeof(uint32_t));
+ } else {
+ type = enc->unfrag.ether_type;
+ m_adj(m, sizeof(uint32_t));
+ }
+
+ if (m->m_pkthdr.rcvif == NULL) {
+ if_printf(ifp, "discard frame w/o interface pointer\n");
+ ifp->if_ierrors++;
+ m_freem(m);
+ return;
+ }
+#ifdef DIAGNOSTIC
+ if (m->m_pkthdr.rcvif != ifp) {
+ if_printf(ifp, "Warning, frame marked as received on %s\n",
+ m->m_pkthdr.rcvif->if_xname);
+ }
+#endif
+
+#ifdef MAC
+ /*
+ * Tag the mbuf with an appropriate MAC label before any other
+ * consumers can get to it.
+ */
+ mac_ifnet_create_mbuf(ifp, m);
+#endif
+
+ /*
+ * Give bpf a chance at the packet. The link-level driver
+ * should have left us a tag with the EUID of the sender.
+ */
+ if (bpf_peers_present(ifp->if_bpf)) {
+ struct fw_bpfhdr h;
+ struct m_tag *mtag;
+
+ mtag = m_tag_locate(m, MTAG_FIREWIRE, MTAG_FIREWIRE_SENDER_EUID, 0);
+ if (mtag)
+ bcopy(mtag + 1, h.firewire_shost, 8);
+ else
+			bcopy(&firewire_broadcastaddr, h.firewire_shost, 8);
+ bcopy(&fc->fc_hwaddr, h.firewire_dhost, 8);
+ h.firewire_type = htons(type);
+ bpf_mtap2(ifp->if_bpf, &h, sizeof(h), m);
+ }
+
+ if (ifp->if_flags & IFF_MONITOR) {
+ /*
+ * Interface marked for monitoring; discard packet.
+ */
+ m_freem(m);
+ return;
+ }
+
+ ifp->if_ibytes += m->m_pkthdr.len;
+
+ /* Discard packet if interface is not up */
+ if ((ifp->if_flags & IFF_UP) == 0) {
+ m_freem(m);
+ return;
+ }
+
+ if (m->m_flags & (M_BCAST|M_MCAST))
+ ifp->if_imcasts++;
+
+ switch (type) {
+#ifdef INET
+ case ETHERTYPE_IP:
+ if ((m = ip_fastforward(m)) == NULL)
+ return;
+ isr = NETISR_IP;
+ break;
+
+ case ETHERTYPE_ARP:
+ {
+ struct arphdr *ah;
+ ah = mtod(m, struct arphdr *);
+
+ /*
+ * Adjust the arp packet to insert an empty tha slot.
+ */
+ m->m_len += ah->ar_hln;
+ m->m_pkthdr.len += ah->ar_hln;
+ bcopy(ar_tha(ah), ar_tpa(ah), ah->ar_pln);
+ isr = NETISR_ARP;
+ break;
+ }
+#endif
+
+#ifdef INET6
+ case ETHERTYPE_IPV6:
+ isr = NETISR_IPV6;
+ break;
+#endif
+
+ default:
+ m_freem(m);
+ return;
+ }
+
+ netisr_dispatch(isr, m);
+}
+
+int
+firewire_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
+{
+ struct ifaddr *ifa = (struct ifaddr *) data;
+ struct ifreq *ifr = (struct ifreq *) data;
+ int error = 0;
+
+ switch (command) {
+ case SIOCSIFADDR:
+ ifp->if_flags |= IFF_UP;
+
+ switch (ifa->ifa_addr->sa_family) {
+#ifdef INET
+ case AF_INET:
+ ifp->if_init(ifp->if_softc); /* before arpwhohas */
+ arp_ifinit(ifp, ifa);
+ break;
+#endif
+ default:
+ ifp->if_init(ifp->if_softc);
+ break;
+ }
+ break;
+
+ case SIOCGIFADDR:
+ {
+ struct sockaddr *sa;
+
+ sa = (struct sockaddr *) & ifr->ifr_data;
+ bcopy(&IFP2FWC(ifp)->fc_hwaddr,
+ (caddr_t) sa->sa_data, sizeof(struct fw_hwaddr));
+ }
+ break;
+
+ case SIOCSIFMTU:
+ /*
+ * Set the interface MTU.
+ */
+ if (ifr->ifr_mtu > 1500) {
+ error = EINVAL;
+ } else {
+ ifp->if_mtu = ifr->ifr_mtu;
+ }
+ break;
+ default:
+ error = EINVAL; /* XXX netbsd has ENOTTY??? */
+ break;
+ }
+ return (error);
+}
+
+static int
+firewire_resolvemulti(struct ifnet *ifp, struct sockaddr **llsa,
+ struct sockaddr *sa)
+{
+#ifdef INET
+ struct sockaddr_in *sin;
+#endif
+#ifdef INET6
+ struct sockaddr_in6 *sin6;
+#endif
+
+ switch(sa->sa_family) {
+ case AF_LINK:
+ /*
+ * No mapping needed.
+ */
+ *llsa = 0;
+ return 0;
+
+#ifdef INET
+ case AF_INET:
+ sin = (struct sockaddr_in *)sa;
+ if (!IN_MULTICAST(ntohl(sin->sin_addr.s_addr)))
+ return EADDRNOTAVAIL;
+ *llsa = 0;
+ return 0;
+#endif
+#ifdef INET6
+ case AF_INET6:
+ sin6 = (struct sockaddr_in6 *)sa;
+ if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
+ /*
+ * An IP6 address of 0 means listen to all
+			 * of the Ethernet multicast addresses used for IP6.
+ * (This is used for multicast routers.)
+ */
+ ifp->if_flags |= IFF_ALLMULTI;
+ *llsa = 0;
+ return 0;
+ }
+ if (!IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr))
+ return EADDRNOTAVAIL;
+ *llsa = 0;
+ return 0;
+#endif
+
+ default:
+ /*
+ * Well, the text isn't quite right, but it's the name
+ * that counts...
+ */
+ return EAFNOSUPPORT;
+ }
+}
+
+void
+firewire_ifattach(struct ifnet *ifp, struct fw_hwaddr *llc)
+{
+ struct fw_com *fc = IFP2FWC(ifp);
+ struct ifaddr *ifa;
+ struct sockaddr_dl *sdl;
+ static const char* speeds[] = {
+ "S100", "S200", "S400", "S800",
+ "S1600", "S3200"
+ };
+
+ fc->fc_speed = llc->sspd;
+ STAILQ_INIT(&fc->fc_frags);
+
+ ifp->if_addrlen = sizeof(struct fw_hwaddr);
+ ifp->if_hdrlen = 0;
+ if_attach(ifp);
+ ifp->if_mtu = 1500; /* XXX */
+ ifp->if_output = firewire_output;
+ ifp->if_resolvemulti = firewire_resolvemulti;
+ ifp->if_broadcastaddr = (u_char *) &firewire_broadcastaddr;
+
+ ifa = ifp->if_addr;
+ KASSERT(ifa != NULL, ("%s: no lladdr!\n", __func__));
+ sdl = (struct sockaddr_dl *)ifa->ifa_addr;
+ sdl->sdl_type = IFT_IEEE1394;
+ sdl->sdl_alen = ifp->if_addrlen;
+ bcopy(llc, LLADDR(sdl), ifp->if_addrlen);
+
+ bpfattach(ifp, DLT_APPLE_IP_OVER_IEEE1394,
+ sizeof(struct fw_hwaddr));
+
+ if_printf(ifp, "Firewire address: %8D @ 0x%04x%08x, %s, maxrec %d\n",
+ (uint8_t *) &llc->sender_unique_ID_hi, ":",
+ ntohs(llc->sender_unicast_FIFO_hi),
+ ntohl(llc->sender_unicast_FIFO_lo),
+ speeds[llc->sspd],
+ (2 << llc->sender_max_rec));
+}
+
+void
+firewire_ifdetach(struct ifnet *ifp)
+{
+ bpfdetach(ifp);
+ if_detach(ifp);
+}
+
+void
+firewire_busreset(struct ifnet *ifp)
+{
+ struct fw_com *fc = IFP2FWC(ifp);
+ struct fw_reass *r;
+ struct mbuf *m;
+
+ /*
+ * Discard any partial datagrams since the host ids may have changed.
+ */
+ while ((r = STAILQ_FIRST(&fc->fc_frags))) {
+ STAILQ_REMOVE_HEAD(&fc->fc_frags, fr_link);
+ while (r->fr_frags) {
+ m = r->fr_frags;
+ r->fr_frags = m->m_nextpkt;
+ m_freem(m);
+ }
+ free(r, M_TEMP);
+ }
+}
+
+static void *
+firewire_alloc(u_char type, struct ifnet *ifp)
+{
+ struct fw_com *fc;
+
+ fc = malloc(sizeof(struct fw_com), M_FWCOM, M_WAITOK | M_ZERO);
+ fc->fc_ifp = ifp;
+
+ return (fc);
+}
+
+static void
+firewire_free(void *com, u_char type)
+{
+
+ free(com, M_FWCOM);
+}
+
+static int
+firewire_modevent(module_t mod, int type, void *data)
+{
+
+ switch (type) {
+ case MOD_LOAD:
+ if_register_com_alloc(IFT_IEEE1394,
+ firewire_alloc, firewire_free);
+ break;
+ case MOD_UNLOAD:
+ if_deregister_com_alloc(IFT_IEEE1394);
+ break;
+ default:
+ return (EOPNOTSUPP);
+ }
+
+ return (0);
+}
+
+static moduledata_t firewire_mod = {
+ "if_firewire",
+ firewire_modevent,
+ 0
+};
+
+DECLARE_MODULE(if_firewire, firewire_mod, SI_SUB_INIT_IF, SI_ORDER_ANY);
+MODULE_VERSION(if_firewire, 1);
diff --git a/rtems/freebsd/net/if_gif.c b/rtems/freebsd/net/if_gif.c
new file mode 100644
index 00000000..3ab1cbe9
--- /dev/null
+++ b/rtems/freebsd/net/if_gif.c
@@ -0,0 +1,1025 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/* $FreeBSD$ */
+/* $KAME: if_gif.c,v 1.87 2001/10/19 08:50:27 itojun Exp $ */
+
+/*-
+ * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the project nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <rtems/freebsd/local/opt_inet.h>
+#include <rtems/freebsd/local/opt_inet6.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/module.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/sockio.h>
+#include <rtems/freebsd/sys/errno.h>
+#include <rtems/freebsd/sys/time.h>
+#include <rtems/freebsd/sys/sysctl.h>
+#include <rtems/freebsd/sys/syslog.h>
+#include <rtems/freebsd/sys/priv.h>
+#include <rtems/freebsd/sys/proc.h>
+#include <rtems/freebsd/sys/protosw.h>
+#include <rtems/freebsd/sys/conf.h>
+#include <rtems/freebsd/machine/cpu.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/if_clone.h>
+#include <rtems/freebsd/net/if_types.h>
+#include <rtems/freebsd/net/netisr.h>
+#include <rtems/freebsd/net/route.h>
+#include <rtems/freebsd/net/bpf.h>
+#include <rtems/freebsd/net/vnet.h>
+
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/in_systm.h>
+#include <rtems/freebsd/netinet/ip.h>
+#ifdef INET
+#include <rtems/freebsd/netinet/in_var.h>
+#include <rtems/freebsd/netinet/in_gif.h>
+#include <rtems/freebsd/netinet/ip_var.h>
+#endif /* INET */
+
+#ifdef INET6
+#ifndef INET
+#include <rtems/freebsd/netinet/in.h>
+#endif
+#include <rtems/freebsd/netinet6/in6_var.h>
+#include <rtems/freebsd/netinet/ip6.h>
+#include <rtems/freebsd/netinet6/ip6_var.h>
+#include <rtems/freebsd/netinet6/scope6_var.h>
+#include <rtems/freebsd/netinet6/in6_gif.h>
+#include <rtems/freebsd/netinet6/ip6protosw.h>
+#endif /* INET6 */
+
+#include <rtems/freebsd/netinet/ip_encap.h>
+#include <rtems/freebsd/net/ethernet.h>
+#include <rtems/freebsd/net/if_bridgevar.h>
+#include <rtems/freebsd/net/if_gif.h>
+
+#include <rtems/freebsd/security/mac/mac_framework.h>
+
+#define GIFNAME "gif"
+
+/*
+ * gif_mtx protects the global gif_softc_list.
+ */
+static struct mtx gif_mtx;
+static MALLOC_DEFINE(M_GIF, "gif", "Generic Tunnel Interface");
+static VNET_DEFINE(LIST_HEAD(, gif_softc), gif_softc_list);
+#define V_gif_softc_list VNET(gif_softc_list)
+
+void (*ng_gif_input_p)(struct ifnet *ifp, struct mbuf **mp, int af);
+void (*ng_gif_input_orphan_p)(struct ifnet *ifp, struct mbuf *m, int af);
+void (*ng_gif_attach_p)(struct ifnet *ifp);
+void (*ng_gif_detach_p)(struct ifnet *ifp);
+
+static void gif_start(struct ifnet *);
+static int gif_clone_create(struct if_clone *, int, caddr_t);
+static void gif_clone_destroy(struct ifnet *);
+
+IFC_SIMPLE_DECLARE(gif, 0);
+
+static int gifmodevent(module_t, int, void *);
+
+SYSCTL_DECL(_net_link);
+SYSCTL_NODE(_net_link, IFT_GIF, gif, CTLFLAG_RW, 0,
+ "Generic Tunnel Interface");
+#ifndef MAX_GIF_NEST
+/*
+ * This macro controls the default upper limitation on nesting of gif tunnels.
+ * Since setting a large value to this macro with a careless configuration
+ * may introduce a system crash, we don't allow any nesting by default.
+ * If you need to configure nested gif tunnels, you can define this macro
+ * in your kernel configuration file. However, if you do so, please be
+ * careful to configure the tunnels so that they won't form a loop.
+ */
+#define MAX_GIF_NEST 1
+#endif
+static VNET_DEFINE(int, max_gif_nesting) = MAX_GIF_NEST;
+#define V_max_gif_nesting VNET(max_gif_nesting)
+SYSCTL_VNET_INT(_net_link_gif, OID_AUTO, max_nesting, CTLFLAG_RW,
+ &VNET_NAME(max_gif_nesting), 0, "Max nested tunnels");
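+
+/*
+ * For example (illustrative), a kernel configuration that permits one
+ * level of gif-over-gif nesting could add
+ *
+ *	options MAX_GIF_NEST=2
+ *
+ * or the limit can be raised at run time through the
+ * net.link.gif.max_nesting sysctl declared above.
+ */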
+
+/*
+ * By default, we disallow creation of multiple tunnels between the same
+ * pair of addresses. Some applications require this functionality so
+ * we allow control over this check here.
+ */
+#ifdef XBONEHACK
+static VNET_DEFINE(int, parallel_tunnels) = 1;
+#else
+static VNET_DEFINE(int, parallel_tunnels) = 0;
+#endif
+#define V_parallel_tunnels VNET(parallel_tunnels)
+SYSCTL_VNET_INT(_net_link_gif, OID_AUTO, parallel_tunnels, CTLFLAG_RW,
+ &VNET_NAME(parallel_tunnels), 0, "Allow parallel tunnels?");
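+
+/*
+ * For example (illustrative), to allow a second tunnel between the
+ * same pair of endpoints at run time:
+ *
+ *	sysctl net.link.gif.parallel_tunnels=1
+ */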
+
+/* copy from src/sys/net/if_ethersubr.c */
+static const u_char etherbroadcastaddr[ETHER_ADDR_LEN] =
+ { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
+#ifndef ETHER_IS_BROADCAST
+#define ETHER_IS_BROADCAST(addr) \
+ (bcmp(etherbroadcastaddr, (addr), ETHER_ADDR_LEN) == 0)
+#endif
+
+static int
+gif_clone_create(ifc, unit, params)
+ struct if_clone *ifc;
+ int unit;
+ caddr_t params;
+{
+ struct gif_softc *sc;
+
+ sc = malloc(sizeof(struct gif_softc), M_GIF, M_WAITOK | M_ZERO);
+ sc->gif_fibnum = curthread->td_proc->p_fibnum;
+ GIF2IFP(sc) = if_alloc(IFT_GIF);
+ if (GIF2IFP(sc) == NULL) {
+ free(sc, M_GIF);
+ return (ENOSPC);
+ }
+
+ GIF_LOCK_INIT(sc);
+
+ GIF2IFP(sc)->if_softc = sc;
+ if_initname(GIF2IFP(sc), ifc->ifc_name, unit);
+
+ sc->encap_cookie4 = sc->encap_cookie6 = NULL;
+ sc->gif_options = GIF_ACCEPT_REVETHIP;
+
+ GIF2IFP(sc)->if_addrlen = 0;
+ GIF2IFP(sc)->if_mtu = GIF_MTU;
+ GIF2IFP(sc)->if_flags = IFF_POINTOPOINT | IFF_MULTICAST;
+#if 0
+ /* turn off ingress filter */
+ GIF2IFP(sc)->if_flags |= IFF_LINK2;
+#endif
+ GIF2IFP(sc)->if_ioctl = gif_ioctl;
+ GIF2IFP(sc)->if_start = gif_start;
+ GIF2IFP(sc)->if_output = gif_output;
+ GIF2IFP(sc)->if_snd.ifq_maxlen = ifqmaxlen;
+ if_attach(GIF2IFP(sc));
+ bpfattach(GIF2IFP(sc), DLT_NULL, sizeof(u_int32_t));
+ if (ng_gif_attach_p != NULL)
+ (*ng_gif_attach_p)(GIF2IFP(sc));
+
+ mtx_lock(&gif_mtx);
+ LIST_INSERT_HEAD(&V_gif_softc_list, sc, gif_list);
+ mtx_unlock(&gif_mtx);
+
+ return (0);
+}
+
+static void
+gif_clone_destroy(struct ifnet *ifp)
+{
+#if defined(INET) || defined(INET6)
+ int err;
+#endif
+ struct gif_softc *sc = ifp->if_softc;
+
+ mtx_lock(&gif_mtx);
+ LIST_REMOVE(sc, gif_list);
+ mtx_unlock(&gif_mtx);
+
+ gif_delete_tunnel(ifp);
+#ifdef INET6
+ if (sc->encap_cookie6 != NULL) {
+ err = encap_detach(sc->encap_cookie6);
+ KASSERT(err == 0, ("Unexpected error detaching encap_cookie6"));
+ }
+#endif
+#ifdef INET
+ if (sc->encap_cookie4 != NULL) {
+ err = encap_detach(sc->encap_cookie4);
+ KASSERT(err == 0, ("Unexpected error detaching encap_cookie4"));
+ }
+#endif
+
+ if (ng_gif_detach_p != NULL)
+ (*ng_gif_detach_p)(ifp);
+ bpfdetach(ifp);
+ if_detach(ifp);
+ if_free(ifp);
+
+ GIF_LOCK_DESTROY(sc);
+
+ free(sc, M_GIF);
+}
+
+static void
+vnet_gif_init(const void *unused __unused)
+{
+
+ LIST_INIT(&V_gif_softc_list);
+}
+VNET_SYSINIT(vnet_gif_init, SI_SUB_PSEUDO, SI_ORDER_MIDDLE, vnet_gif_init,
+ NULL);
+
+static int
+gifmodevent(module_t mod, int type, void *data)
+{
+
+ switch (type) {
+ case MOD_LOAD:
+ mtx_init(&gif_mtx, "gif_mtx", NULL, MTX_DEF);
+ if_clone_attach(&gif_cloner);
+ break;
+
+ case MOD_UNLOAD:
+ if_clone_detach(&gif_cloner);
+ mtx_destroy(&gif_mtx);
+ break;
+ default:
+ return EOPNOTSUPP;
+ }
+ return 0;
+}
+
+static moduledata_t gif_mod = {
+ "if_gif",
+ gifmodevent,
+ 0
+};
+
+DECLARE_MODULE(if_gif, gif_mod, SI_SUB_PSEUDO, SI_ORDER_ANY);
+MODULE_VERSION(if_gif, 1);
+
+int
+gif_encapcheck(const struct mbuf *m, int off, int proto, void *arg)
+{
+ struct ip ip;
+ struct gif_softc *sc;
+
+ sc = (struct gif_softc *)arg;
+ if (sc == NULL)
+ return 0;
+
+ if ((GIF2IFP(sc)->if_flags & IFF_UP) == 0)
+ return 0;
+
+ /* no physical address */
+ if (!sc->gif_psrc || !sc->gif_pdst)
+ return 0;
+
+ switch (proto) {
+#ifdef INET
+ case IPPROTO_IPV4:
+ break;
+#endif
+#ifdef INET6
+ case IPPROTO_IPV6:
+ break;
+#endif
+ case IPPROTO_ETHERIP:
+ break;
+
+ default:
+ return 0;
+ }
+
+ /* Bail on short packets */
+ if (m->m_pkthdr.len < sizeof(ip))
+ return 0;
+
+ m_copydata(m, 0, sizeof(ip), (caddr_t)&ip);
+
+ switch (ip.ip_v) {
+#ifdef INET
+ case 4:
+ if (sc->gif_psrc->sa_family != AF_INET ||
+ sc->gif_pdst->sa_family != AF_INET)
+ return 0;
+ return gif_encapcheck4(m, off, proto, arg);
+#endif
+#ifdef INET6
+ case 6:
+ if (m->m_pkthdr.len < sizeof(struct ip6_hdr))
+ return 0;
+ if (sc->gif_psrc->sa_family != AF_INET6 ||
+ sc->gif_pdst->sa_family != AF_INET6)
+ return 0;
+ return gif_encapcheck6(m, off, proto, arg);
+#endif
+ default:
+ return 0;
+ }
+}
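
The dispatch above keys on ip.ip_v, the version nibble in the first byte of
the inner header. A stand-alone analog of the same test over a flat byte
buffer (the kernel pulls the header out of the mbuf chain with m_copydata()
first):

    #include <stdio.h>
    #include <stdint.h>

    /* the IP version lives in the high nibble of the first header byte */
    static int ip_version(const uint8_t *pkt)
    {
        return (pkt[0] >> 4);
    }

    int main(void)
    {
        uint8_t v4hdr[20] = { 0x45 };  /* version 4, header length 5 words */
        uint8_t v6hdr[40] = { 0x60 };  /* version 6 */

        printf("%d %d\n", ip_version(v4hdr), ip_version(v6hdr));  /* 4 6 */
        return (0);
    }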
+
+static void
+gif_start(struct ifnet *ifp)
+{
+ struct gif_softc *sc;
+ struct mbuf *m;
+
+ sc = ifp->if_softc;
+
+ ifp->if_drv_flags |= IFF_DRV_OACTIVE;
+ for (;;) {
+ IFQ_DEQUEUE(&ifp->if_snd, m);
+ if (m == NULL)
+ break;
+
+ gif_output(ifp, m, sc->gif_pdst, NULL);
+
+ }
+ ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
+
+ return;
+}
+
+int
+gif_output(struct ifnet *ifp, struct mbuf *m, struct sockaddr *dst,
+ struct route *ro)
+{
+ struct gif_softc *sc = ifp->if_softc;
+ struct m_tag *mtag;
+ int error = 0;
+ int gif_called;
+ u_int32_t af;
+
+#ifdef MAC
+ error = mac_ifnet_check_transmit(ifp, m);
+ if (error) {
+ m_freem(m);
+ goto end;
+ }
+#endif
+
+ /*
+ * gif may cause infinite recursive calls when misconfigured.
+ * We'll prevent this by detecting loops.
+ *
+ * A high nesting level may cause stack exhaustion.
+ * We'll prevent this by introducing an upper limit.
+ */
+ gif_called = 1;
+ mtag = m_tag_locate(m, MTAG_GIF, MTAG_GIF_CALLED, NULL);
+ while (mtag != NULL) {
+ if (*(struct ifnet **)(mtag + 1) == ifp) {
+ log(LOG_NOTICE,
+ "gif_output: loop detected on %s\n",
+ (*(struct ifnet **)(mtag + 1))->if_xname);
+ m_freem(m);
+ error = EIO; /* is there a better errno? */
+ goto end;
+ }
+ mtag = m_tag_locate(m, MTAG_GIF, MTAG_GIF_CALLED, mtag);
+ gif_called++;
+ }
+ if (gif_called > V_max_gif_nesting) {
+ log(LOG_NOTICE,
+ "gif_output: recursively called too many times(%d)\n",
+ gif_called);
+ m_freem(m);
+ error = EIO; /* is there a better errno? */
+ goto end;
+ }
+ mtag = m_tag_alloc(MTAG_GIF, MTAG_GIF_CALLED, sizeof(struct ifnet *),
+ M_NOWAIT);
+ if (mtag == NULL) {
+ m_freem(m);
+ error = ENOMEM;
+ goto end;
+ }
+ *(struct ifnet **)(mtag + 1) = ifp;
+ m_tag_prepend(m, mtag);
+
+ m->m_flags &= ~(M_BCAST|M_MCAST);
+
+ GIF_LOCK(sc);
+
+ if (!(ifp->if_flags & IFF_UP) ||
+ sc->gif_psrc == NULL || sc->gif_pdst == NULL) {
+ GIF_UNLOCK(sc);
+ m_freem(m);
+ error = ENETDOWN;
+ goto end;
+ }
+
+ /* BPF writes need to be handled specially. */
+ if (dst->sa_family == AF_UNSPEC) {
+ bcopy(dst->sa_data, &af, sizeof(af));
+ dst->sa_family = af;
+ }
+
+ af = dst->sa_family;
+ BPF_MTAP2(ifp, &af, sizeof(af), m);
+ ifp->if_opackets++;
+ ifp->if_obytes += m->m_pkthdr.len;
+
+ /* override to IPPROTO_ETHERIP for bridged traffic */
+ if (ifp->if_bridge)
+ af = AF_LINK;
+
+ M_SETFIB(m, sc->gif_fibnum);
+ /* inner AF-specific encapsulation */
+
+ /* XXX should we check if our outer source is legal? */
+
+ /* dispatch to output logic based on outer AF */
+ switch (sc->gif_psrc->sa_family) {
+#ifdef INET
+ case AF_INET:
+ error = in_gif_output(ifp, af, m);
+ break;
+#endif
+#ifdef INET6
+ case AF_INET6:
+ error = in6_gif_output(ifp, af, m);
+ break;
+#endif
+ default:
+ m_freem(m);
+ error = ENETDOWN;
+ }
+
+ GIF_UNLOCK(sc);
+ end:
+ if (error)
+ ifp->if_oerrors++;
+ return (error);
+}
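
A user-space analog of the loop and nesting checks above, with a plain
array standing in for the chain of MTAG_GIF_CALLED mbuf tags; the names
here are illustrative, not kernel API:

    #include <stdio.h>

    #define MAX_GIF_NEST 1

    /*
     * Walk the tags already attached to the packet: seeing our own
     * interface again is a loop; too many tags means excessive nesting.
     */
    static int drop_packet(void *tags[], int ntags, void *ifp, int limit)
    {
        int called = 1;

        for (int i = 0; i < ntags; i++) {
            if (tags[i] == ifp)
                return (1);          /* loop detected */
            called++;
        }
        return (called > limit);     /* nested too deeply */
    }

    int main(void)
    {
        int gif0, gif1, gif2;
        void *tags[] = { &gif0, &gif1 };

        printf("loop: %d\n", drop_packet(tags, 2, &gif0, MAX_GIF_NEST)); /* 1 */
        printf("deep: %d\n", drop_packet(tags, 2, &gif2, MAX_GIF_NEST)); /* 1 */
        printf("ok: %d\n", drop_packet(tags, 0, &gif2, MAX_GIF_NEST));   /* 0 */
        return (0);
    }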
+
+void
+gif_input(struct mbuf *m, int af, struct ifnet *ifp)
+{
+ int isr, n;
+ struct gif_softc *sc = ifp->if_softc;
+ struct etherip_header *eip;
+ struct ether_header *eh;
+ struct ifnet *oldifp;
+
+ if (ifp == NULL) {
+ /* just in case */
+ m_freem(m);
+ return;
+ }
+
+ m->m_pkthdr.rcvif = ifp;
+
+#ifdef MAC
+ mac_ifnet_create_mbuf(ifp, m);
+#endif
+
+ if (bpf_peers_present(ifp->if_bpf)) {
+ u_int32_t af1 = af;
+ bpf_mtap2(ifp->if_bpf, &af1, sizeof(af1), m);
+ }
+
+ if (ng_gif_input_p != NULL) {
+ (*ng_gif_input_p)(ifp, &m, af);
+ if (m == NULL)
+ return;
+ }
+
+ /*
+ * Put the packet onto the network layer input queue according to the
+ * specified address family.
+ * Note: older versions of gif_input directly called network layer
+ * input functions, e.g. ip6_input, here. We changed the policy to
+ * prevent deep recursive calls of such input functions, which might
+ * cause a kernel panic. But the change may introduce another problem:
+ * if the input queue is full, packets are discarded. Since kernel
+ * stack overflows really did happen, and we believe a full queue
+ * occurs only rarely, we changed the policy.
+ */
+ switch (af) {
+#ifdef INET
+ case AF_INET:
+ isr = NETISR_IP;
+ break;
+#endif
+#ifdef INET6
+ case AF_INET6:
+ isr = NETISR_IPV6;
+ break;
+#endif
+ case AF_LINK:
+ n = sizeof(struct etherip_header) + sizeof(struct ether_header);
+ if (n > m->m_len) {
+ m = m_pullup(m, n);
+ if (m == NULL) {
+ ifp->if_ierrors++;
+ return;
+ }
+ }
+
+ eip = mtod(m, struct etherip_header *);
+ /*
+ * GIF_ACCEPT_REVETHIP (enabled by default) intentionally
+ * accepts an EtherIP packet with a reversed version field in
+ * the header. This is a knob for backward compatibility
+ * with FreeBSD 7.2R or prior.
+ */
+ if (sc->gif_options & GIF_ACCEPT_REVETHIP) {
+ if (eip->eip_resvl != ETHERIP_VERSION
+ && eip->eip_ver != ETHERIP_VERSION) {
+ /* discard unknown versions */
+ m_freem(m);
+ return;
+ }
+ } else {
+ if (eip->eip_ver != ETHERIP_VERSION) {
+ /* discard unknown versions */
+ m_freem(m);
+ return;
+ }
+ }
+ m_adj(m, sizeof(struct etherip_header));
+
+ m->m_flags &= ~(M_BCAST|M_MCAST);
+ m->m_pkthdr.rcvif = ifp;
+
+ if (ifp->if_bridge) {
+ oldifp = ifp;
+ eh = mtod(m, struct ether_header *);
+ if (ETHER_IS_MULTICAST(eh->ether_dhost)) {
+ if (ETHER_IS_BROADCAST(eh->ether_dhost))
+ m->m_flags |= M_BCAST;
+ else
+ m->m_flags |= M_MCAST;
+ ifp->if_imcasts++;
+ }
+ BRIDGE_INPUT(ifp, m);
+
+ if (m != NULL && ifp != oldifp) {
+ /*
+ * The bridge gave us back itself or one of the
+ * members to which the frame is addressed.
+ */
+ ether_demux(ifp, m);
+ return;
+ }
+ }
+ if (m != NULL)
+ m_freem(m);
+ return;
+
+ default:
+ if (ng_gif_input_orphan_p != NULL)
+ (*ng_gif_input_orphan_p)(ifp, m, af);
+ else
+ m_freem(m);
+ return;
+ }
+
+ ifp->if_ipackets++;
+ ifp->if_ibytes += m->m_pkthdr.len;
+ netisr_dispatch(isr, m);
+}
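
How the AF_LINK branch's version test behaves, as a stand-alone sketch: the
EtherIP version occupies the high nibble of the first header byte
(RFC 3378), and the GIF_ACCEPT_REVETHIP knob additionally tolerates the
swapped pre-7.3 layout:

    #include <stdio.h>
    #include <stdint.h>

    #define ETHERIP_VERSION     0x3
    #define GIF_ACCEPT_REVETHIP 0x0001

    /* strict mode wants the version in the high nibble; the compat
       option accepts it in either nibble */
    static int etherip_ok(uint8_t first_byte, unsigned int options)
    {
        uint8_t hi = first_byte >> 4, lo = first_byte & 0x0f;

        if (options & GIF_ACCEPT_REVETHIP)
            return (hi == ETHERIP_VERSION || lo == ETHERIP_VERSION);
        return (hi == ETHERIP_VERSION);
    }

    int main(void)
    {
        printf("%d\n", etherip_ok(0x30, 0));                    /* 1 */
        printf("%d\n", etherip_ok(0x03, GIF_ACCEPT_REVETHIP));  /* 1 */
        printf("%d\n", etherip_ok(0x03, 0));                    /* 0 */
        return (0);
    }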
+
+/* XXX how should we handle IPv6 scope on SIOC[GS]IFPHYADDR? */
+int
+gif_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
+{
+ struct gif_softc *sc = ifp->if_softc;
+ struct ifreq *ifr = (struct ifreq*)data;
+ int error = 0, size;
+ u_int options;
+ struct sockaddr *dst, *src;
+#ifdef SIOCSIFMTU /* xxx */
+ u_long mtu;
+#endif
+
+ switch (cmd) {
+ case SIOCSIFADDR:
+ ifp->if_flags |= IFF_UP;
+ break;
+
+ case SIOCSIFDSTADDR:
+ break;
+
+ case SIOCADDMULTI:
+ case SIOCDELMULTI:
+ break;
+
+#ifdef SIOCSIFMTU /* xxx */
+ case SIOCGIFMTU:
+ break;
+
+ case SIOCSIFMTU:
+ mtu = ifr->ifr_mtu;
+ if (mtu < GIF_MTU_MIN || mtu > GIF_MTU_MAX)
+ return (EINVAL);
+ ifp->if_mtu = mtu;
+ break;
+#endif /* SIOCSIFMTU */
+
+#ifdef INET
+ case SIOCSIFPHYADDR:
+#endif
+#ifdef INET6
+ case SIOCSIFPHYADDR_IN6:
+#endif /* INET6 */
+ case SIOCSLIFPHYADDR:
+ switch (cmd) {
+#ifdef INET
+ case SIOCSIFPHYADDR:
+ src = (struct sockaddr *)
+ &(((struct in_aliasreq *)data)->ifra_addr);
+ dst = (struct sockaddr *)
+ &(((struct in_aliasreq *)data)->ifra_dstaddr);
+ break;
+#endif
+#ifdef INET6
+ case SIOCSIFPHYADDR_IN6:
+ src = (struct sockaddr *)
+ &(((struct in6_aliasreq *)data)->ifra_addr);
+ dst = (struct sockaddr *)
+ &(((struct in6_aliasreq *)data)->ifra_dstaddr);
+ break;
+#endif
+ case SIOCSLIFPHYADDR:
+ src = (struct sockaddr *)
+ &(((struct if_laddrreq *)data)->addr);
+ dst = (struct sockaddr *)
+ &(((struct if_laddrreq *)data)->dstaddr);
+ break;
+ default:
+ return EINVAL;
+ }
+
+ /* sa_family must be equal */
+ if (src->sa_family != dst->sa_family)
+ return EINVAL;
+
+ /* validate sa_len */
+ switch (src->sa_family) {
+#ifdef INET
+ case AF_INET:
+ if (src->sa_len != sizeof(struct sockaddr_in))
+ return EINVAL;
+ break;
+#endif
+#ifdef INET6
+ case AF_INET6:
+ if (src->sa_len != sizeof(struct sockaddr_in6))
+ return EINVAL;
+ break;
+#endif
+ default:
+ return EAFNOSUPPORT;
+ }
+ switch (dst->sa_family) {
+#ifdef INET
+ case AF_INET:
+ if (dst->sa_len != sizeof(struct sockaddr_in))
+ return EINVAL;
+ break;
+#endif
+#ifdef INET6
+ case AF_INET6:
+ if (dst->sa_len != sizeof(struct sockaddr_in6))
+ return EINVAL;
+ break;
+#endif
+ default:
+ return EAFNOSUPPORT;
+ }
+
+ /* check sa_family looks sane for the cmd */
+ switch (cmd) {
+ case SIOCSIFPHYADDR:
+ if (src->sa_family == AF_INET)
+ break;
+ return EAFNOSUPPORT;
+#ifdef INET6
+ case SIOCSIFPHYADDR_IN6:
+ if (src->sa_family == AF_INET6)
+ break;
+ return EAFNOSUPPORT;
+#endif /* INET6 */
+ case SIOCSLIFPHYADDR:
+ /* checks done above */
+ break;
+ }
+
+ error = gif_set_tunnel(GIF2IFP(sc), src, dst);
+ break;
+
+#ifdef SIOCDIFPHYADDR
+ case SIOCDIFPHYADDR:
+ gif_delete_tunnel(GIF2IFP(sc));
+ break;
+#endif
+
+ case SIOCGIFPSRCADDR:
+#ifdef INET6
+ case SIOCGIFPSRCADDR_IN6:
+#endif /* INET6 */
+ if (sc->gif_psrc == NULL) {
+ error = EADDRNOTAVAIL;
+ goto bad;
+ }
+ src = sc->gif_psrc;
+ switch (cmd) {
+#ifdef INET
+ case SIOCGIFPSRCADDR:
+ dst = &ifr->ifr_addr;
+ size = sizeof(ifr->ifr_addr);
+ break;
+#endif /* INET */
+#ifdef INET6
+ case SIOCGIFPSRCADDR_IN6:
+ dst = (struct sockaddr *)
+ &(((struct in6_ifreq *)data)->ifr_addr);
+ size = sizeof(((struct in6_ifreq *)data)->ifr_addr);
+ break;
+#endif /* INET6 */
+ default:
+ error = EADDRNOTAVAIL;
+ goto bad;
+ }
+ if (src->sa_len > size)
+ return EINVAL;
+ bcopy((caddr_t)src, (caddr_t)dst, src->sa_len);
+#ifdef INET6
+ if (dst->sa_family == AF_INET6) {
+ error = sa6_recoverscope((struct sockaddr_in6 *)dst);
+ if (error != 0)
+ return (error);
+ }
+#endif
+ break;
+
+ case SIOCGIFPDSTADDR:
+#ifdef INET6
+ case SIOCGIFPDSTADDR_IN6:
+#endif /* INET6 */
+ if (sc->gif_pdst == NULL) {
+ error = EADDRNOTAVAIL;
+ goto bad;
+ }
+ src = sc->gif_pdst;
+ switch (cmd) {
+#ifdef INET
+ case SIOCGIFPDSTADDR:
+ dst = &ifr->ifr_addr;
+ size = sizeof(ifr->ifr_addr);
+ break;
+#endif /* INET */
+#ifdef INET6
+ case SIOCGIFPDSTADDR_IN6:
+ dst = (struct sockaddr *)
+ &(((struct in6_ifreq *)data)->ifr_addr);
+ size = sizeof(((struct in6_ifreq *)data)->ifr_addr);
+ break;
+#endif /* INET6 */
+ default:
+ error = EADDRNOTAVAIL;
+ goto bad;
+ }
+ if (src->sa_len > size)
+ return EINVAL;
+ bcopy((caddr_t)src, (caddr_t)dst, src->sa_len);
+#ifdef INET6
+ if (dst->sa_family == AF_INET6) {
+ error = sa6_recoverscope((struct sockaddr_in6 *)dst);
+ if (error != 0)
+ return (error);
+ }
+#endif
+ break;
+
+ case SIOCGLIFPHYADDR:
+ if (sc->gif_psrc == NULL || sc->gif_pdst == NULL) {
+ error = EADDRNOTAVAIL;
+ goto bad;
+ }
+
+ /* copy src */
+ src = sc->gif_psrc;
+ dst = (struct sockaddr *)
+ &(((struct if_laddrreq *)data)->addr);
+ size = sizeof(((struct if_laddrreq *)data)->addr);
+ if (src->sa_len > size)
+ return EINVAL;
+ bcopy((caddr_t)src, (caddr_t)dst, src->sa_len);
+
+ /* copy dst */
+ src = sc->gif_pdst;
+ dst = (struct sockaddr *)
+ &(((struct if_laddrreq *)data)->dstaddr);
+ size = sizeof(((struct if_laddrreq *)data)->dstaddr);
+ if (src->sa_len > size)
+ return EINVAL;
+ bcopy((caddr_t)src, (caddr_t)dst, src->sa_len);
+ break;
+
+ case SIOCSIFFLAGS:
+ /* if_ioctl() takes care of it */
+ break;
+
+ case GIFGOPTS:
+ options = sc->gif_options;
+ error = copyout(&options, ifr->ifr_data,
+ sizeof(options));
+ break;
+
+ case GIFSOPTS:
+ if ((error = priv_check(curthread, PRIV_NET_GIF)) != 0)
+ break;
+ error = copyin(ifr->ifr_data, &options, sizeof(options));
+ if (error)
+ break;
+ if (options & ~GIF_OPTMASK)
+ error = EINVAL;
+ else
+ sc->gif_options = options;
+ break;
+
+ default:
+ error = EINVAL;
+ break;
+ }
+ bad:
+ return error;
+}
+
+/*
+ * XXXRW: There's a general event-ordering issue here: the code to check
+ * if a given tunnel is already present happens before we perform a
+ * potentially blocking setup of the tunnel. This code needs to be
+ * re-ordered so that the check and replacement can be atomic using
+ * a mutex.
+ */
+int
+gif_set_tunnel(struct ifnet *ifp, struct sockaddr *src,
+ struct sockaddr *dst)
+{
+ struct gif_softc *sc = ifp->if_softc;
+ struct gif_softc *sc2;
+ struct sockaddr *osrc, *odst, *sa;
+ int error = 0;
+
+ mtx_lock(&gif_mtx);
+ LIST_FOREACH(sc2, &V_gif_softc_list, gif_list) {
+ if (sc2 == sc)
+ continue;
+ if (!sc2->gif_pdst || !sc2->gif_psrc)
+ continue;
+ if (sc2->gif_pdst->sa_family != dst->sa_family ||
+ sc2->gif_pdst->sa_len != dst->sa_len ||
+ sc2->gif_psrc->sa_family != src->sa_family ||
+ sc2->gif_psrc->sa_len != src->sa_len)
+ continue;
+
+ /*
+ * Disallow parallel tunnels unless instructed
+ * otherwise.
+ */
+ if (!V_parallel_tunnels &&
+ bcmp(sc2->gif_pdst, dst, dst->sa_len) == 0 &&
+ bcmp(sc2->gif_psrc, src, src->sa_len) == 0) {
+ error = EADDRNOTAVAIL;
+ mtx_unlock(&gif_mtx);
+ goto bad;
+ }
+
+ /* XXX both ends must be valid? (I mean, not 0.0.0.0) */
+ }
+ mtx_unlock(&gif_mtx);
+
+ /* XXX we can detach from both, but be polite just in case */
+ if (sc->gif_psrc)
+ switch (sc->gif_psrc->sa_family) {
+#ifdef INET
+ case AF_INET:
+ (void)in_gif_detach(sc);
+ break;
+#endif
+#ifdef INET6
+ case AF_INET6:
+ (void)in6_gif_detach(sc);
+ break;
+#endif
+ }
+
+ osrc = sc->gif_psrc;
+ sa = (struct sockaddr *)malloc(src->sa_len, M_IFADDR, M_WAITOK);
+ bcopy((caddr_t)src, (caddr_t)sa, src->sa_len);
+ sc->gif_psrc = sa;
+
+ odst = sc->gif_pdst;
+ sa = (struct sockaddr *)malloc(dst->sa_len, M_IFADDR, M_WAITOK);
+ bcopy((caddr_t)dst, (caddr_t)sa, dst->sa_len);
+ sc->gif_pdst = sa;
+
+ switch (sc->gif_psrc->sa_family) {
+#ifdef INET
+ case AF_INET:
+ error = in_gif_attach(sc);
+ break;
+#endif
+#ifdef INET6
+ case AF_INET6:
+ /*
+ * Check validity of the scope zone ID of the addresses, and
+ * convert it into the kernel internal form if necessary.
+ */
+ error = sa6_embedscope((struct sockaddr_in6 *)sc->gif_psrc, 0);
+ if (error != 0)
+ break;
+ error = sa6_embedscope((struct sockaddr_in6 *)sc->gif_pdst, 0);
+ if (error != 0)
+ break;
+ error = in6_gif_attach(sc);
+ break;
+#endif
+ }
+ if (error) {
+ /* rollback */
+ free((caddr_t)sc->gif_psrc, M_IFADDR);
+ free((caddr_t)sc->gif_pdst, M_IFADDR);
+ sc->gif_psrc = osrc;
+ sc->gif_pdst = odst;
+ goto bad;
+ }
+
+ if (osrc)
+ free((caddr_t)osrc, M_IFADDR);
+ if (odst)
+ free((caddr_t)odst, M_IFADDR);
+
+ bad:
+ if (sc->gif_psrc && sc->gif_pdst)
+ ifp->if_drv_flags |= IFF_DRV_RUNNING;
+ else
+ ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
+
+ return error;
+}
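
The duplicate-tunnel test above compares family, length, and then the raw
bytes of both endpoints. A stand-alone sketch of that comparison, assuming
BSD-style sockaddrs that carry sa_len as in this tree:

    #include <stdio.h>
    #include <string.h>
    #include <sys/socket.h>
    #include <netinet/in.h>
    #include <arpa/inet.h>

    /* same family, same length, byte-identical contents */
    static int same_endpoint(const struct sockaddr *a,
        const struct sockaddr *b)
    {
        return (a->sa_family == b->sa_family &&
            a->sa_len == b->sa_len &&
            memcmp(a, b, a->sa_len) == 0);
    }

    int main(void)
    {
        struct sockaddr_in a, b;

        memset(&a, 0, sizeof(a));
        a.sin_len = sizeof(a);
        a.sin_family = AF_INET;
        inet_pton(AF_INET, "198.51.100.1", &a.sin_addr);
        b = a;

        printf("%d\n", same_endpoint((struct sockaddr *)&a,
            (struct sockaddr *)&b));          /* 1: duplicate */
        b.sin_addr.s_addr ^= htonl(1);
        printf("%d\n", same_endpoint((struct sockaddr *)&a,
            (struct sockaddr *)&b));          /* 0: distinct */
        return (0);
    }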
+
+void
+gif_delete_tunnel(struct ifnet *ifp)
+{
+ struct gif_softc *sc = ifp->if_softc;
+
+ if (sc->gif_psrc) {
+ free((caddr_t)sc->gif_psrc, M_IFADDR);
+ sc->gif_psrc = NULL;
+ }
+ if (sc->gif_pdst) {
+ free((caddr_t)sc->gif_pdst, M_IFADDR);
+ sc->gif_pdst = NULL;
+ }
+ /* it is safe to detach from both */
+#ifdef INET
+ (void)in_gif_detach(sc);
+#endif
+#ifdef INET6
+ (void)in6_gif_detach(sc);
+#endif
+ ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
+}
diff --git a/rtems/freebsd/net/if_gif.h b/rtems/freebsd/net/if_gif.h
new file mode 100644
index 00000000..fe25c112
--- /dev/null
+++ b/rtems/freebsd/net/if_gif.h
@@ -0,0 +1,130 @@
+/* $FreeBSD$ */
+/* $KAME: if_gif.h,v 1.17 2000/09/11 11:36:41 sumikawa Exp $ */
+
+/*-
+ * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the project nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * if_gif.h
+ */
+
+#ifndef _NET_IF_GIF_HH_
+#define _NET_IF_GIF_HH_
+
+
+#ifdef _KERNEL
+#include <rtems/freebsd/local/opt_inet.h>
+#include <rtems/freebsd/local/opt_inet6.h>
+
+#include <rtems/freebsd/netinet/in.h>
+/* xxx sigh, why does route have a struct route instead of a pointer? */
+
+struct encaptab;
+
+extern void (*ng_gif_input_p)(struct ifnet *ifp, struct mbuf **mp,
+ int af);
+extern void (*ng_gif_input_orphan_p)(struct ifnet *ifp, struct mbuf *m,
+ int af);
+extern int (*ng_gif_output_p)(struct ifnet *ifp, struct mbuf **mp);
+extern void (*ng_gif_attach_p)(struct ifnet *ifp);
+extern void (*ng_gif_detach_p)(struct ifnet *ifp);
+
+struct gif_softc {
+ struct ifnet *gif_ifp;
+ struct mtx gif_mtx;
+ struct sockaddr *gif_psrc; /* Physical src addr */
+ struct sockaddr *gif_pdst; /* Physical dst addr */
+ union {
+ struct route gifscr_ro; /* xxx */
+#ifdef INET6
+ struct route_in6 gifscr_ro6; /* xxx */
+#endif
+ } gifsc_gifscr;
+ int gif_flags;
+ u_int gif_fibnum;
+ const struct encaptab *encap_cookie4;
+ const struct encaptab *encap_cookie6;
+ void *gif_netgraph; /* ng_gif(4) netgraph node info */
+ u_int gif_options;
+ LIST_ENTRY(gif_softc) gif_list; /* all gifs are linked */
+};
+#define GIF2IFP(sc) ((sc)->gif_ifp)
+#define GIF_LOCK_INIT(sc) mtx_init(&(sc)->gif_mtx, "gif softc", \
+ NULL, MTX_DEF)
+#define GIF_LOCK_DESTROY(sc) mtx_destroy(&(sc)->gif_mtx)
+#define GIF_LOCK(sc) mtx_lock(&(sc)->gif_mtx)
+#define GIF_UNLOCK(sc) mtx_unlock(&(sc)->gif_mtx)
+#define GIF_LOCK_ASSERT(sc) mtx_assert(&(sc)->gif_mtx, MA_OWNED)
+
+#define gif_ro gifsc_gifscr.gifscr_ro
+#ifdef INET6
+#define gif_ro6 gifsc_gifscr.gifscr_ro6
+#endif
+
+#define GIF_MTU (1280) /* Default MTU */
+#define GIF_MTU_MIN (1280) /* Minimum MTU */
+#define GIF_MTU_MAX (8192) /* Maximum MTU */
+
+#define MTAG_GIF 1080679712
+#define MTAG_GIF_CALLED 0
+
+struct etherip_header {
+#if BYTE_ORDER == LITTLE_ENDIAN
+ u_int eip_resvl:4, /* reserved */
+ eip_ver:4; /* version */
+#endif
+#if BYTE_ORDER == BIG_ENDIAN
+ u_int eip_ver:4, /* version */
+ eip_resvl:4; /* reserved */
+#endif
+ u_int8_t eip_resvh; /* reserved */
+} __packed;
+
+#define ETHERIP_VERSION 0x3
+/* mbuf adjust factor to force 32-bit alignment of IP header */
+#define ETHERIP_ALIGN 2
+
+/* Prototypes */
+void gif_input(struct mbuf *, int, struct ifnet *);
+int gif_output(struct ifnet *, struct mbuf *, struct sockaddr *,
+ struct route *);
+int gif_ioctl(struct ifnet *, u_long, caddr_t);
+int gif_set_tunnel(struct ifnet *, struct sockaddr *, struct sockaddr *);
+void gif_delete_tunnel(struct ifnet *);
+int gif_encapcheck(const struct mbuf *, int, int, void *);
+#endif /* _KERNEL */
+
+#define GIFGOPTS _IOWR('i', 150, struct ifreq)
+#define GIFSOPTS _IOW('i', 151, struct ifreq)
+
+#define GIF_ACCEPT_REVETHIP 0x0001
+#define GIF_SEND_REVETHIP 0x0010
+#define GIF_OPTMASK (GIF_ACCEPT_REVETHIP|GIF_SEND_REVETHIP)
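
A sketch of how a user-space tool might read these options through the
GIFGOPTS ioctl, assuming a gif0 interface exists and this tree's headers
are on the include path; ifr_data carries a pointer to the caller's option
word, mirroring the copyout() in gif_ioctl():

    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/types.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <net/if.h>
    #include <net/if_gif.h>

    int main(void)
    {
        struct ifreq ifr;
        u_int options = 0;
        int s = socket(AF_INET, SOCK_DGRAM, 0);

        if (s < 0)
            return (1);
        memset(&ifr, 0, sizeof(ifr));
        strlcpy(ifr.ifr_name, "gif0", sizeof(ifr.ifr_name));
        ifr.ifr_data = (caddr_t)&options;

        if (ioctl(s, GIFGOPTS, &ifr) == 0)
            printf("gif0 options 0x%x, REVETHIP accepted: %d\n",
                options, (options & GIF_ACCEPT_REVETHIP) != 0);
        close(s);
        return (0);
    }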
+
+#endif /* _NET_IF_GIF_HH_ */
diff --git a/rtems/freebsd/net/if_gre.c b/rtems/freebsd/net/if_gre.c
new file mode 100644
index 00000000..7d1a9cae
--- /dev/null
+++ b/rtems/freebsd/net/if_gre.c
@@ -0,0 +1,909 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/* $NetBSD: if_gre.c,v 1.49 2003/12/11 00:22:29 itojun Exp $ */
+/* $FreeBSD$ */
+
+/*-
+ * Copyright (c) 1998 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Heiko W.Rupp <hwr@pilhuhn.de>
+ *
+ * IPv6-over-GRE contributed by Gert Doering <gert@greenie.muc.de>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the NetBSD
+ * Foundation, Inc. and its contributors.
+ * 4. Neither the name of The NetBSD Foundation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Encapsulate L3 protocols into IP.
+ * See RFC 2784 (successor of RFCs 1701 and 1702) for more details.
+ * if_gre is compatible with Cisco GRE tunnels, so you can
+ * have a NetBSD box as the other end of a tunnel interface of a Cisco
+ * router. See gre(4) for more details.
+ * Also supported: IP-in-IP encapsulation (proto 55), per RFC 2004.
+ */
+
+#include <rtems/freebsd/local/opt_atalk.h>
+#include <rtems/freebsd/local/opt_inet.h>
+#include <rtems/freebsd/local/opt_inet6.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/module.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/priv.h>
+#include <rtems/freebsd/sys/proc.h>
+#include <rtems/freebsd/sys/protosw.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/sockio.h>
+#include <rtems/freebsd/sys/sysctl.h>
+#include <rtems/freebsd/sys/systm.h>
+
+#include <rtems/freebsd/net/ethernet.h>
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/if_clone.h>
+#include <rtems/freebsd/net/if_types.h>
+#include <rtems/freebsd/net/route.h>
+#include <rtems/freebsd/net/vnet.h>
+
+#ifdef INET
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/in_systm.h>
+#include <rtems/freebsd/netinet/in_var.h>
+#include <rtems/freebsd/netinet/ip.h>
+#include <rtems/freebsd/netinet/ip_gre.h>
+#include <rtems/freebsd/netinet/ip_var.h>
+#include <rtems/freebsd/netinet/ip_encap.h>
+#else
+#error "Huh? if_gre without inet?"
+#endif
+
+#include <rtems/freebsd/net/bpf.h>
+
+#include <rtems/freebsd/net/if_gre.h>
+
+/*
+ * It is not easy to calculate the right value for a GRE MTU.
+ * We leave this task to the admin and use the same default that
+ * other vendors use.
+ */
+#define GREMTU 1476
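
For reference, 1476 is what falls out of the common case, and it matches
the 24-byte if_hdrlen set in gre_clone_create() below; a sketch of the
arithmetic, assuming an Ethernet path and the 4-byte base GRE header:

    #include <stdio.h>

    int main(void)
    {
        int ether_mtu = 1500;  /* typical Ethernet payload */
        int outer_ip = 20;     /* outer IPv4 header, no options */
        int gre_base = 4;      /* flags + protocol type, no options */

        printf("%d\n", ether_mtu - outer_ip - gre_base);  /* 1476 */
        return (0);
    }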
+
+#define GRENAME "gre"
+
+/*
+ * gre_mtx protects all global variables in if_gre.c.
+ * XXX: gre_softc data not protected yet.
+ */
+struct mtx gre_mtx;
+static MALLOC_DEFINE(M_GRE, GRENAME, "Generic Routing Encapsulation");
+
+struct gre_softc_head gre_softc_list;
+
+static int gre_clone_create(struct if_clone *, int, caddr_t);
+static void gre_clone_destroy(struct ifnet *);
+static int gre_ioctl(struct ifnet *, u_long, caddr_t);
+static int gre_output(struct ifnet *, struct mbuf *, struct sockaddr *,
+ struct route *ro);
+
+IFC_SIMPLE_DECLARE(gre, 0);
+
+static int gre_compute_route(struct gre_softc *sc);
+
+static void greattach(void);
+
+#ifdef INET
+extern struct domain inetdomain;
+static const struct protosw in_gre_protosw = {
+ .pr_type = SOCK_RAW,
+ .pr_domain = &inetdomain,
+ .pr_protocol = IPPROTO_GRE,
+ .pr_flags = PR_ATOMIC|PR_ADDR,
+ .pr_input = gre_input,
+ .pr_output = (pr_output_t *)rip_output,
+ .pr_ctlinput = rip_ctlinput,
+ .pr_ctloutput = rip_ctloutput,
+ .pr_usrreqs = &rip_usrreqs
+};
+static const struct protosw in_mobile_protosw = {
+ .pr_type = SOCK_RAW,
+ .pr_domain = &inetdomain,
+ .pr_protocol = IPPROTO_MOBILE,
+ .pr_flags = PR_ATOMIC|PR_ADDR,
+ .pr_input = gre_mobile_input,
+ .pr_output = (pr_output_t *)rip_output,
+ .pr_ctlinput = rip_ctlinput,
+ .pr_ctloutput = rip_ctloutput,
+ .pr_usrreqs = &rip_usrreqs
+};
+#endif
+
+SYSCTL_DECL(_net_link);
+SYSCTL_NODE(_net_link, IFT_TUNNEL, gre, CTLFLAG_RW, 0,
+ "Generic Routing Encapsulation");
+#ifndef MAX_GRE_NEST
+/*
+ * This macro controls the default upper limit on the nesting of gre
+ * tunnels. Since setting a large value here with a careless
+ * configuration may crash the system, we do not allow any nesting by
+ * default. If you need to configure nested gre tunnels, you can define
+ * this macro in your kernel configuration file. However, if you do so,
+ * please be careful to configure the tunnels so that they do not form a loop.
+ */
+#define MAX_GRE_NEST 1
+#endif
+static int max_gre_nesting = MAX_GRE_NEST;
+SYSCTL_INT(_net_link_gre, OID_AUTO, max_nesting, CTLFLAG_RW,
+ &max_gre_nesting, 0, "Max nested tunnels");
+
+/* ARGSUSED */
+static void
+greattach(void)
+{
+
+ mtx_init(&gre_mtx, "gre_mtx", NULL, MTX_DEF);
+ LIST_INIT(&gre_softc_list);
+ if_clone_attach(&gre_cloner);
+}
+
+static int
+gre_clone_create(struct if_clone *ifc, int unit, caddr_t params)
+{
+ struct gre_softc *sc;
+
+ sc = malloc(sizeof(struct gre_softc), M_GRE, M_WAITOK | M_ZERO);
+
+ GRE2IFP(sc) = if_alloc(IFT_TUNNEL);
+ if (GRE2IFP(sc) == NULL) {
+ free(sc, M_GRE);
+ return (ENOSPC);
+ }
+
+ GRE2IFP(sc)->if_softc = sc;
+ if_initname(GRE2IFP(sc), ifc->ifc_name, unit);
+
+ GRE2IFP(sc)->if_snd.ifq_maxlen = ifqmaxlen;
+ GRE2IFP(sc)->if_addrlen = 0;
+ GRE2IFP(sc)->if_hdrlen = 24; /* IP + GRE */
+ GRE2IFP(sc)->if_mtu = GREMTU;
+ GRE2IFP(sc)->if_flags = IFF_POINTOPOINT|IFF_MULTICAST;
+ GRE2IFP(sc)->if_output = gre_output;
+ GRE2IFP(sc)->if_ioctl = gre_ioctl;
+ sc->g_dst.s_addr = sc->g_src.s_addr = INADDR_ANY;
+ sc->g_proto = IPPROTO_GRE;
+ GRE2IFP(sc)->if_flags |= IFF_LINK0;
+ sc->encap = NULL;
+ sc->called = 0;
+ sc->gre_fibnum = curthread->td_proc->p_fibnum;
+ sc->wccp_ver = WCCP_V1;
+ sc->key = 0;
+ if_attach(GRE2IFP(sc));
+ bpfattach(GRE2IFP(sc), DLT_NULL, sizeof(u_int32_t));
+ mtx_lock(&gre_mtx);
+ LIST_INSERT_HEAD(&gre_softc_list, sc, sc_list);
+ mtx_unlock(&gre_mtx);
+ return (0);
+}
+
+static void
+gre_clone_destroy(struct ifnet *ifp)
+{
+ struct gre_softc *sc = ifp->if_softc;
+
+ mtx_lock(&gre_mtx);
+ LIST_REMOVE(sc, sc_list);
+ mtx_unlock(&gre_mtx);
+
+#ifdef INET
+ if (sc->encap != NULL)
+ encap_detach(sc->encap);
+#endif
+ bpfdetach(ifp);
+ if_detach(ifp);
+ if_free(ifp);
+ free(sc, M_GRE);
+}
+
+/*
+ * The output routine. Takes a packet and encapsulates it in the protocol
+ * given by sc->g_proto. See also RFC 1701 and RFC 2004
+ */
+static int
+gre_output(struct ifnet *ifp, struct mbuf *m, struct sockaddr *dst,
+ struct route *ro)
+{
+ int error = 0;
+ struct gre_softc *sc = ifp->if_softc;
+ struct greip *gh;
+ struct ip *ip;
+ u_short gre_ip_id = 0;
+ uint8_t gre_ip_tos = 0;
+ u_int16_t etype = 0;
+ struct mobile_h mob_h;
+ u_int32_t af;
+ int extra = 0;
+
+ /*
+ * gre may cause infinite recursive calls when misconfigured.
+ * We'll prevent this by introducing an upper limit.
+ */
+ if (++(sc->called) > max_gre_nesting) {
+ printf("%s: gre_output: recursively called too many "
+ "times(%d)\n", if_name(GRE2IFP(sc)), sc->called);
+ m_freem(m);
+ error = EIO; /* is there a better errno? */
+ goto end;
+ }
+
+ if (!((ifp->if_flags & IFF_UP) &&
+ (ifp->if_drv_flags & IFF_DRV_RUNNING)) ||
+ sc->g_src.s_addr == INADDR_ANY || sc->g_dst.s_addr == INADDR_ANY) {
+ m_freem(m);
+ error = ENETDOWN;
+ goto end;
+ }
+
+ gh = NULL;
+ ip = NULL;
+
+ /* BPF writes need to be handled specially. */
+ if (dst->sa_family == AF_UNSPEC) {
+ bcopy(dst->sa_data, &af, sizeof(af));
+ dst->sa_family = af;
+ }
+
+ if (bpf_peers_present(ifp->if_bpf)) {
+ af = dst->sa_family;
+ bpf_mtap2(ifp->if_bpf, &af, sizeof(af), m);
+ }
+
+ m->m_flags &= ~(M_BCAST|M_MCAST);
+
+ if (sc->g_proto == IPPROTO_MOBILE) {
+ if (dst->sa_family == AF_INET) {
+ struct mbuf *m0;
+ int msiz;
+
+ ip = mtod(m, struct ip *);
+
+ /*
+ * RFC 2004 specifies that fragmented datagrams should not
+ * be encapsulated.
+ */
+ if (ip->ip_off & (IP_MF | IP_OFFMASK)) {
+ _IF_DROP(&ifp->if_snd);
+ m_freem(m);
+ error = EINVAL; /* is there a better errno? */
+ goto end;
+ }
+ memset(&mob_h, 0, MOB_HH_SIZ_L);
+ mob_h.proto = (ip->ip_p) << 8;
+ mob_h.odst = ip->ip_dst.s_addr;
+ ip->ip_dst.s_addr = sc->g_dst.s_addr;
+
+ /*
+ * If the packet comes from our host, we only change
+ * the destination address in the IP header.
+ * Otherwise we also need to save and change the source.
+ */
+ if (in_hosteq(ip->ip_src, sc->g_src)) {
+ msiz = MOB_HH_SIZ_S;
+ } else {
+ mob_h.proto |= MOB_HH_SBIT;
+ mob_h.osrc = ip->ip_src.s_addr;
+ ip->ip_src.s_addr = sc->g_src.s_addr;
+ msiz = MOB_HH_SIZ_L;
+ }
+ mob_h.proto = htons(mob_h.proto);
+ mob_h.hcrc = gre_in_cksum((u_int16_t *)&mob_h, msiz);
+
+ if ((m->m_data - msiz) < m->m_pktdat) {
+ /* need new mbuf */
+ MGETHDR(m0, M_DONTWAIT, MT_DATA);
+ if (m0 == NULL) {
+ _IF_DROP(&ifp->if_snd);
+ m_freem(m);
+ error = ENOBUFS;
+ goto end;
+ }
+ m0->m_next = m;
+ m->m_data += sizeof(struct ip);
+ m->m_len -= sizeof(struct ip);
+ m0->m_pkthdr.len = m->m_pkthdr.len + msiz;
+ m0->m_len = msiz + sizeof(struct ip);
+ m0->m_data += max_linkhdr;
+ memcpy(mtod(m0, caddr_t), (caddr_t)ip,
+ sizeof(struct ip));
+ m = m0;
+ } else { /* we have some space left in the old one */
+ m->m_data -= msiz;
+ m->m_len += msiz;
+ m->m_pkthdr.len += msiz;
+ bcopy(ip, mtod(m, caddr_t),
+ sizeof(struct ip));
+ }
+ ip = mtod(m, struct ip *);
+ memcpy((caddr_t)(ip + 1), &mob_h, (unsigned)msiz);
+ ip->ip_len = ntohs(ip->ip_len) + msiz;
+ } else { /* not AF_INET */
+ _IF_DROP(&ifp->if_snd);
+ m_freem(m);
+ error = EINVAL;
+ goto end;
+ }
+ } else if (sc->g_proto == IPPROTO_GRE) {
+ switch (dst->sa_family) {
+ case AF_INET:
+ ip = mtod(m, struct ip *);
+ gre_ip_tos = ip->ip_tos;
+ gre_ip_id = ip->ip_id;
+ if (sc->wccp_ver == WCCP_V2) {
+ extra = sizeof(uint32_t);
+ etype = WCCP_PROTOCOL_TYPE;
+ } else {
+ etype = ETHERTYPE_IP;
+ }
+ break;
+#ifdef INET6
+ case AF_INET6:
+ gre_ip_id = ip_newid();
+ etype = ETHERTYPE_IPV6;
+ break;
+#endif
+#ifdef NETATALK
+ case AF_APPLETALK:
+ etype = ETHERTYPE_ATALK;
+ break;
+#endif
+ default:
+ _IF_DROP(&ifp->if_snd);
+ m_freem(m);
+ error = EAFNOSUPPORT;
+ goto end;
+ }
+
+ /* Reserve space for GRE header + optional GRE key */
+ int hdrlen = sizeof(struct greip) + extra;
+ if (sc->key)
+ hdrlen += sizeof(uint32_t);
+ M_PREPEND(m, hdrlen, M_DONTWAIT);
+ } else {
+ _IF_DROP(&ifp->if_snd);
+ m_freem(m);
+ error = EINVAL;
+ goto end;
+ }
+
+ if (m == NULL) { /* mbuf allocation failed */
+ _IF_DROP(&ifp->if_snd);
+ error = ENOBUFS;
+ goto end;
+ }
+
+ M_SETFIB(m, sc->gre_fibnum); /* The envelope may use a different FIB */
+
+ gh = mtod(m, struct greip *);
+ if (sc->g_proto == IPPROTO_GRE) {
+ uint32_t *options = gh->gi_options;
+
+ memset((void *)gh, 0, sizeof(struct greip) + extra);
+ gh->gi_ptype = htons(etype);
+ gh->gi_flags = 0;
+
+ /* Add key option */
+ if (sc->key) {
+ gh->gi_flags |= htons(GRE_KP);
+ *(options++) = htonl(sc->key);
+ }
+ }
+
+ gh->gi_pr = sc->g_proto;
+ if (sc->g_proto != IPPROTO_MOBILE) {
+ gh->gi_src = sc->g_src;
+ gh->gi_dst = sc->g_dst;
+ ((struct ip*)gh)->ip_v = IPPROTO_IPV4;
+ ((struct ip*)gh)->ip_hl = (sizeof(struct ip)) >> 2;
+ ((struct ip*)gh)->ip_ttl = GRE_TTL;
+ ((struct ip*)gh)->ip_tos = gre_ip_tos;
+ ((struct ip*)gh)->ip_id = gre_ip_id;
+ gh->gi_len = m->m_pkthdr.len;
+ }
+
+ ifp->if_opackets++;
+ ifp->if_obytes += m->m_pkthdr.len;
+ /*
+ * Send it off with the IP_FORWARDING flag to prevent the ip_id
+ * from being overwritten again; ip_id is already set to the
+ * ip_id of the encapsulated packet.
+ */
+ error = ip_output(m, NULL, &sc->route, IP_FORWARDING,
+ (struct ip_moptions *)NULL, (struct inpcb *)NULL);
+ end:
+ sc->called = 0;
+ if (error)
+ ifp->if_oerrors++;
+ return (error);
+}
+
+static int
+gre_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
+{
+ struct ifreq *ifr = (struct ifreq *)data;
+ struct if_laddrreq *lifr = (struct if_laddrreq *)data;
+ struct in_aliasreq *aifr = (struct in_aliasreq *)data;
+ struct gre_softc *sc = ifp->if_softc;
+ int s;
+ struct sockaddr_in si;
+ struct sockaddr *sa = NULL;
+ int error, adj;
+ struct sockaddr_in sp, sm, dp, dm;
+ uint32_t key;
+
+ error = 0;
+ adj = 0;
+
+ s = splnet();
+ switch (cmd) {
+ case SIOCSIFADDR:
+ ifp->if_flags |= IFF_UP;
+ break;
+ case SIOCSIFDSTADDR:
+ break;
+ case SIOCSIFFLAGS:
+ /*
+ * XXXRW: Isn't this priv_check() redundant to the ifnet
+ * layer check?
+ */
+ if ((error = priv_check(curthread, PRIV_NET_SETIFFLAGS)) != 0)
+ break;
+ if ((ifr->ifr_flags & IFF_LINK0) != 0)
+ sc->g_proto = IPPROTO_GRE;
+ else
+ sc->g_proto = IPPROTO_MOBILE;
+ if ((ifr->ifr_flags & IFF_LINK2) != 0)
+ sc->wccp_ver = WCCP_V2;
+ else
+ sc->wccp_ver = WCCP_V1;
+ goto recompute;
+ case SIOCSIFMTU:
+ /*
+ * XXXRW: Isn't this priv_check() redundant to the ifnet
+ * layer check?
+ */
+ if ((error = priv_check(curthread, PRIV_NET_SETIFMTU)) != 0)
+ break;
+ if (ifr->ifr_mtu < 576) {
+ error = EINVAL;
+ break;
+ }
+ ifp->if_mtu = ifr->ifr_mtu;
+ break;
+ case SIOCGIFMTU:
+ ifr->ifr_mtu = GRE2IFP(sc)->if_mtu;
+ break;
+ case SIOCADDMULTI:
+ /*
+ * XXXRW: Isn't this priv_check() redundant to the ifnet
+ * layer check?
+ */
+ if ((error = priv_check(curthread, PRIV_NET_ADDMULTI)) != 0)
+ break;
+ if (ifr == NULL) {
+ error = EAFNOSUPPORT;
+ break;
+ }
+ switch (ifr->ifr_addr.sa_family) {
+#ifdef INET
+ case AF_INET:
+ break;
+#endif
+#ifdef INET6
+ case AF_INET6:
+ break;
+#endif
+ default:
+ error = EAFNOSUPPORT;
+ break;
+ }
+ break;
+ case SIOCDELMULTI:
+ /*
+ * XXXRW: Isn't this priv_check() redundant to the ifnet
+ * layer check?
+ */
+ if ((error = priv_check(curthread, PRIV_NET_DELIFGROUP)) != 0)
+ break;
+ if (ifr == NULL) {
+ error = EAFNOSUPPORT;
+ break;
+ }
+ switch (ifr->ifr_addr.sa_family) {
+#ifdef INET
+ case AF_INET:
+ break;
+#endif
+#ifdef INET6
+ case AF_INET6:
+ break;
+#endif
+ default:
+ error = EAFNOSUPPORT;
+ break;
+ }
+ break;
+ case GRESPROTO:
+ /*
+ * XXXRW: Isn't this priv_check() redundant to the ifnet
+ * layer check?
+ */
+ if ((error = priv_check(curthread, PRIV_NET_GRE)) != 0)
+ break;
+ sc->g_proto = ifr->ifr_flags;
+ switch (sc->g_proto) {
+ case IPPROTO_GRE:
+ ifp->if_flags |= IFF_LINK0;
+ break;
+ case IPPROTO_MOBILE:
+ ifp->if_flags &= ~IFF_LINK0;
+ break;
+ default:
+ error = EPROTONOSUPPORT;
+ break;
+ }
+ goto recompute;
+ case GREGPROTO:
+ ifr->ifr_flags = sc->g_proto;
+ break;
+ case GRESADDRS:
+ case GRESADDRD:
+ error = priv_check(curthread, PRIV_NET_GRE);
+ if (error)
+ return (error);
+ /*
+ * set tunnel endpoints, compute a less specific route
+ * to the remote end, and mark it as up
+ */
+ sa = &ifr->ifr_addr;
+ if (cmd == GRESADDRS)
+ sc->g_src = (satosin(sa))->sin_addr;
+ if (cmd == GRESADDRD)
+ sc->g_dst = (satosin(sa))->sin_addr;
+ recompute:
+#ifdef INET
+ if (sc->encap != NULL) {
+ encap_detach(sc->encap);
+ sc->encap = NULL;
+ }
+#endif
+ if ((sc->g_src.s_addr != INADDR_ANY) &&
+ (sc->g_dst.s_addr != INADDR_ANY)) {
+ bzero(&sp, sizeof(sp));
+ bzero(&sm, sizeof(sm));
+ bzero(&dp, sizeof(dp));
+ bzero(&dm, sizeof(dm));
+ sp.sin_len = sm.sin_len = dp.sin_len = dm.sin_len =
+ sizeof(struct sockaddr_in);
+ sp.sin_family = sm.sin_family = dp.sin_family =
+ dm.sin_family = AF_INET;
+ sp.sin_addr = sc->g_src;
+ dp.sin_addr = sc->g_dst;
+ sm.sin_addr.s_addr = dm.sin_addr.s_addr =
+ INADDR_BROADCAST;
+#ifdef INET
+ sc->encap = encap_attach(AF_INET, sc->g_proto,
+ sintosa(&sp), sintosa(&sm), sintosa(&dp),
+ sintosa(&dm), (sc->g_proto == IPPROTO_GRE) ?
+ &in_gre_protosw : &in_mobile_protosw, sc);
+ if (sc->encap == NULL)
+ printf("%s: unable to attach encap\n",
+ if_name(GRE2IFP(sc)));
+#endif
+ if (sc->route.ro_rt != NULL) /* free old route */
+ RTFREE(sc->route.ro_rt);
+ if (gre_compute_route(sc) == 0)
+ ifp->if_drv_flags |= IFF_DRV_RUNNING;
+ else
+ ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
+ }
+ break;
+ case GREGADDRS:
+ memset(&si, 0, sizeof(si));
+ si.sin_family = AF_INET;
+ si.sin_len = sizeof(struct sockaddr_in);
+ si.sin_addr.s_addr = sc->g_src.s_addr;
+ sa = sintosa(&si);
+ ifr->ifr_addr = *sa;
+ break;
+ case GREGADDRD:
+ memset(&si, 0, sizeof(si));
+ si.sin_family = AF_INET;
+ si.sin_len = sizeof(struct sockaddr_in);
+ si.sin_addr.s_addr = sc->g_dst.s_addr;
+ sa = sintosa(&si);
+ ifr->ifr_addr = *sa;
+ break;
+ case SIOCSIFPHYADDR:
+ /*
+ * XXXRW: Isn't this priv_check() redundant to the ifnet
+ * layer check?
+ */
+ if ((error = priv_check(curthread, PRIV_NET_SETIFPHYS)) != 0)
+ break;
+ if (aifr->ifra_addr.sin_family != AF_INET ||
+ aifr->ifra_dstaddr.sin_family != AF_INET) {
+ error = EAFNOSUPPORT;
+ break;
+ }
+ if (aifr->ifra_addr.sin_len != sizeof(si) ||
+ aifr->ifra_dstaddr.sin_len != sizeof(si)) {
+ error = EINVAL;
+ break;
+ }
+ sc->g_src = aifr->ifra_addr.sin_addr;
+ sc->g_dst = aifr->ifra_dstaddr.sin_addr;
+ goto recompute;
+ case SIOCSLIFPHYADDR:
+ /*
+ * XXXRW: Isn't this priv_check() redundant to the ifnet
+ * layer check?
+ */
+ if ((error = priv_check(curthread, PRIV_NET_SETIFPHYS)) != 0)
+ break;
+ if (lifr->addr.ss_family != AF_INET ||
+ lifr->dstaddr.ss_family != AF_INET) {
+ error = EAFNOSUPPORT;
+ break;
+ }
+ if (lifr->addr.ss_len != sizeof(si) ||
+ lifr->dstaddr.ss_len != sizeof(si)) {
+ error = EINVAL;
+ break;
+ }
+ sc->g_src = (satosin(&lifr->addr))->sin_addr;
+ sc->g_dst =
+ (satosin(&lifr->dstaddr))->sin_addr;
+ goto recompute;
+ case SIOCDIFPHYADDR:
+ /*
+ * XXXRW: Isn't this priv_check() redundant to the ifnet
+ * layer check?
+ */
+ if ((error = priv_check(curthread, PRIV_NET_SETIFPHYS)) != 0)
+ break;
+ sc->g_src.s_addr = INADDR_ANY;
+ sc->g_dst.s_addr = INADDR_ANY;
+ goto recompute;
+ case SIOCGLIFPHYADDR:
+ if (sc->g_src.s_addr == INADDR_ANY ||
+ sc->g_dst.s_addr == INADDR_ANY) {
+ error = EADDRNOTAVAIL;
+ break;
+ }
+ memset(&si, 0, sizeof(si));
+ si.sin_family = AF_INET;
+ si.sin_len = sizeof(struct sockaddr_in);
+ si.sin_addr.s_addr = sc->g_src.s_addr;
+ memcpy(&lifr->addr, &si, sizeof(si));
+ si.sin_addr.s_addr = sc->g_dst.s_addr;
+ memcpy(&lifr->dstaddr, &si, sizeof(si));
+ break;
+ case SIOCGIFPSRCADDR:
+#ifdef INET6
+ case SIOCGIFPSRCADDR_IN6:
+#endif
+ if (sc->g_src.s_addr == INADDR_ANY) {
+ error = EADDRNOTAVAIL;
+ break;
+ }
+ memset(&si, 0, sizeof(si));
+ si.sin_family = AF_INET;
+ si.sin_len = sizeof(struct sockaddr_in);
+ si.sin_addr.s_addr = sc->g_src.s_addr;
+ bcopy(&si, &ifr->ifr_addr, sizeof(ifr->ifr_addr));
+ break;
+ case SIOCGIFPDSTADDR:
+#ifdef INET6
+ case SIOCGIFPDSTADDR_IN6:
+#endif
+ if (sc->g_dst.s_addr == INADDR_ANY) {
+ error = EADDRNOTAVAIL;
+ break;
+ }
+ memset(&si, 0, sizeof(si));
+ si.sin_family = AF_INET;
+ si.sin_len = sizeof(struct sockaddr_in);
+ si.sin_addr.s_addr = sc->g_dst.s_addr;
+ bcopy(&si, &ifr->ifr_addr, sizeof(ifr->ifr_addr));
+ break;
+ case GRESKEY:
+ error = priv_check(curthread, PRIV_NET_GRE);
+ if (error)
+ break;
+ error = copyin(ifr->ifr_data, &key, sizeof(key));
+ if (error)
+ break;
+ /* adjust MTU for option header */
+ if (key == 0 && sc->key != 0) /* clear */
+ adj += sizeof(key);
+ else if (key != 0 && sc->key == 0) /* set */
+ adj -= sizeof(key);
+
+ if (ifp->if_mtu + adj < 576) {
+ error = EINVAL;
+ break;
+ }
+ ifp->if_mtu += adj;
+ sc->key = key;
+ break;
+ case GREGKEY:
+ error = copyout(&sc->key, ifr->ifr_data, sizeof(sc->key));
+ break;
+
+ default:
+ error = EINVAL;
+ break;
+ }
+
+ splx(s);
+ return (error);
+}
+
+/*
+ * Computes a route to our destination that is not the one which
+ * would be taken by ip_output(), as that one would loop back to us.
+ * If the interface is point-to-point as a--->b, then a routing entry
+ * for b exists. If we now send a packet to b (e.g. ping b), it comes
+ * down here, gets src=a, dst=b tacked on, and would be sent right
+ * back to if_gre by ip_output().
+ * The goal here is to compute a route to b that is less specific
+ * than a-->b. We know that such a route exists, as in normal
+ * operation we have at least a default route which matches.
+ */
+static int
+gre_compute_route(struct gre_softc *sc)
+{
+ struct route *ro;
+
+ ro = &sc->route;
+
+ memset(ro, 0, sizeof(struct route));
+ ((struct sockaddr_in *)&ro->ro_dst)->sin_addr = sc->g_dst;
+ ro->ro_dst.sa_family = AF_INET;
+ ro->ro_dst.sa_len = sizeof(ro->ro_dst);
+
+ /*
+ * Toggle the last bit, so that our own interface route is not
+ * found, but a less specific one is. I would rather specify a
+ * shorter mask, but this is not possible. Should work though. XXX
+ * XXX MRT Use a different FIB for the tunnel to solve this problem.
+ */
+ if ((GRE2IFP(sc)->if_flags & IFF_LINK1) == 0) {
+ ((struct sockaddr_in *)&ro->ro_dst)->sin_addr.s_addr ^=
+ htonl(0x01);
+ }
+
+#ifdef DIAGNOSTIC
+ printf("%s: searching for a route to %s", if_name(GRE2IFP(sc)),
+ inet_ntoa(((struct sockaddr_in *)&ro->ro_dst)->sin_addr));
+#endif
+
+ rtalloc_fib(ro, sc->gre_fibnum);
+
+ /*
+ * Check whether this returned a route at all, and that the route
+ * does not loop back to ourselves.
+ */
+ if (ro->ro_rt == NULL || ro->ro_rt->rt_ifp->if_softc == sc) {
+#ifdef DIAGNOSTIC
+ if (ro->ro_rt == NULL)
+ printf(" - no route found!\n");
+ else
+ printf(" - route loops back to ourself!\n");
+#endif
+ return EADDRNOTAVAIL;
+ }
+
+ /*
+ * Now change it back; otherwise ip_output() will just drop
+ * the route and search for one to this interface ...
+ */
+ if ((GRE2IFP(sc)->if_flags & IFF_LINK1) == 0)
+ ((struct sockaddr_in *)&ro->ro_dst)->sin_addr = sc->g_dst;
+
+#ifdef DIAGNOSTIC
+ printf(", choosing %s with gateway %s", if_name(ro->ro_rt->rt_ifp),
+ inet_ntoa(((struct sockaddr_in *)(ro->ro_rt->rt_gateway))->sin_addr));
+ printf("\n");
+#endif
+
+ return 0;
+}
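
A stand-alone illustration of the last-bit trick above: XORing the
destination with htonl(0x01) perturbs only the lowest bit of the address in
network byte order, so the lookup misses the tunnel's own host route while
still matching a covering, less specific route:

    #include <stdio.h>
    #include <arpa/inet.h>

    int main(void)
    {
        struct in_addr dst, probe;
        char buf[INET_ADDRSTRLEN];

        inet_pton(AF_INET, "192.0.2.10", &dst);
        probe.s_addr = dst.s_addr ^ htonl(0x01);  /* toggle the last bit */

        printf("lookup key: %s\n",
            inet_ntop(AF_INET, &probe, buf, sizeof(buf)));  /* 192.0.2.11 */
        return (0);
    }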
+
+/*
+ * Compute a checksum of a buffer, much like in_cksum(), which
+ * operates on mbufs.
+ */
+u_int16_t
+gre_in_cksum(u_int16_t *p, u_int len)
+{
+ u_int32_t sum = 0;
+ int nwords = len >> 1;
+
+ while (nwords-- != 0)
+ sum += *p++;
+
+ if (len & 1) {
+ union {
+ u_short w;
+ u_char c[2];
+ } u;
+ u.c[0] = *(u_char *)p;
+ u.c[1] = 0;
+ sum += u.w;
+ }
+
+ /* end-around-carry */
+ sum = (sum >> 16) + (sum & 0xffff);
+ sum += (sum >> 16);
+ return (~sum);
+}
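
A quick user-space check of the routine's defining property: folding the
one's-complement sum of the data together with its checksum yields 0xffff.
The function is restated below with <stdint.h> types but is otherwise the
same algorithm:

    #include <stdio.h>
    #include <stdint.h>

    static uint16_t cksum16(const uint16_t *p, unsigned int len)
    {
        uint32_t sum = 0;
        unsigned int nwords = len >> 1;

        while (nwords-- != 0)
            sum += *p++;
        if (len & 1) {
            /* pad the odd trailing byte with zero, as above */
            union { uint16_t w; uint8_t c[2]; } u =
                { .c = { *(const uint8_t *)p, 0 } };
            sum += u.w;
        }
        sum = (sum >> 16) + (sum & 0xffff);  /* end-around-carry */
        sum += (sum >> 16);
        return (~sum);
    }

    int main(void)
    {
        uint16_t buf[4] = { 0x1234, 0xabcd, 0x0001, 0x0000 };
        uint32_t sum = 0;

        buf[3] = cksum16(buf, 6);  /* checksum over the first 6 bytes */
        for (int i = 0; i < 4; i++)
            sum += buf[i];
        sum = (sum >> 16) + (sum & 0xffff);
        sum += (sum >> 16);
        printf("0x%04x\n", (unsigned int)sum);  /* expect 0xffff */
        return (0);
    }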
+
+static int
+gremodevent(module_t mod, int type, void *data)
+{
+
+ switch (type) {
+ case MOD_LOAD:
+ greattach();
+ break;
+ case MOD_UNLOAD:
+ if_clone_detach(&gre_cloner);
+ mtx_destroy(&gre_mtx);
+ break;
+ default:
+ return EOPNOTSUPP;
+ }
+ return 0;
+}
+
+static moduledata_t gre_mod = {
+ "if_gre",
+ gremodevent,
+ 0
+};
+
+DECLARE_MODULE(if_gre, gre_mod, SI_SUB_PSEUDO, SI_ORDER_ANY);
+MODULE_VERSION(if_gre, 1);
diff --git a/rtems/freebsd/net/if_gre.h b/rtems/freebsd/net/if_gre.h
new file mode 100644
index 00000000..c21a9159
--- /dev/null
+++ b/rtems/freebsd/net/if_gre.h
@@ -0,0 +1,194 @@
+/* $NetBSD: if_gre.h,v 1.13 2003/11/10 08:51:52 wiz Exp $ */
+/* $FreeBSD$ */
+
+/*-
+ * Copyright (c) 1998 The NetBSD Foundation, Inc.
+ * All rights reserved
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Heiko W.Rupp <hwr@pilhuhn.de>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the NetBSD
+ * Foundation, Inc. and its contributors.
+ * 4. Neither the name of The NetBSD Foundation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _NET_IF_GRE_H
+#define _NET_IF_GRE_H
+
+#include <rtems/freebsd/sys/ioccom.h>
+#ifdef _KERNEL
+#include <rtems/freebsd/sys/queue.h>
+
+/*
+ * Version of WCCP; this needs to be configured manually, since the
+ * header for version 2 is the same but the IP payload is prepended
+ * with an additional 4-byte field.
+ */
+typedef enum {
+ WCCP_V1 = 0,
+ WCCP_V2
+} wccp_ver_t;
+
+struct gre_softc {
+ struct ifnet *sc_ifp;
+ LIST_ENTRY(gre_softc) sc_list;
+ int gre_unit;
+ int gre_flags;
+ u_int gre_fibnum; /* use this fib for envelopes */
+ struct in_addr g_src; /* source address of gre packets */
+ struct in_addr g_dst; /* destination address of gre packets */
+ struct route route; /* routing entry that determines where an
+ encapsulated packet should go */
+ u_char g_proto; /* protocol of encapsulator */
+
+ const struct encaptab *encap; /* encapsulation cookie */
+
+ int called; /* infinite recursion preventer */
+
+ uint32_t key; /* key included in outgoing GRE packets */
+ /* zero means none */
+
+ wccp_ver_t wccp_ver; /* version of the WCCP */
+};
+#define GRE2IFP(sc) ((sc)->sc_ifp)
+
+
+struct gre_h {
+ u_int16_t flags; /* GRE flags */
+ u_int16_t ptype; /* protocol type of payload, typically an
+ Ethernet protocol type */
+ uint32_t options[0]; /* optional options */
+/*
+ * From here on the fields are optional; presence is indicated by flags:
+ *
+ u_int16_t checksum checksum (one's complement of GRE header
+ and payload)
+ Present if (ck_pres | rt_pres == 1).
+ Valid if (ck_pres == 1).
+ u_int16_t offset offset from start of routing field to
+ first octet of active SRE (see below).
+ Present if (ck_pres | rt_pres == 1).
+ Valid if (rt_pres == 1).
+ u_int32_t key inserted by encapsulator, e.g. for
+ authentication.
+ Present if (key_pres == 1).
+ u_int32_t seq_num sequence number to allow for packet ordering.
+ Present if (seq_pres == 1).
+ struct gre_sre[] routing routing fields (see below).
+ Present if (rt_pres == 1).
+ */
+} __packed;
+
+struct greip {
+ struct ip gi_i;
+ struct gre_h gi_g;
+} __packed;
+
+#define gi_pr gi_i.ip_p
+#define gi_len gi_i.ip_len
+#define gi_src gi_i.ip_src
+#define gi_dst gi_i.ip_dst
+#define gi_ptype gi_g.ptype
+#define gi_flags gi_g.flags
+#define gi_options gi_g.options
+
+#define GRE_CP 0x8000 /* Checksum Present */
+#define GRE_RP 0x4000 /* Routing Present */
+#define GRE_KP 0x2000 /* Key Present */
+#define GRE_SP 0x1000 /* Sequence Present */
+#define GRE_SS 0x0800 /* Strict Source Route */
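
A sketch of the wire layout these flags control, mirroring what
gre_output() does when a key is configured: set GRE_KP in the flags word
and append the 32-bit key right after the fixed 4-byte header (all values
illustrative):

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>
    #include <arpa/inet.h>

    #define GRE_KP       0x2000  /* Key Present */
    #define ETHERTYPE_IP 0x0800

    int main(void)
    {
        uint8_t pkt[8];
        uint16_t flags = htons(GRE_KP);
        uint16_t ptype = htons(ETHERTYPE_IP);
        uint32_t key = htonl(0x1234);

        memcpy(pkt, &flags, 2);      /* fixed header: flags ... */
        memcpy(pkt + 2, &ptype, 2);  /* ... and payload protocol type */
        memcpy(pkt + 4, &key, 4);    /* key follows when GRE_KP is set */

        for (size_t i = 0; i < sizeof(pkt); i++)
            printf("%02x ", pkt[i]);
        printf("\n");                /* 20 00 08 00 00 00 12 34 */
        return (0);
    }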
+
+/*
+ * Cisco uses a special type for GRE tunnels created as part of a WCCP
+ * connection, while in fact those packets are just IPv4 encapsulated
+ * into GRE.
+ */
+#define WCCP_PROTOCOL_TYPE 0x883E
+
+/*
+ * gre_sre defines a Source Route Entry. These are needed if packets
+ * are to be routed over more than one tunnel, hop by hop.
+ */
+struct gre_sre {
+ u_int16_t sre_family; /* address family */
+ u_char sre_offset; /* offset to first octet of active entry */
+ u_char sre_length; /* number of octets in the SRE.
+ sre_length == 0 -> last entry. */
+ u_char *sre_rtinfo; /* the routing information */
+};
+
+struct greioctl {
+ int unit;
+ struct in_addr addr;
+};
+
+/* for mobile encaps */
+
+struct mobile_h {
+ u_int16_t proto; /* protocol and S-bit */
+ u_int16_t hcrc; /* header checksum */
+ u_int32_t odst; /* original destination address */
+ u_int32_t osrc; /* original source addr, if S-bit set */
+} __packed;
+
+struct mobip_h {
+ struct ip mi;
+ struct mobile_h mh;
+} __packed;
+
+
+#define MOB_HH_SIZ_S (sizeof(struct mobile_h) - sizeof(u_int32_t))
+#define MOB_HH_SIZ_L (sizeof(struct mobile_h))
+#define MOB_HH_SBIT 0x0080
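
These two sizes drive gre_output()'s IPPROTO_MOBILE path: when the inner
source equals the tunnel source, the osrc field (and the S-bit) is omitted,
saving four bytes. A stand-alone restatement, assuming the same packed
layout as the kernel struct:

    #include <stdio.h>
    #include <stdint.h>

    struct mobile_h {
        uint16_t proto;  /* protocol and S-bit */
        uint16_t hcrc;   /* header checksum */
        uint32_t odst;   /* original destination address */
        uint32_t osrc;   /* original source, only if S-bit set */
    } __attribute__((packed));

    #define MOB_HH_SIZ_S (sizeof(struct mobile_h) - sizeof(uint32_t))
    #define MOB_HH_SIZ_L (sizeof(struct mobile_h))

    int main(void)
    {
        printf("short %zu, long %zu\n",
            MOB_HH_SIZ_S, MOB_HH_SIZ_L);  /* short 8, long 12 */
        return (0);
    }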
+
+#define GRE_TTL 30
+
+#endif /* _KERNEL */
+
+/*
+ * ioctls needed to manipulate the interface
+ */
+
+#define GRESADDRS _IOW('i', 101, struct ifreq)
+#define GRESADDRD _IOW('i', 102, struct ifreq)
+#define GREGADDRS _IOWR('i', 103, struct ifreq)
+#define GREGADDRD _IOWR('i', 104, struct ifreq)
+#define GRESPROTO _IOW('i' , 105, struct ifreq)
+#define GREGPROTO _IOWR('i', 106, struct ifreq)
+#define GREGKEY _IOWR('i', 107, struct ifreq)
+#define GRESKEY _IOW('i', 108, struct ifreq)
+
+#ifdef _KERNEL
+LIST_HEAD(gre_softc_head, gre_softc);
+extern struct mtx gre_mtx;
+extern struct gre_softc_head gre_softc_list;
+
+u_int16_t gre_in_cksum(u_int16_t *, u_int);
+#endif /* _KERNEL */
+
+#endif
diff --git a/rtems/freebsd/net/if_iso88025subr.c b/rtems/freebsd/net/if_iso88025subr.c
new file mode 100644
index 00000000..629aac36
--- /dev/null
+++ b/rtems/freebsd/net/if_iso88025subr.c
@@ -0,0 +1,831 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 1998, Larry Lile
+ * All rights reserved.
+ *
+ * For latest sources and information on this driver, please
+ * go to http://anarchy.stdio.com.
+ *
+ * Questions, comments or suggestions should be directed to
+ * Larry Lile <lile@stdio.com>.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ *
+ */
+
+/*
+ *
+ * General ISO 802.5 (Token Ring) support routines
+ *
+ */
+
+#include <rtems/freebsd/local/opt_inet.h>
+#include <rtems/freebsd/local/opt_inet6.h>
+#include <rtems/freebsd/local/opt_ipx.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/module.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/sockio.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/if_arp.h>
+#include <rtems/freebsd/net/if_dl.h>
+#include <rtems/freebsd/net/if_llc.h>
+#include <rtems/freebsd/net/if_types.h>
+#include <rtems/freebsd/net/if_llatbl.h>
+
+#include <rtems/freebsd/net/ethernet.h>
+#include <rtems/freebsd/net/netisr.h>
+#include <rtems/freebsd/net/route.h>
+#include <rtems/freebsd/net/bpf.h>
+#include <rtems/freebsd/net/iso88025.h>
+
+#if defined(INET) || defined(INET6)
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/in_var.h>
+#include <rtems/freebsd/netinet/if_ether.h>
+#endif
+#ifdef INET6
+#include <rtems/freebsd/netinet6/nd6.h>
+#endif
+
+#ifdef IPX
+#include <rtems/freebsd/netipx/ipx.h>
+#include <rtems/freebsd/netipx/ipx_if.h>
+#endif
+
+#include <rtems/freebsd/security/mac/mac_framework.h>
+
+static const u_char iso88025_broadcastaddr[ISO88025_ADDR_LEN] =
+ { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
+
+static int iso88025_resolvemulti (struct ifnet *, struct sockaddr **,
+ struct sockaddr *);
+
+#define senderr(e) do { error = (e); goto bad; } while (0)
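
The do { ... } while (0) wrapper is what lets senderr() expand safely as a
single statement, for example as the body of an unbraced if. A minimal
demonstration:

    #include <stdio.h>

    #define senderr(e) do { error = (e); goto bad; } while (0)

    static int demo(int fail)
    {
        int error = 0;

        if (fail)
            senderr(5);  /* expands as one statement */
        printf("sent\n");
    bad:
        return (error);
    }

    int main(void)
    {
        printf("error=%d\n", demo(1));  /* error=5, nothing sent */
        printf("error=%d\n", demo(0));  /* prints "sent", error=0 */
        return (0);
    }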
+
+/*
+ * Perform common duties while attaching to the interface list
+ */
+void
+iso88025_ifattach(struct ifnet *ifp, const u_int8_t *lla, int bpf)
+{
+ struct ifaddr *ifa;
+ struct sockaddr_dl *sdl;
+
+ ifa = NULL;
+
+ ifp->if_type = IFT_ISO88025;
+ ifp->if_addrlen = ISO88025_ADDR_LEN;
+ ifp->if_hdrlen = ISO88025_HDR_LEN;
+
+ if_attach(ifp); /* Must be called before additional assignments */
+
+ ifp->if_output = iso88025_output;
+ ifp->if_input = iso88025_input;
+ ifp->if_resolvemulti = iso88025_resolvemulti;
+ ifp->if_broadcastaddr = iso88025_broadcastaddr;
+
+ if (ifp->if_baudrate == 0)
+ ifp->if_baudrate = TR_16MBPS; /* 16Mbit should be a safe default */
+ if (ifp->if_mtu == 0)
+ ifp->if_mtu = ISO88025_DEFAULT_MTU;
+
+ ifa = ifp->if_addr;
+ KASSERT(ifa != NULL, ("%s: no lladdr!\n", __func__));
+
+ sdl = (struct sockaddr_dl *)ifa->ifa_addr;
+ sdl->sdl_type = IFT_ISO88025;
+ sdl->sdl_alen = ifp->if_addrlen;
+ bcopy(lla, LLADDR(sdl), ifp->if_addrlen);
+
+ if (bpf)
+ bpfattach(ifp, DLT_IEEE802, ISO88025_HDR_LEN);
+
+ return;
+}
+
+/*
+ * Perform common duties while detaching a Token Ring interface
+ */
+void
+iso88025_ifdetach(struct ifnet *ifp, int bpf)
+{
+
+ if (bpf)
+ bpfdetach(ifp);
+
+ if_detach(ifp);
+
+ return;
+}
+
+int
+iso88025_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
+{
+ struct ifaddr *ifa;
+ struct ifreq *ifr;
+ int error;
+
+ ifa = (struct ifaddr *) data;
+ ifr = (struct ifreq *) data;
+ error = 0;
+
+ switch (command) {
+ case SIOCSIFADDR:
+ ifp->if_flags |= IFF_UP;
+
+ switch (ifa->ifa_addr->sa_family) {
+#ifdef INET
+ case AF_INET:
+ ifp->if_init(ifp->if_softc); /* before arpwhohas */
+ arp_ifinit(ifp, ifa);
+ break;
+#endif /* INET */
+#ifdef IPX
+ /*
+ * XXX - This code is probably wrong
+ */
+ case AF_IPX: {
+ struct ipx_addr *ina;
+
+ ina = &(IA_SIPX(ifa)->sipx_addr);
+
+ if (ipx_nullhost(*ina))
+ ina->x_host = *(union ipx_host *)
+ IF_LLADDR(ifp);
+ else
+ bcopy((caddr_t) ina->x_host.c_host,
+ (caddr_t) IF_LLADDR(ifp),
+ ISO88025_ADDR_LEN);
+
+ /*
+ * Set new address
+ */
+ ifp->if_init(ifp->if_softc);
+ }
+ break;
+#endif /* IPX */
+ default:
+ ifp->if_init(ifp->if_softc);
+ break;
+ }
+ break;
+
+ case SIOCGIFADDR: {
+ struct sockaddr *sa;
+
+ sa = (struct sockaddr *) & ifr->ifr_data;
+ bcopy(IF_LLADDR(ifp),
+ (caddr_t) sa->sa_data, ISO88025_ADDR_LEN);
+ }
+ break;
+
+ case SIOCSIFMTU:
+ /*
+ * Set the interface MTU.
+ */
+ if (ifr->ifr_mtu > ISO88025_MAX_MTU) {
+ error = EINVAL;
+ } else {
+ ifp->if_mtu = ifr->ifr_mtu;
+ }
+ break;
+ default:
+ error = EINVAL; /* XXX netbsd has ENOTTY??? */
+ break;
+ }
+
+ return (error);
+}
+
+/*
+ * ISO88025 encapsulation
+ */
+int
+iso88025_output(struct ifnet *ifp, struct mbuf *m, struct sockaddr *dst,
+    struct route *ro)
+{
+ u_int16_t snap_type = 0;
+ int loop_copy = 0, error = 0, rif_len = 0;
+ u_char edst[ISO88025_ADDR_LEN];
+ struct iso88025_header *th;
+ struct iso88025_header gen_th;
+ struct sockaddr_dl *sdl = NULL;
+ struct rtentry *rt0 = NULL;
+#if defined(INET) || defined(INET6)
+ struct llentry *lle;
+#endif
+
+ if (ro != NULL)
+ rt0 = ro->ro_rt;
+
+#ifdef MAC
+ error = mac_ifnet_check_transmit(ifp, m);
+ if (error)
+ senderr(error);
+#endif
+
+ if (ifp->if_flags & IFF_MONITOR)
+ senderr(ENETDOWN);
+ if (!((ifp->if_flags & IFF_UP) &&
+ (ifp->if_drv_flags & IFF_DRV_RUNNING)))
+ senderr(ENETDOWN);
+ getmicrotime(&ifp->if_lastchange);
+
+	/*
+	 * Calculate the routing info length based on the ARP table entry.
+	 * XXX Is there any better way to do this?
+	 */
+
+ if (rt0 && (sdl = (struct sockaddr_dl *)rt0->rt_gateway))
+ if (SDL_ISO88025(sdl)->trld_rcf != 0)
+ rif_len = TR_RCF_RIFLEN(SDL_ISO88025(sdl)->trld_rcf);
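+	/*
+	 * rif_len now holds the total length of the source-routing
+	 * information field (the 2-byte RCF plus the route designators),
+	 * or 0 if the destination has no routing information.
+	 */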
+
+ /* Generate a generic 802.5 header for the packet */
+ gen_th.ac = TR_AC;
+ gen_th.fc = TR_LLC_FRAME;
+ (void)memcpy((caddr_t)gen_th.iso88025_shost, IF_LLADDR(ifp),
+ ISO88025_ADDR_LEN);
+ if (rif_len) {
+ gen_th.iso88025_shost[0] |= TR_RII;
+ if (rif_len > 2) {
+ gen_th.rcf = SDL_ISO88025(sdl)->trld_rcf;
+ (void)memcpy((caddr_t)gen_th.rd,
+ (caddr_t)SDL_ISO88025(sdl)->trld_route,
+ rif_len - 2);
+ }
+ }
+
+ switch (dst->sa_family) {
+#ifdef INET
+ case AF_INET:
+ error = arpresolve(ifp, rt0, m, dst, edst, &lle);
+ if (error)
+ return (error == EWOULDBLOCK ? 0 : error);
+ snap_type = ETHERTYPE_IP;
+ break;
+ case AF_ARP:
+ {
+ struct arphdr *ah;
+ ah = mtod(m, struct arphdr *);
+ ah->ar_hrd = htons(ARPHRD_IEEE802);
+
+ loop_copy = -1; /* if this is for us, don't do it */
+
+ switch(ntohs(ah->ar_op)) {
+ case ARPOP_REVREQUEST:
+ case ARPOP_REVREPLY:
+ snap_type = ETHERTYPE_REVARP;
+ break;
+ case ARPOP_REQUEST:
+ case ARPOP_REPLY:
+ default:
+ snap_type = ETHERTYPE_ARP;
+ break;
+ }
+
+ if (m->m_flags & M_BCAST)
+ bcopy(ifp->if_broadcastaddr, edst, ISO88025_ADDR_LEN);
+ else
+ bcopy(ar_tha(ah), edst, ISO88025_ADDR_LEN);
+
+ }
+ break;
+#endif /* INET */
+#ifdef INET6
+ case AF_INET6:
+ error = nd6_storelladdr(ifp, m, dst, (u_char *)edst, &lle);
+ if (error)
+ return (error);
+ snap_type = ETHERTYPE_IPV6;
+ break;
+#endif /* INET6 */
+#ifdef IPX
+ case AF_IPX:
+ {
+ u_int8_t *cp;
+
+ bcopy((caddr_t)&(satoipx_addr(dst).x_host), (caddr_t)edst,
+ ISO88025_ADDR_LEN);
+
+ M_PREPEND(m, 3, M_WAIT);
+ m = m_pullup(m, 3);
+		if (m == NULL)
+ senderr(ENOBUFS);
+ cp = mtod(m, u_int8_t *);
+ *cp++ = ETHERTYPE_IPX_8022;
+ *cp++ = ETHERTYPE_IPX_8022;
+ *cp++ = LLC_UI;
+ }
+ break;
+#endif /* IPX */
+ case AF_UNSPEC:
+ {
+ struct iso88025_sockaddr_data *sd;
+		/*
+		 * For AF_UNSPEC, sockaddr.sa_data must contain all of the MAC
+		 * information needed to send the packet.  This allows the full
+		 * MAC, LLC, and source routing functions to be controlled.
+		 * The LLC and source routing information must already be in
+		 * the mbuf provided; ac/fc are set from sa_data.
+		 * sockaddr.sa_data should be an iso88025_sockaddr_data
+		 * structure (see iso88025.h); an illustrative sketch follows
+		 * this switch.
+		 */
+ loop_copy = -1;
+ sd = (struct iso88025_sockaddr_data *)dst->sa_data;
+ gen_th.ac = sd->ac;
+ gen_th.fc = sd->fc;
+ (void)memcpy((caddr_t)edst, (caddr_t)sd->ether_dhost,
+ ISO88025_ADDR_LEN);
+ (void)memcpy((caddr_t)gen_th.iso88025_shost,
+ (caddr_t)sd->ether_shost, ISO88025_ADDR_LEN);
+ rif_len = 0;
+ break;
+ }
+ default:
+ if_printf(ifp, "can't handle af%d\n", dst->sa_family);
+ senderr(EAFNOSUPPORT);
+ break;
+ }
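+	/*
+	 * Illustrative sketch (not part of the original sources): a caller
+	 * sending a raw frame via AF_UNSPEC would set up the sockaddr
+	 * roughly as follows; "dst_mac" is a placeholder for the
+	 * destination MAC address.
+	 *
+	 *	struct sockaddr sa;
+	 *	struct iso88025_sockaddr_data *sd;
+	 *
+	 *	sa.sa_family = AF_UNSPEC;
+	 *	sa.sa_len = sizeof(sa);
+	 *	sd = (struct iso88025_sockaddr_data *)sa.sa_data;
+	 *	sd->ac = TR_AC;
+	 *	sd->fc = TR_LLC_FRAME;
+	 *	memcpy(sd->ether_dhost, dst_mac, ISO88025_ADDR_LEN);
+	 *	memcpy(sd->ether_shost, IF_LLADDR(ifp), ISO88025_ADDR_LEN);
+	 *	ifp->if_output(ifp, m, &sa, NULL);
+	 */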
+
+ /*
+ * Add LLC header.
+ */
+ if (snap_type != 0) {
+ struct llc *l;
+ M_PREPEND(m, LLC_SNAPFRAMELEN, M_DONTWAIT);
+		if (m == NULL)
+ senderr(ENOBUFS);
+ l = mtod(m, struct llc *);
+ l->llc_control = LLC_UI;
+ l->llc_dsap = l->llc_ssap = LLC_SNAP_LSAP;
+ l->llc_snap.org_code[0] =
+ l->llc_snap.org_code[1] =
+ l->llc_snap.org_code[2] = 0;
+ l->llc_snap.ether_type = htons(snap_type);
+ }
+
+ /*
+ * Add local net header. If no space in first mbuf,
+ * allocate another.
+ */
+ M_PREPEND(m, ISO88025_HDR_LEN + rif_len, M_DONTWAIT);
+	if (m == NULL)
+ senderr(ENOBUFS);
+ th = mtod(m, struct iso88025_header *);
+ bcopy((caddr_t)edst, (caddr_t)&gen_th.iso88025_dhost, ISO88025_ADDR_LEN);
+
+ /* Copy as much of the generic header as is needed into the mbuf */
+ memcpy(th, &gen_th, ISO88025_HDR_LEN + rif_len);
+
+ /*
+ * If a simplex interface, and the packet is being sent to our
+ * Ethernet address or a broadcast address, loopback a copy.
+ * XXX To make a simplex device behave exactly like a duplex
+ * device, we should copy in the case of sending to our own
+ * ethernet address (thus letting the original actually appear
+ * on the wire). However, we don't do that here for security
+ * reasons and compatibility with the original behavior.
+ */
+ if ((ifp->if_flags & IFF_SIMPLEX) && (loop_copy != -1)) {
+ if ((m->m_flags & M_BCAST) || (loop_copy > 0)) {
+ struct mbuf *n;
+ n = m_copy(m, 0, (int)M_COPYALL);
+ (void) if_simloop(ifp, n, dst->sa_family,
+ ISO88025_HDR_LEN);
+ } else if (bcmp(th->iso88025_dhost, th->iso88025_shost,
+ ETHER_ADDR_LEN) == 0) {
+ (void) if_simloop(ifp, m, dst->sa_family,
+ ISO88025_HDR_LEN);
+			return (0);	/* XXX */
+ }
+ }
+
+ IFQ_HANDOFF_ADJ(ifp, m, ISO88025_HDR_LEN + LLC_SNAPFRAMELEN, error);
+ if (error) {
+ printf("iso88025_output: packet dropped QFULL.\n");
+ ifp->if_oerrors++;
+ }
+ return (error);
+
+bad:
+ ifp->if_oerrors++;
+ if (m)
+ m_freem(m);
+ return (error);
+}
+
+/*
+ * ISO 88025 de-encapsulation
+ */
+void
+iso88025_input(struct ifnet *ifp, struct mbuf *m)
+{
+ struct iso88025_header *th;
+ struct llc *l;
+ int isr;
+ int mac_hdr_len;
+
+ /*
+ * Do consistency checks to verify assumptions
+ * made by code past this point.
+ */
+ if ((m->m_flags & M_PKTHDR) == 0) {
+ if_printf(ifp, "discard frame w/o packet header\n");
+ ifp->if_ierrors++;
+ m_freem(m);
+ return;
+ }
+ if (m->m_pkthdr.rcvif == NULL) {
+ if_printf(ifp, "discard frame w/o interface pointer\n");
+ ifp->if_ierrors++;
+ m_freem(m);
+ return;
+ }
+
+ m = m_pullup(m, ISO88025_HDR_LEN);
+ if (m == NULL) {
+ ifp->if_ierrors++;
+ goto dropanyway;
+ }
+ th = mtod(m, struct iso88025_header *);
+ m->m_pkthdr.header = (void *)th;
+
+ /*
+ * Discard packet if interface is not up.
+ */
+ if (!((ifp->if_flags & IFF_UP) &&
+ (ifp->if_drv_flags & IFF_DRV_RUNNING)))
+ goto dropanyway;
+
+ /*
+ * Give bpf a chance at the packet.
+ */
+ BPF_MTAP(ifp, m);
+
+ /*
+ * Interface marked for monitoring; discard packet.
+ */
+ if (ifp->if_flags & IFF_MONITOR) {
+ m_freem(m);
+ return;
+ }
+
+#ifdef MAC
+ mac_ifnet_create_mbuf(ifp, m);
+#endif
+
+ /*
+ * Update interface statistics.
+ */
+ ifp->if_ibytes += m->m_pkthdr.len;
+ getmicrotime(&ifp->if_lastchange);
+
+	/*
+	 * Discard non-local unicast packets when the interface
+	 * is in promiscuous mode.
+	 */
+ if ((ifp->if_flags & IFF_PROMISC) &&
+ ((th->iso88025_dhost[0] & 1) == 0) &&
+ (bcmp(IF_LLADDR(ifp), (caddr_t) th->iso88025_dhost,
+ ISO88025_ADDR_LEN) != 0))
+ goto dropanyway;
+
+ /*
+ * Set mbuf flags for bcast/mcast.
+ */
+ if (th->iso88025_dhost[0] & 1) {
+ if (bcmp(iso88025_broadcastaddr, th->iso88025_dhost,
+ ISO88025_ADDR_LEN) == 0)
+ m->m_flags |= M_BCAST;
+ else
+ m->m_flags |= M_MCAST;
+ ifp->if_imcasts++;
+ }
+
+ mac_hdr_len = ISO88025_HDR_LEN;
+ /* Check for source routing info */
+ if (th->iso88025_shost[0] & TR_RII)
+ mac_hdr_len += TR_RCF_RIFLEN(th->rcf);
+
+ /* Strip off ISO88025 header. */
+ m_adj(m, mac_hdr_len);
+
+ m = m_pullup(m, LLC_SNAPFRAMELEN);
+	if (m == NULL) {
+ ifp->if_ierrors++;
+ goto dropanyway;
+ }
+ l = mtod(m, struct llc *);
+
+ switch (l->llc_dsap) {
+#ifdef IPX
+ case ETHERTYPE_IPX_8022: /* Thanks a bunch Novell */
+ if ((l->llc_control != LLC_UI) ||
+ (l->llc_ssap != ETHERTYPE_IPX_8022)) {
+ ifp->if_noproto++;
+ goto dropanyway;
+ }
+
+ th->iso88025_shost[0] &= ~(TR_RII);
+ m_adj(m, 3);
+ isr = NETISR_IPX;
+ break;
+#endif /* IPX */
+ case LLC_SNAP_LSAP: {
+ u_int16_t type;
+ if ((l->llc_control != LLC_UI) ||
+ (l->llc_ssap != LLC_SNAP_LSAP)) {
+ ifp->if_noproto++;
+ goto dropanyway;
+ }
+
+ if (l->llc_snap.org_code[0] != 0 ||
+ l->llc_snap.org_code[1] != 0 ||
+ l->llc_snap.org_code[2] != 0) {
+ ifp->if_noproto++;
+ goto dropanyway;
+ }
+
+ type = ntohs(l->llc_snap.ether_type);
+ m_adj(m, LLC_SNAPFRAMELEN);
+ switch (type) {
+#ifdef INET
+ case ETHERTYPE_IP:
+ th->iso88025_shost[0] &= ~(TR_RII);
+ if ((m = ip_fastforward(m)) == NULL)
+ return;
+ isr = NETISR_IP;
+ break;
+
+ case ETHERTYPE_ARP:
+ if (ifp->if_flags & IFF_NOARP)
+ goto dropanyway;
+ isr = NETISR_ARP;
+ break;
+#endif /* INET */
+#ifdef IPX_SNAP /* XXX: Not supported! */
+ case ETHERTYPE_IPX:
+ th->iso88025_shost[0] &= ~(TR_RII);
+ isr = NETISR_IPX;
+ break;
+#endif /* IPX_SNAP */
+#ifdef INET6
+ case ETHERTYPE_IPV6:
+ th->iso88025_shost[0] &= ~(TR_RII);
+ isr = NETISR_IPV6;
+ break;
+#endif /* INET6 */
+ default:
+ printf("iso88025_input: unexpected llc_snap ether_type 0x%02x\n", type);
+ ifp->if_noproto++;
+ goto dropanyway;
+ }
+ break;
+ }
+#ifdef ISO
+ case LLC_ISO_LSAP:
+ switch (l->llc_control) {
+ case LLC_UI:
+ ifp->if_noproto++;
+ goto dropanyway;
+ break;
+ case LLC_XID:
+ case LLC_XID_P:
+ if(m->m_len < ISO88025_ADDR_LEN)
+ goto dropanyway;
+ l->llc_window = 0;
+ l->llc_fid = 9;
+ l->llc_class = 1;
+ l->llc_dsap = l->llc_ssap = 0;
+			/* Fall through to the LLC_TEST handling. */
+ case LLC_TEST:
+ case LLC_TEST_P:
+ {
+ struct sockaddr sa;
+ struct arpcom *ac;
+ struct iso88025_sockaddr_data *th2;
+ int i;
+ u_char c;
+
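+			/*
+			 * Turn the frame around: swap the SAPs and the MAC
+			 * addresses and transmit the TEST/XID response back
+			 * to the sender.
+			 */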
+ c = l->llc_dsap;
+
+ if (th->iso88025_shost[0] & TR_RII) { /* XXX */
+ printf("iso88025_input: dropping source routed LLC_TEST\n");
+ goto dropanyway;
+ }
+ l->llc_dsap = l->llc_ssap;
+ l->llc_ssap = c;
+ if (m->m_flags & (M_BCAST | M_MCAST))
+ bcopy((caddr_t)IF_LLADDR(ifp),
+ (caddr_t)th->iso88025_dhost,
+ ISO88025_ADDR_LEN);
+ sa.sa_family = AF_UNSPEC;
+ sa.sa_len = sizeof(sa);
+ th2 = (struct iso88025_sockaddr_data *)sa.sa_data;
+ for (i = 0; i < ISO88025_ADDR_LEN; i++) {
+ th2->ether_shost[i] = c = th->iso88025_dhost[i];
+ th2->ether_dhost[i] = th->iso88025_dhost[i] =
+ th->iso88025_shost[i];
+ th->iso88025_shost[i] = c;
+ }
+ th2->ac = TR_AC;
+ th2->fc = TR_LLC_FRAME;
+ ifp->if_output(ifp, m, &sa, NULL);
+ return;
+ }
+ default:
+ printf("iso88025_input: unexpected llc control 0x%02x\n", l->llc_control);
+ ifp->if_noproto++;
+ goto dropanyway;
+ break;
+ }
+ break;
+#endif /* ISO */
+ default:
+ printf("iso88025_input: unknown dsap 0x%x\n", l->llc_dsap);
+ ifp->if_noproto++;
+ goto dropanyway;
+ break;
+ }
+
+ netisr_dispatch(isr, m);
+ return;
+
+dropanyway:
+ ifp->if_iqdrops++;
+ if (m)
+ m_freem(m);
+ return;
+}
+
+static int
+iso88025_resolvemulti(struct ifnet *ifp, struct sockaddr **llsa,
+    struct sockaddr *sa)
+{
+ struct sockaddr_dl *sdl;
+#ifdef INET
+ struct sockaddr_in *sin;
+#endif
+#ifdef INET6
+ struct sockaddr_in6 *sin6;
+#endif
+ u_char *e_addr;
+
+ switch(sa->sa_family) {
+ case AF_LINK:
+ /*
+ * No mapping needed. Just check that it's a valid MC address.
+ */
+ sdl = (struct sockaddr_dl *)sa;
+ e_addr = LLADDR(sdl);
+ if ((e_addr[0] & 1) != 1) {
+ return (EADDRNOTAVAIL);
+ }
+		*llsa = NULL;
+ return (0);
+
+#ifdef INET
+ case AF_INET:
+ sin = (struct sockaddr_in *)sa;
+ if (!IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) {
+ return (EADDRNOTAVAIL);
+ }
+ sdl = malloc(sizeof *sdl, M_IFMADDR,
+ M_NOWAIT|M_ZERO);
+ if (sdl == NULL)
+ return (ENOMEM);
+ sdl->sdl_len = sizeof *sdl;
+ sdl->sdl_family = AF_LINK;
+ sdl->sdl_index = ifp->if_index;
+ sdl->sdl_type = IFT_ISO88025;
+ sdl->sdl_alen = ISO88025_ADDR_LEN;
+ e_addr = LLADDR(sdl);
+ ETHER_MAP_IP_MULTICAST(&sin->sin_addr, e_addr);
+ *llsa = (struct sockaddr *)sdl;
+ return (0);
+#endif
+#ifdef INET6
+ case AF_INET6:
+ sin6 = (struct sockaddr_in6 *)sa;
+ if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
+			/*
+			 * An IPv6 address of all zeros means listen to all
+			 * of the Ethernet multicast addresses used for IPv6.
+			 * (This is used for multicast routers.)
+			 */
+ ifp->if_flags |= IFF_ALLMULTI;
+			*llsa = NULL;
+ return (0);
+ }
+ if (!IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) {
+ return (EADDRNOTAVAIL);
+ }
+ sdl = malloc(sizeof *sdl, M_IFMADDR,
+ M_NOWAIT|M_ZERO);
+ if (sdl == NULL)
+ return (ENOMEM);
+ sdl->sdl_len = sizeof *sdl;
+ sdl->sdl_family = AF_LINK;
+ sdl->sdl_index = ifp->if_index;
+ sdl->sdl_type = IFT_ISO88025;
+ sdl->sdl_alen = ISO88025_ADDR_LEN;
+ e_addr = LLADDR(sdl);
+ ETHER_MAP_IPV6_MULTICAST(&sin6->sin6_addr, e_addr);
+ *llsa = (struct sockaddr *)sdl;
+ return (0);
+#endif
+
+ default:
+ /*
+ * Well, the text isn't quite right, but it's the name
+ * that counts...
+ */
+ return (EAFNOSUPPORT);
+ }
+
+ return (0);
+}
+
+MALLOC_DEFINE(M_ISO88025, "arpcom", "802.5 interface internals");
+
+static void *
+iso88025_alloc(u_char type, struct ifnet *ifp)
+{
+ struct arpcom *ac;
+
+ ac = malloc(sizeof(struct arpcom), M_ISO88025, M_WAITOK | M_ZERO);
+ ac->ac_ifp = ifp;
+
+ return (ac);
+}
+
+static void
+iso88025_free(void *com, u_char type)
+{
+
+ free(com, M_ISO88025);
+}
+
+static int
+iso88025_modevent(module_t mod, int type, void *data)
+{
+
+ switch (type) {
+ case MOD_LOAD:
+ if_register_com_alloc(IFT_ISO88025, iso88025_alloc,
+ iso88025_free);
+ break;
+ case MOD_UNLOAD:
+ if_deregister_com_alloc(IFT_ISO88025);
+ break;
+ default:
+		return (EOPNOTSUPP);
+ }
+
+ return (0);
+}
+
+static moduledata_t iso88025_mod = {
+ "iso88025",
+ iso88025_modevent,
+ 0
+};
+
+DECLARE_MODULE(iso88025, iso88025_mod, SI_SUB_PSEUDO, SI_ORDER_ANY);
+MODULE_VERSION(iso88025, 1);
diff --git a/rtems/freebsd/net/if_lagg.c b/rtems/freebsd/net/if_lagg.c
new file mode 100644
index 00000000..1f04ef73
--- /dev/null
+++ b/rtems/freebsd/net/if_lagg.c
@@ -0,0 +1,1808 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/* $OpenBSD: if_trunk.c,v 1.30 2007/01/31 06:20:19 reyk Exp $ */
+
+/*
+ * Copyright (c) 2005, 2006 Reyk Floeter <reyk@openbsd.org>
+ * Copyright (c) 2007 Andrew Thompson <thompsa@FreeBSD.org>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/freebsd/local/opt_inet.h>
+#include <rtems/freebsd/local/opt_inet6.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/queue.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/sockio.h>
+#include <rtems/freebsd/sys/sysctl.h>
+#include <rtems/freebsd/sys/module.h>
+#include <rtems/freebsd/sys/priv.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/proc.h>
+#include <rtems/freebsd/sys/hash.h>
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/rwlock.h>
+#include <rtems/freebsd/sys/taskqueue.h>
+#include <rtems/freebsd/sys/eventhandler.h>
+
+#include <rtems/freebsd/net/ethernet.h>
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/if_clone.h>
+#include <rtems/freebsd/net/if_arp.h>
+#include <rtems/freebsd/net/if_dl.h>
+#include <rtems/freebsd/net/if_llc.h>
+#include <rtems/freebsd/net/if_media.h>
+#include <rtems/freebsd/net/if_types.h>
+#include <rtems/freebsd/net/if_var.h>
+#include <rtems/freebsd/net/bpf.h>
+
+#ifdef INET
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/in_systm.h>
+#include <rtems/freebsd/netinet/if_ether.h>
+#include <rtems/freebsd/netinet/ip.h>
+#endif
+
+#ifdef INET6
+#include <rtems/freebsd/netinet/ip6.h>
+#endif
+
+#include <rtems/freebsd/net/if_vlan_var.h>
+#include <rtems/freebsd/net/if_lagg.h>
+#include <rtems/freebsd/net/ieee8023ad_lacp.h>
+
+/* Special flags we should propagate to the lagg ports. */
+static struct {
+ int flag;
+ int (*func)(struct ifnet *, int);
+} lagg_pflags[] = {
+ {IFF_PROMISC, ifpromisc},
+ {IFF_ALLMULTI, if_allmulti},
+ {0, NULL}
+};
+
+SLIST_HEAD(__trhead, lagg_softc) lagg_list; /* list of laggs */
+static struct mtx lagg_list_mtx;
+eventhandler_tag lagg_detach_cookie = NULL;
+
+static int lagg_clone_create(struct if_clone *, int, caddr_t);
+static void lagg_clone_destroy(struct ifnet *);
+static void lagg_lladdr(struct lagg_softc *, uint8_t *);
+static void lagg_capabilities(struct lagg_softc *);
+static void lagg_port_lladdr(struct lagg_port *, uint8_t *);
+static void lagg_port_setlladdr(void *, int);
+static int lagg_port_create(struct lagg_softc *, struct ifnet *);
+static int lagg_port_destroy(struct lagg_port *, int);
+static struct mbuf *lagg_input(struct ifnet *, struct mbuf *);
+static void lagg_linkstate(struct lagg_softc *);
+static void lagg_port_state(struct ifnet *, int);
+static int lagg_port_ioctl(struct ifnet *, u_long, caddr_t);
+static int lagg_port_output(struct ifnet *, struct mbuf *,
+ struct sockaddr *, struct route *);
+static void lagg_port_ifdetach(void *arg __unused, struct ifnet *);
+static int lagg_port_checkstacking(struct lagg_softc *);
+static void lagg_port2req(struct lagg_port *, struct lagg_reqport *);
+static void lagg_init(void *);
+static void lagg_stop(struct lagg_softc *);
+static int lagg_ioctl(struct ifnet *, u_long, caddr_t);
+static int lagg_ether_setmulti(struct lagg_softc *);
+static int lagg_ether_cmdmulti(struct lagg_port *, int);
+static int lagg_setflag(struct lagg_port *, int, int,
+ int (*func)(struct ifnet *, int));
+static int lagg_setflags(struct lagg_port *, int status);
+static void lagg_start(struct ifnet *);
+static int lagg_media_change(struct ifnet *);
+static void lagg_media_status(struct ifnet *, struct ifmediareq *);
+static struct lagg_port *lagg_link_active(struct lagg_softc *,
+ struct lagg_port *);
+static const void *lagg_gethdr(struct mbuf *, u_int, u_int, void *);
+
+IFC_SIMPLE_DECLARE(lagg, 0);
+
+/* Simple round robin */
+static int lagg_rr_attach(struct lagg_softc *);
+static int lagg_rr_detach(struct lagg_softc *);
+static int lagg_rr_start(struct lagg_softc *, struct mbuf *);
+static struct mbuf *lagg_rr_input(struct lagg_softc *, struct lagg_port *,
+ struct mbuf *);
+
+/* Active failover */
+static int lagg_fail_attach(struct lagg_softc *);
+static int lagg_fail_detach(struct lagg_softc *);
+static int lagg_fail_start(struct lagg_softc *, struct mbuf *);
+static struct mbuf *lagg_fail_input(struct lagg_softc *, struct lagg_port *,
+ struct mbuf *);
+
+/* Loadbalancing */
+static int lagg_lb_attach(struct lagg_softc *);
+static int lagg_lb_detach(struct lagg_softc *);
+static int lagg_lb_port_create(struct lagg_port *);
+static void lagg_lb_port_destroy(struct lagg_port *);
+static int lagg_lb_start(struct lagg_softc *, struct mbuf *);
+static struct mbuf *lagg_lb_input(struct lagg_softc *, struct lagg_port *,
+ struct mbuf *);
+static int lagg_lb_porttable(struct lagg_softc *, struct lagg_port *);
+
+/* 802.3ad LACP */
+static int lagg_lacp_attach(struct lagg_softc *);
+static int lagg_lacp_detach(struct lagg_softc *);
+static int lagg_lacp_start(struct lagg_softc *, struct mbuf *);
+static struct mbuf *lagg_lacp_input(struct lagg_softc *, struct lagg_port *,
+ struct mbuf *);
+static void lagg_lacp_lladdr(struct lagg_softc *);
+
+/* lagg protocol table */
+static const struct {
+ int ti_proto;
+ int (*ti_attach)(struct lagg_softc *);
+} lagg_protos[] = {
+ { LAGG_PROTO_ROUNDROBIN, lagg_rr_attach },
+ { LAGG_PROTO_FAILOVER, lagg_fail_attach },
+ { LAGG_PROTO_LOADBALANCE, lagg_lb_attach },
+ { LAGG_PROTO_ETHERCHANNEL, lagg_lb_attach },
+ { LAGG_PROTO_LACP, lagg_lacp_attach },
+ { LAGG_PROTO_NONE, NULL }
+};
+
+SYSCTL_DECL(_net_link);
+SYSCTL_NODE(_net_link, OID_AUTO, lagg, CTLFLAG_RW, 0, "Link Aggregation");
+
+static int lagg_failover_rx_all = 0; /* Allow input on any failover links */
+SYSCTL_INT(_net_link_lagg, OID_AUTO, failover_rx_all, CTLFLAG_RW,
+ &lagg_failover_rx_all, 0,
+ "Accept input from any interface in a failover lagg");
+
+static int
+lagg_modevent(module_t mod, int type, void *data)
+{
+
+ switch (type) {
+ case MOD_LOAD:
+ mtx_init(&lagg_list_mtx, "if_lagg list", NULL, MTX_DEF);
+ SLIST_INIT(&lagg_list);
+ if_clone_attach(&lagg_cloner);
+ lagg_input_p = lagg_input;
+ lagg_linkstate_p = lagg_port_state;
+ lagg_detach_cookie = EVENTHANDLER_REGISTER(
+ ifnet_departure_event, lagg_port_ifdetach, NULL,
+ EVENTHANDLER_PRI_ANY);
+ break;
+ case MOD_UNLOAD:
+ EVENTHANDLER_DEREGISTER(ifnet_departure_event,
+ lagg_detach_cookie);
+ if_clone_detach(&lagg_cloner);
+ lagg_input_p = NULL;
+ lagg_linkstate_p = NULL;
+ mtx_destroy(&lagg_list_mtx);
+ break;
+ default:
+ return (EOPNOTSUPP);
+ }
+ return (0);
+}
+
+static moduledata_t lagg_mod = {
+ "if_lagg",
+ lagg_modevent,
+ 0
+};
+
+DECLARE_MODULE(if_lagg, lagg_mod, SI_SUB_PSEUDO, SI_ORDER_ANY);
+
+#if __FreeBSD_version >= 800000
+/*
+ * This routine is run via a vlan config EVENT.
+ */
+static void
+lagg_register_vlan(void *arg, struct ifnet *ifp, u_int16_t vtag)
+{
+ struct lagg_softc *sc = ifp->if_softc;
+ struct lagg_port *lp;
+
+ if (ifp->if_softc != arg) /* Not our event */
+ return;
+
+ LAGG_RLOCK(sc);
+ if (!SLIST_EMPTY(&sc->sc_ports)) {
+ SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
+ EVENTHANDLER_INVOKE(vlan_config, lp->lp_ifp, vtag);
+ }
+ LAGG_RUNLOCK(sc);
+}
+
+/*
+ * This routine is run via a vlan unconfig EVENT.
+ */
+static void
+lagg_unregister_vlan(void *arg, struct ifnet *ifp, u_int16_t vtag)
+{
+ struct lagg_softc *sc = ifp->if_softc;
+ struct lagg_port *lp;
+
+ if (ifp->if_softc != arg) /* Not our event */
+ return;
+
+ LAGG_RLOCK(sc);
+ if (!SLIST_EMPTY(&sc->sc_ports)) {
+ SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
+ EVENTHANDLER_INVOKE(vlan_unconfig, lp->lp_ifp, vtag);
+ }
+ LAGG_RUNLOCK(sc);
+}
+#endif
+
+static int
+lagg_clone_create(struct if_clone *ifc, int unit, caddr_t params)
+{
+ struct lagg_softc *sc;
+ struct ifnet *ifp;
+ int i, error = 0;
+ static const u_char eaddr[6]; /* 00:00:00:00:00:00 */
+
+ sc = malloc(sizeof(*sc), M_DEVBUF, M_WAITOK|M_ZERO);
+ ifp = sc->sc_ifp = if_alloc(IFT_ETHER);
+ if (ifp == NULL) {
+ free(sc, M_DEVBUF);
+ return (ENOSPC);
+ }
+
+ sc->sc_proto = LAGG_PROTO_NONE;
+ for (i = 0; lagg_protos[i].ti_proto != LAGG_PROTO_NONE; i++) {
+ if (lagg_protos[i].ti_proto == LAGG_PROTO_DEFAULT) {
+ sc->sc_proto = lagg_protos[i].ti_proto;
+ if ((error = lagg_protos[i].ti_attach(sc)) != 0) {
+ if_free_type(ifp, IFT_ETHER);
+ free(sc, M_DEVBUF);
+ return (error);
+ }
+ break;
+ }
+ }
+ LAGG_LOCK_INIT(sc);
+ SLIST_INIT(&sc->sc_ports);
+ TASK_INIT(&sc->sc_lladdr_task, 0, lagg_port_setlladdr, sc);
+
+ /* Initialise pseudo media types */
+ ifmedia_init(&sc->sc_media, 0, lagg_media_change,
+ lagg_media_status);
+ ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
+ ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
+
+ if_initname(ifp, ifc->ifc_name, unit);
+ ifp->if_type = IFT_ETHER;
+ ifp->if_softc = sc;
+ ifp->if_start = lagg_start;
+ ifp->if_init = lagg_init;
+ ifp->if_ioctl = lagg_ioctl;
+ ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST;
+
+ IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
+ ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
+ IFQ_SET_READY(&ifp->if_snd);
+
+	/*
+	 * Attach as an ordinary ethernet device; child ports will be
+	 * attached as the special device type IFT_IEEE8023ADLAG.
+	 */
+ ether_ifattach(ifp, eaddr);
+
+#if __FreeBSD_version >= 800000
+ sc->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
+ lagg_register_vlan, sc, EVENTHANDLER_PRI_FIRST);
+ sc->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
+ lagg_unregister_vlan, sc, EVENTHANDLER_PRI_FIRST);
+#endif
+
+ /* Insert into the global list of laggs */
+ mtx_lock(&lagg_list_mtx);
+ SLIST_INSERT_HEAD(&lagg_list, sc, sc_entries);
+ mtx_unlock(&lagg_list_mtx);
+
+ return (0);
+}
+
+static void
+lagg_clone_destroy(struct ifnet *ifp)
+{
+ struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc;
+ struct lagg_port *lp;
+
+ LAGG_WLOCK(sc);
+
+ lagg_stop(sc);
+ ifp->if_flags &= ~IFF_UP;
+
+#if __FreeBSD_version >= 800000
+ EVENTHANDLER_DEREGISTER(vlan_config, sc->vlan_attach);
+ EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vlan_detach);
+#endif
+
+ /* Shutdown and remove lagg ports */
+ while ((lp = SLIST_FIRST(&sc->sc_ports)) != NULL)
+ lagg_port_destroy(lp, 1);
+ /* Unhook the aggregation protocol */
+ (*sc->sc_detach)(sc);
+
+ LAGG_WUNLOCK(sc);
+
+ ifmedia_removeall(&sc->sc_media);
+ ether_ifdetach(ifp);
+ if_free_type(ifp, IFT_ETHER);
+
+ mtx_lock(&lagg_list_mtx);
+ SLIST_REMOVE(&lagg_list, sc, lagg_softc, sc_entries);
+ mtx_unlock(&lagg_list_mtx);
+
+ taskqueue_drain(taskqueue_swi, &sc->sc_lladdr_task);
+ LAGG_LOCK_DESTROY(sc);
+ free(sc, M_DEVBUF);
+}
+
+static void
+lagg_lladdr(struct lagg_softc *sc, uint8_t *lladdr)
+{
+ struct ifnet *ifp = sc->sc_ifp;
+
+ if (memcmp(lladdr, IF_LLADDR(ifp), ETHER_ADDR_LEN) == 0)
+ return;
+
+ bcopy(lladdr, IF_LLADDR(ifp), ETHER_ADDR_LEN);
+ /* Let the protocol know the MAC has changed */
+ if (sc->sc_lladdr != NULL)
+ (*sc->sc_lladdr)(sc);
+ EVENTHANDLER_INVOKE(iflladdr_event, ifp);
+}
+
+static void
+lagg_capabilities(struct lagg_softc *sc)
+{
+ struct lagg_port *lp;
+ int cap = ~0, ena = ~0;
+ u_long hwa = ~0UL;
+
+ LAGG_WLOCK_ASSERT(sc);
+
+ /* Get capabilities from the lagg ports */
+ SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
+ cap &= lp->lp_ifp->if_capabilities;
+ ena &= lp->lp_ifp->if_capenable;
+ hwa &= lp->lp_ifp->if_hwassist;
+ }
+ cap = (cap == ~0 ? 0 : cap);
+ ena = (ena == ~0 ? 0 : ena);
+ hwa = (hwa == ~0 ? 0 : hwa);
+
+ if (sc->sc_ifp->if_capabilities != cap ||
+ sc->sc_ifp->if_capenable != ena ||
+ sc->sc_ifp->if_hwassist != hwa) {
+ sc->sc_ifp->if_capabilities = cap;
+ sc->sc_ifp->if_capenable = ena;
+ sc->sc_ifp->if_hwassist = hwa;
+ getmicrotime(&sc->sc_ifp->if_lastchange);
+
+ if (sc->sc_ifflags & IFF_DEBUG)
+ if_printf(sc->sc_ifp,
+ "capabilities 0x%08x enabled 0x%08x\n", cap, ena);
+ }
+}
+
+static void
+lagg_port_lladdr(struct lagg_port *lp, uint8_t *lladdr)
+{
+ struct lagg_softc *sc = lp->lp_softc;
+ struct ifnet *ifp = lp->lp_ifp;
+ struct lagg_llq *llq;
+ int pending = 0;
+
+ LAGG_WLOCK_ASSERT(sc);
+
+ if (lp->lp_detaching ||
+ memcmp(lladdr, IF_LLADDR(ifp), ETHER_ADDR_LEN) == 0)
+ return;
+
+	/* Check to make sure it's not already queued to be changed */
+ SLIST_FOREACH(llq, &sc->sc_llq_head, llq_entries) {
+ if (llq->llq_ifp == ifp) {
+ pending = 1;
+ break;
+ }
+ }
+
+ if (!pending) {
+ llq = malloc(sizeof(struct lagg_llq), M_DEVBUF, M_NOWAIT);
+ if (llq == NULL) /* XXX what to do */
+ return;
+ }
+
+	/* Update the lladdr even if pending; it may have changed */
+ llq->llq_ifp = ifp;
+ bcopy(lladdr, llq->llq_lladdr, ETHER_ADDR_LEN);
+
+ if (!pending)
+ SLIST_INSERT_HEAD(&sc->sc_llq_head, llq, llq_entries);
+
+ taskqueue_enqueue(taskqueue_swi, &sc->sc_lladdr_task);
+}
+
+/*
+ * Set the interface MAC address from a taskqueue to avoid a LOR.
+ */
+static void
+lagg_port_setlladdr(void *arg, int pending)
+{
+ struct lagg_softc *sc = (struct lagg_softc *)arg;
+ struct lagg_llq *llq, *head;
+ struct ifnet *ifp;
+ int error;
+
+ /* Grab a local reference of the queue and remove it from the softc */
+ LAGG_WLOCK(sc);
+ head = SLIST_FIRST(&sc->sc_llq_head);
+ SLIST_FIRST(&sc->sc_llq_head) = NULL;
+ LAGG_WUNLOCK(sc);
+
+ /*
+	 * Traverse the queue and set the lladdr on each ifp.  It is safe
+	 * to do this unlocked as we have the only reference to it.
+ */
+ for (llq = head; llq != NULL; llq = head) {
+ ifp = llq->llq_ifp;
+
+ /* Set the link layer address */
+ error = if_setlladdr(ifp, llq->llq_lladdr, ETHER_ADDR_LEN);
+ if (error)
+ printf("%s: setlladdr failed on %s\n", __func__,
+ ifp->if_xname);
+
+ head = SLIST_NEXT(llq, llq_entries);
+ free(llq, M_DEVBUF);
+ }
+}
+
+static int
+lagg_port_create(struct lagg_softc *sc, struct ifnet *ifp)
+{
+ struct lagg_softc *sc_ptr;
+ struct lagg_port *lp;
+ int error = 0;
+
+ LAGG_WLOCK_ASSERT(sc);
+
+	/* Limit the maximum number of lagg ports */
+ if (sc->sc_count >= LAGG_MAX_PORTS)
+ return (ENOSPC);
+
+	/* Check if the port has already been associated with a lagg */
+ if (ifp->if_lagg != NULL)
+ return (EBUSY);
+
+ /* XXX Disallow non-ethernet interfaces (this should be any of 802) */
+ if (ifp->if_type != IFT_ETHER)
+ return (EPROTONOSUPPORT);
+
+ /* Allow the first Ethernet member to define the MTU */
+ if (SLIST_EMPTY(&sc->sc_ports))
+ sc->sc_ifp->if_mtu = ifp->if_mtu;
+ else if (sc->sc_ifp->if_mtu != ifp->if_mtu) {
+ if_printf(sc->sc_ifp, "invalid MTU for %s\n",
+ ifp->if_xname);
+ return (EINVAL);
+ }
+
+ if ((lp = malloc(sizeof(struct lagg_port),
+ M_DEVBUF, M_NOWAIT|M_ZERO)) == NULL)
+ return (ENOMEM);
+
+ /* Check if port is a stacked lagg */
+ mtx_lock(&lagg_list_mtx);
+ SLIST_FOREACH(sc_ptr, &lagg_list, sc_entries) {
+ if (ifp == sc_ptr->sc_ifp) {
+ mtx_unlock(&lagg_list_mtx);
+ free(lp, M_DEVBUF);
+ return (EINVAL);
+			/* XXX disable stacking for the moment, it's untested
+ lp->lp_flags |= LAGG_PORT_STACK;
+ if (lagg_port_checkstacking(sc_ptr) >=
+ LAGG_MAX_STACKING) {
+ mtx_unlock(&lagg_list_mtx);
+ free(lp, M_DEVBUF);
+ return (E2BIG);
+ }
+ */
+ }
+ }
+ mtx_unlock(&lagg_list_mtx);
+
+ /* Change the interface type */
+ lp->lp_iftype = ifp->if_type;
+ ifp->if_type = IFT_IEEE8023ADLAG;
+ ifp->if_lagg = lp;
+ lp->lp_ioctl = ifp->if_ioctl;
+ ifp->if_ioctl = lagg_port_ioctl;
+ lp->lp_output = ifp->if_output;
+ ifp->if_output = lagg_port_output;
+
+ lp->lp_ifp = ifp;
+ lp->lp_softc = sc;
+
+ /* Save port link layer address */
+ bcopy(IF_LLADDR(ifp), lp->lp_lladdr, ETHER_ADDR_LEN);
+
+ if (SLIST_EMPTY(&sc->sc_ports)) {
+ sc->sc_primary = lp;
+ lagg_lladdr(sc, IF_LLADDR(ifp));
+ } else {
+ /* Update link layer address for this port */
+ lagg_port_lladdr(lp, IF_LLADDR(sc->sc_ifp));
+ }
+
+ /* Insert into the list of ports */
+ SLIST_INSERT_HEAD(&sc->sc_ports, lp, lp_entries);
+ sc->sc_count++;
+
+ /* Update lagg capabilities */
+ lagg_capabilities(sc);
+ lagg_linkstate(sc);
+
+ /* Add multicast addresses and interface flags to this port */
+ lagg_ether_cmdmulti(lp, 1);
+ lagg_setflags(lp, 1);
+
+ if (sc->sc_port_create != NULL)
+ error = (*sc->sc_port_create)(lp);
+ if (error) {
+ /* remove the port again, without calling sc_port_destroy */
+ lagg_port_destroy(lp, 0);
+ return (error);
+ }
+
+ return (error);
+}
+
+static int
+lagg_port_checkstacking(struct lagg_softc *sc)
+{
+ struct lagg_softc *sc_ptr;
+ struct lagg_port *lp;
+ int m = 0;
+
+ LAGG_WLOCK_ASSERT(sc);
+
+ SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
+ if (lp->lp_flags & LAGG_PORT_STACK) {
+ sc_ptr = (struct lagg_softc *)lp->lp_ifp->if_softc;
+ m = MAX(m, lagg_port_checkstacking(sc_ptr));
+ }
+ }
+
+ return (m + 1);
+}
+
+static int
+lagg_port_destroy(struct lagg_port *lp, int runpd)
+{
+ struct lagg_softc *sc = lp->lp_softc;
+ struct lagg_port *lp_ptr;
+ struct lagg_llq *llq;
+ struct ifnet *ifp = lp->lp_ifp;
+
+ LAGG_WLOCK_ASSERT(sc);
+
+ if (runpd && sc->sc_port_destroy != NULL)
+ (*sc->sc_port_destroy)(lp);
+
+	/*
+	 * Remove multicast addresses and interface flags from this port
+	 * and reset the MAC address; skip this if the interface is being
+	 * detached.
+	 */
+ if (!lp->lp_detaching) {
+ lagg_ether_cmdmulti(lp, 0);
+ lagg_setflags(lp, 0);
+ lagg_port_lladdr(lp, lp->lp_lladdr);
+ }
+
+ /* Restore interface */
+ ifp->if_type = lp->lp_iftype;
+ ifp->if_ioctl = lp->lp_ioctl;
+ ifp->if_output = lp->lp_output;
+ ifp->if_lagg = NULL;
+
+ /* Finally, remove the port from the lagg */
+ SLIST_REMOVE(&sc->sc_ports, lp, lagg_port, lp_entries);
+ sc->sc_count--;
+
+ /* Update the primary interface */
+ if (lp == sc->sc_primary) {
+ uint8_t lladdr[ETHER_ADDR_LEN];
+
+ if ((lp_ptr = SLIST_FIRST(&sc->sc_ports)) == NULL) {
+ bzero(&lladdr, ETHER_ADDR_LEN);
+ } else {
+ bcopy(lp_ptr->lp_lladdr,
+ lladdr, ETHER_ADDR_LEN);
+ }
+ lagg_lladdr(sc, lladdr);
+ sc->sc_primary = lp_ptr;
+
+ /* Update link layer address for each port */
+ SLIST_FOREACH(lp_ptr, &sc->sc_ports, lp_entries)
+ lagg_port_lladdr(lp_ptr, lladdr);
+ }
+
+ /* Remove any pending lladdr changes from the queue */
+ if (lp->lp_detaching) {
+ SLIST_FOREACH(llq, &sc->sc_llq_head, llq_entries) {
+ if (llq->llq_ifp == ifp) {
+ SLIST_REMOVE(&sc->sc_llq_head, llq, lagg_llq,
+ llq_entries);
+ free(llq, M_DEVBUF);
+ break; /* Only appears once */
+ }
+ }
+ }
+
+ if (lp->lp_ifflags)
+ if_printf(ifp, "%s: lp_ifflags unclean\n", __func__);
+
+ free(lp, M_DEVBUF);
+
+ /* Update lagg capabilities */
+ lagg_capabilities(sc);
+ lagg_linkstate(sc);
+
+ return (0);
+}
+
+static int
+lagg_port_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
+{
+ struct lagg_reqport *rp = (struct lagg_reqport *)data;
+ struct lagg_softc *sc;
+ struct lagg_port *lp = NULL;
+ int error = 0;
+
+ /* Should be checked by the caller */
+ if (ifp->if_type != IFT_IEEE8023ADLAG ||
+ (lp = ifp->if_lagg) == NULL || (sc = lp->lp_softc) == NULL)
+ goto fallback;
+
+ switch (cmd) {
+ case SIOCGLAGGPORT:
+ if (rp->rp_portname[0] == '\0' ||
+ ifunit(rp->rp_portname) != ifp) {
+ error = EINVAL;
+ break;
+ }
+
+ LAGG_RLOCK(sc);
+ if ((lp = ifp->if_lagg) == NULL || lp->lp_softc != sc) {
+ error = ENOENT;
+ LAGG_RUNLOCK(sc);
+ break;
+ }
+
+ lagg_port2req(lp, rp);
+ LAGG_RUNLOCK(sc);
+ break;
+
+ case SIOCSIFCAP:
+ if (lp->lp_ioctl == NULL) {
+ error = EINVAL;
+ break;
+ }
+ error = (*lp->lp_ioctl)(ifp, cmd, data);
+ if (error)
+ break;
+
+ /* Update lagg interface capabilities */
+ LAGG_WLOCK(sc);
+ lagg_capabilities(sc);
+ LAGG_WUNLOCK(sc);
+ break;
+
+ case SIOCSIFMTU:
+ /* Do not allow the MTU to be changed once joined */
+ error = EINVAL;
+ break;
+
+ default:
+ goto fallback;
+ }
+
+ return (error);
+
+fallback:
+	if (lp != NULL && lp->lp_ioctl != NULL)
+		return ((*lp->lp_ioctl)(ifp, cmd, data));
+
+ return (EINVAL);
+}
+
+static int
+lagg_port_output(struct ifnet *ifp, struct mbuf *m,
+ struct sockaddr *dst, struct route *ro)
+{
+ struct lagg_port *lp = ifp->if_lagg;
+ struct ether_header *eh;
+ short type = 0;
+
+ switch (dst->sa_family) {
+ case pseudo_AF_HDRCMPLT:
+ case AF_UNSPEC:
+ eh = (struct ether_header *)dst->sa_data;
+ type = eh->ether_type;
+ break;
+ }
+
+	/*
+	 * Only allow ethernet types required to initiate or maintain the
+	 * link; aggregated frames take a different path.
+	 */
+ switch (ntohs(type)) {
+ case ETHERTYPE_PAE: /* EAPOL PAE/802.1x */
+ return ((*lp->lp_output)(ifp, m, dst, ro));
+ }
+
+ /* drop any other frames */
+ m_freem(m);
+ return (EBUSY);
+}
+
+static void
+lagg_port_ifdetach(void *arg __unused, struct ifnet *ifp)
+{
+ struct lagg_port *lp;
+ struct lagg_softc *sc;
+
+ if ((lp = ifp->if_lagg) == NULL)
+ return;
+
+ sc = lp->lp_softc;
+
+ LAGG_WLOCK(sc);
+ lp->lp_detaching = 1;
+ lagg_port_destroy(lp, 1);
+ LAGG_WUNLOCK(sc);
+}
+
+static void
+lagg_port2req(struct lagg_port *lp, struct lagg_reqport *rp)
+{
+ struct lagg_softc *sc = lp->lp_softc;
+
+ strlcpy(rp->rp_ifname, sc->sc_ifname, sizeof(rp->rp_ifname));
+ strlcpy(rp->rp_portname, lp->lp_ifp->if_xname, sizeof(rp->rp_portname));
+ rp->rp_prio = lp->lp_prio;
+ rp->rp_flags = lp->lp_flags;
+ if (sc->sc_portreq != NULL)
+ (*sc->sc_portreq)(lp, (caddr_t)&rp->rp_psc);
+
+ /* Add protocol specific flags */
+ switch (sc->sc_proto) {
+ case LAGG_PROTO_FAILOVER:
+ if (lp == sc->sc_primary)
+ rp->rp_flags |= LAGG_PORT_MASTER;
+ if (lp == lagg_link_active(sc, sc->sc_primary))
+ rp->rp_flags |= LAGG_PORT_ACTIVE;
+ break;
+
+ case LAGG_PROTO_ROUNDROBIN:
+ case LAGG_PROTO_LOADBALANCE:
+ case LAGG_PROTO_ETHERCHANNEL:
+ if (LAGG_PORTACTIVE(lp))
+ rp->rp_flags |= LAGG_PORT_ACTIVE;
+ break;
+
+ case LAGG_PROTO_LACP:
+ /* LACP has a different definition of active */
+ if (lacp_isactive(lp))
+ rp->rp_flags |= LAGG_PORT_ACTIVE;
+ if (lacp_iscollecting(lp))
+ rp->rp_flags |= LAGG_PORT_COLLECTING;
+ if (lacp_isdistributing(lp))
+ rp->rp_flags |= LAGG_PORT_DISTRIBUTING;
+ break;
+ }
+}
+
+static void
+lagg_init(void *xsc)
+{
+ struct lagg_softc *sc = (struct lagg_softc *)xsc;
+ struct lagg_port *lp;
+ struct ifnet *ifp = sc->sc_ifp;
+
+ if (ifp->if_drv_flags & IFF_DRV_RUNNING)
+ return;
+
+ LAGG_WLOCK(sc);
+
+ ifp->if_drv_flags |= IFF_DRV_RUNNING;
+ /* Update the port lladdrs */
+ SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
+ lagg_port_lladdr(lp, IF_LLADDR(ifp));
+
+ if (sc->sc_init != NULL)
+ (*sc->sc_init)(sc);
+
+ LAGG_WUNLOCK(sc);
+}
+
+static void
+lagg_stop(struct lagg_softc *sc)
+{
+ struct ifnet *ifp = sc->sc_ifp;
+
+ LAGG_WLOCK_ASSERT(sc);
+
+ if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
+ return;
+
+ ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
+
+ if (sc->sc_stop != NULL)
+ (*sc->sc_stop)(sc);
+}
+
+static int
+lagg_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
+{
+ struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc;
+ struct lagg_reqall *ra = (struct lagg_reqall *)data;
+ struct lagg_reqport *rp = (struct lagg_reqport *)data, rpbuf;
+ struct ifreq *ifr = (struct ifreq *)data;
+ struct lagg_port *lp;
+ struct ifnet *tpif;
+ struct thread *td = curthread;
+ char *buf, *outbuf;
+ int count, buflen, len, error = 0;
+
+ bzero(&rpbuf, sizeof(rpbuf));
+
+ switch (cmd) {
+ case SIOCGLAGG:
+ LAGG_RLOCK(sc);
+ count = 0;
+ SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
+ count++;
+ buflen = count * sizeof(struct lagg_reqport);
+ LAGG_RUNLOCK(sc);
+
+ outbuf = malloc(buflen, M_TEMP, M_WAITOK | M_ZERO);
+
+ LAGG_RLOCK(sc);
+ ra->ra_proto = sc->sc_proto;
+ if (sc->sc_req != NULL)
+ (*sc->sc_req)(sc, (caddr_t)&ra->ra_psc);
+
+ count = 0;
+ buf = outbuf;
+ len = min(ra->ra_size, buflen);
+ SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
+ if (len < sizeof(rpbuf))
+ break;
+
+ lagg_port2req(lp, &rpbuf);
+ memcpy(buf, &rpbuf, sizeof(rpbuf));
+ count++;
+ buf += sizeof(rpbuf);
+ len -= sizeof(rpbuf);
+ }
+ LAGG_RUNLOCK(sc);
+ ra->ra_ports = count;
+ ra->ra_size = count * sizeof(rpbuf);
+ error = copyout(outbuf, ra->ra_port, ra->ra_size);
+ free(outbuf, M_TEMP);
+ break;
+ case SIOCSLAGG:
+ error = priv_check(td, PRIV_NET_LAGG);
+ if (error)
+ break;
+ if (ra->ra_proto >= LAGG_PROTO_MAX) {
+ error = EPROTONOSUPPORT;
+ break;
+ }
+ if (sc->sc_proto != LAGG_PROTO_NONE) {
+ LAGG_WLOCK(sc);
+ error = sc->sc_detach(sc);
+ /* Reset protocol and pointers */
+ sc->sc_proto = LAGG_PROTO_NONE;
+ sc->sc_detach = NULL;
+ sc->sc_start = NULL;
+ sc->sc_input = NULL;
+ sc->sc_port_create = NULL;
+ sc->sc_port_destroy = NULL;
+ sc->sc_linkstate = NULL;
+ sc->sc_init = NULL;
+ sc->sc_stop = NULL;
+ sc->sc_lladdr = NULL;
+ sc->sc_req = NULL;
+ sc->sc_portreq = NULL;
+ LAGG_WUNLOCK(sc);
+ }
+ if (error != 0)
+ break;
+ for (int i = 0; i < (sizeof(lagg_protos) /
+ sizeof(lagg_protos[0])); i++) {
+ if (lagg_protos[i].ti_proto == ra->ra_proto) {
+ if (sc->sc_ifflags & IFF_DEBUG)
+ printf("%s: using proto %u\n",
+ sc->sc_ifname,
+ lagg_protos[i].ti_proto);
+ LAGG_WLOCK(sc);
+ sc->sc_proto = lagg_protos[i].ti_proto;
+ if (sc->sc_proto != LAGG_PROTO_NONE)
+ error = lagg_protos[i].ti_attach(sc);
+ LAGG_WUNLOCK(sc);
+ return (error);
+ }
+ }
+ error = EPROTONOSUPPORT;
+ break;
+ case SIOCGLAGGPORT:
+ if (rp->rp_portname[0] == '\0' ||
+ (tpif = ifunit(rp->rp_portname)) == NULL) {
+ error = EINVAL;
+ break;
+ }
+
+ LAGG_RLOCK(sc);
+ if ((lp = (struct lagg_port *)tpif->if_lagg) == NULL ||
+ lp->lp_softc != sc) {
+ error = ENOENT;
+ LAGG_RUNLOCK(sc);
+ break;
+ }
+
+ lagg_port2req(lp, rp);
+ LAGG_RUNLOCK(sc);
+ break;
+ case SIOCSLAGGPORT:
+ error = priv_check(td, PRIV_NET_LAGG);
+ if (error)
+ break;
+ if (rp->rp_portname[0] == '\0' ||
+ (tpif = ifunit(rp->rp_portname)) == NULL) {
+ error = EINVAL;
+ break;
+ }
+ LAGG_WLOCK(sc);
+ error = lagg_port_create(sc, tpif);
+ LAGG_WUNLOCK(sc);
+ break;
+ case SIOCSLAGGDELPORT:
+ error = priv_check(td, PRIV_NET_LAGG);
+ if (error)
+ break;
+ if (rp->rp_portname[0] == '\0' ||
+ (tpif = ifunit(rp->rp_portname)) == NULL) {
+ error = EINVAL;
+ break;
+ }
+
+ LAGG_WLOCK(sc);
+ if ((lp = (struct lagg_port *)tpif->if_lagg) == NULL ||
+ lp->lp_softc != sc) {
+ error = ENOENT;
+ LAGG_WUNLOCK(sc);
+ break;
+ }
+
+ error = lagg_port_destroy(lp, 1);
+ LAGG_WUNLOCK(sc);
+ break;
+ case SIOCSIFFLAGS:
+ /* Set flags on ports too */
+ LAGG_WLOCK(sc);
+ SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
+ lagg_setflags(lp, 1);
+ }
+ LAGG_WUNLOCK(sc);
+
+ if (!(ifp->if_flags & IFF_UP) &&
+ (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
+ /*
+ * If interface is marked down and it is running,
+ * then stop and disable it.
+ */
+ LAGG_WLOCK(sc);
+ lagg_stop(sc);
+ LAGG_WUNLOCK(sc);
+ } else if ((ifp->if_flags & IFF_UP) &&
+ !(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
+ /*
+ * If interface is marked up and it is stopped, then
+ * start it.
+ */
+ (*ifp->if_init)(sc);
+ }
+ break;
+ case SIOCADDMULTI:
+ case SIOCDELMULTI:
+ LAGG_WLOCK(sc);
+ error = lagg_ether_setmulti(sc);
+ LAGG_WUNLOCK(sc);
+ break;
+ case SIOCSIFMEDIA:
+ case SIOCGIFMEDIA:
+ error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
+ break;
+
+ case SIOCSIFCAP:
+ case SIOCSIFMTU:
+ /* Do not allow the MTU or caps to be directly changed */
+ error = EINVAL;
+ break;
+
+ default:
+ error = ether_ioctl(ifp, cmd, data);
+ break;
+ }
+ return (error);
+}
+
+static int
+lagg_ether_setmulti(struct lagg_softc *sc)
+{
+ struct lagg_port *lp;
+
+ LAGG_WLOCK_ASSERT(sc);
+
+ SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
+ /* First, remove any existing filter entries. */
+ lagg_ether_cmdmulti(lp, 0);
+ /* copy all addresses from the lagg interface to the port */
+ lagg_ether_cmdmulti(lp, 1);
+ }
+ return (0);
+}
+
+static int
+lagg_ether_cmdmulti(struct lagg_port *lp, int set)
+{
+ struct lagg_softc *sc = lp->lp_softc;
+ struct ifnet *ifp = lp->lp_ifp;
+ struct ifnet *scifp = sc->sc_ifp;
+ struct lagg_mc *mc;
+ struct ifmultiaddr *ifma, *rifma = NULL;
+ struct sockaddr_dl sdl;
+ int error;
+
+ LAGG_WLOCK_ASSERT(sc);
+
+ bzero((char *)&sdl, sizeof(sdl));
+ sdl.sdl_len = sizeof(sdl);
+ sdl.sdl_family = AF_LINK;
+ sdl.sdl_type = IFT_ETHER;
+ sdl.sdl_alen = ETHER_ADDR_LEN;
+ sdl.sdl_index = ifp->if_index;
+
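+	/*
+	 * set != 0: enroll every link-layer multicast address of the lagg
+	 * interface on this port; set == 0: release all of the multicast
+	 * references this port holds.
+	 */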
+ if (set) {
+ TAILQ_FOREACH(ifma, &scifp->if_multiaddrs, ifma_link) {
+ if (ifma->ifma_addr->sa_family != AF_LINK)
+ continue;
+ bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
+ LLADDR(&sdl), ETHER_ADDR_LEN);
+
+ error = if_addmulti(ifp, (struct sockaddr *)&sdl, &rifma);
+ if (error)
+ return (error);
+ mc = malloc(sizeof(struct lagg_mc), M_DEVBUF, M_NOWAIT);
+ if (mc == NULL)
+ return (ENOMEM);
+ mc->mc_ifma = rifma;
+ SLIST_INSERT_HEAD(&lp->lp_mc_head, mc, mc_entries);
+ }
+ } else {
+ while ((mc = SLIST_FIRST(&lp->lp_mc_head)) != NULL) {
+ SLIST_REMOVE(&lp->lp_mc_head, mc, lagg_mc, mc_entries);
+ if_delmulti_ifma(mc->mc_ifma);
+ free(mc, M_DEVBUF);
+ }
+ }
+ return (0);
+}
+
+/* Handle a ref-counted flag that should be set on the lagg port as well */
+static int
+lagg_setflag(struct lagg_port *lp, int flag, int status,
+ int (*func)(struct ifnet *, int))
+{
+ struct lagg_softc *sc = lp->lp_softc;
+ struct ifnet *scifp = sc->sc_ifp;
+ struct ifnet *ifp = lp->lp_ifp;
+ int error;
+
+ LAGG_WLOCK_ASSERT(sc);
+
+ status = status ? (scifp->if_flags & flag) : 0;
+ /* Now "status" contains the flag value or 0 */
+
+	/*
+	 * See if the recorded port status differs from what we want it
+	 * to be.  If it does, flip it.  We record the port status in
+	 * lp_ifflags so that we won't clear a port flag we haven't set.
+	 * In fact, we don't clear or set port flags directly; we acquire
+	 * or release references to them.  That is why we can be sure
+	 * that the recorded flags are still in accord with the actual
+	 * port flags.
+	 */
+ if (status != (lp->lp_ifflags & flag)) {
+ error = (*func)(ifp, status);
+ if (error)
+ return (error);
+ lp->lp_ifflags &= ~flag;
+ lp->lp_ifflags |= status;
+ }
+ return (0);
+}
+
+/*
+ * Handle IFF_* flags that require certain changes on the lagg port:
+ * if "status" is true, update the port's flags to match the lagg's;
+ * if "status" is false, forcibly clear the flags set on the port.
+ */
+static int
+lagg_setflags(struct lagg_port *lp, int status)
+{
+ int error, i;
+
+ for (i = 0; lagg_pflags[i].flag; i++) {
+ error = lagg_setflag(lp, lagg_pflags[i].flag,
+ status, lagg_pflags[i].func);
+ if (error)
+ return (error);
+ }
+ return (0);
+}
+
+static void
+lagg_start(struct ifnet *ifp)
+{
+ struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc;
+ struct mbuf *m;
+ int error = 0;
+
+ LAGG_RLOCK(sc);
+ /* We need a Tx algorithm and at least one port */
+ if (sc->sc_proto == LAGG_PROTO_NONE || sc->sc_count == 0) {
+ IF_DRAIN(&ifp->if_snd);
+ LAGG_RUNLOCK(sc);
+ return;
+ }
+
+ for (;; error = 0) {
+ IFQ_DEQUEUE(&ifp->if_snd, m);
+ if (m == NULL)
+ break;
+
+ ETHER_BPF_MTAP(ifp, m);
+
+ error = (*sc->sc_start)(sc, m);
+ if (error == 0)
+ ifp->if_opackets++;
+ else
+ ifp->if_oerrors++;
+ }
+ LAGG_RUNLOCK(sc);
+}
+
+static struct mbuf *
+lagg_input(struct ifnet *ifp, struct mbuf *m)
+{
+ struct lagg_port *lp = ifp->if_lagg;
+ struct lagg_softc *sc = lp->lp_softc;
+ struct ifnet *scifp = sc->sc_ifp;
+
+ if ((scifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
+ (lp->lp_flags & LAGG_PORT_DISABLED) ||
+ sc->sc_proto == LAGG_PROTO_NONE) {
+ m_freem(m);
+ return (NULL);
+ }
+
+ LAGG_RLOCK(sc);
+ ETHER_BPF_MTAP(scifp, m);
+
+ m = (*sc->sc_input)(sc, lp, m);
+
+ if (m != NULL) {
+ scifp->if_ipackets++;
+ scifp->if_ibytes += m->m_pkthdr.len;
+
+ if (scifp->if_flags & IFF_MONITOR) {
+ m_freem(m);
+ m = NULL;
+ }
+ }
+
+ LAGG_RUNLOCK(sc);
+ return (m);
+}
+
+static int
+lagg_media_change(struct ifnet *ifp)
+{
+ struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc;
+
+ if (sc->sc_ifflags & IFF_DEBUG)
+ printf("%s\n", __func__);
+
+ /* Ignore */
+ return (0);
+}
+
+static void
+lagg_media_status(struct ifnet *ifp, struct ifmediareq *imr)
+{
+ struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc;
+ struct lagg_port *lp;
+
+ imr->ifm_status = IFM_AVALID;
+ imr->ifm_active = IFM_ETHER | IFM_AUTO;
+
+ LAGG_RLOCK(sc);
+ SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
+ if (LAGG_PORTACTIVE(lp))
+ imr->ifm_status |= IFM_ACTIVE;
+ }
+ LAGG_RUNLOCK(sc);
+}
+
+static void
+lagg_linkstate(struct lagg_softc *sc)
+{
+ struct lagg_port *lp;
+ int new_link = LINK_STATE_DOWN;
+ uint64_t speed;
+
+ /* Our link is considered up if at least one of our ports is active */
+ SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
+ if (lp->lp_link_state == LINK_STATE_UP) {
+ new_link = LINK_STATE_UP;
+ break;
+ }
+ }
+ if_link_state_change(sc->sc_ifp, new_link);
+
+ /* Update if_baudrate to reflect the max possible speed */
+ switch (sc->sc_proto) {
+ case LAGG_PROTO_FAILOVER:
+ sc->sc_ifp->if_baudrate = sc->sc_primary != NULL ?
+ sc->sc_primary->lp_ifp->if_baudrate : 0;
+ break;
+ case LAGG_PROTO_ROUNDROBIN:
+ case LAGG_PROTO_LOADBALANCE:
+ case LAGG_PROTO_ETHERCHANNEL:
+ speed = 0;
+ SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
+ speed += lp->lp_ifp->if_baudrate;
+ sc->sc_ifp->if_baudrate = speed;
+ break;
+ case LAGG_PROTO_LACP:
+ /* LACP updates if_baudrate itself */
+ break;
+ }
+}
+
+static void
+lagg_port_state(struct ifnet *ifp, int state)
+{
+ struct lagg_port *lp = (struct lagg_port *)ifp->if_lagg;
+ struct lagg_softc *sc = NULL;
+
+ if (lp != NULL)
+ sc = lp->lp_softc;
+ if (sc == NULL)
+ return;
+
+ LAGG_WLOCK(sc);
+ lagg_linkstate(sc);
+ if (sc->sc_linkstate != NULL)
+ (*sc->sc_linkstate)(lp);
+ LAGG_WUNLOCK(sc);
+}
+
+struct lagg_port *
+lagg_link_active(struct lagg_softc *sc, struct lagg_port *lp)
+{
+ struct lagg_port *lp_next, *rval = NULL;
+ // int new_link = LINK_STATE_DOWN;
+
+ LAGG_RLOCK_ASSERT(sc);
+	/*
+	 * Search for a port that reports an active link state.
+	 */
+
+ if (lp == NULL)
+ goto search;
+ if (LAGG_PORTACTIVE(lp)) {
+ rval = lp;
+ goto found;
+ }
+ if ((lp_next = SLIST_NEXT(lp, lp_entries)) != NULL &&
+ LAGG_PORTACTIVE(lp_next)) {
+ rval = lp_next;
+ goto found;
+ }
+
+search:
+ SLIST_FOREACH(lp_next, &sc->sc_ports, lp_entries) {
+ if (LAGG_PORTACTIVE(lp_next)) {
+ rval = lp_next;
+ goto found;
+ }
+ }
+
+found:
+ if (rval != NULL) {
+ /*
+ * The IEEE 802.1D standard assumes that a lagg with
+ * multiple ports is always full duplex. This is valid
+ * for load sharing laggs and if at least two links
+ * are active. Unfortunately, checking the latter would
+ * be too expensive at this point.
+ XXX
+ if ((sc->sc_capabilities & IFCAP_LAGG_FULLDUPLEX) &&
+ (sc->sc_count > 1))
+ new_link = LINK_STATE_FULL_DUPLEX;
+ else
+ new_link = rval->lp_link_state;
+ */
+ }
+
+ return (rval);
+}
+
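+/*
+ * Return a pointer to "len" bytes at offset "off" in the mbuf chain,
+ * copying them into "buf" only when the range is not contiguous in the
+ * first mbuf.  Returns NULL if the packet is too short.
+ */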
+static const void *
+lagg_gethdr(struct mbuf *m, u_int off, u_int len, void *buf)
+{
+ if (m->m_pkthdr.len < (off + len)) {
+ return (NULL);
+ } else if (m->m_len < (off + len)) {
+ m_copydata(m, off, len, buf);
+ return (buf);
+ }
+ return (mtod(m, char *) + off);
+}
+
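+/*
+ * Compute a 32-bit flow hash over the Ethernet addresses, any VLAN tag
+ * and the IPv4/IPv6 source and destination addresses (plus the IPv6
+ * flow label), so that all packets of a flow map to the same port.
+ */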
+uint32_t
+lagg_hashmbuf(struct mbuf *m, uint32_t key)
+{
+ uint16_t etype;
+ uint32_t p = 0;
+ int off;
+ struct ether_header *eh;
+ struct ether_vlan_header vlanbuf;
+ const struct ether_vlan_header *vlan;
+#ifdef INET
+ const struct ip *ip;
+ struct ip ipbuf;
+#endif
+#ifdef INET6
+ const struct ip6_hdr *ip6;
+ struct ip6_hdr ip6buf;
+ uint32_t flow;
+#endif
+
+ off = sizeof(*eh);
+ if (m->m_len < off)
+ goto out;
+ eh = mtod(m, struct ether_header *);
+ etype = ntohs(eh->ether_type);
+ p = hash32_buf(&eh->ether_shost, ETHER_ADDR_LEN, key);
+ p = hash32_buf(&eh->ether_dhost, ETHER_ADDR_LEN, p);
+
+ /* Special handling for encapsulating VLAN frames */
+ if (m->m_flags & M_VLANTAG) {
+ p = hash32_buf(&m->m_pkthdr.ether_vtag,
+ sizeof(m->m_pkthdr.ether_vtag), p);
+ } else if (etype == ETHERTYPE_VLAN) {
+ vlan = lagg_gethdr(m, off, sizeof(*vlan), &vlanbuf);
+ if (vlan == NULL)
+ goto out;
+
+ p = hash32_buf(&vlan->evl_tag, sizeof(vlan->evl_tag), p);
+ etype = ntohs(vlan->evl_proto);
+ off += sizeof(*vlan) - sizeof(*eh);
+ }
+
+ switch (etype) {
+#ifdef INET
+ case ETHERTYPE_IP:
+ ip = lagg_gethdr(m, off, sizeof(*ip), &ipbuf);
+ if (ip == NULL)
+ goto out;
+
+ p = hash32_buf(&ip->ip_src, sizeof(struct in_addr), p);
+ p = hash32_buf(&ip->ip_dst, sizeof(struct in_addr), p);
+ break;
+#endif
+#ifdef INET6
+ case ETHERTYPE_IPV6:
+ ip6 = lagg_gethdr(m, off, sizeof(*ip6), &ip6buf);
+ if (ip6 == NULL)
+ goto out;
+
+ p = hash32_buf(&ip6->ip6_src, sizeof(struct in6_addr), p);
+ p = hash32_buf(&ip6->ip6_dst, sizeof(struct in6_addr), p);
+ flow = ip6->ip6_flow & IPV6_FLOWLABEL_MASK;
+ p = hash32_buf(&flow, sizeof(flow), p); /* IPv6 flow label */
+ break;
+#endif
+ }
+out:
+ return (p);
+}
+
+int
+lagg_enqueue(struct ifnet *ifp, struct mbuf *m)
+{
+
+ return (ifp->if_transmit)(ifp, m);
+}
+
+/*
+ * Simple round robin aggregation
+ */
+
+static int
+lagg_rr_attach(struct lagg_softc *sc)
+{
+ sc->sc_detach = lagg_rr_detach;
+ sc->sc_start = lagg_rr_start;
+ sc->sc_input = lagg_rr_input;
+ sc->sc_port_create = NULL;
+ sc->sc_capabilities = IFCAP_LAGG_FULLDUPLEX;
+ sc->sc_seq = 0;
+
+ return (0);
+}
+
+static int
+lagg_rr_detach(struct lagg_softc *sc)
+{
+ return (0);
+}
+
+static int
+lagg_rr_start(struct lagg_softc *sc, struct mbuf *m)
+{
+ struct lagg_port *lp;
+ uint32_t p;
+
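+	/*
+	 * Distribute outgoing frames evenly: take the next transmit
+	 * sequence number modulo the port count and walk the port list
+	 * to the selected entry.
+	 */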
+ p = atomic_fetchadd_32(&sc->sc_seq, 1);
+ p %= sc->sc_count;
+ lp = SLIST_FIRST(&sc->sc_ports);
+ while (p--)
+ lp = SLIST_NEXT(lp, lp_entries);
+
+ /*
+ * Check the port's link state. This will return the next active
+ * port if the link is down or the port is NULL.
+ */
+ if ((lp = lagg_link_active(sc, lp)) == NULL) {
+ m_freem(m);
+ return (ENOENT);
+ }
+
+ /* Send mbuf */
+ return (lagg_enqueue(lp->lp_ifp, m));
+}
+
+static struct mbuf *
+lagg_rr_input(struct lagg_softc *sc, struct lagg_port *lp, struct mbuf *m)
+{
+ struct ifnet *ifp = sc->sc_ifp;
+
+ /* Just pass in the packet to our lagg device */
+ m->m_pkthdr.rcvif = ifp;
+
+ return (m);
+}
+
+/*
+ * Active failover
+ */
+
+static int
+lagg_fail_attach(struct lagg_softc *sc)
+{
+ sc->sc_detach = lagg_fail_detach;
+ sc->sc_start = lagg_fail_start;
+ sc->sc_input = lagg_fail_input;
+ sc->sc_port_create = NULL;
+ sc->sc_port_destroy = NULL;
+
+ return (0);
+}
+
+static int
+lagg_fail_detach(struct lagg_softc *sc)
+{
+ return (0);
+}
+
+static int
+lagg_fail_start(struct lagg_softc *sc, struct mbuf *m)
+{
+ struct lagg_port *lp;
+
+ /* Use the master port if active or the next available port */
+ if ((lp = lagg_link_active(sc, sc->sc_primary)) == NULL) {
+ m_freem(m);
+ return (ENOENT);
+ }
+
+ /* Send mbuf */
+ return (lagg_enqueue(lp->lp_ifp, m));
+}
+
+static struct mbuf *
+lagg_fail_input(struct lagg_softc *sc, struct lagg_port *lp, struct mbuf *m)
+{
+ struct ifnet *ifp = sc->sc_ifp;
+ struct lagg_port *tmp_tp;
+
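+	/*
+	 * Frames are normally accepted only on the primary port (or on
+	 * any port when the failover_rx_all sysctl is set); if the
+	 * primary link is down, accept frames on the port that has taken
+	 * over.
+	 */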
+ if (lp == sc->sc_primary || lagg_failover_rx_all) {
+ m->m_pkthdr.rcvif = ifp;
+ return (m);
+ }
+
+ if (!LAGG_PORTACTIVE(sc->sc_primary)) {
+ tmp_tp = lagg_link_active(sc, sc->sc_primary);
+		/*
+		 * If tmp_tp is NULL, we've received a packet when all
+		 * our links are down.  Weird, but process it anyway.
+		 */
+		if (tmp_tp == NULL || tmp_tp == lp) {
+ m->m_pkthdr.rcvif = ifp;
+ return (m);
+ }
+ }
+
+ m_freem(m);
+ return (NULL);
+}
+
+/*
+ * Loadbalancing
+ */
+
+static int
+lagg_lb_attach(struct lagg_softc *sc)
+{
+ struct lagg_port *lp;
+ struct lagg_lb *lb;
+
+ if ((lb = (struct lagg_lb *)malloc(sizeof(struct lagg_lb),
+ M_DEVBUF, M_NOWAIT|M_ZERO)) == NULL)
+ return (ENOMEM);
+
+ sc->sc_detach = lagg_lb_detach;
+ sc->sc_start = lagg_lb_start;
+ sc->sc_input = lagg_lb_input;
+ sc->sc_port_create = lagg_lb_port_create;
+ sc->sc_port_destroy = lagg_lb_port_destroy;
+ sc->sc_capabilities = IFCAP_LAGG_FULLDUPLEX;
+
+ lb->lb_key = arc4random();
+ sc->sc_psc = (caddr_t)lb;
+
+ SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
+ lagg_lb_port_create(lp);
+
+ return (0);
+}
+
+static int
+lagg_lb_detach(struct lagg_softc *sc)
+{
+ struct lagg_lb *lb = (struct lagg_lb *)sc->sc_psc;
+ if (lb != NULL)
+ free(lb, M_DEVBUF);
+ return (0);
+}
+
+static int
+lagg_lb_porttable(struct lagg_softc *sc, struct lagg_port *lp)
+{
+ struct lagg_lb *lb = (struct lagg_lb *)sc->sc_psc;
+ struct lagg_port *lp_next;
+ int i = 0;
+
+ bzero(&lb->lb_ports, sizeof(lb->lb_ports));
+ SLIST_FOREACH(lp_next, &sc->sc_ports, lp_entries) {
+ if (lp_next == lp)
+ continue;
+ if (i >= LAGG_MAX_PORTS)
+ return (EINVAL);
+ if (sc->sc_ifflags & IFF_DEBUG)
+ printf("%s: port %s at index %d\n",
+ sc->sc_ifname, lp_next->lp_ifname, i);
+ lb->lb_ports[i++] = lp_next;
+ }
+
+ return (0);
+}
+
+static int
+lagg_lb_port_create(struct lagg_port *lp)
+{
+ struct lagg_softc *sc = lp->lp_softc;
+ return (lagg_lb_porttable(sc, NULL));
+}
+
+static void
+lagg_lb_port_destroy(struct lagg_port *lp)
+{
+ struct lagg_softc *sc = lp->lp_softc;
+ lagg_lb_porttable(sc, lp);
+}
+
+static int
+lagg_lb_start(struct lagg_softc *sc, struct mbuf *m)
+{
+ struct lagg_lb *lb = (struct lagg_lb *)sc->sc_psc;
+ struct lagg_port *lp = NULL;
+ uint32_t p = 0;
+
+ if (m->m_flags & M_FLOWID)
+ p = m->m_pkthdr.flowid;
+ else
+ p = lagg_hashmbuf(m, lb->lb_key);
+ p %= sc->sc_count;
+ lp = lb->lb_ports[p];
+
+ /*
+ * Check the port's link state; lagg_link_active() returns the
+ * next active port if this one is down or NULL.
+ */
+ if ((lp = lagg_link_active(sc, lp)) == NULL) {
+ m_freem(m);
+ return (ENOENT);
+ }
+
+ /* Send mbuf */
+ return (lagg_enqueue(lp->lp_ifp, m));
+}
+
+static struct mbuf *
+lagg_lb_input(struct lagg_softc *sc, struct lagg_port *lp, struct mbuf *m)
+{
+ struct ifnet *ifp = sc->sc_ifp;
+
+ /* Just pass in the packet to our lagg device */
+ m->m_pkthdr.rcvif = ifp;
+
+ return (m);
+}
+
+/*
+ * 802.3ad LACP
+ */
+
+static int
+lagg_lacp_attach(struct lagg_softc *sc)
+{
+ struct lagg_port *lp;
+ int error;
+
+ sc->sc_detach = lagg_lacp_detach;
+ sc->sc_port_create = lacp_port_create;
+ sc->sc_port_destroy = lacp_port_destroy;
+ sc->sc_linkstate = lacp_linkstate;
+ sc->sc_start = lagg_lacp_start;
+ sc->sc_input = lagg_lacp_input;
+ sc->sc_init = lacp_init;
+ sc->sc_stop = lacp_stop;
+ sc->sc_lladdr = lagg_lacp_lladdr;
+ sc->sc_req = lacp_req;
+ sc->sc_portreq = lacp_portreq;
+
+ error = lacp_attach(sc);
+ if (error)
+ return (error);
+
+ SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
+ lacp_port_create(lp);
+
+ return (error);
+}
+
+static int
+lagg_lacp_detach(struct lagg_softc *sc)
+{
+ struct lagg_port *lp;
+ int error;
+
+ SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
+ lacp_port_destroy(lp);
+
+ /* unlocking is safe here */
+ LAGG_WUNLOCK(sc);
+ error = lacp_detach(sc);
+ LAGG_WLOCK(sc);
+
+ return (error);
+}
+
+static void
+lagg_lacp_lladdr(struct lagg_softc *sc)
+{
+ struct lagg_port *lp;
+
+ /* purge all the lacp ports */
+ SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
+ lacp_port_destroy(lp);
+
+ /* add them back in */
+ SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
+ lacp_port_create(lp);
+}
+
+static int
+lagg_lacp_start(struct lagg_softc *sc, struct mbuf *m)
+{
+ struct lagg_port *lp;
+
+ lp = lacp_select_tx_port(sc, m);
+ if (lp == NULL) {
+ m_freem(m);
+ return (EBUSY);
+ }
+
+ /* Send mbuf */
+ return (lagg_enqueue(lp->lp_ifp, m));
+}
+
+static struct mbuf *
+lagg_lacp_input(struct lagg_softc *sc, struct lagg_port *lp, struct mbuf *m)
+{
+ struct ifnet *ifp = sc->sc_ifp;
+ struct ether_header *eh;
+ u_short etype;
+
+ eh = mtod(m, struct ether_header *);
+ etype = ntohs(eh->ether_type);
+
+ /* Tap off LACP control messages */
+ if (etype == ETHERTYPE_SLOW) {
+ m = lacp_input(lp, m);
+ if (m == NULL)
+ return (NULL);
+ }
+
+ /*
+ * If the port is not collecting or not in the active aggregator then
+ * free and return.
+ */
+ if (lacp_iscollecting(lp) == 0 || lacp_isactive(lp) == 0) {
+ m_freem(m);
+ return (NULL);
+ }
+
+ m->m_pkthdr.rcvif = ifp;
+ return (m);
+}
diff --git a/rtems/freebsd/net/if_lagg.h b/rtems/freebsd/net/if_lagg.h
new file mode 100644
index 00000000..0034c617
--- /dev/null
+++ b/rtems/freebsd/net/if_lagg.h
@@ -0,0 +1,247 @@
+/* $OpenBSD: if_trunk.h,v 1.11 2007/01/31 06:20:19 reyk Exp $ */
+
+/*
+ * Copyright (c) 2005, 2006 Reyk Floeter <reyk@openbsd.org>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _NET_LAGG_H
+#define _NET_LAGG_H
+
+/*
+ * Global definitions
+ */
+
+#define LAGG_MAX_PORTS 32 /* maximum ports per lagg */
+#define LAGG_MAX_NAMESIZE 32 /* name of a protocol */
+#define LAGG_MAX_STACKING 4 /* maximum number of stacked laggs */
+
+/* Port flags */
+#define LAGG_PORT_SLAVE 0x00000000 /* normal enslaved port */
+#define LAGG_PORT_MASTER 0x00000001 /* primary port */
+#define LAGG_PORT_STACK 0x00000002 /* stacked lagg port */
+#define LAGG_PORT_ACTIVE 0x00000004 /* port is active */
+#define LAGG_PORT_COLLECTING 0x00000008 /* port is receiving frames */
+#define LAGG_PORT_DISTRIBUTING 0x00000010 /* port is sending frames */
+#define LAGG_PORT_DISABLED 0x00000020 /* port is disabled */
+#define LAGG_PORT_BITS "\20\01MASTER\02STACK\03ACTIVE\04COLLECTING" \
+ "\05DISTRIBUTING\06DISABLED"
+
+/* Supported lagg PROTOs */
+#define LAGG_PROTO_NONE 0 /* no lagg protocol defined */
+#define LAGG_PROTO_ROUNDROBIN 1 /* simple round robin */
+#define LAGG_PROTO_FAILOVER 2 /* active failover */
+#define LAGG_PROTO_LOADBALANCE 3 /* loadbalance */
+#define LAGG_PROTO_LACP 4 /* 802.3ad lacp */
+#define LAGG_PROTO_ETHERCHANNEL 5 /* Cisco FEC */
+#define LAGG_PROTO_MAX 6
+
+struct lagg_protos {
+ const char *lpr_name;
+ int lpr_proto;
+};
+
+#define LAGG_PROTO_DEFAULT LAGG_PROTO_FAILOVER
+#define LAGG_PROTOS { \
+ { "failover", LAGG_PROTO_FAILOVER }, \
+ { "fec", LAGG_PROTO_ETHERCHANNEL }, \
+ { "lacp", LAGG_PROTO_LACP }, \
+ { "loadbalance", LAGG_PROTO_LOADBALANCE }, \
+ { "roundrobin", LAGG_PROTO_ROUNDROBIN }, \
+ { "none", LAGG_PROTO_NONE }, \
+ { "default", LAGG_PROTO_DEFAULT } \
+}
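+
+/*
+ * Illustrative sketch (not part of the original sources): a protocol
+ * name can be resolved against this table with a linear scan, e.g.:
+ *
+ *     static const struct lagg_protos lagg_prtbl[] = LAGG_PROTOS;
+ *     u_int i;
+ *
+ *     for (i = 0; i < sizeof(lagg_prtbl) / sizeof(lagg_prtbl[0]); i++)
+ *         if (strcmp(name, lagg_prtbl[i].lpr_name) == 0)
+ *             return (lagg_prtbl[i].lpr_proto);
+ *     return (-1);
+ */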
+
+/*
+ * lagg ioctls.
+ */
+
+/*
+ * LACP current operational parameters structure.
+ */
+struct lacp_opreq {
+ uint16_t actor_prio;
+ uint8_t actor_mac[ETHER_ADDR_LEN];
+ uint16_t actor_key;
+ uint16_t actor_portprio;
+ uint16_t actor_portno;
+ uint8_t actor_state;
+ uint16_t partner_prio;
+ uint8_t partner_mac[ETHER_ADDR_LEN];
+ uint16_t partner_key;
+ uint16_t partner_portprio;
+ uint16_t partner_portno;
+ uint8_t partner_state;
+};
+
+/* lagg port settings */
+struct lagg_reqport {
+ char rp_ifname[IFNAMSIZ]; /* name of the lagg */
+ char rp_portname[IFNAMSIZ]; /* name of the port */
+ u_int32_t rp_prio; /* port priority */
+ u_int32_t rp_flags; /* port flags */
+ union {
+ struct lacp_opreq rpsc_lacp;
+ } rp_psc;
+#define rp_lacpreq rp_psc.rpsc_lacp
+};
+
+#define SIOCGLAGGPORT _IOWR('i', 140, struct lagg_reqport)
+#define SIOCSLAGGPORT _IOW('i', 141, struct lagg_reqport)
+#define SIOCSLAGGDELPORT _IOW('i', 142, struct lagg_reqport)
+
+/* lagg, ports and options */
+struct lagg_reqall {
+ char ra_ifname[IFNAMSIZ]; /* name of the lagg */
+ u_int ra_proto; /* lagg protocol */
+
+ size_t ra_size; /* size of buffer */
+ struct lagg_reqport *ra_port; /* allocated buffer */
+ int ra_ports; /* total port count */
+ union {
+ struct lacp_opreq rpsc_lacp;
+ } ra_psc;
+#define ra_lacpreq ra_psc.rpsc_lacp
+};
+
+#define SIOCGLAGG _IOWR('i', 143, struct lagg_reqall)
+#define SIOCSLAGG _IOW('i', 144, struct lagg_reqall)
+
+#ifdef _KERNEL
+/*
+ * Internal kernel part
+ */
+
+#define lp_ifname lp_ifp->if_xname /* interface name */
+#define lp_link_state lp_ifp->if_link_state /* link state */
+
+#define LAGG_PORTACTIVE(_tp) ( \
+ ((_tp)->lp_link_state == LINK_STATE_UP) && \
+ ((_tp)->lp_ifp->if_flags & IFF_UP) \
+)
+
+struct lagg_ifreq {
+ union {
+ struct ifreq ifreq;
+ struct {
+ char ifr_name[IFNAMSIZ];
+ struct sockaddr_storage ifr_ss;
+ } ifreq_storage;
+ } ifreq;
+};
+
+#define sc_ifflags sc_ifp->if_flags /* flags */
+#define sc_ifname sc_ifp->if_xname /* name */
+#define sc_capabilities sc_ifp->if_capabilities /* capabilities */
+
+#define IFCAP_LAGG_MASK 0xffff0000 /* private capabilities */
+#define IFCAP_LAGG_FULLDUPLEX 0x00010000 /* full duplex with >1 ports */
+
+/* Private data used by the loadbalancing protocol */
+struct lagg_lb {
+ u_int32_t lb_key;
+ struct lagg_port *lb_ports[LAGG_MAX_PORTS];
+};
+
+struct lagg_mc {
+ struct ifmultiaddr *mc_ifma;
+ SLIST_ENTRY(lagg_mc) mc_entries;
+};
+
+/* List of interfaces to have the MAC address modified */
+struct lagg_llq {
+ struct ifnet *llq_ifp;
+ uint8_t llq_lladdr[ETHER_ADDR_LEN];
+ SLIST_ENTRY(lagg_llq) llq_entries;
+};
+
+struct lagg_softc {
+ struct ifnet *sc_ifp; /* virtual interface */
+ struct rwlock sc_mtx;
+ int sc_proto; /* lagg protocol */
+ u_int sc_count; /* number of ports */
+ struct lagg_port *sc_primary; /* primary port */
+ struct ifmedia sc_media; /* media config */
+ caddr_t sc_psc; /* protocol data */
+ uint32_t sc_seq; /* sequence counter */
+
+ SLIST_HEAD(__tplhd, lagg_port) sc_ports; /* list of interfaces */
+ SLIST_ENTRY(lagg_softc) sc_entries;
+
+ struct task sc_lladdr_task;
+ SLIST_HEAD(__llqhd, lagg_llq) sc_llq_head; /* interfaces to program
+ the lladdr on */
+
+ /* lagg protocol callbacks */
+ int (*sc_detach)(struct lagg_softc *);
+ int (*sc_start)(struct lagg_softc *, struct mbuf *);
+ struct mbuf *(*sc_input)(struct lagg_softc *, struct lagg_port *,
+ struct mbuf *);
+ int (*sc_port_create)(struct lagg_port *);
+ void (*sc_port_destroy)(struct lagg_port *);
+ void (*sc_linkstate)(struct lagg_port *);
+ void (*sc_init)(struct lagg_softc *);
+ void (*sc_stop)(struct lagg_softc *);
+ void (*sc_lladdr)(struct lagg_softc *);
+ void (*sc_req)(struct lagg_softc *, caddr_t);
+ void (*sc_portreq)(struct lagg_port *, caddr_t);
+#if __FreeBSD_version >= 800000
+ eventhandler_tag vlan_attach;
+ eventhandler_tag vlan_detach;
+#endif
+};
+
+struct lagg_port {
+ struct ifnet *lp_ifp; /* physical interface */
+ struct lagg_softc *lp_softc; /* parent lagg */
+ uint8_t lp_lladdr[ETHER_ADDR_LEN];
+
+ u_char lp_iftype; /* interface type */
+ uint32_t lp_prio; /* port priority */
+ uint32_t lp_flags; /* port flags */
+ int lp_ifflags; /* saved ifp flags */
+ void *lh_cookie; /* if state hook */
+ caddr_t lp_psc; /* protocol data */
+ int lp_detaching; /* ifnet is detaching */
+
+ SLIST_HEAD(__mclhd, lagg_mc) lp_mc_head; /* multicast addresses */
+
+ /* Redirected callbacks */
+ int (*lp_ioctl)(struct ifnet *, u_long, caddr_t);
+ int (*lp_output)(struct ifnet *, struct mbuf *, struct sockaddr *,
+ struct route *);
+
+ SLIST_ENTRY(lagg_port) lp_entries;
+};
+
+#define LAGG_LOCK_INIT(_sc) rw_init(&(_sc)->sc_mtx, "if_lagg rwlock")
+#define LAGG_LOCK_DESTROY(_sc) rw_destroy(&(_sc)->sc_mtx)
+#define LAGG_RLOCK(_sc) rw_rlock(&(_sc)->sc_mtx)
+#define LAGG_WLOCK(_sc) rw_wlock(&(_sc)->sc_mtx)
+#define LAGG_RUNLOCK(_sc) rw_runlock(&(_sc)->sc_mtx)
+#define LAGG_WUNLOCK(_sc) rw_wunlock(&(_sc)->sc_mtx)
+#define LAGG_RLOCK_ASSERT(_sc) rw_assert(&(_sc)->sc_mtx, RA_RLOCKED)
+#define LAGG_WLOCK_ASSERT(_sc) rw_assert(&(_sc)->sc_mtx, RA_WLOCKED)
+
+extern struct mbuf *(*lagg_input_p)(struct ifnet *, struct mbuf *);
+extern void (*lagg_linkstate_p)(struct ifnet *, int );
+
+int lagg_enqueue(struct ifnet *, struct mbuf *);
+uint32_t lagg_hashmbuf(struct mbuf *, uint32_t);
+
+#endif /* _KERNEL */
+
+#endif /* _NET_LAGG_H */
diff --git a/rtems/freebsd/net/if_llatbl.c b/rtems/freebsd/net/if_llatbl.c
new file mode 100644
index 00000000..e04cea43
--- /dev/null
+++ b/rtems/freebsd/net/if_llatbl.c
@@ -0,0 +1,528 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*
+ * Copyright (c) 2004 Luigi Rizzo, Alessandro Cerri. All rights reserved.
+ * Copyright (c) 2004-2008 Qing Li. All rights reserved.
+ * Copyright (c) 2008 Kip Macy. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/freebsd/local/opt_ddb.h>
+#include <rtems/freebsd/local/opt_inet.h>
+#include <rtems/freebsd/local/opt_inet6.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/syslog.h>
+#include <rtems/freebsd/sys/sysctl.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/mutex.h>
+#include <rtems/freebsd/sys/rwlock.h>
+
+#ifdef DDB
+#include <rtems/freebsd/ddb/ddb.h>
+#endif
+
+#include <rtems/freebsd/vm/uma.h>
+
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/net/if_llatbl.h>
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/if_dl.h>
+#include <rtems/freebsd/net/if_var.h>
+#include <rtems/freebsd/net/route.h>
+#include <rtems/freebsd/net/vnet.h>
+#include <rtems/freebsd/netinet/if_ether.h>
+#include <rtems/freebsd/netinet6/in6_var.h>
+#include <rtems/freebsd/netinet6/nd6.h>
+
+MALLOC_DEFINE(M_LLTABLE, "lltable", "link level address tables");
+
+static VNET_DEFINE(SLIST_HEAD(, lltable), lltables);
+#define V_lltables VNET(lltables)
+
+extern void arprequest(struct ifnet *, struct in_addr *, struct in_addr *,
+ u_char *);
+
+static void vnet_lltable_init(void);
+
+struct rwlock lltable_rwlock;
+RW_SYSINIT(lltable_rwlock, &lltable_rwlock, "lltable_rwlock");
+
+/*
+ * Dump arp state for a specific address family.
+ */
+int
+lltable_sysctl_dumparp(int af, struct sysctl_req *wr)
+{
+ struct lltable *llt;
+ int error = 0;
+
+ LLTABLE_RLOCK();
+ SLIST_FOREACH(llt, &V_lltables, llt_link) {
+ if (llt->llt_af == af) {
+ error = llt->llt_dump(llt, wr);
+ if (error != 0)
+ goto done;
+ }
+ }
+done:
+ LLTABLE_RUNLOCK();
+ return (error);
+}
+
+/*
+ * Deletes an address from the address table.
+ * This function is called by the timer functions
+ * such as arptimer() and nd6_llinfo_timer(), and
+ * the caller does the locking.
+ */
+void
+llentry_free(struct llentry *lle)
+{
+
+ LLE_WLOCK_ASSERT(lle);
+ LIST_REMOVE(lle, lle_next);
+
+ if (lle->la_hold != NULL)
+ m_freem(lle->la_hold);
+
+ LLE_FREE_LOCKED(lle);
+}
+
+/*
+ * Update an llentry for address dst (equivalent to rtalloc for new-arp).
+ * The caller must pass in a valid struct llentry * (or NULL).
+ *
+ * If found, the llentry * is returned referenced and unlocked.
+ */
+int
+llentry_update(struct llentry **llep, struct lltable *lt,
+ struct sockaddr_storage *dst, struct ifnet *ifp)
+{
+ struct llentry *la;
+
+ IF_AFDATA_RLOCK(ifp);
+ la = lla_lookup(lt, LLE_EXCLUSIVE,
+ (struct sockaddr *)dst);
+ IF_AFDATA_RUNLOCK(ifp);
+ if ((la == NULL) &&
+ (ifp->if_flags & (IFF_NOARP | IFF_STATICARP)) == 0) {
+ IF_AFDATA_WLOCK(ifp);
+ la = lla_lookup(lt,
+ (LLE_CREATE | LLE_EXCLUSIVE),
+ (struct sockaddr *)dst);
+ IF_AFDATA_WUNLOCK(ifp);
+ }
+ if (la != NULL && (*llep != la)) {
+ if (*llep != NULL)
+ LLE_FREE(*llep);
+ LLE_ADDREF(la);
+ LLE_WUNLOCK(la);
+ *llep = la;
+ } else if (la != NULL)
+ LLE_WUNLOCK(la);
+
+ if (la == NULL)
+ return (ENOENT);
+
+ return (0);
+}
+
+/*
+ * Free all entries in the given table, then free the table itself.
+ */
+void
+lltable_free(struct lltable *llt)
+{
+ struct llentry *lle, *next;
+ int i;
+
+ KASSERT(llt != NULL, ("%s: llt is NULL", __func__));
+
+ LLTABLE_WLOCK();
+ SLIST_REMOVE(&V_lltables, llt, lltable, llt_link);
+ LLTABLE_WUNLOCK();
+
+ for (i = 0; i < LLTBL_HASHTBL_SIZE; i++) {
+ LIST_FOREACH_SAFE(lle, &llt->lle_head[i], lle_next, next) {
+ int canceled;
+
+ canceled = callout_drain(&lle->la_timer);
+ LLE_WLOCK(lle);
+ if (canceled)
+ LLE_REMREF(lle);
+ llentry_free(lle);
+ }
+ }
+
+ free(llt, M_LLTABLE);
+}
+
+#if 0
+void
+lltable_drain(int af)
+{
+ struct lltable *llt;
+ struct llentry *lle;
+ register int i;
+
+ LLTABLE_RLOCK();
+ SLIST_FOREACH(llt, &V_lltables, llt_link) {
+ if (llt->llt_af != af)
+ continue;
+
+ for (i=0; i < LLTBL_HASHTBL_SIZE; i++) {
+ LIST_FOREACH(lle, &llt->lle_head[i], lle_next) {
+ LLE_WLOCK(lle);
+ if (lle->la_hold) {
+ m_freem(lle->la_hold);
+ lle->la_hold = NULL;
+ }
+ LLE_WUNLOCK(lle);
+ }
+ }
+ }
+ LLTABLE_RUNLOCK();
+}
+#endif
+
+void
+lltable_prefix_free(int af, struct sockaddr *prefix, struct sockaddr *mask)
+{
+ struct lltable *llt;
+
+ LLTABLE_RLOCK();
+ SLIST_FOREACH(llt, &V_lltables, llt_link) {
+ if (llt->llt_af != af)
+ continue;
+
+ llt->llt_prefix_free(llt, prefix, mask);
+ }
+ LLTABLE_RUNLOCK();
+}
+
+
+/*
+ * Create a new lltable.
+ */
+struct lltable *
+lltable_init(struct ifnet *ifp, int af)
+{
+ struct lltable *llt;
+ int i;
+
+ llt = malloc(sizeof(struct lltable), M_LLTABLE, M_WAITOK);
+
+ llt->llt_af = af;
+ llt->llt_ifp = ifp;
+ for (i = 0; i < LLTBL_HASHTBL_SIZE; i++)
+ LIST_INIT(&llt->lle_head[i]);
+
+ LLTABLE_WLOCK();
+ SLIST_INSERT_HEAD(&V_lltables, llt, llt_link);
+ LLTABLE_WUNLOCK();
+
+ return (llt);
+}
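+
+/*
+ * Illustrative sketch (an assumption, not part of this file): each
+ * address family attaches its table and fills in the method pointers,
+ * roughly as the IPv4 code does with its in_lltable_*() routines:
+ *
+ *     llt = lltable_init(ifp, AF_INET);
+ *     llt->llt_new = in_lltable_new;
+ *     llt->llt_free = in_lltable_free;
+ *     llt->llt_lookup = in_lltable_lookup;
+ *     llt->llt_dump = in_lltable_dump;
+ *     llt->llt_prefix_free = in_lltable_prefix_free;
+ *     llt->llt_rtcheck = in_lltable_rtcheck;
+ */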
+
+/*
+ * Called in route_output when adding/deleting a route to an interface.
+ */
+int
+lla_rt_output(struct rt_msghdr *rtm, struct rt_addrinfo *info)
+{
+ struct sockaddr_dl *dl =
+ (struct sockaddr_dl *)info->rti_info[RTAX_GATEWAY];
+ struct sockaddr *dst = (struct sockaddr *)info->rti_info[RTAX_DST];
+ struct ifnet *ifp;
+ struct lltable *llt;
+ struct llentry *lle;
+ u_int laflags = 0, flags = 0;
+ int error = 0;
+
+ if (dl == NULL || dl->sdl_family != AF_LINK) {
+ log(LOG_INFO, "%s: invalid dl\n", __func__);
+ return EINVAL;
+ }
+ ifp = ifnet_byindex(dl->sdl_index);
+ if (ifp == NULL) {
+ log(LOG_INFO, "%s: invalid ifp (sdl_index %d)\n",
+ __func__, dl->sdl_index);
+ return EINVAL;
+ }
+
+ switch (rtm->rtm_type) {
+ case RTM_ADD:
+ if (rtm->rtm_flags & RTF_ANNOUNCE) {
+ flags |= LLE_PUB;
+#ifdef INET
+ if (dst->sa_family == AF_INET &&
+ ((struct sockaddr_inarp *)dst)->sin_other != 0) {
+ struct rtentry *rt;
+ ((struct sockaddr_inarp *)dst)->sin_other = 0;
+ rt = rtalloc1(dst, 0, 0);
+ if (rt == NULL || !(rt->rt_flags & RTF_HOST)) {
+ log(LOG_INFO, "%s: RTM_ADD publish "
+ "(proxy only) is invalid\n",
+ __func__);
+ if (rt)
+ RTFREE_LOCKED(rt);
+ return EINVAL;
+ }
+ RTFREE_LOCKED(rt);
+
+ flags |= LLE_PROXY;
+ }
+#endif
+ }
+ flags |= LLE_CREATE;
+ break;
+
+ case RTM_DELETE:
+ flags |= LLE_DELETE;
+ break;
+
+ case RTM_CHANGE:
+ break;
+
+ default:
+ return EINVAL; /* XXX not implemented yet */
+ }
+
+ /* XXX linked list may be too expensive */
+ LLTABLE_RLOCK();
+ SLIST_FOREACH(llt, &V_lltables, llt_link) {
+ if (llt->llt_af == dst->sa_family &&
+ llt->llt_ifp == ifp)
+ break;
+ }
+ LLTABLE_RUNLOCK();
+ KASSERT(llt != NULL, ("Yep, ugly hacks are bad\n"));
+
+ if (flags & LLE_CREATE)
+ flags |= LLE_EXCLUSIVE;
+
+ IF_AFDATA_LOCK(ifp);
+ lle = lla_lookup(llt, flags, dst);
+ IF_AFDATA_UNLOCK(ifp);
+ if (LLE_IS_VALID(lle)) {
+ if (flags & LLE_CREATE) {
+ /*
+ * If we delay the delete, then a subsequent
+ * "arp add" should look up this entry, reset the
+ * LLE_DELETED flag, and reset the expiration timer
+ */
+ bcopy(LLADDR(dl), &lle->ll_addr, ifp->if_addrlen);
+ lle->la_flags |= (flags & (LLE_PUB | LLE_PROXY));
+ lle->la_flags |= LLE_VALID;
+ lle->la_flags &= ~LLE_DELETED;
+#ifdef INET6
+ /*
+ * ND6
+ */
+ if (dst->sa_family == AF_INET6)
+ lle->ln_state = ND6_LLINFO_REACHABLE;
+#endif
+ /*
+ * NB: arp and ndp always set (RTF_STATIC | RTF_HOST)
+ */
+
+ if (rtm->rtm_rmx.rmx_expire == 0) {
+ lle->la_flags |= LLE_STATIC;
+ lle->la_expire = 0;
+ } else
+ lle->la_expire = rtm->rtm_rmx.rmx_expire;
+ laflags = lle->la_flags;
+ LLE_WUNLOCK(lle);
+#ifdef INET
+ /* gratuitous ARP */
+ if ((laflags & LLE_PUB) && dst->sa_family == AF_INET) {
+ arprequest(ifp,
+ &((struct sockaddr_in *)dst)->sin_addr,
+ &((struct sockaddr_in *)dst)->sin_addr,
+ ((laflags & LLE_PROXY) ?
+ (u_char *)IF_LLADDR(ifp) :
+ (u_char *)LLADDR(dl)));
+ }
+#endif
+ } else {
+ if (flags & LLE_EXCLUSIVE)
+ LLE_WUNLOCK(lle);
+ else
+ LLE_RUNLOCK(lle);
+ }
+ } else if ((lle == NULL) && (flags & LLE_DELETE))
+ error = EINVAL;
+
+ return (error);
+}
+
+static void
+vnet_lltable_init(void)
+{
+
+ SLIST_INIT(&V_lltables);
+}
+VNET_SYSINIT(vnet_lltable_init, SI_SUB_PSEUDO, SI_ORDER_FIRST,
+ vnet_lltable_init, NULL);
+
+#ifdef DDB
+struct llentry_sa {
+ struct llentry base;
+ struct sockaddr l3_addr;
+};
+
+static void
+llatbl_lle_show(struct llentry_sa *la)
+{
+ struct llentry *lle;
+ uint8_t octet[6];
+
+ lle = &la->base;
+ db_printf("lle=%p\n", lle);
+ db_printf(" lle_next=%p\n", lle->lle_next.le_next);
+ db_printf(" lle_lock=%p\n", &lle->lle_lock);
+ db_printf(" lle_tbl=%p\n", lle->lle_tbl);
+ db_printf(" lle_head=%p\n", lle->lle_head);
+ db_printf(" la_hold=%p\n", lle->la_hold);
+ db_printf(" la_expire=%ju\n", (uintmax_t)lle->la_expire);
+ db_printf(" la_flags=0x%04x\n", lle->la_flags);
+ db_printf(" la_asked=%u\n", lle->la_asked);
+ db_printf(" la_preempt=%u\n", lle->la_preempt);
+ db_printf(" ln_byhint=%u\n", lle->ln_byhint);
+ db_printf(" ln_state=%d\n", lle->ln_state);
+ db_printf(" ln_router=%u\n", lle->ln_router);
+ db_printf(" ln_ntick=%ju\n", (uintmax_t)lle->ln_ntick);
+ db_printf(" lle_refcnt=%d\n", lle->lle_refcnt);
+ bcopy(&lle->ll_addr.mac16, octet, sizeof(octet));
+ db_printf(" ll_addr=%02x:%02x:%02x:%02x:%02x:%02x\n",
+ octet[0], octet[1], octet[2], octet[3], octet[4], octet[5]);
+ db_printf(" la_timer=%p\n", &lle->la_timer);
+
+ switch (la->l3_addr.sa_family) {
+#ifdef INET
+ case AF_INET:
+ {
+ struct sockaddr_in *sin;
+ char l3s[INET_ADDRSTRLEN];
+
+ sin = (struct sockaddr_in *)&la->l3_addr;
+ inet_ntoa_r(sin->sin_addr, l3s);
+ db_printf(" l3_addr=%s\n", l3s);
+ break;
+ }
+#endif
+#ifdef INET6
+ case AF_INET6:
+ {
+ struct sockaddr_in6 *sin6;
+ char l3s[INET6_ADDRSTRLEN];
+
+ sin6 = (struct sockaddr_in6 *)&la->l3_addr;
+ ip6_sprintf(l3s, &sin6->sin6_addr);
+ db_printf(" l3_addr=%s\n", l3s);
+ break;
+ }
+#endif
+ default:
+ db_printf(" l3_addr=N/A (af=%d)\n", la->l3_addr.sa_family);
+ break;
+ }
+}
+
+DB_SHOW_COMMAND(llentry, db_show_llentry)
+{
+
+ if (!have_addr) {
+ db_printf("usage: show llentry <struct llentry *>\n");
+ return;
+ }
+
+ llatbl_lle_show((struct llentry_sa *)addr);
+}
+
+static void
+llatbl_llt_show(struct lltable *llt)
+{
+ int i;
+ struct llentry *lle;
+
+ db_printf("llt=%p llt_af=%d llt_ifp=%p\n",
+ llt, llt->llt_af, llt->llt_ifp);
+
+ for (i = 0; i < LLTBL_HASHTBL_SIZE; i++) {
+ LIST_FOREACH(lle, &llt->lle_head[i], lle_next) {
+
+ llatbl_lle_show((struct llentry_sa *)lle);
+ if (db_pager_quit)
+ return;
+ }
+ }
+}
+
+DB_SHOW_COMMAND(lltable, db_show_lltable)
+{
+
+ if (!have_addr) {
+ db_printf("usage: show lltable <struct lltable *>\n");
+ return;
+ }
+
+ llatbl_llt_show((struct lltable *)addr);
+}
+
+DB_SHOW_ALL_COMMAND(lltables, db_show_all_lltables)
+{
+ VNET_ITERATOR_DECL(vnet_iter);
+ struct lltable *llt;
+
+ VNET_FOREACH(vnet_iter) {
+ CURVNET_SET_QUIET(vnet_iter);
+#ifdef VIMAGE
+ db_printf("vnet=%p\n", curvnet);
+#endif
+ SLIST_FOREACH(llt, &V_lltables, llt_link) {
+ db_printf("llt=%p llt_af=%d llt_ifp=%p(%s)\n",
+ llt, llt->llt_af, llt->llt_ifp,
+ (llt->llt_ifp != NULL) ?
+ llt->llt_ifp->if_xname : "?");
+ if (have_addr && addr != 0) /* verbose */
+ llatbl_llt_show(llt);
+ if (db_pager_quit) {
+ CURVNET_RESTORE();
+ return;
+ }
+ }
+ CURVNET_RESTORE();
+ }
+}
+#endif
diff --git a/rtems/freebsd/net/if_llatbl.h b/rtems/freebsd/net/if_llatbl.h
new file mode 100644
index 00000000..98dd19f6
--- /dev/null
+++ b/rtems/freebsd/net/if_llatbl.h
@@ -0,0 +1,208 @@
+/*
+ * Copyright (c) 2004 Luigi Rizzo, Alessandro Cerri. All rights reserved.
+ * Copyright (c) 2004-2008 Qing Li. All rights reserved.
+ * Copyright (c) 2008 Kip Macy. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#ifndef _NET_IF_LLATBL_HH_
+#define _NET_IF_LLATBL_HH_
+
+#include <rtems/freebsd/sys/_rwlock.h>
+#include <rtems/freebsd/netinet/in.h>
+
+struct ifnet;
+struct sysctl_req;
+struct rt_msghdr;
+struct rt_addrinfo;
+
+struct llentry;
+LIST_HEAD(llentries, llentry);
+
+extern struct rwlock lltable_rwlock;
+#define LLTABLE_RLOCK() rw_rlock(&lltable_rwlock)
+#define LLTABLE_RUNLOCK() rw_runlock(&lltable_rwlock)
+#define LLTABLE_WLOCK() rw_wlock(&lltable_rwlock)
+#define LLTABLE_WUNLOCK() rw_wunlock(&lltable_rwlock)
+#define LLTABLE_LOCK_ASSERT() rw_assert(&lltable_rwlock, RA_LOCKED)
+
+/*
+ * Code referencing llentry must at least hold
+ * a shared lock
+ */
+struct llentry {
+ LIST_ENTRY(llentry) lle_next;
+ struct rwlock lle_lock;
+ struct lltable *lle_tbl;
+ struct llentries *lle_head;
+ struct mbuf *la_hold;
+ time_t la_expire;
+ uint16_t la_flags;
+ uint16_t la_asked;
+ uint16_t la_preempt;
+ uint16_t ln_byhint;
+ int16_t ln_state; /* IPv6 has ND6_LLINFO_NOSTATE == -2 */
+ uint16_t ln_router;
+ time_t ln_ntick;
+ int lle_refcnt;
+
+ union {
+ uint64_t mac_aligned;
+ uint16_t mac16[3];
+ } ll_addr;
+
+ /* XXX af-private? */
+ union {
+ struct callout ln_timer_ch;
+ struct callout la_timer;
+ } lle_timer;
+ /* NB: struct sockaddr must immediately follow */
+};
+
+#define LLE_WLOCK(lle) rw_wlock(&(lle)->lle_lock)
+#define LLE_RLOCK(lle) rw_rlock(&(lle)->lle_lock)
+#define LLE_WUNLOCK(lle) rw_wunlock(&(lle)->lle_lock)
+#define LLE_RUNLOCK(lle) rw_runlock(&(lle)->lle_lock)
+#define LLE_DOWNGRADE(lle) rw_downgrade(&(lle)->lle_lock)
+#define LLE_TRY_UPGRADE(lle) rw_try_upgrade(&(lle)->lle_lock)
+#define LLE_LOCK_INIT(lle) rw_init_flags(&(lle)->lle_lock, "lle", RW_DUPOK)
+#define LLE_LOCK_DESTROY(lle) rw_destroy(&(lle)->lle_lock)
+#define LLE_WLOCK_ASSERT(lle) rw_assert(&(lle)->lle_lock, RA_WLOCKED)
+
+#define LLE_IS_VALID(lle) (((lle) != NULL) && ((lle) != (void *)-1))
+
+#define LLE_ADDREF(lle) do { \
+ LLE_WLOCK_ASSERT(lle); \
+ KASSERT((lle)->lle_refcnt >= 0, \
+ ("negative refcnt %d", (lle)->lle_refcnt)); \
+ (lle)->lle_refcnt++; \
+} while (0)
+
+#define LLE_REMREF(lle) do { \
+ LLE_WLOCK_ASSERT(lle); \
+ KASSERT((lle)->lle_refcnt > 1, \
+ ("bogus refcnt %d", (lle)->lle_refcnt)); \
+ (lle)->lle_refcnt--; \
+} while (0)
+
+#define LLE_FREE_LOCKED(lle) do { \
+ if ((lle)->lle_refcnt <= 1) \
+ (lle)->lle_tbl->llt_free((lle)->lle_tbl, (lle));\
+ else { \
+ (lle)->lle_refcnt--; \
+ LLE_WUNLOCK(lle); \
+ } \
+ /* guard against invalid refs */ \
+ lle = NULL; \
+} while (0)
+
+#define LLE_FREE(lle) do { \
+ LLE_WLOCK(lle); \
+ if ((lle)->lle_refcnt <= 1) \
+ (lle)->lle_tbl->llt_free((lle)->lle_tbl, (lle));\
+ else { \
+ (lle)->lle_refcnt--; \
+ LLE_WUNLOCK(lle); \
+ } \
+ /* guard against invalid refs */ \
+ lle = NULL; \
+} while (0)
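+
+/*
+ * Illustrative usage sketch (not part of the original sources): a
+ * caller that caches an entry, as llentry_update() does, takes a
+ * reference under the write lock and later drops it with LLE_FREE(),
+ * which only destroys the entry once the refcount falls to one:
+ *
+ *     LLE_WLOCK(lle);
+ *     LLE_ADDREF(lle);
+ *     LLE_WUNLOCK(lle);
+ *     ...
+ *     LLE_FREE(lle);
+ */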
+
+#define ln_timer_ch lle_timer.ln_timer_ch
+#define la_timer lle_timer.la_timer
+
+/* XXX bad name */
+#define L3_ADDR(lle) ((struct sockaddr *)(&lle[1]))
+#define L3_ADDR_LEN(lle) (((struct sockaddr *)(&lle[1]))->sa_len)
+
+#ifndef LLTBL_HASHTBL_SIZE
+#define LLTBL_HASHTBL_SIZE 32 /* default; must be a power of two */
+#endif
+
+#ifndef LLTBL_HASHMASK
+#define LLTBL_HASHMASK (LLTBL_HASHTBL_SIZE - 1)
+#endif
+
+struct lltable {
+ SLIST_ENTRY(lltable) llt_link;
+ struct llentries lle_head[LLTBL_HASHTBL_SIZE];
+ int llt_af;
+ struct ifnet *llt_ifp;
+
+ struct llentry * (*llt_new)(const struct sockaddr *, u_int);
+ void (*llt_free)(struct lltable *, struct llentry *);
+ void (*llt_prefix_free)(struct lltable *,
+ const struct sockaddr *prefix,
+ const struct sockaddr *mask);
+ struct llentry * (*llt_lookup)(struct lltable *, u_int flags,
+ const struct sockaddr *l3addr);
+ int (*llt_rtcheck)(struct ifnet *, u_int flags,
+ const struct sockaddr *);
+ int (*llt_dump)(struct lltable *,
+ struct sysctl_req *);
+};
+MALLOC_DECLARE(M_LLTABLE);
+
+/*
+ * Flags to be passed to lla_lookup().
+ */
+#define LLE_DELETED 0x0001 /* entry must be deleted */
+#define LLE_STATIC 0x0002 /* entry is static */
+#define LLE_IFADDR 0x0004 /* entry is interface addr */
+#define LLE_VALID 0x0008 /* ll_addr is valid */
+#define LLE_PROXY 0x0010 /* proxy-only ARP entry */
+#define LLE_PUB 0x0020 /* publish entry (answer on the host's behalf) */
+#define LLE_DELETE 0x4000 /* delete on a lookup - match LLE_IFADDR */
+#define LLE_CREATE 0x8000 /* create on a lookup miss */
+#define LLE_EXCLUSIVE 0x2000 /* return lle xlocked */
+
+#define LLATBL_HASH(key, mask) \
+ (((((((key >> 8) ^ key) >> 8) ^ key) >> 8) ^ key) & mask)
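+
+/*
+ * LLATBL_HASH XOR-folds the four low-order bytes of the key,
+ * i.e. ((key >> 24) ^ (key >> 16) ^ (key >> 8) ^ key) & mask.
+ * Worked example with LLTBL_HASHMASK == 31: an IPv4 key of
+ * 0x0a000001 (10.0.0.1) yields 0x0a ^ 0x00 ^ 0x00 ^ 0x01 = 0x0b,
+ * i.e. bucket 11.
+ */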
+
+struct lltable *lltable_init(struct ifnet *, int);
+void lltable_free(struct lltable *);
+void lltable_prefix_free(int, struct sockaddr *,
+ struct sockaddr *);
+#if 0
+void lltable_drain(int);
+#endif
+int lltable_sysctl_dumparp(int, struct sysctl_req *);
+
+void llentry_free(struct llentry *);
+int llentry_update(struct llentry **, struct lltable *,
+ struct sockaddr_storage *, struct ifnet *);
+
+/*
+ * Generic link layer address lookup function.
+ */
+static __inline struct llentry *
+lla_lookup(struct lltable *llt, u_int flags, const struct sockaddr *l3addr)
+{
+ return llt->llt_lookup(llt, flags, l3addr);
+}
+
+int lla_rt_output(struct rt_msghdr *, struct rt_addrinfo *);
+#endif /* _NET_IF_LLATBL_HH_ */
diff --git a/rtems/freebsd/net/if_llc.h b/rtems/freebsd/net/if_llc.h
new file mode 100644
index 00000000..b72f21bc
--- /dev/null
+++ b/rtems/freebsd/net/if_llc.h
@@ -0,0 +1,161 @@
+/* $NetBSD: if_llc.h,v 1.12 1999/11/19 20:41:19 thorpej Exp $ */
+
+/*-
+ * Copyright (c) 1988, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)if_llc.h 8.1 (Berkeley) 6/10/93
+ * $FreeBSD$
+ */
+
+#ifndef _NET_IF_LLC_HH_
+#define _NET_IF_LLC_HH_
+
+/*
+ * IEEE 802.2 Link Level Control headers, for use in conjunction with
+ * 802.{3,4,5} media access control methods.
+ *
+ * Headers here do not use bit fields due to shortcomings in many
+ * compilers.
+ */
+
+struct llc {
+ u_int8_t llc_dsap;
+ u_int8_t llc_ssap;
+ union {
+ struct {
+ u_int8_t control;
+ u_int8_t format_id;
+ u_int8_t class;
+ u_int8_t window_x2;
+ } __packed type_u;
+ struct {
+ u_int8_t num_snd_x2;
+ u_int8_t num_rcv_x2;
+ } __packed type_i;
+ struct {
+ u_int8_t control;
+ u_int8_t num_rcv_x2;
+ } __packed type_s;
+ struct {
+ u_int8_t control;
+ /*
+ * We cannot put the following fields in a structure because
+ * the structure rounding might cause padding.
+ */
+ u_int8_t frmr_rej_pdu0;
+ u_int8_t frmr_rej_pdu1;
+ u_int8_t frmr_control;
+ u_int8_t frmr_control_ext;
+ u_int8_t frmr_cause;
+ } __packed type_frmr;
+ struct {
+ u_int8_t control;
+ u_int8_t org_code[3];
+ u_int16_t ether_type;
+ } __packed type_snap;
+ struct {
+ u_int8_t control;
+ u_int8_t control_ext;
+ } __packed type_raw;
+ } __packed llc_un;
+} __packed;
+
+struct frmrinfo {
+ u_int8_t frmr_rej_pdu0;
+ u_int8_t frmr_rej_pdu1;
+ u_int8_t frmr_control;
+ u_int8_t frmr_control_ext;
+ u_int8_t frmr_cause;
+} __packed;
+
+#define llc_control llc_un.type_u.control
+#define llc_control_ext llc_un.type_raw.control_ext
+#define llc_fid llc_un.type_u.format_id
+#define llc_class llc_un.type_u.class
+#define llc_window llc_un.type_u.window_x2
+#define llc_frmrinfo llc_un.type_frmr.frmr_rej_pdu0
+#define llc_frmr_pdu0 llc_un.type_frmr.frmr_rej_pdu0
+#define llc_frmr_pdu1 llc_un.type_frmr.frmr_rej_pdu1
+#define llc_frmr_control llc_un.type_frmr.frmr_control
+#define llc_frmr_control_ext llc_un.type_frmr.frmr_control_ext
+#define llc_frmr_cause llc_un.type_frmr.frmr_cause
+#define llc_snap llc_un.type_snap
+
+/*
+ * Don't use sizeof(struct llc_un) for LLC header sizes
+ */
+#define LLC_ISFRAMELEN 4
+#define LLC_UFRAMELEN 3
+#define LLC_FRMRLEN 7
+#define LLC_SNAPFRAMELEN 8
+
+#ifdef CTASSERT
+CTASSERT(sizeof (struct llc) == LLC_SNAPFRAMELEN);
+#endif
+
+/*
+ * Unnumbered LLC format commands
+ */
+#define LLC_UI 0x3
+#define LLC_UI_P 0x13
+#define LLC_DISC 0x43
+#define LLC_DISC_P 0x53
+#define LLC_UA 0x63
+#define LLC_UA_P 0x73
+#define LLC_TEST 0xe3
+#define LLC_TEST_P 0xf3
+#define LLC_FRMR 0x87
+#define LLC_FRMR_P 0x97
+#define LLC_DM 0x0f
+#define LLC_DM_P 0x1f
+#define LLC_XID 0xaf
+#define LLC_XID_P 0xbf
+#define LLC_SABME 0x6f
+#define LLC_SABME_P 0x7f
+
+/*
+ * Supervisory LLC commands
+ */
+#define LLC_RR 0x01
+#define LLC_RNR 0x05
+#define LLC_REJ 0x09
+
+/*
+ * Info format - dummy only
+ */
+#define LLC_INFO 0x00
+
+/*
+ * ISO PDTR 10178 contains among others
+ */
+#define LLC_8021D_LSAP 0x42
+#define LLC_X25_LSAP 0x7e
+#define LLC_SNAP_LSAP 0xaa
+#define LLC_ISO_LSAP 0xfe
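+
+/*
+ * Illustrative sketch (an assumption, not from this header): filling
+ * in an 802.2 SNAP header that encapsulates an Ethernet type, with
+ * ETHERTYPE_IP taken from <net/ethernet.h>:
+ *
+ *     struct llc l;
+ *
+ *     l.llc_dsap = l.llc_ssap = LLC_SNAP_LSAP;
+ *     l.llc_control = LLC_UI;
+ *     l.llc_snap.org_code[0] = l.llc_snap.org_code[1] =
+ *         l.llc_snap.org_code[2] = 0;
+ *     l.llc_snap.ether_type = htons(ETHERTYPE_IP);
+ */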
+
+#endif /* _NET_IF_LLC_HH_ */
diff --git a/rtems/freebsd/net/if_loop.c b/rtems/freebsd/net/if_loop.c
new file mode 100644
index 00000000..bcd76edf
--- /dev/null
+++ b/rtems/freebsd/net/if_loop.c
@@ -0,0 +1,451 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 1982, 1986, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)if_loop.c 8.2 (Berkeley) 1/9/95
+ * $FreeBSD$
+ */
+
+/*
+ * Loopback interface driver for protocol testing and timing.
+ */
+
+#include <rtems/freebsd/local/opt_atalk.h>
+#include <rtems/freebsd/local/opt_inet.h>
+#include <rtems/freebsd/local/opt_inet6.h>
+#include <rtems/freebsd/local/opt_ipx.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/module.h>
+#include <rtems/freebsd/machine/bus.h>
+#include <rtems/freebsd/sys/rman.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/sockio.h>
+#include <rtems/freebsd/sys/sysctl.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/if_clone.h>
+#include <rtems/freebsd/net/if_types.h>
+#include <rtems/freebsd/net/netisr.h>
+#include <rtems/freebsd/net/route.h>
+#include <rtems/freebsd/net/bpf.h>
+#include <rtems/freebsd/net/vnet.h>
+
+#ifdef INET
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/in_var.h>
+#endif
+
+#ifdef IPX
+#include <rtems/freebsd/netipx/ipx.h>
+#include <rtems/freebsd/netipx/ipx_if.h>
+#endif
+
+#ifdef INET6
+#ifndef INET
+#include <rtems/freebsd/netinet/in.h>
+#endif
+#include <rtems/freebsd/netinet6/in6_var.h>
+#include <rtems/freebsd/netinet/ip6.h>
+#endif
+
+#ifdef NETATALK
+#include <rtems/freebsd/netatalk/at.h>
+#include <rtems/freebsd/netatalk/at_var.h>
+#endif
+
+#include <rtems/freebsd/security/mac/mac_framework.h>
+
+#ifdef TINY_LOMTU
+#define LOMTU (1024+512)
+#elif defined(LARGE_LOMTU)
+#define LOMTU 131072
+#else
+#define LOMTU 16384
+#endif
+
+#define LO_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_SCTP)
+#define LO_CSUM_SET (CSUM_DATA_VALID | CSUM_PSEUDO_HDR | \
+ CSUM_IP_CHECKED | CSUM_IP_VALID | \
+ CSUM_SCTP_VALID)
+
+int loioctl(struct ifnet *, u_long, caddr_t);
+static void lortrequest(int, struct rtentry *, struct rt_addrinfo *);
+int looutput(struct ifnet *ifp, struct mbuf *m,
+ struct sockaddr *dst, struct route *ro);
+static int lo_clone_create(struct if_clone *, int, caddr_t);
+static void lo_clone_destroy(struct ifnet *);
+
+VNET_DEFINE(struct ifnet *, loif); /* Used externally */
+
+#ifdef VIMAGE
+static VNET_DEFINE(struct ifc_simple_data, lo_cloner_data);
+static VNET_DEFINE(struct if_clone, lo_cloner);
+#define V_lo_cloner_data VNET(lo_cloner_data)
+#define V_lo_cloner VNET(lo_cloner)
+#endif
+
+IFC_SIMPLE_DECLARE(lo, 1);
+
+static void
+lo_clone_destroy(struct ifnet *ifp)
+{
+
+#ifndef VIMAGE
+ /* XXX: destroying lo0 will lead to panics. */
+ KASSERT(V_loif != ifp, ("%s: destroying lo0", __func__));
+#endif
+
+ bpfdetach(ifp);
+ if_detach(ifp);
+ if_free(ifp);
+}
+
+static int
+lo_clone_create(struct if_clone *ifc, int unit, caddr_t params)
+{
+ struct ifnet *ifp;
+
+ ifp = if_alloc(IFT_LOOP);
+ if (ifp == NULL)
+ return (ENOSPC);
+
+ if_initname(ifp, ifc->ifc_name, unit);
+ ifp->if_mtu = LOMTU;
+ ifp->if_flags = IFF_LOOPBACK | IFF_MULTICAST;
+ ifp->if_ioctl = loioctl;
+ ifp->if_output = looutput;
+ ifp->if_snd.ifq_maxlen = ifqmaxlen;
+ ifp->if_capabilities = ifp->if_capenable = IFCAP_HWCSUM;
+ ifp->if_hwassist = LO_CSUM_FEATURES;
+ if_attach(ifp);
+ bpfattach(ifp, DLT_NULL, sizeof(u_int32_t));
+ if (V_loif == NULL)
+ V_loif = ifp;
+
+ return (0);
+}
+
+static void
+vnet_loif_init(const void *unused __unused)
+{
+
+#ifdef VIMAGE
+ V_lo_cloner = lo_cloner;
+ V_lo_cloner_data = lo_cloner_data;
+ V_lo_cloner.ifc_data = &V_lo_cloner_data;
+ if_clone_attach(&V_lo_cloner);
+#else
+ if_clone_attach(&lo_cloner);
+#endif
+}
+VNET_SYSINIT(vnet_loif_init, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_ANY,
+ vnet_loif_init, NULL);
+
+#ifdef VIMAGE
+static void
+vnet_loif_uninit(const void *unused __unused)
+{
+
+ if_clone_detach(&V_lo_cloner);
+ V_loif = NULL;
+}
+VNET_SYSUNINIT(vnet_loif_uninit, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_ANY,
+ vnet_loif_uninit, NULL);
+#endif
+
+static int
+loop_modevent(module_t mod, int type, void *data)
+{
+
+ switch (type) {
+ case MOD_LOAD:
+ break;
+
+ case MOD_UNLOAD:
+ printf("loop module unload - not possible for this module type\n");
+ return (EINVAL);
+
+ default:
+ return (EOPNOTSUPP);
+ }
+ return (0);
+}
+
+static moduledata_t loop_mod = {
+ "if_lo",
+ loop_modevent,
+ 0
+};
+
+DECLARE_MODULE(if_lo, loop_mod, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_ANY);
+
+int
+looutput(struct ifnet *ifp, struct mbuf *m, struct sockaddr *dst,
+ struct route *ro)
+{
+ u_int32_t af;
+ struct rtentry *rt = NULL;
+#ifdef MAC
+ int error;
+#endif
+
+ M_ASSERTPKTHDR(m); /* check if we have the packet header */
+
+ if (ro != NULL)
+ rt = ro->ro_rt;
+#ifdef MAC
+ error = mac_ifnet_check_transmit(ifp, m);
+ if (error) {
+ m_freem(m);
+ return (error);
+ }
+#endif
+
+ if (rt && rt->rt_flags & (RTF_REJECT|RTF_BLACKHOLE)) {
+ m_freem(m);
+ return (rt->rt_flags & RTF_BLACKHOLE ? 0 :
+ rt->rt_flags & RTF_HOST ? EHOSTUNREACH : ENETUNREACH);
+ }
+
+ ifp->if_opackets++;
+ ifp->if_obytes += m->m_pkthdr.len;
+
+ /* BPF writes need to be handled specially. */
+ if (dst->sa_family == AF_UNSPEC) {
+ bcopy(dst->sa_data, &af, sizeof(af));
+ dst->sa_family = af;
+ }
+
+#if 1 /* XXX */
+ switch (dst->sa_family) {
+ case AF_INET:
+ if (ifp->if_capenable & IFCAP_RXCSUM) {
+ m->m_pkthdr.csum_data = 0xffff;
+ m->m_pkthdr.csum_flags = LO_CSUM_SET;
+ }
+ m->m_pkthdr.csum_flags &= ~LO_CSUM_FEATURES;
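+ /* FALLTHROUGH */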
+ case AF_INET6:
+ case AF_IPX:
+ case AF_APPLETALK:
+ break;
+ default:
+ printf("looutput: af=%d unexpected\n", dst->sa_family);
+ m_freem(m);
+ return (EAFNOSUPPORT);
+ }
+#endif
+ return (if_simloop(ifp, m, dst->sa_family, 0));
+}
+
+/*
+ * if_simloop()
+ *
+ * This function is to support software emulation of hardware loopback,
+ * i.e., for interfaces with the IFF_SIMPLEX attribute. Since they can't
+ * hear their own broadcasts, we create a copy of the packet that we
+ * would normally receive via a hardware loopback.
+ *
+ * This function expects the packet to include the media header of length hlen.
+ */
+int
+if_simloop(struct ifnet *ifp, struct mbuf *m, int af, int hlen)
+{
+ int isr;
+
+ M_ASSERTPKTHDR(m);
+ m_tag_delete_nonpersistent(m);
+ m->m_pkthdr.rcvif = ifp;
+
+#ifdef MAC
+ mac_ifnet_create_mbuf(ifp, m);
+#endif
+
+ /*
+ * Let BPF see the incoming packet in the following manner:
+ * - Emulated packet loopback for a simplex interface
+ * (net/if_ethersubr.c)
+ * -> passes it to ifp's BPF
+ * - IPv4/v6 multicast packet loopback (netinet(6)/ip(6)_output.c)
+ * -> does not pass it to any BPF
+ * - Normal packet loopback from myself to myself (net/if_loop.c)
+ * -> passes to lo0's BPF (even in the IPv6 case, where ifp != lo0)
+ */
+ if (hlen > 0) {
+ if (bpf_peers_present(ifp->if_bpf)) {
+ bpf_mtap(ifp->if_bpf, m);
+ }
+ } else {
+ if (bpf_peers_present(V_loif->if_bpf)) {
+ if ((m->m_flags & M_MCAST) == 0 || V_loif == ifp) {
+ /* XXX beware sizeof(af) != 4 */
+ u_int32_t af1 = af;
+
+ /*
+ * We need to prepend the address family.
+ */
+ bpf_mtap2(V_loif->if_bpf, &af1, sizeof(af1), m);
+ }
+ }
+ }
+
+ /* Strip away media header */
+ if (hlen > 0) {
+ m_adj(m, hlen);
+#ifndef __NO_STRICT_ALIGNMENT
+ /*
+ * Some archs do not like unaligned data, so
+ * we move data down in the first mbuf.
+ */
+ if (mtod(m, vm_offset_t) & 3) {
+ KASSERT(hlen >= 3, ("if_simloop: hlen too small"));
+ bcopy(m->m_data,
+ (char *)(mtod(m, vm_offset_t)
+ - (mtod(m, vm_offset_t) & 3)),
+ m->m_len);
+ m->m_data -= (mtod(m,vm_offset_t) & 3);
+ }
+#endif
+ }
+
+ /* Deliver to upper layer protocol */
+ switch (af) {
+#ifdef INET
+ case AF_INET:
+ isr = NETISR_IP;
+ break;
+#endif
+#ifdef INET6
+ case AF_INET6:
+ m->m_flags |= M_LOOP;
+ isr = NETISR_IPV6;
+ break;
+#endif
+#ifdef IPX
+ case AF_IPX:
+ isr = NETISR_IPX;
+ break;
+#endif
+#ifdef NETATALK
+ case AF_APPLETALK:
+ isr = NETISR_ATALK2;
+ break;
+#endif
+ default:
+ printf("if_simloop: can't handle af=%d\n", af);
+ m_freem(m);
+ return (EAFNOSUPPORT);
+ }
+ ifp->if_ipackets++;
+ ifp->if_ibytes += m->m_pkthdr.len;
+ netisr_queue(isr, m); /* mbuf is free'd on failure. */
+ return (0);
+}
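+
+/*
+ * Illustrative caller sketch (an assumption, not part of this file):
+ * a driver for a simplex interface loops a copy of an outgoing
+ * broadcast back to itself, passing the media header length so that
+ * if_simloop() can strip it:
+ *
+ *     struct mbuf *n = m_dup(m, M_DONTWAIT);
+ *
+ *     if (n != NULL)
+ *         (void)if_simloop(ifp, n, dst->sa_family, ETHER_HDR_LEN);
+ */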
+
+/* ARGSUSED */
+static void
+lortrequest(int cmd, struct rtentry *rt, struct rt_addrinfo *info)
+{
+
+ RT_LOCK_ASSERT(rt);
+ rt->rt_rmx.rmx_mtu = rt->rt_ifp->if_mtu;
+}
+
+/*
+ * Process an ioctl request.
+ */
+/* ARGSUSED */
+int
+loioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
+{
+ struct ifaddr *ifa;
+ struct ifreq *ifr = (struct ifreq *)data;
+ int error = 0, mask;
+
+ switch (cmd) {
+ case SIOCSIFADDR:
+ ifp->if_flags |= IFF_UP;
+ ifp->if_drv_flags |= IFF_DRV_RUNNING;
+ ifa = (struct ifaddr *)data;
+ ifa->ifa_rtrequest = lortrequest;
+ /*
+ * Everything else is done at a higher level.
+ */
+ break;
+
+ case SIOCADDMULTI:
+ case SIOCDELMULTI:
+ if (ifr == NULL) {
+ error = EAFNOSUPPORT; /* XXX */
+ break;
+ }
+ switch (ifr->ifr_addr.sa_family) {
+
+#ifdef INET
+ case AF_INET:
+ break;
+#endif
+#ifdef INET6
+ case AF_INET6:
+ break;
+#endif
+
+ default:
+ error = EAFNOSUPPORT;
+ break;
+ }
+ break;
+
+ case SIOCSIFMTU:
+ ifp->if_mtu = ifr->ifr_mtu;
+ break;
+
+ case SIOCSIFFLAGS:
+ break;
+
+ case SIOCSIFCAP:
+ mask = ifp->if_capenable ^ ifr->ifr_reqcap;
+ if ((mask & IFCAP_RXCSUM) != 0)
+ ifp->if_capenable ^= IFCAP_RXCSUM;
+ if ((mask & IFCAP_TXCSUM) != 0)
+ ifp->if_capenable ^= IFCAP_TXCSUM;
+ if (ifp->if_capenable & IFCAP_TXCSUM)
+ ifp->if_hwassist = LO_CSUM_FEATURES;
+ else
+ ifp->if_hwassist = 0;
+ break;
+
+ default:
+ error = EINVAL;
+ }
+ return (error);
+}
diff --git a/rtems/freebsd/net/if_media.c b/rtems/freebsd/net/if_media.c
new file mode 100644
index 00000000..00bf1386
--- /dev/null
+++ b/rtems/freebsd/net/if_media.c
@@ -0,0 +1,566 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/* $NetBSD: if_media.c,v 1.1 1997/03/17 02:55:15 thorpej Exp $ */
+/* $FreeBSD$ */
+
+/*-
+ * Copyright (c) 1997
+ * Jonathan Stone and Jason R. Thorpe. All rights reserved.
+ *
+ * This software is derived from information provided by Matt Thomas.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Jonathan Stone
+ * and Jason R. Thorpe for the NetBSD Project.
+ * 4. The names of the authors may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * BSD/OS-compatible network interface media selection.
+ *
+ * Where it is safe to do so, this code strays slightly from the BSD/OS
+ * design. Software which uses the API (device drivers, basically)
+ * shouldn't notice any difference.
+ *
+ * Many thanks to Matt Thomas for providing the information necessary
+ * to implement this interface.
+ */
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/sockio.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/module.h>
+#include <rtems/freebsd/sys/sysctl.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/if_media.h>
+
+/*
+ * Compile-time options:
+ * IFMEDIA_DEBUG:
+ * turn on implementation-level debug printfs.
+ * Useful for debugging newly-ported drivers.
+ */
+
+static struct ifmedia_entry *ifmedia_match(struct ifmedia *ifm,
+ int flags, int mask);
+
+#ifdef IFMEDIA_DEBUG
+int ifmedia_debug = 0;
+SYSCTL_INT(_debug, OID_AUTO, ifmedia, CTLFLAG_RW, &ifmedia_debug,
+ 0, "if_media debugging msgs");
+static void ifmedia_printword(int);
+#endif
+
+/*
+ * Initialize if_media struct for a specific interface instance.
+ */
+void
+ifmedia_init(struct ifmedia *ifm, int dontcare_mask,
+    ifm_change_cb_t change_callback, ifm_stat_cb_t status_callback)
+{
+
+ LIST_INIT(&ifm->ifm_list);
+ ifm->ifm_cur = NULL;
+ ifm->ifm_media = 0;
+ ifm->ifm_mask = dontcare_mask; /* IF don't-care bits */
+ ifm->ifm_change = change_callback;
+ ifm->ifm_status = status_callback;
+}
+
+void
+ifmedia_removeall(struct ifmedia *ifm)
+{
+ struct ifmedia_entry *entry;
+
+ for (entry = LIST_FIRST(&ifm->ifm_list); entry;
+ entry = LIST_FIRST(&ifm->ifm_list)) {
+ LIST_REMOVE(entry, ifm_list);
+ free(entry, M_IFADDR);
+ }
+}
+
+/*
+ * Add a media configuration to the list of supported media
+ * for a specific interface instance.
+ */
+void
+ifmedia_add(struct ifmedia *ifm, int mword, int data, void *aux)
+{
+ struct ifmedia_entry *entry;
+
+#ifdef IFMEDIA_DEBUG
+ if (ifmedia_debug) {
+ if (ifm == NULL) {
+ printf("ifmedia_add: null ifm\n");
+ return;
+ }
+ printf("Adding entry for ");
+ ifmedia_printword(mword);
+ }
+#endif
+
+ entry = malloc(sizeof(*entry), M_IFADDR, M_NOWAIT);
+ if (entry == NULL)
+ panic("ifmedia_add: can't malloc entry");
+
+ entry->ifm_media = mword;
+ entry->ifm_data = data;
+ entry->ifm_aux = aux;
+
+ LIST_INSERT_HEAD(&ifm->ifm_list, entry, ifm_list);
+}
+
+/*
+ * Add an array of media configurations to the list of
+ * supported media for a specific interface instance.
+ */
+void
+ifmedia_list_add(struct ifmedia *ifm, struct ifmedia_entry *lp, int count)
+{
+ int i;
+
+ for (i = 0; i < count; i++)
+ ifmedia_add(ifm, lp[i].ifm_media, lp[i].ifm_data,
+ lp[i].ifm_aux);
+}
+
+/*
+ * Set the default active media.
+ *
+ * Called by device-specific code which is assumed to have already
+ * selected the default media in hardware. We do _not_ call the
+ * media-change callback.
+ */
+void
+ifmedia_set(struct ifmedia *ifm, int target)
+{
+ struct ifmedia_entry *match;
+
+ match = ifmedia_match(ifm, target, ifm->ifm_mask);
+
+ if (match == NULL) {
+ printf("ifmedia_set: no match for 0x%x/0x%x\n",
+ target, ~ifm->ifm_mask);
+ panic("ifmedia_set");
+ }
+ ifm->ifm_cur = match;
+
+#ifdef IFMEDIA_DEBUG
+ if (ifmedia_debug) {
+ printf("ifmedia_set: target ");
+ ifmedia_printword(target);
+ printf("ifmedia_set: setting to ");
+ ifmedia_printword(ifm->ifm_cur->ifm_media);
+ }
+#endif
+}
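+
+/*
+ * Illustrative driver-side sketch (an assumption, not part of this
+ * file): a typical attach routine registers its media words and picks
+ * a default; foo_media_change/foo_media_status are hypothetical driver
+ * callbacks:
+ *
+ *     ifmedia_init(&sc->ifm, 0, foo_media_change, foo_media_status);
+ *     ifmedia_add(&sc->ifm, IFM_ETHER | IFM_AUTO, 0, NULL);
+ *     ifmedia_set(&sc->ifm, IFM_ETHER | IFM_AUTO);
+ */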
+
+/*
+ * Device-independent media ioctl support function.
+ */
+int
+ifmedia_ioctl(struct ifnet *ifp, struct ifreq *ifr, struct ifmedia *ifm,
+    u_long cmd)
+{
+ struct ifmedia_entry *match;
+ struct ifmediareq *ifmr = (struct ifmediareq *) ifr;
+ int error = 0, sticky;
+
+ if (ifp == NULL || ifr == NULL || ifm == NULL)
+ return (EINVAL);
+
+ switch (cmd) {
+
+ /*
+ * Set the current media.
+ */
+ case SIOCSIFMEDIA:
+ {
+ struct ifmedia_entry *oldentry;
+ int oldmedia;
+ int newmedia = ifr->ifr_media;
+
+ match = ifmedia_match(ifm, newmedia, ifm->ifm_mask);
+ if (match == NULL) {
+#ifdef IFMEDIA_DEBUG
+ if (ifmedia_debug) {
+ printf(
+ "ifmedia_ioctl: no media found for 0x%x\n",
+ newmedia);
+ }
+#endif
+ return (ENXIO);
+ }
+
+ /*
+ * If no change, we're done.
+ * XXX Automedia may involve software intervention.
+ * Keep going in case the connected media changed.
+ * Similarly, if the best match changed (kernel debugger?).
+ */
+ if ((IFM_SUBTYPE(newmedia) != IFM_AUTO) &&
+ (newmedia == ifm->ifm_media) &&
+ (match == ifm->ifm_cur))
+ return (0);
+
+ /*
+ * We found a match, now make the driver switch to it.
+ * Make sure to preserve our old media type in case the
+ * driver can't switch.
+ */
+#ifdef IFMEDIA_DEBUG
+ if (ifmedia_debug) {
+ printf("ifmedia_ioctl: switching %s to ",
+ ifp->if_xname);
+ ifmedia_printword(match->ifm_media);
+ }
+#endif
+ oldentry = ifm->ifm_cur;
+ oldmedia = ifm->ifm_media;
+ ifm->ifm_cur = match;
+ ifm->ifm_media = newmedia;
+ error = (*ifm->ifm_change)(ifp);
+ if (error) {
+ ifm->ifm_cur = oldentry;
+ ifm->ifm_media = oldmedia;
+ }
+ break;
+ }
+
+ /*
+ * Get list of available media and current media on interface.
+ */
+ case SIOCGIFMEDIA:
+ {
+ struct ifmedia_entry *ep;
+ int *kptr, count;
+ int usermax; /* user requested max */
+
+ kptr = NULL; /* XXX gcc */
+
+ ifmr->ifm_active = ifmr->ifm_current = ifm->ifm_cur ?
+ ifm->ifm_cur->ifm_media : IFM_NONE;
+ ifmr->ifm_mask = ifm->ifm_mask;
+ ifmr->ifm_status = 0;
+ (*ifm->ifm_status)(ifp, ifmr);
+
+ count = 0;
+ usermax = 0;
+
+ /*
+ * Count the media entries on the list. This allows the
+ * caller to set ifmr->ifm_count to 0 on the first call
+ * to learn how much space to allocate.
+ */
+ LIST_FOREACH(ep, &ifm->ifm_list, ifm_list)
+ usermax++;
+
+ /*
+ * Don't allow the user to ask for too many
+ * or a negative number.
+ */
+ if (ifmr->ifm_count > usermax)
+ ifmr->ifm_count = usermax;
+ else if (ifmr->ifm_count < 0)
+ return (EINVAL);
+
+ if (ifmr->ifm_count != 0) {
+ kptr = (int *)malloc(ifmr->ifm_count * sizeof(int),
+ M_TEMP, M_NOWAIT);
+
+ if (kptr == NULL)
+ return (ENOMEM);
+ /*
+ * Get the media words from the interface's list.
+ */
+ ep = LIST_FIRST(&ifm->ifm_list);
+ for (; ep != NULL && count < ifmr->ifm_count;
+ ep = LIST_NEXT(ep, ifm_list), count++)
+ kptr[count] = ep->ifm_media;
+
+ if (ep != NULL)
+ error = E2BIG; /* oops! */
+ } else {
+ count = usermax;
+ }
+
+ /*
+ * We do the copyout on E2BIG, because that's
+ * just our way of telling userland that there
+ * are more. This is the behavior I've observed
+ * under BSD/OS 3.0.
+ */
+ sticky = error;
+ if ((error == 0 || error == E2BIG) && ifmr->ifm_count != 0) {
+ error = copyout((caddr_t)kptr,
+ (caddr_t)ifmr->ifm_ulist,
+ ifmr->ifm_count * sizeof(int));
+ }
+
+ if (error == 0)
+ error = sticky;
+
+ if (ifmr->ifm_count != 0)
+ free(kptr, M_TEMP);
+
+ ifmr->ifm_count = count;
+ break;
+ }
+
+ default:
+ return (EINVAL);
+ }
+
+ return (error);
+}
+
+/*
+ * Find media entry matching a given ifm word.
+ */
+static struct ifmedia_entry *
+ifmedia_match(struct ifmedia *ifm, int target, int mask)
+{
+ struct ifmedia_entry *match, *next;
+
+ match = NULL;
+ mask = ~mask;
+
+ LIST_FOREACH(next, &ifm->ifm_list, ifm_list) {
+ if ((next->ifm_media & mask) == (target & mask)) {
+#if defined(IFMEDIA_DEBUG) || defined(DIAGNOSTIC)
+ if (match) {
+ printf("ifmedia_match: multiple match for "
+ "0x%x/0x%x\n", target, mask);
+ }
+#endif
+ match = next;
+ }
+ }
+
+ return (match);
+}
+
+/*
+ * Compute the interface `baudrate' from the media, for the interface
+ * metrics (used by routing daemons).
+ */
+static const struct ifmedia_baudrate ifmedia_baudrate_descriptions[] =
+ IFM_BAUDRATE_DESCRIPTIONS;
+
+uint64_t
+ifmedia_baudrate(int mword)
+{
+ int i;
+
+ for (i = 0; ifmedia_baudrate_descriptions[i].ifmb_word != 0; i++) {
+ if ((mword & (IFM_NMASK|IFM_TMASK)) ==
+ ifmedia_baudrate_descriptions[i].ifmb_word)
+ return (ifmedia_baudrate_descriptions[i].ifmb_baudrate);
+ }
+
+ /* Not known. */
+ return (0);
+}
+
+#ifdef IFMEDIA_DEBUG
+struct ifmedia_description ifm_type_descriptions[] =
+ IFM_TYPE_DESCRIPTIONS;
+
+struct ifmedia_description ifm_subtype_ethernet_descriptions[] =
+ IFM_SUBTYPE_ETHERNET_DESCRIPTIONS;
+
+struct ifmedia_description ifm_subtype_ethernet_option_descriptions[] =
+ IFM_SUBTYPE_ETHERNET_OPTION_DESCRIPTIONS;
+
+struct ifmedia_description ifm_subtype_tokenring_descriptions[] =
+ IFM_SUBTYPE_TOKENRING_DESCRIPTIONS;
+
+struct ifmedia_description ifm_subtype_tokenring_option_descriptions[] =
+ IFM_SUBTYPE_TOKENRING_OPTION_DESCRIPTIONS;
+
+struct ifmedia_description ifm_subtype_fddi_descriptions[] =
+ IFM_SUBTYPE_FDDI_DESCRIPTIONS;
+
+struct ifmedia_description ifm_subtype_fddi_option_descriptions[] =
+ IFM_SUBTYPE_FDDI_OPTION_DESCRIPTIONS;
+
+struct ifmedia_description ifm_subtype_ieee80211_descriptions[] =
+ IFM_SUBTYPE_IEEE80211_DESCRIPTIONS;
+
+struct ifmedia_description ifm_subtype_ieee80211_option_descriptions[] =
+ IFM_SUBTYPE_IEEE80211_OPTION_DESCRIPTIONS;
+
+struct ifmedia_description ifm_subtype_ieee80211_mode_descriptions[] =
+ IFM_SUBTYPE_IEEE80211_MODE_DESCRIPTIONS;
+
+struct ifmedia_description ifm_subtype_atm_descriptions[] =
+ IFM_SUBTYPE_ATM_DESCRIPTIONS;
+
+struct ifmedia_description ifm_subtype_atm_option_descriptions[] =
+ IFM_SUBTYPE_ATM_OPTION_DESCRIPTIONS;
+
+struct ifmedia_description ifm_subtype_shared_descriptions[] =
+ IFM_SUBTYPE_SHARED_DESCRIPTIONS;
+
+struct ifmedia_description ifm_shared_option_descriptions[] =
+ IFM_SHARED_OPTION_DESCRIPTIONS;
+
+struct ifmedia_type_to_subtype {
+ struct ifmedia_description *subtypes;
+ struct ifmedia_description *options;
+ struct ifmedia_description *modes;
+};
+
+/* must be in the same order as IFM_TYPE_DESCRIPTIONS */
+struct ifmedia_type_to_subtype ifmedia_types_to_subtypes[] = {
+ {
+ &ifm_subtype_ethernet_descriptions[0],
+ &ifm_subtype_ethernet_option_descriptions[0],
+ NULL,
+ },
+ {
+ &ifm_subtype_tokenring_descriptions[0],
+ &ifm_subtype_tokenring_option_descriptions[0],
+ NULL,
+ },
+ {
+ &ifm_subtype_fddi_descriptions[0],
+ &ifm_subtype_fddi_option_descriptions[0],
+ NULL,
+ },
+ {
+ &ifm_subtype_ieee80211_descriptions[0],
+ &ifm_subtype_ieee80211_option_descriptions[0],
+ &ifm_subtype_ieee80211_mode_descriptions[0]
+ },
+ {
+ &ifm_subtype_atm_descriptions[0],
+ &ifm_subtype_atm_option_descriptions[0],
+ NULL,
+ },
+};
+
+/*
+ * Print a media word.
+ */
+static void
+ifmedia_printword(int ifmw)
+{
+ struct ifmedia_description *desc;
+ struct ifmedia_type_to_subtype *ttos;
+ int seen_option = 0;
+
+ /* Find the top-level interface type. */
+ for (desc = ifm_type_descriptions, ttos = ifmedia_types_to_subtypes;
+ desc->ifmt_string != NULL; desc++, ttos++)
+ if (IFM_TYPE(ifmw) == desc->ifmt_word)
+ break;
+ if (desc->ifmt_string == NULL) {
+ printf("<unknown type>\n");
+ return;
+ }
+	printf("%s", desc->ifmt_string);
+
+ /* Any mode. */
+	for (desc = ttos->modes; desc && desc->ifmt_string != NULL; desc++)
+		if (IFM_MODE(ifmw) == desc->ifmt_word) {
+			printf(" mode %s", desc->ifmt_string);
+			break;
+		}
+
+ /*
+ * Check for the shared subtype descriptions first, then the
+ * type-specific ones.
+ */
+ for (desc = ifm_subtype_shared_descriptions;
+ desc->ifmt_string != NULL; desc++)
+ if (IFM_SUBTYPE(ifmw) == desc->ifmt_word)
+ goto got_subtype;
+
+ for (desc = ttos->subtypes; desc->ifmt_string != NULL; desc++)
+ if (IFM_SUBTYPE(ifmw) == desc->ifmt_word)
+ break;
+ if (desc->ifmt_string == NULL) {
+ printf(" <unknown subtype>\n");
+ return;
+ }
+
+ got_subtype:
+ printf(" %s", desc->ifmt_string);
+
+ /*
+ * Look for shared options.
+ */
+ for (desc = ifm_shared_option_descriptions;
+ desc->ifmt_string != NULL; desc++) {
+ if (ifmw & desc->ifmt_word) {
+ if (seen_option == 0)
+ printf(" <");
+ printf("%s%s", seen_option++ ? "," : "",
+ desc->ifmt_string);
+ }
+ }
+
+ /*
+ * Look for subtype-specific options.
+ */
+ for (desc = ttos->options; desc->ifmt_string != NULL; desc++) {
+ if (ifmw & desc->ifmt_word) {
+ if (seen_option == 0)
+ printf(" <");
+ printf("%s%s", seen_option++ ? "," : "",
+ desc->ifmt_string);
+ }
+ }
+ printf("%s\n", seen_option ? ">" : "");
+}
+#endif /* IFMEDIA_DEBUG */
diff --git a/rtems/freebsd/net/if_media.h b/rtems/freebsd/net/if_media.h
new file mode 100644
index 00000000..3eddbd89
--- /dev/null
+++ b/rtems/freebsd/net/if_media.h
@@ -0,0 +1,692 @@
+/* $NetBSD: if_media.h,v 1.3 1997/03/26 01:19:27 thorpej Exp $ */
+/* $FreeBSD$ */
+
+/*-
+ * Copyright (c) 1997
+ * Jonathan Stone and Jason R. Thorpe. All rights reserved.
+ *
+ * This software is derived from information provided by Matt Thomas.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Jonathan Stone
+ * and Jason R. Thorpe for the NetBSD Project.
+ * 4. The names of the authors may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _NET_IF_MEDIA_HH_
+#define _NET_IF_MEDIA_HH_
+
+/*
+ * Prototypes and definitions for BSD/OS-compatible network interface
+ * media selection.
+ *
+ * Where it is safe to do so, this code strays slightly from the BSD/OS
+ * design. Software which uses the API (device drivers, basically)
+ * shouldn't notice any difference.
+ *
+ * Many thanks to Matt Thomas for providing the information necessary
+ * to implement this interface.
+ */
+
+#ifdef _KERNEL
+
+#include <rtems/freebsd/sys/queue.h>
+
+/*
+ * Driver callbacks for media status and change requests.
+ */
+typedef int (*ifm_change_cb_t)(struct ifnet *ifp);
+typedef void (*ifm_stat_cb_t)(struct ifnet *ifp, struct ifmediareq *req);
+
+/*
+ * In-kernel representation of a single supported media type.
+ */
+struct ifmedia_entry {
+ LIST_ENTRY(ifmedia_entry) ifm_list;
+ int ifm_media; /* description of this media attachment */
+ int ifm_data; /* for driver-specific use */
+ void *ifm_aux; /* for driver-specific use */
+};
+
+/*
+ * One of these goes into a network interface's softc structure.
+ * It is used to keep general media state.
+ */
+struct ifmedia {
+ int ifm_mask; /* mask of changes we don't care about */
+ int ifm_media; /* current user-set media word */
+ struct ifmedia_entry *ifm_cur; /* currently selected media */
+ LIST_HEAD(, ifmedia_entry) ifm_list; /* list of all supported media */
+ ifm_change_cb_t ifm_change; /* media change driver callback */
+ ifm_stat_cb_t ifm_status; /* media status driver callback */
+};
+
+/* Initialize an interface's struct ifmedia field. */
+void ifmedia_init(struct ifmedia *ifm, int dontcare_mask,
+ ifm_change_cb_t change_callback, ifm_stat_cb_t status_callback);
+
+/* Remove all media from a struct ifmedia. */
+void	ifmedia_removeall(struct ifmedia *ifm);
+
+/* Add one supported medium to a struct ifmedia. */
+void ifmedia_add(struct ifmedia *ifm, int mword, int data, void *aux);
+
+/* Add an array of media (struct ifmedia_entry) to a struct ifmedia. */
+void ifmedia_list_add(struct ifmedia *mp, struct ifmedia_entry *lp,
+ int count);
+
+/* Set default media type on initialization. */
+void ifmedia_set(struct ifmedia *ifm, int mword);
+
+/* Common ioctl function for getting/setting media, called by driver. */
+int ifmedia_ioctl(struct ifnet *ifp, struct ifreq *ifr,
+ struct ifmedia *ifm, u_long cmd);
+
+/* Compute baudrate for a given media. */
+uint64_t ifmedia_baudrate(int);
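+
+/*
+ * Typical attach-time usage (a minimal sketch, not part of the
+ * original source; the softc layout, the foo_media_change() and
+ * foo_media_status() callbacks, and the supported media words are
+ * assumptions):
+ *
+ *	ifmedia_init(&sc->sc_media, 0, foo_media_change, foo_media_status);
+ *	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10_T, 0, NULL);
+ *	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_100_TX, 0, NULL);
+ *	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
+ *	ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
+ *
+ * The driver's if_ioctl routine then forwards media requests:
+ *
+ *	case SIOCSIFMEDIA:
+ *	case SIOCGIFMEDIA:
+ *		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
+ *		break;
+ */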
+
+#endif /* _KERNEL */
+
+/*
+ * if_media Options word:
+ * Bits Use
+ * ---- -------
+ * 0-4 Media variant
+ * 5-7 Media type
+ * 8-15 Type specific options
+ * 16-18 Mode (for multi-mode devices)
+ * 19 RFU
+ * 20-27 Shared (global) options
+ * 28-31 Instance
+ */
+
+/*
+ * Ethernet
+ */
+#define IFM_ETHER 0x00000020
+#define IFM_10_T 3 /* 10BaseT - RJ45 */
+#define IFM_10_2 4 /* 10Base2 - Thinnet */
+#define IFM_10_5 5 /* 10Base5 - AUI */
+#define IFM_100_TX 6 /* 100BaseTX - RJ45 */
+#define IFM_100_FX 7 /* 100BaseFX - Fiber */
+#define IFM_100_T4 8 /* 100BaseT4 - 4 pair cat 3 */
+#define IFM_100_VG 9 /* 100VG-AnyLAN */
+#define IFM_100_T2 10 /* 100BaseT2 */
+#define IFM_1000_SX 11 /* 1000BaseSX - multi-mode fiber */
+#define IFM_10_STP 12 /* 10BaseT over shielded TP */
+#define IFM_10_FL 13 /* 10BaseFL - Fiber */
+#define IFM_1000_LX 14 /* 1000baseLX - single-mode fiber */
+#define IFM_1000_CX 15 /* 1000baseCX - 150ohm STP */
+#define IFM_1000_T 16 /* 1000baseT - 4 pair cat 5 */
+#define IFM_HPNA_1 17 /* HomePNA 1.0 (1Mb/s) */
+#define IFM_10G_LR 18 /* 10GBase-LR 1310nm Single-mode */
+#define IFM_10G_SR 19 /* 10GBase-SR 850nm Multi-mode */
+#define IFM_10G_CX4 20 /* 10GBase CX4 copper */
+#define IFM_2500_SX 21 /* 2500BaseSX - multi-mode fiber */
+#define IFM_10G_TWINAX 22 /* 10GBase Twinax copper */
+#define IFM_10G_TWINAX_LONG 23 /* 10GBase Twinax Long copper */
+#define IFM_10G_LRM 24 /* 10GBase-LRM 850nm Multi-mode */
+#define IFM_UNKNOWN 25 /* media types not defined yet */
+#define IFM_10G_T 26 /* 10GBase-T - RJ45 */
+
+/* note 31 is the max! */
+
+#define IFM_ETH_MASTER 0x00000100 /* master mode (1000baseT) */
+#define IFM_ETH_RXPAUSE 0x00000200 /* receive PAUSE frames */
+#define IFM_ETH_TXPAUSE 0x00000400 /* transmit PAUSE frames */
+
+/*
+ * Token ring
+ */
+#define IFM_TOKEN 0x00000040
+#define IFM_TOK_STP4 3 /* Shielded twisted pair 4m - DB9 */
+#define IFM_TOK_STP16 4 /* Shielded twisted pair 16m - DB9 */
+#define IFM_TOK_UTP4 5 /* Unshielded twisted pair 4m - RJ45 */
+#define IFM_TOK_UTP16 6 /* Unshielded twisted pair 16m - RJ45 */
+#define IFM_TOK_STP100 7 /* Shielded twisted pair 100m - DB9 */
+#define IFM_TOK_UTP100 8 /* Unshielded twisted pair 100m - RJ45 */
+#define IFM_TOK_ETR 0x00000200 /* Early token release */
+#define IFM_TOK_SRCRT 0x00000400 /* Enable source routing features */
+#define IFM_TOK_ALLR 0x00000800 /* All routes / Single route bcast */
+#define IFM_TOK_DTR 0x00002000 /* Dedicated token ring */
+#define IFM_TOK_CLASSIC 0x00004000 /* Classic token ring */
+#define IFM_TOK_AUTO 0x00008000 /* Automatic Dedicate/Classic token ring */
+
+/*
+ * FDDI
+ */
+#define IFM_FDDI 0x00000060
+#define IFM_FDDI_SMF 3 /* Single-mode fiber */
+#define IFM_FDDI_MMF 4 /* Multi-mode fiber */
+#define IFM_FDDI_UTP 5 /* CDDI / UTP */
+#define IFM_FDDI_DA 0x00000100 /* Dual attach / single attach */
+
+/*
+ * IEEE 802.11 Wireless
+ */
+#define IFM_IEEE80211 0x00000080
+/* NB: 0,1,2 are auto, manual, none defined below */
+#define IFM_IEEE80211_FH1 3 /* Frequency Hopping 1Mbps */
+#define IFM_IEEE80211_FH2 4 /* Frequency Hopping 2Mbps */
+#define IFM_IEEE80211_DS1 5 /* Direct Sequence 1Mbps */
+#define IFM_IEEE80211_DS2 6 /* Direct Sequence 2Mbps */
+#define IFM_IEEE80211_DS5 7 /* Direct Sequence 5.5Mbps */
+#define IFM_IEEE80211_DS11 8 /* Direct Sequence 11Mbps */
+#define IFM_IEEE80211_DS22 9 /* Direct Sequence 22Mbps */
+#define IFM_IEEE80211_OFDM6 10 /* OFDM 6Mbps */
+#define IFM_IEEE80211_OFDM9 11 /* OFDM 9Mbps */
+#define IFM_IEEE80211_OFDM12 12 /* OFDM 12Mbps */
+#define IFM_IEEE80211_OFDM18 13 /* OFDM 18Mbps */
+#define IFM_IEEE80211_OFDM24 14 /* OFDM 24Mbps */
+#define IFM_IEEE80211_OFDM36 15 /* OFDM 36Mbps */
+#define IFM_IEEE80211_OFDM48 16 /* OFDM 48Mbps */
+#define IFM_IEEE80211_OFDM54 17 /* OFDM 54Mbps */
+#define IFM_IEEE80211_OFDM72 18 /* OFDM 72Mbps */
+#define IFM_IEEE80211_DS354k 19 /* Direct Sequence 354Kbps */
+#define IFM_IEEE80211_DS512k 20 /* Direct Sequence 512Kbps */
+#define IFM_IEEE80211_OFDM3 21 /* OFDM 3Mbps */
+#define IFM_IEEE80211_OFDM4 22 /* OFDM 4.5Mbps */
+#define IFM_IEEE80211_OFDM27 23 /* OFDM 27Mbps */
+/* NB: not enough bits to express MCS fully */
+#define IFM_IEEE80211_MCS 24 /* HT MCS rate */
+
+#define IFM_IEEE80211_ADHOC 0x00000100 /* Operate in Adhoc mode */
+#define IFM_IEEE80211_HOSTAP 0x00000200 /* Operate in Host AP mode */
+#define IFM_IEEE80211_IBSS 0x00000400 /* Operate in IBSS mode */
+#define IFM_IEEE80211_WDS 0x00000800 /* Operate in WDS mode */
+#define IFM_IEEE80211_TURBO 0x00001000 /* Operate in turbo mode */
+#define IFM_IEEE80211_MONITOR 0x00002000 /* Operate in monitor mode */
+#define IFM_IEEE80211_MBSS 0x00004000 /* Operate in MBSS mode */
+
+/* operating mode for multi-mode devices */
+#define	IFM_IEEE80211_11A	0x00010000	/* 5 GHz, OFDM mode */
+#define	IFM_IEEE80211_11B	0x00020000	/* Direct Sequence mode */
+#define	IFM_IEEE80211_11G	0x00030000	/* 2 GHz, CCK mode */
+#define	IFM_IEEE80211_FH	0x00040000	/* 2 GHz, GFSK mode */
+#define	IFM_IEEE80211_11NA	0x00050000	/* 5 GHz, HT mode */
+#define	IFM_IEEE80211_11NG	0x00060000	/* 2 GHz, HT mode */
+
+/*
+ * ATM
+ */
+#define IFM_ATM 0x000000a0
+#define IFM_ATM_UNKNOWN 3
+#define IFM_ATM_UTP_25 4
+#define IFM_ATM_TAXI_100 5
+#define IFM_ATM_TAXI_140 6
+#define IFM_ATM_MM_155 7
+#define IFM_ATM_SM_155 8
+#define IFM_ATM_UTP_155 9
+#define IFM_ATM_MM_622 10
+#define IFM_ATM_SM_622 11
+#define IFM_ATM_VIRTUAL 12
+#define IFM_ATM_SDH 0x00000100 /* SDH instead of SONET */
+#define IFM_ATM_NOSCRAMB 0x00000200 /* no scrambling */
+#define IFM_ATM_UNASSIGNED 0x00000400 /* unassigned cells */
+
+/*
+ * CARP Common Address Redundancy Protocol
+ */
+#define IFM_CARP 0x000000c0
+
+/*
+ * Shared media sub-types
+ */
+#define IFM_AUTO 0 /* Autoselect best media */
+#define IFM_MANUAL 1 /* Jumper/dipswitch selects media */
+#define IFM_NONE 2 /* Deselect all media */
+
+/*
+ * Shared options
+ */
+#define IFM_FDX 0x00100000 /* Force full duplex */
+#define IFM_HDX 0x00200000 /* Force half duplex */
+#define IFM_FLOW 0x00400000 /* enable hardware flow control */
+#define IFM_FLAG0 0x01000000 /* Driver defined flag */
+#define IFM_FLAG1 0x02000000 /* Driver defined flag */
+#define IFM_FLAG2 0x04000000 /* Driver defined flag */
+#define IFM_LOOP 0x08000000 /* Put hardware in loopback */
+
+/*
+ * Masks
+ */
+#define IFM_NMASK 0x000000e0 /* Network type */
+#define IFM_TMASK 0x0000001f /* Media sub-type */
+#define IFM_IMASK 0xf0000000 /* Instance */
+#define IFM_ISHIFT 28 /* Instance shift */
+#define IFM_OMASK 0x0000ff00 /* Type specific options */
+#define IFM_MMASK 0x00070000 /* Mode */
+#define IFM_MSHIFT 16 /* Mode shift */
+#define IFM_GMASK 0x0ff00000 /* Global options */
+
+/* Ethernet flow control mask */
+#define IFM_ETH_FMASK (IFM_FLOW | IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE)
+
+/*
+ * Status bits
+ */
+#define IFM_AVALID 0x00000001 /* Active bit valid */
+#define IFM_ACTIVE 0x00000002 /* Interface attached to working net */
+
+/* Mask of "status valid" bits, for ifconfig(8). */
+#define IFM_STATUS_VALID IFM_AVALID
+
+/* List of "status valid" bits, for ifconfig(8). */
+#define IFM_STATUS_VALID_LIST { \
+ IFM_AVALID, \
+ 0 \
+}
+
+/*
+ * Macros to extract various bits of information from the media word.
+ */
+#define IFM_TYPE(x) ((x) & IFM_NMASK)
+#define IFM_SUBTYPE(x) ((x) & IFM_TMASK)
+#define IFM_TYPE_OPTIONS(x) ((x) & IFM_OMASK)
+#define IFM_INST(x) (((x) & IFM_IMASK) >> IFM_ISHIFT)
+#define IFM_OPTIONS(x) ((x) & (IFM_OMASK|IFM_GMASK))
+#define IFM_MODE(x) ((x) & IFM_MMASK)
+
+#define IFM_INST_MAX IFM_INST(IFM_IMASK)
+
+/*
+ * Macro to create a media word.
+ */
+#define IFM_MAKEWORD(type, subtype, options, instance) \
+ ((type) | (subtype) | (options) | ((instance) << IFM_ISHIFT))
+#define IFM_MAKEMODE(mode) \
+ (((mode) << IFM_MSHIFT) & IFM_MMASK)
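+
+/*
+ * Worked example (illustrative, not part of the original source):
+ * full-duplex 100baseTX on instance 0 is built and taken apart as
+ *
+ *	IFM_MAKEWORD(IFM_ETHER, IFM_100_TX, IFM_FDX, 0)
+ *	    == 0x00000020 | 0x00000006 | 0x00100000 == 0x00100026
+ *
+ *	IFM_TYPE(0x00100026)	== IFM_ETHER
+ *	IFM_SUBTYPE(0x00100026)	== IFM_100_TX
+ *	IFM_OPTIONS(0x00100026)	== IFM_FDX
+ */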
+
+/*
+ * NetBSD extension not defined in the BSDI API. This is used in various
+ * places to get the canonical description for a given type/subtype.
+ *
+ * NOTE: all but the top-level type descriptions must contain NO whitespace!
+ * Otherwise, parsing these in ifconfig(8) would be a nightmare.
+ */
+struct ifmedia_description {
+ int ifmt_word; /* word value; may be masked */
+ const char *ifmt_string; /* description */
+};
+
+#define IFM_TYPE_DESCRIPTIONS { \
+ { IFM_ETHER, "Ethernet" }, \
+ { IFM_TOKEN, "Token ring" }, \
+ { IFM_FDDI, "FDDI" }, \
+ { IFM_IEEE80211, "IEEE 802.11 Wireless Ethernet" }, \
+ { IFM_ATM, "ATM" }, \
+ { IFM_CARP, "Common Address Redundancy Protocol" }, \
+ { 0, NULL }, \
+}
+
+#define IFM_SUBTYPE_ETHERNET_DESCRIPTIONS { \
+ { IFM_10_T, "10baseT/UTP" }, \
+ { IFM_10_2, "10base2/BNC" }, \
+ { IFM_10_5, "10base5/AUI" }, \
+ { IFM_100_TX, "100baseTX" }, \
+ { IFM_100_FX, "100baseFX" }, \
+ { IFM_100_T4, "100baseT4" }, \
+ { IFM_100_VG, "100baseVG" }, \
+ { IFM_100_T2, "100baseT2" }, \
+ { IFM_10_STP, "10baseSTP" }, \
+ { IFM_10_FL, "10baseFL" }, \
+ { IFM_1000_SX, "1000baseSX" }, \
+ { IFM_1000_LX, "1000baseLX" }, \
+ { IFM_1000_CX, "1000baseCX" }, \
+ { IFM_1000_T, "1000baseT" }, \
+ { IFM_HPNA_1, "homePNA" }, \
+ { IFM_10G_LR, "10Gbase-LR" }, \
+ { IFM_10G_SR, "10Gbase-SR" }, \
+ { IFM_10G_CX4, "10Gbase-CX4" }, \
+ { IFM_2500_SX, "2500BaseSX" }, \
+ { IFM_10G_LRM, "10Gbase-LRM" }, \
+ { IFM_10G_TWINAX, "10Gbase-Twinax" }, \
+ { IFM_10G_TWINAX_LONG, "10Gbase-Twinax-Long" }, \
+ { IFM_UNKNOWN, "Unknown" }, \
+ { IFM_10G_T, "10Gbase-T" }, \
+ { 0, NULL }, \
+}
+
+#define IFM_SUBTYPE_ETHERNET_ALIASES { \
+ { IFM_10_T, "UTP" }, \
+ { IFM_10_T, "10UTP" }, \
+ { IFM_10_2, "BNC" }, \
+ { IFM_10_2, "10BNC" }, \
+ { IFM_10_5, "AUI" }, \
+ { IFM_10_5, "10AUI" }, \
+ { IFM_100_TX, "100TX" }, \
+ { IFM_100_T4, "100T4" }, \
+ { IFM_100_VG, "100VG" }, \
+ { IFM_100_T2, "100T2" }, \
+ { IFM_10_STP, "10STP" }, \
+ { IFM_10_FL, "10FL" }, \
+ { IFM_1000_SX, "1000SX" }, \
+ { IFM_1000_LX, "1000LX" }, \
+ { IFM_1000_CX, "1000CX" }, \
+ { IFM_1000_T, "1000baseTX" }, \
+ { IFM_1000_T, "1000TX" }, \
+ { IFM_1000_T, "1000T" }, \
+ { IFM_2500_SX, "2500SX" }, \
+ { 0, NULL }, \
+}
+
+#define IFM_SUBTYPE_ETHERNET_OPTION_DESCRIPTIONS { \
+ { IFM_ETH_MASTER, "master" }, \
+ { IFM_ETH_RXPAUSE, "rxpause" }, \
+ { IFM_ETH_TXPAUSE, "txpause" }, \
+ { 0, NULL }, \
+}
+
+#define IFM_SUBTYPE_TOKENRING_DESCRIPTIONS { \
+ { IFM_TOK_STP4, "DB9/4Mbit" }, \
+ { IFM_TOK_STP16, "DB9/16Mbit" }, \
+ { IFM_TOK_UTP4, "UTP/4Mbit" }, \
+ { IFM_TOK_UTP16, "UTP/16Mbit" }, \
+ { IFM_TOK_STP100, "STP/100Mbit" }, \
+ { IFM_TOK_UTP100, "UTP/100Mbit" }, \
+ { 0, NULL }, \
+}
+
+#define IFM_SUBTYPE_TOKENRING_ALIASES { \
+ { IFM_TOK_STP4, "4STP" }, \
+ { IFM_TOK_STP16, "16STP" }, \
+ { IFM_TOK_UTP4, "4UTP" }, \
+ { IFM_TOK_UTP16, "16UTP" }, \
+ { IFM_TOK_STP100, "100STP" }, \
+ { IFM_TOK_UTP100, "100UTP" }, \
+ { 0, NULL }, \
+}
+
+#define IFM_SUBTYPE_TOKENRING_OPTION_DESCRIPTIONS { \
+ { IFM_TOK_ETR, "EarlyTokenRelease" }, \
+ { IFM_TOK_SRCRT, "SourceRouting" }, \
+ { IFM_TOK_ALLR, "AllRoutes" }, \
+ { IFM_TOK_DTR, "Dedicated" }, \
+ { IFM_TOK_CLASSIC,"Classic" }, \
+ { IFM_TOK_AUTO, " " }, \
+ { 0, NULL }, \
+}
+
+#define IFM_SUBTYPE_FDDI_DESCRIPTIONS { \
+ { IFM_FDDI_SMF, "Single-mode" }, \
+ { IFM_FDDI_MMF, "Multi-mode" }, \
+ { IFM_FDDI_UTP, "UTP" }, \
+ { 0, NULL }, \
+}
+
+#define IFM_SUBTYPE_FDDI_ALIASES { \
+ { IFM_FDDI_SMF, "SMF" }, \
+ { IFM_FDDI_MMF, "MMF" }, \
+ { IFM_FDDI_UTP, "CDDI" }, \
+ { 0, NULL }, \
+}
+
+#define IFM_SUBTYPE_FDDI_OPTION_DESCRIPTIONS { \
+ { IFM_FDDI_DA, "Dual-attach" }, \
+ { 0, NULL }, \
+}
+
+#define IFM_SUBTYPE_IEEE80211_DESCRIPTIONS { \
+ { IFM_IEEE80211_FH1, "FH/1Mbps" }, \
+ { IFM_IEEE80211_FH2, "FH/2Mbps" }, \
+ { IFM_IEEE80211_DS1, "DS/1Mbps" }, \
+ { IFM_IEEE80211_DS2, "DS/2Mbps" }, \
+ { IFM_IEEE80211_DS5, "DS/5.5Mbps" }, \
+ { IFM_IEEE80211_DS11, "DS/11Mbps" }, \
+ { IFM_IEEE80211_DS22, "DS/22Mbps" }, \
+ { IFM_IEEE80211_OFDM6, "OFDM/6Mbps" }, \
+ { IFM_IEEE80211_OFDM9, "OFDM/9Mbps" }, \
+ { IFM_IEEE80211_OFDM12, "OFDM/12Mbps" }, \
+ { IFM_IEEE80211_OFDM18, "OFDM/18Mbps" }, \
+ { IFM_IEEE80211_OFDM24, "OFDM/24Mbps" }, \
+ { IFM_IEEE80211_OFDM36, "OFDM/36Mbps" }, \
+ { IFM_IEEE80211_OFDM48, "OFDM/48Mbps" }, \
+ { IFM_IEEE80211_OFDM54, "OFDM/54Mbps" }, \
+ { IFM_IEEE80211_OFDM72, "OFDM/72Mbps" }, \
+ { IFM_IEEE80211_DS354k, "DS/354Kbps" }, \
+ { IFM_IEEE80211_DS512k, "DS/512Kbps" }, \
+ { IFM_IEEE80211_OFDM3, "OFDM/3Mbps" }, \
+ { IFM_IEEE80211_OFDM4, "OFDM/4.5Mbps" }, \
+ { IFM_IEEE80211_OFDM27, "OFDM/27Mbps" }, \
+ { 0, NULL }, \
+}
+
+#define IFM_SUBTYPE_IEEE80211_ALIASES { \
+ { IFM_IEEE80211_FH1, "FH1" }, \
+ { IFM_IEEE80211_FH2, "FH2" }, \
+ { IFM_IEEE80211_FH1, "FrequencyHopping/1Mbps" }, \
+ { IFM_IEEE80211_FH2, "FrequencyHopping/2Mbps" }, \
+ { IFM_IEEE80211_DS1, "DS1" }, \
+ { IFM_IEEE80211_DS2, "DS2" }, \
+ { IFM_IEEE80211_DS5, "DS5.5" }, \
+ { IFM_IEEE80211_DS11, "DS11" }, \
+ { IFM_IEEE80211_DS22, "DS22" }, \
+ { IFM_IEEE80211_DS1, "DirectSequence/1Mbps" }, \
+ { IFM_IEEE80211_DS2, "DirectSequence/2Mbps" }, \
+ { IFM_IEEE80211_DS5, "DirectSequence/5.5Mbps" }, \
+ { IFM_IEEE80211_DS11, "DirectSequence/11Mbps" }, \
+ { IFM_IEEE80211_DS22, "DirectSequence/22Mbps" }, \
+ { IFM_IEEE80211_OFDM6, "OFDM6" }, \
+ { IFM_IEEE80211_OFDM9, "OFDM9" }, \
+ { IFM_IEEE80211_OFDM12, "OFDM12" }, \
+ { IFM_IEEE80211_OFDM18, "OFDM18" }, \
+ { IFM_IEEE80211_OFDM24, "OFDM24" }, \
+ { IFM_IEEE80211_OFDM36, "OFDM36" }, \
+ { IFM_IEEE80211_OFDM48, "OFDM48" }, \
+ { IFM_IEEE80211_OFDM54, "OFDM54" }, \
+ { IFM_IEEE80211_OFDM72, "OFDM72" }, \
+ { IFM_IEEE80211_DS1, "CCK1" }, \
+ { IFM_IEEE80211_DS2, "CCK2" }, \
+ { IFM_IEEE80211_DS5, "CCK5.5" }, \
+ { IFM_IEEE80211_DS11, "CCK11" }, \
+ { IFM_IEEE80211_DS354k, "DS354K" }, \
+ { IFM_IEEE80211_DS354k, "DirectSequence/354Kbps" }, \
+ { IFM_IEEE80211_DS512k, "DS512K" }, \
+ { IFM_IEEE80211_DS512k, "DirectSequence/512Kbps" }, \
+ { IFM_IEEE80211_OFDM3, "OFDM3" }, \
+ { IFM_IEEE80211_OFDM4, "OFDM4.5" }, \
+ { IFM_IEEE80211_OFDM27, "OFDM27" }, \
+ { 0, NULL }, \
+}
+
+#define IFM_SUBTYPE_IEEE80211_OPTION_DESCRIPTIONS { \
+ { IFM_IEEE80211_ADHOC, "adhoc" }, \
+ { IFM_IEEE80211_HOSTAP, "hostap" }, \
+ { IFM_IEEE80211_IBSS, "ibss" }, \
+ { IFM_IEEE80211_WDS, "wds" }, \
+ { IFM_IEEE80211_TURBO, "turbo" }, \
+ { IFM_IEEE80211_MONITOR, "monitor" }, \
+ { IFM_IEEE80211_MBSS, "mesh" }, \
+ { 0, NULL }, \
+}
+
+#define IFM_SUBTYPE_IEEE80211_MODE_DESCRIPTIONS { \
+ { IFM_AUTO, "autoselect" }, \
+ { IFM_IEEE80211_11A, "11a" }, \
+ { IFM_IEEE80211_11B, "11b" }, \
+ { IFM_IEEE80211_11G, "11g" }, \
+ { IFM_IEEE80211_FH, "fh" }, \
+ { IFM_IEEE80211_11NA, "11na" }, \
+ { IFM_IEEE80211_11NG, "11ng" }, \
+ { 0, NULL }, \
+}
+
+#define IFM_SUBTYPE_IEEE80211_MODE_ALIASES { \
+ { IFM_AUTO, "auto" }, \
+ { 0, NULL }, \
+}
+
+#define IFM_SUBTYPE_ATM_DESCRIPTIONS {	\
+ { IFM_ATM_UNKNOWN, "Unknown" }, \
+ { IFM_ATM_UTP_25, "UTP/25.6MBit" }, \
+ { IFM_ATM_TAXI_100, "Taxi/100MBit" }, \
+ { IFM_ATM_TAXI_140, "Taxi/140MBit" }, \
+ { IFM_ATM_MM_155, "Multi-mode/155MBit" }, \
+ { IFM_ATM_SM_155, "Single-mode/155MBit" }, \
+ { IFM_ATM_UTP_155, "UTP/155MBit" }, \
+ { IFM_ATM_MM_622, "Multi-mode/622MBit" }, \
+ { IFM_ATM_SM_622, "Single-mode/622MBit" }, \
+ { IFM_ATM_VIRTUAL, "Virtual" }, \
+ { 0, NULL }, \
+}
+
+#define IFM_SUBTYPE_ATM_ALIASES {	\
+ { IFM_ATM_UNKNOWN, "UNKNOWN" }, \
+ { IFM_ATM_UTP_25, "UTP-25" }, \
+ { IFM_ATM_TAXI_100, "TAXI-100" }, \
+ { IFM_ATM_TAXI_140, "TAXI-140" }, \
+ { IFM_ATM_MM_155, "MM-155" }, \
+ { IFM_ATM_SM_155, "SM-155" }, \
+ { IFM_ATM_UTP_155, "UTP-155" }, \
+ { IFM_ATM_MM_622, "MM-622" }, \
+ { IFM_ATM_SM_622, "SM-622" }, \
+ { IFM_ATM_VIRTUAL, "VIRTUAL" }, \
+ { 0, NULL }, \
+}
+
+#define IFM_SUBTYPE_ATM_OPTION_DESCRIPTIONS { \
+ { IFM_ATM_SDH, "SDH" }, \
+ { IFM_ATM_NOSCRAMB, "Noscramb" }, \
+ { IFM_ATM_UNASSIGNED, "Unassigned" }, \
+ { 0, NULL }, \
+}
+
+#define IFM_SUBTYPE_SHARED_DESCRIPTIONS { \
+ { IFM_AUTO, "autoselect" }, \
+ { IFM_MANUAL, "manual" }, \
+ { IFM_NONE, "none" }, \
+ { 0, NULL }, \
+}
+
+#define IFM_SUBTYPE_SHARED_ALIASES { \
+ { IFM_AUTO, "auto" }, \
+ { 0, NULL }, \
+}
+
+#define IFM_SHARED_OPTION_DESCRIPTIONS { \
+ { IFM_FDX, "full-duplex" }, \
+ { IFM_HDX, "half-duplex" }, \
+ { IFM_FLOW, "flowcontrol" }, \
+ { IFM_FLAG0, "flag0" }, \
+ { IFM_FLAG1, "flag1" }, \
+ { IFM_FLAG2, "flag2" }, \
+ { IFM_LOOP, "hw-loopback" }, \
+ { 0, NULL }, \
+}
+
+/*
+ * Baudrate descriptions for the various media types.
+ */
+struct ifmedia_baudrate {
+ int ifmb_word; /* media word */
+ uint64_t ifmb_baudrate; /* corresponding baudrate */
+};
+
+#define IFM_BAUDRATE_DESCRIPTIONS { \
+ { IFM_ETHER | IFM_10_T, IF_Mbps(10) }, \
+ { IFM_ETHER | IFM_10_2, IF_Mbps(10) }, \
+ { IFM_ETHER | IFM_10_5, IF_Mbps(10) }, \
+ { IFM_ETHER | IFM_100_TX, IF_Mbps(100) }, \
+ { IFM_ETHER | IFM_100_FX, IF_Mbps(100) }, \
+ { IFM_ETHER | IFM_100_T4, IF_Mbps(100) }, \
+ { IFM_ETHER | IFM_100_VG, IF_Mbps(100) }, \
+ { IFM_ETHER | IFM_100_T2, IF_Mbps(100) }, \
+ { IFM_ETHER | IFM_1000_SX, IF_Mbps(1000) }, \
+ { IFM_ETHER | IFM_10_STP, IF_Mbps(10) }, \
+ { IFM_ETHER | IFM_10_FL, IF_Mbps(10) }, \
+ { IFM_ETHER | IFM_1000_LX, IF_Mbps(1000) }, \
+ { IFM_ETHER | IFM_1000_CX, IF_Mbps(1000) }, \
+ { IFM_ETHER | IFM_1000_T, IF_Mbps(1000) }, \
+ { IFM_ETHER | IFM_HPNA_1, IF_Mbps(1) }, \
+ { IFM_ETHER | IFM_10G_LR, IF_Gbps(10ULL) }, \
+ { IFM_ETHER | IFM_10G_SR, IF_Gbps(10ULL) }, \
+ { IFM_ETHER | IFM_10G_CX4, IF_Gbps(10ULL) }, \
+ { IFM_ETHER | IFM_2500_SX, IF_Mbps(2500ULL) }, \
+ { IFM_ETHER | IFM_10G_TWINAX, IF_Gbps(10ULL) }, \
+ { IFM_ETHER | IFM_10G_TWINAX_LONG, IF_Gbps(10ULL) }, \
+ { IFM_ETHER | IFM_10G_LRM, IF_Gbps(10ULL) }, \
+ { IFM_ETHER | IFM_10G_T, IF_Gbps(10ULL) }, \
+ \
+ { IFM_TOKEN | IFM_TOK_STP4, IF_Mbps(4) }, \
+ { IFM_TOKEN | IFM_TOK_STP16, IF_Mbps(16) }, \
+ { IFM_TOKEN | IFM_TOK_UTP4, IF_Mbps(4) }, \
+ { IFM_TOKEN | IFM_TOK_UTP16, IF_Mbps(16) }, \
+ \
+ { IFM_FDDI | IFM_FDDI_SMF, IF_Mbps(100) }, \
+ { IFM_FDDI | IFM_FDDI_MMF, IF_Mbps(100) }, \
+ { IFM_FDDI | IFM_FDDI_UTP, IF_Mbps(100) }, \
+ \
+ { IFM_IEEE80211 | IFM_IEEE80211_FH1, IF_Mbps(1) }, \
+ { IFM_IEEE80211 | IFM_IEEE80211_FH2, IF_Mbps(2) }, \
+ { IFM_IEEE80211 | IFM_IEEE80211_DS2, IF_Mbps(2) }, \
+ { IFM_IEEE80211 | IFM_IEEE80211_DS5, IF_Kbps(5500) }, \
+ { IFM_IEEE80211 | IFM_IEEE80211_DS11, IF_Mbps(11) }, \
+ { IFM_IEEE80211 | IFM_IEEE80211_DS1, IF_Mbps(1) }, \
+ { IFM_IEEE80211 | IFM_IEEE80211_DS22, IF_Mbps(22) }, \
+ { IFM_IEEE80211 | IFM_IEEE80211_OFDM6, IF_Mbps(6) }, \
+ { IFM_IEEE80211 | IFM_IEEE80211_OFDM9, IF_Mbps(9) }, \
+ { IFM_IEEE80211 | IFM_IEEE80211_OFDM12, IF_Mbps(12) }, \
+ { IFM_IEEE80211 | IFM_IEEE80211_OFDM18, IF_Mbps(18) }, \
+ { IFM_IEEE80211 | IFM_IEEE80211_OFDM24, IF_Mbps(24) }, \
+ { IFM_IEEE80211 | IFM_IEEE80211_OFDM36, IF_Mbps(36) }, \
+ { IFM_IEEE80211 | IFM_IEEE80211_OFDM48, IF_Mbps(48) }, \
+ { IFM_IEEE80211 | IFM_IEEE80211_OFDM54, IF_Mbps(54) }, \
+ { IFM_IEEE80211 | IFM_IEEE80211_OFDM72, IF_Mbps(72) }, \
+ \
+ { 0, 0 }, \
+}
+
+/*
+ * Status descriptions for the various media types.
+ */
+struct ifmedia_status_description {
+ int ifms_type;
+ int ifms_valid;
+ int ifms_bit;
+ const char *ifms_string[2];
+};
+
+#define IFM_STATUS_DESC(ifms, bit) \
+ (ifms)->ifms_string[((ifms)->ifms_bit & (bit)) ? 1 : 0]
+
+#define IFM_STATUS_DESCRIPTIONS { \
+ { IFM_ETHER, IFM_AVALID, IFM_ACTIVE, \
+ { "no carrier", "active" } }, \
+ { IFM_FDDI, IFM_AVALID, IFM_ACTIVE, \
+ { "no ring", "inserted" } }, \
+ { IFM_TOKEN, IFM_AVALID, IFM_ACTIVE, \
+ { "no ring", "inserted" } }, \
+ { IFM_IEEE80211, IFM_AVALID, IFM_ACTIVE, \
+ { "no network", "active" } }, \
+ { IFM_ATM, IFM_AVALID, IFM_ACTIVE, \
+ { "no network", "active" } }, \
+ { IFM_CARP, IFM_AVALID, IFM_ACTIVE, \
+ { "backup", "master" } }, \
+ { 0, 0, 0, \
+ { NULL, NULL } } \
+}
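+
+/*
+ * Usage sketch (illustrative, not part of the original source):
+ * ifconfig-style code finds the IFM_STATUS_DESCRIPTIONS entry whose
+ * ifms_type matches IFM_TYPE() of the current media word, then picks
+ * the string with the status bits returned by SIOCGIFMEDIA:
+ *
+ *	if (ifmr.ifm_status & ifms->ifms_valid)
+ *		printf("status: %s\n",
+ *		    IFM_STATUS_DESC(ifms, ifmr.ifm_status));
+ */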
+#endif /* _NET_IF_MEDIA_HH_ */
diff --git a/rtems/freebsd/net/if_mib.c b/rtems/freebsd/net/if_mib.c
new file mode 100644
index 00000000..50cc725d
--- /dev/null
+++ b/rtems/freebsd/net/if_mib.c
@@ -0,0 +1,171 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright 1996 Massachusetts Institute of Technology
+ *
+ * Permission to use, copy, modify, and distribute this software and
+ * its documentation for any purpose and without fee is hereby
+ * granted, provided that both the above copyright notice and this
+ * permission notice appear in all copies, that both the above
+ * copyright notice and this permission notice appear in all
+ * supporting documentation, and that the name of M.I.T. not be used
+ * in advertising or publicity pertaining to distribution of the
+ * software without specific, written prior permission. M.I.T. makes
+ * no representations about the suitability of this software for any
+ * purpose. It is provided "as is" without express or implied
+ * warranty.
+ *
+ * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''. M.I.T. DISCLAIMS
+ * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT
+ * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/sysctl.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/if_mib.h>
+#include <rtems/freebsd/net/vnet.h>
+
+/*
+ * A sysctl(3) MIB for generic interface information. This information
+ * is exported in the net.link.generic branch, which has the following
+ * structure:
+ *
+ * net.link.generic .system - system-wide control variables
+ * and statistics (node)
+ * .ifdata.<ifindex>.general
+ * - what's in `struct ifdata'
+ * plus some other info
+ * .ifdata.<ifindex>.linkspecific
+ * - a link-type-specific data
+ * structure (as might be used
+ *				  by an SNMP agent)
+ *
+ * Perhaps someday we will make addresses accessible via this interface
+ * as well (then there will be four such...). The reason that the
+ * index comes before the last element in the name is because it
+ * seems more orthogonal that way, particularly with the possibility
+ * of other per-interface data living down here as well (e.g., integrated
+ * services stuff).
+ */
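+
+/*
+ * Usage sketch (illustrative, not part of the original source): from
+ * userland, the general statistics for the interface with index 1 can
+ * be read with the numeric MIB name
+ * { CTL_NET, PF_LINK, NETLINK_GENERIC, IFMIB_IFDATA, 1, IFDATA_GENERAL }:
+ *
+ *	int name[6] = { CTL_NET, PF_LINK, NETLINK_GENERIC,
+ *	    IFMIB_IFDATA, 1, IFDATA_GENERAL };
+ *	struct ifmibdata ifmd;
+ *	size_t len = sizeof(ifmd);
+ *
+ *	if (sysctl(name, 6, &ifmd, &len, NULL, 0) == 0)
+ *		printf("%s: %d promiscuous listener(s)\n",
+ *		    ifmd.ifmd_name, ifmd.ifmd_pcount);
+ */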
+
+SYSCTL_DECL(_net_link_generic);
+SYSCTL_NODE(_net_link_generic, IFMIB_SYSTEM, system, CTLFLAG_RW, 0,
+ "Variables global to all interfaces");
+
+SYSCTL_VNET_INT(_net_link_generic_system, IFMIB_IFCOUNT, ifcount, CTLFLAG_RD,
+ &VNET_NAME(if_index), 0,
+ "Number of configured interfaces");
+
+static int
+sysctl_ifdata(SYSCTL_HANDLER_ARGS) /* XXX bad syntax! */
+{
+ int *name = (int *)arg1;
+ int error;
+ u_int namelen = arg2;
+ struct ifnet *ifp;
+ struct ifmibdata ifmd;
+ size_t dlen;
+ char *dbuf;
+
+ if (namelen != 2)
+		return (EINVAL);
+ if (name[0] <= 0)
+ return (ENOENT);
+ ifp = ifnet_byindex_ref(name[0]);
+ if (ifp == NULL)
+ return (ENOENT);
+
+ switch(name[1]) {
+ default:
+ error = ENOENT;
+ goto out;
+
+ case IFDATA_GENERAL:
+ bzero(&ifmd, sizeof(ifmd));
+ strlcpy(ifmd.ifmd_name, ifp->if_xname, sizeof(ifmd.ifmd_name));
+
+#define COPY(fld) ifmd.ifmd_##fld = ifp->if_##fld
+ COPY(pcount);
+ COPY(data);
+#undef COPY
+ ifmd.ifmd_flags = ifp->if_flags | ifp->if_drv_flags;
+ ifmd.ifmd_snd_len = ifp->if_snd.ifq_len;
+ ifmd.ifmd_snd_maxlen = ifp->if_snd.ifq_maxlen;
+ ifmd.ifmd_snd_drops = ifp->if_snd.ifq_drops;
+
+ error = SYSCTL_OUT(req, &ifmd, sizeof ifmd);
+ if (error || !req->newptr)
+ goto out;
+
+ error = SYSCTL_IN(req, &ifmd, sizeof ifmd);
+ if (error)
+ goto out;
+
+#define DONTCOPY(fld) ifmd.ifmd_data.ifi_##fld = ifp->if_data.ifi_##fld
+ DONTCOPY(type);
+ DONTCOPY(physical);
+ DONTCOPY(addrlen);
+ DONTCOPY(hdrlen);
+ DONTCOPY(mtu);
+ DONTCOPY(metric);
+ DONTCOPY(baudrate);
+#undef DONTCOPY
+#define COPY(fld) ifp->if_##fld = ifmd.ifmd_##fld
+ COPY(data);
+ ifp->if_snd.ifq_maxlen = ifmd.ifmd_snd_maxlen;
+ ifp->if_snd.ifq_drops = ifmd.ifmd_snd_drops;
+#undef COPY
+ break;
+
+ case IFDATA_LINKSPECIFIC:
+ error = SYSCTL_OUT(req, ifp->if_linkmib, ifp->if_linkmiblen);
+ if (error || !req->newptr)
+ goto out;
+
+ error = SYSCTL_IN(req, ifp->if_linkmib, ifp->if_linkmiblen);
+ if (error)
+ goto out;
+ break;
+
+ case IFDATA_DRIVERNAME:
+		/* 20 is enough for 64-bit ints */
+ dlen = strlen(ifp->if_dname) + 20 + 1;
+ if ((dbuf = malloc(dlen, M_TEMP, M_NOWAIT)) == NULL) {
+ error = ENOMEM;
+ goto out;
+ }
+ if (ifp->if_dunit == IF_DUNIT_NONE)
+ strcpy(dbuf, ifp->if_dname);
+ else
+ sprintf(dbuf, "%s%d", ifp->if_dname, ifp->if_dunit);
+
+ error = SYSCTL_OUT(req, dbuf, strlen(dbuf) + 1);
+ if (error == 0 && req->newptr != NULL)
+ error = EPERM;
+ free(dbuf, M_TEMP);
+ goto out;
+ }
+out:
+ if_rele(ifp);
+	return (error);
+}
+
+SYSCTL_NODE(_net_link_generic, IFMIB_IFDATA, ifdata, CTLFLAG_RW,
+ sysctl_ifdata, "Interface table");
+
diff --git a/rtems/freebsd/net/if_mib.h b/rtems/freebsd/net/if_mib.h
new file mode 100644
index 00000000..e2b80c87
--- /dev/null
+++ b/rtems/freebsd/net/if_mib.h
@@ -0,0 +1,171 @@
+/*-
+ * Copyright 1996 Massachusetts Institute of Technology
+ *
+ * Permission to use, copy, modify, and distribute this software and
+ * its documentation for any purpose and without fee is hereby
+ * granted, provided that both the above copyright notice and this
+ * permission notice appear in all copies, that both the above
+ * copyright notice and this permission notice appear in all
+ * supporting documentation, and that the name of M.I.T. not be used
+ * in advertising or publicity pertaining to distribution of the
+ * software without specific, written prior permission. M.I.T. makes
+ * no representations about the suitability of this software for any
+ * purpose. It is provided "as is" without express or implied
+ * warranty.
+ *
+ * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''. M.I.T. DISCLAIMS
+ * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT
+ * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _NET_IF_MIB_H
+#define _NET_IF_MIB_H 1
+
+struct ifmibdata {
+ char ifmd_name[IFNAMSIZ]; /* name of interface */
+ int ifmd_pcount; /* number of promiscuous listeners */
+ int ifmd_flags; /* interface flags */
+ int ifmd_snd_len; /* instantaneous length of send queue */
+ int ifmd_snd_maxlen; /* maximum length of send queue */
+ int ifmd_snd_drops; /* number of drops in send queue */
+ int ifmd_filler[4]; /* for future expansion */
+ struct if_data ifmd_data; /* generic information and statistics */
+};
+
+/*
+ * sysctl MIB tags at the net.link.generic level
+ */
+#define IFMIB_SYSTEM 1 /* non-interface-specific */
+#define IFMIB_IFDATA 2 /* per-interface data table */
+
+/*
+ * MIB tags for the various net.link.generic.ifdata tables
+ */
+#define IFDATA_GENERAL 1 /* generic stats for all kinds of ifaces */
+#define IFDATA_LINKSPECIFIC 2 /* specific to the type of interface */
+#define IFDATA_DRIVERNAME 3 /* driver name and unit */
+
+/*
+ * MIB tags at the net.link.generic.system level
+ */
+#define IFMIB_IFCOUNT 1 /* number of interfaces configured */
+
+/*
+ * MIB tags at the net.link level.
+ * All of the other values are IFT_* names defined in if_types.h.
+ */
+#define NETLINK_GENERIC 0 /* functions not specific to a type of iface */
+
+/*
+ * The reason why the IFDATA_LINKSPECIFIC stuff is not under the
+ * net.link.<iftype> branches is twofold:
+ * 1) It's easier to code this way, and doesn't require duplication.
+ * 2) The fourth level under net.link.<iftype> is <pf>; that is to say,
+ * the net.link.<iftype> tree instruments the adaptation layers between
+ * <iftype> and a particular protocol family (e.g., net.link.ether.inet
+ * instruments ARP). This does not really leave room for anything else
+ * that needs to have a well-known number.
+ */
+
+/*
+ * Link-specific MIB structures for various link types.
+ */
+
+/* For IFT_ETHER, IFT_ISO88023, and IFT_STARLAN, as used by RFC 1650 */
+struct ifmib_iso_8802_3 {
+ u_int32_t dot3StatsAlignmentErrors;
+ u_int32_t dot3StatsFCSErrors;
+ u_int32_t dot3StatsSingleCollisionFrames;
+ u_int32_t dot3StatsMultipleCollisionFrames;
+ u_int32_t dot3StatsSQETestErrors;
+ u_int32_t dot3StatsDeferredTransmissions;
+ u_int32_t dot3StatsLateCollisions;
+ u_int32_t dot3StatsExcessiveCollisions;
+ u_int32_t dot3StatsInternalMacTransmitErrors;
+ u_int32_t dot3StatsCarrierSenseErrors;
+ u_int32_t dot3StatsFrameTooLongs;
+ u_int32_t dot3StatsInternalMacReceiveErrors;
+ u_int32_t dot3StatsEtherChipSet;
+ /* Matt Thomas wants this one, not included in RFC 1650: */
+ u_int32_t dot3StatsMissedFrames;
+
+ u_int32_t dot3StatsCollFrequencies[16]; /* NB: index origin */
+
+ u_int32_t dot3Compliance;
+#define DOT3COMPLIANCE_STATS 1
+#define DOT3COMPLIANCE_COLLS 2
+};
+
+/*
+ * Chipset identifiers are normally part of the vendor's enterprise MIB.
+ * However, we don't want to be trying to represent arbitrary-length
+ * OBJECT IDENTIFIERs here (ick!), and the right value is not necessarily
+ * obvious to the driver implementor. So, we define our own identification
+ * mechanism here, and let the agent writer deal with the translation.
+ */
+#define DOT3CHIPSET_VENDOR(x) ((x) >> 16)
+#define DOT3CHIPSET_PART(x) ((x) & 0xffff)
+#define DOT3CHIPSET(v,p) (((v) << 16) + ((p) & 0xffff))
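+
+/*
+ * Worked example (illustrative, not part of the original source):
+ * an Intel 82557 is encoded as
+ *
+ *	DOT3CHIPSET(dot3VendorIntel, dot3ChipSetIntel82557)
+ *	    == (2 << 16) + 3 == 0x00020003
+ *
+ * which DOT3CHIPSET_VENDOR() decodes back to 2 (dot3VendorIntel) and
+ * DOT3CHIPSET_PART() to 3 (dot3ChipSetIntel82557).
+ */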
+
+/* Driver writers! Add your vendors here! */
+enum dot3Vendors {
+ dot3VendorAMD = 1,
+ dot3VendorIntel = 2,
+ dot3VendorNational = 4,
+ dot3VendorFujitsu = 5,
+ dot3VendorDigital = 6,
+ dot3VendorWesternDigital = 7
+};
+
+/* Driver writers! Add your chipsets here! */
+enum {
+ dot3ChipSetAMD7990 = 1,
+ dot3ChipSetAMD79900 = 2,
+ dot3ChipSetAMD79C940 = 3
+};
+
+enum {
+ dot3ChipSetIntel82586 = 1,
+ dot3ChipSetIntel82596 = 2,
+ dot3ChipSetIntel82557 = 3
+};
+
+enum {
+ dot3ChipSetNational8390 = 1,
+ dot3ChipSetNationalSonic = 2
+};
+
+enum {
+ dot3ChipSetFujitsu86950 = 1
+};
+
+enum {
+ dot3ChipSetDigitalDC21040 = 1,
+ dot3ChipSetDigitalDC21140 = 2,
+ dot3ChipSetDigitalDC21041 = 3,
+ dot3ChipSetDigitalDC21140A = 4,
+ dot3ChipSetDigitalDC21142 = 5
+};
+
+enum {
+ dot3ChipSetWesternDigital83C690 = 1,
+ dot3ChipSetWesternDigital83C790 = 2
+};
+/* END of Ethernet-link MIB stuff */
+
+/*
+ * Put other types of interface MIBs here, or in interface-specific
+ * header files if convenient ones already exist.
+ */
+#endif /* _NET_IF_MIB_H */
diff --git a/rtems/freebsd/net/if_sppp.h b/rtems/freebsd/net/if_sppp.h
new file mode 100644
index 00000000..ed406b55
--- /dev/null
+++ b/rtems/freebsd/net/if_sppp.h
@@ -0,0 +1,234 @@
+/*
+ * Defines for synchronous PPP/Cisco/Frame Relay link level subroutines.
+ */
+/*-
+ * Copyright (C) 1994-2000 Cronyx Engineering.
+ * Author: Serge Vakulenko, <vak@cronyx.ru>
+ *
+ * Heavily revamped to conform to RFC 1661.
+ * Copyright (C) 1997, Joerg Wunsch.
+ *
+ * This software is distributed with NO WARRANTIES, not even the implied
+ * warranties for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Authors grant any other persons or organizations permission to use
+ * or modify this software as long as this message is kept with the software,
+ * all derivative works or modified versions.
+ *
+ * From: Version 2.0, Fri Oct 6 20:39:21 MSK 1995
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _NET_IF_SPPP_HH_
+#define _NET_IF_SPPP_HH_ 1
+
+#define IDX_LCP 0 /* idx into state table */
+
+struct slcp {
+ u_long opts; /* LCP options to send (bitfield) */
+ u_long magic; /* local magic number */
+ u_long mru; /* our max receive unit */
+ u_long their_mru; /* their max receive unit */
+ u_long protos; /* bitmask of protos that are started */
+ u_char echoid; /* id of last keepalive echo request */
+ /* restart max values, see RFC 1661 */
+ int timeout;
+ int max_terminate;
+ int max_configure;
+ int max_failure;
+};
+
+#define IDX_IPCP 1 /* idx into state table */
+#define IDX_IPV6CP 2 /* idx into state table */
+
+struct sipcp {
+ u_long opts; /* IPCP options to send (bitfield) */
+ u_int flags;
+#define IPCP_HISADDR_SEEN 1 /* have seen his address already */
+#define IPCP_MYADDR_DYN 2 /* my address is dynamically assigned */
+#define IPCP_MYADDR_SEEN 4	/* have seen my address already */
+#ifdef notdef
+#define IPV6CP_MYIFID_DYN 8 /* my ifid is dynamically assigned */
+#endif
+#define IPV6CP_MYIFID_SEEN 0x10	/* have seen my ifid already */
+#define IPCP_VJ 0x20 /* can use VJ compression */
+ int max_state; /* VJ: Max-Slot-Id */
+ int compress_cid; /* VJ: Comp-Slot-Id */
+};
+
+#define AUTHNAMELEN 64
+#define AUTHKEYLEN 16
+
+struct sauth {
+ u_short proto; /* authentication protocol to use */
+ u_short flags;
+#define AUTHFLAG_NOCALLOUT 1 /* do not require authentication on */
+ /* callouts */
+#define AUTHFLAG_NORECHALLENGE 2 /* do not re-challenge CHAP */
+ u_char name[AUTHNAMELEN]; /* system identification name */
+ u_char secret[AUTHKEYLEN]; /* secret password */
+ u_char challenge[AUTHKEYLEN]; /* random challenge */
+};
+
+#define IDX_PAP 3
+#define IDX_CHAP 4
+
+#define IDX_COUNT (IDX_CHAP + 1) /* bump this when adding cp's! */
+
+/*
+ * Don't change the order of this. Ordering the phases this way allows
+ * for a comparison of ``pp_phase >= PHASE_AUTHENTICATE'' in order to
+ * know whether LCP is up.
+ */
+enum ppp_phase {
+ PHASE_DEAD, PHASE_ESTABLISH, PHASE_TERMINATE,
+ PHASE_AUTHENTICATE, PHASE_NETWORK
+};
+
+#define PP_MTU 1500 /* default/minimal MRU */
+#define PP_MAX_MRU 2048 /* maximal MRU we want to negotiate */
+
+/*
+ * This is a cut down struct sppp (see below) that can easily be
+ * exported to/imported from userland without the need to include
+ * dozens of kernel-internal header files. It is used by the
+ * SPPPIO[GS]DEFS ioctl commands below.
+ */
+struct sppp_parms {
+ enum ppp_phase pp_phase; /* phase we're currently in */
+ int enable_vj; /* VJ header compression enabled */
+ int enable_ipv6; /*
+ * Enable IPv6 negotiations -- only
+ * needed since each IPv4 i/f auto-
+ * matically gets an IPv6 address
+				 * assigned, so the presence of an
+				 * address can't serve as the decision.
+ */
+ struct slcp lcp; /* LCP params */
+ struct sipcp ipcp; /* IPCP params */
+ struct sipcp ipv6cp; /* IPv6CP params */
+ struct sauth myauth; /* auth params, i'm peer */
+ struct sauth hisauth; /* auth params, i'm authenticator */
+};
+
+/*
+ * Definitions to pass struct sppp_parms data down into the kernel
+ * using the SIOC[SG]IFGENERIC ioctl interface.
+ *
+ * In order to use this, create a struct spppreq, fill in the cmd
+ * field with SPPPIOGDEFS, and put the address of this structure into
+ * the ifr_data portion of a struct ifreq. Pass this struct to a
+ * SIOCGIFGENERIC ioctl. Then replace the cmd field by SPPPIOSDEFS,
+ * modify the defs field as desired, and pass the struct ifreq now
+ * to a SIOCSIFGENERIC ioctl.
+ */
+
+#define SPPPIOGDEFS ((caddr_t)(('S' << 24) + (1 << 16) +\
+ sizeof(struct sppp_parms)))
+#define SPPPIOSDEFS ((caddr_t)(('S' << 24) + (2 << 16) +\
+ sizeof(struct sppp_parms)))
+
+struct spppreq {
+ int cmd;
+ struct sppp_parms defs;
+};
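+
+/*
+ * Usage sketch (illustrative, not part of the original source; the
+ * interface name and the socket are assumptions): following the
+ * description above, a get/modify/set cycle looks like
+ *
+ *	struct ifreq ifr;
+ *	struct spppreq spr;
+ *
+ *	strlcpy(ifr.ifr_name, "sppp0", sizeof(ifr.ifr_name));
+ *	spr.cmd = (int)SPPPIOGDEFS;
+ *	ifr.ifr_data = (caddr_t)&spr;
+ *	ioctl(s, SIOCGIFGENERIC, &ifr);
+ *
+ *	(modify spr.defs here, e.g. the authentication parameters)
+ *
+ *	spr.cmd = (int)SPPPIOSDEFS;
+ *	ioctl(s, SIOCSIFGENERIC, &ifr);
+ */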
+
+#ifdef _KERNEL
+struct sppp {
+ struct ifnet *pp_ifp; /* network interface data */
+ struct ifqueue pp_fastq; /* fast output queue */
+ struct ifqueue pp_cpq; /* PPP control protocol queue */
+ struct sppp *pp_next; /* next interface in keepalive list */
+ u_int pp_mode; /* major protocol modes (cisco/ppp/...) */
+ u_int pp_flags; /* sub modes */
+ u_short pp_alivecnt; /* keepalive packets counter */
+ u_short pp_loopcnt; /* loopback detection counter */
+ u_long pp_seq[IDX_COUNT]; /* local sequence number */
+ u_long pp_rseq[IDX_COUNT]; /* remote sequence number */
+ enum ppp_phase pp_phase; /* phase we're currently in */
+ int state[IDX_COUNT]; /* state machine */
+ u_char confid[IDX_COUNT]; /* id of last configuration request */
+ int rst_counter[IDX_COUNT]; /* restart counter */
+ int fail_counter[IDX_COUNT]; /* negotiation failure counter */
+ int confflags; /* administrative configuration flags */
+#define CONF_ENABLE_VJ 0x01 /* VJ header compression enabled */
+#define CONF_ENABLE_IPV6 0x02 /* IPv6 administratively enabled */
+ time_t pp_last_recv; /* time last packet has been received */
+ time_t pp_last_sent; /* time last packet has been sent */
+ struct callout ch[IDX_COUNT]; /* per-proto and if callouts */
+ struct callout pap_my_to_ch; /* PAP needs one more... */
+ struct callout keepalive_callout; /* keepalive callout */
+ struct slcp lcp; /* LCP params */
+ struct sipcp ipcp; /* IPCP params */
+ struct sipcp ipv6cp; /* IPv6CP params */
+ struct sauth myauth; /* auth params, i'm peer */
+ struct sauth hisauth; /* auth params, i'm authenticator */
+ struct slcompress *pp_comp; /* for VJ compression */
+ u_short fr_dlci; /* Frame Relay DLCI number, 16..1023 */
+ u_char fr_status; /* PVC status, active/new/delete */
+ /*
+ * These functions are filled in by sppp_attach(), and are
+ * expected to be used by the lower layer (hardware) drivers
+ * in order to communicate the (un)availability of the
+ * communication link. Lower layer drivers that are always
+ * ready to communicate (like hardware HDLC) can shortcut
+ * pp_up from pp_tls, and pp_down from pp_tlf.
+ */
+ void (*pp_up)(struct sppp *sp);
+ void (*pp_down)(struct sppp *sp);
+ /*
+ * These functions need to be filled in by the lower layer
+ * (hardware) drivers if they request notification from the
+ * PPP layer whether the link is actually required. They
+ * correspond to the tls and tlf actions.
+ */
+ void (*pp_tls)(struct sppp *sp);
+ void (*pp_tlf)(struct sppp *sp);
+ /*
+ * These (optional) functions may be filled by the hardware
+ * driver if any notification of established connections
+ * (currently: IPCP up) is desired (pp_con) or any internal
+ * state change of the interface state machine should be
+ * signaled for monitoring purposes (pp_chg).
+ */
+ void (*pp_con)(struct sppp *sp);
+ void (*pp_chg)(struct sppp *sp, int new_state);
+ /* These two fields are for use by the lower layer */
+ void *pp_lowerp;
+ int pp_loweri;
+ /* Lock */
+ struct mtx mtx;
+ /* if_start () wrapper */
+ void (*if_start) (struct ifnet *);
+ struct callout ifstart_callout; /* if_start () scheduler */
+};
+#define IFP2SP(ifp) ((struct sppp *)(ifp)->if_l2com)
+#define SP2IFP(sp) ((sp)->pp_ifp)
+
+/* bits for pp_flags */
+#define PP_KEEPALIVE 0x01 /* use keepalive protocol */
+#define PP_FR 0x04 /* use Frame Relay protocol instead of PPP */
+ /* 0x04 was PP_TIMO */
+#define PP_CALLIN 0x08 /* we are being called */
+#define PP_NEEDAUTH 0x10 /* remote requested authentication */
+
+void sppp_attach (struct ifnet *ifp);
+void sppp_detach (struct ifnet *ifp);
+void sppp_input (struct ifnet *ifp, struct mbuf *m);
+int sppp_ioctl (struct ifnet *ifp, u_long cmd, void *data);
+struct mbuf *sppp_dequeue (struct ifnet *ifp);
+struct mbuf *sppp_pick(struct ifnet *ifp);
+int sppp_isempty (struct ifnet *ifp);
+void sppp_flush (struct ifnet *ifp);
+
+/* Internal functions */
+void sppp_fr_input (struct sppp *sp, struct mbuf *m);
+struct mbuf *sppp_fr_header (struct sppp *sp, struct mbuf *m, int fam);
+void sppp_fr_keepalive (struct sppp *sp);
+void sppp_get_ip_addrs(struct sppp *sp, u_long *src, u_long *dst,
+ u_long *srcmask);
+
+#endif /* _KERNEL */
+
+#endif /* _NET_IF_SPPP_HH_ */
diff --git a/rtems/freebsd/net/if_spppfr.c b/rtems/freebsd/net/if_spppfr.c
new file mode 100644
index 00000000..b6673013
--- /dev/null
+++ b/rtems/freebsd/net/if_spppfr.c
@@ -0,0 +1,636 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Synchronous Frame Relay link level subroutines.
+ * ANSI T1.617-compatible link management signaling
+ * implemented for Frame Relay mode.
+ * Cisco-type Frame Relay framing added, thanks to Alex Tutubalin.
+ * Only one DLCI per channel for now.
+ *
+ * Copyright (C) 1994-2000 Cronyx Engineering.
+ * Author: Serge Vakulenko, <vak@cronyx.ru>
+ *
+ * Copyright (C) 1999-2004 Cronyx Engineering.
+ * Author: Kurakin Roman, <rik@cronyx.ru>
+ *
+ * This software is distributed with NO WARRANTIES, not even the implied
+ * warranties for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Authors grant any other persons or organisations a permission to use,
+ * modify and redistribute this software in source and binary forms,
+ * as long as this message is kept with the software, all derivative
+ * works or modified versions.
+ *
+ * $Cronyx Id: if_spppfr.c,v 1.1.2.10 2004/06/29 09:02:30 rik Exp $
+ * $FreeBSD$
+ */
+
+#include <rtems/freebsd/sys/param.h>
+
+#if defined(__FreeBSD__) && __FreeBSD__ >= 3
+#include <rtems/freebsd/local/opt_inet.h>
+#include <rtems/freebsd/local/opt_inet6.h>
+#include <rtems/freebsd/local/opt_ipx.h>
+#endif
+
+#ifdef NetBSD1_3
+# if NetBSD1_3 > 6
+# include "opt_inet.h"
+# include "opt_inet6.h"
+# include "opt_iso.h"
+# endif
+#endif
+
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/module.h>
+#include <rtems/freebsd/sys/sockio.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/syslog.h>
+#if defined(__FreeBSD__) && __FreeBSD__ >= 3
+#include <rtems/freebsd/sys/random.h>
+#endif
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/mbuf.h>
+
+#if defined (__OpenBSD__)
+#include <rtems/freebsd/sys/md5k.h>
+#else
+#include <rtems/freebsd/sys/md5.h>
+#endif
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/netisr.h>
+#include <rtems/freebsd/net/if_types.h>
+#include <rtems/freebsd/net/route.h>
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/in_systm.h>
+#include <rtems/freebsd/netinet/ip.h>
+#include <rtems/freebsd/net/slcompress.h>
+
+#if defined (__NetBSD__) || defined (__OpenBSD__)
+#include <rtems/freebsd/machine/cpu.h> /* XXX for softnet */
+#endif
+
+#include <rtems/freebsd/machine/stdarg.h>
+
+#include <rtems/freebsd/netinet/in_var.h>
+#ifdef INET
+#include <rtems/freebsd/netinet/ip.h>
+#include <rtems/freebsd/netinet/tcp.h>
+#endif
+
+#if defined (__FreeBSD__) || defined (__OpenBSD__)
+# include <rtems/freebsd/netinet/if_ether.h>
+#else
+# include <rtems/freebsd/net/ethertypes.h>
+#endif
+
+#ifdef IPX
+#include <rtems/freebsd/netipx/ipx.h>
+#include <rtems/freebsd/netipx/ipx_if.h>
+#endif
+
+#include <rtems/freebsd/net/if_sppp.h>
+
+/*
+ * Frame Relay.
+ */
+#define FR_UI 0x03 /* Unnumbered Information */
+#define FR_IP 0xCC /* IP protocol identifier */
+#define FR_PADDING 0x00 /* NLPID padding */
+#define FR_SIGNALING 0x08 /* Q.933/T1.617 signaling identifier */
+#define FR_SNAP 0x80 /* NLPID snap */
+
+/*
+ * Header flags.
+ */
+#define FR_DE 0x02 /* discard eligibility */
+#define FR_FECN		0x04	/* forward explicit congestion notification */
+#define FR_BECN		0x08	/* backward explicit congestion notification */
+
+/*
+ * Signaling message types.
+ */
+#define FR_MSG_ENQUIRY 0x75 /* status enquiry */
+#define FR_MSG_STATUS 0x7d /* status */
+
+#define FR_ENQUIRY_SIZE 14
+
+/*
+ * Message field types.
+ */
+#define FR_FLD_RTYPE 0x01 /* report type */
+#define FR_FLD_VERIFY 0x03 /* link verification */
+#define FR_FLD_PVC 0x07 /* PVC status */
+#define FR_FLD_LSHIFT5 0x95 /* locking shift 5 */
+
+/*
+ * Report types.
+ */
+#define FR_RTYPE_FULL 0 /* full status */
+#define FR_RTYPE_SHORT 1 /* link verification only */
+#define FR_RTYPE_SINGLE 2 /* single PVC status */
+
+/* PVC status field. */
+#define FR_DLCI_DELETE 0x04 /* PVC is deleted */
+#define FR_DLCI_ACTIVE 0x02 /* PVC is operational */
+#define FR_DLCI_NEW 0x08 /* PVC is new */
+
+struct arp_req {
+ unsigned short htype; /* hardware type = ARPHRD_FRELAY */
+ unsigned short ptype; /* protocol type = ETHERTYPE_IP */
+ unsigned char halen; /* hardware address length = 2 */
+ unsigned char palen; /* protocol address length = 4 */
+ unsigned short op; /* ARP/RARP/InARP request/reply */
+ unsigned short hsource; /* hardware source address */
+ unsigned short psource1; /* protocol source */
+ unsigned short psource2;
+ unsigned short htarget; /* hardware target address */
+ unsigned short ptarget1; /* protocol target */
+ unsigned short ptarget2;
+} __packed;
+
+#if defined(__FreeBSD__) && __FreeBSD__ >= 3 && __FreeBSD_version < 501113
+#define SPP_FMT "%s%d: "
+#define SPP_ARGS(ifp) (ifp)->if_name, (ifp)->if_unit
+#else
+#define SPP_FMT "%s: "
+#define SPP_ARGS(ifp) (ifp)->if_xname
+#endif
+
+/* almost every function needs these */
+#define STDDCL \
+ struct ifnet *ifp = SP2IFP(sp); \
+ int debug = ifp->if_flags & IFF_DEBUG
+
+static void sppp_fr_arp (struct sppp *sp, struct arp_req *req, u_short addr);
+static void sppp_fr_signal (struct sppp *sp, unsigned char *h, int len);
+
+void sppp_fr_input (struct sppp *sp, struct mbuf *m)
+{
+ STDDCL;
+ u_char *h = mtod (m, u_char*);
+ int isr = -1;
+ int dlci, hlen, proto;
+
+ /* Get the DLCI number. */
+ if (m->m_pkthdr.len < 10) {
+bad: m_freem (m);
+ return;
+ }
+ dlci = (h[0] << 2 & 0x3f0) | (h[1] >> 4 & 0x0f);
+
+ /* Process signaling packets. */
+ if (dlci == 0) {
+ sppp_fr_signal (sp, h, m->m_pkthdr.len);
+ m_freem (m);
+ return;
+ }
+
+ if (dlci != sp->fr_dlci) {
+ if (debug)
+ printf (SPP_FMT "Received packet from invalid DLCI %d\n",
+ SPP_ARGS(ifp), dlci);
+ goto bad;
+ }
+
+ /* Process the packet. */
+ if (ntohs (*(short*) (h+2)) == ETHERTYPE_IP) {
+ /* Prehistoric IP framing? */
+ h[2] = FR_UI;
+ h[3] = FR_IP;
+ }
+ if (h[2] != FR_UI) {
+ if (debug)
+ printf (SPP_FMT "Invalid frame relay header flag 0x%02x\n",
+ SPP_ARGS(ifp), h[2]);
+ goto bad;
+ }
+ switch (h[3]) {
+ default:
+ if (debug)
+ printf (SPP_FMT "Unsupported NLPID 0x%02x\n",
+ SPP_ARGS(ifp), h[3]);
+ goto bad;
+
+ case FR_PADDING:
+ if (h[4] != FR_SNAP) {
+ if (debug)
+ printf (SPP_FMT "Bad NLPID 0x%02x\n",
+ SPP_ARGS(ifp), h[4]);
+ goto bad;
+ }
+ if (h[5] || h[6] || h[7]) {
+ if (debug)
+ printf (SPP_FMT "Bad OID 0x%02x-0x%02x-0x%02x\n",
+ SPP_ARGS(ifp),
+ h[5], h[6], h[7]);
+ goto bad;
+ }
+ proto = ntohs (*(short*) (h+8));
+ if (proto == ETHERTYPE_ARP) {
+ /* Process the ARP request. */
+ if (m->m_pkthdr.len != 10 + sizeof (struct arp_req)) {
+ if (debug)
+ printf (SPP_FMT "Bad ARP request size = %d bytes\n",
+ SPP_ARGS(ifp),
+ m->m_pkthdr.len);
+ goto bad;
+ }
+ sppp_fr_arp (sp, (struct arp_req*) (h + 10),
+ h[0] << 8 | h[1]);
+ m_freem (m);
+ return;
+ }
+ hlen = 10;
+ break;
+
+ case FR_IP:
+ proto = ETHERTYPE_IP;
+ hlen = 4;
+ break;
+ }
+
+ /* Remove frame relay header. */
+ m_adj (m, hlen);
+
+ switch (proto) {
+ default:
+ ++ifp->if_noproto;
+drop: ++ifp->if_ierrors;
+ ++ifp->if_iqdrops;
+ m_freem (m);
+ return;
+#ifdef INET
+ case ETHERTYPE_IP:
+ isr = NETISR_IP;
+ break;
+#endif
+#ifdef IPX
+ case ETHERTYPE_IPX:
+ isr = NETISR_IPX;
+ break;
+#endif
+#ifdef NETATALK
+ case ETHERTYPE_AT:
+ isr = NETISR_ATALK;
+ break;
+#endif
+ }
+
+ if (! (ifp->if_flags & IFF_UP))
+ goto drop;
+
+ /* Check queue. */
+ if (netisr_queue(isr, m)) { /* (0) on success. */
+ if (debug)
+ log(LOG_DEBUG, SPP_FMT "protocol queue overflow\n",
+ SPP_ARGS(ifp));
+ }
+}
+
+/*
+ * Add the frame relay header to the packet.
+ * For IP the header length is 4 bytes;
+ * for all other protocols it is 10 bytes (RFC 1490).
+ */
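+/*
+ * A sketch of the two encodings emitted below, in terms of the
+ * macros used by this file:
+ *
+ * IP: addr0 addr1 FR_UI FR_IP
+ * other: addr0 addr1 FR_UI FR_PADDING FR_SNAP 0 0 0 <ethertype:2>
+ *
+ * where addr0/addr1 carry the 10-bit DLCI plus the EA bit.
+ */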
+struct mbuf *sppp_fr_header (struct sppp *sp, struct mbuf *m,
+ int family)
+{
+ STDDCL;
+ u_char *h;
+ int type, hlen;
+
+ /* Prepend the space for Frame Relay header. */
+ hlen = (family == AF_INET) ? 4 : 10;
+ M_PREPEND (m, hlen, M_DONTWAIT);
+ if (! m)
+ return 0;
+ h = mtod (m, u_char*);
+
+ /* Fill the header. */
+ h[0] = sp->fr_dlci >> 2 & 0xfc;
+ h[1] = sp->fr_dlci << 4 | 1;
+ h[2] = FR_UI;
+
+ switch (family) {
+ default:
+ if (debug)
+ printf (SPP_FMT "Cannot handle address family %d\n",
+ SPP_ARGS(ifp), family);
+ m_freem (m);
+ return 0;
+#ifdef INET
+ case AF_INET:
+#if 0 /* Crashes on fragmented packets */
+ /*
+ * Set the discard eligibility bit, if:
+ * 1) no fragmentation
+ * 2) length > 400 bytes
+ * 3a) the protocol is UDP or
+ * 3b) TCP data (no control bits)
+ */
+ {
+ struct ip *ip = (struct ip*) (h + hlen);
+ struct tcphdr *tcp = (struct tcphdr*) ((long*)ip + ip->ip_hl);
+
+ if (! (ip->ip_off & ~IP_DF) && ip->ip_len > 400 &&
+ (ip->ip_p == IPPROTO_UDP ||
+ ip->ip_p == IPPROTO_TCP && ! tcp->th_flags))
+ h[1] |= FR_DE;
+ }
+#endif
+ h[3] = FR_IP;
+ return m;
+#endif
+#ifdef IPX
+ case AF_IPX:
+ type = ETHERTYPE_IPX;
+ break;
+#endif
+#ifdef NS
+ case AF_NS:
+ type = 0x8137;
+ break;
+#endif
+#ifdef NETATALK
+ case AF_APPLETALK:
+ type = ETHERTYPE_AT;
+ break;
+#endif
+ }
+ h[3] = FR_PADDING;
+ h[4] = FR_SNAP;
+ h[5] = 0;
+ h[6] = 0;
+ h[7] = 0;
+ *(short*) (h+8) = htons(type);
+ return m;
+}
+
+/*
+ * Send periodic frame relay link verification messages via DLCI 0.
+ * Called every 10 seconds (the default value of the T391 timer).
+ * Every 6th message is a full status request
+ * (the default value of the N391 counter is 6).
+ */
+void sppp_fr_keepalive (struct sppp *sp)
+{
+ STDDCL;
+ unsigned char *h, *p;
+ struct mbuf *m;
+
+ MGETHDR (m, M_DONTWAIT, MT_DATA);
+ if (! m)
+ return;
+ m->m_pkthdr.rcvif = 0;
+
+ h = mtod (m, u_char*);
+ p = h;
+ *p++ = 0; /* DLCI = 0 */
+ *p++ = 1;
+ *p++ = FR_UI;
+ *p++ = FR_SIGNALING; /* NLPID = UNI call control */
+
+ *p++ = 0; /* call reference length = 0 */
+ *p++ = FR_MSG_ENQUIRY; /* message type = status enquiry */
+
+ *p++ = FR_FLD_LSHIFT5; /* locking shift 5 */
+
+ *p++ = FR_FLD_RTYPE; /* report type field */
+ *p++ = 1; /* report type length = 1 */
+ if (sp->pp_seq[IDX_LCP] % 6)
+ *p++ = FR_RTYPE_SHORT; /* link verification only */
+ else
+ *p++ = FR_RTYPE_FULL; /* full status needed */
+
+ if (sp->pp_seq[IDX_LCP] >= 255)
+ sp->pp_seq[IDX_LCP] = 0;
+ *p++ = FR_FLD_VERIFY; /* link verification type field */
+ *p++ = 2; /* link verification field length = 2 */
+ *p++ = ++sp->pp_seq[IDX_LCP]; /* our sequence number */
+ *p++ = sp->pp_rseq[IDX_LCP]; /* last received sequence number */
+
+ m->m_pkthdr.len = m->m_len = p - h;
+ if (debug)
+ printf (SPP_FMT "send lmi packet, seq=%d, rseq=%d\n",
+ SPP_ARGS(ifp), (u_char) sp->pp_seq[IDX_LCP],
+ (u_char) sp->pp_rseq[IDX_LCP]);
+
+ if (! IF_HANDOFF_ADJ(&sp->pp_cpq, m, ifp, 3))
+ ++ifp->if_oerrors;
+}
+
+/*
+ * Process the frame relay Inverse ARP request.
+ */
+static void sppp_fr_arp (struct sppp *sp, struct arp_req *req,
+ u_short his_hardware_address)
+{
+ STDDCL;
+ struct mbuf *m;
+ struct arp_req *reply;
+ u_char *h;
+ u_short my_hardware_address;
+ u_long his_ip_address, my_ip_address;
+
+ if ((ntohs (req->htype) != ARPHRD_FRELAY &&
+ ntohs (req->htype) != 16) || /* for BayNetworks routers */
+ ntohs (req->ptype) != ETHERTYPE_IP) {
+ if (debug)
+ printf (SPP_FMT "Invalid ARP hardware/protocol type = 0x%x/0x%x\n",
+ SPP_ARGS(ifp),
+ ntohs (req->htype), ntohs (req->ptype));
+ return;
+ }
+ if (req->halen != 2 || req->palen != 4) {
+ if (debug)
+ printf (SPP_FMT "Invalid ARP hardware/protocol address length = %d/%d\n",
+ SPP_ARGS(ifp),
+ req->halen, req->palen);
+ return;
+ }
+ switch (ntohs (req->op)) {
+ default:
+ if (debug)
+ printf (SPP_FMT "Invalid ARP op = 0x%x\n",
+ SPP_ARGS(ifp), ntohs (req->op));
+ return;
+
+ case ARPOP_INVREPLY:
+ /* Ignore. */
+ return;
+
+ case ARPOP_INVREQUEST:
+ my_hardware_address = ntohs (req->htarget);
+ his_ip_address = ntohs (req->psource1) << 16 |
+ ntohs (req->psource2);
+ my_ip_address = ntohs (req->ptarget1) << 16 |
+ ntohs (req->ptarget2);
+ break;
+ }
+ if (debug)
+ printf (SPP_FMT "got ARP request, source=0x%04x/%d.%d.%d.%d, target=0x%04x/%d.%d.%d.%d\n",
+ SPP_ARGS(ifp), ntohs (req->hsource),
+ (unsigned char) (his_ip_address >> 24),
+ (unsigned char) (his_ip_address >> 16),
+ (unsigned char) (his_ip_address >> 8),
+ (unsigned char) his_ip_address,
+ my_hardware_address,
+ (unsigned char) (my_ip_address >> 24),
+ (unsigned char) (my_ip_address >> 16),
+ (unsigned char) (my_ip_address >> 8),
+ (unsigned char) my_ip_address);
+
+ sppp_get_ip_addrs (sp, &my_ip_address, 0, 0);
+ if (! my_ip_address)
+ return; /* nothing to reply */
+
+ if (debug)
+ printf (SPP_FMT "send ARP reply, source=0x%04x/%d.%d.%d.%d, target=0x%04x/%d.%d.%d.%d\n",
+ SPP_ARGS(ifp), my_hardware_address,
+ (unsigned char) (my_ip_address >> 24),
+ (unsigned char) (my_ip_address >> 16),
+ (unsigned char) (my_ip_address >> 8),
+ (unsigned char) my_ip_address,
+ his_hardware_address,
+ (unsigned char) (his_ip_address >> 24),
+ (unsigned char) (his_ip_address >> 16),
+ (unsigned char) (his_ip_address >> 8),
+ (unsigned char) his_ip_address);
+
+ /* Send the Inverse ARP reply. */
+ MGETHDR (m, M_DONTWAIT, MT_DATA);
+ if (! m)
+ return;
+ m->m_pkthdr.len = m->m_len = 10 + sizeof (*reply);
+ m->m_pkthdr.rcvif = 0;
+
+ h = mtod (m, u_char*);
+ reply = (struct arp_req*) (h + 10);
+
+ h[0] = his_hardware_address >> 8;
+ h[1] = his_hardware_address;
+ h[2] = FR_UI;
+ h[3] = FR_PADDING;
+ h[4] = FR_SNAP;
+ h[5] = 0;
+ h[6] = 0;
+ h[7] = 0;
+ *(short*) (h+8) = htons (ETHERTYPE_ARP);
+
+ reply->htype = htons (ARPHRD_FRELAY);
+ reply->ptype = htons (ETHERTYPE_IP);
+ reply->halen = 2;
+ reply->palen = 4;
+ reply->op = htons (ARPOP_INVREPLY);
+ reply->hsource = htons (my_hardware_address);
+ reply->psource1 = htons (my_ip_address >> 16);
+ reply->psource2 = htons (my_ip_address);
+ reply->htarget = htons (his_hardware_address);
+ reply->ptarget1 = htons (his_ip_address >> 16);
+ reply->ptarget2 = htons (his_ip_address);
+
+ if (! IF_HANDOFF_ADJ(&sp->pp_cpq, m, ifp, 3))
+ ++ifp->if_oerrors;
+}
+
+/*
+ * Process the input signaling packet (DLCI 0).
+ * The implemented protocol is ANSI T1.617 Annex D.
+ */
+static void sppp_fr_signal (struct sppp *sp, unsigned char *h, int len)
+{
+ STDDCL;
+ u_char *p;
+ int dlci;
+
+ if (h[2] != FR_UI || h[3] != FR_SIGNALING || h[4] != 0) {
+ if (debug)
+ printf (SPP_FMT "Invalid signaling header\n",
+ SPP_ARGS(ifp));
+bad: if (debug) {
+ printf ("%02x", *h++);
+ while (--len > 0)
+ printf ("-%02x", *h++);
+ printf ("\n");
+ }
+ return;
+ }
+ if (h[5] == FR_MSG_ENQUIRY) {
+ if (len == FR_ENQUIRY_SIZE &&
+ h[12] == (u_char) sp->pp_seq[IDX_LCP]) {
+ sp->pp_seq[IDX_LCP] = random();
+ printf (SPP_FMT "loopback detected\n",
+ SPP_ARGS(ifp));
+ }
+ return;
+ }
+ if (h[5] != FR_MSG_STATUS) {
+ if (debug)
+ printf (SPP_FMT "Unknown signaling message: 0x%02x\n",
+ SPP_ARGS(ifp), h[5]);
+ goto bad;
+ }
+
+ /* Parse message fields. */
+ for (p=h+6; p<h+len; ) {
+ switch (*p) {
+ default:
+ if (debug)
+ printf (SPP_FMT "Unknown signaling field 0x%x\n",
+ SPP_ARGS(ifp), *p);
+ break;
+ case FR_FLD_LSHIFT5:
+ case FR_FLD_RTYPE:
+ /* Ignore. */
+ break;
+ case FR_FLD_VERIFY:
+ if (p[1] != 2) {
+ if (debug)
+ printf (SPP_FMT "Invalid signaling verify field length %d\n",
+ SPP_ARGS(ifp), p[1]);
+ break;
+ }
+ sp->pp_rseq[IDX_LCP] = p[2];
+ if (debug) {
+ printf (SPP_FMT "got lmi reply rseq=%d, seq=%d",
+ SPP_ARGS(ifp), p[2], p[3]);
+ if (p[3] != (u_char) sp->pp_seq[IDX_LCP])
+ printf (" (really %d)",
+ (u_char) sp->pp_seq[IDX_LCP]);
+ printf ("\n");
+ }
+ break;
+ case FR_FLD_PVC:
+ if (p[1] < 3) {
+ if (debug)
+ printf (SPP_FMT "Invalid PVC status length %d\n",
+ SPP_ARGS(ifp), p[1]);
+ break;
+ }
+ dlci = (p[2] << 4 & 0x3f0) | (p[3] >> 3 & 0x0f);
+ if (! sp->fr_dlci)
+ sp->fr_dlci = dlci;
+ if (sp->fr_status != p[4])
+ printf (SPP_FMT "DLCI %d %s%s\n",
+ SPP_ARGS(ifp), dlci,
+ p[4] & FR_DLCI_DELETE ? "deleted" :
+ p[4] & FR_DLCI_ACTIVE ? "active" : "passive",
+ p[4] & FR_DLCI_NEW ? ", new" : "");
+ sp->fr_status = p[4];
+ break;
+ }
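+ /*
+ * Advance to the next information element: a single-octet IE
+ * has the high bit of its type octet set; anything else is
+ * encoded as type, length, value.
+ */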
+ if (*p & 0x80)
+ ++p;
+ else if (p < h+len-1 && p[1])
+ p += 2 + p[1];
+ else {
+ if (debug)
+ printf (SPP_FMT "Invalid signaling field 0x%x\n",
+ SPP_ARGS(ifp), *p);
+ goto bad;
+ }
+ }
+}
diff --git a/rtems/freebsd/net/if_spppsubr.c b/rtems/freebsd/net/if_spppsubr.c
new file mode 100644
index 00000000..7e54addd
--- /dev/null
+++ b/rtems/freebsd/net/if_spppsubr.c
@@ -0,0 +1,5492 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*
+ * Synchronous PPP/Cisco/Frame Relay link level subroutines.
+ * Keepalive protocol implemented in both Cisco and PPP modes.
+ */
+/*-
+ * Copyright (C) 1994-2000 Cronyx Engineering.
+ * Author: Serge Vakulenko, <vak@cronyx.ru>
+ *
+ * Heavily revamped to conform to RFC 1661.
+ * Copyright (C) 1997, 2001 Joerg Wunsch.
+ *
+ * This software is distributed with NO WARRANTIES, not even the implied
+ * warranties for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Authors grant any other persons or organisations permission to use
+ * or modify this software as long as this message is kept with the software,
+ * all derivative works or modified versions.
+ *
+ * From: Version 2.4, Thu Apr 30 17:17:21 MSD 1997
+ *
+ * $FreeBSD$
+ */
+
+#include <rtems/freebsd/sys/param.h>
+
+#include <rtems/freebsd/local/opt_inet.h>
+#include <rtems/freebsd/local/opt_inet6.h>
+#include <rtems/freebsd/local/opt_ipx.h>
+
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/module.h>
+#include <rtems/freebsd/sys/sockio.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/syslog.h>
+#include <rtems/freebsd/sys/random.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/mbuf.h>
+
+#include <rtems/freebsd/sys/md5.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/netisr.h>
+#include <rtems/freebsd/net/if_types.h>
+#include <rtems/freebsd/net/route.h>
+#include <rtems/freebsd/net/vnet.h>
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/in_systm.h>
+#include <rtems/freebsd/netinet/ip.h>
+#include <rtems/freebsd/net/slcompress.h>
+
+#include <rtems/freebsd/machine/stdarg.h>
+
+#include <rtems/freebsd/netinet/in_var.h>
+
+#ifdef INET
+#include <rtems/freebsd/netinet/ip.h>
+#include <rtems/freebsd/netinet/tcp.h>
+#endif
+
+#ifdef INET6
+#include <rtems/freebsd/netinet6/scope6_var.h>
+#endif
+
+#include <rtems/freebsd/netinet/if_ether.h>
+
+#ifdef IPX
+#include <rtems/freebsd/netipx/ipx.h>
+#include <rtems/freebsd/netipx/ipx_if.h>
+#endif
+
+#include <rtems/freebsd/net/if_sppp.h>
+
+#define IOCTL_CMD_T u_long
+#define MAXALIVECNT 3 /* max. alive packets */
+
+/*
+ * Interface flags that can be set in an ifconfig command.
+ *
+ * Setting link0 will make the link passive, i.e. it will be marked
+ * as administratively openable, but won't be opened to begin with.
+ * Incoming calls will be answered, or a subsequent call with -link1
+ * will cause an administrative open of the LCP layer.
+ *
+ * Setting link1 will cause the link to auto-dial only as packets
+ * arrive to be sent.
+ *
+ * Setting IFF_DEBUG will syslog the option negotiation and state
+ * transitions at level kern.debug. Note: all logs consistently look
+ * like
+ *
+ * <if-name><unit>: <proto-name> <additional info...>
+ *
+ * with <if-name><unit> being something like "bppp0", and <proto-name>
+ * being one of "lcp", "ipcp", "cisco", "chap", "pap", etc.
+ */
+
+#define IFF_PASSIVE IFF_LINK0 /* wait passively for connection */
+#define IFF_AUTO IFF_LINK1 /* auto-dial on output */
+#define IFF_CISCO IFF_LINK2 /* use Cisco HDLC framing */
+
+#define PPP_ALLSTATIONS 0xff /* All-Stations broadcast address */
+#define PPP_UI 0x03 /* Unnumbered Information */
+#define PPP_IP 0x0021 /* Internet Protocol */
+#define PPP_ISO 0x0023 /* ISO OSI Protocol */
+#define PPP_XNS 0x0025 /* Xerox NS Protocol */
+#define PPP_IPX 0x002b /* Novell IPX Protocol */
+#define PPP_VJ_COMP 0x002d /* VJ compressed TCP/IP */
+#define PPP_VJ_UCOMP 0x002f /* VJ uncompressed TCP/IP */
+#define PPP_IPV6 0x0057 /* Internet Protocol Version 6 */
+#define PPP_LCP 0xc021 /* Link Control Protocol */
+#define PPP_PAP 0xc023 /* Password Authentication Protocol */
+#define PPP_CHAP 0xc223 /* Challenge-Handshake Auth Protocol */
+#define PPP_IPCP 0x8021 /* Internet Protocol Control Protocol */
+#define PPP_IPV6CP 0x8057 /* IPv6 Control Protocol */
+
+#define CONF_REQ 1 /* PPP configure request */
+#define CONF_ACK 2 /* PPP configure acknowledge */
+#define CONF_NAK 3 /* PPP configure negative ack */
+#define CONF_REJ 4 /* PPP configure reject */
+#define TERM_REQ 5 /* PPP terminate request */
+#define TERM_ACK 6 /* PPP terminate acknowledge */
+#define CODE_REJ 7 /* PPP code reject */
+#define PROTO_REJ 8 /* PPP protocol reject */
+#define ECHO_REQ 9 /* PPP echo request */
+#define ECHO_REPLY 10 /* PPP echo reply */
+#define DISC_REQ 11 /* PPP discard request */
+
+#define LCP_OPT_MRU 1 /* maximum receive unit */
+#define LCP_OPT_ASYNC_MAP 2 /* async control character map */
+#define LCP_OPT_AUTH_PROTO 3 /* authentication protocol */
+#define LCP_OPT_QUAL_PROTO 4 /* quality protocol */
+#define LCP_OPT_MAGIC 5 /* magic number */
+#define LCP_OPT_RESERVED 6 /* reserved */
+#define LCP_OPT_PROTO_COMP 7 /* protocol field compression */
+#define LCP_OPT_ADDR_COMP 8 /* address/control field compression */
+
+#define IPCP_OPT_ADDRESSES 1 /* both IP addresses; deprecated */
+#define IPCP_OPT_COMPRESSION 2 /* IP compression protocol (VJ) */
+#define IPCP_OPT_ADDRESS 3 /* local IP address */
+
+#define IPV6CP_OPT_IFID 1 /* interface identifier */
+#define IPV6CP_OPT_COMPRESSION 2 /* IPv6 compression protocol */
+
+#define IPCP_COMP_VJ 0x2d /* Code for VJ compression */
+
+#define PAP_REQ 1 /* PAP name/password request */
+#define PAP_ACK 2 /* PAP acknowledge */
+#define PAP_NAK 3 /* PAP fail */
+
+#define CHAP_CHALLENGE 1 /* CHAP challenge request */
+#define CHAP_RESPONSE 2 /* CHAP challenge response */
+#define CHAP_SUCCESS 3 /* CHAP response ok */
+#define CHAP_FAILURE 4 /* CHAP response failed */
+
+#define CHAP_MD5 5 /* hash algorithm - MD5 */
+
+#define CISCO_MULTICAST 0x8f /* Cisco multicast address */
+#define CISCO_UNICAST 0x0f /* Cisco unicast address */
+#define CISCO_KEEPALIVE 0x8035 /* Cisco keepalive protocol */
+#define CISCO_ADDR_REQ 0 /* Cisco address request */
+#define CISCO_ADDR_REPLY 1 /* Cisco address reply */
+#define CISCO_KEEPALIVE_REQ 2 /* Cisco keepalive request */
+
+/* states are named and numbered according to RFC 1661 */
+#define STATE_INITIAL 0
+#define STATE_STARTING 1
+#define STATE_CLOSED 2
+#define STATE_STOPPED 3
+#define STATE_CLOSING 4
+#define STATE_STOPPING 5
+#define STATE_REQ_SENT 6
+#define STATE_ACK_RCVD 7
+#define STATE_ACK_SENT 8
+#define STATE_OPENED 9
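+
+/*
+ * Roughly: INITIAL/STARTING wait for the lower layer to come up,
+ * CLOSED is administratively down, STOPPED waits passively for the
+ * peer, CLOSING/STOPPING wait for a Terminate-Ack, REQ_SENT/
+ * ACK_RCVD/ACK_SENT are negotiating, and OPENED carries traffic
+ * (see RFC 1661).
+ */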
+
+MALLOC_DEFINE(M_SPPP, "sppp", "synchronous PPP interface internals");
+
+struct ppp_header {
+ u_char address;
+ u_char control;
+ u_short protocol;
+} __packed;
+#define PPP_HEADER_LEN sizeof (struct ppp_header)
+
+struct lcp_header {
+ u_char type;
+ u_char ident;
+ u_short len;
+} __packed;
+#define LCP_HEADER_LEN sizeof (struct lcp_header)
+
+struct cisco_packet {
+ u_long type;
+ u_long par1;
+ u_long par2;
+ u_short rel;
+ u_short time0;
+ u_short time1;
+} __packed;
+#define CISCO_PACKET_LEN sizeof (struct cisco_packet)
+
+/*
+ * We follow the spelling and capitalization of RFC 1661 here, to make
+ * it easier to compare with the standard. Please refer to that RFC in
+ * case you can't make sense of these abbreviations; it also explains
+ * the semantics related to the various events and actions.
+ */
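+/*
+ * For quick reference, the RFC 1661 shorthands used below:
+ * RCR = Receive-Configure-Request, RCN = Receive-Configure-Nak/Rej,
+ * TO = Timeout, scr = Send-Configure-Request, tlu = This-Layer-Up,
+ * tld = This-Layer-Down, tls = This-Layer-Started,
+ * tlf = This-Layer-Finished.
+ */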
+struct cp {
+ u_short proto; /* PPP control protocol number */
+ u_char protoidx; /* index into state table in struct sppp */
+ u_char flags;
+#define CP_LCP 0x01 /* this is the LCP */
+#define CP_AUTH 0x02 /* this is an authentication protocol */
+#define CP_NCP 0x04 /* this is a NCP */
+#define CP_QUAL 0x08 /* this is a quality reporting protocol */
+ const char *name; /* name of this control protocol */
+ /* event handlers */
+ void (*Up)(struct sppp *sp);
+ void (*Down)(struct sppp *sp);
+ void (*Open)(struct sppp *sp);
+ void (*Close)(struct sppp *sp);
+ void (*TO)(void *sp);
+ int (*RCR)(struct sppp *sp, struct lcp_header *h, int len);
+ void (*RCN_rej)(struct sppp *sp, struct lcp_header *h, int len);
+ void (*RCN_nak)(struct sppp *sp, struct lcp_header *h, int len);
+ /* actions */
+ void (*tlu)(struct sppp *sp);
+ void (*tld)(struct sppp *sp);
+ void (*tls)(struct sppp *sp);
+ void (*tlf)(struct sppp *sp);
+ void (*scr)(struct sppp *sp);
+};
+
+#define SPP_FMT "%s: "
+#define SPP_ARGS(ifp) (ifp)->if_xname
+
+#define SPPP_LOCK(sp) mtx_lock (&(sp)->mtx)
+#define SPPP_UNLOCK(sp) mtx_unlock (&(sp)->mtx)
+#define SPPP_LOCK_ASSERT(sp) mtx_assert (&(sp)->mtx, MA_OWNED)
+#define SPPP_LOCK_OWNED(sp) mtx_owned (&(sp)->mtx)
+
+#ifdef INET
+/*
+ * The following disgusting hack gets around the problem that IP TOS
+ * can't be set yet. We want to put "interactive" traffic on a high
+ * priority queue. To decide if traffic is interactive, we check that
+ * a) it is TCP and b) one of its ports is telnet, rlogin or ftp control.
+ *
+ * XXX is this really still necessary? - joerg -
+ */
+static const u_short interactive_ports[8] = {
+ 0, 513, 0, 0,
+ 0, 21, 0, 23,
+};
+#define INTERACTIVE(p) (interactive_ports[(p) & 7] == (p))
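+/*
+ * The table above is a tiny perfect hash keyed on the low three bits
+ * of the port number: 513 & 7 == 1 (rlogin), 21 & 7 == 5 (ftp
+ * control) and 23 & 7 == 7 (telnet), so INTERACTIVE(p) costs one
+ * index plus one compare.
+ */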
+#endif
+
+/* almost every function needs these */
+#define STDDCL \
+ struct ifnet *ifp = SP2IFP(sp); \
+ int debug = ifp->if_flags & IFF_DEBUG
+
+static int sppp_output(struct ifnet *ifp, struct mbuf *m,
+ struct sockaddr *dst, struct route *ro);
+
+static void sppp_cisco_send(struct sppp *sp, int type, long par1, long par2);
+static void sppp_cisco_input(struct sppp *sp, struct mbuf *m);
+
+static void sppp_cp_input(const struct cp *cp, struct sppp *sp,
+ struct mbuf *m);
+static void sppp_cp_send(struct sppp *sp, u_short proto, u_char type,
+ u_char ident, u_short len, void *data);
+/* static void sppp_cp_timeout(void *arg); */
+static void sppp_cp_change_state(const struct cp *cp, struct sppp *sp,
+ int newstate);
+static void sppp_auth_send(const struct cp *cp,
+ struct sppp *sp, unsigned int type, unsigned int id,
+ ...);
+
+static void sppp_up_event(const struct cp *cp, struct sppp *sp);
+static void sppp_down_event(const struct cp *cp, struct sppp *sp);
+static void sppp_open_event(const struct cp *cp, struct sppp *sp);
+static void sppp_close_event(const struct cp *cp, struct sppp *sp);
+static void sppp_to_event(const struct cp *cp, struct sppp *sp);
+
+static void sppp_null(struct sppp *sp);
+
+static void sppp_pp_up(struct sppp *sp);
+static void sppp_pp_down(struct sppp *sp);
+
+static void sppp_lcp_init(struct sppp *sp);
+static void sppp_lcp_up(struct sppp *sp);
+static void sppp_lcp_down(struct sppp *sp);
+static void sppp_lcp_open(struct sppp *sp);
+static void sppp_lcp_close(struct sppp *sp);
+static void sppp_lcp_TO(void *sp);
+static int sppp_lcp_RCR(struct sppp *sp, struct lcp_header *h, int len);
+static void sppp_lcp_RCN_rej(struct sppp *sp, struct lcp_header *h, int len);
+static void sppp_lcp_RCN_nak(struct sppp *sp, struct lcp_header *h, int len);
+static void sppp_lcp_tlu(struct sppp *sp);
+static void sppp_lcp_tld(struct sppp *sp);
+static void sppp_lcp_tls(struct sppp *sp);
+static void sppp_lcp_tlf(struct sppp *sp);
+static void sppp_lcp_scr(struct sppp *sp);
+static void sppp_lcp_check_and_close(struct sppp *sp);
+static int sppp_ncp_check(struct sppp *sp);
+
+static void sppp_ipcp_init(struct sppp *sp);
+static void sppp_ipcp_up(struct sppp *sp);
+static void sppp_ipcp_down(struct sppp *sp);
+static void sppp_ipcp_open(struct sppp *sp);
+static void sppp_ipcp_close(struct sppp *sp);
+static void sppp_ipcp_TO(void *sp);
+static int sppp_ipcp_RCR(struct sppp *sp, struct lcp_header *h, int len);
+static void sppp_ipcp_RCN_rej(struct sppp *sp, struct lcp_header *h, int len);
+static void sppp_ipcp_RCN_nak(struct sppp *sp, struct lcp_header *h, int len);
+static void sppp_ipcp_tlu(struct sppp *sp);
+static void sppp_ipcp_tld(struct sppp *sp);
+static void sppp_ipcp_tls(struct sppp *sp);
+static void sppp_ipcp_tlf(struct sppp *sp);
+static void sppp_ipcp_scr(struct sppp *sp);
+
+static void sppp_ipv6cp_init(struct sppp *sp);
+static void sppp_ipv6cp_up(struct sppp *sp);
+static void sppp_ipv6cp_down(struct sppp *sp);
+static void sppp_ipv6cp_open(struct sppp *sp);
+static void sppp_ipv6cp_close(struct sppp *sp);
+static void sppp_ipv6cp_TO(void *sp);
+static int sppp_ipv6cp_RCR(struct sppp *sp, struct lcp_header *h, int len);
+static void sppp_ipv6cp_RCN_rej(struct sppp *sp, struct lcp_header *h, int len);
+static void sppp_ipv6cp_RCN_nak(struct sppp *sp, struct lcp_header *h, int len);
+static void sppp_ipv6cp_tlu(struct sppp *sp);
+static void sppp_ipv6cp_tld(struct sppp *sp);
+static void sppp_ipv6cp_tls(struct sppp *sp);
+static void sppp_ipv6cp_tlf(struct sppp *sp);
+static void sppp_ipv6cp_scr(struct sppp *sp);
+
+static void sppp_pap_input(struct sppp *sp, struct mbuf *m);
+static void sppp_pap_init(struct sppp *sp);
+static void sppp_pap_open(struct sppp *sp);
+static void sppp_pap_close(struct sppp *sp);
+static void sppp_pap_TO(void *sp);
+static void sppp_pap_my_TO(void *sp);
+static void sppp_pap_tlu(struct sppp *sp);
+static void sppp_pap_tld(struct sppp *sp);
+static void sppp_pap_scr(struct sppp *sp);
+
+static void sppp_chap_input(struct sppp *sp, struct mbuf *m);
+static void sppp_chap_init(struct sppp *sp);
+static void sppp_chap_open(struct sppp *sp);
+static void sppp_chap_close(struct sppp *sp);
+static void sppp_chap_TO(void *sp);
+static void sppp_chap_tlu(struct sppp *sp);
+static void sppp_chap_tld(struct sppp *sp);
+static void sppp_chap_scr(struct sppp *sp);
+
+static const char *sppp_auth_type_name(u_short proto, u_char type);
+static const char *sppp_cp_type_name(u_char type);
+#ifdef INET
+static const char *sppp_dotted_quad(u_long addr);
+static const char *sppp_ipcp_opt_name(u_char opt);
+#endif
+#ifdef INET6
+static const char *sppp_ipv6cp_opt_name(u_char opt);
+#endif
+static const char *sppp_lcp_opt_name(u_char opt);
+static const char *sppp_phase_name(enum ppp_phase phase);
+static const char *sppp_proto_name(u_short proto);
+static const char *sppp_state_name(int state);
+static int sppp_params(struct sppp *sp, u_long cmd, void *data);
+static int sppp_strnlen(u_char *p, int max);
+static void sppp_keepalive(void *dummy);
+static void sppp_phase_network(struct sppp *sp);
+static void sppp_print_bytes(const u_char *p, u_short len);
+static void sppp_print_string(const char *p, u_short len);
+static void sppp_qflush(struct ifqueue *ifq);
+#ifdef INET
+static void sppp_set_ip_addr(struct sppp *sp, u_long src);
+#endif
+#ifdef INET6
+static void sppp_get_ip6_addrs(struct sppp *sp, struct in6_addr *src,
+ struct in6_addr *dst, struct in6_addr *srcmask);
+#ifdef IPV6CP_MYIFID_DYN
+static void sppp_set_ip6_addr(struct sppp *sp, const struct in6_addr *src);
+static void sppp_gen_ip6_addr(struct sppp *sp, const struct in6_addr *src);
+#endif
+static void sppp_suggest_ip6_addr(struct sppp *sp, struct in6_addr *src);
+#endif
+
+/* if_start () wrapper */
+static void sppp_ifstart (struct ifnet *ifp);
+
+/* our control protocol descriptors */
+static const struct cp lcp = {
+ PPP_LCP, IDX_LCP, CP_LCP, "lcp",
+ sppp_lcp_up, sppp_lcp_down, sppp_lcp_open, sppp_lcp_close,
+ sppp_lcp_TO, sppp_lcp_RCR, sppp_lcp_RCN_rej, sppp_lcp_RCN_nak,
+ sppp_lcp_tlu, sppp_lcp_tld, sppp_lcp_tls, sppp_lcp_tlf,
+ sppp_lcp_scr
+};
+
+static const struct cp ipcp = {
+ PPP_IPCP, IDX_IPCP,
+#ifdef INET /* don't run IPCP if there's no IPv4 support */
+ CP_NCP,
+#else
+ 0,
+#endif
+ "ipcp",
+ sppp_ipcp_up, sppp_ipcp_down, sppp_ipcp_open, sppp_ipcp_close,
+ sppp_ipcp_TO, sppp_ipcp_RCR, sppp_ipcp_RCN_rej, sppp_ipcp_RCN_nak,
+ sppp_ipcp_tlu, sppp_ipcp_tld, sppp_ipcp_tls, sppp_ipcp_tlf,
+ sppp_ipcp_scr
+};
+
+static const struct cp ipv6cp = {
+ PPP_IPV6CP, IDX_IPV6CP,
+#ifdef INET6 /*don't run IPv6CP if there's no IPv6 support*/
+ CP_NCP,
+#else
+ 0,
+#endif
+ "ipv6cp",
+ sppp_ipv6cp_up, sppp_ipv6cp_down, sppp_ipv6cp_open, sppp_ipv6cp_close,
+ sppp_ipv6cp_TO, sppp_ipv6cp_RCR, sppp_ipv6cp_RCN_rej, sppp_ipv6cp_RCN_nak,
+ sppp_ipv6cp_tlu, sppp_ipv6cp_tld, sppp_ipv6cp_tls, sppp_ipv6cp_tlf,
+ sppp_ipv6cp_scr
+};
+
+static const struct cp pap = {
+ PPP_PAP, IDX_PAP, CP_AUTH, "pap",
+ sppp_null, sppp_null, sppp_pap_open, sppp_pap_close,
+ sppp_pap_TO, 0, 0, 0,
+ sppp_pap_tlu, sppp_pap_tld, sppp_null, sppp_null,
+ sppp_pap_scr
+};
+
+static const struct cp chap = {
+ PPP_CHAP, IDX_CHAP, CP_AUTH, "chap",
+ sppp_null, sppp_null, sppp_chap_open, sppp_chap_close,
+ sppp_chap_TO, 0, 0, 0,
+ sppp_chap_tlu, sppp_chap_tld, sppp_null, sppp_null,
+ sppp_chap_scr
+};
+
+static const struct cp *cps[IDX_COUNT] = {
+ &lcp, /* IDX_LCP */
+ &ipcp, /* IDX_IPCP */
+ &ipv6cp, /* IDX_IPV6CP */
+ &pap, /* IDX_PAP */
+ &chap, /* IDX_CHAP */
+};
+
+static void*
+sppp_alloc(u_char type, struct ifnet *ifp)
+{
+ struct sppp *sp;
+
+ sp = malloc(sizeof(struct sppp), M_SPPP, M_WAITOK | M_ZERO);
+ sp->pp_ifp = ifp;
+
+ return (sp);
+}
+
+static void
+sppp_free(void *com, u_char type)
+{
+
+ free(com, M_SPPP);
+}
+
+static int
+sppp_modevent(module_t mod, int type, void *unused)
+{
+ switch (type) {
+ case MOD_LOAD:
+ /*
+ * XXX: should probably be IFT_SPPP, but it's fairly
+ * harmless to allocate struct sppp's for non-sppp
+ * interfaces.
+ */
+
+ if_register_com_alloc(IFT_PPP, sppp_alloc, sppp_free);
+ break;
+ case MOD_UNLOAD:
+ /* if_deregister_com_alloc(IFT_PPP); */
+ return EACCES;
+ default:
+ return EOPNOTSUPP;
+ }
+ return 0;
+}
+static moduledata_t spppmod = {
+ "sppp",
+ sppp_modevent,
+ 0
+};
+MODULE_VERSION(sppp, 1);
+DECLARE_MODULE(sppp, spppmod, SI_SUB_DRIVERS, SI_ORDER_ANY);
+
+/*
+ * Exported functions, comprising our interface to the lower layer.
+ */
+
+/*
+ * Process the received packet.
+ */
+void
+sppp_input(struct ifnet *ifp, struct mbuf *m)
+{
+ struct ppp_header *h;
+ int isr = -1;
+ struct sppp *sp = IFP2SP(ifp);
+ int debug, do_account = 0;
+#ifdef INET
+ int hlen, vjlen;
+ u_char *iphdr;
+#endif
+
+ SPPP_LOCK(sp);
+ debug = ifp->if_flags & IFF_DEBUG;
+
+ if (ifp->if_flags & IFF_UP)
+ /* Count received bytes, add FCS and one flag */
+ ifp->if_ibytes += m->m_pkthdr.len + 3;
+
+ if (m->m_pkthdr.len <= PPP_HEADER_LEN) {
+ /* Too small packet, drop it. */
+ if (debug)
+ log(LOG_DEBUG,
+ SPP_FMT "input packet is too small, %d bytes\n",
+ SPP_ARGS(ifp), m->m_pkthdr.len);
+ drop:
+ m_freem (m);
+ SPPP_UNLOCK(sp);
+ drop2:
+ ++ifp->if_ierrors;
+ ++ifp->if_iqdrops;
+ return;
+ }
+
+ if (sp->pp_mode == PP_FR) {
+ sppp_fr_input (sp, m);
+ SPPP_UNLOCK(sp);
+ return;
+ }
+
+ /* Get PPP header. */
+ h = mtod (m, struct ppp_header*);
+ m_adj (m, PPP_HEADER_LEN);
+
+ switch (h->address) {
+ case PPP_ALLSTATIONS:
+ if (h->control != PPP_UI)
+ goto invalid;
+ if (sp->pp_mode == IFF_CISCO) {
+ if (debug)
+ log(LOG_DEBUG,
+ SPP_FMT "PPP packet in Cisco mode "
+ "<addr=0x%x ctrl=0x%x proto=0x%x>\n",
+ SPP_ARGS(ifp),
+ h->address, h->control, ntohs(h->protocol));
+ goto drop;
+ }
+ switch (ntohs (h->protocol)) {
+ default:
+ if (debug)
+ log(LOG_DEBUG,
+ SPP_FMT "rejecting protocol "
+ "<addr=0x%x ctrl=0x%x proto=0x%x>\n",
+ SPP_ARGS(ifp),
+ h->address, h->control, ntohs(h->protocol));
+ if (sp->state[IDX_LCP] == STATE_OPENED)
+ sppp_cp_send (sp, PPP_LCP, PROTO_REJ,
+ ++sp->pp_seq[IDX_LCP], m->m_pkthdr.len + 2,
+ &h->protocol);
+ ++ifp->if_noproto;
+ goto drop;
+ case PPP_LCP:
+ sppp_cp_input(&lcp, sp, m);
+ m_freem (m);
+ SPPP_UNLOCK(sp);
+ return;
+ case PPP_PAP:
+ if (sp->pp_phase >= PHASE_AUTHENTICATE)
+ sppp_pap_input(sp, m);
+ m_freem (m);
+ SPPP_UNLOCK(sp);
+ return;
+ case PPP_CHAP:
+ if (sp->pp_phase >= PHASE_AUTHENTICATE)
+ sppp_chap_input(sp, m);
+ m_freem (m);
+ SPPP_UNLOCK(sp);
+ return;
+#ifdef INET
+ case PPP_IPCP:
+ if (sp->pp_phase == PHASE_NETWORK)
+ sppp_cp_input(&ipcp, sp, m);
+ m_freem (m);
+ SPPP_UNLOCK(sp);
+ return;
+ case PPP_IP:
+ if (sp->state[IDX_IPCP] == STATE_OPENED) {
+ isr = NETISR_IP;
+ }
+ do_account++;
+ break;
+ case PPP_VJ_COMP:
+ if (sp->state[IDX_IPCP] == STATE_OPENED) {
+ if ((vjlen =
+ sl_uncompress_tcp_core(mtod(m, u_char *),
+ m->m_len, m->m_len,
+ TYPE_COMPRESSED_TCP,
+ sp->pp_comp,
+ &iphdr, &hlen)) <= 0) {
+ if (debug)
+ log(LOG_INFO,
+ SPP_FMT "VJ uncompress failed on compressed packet\n",
+ SPP_ARGS(ifp));
+ goto drop;
+ }
+
+ /*
+ * Trim the VJ header off the packet, and prepend
+ * the uncompressed IP header (which will usually
+ * end up in two chained mbufs since there's not
+ * enough leading space in the existing mbuf).
+ */
+ m_adj(m, vjlen);
+ M_PREPEND(m, hlen, M_DONTWAIT);
+ if (m == NULL) {
+ SPPP_UNLOCK(sp);
+ goto drop2;
+ }
+ bcopy(iphdr, mtod(m, u_char *), hlen);
+ isr = NETISR_IP;
+ }
+ do_account++;
+ break;
+ case PPP_VJ_UCOMP:
+ if (sp->state[IDX_IPCP] == STATE_OPENED) {
+ if (sl_uncompress_tcp_core(mtod(m, u_char *),
+ m->m_len, m->m_len,
+ TYPE_UNCOMPRESSED_TCP,
+ sp->pp_comp,
+ &iphdr, &hlen) != 0) {
+ if (debug)
+ log(LOG_INFO,
+ SPP_FMT "VJ uncompress failed on uncompressed packet\n",
+ SPP_ARGS(ifp));
+ goto drop;
+ }
+ isr = NETISR_IP;
+ }
+ do_account++;
+ break;
+#endif
+#ifdef INET6
+ case PPP_IPV6CP:
+ if (sp->pp_phase == PHASE_NETWORK)
+ sppp_cp_input(&ipv6cp, sp, m);
+ m_freem (m);
+ SPPP_UNLOCK(sp);
+ return;
+
+ case PPP_IPV6:
+ if (sp->state[IDX_IPV6CP] == STATE_OPENED)
+ isr = NETISR_IPV6;
+ do_account++;
+ break;
+#endif
+#ifdef IPX
+ case PPP_IPX:
+ /* IPX IPXCP not implemented yet */
+ if (sp->pp_phase == PHASE_NETWORK)
+ isr = NETISR_IPX;
+ do_account++;
+ break;
+#endif
+ }
+ break;
+ case CISCO_MULTICAST:
+ case CISCO_UNICAST:
+ /* Don't check the control field here (RFC 1547). */
+ if (sp->pp_mode != IFF_CISCO) {
+ if (debug)
+ log(LOG_DEBUG,
+ SPP_FMT "Cisco packet in PPP mode "
+ "<addr=0x%x ctrl=0x%x proto=0x%x>\n",
+ SPP_ARGS(ifp),
+ h->address, h->control, ntohs(h->protocol));
+ goto drop;
+ }
+ switch (ntohs (h->protocol)) {
+ default:
+ ++ifp->if_noproto;
+ goto invalid;
+ case CISCO_KEEPALIVE:
+ sppp_cisco_input (sp, m);
+ m_freem (m);
+ SPPP_UNLOCK(sp);
+ return;
+#ifdef INET
+ case ETHERTYPE_IP:
+ isr = NETISR_IP;
+ do_account++;
+ break;
+#endif
+#ifdef INET6
+ case ETHERTYPE_IPV6:
+ isr = NETISR_IPV6;
+ do_account++;
+ break;
+#endif
+#ifdef IPX
+ case ETHERTYPE_IPX:
+ isr = NETISR_IPX;
+ do_account++;
+ break;
+#endif
+ }
+ break;
+ default: /* Invalid PPP packet. */
+ invalid:
+ if (debug)
+ log(LOG_DEBUG,
+ SPP_FMT "invalid input packet "
+ "<addr=0x%x ctrl=0x%x proto=0x%x>\n",
+ SPP_ARGS(ifp),
+ h->address, h->control, ntohs(h->protocol));
+ goto drop;
+ }
+
+ if (! (ifp->if_flags & IFF_UP) || isr == -1)
+ goto drop;
+
+ SPPP_UNLOCK(sp);
+ /* Check queue. */
+ if (netisr_queue(isr, m)) { /* (0) on success. */
+ if (debug)
+ log(LOG_DEBUG, SPP_FMT "protocol queue overflow\n",
+ SPP_ARGS(ifp));
+ goto drop2;
+ }
+
+ if (do_account)
+ /*
+ * Do only account for network packets, not for control
+ * packets. This is used by some subsystems to detect
+ * idle lines.
+ */
+ sp->pp_last_recv = time_uptime;
+}
+
+static void
+sppp_ifstart_sched(void *dummy)
+{
+ struct sppp *sp = dummy;
+
+ sp->if_start(SP2IFP(sp));
+}
+
+/*
+ * if_start() wrapper function. We use it to schedule the real
+ * if_start() for execution: it cannot be called directly while the
+ * sppp lock is held, so in that case it is deferred via a callout.
+ */
+static void
+sppp_ifstart(struct ifnet *ifp)
+{
+ struct sppp *sp = IFP2SP(ifp);
+
+ if (SPPP_LOCK_OWNED(sp)) {
+ if (callout_pending(&sp->ifstart_callout))
+ return;
+ callout_reset(&sp->ifstart_callout, 1, sppp_ifstart_sched,
+ (void *)sp);
+ } else {
+ sp->if_start(ifp);
+ }
+}
+
+/*
+ * Enqueue transmit packet.
+ */
+static int
+sppp_output(struct ifnet *ifp, struct mbuf *m,
+ struct sockaddr *dst, struct route *ro)
+{
+ struct sppp *sp = IFP2SP(ifp);
+ struct ppp_header *h;
+ struct ifqueue *ifq = NULL;
+ int s, error, rv = 0;
+#ifdef INET
+ int ipproto = PPP_IP;
+#endif
+ int debug = ifp->if_flags & IFF_DEBUG;
+
+ s = splimp();
+ SPPP_LOCK(sp);
+
+ if (!(ifp->if_flags & IFF_UP) ||
+ (!(ifp->if_flags & IFF_AUTO) &&
+ !(ifp->if_drv_flags & IFF_DRV_RUNNING))) {
+#ifdef INET6
+ drop:
+#endif
+ m_freem (m);
+ SPPP_UNLOCK(sp);
+ splx (s);
+ return (ENETDOWN);
+ }
+
+ if ((ifp->if_flags & IFF_AUTO) &&
+ !(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
+#ifdef INET6
+ /*
+ * XXX
+ *
+ * Hack to prevent the initialization-time generated
+ * IPv6 multicast packet from erroneously causing a
+ * dialout event in case IPv6 has been
+ * administratively disabled on that interface.
+ */
+ if (dst->sa_family == AF_INET6 &&
+ !(sp->confflags & CONF_ENABLE_IPV6))
+ goto drop;
+#endif
+ /*
+ * Interface is not yet running, but auto-dial. Need
+ * to start LCP for it.
+ */
+ ifp->if_drv_flags |= IFF_DRV_RUNNING;
+ splx(s);
+ lcp.Open(sp);
+ s = splimp();
+ }
+
+#ifdef INET
+ if (dst->sa_family == AF_INET) {
+ /* XXX Check mbuf length here? */
+ struct ip *ip = mtod (m, struct ip*);
+ struct tcphdr *tcp = (struct tcphdr*) ((long*)ip + ip->ip_hl);
+
+ /*
+ * When using dynamic local IP address assignment, i.e. 0.0.0.0
+ * as the local address, the first TCP session will not connect:
+ * the local TCP checksum is computed using 0.0.0.0, which later
+ * becomes our real IP address, so the checksum verified at the
+ * remote end turns out invalid. So we
+ * - don't let packets with a source IP address of 0 through
+ * - flag TCP packets with a source IP of 0 as an error
+ */
+
+ if(ip->ip_src.s_addr == INADDR_ANY) /* -hm */
+ {
+ m_freem(m);
+ SPPP_UNLOCK(sp);
+ splx(s);
+ if(ip->ip_p == IPPROTO_TCP)
+ return(EADDRNOTAVAIL);
+ else
+ return(0);
+ }
+
+ /*
+ * Put low delay, telnet, rlogin and ftp control packets
+ * in front of the queue or let ALTQ take care.
+ */
+ if (ALTQ_IS_ENABLED(&ifp->if_snd))
+ ;
+ else if (_IF_QFULL(&sp->pp_fastq))
+ ;
+ else if (ip->ip_tos & IPTOS_LOWDELAY)
+ ifq = &sp->pp_fastq;
+ else if (m->m_len < sizeof *ip + sizeof *tcp)
+ ;
+ else if (ip->ip_p != IPPROTO_TCP)
+ ;
+ else if (INTERACTIVE (ntohs (tcp->th_sport)))
+ ifq = &sp->pp_fastq;
+ else if (INTERACTIVE (ntohs (tcp->th_dport)))
+ ifq = &sp->pp_fastq;
+
+ /*
+ * Do IP Header compression
+ */
+ if (sp->pp_mode != IFF_CISCO && sp->pp_mode != PP_FR &&
+ (sp->ipcp.flags & IPCP_VJ) && ip->ip_p == IPPROTO_TCP)
+ switch (sl_compress_tcp(m, ip, sp->pp_comp,
+ sp->ipcp.compress_cid)) {
+ case TYPE_COMPRESSED_TCP:
+ ipproto = PPP_VJ_COMP;
+ break;
+ case TYPE_UNCOMPRESSED_TCP:
+ ipproto = PPP_VJ_UCOMP;
+ break;
+ case TYPE_IP:
+ ipproto = PPP_IP;
+ break;
+ default:
+ m_freem(m);
+ SPPP_UNLOCK(sp);
+ splx(s);
+ return (EINVAL);
+ }
+ }
+#endif
+
+#ifdef INET6
+ if (dst->sa_family == AF_INET6) {
+ /* XXX do something tricky here? */
+ }
+#endif
+
+ if (sp->pp_mode == PP_FR) {
+ /* Add frame relay header. */
+ m = sppp_fr_header (sp, m, dst->sa_family);
+ if (! m)
+ goto nobufs;
+ goto out;
+ }
+
+ /*
+ * Prepend general data packet PPP header. For now, IP only.
+ */
+ M_PREPEND (m, PPP_HEADER_LEN, M_DONTWAIT);
+ if (! m) {
+nobufs: if (debug)
+ log(LOG_DEBUG, SPP_FMT "no memory for transmit header\n",
+ SPP_ARGS(ifp));
+ ++ifp->if_oerrors;
+ SPPP_UNLOCK(sp);
+ splx (s);
+ return (ENOBUFS);
+ }
+ /*
+ * We may want to check the packet size here
+ * (although, given the implementation, it is always big enough).
+ */
+ h = mtod (m, struct ppp_header*);
+ if (sp->pp_mode == IFF_CISCO) {
+ h->address = CISCO_UNICAST; /* unicast address */
+ h->control = 0;
+ } else {
+ h->address = PPP_ALLSTATIONS; /* broadcast address */
+ h->control = PPP_UI; /* Unnumbered Info */
+ }
+
+ switch (dst->sa_family) {
+#ifdef INET
+ case AF_INET: /* Internet Protocol */
+ if (sp->pp_mode == IFF_CISCO)
+ h->protocol = htons (ETHERTYPE_IP);
+ else {
+ /*
+ * Don't choke with an ENETDOWN early. It's
+ * possible that we just started dialing out,
+ * so don't drop the packet immediately. If
+ * we notice that we run out of buffer space
+ * below, we will however remember that we are
+ * not ready to carry IP packets, and return
+ * ENETDOWN, as opposed to ENOBUFS.
+ */
+ h->protocol = htons(ipproto);
+ if (sp->state[IDX_IPCP] != STATE_OPENED)
+ rv = ENETDOWN;
+ }
+ break;
+#endif
+#ifdef INET6
+ case AF_INET6: /* Internet Protocol */
+ if (sp->pp_mode == IFF_CISCO)
+ h->protocol = htons (ETHERTYPE_IPV6);
+ else {
+ /*
+ * Don't choke with an ENETDOWN early. It's
+ * possible that we just started dialing out,
+ * so don't drop the packet immediately. If
+ * we notice that we run out of buffer space
+ * below, we will however remember that we are
+ * not ready to carry IP packets, and return
+ * ENETDOWN, as opposed to ENOBUFS.
+ */
+ h->protocol = htons(PPP_IPV6);
+ if (sp->state[IDX_IPV6CP] != STATE_OPENED)
+ rv = ENETDOWN;
+ }
+ break;
+#endif
+#ifdef IPX
+ case AF_IPX: /* Novell IPX Protocol */
+ h->protocol = htons (sp->pp_mode == IFF_CISCO ?
+ ETHERTYPE_IPX : PPP_IPX);
+ break;
+#endif
+ default:
+ m_freem (m);
+ ++ifp->if_oerrors;
+ SPPP_UNLOCK(sp);
+ splx (s);
+ return (EAFNOSUPPORT);
+ }
+
+ /*
+ * Queue message on interface, and start output if interface
+ * not yet active.
+ */
+out:
+ if (ifq != NULL)
+ error = !(IF_HANDOFF_ADJ(ifq, m, ifp, 3));
+ else
+ IFQ_HANDOFF_ADJ(ifp, m, 3, error);
+ if (error) {
+ ++ifp->if_oerrors;
+ SPPP_UNLOCK(sp);
+ splx (s);
+ return (rv? rv: ENOBUFS);
+ }
+ SPPP_UNLOCK(sp);
+ splx (s);
+ /*
+ * Unlike in sppp_input(), we can always bump the timestamp
+ * here since sppp_output() is only called on behalf of
+ * network-layer traffic; control-layer traffic is handled
+ * by sppp_cp_send().
+ */
+ sp->pp_last_sent = time_uptime;
+ return (0);
+}
+
+void
+sppp_attach(struct ifnet *ifp)
+{
+ struct sppp *sp = IFP2SP(ifp);
+
+ /* Initialize mtx lock */
+ mtx_init(&sp->mtx, "sppp", MTX_NETWORK_LOCK, MTX_DEF | MTX_RECURSE);
+
+ /* Initialize keepalive handler. */
+ callout_init(&sp->keepalive_callout, CALLOUT_MPSAFE);
+ callout_reset(&sp->keepalive_callout, hz * 10, sppp_keepalive,
+ (void *)sp);
+
+ ifp->if_mtu = PP_MTU;
+ ifp->if_flags = IFF_POINTOPOINT | IFF_MULTICAST;
+ ifp->if_output = sppp_output;
+#if 0
+ sp->pp_flags = PP_KEEPALIVE;
+#endif
+ ifp->if_snd.ifq_maxlen = 32;
+ sp->pp_fastq.ifq_maxlen = 32;
+ sp->pp_cpq.ifq_maxlen = 20;
+ sp->pp_loopcnt = 0;
+ sp->pp_alivecnt = 0;
+ bzero(&sp->pp_seq[0], sizeof(sp->pp_seq));
+ bzero(&sp->pp_rseq[0], sizeof(sp->pp_rseq));
+ sp->pp_phase = PHASE_DEAD;
+ sp->pp_up = sppp_pp_up;
+ sp->pp_down = sppp_pp_down;
+ if(!mtx_initialized(&sp->pp_cpq.ifq_mtx))
+ mtx_init(&sp->pp_cpq.ifq_mtx, "sppp_cpq", NULL, MTX_DEF);
+ if(!mtx_initialized(&sp->pp_fastq.ifq_mtx))
+ mtx_init(&sp->pp_fastq.ifq_mtx, "sppp_fastq", NULL, MTX_DEF);
+ sp->pp_last_recv = sp->pp_last_sent = time_uptime;
+ sp->confflags = 0;
+#ifdef INET
+ sp->confflags |= CONF_ENABLE_VJ;
+#endif
+#ifdef INET6
+ sp->confflags |= CONF_ENABLE_IPV6;
+#endif
+ callout_init(&sp->ifstart_callout, CALLOUT_MPSAFE);
+ sp->if_start = ifp->if_start;
+ ifp->if_start = sppp_ifstart;
+ sp->pp_comp = malloc(sizeof(struct slcompress), M_TEMP, M_WAITOK);
+ sl_compress_init(sp->pp_comp, -1);
+ sppp_lcp_init(sp);
+ sppp_ipcp_init(sp);
+ sppp_ipv6cp_init(sp);
+ sppp_pap_init(sp);
+ sppp_chap_init(sp);
+}
+
+void
+sppp_detach(struct ifnet *ifp)
+{
+ struct sppp *sp = IFP2SP(ifp);
+ int i;
+
+ KASSERT(mtx_initialized(&sp->mtx), ("sppp mutex is not initialized"));
+
+ /* Stop keepalive handler. */
+ if (!callout_drain(&sp->keepalive_callout))
+ callout_stop(&sp->keepalive_callout);
+
+ for (i = 0; i < IDX_COUNT; i++) {
+ if (!callout_drain(&sp->ch[i]))
+ callout_stop(&sp->ch[i]);
+ }
+ if (!callout_drain(&sp->pap_my_to_ch))
+ callout_stop(&sp->pap_my_to_ch);
+ mtx_destroy(&sp->pp_cpq.ifq_mtx);
+ mtx_destroy(&sp->pp_fastq.ifq_mtx);
+ mtx_destroy(&sp->mtx);
+}
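+
+/*
+ * A minimal sketch of how a synchronous driver is expected to plug
+ * into this layer (the xx_* names are hypothetical; the sppp entry
+ * points are the ones defined in this file):
+ *
+ * attach: ifp->if_start = xx_start; sppp_attach(ifp); if_attach(ifp);
+ * xx_start: while the transmitter has room and
+ * (m = sppp_dequeue(ifp)) != NULL, hand m to the hardware;
+ * rx interrupt: wrap the received frame in an mbuf and call
+ * sppp_input(ifp, m);
+ * ioctl: pass unhandled SIOC* requests on to sppp_ioctl(ifp, cmd, data);
+ * detach: sppp_detach(ifp), then if_detach(ifp).
+ */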
+
+/*
+ * Flush the interface output queue.
+ */
+static void
+sppp_flush_unlocked(struct ifnet *ifp)
+{
+ struct sppp *sp = IFP2SP(ifp);
+
+ sppp_qflush ((struct ifqueue *)&SP2IFP(sp)->if_snd);
+ sppp_qflush (&sp->pp_fastq);
+ sppp_qflush (&sp->pp_cpq);
+}
+
+void
+sppp_flush(struct ifnet *ifp)
+{
+ struct sppp *sp = IFP2SP(ifp);
+
+ SPPP_LOCK(sp);
+ sppp_flush_unlocked (ifp);
+ SPPP_UNLOCK(sp);
+}
+
+/*
+ * Check if the output queue is empty.
+ */
+int
+sppp_isempty(struct ifnet *ifp)
+{
+ struct sppp *sp = IFP2SP(ifp);
+ int empty, s;
+
+ s = splimp();
+ SPPP_LOCK(sp);
+ empty = !sp->pp_fastq.ifq_head && !sp->pp_cpq.ifq_head &&
+ !SP2IFP(sp)->if_snd.ifq_head;
+ SPPP_UNLOCK(sp);
+ splx(s);
+ return (empty);
+}
+
+/*
+ * Get next packet to send.
+ */
+struct mbuf *
+sppp_dequeue(struct ifnet *ifp)
+{
+ struct sppp *sp = IFP2SP(ifp);
+ struct mbuf *m;
+ int s;
+
+ s = splimp();
+ SPPP_LOCK(sp);
+ /*
+ * Process only the control protocol queue until we have at
+ * least one NCP open.
+ *
+ * In Cisco and frame relay modes, always serve all three queues.
+ */
+ IF_DEQUEUE(&sp->pp_cpq, m);
+ if (m == NULL &&
+ (sppp_ncp_check(sp) || sp->pp_mode == IFF_CISCO ||
+ sp->pp_mode == PP_FR)) {
+ IF_DEQUEUE(&sp->pp_fastq, m);
+ if (m == NULL)
+ IF_DEQUEUE (&SP2IFP(sp)->if_snd, m);
+ }
+ SPPP_UNLOCK(sp);
+ splx(s);
+ return m;
+}
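+
+/*
+ * Both sppp_dequeue() and sppp_pick() apply the same precedence:
+ * control packets (pp_cpq) first, then (once an NCP is open, or in
+ * Cisco/frame relay mode) interactive traffic (pp_fastq), then the
+ * regular send queue.
+ */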
+
+/*
+ * Pick the next packet, do not remove it from the queue.
+ */
+struct mbuf *
+sppp_pick(struct ifnet *ifp)
+{
+ struct sppp *sp = IFP2SP(ifp);
+ struct mbuf *m;
+ int s;
+
+ s = splimp ();
+ SPPP_LOCK(sp);
+
+ m = sp->pp_cpq.ifq_head;
+ if (m == NULL &&
+ (sp->pp_phase == PHASE_NETWORK ||
+ sp->pp_mode == IFF_CISCO ||
+ sp->pp_mode == PP_FR))
+ if ((m = sp->pp_fastq.ifq_head) == NULL)
+ m = SP2IFP(sp)->if_snd.ifq_head;
+ SPPP_UNLOCK(sp);
+ splx (s);
+ return (m);
+}
+
+/*
+ * Process an ioctl request. Called on low priority level.
+ */
+int
+sppp_ioctl(struct ifnet *ifp, IOCTL_CMD_T cmd, void *data)
+{
+ struct ifreq *ifr = (struct ifreq*) data;
+ struct sppp *sp = IFP2SP(ifp);
+ int s, rv, going_up, going_down, newmode;
+
+ s = splimp();
+ SPPP_LOCK(sp);
+ rv = 0;
+ switch (cmd) {
+ case SIOCAIFADDR:
+ case SIOCSIFDSTADDR:
+ break;
+
+ case SIOCSIFADDR:
+ /* set the interface "up" when assigning an IP address */
+ ifp->if_flags |= IFF_UP;
+ /* FALLTHROUGH */
+
+ case SIOCSIFFLAGS:
+ going_up = ifp->if_flags & IFF_UP &&
+ (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0;
+ going_down = (ifp->if_flags & IFF_UP) == 0 &&
+ ifp->if_drv_flags & IFF_DRV_RUNNING;
+
+ newmode = ifp->if_flags & IFF_PASSIVE;
+ if (!newmode)
+ newmode = ifp->if_flags & IFF_AUTO;
+ if (!newmode)
+ newmode = ifp->if_flags & IFF_CISCO;
+ ifp->if_flags &= ~(IFF_PASSIVE | IFF_AUTO | IFF_CISCO);
+ ifp->if_flags |= newmode;
+
+ if (!newmode)
+ newmode = sp->pp_flags & PP_FR;
+
+ if (newmode != sp->pp_mode) {
+ going_down = 1;
+ if (!going_up)
+ going_up = ifp->if_drv_flags & IFF_DRV_RUNNING;
+ }
+
+ if (going_down) {
+ if (sp->pp_mode != IFF_CISCO &&
+ sp->pp_mode != PP_FR)
+ lcp.Close(sp);
+ else if (sp->pp_tlf)
+ (sp->pp_tlf)(sp);
+ sppp_flush_unlocked(ifp);
+ ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
+ sp->pp_mode = newmode;
+ }
+
+ if (going_up) {
+ if (sp->pp_mode != IFF_CISCO &&
+ sp->pp_mode != PP_FR)
+ lcp.Close(sp);
+ sp->pp_mode = newmode;
+ if (sp->pp_mode == 0) {
+ ifp->if_drv_flags |= IFF_DRV_RUNNING;
+ lcp.Open(sp);
+ }
+ if ((sp->pp_mode == IFF_CISCO) ||
+ (sp->pp_mode == PP_FR)) {
+ if (sp->pp_tls)
+ (sp->pp_tls)(sp);
+ ifp->if_drv_flags |= IFF_DRV_RUNNING;
+ }
+ }
+
+ break;
+
+#ifdef SIOCSIFMTU
+#ifndef ifr_mtu
+#define ifr_mtu ifr_metric
+#endif
+ case SIOCSIFMTU:
+ if (ifr->ifr_mtu < 128 || ifr->ifr_mtu > sp->lcp.their_mru) {
+ rv = EINVAL;
+ break;
+ }
+ ifp->if_mtu = ifr->ifr_mtu;
+ break;
+#endif
+#ifdef SLIOCSETMTU
+ case SLIOCSETMTU:
+ if (*(short*)data < 128 || *(short*)data > sp->lcp.their_mru) {
+ rv = EINVAL;
+ break;
+ }
+ ifp->if_mtu = *(short*)data;
+ break;
+#endif
+#ifdef SIOCGIFMTU
+ case SIOCGIFMTU:
+ ifr->ifr_mtu = ifp->if_mtu;
+ break;
+#endif
+#ifdef SLIOCGETMTU
+ case SLIOCGETMTU:
+ *(short*)data = ifp->if_mtu;
+ break;
+#endif
+ case SIOCADDMULTI:
+ case SIOCDELMULTI:
+ break;
+
+ case SIOCGIFGENERIC:
+ case SIOCSIFGENERIC:
+ rv = sppp_params(sp, cmd, data);
+ break;
+
+ default:
+ rv = ENOTTY;
+ }
+ SPPP_UNLOCK(sp);
+ splx(s);
+ return rv;
+}
+
+/*
+ * Cisco framing implementation.
+ */
+
+/*
+ * Handle incoming Cisco keepalive protocol packets.
+ */
+static void
+sppp_cisco_input(struct sppp *sp, struct mbuf *m)
+{
+ STDDCL;
+ struct cisco_packet *h;
+ u_long me, mymask;
+
+ if (m->m_pkthdr.len < CISCO_PACKET_LEN) {
+ if (debug)
+ log(LOG_DEBUG,
+ SPP_FMT "cisco invalid packet length: %d bytes\n",
+ SPP_ARGS(ifp), m->m_pkthdr.len);
+ return;
+ }
+ h = mtod (m, struct cisco_packet*);
+ if (debug)
+ log(LOG_DEBUG,
+ SPP_FMT "cisco input: %d bytes "
+ "<0x%lx 0x%lx 0x%lx 0x%x 0x%x-0x%x>\n",
+ SPP_ARGS(ifp), m->m_pkthdr.len,
+ (u_long)ntohl (h->type), (u_long)h->par1, (u_long)h->par2, (u_int)h->rel,
+ (u_int)h->time0, (u_int)h->time1);
+ switch (ntohl (h->type)) {
+ default:
+ if (debug)
+ log(-1, SPP_FMT "cisco unknown packet type: 0x%lx\n",
+ SPP_ARGS(ifp), (u_long)ntohl (h->type));
+ break;
+ case CISCO_ADDR_REPLY:
+ /* Reply on address request, ignore */
+ break;
+ case CISCO_KEEPALIVE_REQ:
+ sp->pp_alivecnt = 0;
+ sp->pp_rseq[IDX_LCP] = ntohl (h->par1);
+ if (sp->pp_seq[IDX_LCP] == sp->pp_rseq[IDX_LCP]) {
+ /* Local and remote sequence numbers are equal.
+ * Probably, the line is in loopback mode. */
+ if (sp->pp_loopcnt >= MAXALIVECNT) {
+ printf (SPP_FMT "loopback\n",
+ SPP_ARGS(ifp));
+ sp->pp_loopcnt = 0;
+ if (ifp->if_flags & IFF_UP) {
+ if_down (ifp);
+ sppp_qflush (&sp->pp_cpq);
+ }
+ }
+ ++sp->pp_loopcnt;
+
+ /* Generate new local sequence number */
+ sp->pp_seq[IDX_LCP] = random();
+ break;
+ }
+ sp->pp_loopcnt = 0;
+ if (! (ifp->if_flags & IFF_UP) &&
+ (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
+ if_up(ifp);
+ printf (SPP_FMT "up\n", SPP_ARGS(ifp));
+ }
+ break;
+ case CISCO_ADDR_REQ:
+ sppp_get_ip_addrs(sp, &me, 0, &mymask);
+ if (me != 0L)
+ sppp_cisco_send(sp, CISCO_ADDR_REPLY, me, mymask);
+ break;
+ }
+}
+
+/*
+ * Send Cisco keepalive packet.
+ */
+static void
+sppp_cisco_send(struct sppp *sp, int type, long par1, long par2)
+{
+ STDDCL;
+ struct ppp_header *h;
+ struct cisco_packet *ch;
+ struct mbuf *m;
+ struct timeval tv;
+
+ getmicrouptime(&tv);
+
+ MGETHDR (m, M_DONTWAIT, MT_DATA);
+ if (! m)
+ return;
+ m->m_pkthdr.len = m->m_len = PPP_HEADER_LEN + CISCO_PACKET_LEN;
+ m->m_pkthdr.rcvif = 0;
+
+ h = mtod (m, struct ppp_header*);
+ h->address = CISCO_MULTICAST;
+ h->control = 0;
+ h->protocol = htons (CISCO_KEEPALIVE);
+
+ ch = (struct cisco_packet*) (h + 1);
+ ch->type = htonl (type);
+ ch->par1 = htonl (par1);
+ ch->par2 = htonl (par2);
+ ch->rel = -1;
+
+ ch->time0 = htons ((u_short) (tv.tv_sec >> 16));
+ ch->time1 = htons ((u_short) tv.tv_sec);
+
+ if (debug)
+ log(LOG_DEBUG,
+ SPP_FMT "cisco output: <0x%lx 0x%lx 0x%lx 0x%x 0x%x-0x%x>\n",
+ SPP_ARGS(ifp), (u_long)ntohl (ch->type), (u_long)ch->par1,
+ (u_long)ch->par2, (u_int)ch->rel, (u_int)ch->time0, (u_int)ch->time1);
+
+ if (! IF_HANDOFF_ADJ(&sp->pp_cpq, m, ifp, 3))
+ ifp->if_oerrors++;
+}
+
+/*
+ * PPP protocol implementation.
+ */
+
+/*
+ * Send PPP control protocol packet.
+ */
+static void
+sppp_cp_send(struct sppp *sp, u_short proto, u_char type,
+ u_char ident, u_short len, void *data)
+{
+ STDDCL;
+ struct ppp_header *h;
+ struct lcp_header *lh;
+ struct mbuf *m;
+
+ if (len > MHLEN - PPP_HEADER_LEN - LCP_HEADER_LEN)
+ len = MHLEN - PPP_HEADER_LEN - LCP_HEADER_LEN;
+ MGETHDR (m, M_DONTWAIT, MT_DATA);
+ if (! m)
+ return;
+ m->m_pkthdr.len = m->m_len = PPP_HEADER_LEN + LCP_HEADER_LEN + len;
+ m->m_pkthdr.rcvif = 0;
+
+ h = mtod (m, struct ppp_header*);
+ h->address = PPP_ALLSTATIONS; /* broadcast address */
+ h->control = PPP_UI; /* Unnumbered Info */
+ h->protocol = htons (proto); /* Link Control Protocol */
+
+ lh = (struct lcp_header*) (h + 1);
+ lh->type = type;
+ lh->ident = ident;
+ lh->len = htons (LCP_HEADER_LEN + len);
+ if (len)
+ bcopy (data, lh+1, len);
+
+ if (debug) {
+ log(LOG_DEBUG, SPP_FMT "%s output <%s id=0x%x len=%d",
+ SPP_ARGS(ifp),
+ sppp_proto_name(proto),
+ sppp_cp_type_name (lh->type), lh->ident,
+ ntohs (lh->len));
+ sppp_print_bytes ((u_char*) (lh+1), len);
+ log(-1, ">\n");
+ }
+ if (! IF_HANDOFF_ADJ(&sp->pp_cpq, m, ifp, 3))
+ ifp->if_oerrors++;
+}
+
+/*
+ * Handle incoming PPP control protocol packets.
+ */
+static void
+sppp_cp_input(const struct cp *cp, struct sppp *sp, struct mbuf *m)
+{
+ STDDCL;
+ struct lcp_header *h;
+ int len = m->m_pkthdr.len;
+ int rv;
+ u_char *p;
+
+ if (len < 4) {
+ if (debug)
+ log(LOG_DEBUG,
+ SPP_FMT "%s invalid packet length: %d bytes\n",
+ SPP_ARGS(ifp), cp->name, len);
+ return;
+ }
+ h = mtod (m, struct lcp_header*);
+ if (debug) {
+ log(LOG_DEBUG,
+ SPP_FMT "%s input(%s): <%s id=0x%x len=%d",
+ SPP_ARGS(ifp), cp->name,
+ sppp_state_name(sp->state[cp->protoidx]),
+ sppp_cp_type_name (h->type), h->ident, ntohs (h->len));
+ sppp_print_bytes ((u_char*) (h+1), len-4);
+ log(-1, ">\n");
+ }
+ if (len > ntohs (h->len))
+ len = ntohs (h->len);
+ p = (u_char *)(h + 1);
+ switch (h->type) {
+ case CONF_REQ:
+ if (len < 4) {
+ if (debug)
+ log(-1, SPP_FMT "%s invalid conf-req length %d\n",
+ SPP_ARGS(ifp), cp->name,
+ len);
+ ++ifp->if_ierrors;
+ break;
+ }
+ /* handle states where RCR doesn't get a SCA/SCN */
+ switch (sp->state[cp->protoidx]) {
+ case STATE_CLOSING:
+ case STATE_STOPPING:
+ return;
+ case STATE_CLOSED:
+ sppp_cp_send(sp, cp->proto, TERM_ACK, h->ident,
+ 0, 0);
+ return;
+ }
+ rv = (cp->RCR)(sp, h, len);
+ switch (sp->state[cp->protoidx]) {
+ case STATE_OPENED:
+ (cp->tld)(sp);
+ (cp->scr)(sp);
+ /* FALLTHROUGH */
+ case STATE_ACK_SENT:
+ case STATE_REQ_SENT:
+ /*
+ * sppp_cp_change_state() has the side effect of
+ * restarting the timeouts. We want to avoid that
+ * if the state doesn't change; otherwise we would
+ * never time out and resend a configuration request
+ * that got lost.
+ */
+ if (sp->state[cp->protoidx] == (rv ? STATE_ACK_SENT:
+ STATE_REQ_SENT))
+ break;
+ sppp_cp_change_state(cp, sp, rv?
+ STATE_ACK_SENT: STATE_REQ_SENT);
+ break;
+ case STATE_STOPPED:
+ sp->rst_counter[cp->protoidx] = sp->lcp.max_configure;
+ (cp->scr)(sp);
+ sppp_cp_change_state(cp, sp, rv?
+ STATE_ACK_SENT: STATE_REQ_SENT);
+ break;
+ case STATE_ACK_RCVD:
+ if (rv) {
+ sppp_cp_change_state(cp, sp, STATE_OPENED);
+ if (debug)
+ log(LOG_DEBUG, SPP_FMT "%s tlu\n",
+ SPP_ARGS(ifp),
+ cp->name);
+ (cp->tlu)(sp);
+ } else
+ sppp_cp_change_state(cp, sp, STATE_ACK_RCVD);
+ break;
+ default:
+ printf(SPP_FMT "%s illegal %s in state %s\n",
+ SPP_ARGS(ifp), cp->name,
+ sppp_cp_type_name(h->type),
+ sppp_state_name(sp->state[cp->protoidx]));
+ ++ifp->if_ierrors;
+ }
+ break;
+ case CONF_ACK:
+ if (h->ident != sp->confid[cp->protoidx]) {
+ if (debug)
+ log(-1, SPP_FMT "%s id mismatch 0x%x != 0x%x\n",
+ SPP_ARGS(ifp), cp->name,
+ h->ident, sp->confid[cp->protoidx]);
+ ++ifp->if_ierrors;
+ break;
+ }
+ switch (sp->state[cp->protoidx]) {
+ case STATE_CLOSED:
+ case STATE_STOPPED:
+ sppp_cp_send(sp, cp->proto, TERM_ACK, h->ident, 0, 0);
+ break;
+ case STATE_CLOSING:
+ case STATE_STOPPING:
+ break;
+ case STATE_REQ_SENT:
+ sp->rst_counter[cp->protoidx] = sp->lcp.max_configure;
+ sppp_cp_change_state(cp, sp, STATE_ACK_RCVD);
+ break;
+ case STATE_OPENED:
+ (cp->tld)(sp);
+ /* FALLTHROUGH */
+ case STATE_ACK_RCVD:
+ (cp->scr)(sp);
+ sppp_cp_change_state(cp, sp, STATE_REQ_SENT);
+ break;
+ case STATE_ACK_SENT:
+ sp->rst_counter[cp->protoidx] = sp->lcp.max_configure;
+ sppp_cp_change_state(cp, sp, STATE_OPENED);
+ if (debug)
+ log(LOG_DEBUG, SPP_FMT "%s tlu\n",
+ SPP_ARGS(ifp), cp->name);
+ (cp->tlu)(sp);
+ break;
+ default:
+ printf(SPP_FMT "%s illegal %s in state %s\n",
+ SPP_ARGS(ifp), cp->name,
+ sppp_cp_type_name(h->type),
+ sppp_state_name(sp->state[cp->protoidx]));
+ ++ifp->if_ierrors;
+ }
+ break;
+ case CONF_NAK:
+ case CONF_REJ:
+ if (h->ident != sp->confid[cp->protoidx]) {
+ if (debug)
+ log(-1, SPP_FMT "%s id mismatch 0x%x != 0x%x\n",
+ SPP_ARGS(ifp), cp->name,
+ h->ident, sp->confid[cp->protoidx]);
+ ++ifp->if_ierrors;
+ break;
+ }
+ if (h->type == CONF_NAK)
+ (cp->RCN_nak)(sp, h, len);
+ else /* CONF_REJ */
+ (cp->RCN_rej)(sp, h, len);
+
+ switch (sp->state[cp->protoidx]) {
+ case STATE_CLOSED:
+ case STATE_STOPPED:
+ sppp_cp_send(sp, cp->proto, TERM_ACK, h->ident, 0, 0);
+ break;
+ case STATE_REQ_SENT:
+ case STATE_ACK_SENT:
+ sp->rst_counter[cp->protoidx] = sp->lcp.max_configure;
+ /*
+ * Slow things down a bit if we think we might be
+ * in loopback. Depend on the timeout to send the
+ * next configuration request.
+ */
+ if (sp->pp_loopcnt)
+ break;
+ (cp->scr)(sp);
+ break;
+ case STATE_OPENED:
+ (cp->tld)(sp);
+ /* FALLTHROUGH */
+ case STATE_ACK_RCVD:
+ sppp_cp_change_state(cp, sp, STATE_REQ_SENT);
+ (cp->scr)(sp);
+ break;
+ case STATE_CLOSING:
+ case STATE_STOPPING:
+ break;
+ default:
+ printf(SPP_FMT "%s illegal %s in state %s\n",
+ SPP_ARGS(ifp), cp->name,
+ sppp_cp_type_name(h->type),
+ sppp_state_name(sp->state[cp->protoidx]));
+ ++ifp->if_ierrors;
+ }
+ break;
+
+ case TERM_REQ:
+ switch (sp->state[cp->protoidx]) {
+ case STATE_ACK_RCVD:
+ case STATE_ACK_SENT:
+ sppp_cp_change_state(cp, sp, STATE_REQ_SENT);
+ /* FALLTHROUGH */
+ case STATE_CLOSED:
+ case STATE_STOPPED:
+ case STATE_CLOSING:
+ case STATE_STOPPING:
+ case STATE_REQ_SENT:
+ sta:
+ /* Send Terminate-Ack packet. */
+ if (debug)
+ log(LOG_DEBUG, SPP_FMT "%s send terminate-ack\n",
+ SPP_ARGS(ifp), cp->name);
+ sppp_cp_send(sp, cp->proto, TERM_ACK, h->ident, 0, 0);
+ break;
+ case STATE_OPENED:
+ (cp->tld)(sp);
+ sp->rst_counter[cp->protoidx] = 0;
+ sppp_cp_change_state(cp, sp, STATE_STOPPING);
+ goto sta;
+ default:
+ printf(SPP_FMT "%s illegal %s in state %s\n",
+ SPP_ARGS(ifp), cp->name,
+ sppp_cp_type_name(h->type),
+ sppp_state_name(sp->state[cp->protoidx]));
+ ++ifp->if_ierrors;
+ }
+ break;
+ case TERM_ACK:
+ switch (sp->state[cp->protoidx]) {
+ case STATE_CLOSED:
+ case STATE_STOPPED:
+ case STATE_REQ_SENT:
+ case STATE_ACK_SENT:
+ break;
+ case STATE_CLOSING:
+ sppp_cp_change_state(cp, sp, STATE_CLOSED);
+ (cp->tlf)(sp);
+ break;
+ case STATE_STOPPING:
+ sppp_cp_change_state(cp, sp, STATE_STOPPED);
+ (cp->tlf)(sp);
+ break;
+ case STATE_ACK_RCVD:
+ sppp_cp_change_state(cp, sp, STATE_REQ_SENT);
+ break;
+ case STATE_OPENED:
+ (cp->tld)(sp);
+ (cp->scr)(sp);
+ sppp_cp_change_state(cp, sp, STATE_ACK_RCVD);
+ break;
+ default:
+ printf(SPP_FMT "%s illegal %s in state %s\n",
+ SPP_ARGS(ifp), cp->name,
+ sppp_cp_type_name(h->type),
+ sppp_state_name(sp->state[cp->protoidx]));
+ ++ifp->if_ierrors;
+ }
+ break;
+ case CODE_REJ:
+ /* XXX catastrophic rejects (RXJ-) aren't handled yet. */
+ log(LOG_INFO,
+ SPP_FMT "%s: ignoring RXJ (%s) for proto 0x%x, "
+ "danger will robinson\n",
+ SPP_ARGS(ifp), cp->name,
+ sppp_cp_type_name(h->type), ntohs(*((u_short *)p)));
+ switch (sp->state[cp->protoidx]) {
+ case STATE_CLOSED:
+ case STATE_STOPPED:
+ case STATE_REQ_SENT:
+ case STATE_ACK_SENT:
+ case STATE_CLOSING:
+ case STATE_STOPPING:
+ case STATE_OPENED:
+ break;
+ case STATE_ACK_RCVD:
+ sppp_cp_change_state(cp, sp, STATE_REQ_SENT);
+ break;
+ default:
+ printf(SPP_FMT "%s illegal %s in state %s\n",
+ SPP_ARGS(ifp), cp->name,
+ sppp_cp_type_name(h->type),
+ sppp_state_name(sp->state[cp->protoidx]));
+ ++ifp->if_ierrors;
+ }
+ break;
+ case PROTO_REJ:
+ {
+ int catastrophic;
+ const struct cp *upper;
+ int i;
+ u_int16_t proto;
+
+ catastrophic = 0;
+ upper = NULL;
+ proto = ntohs(*((u_int16_t *)p));
+ for (i = 0; i < IDX_COUNT; i++) {
+ if (cps[i]->proto == proto) {
+ upper = cps[i];
+ break;
+ }
+ }
+ if (upper == NULL)
+ catastrophic++;
+
+ if (catastrophic || debug)
+ log(catastrophic? LOG_INFO: LOG_DEBUG,
+ SPP_FMT "%s: RXJ%c (%s) for proto 0x%x (%s/%s)\n",
+ SPP_ARGS(ifp), cp->name, catastrophic ? '-' : '+',
+ sppp_cp_type_name(h->type), proto,
+ upper ? upper->name : "unknown",
+ upper ? sppp_state_name(sp->state[upper->protoidx]) : "?");
+
+ /*
+ * if we got RXJ+ against conf-req, the peer does not implement
+ * this particular protocol type. terminate the protocol.
+ */
+ if (upper && !catastrophic) {
+ if (sp->state[upper->protoidx] == STATE_REQ_SENT) {
+ upper->Close(sp);
+ break;
+ }
+ }
+
+ /* XXX catastrophic rejects (RXJ-) aren't handled yet. */
+ switch (sp->state[cp->protoidx]) {
+ case STATE_CLOSED:
+ case STATE_STOPPED:
+ case STATE_REQ_SENT:
+ case STATE_ACK_SENT:
+ case STATE_CLOSING:
+ case STATE_STOPPING:
+ case STATE_OPENED:
+ break;
+ case STATE_ACK_RCVD:
+ sppp_cp_change_state(cp, sp, STATE_REQ_SENT);
+ break;
+ default:
+ printf(SPP_FMT "%s illegal %s in state %s\n",
+ SPP_ARGS(ifp), cp->name,
+ sppp_cp_type_name(h->type),
+ sppp_state_name(sp->state[cp->protoidx]));
+ ++ifp->if_ierrors;
+ }
+ break;
+ }
+ case DISC_REQ:
+ if (cp->proto != PPP_LCP)
+ goto illegal;
+ /* Discard the packet. */
+ break;
+ case ECHO_REQ:
+ if (cp->proto != PPP_LCP)
+ goto illegal;
+ if (sp->state[cp->protoidx] != STATE_OPENED) {
+ if (debug)
+ log(-1, SPP_FMT "lcp echo req but lcp closed\n",
+ SPP_ARGS(ifp));
+ ++ifp->if_ierrors;
+ break;
+ }
+ if (len < 8) {
+ if (debug)
+ log(-1, SPP_FMT "invalid lcp echo request "
+ "packet length: %d bytes\n",
+ SPP_ARGS(ifp), len);
+ break;
+ }
+ if ((sp->lcp.opts & (1 << LCP_OPT_MAGIC)) &&
+ ntohl (*(long*)(h+1)) == sp->lcp.magic) {
+ /* Line loopback mode detected. */
+ printf(SPP_FMT "loopback\n", SPP_ARGS(ifp));
+ sp->pp_loopcnt = MAXALIVECNT * 5;
+ if_down (ifp);
+ sppp_qflush (&sp->pp_cpq);
+
+ /* Shut down the PPP link. */
+ /* XXX */
+ lcp.Down(sp);
+ lcp.Up(sp);
+ break;
+ }
+ *(long*)(h+1) = htonl (sp->lcp.magic);
+ if (debug)
+ log(-1, SPP_FMT "got lcp echo req, sending echo rep\n",
+ SPP_ARGS(ifp));
+ sppp_cp_send (sp, PPP_LCP, ECHO_REPLY, h->ident, len-4, h+1);
+ break;
+ case ECHO_REPLY:
+ if (cp->proto != PPP_LCP)
+ goto illegal;
+ if (h->ident != sp->lcp.echoid) {
+ ++ifp->if_ierrors;
+ break;
+ }
+ if (len < 8) {
+ if (debug)
+ log(-1, SPP_FMT "lcp invalid echo reply "
+ "packet length: %d bytes\n",
+ SPP_ARGS(ifp), len);
+ break;
+ }
+ if (debug)
+ log(-1, SPP_FMT "lcp got echo rep\n",
+ SPP_ARGS(ifp));
+ if (!(sp->lcp.opts & (1 << LCP_OPT_MAGIC)) ||
+ ntohl (*(long*)(h+1)) != sp->lcp.magic)
+ sp->pp_alivecnt = 0;
+ break;
+ default:
+ /* Unknown packet type -- send Code-Reject packet. */
+ illegal:
+ if (debug)
+ log(-1, SPP_FMT "%s send code-rej for 0x%x\n",
+ SPP_ARGS(ifp), cp->name, h->type);
+ sppp_cp_send(sp, cp->proto, CODE_REJ,
+ ++sp->pp_seq[cp->protoidx], m->m_pkthdr.len, h);
+ ++ifp->if_ierrors;
+ }
+}
+
+
+/*
+ * The generic part of all Up/Down/Open/Close/TO event handlers.
+ * Basically, the state transition handling in the automaton.
+ */
+static void
+sppp_up_event(const struct cp *cp, struct sppp *sp)
+{
+ STDDCL;
+
+ if (debug)
+ log(LOG_DEBUG, SPP_FMT "%s up(%s)\n",
+ SPP_ARGS(ifp), cp->name,
+ sppp_state_name(sp->state[cp->protoidx]));
+
+ switch (sp->state[cp->protoidx]) {
+ case STATE_INITIAL:
+ sppp_cp_change_state(cp, sp, STATE_CLOSED);
+ break;
+ case STATE_STARTING:
+ sp->rst_counter[cp->protoidx] = sp->lcp.max_configure;
+ (cp->scr)(sp);
+ sppp_cp_change_state(cp, sp, STATE_REQ_SENT);
+ break;
+ default:
+ printf(SPP_FMT "%s illegal up in state %s\n",
+ SPP_ARGS(ifp), cp->name,
+ sppp_state_name(sp->state[cp->protoidx]));
+ }
+}
+
+static void
+sppp_down_event(const struct cp *cp, struct sppp *sp)
+{
+ STDDCL;
+
+ if (debug)
+ log(LOG_DEBUG, SPP_FMT "%s down(%s)\n",
+ SPP_ARGS(ifp), cp->name,
+ sppp_state_name(sp->state[cp->protoidx]));
+
+ switch (sp->state[cp->protoidx]) {
+ case STATE_CLOSED:
+ case STATE_CLOSING:
+ sppp_cp_change_state(cp, sp, STATE_INITIAL);
+ break;
+ case STATE_STOPPED:
+ sppp_cp_change_state(cp, sp, STATE_STARTING);
+ (cp->tls)(sp);
+ break;
+ case STATE_STOPPING:
+ case STATE_REQ_SENT:
+ case STATE_ACK_RCVD:
+ case STATE_ACK_SENT:
+ sppp_cp_change_state(cp, sp, STATE_STARTING);
+ break;
+ case STATE_OPENED:
+ (cp->tld)(sp);
+ sppp_cp_change_state(cp, sp, STATE_STARTING);
+ break;
+ default:
+ printf(SPP_FMT "%s illegal down in state %s\n",
+ SPP_ARGS(ifp), cp->name,
+ sppp_state_name(sp->state[cp->protoidx]));
+ }
+}
+
+
+static void
+sppp_open_event(const struct cp *cp, struct sppp *sp)
+{
+ STDDCL;
+
+ if (debug)
+ log(LOG_DEBUG, SPP_FMT "%s open(%s)\n",
+ SPP_ARGS(ifp), cp->name,
+ sppp_state_name(sp->state[cp->protoidx]));
+
+ switch (sp->state[cp->protoidx]) {
+ case STATE_INITIAL:
+ sppp_cp_change_state(cp, sp, STATE_STARTING);
+ (cp->tls)(sp);
+ break;
+ case STATE_STARTING:
+ break;
+ case STATE_CLOSED:
+ sp->rst_counter[cp->protoidx] = sp->lcp.max_configure;
+ (cp->scr)(sp);
+ sppp_cp_change_state(cp, sp, STATE_REQ_SENT);
+ break;
+ case STATE_STOPPED:
+ /*
+ * Try escaping stopped state. This seems to bite
+ * people occasionally, in particular for IPCP,
+ * presumably following previous IPCP negotiation
+ * aborts. Somehow, we must have missed a Down event
+ * which would have caused a transition into starting
+ * state, so as a bandaid we force the Down event now.
+ * This effectively implements (something like the)
+ * `restart' option mentioned in the state transition
+ * table of RFC 1661.
+ */
+ sppp_cp_change_state(cp, sp, STATE_STARTING);
+ (cp->tls)(sp);
+ break;
+ case STATE_STOPPING:
+ case STATE_REQ_SENT:
+ case STATE_ACK_RCVD:
+ case STATE_ACK_SENT:
+ case STATE_OPENED:
+ break;
+ case STATE_CLOSING:
+ sppp_cp_change_state(cp, sp, STATE_STOPPING);
+ break;
+ }
+}
+
+
+static void
+sppp_close_event(const struct cp *cp, struct sppp *sp)
+{
+ STDDCL;
+
+ if (debug)
+ log(LOG_DEBUG, SPP_FMT "%s close(%s)\n",
+ SPP_ARGS(ifp), cp->name,
+ sppp_state_name(sp->state[cp->protoidx]));
+
+ switch (sp->state[cp->protoidx]) {
+ case STATE_INITIAL:
+ case STATE_CLOSED:
+ case STATE_CLOSING:
+ break;
+ case STATE_STARTING:
+ sppp_cp_change_state(cp, sp, STATE_INITIAL);
+ (cp->tlf)(sp);
+ break;
+ case STATE_STOPPED:
+ sppp_cp_change_state(cp, sp, STATE_CLOSED);
+ break;
+ case STATE_STOPPING:
+ sppp_cp_change_state(cp, sp, STATE_CLOSING);
+ break;
+ case STATE_OPENED:
+ (cp->tld)(sp);
+ /* FALLTHROUGH */
+ case STATE_REQ_SENT:
+ case STATE_ACK_RCVD:
+ case STATE_ACK_SENT:
+ sp->rst_counter[cp->protoidx] = sp->lcp.max_terminate;
+ sppp_cp_send(sp, cp->proto, TERM_REQ,
+ ++sp->pp_seq[cp->protoidx], 0, 0);
+ sppp_cp_change_state(cp, sp, STATE_CLOSING);
+ break;
+ }
+}
+
+static void
+sppp_to_event(const struct cp *cp, struct sppp *sp)
+{
+ STDDCL;
+ int s;
+
+ s = splimp();
+ SPPP_LOCK(sp);
+ if (debug)
+ log(LOG_DEBUG, SPP_FMT "%s TO(%s) rst_counter = %d\n",
+ SPP_ARGS(ifp), cp->name,
+ sppp_state_name(sp->state[cp->protoidx]),
+ sp->rst_counter[cp->protoidx]);
+
+ if (--sp->rst_counter[cp->protoidx] < 0)
+ /* TO- event */
+ switch (sp->state[cp->protoidx]) {
+ case STATE_CLOSING:
+ sppp_cp_change_state(cp, sp, STATE_CLOSED);
+ (cp->tlf)(sp);
+ break;
+ case STATE_STOPPING:
+ sppp_cp_change_state(cp, sp, STATE_STOPPED);
+ (cp->tlf)(sp);
+ break;
+ case STATE_REQ_SENT:
+ case STATE_ACK_RCVD:
+ case STATE_ACK_SENT:
+ sppp_cp_change_state(cp, sp, STATE_STOPPED);
+ (cp->tlf)(sp);
+ break;
+ }
+ else
+ /* TO+ event */
+ switch (sp->state[cp->protoidx]) {
+ case STATE_CLOSING:
+ case STATE_STOPPING:
+ sppp_cp_send(sp, cp->proto, TERM_REQ,
+ ++sp->pp_seq[cp->protoidx], 0, 0);
+ callout_reset(&sp->ch[cp->protoidx], sp->lcp.timeout,
+ cp->TO, (void *)sp);
+ break;
+ case STATE_REQ_SENT:
+ case STATE_ACK_RCVD:
+ (cp->scr)(sp);
+ /* sppp_cp_change_state() will restart the timer */
+ sppp_cp_change_state(cp, sp, STATE_REQ_SENT);
+ break;
+ case STATE_ACK_SENT:
+ (cp->scr)(sp);
+ callout_reset(&sp->ch[cp->protoidx], sp->lcp.timeout,
+ cp->TO, (void *)sp);
+ break;
+ }
+
+ SPPP_UNLOCK(sp);
+ splx(s);
+}
+
+/*
+ * Change the state of a control protocol in the state automaton.
+ * Takes care of starting/stopping the restart timer.
+ */
+static void
+sppp_cp_change_state(const struct cp *cp, struct sppp *sp, int newstate)
+{
+ sp->state[cp->protoidx] = newstate;
+
+ callout_stop (&sp->ch[cp->protoidx]);
+
+ switch (newstate) {
+ case STATE_INITIAL:
+ case STATE_STARTING:
+ case STATE_CLOSED:
+ case STATE_STOPPED:
+ case STATE_OPENED:
+ break;
+ case STATE_CLOSING:
+ case STATE_STOPPING:
+ case STATE_REQ_SENT:
+ case STATE_ACK_RCVD:
+ case STATE_ACK_SENT:
+ callout_reset(&sp->ch[cp->protoidx], sp->lcp.timeout,
+ cp->TO, (void *)sp);
+ break;
+ }
+}
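+
+/*
+ * Illustration only (kept out of the build): how a lower layer is
+ * expected to drive the generic event handlers above.  A hypothetical
+ * driver that has just detected carrier would do something like the
+ * sketch below; lcp.Up() moves LCP from Initial to Closed, and
+ * lcp.Open() then sends the first Configure-Request and arms the
+ * restart timer via sppp_cp_change_state(..., STATE_REQ_SENT).
+ * (Locking via SPPP_LOCK() is omitted for brevity; the function name
+ * is made up.)
+ */
+#if 0
+static void
+example_carrier_up(struct sppp *sp)
+{
+	lcp.Up(sp);	/* Initial -> Closed */
+	lcp.Open(sp);	/* Closed -> Req-Sent: scr, restart timer armed */
+}
+#endif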
+
+/*
+ *--------------------------------------------------------------------------*
+ * *
+ * The LCP implementation. *
+ * *
+ *--------------------------------------------------------------------------*
+ */
+static void
+sppp_pp_up(struct sppp *sp)
+{
+ SPPP_LOCK(sp);
+ lcp.Up(sp);
+ SPPP_UNLOCK(sp);
+}
+
+static void
+sppp_pp_down(struct sppp *sp)
+{
+ SPPP_LOCK(sp);
+ lcp.Down(sp);
+ SPPP_UNLOCK(sp);
+}
+
+static void
+sppp_lcp_init(struct sppp *sp)
+{
+ sp->lcp.opts = (1 << LCP_OPT_MAGIC);
+ sp->lcp.magic = 0;
+ sp->state[IDX_LCP] = STATE_INITIAL;
+ sp->fail_counter[IDX_LCP] = 0;
+ sp->pp_seq[IDX_LCP] = 0;
+ sp->pp_rseq[IDX_LCP] = 0;
+ sp->lcp.protos = 0;
+ sp->lcp.mru = sp->lcp.their_mru = PP_MTU;
+
+ /* Note that these values are relevant for all control protocols */
+ sp->lcp.timeout = 3 * hz;
+ sp->lcp.max_terminate = 2;
+ sp->lcp.max_configure = 10;
+ sp->lcp.max_failure = 10;
+ callout_init(&sp->ch[IDX_LCP], CALLOUT_MPSAFE);
+}
+
+static void
+sppp_lcp_up(struct sppp *sp)
+{
+ STDDCL;
+
+ sp->pp_alivecnt = 0;
+ sp->lcp.opts = (1 << LCP_OPT_MAGIC);
+ sp->lcp.magic = 0;
+ sp->lcp.protos = 0;
+ sp->lcp.mru = sp->lcp.their_mru = PP_MTU;
+ /*
+	 * If we are the authenticator, negotiate LCP_OPT_AUTH_PROTO.
+ */
+ if (sp->hisauth.proto != 0)
+ sp->lcp.opts |= (1 << LCP_OPT_AUTH_PROTO);
+ else
+ sp->lcp.opts &= ~(1 << LCP_OPT_AUTH_PROTO);
+ sp->pp_flags &= ~PP_NEEDAUTH;
+ /*
+ * If this interface is passive or dial-on-demand, and we are
+ * still in Initial state, it means we've got an incoming
+ * call. Activate the interface.
+ */
+ if ((ifp->if_flags & (IFF_AUTO | IFF_PASSIVE)) != 0) {
+ if (debug)
+ log(LOG_DEBUG,
+ SPP_FMT "Up event", SPP_ARGS(ifp));
+ ifp->if_drv_flags |= IFF_DRV_RUNNING;
+ if (sp->state[IDX_LCP] == STATE_INITIAL) {
+ if (debug)
+ log(-1, "(incoming call)\n");
+ sp->pp_flags |= PP_CALLIN;
+ lcp.Open(sp);
+ } else if (debug)
+ log(-1, "\n");
+ } else if ((ifp->if_flags & (IFF_AUTO | IFF_PASSIVE)) == 0 &&
+ (sp->state[IDX_LCP] == STATE_INITIAL)) {
+ ifp->if_drv_flags |= IFF_DRV_RUNNING;
+ lcp.Open(sp);
+ }
+
+ sppp_up_event(&lcp, sp);
+}
+
+static void
+sppp_lcp_down(struct sppp *sp)
+{
+ STDDCL;
+
+ sppp_down_event(&lcp, sp);
+
+ /*
+ * If this is neither a dial-on-demand nor a passive
+ * interface, simulate an ``ifconfig down'' action, so the
+ * administrator can force a redial by another ``ifconfig
+ * up''. XXX For leased line operation, should we immediately
+ * try to reopen the connection here?
+ */
+ if ((ifp->if_flags & (IFF_AUTO | IFF_PASSIVE)) == 0) {
+ log(LOG_INFO,
+ SPP_FMT "Down event, taking interface down.\n",
+ SPP_ARGS(ifp));
+ if_down(ifp);
+ } else {
+ if (debug)
+ log(LOG_DEBUG,
+ SPP_FMT "Down event (carrier loss)\n",
+ SPP_ARGS(ifp));
+ sp->pp_flags &= ~PP_CALLIN;
+ if (sp->state[IDX_LCP] != STATE_INITIAL)
+ lcp.Close(sp);
+ ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
+ }
+}
+
+static void
+sppp_lcp_open(struct sppp *sp)
+{
+ sppp_open_event(&lcp, sp);
+}
+
+static void
+sppp_lcp_close(struct sppp *sp)
+{
+ sppp_close_event(&lcp, sp);
+}
+
+static void
+sppp_lcp_TO(void *cookie)
+{
+ sppp_to_event(&lcp, (struct sppp *)cookie);
+}
+
+/*
+ * Analyze a configure request. Return true if it was agreeable, and
+ * caused action sca, false if it has been rejected or nak'ed, and
+ * caused action scn. (The return value is used to make the state
+ * transition decision in the state automaton.)
+ */
+static int
+sppp_lcp_RCR(struct sppp *sp, struct lcp_header *h, int len)
+{
+ STDDCL;
+ u_char *buf, *r, *p;
+ int origlen, rlen;
+ u_long nmagic;
+ u_short authproto;
+
+ len -= 4;
+ origlen = len;
+ buf = r = malloc (len, M_TEMP, M_NOWAIT);
+ if (! buf)
+ return (0);
+
+ if (debug)
+ log(LOG_DEBUG, SPP_FMT "lcp parse opts: ",
+ SPP_ARGS(ifp));
+
+ /* pass 1: check for things that need to be rejected */
+ p = (void*) (h+1);
+ for (rlen=0; len >= 2 && p[1] >= 2 && len >= p[1];
+ len-=p[1], p+=p[1]) {
+ if (debug)
+ log(-1, " %s ", sppp_lcp_opt_name(*p));
+ switch (*p) {
+ case LCP_OPT_MAGIC:
+ /* Magic number. */
+ if (len >= 6 && p[1] == 6)
+ continue;
+ if (debug)
+ log(-1, "[invalid] ");
+ break;
+ case LCP_OPT_ASYNC_MAP:
+ /* Async control character map. */
+ if (len >= 6 && p[1] == 6)
+ continue;
+ if (debug)
+ log(-1, "[invalid] ");
+ break;
+ case LCP_OPT_MRU:
+ /* Maximum receive unit. */
+ if (len >= 4 && p[1] == 4)
+ continue;
+ if (debug)
+ log(-1, "[invalid] ");
+ break;
+ case LCP_OPT_AUTH_PROTO:
+ if (len < 4) {
+ if (debug)
+ log(-1, "[invalid] ");
+ break;
+ }
+ authproto = (p[2] << 8) + p[3];
+ if (authproto == PPP_CHAP && p[1] != 5) {
+ if (debug)
+ log(-1, "[invalid chap len] ");
+ break;
+ }
+ if (sp->myauth.proto == 0) {
+ /* we are not configured to do auth */
+ if (debug)
+ log(-1, "[not configured] ");
+ break;
+ }
+ /*
+			 * The remote wants us to authenticate; remember
+			 * this, so we stay in PHASE_AUTHENTICATE after
+			 * LCP has come up.
+ */
+ sp->pp_flags |= PP_NEEDAUTH;
+ continue;
+ default:
+ /* Others not supported. */
+ if (debug)
+ log(-1, "[rej] ");
+ break;
+ }
+ /* Add the option to rejected list. */
+ bcopy (p, r, p[1]);
+ r += p[1];
+ rlen += p[1];
+ }
+ if (rlen) {
+ if (debug)
+ log(-1, " send conf-rej\n");
+ sppp_cp_send (sp, PPP_LCP, CONF_REJ, h->ident, rlen, buf);
+ return 0;
+ } else if (debug)
+ log(-1, "\n");
+
+ /*
+ * pass 2: check for option values that are unacceptable and
+ * thus require to be nak'ed.
+ */
+ if (debug)
+ log(LOG_DEBUG, SPP_FMT "lcp parse opt values: ",
+ SPP_ARGS(ifp));
+
+ p = (void*) (h+1);
+ len = origlen;
+ for (rlen=0; len >= 2 && p[1] >= 2 && len >= p[1];
+ len-=p[1], p+=p[1]) {
+ if (debug)
+ log(-1, " %s ", sppp_lcp_opt_name(*p));
+ switch (*p) {
+ case LCP_OPT_MAGIC:
+ /* Magic number -- extract. */
+ nmagic = (u_long)p[2] << 24 |
+ (u_long)p[3] << 16 | p[4] << 8 | p[5];
+ if (nmagic != sp->lcp.magic) {
+ sp->pp_loopcnt = 0;
+ if (debug)
+ log(-1, "0x%lx ", nmagic);
+ continue;
+ }
+ if (debug && sp->pp_loopcnt < MAXALIVECNT*5)
+ log(-1, "[glitch] ");
+ ++sp->pp_loopcnt;
+ /*
+ * We negate our magic here, and NAK it. If
+			 * we see it later in a NAK packet, we
+ * suggest a new one.
+ */
+ nmagic = ~sp->lcp.magic;
+ /* Gonna NAK it. */
+ p[2] = nmagic >> 24;
+ p[3] = nmagic >> 16;
+ p[4] = nmagic >> 8;
+ p[5] = nmagic;
+ break;
+
+ case LCP_OPT_ASYNC_MAP:
+ /*
+ * Async control character map -- just ignore it.
+ *
+ * Quote from RFC 1662, chapter 6:
+ * To enable this functionality, synchronous PPP
+ * implementations MUST always respond to the
+ * Async-Control-Character-Map Configuration
+ * Option with the LCP Configure-Ack. However,
+ * acceptance of the Configuration Option does
+ * not imply that the synchronous implementation
+ * will do any ACCM mapping. Instead, all such
+ * octet mapping will be performed by the
+ * asynchronous-to-synchronous converter.
+ */
+ continue;
+
+ case LCP_OPT_MRU:
+ /*
+ * Maximum receive unit. Always agreeable,
+			 * but currently ignored.
+ */
+ sp->lcp.their_mru = p[2] * 256 + p[3];
+ if (debug)
+ log(-1, "%lu ", sp->lcp.their_mru);
+ continue;
+
+ case LCP_OPT_AUTH_PROTO:
+ authproto = (p[2] << 8) + p[3];
+ if (sp->myauth.proto != authproto) {
+ /* not agreed, nak */
+ if (debug)
+ log(-1, "[mine %s != his %s] ",
+					    sppp_proto_name(sp->myauth.proto),
+ sppp_proto_name(authproto));
+ p[2] = sp->myauth.proto >> 8;
+ p[3] = sp->myauth.proto;
+ break;
+ }
+ if (authproto == PPP_CHAP && p[4] != CHAP_MD5) {
+ if (debug)
+ log(-1, "[chap not MD5] ");
+ p[4] = CHAP_MD5;
+ break;
+ }
+ continue;
+ }
+ /* Add the option to nak'ed list. */
+ bcopy (p, r, p[1]);
+ r += p[1];
+ rlen += p[1];
+ }
+ if (rlen) {
+ /*
+ * Local and remote magics equal -- loopback?
+ */
+ if (sp->pp_loopcnt >= MAXALIVECNT*5) {
+ if (sp->pp_loopcnt == MAXALIVECNT*5)
+ printf (SPP_FMT "loopback\n",
+ SPP_ARGS(ifp));
+ if (ifp->if_flags & IFF_UP) {
+ if_down(ifp);
+ sppp_qflush(&sp->pp_cpq);
+ /* XXX ? */
+ lcp.Down(sp);
+ lcp.Up(sp);
+ }
+ } else if (!sp->pp_loopcnt &&
+ ++sp->fail_counter[IDX_LCP] >= sp->lcp.max_failure) {
+ if (debug)
+ log(-1, " max_failure (%d) exceeded, "
+ "send conf-rej\n",
+ sp->lcp.max_failure);
+ sppp_cp_send(sp, PPP_LCP, CONF_REJ, h->ident, rlen, buf);
+ } else {
+ if (debug)
+ log(-1, " send conf-nak\n");
+ sppp_cp_send (sp, PPP_LCP, CONF_NAK, h->ident, rlen, buf);
+ }
+ } else {
+ if (debug)
+ log(-1, " send conf-ack\n");
+ sp->fail_counter[IDX_LCP] = 0;
+ sp->pp_loopcnt = 0;
+ sppp_cp_send (sp, PPP_LCP, CONF_ACK,
+ h->ident, origlen, h+1);
+ }
+
+ free (buf, M_TEMP);
+ return (rlen == 0);
+}
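+
+/*
+ * Illustration only (kept out of the build): the TLV walk shared by
+ * all the option parsers in this file.  Every option is encoded as
+ * <type, length, value...>, the length octet covering the whole
+ * option including the type and length octets themselves, which is
+ * why the loops above and below step by p[1].  The function name is
+ * made up for the example.
+ */
+#if 0
+static void
+example_walk_options(const u_char *p, int len)
+{
+	for (; len >= 2 && p[1] >= 2 && len >= p[1];
+	    len -= p[1], p += p[1])
+		printf("option type %d, total length %d\n", p[0], p[1]);
+}
+#endif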
+
+/*
+ * Analyze the LCP Configure-Reject option list, and adjust our
+ * negotiation.
+ */
+static void
+sppp_lcp_RCN_rej(struct sppp *sp, struct lcp_header *h, int len)
+{
+ STDDCL;
+ u_char *buf, *p;
+
+ len -= 4;
+ buf = malloc (len, M_TEMP, M_NOWAIT);
+ if (!buf)
+ return;
+
+ if (debug)
+ log(LOG_DEBUG, SPP_FMT "lcp rej opts: ",
+ SPP_ARGS(ifp));
+
+ p = (void*) (h+1);
+ for (; len >= 2 && p[1] >= 2 && len >= p[1];
+ len -= p[1], p += p[1]) {
+ if (debug)
+ log(-1, " %s ", sppp_lcp_opt_name(*p));
+ switch (*p) {
+ case LCP_OPT_MAGIC:
+ /* Magic number -- can't use it, use 0 */
+ sp->lcp.opts &= ~(1 << LCP_OPT_MAGIC);
+ sp->lcp.magic = 0;
+ break;
+ case LCP_OPT_MRU:
+ /*
+			 * Should not be rejected anyway, since we only
+			 * negotiate an MRU if explicitly requested by
+			 * the peer.
+ */
+ sp->lcp.opts &= ~(1 << LCP_OPT_MRU);
+ break;
+ case LCP_OPT_AUTH_PROTO:
+ /*
+			 * The peer doesn't want to authenticate itself;
+			 * deny, unless this is a dial-out call and
+			 * AUTHFLAG_NOCALLOUT is set.
+ */
+ if ((sp->pp_flags & PP_CALLIN) == 0 &&
+ (sp->hisauth.flags & AUTHFLAG_NOCALLOUT) != 0) {
+ if (debug)
+ log(-1, "[don't insist on auth "
+ "for callout]");
+ sp->lcp.opts &= ~(1 << LCP_OPT_AUTH_PROTO);
+ break;
+ }
+ if (debug)
+ log(-1, "[access denied]\n");
+ lcp.Close(sp);
+ break;
+ }
+ }
+ if (debug)
+ log(-1, "\n");
+ free (buf, M_TEMP);
+ return;
+}
+
+/*
+ * Analyze the LCP Configure-NAK option list, and adjust our
+ * negotiation.
+ */
+static void
+sppp_lcp_RCN_nak(struct sppp *sp, struct lcp_header *h, int len)
+{
+ STDDCL;
+ u_char *buf, *p;
+ u_long magic;
+
+ len -= 4;
+ buf = malloc (len, M_TEMP, M_NOWAIT);
+ if (!buf)
+ return;
+
+ if (debug)
+ log(LOG_DEBUG, SPP_FMT "lcp nak opts: ",
+ SPP_ARGS(ifp));
+
+ p = (void*) (h+1);
+ for (; len >= 2 && p[1] >= 2 && len >= p[1];
+ len -= p[1], p += p[1]) {
+ if (debug)
+ log(-1, " %s ", sppp_lcp_opt_name(*p));
+ switch (*p) {
+ case LCP_OPT_MAGIC:
+ /* Magic number -- renegotiate */
+ if ((sp->lcp.opts & (1 << LCP_OPT_MAGIC)) &&
+ len >= 6 && p[1] == 6) {
+ magic = (u_long)p[2] << 24 |
+ (u_long)p[3] << 16 | p[4] << 8 | p[5];
+ /*
+ * If the remote magic is our negated one,
+ * this looks like a loopback problem.
+ * Suggest a new magic to make sure.
+ */
+ if (magic == ~sp->lcp.magic) {
+ if (debug)
+ log(-1, "magic glitch ");
+ sp->lcp.magic = random();
+ } else {
+ sp->lcp.magic = magic;
+ if (debug)
+ log(-1, "%lu ", magic);
+ }
+ }
+ break;
+ case LCP_OPT_MRU:
+ /*
+ * Peer wants to advise us to negotiate an MRU.
+			 * Agree on it if it's reasonable, or use the
+			 * default otherwise.
+ */
+ if (len >= 4 && p[1] == 4) {
+ u_int mru = p[2] * 256 + p[3];
+ if (debug)
+ log(-1, "%d ", mru);
+ if (mru < PP_MTU || mru > PP_MAX_MRU)
+ mru = PP_MTU;
+ sp->lcp.mru = mru;
+ sp->lcp.opts |= (1 << LCP_OPT_MRU);
+ }
+ break;
+ case LCP_OPT_AUTH_PROTO:
+ /*
+ * Peer doesn't like our authentication method,
+ * deny.
+ */
+ if (debug)
+ log(-1, "[access denied]\n");
+ lcp.Close(sp);
+ break;
+ }
+ }
+ if (debug)
+ log(-1, "\n");
+ free (buf, M_TEMP);
+ return;
+}
+
+static void
+sppp_lcp_tlu(struct sppp *sp)
+{
+ STDDCL;
+ int i;
+ u_long mask;
+
+ /* XXX ? */
+ if (! (ifp->if_flags & IFF_UP) &&
+ (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
+ /* Coming out of loopback mode. */
+ if_up(ifp);
+ printf (SPP_FMT "up\n", SPP_ARGS(ifp));
+ }
+
+ for (i = 0; i < IDX_COUNT; i++)
+ if ((cps[i])->flags & CP_QUAL)
+ (cps[i])->Open(sp);
+
+ if ((sp->lcp.opts & (1 << LCP_OPT_AUTH_PROTO)) != 0 ||
+ (sp->pp_flags & PP_NEEDAUTH) != 0)
+ sp->pp_phase = PHASE_AUTHENTICATE;
+ else
+ sp->pp_phase = PHASE_NETWORK;
+
+ if (debug)
+ log(LOG_DEBUG, SPP_FMT "phase %s\n", SPP_ARGS(ifp),
+ sppp_phase_name(sp->pp_phase));
+
+ /*
+ * Open all authentication protocols. This is even required
+ * if we already proceeded to network phase, since it might be
+ * that remote wants us to authenticate, so we might have to
+ * send a PAP request. Undesired authentication protocols
+ * don't do anything when they get an Open event.
+ */
+ for (i = 0; i < IDX_COUNT; i++)
+ if ((cps[i])->flags & CP_AUTH)
+ (cps[i])->Open(sp);
+
+ if (sp->pp_phase == PHASE_NETWORK) {
+ /* Notify all NCPs. */
+ for (i = 0; i < IDX_COUNT; i++)
+ if (((cps[i])->flags & CP_NCP) &&
+ /*
+ * XXX
+ * Hack to administratively disable IPv6 if
+ * not desired. Perhaps we should have another
+ * flag for this, but right now, we can make
+ * all struct cp's read/only.
+ */
+ (cps[i] != &ipv6cp ||
+ (sp->confflags & CONF_ENABLE_IPV6)))
+ (cps[i])->Open(sp);
+ }
+
+ /* Send Up events to all started protos. */
+ for (i = 0, mask = 1; i < IDX_COUNT; i++, mask <<= 1)
+ if ((sp->lcp.protos & mask) && ((cps[i])->flags & CP_LCP) == 0)
+ (cps[i])->Up(sp);
+
+ /* notify low-level driver of state change */
+ if (sp->pp_chg)
+ sp->pp_chg(sp, (int)sp->pp_phase);
+
+ if (sp->pp_phase == PHASE_NETWORK)
+ /* if no NCP is starting, close down */
+ sppp_lcp_check_and_close(sp);
+}
+
+static void
+sppp_lcp_tld(struct sppp *sp)
+{
+ STDDCL;
+ int i;
+ u_long mask;
+
+ sp->pp_phase = PHASE_TERMINATE;
+
+ if (debug)
+ log(LOG_DEBUG, SPP_FMT "phase %s\n", SPP_ARGS(ifp),
+ sppp_phase_name(sp->pp_phase));
+
+ /*
+ * Take upper layers down. We send the Down event first and
+ * the Close second to prevent the upper layers from sending
+ * ``a flurry of terminate-request packets'', as the RFC
+ * describes it.
+ */
+ for (i = 0, mask = 1; i < IDX_COUNT; i++, mask <<= 1)
+ if ((sp->lcp.protos & mask) && ((cps[i])->flags & CP_LCP) == 0) {
+ (cps[i])->Down(sp);
+ (cps[i])->Close(sp);
+ }
+}
+
+static void
+sppp_lcp_tls(struct sppp *sp)
+{
+ STDDCL;
+
+ sp->pp_phase = PHASE_ESTABLISH;
+
+ if (debug)
+ log(LOG_DEBUG, SPP_FMT "phase %s\n", SPP_ARGS(ifp),
+ sppp_phase_name(sp->pp_phase));
+
+ /* Notify lower layer if desired. */
+ if (sp->pp_tls)
+ (sp->pp_tls)(sp);
+ else
+ (sp->pp_up)(sp);
+}
+
+static void
+sppp_lcp_tlf(struct sppp *sp)
+{
+ STDDCL;
+
+ sp->pp_phase = PHASE_DEAD;
+ if (debug)
+ log(LOG_DEBUG, SPP_FMT "phase %s\n", SPP_ARGS(ifp),
+ sppp_phase_name(sp->pp_phase));
+
+ /* Notify lower layer if desired. */
+ if (sp->pp_tlf)
+ (sp->pp_tlf)(sp);
+ else
+ (sp->pp_down)(sp);
+}
+
+static void
+sppp_lcp_scr(struct sppp *sp)
+{
+ char opt[6 /* magicnum */ + 4 /* mru */ + 5 /* chap */];
+ int i = 0;
+ u_short authproto;
+
+ if (sp->lcp.opts & (1 << LCP_OPT_MAGIC)) {
+ if (! sp->lcp.magic)
+ sp->lcp.magic = random();
+ opt[i++] = LCP_OPT_MAGIC;
+ opt[i++] = 6;
+ opt[i++] = sp->lcp.magic >> 24;
+ opt[i++] = sp->lcp.magic >> 16;
+ opt[i++] = sp->lcp.magic >> 8;
+ opt[i++] = sp->lcp.magic;
+ }
+
+ if (sp->lcp.opts & (1 << LCP_OPT_MRU)) {
+ opt[i++] = LCP_OPT_MRU;
+ opt[i++] = 4;
+ opt[i++] = sp->lcp.mru >> 8;
+ opt[i++] = sp->lcp.mru;
+ }
+
+ if (sp->lcp.opts & (1 << LCP_OPT_AUTH_PROTO)) {
+ authproto = sp->hisauth.proto;
+ opt[i++] = LCP_OPT_AUTH_PROTO;
+ opt[i++] = authproto == PPP_CHAP? 5: 4;
+ opt[i++] = authproto >> 8;
+ opt[i++] = authproto;
+ if (authproto == PPP_CHAP)
+ opt[i++] = CHAP_MD5;
+ }
+
+ sp->confid[IDX_LCP] = ++sp->pp_seq[IDX_LCP];
+ sppp_cp_send (sp, PPP_LCP, CONF_REQ, sp->confid[IDX_LCP], i, &opt);
+}
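+
+/*
+ * For illustration: with only the magic number option enabled (the
+ * default set by sppp_lcp_init()), the option buffer built above is
+ * six bytes.  For a magic of 0x12345678 it would read, in hex:
+ *
+ *	05 06 12 34 56 78
+ *	|  |  `-----------	magic number, network byte order
+ *	|  `--------------	option length, type/length included
+ *	`-----------------	LCP_OPT_MAGIC (option type 5)
+ */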
+
+/*
+ * Check the open NCPs, return true if at least one NCP is open.
+ */
+static int
+sppp_ncp_check(struct sppp *sp)
+{
+ int i, mask;
+
+ for (i = 0, mask = 1; i < IDX_COUNT; i++, mask <<= 1)
+ if ((sp->lcp.protos & mask) && (cps[i])->flags & CP_NCP)
+ return 1;
+ return 0;
+}
+
+/*
+ * Re-check the open NCPs and see if we should terminate the link.
+ * Called by the NCPs during their tlf action handling.
+ */
+static void
+sppp_lcp_check_and_close(struct sppp *sp)
+{
+
+ if (sp->pp_phase < PHASE_NETWORK)
+ /* don't bother, we are already going down */
+ return;
+
+ if (sppp_ncp_check(sp))
+ return;
+
+ lcp.Close(sp);
+}
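+
+/*
+ * Illustration only (kept out of the build): how an NCP takes part in
+ * the lcp.protos bookkeeping used by the two functions above.  The
+ * tls/tlf handlers of each NCP set and clear the protocol's bit, as
+ * sppp_ipcp_tls() and sppp_ipcp_tlf() do below; once the last bit is
+ * gone, sppp_lcp_check_and_close() shuts the link down.
+ */
+#if 0
+static void
+example_ncp_tls(struct sppp *sp)
+{
+	sp->lcp.protos |= (1 << IDX_IPCP);	/* keep LCP alive */
+}
+
+static void
+example_ncp_tlf(struct sppp *sp)
+{
+	sp->lcp.protos &= ~(1 << IDX_IPCP);
+	sppp_lcp_check_and_close(sp);	/* maybe close the link */
+}
+#endif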
+
+/*
+ *--------------------------------------------------------------------------*
+ * *
+ * The IPCP implementation. *
+ * *
+ *--------------------------------------------------------------------------*
+ */
+
+#ifdef INET
+static void
+sppp_ipcp_init(struct sppp *sp)
+{
+ sp->ipcp.opts = 0;
+ sp->ipcp.flags = 0;
+ sp->state[IDX_IPCP] = STATE_INITIAL;
+ sp->fail_counter[IDX_IPCP] = 0;
+ sp->pp_seq[IDX_IPCP] = 0;
+ sp->pp_rseq[IDX_IPCP] = 0;
+ callout_init(&sp->ch[IDX_IPCP], CALLOUT_MPSAFE);
+}
+
+static void
+sppp_ipcp_up(struct sppp *sp)
+{
+ sppp_up_event(&ipcp, sp);
+}
+
+static void
+sppp_ipcp_down(struct sppp *sp)
+{
+ sppp_down_event(&ipcp, sp);
+}
+
+static void
+sppp_ipcp_open(struct sppp *sp)
+{
+ STDDCL;
+ u_long myaddr, hisaddr;
+
+ sp->ipcp.flags &= ~(IPCP_HISADDR_SEEN | IPCP_MYADDR_SEEN |
+ IPCP_MYADDR_DYN | IPCP_VJ);
+ sp->ipcp.opts = 0;
+
+ sppp_get_ip_addrs(sp, &myaddr, &hisaddr, 0);
+ /*
+ * If we don't have his address, this probably means our
+ * interface doesn't want to talk IP at all. (This could
+ * be the case if somebody wants to speak only IPX, for
+ * example.) Don't open IPCP in this case.
+ */
+ if (hisaddr == 0L) {
+ /* XXX this message should go away */
+ if (debug)
+ log(LOG_DEBUG, SPP_FMT "ipcp_open(): no IP interface\n",
+ SPP_ARGS(ifp));
+ return;
+ }
+ if (myaddr == 0L) {
+ /*
+		 * We don't have an assigned address, so we need to
+		 * negotiate one.
+ */
+ sp->ipcp.flags |= IPCP_MYADDR_DYN;
+ sp->ipcp.opts |= (1 << IPCP_OPT_ADDRESS);
+ } else
+ sp->ipcp.flags |= IPCP_MYADDR_SEEN;
+ if (sp->confflags & CONF_ENABLE_VJ) {
+ sp->ipcp.opts |= (1 << IPCP_OPT_COMPRESSION);
+ sp->ipcp.max_state = MAX_STATES - 1;
+ sp->ipcp.compress_cid = 1;
+ }
+ sppp_open_event(&ipcp, sp);
+}
+
+static void
+sppp_ipcp_close(struct sppp *sp)
+{
+ sppp_close_event(&ipcp, sp);
+ if (sp->ipcp.flags & IPCP_MYADDR_DYN)
+ /*
+ * My address was dynamic, clear it again.
+ */
+ sppp_set_ip_addr(sp, 0L);
+}
+
+static void
+sppp_ipcp_TO(void *cookie)
+{
+ sppp_to_event(&ipcp, (struct sppp *)cookie);
+}
+
+/*
+ * Analyze a configure request. Return true if it was agreeable, and
+ * caused action sca, false if it has been rejected or nak'ed, and
+ * caused action scn. (The return value is used to make the state
+ * transition decision in the state automaton.)
+ */
+static int
+sppp_ipcp_RCR(struct sppp *sp, struct lcp_header *h, int len)
+{
+ u_char *buf, *r, *p;
+ struct ifnet *ifp = SP2IFP(sp);
+ int rlen, origlen, debug = ifp->if_flags & IFF_DEBUG;
+ u_long hisaddr, desiredaddr;
+ int gotmyaddr = 0;
+ int desiredcomp;
+
+ len -= 4;
+ origlen = len;
+ /*
+ * Make sure to allocate a buf that can at least hold a
+ * conf-nak with an `address' option. We might need it below.
+ */
+ buf = r = malloc ((len < 6? 6: len), M_TEMP, M_NOWAIT);
+ if (! buf)
+ return (0);
+
+ /* pass 1: see if we can recognize them */
+ if (debug)
+ log(LOG_DEBUG, SPP_FMT "ipcp parse opts: ",
+ SPP_ARGS(ifp));
+ p = (void*) (h+1);
+ for (rlen=0; len >= 2 && p[1] >= 2 && len >= p[1];
+ len-=p[1], p+=p[1]) {
+ if (debug)
+ log(-1, " %s ", sppp_ipcp_opt_name(*p));
+ switch (*p) {
+ case IPCP_OPT_COMPRESSION:
+ if (!(sp->confflags & CONF_ENABLE_VJ)) {
+ /* VJ compression administratively disabled */
+ if (debug)
+ log(-1, "[locally disabled] ");
+ break;
+ }
+ /*
+ * In theory, we should only conf-rej an
+ * option that is shorter than RFC 1618
+ * requires (i.e. < 4), and should conf-nak
+ * anything else that is not VJ. However,
+ * since our algorithm always uses the
+ * original option to NAK it with new values,
+ * things would become more complicated. In
+			 * practice, the only commonly implemented IP
+ * compression option is VJ anyway, so the
+ * difference is negligible.
+ */
+ if (len >= 6 && p[1] == 6) {
+ /*
+ * correctly formed compression option
+ * that could be VJ compression
+ */
+ continue;
+ }
+ if (debug)
+ log(-1,
+ "optlen %d [invalid/unsupported] ",
+ p[1]);
+ break;
+ case IPCP_OPT_ADDRESS:
+ if (len >= 6 && p[1] == 6) {
+ /* correctly formed address option */
+ continue;
+ }
+ if (debug)
+ log(-1, "[invalid] ");
+ break;
+ default:
+ /* Others not supported. */
+ if (debug)
+ log(-1, "[rej] ");
+ break;
+ }
+ /* Add the option to rejected list. */
+ bcopy (p, r, p[1]);
+ r += p[1];
+ rlen += p[1];
+ }
+ if (rlen) {
+ if (debug)
+ log(-1, " send conf-rej\n");
+ sppp_cp_send (sp, PPP_IPCP, CONF_REJ, h->ident, rlen, buf);
+ return 0;
+ } else if (debug)
+ log(-1, "\n");
+
+ /* pass 2: parse option values */
+ sppp_get_ip_addrs(sp, 0, &hisaddr, 0);
+ if (debug)
+ log(LOG_DEBUG, SPP_FMT "ipcp parse opt values: ",
+ SPP_ARGS(ifp));
+ p = (void*) (h+1);
+ len = origlen;
+ for (rlen=0; len >= 2 && p[1] >= 2 && len >= p[1];
+ len-=p[1], p+=p[1]) {
+ if (debug)
+ log(-1, " %s ", sppp_ipcp_opt_name(*p));
+ switch (*p) {
+ case IPCP_OPT_COMPRESSION:
+ desiredcomp = p[2] << 8 | p[3];
+ /* We only support VJ */
+ if (desiredcomp == IPCP_COMP_VJ) {
+ if (debug)
+ log(-1, "VJ [ack] ");
+ sp->ipcp.flags |= IPCP_VJ;
+ sl_compress_init(sp->pp_comp, p[4]);
+ sp->ipcp.max_state = p[4];
+ sp->ipcp.compress_cid = p[5];
+ continue;
+ }
+ if (debug)
+ log(-1,
+ "compproto %#04x [not supported] ",
+ desiredcomp);
+ p[2] = IPCP_COMP_VJ >> 8;
+ p[3] = IPCP_COMP_VJ;
+ p[4] = sp->ipcp.max_state;
+ p[5] = sp->ipcp.compress_cid;
+ break;
+ case IPCP_OPT_ADDRESS:
+			/* This is the address the peer wants on its end */
+ desiredaddr = p[2] << 24 | p[3] << 16 |
+ p[4] << 8 | p[5];
+ if (desiredaddr == hisaddr ||
+ (hisaddr >= 1 && hisaddr <= 254 && desiredaddr != 0)) {
+ /*
+				 * The peer's address matches our value,
+				 * or we have set our value to 0.0.0.* to
+				 * indicate that we do not really care;
+				 * either way this is agreeable, so
+				 * conf-ack it.
+ */
+ if (debug)
+ log(-1, "%s [ack] ",
+ sppp_dotted_quad(hisaddr));
+ /* record that we've seen it already */
+ sp->ipcp.flags |= IPCP_HISADDR_SEEN;
+ continue;
+ }
+ /*
+			 * The address wasn't agreeable.  Either the peer
+			 * sent us 0.0.0.0, asking us to assign it an
+			 * address, or it sent some other address that
+			 * doesn't match our value.  In either case, we
+			 * conf-nak it with our value.
+ * XXX: we should "rej" if hisaddr == 0
+ */
+ if (debug) {
+ if (desiredaddr == 0)
+ log(-1, "[addr requested] ");
+ else
+ log(-1, "%s [not agreed] ",
+ sppp_dotted_quad(desiredaddr));
+
+ }
+ p[2] = hisaddr >> 24;
+ p[3] = hisaddr >> 16;
+ p[4] = hisaddr >> 8;
+ p[5] = hisaddr;
+ break;
+ }
+ /* Add the option to nak'ed list. */
+ bcopy (p, r, p[1]);
+ r += p[1];
+ rlen += p[1];
+ }
+
+ /*
+	 * If we are about to conf-ack the request but haven't seen
+	 * the peer's address so far, conf-nak it instead, with the
+	 * `address' option present and our idea of the peer's address
+	 * filled in there, to request negotiation of both addresses.
+	 *
+	 * XXX This can result in an endless req - nak loop if the peer
+	 * doesn't want to send us its address.  Q: What should we do
+ * about it? XXX A: implement the max-failure counter.
+ */
+ if (rlen == 0 && !(sp->ipcp.flags & IPCP_HISADDR_SEEN) && !gotmyaddr) {
+ buf[0] = IPCP_OPT_ADDRESS;
+ buf[1] = 6;
+ buf[2] = hisaddr >> 24;
+ buf[3] = hisaddr >> 16;
+ buf[4] = hisaddr >> 8;
+ buf[5] = hisaddr;
+ rlen = 6;
+ if (debug)
+ log(-1, "still need hisaddr ");
+ }
+
+ if (rlen) {
+ if (debug)
+ log(-1, " send conf-nak\n");
+ sppp_cp_send (sp, PPP_IPCP, CONF_NAK, h->ident, rlen, buf);
+ } else {
+ if (debug)
+ log(-1, " send conf-ack\n");
+ sppp_cp_send (sp, PPP_IPCP, CONF_ACK,
+ h->ident, origlen, h+1);
+ }
+
+ free (buf, M_TEMP);
+ return (rlen == 0);
+}
+
+/*
+ * Analyze the IPCP Configure-Reject option list, and adjust our
+ * negotiation.
+ */
+static void
+sppp_ipcp_RCN_rej(struct sppp *sp, struct lcp_header *h, int len)
+{
+ u_char *buf, *p;
+ struct ifnet *ifp = SP2IFP(sp);
+ int debug = ifp->if_flags & IFF_DEBUG;
+
+ len -= 4;
+ buf = malloc (len, M_TEMP, M_NOWAIT);
+ if (!buf)
+ return;
+
+ if (debug)
+ log(LOG_DEBUG, SPP_FMT "ipcp rej opts: ",
+ SPP_ARGS(ifp));
+
+ p = (void*) (h+1);
+ for (; len >= 2 && p[1] >= 2 && len >= p[1];
+ len -= p[1], p += p[1]) {
+ if (debug)
+ log(-1, " %s ", sppp_ipcp_opt_name(*p));
+ switch (*p) {
+ case IPCP_OPT_COMPRESSION:
+ sp->ipcp.opts &= ~(1 << IPCP_OPT_COMPRESSION);
+ break;
+ case IPCP_OPT_ADDRESS:
+ /*
+			 * Peer doesn't grok the address option.  This is
+			 * bad.  XXX Would it be better to give up here?
+			 * XXX We could try the old "addresses" option...
+ */
+ sp->ipcp.opts &= ~(1 << IPCP_OPT_ADDRESS);
+ break;
+ }
+ }
+ if (debug)
+ log(-1, "\n");
+ free (buf, M_TEMP);
+ return;
+}
+
+/*
+ * Analyze the IPCP Configure-NAK option list, and adjust our
+ * negotiation.
+ */
+static void
+sppp_ipcp_RCN_nak(struct sppp *sp, struct lcp_header *h, int len)
+{
+ u_char *buf, *p;
+ struct ifnet *ifp = SP2IFP(sp);
+ int debug = ifp->if_flags & IFF_DEBUG;
+ int desiredcomp;
+ u_long wantaddr;
+
+ len -= 4;
+ buf = malloc (len, M_TEMP, M_NOWAIT);
+ if (!buf)
+ return;
+
+ if (debug)
+ log(LOG_DEBUG, SPP_FMT "ipcp nak opts: ",
+ SPP_ARGS(ifp));
+
+ p = (void*) (h+1);
+ for (; len >= 2 && p[1] >= 2 && len >= p[1];
+ len -= p[1], p += p[1]) {
+ if (debug)
+ log(-1, " %s ", sppp_ipcp_opt_name(*p));
+ switch (*p) {
+ case IPCP_OPT_COMPRESSION:
+ if (len >= 6 && p[1] == 6) {
+ desiredcomp = p[2] << 8 | p[3];
+ if (debug)
+ log(-1, "[wantcomp %#04x] ",
+ desiredcomp);
+ if (desiredcomp == IPCP_COMP_VJ) {
+ sl_compress_init(sp->pp_comp, p[4]);
+ sp->ipcp.max_state = p[4];
+ sp->ipcp.compress_cid = p[5];
+ if (debug)
+ log(-1, "[agree] ");
+ } else
+ sp->ipcp.opts &=
+ ~(1 << IPCP_OPT_COMPRESSION);
+ }
+ break;
+ case IPCP_OPT_ADDRESS:
+ /*
+			 * The peer doesn't like our local IP address and
+			 * has suggested one instead.  See whether we can
+			 * use it.
+ */
+ if (len >= 6 && p[1] == 6) {
+ wantaddr = p[2] << 24 | p[3] << 16 |
+ p[4] << 8 | p[5];
+ sp->ipcp.opts |= (1 << IPCP_OPT_ADDRESS);
+ if (debug)
+ log(-1, "[wantaddr %s] ",
+ sppp_dotted_quad(wantaddr));
+ /*
+				 * When doing dynamic address assignment,
+				 * we accept the offer.  Otherwise, we
+				 * ignore it and thus continue to negotiate
+				 * our already existing value.
+				 * XXX: Bogus; if the peer said no once, it
+				 * will just say no again, so we might as
+				 * well give up.
+ */
+ if (sp->ipcp.flags & IPCP_MYADDR_DYN) {
+ sppp_set_ip_addr(sp, wantaddr);
+ if (debug)
+ log(-1, "[agree] ");
+ sp->ipcp.flags |= IPCP_MYADDR_SEEN;
+ }
+ }
+ break;
+ }
+ }
+ if (debug)
+ log(-1, "\n");
+ free (buf, M_TEMP);
+ return;
+}
+
+static void
+sppp_ipcp_tlu(struct sppp *sp)
+{
+ /* we are up - notify isdn daemon */
+ if (sp->pp_con)
+ sp->pp_con(sp);
+}
+
+static void
+sppp_ipcp_tld(struct sppp *sp)
+{
+}
+
+static void
+sppp_ipcp_tls(struct sppp *sp)
+{
+ /* indicate to LCP that it must stay alive */
+ sp->lcp.protos |= (1 << IDX_IPCP);
+}
+
+static void
+sppp_ipcp_tlf(struct sppp *sp)
+{
+ /* we no longer need LCP */
+ sp->lcp.protos &= ~(1 << IDX_IPCP);
+ sppp_lcp_check_and_close(sp);
+}
+
+static void
+sppp_ipcp_scr(struct sppp *sp)
+{
+ char opt[6 /* compression */ + 6 /* address */];
+ u_long ouraddr;
+ int i = 0;
+
+ if (sp->ipcp.opts & (1 << IPCP_OPT_COMPRESSION)) {
+ opt[i++] = IPCP_OPT_COMPRESSION;
+ opt[i++] = 6;
+ opt[i++] = IPCP_COMP_VJ >> 8;
+ opt[i++] = IPCP_COMP_VJ;
+ opt[i++] = sp->ipcp.max_state;
+ opt[i++] = sp->ipcp.compress_cid;
+ }
+ if (sp->ipcp.opts & (1 << IPCP_OPT_ADDRESS)) {
+ sppp_get_ip_addrs(sp, &ouraddr, 0, 0);
+ opt[i++] = IPCP_OPT_ADDRESS;
+ opt[i++] = 6;
+ opt[i++] = ouraddr >> 24;
+ opt[i++] = ouraddr >> 16;
+ opt[i++] = ouraddr >> 8;
+ opt[i++] = ouraddr;
+ }
+
+ sp->confid[IDX_IPCP] = ++sp->pp_seq[IDX_IPCP];
+ sppp_cp_send(sp, PPP_IPCP, CONF_REQ, sp->confid[IDX_IPCP], i, &opt);
+}
+#else /* !INET */
+static void
+sppp_ipcp_init(struct sppp *sp)
+{
+}
+
+static void
+sppp_ipcp_up(struct sppp *sp)
+{
+}
+
+static void
+sppp_ipcp_down(struct sppp *sp)
+{
+}
+
+static void
+sppp_ipcp_open(struct sppp *sp)
+{
+}
+
+static void
+sppp_ipcp_close(struct sppp *sp)
+{
+}
+
+static void
+sppp_ipcp_TO(void *cookie)
+{
+}
+
+static int
+sppp_ipcp_RCR(struct sppp *sp, struct lcp_header *h, int len)
+{
+ return (0);
+}
+
+static void
+sppp_ipcp_RCN_rej(struct sppp *sp, struct lcp_header *h, int len)
+{
+}
+
+static void
+sppp_ipcp_RCN_nak(struct sppp *sp, struct lcp_header *h, int len)
+{
+}
+
+static void
+sppp_ipcp_tlu(struct sppp *sp)
+{
+}
+
+static void
+sppp_ipcp_tld(struct sppp *sp)
+{
+}
+
+static void
+sppp_ipcp_tls(struct sppp *sp)
+{
+}
+
+static void
+sppp_ipcp_tlf(struct sppp *sp)
+{
+}
+
+static void
+sppp_ipcp_scr(struct sppp *sp)
+{
+}
+#endif /* INET */
+
+/*
+ *--------------------------------------------------------------------------*
+ * *
+ * The IPv6CP implementation. *
+ * *
+ *--------------------------------------------------------------------------*
+ */
+
+#ifdef INET6
+static void
+sppp_ipv6cp_init(struct sppp *sp)
+{
+ sp->ipv6cp.opts = 0;
+ sp->ipv6cp.flags = 0;
+ sp->state[IDX_IPV6CP] = STATE_INITIAL;
+ sp->fail_counter[IDX_IPV6CP] = 0;
+ sp->pp_seq[IDX_IPV6CP] = 0;
+ sp->pp_rseq[IDX_IPV6CP] = 0;
+ callout_init(&sp->ch[IDX_IPV6CP], CALLOUT_MPSAFE);
+}
+
+static void
+sppp_ipv6cp_up(struct sppp *sp)
+{
+ sppp_up_event(&ipv6cp, sp);
+}
+
+static void
+sppp_ipv6cp_down(struct sppp *sp)
+{
+ sppp_down_event(&ipv6cp, sp);
+}
+
+static void
+sppp_ipv6cp_open(struct sppp *sp)
+{
+ STDDCL;
+ struct in6_addr myaddr, hisaddr;
+
+#ifdef IPV6CP_MYIFID_DYN
+ sp->ipv6cp.flags &= ~(IPV6CP_MYIFID_SEEN|IPV6CP_MYIFID_DYN);
+#else
+ sp->ipv6cp.flags &= ~IPV6CP_MYIFID_SEEN;
+#endif
+
+ sppp_get_ip6_addrs(sp, &myaddr, &hisaddr, 0);
+ /*
+ * If we don't have our address, this probably means our
+ * interface doesn't want to talk IPv6 at all. (This could
+ * be the case if somebody wants to speak only IPX, for
+ * example.) Don't open IPv6CP in this case.
+ */
+ if (IN6_IS_ADDR_UNSPECIFIED(&myaddr)) {
+ /* XXX this message should go away */
+ if (debug)
+ log(LOG_DEBUG, SPP_FMT "ipv6cp_open(): no IPv6 interface\n",
+ SPP_ARGS(ifp));
+ return;
+ }
+
+ sp->ipv6cp.flags |= IPV6CP_MYIFID_SEEN;
+ sp->ipv6cp.opts |= (1 << IPV6CP_OPT_IFID);
+ sppp_open_event(&ipv6cp, sp);
+}
+
+static void
+sppp_ipv6cp_close(struct sppp *sp)
+{
+ sppp_close_event(&ipv6cp, sp);
+}
+
+static void
+sppp_ipv6cp_TO(void *cookie)
+{
+ sppp_to_event(&ipv6cp, (struct sppp *)cookie);
+}
+
+/*
+ * Analyze a configure request. Return true if it was agreeable, and
+ * caused action sca, false if it has been rejected or nak'ed, and
+ * caused action scn. (The return value is used to make the state
+ * transition decision in the state automaton.)
+ */
+static int
+sppp_ipv6cp_RCR(struct sppp *sp, struct lcp_header *h, int len)
+{
+ u_char *buf, *r, *p;
+ struct ifnet *ifp = SP2IFP(sp);
+ int rlen, origlen, debug = ifp->if_flags & IFF_DEBUG;
+ struct in6_addr myaddr, desiredaddr, suggestaddr;
+ int ifidcount;
+ int type;
+ int collision, nohisaddr;
+ char ip6buf[INET6_ADDRSTRLEN];
+
+ len -= 4;
+ origlen = len;
+ /*
+ * Make sure to allocate a buf that can at least hold a
+ * conf-nak with an `address' option. We might need it below.
+ */
+ buf = r = malloc ((len < 6? 6: len), M_TEMP, M_NOWAIT);
+ if (! buf)
+ return (0);
+
+ /* pass 1: see if we can recognize them */
+ if (debug)
+ log(LOG_DEBUG, SPP_FMT "ipv6cp parse opts:",
+ SPP_ARGS(ifp));
+ p = (void*) (h+1);
+ ifidcount = 0;
+ for (rlen=0; len >= 2 && p[1] >= 2 && len >= p[1];
+ len-=p[1], p+=p[1]) {
+ if (debug)
+ log(-1, " %s", sppp_ipv6cp_opt_name(*p));
+ switch (*p) {
+ case IPV6CP_OPT_IFID:
+ if (len >= 10 && p[1] == 10 && ifidcount == 0) {
+ /* correctly formed address option */
+ ifidcount++;
+ continue;
+ }
+ if (debug)
+ log(-1, " [invalid]");
+ break;
+#ifdef notyet
+ case IPV6CP_OPT_COMPRESSION:
+ if (len >= 4 && p[1] >= 4) {
+ /* correctly formed compress option */
+ continue;
+ }
+ if (debug)
+ log(-1, " [invalid]");
+ break;
+#endif
+ default:
+ /* Others not supported. */
+ if (debug)
+ log(-1, " [rej]");
+ break;
+ }
+ /* Add the option to rejected list. */
+ bcopy (p, r, p[1]);
+ r += p[1];
+ rlen += p[1];
+ }
+ if (rlen) {
+ if (debug)
+ log(-1, " send conf-rej\n");
+ sppp_cp_send (sp, PPP_IPV6CP, CONF_REJ, h->ident, rlen, buf);
+ goto end;
+ } else if (debug)
+ log(-1, "\n");
+
+ /* pass 2: parse option values */
+ sppp_get_ip6_addrs(sp, &myaddr, 0, 0);
+ if (debug)
+ log(LOG_DEBUG, SPP_FMT "ipv6cp parse opt values: ",
+ SPP_ARGS(ifp));
+ p = (void*) (h+1);
+ len = origlen;
+ type = CONF_ACK;
+ for (rlen=0; len >= 2 && p[1] >= 2 && len >= p[1];
+ len-=p[1], p+=p[1]) {
+ if (debug)
+ log(-1, " %s", sppp_ipv6cp_opt_name(*p));
+ switch (*p) {
+#ifdef notyet
+ case IPV6CP_OPT_COMPRESSION:
+ continue;
+#endif
+ case IPV6CP_OPT_IFID:
+ bzero(&desiredaddr, sizeof(desiredaddr));
+ bcopy(&p[2], &desiredaddr.s6_addr[8], 8);
+ collision = (bcmp(&desiredaddr.s6_addr[8],
+ &myaddr.s6_addr[8], 8) == 0);
+ nohisaddr = IN6_IS_ADDR_UNSPECIFIED(&desiredaddr);
+
+ desiredaddr.s6_addr16[0] = htons(0xfe80);
+ (void)in6_setscope(&desiredaddr, SP2IFP(sp), NULL);
+
+ if (!collision && !nohisaddr) {
+ /* no collision, hisaddr known - Conf-Ack */
+ type = CONF_ACK;
+
+ if (debug) {
+ log(-1, " %s [%s]",
+ ip6_sprintf(ip6buf, &desiredaddr),
+ sppp_cp_type_name(type));
+ }
+ continue;
+ }
+
+			bzero(&suggestaddr, sizeof(suggestaddr));
+ if (collision && nohisaddr) {
+ /* collision, hisaddr unknown - Conf-Rej */
+ type = CONF_REJ;
+ bzero(&p[2], 8);
+ } else {
+ /*
+ * - no collision, hisaddr unknown, or
+ * - collision, hisaddr known
+ * Conf-Nak, suggest hisaddr
+ */
+ type = CONF_NAK;
+ sppp_suggest_ip6_addr(sp, &suggestaddr);
+ bcopy(&suggestaddr.s6_addr[8], &p[2], 8);
+ }
+ if (debug)
+ log(-1, " %s [%s]",
+ ip6_sprintf(ip6buf, &desiredaddr),
+ sppp_cp_type_name(type));
+ break;
+ }
+ /* Add the option to nak'ed list. */
+ bcopy (p, r, p[1]);
+ r += p[1];
+ rlen += p[1];
+ }
+
+ if (rlen == 0 && type == CONF_ACK) {
+ if (debug)
+ log(-1, " send %s\n", sppp_cp_type_name(type));
+ sppp_cp_send (sp, PPP_IPV6CP, type, h->ident, origlen, h+1);
+ } else {
+#ifdef DIAGNOSTIC
+ if (type == CONF_ACK)
+ panic("IPv6CP RCR: CONF_ACK with non-zero rlen");
+#endif
+
+ if (debug) {
+ log(-1, " send %s suggest %s\n",
+ sppp_cp_type_name(type),
+ ip6_sprintf(ip6buf, &suggestaddr));
+ }
+ sppp_cp_send (sp, PPP_IPV6CP, type, h->ident, rlen, buf);
+ }
+
+ end:
+ free (buf, M_TEMP);
+ return (rlen == 0);
+}
+
+/*
+ * Analyze the IPv6CP Configure-Reject option list, and adjust our
+ * negotiation.
+ */
+static void
+sppp_ipv6cp_RCN_rej(struct sppp *sp, struct lcp_header *h, int len)
+{
+ u_char *buf, *p;
+ struct ifnet *ifp = SP2IFP(sp);
+ int debug = ifp->if_flags & IFF_DEBUG;
+
+ len -= 4;
+ buf = malloc (len, M_TEMP, M_NOWAIT);
+ if (!buf)
+ return;
+
+ if (debug)
+ log(LOG_DEBUG, SPP_FMT "ipv6cp rej opts:",
+ SPP_ARGS(ifp));
+
+ p = (void*) (h+1);
+ for (; len >= 2 && p[1] >= 2 && len >= p[1];
+ len -= p[1], p += p[1]) {
+ if (debug)
+ log(-1, " %s", sppp_ipv6cp_opt_name(*p));
+ switch (*p) {
+ case IPV6CP_OPT_IFID:
+ /*
+			 * Peer doesn't grok the address option.  This is
+			 * bad.  XXX Would it be better to give up here?
+ */
+ sp->ipv6cp.opts &= ~(1 << IPV6CP_OPT_IFID);
+ break;
+#ifdef notyet
+ case IPV6CP_OPT_COMPRESS:
+ sp->ipv6cp.opts &= ~(1 << IPV6CP_OPT_COMPRESS);
+ break;
+#endif
+ }
+ }
+ if (debug)
+ log(-1, "\n");
+ free (buf, M_TEMP);
+ return;
+}
+
+/*
+ * Analyze the IPv6CP Configure-NAK option list, and adjust our
+ * negotiation.
+ */
+static void
+sppp_ipv6cp_RCN_nak(struct sppp *sp, struct lcp_header *h, int len)
+{
+ u_char *buf, *p;
+ struct ifnet *ifp = SP2IFP(sp);
+ int debug = ifp->if_flags & IFF_DEBUG;
+ struct in6_addr suggestaddr;
+ char ip6buf[INET6_ADDRSTRLEN];
+
+ len -= 4;
+ buf = malloc (len, M_TEMP, M_NOWAIT);
+ if (!buf)
+ return;
+
+ if (debug)
+ log(LOG_DEBUG, SPP_FMT "ipv6cp nak opts:",
+ SPP_ARGS(ifp));
+
+ p = (void*) (h+1);
+ for (; len >= 2 && p[1] >= 2 && len >= p[1];
+ len -= p[1], p += p[1]) {
+ if (debug)
+ log(-1, " %s", sppp_ipv6cp_opt_name(*p));
+ switch (*p) {
+ case IPV6CP_OPT_IFID:
+ /*
+			 * The peer doesn't like our local ifid and has
+			 * suggested one instead.  See whether we can
+			 * use it.
+ */
+ if (len < 10 || p[1] != 10)
+ break;
+ bzero(&suggestaddr, sizeof(suggestaddr));
+ suggestaddr.s6_addr16[0] = htons(0xfe80);
+ (void)in6_setscope(&suggestaddr, SP2IFP(sp), NULL);
+ bcopy(&p[2], &suggestaddr.s6_addr[8], 8);
+
+ sp->ipv6cp.opts |= (1 << IPV6CP_OPT_IFID);
+ if (debug)
+ log(-1, " [suggestaddr %s]",
+ ip6_sprintf(ip6buf, &suggestaddr));
+#ifdef IPV6CP_MYIFID_DYN
+ /*
+ * When doing dynamic address assignment,
+ * we accept his offer.
+ */
+ if (sp->ipv6cp.flags & IPV6CP_MYIFID_DYN) {
+ struct in6_addr lastsuggest;
+ /*
+				 * If <suggested myaddr from peer> equals
+				 * <hisaddr we have suggested last time>,
+				 * we have a collision.  Generate a new
+				 * random ifid.
+ */
+				sppp_suggest_ip6_addr(sp, &lastsuggest);
+				if (IN6_ARE_ADDR_EQUAL(&suggestaddr,
+				    &lastsuggest)) {
+ if (debug)
+ log(-1, " [random]");
+ sppp_gen_ip6_addr(sp, &suggestaddr);
+ }
+ sppp_set_ip6_addr(sp, &suggestaddr, 0);
+ if (debug)
+ log(-1, " [agree]");
+ sp->ipv6cp.flags |= IPV6CP_MYIFID_SEEN;
+ }
+#else
+ /*
+ * Since we do not do dynamic address assignment,
+ * we ignore it and thus continue to negotiate
+			 * our already existing value.  This can possibly
+			 * go into an infinite request-reject loop.
+			 *
+			 * This is not likely, because we normally use an
+			 * ifid based on the MAC address.  If the node has
+			 * no Ethernet card, too bad.
+ * XXX should we use fail_counter?
+ */
+#endif
+ break;
+#ifdef notyet
+ case IPV6CP_OPT_COMPRESS:
+ /*
+ * Peer wants different compression parameters.
+ */
+ break;
+#endif
+ }
+ }
+ if (debug)
+ log(-1, "\n");
+ free (buf, M_TEMP);
+ return;
+}
+
+static void
+sppp_ipv6cp_tlu(struct sppp *sp)
+{
+ /* we are up - notify isdn daemon */
+ if (sp->pp_con)
+ sp->pp_con(sp);
+}
+
+static void
+sppp_ipv6cp_tld(struct sppp *sp)
+{
+}
+
+static void
+sppp_ipv6cp_tls(struct sppp *sp)
+{
+ /* indicate to LCP that it must stay alive */
+ sp->lcp.protos |= (1 << IDX_IPV6CP);
+}
+
+static void
+sppp_ipv6cp_tlf(struct sppp *sp)
+{
+
+#if 0 /* need #if 0 to close IPv6CP properly */
+ /* we no longer need LCP */
+ sp->lcp.protos &= ~(1 << IDX_IPV6CP);
+ sppp_lcp_check_and_close(sp);
+#endif
+}
+
+static void
+sppp_ipv6cp_scr(struct sppp *sp)
+{
+ char opt[10 /* ifid */ + 4 /* compression, minimum */];
+ struct in6_addr ouraddr;
+ int i = 0;
+
+ if (sp->ipv6cp.opts & (1 << IPV6CP_OPT_IFID)) {
+ sppp_get_ip6_addrs(sp, &ouraddr, 0, 0);
+ opt[i++] = IPV6CP_OPT_IFID;
+ opt[i++] = 10;
+ bcopy(&ouraddr.s6_addr[8], &opt[i], 8);
+ i += 8;
+ }
+
+#ifdef notyet
+ if (sp->ipv6cp.opts & (1 << IPV6CP_OPT_COMPRESSION)) {
+ opt[i++] = IPV6CP_OPT_COMPRESSION;
+ opt[i++] = 4;
+ opt[i++] = 0; /* TBD */
+ opt[i++] = 0; /* TBD */
+ /* variable length data may follow */
+ }
+#endif
+
+ sp->confid[IDX_IPV6CP] = ++sp->pp_seq[IDX_IPV6CP];
+ sppp_cp_send(sp, PPP_IPV6CP, CONF_REQ, sp->confid[IDX_IPV6CP], i, &opt);
+}
+#else /*INET6*/
+static void sppp_ipv6cp_init(struct sppp *sp)
+{
+}
+
+static void sppp_ipv6cp_up(struct sppp *sp)
+{
+}
+
+static void sppp_ipv6cp_down(struct sppp *sp)
+{
+}
+
+
+static void sppp_ipv6cp_open(struct sppp *sp)
+{
+}
+
+static void sppp_ipv6cp_close(struct sppp *sp)
+{
+}
+
+static void sppp_ipv6cp_TO(void *sp)
+{
+}
+
+static int sppp_ipv6cp_RCR(struct sppp *sp, struct lcp_header *h, int len)
+{
+ return 0;
+}
+
+static void sppp_ipv6cp_RCN_rej(struct sppp *sp, struct lcp_header *h, int len)
+{
+}
+
+static void sppp_ipv6cp_RCN_nak(struct sppp *sp, struct lcp_header *h, int len)
+{
+}
+
+static void sppp_ipv6cp_tlu(struct sppp *sp)
+{
+}
+
+static void sppp_ipv6cp_tld(struct sppp *sp)
+{
+}
+
+static void sppp_ipv6cp_tls(struct sppp *sp)
+{
+}
+
+static void sppp_ipv6cp_tlf(struct sppp *sp)
+{
+}
+
+static void sppp_ipv6cp_scr(struct sppp *sp)
+{
+}
+#endif /*INET6*/
+
+/*
+ *--------------------------------------------------------------------------*
+ * *
+ * The CHAP implementation. *
+ * *
+ *--------------------------------------------------------------------------*
+ */
+
+/*
+ * The authentication protocols don't employ a full-fledged state machine as
+ * the control protocols do: they have Open and Close events, but not Up
+ * and Down, nor are they explicitly terminated.  Also, use of the
+ * authentication protocols may differ between the two directions (this
+ * makes sense: think of a machine that never accepts incoming calls but
+ * only calls out; it doesn't require the called party to authenticate
+ * itself).
+ *
+ * Our state machine for the local authentication protocol (we are requesting
+ * the peer to authenticate) looks like:
+ *
+ * RCA-
+ * +--------------------------------------------+
+ * V scn,tld|
+ * +--------+ Close +---------+ RCA+
+ * | |<----------------------------------| |------+
+ * +--->| Closed | TO* | Opened | sca |
+ * | | |-----+ +-------| |<-----+
+ * | +--------+ irc | | +---------+
+ * | ^ | | ^
+ * | | | | |
+ * | | | | |
+ * | TO-| | | |
+ * | |tld TO+ V | |
+ * | | +------->+ | |
+ * | | | | | |
+ * | +--------+ V | |
+ * | | |<----+<--------------------+ |
+ * | | Req- | scr |
+ * | | Sent | |
+ * | | | |
+ * | +--------+ |
+ * | RCA- | | RCA+ |
+ * +------+ +------------------------------------------+
+ * scn,tld sca,irc,ict,tlu
+ *
+ *
+ * with:
+ *
+ * Open: LCP reached authentication phase
+ * Close: LCP reached terminate phase
+ *
+ * RCA+: received reply (pap-req, chap-response), acceptable
+ * RCA-: received reply (pap-req, chap-response), not acceptable
+ * TO+: timeout with restart counter >= 0
+ * TO-: timeout with restart counter < 0
+ * TO*: reschedule timeout for CHAP
+ *
+ * scr: send request packet (none for PAP, chap-challenge)
+ * sca: send ack packet (pap-ack, chap-success)
+ * scn: send nak packet (pap-nak, chap-failure)
+ * ict: initialize re-challenge timer (CHAP only)
+ *
+ * tlu: this-layer-up, LCP reaches network phase
+ * tld: this-layer-down, LCP enters terminate phase
+ *
+ * Note that in CHAP mode, after sending a new challenge, while the state
+ * automaton falls back into Req-Sent state, it doesn't signal a tld
+ * event to LCP, so LCP remains in network phase. Only after not getting
+ * any response (or after getting an unacceptable response), CHAP closes,
+ * causing LCP to enter terminate phase.
+ *
+ * With PAP, there is no initial request that can be sent. The peer is
+ * expected to send one based on the successful negotiation of PAP as
+ * the authentication protocol during the LCP option negotiation.
+ *
+ * Incoming authentication protocol requests (remote requests
+ * authentication, we are peer) don't employ a state machine at all,
+ * they are simply answered. Some peers [Ascend P50 firmware rev
+ * 4.50] react allergically to us sending IPCP requests while they are
+ * still in the authentication phase (thereby violating the standard,
+ * which demands that these NCP packets be discarded), so we keep
+ * track of the peer demanding us to authenticate, and only proceed to
+ * phase network once we've seen a positive acknowledge for the
+ * authentication.
+ */
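+
+/*
+ * For reference: the response value carried in a CHAP_RESPONSE packet
+ * is the RFC 1994 one, an MD5 digest over the concatenation of the
+ * packet identifier, the shared secret and the received challenge.
+ * A stand-alone sketch of the computation the input handler below
+ * performs inline (illustration only, kept out of the build; the
+ * function name is made up):
+ */
+#if 0
+static void
+example_chap_response(u_char ident, const u_char *secret, size_t slen,
+    const u_char *challenge, size_t clen, u_char digest[16])
+{
+	MD5_CTX ctx;
+
+	MD5Init(&ctx);
+	MD5Update(&ctx, &ident, 1);
+	MD5Update(&ctx, secret, slen);
+	MD5Update(&ctx, challenge, clen);
+	MD5Final(digest, &ctx);
+}
+#endif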
+
+/*
+ * Handle incoming CHAP packets.
+ */
+static void
+sppp_chap_input(struct sppp *sp, struct mbuf *m)
+{
+ STDDCL;
+ struct lcp_header *h;
+ int len, x;
+ u_char *value, *name, digest[AUTHKEYLEN], dsize;
+ int value_len, name_len;
+ MD5_CTX ctx;
+
+ len = m->m_pkthdr.len;
+ if (len < 4) {
+ if (debug)
+ log(LOG_DEBUG,
+ SPP_FMT "chap invalid packet length: %d bytes\n",
+ SPP_ARGS(ifp), len);
+ return;
+ }
+ h = mtod (m, struct lcp_header*);
+ if (len > ntohs (h->len))
+ len = ntohs (h->len);
+
+ switch (h->type) {
+ /* challenge, failure and success are his authproto */
+ case CHAP_CHALLENGE:
+ value = 1 + (u_char*)(h+1);
+ value_len = value[-1];
+ name = value + value_len;
+ name_len = len - value_len - 5;
+ if (name_len < 0) {
+ if (debug) {
+ log(LOG_DEBUG,
+ SPP_FMT "chap corrupted challenge "
+ "<%s id=0x%x len=%d",
+ SPP_ARGS(ifp),
+ sppp_auth_type_name(PPP_CHAP, h->type),
+ h->ident, ntohs(h->len));
+ sppp_print_bytes((u_char*) (h+1), len-4);
+ log(-1, ">\n");
+ }
+ break;
+ }
+
+ if (debug) {
+ log(LOG_DEBUG,
+ SPP_FMT "chap input <%s id=0x%x len=%d name=",
+ SPP_ARGS(ifp),
+ sppp_auth_type_name(PPP_CHAP, h->type), h->ident,
+ ntohs(h->len));
+ sppp_print_string((char*) name, name_len);
+ log(-1, " value-size=%d value=", value_len);
+ sppp_print_bytes(value, value_len);
+ log(-1, ">\n");
+ }
+
+ /* Compute reply value. */
+ MD5Init(&ctx);
+ MD5Update(&ctx, &h->ident, 1);
+ MD5Update(&ctx, sp->myauth.secret,
+ sppp_strnlen(sp->myauth.secret, AUTHKEYLEN));
+ MD5Update(&ctx, value, value_len);
+ MD5Final(digest, &ctx);
+ dsize = sizeof digest;
+
+ sppp_auth_send(&chap, sp, CHAP_RESPONSE, h->ident,
+ sizeof dsize, (const char *)&dsize,
+ sizeof digest, digest,
+ (size_t)sppp_strnlen(sp->myauth.name, AUTHNAMELEN),
+ sp->myauth.name,
+ 0);
+ break;
+
+ case CHAP_SUCCESS:
+ if (debug) {
+ log(LOG_DEBUG, SPP_FMT "chap success",
+ SPP_ARGS(ifp));
+ if (len > 4) {
+ log(-1, ": ");
+ sppp_print_string((char*)(h + 1), len - 4);
+ }
+ log(-1, "\n");
+ }
+ x = splimp();
+ SPPP_LOCK(sp);
+ sp->pp_flags &= ~PP_NEEDAUTH;
+ if (sp->myauth.proto == PPP_CHAP &&
+ (sp->lcp.opts & (1 << LCP_OPT_AUTH_PROTO)) &&
+ (sp->lcp.protos & (1 << IDX_CHAP)) == 0) {
+ /*
+ * We are authenticator for CHAP but didn't
+ * complete yet. Leave it to tlu to proceed
+ * to network phase.
+ */
+ SPPP_UNLOCK(sp);
+ splx(x);
+ break;
+ }
+ SPPP_UNLOCK(sp);
+ splx(x);
+ sppp_phase_network(sp);
+ break;
+
+ case CHAP_FAILURE:
+ if (debug) {
+ log(LOG_INFO, SPP_FMT "chap failure",
+ SPP_ARGS(ifp));
+ if (len > 4) {
+ log(-1, ": ");
+ sppp_print_string((char*)(h + 1), len - 4);
+ }
+ log(-1, "\n");
+ } else
+ log(LOG_INFO, SPP_FMT "chap failure\n",
+ SPP_ARGS(ifp));
+ /* await LCP shutdown by authenticator */
+ break;
+
+ /* response is my authproto */
+ case CHAP_RESPONSE:
+ value = 1 + (u_char*)(h+1);
+ value_len = value[-1];
+ name = value + value_len;
+ name_len = len - value_len - 5;
+ if (name_len < 0) {
+ if (debug) {
+ log(LOG_DEBUG,
+ SPP_FMT "chap corrupted response "
+ "<%s id=0x%x len=%d",
+ SPP_ARGS(ifp),
+ sppp_auth_type_name(PPP_CHAP, h->type),
+ h->ident, ntohs(h->len));
+ sppp_print_bytes((u_char*)(h+1), len-4);
+ log(-1, ">\n");
+ }
+ break;
+ }
+ if (h->ident != sp->confid[IDX_CHAP]) {
+ if (debug)
+ log(LOG_DEBUG,
+ SPP_FMT "chap dropping response for old ID "
+ "(got %d, expected %d)\n",
+ SPP_ARGS(ifp),
+ h->ident, sp->confid[IDX_CHAP]);
+ break;
+ }
+ if (name_len != sppp_strnlen(sp->hisauth.name, AUTHNAMELEN)
+ || bcmp(name, sp->hisauth.name, name_len) != 0) {
+ log(LOG_INFO, SPP_FMT "chap response, his name ",
+ SPP_ARGS(ifp));
+ sppp_print_string(name, name_len);
+ log(-1, " != expected ");
+ sppp_print_string(sp->hisauth.name,
+ sppp_strnlen(sp->hisauth.name, AUTHNAMELEN));
+ log(-1, "\n");
+ }
+ if (debug) {
+ log(LOG_DEBUG, SPP_FMT "chap input(%s) "
+ "<%s id=0x%x len=%d name=",
+ SPP_ARGS(ifp),
+ sppp_state_name(sp->state[IDX_CHAP]),
+ sppp_auth_type_name(PPP_CHAP, h->type),
+ h->ident, ntohs (h->len));
+ sppp_print_string((char*)name, name_len);
+ log(-1, " value-size=%d value=", value_len);
+ sppp_print_bytes(value, value_len);
+ log(-1, ">\n");
+ }
+ if (value_len != AUTHKEYLEN) {
+ if (debug)
+ log(LOG_DEBUG,
+ SPP_FMT "chap bad hash value length: "
+ "%d bytes, should be %d\n",
+ SPP_ARGS(ifp), value_len,
+ AUTHKEYLEN);
+ break;
+ }
+
+ MD5Init(&ctx);
+ MD5Update(&ctx, &h->ident, 1);
+ MD5Update(&ctx, sp->hisauth.secret,
+ sppp_strnlen(sp->hisauth.secret, AUTHKEYLEN));
+ MD5Update(&ctx, sp->myauth.challenge, AUTHKEYLEN);
+ MD5Final(digest, &ctx);
+
+#define FAILMSG "Failed..."
+#define SUCCMSG "Welcome!"
+
+ if (value_len != sizeof digest ||
+ bcmp(digest, value, value_len) != 0) {
+ /* action scn, tld */
+ sppp_auth_send(&chap, sp, CHAP_FAILURE, h->ident,
+ sizeof(FAILMSG) - 1, (u_char *)FAILMSG,
+ 0);
+ chap.tld(sp);
+ break;
+ }
+ /* action sca, perhaps tlu */
+ if (sp->state[IDX_CHAP] == STATE_REQ_SENT ||
+ sp->state[IDX_CHAP] == STATE_OPENED)
+ sppp_auth_send(&chap, sp, CHAP_SUCCESS, h->ident,
+ sizeof(SUCCMSG) - 1, (u_char *)SUCCMSG,
+ 0);
+ if (sp->state[IDX_CHAP] == STATE_REQ_SENT) {
+ sppp_cp_change_state(&chap, sp, STATE_OPENED);
+ chap.tlu(sp);
+ }
+ break;
+
+ default:
+ /* Unknown CHAP packet type -- ignore. */
+ if (debug) {
+ log(LOG_DEBUG, SPP_FMT "chap unknown input(%s) "
+ "<0x%x id=0x%xh len=%d",
+ SPP_ARGS(ifp),
+ sppp_state_name(sp->state[IDX_CHAP]),
+ h->type, h->ident, ntohs(h->len));
+ sppp_print_bytes((u_char*)(h+1), len-4);
+ log(-1, ">\n");
+ }
+ break;
+
+ }
+}
+
+static void
+sppp_chap_init(struct sppp *sp)
+{
+	/* CHAP doesn't have STATE_INITIAL at all. */
+ sp->state[IDX_CHAP] = STATE_CLOSED;
+ sp->fail_counter[IDX_CHAP] = 0;
+ sp->pp_seq[IDX_CHAP] = 0;
+ sp->pp_rseq[IDX_CHAP] = 0;
+ callout_init(&sp->ch[IDX_CHAP], CALLOUT_MPSAFE);
+}
+
+static void
+sppp_chap_open(struct sppp *sp)
+{
+ if (sp->myauth.proto == PPP_CHAP &&
+ (sp->lcp.opts & (1 << LCP_OPT_AUTH_PROTO)) != 0) {
+ /* we are authenticator for CHAP, start it */
+ chap.scr(sp);
+ sp->rst_counter[IDX_CHAP] = sp->lcp.max_configure;
+ sppp_cp_change_state(&chap, sp, STATE_REQ_SENT);
+ }
+ /* nothing to be done if we are peer, await a challenge */
+}
+
+static void
+sppp_chap_close(struct sppp *sp)
+{
+ if (sp->state[IDX_CHAP] != STATE_CLOSED)
+ sppp_cp_change_state(&chap, sp, STATE_CLOSED);
+}
+
+static void
+sppp_chap_TO(void *cookie)
+{
+ struct sppp *sp = (struct sppp *)cookie;
+ STDDCL;
+ int s;
+
+ s = splimp();
+ SPPP_LOCK(sp);
+ if (debug)
+ log(LOG_DEBUG, SPP_FMT "chap TO(%s) rst_counter = %d\n",
+ SPP_ARGS(ifp),
+ sppp_state_name(sp->state[IDX_CHAP]),
+ sp->rst_counter[IDX_CHAP]);
+
+ if (--sp->rst_counter[IDX_CHAP] < 0)
+ /* TO- event */
+ switch (sp->state[IDX_CHAP]) {
+ case STATE_REQ_SENT:
+ chap.tld(sp);
+ sppp_cp_change_state(&chap, sp, STATE_CLOSED);
+ break;
+ }
+ else
+ /* TO+ (or TO*) event */
+ switch (sp->state[IDX_CHAP]) {
+ case STATE_OPENED:
+ /* TO* event */
+ sp->rst_counter[IDX_CHAP] = sp->lcp.max_configure;
+ /* FALLTHROUGH */
+ case STATE_REQ_SENT:
+ chap.scr(sp);
+ /* sppp_cp_change_state() will restart the timer */
+ sppp_cp_change_state(&chap, sp, STATE_REQ_SENT);
+ break;
+ }
+
+ SPPP_UNLOCK(sp);
+ splx(s);
+}
+
+static void
+sppp_chap_tlu(struct sppp *sp)
+{
+ STDDCL;
+ int i, x;
+
+ i = 0;
+ sp->rst_counter[IDX_CHAP] = sp->lcp.max_configure;
+
+ /*
+ * Some broken CHAP implementations (Conware CoNet, firmware
+ * 4.0.?) don't want to re-authenticate their CHAP once the
+ * initial challenge-response exchange has taken place.
+ * Provide for an option to avoid rechallenges.
+ */
+ if ((sp->hisauth.flags & AUTHFLAG_NORECHALLENGE) == 0) {
+		/*
+		 * Compute the re-challenge timeout: since
+		 * (random() & 0xff00) >> 7 ranges over 0..510, this
+		 * yields a number between 300 and 810 seconds.
+		 */
+ i = 300 + ((unsigned)(random() & 0xff00) >> 7);
+ callout_reset(&sp->ch[IDX_CHAP], i * hz, chap.TO, (void *)sp);
+ }
+
+ if (debug) {
+ log(LOG_DEBUG,
+ SPP_FMT "chap %s, ",
+ SPP_ARGS(ifp),
+ sp->pp_phase == PHASE_NETWORK? "reconfirmed": "tlu");
+ if ((sp->hisauth.flags & AUTHFLAG_NORECHALLENGE) == 0)
+ log(-1, "next re-challenge in %d seconds\n", i);
+ else
+			log(-1, "re-challenging suppressed\n");
+ }
+
+ x = splimp();
+ SPPP_LOCK(sp);
+	/* indicate to LCP that CHAP has come up */
+ sp->lcp.protos |= (1 << IDX_CHAP);
+
+ if (sp->pp_flags & PP_NEEDAUTH) {
+ /*
+ * Remote is authenticator, but his auth proto didn't
+ * complete yet. Defer the transition to network
+ * phase.
+ */
+ SPPP_UNLOCK(sp);
+ splx(x);
+ return;
+ }
+ SPPP_UNLOCK(sp);
+ splx(x);
+
+ /*
+ * If we are already in phase network, we are done here. This
+ * is the case if this is a dummy tlu event after a re-challenge.
+ */
+ if (sp->pp_phase != PHASE_NETWORK)
+ sppp_phase_network(sp);
+}
+
+static void
+sppp_chap_tld(struct sppp *sp)
+{
+ STDDCL;
+
+ if (debug)
+ log(LOG_DEBUG, SPP_FMT "chap tld\n", SPP_ARGS(ifp));
+ callout_stop(&sp->ch[IDX_CHAP]);
+ sp->lcp.protos &= ~(1 << IDX_CHAP);
+
+ lcp.Close(sp);
+}
+
+static void
+sppp_chap_scr(struct sppp *sp)
+{
+ u_long *ch, seed;
+ u_char clen;
+
+	/*
+	 * Compute a random challenge.  Note: filling the AUTHKEYLEN-byte
+	 * buffer with four u_long words assumes a 32-bit u_long.
+	 */
+ ch = (u_long *)sp->myauth.challenge;
+ read_random(&seed, sizeof seed);
+ ch[0] = seed ^ random();
+ ch[1] = seed ^ random();
+ ch[2] = seed ^ random();
+ ch[3] = seed ^ random();
+ clen = AUTHKEYLEN;
+
+ sp->confid[IDX_CHAP] = ++sp->pp_seq[IDX_CHAP];
+
+ sppp_auth_send(&chap, sp, CHAP_CHALLENGE, sp->confid[IDX_CHAP],
+ sizeof clen, (const char *)&clen,
+ (size_t)AUTHKEYLEN, sp->myauth.challenge,
+ (size_t)sppp_strnlen(sp->myauth.name, AUTHNAMELEN),
+ sp->myauth.name,
+ 0);
+}
+
+/*
+ *--------------------------------------------------------------------------*
+ * *
+ * The PAP implementation. *
+ * *
+ *--------------------------------------------------------------------------*
+ */
+/*
+ * For PAP, we need to keep a little state even if we are the peer, not
+ * the authenticator.  This is because we don't get a request to
+ * authenticate, but have to repeatedly authenticate ourselves until we
+ * get a response (or the retry counter expires).
+ */
+
+/*
+ * Handle incoming PAP packets.
+ */
+static void
+sppp_pap_input(struct sppp *sp, struct mbuf *m)
+{
+ STDDCL;
+ struct lcp_header *h;
+ int len, x;
+ u_char *name, *passwd, mlen;
+ int name_len, passwd_len;
+
+ len = m->m_pkthdr.len;
+ if (len < 5) {
+ if (debug)
+ log(LOG_DEBUG,
+ SPP_FMT "pap invalid packet length: %d bytes\n",
+ SPP_ARGS(ifp), len);
+ return;
+ }
+ h = mtod (m, struct lcp_header*);
+ if (len > ntohs (h->len))
+ len = ntohs (h->len);
+ switch (h->type) {
+ /* PAP request is my authproto */
+ case PAP_REQ:
+ name = 1 + (u_char*)(h+1);
+ name_len = name[-1];
+ passwd = name + name_len + 1;
+ if (name_len > len - 6 ||
+ (passwd_len = passwd[-1]) > len - 6 - name_len) {
+ if (debug) {
+ log(LOG_DEBUG, SPP_FMT "pap corrupted input "
+ "<%s id=0x%x len=%d",
+ SPP_ARGS(ifp),
+ sppp_auth_type_name(PPP_PAP, h->type),
+ h->ident, ntohs(h->len));
+ sppp_print_bytes((u_char*)(h+1), len-4);
+ log(-1, ">\n");
+ }
+ break;
+ }
+ if (debug) {
+ log(LOG_DEBUG, SPP_FMT "pap input(%s) "
+ "<%s id=0x%x len=%d name=",
+ SPP_ARGS(ifp),
+ sppp_state_name(sp->state[IDX_PAP]),
+ sppp_auth_type_name(PPP_PAP, h->type),
+ h->ident, ntohs(h->len));
+ sppp_print_string((char*)name, name_len);
+ log(-1, " passwd=");
+ sppp_print_string((char*)passwd, passwd_len);
+ log(-1, ">\n");
+ }
+ if (name_len != sppp_strnlen(sp->hisauth.name, AUTHNAMELEN) ||
+ passwd_len != sppp_strnlen(sp->hisauth.secret, AUTHKEYLEN) ||
+ bcmp(name, sp->hisauth.name, name_len) != 0 ||
+ bcmp(passwd, sp->hisauth.secret, passwd_len) != 0) {
+ /* action scn, tld */
+ mlen = sizeof(FAILMSG) - 1;
+ sppp_auth_send(&pap, sp, PAP_NAK, h->ident,
+ sizeof mlen, (const char *)&mlen,
+ sizeof(FAILMSG) - 1, (u_char *)FAILMSG,
+ 0);
+ pap.tld(sp);
+ break;
+ }
+ /* action sca, perhaps tlu */
+ if (sp->state[IDX_PAP] == STATE_REQ_SENT ||
+ sp->state[IDX_PAP] == STATE_OPENED) {
+ mlen = sizeof(SUCCMSG) - 1;
+ sppp_auth_send(&pap, sp, PAP_ACK, h->ident,
+ sizeof mlen, (const char *)&mlen,
+ sizeof(SUCCMSG) - 1, (u_char *)SUCCMSG,
+ 0);
+ }
+ if (sp->state[IDX_PAP] == STATE_REQ_SENT) {
+ sppp_cp_change_state(&pap, sp, STATE_OPENED);
+ pap.tlu(sp);
+ }
+ break;
+
+ /* ack and nak are his authproto */
+ case PAP_ACK:
+ callout_stop(&sp->pap_my_to_ch);
+ if (debug) {
+ log(LOG_DEBUG, SPP_FMT "pap success",
+ SPP_ARGS(ifp));
+ name_len = *((char *)h);
+ if (len > 5 && name_len) {
+ log(-1, ": ");
+ sppp_print_string((char*)(h+1), name_len);
+ }
+ log(-1, "\n");
+ }
+ x = splimp();
+ SPPP_LOCK(sp);
+ sp->pp_flags &= ~PP_NEEDAUTH;
+ if (sp->myauth.proto == PPP_PAP &&
+ (sp->lcp.opts & (1 << LCP_OPT_AUTH_PROTO)) &&
+ (sp->lcp.protos & (1 << IDX_PAP)) == 0) {
+ /*
+ * We are authenticator for PAP but didn't
+ * complete yet. Leave it to tlu to proceed
+ * to network phase.
+ */
+ SPPP_UNLOCK(sp);
+ splx(x);
+ break;
+ }
+ SPPP_UNLOCK(sp);
+ splx(x);
+ sppp_phase_network(sp);
+ break;
+
+ case PAP_NAK:
+ callout_stop (&sp->pap_my_to_ch);
+ if (debug) {
+ log(LOG_INFO, SPP_FMT "pap failure",
+ SPP_ARGS(ifp));
+ name_len = *((char *)h);
+ if (len > 5 && name_len) {
+ log(-1, ": ");
+ sppp_print_string((char*)(h+1), name_len);
+ }
+ log(-1, "\n");
+ } else
+ log(LOG_INFO, SPP_FMT "pap failure\n",
+ SPP_ARGS(ifp));
+ /* await LCP shutdown by authenticator */
+ break;
+
+ default:
+ /* Unknown PAP packet type -- ignore. */
+ if (debug) {
+			log(LOG_DEBUG, SPP_FMT "pap unknown input "
+ "<0x%x id=0x%x len=%d",
+ SPP_ARGS(ifp),
+ h->type, h->ident, ntohs(h->len));
+ sppp_print_bytes((u_char*)(h+1), len-4);
+ log(-1, ">\n");
+ }
+ break;
+
+ }
+}
+
+static void
+sppp_pap_init(struct sppp *sp)
+{
+ /* PAP doesn't have STATE_INITIAL at all. */
+ sp->state[IDX_PAP] = STATE_CLOSED;
+ sp->fail_counter[IDX_PAP] = 0;
+ sp->pp_seq[IDX_PAP] = 0;
+ sp->pp_rseq[IDX_PAP] = 0;
+ callout_init(&sp->ch[IDX_PAP], CALLOUT_MPSAFE);
+ callout_init(&sp->pap_my_to_ch, CALLOUT_MPSAFE);
+}
+
+static void
+sppp_pap_open(struct sppp *sp)
+{
+ if (sp->hisauth.proto == PPP_PAP &&
+ (sp->lcp.opts & (1 << LCP_OPT_AUTH_PROTO)) != 0) {
+ /* we are authenticator for PAP, start our timer */
+ sp->rst_counter[IDX_PAP] = sp->lcp.max_configure;
+ sppp_cp_change_state(&pap, sp, STATE_REQ_SENT);
+ }
+ if (sp->myauth.proto == PPP_PAP) {
+ /* we are peer, send a request, and start a timer */
+ pap.scr(sp);
+ callout_reset(&sp->pap_my_to_ch, sp->lcp.timeout,
+ sppp_pap_my_TO, (void *)sp);
+ }
+}
+
+static void
+sppp_pap_close(struct sppp *sp)
+{
+ if (sp->state[IDX_PAP] != STATE_CLOSED)
+ sppp_cp_change_state(&pap, sp, STATE_CLOSED);
+}
+
+/*
+ * This is the timeout routine if we are the authenticator.  Since the
+ * authenticator is basically passive in PAP, we can't do much here.
+ */
+static void
+sppp_pap_TO(void *cookie)
+{
+ struct sppp *sp = (struct sppp *)cookie;
+ STDDCL;
+ int s;
+
+ s = splimp();
+ SPPP_LOCK(sp);
+ if (debug)
+ log(LOG_DEBUG, SPP_FMT "pap TO(%s) rst_counter = %d\n",
+ SPP_ARGS(ifp),
+ sppp_state_name(sp->state[IDX_PAP]),
+ sp->rst_counter[IDX_PAP]);
+
+ if (--sp->rst_counter[IDX_PAP] < 0)
+ /* TO- event */
+ switch (sp->state[IDX_PAP]) {
+ case STATE_REQ_SENT:
+ pap.tld(sp);
+ sppp_cp_change_state(&pap, sp, STATE_CLOSED);
+ break;
+ }
+ else
+ /* TO+ event, not very much we could do */
+ switch (sp->state[IDX_PAP]) {
+ case STATE_REQ_SENT:
+ /* sppp_cp_change_state() will restart the timer */
+ sppp_cp_change_state(&pap, sp, STATE_REQ_SENT);
+ break;
+ }
+
+ SPPP_UNLOCK(sp);
+ splx(s);
+}
+
+/*
+ * This is the timeout handler if we are the peer.  Since the peer is
+ * active, we need to retransmit our PAP request; it has apparently
+ * been lost.
+ * XXX We should impose a max counter.
+ */
+static void
+sppp_pap_my_TO(void *cookie)
+{
+ struct sppp *sp = (struct sppp *)cookie;
+ STDDCL;
+
+ if (debug)
+ log(LOG_DEBUG, SPP_FMT "pap peer TO\n",
+ SPP_ARGS(ifp));
+
+ SPPP_LOCK(sp);
+ pap.scr(sp);
+ SPPP_UNLOCK(sp);
+}
+
+static void
+sppp_pap_tlu(struct sppp *sp)
+{
+ STDDCL;
+ int x;
+
+ sp->rst_counter[IDX_PAP] = sp->lcp.max_configure;
+
+ if (debug)
+ log(LOG_DEBUG, SPP_FMT "%s tlu\n",
+ SPP_ARGS(ifp), pap.name);
+
+ x = splimp();
+ SPPP_LOCK(sp);
+	/* indicate to LCP that PAP has come up */
+ sp->lcp.protos |= (1 << IDX_PAP);
+
+ if (sp->pp_flags & PP_NEEDAUTH) {
+ /*
+ * Remote is authenticator, but his auth proto didn't
+ * complete yet. Defer the transition to network
+ * phase.
+ */
+ SPPP_UNLOCK(sp);
+ splx(x);
+ return;
+ }
+ SPPP_UNLOCK(sp);
+ splx(x);
+ sppp_phase_network(sp);
+}
+
+static void
+sppp_pap_tld(struct sppp *sp)
+{
+ STDDCL;
+
+ if (debug)
+ log(LOG_DEBUG, SPP_FMT "pap tld\n", SPP_ARGS(ifp));
+ callout_stop (&sp->ch[IDX_PAP]);
+ callout_stop (&sp->pap_my_to_ch);
+ sp->lcp.protos &= ~(1 << IDX_PAP);
+
+ lcp.Close(sp);
+}
+
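+/*
+ * Build and send a PAP authenticate-request: the peer-id length and
+ * value followed by the password length and value (RFC 1334).
+ */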
+static void
+sppp_pap_scr(struct sppp *sp)
+{
+ u_char idlen, pwdlen;
+
+ sp->confid[IDX_PAP] = ++sp->pp_seq[IDX_PAP];
+ pwdlen = sppp_strnlen(sp->myauth.secret, AUTHKEYLEN);
+ idlen = sppp_strnlen(sp->myauth.name, AUTHNAMELEN);
+
+ sppp_auth_send(&pap, sp, PAP_REQ, sp->confid[IDX_PAP],
+ sizeof idlen, (const char *)&idlen,
+ (size_t)idlen, sp->myauth.name,
+ sizeof pwdlen, (const char *)&pwdlen,
+ (size_t)pwdlen, sp->myauth.secret,
+ 0);
+}
+
+/*
+ * Random miscellaneous functions.
+ */
+
+/*
+ * Send a PAP or CHAP proto packet.
+ *
+ * Variadic function; each of the elements for the ellipsis is of type
+ * ``size_t mlen, const u_char *msg''. Processing will stop iff
+ * mlen == 0.
+ * NOTE: never declare variadic functions with types subject to type
+ * promotion (i.e. u_char). This is asking for big trouble depending
+ * on the architecture you are on...
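+ *
+ * For a usage example see sppp_chap_scr() and sppp_pap_scr() above,
+ * which pass (length, pointer) pairs terminated by a 0 length.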
+ */
+
+static void
+sppp_auth_send(const struct cp *cp, struct sppp *sp,
+ unsigned int type, unsigned int id,
+ ...)
+{
+ STDDCL;
+ struct ppp_header *h;
+ struct lcp_header *lh;
+ struct mbuf *m;
+ u_char *p;
+ int len;
+ unsigned int mlen;
+ const char *msg;
+ va_list ap;
+
+ MGETHDR (m, M_DONTWAIT, MT_DATA);
+ if (! m)
+ return;
+ m->m_pkthdr.rcvif = 0;
+
+ h = mtod (m, struct ppp_header*);
+ h->address = PPP_ALLSTATIONS; /* broadcast address */
+ h->control = PPP_UI; /* Unnumbered Info */
+ h->protocol = htons(cp->proto);
+
+ lh = (struct lcp_header*)(h + 1);
+ lh->type = type;
+ lh->ident = id;
+ p = (u_char*) (lh+1);
+
+ va_start(ap, id);
+ len = 0;
+
+ while ((mlen = (unsigned int)va_arg(ap, size_t)) != 0) {
+ msg = va_arg(ap, const char *);
+ len += mlen;
+ if (len > MHLEN - PPP_HEADER_LEN - LCP_HEADER_LEN) {
+ va_end(ap);
+ m_freem(m);
+ return;
+ }
+
+ bcopy(msg, p, mlen);
+ p += mlen;
+ }
+ va_end(ap);
+
+ m->m_pkthdr.len = m->m_len = PPP_HEADER_LEN + LCP_HEADER_LEN + len;
+ lh->len = htons (LCP_HEADER_LEN + len);
+
+ if (debug) {
+ log(LOG_DEBUG, SPP_FMT "%s output <%s id=0x%x len=%d",
+ SPP_ARGS(ifp), cp->name,
+ sppp_auth_type_name(cp->proto, lh->type),
+ lh->ident, ntohs(lh->len));
+ sppp_print_bytes((u_char*) (lh+1), len);
+ log(-1, ">\n");
+ }
+ if (! IF_HANDOFF_ADJ(&sp->pp_cpq, m, ifp, 3))
+ ifp->if_oerrors++;
+}
+
+/*
+ * Flush interface queue.
+ */
+static void
+sppp_qflush(struct ifqueue *ifq)
+{
+ struct mbuf *m, *n;
+
+ n = ifq->ifq_head;
+ while ((m = n)) {
+ n = m->m_act;
+ m_freem (m);
+ }
+ ifq->ifq_head = 0;
+ ifq->ifq_tail = 0;
+ ifq->ifq_len = 0;
+}
+
+/*
+ * Send keepalive packets every 10 seconds.  After MAXALIVECNT probes
+ * go unanswered, the interface is taken down and, in PPP mode, LCP is
+ * cycled to renegotiate.
+ */
+static void
+sppp_keepalive(void *dummy)
+{
+ struct sppp *sp = (struct sppp*)dummy;
+ struct ifnet *ifp = SP2IFP(sp);
+ int s;
+
+ s = splimp();
+ SPPP_LOCK(sp);
+ /* Keepalive mode disabled or channel down? */
+ if (! (sp->pp_flags & PP_KEEPALIVE) ||
+ ! (ifp->if_drv_flags & IFF_DRV_RUNNING))
+ goto out;
+
+ if (sp->pp_mode == PP_FR) {
+ sppp_fr_keepalive (sp);
+ goto out;
+ }
+
+ /* No keepalive in PPP mode if LCP not opened yet. */
+ if (sp->pp_mode != IFF_CISCO &&
+ sp->pp_phase < PHASE_AUTHENTICATE)
+ goto out;
+
+ if (sp->pp_alivecnt == MAXALIVECNT) {
+		/* No keepalive replies received.  Stop the interface. */
+ printf (SPP_FMT "down\n", SPP_ARGS(ifp));
+ if_down (ifp);
+ sppp_qflush (&sp->pp_cpq);
+ if (sp->pp_mode != IFF_CISCO) {
+ /* XXX */
+ /* Shut down the PPP link. */
+ lcp.Down(sp);
+ /* Initiate negotiation. XXX */
+ lcp.Up(sp);
+ }
+ }
+ if (sp->pp_alivecnt <= MAXALIVECNT)
+ ++sp->pp_alivecnt;
+ if (sp->pp_mode == IFF_CISCO)
+ sppp_cisco_send (sp, CISCO_KEEPALIVE_REQ,
+ ++sp->pp_seq[IDX_LCP], sp->pp_rseq[IDX_LCP]);
+ else if (sp->pp_phase >= PHASE_AUTHENTICATE) {
+ long nmagic = htonl (sp->lcp.magic);
+ sp->lcp.echoid = ++sp->pp_seq[IDX_LCP];
+ sppp_cp_send (sp, PPP_LCP, ECHO_REQ,
+ sp->lcp.echoid, 4, &nmagic);
+ }
+out:
+ SPPP_UNLOCK(sp);
+ splx(s);
+ callout_reset(&sp->keepalive_callout, hz * 10, sppp_keepalive,
+ (void *)sp);
+}
+
+/*
+ * Get both IP addresses.
+ */
+void
+sppp_get_ip_addrs(struct sppp *sp, u_long *src, u_long *dst, u_long *srcmask)
+{
+ struct ifnet *ifp = SP2IFP(sp);
+ struct ifaddr *ifa;
+ struct sockaddr_in *si, *sm;
+ u_long ssrc, ddst;
+
+ sm = NULL;
+ ssrc = ddst = 0L;
+ /*
+	 * Pick the first AF_INET address from the list;
+ * aliases don't make any sense on a p2p link anyway.
+ */
+ si = 0;
+ if_addr_rlock(ifp);
+ TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link)
+ if (ifa->ifa_addr->sa_family == AF_INET) {
+ si = (struct sockaddr_in *)ifa->ifa_addr;
+ sm = (struct sockaddr_in *)ifa->ifa_netmask;
+ if (si)
+ break;
+ }
+ if (ifa) {
+ if (si && si->sin_addr.s_addr) {
+ ssrc = si->sin_addr.s_addr;
+ if (srcmask)
+ *srcmask = ntohl(sm->sin_addr.s_addr);
+ }
+
+ si = (struct sockaddr_in *)ifa->ifa_dstaddr;
+ if (si && si->sin_addr.s_addr)
+ ddst = si->sin_addr.s_addr;
+ }
+ if_addr_runlock(ifp);
+
+ if (dst) *dst = ntohl(ddst);
+ if (src) *src = ntohl(ssrc);
+}
+
+#ifdef INET
+/*
+ * Set my IP address. Must be called at splimp.
+ */
+static void
+sppp_set_ip_addr(struct sppp *sp, u_long src)
+{
+ STDDCL;
+ struct ifaddr *ifa;
+ struct sockaddr_in *si;
+ struct in_ifaddr *ia;
+
+ /*
+	 * Pick the first AF_INET address from the list;
+ * aliases don't make any sense on a p2p link anyway.
+ */
+ si = 0;
+ if_addr_rlock(ifp);
+ TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
+ if (ifa->ifa_addr->sa_family == AF_INET) {
+ si = (struct sockaddr_in *)ifa->ifa_addr;
+ if (si != NULL) {
+ ifa_ref(ifa);
+ break;
+ }
+ }
+ }
+ if_addr_runlock(ifp);
+
+ if (ifa != NULL) {
+ int error;
+
+ /* delete old route */
+ error = rtinit(ifa, (int)RTM_DELETE, RTF_HOST);
+ if (debug && error) {
+ log(LOG_DEBUG, SPP_FMT "sppp_set_ip_addr: rtinit DEL failed, error=%d\n",
+ SPP_ARGS(ifp), error);
+ }
+
+ /* set new address */
+ si->sin_addr.s_addr = htonl(src);
+ ia = ifatoia(ifa);
+ IN_IFADDR_WLOCK();
+ LIST_REMOVE(ia, ia_hash);
+ LIST_INSERT_HEAD(INADDR_HASH(si->sin_addr.s_addr), ia, ia_hash);
+ IN_IFADDR_WUNLOCK();
+
+ /* add new route */
+ error = rtinit(ifa, (int)RTM_ADD, RTF_HOST);
+ if (debug && error) {
+ log(LOG_DEBUG, SPP_FMT "sppp_set_ip_addr: rtinit ADD failed, error=%d",
+ SPP_ARGS(ifp), error);
+ }
+ ifa_free(ifa);
+ }
+}
+#endif
+
+#ifdef INET6
+/*
+ * Get both IPv6 addresses.
+ */
+static void
+sppp_get_ip6_addrs(struct sppp *sp, struct in6_addr *src, struct in6_addr *dst,
+ struct in6_addr *srcmask)
+{
+ struct ifnet *ifp = SP2IFP(sp);
+ struct ifaddr *ifa;
+ struct sockaddr_in6 *si, *sm;
+ struct in6_addr ssrc, ddst;
+
+ sm = NULL;
+ bzero(&ssrc, sizeof(ssrc));
+ bzero(&ddst, sizeof(ddst));
+ /*
+	 * Pick the first link-local AF_INET6 address from the list;
+ * aliases don't make any sense on a p2p link anyway.
+ */
+ si = NULL;
+ if_addr_rlock(ifp);
+ TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link)
+ if (ifa->ifa_addr->sa_family == AF_INET6) {
+ si = (struct sockaddr_in6 *)ifa->ifa_addr;
+ sm = (struct sockaddr_in6 *)ifa->ifa_netmask;
+ if (si && IN6_IS_ADDR_LINKLOCAL(&si->sin6_addr))
+ break;
+ }
+ if (ifa) {
+ if (si && !IN6_IS_ADDR_UNSPECIFIED(&si->sin6_addr)) {
+ bcopy(&si->sin6_addr, &ssrc, sizeof(ssrc));
+ if (srcmask) {
+ bcopy(&sm->sin6_addr, srcmask,
+ sizeof(*srcmask));
+ }
+ }
+
+ si = (struct sockaddr_in6 *)ifa->ifa_dstaddr;
+ if (si && !IN6_IS_ADDR_UNSPECIFIED(&si->sin6_addr))
+ bcopy(&si->sin6_addr, &ddst, sizeof(ddst));
+ }
+
+ if (dst)
+ bcopy(&ddst, dst, sizeof(*dst));
+ if (src)
+ bcopy(&ssrc, src, sizeof(*src));
+ if_addr_runlock(ifp);
+}
+
+#ifdef IPV6CP_MYIFID_DYN
+/*
+ * Generate random ifid.
+ */
+static void
+sppp_gen_ip6_addr(struct sppp *sp, struct in6_addr *addr)
+{
+ /* TBD */
+}
+
+/*
+ * Set my IPv6 address. Must be called at splimp.
+ */
+static void
+sppp_set_ip6_addr(struct sppp *sp, const struct in6_addr *src)
+{
+ STDDCL;
+ struct ifaddr *ifa;
+ struct sockaddr_in6 *sin6;
+
+ /*
+	 * Pick the first link-local AF_INET6 address from the list;
+ * aliases don't make any sense on a p2p link anyway.
+ */
+
+ sin6 = NULL;
+ if_addr_rlock(ifp);
+ TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
+ if (ifa->ifa_addr->sa_family == AF_INET6) {
+ sin6 = (struct sockaddr_in6 *)ifa->ifa_addr;
+ if (sin6 && IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
+ ifa_ref(ifa);
+ break;
+ }
+ }
+ }
+ if_addr_runlock(ifp);
+
+ if (ifa != NULL) {
+ int error;
+ struct sockaddr_in6 new_sin6 = *sin6;
+
+ bcopy(src, &new_sin6.sin6_addr, sizeof(new_sin6.sin6_addr));
+ error = in6_ifinit(ifp, ifatoia6(ifa), &new_sin6, 1);
+ if (debug && error) {
+ log(LOG_DEBUG, SPP_FMT "sppp_set_ip6_addr: in6_ifinit "
+ " failed, error=%d\n", SPP_ARGS(ifp), error);
+ }
+ ifa_free(ifa);
+ }
+}
+#endif
+
+/*
+ * Suggest a candidate address to be used by the peer.
+ */
+static void
+sppp_suggest_ip6_addr(struct sppp *sp, struct in6_addr *suggest)
+{
+ struct in6_addr myaddr;
+ struct timeval tv;
+
+ sppp_get_ip6_addrs(sp, &myaddr, 0, 0);
+
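+	/*
+	 * Derive the suggestion from our own interface ID: clear the
+	 * EUI-64 universal/local bit (0x02 in s6_addr[8]) and perturb
+	 * the low bytes with the current time so the result is unlikely
+	 * to collide with our own address.
+	 */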
+ myaddr.s6_addr[8] &= ~0x02; /* u bit to "local" */
+ microtime(&tv);
+ if ((tv.tv_usec & 0xff) == 0 && (tv.tv_sec & 0xff) == 0) {
+ myaddr.s6_addr[14] ^= 0xff;
+ myaddr.s6_addr[15] ^= 0xff;
+ } else {
+ myaddr.s6_addr[14] ^= (tv.tv_usec & 0xff);
+ myaddr.s6_addr[15] ^= (tv.tv_sec & 0xff);
+ }
+ if (suggest)
+ bcopy(&myaddr, suggest, sizeof(myaddr));
+}
+#endif /*INET6*/
+
+static int
+sppp_params(struct sppp *sp, u_long cmd, void *data)
+{
+ u_long subcmd;
+ struct ifreq *ifr = (struct ifreq *)data;
+ struct spppreq *spr;
+ int rv = 0;
+
+ if ((spr = malloc(sizeof(struct spppreq), M_TEMP, M_NOWAIT)) == 0)
+ return (EAGAIN);
+ /*
+ * ifr->ifr_data is supposed to point to a struct spppreq.
+ * Check the cmd word first before attempting to fetch all the
+ * data.
+ */
+ if ((subcmd = fuword(ifr->ifr_data)) == -1) {
+ rv = EFAULT;
+ goto quit;
+ }
+
+ if (copyin((caddr_t)ifr->ifr_data, spr, sizeof(struct spppreq)) != 0) {
+ rv = EFAULT;
+ goto quit;
+ }
+
+ switch (subcmd) {
+ case (u_long)SPPPIOGDEFS:
+ if (cmd != SIOCGIFGENERIC) {
+ rv = EINVAL;
+ break;
+ }
+ /*
+ * We copy over the entire current state, but clean
+		 * out some of the stuff we don't want to pass up.
+ * Remember, SIOCGIFGENERIC is unprotected, and can be
+ * called by any user. No need to ever get PAP or
+ * CHAP secrets back to userland anyway.
+ */
+ spr->defs.pp_phase = sp->pp_phase;
+ spr->defs.enable_vj = (sp->confflags & CONF_ENABLE_VJ) != 0;
+ spr->defs.enable_ipv6 = (sp->confflags & CONF_ENABLE_IPV6) != 0;
+ spr->defs.lcp = sp->lcp;
+ spr->defs.ipcp = sp->ipcp;
+ spr->defs.ipv6cp = sp->ipv6cp;
+ spr->defs.myauth = sp->myauth;
+ spr->defs.hisauth = sp->hisauth;
+ bzero(spr->defs.myauth.secret, AUTHKEYLEN);
+ bzero(spr->defs.myauth.challenge, AUTHKEYLEN);
+ bzero(spr->defs.hisauth.secret, AUTHKEYLEN);
+ bzero(spr->defs.hisauth.challenge, AUTHKEYLEN);
+ /*
+ * Fixup the LCP timeout value to milliseconds so
+ * spppcontrol doesn't need to bother about the value
+ * of "hz". We do the reverse calculation below when
+ * setting it.
+ */
+ spr->defs.lcp.timeout = sp->lcp.timeout * 1000 / hz;
+ rv = copyout(spr, (caddr_t)ifr->ifr_data,
+ sizeof(struct spppreq));
+ break;
+
+ case (u_long)SPPPIOSDEFS:
+ if (cmd != SIOCSIFGENERIC) {
+ rv = EINVAL;
+ break;
+ }
+		/*
+		 * We have a very specific idea of which fields we
+		 * allow to be passed back from userland, so as not to
+		 * clobber our current state.  For one, we only allow
+		 * setting anything if LCP is in dead or establish
+		 * phase.  Once the authentication negotiation has
+		 * started, the authentication settings must not be
+		 * changed again.  (The administrator can force an
+		 * ifconfig down in order to get LCP back into dead
+		 * phase.)
+		 *
+		 * Also, we only allow authentication parameters to be
+		 * specified.
+		 *
+		 * XXX Should allow setting or clearing pp_flags.
+		 *
+		 * Finally, if the respective authentication protocol
+		 * is set to anything other than 0, but the secret is
+		 * passed as all zeros, we don't trash the existing
+		 * secret.  This allows an administrator to change the
+		 * system name alone without clobbering the secret
+		 * (which he didn't get back in a previous SPPPIOGDEFS
+		 * call).  However, the secrets are cleared if the
+		 * authentication protocol is reset to 0.
+		 */
+ if (sp->pp_phase != PHASE_DEAD &&
+ sp->pp_phase != PHASE_ESTABLISH) {
+ rv = EBUSY;
+ break;
+ }
+
+ if ((spr->defs.myauth.proto != 0 && spr->defs.myauth.proto != PPP_PAP &&
+ spr->defs.myauth.proto != PPP_CHAP) ||
+ (spr->defs.hisauth.proto != 0 && spr->defs.hisauth.proto != PPP_PAP &&
+ spr->defs.hisauth.proto != PPP_CHAP)) {
+ rv = EINVAL;
+ break;
+ }
+
+ if (spr->defs.myauth.proto == 0)
+ /* resetting myauth */
+ bzero(&sp->myauth, sizeof sp->myauth);
+ else {
+ /* setting/changing myauth */
+ sp->myauth.proto = spr->defs.myauth.proto;
+ bcopy(spr->defs.myauth.name, sp->myauth.name, AUTHNAMELEN);
+ if (spr->defs.myauth.secret[0] != '\0')
+ bcopy(spr->defs.myauth.secret, sp->myauth.secret,
+ AUTHKEYLEN);
+ }
+ if (spr->defs.hisauth.proto == 0)
+ /* resetting hisauth */
+ bzero(&sp->hisauth, sizeof sp->hisauth);
+ else {
+ /* setting/changing hisauth */
+ sp->hisauth.proto = spr->defs.hisauth.proto;
+ sp->hisauth.flags = spr->defs.hisauth.flags;
+ bcopy(spr->defs.hisauth.name, sp->hisauth.name, AUTHNAMELEN);
+ if (spr->defs.hisauth.secret[0] != '\0')
+ bcopy(spr->defs.hisauth.secret, sp->hisauth.secret,
+ AUTHKEYLEN);
+ }
+ /* set LCP restart timer timeout */
+ if (spr->defs.lcp.timeout != 0)
+ sp->lcp.timeout = spr->defs.lcp.timeout * hz / 1000;
+ /* set VJ enable and IPv6 disable flags */
+#ifdef INET
+ if (spr->defs.enable_vj)
+ sp->confflags |= CONF_ENABLE_VJ;
+ else
+ sp->confflags &= ~CONF_ENABLE_VJ;
+#endif
+#ifdef INET6
+ if (spr->defs.enable_ipv6)
+ sp->confflags |= CONF_ENABLE_IPV6;
+ else
+ sp->confflags &= ~CONF_ENABLE_IPV6;
+#endif
+ break;
+
+ default:
+ rv = EINVAL;
+ }
+
+ quit:
+ free(spr, M_TEMP);
+
+ return (rv);
+}
+
+static void
+sppp_phase_network(struct sppp *sp)
+{
+ STDDCL;
+ int i;
+ u_long mask;
+
+ sp->pp_phase = PHASE_NETWORK;
+
+ if (debug)
+ log(LOG_DEBUG, SPP_FMT "phase %s\n", SPP_ARGS(ifp),
+ sppp_phase_name(sp->pp_phase));
+
+ /* Notify NCPs now. */
+ for (i = 0; i < IDX_COUNT; i++)
+ if ((cps[i])->flags & CP_NCP)
+ (cps[i])->Open(sp);
+
+ /* Send Up events to all NCPs. */
+ for (i = 0, mask = 1; i < IDX_COUNT; i++, mask <<= 1)
+ if ((sp->lcp.protos & mask) && ((cps[i])->flags & CP_NCP))
+ (cps[i])->Up(sp);
+
+ /* if no NCP is starting, all this was in vain, close down */
+ sppp_lcp_check_and_close(sp);
+}
+
+
+static const char *
+sppp_cp_type_name(u_char type)
+{
+ static char buf[12];
+ switch (type) {
+ case CONF_REQ: return "conf-req";
+ case CONF_ACK: return "conf-ack";
+ case CONF_NAK: return "conf-nak";
+ case CONF_REJ: return "conf-rej";
+ case TERM_REQ: return "term-req";
+ case TERM_ACK: return "term-ack";
+ case CODE_REJ: return "code-rej";
+ case PROTO_REJ: return "proto-rej";
+ case ECHO_REQ: return "echo-req";
+ case ECHO_REPLY: return "echo-reply";
+ case DISC_REQ: return "discard-req";
+ }
+ snprintf (buf, sizeof(buf), "cp/0x%x", type);
+ return buf;
+}
+
+static const char *
+sppp_auth_type_name(u_short proto, u_char type)
+{
+ static char buf[12];
+ switch (proto) {
+ case PPP_CHAP:
+ switch (type) {
+ case CHAP_CHALLENGE: return "challenge";
+ case CHAP_RESPONSE: return "response";
+ case CHAP_SUCCESS: return "success";
+ case CHAP_FAILURE: return "failure";
+		}
+		break;
+	case PPP_PAP:
+ switch (type) {
+ case PAP_REQ: return "req";
+ case PAP_ACK: return "ack";
+ case PAP_NAK: return "nak";
+ }
+ }
+ snprintf (buf, sizeof(buf), "auth/0x%x", type);
+ return buf;
+}
+
+static const char *
+sppp_lcp_opt_name(u_char opt)
+{
+ static char buf[12];
+ switch (opt) {
+ case LCP_OPT_MRU: return "mru";
+ case LCP_OPT_ASYNC_MAP: return "async-map";
+ case LCP_OPT_AUTH_PROTO: return "auth-proto";
+ case LCP_OPT_QUAL_PROTO: return "qual-proto";
+ case LCP_OPT_MAGIC: return "magic";
+ case LCP_OPT_PROTO_COMP: return "proto-comp";
+ case LCP_OPT_ADDR_COMP: return "addr-comp";
+ }
+ snprintf (buf, sizeof(buf), "lcp/0x%x", opt);
+ return buf;
+}
+
+#ifdef INET
+static const char *
+sppp_ipcp_opt_name(u_char opt)
+{
+ static char buf[12];
+ switch (opt) {
+ case IPCP_OPT_ADDRESSES: return "addresses";
+ case IPCP_OPT_COMPRESSION: return "compression";
+ case IPCP_OPT_ADDRESS: return "address";
+ }
+ snprintf (buf, sizeof(buf), "ipcp/0x%x", opt);
+ return buf;
+}
+#endif
+
+#ifdef INET6
+static const char *
+sppp_ipv6cp_opt_name(u_char opt)
+{
+ static char buf[12];
+ switch (opt) {
+ case IPV6CP_OPT_IFID: return "ifid";
+ case IPV6CP_OPT_COMPRESSION: return "compression";
+ }
+	snprintf (buf, sizeof(buf), "ipv6cp/0x%x", opt);
+ return buf;
+}
+#endif
+
+static const char *
+sppp_state_name(int state)
+{
+ switch (state) {
+ case STATE_INITIAL: return "initial";
+ case STATE_STARTING: return "starting";
+ case STATE_CLOSED: return "closed";
+ case STATE_STOPPED: return "stopped";
+ case STATE_CLOSING: return "closing";
+ case STATE_STOPPING: return "stopping";
+ case STATE_REQ_SENT: return "req-sent";
+ case STATE_ACK_RCVD: return "ack-rcvd";
+ case STATE_ACK_SENT: return "ack-sent";
+ case STATE_OPENED: return "opened";
+ }
+ return "illegal";
+}
+
+static const char *
+sppp_phase_name(enum ppp_phase phase)
+{
+ switch (phase) {
+ case PHASE_DEAD: return "dead";
+ case PHASE_ESTABLISH: return "establish";
+ case PHASE_TERMINATE: return "terminate";
+ case PHASE_AUTHENTICATE: return "authenticate";
+ case PHASE_NETWORK: return "network";
+ }
+ return "illegal";
+}
+
+static const char *
+sppp_proto_name(u_short proto)
+{
+ static char buf[12];
+ switch (proto) {
+ case PPP_LCP: return "lcp";
+ case PPP_IPCP: return "ipcp";
+ case PPP_PAP: return "pap";
+ case PPP_CHAP: return "chap";
+ case PPP_IPV6CP: return "ipv6cp";
+ }
+ snprintf(buf, sizeof(buf), "proto/0x%x", (unsigned)proto);
+ return buf;
+}
+
+static void
+sppp_print_bytes(const u_char *p, u_short len)
+{
+ if (len)
+ log(-1, " %*D", len, p, "-");
+}
+
+static void
+sppp_print_string(const char *p, u_short len)
+{
+ u_char c;
+
+ while (len-- > 0) {
+ c = *p++;
+ /*
+ * Print only ASCII chars directly. RFC 1994 recommends
+		 * using only them, but we don't rely on it.
+		 */
+ if (c < ' ' || c > '~')
+ log(-1, "\\x%x", c);
+ else
+ log(-1, "%c", c);
+ }
+}
+
+#ifdef INET
+static const char *
+sppp_dotted_quad(u_long addr)
+{
+ static char s[16];
+ sprintf(s, "%d.%d.%d.%d",
+ (int)((addr >> 24) & 0xff),
+ (int)((addr >> 16) & 0xff),
+ (int)((addr >> 8) & 0xff),
+ (int)(addr & 0xff));
+ return s;
+}
+#endif
+
+static int
+sppp_strnlen(u_char *p, int max)
+{
+ int len;
+
+ for (len = 0; len < max && *p; ++p)
+ ++len;
+ return len;
+}
+
+/* a dummy, used to drop uninteresting events */
+static void
+sppp_null(struct sppp *unused)
+{
+ /* do just nothing */
+}
diff --git a/rtems/freebsd/net/if_stf.c b/rtems/freebsd/net/if_stf.c
new file mode 100644
index 00000000..70edf61b
--- /dev/null
+++ b/rtems/freebsd/net/if_stf.c
@@ -0,0 +1,850 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/* $FreeBSD$ */
+/* $KAME: if_stf.c,v 1.73 2001/12/03 11:08:30 keiichi Exp $ */
+
+/*-
+ * Copyright (C) 2000 WIDE Project.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the project nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * 6to4 interface, based on RFC3056.
+ *
+ * 6to4 interface is NOT capable of link-layer (I mean, IPv4) multicasting.
+ * There is no address mapping defined from IPv6 multicast address to IPv4
+ * address. Therefore, we do not have IFF_MULTICAST on the interface.
+ *
+ * Due to the lack of address mapping for link-local addresses, we cannot
+ * throw packets toward link-local addresses (fe80::x). Also, we cannot throw
+ * packets to link-local multicast addresses (ff02::x).
+ *
+ * Here are interesting symptoms due to the lack of link-local address:
+ *
+ * Unicast routing exchange:
+ * - RIPng: Impossible. Uses link-local multicast packet toward ff02::9,
+ * and link-local addresses as nexthop.
+ * - OSPFv6: Impossible. OSPFv6 assumes that there's link-local address
+ * assigned to the link, and makes use of them. Also, HELLO packets use
+ * link-local multicast addresses (ff02::5 and ff02::6).
+ * - BGP4+: Maybe. You can only use global address as nexthop, and global
+ * address as TCP endpoint address.
+ *
+ * Multicast routing protocols:
+ * - PIM: Hello packet cannot be used to discover adjacent PIM routers.
+ * Adjacent PIM routers must be configured manually (is it really spec-wise
+ * correct thing to do?).
+ *
+ * ICMPv6:
+ * - Redirects cannot be used due to the lack of link-local address.
+ *
+ * stf interface does not have, and will not need, a link-local address.
+ * It seems to have no real benefit and does not help the above symptoms much.
+ * Even if we assign link-locals to interface, we cannot really
+ * use link-local unicast/multicast on top of 6to4 cloud (since there's no
+ * encapsulation defined for link-local address), and the above analysis does
+ * not change. RFC3056 does not mandate the assignment of link-local address
+ * either.
+ *
+ * 6to4 interface has security issues. Refer to
+ * http://playground.iijlab.net/i-d/draft-itojun-ipv6-transition-abuse-00.txt
+ * for details.  The code tries to filter out some malicious packets.
+ * Note that there is no way to be 100% secure.
+ */
+
+#include <rtems/freebsd/local/opt_inet.h>
+#include <rtems/freebsd/local/opt_inet6.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/sockio.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/errno.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/module.h>
+#include <rtems/freebsd/sys/protosw.h>
+#include <rtems/freebsd/sys/proc.h>
+#include <rtems/freebsd/sys/queue.h>
+#include <rtems/freebsd/sys/sysctl.h>
+#include <rtems/freebsd/machine/cpu.h>
+
+#include <rtems/freebsd/sys/malloc.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/if_clone.h>
+#include <rtems/freebsd/net/route.h>
+#include <rtems/freebsd/net/netisr.h>
+#include <rtems/freebsd/net/if_types.h>
+#include <rtems/freebsd/net/if_stf.h>
+#include <rtems/freebsd/net/vnet.h>
+
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/in_systm.h>
+#include <rtems/freebsd/netinet/ip.h>
+#include <rtems/freebsd/netinet/ip_var.h>
+#include <rtems/freebsd/netinet/in_var.h>
+
+#include <rtems/freebsd/netinet/ip6.h>
+#include <rtems/freebsd/netinet6/ip6_var.h>
+#include <rtems/freebsd/netinet6/in6_var.h>
+#include <rtems/freebsd/netinet/ip_ecn.h>
+
+#include <rtems/freebsd/netinet/ip_encap.h>
+
+#include <rtems/freebsd/machine/stdarg.h>
+
+#include <rtems/freebsd/net/bpf.h>
+
+#include <rtems/freebsd/security/mac/mac_framework.h>
+
+SYSCTL_DECL(_net_link);
+SYSCTL_NODE(_net_link, IFT_STF, stf, CTLFLAG_RW, 0, "6to4 Interface");
+
+static int stf_route_cache = 1;
+SYSCTL_INT(_net_link_stf, OID_AUTO, route_cache, CTLFLAG_RW,
+ &stf_route_cache, 0, "Caching of IPv4 routes for 6to4 Output");
+
+#define STFNAME "stf"
+#define STFUNIT 0
+
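+/* An address is 6to4 iff it lies within the RFC 3056 prefix 2002::/16. */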
+#define IN6_IS_ADDR_6TO4(x) (ntohs((x)->s6_addr16[0]) == 0x2002)
+
+/*
+ * XXX: Returns a pointer that is only 16-bit aligned.  Don't cast it
+ * to struct in_addr *; use bcopy() instead.
+ */
+#define GET_V4(x) ((caddr_t)(&(x)->s6_addr16[1]))
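+/*
+ * Example: for the 6to4 address 2002:0a01:0101::1, GET_V4() points at
+ * the embedded IPv4 address 10.1.1.1 (2002:V4ADDR::/48, RFC 3056).
+ */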
+
+struct stf_softc {
+ struct ifnet *sc_ifp;
+ union {
+ struct route __sc_ro4;
+ struct route_in6 __sc_ro6; /* just for safety */
+ } __sc_ro46;
+#define sc_ro __sc_ro46.__sc_ro4
+ struct mtx sc_ro_mtx;
+ u_int sc_fibnum;
+ const struct encaptab *encap_cookie;
+};
+#define STF2IFP(sc) ((sc)->sc_ifp)
+
+/*
+ * Note that mutable fields in the softc are not currently locked.
+ * We do lock sc_ro in stf_output though.
+ */
+static MALLOC_DEFINE(M_STF, STFNAME, "6to4 Tunnel Interface");
+static const int ip_stf_ttl = 40;
+
+extern struct domain inetdomain;
+struct protosw in_stf_protosw = {
+ .pr_type = SOCK_RAW,
+ .pr_domain = &inetdomain,
+ .pr_protocol = IPPROTO_IPV6,
+ .pr_flags = PR_ATOMIC|PR_ADDR,
+ .pr_input = in_stf_input,
+ .pr_output = (pr_output_t *)rip_output,
+ .pr_ctloutput = rip_ctloutput,
+ .pr_usrreqs = &rip_usrreqs
+};
+
+static char *stfnames[] = {"stf0", "stf", "6to4", NULL};
+
+static int stfmodevent(module_t, int, void *);
+static int stf_encapcheck(const struct mbuf *, int, int, void *);
+static struct in6_ifaddr *stf_getsrcifa6(struct ifnet *);
+static int stf_output(struct ifnet *, struct mbuf *, struct sockaddr *,
+ struct route *);
+static int isrfc1918addr(struct in_addr *);
+static int stf_checkaddr4(struct stf_softc *, struct in_addr *,
+ struct ifnet *);
+static int stf_checkaddr6(struct stf_softc *, struct in6_addr *,
+ struct ifnet *);
+static void stf_rtrequest(int, struct rtentry *, struct rt_addrinfo *);
+static int stf_ioctl(struct ifnet *, u_long, caddr_t);
+
+static int stf_clone_match(struct if_clone *, const char *);
+static int stf_clone_create(struct if_clone *, char *, size_t, caddr_t);
+static int stf_clone_destroy(struct if_clone *, struct ifnet *);
+struct if_clone stf_cloner = IFC_CLONE_INITIALIZER(STFNAME, NULL, 0,
+ NULL, stf_clone_match, stf_clone_create, stf_clone_destroy);
+
+static int
+stf_clone_match(struct if_clone *ifc, const char *name)
+{
+ int i;
+
+ for(i = 0; stfnames[i] != NULL; i++) {
+ if (strcmp(stfnames[i], name) == 0)
+ return (1);
+ }
+
+ return (0);
+}
+
+static int
+stf_clone_create(struct if_clone *ifc, char *name, size_t len, caddr_t params)
+{
+ int err, unit;
+ struct stf_softc *sc;
+ struct ifnet *ifp;
+
+ /*
+ * We can only have one unit, but since unit allocation is
+ * already locked, we use it to keep from allocating extra
+ * interfaces.
+ */
+ unit = STFUNIT;
+ err = ifc_alloc_unit(ifc, &unit);
+ if (err != 0)
+ return (err);
+
+ sc = malloc(sizeof(struct stf_softc), M_STF, M_WAITOK | M_ZERO);
+ ifp = STF2IFP(sc) = if_alloc(IFT_STF);
+ if (ifp == NULL) {
+ free(sc, M_STF);
+ ifc_free_unit(ifc, unit);
+ return (ENOSPC);
+ }
+ ifp->if_softc = sc;
+ sc->sc_fibnum = curthread->td_proc->p_fibnum;
+
+ /*
+	 * Set the name manually rather than using if_initname because
+ * we don't conform to the default naming convention for interfaces.
+ */
+ strlcpy(ifp->if_xname, name, IFNAMSIZ);
+ ifp->if_dname = ifc->ifc_name;
+ ifp->if_dunit = IF_DUNIT_NONE;
+
+ mtx_init(&(sc)->sc_ro_mtx, "stf ro", NULL, MTX_DEF);
+ sc->encap_cookie = encap_attach_func(AF_INET, IPPROTO_IPV6,
+ stf_encapcheck, &in_stf_protosw, sc);
+ if (sc->encap_cookie == NULL) {
+ if_printf(ifp, "attach failed\n");
+ free(sc, M_STF);
+ ifc_free_unit(ifc, unit);
+ return (ENOMEM);
+ }
+
+ ifp->if_mtu = IPV6_MMTU;
+ ifp->if_ioctl = stf_ioctl;
+ ifp->if_output = stf_output;
+ ifp->if_snd.ifq_maxlen = ifqmaxlen;
+ if_attach(ifp);
+ bpfattach(ifp, DLT_NULL, sizeof(u_int32_t));
+ return (0);
+}
+
+static int
+stf_clone_destroy(struct if_clone *ifc, struct ifnet *ifp)
+{
+ struct stf_softc *sc = ifp->if_softc;
+ int err;
+
+ err = encap_detach(sc->encap_cookie);
+ KASSERT(err == 0, ("Unexpected error detaching encap_cookie"));
+ mtx_destroy(&(sc)->sc_ro_mtx);
+ bpfdetach(ifp);
+ if_detach(ifp);
+ if_free(ifp);
+
+ free(sc, M_STF);
+ ifc_free_unit(ifc, STFUNIT);
+
+ return (0);
+}
+
+static int
+stfmodevent(mod, type, data)
+ module_t mod;
+ int type;
+ void *data;
+{
+
+ switch (type) {
+ case MOD_LOAD:
+ if_clone_attach(&stf_cloner);
+ break;
+ case MOD_UNLOAD:
+ if_clone_detach(&stf_cloner);
+ break;
+ default:
+ return (EOPNOTSUPP);
+ }
+
+ return (0);
+}
+
+static moduledata_t stf_mod = {
+ "if_stf",
+ stfmodevent,
+ 0
+};
+
+DECLARE_MODULE(if_stf, stf_mod, SI_SUB_PSEUDO, SI_ORDER_ANY);
+
+static int
+stf_encapcheck(m, off, proto, arg)
+ const struct mbuf *m;
+ int off;
+ int proto;
+ void *arg;
+{
+ struct ip ip;
+ struct in6_ifaddr *ia6;
+ struct stf_softc *sc;
+ struct in_addr a, b, mask;
+
+ sc = (struct stf_softc *)arg;
+ if (sc == NULL)
+ return 0;
+
+ if ((STF2IFP(sc)->if_flags & IFF_UP) == 0)
+ return 0;
+
+ /* IFF_LINK0 means "no decapsulation" */
+ if ((STF2IFP(sc)->if_flags & IFF_LINK0) != 0)
+ return 0;
+
+ if (proto != IPPROTO_IPV6)
+ return 0;
+
+ /* LINTED const cast */
+ m_copydata((struct mbuf *)(uintptr_t)m, 0, sizeof(ip), (caddr_t)&ip);
+
+ if (ip.ip_v != 4)
+ return 0;
+
+ ia6 = stf_getsrcifa6(STF2IFP(sc));
+ if (ia6 == NULL)
+ return 0;
+
+ /*
+ * check if IPv4 dst matches the IPv4 address derived from the
+ * local 6to4 address.
+ * success on: dst = 10.1.1.1, ia6->ia_addr = 2002:0a01:0101:...
+ */
+ if (bcmp(GET_V4(&ia6->ia_addr.sin6_addr), &ip.ip_dst,
+ sizeof(ip.ip_dst)) != 0) {
+ ifa_free(&ia6->ia_ifa);
+ return 0;
+ }
+
+ /*
+ * check if IPv4 src matches the IPv4 address derived from the
+ * local 6to4 address masked by prefixmask.
+ * success on: src = 10.1.1.1, ia6->ia_addr = 2002:0a00:.../24
+ * fail on: src = 10.1.1.1, ia6->ia_addr = 2002:0b00:.../24
+ */
+ bzero(&a, sizeof(a));
+ bcopy(GET_V4(&ia6->ia_addr.sin6_addr), &a, sizeof(a));
+ bcopy(GET_V4(&ia6->ia_prefixmask.sin6_addr), &mask, sizeof(mask));
+ ifa_free(&ia6->ia_ifa);
+ a.s_addr &= mask.s_addr;
+ b = ip.ip_src;
+ b.s_addr &= mask.s_addr;
+ if (a.s_addr != b.s_addr)
+ return 0;
+
+	/*
+	 * The stf interface matches a single side only (the IPv4 dst);
+	 * the value returned is the number of matched address bits,
+	 * which the encap framework uses to rank competing matches.
+	 */
+	return 32;
+}
+
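+/*
+ * Return the first 6to4 address configured on the interface whose
+ * embedded IPv4 address is also configured somewhere on the system;
+ * it serves as the tunnel source.  Returns a referenced ifaddr.
+ */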
+static struct in6_ifaddr *
+stf_getsrcifa6(ifp)
+ struct ifnet *ifp;
+{
+ struct ifaddr *ia;
+ struct in_ifaddr *ia4;
+ struct sockaddr_in6 *sin6;
+ struct in_addr in;
+
+ if_addr_rlock(ifp);
+ TAILQ_FOREACH(ia, &ifp->if_addrhead, ifa_link) {
+ if (ia->ifa_addr->sa_family != AF_INET6)
+ continue;
+ sin6 = (struct sockaddr_in6 *)ia->ifa_addr;
+ if (!IN6_IS_ADDR_6TO4(&sin6->sin6_addr))
+ continue;
+
+ bcopy(GET_V4(&sin6->sin6_addr), &in, sizeof(in));
+ LIST_FOREACH(ia4, INADDR_HASH(in.s_addr), ia_hash)
+ if (ia4->ia_addr.sin_addr.s_addr == in.s_addr)
+ break;
+ if (ia4 == NULL)
+ continue;
+
+ ifa_ref(ia);
+ if_addr_runlock(ifp);
+ return (struct in6_ifaddr *)ia;
+ }
+ if_addr_runlock(ifp);
+
+ return NULL;
+}
+
+static int
+stf_output(ifp, m, dst, ro)
+ struct ifnet *ifp;
+ struct mbuf *m;
+ struct sockaddr *dst;
+ struct route *ro;
+{
+ struct stf_softc *sc;
+ struct sockaddr_in6 *dst6;
+ struct route *cached_route;
+ struct in_addr in4;
+ caddr_t ptr;
+ struct sockaddr_in *dst4;
+ u_int8_t tos;
+ struct ip *ip;
+ struct ip6_hdr *ip6;
+ struct in6_ifaddr *ia6;
+ u_int32_t af;
+ int error;
+
+#ifdef MAC
+ error = mac_ifnet_check_transmit(ifp, m);
+ if (error) {
+ m_freem(m);
+ return (error);
+ }
+#endif
+
+ sc = ifp->if_softc;
+ dst6 = (struct sockaddr_in6 *)dst;
+
+ /* just in case */
+ if ((ifp->if_flags & IFF_UP) == 0) {
+ m_freem(m);
+ ifp->if_oerrors++;
+ return ENETDOWN;
+ }
+
+ /*
+	 * If we don't have an IPv4 address that matches our inner IPv6
+	 * address, we shouldn't generate output.  Without this check,
+	 * we'll end up using the wrong IPv4 source.
+ */
+ ia6 = stf_getsrcifa6(ifp);
+ if (ia6 == NULL) {
+ m_freem(m);
+ ifp->if_oerrors++;
+ return ENETDOWN;
+ }
+
+ if (m->m_len < sizeof(*ip6)) {
+ m = m_pullup(m, sizeof(*ip6));
+ if (!m) {
+ ifa_free(&ia6->ia_ifa);
+ ifp->if_oerrors++;
+ return ENOBUFS;
+ }
+ }
+ ip6 = mtod(m, struct ip6_hdr *);
+ tos = (ntohl(ip6->ip6_flow) >> 20) & 0xff;
+
+ /*
+ * BPF writes need to be handled specially.
+ * This is a null operation, nothing here checks dst->sa_family.
+ */
+ if (dst->sa_family == AF_UNSPEC) {
+ bcopy(dst->sa_data, &af, sizeof(af));
+ dst->sa_family = af;
+ }
+
+ /*
+	 * Pick up the right outer dst addr from the list of candidates.
+ * ip6_dst has priority as it may be able to give us shorter IPv4 hops.
+ */
+ ptr = NULL;
+ if (IN6_IS_ADDR_6TO4(&ip6->ip6_dst))
+ ptr = GET_V4(&ip6->ip6_dst);
+ else if (IN6_IS_ADDR_6TO4(&dst6->sin6_addr))
+ ptr = GET_V4(&dst6->sin6_addr);
+ else {
+ ifa_free(&ia6->ia_ifa);
+ m_freem(m);
+ ifp->if_oerrors++;
+ return ENETUNREACH;
+ }
+ bcopy(ptr, &in4, sizeof(in4));
+
+ if (bpf_peers_present(ifp->if_bpf)) {
+ /*
+ * We need to prepend the address family as
+ * a four byte field. Cons up a dummy header
+ * to pacify bpf. This is safe because bpf
+ * will only read from the mbuf (i.e., it won't
+ * try to free it or keep a pointer a to it).
+		 * try to free it or keep a pointer to it).
+ af = AF_INET6;
+ bpf_mtap2(ifp->if_bpf, &af, sizeof(af), m);
+ }
+
+ M_PREPEND(m, sizeof(struct ip), M_DONTWAIT);
+ if (m && m->m_len < sizeof(struct ip))
+ m = m_pullup(m, sizeof(struct ip));
+ if (m == NULL) {
+ ifa_free(&ia6->ia_ifa);
+ ifp->if_oerrors++;
+ return ENOBUFS;
+ }
+ ip = mtod(m, struct ip *);
+
+ bzero(ip, sizeof(*ip));
+
+ bcopy(GET_V4(&((struct sockaddr_in6 *)&ia6->ia_addr)->sin6_addr),
+ &ip->ip_src, sizeof(ip->ip_src));
+ ifa_free(&ia6->ia_ifa);
+ bcopy(&in4, &ip->ip_dst, sizeof(ip->ip_dst));
+ ip->ip_p = IPPROTO_IPV6;
+ ip->ip_ttl = ip_stf_ttl;
+ ip->ip_len = m->m_pkthdr.len; /*host order*/
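+	/* IFF_LINK1 selects ECN_ALLOWED (RFC 3168) rather than ECN_NOCARE */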
+ if (ifp->if_flags & IFF_LINK1)
+ ip_ecn_ingress(ECN_ALLOWED, &ip->ip_tos, &tos);
+ else
+ ip_ecn_ingress(ECN_NOCARE, &ip->ip_tos, &tos);
+
+ if (!stf_route_cache) {
+ cached_route = NULL;
+ goto sendit;
+ }
+
+ /*
+ * Do we have a cached route?
+ */
+ mtx_lock(&(sc)->sc_ro_mtx);
+ dst4 = (struct sockaddr_in *)&sc->sc_ro.ro_dst;
+ if (dst4->sin_family != AF_INET ||
+ bcmp(&dst4->sin_addr, &ip->ip_dst, sizeof(ip->ip_dst)) != 0) {
+ /* cache route doesn't match */
+ dst4->sin_family = AF_INET;
+ dst4->sin_len = sizeof(struct sockaddr_in);
+ bcopy(&ip->ip_dst, &dst4->sin_addr, sizeof(dst4->sin_addr));
+ if (sc->sc_ro.ro_rt) {
+ RTFREE(sc->sc_ro.ro_rt);
+ sc->sc_ro.ro_rt = NULL;
+ }
+ }
+
+ if (sc->sc_ro.ro_rt == NULL) {
+ rtalloc_fib(&sc->sc_ro, sc->sc_fibnum);
+ if (sc->sc_ro.ro_rt == NULL) {
+ m_freem(m);
+ mtx_unlock(&(sc)->sc_ro_mtx);
+ ifp->if_oerrors++;
+ return ENETUNREACH;
+ }
+ }
+ cached_route = &sc->sc_ro;
+
+sendit:
+ M_SETFIB(m, sc->sc_fibnum);
+ ifp->if_opackets++;
+ error = ip_output(m, NULL, cached_route, 0, NULL, NULL);
+
+ if (cached_route != NULL)
+ mtx_unlock(&(sc)->sc_ro_mtx);
+ return error;
+}
+
+static int
+isrfc1918addr(in)
+ struct in_addr *in;
+{
+ /*
+	 * returns 1 if the address is within a private range:
+ * 10.0.0.0/8 172.16.0.0/12 192.168.0.0/16
+ */
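+	/*
+	 * e.g. the second test keeps the top 12 bits in a 16-bit value;
+	 * 172 * 256 + 16 == 0xac10, i.e. 172.16.0.0/12.
+	 */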
+ if ((ntohl(in->s_addr) & 0xff000000) >> 24 == 10 ||
+ (ntohl(in->s_addr) & 0xfff00000) >> 16 == 172 * 256 + 16 ||
+ (ntohl(in->s_addr) & 0xffff0000) >> 16 == 192 * 256 + 168)
+ return 1;
+
+ return 0;
+}
+
+static int
+stf_checkaddr4(sc, in, inifp)
+ struct stf_softc *sc;
+ struct in_addr *in;
+ struct ifnet *inifp; /* incoming interface */
+{
+ struct in_ifaddr *ia4;
+
+ /*
+ * reject packets with the following address:
+ * 224.0.0.0/4 0.0.0.0/8 127.0.0.0/8 255.0.0.0/8
+ */
+ if (IN_MULTICAST(ntohl(in->s_addr)))
+ return -1;
+ switch ((ntohl(in->s_addr) & 0xff000000) >> 24) {
+ case 0: case 127: case 255:
+ return -1;
+ }
+
+ /*
+ * reject packets with private address range.
+ * (requirement from RFC3056 section 2 1st paragraph)
+ */
+ if (isrfc1918addr(in))
+ return -1;
+
+ /*
+ * reject packets with broadcast
+ */
+ IN_IFADDR_RLOCK();
+ for (ia4 = TAILQ_FIRST(&V_in_ifaddrhead);
+ ia4;
+ ia4 = TAILQ_NEXT(ia4, ia_link))
+ {
+ if ((ia4->ia_ifa.ifa_ifp->if_flags & IFF_BROADCAST) == 0)
+ continue;
+ if (in->s_addr == ia4->ia_broadaddr.sin_addr.s_addr) {
+ IN_IFADDR_RUNLOCK();
+ return -1;
+ }
+ }
+ IN_IFADDR_RUNLOCK();
+
+ /*
+ * perform ingress filter
+ */
+ if (sc && (STF2IFP(sc)->if_flags & IFF_LINK2) == 0 && inifp) {
+ struct sockaddr_in sin;
+ struct rtentry *rt;
+
+ bzero(&sin, sizeof(sin));
+ sin.sin_family = AF_INET;
+ sin.sin_len = sizeof(struct sockaddr_in);
+ sin.sin_addr = *in;
+ rt = rtalloc1_fib((struct sockaddr *)&sin, 0,
+ 0UL, sc->sc_fibnum);
+ if (!rt || rt->rt_ifp != inifp) {
+#if 0
+ log(LOG_WARNING, "%s: packet from 0x%x dropped "
+ "due to ingress filter\n", if_name(STF2IFP(sc)),
+ (u_int32_t)ntohl(sin.sin_addr.s_addr));
+#endif
+ if (rt)
+ RTFREE_LOCKED(rt);
+ return -1;
+ }
+ RTFREE_LOCKED(rt);
+ }
+
+ return 0;
+}
+
+static int
+stf_checkaddr6(sc, in6, inifp)
+ struct stf_softc *sc;
+ struct in6_addr *in6;
+ struct ifnet *inifp; /* incoming interface */
+{
+ /*
+ * check 6to4 addresses
+ */
+ if (IN6_IS_ADDR_6TO4(in6)) {
+ struct in_addr in4;
+ bcopy(GET_V4(in6), &in4, sizeof(in4));
+ return stf_checkaddr4(sc, &in4, inifp);
+ }
+
+ /*
+	 * reject anything that looks suspicious.  The test is implemented
+	 * in ip6_input too, but we check here as well to
+ * (1) reject bad packets earlier, and
+ * (2) to be safe against future ip6_input change.
+ */
+ if (IN6_IS_ADDR_V4COMPAT(in6) || IN6_IS_ADDR_V4MAPPED(in6))
+ return -1;
+
+ return 0;
+}
+
+void
+in_stf_input(m, off)
+ struct mbuf *m;
+ int off;
+{
+ int proto;
+ struct stf_softc *sc;
+ struct ip *ip;
+ struct ip6_hdr *ip6;
+ u_int8_t otos, itos;
+ struct ifnet *ifp;
+
+ proto = mtod(m, struct ip *)->ip_p;
+
+ if (proto != IPPROTO_IPV6) {
+ m_freem(m);
+ return;
+ }
+
+ ip = mtod(m, struct ip *);
+
+ sc = (struct stf_softc *)encap_getarg(m);
+
+ if (sc == NULL || (STF2IFP(sc)->if_flags & IFF_UP) == 0) {
+ m_freem(m);
+ return;
+ }
+
+ ifp = STF2IFP(sc);
+
+#ifdef MAC
+ mac_ifnet_create_mbuf(ifp, m);
+#endif
+
+ /*
+ * perform sanity check against outer src/dst.
+ * for source, perform ingress filter as well.
+ */
+ if (stf_checkaddr4(sc, &ip->ip_dst, NULL) < 0 ||
+ stf_checkaddr4(sc, &ip->ip_src, m->m_pkthdr.rcvif) < 0) {
+ m_freem(m);
+ return;
+ }
+
+ otos = ip->ip_tos;
+ m_adj(m, off);
+
+ if (m->m_len < sizeof(*ip6)) {
+ m = m_pullup(m, sizeof(*ip6));
+ if (!m)
+ return;
+ }
+ ip6 = mtod(m, struct ip6_hdr *);
+
+ /*
+ * perform sanity check against inner src/dst.
+ * for source, perform ingress filter as well.
+ */
+ if (stf_checkaddr6(sc, &ip6->ip6_dst, NULL) < 0 ||
+ stf_checkaddr6(sc, &ip6->ip6_src, m->m_pkthdr.rcvif) < 0) {
+ m_freem(m);
+ return;
+ }
+
+ itos = (ntohl(ip6->ip6_flow) >> 20) & 0xff;
+ if ((ifp->if_flags & IFF_LINK1) != 0)
+ ip_ecn_egress(ECN_ALLOWED, &otos, &itos);
+ else
+ ip_ecn_egress(ECN_NOCARE, &otos, &itos);
+ ip6->ip6_flow &= ~htonl(0xff << 20);
+ ip6->ip6_flow |= htonl((u_int32_t)itos << 20);
+
+ m->m_pkthdr.rcvif = ifp;
+
+ if (bpf_peers_present(ifp->if_bpf)) {
+ /*
+ * We need to prepend the address family as
+ * a four byte field. Cons up a dummy header
+ * to pacify bpf. This is safe because bpf
+ * will only read from the mbuf (i.e., it won't
+		 * try to free it or keep a pointer to it).
+ */
+ u_int32_t af = AF_INET6;
+ bpf_mtap2(ifp->if_bpf, &af, sizeof(af), m);
+ }
+
+ /*
+	 * Put the packet into the network layer input queue according to
+	 * the specified address family.
+	 * See net/if_gif.c for possible packet-reordering issues due to
+	 * the extra queueing.
+ */
+ ifp->if_ipackets++;
+ ifp->if_ibytes += m->m_pkthdr.len;
+ netisr_dispatch(NETISR_IPV6, m);
+}
+
+/* ARGSUSED */
+static void
+stf_rtrequest(cmd, rt, info)
+ int cmd;
+ struct rtentry *rt;
+ struct rt_addrinfo *info;
+{
+ RT_LOCK_ASSERT(rt);
+ rt->rt_rmx.rmx_mtu = IPV6_MMTU;
+}
+
+static int
+stf_ioctl(ifp, cmd, data)
+ struct ifnet *ifp;
+ u_long cmd;
+ caddr_t data;
+{
+ struct ifaddr *ifa;
+ struct ifreq *ifr;
+ struct sockaddr_in6 *sin6;
+ struct in_addr addr;
+ int error;
+
+ error = 0;
+ switch (cmd) {
+ case SIOCSIFADDR:
+ ifa = (struct ifaddr *)data;
+ if (ifa == NULL || ifa->ifa_addr->sa_family != AF_INET6) {
+ error = EAFNOSUPPORT;
+ break;
+ }
+ sin6 = (struct sockaddr_in6 *)ifa->ifa_addr;
+ if (!IN6_IS_ADDR_6TO4(&sin6->sin6_addr)) {
+ error = EINVAL;
+ break;
+ }
+ bcopy(GET_V4(&sin6->sin6_addr), &addr, sizeof(addr));
+ if (isrfc1918addr(&addr)) {
+ error = EINVAL;
+ break;
+ }
+
+ ifa->ifa_rtrequest = stf_rtrequest;
+ ifp->if_flags |= IFF_UP;
+ break;
+
+ case SIOCADDMULTI:
+ case SIOCDELMULTI:
+ ifr = (struct ifreq *)data;
+		if (ifr == NULL || ifr->ifr_addr.sa_family != AF_INET6)
+			error = EAFNOSUPPORT;
+ break;
+
+ default:
+ error = EINVAL;
+ break;
+ }
+
+ return error;
+}
diff --git a/rtems/freebsd/net/if_stf.h b/rtems/freebsd/net/if_stf.h
new file mode 100644
index 00000000..64fd30ee
--- /dev/null
+++ b/rtems/freebsd/net/if_stf.h
@@ -0,0 +1,38 @@
+/* $FreeBSD$ */
+/* $KAME: if_stf.h,v 1.5 2001/10/12 10:09:17 keiichi Exp $ */
+
+/*-
+ * Copyright (C) 2000 WIDE Project.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the project nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _NET_IF_STF_HH_
+#define _NET_IF_STF_HH_
+
+void in_stf_input(struct mbuf *, int);
+
+#endif /* _NET_IF_STF_HH_ */
diff --git a/rtems/freebsd/net/if_tap.c b/rtems/freebsd/net/if_tap.c
new file mode 100644
index 00000000..08a5c8bf
--- /dev/null
+++ b/rtems/freebsd/net/if_tap.c
@@ -0,0 +1,1086 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (C) 1999-2000 by Maksim Yevmenkin <m_evmenkin@yahoo.com>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * BASED ON:
+ * -------------------------------------------------------------------------
+ *
+ * Copyright (c) 1988, Julian Onions <jpo@cs.nott.ac.uk>
+ * Nottingham University 1987.
+ */
+
+/*
+ * $FreeBSD$
+ * $Id: if_tap.c,v 0.21 2000/07/23 21:46:02 max Exp $
+ */
+
+#include <rtems/freebsd/local/opt_compat.h>
+#include <rtems/freebsd/local/opt_inet.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/conf.h>
+#include <rtems/freebsd/sys/fcntl.h>
+#include <rtems/freebsd/sys/filio.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/module.h>
+#include <rtems/freebsd/sys/poll.h>
+#include <rtems/freebsd/sys/priv.h>
+#include <rtems/freebsd/sys/proc.h>
+#include <rtems/freebsd/sys/selinfo.h>
+#include <rtems/freebsd/sys/signalvar.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/sockio.h>
+#include <rtems/freebsd/sys/sysctl.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/ttycom.h>
+#include <rtems/freebsd/sys/uio.h>
+#include <rtems/freebsd/sys/queue.h>
+
+#include <rtems/freebsd/net/bpf.h>
+#include <rtems/freebsd/net/ethernet.h>
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/if_clone.h>
+#include <rtems/freebsd/net/if_dl.h>
+#include <rtems/freebsd/net/route.h>
+#include <rtems/freebsd/net/if_types.h>
+
+#include <rtems/freebsd/netinet/in.h>
+
+#include <rtems/freebsd/net/if_tapvar.h>
+#include <rtems/freebsd/net/if_tap.h>
+
+
+#define CDEV_NAME "tap"
+#define TAPDEBUG if (tapdebug) printf
+
+#define TAP "tap"
+#define VMNET "vmnet"
+#define TAPMAXUNIT 0x7fff
+#define VMNET_DEV_MASK CLONE_FLAG0
+
+/* module */
+static int tapmodevent(module_t, int, void *);
+
+/* device */
+static void tapclone(void *, struct ucred *, char *, int,
+ struct cdev **);
+static void tapcreate(struct cdev *);
+
+/* network interface */
+static void tapifstart(struct ifnet *);
+static int tapifioctl(struct ifnet *, u_long, caddr_t);
+static void tapifinit(void *);
+
+static int tap_clone_create(struct if_clone *, int, caddr_t);
+static void tap_clone_destroy(struct ifnet *);
+static int vmnet_clone_create(struct if_clone *, int, caddr_t);
+static void vmnet_clone_destroy(struct ifnet *);
+
+IFC_SIMPLE_DECLARE(tap, 0);
+IFC_SIMPLE_DECLARE(vmnet, 0);
+
+/* character device */
+static d_open_t tapopen;
+static d_close_t tapclose;
+static d_read_t tapread;
+static d_write_t tapwrite;
+static d_ioctl_t tapioctl;
+static d_poll_t tappoll;
+static d_kqfilter_t tapkqfilter;
+
+/* kqueue(2) */
+static int tapkqread(struct knote *, long);
+static int tapkqwrite(struct knote *, long);
+static void tapkqdetach(struct knote *);
+
+static struct filterops tap_read_filterops = {
+ .f_isfd = 1,
+ .f_attach = NULL,
+ .f_detach = tapkqdetach,
+ .f_event = tapkqread,
+};
+
+static struct filterops tap_write_filterops = {
+ .f_isfd = 1,
+ .f_attach = NULL,
+ .f_detach = tapkqdetach,
+ .f_event = tapkqwrite,
+};
+
+static struct cdevsw tap_cdevsw = {
+ .d_version = D_VERSION,
+ .d_flags = D_PSEUDO | D_NEEDMINOR,
+ .d_open = tapopen,
+ .d_close = tapclose,
+ .d_read = tapread,
+ .d_write = tapwrite,
+ .d_ioctl = tapioctl,
+ .d_poll = tappoll,
+ .d_name = CDEV_NAME,
+ .d_kqfilter = tapkqfilter,
+};
+
+/*
+ * All global variables in if_tap.c are locked with tapmtx, with the
+ * exception of tapdebug, which is accessed unlocked, and tapclones,
+ * which is static after setup.
+ */
+static struct mtx tapmtx;
+static int tapdebug = 0; /* debug flag */
+static int tapuopen = 0; /* allow user open() */
+static int tapuponopen = 0; /* IFF_UP on open() */
+static int tapdclone = 1; /* enable devfs cloning */
+static SLIST_HEAD(, tap_softc) taphead; /* first device */
+static struct clonedevs *tapclones;
+
+MALLOC_DECLARE(M_TAP);
+MALLOC_DEFINE(M_TAP, CDEV_NAME, "Ethernet tunnel interface");
+SYSCTL_INT(_debug, OID_AUTO, if_tap_debug, CTLFLAG_RW, &tapdebug, 0, "");
+
+SYSCTL_DECL(_net_link);
+SYSCTL_NODE(_net_link, OID_AUTO, tap, CTLFLAG_RW, 0,
+ "Ethernet tunnel software network interface");
+SYSCTL_INT(_net_link_tap, OID_AUTO, user_open, CTLFLAG_RW, &tapuopen, 0,
+ "Allow user to open /dev/tap (based on node permissions)");
+SYSCTL_INT(_net_link_tap, OID_AUTO, up_on_open, CTLFLAG_RW, &tapuponopen, 0,
+ "Bring interface up when /dev/tap is opened");
+SYSCTL_INT(_net_link_tap, OID_AUTO, devfs_cloning, CTLFLAG_RW, &tapdclone, 0,
+    "Enable legacy devfs interface creation");
+SYSCTL_INT(_net_link_tap, OID_AUTO, debug, CTLFLAG_RW, &tapdebug, 0, "");
+
+TUNABLE_INT("net.link.tap.devfs_cloning", &tapdclone);
+
+DEV_MODULE(if_tap, tapmodevent, NULL);
+
+static int
+tap_clone_create(struct if_clone *ifc, int unit, caddr_t params)
+{
+ struct cdev *dev;
+ int i;
+ int extra;
+
+ if (strcmp(ifc->ifc_name, VMNET) == 0)
+ extra = VMNET_DEV_MASK;
+ else
+ extra = 0;
+
+ /* find any existing device, or allocate new unit number */
+ i = clone_create(&tapclones, &tap_cdevsw, &unit, &dev, extra);
+ if (i) {
+ dev = make_dev(&tap_cdevsw, unit | extra,
+ UID_ROOT, GID_WHEEL, 0600, "%s%d", ifc->ifc_name, unit);
+ }
+
+ tapcreate(dev);
+ return (0);
+}
+
+/* vmnet devices are tap devices in disguise */
+static int
+vmnet_clone_create(struct if_clone *ifc, int unit, caddr_t params)
+{
+ return tap_clone_create(ifc, unit, params);
+}
+
+static void
+tap_destroy(struct tap_softc *tp)
+{
+ struct ifnet *ifp = tp->tap_ifp;
+
+ /* Unlocked read. */
+ KASSERT(!(tp->tap_flags & TAP_OPEN),
+ ("%s flags is out of sync", ifp->if_xname));
+
+ knlist_destroy(&tp->tap_rsel.si_note);
+ destroy_dev(tp->tap_dev);
+ ether_ifdetach(ifp);
+ if_free_type(ifp, IFT_ETHER);
+
+ mtx_destroy(&tp->tap_mtx);
+ free(tp, M_TAP);
+}
+
+static void
+tap_clone_destroy(struct ifnet *ifp)
+{
+ struct tap_softc *tp = ifp->if_softc;
+
+ mtx_lock(&tapmtx);
+ SLIST_REMOVE(&taphead, tp, tap_softc, tap_next);
+ mtx_unlock(&tapmtx);
+ tap_destroy(tp);
+}
+
+/* vmnet devices are tap devices in disguise */
+static void
+vmnet_clone_destroy(struct ifnet *ifp)
+{
+ tap_clone_destroy(ifp);
+}
+
+/*
+ * tapmodevent
+ *
+ * module event handler
+ */
+static int
+tapmodevent(module_t mod, int type, void *data)
+{
+ static eventhandler_tag eh_tag = NULL;
+ struct tap_softc *tp = NULL;
+ struct ifnet *ifp = NULL;
+
+ switch (type) {
+ case MOD_LOAD:
+
+		/* initialize device */
+
+ mtx_init(&tapmtx, "tapmtx", NULL, MTX_DEF);
+ SLIST_INIT(&taphead);
+
+ clone_setup(&tapclones);
+ eh_tag = EVENTHANDLER_REGISTER(dev_clone, tapclone, 0, 1000);
+ if (eh_tag == NULL) {
+ clone_cleanup(&tapclones);
+ mtx_destroy(&tapmtx);
+ return (ENOMEM);
+ }
+ if_clone_attach(&tap_cloner);
+ if_clone_attach(&vmnet_cloner);
+ return (0);
+
+ case MOD_UNLOAD:
+ /*
+ * The EBUSY algorithm here can't quite atomically
+ * guarantee that this is race-free since we have to
+ * release the tap mtx to deregister the clone handler.
+ */
+ mtx_lock(&tapmtx);
+ SLIST_FOREACH(tp, &taphead, tap_next) {
+ mtx_lock(&tp->tap_mtx);
+ if (tp->tap_flags & TAP_OPEN) {
+ mtx_unlock(&tp->tap_mtx);
+ mtx_unlock(&tapmtx);
+ return (EBUSY);
+ }
+ mtx_unlock(&tp->tap_mtx);
+ }
+ mtx_unlock(&tapmtx);
+
+ EVENTHANDLER_DEREGISTER(dev_clone, eh_tag);
+ if_clone_detach(&tap_cloner);
+ if_clone_detach(&vmnet_cloner);
+ drain_dev_clone_events();
+
+ mtx_lock(&tapmtx);
+ while ((tp = SLIST_FIRST(&taphead)) != NULL) {
+ SLIST_REMOVE_HEAD(&taphead, tap_next);
+ mtx_unlock(&tapmtx);
+
+ ifp = tp->tap_ifp;
+
+ TAPDEBUG("detaching %s\n", ifp->if_xname);
+
+ tap_destroy(tp);
+ mtx_lock(&tapmtx);
+ }
+ mtx_unlock(&tapmtx);
+ clone_cleanup(&tapclones);
+
+ mtx_destroy(&tapmtx);
+
+ break;
+
+ default:
+ return (EOPNOTSUPP);
+ }
+
+ return (0);
+} /* tapmodevent */
+
+
+/*
+ * DEVFS handler
+ *
+ * We need to support two kinds of devices - tap and vmnet
+ */
+static void
+tapclone(void *arg, struct ucred *cred, char *name, int namelen, struct cdev **dev)
+{
+ char devname[SPECNAMELEN + 1];
+ int i, unit, append_unit;
+ int extra;
+
+ if (*dev != NULL)
+ return;
+
+ if (!tapdclone ||
+ (!tapuopen && priv_check_cred(cred, PRIV_NET_IFCREATE, 0) != 0))
+ return;
+
+ unit = 0;
+ append_unit = 0;
+ extra = 0;
+
+	/* We're interested only in tap/vmnet devices. */
+ if (strcmp(name, TAP) == 0) {
+ unit = -1;
+ } else if (strcmp(name, VMNET) == 0) {
+ unit = -1;
+ extra = VMNET_DEV_MASK;
+ } else if (dev_stdclone(name, NULL, TAP, &unit) != 1) {
+ if (dev_stdclone(name, NULL, VMNET, &unit) != 1) {
+ return;
+ } else {
+ extra = VMNET_DEV_MASK;
+ }
+ }
+
+ if (unit == -1)
+ append_unit = 1;
+
+ /* find any existing device, or allocate new unit number */
+ i = clone_create(&tapclones, &tap_cdevsw, &unit, dev, extra);
+ if (i) {
+ if (append_unit) {
+ /*
+			 * We were passed 'tap' or 'vmnet' with no unit
+			 * specified, so we need to append one now.
+ */
+ namelen = snprintf(devname, sizeof(devname), "%s%d", name,
+ unit);
+ name = devname;
+ }
+
+ *dev = make_dev_credf(MAKEDEV_REF, &tap_cdevsw, unit | extra,
+ cred, UID_ROOT, GID_WHEEL, 0600, "%s", name);
+ }
+
+ if_clone_create(name, namelen, NULL);
+} /* tapclone */
+
+
+/*
+ * tapcreate
+ *
+ * create the network interface
+ */
+static void
+tapcreate(struct cdev *dev)
+{
+ struct ifnet *ifp = NULL;
+ struct tap_softc *tp = NULL;
+ unsigned short macaddr_hi;
+ uint32_t macaddr_mid;
+ int unit;
+ char *name = NULL;
+ u_char eaddr[6];
+
+ dev->si_flags &= ~SI_CHEAPCLONE;
+
+ /* allocate driver storage and create device */
+ tp = malloc(sizeof(*tp), M_TAP, M_WAITOK | M_ZERO);
+ mtx_init(&tp->tap_mtx, "tap_mtx", NULL, MTX_DEF);
+ mtx_lock(&tapmtx);
+ SLIST_INSERT_HEAD(&taphead, tp, tap_next);
+ mtx_unlock(&tapmtx);
+
+ unit = dev2unit(dev);
+
+ /* select device: tap or vmnet */
+ if (unit & VMNET_DEV_MASK) {
+ name = VMNET;
+ tp->tap_flags |= TAP_VMNET;
+ } else
+ name = TAP;
+
+ unit &= TAPMAXUNIT;
+
+ TAPDEBUG("tapcreate(%s%d). minor = %#x\n", name, unit, dev2unit(dev));
+
+ /* generate fake MAC address: 00 bd xx xx xx unit_no */
+ macaddr_hi = htons(0x00bd);
+ macaddr_mid = (uint32_t) ticks;
+ bcopy(&macaddr_hi, eaddr, sizeof(short));
+ bcopy(&macaddr_mid, &eaddr[2], sizeof(uint32_t));
+ eaddr[5] = (u_char)unit;
+
+ /* fill the rest and attach interface */
+ ifp = tp->tap_ifp = if_alloc(IFT_ETHER);
+ if (ifp == NULL)
+ panic("%s%d: can not if_alloc()", name, unit);
+ ifp->if_softc = tp;
+ if_initname(ifp, name, unit);
+ ifp->if_init = tapifinit;
+ ifp->if_start = tapifstart;
+ ifp->if_ioctl = tapifioctl;
+ ifp->if_mtu = ETHERMTU;
+ ifp->if_flags = (IFF_BROADCAST|IFF_SIMPLEX|IFF_MULTICAST);
+ IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
+ ifp->if_capabilities |= IFCAP_LINKSTATE;
+ ifp->if_capenable |= IFCAP_LINKSTATE;
+
+ dev->si_drv1 = tp;
+ tp->tap_dev = dev;
+
+ ether_ifattach(ifp, eaddr);
+
+ mtx_lock(&tp->tap_mtx);
+ tp->tap_flags |= TAP_INITED;
+ mtx_unlock(&tp->tap_mtx);
+
+ knlist_init_mtx(&tp->tap_rsel.si_note, &tp->tap_mtx);
+
+ TAPDEBUG("interface %s is created. minor = %#x\n",
+ ifp->if_xname, dev2unit(dev));
+} /* tapcreate */
+
+
+/*
+ * tapopen
+ *
+ * open the tunnel device; the caller needs privilege unless the
+ * net.link.tap.user_open sysctl is set
+ */
+static int
+tapopen(struct cdev *dev, int flag, int mode, struct thread *td)
+{
+ struct tap_softc *tp = NULL;
+ struct ifnet *ifp = NULL;
+ int error;
+
+ if (tapuopen == 0) {
+ error = priv_check(td, PRIV_NET_TAP);
+ if (error)
+ return (error);
+ }
+
+ if ((dev2unit(dev) & CLONE_UNITMASK) > TAPMAXUNIT)
+ return (ENXIO);
+
+ tp = dev->si_drv1;
+
+ mtx_lock(&tp->tap_mtx);
+ if (tp->tap_flags & TAP_OPEN) {
+ mtx_unlock(&tp->tap_mtx);
+ return (EBUSY);
+ }
+
+ bcopy(IF_LLADDR(tp->tap_ifp), tp->ether_addr, sizeof(tp->ether_addr));
+ tp->tap_pid = td->td_proc->p_pid;
+ tp->tap_flags |= TAP_OPEN;
+ ifp = tp->tap_ifp;
+
+ ifp->if_drv_flags |= IFF_DRV_RUNNING;
+ ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
+ if (tapuponopen)
+ ifp->if_flags |= IFF_UP;
+ if_link_state_change(ifp, LINK_STATE_UP);
+ mtx_unlock(&tp->tap_mtx);
+
+ TAPDEBUG("%s is open. minor = %#x\n", ifp->if_xname, dev2unit(dev));
+
+ return (0);
+} /* tapopen */
+
+
+/*
+ * tapclose
+ *
+ * close the device - mark i/f down & delete routing info
+ */
+static int
+tapclose(struct cdev *dev, int foo, int bar, struct thread *td)
+{
+ struct ifaddr *ifa;
+ struct tap_softc *tp = dev->si_drv1;
+ struct ifnet *ifp = tp->tap_ifp;
+
+ /* junk all pending output */
+ mtx_lock(&tp->tap_mtx);
+ IF_DRAIN(&ifp->if_snd);
+
+ /*
+	 * Do not bring the interface down, and do not do anything else
+	 * with the interface, if we are in VMnet mode; just close the device.
+ */
+
+ if (((tp->tap_flags & TAP_VMNET) == 0) && (ifp->if_flags & IFF_UP)) {
+ mtx_unlock(&tp->tap_mtx);
+ if_down(ifp);
+ mtx_lock(&tp->tap_mtx);
+ if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+ ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
+ mtx_unlock(&tp->tap_mtx);
+ TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
+ rtinit(ifa, (int)RTM_DELETE, 0);
+ }
+ if_purgeaddrs(ifp);
+ mtx_lock(&tp->tap_mtx);
+ }
+ }
+
+ if_link_state_change(ifp, LINK_STATE_DOWN);
+ funsetown(&tp->tap_sigio);
+ selwakeuppri(&tp->tap_rsel, PZERO+1);
+ KNOTE_LOCKED(&tp->tap_rsel.si_note, 0);
+
+ tp->tap_flags &= ~TAP_OPEN;
+ tp->tap_pid = 0;
+ mtx_unlock(&tp->tap_mtx);
+
+ TAPDEBUG("%s is closed. minor = %#x\n",
+ ifp->if_xname, dev2unit(dev));
+
+ return (0);
+} /* tapclose */
+
+
+/*
+ * tapifinit
+ *
+ * network interface initialization function
+ */
+static void
+tapifinit(void *xtp)
+{
+ struct tap_softc *tp = (struct tap_softc *)xtp;
+ struct ifnet *ifp = tp->tap_ifp;
+
+ TAPDEBUG("initializing %s\n", ifp->if_xname);
+
+ mtx_lock(&tp->tap_mtx);
+ ifp->if_drv_flags |= IFF_DRV_RUNNING;
+ ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
+ mtx_unlock(&tp->tap_mtx);
+
+ /* attempt to start output */
+ tapifstart(ifp);
+} /* tapifinit */
+
+
+/*
+ * tapifioctl
+ *
+ * Process an ioctl request on network interface
+ */
+static int
+tapifioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
+{
+ struct tap_softc *tp = ifp->if_softc;
+ struct ifreq *ifr = (struct ifreq *)data;
+ struct ifstat *ifs = NULL;
+ int dummy;
+
+ switch (cmd) {
+ case SIOCSIFFLAGS: /* XXX -- just like vmnet does */
+ case SIOCADDMULTI:
+ case SIOCDELMULTI:
+ break;
+
+ case SIOCSIFMTU:
+ ifp->if_mtu = ifr->ifr_mtu;
+ break;
+
+ case SIOCGIFSTATUS:
+ ifs = (struct ifstat *)data;
+ dummy = strlen(ifs->ascii);
+ mtx_lock(&tp->tap_mtx);
+ if (tp->tap_pid != 0 && dummy < sizeof(ifs->ascii))
+ snprintf(ifs->ascii + dummy,
+ sizeof(ifs->ascii) - dummy,
+ "\tOpened by PID %d\n", tp->tap_pid);
+ mtx_unlock(&tp->tap_mtx);
+ break;
+
+ default:
+ return (ether_ioctl(ifp, cmd, data));
+ /* NOT REACHED */
+ }
+
+ return (0);
+} /* tapifioctl */
+
+
+/*
+ * tapifstart
+ *
+ * queue packets from higher level ready to put out
+ */
+static void
+tapifstart(struct ifnet *ifp)
+{
+ struct tap_softc *tp = ifp->if_softc;
+
+ TAPDEBUG("%s starting\n", ifp->if_xname);
+
+ /*
+ * do not junk pending output if we are in VMnet mode.
+ * XXX: can this do any harm because of queue overflow?
+ */
+
+ mtx_lock(&tp->tap_mtx);
+ if (((tp->tap_flags & TAP_VMNET) == 0) &&
+ ((tp->tap_flags & TAP_READY) != TAP_READY)) {
+ struct mbuf *m;
+
+ /* Unlocked read. */
+ TAPDEBUG("%s not ready, tap_flags = 0x%x\n", ifp->if_xname,
+ tp->tap_flags);
+
+ for (;;) {
+ IF_DEQUEUE(&ifp->if_snd, m);
+ if (m != NULL) {
+ m_freem(m);
+ ifp->if_oerrors++;
+ } else
+ break;
+ }
+ mtx_unlock(&tp->tap_mtx);
+
+ return;
+ }
+
+ ifp->if_drv_flags |= IFF_DRV_OACTIVE;
+
+ if (!IFQ_IS_EMPTY(&ifp->if_snd)) {
+ if (tp->tap_flags & TAP_RWAIT) {
+ tp->tap_flags &= ~TAP_RWAIT;
+ wakeup(tp);
+ }
+
+ if ((tp->tap_flags & TAP_ASYNC) && (tp->tap_sigio != NULL)) {
+ mtx_unlock(&tp->tap_mtx);
+ pgsigio(&tp->tap_sigio, SIGIO, 0);
+ mtx_lock(&tp->tap_mtx);
+ }
+
+ selwakeuppri(&tp->tap_rsel, PZERO+1);
+ KNOTE_LOCKED(&tp->tap_rsel.si_note, 0);
+		ifp->if_opackets++;	/* obytes are counted in ether_output */
+ }
+
+ ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
+ mtx_unlock(&tp->tap_mtx);
+} /* tapifstart */
+
+
+/*
+ * tapioctl
+ *
+ * the cdevsw interface is now pretty minimal
+ */
+static int
+tapioctl(struct cdev *dev, u_long cmd, caddr_t data, int flag, struct thread *td)
+{
+ struct tap_softc *tp = dev->si_drv1;
+ struct ifnet *ifp = tp->tap_ifp;
+ struct tapinfo *tapp = NULL;
+ int f;
+#if defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD5) || \
+ defined(COMPAT_FREEBSD4)
+ int ival;
+#endif
+
+ switch (cmd) {
+ case TAPSIFINFO:
+ tapp = (struct tapinfo *)data;
+ mtx_lock(&tp->tap_mtx);
+ ifp->if_mtu = tapp->mtu;
+ ifp->if_type = tapp->type;
+ ifp->if_baudrate = tapp->baudrate;
+ mtx_unlock(&tp->tap_mtx);
+ break;
+
+ case TAPGIFINFO:
+ tapp = (struct tapinfo *)data;
+ mtx_lock(&tp->tap_mtx);
+ tapp->mtu = ifp->if_mtu;
+ tapp->type = ifp->if_type;
+ tapp->baudrate = ifp->if_baudrate;
+ mtx_unlock(&tp->tap_mtx);
+ break;
+
+ case TAPSDEBUG:
+ tapdebug = *(int *)data;
+ break;
+
+ case TAPGDEBUG:
+ *(int *)data = tapdebug;
+ break;
+
+ case TAPGIFNAME: {
+ struct ifreq *ifr = (struct ifreq *) data;
+
+ strlcpy(ifr->ifr_name, ifp->if_xname, IFNAMSIZ);
+ } break;
+
+ case FIONBIO:
+ break;
+
+ case FIOASYNC:
+ mtx_lock(&tp->tap_mtx);
+ if (*(int *)data)
+ tp->tap_flags |= TAP_ASYNC;
+ else
+ tp->tap_flags &= ~TAP_ASYNC;
+ mtx_unlock(&tp->tap_mtx);
+ break;
+
+ case FIONREAD:
+ if (!IFQ_IS_EMPTY(&ifp->if_snd)) {
+ struct mbuf *mb;
+
+ IFQ_LOCK(&ifp->if_snd);
+ IFQ_POLL_NOLOCK(&ifp->if_snd, mb);
+ for (*(int *)data = 0; mb != NULL;
+ mb = mb->m_next)
+ *(int *)data += mb->m_len;
+ IFQ_UNLOCK(&ifp->if_snd);
+ } else
+ *(int *)data = 0;
+ break;
+
+ case FIOSETOWN:
+ return (fsetown(*(int *)data, &tp->tap_sigio));
+
+ case FIOGETOWN:
+ *(int *)data = fgetown(&tp->tap_sigio);
+ return (0);
+
+ /* this is deprecated, FIOSETOWN should be used instead */
+ case TIOCSPGRP:
+ return (fsetown(-(*(int *)data), &tp->tap_sigio));
+
+ /* this is deprecated, FIOGETOWN should be used instead */
+ case TIOCGPGRP:
+ *(int *)data = -fgetown(&tp->tap_sigio);
+ return (0);
+
+ /* VMware/VMnet port ioctl's */
+
+#if defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD5) || \
+ defined(COMPAT_FREEBSD4)
+ case _IO('V', 0):
+ ival = IOCPARM_IVAL(data);
+ data = (caddr_t)&ival;
+ /* FALLTHROUGH */
+#endif
+ case VMIO_SIOCSIFFLAGS: /* VMware/VMnet SIOCSIFFLAGS */
+ f = *(int *)data;
+ f &= 0x0fff;
+ f &= ~IFF_CANTCHANGE;
+ f |= IFF_UP;
+
+ mtx_lock(&tp->tap_mtx);
+ ifp->if_flags = f | (ifp->if_flags & IFF_CANTCHANGE);
+ mtx_unlock(&tp->tap_mtx);
+ break;
+
+ case OSIOCGIFADDR: /* get MAC address of the remote side */
+ case SIOCGIFADDR:
+ mtx_lock(&tp->tap_mtx);
+ bcopy(tp->ether_addr, data, sizeof(tp->ether_addr));
+ mtx_unlock(&tp->tap_mtx);
+ break;
+
+ case SIOCSIFADDR: /* set MAC address of the remote side */
+ mtx_lock(&tp->tap_mtx);
+ bcopy(data, tp->ether_addr, sizeof(tp->ether_addr));
+ mtx_unlock(&tp->tap_mtx);
+ break;
+
+ default:
+ return (ENOTTY);
+ }
+ return (0);
+} /* tapioctl */
+
+
+/*
+ * tapread
+ *
+ * the cdevsw read interface - reads a packet at a time, or at
+ * least as much of a packet as can be read
+ */
+static int
+tapread(struct cdev *dev, struct uio *uio, int flag)
+{
+ struct tap_softc *tp = dev->si_drv1;
+ struct ifnet *ifp = tp->tap_ifp;
+ struct mbuf *m = NULL;
+ int error = 0, len;
+
+ TAPDEBUG("%s reading, minor = %#x\n", ifp->if_xname, dev2unit(dev));
+
+ mtx_lock(&tp->tap_mtx);
+ if ((tp->tap_flags & TAP_READY) != TAP_READY) {
+ mtx_unlock(&tp->tap_mtx);
+
+ /* Unlocked read. */
+ TAPDEBUG("%s not ready. minor = %#x, tap_flags = 0x%x\n",
+ ifp->if_xname, dev2unit(dev), tp->tap_flags);
+
+ return (EHOSTDOWN);
+ }
+
+ tp->tap_flags &= ~TAP_RWAIT;
+
+ /* sleep until we get a packet */
+ do {
+ IF_DEQUEUE(&ifp->if_snd, m);
+
+ if (m == NULL) {
+ if (flag & O_NONBLOCK) {
+ mtx_unlock(&tp->tap_mtx);
+ return (EWOULDBLOCK);
+ }
+
+ tp->tap_flags |= TAP_RWAIT;
+ error = mtx_sleep(tp, &tp->tap_mtx, PCATCH | (PZERO + 1),
+ "taprd", 0);
+ if (error) {
+ mtx_unlock(&tp->tap_mtx);
+ return (error);
+ }
+ }
+ } while (m == NULL);
+ mtx_unlock(&tp->tap_mtx);
+
+ /* feed packet to bpf */
+ BPF_MTAP(ifp, m);
+
+ /* xfer packet to user space */
+ while ((m != NULL) && (uio->uio_resid > 0) && (error == 0)) {
+ len = min(uio->uio_resid, m->m_len);
+ if (len == 0)
+ break;
+
+ error = uiomove(mtod(m, void *), len, uio);
+ m = m_free(m);
+ }
+
+ if (m != NULL) {
+ TAPDEBUG("%s dropping mbuf, minor = %#x\n", ifp->if_xname,
+ dev2unit(dev));
+ m_freem(m);
+ }
+
+ return (error);
+} /* tapread */
+
+
+/*
+ * tapwrite
+ *
+ * the cdevsw write interface - an atomic write is a packet - or else!
+ */
+static int
+tapwrite(struct cdev *dev, struct uio *uio, int flag)
+{
+ struct ether_header *eh;
+ struct tap_softc *tp = dev->si_drv1;
+ struct ifnet *ifp = tp->tap_ifp;
+ struct mbuf *m;
+
+	TAPDEBUG("%s writing, minor = %#x\n",
+ ifp->if_xname, dev2unit(dev));
+
+ if (uio->uio_resid == 0)
+ return (0);
+
+ if ((uio->uio_resid < 0) || (uio->uio_resid > TAPMRU)) {
+ TAPDEBUG("%s invalid packet len = %zd, minor = %#x\n",
+ ifp->if_xname, uio->uio_resid, dev2unit(dev));
+
+ return (EIO);
+ }
+
+ if ((m = m_uiotombuf(uio, M_DONTWAIT, 0, ETHER_ALIGN,
+ M_PKTHDR)) == NULL) {
+		ifp->if_ierrors++;
+ return (ENOBUFS);
+ }
+
+ m->m_pkthdr.rcvif = ifp;
+
+ /*
+	 * Only pass a unicast frame to ether_input() if it would actually
+ * have been received by non-virtual hardware.
+ */
+ if (m->m_len < sizeof(struct ether_header)) {
+ m_freem(m);
+ return (0);
+ }
+ eh = mtod(m, struct ether_header *);
+
+ if (eh && (ifp->if_flags & IFF_PROMISC) == 0 &&
+ !ETHER_IS_MULTICAST(eh->ether_dhost) &&
+ bcmp(eh->ether_dhost, IF_LLADDR(ifp), ETHER_ADDR_LEN) != 0) {
+ m_freem(m);
+ return (0);
+ }
+
+ /* Pass packet up to parent. */
+ (*ifp->if_input)(ifp, m);
+	ifp->if_ipackets++;	/* ibytes are counted in parent */
+
+ return (0);
+} /* tapwrite */
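+
+/*
+ * Illustration (hypothetical, not part of the driver): a frame injected
+ * with write(2) is delivered upstream only when the interface is
+ * promiscuous, or the destination is multicast/broadcast, or it matches
+ * the interface's own MAC address:
+ *
+ *	struct ether_header eh;
+ *	memcpy(eh.ether_dhost, tap_lladdr, ETHER_ADDR_LEN);	// accepted
+ *	eh.ether_type = htons(ETHERTYPE_IP);
+ *	// any other unicast ether_dhost is silently dropped above
+ */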
+
+
+/*
+ * tappoll
+ *
+ * the poll interface; this is really only useful for reads. The write
+ * detect always returns true: a write never blocks, it is either
+ * accepted or dropped.
+ */
+static int
+tappoll(struct cdev *dev, int events, struct thread *td)
+{
+ struct tap_softc *tp = dev->si_drv1;
+ struct ifnet *ifp = tp->tap_ifp;
+ int revents = 0;
+
+ TAPDEBUG("%s polling, minor = %#x\n",
+ ifp->if_xname, dev2unit(dev));
+
+ if (events & (POLLIN | POLLRDNORM)) {
+ IFQ_LOCK(&ifp->if_snd);
+ if (!IFQ_IS_EMPTY(&ifp->if_snd)) {
+ TAPDEBUG("%s have data in queue. len = %d, " \
+ "minor = %#x\n", ifp->if_xname,
+ ifp->if_snd.ifq_len, dev2unit(dev));
+
+ revents |= (events & (POLLIN | POLLRDNORM));
+ } else {
+ TAPDEBUG("%s waiting for data, minor = %#x\n",
+ ifp->if_xname, dev2unit(dev));
+
+ selrecord(td, &tp->tap_rsel);
+ }
+ IFQ_UNLOCK(&ifp->if_snd);
+ }
+
+ if (events & (POLLOUT | POLLWRNORM))
+ revents |= (events & (POLLOUT | POLLWRNORM));
+
+ return (revents);
+} /* tappoll */
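+
+/*
+ * A minimal poll(2) sketch from userspace (hypothetical, for
+ * illustration): POLLIN reports queued frames, while POLLOUT is always
+ * ready because a write never blocks.
+ *
+ *	struct pollfd pfd = { .fd = tap_fd, .events = POLLIN };
+ *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
+ *		n = read(tap_fd, frame, sizeof(frame));
+ */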
+
+
+/*
+ * tap_kqfilter
+ *
+ * support for kevent() system call
+ */
+static int
+tapkqfilter(struct cdev *dev, struct knote *kn)
+{
+ struct tap_softc *tp = dev->si_drv1;
+ struct ifnet *ifp = tp->tap_ifp;
+
+ switch (kn->kn_filter) {
+ case EVFILT_READ:
+ TAPDEBUG("%s kqfilter: EVFILT_READ, minor = %#x\n",
+ ifp->if_xname, dev2unit(dev));
+ kn->kn_fop = &tap_read_filterops;
+ break;
+
+ case EVFILT_WRITE:
+ TAPDEBUG("%s kqfilter: EVFILT_WRITE, minor = %#x\n",
+ ifp->if_xname, dev2unit(dev));
+ kn->kn_fop = &tap_write_filterops;
+ break;
+
+ default:
+ TAPDEBUG("%s kqfilter: invalid filter, minor = %#x\n",
+ ifp->if_xname, dev2unit(dev));
+ return (EINVAL);
+ /* NOT REACHED */
+ }
+
+ kn->kn_hook = tp;
+ knlist_add(&tp->tap_rsel.si_note, kn, 0);
+
+ return (0);
+} /* tapkqfilter */
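+
+/*
+ * A kevent(2) usage sketch (hypothetical userspace code): register an
+ * EVFILT_READ filter on the tap descriptor; tapkqread() below then
+ * reports the number of queued frames in the returned kevent's data
+ * field.
+ *
+ *	struct kevent kev, out;
+ *	int kq = kqueue();
+ *	EV_SET(&kev, tap_fd, EVFILT_READ, EV_ADD, 0, 0, NULL);
+ *	if (kevent(kq, &kev, 1, &out, 1, NULL) > 0)
+ *		printf("%jd frame(s) queued\n", (intmax_t)out.data);
+ */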
+
+
+/*
+ * tap_kqread
+ *
+ * Return true if there is data in the interface queue
+ */
+static int
+tapkqread(struct knote *kn, long hint)
+{
+ int ret;
+ struct tap_softc *tp = kn->kn_hook;
+ struct cdev *dev = tp->tap_dev;
+ struct ifnet *ifp = tp->tap_ifp;
+
+ if ((kn->kn_data = ifp->if_snd.ifq_len) > 0) {
+ TAPDEBUG("%s have data in queue. len = %d, minor = %#x\n",
+ ifp->if_xname, ifp->if_snd.ifq_len, dev2unit(dev));
+ ret = 1;
+ } else {
+ TAPDEBUG("%s waiting for data, minor = %#x\n",
+ ifp->if_xname, dev2unit(dev));
+ ret = 0;
+ }
+
+ return (ret);
+} /* tapkqread */
+
+
+/*
+ * tap_kqwrite
+ *
+ * Always writable; return the MTU in kn->kn_data.
+ */
+static int
+tapkqwrite(struct knote *kn, long hint)
+{
+ struct tap_softc *tp = kn->kn_hook;
+ struct ifnet *ifp = tp->tap_ifp;
+
+ kn->kn_data = ifp->if_mtu;
+
+ return (1);
+} /* tapkqwrite */
+
+
+static void
+tapkqdetach(struct knote *kn)
+{
+ struct tap_softc *tp = kn->kn_hook;
+
+ knlist_remove(&tp->tap_rsel.si_note, kn, 0);
+} /* tapkqdetach */
+
diff --git a/rtems/freebsd/net/if_tap.h b/rtems/freebsd/net/if_tap.h
new file mode 100644
index 00000000..e611884b
--- /dev/null
+++ b/rtems/freebsd/net/if_tap.h
@@ -0,0 +1,74 @@
+/*-
+ * Copyright (C) 1999-2000 by Maksim Yevmenkin <m_evmenkin@yahoo.com>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * BASED ON:
+ * -------------------------------------------------------------------------
+ *
+ * Copyright (c) 1988, Julian Onions <jpo@cs.nott.ac.uk>
+ * Nottingham University 1987.
+ */
+
+/*
+ * $FreeBSD$
+ * $Id: if_tap.h,v 0.7 2000/07/12 04:12:51 max Exp $
+ */
+
+#ifndef _NET_IF_TAP_HH_
+#define _NET_IF_TAP_HH_
+
+/* refer to if_tapvar.h for the softc stuff */
+
+/* maximum receive packet size (hard limit) */
+#define TAPMRU 16384
+
+struct tapinfo {
+ int baudrate; /* linespeed */
+ short mtu; /* maximum transmission unit */
+ u_char type; /* ethernet, tokenring, etc. */
+ u_char dummy; /* place holder */
+};
+
+/* ioctl's for get/set debug */
+#define TAPSDEBUG _IOW('t', 90, int)
+#define TAPGDEBUG _IOR('t', 89, int)
+#define TAPSIFINFO _IOW('t', 91, struct tapinfo)
+#define TAPGIFINFO _IOR('t', 92, struct tapinfo)
+#define TAPGIFNAME _IOR('t', 93, struct ifreq)
+
+/* VMware ioctl's */
+#define VMIO_SIOCSIFFLAGS _IOWINT('V', 0)
+#define VMIO_SIOCSKEEP _IO('V', 1)
+#define VMIO_SIOCSIFBR _IO('V', 2)
+#define VMIO_SIOCSLADRF _IO('V', 3)
+
+/* XXX -- unimplemented */
+#define VMIO_SIOCSETMACADDR _IO('V', 4)
+
+/* XXX -- not used? */
+#define VMIO_SIOCPORT _IO('V', 5)
+#define VMIO_SIOCBRIDGE _IO('V', 6)
+#define VMIO_SIOCNETIF _IO('V', 7)
+
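+/*
+ * Usage sketch (hypothetical userspace code, not part of this header):
+ * open a tap node, query the attached interface name with TAPGIFNAME,
+ * then exchange whole Ethernet frames with read(2)/write(2).
+ *
+ *	int fd = open("/dev/tap0", O_RDWR);	// tapcreate() attaches tap0
+ *	struct ifreq ifr;
+ *	if (fd >= 0 && ioctl(fd, TAPGIFNAME, &ifr) == 0)
+ *		printf("attached to %s\n", ifr.ifr_name);
+ *	// each read(2)/write(2) now transfers one Ethernet frame
+ */
+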
+#endif /* !_NET_IF_TAP_HH_ */
diff --git a/rtems/freebsd/net/if_tapvar.h b/rtems/freebsd/net/if_tapvar.h
new file mode 100644
index 00000000..4a26fd87
--- /dev/null
+++ b/rtems/freebsd/net/if_tapvar.h
@@ -0,0 +1,69 @@
+/*-
+ * Copyright (C) 1999-2000 by Maksim Yevmenkin <m_evmenkin@yahoo.com>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * BASED ON:
+ * -------------------------------------------------------------------------
+ *
+ * Copyright (c) 1998 Brian Somers <brian@Awfulhak.org>
+ * All rights reserved.
+ *
+ * Copyright (c) 1988, Julian Onions <jpo@cs.nott.ac.uk>
+ * Nottingham University 1987.
+ */
+
+/*
+ * $FreeBSD$
+ * $Id: if_tapvar.h,v 0.6 2000/07/11 02:16:08 max Exp $
+ */
+
+#ifndef _NET_IF_TAPVAR_HH_
+#define _NET_IF_TAPVAR_HH_
+
+/*
+ * tap_mtx locks tap_flags and tap_pid; tap_next is locked by the global
+ * tapmtx.  Other fields are locked by their owning subsystems.
+ */
+struct tap_softc {
+ struct ifnet *tap_ifp;
+ u_short tap_flags; /* misc flags */
+#define TAP_OPEN (1 << 0)
+#define TAP_INITED (1 << 1)
+#define TAP_RWAIT (1 << 2)
+#define TAP_ASYNC (1 << 3)
+#define TAP_READY (TAP_OPEN|TAP_INITED)
+#define TAP_VMNET (1 << 4)
+
+ u_int8_t ether_addr[ETHER_ADDR_LEN]; /* ether addr of the remote side */
+
+	pid_t	tap_pid;		/* PID of the process that opened the device */
+ struct sigio *tap_sigio; /* information for async I/O */
+ struct selinfo tap_rsel; /* read select */
+
+ SLIST_ENTRY(tap_softc) tap_next; /* next device in chain */
+ struct cdev *tap_dev;
+ struct mtx tap_mtx; /* per-softc mutex */
+};
+
+#endif /* !_NET_IF_TAPVAR_HH_ */
diff --git a/rtems/freebsd/net/if_tun.c b/rtems/freebsd/net/if_tun.c
new file mode 100644
index 00000000..ca923a38
--- /dev/null
+++ b/rtems/freebsd/net/if_tun.c
@@ -0,0 +1,1059 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/* $NetBSD: if_tun.c,v 1.14 1994/06/29 06:36:25 cgd Exp $ */
+
+/*-
+ * Copyright (c) 1988, Julian Onions <jpo@cs.nott.ac.uk>
+ * Nottingham University 1987.
+ *
+ * This source may be freely distributed, however I would be interested
+ * in any changes that are made.
+ *
+ * This driver takes packets off the IP i/f and hands them up to a
+ * user process to have its wicked way with. This driver has it's
+ * roots in a similar driver written by Phil Cockcroft (formerly) at
+ * UCL. This driver is based much more on read/write/poll mode of
+ * operation though.
+ *
+ * $FreeBSD$
+ */
+
+#include <rtems/freebsd/local/opt_atalk.h>
+#include <rtems/freebsd/local/opt_inet.h>
+#include <rtems/freebsd/local/opt_inet6.h>
+#include <rtems/freebsd/local/opt_ipx.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/priv.h>
+#include <rtems/freebsd/sys/proc.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/jail.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/module.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/fcntl.h>
+#include <rtems/freebsd/sys/filio.h>
+#include <rtems/freebsd/sys/sockio.h>
+#include <rtems/freebsd/sys/ttycom.h>
+#include <rtems/freebsd/sys/poll.h>
+#include <rtems/freebsd/sys/selinfo.h>
+#include <rtems/freebsd/sys/signalvar.h>
+#include <rtems/freebsd/sys/filedesc.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/sysctl.h>
+#include <rtems/freebsd/sys/conf.h>
+#include <rtems/freebsd/sys/uio.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/random.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/if_clone.h>
+#include <rtems/freebsd/net/if_types.h>
+#include <rtems/freebsd/net/netisr.h>
+#include <rtems/freebsd/net/route.h>
+#include <rtems/freebsd/net/vnet.h>
+#ifdef INET
+#include <rtems/freebsd/netinet/in.h>
+#endif
+#include <rtems/freebsd/net/bpf.h>
+#include <rtems/freebsd/net/if_tun.h>
+
+#include <rtems/freebsd/sys/queue.h>
+#include <rtems/freebsd/sys/condvar.h>
+
+#include <rtems/freebsd/security/mac/mac_framework.h>
+
+/*
+ * tun_list is protected by global tunmtx. Other mutable fields are
+ * protected by tun->tun_mtx, or by their owning subsystem. tun_dev is
+ * static for the duration of a tunnel interface.
+ */
+struct tun_softc {
+ TAILQ_ENTRY(tun_softc) tun_list;
+ struct cdev *tun_dev;
+ u_short tun_flags; /* misc flags */
+#define TUN_OPEN 0x0001
+#define TUN_INITED 0x0002
+#define TUN_RCOLL 0x0004
+#define TUN_IASET 0x0008
+#define TUN_DSTADDR 0x0010
+#define TUN_LMODE 0x0020
+#define TUN_RWAIT 0x0040
+#define TUN_ASYNC 0x0080
+#define TUN_IFHEAD 0x0100
+
+#define TUN_READY (TUN_OPEN | TUN_INITED)
+
+ /*
+ * XXXRW: tun_pid is used to exclusively lock /dev/tun. Is this
+ * actually needed? Can we just return EBUSY if already open?
+	 * Problem is that this involves inherent races when a tun device
+ * is handed off from one process to another, as opposed to just
+ * being slightly stale informationally.
+ */
+ pid_t tun_pid; /* owning pid */
+ struct ifnet *tun_ifp; /* the interface */
+ struct sigio *tun_sigio; /* information for async I/O */
+ struct selinfo tun_rsel; /* read select */
+ struct mtx tun_mtx; /* protect mutable softc fields */
+ struct cv tun_cv; /* protect against ref'd dev destroy */
+};
+#define TUN2IFP(sc) ((sc)->tun_ifp)
+
+#define TUNDEBUG if (tundebug) if_printf
+#define TUNNAME "tun"
+
+/*
+ * All mutable global variables in if_tun are locked using tunmtx, with
+ * the exception of tundebug, which is used unlocked, and tunclones,
+ * which is static after setup.
+ */
+static struct mtx tunmtx;
+static MALLOC_DEFINE(M_TUN, TUNNAME, "Tunnel Interface");
+static int tundebug = 0;
+static int tundclone = 1;
+static struct clonedevs *tunclones;
+static TAILQ_HEAD(,tun_softc) tunhead = TAILQ_HEAD_INITIALIZER(tunhead);
+SYSCTL_INT(_debug, OID_AUTO, if_tun_debug, CTLFLAG_RW, &tundebug, 0, "");
+
+SYSCTL_DECL(_net_link);
+SYSCTL_NODE(_net_link, OID_AUTO, tun, CTLFLAG_RW, 0,
+ "IP tunnel software network interface.");
+SYSCTL_INT(_net_link_tun, OID_AUTO, devfs_cloning, CTLFLAG_RW, &tundclone, 0,
+ "Enable legacy devfs interface creation.");
+
+TUNABLE_INT("net.link.tun.devfs_cloning", &tundclone);
+
+static void tunclone(void *arg, struct ucred *cred, char *name,
+ int namelen, struct cdev **dev);
+static void tuncreate(const char *name, struct cdev *dev);
+static int tunifioctl(struct ifnet *, u_long, caddr_t);
+static int tuninit(struct ifnet *);
+static int tunmodevent(module_t, int, void *);
+static int tunoutput(struct ifnet *, struct mbuf *, struct sockaddr *,
+ struct route *ro);
+static void tunstart(struct ifnet *);
+
+static int tun_clone_create(struct if_clone *, int, caddr_t);
+static void tun_clone_destroy(struct ifnet *);
+
+IFC_SIMPLE_DECLARE(tun, 0);
+
+static d_open_t tunopen;
+static d_close_t tunclose;
+static d_read_t tunread;
+static d_write_t tunwrite;
+static d_ioctl_t tunioctl;
+static d_poll_t tunpoll;
+static d_kqfilter_t tunkqfilter;
+
+static int tunkqread(struct knote *, long);
+static int tunkqwrite(struct knote *, long);
+static void tunkqdetach(struct knote *);
+
+static struct filterops tun_read_filterops = {
+ .f_isfd = 1,
+ .f_attach = NULL,
+ .f_detach = tunkqdetach,
+ .f_event = tunkqread,
+};
+
+static struct filterops tun_write_filterops = {
+ .f_isfd = 1,
+ .f_attach = NULL,
+ .f_detach = tunkqdetach,
+ .f_event = tunkqwrite,
+};
+
+static struct cdevsw tun_cdevsw = {
+ .d_version = D_VERSION,
+ .d_flags = D_PSEUDO | D_NEEDMINOR,
+ .d_open = tunopen,
+ .d_close = tunclose,
+ .d_read = tunread,
+ .d_write = tunwrite,
+ .d_ioctl = tunioctl,
+ .d_poll = tunpoll,
+ .d_kqfilter = tunkqfilter,
+ .d_name = TUNNAME,
+};
+
+static int
+tun_clone_create(struct if_clone *ifc, int unit, caddr_t params)
+{
+ struct cdev *dev;
+ int i;
+
+ /* find any existing device, or allocate new unit number */
+ i = clone_create(&tunclones, &tun_cdevsw, &unit, &dev, 0);
+ if (i) {
+ /* No preexisting struct cdev *, create one */
+ dev = make_dev(&tun_cdevsw, unit,
+ UID_UUCP, GID_DIALER, 0600, "%s%d", ifc->ifc_name, unit);
+ }
+ tuncreate(ifc->ifc_name, dev);
+
+ return (0);
+}
+
+static void
+tunclone(void *arg, struct ucred *cred, char *name, int namelen,
+ struct cdev **dev)
+{
+ char devname[SPECNAMELEN + 1];
+ int u, i, append_unit;
+
+ if (*dev != NULL)
+ return;
+
+ /*
+ * If tun cloning is enabled, only the superuser can create an
+ * interface.
+ */
+ if (!tundclone || priv_check_cred(cred, PRIV_NET_IFCREATE, 0) != 0)
+ return;
+
+ if (strcmp(name, TUNNAME) == 0) {
+ u = -1;
+ } else if (dev_stdclone(name, NULL, TUNNAME, &u) != 1)
+ return; /* Don't recognise the name */
+ if (u != -1 && u > IF_MAXUNIT)
+ return; /* Unit number too high */
+
+ if (u == -1)
+ append_unit = 1;
+ else
+ append_unit = 0;
+
+ CURVNET_SET(CRED_TO_VNET(cred));
+ /* find any existing device, or allocate new unit number */
+ i = clone_create(&tunclones, &tun_cdevsw, &u, dev, 0);
+ if (i) {
+ if (append_unit) {
+ namelen = snprintf(devname, sizeof(devname), "%s%d", name,
+ u);
+ name = devname;
+ }
+ /* No preexisting struct cdev *, create one */
+ *dev = make_dev_credf(MAKEDEV_REF, &tun_cdevsw, u, cred,
+ UID_UUCP, GID_DIALER, 0600, "%s", name);
+ }
+
+ if_clone_create(name, namelen, NULL);
+ CURVNET_RESTORE();
+}
+
+static void
+tun_destroy(struct tun_softc *tp)
+{
+ struct cdev *dev;
+
+ /* Unlocked read. */
+ mtx_lock(&tp->tun_mtx);
+ if ((tp->tun_flags & TUN_OPEN) != 0)
+ cv_wait_unlock(&tp->tun_cv, &tp->tun_mtx);
+ else
+ mtx_unlock(&tp->tun_mtx);
+
+ CURVNET_SET(TUN2IFP(tp)->if_vnet);
+ dev = tp->tun_dev;
+ bpfdetach(TUN2IFP(tp));
+ if_detach(TUN2IFP(tp));
+ if_free(TUN2IFP(tp));
+ destroy_dev(dev);
+ knlist_destroy(&tp->tun_rsel.si_note);
+ mtx_destroy(&tp->tun_mtx);
+ cv_destroy(&tp->tun_cv);
+ free(tp, M_TUN);
+ CURVNET_RESTORE();
+}
+
+static void
+tun_clone_destroy(struct ifnet *ifp)
+{
+ struct tun_softc *tp = ifp->if_softc;
+
+ mtx_lock(&tunmtx);
+ TAILQ_REMOVE(&tunhead, tp, tun_list);
+ mtx_unlock(&tunmtx);
+ tun_destroy(tp);
+}
+
+static int
+tunmodevent(module_t mod, int type, void *data)
+{
+ static eventhandler_tag tag;
+ struct tun_softc *tp;
+
+ switch (type) {
+ case MOD_LOAD:
+ mtx_init(&tunmtx, "tunmtx", NULL, MTX_DEF);
+ clone_setup(&tunclones);
+ tag = EVENTHANDLER_REGISTER(dev_clone, tunclone, 0, 1000);
+ if (tag == NULL)
+ return (ENOMEM);
+ if_clone_attach(&tun_cloner);
+ break;
+ case MOD_UNLOAD:
+ if_clone_detach(&tun_cloner);
+ EVENTHANDLER_DEREGISTER(dev_clone, tag);
+ drain_dev_clone_events();
+
+ mtx_lock(&tunmtx);
+ while ((tp = TAILQ_FIRST(&tunhead)) != NULL) {
+ TAILQ_REMOVE(&tunhead, tp, tun_list);
+ mtx_unlock(&tunmtx);
+ tun_destroy(tp);
+ mtx_lock(&tunmtx);
+ }
+ mtx_unlock(&tunmtx);
+ clone_cleanup(&tunclones);
+ mtx_destroy(&tunmtx);
+ break;
+ default:
+ return EOPNOTSUPP;
+ }
+ return 0;
+}
+
+static moduledata_t tun_mod = {
+ "if_tun",
+ tunmodevent,
+ 0
+};
+
+DECLARE_MODULE(if_tun, tun_mod, SI_SUB_PSEUDO, SI_ORDER_ANY);
+
+static void
+tunstart(struct ifnet *ifp)
+{
+ struct tun_softc *tp = ifp->if_softc;
+ struct mbuf *m;
+
+ TUNDEBUG(ifp,"%s starting\n", ifp->if_xname);
+ if (ALTQ_IS_ENABLED(&ifp->if_snd)) {
+ IFQ_LOCK(&ifp->if_snd);
+ IFQ_POLL_NOLOCK(&ifp->if_snd, m);
+ if (m == NULL) {
+ IFQ_UNLOCK(&ifp->if_snd);
+ return;
+ }
+ IFQ_UNLOCK(&ifp->if_snd);
+ }
+
+ mtx_lock(&tp->tun_mtx);
+ if (tp->tun_flags & TUN_RWAIT) {
+ tp->tun_flags &= ~TUN_RWAIT;
+ wakeup(tp);
+ }
+ selwakeuppri(&tp->tun_rsel, PZERO + 1);
+ KNOTE_LOCKED(&tp->tun_rsel.si_note, 0);
+ if (tp->tun_flags & TUN_ASYNC && tp->tun_sigio) {
+ mtx_unlock(&tp->tun_mtx);
+ pgsigio(&tp->tun_sigio, SIGIO, 0);
+ } else
+ mtx_unlock(&tp->tun_mtx);
+}
+
+/* XXX: should return an error code so it can fail. */
+static void
+tuncreate(const char *name, struct cdev *dev)
+{
+ struct tun_softc *sc;
+ struct ifnet *ifp;
+
+ dev->si_flags &= ~SI_CHEAPCLONE;
+
+ sc = malloc(sizeof(*sc), M_TUN, M_WAITOK | M_ZERO);
+ mtx_init(&sc->tun_mtx, "tun_mtx", NULL, MTX_DEF);
+ cv_init(&sc->tun_cv, "tun_condvar");
+ sc->tun_flags = TUN_INITED;
+ sc->tun_dev = dev;
+ mtx_lock(&tunmtx);
+ TAILQ_INSERT_TAIL(&tunhead, sc, tun_list);
+ mtx_unlock(&tunmtx);
+
+ ifp = sc->tun_ifp = if_alloc(IFT_PPP);
+ if (ifp == NULL)
+ panic("%s%d: failed to if_alloc() interface.\n",
+ name, dev2unit(dev));
+ if_initname(ifp, name, dev2unit(dev));
+ ifp->if_mtu = TUNMTU;
+ ifp->if_ioctl = tunifioctl;
+ ifp->if_output = tunoutput;
+ ifp->if_start = tunstart;
+ ifp->if_flags = IFF_POINTOPOINT | IFF_MULTICAST;
+ ifp->if_softc = sc;
+ IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
+ ifp->if_snd.ifq_drv_maxlen = 0;
+ IFQ_SET_READY(&ifp->if_snd);
+ knlist_init_mtx(&sc->tun_rsel.si_note, &sc->tun_mtx);
+ ifp->if_capabilities |= IFCAP_LINKSTATE;
+ ifp->if_capenable |= IFCAP_LINKSTATE;
+
+ if_attach(ifp);
+ bpfattach(ifp, DLT_NULL, sizeof(u_int32_t));
+ dev->si_drv1 = sc;
+ TUNDEBUG(ifp, "interface %s is created, minor = %#x\n",
+ ifp->if_xname, dev2unit(dev));
+}
+
+static int
+tunopen(struct cdev *dev, int flag, int mode, struct thread *td)
+{
+ struct ifnet *ifp;
+ struct tun_softc *tp;
+
+ /*
+ * XXXRW: Non-atomic test and set of dev->si_drv1 requires
+ * synchronization.
+ */
+ tp = dev->si_drv1;
+ if (!tp) {
+ tuncreate(TUNNAME, dev);
+ tp = dev->si_drv1;
+ }
+
+ /*
+ * XXXRW: This use of tun_pid is subject to error due to the
+ * fact that a reference to the tunnel can live beyond the
+ * death of the process that created it. Can we replace this
+ * with a simple busy flag?
+ */
+ mtx_lock(&tp->tun_mtx);
+ if (tp->tun_pid != 0 && tp->tun_pid != td->td_proc->p_pid) {
+ mtx_unlock(&tp->tun_mtx);
+ return (EBUSY);
+ }
+ tp->tun_pid = td->td_proc->p_pid;
+
+ tp->tun_flags |= TUN_OPEN;
+ ifp = TUN2IFP(tp);
+ if_link_state_change(ifp, LINK_STATE_UP);
+ TUNDEBUG(ifp, "open\n");
+ mtx_unlock(&tp->tun_mtx);
+
+ return (0);
+}
+
+/*
+ * tunclose - close the device - mark i/f down & delete
+ * routing info
+ */
+static int
+tunclose(struct cdev *dev, int foo, int bar, struct thread *td)
+{
+ struct tun_softc *tp;
+ struct ifnet *ifp;
+
+ tp = dev->si_drv1;
+ ifp = TUN2IFP(tp);
+
+ mtx_lock(&tp->tun_mtx);
+ tp->tun_flags &= ~TUN_OPEN;
+ tp->tun_pid = 0;
+
+ /*
+ * junk all pending output
+ */
+ CURVNET_SET(ifp->if_vnet);
+ IFQ_PURGE(&ifp->if_snd);
+
+ if (ifp->if_flags & IFF_UP) {
+ mtx_unlock(&tp->tun_mtx);
+ if_down(ifp);
+ mtx_lock(&tp->tun_mtx);
+ }
+
+ /* Delete all addresses and routes which reference this interface. */
+ if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+ struct ifaddr *ifa;
+
+ ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
+ mtx_unlock(&tp->tun_mtx);
+ TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
+ /* deal w/IPv4 PtP destination; unlocked read */
+ if (ifa->ifa_addr->sa_family == AF_INET) {
+ rtinit(ifa, (int)RTM_DELETE,
+ tp->tun_flags & TUN_DSTADDR ? RTF_HOST : 0);
+ } else {
+ rtinit(ifa, (int)RTM_DELETE, 0);
+ }
+ }
+ if_purgeaddrs(ifp);
+ mtx_lock(&tp->tun_mtx);
+ }
+ if_link_state_change(ifp, LINK_STATE_DOWN);
+ CURVNET_RESTORE();
+
+ funsetown(&tp->tun_sigio);
+ selwakeuppri(&tp->tun_rsel, PZERO + 1);
+ KNOTE_LOCKED(&tp->tun_rsel.si_note, 0);
+ TUNDEBUG (ifp, "closed\n");
+
+ cv_broadcast(&tp->tun_cv);
+ mtx_unlock(&tp->tun_mtx);
+ return (0);
+}
+
+static int
+tuninit(struct ifnet *ifp)
+{
+ struct tun_softc *tp = ifp->if_softc;
+#ifdef INET
+ struct ifaddr *ifa;
+#endif
+ int error = 0;
+
+ TUNDEBUG(ifp, "tuninit\n");
+
+ mtx_lock(&tp->tun_mtx);
+ ifp->if_flags |= IFF_UP;
+ ifp->if_drv_flags |= IFF_DRV_RUNNING;
+ getmicrotime(&ifp->if_lastchange);
+
+#ifdef INET
+ if_addr_rlock(ifp);
+ TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
+ if (ifa->ifa_addr->sa_family == AF_INET) {
+ struct sockaddr_in *si;
+
+ si = (struct sockaddr_in *)ifa->ifa_addr;
+ if (si->sin_addr.s_addr)
+ tp->tun_flags |= TUN_IASET;
+
+ si = (struct sockaddr_in *)ifa->ifa_dstaddr;
+ if (si && si->sin_addr.s_addr)
+ tp->tun_flags |= TUN_DSTADDR;
+ }
+ }
+ if_addr_runlock(ifp);
+#endif
+ mtx_unlock(&tp->tun_mtx);
+ return (error);
+}
+
+/*
+ * Process an ioctl request.
+ */
+static int
+tunifioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
+{
+ struct ifreq *ifr = (struct ifreq *)data;
+ struct tun_softc *tp = ifp->if_softc;
+ struct ifstat *ifs;
+ int error = 0;
+
+ switch(cmd) {
+ case SIOCGIFSTATUS:
+ ifs = (struct ifstat *)data;
+ mtx_lock(&tp->tun_mtx);
+ if (tp->tun_pid)
+ sprintf(ifs->ascii + strlen(ifs->ascii),
+ "\tOpened by PID %d\n", tp->tun_pid);
+ mtx_unlock(&tp->tun_mtx);
+ break;
+ case SIOCSIFADDR:
+ error = tuninit(ifp);
+ TUNDEBUG(ifp, "address set, error=%d\n", error);
+ break;
+ case SIOCSIFDSTADDR:
+ error = tuninit(ifp);
+ TUNDEBUG(ifp, "destination address set, error=%d\n", error);
+ break;
+ case SIOCSIFMTU:
+ ifp->if_mtu = ifr->ifr_mtu;
+ TUNDEBUG(ifp, "mtu set\n");
+ break;
+ case SIOCSIFFLAGS:
+ case SIOCADDMULTI:
+ case SIOCDELMULTI:
+ break;
+ default:
+ error = EINVAL;
+ }
+ return (error);
+}
+
+/*
+ * tunoutput - queue packets from higher level ready to put out.
+ */
+static int
+tunoutput(
+ struct ifnet *ifp,
+ struct mbuf *m0,
+ struct sockaddr *dst,
+ struct route *ro)
+{
+ struct tun_softc *tp = ifp->if_softc;
+ u_short cached_tun_flags;
+ int error;
+ u_int32_t af;
+
+ TUNDEBUG (ifp, "tunoutput\n");
+
+#ifdef MAC
+ error = mac_ifnet_check_transmit(ifp, m0);
+ if (error) {
+ m_freem(m0);
+ return (error);
+ }
+#endif
+
+ /* Could be unlocked read? */
+ mtx_lock(&tp->tun_mtx);
+ cached_tun_flags = tp->tun_flags;
+ mtx_unlock(&tp->tun_mtx);
+ if ((cached_tun_flags & TUN_READY) != TUN_READY) {
+ TUNDEBUG (ifp, "not ready 0%o\n", tp->tun_flags);
+ m_freem (m0);
+ return (EHOSTDOWN);
+ }
+
+ if ((ifp->if_flags & IFF_UP) != IFF_UP) {
+ m_freem (m0);
+ return (EHOSTDOWN);
+ }
+
+ /* BPF writes need to be handled specially. */
+ if (dst->sa_family == AF_UNSPEC) {
+ bcopy(dst->sa_data, &af, sizeof(af));
+ dst->sa_family = af;
+ }
+
+ if (bpf_peers_present(ifp->if_bpf)) {
+ af = dst->sa_family;
+ bpf_mtap2(ifp->if_bpf, &af, sizeof(af), m0);
+ }
+
+ /* prepend sockaddr? this may abort if the mbuf allocation fails */
+ if (cached_tun_flags & TUN_LMODE) {
+ /* allocate space for sockaddr */
+ M_PREPEND(m0, dst->sa_len, M_DONTWAIT);
+
+ /* if allocation failed drop packet */
+ if (m0 == NULL) {
+ ifp->if_iqdrops++;
+ ifp->if_oerrors++;
+ return (ENOBUFS);
+ } else {
+ bcopy(dst, m0->m_data, dst->sa_len);
+ }
+ }
+
+ if (cached_tun_flags & TUN_IFHEAD) {
+ /* Prepend the address family */
+ M_PREPEND(m0, 4, M_DONTWAIT);
+
+ /* if allocation failed drop packet */
+ if (m0 == NULL) {
+ ifp->if_iqdrops++;
+ ifp->if_oerrors++;
+ return (ENOBUFS);
+ } else
+ *(u_int32_t *)m0->m_data = htonl(dst->sa_family);
+ } else {
+#ifdef INET
+ if (dst->sa_family != AF_INET)
+#endif
+ {
+ m_freem(m0);
+ return (EAFNOSUPPORT);
+ }
+ }
+
+ error = (ifp->if_transmit)(ifp, m0);
+ if (error) {
+ ifp->if_collisions++;
+ return (ENOBUFS);
+ }
+ ifp->if_opackets++;
+ return (0);
+}
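+
+/*
+ * A hedged illustration of the AF_UNSPEC convention handled above: a
+ * BPF-injected packet reaches tunoutput() with dst->sa_family set to
+ * AF_UNSPEC and the real address family stored at the start of
+ * dst->sa_data, roughly:
+ *
+ *	struct sockaddr dst = { .sa_family = AF_UNSPEC };
+ *	u_int32_t af = AF_INET;
+ *	bcopy(&af, dst.sa_data, sizeof(af));
+ */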
+
+/*
+ * the cdevsw interface is now pretty minimal.
+ */
+static int
+tunioctl(struct cdev *dev, u_long cmd, caddr_t data, int flag, struct thread *td)
+{
+ int error;
+ struct tun_softc *tp = dev->si_drv1;
+ struct tuninfo *tunp;
+
+ switch (cmd) {
+ case TUNSIFINFO:
+ tunp = (struct tuninfo *)data;
+ if (tunp->mtu < IF_MINMTU)
+ return (EINVAL);
+ if (TUN2IFP(tp)->if_mtu != tunp->mtu) {
+ error = priv_check(td, PRIV_NET_SETIFMTU);
+ if (error)
+ return (error);
+ }
+ mtx_lock(&tp->tun_mtx);
+ TUN2IFP(tp)->if_mtu = tunp->mtu;
+ TUN2IFP(tp)->if_type = tunp->type;
+ TUN2IFP(tp)->if_baudrate = tunp->baudrate;
+ mtx_unlock(&tp->tun_mtx);
+ break;
+ case TUNGIFINFO:
+ tunp = (struct tuninfo *)data;
+ mtx_lock(&tp->tun_mtx);
+ tunp->mtu = TUN2IFP(tp)->if_mtu;
+ tunp->type = TUN2IFP(tp)->if_type;
+ tunp->baudrate = TUN2IFP(tp)->if_baudrate;
+ mtx_unlock(&tp->tun_mtx);
+ break;
+ case TUNSDEBUG:
+ tundebug = *(int *)data;
+ break;
+ case TUNGDEBUG:
+ *(int *)data = tundebug;
+ break;
+ case TUNSLMODE:
+ mtx_lock(&tp->tun_mtx);
+ if (*(int *)data) {
+ tp->tun_flags |= TUN_LMODE;
+ tp->tun_flags &= ~TUN_IFHEAD;
+ } else
+ tp->tun_flags &= ~TUN_LMODE;
+ mtx_unlock(&tp->tun_mtx);
+ break;
+ case TUNSIFHEAD:
+ mtx_lock(&tp->tun_mtx);
+ if (*(int *)data) {
+ tp->tun_flags |= TUN_IFHEAD;
+ tp->tun_flags &= ~TUN_LMODE;
+ } else
+ tp->tun_flags &= ~TUN_IFHEAD;
+ mtx_unlock(&tp->tun_mtx);
+ break;
+ case TUNGIFHEAD:
+ mtx_lock(&tp->tun_mtx);
+ *(int *)data = (tp->tun_flags & TUN_IFHEAD) ? 1 : 0;
+ mtx_unlock(&tp->tun_mtx);
+ break;
+ case TUNSIFMODE:
+ /* deny this if UP */
+ if (TUN2IFP(tp)->if_flags & IFF_UP)
+ return(EBUSY);
+
+ switch (*(int *)data & ~IFF_MULTICAST) {
+ case IFF_POINTOPOINT:
+ case IFF_BROADCAST:
+ mtx_lock(&tp->tun_mtx);
+ TUN2IFP(tp)->if_flags &=
+ ~(IFF_BROADCAST|IFF_POINTOPOINT|IFF_MULTICAST);
+ TUN2IFP(tp)->if_flags |= *(int *)data;
+ mtx_unlock(&tp->tun_mtx);
+ break;
+ default:
+ return(EINVAL);
+ }
+ break;
+ case TUNSIFPID:
+ mtx_lock(&tp->tun_mtx);
+ tp->tun_pid = curthread->td_proc->p_pid;
+ mtx_unlock(&tp->tun_mtx);
+ break;
+ case FIONBIO:
+ break;
+ case FIOASYNC:
+ mtx_lock(&tp->tun_mtx);
+ if (*(int *)data)
+ tp->tun_flags |= TUN_ASYNC;
+ else
+ tp->tun_flags &= ~TUN_ASYNC;
+ mtx_unlock(&tp->tun_mtx);
+ break;
+ case FIONREAD:
+ if (!IFQ_IS_EMPTY(&TUN2IFP(tp)->if_snd)) {
+ struct mbuf *mb;
+ IFQ_LOCK(&TUN2IFP(tp)->if_snd);
+ IFQ_POLL_NOLOCK(&TUN2IFP(tp)->if_snd, mb);
+ for (*(int *)data = 0; mb != NULL; mb = mb->m_next)
+ *(int *)data += mb->m_len;
+ IFQ_UNLOCK(&TUN2IFP(tp)->if_snd);
+ } else
+ *(int *)data = 0;
+ break;
+ case FIOSETOWN:
+ return (fsetown(*(int *)data, &tp->tun_sigio));
+
+ case FIOGETOWN:
+ *(int *)data = fgetown(&tp->tun_sigio);
+ return (0);
+
+ /* This is deprecated, FIOSETOWN should be used instead. */
+ case TIOCSPGRP:
+ return (fsetown(-(*(int *)data), &tp->tun_sigio));
+
+ /* This is deprecated, FIOGETOWN should be used instead. */
+ case TIOCGPGRP:
+ *(int *)data = -fgetown(&tp->tun_sigio);
+ return (0);
+
+ default:
+ return (ENOTTY);
+ }
+ return (0);
+}
+
+/*
+ * The cdevsw read interface - reads a packet at a time, or at
+ * least as much of a packet as can be read.
+ */
+static int
+tunread(struct cdev *dev, struct uio *uio, int flag)
+{
+ struct tun_softc *tp = dev->si_drv1;
+ struct ifnet *ifp = TUN2IFP(tp);
+ struct mbuf *m;
+ int error=0, len;
+
+ TUNDEBUG (ifp, "read\n");
+ mtx_lock(&tp->tun_mtx);
+ if ((tp->tun_flags & TUN_READY) != TUN_READY) {
+ mtx_unlock(&tp->tun_mtx);
+ TUNDEBUG (ifp, "not ready 0%o\n", tp->tun_flags);
+ return (EHOSTDOWN);
+ }
+
+ tp->tun_flags &= ~TUN_RWAIT;
+
+ do {
+ IFQ_DEQUEUE(&ifp->if_snd, m);
+ if (m == NULL) {
+ if (flag & O_NONBLOCK) {
+ mtx_unlock(&tp->tun_mtx);
+ return (EWOULDBLOCK);
+ }
+ tp->tun_flags |= TUN_RWAIT;
+ error = mtx_sleep(tp, &tp->tun_mtx, PCATCH | (PZERO + 1),
+ "tunread", 0);
+ if (error != 0) {
+ mtx_unlock(&tp->tun_mtx);
+ return (error);
+ }
+ }
+ } while (m == NULL);
+ mtx_unlock(&tp->tun_mtx);
+
+ while (m && uio->uio_resid > 0 && error == 0) {
+ len = min(uio->uio_resid, m->m_len);
+ if (len != 0)
+ error = uiomove(mtod(m, void *), len, uio);
+ m = m_free(m);
+ }
+
+ if (m) {
+ TUNDEBUG(ifp, "Dropping mbuf\n");
+ m_freem(m);
+ }
+ return (error);
+}
+
+/*
+ * the cdevsw write interface - an atomic write is a packet - or else!
+ */
+static int
+tunwrite(struct cdev *dev, struct uio *uio, int flag)
+{
+ struct tun_softc *tp = dev->si_drv1;
+ struct ifnet *ifp = TUN2IFP(tp);
+ struct mbuf *m;
+ int error = 0;
+ uint32_t family;
+ int isr;
+
+ TUNDEBUG(ifp, "tunwrite\n");
+
+ if ((ifp->if_flags & IFF_UP) != IFF_UP)
+ /* ignore silently */
+ return (0);
+
+ if (uio->uio_resid == 0)
+ return (0);
+
+ if (uio->uio_resid < 0 || uio->uio_resid > TUNMRU) {
+ TUNDEBUG(ifp, "len=%zd!\n", uio->uio_resid);
+ return (EIO);
+ }
+
+ if ((m = m_uiotombuf(uio, M_DONTWAIT, 0, 0, M_PKTHDR)) == NULL) {
+ ifp->if_ierrors++;
+ return (error);
+ }
+
+ m->m_pkthdr.rcvif = ifp;
+#ifdef MAC
+ mac_ifnet_create_mbuf(ifp, m);
+#endif
+
+ /* Could be unlocked read? */
+ mtx_lock(&tp->tun_mtx);
+ if (tp->tun_flags & TUN_IFHEAD) {
+ mtx_unlock(&tp->tun_mtx);
+ if (m->m_len < sizeof(family) &&
+ (m = m_pullup(m, sizeof(family))) == NULL)
+ return (ENOBUFS);
+ family = ntohl(*mtod(m, u_int32_t *));
+ m_adj(m, sizeof(family));
+ } else {
+ mtx_unlock(&tp->tun_mtx);
+ family = AF_INET;
+ }
+
+ BPF_MTAP2(ifp, &family, sizeof(family), m);
+
+ switch (family) {
+#ifdef INET
+ case AF_INET:
+ isr = NETISR_IP;
+ break;
+#endif
+#ifdef INET6
+ case AF_INET6:
+ isr = NETISR_IPV6;
+ break;
+#endif
+#ifdef IPX
+ case AF_IPX:
+ isr = NETISR_IPX;
+ break;
+#endif
+#ifdef NETATALK
+ case AF_APPLETALK:
+ isr = NETISR_ATALK2;
+ break;
+#endif
+ default:
+ m_freem(m);
+ return (EAFNOSUPPORT);
+ }
+ /* First chunk of an mbuf contains good junk */
+ if (harvest.point_to_point)
+ random_harvest(m, 16, 3, 0, RANDOM_NET);
+ ifp->if_ibytes += m->m_pkthdr.len;
+ ifp->if_ipackets++;
+ CURVNET_SET(ifp->if_vnet);
+ netisr_dispatch(isr, m);
+ CURVNET_RESTORE();
+ return (0);
+}
+
+/*
+ * tunpoll - the poll interface, this is only useful on reads
+ * really. The write detect always returns true, write never blocks
+ * anyway, it either accepts the packet or drops it.
+ */
+static int
+tunpoll(struct cdev *dev, int events, struct thread *td)
+{
+ struct tun_softc *tp = dev->si_drv1;
+ struct ifnet *ifp = TUN2IFP(tp);
+ int revents = 0;
+ struct mbuf *m;
+
+ TUNDEBUG(ifp, "tunpoll\n");
+
+ if (events & (POLLIN | POLLRDNORM)) {
+ IFQ_LOCK(&ifp->if_snd);
+ IFQ_POLL_NOLOCK(&ifp->if_snd, m);
+ if (m != NULL) {
+ TUNDEBUG(ifp, "tunpoll q=%d\n", ifp->if_snd.ifq_len);
+ revents |= events & (POLLIN | POLLRDNORM);
+ } else {
+ TUNDEBUG(ifp, "tunpoll waiting\n");
+ selrecord(td, &tp->tun_rsel);
+ }
+ IFQ_UNLOCK(&ifp->if_snd);
+ }
+ if (events & (POLLOUT | POLLWRNORM))
+ revents |= events & (POLLOUT | POLLWRNORM);
+
+ return (revents);
+}
+
+/*
+ * tunkqfilter - support for the kevent() system call.
+ */
+static int
+tunkqfilter(struct cdev *dev, struct knote *kn)
+{
+ struct tun_softc *tp = dev->si_drv1;
+ struct ifnet *ifp = TUN2IFP(tp);
+
+ switch(kn->kn_filter) {
+ case EVFILT_READ:
+ TUNDEBUG(ifp, "%s kqfilter: EVFILT_READ, minor = %#x\n",
+ ifp->if_xname, dev2unit(dev));
+ kn->kn_fop = &tun_read_filterops;
+ break;
+
+ case EVFILT_WRITE:
+ TUNDEBUG(ifp, "%s kqfilter: EVFILT_WRITE, minor = %#x\n",
+ ifp->if_xname, dev2unit(dev));
+ kn->kn_fop = &tun_write_filterops;
+ break;
+
+ default:
+ TUNDEBUG(ifp, "%s kqfilter: invalid filter, minor = %#x\n",
+ ifp->if_xname, dev2unit(dev));
+ return(EINVAL);
+ }
+
+ kn->kn_hook = tp;
+ knlist_add(&tp->tun_rsel.si_note, kn, 0);
+
+ return (0);
+}
+
+/*
+ * Return true if there is data in the interface queue.
+ */
+static int
+tunkqread(struct knote *kn, long hint)
+{
+ int ret;
+ struct tun_softc *tp = kn->kn_hook;
+ struct cdev *dev = tp->tun_dev;
+ struct ifnet *ifp = TUN2IFP(tp);
+
+ if ((kn->kn_data = ifp->if_snd.ifq_len) > 0) {
+ TUNDEBUG(ifp,
+ "%s have data in the queue. Len = %d, minor = %#x\n",
+ ifp->if_xname, ifp->if_snd.ifq_len, dev2unit(dev));
+ ret = 1;
+ } else {
+ TUNDEBUG(ifp,
+ "%s waiting for data, minor = %#x\n", ifp->if_xname,
+ dev2unit(dev));
+ ret = 0;
+ }
+
+ return (ret);
+}
+
+/*
+ * Always writable; always return the MTU in kn->kn_data.
+ */
+static int
+tunkqwrite(struct knote *kn, long hint)
+{
+ struct tun_softc *tp = kn->kn_hook;
+ struct ifnet *ifp = TUN2IFP(tp);
+
+ kn->kn_data = ifp->if_mtu;
+
+ return (1);
+}
+
+static void
+tunkqdetach(struct knote *kn)
+{
+ struct tun_softc *tp = kn->kn_hook;
+
+ knlist_remove(&tp->tun_rsel.si_note, kn, 0);
+}
diff --git a/rtems/freebsd/net/if_tun.h b/rtems/freebsd/net/if_tun.h
new file mode 100644
index 00000000..29718cda
--- /dev/null
+++ b/rtems/freebsd/net/if_tun.h
@@ -0,0 +1,48 @@
+/* $NetBSD: if_tun.h,v 1.5 1994/06/29 06:36:27 cgd Exp $ */
+
+/*-
+ * Copyright (c) 1988, Julian Onions <jpo@cs.nott.ac.uk>
+ * Nottingham University 1987.
+ *
+ * This source may be freely distributed, however I would be interested
+ * in any changes that are made.
+ *
+ * This driver takes packets off the IP i/f and hands them up to a
+ * user process to have its wicked way with. This driver has its
+ * roots in a similar driver written by Phil Cockcroft (formerly) at
+ * UCL. This driver is based much more on read/write/select mode of
+ * operation though.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _NET_IF_TUN_HH_
+#define _NET_IF_TUN_HH_
+
+/* Refer to if_tunvar.h for the softc stuff */
+
+/* Maximum transmit packet size (default) */
+#define TUNMTU 1500
+
+/* Maximum receive packet size (hard limit) */
+#define TUNMRU 16384
+
+struct tuninfo {
+ int baudrate; /* linespeed */
+ short mtu; /* maximum transmission unit */
+ u_char type; /* ethernet, tokenring, etc. */
+ u_char dummy; /* place holder */
+};
+
+/* ioctl's for get/set debug */
+#define TUNSDEBUG _IOW('t', 90, int)
+#define TUNGDEBUG _IOR('t', 89, int)
+#define TUNSIFINFO _IOW('t', 91, struct tuninfo)
+#define TUNGIFINFO _IOR('t', 92, struct tuninfo)
+#define TUNSLMODE _IOW('t', 93, int)
+#define TUNSIFMODE _IOW('t', 94, int)
+#define TUNSIFPID _IO('t', 95)
+#define TUNSIFHEAD _IOW('t', 96, int)
+#define TUNGIFHEAD _IOR('t', 97, int)
+
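+/*
+ * Usage sketch (hypothetical userspace code, not part of this header):
+ * with TUNSIFHEAD enabled, every packet read from the descriptor is
+ * prefixed by a 4-byte address family in network byte order, and written
+ * packets must carry the same prefix (see tunread()/tunwrite()).
+ *
+ *	int fd = open("/dev/tun0", O_RDWR);
+ *	int one = 1;
+ *	ioctl(fd, TUNSIFHEAD, &one);		// enable multi-AF framing
+ *	char buf[4 + TUNMRU];
+ *	ssize_t n = read(fd, buf, sizeof(buf));	// u_int32_t AF + packet
+ */
+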
+#endif /* !_NET_IF_TUN_HH_ */
diff --git a/rtems/freebsd/net/if_types.h b/rtems/freebsd/net/if_types.h
new file mode 100644
index 00000000..bb46d79b
--- /dev/null
+++ b/rtems/freebsd/net/if_types.h
@@ -0,0 +1,254 @@
+/*-
+ * Copyright (c) 1989, 1993, 1994
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)if_types.h 8.3 (Berkeley) 4/28/95
+ * $FreeBSD$
+ * $NetBSD: if_types.h,v 1.16 2000/04/19 06:30:53 itojun Exp $
+ */
+
+#ifndef _NET_IF_TYPES_HH_
+#define _NET_IF_TYPES_HH_
+
+/*
+ * Interface types for benefit of parsing media address headers.
+ * This list is derived from the SNMP list of ifTypes, originally
+ * documented in RFC1573, now maintained as:
+ *
+ * http://www.iana.org/assignments/smi-numbers
+ */
+
+#define IFT_OTHER 0x1 /* none of the following */
+#define IFT_1822 0x2 /* old-style arpanet imp */
+#define IFT_HDH1822 0x3 /* HDH arpanet imp */
+#define IFT_X25DDN 0x4 /* x25 to imp */
+#define IFT_X25 0x5 /* PDN X25 interface (RFC877) */
+#define IFT_ETHER 0x6 /* Ethernet CSMA/CD */
+#define IFT_ISO88023 0x7 /* CSMA/CD */
+#define IFT_ISO88024 0x8 /* Token Bus */
+#define IFT_ISO88025 0x9 /* Token Ring */
+#define IFT_ISO88026 0xa /* MAN */
+#define IFT_STARLAN 0xb
+#define IFT_P10 0xc /* Proteon 10MBit ring */
+#define IFT_P80 0xd /* Proteon 80MBit ring */
+#define IFT_HY 0xe /* Hyperchannel */
+#define IFT_FDDI 0xf
+#define IFT_LAPB 0x10
+#define IFT_SDLC 0x11
+#define IFT_T1 0x12
+#define IFT_CEPT 0x13 /* E1 - european T1 */
+#define IFT_ISDNBASIC 0x14
+#define IFT_ISDNPRIMARY 0x15
+#define IFT_PTPSERIAL 0x16 /* Proprietary PTP serial */
+#define IFT_PPP 0x17 /* RFC 1331 */
+#define IFT_LOOP 0x18 /* loopback */
+#define IFT_EON 0x19 /* ISO over IP */
+#define IFT_XETHER 0x1a /* obsolete 3MB experimental ethernet */
+#define IFT_NSIP 0x1b /* XNS over IP */
+#define IFT_SLIP 0x1c /* IP over generic TTY */
+#define IFT_ULTRA 0x1d /* Ultra Technologies */
+#define IFT_DS3 0x1e /* Generic T3 */
+#define IFT_SIP 0x1f /* SMDS */
+#define IFT_FRELAY 0x20 /* Frame Relay DTE only */
+#define IFT_RS232 0x21
+#define IFT_PARA 0x22 /* parallel-port */
+#define IFT_ARCNET 0x23
+#define IFT_ARCNETPLUS 0x24
+#define IFT_ATM 0x25 /* ATM cells */
+#define IFT_MIOX25 0x26
+#define IFT_SONET 0x27 /* SONET or SDH */
+#define IFT_X25PLE 0x28
+#define IFT_ISO88022LLC 0x29
+#define IFT_LOCALTALK 0x2a
+#define IFT_SMDSDXI 0x2b
+#define IFT_FRELAYDCE 0x2c /* Frame Relay DCE */
+#define IFT_V35 0x2d
+#define IFT_HSSI 0x2e
+#define IFT_HIPPI 0x2f
+#define IFT_MODEM 0x30 /* Generic Modem */
+#define IFT_AAL5 0x31 /* AAL5 over ATM */
+#define IFT_SONETPATH 0x32
+#define IFT_SONETVT 0x33
+#define IFT_SMDSICIP 0x34 /* SMDS InterCarrier Interface */
+#define IFT_PROPVIRTUAL 0x35 /* Proprietary Virtual/internal */
+#define IFT_PROPMUX 0x36 /* Proprietary Multiplexing */
+#define IFT_IEEE80212 0x37 /* 100BaseVG */
+#define IFT_FIBRECHANNEL 0x38 /* Fibre Channel */
+#define IFT_HIPPIINTERFACE 0x39 /* HIPPI interfaces */
+#define IFT_FRAMERELAYINTERCONNECT 0x3a /* Obsolete, use either 0x20 or 0x2c */
+#define IFT_AFLANE8023 0x3b /* ATM Emulated LAN for 802.3 */
+#define IFT_AFLANE8025 0x3c /* ATM Emulated LAN for 802.5 */
+#define IFT_CCTEMUL 0x3d /* ATM Emulated circuit */
+#define IFT_FASTETHER 0x3e /* Fast Ethernet (100BaseT) */
+#define IFT_ISDN 0x3f /* ISDN and X.25 */
+#define IFT_V11 0x40 /* CCITT V.11/X.21 */
+#define IFT_V36 0x41 /* CCITT V.36 */
+#define IFT_G703AT64K 0x42 /* CCITT G703 at 64Kbps */
+#define IFT_G703AT2MB 0x43 /* Obsolete see DS1-MIB */
+#define IFT_QLLC 0x44 /* SNA QLLC */
+#define IFT_FASTETHERFX 0x45 /* Fast Ethernet (100BaseFX) */
+#define IFT_CHANNEL 0x46 /* channel */
+#define IFT_IEEE80211 0x47 /* radio spread spectrum */
+#define IFT_IBM370PARCHAN 0x48 /* IBM System 360/370 OEMI Channel */
+#define IFT_ESCON 0x49 /* IBM Enterprise Systems Connection */
+#define IFT_DLSW 0x4a /* Data Link Switching */
+#define IFT_ISDNS 0x4b /* ISDN S/T interface */
+#define IFT_ISDNU 0x4c /* ISDN U interface */
+#define IFT_LAPD 0x4d /* Link Access Protocol D */
+#define IFT_IPSWITCH 0x4e /* IP Switching Objects */
+#define IFT_RSRB 0x4f /* Remote Source Route Bridging */
+#define IFT_ATMLOGICAL 0x50 /* ATM Logical Port */
+#define IFT_DS0 0x51 /* Digital Signal Level 0 */
+#define IFT_DS0BUNDLE 0x52 /* group of ds0s on the same ds1 */
+#define IFT_BSC 0x53 /* Bisynchronous Protocol */
+#define IFT_ASYNC 0x54 /* Asynchronous Protocol */
+#define IFT_CNR 0x55 /* Combat Net Radio */
+#define IFT_ISO88025DTR 0x56 /* ISO 802.5r DTR */
+#define IFT_EPLRS 0x57 /* Ext Pos Loc Report Sys */
+#define IFT_ARAP 0x58 /* Appletalk Remote Access Protocol */
+#define IFT_PROPCNLS 0x59 /* Proprietary Connectionless Protocol*/
+#define IFT_HOSTPAD 0x5a /* CCITT-ITU X.29 PAD Protocol */
+#define IFT_TERMPAD 0x5b /* CCITT-ITU X.3 PAD Facility */
+#define IFT_FRAMERELAYMPI 0x5c /* Multiproto Interconnect over FR */
+#define IFT_X213 0x5d /* CCITT-ITU X213 */
+#define IFT_ADSL 0x5e /* Asymmetric Digital Subscriber Loop */
+#define IFT_RADSL 0x5f /* Rate-Adapt. Digital Subscriber Loop*/
+#define IFT_SDSL 0x60 /* Symmetric Digital Subscriber Loop */
+#define IFT_VDSL 0x61 /* Very H-Speed Digital Subscrib. Loop*/
+#define IFT_ISO88025CRFPINT 0x62 /* ISO 802.5 CRFP */
+#define IFT_MYRINET 0x63 /* Myricom Myrinet */
+#define IFT_VOICEEM 0x64 /* voice recEive and transMit */
+#define IFT_VOICEFXO 0x65 /* voice Foreign Exchange Office */
+#define IFT_VOICEFXS 0x66 /* voice Foreign Exchange Station */
+#define IFT_VOICEENCAP 0x67 /* voice encapsulation */
+#define IFT_VOICEOVERIP 0x68 /* voice over IP encapsulation */
+#define IFT_ATMDXI 0x69 /* ATM DXI */
+#define IFT_ATMFUNI 0x6a /* ATM FUNI */
+#define IFT_ATMIMA 0x6b /* ATM IMA */
+#define IFT_PPPMULTILINKBUNDLE 0x6c /* PPP Multilink Bundle */
+#define IFT_IPOVERCDLC 0x6d /* IBM ipOverCdlc */
+#define IFT_IPOVERCLAW 0x6e /* IBM Common Link Access to Workstn */
+#define IFT_STACKTOSTACK 0x6f /* IBM stackToStack */
+#define IFT_VIRTUALIPADDRESS 0x70 /* IBM VIPA */
+#define IFT_MPC 0x71 /* IBM multi-protocol channel support */
+#define IFT_IPOVERATM 0x72 /* IBM ipOverAtm */
+#define IFT_ISO88025FIBER 0x73 /* ISO 802.5j Fiber Token Ring */
+#define IFT_TDLC 0x74 /* IBM twinaxial data link control */
+#define IFT_GIGABITETHERNET 0x75 /* Gigabit Ethernet */
+#define IFT_HDLC 0x76 /* HDLC */
+#define IFT_LAPF 0x77 /* LAP F */
+#define IFT_V37 0x78 /* V.37 */
+#define IFT_X25MLP 0x79 /* Multi-Link Protocol */
+#define IFT_X25HUNTGROUP 0x7a /* X25 Hunt Group */
+#define IFT_TRANSPHDLC 0x7b /* Transp HDLC */
+#define IFT_INTERLEAVE 0x7c /* Interleave channel */
+#define IFT_FAST 0x7d /* Fast channel */
+#define IFT_IP 0x7e /* IP (for APPN HPR in IP networks) */
+#define IFT_DOCSCABLEMACLAYER 0x7f /* CATV Mac Layer */
+#define IFT_DOCSCABLEDOWNSTREAM 0x80 /* CATV Downstream interface */
+#define IFT_DOCSCABLEUPSTREAM 0x81 /* CATV Upstream interface */
+#define IFT_A12MPPSWITCH 0x82 /* Avalon Parallel Processor */
+#define IFT_TUNNEL 0x83 /* Encapsulation interface */
+#define IFT_COFFEE 0x84 /* coffee pot */
+#define IFT_CES 0x85 /* Circuit Emulation Service */
+#define IFT_ATMSUBINTERFACE 0x86 /* (x) ATM Sub Interface */
+#define IFT_L2VLAN 0x87 /* Layer 2 Virtual LAN using 802.1Q */
+#define IFT_L3IPVLAN 0x88 /* Layer 3 Virtual LAN - IP Protocol */
+#define IFT_L3IPXVLAN 0x89 /* Layer 3 Virtual LAN - IPX Prot. */
+#define IFT_DIGITALPOWERLINE 0x8a /* IP over Power Lines */
+#define IFT_MEDIAMAILOVERIP 0x8b /* (xxx) Multimedia Mail over IP */
+#define IFT_DTM 0x8c /* Dynamic synchronous Transfer Mode */
+#define IFT_DCN 0x8d /* Data Communications Network */
+#define IFT_IPFORWARD 0x8e /* IP Forwarding Interface */
+#define IFT_MSDSL 0x8f /* Multi-rate Symmetric DSL */
+#define IFT_IEEE1394 0x90 /* IEEE1394 High Performance SerialBus*/
+#define IFT_IFGSN 0x91 /* HIPPI-6400 */
+#define IFT_DVBRCCMACLAYER 0x92 /* DVB-RCC MAC Layer */
+#define IFT_DVBRCCDOWNSTREAM 0x93 /* DVB-RCC Downstream Channel */
+#define IFT_DVBRCCUPSTREAM 0x94 /* DVB-RCC Upstream Channel */
+#define IFT_ATMVIRTUAL 0x95 /* ATM Virtual Interface */
+#define IFT_MPLSTUNNEL 0x96 /* MPLS Tunnel Virtual Interface */
+#define IFT_SRP 0x97 /* Spatial Reuse Protocol */
+#define IFT_VOICEOVERATM 0x98 /* Voice over ATM */
+#define IFT_VOICEOVERFRAMERELAY 0x99 /* Voice Over Frame Relay */
+#define IFT_IDSL 0x9a /* Digital Subscriber Loop over ISDN */
+#define IFT_COMPOSITELINK 0x9b /* Avici Composite Link Interface */
+#define IFT_SS7SIGLINK 0x9c /* SS7 Signaling Link */
+#define IFT_PROPWIRELESSP2P 0x9d /* Prop. P2P wireless interface */
+#define IFT_FRFORWARD 0x9e /* Frame forward Interface */
+#define IFT_RFC1483 0x9f /* Multiprotocol over ATM AAL5 */
+#define IFT_USB 0xa0 /* USB Interface */
+#define IFT_IEEE8023ADLAG 0xa1 /* IEEE 802.3ad Link Aggregate*/
+#define IFT_BGPPOLICYACCOUNTING 0xa2 /* BGP Policy Accounting */
+#define IFT_FRF16MFRBUNDLE 0xa3 /* FRF.16 Multilink Frame Relay */
+#define IFT_H323GATEKEEPER 0xa4 /* H323 Gatekeeper */
+#define IFT_H323PROXY 0xa5 /* H323 Voice and Video Proxy */
+#define IFT_MPLS 0xa6 /* MPLS */
+#define IFT_MFSIGLINK 0xa7 /* Multi-frequency signaling link */
+#define IFT_HDSL2 0xa8 /* High Bit-Rate DSL, 2nd gen. */
+#define IFT_SHDSL 0xa9 /* Multirate HDSL2 */
+#define IFT_DS1FDL 0xaa /* Facility Data Link (4Kbps) on a DS1*/
+#define IFT_POS 0xab /* Packet over SONET/SDH Interface */
+#define IFT_DVBASILN 0xac /* DVB-ASI Input */
+#define IFT_DVBASIOUT 0xad /* DVB-ASI Output */
+#define IFT_PLC 0xae /* Power Line Communications */
+#define IFT_NFAS 0xaf /* Non-Facility Associated Signaling */
+#define IFT_TR008 0xb0 /* TR008 */
+#define IFT_GR303RDT 0xb1 /* Remote Digital Terminal */
+#define IFT_GR303IDT 0xb2 /* Integrated Digital Terminal */
+#define IFT_ISUP 0xb3 /* ISUP */
+#define IFT_PROPDOCSWIRELESSMACLAYER 0xb4 /* prop/Wireless MAC Layer */
+#define IFT_PROPDOCSWIRELESSDOWNSTREAM 0xb5 /* prop/Wireless Downstream */
+#define IFT_PROPDOCSWIRELESSUPSTREAM 0xb6 /* prop/Wireless Upstream */
+#define IFT_HIPERLAN2 0xb7 /* HIPERLAN Type 2 Radio Interface */
+#define IFT_PROPBWAP2MP 0xb8 /* PropBroadbandWirelessAccess P2MP*/
+#define IFT_SONETOVERHEADCHANNEL 0xb9 /* SONET Overhead Channel */
+#define IFT_DIGITALWRAPPEROVERHEADCHANNEL 0xba /* Digital Wrapper Overhead */
+#define IFT_AAL2 0xbb /* ATM adaptation layer 2 */
+#define IFT_RADIOMAC 0xbc /* MAC layer over radio links */
+#define IFT_ATMRADIO 0xbd /* ATM over radio links */
+#define IFT_IMT 0xbe /* Inter-Machine Trunks */
+#define IFT_MVL 0xbf /* Multiple Virtual Lines DSL */
+#define IFT_REACHDSL 0xc0 /* Long Reach DSL */
+#define IFT_FRDLCIENDPT 0xc1 /* Frame Relay DLCI End Point */
+#define IFT_ATMVCIENDPT 0xc2 /* ATM VCI End Point */
+#define IFT_OPTICALCHANNEL 0xc3 /* Optical Channel */
+#define IFT_OPTICALTRANSPORT 0xc4 /* Optical Transport */
+#define IFT_BRIDGE 0xd1 /* Transparent bridge interface */
+
+#define IFT_STF 0xd7 /* 6to4 interface */
+
+/* not based on IANA assignments */
+#define IFT_GIF 0xf0
+#define IFT_PVC 0xf1
+#define IFT_FAITH 0xf2
+#define IFT_ENC 0xf4
+#define IFT_PFLOG 0xf6
+#define IFT_PFSYNC 0xf7
+#define IFT_CARP 0xf8 /* Common Address Redundancy Protocol */
+#define IFT_IPXIP 0xf9 /* IPX over IP tunneling; no longer used. */
+#endif /* !_NET_IF_TYPES_HH_ */
diff --git a/rtems/freebsd/net/if_var.h b/rtems/freebsd/net/if_var.h
new file mode 100644
index 00000000..cc3d90e6
--- /dev/null
+++ b/rtems/freebsd/net/if_var.h
@@ -0,0 +1,904 @@
+/*-
+ * Copyright (c) 1982, 1986, 1989, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * From: @(#)if.h 8.1 (Berkeley) 6/10/93
+ * $FreeBSD$
+ */
+
+#ifndef _NET_IF_VAR_HH_
+#define _NET_IF_VAR_HH_
+
+/*
+ * Structures defining a network interface, providing a packet
+ * transport mechanism (ala level 0 of the PUP protocols).
+ *
+ * Each interface accepts output datagrams of a specified maximum
+ * length, and provides higher level routines with input datagrams
+ * received from its medium.
+ *
+ * Output occurs when the routine if_output is called, with three parameters:
+ * (*ifp->if_output)(ifp, m, dst, rt)
+ * Here m is the mbuf chain to be sent and dst is the destination address.
+ * The output routine encapsulates the supplied datagram if necessary,
+ * and then transmits it on its medium.
+ *
+ * On input, each interface unwraps the data received by it, and either
+ * places it on the input queue of an internetwork datagram routine
+ * and posts the associated software interrupt, or passes the datagram to a raw
+ * packet input routine.
+ *
+ * Routines exist for locating interfaces by their addresses
+ * or for locating an interface on a certain network, as well as more general
+ * routing and gateway routines maintaining information used to locate
+ * interfaces. These routines live in the files if.c and route.c
+ */
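+
+/*
+ * Illustrative sketch of the if_output contract described above; the
+ * variable names are assumptions for the example only.  A protocol
+ * hands a datagram to the interface like so (the route argument may
+ * be NULL when no cached route is available):
+ *
+ *	struct sockaddr dst;	// destination, filled in by the protocol
+ *	struct mbuf *m;		// datagram to transmit
+ *	int error;
+ *
+ *	error = (*ifp->if_output)(ifp, m, &dst, NULL);
+ */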
+
+#ifdef __STDC__
+/*
+ * Forward structure declarations for function prototypes [sic].
+ */
+struct mbuf;
+struct thread;
+struct rtentry;
+struct rt_addrinfo;
+struct socket;
+struct ether_header;
+struct carp_if;
+struct ifvlantrunk;
+struct route;
+struct vnet;
+#endif
+
+#include <rtems/freebsd/sys/queue.h> /* get TAILQ macros */
+
+#ifdef _KERNEL
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/eventhandler.h>
+#include <rtems/freebsd/sys/buf_ring.h>
+#include <rtems/freebsd/net/vnet.h>
+#endif /* _KERNEL */
+#include <rtems/freebsd/sys/lock.h> /* XXX */
+#include <rtems/freebsd/sys/mutex.h> /* XXX */
+#include <rtems/freebsd/sys/rwlock.h> /* XXX */
+#include <rtems/freebsd/sys/sx.h> /* XXX */
+#include <rtems/freebsd/sys/event.h> /* XXX */
+#include <rtems/freebsd/sys/_task.h>
+
+#define IF_DUNIT_NONE -1
+
+#include <rtems/freebsd/altq/if_altq.h>
+
+TAILQ_HEAD(ifnethead, ifnet); /* we use TAILQs so that the order of */
+TAILQ_HEAD(ifaddrhead, ifaddr); /* instantiation is preserved in the list */
+TAILQ_HEAD(ifprefixhead, ifprefix);
+TAILQ_HEAD(ifmultihead, ifmultiaddr);
+TAILQ_HEAD(ifgrouphead, ifg_group);
+
+/*
+ * Structure defining a queue for a network interface.
+ */
+struct ifqueue {
+ struct mbuf *ifq_head;
+ struct mbuf *ifq_tail;
+ int ifq_len;
+ int ifq_maxlen;
+ int ifq_drops;
+ struct mtx ifq_mtx;
+};
+
+/*
+ * Structure defining a network interface.
+ *
+ * (Would like to call this struct ``if'', but C isn't PL/1.)
+ */
+
+struct ifnet {
+ void *if_softc; /* pointer to driver state */
+ void *if_l2com; /* pointer to protocol bits */
+ struct vnet *if_vnet; /* pointer to network stack instance */
+ TAILQ_ENTRY(ifnet) if_link; /* all struct ifnets are chained */
+ char if_xname[IFNAMSIZ]; /* external name (name + unit) */
+ const char *if_dname; /* driver name */
+ int if_dunit; /* unit or IF_DUNIT_NONE */
+ u_int if_refcount; /* reference count */
+ struct ifaddrhead if_addrhead; /* linked list of addresses per if */
+ /*
+ * if_addrhead is the list of all addresses associated with
+ * an interface.
+ * Some code in the kernel assumes that first element
+ * of the list has type AF_LINK, and contains sockaddr_dl
+ * addresses which store the link-level address and the name
+ * of the interface.
+ * However, access to the AF_LINK address through this
+ * field is deprecated. Use if_addr or ifaddr_byindex() instead.
+ */
+ int if_pcount; /* number of promiscuous listeners */
+ struct carp_if *if_carp; /* carp interface structure */
+ struct bpf_if *if_bpf; /* packet filter structure */
+ u_short if_index; /* numeric abbreviation for this if */
+ short if_timer; /* time 'til if_watchdog called */
+ struct ifvlantrunk *if_vlantrunk; /* pointer to 802.1q data */
+ int if_flags; /* up/down, broadcast, etc. */
+ int if_capabilities; /* interface features & capabilities */
+ int if_capenable; /* enabled features & capabilities */
+ void *if_linkmib; /* link-type-specific MIB data */
+ size_t if_linkmiblen; /* length of above data */
+ struct if_data if_data;
+ struct ifmultihead if_multiaddrs; /* multicast addresses configured */
+ int if_amcount; /* number of all-multicast requests */
+/* procedure handles */
+ int (*if_output) /* output routine (enqueue) */
+ (struct ifnet *, struct mbuf *, struct sockaddr *,
+ struct route *);
+ void (*if_input) /* input routine (from h/w driver) */
+ (struct ifnet *, struct mbuf *);
+ void (*if_start) /* initiate output routine */
+ (struct ifnet *);
+ int (*if_ioctl) /* ioctl routine */
+ (struct ifnet *, u_long, caddr_t);
+ void (*if_watchdog) /* timer routine */
+ (struct ifnet *);
+ void (*if_init) /* Init routine */
+ (void *);
+ int (*if_resolvemulti) /* validate/resolve multicast */
+ (struct ifnet *, struct sockaddr **, struct sockaddr *);
+ void (*if_qflush) /* flush any queues */
+ (struct ifnet *);
+ int (*if_transmit) /* initiate output routine */
+ (struct ifnet *, struct mbuf *);
+ void (*if_reassign) /* reassign to vnet routine */
+ (struct ifnet *, struct vnet *, char *);
+ struct vnet *if_home_vnet; /* where this ifnet originates from */
+ struct ifaddr *if_addr; /* pointer to link-level address */
+ void *if_llsoftc; /* link layer softc */
+ int if_drv_flags; /* driver-managed status flags */
+ struct ifaltq if_snd; /* output queue (includes altq) */
+ const u_int8_t *if_broadcastaddr; /* linklevel broadcast bytestring */
+
+ void *if_bridge; /* bridge glue */
+
+ struct label *if_label; /* interface MAC label */
+
+ /* these are only used by IPv6 */
+ struct ifprefixhead if_prefixhead; /* list of prefixes per if */
+ void *if_afdata[AF_MAX];
+ int if_afdata_initialized;
+ struct rwlock if_afdata_lock;
+ struct task if_linktask; /* task for link change events */
+ struct mtx if_addr_mtx; /* mutex to protect address lists */
+
+ LIST_ENTRY(ifnet) if_clones; /* interfaces of a cloner */
+ TAILQ_HEAD(, ifg_list) if_groups; /* linked list of groups per if */
+ /* protected by if_addr_mtx */
+ void *if_pf_kif;
+ void *if_lagg; /* lagg glue */
+ u_char if_alloctype; /* if_type at time of allocation */
+
+ /*
+ * Spare fields are added so that we can modify sensitive data
+ * structures without changing the kernel binary interface, and must
+ * be used with care where binary compatibility is required.
+ */
+ char if_cspare[3];
+ char *if_description; /* interface description */
+ void *if_pspare[7];
+ int if_ispare[4];
+};
+
+typedef void if_init_f_t(void *);
+
+/*
+ * XXX These aliases are terribly dangerous because they could apply
+ * to anything.
+ */
+#define if_mtu if_data.ifi_mtu
+#define if_type if_data.ifi_type
+#define if_physical if_data.ifi_physical
+#define if_addrlen if_data.ifi_addrlen
+#define if_hdrlen if_data.ifi_hdrlen
+#define if_metric if_data.ifi_metric
+#define if_link_state if_data.ifi_link_state
+#define if_baudrate if_data.ifi_baudrate
+#define if_hwassist if_data.ifi_hwassist
+#define if_ipackets if_data.ifi_ipackets
+#define if_ierrors if_data.ifi_ierrors
+#define if_opackets if_data.ifi_opackets
+#define if_oerrors if_data.ifi_oerrors
+#define if_collisions if_data.ifi_collisions
+#define if_ibytes if_data.ifi_ibytes
+#define if_obytes if_data.ifi_obytes
+#define if_imcasts if_data.ifi_imcasts
+#define if_omcasts if_data.ifi_omcasts
+#define if_iqdrops if_data.ifi_iqdrops
+#define if_noproto if_data.ifi_noproto
+#define if_lastchange if_data.ifi_lastchange
+
+/* for compatibility with other BSDs */
+#define if_addrlist if_addrhead
+#define if_list if_link
+#define if_name(ifp) ((ifp)->if_xname)
+
+/*
+ * Locks for address lists on the network interface.
+ */
+#define IF_ADDR_LOCK_INIT(if) mtx_init(&(if)->if_addr_mtx, \
+ "if_addr_mtx", NULL, MTX_DEF)
+#define IF_ADDR_LOCK_DESTROY(if) mtx_destroy(&(if)->if_addr_mtx)
+#define IF_ADDR_LOCK(if) mtx_lock(&(if)->if_addr_mtx)
+#define IF_ADDR_UNLOCK(if) mtx_unlock(&(if)->if_addr_mtx)
+#define IF_ADDR_LOCK_ASSERT(if) mtx_assert(&(if)->if_addr_mtx, MA_OWNED)
+
+/*
+ * Function variations on locking macros intended to be used by loadable
+ * kernel modules in order to divorce them from the internals of address list
+ * locking.
+ */
+void if_addr_rlock(struct ifnet *ifp); /* if_addrhead */
+void if_addr_runlock(struct ifnet *ifp); /* if_addrhead */
+void if_maddr_rlock(struct ifnet *ifp); /* if_multiaddrs */
+void if_maddr_runlock(struct ifnet *ifp); /* if_multiaddrs */
+
+/*
+ * Output queues (ifp->if_snd) and slow device input queues (*ifp->if_slowq)
+ * are queues of messages stored on ifqueue structures
+ * (defined above). Entries are added to and deleted from these structures
+ * by these macros, which should be called with ipl raised to splimp().
+ */
+#define IF_LOCK(ifq) mtx_lock(&(ifq)->ifq_mtx)
+#define IF_UNLOCK(ifq) mtx_unlock(&(ifq)->ifq_mtx)
+#define IF_LOCK_ASSERT(ifq) mtx_assert(&(ifq)->ifq_mtx, MA_OWNED)
+#define _IF_QFULL(ifq) ((ifq)->ifq_len >= (ifq)->ifq_maxlen)
+#define _IF_DROP(ifq) ((ifq)->ifq_drops++)
+#define _IF_QLEN(ifq) ((ifq)->ifq_len)
+
+#define _IF_ENQUEUE(ifq, m) do { \
+ (m)->m_nextpkt = NULL; \
+ if ((ifq)->ifq_tail == NULL) \
+ (ifq)->ifq_head = m; \
+ else \
+ (ifq)->ifq_tail->m_nextpkt = m; \
+ (ifq)->ifq_tail = m; \
+ (ifq)->ifq_len++; \
+} while (0)
+
+#define IF_ENQUEUE(ifq, m) do { \
+ IF_LOCK(ifq); \
+ _IF_ENQUEUE(ifq, m); \
+ IF_UNLOCK(ifq); \
+} while (0)
+
+#define _IF_PREPEND(ifq, m) do { \
+ (m)->m_nextpkt = (ifq)->ifq_head; \
+ if ((ifq)->ifq_tail == NULL) \
+ (ifq)->ifq_tail = (m); \
+ (ifq)->ifq_head = (m); \
+ (ifq)->ifq_len++; \
+} while (0)
+
+#define IF_PREPEND(ifq, m) do { \
+ IF_LOCK(ifq); \
+ _IF_PREPEND(ifq, m); \
+ IF_UNLOCK(ifq); \
+} while (0)
+
+#define _IF_DEQUEUE(ifq, m) do { \
+ (m) = (ifq)->ifq_head; \
+ if (m) { \
+ if (((ifq)->ifq_head = (m)->m_nextpkt) == NULL) \
+ (ifq)->ifq_tail = NULL; \
+ (m)->m_nextpkt = NULL; \
+ (ifq)->ifq_len--; \
+ } \
+} while (0)
+
+#define IF_DEQUEUE(ifq, m) do { \
+ IF_LOCK(ifq); \
+ _IF_DEQUEUE(ifq, m); \
+ IF_UNLOCK(ifq); \
+} while (0)
+
+#define _IF_POLL(ifq, m) ((m) = (ifq)->ifq_head)
+#define IF_POLL(ifq, m) _IF_POLL(ifq, m)
+
+#define _IF_DRAIN(ifq) do { \
+ struct mbuf *m; \
+ for (;;) { \
+ _IF_DEQUEUE(ifq, m); \
+ if (m == NULL) \
+ break; \
+ m_freem(m); \
+ } \
+} while (0)
+
+#define IF_DRAIN(ifq) do { \
+ IF_LOCK(ifq); \
+ _IF_DRAIN(ifq); \
+ IF_UNLOCK(ifq); \
+} while(0)
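+
+/*
+ * Illustrative sketch of the queueing discipline encoded by the macros
+ * above: a singly-linked FIFO threaded through m_nextpkt, with ifq_tail
+ * kept so that enqueue is O(1).  A typical locked producer/consumer
+ * sequence, assuming "ifq" was set up with mtx_init() and "m" is a
+ * packet to queue, might look like:
+ *
+ *	IF_LOCK(ifq);
+ *	if (_IF_QFULL(ifq)) {
+ *		_IF_DROP(ifq);		// account for the drop
+ *		IF_UNLOCK(ifq);
+ *		m_freem(m);
+ *	} else {
+ *		_IF_ENQUEUE(ifq, m);	// append at ifq_tail
+ *		IF_UNLOCK(ifq);
+ *	}
+ *	...
+ *	IF_DEQUEUE(ifq, m);		// m == NULL when the queue is empty
+ */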
+
+#ifdef _KERNEL
+/* interface link layer address change event */
+typedef void (*iflladdr_event_handler_t)(void *, struct ifnet *);
+EVENTHANDLER_DECLARE(iflladdr_event, iflladdr_event_handler_t);
+/* interface address change event */
+typedef void (*ifaddr_event_handler_t)(void *, struct ifnet *);
+EVENTHANDLER_DECLARE(ifaddr_event, ifaddr_event_handler_t);
+/* new interface arrival event */
+typedef void (*ifnet_arrival_event_handler_t)(void *, struct ifnet *);
+EVENTHANDLER_DECLARE(ifnet_arrival_event, ifnet_arrival_event_handler_t);
+/* interface departure event */
+typedef void (*ifnet_departure_event_handler_t)(void *, struct ifnet *);
+EVENTHANDLER_DECLARE(ifnet_departure_event, ifnet_departure_event_handler_t);
+
+/*
+ * interface groups
+ */
+struct ifg_group {
+ char ifg_group[IFNAMSIZ];
+ u_int ifg_refcnt;
+ void *ifg_pf_kif;
+ TAILQ_HEAD(, ifg_member) ifg_members;
+ TAILQ_ENTRY(ifg_group) ifg_next;
+};
+
+struct ifg_member {
+ TAILQ_ENTRY(ifg_member) ifgm_next;
+ struct ifnet *ifgm_ifp;
+};
+
+struct ifg_list {
+ struct ifg_group *ifgl_group;
+ TAILQ_ENTRY(ifg_list) ifgl_next;
+};
+
+/* group attach event */
+typedef void (*group_attach_event_handler_t)(void *, struct ifg_group *);
+EVENTHANDLER_DECLARE(group_attach_event, group_attach_event_handler_t);
+/* group detach event */
+typedef void (*group_detach_event_handler_t)(void *, struct ifg_group *);
+EVENTHANDLER_DECLARE(group_detach_event, group_detach_event_handler_t);
+/* group change event */
+typedef void (*group_change_event_handler_t)(void *, const char *);
+EVENTHANDLER_DECLARE(group_change_event, group_change_event_handler_t);
+
+#define IF_AFDATA_LOCK_INIT(ifp) \
+ rw_init(&(ifp)->if_afdata_lock, "if_afdata")
+
+#define IF_AFDATA_WLOCK(ifp) rw_wlock(&(ifp)->if_afdata_lock)
+#define IF_AFDATA_RLOCK(ifp) rw_rlock(&(ifp)->if_afdata_lock)
+#define IF_AFDATA_WUNLOCK(ifp) rw_wunlock(&(ifp)->if_afdata_lock)
+#define IF_AFDATA_RUNLOCK(ifp) rw_runlock(&(ifp)->if_afdata_lock)
+#define IF_AFDATA_LOCK(ifp) IF_AFDATA_WLOCK(ifp)
+#define IF_AFDATA_UNLOCK(ifp) IF_AFDATA_WUNLOCK(ifp)
+#define IF_AFDATA_TRYLOCK(ifp) rw_try_wlock(&(ifp)->if_afdata_lock)
+#define IF_AFDATA_DESTROY(ifp) rw_destroy(&(ifp)->if_afdata_lock)
+
+#define IF_AFDATA_LOCK_ASSERT(ifp) rw_assert(&(ifp)->if_afdata_lock, RA_LOCKED)
+#define IF_AFDATA_UNLOCK_ASSERT(ifp) rw_assert(&(ifp)->if_afdata_lock, RA_UNLOCKED)
+
+int if_handoff(struct ifqueue *ifq, struct mbuf *m, struct ifnet *ifp,
+ int adjust);
+#define IF_HANDOFF(ifq, m, ifp) \
+ if_handoff((struct ifqueue *)ifq, m, ifp, 0)
+#define IF_HANDOFF_ADJ(ifq, m, ifp, adj) \
+ if_handoff((struct ifqueue *)ifq, m, ifp, adj)
+
+void if_start(struct ifnet *);
+
+#define IFQ_ENQUEUE(ifq, m, err) \
+do { \
+ IF_LOCK(ifq); \
+ if (ALTQ_IS_ENABLED(ifq)) \
+ ALTQ_ENQUEUE(ifq, m, NULL, err); \
+ else { \
+ if (_IF_QFULL(ifq)) { \
+ m_freem(m); \
+ (err) = ENOBUFS; \
+ } else { \
+ _IF_ENQUEUE(ifq, m); \
+ (err) = 0; \
+ } \
+ } \
+ if (err) \
+ (ifq)->ifq_drops++; \
+ IF_UNLOCK(ifq); \
+} while (0)
+
+#define IFQ_DEQUEUE_NOLOCK(ifq, m) \
+do { \
+ if (TBR_IS_ENABLED(ifq)) \
+ (m) = tbr_dequeue_ptr(ifq, ALTDQ_REMOVE); \
+ else if (ALTQ_IS_ENABLED(ifq)) \
+ ALTQ_DEQUEUE(ifq, m); \
+ else \
+ _IF_DEQUEUE(ifq, m); \
+} while (0)
+
+#define IFQ_DEQUEUE(ifq, m) \
+do { \
+ IF_LOCK(ifq); \
+ IFQ_DEQUEUE_NOLOCK(ifq, m); \
+ IF_UNLOCK(ifq); \
+} while (0)
+
+#define IFQ_POLL_NOLOCK(ifq, m) \
+do { \
+ if (TBR_IS_ENABLED(ifq)) \
+ (m) = tbr_dequeue_ptr(ifq, ALTDQ_POLL); \
+ else if (ALTQ_IS_ENABLED(ifq)) \
+ ALTQ_POLL(ifq, m); \
+ else \
+ _IF_POLL(ifq, m); \
+} while (0)
+
+#define IFQ_POLL(ifq, m) \
+do { \
+ IF_LOCK(ifq); \
+ IFQ_POLL_NOLOCK(ifq, m); \
+ IF_UNLOCK(ifq); \
+} while (0)
+
+#define IFQ_PURGE_NOLOCK(ifq) \
+do { \
+ if (ALTQ_IS_ENABLED(ifq)) { \
+ ALTQ_PURGE(ifq); \
+ } else \
+ _IF_DRAIN(ifq); \
+} while (0)
+
+#define IFQ_PURGE(ifq) \
+do { \
+ IF_LOCK(ifq); \
+ IFQ_PURGE_NOLOCK(ifq); \
+ IF_UNLOCK(ifq); \
+} while (0)
+
+#define IFQ_SET_READY(ifq) \
+ do { ((ifq)->altq_flags |= ALTQF_READY); } while (0)
+
+#define IFQ_LOCK(ifq) IF_LOCK(ifq)
+#define IFQ_UNLOCK(ifq) IF_UNLOCK(ifq)
+#define IFQ_LOCK_ASSERT(ifq) IF_LOCK_ASSERT(ifq)
+#define IFQ_IS_EMPTY(ifq) ((ifq)->ifq_len == 0)
+#define IFQ_INC_LEN(ifq) ((ifq)->ifq_len++)
+#define IFQ_DEC_LEN(ifq) (--(ifq)->ifq_len)
+#define IFQ_INC_DROPS(ifq) ((ifq)->ifq_drops++)
+#define IFQ_SET_MAXLEN(ifq, len) ((ifq)->ifq_maxlen = (len))
+
+/*
+ * The IFF_DRV_OACTIVE test should really occur in the device driver, not in
+ * the handoff logic, as that flag is locked by the device driver.
+ */
+#define IFQ_HANDOFF_ADJ(ifp, m, adj, err) \
+do { \
+ int len; \
+ short mflags; \
+ \
+ len = (m)->m_pkthdr.len; \
+ mflags = (m)->m_flags; \
+ IFQ_ENQUEUE(&(ifp)->if_snd, m, err); \
+ if ((err) == 0) { \
+ (ifp)->if_obytes += len + (adj); \
+ if (mflags & M_MCAST) \
+ (ifp)->if_omcasts++; \
+ if (((ifp)->if_drv_flags & IFF_DRV_OACTIVE) == 0) \
+ if_start(ifp); \
+ } \
+} while (0)
+
+#define IFQ_HANDOFF(ifp, m, err) \
+ IFQ_HANDOFF_ADJ(ifp, m, 0, err)
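+
+/*
+ * Illustrative sketch: an output path typically finishes by handing the
+ * packet off to the send queue, which enqueues it, updates the byte and
+ * multicast counters and kicks the driver via if_start(), exactly as
+ * encoded above.  On failure the mbuf has already been freed:
+ *
+ *	int error;
+ *
+ *	IFQ_HANDOFF(ifp, m, error);
+ *	if (error)
+ *		return (error);		// m was freed by IFQ_ENQUEUE
+ */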
+
+#define IFQ_DRV_DEQUEUE(ifq, m) \
+do { \
+ (m) = (ifq)->ifq_drv_head; \
+ if (m) { \
+ if (((ifq)->ifq_drv_head = (m)->m_nextpkt) == NULL) \
+ (ifq)->ifq_drv_tail = NULL; \
+ (m)->m_nextpkt = NULL; \
+ (ifq)->ifq_drv_len--; \
+ } else { \
+ IFQ_LOCK(ifq); \
+ IFQ_DEQUEUE_NOLOCK(ifq, m); \
+ while ((ifq)->ifq_drv_len < (ifq)->ifq_drv_maxlen) { \
+ struct mbuf *m0; \
+ IFQ_DEQUEUE_NOLOCK(ifq, m0); \
+ if (m0 == NULL) \
+ break; \
+ m0->m_nextpkt = NULL; \
+ if ((ifq)->ifq_drv_tail == NULL) \
+ (ifq)->ifq_drv_head = m0; \
+ else \
+ (ifq)->ifq_drv_tail->m_nextpkt = m0; \
+ (ifq)->ifq_drv_tail = m0; \
+ (ifq)->ifq_drv_len++; \
+ } \
+ IFQ_UNLOCK(ifq); \
+ } \
+} while (0)
+
+#define IFQ_DRV_PREPEND(ifq, m) \
+do { \
+ (m)->m_nextpkt = (ifq)->ifq_drv_head; \
+ if ((ifq)->ifq_drv_tail == NULL) \
+ (ifq)->ifq_drv_tail = (m); \
+ (ifq)->ifq_drv_head = (m); \
+ (ifq)->ifq_drv_len++; \
+} while (0)
+
+#define IFQ_DRV_IS_EMPTY(ifq) \
+ (((ifq)->ifq_drv_len == 0) && ((ifq)->ifq_len == 0))
+
+#define IFQ_DRV_PURGE(ifq) \
+do { \
+ struct mbuf *m, *n = (ifq)->ifq_drv_head; \
+ while((m = n) != NULL) { \
+ n = m->m_nextpkt; \
+ m_freem(m); \
+ } \
+ (ifq)->ifq_drv_head = (ifq)->ifq_drv_tail = NULL; \
+ (ifq)->ifq_drv_len = 0; \
+ IFQ_PURGE(ifq); \
+} while (0)
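+
+/*
+ * Illustrative sketch of the conventional if_start loop built on the
+ * driver-visible queue above; the "foo" driver name is an assumption
+ * for the example only.  IFQ_DRV_DEQUEUE refills the unlocked driver
+ * queue in bulk from the locked queue behind it:
+ *
+ *	static void
+ *	foo_start(struct ifnet *ifp)
+ *	{
+ *		struct mbuf *m;
+ *
+ *		for (;;) {
+ *			IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
+ *			if (m == NULL)
+ *				break;
+ *			// ... program the packet into the hardware ...
+ *			BPF_MTAP(ifp, m);	// tap for bpf listeners
+ *		}
+ *	}
+ */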
+
+#ifdef _KERNEL
+static __inline void
+drbr_stats_update(struct ifnet *ifp, int len, int mflags)
+{
+#ifndef NO_SLOW_STATS
+ ifp->if_obytes += len;
+ if (mflags & M_MCAST)
+ ifp->if_omcasts++;
+#endif
+}
+
+static __inline int
+drbr_enqueue(struct ifnet *ifp, struct buf_ring *br, struct mbuf *m)
+{
+ int error = 0;
+ int len = m->m_pkthdr.len;
+ int mflags = m->m_flags;
+
+#ifdef ALTQ
+ if (ALTQ_IS_ENABLED(&ifp->if_snd)) {
+ IFQ_ENQUEUE(&ifp->if_snd, m, error);
+ return (error);
+ }
+#endif
+ if ((error = buf_ring_enqueue_bytes(br, m, len)) == ENOBUFS) {
+ br->br_drops++;
+ m_freem(m);
+ } else
+ drbr_stats_update(ifp, len, mflags);
+
+ return (error);
+}
+
+static __inline void
+drbr_flush(struct ifnet *ifp, struct buf_ring *br)
+{
+ struct mbuf *m;
+
+#ifdef ALTQ
+ if (ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd))
+ IFQ_PURGE(&ifp->if_snd);
+#endif
+ while ((m = buf_ring_dequeue_sc(br)) != NULL)
+ m_freem(m);
+}
+
+static __inline void
+drbr_free(struct buf_ring *br, struct malloc_type *type)
+{
+
+ drbr_flush(NULL, br);
+ buf_ring_free(br, type);
+}
+
+static __inline struct mbuf *
+drbr_dequeue(struct ifnet *ifp, struct buf_ring *br)
+{
+#ifdef ALTQ
+ struct mbuf *m;
+
+ if (ALTQ_IS_ENABLED(&ifp->if_snd)) {
+ IFQ_DEQUEUE(&ifp->if_snd, m);
+ return (m);
+ }
+#endif
+ return (buf_ring_dequeue_sc(br));
+}
+
+static __inline struct mbuf *
+drbr_dequeue_cond(struct ifnet *ifp, struct buf_ring *br,
+ int (*func) (struct mbuf *, void *), void *arg)
+{
+ struct mbuf *m;
+#ifdef ALTQ
+ if (ALTQ_IS_ENABLED(&ifp->if_snd)) {
+ IFQ_LOCK(&ifp->if_snd);
+ IFQ_POLL_NOLOCK(&ifp->if_snd, m);
+ if (m != NULL && func(m, arg) == 0) {
+ IFQ_UNLOCK(&ifp->if_snd);
+ return (NULL);
+ }
+ IFQ_DEQUEUE_NOLOCK(&ifp->if_snd, m);
+ IFQ_UNLOCK(&ifp->if_snd);
+ return (m);
+ }
+#endif
+ m = buf_ring_peek(br);
+ if (m == NULL || func(m, arg) == 0)
+ return (NULL);
+
+ return (buf_ring_dequeue_sc(br));
+}
+
+static __inline int
+drbr_empty(struct ifnet *ifp, struct buf_ring *br)
+{
+#ifdef ALTQ
+ if (ALTQ_IS_ENABLED(&ifp->if_snd))
+ return (IFQ_IS_EMPTY(&ifp->if_snd));
+#endif
+ return (buf_ring_empty(br));
+}
+
+static __inline int
+drbr_needs_enqueue(struct ifnet *ifp, struct buf_ring *br)
+{
+#ifdef ALTQ
+ if (ALTQ_IS_ENABLED(&ifp->if_snd))
+ return (1);
+#endif
+ return (!buf_ring_empty(br));
+}
+
+static __inline int
+drbr_inuse(struct ifnet *ifp, struct buf_ring *br)
+{
+#ifdef ALTQ
+ if (ALTQ_IS_ENABLED(&ifp->if_snd))
+ return (ifp->if_snd.ifq_len);
+#endif
+ return (buf_ring_count(br));
+}
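+
+/*
+ * Illustrative sketch: a multiqueue driver's if_transmit method built
+ * on the drbr wrappers above, which transparently fall back to the
+ * legacy ALTQ-capable queue when ALTQ is enabled on the interface.
+ * The "foo" names are assumptions for the example only:
+ *
+ *	static int
+ *	foo_transmit(struct ifnet *ifp, struct mbuf *m)
+ *	{
+ *		struct foo_softc *sc = ifp->if_softc;
+ *		int error;
+ *
+ *		error = drbr_enqueue(ifp, sc->foo_br, m);
+ *		if (error == 0)
+ *			foo_txstart(sc);	// kick the transmit path
+ *		return (error);			// mbuf freed on ENOBUFS
+ *	}
+ */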
+#endif
+/*
+ * 72 was chosen below because it is the size of a TCP/IP
+ * header (40) + the minimum mss (32).
+ */
+#define IF_MINMTU 72
+#define IF_MAXMTU 65535
+
+#endif /* _KERNEL */
+
+/*
+ * The ifaddr structure contains information about one address
+ * of an interface. They are maintained by the different address families,
+ * are allocated and attached when an address is set, and are linked
+ * together so all addresses for an interface can be located.
+ *
+ * NOTE: a 'struct ifaddr' is always at the beginning of a larger
+ * chunk of malloc'ed memory, where we store the three addresses
+ * (ifa_addr, ifa_dstaddr and ifa_netmask) referenced here.
+ */
+struct ifaddr {
+ struct sockaddr *ifa_addr; /* address of interface */
+ struct sockaddr *ifa_dstaddr; /* other end of p-to-p link */
+#define ifa_broadaddr ifa_dstaddr /* broadcast address interface */
+ struct sockaddr *ifa_netmask; /* used to determine subnet */
+ struct if_data if_data; /* not all members are meaningful */
+ struct ifnet *ifa_ifp; /* back-pointer to interface */
+ TAILQ_ENTRY(ifaddr) ifa_link; /* queue macro glue */
+ void (*ifa_rtrequest) /* check or clean routes (+ or -)'d */
+ (int, struct rtentry *, struct rt_addrinfo *);
+ u_short ifa_flags; /* mostly rt_flags for cloning */
+ u_int ifa_refcnt; /* references to this structure */
+ int ifa_metric; /* cost of going out this interface */
+ int (*ifa_claim_addr) /* check if an addr goes to this if */
+ (struct ifaddr *, struct sockaddr *);
+ struct mtx ifa_mtx;
+};
+#define IFA_ROUTE RTF_UP /* route installed */
+#define IFA_RTSELF RTF_HOST /* loopback route to self installed */
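+
+/*
+ * Illustrative sketch of the single-allocation layout described in the
+ * NOTE above: the ifaddr and its three sockaddrs live in one malloc'ed
+ * chunk, with the pointer fields aimed into its tail.  The sizes and
+ * the malloc type are assumptions for the example only:
+ *
+ *	size_t socksize;	// protocol-dependent sockaddr size
+ *	struct ifaddr *ifa;
+ *
+ *	ifa = malloc(sizeof(*ifa) + 3 * socksize, M_IFADDR,
+ *	    M_WAITOK | M_ZERO);
+ *	ifa->ifa_addr    = (struct sockaddr *)(ifa + 1);
+ *	ifa->ifa_dstaddr = (struct sockaddr *)
+ *	    ((char *)ifa->ifa_addr + socksize);
+ *	ifa->ifa_netmask = (struct sockaddr *)
+ *	    ((char *)ifa->ifa_dstaddr + socksize);
+ */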
+
+/* for compatibility with other BSDs */
+#define ifa_list ifa_link
+
+#ifdef _KERNEL
+#define IFA_LOCK(ifa) mtx_lock(&(ifa)->ifa_mtx)
+#define IFA_UNLOCK(ifa) mtx_unlock(&(ifa)->ifa_mtx)
+
+void ifa_free(struct ifaddr *ifa);
+void ifa_init(struct ifaddr *ifa);
+void ifa_ref(struct ifaddr *ifa);
+#endif
+
+/*
+ * The prefix structure contains information about one prefix
+ * of an interface. They are maintained by the different address families,
+ * are allocated and attached when a prefix or an address is set,
+ * and are linked together so all prefixes for an interface can be located.
+ */
+struct ifprefix {
+ struct sockaddr *ifpr_prefix; /* prefix of interface */
+ struct ifnet *ifpr_ifp; /* back-pointer to interface */
+ TAILQ_ENTRY(ifprefix) ifpr_list; /* queue macro glue */
+ u_char ifpr_plen; /* prefix length in bits */
+ u_char ifpr_type; /* protocol dependent prefix type */
+};
+
+/*
+ * Multicast address structure. This is analogous to the ifaddr
+ * structure except that it keeps track of multicast addresses.
+ */
+struct ifmultiaddr {
+ TAILQ_ENTRY(ifmultiaddr) ifma_link; /* queue macro glue */
+ struct sockaddr *ifma_addr; /* address this membership is for */
+ struct sockaddr *ifma_lladdr; /* link-layer translation, if any */
+ struct ifnet *ifma_ifp; /* back-pointer to interface */
+ u_int ifma_refcount; /* reference count */
+ void *ifma_protospec; /* protocol-specific state, if any */
+ struct ifmultiaddr *ifma_llifma; /* pointer to ifma for ifma_lladdr */
+};
+
+#ifdef _KERNEL
+
+extern struct rwlock ifnet_rwlock;
+extern struct sx ifnet_sxlock;
+
+#define IFNET_LOCK_INIT() do { \
+ rw_init_flags(&ifnet_rwlock, "ifnet_rw", RW_RECURSE); \
+ sx_init_flags(&ifnet_sxlock, "ifnet_sx", SX_RECURSE); \
+} while(0)
+
+#define IFNET_WLOCK() do { \
+ sx_xlock(&ifnet_sxlock); \
+ rw_wlock(&ifnet_rwlock); \
+} while (0)
+
+#define IFNET_WUNLOCK() do { \
+ rw_wunlock(&ifnet_rwlock); \
+ sx_xunlock(&ifnet_sxlock); \
+} while (0)
+
+/*
+ * To assert the ifnet lock, you must know not only whether it's for read or
+ * write, but also whether it was acquired with sleep support or not.
+ */
+#define IFNET_RLOCK_ASSERT() sx_assert(&ifnet_sxlock, SA_SLOCKED)
+#define IFNET_RLOCK_NOSLEEP_ASSERT() rw_assert(&ifnet_rwlock, RA_RLOCKED)
+#define IFNET_WLOCK_ASSERT() do { \
+ sx_assert(&ifnet_sxlock, SA_XLOCKED); \
+ rw_assert(&ifnet_rwlock, RA_WLOCKED); \
+} while (0)
+
+#define IFNET_RLOCK() sx_slock(&ifnet_sxlock)
+#define IFNET_RLOCK_NOSLEEP() rw_rlock(&ifnet_rwlock)
+#define IFNET_RUNLOCK() sx_sunlock(&ifnet_sxlock)
+#define IFNET_RUNLOCK_NOSLEEP() rw_runlock(&ifnet_rwlock)
+
+/*
+ * Look up an ifnet given its index; the _ref variant also acquires a
+ * reference that must be freed using if_rele(). It is almost always a bug
+ * to call ifnet_byindex() instead of ifnet_byindex_ref().
+ */
+struct ifnet *ifnet_byindex(u_short idx);
+struct ifnet *ifnet_byindex_locked(u_short idx);
+struct ifnet *ifnet_byindex_ref(u_short idx);
+
+/*
+ * Given the index, ifaddr_byindex() returns the one and only
+ * link-level ifaddr for the interface. You are not supposed to use
+ * it to traverse the list of addresses associated with the interface.
+ */
+struct ifaddr *ifaddr_byindex(u_short idx);
+
+VNET_DECLARE(struct ifnethead, ifnet);
+VNET_DECLARE(struct ifgrouphead, ifg_head);
+VNET_DECLARE(int, if_index);
+VNET_DECLARE(struct ifnet *, loif); /* first loopback interface */
+VNET_DECLARE(int, useloopback);
+
+#define V_ifnet VNET(ifnet)
+#define V_ifg_head VNET(ifg_head)
+#define V_if_index VNET(if_index)
+#define V_loif VNET(loif)
+#define V_useloopback VNET(useloopback)
+
+extern int ifqmaxlen;
+
+int if_addgroup(struct ifnet *, const char *);
+int if_delgroup(struct ifnet *, const char *);
+int if_addmulti(struct ifnet *, struct sockaddr *, struct ifmultiaddr **);
+int if_allmulti(struct ifnet *, int);
+struct ifnet* if_alloc(u_char);
+void if_attach(struct ifnet *);
+void if_dead(struct ifnet *);
+int if_delmulti(struct ifnet *, struct sockaddr *);
+void if_delmulti_ifma(struct ifmultiaddr *);
+void if_detach(struct ifnet *);
+void if_vmove(struct ifnet *, struct vnet *);
+void if_purgeaddrs(struct ifnet *);
+void if_delallmulti(struct ifnet *);
+void if_down(struct ifnet *);
+struct ifmultiaddr *
+ if_findmulti(struct ifnet *, struct sockaddr *);
+void if_free(struct ifnet *);
+void if_free_type(struct ifnet *, u_char);
+void if_initname(struct ifnet *, const char *, int);
+void if_link_state_change(struct ifnet *, int);
+int if_printf(struct ifnet *, const char *, ...) __printflike(2, 3);
+void if_qflush(struct ifnet *);
+void if_ref(struct ifnet *);
+void if_rele(struct ifnet *);
+int if_setlladdr(struct ifnet *, const u_char *, int);
+void if_up(struct ifnet *);
+int ifioctl(struct socket *, u_long, caddr_t, struct thread *);
+int ifpromisc(struct ifnet *, int);
+struct ifnet *ifunit(const char *);
+struct ifnet *ifunit_ref(const char *);
+
+void ifq_init(struct ifaltq *, struct ifnet *ifp);
+void ifq_delete(struct ifaltq *);
+
+int ifa_add_loopback_route(struct ifaddr *, struct sockaddr *);
+int ifa_del_loopback_route(struct ifaddr *, struct sockaddr *);
+
+struct ifaddr *ifa_ifwithaddr(struct sockaddr *);
+int ifa_ifwithaddr_check(struct sockaddr *);
+struct ifaddr *ifa_ifwithbroadaddr(struct sockaddr *);
+struct ifaddr *ifa_ifwithdstaddr(struct sockaddr *);
+struct ifaddr *ifa_ifwithnet(struct sockaddr *, int);
+struct ifaddr *ifa_ifwithroute(int, struct sockaddr *, struct sockaddr *);
+struct ifaddr *ifa_ifwithroute_fib(int, struct sockaddr *, struct sockaddr *, u_int);
+
+struct ifaddr *ifaof_ifpforaddr(struct sockaddr *, struct ifnet *);
+
+int if_simloop(struct ifnet *ifp, struct mbuf *m, int af, int hlen);
+
+typedef void *if_com_alloc_t(u_char type, struct ifnet *ifp);
+typedef void if_com_free_t(void *com, u_char type);
+void if_register_com_alloc(u_char type, if_com_alloc_t *a, if_com_free_t *f);
+void if_deregister_com_alloc(u_char type);
+
+#define IF_LLADDR(ifp) \
+ LLADDR((struct sockaddr_dl *)((ifp)->if_addr->ifa_addr))
+
+#ifdef DEVICE_POLLING
+enum poll_cmd { POLL_ONLY, POLL_AND_CHECK_STATUS };
+
+typedef int poll_handler_t(struct ifnet *ifp, enum poll_cmd cmd, int count);
+int ether_poll_register(poll_handler_t *h, struct ifnet *ifp);
+int ether_poll_deregister(struct ifnet *ifp);
+#endif /* DEVICE_POLLING */
+
+#endif /* _KERNEL */
+
+#endif /* !_NET_IF_VAR_HH_ */
diff --git a/rtems/freebsd/net/if_vlan.c b/rtems/freebsd/net/if_vlan.c
new file mode 100644
index 00000000..f37f1186
--- /dev/null
+++ b/rtems/freebsd/net/if_vlan.c
@@ -0,0 +1,1538 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright 1998 Massachusetts Institute of Technology
+ *
+ * Permission to use, copy, modify, and distribute this software and
+ * its documentation for any purpose and without fee is hereby
+ * granted, provided that both the above copyright notice and this
+ * permission notice appear in all copies, that both the above
+ * copyright notice and this permission notice appear in all
+ * supporting documentation, and that the name of M.I.T. not be used
+ * in advertising or publicity pertaining to distribution of the
+ * software without specific, written prior permission. M.I.T. makes
+ * no representations about the suitability of this software for any
+ * purpose. It is provided "as is" without express or implied
+ * warranty.
+ *
+ * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''. M.I.T. DISCLAIMS
+ * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT
+ * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * if_vlan.c - pseudo-device driver for IEEE 802.1Q virtual LANs.
+ * Might be extended some day to also handle IEEE 802.1p priority
+ * tagging. This is sort of sneaky in the implementation, since
+ * we need to pretend to be enough of an Ethernet implementation
+ * to make arp work. The way we do this is by telling everyone
+ * that we are an Ethernet, and then catch the packets that
+ * ether_output() left on our output queue when it calls
+ * if_start(), rewrite them for use by the real outgoing interface,
+ * and ask it to send them.
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/freebsd/local/opt_vlan.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/module.h>
+#include <rtems/freebsd/sys/rwlock.h>
+#include <rtems/freebsd/sys/queue.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/sockio.h>
+#include <rtems/freebsd/sys/sysctl.h>
+#include <rtems/freebsd/sys/systm.h>
+
+#include <rtems/freebsd/net/bpf.h>
+#include <rtems/freebsd/net/ethernet.h>
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/if_clone.h>
+#include <rtems/freebsd/net/if_dl.h>
+#include <rtems/freebsd/net/if_types.h>
+#include <rtems/freebsd/net/if_vlan_var.h>
+#include <rtems/freebsd/net/vnet.h>
+
+#define VLANNAME "vlan"
+#define VLAN_DEF_HWIDTH 4
+#define VLAN_IFFLAGS (IFF_BROADCAST | IFF_MULTICAST)
+
+#define UP_AND_RUNNING(ifp) \
+ ((ifp)->if_flags & IFF_UP && (ifp)->if_drv_flags & IFF_DRV_RUNNING)
+
+LIST_HEAD(ifvlanhead, ifvlan);
+
+struct ifvlantrunk {
+ struct ifnet *parent; /* parent interface of this trunk */
+ struct rwlock rw;
+#ifdef VLAN_ARRAY
+#define VLAN_ARRAY_SIZE (EVL_VLID_MASK + 1)
+ struct ifvlan *vlans[VLAN_ARRAY_SIZE]; /* static table */
+#else
+ struct ifvlanhead *hash; /* dynamic hash-list table */
+ uint16_t hmask;
+ uint16_t hwidth;
+#endif
+ int refcnt;
+};
+
+struct vlan_mc_entry {
+ struct ether_addr mc_addr;
+ SLIST_ENTRY(vlan_mc_entry) mc_entries;
+};
+
+struct ifvlan {
+ struct ifvlantrunk *ifv_trunk;
+ struct ifnet *ifv_ifp;
+#define TRUNK(ifv) ((ifv)->ifv_trunk)
+#define PARENT(ifv) ((ifv)->ifv_trunk->parent)
+ int ifv_pflags; /* special flags we have set on parent */
+ struct ifv_linkmib {
+ int ifvm_encaplen; /* encapsulation length */
+ int ifvm_mtufudge; /* MTU fudged by this much */
+ int ifvm_mintu; /* min transmission unit */
+ uint16_t ifvm_proto; /* encapsulation ethertype */
+ uint16_t ifvm_tag; /* tag to apply on packets leaving if */
+ } ifv_mib;
+ SLIST_HEAD(, vlan_mc_entry) vlan_mc_listhead;
+#ifndef VLAN_ARRAY
+ LIST_ENTRY(ifvlan) ifv_list;
+#endif
+};
+#define ifv_proto ifv_mib.ifvm_proto
+#define ifv_tag ifv_mib.ifvm_tag
+#define ifv_encaplen ifv_mib.ifvm_encaplen
+#define ifv_mtufudge ifv_mib.ifvm_mtufudge
+#define ifv_mintu ifv_mib.ifvm_mintu
+
+/* Special flags we should propagate to parent. */
+static struct {
+ int flag;
+ int (*func)(struct ifnet *, int);
+} vlan_pflags[] = {
+ {IFF_PROMISC, ifpromisc},
+ {IFF_ALLMULTI, if_allmulti},
+ {0, NULL}
+};
+
+SYSCTL_DECL(_net_link);
+SYSCTL_NODE(_net_link, IFT_L2VLAN, vlan, CTLFLAG_RW, 0, "IEEE 802.1Q VLAN");
+SYSCTL_NODE(_net_link_vlan, PF_LINK, link, CTLFLAG_RW, 0, "for consistency");
+
+static int soft_pad = 0;
+SYSCTL_INT(_net_link_vlan, OID_AUTO, soft_pad, CTLFLAG_RW, &soft_pad, 0,
+ "pad short frames before tagging");
+
+static MALLOC_DEFINE(M_VLAN, VLANNAME, "802.1Q Virtual LAN Interface");
+
+static eventhandler_tag ifdetach_tag;
+static eventhandler_tag iflladdr_tag;
+
+/*
+ * We have a global mutex that is used to serialize configuration
+ * changes and is not used in normal packet delivery.
+ *
+ * We also have a per-trunk rwlock that is locked shared during packet
+ * processing and exclusive when the configuration is changed.
+ *
+ * The VLAN_ARRAY option replaces the dynamic hash with a static array
+ * of 4096 entries. In theory this can give a boost in processing;
+ * in practice it does not, probably because the array is too big to
+ * fit into the CPU cache.
+ */
+static struct mtx ifv_mtx;
+#define VLAN_LOCK_INIT() mtx_init(&ifv_mtx, "vlan_global", NULL, MTX_DEF)
+#define VLAN_LOCK_DESTROY() mtx_destroy(&ifv_mtx)
+#define VLAN_LOCK_ASSERT() mtx_assert(&ifv_mtx, MA_OWNED)
+#define VLAN_LOCK() mtx_lock(&ifv_mtx)
+#define VLAN_UNLOCK() mtx_unlock(&ifv_mtx)
+#define TRUNK_LOCK_INIT(trunk) rw_init(&(trunk)->rw, VLANNAME)
+#define TRUNK_LOCK_DESTROY(trunk) rw_destroy(&(trunk)->rw)
+#define TRUNK_LOCK(trunk) rw_wlock(&(trunk)->rw)
+#define TRUNK_UNLOCK(trunk) rw_wunlock(&(trunk)->rw)
+#define TRUNK_LOCK_ASSERT(trunk) rw_assert(&(trunk)->rw, RA_WLOCKED)
+#define TRUNK_RLOCK(trunk) rw_rlock(&(trunk)->rw)
+#define TRUNK_RUNLOCK(trunk) rw_runlock(&(trunk)->rw)
+#define TRUNK_LOCK_RASSERT(trunk) rw_assert(&(trunk)->rw, RA_RLOCKED)
+
+#ifndef VLAN_ARRAY
+static void vlan_inithash(struct ifvlantrunk *trunk);
+static void vlan_freehash(struct ifvlantrunk *trunk);
+static int vlan_inshash(struct ifvlantrunk *trunk, struct ifvlan *ifv);
+static int vlan_remhash(struct ifvlantrunk *trunk, struct ifvlan *ifv);
+static void vlan_growhash(struct ifvlantrunk *trunk, int howmuch);
+static __inline struct ifvlan * vlan_gethash(struct ifvlantrunk *trunk,
+ uint16_t tag);
+#endif
+static void trunk_destroy(struct ifvlantrunk *trunk);
+
+static void vlan_start(struct ifnet *ifp);
+static void vlan_init(void *foo);
+static void vlan_input(struct ifnet *ifp, struct mbuf *m);
+static int vlan_ioctl(struct ifnet *ifp, u_long cmd, caddr_t addr);
+static int vlan_setflag(struct ifnet *ifp, int flag, int status,
+ int (*func)(struct ifnet *, int));
+static int vlan_setflags(struct ifnet *ifp, int status);
+static int vlan_setmulti(struct ifnet *ifp);
+static void vlan_unconfig(struct ifnet *ifp);
+static void vlan_unconfig_locked(struct ifnet *ifp);
+static int vlan_config(struct ifvlan *ifv, struct ifnet *p, uint16_t tag);
+static void vlan_link_state(struct ifnet *ifp, int link);
+static void vlan_capabilities(struct ifvlan *ifv);
+static void vlan_trunk_capabilities(struct ifnet *ifp);
+
+static struct ifnet *vlan_clone_match_ethertag(struct if_clone *,
+ const char *, int *);
+static int vlan_clone_match(struct if_clone *, const char *);
+static int vlan_clone_create(struct if_clone *, char *, size_t, caddr_t);
+static int vlan_clone_destroy(struct if_clone *, struct ifnet *);
+
+static void vlan_ifdetach(void *arg, struct ifnet *ifp);
+static void vlan_iflladdr(void *arg, struct ifnet *ifp);
+
+static struct if_clone vlan_cloner = IFC_CLONE_INITIALIZER(VLANNAME, NULL,
+ IF_MAXUNIT, NULL, vlan_clone_match, vlan_clone_create, vlan_clone_destroy);
+
+#ifdef VIMAGE
+static VNET_DEFINE(struct if_clone, vlan_cloner);
+#define V_vlan_cloner VNET(vlan_cloner)
+#endif
+
+#ifndef VLAN_ARRAY
+#define HASH(n, m) ((((n) >> 8) ^ ((n) >> 4) ^ (n)) & (m))
+
+static void
+vlan_inithash(struct ifvlantrunk *trunk)
+{
+ int i, n;
+
+ /*
+ * The trunk must not be locked here since we call malloc(M_WAITOK).
+ * That is safe because this function is called before the trunk struct
+ * is hooked up and becomes visible to other threads.
+ */
+
+ KASSERT(trunk->hwidth == 0 && trunk->hash == NULL,
+ ("%s: hash already initialized", __func__));
+
+ trunk->hwidth = VLAN_DEF_HWIDTH;
+ n = 1 << trunk->hwidth;
+ trunk->hmask = n - 1;
+ trunk->hash = malloc(sizeof(struct ifvlanhead) * n, M_VLAN, M_WAITOK);
+ for (i = 0; i < n; i++)
+ LIST_INIT(&trunk->hash[i]);
+}
+
+static void
+vlan_freehash(struct ifvlantrunk *trunk)
+{
+#ifdef INVARIANTS
+ int i;
+
+ KASSERT(trunk->hwidth > 0, ("%s: hwidth not positive", __func__));
+ for (i = 0; i < (1 << trunk->hwidth); i++)
+ KASSERT(LIST_EMPTY(&trunk->hash[i]),
+ ("%s: hash table not empty", __func__));
+#endif
+ free(trunk->hash, M_VLAN);
+ trunk->hash = NULL;
+ trunk->hwidth = trunk->hmask = 0;
+}
+
+static int
+vlan_inshash(struct ifvlantrunk *trunk, struct ifvlan *ifv)
+{
+ int i, b;
+ struct ifvlan *ifv2;
+
+ TRUNK_LOCK_ASSERT(trunk);
+ KASSERT(trunk->hwidth > 0, ("%s: hwidth not positive", __func__));
+
+ b = 1 << trunk->hwidth;
+ i = HASH(ifv->ifv_tag, trunk->hmask);
+ LIST_FOREACH(ifv2, &trunk->hash[i], ifv_list)
+ if (ifv->ifv_tag == ifv2->ifv_tag)
+ return (EEXIST);
+
+ /*
+ * Grow the hash when the number of vlans exceeds half of the number of
+ * hash buckets squared. This will make the average linked-list length
+ * buckets/2.
+ */
+ if (trunk->refcnt > (b * b) / 2) {
+ vlan_growhash(trunk, 1);
+ i = HASH(ifv->ifv_tag, trunk->hmask);
+ }
+ LIST_INSERT_HEAD(&trunk->hash[i], ifv, ifv_list);
+ trunk->refcnt++;
+
+ return (0);
+}
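+
+/*
+ * Worked example for the growth rule in vlan_inshash() above: with the
+ * default hwidth of 4 there are b = 1 << 4 = 16 buckets, so the table
+ * grows once refcnt exceeds b * b / 2 = 128 vlans, which keeps the
+ * average chain length at or below b / 2 = 8 entries before the next
+ * doubling.
+ */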
+
+static int
+vlan_remhash(struct ifvlantrunk *trunk, struct ifvlan *ifv)
+{
+ int i, b;
+ struct ifvlan *ifv2;
+
+ TRUNK_LOCK_ASSERT(trunk);
+ KASSERT(trunk->hwidth > 0, ("%s: hwidth not positive", __func__));
+
+ b = 1 << trunk->hwidth;
+ i = HASH(ifv->ifv_tag, trunk->hmask);
+ LIST_FOREACH(ifv2, &trunk->hash[i], ifv_list)
+ if (ifv2 == ifv) {
+ trunk->refcnt--;
+ LIST_REMOVE(ifv2, ifv_list);
+ if (trunk->refcnt < (b * b) / 2)
+ vlan_growhash(trunk, -1);
+ return (0);
+ }
+
+ panic("%s: vlan not found\n", __func__);
+ return (ENOENT); /*NOTREACHED*/
+}
+
+/*
+ * Grow the hash larger or smaller if memory permits.
+ */
+static void
+vlan_growhash(struct ifvlantrunk *trunk, int howmuch)
+{
+ struct ifvlan *ifv;
+ struct ifvlanhead *hash2;
+ int hwidth2, i, j, n, n2;
+
+ TRUNK_LOCK_ASSERT(trunk);
+ KASSERT(trunk->hwidth > 0, ("%s: hwidth not positive", __func__));
+
+ if (howmuch == 0) {
+ /* Harmless yet obvious coding error */
+ printf("%s: howmuch is 0\n", __func__);
+ return;
+ }
+
+ hwidth2 = trunk->hwidth + howmuch;
+ n = 1 << trunk->hwidth;
+ n2 = 1 << hwidth2;
+ /* Do not shrink the table below the default */
+ if (hwidth2 < VLAN_DEF_HWIDTH)
+ return;
+
+ /* M_NOWAIT because we're called with trunk mutex held */
+ hash2 = malloc(sizeof(struct ifvlanhead) * n2, M_VLAN, M_NOWAIT);
+ if (hash2 == NULL) {
+ printf("%s: out of memory -- hash size not changed\n",
+ __func__);
+ return; /* We can live with the old hash table */
+ }
+ for (j = 0; j < n2; j++)
+ LIST_INIT(&hash2[j]);
+ for (i = 0; i < n; i++)
+ while ((ifv = LIST_FIRST(&trunk->hash[i])) != NULL) {
+ LIST_REMOVE(ifv, ifv_list);
+ j = HASH(ifv->ifv_tag, n2 - 1);
+ LIST_INSERT_HEAD(&hash2[j], ifv, ifv_list);
+ }
+ free(trunk->hash, M_VLAN);
+ trunk->hash = hash2;
+ trunk->hwidth = hwidth2;
+ trunk->hmask = n2 - 1;
+
+ if (bootverbose)
+ if_printf(trunk->parent,
+ "VLAN hash table resized from %d to %d buckets\n", n, n2);
+}
+
+static __inline struct ifvlan *
+vlan_gethash(struct ifvlantrunk *trunk, uint16_t tag)
+{
+ struct ifvlan *ifv;
+
+ TRUNK_LOCK_RASSERT(trunk);
+
+ LIST_FOREACH(ifv, &trunk->hash[HASH(tag, trunk->hmask)], ifv_list)
+ if (ifv->ifv_tag == tag)
+ return (ifv);
+ return (NULL);
+}
+
+#if 0
+/* Debugging code to view the hashtables. */
+static void
+vlan_dumphash(struct ifvlantrunk *trunk)
+{
+ int i;
+ struct ifvlan *ifv;
+
+ for (i = 0; i < (1 << trunk->hwidth); i++) {
+ printf("%d: ", i);
+ LIST_FOREACH(ifv, &trunk->hash[i], ifv_list)
+ printf("%s ", ifv->ifv_ifp->if_xname);
+ printf("\n");
+ }
+}
+#endif /* 0 */
+#endif /* !VLAN_ARRAY */
+
+static void
+trunk_destroy(struct ifvlantrunk *trunk)
+{
+ VLAN_LOCK_ASSERT();
+
+ TRUNK_LOCK(trunk);
+#ifndef VLAN_ARRAY
+ vlan_freehash(trunk);
+#endif
+ trunk->parent->if_vlantrunk = NULL;
+ TRUNK_UNLOCK(trunk);
+ TRUNK_LOCK_DESTROY(trunk);
+ free(trunk, M_VLAN);
+}
+
+/*
+ * Program our multicast filter. What we're actually doing is
+ * programming the multicast filter of the parent. This has the
+ * side effect of causing the parent interface to receive multicast
+ * traffic that it doesn't really want, which ends up being discarded
+ * later by the upper protocol layers. Unfortunately, there's no way
+ * to avoid this: there really is only one physical interface.
+ *
+ * XXX: There is a possible race here if more than one thread is
+ * modifying the multicast state of the vlan interface at the same time.
+ */
+static int
+vlan_setmulti(struct ifnet *ifp)
+{
+ struct ifnet *ifp_p;
+ struct ifmultiaddr *ifma, *rifma = NULL;
+ struct ifvlan *sc;
+ struct vlan_mc_entry *mc;
+ struct sockaddr_dl sdl;
+ int error;
+
+ /*VLAN_LOCK_ASSERT();*/
+
+ /* Find the parent. */
+ sc = ifp->if_softc;
+ ifp_p = PARENT(sc);
+
+ CURVNET_SET_QUIET(ifp_p->if_vnet);
+
+ bzero((char *)&sdl, sizeof(sdl));
+ sdl.sdl_len = sizeof(sdl);
+ sdl.sdl_family = AF_LINK;
+ sdl.sdl_index = ifp_p->if_index;
+ sdl.sdl_type = IFT_ETHER;
+ sdl.sdl_alen = ETHER_ADDR_LEN;
+
+ /* First, remove any existing filter entries. */
+ while ((mc = SLIST_FIRST(&sc->vlan_mc_listhead)) != NULL) {
+ bcopy((char *)&mc->mc_addr, LLADDR(&sdl), ETHER_ADDR_LEN);
+ error = if_delmulti(ifp_p, (struct sockaddr *)&sdl);
+ if (error)
+ return (error);
+ SLIST_REMOVE_HEAD(&sc->vlan_mc_listhead, mc_entries);
+ free(mc, M_VLAN);
+ }
+
+ /* Now program new ones. */
+ TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
+ if (ifma->ifma_addr->sa_family != AF_LINK)
+ continue;
+ mc = malloc(sizeof(struct vlan_mc_entry), M_VLAN, M_NOWAIT);
+ if (mc == NULL)
+ return (ENOMEM);
+ bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
+ (char *)&mc->mc_addr, ETHER_ADDR_LEN);
+ SLIST_INSERT_HEAD(&sc->vlan_mc_listhead, mc, mc_entries);
+ bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
+ LLADDR(&sdl), ETHER_ADDR_LEN);
+ error = if_addmulti(ifp_p, (struct sockaddr *)&sdl, &rifma);
+ if (error)
+ return (error);
+ }
+
+ CURVNET_RESTORE();
+ return (0);
+}
+
+/*
+ * A handler for parent interface link layer address changes.
+ * If the parent interface link layer address is changed we
+ * should also change it on all children vlans.
+ */
+static void
+vlan_iflladdr(void *arg __unused, struct ifnet *ifp)
+{
+ struct ifvlan *ifv;
+#ifndef VLAN_ARRAY
+ struct ifvlan *next;
+#endif
+ int i;
+
+ /*
+ * Check if it's a trunk interface first of all
+ * to avoid needless locking.
+ */
+ if (ifp->if_vlantrunk == NULL)
+ return;
+
+ VLAN_LOCK();
+ /*
+ * OK, it's a trunk. Loop over and change all vlans' lladdrs on it.
+ */
+#ifdef VLAN_ARRAY
+ for (i = 0; i < VLAN_ARRAY_SIZE; i++)
+ if ((ifv = ifp->if_vlantrunk->vlans[i])) {
+#else /* VLAN_ARRAY */
+ for (i = 0; i < (1 << ifp->if_vlantrunk->hwidth); i++)
+ LIST_FOREACH_SAFE(ifv, &ifp->if_vlantrunk->hash[i], ifv_list, next) {
+#endif /* VLAN_ARRAY */
+ VLAN_UNLOCK();
+ if_setlladdr(ifv->ifv_ifp, IF_LLADDR(ifp), ETHER_ADDR_LEN);
+ VLAN_LOCK();
+ }
+ VLAN_UNLOCK();
+}
+
+/*
+ * A handler for network interface departure events.
+ * Track departure of trunks here so that we don't access invalid
+ * pointers or whatever if a trunk is ripped from under us, e.g.,
+ * by ejecting its hot-plug card. However, if an ifnet is simply
+ * being renamed, then there's no need to tear down the state.
+ */
+static void
+vlan_ifdetach(void *arg __unused, struct ifnet *ifp)
+{
+ struct ifvlan *ifv;
+ int i;
+
+ /*
+ * Check if it's a trunk interface first of all
+ * to avoid needless locking.
+ */
+ if (ifp->if_vlantrunk == NULL)
+ return;
+
+ /* If the ifnet is just being renamed, don't do anything. */
+ if (ifp->if_flags & IFF_RENAMING)
+ return;
+
+ VLAN_LOCK();
+ /*
+ * OK, it's a trunk. Loop over and detach all vlan's on it.
+	 * Check the trunk pointer after each vlan_unconfig(), as the trunk
+	 * will be freed and the pointer set to NULL after the last vlan
+	 * is detached.
+ */
+#ifdef VLAN_ARRAY
+ for (i = 0; i < VLAN_ARRAY_SIZE; i++)
+ if ((ifv = ifp->if_vlantrunk->vlans[i])) {
+ vlan_unconfig_locked(ifv->ifv_ifp);
+ if (ifp->if_vlantrunk == NULL)
+ break;
+ }
+#else /* VLAN_ARRAY */
+restart:
+ for (i = 0; i < (1 << ifp->if_vlantrunk->hwidth); i++)
+ if ((ifv = LIST_FIRST(&ifp->if_vlantrunk->hash[i]))) {
+ vlan_unconfig_locked(ifv->ifv_ifp);
+ if (ifp->if_vlantrunk)
+ goto restart; /* trunk->hwidth can change */
+ else
+ break;
+ }
+#endif /* VLAN_ARRAY */
+ /* Trunk should have been destroyed in vlan_unconfig(). */
+ KASSERT(ifp->if_vlantrunk == NULL, ("%s: purge failed", __func__));
+ VLAN_UNLOCK();
+}
+
+/*
+ * VLAN support can be loaded as a module. The only place in the
+ * system that's intimately aware of this is ether_input. We hook
+ * into this code through vlan_input_p which is defined there and
+ * set here. No one else in the system should be aware of this, so
+ * we use an explicit reference here.
+ */
+extern void (*vlan_input_p)(struct ifnet *, struct mbuf *);
+
+/* For if_link_state_change() eyes only... */
+extern void (*vlan_link_state_p)(struct ifnet *, int);
+
+static int
+vlan_modevent(module_t mod, int type, void *data)
+{
+
+ switch (type) {
+ case MOD_LOAD:
+ ifdetach_tag = EVENTHANDLER_REGISTER(ifnet_departure_event,
+ vlan_ifdetach, NULL, EVENTHANDLER_PRI_ANY);
+ if (ifdetach_tag == NULL)
+ return (ENOMEM);
+ iflladdr_tag = EVENTHANDLER_REGISTER(iflladdr_event,
+ vlan_iflladdr, NULL, EVENTHANDLER_PRI_ANY);
+ if (iflladdr_tag == NULL)
+ return (ENOMEM);
+ VLAN_LOCK_INIT();
+ vlan_input_p = vlan_input;
+ vlan_link_state_p = vlan_link_state;
+ vlan_trunk_cap_p = vlan_trunk_capabilities;
+#ifndef VIMAGE
+ if_clone_attach(&vlan_cloner);
+#endif
+ if (bootverbose)
+ printf("vlan: initialized, using "
+#ifdef VLAN_ARRAY
+ "full-size arrays"
+#else
+ "hash tables with chaining"
+#endif
+			"\n");
+ break;
+ case MOD_UNLOAD:
+#ifndef VIMAGE
+ if_clone_detach(&vlan_cloner);
+#endif
+ EVENTHANDLER_DEREGISTER(ifnet_departure_event, ifdetach_tag);
+ EVENTHANDLER_DEREGISTER(iflladdr_event, iflladdr_tag);
+ vlan_input_p = NULL;
+ vlan_link_state_p = NULL;
+ vlan_trunk_cap_p = NULL;
+ VLAN_LOCK_DESTROY();
+ if (bootverbose)
+ printf("vlan: unloaded\n");
+ break;
+ default:
+ return (EOPNOTSUPP);
+ }
+ return (0);
+}
+
+static moduledata_t vlan_mod = {
+ "if_vlan",
+ vlan_modevent,
+ 0
+};
+
+DECLARE_MODULE(if_vlan, vlan_mod, SI_SUB_PSEUDO, SI_ORDER_ANY);
+MODULE_VERSION(if_vlan, 3);
+
+#ifdef VIMAGE
+static void
+vnet_vlan_init(const void *unused __unused)
+{
+
+ V_vlan_cloner = vlan_cloner;
+ if_clone_attach(&V_vlan_cloner);
+}
+VNET_SYSINIT(vnet_vlan_init, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_ANY,
+ vnet_vlan_init, NULL);
+
+static void
+vnet_vlan_uninit(const void *unused __unused)
+{
+
+ if_clone_detach(&V_vlan_cloner);
+}
+VNET_SYSUNINIT(vnet_vlan_uninit, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_FIRST,
+ vnet_vlan_uninit, NULL);
+#endif
+
+static struct ifnet *
+vlan_clone_match_ethertag(struct if_clone *ifc, const char *name, int *tag)
+{
+ const char *cp;
+ struct ifnet *ifp;
+ int t;
+
+ /* Check for <etherif>.<vlan> style interface names. */
+ IFNET_RLOCK_NOSLEEP();
+ TAILQ_FOREACH(ifp, &V_ifnet, if_link) {
+ if (ifp->if_type != IFT_ETHER)
+ continue;
+ if (strncmp(ifp->if_xname, name, strlen(ifp->if_xname)) != 0)
+ continue;
+ cp = name + strlen(ifp->if_xname);
+ if (*cp++ != '.')
+ continue;
+ if (*cp == '\0')
+ continue;
+ t = 0;
+ for(; *cp >= '0' && *cp <= '9'; cp++)
+ t = (t * 10) + (*cp - '0');
+ if (*cp != '\0')
+ continue;
+ if (tag != NULL)
+ *tag = t;
+ break;
+ }
+ IFNET_RUNLOCK_NOSLEEP();
+
+ return (ifp);
+}
+
+static int
+vlan_clone_match(struct if_clone *ifc, const char *name)
+{
+ const char *cp;
+
+ if (vlan_clone_match_ethertag(ifc, name, NULL) != NULL)
+ return (1);
+
+ if (strncmp(VLANNAME, name, strlen(VLANNAME)) != 0)
+ return (0);
+ for (cp = name + 4; *cp != '\0'; cp++) {
+ if (*cp < '0' || *cp > '9')
+ return (0);
+ }
+
+ return (1);
+}
+
+static int
+vlan_clone_create(struct if_clone *ifc, char *name, size_t len, caddr_t params)
+{
+ char *dp;
+ int wildcard;
+ int unit;
+ int error;
+ int tag;
+ int ethertag;
+ struct ifvlan *ifv;
+ struct ifnet *ifp;
+ struct ifnet *p;
+ struct vlanreq vlr;
+ static const u_char eaddr[ETHER_ADDR_LEN]; /* 00:00:00:00:00:00 */
+
+ /*
+ * There are 3 (ugh) ways to specify the cloned device:
+ * o pass a parameter block with the clone request.
+ * o specify parameters in the text of the clone device name
+ * o specify no parameters and get an unattached device that
+ * must be configured separately.
+ * The first technique is preferred; the latter two are
+	 * supported for backwards compatibility.
+ */
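+	/*
+	 * Illustrative examples (names hypothetical): a clone request
+	 * for "em0.5" uses the second form and both creates the
+	 * interface and attaches it to parent "em0" with tag 5, while
+	 * "vlan5" uses the third form and yields an unattached
+	 * interface that must be configured later, e.g. via SIOCSETVLAN.
+	 */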
+ if (params) {
+ error = copyin(params, &vlr, sizeof(vlr));
+ if (error)
+ return error;
+ p = ifunit(vlr.vlr_parent);
+ if (p == NULL)
+ return ENXIO;
+ /*
+ * Don't let the caller set up a VLAN tag with
+ * anything except VLID bits.
+ */
+ if (vlr.vlr_tag & ~EVL_VLID_MASK)
+ return (EINVAL);
+ error = ifc_name2unit(name, &unit);
+ if (error != 0)
+ return (error);
+
+ ethertag = 1;
+ tag = vlr.vlr_tag;
+ wildcard = (unit < 0);
+ } else if ((p = vlan_clone_match_ethertag(ifc, name, &tag)) != NULL) {
+ ethertag = 1;
+ unit = -1;
+ wildcard = 0;
+
+ /*
+ * Don't let the caller set up a VLAN tag with
+ * anything except VLID bits.
+ */
+ if (tag & ~EVL_VLID_MASK)
+ return (EINVAL);
+ } else {
+ ethertag = 0;
+
+ error = ifc_name2unit(name, &unit);
+ if (error != 0)
+ return (error);
+
+ wildcard = (unit < 0);
+ }
+
+ error = ifc_alloc_unit(ifc, &unit);
+ if (error != 0)
+ return (error);
+
+ /* In the wildcard case, we need to update the name. */
+ if (wildcard) {
+ for (dp = name; *dp != '\0'; dp++);
+ if (snprintf(dp, len - (dp-name), "%d", unit) >
+ len - (dp-name) - 1) {
+ panic("%s: interface name too long", __func__);
+ }
+ }
+
+ ifv = malloc(sizeof(struct ifvlan), M_VLAN, M_WAITOK | M_ZERO);
+ ifp = ifv->ifv_ifp = if_alloc(IFT_ETHER);
+ if (ifp == NULL) {
+ ifc_free_unit(ifc, unit);
+ free(ifv, M_VLAN);
+ return (ENOSPC);
+ }
+ SLIST_INIT(&ifv->vlan_mc_listhead);
+
+ ifp->if_softc = ifv;
+ /*
+ * Set the name manually rather than using if_initname because
+ * we don't conform to the default naming convention for interfaces.
+ */
+ strlcpy(ifp->if_xname, name, IFNAMSIZ);
+ ifp->if_dname = ifc->ifc_name;
+ ifp->if_dunit = unit;
+ /* NB: flags are not set here */
+ ifp->if_linkmib = &ifv->ifv_mib;
+ ifp->if_linkmiblen = sizeof(ifv->ifv_mib);
+ /* NB: mtu is not set here */
+
+ ifp->if_init = vlan_init;
+ ifp->if_start = vlan_start;
+ ifp->if_ioctl = vlan_ioctl;
+ ifp->if_snd.ifq_maxlen = ifqmaxlen;
+ ifp->if_flags = VLAN_IFFLAGS;
+ ether_ifattach(ifp, eaddr);
+ /* Now undo some of the damage... */
+ ifp->if_baudrate = 0;
+ ifp->if_type = IFT_L2VLAN;
+ ifp->if_hdrlen = ETHER_VLAN_ENCAP_LEN;
+
+ if (ethertag) {
+ error = vlan_config(ifv, p, tag);
+ if (error != 0) {
+ /*
+			 * Since we've partially failed, we need to back
+ * out all the way, otherwise userland could get
+ * confused. Thus, we destroy the interface.
+ */
+ ether_ifdetach(ifp);
+ vlan_unconfig(ifp);
+ if_free_type(ifp, IFT_ETHER);
+ ifc_free_unit(ifc, unit);
+ free(ifv, M_VLAN);
+
+ return (error);
+ }
+
+ /* Update flags on the parent, if necessary. */
+ vlan_setflags(ifp, 1);
+ }
+
+ return (0);
+}
+
+static int
+vlan_clone_destroy(struct if_clone *ifc, struct ifnet *ifp)
+{
+ struct ifvlan *ifv = ifp->if_softc;
+ int unit = ifp->if_dunit;
+
+ ether_ifdetach(ifp); /* first, remove it from system-wide lists */
+ vlan_unconfig(ifp); /* now it can be unconfigured and freed */
+ if_free_type(ifp, IFT_ETHER);
+ free(ifv, M_VLAN);
+ ifc_free_unit(ifc, unit);
+
+ return (0);
+}
+
+/*
+ * The ifp->if_init entry point for vlan(4) is a no-op.
+ */
+static void
+vlan_init(void *foo __unused)
+{
+}
+
+/*
+ * The if_start method for the vlan(4) interface. It does not raise
+ * the IFF_DRV_OACTIVE flag, since it is called only from the
+ * IFQ_HANDOFF() macro in ether_output_frame(). If the flag were
+ * raised while the interface queue was full, vlan_start() would not
+ * be called again, so the queue would never get emptied and the
+ * interface would stall forever.
+ */
+static void
+vlan_start(struct ifnet *ifp)
+{
+ struct ifvlan *ifv;
+ struct ifnet *p;
+ struct mbuf *m;
+ int error;
+
+ ifv = ifp->if_softc;
+ p = PARENT(ifv);
+
+ for (;;) {
+ IF_DEQUEUE(&ifp->if_snd, m);
+ if (m == NULL)
+ break;
+ BPF_MTAP(ifp, m);
+
+ /*
+ * Do not run parent's if_start() if the parent is not up,
+ * or parent's driver will cause a system crash.
+ */
+ if (!UP_AND_RUNNING(p)) {
+ m_freem(m);
+ ifp->if_collisions++;
+ continue;
+ }
+
+ /*
+ * Pad the frame to the minimum size allowed if told to.
+ * This option is in accord with IEEE Std 802.1Q, 2003 Ed.,
+ * paragraph C.4.4.3.b. It can help to work around buggy
+ * bridges that violate paragraph C.4.4.3.a from the same
+ * document, i.e., fail to pad short frames after untagging.
+ * E.g., a tagged frame 66 bytes long (incl. FCS) is OK, but
+ * untagging it will produce a 62-byte frame, which is a runt
+		 * and requires padding. There are VLAN-enabled network
+		 * devices that simply discard such runts, or mishandle
+		 * them in some other way.
+ */
+ if (soft_pad) {
+ static char pad[8]; /* just zeros */
+ int n;
+
+ for (n = ETHERMIN + ETHER_HDR_LEN - m->m_pkthdr.len;
+ n > 0; n -= sizeof(pad))
+ if (!m_append(m, min(n, sizeof(pad)), pad))
+ break;
+
+ if (n > 0) {
+ if_printf(ifp, "cannot pad short frame\n");
+ ifp->if_oerrors++;
+ m_freem(m);
+ continue;
+ }
+ }
+
+ /*
+ * If underlying interface can do VLAN tag insertion itself,
+ * just pass the packet along. However, we need some way to
+ * tell the interface where the packet came from so that it
+ * knows how to find the VLAN tag to use, so we attach a
+ * packet tag that holds it.
+ */
+ if (p->if_capenable & IFCAP_VLAN_HWTAGGING) {
+ m->m_pkthdr.ether_vtag = ifv->ifv_tag;
+ m->m_flags |= M_VLANTAG;
+ } else {
+ m = ether_vlanencap(m, ifv->ifv_tag);
+ if (m == NULL) {
+ if_printf(ifp,
+ "unable to prepend VLAN header\n");
+ ifp->if_oerrors++;
+ continue;
+ }
+ }
+
+ /*
+ * Send it, precisely as ether_output() would have.
+ * We are already running at splimp.
+ */
+ error = (p->if_transmit)(p, m);
+ if (!error)
+ ifp->if_opackets++;
+ else
+ ifp->if_oerrors++;
+ }
+}
+
+static void
+vlan_input(struct ifnet *ifp, struct mbuf *m)
+{
+ struct ifvlantrunk *trunk = ifp->if_vlantrunk;
+ struct ifvlan *ifv;
+ uint16_t tag;
+
+ KASSERT(trunk != NULL, ("%s: no trunk", __func__));
+
+ if (m->m_flags & M_VLANTAG) {
+ /*
+ * Packet is tagged, but m contains a normal
+ * Ethernet frame; the tag is stored out-of-band.
+ */
+ tag = EVL_VLANOFTAG(m->m_pkthdr.ether_vtag);
+ m->m_flags &= ~M_VLANTAG;
+ } else {
+ struct ether_vlan_header *evl;
+
+ /*
+ * Packet is tagged in-band as specified by 802.1q.
+ */
+ switch (ifp->if_type) {
+ case IFT_ETHER:
+ if (m->m_len < sizeof(*evl) &&
+ (m = m_pullup(m, sizeof(*evl))) == NULL) {
+ if_printf(ifp, "cannot pullup VLAN header\n");
+ return;
+ }
+ evl = mtod(m, struct ether_vlan_header *);
+ tag = EVL_VLANOFTAG(ntohs(evl->evl_tag));
+
+ /*
+ * Remove the 802.1q header by copying the Ethernet
+ * addresses over it and adjusting the beginning of
+ * the data in the mbuf. The encapsulated Ethernet
+ * type field is already in place.
+ */
+ bcopy((char *)evl, (char *)evl + ETHER_VLAN_ENCAP_LEN,
+ ETHER_HDR_LEN - ETHER_TYPE_LEN);
+ m_adj(m, ETHER_VLAN_ENCAP_LEN);
+ break;
+
+ default:
+#ifdef INVARIANTS
+ panic("%s: %s has unsupported if_type %u",
+ __func__, ifp->if_xname, ifp->if_type);
+#endif
+ m_freem(m);
+ ifp->if_noproto++;
+ return;
+ }
+ }
+
+ TRUNK_RLOCK(trunk);
+#ifdef VLAN_ARRAY
+ ifv = trunk->vlans[tag];
+#else
+ ifv = vlan_gethash(trunk, tag);
+#endif
+ if (ifv == NULL || !UP_AND_RUNNING(ifv->ifv_ifp)) {
+ TRUNK_RUNLOCK(trunk);
+ m_freem(m);
+ ifp->if_noproto++;
+ return;
+ }
+ TRUNK_RUNLOCK(trunk);
+
+ m->m_pkthdr.rcvif = ifv->ifv_ifp;
+ ifv->ifv_ifp->if_ipackets++;
+
+ /* Pass it back through the parent's input routine. */
+ (*ifp->if_input)(ifv->ifv_ifp, m);
+}
+
+static int
+vlan_config(struct ifvlan *ifv, struct ifnet *p, uint16_t tag)
+{
+ struct ifvlantrunk *trunk;
+ struct ifnet *ifp;
+ int error = 0;
+
+ /* VID numbers 0x0 and 0xFFF are reserved */
+ if (tag == 0 || tag == 0xFFF)
+ return (EINVAL);
+ if (p->if_type != IFT_ETHER)
+ return (EPROTONOSUPPORT);
+ if ((p->if_flags & VLAN_IFFLAGS) != VLAN_IFFLAGS)
+ return (EPROTONOSUPPORT);
+ if (ifv->ifv_trunk)
+ return (EBUSY);
+
+ if (p->if_vlantrunk == NULL) {
+ trunk = malloc(sizeof(struct ifvlantrunk),
+ M_VLAN, M_WAITOK | M_ZERO);
+#ifndef VLAN_ARRAY
+ vlan_inithash(trunk);
+#endif
+ VLAN_LOCK();
+ if (p->if_vlantrunk != NULL) {
+			/* A race that is very unlikely to be hit. */
+#ifndef VLAN_ARRAY
+ vlan_freehash(trunk);
+#endif
+ free(trunk, M_VLAN);
+ goto exists;
+ }
+ TRUNK_LOCK_INIT(trunk);
+ TRUNK_LOCK(trunk);
+ p->if_vlantrunk = trunk;
+ trunk->parent = p;
+ } else {
+ VLAN_LOCK();
+exists:
+ trunk = p->if_vlantrunk;
+ TRUNK_LOCK(trunk);
+ }
+
+ ifv->ifv_tag = tag; /* must set this before vlan_inshash() */
+#ifdef VLAN_ARRAY
+ if (trunk->vlans[tag] != NULL) {
+ error = EEXIST;
+ goto done;
+ }
+ trunk->vlans[tag] = ifv;
+ trunk->refcnt++;
+#else
+ error = vlan_inshash(trunk, ifv);
+ if (error)
+ goto done;
+#endif
+ ifv->ifv_proto = ETHERTYPE_VLAN;
+ ifv->ifv_encaplen = ETHER_VLAN_ENCAP_LEN;
+ ifv->ifv_mintu = ETHERMIN;
+ ifv->ifv_pflags = 0;
+
+ /*
+ * If the parent supports the VLAN_MTU capability,
+	 * i.e. can Tx/Rx frames larger than ETHER_MAX_LEN,
+ * use it.
+ */
+ if (p->if_capenable & IFCAP_VLAN_MTU) {
+ /*
+ * No need to fudge the MTU since the parent can
+ * handle extended frames.
+ */
+ ifv->ifv_mtufudge = 0;
+ } else {
+ /*
+ * Fudge the MTU by the encapsulation size. This
+ * makes us incompatible with strictly compliant
+ * 802.1Q implementations, but allows us to use
+ * the feature with other NetBSD implementations,
+ * which might still be useful.
+ */
+ ifv->ifv_mtufudge = ifv->ifv_encaplen;
+ }
+
+ ifv->ifv_trunk = trunk;
+ ifp = ifv->ifv_ifp;
+ ifp->if_mtu = p->if_mtu - ifv->ifv_mtufudge;
+ ifp->if_baudrate = p->if_baudrate;
+ /*
+ * Copy only a selected subset of flags from the parent.
+ * Other flags are none of our business.
+ */
+#define VLAN_COPY_FLAGS (IFF_SIMPLEX)
+ ifp->if_flags &= ~VLAN_COPY_FLAGS;
+ ifp->if_flags |= p->if_flags & VLAN_COPY_FLAGS;
+#undef VLAN_COPY_FLAGS
+
+ ifp->if_link_state = p->if_link_state;
+
+ vlan_capabilities(ifv);
+
+ /*
+ * Set up our ``Ethernet address'' to reflect the underlying
+ * physical interface's.
+ */
+ bcopy(IF_LLADDR(p), IF_LLADDR(ifp), ETHER_ADDR_LEN);
+
+ /*
+ * Configure multicast addresses that may already be
+ * joined on the vlan device.
+ */
+ (void)vlan_setmulti(ifp); /* XXX: VLAN lock held */
+
+ /* We are ready for operation now. */
+ ifp->if_drv_flags |= IFF_DRV_RUNNING;
+done:
+ TRUNK_UNLOCK(trunk);
+ if (error == 0)
+ EVENTHANDLER_INVOKE(vlan_config, p, ifv->ifv_tag);
+ VLAN_UNLOCK();
+
+ return (error);
+}
+
+static void
+vlan_unconfig(struct ifnet *ifp)
+{
+
+ VLAN_LOCK();
+ vlan_unconfig_locked(ifp);
+ VLAN_UNLOCK();
+}
+
+static void
+vlan_unconfig_locked(struct ifnet *ifp)
+{
+ struct ifvlantrunk *trunk;
+ struct vlan_mc_entry *mc;
+ struct ifvlan *ifv;
+ struct ifnet *parent;
+
+ VLAN_LOCK_ASSERT();
+
+ ifv = ifp->if_softc;
+ trunk = ifv->ifv_trunk;
+ parent = NULL;
+
+ if (trunk != NULL) {
+ struct sockaddr_dl sdl;
+
+ TRUNK_LOCK(trunk);
+ parent = trunk->parent;
+
+ /*
+ * Since the interface is being unconfigured, we need to
+ * empty the list of multicast groups that we may have joined
+ * while we were alive from the parent's list.
+ */
+ bzero((char *)&sdl, sizeof(sdl));
+ sdl.sdl_len = sizeof(sdl);
+ sdl.sdl_family = AF_LINK;
+ sdl.sdl_index = parent->if_index;
+ sdl.sdl_type = IFT_ETHER;
+ sdl.sdl_alen = ETHER_ADDR_LEN;
+
+ while ((mc = SLIST_FIRST(&ifv->vlan_mc_listhead)) != NULL) {
+ bcopy((char *)&mc->mc_addr, LLADDR(&sdl),
+ ETHER_ADDR_LEN);
+
+ /*
+			 * This may fail if the parent interface is
+			 * being detached. Regardless, we should make
+			 * a best effort to free this interface as far
+			 * as possible, since all callers expect vlan
+			 * destruction to succeed.
+ */
+ (void)if_delmulti(parent, (struct sockaddr *)&sdl);
+ SLIST_REMOVE_HEAD(&ifv->vlan_mc_listhead, mc_entries);
+ free(mc, M_VLAN);
+ }
+
+ vlan_setflags(ifp, 0); /* clear special flags on parent */
+#ifdef VLAN_ARRAY
+ trunk->vlans[ifv->ifv_tag] = NULL;
+ trunk->refcnt--;
+#else
+ vlan_remhash(trunk, ifv);
+#endif
+ ifv->ifv_trunk = NULL;
+
+ /*
+ * Check if we were the last.
+ */
+ if (trunk->refcnt == 0) {
+ trunk->parent->if_vlantrunk = NULL;
+ /*
+ * XXXGL: If some ithread has already entered
+ * vlan_input() and is now blocked on the trunk
+ * lock, then it should preempt us right after
+ * unlock and finish its work. Then we will acquire
+ * lock again in trunk_destroy().
+ */
+ TRUNK_UNLOCK(trunk);
+ trunk_destroy(trunk);
+ } else
+ TRUNK_UNLOCK(trunk);
+ }
+
+ /* Disconnect from parent. */
+ if (ifv->ifv_pflags)
+ if_printf(ifp, "%s: ifv_pflags unclean\n", __func__);
+ ifp->if_mtu = ETHERMTU;
+ ifp->if_link_state = LINK_STATE_UNKNOWN;
+ ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
+
+ /*
+ * Only dispatch an event if vlan was
+ * attached, otherwise there is nothing
+	 * to clean up anyway.
+ */
+ if (parent != NULL)
+ EVENTHANDLER_INVOKE(vlan_unconfig, parent, ifv->ifv_tag);
+}
+
+/* Handle a reference counted flag that should be set on the parent as well */
+static int
+vlan_setflag(struct ifnet *ifp, int flag, int status,
+ int (*func)(struct ifnet *, int))
+{
+ struct ifvlan *ifv;
+ int error;
+
+ /* XXX VLAN_LOCK_ASSERT(); */
+
+ ifv = ifp->if_softc;
+ status = status ? (ifp->if_flags & flag) : 0;
+ /* Now "status" contains the flag value or 0 */
+
+ /*
+ * See if recorded parent's status is different from what
+ * we want it to be. If it is, flip it. We record parent's
+ * status in ifv_pflags so that we won't clear parent's flag
+ * we haven't set. In fact, we don't clear or set parent's
+ * flags directly, but get or release references to them.
+ * That's why we can be sure that recorded flags still are
+ * in accord with actual parent's flags.
+ */
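+	/*
+	 * For instance, IFF_PROMISC is typically handled this way through
+	 * the vlan_pflags table via ifpromisc(), which reference-counts
+	 * promiscuity on the parent, so several vlans can each "set" the
+	 * flag without clobbering one another.
+	 */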
+ if (status != (ifv->ifv_pflags & flag)) {
+ error = (*func)(PARENT(ifv), status);
+ if (error)
+ return (error);
+ ifv->ifv_pflags &= ~flag;
+ ifv->ifv_pflags |= status;
+ }
+ return (0);
+}
+
+/*
+ * Handle IFF_* flags that require certain changes on the parent:
+ * if "status" is true, update parent's flags respective to our if_flags;
+ * if "status" is false, forcibly clear the flags set on the parent.
+ */
+static int
+vlan_setflags(struct ifnet *ifp, int status)
+{
+ int error, i;
+
+ for (i = 0; vlan_pflags[i].flag; i++) {
+ error = vlan_setflag(ifp, vlan_pflags[i].flag,
+ status, vlan_pflags[i].func);
+ if (error)
+ return (error);
+ }
+ return (0);
+}
+
+/* Inform all vlans that their parent has changed link state */
+static void
+vlan_link_state(struct ifnet *ifp, int link)
+{
+ struct ifvlantrunk *trunk = ifp->if_vlantrunk;
+ struct ifvlan *ifv;
+ int i;
+
+ TRUNK_LOCK(trunk);
+#ifdef VLAN_ARRAY
+ for (i = 0; i < VLAN_ARRAY_SIZE; i++)
+ if (trunk->vlans[i] != NULL) {
+ ifv = trunk->vlans[i];
+#else
+ for (i = 0; i < (1 << trunk->hwidth); i++)
+ LIST_FOREACH(ifv, &trunk->hash[i], ifv_list) {
+#endif
+ ifv->ifv_ifp->if_baudrate = trunk->parent->if_baudrate;
+ if_link_state_change(ifv->ifv_ifp,
+ trunk->parent->if_link_state);
+ }
+ TRUNK_UNLOCK(trunk);
+}
+
+static void
+vlan_capabilities(struct ifvlan *ifv)
+{
+ struct ifnet *p = PARENT(ifv);
+ struct ifnet *ifp = ifv->ifv_ifp;
+
+ TRUNK_LOCK_ASSERT(TRUNK(ifv));
+
+ /*
+ * If the parent interface can do checksum offloading
+ * on VLANs, then propagate its hardware-assisted
+ * checksumming flags. Also assert that checksum
+ * offloading requires hardware VLAN tagging.
+ */
+ if (p->if_capabilities & IFCAP_VLAN_HWCSUM)
+ ifp->if_capabilities = p->if_capabilities & IFCAP_HWCSUM;
+
+ if (p->if_capenable & IFCAP_VLAN_HWCSUM &&
+ p->if_capenable & IFCAP_VLAN_HWTAGGING) {
+ ifp->if_capenable = p->if_capenable & IFCAP_HWCSUM;
+ ifp->if_hwassist = p->if_hwassist & (CSUM_IP | CSUM_TCP |
+ CSUM_UDP | CSUM_SCTP | CSUM_IP_FRAGS | CSUM_FRAGMENT);
+ } else {
+ ifp->if_capenable = 0;
+ ifp->if_hwassist = 0;
+ }
+ /*
+ * If the parent interface can do TSO on VLANs then
+ * propagate the hardware-assisted flag. TSO on VLANs
+ * does not necessarily require hardware VLAN tagging.
+ */
+ if (p->if_capabilities & IFCAP_VLAN_HWTSO)
+ ifp->if_capabilities |= p->if_capabilities & IFCAP_TSO;
+ if (p->if_capenable & IFCAP_VLAN_HWTSO) {
+ ifp->if_capenable |= p->if_capenable & IFCAP_TSO;
+ ifp->if_hwassist |= p->if_hwassist & CSUM_TSO;
+ } else {
+ ifp->if_capenable &= ~(p->if_capenable & IFCAP_TSO);
+ ifp->if_hwassist &= ~(p->if_hwassist & CSUM_TSO);
+ }
+}
+
+static void
+vlan_trunk_capabilities(struct ifnet *ifp)
+{
+ struct ifvlantrunk *trunk = ifp->if_vlantrunk;
+ struct ifvlan *ifv;
+ int i;
+
+ TRUNK_LOCK(trunk);
+#ifdef VLAN_ARRAY
+ for (i = 0; i < VLAN_ARRAY_SIZE; i++)
+ if (trunk->vlans[i] != NULL) {
+ ifv = trunk->vlans[i];
+#else
+ for (i = 0; i < (1 << trunk->hwidth); i++) {
+ LIST_FOREACH(ifv, &trunk->hash[i], ifv_list)
+#endif
+ vlan_capabilities(ifv);
+ }
+ TRUNK_UNLOCK(trunk);
+}
+
+static int
+vlan_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
+{
+ struct ifnet *p;
+ struct ifreq *ifr;
+ struct ifvlan *ifv;
+ struct vlanreq vlr;
+ int error = 0;
+
+ ifr = (struct ifreq *)data;
+ ifv = ifp->if_softc;
+
+ switch (cmd) {
+ case SIOCGIFMEDIA:
+ VLAN_LOCK();
+ if (TRUNK(ifv) != NULL) {
+ p = PARENT(ifv);
+ VLAN_UNLOCK();
+ error = (*p->if_ioctl)(p, SIOCGIFMEDIA, data);
+ /* Limit the result to the parent's current config. */
+ if (error == 0) {
+ struct ifmediareq *ifmr;
+
+ ifmr = (struct ifmediareq *)data;
+ if (ifmr->ifm_count >= 1 && ifmr->ifm_ulist) {
+ ifmr->ifm_count = 1;
+ error = copyout(&ifmr->ifm_current,
+ ifmr->ifm_ulist,
+ sizeof(int));
+ }
+ }
+ } else {
+ VLAN_UNLOCK();
+ error = EINVAL;
+ }
+ break;
+
+ case SIOCSIFMEDIA:
+ error = EINVAL;
+ break;
+
+ case SIOCSIFMTU:
+ /*
+ * Set the interface MTU.
+ */
+ VLAN_LOCK();
+ if (TRUNK(ifv) != NULL) {
+ if (ifr->ifr_mtu >
+ (PARENT(ifv)->if_mtu - ifv->ifv_mtufudge) ||
+ ifr->ifr_mtu <
+ (ifv->ifv_mintu - ifv->ifv_mtufudge))
+ error = EINVAL;
+ else
+ ifp->if_mtu = ifr->ifr_mtu;
+ } else
+ error = EINVAL;
+ VLAN_UNLOCK();
+ break;
+
+ case SIOCSETVLAN:
+#ifdef VIMAGE
+ if (ifp->if_vnet != ifp->if_home_vnet) {
+ error = EPERM;
+ break;
+ }
+#endif
+ error = copyin(ifr->ifr_data, &vlr, sizeof(vlr));
+ if (error)
+ break;
+ if (vlr.vlr_parent[0] == '\0') {
+ vlan_unconfig(ifp);
+ break;
+ }
+ p = ifunit(vlr.vlr_parent);
+		if (p == NULL) {
+ error = ENOENT;
+ break;
+ }
+ /*
+ * Don't let the caller set up a VLAN tag with
+ * anything except VLID bits.
+ */
+ if (vlr.vlr_tag & ~EVL_VLID_MASK) {
+ error = EINVAL;
+ break;
+ }
+ error = vlan_config(ifv, p, vlr.vlr_tag);
+ if (error)
+ break;
+
+ /* Update flags on the parent, if necessary. */
+ vlan_setflags(ifp, 1);
+ break;
+
+ case SIOCGETVLAN:
+#ifdef VIMAGE
+ if (ifp->if_vnet != ifp->if_home_vnet) {
+ error = EPERM;
+ break;
+ }
+#endif
+ bzero(&vlr, sizeof(vlr));
+ VLAN_LOCK();
+ if (TRUNK(ifv) != NULL) {
+ strlcpy(vlr.vlr_parent, PARENT(ifv)->if_xname,
+ sizeof(vlr.vlr_parent));
+ vlr.vlr_tag = ifv->ifv_tag;
+ }
+ VLAN_UNLOCK();
+ error = copyout(&vlr, ifr->ifr_data, sizeof(vlr));
+ break;
+
+ case SIOCSIFFLAGS:
+ /*
+ * We should propagate selected flags to the parent,
+ * e.g., promiscuous mode.
+ */
+ if (TRUNK(ifv) != NULL)
+ error = vlan_setflags(ifp, 1);
+ break;
+
+ case SIOCADDMULTI:
+ case SIOCDELMULTI:
+ /*
+ * If we don't have a parent, just remember the membership for
+ * when we do.
+ */
+ if (TRUNK(ifv) != NULL)
+ error = vlan_setmulti(ifp);
+ break;
+
+ default:
+ error = ether_ioctl(ifp, cmd, data);
+ }
+
+ return (error);
+}
diff --git a/rtems/freebsd/net/if_vlan_var.h b/rtems/freebsd/net/if_vlan_var.h
new file mode 100644
index 00000000..045e2fa1
--- /dev/null
+++ b/rtems/freebsd/net/if_vlan_var.h
@@ -0,0 +1,137 @@
+/*-
+ * Copyright 1998 Massachusetts Institute of Technology
+ *
+ * Permission to use, copy, modify, and distribute this software and
+ * its documentation for any purpose and without fee is hereby
+ * granted, provided that both the above copyright notice and this
+ * permission notice appear in all copies, that both the above
+ * copyright notice and this permission notice appear in all
+ * supporting documentation, and that the name of M.I.T. not be used
+ * in advertising or publicity pertaining to distribution of the
+ * software without specific, written prior permission. M.I.T. makes
+ * no representations about the suitability of this software for any
+ * purpose. It is provided "as is" without express or implied
+ * warranty.
+ *
+ * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''. M.I.T. DISCLAIMS
+ * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT
+ * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _NET_IF_VLAN_VAR_HH_
+#define _NET_IF_VLAN_VAR_HH_ 1
+
+struct ether_vlan_header {
+ u_char evl_dhost[ETHER_ADDR_LEN];
+ u_char evl_shost[ETHER_ADDR_LEN];
+ u_int16_t evl_encap_proto;
+ u_int16_t evl_tag;
+ u_int16_t evl_proto;
+};
+
+#define EVL_VLID_MASK 0x0FFF
+#define EVL_PRI_MASK 0xE000
+#define EVL_VLANOFTAG(tag) ((tag) & EVL_VLID_MASK)
+#define EVL_PRIOFTAG(tag) (((tag) >> 13) & 7)
+#define EVL_CFIOFTAG(tag) (((tag) >> 12) & 1)
+#define EVL_MAKETAG(vlid, pri, cfi) \
+ ((((((pri) & 7) << 1) | ((cfi) & 1)) << 12) | ((vlid) & EVL_VLID_MASK))
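+
+/*
+ * A worked example of the macros above: for tag 0x6005, EVL_VLANOFTAG()
+ * yields VID 5, EVL_PRIOFTAG() yields priority 3 and EVL_CFIOFTAG()
+ * yields 0; conversely, EVL_MAKETAG(5, 3, 0) == 0x6005.
+ */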
+
+/* Set the VLAN ID in an mbuf packet header non-destructively. */
+#define EVL_APPLY_VLID(m, vlid) \
+ do { \
+ if ((m)->m_flags & M_VLANTAG) { \
+ (m)->m_pkthdr.ether_vtag &= EVL_VLID_MASK; \
+ (m)->m_pkthdr.ether_vtag |= (vlid); \
+ } else { \
+ (m)->m_pkthdr.ether_vtag = (vlid); \
+ (m)->m_flags |= M_VLANTAG; \
+ } \
+ } while (0)
+
+/* Set the priority ID in an mbuf packet header non-destructively. */
+#define EVL_APPLY_PRI(m, pri) \
+ do { \
+ if ((m)->m_flags & M_VLANTAG) { \
+ uint16_t __vlantag = (m)->m_pkthdr.ether_vtag; \
+ (m)->m_pkthdr.ether_vtag |= EVL_MAKETAG( \
+ EVL_VLANOFTAG(__vlantag), (pri), \
+ EVL_CFIOFTAG(__vlantag)); \
+ } else { \
+ (m)->m_pkthdr.ether_vtag = \
+ EVL_MAKETAG(0, (pri), 0); \
+ (m)->m_flags |= M_VLANTAG; \
+ } \
+ } while (0)
+
+/* sysctl(3) tags, for compatibility purposes */
+#define VLANCTL_PROTO 1
+#define VLANCTL_MAX 2
+
+/*
+ * Configuration structure for SIOCSETVLAN and SIOCGETVLAN ioctls.
+ */
+struct vlanreq {
+ char vlr_parent[IFNAMSIZ];
+ u_short vlr_tag;
+};
+#define SIOCSETVLAN SIOCSIFGENERIC
+#define SIOCGETVLAN SIOCGIFGENERIC
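+
+/*
+ * A minimal userland sketch (illustrative; error handling omitted) of
+ * attaching an existing interface "vlan0" to parent "em0" with tag 5:
+ *
+ *	struct ifreq ifr;
+ *	struct vlanreq vlr;
+ *
+ *	memset(&ifr, 0, sizeof(ifr));
+ *	memset(&vlr, 0, sizeof(vlr));
+ *	strlcpy(vlr.vlr_parent, "em0", sizeof(vlr.vlr_parent));
+ *	vlr.vlr_tag = 5;
+ *	strlcpy(ifr.ifr_name, "vlan0", sizeof(ifr.ifr_name));
+ *	ifr.ifr_data = (caddr_t)&vlr;
+ *	ioctl(s, SIOCSETVLAN, &ifr);
+ *
+ * where s is any open socket descriptor.
+ */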
+
+#ifdef _KERNEL
+/*
+ * Drivers that are capable of adding and removing the VLAN header
+ * in hardware indicate they support this by marking IFCAP_VLAN_HWTAGGING
+ * in if_capabilities. Drivers for hardware that can handle frames
+ * larger than the normal MTU, i.e. frames carrying a software-appended
+ * VLAN header without the MTU being lowered, should mark IFCAP_VLAN_MTU
+ * in if_capabilities; this notifies the VLAN code it can leave the
+ * MTU on the vlan interface at the normal setting.
+ */
+
+/*
+ * VLAN tags are stored in host byte order. Byte swapping may be
+ * necessary.
+ *
+ * Drivers that support hardware VLAN tag stripping fill in the
+ * received VLAN tag (containing both vlan and priority information)
+ * into the ether_vtag mbuf packet header field:
+ *
+ * m->m_pkthdr.ether_vtag = vlan_id; // ntohs()?
+ * m->m_flags |= M_VLANTAG;
+ *
+ * to mark the packet m with the specified VLAN tag.
+ *
+ * On output the driver should check the mbuf for the M_VLANTAG
+ * flag to see if a VLAN tag is present and valid:
+ *
+ * if (m->m_flags & M_VLANTAG) {
+ * ... = m->m_pkthdr.ether_vtag; // htons()?
+ * ... pass tag to hardware ...
+ * }
+ *
+ * Note that a driver must indicate it supports hardware VLAN
+ * stripping/insertion by marking IFCAP_VLAN_HWTAGGING in
+ * if_capabilities.
+ */
+
+#define VLAN_CAPABILITIES(_ifp) do { \
+ if ((_ifp)->if_vlantrunk != NULL) \
+ (*vlan_trunk_cap_p)(_ifp); \
+} while (0)
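+
+/*
+ * Drivers typically invoke VLAN_CAPABILITIES(ifp) after reconfiguring
+ * their own capabilities (e.g., from a SIOCSIFCAP handler) so that any
+ * attached vlan interfaces re-derive theirs; a sketch:
+ *
+ *	ifp->if_capenable ^= IFCAP_TXCSUM;
+ *	VLAN_CAPABILITIES(ifp);
+ */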
+
+extern void (*vlan_trunk_cap_p)(struct ifnet *);
+#endif /* _KERNEL */
+
+#endif /* _NET_IF_VLAN_VAR_HH_ */
diff --git a/rtems/freebsd/net/iso88025.h b/rtems/freebsd/net/iso88025.h
new file mode 100644
index 00000000..26e3ada6
--- /dev/null
+++ b/rtems/freebsd/net/iso88025.h
@@ -0,0 +1,172 @@
+/*-
+ * Copyright (c) 1998, Larry Lile
+ * All rights reserved.
+ *
+ * For latest sources and information on this driver, please
+ * go to http://anarchy.stdio.com.
+ *
+ * Questions, comments or suggestions should be directed to
+ * Larry Lile <lile@stdio.com>.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ *
+ * Information gathered from tokenring@freebsd, /sys/net/ethernet.h and
+ * the Mach token ring driver.
+ */
+
+/*
+ * Fundamental constants relating to iso 802.5
+ */
+
+#ifndef _NET_ISO88025_HH_
+#define _NET_ISO88025_HH_
+
+/*
+ * General ISO 802.5 definitions
+ */
+#define ISO88025_ADDR_LEN 6
+#define ISO88025_CF_LEN 2
+#define ISO88025_HDR_LEN (ISO88025_CF_LEN + (ISO88025_ADDR_LEN * 2))
+#define RCF_LEN 2
+#define RIF_MAX_RD 14
+#define RIF_MAX_LEN 16
+
+#define TR_AC 0x10
+#define TR_LLC_FRAME 0x40
+
+#define TR_4MBPS 4000000
+#define TR_16MBPS 16000000
+#define TR_100MBPS 100000000
+
+/*
+ * Source routing
+ */
+#define TR_RII 0x80
+#define TR_RCF_BCST_MASK 0xe000
+#define TR_RCF_LEN_MASK 0x1f00
+#define TR_RCF_DIR 0x0080
+#define TR_RCF_LF_MASK 0x0070
+
+#define TR_RCF_RIFLEN(x) ((ntohs(x) & TR_RCF_LEN_MASK) >> 8)
+
+/*
+ * Minimum and maximum packet payload lengths.
+ */
+#define ISO88025_MIN_LEN 0
+#define ISO88025_MAX_LEN_4 4464
+#define ISO88025_MAX_LEN_16 17960
+#define ISO88025_MAX_LEN ISO88025_MAX_LEN_16
+
+/*
+ * A macro to validate a length with
+ */
+#define ISO88025_IS_VALID_LEN(foo) \
+ ((foo) >= ISO88025_MIN_LEN && (foo) <= ISO88025_MAX_LEN)
+
+/* Access Control field */
+#define AC_PRI_MASK 0xe0 /* Priority bits */
+#define AC_TOKEN 0x10 /* Token bit: 0=Token, 1=Frame */
+#define AC_MONITOR 0x08 /* Monitor */
+#define AC_RESV_MASK 0x07 /* Reservation bits */
+
+/* Frame Control field */
+#define FC_FT_MASK 0xc0 /* Frame Type */
+#define FC_FT_MAC 0x00 /* MAC frame */
+#define FC_FT_LLC 0x40 /* LLC frame */
+#define FC_ATTN_MASK 0x0f /* Attention bits */
+#define FC_ATTN_EB 0x01 /* Express buffer */
+#define FC_ATTN_BE 0x02 /* Beacon */
+#define FC_ATTN_CT 0x03 /* Claim token */
+#define FC_ATTN_RP 0x04 /* Ring purge */
+#define FC_ATTN_AMP 0x05 /* Active monitor present */
+#define FC_ATTN_SMP 0x06 /* Standby monitor present */
+
+/* Token Ring destination address */
+#define DA_IG 0x80 /* Individual/group address. */
+ /* 0=Individual, 1=Group */
+#define DA_UL 0x40 /* Universal/local address. */
+ /* 0=Universal, 1=Local */
+/* Token Ring source address */
+#define SA_RII 0x80 /* Routing information indicator */
+#define SA_IG 0x40 /* Individual/group address */
+ /* 0=Group, 1=Individual */
+
+/*
+ * ISO 802.5 physical header
+ */
+struct iso88025_header {
+ u_int8_t ac; /* access control field */
+ u_int8_t fc; /* frame control field */
+ u_int8_t iso88025_dhost[ISO88025_ADDR_LEN]; /* destination address */
+ u_int8_t iso88025_shost[ISO88025_ADDR_LEN]; /* source address */
+ u_int16_t rcf; /* route control field */
+ u_int16_t rd[RIF_MAX_RD]; /* routing designators */
+} __packed;
+
+struct iso88025_rif {
+ u_int16_t rcf; /* route control field */
+ u_int16_t rd[RIF_MAX_RD]; /* routing designators */
+} __packed;
+
+struct iso88025_sockaddr_data {
+ u_char ether_dhost[ISO88025_ADDR_LEN];
+ u_char ether_shost[ISO88025_ADDR_LEN];
+ u_char ac;
+ u_char fc;
+};
+
+struct iso88025_sockaddr_dl_data {
+ u_short trld_rcf;
+ u_short *trld_route[RIF_MAX_LEN];
+};
+
+#define ISO88025_MAX(a, b) (((a)>(b))?(a):(b))
+#define SDL_ISO88025(s) ((struct iso88025_sockaddr_dl_data *) \
+ ((s)->sdl_data + \
+ ISO88025_MAX((s)->sdl_nlen + (s)->sdl_alen + \
+ (s)->sdl_slen, 12)))
+
+/*
+ * Structure of a 48-bit iso 802.5 address.
+ * (We could also add the 16-bit addresses as a union.)
+ */
+struct iso88025_addr {
+ u_char octet[ISO88025_ADDR_LEN];
+};
+
+#define ISO88025_MAX_MTU 18000
+#define ISO88025_DEFAULT_MTU 1500
+
+#define ISO88025_BPF_UNSUPPORTED 0
+#define ISO88025_BPF_SUPPORTED 1
+
+void iso88025_ifattach (struct ifnet *, const u_int8_t *, int);
+void iso88025_ifdetach (struct ifnet *, int);
+int iso88025_ioctl (struct ifnet *, u_long, caddr_t );
+int iso88025_output (struct ifnet *, struct mbuf *, struct sockaddr *,
+ struct route *);
+void iso88025_input (struct ifnet *, struct mbuf *);
+
+#endif
diff --git a/rtems/freebsd/net/netisr.c b/rtems/freebsd/net/netisr.c
new file mode 100644
index 00000000..8f0e9f1a
--- /dev/null
+++ b/rtems/freebsd/net/netisr.c
@@ -0,0 +1,1158 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 2007-2009 Robert N. M. Watson
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * netisr is a packet dispatch service, allowing synchronous (directly
+ * dispatched) and asynchronous (deferred dispatch) processing of packets by
+ * registered protocol handlers. Callers pass a protocol identifier and
+ * packet to netisr, along with a direct dispatch hint, and work will either
+ * be immediately processed with the registered handler, or passed to a
+ * kernel software interrupt (SWI) thread for deferred dispatch. Callers
+ * will generally select one or the other based on:
+ *
+ * - Might directly dispatching a netisr handler lead to code reentrance or
+ * lock recursion, such as entering the socket code from the socket code.
+ * - Might directly dispatching a netisr handler lead to recursive
+ * processing, such as when decapsulating several wrapped layers of tunnel
+ * information (IPSEC within IPSEC within ...).
+ *
+ * Maintaining ordering for protocol streams is a critical design concern.
+ * Enforcing ordering limits the opportunity for concurrency, but maintains
+ * the strong ordering requirements found in some protocols, such as TCP. Of
+ * related concern is CPU affinity--it is desirable to process all data
+ * associated with a particular stream on the same CPU over time in order to
+ * avoid acquiring locks associated with the connection on different CPUs,
+ * keep connection data in one cache, and to generally encourage associated
+ * user threads to live on the same CPU as the stream. It's also desirable
+ * to avoid lock migration and contention where locks are associated with
+ * more than one flow.
+ *
+ * netisr supports several policy variations, represented by the
+ * NETISR_POLICY_* constants, allowing protocols to play a varying role in
+ * identifying flows, assigning work to CPUs, etc. These are described in
+ * detail in netisr.h.
+ */
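+
+/*
+ * The usual entry points for callers are netisr_dispatch(), which may
+ * process the packet directly in the calling context, and netisr_queue(),
+ * which always defers to an SWI thread; an illustrative call:
+ *
+ *	error = netisr_queue(NETISR_IP, m);
+ *
+ * where the mbuf is consumed in both the success and the failure case.
+ */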
+
+#include <rtems/freebsd/local/opt_ddb.h>
+#include <rtems/freebsd/local/opt_device_polling.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/bus.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/kthread.h>
+#include <rtems/freebsd/sys/interrupt.h>
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/mutex.h>
+#include <rtems/freebsd/sys/pcpu.h>
+#include <rtems/freebsd/sys/proc.h>
+#include <rtems/freebsd/sys/rmlock.h>
+#include <rtems/freebsd/sys/sched.h>
+#include <rtems/freebsd/sys/smp.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/sysctl.h>
+#include <rtems/freebsd/sys/systm.h>
+
+#ifdef DDB
+#include <rtems/freebsd/ddb/ddb.h>
+#endif
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/if_var.h>
+#include <rtems/freebsd/net/netisr.h>
+#include <rtems/freebsd/net/vnet.h>
+
+/*-
+ * Synchronize use and modification of the registered netisr data structures;
+ * the set of registered protocols is modified under the write lock, while
+ * packet-processing paths take a read lock so that partially registered or
+ * unregistered protocols are never run.
+ *
+ * The following data structures and fields are protected by this lock:
+ *
+ * - The np array, including all fields of struct netisr_proto.
+ * - The nws array, including all fields of struct netisr_worker.
+ * - The nws_array array.
+ *
+ * Note: the NETISR_LOCKING define controls whether read locks are acquired
+ * in packet processing paths requiring netisr registration stability. This
+ * is disabled by default as it can lead to a measurable performance
+ * degradation even with rmlocks (3%-6% for loopback ping-pong traffic), and
+ * because netisr registration and unregistration is extremely rare at
+ * runtime. If it becomes more common, this decision should be revisited.
+ *
+ * XXXRW: rmlocks don't support assertions.
+ */
+static struct rmlock netisr_rmlock;
+#define NETISR_LOCK_INIT() rm_init_flags(&netisr_rmlock, "netisr", \
+ RM_NOWITNESS)
+#define NETISR_LOCK_ASSERT()
+#define NETISR_RLOCK(tracker) rm_rlock(&netisr_rmlock, (tracker))
+#define NETISR_RUNLOCK(tracker) rm_runlock(&netisr_rmlock, (tracker))
+#define NETISR_WLOCK() rm_wlock(&netisr_rmlock)
+#define NETISR_WUNLOCK() rm_wunlock(&netisr_rmlock)
+/* #define NETISR_LOCKING */
+
+SYSCTL_NODE(_net, OID_AUTO, isr, CTLFLAG_RW, 0, "netisr");
+
+/*-
+ * Three direct dispatch policies are supported:
+ *
+ * - Always defer: all work is scheduled for a netisr, regardless of context.
+ * (!direct)
+ *
+ * - Hybrid: if the executing context allows direct dispatch, and we're
+ * running on the CPU the work would be done on, then direct dispatch if it
+ * wouldn't violate ordering constraints on the workstream.
+ * (direct && !direct_force)
+ *
+ * - Always direct: if the executing context allows direct dispatch, always
+ * direct dispatch. (direct && direct_force)
+ *
+ * Notice that changing the global policy could lead to short periods of
+ * misordered processing, but this is considered acceptable as compared to
+ * the complexity of enforcing ordering during policy changes.
+ */
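+
+/*
+ * In terms of the tunables below: net.isr.direct=0 selects "always defer";
+ * net.isr.direct=1 with net.isr.direct_force=0 selects hybrid; and setting
+ * both to 1 (the compiled-in default here) selects "always direct".
+ */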
+static int netisr_direct_force = 1; /* Always direct dispatch. */
+TUNABLE_INT("net.isr.direct_force", &netisr_direct_force);
+SYSCTL_INT(_net_isr, OID_AUTO, direct_force, CTLFLAG_RW,
+ &netisr_direct_force, 0, "Force direct dispatch");
+
+static int netisr_direct = 1; /* Enable direct dispatch. */
+TUNABLE_INT("net.isr.direct", &netisr_direct);
+SYSCTL_INT(_net_isr, OID_AUTO, direct, CTLFLAG_RW,
+ &netisr_direct, 0, "Enable direct dispatch");
+
+/*
+ * Allow the administrator to limit the number of threads (CPUs) to use for
+ * netisr. We don't check netisr_maxthreads before creating the thread for
+ * CPU 0, so in practice we ignore values <= 1. This must be set at boot.
+ * We will create at most one thread per CPU.
+ */
+static int netisr_maxthreads = -1; /* Max number of threads. */
+TUNABLE_INT("net.isr.maxthreads", &netisr_maxthreads);
+SYSCTL_INT(_net_isr, OID_AUTO, maxthreads, CTLFLAG_RD,
+ &netisr_maxthreads, 0,
+ "Use at most this many CPUs for netisr processing");
+
+static int netisr_bindthreads = 0; /* Bind threads to CPUs. */
+TUNABLE_INT("net.isr.bindthreads", &netisr_bindthreads);
+SYSCTL_INT(_net_isr, OID_AUTO, bindthreads, CTLFLAG_RD,
+ &netisr_bindthreads, 0, "Bind netisr threads to CPUs.");
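+
+/*
+ * Both thread tunables above must be set at boot; an illustrative
+ * loader.conf(5) fragment asking for four netisr threads bound to
+ * their CPUs might read:
+ *
+ *	net.isr.maxthreads=4
+ *	net.isr.bindthreads=1
+ */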
+
+/*
+ * Limit per-workstream queues to at most net.isr.maxqlimit, both for initial
+ * configuration and later modification using netisr_setqlimit().
+ */
+#define NETISR_DEFAULT_MAXQLIMIT 10240
+static u_int netisr_maxqlimit = NETISR_DEFAULT_MAXQLIMIT;
+TUNABLE_INT("net.isr.maxqlimit", &netisr_maxqlimit);
+SYSCTL_INT(_net_isr, OID_AUTO, maxqlimit, CTLFLAG_RD,
+ &netisr_maxqlimit, 0,
+ "Maximum netisr per-protocol, per-CPU queue depth.");
+
+/*
+ * The default per-workstream queue limit for protocols that don't initialize
+ * the nh_qlimit field of their struct netisr_handler. If this is set above
+ * netisr_maxqlimit, we truncate it to the maximum during boot.
+ */
+#define NETISR_DEFAULT_DEFAULTQLIMIT 256
+static u_int netisr_defaultqlimit = NETISR_DEFAULT_DEFAULTQLIMIT;
+TUNABLE_INT("net.isr.defaultqlimit", &netisr_defaultqlimit);
+SYSCTL_INT(_net_isr, OID_AUTO, defaultqlimit, CTLFLAG_RD,
+ &netisr_defaultqlimit, 0,
+ "Default netisr per-protocol, per-CPU queue limit if not set by protocol");
+
+/*
+ * Each protocol is described by a struct netisr_proto, which holds all
+ * global per-protocol information. This data structure is set up by
+ * netisr_register(), and derived from the public struct netisr_handler.
+ */
+struct netisr_proto {
+ const char *np_name; /* Character string protocol name. */
+ netisr_handler_t *np_handler; /* Protocol handler. */
+ netisr_m2flow_t *np_m2flow; /* Query flow for untagged packet. */
+ netisr_m2cpuid_t *np_m2cpuid; /* Query CPU to process packet on. */
+ netisr_drainedcpu_t *np_drainedcpu; /* Callback when drained a queue. */
+ u_int np_qlimit; /* Maximum per-CPU queue depth. */
+ u_int np_policy; /* Work placement policy. */
+};
+
+#define NETISR_MAXPROT 16 /* Compile-time limit. */
+
+/*
+ * The np array describes all registered protocols, indexed by protocol
+ * number.
+ */
+static struct netisr_proto np[NETISR_MAXPROT];
+
+/*
+ * Protocol-specific work for each workstream is described by struct
+ * netisr_work. Each work descriptor consists of an mbuf queue and
+ * statistics.
+ */
+struct netisr_work {
+ /*
+ * Packet queue, linked by m_nextpkt.
+ */
+ struct mbuf *nw_head;
+ struct mbuf *nw_tail;
+ u_int nw_len;
+ u_int nw_qlimit;
+ u_int nw_watermark;
+
+ /*
+ * Statistics -- written unlocked, but mostly from curcpu.
+ */
+ u_int64_t nw_dispatched; /* Number of direct dispatches. */
+ u_int64_t nw_hybrid_dispatched; /* "" hybrid dispatches. */
+ u_int64_t nw_qdrops; /* "" drops. */
+ u_int64_t nw_queued; /* "" enqueues. */
+ u_int64_t nw_handled; /* "" handled in worker. */
+};
+
+/*
+ * Workstreams hold a set of ordered work across each protocol, and are
+ * described by netisr_workstream. Each workstream is associated with a
+ * worker thread, which in turn is pinned to a CPU. Work associated with a
+ * workstream can be processed in other threads during direct dispatch;
+ * concurrent processing is prevented by the NWS_RUNNING flag, which
+ * indicates that a thread is already processing the work queue.
+ */
+struct netisr_workstream {
+ struct intr_event *nws_intr_event; /* Handler for stream. */
+ void *nws_swi_cookie; /* swi(9) cookie for stream. */
+ struct mtx nws_mtx; /* Synchronize work. */
+ u_int nws_cpu; /* CPU pinning. */
+ u_int nws_flags; /* Wakeup flags. */
+ u_int nws_pendingbits; /* Scheduled protocols. */
+
+ /*
+ * Each protocol has per-workstream data.
+ */
+ struct netisr_work nws_work[NETISR_MAXPROT];
+} __aligned(CACHE_LINE_SIZE);
+
+/*
+ * Per-CPU workstream data.
+ */
+DPCPU_DEFINE(struct netisr_workstream, nws);
+
+/*
+ * Map contiguous values between 0 and nws_count into CPU IDs appropriate for
+ * accessing workstreams. This allows constructions of the form
+ * DPCPU_ID_GET(nws_array[arbitraryvalue % nws_count], nws).
+ */
+static u_int nws_array[MAXCPU];
+
+/*
+ * Number of registered workstreams. Will be at most the number of running
+ * CPUs once fully started.
+ */
+static u_int nws_count;
+SYSCTL_INT(_net_isr, OID_AUTO, numthreads, CTLFLAG_RD,
+ &nws_count, 0, "Number of extant netisr threads.");
+
+/*
+ * Per-workstream flags.
+ */
+#define NWS_RUNNING 0x00000001 /* Currently running in a thread. */
+#define NWS_DISPATCHING 0x00000002 /* Currently being direct-dispatched. */
+#define NWS_SCHEDULED 0x00000004 /* Signal issued. */
+
+/*
+ * Synchronization for each workstream: a mutex protects all mutable fields
+ * in each stream, including per-protocol state (mbuf queues). The SWI is
+ * woken up if asynchronous dispatch is required.
+ */
+#define NWS_LOCK(s) mtx_lock(&(s)->nws_mtx)
+#define NWS_LOCK_ASSERT(s) mtx_assert(&(s)->nws_mtx, MA_OWNED)
+#define NWS_UNLOCK(s) mtx_unlock(&(s)->nws_mtx)
+#define NWS_SIGNAL(s) swi_sched((s)->nws_swi_cookie, 0)
+
+/*
+ * Utility routines for protocols that implement their own mapping of flows
+ * to CPUs.
+ */
+u_int
+netisr_get_cpucount(void)
+{
+
+ return (nws_count);
+}
+
+u_int
+netisr_get_cpuid(u_int cpunumber)
+{
+
+ KASSERT(cpunumber < nws_count, ("%s: %u > %u", __func__, cpunumber,
+ nws_count));
+
+ return (nws_array[cpunumber]);
+}
+
+/*
+ * The default implementation of flow -> CPU ID mapping.
+ *
+ * Non-static so that protocols can use it to map their own work to specific
+ * CPUs in a manner consistent with netisr for affinity purposes.
+ */
+u_int
+netisr_default_flow2cpu(u_int flowid)
+{
+
+ return (nws_array[flowid % nws_count]);
+}
+
+/*
+ * Register a new netisr handler, which requires initializing per-protocol
+ * fields for each workstream. All netisr work is briefly suspended while
+ * the protocol is installed.
+ */
+void
+netisr_register(const struct netisr_handler *nhp)
+{
+ struct netisr_work *npwp;
+ const char *name;
+ u_int i, proto;
+
+ proto = nhp->nh_proto;
+ name = nhp->nh_name;
+
+ /*
+ * Test that the requested registration is valid.
+ */
+ KASSERT(nhp->nh_name != NULL,
+ ("%s: nh_name NULL for %u", __func__, proto));
+ KASSERT(nhp->nh_handler != NULL,
+ ("%s: nh_handler NULL for %s", __func__, name));
+ KASSERT(nhp->nh_policy == NETISR_POLICY_SOURCE ||
+ nhp->nh_policy == NETISR_POLICY_FLOW ||
+ nhp->nh_policy == NETISR_POLICY_CPU,
+ ("%s: unsupported nh_policy %u for %s", __func__,
+ nhp->nh_policy, name));
+ KASSERT(nhp->nh_policy == NETISR_POLICY_FLOW ||
+ nhp->nh_m2flow == NULL,
+ ("%s: nh_policy != FLOW but m2flow defined for %s", __func__,
+ name));
+ KASSERT(nhp->nh_policy == NETISR_POLICY_CPU || nhp->nh_m2cpuid == NULL,
+ ("%s: nh_policy != CPU but m2cpuid defined for %s", __func__,
+ name));
+ KASSERT(nhp->nh_policy != NETISR_POLICY_CPU || nhp->nh_m2cpuid != NULL,
+ ("%s: nh_policy == CPU but m2cpuid not defined for %s", __func__,
+ name));
+ KASSERT(proto < NETISR_MAXPROT,
+ ("%s(%u, %s): protocol too big", __func__, proto, name));
+
+ /*
+ * Test that no existing registration exists for this protocol.
+ */
+ NETISR_WLOCK();
+ KASSERT(np[proto].np_name == NULL,
+ ("%s(%u, %s): name present", __func__, proto, name));
+ KASSERT(np[proto].np_handler == NULL,
+ ("%s(%u, %s): handler present", __func__, proto, name));
+
+ np[proto].np_name = name;
+ np[proto].np_handler = nhp->nh_handler;
+ np[proto].np_m2flow = nhp->nh_m2flow;
+ np[proto].np_m2cpuid = nhp->nh_m2cpuid;
+ np[proto].np_drainedcpu = nhp->nh_drainedcpu;
+ if (nhp->nh_qlimit == 0)
+ np[proto].np_qlimit = netisr_defaultqlimit;
+ else if (nhp->nh_qlimit > netisr_maxqlimit) {
+ printf("%s: %s requested queue limit %u capped to "
+ "net.isr.maxqlimit %u\n", __func__, name, nhp->nh_qlimit,
+ netisr_maxqlimit);
+ np[proto].np_qlimit = netisr_maxqlimit;
+ } else
+ np[proto].np_qlimit = nhp->nh_qlimit;
+ np[proto].np_policy = nhp->nh_policy;
+ for (i = 0; i <= mp_maxid; i++) {
+ if (CPU_ABSENT(i))
+ continue;
+ npwp = &(DPCPU_ID_PTR(i, nws))->nws_work[proto];
+ bzero(npwp, sizeof(*npwp));
+ npwp->nw_qlimit = np[proto].np_qlimit;
+ }
+ NETISR_WUNLOCK();
+}
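+
+/*
+ * A registration sketch (protocol name, number and handler hypothetical):
+ * a protocol fills in a netisr_handler and registers it once at
+ * initialization time:
+ *
+ *	static struct netisr_handler foo_nh = {
+ *		.nh_name = "foo",
+ *		.nh_handler = foo_input,
+ *		.nh_proto = NETISR_FOO,
+ *		.nh_policy = NETISR_POLICY_SOURCE,
+ *	};
+ *
+ *	netisr_register(&foo_nh);
+ */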
+
+/*
+ * Clear drop counters across all workstreams for a protocol.
+ */
+void
+netisr_clearqdrops(const struct netisr_handler *nhp)
+{
+ struct netisr_work *npwp;
+#ifdef INVARIANTS
+ const char *name;
+#endif
+ u_int i, proto;
+
+ proto = nhp->nh_proto;
+#ifdef INVARIANTS
+ name = nhp->nh_name;
+#endif
+ KASSERT(proto < NETISR_MAXPROT,
+ ("%s(%u): protocol too big for %s", __func__, proto, name));
+
+ NETISR_WLOCK();
+ KASSERT(np[proto].np_handler != NULL,
+ ("%s(%u): protocol not registered for %s", __func__, proto,
+ name));
+
+ for (i = 0; i <= mp_maxid; i++) {
+ if (CPU_ABSENT(i))
+ continue;
+ npwp = &(DPCPU_ID_PTR(i, nws))->nws_work[proto];
+ npwp->nw_qdrops = 0;
+ }
+ NETISR_WUNLOCK();
+}
+
+/*
+ * Query the current drop counters across all workstreams for a protocol.
+ */
+void
+netisr_getqdrops(const struct netisr_handler *nhp, u_int64_t *qdropp)
+{
+ struct netisr_work *npwp;
+ struct rm_priotracker tracker;
+#ifdef INVARIANTS
+ const char *name;
+#endif
+ u_int i, proto;
+
+ *qdropp = 0;
+ proto = nhp->nh_proto;
+#ifdef INVARIANTS
+ name = nhp->nh_name;
+#endif
+ KASSERT(proto < NETISR_MAXPROT,
+ ("%s(%u): protocol too big for %s", __func__, proto, name));
+
+ NETISR_RLOCK(&tracker);
+ KASSERT(np[proto].np_handler != NULL,
+ ("%s(%u): protocol not registered for %s", __func__, proto,
+ name));
+
+ for (i = 0; i <= mp_maxid; i++) {
+ if (CPU_ABSENT(i))
+ continue;
+ npwp = &(DPCPU_ID_PTR(i, nws))->nws_work[proto];
+ *qdropp += npwp->nw_qdrops;
+ }
+ NETISR_RUNLOCK(&tracker);
+}
+
+/*
+ * Query the current queue limit for per-workstream queues for a protocol.
+ */
+void
+netisr_getqlimit(const struct netisr_handler *nhp, u_int *qlimitp)
+{
+ struct rm_priotracker tracker;
+#ifdef INVARIANTS
+ const char *name;
+#endif
+ u_int proto;
+
+ proto = nhp->nh_proto;
+#ifdef INVARIANTS
+ name = nhp->nh_name;
+#endif
+ KASSERT(proto < NETISR_MAXPROT,
+ ("%s(%u): protocol too big for %s", __func__, proto, name));
+
+ NETISR_RLOCK(&tracker);
+ KASSERT(np[proto].np_handler != NULL,
+ ("%s(%u): protocol not registered for %s", __func__, proto,
+ name));
+ *qlimitp = np[proto].np_qlimit;
+ NETISR_RUNLOCK(&tracker);
+}
+
+/*
+ * Update the queue limit across per-workstream queues for a protocol. We
+ * simply change the limits, and don't drain overflowed packets as they will
+ * (hopefully) take care of themselves shortly.
+ */
+int
+netisr_setqlimit(const struct netisr_handler *nhp, u_int qlimit)
+{
+ struct netisr_work *npwp;
+#ifdef INVARIANTS
+ const char *name;
+#endif
+ u_int i, proto;
+
+ if (qlimit > netisr_maxqlimit)
+ return (EINVAL);
+
+ proto = nhp->nh_proto;
+#ifdef INVARIANTS
+ name = nhp->nh_name;
+#endif
+ KASSERT(proto < NETISR_MAXPROT,
+ ("%s(%u): protocol too big for %s", __func__, proto, name));
+
+ NETISR_WLOCK();
+ KASSERT(np[proto].np_handler != NULL,
+ ("%s(%u): protocol not registered for %s", __func__, proto,
+ name));
+
+ np[proto].np_qlimit = qlimit;
+ for (i = 0; i <= mp_maxid; i++) {
+ if (CPU_ABSENT(i))
+ continue;
+ npwp = &(DPCPU_ID_PTR(i, nws))->nws_work[proto];
+ npwp->nw_qlimit = qlimit;
+ }
+ NETISR_WUNLOCK();
+ return (0);
+}
+
+/*
+ * Drain all packets currently held in a particular protocol work queue.
+ */
+static void
+netisr_drain_proto(struct netisr_work *npwp)
+{
+ struct mbuf *m;
+
+ /*
+ * We would assert the lock on the workstream but it's not passed in.
+ */
+ while ((m = npwp->nw_head) != NULL) {
+ npwp->nw_head = m->m_nextpkt;
+ m->m_nextpkt = NULL;
+ if (npwp->nw_head == NULL)
+ npwp->nw_tail = NULL;
+ npwp->nw_len--;
+ m_freem(m);
+ }
+ KASSERT(npwp->nw_tail == NULL, ("%s: tail", __func__));
+ KASSERT(npwp->nw_len == 0, ("%s: len", __func__));
+}
+
+/*
+ * Remove the registration of a network protocol, which requires clearing
+ * per-protocol fields across all workstreams, including freeing all mbufs in
+ * the queues at time of unregister. All work in netisr is briefly suspended
+ * while this takes place.
+ */
+void
+netisr_unregister(const struct netisr_handler *nhp)
+{
+ struct netisr_work *npwp;
+#ifdef INVARIANTS
+ const char *name;
+#endif
+ u_int i, proto;
+
+ proto = nhp->nh_proto;
+#ifdef INVARIANTS
+ name = nhp->nh_name;
+#endif
+ KASSERT(proto < NETISR_MAXPROT,
+ ("%s(%u): protocol too big for %s", __func__, proto, name));
+
+ NETISR_WLOCK();
+ KASSERT(np[proto].np_handler != NULL,
+ ("%s(%u): protocol not registered for %s", __func__, proto,
+ name));
+
+ np[proto].np_name = NULL;
+ np[proto].np_handler = NULL;
+ np[proto].np_m2flow = NULL;
+ np[proto].np_m2cpuid = NULL;
+ np[proto].np_qlimit = 0;
+ np[proto].np_policy = 0;
+ for (i = 0; i <= mp_maxid; i++) {
+ if (CPU_ABSENT(i))
+ continue;
+ npwp = &(DPCPU_ID_PTR(i, nws))->nws_work[proto];
+ netisr_drain_proto(npwp);
+ bzero(npwp, sizeof(*npwp));
+ }
+ NETISR_WUNLOCK();
+}
+
+/*
+ * Look up the workstream given a packet and source identifier. Do this by
+ * checking the protocol's policy, and optionally call out to the protocol
+ * for assistance if required.
+ */
+static struct mbuf *
+netisr_select_cpuid(struct netisr_proto *npp, uintptr_t source,
+ struct mbuf *m, u_int *cpuidp)
+{
+ struct ifnet *ifp;
+
+ NETISR_LOCK_ASSERT();
+
+ /*
+ * In the event we have only one worker, shortcut and deliver to it
+ * without further ado.
+ */
+ if (nws_count == 1) {
+ *cpuidp = nws_array[0];
+ return (m);
+ }
+
+ /*
+ * What happens next depends on the policy selected by the protocol.
+ * If we want to support per-interface policies, we should do that
+ * here first.
+ */
+ switch (npp->np_policy) {
+ case NETISR_POLICY_CPU:
+ return (npp->np_m2cpuid(m, source, cpuidp));
+
+ case NETISR_POLICY_FLOW:
+ if (!(m->m_flags & M_FLOWID) && npp->np_m2flow != NULL) {
+ m = npp->np_m2flow(m, source);
+ if (m == NULL)
+ return (NULL);
+ }
+ if (m->m_flags & M_FLOWID) {
+ *cpuidp =
+ netisr_default_flow2cpu(m->m_pkthdr.flowid);
+ return (m);
+ }
+ /* FALLTHROUGH */
+
+ case NETISR_POLICY_SOURCE:
+ ifp = m->m_pkthdr.rcvif;
+ if (ifp != NULL)
+ *cpuidp = nws_array[(ifp->if_index + source) %
+ nws_count];
+ else
+ *cpuidp = nws_array[source % nws_count];
+ return (m);
+
+ default:
+ panic("%s: invalid policy %u for %s", __func__,
+ npp->np_policy, npp->np_name);
+ }
+}
+
+/*
+ * Process packets associated with a workstream and protocol. For reasons of
+ * fairness, we process up to one complete netisr queue at a time, moving the
+ * queue to a stack-local queue for processing, but do not loop refreshing
+ * from the global queue. The caller is responsible for deciding whether to
+ * loop, and for setting the NWS_RUNNING flag. The passed workstream will be
+ * locked on entry and relocked before return, but will be released while
+ * processing. The number of packets processed is returned.
+ */
+static u_int
+netisr_process_workstream_proto(struct netisr_workstream *nwsp, u_int proto)
+{
+ struct netisr_work local_npw, *npwp;
+ u_int handled;
+ struct mbuf *m;
+
+ NETISR_LOCK_ASSERT();
+ NWS_LOCK_ASSERT(nwsp);
+
+ KASSERT(nwsp->nws_flags & NWS_RUNNING,
+ ("%s(%u): not running", __func__, proto));
+ KASSERT(proto < NETISR_MAXPROT,
+ ("%s(%u): invalid proto", __func__, proto));
+
+ npwp = &nwsp->nws_work[proto];
+ if (npwp->nw_len == 0)
+ return (0);
+
+ /*
+ * Move the global work queue to a thread-local work queue.
+ *
+ * Notice that this means the effective maximum length of the queue
+ * is actually twice that of the maximum queue length specified in
+ * the protocol registration call.
+ */
+ handled = npwp->nw_len;
+ local_npw = *npwp;
+ npwp->nw_head = NULL;
+ npwp->nw_tail = NULL;
+ npwp->nw_len = 0;
+ nwsp->nws_pendingbits &= ~(1 << proto);
+ NWS_UNLOCK(nwsp);
+ while ((m = local_npw.nw_head) != NULL) {
+ local_npw.nw_head = m->m_nextpkt;
+ m->m_nextpkt = NULL;
+ if (local_npw.nw_head == NULL)
+ local_npw.nw_tail = NULL;
+ local_npw.nw_len--;
+ VNET_ASSERT(m->m_pkthdr.rcvif != NULL);
+ CURVNET_SET(m->m_pkthdr.rcvif->if_vnet);
+ np[proto].np_handler(m);
+ CURVNET_RESTORE();
+ }
+ KASSERT(local_npw.nw_len == 0,
+ ("%s(%u): len %u", __func__, proto, local_npw.nw_len));
+ if (np[proto].np_drainedcpu)
+ np[proto].np_drainedcpu(nwsp->nws_cpu);
+ NWS_LOCK(nwsp);
+ npwp->nw_handled += handled;
+ return (handled);
+}
+
+/*
+ * SWI handler for netisr -- processes packets in a set of workstreams that
+ * it owns, woken up by calls to NWS_SIGNAL(). If this workstream is already
+ * being direct dispatched, go back to sleep and wait for the dispatching
+ * thread to wake us up again.
+ */
+static void
+swi_net(void *arg)
+{
+#ifdef NETISR_LOCKING
+ struct rm_priotracker tracker;
+#endif
+ struct netisr_workstream *nwsp;
+ u_int bits, prot;
+
+ nwsp = arg;
+
+#ifdef DEVICE_POLLING
+ KASSERT(nws_count == 1,
+ ("%s: device_polling but nws_count != 1", __func__));
+ netisr_poll();
+#endif
+#ifdef NETISR_LOCKING
+ NETISR_RLOCK(&tracker);
+#endif
+ NWS_LOCK(nwsp);
+ KASSERT(!(nwsp->nws_flags & NWS_RUNNING), ("swi_net: running"));
+ if (nwsp->nws_flags & NWS_DISPATCHING)
+ goto out;
+ nwsp->nws_flags |= NWS_RUNNING;
+ nwsp->nws_flags &= ~NWS_SCHEDULED;
+ while ((bits = nwsp->nws_pendingbits) != 0) {
+ while ((prot = ffs(bits)) != 0) {
+ prot--;
+ bits &= ~(1 << prot);
+ (void)netisr_process_workstream_proto(nwsp, prot);
+ }
+ }
+ nwsp->nws_flags &= ~NWS_RUNNING;
+out:
+ NWS_UNLOCK(nwsp);
+#ifdef NETISR_LOCKING
+ NETISR_RUNLOCK(&tracker);
+#endif
+#ifdef DEVICE_POLLING
+ netisr_pollmore();
+#endif
+}
+
+static int
+netisr_queue_workstream(struct netisr_workstream *nwsp, u_int proto,
+ struct netisr_work *npwp, struct mbuf *m, int *dosignalp)
+{
+
+ NWS_LOCK_ASSERT(nwsp);
+
+ *dosignalp = 0;
+ if (npwp->nw_len < npwp->nw_qlimit) {
+ m->m_nextpkt = NULL;
+ if (npwp->nw_head == NULL) {
+ npwp->nw_head = m;
+ npwp->nw_tail = m;
+ } else {
+ npwp->nw_tail->m_nextpkt = m;
+ npwp->nw_tail = m;
+ }
+ npwp->nw_len++;
+ if (npwp->nw_len > npwp->nw_watermark)
+ npwp->nw_watermark = npwp->nw_len;
+ nwsp->nws_pendingbits |= (1 << proto);
+ if (!(nwsp->nws_flags &
+ (NWS_RUNNING | NWS_DISPATCHING | NWS_SCHEDULED))) {
+ nwsp->nws_flags |= NWS_SCHEDULED;
+ *dosignalp = 1; /* Defer until unlocked. */
+ }
+ npwp->nw_queued++;
+ return (0);
+ } else {
+ m_freem(m);
+ npwp->nw_qdrops++;
+ return (ENOBUFS);
+ }
+}
+
+static int
+netisr_queue_internal(u_int proto, struct mbuf *m, u_int cpuid)
+{
+ struct netisr_workstream *nwsp;
+ struct netisr_work *npwp;
+ int dosignal, error;
+
+#ifdef NETISR_LOCKING
+ NETISR_LOCK_ASSERT();
+#endif
+ KASSERT(cpuid <= mp_maxid, ("%s: cpuid too big (%u, %u)", __func__,
+ cpuid, mp_maxid));
+ KASSERT(!CPU_ABSENT(cpuid), ("%s: CPU %u absent", __func__, cpuid));
+
+ dosignal = 0;
+ error = 0;
+ nwsp = DPCPU_ID_PTR(cpuid, nws);
+ npwp = &nwsp->nws_work[proto];
+ NWS_LOCK(nwsp);
+ error = netisr_queue_workstream(nwsp, proto, npwp, m, &dosignal);
+ NWS_UNLOCK(nwsp);
+ if (dosignal)
+ NWS_SIGNAL(nwsp);
+ return (error);
+}
+
+int
+netisr_queue_src(u_int proto, uintptr_t source, struct mbuf *m)
+{
+#ifdef NETISR_LOCKING
+ struct rm_priotracker tracker;
+#endif
+ u_int cpuid;
+ int error;
+
+ KASSERT(proto < NETISR_MAXPROT,
+ ("%s: invalid proto %u", __func__, proto));
+
+#ifdef NETISR_LOCKING
+ NETISR_RLOCK(&tracker);
+#endif
+ KASSERT(np[proto].np_handler != NULL,
+ ("%s: invalid proto %u", __func__, proto));
+
+ m = netisr_select_cpuid(&np[proto], source, m, &cpuid);
+ if (m != NULL) {
+ KASSERT(!CPU_ABSENT(cpuid), ("%s: CPU %u absent", __func__,
+ cpuid));
+ error = netisr_queue_internal(proto, m, cpuid);
+ } else
+ error = ENOBUFS;
+#ifdef NETISR_LOCKING
+ NETISR_RUNLOCK(&tracker);
+#endif
+ return (error);
+}
+
+int
+netisr_queue(u_int proto, struct mbuf *m)
+{
+
+ return (netisr_queue_src(proto, 0, m));
+}
+
+/*
+ * Dispatch a packet for netisr processing; direct dispatch is attempted
+ * if permitted by the calling context.
+ */
+int
+netisr_dispatch_src(u_int proto, uintptr_t source, struct mbuf *m)
+{
+#ifdef NETISR_LOCKING
+ struct rm_priotracker tracker;
+#endif
+ struct netisr_workstream *nwsp;
+ struct netisr_work *npwp;
+ int dosignal, error;
+ u_int cpuid;
+
+ /*
+ * If direct dispatch is entirely disabled, fall back on queueing.
+ */
+ if (!netisr_direct)
+ return (netisr_queue_src(proto, source, m));
+
+ KASSERT(proto < NETISR_MAXPROT,
+ ("%s: invalid proto %u", __func__, proto));
+#ifdef NETISR_LOCKING
+ NETISR_RLOCK(&tracker);
+#endif
+ KASSERT(np[proto].np_handler != NULL,
+ ("%s: invalid proto %u", __func__, proto));
+
+ /*
+ * If direct dispatch is forced, then unconditionally dispatch
+ * without a formal CPU selection. Borrow the current CPU's stats,
+ * even if there's no worker on it. In this case we don't update
+ * nws_flags because all netisr processing will be source ordered due
+ * to always being forced to directly dispatch.
+ */
+ if (netisr_direct_force) {
+ nwsp = DPCPU_PTR(nws);
+ npwp = &nwsp->nws_work[proto];
+ npwp->nw_dispatched++;
+ npwp->nw_handled++;
+ np[proto].np_handler(m);
+ error = 0;
+ goto out_unlock;
+ }
+
+ /*
+ * Otherwise, we execute in a hybrid mode where we will try to direct
+ * dispatch if we're on the right CPU and the netisr worker isn't
+ * already running.
+ */
+ m = netisr_select_cpuid(&np[proto], source, m, &cpuid);
+ if (m == NULL) {
+ error = ENOBUFS;
+ goto out_unlock;
+ }
+ KASSERT(!CPU_ABSENT(cpuid), ("%s: CPU %u absent", __func__, cpuid));
+ sched_pin();
+ if (cpuid != curcpu)
+ goto queue_fallback;
+ nwsp = DPCPU_PTR(nws);
+ npwp = &nwsp->nws_work[proto];
+
+ /*-
+ * We are willing to direct dispatch only if three conditions hold:
+ *
+ * (1) The netisr worker isn't already running,
+ * (2) Another thread isn't already directly dispatching, and
+ * (3) The netisr hasn't already been woken up.
+ */
+ NWS_LOCK(nwsp);
+ if (nwsp->nws_flags & (NWS_RUNNING | NWS_DISPATCHING | NWS_SCHEDULED)) {
+ error = netisr_queue_workstream(nwsp, proto, npwp, m,
+ &dosignal);
+ NWS_UNLOCK(nwsp);
+ if (dosignal)
+ NWS_SIGNAL(nwsp);
+ goto out_unpin;
+ }
+
+ /*
+ * The current thread is now effectively the netisr worker, so set
+ * the dispatching flag to prevent concurrent processing of the
+ * stream from another thread (even the netisr worker), which could
+ * otherwise lead to effective misordering of the stream.
+ */
+ nwsp->nws_flags |= NWS_DISPATCHING;
+ NWS_UNLOCK(nwsp);
+ np[proto].np_handler(m);
+ NWS_LOCK(nwsp);
+ nwsp->nws_flags &= ~NWS_DISPATCHING;
+ npwp->nw_handled++;
+ npwp->nw_hybrid_dispatched++;
+
+ /*
+ * If other work was enqueued by another thread while we were direct
+ * dispatching, we need to signal the netisr worker to do that work.
+ * In the future, we might want to do some of that work in the
+ * current thread, rather than trigger further context switches. If
+ * so, we'll want to establish a reasonable bound on the work done in
+ * the "borrowed" context.
+ */
+ if (nwsp->nws_pendingbits != 0) {
+ nwsp->nws_flags |= NWS_SCHEDULED;
+ dosignal = 1;
+ } else
+ dosignal = 0;
+ NWS_UNLOCK(nwsp);
+ if (dosignal)
+ NWS_SIGNAL(nwsp);
+ error = 0;
+ goto out_unpin;
+
+queue_fallback:
+ error = netisr_queue_internal(proto, m, cpuid);
+out_unpin:
+ sched_unpin();
+out_unlock:
+#ifdef NETISR_LOCKING
+ NETISR_RUNLOCK(&tracker);
+#endif
+ return (error);
+}
+
+int
+netisr_dispatch(u_int proto, struct mbuf *m)
+{
+
+ return (netisr_dispatch_src(proto, 0, m));
+}
+
+#ifdef DEVICE_POLLING
+/*
+ * Kernel polling borrows a netisr thread to run interface polling in; this
+ * function allows kernel polling to request that the netisr thread be
+ * scheduled even if no packets are pending for protocols.
+ */
+void
+netisr_sched_poll(void)
+{
+ struct netisr_workstream *nwsp;
+
+ nwsp = DPCPU_ID_PTR(nws_array[0], nws);
+ NWS_SIGNAL(nwsp);
+}
+#endif
+
+static void
+netisr_start_swi(u_int cpuid, struct pcpu *pc)
+{
+ char swiname[12];
+ struct netisr_workstream *nwsp;
+ int error;
+
+ KASSERT(!CPU_ABSENT(cpuid), ("%s: CPU %u absent", __func__, cpuid));
+
+ nwsp = DPCPU_ID_PTR(cpuid, nws);
+ mtx_init(&nwsp->nws_mtx, "netisr_mtx", NULL, MTX_DEF);
+ nwsp->nws_cpu = cpuid;
+ snprintf(swiname, sizeof(swiname), "netisr %u", cpuid);
+ error = swi_add(&nwsp->nws_intr_event, swiname, swi_net, nwsp,
+ SWI_NET, INTR_MPSAFE, &nwsp->nws_swi_cookie);
+ if (error)
+ panic("%s: swi_add %d", __func__, error);
+ pc->pc_netisr = nwsp->nws_intr_event;
+ if (netisr_bindthreads) {
+ error = intr_event_bind(nwsp->nws_intr_event, cpuid);
+ if (error != 0)
+ printf("%s: cpu %u: intr_event_bind: %d", __func__,
+ cpuid, error);
+ }
+ NETISR_WLOCK();
+ nws_array[nws_count] = nwsp->nws_cpu;
+ nws_count++;
+ NETISR_WUNLOCK();
+}
+
+/*
+ * Initialize the netisr subsystem. We rely on BSS and static initialization
+ * of most fields in global data structures.
+ *
+ * Start a worker thread for the boot CPU so that we can support network
+ * traffic immediately in case the network stack is used before additional
+ * CPUs are started (for example, diskless boot).
+ */
+static void
+netisr_init(void *arg)
+{
+
+ KASSERT(curcpu == 0, ("%s: not on CPU 0", __func__));
+
+ NETISR_LOCK_INIT();
+ if (netisr_maxthreads < 1)
+ netisr_maxthreads = 1;
+ if (netisr_maxthreads > mp_ncpus) {
+ printf("netisr_init: forcing maxthreads from %d to %d\n",
+ netisr_maxthreads, mp_ncpus);
+ netisr_maxthreads = mp_ncpus;
+ }
+ if (netisr_defaultqlimit > netisr_maxqlimit) {
+ printf("netisr_init: forcing defaultqlimit from %d to %d\n",
+ netisr_defaultqlimit, netisr_maxqlimit);
+ netisr_defaultqlimit = netisr_maxqlimit;
+ }
+#ifdef DEVICE_POLLING
+ /*
+ * The device polling code is not yet aware of how to deal with
+ * multiple netisr threads, so for the time being compiling in device
+ * polling disables parallel netisr workers.
+ */
+ if (netisr_maxthreads != 1 || netisr_bindthreads != 0) {
+ printf("netisr_init: forcing maxthreads to 1 and "
+ "bindthreads to 0 for device polling\n");
+ netisr_maxthreads = 1;
+ netisr_bindthreads = 0;
+ }
+#endif
+
+ netisr_start_swi(curcpu, pcpu_find(curcpu));
+}
+SYSINIT(netisr_init, SI_SUB_SOFTINTR, SI_ORDER_FIRST, netisr_init, NULL);
+
+/*
+ * Start worker threads for additional CPUs.  No attempt is made to
+ * gracefully handle work reassignment, as we don't yet support dynamic
+ * reconfiguration.
+ */
+static void
+netisr_start(void *arg)
+{
+ struct pcpu *pc;
+
+ SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
+ if (nws_count >= netisr_maxthreads)
+ break;
+ /* XXXRW: Is skipping absent CPUs still required here? */
+ if (CPU_ABSENT(pc->pc_cpuid))
+ continue;
+ /* Worker will already be present for boot CPU. */
+ if (pc->pc_netisr != NULL)
+ continue;
+ netisr_start_swi(pc->pc_cpuid, pc);
+ }
+}
+SYSINIT(netisr_start, SI_SUB_SMP, SI_ORDER_MIDDLE, netisr_start, NULL);
+
+#ifdef DDB
+DB_SHOW_COMMAND(netisr, db_show_netisr)
+{
+ struct netisr_workstream *nwsp;
+ struct netisr_work *nwp;
+ int first, proto;
+ u_int cpuid;
+
+ db_printf("%3s %6s %5s %5s %5s %8s %8s %8s %8s\n", "CPU", "Proto",
+ "Len", "WMark", "Max", "Disp", "HDisp", "Drop", "Queue");
+ for (cpuid = 0; cpuid <= mp_maxid; cpuid++) {
+ if (CPU_ABSENT(cpuid))
+ continue;
+ nwsp = DPCPU_ID_PTR(cpuid, nws);
+ if (nwsp->nws_intr_event == NULL)
+ continue;
+ first = 1;
+ for (proto = 0; proto < NETISR_MAXPROT; proto++) {
+ if (np[proto].np_handler == NULL)
+ continue;
+ nwp = &nwsp->nws_work[proto];
+ if (first) {
+ db_printf("%3d ", cpuid);
+ first = 0;
+ } else
+ db_printf("%3s ", "");
+ db_printf(
+ "%6s %5d %5d %5d %8ju %8ju %8ju %8ju\n",
+ np[proto].np_name, nwp->nw_len,
+ nwp->nw_watermark, nwp->nw_qlimit,
+ nwp->nw_dispatched, nwp->nw_hybrid_dispatched,
+ nwp->nw_qdrops, nwp->nw_queued);
+ }
+ }
+}
+#endif
diff --git a/rtems/freebsd/net/netisr.h b/rtems/freebsd/net/netisr.h
new file mode 100644
index 00000000..b755332a
--- /dev/null
+++ b/rtems/freebsd/net/netisr.h
@@ -0,0 +1,156 @@
+/*-
+ * Copyright (c) 2007-2009 Robert N. M. Watson
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _NET_NETISR_HH_
+#define _NET_NETISR_HH_
+#ifdef _KERNEL
+
+/*
+ * The netisr (network interrupt service routine) provides a deferred
+ * execution environment in which (generally inbound) network processing can
+ * take place. Protocols register handlers which will be executed directly,
+ * or via deferred dispatch, depending on the circumstances.
+ *
+ * Historically, this was implemented by the BSD software ISR facility; it is
+ * now implemented via a software ithread (SWI).
+ */
+#define NETISR_IP 1
+#define NETISR_IGMP 2 /* IGMPv3 output queue */
+#define NETISR_ROUTE 3 /* routing socket */
+#define NETISR_AARP 4 /* Appletalk ARP */
+#define NETISR_ATALK2 5 /* Appletalk phase 2 */
+#define NETISR_ATALK1 6 /* Appletalk phase 1 */
+#define NETISR_ARP 7 /* same as AF_LINK */
+#define NETISR_IPX 8 /* same as AF_IPX */
+#define NETISR_ETHER 9 /* ethernet input */
+#define NETISR_IPV6 10
+#define NETISR_NATM 11
+#define NETISR_EPAIR 12 /* if_epair(4) */
+
+/*-
+ * Protocols express ordering constraints and affinity preferences by
+ * implementing one or neither of nh_m2flow and nh_m2cpuid, which are used by
+ * netisr to determine which per-CPU workstream to assign mbufs to.
+ *
+ * The following policies may be used by protocols:
+ *
+ * NETISR_POLICY_SOURCE - netisr should maintain source ordering without
+ * advice from the protocol. netisr will ignore any
+ * flow IDs present on the mbuf for the purposes of
+ * work placement.
+ *
+ * NETISR_POLICY_FLOW - netisr should maintain flow ordering as defined by
+ * the mbuf header flow ID field. If the protocol
+ * implements nh_m2flow, then netisr will query the
+ * protocol in the event that the mbuf doesn't have a
+ * flow ID, falling back on source ordering.
+ *
+ * NETISR_POLICY_CPU - netisr will delegate all work placement decisions to
+ * the protocol, querying nh_m2cpuid for each packet.
+ *
+ * Protocols might make decisions about work placement based on an existing
+ * calculated flow ID on the mbuf, such as one provided in hardware, the
+ * receive interface pointed to by the mbuf (if any), the optional source
+ * identifier passed at some dispatch points, or may even parse packet
+ * headers to calculate a flow.  Either handler may return a new mbuf
+ * pointer for the chain, or NULL if the packet proves invalid or
+ * m_pullup() fails.
+ *
+ * XXXRW: If we eventually support dynamic reconfiguration, there should be
+ * protocol handlers to notify them of CPU configuration changes so that they
+ * can rebalance work.
+ */
+struct mbuf;
+typedef void netisr_handler_t(struct mbuf *m);
+typedef struct mbuf *netisr_m2cpuid_t(struct mbuf *m, uintptr_t source,
+ u_int *cpuid);
+typedef struct mbuf *netisr_m2flow_t(struct mbuf *m, uintptr_t source);
+typedef void netisr_drainedcpu_t(u_int cpuid);
+
+#define NETISR_POLICY_SOURCE 1 /* Maintain source ordering. */
+#define NETISR_POLICY_FLOW 2 /* Maintain flow ordering. */
+#define NETISR_POLICY_CPU 3 /* Protocol determines CPU placement. */
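+
+/*
+ * A minimal sketch (not part of this header) of the nh_m2cpuid routine a
+ * protocol selecting NETISR_POLICY_CPU might supply, built on the
+ * netisr_get_cpuid()/netisr_get_cpucount() utilities declared below; the
+ * name "example_m2cpuid" and the fallback on the source identifier are
+ * illustrative assumptions only:
+ *
+ *	static struct mbuf *
+ *	example_m2cpuid(struct mbuf *m, uintptr_t source, u_int *cpuid)
+ *	{
+ *
+ *		if (m->m_flags & M_FLOWID)
+ *			*cpuid = netisr_get_cpuid(m->m_pkthdr.flowid %
+ *			    netisr_get_cpucount());
+ *		else
+ *			*cpuid = netisr_get_cpuid(source %
+ *			    netisr_get_cpucount());
+ *		return (m);
+ *	}
+ */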
+
+/*
+ * Data structure describing a protocol handler.
+ */
+struct netisr_handler {
+ const char *nh_name; /* Character string protocol name. */
+ netisr_handler_t *nh_handler; /* Protocol handler. */
+ netisr_m2flow_t *nh_m2flow; /* Query flow for untagged packet. */
+ netisr_m2cpuid_t *nh_m2cpuid; /* Query CPU to process mbuf on. */
+ netisr_drainedcpu_t *nh_drainedcpu; /* Callback when drained a queue. */
+ u_int nh_proto; /* Integer protocol ID. */
+ u_int nh_qlimit; /* Maximum per-CPU queue depth. */
+ u_int nh_policy; /* Work placement policy. */
+ u_int nh_ispare[5]; /* For future use. */
+ void *nh_pspare[4]; /* For future use. */
+};
+
+/*
+ * Register, unregister, and other netisr handler management functions.
+ */
+void netisr_clearqdrops(const struct netisr_handler *nhp);
+void netisr_getqdrops(const struct netisr_handler *nhp,
+ u_int64_t *qdropsp);
+void netisr_getqlimit(const struct netisr_handler *nhp, u_int *qlimitp);
+void netisr_register(const struct netisr_handler *nhp);
+int netisr_setqlimit(const struct netisr_handler *nhp, u_int qlimit);
+void netisr_unregister(const struct netisr_handler *nhp);
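+
+/*
+ * A minimal registration sketch; the "example" name, handler, queue limit
+ * and policy are illustrative assumptions (a real protocol supplies its
+ * own NETISR_* ID and handler):
+ *
+ *	static void example_input(struct mbuf *m);
+ *
+ *	static const struct netisr_handler example_nh = {
+ *		.nh_name = "example",
+ *		.nh_handler = example_input,
+ *		.nh_proto = NETISR_IP,
+ *		.nh_qlimit = 256,
+ *		.nh_policy = NETISR_POLICY_FLOW,
+ *	};
+ *
+ *	netisr_register(&example_nh);
+ */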
+
+/*
+ * Process a packet destined for a protocol, and attempt direct dispatch.
+ * Supplemental source ordering information can be passed using the _src
+ * variant.
+ */
+int netisr_dispatch(u_int proto, struct mbuf *m);
+int netisr_dispatch_src(u_int proto, uintptr_t source, struct mbuf *m);
+int netisr_queue(u_int proto, struct mbuf *m);
+int netisr_queue_src(u_int proto, uintptr_t source, struct mbuf *m);
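+
+/*
+ * Sketch of typical use from an input path, assuming the packet has
+ * already been classified to a protocol.  In both calls the mbuf is
+ * always consumed; on a non-zero return (e.g. ENOBUFS) it has already
+ * been freed, so the caller must not touch it again:
+ *
+ *	if (netisr_dispatch(NETISR_IP, m) != 0)
+ *		ifp->if_iqdrops++;	(illustrative accounting only)
+ */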
+
+/*
+ * Provide a default implementation of "map an ID to a CPU ID".
+ */
+u_int netisr_default_flow2cpu(u_int flowid);
+
+/*
+ * Utility routines to return the number of CPUs participating in netisr, and
+ * to return a mapping from a number to a CPU ID that can be used with the
+ * scheduler.
+ */
+u_int netisr_get_cpucount(void);
+u_int netisr_get_cpuid(u_int cpunumber);
+
+/*
+ * Interfaces between DEVICE_POLLING and netisr.
+ */
+void netisr_sched_poll(void);
+void netisr_poll(void);
+void netisr_pollmore(void);
+
+#endif /* _KERNEL */
+#endif /* !_NET_NETISR_HH_ */
diff --git a/rtems/freebsd/net/pfil.c b/rtems/freebsd/net/pfil.c
new file mode 100644
index 00000000..4a9a3f27
--- /dev/null
+++ b/rtems/freebsd/net/pfil.c
@@ -0,0 +1,331 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/* $FreeBSD$ */
+/* $NetBSD: pfil.c,v 1.20 2001/11/12 23:49:46 lukem Exp $ */
+
+/*-
+ * Copyright (c) 1996 Matthew R. Green
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/errno.h>
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/rmlock.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/socketvar.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/condvar.h>
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/mutex.h>
+#include <rtems/freebsd/sys/proc.h>
+#include <rtems/freebsd/sys/queue.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/pfil.h>
+
+static struct mtx pfil_global_lock;
+
+MTX_SYSINIT(pfil_heads_lock, &pfil_global_lock, "pfil_head_list lock",
+ MTX_DEF);
+
+static int pfil_list_add(pfil_list_t *, struct packet_filter_hook *, int);
+
+static int pfil_list_remove(pfil_list_t *,
+ int (*)(void *, struct mbuf **, struct ifnet *, int, struct inpcb *),
+ void *);
+
+LIST_HEAD(pfilheadhead, pfil_head);
+VNET_DEFINE(struct pfilheadhead, pfil_head_list);
+#define V_pfil_head_list VNET(pfil_head_list)
+
+/*
+ * pfil_run_hooks() runs the specified packet filter hooks.
+ */
+int
+pfil_run_hooks(struct pfil_head *ph, struct mbuf **mp, struct ifnet *ifp,
+ int dir, struct inpcb *inp)
+{
+ struct rm_priotracker rmpt;
+ struct packet_filter_hook *pfh;
+ struct mbuf *m = *mp;
+ int rv = 0;
+
+ PFIL_RLOCK(ph, &rmpt);
+ KASSERT(ph->ph_nhooks >= 0, ("Pfil hook count dropped < 0"));
+ for (pfh = pfil_hook_get(dir, ph); pfh != NULL;
+ pfh = TAILQ_NEXT(pfh, pfil_link)) {
+ if (pfh->pfil_func != NULL) {
+ rv = (*pfh->pfil_func)(pfh->pfil_arg, &m, ifp, dir,
+ inp);
+ if (rv != 0 || m == NULL)
+ break;
+ }
+ }
+ PFIL_RUNLOCK(ph, &rmpt);
+ *mp = m;
+ return (rv);
+}
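+
+/*
+ * A sketch of how a caller (for example an IP input path) might invoke
+ * the hooks; the head pointer, direction and error handling here are
+ * illustrative only:
+ *
+ *	if (PFIL_HOOKED(ph) &&
+ *	    (pfil_run_hooks(ph, &m, ifp, PFIL_IN, NULL) != 0 || m == NULL))
+ *		return;		(packet was filtered or consumed)
+ */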
+
+/*
+ * pfil_head_register() registers a pfil_head with the packet filter hook
+ * mechanism.
+ */
+int
+pfil_head_register(struct pfil_head *ph)
+{
+ struct pfil_head *lph;
+
+ PFIL_LIST_LOCK();
+ LIST_FOREACH(lph, &V_pfil_head_list, ph_list) {
+ if (ph->ph_type == lph->ph_type &&
+ ph->ph_un.phu_val == lph->ph_un.phu_val) {
+ PFIL_LIST_UNLOCK();
+ return (EEXIST);
+ }
+ }
+ PFIL_LOCK_INIT(ph);
+ ph->ph_nhooks = 0;
+ TAILQ_INIT(&ph->ph_in);
+ TAILQ_INIT(&ph->ph_out);
+ LIST_INSERT_HEAD(&V_pfil_head_list, ph, ph_list);
+ PFIL_LIST_UNLOCK();
+ return (0);
+}
+
+/*
+ * pfil_head_unregister() removes a pfil_head from the packet filter hook
+ * mechanism. The producer of the hook promises that all outstanding
+ * invocations of the hook have completed before it unregisters the hook.
+ */
+int
+pfil_head_unregister(struct pfil_head *ph)
+{
+ struct packet_filter_hook *pfh, *pfnext;
+
+ PFIL_LIST_LOCK();
+ LIST_REMOVE(ph, ph_list);
+ PFIL_LIST_UNLOCK();
+ TAILQ_FOREACH_SAFE(pfh, &ph->ph_in, pfil_link, pfnext)
+ free(pfh, M_IFADDR);
+ TAILQ_FOREACH_SAFE(pfh, &ph->ph_out, pfil_link, pfnext)
+ free(pfh, M_IFADDR);
+ PFIL_LOCK_DESTROY(ph);
+ return (0);
+}
+
+/*
+ * pfil_head_get() returns the pfil_head for a given key/dlt.
+ */
+struct pfil_head *
+pfil_head_get(int type, u_long val)
+{
+ struct pfil_head *ph;
+
+ PFIL_LIST_LOCK();
+ LIST_FOREACH(ph, &V_pfil_head_list, ph_list)
+ if (ph->ph_type == type && ph->ph_un.phu_val == val)
+ break;
+ PFIL_LIST_UNLOCK();
+ return (ph);
+}
+
+/*
+ * pfil_add_hook() adds a function to the packet filter hook list.  The
+ * flags are:
+ * PFIL_IN call me on incoming packets
+ * PFIL_OUT call me on outgoing packets
+ * PFIL_ALL call me on all of the above
+ * PFIL_WAITOK OK to call malloc with M_WAITOK.
+ */
+int
+pfil_add_hook(int (*func)(void *, struct mbuf **, struct ifnet *, int,
+ struct inpcb *), void *arg, int flags, struct pfil_head *ph)
+{
+ struct packet_filter_hook *pfh1 = NULL;
+ struct packet_filter_hook *pfh2 = NULL;
+ int err;
+
+ if (flags & PFIL_IN) {
+ pfh1 = (struct packet_filter_hook *)malloc(sizeof(*pfh1),
+ M_IFADDR, (flags & PFIL_WAITOK) ? M_WAITOK : M_NOWAIT);
+ if (pfh1 == NULL) {
+ err = ENOMEM;
+ goto error;
+ }
+ }
+ if (flags & PFIL_OUT) {
+ pfh2 = (struct packet_filter_hook *)malloc(sizeof(*pfh2),
+ M_IFADDR, (flags & PFIL_WAITOK) ? M_WAITOK : M_NOWAIT);
+ if (pfh2 == NULL) {
+ err = ENOMEM;
+ goto error;
+ }
+ }
+ PFIL_WLOCK(ph);
+ if (flags & PFIL_IN) {
+ pfh1->pfil_func = func;
+ pfh1->pfil_arg = arg;
+ err = pfil_list_add(&ph->ph_in, pfh1, flags & ~PFIL_OUT);
+ if (err)
+ goto locked_error;
+ ph->ph_nhooks++;
+ }
+ if (flags & PFIL_OUT) {
+ pfh2->pfil_func = func;
+ pfh2->pfil_arg = arg;
+ err = pfil_list_add(&ph->ph_out, pfh2, flags & ~PFIL_IN);
+ if (err) {
+ if (flags & PFIL_IN)
+ pfil_list_remove(&ph->ph_in, func, arg);
+ goto locked_error;
+ }
+ ph->ph_nhooks++;
+ }
+ PFIL_WUNLOCK(ph);
+ return (0);
+locked_error:
+ PFIL_WUNLOCK(ph);
+error:
+ if (pfh1 != NULL)
+ free(pfh1, M_IFADDR);
+ if (pfh2 != NULL)
+ free(pfh2, M_IFADDR);
+ return (err);
+}
+
+/*
+ * pfil_remove_hook removes a specific function from the packet filter hook
+ * list.
+ */
+int
+pfil_remove_hook(int (*func)(void *, struct mbuf **, struct ifnet *, int,
+ struct inpcb *), void *arg, int flags, struct pfil_head *ph)
+{
+ int err = 0;
+
+ PFIL_WLOCK(ph);
+ if (flags & PFIL_IN) {
+ err = pfil_list_remove(&ph->ph_in, func, arg);
+ if (err == 0)
+ ph->ph_nhooks--;
+ }
+ if ((err == 0) && (flags & PFIL_OUT)) {
+ err = pfil_list_remove(&ph->ph_out, func, arg);
+ if (err == 0)
+ ph->ph_nhooks--;
+ }
+ PFIL_WUNLOCK(ph);
+ return (err);
+}
+
+static int
+pfil_list_add(pfil_list_t *list, struct packet_filter_hook *pfh1, int flags)
+{
+ struct packet_filter_hook *pfh;
+
+ /*
+ * First make sure the hook is not already there.
+ */
+ TAILQ_FOREACH(pfh, list, pfil_link)
+ if (pfh->pfil_func == pfh1->pfil_func &&
+ pfh->pfil_arg == pfh1->pfil_arg)
+ return (EEXIST);
+
+ /*
+ * Insert the input list in reverse order of the output list so that
+ * the same path is followed in or out of the kernel.
+ */
+ if (flags & PFIL_IN)
+ TAILQ_INSERT_HEAD(list, pfh1, pfil_link);
+ else
+ TAILQ_INSERT_TAIL(list, pfh1, pfil_link);
+ return (0);
+}
+
+/*
+ * pfil_list_remove is an internal function that takes a function off the
+ * specified list.
+ */
+static int
+pfil_list_remove(pfil_list_t *list,
+ int (*func)(void *, struct mbuf **, struct ifnet *, int, struct inpcb *),
+ void *arg)
+{
+ struct packet_filter_hook *pfh;
+
+ TAILQ_FOREACH(pfh, list, pfil_link)
+ if (pfh->pfil_func == func && pfh->pfil_arg == arg) {
+ TAILQ_REMOVE(list, pfh, pfil_link);
+ free(pfh, M_IFADDR);
+ return (0);
+ }
+ return (ENOENT);
+}
+
+/****************
+ * State that must be initialized for every vnet instance
+ * (including the first, of course).
+ */
+static int
+vnet_pfil_init(const void *unused)
+{
+ LIST_INIT(&V_pfil_head_list);
+ return (0);
+}
+
+/***********************
+ * Called for the removal of each instance.
+ */
+static int
+vnet_pfil_uninit(const void *unused)
+{
+ /* XXX should panic if list is not empty */
+ return (0);
+}
+
+/* Define startup order. */
+#define PFIL_SYSINIT_ORDER SI_SUB_PROTO_BEGIN
+#define PFIL_MODEVENT_ORDER (SI_ORDER_FIRST) /* On boot slot in here. */
+#define PFIL_VNET_ORDER (PFIL_MODEVENT_ORDER + 2) /* Later still. */
+
+/*
+ * Starting up.
+ * VNET_SYSINIT is called for each existing vnet and each new vnet.
+ */
+VNET_SYSINIT(vnet_pfil_init, PFIL_SYSINIT_ORDER, PFIL_VNET_ORDER,
+ vnet_pfil_init, NULL);
+
+/*
+ * Closing up shop.  These are done in REVERSE ORDER; they are not
+ * called on reboot.
+ * VNET_SYSUNINIT is called for each vnet as it exits.
+ */
+VNET_SYSUNINIT(vnet_pfil_uninit, PFIL_SYSINIT_ORDER, PFIL_VNET_ORDER,
+ vnet_pfil_uninit, NULL);
+
diff --git a/rtems/freebsd/net/pfil.h b/rtems/freebsd/net/pfil.h
new file mode 100644
index 00000000..052ae10c
--- /dev/null
+++ b/rtems/freebsd/net/pfil.h
@@ -0,0 +1,117 @@
+/* $FreeBSD$ */
+/* $NetBSD: pfil.h,v 1.22 2003/06/23 12:57:08 martin Exp $ */
+
+/*-
+ * Copyright (c) 1996 Matthew R. Green
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _NET_PFIL_HH_
+#define _NET_PFIL_HH_
+
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/queue.h>
+#include <rtems/freebsd/sys/_lock.h>
+#include <rtems/freebsd/sys/_mutex.h>
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/rmlock.h>
+
+struct mbuf;
+struct ifnet;
+struct inpcb;
+
+/*
+ * The packet filter hooks are designed so that any code may call them
+ * to possibly intercept a packet.
+ */
+struct packet_filter_hook {
+ TAILQ_ENTRY(packet_filter_hook) pfil_link;
+ int (*pfil_func)(void *, struct mbuf **, struct ifnet *, int,
+ struct inpcb *);
+ void *pfil_arg;
+};
+
+#define PFIL_IN 0x00000001
+#define PFIL_OUT 0x00000002
+#define PFIL_WAITOK 0x00000004
+#define PFIL_ALL (PFIL_IN|PFIL_OUT)
+
+typedef TAILQ_HEAD(pfil_list, packet_filter_hook) pfil_list_t;
+
+#define PFIL_TYPE_AF 1 /* key is AF_* type */
+#define PFIL_TYPE_IFNET 2 /* key is ifnet pointer */
+
+struct pfil_head {
+ pfil_list_t ph_in;
+ pfil_list_t ph_out;
+ int ph_type;
+ int ph_nhooks;
+ struct rmlock ph_lock;
+ union {
+ u_long phu_val;
+ void *phu_ptr;
+ } ph_un;
+#define ph_af ph_un.phu_val
+#define ph_ifnet ph_un.phu_ptr
+ LIST_ENTRY(pfil_head) ph_list;
+};
+
+int pfil_add_hook(int (*func)(void *, struct mbuf **, struct ifnet *,
+ int, struct inpcb *), void *, int, struct pfil_head *);
+int pfil_remove_hook(int (*func)(void *, struct mbuf **, struct ifnet *,
+ int, struct inpcb *), void *, int, struct pfil_head *);
+int pfil_run_hooks(struct pfil_head *, struct mbuf **, struct ifnet *,
+ int, struct inpcb *inp);
+
+int pfil_head_register(struct pfil_head *);
+int pfil_head_unregister(struct pfil_head *);
+
+struct pfil_head *pfil_head_get(int, u_long);
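+
+/*
+ * A minimal sketch of installing a hook; the filter function and the
+ * AF_INET key are illustrative assumptions.  A hook that returns
+ * non-zero, or that sets *mp to NULL, stops further processing of the
+ * packet:
+ *
+ *	static int
+ *	example_filter(void *arg, struct mbuf **mp, struct ifnet *ifp,
+ *	    int dir, struct inpcb *inp)
+ *	{
+ *		return (0);	(0 lets the packet continue)
+ *	}
+ *
+ *	struct pfil_head *ph = pfil_head_get(PFIL_TYPE_AF, AF_INET);
+ *	if (ph != NULL)
+ *		(void)pfil_add_hook(example_filter, NULL,
+ *		    PFIL_ALL | PFIL_WAITOK, ph);
+ */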
+
+#define PFIL_HOOKED(p) ((p)->ph_nhooks > 0)
+#define PFIL_LOCK_INIT(p) \
+ rm_init_flags(&(p)->ph_lock, "PFil hook read/write mutex", RM_RECURSE)
+#define PFIL_LOCK_DESTROY(p) rm_destroy(&(p)->ph_lock)
+#define PFIL_RLOCK(p, t) rm_rlock(&(p)->ph_lock, (t))
+#define PFIL_WLOCK(p) rm_wlock(&(p)->ph_lock)
+#define PFIL_RUNLOCK(p, t) rm_runlock(&(p)->ph_lock, (t))
+#define PFIL_WUNLOCK(p) rm_wunlock(&(p)->ph_lock)
+#define PFIL_LIST_LOCK() mtx_lock(&pfil_global_lock)
+#define PFIL_LIST_UNLOCK() mtx_unlock(&pfil_global_lock)
+
+static __inline struct packet_filter_hook *
+pfil_hook_get(int dir, struct pfil_head *ph)
+{
+
+ if (dir == PFIL_IN)
+ return (TAILQ_FIRST(&ph->ph_in));
+ else if (dir == PFIL_OUT)
+ return (TAILQ_FIRST(&ph->ph_out));
+ else
+ return (NULL);
+}
+
+#endif /* _NET_PFIL_HH_ */
diff --git a/rtems/freebsd/net/pfkeyv2.h b/rtems/freebsd/net/pfkeyv2.h
new file mode 100644
index 00000000..f8e088e1
--- /dev/null
+++ b/rtems/freebsd/net/pfkeyv2.h
@@ -0,0 +1,432 @@
+/* $FreeBSD$ */
+/* $KAME: pfkeyv2.h,v 1.37 2003/09/06 05:15:43 itojun Exp $ */
+
+/*-
+ * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the project nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * This file has been derived from RFC 2367, with some SADB_KEY_FLAGS_
+ * flags added as SADB_X_EXT_.
+ * sakane@ydc.co.jp
+ */
+
+#ifndef _NET_PFKEYV2_HH_
+#define _NET_PFKEYV2_HH_
+
+/*
+This file defines structures and symbols for the PF_KEY Version 2
+key management interface. It was written at the U.S. Naval Research
+Laboratory. This file is in the public domain. The authors ask that
+you leave this credit intact on any copies of this file.
+*/
+#ifndef __PFKEY_V2_H
+#define __PFKEY_V2_H 1
+
+#define PF_KEY_V2 2
+#define PFKEYV2_REVISION 199806L
+
+#define SADB_RESERVED 0
+#define SADB_GETSPI 1
+#define SADB_UPDATE 2
+#define SADB_ADD 3
+#define SADB_DELETE 4
+#define SADB_GET 5
+#define SADB_ACQUIRE 6
+#define SADB_REGISTER 7
+#define SADB_EXPIRE 8
+#define SADB_FLUSH 9
+#define SADB_DUMP 10
+#define SADB_X_PROMISC 11
+#define SADB_X_PCHANGE 12
+
+#define SADB_X_SPDUPDATE 13
+#define SADB_X_SPDADD 14
+#define SADB_X_SPDDELETE 15 /* by policy index */
+#define SADB_X_SPDGET 16
+#define SADB_X_SPDACQUIRE 17
+#define SADB_X_SPDDUMP 18
+#define SADB_X_SPDFLUSH 19
+#define SADB_X_SPDSETIDX 20
+#define SADB_X_SPDEXPIRE 21
+#define SADB_X_SPDDELETE2 22 /* by policy id */
+#define SADB_MAX 22
+
+struct sadb_msg {
+ u_int8_t sadb_msg_version;
+ u_int8_t sadb_msg_type;
+ u_int8_t sadb_msg_errno;
+ u_int8_t sadb_msg_satype;
+ u_int16_t sadb_msg_len;
+ u_int16_t sadb_msg_reserved;
+ u_int32_t sadb_msg_seq;
+ u_int32_t sadb_msg_pid;
+};
+
+struct sadb_ext {
+ u_int16_t sadb_ext_len;
+ u_int16_t sadb_ext_type;
+};
+
+struct sadb_sa {
+ u_int16_t sadb_sa_len;
+ u_int16_t sadb_sa_exttype;
+ u_int32_t sadb_sa_spi;
+ u_int8_t sadb_sa_replay;
+ u_int8_t sadb_sa_state;
+ u_int8_t sadb_sa_auth;
+ u_int8_t sadb_sa_encrypt;
+ u_int32_t sadb_sa_flags;
+};
+
+struct sadb_lifetime {
+ u_int16_t sadb_lifetime_len;
+ u_int16_t sadb_lifetime_exttype;
+ u_int32_t sadb_lifetime_allocations;
+ u_int64_t sadb_lifetime_bytes;
+ u_int64_t sadb_lifetime_addtime;
+ u_int64_t sadb_lifetime_usetime;
+};
+
+struct sadb_address {
+ u_int16_t sadb_address_len;
+ u_int16_t sadb_address_exttype;
+ u_int8_t sadb_address_proto;
+ u_int8_t sadb_address_prefixlen;
+ u_int16_t sadb_address_reserved;
+};
+
+struct sadb_key {
+ u_int16_t sadb_key_len;
+ u_int16_t sadb_key_exttype;
+ u_int16_t sadb_key_bits;
+ u_int16_t sadb_key_reserved;
+};
+
+struct sadb_ident {
+ u_int16_t sadb_ident_len;
+ u_int16_t sadb_ident_exttype;
+ u_int16_t sadb_ident_type;
+ u_int16_t sadb_ident_reserved;
+ u_int64_t sadb_ident_id;
+};
+
+struct sadb_sens {
+ u_int16_t sadb_sens_len;
+ u_int16_t sadb_sens_exttype;
+ u_int32_t sadb_sens_dpd;
+ u_int8_t sadb_sens_sens_level;
+ u_int8_t sadb_sens_sens_len;
+ u_int8_t sadb_sens_integ_level;
+ u_int8_t sadb_sens_integ_len;
+ u_int32_t sadb_sens_reserved;
+};
+
+struct sadb_prop {
+ u_int16_t sadb_prop_len;
+ u_int16_t sadb_prop_exttype;
+ u_int8_t sadb_prop_replay;
+ u_int8_t sadb_prop_reserved[3];
+};
+
+struct sadb_comb {
+ u_int8_t sadb_comb_auth;
+ u_int8_t sadb_comb_encrypt;
+ u_int16_t sadb_comb_flags;
+ u_int16_t sadb_comb_auth_minbits;
+ u_int16_t sadb_comb_auth_maxbits;
+ u_int16_t sadb_comb_encrypt_minbits;
+ u_int16_t sadb_comb_encrypt_maxbits;
+ u_int32_t sadb_comb_reserved;
+ u_int32_t sadb_comb_soft_allocations;
+ u_int32_t sadb_comb_hard_allocations;
+ u_int64_t sadb_comb_soft_bytes;
+ u_int64_t sadb_comb_hard_bytes;
+ u_int64_t sadb_comb_soft_addtime;
+ u_int64_t sadb_comb_hard_addtime;
+ u_int64_t sadb_comb_soft_usetime;
+ u_int64_t sadb_comb_hard_usetime;
+};
+
+struct sadb_supported {
+ u_int16_t sadb_supported_len;
+ u_int16_t sadb_supported_exttype;
+ u_int32_t sadb_supported_reserved;
+};
+
+struct sadb_alg {
+ u_int8_t sadb_alg_id;
+ u_int8_t sadb_alg_ivlen;
+ u_int16_t sadb_alg_minbits;
+ u_int16_t sadb_alg_maxbits;
+ u_int16_t sadb_alg_reserved;
+};
+
+struct sadb_spirange {
+ u_int16_t sadb_spirange_len;
+ u_int16_t sadb_spirange_exttype;
+ u_int32_t sadb_spirange_min;
+ u_int32_t sadb_spirange_max;
+ u_int32_t sadb_spirange_reserved;
+};
+
+struct sadb_x_kmprivate {
+ u_int16_t sadb_x_kmprivate_len;
+ u_int16_t sadb_x_kmprivate_exttype;
+ u_int32_t sadb_x_kmprivate_reserved;
+};
+
+/*
+ * XXX Additional SA Extension.
+ * mode: tunnel or transport
+ * reqid: makes an SA unique even when the SA's address pair is the same;
+ * mainly used for VPN.
+ */
+struct sadb_x_sa2 {
+ u_int16_t sadb_x_sa2_len;
+ u_int16_t sadb_x_sa2_exttype;
+ u_int8_t sadb_x_sa2_mode;
+ u_int8_t sadb_x_sa2_reserved1;
+ u_int16_t sadb_x_sa2_reserved2;
+ u_int32_t sadb_x_sa2_sequence; /* lowermost 32bit of sequence number */
+ u_int32_t sadb_x_sa2_reqid;
+};
+
+/* XXX Policy Extension */
+/* sizeof(struct sadb_x_policy) == 16 */
+struct sadb_x_policy {
+ u_int16_t sadb_x_policy_len;
+ u_int16_t sadb_x_policy_exttype;
+ u_int16_t sadb_x_policy_type; /* See policy type of ipsec.h */
+ u_int8_t sadb_x_policy_dir; /* direction, see ipsec.h */
+ u_int8_t sadb_x_policy_reserved;
+ u_int32_t sadb_x_policy_id;
+ u_int32_t sadb_x_policy_reserved2;
+};
+/*
+ * When policy_type == IPSEC, it is followed by some of
+ * the ipsec policy request.
+ * [total length of ipsec policy requests]
+ * = (sadb_x_policy_len * sizeof(uint64_t) - sizeof(struct sadb_x_policy))
+ */
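+/*
+ * For example, with sadb_x_policy_len == 4 the trailing requests occupy
+ * 4 * sizeof(uint64_t) - sizeof(struct sadb_x_policy) = 32 - 16 = 16
+ * bytes.
+ */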
+
+/* XXX IPsec Policy Request Extension */
+/*
+ * This structure is aligned to 8 bytes.
+ */
+struct sadb_x_ipsecrequest {
+ u_int16_t sadb_x_ipsecrequest_len; /* structure length in 64 bits. */
+ u_int16_t sadb_x_ipsecrequest_proto; /* See ipsec.h */
+ u_int8_t sadb_x_ipsecrequest_mode; /* See IPSEC_MODE_XX in ipsec.h. */
+ u_int8_t sadb_x_ipsecrequest_level; /* See IPSEC_LEVEL_XX in ipsec.h */
+ u_int16_t sadb_x_ipsecrequest_reqid; /* See ipsec.h */
+
+ /*
+ * Followed by the source IP address of the SA, immediately followed by
+ * the destination IP address of the SA.  These are encoded as two
+ * sockaddr structures without any padding, and each sa_len must be set
+ * exactly.  The individual sockaddr lengths are not aligned to 64 bits,
+ * but the sum of the request and the addresses is.
+ */
+};
+
+/* NAT-Traversal type, see RFC 3948 (and drafts). */
+/* sizeof(struct sadb_x_nat_t_type) == 8 */
+struct sadb_x_nat_t_type {
+ u_int16_t sadb_x_nat_t_type_len;
+ u_int16_t sadb_x_nat_t_type_exttype;
+ u_int8_t sadb_x_nat_t_type_type;
+ u_int8_t sadb_x_nat_t_type_reserved[3];
+};
+
+/* NAT-Traversal source or destination port. */
+/* sizeof(struct sadb_x_nat_t_port) == 8 */
+struct sadb_x_nat_t_port {
+ u_int16_t sadb_x_nat_t_port_len;
+ u_int16_t sadb_x_nat_t_port_exttype;
+ u_int16_t sadb_x_nat_t_port_port;
+ u_int16_t sadb_x_nat_t_port_reserved;
+};
+
+/* ESP fragmentation size. */
+/* sizeof(struct sadb_x_nat_t_frag) == 8 */
+struct sadb_x_nat_t_frag {
+ u_int16_t sadb_x_nat_t_frag_len;
+ u_int16_t sadb_x_nat_t_frag_exttype;
+ u_int16_t sadb_x_nat_t_frag_fraglen;
+ u_int16_t sadb_x_nat_t_frag_reserved;
+};
+
+
+#define SADB_EXT_RESERVED 0
+#define SADB_EXT_SA 1
+#define SADB_EXT_LIFETIME_CURRENT 2
+#define SADB_EXT_LIFETIME_HARD 3
+#define SADB_EXT_LIFETIME_SOFT 4
+#define SADB_EXT_ADDRESS_SRC 5
+#define SADB_EXT_ADDRESS_DST 6
+#define SADB_EXT_ADDRESS_PROXY 7
+#define SADB_EXT_KEY_AUTH 8
+#define SADB_EXT_KEY_ENCRYPT 9
+#define SADB_EXT_IDENTITY_SRC 10
+#define SADB_EXT_IDENTITY_DST 11
+#define SADB_EXT_SENSITIVITY 12
+#define SADB_EXT_PROPOSAL 13
+#define SADB_EXT_SUPPORTED_AUTH 14
+#define SADB_EXT_SUPPORTED_ENCRYPT 15
+#define SADB_EXT_SPIRANGE 16
+#define SADB_X_EXT_KMPRIVATE 17
+#define SADB_X_EXT_POLICY 18
+#define SADB_X_EXT_SA2 19
+#define SADB_X_EXT_NAT_T_TYPE 20
+#define SADB_X_EXT_NAT_T_SPORT 21
+#define SADB_X_EXT_NAT_T_DPORT 22
+#define SADB_X_EXT_NAT_T_OA 23 /* Deprecated. */
+#define SADB_X_EXT_NAT_T_OAI 23 /* Peer's NAT_OA for src of SA. */
+#define SADB_X_EXT_NAT_T_OAR 24 /* Peer's NAT_OA for dst of SA. */
+#define SADB_X_EXT_NAT_T_FRAG 25 /* Manual MTU override. */
+#define SADB_EXT_MAX 25
+
+#define SADB_SATYPE_UNSPEC 0
+#define SADB_SATYPE_AH 2
+#define SADB_SATYPE_ESP 3
+#define SADB_SATYPE_RSVP 5
+#define SADB_SATYPE_OSPFV2 6
+#define SADB_SATYPE_RIPV2 7
+#define SADB_SATYPE_MIP 8
+#define SADB_X_SATYPE_IPCOMP 9
+/*#define SADB_X_SATYPE_POLICY 10 obsolete, do not reuse */
+#define SADB_X_SATYPE_TCPSIGNATURE 11
+#define SADB_SATYPE_MAX 12
+
+#define SADB_SASTATE_LARVAL 0
+#define SADB_SASTATE_MATURE 1
+#define SADB_SASTATE_DYING 2
+#define SADB_SASTATE_DEAD 3
+#define SADB_SASTATE_MAX 3
+
+#define SADB_SAFLAGS_PFS 1
+
+/* RFC2367 numbers - meets RFC2407 */
+#define SADB_AALG_NONE 0
+#define SADB_AALG_MD5HMAC 2
+#define SADB_AALG_SHA1HMAC 3
+#define SADB_AALG_MAX 252
+/* private allocations - based on RFC2407/IANA assignment */
+#define SADB_X_AALG_SHA2_256 5
+#define SADB_X_AALG_SHA2_384 6
+#define SADB_X_AALG_SHA2_512 7
+#define SADB_X_AALG_RIPEMD160HMAC 8
+#define SADB_X_AALG_AES_XCBC_MAC 9 /* draft-ietf-ipsec-ciph-aes-xcbc-mac-04 */
+/* private allocations should use 249-255 (RFC2407) */
+#define SADB_X_AALG_MD5 249 /* Keyed MD5 */
+#define SADB_X_AALG_SHA 250 /* Keyed SHA */
+#define SADB_X_AALG_NULL 251 /* null authentication */
+#define SADB_X_AALG_TCP_MD5 252 /* Keyed TCP-MD5 (RFC2385) */
+
+/* RFC2367 numbers - meets RFC2407 */
+#define SADB_EALG_NONE 0
+#define SADB_EALG_DESCBC 2
+#define SADB_EALG_3DESCBC 3
+#define SADB_EALG_NULL 11
+#define SADB_EALG_MAX 250
+/* private allocations - based on RFC2407/IANA assignment */
+#define SADB_X_EALG_CAST128CBC 6
+#define SADB_X_EALG_BLOWFISHCBC 7
+#define SADB_X_EALG_RIJNDAELCBC 12
+#define SADB_X_EALG_AES 12
+/* private allocations - based on RFC4312/IANA assignment */
+#define SADB_X_EALG_CAMELLIACBC 22
+/* private allocations should use 249-255 (RFC2407) */
+#define SADB_X_EALG_SKIPJACK 249 /*250*/ /* for IPSEC */
+#define SADB_X_EALG_AESCTR 250 /*249*/ /* draft-ietf-ipsec-ciph-aes-ctr-03 */
+
+/* private allocations - based on RFC2407/IANA assignment */
+#define SADB_X_CALG_NONE 0
+#define SADB_X_CALG_OUI 1
+#define SADB_X_CALG_DEFLATE 2
+#define SADB_X_CALG_LZS 3
+#define SADB_X_CALG_MAX 4
+
+#define SADB_IDENTTYPE_RESERVED 0
+#define SADB_IDENTTYPE_PREFIX 1
+#define SADB_IDENTTYPE_FQDN 2
+#define SADB_IDENTTYPE_USERFQDN 3
+#define SADB_X_IDENTTYPE_ADDR 4
+#define SADB_IDENTTYPE_MAX 4
+
+/* The `flags' field in the sadb_sa structure holds the following. */
+#define SADB_X_EXT_NONE 0x0000 /* i.e. new format. */
+#define SADB_X_EXT_OLD 0x0001 /* old format. */
+
+#define SADB_X_EXT_IV4B 0x0010 /* IV length of 4 bytes in use */
+#define SADB_X_EXT_DERIV 0x0020 /* DES derived */
+#define SADB_X_EXT_CYCSEQ 0x0040 /* allow cyclic sequence numbers */
+
+ /* The following three are mutually exclusive flags. */
+#define SADB_X_EXT_PSEQ 0x0000 /* sequential padding for ESP */
+#define SADB_X_EXT_PRAND 0x0100 /* random padding for ESP */
+#define SADB_X_EXT_PZERO 0x0200 /* zero padding for ESP */
+#define SADB_X_EXT_PMASK 0x0300 /* mask for padding flag */
+
+#if 1
+#define SADB_X_EXT_RAWCPI 0x0080 /* use well known CPI (IPComp) */
+#endif
+
+#define SADB_KEY_FLAGS_MAX 0x0fff
+
+/* SPI size for PF_KEYv2 */
+#define PFKEY_SPI_SIZE sizeof(u_int32_t)
+
+/* Identifier for member of lifetime structure */
+#define SADB_X_LIFETIME_ALLOCATIONS 0
+#define SADB_X_LIFETIME_BYTES 1
+#define SADB_X_LIFETIME_ADDTIME 2
+#define SADB_X_LIFETIME_USETIME 3
+
+/* The rate for SOFT lifetime against HARD one. */
+#define PFKEY_SOFT_LIFETIME_RATE 80
+
+/* Utilities */
+#define PFKEY_ALIGN8(a) (1 + (((a) - 1) | (8 - 1)))
+#define PFKEY_EXTLEN(msg) \
+ PFKEY_UNUNIT64(((struct sadb_ext *)(msg))->sadb_ext_len)
+#define PFKEY_ADDR_PREFIX(ext) \
+ (((struct sadb_address *)(ext))->sadb_address_prefixlen)
+#define PFKEY_ADDR_PROTO(ext) \
+ (((struct sadb_address *)(ext))->sadb_address_proto)
+#define PFKEY_ADDR_SADDR(ext) \
+ ((struct sockaddr *)((caddr_t)(ext) + sizeof(struct sadb_address)))
+
+/* in 64bits */
+#define PFKEY_UNUNIT64(a) ((a) << 3)
+#define PFKEY_UNIT64(a) ((a) >> 3)
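+
+/*
+ * Example: a sadb_msg_len of 2 denotes PFKEY_UNUNIT64(2) == 16 bytes,
+ * exactly sizeof(struct sadb_msg); conversely PFKEY_UNIT64(16) == 2,
+ * and PFKEY_ALIGN8(5) rounds 5 up to 8.
+ */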
+
+#endif /* __PFKEY_V2_H */
+
+#endif /* _NET_PFKEYV2_HH_ */
diff --git a/rtems/freebsd/net/ppp_defs.h b/rtems/freebsd/net/ppp_defs.h
new file mode 100644
index 00000000..e0690e94
--- /dev/null
+++ b/rtems/freebsd/net/ppp_defs.h
@@ -0,0 +1,158 @@
+/*
+ * ppp_defs.h - PPP definitions.
+ */
+/*-
+ * Copyright (c) 1994 The Australian National University.
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify, and distribute this software and its
+ * documentation is hereby granted, provided that the above copyright
+ * notice appears in all copies. This software is provided without any
+ * warranty, express or implied. The Australian National University
+ * makes no representations about the suitability of this software for
+ * any purpose.
+ *
+ * IN NO EVENT SHALL THE AUSTRALIAN NATIONAL UNIVERSITY BE LIABLE TO ANY
+ * PARTY FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
+ * ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF
+ * THE AUSTRALIAN NATIONAL UNIVERSITY HAVE BEEN ADVISED OF THE POSSIBILITY
+ * OF SUCH DAMAGE.
+ *
+ * THE AUSTRALIAN NATIONAL UNIVERSITY SPECIFICALLY DISCLAIMS ANY WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
+ * AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
+ * ON AN "AS IS" BASIS, AND THE AUSTRALIAN NATIONAL UNIVERSITY HAS NO
+ * OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS,
+ * OR MODIFICATIONS.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _PPP_DEFS_HH_
+#define _PPP_DEFS_HH_
+
+/*
+ * The basic PPP frame.
+ */
+#define PPP_HDRLEN 4 /* octets for standard ppp header */
+#define PPP_FCSLEN 2 /* octets for FCS */
+#define PPP_MRU 1500 /* default MRU = max length of info field */
+
+#define PPP_ADDRESS(p) (((u_char *)(p))[0])
+#define PPP_CONTROL(p) (((u_char *)(p))[1])
+#define PPP_PROTOCOL(p) ((((u_char *)(p))[2] << 8) + ((u_char *)(p))[3])
+
+/*
+ * Significant octet values.
+ */
+#define PPP_ALLSTATIONS 0xff /* All-Stations broadcast address */
+#define PPP_UI 0x03 /* Unnumbered Information */
+#define PPP_FLAG 0x7e /* Flag Sequence */
+#define PPP_ESCAPE 0x7d /* Asynchronous Control Escape */
+#define PPP_TRANS 0x20 /* Asynchronous transparency modifier */
+
+/*
+ * Protocol field values.
+ */
+#define PPP_IP 0x21 /* Internet Protocol */
+#define PPP_XNS 0x25 /* Xerox NS */
+#define PPP_AT 0x29 /* AppleTalk Protocol */
+#define PPP_IPX 0x2b /* IPX Datagram (RFC1552) */
+#define PPP_VJC_COMP 0x2d /* VJ compressed TCP */
+#define PPP_VJC_UNCOMP 0x2f /* VJ uncompressed TCP */
+#define PPP_COMP 0xfd /* compressed packet */
+#define PPP_IPCP 0x8021 /* IP Control Protocol */
+#define PPP_ATCP 0x8029 /* AppleTalk Control Protocol */
+#define PPP_IPXCP 0x802b /* IPX Control Protocol (RFC1552) */
+#define PPP_CCP 0x80fd /* Compression Control Protocol */
+#define PPP_LCP 0xc021 /* Link Control Protocol */
+#define PPP_PAP 0xc023 /* Password Authentication Protocol */
+#define PPP_LQR 0xc025 /* Link Quality Report protocol */
+#define PPP_CHAP 0xc223 /* Cryptographic Handshake Auth. Protocol */
+#define PPP_CBCP 0xc029 /* Callback Control Protocol */
+#define PPP_IPV6 0x57 /* Internet Protocol version 6 */
+#define PPP_IPV6CP 0x8057 /* IPv6 Control Protocol */
+
+/*
+ * Values for FCS calculations.
+ */
+#define PPP_INITFCS 0xffff /* Initial FCS value */
+#define PPP_GOODFCS 0xf0b8 /* Good final FCS value */
+#define PPP_FCS(fcs, c) (((fcs) >> 8) ^ fcstab[((fcs) ^ (c)) & 0xff])
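+
+/*
+ * A sketch of checking a received frame with these macros, assuming an
+ * externally supplied fcstab[] lookup table (referenced by PPP_FCS but
+ * not defined in this header):
+ *
+ *	u_short fcs = PPP_INITFCS;
+ *	for (i = 0; i < len; i++)
+ *		fcs = PPP_FCS(fcs, buf[i]);
+ *
+ * A frame whose trailing FCS octets were included in the loop is good
+ * if fcs == PPP_GOODFCS.
+ */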
+
+/*
+ * Extended asyncmap - allows any character to be escaped.
+ */
+typedef u_int32_t ext_accm[8];
+
+/*
+ * What to do with network protocol (NP) packets.
+ */
+enum NPmode {
+ NPMODE_PASS, /* pass the packet through */
+ NPMODE_DROP, /* silently drop the packet */
+ NPMODE_ERROR, /* return an error */
+ NPMODE_QUEUE /* save it up for later. */
+};
+
+/*
+ * Statistics.
+ */
+struct pppstat {
+ unsigned int ppp_ibytes; /* bytes received */
+ unsigned int ppp_ipackets; /* packets received */
+ unsigned int ppp_ierrors; /* receive errors */
+ unsigned int ppp_obytes; /* bytes sent */
+ unsigned int ppp_opackets; /* packets sent */
+ unsigned int ppp_oerrors; /* transmit errors */
+};
+
+struct vjstat {
+ unsigned int vjs_packets; /* outbound packets */
+ unsigned int vjs_compressed; /* outbound compressed packets */
+ unsigned int vjs_searches; /* searches for connection state */
+ unsigned int vjs_misses; /* times couldn't find conn. state */
+ unsigned int vjs_uncompressedin; /* inbound uncompressed packets */
+ unsigned int vjs_compressedin; /* inbound compressed packets */
+ unsigned int vjs_errorin; /* inbound unknown type packets */
+ unsigned int vjs_tossed; /* inbound packets tossed because of error */
+};
+
+struct ppp_stats {
+ struct pppstat p; /* basic PPP statistics */
+ struct vjstat vj; /* VJ header compression statistics */
+};
+
+struct compstat {
+ unsigned int unc_bytes; /* total uncompressed bytes */
+ unsigned int unc_packets; /* total uncompressed packets */
+ unsigned int comp_bytes; /* compressed bytes */
+ unsigned int comp_packets; /* compressed packets */
+ unsigned int inc_bytes; /* incompressible bytes */
+ unsigned int inc_packets; /* incompressible packets */
+ unsigned int ratio; /* recent compression ratio << 8 */
+};
+
+struct ppp_comp_stats {
+ struct compstat c; /* packet compression statistics */
+ struct compstat d; /* packet decompression statistics */
+};
+
+/*
+ * The following structure records the time in seconds since
+ * the last NP packet was sent or received.
+ */
+struct ppp_idle {
+ time_t xmit_idle; /* time since last NP packet sent */
+ time_t recv_idle; /* time since last NP packet received */
+};
+
+#ifndef __P
+#ifdef __STDC__
+#define __P(x) x
+#else
+#define __P(x) ()
+#endif
+#endif
+
+#endif /* _PPP_DEFS_HH_ */
diff --git a/rtems/freebsd/net/radix.c b/rtems/freebsd/net/radix.c
new file mode 100644
index 00000000..022eecab
--- /dev/null
+++ b/rtems/freebsd/net/radix.c
@@ -0,0 +1,1205 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 1988, 1989, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)radix.c 8.5 (Berkeley) 5/19/95
+ * $FreeBSD$
+ */
+
+/*
+ * Routines to build and maintain radix trees for routing lookups.
+ */
+#include <rtems/freebsd/sys/param.h>
+#ifdef _KERNEL
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/mutex.h>
+#include <rtems/freebsd/sys/rwlock.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/syslog.h>
+#include <rtems/freebsd/net/radix.h>
+#include <rtems/freebsd/local/opt_mpath.h>
+#ifdef RADIX_MPATH
+#include <rtems/freebsd/net/radix_mpath.h>
+#endif
+#else /* !_KERNEL */
+#include <rtems/freebsd/stdio.h>
+#include <rtems/freebsd/strings.h>
+#include <rtems/freebsd/stdlib.h>
+#define log(x, arg...) fprintf(stderr, ## arg)
+#define panic(x) fprintf(stderr, "PANIC: %s", x), exit(1)
+#define min(a, b) ((a) < (b) ? (a) : (b) )
+#include <rtems/freebsd/net/radix.h>
+#endif /* !_KERNEL */
+
+static int rn_walktree_from(struct radix_node_head *h, void *a, void *m,
+ walktree_f_t *f, void *w);
+static int rn_walktree(struct radix_node_head *, walktree_f_t *, void *);
+static struct radix_node
+ *rn_insert(void *, struct radix_node_head *, int *,
+ struct radix_node [2]),
+ *rn_newpair(void *, int, struct radix_node[2]),
+ *rn_search(void *, struct radix_node *),
+ *rn_search_m(void *, struct radix_node *, void *);
+
+static int max_keylen;
+static struct radix_mask *rn_mkfreelist;
+static struct radix_node_head *mask_rnhead;
+/*
+ * Work area -- the following pointers refer to 3 buffers of size
+ * max_keylen, allocated in this order in a block of memory malloc'ed
+ * by rn_init.
+ * rn_zeros and rn_ones are set in rn_init and used read-only afterwards.
+ * addmask_key is used read-write in rn_addmask and is not thread-safe.
+ */
+static char *rn_zeros, *rn_ones, *addmask_key;
+
+#define MKGet(m) { \
+ if (rn_mkfreelist) { \
+ m = rn_mkfreelist; \
+ rn_mkfreelist = (m)->rm_mklist; \
+ } else \
+ R_Malloc(m, struct radix_mask *, sizeof (struct radix_mask)); }
+
+#define MKFree(m) { (m)->rm_mklist = rn_mkfreelist; rn_mkfreelist = (m);}
+
+#define rn_masktop (mask_rnhead->rnh_treetop)
+
+static int rn_lexobetter(void *m_arg, void *n_arg);
+static struct radix_mask *
+ rn_new_radix_mask(struct radix_node *tt,
+ struct radix_mask *next);
+static int rn_satisfies_leaf(char *trial, struct radix_node *leaf,
+ int skip);
+
+/*
+ * The data structure for the keys is a radix tree with one-way
+ * branching removed. The index rn_bit at an internal node n represents a bit
+ * position to be tested. The tree is arranged so that all descendants
+ * of a node n have keys whose bits all agree up to position rn_bit - 1.
+ * (We say the index of n is rn_bit.)
+ *
+ * There is at least one descendant which has a one bit at position rn_bit,
+ * and at least one with a zero there.
+ *
+ * A route is determined by a pair of key and mask. We require that the
+ * bitwise logical AND of the key and mask be the key.
+ * We define the index of the route associated with the mask to be
+ * the first bit number in the mask where 0 occurs (with bit number 0
+ * representing the highest order bit).
+ *
+ * We say a mask is normal if every bit past the index of the mask is 0.
+ * If a node n has a descendant (k, m) with index(m) == index(n) == rn_bit,
+ * and m is a normal mask, then the route applies to every descendant of n.
+ * If index(m) < rn_bit, this implies that the last few bits of k
+ * before bit rn_bit are all 0 (and hence the same is true of every
+ * descendant of n), so the route applies to all descendants of the node
+ * as well.
+ *
+ * Similar logic shows that a non-normal mask m such that
+ * index(m) <= index(n) could potentially apply to many children of n.
+ * Thus, for each non-host route, we attach its mask to a list at an internal
+ * node as high in the tree as we can go.
+ *
+ * The present version of the code makes use of normal routes in short-
+ * circuiting an explicit mask and compare operation when testing whether
+ * a key satisfies a normal route, and also in remembering the unique leaf
+ * that governs a subtree.
+ */
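+
+/*
+ * A worked example of the terminology above: for the mask bytes
+ * 0xff 0xff 0xf0 0x00, the first 0 occurs at bit 20 (bit 0 being the
+ * highest order bit), so index(mask) = 20.  A key k paired with this
+ * mask must satisfy (k & mask) == k, i.e. k has no 1 bits at or past
+ * bit 20, and the mask is normal since every bit past bit 20 is 0.
+ */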
+
+/*
+ * Most of the functions in this code assume that the key/mask arguments
+ * are sockaddr-like structures, where the first byte is a u_char
+ * indicating the size of the entire structure.
+ *
+ * To make the assumption more explicit, we use the LEN() macro to access
+ * this field. It is safe to pass an expression with side effects
+ * to LEN() as the argument is evaluated only once.
+ * We cast the result to int as this is the dominant usage.
+ */
+#define LEN(x) ( (int) (*(const u_char *)(x)) )
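+
+/*
+ * Illustrative usage (hypothetical layout): for a sockaddr-like key
+ * whose bytes are { 0x08, 0x02, ... }, the first byte is the total
+ * structure length, so LEN(key) evaluates to 8.
+ */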
+
+/*
+ * XXX THIS NEEDS TO BE FIXED
+ * In the code, pointers to keys and masks are passed as either
+ * 'void *' (because callers pass pointers of various kinds), or
+ * 'caddr_t' (which is fine for pointer arithmetic, but not very
+ * clean when you dereference it to access data). Furthermore, caddr_t
+ * is really 'char *', while the natural type to operate on keys and
+ * masks would be 'u_char'. This mismatch requires a lot of casts and
+ * intermediate variables to adapt types, which clutter the code.
+ */
+
+/*
+ * Search the tree for a node matching the key.
+ */
+static struct radix_node *
+rn_search(v_arg, head)
+ void *v_arg;
+ struct radix_node *head;
+{
+ register struct radix_node *x;
+ register caddr_t v;
+
+ for (x = head, v = v_arg; x->rn_bit >= 0;) {
+ if (x->rn_bmask & v[x->rn_offset])
+ x = x->rn_right;
+ else
+ x = x->rn_left;
+ }
+ return (x);
+}
+
+/*
+ * Same as above, but with an additional mask.
+ * XXX note this function is used only once.
+ */
+static struct radix_node *
+rn_search_m(v_arg, head, m_arg)
+ struct radix_node *head;
+ void *v_arg, *m_arg;
+{
+ register struct radix_node *x;
+ register caddr_t v = v_arg, m = m_arg;
+
+ for (x = head; x->rn_bit >= 0;) {
+ if ((x->rn_bmask & m[x->rn_offset]) &&
+ (x->rn_bmask & v[x->rn_offset]))
+ x = x->rn_right;
+ else
+ x = x->rn_left;
+ }
+ return x;
+}
+
+int
+rn_refines(m_arg, n_arg)
+ void *m_arg, *n_arg;
+{
+ register caddr_t m = m_arg, n = n_arg;
+ register caddr_t lim, lim2 = lim = n + LEN(n);
+ int longer = LEN(n++) - LEN(m++);
+ int masks_are_equal = 1;
+
+ if (longer > 0)
+ lim -= longer;
+ while (n < lim) {
+ if (*n & ~(*m))
+ return 0;
+ if (*n++ != *m++)
+ masks_are_equal = 0;
+ }
+ while (n < lim2)
+ if (*n++)
+ return 0;
+ if (masks_are_equal && (longer < 0))
+ for (lim2 = m - longer; m < lim2; )
+ if (*m++)
+ return 1;
+ return (!masks_are_equal);
+}
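+
+/*
+ * A worked example (hypothetical key layout): with masks stored as
+ * { len, 0xff, 0xff, 0xff, 0x00 } (a /24) and
+ * { len, 0xff, 0xff, 0x00, 0x00 } (a /16), rn_refines(/24, /16)
+ * returns 1, since the /24 is strictly more specific, while
+ * rn_refines(/16, /24) returns 0.
+ */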
+
+struct radix_node *
+rn_lookup(v_arg, m_arg, head)
+ void *v_arg, *m_arg;
+ struct radix_node_head *head;
+{
+ register struct radix_node *x;
+ caddr_t netmask = 0;
+
+ if (m_arg) {
+ x = rn_addmask(m_arg, 1, head->rnh_treetop->rn_offset);
+ if (x == 0)
+ return (0);
+ netmask = x->rn_key;
+ }
+ x = rn_match(v_arg, head);
+ if (x && netmask) {
+ while (x && x->rn_mask != netmask)
+ x = x->rn_dupedkey;
+ }
+ return x;
+}
+
+static int
+rn_satisfies_leaf(trial, leaf, skip)
+ char *trial;
+ register struct radix_node *leaf;
+ int skip;
+{
+ register char *cp = trial, *cp2 = leaf->rn_key, *cp3 = leaf->rn_mask;
+ char *cplim;
+ int length = min(LEN(cp), LEN(cp2));
+
+ if (cp3 == NULL)
+ cp3 = rn_ones;
+ else
+ length = min(length, LEN(cp3));
+ cplim = cp + length; cp3 += skip; cp2 += skip;
+ for (cp += skip; cp < cplim; cp++, cp2++, cp3++)
+ if ((*cp ^ *cp2) & *cp3)
+ return 0;
+ return 1;
+}
+
+struct radix_node *
+rn_match(v_arg, head)
+ void *v_arg;
+ struct radix_node_head *head;
+{
+ caddr_t v = v_arg;
+ register struct radix_node *t = head->rnh_treetop, *x;
+ register caddr_t cp = v, cp2;
+ caddr_t cplim;
+ struct radix_node *saved_t, *top = t;
+ int off = t->rn_offset, vlen = LEN(cp), matched_off;
+ register int test, b, rn_bit;
+
+ /*
+ * Open code rn_search(v, top) to avoid overhead of extra
+ * subroutine call.
+ */
+ for (; t->rn_bit >= 0; ) {
+ if (t->rn_bmask & cp[t->rn_offset])
+ t = t->rn_right;
+ else
+ t = t->rn_left;
+ }
+ /*
+ * See if we match exactly as a host destination
+ * or at least learn how many bits match, for normal mask finesse.
+ *
+ * It doesn't hurt us to limit how many bytes to check
+ * to the length of the mask, since if it matches we had a genuine
+ * match and the leaf we have is the most specific one anyway;
+ * if it didn't match with a shorter length it would fail
+ * with a long one. This wins big for class B and C netmasks, which
+ * are probably the most common case...
+ */
+ if (t->rn_mask)
+ vlen = *(u_char *)t->rn_mask;
+ cp += off; cp2 = t->rn_key + off; cplim = v + vlen;
+ for (; cp < cplim; cp++, cp2++)
+ if (*cp != *cp2)
+ goto on1;
+ /*
+ * This extra grot is in case we are explicitly asked
+ * to look up the default. Ugh!
+ *
+ * Never return the root node itself, it seems to cause a
+ * lot of confusion.
+ */
+ if (t->rn_flags & RNF_ROOT)
+ t = t->rn_dupedkey;
+ return t;
+on1:
+ test = (*cp ^ *cp2) & 0xff; /* find first bit that differs */
+ for (b = 7; (test >>= 1) > 0;)
+ b--;
+ matched_off = cp - v;
+ b += matched_off << 3;
+ rn_bit = -1 - b;
+ /*
+ * If there is a host route in a duped-key chain, it will be first.
+ */
+ if ((saved_t = t)->rn_mask == 0)
+ t = t->rn_dupedkey;
+ for (; t; t = t->rn_dupedkey)
+ /*
+ * Even if we don't match exactly as a host,
+ * we may match if the leaf we wound up at is
+ * a route to a net.
+ */
+ if (t->rn_flags & RNF_NORMAL) {
+ if (rn_bit <= t->rn_bit)
+ return t;
+ } else if (rn_satisfies_leaf(v, t, matched_off))
+ return t;
+ t = saved_t;
+ /* start searching up the tree */
+ do {
+ register struct radix_mask *m;
+ t = t->rn_parent;
+ m = t->rn_mklist;
+ /*
+ * If non-contiguous masks ever become important
+ * we can restore the masking and open coding of
+ * the search and satisfaction test and put the
+ * calculation of "off" back before the "do".
+ */
+ while (m) {
+ if (m->rm_flags & RNF_NORMAL) {
+ if (rn_bit <= m->rm_bit)
+ return (m->rm_leaf);
+ } else {
+ off = min(t->rn_offset, matched_off);
+ x = rn_search_m(v, t, m->rm_mask);
+ while (x && x->rn_mask != m->rm_mask)
+ x = x->rn_dupedkey;
+ if (x && rn_satisfies_leaf(v, x, off))
+ return x;
+ }
+ m = m->rm_mklist;
+ }
+ } while (t != top);
+ return 0;
+}
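+
+/*
+ * A minimal sketch of how a caller might build a lookup key for
+ * rn_match().  This is illustrative only: lookup_inet is hypothetical
+ * and assumes the sockaddr_in definitions from <netinet/in.h>.  The
+ * key is sockaddr-like -- the first byte carries the total length read
+ * by LEN(), and the tree's rn_offset skips the non-address header
+ * bytes.
+ */
+#if 0	/* illustrative only */
+static struct radix_node *
+lookup_inet(struct radix_node_head *rnh, u_int32_t dst)
+{
+	struct sockaddr_in sin;
+
+	bzero(&sin, sizeof(sin));
+	sin.sin_len = sizeof(sin);		/* read by LEN() */
+	sin.sin_family = AF_INET;
+	sin.sin_addr.s_addr = dst;		/* network byte order */
+	return (rnh->rnh_matchaddr(&sin, rnh));
+}
+#endif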
+
+#ifdef RN_DEBUG
+int rn_nodenum;
+struct radix_node *rn_clist;
+int rn_saveinfo;
+int rn_debug = 1;
+#endif
+
+/*
+ * Whenever we add a new leaf to the tree, we also add a parent node,
+ * so we allocate them as an array of two elements: the first one must be
+ * the leaf (see RNTORT() in route.c), the second one is the parent.
+ * This routine initializes the relevant fields of the nodes, so that
+ * the leaf is the left child of the parent node, and both nodes have
+ * (almost) all fields filled as appropriate.
+ * (XXX some fields are left unset, see the '#if 0' section).
+ * The function returns a pointer to the parent node.
+ */
+
+static struct radix_node *
+rn_newpair(v, b, nodes)
+ void *v;
+ int b;
+ struct radix_node nodes[2];
+{
+ register struct radix_node *tt = nodes, *t = tt + 1;
+ t->rn_bit = b;
+ t->rn_bmask = 0x80 >> (b & 7);
+ t->rn_left = tt;
+ t->rn_offset = b >> 3;
+
+#if 0 /* XXX perhaps we should fill these fields as well. */
+ t->rn_parent = t->rn_right = NULL;
+
+ tt->rn_mask = NULL;
+ tt->rn_dupedkey = NULL;
+ tt->rn_bmask = 0;
+#endif
+ tt->rn_bit = -1;
+ tt->rn_key = (caddr_t)v;
+ tt->rn_parent = t;
+ tt->rn_flags = t->rn_flags = RNF_ACTIVE;
+ tt->rn_mklist = t->rn_mklist = 0;
+#ifdef RN_DEBUG
+ tt->rn_info = rn_nodenum++; t->rn_info = rn_nodenum++;
+ tt->rn_twin = t;
+ tt->rn_ybro = rn_clist;
+ rn_clist = tt;
+#endif
+ return t;
+}
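+
+/*
+ * A worked example of the bit arithmetic above: for b = 10,
+ * rn_offset = 10 >> 3 = 1 and rn_bmask = 0x80 >> (10 & 7) = 0x20, so
+ * the new internal node tests bit 10 of the key, i.e. bit 2 (counting
+ * from the high-order end) of byte 1.
+ */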
+
+static struct radix_node *
+rn_insert(v_arg, head, dupentry, nodes)
+ void *v_arg;
+ struct radix_node_head *head;
+ int *dupentry;
+ struct radix_node nodes[2];
+{
+ caddr_t v = v_arg;
+ struct radix_node *top = head->rnh_treetop;
+ int head_off = top->rn_offset, vlen = LEN(v);
+ register struct radix_node *t = rn_search(v_arg, top);
+ register caddr_t cp = v + head_off;
+ register int b;
+ struct radix_node *tt;
+ /*
+ * Find first bit at which v and t->rn_key differ
+ */
+ {
+ register caddr_t cp2 = t->rn_key + head_off;
+ register int cmp_res;
+ caddr_t cplim = v + vlen;
+
+ while (cp < cplim)
+ if (*cp2++ != *cp++)
+ goto on1;
+ *dupentry = 1;
+ return t;
+on1:
+ *dupentry = 0;
+ cmp_res = (cp[-1] ^ cp2[-1]) & 0xff;
+ for (b = (cp - v) << 3; cmp_res; b--)
+ cmp_res >>= 1;
+ }
+ {
+ register struct radix_node *p, *x = top;
+ cp = v;
+ do {
+ p = x;
+ if (cp[x->rn_offset] & x->rn_bmask)
+ x = x->rn_right;
+ else
+ x = x->rn_left;
+ } while (b > (unsigned) x->rn_bit);
+ /* x->rn_bit < b && x->rn_bit >= 0 */
+#ifdef RN_DEBUG
+ if (rn_debug)
+ log(LOG_DEBUG, "rn_insert: Going In:\n"), traverse(p);
+#endif
+ t = rn_newpair(v_arg, b, nodes);
+ tt = t->rn_left;
+ if ((cp[p->rn_offset] & p->rn_bmask) == 0)
+ p->rn_left = t;
+ else
+ p->rn_right = t;
+ x->rn_parent = t;
+ t->rn_parent = p; /* frees x, p as temp vars below */
+ if ((cp[t->rn_offset] & t->rn_bmask) == 0) {
+ t->rn_right = x;
+ } else {
+ t->rn_right = tt;
+ t->rn_left = x;
+ }
+#ifdef RN_DEBUG
+ if (rn_debug)
+ log(LOG_DEBUG, "rn_insert: Coming Out:\n"), traverse(p);
+#endif
+ }
+ return (tt);
+}
+
+struct radix_node *
+rn_addmask(n_arg, search, skip)
+ int search, skip;
+ void *n_arg;
+{
+ caddr_t netmask = (caddr_t)n_arg;
+ register struct radix_node *x;
+ register caddr_t cp, cplim;
+ register int b = 0, mlen, j;
+ int maskduplicated, m0, isnormal;
+ struct radix_node *saved_x;
+ static int last_zeroed = 0;
+
+ if ((mlen = LEN(netmask)) > max_keylen)
+ mlen = max_keylen;
+ if (skip == 0)
+ skip = 1;
+ if (mlen <= skip)
+ return (mask_rnhead->rnh_nodes);
+ if (skip > 1)
+ bcopy(rn_ones + 1, addmask_key + 1, skip - 1);
+ if ((m0 = mlen) > skip)
+ bcopy(netmask + skip, addmask_key + skip, mlen - skip);
+ /*
+ * Trim trailing zeroes.
+ */
+ for (cp = addmask_key + mlen; (cp > addmask_key) && cp[-1] == 0;)
+ cp--;
+ mlen = cp - addmask_key;
+ if (mlen <= skip) {
+ if (m0 >= last_zeroed)
+ last_zeroed = mlen;
+ return (mask_rnhead->rnh_nodes);
+ }
+ if (m0 < last_zeroed)
+ bzero(addmask_key + m0, last_zeroed - m0);
+ *addmask_key = last_zeroed = mlen;
+ x = rn_search(addmask_key, rn_masktop);
+ if (bcmp(addmask_key, x->rn_key, mlen) != 0)
+ x = 0;
+ if (x || search)
+ return (x);
+ R_Zalloc(x, struct radix_node *, max_keylen + 2 * sizeof (*x));
+ if ((saved_x = x) == 0)
+ return (0);
+ netmask = cp = (caddr_t)(x + 2);
+ bcopy(addmask_key, cp, mlen);
+ x = rn_insert(cp, mask_rnhead, &maskduplicated, x);
+ if (maskduplicated) {
+ log(LOG_ERR, "rn_addmask: mask impossibly already in tree");
+ Free(saved_x);
+ return (x);
+ }
+ /*
+ * Calculate index of mask, and check for normalcy.
+ * First find the first byte with a 0 bit, then if there are
+ * more bits left (remember we already trimmed the trailing 0's),
+ * the pattern must be one of those in normal_chars[], or we have
+ * a non-contiguous mask.
+ */
+ cplim = netmask + mlen;
+ isnormal = 1;
+ for (cp = netmask + skip; (cp < cplim) && *(u_char *)cp == 0xff;)
+ cp++;
+ if (cp != cplim) {
+ static char normal_chars[] = {
+ 0, 0x80, 0xc0, 0xe0, 0xf0, 0xf8, 0xfc, 0xfe, 0xff};
+
+ for (j = 0x80; (j & *cp) != 0; j >>= 1)
+ b++;
+ if (*cp != normal_chars[b] || cp != (cplim - 1))
+ isnormal = 0;
+ }
+ b += (cp - netmask) << 3;
+ x->rn_bit = -1 - b;
+ if (isnormal)
+ x->rn_flags |= RNF_NORMAL;
+ return (x);
+}
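+
+/*
+ * A worked example, assuming the address field of the mask starts at
+ * byte offset 4 as with a sockaddr_in: for the IPv4 /20 mask
+ * 255.255.240.0, the scan above stops at the byte 0xf0, which
+ * contributes b = 4 leading 1 bits and matches normal_chars[4], so
+ * the mask is normal; folding in the byte offset,
+ * b = 4 + (6 << 3) = 52 and x->rn_bit = -1 - 52 = -53.
+ */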
+
+static int /* XXX: arbitrary ordering for non-contiguous masks */
+rn_lexobetter(m_arg, n_arg)
+ void *m_arg, *n_arg;
+{
+ register u_char *mp = m_arg, *np = n_arg, *lim;
+
+ if (LEN(mp) > LEN(np))
+ return 1; /* not really, but need to check longer one first */
+ if (LEN(mp) == LEN(np))
+ for (lim = mp + LEN(mp); mp < lim;)
+ if (*mp++ > *np++)
+ return 1;
+ return 0;
+}
+
+static struct radix_mask *
+rn_new_radix_mask(tt, next)
+ register struct radix_node *tt;
+ register struct radix_mask *next;
+{
+ register struct radix_mask *m;
+
+ MKGet(m);
+ if (m == 0) {
+ log(LOG_ERR, "Mask for route not entered\n");
+ return (0);
+ }
+ bzero(m, sizeof *m);
+ m->rm_bit = tt->rn_bit;
+ m->rm_flags = tt->rn_flags;
+ if (tt->rn_flags & RNF_NORMAL)
+ m->rm_leaf = tt;
+ else
+ m->rm_mask = tt->rn_mask;
+ m->rm_mklist = next;
+ tt->rn_mklist = m;
+ return m;
+}
+
+struct radix_node *
+rn_addroute(v_arg, n_arg, head, treenodes)
+ void *v_arg, *n_arg;
+ struct radix_node_head *head;
+ struct radix_node treenodes[2];
+{
+ caddr_t v = (caddr_t)v_arg, netmask = (caddr_t)n_arg;
+ register struct radix_node *t, *x = 0, *tt;
+ struct radix_node *saved_tt, *top = head->rnh_treetop;
+ short b = 0, b_leaf = 0;
+ int keyduplicated;
+ caddr_t mmask;
+ struct radix_mask *m, **mp;
+
+ /*
+ * In dealing with non-contiguous masks, there may be
+ * many different routes which have the same mask.
+ * We will find it useful to have a unique pointer to
+ * the mask, both to avoid duplicate references at
+ * nodes and possibly to save time in calculating indices.
+ */
+ if (netmask) {
+ if ((x = rn_addmask(netmask, 0, top->rn_offset)) == 0)
+ return (0);
+ b_leaf = x->rn_bit;
+ b = -1 - x->rn_bit;
+ netmask = x->rn_key;
+ }
+ /*
+ * Deal with duplicated keys: attach node to previous instance
+ */
+ saved_tt = tt = rn_insert(v, head, &keyduplicated, treenodes);
+ if (keyduplicated) {
+ for (t = tt; tt; t = tt, tt = tt->rn_dupedkey) {
+#ifdef RADIX_MPATH
+ /* permit multipath, if enabled for the family */
+ if (rn_mpath_capable(head) && netmask == tt->rn_mask) {
+ /*
+ * go down to the end of multipaths, so that
+ * new entry goes into the end of rn_dupedkey
+ * chain.
+ */
+ do {
+ t = tt;
+ tt = tt->rn_dupedkey;
+ } while (tt && t->rn_mask == tt->rn_mask);
+ break;
+ }
+#endif
+ if (tt->rn_mask == netmask)
+ return (0);
+ if (netmask == 0 ||
+ (tt->rn_mask &&
+ ((b_leaf < tt->rn_bit) /* index(netmask) > node */
+ || rn_refines(netmask, tt->rn_mask)
+ || rn_lexobetter(netmask, tt->rn_mask))))
+ break;
+ }
+ /*
+ * If the mask is not duplicated, we wouldn't
+ * find it among possible duplicate key entries
+ * anyway, so the above test doesn't hurt.
+ *
+ * We sort the masks for a duplicated key the same way as
+ * in a masklist -- most specific to least specific.
+ * This may require the unfortunate nuisance of relocating
+ * the head of the list.
+ *
+ * We also reverse, or doubly link, the list through the
+ * parent pointer.
+ */
+ if (tt == saved_tt) {
+ struct radix_node *xx = x;
+ /* link in at head of list */
+ (tt = treenodes)->rn_dupedkey = t;
+ tt->rn_flags = t->rn_flags;
+ tt->rn_parent = x = t->rn_parent;
+ t->rn_parent = tt; /* parent */
+ if (x->rn_left == t)
+ x->rn_left = tt;
+ else
+ x->rn_right = tt;
+ saved_tt = tt; x = xx;
+ } else {
+ (tt = treenodes)->rn_dupedkey = t->rn_dupedkey;
+ t->rn_dupedkey = tt;
+ tt->rn_parent = t; /* parent */
+ if (tt->rn_dupedkey) /* parent */
+ tt->rn_dupedkey->rn_parent = tt; /* parent */
+ }
+#ifdef RN_DEBUG
+ t=tt+1; tt->rn_info = rn_nodenum++; t->rn_info = rn_nodenum++;
+ tt->rn_twin = t; tt->rn_ybro = rn_clist; rn_clist = tt;
+#endif
+ tt->rn_key = (caddr_t) v;
+ tt->rn_bit = -1;
+ tt->rn_flags = RNF_ACTIVE;
+ }
+ /*
+ * Put mask in tree.
+ */
+ if (netmask) {
+ tt->rn_mask = netmask;
+ tt->rn_bit = x->rn_bit;
+ tt->rn_flags |= x->rn_flags & RNF_NORMAL;
+ }
+ t = saved_tt->rn_parent;
+ if (keyduplicated)
+ goto on2;
+ b_leaf = -1 - t->rn_bit;
+ if (t->rn_right == saved_tt)
+ x = t->rn_left;
+ else
+ x = t->rn_right;
+ /* Promote general routes from below */
+ if (x->rn_bit < 0) {
+ for (mp = &t->rn_mklist; x; x = x->rn_dupedkey)
+ if (x->rn_mask && (x->rn_bit >= b_leaf) && x->rn_mklist == 0) {
+ *mp = m = rn_new_radix_mask(x, 0);
+ if (m)
+ mp = &m->rm_mklist;
+ }
+ } else if (x->rn_mklist) {
+ /*
+ * Skip over masks whose index is > that of new node
+ */
+ for (mp = &x->rn_mklist; (m = *mp); mp = &m->rm_mklist)
+ if (m->rm_bit >= b_leaf)
+ break;
+ t->rn_mklist = m; *mp = 0;
+ }
+on2:
+ /* Add new route to highest possible ancestor's list */
+ if ((netmask == 0) || (b > t->rn_bit ))
+ return tt; /* can't lift at all */
+ b_leaf = tt->rn_bit;
+ do {
+ x = t;
+ t = t->rn_parent;
+ } while (b <= t->rn_bit && x != top);
+ /*
+ * Search through routes associated with node to
+ * insert new route according to index.
+ * Need same criteria as when sorting dupedkeys to avoid
+ * double loop on deletion.
+ */
+ for (mp = &x->rn_mklist; (m = *mp); mp = &m->rm_mklist) {
+ if (m->rm_bit < b_leaf)
+ continue;
+ if (m->rm_bit > b_leaf)
+ break;
+ if (m->rm_flags & RNF_NORMAL) {
+ mmask = m->rm_leaf->rn_mask;
+ if (tt->rn_flags & RNF_NORMAL) {
+#if !defined(RADIX_MPATH)
+ log(LOG_ERR,
+ "Non-unique normal route, mask not entered\n");
+#endif
+ return tt;
+ }
+ } else
+ mmask = m->rm_mask;
+ if (mmask == netmask) {
+ m->rm_refs++;
+ tt->rn_mklist = m;
+ return tt;
+ }
+ if (rn_refines(netmask, mmask)
+ || rn_lexobetter(netmask, mmask))
+ break;
+ }
+ *mp = rn_new_radix_mask(tt, *mp);
+ return tt;
+}
+
+struct radix_node *
+rn_delete(v_arg, netmask_arg, head)
+ void *v_arg, *netmask_arg;
+ struct radix_node_head *head;
+{
+ register struct radix_node *t, *p, *x, *tt;
+ struct radix_mask *m, *saved_m, **mp;
+ struct radix_node *dupedkey, *saved_tt, *top;
+ caddr_t v, netmask;
+ int b, head_off, vlen;
+
+ v = v_arg;
+ netmask = netmask_arg;
+ x = head->rnh_treetop;
+ tt = rn_search(v, x);
+ head_off = x->rn_offset;
+ vlen = LEN(v);
+ saved_tt = tt;
+ top = x;
+ if (tt == 0 ||
+ bcmp(v + head_off, tt->rn_key + head_off, vlen - head_off))
+ return (0);
+ /*
+ * Delete our route from mask lists.
+ */
+ if (netmask) {
+ if ((x = rn_addmask(netmask, 1, head_off)) == 0)
+ return (0);
+ netmask = x->rn_key;
+ while (tt->rn_mask != netmask)
+ if ((tt = tt->rn_dupedkey) == 0)
+ return (0);
+ }
+ if (tt->rn_mask == 0 || (saved_m = m = tt->rn_mklist) == 0)
+ goto on1;
+ if (tt->rn_flags & RNF_NORMAL) {
+ if (m->rm_leaf != tt || m->rm_refs > 0) {
+ log(LOG_ERR, "rn_delete: inconsistent annotation\n");
+ return 0; /* dangling ref could cause disaster */
+ }
+ } else {
+ if (m->rm_mask != tt->rn_mask) {
+ log(LOG_ERR, "rn_delete: inconsistent annotation\n");
+ goto on1;
+ }
+ if (--m->rm_refs >= 0)
+ goto on1;
+ }
+ b = -1 - tt->rn_bit;
+ t = saved_tt->rn_parent;
+ if (b > t->rn_bit)
+ goto on1; /* Wasn't lifted at all */
+ do {
+ x = t;
+ t = t->rn_parent;
+ } while (b <= t->rn_bit && x != top);
+ for (mp = &x->rn_mklist; (m = *mp); mp = &m->rm_mklist)
+ if (m == saved_m) {
+ *mp = m->rm_mklist;
+ MKFree(m);
+ break;
+ }
+ if (m == 0) {
+ log(LOG_ERR, "rn_delete: couldn't find our annotation\n");
+ if (tt->rn_flags & RNF_NORMAL)
+ return (0); /* Dangling ref to us */
+ }
+on1:
+ /*
+ * Eliminate us from tree
+ */
+ if (tt->rn_flags & RNF_ROOT)
+ return (0);
+#ifdef RN_DEBUG
+ /* Get us out of the creation list */
+ for (t = rn_clist; t && t->rn_ybro != tt; t = t->rn_ybro) {}
+ if (t) t->rn_ybro = tt->rn_ybro;
+#endif
+ t = tt->rn_parent;
+ dupedkey = saved_tt->rn_dupedkey;
+ if (dupedkey) {
+ /*
+ * Here, tt is the deletion target and
+ * saved_tt is the head of the dupekey chain.
+ */
+ if (tt == saved_tt) {
+ /* remove from head of chain */
+ x = dupedkey; x->rn_parent = t;
+ if (t->rn_left == tt)
+ t->rn_left = x;
+ else
+ t->rn_right = x;
+ } else {
+ /* find node in front of tt on the chain */
+ for (x = p = saved_tt; p && p->rn_dupedkey != tt;)
+ p = p->rn_dupedkey;
+ if (p) {
+ p->rn_dupedkey = tt->rn_dupedkey;
+ if (tt->rn_dupedkey) /* parent */
+ tt->rn_dupedkey->rn_parent = p;
+ /* parent */
+ } else log(LOG_ERR, "rn_delete: couldn't find us\n");
+ }
+ t = tt + 1;
+ if (t->rn_flags & RNF_ACTIVE) {
+#ifndef RN_DEBUG
+ *++x = *t;
+ p = t->rn_parent;
+#else
+ b = t->rn_info;
+ *++x = *t;
+ t->rn_info = b;
+ p = t->rn_parent;
+#endif
+ if (p->rn_left == t)
+ p->rn_left = x;
+ else
+ p->rn_right = x;
+ x->rn_left->rn_parent = x;
+ x->rn_right->rn_parent = x;
+ }
+ goto out;
+ }
+ if (t->rn_left == tt)
+ x = t->rn_right;
+ else
+ x = t->rn_left;
+ p = t->rn_parent;
+ if (p->rn_right == t)
+ p->rn_right = x;
+ else
+ p->rn_left = x;
+ x->rn_parent = p;
+ /*
+ * Demote routes attached to us.
+ */
+ if (t->rn_mklist) {
+ if (x->rn_bit >= 0) {
+ for (mp = &x->rn_mklist; (m = *mp);)
+ mp = &m->rm_mklist;
+ *mp = t->rn_mklist;
+ } else {
+ /* If there are any key,mask pairs in a sibling
+ duped-key chain, some subset will appear sorted
+ in the same order attached to our mklist */
+ for (m = t->rn_mklist; m && x; x = x->rn_dupedkey)
+ if (m == x->rn_mklist) {
+ struct radix_mask *mm = m->rm_mklist;
+ x->rn_mklist = 0;
+ if (--(m->rm_refs) < 0)
+ MKFree(m);
+ m = mm;
+ }
+ if (m)
+ log(LOG_ERR,
+ "rn_delete: Orphaned Mask %p at %p\n",
+ m, x);
+ }
+ }
+ /*
+ * We may be holding an active internal node in the tree.
+ */
+ x = tt + 1;
+ if (t != x) {
+#ifndef RN_DEBUG
+ *t = *x;
+#else
+ b = t->rn_info;
+ *t = *x;
+ t->rn_info = b;
+#endif
+ t->rn_left->rn_parent = t;
+ t->rn_right->rn_parent = t;
+ p = x->rn_parent;
+ if (p->rn_left == x)
+ p->rn_left = t;
+ else
+ p->rn_right = t;
+ }
+out:
+ tt->rn_flags &= ~RNF_ACTIVE;
+ tt[1].rn_flags &= ~RNF_ACTIVE;
+ return (tt);
+}
+
+/*
+ * This is the same as rn_walktree() except for the parameters and the
+ * exit.
+ */
+static int
+rn_walktree_from(h, a, m, f, w)
+ struct radix_node_head *h;
+ void *a, *m;
+ walktree_f_t *f;
+ void *w;
+{
+ int error;
+ struct radix_node *base, *next;
+ u_char *xa = (u_char *)a;
+ u_char *xm = (u_char *)m;
+ register struct radix_node *rn, *last = 0 /* shut up gcc */;
+ int stopping = 0;
+ int lastb;
+
+ /*
+ * rn_search_m is sort-of-open-coded here. We cannot use the
+ * function because we need to keep track of the last node seen.
+ */
+ /* printf("about to search\n"); */
+ for (rn = h->rnh_treetop; rn->rn_bit >= 0; ) {
+ last = rn;
+ /* printf("rn_bit %d, rn_bmask %x, xm[rn_offset] %x\n",
+ rn->rn_bit, rn->rn_bmask, xm[rn->rn_offset]); */
+ if (!(rn->rn_bmask & xm[rn->rn_offset])) {
+ break;
+ }
+ if (rn->rn_bmask & xa[rn->rn_offset]) {
+ rn = rn->rn_right;
+ } else {
+ rn = rn->rn_left;
+ }
+ }
+ /* printf("done searching\n"); */
+
+ /*
+ * Two cases: either we stepped off the end of our mask,
+ * in which case last == rn, or we reached a leaf, in which
+ * case we want to start from the last node we looked at.
+ * Either way, last is the node we want to start from.
+ */
+ rn = last;
+ lastb = rn->rn_bit;
+
+ /* printf("rn %p, lastb %d\n", rn, lastb);*/
+
+ /*
+ * This gets complicated because we may delete the node
+ * while applying the function f to it, so we need to calculate
+ * the successor node in advance.
+ */
+ while (rn->rn_bit >= 0)
+ rn = rn->rn_left;
+
+ while (!stopping) {
+ /* printf("node %p (%d)\n", rn, rn->rn_bit); */
+ base = rn;
+ /* If at right child go back up, otherwise, go right */
+ while (rn->rn_parent->rn_right == rn
+ && !(rn->rn_flags & RNF_ROOT)) {
+ rn = rn->rn_parent;
+
+ /* if went up beyond last, stop */
+ if (rn->rn_bit <= lastb) {
+ stopping = 1;
+ /* printf("up too far\n"); */
+ /*
+ * XXX we should jump to the 'Process leaves'
+ * part, because the values of 'rn' and 'next'
+ * we compute will not be used. Not a big deal
+ * because this loop will terminate, but it is
+ * inefficient and hard to understand!
+ */
+ }
+ }
+
+ /*
+		 * At the top of the tree there is no need to traverse the
+		 * right half; this prevents traversal of the entire tree in
+		 * the case of the default route.
+ */
+ if (rn->rn_parent->rn_flags & RNF_ROOT)
+ stopping = 1;
+
+ /* Find the next *leaf* since next node might vanish, too */
+ for (rn = rn->rn_parent->rn_right; rn->rn_bit >= 0;)
+ rn = rn->rn_left;
+ next = rn;
+ /* Process leaves */
+ while ((rn = base) != 0) {
+ base = rn->rn_dupedkey;
+ /* printf("leaf %p\n", rn); */
+ if (!(rn->rn_flags & RNF_ROOT)
+ && (error = (*f)(rn, w)))
+ return (error);
+ }
+ rn = next;
+
+ if (rn->rn_flags & RNF_ROOT) {
+ /* printf("root, stopping"); */
+ stopping = 1;
+ }
+
+ }
+ return 0;
+}
+
+static int
+rn_walktree(h, f, w)
+ struct radix_node_head *h;
+ walktree_f_t *f;
+ void *w;
+{
+ int error;
+ struct radix_node *base, *next;
+ register struct radix_node *rn = h->rnh_treetop;
+ /*
+ * This gets complicated because we may delete the node
+ * while applying the function f to it, so we need to calculate
+ * the successor node in advance.
+ */
+
+ /* First time through node, go left */
+ while (rn->rn_bit >= 0)
+ rn = rn->rn_left;
+ for (;;) {
+ base = rn;
+ /* If at right child go back up, otherwise, go right */
+ while (rn->rn_parent->rn_right == rn
+ && (rn->rn_flags & RNF_ROOT) == 0)
+ rn = rn->rn_parent;
+ /* Find the next *leaf* since next node might vanish, too */
+ for (rn = rn->rn_parent->rn_right; rn->rn_bit >= 0;)
+ rn = rn->rn_left;
+ next = rn;
+ /* Process leaves */
+ while ((rn = base)) {
+ base = rn->rn_dupedkey;
+ if (!(rn->rn_flags & RNF_ROOT)
+ && (error = (*f)(rn, w)))
+ return (error);
+ }
+ rn = next;
+ if (rn->rn_flags & RNF_ROOT)
+ return (0);
+ }
+ /* NOTREACHED */
+}
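+
+/*
+ * A hypothetical sketch of a walker callback as consumed by
+ * rn_walktree() above; the convention is to return 0 to continue the
+ * walk and non-zero to abort it with that error.  count_leaves is
+ * illustrative, not part of this file.
+ */
+#if 0	/* illustrative only */
+static int
+count_leaves(struct radix_node *rn, void *arg)
+{
+	int *counter = arg;
+
+	(*counter)++;
+	return (0);
+}
+/* usage: error = rnh->rnh_walktree(rnh, count_leaves, &count); */
+#endif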
+
+/*
+ * Allocate and initialize an empty tree. This has 3 nodes, which are
+ * part of the radix_node_head (in the order <left,root,right>) and are
+ * marked RNF_ROOT so they cannot be freed.
+ * The leaves have all-zero and all-one keys, with significant
+ * bits starting at 'off'.
+ * Return 1 on success, 0 on error.
+ */
+int
+rn_inithead(head, off)
+ void **head;
+ int off;
+{
+ register struct radix_node_head *rnh;
+ register struct radix_node *t, *tt, *ttt;
+ if (*head)
+ return (1);
+ R_Zalloc(rnh, struct radix_node_head *, sizeof (*rnh));
+ if (rnh == 0)
+ return (0);
+#ifdef _KERNEL
+ RADIX_NODE_HEAD_LOCK_INIT(rnh);
+#endif
+ *head = rnh;
+ t = rn_newpair(rn_zeros, off, rnh->rnh_nodes);
+ ttt = rnh->rnh_nodes + 2;
+ t->rn_right = ttt;
+ t->rn_parent = t;
+ tt = t->rn_left; /* ... which in turn is rnh->rnh_nodes */
+ tt->rn_flags = t->rn_flags = RNF_ROOT | RNF_ACTIVE;
+ tt->rn_bit = -1 - off;
+ *ttt = *tt;
+ ttt->rn_key = rn_ones;
+ rnh->rnh_addaddr = rn_addroute;
+ rnh->rnh_deladdr = rn_delete;
+ rnh->rnh_matchaddr = rn_match;
+ rnh->rnh_lookup = rn_lookup;
+ rnh->rnh_walktree = rn_walktree;
+ rnh->rnh_walktree_from = rn_walktree_from;
+ rnh->rnh_treetop = t;
+ return (1);
+}
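+
+/*
+ * A minimal end-to-end usage sketch (radix_demo is hypothetical, not
+ * part of this file): initialize the subsystem, create a tree whose
+ * significant bits start past a one-byte length header, then add and
+ * look up a single entry through the head's function pointers.
+ */
+#if 0	/* illustrative only */
+static void
+radix_demo(void)
+{
+	static struct radix_node nodes[2];
+	static u_char key[5]  = { 5, 10, 1, 2, 3 };		/* len + 4 bytes */
+	static u_char mask[5] = { 5, 0xff, 0xff, 0xff, 0x00 };	/* a /24 */
+	struct radix_node_head *rnh = NULL;
+
+	rn_init(5);				/* max_keylen in bytes */
+	if (rn_inithead((void **)&rnh, 8) == 0)	/* bit offset past len byte */
+		return;
+	rnh->rnh_addaddr(key, mask, rnh, nodes);
+	(void)rnh->rnh_matchaddr(key, rnh);
+}
+#endif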
+
+int
+rn_detachhead(void **head)
+{
+ struct radix_node_head *rnh;
+
+ KASSERT((head != NULL && *head != NULL),
+ ("%s: head already freed", __func__));
+ rnh = *head;
+
+ /* Free <left,root,right> nodes. */
+ Free(rnh);
+
+ *head = NULL;
+ return (1);
+}
+
+void
+rn_init(int maxk)
+{
+ char *cp, *cplim;
+
+ max_keylen = maxk;
+ if (max_keylen == 0) {
+ log(LOG_ERR,
+ "rn_init: radix functions require max_keylen be set\n");
+ return;
+ }
+ R_Malloc(rn_zeros, char *, 3 * max_keylen);
+ if (rn_zeros == NULL)
+ panic("rn_init");
+ bzero(rn_zeros, 3 * max_keylen);
+ rn_ones = cp = rn_zeros + max_keylen;
+ addmask_key = cplim = rn_ones + max_keylen;
+ while (cp < cplim)
+ *cp++ = -1;
+ if (rn_inithead((void **)(void *)&mask_rnhead, 0) == 0)
+ panic("rn_init 2");
+}
diff --git a/rtems/freebsd/net/radix.h b/rtems/freebsd/net/radix.h
new file mode 100644
index 00000000..19512469
--- /dev/null
+++ b/rtems/freebsd/net/radix.h
@@ -0,0 +1,176 @@
+/*-
+ * Copyright (c) 1988, 1989, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)radix.h 8.2 (Berkeley) 10/31/94
+ * $FreeBSD$
+ */
+
+#ifndef _RADIX_HH_
+#define _RADIX_HH_
+
+#ifdef _KERNEL
+#include <rtems/freebsd/sys/_lock.h>
+#include <rtems/freebsd/sys/_mutex.h>
+#include <rtems/freebsd/sys/_rwlock.h>
+#endif
+
+#ifdef MALLOC_DECLARE
+MALLOC_DECLARE(M_RTABLE);
+#endif
+
+/*
+ * Radix search tree node layout.
+ */
+
+struct radix_node {
+ struct radix_mask *rn_mklist; /* list of masks contained in subtree */
+ struct radix_node *rn_parent; /* parent */
+ short rn_bit; /* bit offset; -1-index(netmask) */
+	char	rn_bmask;		/* node: mask for bit test */
+ u_char rn_flags; /* enumerated next */
+#define RNF_NORMAL 1 /* leaf contains normal route */
+#define RNF_ROOT 2 /* leaf is root leaf for tree */
+#define RNF_ACTIVE 4 /* This node is alive (for rtfree) */
+ union {
+ struct { /* leaf only data: */
+ caddr_t rn_Key; /* object of search */
+ caddr_t rn_Mask; /* netmask, if present */
+ struct radix_node *rn_Dupedkey;
+ } rn_leaf;
+ struct { /* node only data: */
+ int rn_Off; /* where to start compare */
+ struct radix_node *rn_L;/* progeny */
+ struct radix_node *rn_R;/* progeny */
+ } rn_node;
+ } rn_u;
+#ifdef RN_DEBUG
+ int rn_info;
+ struct radix_node *rn_twin;
+ struct radix_node *rn_ybro;
+#endif
+};
+
+#define rn_dupedkey rn_u.rn_leaf.rn_Dupedkey
+#define rn_key rn_u.rn_leaf.rn_Key
+#define rn_mask rn_u.rn_leaf.rn_Mask
+#define rn_offset rn_u.rn_node.rn_Off
+#define rn_left rn_u.rn_node.rn_L
+#define rn_right rn_u.rn_node.rn_R
+
+/*
+ * Annotations to tree concerning potential routes applying to subtrees.
+ */
+
+struct radix_mask {
+ short rm_bit; /* bit offset; -1-index(netmask) */
+ char rm_unused; /* cf. rn_bmask */
+ u_char rm_flags; /* cf. rn_flags */
+ struct radix_mask *rm_mklist; /* more masks to try */
+ union {
+ caddr_t rmu_mask; /* the mask */
+ struct radix_node *rmu_leaf; /* for normal routes */
+ } rm_rmu;
+ int rm_refs; /* # of references to this struct */
+};
+
+#define rm_mask rm_rmu.rmu_mask
+#define rm_leaf rm_rmu.rmu_leaf /* extra field would make 32 bytes */
+
+typedef int walktree_f_t(struct radix_node *, void *);
+
+struct radix_node_head {
+ struct radix_node *rnh_treetop;
+ int rnh_addrsize; /* permit, but not require fixed keys */
+ int rnh_pktsize; /* permit, but not require fixed keys */
+ struct radix_node *(*rnh_addaddr) /* add based on sockaddr */
+ (void *v, void *mask,
+ struct radix_node_head *head, struct radix_node nodes[]);
+ struct radix_node *(*rnh_addpkt) /* add based on packet hdr */
+ (void *v, void *mask,
+ struct radix_node_head *head, struct radix_node nodes[]);
+ struct radix_node *(*rnh_deladdr) /* remove based on sockaddr */
+ (void *v, void *mask, struct radix_node_head *head);
+ struct radix_node *(*rnh_delpkt) /* remove based on packet hdr */
+ (void *v, void *mask, struct radix_node_head *head);
+ struct radix_node *(*rnh_matchaddr) /* locate based on sockaddr */
+ (void *v, struct radix_node_head *head);
+ struct radix_node *(*rnh_lookup) /* locate based on sockaddr */
+ (void *v, void *mask, struct radix_node_head *head);
+ struct radix_node *(*rnh_matchpkt) /* locate based on packet hdr */
+ (void *v, struct radix_node_head *head);
+ int (*rnh_walktree) /* traverse tree */
+ (struct radix_node_head *head, walktree_f_t *f, void *w);
+ int (*rnh_walktree_from) /* traverse tree below a */
+ (struct radix_node_head *head, void *a, void *m,
+ walktree_f_t *f, void *w);
+ void (*rnh_close) /* do something when the last ref drops */
+ (struct radix_node *rn, struct radix_node_head *head);
+ struct radix_node rnh_nodes[3]; /* empty tree for common case */
+	int	rnh_multipath;		/* multipath capable? */
+#ifdef _KERNEL
+ struct rwlock rnh_lock; /* locks entire radix tree */
+#endif
+};
+
+#ifndef _KERNEL
+#define R_Malloc(p, t, n) (p = (t) malloc((unsigned int)(n)))
+#define R_Zalloc(p, t, n) (p = (t) calloc(1,(unsigned int)(n)))
+#define Free(p) free((char *)p);
+#else
+#define R_Malloc(p, t, n) (p = (t) malloc((unsigned long)(n), M_RTABLE, M_NOWAIT))
+#define R_Zalloc(p, t, n) (p = (t) malloc((unsigned long)(n), M_RTABLE, M_NOWAIT | M_ZERO))
+#define Free(p) free((caddr_t)p, M_RTABLE);
+
+#define RADIX_NODE_HEAD_LOCK_INIT(rnh) \
+ rw_init_flags(&(rnh)->rnh_lock, "radix node head", 0)
+#define RADIX_NODE_HEAD_LOCK(rnh) rw_wlock(&(rnh)->rnh_lock)
+#define RADIX_NODE_HEAD_UNLOCK(rnh) rw_wunlock(&(rnh)->rnh_lock)
+#define RADIX_NODE_HEAD_RLOCK(rnh) rw_rlock(&(rnh)->rnh_lock)
+#define RADIX_NODE_HEAD_RUNLOCK(rnh) rw_runlock(&(rnh)->rnh_lock)
+#define RADIX_NODE_HEAD_LOCK_TRY_UPGRADE(rnh) rw_try_upgrade(&(rnh)->rnh_lock)
+
+
+#define RADIX_NODE_HEAD_DESTROY(rnh) rw_destroy(&(rnh)->rnh_lock)
+#define RADIX_NODE_HEAD_LOCK_ASSERT(rnh) rw_assert(&(rnh)->rnh_lock, RA_LOCKED)
+#define RADIX_NODE_HEAD_WLOCK_ASSERT(rnh) rw_assert(&(rnh)->rnh_lock, RA_WLOCKED)
+#endif /* _KERNEL */
+
+void rn_init(int);
+int rn_inithead(void **, int);
+int rn_detachhead(void **);
+int rn_refines(void *, void *);
+struct radix_node
+ *rn_addmask(void *, int, int),
+ *rn_addroute (void *, void *, struct radix_node_head *,
+ struct radix_node [2]),
+ *rn_delete(void *, void *, struct radix_node_head *),
+ *rn_lookup (void *v_arg, void *m_arg,
+ struct radix_node_head *head),
+ *rn_match(void *, struct radix_node_head *);
+
+#endif /* _RADIX_HH_ */
diff --git a/rtems/freebsd/net/radix_mpath.c b/rtems/freebsd/net/radix_mpath.c
new file mode 100644
index 00000000..55f31b1a
--- /dev/null
+++ b/rtems/freebsd/net/radix_mpath.c
@@ -0,0 +1,357 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/* $KAME: radix_mpath.c,v 1.17 2004/11/08 10:29:39 itojun Exp $ */
+
+/*
+ * Copyright (C) 2001 WIDE Project.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the project nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ * THE AUTHORS DO NOT GUARANTEE THAT THIS SOFTWARE DOES NOT INFRINGE
+ * ANY OTHERS' INTELLECTUAL PROPERTIES. IN NO EVENT SHALL THE AUTHORS
+ * BE LIABLE FOR ANY INFRINGEMENT OF ANY OTHERS' INTELLECTUAL
+ * PROPERTIES.
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/freebsd/local/opt_inet.h>
+#include <rtems/freebsd/local/opt_inet6.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/domain.h>
+#include <rtems/freebsd/sys/syslog.h>
+#include <rtems/freebsd/net/radix.h>
+#include <rtems/freebsd/net/radix_mpath.h>
+#include <rtems/freebsd/net/route.h>
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/if_var.h>
+
+/*
+ * give some jitter to the hash, to avoid synchronization between routers
+ */
+static uint32_t hashjitter;
+
+int
+rn_mpath_capable(struct radix_node_head *rnh)
+{
+
+ return rnh->rnh_multipath;
+}
+
+struct radix_node *
+rn_mpath_next(struct radix_node *rn)
+{
+ struct radix_node *next;
+
+ if (!rn->rn_dupedkey)
+ return NULL;
+ next = rn->rn_dupedkey;
+ if (rn->rn_mask == next->rn_mask)
+ return next;
+ else
+ return NULL;
+}
+
+uint32_t
+rn_mpath_count(struct radix_node *rn)
+{
+ uint32_t i = 0;
+ struct rtentry *rt;
+
+ while (rn != NULL) {
+ rt = (struct rtentry *)rn;
+ i += rt->rt_rmx.rmx_weight;
+ rn = rn_mpath_next(rn);
+ }
+ return (i);
+}
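+
+/*
+ * Note that the weights are accumulated: two multipath entries with
+ * rmx_weight 2 and 3 make rn_mpath_count() return 5, not 2 -- the
+ * result is the total selection weight, not the number of routes.
+ */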
+
+struct rtentry *
+rt_mpath_matchgate(struct rtentry *rt, struct sockaddr *gate)
+{
+ struct radix_node *rn;
+
+ if (!rn_mpath_next((struct radix_node *)rt))
+ return rt;
+
+ if (!gate)
+ return NULL;
+
+ /* beyond here, we use rn as the master copy */
+ rn = (struct radix_node *)rt;
+ do {
+ rt = (struct rtentry *)rn;
+ /*
+	 * We are removing an address alias that has
+	 * the same prefix as another address.
+	 * We need to compare the interface address because
+	 * rt_gateway is a special sockaddr_dl structure.
+ */
+ if (rt->rt_gateway->sa_family == AF_LINK) {
+ if (!memcmp(rt->rt_ifa->ifa_addr, gate, gate->sa_len))
+ break;
+ } else {
+ if (rt->rt_gateway->sa_len == gate->sa_len &&
+ !memcmp(rt->rt_gateway, gate, gate->sa_len))
+ break;
+ }
+ } while ((rn = rn_mpath_next(rn)) != NULL);
+
+ return (struct rtentry *)rn;
+}
+
+/*
+ * go through the chain and unlink "rt" from the list
+ * the caller will free "rt"
+ */
+int
+rt_mpath_deldup(struct rtentry *headrt, struct rtentry *rt)
+{
+ struct radix_node *t, *tt;
+
+ if (!headrt || !rt)
+ return (0);
+ t = (struct radix_node *)headrt;
+ tt = rn_mpath_next(t);
+ while (tt) {
+ if (tt == (struct radix_node *)rt) {
+ t->rn_dupedkey = tt->rn_dupedkey;
+ tt->rn_dupedkey = NULL;
+ tt->rn_flags &= ~RNF_ACTIVE;
+ tt[1].rn_flags &= ~RNF_ACTIVE;
+ return (1);
+ }
+ t = tt;
+ tt = rn_mpath_next((struct radix_node *)t);
+ }
+ return (0);
+}
+
+/*
+ * check if we have the same key/mask/gateway on the table already.
+ */
+int
+rt_mpath_conflict(struct radix_node_head *rnh, struct rtentry *rt,
+ struct sockaddr *netmask)
+{
+ struct radix_node *rn, *rn1;
+ struct rtentry *rt1;
+ char *p, *q, *eq;
+ int same, l, skip;
+
+ rn = (struct radix_node *)rt;
+ rn1 = rnh->rnh_lookup(rt_key(rt), netmask, rnh);
+ if (!rn1 || rn1->rn_flags & RNF_ROOT)
+ return 0;
+
+ /*
+ * unlike other functions we have in this file, we have to check
+	 * all of key/mask/gateway, as rnh_lookup can match a less
+	 * specific entry.
+ */
+ rt1 = (struct rtentry *)rn1;
+
+ /* compare key. */
+ if (rt_key(rt1)->sa_len != rt_key(rt)->sa_len ||
+ bcmp(rt_key(rt1), rt_key(rt), rt_key(rt1)->sa_len))
+ goto different;
+
+ /* key was the same. compare netmask. hairy... */
+ if (rt_mask(rt1) && netmask) {
+ skip = rnh->rnh_treetop->rn_offset;
+ if (rt_mask(rt1)->sa_len > netmask->sa_len) {
+ /*
+ * as rt_mask(rt1) is made optimal by radix.c,
+ * there must be some 1-bits on rt_mask(rt1)
+ * after netmask->sa_len. therefore, in
+ * this case, the entries are different.
+ */
+ if (rt_mask(rt1)->sa_len > skip)
+ goto different;
+ else {
+				/* no bits to compare, i.e. same */
+ goto maskmatched;
+ }
+ }
+
+ l = rt_mask(rt1)->sa_len;
+ if (skip > l) {
+ /* no bits to compare, i.e. same */
+ goto maskmatched;
+ }
+ p = (char *)rt_mask(rt1);
+ q = (char *)netmask;
+ if (bcmp(p + skip, q + skip, l - skip))
+ goto different;
+ /*
+		 * need to go through all the bits, as the netmask is not
+		 * optimal and can contain trailing 0s
+ */
+ eq = (char *)netmask + netmask->sa_len;
+ q += l;
+ same = 1;
+ while (eq > q)
+ if (*q++) {
+ same = 0;
+ break;
+ }
+ if (!same)
+ goto different;
+ } else if (!rt_mask(rt1) && !netmask)
+ ; /* no mask to compare, i.e. same */
+ else {
+ /* one has mask and the other does not, different */
+ goto different;
+ }
+
+maskmatched:
+
+ /* key/mask were the same. compare gateway for all multipaths */
+ do {
+ rt1 = (struct rtentry *)rn1;
+
+ /* sanity: no use in comparing the same thing */
+ if (rn1 == rn)
+ continue;
+
+ if (rt1->rt_gateway->sa_family == AF_LINK) {
+ if (rt1->rt_ifa->ifa_addr->sa_len != rt->rt_ifa->ifa_addr->sa_len ||
+ bcmp(rt1->rt_ifa->ifa_addr, rt->rt_ifa->ifa_addr,
+ rt1->rt_ifa->ifa_addr->sa_len))
+ continue;
+ } else {
+ if (rt1->rt_gateway->sa_len != rt->rt_gateway->sa_len ||
+ bcmp(rt1->rt_gateway, rt->rt_gateway,
+ rt1->rt_gateway->sa_len))
+ continue;
+ }
+
+ /* all key/mask/gateway are the same. conflicting entry. */
+ return EEXIST;
+ } while ((rn1 = rn_mpath_next(rn1)) != NULL);
+
+different:
+ return 0;
+}
+
+void
+rtalloc_mpath_fib(struct route *ro, uint32_t hash, u_int fibnum)
+{
+ struct radix_node *rn0, *rn;
+ u_int32_t n;
+ struct rtentry *rt;
+ int64_t weight;
+
+ /*
+	 * XXX we don't attempt to look up the cached route again; what
+	 * should be done for the sendto(3) case?
+ */
+ if (ro->ro_rt && ro->ro_rt->rt_ifp && (ro->ro_rt->rt_flags & RTF_UP)
+ && RT_LINK_IS_UP(ro->ro_rt->rt_ifp))
+ return;
+ ro->ro_rt = rtalloc1_fib(&ro->ro_dst, 1, 0, fibnum);
+
+ /* if the route does not exist or it is not multipath, don't care */
+ if (ro->ro_rt == NULL)
+ return;
+ if (rn_mpath_next((struct radix_node *)ro->ro_rt) == NULL) {
+ RT_UNLOCK(ro->ro_rt);
+ return;
+ }
+
+ /* beyond here, we use rn as the master copy */
+ rn0 = rn = (struct radix_node *)ro->ro_rt;
+ n = rn_mpath_count(rn0);
+
+ /* gw selection by Modulo-N Hash (RFC2991) XXX need improvement? */
+ hash += hashjitter;
+ hash %= n;
+ for (weight = abs((int32_t)hash), rt = ro->ro_rt;
+ weight >= rt->rt_rmx.rmx_weight && rn;
+ weight -= rt->rt_rmx.rmx_weight) {
+
+ /* stay within the multipath routes */
+ if (rn->rn_dupedkey && rn->rn_mask != rn->rn_dupedkey->rn_mask)
+ break;
+ rn = rn->rn_dupedkey;
+ rt = (struct rtentry *)rn;
+ }
+ /* XXX try filling rt_gwroute and avoid unreachable gw */
+
+ /* gw selection has failed - there must be only zero weight routes */
+ if (!rn) {
+ RT_UNLOCK(ro->ro_rt);
+ ro->ro_rt = NULL;
+ return;
+ }
+ if (ro->ro_rt != rt) {
+ RTFREE_LOCKED(ro->ro_rt);
+ ro->ro_rt = (struct rtentry *)rn;
+ RT_LOCK(ro->ro_rt);
+ RT_ADDREF(ro->ro_rt);
+
+ }
+ RT_UNLOCK(ro->ro_rt);
+}
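+
+/*
+ * A simplified sketch of the weighted modulo-N selection used above
+ * (RFC 2991): reduce the hash modulo the total weight, then walk the
+ * next hops subtracting each weight until the residue is covered.
+ * pick_nexthop is hypothetical and illustrative only.
+ */
+#if 0	/* illustrative only */
+static int
+pick_nexthop(const u_int32_t *weights, int n, u_int32_t hash)
+{
+	u_int32_t total = 0, w;
+	int i;
+
+	for (i = 0; i < n; i++)
+		total += weights[i];
+	w = hash % total;
+	for (i = 0; i < n; i++) {
+		if (w < weights[i])
+			return (i);
+		w -= weights[i];
+	}
+	return (n - 1);
+}
+#endif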
+
+extern int in6_inithead(void **head, int off);
+extern int in_inithead(void **head, int off);
+
+#ifdef INET
+int
+rn4_mpath_inithead(void **head, int off)
+{
+ struct radix_node_head *rnh;
+
+ hashjitter = arc4random();
+ if (in_inithead(head, off) == 1) {
+ rnh = (struct radix_node_head *)*head;
+ rnh->rnh_multipath = 1;
+ return 1;
+ } else
+ return 0;
+}
+#endif
+
+#ifdef INET6
+int
+rn6_mpath_inithead(void **head, int off)
+{
+ struct radix_node_head *rnh;
+
+ hashjitter = arc4random();
+ if (in6_inithead(head, off) == 1) {
+ rnh = (struct radix_node_head *)*head;
+ rnh->rnh_multipath = 1;
+ return 1;
+ } else
+ return 0;
+}
+
+#endif
diff --git a/rtems/freebsd/net/radix_mpath.h b/rtems/freebsd/net/radix_mpath.h
new file mode 100644
index 00000000..b6d8c16a
--- /dev/null
+++ b/rtems/freebsd/net/radix_mpath.h
@@ -0,0 +1,63 @@
+/* $KAME: radix_mpath.h,v 1.10 2004/11/06 15:44:28 itojun Exp $ */
+
+/*
+ * Copyright (C) 2001 WIDE Project.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the project nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ * THE AUTHORS DO NOT GUARANTEE THAT THIS SOFTWARE DOES NOT INFRINGE
+ * ANY OTHERS' INTELLECTUAL PROPERTIES. IN NO EVENT SHALL THE AUTHORS
+ * BE LIABLE FOR ANY INFRINGEMENT OF ANY OTHERS' INTELLECTUAL
+ * PROPERTIES.
+ */
+/* $FreeBSD$ */
+
+#ifndef _NET_RADIX_MPATH_HH_
+#define _NET_RADIX_MPATH_HH_
+
+#ifdef _KERNEL
+/*
+ * Radix tree API with multipath support
+ */
+struct route;
+struct rtentry;
+struct sockaddr;
+int rn_mpath_capable(struct radix_node_head *);
+struct radix_node *rn_mpath_next(struct radix_node *);
+u_int32_t rn_mpath_count(struct radix_node *);
+struct rtentry *rt_mpath_matchgate(struct rtentry *, struct sockaddr *);
+int rt_mpath_conflict(struct radix_node_head *, struct rtentry *,
+ struct sockaddr *);
+void rtalloc_mpath_fib(struct route *, u_int32_t, u_int);
+#define rtalloc_mpath(_route, _hash) rtalloc_mpath_fib((_route), (_hash), 0)
+struct radix_node *rn_mpath_lookup(void *, void *,
+ struct radix_node_head *);
+int rt_mpath_deldup(struct rtentry *, struct rtentry *);
+int rn4_mpath_inithead(void **, int);
+int rn6_mpath_inithead(void **, int);
+
+#endif
+
+#endif /* _NET_RADIX_MPATH_HH_ */
diff --git a/rtems/freebsd/net/raw_cb.c b/rtems/freebsd/net/raw_cb.c
new file mode 100644
index 00000000..097864aa
--- /dev/null
+++ b/rtems/freebsd/net/raw_cb.c
@@ -0,0 +1,119 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 1980, 1986, 1993
+ * The Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)raw_cb.c 8.1 (Berkeley) 6/10/93
+ * $FreeBSD$
+ */
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/domain.h>
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/mutex.h>
+#include <rtems/freebsd/sys/protosw.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/socketvar.h>
+#include <rtems/freebsd/sys/sysctl.h>
+#include <rtems/freebsd/sys/systm.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/raw_cb.h>
+#include <rtems/freebsd/net/vnet.h>
+
+/*
+ * Routines to manage the raw protocol control blocks.
+ *
+ * TODO:
+ * hash lookups by protocol family/protocol + address family
+ * take care of unique address problems per AF?
+ * redo address binding to allow wildcards
+ */
+
+struct mtx rawcb_mtx;
+VNET_DEFINE(struct rawcb_list_head, rawcb_list);
+
+SYSCTL_NODE(_net, OID_AUTO, raw, CTLFLAG_RW, 0, "Raw socket infrastructure");
+
+static u_long raw_sendspace = RAWSNDQ;
+SYSCTL_ULONG(_net_raw, OID_AUTO, sendspace, CTLFLAG_RW, &raw_sendspace, 0,
+ "Default raw socket send space");
+
+static u_long raw_recvspace = RAWRCVQ;
+SYSCTL_ULONG(_net_raw, OID_AUTO, recvspace, CTLFLAG_RW, &raw_recvspace, 0,
+ "Default raw socket receive space");
+
+/*
+ * Allocate a control block and a nominal amount of buffer space for the
+ * socket.
+ */
+int
+raw_attach(struct socket *so, int proto)
+{
+ struct rawcb *rp = sotorawcb(so);
+ int error;
+
+ /*
+ * It is assumed that raw_attach is called after space has been
+ * allocated for the rawcb; consumer protocols may simply allocate
+ * type struct rawcb, or a wrapper data structure that begins with a
+ * struct rawcb.
+ */
+ KASSERT(rp != NULL, ("raw_attach: rp == NULL"));
+
+ error = soreserve(so, raw_sendspace, raw_recvspace);
+ if (error)
+ return (error);
+ rp->rcb_socket = so;
+ rp->rcb_proto.sp_family = so->so_proto->pr_domain->dom_family;
+ rp->rcb_proto.sp_protocol = proto;
+ mtx_lock(&rawcb_mtx);
+ LIST_INSERT_HEAD(&V_rawcb_list, rp, list);
+ mtx_unlock(&rawcb_mtx);
+ return (0);
+}
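+
+/*
+ * A hypothetical attach-side sketch: a consumer protocol allocates
+ * its control block, which must begin with a struct rawcb, before
+ * calling raw_attach(), typically from its pru_attach routine.
+ * myproto_attach and its error handling are illustrative only.
+ */
+#if 0	/* illustrative only */
+static int
+myproto_attach(struct socket *so, int proto, struct thread *td)
+{
+	struct rawcb *rp;
+	int error;
+
+	rp = malloc(sizeof(*rp), M_PCB, M_WAITOK | M_ZERO);
+	so->so_pcb = (caddr_t)rp;
+	error = raw_attach(so, proto);
+	if (error != 0) {
+		so->so_pcb = NULL;
+		free(rp, M_PCB);
+	}
+	return (error);
+}
+#endif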
+
+/*
+ * Detach the raw connection block and discard socket resources.
+ */
+void
+raw_detach(struct rawcb *rp)
+{
+ struct socket *so = rp->rcb_socket;
+
+ KASSERT(so->so_pcb == rp, ("raw_detach: so_pcb != rp"));
+
+ so->so_pcb = NULL;
+ mtx_lock(&rawcb_mtx);
+ LIST_REMOVE(rp, list);
+ mtx_unlock(&rawcb_mtx);
+ free((caddr_t)(rp), M_PCB);
+}
diff --git a/rtems/freebsd/net/raw_cb.h b/rtems/freebsd/net/raw_cb.h
new file mode 100644
index 00000000..2c9e1f67
--- /dev/null
+++ b/rtems/freebsd/net/raw_cb.h
@@ -0,0 +1,84 @@
+/*-
+ * Copyright (c) 1980, 1986, 1993
+ * The Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)raw_cb.h 8.1 (Berkeley) 6/10/93
+ * $FreeBSD$
+ */
+
+#ifndef _NET_RAW_CB_HH_
+#define _NET_RAW_CB_HH_
+
+#include <rtems/freebsd/sys/queue.h>
+
+/*
+ * Raw protocol interface control block. Used to tie a socket to the generic
+ * raw interface.
+ */
+struct rawcb {
+ LIST_ENTRY(rawcb) list;
+ struct socket *rcb_socket; /* back pointer to socket */
+ struct sockproto rcb_proto; /* protocol family, protocol */
+};
+
+#define sotorawcb(so) ((struct rawcb *)(so)->so_pcb)
+
+/*
+ * Nominal space allocated to a raw socket.
+ */
+#define RAWSNDQ 8192
+#define RAWRCVQ 8192
+
+#ifdef _KERNEL
+VNET_DECLARE(LIST_HEAD(rawcb_list_head, rawcb), rawcb_list);
+#define V_rawcb_list VNET(rawcb_list)
+
+extern struct mtx rawcb_mtx;
+
+/*
+ * Generic protosw entries for raw socket protocols.
+ */
+pr_ctlinput_t raw_ctlinput;
+pr_init_t raw_init;
+
+/*
+ * Library routines for raw socket usrreq functions; will always be wrapped
+ * so that protocol-specific functions can be handled.
+ */
+int raw_attach(struct socket *, int);
+void raw_detach(struct rawcb *);
+void raw_input(struct mbuf *, struct sockproto *, struct sockaddr *);
+
+/*
+ * Generic pr_usrreqs entries for raw socket protocols, usually wrapped so
+ * that protocol-specific functions can be handled.
+ */
+extern struct pr_usrreqs raw_usrreqs;
+#endif
+
+#endif
diff --git a/rtems/freebsd/net/raw_usrreq.c b/rtems/freebsd/net/raw_usrreq.c
new file mode 100644
index 00000000..0b518568
--- /dev/null
+++ b/rtems/freebsd/net/raw_usrreq.c
@@ -0,0 +1,266 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 1980, 1986, 1993
+ * The Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)raw_usrreq.c 8.1 (Berkeley) 6/10/93
+ * $FreeBSD$
+ */
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/mutex.h>
+#include <rtems/freebsd/sys/priv.h>
+#include <rtems/freebsd/sys/protosw.h>
+#include <rtems/freebsd/sys/signalvar.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/socketvar.h>
+#include <rtems/freebsd/sys/sx.h>
+#include <rtems/freebsd/sys/systm.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/raw_cb.h>
+#include <rtems/freebsd/net/vnet.h>
+
+MTX_SYSINIT(rawcb_mtx, &rawcb_mtx, "rawcb", MTX_DEF);
+
+/*
+ * Initialize the raw connection block list.
+ */
+void
+raw_init(void)
+{
+
+ LIST_INIT(&V_rawcb_list);
+}
+
+/*
+ * Raw protocol input routine.  Find the socket associated with the packet(s)
+ * and move them over.  If nothing exists for this packet, drop it.
+ */
+void
+raw_input(struct mbuf *m0, struct sockproto *proto, struct sockaddr *src)
+{
+ struct rawcb *rp;
+ struct mbuf *m = m0;
+ struct socket *last;
+
+	last = NULL;
+ mtx_lock(&rawcb_mtx);
+ LIST_FOREACH(rp, &V_rawcb_list, list) {
+ if (rp->rcb_proto.sp_family != proto->sp_family)
+ continue;
+ if (rp->rcb_proto.sp_protocol &&
+ rp->rcb_proto.sp_protocol != proto->sp_protocol)
+ continue;
+ if (last) {
+ struct mbuf *n;
+ n = m_copy(m, 0, (int)M_COPYALL);
+ if (n) {
+ if (sbappendaddr(&last->so_rcv, src,
+ n, (struct mbuf *)0) == 0)
+ /* should notify about lost packet */
+ m_freem(n);
+ else
+ sorwakeup(last);
+ }
+ }
+ last = rp->rcb_socket;
+ }
+ if (last) {
+ if (sbappendaddr(&last->so_rcv, src,
+ m, (struct mbuf *)0) == 0)
+ m_freem(m);
+ else
+ sorwakeup(last);
+ } else
+ m_freem(m);
+ mtx_unlock(&rawcb_mtx);
+}
+
+/*ARGSUSED*/
+void
+raw_ctlinput(int cmd, struct sockaddr *arg, void *dummy)
+{
+
+ if (cmd < 0 || cmd >= PRC_NCMDS)
+ return;
+ /* INCOMPLETE */
+}
+
+static void
+raw_uabort(struct socket *so)
+{
+
+ KASSERT(sotorawcb(so) != NULL, ("raw_uabort: rp == NULL"));
+
+ soisdisconnected(so);
+}
+
+static void
+raw_uclose(struct socket *so)
+{
+
+	KASSERT(sotorawcb(so) != NULL, ("raw_uclose: rp == NULL"));
+
+ soisdisconnected(so);
+}
+
+/* pru_accept is EOPNOTSUPP */
+
+static int
+raw_uattach(struct socket *so, int proto, struct thread *td)
+{
+ int error;
+
+ /*
+ * Implementors of raw sockets will already have allocated the PCB,
+ * so it must be non-NULL here.
+ */
+ KASSERT(sotorawcb(so) != NULL, ("raw_uattach: so_pcb == NULL"));
+
+ if (td != NULL) {
+ error = priv_check(td, PRIV_NET_RAW);
+ if (error)
+ return (error);
+ }
+ return (raw_attach(so, proto));
+}
+
+static int
+raw_ubind(struct socket *so, struct sockaddr *nam, struct thread *td)
+{
+
+ return (EINVAL);
+}
+
+static int
+raw_uconnect(struct socket *so, struct sockaddr *nam, struct thread *td)
+{
+
+ return (EINVAL);
+}
+
+/* pru_connect2 is EOPNOTSUPP */
+/* pru_control is EOPNOTSUPP */
+
+static void
+raw_udetach(struct socket *so)
+{
+ struct rawcb *rp = sotorawcb(so);
+
+ KASSERT(rp != NULL, ("raw_udetach: rp == NULL"));
+
+ raw_detach(rp);
+}
+
+static int
+raw_udisconnect(struct socket *so)
+{
+
+ KASSERT(sotorawcb(so) != NULL, ("raw_udisconnect: rp == NULL"));
+
+ return (ENOTCONN);
+}
+
+/* pru_listen is EOPNOTSUPP */
+
+static int
+raw_upeeraddr(struct socket *so, struct sockaddr **nam)
+{
+
+ KASSERT(sotorawcb(so) != NULL, ("raw_upeeraddr: rp == NULL"));
+
+ return (ENOTCONN);
+}
+
+/* pru_rcvd is EOPNOTSUPP */
+/* pru_rcvoob is EOPNOTSUPP */
+
+static int
+raw_usend(struct socket *so, int flags, struct mbuf *m, struct sockaddr *nam,
+ struct mbuf *control, struct thread *td)
+{
+
+ KASSERT(sotorawcb(so) != NULL, ("raw_usend: rp == NULL"));
+
+ if ((flags & PRUS_OOB) || (control && control->m_len)) {
+ /* XXXRW: Should control also be freed here? */
+ if (m != NULL)
+ m_freem(m);
+ return (EOPNOTSUPP);
+ }
+
+ /*
+ * For historical (bad?) reasons, we effectively ignore the address
+ * argument to sendto(2). Perhaps we should return an error instead?
+ */
+ return ((*so->so_proto->pr_output)(m, so));
+}
+
+/* pru_sense is null */
+
+static int
+raw_ushutdown(struct socket *so)
+{
+
+ KASSERT(sotorawcb(so) != NULL, ("raw_ushutdown: rp == NULL"));
+
+ socantsendmore(so);
+ return (0);
+}
+
+static int
+raw_usockaddr(struct socket *so, struct sockaddr **nam)
+{
+
+ KASSERT(sotorawcb(so) != NULL, ("raw_usockaddr: rp == NULL"));
+
+ return (EINVAL);
+}
+
+struct pr_usrreqs raw_usrreqs = {
+ .pru_abort = raw_uabort,
+ .pru_attach = raw_uattach,
+ .pru_bind = raw_ubind,
+ .pru_connect = raw_uconnect,
+ .pru_detach = raw_udetach,
+ .pru_disconnect = raw_udisconnect,
+ .pru_peeraddr = raw_upeeraddr,
+ .pru_send = raw_usend,
+ .pru_shutdown = raw_ushutdown,
+ .pru_sockaddr = raw_usockaddr,
+ .pru_close = raw_uclose,
+};
diff --git a/rtems/freebsd/net/route.c b/rtems/freebsd/net/route.c
new file mode 100644
index 00000000..73285797
--- /dev/null
+++ b/rtems/freebsd/net/route.c
@@ -0,0 +1,1601 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 1980, 1986, 1991, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)route.c 8.3.1.1 (Berkeley) 2/23/95
+ * $FreeBSD$
+ */
+/************************************************************************
+ * Note: In this file a 'fib' is a "forwarding information base" *
+ * Which is the new name for an in kernel routing (next hop) table. *
+ ***********************************************************************/
+
+#include <rtems/freebsd/local/opt_inet.h>
+#include <rtems/freebsd/local/opt_route.h>
+#include <rtems/freebsd/local/opt_mrouting.h>
+#include <rtems/freebsd/local/opt_mpath.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/syslog.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/sysctl.h>
+#include <rtems/freebsd/sys/sysproto.h>
+#include <rtems/freebsd/sys/proc.h>
+#include <rtems/freebsd/sys/domain.h>
+#include <rtems/freebsd/sys/kernel.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/if_dl.h>
+#include <rtems/freebsd/net/route.h>
+#include <rtems/freebsd/net/vnet.h>
+#include <rtems/freebsd/net/flowtable.h>
+
+#ifdef RADIX_MPATH
+#include <rtems/freebsd/net/radix_mpath.h>
+#endif
+
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/ip_mroute.h>
+
+#include <rtems/freebsd/vm/uma.h>
+
+u_int rt_numfibs = RT_NUMFIBS;
+SYSCTL_INT(_net, OID_AUTO, fibs, CTLFLAG_RD, &rt_numfibs, 0, "");
+/*
+ * Allow the boot code to select FEWER than RT_MAXFIBS tables to be used.
+ * We can't do more because storage is statically allocated for now
+ * (for compatibility reasons; this will change).
+ */
+TUNABLE_INT("net.fibs", &rt_numfibs);
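+/*
+ * On a stock FreeBSD system this tunable would be set at boot, e.g. in
+ * /boot/loader.conf:
+ *
+ *	net.fibs="4"
+ *
+ * Whether and how the tunable is supplied under this RTEMS port is left
+ * to the build configuration (an assumption, not verified here).
+ */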
+
+/*
+ * By default add routes to all fibs for new interfaces.
+ * Once this is set to 0 then only allocate routes on interface
+ * changes for the FIB of the caller when adding a new set of addresses
+ * to an interface.  XXX this is a shotgun approach to a problem that needs
+ * a more fine-grained solution; that will come.
+ */
+u_int rt_add_addr_allfibs = 1;
+SYSCTL_INT(_net, OID_AUTO, add_addr_allfibs, CTLFLAG_RW,
+ &rt_add_addr_allfibs, 0, "");
+TUNABLE_INT("net.add_addr_allfibs", &rt_add_addr_allfibs);
+
+VNET_DEFINE(struct rtstat, rtstat);
+#define V_rtstat VNET(rtstat)
+
+VNET_DEFINE(struct radix_node_head *, rt_tables);
+#define V_rt_tables VNET(rt_tables)
+
+VNET_DEFINE(int, rttrash); /* routes not in table but not freed */
+#define V_rttrash VNET(rttrash)
+
+
+/* compare two sockaddr structures */
+#define sa_equal(a1, a2) (bcmp((a1), (a2), (a1)->sa_len) == 0)
+
+/*
+ * Convert a 'struct radix_node *' to a 'struct rtentry *'.
+ * The operation can be done safely (in this code) because a
+ * 'struct rtentry' starts with two 'struct radix_node' entries, the first
+ * one representing leaf nodes in the routing tree, which is
+ * what the code in radix.c passes us as a 'struct radix_node'.
+ *
+ * But because there are a lot of assumptions in this conversion,
+ * do not cast explicitly; always use the macro below.
+ */
+#define RNTORT(p) ((struct rtentry *)(p))
+
+static VNET_DEFINE(uma_zone_t, rtzone); /* Routing table UMA zone. */
+#define V_rtzone VNET(rtzone)
+
+#if 0
+/* default fib for tunnels to use */
+u_int tunnel_fib = 0;
+SYSCTL_INT(_net, OID_AUTO, tunnelfib, CTLFLAG_RD, &tunnel_fib, 0, "");
+#endif
+
+/*
+ * handler for net.my_fibnum
+ */
+static int
+sysctl_my_fibnum(SYSCTL_HANDLER_ARGS)
+{
+ int fibnum;
+ int error;
+
+ fibnum = curthread->td_proc->p_fibnum;
+ error = sysctl_handle_int(oidp, &fibnum, 0, req);
+ return (error);
+}
+
+SYSCTL_PROC(_net, OID_AUTO, my_fibnum, CTLTYPE_INT|CTLFLAG_RD,
+ NULL, 0, &sysctl_my_fibnum, "I", "default FIB of caller");
+
+static __inline struct radix_node_head **
+rt_tables_get_rnh_ptr(int table, int fam)
+{
+ struct radix_node_head **rnh;
+
+ KASSERT(table >= 0 && table < rt_numfibs, ("%s: table out of bounds.",
+ __func__));
+ KASSERT(fam >= 0 && fam < (AF_MAX+1), ("%s: fam out of bounds.",
+ __func__));
+
+ /* rnh is [fib=0][af=0]. */
+ rnh = (struct radix_node_head **)V_rt_tables;
+ /* Get the offset to the requested table and fam. */
+ rnh += table * (AF_MAX+1) + fam;
+
+ return (rnh);
+}
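+/*
+ * Worked example: the table is a flattened two-dimensional array indexed
+ * as [fib][af].  Assuming AF_MAX == 37 (an illustrative value only), the
+ * head for fib 1, AF_INET (family 2) lives at index 1 * 38 + 2 == 40.
+ */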
+
+struct radix_node_head *
+rt_tables_get_rnh(int table, int fam)
+{
+
+ return (*rt_tables_get_rnh_ptr(table, fam));
+}
+
+/*
+ * Route initialization must occur before ip6_init2(), which happens at
+ * SI_ORDER_MIDDLE.
+ */
+static void
+route_init(void)
+{
+ struct domain *dom;
+ int max_keylen = 0;
+
+ /* whack the tunable ints into line. */
+ if (rt_numfibs > RT_MAXFIBS)
+ rt_numfibs = RT_MAXFIBS;
+ if (rt_numfibs == 0)
+ rt_numfibs = 1;
+
+ for (dom = domains; dom; dom = dom->dom_next)
+ if (dom->dom_maxrtkey > max_keylen)
+ max_keylen = dom->dom_maxrtkey;
+
+ rn_init(max_keylen); /* init all zeroes, all ones, mask table */
+}
+SYSINIT(route_init, SI_SUB_PROTO_DOMAIN, SI_ORDER_THIRD, route_init, 0);
+
+static void
+vnet_route_init(const void *unused __unused)
+{
+ struct domain *dom;
+ struct radix_node_head **rnh;
+ int table;
+ int fam;
+
+ V_rt_tables = malloc(rt_numfibs * (AF_MAX+1) *
+ sizeof(struct radix_node_head *), M_RTABLE, M_WAITOK|M_ZERO);
+
+ V_rtzone = uma_zcreate("rtentry", sizeof(struct rtentry), NULL, NULL,
+ NULL, NULL, UMA_ALIGN_PTR, 0);
+ for (dom = domains; dom; dom = dom->dom_next) {
+ if (dom->dom_rtattach) {
+ for (table = 0; table < rt_numfibs; table++) {
+ if ( (fam = dom->dom_family) == AF_INET ||
+ table == 0) {
+ /* for now only AF_INET has > 1 table */
+ /* XXX MRT
+					 * rtattach will also be called
+ * from vfs_export.c but the
+ * offset will be 0
+ * (only for AF_INET and AF_INET6
+ * which don't need it anyhow)
+ */
+ rnh = rt_tables_get_rnh_ptr(table, fam);
+ if (rnh == NULL)
+ panic("%s: rnh NULL", __func__);
+ dom->dom_rtattach((void **)rnh,
+ dom->dom_rtoffset);
+ } else {
+ break;
+ }
+ }
+ }
+ }
+}
+VNET_SYSINIT(vnet_route_init, SI_SUB_PROTO_DOMAIN, SI_ORDER_FOURTH,
+ vnet_route_init, 0);
+
+#ifdef VIMAGE
+static void
+vnet_route_uninit(const void *unused __unused)
+{
+ int table;
+ int fam;
+ struct domain *dom;
+ struct radix_node_head **rnh;
+
+ for (dom = domains; dom; dom = dom->dom_next) {
+ if (dom->dom_rtdetach) {
+ for (table = 0; table < rt_numfibs; table++) {
+ if ( (fam = dom->dom_family) == AF_INET ||
+ table == 0) {
+ /* For now only AF_INET has > 1 tbl. */
+ rnh = rt_tables_get_rnh_ptr(table, fam);
+ if (rnh == NULL)
+ panic("%s: rnh NULL", __func__);
+ dom->dom_rtdetach((void **)rnh,
+ dom->dom_rtoffset);
+ } else {
+ break;
+ }
+ }
+ }
+ }
+}
+VNET_SYSUNINIT(vnet_route_uninit, SI_SUB_PROTO_DOMAIN, SI_ORDER_THIRD,
+ vnet_route_uninit, 0);
+#endif
+
+#ifndef _SYS_SYSPROTO_HH_
+struct setfib_args {
+ int fibnum;
+};
+#endif
+int
+setfib(struct thread *td, struct setfib_args *uap)
+{
+ if (uap->fibnum < 0 || uap->fibnum >= rt_numfibs)
+ return EINVAL;
+ td->td_proc->p_fibnum = uap->fibnum;
+ return (0);
+}
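+/*
+ * Usage sketch (userland, standard FreeBSD; error handling abbreviated):
+ *
+ *	if (setfib(2) != 0)
+ *		err(1, "setfib");
+ *
+ * Sockets created by the process afterwards default to FIB 2.
+ */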
+
+/*
+ * Packet routing routines.
+ */
+void
+rtalloc(struct route *ro)
+{
+ rtalloc_ign_fib(ro, 0UL, 0);
+}
+
+void
+rtalloc_fib(struct route *ro, u_int fibnum)
+{
+ rtalloc_ign_fib(ro, 0UL, fibnum);
+}
+
+void
+rtalloc_ign(struct route *ro, u_long ignore)
+{
+ struct rtentry *rt;
+
+ if ((rt = ro->ro_rt) != NULL) {
+ if (rt->rt_ifp != NULL && rt->rt_flags & RTF_UP)
+ return;
+ RTFREE(rt);
+ ro->ro_rt = NULL;
+ }
+ ro->ro_rt = rtalloc1_fib(&ro->ro_dst, 1, ignore, 0);
+ if (ro->ro_rt)
+ RT_UNLOCK(ro->ro_rt);
+}
+
+void
+rtalloc_ign_fib(struct route *ro, u_long ignore, u_int fibnum)
+{
+ struct rtentry *rt;
+
+ if ((rt = ro->ro_rt) != NULL) {
+ if (rt->rt_ifp != NULL && rt->rt_flags & RTF_UP)
+ return;
+ RTFREE(rt);
+ ro->ro_rt = NULL;
+ }
+ ro->ro_rt = rtalloc1_fib(&ro->ro_dst, 1, ignore, fibnum);
+ if (ro->ro_rt)
+ RT_UNLOCK(ro->ro_rt);
+}
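+/*
+ * Caller pattern sketch: protocols keep a 'struct route' in their control
+ * block as a one-entry route cache and revalidate it before each use.
+ * 'dst' below stands for an already filled-in sockaddr (hypothetical):
+ *
+ *	struct route ro;
+ *
+ *	bzero(&ro, sizeof(ro));
+ *	bcopy(dst, &ro.ro_dst, dst->sa_len);
+ *	rtalloc_ign(&ro, 0);
+ *	if (ro.ro_rt != NULL) {
+ *		... transmit via ro.ro_rt->rt_ifp ...
+ *		RTFREE(ro.ro_rt);
+ *	}
+ */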
+
+/*
+ * Look up the route that matches the address given
+ * Or, at least try.. Create a cloned route if needed.
+ *
+ * The returned route, if any, is locked.
+ */
+struct rtentry *
+rtalloc1(struct sockaddr *dst, int report, u_long ignflags)
+{
+ return (rtalloc1_fib(dst, report, ignflags, 0));
+}
+
+struct rtentry *
+rtalloc1_fib(struct sockaddr *dst, int report, u_long ignflags,
+ u_int fibnum)
+{
+ struct radix_node_head *rnh;
+ struct rtentry *rt;
+ struct radix_node *rn;
+ struct rtentry *newrt;
+ struct rt_addrinfo info;
+ int err = 0, msgtype = RTM_MISS;
+ int needlock;
+
+ KASSERT((fibnum < rt_numfibs), ("rtalloc1_fib: bad fibnum"));
+ if (dst->sa_family != AF_INET) /* Only INET supports > 1 fib now */
+ fibnum = 0;
+ rnh = rt_tables_get_rnh(fibnum, dst->sa_family);
+ newrt = NULL;
+ /*
+ * Look up the address in the table for that Address Family
+ */
+ if (rnh == NULL) {
+ V_rtstat.rts_unreach++;
+ goto miss;
+ }
+ needlock = !(ignflags & RTF_RNH_LOCKED);
+ if (needlock)
+ RADIX_NODE_HEAD_RLOCK(rnh);
+#ifdef INVARIANTS
+ else
+ RADIX_NODE_HEAD_LOCK_ASSERT(rnh);
+#endif
+ rn = rnh->rnh_matchaddr(dst, rnh);
+ if (rn && ((rn->rn_flags & RNF_ROOT) == 0)) {
+ newrt = rt = RNTORT(rn);
+ RT_LOCK(newrt);
+ RT_ADDREF(newrt);
+ if (needlock)
+ RADIX_NODE_HEAD_RUNLOCK(rnh);
+ goto done;
+
+ } else if (needlock)
+ RADIX_NODE_HEAD_RUNLOCK(rnh);
+
+	/*
+	 * Either we hit the root or couldn't find any match,
+	 * which basically means "can't get there from here".
+	 */
+ V_rtstat.rts_unreach++;
+miss:
+ if (report) {
+		/*
+		 * If required, report the failure to the supervising
+		 * authorities.  For a delete (report == 0), this is
+		 * not an error.
+		 */
+ bzero(&info, sizeof(info));
+ info.rti_info[RTAX_DST] = dst;
+ rt_missmsg(msgtype, &info, 0, err);
+ }
+done:
+ if (newrt)
+ RT_LOCK_ASSERT(newrt);
+ return (newrt);
+}
+
+/*
+ * Remove a reference count from an rtentry.
+ * If the count gets low enough, take it out of the routing table
+ */
+void
+rtfree(struct rtentry *rt)
+{
+ struct radix_node_head *rnh;
+
+ KASSERT(rt != NULL,("%s: NULL rt", __func__));
+ rnh = rt_tables_get_rnh(rt->rt_fibnum, rt_key(rt)->sa_family);
+ KASSERT(rnh != NULL,("%s: NULL rnh", __func__));
+
+ RT_LOCK_ASSERT(rt);
+
+ /*
+ * The callers should use RTFREE_LOCKED() or RTFREE(), so
+ * we should come here exactly with the last reference.
+ */
+ RT_REMREF(rt);
+ if (rt->rt_refcnt > 0) {
+ log(LOG_DEBUG, "%s: %p has %d refs\n", __func__, rt, rt->rt_refcnt);
+ goto done;
+ }
+
+ /*
+ * On last reference give the "close method" a chance
+ * to cleanup private state. This also permits (for
+ * IPv4 and IPv6) a chance to decide if the routing table
+ * entry should be purged immediately or at a later time.
+ * When an immediate purge is to happen the close routine
+ * typically calls rtexpunge which clears the RTF_UP flag
+ * on the entry so that the code below reclaims the storage.
+ */
+ if (rt->rt_refcnt == 0 && rnh->rnh_close)
+ rnh->rnh_close((struct radix_node *)rt, rnh);
+
+ /*
+ * If we are no longer "up" (and ref == 0)
+ * then we can free the resources associated
+ * with the route.
+ */
+ if ((rt->rt_flags & RTF_UP) == 0) {
+ if (rt->rt_nodes->rn_flags & (RNF_ACTIVE | RNF_ROOT))
+ panic("rtfree 2");
+		/*
+		 * The rtentry must have been removed from the routing table,
+		 * so it is represented in rttrash; remove that now.
+		 */
+ V_rttrash--;
+#ifdef DIAGNOSTIC
+ if (rt->rt_refcnt < 0) {
+ printf("rtfree: %p not freed (neg refs)\n", rt);
+ goto done;
+ }
+#endif
+		/*
+		 * Release references on items we hold, e.g. other routes
+		 * and ifaddrs.
+		 */
+ if (rt->rt_ifa)
+ ifa_free(rt->rt_ifa);
+ /*
+		 * The key is separately alloc'd so free it (see rt_setgate()).
+ * This also frees the gateway, as they are always malloc'd
+ * together.
+ */
+ Free(rt_key(rt));
+
+		/*
+		 * And the rtentry itself, of course.
+		 */
+ RT_LOCK_DESTROY(rt);
+ uma_zfree(V_rtzone, rt);
+ return;
+ }
+done:
+ RT_UNLOCK(rt);
+}
+
+
+/*
+ * Force a routing table entry to the specified
+ * destination to go through the given gateway.
+ * Normally called as a result of a routing redirect
+ * message from the network layer.
+ */
+void
+rtredirect(struct sockaddr *dst,
+ struct sockaddr *gateway,
+ struct sockaddr *netmask,
+ int flags,
+ struct sockaddr *src)
+{
+ rtredirect_fib(dst, gateway, netmask, flags, src, 0);
+}
+
+void
+rtredirect_fib(struct sockaddr *dst,
+ struct sockaddr *gateway,
+ struct sockaddr *netmask,
+ int flags,
+ struct sockaddr *src,
+ u_int fibnum)
+{
+ struct rtentry *rt, *rt0 = NULL;
+ int error = 0;
+ short *stat = NULL;
+ struct rt_addrinfo info;
+ struct ifaddr *ifa;
+ struct radix_node_head *rnh;
+
+ ifa = NULL;
+ rnh = rt_tables_get_rnh(fibnum, dst->sa_family);
+ if (rnh == NULL) {
+ error = EAFNOSUPPORT;
+ goto out;
+ }
+
+ /* verify the gateway is directly reachable */
+ if ((ifa = ifa_ifwithnet(gateway, 0)) == NULL) {
+ error = ENETUNREACH;
+ goto out;
+ }
+ rt = rtalloc1_fib(dst, 0, 0UL, fibnum); /* NB: rt is locked */
+ /*
+ * If the redirect isn't from our current router for this dst,
+ * it's either old or wrong. If it redirects us to ourselves,
+ * we have a routing loop, perhaps as a result of an interface
+ * going down recently.
+ */
+ if (!(flags & RTF_DONE) && rt &&
+ (!sa_equal(src, rt->rt_gateway) || rt->rt_ifa != ifa))
+ error = EINVAL;
+ else if (ifa_ifwithaddr_check(gateway))
+ error = EHOSTUNREACH;
+ if (error)
+ goto done;
+ /*
+ * Create a new entry if we just got back a wildcard entry
+	 * or the lookup failed.  This is necessary for hosts
+ * which use routing redirects generated by smart gateways
+ * to dynamically build the routing tables.
+ */
+ if (rt == NULL || (rt_mask(rt) && rt_mask(rt)->sa_len < 2))
+ goto create;
+ /*
+ * Don't listen to the redirect if it's
+ * for a route to an interface.
+ */
+ if (rt->rt_flags & RTF_GATEWAY) {
+ if (((rt->rt_flags & RTF_HOST) == 0) && (flags & RTF_HOST)) {
+ /*
+ * Changing from route to net => route to host.
+ * Create new route, rather than smashing route to net.
+ */
+ create:
+ rt0 = rt;
+ rt = NULL;
+
+ flags |= RTF_GATEWAY | RTF_DYNAMIC;
+ bzero((caddr_t)&info, sizeof(info));
+ info.rti_info[RTAX_DST] = dst;
+ info.rti_info[RTAX_GATEWAY] = gateway;
+ info.rti_info[RTAX_NETMASK] = netmask;
+ info.rti_ifa = ifa;
+ info.rti_flags = flags;
+ if (rt0 != NULL)
+ RT_UNLOCK(rt0); /* drop lock to avoid LOR with RNH */
+ error = rtrequest1_fib(RTM_ADD, &info, &rt, fibnum);
+ if (rt != NULL) {
+ RT_LOCK(rt);
+ if (rt0 != NULL)
+ EVENTHANDLER_INVOKE(route_redirect_event, rt0, rt, dst);
+ flags = rt->rt_flags;
+ }
+ if (rt0 != NULL)
+ RTFREE(rt0);
+
+ stat = &V_rtstat.rts_dynamic;
+ } else {
+ struct rtentry *gwrt;
+
+ /*
+ * Smash the current notion of the gateway to
+ * this destination. Should check about netmask!!!
+ */
+ rt->rt_flags |= RTF_MODIFIED;
+ flags |= RTF_MODIFIED;
+ stat = &V_rtstat.rts_newgateway;
+ /*
+ * add the key and gateway (in one malloc'd chunk).
+ */
+ RT_UNLOCK(rt);
+ RADIX_NODE_HEAD_LOCK(rnh);
+ RT_LOCK(rt);
+ rt_setgate(rt, rt_key(rt), gateway);
+ gwrt = rtalloc1(gateway, 1, RTF_RNH_LOCKED);
+ RADIX_NODE_HEAD_UNLOCK(rnh);
+ EVENTHANDLER_INVOKE(route_redirect_event, rt, gwrt, dst);
+ RTFREE_LOCKED(gwrt);
+ }
+ } else
+ error = EHOSTUNREACH;
+done:
+ if (rt)
+ RTFREE_LOCKED(rt);
+out:
+ if (error)
+ V_rtstat.rts_badredirect++;
+ else if (stat != NULL)
+ (*stat)++;
+ bzero((caddr_t)&info, sizeof(info));
+ info.rti_info[RTAX_DST] = dst;
+ info.rti_info[RTAX_GATEWAY] = gateway;
+ info.rti_info[RTAX_NETMASK] = netmask;
+ info.rti_info[RTAX_AUTHOR] = src;
+ rt_missmsg(RTM_REDIRECT, &info, flags, error);
+ if (ifa != NULL)
+ ifa_free(ifa);
+}
+
+int
+rtioctl(u_long req, caddr_t data)
+{
+ return (rtioctl_fib(req, data, 0));
+}
+
+/*
+ * Routing table ioctl interface.
+ */
+int
+rtioctl_fib(u_long req, caddr_t data, u_int fibnum)
+{
+
+ /*
+ * If more ioctl commands are added here, make sure the proper
+ * super-user checks are being performed because it is possible for
+ * prison-root to make it this far if raw sockets have been enabled
+ * in jails.
+ */
+#ifdef INET
+ /* Multicast goop, grrr... */
+ return mrt_ioctl ? mrt_ioctl(req, data, fibnum) : EOPNOTSUPP;
+#else /* INET */
+ return ENXIO;
+#endif /* INET */
+}
+
+/*
+ * For both ifa_ifwithroute() routines, 'ifa' is returned referenced.
+ */
+struct ifaddr *
+ifa_ifwithroute(int flags, struct sockaddr *dst, struct sockaddr *gateway)
+{
+ return (ifa_ifwithroute_fib(flags, dst, gateway, 0));
+}
+
+struct ifaddr *
+ifa_ifwithroute_fib(int flags, struct sockaddr *dst, struct sockaddr *gateway,
+ u_int fibnum)
+{
+ register struct ifaddr *ifa;
+ int not_found = 0;
+
+ if ((flags & RTF_GATEWAY) == 0) {
+ /*
+ * If we are adding a route to an interface,
+ * and the interface is a pt to pt link
+ * we should search for the destination
+ * as our clue to the interface. Otherwise
+ * we can use the local address.
+ */
+ ifa = NULL;
+ if (flags & RTF_HOST)
+ ifa = ifa_ifwithdstaddr(dst);
+ if (ifa == NULL)
+ ifa = ifa_ifwithaddr(gateway);
+ } else {
+ /*
+ * If we are adding a route to a remote net
+ * or host, the gateway may still be on the
+ * other end of a pt to pt link.
+ */
+ ifa = ifa_ifwithdstaddr(gateway);
+ }
+ if (ifa == NULL)
+ ifa = ifa_ifwithnet(gateway, 0);
+ if (ifa == NULL) {
+ struct rtentry *rt = rtalloc1_fib(gateway, 0, RTF_RNH_LOCKED, fibnum);
+ if (rt == NULL)
+ return (NULL);
+ /*
+ * dismiss a gateway that is reachable only
+ * through the default router
+ */
+ switch (gateway->sa_family) {
+ case AF_INET:
+ if (satosin(rt_key(rt))->sin_addr.s_addr == INADDR_ANY)
+ not_found = 1;
+ break;
+ case AF_INET6:
+ if (IN6_IS_ADDR_UNSPECIFIED(&satosin6(rt_key(rt))->sin6_addr))
+ not_found = 1;
+ break;
+ default:
+ break;
+ }
+ if (!not_found && rt->rt_ifa != NULL) {
+ ifa = rt->rt_ifa;
+ ifa_ref(ifa);
+ }
+ RT_REMREF(rt);
+ RT_UNLOCK(rt);
+ if (not_found || ifa == NULL)
+ return (NULL);
+ }
+ if (ifa->ifa_addr->sa_family != dst->sa_family) {
+ struct ifaddr *oifa = ifa;
+ ifa = ifaof_ifpforaddr(dst, ifa->ifa_ifp);
+ if (ifa == NULL)
+ ifa = oifa;
+ else
+ ifa_free(oifa);
+ }
+ return (ifa);
+}
+
+/*
+ * Do appropriate manipulations of a routing tree given
+ * all the bits of info needed
+ */
+int
+rtrequest(int req,
+ struct sockaddr *dst,
+ struct sockaddr *gateway,
+ struct sockaddr *netmask,
+ int flags,
+ struct rtentry **ret_nrt)
+{
+ return (rtrequest_fib(req, dst, gateway, netmask, flags, ret_nrt, 0));
+}
+
+int
+rtrequest_fib(int req,
+ struct sockaddr *dst,
+ struct sockaddr *gateway,
+ struct sockaddr *netmask,
+ int flags,
+ struct rtentry **ret_nrt,
+ u_int fibnum)
+{
+ struct rt_addrinfo info;
+
+ if (dst->sa_len == 0)
+ return(EINVAL);
+
+ bzero((caddr_t)&info, sizeof(info));
+ info.rti_flags = flags;
+ info.rti_info[RTAX_DST] = dst;
+ info.rti_info[RTAX_GATEWAY] = gateway;
+ info.rti_info[RTAX_NETMASK] = netmask;
+ return rtrequest1_fib(req, &info, ret_nrt, fibnum);
+}
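+/*
+ * Usage sketch (IPv4 host route through a gateway; variable names are
+ * hypothetical and error handling is abbreviated):
+ *
+ *	struct sockaddr_in sdst, sgw;
+ *	struct rtentry *rt = NULL;
+ *	int error;
+ *
+ *	bzero(&sdst, sizeof(sdst));
+ *	sdst.sin_len = sizeof(sdst);
+ *	sdst.sin_family = AF_INET;
+ *	sdst.sin_addr.s_addr = htonl(0xc0000201);	(192.0.2.1)
+ *	sgw = sdst;
+ *	sgw.sin_addr.s_addr = htonl(0xc00002fe);	(192.0.2.254)
+ *	error = rtrequest_fib(RTM_ADD, (struct sockaddr *)&sdst,
+ *	    (struct sockaddr *)&sgw, NULL, RTF_HOST | RTF_GATEWAY, &rt, 0);
+ *	if (error == 0 && rt != NULL)
+ *		RTFREE(rt);	(drop the reference handed back to us)
+ */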
+
+/*
+ * These (questionable) definitions of apparent local variables apply
+ * to the next two functions. XXXXXX!!!
+ */
+#define dst info->rti_info[RTAX_DST]
+#define gateway info->rti_info[RTAX_GATEWAY]
+#define netmask info->rti_info[RTAX_NETMASK]
+#define ifaaddr info->rti_info[RTAX_IFA]
+#define ifpaddr info->rti_info[RTAX_IFP]
+#define flags info->rti_flags
+
+int
+rt_getifa(struct rt_addrinfo *info)
+{
+ return (rt_getifa_fib(info, 0));
+}
+
+/*
+ * Look up rt_addrinfo for a specific fib. Note that if rti_ifa is defined,
+ * it will be referenced so the caller must free it.
+ */
+int
+rt_getifa_fib(struct rt_addrinfo *info, u_int fibnum)
+{
+ struct ifaddr *ifa;
+ int error = 0;
+
+ /*
+ * ifp may be specified by sockaddr_dl
+ * when protocol address is ambiguous.
+ */
+ if (info->rti_ifp == NULL && ifpaddr != NULL &&
+ ifpaddr->sa_family == AF_LINK &&
+ (ifa = ifa_ifwithnet(ifpaddr, 0)) != NULL) {
+ info->rti_ifp = ifa->ifa_ifp;
+ ifa_free(ifa);
+ }
+ if (info->rti_ifa == NULL && ifaaddr != NULL)
+ info->rti_ifa = ifa_ifwithaddr(ifaaddr);
+ if (info->rti_ifa == NULL) {
+ struct sockaddr *sa;
+
+ sa = ifaaddr != NULL ? ifaaddr :
+ (gateway != NULL ? gateway : dst);
+ if (sa != NULL && info->rti_ifp != NULL)
+ info->rti_ifa = ifaof_ifpforaddr(sa, info->rti_ifp);
+ else if (dst != NULL && gateway != NULL)
+ info->rti_ifa = ifa_ifwithroute_fib(flags, dst, gateway,
+ fibnum);
+ else if (sa != NULL)
+ info->rti_ifa = ifa_ifwithroute_fib(flags, sa, sa,
+ fibnum);
+ }
+ if ((ifa = info->rti_ifa) != NULL) {
+ if (info->rti_ifp == NULL)
+ info->rti_ifp = ifa->ifa_ifp;
+ } else
+ error = ENETUNREACH;
+ return (error);
+}
+
+/*
+ * Expunges references to a route that's about to be reclaimed.
+ * The route must be locked.
+ */
+int
+rtexpunge(struct rtentry *rt)
+{
+#if !defined(RADIX_MPATH)
+ struct radix_node *rn;
+#else
+ struct rt_addrinfo info;
+ int fib;
+ struct rtentry *rt0;
+#endif
+ struct radix_node_head *rnh;
+ struct ifaddr *ifa;
+ int error = 0;
+
+ /*
+ * Find the correct routing tree to use for this Address Family
+ */
+ rnh = rt_tables_get_rnh(rt->rt_fibnum, rt_key(rt)->sa_family);
+ RT_LOCK_ASSERT(rt);
+ if (rnh == NULL)
+ return (EAFNOSUPPORT);
+ RADIX_NODE_HEAD_LOCK_ASSERT(rnh);
+
+#ifdef RADIX_MPATH
+ fib = rt->rt_fibnum;
+ bzero(&info, sizeof(info));
+ info.rti_ifp = rt->rt_ifp;
+ info.rti_flags = RTF_RNH_LOCKED;
+ info.rti_info[RTAX_DST] = rt_key(rt);
+ info.rti_info[RTAX_GATEWAY] = rt->rt_ifa->ifa_addr;
+
+ RT_UNLOCK(rt);
+ error = rtrequest1_fib(RTM_DELETE, &info, &rt0, fib);
+
+ if (error == 0 && rt0 != NULL) {
+ rt = rt0;
+ RT_LOCK(rt);
+ } else if (error != 0) {
+ RT_LOCK(rt);
+ return (error);
+ }
+#else
+ /*
+ * Remove the item from the tree; it should be there,
+ * but when callers invoke us blindly it may not (sigh).
+ */
+ rn = rnh->rnh_deladdr(rt_key(rt), rt_mask(rt), rnh);
+ if (rn == NULL) {
+ error = ESRCH;
+ goto bad;
+ }
+ KASSERT((rn->rn_flags & (RNF_ACTIVE | RNF_ROOT)) == 0,
+ ("unexpected flags 0x%x", rn->rn_flags));
+ KASSERT(rt == RNTORT(rn),
+ ("lookup mismatch, rt %p rn %p", rt, rn));
+#endif /* RADIX_MPATH */
+
+ rt->rt_flags &= ~RTF_UP;
+
+ /*
+ * Give the protocol a chance to keep things in sync.
+ */
+ if ((ifa = rt->rt_ifa) && ifa->ifa_rtrequest) {
+ struct rt_addrinfo info;
+
+ bzero((caddr_t)&info, sizeof(info));
+ info.rti_flags = rt->rt_flags;
+ info.rti_info[RTAX_DST] = rt_key(rt);
+ info.rti_info[RTAX_GATEWAY] = rt->rt_gateway;
+ info.rti_info[RTAX_NETMASK] = rt_mask(rt);
+ ifa->ifa_rtrequest(RTM_DELETE, rt, &info);
+ }
+
+ /*
+ * one more rtentry floating around that is not
+ * linked to the routing table.
+ */
+ V_rttrash++;
+#if !defined(RADIX_MPATH)
+bad:
+#endif
+ return (error);
+}
+
+#ifdef RADIX_MPATH
+static int
+rn_mpath_update(int req, struct rt_addrinfo *info,
+ struct radix_node_head *rnh, struct rtentry **ret_nrt)
+{
+ /*
+ * if we got multipath routes, we require users to specify
+ * a matching RTAX_GATEWAY.
+ */
+ struct rtentry *rt, *rto = NULL;
+ register struct radix_node *rn;
+ int error = 0;
+
+ rn = rnh->rnh_matchaddr(dst, rnh);
+ if (rn == NULL)
+ return (ESRCH);
+ rto = rt = RNTORT(rn);
+ rt = rt_mpath_matchgate(rt, gateway);
+ if (rt == NULL)
+ return (ESRCH);
+ /*
+ * this is the first entry in the chain
+ */
+ if (rto == rt) {
+ rn = rn_mpath_next((struct radix_node *)rt);
+ /*
+ * there is another entry, now it's active
+ */
+ if (rn) {
+ rto = RNTORT(rn);
+ RT_LOCK(rto);
+ rto->rt_flags |= RTF_UP;
+ RT_UNLOCK(rto);
+ } else if (rt->rt_flags & RTF_GATEWAY) {
+ /*
+ * For gateway routes, we need to
+					 * make sure that we are deleting
+ * the correct gateway.
+ * rt_mpath_matchgate() does not
+ * check the case when there is only
+ * one route in the chain.
+ */
+ if (gateway &&
+ (rt->rt_gateway->sa_len != gateway->sa_len ||
+ memcmp(rt->rt_gateway, gateway, gateway->sa_len)))
+ error = ESRCH;
+ else {
+ /*
+ * remove from tree before returning it
+ * to the caller
+ */
+ rn = rnh->rnh_deladdr(dst, netmask, rnh);
+ KASSERT(rt == RNTORT(rn), ("radix node disappeared"));
+ goto gwdelete;
+ }
+
+ }
+ /*
+ * use the normal delete code to remove
+ * the first entry
+ */
+ if (req != RTM_DELETE)
+ goto nondelete;
+
+ error = ENOENT;
+ goto done;
+ }
+
+ /*
+ * if the entry is 2nd and on up
+ */
+ if ((req == RTM_DELETE) && !rt_mpath_deldup(rto, rt))
+		panic("rtrequest1: rt_mpath_deldup");
+gwdelete:
+ RT_LOCK(rt);
+ RT_ADDREF(rt);
+ if (req == RTM_DELETE) {
+ rt->rt_flags &= ~RTF_UP;
+ /*
+ * One more rtentry floating around that is not
+ * linked to the routing table. rttrash will be decremented
+ * when RTFREE(rt) is eventually called.
+ */
+ V_rttrash++;
+ }
+
+nondelete:
+ if (req != RTM_DELETE)
+ panic("unrecognized request %d", req);
+
+
+	/*
+	 * If the caller wants the entry, it can have it, but it is
+	 * then responsible for freeing the rtentry, as we won't be
+	 * doing it.
+	 */
+ if (ret_nrt) {
+ *ret_nrt = rt;
+ RT_UNLOCK(rt);
+ } else
+ RTFREE_LOCKED(rt);
+done:
+ return (error);
+}
+#endif
+
+int
+rtrequest1_fib(int req, struct rt_addrinfo *info, struct rtentry **ret_nrt,
+ u_int fibnum)
+{
+ int error = 0, needlock = 0;
+ register struct rtentry *rt;
+#ifdef FLOWTABLE
+ register struct rtentry *rt0;
+#endif
+ register struct radix_node *rn;
+ register struct radix_node_head *rnh;
+ struct ifaddr *ifa;
+ struct sockaddr *ndst;
+#define senderr(x) { error = x ; goto bad; }
+
+ KASSERT((fibnum < rt_numfibs), ("rtrequest1_fib: bad fibnum"));
+ if (dst->sa_family != AF_INET) /* Only INET supports > 1 fib now */
+ fibnum = 0;
+ /*
+ * Find the correct routing tree to use for this Address Family
+ */
+ rnh = rt_tables_get_rnh(fibnum, dst->sa_family);
+ if (rnh == NULL)
+ return (EAFNOSUPPORT);
+ needlock = ((flags & RTF_RNH_LOCKED) == 0);
+ flags &= ~RTF_RNH_LOCKED;
+ if (needlock)
+ RADIX_NODE_HEAD_LOCK(rnh);
+ else
+ RADIX_NODE_HEAD_LOCK_ASSERT(rnh);
+ /*
+ * If we are adding a host route then we don't want to put
+ * a netmask in the tree, nor do we want to clone it.
+ */
+ if (flags & RTF_HOST)
+ netmask = NULL;
+
+ switch (req) {
+ case RTM_DELETE:
+#ifdef RADIX_MPATH
+ if (rn_mpath_capable(rnh)) {
+ error = rn_mpath_update(req, info, rnh, ret_nrt);
+			/*
+			 * The "bad" label handles the success case
+			 * as well.
+			 */
+ if (error != ENOENT)
+ goto bad;
+ error = 0;
+ }
+#endif
+ /*
+ * Remove the item from the tree and return it.
+ * Complain if it is not there and do no more processing.
+ */
+ rn = rnh->rnh_deladdr(dst, netmask, rnh);
+ if (rn == NULL)
+ senderr(ESRCH);
+ if (rn->rn_flags & (RNF_ACTIVE | RNF_ROOT))
+			panic("rtrequest delete");
+ rt = RNTORT(rn);
+ RT_LOCK(rt);
+ RT_ADDREF(rt);
+ rt->rt_flags &= ~RTF_UP;
+
+ /*
+ * give the protocol a chance to keep things in sync.
+ */
+ if ((ifa = rt->rt_ifa) && ifa->ifa_rtrequest)
+ ifa->ifa_rtrequest(RTM_DELETE, rt, info);
+
+ /*
+ * One more rtentry floating around that is not
+ * linked to the routing table. rttrash will be decremented
+ * when RTFREE(rt) is eventually called.
+ */
+ V_rttrash++;
+
+		/*
+		 * If the caller wants the entry, it can have it, but it is
+		 * then responsible for freeing the rtentry, as we won't be
+		 * doing it.
+		 */
+ if (ret_nrt) {
+ *ret_nrt = rt;
+ RT_UNLOCK(rt);
+ } else
+ RTFREE_LOCKED(rt);
+ break;
+ case RTM_RESOLVE:
+		/*
+		 * Resolve was only used for route cloning;
+		 * kept here for compatibility.
+		 */
+ break;
+ case RTM_ADD:
+ if ((flags & RTF_GATEWAY) && !gateway)
+ senderr(EINVAL);
+ if (dst && gateway && (dst->sa_family != gateway->sa_family) &&
+ (gateway->sa_family != AF_UNSPEC) && (gateway->sa_family != AF_LINK))
+ senderr(EINVAL);
+
+ if (info->rti_ifa == NULL) {
+ error = rt_getifa_fib(info, fibnum);
+ if (error)
+ senderr(error);
+ } else
+ ifa_ref(info->rti_ifa);
+ ifa = info->rti_ifa;
+ rt = uma_zalloc(V_rtzone, M_NOWAIT | M_ZERO);
+ if (rt == NULL) {
+ if (ifa != NULL)
+ ifa_free(ifa);
+ senderr(ENOBUFS);
+ }
+ RT_LOCK_INIT(rt);
+ rt->rt_flags = RTF_UP | flags;
+ rt->rt_fibnum = fibnum;
+		/*
+		 * Add the gateway.  Possibly re-malloc-ing the storage
+		 * for it.
+		 */
+ RT_LOCK(rt);
+ if ((error = rt_setgate(rt, dst, gateway)) != 0) {
+ RT_LOCK_DESTROY(rt);
+ if (ifa != NULL)
+ ifa_free(ifa);
+ uma_zfree(V_rtzone, rt);
+ senderr(error);
+ }
+
+ /*
+ * point to the (possibly newly malloc'd) dest address.
+ */
+ ndst = (struct sockaddr *)rt_key(rt);
+
+ /*
+ * make sure it contains the value we want (masked if needed).
+ */
+ if (netmask) {
+ rt_maskedcopy(dst, ndst, netmask);
+ } else
+ bcopy(dst, ndst, dst->sa_len);
+
+ /*
+ * We use the ifa reference returned by rt_getifa_fib().
+ * This moved from below so that rnh->rnh_addaddr() can
+ * examine the ifa and ifa->ifa_ifp if it so desires.
+ */
+ rt->rt_ifa = ifa;
+ rt->rt_ifp = ifa->ifa_ifp;
+ rt->rt_rmx.rmx_weight = 1;
+
+#ifdef RADIX_MPATH
+ /* do not permit exactly the same dst/mask/gw pair */
+ if (rn_mpath_capable(rnh) &&
+ rt_mpath_conflict(rnh, rt, netmask)) {
+ if (rt->rt_ifa) {
+ ifa_free(rt->rt_ifa);
+ }
+ Free(rt_key(rt));
+ RT_LOCK_DESTROY(rt);
+ uma_zfree(V_rtzone, rt);
+ senderr(EEXIST);
+ }
+#endif
+
+#ifdef FLOWTABLE
+ rt0 = NULL;
+ /* XXX
+	 * "flow-table" only supports IPv4 at the moment.
+ */
+#ifdef INET
+ if (dst->sa_family == AF_INET) {
+ rn = rnh->rnh_matchaddr(dst, rnh);
+ if (rn && ((rn->rn_flags & RNF_ROOT) == 0)) {
+ struct sockaddr *mask;
+ u_char *m, *n;
+ int len;
+
+ /*
+ * compare mask to see if the new route is
+ * more specific than the existing one
+ */
+ rt0 = RNTORT(rn);
+ RT_LOCK(rt0);
+ RT_ADDREF(rt0);
+ RT_UNLOCK(rt0);
+ /*
+ * A host route is already present, so
+ * leave the flow-table entries as is.
+ */
+ if (rt0->rt_flags & RTF_HOST) {
+ RTFREE(rt0);
+ rt0 = NULL;
+ } else if (!(flags & RTF_HOST) && netmask) {
+ mask = rt_mask(rt0);
+ len = mask->sa_len;
+ m = (u_char *)mask;
+ n = (u_char *)netmask;
+ while (len-- > 0) {
+ if (*n != *m)
+ break;
+ n++;
+ m++;
+ }
+ if (len == 0 || (*n < *m)) {
+ RTFREE(rt0);
+ rt0 = NULL;
+ }
+ }
+ }
+ }
+#endif
+#endif
+
+ /* XXX mtu manipulation will be done in rnh_addaddr -- itojun */
+ rn = rnh->rnh_addaddr(ndst, netmask, rnh, rt->rt_nodes);
+ /*
+ * If it still failed to go into the tree,
+ * then un-make it (this should be a function)
+ */
+ if (rn == NULL) {
+ if (rt->rt_ifa)
+ ifa_free(rt->rt_ifa);
+ Free(rt_key(rt));
+ RT_LOCK_DESTROY(rt);
+ uma_zfree(V_rtzone, rt);
+#ifdef FLOWTABLE
+ if (rt0 != NULL)
+ RTFREE(rt0);
+#endif
+ senderr(EEXIST);
+ }
+#ifdef FLOWTABLE
+ else if (rt0 != NULL) {
+#ifdef INET
+ flowtable_route_flush(V_ip_ft, rt0);
+#endif
+ RTFREE(rt0);
+ }
+#endif
+
+ /*
+ * If this protocol has something to add to this then
+ * allow it to do that as well.
+ */
+ if (ifa->ifa_rtrequest)
+ ifa->ifa_rtrequest(req, rt, info);
+
+ /*
+ * actually return a resultant rtentry and
+ * give the caller a single reference.
+ */
+ if (ret_nrt) {
+ *ret_nrt = rt;
+ RT_ADDREF(rt);
+ }
+ RT_UNLOCK(rt);
+ break;
+ default:
+ error = EOPNOTSUPP;
+ }
+bad:
+ if (needlock)
+ RADIX_NODE_HEAD_UNLOCK(rnh);
+ return (error);
+#undef senderr
+}
+
+#undef dst
+#undef gateway
+#undef netmask
+#undef ifaaddr
+#undef ifpaddr
+#undef flags
+
+int
+rt_setgate(struct rtentry *rt, struct sockaddr *dst, struct sockaddr *gate)
+{
+ /* XXX dst may be overwritten, can we move this to below */
+ int dlen = SA_SIZE(dst), glen = SA_SIZE(gate);
+#ifdef INVARIANTS
+ struct radix_node_head *rnh;
+
+ rnh = rt_tables_get_rnh(rt->rt_fibnum, dst->sa_family);
+#endif
+
+ RT_LOCK_ASSERT(rt);
+ RADIX_NODE_HEAD_LOCK_ASSERT(rnh);
+
+ /*
+ * Prepare to store the gateway in rt->rt_gateway.
+ * Both dst and gateway are stored one after the other in the same
+	 * malloc'd chunk.  If we have room, we can reuse the old buffer,
+	 * since rt_gateway already points to the right place.
+ * Otherwise, malloc a new block and update the 'dst' address.
+ */
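+	/*
+	 * Layout of the shared chunk (sketch):
+	 *
+	 *	|<------- dlen ------->|<------- glen ------->|
+	 *	+----------------------+----------------------+
+	 *	| rt_key(rt) sockaddr  | rt_gateway sockaddr  |
+	 *	+----------------------+----------------------+
+	 */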
+ if (rt->rt_gateway == NULL || glen > SA_SIZE(rt->rt_gateway)) {
+ caddr_t new;
+
+ R_Malloc(new, caddr_t, dlen + glen);
+ if (new == NULL)
+ return ENOBUFS;
+ /*
+ * XXX note, we copy from *dst and not *rt_key(rt) because
+ * rt_setgate() can be called to initialize a newly
+ * allocated route entry, in which case rt_key(rt) == NULL
+ * (and also rt->rt_gateway == NULL).
+ * Free()/free() handle a NULL argument just fine.
+ */
+ bcopy(dst, new, dlen);
+ Free(rt_key(rt)); /* free old block, if any */
+ rt_key(rt) = (struct sockaddr *)new;
+ rt->rt_gateway = (struct sockaddr *)(new + dlen);
+ }
+
+ /*
+ * Copy the new gateway value into the memory chunk.
+ */
+ bcopy(gate, rt->rt_gateway, glen);
+
+ return (0);
+}
+
+void
+rt_maskedcopy(struct sockaddr *src, struct sockaddr *dst, struct sockaddr *netmask)
+{
+ register u_char *cp1 = (u_char *)src;
+ register u_char *cp2 = (u_char *)dst;
+ register u_char *cp3 = (u_char *)netmask;
+ u_char *cplim = cp2 + *cp3;
+ u_char *cplim2 = cp2 + *cp1;
+
+ *cp2++ = *cp1++; *cp2++ = *cp1++; /* copies sa_len & sa_family */
+ cp3 += 2;
+ if (cplim > cplim2)
+ cplim = cplim2;
+ while (cp2 < cplim)
+ *cp2++ = *cp1++ & *cp3++;
+ if (cp2 < cplim2)
+ bzero((caddr_t)cp2, (unsigned)(cplim2 - cp2));
+}
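+/*
+ * Worked example: with src 192.0.2.77 and netmask 255.255.255.0 the
+ * address portion of dst becomes 192.0.2.0, and any bytes of dst beyond
+ * the netmask's sa_len (up to src's sa_len) are zeroed.
+ */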
+
+/*
+ * Set up a routing table entry, normally
+ * for an interface.
+ */
+#define _SOCKADDR_TMPSIZE 128 /* Not too big.. kernel stack size is limited */
+static inline int
+rtinit1(struct ifaddr *ifa, int cmd, int flags, int fibnum)
+{
+ struct sockaddr *dst;
+ struct sockaddr *netmask;
+ struct rtentry *rt = NULL;
+ struct rt_addrinfo info;
+ int error = 0;
+ int startfib, endfib;
+ char tempbuf[_SOCKADDR_TMPSIZE];
+ int didwork = 0;
+ int a_failure = 0;
+ static struct sockaddr_dl null_sdl = {sizeof(null_sdl), AF_LINK};
+
+ if (flags & RTF_HOST) {
+ dst = ifa->ifa_dstaddr;
+ netmask = NULL;
+ } else {
+ dst = ifa->ifa_addr;
+ netmask = ifa->ifa_netmask;
+ }
+	if (dst->sa_family != AF_INET)
+ fibnum = 0;
+ if (fibnum == -1) {
+ if (rt_add_addr_allfibs == 0 && cmd == (int)RTM_ADD) {
+ startfib = endfib = curthread->td_proc->p_fibnum;
+ } else {
+ startfib = 0;
+ endfib = rt_numfibs - 1;
+ }
+ } else {
+ KASSERT((fibnum < rt_numfibs), ("rtinit1: bad fibnum"));
+ startfib = fibnum;
+ endfib = fibnum;
+ }
+ if (dst->sa_len == 0)
+ return(EINVAL);
+
+ /*
+ * If it's a delete, check that if it exists,
+ * it's on the correct interface or we might scrub
+ * a route to another ifa which would
+ * be confusing at best and possibly worse.
+ */
+ if (cmd == RTM_DELETE) {
+		/*
+		 * It's a delete, so it should already exist.
+		 * If it's a net, mask off the host bits
+		 * (assuming we have a mask).
+		 * XXX this is kind of inet-specific.
+		 */
+ if (netmask != NULL) {
+ rt_maskedcopy(dst, (struct sockaddr *)tempbuf, netmask);
+ dst = (struct sockaddr *)tempbuf;
+ }
+ }
+ /*
+ * Now go through all the requested tables (fibs) and do the
+ * requested action. Realistically, this will either be fib 0
+ * for protocols that don't do multiple tables or all the
+ * tables for those that do. XXX For this version only AF_INET.
+ * When that changes code should be refactored to protocol
+ * independent parts and protocol dependent parts.
+ */
+ for ( fibnum = startfib; fibnum <= endfib; fibnum++) {
+ if (cmd == RTM_DELETE) {
+ struct radix_node_head *rnh;
+ struct radix_node *rn;
+ /*
+ * Look up an rtentry that is in the routing tree and
+ * contains the correct info.
+ */
+ rnh = rt_tables_get_rnh(fibnum, dst->sa_family);
+ if (rnh == NULL)
+ /* this table doesn't exist but others might */
+ continue;
+ RADIX_NODE_HEAD_LOCK(rnh);
+#ifdef RADIX_MPATH
+ if (rn_mpath_capable(rnh)) {
+
+ rn = rnh->rnh_matchaddr(dst, rnh);
+ if (rn == NULL)
+ error = ESRCH;
+ else {
+ rt = RNTORT(rn);
+ /*
+ * for interface route the
+ * rt->rt_gateway is sockaddr_intf
+ * for cloning ARP entries, so
+ * rt_mpath_matchgate must use the
+ * interface address
+ */
+ rt = rt_mpath_matchgate(rt,
+ ifa->ifa_addr);
+ if (!rt)
+ error = ESRCH;
+ }
+ }
+ else
+#endif
+ rn = rnh->rnh_lookup(dst, netmask, rnh);
+ error = (rn == NULL ||
+ (rn->rn_flags & RNF_ROOT) ||
+ RNTORT(rn)->rt_ifa != ifa ||
+ !sa_equal((struct sockaddr *)rn->rn_key, dst));
+ RADIX_NODE_HEAD_UNLOCK(rnh);
+ if (error) {
+ /* this is only an error if bad on ALL tables */
+ continue;
+ }
+ }
+ /*
+ * Do the actual request
+ */
+ bzero((caddr_t)&info, sizeof(info));
+ info.rti_ifa = ifa;
+ info.rti_flags = flags | ifa->ifa_flags;
+ info.rti_info[RTAX_DST] = dst;
+ /*
+ * doing this for compatibility reasons
+ */
+ if (cmd == RTM_ADD)
+ info.rti_info[RTAX_GATEWAY] =
+ (struct sockaddr *)&null_sdl;
+ else
+ info.rti_info[RTAX_GATEWAY] = ifa->ifa_addr;
+ info.rti_info[RTAX_NETMASK] = netmask;
+ error = rtrequest1_fib(cmd, &info, &rt, fibnum);
+ if (error == 0 && rt != NULL) {
+ /*
+ * notify any listening routing agents of the change
+ */
+ RT_LOCK(rt);
+#ifdef RADIX_MPATH
+ /*
+ * in case address alias finds the first address
+ * e.g. ifconfig bge0 192.103.54.246/24
+ * e.g. ifconfig bge0 192.103.54.247/24
+ * the address set in the route is 192.103.54.246
+ * so we need to replace it with 192.103.54.247
+ */
+ if (memcmp(rt->rt_ifa->ifa_addr,
+ ifa->ifa_addr, ifa->ifa_addr->sa_len)) {
+ ifa_free(rt->rt_ifa);
+ ifa_ref(ifa);
+ rt->rt_ifp = ifa->ifa_ifp;
+ rt->rt_ifa = ifa;
+ }
+#endif
+ /*
+ * doing this for compatibility reasons
+ */
+ if (cmd == RTM_ADD) {
+ ((struct sockaddr_dl *)rt->rt_gateway)->sdl_type =
+ rt->rt_ifp->if_type;
+ ((struct sockaddr_dl *)rt->rt_gateway)->sdl_index =
+ rt->rt_ifp->if_index;
+ }
+ RT_ADDREF(rt);
+ RT_UNLOCK(rt);
+ rt_newaddrmsg(cmd, ifa, error, rt);
+ RT_LOCK(rt);
+ RT_REMREF(rt);
+ if (cmd == RTM_DELETE) {
+ /*
+ * If we are deleting, and we found an entry,
+ * then it's been removed from the tree..
+ * now throw it away.
+ */
+ RTFREE_LOCKED(rt);
+ } else {
+ if (cmd == RTM_ADD) {
+ /*
+ * We just wanted to add it..
+ * we don't actually need a reference.
+ */
+ RT_REMREF(rt);
+ }
+ RT_UNLOCK(rt);
+ }
+ didwork = 1;
+ }
+ if (error)
+ a_failure = error;
+ }
+ if (cmd == RTM_DELETE) {
+ if (didwork) {
+ error = 0;
+ } else {
+ /* we only give an error if it wasn't in any table */
+ error = ((flags & RTF_HOST) ?
+ EHOSTUNREACH : ENETUNREACH);
+ }
+ } else {
+ if (a_failure) {
+ /* return an error if any of them failed */
+ error = a_failure;
+ }
+ }
+ return (error);
+}
+
+/* Special one for inet internal use; other callers may not use it. */
+int
+rtinit_fib(struct ifaddr *ifa, int cmd, int flags)
+{
+ return (rtinit1(ifa, cmd, flags, -1));
+}
+
+/*
+ * Set up a routing table entry, normally
+ * for an interface.
+ */
+int
+rtinit(struct ifaddr *ifa, int cmd, int flags)
+{
+ struct sockaddr *dst;
+ int fib = 0;
+
+ if (flags & RTF_HOST) {
+ dst = ifa->ifa_dstaddr;
+ } else {
+ dst = ifa->ifa_addr;
+ }
+
+ if (dst->sa_family == AF_INET)
+ fib = -1;
+ return (rtinit1(ifa, cmd, flags, fib));
+}
diff --git a/rtems/freebsd/net/route.h b/rtems/freebsd/net/route.h
new file mode 100644
index 00000000..0aaa9699
--- /dev/null
+++ b/rtems/freebsd/net/route.h
@@ -0,0 +1,446 @@
+/*-
+ * Copyright (c) 1980, 1986, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)route.h 8.4 (Berkeley) 1/9/95
+ * $FreeBSD$
+ */
+
+#ifndef _NET_ROUTE_HH_
+#define _NET_ROUTE_HH_
+
+/*
+ * Kernel resident routing tables.
+ *
+ * The routing tables are initialized when interface addresses
+ * are set by making entries for all directly connected interfaces.
+ */
+
+/*
+ * A route consists of a destination address, a reference
+ * to a routing entry, and a reference to an llentry.
+ * These are often held by protocols in their control
+ * blocks, e.g. inpcb.
+ */
+struct route {
+ struct rtentry *ro_rt;
+ struct llentry *ro_lle;
+ struct sockaddr ro_dst;
+};
+
+/*
+ * These numbers are used by reliable protocols for determining
+ * retransmission behavior and are included in the routing structure.
+ */
+struct rt_metrics_lite {
+ u_long rmx_mtu; /* MTU for this path */
+ u_long rmx_expire; /* lifetime for route, e.g. redirect */
+ u_long rmx_pksent; /* packets sent using this route */
+ u_long rmx_weight; /* absolute weight */
+};
+
+struct rt_metrics {
+ u_long rmx_locks; /* Kernel must leave these values alone */
+ u_long rmx_mtu; /* MTU for this path */
+ u_long rmx_hopcount; /* max hops expected */
+ u_long rmx_expire; /* lifetime for route, e.g. redirect */
+ u_long rmx_recvpipe; /* inbound delay-bandwidth product */
+ u_long rmx_sendpipe; /* outbound delay-bandwidth product */
+ u_long rmx_ssthresh; /* outbound gateway buffer limit */
+ u_long rmx_rtt; /* estimated round trip time */
+ u_long rmx_rttvar; /* estimated rtt variance */
+ u_long rmx_pksent; /* packets sent using this route */
+ u_long rmx_weight; /* route weight */
+ u_long rmx_filler[3]; /* will be used for T/TCP later */
+};
+
+/*
+ * rmx_rtt and rmx_rttvar are stored as microseconds;
+ * RTTTOPRHZ(rtt) converts to a value suitable for use
+ * by a protocol slowtimo counter.
+ */
+#define RTM_RTTUNIT 1000000 /* units for rtt, rttvar, as units per sec */
+#define RTTTOPRHZ(r) ((r) / (RTM_RTTUNIT / PR_SLOWHZ))
+
+/* MRT compile-time constants */
+#ifdef _KERNEL
+ #ifndef ROUTETABLES
+ #define RT_NUMFIBS 1
+ #define RT_MAXFIBS 1
+ #else
+ /* while we use 4 bits in the mbuf flags, we are limited to 16 */
+ #define RT_MAXFIBS 16
+ #if ROUTETABLES > RT_MAXFIBS
+ #define RT_NUMFIBS RT_MAXFIBS
+ #error "ROUTETABLES defined too big"
+ #else
+ #if ROUTETABLES == 0
+ #define RT_NUMFIBS 1
+ #else
+ #define RT_NUMFIBS ROUTETABLES
+ #endif
+ #endif
+ #endif
+#endif
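+/*
+ * On stock FreeBSD the number of compiled-in tables comes from the kernel
+ * configuration file, e.g.:
+ *
+ *	options ROUTETABLES=4
+ *
+ * How ROUTETABLES is supplied under this RTEMS port is assumed to be a
+ * build-system decision.
+ */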
+
+extern u_int rt_numfibs;		/* number of usable routing tables */
+extern u_int tunnel_fib; /* tunnels use these */
+extern u_int fwd_fib; /* packets being forwarded use these routes */
+/*
+ * XXX kernel function pointer `rt_output' is visible to applications.
+ */
+struct mbuf;
+
+/*
+ * We distinguish between routes to hosts and routes to networks,
+ * preferring the former if available. For each route we infer
+ * the interface to use from the gateway address supplied when
+ * the route was entered. Routes that forward packets through
+ * gateways are marked so that the output routines know to address the
+ * gateway rather than the ultimate destination.
+ */
+#ifndef RNF_NORMAL
+#include <rtems/freebsd/net/radix.h>
+#ifdef RADIX_MPATH
+#include <rtems/freebsd/net/radix_mpath.h>
+#endif
+#endif
+struct rtentry {
+ struct radix_node rt_nodes[2]; /* tree glue, and other values */
+ /*
+ * XXX struct rtentry must begin with a struct radix_node (or two!)
+ * because the code does some casts of a 'struct radix_node *'
+ * to a 'struct rtentry *'
+ */
+#define rt_key(r) (*((struct sockaddr **)(&(r)->rt_nodes->rn_key)))
+#define rt_mask(r) (*((struct sockaddr **)(&(r)->rt_nodes->rn_mask)))
+ struct sockaddr *rt_gateway; /* value */
+ int rt_flags; /* up/down?, host/net */
+ int rt_refcnt; /* # held references */
+ struct ifnet *rt_ifp; /* the answer: interface to use */
+ struct ifaddr *rt_ifa; /* the answer: interface address to use */
+ struct rt_metrics_lite rt_rmx; /* metrics used by rx'ing protocols */
+ u_int rt_fibnum; /* which FIB */
+#ifdef _KERNEL
+ /* XXX ugly, user apps use this definition but don't have a mtx def */
+ struct mtx rt_mtx; /* mutex for routing entry */
+#endif
+};
+
+/*
+ * The following structure is necessary for 4.3 compatibility;
+ * we should eventually move it to a compat file.
+ */
+struct ortentry {
+ u_long rt_hash; /* to speed lookups */
+ struct sockaddr rt_dst; /* key */
+ struct sockaddr rt_gateway; /* value */
+ short rt_flags; /* up/down?, host/net */
+ short rt_refcnt; /* # held references */
+ u_long rt_use; /* raw # packets forwarded */
+ struct ifnet *rt_ifp; /* the answer: interface to use */
+};
+
+#define rt_use rt_rmx.rmx_pksent
+
+#define RTF_UP 0x1 /* route usable */
+#define RTF_GATEWAY 0x2 /* destination is a gateway */
+#define RTF_HOST 0x4 /* host entry (net otherwise) */
+#define RTF_REJECT 0x8 /* host or net unreachable */
+#define RTF_DYNAMIC 0x10 /* created dynamically (by redirect) */
+#define RTF_MODIFIED 0x20 /* modified dynamically (by redirect) */
+#define RTF_DONE 0x40 /* message confirmed */
+/* 0x80 unused, was RTF_DELCLONE */
+/* 0x100 unused, was RTF_CLONING */
+#define RTF_XRESOLVE 0x200 /* external daemon resolves name */
+#define RTF_LLINFO 0x400 /* DEPRECATED - exists ONLY for backward
+ compatibility */
+#define RTF_LLDATA 0x400 /* used by apps to add/del L2 entries */
+#define RTF_STATIC 0x800 /* manually added */
+#define RTF_BLACKHOLE 0x1000 /* just discard pkts (during updates) */
+#define RTF_PROTO2 0x4000 /* protocol specific routing flag */
+#define RTF_PROTO1 0x8000 /* protocol specific routing flag */
+
+/* XXX: temporary to stay API/ABI compatible with userland */
+#ifndef _KERNEL
+#define RTF_PRCLONING 0x10000 /* unused, for compatibility */
+#endif
+
+/* 0x20000 unused, was RTF_WASCLONED */
+#define RTF_PROTO3 0x40000 /* protocol specific routing flag */
+/* 0x80000 unused */
+#define RTF_PINNED 0x100000 /* future use */
+#define RTF_LOCAL 0x200000 /* route represents a local address */
+#define RTF_BROADCAST 0x400000 /* route represents a bcast address */
+#define RTF_MULTICAST 0x800000 /* route represents a mcast address */
+ /* 0x8000000 and up unassigned */
+#define RTF_STICKY 0x10000000 /* always route dst->src */
+
+#define RTF_RNH_LOCKED 0x40000000 /* radix node head is locked */
+
+/* Mask of RTF flags that are allowed to be modified by RTM_CHANGE. */
+#define RTF_FMASK \
+ (RTF_PROTO1 | RTF_PROTO2 | RTF_PROTO3 | RTF_BLACKHOLE | \
+ RTF_REJECT | RTF_STATIC | RTF_STICKY)
+
+/*
+ * Routing statistics.
+ */
+struct rtstat {
+ short rts_badredirect; /* bogus redirect calls */
+ short rts_dynamic; /* routes created by redirects */
+ short rts_newgateway; /* routes modified by redirects */
+ short rts_unreach; /* lookups which failed */
+ short rts_wildcard; /* lookups satisfied by a wildcard */
+};
+/*
+ * Structures for routing messages.
+ */
+struct rt_msghdr {
+ u_short rtm_msglen; /* to skip over non-understood messages */
+ u_char rtm_version; /* future binary compatibility */
+ u_char rtm_type; /* message type */
+ u_short rtm_index; /* index for associated ifp */
+ int rtm_flags; /* flags, incl. kern & message, e.g. DONE */
+ int rtm_addrs; /* bitmask identifying sockaddrs in msg */
+ pid_t rtm_pid; /* identify sender */
+ int rtm_seq; /* for sender to identify action */
+ int rtm_errno; /* why failed */
+ int rtm_fmask; /* bitmask used in RTM_CHANGE message */
+ u_long rtm_inits; /* which metrics we are initializing */
+ struct rt_metrics rtm_rmx; /* metrics themselves */
+};
+
+#define RTM_VERSION 5 /* Up the ante and ignore older versions */
+
+/*
+ * Message types.
+ */
+#define RTM_ADD 0x1 /* Add Route */
+#define RTM_DELETE 0x2 /* Delete Route */
+#define RTM_CHANGE 0x3 /* Change Metrics or flags */
+#define RTM_GET 0x4 /* Report Metrics */
+#define RTM_LOSING 0x5 /* Kernel Suspects Partitioning */
+#define RTM_REDIRECT 0x6 /* Told to use different route */
+#define RTM_MISS 0x7 /* Lookup failed on this address */
+#define RTM_LOCK 0x8 /* fix specified metrics */
+#define RTM_OLDADD 0x9 /* caused by SIOCADDRT */
+#define RTM_OLDDEL 0xa /* caused by SIOCDELRT */
+#define RTM_RESOLVE 0xb /* req to resolve dst to LL addr */
+#define RTM_NEWADDR 0xc /* address being added to iface */
+#define RTM_DELADDR 0xd /* address being removed from iface */
+#define RTM_IFINFO 0xe /* iface going up/down etc. */
+#define RTM_NEWMADDR 0xf /* mcast group membership being added to if */
+#define RTM_DELMADDR 0x10 /* mcast group membership being deleted */
+#define RTM_IFANNOUNCE 0x11 /* iface arrival/departure */
+#define RTM_IEEE80211 0x12 /* IEEE80211 wireless event */
+
+/*
+ * Bitmask values for rtm_inits and rmx_locks.
+ */
+#define RTV_MTU 0x1 /* init or lock _mtu */
+#define RTV_HOPCOUNT 0x2 /* init or lock _hopcount */
+#define RTV_EXPIRE 0x4 /* init or lock _expire */
+#define RTV_RPIPE 0x8 /* init or lock _recvpipe */
+#define RTV_SPIPE 0x10 /* init or lock _sendpipe */
+#define RTV_SSTHRESH 0x20 /* init or lock _ssthresh */
+#define RTV_RTT 0x40 /* init or lock _rtt */
+#define RTV_RTTVAR 0x80 /* init or lock _rttvar */
+#define RTV_WEIGHT 0x100 /* init or lock _weight */
+
+/*
+ * Bitmask values for rtm_addrs.
+ */
+#define RTA_DST 0x1 /* destination sockaddr present */
+#define RTA_GATEWAY 0x2 /* gateway sockaddr present */
+#define RTA_NETMASK 0x4 /* netmask sockaddr present */
+#define RTA_GENMASK 0x8 /* cloning mask sockaddr present */
+#define RTA_IFP 0x10 /* interface name sockaddr present */
+#define RTA_IFA 0x20 /* interface addr sockaddr present */
+#define RTA_AUTHOR 0x40 /* sockaddr for author of redirect */
+#define RTA_BRD 0x80 /* for NEWADDR, broadcast or p-p dest addr */
+
+/*
+ * Index offsets for sockaddr array for alternate internal encoding.
+ */
+#define RTAX_DST 0 /* destination sockaddr present */
+#define RTAX_GATEWAY 1 /* gateway sockaddr present */
+#define RTAX_NETMASK 2 /* netmask sockaddr present */
+#define RTAX_GENMASK 3 /* cloning mask sockaddr present */
+#define RTAX_IFP 4 /* interface name sockaddr present */
+#define RTAX_IFA 5 /* interface addr sockaddr present */
+#define RTAX_AUTHOR 6 /* sockaddr for author of redirect */
+#define RTAX_BRD 7 /* for NEWADDR, broadcast or p-p dest addr */
+#define RTAX_MAX 8 /* size of array to allocate */
+
+struct rt_addrinfo {
+ int rti_addrs;
+ struct sockaddr *rti_info[RTAX_MAX];
+ int rti_flags;
+ struct ifaddr *rti_ifa;
+ struct ifnet *rti_ifp;
+};
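+/*
+ * A sketch of how this is used internally: a caller that wants to
+ * look up or modify a route fills rti_info[RTAX_DST] (and whichever
+ * other slots apply) with sockaddr pointers, leaves unused slots
+ * NULL, and sets the matching RTA_* bits in rti_addrs.
+ */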
+
+/*
+ * This macro returns the size of a struct sockaddr when passed
+ * through a routing socket. Basically we round up sa_len to
+ * a multiple of sizeof(long), with a minimum of sizeof(long).
+ * The check for a NULL pointer is just a convenience, probably never used.
+ * The case sa_len == 0 should only apply to empty structures.
+ */
+#define SA_SIZE(sa) \
+ ( (!(sa) || ((struct sockaddr *)(sa))->sa_len == 0) ? \
+ sizeof(long) : \
+ 1 + ( (((struct sockaddr *)(sa))->sa_len - 1) | (sizeof(long) - 1) ) )
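+/*
+ * Worked example: with sizeof(long) == 4, sa_len == 16 yields
+ * SA_SIZE() == 16, sa_len == 17 rounds up to 20, and a NULL pointer
+ * or sa_len == 0 yields 4.
+ */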
+
+#ifdef _KERNEL
+
+#define RT_LINK_IS_UP(ifp) (!((ifp)->if_capabilities & IFCAP_LINKSTATE) \
+ || (ifp)->if_link_state == LINK_STATE_UP)
+
+#define RT_LOCK_INIT(_rt) \
+ mtx_init(&(_rt)->rt_mtx, "rtentry", NULL, MTX_DEF | MTX_DUPOK)
+#define RT_LOCK(_rt) mtx_lock(&(_rt)->rt_mtx)
+#define RT_TRYLOCK(_rt) mtx_trylock(&(_rt)->rt_mtx)
+#define RT_UNLOCK(_rt) mtx_unlock(&(_rt)->rt_mtx)
+#define RT_LOCK_DESTROY(_rt) mtx_destroy(&(_rt)->rt_mtx)
+#define RT_LOCK_ASSERT(_rt) mtx_assert(&(_rt)->rt_mtx, MA_OWNED)
+
+#define RT_ADDREF(_rt) do { \
+ RT_LOCK_ASSERT(_rt); \
+ KASSERT((_rt)->rt_refcnt >= 0, \
+ ("negative refcnt %d", (_rt)->rt_refcnt)); \
+ (_rt)->rt_refcnt++; \
+} while (0)
+
+#define RT_REMREF(_rt) do { \
+ RT_LOCK_ASSERT(_rt); \
+ KASSERT((_rt)->rt_refcnt > 0, \
+ ("bogus refcnt %d", (_rt)->rt_refcnt)); \
+ (_rt)->rt_refcnt--; \
+} while (0)
+
+#define RTFREE_LOCKED(_rt) do { \
+ if ((_rt)->rt_refcnt <= 1) \
+ rtfree(_rt); \
+ else { \
+ RT_REMREF(_rt); \
+ RT_UNLOCK(_rt); \
+ } \
+ /* guard against invalid refs */ \
+ _rt = 0; \
+} while (0)
+
+#define RTFREE(_rt) do { \
+ RT_LOCK(_rt); \
+ RTFREE_LOCKED(_rt); \
+} while (0)
+
+#define RT_TEMP_UNLOCK(_rt) do { \
+ RT_ADDREF(_rt); \
+ RT_UNLOCK(_rt); \
+} while (0)
+
+#define RT_RELOCK(_rt) do { \
+ RT_LOCK(_rt); \
+ if ((_rt)->rt_refcnt <= 1) { \
+ rtfree(_rt); \
+ _rt = 0; /* signal that it went away */ \
+ } else { \
+ RT_REMREF(_rt); \
+ /* note that _rt is still valid */ \
+ } \
+} while (0)
+
+struct radix_node_head *rt_tables_get_rnh(int, int);
+
+struct ifmultiaddr;
+
+void rt_ieee80211msg(struct ifnet *, int, void *, size_t);
+void rt_ifannouncemsg(struct ifnet *, int);
+void rt_ifmsg(struct ifnet *);
+void rt_missmsg(int, struct rt_addrinfo *, int, int);
+void rt_newaddrmsg(int, struct ifaddr *, int, struct rtentry *);
+void rt_newmaddrmsg(int, struct ifmultiaddr *);
+int rt_setgate(struct rtentry *, struct sockaddr *, struct sockaddr *);
+void rt_maskedcopy(struct sockaddr *, struct sockaddr *, struct sockaddr *);
+
+/*
+ * Note the following locking behavior:
+ *
+ * rtalloc_ign() and rtalloc() return ro->ro_rt unlocked
+ *
+ * rtalloc1() returns a locked rtentry
+ *
+ * rtfree() and RTFREE_LOCKED() require a locked rtentry
+ *
+ * RTFREE() uses an unlocked entry.
+ */
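+
+/*
+ * A minimal usage sketch of the rules above, assuming a caller-owned
+ * struct route ro that has been zeroed and had ro_dst filled in:
+ *
+ *	rtalloc_ign(&ro, 0);
+ *	if (ro.ro_rt != NULL) {
+ *		(use ro.ro_rt, which is returned unlocked)
+ *		RTFREE(ro.ro_rt);
+ *	}
+ */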
+
+int rtexpunge(struct rtentry *);
+void rtfree(struct rtentry *);
+int rt_check(struct rtentry **, struct rtentry **, struct sockaddr *);
+
+/* XXX MRT COMPAT VERSIONS THAT SET UNIVERSE to 0 */
+/* These are used by old code not yet converted to use multiple FIBs */
+int rt_getifa(struct rt_addrinfo *);
+void rtalloc_ign(struct route *ro, u_long ignflags);
+void rtalloc(struct route *ro); /* XXX deprecated, use rtalloc_ign(ro, 0) */
+struct rtentry *rtalloc1(struct sockaddr *, int, u_long);
+int rtinit(struct ifaddr *, int, int);
+int rtioctl(u_long, caddr_t);
+void rtredirect(struct sockaddr *, struct sockaddr *,
+ struct sockaddr *, int, struct sockaddr *);
+int rtrequest(int, struct sockaddr *,
+ struct sockaddr *, struct sockaddr *, int, struct rtentry **);
+
+/* defaults to "all" FIBs */
+int rtinit_fib(struct ifaddr *, int, int);
+
+/* XXX MRT NEW VERSIONS THAT USE FIBs
+ * For now the protocol independent versions are the same as the AF_INET ones
+ * but this will change.
+ */
+int rt_getifa_fib(struct rt_addrinfo *, u_int fibnum);
+void rtalloc_ign_fib(struct route *ro, u_long ignflags, u_int fibnum);
+void rtalloc_fib(struct route *ro, u_int fibnum);
+struct rtentry *rtalloc1_fib(struct sockaddr *, int, u_long, u_int);
+int rtioctl_fib(u_long, caddr_t, u_int);
+void rtredirect_fib(struct sockaddr *, struct sockaddr *,
+ struct sockaddr *, int, struct sockaddr *, u_int);
+int rtrequest_fib(int, struct sockaddr *,
+ struct sockaddr *, struct sockaddr *, int, struct rtentry **, u_int);
+int rtrequest1_fib(int, struct rt_addrinfo *, struct rtentry **, u_int);
+
+#include <rtems/freebsd/sys/eventhandler.h>
+typedef void (*rtevent_arp_update_fn)(void *, struct rtentry *, uint8_t *, struct sockaddr *);
+typedef void (*rtevent_redirect_fn)(void *, struct rtentry *, struct rtentry *, struct sockaddr *);
+EVENTHANDLER_DECLARE(route_arp_update_event, rtevent_arp_update_fn);
+EVENTHANDLER_DECLARE(route_redirect_event, rtevent_redirect_fn);
+#endif
+
+#endif
diff --git a/rtems/freebsd/net/rtsock.c b/rtems/freebsd/net/rtsock.c
new file mode 100644
index 00000000..4f919e18
--- /dev/null
+++ b/rtems/freebsd/net/rtsock.c
@@ -0,0 +1,1702 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 1988, 1991, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)rtsock.c 8.7 (Berkeley) 10/12/95
+ * $FreeBSD$
+ */
+#include <rtems/freebsd/local/opt_compat.h>
+#include <rtems/freebsd/local/opt_sctp.h>
+#include <rtems/freebsd/local/opt_mpath.h>
+#include <rtems/freebsd/local/opt_inet.h>
+#include <rtems/freebsd/local/opt_inet6.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/jail.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/domain.h>
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/priv.h>
+#include <rtems/freebsd/sys/proc.h>
+#include <rtems/freebsd/sys/protosw.h>
+#include <rtems/freebsd/sys/rwlock.h>
+#include <rtems/freebsd/sys/signalvar.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/socketvar.h>
+#include <rtems/freebsd/sys/sysctl.h>
+#include <rtems/freebsd/sys/systm.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/if_dl.h>
+#include <rtems/freebsd/net/if_llatbl.h>
+#include <rtems/freebsd/net/if_types.h>
+#include <rtems/freebsd/net/netisr.h>
+#include <rtems/freebsd/net/raw_cb.h>
+#include <rtems/freebsd/net/route.h>
+#include <rtems/freebsd/net/vnet.h>
+
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/if_ether.h>
+#ifdef INET6
+#include <rtems/freebsd/netinet6/scope6_var.h>
+#endif
+
+#if defined(INET) || defined(INET6)
+#ifdef SCTP
+extern void sctp_addr_change(struct ifaddr *ifa, int cmd);
+#endif /* SCTP */
+#endif
+
+#ifdef COMPAT_FREEBSD32
+#include <rtems/freebsd/sys/mount.h>
+#include <rtems/freebsd/compat/freebsd32/freebsd32.h>
+
+struct if_data32 {
+ uint8_t ifi_type;
+ uint8_t ifi_physical;
+ uint8_t ifi_addrlen;
+ uint8_t ifi_hdrlen;
+ uint8_t ifi_link_state;
+ uint8_t ifi_spare_char1;
+ uint8_t ifi_spare_char2;
+ uint8_t ifi_datalen;
+ uint32_t ifi_mtu;
+ uint32_t ifi_metric;
+ uint32_t ifi_baudrate;
+ uint32_t ifi_ipackets;
+ uint32_t ifi_ierrors;
+ uint32_t ifi_opackets;
+ uint32_t ifi_oerrors;
+ uint32_t ifi_collisions;
+ uint32_t ifi_ibytes;
+ uint32_t ifi_obytes;
+ uint32_t ifi_imcasts;
+ uint32_t ifi_omcasts;
+ uint32_t ifi_iqdrops;
+ uint32_t ifi_noproto;
+ uint32_t ifi_hwassist;
+ int32_t ifi_epoch;
+ struct timeval32 ifi_lastchange;
+};
+
+struct if_msghdr32 {
+ uint16_t ifm_msglen;
+ uint8_t ifm_version;
+ uint8_t ifm_type;
+ int32_t ifm_addrs;
+ int32_t ifm_flags;
+ uint16_t ifm_index;
+ struct if_data32 ifm_data;
+};
+#endif
+
+MALLOC_DEFINE(M_RTABLE, "routetbl", "routing tables");
+
+/* NB: these are not modified */
+static struct sockaddr route_src = { 2, PF_ROUTE, };
+static struct sockaddr sa_zero = { sizeof(sa_zero), AF_INET, };
+
+static struct {
+ int ip_count; /* attached w/ AF_INET */
+ int ip6_count; /* attached w/ AF_INET6 */
+ int ipx_count; /* attached w/ AF_IPX */
+ int any_count; /* total attached */
+} route_cb;
+
+struct mtx rtsock_mtx;
+MTX_SYSINIT(rtsock, &rtsock_mtx, "rtsock route_cb lock", MTX_DEF);
+
+#define RTSOCK_LOCK() mtx_lock(&rtsock_mtx)
+#define RTSOCK_UNLOCK() mtx_unlock(&rtsock_mtx)
+#define RTSOCK_LOCK_ASSERT() mtx_assert(&rtsock_mtx, MA_OWNED)
+
+SYSCTL_NODE(_net, OID_AUTO, route, CTLFLAG_RD, 0, "");
+
+struct walkarg {
+ int w_tmemsize;
+ int w_op, w_arg;
+ caddr_t w_tmem;
+ struct sysctl_req *w_req;
+};
+
+static void rts_input(struct mbuf *m);
+static struct mbuf *rt_msg1(int type, struct rt_addrinfo *rtinfo);
+static int rt_msg2(int type, struct rt_addrinfo *rtinfo,
+ caddr_t cp, struct walkarg *w);
+static int rt_xaddrs(caddr_t cp, caddr_t cplim,
+ struct rt_addrinfo *rtinfo);
+static int sysctl_dumpentry(struct radix_node *rn, void *vw);
+static int sysctl_iflist(int af, struct walkarg *w);
+static int sysctl_ifmalist(int af, struct walkarg *w);
+static int route_output(struct mbuf *m, struct socket *so);
+static void rt_setmetrics(u_long which, const struct rt_metrics *in,
+ struct rt_metrics_lite *out);
+static void rt_getmetrics(const struct rt_metrics_lite *in,
+ struct rt_metrics *out);
+static void rt_dispatch(struct mbuf *, const struct sockaddr *);
+
+static struct netisr_handler rtsock_nh = {
+ .nh_name = "rtsock",
+ .nh_handler = rts_input,
+ .nh_proto = NETISR_ROUTE,
+ .nh_policy = NETISR_POLICY_SOURCE,
+};
+
+static int
+sysctl_route_netisr_maxqlen(SYSCTL_HANDLER_ARGS)
+{
+ int error, qlimit;
+
+ netisr_getqlimit(&rtsock_nh, &qlimit);
+ error = sysctl_handle_int(oidp, &qlimit, 0, req);
+ if (error || !req->newptr)
+ return (error);
+ if (qlimit < 1)
+ return (EINVAL);
+ return (netisr_setqlimit(&rtsock_nh, qlimit));
+}
+SYSCTL_PROC(_net_route, OID_AUTO, netisr_maxqlen, CTLTYPE_INT|CTLFLAG_RW,
+ 0, 0, sysctl_route_netisr_maxqlen, "I",
+ "maximum routing socket dispatch queue length");
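+/*
+ * The limit can then be read or tuned from userland, e.g.
+ * "sysctl net.route.netisr_maxqlen=512"; the handler above rejects
+ * values below 1.
+ */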
+
+static void
+rts_init(void)
+{
+ int tmp;
+
+#ifndef __rtems__
+ if (TUNABLE_INT_FETCH("net.route.netisr_maxqlen", &tmp))
+ rtsock_nh.nh_qlimit = tmp;
+#endif
+ netisr_register(&rtsock_nh);
+}
+SYSINIT(rtsock, SI_SUB_PROTO_DOMAIN, SI_ORDER_THIRD, rts_init, 0);
+
+static void
+rts_input(struct mbuf *m)
+{
+ struct sockproto route_proto;
+ unsigned short *family;
+ struct m_tag *tag;
+
+ route_proto.sp_family = PF_ROUTE;
+ tag = m_tag_find(m, PACKET_TAG_RTSOCKFAM, NULL);
+ if (tag != NULL) {
+ family = (unsigned short *)(tag + 1);
+ route_proto.sp_protocol = *family;
+ m_tag_delete(m, tag);
+ } else
+ route_proto.sp_protocol = 0;
+
+ raw_input(m, &route_proto, &route_src);
+}
+
+/*
+ * It really doesn't make any sense at all for this code to share much
+ * with raw_usrreq.c, since its functionality is so restricted. XXX
+ */
+static void
+rts_abort(struct socket *so)
+{
+
+ raw_usrreqs.pru_abort(so);
+}
+
+static void
+rts_close(struct socket *so)
+{
+
+ raw_usrreqs.pru_close(so);
+}
+
+/* pru_accept is EOPNOTSUPP */
+
+static int
+rts_attach(struct socket *so, int proto, struct thread *td)
+{
+ struct rawcb *rp;
+ int s, error;
+
+ KASSERT(so->so_pcb == NULL, ("rts_attach: so_pcb != NULL"));
+
+ /* XXX */
+ rp = malloc(sizeof *rp, M_PCB, M_WAITOK | M_ZERO);
+ if (rp == NULL)
+ return ENOBUFS;
+
+ /*
+ * The splnet() is necessary to block protocols from sending
+ * error notifications (like RTM_REDIRECT or RTM_LOSING) while
+ * this PCB is extant but incompletely initialized.
+ * Probably we should try to do more of this work beforehand and
+ * eliminate the spl.
+ */
+ s = splnet();
+ so->so_pcb = (caddr_t)rp;
+ so->so_fibnum = td->td_proc->p_fibnum;
+ error = raw_attach(so, proto);
+ rp = sotorawcb(so);
+ if (error) {
+ splx(s);
+ so->so_pcb = NULL;
+ free(rp, M_PCB);
+ return error;
+ }
+ RTSOCK_LOCK();
+ switch(rp->rcb_proto.sp_protocol) {
+ case AF_INET:
+ route_cb.ip_count++;
+ break;
+ case AF_INET6:
+ route_cb.ip6_count++;
+ break;
+ case AF_IPX:
+ route_cb.ipx_count++;
+ break;
+ }
+ route_cb.any_count++;
+ RTSOCK_UNLOCK();
+ soisconnected(so);
+ so->so_options |= SO_USELOOPBACK;
+ splx(s);
+ return 0;
+}
+
+static int
+rts_bind(struct socket *so, struct sockaddr *nam, struct thread *td)
+{
+
+ return (raw_usrreqs.pru_bind(so, nam, td)); /* xxx just EINVAL */
+}
+
+static int
+rts_connect(struct socket *so, struct sockaddr *nam, struct thread *td)
+{
+
+ return (raw_usrreqs.pru_connect(so, nam, td)); /* XXX just EINVAL */
+}
+
+/* pru_connect2 is EOPNOTSUPP */
+/* pru_control is EOPNOTSUPP */
+
+static void
+rts_detach(struct socket *so)
+{
+ struct rawcb *rp = sotorawcb(so);
+
+ KASSERT(rp != NULL, ("rts_detach: rp == NULL"));
+
+ RTSOCK_LOCK();
+ switch(rp->rcb_proto.sp_protocol) {
+ case AF_INET:
+ route_cb.ip_count--;
+ break;
+ case AF_INET6:
+ route_cb.ip6_count--;
+ break;
+ case AF_IPX:
+ route_cb.ipx_count--;
+ break;
+ }
+ route_cb.any_count--;
+ RTSOCK_UNLOCK();
+ raw_usrreqs.pru_detach(so);
+}
+
+static int
+rts_disconnect(struct socket *so)
+{
+
+ return (raw_usrreqs.pru_disconnect(so));
+}
+
+/* pru_listen is EOPNOTSUPP */
+
+static int
+rts_peeraddr(struct socket *so, struct sockaddr **nam)
+{
+
+ return (raw_usrreqs.pru_peeraddr(so, nam));
+}
+
+/* pru_rcvd is EOPNOTSUPP */
+/* pru_rcvoob is EOPNOTSUPP */
+
+static int
+rts_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *nam,
+ struct mbuf *control, struct thread *td)
+{
+
+ return (raw_usrreqs.pru_send(so, flags, m, nam, control, td));
+}
+
+/* pru_sense is null */
+
+static int
+rts_shutdown(struct socket *so)
+{
+
+ return (raw_usrreqs.pru_shutdown(so));
+}
+
+static int
+rts_sockaddr(struct socket *so, struct sockaddr **nam)
+{
+
+ return (raw_usrreqs.pru_sockaddr(so, nam));
+}
+
+static struct pr_usrreqs route_usrreqs = {
+ .pru_abort = rts_abort,
+ .pru_attach = rts_attach,
+ .pru_bind = rts_bind,
+ .pru_connect = rts_connect,
+ .pru_detach = rts_detach,
+ .pru_disconnect = rts_disconnect,
+ .pru_peeraddr = rts_peeraddr,
+ .pru_send = rts_send,
+ .pru_shutdown = rts_shutdown,
+ .pru_sockaddr = rts_sockaddr,
+ .pru_close = rts_close,
+};
+
+#ifndef _SOCKADDR_UNION_DEFINED
+#define _SOCKADDR_UNION_DEFINED
+/*
+ * The union of all possible address formats we handle.
+ */
+union sockaddr_union {
+ struct sockaddr sa;
+ struct sockaddr_in sin;
+ struct sockaddr_in6 sin6;
+};
+#endif /* _SOCKADDR_UNION_DEFINED */
+
+static int
+rtm_get_jailed(struct rt_addrinfo *info, struct ifnet *ifp,
+ struct rtentry *rt, union sockaddr_union *saun, struct ucred *cred)
+{
+
+ /* First, see if the returned address is part of the jail. */
+ if (prison_if(cred, rt->rt_ifa->ifa_addr) == 0) {
+ info->rti_info[RTAX_IFA] = rt->rt_ifa->ifa_addr;
+ return (0);
+ }
+
+ switch (info->rti_info[RTAX_DST]->sa_family) {
+#ifdef INET
+ case AF_INET:
+ {
+ struct in_addr ia;
+ struct ifaddr *ifa;
+ int found;
+
+ found = 0;
+ /*
+ * Try to find an address on the given outgoing interface
+ * that belongs to the jail.
+ */
+ IF_ADDR_LOCK(ifp);
+ TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
+ struct sockaddr *sa;
+ sa = ifa->ifa_addr;
+ if (sa->sa_family != AF_INET)
+ continue;
+ ia = ((struct sockaddr_in *)sa)->sin_addr;
+ if (prison_check_ip4(cred, &ia) == 0) {
+ found = 1;
+ break;
+ }
+ }
+ IF_ADDR_UNLOCK(ifp);
+ if (!found) {
+ /*
+ * As a last resort return the 'default' jail address.
+ */
+ ia = ((struct sockaddr_in *)rt->rt_ifa->ifa_addr)->
+ sin_addr;
+ if (prison_get_ip4(cred, &ia) != 0)
+ return (ESRCH);
+ }
+ bzero(&saun->sin, sizeof(struct sockaddr_in));
+ saun->sin.sin_len = sizeof(struct sockaddr_in);
+ saun->sin.sin_family = AF_INET;
+ saun->sin.sin_addr.s_addr = ia.s_addr;
+ info->rti_info[RTAX_IFA] = (struct sockaddr *)&saun->sin;
+ break;
+ }
+#endif
+#ifdef INET6
+ case AF_INET6:
+ {
+ struct in6_addr ia6;
+ struct ifaddr *ifa;
+ int found;
+
+ found = 0;
+ /*
+ * Try to find an address on the given outgoing interface
+ * that belongs to the jail.
+ */
+ IF_ADDR_LOCK(ifp);
+ TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
+ struct sockaddr *sa;
+ sa = ifa->ifa_addr;
+ if (sa->sa_family != AF_INET6)
+ continue;
+ bcopy(&((struct sockaddr_in6 *)sa)->sin6_addr,
+ &ia6, sizeof(struct in6_addr));
+ if (prison_check_ip6(cred, &ia6) == 0) {
+ found = 1;
+ break;
+ }
+ }
+ IF_ADDR_UNLOCK(ifp);
+ if (!found) {
+ /*
+ * As a last resort return the 'default' jail address.
+ */
+ ia6 = ((struct sockaddr_in6 *)rt->rt_ifa->ifa_addr)->
+ sin6_addr;
+ if (prison_get_ip6(cred, &ia6) != 0)
+ return (ESRCH);
+ }
+ bzero(&saun->sin6, sizeof(struct sockaddr_in6));
+ saun->sin6.sin6_len = sizeof(struct sockaddr_in6);
+ saun->sin6.sin6_family = AF_INET6;
+ bcopy(&ia6, &saun->sin6.sin6_addr, sizeof(struct in6_addr));
+ if (sa6_recoverscope(&saun->sin6) != 0)
+ return (ESRCH);
+ info->rti_info[RTAX_IFA] = (struct sockaddr *)&saun->sin6;
+ break;
+ }
+#endif
+ default:
+ return (ESRCH);
+ }
+ return (0);
+}
+
+/*ARGSUSED*/
+static int
+route_output(struct mbuf *m, struct socket *so)
+{
+#define sa_equal(a1, a2) (bcmp((a1), (a2), (a1)->sa_len) == 0)
+ struct rt_msghdr *rtm = NULL;
+ struct rtentry *rt = NULL;
+ struct radix_node_head *rnh;
+ struct rt_addrinfo info;
+ int len, error = 0;
+ struct ifnet *ifp = NULL;
+ union sockaddr_union saun;
+
+#define senderr(e) { error = e; goto flush;}
+ if (m == NULL || ((m->m_len < sizeof(long)) &&
+ (m = m_pullup(m, sizeof(long))) == NULL))
+ return (ENOBUFS);
+ if ((m->m_flags & M_PKTHDR) == 0)
+ panic("route_output");
+ len = m->m_pkthdr.len;
+ if (len < sizeof(*rtm) ||
+ len != mtod(m, struct rt_msghdr *)->rtm_msglen) {
+ info.rti_info[RTAX_DST] = NULL;
+ senderr(EINVAL);
+ }
+ R_Malloc(rtm, struct rt_msghdr *, len);
+ if (rtm == NULL) {
+ info.rti_info[RTAX_DST] = NULL;
+ senderr(ENOBUFS);
+ }
+ m_copydata(m, 0, len, (caddr_t)rtm);
+ if (rtm->rtm_version != RTM_VERSION) {
+ info.rti_info[RTAX_DST] = NULL;
+ senderr(EPROTONOSUPPORT);
+ }
+ rtm->rtm_pid = curproc->p_pid;
+ bzero(&info, sizeof(info));
+ info.rti_addrs = rtm->rtm_addrs;
+ if (rt_xaddrs((caddr_t)(rtm + 1), len + (caddr_t)rtm, &info)) {
+ info.rti_info[RTAX_DST] = NULL;
+ senderr(EINVAL);
+ }
+ info.rti_flags = rtm->rtm_flags;
+ if (info.rti_info[RTAX_DST] == NULL ||
+ info.rti_info[RTAX_DST]->sa_family >= AF_MAX ||
+ (info.rti_info[RTAX_GATEWAY] != NULL &&
+ info.rti_info[RTAX_GATEWAY]->sa_family >= AF_MAX))
+ senderr(EINVAL);
+ /*
+ * Verify that the caller has the appropriate privilege; RTM_GET
+	 * is the only operation the non-superuser is allowed to perform.
+ */
+ if (rtm->rtm_type != RTM_GET) {
+ error = priv_check(curthread, PRIV_NET_ROUTE);
+ if (error)
+ senderr(error);
+ }
+
+	/*
+	 * The given gateway address may be an interface address; for
+	 * example, a "route change" command may be issued on a route
+	 * entry that was created from a tunnel, with the gateway
+	 * address given being the local end point. In this case the
+	 * RTF_GATEWAY flag must be cleared or the destination will
+	 * not be reachable even though there is no error message.
+	 */
+ if (info.rti_info[RTAX_GATEWAY] != NULL &&
+ info.rti_info[RTAX_GATEWAY]->sa_family != AF_LINK) {
+ struct route gw_ro;
+
+ bzero(&gw_ro, sizeof(gw_ro));
+ gw_ro.ro_dst = *info.rti_info[RTAX_GATEWAY];
+ rtalloc_ign_fib(&gw_ro, 0, so->so_fibnum);
+ /*
+ * A host route through the loopback interface is
+	 * installed for each interface address. In pre-8.0
+ * releases the interface address of a PPP link type
+ * is not reachable locally. This behavior is fixed as
+ * part of the new L2/L3 redesign and rewrite work. The
+ * signature of this interface address route is the
+ * AF_LINK sa_family type of the rt_gateway, and the
+ * rt_ifp has the IFF_LOOPBACK flag set.
+ */
+ if (gw_ro.ro_rt != NULL &&
+ gw_ro.ro_rt->rt_gateway->sa_family == AF_LINK &&
+ gw_ro.ro_rt->rt_ifp->if_flags & IFF_LOOPBACK)
+ info.rti_flags &= ~RTF_GATEWAY;
+ if (gw_ro.ro_rt != NULL)
+ RTFREE(gw_ro.ro_rt);
+ }
+
+ switch (rtm->rtm_type) {
+ struct rtentry *saved_nrt;
+
+ case RTM_ADD:
+ if (info.rti_info[RTAX_GATEWAY] == NULL)
+ senderr(EINVAL);
+ saved_nrt = NULL;
+
+ /* support for new ARP code */
+ if (info.rti_info[RTAX_GATEWAY]->sa_family == AF_LINK &&
+ (rtm->rtm_flags & RTF_LLDATA) != 0) {
+ error = lla_rt_output(rtm, &info);
+ break;
+ }
+ error = rtrequest1_fib(RTM_ADD, &info, &saved_nrt,
+ so->so_fibnum);
+ if (error == 0 && saved_nrt) {
+ RT_LOCK(saved_nrt);
+ rt_setmetrics(rtm->rtm_inits,
+ &rtm->rtm_rmx, &saved_nrt->rt_rmx);
+ rtm->rtm_index = saved_nrt->rt_ifp->if_index;
+ RT_REMREF(saved_nrt);
+ RT_UNLOCK(saved_nrt);
+ }
+ break;
+
+ case RTM_DELETE:
+ saved_nrt = NULL;
+ /* support for new ARP code */
+ if (info.rti_info[RTAX_GATEWAY] &&
+ (info.rti_info[RTAX_GATEWAY]->sa_family == AF_LINK) &&
+ (rtm->rtm_flags & RTF_LLDATA) != 0) {
+ error = lla_rt_output(rtm, &info);
+ break;
+ }
+ error = rtrequest1_fib(RTM_DELETE, &info, &saved_nrt,
+ so->so_fibnum);
+ if (error == 0) {
+ RT_LOCK(saved_nrt);
+ rt = saved_nrt;
+ goto report;
+ }
+ break;
+
+ case RTM_GET:
+ case RTM_CHANGE:
+ case RTM_LOCK:
+ rnh = rt_tables_get_rnh(so->so_fibnum,
+ info.rti_info[RTAX_DST]->sa_family);
+ if (rnh == NULL)
+ senderr(EAFNOSUPPORT);
+ RADIX_NODE_HEAD_RLOCK(rnh);
+ rt = (struct rtentry *) rnh->rnh_lookup(info.rti_info[RTAX_DST],
+ info.rti_info[RTAX_NETMASK], rnh);
+ if (rt == NULL) { /* XXX looks bogus */
+ RADIX_NODE_HEAD_RUNLOCK(rnh);
+ senderr(ESRCH);
+ }
+#ifdef RADIX_MPATH
+ /*
+ * for RTM_CHANGE/LOCK, if we got multipath routes,
+ * we require users to specify a matching RTAX_GATEWAY.
+ *
+ * for RTM_GET, gate is optional even with multipath.
+ * if gate == NULL the first match is returned.
+ * (no need to call rt_mpath_matchgate if gate == NULL)
+ */
+ if (rn_mpath_capable(rnh) &&
+ (rtm->rtm_type != RTM_GET || info.rti_info[RTAX_GATEWAY])) {
+ rt = rt_mpath_matchgate(rt, info.rti_info[RTAX_GATEWAY]);
+ if (!rt) {
+ RADIX_NODE_HEAD_RUNLOCK(rnh);
+ senderr(ESRCH);
+ }
+ }
+#endif
+ /*
+ * If performing proxied L2 entry insertion, and
+ * the actual PPP host entry is found, perform
+ * another search to retrieve the prefix route of
+ * the local end point of the PPP link.
+ */
+ if (rtm->rtm_flags & RTF_ANNOUNCE) {
+ struct sockaddr laddr;
+
+ if (rt->rt_ifp != NULL &&
+ rt->rt_ifp->if_type == IFT_PROPVIRTUAL) {
+ struct ifaddr *ifa;
+
+ ifa = ifa_ifwithnet(info.rti_info[RTAX_DST], 1);
+ if (ifa != NULL)
+ rt_maskedcopy(ifa->ifa_addr,
+ &laddr,
+ ifa->ifa_netmask);
+ } else
+ rt_maskedcopy(rt->rt_ifa->ifa_addr,
+ &laddr,
+ rt->rt_ifa->ifa_netmask);
+			/*
+			 * Switch rt to the matched prefix route; no
+			 * additional lock operation is necessary here.
+			 */
+ rt = (struct rtentry *)rnh->rnh_matchaddr(&laddr, rnh);
+ if (rt == NULL) {
+ RADIX_NODE_HEAD_RUNLOCK(rnh);
+ senderr(ESRCH);
+ }
+ }
+ RT_LOCK(rt);
+ RT_ADDREF(rt);
+ RADIX_NODE_HEAD_RUNLOCK(rnh);
+
+ /*
+ * Fix for PR: 82974
+ *
+ * RTM_CHANGE/LOCK need a perfect match, rn_lookup()
+ * returns a perfect match in case a netmask is
+ * specified. For host routes only a longest prefix
+ * match is returned so it is necessary to compare the
+ * existence of the netmask. If both have a netmask
+ * rnh_lookup() did a perfect match and if none of them
+ * have a netmask both are host routes which is also a
+ * perfect match.
+ */
+
+ if (rtm->rtm_type != RTM_GET &&
+ (!rt_mask(rt) != !info.rti_info[RTAX_NETMASK])) {
+ RT_UNLOCK(rt);
+ senderr(ESRCH);
+ }
+
+ switch(rtm->rtm_type) {
+
+ case RTM_GET:
+ report:
+ RT_LOCK_ASSERT(rt);
+ if ((rt->rt_flags & RTF_HOST) == 0
+ ? jailed_without_vnet(curthread->td_ucred)
+ : prison_if(curthread->td_ucred,
+ rt_key(rt)) != 0) {
+ RT_UNLOCK(rt);
+ senderr(ESRCH);
+ }
+ info.rti_info[RTAX_DST] = rt_key(rt);
+ info.rti_info[RTAX_GATEWAY] = rt->rt_gateway;
+ info.rti_info[RTAX_NETMASK] = rt_mask(rt);
+ info.rti_info[RTAX_GENMASK] = 0;
+ if (rtm->rtm_addrs & (RTA_IFP | RTA_IFA)) {
+ ifp = rt->rt_ifp;
+ if (ifp) {
+ info.rti_info[RTAX_IFP] =
+ ifp->if_addr->ifa_addr;
+ error = rtm_get_jailed(&info, ifp, rt,
+ &saun, curthread->td_ucred);
+ if (error != 0) {
+ RT_UNLOCK(rt);
+ senderr(error);
+ }
+ if (ifp->if_flags & IFF_POINTOPOINT)
+ info.rti_info[RTAX_BRD] =
+ rt->rt_ifa->ifa_dstaddr;
+ rtm->rtm_index = ifp->if_index;
+ } else {
+ info.rti_info[RTAX_IFP] = NULL;
+ info.rti_info[RTAX_IFA] = NULL;
+ }
+ } else if ((ifp = rt->rt_ifp) != NULL) {
+ rtm->rtm_index = ifp->if_index;
+ }
+ len = rt_msg2(rtm->rtm_type, &info, NULL, NULL);
+ if (len > rtm->rtm_msglen) {
+ struct rt_msghdr *new_rtm;
+ R_Malloc(new_rtm, struct rt_msghdr *, len);
+ if (new_rtm == NULL) {
+ RT_UNLOCK(rt);
+ senderr(ENOBUFS);
+ }
+ bcopy(rtm, new_rtm, rtm->rtm_msglen);
+ Free(rtm); rtm = new_rtm;
+ }
+ (void)rt_msg2(rtm->rtm_type, &info, (caddr_t)rtm, NULL);
+ rtm->rtm_flags = rt->rt_flags;
+ rt_getmetrics(&rt->rt_rmx, &rtm->rtm_rmx);
+ rtm->rtm_addrs = info.rti_addrs;
+ break;
+
+ case RTM_CHANGE:
+ /*
+ * New gateway could require new ifaddr, ifp;
+ * flags may also be different; ifp may be specified
+ * by ll sockaddr when protocol address is ambiguous
+ */
+ if (((rt->rt_flags & RTF_GATEWAY) &&
+ info.rti_info[RTAX_GATEWAY] != NULL) ||
+ info.rti_info[RTAX_IFP] != NULL ||
+ (info.rti_info[RTAX_IFA] != NULL &&
+ !sa_equal(info.rti_info[RTAX_IFA],
+ rt->rt_ifa->ifa_addr))) {
+ RT_UNLOCK(rt);
+ RADIX_NODE_HEAD_LOCK(rnh);
+ error = rt_getifa_fib(&info, rt->rt_fibnum);
+ /*
+ * XXXRW: Really we should release this
+ * reference later, but this maintains
+ * historical behavior.
+ */
+ if (info.rti_ifa != NULL)
+ ifa_free(info.rti_ifa);
+ RADIX_NODE_HEAD_UNLOCK(rnh);
+ if (error != 0)
+ senderr(error);
+ RT_LOCK(rt);
+ }
+ if (info.rti_ifa != NULL &&
+ info.rti_ifa != rt->rt_ifa &&
+ rt->rt_ifa != NULL &&
+ rt->rt_ifa->ifa_rtrequest != NULL) {
+ rt->rt_ifa->ifa_rtrequest(RTM_DELETE, rt,
+ &info);
+ ifa_free(rt->rt_ifa);
+ }
+ if (info.rti_info[RTAX_GATEWAY] != NULL) {
+ RT_UNLOCK(rt);
+ RADIX_NODE_HEAD_LOCK(rnh);
+ RT_LOCK(rt);
+
+ error = rt_setgate(rt, rt_key(rt),
+ info.rti_info[RTAX_GATEWAY]);
+ RADIX_NODE_HEAD_UNLOCK(rnh);
+ if (error != 0) {
+ RT_UNLOCK(rt);
+ senderr(error);
+ }
+ rt->rt_flags |= (RTF_GATEWAY & info.rti_flags);
+ }
+ if (info.rti_ifa != NULL &&
+ info.rti_ifa != rt->rt_ifa) {
+ ifa_ref(info.rti_ifa);
+ rt->rt_ifa = info.rti_ifa;
+ rt->rt_ifp = info.rti_ifp;
+ }
+ /* Allow some flags to be toggled on change. */
+ rt->rt_flags = (rt->rt_flags & ~RTF_FMASK) |
+ (rtm->rtm_flags & RTF_FMASK);
+ rt_setmetrics(rtm->rtm_inits, &rtm->rtm_rmx,
+ &rt->rt_rmx);
+ rtm->rtm_index = rt->rt_ifp->if_index;
+ if (rt->rt_ifa && rt->rt_ifa->ifa_rtrequest)
+ rt->rt_ifa->ifa_rtrequest(RTM_ADD, rt, &info);
+ /* FALLTHROUGH */
+ case RTM_LOCK:
+ /* We don't support locks anymore */
+ break;
+ }
+ RT_UNLOCK(rt);
+ break;
+
+ default:
+ senderr(EOPNOTSUPP);
+ }
+
+flush:
+ if (rtm) {
+ if (error)
+ rtm->rtm_errno = error;
+ else
+ rtm->rtm_flags |= RTF_DONE;
+ }
+ if (rt) /* XXX can this be true? */
+ RTFREE(rt);
+ {
+ struct rawcb *rp = NULL;
+ /*
+ * Check to see if we don't want our own messages.
+ */
+ if ((so->so_options & SO_USELOOPBACK) == 0) {
+ if (route_cb.any_count <= 1) {
+ if (rtm)
+ Free(rtm);
+ m_freem(m);
+ return (error);
+ }
+ /* There is another listener, so construct message */
+ rp = sotorawcb(so);
+ }
+ if (rtm) {
+ m_copyback(m, 0, rtm->rtm_msglen, (caddr_t)rtm);
+ if (m->m_pkthdr.len < rtm->rtm_msglen) {
+ m_freem(m);
+ m = NULL;
+ } else if (m->m_pkthdr.len > rtm->rtm_msglen)
+ m_adj(m, rtm->rtm_msglen - m->m_pkthdr.len);
+ Free(rtm);
+ }
+ if (m) {
+ if (rp) {
+ /*
+				/*
+				 * XXX ensure we don't get a copy by
+				 * invalidating our protocol
+				 */
+ unsigned short family = rp->rcb_proto.sp_family;
+ rp->rcb_proto.sp_family = 0;
+ rt_dispatch(m, info.rti_info[RTAX_DST]);
+ rp->rcb_proto.sp_family = family;
+ } else
+ rt_dispatch(m, info.rti_info[RTAX_DST]);
+ }
+ }
+ return (error);
+#undef sa_equal
+}
+
+static void
+rt_setmetrics(u_long which, const struct rt_metrics *in,
+ struct rt_metrics_lite *out)
+{
+#define metric(f, e) if (which & (f)) out->e = in->e;
+ /*
+	 * Only these are stored in the routing entry since the
+	 * introduction of the TCP hostcache. The rest is ignored.
+ */
+ metric(RTV_MTU, rmx_mtu);
+ metric(RTV_WEIGHT, rmx_weight);
+ /* Userland -> kernel timebase conversion. */
+ if (which & RTV_EXPIRE)
+ out->rmx_expire = in->rmx_expire ?
+ in->rmx_expire - time_second + time_uptime : 0;
+#undef metric
+}
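+
+/*
+ * Example of the timebase conversion above: if userland passes
+ * rmx_expire == time_second + 600 (ten minutes from now in wall-clock
+ * time), it is stored as time_uptime + 600, making the expiry immune
+ * to wall-clock adjustments; rt_getmetrics() below applies the
+ * inverse mapping when reporting the entry back to userland.
+ */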
+
+static void
+rt_getmetrics(const struct rt_metrics_lite *in, struct rt_metrics *out)
+{
+#define metric(e) out->e = in->e;
+ bzero(out, sizeof(*out));
+ metric(rmx_mtu);
+ metric(rmx_weight);
+ /* Kernel -> userland timebase conversion. */
+ out->rmx_expire = in->rmx_expire ?
+ in->rmx_expire - time_uptime + time_second : 0;
+#undef metric
+}
+
+/*
+ * Extract the addresses of the passed sockaddrs.
+ * Do a little sanity checking so as to avoid bad memory references.
+ * This data is derived straight from userland.
+ */
+static int
+rt_xaddrs(caddr_t cp, caddr_t cplim, struct rt_addrinfo *rtinfo)
+{
+ struct sockaddr *sa;
+ int i;
+
+ for (i = 0; i < RTAX_MAX && cp < cplim; i++) {
+ if ((rtinfo->rti_addrs & (1 << i)) == 0)
+ continue;
+ sa = (struct sockaddr *)cp;
+ /*
+ * It won't fit.
+ */
+ if (cp + sa->sa_len > cplim)
+ return (EINVAL);
+		/*
+		 * There are no more: quit now. If more bits are set,
+		 * they are in error; route(1) has been seen to generate
+		 * such messages, which used to make the kernel core
+		 * dump. For compatibility, point the entry at a safe
+		 * address instead.
+		 */
+ if (sa->sa_len == 0) {
+ rtinfo->rti_info[i] = &sa_zero;
+ return (0); /* should be EINVAL but for compat */
+ }
+ /* accept it */
+ rtinfo->rti_info[i] = sa;
+ cp += SA_SIZE(sa);
+ }
+ return (0);
+}
+
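+/*
+ * Allocate an mbuf and build a routing message of the given type in
+ * it: the fixed header for that type, followed by the sockaddrs
+ * present in rtinfo (whose rti_addrs bitmask is updated as a side
+ * effect). Returns NULL on allocation failure.
+ */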
+static struct mbuf *
+rt_msg1(int type, struct rt_addrinfo *rtinfo)
+{
+ struct rt_msghdr *rtm;
+ struct mbuf *m;
+ int i;
+ struct sockaddr *sa;
+ int len, dlen;
+
+ switch (type) {
+
+ case RTM_DELADDR:
+ case RTM_NEWADDR:
+ len = sizeof(struct ifa_msghdr);
+ break;
+
+ case RTM_DELMADDR:
+ case RTM_NEWMADDR:
+ len = sizeof(struct ifma_msghdr);
+ break;
+
+ case RTM_IFINFO:
+ len = sizeof(struct if_msghdr);
+ break;
+
+ case RTM_IFANNOUNCE:
+ case RTM_IEEE80211:
+ len = sizeof(struct if_announcemsghdr);
+ break;
+
+ default:
+ len = sizeof(struct rt_msghdr);
+ }
+ if (len > MCLBYTES)
+ panic("rt_msg1");
+ m = m_gethdr(M_DONTWAIT, MT_DATA);
+ if (m && len > MHLEN) {
+ MCLGET(m, M_DONTWAIT);
+ if ((m->m_flags & M_EXT) == 0) {
+ m_free(m);
+ m = NULL;
+ }
+ }
+ if (m == NULL)
+ return (m);
+ m->m_pkthdr.len = m->m_len = len;
+ m->m_pkthdr.rcvif = NULL;
+ rtm = mtod(m, struct rt_msghdr *);
+ bzero((caddr_t)rtm, len);
+ for (i = 0; i < RTAX_MAX; i++) {
+ if ((sa = rtinfo->rti_info[i]) == NULL)
+ continue;
+ rtinfo->rti_addrs |= (1 << i);
+ dlen = SA_SIZE(sa);
+ m_copyback(m, len, dlen, (caddr_t)sa);
+ len += dlen;
+ }
+ if (m->m_pkthdr.len != len) {
+ m_freem(m);
+ return (NULL);
+ }
+ rtm->rtm_msglen = len;
+ rtm->rtm_version = RTM_VERSION;
+ rtm->rtm_type = type;
+ return (m);
+}
+
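+/*
+ * Sibling of rt_msg1() that writes into caller-supplied storage
+ * instead of an mbuf. With cp == NULL it only computes the required
+ * message length; when a walkarg is supplied it additionally grows
+ * the per-walk buffer w_tmem as needed and retries once to format
+ * the message there. Returns the (aligned) message length.
+ */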
+static int
+rt_msg2(int type, struct rt_addrinfo *rtinfo, caddr_t cp, struct walkarg *w)
+{
+ int i;
+ int len, dlen, second_time = 0;
+ caddr_t cp0;
+
+ rtinfo->rti_addrs = 0;
+again:
+ switch (type) {
+
+ case RTM_DELADDR:
+ case RTM_NEWADDR:
+ len = sizeof(struct ifa_msghdr);
+ break;
+
+ case RTM_IFINFO:
+#ifdef COMPAT_FREEBSD32
+ if (w != NULL && w->w_req->flags & SCTL_MASK32) {
+ len = sizeof(struct if_msghdr32);
+ break;
+ }
+#endif
+ len = sizeof(struct if_msghdr);
+ break;
+
+ case RTM_NEWMADDR:
+ len = sizeof(struct ifma_msghdr);
+ break;
+
+ default:
+ len = sizeof(struct rt_msghdr);
+ }
+ cp0 = cp;
+ if (cp0)
+ cp += len;
+ for (i = 0; i < RTAX_MAX; i++) {
+ struct sockaddr *sa;
+
+ if ((sa = rtinfo->rti_info[i]) == NULL)
+ continue;
+ rtinfo->rti_addrs |= (1 << i);
+ dlen = SA_SIZE(sa);
+ if (cp) {
+ bcopy((caddr_t)sa, cp, (unsigned)dlen);
+ cp += dlen;
+ }
+ len += dlen;
+ }
+ len = ALIGN(len);
+ if (cp == NULL && w != NULL && !second_time) {
+ struct walkarg *rw = w;
+
+ if (rw->w_req) {
+ if (rw->w_tmemsize < len) {
+ if (rw->w_tmem)
+ free(rw->w_tmem, M_RTABLE);
+ rw->w_tmem = (caddr_t)
+ malloc(len, M_RTABLE, M_NOWAIT);
+ if (rw->w_tmem)
+ rw->w_tmemsize = len;
+ }
+ if (rw->w_tmem) {
+ cp = rw->w_tmem;
+ second_time = 1;
+ goto again;
+ }
+ }
+ }
+ if (cp) {
+ struct rt_msghdr *rtm = (struct rt_msghdr *)cp0;
+
+ rtm->rtm_version = RTM_VERSION;
+ rtm->rtm_type = type;
+ rtm->rtm_msglen = len;
+ }
+ return (len);
+}
+
+/*
+ * This routine is called to generate a message from the routing
+ * socket indicating that a redirect has occurred, a routing lookup
+ * has failed, or that a protocol has detected timeouts to a particular
+ * destination.
+ */
+void
+rt_missmsg(int type, struct rt_addrinfo *rtinfo, int flags, int error)
+{
+ struct rt_msghdr *rtm;
+ struct mbuf *m;
+ struct sockaddr *sa = rtinfo->rti_info[RTAX_DST];
+
+ if (route_cb.any_count == 0)
+ return;
+ m = rt_msg1(type, rtinfo);
+ if (m == NULL)
+ return;
+ rtm = mtod(m, struct rt_msghdr *);
+ rtm->rtm_flags = RTF_DONE | flags;
+ rtm->rtm_errno = error;
+ rtm->rtm_addrs = rtinfo->rti_addrs;
+ rt_dispatch(m, sa);
+}
+
+/*
+ * This routine is called to generate a message from the routing
+ * socket indicating that the status of a network interface has changed.
+ */
+void
+rt_ifmsg(struct ifnet *ifp)
+{
+ struct if_msghdr *ifm;
+ struct mbuf *m;
+ struct rt_addrinfo info;
+
+ if (route_cb.any_count == 0)
+ return;
+ bzero((caddr_t)&info, sizeof(info));
+ m = rt_msg1(RTM_IFINFO, &info);
+ if (m == NULL)
+ return;
+ ifm = mtod(m, struct if_msghdr *);
+ ifm->ifm_index = ifp->if_index;
+ ifm->ifm_flags = ifp->if_flags | ifp->if_drv_flags;
+ ifm->ifm_data = ifp->if_data;
+ ifm->ifm_addrs = 0;
+ rt_dispatch(m, NULL);
+}
+
+/*
+ * This is called to generate messages from the routing socket
+ * indicating a network interface has had addresses associated with it.
+ * If we ever reverse the logic, so that messages TO the routing
+ * socket indicate a request to configure interfaces, then this will
+ * be unnecessary as the routing socket will automatically generate
+ * copies of it.
+ */
+void
+rt_newaddrmsg(int cmd, struct ifaddr *ifa, int error, struct rtentry *rt)
+{
+ struct rt_addrinfo info;
+ struct sockaddr *sa = NULL;
+ int pass;
+ struct mbuf *m = NULL;
+ struct ifnet *ifp = ifa->ifa_ifp;
+
+ KASSERT(cmd == RTM_ADD || cmd == RTM_DELETE,
+ ("unexpected cmd %u", cmd));
+#if defined(INET) || defined(INET6)
+#ifdef SCTP
+ /*
+	 * Notify the SCTP stack; this will only get called when an
+	 * address is added or deleted.
+	 * XXX pass the ifaddr struct instead of ifa->ifa_addr...
+ */
+ sctp_addr_change(ifa, cmd);
+#endif /* SCTP */
+#endif
+ if (route_cb.any_count == 0)
+ return;
+ for (pass = 1; pass < 3; pass++) {
+ bzero((caddr_t)&info, sizeof(info));
+ if ((cmd == RTM_ADD && pass == 1) ||
+ (cmd == RTM_DELETE && pass == 2)) {
+ struct ifa_msghdr *ifam;
+ int ncmd = cmd == RTM_ADD ? RTM_NEWADDR : RTM_DELADDR;
+
+ info.rti_info[RTAX_IFA] = sa = ifa->ifa_addr;
+ info.rti_info[RTAX_IFP] = ifp->if_addr->ifa_addr;
+ info.rti_info[RTAX_NETMASK] = ifa->ifa_netmask;
+ info.rti_info[RTAX_BRD] = ifa->ifa_dstaddr;
+ if ((m = rt_msg1(ncmd, &info)) == NULL)
+ continue;
+ ifam = mtod(m, struct ifa_msghdr *);
+ ifam->ifam_index = ifp->if_index;
+ ifam->ifam_metric = ifa->ifa_metric;
+ ifam->ifam_flags = ifa->ifa_flags;
+ ifam->ifam_addrs = info.rti_addrs;
+ }
+ if ((cmd == RTM_ADD && pass == 2) ||
+ (cmd == RTM_DELETE && pass == 1)) {
+ struct rt_msghdr *rtm;
+
+ if (rt == NULL)
+ continue;
+ info.rti_info[RTAX_NETMASK] = rt_mask(rt);
+ info.rti_info[RTAX_DST] = sa = rt_key(rt);
+ info.rti_info[RTAX_GATEWAY] = rt->rt_gateway;
+ if ((m = rt_msg1(cmd, &info)) == NULL)
+ continue;
+ rtm = mtod(m, struct rt_msghdr *);
+ rtm->rtm_index = ifp->if_index;
+ rtm->rtm_flags |= rt->rt_flags;
+ rtm->rtm_errno = error;
+ rtm->rtm_addrs = info.rti_addrs;
+ }
+ rt_dispatch(m, sa);
+ }
+}
+
+/*
+ * This is the analogue of rt_newaddrmsg, performing the same
+ * function but for multicast group memberships. This is easier since
+ * there is no route state to worry about.
+ */
+void
+rt_newmaddrmsg(int cmd, struct ifmultiaddr *ifma)
+{
+ struct rt_addrinfo info;
+ struct mbuf *m = NULL;
+ struct ifnet *ifp = ifma->ifma_ifp;
+ struct ifma_msghdr *ifmam;
+
+ if (route_cb.any_count == 0)
+ return;
+
+ bzero((caddr_t)&info, sizeof(info));
+ info.rti_info[RTAX_IFA] = ifma->ifma_addr;
+ info.rti_info[RTAX_IFP] = ifp ? ifp->if_addr->ifa_addr : NULL;
+ /*
+ * If a link-layer address is present, present it as a ``gateway''
+ * (similarly to how ARP entries, e.g., are presented).
+ */
+ info.rti_info[RTAX_GATEWAY] = ifma->ifma_lladdr;
+ m = rt_msg1(cmd, &info);
+ if (m == NULL)
+ return;
+ ifmam = mtod(m, struct ifma_msghdr *);
+ KASSERT(ifp != NULL, ("%s: link-layer multicast address w/o ifp\n",
+ __func__));
+ ifmam->ifmam_index = ifp->if_index;
+ ifmam->ifmam_addrs = info.rti_addrs;
+ rt_dispatch(m, ifma->ifma_addr);
+}
+
+static struct mbuf *
+rt_makeifannouncemsg(struct ifnet *ifp, int type, int what,
+ struct rt_addrinfo *info)
+{
+ struct if_announcemsghdr *ifan;
+ struct mbuf *m;
+
+ if (route_cb.any_count == 0)
+ return NULL;
+ bzero((caddr_t)info, sizeof(*info));
+ m = rt_msg1(type, info);
+ if (m != NULL) {
+ ifan = mtod(m, struct if_announcemsghdr *);
+ ifan->ifan_index = ifp->if_index;
+ strlcpy(ifan->ifan_name, ifp->if_xname,
+ sizeof(ifan->ifan_name));
+ ifan->ifan_what = what;
+ }
+ return m;
+}
+
+/*
+ * This is called to generate routing socket messages indicating
+ * IEEE80211 wireless events.
+ * XXX we piggyback on the RTM_IFANNOUNCE msg format in a clumsy way.
+ */
+void
+rt_ieee80211msg(struct ifnet *ifp, int what, void *data, size_t data_len)
+{
+ struct mbuf *m;
+ struct rt_addrinfo info;
+
+ m = rt_makeifannouncemsg(ifp, RTM_IEEE80211, what, &info);
+ if (m != NULL) {
+ /*
+ * Append the ieee80211 data. Try to stick it in the
+ * mbuf containing the ifannounce msg; otherwise allocate
+ * a new mbuf and append.
+ *
+ * NB: we assume m is a single mbuf.
+ */
+ if (data_len > M_TRAILINGSPACE(m)) {
+ struct mbuf *n = m_get(M_NOWAIT, MT_DATA);
+ if (n == NULL) {
+ m_freem(m);
+ return;
+ }
+ bcopy(data, mtod(n, void *), data_len);
+ n->m_len = data_len;
+ m->m_next = n;
+ } else if (data_len > 0) {
+ bcopy(data, mtod(m, u_int8_t *) + m->m_len, data_len);
+ m->m_len += data_len;
+ }
+ if (m->m_flags & M_PKTHDR)
+ m->m_pkthdr.len += data_len;
+ mtod(m, struct if_announcemsghdr *)->ifan_msglen += data_len;
+ rt_dispatch(m, NULL);
+ }
+}
+
+/*
+ * This is called to generate routing socket messages indicating
+ * network interface arrival and departure.
+ */
+void
+rt_ifannouncemsg(struct ifnet *ifp, int what)
+{
+ struct mbuf *m;
+ struct rt_addrinfo info;
+
+ m = rt_makeifannouncemsg(ifp, RTM_IFANNOUNCE, what, &info);
+ if (m != NULL)
+ rt_dispatch(m, NULL);
+}
+
+static void
+rt_dispatch(struct mbuf *m, const struct sockaddr *sa)
+{
+ struct m_tag *tag;
+
+ /*
+ * Preserve the family from the sockaddr, if any, in an m_tag for
+ * use when injecting the mbuf into the routing socket buffer from
+ * the netisr.
+ */
+ if (sa != NULL) {
+ tag = m_tag_get(PACKET_TAG_RTSOCKFAM, sizeof(unsigned short),
+ M_NOWAIT);
+ if (tag == NULL) {
+ m_freem(m);
+ return;
+ }
+ *(unsigned short *)(tag + 1) = sa->sa_family;
+ m_tag_prepend(m, tag);
+ }
+#ifdef VIMAGE
+ if (V_loif)
+ m->m_pkthdr.rcvif = V_loif;
+ else {
+ m_freem(m);
+ return;
+ }
+#endif
+ netisr_queue(NETISR_ROUTE, m); /* mbuf is free'd on failure. */
+}
+
+/*
+ * This is used in dumping the kernel table via sysctl().
+ */
+static int
+sysctl_dumpentry(struct radix_node *rn, void *vw)
+{
+ struct walkarg *w = vw;
+ struct rtentry *rt = (struct rtentry *)rn;
+ int error = 0, size;
+ struct rt_addrinfo info;
+
+ if (w->w_op == NET_RT_FLAGS && !(rt->rt_flags & w->w_arg))
+ return 0;
+ if ((rt->rt_flags & RTF_HOST) == 0
+ ? jailed_without_vnet(w->w_req->td->td_ucred)
+ : prison_if(w->w_req->td->td_ucred, rt_key(rt)) != 0)
+ return (0);
+ bzero((caddr_t)&info, sizeof(info));
+ info.rti_info[RTAX_DST] = rt_key(rt);
+ info.rti_info[RTAX_GATEWAY] = rt->rt_gateway;
+ info.rti_info[RTAX_NETMASK] = rt_mask(rt);
+ info.rti_info[RTAX_GENMASK] = 0;
+ if (rt->rt_ifp) {
+ info.rti_info[RTAX_IFP] = rt->rt_ifp->if_addr->ifa_addr;
+ info.rti_info[RTAX_IFA] = rt->rt_ifa->ifa_addr;
+ if (rt->rt_ifp->if_flags & IFF_POINTOPOINT)
+ info.rti_info[RTAX_BRD] = rt->rt_ifa->ifa_dstaddr;
+ }
+ size = rt_msg2(RTM_GET, &info, NULL, w);
+ if (w->w_req && w->w_tmem) {
+ struct rt_msghdr *rtm = (struct rt_msghdr *)w->w_tmem;
+
+ rtm->rtm_flags = rt->rt_flags;
+		/*
+		 * Let's be honest about this being an ugly hack:
+		 * rtm_fmask is abused to carry the rmx_pksent counter.
+		 */
+ rtm->rtm_fmask = rt->rt_rmx.rmx_pksent;
+ rt_getmetrics(&rt->rt_rmx, &rtm->rtm_rmx);
+ rtm->rtm_index = rt->rt_ifp->if_index;
+ rtm->rtm_errno = rtm->rtm_pid = rtm->rtm_seq = 0;
+ rtm->rtm_addrs = info.rti_addrs;
+ error = SYSCTL_OUT(w->w_req, (caddr_t)rtm, size);
+ return (error);
+ }
+ return (error);
+}
+
+#ifdef COMPAT_FREEBSD32
+static void
+copy_ifdata32(struct if_data *src, struct if_data32 *dst)
+{
+
+ bzero(dst, sizeof(*dst));
+ CP(*src, *dst, ifi_type);
+ CP(*src, *dst, ifi_physical);
+ CP(*src, *dst, ifi_addrlen);
+ CP(*src, *dst, ifi_hdrlen);
+ CP(*src, *dst, ifi_link_state);
+ dst->ifi_datalen = sizeof(struct if_data32);
+ CP(*src, *dst, ifi_mtu);
+ CP(*src, *dst, ifi_metric);
+ CP(*src, *dst, ifi_baudrate);
+ CP(*src, *dst, ifi_ipackets);
+ CP(*src, *dst, ifi_ierrors);
+ CP(*src, *dst, ifi_opackets);
+ CP(*src, *dst, ifi_oerrors);
+ CP(*src, *dst, ifi_collisions);
+ CP(*src, *dst, ifi_ibytes);
+ CP(*src, *dst, ifi_obytes);
+ CP(*src, *dst, ifi_imcasts);
+ CP(*src, *dst, ifi_omcasts);
+ CP(*src, *dst, ifi_iqdrops);
+ CP(*src, *dst, ifi_noproto);
+ CP(*src, *dst, ifi_hwassist);
+ CP(*src, *dst, ifi_epoch);
+ TV_CP(*src, *dst, ifi_lastchange);
+}
+#endif
+
+static int
+sysctl_iflist(int af, struct walkarg *w)
+{
+ struct ifnet *ifp;
+ struct ifaddr *ifa;
+ struct rt_addrinfo info;
+ int len, error = 0;
+
+ bzero((caddr_t)&info, sizeof(info));
+ IFNET_RLOCK();
+ TAILQ_FOREACH(ifp, &V_ifnet, if_link) {
+ if (w->w_arg && w->w_arg != ifp->if_index)
+ continue;
+ IF_ADDR_LOCK(ifp);
+ ifa = ifp->if_addr;
+ info.rti_info[RTAX_IFP] = ifa->ifa_addr;
+ len = rt_msg2(RTM_IFINFO, &info, NULL, w);
+ info.rti_info[RTAX_IFP] = NULL;
+ if (w->w_req && w->w_tmem) {
+ struct if_msghdr *ifm;
+
+#ifdef COMPAT_FREEBSD32
+ if (w->w_req->flags & SCTL_MASK32) {
+ struct if_msghdr32 *ifm32;
+
+ ifm32 = (struct if_msghdr32 *)w->w_tmem;
+ ifm32->ifm_index = ifp->if_index;
+ ifm32->ifm_flags = ifp->if_flags |
+ ifp->if_drv_flags;
+ copy_ifdata32(&ifp->if_data, &ifm32->ifm_data);
+ ifm32->ifm_addrs = info.rti_addrs;
+ error = SYSCTL_OUT(w->w_req, (caddr_t)ifm32,
+ len);
+ goto sysctl_out;
+ }
+#endif
+ ifm = (struct if_msghdr *)w->w_tmem;
+ ifm->ifm_index = ifp->if_index;
+ ifm->ifm_flags = ifp->if_flags | ifp->if_drv_flags;
+ ifm->ifm_data = ifp->if_data;
+ ifm->ifm_addrs = info.rti_addrs;
+ error = SYSCTL_OUT(w->w_req, (caddr_t)ifm, len);
+#ifdef COMPAT_FREEBSD32
+ sysctl_out:
+#endif
+ if (error)
+ goto done;
+ }
+ while ((ifa = TAILQ_NEXT(ifa, ifa_link)) != NULL) {
+ if (af && af != ifa->ifa_addr->sa_family)
+ continue;
+ if (prison_if(w->w_req->td->td_ucred,
+ ifa->ifa_addr) != 0)
+ continue;
+ info.rti_info[RTAX_IFA] = ifa->ifa_addr;
+ info.rti_info[RTAX_NETMASK] = ifa->ifa_netmask;
+ info.rti_info[RTAX_BRD] = ifa->ifa_dstaddr;
+ len = rt_msg2(RTM_NEWADDR, &info, NULL, w);
+ if (w->w_req && w->w_tmem) {
+ struct ifa_msghdr *ifam;
+
+ ifam = (struct ifa_msghdr *)w->w_tmem;
+ ifam->ifam_index = ifa->ifa_ifp->if_index;
+ ifam->ifam_flags = ifa->ifa_flags;
+ ifam->ifam_metric = ifa->ifa_metric;
+ ifam->ifam_addrs = info.rti_addrs;
+ error = SYSCTL_OUT(w->w_req, w->w_tmem, len);
+ if (error)
+ goto done;
+ }
+ }
+ IF_ADDR_UNLOCK(ifp);
+ info.rti_info[RTAX_IFA] = info.rti_info[RTAX_NETMASK] =
+ info.rti_info[RTAX_BRD] = NULL;
+ }
+done:
+ if (ifp != NULL)
+ IF_ADDR_UNLOCK(ifp);
+ IFNET_RUNLOCK();
+ return (error);
+}
+
+static int
+sysctl_ifmalist(int af, struct walkarg *w)
+{
+ struct ifnet *ifp;
+ struct ifmultiaddr *ifma;
+ struct rt_addrinfo info;
+ int len, error = 0;
+ struct ifaddr *ifa;
+
+ bzero((caddr_t)&info, sizeof(info));
+ IFNET_RLOCK();
+ TAILQ_FOREACH(ifp, &V_ifnet, if_link) {
+ if (w->w_arg && w->w_arg != ifp->if_index)
+ continue;
+ ifa = ifp->if_addr;
+ info.rti_info[RTAX_IFP] = ifa ? ifa->ifa_addr : NULL;
+ IF_ADDR_LOCK(ifp);
+ TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
+ if (af && af != ifma->ifma_addr->sa_family)
+ continue;
+ if (prison_if(w->w_req->td->td_ucred,
+ ifma->ifma_addr) != 0)
+ continue;
+ info.rti_info[RTAX_IFA] = ifma->ifma_addr;
+ info.rti_info[RTAX_GATEWAY] =
+ (ifma->ifma_addr->sa_family != AF_LINK) ?
+ ifma->ifma_lladdr : NULL;
+ len = rt_msg2(RTM_NEWMADDR, &info, NULL, w);
+ if (w->w_req && w->w_tmem) {
+ struct ifma_msghdr *ifmam;
+
+ ifmam = (struct ifma_msghdr *)w->w_tmem;
+ ifmam->ifmam_index = ifma->ifma_ifp->if_index;
+ ifmam->ifmam_flags = 0;
+ ifmam->ifmam_addrs = info.rti_addrs;
+ error = SYSCTL_OUT(w->w_req, w->w_tmem, len);
+ if (error) {
+ IF_ADDR_UNLOCK(ifp);
+ goto done;
+ }
+ }
+ }
+ IF_ADDR_UNLOCK(ifp);
+ }
+done:
+ IFNET_RUNLOCK();
+ return (error);
+}
+
+static int
+sysctl_rtsock(SYSCTL_HANDLER_ARGS)
+{
+ int *name = (int *)arg1;
+ u_int namelen = arg2;
+ struct radix_node_head *rnh = NULL; /* silence compiler. */
+ int i, lim, error = EINVAL;
+ u_char af;
+ struct walkarg w;
+
+	name++;
+ namelen--;
+ if (req->newptr)
+ return (EPERM);
+ if (namelen != 3)
+ return ((namelen < 3) ? EISDIR : ENOTDIR);
+ af = name[0];
+ if (af > AF_MAX)
+ return (EINVAL);
+ bzero(&w, sizeof(w));
+ w.w_op = name[1];
+ w.w_arg = name[2];
+ w.w_req = req;
+
+ error = sysctl_wire_old_buffer(req, 0);
+ if (error)
+ return (error);
+ switch (w.w_op) {
+
+ case NET_RT_DUMP:
+ case NET_RT_FLAGS:
+ if (af == 0) { /* dump all tables */
+ i = 1;
+ lim = AF_MAX;
+ } else /* dump only one table */
+ i = lim = af;
+
+ /*
+		 * take care of llinfo entries; the caller must
+		 * specify an AF.
+ */
+ if (w.w_op == NET_RT_FLAGS &&
+ (w.w_arg == 0 || w.w_arg & RTF_LLINFO)) {
+ if (af != 0)
+ error = lltable_sysctl_dumparp(af, w.w_req);
+ else
+ error = EINVAL;
+ break;
+ }
+ /*
+ * take care of routing entries
+ */
+ for (error = 0; error == 0 && i <= lim; i++) {
+ rnh = rt_tables_get_rnh(req->td->td_proc->p_fibnum, i);
+ if (rnh != NULL) {
+ RADIX_NODE_HEAD_LOCK(rnh);
+ error = rnh->rnh_walktree(rnh,
+ sysctl_dumpentry, &w);
+ RADIX_NODE_HEAD_UNLOCK(rnh);
+ } else if (af != 0)
+ error = EAFNOSUPPORT;
+ }
+ break;
+
+ case NET_RT_IFLIST:
+ error = sysctl_iflist(af, &w);
+ break;
+
+ case NET_RT_IFMALIST:
+ error = sysctl_ifmalist(af, &w);
+ break;
+ }
+ if (w.w_tmem)
+ free(w.w_tmem, M_RTABLE);
+ return (error);
+}
+
+SYSCTL_NODE(_net, PF_ROUTE, routetable, CTLFLAG_RD, sysctl_rtsock, "");
+
+/*
+ * Definitions of protocols supported in the ROUTE domain.
+ */
+
+static struct domain routedomain; /* or at least forward */
+
+static struct protosw routesw[] = {
+{
+ .pr_type = SOCK_RAW,
+ .pr_domain = &routedomain,
+ .pr_flags = PR_ATOMIC|PR_ADDR,
+ .pr_output = route_output,
+ .pr_ctlinput = raw_ctlinput,
+ .pr_init = raw_init,
+ .pr_usrreqs = &route_usrreqs
+}
+};
+
+static struct domain routedomain = {
+ .dom_family = PF_ROUTE,
+ .dom_name = "route",
+ .dom_protosw = routesw,
+ .dom_protoswNPROTOSW = &routesw[sizeof(routesw)/sizeof(routesw[0])]
+};
+
+VNET_DOMAIN_SET(route);
diff --git a/rtems/freebsd/net/slcompress.c b/rtems/freebsd/net/slcompress.c
new file mode 100644
index 00000000..ac3ce5a0
--- /dev/null
+++ b/rtems/freebsd/net/slcompress.c
@@ -0,0 +1,609 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 1989, 1993, 1994
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)slcompress.c 8.2 (Berkeley) 4/16/94
+ * $FreeBSD$
+ */
+
+/*
+ * Routines to compress and uncompress TCP packets (for transmission
+ * over low-speed serial lines).
+ *
+ * Van Jacobson (van@helios.ee.lbl.gov), Dec 31, 1989:
+ * - Initial distribution.
+ *
+ */
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/systm.h>
+
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/in_systm.h>
+#include <rtems/freebsd/netinet/ip.h>
+#include <rtems/freebsd/netinet/tcp.h>
+
+#include <rtems/freebsd/net/slcompress.h>
+
+#ifndef SL_NO_STATS
+#define INCR(counter) ++comp->counter;
+#else
+#define INCR(counter)
+#endif
+
+#define BCMP(p1, p2, n) bcmp((void *)(p1), (void *)(p2), (int)(n))
+#define BCOPY(p1, p2, n) bcopy((void *)(p1), (void *)(p2), (int)(n))
+
+void
+sl_compress_init(struct slcompress *comp, int max_state)
+{
+ register u_int i;
+ register struct cstate *tstate = comp->tstate;
+
+ if (max_state == -1) {
+ max_state = MAX_STATES - 1;
+ bzero((char *)comp, sizeof(*comp));
+ } else {
+ /* Don't reset statistics */
+ bzero((char *)comp->tstate, sizeof(comp->tstate));
+ bzero((char *)comp->rstate, sizeof(comp->rstate));
+ }
+ for (i = max_state; i > 0; --i) {
+ tstate[i].cs_id = i;
+ tstate[i].cs_next = &tstate[i - 1];
+ }
+ tstate[0].cs_next = &tstate[max_state];
+ tstate[0].cs_id = 0;
+ comp->last_cs = &tstate[0];
+ comp->last_recv = 255;
+ comp->last_xmit = 255;
+ comp->flags = SLF_TOSS;
+}
+
+
+/* ENCODE encodes a number that is known to be non-zero. ENCODEZ
+ * checks for zero (since zero has to be encoded in the long, 3 byte
+ * form).
+ */
+#define ENCODE(n) { \
+ if ((u_int16_t)(n) >= 256) { \
+ *cp++ = 0; \
+ cp[1] = (n); \
+ cp[0] = (n) >> 8; \
+ cp += 2; \
+ } else { \
+ *cp++ = (n); \
+ } \
+}
+#define ENCODEZ(n) { \
+ if ((u_int16_t)(n) >= 256 || (u_int16_t)(n) == 0) { \
+ *cp++ = 0; \
+ cp[1] = (n); \
+ cp[0] = (n) >> 8; \
+ cp += 2; \
+ } else { \
+ *cp++ = (n); \
+ } \
+}
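+
+/*
+ * Worked example: a delta of 5 is emitted as the single byte 0x05,
+ * while a delta of 300 (0x012c) is emitted as the escape byte 0x00
+ * followed by 0x01 0x2c; ENCODEZ sends a zero delta in the same
+ * escaped three-byte form, since a bare 0x00 is the escape itself.
+ */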
+
+#define DECODEL(f) { \
+ if (*cp == 0) {\
+ (f) = htonl(ntohl(f) + ((cp[1] << 8) | cp[2])); \
+ cp += 3; \
+ } else { \
+ (f) = htonl(ntohl(f) + (u_int32_t)*cp++); \
+ } \
+}
+
+#define DECODES(f) { \
+ if (*cp == 0) {\
+ (f) = htons(ntohs(f) + ((cp[1] << 8) | cp[2])); \
+ cp += 3; \
+ } else { \
+ (f) = htons(ntohs(f) + (u_int32_t)*cp++); \
+ } \
+}
+
+#define DECODEU(f) { \
+ if (*cp == 0) {\
+ (f) = htons((cp[1] << 8) | cp[2]); \
+ cp += 3; \
+ } else { \
+ (f) = htons((u_int32_t)*cp++); \
+ } \
+}
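+
+/*
+ * Decoding mirrors the encoding above: a leading 0x00 means a 16-bit
+ * big-endian delta follows in the next two bytes, otherwise the byte
+ * itself is the delta. DECODEL and DECODES add the delta to a 32-bit
+ * or 16-bit field kept in network byte order; DECODEU overwrites the
+ * field with the decoded value.
+ */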
+
+/*
+ * Attempt to compress an outgoing TCP packet and return the type of
+ * the result. The caller must have already verified that the protocol
+ * is TCP. The first mbuf must contain the complete IP and TCP headers,
+ * and "ip" must be == mtod(m, struct ip *). "comp" supplies the
+ * compression state, and "compress_cid" tells us whether it is OK
+ * to leave out the CID field when feasible.
+ *
+ * The caller is responsible for adjusting m->m_pkthdr.len upon return,
+ * if m is an M_PKTHDR mbuf.
+ */
+u_int
+sl_compress_tcp(struct mbuf *m, struct ip *ip, struct slcompress *comp,
+    int compress_cid)
+{
+ register struct cstate *cs = comp->last_cs->cs_next;
+ register u_int hlen = ip->ip_hl;
+ register struct tcphdr *oth;
+ register struct tcphdr *th;
+ register u_int deltaS, deltaA;
+ register u_int changes = 0;
+ u_char new_seq[16];
+ register u_char *cp = new_seq;
+
+ /*
+ * Bail if this is an IP fragment or if the TCP packet isn't
+ * `compressible' (i.e., ACK isn't set or some other control bit is
+ * set). (We assume that the caller has already made sure the
+ * packet is IP proto TCP).
+ */
+ if ((ip->ip_off & htons(0x3fff)) || m->m_len < 40)
+ return (TYPE_IP);
+
+ th = (struct tcphdr *)&((int32_t *)ip)[hlen];
+ if ((th->th_flags & (TH_SYN|TH_FIN|TH_RST|TH_ACK)) != TH_ACK)
+ return (TYPE_IP);
+ /*
+ * Packet is compressible -- we're going to send either a
+ * COMPRESSED_TCP or UNCOMPRESSED_TCP packet. Either way we need
+ * to locate (or create) the connection state. Special case the
+ * most recently used connection since it's most likely to be used
+ * again & we don't have to do any reordering if it's used.
+ */
+ INCR(sls_packets)
+ if (ip->ip_src.s_addr != cs->cs_ip.ip_src.s_addr ||
+ ip->ip_dst.s_addr != cs->cs_ip.ip_dst.s_addr ||
+ *(int32_t *)th != ((int32_t *)&cs->cs_ip)[cs->cs_ip.ip_hl]) {
+ /*
+ * Wasn't the first -- search for it.
+ *
+ * States are kept in a circularly linked list with
+ * last_cs pointing to the end of the list. The
+ * list is kept in lru order by moving a state to the
+ * head of the list whenever it is referenced. Since
+ * the list is short and, empirically, the connection
+ * we want is almost always near the front, we locate
+ * states via linear search. If we don't find a state
+ * for the datagram, the oldest state is (re-)used.
+ */
+ register struct cstate *lcs;
+ register struct cstate *lastcs = comp->last_cs;
+
+ do {
+ lcs = cs; cs = cs->cs_next;
+ INCR(sls_searches)
+ if (ip->ip_src.s_addr == cs->cs_ip.ip_src.s_addr
+ && ip->ip_dst.s_addr == cs->cs_ip.ip_dst.s_addr
+ && *(int32_t *)th ==
+ ((int32_t *)&cs->cs_ip)[cs->cs_ip.ip_hl])
+ goto found;
+ } while (cs != lastcs);
+
+ /*
+ * Didn't find it -- re-use oldest cstate. Send an
+ * uncompressed packet that tells the other side what
+ * connection number we're using for this conversation.
+ * Note that since the state list is circular, the oldest
+ * state points to the newest and we only need to set
+ * last_cs to update the lru linkage.
+ */
+ INCR(sls_misses)
+ comp->last_cs = lcs;
+ hlen += th->th_off;
+ hlen <<= 2;
+ if (hlen > m->m_len)
+ return TYPE_IP;
+ goto uncompressed;
+
+ found:
+ /*
+ * Found it -- move to the front on the connection list.
+ */
+ if (cs == lastcs)
+ comp->last_cs = lcs;
+ else {
+ lcs->cs_next = cs->cs_next;
+ cs->cs_next = lastcs->cs_next;
+ lastcs->cs_next = cs;
+ }
+ }
+
+ /*
+ * Make sure that only what we expect to change changed. The first
+ * line of the `if' checks the IP protocol version, header length &
+ * type of service. The 2nd line checks the "Don't fragment" bit.
+ * The 3rd line checks the time-to-live and protocol (the protocol
+ * check is unnecessary but costless). The 4th line checks the TCP
+ * header length. The 5th line checks IP options, if any. The 6th
+ * line checks TCP options, if any. If any of these things are
+ * different between the previous & current datagram, we send the
+ * current datagram `uncompressed'.
+ */
+ oth = (struct tcphdr *)&((int32_t *)&cs->cs_ip)[hlen];
+ deltaS = hlen;
+ hlen += th->th_off;
+ hlen <<= 2;
+ if (hlen > m->m_len)
+ return TYPE_IP;
+
+ if (((u_int16_t *)ip)[0] != ((u_int16_t *)&cs->cs_ip)[0] ||
+ ((u_int16_t *)ip)[3] != ((u_int16_t *)&cs->cs_ip)[3] ||
+ ((u_int16_t *)ip)[4] != ((u_int16_t *)&cs->cs_ip)[4] ||
+ th->th_off != oth->th_off ||
+ (deltaS > 5 &&
+ BCMP(ip + 1, &cs->cs_ip + 1, (deltaS - 5) << 2)) ||
+ (th->th_off > 5 &&
+ BCMP(th + 1, oth + 1, (th->th_off - 5) << 2)))
+ goto uncompressed;
+
+ /*
+ * Figure out which of the changing fields changed. The
+ * receiver expects changes in the order: urgent, window,
+ * ack, seq (the order minimizes the number of temporaries
+ * needed in this section of code).
+ */
+ if (th->th_flags & TH_URG) {
+ deltaS = ntohs(th->th_urp);
+ ENCODEZ(deltaS);
+ changes |= NEW_U;
+ } else if (th->th_urp != oth->th_urp)
+ /* argh! URG not set but urp changed -- a sensible
+ * implementation should never do this but RFC793
+ * doesn't prohibit the change so we have to deal
+ * with it. */
+ goto uncompressed;
+
+ deltaS = (u_int16_t)(ntohs(th->th_win) - ntohs(oth->th_win));
+ if (deltaS) {
+ ENCODE(deltaS);
+ changes |= NEW_W;
+ }
+
+ deltaA = ntohl(th->th_ack) - ntohl(oth->th_ack);
+ if (deltaA) {
+ if (deltaA > 0xffff)
+ goto uncompressed;
+ ENCODE(deltaA);
+ changes |= NEW_A;
+ }
+
+ deltaS = ntohl(th->th_seq) - ntohl(oth->th_seq);
+ if (deltaS) {
+ if (deltaS > 0xffff)
+ goto uncompressed;
+ ENCODE(deltaS);
+ changes |= NEW_S;
+ }
+
+ switch(changes) {
+
+ case 0:
+ /*
+ * Nothing changed. If this packet contains data and the
+ * last one didn't, this is probably a data packet following
+ * an ack (normal on an interactive connection) and we send
+ * it compressed. Otherwise it's probably a retransmit,
+ * retransmitted ack or window probe. Send it uncompressed
+ * in case the other side missed the compressed version.
+ */
+ if (ip->ip_len != cs->cs_ip.ip_len &&
+ ntohs(cs->cs_ip.ip_len) == hlen)
+ break;
+
+ /* FALLTHROUGH */
+
+ case SPECIAL_I:
+ case SPECIAL_D:
+ /*
+ * actual changes match one of our special case encodings --
+ * send packet uncompressed.
+ */
+ goto uncompressed;
+
+ case NEW_S|NEW_A:
+ if (deltaS == deltaA &&
+ deltaS == ntohs(cs->cs_ip.ip_len) - hlen) {
+ /* special case for echoed terminal traffic */
+ changes = SPECIAL_I;
+ cp = new_seq;
+ }
+ break;
+
+ case NEW_S:
+ if (deltaS == ntohs(cs->cs_ip.ip_len) - hlen) {
+ /* special case for data xfer */
+ changes = SPECIAL_D;
+ cp = new_seq;
+ }
+ break;
+ }
+
+ deltaS = ntohs(ip->ip_id) - ntohs(cs->cs_ip.ip_id);
+ if (deltaS != 1) {
+ ENCODEZ(deltaS);
+ changes |= NEW_I;
+ }
+ if (th->th_flags & TH_PUSH)
+ changes |= TCP_PUSH_BIT;
+ /*
+ * Grab the cksum before we overwrite it below. Then update our
+ * state with this packet's header.
+ */
+ deltaA = ntohs(th->th_sum);
+ BCOPY(ip, &cs->cs_ip, hlen);
+
+ /*
+ * We want to use the original packet as our compressed packet.
+ * (cp - new_seq) is the number of bytes we need for compressed
+ * sequence numbers. In addition we need one byte for the change
+ * mask, one for the connection id and two for the tcp checksum.
+ * So, (cp - new_seq) + 4 bytes of header are needed. hlen is how
+ * many bytes of the original packet to toss so subtract the two to
+ * get the new packet size.
+ */
+ deltaS = cp - new_seq;
+ cp = (u_char *)ip;
+ if (compress_cid == 0 || comp->last_xmit != cs->cs_id) {
+ comp->last_xmit = cs->cs_id;
+ hlen -= deltaS + 4;
+ cp += hlen;
+ *cp++ = changes | NEW_C;
+ *cp++ = cs->cs_id;
+ } else {
+ hlen -= deltaS + 3;
+ cp += hlen;
+ *cp++ = changes;
+ }
+ m->m_len -= hlen;
+ m->m_data += hlen;
+ *cp++ = deltaA >> 8;
+ *cp++ = deltaA;
+ BCOPY(new_seq, cp, deltaS);
+ INCR(sls_compressed)
+ return (TYPE_COMPRESSED_TCP);
+
+ /*
+ * Update connection state cs & send uncompressed packet ('uncompressed'
+ * means a regular ip/tcp packet but with the 'conversation id' we hope
+ * to use on future compressed packets in the protocol field).
+ */
+uncompressed:
+ BCOPY(ip, &cs->cs_ip, hlen);
+ ip->ip_p = cs->cs_id;
+ comp->last_xmit = cs->cs_id;
+ return (TYPE_UNCOMPRESSED_TCP);
+}
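+
+/*
+ * Caller sketch (illustrative, based on the contract above and the
+ * type codes in slcompress.h): because TYPE_IP occupies the normal
+ * IPv4 version nibble and the other types use otherwise-unused high
+ * bits, a serial-line driver can simply OR the returned type into
+ * the first octet of the (possibly shortened) packet, e.g.
+ *
+ *	if (ip->ip_p == IPPROTO_TCP)
+ *		*mtod(m, u_char *) |= sl_compress_tcp(m, ip, comp, 1);
+ */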
+
+
+int
+sl_uncompress_tcp(bufp, len, type, comp)
+ u_char **bufp;
+ int len;
+ u_int type;
+ struct slcompress *comp;
+{
+ u_char *hdr, *cp;
+ int hlen, vjlen;
+
+ cp = bufp? *bufp: NULL;
+ vjlen = sl_uncompress_tcp_core(cp, len, len, type, comp, &hdr, &hlen);
+ if (vjlen < 0)
+ return (0); /* error */
+ if (vjlen == 0)
+ return (len); /* was uncompressed already */
+
+ cp += vjlen;
+ len -= vjlen;
+
+ /*
+ * At this point, cp points to the first byte of data in the
+ * packet. If we're not aligned on a 4-byte boundary, copy the
+ * data down so the ip & tcp headers will be aligned. Then back up
+ * cp by the tcp/ip header length to make room for the reconstructed
+ * header (we assume the packet we were handed has enough space to
+ * prepend 128 bytes of header).
+ */
+ if ((intptr_t)cp & 3) {
+ if (len > 0)
+ BCOPY(cp, ((intptr_t)cp &~ 3), len);
+ cp = (u_char *)((intptr_t)cp &~ 3);
+ }
+ cp -= hlen;
+ len += hlen;
+ BCOPY(hdr, cp, hlen);
+
+ *bufp = cp;
+ return (len);
+}
+
+/*
+ * Uncompress a packet of total length total_len. The first buflen
+ * bytes are at buf; this must include the entire (compressed or
+ * uncompressed) TCP/IP header. This procedure returns the length
+ * of the VJ header, with a pointer to the uncompressed IP header
+ * in *hdrp and its length in *hlenp.
+ */
+int
+sl_uncompress_tcp_core(buf, buflen, total_len, type, comp, hdrp, hlenp)
+ u_char *buf;
+ int buflen, total_len;
+ u_int type;
+ struct slcompress *comp;
+ u_char **hdrp;
+ u_int *hlenp;
+{
+ register u_char *cp;
+ register u_int hlen, changes;
+ register struct tcphdr *th;
+ register struct cstate *cs;
+ register struct ip *ip;
+ register u_int16_t *bp;
+ register u_int vjlen;
+
+ switch (type) {
+
+ case TYPE_UNCOMPRESSED_TCP:
+ ip = (struct ip *) buf;
+ if (ip->ip_p >= MAX_STATES)
+ goto bad;
+ cs = &comp->rstate[comp->last_recv = ip->ip_p];
+ comp->flags &=~ SLF_TOSS;
+ ip->ip_p = IPPROTO_TCP;
+ /*
+ * Calculate the size of the TCP/IP header and make sure that
+ * we don't overflow the space we have available for it.
+ */
+ hlen = ip->ip_hl << 2;
+ if (hlen + sizeof(struct tcphdr) > buflen)
+ goto bad;
+ hlen += ((struct tcphdr *)&((char *)ip)[hlen])->th_off << 2;
+ if (hlen > MAX_HDR || hlen > buflen)
+ goto bad;
+ BCOPY(ip, &cs->cs_ip, hlen);
+ cs->cs_hlen = hlen;
+ INCR(sls_uncompressedin)
+ *hdrp = (u_char *) &cs->cs_ip;
+ *hlenp = hlen;
+ return (0);
+
+ default:
+ goto bad;
+
+ case TYPE_COMPRESSED_TCP:
+ break;
+ }
+ /* We've got a compressed packet. */
+ INCR(sls_compressedin)
+ cp = buf;
+ changes = *cp++;
+ if (changes & NEW_C) {
+ /* Make sure the state index is in range, then grab the state.
+ * If we have a good state index, clear the 'discard' flag. */
+ if (*cp >= MAX_STATES)
+ goto bad;
+
+ comp->flags &=~ SLF_TOSS;
+ comp->last_recv = *cp++;
+ } else {
+ /* this packet has an implicit state index. If we've
+ * had a line error since the last time we got an
+ * explicit state index, we have to toss the packet. */
+ if (comp->flags & SLF_TOSS) {
+ INCR(sls_tossed)
+ return (-1);
+ }
+ }
+ cs = &comp->rstate[comp->last_recv];
+ hlen = cs->cs_ip.ip_hl << 2;
+ th = (struct tcphdr *)&((u_char *)&cs->cs_ip)[hlen];
+ th->th_sum = htons((*cp << 8) | cp[1]);
+ cp += 2;
+ if (changes & TCP_PUSH_BIT)
+ th->th_flags |= TH_PUSH;
+ else
+ th->th_flags &=~ TH_PUSH;
+
+ switch (changes & SPECIALS_MASK) {
+ case SPECIAL_I:
+ {
+ register u_int i = ntohs(cs->cs_ip.ip_len) - cs->cs_hlen;
+ th->th_ack = htonl(ntohl(th->th_ack) + i);
+ th->th_seq = htonl(ntohl(th->th_seq) + i);
+ }
+ break;
+
+ case SPECIAL_D:
+ th->th_seq = htonl(ntohl(th->th_seq) + ntohs(cs->cs_ip.ip_len)
+ - cs->cs_hlen);
+ break;
+
+ default:
+ if (changes & NEW_U) {
+ th->th_flags |= TH_URG;
+ DECODEU(th->th_urp)
+ } else
+ th->th_flags &=~ TH_URG;
+ if (changes & NEW_W)
+ DECODES(th->th_win)
+ if (changes & NEW_A)
+ DECODEL(th->th_ack)
+ if (changes & NEW_S)
+ DECODEL(th->th_seq)
+ break;
+ }
+ if (changes & NEW_I) {
+ DECODES(cs->cs_ip.ip_id)
+ } else
+ cs->cs_ip.ip_id = htons(ntohs(cs->cs_ip.ip_id) + 1);
+
+ /*
+ * At this point, cp points to the first byte of data in the
+ * packet. Fill in the IP total length and update the IP
+ * header checksum.
+ */
+ vjlen = cp - buf;
+ buflen -= vjlen;
+ if (buflen < 0)
+ /* we must have dropped some characters (crc should detect
+ * this but the old slip framing won't) */
+ goto bad;
+
+ total_len += cs->cs_hlen - vjlen;
+ cs->cs_ip.ip_len = htons(total_len);
+
+ /* recompute the ip header checksum */
+ bp = (u_int16_t *) &cs->cs_ip;
+ cs->cs_ip.ip_sum = 0;
+ for (changes = 0; hlen > 0; hlen -= 2)
+ changes += *bp++;
+ changes = (changes & 0xffff) + (changes >> 16);
+ changes = (changes & 0xffff) + (changes >> 16);
+ cs->cs_ip.ip_sum = ~ changes;
+
+ *hdrp = (u_char *) &cs->cs_ip;
+ *hlenp = cs->cs_hlen;
+ return vjlen;
+
+bad:
+ comp->flags |= SLF_TOSS;
+ INCR(sls_errorin)
+ return (-1);
+}
diff --git a/rtems/freebsd/net/slcompress.h b/rtems/freebsd/net/slcompress.h
new file mode 100644
index 00000000..08c9042e
--- /dev/null
+++ b/rtems/freebsd/net/slcompress.h
@@ -0,0 +1,158 @@
+/*
+ * Definitions for tcp compression routines.
+ */
+/*-
+ * Copyright (c) 1989, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Van Jacobson (van@helios.ee.lbl.gov), Dec 31, 1989:
+ * - Initial distribution.
+ * $FreeBSD$
+ */
+
+#ifndef _NET_SLCOMPRESS_HH_
+#define _NET_SLCOMPRESS_HH_
+
+#define MAX_STATES 16 /* must be > 2 and < 256 */
+#define MAX_HDR 128
+
+/*
+ * Compressed packet format:
+ *
+ * The first octet contains the packet type (top 3 bits), TCP
+ * 'push' bit, and flags that indicate which of the 4 TCP sequence
+ * numbers have changed (bottom 5 bits). The next octet is a
+ * conversation number that associates a saved IP/TCP header with
+ * the compressed packet. The next two octets are the TCP checksum
+ * from the original datagram. The next 0 to 15 octets are
+ * sequence number changes, one change per bit set in the header
+ * (there may be no changes and there are two special cases where
+ * the receiver implicitly knows what changed -- see below).
+ *
+ * There are 5 numbers which can change (they are always inserted
+ * in the following order): TCP urgent pointer, window,
+ * acknowledgement, sequence number and IP ID. (The urgent pointer
+ * is different from the others in that its value is sent, not the
+ * change in value.) Since typical use of SLIP links is biased
+ * toward small packets (see comments on MTU/MSS below), changes
+ * use a variable length coding with one octet for numbers in the
+ * range 1 - 255 and 3 octets (0, MSB, LSB) for numbers in the
+ * range 256 - 65535 or 0. (If the change in sequence number or
+ * ack is more than 65535, an uncompressed packet is sent.)
+ */
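+
+/*
+ * Example (illustrative): for a steady unidirectional data transfer
+ * on the most recently used connection, only the sequence number
+ * changes, and by exactly the amount of data in the previous segment
+ * (the SPECIAL_D case below), so no delta octets are needed.  With
+ * the conversation number elided as well, the entire VJ header is
+ * the change-mask octet plus the two checksum octets -- 3 octets in
+ * place of the 40-octet uncompressed IP/TCP header.
+ */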
+
+/*
+ * Packet types (must not conflict with IP protocol version)
+ *
+ * The top nibble of the first octet is the packet type. There are
+ * three possible types: IP (not proto TCP or tcp with one of the
+ * control flags set); uncompressed TCP (a normal IP/TCP packet but
+ * with the 8-bit protocol field replaced by an 8-bit connection id --
+ * this type of packet syncs the sender & receiver); and compressed
+ * TCP (described above).
+ *
+ * LSB of 4-bit field is TCP "PUSH" bit (a worthless anachronism) and
+ * is logically part of the 4-bit "changes" field that follows. Top
+ * three bits are actual packet type. For backward compatibility
+ * and in the interest of conserving bits, numbers are chosen so the
+ * IP protocol version number (4) which normally appears in this nibble
+ * means "IP packet".
+ */
+
+/* packet types */
+#define TYPE_IP 0x40
+#define TYPE_UNCOMPRESSED_TCP 0x70
+#define TYPE_COMPRESSED_TCP 0x80
+#define TYPE_ERROR 0x00
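+
+/*
+ * E.g. an unmodified IPv4 datagram with a 20-byte header begins with
+ * the octet 0x45 (version 4, header length 5), so its top nibble
+ * already reads as TYPE_IP and such packets can pass through without
+ * being rewritten.
+ */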
+
+/* Bits in first octet of compressed packet */
+#define NEW_C 0x40 /* flag bits for what changed in a packet */
+#define NEW_I 0x20
+#define NEW_S 0x08
+#define NEW_A 0x04
+#define NEW_W 0x02
+#define NEW_U 0x01
+
+/* reserved, special-case values of above */
+#define SPECIAL_I (NEW_S|NEW_W|NEW_U) /* echoed interactive traffic */
+#define SPECIAL_D (NEW_S|NEW_A|NEW_W|NEW_U) /* unidirectional data */
+#define SPECIALS_MASK (NEW_S|NEW_A|NEW_W|NEW_U)
+
+#define TCP_PUSH_BIT 0x10
+
+
+/*
+ * "state" data for each active tcp conversation on the wire. This is
+ * basically a copy of the entire IP/TCP header from the last packet
+ * we saw from the conversation, together with a small identifier
+ * that the transmit & receive ends of the line use to locate the
+ * saved header.
+ */
+struct cstate {
+ struct cstate *cs_next; /* next most recently used cstate (xmit only) */
+ u_int16_t cs_hlen; /* size of hdr (receive only) */
+ u_char cs_id; /* connection # associated with this state */
+ u_char cs_filler;
+ union {
+ char csu_hdr[MAX_HDR];
+ struct ip csu_ip; /* ip/tcp hdr from most recent packet */
+ } slcs_u;
+};
+#define cs_ip slcs_u.csu_ip
+#define cs_hdr slcs_u.csu_hdr
+
+/*
+ * all the state data for one serial line (we need one of these
+ * per line).
+ */
+struct slcompress {
+ struct cstate *last_cs; /* most recently used tstate */
+ u_char last_recv; /* last rcvd conn. id */
+ u_char last_xmit; /* last sent conn. id */
+ u_int16_t flags;
+#ifndef SL_NO_STATS
+ int sls_packets; /* outbound packets */
+ int sls_compressed; /* outbound compressed packets */
+ int sls_searches; /* searches for connection state */
+ int sls_misses; /* times couldn't find conn. state */
+ int sls_uncompressedin; /* inbound uncompressed packets */
+ int sls_compressedin; /* inbound compressed packets */
+ int sls_errorin; /* inbound unknown type packets */
+ int sls_tossed; /* inbound packets tossed because of error */
+#endif
+ struct cstate tstate[MAX_STATES]; /* xmit connection states */
+ struct cstate rstate[MAX_STATES]; /* receive connection states */
+};
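+
+/*
+ * Size sketch (illustrative, 32-bit target, ignoring padding): each
+ * cstate is a pointer plus 4 bytes of bookkeeping plus a MAX_HDR
+ * (128) byte header copy, roughly 136 bytes, and a slcompress holds
+ * 2 * MAX_STATES = 32 of them, so the per-line state costs on the
+ * order of 4-5 kilobytes.
+ */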
+/* flag values */
+#define SLF_TOSS 1 /* tossing rcvd frames because of input err */
+
+void sl_compress_init(struct slcompress *, int);
+u_int sl_compress_tcp(struct mbuf *, struct ip *, struct slcompress *, int);
+int sl_uncompress_tcp(u_char **, int, u_int, struct slcompress *);
+int sl_uncompress_tcp_core(u_char *, int, int, u_int,
+ struct slcompress *, u_char **, u_int *);
+
+#endif /* !_NET_SLCOMPRESS_HH_ */
diff --git a/rtems/freebsd/net/vnet.h b/rtems/freebsd/net/vnet.h
new file mode 100644
index 00000000..3cd8a0c3
--- /dev/null
+++ b/rtems/freebsd/net/vnet.h
@@ -0,0 +1,437 @@
+/*-
+ * Copyright (c) 2006-2009 University of Zagreb
+ * Copyright (c) 2006-2009 FreeBSD Foundation
+ * All rights reserved.
+ *
+ * This software was developed by the University of Zagreb and the
+ * FreeBSD Foundation under sponsorship by the Stichting NLnet and the
+ * FreeBSD Foundation.
+ *
+ * Copyright (c) 2009 Jeffrey Roberson <jeff@freebsd.org>
+ * Copyright (c) 2009 Robert N. M. Watson
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/*-
+ * This header file defines several sets of interfaces supporting virtualized
+ * network stacks:
+ *
+ * - Definition of 'struct vnet' and functions and macros to allocate/free/
+ * manipulate it.
+ *
+ * - A virtual network stack memory allocator, which provides support for
+ * virtualized global variables via a special linker set, set_vnet.
+ *
+ * - Virtualized sysinits/sysuninits, which allow constructors and
+ * destructors to be run for each network stack subsystem as virtual
+ * instances are created and destroyed.
+ *
+ * If VIMAGE isn't compiled into the kernel, virtualized global variables
+ * compile to normal global variables, and virtualized sysinits to regular
+ * sysinits.
+ */
+
+#ifndef _NET_VNET_HH_
+#define _NET_VNET_HH_
+
+/*
+ * struct vnet describes a virtualized network stack, and is primarily a
+ * pointer to storage for virtualized global variables. Expose to userspace
+ * as required for libkvm.
+ */
+#if defined(_KERNEL) || defined(_WANT_VNET)
+#include <rtems/freebsd/sys/queue.h>
+
+struct vnet {
+ LIST_ENTRY(vnet) vnet_le; /* all vnets list */
+ u_int vnet_magic_n;
+ u_int vnet_ifcnt;
+ u_int vnet_sockcnt;
+ void *vnet_data_mem;
+ uintptr_t vnet_data_base;
+};
+#define VNET_MAGIC_N 0x3e0d8f29
+
+/*
+ * These two virtual network stack allocator definitions are also required
+ * for libkvm so that it can evaluate virtualized global variables.
+ */
+#define VNET_SETNAME "set_vnet"
+#define VNET_SYMPREFIX "vnet_entry_"
+#endif
+
+#ifdef _KERNEL
+
+#ifdef VIMAGE
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/proc.h> /* for struct thread */
+#include <rtems/freebsd/sys/rwlock.h>
+#include <rtems/freebsd/sys/sx.h>
+
+/*
+ * Location of the kernel's 'set_vnet' linker set.
+ */
+extern uintptr_t *__start_set_vnet;
+extern uintptr_t *__stop_set_vnet;
+
+#define VNET_START (uintptr_t)&__start_set_vnet
+#define VNET_STOP (uintptr_t)&__stop_set_vnet
+
+/*
+ * Functions to allocate and destroy virtual network stacks.
+ */
+struct vnet *vnet_alloc(void);
+void vnet_destroy(struct vnet *vnet);
+
+/*
+ * The current virtual network stack -- we may wish to move this to struct
+ * pcpu in the future.
+ */
+#define curvnet curthread->td_vnet
+
+/*
+ * Various macros to get and set the current network stack, along
+ * with related assertions.
+ */
+#ifdef VNET_DEBUG
+void vnet_log_recursion(struct vnet *, const char *, int);
+
+#define VNET_ASSERT(condition) \
+ if (!(condition)) { \
+ printf("VNET_ASSERT @ %s:%d %s():\n", \
+ __FILE__, __LINE__, __FUNCTION__); \
+ panic(#condition); \
+ }
+
+#define CURVNET_SET_QUIET(arg) \
+ VNET_ASSERT((arg)->vnet_magic_n == VNET_MAGIC_N); \
+ struct vnet *saved_vnet = curvnet; \
+ const char *saved_vnet_lpush = curthread->td_vnet_lpush; \
+ curvnet = arg; \
+ curthread->td_vnet_lpush = __FUNCTION__;
+
+#define CURVNET_SET_VERBOSE(arg) \
+ CURVNET_SET_QUIET(arg) \
+ if (saved_vnet) \
+ vnet_log_recursion(saved_vnet, saved_vnet_lpush, __LINE__);
+
+#define CURVNET_SET(arg) CURVNET_SET_VERBOSE(arg)
+
+#define CURVNET_RESTORE() \
+ VNET_ASSERT(saved_vnet == NULL || \
+ saved_vnet->vnet_magic_n == VNET_MAGIC_N); \
+ curvnet = saved_vnet; \
+ curthread->td_vnet_lpush = saved_vnet_lpush;
+#else /* !VNET_DEBUG */
+#define VNET_ASSERT(condition)
+
+#define CURVNET_SET(arg) \
+ struct vnet *saved_vnet = curvnet; \
+ curvnet = arg;
+
+#define CURVNET_SET_VERBOSE(arg) CURVNET_SET(arg)
+#define CURVNET_SET_QUIET(arg) CURVNET_SET(arg)
+
+#define CURVNET_RESTORE() \
+ curvnet = saved_vnet;
+#endif /* VNET_DEBUG */
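+
+/*
+ * Usage sketch (names illustrative): CURVNET_SET() declares
+ * saved_vnet in the enclosing block, so each use must be paired with
+ * a CURVNET_RESTORE() in that same block:
+ *
+ *	CURVNET_SET(ifp->if_vnet);
+ *	... work in the stack that owns the interface ...
+ *	CURVNET_RESTORE();
+ */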
+
+extern struct vnet *vnet0;
+#define IS_DEFAULT_VNET(arg) ((arg) == vnet0)
+
+#define CRED_TO_VNET(cr) (cr)->cr_prison->pr_vnet
+#define TD_TO_VNET(td) CRED_TO_VNET((td)->td_ucred)
+#define P_TO_VNET(p) CRED_TO_VNET((p)->p_ucred)
+
+/*
+ * Global linked list of all virtual network stacks, along with read locks to
+ * access it. If a caller may sleep while accessing the list, it must use
+ * the sleepable lock macros.
+ */
+LIST_HEAD(vnet_list_head, vnet);
+extern struct vnet_list_head vnet_head;
+extern struct rwlock vnet_rwlock;
+extern struct sx vnet_sxlock;
+
+#define VNET_LIST_RLOCK() sx_slock(&vnet_sxlock)
+#define VNET_LIST_RLOCK_NOSLEEP() rw_rlock(&vnet_rwlock)
+#define VNET_LIST_RUNLOCK() sx_sunlock(&vnet_sxlock)
+#define VNET_LIST_RUNLOCK_NOSLEEP() rw_runlock(&vnet_rwlock)
+
+/*
+ * Iteration macros to walk the global list of virtual network stacks.
+ */
+#define VNET_ITERATOR_DECL(arg) struct vnet *arg
+#define VNET_FOREACH(arg) LIST_FOREACH((arg), &vnet_head, vnet_le)
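+
+/*
+ * Typical walker sketch (illustrative):
+ *
+ *	VNET_ITERATOR_DECL(vnet_iter);
+ *
+ *	VNET_LIST_RLOCK();
+ *	VNET_FOREACH(vnet_iter) {
+ *		CURVNET_SET(vnet_iter);
+ *		... per-stack work ...
+ *		CURVNET_RESTORE();
+ *	}
+ *	VNET_LIST_RUNLOCK();
+ */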
+
+/*
+ * Virtual network stack memory allocator, which allows global variables to
+ * be automatically instantiated for each network stack instance.
+ */
+__asm__(
+#if defined(__arm__)
+ ".section " VNET_SETNAME ", \"aw\", %progbits\n"
+#else
+ ".section " VNET_SETNAME ", \"aw\", @progbits\n"
+#endif
+ "\t.p2align " __XSTRING(CACHE_LINE_SHIFT) "\n"
+ "\t.previous");
+
+#define VNET_NAME(n) vnet_entry_##n
+#define VNET_DECLARE(t, n) extern t VNET_NAME(n)
+#define VNET_DEFINE(t, n) t VNET_NAME(n) __section(VNET_SETNAME) __used
+#define _VNET_PTR(b, n) (__typeof(VNET_NAME(n))*) \
+ ((b) + (uintptr_t)&VNET_NAME(n))
+
+#define _VNET(b, n) (*_VNET_PTR(b, n))
+
+/*
+ * Virtualized global variable accessor macros.
+ */
+#define VNET_VNET_PTR(vnet, n) _VNET_PTR((vnet)->vnet_data_base, n)
+#define VNET_VNET(vnet, n) (*VNET_VNET_PTR((vnet), n))
+
+#define VNET_PTR(n) VNET_VNET_PTR(curvnet, n)
+#define VNET(n) VNET_VNET(curvnet, n)
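+
+/*
+ * Usage sketch (the variable name is hypothetical): a subsystem
+ * declares one virtualized global and, by convention, hides the
+ * accessor behind a V_-prefixed alias:
+ *
+ *	VNET_DEFINE(int, foo_enable);
+ *	#define	V_foo_enable	VNET(foo_enable)
+ *
+ * Each vnet then carries its own copy of foo_enable in its
+ * vnet_data_mem image, and V_foo_enable resolves through curvnet.
+ */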
+
+/*
+ * Virtual network stack allocator interfaces from the kernel linker.
+ */
+void *vnet_data_alloc(int size);
+void vnet_data_copy(void *start, int size);
+void vnet_data_free(void *start_arg, int size);
+
+/*
+ * Sysctl variants for vnet-virtualized global variables. Include
+ * <sys/sysctl.h> to expose these definitions.
+ *
+ * Note: SYSCTL_PROC() handler functions will need to resolve pointer
+ * arguments themselves, if required.
+ */
+#ifdef SYSCTL_OID
+int vnet_sysctl_handle_int(SYSCTL_HANDLER_ARGS);
+int vnet_sysctl_handle_opaque(SYSCTL_HANDLER_ARGS);
+int vnet_sysctl_handle_string(SYSCTL_HANDLER_ARGS);
+int vnet_sysctl_handle_uint(SYSCTL_HANDLER_ARGS);
+
+#define SYSCTL_VNET_INT(parent, nbr, name, access, ptr, val, descr) \
+ SYSCTL_OID(parent, nbr, name, \
+ CTLTYPE_INT|CTLFLAG_MPSAFE|CTLFLAG_VNET|(access), \
+ ptr, val, vnet_sysctl_handle_int, "I", descr)
+#define SYSCTL_VNET_PROC(parent, nbr, name, access, ptr, arg, handler, \
+ fmt, descr) \
+ SYSCTL_OID(parent, nbr, name, CTLFLAG_VNET|(access), ptr, arg, \
+ handler, fmt, descr)
+#define SYSCTL_VNET_OPAQUE(parent, nbr, name, access, ptr, len, fmt, \
+ descr) \
+ SYSCTL_OID(parent, nbr, name, \
+ CTLTYPE_OPAQUE|CTLFLAG_VNET|(access), ptr, len, \
+ vnet_sysctl_handle_opaque, fmt, descr)
+#define SYSCTL_VNET_STRING(parent, nbr, name, access, arg, len, descr) \
+ SYSCTL_OID(parent, nbr, name, \
+ CTLTYPE_STRING|CTLFLAG_VNET|(access), \
+ arg, len, vnet_sysctl_handle_string, "A", descr)
+#define SYSCTL_VNET_STRUCT(parent, nbr, name, access, ptr, type, descr) \
+ SYSCTL_OID(parent, nbr, name, \
+ CTLTYPE_OPAQUE|CTLFLAG_VNET|(access), ptr, \
+ sizeof(struct type), vnet_sysctl_handle_opaque, "S," #type, \
+ descr)
+#define SYSCTL_VNET_UINT(parent, nbr, name, access, ptr, val, descr) \
+ SYSCTL_OID(parent, nbr, name, \
+ CTLTYPE_UINT|CTLFLAG_MPSAFE|CTLFLAG_VNET|(access), \
+ ptr, val, vnet_sysctl_handle_uint, "IU", descr)
+#define VNET_SYSCTL_ARG(req, arg1) do { \
+ if (arg1 != NULL) \
+ arg1 = (void *)(TD_TO_VNET((req)->td)->vnet_data_base + \
+ (uintptr_t)(arg1)); \
+} while (0)
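+
+/*
+ * Usage sketch (oid and variable are hypothetical): the pointer
+ * handed to the macro is the raw linker-set address, which the
+ * vnet-aware handler relocates to the caller's instance:
+ *
+ *	SYSCTL_VNET_INT(_net, OID_AUTO, foo_enable, CTLFLAG_RW,
+ *	    &VNET_NAME(foo_enable), 0, "Enable foo per vnet");
+ */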
+#endif /* SYSCTL_OID */
+
+/*
+ * Virtual sysinit mechanism, allowing network stack components to declare
+ * startup and shutdown methods to be run when virtual network stack
+ * instances are created and destroyed.
+ */
+#include <rtems/freebsd/sys/kernel.h>
+
+/*
+ * SYSINIT/SYSUNINIT variants that provide per-vnet constructors and
+ * destructors.
+ */
+struct vnet_sysinit {
+ enum sysinit_sub_id subsystem;
+ enum sysinit_elem_order order;
+ sysinit_cfunc_t func;
+ const void *arg;
+ TAILQ_ENTRY(vnet_sysinit) link;
+};
+
+#define VNET_SYSINIT(ident, subsystem, order, func, arg) \
+ static struct vnet_sysinit ident ## _vnet_init = { \
+ subsystem, \
+ order, \
+ (sysinit_cfunc_t)(sysinit_nfunc_t)func, \
+ (arg) \
+ }; \
+ SYSINIT(vnet_init_ ## ident, subsystem, order, \
+ vnet_register_sysinit, &ident ## _vnet_init); \
+ SYSUNINIT(vnet_init_ ## ident, subsystem, order, \
+ vnet_deregister_sysinit, &ident ## _vnet_init)
+
+#define VNET_SYSUNINIT(ident, subsystem, order, func, arg) \
+ static struct vnet_sysinit ident ## _vnet_uninit = { \
+ subsystem, \
+ order, \
+ (sysinit_cfunc_t)(sysinit_nfunc_t)func, \
+ (arg) \
+ }; \
+ SYSINIT(vnet_uninit_ ## ident, subsystem, order, \
+ vnet_register_sysuninit, &ident ## _vnet_uninit); \
+ SYSUNINIT(vnet_uninit_ ## ident, subsystem, order, \
+ vnet_deregister_sysuninit, &ident ## _vnet_uninit)
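+
+/*
+ * Usage sketch (subsystem and function names are hypothetical):
+ *
+ *	static void
+ *	foo_vnet_init(const void *unused)
+ *	{
+ *
+ *		V_foo_enable = 1;
+ *	}
+ *	VNET_SYSINIT(foo_init, SI_SUB_PROTO_DOMAIN, SI_ORDER_ANY,
+ *	    foo_vnet_init, NULL);
+ *
+ * With VIMAGE the body runs for every vnet as it is created; without
+ * VIMAGE it collapses to an ordinary SYSINIT and runs exactly once.
+ */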
+
+/*
+ * Run per-vnet sysinits or sysuninits during vnet creation/destruction.
+ */
+void vnet_sysinit(void);
+void vnet_sysuninit(void);
+
+/*
+ * Interfaces for managing per-vnet constructors and destructors.
+ */
+void vnet_register_sysinit(void *arg);
+void vnet_register_sysuninit(void *arg);
+void vnet_deregister_sysinit(void *arg);
+void vnet_deregister_sysuninit(void *arg);
+
+/*
+ * EVENTHANDLER(9) extensions.
+ */
+#include <rtems/freebsd/sys/eventhandler.h>
+
+void vnet_global_eventhandler_iterator_func(void *, ...);
+#define VNET_GLOBAL_EVENTHANDLER_REGISTER_TAG(tag, name, func, arg, priority) \
+do { \
+ if (IS_DEFAULT_VNET(curvnet)) { \
+ (tag) = vimage_eventhandler_register(NULL, #name, func, \
+ arg, priority, \
+ vnet_global_eventhandler_iterator_func); \
+ } \
+} while(0)
+#define VNET_GLOBAL_EVENTHANDLER_REGISTER(name, func, arg, priority) \
+do { \
+ if (IS_DEFAULT_VNET(curvnet)) { \
+ vimage_eventhandler_register(NULL, #name, func, \
+ arg, priority, \
+ vnet_global_eventhandler_iterator_func); \
+ } \
+} while(0)
+
+#else /* !VIMAGE */
+
+/*
+ * Various virtual network stack macros compile to no-ops without VIMAGE.
+ */
+#define curvnet NULL
+
+#define VNET_ASSERT(condition)
+#define CURVNET_SET(arg)
+#define CURVNET_SET_QUIET(arg)
+#define CURVNET_RESTORE()
+
+#define VNET_LIST_RLOCK()
+#define VNET_LIST_RLOCK_NOSLEEP()
+#define VNET_LIST_RUNLOCK()
+#define VNET_LIST_RUNLOCK_NOSLEEP()
+#define VNET_ITERATOR_DECL(arg)
+#define VNET_FOREACH(arg)
+
+#define IS_DEFAULT_VNET(arg) 1
+#define CRED_TO_VNET(cr) NULL
+#define TD_TO_VNET(td) NULL
+#define P_TO_VNET(p) NULL
+
+/*
+ * Versions of the VNET macros that compile to normal global variables and
+ * standard sysctl definitions.
+ */
+#define VNET_NAME(n) n
+#define VNET_DECLARE(t, n) extern t n
+#define VNET_DEFINE(t, n) t n
+#define _VNET_PTR(b, n) &VNET_NAME(n)
+
+/*
+ * Virtualized global variable accessor macros.
+ */
+#define VNET_VNET_PTR(vnet, n) (&(n))
+#define VNET_VNET(vnet, n) (n)
+
+#define VNET_PTR(n) (&(n))
+#define VNET(n) (n)
+
+/*
+ * When VIMAGE isn't compiled into the kernel, virtualized SYSCTLs simply
+ * become normal SYSCTLs.
+ */
+#ifdef SYSCTL_OID
+#define SYSCTL_VNET_INT(parent, nbr, name, access, ptr, val, descr) \
+ SYSCTL_INT(parent, nbr, name, access, ptr, val, descr)
+#define SYSCTL_VNET_PROC(parent, nbr, name, access, ptr, arg, handler, \
+ fmt, descr) \
+ SYSCTL_PROC(parent, nbr, name, access, ptr, arg, handler, fmt, \
+ descr)
+#define SYSCTL_VNET_OPAQUE(parent, nbr, name, access, ptr, len, fmt, \
+ descr) \
+ SYSCTL_OPAQUE(parent, nbr, name, access, ptr, len, fmt, descr)
+#define SYSCTL_VNET_STRING(parent, nbr, name, access, arg, len, descr) \
+ SYSCTL_STRING(parent, nbr, name, access, arg, len, descr)
+#define SYSCTL_VNET_STRUCT(parent, nbr, name, access, ptr, type, descr) \
+ SYSCTL_STRUCT(parent, nbr, name, access, ptr, type, descr)
+#define SYSCTL_VNET_UINT(parent, nbr, name, access, ptr, val, descr) \
+ SYSCTL_UINT(parent, nbr, name, access, ptr, val, descr)
+#define VNET_SYSCTL_ARG(req, arg1)
+#endif /* SYSCTL_OID */
+
+/*
+ * When VIMAGE isn't compiled into the kernel, VNET_SYSINIT/VNET_SYSUNINIT
+ * map into normal sysinits, which have the same ordering properties.
+ */
+#define VNET_SYSINIT(ident, subsystem, order, func, arg) \
+ SYSINIT(ident, subsystem, order, func, arg)
+#define VNET_SYSUNINIT(ident, subsystem, order, func, arg) \
+ SYSUNINIT(ident, subsystem, order, func, arg)
+
+/*
+ * Without VIMAGE, revert to the default implementation.
+ */
+#define VNET_GLOBAL_EVENTHANDLER_REGISTER_TAG(tag, name, func, arg, priority) \
+ (tag) = eventhandler_register(NULL, #name, func, arg, priority)
+#define VNET_GLOBAL_EVENTHANDLER_REGISTER(name, func, arg, priority) \
+ eventhandler_register(NULL, #name, func, arg, priority)
+#endif /* VIMAGE */
+#endif /* _KERNEL */
+
+#endif /* !_NET_VNET_HH_ */
diff --git a/rtems/freebsd/net/zlib.c b/rtems/freebsd/net/zlib.c
new file mode 100644
index 00000000..6a6c1017
--- /dev/null
+++ b/rtems/freebsd/net/zlib.c
@@ -0,0 +1,5409 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*
+ * This file is derived from various .h and .c files from the zlib-1.0.4
+ * distribution by Jean-loup Gailly and Mark Adler, with some additions
+ * by Paul Mackerras to aid in implementing Deflate compression and
+ * decompression for PPP packets. See zlib.h for conditions of
+ * distribution and use.
+ *
+ * Changes that have been made include:
+ * - added Z_PACKET_FLUSH (see zlib.h for details)
+ * - added inflateIncomp and deflateOutputPending
+ * - allow strm->next_out to be NULL, meaning discard the output
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * ==FILEVERSION 971210==
+ *
+ * This marker is used by the Linux installation script to determine
+ * whether an up-to-date version of this file is already installed.
+ */
+
+#define NO_DUMMY_DECL
+#define NO_ZCFUNCS
+#define MY_ZCALLOC
+
+#if defined(__FreeBSD__) && defined(_KERNEL)
+#define inflate inflate_ppp /* FreeBSD already has an inflate :-( */
+#endif
+
+
+/* +++ zutil.h */
+/*-
+ * zutil.h -- internal interface and configuration of the compression library
+ * Copyright (C) 1995-1996 Jean-loup Gailly.
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/* WARNING: this file should *not* be used by applications. It is
+ part of the implementation of the compression library and is
+ subject to change. Applications should only use zlib.h.
+ */
+
+/* From: zutil.h,v 1.16 1996/07/24 13:41:13 me Exp $ */
+
+#ifndef _Z_UTIL_H
+#define _Z_UTIL_H
+
+#ifdef _KERNEL
+#include <rtems/freebsd/net/zlib.h>
+#else
+#include <rtems/freebsd/local/zlib.h>
+#endif
+
+#ifdef _KERNEL
+/* Assume this is a *BSD or SVR4 kernel */
+#include <rtems/freebsd/sys/types.h>
+#include <rtems/freebsd/sys/time.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/module.h>
+# define HAVE_MEMCPY
+
+#else
+#if defined(__KERNEL__)
+/* Assume this is a Linux kernel */
+#include <rtems/freebsd/linux/string.h>
+#define HAVE_MEMCPY
+
+#else /* not kernel */
+
+#if defined(MSDOS)||defined(VMS)||defined(CRAY)||defined(WIN32)||defined(RISCOS)
+# include <rtems/freebsd/stddef.h>
+# include <rtems/freebsd/errno.h>
+#else
+ extern int errno;
+#endif
+#ifdef STDC
+# include <rtems/freebsd/string.h>
+# include <rtems/freebsd/stdlib.h>
+#endif
+#endif /* __KERNEL__ */
+#endif /* _KERNEL */
+
+#ifndef local
+# define local static
+#endif
+/* compile with -Dlocal if your debugger can't find static symbols */
+
+typedef unsigned char uch;
+typedef uch FAR uchf;
+typedef unsigned short ush;
+typedef ush FAR ushf;
+typedef unsigned long ulg;
+
+static const char *z_errmsg[10]; /* indexed by 2-zlib_error */
+/* (size given to avoid silly warnings with Visual C++) */
+
+#define ERR_MSG(err) z_errmsg[Z_NEED_DICT-(err)]
+
+#define ERR_RETURN(strm,err) \
+ return (strm->msg = (const char*)ERR_MSG(err), (err))
+/* To be used only when the state is known to be valid */
+
+ /* common constants */
+
+#ifndef DEF_WBITS
+# define DEF_WBITS MAX_WBITS
+#endif
+/* default windowBits for decompression. MAX_WBITS is for compression only */
+
+#if MAX_MEM_LEVEL >= 8
+# define DEF_MEM_LEVEL 8
+#else
+# define DEF_MEM_LEVEL MAX_MEM_LEVEL
+#endif
+/* default memLevel */
+
+#define STORED_BLOCK 0
+#define STATIC_TREES 1
+#define DYN_TREES 2
+/* The three kinds of block type */
+
+#define MIN_MATCH 3
+#define MAX_MATCH 258
+/* The minimum and maximum match lengths */
+
+#define PRESET_DICT 0x20 /* preset dictionary flag in zlib header */
+
+ /* target dependencies */
+
+#ifdef MSDOS
+# define OS_CODE 0x00
+# ifdef __TURBOC__
+# include <rtems/freebsd/alloc.h>
+# else /* MSC or DJGPP */
+# include <rtems/freebsd/malloc.h>
+# endif
+#endif
+
+#ifdef OS2
+# define OS_CODE 0x06
+#endif
+
+#ifdef WIN32 /* Windows 95 & Windows NT */
+# define OS_CODE 0x0b
+#endif
+
+#if defined(VAXC) || defined(VMS)
+# define OS_CODE 0x02
+# define FOPEN(name, mode) \
+ fopen((name), (mode), "mbc=60", "ctx=stm", "rfm=fix", "mrs=512")
+#endif
+
+#ifdef AMIGA
+# define OS_CODE 0x01
+#endif
+
+#if defined(ATARI) || defined(atarist)
+# define OS_CODE 0x05
+#endif
+
+#ifdef MACOS
+# define OS_CODE 0x07
+#endif
+
+#ifdef __50SERIES /* Prime/PRIMOS */
+# define OS_CODE 0x0F
+#endif
+
+#ifdef TOPS20
+# define OS_CODE 0x0a
+#endif
+
+#if defined(_BEOS_) || defined(RISCOS)
+# define fdopen(fd,mode) NULL /* No fdopen() */
+#endif
+
+ /* Common defaults */
+
+#ifndef OS_CODE
+# define OS_CODE 0x03 /* assume Unix */
+#endif
+
+#ifndef FOPEN
+# define FOPEN(name, mode) fopen((name), (mode))
+#endif
+
+ /* functions */
+
+#ifdef HAVE_STRERROR
+ extern char *strerror OF((int));
+# define zstrerror(errnum) strerror(errnum)
+#else
+# define zstrerror(errnum) ""
+#endif
+
+#if defined(pyr)
+# define NO_MEMCPY
+#endif
+#if (defined(M_I86SM) || defined(M_I86MM)) && !defined(_MSC_VER)
+ /* Use our own functions for small and medium model with MSC <= 5.0.
+ * You may have to use the same strategy for Borland C (untested).
+ */
+# define NO_MEMCPY
+#endif
+#if defined(STDC) && !defined(HAVE_MEMCPY) && !defined(NO_MEMCPY)
+# define HAVE_MEMCPY
+#endif
+#ifdef HAVE_MEMCPY
+# ifdef SMALL_MEDIUM /* MSDOS small or medium model */
+# define zmemcpy _fmemcpy
+# define zmemcmp _fmemcmp
+# define zmemzero(dest, len) _fmemset(dest, 0, len)
+# else
+# define zmemcpy memcpy
+# define zmemcmp memcmp
+# define zmemzero(dest, len) memset(dest, 0, len)
+# endif
+#else
+ extern void zmemcpy OF((Bytef* dest, Bytef* source, uInt len));
+ extern int zmemcmp OF((Bytef* s1, Bytef* s2, uInt len));
+ extern void zmemzero OF((Bytef* dest, uInt len));
+#endif
+
+/* Diagnostic functions */
+#ifdef DEBUG_ZLIB
+# include <rtems/freebsd/stdio.h>
+# ifndef verbose
+# define verbose 0
+# endif
+ extern void z_error OF((char *m));
+# define Assert(cond,msg) {if(!(cond)) z_error(msg);}
+# define Trace(x) fprintf x
+# define Tracev(x) {if (verbose) fprintf x ;}
+# define Tracevv(x) {if (verbose>1) fprintf x ;}
+# define Tracec(c,x) {if (verbose && (c)) fprintf x ;}
+# define Tracecv(c,x) {if (verbose>1 && (c)) fprintf x ;}
+#else
+# define Assert(cond,msg)
+# define Trace(x)
+# define Tracev(x)
+# define Tracevv(x)
+# define Tracec(c,x)
+# define Tracecv(c,x)
+#endif
+
+
+typedef uLong (*check_func) OF((uLong check, const Bytef *buf, uInt len));
+
+voidpf zcalloc OF((voidpf opaque, unsigned items, unsigned size));
+void zcfree OF((voidpf opaque, voidpf ptr));
+
+#define ZALLOC(strm, items, size) \
+ (*((strm)->zalloc))((strm)->opaque, (items), (size))
+#define ZFREE(strm, addr) (*((strm)->zfree))((strm)->opaque, (voidpf)(addr))
+#define TRY_FREE(s, p) {if (p) ZFREE(s, p);}
+
+#endif /* _Z_UTIL_H */
+/* --- zutil.h */
+
+/* +++ deflate.h */
+/* deflate.h -- internal compression state
+ * Copyright (C) 1995-1996 Jean-loup Gailly
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/* WARNING: this file should *not* be used by applications. It is
+ part of the implementation of the compression library and is
+ subject to change. Applications should only use zlib.h.
+ */
+
+/* From: deflate.h,v 1.10 1996/07/02 12:41:00 me Exp $ */
+
+#ifndef _DEFLATE_H
+#define _DEFLATE_H
+
+/* #include <rtems/freebsd/local/zutil.h> */
+
+/* ===========================================================================
+ * Internal compression state.
+ */
+
+#define LENGTH_CODES 29
+/* number of length codes, not counting the special END_BLOCK code */
+
+#define LITERALS 256
+/* number of literal bytes 0..255 */
+
+#define L_CODES (LITERALS+1+LENGTH_CODES)
+/* number of Literal or Length codes, including the END_BLOCK code */
+
+#define D_CODES 30
+/* number of distance codes */
+
+#define BL_CODES 19
+/* number of codes used to transfer the bit lengths */
+
+#define HEAP_SIZE (2*L_CODES+1)
+/* maximum heap size */
+
+#define MAX_BITS 15
+/* All codes must not exceed MAX_BITS bits */
+
+#define INIT_STATE 42
+#define BUSY_STATE 113
+#define FINISH_STATE 666
+/* Stream status */
+
+
+/* Data structure describing a single value and its code string. */
+typedef struct ct_data_s {
+ union {
+ ush freq; /* frequency count */
+ ush code; /* bit string */
+ } fc;
+ union {
+ ush dad; /* father node in Huffman tree */
+ ush len; /* length of bit string */
+ } dl;
+} FAR ct_data;
+
+#define Freq fc.freq
+#define Code fc.code
+#define Dad dl.dad
+#define Len dl.len
+
+typedef struct static_tree_desc_s static_tree_desc;
+
+typedef struct tree_desc_s {
+ ct_data *dyn_tree; /* the dynamic tree */
+ int max_code; /* largest code with non zero frequency */
+ static_tree_desc *stat_desc; /* the corresponding static tree */
+} FAR tree_desc;
+
+typedef ush Pos;
+typedef Pos FAR Posf;
+typedef unsigned IPos;
+
+/* A Pos is an index in the character window. We use short instead of int to
+ * save space in the various tables. IPos is used only for parameter passing.
+ */
+
+typedef struct deflate_state {
+ z_streamp strm; /* pointer back to this zlib stream */
+ int status; /* as the name implies */
+ Bytef *pending_buf; /* output still pending */
+ ulg pending_buf_size; /* size of pending_buf */
+ Bytef *pending_out; /* next pending byte to output to the stream */
+ int pending; /* nb of bytes in the pending buffer */
+ int noheader; /* suppress zlib header and adler32 */
+ Byte data_type; /* UNKNOWN, BINARY or ASCII */
+ Byte method; /* STORED (for zip only) or DEFLATED */
+ int last_flush; /* value of flush param for previous deflate call */
+
+ /* used by deflate.c: */
+
+ uInt w_size; /* LZ77 window size (32K by default) */
+ uInt w_bits; /* log2(w_size) (8..16) */
+ uInt w_mask; /* w_size - 1 */
+
+ Bytef *window;
+ /* Sliding window. Input bytes are read into the second half of the window,
+ * and move to the first half later to keep a dictionary of at least wSize
+ * bytes. With this organization, matches are limited to a distance of
+ * wSize-MAX_MATCH bytes, but this ensures that IO is always
+ * performed with a length multiple of the block size. Also, it limits
+ * the window size to 64K, which is quite useful on MSDOS.
+ * To do: use the user input buffer as sliding window.
+ */
+
+ ulg window_size;
+ /* Actual size of window: 2*wSize, except when the user input buffer
+ * is directly used as sliding window.
+ */
+
+ Posf *prev;
+ /* Link to older string with same hash index. To limit the size of this
+ * array to 64K, this link is maintained only for the last 32K strings.
+ * An index in this array is thus a window index modulo 32K.
+ */
+
+ Posf *head; /* Heads of the hash chains or NIL. */
+
+ uInt ins_h; /* hash index of string to be inserted */
+ uInt hash_size; /* number of elements in hash table */
+ uInt hash_bits; /* log2(hash_size) */
+ uInt hash_mask; /* hash_size-1 */
+
+ uInt hash_shift;
+ /* Number of bits by which ins_h must be shifted at each input
+ * step. It must be such that after MIN_MATCH steps, the oldest
+ * byte no longer takes part in the hash key, that is:
+ * hash_shift * MIN_MATCH >= hash_bits
+ */
+
+ long block_start;
+ /* Window position at the beginning of the current output block. Gets
+ * negative when the window is moved backwards.
+ */
+
+ uInt match_length; /* length of best match */
+ IPos prev_match; /* previous match */
+ int match_available; /* set if previous match exists */
+ uInt strstart; /* start of string to insert */
+ uInt match_start; /* start of matching string */
+ uInt lookahead; /* number of valid bytes ahead in window */
+
+ uInt prev_length;
+ /* Length of the best match at previous step. Matches not greater than this
+ * are discarded. This is used in the lazy match evaluation.
+ */
+
+ uInt max_chain_length;
+ /* To speed up deflation, hash chains are never searched beyond this
+ * length. A higher limit improves compression ratio but degrades the
+ * speed.
+ */
+
+ uInt max_lazy_match;
+ /* Attempt to find a better match only when the current match is strictly
+ * smaller than this value. This mechanism is used only for compression
+ * levels >= 4.
+ */
+# define max_insert_length max_lazy_match
+ /* Insert new strings in the hash table only if the match length is not
+ * greater than this length. This saves time but degrades compression.
+ * max_insert_length is used only for compression levels <= 3.
+ */
+
+ int level; /* compression level (1..9) */
+ int strategy; /* favor or force Huffman coding*/
+
+ uInt good_match;
+ /* Use a faster search when the previous match is longer than this */
+
+ int nice_match; /* Stop searching when current match exceeds this */
+
+ /* used by trees.c: */
+ /* Didn't use ct_data typedef below to suppress compiler warning */
+ struct ct_data_s dyn_ltree[HEAP_SIZE]; /* literal and length tree */
+ struct ct_data_s dyn_dtree[2*D_CODES+1]; /* distance tree */
+ struct ct_data_s bl_tree[2*BL_CODES+1]; /* Huffman tree for bit lengths */
+
+ struct tree_desc_s l_desc; /* desc. for literal tree */
+ struct tree_desc_s d_desc; /* desc. for distance tree */
+ struct tree_desc_s bl_desc; /* desc. for bit length tree */
+
+ ush bl_count[MAX_BITS+1];
+ /* number of codes at each bit length for an optimal tree */
+
+ int heap[2*L_CODES+1]; /* heap used to build the Huffman trees */
+ int heap_len; /* number of elements in the heap */
+ int heap_max; /* element of largest frequency */
+ /* The sons of heap[n] are heap[2*n] and heap[2*n+1]. heap[0] is not used.
+ * The same heap array is used to build all trees.
+ */
+
+ uch depth[2*L_CODES+1];
+ /* Depth of each subtree used as tie breaker for trees of equal frequency
+ */
+
+ uchf *l_buf; /* buffer for literals or lengths */
+
+ uInt lit_bufsize;
+ /* Size of match buffer for literals/lengths. There are 4 reasons for
+ * limiting lit_bufsize to 64K:
+ * - frequencies can be kept in 16 bit counters
+ * - if compression is not successful for the first block, all input
+ * data is still in the window so we can still emit a stored block even
+ * when input comes from standard input. (This can also be done for
+ * all blocks if lit_bufsize is not greater than 32K.)
+ * - if compression is not successful for a file smaller than 64K, we can
+ * even emit a stored file instead of a stored block (saving 5 bytes).
+ * This is applicable only for zip (not gzip or zlib).
+ * - creating new Huffman trees less frequently may not provide fast
+ * adaptation to changes in the input data statistics. (Take for
+ * example a binary file with poorly compressible code followed by
+ * a highly compressible string table.) Smaller buffer sizes give
+ * fast adaptation but have of course the overhead of transmitting
+ * trees more frequently.
+ * - I can't count above 4
+ */
+
+ uInt last_lit; /* running index in l_buf */
+
+ ushf *d_buf;
+ /* Buffer for distances. To simplify the code, d_buf and l_buf have
+ * the same number of elements. To use different lengths, an extra flag
+ * array would be necessary.
+ */
+
+ ulg opt_len; /* bit length of current block with optimal trees */
+ ulg static_len; /* bit length of current block with static trees */
+ ulg compressed_len; /* total bit length of compressed file */
+ uInt matches; /* number of string matches in current block */
+ int last_eob_len; /* bit length of EOB code for last block */
+
+#ifdef DEBUG_ZLIB
+ ulg bits_sent; /* bit length of the compressed data */
+#endif
+
+ ush bi_buf;
+ /* Output buffer. bits are inserted starting at the bottom (least
+ * significant bits).
+ */
+ int bi_valid;
+ /* Number of valid bits in bi_buf. All bits above the last valid bit
+ * are always zero.
+ */
+
+} FAR deflate_state;
+
+/* Output a byte on the stream.
+ * IN assertion: there is enough room in pending_buf.
+ */
+#define put_byte(s, c) {s->pending_buf[s->pending++] = (c);}
+
+
+#define MIN_LOOKAHEAD (MAX_MATCH+MIN_MATCH+1)
+/* Minimum amount of lookahead, except at the end of the input file.
+ * See deflate.c for comments about the MIN_MATCH+1.
+ */
+
+#define MAX_DIST(s) ((s)->w_size-MIN_LOOKAHEAD)
+/* In order to simplify the code, particularly on 16 bit machines, match
+ * distances are limited to MAX_DIST instead of WSIZE.
+ */
+
+ /* in trees.c */
+void _tr_init OF((deflate_state *s));
+int _tr_tally OF((deflate_state *s, unsigned dist, unsigned lc));
+ulg _tr_flush_block OF((deflate_state *s, charf *buf, ulg stored_len,
+ int eof));
+void _tr_align OF((deflate_state *s));
+void _tr_stored_block OF((deflate_state *s, charf *buf, ulg stored_len,
+ int eof));
+void _tr_stored_type_only OF((deflate_state *));
+
+#endif
+/* --- deflate.h */
+
+/* +++ deflate.c */
+/* deflate.c -- compress data using the deflation algorithm
+ * Copyright (C) 1995-1996 Jean-loup Gailly.
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/*
+ * ALGORITHM
+ *
+ * The "deflation" process depends on being able to identify portions
+ * of the input text which are identical to earlier input (within a
+ * sliding window trailing behind the input currently being processed).
+ *
+ * The most straightforward technique turns out to be the fastest for
+ * most input files: try all possible matches and select the longest.
+ * The key feature of this algorithm is that insertions into the string
+ * dictionary are very simple and thus fast, and deletions are avoided
+ * completely. Insertions are performed at each input character, whereas
+ * string matches are performed only when the previous match ends. So it
+ * is preferable to spend more time in matches to allow very fast string
+ * insertions and avoid deletions. The matching algorithm for small
+ * strings is inspired by that of Rabin & Karp. A brute force approach
+ * is used to find longer strings when a small match has been found.
+ * A similar algorithm is used in comic (by Jan-Mark Wams) and freeze
+ * (by Leonid Broukhis).
+ * A previous version of this file used a more sophisticated algorithm
+ * (by Fiala and Greene) which is guaranteed to run in linear amortized
+ * time, but has a larger average cost, uses more memory and is patented.
+ * However the F&G algorithm may be faster for some highly redundant
+ * files if the parameter max_chain_length (described below) is too large.
+ *
+ * ACKNOWLEDGEMENTS
+ *
+ * The idea of lazy evaluation of matches is due to Jan-Mark Wams, and
+ * I found it in 'freeze' written by Leonid Broukhis.
+ * Thanks to many people for bug reports and testing.
+ *
+ * REFERENCES
+ *
+ * Deutsch, L.P.,"DEFLATE Compressed Data Format Specification".
+ * Available in ftp://ds.internic.net/rfc/rfc1951.txt
+ *
+ * A description of the Rabin and Karp algorithm is given in the book
+ * "Algorithms" by R. Sedgewick, Addison-Wesley, p252.
+ *
+ * Fiala,E.R., and Greene,D.H.
+ * Data Compression with Finite Windows, Comm.ACM, 32,4 (1989) 490-505
+ *
+ */
+
+/* From: deflate.c,v 1.15 1996/07/24 13:40:58 me Exp $ */
+
+/* #include <rtems/freebsd/local/deflate.h> */
+
+char deflate_copyright[] = " deflate 1.0.4 Copyright 1995-1996 Jean-loup Gailly ";
+/*
+ If you use the zlib library in a product, an acknowledgment is welcome
+ in the documentation of your product. If for some reason you cannot
+ include such an acknowledgment, I would appreciate that you keep this
+ copyright string in the executable of your product.
+ */
+
+/* ===========================================================================
+ * Function prototypes.
+ */
+typedef enum {
+ need_more, /* block not completed, need more input or more output */
+ block_done, /* block flush performed */
+ finish_started, /* finish started, need only more output at next deflate */
+ finish_done /* finish done, accept no more input or output */
+} block_state;
+
+typedef block_state (*compress_func) OF((deflate_state *s, int flush));
+/* Compression function. Returns the block state after the call. */
+
+local void fill_window OF((deflate_state *s));
+local block_state deflate_stored OF((deflate_state *s, int flush));
+local block_state deflate_fast OF((deflate_state *s, int flush));
+local block_state deflate_slow OF((deflate_state *s, int flush));
+local void lm_init OF((deflate_state *s));
+local void putShortMSB OF((deflate_state *s, uInt b));
+local void flush_pending OF((z_streamp strm));
+local int read_buf OF((z_streamp strm, charf *buf, unsigned size));
+#ifdef ASMV
+ void match_init OF((void)); /* asm code initialization */
+ uInt longest_match OF((deflate_state *s, IPos cur_match));
+#else
+local uInt longest_match OF((deflate_state *s, IPos cur_match));
+#endif
+
+#ifdef DEBUG_ZLIB
+local void check_match OF((deflate_state *s, IPos start, IPos match,
+ int length));
+#endif
+
+/* ===========================================================================
+ * Local data
+ */
+
+#define NIL 0
+/* Tail of hash chains */
+
+#ifndef TOO_FAR
+# define TOO_FAR 4096
+#endif
+/* Matches of length 3 are discarded if their distance exceeds TOO_FAR */
+
+#define MIN_LOOKAHEAD (MAX_MATCH+MIN_MATCH+1)
+/* Minimum amount of lookahead, except at the end of the input file.
+ * See deflate.c for comments about the MIN_MATCH+1.
+ */
+
+/* Values for max_lazy_match, good_match and max_chain_length, depending on
+ * the desired pack level (0..9). The values given below have been tuned to
+ * exclude worst case performance for pathological files. Better values may be
+ * found for specific files.
+ */
+typedef struct config_s {
+ ush good_length; /* reduce lazy search above this match length */
+ ush max_lazy; /* do not perform lazy search above this match length */
+ ush nice_length; /* quit search above this match length */
+ ush max_chain;
+ compress_func func;
+} config;
+
+local config configuration_table[10] = {
+/* good lazy nice chain */
+/* 0 */ {0, 0, 0, 0, deflate_stored}, /* store only */
+/* 1 */ {4, 4, 8, 4, deflate_fast}, /* maximum speed, no lazy matches */
+/* 2 */ {4, 5, 16, 8, deflate_fast},
+/* 3 */ {4, 6, 32, 32, deflate_fast},
+
+/* 4 */ {4, 4, 16, 16, deflate_slow}, /* lazy matches */
+/* 5 */ {8, 16, 32, 32, deflate_slow},
+/* 6 */ {8, 16, 128, 128, deflate_slow},
+/* 7 */ {8, 32, 128, 256, deflate_slow},
+/* 8 */ {32, 128, 258, 1024, deflate_slow},
+/* 9 */ {32, 258, 258, 4096, deflate_slow}}; /* maximum compression */
+
+/* Note: the deflate() code requires max_lazy >= MIN_MATCH and max_chain >= 4
+ * For deflate_fast() (levels <= 3) good is ignored and lazy has a different
+ * meaning.
+ */
+
+#define EQUAL 0
+/* result of memcmp for equal strings */
+
+#ifndef NO_DUMMY_DECL
+struct static_tree_desc_s {int dummy;}; /* for buggy compilers */
+#endif
+
+/* ===========================================================================
+ * Update a hash value with the given input byte
+ * IN assertion: all calls to UPDATE_HASH are made with consecutive
+ * input characters, so that a running hash key can be computed from the
+ * previous key instead of complete recalculation each time.
+ */
+#define UPDATE_HASH(s,h,c) (h = (((h)<<s->hash_shift) ^ (c)) & s->hash_mask)
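+
+/*
+ * E.g. deflateInit2_() with memLevel 8 sets hash_bits = 15 and
+ * hash_shift = 5; after MIN_MATCH (3) updates a byte has been
+ * shifted left 15 bits and falls outside hash_mask, so the key
+ * depends only on the 3 most recent input bytes.
+ */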
+
+
+/* ===========================================================================
+ * Insert string str in the dictionary and set match_head to the previous head
+ * of the hash chain (the most recent string with same hash key). Return
+ * the previous length of the hash chain.
+ * IN assertion: all calls to INSERT_STRING are made with consecutive
+ * input characters and the first MIN_MATCH bytes of str are valid
+ * (except for the last MIN_MATCH-1 bytes of the input file).
+ */
+#define INSERT_STRING(s, str, match_head) \
+ (UPDATE_HASH(s, s->ins_h, s->window[(str) + (MIN_MATCH-1)]), \
+ s->prev[(str) & s->w_mask] = match_head = s->head[s->ins_h], \
+ s->head[s->ins_h] = (Pos)(str))
+
+/* ===========================================================================
+ * Initialize the hash table (avoiding 64K overflow for 16 bit systems).
+ * prev[] will be initialized on the fly.
+ */
+#define CLEAR_HASH(s) \
+ s->head[s->hash_size-1] = NIL; \
+ zmemzero((charf *)s->head, (unsigned)(s->hash_size-1)*sizeof(*s->head));
+
+/* ========================================================================= */
+int deflateInit_(strm, level, version, stream_size)
+ z_streamp strm;
+ int level;
+ const char *version;
+ int stream_size;
+{
+ return deflateInit2_(strm, level, Z_DEFLATED, MAX_WBITS, DEF_MEM_LEVEL,
+ Z_DEFAULT_STRATEGY, version, stream_size);
+ /* To do: ignore strm->next_in if we use it as window */
+}
+
+/* ========================================================================= */
+int deflateInit2_(strm, level, method, windowBits, memLevel, strategy,
+ version, stream_size)
+ z_streamp strm;
+ int level;
+ int method;
+ int windowBits;
+ int memLevel;
+ int strategy;
+ const char *version;
+ int stream_size;
+{
+ deflate_state *s;
+ int noheader = 0;
+ static char* my_version = ZLIB_VERSION;
+
+ ushf *overlay;
+ /* We overlay pending_buf and d_buf+l_buf. This works since the average
+ * output size for (length,distance) codes is <= 24 bits.
+ */
+
+ if (version == Z_NULL || version[0] != my_version[0] ||
+ stream_size != sizeof(z_stream)) {
+ return Z_VERSION_ERROR;
+ }
+ if (strm == Z_NULL) return Z_STREAM_ERROR;
+
+ strm->msg = Z_NULL;
+#ifndef NO_ZCFUNCS
+ if (strm->zalloc == Z_NULL) {
+ strm->zalloc = zcalloc;
+ strm->opaque = (voidpf)0;
+ }
+ if (strm->zfree == Z_NULL) strm->zfree = zcfree;
+#endif
+
+ if (level == Z_DEFAULT_COMPRESSION) level = 6;
+
+ if (windowBits < 0) { /* undocumented feature: suppress zlib header */
+ noheader = 1;
+ windowBits = -windowBits;
+ }
+ if (memLevel < 1 || memLevel > MAX_MEM_LEVEL || method != Z_DEFLATED ||
+ windowBits < 9 || windowBits > 15 || level < 0 || level > 9 ||
+ strategy < 0 || strategy > Z_HUFFMAN_ONLY) {
+ return Z_STREAM_ERROR;
+ }
+ s = (deflate_state *) ZALLOC(strm, 1, sizeof(deflate_state));
+ if (s == Z_NULL) return Z_MEM_ERROR;
+ strm->state = (struct internal_state FAR *)s;
+ s->strm = strm;
+
+ s->noheader = noheader;
+ s->w_bits = windowBits;
+ s->w_size = 1 << s->w_bits;
+ s->w_mask = s->w_size - 1;
+
+ s->hash_bits = memLevel + 7;
+ s->hash_size = 1 << s->hash_bits;
+ s->hash_mask = s->hash_size - 1;
+ s->hash_shift = ((s->hash_bits+MIN_MATCH-1)/MIN_MATCH);
+
+ s->window = (Bytef *) ZALLOC(strm, s->w_size, 2*sizeof(Byte));
+ s->prev = (Posf *) ZALLOC(strm, s->w_size, sizeof(Pos));
+ s->head = (Posf *) ZALLOC(strm, s->hash_size, sizeof(Pos));
+
+ s->lit_bufsize = 1 << (memLevel + 6); /* 16K elements by default */
+
+ overlay = (ushf *) ZALLOC(strm, s->lit_bufsize, sizeof(ush)+2);
+ s->pending_buf = (uchf *) overlay;
+ s->pending_buf_size = (ulg)s->lit_bufsize * (sizeof(ush)+2L);
+
+ if (s->window == Z_NULL || s->prev == Z_NULL || s->head == Z_NULL ||
+ s->pending_buf == Z_NULL) {
+ strm->msg = (const char*)ERR_MSG(Z_MEM_ERROR);
+ deflateEnd (strm);
+ return Z_MEM_ERROR;
+ }
+ s->d_buf = overlay + s->lit_bufsize/sizeof(ush);
+ s->l_buf = s->pending_buf + (1+sizeof(ush))*s->lit_bufsize;
+
+ s->level = level;
+ s->strategy = strategy;
+ s->method = (Byte)method;
+
+ return deflateReset(strm);
+}
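+
+#if 0
+/* A minimal usage sketch, not part of the original source; the caller
+ * and its error handling are hypothetical.  A negative windowBits value
+ * exercises the undocumented feature handled above: the zlib header and
+ * the adler32 trailer are suppressed ("raw" deflate data).
+ */
+static int example_raw_deflate_init(z_streamp zs)
+{
+    zs->zalloc = Z_NULL;        /* fall back to zcalloc/zcfree */
+    zs->zfree  = Z_NULL;
+    zs->opaque = (voidpf)0;
+    return deflateInit2(zs, Z_DEFAULT_COMPRESSION, Z_DEFLATED,
+                        -MAX_WBITS, DEF_MEM_LEVEL, Z_DEFAULT_STRATEGY);
+}
+#endif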
+
+/* ========================================================================= */
+int deflateSetDictionary (strm, dictionary, dictLength)
+ z_streamp strm;
+ const Bytef *dictionary;
+ uInt dictLength;
+{
+ deflate_state *s;
+ uInt length = dictLength;
+ uInt n;
+ IPos hash_head = 0;
+
+ if (strm == Z_NULL || strm->state == Z_NULL || dictionary == Z_NULL)
+ return Z_STREAM_ERROR;
+
+ s = (deflate_state *) strm->state;
+ if (s->status != INIT_STATE) return Z_STREAM_ERROR;
+
+ strm->adler = adler32(strm->adler, dictionary, dictLength);
+
+ if (length < MIN_MATCH) return Z_OK;
+ if (length > MAX_DIST(s)) {
+ length = MAX_DIST(s);
+#ifndef USE_DICT_HEAD
+ dictionary += dictLength - length; /* use the tail of the dictionary */
+#endif
+ }
+ zmemcpy((charf *)s->window, dictionary, length);
+ s->strstart = length;
+ s->block_start = (long)length;
+
+ /* Insert all strings in the hash table (except for the last two bytes).
+ * s->lookahead stays null, so s->ins_h will be recomputed at the next
+ * call of fill_window.
+ */
+ s->ins_h = s->window[0];
+ UPDATE_HASH(s, s->ins_h, s->window[1]);
+ for (n = 0; n <= length - MIN_MATCH; n++) {
+ INSERT_STRING(s, n, hash_head);
+ }
+ if (hash_head) hash_head = 0; /* to make compiler happy */
+ return Z_OK;
+}
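+
+#if 0
+/* Illustrative only, not part of the original source; the dictionary
+ * contents are hypothetical.  A preset dictionary must be installed
+ * after deflateInit() but before the first deflate() call (the
+ * INIT_STATE check above enforces this), and the decompressor must
+ * supply the identical bytes when inflate() reports Z_NEED_DICT.
+ */
+static const Bytef example_dict[] = "frequently repeated phrases";
+
+static int example_set_dictionary(z_streamp zs)
+{
+    return deflateSetDictionary(zs, example_dict,
+                                (uInt)(sizeof(example_dict) - 1));
+}
+#endif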
+
+/* ========================================================================= */
+int deflateReset (strm)
+ z_streamp strm;
+{
+ deflate_state *s;
+
+ if (strm == Z_NULL || strm->state == Z_NULL ||
+ strm->zalloc == Z_NULL || strm->zfree == Z_NULL) return Z_STREAM_ERROR;
+
+ strm->total_in = strm->total_out = 0;
+ strm->msg = Z_NULL; /* use zfree if we ever allocate msg dynamically */
+ strm->data_type = Z_UNKNOWN;
+
+ s = (deflate_state *)strm->state;
+ s->pending = 0;
+ s->pending_out = s->pending_buf;
+
+ if (s->noheader < 0) {
+ s->noheader = 0; /* was set to -1 by deflate(..., Z_FINISH); */
+ }
+ s->status = s->noheader ? BUSY_STATE : INIT_STATE;
+ strm->adler = 1;
+ s->last_flush = Z_NO_FLUSH;
+
+ _tr_init(s);
+ lm_init(s);
+
+ return Z_OK;
+}
+
+/* ========================================================================= */
+int deflateParams(strm, level, strategy)
+ z_streamp strm;
+ int level;
+ int strategy;
+{
+ deflate_state *s;
+ compress_func func;
+ int err = Z_OK;
+
+ if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR;
+ s = (deflate_state *) strm->state;
+
+ if (level == Z_DEFAULT_COMPRESSION) {
+ level = 6;
+ }
+ if (level < 0 || level > 9 || strategy < 0 || strategy > Z_HUFFMAN_ONLY) {
+ return Z_STREAM_ERROR;
+ }
+ func = configuration_table[s->level].func;
+
+ if (func != configuration_table[level].func && strm->total_in != 0) {
+ /* Flush the last buffer: */
+ err = deflate(strm, Z_PARTIAL_FLUSH);
+ }
+ if (s->level != level) {
+ s->level = level;
+ s->max_lazy_match = configuration_table[level].max_lazy;
+ s->good_match = configuration_table[level].good_length;
+ s->nice_match = configuration_table[level].nice_length;
+ s->max_chain_length = configuration_table[level].max_chain;
+ }
+ s->strategy = strategy;
+ return err;
+}
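+
+#if 0
+/* A hypothetical mid-stream tuning call, not part of the original
+ * source.  When the compression function changes (deflate_fast vs.
+ * deflate_slow in configuration_table), deflateParams() above first
+ * flushes the pending block so input already buffered is compressed
+ * with the old parameters.
+ */
+static int example_favor_speed(z_streamp zs)
+{
+    return deflateParams(zs, 1, Z_DEFAULT_STRATEGY);
+}
+#endif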
+
+/* =========================================================================
+ * Put a short in the pending buffer. The 16-bit value is put in MSB order.
+ * IN assertion: the stream state is correct and there is enough room in
+ * pending_buf.
+ */
+local void putShortMSB (s, b)
+ deflate_state *s;
+ uInt b;
+{
+ put_byte(s, (Byte)(b >> 8));
+ put_byte(s, (Byte)(b & 0xff));
+}
+
+/* =========================================================================
+ * Flush as much pending output as possible. All deflate() output goes
+ * through this function so some applications may wish to modify it
+ * to avoid allocating a large strm->next_out buffer and copying into it.
+ * (See also read_buf()).
+ */
+local void flush_pending(strm)
+ z_streamp strm;
+{
+ deflate_state *s = (deflate_state *) strm->state;
+ unsigned len = s->pending;
+
+ if (len > strm->avail_out) len = strm->avail_out;
+ if (len == 0) return;
+
+ if (strm->next_out != Z_NULL) {
+ zmemcpy(strm->next_out, s->pending_out, len);
+ strm->next_out += len;
+ }
+ s->pending_out += len;
+ strm->total_out += len;
+ strm->avail_out -= len;
+ s->pending -= len;
+ if (s->pending == 0) {
+ s->pending_out = s->pending_buf;
+ }
+}
+
+/* ========================================================================= */
+int deflate (strm, flush)
+ z_streamp strm;
+ int flush;
+{
+ int old_flush; /* value of flush param for previous deflate call */
+ deflate_state *s;
+
+ if (strm == Z_NULL || strm->state == Z_NULL ||
+ flush > Z_FINISH || flush < 0) {
+ return Z_STREAM_ERROR;
+ }
+ s = (deflate_state *) strm->state;
+
+ if ((strm->next_in == Z_NULL && strm->avail_in != 0) ||
+ (s->status == FINISH_STATE && flush != Z_FINISH)) {
+ ERR_RETURN(strm, Z_STREAM_ERROR);
+ }
+ if (strm->avail_out == 0) ERR_RETURN(strm, Z_BUF_ERROR);
+
+ s->strm = strm; /* just in case */
+ old_flush = s->last_flush;
+ s->last_flush = flush;
+
+ /* Write the zlib header */
+ if (s->status == INIT_STATE) {
+
+ uInt header = (Z_DEFLATED + ((s->w_bits-8)<<4)) << 8;
+ uInt level_flags = (s->level-1) >> 1;
+
+ if (level_flags > 3) level_flags = 3;
+ header |= (level_flags << 6);
+ if (s->strstart != 0) header |= PRESET_DICT;
+ header += 31 - (header % 31);
+
+ s->status = BUSY_STATE;
+ putShortMSB(s, header);
+
+ /* Save the adler32 of the preset dictionary: */
+ if (s->strstart != 0) {
+ putShortMSB(s, (uInt)(strm->adler >> 16));
+ putShortMSB(s, (uInt)(strm->adler & 0xffff));
+ }
+ strm->adler = 1L;
+ }
+
+ /* Flush as much pending output as possible */
+ if (s->pending != 0) {
+ flush_pending(strm);
+ if (strm->avail_out == 0) {
+ /* Since avail_out is 0, deflate will be called again with
+ * more output space, but possibly with both pending and
+ * avail_in equal to zero. There won't be anything to do,
+ * but this is not an error situation so make sure we
+ * return OK instead of BUF_ERROR at next call of deflate:
+ */
+ s->last_flush = -1;
+ return Z_OK;
+ }
+
+ /* Make sure there is something to do and avoid duplicate consecutive
+ * flushes. For repeated and useless calls with Z_FINISH, we keep
+ * returning Z_STREAM_END instead of Z_BUF_ERROR.
+ */
+ } else if (strm->avail_in == 0 && flush <= old_flush &&
+ flush != Z_FINISH) {
+ ERR_RETURN(strm, Z_BUF_ERROR);
+ }
+
+ /* User must not provide more input after the first FINISH: */
+ if (s->status == FINISH_STATE && strm->avail_in != 0) {
+ ERR_RETURN(strm, Z_BUF_ERROR);
+ }
+
+ /* Start a new block or continue the current one.
+ */
+ if (strm->avail_in != 0 || s->lookahead != 0 ||
+ (flush != Z_NO_FLUSH && s->status != FINISH_STATE)) {
+ block_state bstate;
+
+ bstate = (*(configuration_table[s->level].func))(s, flush);
+
+ if (bstate == finish_started || bstate == finish_done) {
+ s->status = FINISH_STATE;
+ }
+ if (bstate == need_more || bstate == finish_started) {
+ if (strm->avail_out == 0) {
+ s->last_flush = -1; /* avoid BUF_ERROR next call, see above */
+ }
+ return Z_OK;
+ /* If flush != Z_NO_FLUSH && avail_out == 0, the next call
+ * of deflate should use the same flush parameter to make sure
+ * that the flush is complete. So we don't have to output an
+ * empty block here, this will be done at next call. This also
+ * ensures that for a very small output buffer, we emit at most
+ * one empty block.
+ */
+ }
+ if (bstate == block_done) {
+ if (flush == Z_PARTIAL_FLUSH) {
+ _tr_align(s);
+ } else if (flush == Z_PACKET_FLUSH) {
+ /* Output just the 3-bit `stored' block type value, but not
+ the LEN/NLEN header of a zero-length stored block. */
+ _tr_stored_type_only(s);
+ } else { /* FULL_FLUSH or SYNC_FLUSH */
+ _tr_stored_block(s, (char*)0, 0L, 0);
+ /* For a full flush, this empty block will be recognized
+ * as a special marker by inflate_sync().
+ */
+ if (flush == Z_FULL_FLUSH) {
+ CLEAR_HASH(s); /* forget history */
+ }
+ }
+ flush_pending(strm);
+ if (strm->avail_out == 0) {
+ s->last_flush = -1; /* avoid BUF_ERROR at next call, see above */
+ return Z_OK;
+ }
+ }
+ }
+ Assert(strm->avail_out > 0, "bug2");
+
+ if (flush != Z_FINISH) return Z_OK;
+ if (s->noheader) return Z_STREAM_END;
+
+ /* Write the zlib trailer (adler32) */
+ putShortMSB(s, (uInt)(strm->adler >> 16));
+ putShortMSB(s, (uInt)(strm->adler & 0xffff));
+ flush_pending(strm);
+ /* If avail_out is zero, the application will call deflate again
+ * to flush the rest.
+ */
+ s->noheader = -1; /* write the trailer only once! */
+ return s->pending != 0 ? Z_OK : Z_STREAM_END;
+}
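+
+#if 0
+/* A minimal one-shot driver, not part of the original source; buffer
+ * names are hypothetical.  It shows the intended calling convention:
+ * keep calling deflate() with Z_FINISH until it returns Z_STREAM_END.
+ * A return of Z_OK here means the output buffer was too small.
+ */
+static int example_compress(z_streamp zs, Bytef *in, uInt in_len,
+                            Bytef *out, uInt out_len)
+{
+    int err;
+
+    zs->next_in   = in;
+    zs->avail_in  = in_len;
+    zs->next_out  = out;
+    zs->avail_out = out_len;
+    do {
+        err = deflate(zs, Z_FINISH);
+    } while (err == Z_OK && zs->avail_out != 0);
+    return err;
+}
+#endif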
+
+/* ========================================================================= */
+int deflateEnd (strm)
+ z_streamp strm;
+{
+ int status;
+ deflate_state *s;
+
+ if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR;
+ s = (deflate_state *) strm->state;
+
+ status = s->status;
+ if (status != INIT_STATE && status != BUSY_STATE &&
+ status != FINISH_STATE) {
+ return Z_STREAM_ERROR;
+ }
+
+ /* Deallocate in reverse order of allocations: */
+ TRY_FREE(strm, s->pending_buf);
+ TRY_FREE(strm, s->head);
+ TRY_FREE(strm, s->prev);
+ TRY_FREE(strm, s->window);
+
+ ZFREE(strm, s);
+ strm->state = Z_NULL;
+
+ return status == BUSY_STATE ? Z_DATA_ERROR : Z_OK;
+}
+
+/* =========================================================================
+ * Copy the source state to the destination state.
+ */
+int deflateCopy (dest, source)
+ z_streamp dest;
+ z_streamp source;
+{
+ deflate_state *ds;
+ deflate_state *ss;
+ ushf *overlay;
+
+ if (source == Z_NULL || dest == Z_NULL || source->state == Z_NULL)
+ return Z_STREAM_ERROR;
+ ss = (deflate_state *) source->state;
+
+ zmemcpy(dest, source, sizeof(*dest));
+
+ ds = (deflate_state *) ZALLOC(dest, 1, sizeof(deflate_state));
+ if (ds == Z_NULL) return Z_MEM_ERROR;
+ dest->state = (struct internal_state FAR *) ds;
+ zmemcpy(ds, ss, sizeof(*ds));
+ ds->strm = dest;
+
+ ds->window = (Bytef *) ZALLOC(dest, ds->w_size, 2*sizeof(Byte));
+ ds->prev = (Posf *) ZALLOC(dest, ds->w_size, sizeof(Pos));
+ ds->head = (Posf *) ZALLOC(dest, ds->hash_size, sizeof(Pos));
+ overlay = (ushf *) ZALLOC(dest, ds->lit_bufsize, sizeof(ush)+2);
+ ds->pending_buf = (uchf *) overlay;
+
+ if (ds->window == Z_NULL || ds->prev == Z_NULL || ds->head == Z_NULL ||
+ ds->pending_buf == Z_NULL) {
+ deflateEnd (dest);
+ return Z_MEM_ERROR;
+ }
+ /* ??? following zmemcpy doesn't work for 16-bit MSDOS */
+ zmemcpy(ds->window, ss->window, ds->w_size * 2 * sizeof(Byte));
+ zmemcpy(ds->prev, ss->prev, ds->w_size * sizeof(Pos));
+ zmemcpy(ds->head, ss->head, ds->hash_size * sizeof(Pos));
+ zmemcpy(ds->pending_buf, ss->pending_buf, (uInt)ds->pending_buf_size);
+
+ ds->pending_out = ds->pending_buf + (ss->pending_out - ss->pending_buf);
+ ds->d_buf = overlay + ds->lit_bufsize/sizeof(ush);
+ ds->l_buf = ds->pending_buf + (1+sizeof(ush))*ds->lit_bufsize;
+
+ ds->l_desc.dyn_tree = ds->dyn_ltree;
+ ds->d_desc.dyn_tree = ds->dyn_dtree;
+ ds->bl_desc.dyn_tree = ds->bl_tree;
+
+ return Z_OK;
+}
+
+/* ===========================================================================
+ * Return the number of bytes of pending output which are immediately
+ * available from the compressor.
+ */
+int deflateOutputPending (strm)
+ z_streamp strm;
+{
+ if (strm == Z_NULL || strm->state == Z_NULL) return 0;
+
+ return ((deflate_state *)(strm->state))->pending;
+}
+
+/* ===========================================================================
+ * Read a new buffer from the current input stream, update the adler32
+ * and total number of bytes read. All deflate() input goes through
+ * this function so some applications may wish to modify it to avoid
+ * allocating a large strm->next_in buffer and copying from it.
+ * (See also flush_pending()).
+ */
+local int read_buf(strm, buf, size)
+ z_streamp strm;
+ charf *buf;
+ unsigned size;
+{
+ unsigned len = strm->avail_in;
+
+ if (len > size) len = size;
+ if (len == 0) return 0;
+
+ strm->avail_in -= len;
+
+ if (!((deflate_state *)(strm->state))->noheader) {
+ strm->adler = adler32(strm->adler, strm->next_in, len);
+ }
+ zmemcpy(buf, strm->next_in, len);
+ strm->next_in += len;
+ strm->total_in += len;
+
+ return (int)len;
+}
+
+/* ===========================================================================
+ * Initialize the "longest match" routines for a new zlib stream
+ */
+local void lm_init (s)
+ deflate_state *s;
+{
+ s->window_size = (ulg)2L*s->w_size;
+
+ CLEAR_HASH(s);
+
+ /* Set the default configuration parameters:
+ */
+ s->max_lazy_match = configuration_table[s->level].max_lazy;
+ s->good_match = configuration_table[s->level].good_length;
+ s->nice_match = configuration_table[s->level].nice_length;
+ s->max_chain_length = configuration_table[s->level].max_chain;
+
+ s->strstart = 0;
+ s->block_start = 0L;
+ s->lookahead = 0;
+ s->match_length = s->prev_length = MIN_MATCH-1;
+ s->match_available = 0;
+ s->ins_h = 0;
+#ifdef ASMV
+ match_init(); /* initialize the asm code */
+#endif
+}
+
+/* ===========================================================================
+ * Set match_start to the longest match starting at the given string and
+ * return its length. Matches shorter or equal to prev_length are discarded,
+ * in which case the result is equal to prev_length and match_start is
+ * garbage.
+ * IN assertions: cur_match is the head of the hash chain for the current
+ * string (strstart) and its distance is <= MAX_DIST, and prev_length >= 1
+ * OUT assertion: the match length is not greater than s->lookahead.
+ */
+#ifndef ASMV
+/* For 80x86 and 680x0, an optimized version will be provided in match.asm or
+ * match.S. The code will be functionally equivalent.
+ */
+local uInt longest_match(s, cur_match)
+ deflate_state *s;
+ IPos cur_match; /* current match */
+{
+ unsigned chain_length = s->max_chain_length;/* max hash chain length */
+ register Bytef *scan = s->window + s->strstart; /* current string */
+ register Bytef *match; /* matched string */
+ register int len; /* length of current match */
+ int best_len = s->prev_length; /* best match length so far */
+ int nice_match = s->nice_match; /* stop if match long enough */
+ IPos limit = s->strstart > (IPos)MAX_DIST(s) ?
+ s->strstart - (IPos)MAX_DIST(s) : NIL;
+ /* Stop when cur_match becomes <= limit. To simplify the code,
+ * we prevent matches with the string of window index 0.
+ */
+ Posf *prev = s->prev;
+ uInt wmask = s->w_mask;
+
+#ifdef UNALIGNED_OK
+ /* Compare two bytes at a time. Note: this is not always beneficial.
+ * Try with and without -DUNALIGNED_OK to check.
+ */
+ register Bytef *strend = s->window + s->strstart + MAX_MATCH - 1;
+ register ush scan_start = *(ushf*)scan;
+ register ush scan_end = *(ushf*)(scan+best_len-1);
+#else
+ register Bytef *strend = s->window + s->strstart + MAX_MATCH;
+ register Byte scan_end1 = scan[best_len-1];
+ register Byte scan_end = scan[best_len];
+#endif
+
+ /* The code is optimized for HASH_BITS >= 8 and MAX_MATCH-2 multiple of 16.
+ * It is easy to get rid of this optimization if necessary.
+ */
+ Assert(s->hash_bits >= 8 && MAX_MATCH == 258, "Code too clever");
+
+ /* Do not waste too much time if we already have a good match: */
+ if (s->prev_length >= s->good_match) {
+ chain_length >>= 2;
+ }
+ /* Do not look for matches beyond the end of the input. This is necessary
+ * to make deflate deterministic.
+ */
+ if ((uInt)nice_match > s->lookahead) nice_match = s->lookahead;
+
+ Assert((ulg)s->strstart <= s->window_size-MIN_LOOKAHEAD, "need lookahead");
+
+ do {
+ Assert(cur_match < s->strstart, "no future");
+ match = s->window + cur_match;
+
+ /* Skip to next match if the match length cannot increase
+ * or if the match length is less than 2:
+ */
+#if (defined(UNALIGNED_OK) && MAX_MATCH == 258)
+ /* This code assumes sizeof(unsigned short) == 2. Do not use
+ * UNALIGNED_OK if your compiler uses a different size.
+ */
+ if (*(ushf*)(match+best_len-1) != scan_end ||
+ *(ushf*)match != scan_start) continue;
+
+ /* It is not necessary to compare scan[2] and match[2] since they are
+ * always equal when the other bytes match, given that the hash keys
+ * are equal and that HASH_BITS >= 8. Compare 2 bytes at a time at
+ * strstart+3, +5, ... up to strstart+257. We check for insufficient
+ * lookahead only every 4th comparison; the 128th check will be made
+ * at strstart+257. If MAX_MATCH-2 is not a multiple of 8, it is
+ * necessary to put more guard bytes at the end of the window, or
+ * to check more often for insufficient lookahead.
+ */
+ Assert(scan[2] == match[2], "scan[2]?");
+ scan++, match++;
+ do {
+ } while (*(ushf*)(scan+=2) == *(ushf*)(match+=2) &&
+ *(ushf*)(scan+=2) == *(ushf*)(match+=2) &&
+ *(ushf*)(scan+=2) == *(ushf*)(match+=2) &&
+ *(ushf*)(scan+=2) == *(ushf*)(match+=2) &&
+ scan < strend);
+ /* The funny "do {}" generates better code on most compilers */
+
+ /* Here, scan <= window+strstart+257 */
+ Assert(scan <= s->window+(unsigned)(s->window_size-1), "wild scan");
+ if (*scan == *match) scan++;
+
+ len = (MAX_MATCH - 1) - (int)(strend-scan);
+ scan = strend - (MAX_MATCH-1);
+
+#else /* UNALIGNED_OK */
+
+ if (match[best_len] != scan_end ||
+ match[best_len-1] != scan_end1 ||
+ *match != *scan ||
+ *++match != scan[1]) continue;
+
+ /* The check at best_len-1 can be removed because it will be made
+ * again later. (This heuristic is not always a win.)
+ * It is not necessary to compare scan[2] and match[2] since they
+ * are always equal when the other bytes match, given that
+ * the hash keys are equal and that HASH_BITS >= 8.
+ */
+ scan += 2, match++;
+ Assert(*scan == *match, "match[2]?");
+
+ /* We check for insufficient lookahead only every 8th comparison;
+ * the 256th check will be made at strstart+258.
+ */
+ do {
+ } while (*++scan == *++match && *++scan == *++match &&
+ *++scan == *++match && *++scan == *++match &&
+ *++scan == *++match && *++scan == *++match &&
+ *++scan == *++match && *++scan == *++match &&
+ scan < strend);
+
+ Assert(scan <= s->window+(unsigned)(s->window_size-1), "wild scan");
+
+ len = MAX_MATCH - (int)(strend - scan);
+ scan = strend - MAX_MATCH;
+
+#endif /* UNALIGNED_OK */
+
+ if (len > best_len) {
+ s->match_start = cur_match;
+ best_len = len;
+ if (len >= nice_match) break;
+#ifdef UNALIGNED_OK
+ scan_end = *(ushf*)(scan+best_len-1);
+#else
+ scan_end1 = scan[best_len-1];
+ scan_end = scan[best_len];
+#endif
+ }
+ } while ((cur_match = prev[cur_match & wmask]) > limit
+ && --chain_length != 0);
+
+ if ((uInt)best_len <= s->lookahead) return best_len;
+ return s->lookahead;
+}
+#endif /* ASMV */
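+
+/* A worked note on the fast-reject test above, not part of the original
+ * source.  With best_len == 5, a chain candidate can only improve on the
+ * current best match if its byte at offset best_len equals the one at
+ * scan[best_len]; comparing match[best_len] (and match[best_len-1])
+ * first therefore rejects most candidates with one or two probes,
+ * without touching the heads of the strings.
+ */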
+
+#ifdef DEBUG_ZLIB
+/* ===========================================================================
+ * Check that the match at match_start is indeed a match.
+ */
+local void check_match(s, start, match, length)
+ deflate_state *s;
+ IPos start, match;
+ int length;
+{
+ /* check that the match is indeed a match */
+ if (zmemcmp((charf *)s->window + match,
+ (charf *)s->window + start, length) != EQUAL) {
+ fprintf(stderr, " start %u, match %u, length %d\n",
+ start, match, length);
+ do {
+ fprintf(stderr, "%c%c", s->window[match++], s->window[start++]);
+ } while (--length != 0);
+ z_error("invalid match");
+ }
+ if (z_verbose > 1) {
+ fprintf(stderr,"\\[%d,%d]", start-match, length);
+ do { putc(s->window[start++], stderr); } while (--length != 0);
+ }
+}
+#else
+# define check_match(s, start, match, length)
+#endif
+
+/* ===========================================================================
+ * Fill the window when the lookahead becomes insufficient.
+ * Updates strstart and lookahead.
+ *
+ * IN assertion: lookahead < MIN_LOOKAHEAD
+ * OUT assertions: strstart <= window_size-MIN_LOOKAHEAD
+ * At least one byte has been read, or avail_in == 0; reads are
+ * performed for at least two bytes (required for the zip translate_eol
+ * option -- not supported here).
+ */
+local void fill_window(s)
+ deflate_state *s;
+{
+ register unsigned n, m;
+ register Posf *p;
+ unsigned more; /* Amount of free space at the end of the window. */
+ uInt wsize = s->w_size;
+
+ do {
+ more = (unsigned)(s->window_size -(ulg)s->lookahead -(ulg)s->strstart);
+
+ /* Deal with !@#$% 64K limit: */
+ if (more == 0 && s->strstart == 0 && s->lookahead == 0) {
+ more = wsize;
+
+ } else if (more == (unsigned)(-1)) {
+ /* Very unlikely, but possible on 16 bit machine if strstart == 0
+ * and lookahead == 1 (input done one byte at time)
+ */
+ more--;
+
+ /* If the window is almost full and there is insufficient lookahead,
+ * move the upper half to the lower one to make room in the upper half.
+ */
+ } else if (s->strstart >= wsize+MAX_DIST(s)) {
+
+ zmemcpy((charf *)s->window, (charf *)s->window+wsize,
+ (unsigned)wsize);
+ s->match_start -= wsize;
+ s->strstart -= wsize; /* we now have strstart >= MAX_DIST */
+ s->block_start -= (long) wsize;
+
+ /* Slide the hash table (could be avoided with 32 bit values
+ at the expense of memory usage). We slide even when level == 0
+ to keep the hash table consistent if we switch back to level > 0
+ later. (Using level 0 permanently is not an optimal usage of
+ zlib, so we don't care about this pathological case.)
+ */
+ n = s->hash_size;
+ p = &s->head[n];
+ do {
+ m = *--p;
+ *p = (Pos)(m >= wsize ? m-wsize : NIL);
+ } while (--n);
+
+ n = wsize;
+ p = &s->prev[n];
+ do {
+ m = *--p;
+ *p = (Pos)(m >= wsize ? m-wsize : NIL);
+ /* If n is not on any hash chain, prev[n] is garbage but
+ * its value will never be used.
+ */
+ } while (--n);
+ more += wsize;
+ }
+ if (s->strm->avail_in == 0) return;
+
+ /* If there was no sliding:
+ * strstart <= WSIZE+MAX_DIST-1 && lookahead <= MIN_LOOKAHEAD - 1 &&
+ * more == window_size - lookahead - strstart
+ * => more >= window_size - (MIN_LOOKAHEAD-1 + WSIZE + MAX_DIST-1)
+ * => more >= window_size - 2*WSIZE + 2
+ * In the BIG_MEM or MMAP case (not yet supported),
+ * window_size == input_size + MIN_LOOKAHEAD &&
+ * strstart + s->lookahead <= input_size => more >= MIN_LOOKAHEAD.
+ * Otherwise, window_size == 2*WSIZE so more >= 2.
+ * If there was sliding, more >= WSIZE. So in all cases, more >= 2.
+ */
+ Assert(more >= 2, "more < 2");
+
+ n = read_buf(s->strm, (charf *)s->window + s->strstart + s->lookahead,
+ more);
+ s->lookahead += n;
+
+ /* Initialize the hash value now that we have some input: */
+ if (s->lookahead >= MIN_MATCH) {
+ s->ins_h = s->window[s->strstart];
+ UPDATE_HASH(s, s->ins_h, s->window[s->strstart+1]);
+#if MIN_MATCH != 3
+ Call UPDATE_HASH() MIN_MATCH-3 more times
+#endif
+ }
+ /* If the whole input has less than MIN_MATCH bytes, ins_h is garbage,
+ * but this is not important since only literal bytes will be emitted.
+ */
+
+ } while (s->lookahead < MIN_LOOKAHEAD && s->strm->avail_in != 0);
+}
+
+/* ===========================================================================
+ * Flush the current block, with given end-of-file flag.
+ * IN assertion: strstart is set to the end of the current match.
+ */
+#define FLUSH_BLOCK_ONLY(s, eof) { \
+ _tr_flush_block(s, (s->block_start >= 0L ? \
+ (charf *)&s->window[(unsigned)s->block_start] : \
+ (charf *)Z_NULL), \
+ (ulg)((long)s->strstart - s->block_start), \
+ (eof)); \
+ s->block_start = s->strstart; \
+ flush_pending(s->strm); \
+ Tracev((stderr,"[FLUSH]")); \
+}
+
+/* Same but force premature exit if necessary. */
+#define FLUSH_BLOCK(s, eof) { \
+ FLUSH_BLOCK_ONLY(s, eof); \
+ if (s->strm->avail_out == 0) return (eof) ? finish_started : need_more; \
+}
+
+/* ===========================================================================
+ * Copy without compression as much as possible from the input stream, return
+ * the current block state.
+ * This function does not insert new strings in the dictionary since
+ * incompressible data is probably not useful. This function is used
+ * only for the level=0 compression option.
+ * NOTE: this function should be optimized to avoid extra copying from
+ * window to pending_buf.
+ */
+local block_state deflate_stored(s, flush)
+ deflate_state *s;
+ int flush;
+{
+ /* Stored blocks are limited to 0xffff bytes, pending_buf is limited
+ * to pending_buf_size, and each stored block has a 5 byte header:
+ */
+ ulg max_block_size = 0xffff;
+ ulg max_start;
+
+ if (max_block_size > s->pending_buf_size - 5) {
+ max_block_size = s->pending_buf_size - 5;
+ }
+
+ /* Copy as much as possible from input to output: */
+ for (;;) {
+ /* Fill the window as much as possible: */
+ if (s->lookahead <= 1) {
+
+ Assert(s->strstart < s->w_size+MAX_DIST(s) ||
+ s->block_start >= (long)s->w_size, "slide too late");
+
+ fill_window(s);
+ if (s->lookahead == 0 && flush == Z_NO_FLUSH) return need_more;
+
+ if (s->lookahead == 0) break; /* flush the current block */
+ }
+ Assert(s->block_start >= 0L, "block gone");
+
+ s->strstart += s->lookahead;
+ s->lookahead = 0;
+
+ /* Emit a stored block if pending_buf will be full: */
+ max_start = s->block_start + max_block_size;
+ if (s->strstart == 0 || (ulg)s->strstart >= max_start) {
+ /* strstart == 0 is possible when wraparound on 16-bit machine */
+ s->lookahead = (uInt)(s->strstart - max_start);
+ s->strstart = (uInt)max_start;
+ FLUSH_BLOCK(s, 0);
+ }
+ /* Flush if we may have to slide, otherwise block_start may become
+ * negative and the data will be gone:
+ */
+ if (s->strstart - (uInt)s->block_start >= MAX_DIST(s)) {
+ FLUSH_BLOCK(s, 0);
+ }
+ }
+ FLUSH_BLOCK(s, flush == Z_FINISH);
+ return flush == Z_FINISH ? finish_done : block_done;
+}
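+
+/* A worked size bound, not part of the original source: each stored
+ * block costs a 5 byte header (the type bits rounded up to a byte
+ * boundary plus the 2-byte LEN and 2-byte NLEN fields) per at most
+ * 0xffff bytes of payload, so level-0 output is bounded by roughly
+ * input_size * 65540/65535, plus the 2-byte zlib header and the 4-byte
+ * adler32 trailer when noheader == 0.
+ */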
+
+/* ===========================================================================
+ * Compress as much as possible from the input stream, return the current
+ * block state.
+ * This function does not perform lazy evaluation of matches and inserts
+ * new strings in the dictionary only for unmatched strings or for short
+ * matches. It is used only for the fast compression options.
+ */
+local block_state deflate_fast(s, flush)
+ deflate_state *s;
+ int flush;
+{
+ IPos hash_head = NIL; /* head of the hash chain */
+ int bflush; /* set if current block must be flushed */
+
+ for (;;) {
+ /* Make sure that we always have enough lookahead, except
+ * at the end of the input file. We need MAX_MATCH bytes
+ * for the next match, plus MIN_MATCH bytes to insert the
+ * string following the next match.
+ */
+ if (s->lookahead < MIN_LOOKAHEAD) {
+ fill_window(s);
+ if (s->lookahead < MIN_LOOKAHEAD && flush == Z_NO_FLUSH) {
+ return need_more;
+ }
+ if (s->lookahead == 0) break; /* flush the current block */
+ }
+
+ /* Insert the string window[strstart .. strstart+2] in the
+ * dictionary, and set hash_head to the head of the hash chain:
+ */
+ if (s->lookahead >= MIN_MATCH) {
+ INSERT_STRING(s, s->strstart, hash_head);
+ }
+
+ /* Find the longest match, discarding those <= prev_length.
+ * At this point we have always match_length < MIN_MATCH
+ */
+ if (hash_head != NIL && s->strstart - hash_head <= MAX_DIST(s)) {
+ /* To simplify the code, we prevent matches with the string
+ * of window index 0 (in particular we have to avoid a match
+ * of the string with itself at the start of the input file).
+ */
+ if (s->strategy != Z_HUFFMAN_ONLY) {
+ s->match_length = longest_match (s, hash_head);
+ }
+ /* longest_match() sets match_start */
+ }
+ if (s->match_length >= MIN_MATCH) {
+ check_match(s, s->strstart, s->match_start, s->match_length);
+
+ bflush = _tr_tally(s, s->strstart - s->match_start,
+ s->match_length - MIN_MATCH);
+
+ s->lookahead -= s->match_length;
+
+ /* Insert new strings in the hash table only if the match length
+ * is not too large. This saves time but degrades compression.
+ */
+ if (s->match_length <= s->max_insert_length &&
+ s->lookahead >= MIN_MATCH) {
+ s->match_length--; /* string at strstart already in hash table */
+ do {
+ s->strstart++;
+ INSERT_STRING(s, s->strstart, hash_head);
+ /* strstart never exceeds WSIZE-MAX_MATCH, so there are
+ * always MIN_MATCH bytes ahead.
+ */
+ } while (--s->match_length != 0);
+ s->strstart++;
+ } else {
+ s->strstart += s->match_length;
+ s->match_length = 0;
+ s->ins_h = s->window[s->strstart];
+ UPDATE_HASH(s, s->ins_h, s->window[s->strstart+1]);
+#if MIN_MATCH != 3
+ Call UPDATE_HASH() MIN_MATCH-3 more times
+#endif
+ /* If lookahead < MIN_MATCH, ins_h is garbage, but it does not
+ * matter since it will be recomputed at next deflate call.
+ */
+ }
+ } else {
+ /* No match, output a literal byte */
+ Tracevv((stderr,"%c", s->window[s->strstart]));
+ bflush = _tr_tally (s, 0, s->window[s->strstart]);
+ s->lookahead--;
+ s->strstart++;
+ }
+ if (bflush) FLUSH_BLOCK(s, 0);
+ }
+ FLUSH_BLOCK(s, flush == Z_FINISH);
+ return flush == Z_FINISH ? finish_done : block_done;
+}
+
+/* ===========================================================================
+ * Same as above, but achieves better compression. We use a lazy
+ * evaluation for matches: a match is finally adopted only if there is
+ * no better match at the next window position.
+ */
+local block_state deflate_slow(s, flush)
+ deflate_state *s;
+ int flush;
+{
+ IPos hash_head = NIL; /* head of hash chain */
+ int bflush; /* set if current block must be flushed */
+
+ /* Process the input block. */
+ for (;;) {
+ /* Make sure that we always have enough lookahead, except
+ * at the end of the input file. We need MAX_MATCH bytes
+ * for the next match, plus MIN_MATCH bytes to insert the
+ * string following the next match.
+ */
+ if (s->lookahead < MIN_LOOKAHEAD) {
+ fill_window(s);
+ if (s->lookahead < MIN_LOOKAHEAD && flush == Z_NO_FLUSH) {
+ return need_more;
+ }
+ if (s->lookahead == 0) break; /* flush the current block */
+ }
+
+ /* Insert the string window[strstart .. strstart+2] in the
+ * dictionary, and set hash_head to the head of the hash chain:
+ */
+ if (s->lookahead >= MIN_MATCH) {
+ INSERT_STRING(s, s->strstart, hash_head);
+ }
+
+ /* Find the longest match, discarding those <= prev_length.
+ */
+ s->prev_length = s->match_length, s->prev_match = s->match_start;
+ s->match_length = MIN_MATCH-1;
+
+ if (hash_head != NIL && s->prev_length < s->max_lazy_match &&
+ s->strstart - hash_head <= MAX_DIST(s)) {
+ /* To simplify the code, we prevent matches with the string
+ * of window index 0 (in particular we have to avoid a match
+ * of the string with itself at the start of the input file).
+ */
+ if (s->strategy != Z_HUFFMAN_ONLY) {
+ s->match_length = longest_match (s, hash_head);
+ }
+ /* longest_match() sets match_start */
+
+ if (s->match_length <= 5 && (s->strategy == Z_FILTERED ||
+ (s->match_length == MIN_MATCH &&
+ s->strstart - s->match_start > TOO_FAR))) {
+
+ /* If prev_match is also MIN_MATCH, match_start is garbage
+ * but we will ignore the current match anyway.
+ */
+ s->match_length = MIN_MATCH-1;
+ }
+ }
+ /* If there was a match at the previous step and the current
+ * match is not better, output the previous match:
+ */
+ if (s->prev_length >= MIN_MATCH && s->match_length <= s->prev_length) {
+ uInt max_insert = s->strstart + s->lookahead - MIN_MATCH;
+ /* Do not insert strings in hash table beyond this. */
+
+ check_match(s, s->strstart-1, s->prev_match, s->prev_length);
+
+ bflush = _tr_tally(s, s->strstart -1 - s->prev_match,
+ s->prev_length - MIN_MATCH);
+
+ /* Insert in hash table all strings up to the end of the match.
+ * strstart-1 and strstart are already inserted. If there is not
+ * enough lookahead, the last two strings are not inserted in
+ * the hash table.
+ */
+ s->lookahead -= s->prev_length-1;
+ s->prev_length -= 2;
+ do {
+ if (++s->strstart <= max_insert) {
+ INSERT_STRING(s, s->strstart, hash_head);
+ }
+ } while (--s->prev_length != 0);
+ s->match_available = 0;
+ s->match_length = MIN_MATCH-1;
+ s->strstart++;
+
+ if (bflush) FLUSH_BLOCK(s, 0);
+
+ } else if (s->match_available) {
+ /* If there was no match at the previous position, output a
+ * single literal. If there was a match but the current match
+ * is longer, truncate the previous match to a single literal.
+ */
+ Tracevv((stderr,"%c", s->window[s->strstart-1]));
+ if (_tr_tally (s, 0, s->window[s->strstart-1])) {
+ FLUSH_BLOCK_ONLY(s, 0);
+ }
+ s->strstart++;
+ s->lookahead--;
+ if (s->strm->avail_out == 0) return need_more;
+ } else {
+ /* There is no previous match to compare with, wait for
+ * the next step to decide.
+ */
+ s->match_available = 1;
+ s->strstart++;
+ s->lookahead--;
+ }
+ }
+ Assert (flush != Z_NO_FLUSH, "no flush?");
+ if (s->match_available) {
+ Tracevv((stderr,"%c", s->window[s->strstart-1]));
+ _tr_tally (s, 0, s->window[s->strstart-1]);
+ s->match_available = 0;
+ }
+ FLUSH_BLOCK(s, flush == Z_FINISH);
+ return flush == Z_FINISH ? finish_done : block_done;
+}
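+
+/* A worked comparison, not part of the original source.  Suppose
+ * position p matches earlier data with length 3 while position p+1
+ * matches with length 5.  deflate_fast() takes the length-3 match and
+ * resumes at p+3; deflate_slow() defers the decision by one byte, finds
+ * the longer match, and emits the literal at p followed by the length-5
+ * match at p+1, which usually codes more compactly.
+ */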
+/* --- deflate.c */
+
+/* +++ trees.c */
+/* trees.c -- output deflated data using Huffman coding
+ * Copyright (C) 1995-1996 Jean-loup Gailly
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/*
+ * ALGORITHM
+ *
+ * The "deflation" process uses several Huffman trees. The more
+ * common source values are represented by shorter bit sequences.
+ *
+ * Each code tree is stored in a compressed form which is itself
+ * a Huffman encoding of the lengths of all the code strings (in
+ * ascending order by source values). The actual code strings are
+ * reconstructed from the lengths in the inflate process, as described
+ * in the deflate specification.
+ *
+ * REFERENCES
+ *
+ * Deutsch, L.P., "'Deflate' Compressed Data Format Specification".
+ * Available in ftp.uu.net:/pub/archiving/zip/doc/deflate-1.1.doc
+ *
+ * Storer, James A.
+ * Data Compression: Methods and Theory, pp. 49-50.
+ * Computer Science Press, 1988. ISBN 0-7167-8156-5.
+ *
+ * Sedgewick, R.
+ * Algorithms, p290.
+ * Addison-Wesley, 1983. ISBN 0-201-06672-6.
+ */
+
+/* From: trees.c,v 1.11 1996/07/24 13:41:06 me Exp $ */
+
+/* #include <rtems/freebsd/local/deflate.h> */
+
+#ifdef DEBUG_ZLIB
+# include <rtems/freebsd/ctype.h>
+#endif
+
+/* ===========================================================================
+ * Constants
+ */
+
+#define MAX_BL_BITS 7
+/* Bit length codes must not exceed MAX_BL_BITS bits */
+
+#define END_BLOCK 256
+/* end of block literal code */
+
+#define REP_3_6 16
+/* repeat previous bit length 3-6 times (2 bits of repeat count) */
+
+#define REPZ_3_10 17
+/* repeat a zero length 3-10 times (3 bits of repeat count) */
+
+#define REPZ_11_138 18
+/* repeat a zero length 11-138 times (7 bits of repeat count) */
+
+local int extra_lbits[LENGTH_CODES] /* extra bits for each length code */
+ = {0,0,0,0,0,0,0,0,1,1,1,1,2,2,2,2,3,3,3,3,4,4,4,4,5,5,5,5,0};
+
+local int extra_dbits[D_CODES] /* extra bits for each distance code */
+ = {0,0,0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9,10,10,11,11,12,12,13,13};
+
+local int extra_blbits[BL_CODES]/* extra bits for each bit length code */
+ = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,3,7};
+
+local uch bl_order[BL_CODES]
+ = {16,17,18,0,8,7,9,6,10,5,11,4,12,3,13,2,14,1,15};
+/* The lengths of the bit length codes are sent in order of decreasing
+ * probability, to avoid transmitting the lengths for unused bit length codes.
+ */
+
+#define Buf_size (8 * 2*sizeof(char))
+/* Number of bits used within bi_buf. (bi_buf might be implemented on
+ * more than 16 bits on some systems.)
+ */
+
+/* ===========================================================================
+ * Local data. These are initialized only once.
+ */
+
+local ct_data static_ltree[L_CODES+2];
+/* The static literal tree. Since the bit lengths are imposed, there is no
+ * need for the L_CODES extra codes used during heap construction. However,
+ * the codes 286 and 287 are needed to build a canonical tree (see _tr_init
+ * below).
+ */
+
+local ct_data static_dtree[D_CODES];
+/* The static distance tree. (Actually a trivial tree since all codes use
+ * 5 bits.)
+ */
+
+local uch dist_code[512];
+/* distance codes. The first 256 values correspond to the distances
+ * 3 .. 258, the last 256 values correspond to the top 8 bits of
+ * the 15 bit distances.
+ */
+
+local uch length_code[MAX_MATCH-MIN_MATCH+1];
+/* length code for each normalized match length (0 == MIN_MATCH) */
+
+local int base_length[LENGTH_CODES];
+/* First normalized length for each code (0 = MIN_MATCH) */
+
+local int base_dist[D_CODES];
+/* First normalized distance for each code (0 = distance of 1) */
+
+struct static_tree_desc_s {
+ ct_data *static_tree; /* static tree or NULL */
+ intf *extra_bits; /* extra bits for each code or NULL */
+ int extra_base; /* base index for extra_bits */
+ int elems; /* max number of elements in the tree */
+ int max_length; /* max bit length for the codes */
+};
+
+local static_tree_desc static_l_desc =
+{static_ltree, extra_lbits, LITERALS+1, L_CODES, MAX_BITS};
+
+local static_tree_desc static_d_desc =
+{static_dtree, extra_dbits, 0, D_CODES, MAX_BITS};
+
+local static_tree_desc static_bl_desc =
+{(ct_data *)0, extra_blbits, 0, BL_CODES, MAX_BL_BITS};
+
+/* ===========================================================================
+ * Local (static) routines in this file.
+ */
+
+local void tr_static_init OF((void));
+local void init_block OF((deflate_state *s));
+local void pqdownheap OF((deflate_state *s, ct_data *tree, int k));
+local void gen_bitlen OF((deflate_state *s, tree_desc *desc));
+local void gen_codes OF((ct_data *tree, int max_code, ushf *bl_count));
+local void build_tree OF((deflate_state *s, tree_desc *desc));
+local void scan_tree OF((deflate_state *s, ct_data *tree, int max_code));
+local void send_tree OF((deflate_state *s, ct_data *tree, int max_code));
+local int build_bl_tree OF((deflate_state *s));
+local void send_all_trees OF((deflate_state *s, int lcodes, int dcodes,
+ int blcodes));
+local void compress_block OF((deflate_state *s, ct_data *ltree,
+ ct_data *dtree));
+local void set_data_type OF((deflate_state *s));
+local unsigned bi_reverse OF((unsigned value, int length));
+local void bi_windup OF((deflate_state *s));
+local void bi_flush OF((deflate_state *s));
+local void copy_block OF((deflate_state *s, charf *buf, unsigned len,
+ int header));
+
+#ifndef DEBUG_ZLIB
+# define send_code(s, c, tree) send_bits(s, tree[(c)].Code, tree[(c)].Len)
+ /* Send a code of the given tree. c and tree must not have side effects */
+
+#else /* DEBUG_ZLIB */
+# define send_code(s, c, tree) \
+ { if (verbose>2) fprintf(stderr,"\ncd %3d ",(c)); \
+ send_bits(s, tree[c].Code, tree[c].Len); }
+#endif
+
+#define d_code(dist) \
+ ((dist) < 256 ? dist_code[dist] : dist_code[256+((dist)>>7)])
+/* Mapping from a distance to a distance code. dist is the distance - 1 and
+ * must not have side effects. dist_code[256] and dist_code[257] are never
+ * used.
+ */
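+
+/* A worked lookup, not part of the original source: for an actual match
+ * distance of 5 the argument here is dist == 4, and dist_code[4] == 4;
+ * distance code 4 covers distances 5..6 using one extra bit (compare
+ * base_dist[] and extra_dbits[] above).
+ */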
+
+/* ===========================================================================
+ * Output a short LSB first on the stream.
+ * IN assertion: there is enough room in pendingBuf.
+ */
+#define put_short(s, w) { \
+ put_byte(s, (uch)((w) & 0xff)); \
+ put_byte(s, (uch)((ush)(w) >> 8)); \
+}
+
+/* ===========================================================================
+ * Send a value on a given number of bits.
+ * IN assertion: length <= 16 and value fits in length bits.
+ */
+#ifdef DEBUG_ZLIB
+local void send_bits OF((deflate_state *s, int value, int length));
+
+local void send_bits(s, value, length)
+ deflate_state *s;
+ int value; /* value to send */
+ int length; /* number of bits */
+{
+ Tracevv((stderr," l %2d v %4x ", length, value));
+ Assert(length > 0 && length <= 15, "invalid length");
+ s->bits_sent += (ulg)length;
+
+ /* If not enough room in bi_buf, use (valid) bits from bi_buf and
+ * (16 - bi_valid) bits from value, leaving (width - (16-bi_valid))
+ * unused bits in value.
+ */
+ if (s->bi_valid > (int)Buf_size - length) {
+ s->bi_buf |= (value << s->bi_valid);
+ put_short(s, s->bi_buf);
+ s->bi_buf = (ush)value >> (Buf_size - s->bi_valid);
+ s->bi_valid += length - Buf_size;
+ } else {
+ s->bi_buf |= value << s->bi_valid;
+ s->bi_valid += length;
+ }
+}
+#else /* !DEBUG_ZLIB */
+
+#define send_bits(s, value, length) \
+{ int len = (length);\
+ if ((s)->bi_valid > (int)Buf_size - len) {\
+ int val = (value);\
+ (s)->bi_buf |= (val << (s)->bi_valid);\
+ put_short((s), (s)->bi_buf);\
+ (s)->bi_buf = (ush)val >> (Buf_size - (s)->bi_valid);\
+ (s)->bi_valid += len - Buf_size;\
+ } else {\
+ (s)->bi_buf |= (value) << (s)->bi_valid;\
+ (s)->bi_valid += len;\
+ }\
+}
+#endif /* DEBUG_ZLIB */
+
+/* the arguments must not have side effects */
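+
+/* A worked example, not part of the original source, assuming a 16-bit
+ * bi_buf: with bi_valid == 10 and a 9-bit value v, only 6 bits fit, so
+ * bi_buf |= v << 10 keeps the low 6 bits of v, the full buffer is
+ * written LSB first with put_short(), and the remaining high 3 bits
+ * (v >> 6) start the new bi_buf with bi_valid == 10 + 9 - 16 == 3.
+ */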
+
+/* ===========================================================================
+ * Initialize the various 'constant' tables. In a multi-threaded environment,
+ * this function may be called by two threads concurrently, but this is
+ * harmless since both invocations do exactly the same thing.
+ */
+local void tr_static_init()
+{
+ static int static_init_done = 0;
+ int n; /* iterates over tree elements */
+ int bits; /* bit counter */
+ int length; /* length value */
+ int code; /* code value */
+ int dist; /* distance index */
+ ush bl_count[MAX_BITS+1];
+ /* number of codes at each bit length for an optimal tree */
+
+ if (static_init_done) return;
+
+ /* Initialize the mapping length (0..255) -> length code (0..28) */
+ length = 0;
+ for (code = 0; code < LENGTH_CODES-1; code++) {
+ base_length[code] = length;
+ for (n = 0; n < (1<<extra_lbits[code]); n++) {
+ length_code[length++] = (uch)code;
+ }
+ }
+ Assert (length == 256, "tr_static_init: length != 256");
+ /* Note that the length 255 (match length 258) can be represented
+ * in two different ways: code 284 + 5 bits or code 285, so we
+ * overwrite length_code[255] to use the best encoding:
+ */
+ length_code[length-1] = (uch)code;
+
+ /* Initialize the mapping dist (0..32K) -> dist code (0..29) */
+ dist = 0;
+ for (code = 0 ; code < 16; code++) {
+ base_dist[code] = dist;
+ for (n = 0; n < (1<<extra_dbits[code]); n++) {
+ dist_code[dist++] = (uch)code;
+ }
+ }
+ Assert (dist == 256, "tr_static_init: dist != 256");
+ dist >>= 7; /* from now on, all distances are divided by 128 */
+ for ( ; code < D_CODES; code++) {
+ base_dist[code] = dist << 7;
+ for (n = 0; n < (1<<(extra_dbits[code]-7)); n++) {
+ dist_code[256 + dist++] = (uch)code;
+ }
+ }
+ Assert (dist == 256, "tr_static_init: 256+dist != 512");
+
+ /* Construct the codes of the static literal tree */
+ for (bits = 0; bits <= MAX_BITS; bits++) bl_count[bits] = 0;
+ n = 0;
+ while (n <= 143) static_ltree[n++].Len = 8, bl_count[8]++;
+ while (n <= 255) static_ltree[n++].Len = 9, bl_count[9]++;
+ while (n <= 279) static_ltree[n++].Len = 7, bl_count[7]++;
+ while (n <= 287) static_ltree[n++].Len = 8, bl_count[8]++;
+ /* Codes 286 and 287 do not exist, but we must include them in the
+ * tree construction to get a canonical Huffman tree (longest code
+ * all ones)
+ */
+ gen_codes((ct_data *)static_ltree, L_CODES+1, bl_count);
+
+ /* The static distance tree is trivial: */
+ for (n = 0; n < D_CODES; n++) {
+ static_dtree[n].Len = 5;
+ static_dtree[n].Code = bi_reverse((unsigned)n, 5);
+ }
+ static_init_done = 1;
+}
+
+/* ===========================================================================
+ * Initialize the tree data structures for a new zlib stream.
+ */
+void _tr_init(s)
+ deflate_state *s;
+{
+ tr_static_init();
+
+ s->compressed_len = 0L;
+
+ s->l_desc.dyn_tree = s->dyn_ltree;
+ s->l_desc.stat_desc = &static_l_desc;
+
+ s->d_desc.dyn_tree = s->dyn_dtree;
+ s->d_desc.stat_desc = &static_d_desc;
+
+ s->bl_desc.dyn_tree = s->bl_tree;
+ s->bl_desc.stat_desc = &static_bl_desc;
+
+ s->bi_buf = 0;
+ s->bi_valid = 0;
+ s->last_eob_len = 8; /* enough lookahead for inflate */
+#ifdef DEBUG_ZLIB
+ s->bits_sent = 0L;
+#endif
+
+ /* Initialize the first block of the first file: */
+ init_block(s);
+}
+
+/* ===========================================================================
+ * Initialize a new block.
+ */
+local void init_block(s)
+ deflate_state *s;
+{
+ int n; /* iterates over tree elements */
+
+ /* Initialize the trees. */
+ for (n = 0; n < L_CODES; n++) s->dyn_ltree[n].Freq = 0;
+ for (n = 0; n < D_CODES; n++) s->dyn_dtree[n].Freq = 0;
+ for (n = 0; n < BL_CODES; n++) s->bl_tree[n].Freq = 0;
+
+ s->dyn_ltree[END_BLOCK].Freq = 1;
+ s->opt_len = s->static_len = 0L;
+ s->last_lit = s->matches = 0;
+}
+
+#define SMALLEST 1
+/* Index within the heap array of least frequent node in the Huffman tree */
+
+
+/* ===========================================================================
+ * Remove the smallest element from the heap and recreate the heap with
+ * one less element. Updates heap and heap_len.
+ */
+#define pqremove(s, tree, top) \
+{\
+ top = s->heap[SMALLEST]; \
+ s->heap[SMALLEST] = s->heap[s->heap_len--]; \
+ pqdownheap(s, tree, SMALLEST); \
+}
+
+/* ===========================================================================
+ * Compares two subtrees, using the tree depth as tie breaker when
+ * the subtrees have equal frequency. This minimizes the worst case length.
+ */
+#define smaller(tree, n, m, depth) \
+ (tree[n].Freq < tree[m].Freq || \
+ (tree[n].Freq == tree[m].Freq && depth[n] <= depth[m]))
+
+/* ===========================================================================
+ * Restore the heap property by moving down the tree starting at node k,
+ * exchanging a node with the smallest of its two sons if necessary, stopping
+ * when the heap property is re-established (each father smaller than its
+ * two sons).
+ */
+local void pqdownheap(s, tree, k)
+ deflate_state *s;
+ ct_data *tree; /* the tree to restore */
+ int k; /* node to move down */
+{
+ int v = s->heap[k];
+ int j = k << 1; /* left son of k */
+ while (j <= s->heap_len) {
+ /* Set j to the smallest of the two sons: */
+ if (j < s->heap_len &&
+ smaller(tree, s->heap[j+1], s->heap[j], s->depth)) {
+ j++;
+ }
+ /* Exit if v is smaller than both sons */
+ if (smaller(tree, v, s->heap[j], s->depth)) break;
+
+ /* Exchange v with the smallest son */
+ s->heap[k] = s->heap[j]; k = j;
+
+ /* And continue down the tree, setting j to the left son of k */
+ j <<= 1;
+ }
+ s->heap[k] = v;
+}
+
+/* ===========================================================================
+ * Compute the optimal bit lengths for a tree and update the total bit length
+ * for the current block.
+ * IN assertion: the fields freq and dad are set, heap[heap_max] and
+ * above are the tree nodes sorted by increasing frequency.
+ * OUT assertions: the field len is set to the optimal bit length, the
+ * array bl_count contains the frequencies for each bit length.
+ * The length opt_len is updated; static_len is also updated if stree is
+ * not null.
+ */
+local void gen_bitlen(s, desc)
+ deflate_state *s;
+ tree_desc *desc; /* the tree descriptor */
+{
+ ct_data *tree = desc->dyn_tree;
+ int max_code = desc->max_code;
+ ct_data *stree = desc->stat_desc->static_tree;
+ intf *extra = desc->stat_desc->extra_bits;
+ int base = desc->stat_desc->extra_base;
+ int max_length = desc->stat_desc->max_length;
+ int h; /* heap index */
+ int n, m; /* iterate over the tree elements */
+ int bits; /* bit length */
+ int xbits; /* extra bits */
+ ush f; /* frequency */
+ int overflow = 0; /* number of elements with bit length too large */
+
+ for (bits = 0; bits <= MAX_BITS; bits++) s->bl_count[bits] = 0;
+
+ /* In a first pass, compute the optimal bit lengths (which may
+ * overflow in the case of the bit length tree).
+ */
+ tree[s->heap[s->heap_max]].Len = 0; /* root of the heap */
+
+ for (h = s->heap_max+1; h < HEAP_SIZE; h++) {
+ n = s->heap[h];
+ bits = tree[tree[n].Dad].Len + 1;
+ if (bits > max_length) bits = max_length, overflow++;
+ tree[n].Len = (ush)bits;
+ /* We overwrite tree[n].Dad which is no longer needed */
+
+ if (n > max_code) continue; /* not a leaf node */
+
+ s->bl_count[bits]++;
+ xbits = 0;
+ if (n >= base) xbits = extra[n-base];
+ f = tree[n].Freq;
+ s->opt_len += (ulg)f * (bits + xbits);
+ if (stree) s->static_len += (ulg)f * (stree[n].Len + xbits);
+ }
+ if (overflow == 0) return;
+
+ Trace((stderr,"\nbit length overflow\n"));
+ /* This happens for example on obj2 and pic of the Calgary corpus */
+
+ /* Find the first bit length which could increase: */
+ do {
+ bits = max_length-1;
+ while (s->bl_count[bits] == 0) bits--;
+ s->bl_count[bits]--; /* move one leaf down the tree */
+ s->bl_count[bits+1] += 2; /* move one overflow item as its brother */
+ s->bl_count[max_length]--;
+ /* The brother of the overflow item also moves one step up,
+ * but this does not affect bl_count[max_length]
+ */
+ overflow -= 2;
+ } while (overflow > 0);
+
+ /* Now recompute all bit lengths, scanning in increasing frequency.
+ * h is still equal to HEAP_SIZE. (It is simpler to reconstruct all
+ * lengths instead of fixing only the wrong ones. This idea is taken
+ * from 'ar' written by Haruhiko Okumura.)
+ */
+ for (bits = max_length; bits != 0; bits--) {
+ n = s->bl_count[bits];
+ while (n != 0) {
+ m = s->heap[--h];
+ if (m > max_code) continue;
+ if (tree[m].Len != (unsigned) bits) {
+ Trace((stderr,"code %d bits %d->%d\n", m, tree[m].Len, bits));
+ s->opt_len += ((long)bits - (long)tree[m].Len)
+ *(long)tree[m].Freq;
+ tree[m].Len = (ush)bits;
+ }
+ n--;
+ }
+ }
+}
+
+/* ===========================================================================
+ * Generate the codes for a given tree and bit counts (which need not be
+ * optimal).
+ * IN assertion: the array bl_count contains the bit length statistics for
+ * the given tree and the field len is set for all tree elements.
+ * OUT assertion: the field code is set for all tree elements of non
+ * zero code length.
+ */
+local void gen_codes (tree, max_code, bl_count)
+ ct_data *tree; /* the tree to decorate */
+ int max_code; /* largest code with non zero frequency */
+ ushf *bl_count; /* number of codes at each bit length */
+{
+ ush next_code[MAX_BITS+1]; /* next code value for each bit length */
+ ush code = 0; /* running code value */
+ int bits; /* bit index */
+ int n; /* code index */
+
+ /* The distribution counts are first used to generate the code values
+ * without bit reversal.
+ */
+ for (bits = 1; bits <= MAX_BITS; bits++) {
+ next_code[bits] = code = (code + bl_count[bits-1]) << 1;
+ }
+ /* Check that the bit counts in bl_count are consistent. The last code
+ * must be all ones.
+ */
+ Assert (code + bl_count[MAX_BITS]-1 == (1<<MAX_BITS)-1,
+ "inconsistent bit counts");
+ Tracev((stderr,"\ngen_codes: max_code %d ", max_code));
+
+ for (n = 0; n <= max_code; n++) {
+ int len = tree[n].Len;
+ if (len == 0) continue;
+ /* Now reverse the bits */
+ tree[n].Code = bi_reverse(next_code[len]++, len);
+
+ Tracecv(tree != static_ltree, (stderr,"\nn %3d %c l %2d c %4x (%x) ",
+ n, (isgraph(n) ? n : ' '), len, tree[n].Code, next_code[len]-1));
+ }
+}
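+
+/* A worked example taken from RFC 1951, not part of the original
+ * source: for code lengths (3,3,3,3,3,2,4,4) on symbols A..H,
+ * bl_count[2..4] == {1,5,2} and the first loop above yields
+ * next_code[2..4] == {0,2,14}, producing the canonical codes
+ * F=00, A=010, B=011, C=100, D=101, E=110, G=1110, H=1111
+ * (before the bit reversal applied for LSB-first output).
+ */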
+
+/* ===========================================================================
+ * Construct one Huffman tree and assign the code bit strings and lengths.
+ * Update the total bit length for the current block.
+ * IN assertion: the field freq is set for all tree elements.
+ * OUT assertions: the fields len and code are set to the optimal bit length
+ * and corresponding code. The length opt_len is updated; static_len is
+ * also updated if stree is not null. The field max_code is set.
+ */
+local void build_tree(s, desc)
+ deflate_state *s;
+ tree_desc *desc; /* the tree descriptor */
+{
+ ct_data *tree = desc->dyn_tree;
+ ct_data *stree = desc->stat_desc->static_tree;
+ int elems = desc->stat_desc->elems;
+ int n, m; /* iterate over heap elements */
+ int max_code = -1; /* largest code with non zero frequency */
+ int node; /* new node being created */
+
+ /* Construct the initial heap, with least frequent element in
+ * heap[SMALLEST]. The sons of heap[n] are heap[2*n] and heap[2*n+1].
+ * heap[0] is not used.
+ */
+ s->heap_len = 0, s->heap_max = HEAP_SIZE;
+
+ for (n = 0; n < elems; n++) {
+ if (tree[n].Freq != 0) {
+ s->heap[++(s->heap_len)] = max_code = n;
+ s->depth[n] = 0;
+ } else {
+ tree[n].Len = 0;
+ }
+ }
+
+ /* The pkzip format requires that at least one distance code exists,
+ * and that at least one bit should be sent even if there is only one
+ * possible code. So to avoid special checks later on we force at least
+ * two codes of non zero frequency.
+ */
+ while (s->heap_len < 2) {
+ node = s->heap[++(s->heap_len)] = (max_code < 2 ? ++max_code : 0);
+ tree[node].Freq = 1;
+ s->depth[node] = 0;
+ s->opt_len--; if (stree) s->static_len -= stree[node].Len;
+ /* node is 0 or 1 so it does not have extra bits */
+ }
+ desc->max_code = max_code;
+
+ /* The elements heap[heap_len/2+1 .. heap_len] are leaves of the tree,
+ * establish sub-heaps of increasing lengths:
+ */
+ for (n = s->heap_len/2; n >= 1; n--) pqdownheap(s, tree, n);
+
+ /* Construct the Huffman tree by repeatedly combining the least two
+ * frequent nodes.
+ */
+ node = elems; /* next internal node of the tree */
+ do {
+ pqremove(s, tree, n); /* n = node of least frequency */
+ m = s->heap[SMALLEST]; /* m = node of next least frequency */
+
+ s->heap[--(s->heap_max)] = n; /* keep the nodes sorted by frequency */
+ s->heap[--(s->heap_max)] = m;
+
+ /* Create a new node father of n and m */
+ tree[node].Freq = tree[n].Freq + tree[m].Freq;
+ s->depth[node] = (uch) (MAX(s->depth[n], s->depth[m]) + 1);
+ tree[n].Dad = tree[m].Dad = (ush)node;
+#ifdef DUMP_BL_TREE
+ if (tree == s->bl_tree) {
+ fprintf(stderr,"\nnode %d(%d), sons %d(%d) %d(%d)",
+ node, tree[node].Freq, n, tree[n].Freq, m, tree[m].Freq);
+ }
+#endif
+ /* and insert the new node in the heap */
+ s->heap[SMALLEST] = node++;
+ pqdownheap(s, tree, SMALLEST);
+
+ } while (s->heap_len >= 2);
+
+ s->heap[--(s->heap_max)] = s->heap[SMALLEST];
+
+ /* At this point, the fields freq and dad are set. We can now
+ * generate the bit lengths.
+ */
+ gen_bitlen(s, (tree_desc *)desc);
+
+ /* The field len is now set, we can generate the bit codes */
+ gen_codes ((ct_data *)tree, max_code, s->bl_count);
+}
+
+/* ===========================================================================
+ * Scan a literal or distance tree to determine the frequencies of the codes
+ * in the bit length tree.
+ */
+local void scan_tree (s, tree, max_code)
+ deflate_state *s;
+ ct_data *tree; /* the tree to be scanned */
+ int max_code; /* and its largest code of non zero frequency */
+{
+ int n; /* iterates over all tree elements */
+ int prevlen = -1; /* last emitted length */
+ int curlen; /* length of current code */
+ int nextlen = tree[0].Len; /* length of next code */
+ int count = 0; /* repeat count of the current code */
+ int max_count = 7; /* max repeat count */
+ int min_count = 4; /* min repeat count */
+
+ if (nextlen == 0) max_count = 138, min_count = 3;
+ tree[max_code+1].Len = (ush)0xffff; /* guard */
+
+ for (n = 0; n <= max_code; n++) {
+ curlen = nextlen; nextlen = tree[n+1].Len;
+ if (++count < max_count && curlen == nextlen) {
+ continue;
+ } else if (count < min_count) {
+ s->bl_tree[curlen].Freq += count;
+ } else if (curlen != 0) {
+ if (curlen != prevlen) s->bl_tree[curlen].Freq++;
+ s->bl_tree[REP_3_6].Freq++;
+ } else if (count <= 10) {
+ s->bl_tree[REPZ_3_10].Freq++;
+ } else {
+ s->bl_tree[REPZ_11_138].Freq++;
+ }
+ count = 0; prevlen = curlen;
+ if (nextlen == 0) {
+ max_count = 138, min_count = 3;
+ } else if (curlen == nextlen) {
+ max_count = 6, min_count = 3;
+ } else {
+ max_count = 7, min_count = 4;
+ }
+ }
+}
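+
+/* For example, a run of five codes of length 4 followed by a run of
+ * zero lengths is tallied as one count for code 4 itself plus one for
+ * REP_3_6 (covering the remaining four repeats), and one count for
+ * REPZ_11_138 if the zero run is 11..138 long (REPZ_3_10 for zero runs
+ * of 3..10). send_tree() below emits the matching codes together with
+ * their extra repeat-count bits.
+ */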
+
+/* ===========================================================================
+ * Send a literal or distance tree in compressed form, using the codes in
+ * bl_tree.
+ */
+local void send_tree (s, tree, max_code)
+ deflate_state *s;
+ ct_data *tree; /* the tree to be scanned */
+ int max_code; /* and its largest code of non zero frequency */
+{
+ int n; /* iterates over all tree elements */
+ int prevlen = -1; /* last emitted length */
+ int curlen; /* length of current code */
+ int nextlen = tree[0].Len; /* length of next code */
+ int count = 0; /* repeat count of the current code */
+ int max_count = 7; /* max repeat count */
+ int min_count = 4; /* min repeat count */
+
+ /* tree[max_code+1].Len = -1; */ /* guard already set */
+ if (nextlen == 0) max_count = 138, min_count = 3;
+
+ for (n = 0; n <= max_code; n++) {
+ curlen = nextlen; nextlen = tree[n+1].Len;
+ if (++count < max_count && curlen == nextlen) {
+ continue;
+ } else if (count < min_count) {
+ do { send_code(s, curlen, s->bl_tree); } while (--count != 0);
+
+ } else if (curlen != 0) {
+ if (curlen != prevlen) {
+ send_code(s, curlen, s->bl_tree); count--;
+ }
+ Assert(count >= 3 && count <= 6, " 3_6?");
+ send_code(s, REP_3_6, s->bl_tree); send_bits(s, count-3, 2);
+
+ } else if (count <= 10) {
+ send_code(s, REPZ_3_10, s->bl_tree); send_bits(s, count-3, 3);
+
+ } else {
+ send_code(s, REPZ_11_138, s->bl_tree); send_bits(s, count-11, 7);
+ }
+ count = 0; prevlen = curlen;
+ if (nextlen == 0) {
+ max_count = 138, min_count = 3;
+ } else if (curlen == nextlen) {
+ max_count = 6, min_count = 3;
+ } else {
+ max_count = 7, min_count = 4;
+ }
+ }
+}
+
+/* ===========================================================================
+ * Construct the Huffman tree for the bit lengths and return the index in
+ * bl_order of the last bit length code to send.
+ */
+local int build_bl_tree(s)
+ deflate_state *s;
+{
+ int max_blindex; /* index of last bit length code of non zero freq */
+
+ /* Determine the bit length frequencies for literal and distance trees */
+ scan_tree(s, (ct_data *)s->dyn_ltree, s->l_desc.max_code);
+ scan_tree(s, (ct_data *)s->dyn_dtree, s->d_desc.max_code);
+
+ /* Build the bit length tree: */
+ build_tree(s, (tree_desc *)(&(s->bl_desc)));
+ /* opt_len now includes the length of the tree representations, except
+ * the lengths of the bit lengths codes and the 5+5+4 bits for the counts.
+ */
+
+ /* Determine the number of bit length codes to send. The pkzip format
+ * requires that at least 4 bit length codes be sent. (appnote.txt says
+ * 3 but the actual value used is 4.)
+ */
+ for (max_blindex = BL_CODES-1; max_blindex >= 3; max_blindex--) {
+ if (s->bl_tree[bl_order[max_blindex]].Len != 0) break;
+ }
+ /* Update opt_len to include the bit length tree and counts */
+ s->opt_len += 3*(max_blindex+1) + 5+5+4;
+ Tracev((stderr, "\ndyn trees: dyn %ld, stat %ld",
+ s->opt_len, s->static_len));
+
+ return max_blindex;
+}
+
+/* ===========================================================================
+ * Send the header for a block using dynamic Huffman trees: the counts, the
+ * lengths of the bit length codes, the literal tree and the distance tree.
+ * IN assertion: lcodes >= 257, dcodes >= 1, blcodes >= 4.
+ */
+local void send_all_trees(s, lcodes, dcodes, blcodes)
+ deflate_state *s;
+ int lcodes, dcodes, blcodes; /* number of codes for each tree */
+{
+ int rank; /* index in bl_order */
+
+ Assert (lcodes >= 257 && dcodes >= 1 && blcodes >= 4, "not enough codes");
+ Assert (lcodes <= L_CODES && dcodes <= D_CODES && blcodes <= BL_CODES,
+ "too many codes");
+ Tracev((stderr, "\nbl counts: "));
+ send_bits(s, lcodes-257, 5); /* not +255 as stated in appnote.txt */
+ send_bits(s, dcodes-1, 5);
+ send_bits(s, blcodes-4, 4); /* not -3 as stated in appnote.txt */
+ for (rank = 0; rank < blcodes; rank++) {
+ Tracev((stderr, "\nbl code %2d ", bl_order[rank]));
+ send_bits(s, s->bl_tree[bl_order[rank]].Len, 3);
+ }
+ Tracev((stderr, "\nbl tree: sent %ld", s->bits_sent));
+
+ send_tree(s, (ct_data *)s->dyn_ltree, lcodes-1); /* literal tree */
+ Tracev((stderr, "\nlit tree: sent %ld", s->bits_sent));
+
+ send_tree(s, (ct_data *)s->dyn_dtree, dcodes-1); /* distance tree */
+ Tracev((stderr, "\ndist tree: sent %ld", s->bits_sent));
+}
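+
+/* The resulting header has the RFC 1951 dynamic-block layout: 5 bits
+ * HLIT = lcodes-257, 5 bits HDIST = dcodes-1, 4 bits HCLEN = blcodes-4,
+ * then blcodes 3-bit code lengths in bl_order, then the two compressed
+ * length arrays. For instance, with lcodes=288, dcodes=30 and
+ * blcodes=19 the fixed-size part is 5+5+4 + 19*3 = 71 bits.
+ */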
+
+/* ===========================================================================
+ * Send a stored block
+ */
+void _tr_stored_block(s, buf, stored_len, eof)
+ deflate_state *s;
+ charf *buf; /* input block */
+ ulg stored_len; /* length of input block */
+ int eof; /* true if this is the last block for a file */
+{
+ send_bits(s, (STORED_BLOCK<<1)+eof, 3); /* send block type */
+ s->compressed_len = (s->compressed_len + 3 + 7) & (ulg)~7L;
+ s->compressed_len += (stored_len + 4) << 3;
+
+ copy_block(s, buf, (unsigned)stored_len, 1); /* with header */
+}
+
+/* Send just the `stored block' type code without any length bytes or data.
+ */
+void _tr_stored_type_only(s)
+ deflate_state *s;
+{
+ send_bits(s, (STORED_BLOCK << 1), 3);
+ bi_windup(s);
+ s->compressed_len = (s->compressed_len + 3) & ~7L;
+}
+
+
+/* ===========================================================================
+ * Send one empty static block to give enough lookahead for inflate.
+ * This takes 10 bits, of which 7 may remain in the bit buffer.
+ * The current inflate code requires 9 bits of lookahead. If the
+ * last two codes for the previous block (real code plus EOB) were coded
+ * on 5 bits or less, inflate may have only 5+3 bits of lookahead to decode
+ * the last real code. In this case we send two empty static blocks instead
+ * of one. (There are no problems if the previous block is stored or fixed.)
+ * To simplify the code, we assume the worst case of last real code encoded
+ * on one bit only.
+ */
+void _tr_align(s)
+ deflate_state *s;
+{
+ send_bits(s, STATIC_TREES<<1, 3);
+ send_code(s, END_BLOCK, static_ltree);
+ s->compressed_len += 10L; /* 3 for block type, 7 for EOB */
+ bi_flush(s);
+ /* Of the 10 bits for the empty block, we have already sent
+ * (10 - bi_valid) bits. The lookahead for the last real code (before
+ * the EOB of the previous block) was thus at least one plus the length
+ * of the EOB plus what we have just sent of the empty static block.
+ */
+ if (1 + s->last_eob_len + 10 - s->bi_valid < 9) {
+ send_bits(s, STATIC_TREES<<1, 3);
+ send_code(s, END_BLOCK, static_ltree);
+ s->compressed_len += 10L;
+ bi_flush(s);
+ }
+ s->last_eob_len = 7;
+}
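+
+/* Numeric example of the test above: if the previous block ended with
+ * a 2-bit EOB (s->last_eob_len == 2) and bi_flush() left bi_valid at
+ * 7, then 1 + 2 + 10 - 7 = 6 < 9, so a second empty static block is
+ * emitted to guarantee inflate its 9 bits of lookahead.
+ */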
+
+/* ===========================================================================
+ * Determine the best encoding for the current block: dynamic trees, static
+ * trees or store, and output the encoded block to the zip file. This function
+ * returns the total compressed length for the file so far.
+ */
+ulg _tr_flush_block(s, buf, stored_len, eof)
+ deflate_state *s;
+ charf *buf; /* input block, or NULL if too old */
+ ulg stored_len; /* length of input block */
+ int eof; /* true if this is the last block for a file */
+{
+ ulg opt_lenb, static_lenb; /* opt_len and static_len in bytes */
+ int max_blindex = 0; /* index of last bit length code of non zero freq */
+
+ /* Build the Huffman trees unless a stored block is forced */
+ if (s->level > 0) {
+
+ /* Check if the file is ascii or binary */
+ if (s->data_type == Z_UNKNOWN) set_data_type(s);
+
+ /* Construct the literal and distance trees */
+ build_tree(s, (tree_desc *)(&(s->l_desc)));
+ Tracev((stderr, "\nlit data: dyn %ld, stat %ld", s->opt_len,
+ s->static_len));
+
+ build_tree(s, (tree_desc *)(&(s->d_desc)));
+ Tracev((stderr, "\ndist data: dyn %ld, stat %ld", s->opt_len,
+ s->static_len));
+ /* At this point, opt_len and static_len are the total bit lengths of
+ * the compressed block data, excluding the tree representations.
+ */
+
+ /* Build the bit length tree for the above two trees, and get the index
+ * in bl_order of the last bit length code to send.
+ */
+ max_blindex = build_bl_tree(s);
+
+ /* Determine the best encoding. Compute first the block length in bytes */
+ opt_lenb = (s->opt_len+3+7)>>3;
+ static_lenb = (s->static_len+3+7)>>3;
+
+ Tracev((stderr, "\nopt %lu(%lu) stat %lu(%lu) stored %lu lit %u ",
+ opt_lenb, s->opt_len, static_lenb, s->static_len, stored_len,
+ s->last_lit));
+
+ if (static_lenb <= opt_lenb) opt_lenb = static_lenb;
+
+ } else {
+ Assert(buf != (char*)0, "lost buf");
+ opt_lenb = static_lenb = stored_len + 5; /* force a stored block */
+ }
+
+ /* If compression failed and this is the first and last block,
+ * and if the .zip file can be seeked (to rewrite the local header),
+ * the whole file is transformed into a stored file:
+ */
+#ifdef STORED_FILE_OK
+# ifdef FORCE_STORED_FILE
+ if (eof && s->compressed_len == 0L) { /* force stored file */
+# else
+ if (stored_len <= opt_lenb && eof && s->compressed_len==0L && seekable()) {
+# endif
+ /* Since LIT_BUFSIZE <= 2*WSIZE, the input data must be there: */
+ if (buf == (charf*)0) error ("block vanished");
+
+ copy_block(s, buf, (unsigned)stored_len, 0); /* without header */
+ s->compressed_len = stored_len << 3;
+ s->method = STORED;
+ } else
+#endif /* STORED_FILE_OK */
+
+#ifdef FORCE_STORED
+ if (buf != (char*)0) { /* force stored block */
+#else
+ if (stored_len+4 <= opt_lenb && buf != (char*)0) {
+ /* 4: two words for the lengths */
+#endif
+ /* The test buf != NULL is only necessary if LIT_BUFSIZE > WSIZE.
+ * Otherwise we can't have processed more than WSIZE input bytes since
+ * the last block flush, because compression would have been
+ * successful. If LIT_BUFSIZE <= WSIZE, it is never too late to
+ * transform a block into a stored block.
+ */
+ _tr_stored_block(s, buf, stored_len, eof);
+
+#ifdef FORCE_STATIC
+ } else if (static_lenb >= 0) { /* force static trees */
+#else
+ } else if (static_lenb == opt_lenb) {
+#endif
+ send_bits(s, (STATIC_TREES<<1)+eof, 3);
+ compress_block(s, (ct_data *)static_ltree, (ct_data *)static_dtree);
+ s->compressed_len += 3 + s->static_len;
+ } else {
+ send_bits(s, (DYN_TREES<<1)+eof, 3);
+ send_all_trees(s, s->l_desc.max_code+1, s->d_desc.max_code+1,
+ max_blindex+1);
+ compress_block(s, (ct_data *)s->dyn_ltree, (ct_data *)s->dyn_dtree);
+ s->compressed_len += 3 + s->opt_len;
+ }
+ Assert (s->compressed_len == s->bits_sent, "bad compressed size");
+ init_block(s);
+
+ if (eof) {
+ bi_windup(s);
+ s->compressed_len += 7; /* align on byte boundary */
+ }
+ Tracev((stderr,"\ncomprlen %lu(%lu) ", s->compressed_len>>3,
+ s->compressed_len-7*eof));
+
+ return s->compressed_len >> 3;
+}
+
+/* ===========================================================================
+ * Save the match info and tally the frequency counts. Return true if
+ * the current block must be flushed.
+ */
+int _tr_tally (s, dist, lc)
+ deflate_state *s;
+ unsigned dist; /* distance of matched string */
+ unsigned lc; /* match length-MIN_MATCH or unmatched char (if dist==0) */
+{
+ s->d_buf[s->last_lit] = (ush)dist;
+ s->l_buf[s->last_lit++] = (uch)lc;
+ if (dist == 0) {
+ /* lc is the unmatched char */
+ s->dyn_ltree[lc].Freq++;
+ } else {
+ s->matches++;
+ /* Here, lc is the match length - MIN_MATCH */
+ dist--; /* dist = match distance - 1 */
+ Assert((ush)dist < (ush)MAX_DIST(s) &&
+ (ush)lc <= (ush)(MAX_MATCH-MIN_MATCH) &&
+ (ush)d_code(dist) < (ush)D_CODES, "_tr_tally: bad match");
+
+ s->dyn_ltree[length_code[lc]+LITERALS+1].Freq++;
+ s->dyn_dtree[d_code(dist)].Freq++;
+ }
+
+ /* Try to guess if it is profitable to stop the current block here */
+ if (s->level > 2 && (s->last_lit & 0xfff) == 0) {
+ /* Compute an upper bound for the compressed length */
+ ulg out_length = (ulg)s->last_lit*8L;
+ ulg in_length = (ulg)((long)s->strstart - s->block_start);
+ int dcode;
+ for (dcode = 0; dcode < D_CODES; dcode++) {
+ out_length += (ulg)s->dyn_dtree[dcode].Freq *
+ (5L+extra_dbits[dcode]);
+ }
+ out_length >>= 3;
+ Tracev((stderr,"\nlast_lit %u, in %ld, out ~%ld(%ld%%) ",
+ s->last_lit, in_length, out_length,
+ 100L - out_length*100L/in_length));
+ if (s->matches < s->last_lit/2 && out_length < in_length/2) return 1;
+ }
+ return (s->last_lit == s->lit_bufsize-1);
+ /* We avoid equality with lit_bufsize because of wraparound at 64K
+ * on 16 bit machines and because stored blocks are restricted to
+ * 64K-1 bytes.
+ */
+}
+
+/* ===========================================================================
+ * Send the block data compressed using the given Huffman trees
+ */
+local void compress_block(s, ltree, dtree)
+ deflate_state *s;
+ ct_data *ltree; /* literal tree */
+ ct_data *dtree; /* distance tree */
+{
+ unsigned dist; /* distance of matched string */
+ int lc; /* match length or unmatched char (if dist == 0) */
+ unsigned lx = 0; /* running index in l_buf */
+ unsigned code; /* the code to send */
+ int extra; /* number of extra bits to send */
+
+ if (s->last_lit != 0) do {
+ dist = s->d_buf[lx];
+ lc = s->l_buf[lx++];
+ if (dist == 0) {
+ send_code(s, lc, ltree); /* send a literal byte */
+ Tracecv(isgraph(lc), (stderr," '%c' ", lc));
+ } else {
+ /* Here, lc is the match length - MIN_MATCH */
+ code = length_code[lc];
+ send_code(s, code+LITERALS+1, ltree); /* send the length code */
+ extra = extra_lbits[code];
+ if (extra != 0) {
+ lc -= base_length[code];
+ send_bits(s, lc, extra); /* send the extra length bits */
+ }
+ dist--; /* dist is now the match distance - 1 */
+ code = d_code(dist);
+ Assert (code < D_CODES, "bad d_code");
+
+ send_code(s, code, dtree); /* send the distance code */
+ extra = extra_dbits[code];
+ if (extra != 0) {
+ dist -= base_dist[code];
+ send_bits(s, dist, extra); /* send the extra distance bits */
+ }
+ } /* literal or match pair ? */
+
+ /* Check that the overlay between pending_buf and d_buf+l_buf is ok: */
+ Assert(s->pending < s->lit_bufsize + 2*lx, "pendingBuf overflow");
+
+ } while (lx < s->last_lit);
+
+ send_code(s, END_BLOCK, ltree);
+ s->last_eob_len = ltree[END_BLOCK].Len;
+}
+
+/* ===========================================================================
+ * Set the data type to ASCII or BINARY, using a crude approximation:
+ * binary if more than 20% of the bytes are <= 6 or >= 128, ascii otherwise.
+ * IN assertion: the fields freq of dyn_ltree are set and the total of all
+ * frequencies does not exceed 64K (to fit in an int on 16 bit machines).
+ */
+local void set_data_type(s)
+ deflate_state *s;
+{
+ int n = 0;
+ unsigned ascii_freq = 0;
+ unsigned bin_freq = 0;
+ while (n < 7) bin_freq += s->dyn_ltree[n++].Freq;
+ while (n < 128) ascii_freq += s->dyn_ltree[n++].Freq;
+ while (n < LITERALS) bin_freq += s->dyn_ltree[n++].Freq;
+ s->data_type = (Byte)(bin_freq > (ascii_freq >> 2) ? Z_BINARY : Z_ASCII);
+}
+
+/* ===========================================================================
+ * Reverse the first len bits of a code, using straightforward code (a faster
+ * method would use a table)
+ * IN assertion: 1 <= len <= 15
+ */
+local unsigned bi_reverse(code, len)
+ unsigned code; /* the value to invert */
+ int len; /* its bit length */
+{
+ register unsigned res = 0;
+ do {
+ res |= code & 1;
+ code >>= 1, res <<= 1;
+ } while (--len > 0);
+ return res >> 1;
+}
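+
+/* Example: bi_reverse(0xB, 4) turns binary 1011 into 1101 and returns
+ * 0xD. Codes are stored bit-reversed because the bit buffer is filled
+ * LSB first, while Huffman codes are defined from the most significant
+ * bit down.
+ */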
+
+/* ===========================================================================
+ * Flush the bit buffer, keeping at most 7 bits in it.
+ */
+local void bi_flush(s)
+ deflate_state *s;
+{
+ if (s->bi_valid == 16) {
+ put_short(s, s->bi_buf);
+ s->bi_buf = 0;
+ s->bi_valid = 0;
+ } else if (s->bi_valid >= 8) {
+ put_byte(s, (Byte)s->bi_buf);
+ s->bi_buf >>= 8;
+ s->bi_valid -= 8;
+ }
+}
+
+/* ===========================================================================
+ * Flush the bit buffer and align the output on a byte boundary
+ */
+local void bi_windup(s)
+ deflate_state *s;
+{
+ if (s->bi_valid > 8) {
+ put_short(s, s->bi_buf);
+ } else if (s->bi_valid > 0) {
+ put_byte(s, (Byte)s->bi_buf);
+ }
+ s->bi_buf = 0;
+ s->bi_valid = 0;
+#ifdef DEBUG_ZLIB
+ s->bits_sent = (s->bits_sent+7) & ~7;
+#endif
+}
+
+/* ===========================================================================
+ * Copy a stored block, storing first the length and its
+ * one's complement if requested.
+ */
+local void copy_block(s, buf, len, header)
+ deflate_state *s;
+ charf *buf; /* the input data */
+ unsigned len; /* its length */
+ int header; /* true if block header must be written */
+{
+ bi_windup(s); /* align on byte boundary */
+ s->last_eob_len = 8; /* enough lookahead for inflate */
+
+ if (header) {
+ put_short(s, (ush)len);
+ put_short(s, (ush)~len);
+#ifdef DEBUG_ZLIB
+ s->bits_sent += 2*16;
+#endif
+ }
+#ifdef DEBUG_ZLIB
+ s->bits_sent += (ulg)len<<3;
+#endif
+ /* bundle up the put_byte(s, *buf++) calls */
+ zmemcpy(&s->pending_buf[s->pending], buf, len);
+ s->pending += len;
+}
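+
+/* When header is non-zero the payload is preceded by LEN and NLEN (its
+ * one's complement), both emitted LSB first; e.g. len == 0x0102 yields
+ * the four header bytes 02 01 fd fe before the raw data.
+ */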
+/* --- trees.c */
+
+/* +++ inflate.c */
+/* inflate.c -- zlib interface to inflate modules
+ * Copyright (C) 1995-1996 Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/* #include <rtems/freebsd/local/zutil.h> */
+
+/* +++ infblock.h */
+/* infblock.h -- header to use infblock.c
+ * Copyright (C) 1995-1996 Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/* WARNING: this file should *not* be used by applications. It is
+ part of the implementation of the compression library and is
+ subject to change. Applications should only use zlib.h.
+ */
+
+struct inflate_blocks_state;
+typedef struct inflate_blocks_state FAR inflate_blocks_statef;
+
+extern inflate_blocks_statef * inflate_blocks_new OF((
+ z_streamp z,
+ check_func c, /* check function */
+ uInt w)); /* window size */
+
+extern int inflate_blocks OF((
+ inflate_blocks_statef *,
+ z_streamp ,
+ int)); /* initial return code */
+
+extern void inflate_blocks_reset OF((
+ inflate_blocks_statef *,
+ z_streamp ,
+ uLongf *)); /* check value on output */
+
+extern int inflate_blocks_free OF((
+ inflate_blocks_statef *,
+ z_streamp ,
+ uLongf *)); /* check value on output */
+
+extern void inflate_set_dictionary OF((
+ inflate_blocks_statef *s,
+ const Bytef *d, /* dictionary */
+ uInt n)); /* dictionary length */
+
+extern int inflate_addhistory OF((
+ inflate_blocks_statef *,
+ z_streamp));
+
+extern int inflate_packet_flush OF((
+ inflate_blocks_statef *));
+/* --- infblock.h */
+
+#ifndef NO_DUMMY_DECL
+struct inflate_blocks_state {int dummy;}; /* for buggy compilers */
+#endif
+
+/* inflate private state */
+struct internal_state {
+
+ /* mode */
+ enum {
+ METHOD, /* waiting for method byte */
+ FLAG, /* waiting for flag byte */
+ DICT4, /* four dictionary check bytes to go */
+ DICT3, /* three dictionary check bytes to go */
+ DICT2, /* two dictionary check bytes to go */
+ DICT1, /* one dictionary check byte to go */
+ DICT0, /* waiting for inflateSetDictionary */
+ BLOCKS, /* decompressing blocks */
+ CHECK4, /* four check bytes to go */
+ CHECK3, /* three check bytes to go */
+ CHECK2, /* two check bytes to go */
+ CHECK1, /* one check byte to go */
+ DONE, /* finished check, done */
+ BAD} /* got an error--stay here */
+ mode; /* current inflate mode */
+
+ /* mode dependent information */
+ union {
+ uInt method; /* if FLAG, method byte */
+ struct {
+ uLong was; /* computed check value */
+ uLong need; /* stream check value */
+ } check; /* if CHECK, check values to compare */
+ uInt marker; /* if BAD, inflateSync's marker bytes count */
+ } sub; /* submode */
+
+ /* mode independent information */
+ int nowrap; /* flag for no wrapper */
+ uInt wbits; /* log2(window size) (8..15, defaults to 15) */
+ inflate_blocks_statef
+ *blocks; /* current inflate_blocks state */
+
+};
+
+
+int inflateReset(z)
+z_streamp z;
+{
+ uLong c;
+
+ if (z == Z_NULL || z->state == Z_NULL)
+ return Z_STREAM_ERROR;
+ z->total_in = z->total_out = 0;
+ z->msg = Z_NULL;
+ z->state->mode = z->state->nowrap ? BLOCKS : METHOD;
+ inflate_blocks_reset(z->state->blocks, z, &c);
+ Trace((stderr, "inflate: reset\n"));
+ return Z_OK;
+}
+
+
+int inflateEnd(z)
+z_streamp z;
+{
+ uLong c;
+
+ if (z == Z_NULL || z->state == Z_NULL || z->zfree == Z_NULL)
+ return Z_STREAM_ERROR;
+ if (z->state->blocks != Z_NULL)
+ inflate_blocks_free(z->state->blocks, z, &c);
+ ZFREE(z, z->state);
+ z->state = Z_NULL;
+ Trace((stderr, "inflate: end\n"));
+ return Z_OK;
+}
+
+
+int inflateInit2_(z, w, version, stream_size)
+z_streamp z;
+int w;
+const char *version;
+int stream_size;
+{
+ if (version == Z_NULL || version[0] != ZLIB_VERSION[0] ||
+ stream_size != sizeof(z_stream))
+ return Z_VERSION_ERROR;
+
+ /* initialize state */
+ if (z == Z_NULL)
+ return Z_STREAM_ERROR;
+ z->msg = Z_NULL;
+#ifndef NO_ZCFUNCS
+ if (z->zalloc == Z_NULL)
+ {
+ z->zalloc = zcalloc;
+ z->opaque = (voidpf)0;
+ }
+ if (z->zfree == Z_NULL) z->zfree = zcfree;
+#endif
+ if ((z->state = (struct internal_state FAR *)
+ ZALLOC(z,1,sizeof(struct internal_state))) == Z_NULL)
+ return Z_MEM_ERROR;
+ z->state->blocks = Z_NULL;
+
+ /* handle undocumented nowrap option (no zlib header or check) */
+ z->state->nowrap = 0;
+ if (w < 0)
+ {
+ w = - w;
+ z->state->nowrap = 1;
+ }
+
+ /* set window size */
+ if (w < 8 || w > 15)
+ {
+ inflateEnd(z);
+ return Z_STREAM_ERROR;
+ }
+ z->state->wbits = (uInt)w;
+
+ /* create inflate_blocks state */
+ if ((z->state->blocks =
+ inflate_blocks_new(z, z->state->nowrap ? Z_NULL : adler32, (uInt)1 << w))
+ == Z_NULL)
+ {
+ inflateEnd(z);
+ return Z_MEM_ERROR;
+ }
+ Trace((stderr, "inflate: allocated\n"));
+
+ /* reset state */
+ inflateReset(z);
+ return Z_OK;
+}
+
+
+int inflateInit_(z, version, stream_size)
+z_streamp z;
+const char *version;
+int stream_size;
+{
+ return inflateInit2_(z, DEF_WBITS, version, stream_size);
+}
+
+
+#define NEEDBYTE {if(z->avail_in==0)goto empty;r=Z_OK;}
+#define NEXTBYTE (z->avail_in--,z->total_in++,*z->next_in++)
+
+int inflate(z, f)
+z_streamp z;
+int f;
+{
+ int r;
+ uInt b;
+
+ if (z == Z_NULL || z->state == Z_NULL || z->next_in == Z_NULL || f < 0)
+ return Z_STREAM_ERROR;
+ r = Z_BUF_ERROR;
+ while (1) switch (z->state->mode)
+ {
+ case METHOD:
+ NEEDBYTE
+ if (((z->state->sub.method = NEXTBYTE) & 0xf) != Z_DEFLATED)
+ {
+ z->state->mode = BAD;
+ z->msg = (char*)"unknown compression method";
+ z->state->sub.marker = 5; /* can't try inflateSync */
+ break;
+ }
+ if ((z->state->sub.method >> 4) + 8 > z->state->wbits)
+ {
+ z->state->mode = BAD;
+ z->msg = (char*)"invalid window size";
+ z->state->sub.marker = 5; /* can't try inflateSync */
+ break;
+ }
+ z->state->mode = FLAG;
+ case FLAG:
+ NEEDBYTE
+ b = NEXTBYTE;
+ if (((z->state->sub.method << 8) + b) % 31)
+ {
+ z->state->mode = BAD;
+ z->msg = (char*)"incorrect header check";
+ z->state->sub.marker = 5; /* can't try inflateSync */
+ break;
+ }
+ Trace((stderr, "inflate: zlib header ok\n"));
+ if (!(b & PRESET_DICT))
+ {
+ z->state->mode = BLOCKS;
+ break;
+ }
+ z->state->mode = DICT4;
+ case DICT4:
+ NEEDBYTE
+ z->state->sub.check.need = (uLong)NEXTBYTE << 24;
+ z->state->mode = DICT3;
+ case DICT3:
+ NEEDBYTE
+ z->state->sub.check.need += (uLong)NEXTBYTE << 16;
+ z->state->mode = DICT2;
+ case DICT2:
+ NEEDBYTE
+ z->state->sub.check.need += (uLong)NEXTBYTE << 8;
+ z->state->mode = DICT1;
+ case DICT1:
+ NEEDBYTE
+ z->state->sub.check.need += (uLong)NEXTBYTE;
+ z->adler = z->state->sub.check.need;
+ z->state->mode = DICT0;
+ return Z_NEED_DICT;
+ case DICT0:
+ z->state->mode = BAD;
+ z->msg = (char*)"need dictionary";
+ z->state->sub.marker = 0; /* can try inflateSync */
+ return Z_STREAM_ERROR;
+ case BLOCKS:
+ r = inflate_blocks(z->state->blocks, z, r);
+ if (f == Z_PACKET_FLUSH && z->avail_in == 0 && z->avail_out != 0)
+ r = inflate_packet_flush(z->state->blocks);
+ if (r == Z_DATA_ERROR)
+ {
+ z->state->mode = BAD;
+ z->state->sub.marker = 0; /* can try inflateSync */
+ break;
+ }
+ if (r != Z_STREAM_END)
+ return r;
+ r = Z_OK;
+ inflate_blocks_reset(z->state->blocks, z, &z->state->sub.check.was);
+ if (z->state->nowrap)
+ {
+ z->state->mode = DONE;
+ break;
+ }
+ z->state->mode = CHECK4;
+ case CHECK4:
+ NEEDBYTE
+ z->state->sub.check.need = (uLong)NEXTBYTE << 24;
+ z->state->mode = CHECK3;
+ case CHECK3:
+ NEEDBYTE
+ z->state->sub.check.need += (uLong)NEXTBYTE << 16;
+ z->state->mode = CHECK2;
+ case CHECK2:
+ NEEDBYTE
+ z->state->sub.check.need += (uLong)NEXTBYTE << 8;
+ z->state->mode = CHECK1;
+ case CHECK1:
+ NEEDBYTE
+ z->state->sub.check.need += (uLong)NEXTBYTE;
+
+ if (z->state->sub.check.was != z->state->sub.check.need)
+ {
+ z->state->mode = BAD;
+ z->msg = (char*)"incorrect data check";
+ z->state->sub.marker = 5; /* can't try inflateSync */
+ break;
+ }
+ Trace((stderr, "inflate: zlib check ok\n"));
+ z->state->mode = DONE;
+ case DONE:
+ return Z_STREAM_END;
+ case BAD:
+ return Z_DATA_ERROR;
+ default:
+ return Z_STREAM_ERROR;
+ }
+
+ empty:
+ if (f != Z_PACKET_FLUSH)
+ return r;
+ z->state->mode = BAD;
+ z->msg = (char *)"need more for packet flush";
+ z->state->sub.marker = 0; /* can try inflateSync */
+ return Z_DATA_ERROR;
+}
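+
+/* Minimal caller sketch (hypothetical buffers in_buf/out_buf and
+ * lengths; error handling elided). The stream is primed once, then
+ * inflate() is called until it reports Z_STREAM_END:
+ *
+ * int r;
+ * z_stream zs; // zeroed, so zalloc/zfree start out Z_NULL
+ * inflateInit_(&zs, ZLIB_VERSION, sizeof(z_stream));
+ * zs.next_in = in_buf; zs.avail_in = in_len;
+ * do {
+ * zs.next_out = out_buf; zs.avail_out = out_len;
+ * r = inflate(&zs, Z_NO_FLUSH);
+ * // consume out_len - zs.avail_out bytes from out_buf
+ * } while (r == Z_OK);
+ * inflateEnd(&zs);
+ */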
+
+
+int inflateSetDictionary(z, dictionary, dictLength)
+z_streamp z;
+const Bytef *dictionary;
+uInt dictLength;
+{
+ uInt length = dictLength;
+
+ if (z == Z_NULL || z->state == Z_NULL || z->state->mode != DICT0)
+ return Z_STREAM_ERROR;
+
+ if (adler32(1L, dictionary, dictLength) != z->adler) return Z_DATA_ERROR;
+ z->adler = 1L;
+
+ if (length >= ((uInt)1<<z->state->wbits))
+ {
+ length = (1<<z->state->wbits)-1;
+ dictionary += dictLength - length;
+ }
+ inflate_set_dictionary(z->state->blocks, dictionary, length);
+ z->state->mode = BLOCKS;
+ return Z_OK;
+}
+
+/*
+ * This subroutine adds the data at next_in/avail_in to the output history
+ * without performing any output. The output buffer must be "caught up";
+ * i.e. no pending output (hence s->read equals s->write), and the state must
+ * be BLOCKS (i.e. we should be willing to see the start of a series of
+ * BLOCKS). On exit, the output will also be caught up, and the checksum
+ * will have been updated if need be.
+ */
+
+int inflateIncomp(z)
+z_stream *z;
+{
+ if (z->state->mode != BLOCKS)
+ return Z_DATA_ERROR;
+ return inflate_addhistory(z->state->blocks, z);
+}
+
+
+int inflateSync(z)
+z_streamp z;
+{
+ uInt n; /* number of bytes to look at */
+ Bytef *p; /* pointer to bytes */
+ uInt m; /* number of marker bytes found in a row */
+ uLong r, w; /* temporaries to save total_in and total_out */
+
+ /* set up */
+ if (z == Z_NULL || z->state == Z_NULL)
+ return Z_STREAM_ERROR;
+ if (z->state->mode != BAD)
+ {
+ z->state->mode = BAD;
+ z->state->sub.marker = 0;
+ }
+ if ((n = z->avail_in) == 0)
+ return Z_BUF_ERROR;
+ p = z->next_in;
+ m = z->state->sub.marker;
+
+ /* search */
+ while (n && m < 4)
+ {
+ if (*p == (Byte)(m < 2 ? 0 : 0xff))
+ m++;
+ else if (*p)
+ m = 0;
+ else
+ m = 4 - m;
+ p++, n--;
+ }
+
+ /* restore */
+ z->total_in += p - z->next_in;
+ z->next_in = p;
+ z->avail_in = n;
+ z->state->sub.marker = m;
+
+ /* return no joy or set up to restart on a new block */
+ if (m != 4)
+ return Z_DATA_ERROR;
+ r = z->total_in; w = z->total_out;
+ inflateReset(z);
+ z->total_in = r; z->total_out = w;
+ z->state->mode = BLOCKS;
+ return Z_OK;
+}
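+
+/* The scan above hunts for the byte pattern 00 00 ff ff: the LEN/NLEN
+ * pair of an empty stored block, which a full flush on the deflate
+ * side emits. The match is heuristic--the pattern could in principle
+ * occur by chance inside compressed data--but it is the conventional
+ * deflate resynchronization point.
+ */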
+
+#undef NEEDBYTE
+#undef NEXTBYTE
+/* --- inflate.c */
+
+/* +++ infblock.c */
+/* infblock.c -- interpret and process block types to last block
+ * Copyright (C) 1995-1996 Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/* #include <rtems/freebsd/local/zutil.h> */
+/* #include <rtems/freebsd/local/infblock.h> */
+
+/* +++ inftrees.h */
+/* inftrees.h -- header to use inftrees.c
+ * Copyright (C) 1995-1996 Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/* WARNING: this file should *not* be used by applications. It is
+ part of the implementation of the compression library and is
+ subject to change. Applications should only use zlib.h.
+ */
+
+/* Huffman code lookup table entry--this entry is four bytes for machines
+ that have 16-bit pointers (e.g. PC's in the small or medium model). */
+
+typedef struct inflate_huft_s FAR inflate_huft;
+
+struct inflate_huft_s {
+ union {
+ struct {
+ Byte Exop; /* number of extra bits or operation */
+ Byte Bits; /* number of bits in this code or subcode */
+ } what;
+ Bytef *pad; /* pad structure to a power of 2 (4 bytes for */
+ } word; /* 16-bit, 8 bytes for 32-bit machines) */
+ union {
+ uInt Base; /* literal, length base, or distance base */
+ inflate_huft *Next; /* pointer to next level of table */
+ } more;
+};
+
+#ifdef DEBUG_ZLIB
+ extern uInt inflate_hufts;
+#endif
+
+extern int inflate_trees_bits OF((
+ uIntf *, /* 19 code lengths */
+ uIntf *, /* bits tree desired/actual depth */
+ inflate_huft * FAR *, /* bits tree result */
+ z_streamp )); /* for zalloc, zfree functions */
+
+extern int inflate_trees_dynamic OF((
+ uInt, /* number of literal/length codes */
+ uInt, /* number of distance codes */
+ uIntf *, /* that many (total) code lengths */
+ uIntf *, /* literal desired/actual bit depth */
+ uIntf *, /* distance desired/actual bit depth */
+ inflate_huft * FAR *, /* literal/length tree result */
+ inflate_huft * FAR *, /* distance tree result */
+ z_streamp )); /* for zalloc, zfree functions */
+
+extern int inflate_trees_fixed OF((
+ uIntf *, /* literal desired/actual bit depth */
+ uIntf *, /* distance desired/actual bit depth */
+ inflate_huft * FAR *, /* literal/length tree result */
+ inflate_huft * FAR *)); /* distance tree result */
+
+extern int inflate_trees_free OF((
+ inflate_huft *, /* tables to free */
+ z_streamp )); /* for zfree function */
+
+/* --- inftrees.h */
+
+/* +++ infcodes.h */
+/* infcodes.h -- header to use infcodes.c
+ * Copyright (C) 1995-1996 Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/* WARNING: this file should *not* be used by applications. It is
+ part of the implementation of the compression library and is
+ subject to change. Applications should only use zlib.h.
+ */
+
+struct inflate_codes_state;
+typedef struct inflate_codes_state FAR inflate_codes_statef;
+
+extern inflate_codes_statef *inflate_codes_new OF((
+ uInt, uInt,
+ inflate_huft *, inflate_huft *,
+ z_streamp ));
+
+extern int inflate_codes OF((
+ inflate_blocks_statef *,
+ z_streamp ,
+ int));
+
+extern void inflate_codes_free OF((
+ inflate_codes_statef *,
+ z_streamp ));
+
+/* --- infcodes.h */
+
+/* +++ infutil.h */
+/* infutil.h -- types and macros common to blocks and codes
+ * Copyright (C) 1995-1996 Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/* WARNING: this file should *not* be used by applications. It is
+ part of the implementation of the compression library and is
+ subject to change. Applications should only use zlib.h.
+ */
+
+#ifndef _INFUTIL_H
+#define _INFUTIL_H
+
+typedef enum {
+ TYPE, /* get type bits (3, including end bit) */
+ LENS, /* get lengths for stored */
+ STORED, /* processing stored block */
+ TABLE, /* get table lengths */
+ BTREE, /* get bit lengths tree for a dynamic block */
+ DTREE, /* get length, distance trees for a dynamic block */
+ CODES, /* processing fixed or dynamic block */
+ DRY, /* output remaining window bytes */
+ DONEB, /* finished last block, done */
+ BADB} /* got a data error--stuck here */
+inflate_block_mode;
+
+/* inflate blocks semi-private state */
+struct inflate_blocks_state {
+
+ /* mode */
+ inflate_block_mode mode; /* current inflate_block mode */
+
+ /* mode dependent information */
+ union {
+ uInt left; /* if STORED, bytes left to copy */
+ struct {
+ uInt table; /* table lengths (14 bits) */
+ uInt index; /* index into blens (or border) */
+ uIntf *blens; /* bit lengths of codes */
+ uInt bb; /* bit length tree depth */
+ inflate_huft *tb; /* bit length decoding tree */
+ } trees; /* if DTREE, decoding info for trees */
+ struct {
+ inflate_huft *tl;
+ inflate_huft *td; /* trees to free */
+ inflate_codes_statef
+ *codes;
+ } decode; /* if CODES, current state */
+ } sub; /* submode */
+ uInt last; /* true if this block is the last block */
+
+ /* mode independent information */
+ uInt bitk; /* bits in bit buffer */
+ uLong bitb; /* bit buffer */
+ Bytef *window; /* sliding window */
+ Bytef *end; /* one byte after sliding window */
+ Bytef *read; /* window read pointer */
+ Bytef *write; /* window write pointer */
+ check_func checkfn; /* check function */
+ uLong check; /* check on output */
+
+};
+
+
+/* defines for inflate input/output */
+/* update pointers and return */
+#define UPDBITS {s->bitb=b;s->bitk=k;}
+#define UPDIN {z->avail_in=n;z->total_in+=p-z->next_in;z->next_in=p;}
+#define UPDOUT {s->write=q;}
+#define UPDATE {UPDBITS UPDIN UPDOUT}
+#define LEAVE {UPDATE return inflate_flush(s,z,r);}
+/* get bytes and bits */
+#define LOADIN {p=z->next_in;n=z->avail_in;b=s->bitb;k=s->bitk;}
+#define NEEDBYTE {if(n)r=Z_OK;else LEAVE}
+#define NEXTBYTE (n--,*p++)
+#define NEEDBITS(j) {while(k<(j)){NEEDBYTE;b|=((uLong)NEXTBYTE)<<k;k+=8;}}
+#define DUMPBITS(j) {b>>=(j);k-=(j);}
+/* output bytes */
+#define WAVAIL (uInt)(q<s->read?s->read-q-1:s->end-q)
+#define LOADOUT {q=s->write;m=(uInt)WAVAIL;}
+#define WWRAP {if(q==s->end&&s->read!=s->window){q=s->window;m=(uInt)WAVAIL;}}
+#define FLUSH {UPDOUT r=inflate_flush(s,z,r); LOADOUT}
+#define NEEDOUT {if(m==0){WWRAP if(m==0){FLUSH WWRAP if(m==0) LEAVE}}r=Z_OK;}
+#define OUTBYTE(a) {*q++=(Byte)(a);m--;}
+/* load local pointers */
+#define LOAD {LOADIN LOADOUT}
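+
+/* These macros are used pairwise: LOAD pulls the stream state into the
+ * locals p/n/b/k and q/m on entry; NEEDBITS(j) refills the bit buffer b
+ * one input byte at a time until it holds at least j bits, LEAVEing if
+ * input runs dry; the caller reads (uInt)b & inflate_mask[j] and then
+ * DUMPBITS(j) consumes the bits. UPDATE writes everything back, and
+ * LEAVE additionally flushes pending window output via inflate_flush().
+ */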
+
+/* masks for lower bits (size given to avoid silly warnings with Visual C++) */
+extern uInt inflate_mask[17];
+
+/* copy as much as possible from the sliding window to the output area */
+extern int inflate_flush OF((
+ inflate_blocks_statef *,
+ z_streamp ,
+ int));
+
+#ifndef NO_DUMMY_DECL
+struct internal_state {int dummy;}; /* for buggy compilers */
+#endif
+
+#endif
+/* --- infutil.h */
+
+#ifndef NO_DUMMY_DECL
+struct inflate_codes_state {int dummy;}; /* for buggy compilers */
+#endif
+
+/* Table for deflate from PKZIP's appnote.txt. */
+local const uInt border[] = { /* Order of the bit length code lengths */
+ 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15};
+
+/*
+ Notes beyond the 1.93a appnote.txt:
+
+ 1. Distance pointers never point before the beginning of the output
+ stream.
+ 2. Distance pointers can point back across blocks, up to 32k away.
+ 3. There is an implied maximum of 7 bits for the bit length table and
+ 15 bits for the actual data.
+ 4. If only one code exists, then it is encoded using one bit. (Zero
+ would be more efficient, but perhaps a little confusing.) If two
+ codes exist, they are coded using one bit each (0 and 1).
+ 5. There is no way of sending zero distance codes--a dummy must be
+ sent if there are none. (History: a pre 2.0 version of PKZIP would
+ store blocks with no distance codes, but this was discovered to be
+ too harsh a criterion.) Valid only for 1.93a. 2.04c does allow
+ zero distance codes, which are sent as one code of zero bits in
+ length.
+ 6. There are up to 286 literal/length codes. Code 256 represents the
+ end-of-block. Note however that the static length tree defines
+ 288 codes just to fill out the Huffman codes. Codes 286 and 287
+ cannot be used though, since there is no length base or extra bits
+ defined for them. Similarly, there are up to 30 distance codes.
+ However, static trees define 32 codes (all 5 bits) to fill out the
+ Huffman codes, but the last two had better not show up in the data.
+ 7. Unzip can check dynamic Huffman blocks for complete code sets.
+ The exception is that a single code would not be complete (see #4).
+ 8. The five bits following the block type is really the number of
+ literal codes sent minus 257.
+ 9. Length codes 8,16,16 are interpreted as 13 length codes of 8 bits
+ (1+6+6). Therefore, to output three times the length, you output
+ three codes (1+1+1), whereas to output four times the same length,
+ you only need two codes (1+3). Hmm. (See the worked example after
+ these notes.)
+ 10. In the tree reconstruction algorithm, Code = Code + Increment
+ only if BitLength(i) is not zero. (Pretty obvious.)
+ 11. Correction: 4 Bits: # of Bit Length codes - 4 (4 - 19)
+ 12. Note: length code 284 can represent 227-258, but length code 285
+ really is 258. The last length deserves its own, short code
+ since it gets used a lot in very redundant files. The length
+ 258 is special since 258 - 3 (the min match length) is 255.
+ 13. The literal/length and distance code bit lengths are read as a
+ single stream of lengths. It is possible (and advantageous) for
+ a repeat code (16, 17, or 18) to go across the boundary between
+ the two sets of lengths.
+ */
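+
+/* Worked example for note 9 above: the length-code sequence 8,
+ * 16(extra bits 11), 16(extra bits 11) expands to thirteen lengths of
+ * 8--the explicit 8 plus two maximal repeats of six--whereas four 8s
+ * in a row are coded as 8 followed by a single 16 with extra bits 00
+ * (repeat three).
+ */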
+
+
+void inflate_blocks_reset(s, z, c)
+inflate_blocks_statef *s;
+z_streamp z;
+uLongf *c;
+{
+ if (s->checkfn != Z_NULL)
+ *c = s->check;
+ if (s->mode == BTREE || s->mode == DTREE)
+ ZFREE(z, s->sub.trees.blens);
+ if (s->mode == CODES)
+ {
+ inflate_codes_free(s->sub.decode.codes, z);
+ inflate_trees_free(s->sub.decode.td, z);
+ inflate_trees_free(s->sub.decode.tl, z);
+ }
+ s->mode = TYPE;
+ s->bitk = 0;
+ s->bitb = 0;
+ s->read = s->write = s->window;
+ if (s->checkfn != Z_NULL)
+ z->adler = s->check = (*s->checkfn)(0L, Z_NULL, 0);
+ Trace((stderr, "inflate: blocks reset\n"));
+}
+
+
+inflate_blocks_statef *inflate_blocks_new(z, c, w)
+z_streamp z;
+check_func c;
+uInt w;
+{
+ inflate_blocks_statef *s;
+
+ if ((s = (inflate_blocks_statef *)ZALLOC
+ (z,1,sizeof(struct inflate_blocks_state))) == Z_NULL)
+ return s;
+ if ((s->window = (Bytef *)ZALLOC(z, 1, w)) == Z_NULL)
+ {
+ ZFREE(z, s);
+ return Z_NULL;
+ }
+ s->end = s->window + w;
+ s->checkfn = c;
+ s->mode = TYPE;
+ Trace((stderr, "inflate: blocks allocated\n"));
+ inflate_blocks_reset(s, z, &s->check);
+ return s;
+}
+
+
+#ifdef DEBUG_ZLIB
+ extern uInt inflate_hufts;
+#endif
+int inflate_blocks(s, z, r)
+inflate_blocks_statef *s;
+z_streamp z;
+int r;
+{
+ uInt t; /* temporary storage */
+ uLong b; /* bit buffer */
+ uInt k; /* bits in bit buffer */
+ Bytef *p; /* input data pointer */
+ uInt n; /* bytes available there */
+ Bytef *q; /* output window write pointer */
+ uInt m; /* bytes to end of window or read pointer */
+
+ /* copy input/output information to locals (UPDATE macro restores) */
+ LOAD
+
+ /* process input based on current state */
+ while (1) switch (s->mode)
+ {
+ case TYPE:
+ NEEDBITS(3)
+ t = (uInt)b & 7;
+ s->last = t & 1;
+ switch (t >> 1)
+ {
+ case 0: /* stored */
+ Trace((stderr, "inflate: stored block%s\n",
+ s->last ? " (last)" : ""));
+ DUMPBITS(3)
+ t = k & 7; /* go to byte boundary */
+ DUMPBITS(t)
+ s->mode = LENS; /* get length of stored block */
+ break;
+ case 1: /* fixed */
+ Trace((stderr, "inflate: fixed codes block%s\n",
+ s->last ? " (last)" : ""));
+ {
+ uInt bl, bd;
+ inflate_huft *tl, *td;
+
+ inflate_trees_fixed(&bl, &bd, &tl, &td);
+ s->sub.decode.codes = inflate_codes_new(bl, bd, tl, td, z);
+ if (s->sub.decode.codes == Z_NULL)
+ {
+ r = Z_MEM_ERROR;
+ LEAVE
+ }
+ s->sub.decode.tl = Z_NULL; /* don't try to free these */
+ s->sub.decode.td = Z_NULL;
+ }
+ DUMPBITS(3)
+ s->mode = CODES;
+ break;
+ case 2: /* dynamic */
+ Trace((stderr, "inflate: dynamic codes block%s\n",
+ s->last ? " (last)" : ""));
+ DUMPBITS(3)
+ s->mode = TABLE;
+ break;
+ case 3: /* illegal */
+ DUMPBITS(3)
+ s->mode = BADB;
+ z->msg = (char*)"invalid block type";
+ r = Z_DATA_ERROR;
+ LEAVE
+ }
+ break;
+ case LENS:
+ NEEDBITS(32)
+ if ((((~b) >> 16) & 0xffff) != (b & 0xffff))
+ {
+ s->mode = BADB;
+ z->msg = (char*)"invalid stored block lengths";
+ r = Z_DATA_ERROR;
+ LEAVE
+ }
+ s->sub.left = (uInt)b & 0xffff;
+ b = k = 0; /* dump bits */
+ Tracev((stderr, "inflate: stored length %u\n", s->sub.left));
+ s->mode = s->sub.left ? STORED : (s->last ? DRY : TYPE);
+ break;
+ case STORED:
+ if (n == 0)
+ LEAVE
+ NEEDOUT
+ t = s->sub.left;
+ if (t > n) t = n;
+ if (t > m) t = m;
+ zmemcpy(q, p, t);
+ p += t; n -= t;
+ q += t; m -= t;
+ if ((s->sub.left -= t) != 0)
+ break;
+ Tracev((stderr, "inflate: stored end, %lu total out\n",
+ z->total_out + (q >= s->read ? q - s->read :
+ (s->end - s->read) + (q - s->window))));
+ s->mode = s->last ? DRY : TYPE;
+ break;
+ case TABLE:
+ NEEDBITS(14)
+ s->sub.trees.table = t = (uInt)b & 0x3fff;
+#ifndef PKZIP_BUG_WORKAROUND
+ if ((t & 0x1f) > 29 || ((t >> 5) & 0x1f) > 29)
+ {
+ s->mode = BADB;
+ z->msg = (char*)"too many length or distance symbols";
+ r = Z_DATA_ERROR;
+ LEAVE
+ }
+#endif
+ t = 258 + (t & 0x1f) + ((t >> 5) & 0x1f);
+ if (t < 19)
+ t = 19;
+ if ((s->sub.trees.blens = (uIntf*)ZALLOC(z, t, sizeof(uInt))) == Z_NULL)
+ {
+ r = Z_MEM_ERROR;
+ LEAVE
+ }
+ DUMPBITS(14)
+ s->sub.trees.index = 0;
+ Tracev((stderr, "inflate: table sizes ok\n"));
+ s->mode = BTREE;
+ case BTREE:
+ while (s->sub.trees.index < 4 + (s->sub.trees.table >> 10))
+ {
+ NEEDBITS(3)
+ s->sub.trees.blens[border[s->sub.trees.index++]] = (uInt)b & 7;
+ DUMPBITS(3)
+ }
+ while (s->sub.trees.index < 19)
+ s->sub.trees.blens[border[s->sub.trees.index++]] = 0;
+ s->sub.trees.bb = 7;
+ t = inflate_trees_bits(s->sub.trees.blens, &s->sub.trees.bb,
+ &s->sub.trees.tb, z);
+ if (t != Z_OK)
+ {
+ r = t;
+ if (r == Z_DATA_ERROR) {
+ ZFREE(z, s->sub.trees.blens);
+ s->mode = BADB;
+ }
+ LEAVE
+ }
+ s->sub.trees.index = 0;
+ Tracev((stderr, "inflate: bits tree ok\n"));
+ s->mode = DTREE;
+ case DTREE:
+ while (t = s->sub.trees.table,
+ s->sub.trees.index < 258 + (t & 0x1f) + ((t >> 5) & 0x1f))
+ {
+ inflate_huft *h;
+ uInt i, j, c;
+
+ t = s->sub.trees.bb;
+ NEEDBITS(t)
+ h = s->sub.trees.tb + ((uInt)b & inflate_mask[t]);
+ t = h->word.what.Bits;
+ c = h->more.Base;
+ if (c < 16)
+ {
+ DUMPBITS(t)
+ s->sub.trees.blens[s->sub.trees.index++] = c;
+ }
+ else /* c == 16..18 */
+ {
+ i = c == 18 ? 7 : c - 14;
+ j = c == 18 ? 11 : 3;
+ NEEDBITS(t + i)
+ DUMPBITS(t)
+ j += (uInt)b & inflate_mask[i];
+ DUMPBITS(i)
+ i = s->sub.trees.index;
+ t = s->sub.trees.table;
+ if (i + j > 258 + (t & 0x1f) + ((t >> 5) & 0x1f) ||
+ (c == 16 && i < 1))
+ {
+ inflate_trees_free(s->sub.trees.tb, z);
+ ZFREE(z, s->sub.trees.blens);
+ s->mode = BADB;
+ z->msg = (char*)"invalid bit length repeat";
+ r = Z_DATA_ERROR;
+ LEAVE
+ }
+ c = c == 16 ? s->sub.trees.blens[i - 1] : 0;
+ do {
+ s->sub.trees.blens[i++] = c;
+ } while (--j);
+ s->sub.trees.index = i;
+ }
+ }
+ inflate_trees_free(s->sub.trees.tb, z);
+ s->sub.trees.tb = Z_NULL;
+ {
+ uInt bl, bd;
+ inflate_huft *tl, *td;
+ inflate_codes_statef *c;
+
+ bl = 9; /* must be <= 9 for lookahead assumptions */
+ bd = 6; /* must be <= 9 for lookahead assumptions */
+ t = s->sub.trees.table;
+#ifdef DEBUG_ZLIB
+ inflate_hufts = 0;
+#endif
+ t = inflate_trees_dynamic(257 + (t & 0x1f), 1 + ((t >> 5) & 0x1f),
+ s->sub.trees.blens, &bl, &bd, &tl, &td, z);
+ if (t != Z_OK)
+ {
+ if (t == (uInt)Z_DATA_ERROR) {
+ ZFREE(z, s->sub.trees.blens);
+ s->mode = BADB;
+ }
+ r = t;
+ LEAVE
+ }
+ Tracev((stderr, "inflate: trees ok, %d * %d bytes used\n",
+ inflate_hufts, sizeof(inflate_huft)));
+ if ((c = inflate_codes_new(bl, bd, tl, td, z)) == Z_NULL)
+ {
+ inflate_trees_free(td, z);
+ inflate_trees_free(tl, z);
+ r = Z_MEM_ERROR;
+ LEAVE
+ }
+ /*
+ * this ZFREE must occur *BEFORE* we mess with sub.decode, because
+ * sub.trees is union'd with sub.decode.
+ */
+ ZFREE(z, s->sub.trees.blens);
+ s->sub.decode.codes = c;
+ s->sub.decode.tl = tl;
+ s->sub.decode.td = td;
+ }
+ s->mode = CODES;
+ case CODES:
+ UPDATE
+ if ((r = inflate_codes(s, z, r)) != Z_STREAM_END)
+ return inflate_flush(s, z, r);
+ r = Z_OK;
+ inflate_codes_free(s->sub.decode.codes, z);
+ inflate_trees_free(s->sub.decode.td, z);
+ inflate_trees_free(s->sub.decode.tl, z);
+ LOAD
+ Tracev((stderr, "inflate: codes end, %lu total out\n",
+ z->total_out + (q >= s->read ? q - s->read :
+ (s->end - s->read) + (q - s->window))));
+ if (!s->last)
+ {
+ s->mode = TYPE;
+ break;
+ }
+ if (k > 7) /* return unused byte, if any */
+ {
+ Assert(k < 16, "inflate_codes grabbed too many bytes")
+ k -= 8;
+ n++;
+ p--; /* can always return one */
+ }
+ s->mode = DRY;
+ case DRY:
+ FLUSH
+ if (s->read != s->write)
+ LEAVE
+ s->mode = DONEB;
+ case DONEB:
+ r = Z_STREAM_END;
+ LEAVE
+ case BADB:
+ r = Z_DATA_ERROR;
+ LEAVE
+ default:
+ r = Z_STREAM_ERROR;
+ LEAVE
+ }
+}
+
+
+int inflate_blocks_free(s, z, c)
+inflate_blocks_statef *s;
+z_streamp z;
+uLongf *c;
+{
+ inflate_blocks_reset(s, z, c);
+ ZFREE(z, s->window);
+ ZFREE(z, s);
+ Trace((stderr, "inflate: blocks freed\n"));
+ return Z_OK;
+}
+
+
+void inflate_set_dictionary(s, d, n)
+inflate_blocks_statef *s;
+const Bytef *d;
+uInt n;
+{
+ zmemcpy((charf *)s->window, d, n);
+ s->read = s->write = s->window + n;
+}
+
+/*
+ * This subroutine adds the data at next_in/avail_in to the output history
+ * without performing any output. The output buffer must be "caught up";
+ * i.e. no pending output (hence s->read equals s->write), and the state must
+ * be BLOCKS (i.e. we should be willing to see the start of a series of
+ * BLOCKS). On exit, the output will also be caught up, and the checksum
+ * will have been updated if need be.
+ */
+int inflate_addhistory(s, z)
+inflate_blocks_statef *s;
+z_stream *z;
+{
+ uLong b; /* bit buffer */ /* NOT USED HERE */
+ uInt k; /* bits in bit buffer */ /* NOT USED HERE */
+ uInt t; /* temporary storage */
+ Bytef *p; /* input data pointer */
+ uInt n; /* bytes available there */
+ Bytef *q; /* output window write pointer */
+ uInt m; /* bytes to end of window or read pointer */
+
+ if (s->read != s->write)
+ return Z_STREAM_ERROR;
+ if (s->mode != TYPE)
+ return Z_DATA_ERROR;
+
+ /* we're ready to rock */
+ LOAD
+ /* while there is input ready, copy to output buffer, moving
+ * pointers as needed.
+ */
+ while (n) {
+ t = n; /* how many to do */
+ /* is there room until end of buffer? */
+ if (t > m) t = m;
+ /* update check information */
+ if (s->checkfn != Z_NULL)
+ s->check = (*s->checkfn)(s->check, q, t);
+ zmemcpy(q, p, t);
+ q += t;
+ p += t;
+ n -= t;
+ z->total_out += t;
+ s->read = q; /* drag read pointer forward */
+/* WWRAP */ /* expand WWRAP macro by hand to handle s->read */
+ if (q == s->end) {
+ s->read = q = s->window;
+ m = WAVAIL;
+ }
+ }
+ UPDATE
+ return Z_OK;
+}
+
+
+/*
+ * At the end of a Deflate-compressed PPP packet, we expect to have seen
+ * a `stored' block type value but not the (zero) length bytes.
+ */
+int inflate_packet_flush(s)
+ inflate_blocks_statef *s;
+{
+ if (s->mode != LENS)
+ return Z_DATA_ERROR;
+ s->mode = TYPE;
+ return Z_OK;
+}
+/* --- infblock.c */
+
+/* +++ inftrees.c */
+/* inftrees.c -- generate Huffman trees for efficient decoding
+ * Copyright (C) 1995-1996 Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/* #include <rtems/freebsd/local/zutil.h> */
+/* #include <rtems/freebsd/local/inftrees.h> */
+
+char inflate_copyright[] = " inflate 1.0.4 Copyright 1995-1996 Mark Adler ";
+/*
+ If you use the zlib library in a product, an acknowledgment is welcome
+ in the documentation of your product. If for some reason you cannot
+ include such an acknowledgment, I would appreciate that you keep this
+ copyright string in the executable of your product.
+ */
+
+#ifndef NO_DUMMY_DECL
+struct internal_state {int dummy;}; /* for buggy compilers */
+#endif
+
+/* simplify the use of the inflate_huft type with some defines */
+#define base more.Base
+#define next more.Next
+#define exop word.what.Exop
+#define bits word.what.Bits
+
+
+local int huft_build OF((
+ uIntf *, /* code lengths in bits */
+ uInt, /* number of codes */
+ uInt, /* number of "simple" codes */
+ const uIntf *, /* list of base values for non-simple codes */
+ const uIntf *, /* list of extra bits for non-simple codes */
+ inflate_huft * FAR*,/* result: starting table */
+ uIntf *, /* maximum lookup bits (returns actual) */
+ z_streamp )); /* for zalloc function */
+
+local voidpf falloc OF((
+ voidpf, /* opaque pointer (not used) */
+ uInt, /* number of items */
+ uInt)); /* size of item */
+
+/* Tables for deflate from PKZIP's appnote.txt. */
+local const uInt cplens[31] = { /* Copy lengths for literal codes 257..285 */
+ 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31,
+ 35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0};
+ /* see note #13 above about 258 */
+local const uInt cplext[31] = { /* Extra bits for literal codes 257..285 */
+ 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2,
+ 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0, 112, 112}; /* 112==invalid */
+local const uInt cpdist[30] = { /* Copy offsets for distance codes 0..29 */
+ 1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129, 193,
+ 257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145,
+ 8193, 12289, 16385, 24577};
+local const uInt cpdext[30] = { /* Extra bits for distance codes */
+ 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6,
+ 7, 7, 8, 8, 9, 9, 10, 10, 11, 11,
+ 12, 12, 13, 13};
+
+/*
+ Huffman code decoding is performed using a multi-level table lookup.
+ The fastest way to decode is to simply build a lookup table whose
+ size is determined by the longest code. However, the time it takes
+ to build this table can also be a factor if the data being decoded
+ is not very long. The most common codes are necessarily the
+ shortest codes, so those codes dominate the decoding time, and hence
+ the speed. The idea is you can have a shorter table that decodes the
+ shorter, more probable codes, and then point to subsidiary tables for
+ the longer codes. The time it costs to decode the longer codes is
+ then traded against the time it takes to make longer tables.
+
+ The results of this trade are in the variables lbits and dbits
+ below. lbits is the number of bits the first level table for literal/
+ length codes can decode in one step, and dbits is the same thing for
+ the distance codes. Subsequent tables are also less than or equal to
+ those sizes. These values may be adjusted either when all of the
+ codes are shorter than that, in which case the longest code length in
+ bits is used, or when the shortest code is *longer* than the requested
+ table size, in which case the length of the shortest code in bits is
+ used.
+
+ There are two different values for the two tables, since they code a
+ different number of possibilities each. The literal/length table
+ codes 286 possible values, or in a flat code, a little over eight
+ bits. The distance table codes 30 possible values, or a little less
+ than five bits, flat. The optimum values for speed end up being
+ about one bit more than those, so lbits is 8+1 and dbits is 5+1.
+ The optimum values may differ though from machine to machine, and
+ possibly even between compilers. Your mileage may vary.
+ */
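+
+/* Concretely: with lbits == 9 the first-level literal/length table has
+ * 1 << 9 entries, one per possible 9-bit input prefix, so any code of
+ * nine bits or fewer decodes in a single indexed load. A longer code
+ * lands on an entry whose exop/bits fields redirect through the Next
+ * pointer into a small second-level table covering its remaining bits.
+ */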
+
+
+/* If BMAX needs to be larger than 16, then h and x[] should be uLong. */
+#define BMAX 15 /* maximum bit length of any code */
+#define N_MAX 288 /* maximum number of codes in any set */
+
+#ifdef DEBUG_ZLIB
+ uInt inflate_hufts;
+#endif
+
+local int huft_build(b, n, s, d, e, t, m, zs)
+uIntf *b; /* code lengths in bits (all assumed <= BMAX) */
+uInt n; /* number of codes (assumed <= N_MAX) */
+uInt s; /* number of simple-valued codes (0..s-1) */
+const uIntf *d; /* list of base values for non-simple codes */
+const uIntf *e; /* list of extra bits for non-simple codes */
+inflate_huft * FAR *t; /* result: starting table */
+uIntf *m; /* maximum lookup bits, returns actual */
+z_streamp zs; /* for zalloc function */
+/* Given a list of code lengths and a maximum table size, make a set of
+ tables to decode that set of codes. Return Z_OK on success, Z_BUF_ERROR
+ if the given code set is incomplete (the tables are still built in this
+ case), Z_DATA_ERROR if the input is invalid (an over-subscribed set of
+ lengths), or Z_MEM_ERROR if not enough memory. */
+{
+
+ uInt a; /* counter for codes of length k */
+ uInt c[BMAX+1]; /* bit length count table */
+ uInt f; /* i repeats in table every f entries */
+ int g; /* maximum code length */
+ int h; /* table level */
+ register uInt i; /* counter, current code */
+ register uInt j; /* counter */
+ register int k; /* number of bits in current code */
+ int l; /* bits per table (returned in m) */
+ register uIntf *p; /* pointer into c[], b[], or v[] */
+ inflate_huft *q; /* points to current table */
+ struct inflate_huft_s r; /* table entry for structure assignment */
+ inflate_huft *u[BMAX]; /* table stack */
+ uInt v[N_MAX]; /* values in order of bit length */
+ register int w; /* bits before this table == (l * h) */
+ uInt x[BMAX+1]; /* bit offsets, then code stack */
+ uIntf *xp; /* pointer into x */
+ int y; /* number of dummy codes added */
+ uInt z; /* number of entries in current table */
+
+
+ /* Generate counts for each bit length */
+ p = c;
+#define C0 *p++ = 0;
+#define C2 C0 C0 C0 C0
+#define C4 C2 C2 C2 C2
+ C4 /* clear c[]--assume BMAX+1 is 16 */
+ p = b; i = n;
+ do {
+ c[*p++]++; /* assume all entries <= BMAX */
+ } while (--i);
+ if (c[0] == n) /* null input--all zero length codes */
+ {
+ *t = (inflate_huft *)Z_NULL;
+ *m = 0;
+ return Z_OK;
+ }
+
+
+ /* Find minimum and maximum length, bound *m by those */
+ l = *m;
+ for (j = 1; j <= BMAX; j++)
+ if (c[j])
+ break;
+ k = j; /* minimum code length */
+ if ((uInt)l < j)
+ l = j;
+ for (i = BMAX; i; i--)
+ if (c[i])
+ break;
+ g = i; /* maximum code length */
+ if ((uInt)l > i)
+ l = i;
+ *m = l;
+
+
+ /* Adjust last length count to fill out codes, if needed */
+ for (y = 1 << j; j < i; j++, y <<= 1)
+ if ((y -= c[j]) < 0)
+ return Z_DATA_ERROR;
+ if ((y -= c[i]) < 0)
+ return Z_DATA_ERROR;
+ c[i] += y;
+
+
+ /* Generate starting offsets into the value table for each length */
+ x[1] = j = 0;
+ p = c + 1; xp = x + 2;
+ while (--i) { /* note that i == g from above */
+ *xp++ = (j += *p++);
+ }
+
+
+ /* Make a table of values in order of bit lengths */
+ p = b; i = 0;
+ do {
+ if ((j = *p++) != 0)
+ v[x[j]++] = i;
+ } while (++i < n);
+ n = x[g]; /* set n to length of v */
+
+
+ /* Generate the Huffman codes and for each, make the table entries */
+ x[0] = i = 0; /* first Huffman code is zero */
+ p = v; /* grab values in bit order */
+ h = -1; /* no tables yet--level -1 */
+ w = -l; /* bits decoded == (l * h) */
+ u[0] = (inflate_huft *)Z_NULL; /* just to keep compilers happy */
+ q = (inflate_huft *)Z_NULL; /* ditto */
+ z = 0; /* ditto */
+
+ /* go through the bit lengths (k already is bits in shortest code) */
+ for (; k <= g; k++)
+ {
+ a = c[k];
+ while (a--)
+ {
+ /* here i is the Huffman code of length k bits for value *p */
+ /* make tables up to required level */
+ while (k > w + l)
+ {
+ h++;
+ w += l; /* previous table always l bits */
+
+ /* compute minimum size table less than or equal to l bits */
+ z = g - w;
+ z = z > (uInt)l ? l : z; /* table size upper limit */
+ if ((f = 1 << (j = k - w)) > a + 1) /* try a k-w bit table */
+ { /* too few codes for k-w bit table */
+ f -= a + 1; /* deduct codes from patterns left */
+ xp = c + k;
+ if (j < z)
+ while (++j < z) /* try smaller tables up to z bits */
+ {
+ if ((f <<= 1) <= *++xp)
+ break; /* enough codes to use up j bits */
+ f -= *xp; /* else deduct codes from patterns */
+ }
+ }
+ z = 1 << j; /* table entries for j-bit table */
+
+ /* allocate and link in new table */
+ if ((q = (inflate_huft *)ZALLOC
+ (zs,z + 1,sizeof(inflate_huft))) == Z_NULL)
+ {
+ if (h)
+ inflate_trees_free(u[0], zs);
+ return Z_MEM_ERROR; /* not enough memory */
+ }
+#ifdef DEBUG_ZLIB
+ inflate_hufts += z + 1;
+#endif
+ *t = q + 1; /* link to list for huft_free() */
+ *(t = &(q->next)) = Z_NULL;
+ u[h] = ++q; /* table starts after link */
+
+ /* connect to last table, if there is one */
+ if (h)
+ {
+ x[h] = i; /* save pattern for backing up */
+ r.bits = (Byte)l; /* bits to dump before this table */
+ r.exop = (Byte)j; /* bits in this table */
+ r.next = q; /* pointer to this table */
+ j = i >> (w - l); /* (get around Turbo C bug) */
+ u[h-1][j] = r; /* connect to last table */
+ }
+ }
+
+ /* set up table entry in r */
+ r.bits = (Byte)(k - w);
+ if (p >= v + n)
+ r.exop = 128 + 64; /* out of values--invalid code */
+ else if (*p < s)
+ {
+ r.exop = (Byte)(*p < 256 ? 0 : 32 + 64); /* 256 is end-of-block */
+ r.base = *p++; /* simple code is just the value */
+ }
+ else
+ {
+ r.exop = (Byte)(e[*p - s] + 16 + 64);/* non-simple--look up in lists */
+ r.base = d[*p++ - s];
+ }
+
+ /* fill code-like entries with r */
+ f = 1 << (k - w);
+ for (j = i >> w; j < z; j += f)
+ q[j] = r;
+
+ /* backwards increment the k-bit code i */
+ for (j = 1 << (k - 1); i & j; j >>= 1)
+ i ^= j;
+ i ^= j;
+
+ /* backup over finished tables */
+ while ((i & ((1 << w) - 1)) != x[h])
+ {
+ h--; /* don't need to update q */
+ w -= l;
+ }
+ }
+ }
+
+
+ /* Return Z_BUF_ERROR if we were given an incomplete table */
+ return y != 0 && g != 1 ? Z_BUF_ERROR : Z_OK;
+}
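+
+/* Editorial note, not part of the original zlib sources: huft_build()
+ * turns the list of code lengths into a multi-level lookup table. The
+ * root table resolves up to *m bits per step, and longer codes chain
+ * through the 'next' links into sub-tables of at most *m bits each;
+ * these links are what the "next table" branches in inflate_codes()
+ * and inflate_fast() below follow.
+ */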
+
+
+int inflate_trees_bits(c, bb, tb, z)
+uIntf *c; /* 19 code lengths */
+uIntf *bb; /* bits tree desired/actual depth */
+inflate_huft * FAR *tb; /* bits tree result */
+z_streamp z; /* for zfree function */
+{
+ int r;
+
+ r = huft_build(c, 19, 19, (uIntf*)Z_NULL, (uIntf*)Z_NULL, tb, bb, z);
+ if (r == Z_DATA_ERROR)
+ z->msg = (char*)"oversubscribed dynamic bit lengths tree";
+ else if (r == Z_BUF_ERROR || *bb == 0)
+ {
+ inflate_trees_free(*tb, z);
+ z->msg = (char*)"incomplete dynamic bit lengths tree";
+ r = Z_DATA_ERROR;
+ }
+ return r;
+}
+
+
+int inflate_trees_dynamic(nl, nd, c, bl, bd, tl, td, z)
+uInt nl; /* number of literal/length codes */
+uInt nd; /* number of distance codes */
+uIntf *c; /* that many (total) code lengths */
+uIntf *bl; /* literal desired/actual bit depth */
+uIntf *bd; /* distance desired/actual bit depth */
+inflate_huft * FAR *tl; /* literal/length tree result */
+inflate_huft * FAR *td; /* distance tree result */
+z_streamp z; /* for zfree function */
+{
+ int r;
+
+ /* build literal/length tree */
+ r = huft_build(c, nl, 257, cplens, cplext, tl, bl, z);
+ if (r != Z_OK || *bl == 0)
+ {
+ if (r == Z_DATA_ERROR)
+ z->msg = (char*)"oversubscribed literal/length tree";
+ else if (r != Z_MEM_ERROR)
+ {
+ inflate_trees_free(*tl, z);
+ z->msg = (char*)"incomplete literal/length tree";
+ r = Z_DATA_ERROR;
+ }
+ return r;
+ }
+
+ /* build distance tree */
+ r = huft_build(c + nl, nd, 0, cpdist, cpdext, td, bd, z);
+ if (r != Z_OK || (*bd == 0 && nl > 257))
+ {
+ if (r == Z_DATA_ERROR)
+ z->msg = (char*)"oversubscribed distance tree";
+ else if (r == Z_BUF_ERROR) {
+#ifdef PKZIP_BUG_WORKAROUND
+ r = Z_OK;
+ }
+#else
+ inflate_trees_free(*td, z);
+ z->msg = (char*)"incomplete distance tree";
+ r = Z_DATA_ERROR;
+ }
+ else if (r != Z_MEM_ERROR)
+ {
+ z->msg = (char*)"empty distance tree with lengths";
+ r = Z_DATA_ERROR;
+ }
+ inflate_trees_free(*tl, z);
+ return r;
+#endif
+ }
+
+ /* done */
+ return Z_OK;
+}
+
+
+/* build fixed tables only once--keep them here */
+local int fixed_built = 0;
+#define FIXEDH 530 /* number of hufts used by fixed tables */
+local inflate_huft fixed_mem[FIXEDH];
+local uInt fixed_bl;
+local uInt fixed_bd;
+local inflate_huft *fixed_tl;
+local inflate_huft *fixed_td;
+
+
+local voidpf falloc(q, n, s)
+voidpf q; /* opaque pointer */
+uInt n; /* number of items */
+uInt s; /* size of item */
+{
+ Assert(s == sizeof(inflate_huft) && n <= *(intf *)q,
+ "inflate_trees falloc overflow");
+ *(intf *)q -= n+s-s; /* s-s to avoid warning */
+ return (voidpf)(fixed_mem + *(intf *)q);
+}
+
+
+int inflate_trees_fixed(bl, bd, tl, td)
+uIntf *bl; /* literal desired/actual bit depth */
+uIntf *bd; /* distance desired/actual bit depth */
+inflate_huft * FAR *tl; /* literal/length tree result */
+inflate_huft * FAR *td; /* distance tree result */
+{
+ /* build fixed tables if not already (multiple overlapped executions ok) */
+ if (!fixed_built)
+ {
+ int k; /* temporary variable */
+ unsigned c[288]; /* length list for huft_build */
+ z_stream z; /* for falloc function */
+ int f = FIXEDH; /* number of hufts left in fixed_mem */
+
+ /* set up fake z_stream for memory routines */
+ z.zalloc = falloc;
+ z.zfree = Z_NULL;
+ z.opaque = (voidpf)&f;
+
+ /* literal table */
+ for (k = 0; k < 144; k++)
+ c[k] = 8;
+ for (; k < 256; k++)
+ c[k] = 9;
+ for (; k < 280; k++)
+ c[k] = 7;
+ for (; k < 288; k++)
+ c[k] = 8;
+ fixed_bl = 7;
+ huft_build(c, 288, 257, cplens, cplext, &fixed_tl, &fixed_bl, &z);
+
+ /* distance table */
+ for (k = 0; k < 30; k++)
+ c[k] = 5;
+ fixed_bd = 5;
+ huft_build(c, 30, 0, cpdist, cpdext, &fixed_td, &fixed_bd, &z);
+
+ /* done */
+ Assert(f == 0, "invalid build of fixed tables");
+ fixed_built = 1;
+ }
+ *bl = fixed_bl;
+ *bd = fixed_bd;
+ *tl = fixed_tl;
+ *td = fixed_td;
+ return Z_OK;
+}
+
+
+int inflate_trees_free(t, z)
+inflate_huft *t; /* table to free */
+z_streamp z; /* for zfree function */
+/* Free the malloc'ed tables built by huft_build(), which makes a linked
+ list of the tables it made, with the links in a dummy first entry of
+ each table. */
+{
+ register inflate_huft *p, *q, *r;
+
+ /* Reverse linked list */
+ p = Z_NULL;
+ q = t;
+ while (q != Z_NULL)
+ {
+ r = (q - 1)->next;
+ (q - 1)->next = p;
+ p = q;
+ q = r;
+ }
+ /* Go through linked list, freeing from the malloced (t[-1]) address. */
+ while (p != Z_NULL)
+ {
+ q = (--p)->next;
+ ZFREE(z,p);
+ p = q;
+ }
+ return Z_OK;
+}
+/* --- inftrees.c */
+
+/* +++ infcodes.c */
+/* infcodes.c -- process literals and length/distance pairs
+ * Copyright (C) 1995-1996 Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/* #include <rtems/freebsd/local/zutil.h> */
+/* #include <rtems/freebsd/local/inftrees.h> */
+/* #include <rtems/freebsd/local/infblock.h> */
+/* #include <rtems/freebsd/local/infcodes.h> */
+/* #include <rtems/freebsd/local/infutil.h> */
+
+/* +++ inffast.h */
+/* inffast.h -- header to use inffast.c
+ * Copyright (C) 1995-1996 Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/* WARNING: this file should *not* be used by applications. It is
+ part of the implementation of the compression library and is
+ subject to change. Applications should only use zlib.h.
+ */
+
+extern int inflate_fast OF((
+ uInt,
+ uInt,
+ inflate_huft *,
+ inflate_huft *,
+ inflate_blocks_statef *,
+ z_streamp ));
+/* --- inffast.h */
+
+/* simplify the use of the inflate_huft type with some defines */
+#define base more.Base
+#define next more.Next
+#define exop word.what.Exop
+#define bits word.what.Bits
+
+/* inflate codes private state */
+struct inflate_codes_state {
+
+ /* mode */
+ enum { /* waiting for "i:"=input, "o:"=output, "x:"=nothing */
+ START, /* x: set up for LEN */
+ LEN, /* i: get length/literal/eob next */
+ LENEXT, /* i: getting length extra (have base) */
+ DIST, /* i: get distance next */
+ DISTEXT, /* i: getting distance extra */
+ COPY, /* o: copying bytes in window, waiting for space */
+ LIT, /* o: got literal, waiting for output space */
+ WASH, /* o: got eob, possibly still output waiting */
+ END, /* x: got eob and all data flushed */
+ BADCODE} /* x: got error */
+ mode; /* current inflate_codes mode */
+
+ /* mode dependent information */
+ uInt len;
+ union {
+ struct {
+ inflate_huft *tree; /* pointer into tree */
+ uInt need; /* bits needed */
+ } code; /* if LEN or DIST, where in tree */
+ uInt lit; /* if LIT, literal */
+ struct {
+ uInt get; /* bits to get for extra */
+ uInt dist; /* distance back to copy from */
+ } copy; /* if EXT or COPY, where and how much */
+ } sub; /* submode */
+
+ /* mode independent information */
+ Byte lbits; /* ltree bits decoded per branch */
+  Byte dbits;           /* dtree bits decoded per branch */
+ inflate_huft *ltree; /* literal/length/eob tree */
+ inflate_huft *dtree; /* distance tree */
+
+};
+
+
+inflate_codes_statef *inflate_codes_new(bl, bd, tl, td, z)
+uInt bl, bd;
+inflate_huft *tl;
+inflate_huft *td; /* need separate declaration for Borland C++ */
+z_streamp z;
+{
+ inflate_codes_statef *c;
+
+ if ((c = (inflate_codes_statef *)
+ ZALLOC(z,1,sizeof(struct inflate_codes_state))) != Z_NULL)
+ {
+ c->mode = START;
+ c->lbits = (Byte)bl;
+ c->dbits = (Byte)bd;
+ c->ltree = tl;
+ c->dtree = td;
+ Tracev((stderr, "inflate: codes new\n"));
+ }
+ return c;
+}
+
+
+int inflate_codes(s, z, r)
+inflate_blocks_statef *s;
+z_streamp z;
+int r;
+{
+ uInt j; /* temporary storage */
+ inflate_huft *t; /* temporary pointer */
+ uInt e; /* extra bits or operation */
+ uLong b; /* bit buffer */
+ uInt k; /* bits in bit buffer */
+ Bytef *p; /* input data pointer */
+ uInt n; /* bytes available there */
+ Bytef *q; /* output window write pointer */
+ uInt m; /* bytes to end of window or read pointer */
+ Bytef *f; /* pointer to copy strings from */
+ inflate_codes_statef *c = s->sub.decode.codes; /* codes state */
+
+ /* copy input/output information to locals (UPDATE macro restores) */
+ LOAD
+
+ /* process input and output based on current state */
+ while (1) switch (c->mode)
+ { /* waiting for "i:"=input, "o:"=output, "x:"=nothing */
+ case START: /* x: set up for LEN */
+#ifndef SLOW
+ if (m >= 258 && n >= 10)
+ {
+ UPDATE
+ r = inflate_fast(c->lbits, c->dbits, c->ltree, c->dtree, s, z);
+ LOAD
+ if (r != Z_OK)
+ {
+ c->mode = r == Z_STREAM_END ? WASH : BADCODE;
+ break;
+ }
+ }
+#endif /* !SLOW */
+ c->sub.code.need = c->lbits;
+ c->sub.code.tree = c->ltree;
+ c->mode = LEN;
+ case LEN: /* i: get length/literal/eob next */
+ j = c->sub.code.need;
+ NEEDBITS(j)
+ t = c->sub.code.tree + ((uInt)b & inflate_mask[j]);
+ DUMPBITS(t->bits)
+ e = (uInt)(t->exop);
+ if (e == 0) /* literal */
+ {
+ c->sub.lit = t->base;
+ Tracevv((stderr, t->base >= 0x20 && t->base < 0x7f ?
+ "inflate: literal '%c'\n" :
+ "inflate: literal 0x%02x\n", t->base));
+ c->mode = LIT;
+ break;
+ }
+ if (e & 16) /* length */
+ {
+ c->sub.copy.get = e & 15;
+ c->len = t->base;
+ c->mode = LENEXT;
+ break;
+ }
+ if ((e & 64) == 0) /* next table */
+ {
+ c->sub.code.need = e;
+ c->sub.code.tree = t->next;
+ break;
+ }
+ if (e & 32) /* end of block */
+ {
+ Tracevv((stderr, "inflate: end of block\n"));
+ c->mode = WASH;
+ break;
+ }
+ c->mode = BADCODE; /* invalid code */
+ z->msg = (char*)"invalid literal/length code";
+ r = Z_DATA_ERROR;
+ LEAVE
+ case LENEXT: /* i: getting length extra (have base) */
+ j = c->sub.copy.get;
+ NEEDBITS(j)
+ c->len += (uInt)b & inflate_mask[j];
+ DUMPBITS(j)
+ c->sub.code.need = c->dbits;
+ c->sub.code.tree = c->dtree;
+ Tracevv((stderr, "inflate: length %u\n", c->len));
+ c->mode = DIST;
+ case DIST: /* i: get distance next */
+ j = c->sub.code.need;
+ NEEDBITS(j)
+ t = c->sub.code.tree + ((uInt)b & inflate_mask[j]);
+ DUMPBITS(t->bits)
+ e = (uInt)(t->exop);
+ if (e & 16) /* distance */
+ {
+ c->sub.copy.get = e & 15;
+ c->sub.copy.dist = t->base;
+ c->mode = DISTEXT;
+ break;
+ }
+ if ((e & 64) == 0) /* next table */
+ {
+ c->sub.code.need = e;
+ c->sub.code.tree = t->next;
+ break;
+ }
+ c->mode = BADCODE; /* invalid code */
+ z->msg = (char*)"invalid distance code";
+ r = Z_DATA_ERROR;
+ LEAVE
+ case DISTEXT: /* i: getting distance extra */
+ j = c->sub.copy.get;
+ NEEDBITS(j)
+ c->sub.copy.dist += (uInt)b & inflate_mask[j];
+ DUMPBITS(j)
+ Tracevv((stderr, "inflate: distance %u\n", c->sub.copy.dist));
+ c->mode = COPY;
+ case COPY: /* o: copying bytes in window, waiting for space */
+#ifndef __TURBOC__ /* Turbo C bug for following expression */
+ f = (uInt)(q - s->window) < c->sub.copy.dist ?
+ s->end - (c->sub.copy.dist - (q - s->window)) :
+ q - c->sub.copy.dist;
+#else
+ f = q - c->sub.copy.dist;
+ if ((uInt)(q - s->window) < c->sub.copy.dist)
+ f = s->end - (c->sub.copy.dist - (uInt)(q - s->window));
+#endif
+ while (c->len)
+ {
+ NEEDOUT
+ OUTBYTE(*f++)
+ if (f == s->end)
+ f = s->window;
+ c->len--;
+ }
+ c->mode = START;
+ break;
+ case LIT: /* o: got literal, waiting for output space */
+ NEEDOUT
+ OUTBYTE(c->sub.lit)
+ c->mode = START;
+ break;
+ case WASH: /* o: got eob, possibly more output */
+ FLUSH
+ if (s->read != s->write)
+ LEAVE
+ c->mode = END;
+ case END:
+ r = Z_STREAM_END;
+ LEAVE
+ case BADCODE: /* x: got error */
+ r = Z_DATA_ERROR;
+ LEAVE
+ default:
+ r = Z_STREAM_ERROR;
+ LEAVE
+ }
+}
+
+
+void inflate_codes_free(c, z)
+inflate_codes_statef *c;
+z_streamp z;
+{
+ ZFREE(z, c);
+ Tracev((stderr, "inflate: codes free\n"));
+}
+/* --- infcodes.c */
+
+/* +++ infutil.c */
+/* inflate_util.c -- data and routines common to blocks and codes
+ * Copyright (C) 1995-1996 Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/* #include <rtems/freebsd/local/zutil.h> */
+/* #include <rtems/freebsd/local/infblock.h> */
+/* #include <rtems/freebsd/local/inftrees.h> */
+/* #include <rtems/freebsd/local/infcodes.h> */
+/* #include <rtems/freebsd/local/infutil.h> */
+
+#ifndef NO_DUMMY_DECL
+struct inflate_codes_state {int dummy;}; /* for buggy compilers */
+#endif
+
+/* And'ing with mask[n] masks the lower n bits */
+uInt inflate_mask[17] = {
+ 0x0000,
+ 0x0001, 0x0003, 0x0007, 0x000f, 0x001f, 0x003f, 0x007f, 0x00ff,
+ 0x01ff, 0x03ff, 0x07ff, 0x0fff, 0x1fff, 0x3fff, 0x7fff, 0xffff
+};
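+
+/* Editorial note, not part of the original zlib sources: for example,
+ * with the bit buffer b == 0x1d5 and j == 3, (uInt)b & inflate_mask[3]
+ * == 0x1d5 & 0x0007 == 5, i.e. the low three bits just loaded by
+ * NEEDBITS(3).
+ */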
+
+
+/* copy as much as possible from the sliding window to the output area */
+int inflate_flush(s, z, r)
+inflate_blocks_statef *s;
+z_streamp z;
+int r;
+{
+ uInt n;
+ Bytef *p;
+ Bytef *q;
+
+ /* local copies of source and destination pointers */
+ p = z->next_out;
+ q = s->read;
+
+ /* compute number of bytes to copy as far as end of window */
+ n = (uInt)((q <= s->write ? s->write : s->end) - q);
+ if (n > z->avail_out) n = z->avail_out;
+ if (n && r == Z_BUF_ERROR) r = Z_OK;
+
+ /* update counters */
+ z->avail_out -= n;
+ z->total_out += n;
+
+ /* update check information */
+ if (s->checkfn != Z_NULL)
+ z->adler = s->check = (*s->checkfn)(s->check, q, n);
+
+ /* copy as far as end of window */
+ if (p != Z_NULL) {
+ zmemcpy(p, q, n);
+ p += n;
+ }
+ q += n;
+
+ /* see if more to copy at beginning of window */
+ if (q == s->end)
+ {
+ /* wrap pointers */
+ q = s->window;
+ if (s->write == s->end)
+ s->write = s->window;
+
+ /* compute bytes to copy */
+ n = (uInt)(s->write - q);
+ if (n > z->avail_out) n = z->avail_out;
+ if (n && r == Z_BUF_ERROR) r = Z_OK;
+
+ /* update counters */
+ z->avail_out -= n;
+ z->total_out += n;
+
+ /* update check information */
+ if (s->checkfn != Z_NULL)
+ z->adler = s->check = (*s->checkfn)(s->check, q, n);
+
+ /* copy */
+ if (p != Z_NULL) {
+ zmemcpy(p, q, n);
+ p += n;
+ }
+ q += n;
+ }
+
+ /* update pointers */
+ z->next_out = p;
+ s->read = q;
+
+ /* done */
+ return r;
+}
+/* --- infutil.c */
+
+/* +++ inffast.c */
+/* inffast.c -- process literals and length/distance pairs fast
+ * Copyright (C) 1995-1996 Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/* #include <rtems/freebsd/local/zutil.h> */
+/* #include <rtems/freebsd/local/inftrees.h> */
+/* #include <rtems/freebsd/local/infblock.h> */
+/* #include <rtems/freebsd/local/infcodes.h> */
+/* #include <rtems/freebsd/local/infutil.h> */
+/* #include <rtems/freebsd/local/inffast.h> */
+
+#ifndef NO_DUMMY_DECL
+struct inflate_codes_state {int dummy;}; /* for buggy compilers */
+#endif
+
+/* simplify the use of the inflate_huft type with some defines */
+#define base more.Base
+#define next more.Next
+#define exop word.what.Exop
+#define bits word.what.Bits
+
+/* macros for bit input with no checking and for returning unused bytes */
+#define GRABBITS(j) {while(k<(j)){b|=((uLong)NEXTBYTE)<<k;k+=8;}}
+#define UNGRAB {n+=(c=k>>3);p-=c;k&=7;}
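+
+/* Editorial note, not part of the original zlib sources: GRABBITS(j)
+ * loads whole input bytes into the bit buffer b until at least j bits
+ * are available, without checking n (hence the n >= 10 entry condition
+ * below), and UNGRAB returns the k>>3 unused whole bytes left in the
+ * bit buffer to the input before control goes back to the caller.
+ */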
+
+/* Called with number of bytes left to write in window at least 258
+ (the maximum string length) and number of input bytes available
+ at least ten. The ten bytes are six bytes for the longest length/
+ distance pair plus four bytes for overloading the bit buffer. */
+
+int inflate_fast(bl, bd, tl, td, s, z)
+uInt bl, bd;
+inflate_huft *tl;
+inflate_huft *td; /* need separate declaration for Borland C++ */
+inflate_blocks_statef *s;
+z_streamp z;
+{
+ inflate_huft *t; /* temporary pointer */
+ uInt e; /* extra bits or operation */
+ uLong b; /* bit buffer */
+ uInt k; /* bits in bit buffer */
+ Bytef *p; /* input data pointer */
+ uInt n; /* bytes available there */
+ Bytef *q; /* output window write pointer */
+ uInt m; /* bytes to end of window or read pointer */
+ uInt ml; /* mask for literal/length tree */
+ uInt md; /* mask for distance tree */
+ uInt c; /* bytes to copy */
+ uInt d; /* distance back to copy from */
+ Bytef *r; /* copy source pointer */
+
+ /* load input, output, bit values */
+ LOAD
+
+ /* initialize masks */
+ ml = inflate_mask[bl];
+ md = inflate_mask[bd];
+
+ /* do until not enough input or output space for fast loop */
+ do { /* assume called with m >= 258 && n >= 10 */
+ /* get literal/length code */
+ GRABBITS(20) /* max bits for literal/length code */
+ if ((e = (t = tl + ((uInt)b & ml))->exop) == 0)
+ {
+ DUMPBITS(t->bits)
+ Tracevv((stderr, t->base >= 0x20 && t->base < 0x7f ?
+ "inflate: * literal '%c'\n" :
+ "inflate: * literal 0x%02x\n", t->base));
+ *q++ = (Byte)t->base;
+ m--;
+ continue;
+ }
+ do {
+ DUMPBITS(t->bits)
+ if (e & 16)
+ {
+ /* get extra bits for length */
+ e &= 15;
+ c = t->base + ((uInt)b & inflate_mask[e]);
+ DUMPBITS(e)
+ Tracevv((stderr, "inflate: * length %u\n", c));
+
+ /* decode distance base of block to copy */
+ GRABBITS(15); /* max bits for distance code */
+ e = (t = td + ((uInt)b & md))->exop;
+ do {
+ DUMPBITS(t->bits)
+ if (e & 16)
+ {
+ /* get extra bits to add to distance base */
+ e &= 15;
+ GRABBITS(e) /* get extra bits (up to 13) */
+ d = t->base + ((uInt)b & inflate_mask[e]);
+ DUMPBITS(e)
+ Tracevv((stderr, "inflate: * distance %u\n", d));
+
+ /* do the copy */
+ m -= c;
+ if ((uInt)(q - s->window) >= d) /* offset before dest */
+ { /* just copy */
+ r = q - d;
+ *q++ = *r++; c--; /* minimum count is three, */
+ *q++ = *r++; c--; /* so unroll loop a little */
+ }
+ else /* else offset after destination */
+ {
+ e = d - (uInt)(q - s->window); /* bytes from offset to end */
+ r = s->end - e; /* pointer to offset */
+ if (c > e) /* if source crosses, */
+ {
+ c -= e; /* copy to end of window */
+ do {
+ *q++ = *r++;
+ } while (--e);
+ r = s->window; /* copy rest from start of window */
+ }
+ }
+ do { /* copy all or what's left */
+ *q++ = *r++;
+ } while (--c);
+ break;
+ }
+ else if ((e & 64) == 0)
+ e = (t = t->next + ((uInt)b & inflate_mask[e]))->exop;
+ else
+ {
+ z->msg = (char*)"invalid distance code";
+ UNGRAB
+ UPDATE
+ return Z_DATA_ERROR;
+ }
+ } while (1);
+ break;
+ }
+ if ((e & 64) == 0)
+ {
+ if ((e = (t = t->next + ((uInt)b & inflate_mask[e]))->exop) == 0)
+ {
+ DUMPBITS(t->bits)
+ Tracevv((stderr, t->base >= 0x20 && t->base < 0x7f ?
+ "inflate: * literal '%c'\n" :
+ "inflate: * literal 0x%02x\n", t->base));
+ *q++ = (Byte)t->base;
+ m--;
+ break;
+ }
+ }
+ else if (e & 32)
+ {
+ Tracevv((stderr, "inflate: * end of block\n"));
+ UNGRAB
+ UPDATE
+ return Z_STREAM_END;
+ }
+ else
+ {
+ z->msg = (char*)"invalid literal/length code";
+ UNGRAB
+ UPDATE
+ return Z_DATA_ERROR;
+ }
+ } while (1);
+ } while (m >= 258 && n >= 10);
+
+ /* not enough input or output--restore pointers and return */
+ UNGRAB
+ UPDATE
+ return Z_OK;
+}
+/* --- inffast.c */
+
+/* +++ zutil.c */
+/* zutil.c -- target dependent utility functions for the compression library
+ * Copyright (C) 1995-1996 Jean-loup Gailly.
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/* From: zutil.c,v 1.17 1996/07/24 13:41:12 me Exp $ */
+
+#ifdef DEBUG_ZLIB
+#include <rtems/freebsd/stdio.h>
+#endif
+
+/* #include <rtems/freebsd/local/zutil.h> */
+
+#ifndef NO_DUMMY_DECL
+struct internal_state {int dummy;}; /* for buggy compilers */
+#endif
+
+#ifndef STDC
+extern void exit OF((int));
+#endif
+
+static const char *z_errmsg[10] = {
+"need dictionary", /* Z_NEED_DICT 2 */
+"stream end", /* Z_STREAM_END 1 */
+"", /* Z_OK 0 */
+"file error", /* Z_ERRNO (-1) */
+"stream error", /* Z_STREAM_ERROR (-2) */
+"data error", /* Z_DATA_ERROR (-3) */
+"insufficient memory", /* Z_MEM_ERROR (-4) */
+"buffer error", /* Z_BUF_ERROR (-5) */
+"incompatible version",/* Z_VERSION_ERROR (-6) */
+""};
+
+
+const char *zlibVersion()
+{
+ return ZLIB_VERSION;
+}
+
+#ifdef DEBUG_ZLIB
+void z_error (m)
+ char *m;
+{
+ fprintf(stderr, "%s\n", m);
+ exit(1);
+}
+#endif
+
+#ifndef HAVE_MEMCPY
+
+void zmemcpy(dest, source, len)
+ Bytef* dest;
+ Bytef* source;
+ uInt len;
+{
+ if (len == 0) return;
+ do {
+ *dest++ = *source++; /* ??? to be unrolled */
+ } while (--len != 0);
+}
+
+int zmemcmp(s1, s2, len)
+ Bytef* s1;
+ Bytef* s2;
+ uInt len;
+{
+ uInt j;
+
+ for (j = 0; j < len; j++) {
+ if (s1[j] != s2[j]) return 2*(s1[j] > s2[j])-1;
+ }
+ return 0;
+}
+
+void zmemzero(dest, len)
+ Bytef* dest;
+ uInt len;
+{
+ if (len == 0) return;
+ do {
+ *dest++ = 0; /* ??? to be unrolled */
+ } while (--len != 0);
+}
+#endif
+
+#ifdef __TURBOC__
+#if (defined( __BORLANDC__) || !defined(SMALL_MEDIUM)) && !defined(__32BIT__)
+/* Small and medium model in Turbo C are for now limited to near allocation
+ * with reduced MAX_WBITS and MAX_MEM_LEVEL
+ */
+# define MY_ZCALLOC
+
+/* Turbo C malloc() does not allow dynamic allocation of 64K bytes
+ * and farmalloc(64K) returns a pointer with an offset of 8, so we
+ * must fix the pointer. Warning: the pointer must be put back to its
+ * original form in order to free it, use zcfree().
+ */
+
+#define MAX_PTR 10
+/* 10*64K = 640K */
+
+local int next_ptr = 0;
+
+typedef struct ptr_table_s {
+ voidpf org_ptr;
+ voidpf new_ptr;
+} ptr_table;
+
+local ptr_table table[MAX_PTR];
+/* This table is used to remember the original form of pointers
+ * to large buffers (64K). Such pointers are normalized with a zero offset.
+ * Since MSDOS is not a preemptive multitasking OS, this table is not
+ * protected from concurrent access. This hack doesn't work anyway on
+ * a protected system like OS/2. Use Microsoft C instead.
+ */
+
+voidpf zcalloc (voidpf opaque, unsigned items, unsigned size)
+{
+ voidpf buf = opaque; /* just to make some compilers happy */
+ ulg bsize = (ulg)items*size;
+
+ /* If we allocate less than 65520 bytes, we assume that farmalloc
+ * will return a usable pointer which doesn't have to be normalized.
+ */
+ if (bsize < 65520L) {
+ buf = farmalloc(bsize);
+ if (*(ush*)&buf != 0) return buf;
+ } else {
+ buf = farmalloc(bsize + 16L);
+ }
+ if (buf == NULL || next_ptr >= MAX_PTR) return NULL;
+ table[next_ptr].org_ptr = buf;
+
+ /* Normalize the pointer to seg:0 */
+ *((ush*)&buf+1) += ((ush)((uch*)buf-0) + 15) >> 4;
+ *(ush*)&buf = 0;
+ table[next_ptr++].new_ptr = buf;
+ return buf;
+}
+
+void zcfree (voidpf opaque, voidpf ptr)
+{
+ int n;
+ if (*(ush*)&ptr != 0) { /* object < 64K */
+ farfree(ptr);
+ return;
+ }
+ /* Find the original pointer */
+ for (n = 0; n < next_ptr; n++) {
+ if (ptr != table[n].new_ptr) continue;
+
+ farfree(table[n].org_ptr);
+ while (++n < next_ptr) {
+ table[n-1] = table[n];
+ }
+ next_ptr--;
+ return;
+ }
+ ptr = opaque; /* just to make some compilers happy */
+ Assert(0, "zcfree: ptr not found");
+}
+#endif
+#endif /* __TURBOC__ */
+
+
+#if defined(M_I86) && !defined(__32BIT__)
+/* Microsoft C in 16-bit mode */
+
+# define MY_ZCALLOC
+
+#if (!defined(_MSC_VER) || (_MSC_VER < 600))
+# define _halloc halloc
+# define _hfree hfree
+#endif
+
+voidpf zcalloc (voidpf opaque, unsigned items, unsigned size)
+{
+ if (opaque) opaque = 0; /* to make compiler happy */
+ return _halloc((long)items, size);
+}
+
+void zcfree (voidpf opaque, voidpf ptr)
+{
+ if (opaque) opaque = 0; /* to make compiler happy */
+ _hfree(ptr);
+}
+
+#endif /* MSC */
+
+
+#ifndef MY_ZCALLOC /* Any system without a special alloc function */
+
+#ifndef STDC
+extern voidp calloc OF((uInt items, uInt size));
+extern void free OF((voidpf ptr));
+#endif
+
+voidpf zcalloc (opaque, items, size)
+ voidpf opaque;
+ unsigned items;
+ unsigned size;
+{
+ if (opaque) items += size - size; /* make compiler happy */
+ return (voidpf)calloc(items, size);
+}
+
+void zcfree (opaque, ptr)
+ voidpf opaque;
+ voidpf ptr;
+{
+ free(ptr);
+ if (opaque) return; /* make compiler happy */
+}
+
+#endif /* MY_ZCALLOC */
+/* --- zutil.c */
+
+/* +++ adler32.c */
+/* adler32.c -- compute the Adler-32 checksum of a data stream
+ * Copyright (C) 1995-1996 Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/* From: adler32.c,v 1.10 1996/05/22 11:52:18 me Exp $ */
+
+/* #include <rtems/freebsd/local/zlib.h> */
+
+#define BASE 65521L /* largest prime smaller than 65536 */
+#define NMAX 5552
+/* NMAX is the largest n such that 255n(n+1)/2 + (n+1)(BASE-1) <= 2^32-1 */
+
+#define DO1(buf,i) {s1 += buf[(i)]; s2 += s1;}
+#define DO2(buf,i) DO1(buf,i); DO1(buf,(i)+1);
+#define DO4(buf,i) DO2(buf,i); DO2(buf,(i)+2);
+#define DO8(buf,i) DO4(buf,i); DO4(buf,(i)+4);
+#define DO16(buf) DO8(buf,0); DO8(buf,8);
+
+/* ========================================================================= */
+uLong adler32(adler, buf, len)
+ uLong adler;
+ const Bytef *buf;
+ uInt len;
+{
+ unsigned long s1 = adler & 0xffff;
+ unsigned long s2 = (adler >> 16) & 0xffff;
+ int k;
+
+ if (buf == Z_NULL) return 1L;
+
+ while (len > 0) {
+ k = len < NMAX ? len : NMAX;
+ len -= k;
+ while (k >= 16) {
+ DO16(buf);
+ buf += 16;
+ k -= 16;
+ }
+ if (k != 0) do {
+ s1 += *buf++;
+ s2 += s1;
+ } while (--k);
+ s1 %= BASE;
+ s2 %= BASE;
+ }
+ return (s2 << 16) | s1;
+}
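+
+/* Editorial sketch, not part of the original zlib sources: adler32()
+ * keeps s1 = 1 + D1 + ... + Dn (mod 65521) and s2 = the sum of the
+ * successive s1 values (mod 65521), returning s2*65536 + s1. A caller
+ * seeds the running checksum with 1L, which adler32(0L, Z_NULL, 0)
+ * returns, and may split the data into chunks of any size:
+ *
+ *     uLong a = adler32(0L, Z_NULL, 0);
+ *     a = adler32(a, buf, len1);
+ *     a = adler32(a, buf + len1, len2);
+ */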
+/* --- adler32.c */
+
+#ifdef _KERNEL
+static int
+zlib_modevent(module_t mod, int type, void *unused)
+{
+ switch (type) {
+ case MOD_LOAD:
+ return 0;
+ case MOD_UNLOAD:
+ return 0;
+ }
+ return EINVAL;
+}
+
+static moduledata_t zlib_mod = {
+ "zlib",
+ zlib_modevent,
+ 0
+};
+DECLARE_MODULE(zlib, zlib_mod, SI_SUB_DRIVERS, SI_ORDER_FIRST);
+MODULE_VERSION(zlib, 1);
+#endif /* _KERNEL */
diff --git a/rtems/freebsd/net/zlib.h b/rtems/freebsd/net/zlib.h
new file mode 100644
index 00000000..e0829ecf
--- /dev/null
+++ b/rtems/freebsd/net/zlib.h
@@ -0,0 +1,1018 @@
+/* $FreeBSD$ */
+
+/*
+ * This file is derived from zlib.h and zconf.h from the zlib-1.0.4
+ * distribution by Jean-loup Gailly and Mark Adler, with some additions
+ * by Paul Mackerras to aid in implementing Deflate compression and
+ * decompression for PPP packets.
+ */
+
+/*
+ * ==FILEVERSION 971127==
+ *
+ * This marker is used by the Linux installation script to determine
+ * whether an up-to-date version of this file is already installed.
+ */
+
+
+/* +++ zlib.h */
+/*-
+ zlib.h -- interface of the 'zlib' general purpose compression library
+ version 1.0.4, Jul 24th, 1996.
+
+ Copyright (C) 1995-1996 Jean-loup Gailly and Mark Adler
+
+ This software is provided 'as-is', without any express or implied
+ warranty. In no event will the authors be held liable for any damages
+ arising from the use of this software.
+
+ Permission is granted to anyone to use this software for any purpose,
+ including commercial applications, and to alter it and redistribute it
+ freely, subject to the following restrictions:
+
+ 1. The origin of this software must not be misrepresented; you must not
+ claim that you wrote the original software. If you use this software
+ in a product, an acknowledgment in the product documentation would be
+ appreciated but is not required.
+ 2. Altered source versions must be plainly marked as such, and must not be
+ misrepresented as being the original software.
+ 3. This notice may not be removed or altered from any source distribution.
+
+ Jean-loup Gailly Mark Adler
+ gzip@prep.ai.mit.edu madler@alumni.caltech.edu
+*/
+/*
+ The data format used by the zlib library is described by RFCs (Request for
+ Comments) 1950 to 1952 in the files ftp://ds.internic.net/rfc/rfc1950.txt
+ (zlib format), rfc1951.txt (deflate format) and rfc1952.txt (gzip format).
+*/
+
+#ifndef _ZLIB_H
+#define _ZLIB_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+/* +++ zconf.h */
+/* zconf.h -- configuration of the zlib compression library
+ * Copyright (C) 1995-1996 Jean-loup Gailly.
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/* From: zconf.h,v 1.20 1996/07/02 15:09:28 me Exp $ */
+
+#ifndef _ZCONF_H
+#define _ZCONF_H
+
+/*
+ * If you *really* need a unique prefix for all types and library functions,
+ * compile with -DZ_PREFIX. The "standard" zlib should be compiled without it.
+ */
+#ifdef Z_PREFIX
+# define deflateInit_ z_deflateInit_
+# define deflate z_deflate
+# define deflateEnd z_deflateEnd
+# define inflateInit_ z_inflateInit_
+# define inflate z_inflate
+# define inflateEnd z_inflateEnd
+# define deflateInit2_ z_deflateInit2_
+# define deflateSetDictionary z_deflateSetDictionary
+# define deflateCopy z_deflateCopy
+# define deflateReset z_deflateReset
+# define deflateParams z_deflateParams
+# define inflateInit2_ z_inflateInit2_
+# define inflateSetDictionary z_inflateSetDictionary
+# define inflateSync z_inflateSync
+# define inflateReset z_inflateReset
+# define compress z_compress
+# define uncompress z_uncompress
+# define adler32 z_adler32
+#if 0
+# define crc32 z_crc32
+# define get_crc_table z_get_crc_table
+#endif
+
+# define Byte z_Byte
+# define uInt z_uInt
+# define uLong z_uLong
+# define Bytef z_Bytef
+# define charf z_charf
+# define intf z_intf
+# define uIntf z_uIntf
+# define uLongf z_uLongf
+# define voidpf z_voidpf
+# define voidp z_voidp
+#endif
+
+#if (defined(_WIN32) || defined(__WIN32__)) && !defined(WIN32)
+# define WIN32
+#endif
+#if defined(__GNUC__) || defined(WIN32) || defined(__386__) || defined(i386)
+# ifndef __32BIT__
+# define __32BIT__
+# endif
+#endif
+#if defined(__MSDOS__) && !defined(MSDOS)
+# define MSDOS
+#endif
+
+/*
+ * Compile with -DMAXSEG_64K if the alloc function cannot allocate more
+ * than 64k bytes at a time (needed on systems with 16-bit int).
+ */
+#if defined(MSDOS) && !defined(__32BIT__)
+# define MAXSEG_64K
+#endif
+#ifdef MSDOS
+# define UNALIGNED_OK
+#endif
+
+#if (defined(MSDOS) || defined(_WINDOWS) || defined(WIN32)) && !defined(STDC)
+# define STDC
+#endif
+#if (defined(__STDC__) || defined(__cplusplus)) && !defined(STDC)
+# define STDC
+#endif
+
+#ifndef STDC
+# ifndef const /* cannot use !defined(STDC) && !defined(const) on Mac */
+# define const
+# endif
+#endif
+
+/* Some Mac compilers merge all .h files incorrectly: */
+#if defined(__MWERKS__) || defined(applec) ||defined(THINK_C) ||defined(__SC__)
+# define NO_DUMMY_DECL
+#endif
+
+/* Maximum value for memLevel in deflateInit2 */
+#ifndef MAX_MEM_LEVEL
+# ifdef MAXSEG_64K
+# define MAX_MEM_LEVEL 8
+# else
+# define MAX_MEM_LEVEL 9
+# endif
+#endif
+
+/* Maximum value for windowBits in deflateInit2 and inflateInit2 */
+#ifndef MAX_WBITS
+# define MAX_WBITS 15 /* 32K LZ77 window */
+#endif
+
+/* The memory requirements for deflate are (in bytes):
+ 1 << (windowBits+2) + 1 << (memLevel+9)
+ that is: 128K for windowBits=15 + 128K for memLevel = 8 (default values)
+ plus a few kilobytes for small objects. For example, if you want to reduce
+ the default memory requirements from 256K to 128K, compile with
+ make CFLAGS="-O -DMAX_WBITS=14 -DMAX_MEM_LEVEL=7"
+ Of course this will generally degrade compression (there's no free lunch).
+
+ The memory requirements for inflate are (in bytes) 1 << windowBits
+ that is, 32K for windowBits=15 (default value) plus a few kilobytes
+ for small objects.
+*/
+
+ /* Type declarations */
+
+#ifndef OF /* function prototypes */
+# ifdef STDC
+# define OF(args) args
+# else
+# define OF(args) ()
+# endif
+#endif
+
+/* The following definitions for FAR are needed only for MSDOS mixed
+ * model programming (small or medium model with some far allocations).
+ * This was tested only with MSC; for other MSDOS compilers you may have
+ * to define NO_MEMCPY in zutil.h. If you don't need the mixed model,
+ * just define FAR to be empty.
+ */
+#if (defined(M_I86SM) || defined(M_I86MM)) && !defined(__32BIT__)
+ /* MSC small or medium model */
+# define SMALL_MEDIUM
+# ifdef _MSC_VER
+# define FAR __far
+# else
+# define FAR far
+# endif
+#endif
+#if defined(__BORLANDC__) && (defined(__SMALL__) || defined(__MEDIUM__))
+# ifndef __32BIT__
+# define SMALL_MEDIUM
+# define FAR __far
+# endif
+#endif
+#ifndef FAR
+# define FAR
+#endif
+
+typedef unsigned char Byte; /* 8 bits */
+typedef unsigned int uInt; /* 16 bits or more */
+typedef unsigned long uLong; /* 32 bits or more */
+
+#if defined(__BORLANDC__) && defined(SMALL_MEDIUM)
+ /* Borland C/C++ ignores FAR inside typedef */
+# define Bytef Byte FAR
+#else
+ typedef Byte FAR Bytef;
+#endif
+typedef char FAR charf;
+typedef int FAR intf;
+typedef uInt FAR uIntf;
+typedef uLong FAR uLongf;
+
+#ifdef STDC
+ typedef void FAR *voidpf;
+ typedef void *voidp;
+#else
+ typedef Byte FAR *voidpf;
+ typedef Byte *voidp;
+#endif
+
+
+/* Compile with -DZLIB_DLL for Windows DLL support */
+#if (defined(_WINDOWS) || defined(WINDOWS)) && defined(ZLIB_DLL)
+# include <rtems/freebsd/windows.h>
+# define EXPORT WINAPI
+#else
+# define EXPORT
+#endif
+
+#endif /* _ZCONF_H */
+/* --- zconf.h */
+
+#define ZLIB_VERSION "1.0.4P"
+
+/*
+ The 'zlib' compression library provides in-memory compression and
+ decompression functions, including integrity checks of the uncompressed
+ data. This version of the library supports only one compression method
+ (deflation) but other algorithms may be added later and will have the same
+ stream interface.
+
+ For compression the application must provide the output buffer and
+ may optionally provide the input buffer for optimization. For decompression,
+ the application must provide the input buffer and may optionally provide
+ the output buffer for optimization.
+
+ Compression can be done in a single step if the buffers are large
+ enough (for example if an input file is mmap'ed), or can be done by
+ repeated calls of the compression function. In the latter case, the
+ application must provide more input and/or consume the output
+ (providing more output space) before each call.
+
+ The library does not install any signal handler. It is recommended to
+ add at least a handler for SIGSEGV when decompressing; the library checks
+ the consistency of the input data whenever possible but may go nuts
+ for some forms of corrupted input.
+*/
+
+typedef voidpf (*alloc_func) OF((voidpf opaque, uInt items, uInt size));
+typedef void (*free_func) OF((voidpf opaque, voidpf address));
+
+struct internal_state;
+
+typedef struct z_stream_s {
+ Bytef *next_in; /* next input byte */
+ uInt avail_in; /* number of bytes available at next_in */
+ uLong total_in; /* total nb of input bytes read so far */
+
+ Bytef *next_out; /* next output byte should be put there */
+ uInt avail_out; /* remaining free space at next_out */
+ uLong total_out; /* total nb of bytes output so far */
+
+ const char *msg; /* last error message, NULL if no error */
+ struct internal_state FAR *state; /* not visible by applications */
+
+ alloc_func zalloc; /* used to allocate the internal state */
+ free_func zfree; /* used to free the internal state */
+ voidpf opaque; /* private data object passed to zalloc and zfree */
+
+ int data_type; /* best guess about the data type: ascii or binary */
+ uLong adler; /* adler32 value of the uncompressed data */
+ uLong reserved; /* reserved for future use */
+} z_stream;
+
+typedef z_stream FAR *z_streamp;
+
+/*
+ The application must update next_in and avail_in when avail_in has
+ dropped to zero. It must update next_out and avail_out when avail_out
+ has dropped to zero. The application must initialize zalloc, zfree and
+ opaque before calling the init function. All other fields are set by the
+ compression library and must not be updated by the application.
+
+ The opaque value provided by the application will be passed as the first
+ parameter for calls of zalloc and zfree. This can be useful for custom
+ memory management. The compression library attaches no meaning to the
+ opaque value.
+
+ zalloc must return Z_NULL if there is not enough memory for the object.
+ On 16-bit systems, the functions zalloc and zfree must be able to allocate
+ exactly 65536 bytes, but will not be required to allocate more than this
+ if the symbol MAXSEG_64K is defined (see zconf.h). WARNING: On MSDOS,
+ pointers returned by zalloc for objects of exactly 65536 bytes *must*
+ have their offset normalized to zero. The default allocation function
+ provided by this library ensures this (see zutil.c). To reduce memory
+ requirements and avoid any allocation of 64K objects, at the expense of
+ compression ratio, compile the library with -DMAX_WBITS=14 (see zconf.h).
+
+ The fields total_in and total_out can be used for statistics or
+ progress reports. After compression, total_in holds the total size of
+ the uncompressed data and may be saved for use in the decompressor
+ (particularly if the decompressor wants to decompress everything in
+ a single step).
+*/
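+
+/*
+    Editorial example, not part of the original zlib.h text: a minimal
+  stream setup following the rules above, letting the library install
+  its default allocation functions:
+
+     z_stream strm;
+     strm.zalloc = (alloc_func)Z_NULL;
+     strm.zfree = (free_func)Z_NULL;
+     strm.opaque = (voidpf)Z_NULL;
+
+  After this, deflateInit(&strm, Z_DEFAULT_COMPRESSION) or
+  inflateInit(&strm) may be called; the application then updates only
+  next_in/avail_in and next_out/avail_out between calls.
+*/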
+
+ /* constants */
+
+#define Z_NO_FLUSH 0
+#define Z_PARTIAL_FLUSH 1
+#define Z_PACKET_FLUSH 2
+#define Z_SYNC_FLUSH 3
+#define Z_FULL_FLUSH 4
+#define Z_FINISH 5
+/* Allowed flush values; see deflate() below for details */
+
+#define Z_OK 0
+#define Z_STREAM_END 1
+#define Z_NEED_DICT 2
+#define Z_ERRNO (-1)
+#define Z_STREAM_ERROR (-2)
+#define Z_DATA_ERROR (-3)
+#define Z_MEM_ERROR (-4)
+#define Z_BUF_ERROR (-5)
+#define Z_VERSION_ERROR (-6)
+/* Return codes for the compression/decompression functions. Negative
+ * values are errors, positive values are used for special but normal events.
+ */
+
+#define Z_NO_COMPRESSION 0
+#define Z_BEST_SPEED 1
+#define Z_BEST_COMPRESSION 9
+#define Z_DEFAULT_COMPRESSION (-1)
+/* compression levels */
+
+#define Z_FILTERED 1
+#define Z_HUFFMAN_ONLY 2
+#define Z_DEFAULT_STRATEGY 0
+/* compression strategy; see deflateInit2() below for details */
+
+#define Z_BINARY 0
+#define Z_ASCII 1
+#define Z_UNKNOWN 2
+/* Possible values of the data_type field */
+
+#define Z_DEFLATED 8
+/* The deflate compression method (the only one supported in this version) */
+
+#define Z_NULL 0 /* for initializing zalloc, zfree, opaque */
+
+#define zlib_version zlibVersion()
+/* for compatibility with versions < 1.0.2 */
+
+ /* basic functions */
+
+extern const char * EXPORT zlibVersion OF((void));
+/* The application can compare zlibVersion and ZLIB_VERSION for consistency.
+ If the first character differs, the library code actually used is
+ not compatible with the zlib.h header file used by the application.
+ This check is automatically made by deflateInit and inflateInit.
+ */
+
+/*
+extern int EXPORT deflateInit OF((z_streamp strm, int level));
+
+ Initializes the internal stream state for compression. The fields
+ zalloc, zfree and opaque must be initialized before by the caller.
+ If zalloc and zfree are set to Z_NULL, deflateInit updates them to
+ use default allocation functions.
+
+ The compression level must be Z_DEFAULT_COMPRESSION, or between 0 and 9:
+ 1 gives best speed, 9 gives best compression, 0 gives no compression at
+ all (the input data is simply copied a block at a time).
+ Z_DEFAULT_COMPRESSION requests a default compromise between speed and
+ compression (currently equivalent to level 6).
+
+ deflateInit returns Z_OK if success, Z_MEM_ERROR if there was not
+ enough memory, Z_STREAM_ERROR if level is not a valid compression level,
+ Z_VERSION_ERROR if the zlib library version (zlib_version) is incompatible
+ with the version assumed by the caller (ZLIB_VERSION).
+ msg is set to null if there is no error message. deflateInit does not
+ perform any compression: this will be done by deflate().
+*/
+
+
+extern int EXPORT deflate OF((z_streamp strm, int flush));
+/*
+ Performs one or both of the following actions:
+
+ - Compress more input starting at next_in and update next_in and avail_in
+ accordingly. If not all input can be processed (because there is not
+ enough room in the output buffer), next_in and avail_in are updated and
+ processing will resume at this point for the next call of deflate().
+
+ - Provide more output starting at next_out and update next_out and avail_out
+ accordingly. This action is forced if the parameter flush is non zero.
+ Forcing flush frequently degrades the compression ratio, so this parameter
+ should be set only when necessary (in interactive applications).
+ Some output may be provided even if flush is not set.
+
+ Before the call of deflate(), the application should ensure that at least
+ one of the actions is possible, by providing more input and/or consuming
+ more output, and updating avail_in or avail_out accordingly; avail_out
+ should never be zero before the call. The application can consume the
+ compressed output when it wants, for example when the output buffer is full
+ (avail_out == 0), or after each call of deflate(). If deflate returns Z_OK
+ and with zero avail_out, it must be called again after making room in the
+ output buffer because there might be more output pending.
+
+ If the parameter flush is set to Z_PARTIAL_FLUSH, the current compression
+ block is terminated and flushed to the output buffer so that the
+ decompressor can get all input data available so far. For method 9, a future
+ variant on method 8, the current block will be flushed but not terminated.
+ Z_SYNC_FLUSH has the same effect as partial flush except that the compressed
+ output is byte aligned (the compressor can clear its internal bit buffer)
+ and the current block is always terminated; this can be useful if the
+ compressor has to be restarted from scratch after an interruption (in which
+ case the internal state of the compressor may be lost).
+ If flush is set to Z_FULL_FLUSH, the compression block is terminated, a
+ special marker is output and the compression dictionary is discarded; this
+ is useful to allow the decompressor to synchronize if one compressed block
+ has been damaged (see inflateSync below). Flushing degrades compression and
+ so should be used only when necessary. Using Z_FULL_FLUSH too often can
+ seriously degrade the compression. If deflate returns with avail_out == 0,
+ this function must be called again with the same value of the flush
+ parameter and more output space (updated avail_out), until the flush is
+ complete (deflate returns with non-zero avail_out).
+
+ If the parameter flush is set to Z_PACKET_FLUSH, the compression
+ block is terminated, and a zero-length stored block is output,
+ omitting the length bytes (the effect of this is that the 3-bit type
+ code 000 for a stored block is output, and the output is then
+ byte-aligned). This is designed for use at the end of a PPP packet.
+
+ If the parameter flush is set to Z_FINISH, pending input is processed,
+ pending output is flushed and deflate returns with Z_STREAM_END if there
+ was enough output space; if deflate returns with Z_OK, this function must be
+ called again with Z_FINISH and more output space (updated avail_out) but no
+ more input data, until it returns with Z_STREAM_END or an error. After
+ deflate has returned Z_STREAM_END, the only possible operations on the
+ stream are deflateReset or deflateEnd.
+
+ Z_FINISH can be used immediately after deflateInit if all the compression
+ is to be done in a single step. In this case, avail_out must be at least
+ 0.1% larger than avail_in plus 12 bytes. If deflate does not return
+ Z_STREAM_END, then it must be called again as described above.
+
+ deflate() may update data_type if it can make a good guess about
+ the input data type (Z_ASCII or Z_BINARY). In doubt, the data is considered
+ binary. This field is only for information purposes and does not affect
+ the compression algorithm in any manner.
+
+ deflate() returns Z_OK if some progress has been made (more input
+ processed or more output produced), Z_STREAM_END if all input has been
+ consumed and all output has been produced (only when flush is set to
+ Z_FINISH), Z_STREAM_ERROR if the stream state was inconsistent (for example
+ if next_in or next_out was NULL), Z_BUF_ERROR if no progress is possible.
+*/
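+
+/*
+    Editorial sketch, not part of the original zlib.h text: a single-pass
+  compression loop over the interface described above, assuming strm has
+  been set up as for deflateInit and 'out' is a fixed staging buffer:
+
+     strm.next_in = in; strm.avail_in = in_len;
+     do {
+         strm.next_out = out; strm.avail_out = sizeof(out);
+         err = deflate(&strm, Z_FINISH);
+         ... write sizeof(out) - strm.avail_out bytes from out ...
+     } while (err == Z_OK);
+
+  On success err is Z_STREAM_END; finish with deflateEnd(&strm).
+*/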
+
+
+extern int EXPORT deflateEnd OF((z_streamp strm));
+/*
+ All dynamically allocated data structures for this stream are freed.
+ This function discards any unprocessed input and does not flush any
+ pending output.
+
+ deflateEnd returns Z_OK if success, Z_STREAM_ERROR if the
+ stream state was inconsistent, Z_DATA_ERROR if the stream was freed
+ prematurely (some input or output was discarded). In the error case,
+ msg may be set but then points to a static string (which must not be
+ deallocated).
+*/
+
+
+/*
+extern int EXPORT inflateInit OF((z_streamp strm));
+
+ Initializes the internal stream state for decompression. The fields
+ zalloc, zfree and opaque must be initialized before by the caller. If
+ zalloc and zfree are set to Z_NULL, inflateInit updates them to use default
+ allocation functions.
+
+ inflateInit returns Z_OK if success, Z_MEM_ERROR if there was not
+ enough memory, Z_VERSION_ERROR if the zlib library version is incompatible
+ with the version assumed by the caller. msg is set to null if there is no
+ error message. inflateInit does not perform any decompression: this will be
+ done by inflate().
+*/
+
+#if defined(__FreeBSD__) && defined(_KERNEL)
+#define inflate inflate_ppp /* FreeBSD already has an inflate :-( */
+#endif
+
+extern int EXPORT inflate OF((z_streamp strm, int flush));
+/*
+ Performs one or both of the following actions:
+
+ - Decompress more input starting at next_in and update next_in and avail_in
+ accordingly. If not all input can be processed (because there is not
+ enough room in the output buffer), next_in is updated and processing
+ will resume at this point for the next call of inflate().
+
+ - Provide more output starting at next_out and update next_out and avail_out
+ accordingly. inflate() provides as much output as possible, until there
+ is no more input data or no more space in the output buffer (see below
+ about the flush parameter).
+
+ Before the call of inflate(), the application should ensure that at least
+ one of the actions is possible, by providing more input and/or consuming
+ more output, and updating the next_* and avail_* values accordingly.
+ The application can consume the uncompressed output when it wants, for
+ example when the output buffer is full (avail_out == 0), or after each
+ call of inflate(). If inflate returns Z_OK and with zero avail_out, it
+ must be called again after making room in the output buffer because there
+ might be more output pending.
+
+ If the parameter flush is set to Z_PARTIAL_FLUSH or Z_PACKET_FLUSH,
+ inflate flushes as much output as possible to the output buffer. The
+ flushing behavior of inflate is not specified for values of the flush
+ parameter other than Z_PARTIAL_FLUSH, Z_PACKET_FLUSH or Z_FINISH, but the
+ current implementation actually flushes as much output as possible
+ anyway. For Z_PACKET_FLUSH, inflate checks that once all the input data
+ has been consumed, it is expecting to see the length field of a stored
+ block; if not, it returns Z_DATA_ERROR.
+
+ inflate() should normally be called until it returns Z_STREAM_END or an
+ error. However if all decompression is to be performed in a single step
+ (a single call of inflate), the parameter flush should be set to
+ Z_FINISH. In this case all pending input is processed and all pending
+ output is flushed; avail_out must be large enough to hold all the
+ uncompressed data. (The size of the uncompressed data may have been saved
+ by the compressor for this purpose.) The next operation on this stream must
+ be inflateEnd to deallocate the decompression state. The use of Z_FINISH
+ is never required, but can be used to inform inflate that a faster routine
+ may be used for the single inflate() call.
+
+ inflate() returns Z_OK if some progress has been made (more input
+ processed or more output produced), Z_STREAM_END if the end of the
+ compressed data has been reached and all uncompressed output has been
+ produced, Z_NEED_DICT if a preset dictionary is needed at this point (see
+ inflateSetDictionary below), Z_DATA_ERROR if the input data was corrupted,
+ Z_STREAM_ERROR if the stream structure was inconsistent (for example if
+ next_in or next_out was NULL), Z_MEM_ERROR if there was not enough memory,
+ Z_BUF_ERROR if no progress is possible or if there was not enough room in
+ the output buffer when Z_FINISH is used. In the Z_DATA_ERROR case, the
+ application may then call inflateSync to look for a good compression block.
+ In the Z_NEED_DICT case, strm->adler is set to the Adler32 value of the
+ dictionary chosen by the compressor.
+*/
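+
+/*
+    Editorial sketch, not part of the original zlib.h text: the matching
+  decompression loop, draining output until end of stream or error:
+
+     strm.next_in = comp; strm.avail_in = comp_len;
+     do {
+         strm.next_out = out; strm.avail_out = sizeof(out);
+         err = inflate(&strm, Z_NO_FLUSH);
+         ... consume sizeof(out) - strm.avail_out bytes from out ...
+     } while (err == Z_OK);
+
+  Z_STREAM_END means the stream is complete; then call inflateEnd(&strm).
+*/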
+
+
+extern int EXPORT inflateEnd OF((z_streamp strm));
+/*
+ All dynamically allocated data structures for this stream are freed.
+ This function discards any unprocessed input and does not flush any
+ pending output.
+
+ inflateEnd returns Z_OK if success, Z_STREAM_ERROR if the stream state
+ was inconsistent. In the error case, msg may be set but then points to a
+ static string (which must not be deallocated).
+*/
+
+ /* Advanced functions */
+
+/*
+ The following functions are needed only in some special applications.
+*/
+
+/*
+extern int EXPORT deflateInit2 OF((z_streamp strm,
+ int level,
+ int method,
+ int windowBits,
+ int memLevel,
+ int strategy));
+
+ This is another version of deflateInit with more compression options. The
+ fields next_in, zalloc, zfree and opaque must be initialized before by
+ the caller.
+
+ The method parameter is the compression method. It must be Z_DEFLATED in
+ this version of the library. (Method 9 will allow a 64K history buffer and
+ partial block flushes.)
+
+ The windowBits parameter is the base two logarithm of the window size
+ (the size of the history buffer). It should be in the range 8..15 for this
+ version of the library (the value 16 will be allowed for method 9). Larger
+ values of this parameter result in better compression at the expense of
+ memory usage. The default value is 15 if deflateInit is used instead.
+
+ The memLevel parameter specifies how much memory should be allocated
+ for the internal compression state. memLevel=1 uses minimum memory but
+ is slow and reduces compression ratio; memLevel=9 uses maximum memory
+ for optimal speed. The default value is 8. See zconf.h for total memory
+ usage as a function of windowBits and memLevel.
+
+ The strategy parameter is used to tune the compression algorithm. Use the
+ value Z_DEFAULT_STRATEGY for normal data, Z_FILTERED for data produced by a
+ filter (or predictor), or Z_HUFFMAN_ONLY to force Huffman encoding only (no
+ string match). Filtered data consists mostly of small values with a
+ somewhat random distribution. In this case, the compression algorithm is
+ tuned to compress them better. The effect of Z_FILTERED is to force more
+ Huffman coding and less string matching; it is somewhat intermediate
+ between Z_DEFAULT and Z_HUFFMAN_ONLY. The strategy parameter only affects
+ the compression ratio but not the correctness of the compressed output even
+ if it is not set appropriately.
+
+ If next_in is not null, the library will use this buffer to hold also
+ some history information; the buffer must either hold the entire input
+ data, or have at least 1<<(windowBits+1) bytes and be writable. If next_in
+ is null, the library will allocate its own history buffer (and leave next_in
+ null). next_out need not be provided here but must be provided by the
+ application for the next call of deflate().
+
+   If the history buffer is provided by the application, next_in must
+   never be changed by the application since the compressor maintains
+ information inside this buffer from call to call; the application
+ must provide more input only by increasing avail_in. next_in is always
+ reset by the library in this case.
+
+ deflateInit2 returns Z_OK if success, Z_MEM_ERROR if there was
+ not enough memory, Z_STREAM_ERROR if a parameter is invalid (such as
+ an invalid method). msg is set to null if there is no error message.
+ deflateInit2 does not perform any compression: this will be done by
+ deflate().
+*/
+
+extern int EXPORT deflateSetDictionary OF((z_streamp strm,
+ const Bytef *dictionary,
+ uInt dictLength));
+/*
+ Initializes the compression dictionary (history buffer) from the given
+ byte sequence without producing any compressed output. This function must
+ be called immediately after deflateInit or deflateInit2, before any call
+ of deflate. The compressor and decompressor must use exactly the same
+ dictionary (see inflateSetDictionary).
+ The dictionary should consist of strings (byte sequences) that are likely
+ to be encountered later in the data to be compressed, with the most commonly
+ used strings preferably put towards the end of the dictionary. Using a
+ dictionary is most useful when the data to be compressed is short and
+ can be predicted with good accuracy; the data can then be compressed better
+ than with the default empty dictionary. In this version of the library,
+ only the last 32K bytes of the dictionary are used.
+ Upon return of this function, strm->adler is set to the Adler32 value
+ of the dictionary; the decompressor may later use this value to determine
+ which dictionary has been used by the compressor. (The Adler32 value
+ applies to the whole dictionary even if only a subset of the dictionary is
+ actually used by the compressor.)
+
+ deflateSetDictionary returns Z_OK if success, or Z_STREAM_ERROR if a
+ parameter is invalid (such as NULL dictionary) or the stream state
+ is inconsistent (for example if deflate has already been called for this
+ stream). deflateSetDictionary does not perform any compression: this will
+ be done by deflate().
+*/
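+
+/*
+    Editorial sketch, not part of the original zlib.h text: the two sides
+  of a preset dictionary, paired as described above and under
+  inflateSetDictionary below:
+
+     deflateInit(&dstrm, Z_DEFAULT_COMPRESSION);
+     deflateSetDictionary(&dstrm, dict, dict_len);
+     ... deflate as usual ...
+
+     inflateInit(&istrm);
+     if (inflate(&istrm, Z_NO_FLUSH) == Z_NEED_DICT)
+         inflateSetDictionary(&istrm, dict, dict_len);
+*/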
+
+extern int EXPORT deflateCopy OF((z_streamp dest,
+ z_streamp source));
+/*
+ Sets the destination stream as a complete copy of the source stream. If
+ the source stream is using an application-supplied history buffer, a new
+ buffer is allocated for the destination stream. The compressed output
+ buffer is always application-supplied. It's the responsibility of the
+ application to provide the correct values of next_out and avail_out for the
+ next call of deflate.
+
+ This function can be useful when several compression strategies will be
+ tried, for example when there are several ways of pre-processing the input
+ data with a filter. The streams that will be discarded should then be freed
+ by calling deflateEnd. Note that deflateCopy duplicates the internal
+ compression state which can be quite large, so this strategy is slow and
+ can consume lots of memory.
+
+ deflateCopy returns Z_OK if success, Z_MEM_ERROR if there was not
+ enough memory, Z_STREAM_ERROR if the source stream state was inconsistent
+ (such as zalloc being NULL). msg is left unchanged in both source and
+ destination.
+*/
+
+extern int EXPORT deflateReset OF((z_streamp strm));
+/*
+ This function is equivalent to deflateEnd followed by deflateInit,
+ but does not free and reallocate all the internal compression state.
+ The stream will keep the same compression level and any other attributes
+ that may have been set by deflateInit2.
+
+ deflateReset returns Z_OK if success, or Z_STREAM_ERROR if the source
+ stream state was inconsistent (such as zalloc or state being NULL).
+*/
+
+extern int EXPORT deflateParams OF((z_streamp strm, int level, int strategy));
+/*
+ Dynamically update the compression level and compression strategy.
+ This can be used to switch between compression and straight copy of
+ the input data, or to switch to a different kind of input data requiring
+ a different strategy. If the compression level is changed, the input
+ available so far is compressed with the old level (and may be flushed);
+ the new level will take effect only at the next call of deflate().
+
+ Before the call of deflateParams, the stream state must be set as for
+ a call of deflate(), since the currently available input may have to
+ be compressed and flushed. In particular, strm->avail_out must be non-zero.
+
+ deflateParams returns Z_OK if success, Z_STREAM_ERROR if the source
+ stream state was inconsistent or if a parameter was invalid, Z_BUF_ERROR
+ if strm->avail_out was zero.
+*/
+
+extern int EXPORT deflateOutputPending OF((z_streamp strm));
+/*
+ Returns the number of bytes of output which are immediately
+ available from the compressor (i.e. without any further input
+ or flush).
+*/
+
+/*
+extern int EXPORT inflateInit2 OF((z_streamp strm,
+ int windowBits));
+
+ This is another version of inflateInit with more compression options. The
+ fields next_out, zalloc, zfree and opaque must be initialized before by
+ the caller.
+
+ The windowBits parameter is the base two logarithm of the maximum window
+ size (the size of the history buffer). It should be in the range 8..15 for
+ this version of the library (the value 16 will be allowed soon). The
+ default value is 15 if inflateInit is used instead. If a compressed stream
+ with a larger window size is given as input, inflate() will return with
+ the error code Z_DATA_ERROR instead of trying to allocate a larger window.
+
+ If next_out is not null, the library will use this buffer for the history
+ buffer; the buffer must either be large enough to hold the entire output
+ data, or have at least 1<<windowBits bytes. If next_out is null, the
+ library will allocate its own buffer (and leave next_out null). next_in
+ need not be provided here but must be provided by the application for the
+ next call of inflate().
+
+ If the history buffer is provided by the application, next_out must
+ never be changed by the application since the decompressor maintains
+ history information inside this buffer from call to call; the application
+ can only reset next_out to the beginning of the history buffer when
+ avail_out is zero and all output has been consumed.
+
+ inflateInit2 returns Z_OK if success, Z_MEM_ERROR if there was
+ not enough memory, Z_STREAM_ERROR if a parameter is invalid (such as
+ windowBits < 8). msg is set to null if there is no error message.
+ inflateInit2 does not perform any decompression: this will be done by
+ inflate().
+*/
+
+extern int EXPORT inflateSetDictionary OF((z_streamp strm,
+ const Bytef *dictionary,
+ uInt dictLength));
+/*
+ Initializes the decompression dictionary (history buffer) from the given
+ uncompressed byte sequence. This function must be called immediately after
+ a call of inflate if this call returned Z_NEED_DICT. The dictionary chosen
+ by the compressor can be determined from the Adler32 value returned by this
+ call of inflate. The compressor and decompressor must use exactly the same
+ dictionary (see deflateSetDictionary).
+
+ inflateSetDictionary returns Z_OK if success, Z_STREAM_ERROR if a
+ parameter is invalid (such as NULL dictionary) or the stream state is
+ inconsistent, Z_DATA_ERROR if the given dictionary doesn't match the
+ expected one (incorrect Adler32 value). inflateSetDictionary does not
+ perform any decompression: this will be done by subsequent calls of
+ inflate().
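+
+   A minimal sketch of the expected calling sequence (dictionary and
+   dictLength are assumed to match what the compressor used):
+
+     err = inflate(strm, Z_NO_FLUSH);
+     if (err == Z_NEED_DICT)
+         err = inflateSetDictionary(strm, dictionary, dictLength);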
+*/
+
+extern int EXPORT inflateSync OF((z_streamp strm));
+/*
+ Skips invalid compressed data until the special marker (see deflate()
+ above) can be found, or until all available input is skipped. No output
+ is provided.
+
+ inflateSync returns Z_OK if the special marker has been found, Z_BUF_ERROR
+ if no more input was provided, Z_DATA_ERROR if no marker has been found,
+ or Z_STREAM_ERROR if the stream structure was inconsistent. In the success
+ case, the application may save the current value of total_in which
+ indicates where valid compressed data was found. In the error case, the
+ application may repeatedly call inflateSync, providing more input each time,
+ until success or end of the input data.
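+
+   A minimal resynchronization sketch (refill_input is a hypothetical
+   application routine that supplies more compressed data in next_in
+   and avail_in, returning 0 at end of input):
+
+     while ((err = inflateSync(strm)) != Z_OK) {
+         if (!refill_input(strm))
+             break;
+     }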
+*/
+
+extern int EXPORT inflateReset OF((z_streamp strm));
+/*
+ This function is equivalent to inflateEnd followed by inflateInit,
+ but does not free and reallocate all the internal decompression state.
+ The stream will keep attributes that may have been set by inflateInit2.
+
+ inflateReset returns Z_OK if success, or Z_STREAM_ERROR if the source
+ stream state was inconsistent (such as zalloc or state being NULL).
+*/
+
+extern int inflateIncomp OF((z_stream *strm));
+/*
+ This function adds the data at next_in (avail_in bytes) to the output
+ history without performing any output. There must be no pending output,
+ and the decompressor must be expecting to see the start of a block.
+ Calling this function is equivalent to decompressing a stored block
+ containing the data at next_in (except that the data is not output).
+*/
+
+ /* utility functions */
+
+/*
+ The following utility functions are implemented on top of the
+ basic stream-oriented functions. To simplify the interface, some
+ default options are assumed (compression level, window size,
+ standard memory allocation functions). The source code of these
+ utility functions can easily be modified if you need special options.
+*/
+
+extern int EXPORT compress OF((Bytef *dest, uLongf *destLen,
+ const Bytef *source, uLong sourceLen));
+/*
+ Compresses the source buffer into the destination buffer. sourceLen is
+ the byte length of the source buffer. Upon entry, destLen is the total
+ size of the destination buffer, which must be at least 0.1% larger than
+ sourceLen plus 12 bytes. Upon exit, destLen is the actual size of the
+ compressed buffer.
+ This function can be used to compress a whole file at once if the
+ input file is mmap'ed.
+ compress returns Z_OK if success, Z_MEM_ERROR if there was not
+ enough memory, Z_BUF_ERROR if there was not enough room in the output
+ buffer.
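+
+   A minimal sizing sketch following the rule above (source and
+   sourceLen are assumed to be supplied by the application):
+
+     uLongf destLen = sourceLen + sourceLen / 1000 + 1 + 12;
+     Bytef *dest = (Bytef *) malloc(destLen);
+     int err = compress(dest, &destLen, source, sourceLen);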
+*/
+
+extern int EXPORT uncompress OF((Bytef *dest, uLongf *destLen,
+ const Bytef *source, uLong sourceLen));
+/*
+ Decompresses the source buffer into the destination buffer. sourceLen is
+ the byte length of the source buffer. Upon entry, destLen is the total
+ size of the destination buffer, which must be large enough to hold the
+ entire uncompressed data. (The size of the uncompressed data must have
+ been saved previously by the compressor and transmitted to the decompressor
+ by some mechanism outside the scope of this compression library.)
+ Upon exit, destLen is the actual size of the uncompressed buffer.
+ This function can be used to decompress a whole file at once if the
+ input file is mmap'ed.
+
+ uncompress returns Z_OK if success, Z_MEM_ERROR if there was not
+ enough memory, Z_BUF_ERROR if there was not enough room in the output
+ buffer, or Z_DATA_ERROR if the input data was corrupted.
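+
+   A minimal sketch (origLen, the uncompressed size, is assumed to have
+   been transmitted out of band as described above):
+
+     uLongf destLen = origLen;
+     int err = uncompress(dest, &destLen, source, sourceLen);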
+*/
+
+
+typedef voidp gzFile;
+
+extern gzFile EXPORT gzopen OF((const char *path, const char *mode));
+/*
+ Opens a gzip (.gz) file for reading or writing. The mode parameter
+ is as in fopen ("rb" or "wb") but can also include a compression level
+ ("wb9"). gzopen can be used to read a file which is not in gzip format;
+ in this case gzread will directly read from the file without decompression.
+ gzopen returns NULL if the file could not be opened or if there was
+ insufficient memory to allocate the (de)compression state; errno
+ can be checked to distinguish the two cases (if errno is zero, the
+ zlib error is Z_MEM_ERROR).
+*/
+
+extern gzFile EXPORT gzdopen OF((int fd, const char *mode));
+/*
+ gzdopen() associates a gzFile with the file descriptor fd. File
+ descriptors are obtained from calls like open, dup, creat, pipe or
+ fileno (if the file has been previously opened with fopen).
+ The mode parameter is as in gzopen.
+ The next call of gzclose on the returned gzFile will also close the
+ file descriptor fd, just like fclose(fdopen(fd, mode)) closes the file
+ descriptor fd. If you want to keep fd open, use gzdopen(dup(fd), mode).
+ gzdopen returns NULL if there was insufficient memory to allocate
+ the (de)compression state.
+*/
+
+extern int EXPORT gzread OF((gzFile file, voidp buf, unsigned len));
+/*
+ Reads the given number of uncompressed bytes from the compressed file.
+ If the input file was not in gzip format, gzread copies the given number
+ of bytes into the buffer.
+ gzread returns the number of uncompressed bytes actually read (0 for
+ end of file, -1 for error). */
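+
+/*
+   A minimal read-loop sketch using the gz* interface ("in.gz" is a
+   hypothetical file name, use() stands for an application-supplied
+   routine, and gzerror-based error reporting is omitted):
+
+     gzFile f = gzopen("in.gz", "rb");
+     char buf[4096];
+     int n;
+     while (f != NULL && (n = gzread(f, buf, sizeof(buf))) > 0)
+         use(buf, n);
+     if (f != NULL)
+         gzclose(f);
+*/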
+
+extern int EXPORT gzwrite OF((gzFile file, const voidp buf, unsigned len));
+/*
+ Writes the given number of uncompressed bytes into the compressed file.
+ gzwrite returns the number of uncompressed bytes actually written
+ (0 in case of error).
+*/
+
+extern int EXPORT gzflush OF((gzFile file, int flush));
+/*
+ Flushes all pending output into the compressed file. The parameter
+ flush is as in the deflate() function. The return value is the zlib
+ error number (see function gzerror below). gzflush returns Z_OK if
+ the flush parameter is Z_FINISH and all output could be flushed.
+ gzflush should be called only when strictly necessary because it can
+ degrade compression.
+*/
+
+extern int EXPORT gzclose OF((gzFile file));
+/*
+ Flushes all pending output if necessary, closes the compressed file
+ and deallocates all the (de)compression state. The return value is the zlib
+ error number (see function gzerror below).
+*/
+
+extern const char * EXPORT gzerror OF((gzFile file, int *errnum));
+/*
+ Returns the error message for the last error which occurred on the
+ given compressed file. errnum is set to the zlib error number. If an
+ error occurred in the filesystem and not in the compression library,
+ errnum is set to Z_ERRNO and the application may consult errno
+ to get the exact error code.
+*/
+
+ /* checksum functions */
+
+/*
+ These functions are not related to compression but are exported
+ anyway because they might be useful in applications using the
+ compression library.
+*/
+
+extern uLong EXPORT adler32 OF((uLong adler, const Bytef *buf, uInt len));
+
+/*
+ Update a running Adler-32 checksum with the bytes buf[0..len-1] and
+ return the updated checksum. If buf is NULL, this function returns
+ the required initial value for the checksum.
+ An Adler-32 checksum is almost as reliable as a CRC32 but can be computed
+ much faster. Usage example:
+
+ uLong adler = adler32(0L, Z_NULL, 0);
+
+ while (read_buffer(buffer, length) != EOF) {
+ adler = adler32(adler, buffer, length);
+ }
+ if (adler != original_adler) error();
+*/
+
+#if 0
+extern uLong EXPORT crc32 OF((uLong crc, const Bytef *buf, uInt len));
+/*
+ Update a running crc with the bytes buf[0..len-1] and return the updated
+ crc. If buf is NULL, this function returns the required initial value
+ for the crc. Pre- and post-conditioning (one's complement) is performed
+ within this function so it shouldn't be done by the application.
+ Usage example:
+
+ uLong crc = crc32(0L, Z_NULL, 0);
+
+ while (read_buffer(buffer, length) != EOF) {
+ crc = crc32(crc, buffer, length);
+ }
+ if (crc != original_crc) error();
+*/
+#endif
+
+
+ /* various hacks, don't look :) */
+
+/* deflateInit and inflateInit are macros to allow checking the zlib version
+ * and the compiler's view of z_stream:
+ */
+extern int EXPORT deflateInit_ OF((z_streamp strm, int level,
+ const char *version, int stream_size));
+extern int EXPORT inflateInit_ OF((z_streamp strm,
+ const char *version, int stream_size));
+extern int EXPORT deflateInit2_ OF((z_streamp strm, int level, int method,
+ int windowBits, int memLevel, int strategy,
+ const char *version, int stream_size));
+extern int EXPORT inflateInit2_ OF((z_streamp strm, int windowBits,
+ const char *version, int stream_size));
+#define deflateInit(strm, level) \
+ deflateInit_((strm), (level), ZLIB_VERSION, sizeof(z_stream))
+#define inflateInit(strm) \
+ inflateInit_((strm), ZLIB_VERSION, sizeof(z_stream))
+#define deflateInit2(strm, level, method, windowBits, memLevel, strategy) \
+ deflateInit2_((strm),(level),(method),(windowBits),(memLevel),\
+ (strategy), ZLIB_VERSION, sizeof(z_stream))
+#define inflateInit2(strm, windowBits) \
+ inflateInit2_((strm), (windowBits), ZLIB_VERSION, sizeof(z_stream))
+
+#if !defined(_Z_UTIL_H) && !defined(NO_DUMMY_DECL)
+ struct internal_state {int dummy;}; /* hack for buggy compilers */
+#endif
+
+uLongf *get_crc_table OF((void)); /* can be used by asm versions of crc32() */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _ZLIB_H */
+/* --- zlib.h */
diff --git a/rtems/freebsd/net80211/_ieee80211.h b/rtems/freebsd/net80211/_ieee80211.h
new file mode 100644
index 00000000..764e5cf1
--- /dev/null
+++ b/rtems/freebsd/net80211/_ieee80211.h
@@ -0,0 +1,396 @@
+/*-
+ * Copyright (c) 2001 Atsushi Onoe
+ * Copyright (c) 2002-2008 Sam Leffler, Errno Consulting
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+#ifndef _NET80211__IEEE80211_HH_
+#define _NET80211__IEEE80211_HH_
+
+/*
+ * 802.11 implementation definitions.
+ *
+ * NB: this file is used by applications.
+ */
+
+/*
+ * PHY type; mostly used to identify FH phys.
+ */
+enum ieee80211_phytype {
+ IEEE80211_T_DS, /* direct sequence spread spectrum */
+ IEEE80211_T_FH, /* frequency hopping */
+ IEEE80211_T_OFDM, /* orthogonal frequency division multiplexing */
+ IEEE80211_T_TURBO, /* high rate OFDM, aka turbo mode */
+ IEEE80211_T_HT, /* high throughput */
+ IEEE80211_T_OFDM_HALF, /* 1/2 rate OFDM */
+ IEEE80211_T_OFDM_QUARTER, /* 1/4 rate OFDM */
+};
+#define IEEE80211_T_CCK IEEE80211_T_DS /* more common nomenclature */
+
+/*
+ * PHY mode; this is not really a mode as multi-mode devices
+ * have multiple PHY's. Mode is mostly used as a shorthand
+ * for constraining which channels to consider in setting up
+ * operation. Modes were used more extensively when
+ * channels were identified as IEEE channel numbers.
+ */
+enum ieee80211_phymode {
+ IEEE80211_MODE_AUTO = 0, /* autoselect */
+ IEEE80211_MODE_11A = 1, /* 5GHz, OFDM */
+ IEEE80211_MODE_11B = 2, /* 2GHz, CCK */
+ IEEE80211_MODE_11G = 3, /* 2GHz, OFDM */
+ IEEE80211_MODE_FH = 4, /* 2GHz, GFSK */
+ IEEE80211_MODE_TURBO_A = 5, /* 5GHz, OFDM, 2x clock */
+ IEEE80211_MODE_TURBO_G = 6, /* 2GHz, OFDM, 2x clock */
+ IEEE80211_MODE_STURBO_A = 7, /* 5GHz, OFDM, 2x clock, static */
+ IEEE80211_MODE_11NA = 8, /* 5GHz, w/ HT */
+ IEEE80211_MODE_11NG = 9, /* 2GHz, w/ HT */
+ IEEE80211_MODE_HALF = 10, /* OFDM, 1/2x clock */
+ IEEE80211_MODE_QUARTER = 11, /* OFDM, 1/4x clock */
+};
+#define IEEE80211_MODE_MAX (IEEE80211_MODE_QUARTER+1)
+
+/*
+ * Operating mode. Devices do not necessarily support
+ * all modes; they indicate which are supported in their
+ * capabilities.
+ */
+enum ieee80211_opmode {
+ IEEE80211_M_IBSS = 0, /* IBSS (adhoc) station */
+ IEEE80211_M_STA = 1, /* infrastructure station */
+ IEEE80211_M_WDS = 2, /* WDS link */
+ IEEE80211_M_AHDEMO = 3, /* old Lucent-compatible adhoc demo */
+ IEEE80211_M_HOSTAP = 4, /* Software Access Point */
+ IEEE80211_M_MONITOR = 5, /* Monitor mode */
+ IEEE80211_M_MBSS = 6, /* MBSS (Mesh Point) link */
+};
+#define IEEE80211_OPMODE_MAX (IEEE80211_M_MBSS+1)
+
+/*
+ * 802.11g/802.11n protection mode.
+ */
+enum ieee80211_protmode {
+ IEEE80211_PROT_NONE = 0, /* no protection */
+ IEEE80211_PROT_CTSONLY = 1, /* CTS to self */
+ IEEE80211_PROT_RTSCTS = 2, /* RTS-CTS */
+};
+
+/*
+ * Authentication mode. The open and shared key authentication
+ * modes are implemented within the 802.11 layer. 802.1x and
+ * WPA/802.11i are implemented in user mode by setting the
+ * 802.11 layer into IEEE80211_AUTH_8021X and deferring
+ * authentication to user space programs.
+ */
+enum ieee80211_authmode {
+ IEEE80211_AUTH_NONE = 0,
+ IEEE80211_AUTH_OPEN = 1, /* open */
+ IEEE80211_AUTH_SHARED = 2, /* shared-key */
+ IEEE80211_AUTH_8021X = 3, /* 802.1x */
+ IEEE80211_AUTH_AUTO = 4, /* auto-select/accept */
+ /* NB: these are used only for ioctls */
+ IEEE80211_AUTH_WPA = 5, /* WPA/RSN w/ 802.1x/PSK */
+};
+
+/*
+ * Roaming mode is effectively who controls the operation
+ * of the 802.11 state machine when operating as a station.
+ * State transitions are controlled either by the driver
+ * (typically when management frames are processed by the
+ * hardware/firmware), the host (auto/normal operation of
+ * the 802.11 layer), or explicitly through ioctl requests
+ * when applications like wpa_supplicant want control.
+ */
+enum ieee80211_roamingmode {
+ IEEE80211_ROAMING_DEVICE= 0, /* driver/hardware control */
+ IEEE80211_ROAMING_AUTO = 1, /* 802.11 layer control */
+ IEEE80211_ROAMING_MANUAL= 2, /* application control */
+};
+
+/*
+ * Channels are specified by frequency and attributes.
+ */
+struct ieee80211_channel {
+ uint32_t ic_flags; /* see below */
+ uint16_t ic_freq; /* setting in MHz */
+ uint8_t ic_ieee; /* IEEE channel number */
+ int8_t ic_maxregpower; /* maximum regulatory tx power in dBm */
+ int8_t ic_maxpower; /* maximum tx power in .5 dBm */
+ int8_t ic_minpower; /* minimum tx power in .5 dBm */
+ uint8_t ic_state; /* dynamic state */
+ uint8_t ic_extieee; /* HT40 extension channel number */
+ int8_t ic_maxantgain; /* maximum antenna gain in .5 dBm */
+ uint8_t ic_pad;
+ uint16_t ic_devdata; /* opaque device/driver data */
+};
+
+#define IEEE80211_CHAN_MAX 256
+#define IEEE80211_CHAN_BYTES 32 /* howmany(IEEE80211_CHAN_MAX, NBBY) */
+#define IEEE80211_CHAN_ANY 0xffff /* token for ``any channel'' */
+#define IEEE80211_CHAN_ANYC \
+ ((struct ieee80211_channel *) IEEE80211_CHAN_ANY)
+
+/* channel attributes */
+#define IEEE80211_CHAN_PRIV0 0x00000001 /* driver private bit 0 */
+#define IEEE80211_CHAN_PRIV1 0x00000002 /* driver private bit 1 */
+#define IEEE80211_CHAN_PRIV2 0x00000004 /* driver private bit 2 */
+#define IEEE80211_CHAN_PRIV3 0x00000008 /* driver private bit 3 */
+#define IEEE80211_CHAN_TURBO 0x00000010 /* Turbo channel */
+#define IEEE80211_CHAN_CCK 0x00000020 /* CCK channel */
+#define IEEE80211_CHAN_OFDM 0x00000040 /* OFDM channel */
+#define IEEE80211_CHAN_2GHZ 0x00000080 /* 2 GHz spectrum channel. */
+#define IEEE80211_CHAN_5GHZ 0x00000100 /* 5 GHz spectrum channel */
+#define IEEE80211_CHAN_PASSIVE 0x00000200 /* Only passive scan allowed */
+#define IEEE80211_CHAN_DYN 0x00000400 /* Dynamic CCK-OFDM channel */
+#define IEEE80211_CHAN_GFSK 0x00000800 /* GFSK channel (FHSS PHY) */
+#define IEEE80211_CHAN_GSM 0x00001000 /* 900 MHz spectrum channel */
+#define IEEE80211_CHAN_STURBO 0x00002000 /* 11a static turbo channel only */
+#define IEEE80211_CHAN_HALF 0x00004000 /* Half rate channel */
+#define IEEE80211_CHAN_QUARTER 0x00008000 /* Quarter rate channel */
+#define IEEE80211_CHAN_HT20 0x00010000 /* HT 20 channel */
+#define IEEE80211_CHAN_HT40U 0x00020000 /* HT 40 channel w/ ext above */
+#define IEEE80211_CHAN_HT40D 0x00040000 /* HT 40 channel w/ ext below */
+#define IEEE80211_CHAN_DFS 0x00080000 /* DFS required */
+#define IEEE80211_CHAN_4MSXMIT 0x00100000 /* 4ms limit on frame length */
+#define IEEE80211_CHAN_NOADHOC 0x00200000 /* adhoc mode not allowed */
+#define IEEE80211_CHAN_NOHOSTAP 0x00400000 /* hostap mode not allowed */
+#define IEEE80211_CHAN_11D 0x00800000 /* 802.11d required */
+
+#define IEEE80211_CHAN_HT40 (IEEE80211_CHAN_HT40U | IEEE80211_CHAN_HT40D)
+#define IEEE80211_CHAN_HT (IEEE80211_CHAN_HT20 | IEEE80211_CHAN_HT40)
+
+#define IEEE80211_CHAN_BITS \
+ "\20\1PRIV0\2PRIV2\3PRIV3\4PRIV4\5TURBO\6CCK\7OFDM\0102GHZ\0115GHZ" \
+ "\12PASSIVE\13DYN\14GFSK\15GSM\16STURBO\17HALF\20QUARTER\21HT20" \
+ "\22HT40U\23HT40D\24DFS\0254MSXMIT\26NOADHOC\27NOHOSTAP\03011D"
+
+/*
+ * Useful combinations of channel characteristics.
+ */
+#define IEEE80211_CHAN_FHSS \
+ (IEEE80211_CHAN_2GHZ | IEEE80211_CHAN_GFSK)
+#define IEEE80211_CHAN_A \
+ (IEEE80211_CHAN_5GHZ | IEEE80211_CHAN_OFDM)
+#define IEEE80211_CHAN_B \
+ (IEEE80211_CHAN_2GHZ | IEEE80211_CHAN_CCK)
+#define IEEE80211_CHAN_PUREG \
+ (IEEE80211_CHAN_2GHZ | IEEE80211_CHAN_OFDM)
+#define IEEE80211_CHAN_G \
+ (IEEE80211_CHAN_2GHZ | IEEE80211_CHAN_DYN)
+#define IEEE80211_CHAN_108A \
+ (IEEE80211_CHAN_A | IEEE80211_CHAN_TURBO)
+#define IEEE80211_CHAN_108G \
+ (IEEE80211_CHAN_PUREG | IEEE80211_CHAN_TURBO)
+#define IEEE80211_CHAN_ST \
+ (IEEE80211_CHAN_108A | IEEE80211_CHAN_STURBO)
+
+#define IEEE80211_CHAN_ALL \
+ (IEEE80211_CHAN_2GHZ | IEEE80211_CHAN_5GHZ | IEEE80211_CHAN_GFSK | \
+ IEEE80211_CHAN_CCK | IEEE80211_CHAN_OFDM | IEEE80211_CHAN_DYN | \
+ IEEE80211_CHAN_HALF | IEEE80211_CHAN_QUARTER | \
+ IEEE80211_CHAN_HT)
+#define IEEE80211_CHAN_ALLTURBO \
+ (IEEE80211_CHAN_ALL | IEEE80211_CHAN_TURBO | IEEE80211_CHAN_STURBO)
+
+#define IEEE80211_IS_CHAN_FHSS(_c) \
+ (((_c)->ic_flags & IEEE80211_CHAN_FHSS) == IEEE80211_CHAN_FHSS)
+#define IEEE80211_IS_CHAN_A(_c) \
+ (((_c)->ic_flags & IEEE80211_CHAN_A) == IEEE80211_CHAN_A)
+#define IEEE80211_IS_CHAN_B(_c) \
+ (((_c)->ic_flags & IEEE80211_CHAN_B) == IEEE80211_CHAN_B)
+#define IEEE80211_IS_CHAN_PUREG(_c) \
+ (((_c)->ic_flags & IEEE80211_CHAN_PUREG) == IEEE80211_CHAN_PUREG)
+#define IEEE80211_IS_CHAN_G(_c) \
+ (((_c)->ic_flags & IEEE80211_CHAN_G) == IEEE80211_CHAN_G)
+#define IEEE80211_IS_CHAN_ANYG(_c) \
+ (IEEE80211_IS_CHAN_PUREG(_c) || IEEE80211_IS_CHAN_G(_c))
+#define IEEE80211_IS_CHAN_ST(_c) \
+ (((_c)->ic_flags & IEEE80211_CHAN_ST) == IEEE80211_CHAN_ST)
+#define IEEE80211_IS_CHAN_108A(_c) \
+ (((_c)->ic_flags & IEEE80211_CHAN_108A) == IEEE80211_CHAN_108A)
+#define IEEE80211_IS_CHAN_108G(_c) \
+ (((_c)->ic_flags & IEEE80211_CHAN_108G) == IEEE80211_CHAN_108G)
+
+#define IEEE80211_IS_CHAN_2GHZ(_c) \
+ (((_c)->ic_flags & IEEE80211_CHAN_2GHZ) != 0)
+#define IEEE80211_IS_CHAN_5GHZ(_c) \
+ (((_c)->ic_flags & IEEE80211_CHAN_5GHZ) != 0)
+#define IEEE80211_IS_CHAN_PASSIVE(_c) \
+ (((_c)->ic_flags & IEEE80211_CHAN_PASSIVE) != 0)
+#define IEEE80211_IS_CHAN_OFDM(_c) \
+ (((_c)->ic_flags & (IEEE80211_CHAN_OFDM | IEEE80211_CHAN_DYN)) != 0)
+#define IEEE80211_IS_CHAN_CCK(_c) \
+ (((_c)->ic_flags & (IEEE80211_CHAN_CCK | IEEE80211_CHAN_DYN)) != 0)
+#define IEEE80211_IS_CHAN_GFSK(_c) \
+ (((_c)->ic_flags & IEEE80211_CHAN_GFSK) != 0)
+#define IEEE80211_IS_CHAN_TURBO(_c) \
+ (((_c)->ic_flags & IEEE80211_CHAN_TURBO) != 0)
+#define IEEE80211_IS_CHAN_STURBO(_c) \
+ (((_c)->ic_flags & IEEE80211_CHAN_STURBO) != 0)
+#define IEEE80211_IS_CHAN_DTURBO(_c) \
+ (((_c)->ic_flags & \
+ (IEEE80211_CHAN_TURBO | IEEE80211_CHAN_STURBO)) == IEEE80211_CHAN_TURBO)
+#define IEEE80211_IS_CHAN_HALF(_c) \
+ (((_c)->ic_flags & IEEE80211_CHAN_HALF) != 0)
+#define IEEE80211_IS_CHAN_QUARTER(_c) \
+ (((_c)->ic_flags & IEEE80211_CHAN_QUARTER) != 0)
+#define IEEE80211_IS_CHAN_FULL(_c) \
+ (((_c)->ic_flags & (IEEE80211_CHAN_QUARTER | IEEE80211_CHAN_HALF)) == 0)
+#define IEEE80211_IS_CHAN_GSM(_c) \
+ (((_c)->ic_flags & IEEE80211_CHAN_GSM) != 0)
+#define IEEE80211_IS_CHAN_HT(_c) \
+ (((_c)->ic_flags & IEEE80211_CHAN_HT) != 0)
+#define IEEE80211_IS_CHAN_HT20(_c) \
+ (((_c)->ic_flags & IEEE80211_CHAN_HT20) != 0)
+#define IEEE80211_IS_CHAN_HT40(_c) \
+ (((_c)->ic_flags & IEEE80211_CHAN_HT40) != 0)
+#define IEEE80211_IS_CHAN_HT40U(_c) \
+ (((_c)->ic_flags & IEEE80211_CHAN_HT40U) != 0)
+#define IEEE80211_IS_CHAN_HT40D(_c) \
+ (((_c)->ic_flags & IEEE80211_CHAN_HT40D) != 0)
+#define IEEE80211_IS_CHAN_HTA(_c) \
+ (IEEE80211_IS_CHAN_5GHZ(_c) && \
+ ((_c)->ic_flags & IEEE80211_CHAN_HT) != 0)
+#define IEEE80211_IS_CHAN_HTG(_c) \
+ (IEEE80211_IS_CHAN_2GHZ(_c) && \
+ ((_c)->ic_flags & IEEE80211_CHAN_HT) != 0)
+#define IEEE80211_IS_CHAN_DFS(_c) \
+ (((_c)->ic_flags & IEEE80211_CHAN_DFS) != 0)
+#define IEEE80211_IS_CHAN_NOADHOC(_c) \
+ (((_c)->ic_flags & IEEE80211_CHAN_NOADHOC) != 0)
+#define IEEE80211_IS_CHAN_NOHOSTAP(_c) \
+ (((_c)->ic_flags & IEEE80211_CHAN_NOHOSTAP) != 0)
+#define IEEE80211_IS_CHAN_11D(_c) \
+ (((_c)->ic_flags & IEEE80211_CHAN_11D) != 0)
+
+#define IEEE80211_CHAN2IEEE(_c) (_c)->ic_ieee
+
+/* dynamic state */
+#define IEEE80211_CHANSTATE_RADAR 0x01 /* radar detected */
+#define IEEE80211_CHANSTATE_CACDONE 0x02 /* CAC completed */
+#define IEEE80211_CHANSTATE_CWINT 0x04 /* interference detected */
+#define IEEE80211_CHANSTATE_NORADAR 0x10 /* post notify on radar clear */
+
+#define IEEE80211_IS_CHAN_RADAR(_c) \
+ (((_c)->ic_state & IEEE80211_CHANSTATE_RADAR) != 0)
+#define IEEE80211_IS_CHAN_CACDONE(_c) \
+ (((_c)->ic_state & IEEE80211_CHANSTATE_CACDONE) != 0)
+#define IEEE80211_IS_CHAN_CWINT(_c) \
+ (((_c)->ic_state & IEEE80211_CHANSTATE_CWINT) != 0)
+
+/* ni_chan encoding for FH phy */
+#define IEEE80211_FH_CHANMOD 80
+#define IEEE80211_FH_CHAN(set,pat) (((set)-1)*IEEE80211_FH_CHANMOD+(pat))
+#define IEEE80211_FH_CHANSET(chan) ((chan)/IEEE80211_FH_CHANMOD+1)
+#define IEEE80211_FH_CHANPAT(chan) ((chan)%IEEE80211_FH_CHANMOD)
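+/*
+ * Worked example of the encoding above: hop set 2, pattern 3 encodes
+ * as IEEE80211_FH_CHAN(2, 3) == (2-1)*80 + 3 == 83, and decodes back
+ * via IEEE80211_FH_CHANSET(83) == 2 and IEEE80211_FH_CHANPAT(83) == 3.
+ */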
+
+#define IEEE80211_TID_SIZE (WME_NUM_TID+1) /* WME TID's +1 for non-QoS */
+#define IEEE80211_NONQOS_TID WME_NUM_TID /* index for non-QoS sta */
+
+/*
+ * The 802.11 spec says at most 2007 stations may be
+ * associated at once. For most AP's this is way more
+ * than is feasible so we use a default of 128. This
+ * number may be overridden by the driver and/or by
+ * user configuration but may not be less than IEEE80211_AID_MIN.
+ */
+#define IEEE80211_AID_DEF 128
+#define IEEE80211_AID_MIN 16
+
+/*
+ * 802.11 rate set.
+ */
+#define IEEE80211_RATE_SIZE 8 /* 802.11 standard */
+#define IEEE80211_RATE_MAXSIZE 15 /* max rates we'll handle */
+
+struct ieee80211_rateset {
+ uint8_t rs_nrates;
+ uint8_t rs_rates[IEEE80211_RATE_MAXSIZE];
+};
+
+/*
+ * 802.11n variant of ieee80211_rateset. Instead of
+ * legacy rates the entries are MCS rates. We define
+ * the structure such that it can be used interchangeably
+ * with an ieee80211_rateset (modulo structure size).
+ */
+#define IEEE80211_HTRATE_MAXSIZE 127
+
+struct ieee80211_htrateset {
+ uint8_t rs_nrates;
+ uint8_t rs_rates[IEEE80211_HTRATE_MAXSIZE];
+};
+
+#define IEEE80211_RATE_MCS 0x80
+
+/*
+ * Per-mode transmit parameters/controls visible to user space.
+ * These can be used to set fixed transmit rate for all operating
+ * modes or on a per-client basis according to the capabilities
+ * of the client (e.g. an 11b client associated to an 11g ap).
+ *
+ * MCS are distinguished from legacy rates by or'ing in 0x80.
+ */
+struct ieee80211_txparam {
+ uint8_t ucastrate; /* ucast data rate (legacy/MCS|0x80) */
+ uint8_t mgmtrate; /* mgmt frame rate (legacy/MCS|0x80) */
+ uint8_t mcastrate; /* multicast rate (legacy/MCS|0x80) */
+ uint8_t maxretry; /* max unicast data retry count */
+};
+
+/*
+ * Per-mode roaming state visible to user space. There are two
+ * thresholds that control whether roaming is considered; when
+ * either is exceeded the 802.11 layer will check the scan cache
+ * for another AP. If the cache is stale then a scan may be
+ * triggered.
+ */
+struct ieee80211_roamparam {
+ int8_t rssi; /* rssi thresh (.5 dBm) */
+ uint8_t rate; /* tx rate thresh (.5 Mb/s or MCS) */
+ uint16_t pad; /* reserve */
+};
+
+/*
+ * Regulatory Information.
+ */
+struct ieee80211_regdomain {
+ uint16_t regdomain; /* SKU */
+ uint16_t country; /* ISO country code */
+ uint8_t location; /* I (indoor), O (outdoor), other */
+ uint8_t ecm; /* Extended Channel Mode */
+ char isocc[2]; /* country code string */
+ short pad[2];
+};
+
+/*
+ * MIMO antenna/radio state.
+ */
+struct ieee80211_mimo_info {
+ int8_t rssi[3]; /* per-antenna rssi */
+ int8_t noise[3]; /* per-antenna noise floor */
+ uint8_t pad[2];
+ uint32_t evm[3]; /* EVM data */
+};
+#endif /* _NET80211__IEEE80211_HH_ */
diff --git a/rtems/freebsd/net80211/ieee80211.c b/rtems/freebsd/net80211/ieee80211.c
new file mode 100644
index 00000000..c78a6403
--- /dev/null
+++ b/rtems/freebsd/net80211/ieee80211.c
@@ -0,0 +1,1638 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 2001 Atsushi Onoe
+ * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * IEEE 802.11 generic handler
+ */
+#include <rtems/freebsd/local/opt_wlan.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/kernel.h>
+
+#include <rtems/freebsd/sys/socket.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/if_dl.h>
+#include <rtems/freebsd/net/if_media.h>
+#include <rtems/freebsd/net/if_types.h>
+#include <rtems/freebsd/net/ethernet.h>
+
+#include <rtems/freebsd/net80211/ieee80211_var.h>
+#include <rtems/freebsd/net80211/ieee80211_regdomain.h>
+#ifdef IEEE80211_SUPPORT_SUPERG
+#include <rtems/freebsd/net80211/ieee80211_superg.h>
+#endif
+#include <rtems/freebsd/net80211/ieee80211_ratectl.h>
+
+#include <rtems/freebsd/net/bpf.h>
+
+const char *ieee80211_phymode_name[IEEE80211_MODE_MAX] = {
+ [IEEE80211_MODE_AUTO] = "auto",
+ [IEEE80211_MODE_11A] = "11a",
+ [IEEE80211_MODE_11B] = "11b",
+ [IEEE80211_MODE_11G] = "11g",
+ [IEEE80211_MODE_FH] = "FH",
+ [IEEE80211_MODE_TURBO_A] = "turboA",
+ [IEEE80211_MODE_TURBO_G] = "turboG",
+ [IEEE80211_MODE_STURBO_A] = "sturboA",
+ [IEEE80211_MODE_HALF] = "half",
+ [IEEE80211_MODE_QUARTER] = "quarter",
+ [IEEE80211_MODE_11NA] = "11na",
+ [IEEE80211_MODE_11NG] = "11ng",
+};
+/* map ieee80211_opmode to the corresponding capability bit */
+const int ieee80211_opcap[IEEE80211_OPMODE_MAX] = {
+ [IEEE80211_M_IBSS] = IEEE80211_C_IBSS,
+ [IEEE80211_M_WDS] = IEEE80211_C_WDS,
+ [IEEE80211_M_STA] = IEEE80211_C_STA,
+ [IEEE80211_M_AHDEMO] = IEEE80211_C_AHDEMO,
+ [IEEE80211_M_HOSTAP] = IEEE80211_C_HOSTAP,
+ [IEEE80211_M_MONITOR] = IEEE80211_C_MONITOR,
+#ifdef IEEE80211_SUPPORT_MESH
+ [IEEE80211_M_MBSS] = IEEE80211_C_MBSS,
+#endif
+};
+
+static const uint8_t ieee80211broadcastaddr[IEEE80211_ADDR_LEN] =
+ { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
+
+static void ieee80211_syncflag_locked(struct ieee80211com *ic, int flag);
+static void ieee80211_syncflag_ht_locked(struct ieee80211com *ic, int flag);
+static void ieee80211_syncflag_ext_locked(struct ieee80211com *ic, int flag);
+static int ieee80211_media_setup(struct ieee80211com *ic,
+ struct ifmedia *media, int caps, int addsta,
+ ifm_change_cb_t media_change, ifm_stat_cb_t media_stat);
+static void ieee80211com_media_status(struct ifnet *, struct ifmediareq *);
+static int ieee80211com_media_change(struct ifnet *);
+static int media_status(enum ieee80211_opmode,
+ const struct ieee80211_channel *);
+
+MALLOC_DEFINE(M_80211_VAP, "80211vap", "802.11 vap state");
+
+/*
+ * Default supported rates for 802.11 operation (in IEEE .5Mb units).
+ */
+#define B(r) ((r) | IEEE80211_RATE_BASIC)
+static const struct ieee80211_rateset ieee80211_rateset_11a =
+ { 8, { B(12), 18, B(24), 36, B(48), 72, 96, 108 } };
+static const struct ieee80211_rateset ieee80211_rateset_half =
+ { 8, { B(6), 9, B(12), 18, B(24), 36, 48, 54 } };
+static const struct ieee80211_rateset ieee80211_rateset_quarter =
+ { 8, { B(3), 4, B(6), 9, B(12), 18, 24, 27 } };
+static const struct ieee80211_rateset ieee80211_rateset_11b =
+ { 4, { B(2), B(4), B(11), B(22) } };
+/* NB: OFDM rates are handled specially based on mode */
+static const struct ieee80211_rateset ieee80211_rateset_11g =
+ { 12, { B(2), B(4), B(11), B(22), 12, 18, 24, 36, 48, 72, 96, 108 } };
+#undef B
+
+/*
+ * Fill in 802.11 available channel set, mark
+ * all available channels as active, and pick
+ * a default channel if not already specified.
+ */
+static void
+ieee80211_chan_init(struct ieee80211com *ic)
+{
+#define DEFAULTRATES(m, def) do { \
+ if (ic->ic_sup_rates[m].rs_nrates == 0) \
+ ic->ic_sup_rates[m] = def; \
+} while (0)
+ struct ieee80211_channel *c;
+ int i;
+
+ KASSERT(0 < ic->ic_nchans && ic->ic_nchans <= IEEE80211_CHAN_MAX,
+ ("invalid number of channels specified: %u", ic->ic_nchans));
+ memset(ic->ic_chan_avail, 0, sizeof(ic->ic_chan_avail));
+ memset(ic->ic_modecaps, 0, sizeof(ic->ic_modecaps));
+ setbit(ic->ic_modecaps, IEEE80211_MODE_AUTO);
+ for (i = 0; i < ic->ic_nchans; i++) {
+ c = &ic->ic_channels[i];
+ KASSERT(c->ic_flags != 0, ("channel with no flags"));
+ /*
+ * Help drivers that work only with frequencies by filling
+ * in IEEE channel #'s if not already calculated. Note this
+ * mimics similar work done in ieee80211_setregdomain when
+ * changing regulatory state.
+ */
+ if (c->ic_ieee == 0)
+ c->ic_ieee = ieee80211_mhz2ieee(c->ic_freq,c->ic_flags);
+ if (IEEE80211_IS_CHAN_HT40(c) && c->ic_extieee == 0)
+ c->ic_extieee = ieee80211_mhz2ieee(c->ic_freq +
+ (IEEE80211_IS_CHAN_HT40U(c) ? 20 : -20),
+ c->ic_flags);
+ /* default max tx power to max regulatory */
+ if (c->ic_maxpower == 0)
+ c->ic_maxpower = 2*c->ic_maxregpower;
+ setbit(ic->ic_chan_avail, c->ic_ieee);
+ /*
+ * Identify mode capabilities.
+ */
+ if (IEEE80211_IS_CHAN_A(c))
+ setbit(ic->ic_modecaps, IEEE80211_MODE_11A);
+ if (IEEE80211_IS_CHAN_B(c))
+ setbit(ic->ic_modecaps, IEEE80211_MODE_11B);
+ if (IEEE80211_IS_CHAN_ANYG(c))
+ setbit(ic->ic_modecaps, IEEE80211_MODE_11G);
+ if (IEEE80211_IS_CHAN_FHSS(c))
+ setbit(ic->ic_modecaps, IEEE80211_MODE_FH);
+ if (IEEE80211_IS_CHAN_108A(c))
+ setbit(ic->ic_modecaps, IEEE80211_MODE_TURBO_A);
+ if (IEEE80211_IS_CHAN_108G(c))
+ setbit(ic->ic_modecaps, IEEE80211_MODE_TURBO_G);
+ if (IEEE80211_IS_CHAN_ST(c))
+ setbit(ic->ic_modecaps, IEEE80211_MODE_STURBO_A);
+ if (IEEE80211_IS_CHAN_HALF(c))
+ setbit(ic->ic_modecaps, IEEE80211_MODE_HALF);
+ if (IEEE80211_IS_CHAN_QUARTER(c))
+ setbit(ic->ic_modecaps, IEEE80211_MODE_QUARTER);
+ if (IEEE80211_IS_CHAN_HTA(c))
+ setbit(ic->ic_modecaps, IEEE80211_MODE_11NA);
+ if (IEEE80211_IS_CHAN_HTG(c))
+ setbit(ic->ic_modecaps, IEEE80211_MODE_11NG);
+ }
+ /* initialize candidate channels to all available */
+ memcpy(ic->ic_chan_active, ic->ic_chan_avail,
+ sizeof(ic->ic_chan_avail));
+
+ /* sort channel table to allow lookup optimizations */
+ ieee80211_sort_channels(ic->ic_channels, ic->ic_nchans);
+
+ /* invalidate any previous state */
+ ic->ic_bsschan = IEEE80211_CHAN_ANYC;
+ ic->ic_prevchan = NULL;
+ ic->ic_csa_newchan = NULL;
+ /* arbitrarily pick the first channel */
+ ic->ic_curchan = &ic->ic_channels[0];
+ ic->ic_rt = ieee80211_get_ratetable(ic->ic_curchan);
+
+ /* fill in well-known rate sets if the driver has not specified any */
+ DEFAULTRATES(IEEE80211_MODE_11B, ieee80211_rateset_11b);
+ DEFAULTRATES(IEEE80211_MODE_11G, ieee80211_rateset_11g);
+ DEFAULTRATES(IEEE80211_MODE_11A, ieee80211_rateset_11a);
+ DEFAULTRATES(IEEE80211_MODE_TURBO_A, ieee80211_rateset_11a);
+ DEFAULTRATES(IEEE80211_MODE_TURBO_G, ieee80211_rateset_11g);
+ DEFAULTRATES(IEEE80211_MODE_STURBO_A, ieee80211_rateset_11a);
+ DEFAULTRATES(IEEE80211_MODE_HALF, ieee80211_rateset_half);
+ DEFAULTRATES(IEEE80211_MODE_QUARTER, ieee80211_rateset_quarter);
+ DEFAULTRATES(IEEE80211_MODE_11NA, ieee80211_rateset_11a);
+ DEFAULTRATES(IEEE80211_MODE_11NG, ieee80211_rateset_11g);
+
+ /*
+ * Set auto mode to reset active channel state and any desired channel.
+ */
+ (void) ieee80211_setmode(ic, IEEE80211_MODE_AUTO);
+#undef DEFAULTRATES
+}
+
+static void
+null_update_mcast(struct ifnet *ifp)
+{
+ if_printf(ifp, "need multicast update callback\n");
+}
+
+static void
+null_update_promisc(struct ifnet *ifp)
+{
+ if_printf(ifp, "need promiscuous mode update callback\n");
+}
+
+static int
+null_transmit(struct ifnet *ifp, struct mbuf *m)
+{
+ m_freem(m);
+ ifp->if_oerrors++;
+ return EACCES; /* XXX EIO/EPERM? */
+}
+
+static int
+null_output(struct ifnet *ifp, struct mbuf *m,
+ struct sockaddr *dst, struct route *ro)
+{
+ if_printf(ifp, "discard raw packet\n");
+ return null_transmit(ifp, m);
+}
+
+static void
+null_input(struct ifnet *ifp, struct mbuf *m)
+{
+ if_printf(ifp, "if_input should not be called\n");
+ m_freem(m);
+}
+
+/*
+ * Attach/setup the common net80211 state. Called by
+ * the driver on attach, prior to creating any vap's.
+ */
+void
+ieee80211_ifattach(struct ieee80211com *ic,
+ const uint8_t macaddr[IEEE80211_ADDR_LEN])
+{
+ struct ifnet *ifp = ic->ic_ifp;
+ struct sockaddr_dl *sdl;
+ struct ifaddr *ifa;
+
+ KASSERT(ifp->if_type == IFT_IEEE80211, ("if_type %d", ifp->if_type));
+
+ IEEE80211_LOCK_INIT(ic, ifp->if_xname);
+ TAILQ_INIT(&ic->ic_vaps);
+
+ /* Create a taskqueue for all state changes */
+ ic->ic_tq = taskqueue_create("ic_taskq", M_WAITOK | M_ZERO,
+ taskqueue_thread_enqueue, &ic->ic_tq);
+ taskqueue_start_threads(&ic->ic_tq, 1, PI_NET, "%s taskq",
+ ifp->if_xname);
+ /*
+ * Fill in 802.11 available channel set, mark all
+ * available channels as active, and pick a default
+ * channel if not already specified.
+ */
+ ieee80211_media_init(ic);
+
+ ic->ic_update_mcast = null_update_mcast;
+ ic->ic_update_promisc = null_update_promisc;
+
+ ic->ic_hash_key = arc4random();
+ ic->ic_bintval = IEEE80211_BINTVAL_DEFAULT;
+ ic->ic_lintval = ic->ic_bintval;
+ ic->ic_txpowlimit = IEEE80211_TXPOWER_MAX;
+
+ ieee80211_crypto_attach(ic);
+ ieee80211_node_attach(ic);
+ ieee80211_power_attach(ic);
+ ieee80211_proto_attach(ic);
+#ifdef IEEE80211_SUPPORT_SUPERG
+ ieee80211_superg_attach(ic);
+#endif
+ ieee80211_ht_attach(ic);
+ ieee80211_scan_attach(ic);
+ ieee80211_regdomain_attach(ic);
+ ieee80211_dfs_attach(ic);
+
+ ieee80211_sysctl_attach(ic);
+
+ ifp->if_addrlen = IEEE80211_ADDR_LEN;
+ ifp->if_hdrlen = 0;
+ if_attach(ifp);
+ ifp->if_mtu = IEEE80211_MTU_MAX;
+ ifp->if_broadcastaddr = ieee80211broadcastaddr;
+ ifp->if_output = null_output;
+ ifp->if_input = null_input; /* just in case */
+ ifp->if_resolvemulti = NULL; /* NB: callers check */
+
+ ifa = ifaddr_byindex(ifp->if_index);
+ KASSERT(ifa != NULL, ("%s: no lladdr!\n", __func__));
+ sdl = (struct sockaddr_dl *)ifa->ifa_addr;
+ sdl->sdl_type = IFT_ETHER; /* XXX IFT_IEEE80211? */
+ sdl->sdl_alen = IEEE80211_ADDR_LEN;
+ IEEE80211_ADDR_COPY(LLADDR(sdl), macaddr);
+ ifa_free(ifa);
+}
+
+/*
+ * Detach net80211 state on device detach. Tear down
+ * all vap's and reclaim all common state prior to the
+ * device state going away. Note we may call back into
+ * the driver; it must be prepared for this.
+ */
+void
+ieee80211_ifdetach(struct ieee80211com *ic)
+{
+ struct ifnet *ifp = ic->ic_ifp;
+ struct ieee80211vap *vap;
+
+ if_detach(ifp);
+
+ while ((vap = TAILQ_FIRST(&ic->ic_vaps)) != NULL)
+ ieee80211_vap_destroy(vap);
+ ieee80211_waitfor_parent(ic);
+
+ ieee80211_sysctl_detach(ic);
+ ieee80211_dfs_detach(ic);
+ ieee80211_regdomain_detach(ic);
+ ieee80211_scan_detach(ic);
+#ifdef IEEE80211_SUPPORT_SUPERG
+ ieee80211_superg_detach(ic);
+#endif
+ ieee80211_ht_detach(ic);
+ /* NB: must be called before ieee80211_node_detach */
+ ieee80211_proto_detach(ic);
+ ieee80211_crypto_detach(ic);
+ ieee80211_power_detach(ic);
+ ieee80211_node_detach(ic);
+
+ ifmedia_removeall(&ic->ic_media);
+ taskqueue_free(ic->ic_tq);
+ IEEE80211_LOCK_DESTROY(ic);
+}
+
+/*
+ * Default reset method for use with the ioctl support. This
+ * method is invoked after any state change in the 802.11
+ * layer that should be propagated to the hardware but not
+ * require re-initialization of the 802.11 state machine (e.g
+ * rescanning for an ap). We always return ENETRESET which
+ * should cause the driver to re-initialize the device. Drivers
+ * can override this method to implement more optimized support.
+ */
+static int
+default_reset(struct ieee80211vap *vap, u_long cmd)
+{
+ return ENETRESET;
+}
+
+/*
+ * Prepare a vap for use. Drivers use this call to
+ * set up net80211 state in new vap's prior to attaching
+ * them with ieee80211_vap_attach (below).
+ */
+int
+ieee80211_vap_setup(struct ieee80211com *ic, struct ieee80211vap *vap,
+ const char name[IFNAMSIZ], int unit, int opmode, int flags,
+ const uint8_t bssid[IEEE80211_ADDR_LEN],
+ const uint8_t macaddr[IEEE80211_ADDR_LEN])
+{
+ struct ifnet *ifp;
+
+ ifp = if_alloc(IFT_ETHER);
+ if (ifp == NULL) {
+ if_printf(ic->ic_ifp, "%s: unable to allocate ifnet\n",
+ __func__);
+ return ENOMEM;
+ }
+ if_initname(ifp, name, unit);
+ ifp->if_softc = vap; /* back pointer */
+ ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST;
+ ifp->if_start = ieee80211_start;
+ ifp->if_ioctl = ieee80211_ioctl;
+ ifp->if_init = ieee80211_init;
+ /* NB: input+output filled in by ether_ifattach */
+ IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
+ ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
+ IFQ_SET_READY(&ifp->if_snd);
+
+ vap->iv_ifp = ifp;
+ vap->iv_ic = ic;
+ vap->iv_flags = ic->ic_flags; /* propagate common flags */
+ vap->iv_flags_ext = ic->ic_flags_ext;
+ vap->iv_flags_ven = ic->ic_flags_ven;
+ vap->iv_caps = ic->ic_caps &~ IEEE80211_C_OPMODE;
+ vap->iv_htcaps = ic->ic_htcaps;
+ vap->iv_opmode = opmode;
+ vap->iv_caps |= ieee80211_opcap[opmode];
+ switch (opmode) {
+ case IEEE80211_M_WDS:
+ /*
+ * WDS links must specify the bssid of the far end.
+ * For legacy operation this is a static relationship.
+ * For non-legacy operation the station must associate
+ * and be authorized to pass traffic. Plumbing the
+ * vap to the proper node happens when the vap
+ * transitions to RUN state.
+ */
+ IEEE80211_ADDR_COPY(vap->iv_des_bssid, bssid);
+ vap->iv_flags |= IEEE80211_F_DESBSSID;
+ if (flags & IEEE80211_CLONE_WDSLEGACY)
+ vap->iv_flags_ext |= IEEE80211_FEXT_WDSLEGACY;
+ break;
+#ifdef IEEE80211_SUPPORT_TDMA
+ case IEEE80211_M_AHDEMO:
+ if (flags & IEEE80211_CLONE_TDMA) {
+ /* NB: checked before clone operation allowed */
+ KASSERT(ic->ic_caps & IEEE80211_C_TDMA,
+ ("not TDMA capable, ic_caps 0x%x", ic->ic_caps));
+ /*
+ * Propagate TDMA capability to mark vap; this
+ * cannot be removed and is used to distinguish
+ * regular ahdemo operation from ahdemo+tdma.
+ */
+ vap->iv_caps |= IEEE80211_C_TDMA;
+ }
+ break;
+#endif
+ }
+ /* auto-enable s/w beacon miss support */
+ if (flags & IEEE80211_CLONE_NOBEACONS)
+ vap->iv_flags_ext |= IEEE80211_FEXT_SWBMISS;
+ /* auto-generated or user supplied MAC address */
+ if (flags & (IEEE80211_CLONE_BSSID|IEEE80211_CLONE_MACADDR))
+ vap->iv_flags_ext |= IEEE80211_FEXT_UNIQMAC;
+ /*
+ * Enable various functionality by default if we're
+ * capable; the driver can override us if it knows better.
+ */
+ if (vap->iv_caps & IEEE80211_C_WME)
+ vap->iv_flags |= IEEE80211_F_WME;
+ if (vap->iv_caps & IEEE80211_C_BURST)
+ vap->iv_flags |= IEEE80211_F_BURST;
+ /* NB: bg scanning only makes sense for station mode right now */
+ if (vap->iv_opmode == IEEE80211_M_STA &&
+ (vap->iv_caps & IEEE80211_C_BGSCAN))
+ vap->iv_flags |= IEEE80211_F_BGSCAN;
+ vap->iv_flags |= IEEE80211_F_DOTH; /* XXX no cap, just ena */
+ /* NB: DFS support only makes sense for ap mode right now */
+ if (vap->iv_opmode == IEEE80211_M_HOSTAP &&
+ (vap->iv_caps & IEEE80211_C_DFS))
+ vap->iv_flags_ext |= IEEE80211_FEXT_DFS;
+
+ vap->iv_des_chan = IEEE80211_CHAN_ANYC; /* any channel is ok */
+ vap->iv_bmissthreshold = IEEE80211_HWBMISS_DEFAULT;
+ vap->iv_dtim_period = IEEE80211_DTIM_DEFAULT;
+ /*
+ * Install a default reset method for the ioctl support;
+ * the driver can override this.
+ */
+ vap->iv_reset = default_reset;
+
+ IEEE80211_ADDR_COPY(vap->iv_myaddr, macaddr);
+
+ ieee80211_sysctl_vattach(vap);
+ ieee80211_crypto_vattach(vap);
+ ieee80211_node_vattach(vap);
+ ieee80211_power_vattach(vap);
+ ieee80211_proto_vattach(vap);
+#ifdef IEEE80211_SUPPORT_SUPERG
+ ieee80211_superg_vattach(vap);
+#endif
+ ieee80211_ht_vattach(vap);
+ ieee80211_scan_vattach(vap);
+ ieee80211_regdomain_vattach(vap);
+ ieee80211_radiotap_vattach(vap);
+ ieee80211_ratectl_set(vap, IEEE80211_RATECTL_NONE);
+
+ return 0;
+}
+
+/*
+ * Activate a vap. State should have been prepared with a
+ * call to ieee80211_vap_setup and by the driver. On return
+ * from this call the vap is ready for use.
+ */
+int
+ieee80211_vap_attach(struct ieee80211vap *vap,
+ ifm_change_cb_t media_change, ifm_stat_cb_t media_stat)
+{
+ struct ifnet *ifp = vap->iv_ifp;
+ struct ieee80211com *ic = vap->iv_ic;
+ struct ifmediareq imr;
+ int maxrate;
+
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_STATE,
+ "%s: %s parent %s flags 0x%x flags_ext 0x%x\n",
+ __func__, ieee80211_opmode_name[vap->iv_opmode],
+ ic->ic_ifp->if_xname, vap->iv_flags, vap->iv_flags_ext);
+
+ /*
+ * Do late attach work that cannot happen until after
+ * the driver has had a chance to override defaults.
+ */
+ ieee80211_node_latevattach(vap);
+ ieee80211_power_latevattach(vap);
+
+ maxrate = ieee80211_media_setup(ic, &vap->iv_media, vap->iv_caps,
+ vap->iv_opmode == IEEE80211_M_STA, media_change, media_stat);
+ ieee80211_media_status(ifp, &imr);
+ /* NB: strip explicit mode; we're actually in autoselect */
+ ifmedia_set(&vap->iv_media,
+ imr.ifm_active &~ (IFM_MMASK | IFM_IEEE80211_TURBO));
+ if (maxrate)
+ ifp->if_baudrate = IF_Mbps(maxrate);
+
+ ether_ifattach(ifp, vap->iv_myaddr);
+ if (vap->iv_opmode == IEEE80211_M_MONITOR) {
+ /* NB: disallow transmit */
+ ifp->if_transmit = null_transmit;
+ ifp->if_output = null_output;
+ } else {
+ /* hook output method setup by ether_ifattach */
+ vap->iv_output = ifp->if_output;
+ ifp->if_output = ieee80211_output;
+ }
+ /* NB: if_mtu set by ether_ifattach to ETHERMTU */
+
+ IEEE80211_LOCK(ic);
+ TAILQ_INSERT_TAIL(&ic->ic_vaps, vap, iv_next);
+ ieee80211_syncflag_locked(ic, IEEE80211_F_WME);
+#ifdef IEEE80211_SUPPORT_SUPERG
+ ieee80211_syncflag_locked(ic, IEEE80211_F_TURBOP);
+#endif
+ ieee80211_syncflag_locked(ic, IEEE80211_F_PCF);
+ ieee80211_syncflag_locked(ic, IEEE80211_F_BURST);
+ ieee80211_syncflag_ht_locked(ic, IEEE80211_FHT_HT);
+ ieee80211_syncflag_ht_locked(ic, IEEE80211_FHT_USEHT40);
+ ieee80211_syncifflag_locked(ic, IFF_PROMISC);
+ ieee80211_syncifflag_locked(ic, IFF_ALLMULTI);
+ IEEE80211_UNLOCK(ic);
+
+ return 1;
+}
+
+/*
+ * Tear down vap state and reclaim the ifnet.
+ * The driver is assumed to have prepared for
+ * this; e.g. by turning off interrupts for the
+ * underlying device.
+ */
+void
+ieee80211_vap_detach(struct ieee80211vap *vap)
+{
+ struct ieee80211com *ic = vap->iv_ic;
+ struct ifnet *ifp = vap->iv_ifp;
+
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_STATE, "%s: %s parent %s\n",
+ __func__, ieee80211_opmode_name[vap->iv_opmode],
+ ic->ic_ifp->if_xname);
+
+ /* NB: bpfdetach is called by ether_ifdetach and claims all taps */
+ ether_ifdetach(ifp);
+
+ ieee80211_stop(vap);
+
+ /*
+ * Flush any deferred vap tasks.
+ */
+ ieee80211_draintask(ic, &vap->iv_nstate_task);
+ ieee80211_draintask(ic, &vap->iv_swbmiss_task);
+
+ /* XXX band-aid until ifnet handles this for us */
+ taskqueue_drain(taskqueue_swi, &ifp->if_linktask);
+
+ IEEE80211_LOCK(ic);
+ KASSERT(vap->iv_state == IEEE80211_S_INIT, ("vap still running"));
+ TAILQ_REMOVE(&ic->ic_vaps, vap, iv_next);
+ ieee80211_syncflag_locked(ic, IEEE80211_F_WME);
+#ifdef IEEE80211_SUPPORT_SUPERG
+ ieee80211_syncflag_locked(ic, IEEE80211_F_TURBOP);
+#endif
+ ieee80211_syncflag_locked(ic, IEEE80211_F_PCF);
+ ieee80211_syncflag_locked(ic, IEEE80211_F_BURST);
+ ieee80211_syncflag_ht_locked(ic, IEEE80211_FHT_HT);
+ ieee80211_syncflag_ht_locked(ic, IEEE80211_FHT_USEHT40);
+ /* NB: this handles the bpfdetach done below */
+ ieee80211_syncflag_ext_locked(ic, IEEE80211_FEXT_BPF);
+ ieee80211_syncifflag_locked(ic, IFF_PROMISC);
+ ieee80211_syncifflag_locked(ic, IFF_ALLMULTI);
+ IEEE80211_UNLOCK(ic);
+
+ ifmedia_removeall(&vap->iv_media);
+
+ ieee80211_radiotap_vdetach(vap);
+ ieee80211_regdomain_vdetach(vap);
+ ieee80211_scan_vdetach(vap);
+#ifdef IEEE80211_SUPPORT_SUPERG
+ ieee80211_superg_vdetach(vap);
+#endif
+ ieee80211_ht_vdetach(vap);
+ /* NB: must be before ieee80211_node_vdetach */
+ ieee80211_proto_vdetach(vap);
+ ieee80211_crypto_vdetach(vap);
+ ieee80211_power_vdetach(vap);
+ ieee80211_node_vdetach(vap);
+ ieee80211_sysctl_vdetach(vap);
+
+ if_free(ifp);
+}
+
+/*
+ * Synchronize flag bit state in the parent ifnet structure
+ * according to the state of all vap ifnet's. This is used,
+ * for example, to handle IFF_PROMISC and IFF_ALLMULTI.
+ */
+void
+ieee80211_syncifflag_locked(struct ieee80211com *ic, int flag)
+{
+ struct ifnet *ifp = ic->ic_ifp;
+ struct ieee80211vap *vap;
+ int bit, oflags;
+
+ IEEE80211_LOCK_ASSERT(ic);
+
+ bit = 0;
+ TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next)
+ if (vap->iv_ifp->if_flags & flag) {
+ /*
+ * XXX the bridge sets PROMISC but we don't want to
+ * enable it on the device, discard here so all the
+ * drivers don't need to special-case it
+ */
+ if (flag == IFF_PROMISC &&
+ !(vap->iv_opmode == IEEE80211_M_MONITOR ||
+ (vap->iv_opmode == IEEE80211_M_AHDEMO &&
+ (vap->iv_caps & IEEE80211_C_TDMA) == 0)))
+ continue;
+ bit = 1;
+ break;
+ }
+ oflags = ifp->if_flags;
+ if (bit)
+ ifp->if_flags |= flag;
+ else
+ ifp->if_flags &= ~flag;
+ if ((ifp->if_flags ^ oflags) & flag) {
+ /* XXX should we return 1/0 and let caller do this? */
+ if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+ if (flag == IFF_PROMISC)
+ ieee80211_runtask(ic, &ic->ic_promisc_task);
+ else if (flag == IFF_ALLMULTI)
+ ieee80211_runtask(ic, &ic->ic_mcast_task);
+ }
+ }
+}
+
+/*
+ * Synchronize flag bit state in the com structure
+ * according to the state of all vap's. This is used,
+ * for example, to handle state changes via ioctls.
+ */
+static void
+ieee80211_syncflag_locked(struct ieee80211com *ic, int flag)
+{
+ struct ieee80211vap *vap;
+ int bit;
+
+ IEEE80211_LOCK_ASSERT(ic);
+
+ bit = 0;
+ TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next)
+ if (vap->iv_flags & flag) {
+ bit = 1;
+ break;
+ }
+ if (bit)
+ ic->ic_flags |= flag;
+ else
+ ic->ic_flags &= ~flag;
+}
+
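+/*
+ * Set or clear a flag on a vap and resynchronize the com state;
+ * by convention a negative flag value clears the bit, e.g.
+ * ieee80211_syncflag(vap, -IEEE80211_F_WME) clears IEEE80211_F_WME.
+ * (The same convention applies to the _ht and _ext variants below.)
+ */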
+void
+ieee80211_syncflag(struct ieee80211vap *vap, int flag)
+{
+ struct ieee80211com *ic = vap->iv_ic;
+
+ IEEE80211_LOCK(ic);
+ if (flag < 0) {
+ flag = -flag;
+ vap->iv_flags &= ~flag;
+ } else
+ vap->iv_flags |= flag;
+ ieee80211_syncflag_locked(ic, flag);
+ IEEE80211_UNLOCK(ic);
+}
+
+/*
+ * Synchronize flags_ht bit state in the com structure
+ * according to the state of all vap's. This is used,
+ * for example, to handle state changes via ioctls.
+ */
+static void
+ieee80211_syncflag_ht_locked(struct ieee80211com *ic, int flag)
+{
+ struct ieee80211vap *vap;
+ int bit;
+
+ IEEE80211_LOCK_ASSERT(ic);
+
+ bit = 0;
+ TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next)
+ if (vap->iv_flags_ht & flag) {
+ bit = 1;
+ break;
+ }
+ if (bit)
+ ic->ic_flags_ht |= flag;
+ else
+ ic->ic_flags_ht &= ~flag;
+}
+
+void
+ieee80211_syncflag_ht(struct ieee80211vap *vap, int flag)
+{
+ struct ieee80211com *ic = vap->iv_ic;
+
+ IEEE80211_LOCK(ic);
+ if (flag < 0) {
+ flag = -flag;
+ vap->iv_flags_ht &= ~flag;
+ } else
+ vap->iv_flags_ht |= flag;
+ ieee80211_syncflag_ht_locked(ic, flag);
+ IEEE80211_UNLOCK(ic);
+}
+
+/*
+ * Synchronize flags_ext bit state in the com structure
+ * according to the state of all vap's. This is used,
+ * for example, to handle state changes via ioctls.
+ */
+static void
+ieee80211_syncflag_ext_locked(struct ieee80211com *ic, int flag)
+{
+ struct ieee80211vap *vap;
+ int bit;
+
+ IEEE80211_LOCK_ASSERT(ic);
+
+ bit = 0;
+ TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next)
+ if (vap->iv_flags_ext & flag) {
+ bit = 1;
+ break;
+ }
+ if (bit)
+ ic->ic_flags_ext |= flag;
+ else
+ ic->ic_flags_ext &= ~flag;
+}
+
+void
+ieee80211_syncflag_ext(struct ieee80211vap *vap, int flag)
+{
+ struct ieee80211com *ic = vap->iv_ic;
+
+ IEEE80211_LOCK(ic);
+ if (flag < 0) {
+ flag = -flag;
+ vap->iv_flags_ext &= ~flag;
+ } else
+ vap->iv_flags_ext |= flag;
+ ieee80211_syncflag_ext_locked(ic, flag);
+ IEEE80211_UNLOCK(ic);
+}
+
+static __inline int
+mapgsm(u_int freq, u_int flags)
+{
+ freq *= 10;
+ if (flags & IEEE80211_CHAN_QUARTER)
+ freq += 5;
+ else if (flags & IEEE80211_CHAN_HALF)
+ freq += 10;
+ else
+ freq += 20;
+ /* NB: there is no 907/20 wide but leave room */
+ return (freq - 906*10) / 5;
+}
+
+static __inline int
+mappsb(u_int freq, u_int flags)
+{
+ return 37 + ((freq * 10) + ((freq % 5) == 2 ? 5 : 0) - 49400) / 5;
+}
+
+/*
+ * Convert MHz frequency to IEEE channel number.
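+ * For example, 2412 MHz with IEEE80211_CHAN_2GHZ yields
+ * (2412 - 2407) / 5 = channel 1, and 5180 MHz with
+ * IEEE80211_CHAN_5GHZ yields (5180 - 5000) / 5 = channel 36.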
+ */
+int
+ieee80211_mhz2ieee(u_int freq, u_int flags)
+{
+#define IS_FREQ_IN_PSB(_freq) ((_freq) > 4940 && (_freq) < 4990)
+ if (flags & IEEE80211_CHAN_GSM)
+ return mapgsm(freq, flags);
+ if (flags & IEEE80211_CHAN_2GHZ) { /* 2GHz band */
+ if (freq == 2484)
+ return 14;
+ if (freq < 2484)
+ return ((int) freq - 2407) / 5;
+ else
+ return 15 + ((freq - 2512) / 20);
+ } else if (flags & IEEE80211_CHAN_5GHZ) { /* 5GHz band */
+ if (freq <= 5000) {
+ /* XXX check regdomain? */
+ if (IS_FREQ_IN_PSB(freq))
+ return mappsb(freq, flags);
+ return (freq - 4000) / 5;
+ } else
+ return (freq - 5000) / 5;
+ } else { /* either, guess */
+ if (freq == 2484)
+ return 14;
+ if (freq < 2484) {
+ if (907 <= freq && freq <= 922)
+ return mapgsm(freq, flags);
+ return ((int) freq - 2407) / 5;
+ }
+ if (freq < 5000) {
+ if (IS_FREQ_IN_PSB(freq))
+ return mappsb(freq, flags);
+ else if (freq > 4900)
+ return (freq - 4000) / 5;
+ else
+ return 15 + ((freq - 2512) / 20);
+ }
+ return (freq - 5000) / 5;
+ }
+#undef IS_FREQ_IN_PSB
+}
+
+/*
+ * Convert channel to IEEE channel number.
+ */
+int
+ieee80211_chan2ieee(struct ieee80211com *ic, const struct ieee80211_channel *c)
+{
+ if (c == NULL) {
+ if_printf(ic->ic_ifp, "invalid channel (NULL)\n");
+ return 0; /* XXX */
+ }
+ return (c == IEEE80211_CHAN_ANYC ? IEEE80211_CHAN_ANY : c->ic_ieee);
+}
+
+/*
+ * Convert IEEE channel number to MHz frequency.
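+ * For example, channel 6 with IEEE80211_CHAN_2GHZ yields
+ * 2407 + 6*5 = 2437 MHz, and channel 36 with IEEE80211_CHAN_5GHZ
+ * yields 5000 + 36*5 = 5180 MHz.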
+ */
+u_int
+ieee80211_ieee2mhz(u_int chan, u_int flags)
+{
+ if (flags & IEEE80211_CHAN_GSM)
+ return 907 + 5 * (chan / 10);
+ if (flags & IEEE80211_CHAN_2GHZ) { /* 2GHz band */
+ if (chan == 14)
+ return 2484;
+ if (chan < 14)
+ return 2407 + chan*5;
+ else
+ return 2512 + ((chan-15)*20);
+ } else if (flags & IEEE80211_CHAN_5GHZ) { /* 5GHz band */
+ if (flags & (IEEE80211_CHAN_HALF|IEEE80211_CHAN_QUARTER)) {
+ chan -= 37;
+ return 4940 + chan*5 + (chan % 5 ? 2 : 0);
+ }
+ return 5000 + (chan*5);
+ } else { /* either, guess */
+ /* XXX can't distinguish PSB+GSM channels */
+ if (chan == 14)
+ return 2484;
+ if (chan < 14) /* 0-13 */
+ return 2407 + chan*5;
+ if (chan < 27) /* 15-26 */
+ return 2512 + ((chan-15)*20);
+ return 5000 + (chan*5);
+ }
+}
+
+/*
+ * Locate a channel given a frequency+flags. We cache
+ * the previous lookup to optimize switching between two
+ * channels--as happens with dynamic turbo.
+ */
+struct ieee80211_channel *
+ieee80211_find_channel(struct ieee80211com *ic, int freq, int flags)
+{
+ struct ieee80211_channel *c;
+ int i;
+
+ flags &= IEEE80211_CHAN_ALLTURBO;
+ c = ic->ic_prevchan;
+ if (c != NULL && c->ic_freq == freq &&
+ (c->ic_flags & IEEE80211_CHAN_ALLTURBO) == flags)
+ return c;
+ /* brute force search */
+ for (i = 0; i < ic->ic_nchans; i++) {
+ c = &ic->ic_channels[i];
+ if (c->ic_freq == freq &&
+ (c->ic_flags & IEEE80211_CHAN_ALLTURBO) == flags)
+ return c;
+ }
+ return NULL;
+}
+
+/*
+ * Locate a channel given a channel number+flags. We cache
+ * the previous lookup to optimize switching between two
+ * channels--as happens with dynamic turbo.
+ */
+struct ieee80211_channel *
+ieee80211_find_channel_byieee(struct ieee80211com *ic, int ieee, int flags)
+{
+ struct ieee80211_channel *c;
+ int i;
+
+ flags &= IEEE80211_CHAN_ALLTURBO;
+ c = ic->ic_prevchan;
+ if (c != NULL && c->ic_ieee == ieee &&
+ (c->ic_flags & IEEE80211_CHAN_ALLTURBO) == flags)
+ return c;
+ /* brute force search */
+ for (i = 0; i < ic->ic_nchans; i++) {
+ c = &ic->ic_channels[i];
+ if (c->ic_ieee == ieee &&
+ (c->ic_flags & IEEE80211_CHAN_ALLTURBO) == flags)
+ return c;
+ }
+ return NULL;
+}
+
+static void
+addmedia(struct ifmedia *media, int caps, int addsta, int mode, int mword)
+{
+#define ADD(_ic, _s, _o) \
+ ifmedia_add(media, \
+ IFM_MAKEWORD(IFM_IEEE80211, (_s), (_o), 0), 0, NULL)
+ static const u_int mopts[IEEE80211_MODE_MAX] = {
+ [IEEE80211_MODE_AUTO] = IFM_AUTO,
+ [IEEE80211_MODE_11A] = IFM_IEEE80211_11A,
+ [IEEE80211_MODE_11B] = IFM_IEEE80211_11B,
+ [IEEE80211_MODE_11G] = IFM_IEEE80211_11G,
+ [IEEE80211_MODE_FH] = IFM_IEEE80211_FH,
+ [IEEE80211_MODE_TURBO_A] = IFM_IEEE80211_11A|IFM_IEEE80211_TURBO,
+ [IEEE80211_MODE_TURBO_G] = IFM_IEEE80211_11G|IFM_IEEE80211_TURBO,
+ [IEEE80211_MODE_STURBO_A] = IFM_IEEE80211_11A|IFM_IEEE80211_TURBO,
+ [IEEE80211_MODE_HALF] = IFM_IEEE80211_11A, /* XXX */
+ [IEEE80211_MODE_QUARTER] = IFM_IEEE80211_11A, /* XXX */
+ [IEEE80211_MODE_11NA] = IFM_IEEE80211_11NA,
+ [IEEE80211_MODE_11NG] = IFM_IEEE80211_11NG,
+ };
+ u_int mopt;
+
+ mopt = mopts[mode];
+ if (addsta)
+ ADD(media, mword, mopt); /* STA mode has no cap */
+ if (caps & IEEE80211_C_IBSS)
+ ADD(media, mword, mopt | IFM_IEEE80211_ADHOC);
+ if (caps & IEEE80211_C_HOSTAP)
+ ADD(media, mword, mopt | IFM_IEEE80211_HOSTAP);
+ if (caps & IEEE80211_C_AHDEMO)
+ ADD(media, mword, mopt | IFM_IEEE80211_ADHOC | IFM_FLAG0);
+ if (caps & IEEE80211_C_MONITOR)
+ ADD(media, mword, mopt | IFM_IEEE80211_MONITOR);
+ if (caps & IEEE80211_C_WDS)
+ ADD(media, mword, mopt | IFM_IEEE80211_WDS);
+ if (caps & IEEE80211_C_MBSS)
+ ADD(media, mword, mopt | IFM_IEEE80211_MBSS);
+#undef ADD
+}
+
+/*
+ * Setup the media data structures according to the channel and
+ * rate tables.
+ */
+static int
+ieee80211_media_setup(struct ieee80211com *ic,
+ struct ifmedia *media, int caps, int addsta,
+ ifm_change_cb_t media_change, ifm_stat_cb_t media_stat)
+{
+ int i, j, mode, rate, maxrate, mword, r;
+ const struct ieee80211_rateset *rs;
+ struct ieee80211_rateset allrates;
+
+ /*
+ * Fill in media characteristics.
+ */
+ ifmedia_init(media, 0, media_change, media_stat);
+ maxrate = 0;
+ /*
+ * Add media for legacy operating modes.
+ */
+ memset(&allrates, 0, sizeof(allrates));
+ for (mode = IEEE80211_MODE_AUTO; mode < IEEE80211_MODE_11NA; mode++) {
+ if (isclr(ic->ic_modecaps, mode))
+ continue;
+ addmedia(media, caps, addsta, mode, IFM_AUTO);
+ if (mode == IEEE80211_MODE_AUTO)
+ continue;
+ rs = &ic->ic_sup_rates[mode];
+ for (i = 0; i < rs->rs_nrates; i++) {
+ rate = rs->rs_rates[i];
+ mword = ieee80211_rate2media(ic, rate, mode);
+ if (mword == 0)
+ continue;
+ addmedia(media, caps, addsta, mode, mword);
+ /*
+ * Add legacy rate to the collection of all rates.
+ */
+ r = rate & IEEE80211_RATE_VAL;
+ for (j = 0; j < allrates.rs_nrates; j++)
+ if (allrates.rs_rates[j] == r)
+ break;
+ if (j == allrates.rs_nrates) {
+ /* unique, add to the set */
+ allrates.rs_rates[j] = r;
+ allrates.rs_nrates++;
+ }
+ rate = (rate & IEEE80211_RATE_VAL) / 2;
+ if (rate > maxrate)
+ maxrate = rate;
+ }
+ }
+ for (i = 0; i < allrates.rs_nrates; i++) {
+ mword = ieee80211_rate2media(ic, allrates.rs_rates[i],
+ IEEE80211_MODE_AUTO);
+ if (mword == 0)
+ continue;
+ /* NB: remove media options from mword */
+ addmedia(media, caps, addsta,
+ IEEE80211_MODE_AUTO, IFM_SUBTYPE(mword));
+ }
+ /*
+ * Add HT/11n media. Note that we do not have enough
+ * bits in the media subtype to express the MCS so we
+ * use a "placeholder" media subtype and any fixed MCS
+ * must be specified with a different mechanism.
+ */
+ for (; mode <= IEEE80211_MODE_11NG; mode++) {
+ if (isclr(ic->ic_modecaps, mode))
+ continue;
+ addmedia(media, caps, addsta, mode, IFM_AUTO);
+ addmedia(media, caps, addsta, mode, IFM_IEEE80211_MCS);
+ }
+ if (isset(ic->ic_modecaps, IEEE80211_MODE_11NA) ||
+ isset(ic->ic_modecaps, IEEE80211_MODE_11NG)) {
+ addmedia(media, caps, addsta,
+ IEEE80211_MODE_AUTO, IFM_IEEE80211_MCS);
+ /* XXX could walk htrates */
+ /* XXX known array size */
+ if (ieee80211_htrates[15].ht40_rate_400ns > maxrate)
+ maxrate = ieee80211_htrates[15].ht40_rate_400ns;
+ }
+ return maxrate;
+}
+
+void
+ieee80211_media_init(struct ieee80211com *ic)
+{
+ struct ifnet *ifp = ic->ic_ifp;
+ int maxrate;
+
+ /* NB: this works because the structure is initialized to zero */
+ if (!LIST_EMPTY(&ic->ic_media.ifm_list)) {
+ /*
+ * We are re-initializing the channel list; clear
+ * the existing media state as the media routines
+ * don't suppress duplicates.
+ */
+ ifmedia_removeall(&ic->ic_media);
+ }
+ ieee80211_chan_init(ic);
+
+ /*
+ * Recalculate media settings in case new channel list changes
+ * the set of available modes.
+ */
+ maxrate = ieee80211_media_setup(ic, &ic->ic_media, ic->ic_caps, 1,
+ ieee80211com_media_change, ieee80211com_media_status);
+ /* NB: strip explicit mode; we're actually in autoselect */
+ ifmedia_set(&ic->ic_media,
+ media_status(ic->ic_opmode, ic->ic_curchan) &~
+ (IFM_MMASK | IFM_IEEE80211_TURBO));
+ if (maxrate)
+ ifp->if_baudrate = IF_Mbps(maxrate);
+
+ /* XXX need to propagate new media settings to vap's */
+}
+
+/* XXX inline or eliminate? */
+const struct ieee80211_rateset *
+ieee80211_get_suprates(struct ieee80211com *ic, const struct ieee80211_channel *c)
+{
+ /* XXX does this work for 11ng basic rates? */
+ return &ic->ic_sup_rates[ieee80211_chan2mode(c)];
+}
+
+void
+ieee80211_announce(struct ieee80211com *ic)
+{
+ struct ifnet *ifp = ic->ic_ifp;
+ int i, mode, rate, mword;
+ const struct ieee80211_rateset *rs;
+
+ /* NB: skip AUTO since it has no rates */
+ for (mode = IEEE80211_MODE_AUTO+1; mode < IEEE80211_MODE_11NA; mode++) {
+ if (isclr(ic->ic_modecaps, mode))
+ continue;
+ if_printf(ifp, "%s rates: ", ieee80211_phymode_name[mode]);
+ rs = &ic->ic_sup_rates[mode];
+ for (i = 0; i < rs->rs_nrates; i++) {
+ mword = ieee80211_rate2media(ic, rs->rs_rates[i], mode);
+ if (mword == 0)
+ continue;
+ rate = ieee80211_media2rate(mword);
+ printf("%s%d%sMbps", (i != 0 ? " " : ""),
+ rate / 2, ((rate & 0x1) != 0 ? ".5" : ""));
+ }
+ printf("\n");
+ }
+ ieee80211_ht_announce(ic);
+}
+
+void
+ieee80211_announce_channels(struct ieee80211com *ic)
+{
+ const struct ieee80211_channel *c;
+ char type;
+ int i, cw;
+
+ printf("Chan Freq CW RegPwr MinPwr MaxPwr\n");
+ for (i = 0; i < ic->ic_nchans; i++) {
+ c = &ic->ic_channels[i];
+ if (IEEE80211_IS_CHAN_ST(c))
+ type = 'S';
+ else if (IEEE80211_IS_CHAN_108A(c))
+ type = 'T';
+ else if (IEEE80211_IS_CHAN_108G(c))
+ type = 'G';
+ else if (IEEE80211_IS_CHAN_HT(c))
+ type = 'n';
+ else if (IEEE80211_IS_CHAN_A(c))
+ type = 'a';
+ else if (IEEE80211_IS_CHAN_ANYG(c))
+ type = 'g';
+ else if (IEEE80211_IS_CHAN_B(c))
+ type = 'b';
+ else
+ type = 'f';
+ if (IEEE80211_IS_CHAN_HT40(c) || IEEE80211_IS_CHAN_TURBO(c))
+ cw = 40;
+ else if (IEEE80211_IS_CHAN_HALF(c))
+ cw = 10;
+ else if (IEEE80211_IS_CHAN_QUARTER(c))
+ cw = 5;
+ else
+ cw = 20;
+ printf("%4d %4d%c %2d%c %6d %4d.%d %4d.%d\n"
+ , c->ic_ieee, c->ic_freq, type
+ , cw
+ , IEEE80211_IS_CHAN_HT40U(c) ? '+' :
+ IEEE80211_IS_CHAN_HT40D(c) ? '-' : ' '
+ , c->ic_maxregpower
+ , c->ic_minpower / 2, c->ic_minpower & 1 ? 5 : 0
+ , c->ic_maxpower / 2, c->ic_maxpower & 1 ? 5 : 0
+ );
+ }
+}
+
+static int
+media2mode(const struct ifmedia_entry *ime, uint32_t flags, uint16_t *mode)
+{
+ switch (IFM_MODE(ime->ifm_media)) {
+ case IFM_IEEE80211_11A:
+ *mode = IEEE80211_MODE_11A;
+ break;
+ case IFM_IEEE80211_11B:
+ *mode = IEEE80211_MODE_11B;
+ break;
+ case IFM_IEEE80211_11G:
+ *mode = IEEE80211_MODE_11G;
+ break;
+ case IFM_IEEE80211_FH:
+ *mode = IEEE80211_MODE_FH;
+ break;
+ case IFM_IEEE80211_11NA:
+ *mode = IEEE80211_MODE_11NA;
+ break;
+ case IFM_IEEE80211_11NG:
+ *mode = IEEE80211_MODE_11NG;
+ break;
+ case IFM_AUTO:
+ *mode = IEEE80211_MODE_AUTO;
+ break;
+ default:
+ return 0;
+ }
+ /*
+ * Turbo mode is an ``option''.
+ * XXX does not apply to AUTO
+ */
+ if (ime->ifm_media & IFM_IEEE80211_TURBO) {
+ if (*mode == IEEE80211_MODE_11A) {
+ if (flags & IEEE80211_F_TURBOP)
+ *mode = IEEE80211_MODE_TURBO_A;
+ else
+ *mode = IEEE80211_MODE_STURBO_A;
+ } else if (*mode == IEEE80211_MODE_11G)
+ *mode = IEEE80211_MODE_TURBO_G;
+ else
+ return 0;
+ }
+ /* XXX HT40 +/- */
+ return 1;
+}
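+
+/*
+ * NB: for example (illustrative), the media word
+ * IFM_IEEE80211_11A | IFM_IEEE80211_TURBO yields
+ * IEEE80211_MODE_TURBO_A when IEEE80211_F_TURBOP is set in the
+ * supplied flags and IEEE80211_MODE_STURBO_A otherwise.
+ */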
+
+/*
+ * Handle a media change request on the underlying interface.
+ */
+int
+ieee80211com_media_change(struct ifnet *ifp)
+{
+ return EINVAL;
+}
+
+/*
+ * Handle a media change request on the vap interface.
+ */
+int
+ieee80211_media_change(struct ifnet *ifp)
+{
+ struct ieee80211vap *vap = ifp->if_softc;
+ struct ifmedia_entry *ime = vap->iv_media.ifm_cur;
+ uint16_t newmode;
+
+ if (!media2mode(ime, vap->iv_flags, &newmode))
+ return EINVAL;
+ if (vap->iv_des_mode != newmode) {
+ vap->iv_des_mode = newmode;
+ /* XXX kick state machine if up+running */
+ }
+ return 0;
+}
+
+/*
+ * Common code to calculate the media status word
+ * from the operating mode and channel state.
+ */
+static int
+media_status(enum ieee80211_opmode opmode, const struct ieee80211_channel *chan)
+{
+ int status;
+
+ status = IFM_IEEE80211;
+ switch (opmode) {
+ case IEEE80211_M_STA:
+ break;
+ case IEEE80211_M_IBSS:
+ status |= IFM_IEEE80211_ADHOC;
+ break;
+ case IEEE80211_M_HOSTAP:
+ status |= IFM_IEEE80211_HOSTAP;
+ break;
+ case IEEE80211_M_MONITOR:
+ status |= IFM_IEEE80211_MONITOR;
+ break;
+ case IEEE80211_M_AHDEMO:
+ status |= IFM_IEEE80211_ADHOC | IFM_FLAG0;
+ break;
+ case IEEE80211_M_WDS:
+ status |= IFM_IEEE80211_WDS;
+ break;
+ case IEEE80211_M_MBSS:
+ status |= IFM_IEEE80211_MBSS;
+ break;
+ }
+ if (IEEE80211_IS_CHAN_HTA(chan)) {
+ status |= IFM_IEEE80211_11NA;
+ } else if (IEEE80211_IS_CHAN_HTG(chan)) {
+ status |= IFM_IEEE80211_11NG;
+ } else if (IEEE80211_IS_CHAN_A(chan)) {
+ status |= IFM_IEEE80211_11A;
+ } else if (IEEE80211_IS_CHAN_B(chan)) {
+ status |= IFM_IEEE80211_11B;
+ } else if (IEEE80211_IS_CHAN_ANYG(chan)) {
+ status |= IFM_IEEE80211_11G;
+ } else if (IEEE80211_IS_CHAN_FHSS(chan)) {
+ status |= IFM_IEEE80211_FH;
+ }
+ /* XXX else complain? */
+
+ if (IEEE80211_IS_CHAN_TURBO(chan))
+ status |= IFM_IEEE80211_TURBO;
+#if 0
+ if (IEEE80211_IS_CHAN_HT20(chan))
+ status |= IFM_IEEE80211_HT20;
+ if (IEEE80211_IS_CHAN_HT40(chan))
+ status |= IFM_IEEE80211_HT40;
+#endif
+ return status;
+}
+
+static void
+ieee80211com_media_status(struct ifnet *ifp, struct ifmediareq *imr)
+{
+ struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211vap *vap;
+
+ imr->ifm_status = IFM_AVALID;
+ TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next)
+ if (vap->iv_ifp->if_flags & IFF_UP) {
+ imr->ifm_status |= IFM_ACTIVE;
+ break;
+ }
+ imr->ifm_active = media_status(ic->ic_opmode, ic->ic_curchan);
+ if (imr->ifm_status & IFM_ACTIVE)
+ imr->ifm_current = imr->ifm_active;
+}
+
+void
+ieee80211_media_status(struct ifnet *ifp, struct ifmediareq *imr)
+{
+ struct ieee80211vap *vap = ifp->if_softc;
+ struct ieee80211com *ic = vap->iv_ic;
+ enum ieee80211_phymode mode;
+
+ imr->ifm_status = IFM_AVALID;
+ /*
+ * NB: use the current channel's mode to lock down a xmit
+ * rate only when running; otherwise we may have a mismatch
+ * in which case the rate will not be convertible.
+ */
+ if (vap->iv_state == IEEE80211_S_RUN) {
+ imr->ifm_status |= IFM_ACTIVE;
+ mode = ieee80211_chan2mode(ic->ic_curchan);
+ } else
+ mode = IEEE80211_MODE_AUTO;
+ imr->ifm_active = media_status(vap->iv_opmode, ic->ic_curchan);
+ /*
+ * Calculate a current rate if possible.
+ */
+ if (vap->iv_txparms[mode].ucastrate != IEEE80211_FIXED_RATE_NONE) {
+ /*
+ * A fixed rate is set, report that.
+ */
+ imr->ifm_active |= ieee80211_rate2media(ic,
+ vap->iv_txparms[mode].ucastrate, mode);
+ } else if (vap->iv_opmode == IEEE80211_M_STA) {
+ /*
+ * In station mode report the current transmit rate.
+ */
+ imr->ifm_active |= ieee80211_rate2media(ic,
+ vap->iv_bss->ni_txrate, mode);
+ } else
+ imr->ifm_active |= IFM_AUTO;
+ if (imr->ifm_status & IFM_ACTIVE)
+ imr->ifm_current = imr->ifm_active;
+}
+
+/*
+ * Set the current phy mode and recalculate the active channel
+ * set based on the available channels for this mode. Also
+ * select a new default/current channel if the current one is
+ * inappropriate for this mode.
+ */
+int
+ieee80211_setmode(struct ieee80211com *ic, enum ieee80211_phymode mode)
+{
+ /*
+ * Adjust basic rates in 11b/11g supported rate set.
+ * Note that if operating on a half/quarter rate channel
+ * this is a no-op as those rate sets are different
+ * and used instead.
+ */
+ if (mode == IEEE80211_MODE_11G || mode == IEEE80211_MODE_11B)
+ ieee80211_setbasicrates(&ic->ic_sup_rates[mode], mode);
+
+ ic->ic_curmode = mode;
+ ieee80211_reset_erp(ic); /* reset ERP state */
+
+ return 0;
+}
+
+/*
+ * Return the phy mode for the specified channel.
+ */
+enum ieee80211_phymode
+ieee80211_chan2mode(const struct ieee80211_channel *chan)
+{
+
+ if (IEEE80211_IS_CHAN_HTA(chan))
+ return IEEE80211_MODE_11NA;
+ else if (IEEE80211_IS_CHAN_HTG(chan))
+ return IEEE80211_MODE_11NG;
+ else if (IEEE80211_IS_CHAN_108G(chan))
+ return IEEE80211_MODE_TURBO_G;
+ else if (IEEE80211_IS_CHAN_ST(chan))
+ return IEEE80211_MODE_STURBO_A;
+ else if (IEEE80211_IS_CHAN_TURBO(chan))
+ return IEEE80211_MODE_TURBO_A;
+ else if (IEEE80211_IS_CHAN_HALF(chan))
+ return IEEE80211_MODE_HALF;
+ else if (IEEE80211_IS_CHAN_QUARTER(chan))
+ return IEEE80211_MODE_QUARTER;
+ else if (IEEE80211_IS_CHAN_A(chan))
+ return IEEE80211_MODE_11A;
+ else if (IEEE80211_IS_CHAN_ANYG(chan))
+ return IEEE80211_MODE_11G;
+ else if (IEEE80211_IS_CHAN_B(chan))
+ return IEEE80211_MODE_11B;
+ else if (IEEE80211_IS_CHAN_FHSS(chan))
+ return IEEE80211_MODE_FH;
+
+ /* NB: should not get here */
+ printf("%s: cannot map channel to mode; freq %u flags 0x%x\n",
+ __func__, chan->ic_freq, chan->ic_flags);
+ return IEEE80211_MODE_11B;
+}
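+
+/*
+ * NB: the tests above run from most to least specific; e.g.
+ * (illustrative) an HT channel in the 2GHz band is reported as
+ * IEEE80211_MODE_11NG even though it would also satisfy the
+ * later IEEE80211_IS_CHAN_ANYG check.
+ */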
+
+struct ratemedia {
+ u_int match; /* rate + mode */
+ u_int media; /* if_media rate */
+};
+
+static int
+findmedia(const struct ratemedia rates[], int n, u_int match)
+{
+ int i;
+
+ for (i = 0; i < n; i++)
+ if (rates[i].match == match)
+ return rates[i].media;
+ return IFM_AUTO;
+}
+
+/*
+ * Convert IEEE80211 rate value to ifmedia subtype.
+ * Rate is either a legacy rate in units of 0.5Mbps
+ * or an MCS index.
+ */
+int
+ieee80211_rate2media(struct ieee80211com *ic, int rate, enum ieee80211_phymode mode)
+{
+#define N(a) (sizeof(a) / sizeof(a[0]))
+ static const struct ratemedia rates[] = {
+ { 2 | IFM_IEEE80211_FH, IFM_IEEE80211_FH1 },
+ { 4 | IFM_IEEE80211_FH, IFM_IEEE80211_FH2 },
+ { 2 | IFM_IEEE80211_11B, IFM_IEEE80211_DS1 },
+ { 4 | IFM_IEEE80211_11B, IFM_IEEE80211_DS2 },
+ { 11 | IFM_IEEE80211_11B, IFM_IEEE80211_DS5 },
+ { 22 | IFM_IEEE80211_11B, IFM_IEEE80211_DS11 },
+ { 44 | IFM_IEEE80211_11B, IFM_IEEE80211_DS22 },
+ { 12 | IFM_IEEE80211_11A, IFM_IEEE80211_OFDM6 },
+ { 18 | IFM_IEEE80211_11A, IFM_IEEE80211_OFDM9 },
+ { 24 | IFM_IEEE80211_11A, IFM_IEEE80211_OFDM12 },
+ { 36 | IFM_IEEE80211_11A, IFM_IEEE80211_OFDM18 },
+ { 48 | IFM_IEEE80211_11A, IFM_IEEE80211_OFDM24 },
+ { 72 | IFM_IEEE80211_11A, IFM_IEEE80211_OFDM36 },
+ { 96 | IFM_IEEE80211_11A, IFM_IEEE80211_OFDM48 },
+ { 108 | IFM_IEEE80211_11A, IFM_IEEE80211_OFDM54 },
+ { 2 | IFM_IEEE80211_11G, IFM_IEEE80211_DS1 },
+ { 4 | IFM_IEEE80211_11G, IFM_IEEE80211_DS2 },
+ { 11 | IFM_IEEE80211_11G, IFM_IEEE80211_DS5 },
+ { 22 | IFM_IEEE80211_11G, IFM_IEEE80211_DS11 },
+ { 12 | IFM_IEEE80211_11G, IFM_IEEE80211_OFDM6 },
+ { 18 | IFM_IEEE80211_11G, IFM_IEEE80211_OFDM9 },
+ { 24 | IFM_IEEE80211_11G, IFM_IEEE80211_OFDM12 },
+ { 36 | IFM_IEEE80211_11G, IFM_IEEE80211_OFDM18 },
+ { 48 | IFM_IEEE80211_11G, IFM_IEEE80211_OFDM24 },
+ { 72 | IFM_IEEE80211_11G, IFM_IEEE80211_OFDM36 },
+ { 96 | IFM_IEEE80211_11G, IFM_IEEE80211_OFDM48 },
+ { 108 | IFM_IEEE80211_11G, IFM_IEEE80211_OFDM54 },
+ { 6 | IFM_IEEE80211_11A, IFM_IEEE80211_OFDM3 },
+ { 9 | IFM_IEEE80211_11A, IFM_IEEE80211_OFDM4 },
+ { 54 | IFM_IEEE80211_11A, IFM_IEEE80211_OFDM27 },
+ /* NB: OFDM72 doesn't really exist so we don't handle it */
+ };
+ static const struct ratemedia htrates[] = {
+ { 0, IFM_IEEE80211_MCS },
+ { 1, IFM_IEEE80211_MCS },
+ { 2, IFM_IEEE80211_MCS },
+ { 3, IFM_IEEE80211_MCS },
+ { 4, IFM_IEEE80211_MCS },
+ { 5, IFM_IEEE80211_MCS },
+ { 6, IFM_IEEE80211_MCS },
+ { 7, IFM_IEEE80211_MCS },
+ { 8, IFM_IEEE80211_MCS },
+ { 9, IFM_IEEE80211_MCS },
+ { 10, IFM_IEEE80211_MCS },
+ { 11, IFM_IEEE80211_MCS },
+ { 12, IFM_IEEE80211_MCS },
+ { 13, IFM_IEEE80211_MCS },
+ { 14, IFM_IEEE80211_MCS },
+ { 15, IFM_IEEE80211_MCS },
+ };
+ int m;
+
+ /*
+ * Check 11n rates first for match as an MCS.
+ */
+ if (mode == IEEE80211_MODE_11NA) {
+ if (rate & IEEE80211_RATE_MCS) {
+ rate &= ~IEEE80211_RATE_MCS;
+ m = findmedia(htrates, N(htrates), rate);
+ if (m != IFM_AUTO)
+ return m | IFM_IEEE80211_11NA;
+ }
+ } else if (mode == IEEE80211_MODE_11NG) {
+ /* NB: 12 is ambiguous, it will be treated as an MCS */
+ if (rate & IEEE80211_RATE_MCS) {
+ rate &= ~IEEE80211_RATE_MCS;
+ m = findmedia(htrates, N(htrates), rate);
+ if (m != IFM_AUTO)
+ return m | IFM_IEEE80211_11NG;
+ }
+ }
+ rate &= IEEE80211_RATE_VAL;
+ switch (mode) {
+ case IEEE80211_MODE_11A:
+ case IEEE80211_MODE_HALF: /* XXX good 'nuf */
+ case IEEE80211_MODE_QUARTER:
+ case IEEE80211_MODE_11NA:
+ case IEEE80211_MODE_TURBO_A:
+ case IEEE80211_MODE_STURBO_A:
+ return findmedia(rates, N(rates), rate | IFM_IEEE80211_11A);
+ case IEEE80211_MODE_11B:
+ return findmedia(rates, N(rates), rate | IFM_IEEE80211_11B);
+ case IEEE80211_MODE_FH:
+ return findmedia(rates, N(rates), rate | IFM_IEEE80211_FH);
+ case IEEE80211_MODE_AUTO:
+ /* NB: ic may be NULL for some drivers */
+ if (ic != NULL && ic->ic_phytype == IEEE80211_T_FH)
+ return findmedia(rates, N(rates),
+ rate | IFM_IEEE80211_FH);
+ /* NB: hack, 11g matches both 11b+11a rates */
+ /* fall thru... */
+ case IEEE80211_MODE_11G:
+ case IEEE80211_MODE_11NG:
+ case IEEE80211_MODE_TURBO_G:
+ return findmedia(rates, N(rates), rate | IFM_IEEE80211_11G);
+ }
+ return IFM_AUTO;
+#undef N
+}
+
+int
+ieee80211_media2rate(int mword)
+{
+#define N(a) (sizeof(a) / sizeof(a[0]))
+ static const int ieeerates[] = {
+ -1, /* IFM_AUTO */
+ 0, /* IFM_MANUAL */
+ 0, /* IFM_NONE */
+ 2, /* IFM_IEEE80211_FH1 */
+ 4, /* IFM_IEEE80211_FH2 */
+ 2, /* IFM_IEEE80211_DS1 */
+ 4, /* IFM_IEEE80211_DS2 */
+ 11, /* IFM_IEEE80211_DS5 */
+ 22, /* IFM_IEEE80211_DS11 */
+ 44, /* IFM_IEEE80211_DS22 */
+ 12, /* IFM_IEEE80211_OFDM6 */
+ 18, /* IFM_IEEE80211_OFDM9 */
+ 24, /* IFM_IEEE80211_OFDM12 */
+ 36, /* IFM_IEEE80211_OFDM18 */
+ 48, /* IFM_IEEE80211_OFDM24 */
+ 72, /* IFM_IEEE80211_OFDM36 */
+ 96, /* IFM_IEEE80211_OFDM48 */
+ 108, /* IFM_IEEE80211_OFDM54 */
+ 144, /* IFM_IEEE80211_OFDM72 */
+ 0, /* IFM_IEEE80211_DS354k */
+ 0, /* IFM_IEEE80211_DS512k */
+ 6, /* IFM_IEEE80211_OFDM3 */
+ 9, /* IFM_IEEE80211_OFDM4 */
+ 54, /* IFM_IEEE80211_OFDM27 */
+ -1, /* IFM_IEEE80211_MCS */
+ };
+ return IFM_SUBTYPE(mword) < N(ieeerates) ?
+ ieeerates[IFM_SUBTYPE(mword)] : 0;
+#undef N
+}
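+
+/*
+ * NB: example round trip (illustrative): ieee80211_rate2media
+ * maps rate 108 (54 Mb/s in 0.5 Mb/s units) in 11a mode to
+ * IFM_IEEE80211_OFDM54, and ieee80211_media2rate maps that back
+ * to 108; IFM_IEEE80211_MCS yields -1 as no unique rate exists.
+ */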
+
+/*
+ * The following hash function is adapted from "Hash Functions" by Bob Jenkins
+ * ("Algorithm Alley", Dr. Dobbs Journal, September 1997).
+ */
+#define mix(a, b, c) \
+do { \
+ a -= b; a -= c; a ^= (c >> 13); \
+ b -= c; b -= a; b ^= (a << 8); \
+ c -= a; c -= b; c ^= (b >> 13); \
+ a -= b; a -= c; a ^= (c >> 12); \
+ b -= c; b -= a; b ^= (a << 16); \
+ c -= a; c -= b; c ^= (b >> 5); \
+ a -= b; a -= c; a ^= (c >> 3); \
+ b -= c; b -= a; b ^= (a << 10); \
+ c -= a; c -= b; c ^= (b >> 15); \
+} while (/*CONSTCOND*/0)
+
+uint32_t
+ieee80211_mac_hash(const struct ieee80211com *ic,
+ const uint8_t addr[IEEE80211_ADDR_LEN])
+{
+ uint32_t a = 0x9e3779b9, b = 0x9e3779b9, c = ic->ic_hash_key;
+
+ b += addr[5] << 8;
+ b += addr[4];
+ a += addr[3] << 24;
+ a += addr[2] << 16;
+ a += addr[1] << 8;
+ a += addr[0];
+
+ mix(a, b, c);
+
+ return c;
+}
+#undef mix
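+
+/*
+ * NB: typical usage (sketch, not part of the original sources)
+ * is to bucket stations by MAC address:
+ *
+ * bucket = ieee80211_mac_hash(ic, ni->ni_macaddr) % nbuckets;
+ *
+ * folding ic_hash_key into the hash makes bucket placement
+ * difficult for an outsider to predict.
+ */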
diff --git a/rtems/freebsd/net80211/ieee80211.h b/rtems/freebsd/net80211/ieee80211.h
new file mode 100644
index 00000000..6019e0ed
--- /dev/null
+++ b/rtems/freebsd/net80211/ieee80211.h
@@ -0,0 +1,1087 @@
+/*-
+ * Copyright (c) 2001 Atsushi Onoe
+ * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+#ifndef _NET80211_IEEE80211_HH_
+#define _NET80211_IEEE80211_HH_
+
+/*
+ * 802.11 protocol definitions.
+ */
+
+#define IEEE80211_ADDR_LEN 6 /* size of 802.11 address */
+/* is 802.11 address multicast/broadcast? */
+#define IEEE80211_IS_MULTICAST(_a) (*(_a) & 0x01)
+
+typedef uint16_t ieee80211_seq;
+
+/* IEEE 802.11 PLCP header */
+struct ieee80211_plcp_hdr {
+ uint16_t i_sfd;
+ uint8_t i_signal;
+ uint8_t i_service;
+ uint16_t i_length;
+ uint16_t i_crc;
+} __packed;
+
+#define IEEE80211_PLCP_SFD 0xF3A0
+#define IEEE80211_PLCP_SERVICE 0x00
+#define IEEE80211_PLCP_SERVICE_LOCKED 0x04
+#define IEEE80211_PLCL_SERVICE_PBCC 0x08
+#define IEEE80211_PLCP_SERVICE_LENEXT5 0x20
+#define IEEE80211_PLCP_SERVICE_LENEXT6 0x40
+#define IEEE80211_PLCP_SERVICE_LENEXT7 0x80
+
+/*
+ * generic definitions for IEEE 802.11 frames
+ */
+struct ieee80211_frame {
+ uint8_t i_fc[2];
+ uint8_t i_dur[2];
+ uint8_t i_addr1[IEEE80211_ADDR_LEN];
+ uint8_t i_addr2[IEEE80211_ADDR_LEN];
+ uint8_t i_addr3[IEEE80211_ADDR_LEN];
+ uint8_t i_seq[2];
+ /* possibly followed by addr4[IEEE80211_ADDR_LEN]; */
+ /* see below */
+} __packed;
+
+struct ieee80211_qosframe {
+ uint8_t i_fc[2];
+ uint8_t i_dur[2];
+ uint8_t i_addr1[IEEE80211_ADDR_LEN];
+ uint8_t i_addr2[IEEE80211_ADDR_LEN];
+ uint8_t i_addr3[IEEE80211_ADDR_LEN];
+ uint8_t i_seq[2];
+ uint8_t i_qos[2];
+ /* possibly followed by addr4[IEEE80211_ADDR_LEN]; */
+ /* see below */
+} __packed;
+
+struct ieee80211_qoscntl {
+ uint8_t i_qos[2];
+};
+
+struct ieee80211_frame_addr4 {
+ uint8_t i_fc[2];
+ uint8_t i_dur[2];
+ uint8_t i_addr1[IEEE80211_ADDR_LEN];
+ uint8_t i_addr2[IEEE80211_ADDR_LEN];
+ uint8_t i_addr3[IEEE80211_ADDR_LEN];
+ uint8_t i_seq[2];
+ uint8_t i_addr4[IEEE80211_ADDR_LEN];
+} __packed;
+
+
+struct ieee80211_qosframe_addr4 {
+ uint8_t i_fc[2];
+ uint8_t i_dur[2];
+ uint8_t i_addr1[IEEE80211_ADDR_LEN];
+ uint8_t i_addr2[IEEE80211_ADDR_LEN];
+ uint8_t i_addr3[IEEE80211_ADDR_LEN];
+ uint8_t i_seq[2];
+ uint8_t i_addr4[IEEE80211_ADDR_LEN];
+ uint8_t i_qos[2];
+} __packed;
+
+#define IEEE80211_FC0_VERSION_MASK 0x03
+#define IEEE80211_FC0_VERSION_SHIFT 0
+#define IEEE80211_FC0_VERSION_0 0x00
+#define IEEE80211_FC0_TYPE_MASK 0x0c
+#define IEEE80211_FC0_TYPE_SHIFT 2
+#define IEEE80211_FC0_TYPE_MGT 0x00
+#define IEEE80211_FC0_TYPE_CTL 0x04
+#define IEEE80211_FC0_TYPE_DATA 0x08
+
+#define IEEE80211_FC0_SUBTYPE_MASK 0xf0
+#define IEEE80211_FC0_SUBTYPE_SHIFT 4
+/* for TYPE_MGT */
+#define IEEE80211_FC0_SUBTYPE_ASSOC_REQ 0x00
+#define IEEE80211_FC0_SUBTYPE_ASSOC_RESP 0x10
+#define IEEE80211_FC0_SUBTYPE_REASSOC_REQ 0x20
+#define IEEE80211_FC0_SUBTYPE_REASSOC_RESP 0x30
+#define IEEE80211_FC0_SUBTYPE_PROBE_REQ 0x40
+#define IEEE80211_FC0_SUBTYPE_PROBE_RESP 0x50
+#define IEEE80211_FC0_SUBTYPE_BEACON 0x80
+#define IEEE80211_FC0_SUBTYPE_ATIM 0x90
+#define IEEE80211_FC0_SUBTYPE_DISASSOC 0xa0
+#define IEEE80211_FC0_SUBTYPE_AUTH 0xb0
+#define IEEE80211_FC0_SUBTYPE_DEAUTH 0xc0
+#define IEEE80211_FC0_SUBTYPE_ACTION 0xd0
+/* for TYPE_CTL */
+#define IEEE80211_FC0_SUBTYPE_BAR 0x80
+#define IEEE80211_FC0_SUBTYPE_BA 0x90
+#define IEEE80211_FC0_SUBTYPE_PS_POLL 0xa0
+#define IEEE80211_FC0_SUBTYPE_RTS 0xb0
+#define IEEE80211_FC0_SUBTYPE_CTS 0xc0
+#define IEEE80211_FC0_SUBTYPE_ACK 0xd0
+#define IEEE80211_FC0_SUBTYPE_CF_END 0xe0
+#define IEEE80211_FC0_SUBTYPE_CF_END_ACK 0xf0
+/* for TYPE_DATA (bit combination) */
+#define IEEE80211_FC0_SUBTYPE_DATA 0x00
+#define IEEE80211_FC0_SUBTYPE_CF_ACK 0x10
+#define IEEE80211_FC0_SUBTYPE_CF_POLL 0x20
+#define IEEE80211_FC0_SUBTYPE_CF_ACPL 0x30
+#define IEEE80211_FC0_SUBTYPE_NODATA 0x40
+#define IEEE80211_FC0_SUBTYPE_CFACK 0x50
+#define IEEE80211_FC0_SUBTYPE_CFPOLL 0x60
+#define IEEE80211_FC0_SUBTYPE_CF_ACK_CF_ACK 0x70
+#define IEEE80211_FC0_SUBTYPE_QOS 0x80
+#define IEEE80211_FC0_SUBTYPE_QOS_NULL 0xc0
+
+#define IEEE80211_FC1_DIR_MASK 0x03
+#define IEEE80211_FC1_DIR_NODS 0x00 /* STA->STA */
+#define IEEE80211_FC1_DIR_TODS 0x01 /* STA->AP */
+#define IEEE80211_FC1_DIR_FROMDS 0x02 /* AP ->STA */
+#define IEEE80211_FC1_DIR_DSTODS 0x03 /* AP ->AP */
+
+#define IEEE80211_IS_DSTODS(wh) \
+ (((wh)->i_fc[1] & IEEE80211_FC1_DIR_MASK) == IEEE80211_FC1_DIR_DSTODS)
+
+#define IEEE80211_FC1_MORE_FRAG 0x04
+#define IEEE80211_FC1_RETRY 0x08
+#define IEEE80211_FC1_PWR_MGT 0x10
+#define IEEE80211_FC1_MORE_DATA 0x20
+#define IEEE80211_FC1_WEP 0x40
+#define IEEE80211_FC1_ORDER 0x80
+
+#define IEEE80211_SEQ_FRAG_MASK 0x000f
+#define IEEE80211_SEQ_FRAG_SHIFT 0
+#define IEEE80211_SEQ_SEQ_MASK 0xfff0
+#define IEEE80211_SEQ_SEQ_SHIFT 4
+#define IEEE80211_SEQ_RANGE 4096
+
+#define IEEE80211_SEQ_ADD(seq, incr) \
+ (((seq) + (incr)) & (IEEE80211_SEQ_RANGE-1))
+#define IEEE80211_SEQ_INC(seq) IEEE80211_SEQ_ADD(seq,1)
+#define IEEE80211_SEQ_SUB(a, b) \
+ (((a) + IEEE80211_SEQ_RANGE - (b)) & (IEEE80211_SEQ_RANGE-1))
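+
+/*
+ * NB: the arithmetic above is modulo IEEE80211_SEQ_RANGE, e.g.
+ * (illustrative) IEEE80211_SEQ_SUB(5, 4090) =
+ * (5 + 4096 - 4090) & 4095 = 11, so distances stay correct
+ * across the 12-bit sequence number wraparound.
+ */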
+
+#define IEEE80211_SEQ_BA_RANGE 2048 /* 2^11 */
+#define IEEE80211_SEQ_BA_BEFORE(a, b) \
+ (IEEE80211_SEQ_SUB(b, a+1) < IEEE80211_SEQ_BA_RANGE-1)
+
+#define IEEE80211_NWID_LEN 32
+#define IEEE80211_MESHID_LEN 32
+
+#define IEEE80211_QOS_TXOP 0x00ff
+/* bit 8 is reserved */
+#define IEEE80211_QOS_AMSDU 0x80
+#define IEEE80211_QOS_AMSDU_S 7
+#define IEEE80211_QOS_ACKPOLICY 0x60
+#define IEEE80211_QOS_ACKPOLICY_S 5
+#define IEEE80211_QOS_ACKPOLICY_NOACK 0x20 /* No ACK required */
+#define IEEE80211_QOS_ACKPOLICY_BA 0x60 /* Block ACK */
+#define IEEE80211_QOS_EOSP 0x10 /* End of Service Period */
+#define IEEE80211_QOS_EOSP_S 4
+#define IEEE80211_QOS_TID 0x0f
+
+/* does frame have QoS sequence control data */
+#define IEEE80211_QOS_HAS_SEQ(wh) \
+ (((wh)->i_fc[0] & \
+ (IEEE80211_FC0_TYPE_MASK | IEEE80211_FC0_SUBTYPE_QOS)) == \
+ (IEEE80211_FC0_TYPE_DATA | IEEE80211_FC0_SUBTYPE_QOS))
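+
+/*
+ * NB: e.g. (illustrative) a QoS data frame has i_fc[0] = 0x88
+ * (IEEE80211_FC0_TYPE_DATA | IEEE80211_FC0_SUBTYPE_QOS) and
+ * passes the test above; a plain data frame (0x08) does not.
+ */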
+
+/*
+ * WME/802.11e information element.
+ */
+struct ieee80211_wme_info {
+ uint8_t wme_id; /* IEEE80211_ELEMID_VENDOR */
+ uint8_t wme_len; /* length in bytes */
+ uint8_t wme_oui[3]; /* 0x00, 0x50, 0xf2 */
+ uint8_t wme_type; /* OUI type */
+ uint8_t wme_subtype; /* OUI subtype */
+ uint8_t wme_version; /* spec revision */
+ uint8_t wme_info; /* QoS info */
+} __packed;
+
+/*
+ * WME/802.11e Tspec Element
+ */
+struct ieee80211_wme_tspec {
+ uint8_t ts_id;
+ uint8_t ts_len;
+ uint8_t ts_oui[3];
+ uint8_t ts_oui_type;
+ uint8_t ts_oui_subtype;
+ uint8_t ts_version;
+ uint8_t ts_tsinfo[3];
+ uint8_t ts_nom_msdu[2];
+ uint8_t ts_max_msdu[2];
+ uint8_t ts_min_svc[4];
+ uint8_t ts_max_svc[4];
+ uint8_t ts_inactv_intv[4];
+ uint8_t ts_susp_intv[4];
+ uint8_t ts_start_svc[4];
+ uint8_t ts_min_rate[4];
+ uint8_t ts_mean_rate[4];
+ uint8_t ts_max_burst[4];
+ uint8_t ts_min_phy[4];
+ uint8_t ts_peak_rate[4];
+ uint8_t ts_delay[4];
+ uint8_t ts_surplus[2];
+ uint8_t ts_medium_time[2];
+} __packed;
+
+/*
+ * WME AC parameter field
+ */
+struct ieee80211_wme_acparams {
+ uint8_t acp_aci_aifsn;
+ uint8_t acp_logcwminmax;
+ uint16_t acp_txop;
+} __packed;
+
+#define WME_NUM_AC 4 /* 4 AC categories */
+#define WME_NUM_TID 16 /* 16 tids */
+
+#define WME_PARAM_ACI 0x60 /* Mask for ACI field */
+#define WME_PARAM_ACI_S 5 /* Shift for ACI field */
+#define WME_PARAM_ACM 0x10 /* Mask for ACM bit */
+#define WME_PARAM_ACM_S 4 /* Shift for ACM bit */
+#define WME_PARAM_AIFSN 0x0f /* Mask for aifsn field */
+#define WME_PARAM_AIFSN_S 0 /* Shift for aifsn field */
+#define WME_PARAM_LOGCWMIN 0x0f /* Mask for CwMin field (in log) */
+#define WME_PARAM_LOGCWMIN_S 0 /* Shift for CwMin field */
+#define WME_PARAM_LOGCWMAX 0xf0 /* Mask for CwMax field (in log) */
+#define WME_PARAM_LOGCWMAX_S 4 /* Shift for CwMax field */
+
+#define WME_AC_TO_TID(_ac) ( \
+ ((_ac) == WME_AC_VO) ? 6 : \
+ ((_ac) == WME_AC_VI) ? 5 : \
+ ((_ac) == WME_AC_BK) ? 1 : \
+ 0)
+
+#define TID_TO_WME_AC(_tid) ( \
+ ((_tid) == 0 || (_tid) == 3) ? WME_AC_BE : \
+ ((_tid) < 3) ? WME_AC_BK : \
+ ((_tid) < 6) ? WME_AC_VI : \
+ WME_AC_VO)
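+
+/*
+ * NB: the mapping above gives (illustrative): TIDs 0 and 3 ->
+ * WME_AC_BE, TIDs 1-2 -> WME_AC_BK, TIDs 4-5 -> WME_AC_VI, and
+ * TIDs 6 and up -> WME_AC_VO; WME_AC_TO_TID inverts it by
+ * choosing a representative TID for each AC.
+ */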
+
+/*
+ * WME Parameter Element
+ */
+struct ieee80211_wme_param {
+ uint8_t param_id;
+ uint8_t param_len;
+ uint8_t param_oui[3];
+ uint8_t param_oui_type;
+ uint8_t param_oui_subtype;
+ uint8_t param_version;
+ uint8_t param_qosInfo;
+#define WME_QOSINFO_COUNT 0x0f /* Mask for param count field */
+ uint8_t param_reserved;
+ struct ieee80211_wme_acparams params_acParams[WME_NUM_AC];
+} __packed;
+
+/*
+ * Management Notification Frame
+ */
+struct ieee80211_mnf {
+ uint8_t mnf_category;
+ uint8_t mnf_action;
+ uint8_t mnf_dialog;
+ uint8_t mnf_status;
+} __packed;
+#define MNF_SETUP_REQ 0
+#define MNF_SETUP_RESP 1
+#define MNF_TEARDOWN 2
+
+/*
+ * 802.11n Management Action Frames
+ */
+/* generic frame format */
+struct ieee80211_action {
+ uint8_t ia_category;
+ uint8_t ia_action;
+} __packed;
+
+#define IEEE80211_ACTION_CAT_SM 0 /* Spectrum Management */
+#define IEEE80211_ACTION_CAT_QOS 1 /* QoS */
+#define IEEE80211_ACTION_CAT_DLS 2 /* DLS */
+#define IEEE80211_ACTION_CAT_BA 3 /* BA */
+#define IEEE80211_ACTION_CAT_HT 7 /* HT */
+#define IEEE80211_ACTION_CAT_VENDOR 127 /* Vendor Specific */
+
+#define IEEE80211_ACTION_HT_TXCHWIDTH 0 /* recommended xmit chan width*/
+#define IEEE80211_ACTION_HT_MIMOPWRSAVE 1 /* MIMO power save */
+
+/* HT - recommended transmission channel width */
+struct ieee80211_action_ht_txchwidth {
+ struct ieee80211_action at_header;
+ uint8_t at_chwidth;
+} __packed;
+
+#define IEEE80211_A_HT_TXCHWIDTH_20 0
+#define IEEE80211_A_HT_TXCHWIDTH_2040 1
+
+/* HT - MIMO Power Save (NB: D2.04) */
+struct ieee80211_action_ht_mimopowersave {
+ struct ieee80211_action am_header;
+ uint8_t am_control;
+} __packed;
+
+#define IEEE80211_A_HT_MIMOPWRSAVE_ENA 0x01 /* PS enabled */
+#define IEEE80211_A_HT_MIMOPWRSAVE_MODE 0x02
+#define IEEE80211_A_HT_MIMOPWRSAVE_MODE_S 1
+#define IEEE80211_A_HT_MIMOPWRSAVE_DYNAMIC 0x02 /* Dynamic Mode */
+#define IEEE80211_A_HT_MIMOPWRSAVE_STATIC 0x00 /* no SM packets */
+/* bits 2-7 reserved */
+
+/* Block Ack actions */
+#define IEEE80211_ACTION_BA_ADDBA_REQUEST 0 /* ADDBA request */
+#define IEEE80211_ACTION_BA_ADDBA_RESPONSE 1 /* ADDBA response */
+#define IEEE80211_ACTION_BA_DELBA 2 /* DELBA */
+
+/* Block Ack Parameter Set */
+#define IEEE80211_BAPS_BUFSIZ 0xffc0 /* buffer size */
+#define IEEE80211_BAPS_BUFSIZ_S 6
+#define IEEE80211_BAPS_TID 0x003c /* TID */
+#define IEEE80211_BAPS_TID_S 2
+#define IEEE80211_BAPS_POLICY 0x0002 /* block ack policy */
+#define IEEE80211_BAPS_POLICY_S 1
+
+#define IEEE80211_BAPS_POLICY_DELAYED (0<<IEEE80211_BAPS_POLICY_S)
+#define IEEE80211_BAPS_POLICY_IMMEDIATE (1<<IEEE80211_BAPS_POLICY_S)
+
+/* Block Ack Sequence Control */
+#define IEEE80211_BASEQ_START 0xfff0 /* starting seqnum */
+#define IEEE80211_BASEQ_START_S 4
+#define IEEE80211_BASEQ_FRAG 0x000f /* fragment number */
+#define IEEE80211_BASEQ_FRAG_S 0
+
+/* Delayed Block Ack Parameter Set */
+#define IEEE80211_DELBAPS_TID 0xf000 /* TID */
+#define IEEE80211_DELBAPS_TID_S 12
+#define IEEE80211_DELBAPS_INIT 0x0800 /* initiator */
+#define IEEE80211_DELBAPS_INIT_S 11
+
+/* BA - ADDBA request */
+struct ieee80211_action_ba_addbarequest {
+ struct ieee80211_action rq_header;
+ uint8_t rq_dialogtoken;
+ uint16_t rq_baparamset;
+ uint16_t rq_batimeout; /* in TUs */
+ uint16_t rq_baseqctl;
+} __packed;
+
+/* BA - ADDBA response */
+struct ieee80211_action_ba_addbaresponse {
+ struct ieee80211_action rs_header;
+ uint8_t rs_dialogtoken;
+ uint16_t rs_statuscode;
+ uint16_t rs_baparamset;
+ uint16_t rs_batimeout; /* in TUs */
+} __packed;
+
+/* BA - DELBA */
+struct ieee80211_action_ba_delba {
+ struct ieee80211_action dl_header;
+ uint16_t dl_baparamset;
+ uint16_t dl_reasoncode;
+} __packed;
+
+/* BAR Control */
+#define IEEE80211_BAR_TID 0xf000 /* TID */
+#define IEEE80211_BAR_TID_S 12
+#define IEEE80211_BAR_COMP 0x0004 /* Compressed Bitmap */
+#define IEEE80211_BAR_MTID 0x0002 /* Multi-TID */
+#define IEEE80211_BAR_NOACK 0x0001 /* No-Ack policy */
+
+/* BAR Starting Sequence Control */
+#define IEEE80211_BAR_SEQ_START 0xfff0 /* starting seqnum */
+#define IEEE80211_BAR_SEQ_START_S 4
+
+struct ieee80211_ba_request {
+ uint16_t rq_barctl;
+ uint16_t rq_barseqctl;
+} __packed;
+
+/*
+ * Control frames.
+ */
+struct ieee80211_frame_min {
+ uint8_t i_fc[2];
+ uint8_t i_dur[2];
+ uint8_t i_addr1[IEEE80211_ADDR_LEN];
+ uint8_t i_addr2[IEEE80211_ADDR_LEN];
+ /* FCS */
+} __packed;
+
+struct ieee80211_frame_rts {
+ uint8_t i_fc[2];
+ uint8_t i_dur[2];
+ uint8_t i_ra[IEEE80211_ADDR_LEN];
+ uint8_t i_ta[IEEE80211_ADDR_LEN];
+ /* FCS */
+} __packed;
+
+struct ieee80211_frame_cts {
+ uint8_t i_fc[2];
+ uint8_t i_dur[2];
+ uint8_t i_ra[IEEE80211_ADDR_LEN];
+ /* FCS */
+} __packed;
+
+struct ieee80211_frame_ack {
+ uint8_t i_fc[2];
+ uint8_t i_dur[2];
+ uint8_t i_ra[IEEE80211_ADDR_LEN];
+ /* FCS */
+} __packed;
+
+struct ieee80211_frame_pspoll {
+ uint8_t i_fc[2];
+ uint8_t i_aid[2];
+ uint8_t i_bssid[IEEE80211_ADDR_LEN];
+ uint8_t i_ta[IEEE80211_ADDR_LEN];
+ /* FCS */
+} __packed;
+
+struct ieee80211_frame_cfend { /* NB: also CF-End+CF-Ack */
+ uint8_t i_fc[2];
+ uint8_t i_dur[2]; /* should be zero */
+ uint8_t i_ra[IEEE80211_ADDR_LEN];
+ uint8_t i_bssid[IEEE80211_ADDR_LEN];
+ /* FCS */
+} __packed;
+
+struct ieee80211_frame_bar {
+ uint8_t i_fc[2];
+ uint8_t i_dur[2];
+ uint8_t i_ra[IEEE80211_ADDR_LEN];
+ uint8_t i_ta[IEEE80211_ADDR_LEN];
+ uint16_t i_ctl;
+ uint16_t i_seq;
+ /* FCS */
+} __packed;
+
+/*
+ * BEACON management packets
+ *
+ * octet timestamp[8]
+ * octet beacon interval[2]
+ * octet capability information[2]
+ * information element
+ * octet elemid
+ * octet length
+ * octet information[length]
+ */
+
+#define IEEE80211_BEACON_INTERVAL(beacon) \
+ ((beacon)[8] | ((beacon)[9] << 8))
+#define IEEE80211_BEACON_CAPABILITY(beacon) \
+ ((beacon)[10] | ((beacon)[11] << 8))
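+
+/*
+ * NB: both fields are little-endian, e.g. (illustrative) a
+ * beacon whose octets 8-9 are 0x64 0x00 advertises an interval
+ * of 100 TU (~102.4 ms).
+ */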
+
+#define IEEE80211_CAPINFO_ESS 0x0001
+#define IEEE80211_CAPINFO_IBSS 0x0002
+#define IEEE80211_CAPINFO_CF_POLLABLE 0x0004
+#define IEEE80211_CAPINFO_CF_POLLREQ 0x0008
+#define IEEE80211_CAPINFO_PRIVACY 0x0010
+#define IEEE80211_CAPINFO_SHORT_PREAMBLE 0x0020
+#define IEEE80211_CAPINFO_PBCC 0x0040
+#define IEEE80211_CAPINFO_CHNL_AGILITY 0x0080
+#define IEEE80211_CAPINFO_SPECTRUM_MGMT 0x0100
+/* bit 9 is reserved */
+#define IEEE80211_CAPINFO_SHORT_SLOTTIME 0x0400
+#define IEEE80211_CAPINFO_RSN 0x0800
+/* bit 12 is reserved */
+#define IEEE80211_CAPINFO_DSSSOFDM 0x2000
+/* bits 14-15 are reserved */
+
+#define IEEE80211_CAPINFO_BITS \
+ "\20\1ESS\2IBSS\3CF_POLLABLE\4CF_POLLREQ\5PRIVACY\6SHORT_PREAMBLE" \
+ "\7PBCC\10CHNL_AGILITY\11SPECTRUM_MGMT\13SHORT_SLOTTIME\14RSN" \
+ "\16DSSOFDM"
+
+/*
+ * 802.11i/WPA information element (maximally sized).
+ */
+struct ieee80211_ie_wpa {
+ uint8_t wpa_id; /* IEEE80211_ELEMID_VENDOR */
+ uint8_t wpa_len; /* length in bytes */
+ uint8_t wpa_oui[3]; /* 0x00, 0x50, 0xf2 */
+ uint8_t wpa_type; /* OUI type */
+ uint16_t wpa_version; /* spec revision */
+ uint32_t wpa_mcipher[1]; /* multicast/group key cipher */
+ uint16_t wpa_uciphercnt; /* # pairwise key ciphers */
+ uint32_t wpa_uciphers[8];/* ciphers */
+ uint16_t wpa_authselcnt; /* authentication selector cnt*/
+ uint32_t wpa_authsels[8];/* selectors */
+ uint16_t wpa_caps; /* 802.11i capabilities */
+ uint16_t wpa_pmkidcnt; /* 802.11i pmkid count */
+ uint16_t wpa_pmkids[8]; /* 802.11i pmkids */
+} __packed;
+
+/*
+ * 802.11n HT Capability IE
+ * NB: these reflect D1.10
+ */
+struct ieee80211_ie_htcap {
+ uint8_t hc_id; /* element ID */
+ uint8_t hc_len; /* length in bytes */
+ uint16_t hc_cap; /* HT caps (see below) */
+ uint8_t hc_param; /* HT params (see below) */
+ uint8_t hc_mcsset[16]; /* supported MCS set */
+ uint16_t hc_extcap; /* extended HT capabilities */
+ uint32_t hc_txbf; /* txbf capabilities */
+ uint8_t hc_antenna; /* antenna capabilities */
+} __packed;
+
+/* HT capability flags (ht_cap) */
+#define IEEE80211_HTCAP_LDPC 0x0001 /* LDPC supported */
+#define IEEE80211_HTCAP_CHWIDTH40 0x0002 /* 20/40 supported */
+#define IEEE80211_HTCAP_SMPS 0x000c /* SM Power Save mode */
+#define IEEE80211_HTCAP_SMPS_OFF 0x000c /* disabled */
+#define IEEE80211_HTCAP_SMPS_DYNAMIC 0x0004 /* send RTS first */
+/* NB: SMPS value 2 is reserved */
+#define IEEE80211_HTCAP_SMPS_ENA 0x0000 /* enabled (static mode) */
+#define IEEE80211_HTCAP_GREENFIELD 0x0010 /* Greenfield supported */
+#define IEEE80211_HTCAP_SHORTGI20 0x0020 /* Short GI in 20MHz */
+#define IEEE80211_HTCAP_SHORTGI40 0x0040 /* Short GI in 40MHz */
+#define IEEE80211_HTCAP_TXSTBC 0x0080 /* STBC tx ok */
+#define IEEE80211_HTCAP_RXSTBC 0x0300 /* STBC rx support */
+#define IEEE80211_HTCAP_RXSTBC_S 8
+#define IEEE80211_HTCAP_RXSTBC_1STREAM 0x0100 /* 1 spatial stream */
+#define IEEE80211_HTCAP_RXSTBC_2STREAM 0x0200 /* 1-2 spatial streams*/
+#define IEEE80211_HTCAP_RXSTBC_3STREAM 0x0300 /* 1-3 spatial streams*/
+#define IEEE80211_HTCAP_DELBA 0x0400 /* HT DELBA supported */
+#define IEEE80211_HTCAP_MAXAMSDU 0x0800 /* max A-MSDU length */
+#define IEEE80211_HTCAP_MAXAMSDU_7935 0x0800 /* 7935 octets */
+#define IEEE80211_HTCAP_MAXAMSDU_3839 0x0000 /* 3839 octets */
+#define IEEE80211_HTCAP_DSSSCCK40 0x1000 /* DSSS/CCK in 40MHz */
+#define IEEE80211_HTCAP_PSMP 0x2000 /* PSMP supported */
+#define IEEE80211_HTCAP_40INTOLERANT 0x4000 /* 40MHz intolerant */
+#define IEEE80211_HTCAP_LSIGTXOPPROT 0x8000 /* L-SIG TXOP prot */
+
+#define IEEE80211_HTCAP_BITS \
+ "\20\1LDPC\2CHWIDTH40\5GREENFIELD\6SHORTGI20\7SHORTGI40\10TXSTBC" \
+ "\13DELBA\14AMSDU(7935)\15DSSSCCK40\16PSMP\1740INTOLERANT" \
+ "\20LSIGTXOPPROT"
+
+/* HT parameters (hc_param) */
+#define IEEE80211_HTCAP_MAXRXAMPDU 0x03 /* max rx A-MPDU factor */
+#define IEEE80211_HTCAP_MAXRXAMPDU_S 0
+#define IEEE80211_HTCAP_MAXRXAMPDU_8K 0
+#define IEEE80211_HTCAP_MAXRXAMPDU_16K 1
+#define IEEE80211_HTCAP_MAXRXAMPDU_32K 2
+#define IEEE80211_HTCAP_MAXRXAMPDU_64K 3
+#define IEEE80211_HTCAP_MPDUDENSITY 0x1c /* min MPDU start spacing */
+#define IEEE80211_HTCAP_MPDUDENSITY_S 2
+#define IEEE80211_HTCAP_MPDUDENSITY_NA 0 /* no time restriction */
+#define IEEE80211_HTCAP_MPDUDENSITY_025 1 /* 1/4 us */
+#define IEEE80211_HTCAP_MPDUDENSITY_05 2 /* 1/2 us */
+#define IEEE80211_HTCAP_MPDUDENSITY_1 3 /* 1 us */
+#define IEEE80211_HTCAP_MPDUDENSITY_2 4 /* 2 us */
+#define IEEE80211_HTCAP_MPDUDENSITY_4 5 /* 4 us */
+#define IEEE80211_HTCAP_MPDUDENSITY_8 6 /* 8 us */
+#define IEEE80211_HTCAP_MPDUDENSITY_16 7 /* 16 us */
+
+/* HT extended capabilities (hc_extcap) */
+#define IEEE80211_HTCAP_PCO 0x0001 /* PCO capable */
+#define IEEE80211_HTCAP_PCOTRANS 0x0006 /* PCO transition time */
+#define IEEE80211_HTCAP_PCOTRANS_S 1
+#define IEEE80211_HTCAP_PCOTRANS_04 0x0002 /* 400 us */
+#define IEEE80211_HTCAP_PCOTRANS_15 0x0004 /* 1.5 ms */
+#define IEEE80211_HTCAP_PCOTRANS_5 0x0006 /* 5 ms */
+/* bits 3-7 reserved */
+#define IEEE80211_HTCAP_MCSFBACK 0x0300 /* MCS feedback */
+#define IEEE80211_HTCAP_MCSFBACK_S 8
+#define IEEE80211_HTCAP_MCSFBACK_NONE 0x0000 /* nothing provided */
+#define IEEE80211_HTCAP_MCSFBACK_UNSOL 0x0200 /* unsolicited feedback */
+#define IEEE80211_HTCAP_MCSFBACK_MRQ 0x0300 /* " "+respond to MRQ */
+#define IEEE80211_HTCAP_HTC 0x0400 /* +HTC support */
+#define IEEE80211_HTCAP_RDR 0x0800 /* reverse direction responder*/
+/* bits 12-15 reserved */
+
+/*
+ * 802.11n HT Information IE
+ */
+struct ieee80211_ie_htinfo {
+ uint8_t hi_id; /* element ID */
+ uint8_t hi_len; /* length in bytes */
+ uint8_t hi_ctrlchannel; /* primary channel */
+ uint8_t hi_byte1; /* ht ie byte 1 */
+ uint8_t hi_byte2; /* ht ie byte 2 */
+ uint8_t hi_byte3; /* ht ie byte 3 */
+ uint16_t hi_byte45; /* ht ie bytes 4+5 */
+ uint8_t hi_basicmcsset[16]; /* basic MCS set */
+} __packed;
+
+/* byte1 */
+#define IEEE80211_HTINFO_2NDCHAN 0x03 /* secondary/ext chan offset */
+#define IEEE80211_HTINFO_2NDCHAN_S 0
+#define IEEE80211_HTINFO_2NDCHAN_NONE 0x00 /* no secondary/ext channel */
+#define IEEE80211_HTINFO_2NDCHAN_ABOVE 0x01 /* above primary channel */
+/* NB: 2 is reserved */
+#define IEEE80211_HTINFO_2NDCHAN_BELOW 0x03 /* below primary channel */
+#define IEEE80211_HTINFO_TXWIDTH 0x04 /* tx channel width */
+#define IEEE80211_HTINFO_TXWIDTH_20 0x00 /* 20MHz width */
+#define IEEE80211_HTINFO_TXWIDTH_2040 0x04 /* any supported width */
+#define IEEE80211_HTINFO_RIFSMODE 0x08 /* Reduced IFS (RIFS) use */
+#define IEEE80211_HTINFO_RIFSMODE_PROH 0x00 /* RIFS use prohibited */
+#define IEEE80211_HTINFO_RIFSMODE_PERM 0x08 /* RIFS use permitted */
+#define IEEE80211_HTINFO_PMSPONLY 0x10 /* PSMP required to associate */
+#define IEEE80211_HTINFO_SIGRAN 0xe0 /* shortest Service Interval */
+#define IEEE80211_HTINFO_SIGRAN_S 5
+#define IEEE80211_HTINFO_SIGRAN_5 0x00 /* 5 ms */
+/* XXX add rest */
+
+/* bytes 2+3 */
+#define IEEE80211_HTINFO_OPMODE 0x03 /* operating mode */
+#define IEEE80211_HTINFO_OPMODE_S 0
+#define IEEE80211_HTINFO_OPMODE_PURE 0x00 /* no protection */
+#define IEEE80211_HTINFO_OPMODE_PROTOPT 0x01 /* protection optional */
+#define IEEE80211_HTINFO_OPMODE_HT20PR 0x02 /* protection for HT20 sta's */
+#define IEEE80211_HTINFO_OPMODE_MIXED 0x03 /* protection for legacy sta's*/
+#define IEEE80211_HTINFO_NONGF_PRESENT 0x04 /* non-GF sta's present */
+#define IEEE80211_HTINFO_TXBL 0x08 /* transmit burst limit */
+#define IEEE80211_HTINFO_NONHT_PRESENT 0x10 /* non-HT sta's present */
+/* bits 5-15 reserved */
+
+/* bytes 4+5 */
+#define IEEE80211_HTINFO_2NDARYBEACON 0x01
+#define IEEE80211_HTINFO_LSIGTXOPPROT 0x02
+#define IEEE80211_HTINFO_PCO_ACTIVE 0x04
+#define IEEE80211_HTINFO_40MHZPHASE 0x08
+
+/* byte5 */
+#define IEEE80211_HTINFO_BASIC_STBCMCS 0x7f
+#define IEEE80211_HTINFO_BASIC_STBCMCS_S 0
+#define IEEE80211_HTINFO_DUALPROTECTED 0x80
+
+/*
+ * Management information element payloads.
+ */
+
+enum {
+ IEEE80211_ELEMID_SSID = 0,
+ IEEE80211_ELEMID_RATES = 1,
+ IEEE80211_ELEMID_FHPARMS = 2,
+ IEEE80211_ELEMID_DSPARMS = 3,
+ IEEE80211_ELEMID_CFPARMS = 4,
+ IEEE80211_ELEMID_TIM = 5,
+ IEEE80211_ELEMID_IBSSPARMS = 6,
+ IEEE80211_ELEMID_COUNTRY = 7,
+ IEEE80211_ELEMID_CHALLENGE = 16,
+ /* 17-31 reserved for challenge text extension */
+ IEEE80211_ELEMID_PWRCNSTR = 32,
+ IEEE80211_ELEMID_PWRCAP = 33,
+ IEEE80211_ELEMID_TPCREQ = 34,
+ IEEE80211_ELEMID_TPCREP = 35,
+ IEEE80211_ELEMID_SUPPCHAN = 36,
+ IEEE80211_ELEMID_CSA = 37,
+ IEEE80211_ELEMID_MEASREQ = 38,
+ IEEE80211_ELEMID_MEASREP = 39,
+ IEEE80211_ELEMID_QUIET = 40,
+ IEEE80211_ELEMID_IBSSDFS = 41,
+ IEEE80211_ELEMID_ERP = 42,
+ IEEE80211_ELEMID_HTCAP = 45,
+ IEEE80211_ELEMID_RSN = 48,
+ IEEE80211_ELEMID_XRATES = 50,
+ IEEE80211_ELEMID_HTINFO = 61,
+ IEEE80211_ELEMID_TPC = 150,
+ IEEE80211_ELEMID_CCKM = 156,
+ IEEE80211_ELEMID_VENDOR = 221, /* vendor private */
+
+ /*
+ * 802.11s IEs are based on the D3.03 spec and were not assigned
+ * by ANA. Beware of changing them, as some are kept compatible
+ * with Linux.
+ */
+ IEEE80211_ELEMID_MESHCONF = 51,
+ IEEE80211_ELEMID_MESHID = 52,
+ IEEE80211_ELEMID_MESHLINK = 35,
+ IEEE80211_ELEMID_MESHCNGST = 36,
+ IEEE80211_ELEMID_MESHPEER = 55,
+ IEEE80211_ELEMID_MESHCSA = 38,
+ IEEE80211_ELEMID_MESHTIM = 39,
+ IEEE80211_ELEMID_MESHAWAKEW = 40,
+ IEEE80211_ELEMID_MESHBEACONT = 41,
+ IEEE80211_ELEMID_MESHPANN = 48,
+ IEEE80211_ELEMID_MESHRANN = 49,
+ IEEE80211_ELEMID_MESHPREQ = 68,
+ IEEE80211_ELEMID_MESHPREP = 69,
+ IEEE80211_ELEMID_MESHPERR = 70,
+ IEEE80211_ELEMID_MESHPXU = 53,
+ IEEE80211_ELEMID_MESHPXUC = 54,
+ IEEE80211_ELEMID_MESHAH = 60, /* Abbreviated Handshake */
+};
+
+struct ieee80211_tim_ie {
+ uint8_t tim_ie; /* IEEE80211_ELEMID_TIM */
+ uint8_t tim_len;
+ uint8_t tim_count; /* DTIM count */
+ uint8_t tim_period; /* DTIM period */
+ uint8_t tim_bitctl; /* bitmap control */
+ uint8_t tim_bitmap[1]; /* variable-length bitmap */
+} __packed;
+
+struct ieee80211_country_ie {
+ uint8_t ie; /* IEEE80211_ELEMID_COUNTRY */
+ uint8_t len;
+ uint8_t cc[3]; /* ISO CC+(I)ndoor/(O)utdoor */
+ struct {
+ uint8_t schan; /* starting channel */
+ uint8_t nchan; /* number of channels */
+ uint8_t maxtxpwr; /* tx power cap */
+ } __packed band[1]; /* sub bands (NB: var size) */
+} __packed;
+
+#define IEEE80211_COUNTRY_MAX_BANDS 84 /* max possible bands */
+#define IEEE80211_COUNTRY_MAX_SIZE \
+ (sizeof(struct ieee80211_country_ie) + 3*(IEEE80211_COUNTRY_MAX_BANDS-1))
+
+/*
+ * 802.11h Channel Switch Announcement (CSA).
+ */
+struct ieee80211_csa_ie {
+ uint8_t csa_ie; /* IEEE80211_ELEMID_CHANSWITCHANN */
+ uint8_t csa_len;
+ uint8_t csa_mode; /* Channel Switch Mode */
+ uint8_t csa_newchan; /* New Channel Number */
+ uint8_t csa_count; /* Channel Switch Count */
+} __packed;
+
+/*
+ * Note the min acceptable CSA count is used to guard against
+ * malicious CSA injection in station mode. Defining this value
+ * as other than 0 violates the 11h spec.
+ */
+#define IEEE80211_CSA_COUNT_MIN 2
+#define IEEE80211_CSA_COUNT_MAX 255
+
+/* rate set entries are in .5 Mb/s units, and potentially marked as basic */
+#define IEEE80211_RATE_BASIC 0x80
+#define IEEE80211_RATE_VAL 0x7f
+
+/* ERP information element flags */
+#define IEEE80211_ERP_NON_ERP_PRESENT 0x01
+#define IEEE80211_ERP_USE_PROTECTION 0x02
+#define IEEE80211_ERP_LONG_PREAMBLE 0x04
+
+#define IEEE80211_ERP_BITS \
+ "\20\1NON_ERP_PRESENT\2USE_PROTECTION\3LONG_PREAMBLE"
+
+#define ATH_OUI 0x7f0300 /* Atheros OUI */
+#define ATH_OUI_TYPE 0x01 /* Atheros protocol ie */
+
+/* NB: Atheros allocated the OUI for this purpose ~2005 but beware ... */
+#define TDMA_OUI ATH_OUI
+#define TDMA_OUI_TYPE 0x02 /* TDMA protocol ie */
+
+#define BCM_OUI 0x4c9000 /* Broadcom OUI */
+#define BCM_OUI_HTCAP 51 /* pre-draft HTCAP ie */
+#define BCM_OUI_HTINFO 52 /* pre-draft HTINFO ie */
+
+#define WPA_OUI 0xf25000
+#define WPA_OUI_TYPE 0x01
+#define WPA_VERSION 1 /* current supported version */
+
+#define WPA_CSE_NULL 0x00
+#define WPA_CSE_WEP40 0x01
+#define WPA_CSE_TKIP 0x02
+#define WPA_CSE_CCMP 0x04
+#define WPA_CSE_WEP104 0x05
+
+#define WPA_ASE_NONE 0x00
+#define WPA_ASE_8021X_UNSPEC 0x01
+#define WPA_ASE_8021X_PSK 0x02
+
+#define WPS_OUI_TYPE 0x04
+
+#define RSN_OUI 0xac0f00
+#define RSN_VERSION 1 /* current supported version */
+
+#define RSN_CSE_NULL 0x00
+#define RSN_CSE_WEP40 0x01
+#define RSN_CSE_TKIP 0x02
+#define RSN_CSE_WRAP 0x03
+#define RSN_CSE_CCMP 0x04
+#define RSN_CSE_WEP104 0x05
+
+#define RSN_ASE_NONE 0x00
+#define RSN_ASE_8021X_UNSPEC 0x01
+#define RSN_ASE_8021X_PSK 0x02
+
+#define RSN_CAP_PREAUTH 0x01
+
+#define WME_OUI 0xf25000
+#define WME_OUI_TYPE 0x02
+#define WME_INFO_OUI_SUBTYPE 0x00
+#define WME_PARAM_OUI_SUBTYPE 0x01
+#define WME_VERSION 1
+
+/* WME stream classes */
+#define WME_AC_BE 0 /* best effort */
+#define WME_AC_BK 1 /* background */
+#define WME_AC_VI 2 /* video */
+#define WME_AC_VO 3 /* voice */
+
+/*
+ * AUTH management packets
+ *
+ * octet algo[2]
+ * octet seq[2]
+ * octet status[2]
+ * octet chal.id
+ * octet chal.length
+ * octet chal.text[253] NB: 1-253 bytes
+ */
+
+/* challenge length for shared key auth */
+#define IEEE80211_CHALLENGE_LEN 128
+
+#define IEEE80211_AUTH_ALG_OPEN 0x0000
+#define IEEE80211_AUTH_ALG_SHARED 0x0001
+#define IEEE80211_AUTH_ALG_LEAP 0x0080
+
+enum {
+ IEEE80211_AUTH_OPEN_REQUEST = 1,
+ IEEE80211_AUTH_OPEN_RESPONSE = 2,
+};
+
+enum {
+ IEEE80211_AUTH_SHARED_REQUEST = 1,
+ IEEE80211_AUTH_SHARED_CHALLENGE = 2,
+ IEEE80211_AUTH_SHARED_RESPONSE = 3,
+ IEEE80211_AUTH_SHARED_PASS = 4,
+};
+
+/*
+ * Reason and status codes.
+ *
+ * Reason codes are used in management frames to indicate why an
+ * action took place (e.g. on disassociation). Status codes are
+ * used in management frames to indicate the result of an operation.
+ *
+ * Unlisted codes are reserved
+ */
+
+enum {
+ IEEE80211_REASON_UNSPECIFIED = 1,
+ IEEE80211_REASON_AUTH_EXPIRE = 2,
+ IEEE80211_REASON_AUTH_LEAVE = 3,
+ IEEE80211_REASON_ASSOC_EXPIRE = 4,
+ IEEE80211_REASON_ASSOC_TOOMANY = 5,
+ IEEE80211_REASON_NOT_AUTHED = 6,
+ IEEE80211_REASON_NOT_ASSOCED = 7,
+ IEEE80211_REASON_ASSOC_LEAVE = 8,
+ IEEE80211_REASON_ASSOC_NOT_AUTHED = 9,
+ IEEE80211_REASON_DISASSOC_PWRCAP_BAD = 10, /* 11h */
+ IEEE80211_REASON_DISASSOC_SUPCHAN_BAD = 11, /* 11h */
+ IEEE80211_REASON_IE_INVALID = 13, /* 11i */
+ IEEE80211_REASON_MIC_FAILURE = 14, /* 11i */
+ IEEE80211_REASON_4WAY_HANDSHAKE_TIMEOUT = 15, /* 11i */
+ IEEE80211_REASON_GROUP_KEY_UPDATE_TIMEOUT = 16, /* 11i */
+ IEEE80211_REASON_IE_IN_4WAY_DIFFERS = 17, /* 11i */
+ IEEE80211_REASON_GROUP_CIPHER_INVALID = 18, /* 11i */
+ IEEE80211_REASON_PAIRWISE_CIPHER_INVALID= 19, /* 11i */
+ IEEE80211_REASON_AKMP_INVALID = 20, /* 11i */
+ IEEE80211_REASON_UNSUPP_RSN_IE_VERSION = 21, /* 11i */
+ IEEE80211_REASON_INVALID_RSN_IE_CAP = 22, /* 11i */
+ IEEE80211_REASON_802_1X_AUTH_FAILED = 23, /* 11i */
+ IEEE80211_REASON_CIPHER_SUITE_REJECTED = 24, /* 11i */
+ IEEE80211_REASON_UNSPECIFIED_QOS = 32, /* 11e */
+ IEEE80211_REASON_INSUFFICIENT_BW = 33, /* 11e */
+ IEEE80211_REASON_TOOMANY_FRAMES = 34, /* 11e */
+ IEEE80211_REASON_OUTSIDE_TXOP = 35, /* 11e */
+ IEEE80211_REASON_LEAVING_QBSS = 36, /* 11e */
+ IEEE80211_REASON_BAD_MECHANISM = 37, /* 11e */
+ IEEE80211_REASON_SETUP_NEEDED = 38, /* 11e */
+ IEEE80211_REASON_TIMEOUT = 39, /* 11e */
+
+ /* values not yet allocated by ANA */
+ IEEE80211_REASON_PEER_LINK_CANCELED = 2, /* 11s */
+ IEEE80211_REASON_MESH_MAX_PEERS = 3, /* 11s */
+ IEEE80211_REASON_MESH_CPVIOLATION = 4, /* 11s */
+ IEEE80211_REASON_MESH_CLOSE_RCVD = 5, /* 11s */
+ IEEE80211_REASON_MESH_MAX_RETRIES = 6, /* 11s */
+ IEEE80211_REASON_MESH_CONFIRM_TIMEOUT = 7, /* 11s */
+ IEEE80211_REASON_MESH_INVALID_GTK = 8, /* 11s */
+ IEEE80211_REASON_MESH_INCONS_PARAMS = 9, /* 11s */
+ IEEE80211_REASON_MESH_INVALID_SECURITY = 10, /* 11s */
+ IEEE80211_REASON_MESH_PERR_UNSPEC = 11, /* 11s */
+ IEEE80211_REASON_MESH_PERR_NO_FI = 12, /* 11s */
+ IEEE80211_REASON_MESH_PERR_DEST_UNREACH = 13, /* 11s */
+
+ IEEE80211_STATUS_SUCCESS = 0,
+ IEEE80211_STATUS_UNSPECIFIED = 1,
+ IEEE80211_STATUS_CAPINFO = 10,
+ IEEE80211_STATUS_NOT_ASSOCED = 11,
+ IEEE80211_STATUS_OTHER = 12,
+ IEEE80211_STATUS_ALG = 13,
+ IEEE80211_STATUS_SEQUENCE = 14,
+ IEEE80211_STATUS_CHALLENGE = 15,
+ IEEE80211_STATUS_TIMEOUT = 16,
+ IEEE80211_STATUS_TOOMANY = 17,
+ IEEE80211_STATUS_BASIC_RATE = 18,
+ IEEE80211_STATUS_SP_REQUIRED = 19, /* 11b */
+ IEEE80211_STATUS_PBCC_REQUIRED = 20, /* 11b */
+ IEEE80211_STATUS_CA_REQUIRED = 21, /* 11b */
+ IEEE80211_STATUS_SPECMGMT_REQUIRED = 22, /* 11h */
+ IEEE80211_STATUS_PWRCAP_REQUIRED = 23, /* 11h */
+ IEEE80211_STATUS_SUPCHAN_REQUIRED = 24, /* 11h */
+ IEEE80211_STATUS_SHORTSLOT_REQUIRED = 25, /* 11g */
+ IEEE80211_STATUS_DSSSOFDM_REQUIRED = 26, /* 11g */
+ IEEE80211_STATUS_MISSING_HT_CAPS = 27, /* 11n D3.0 */
+ IEEE80211_STATUS_INVALID_IE = 40, /* 11i */
+ IEEE80211_STATUS_GROUP_CIPHER_INVALID = 41, /* 11i */
+ IEEE80211_STATUS_PAIRWISE_CIPHER_INVALID = 42, /* 11i */
+ IEEE80211_STATUS_AKMP_INVALID = 43, /* 11i */
+ IEEE80211_STATUS_UNSUPP_RSN_IE_VERSION = 44, /* 11i */
+ IEEE80211_STATUS_INVALID_RSN_IE_CAP = 45, /* 11i */
+ IEEE80211_STATUS_CIPHER_SUITE_REJECTED = 46, /* 11i */
+};
+
+#define IEEE80211_WEP_KEYLEN 5 /* 40bit */
+#define IEEE80211_WEP_IVLEN 3 /* 24bit */
+#define IEEE80211_WEP_KIDLEN 1 /* 1 octet */
+#define IEEE80211_WEP_CRCLEN 4 /* CRC-32 */
+#define IEEE80211_WEP_TOTLEN (IEEE80211_WEP_IVLEN + \
+ IEEE80211_WEP_KIDLEN + \
+ IEEE80211_WEP_CRCLEN)
+#define IEEE80211_WEP_NKID 4 /* number of key ids */
+
+/*
+ * 802.11i defines an extended IV for use with non-WEP ciphers.
+ * When the EXTIV bit is set in the key id byte an additional
+ * 4 bytes immediately follow the IV for TKIP. For CCMP the
+ * EXTIV bit is likewise set but the 8 bytes represent the
+ * CCMP header rather than IV+extended-IV.
+ */
+#define IEEE80211_WEP_EXTIV 0x20
+#define IEEE80211_WEP_EXTIVLEN 4 /* extended IV length */
+#define IEEE80211_WEP_MICLEN 8 /* trailing MIC */
+
+#define IEEE80211_CRC_LEN 4
+
+/*
+ * Maximum acceptable MTU is:
+ * IEEE80211_MAX_LEN - WEP overhead - CRC -
+ * QoS overhead - RSN/WPA overhead
+ * Min is arbitrarily chosen > IEEE80211_MIN_LEN. The default
+ * mtu is Ethernet-compatible; it's set by ether_ifattach.
+ */
+#define IEEE80211_MTU_MAX 2290
+#define IEEE80211_MTU_MIN 32
+
+#define IEEE80211_MAX_LEN (2300 + IEEE80211_CRC_LEN + \
+ (IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN + IEEE80211_WEP_CRCLEN))
+#define IEEE80211_ACK_LEN \
+ (sizeof(struct ieee80211_frame_ack) + IEEE80211_CRC_LEN)
+#define IEEE80211_MIN_LEN \
+ (sizeof(struct ieee80211_frame_min) + IEEE80211_CRC_LEN)
+
+/*
+ * The 802.11 spec says at most 2007 stations may be
+ * associated at once. For most AP's this is way more
+ * than is feasible so we use a default of IEEE80211_AID_DEF.
+ * This number may be overridden by the driver and/or by
+ * user configuration but may not be less than IEEE80211_AID_MIN
+ * (see _ieee80211.h for implementation-specific settings).
+ */
+#define IEEE80211_AID_MAX 2007
+
+#define IEEE80211_AID(b) ((b) &~ 0xc000)
+
+/*
+ * RTS frame length parameters. The default is specified in
+ * the 802.11 spec as 512; we treat it as implementation-dependent
+ * so it's defined in ieee80211_var.h. The max may be wrong
+ * for jumbo frames.
+ */
+#define IEEE80211_RTS_MIN 1
+#define IEEE80211_RTS_MAX 2346
+
+/*
+ * TX fragmentation parameters. As above for RTS, we treat
+ * default as implementation-dependent so define it elsewhere.
+ */
+#define IEEE80211_FRAG_MIN 256
+#define IEEE80211_FRAG_MAX 2346
+
+/*
+ * Beacon interval (TU's). Min+max come from WiFi requirements.
+ * As above, we treat default as implementation-dependent so
+ * define it elsewhere.
+ */
+#define IEEE80211_BINTVAL_MAX 1000 /* max beacon interval (TU's) */
+#define IEEE80211_BINTVAL_MIN 25 /* min beacon interval (TU's) */
+
+/*
+ * DTIM period (beacons). Min+max are not really defined
+ * by the protocol but we want them publicly visible so
+ * define them here.
+ */
+#define IEEE80211_DTIM_MAX 15 /* max DTIM period */
+#define IEEE80211_DTIM_MIN 1 /* min DTIM period */
+
+/*
+ * Beacon miss threshold (beacons). As for DTIM, we define
+ * them here to be publicly visible. Note the max may be
+ * clamped depending on device capabilities.
+ */
+#define IEEE80211_HWBMISS_MIN 1
+#define IEEE80211_HWBMISS_MAX 255
+
+/*
+ * 802.11 frame duration definitions.
+ */
+
+struct ieee80211_duration {
+ uint16_t d_rts_dur;
+ uint16_t d_data_dur;
+ uint16_t d_plcp_len;
+ uint8_t d_residue; /* unused octets in time slot */
+};
+
+/* One Time Unit (TU) is 1Kus = 1024 microseconds. */
+#define IEEE80211_DUR_TU 1024
+
+/* IEEE 802.11b durations for DSSS PHY in microseconds */
+#define IEEE80211_DUR_DS_LONG_PREAMBLE 144
+#define IEEE80211_DUR_DS_SHORT_PREAMBLE 72
+
+#define IEEE80211_DUR_DS_SLOW_PLCPHDR 48
+#define IEEE80211_DUR_DS_FAST_PLCPHDR 24
+#define IEEE80211_DUR_DS_SLOW_ACK 112
+#define IEEE80211_DUR_DS_FAST_ACK 56
+#define IEEE80211_DUR_DS_SLOW_CTS 112
+#define IEEE80211_DUR_DS_FAST_CTS 56
+
+#define IEEE80211_DUR_DS_SLOT 20
+#define IEEE80211_DUR_DS_SIFS 10
+#define IEEE80211_DUR_DS_PIFS (IEEE80211_DUR_DS_SIFS + IEEE80211_DUR_DS_SLOT)
+#define IEEE80211_DUR_DS_DIFS (IEEE80211_DUR_DS_SIFS + \
+ 2 * IEEE80211_DUR_DS_SLOT)
+#define IEEE80211_DUR_DS_EIFS (IEEE80211_DUR_DS_SIFS + \
+ IEEE80211_DUR_DS_SLOW_ACK + \
+ IEEE80211_DUR_DS_LONG_PREAMBLE + \
+ IEEE80211_DUR_DS_SLOW_PLCPHDR + \
+ IEEE80211_DUR_DS_DIFS)
+
+#endif /* _NET80211_IEEE80211_HH_ */
diff --git a/rtems/freebsd/net80211/ieee80211_acl.c b/rtems/freebsd/net80211/ieee80211_acl.c
new file mode 100644
index 00000000..06b41b09
--- /dev/null
+++ b/rtems/freebsd/net80211/ieee80211_acl.c
@@ -0,0 +1,341 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 2004-2008 Sam Leffler, Errno Consulting
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * IEEE 802.11 MAC ACL support.
+ *
+ * When this module is loaded the sender address of each auth mgt
+ * frame is passed to the iac_check method and the module indicates
+ * if the frame should be accepted or rejected. If the policy is
+ * set to ACL_POLICY_OPEN then all frames are accepted w/o checking
+ * the address. Otherwise, the address is looked up in the database
+ * and if found the frame is either accepted (ACL_POLICY_ALLOW)
+ * or rejected (ACL_POLICY_DENY).
+ */
+#include <rtems/freebsd/local/opt_wlan.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/module.h>
+#include <rtems/freebsd/sys/queue.h>
+
+#include <rtems/freebsd/sys/socket.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/if_media.h>
+#include <rtems/freebsd/net/ethernet.h>
+#include <rtems/freebsd/net/route.h>
+
+#include <rtems/freebsd/net80211/ieee80211_var.h>
+
+enum {
+ ACL_POLICY_OPEN = 0, /* open, don't check ACL's */
+ ACL_POLICY_ALLOW = 1, /* allow traffic from MAC */
+ ACL_POLICY_DENY = 2, /* deny traffic from MAC */
+ /*
+ * NB: ACL_POLICY_RADIUS must be the same value as
+ * IEEE80211_MACCMD_POLICY_RADIUS because of the way
+ * acl_getpolicy() works.
+ */
+ ACL_POLICY_RADIUS = 7, /* defer to RADIUS ACL server */
+};
+
+#define ACL_HASHSIZE 32
+
+struct acl {
+ TAILQ_ENTRY(acl) acl_list;
+ LIST_ENTRY(acl) acl_hash;
+ uint8_t acl_macaddr[IEEE80211_ADDR_LEN];
+};
+struct aclstate {
+ acl_lock_t as_lock;
+ int as_policy;
+ int as_nacls;
+ TAILQ_HEAD(, acl) as_list; /* list of all ACL's */
+ LIST_HEAD(, acl) as_hash[ACL_HASHSIZE];
+ struct ieee80211vap *as_vap;
+};
+
+/* a simple hash of the last address octet spreads macaddrs well enough */
+#define ACL_HASH(addr) \
+ (((const uint8_t *)(addr))[IEEE80211_ADDR_LEN - 1] % ACL_HASHSIZE)
+
+MALLOC_DEFINE(M_80211_ACL, "acl", "802.11 station acl");
+
+static int acl_free_all(struct ieee80211vap *);
+
+/* number of references from net80211 layer */
+static int nrefs = 0;
+
+static int
+acl_attach(struct ieee80211vap *vap)
+{
+ struct aclstate *as;
+
+ as = (struct aclstate *) malloc(sizeof(struct aclstate),
+ M_80211_ACL, M_NOWAIT | M_ZERO);
+ if (as == NULL)
+ return 0;
+ ACL_LOCK_INIT(as, "acl");
+ TAILQ_INIT(&as->as_list);
+ as->as_policy = ACL_POLICY_OPEN;
+ as->as_vap = vap;
+ vap->iv_as = as;
+ nrefs++; /* NB: we assume caller locking */
+ return 1;
+}
+
+static void
+acl_detach(struct ieee80211vap *vap)
+{
+ struct aclstate *as = vap->iv_as;
+
+ KASSERT(nrefs > 0, ("imbalanced attach/detach"));
+ nrefs--; /* NB: we assume caller locking */
+
+ acl_free_all(vap);
+ vap->iv_as = NULL;
+ ACL_LOCK_DESTROY(as);
+ free(as, M_80211_ACL);
+}
+
+static __inline struct acl *
+_find_acl(struct aclstate *as, const uint8_t *macaddr)
+{
+ struct acl *acl;
+ int hash;
+
+ hash = ACL_HASH(macaddr);
+ LIST_FOREACH(acl, &as->as_hash[hash], acl_hash) {
+ if (IEEE80211_ADDR_EQ(acl->acl_macaddr, macaddr))
+ return acl;
+ }
+ return NULL;
+}
+
+static void
+_acl_free(struct aclstate *as, struct acl *acl)
+{
+ ACL_LOCK_ASSERT(as);
+
+ TAILQ_REMOVE(&as->as_list, acl, acl_list);
+ LIST_REMOVE(acl, acl_hash);
+ free(acl, M_80211_ACL);
+ as->as_nacls--;
+}
+
+static int
+acl_check(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
+{
+ struct aclstate *as = vap->iv_as;
+
+ switch (as->as_policy) {
+ case ACL_POLICY_OPEN:
+ case ACL_POLICY_RADIUS:
+ return 1;
+ case ACL_POLICY_ALLOW:
+ return _find_acl(as, mac) != NULL;
+ case ACL_POLICY_DENY:
+ return _find_acl(as, mac) == NULL;
+ }
+ return 0; /* should not happen */
+}
+
+static int
+acl_add(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
+{
+ struct aclstate *as = vap->iv_as;
+ struct acl *acl, *new;
+ int hash;
+
+ new = (struct acl *) malloc(sizeof(struct acl), M_80211_ACL, M_NOWAIT | M_ZERO);
+ if (new == NULL) {
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_ACL,
+ "ACL: add %s failed, no memory\n", ether_sprintf(mac));
+ /* XXX statistic */
+ return ENOMEM;
+ }
+
+ ACL_LOCK(as);
+ hash = ACL_HASH(mac);
+ LIST_FOREACH(acl, &as->as_hash[hash], acl_hash) {
+ if (IEEE80211_ADDR_EQ(acl->acl_macaddr, mac)) {
+ ACL_UNLOCK(as);
+ free(new, M_80211_ACL);
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_ACL,
+ "ACL: add %s failed, already present\n",
+ ether_sprintf(mac));
+ return EEXIST;
+ }
+ }
+ IEEE80211_ADDR_COPY(new->acl_macaddr, mac);
+ TAILQ_INSERT_TAIL(&as->as_list, new, acl_list);
+ LIST_INSERT_HEAD(&as->as_hash[hash], new, acl_hash);
+ as->as_nacls++;
+ ACL_UNLOCK(as);
+
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_ACL,
+ "ACL: add %s\n", ether_sprintf(mac));
+ return 0;
+}
+
+static int
+acl_remove(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
+{
+ struct aclstate *as = vap->iv_as;
+ struct acl *acl;
+
+ ACL_LOCK(as);
+ acl = _find_acl(as, mac);
+ if (acl != NULL)
+ _acl_free(as, acl);
+ ACL_UNLOCK(as);
+
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_ACL,
+ "ACL: remove %s%s\n", ether_sprintf(mac),
+ acl == NULL ? ", not present" : "");
+
+ return (acl == NULL ? ENOENT : 0);
+}
+
+static int
+acl_free_all(struct ieee80211vap *vap)
+{
+ struct aclstate *as = vap->iv_as;
+ struct acl *acl;
+
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_ACL, "ACL: %s\n", "free all");
+
+ ACL_LOCK(as);
+ while ((acl = TAILQ_FIRST(&as->as_list)) != NULL)
+ _acl_free(as, acl);
+ ACL_UNLOCK(as);
+
+ return 0;
+}
+
+static int
+acl_setpolicy(struct ieee80211vap *vap, int policy)
+{
+ struct aclstate *as = vap->iv_as;
+
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_ACL,
+ "ACL: set policy to %u\n", policy);
+
+ switch (policy) {
+ case IEEE80211_MACCMD_POLICY_OPEN:
+ as->as_policy = ACL_POLICY_OPEN;
+ break;
+ case IEEE80211_MACCMD_POLICY_ALLOW:
+ as->as_policy = ACL_POLICY_ALLOW;
+ break;
+ case IEEE80211_MACCMD_POLICY_DENY:
+ as->as_policy = ACL_POLICY_DENY;
+ break;
+ case IEEE80211_MACCMD_POLICY_RADIUS:
+ as->as_policy = ACL_POLICY_RADIUS;
+ break;
+ default:
+ return EINVAL;
+ }
+ return 0;
+}
+
+static int
+acl_getpolicy(struct ieee80211vap *vap)
+{
+ struct aclstate *as = vap->iv_as;
+
+ return as->as_policy;
+}
+
+static int
+acl_setioctl(struct ieee80211vap *vap, struct ieee80211req *ireq)
+{
+
+ return EINVAL;
+}
+
+static int
+acl_getioctl(struct ieee80211vap *vap, struct ieee80211req *ireq)
+{
+ struct aclstate *as = vap->iv_as;
+ struct acl *acl;
+ struct ieee80211req_maclist *ap;
+ int error, space, i;
+
+ switch (ireq->i_val) {
+ case IEEE80211_MACCMD_POLICY:
+ ireq->i_val = as->as_policy;
+ return 0;
+ case IEEE80211_MACCMD_LIST:
+ space = as->as_nacls * IEEE80211_ADDR_LEN;
+ if (ireq->i_len == 0) {
+ ireq->i_len = space; /* return required space */
+ return 0; /* NB: must not error */
+ }
+ ap = (struct ieee80211req_maclist *) malloc(space,
+ M_TEMP, M_NOWAIT);
+ if (ap == NULL)
+ return ENOMEM;
+ i = 0;
+ ACL_LOCK(as);
+ TAILQ_FOREACH(acl, &as->as_list, acl_list) {
+ IEEE80211_ADDR_COPY(ap[i].ml_macaddr, acl->acl_macaddr);
+ i++;
+ }
+ ACL_UNLOCK(as);
+ if (ireq->i_len >= space) {
+ error = copyout(ap, ireq->i_data, space);
+ ireq->i_len = space;
+ } else
+ error = copyout(ap, ireq->i_data, ireq->i_len);
+ free(ap, M_TEMP);
+ return error;
+ }
+ return EINVAL;
+}
+
+static const struct ieee80211_aclator mac = {
+ .iac_name = "mac",
+ .iac_attach = acl_attach,
+ .iac_detach = acl_detach,
+ .iac_check = acl_check,
+ .iac_add = acl_add,
+ .iac_remove = acl_remove,
+ .iac_flush = acl_free_all,
+ .iac_setpolicy = acl_setpolicy,
+ .iac_getpolicy = acl_getpolicy,
+ .iac_setioctl = acl_setioctl,
+ .iac_getioctl = acl_getioctl,
+};
+IEEE80211_ACL_MODULE(wlan_acl, mac, 1);
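The aclator above keeps every entry on both a TAILQ (for ordered listing via
the ioctl path) and a 32-bucket hash chained on the last address octet (for
fast lookup in acl_check). A self-contained userspace sketch of that
lookup-plus-policy decision, with simplified types and no locking; names such
as acl_entry, find and check are illustrative, not from the patch:

#include <assert.h>
#include <stdint.h>
#include <string.h>

#define ADDR_LEN	6
#define HASHSIZE	32
#define HASH(a)		((a)[ADDR_LEN - 1] % HASHSIZE)

enum { POLICY_OPEN, POLICY_ALLOW, POLICY_DENY };

struct acl_entry {
	struct acl_entry *next;		/* hash chain */
	uint8_t mac[ADDR_LEN];
};

static struct acl_entry *hashtab[HASHSIZE];

static struct acl_entry *
find(const uint8_t *mac)
{
	struct acl_entry *e;

	for (e = hashtab[HASH(mac)]; e != NULL; e = e->next)
		if (memcmp(e->mac, mac, ADDR_LEN) == 0)
			return e;
	return NULL;
}

/* Same accept/reject decision as acl_check() above. */
static int
check(int policy, const uint8_t *mac)
{
	switch (policy) {
	case POLICY_OPEN:
		return 1;
	case POLICY_ALLOW:
		return find(mac) != NULL;
	case POLICY_DENY:
		return find(mac) == NULL;
	}
	return 0;
}

int
main(void)
{
	static struct acl_entry sta = { NULL, { 0, 1, 2, 3, 4, 5 } };
	uint8_t other[ADDR_LEN] = { 0, 1, 2, 3, 4, 6 };

	hashtab[HASH(sta.mac)] = &sta;
	assert(check(POLICY_ALLOW, sta.mac) == 1);	/* listed: accept */
	assert(check(POLICY_ALLOW, other) == 0);	/* unlisted: reject */
	assert(check(POLICY_DENY, sta.mac) == 0);	/* listed: reject */
	assert(check(POLICY_OPEN, other) == 1);		/* no filtering */
	return 0;
}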
diff --git a/rtems/freebsd/net80211/ieee80211_action.c b/rtems/freebsd/net80211/ieee80211_action.c
new file mode 100644
index 00000000..7780c78f
--- /dev/null
+++ b/rtems/freebsd/net80211/ieee80211_action.c
@@ -0,0 +1,281 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 2009 Sam Leffler, Errno Consulting
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+#ifdef __FreeBSD__
+__FBSDID("$FreeBSD$");
+#endif
+
+/*
+ * IEEE 802.11 send/recv action frame support.
+ */
+
+#include <rtems/freebsd/local/opt_inet.h>
+#include <rtems/freebsd/local/opt_wlan.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/systm.h>
+
+#include <rtems/freebsd/sys/socket.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/if_media.h>
+#include <rtems/freebsd/net/ethernet.h>
+
+#include <rtems/freebsd/net80211/ieee80211_var.h>
+#include <rtems/freebsd/net80211/ieee80211_action.h>
+#include <rtems/freebsd/net80211/ieee80211_mesh.h>
+
+static int
+send_inval(struct ieee80211_node *ni, int cat, int act, void *sa)
+{
+ return EINVAL;
+}
+
+static ieee80211_send_action_func *ba_send_action[8] = {
+ send_inval, send_inval, send_inval, send_inval,
+ send_inval, send_inval, send_inval, send_inval,
+};
+static ieee80211_send_action_func *ht_send_action[8] = {
+ send_inval, send_inval, send_inval, send_inval,
+ send_inval, send_inval, send_inval, send_inval,
+};
+static ieee80211_send_action_func *meshpl_send_action[8] = {
+ send_inval, send_inval, send_inval, send_inval,
+ send_inval, send_inval, send_inval, send_inval,
+};
+static ieee80211_send_action_func *meshlm_send_action[4] = {
+ send_inval, send_inval, send_inval, send_inval,
+};
+static ieee80211_send_action_func *hwmp_send_action[8] = {
+ send_inval, send_inval, send_inval, send_inval,
+ send_inval, send_inval, send_inval, send_inval,
+};
+static ieee80211_send_action_func *vendor_send_action[8] = {
+ send_inval, send_inval, send_inval, send_inval,
+ send_inval, send_inval, send_inval, send_inval,
+};
+
+int
+ieee80211_send_action_register(int cat, int act, ieee80211_send_action_func *f)
+{
+#define N(a) (sizeof(a) / sizeof(a[0]))
+ switch (cat) {
+ case IEEE80211_ACTION_CAT_BA:
+ if (act >= N(ba_send_action))
+ break;
+ ba_send_action[act] = f;
+ return 0;
+ case IEEE80211_ACTION_CAT_HT:
+ if (act >= N(ht_send_action))
+ break;
+ ht_send_action[act] = f;
+ return 0;
+ case IEEE80211_ACTION_CAT_MESHPEERING:
+ if (act >= N(meshpl_send_action))
+ break;
+ meshpl_send_action[act] = f;
+ return 0;
+ case IEEE80211_ACTION_CAT_MESHLMETRIC:
+ if (act >= N(meshlm_send_action))
+ break;
+ meshlm_send_action[act] = f;
+ return 0;
+ case IEEE80211_ACTION_CAT_MESHPATH:
+ if (act >= N(hwmp_send_action))
+ break;
+ hwmp_send_action[act] = f;
+ return 0;
+ case IEEE80211_ACTION_CAT_VENDOR:
+ if (act >= N(vendor_send_action))
+ break;
+ vendor_send_action[act] = f;
+ return 0;
+ }
+ return EINVAL;
+#undef N
+}
+
+void
+ieee80211_send_action_unregister(int cat, int act)
+{
+ ieee80211_send_action_register(cat, act, send_inval);
+}
+
+int
+ieee80211_send_action(struct ieee80211_node *ni, int cat, int act, void *sa)
+{
+#define N(a) (sizeof(a) / sizeof(a[0]))
+ ieee80211_send_action_func *f = send_inval;
+
+ switch (cat) {
+ case IEEE80211_ACTION_CAT_BA:
+ if (act < N(ba_send_action))
+ f = ba_send_action[act];
+ break;
+ case IEEE80211_ACTION_CAT_HT:
+ if (act < N(ht_send_action))
+ f = ht_send_action[act];
+ break;
+ case IEEE80211_ACTION_CAT_MESHPEERING:
+ if (act < N(meshpl_send_action))
+ f = meshpl_send_action[act];
+ break;
+ case IEEE80211_ACTION_CAT_MESHLMETRIC:
+ if (act < N(meshlm_send_action))
+ f = meshlm_send_action[act];
+ break;
+ case IEEE80211_ACTION_CAT_MESHPATH:
+ if (act < N(hwmp_send_action))
+ f = hwmp_send_action[act];
+ break;
+ case IEEE80211_ACTION_CAT_VENDOR:
+ if (act < N(vendor_send_action))
+ f = vendor_send_action[act];
+ break;
+ }
+ return f(ni, cat, act, sa);
+#undef N
+}
+
+static int
+recv_inval(struct ieee80211_node *ni, const struct ieee80211_frame *wh,
+ const uint8_t *frm, const uint8_t *efrm)
+{
+ return EINVAL;
+}
+
+static ieee80211_recv_action_func *ba_recv_action[8] = {
+ recv_inval, recv_inval, recv_inval, recv_inval,
+ recv_inval, recv_inval, recv_inval, recv_inval,
+};
+static ieee80211_recv_action_func *ht_recv_action[8] = {
+ recv_inval, recv_inval, recv_inval, recv_inval,
+ recv_inval, recv_inval, recv_inval, recv_inval,
+};
+static ieee80211_recv_action_func *meshpl_recv_action[8] = {
+ recv_inval, recv_inval, recv_inval, recv_inval,
+ recv_inval, recv_inval, recv_inval, recv_inval,
+};
+static ieee80211_recv_action_func *meshlm_recv_action[4] = {
+ recv_inval, recv_inval, recv_inval, recv_inval,
+};
+static ieee80211_recv_action_func *hwmp_recv_action[8] = {
+ recv_inval, recv_inval, recv_inval, recv_inval,
+ recv_inval, recv_inval, recv_inval, recv_inval,
+};
+static ieee80211_recv_action_func *vendor_recv_action[8] = {
+ recv_inval, recv_inval, recv_inval, recv_inval,
+ recv_inval, recv_inval, recv_inval, recv_inval,
+};
+
+int
+ieee80211_recv_action_register(int cat, int act, ieee80211_recv_action_func *f)
+{
+#define N(a) (sizeof(a) / sizeof(a[0]))
+ switch (cat) {
+ case IEEE80211_ACTION_CAT_BA:
+ if (act >= N(ba_recv_action))
+ break;
+ ba_recv_action[act] = f;
+ return 0;
+ case IEEE80211_ACTION_CAT_HT:
+ if (act >= N(ht_recv_action))
+ break;
+ ht_recv_action[act] = f;
+ return 0;
+ case IEEE80211_ACTION_CAT_MESHPEERING:
+ if (act >= N(meshpl_recv_action))
+ break;
+ meshpl_recv_action[act] = f;
+ return 0;
+ case IEEE80211_ACTION_CAT_MESHLMETRIC:
+ if (act >= N(meshlm_recv_action))
+ break;
+ meshlm_recv_action[act] = f;
+ return 0;
+ case IEEE80211_ACTION_CAT_MESHPATH:
+ if (act >= N(hwmp_recv_action))
+ break;
+ hwmp_recv_action[act] = f;
+ return 0;
+ case IEEE80211_ACTION_CAT_VENDOR:
+ if (act >= N(vendor_recv_action))
+ break;
+ vendor_recv_action[act] = f;
+ return 0;
+ }
+ return EINVAL;
+#undef N
+}
+
+void
+ieee80211_recv_action_unregister(int cat, int act)
+{
+ ieee80211_recv_action_register(cat, act, recv_inval);
+}
+
+int
+ieee80211_recv_action(struct ieee80211_node *ni,
+ const struct ieee80211_frame *wh,
+ const uint8_t *frm, const uint8_t *efrm)
+{
+#define N(a) (sizeof(a) / sizeof(a[0]))
+ ieee80211_recv_action_func *f = recv_inval;
+ const struct ieee80211_action *ia =
+ (const struct ieee80211_action *) frm;
+
+ switch (ia->ia_category) {
+ case IEEE80211_ACTION_CAT_BA:
+ if (ia->ia_action < N(ba_recv_action))
+ f = ba_recv_action[ia->ia_action];
+ break;
+ case IEEE80211_ACTION_CAT_HT:
+ if (ia->ia_action < N(ht_recv_action))
+ f = ht_recv_action[ia->ia_action];
+ break;
+ case IEEE80211_ACTION_CAT_MESHPEERING:
+ if (ia->ia_action < N(meshpl_recv_action))
+ f = meshpl_recv_action[ia->ia_action];
+ break;
+ case IEEE80211_ACTION_CAT_MESHLMETRIC:
+ if (ia->ia_action < N(meshlm_recv_action))
+ f = meshlm_recv_action[ia->ia_action];
+ break;
+ case IEEE80211_ACTION_CAT_MESHPATH:
+ if (ia->ia_action < N(hwmp_recv_action))
+ f = hwmp_recv_action[ia->ia_action];
+ break;
+ case IEEE80211_ACTION_CAT_VENDOR:
+ if (ia->ia_action < N(vendor_recv_action))
+ f = vendor_recv_action[ia->ia_action];
+ break;
+ }
+ return f(ni, wh, frm, efrm);
+#undef N
+}
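Both the send and receive paths above use the same table-dispatch pattern:
every slot is pre-filled with an always-EINVAL default, so lookup never has to
NULL-check a slot and unregistering is just re-registering the default. A
hypothetical standalone sketch of the idea:

#include <assert.h>
#include <errno.h>

#define N(a)	(sizeof(a) / sizeof(a[0]))

typedef int handler_fn(int cat, int act);

/* Default slot contents: reject anything not explicitly registered. */
static int
handler_inval(int cat, int act)
{
	(void)cat; (void)act;
	return EINVAL;
}

static int
handler_ok(int cat, int act)
{
	(void)cat; (void)act;
	return 0;		/* pretend the frame was handled */
}

static handler_fn *table[8] = {
	handler_inval, handler_inval, handler_inval, handler_inval,
	handler_inval, handler_inval, handler_inval, handler_inval,
};

static int
dispatch(int act)
{
	handler_fn *f = handler_inval;

	if (act >= 0 && act < (int)N(table))
		f = table[act];
	return f(0, act);	/* slots are never NULL, so no check */
}

int
main(void)
{
	assert(dispatch(3) == EINVAL);	/* nothing registered */
	table[3] = handler_ok;		/* cf. ieee80211_send_action_register */
	assert(dispatch(3) == 0);
	table[3] = handler_inval;	/* cf. ..._unregister: restore default */
	assert(dispatch(3) == EINVAL);
	return 0;
}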
diff --git a/rtems/freebsd/net80211/ieee80211_action.h b/rtems/freebsd/net80211/ieee80211_action.h
new file mode 100644
index 00000000..943d145b
--- /dev/null
+++ b/rtems/freebsd/net80211/ieee80211_action.h
@@ -0,0 +1,52 @@
+/*-
+ * Copyright (c) 2009 Sam Leffler, Errno Consulting
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+#ifndef _NET80211_IEEE80211_ACTION_HH_
+#define _NET80211_IEEE80211_ACTION_HH_
+
+/*
+ * 802.11 send/recv action frame support.
+ */
+
+struct ieee80211_node;
+struct ieee80211_frame;
+
+typedef int ieee80211_send_action_func(struct ieee80211_node *,
+ int, int, void *);
+int ieee80211_send_action_register(int cat, int act,
+ ieee80211_send_action_func *f);
+void ieee80211_send_action_unregister(int cat, int act);
+int ieee80211_send_action(struct ieee80211_node *, int, int, void *);
+
+typedef int ieee80211_recv_action_func(struct ieee80211_node *,
+ const struct ieee80211_frame *, const uint8_t *, const uint8_t *);
+int ieee80211_recv_action_register(int cat, int act,
+ ieee80211_recv_action_func *);
+void ieee80211_recv_action_unregister(int cat, int act);
+int ieee80211_recv_action(struct ieee80211_node *,
+ const struct ieee80211_frame *,
+ const uint8_t *, const uint8_t *);
+#endif /* _NET80211_IEEE80211_ACTION_HH_ */
diff --git a/rtems/freebsd/net80211/ieee80211_adhoc.c b/rtems/freebsd/net80211/ieee80211_adhoc.c
new file mode 100644
index 00000000..5f3eb7cb
--- /dev/null
+++ b/rtems/freebsd/net80211/ieee80211_adhoc.c
@@ -0,0 +1,929 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 2007-2009 Sam Leffler, Errno Consulting
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+#ifdef __FreeBSD__
+__FBSDID("$FreeBSD$");
+#endif
+
+/*
+ * IEEE 802.11 IBSS mode support.
+ */
+#include <rtems/freebsd/local/opt_inet.h>
+#include <rtems/freebsd/local/opt_wlan.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/kernel.h>
+
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/sockio.h>
+#include <rtems/freebsd/sys/endian.h>
+#include <rtems/freebsd/sys/errno.h>
+#include <rtems/freebsd/sys/proc.h>
+#include <rtems/freebsd/sys/sysctl.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/if_media.h>
+#include <rtems/freebsd/net/if_llc.h>
+#include <rtems/freebsd/net/ethernet.h>
+
+#include <rtems/freebsd/net/bpf.h>
+
+#include <rtems/freebsd/net80211/ieee80211_var.h>
+#include <rtems/freebsd/net80211/ieee80211_adhoc.h>
+#include <rtems/freebsd/net80211/ieee80211_input.h>
+#ifdef IEEE80211_SUPPORT_SUPERG
+#include <rtems/freebsd/net80211/ieee80211_superg.h>
+#endif
+#ifdef IEEE80211_SUPPORT_TDMA
+#include <rtems/freebsd/net80211/ieee80211_tdma.h>
+#endif
+
+#define IEEE80211_RATE2MBS(r) (((r) & IEEE80211_RATE_VAL) / 2)
+
+static void adhoc_vattach(struct ieee80211vap *);
+static int adhoc_newstate(struct ieee80211vap *, enum ieee80211_state, int);
+static int adhoc_input(struct ieee80211_node *, struct mbuf *, int, int);
+static void adhoc_recv_mgmt(struct ieee80211_node *, struct mbuf *,
+ int subtype, int, int);
+static void ahdemo_recv_mgmt(struct ieee80211_node *, struct mbuf *,
+ int subtype, int, int);
+static void adhoc_recv_ctl(struct ieee80211_node *, struct mbuf *, int subtype);
+
+void
+ieee80211_adhoc_attach(struct ieee80211com *ic)
+{
+ ic->ic_vattach[IEEE80211_M_IBSS] = adhoc_vattach;
+ ic->ic_vattach[IEEE80211_M_AHDEMO] = adhoc_vattach;
+}
+
+void
+ieee80211_adhoc_detach(struct ieee80211com *ic)
+{
+}
+
+static void
+adhoc_vdetach(struct ieee80211vap *vap)
+{
+}
+
+static void
+adhoc_vattach(struct ieee80211vap *vap)
+{
+ vap->iv_newstate = adhoc_newstate;
+ vap->iv_input = adhoc_input;
+ if (vap->iv_opmode == IEEE80211_M_IBSS)
+ vap->iv_recv_mgmt = adhoc_recv_mgmt;
+ else
+ vap->iv_recv_mgmt = ahdemo_recv_mgmt;
+ vap->iv_recv_ctl = adhoc_recv_ctl;
+ vap->iv_opdetach = adhoc_vdetach;
+#ifdef IEEE80211_SUPPORT_TDMA
+ /*
+ * Throw control to tdma support. Note we do this
+ * after setting up our callbacks so it can piggyback
+ * on top of us.
+ */
+ if (vap->iv_caps & IEEE80211_C_TDMA)
+ ieee80211_tdma_vattach(vap);
+#endif
+}
+
+static void
+sta_leave(void *arg, struct ieee80211_node *ni)
+{
+ struct ieee80211vap *vap = arg;
+
+ if (ni->ni_vap == vap && ni != vap->iv_bss)
+ ieee80211_node_leave(ni);
+}
+
+/*
+ * IEEE80211_M_IBSS+IEEE80211_M_AHDEMO vap state machine handler.
+ */
+static int
+adhoc_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
+{
+ struct ieee80211com *ic = vap->iv_ic;
+ struct ieee80211_node *ni;
+ enum ieee80211_state ostate;
+
+ IEEE80211_LOCK_ASSERT(vap->iv_ic);
+
+ ostate = vap->iv_state;
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_STATE, "%s: %s -> %s (%d)\n",
+ __func__, ieee80211_state_name[ostate],
+ ieee80211_state_name[nstate], arg);
+ vap->iv_state = nstate; /* state transition */
+ if (ostate != IEEE80211_S_SCAN)
+ ieee80211_cancel_scan(vap); /* background scan */
+ ni = vap->iv_bss; /* NB: no reference held */
+ switch (nstate) {
+ case IEEE80211_S_INIT:
+ switch (ostate) {
+ case IEEE80211_S_SCAN:
+ ieee80211_cancel_scan(vap);
+ break;
+ default:
+ break;
+ }
+ if (ostate != IEEE80211_S_INIT) {
+ /* NB: optimize INIT -> INIT case */
+ ieee80211_reset_bss(vap);
+ }
+ break;
+ case IEEE80211_S_SCAN:
+ switch (ostate) {
+ case IEEE80211_S_RUN: /* beacon miss */
+ /* purge station table; entries are stale */
+ ieee80211_iterate_nodes(&ic->ic_sta, sta_leave, vap);
+ /* fall thru... */
+ case IEEE80211_S_INIT:
+ if (vap->iv_des_chan != IEEE80211_CHAN_ANYC &&
+ !IEEE80211_IS_CHAN_RADAR(vap->iv_des_chan)) {
+ /*
+ * Already have a channel; bypass the
+ * scan and startup immediately.
+ */
+ ieee80211_create_ibss(vap, vap->iv_des_chan);
+ break;
+ }
+ /*
+ * Initiate a scan. We can come here as a result
+ * of an IEEE80211_IOC_SCAN_REQ too in which case
+ * the vap will be marked with IEEE80211_FEXT_SCANREQ
+ * and the scan request parameters will be present
+ * in iv_scanreq. Otherwise we do the default.
+ */
+ if (vap->iv_flags_ext & IEEE80211_FEXT_SCANREQ) {
+ ieee80211_check_scan(vap,
+ vap->iv_scanreq_flags,
+ vap->iv_scanreq_duration,
+ vap->iv_scanreq_mindwell,
+ vap->iv_scanreq_maxdwell,
+ vap->iv_scanreq_nssid, vap->iv_scanreq_ssid);
+ vap->iv_flags_ext &= ~IEEE80211_FEXT_SCANREQ;
+ } else
+ ieee80211_check_scan_current(vap);
+ break;
+ case IEEE80211_S_SCAN:
+ /*
+ * This can happen because of a change in state
+ * that requires a reset. Trigger a new scan
+ * unless we're in manual roaming mode in which
+ * case an application must issue an explicit request.
+ */
+ if (vap->iv_roaming == IEEE80211_ROAMING_AUTO)
+ ieee80211_check_scan_current(vap);
+ break;
+ default:
+ goto invalid;
+ }
+ break;
+ case IEEE80211_S_RUN:
+ if (vap->iv_flags & IEEE80211_F_WPA) {
+ /* XXX validate prerequisites */
+ }
+ switch (ostate) {
+ case IEEE80211_S_SCAN:
+#ifdef IEEE80211_DEBUG
+ if (ieee80211_msg_debug(vap)) {
+ ieee80211_note(vap,
+ "synchronized with %s ssid ",
+ ether_sprintf(ni->ni_bssid));
+ ieee80211_print_essid(vap->iv_bss->ni_essid,
+ ni->ni_esslen);
+ /* XXX MCS/HT */
+ printf(" channel %d start %uMb\n",
+ ieee80211_chan2ieee(ic, ic->ic_curchan),
+ IEEE80211_RATE2MBS(ni->ni_txrate));
+ }
+#endif
+ break;
+ default:
+ goto invalid;
+ }
+ /*
+ * When 802.1x is not in use mark the port authorized
+ * at this point so traffic can flow.
+ */
+ if (ni->ni_authmode != IEEE80211_AUTH_8021X)
+ ieee80211_node_authorize(ni);
+ /*
+ * Fake association when joining an existing bss.
+ */
+ if (!IEEE80211_ADDR_EQ(ni->ni_macaddr, vap->iv_myaddr) &&
+ ic->ic_newassoc != NULL)
+ ic->ic_newassoc(ni, ostate != IEEE80211_S_RUN);
+ break;
+ case IEEE80211_S_SLEEP:
+ ieee80211_sta_pwrsave(vap, 0);
+ break;
+ default:
+ invalid:
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_STATE,
+ "%s: unexpected state transition %s -> %s\n", __func__,
+ ieee80211_state_name[ostate], ieee80211_state_name[nstate]);
+ break;
+ }
+ return 0;
+}
+
+/*
+ * Decide if a received management frame should be
+ * printed when debugging is enabled. This filters some
+ * of the less interesting frames that come frequently
+ * (e.g. beacons).
+ */
+static __inline int
+doprint(struct ieee80211vap *vap, int subtype)
+{
+ switch (subtype) {
+ case IEEE80211_FC0_SUBTYPE_BEACON:
+ return (vap->iv_ic->ic_flags & IEEE80211_F_SCAN);
+ case IEEE80211_FC0_SUBTYPE_PROBE_REQ:
+ return 1;
+ }
+ return 1;
+}
+
+/*
+ * Process a received frame. The node associated with the sender
+ * should be supplied. If nothing was found in the node table then
+ * the caller is assumed to supply a reference to iv_bss instead.
+ * The RSSI and a timestamp are also supplied. The RSSI data is used
+ * during AP scanning to select an AP to associate with; it can have
+ * any units so long as values have consistent units and higher values
+ * mean ``better signal''. The receive timestamp is currently not used
+ * by the 802.11 layer.
+ */
+static int
+adhoc_input(struct ieee80211_node *ni, struct mbuf *m, int rssi, int nf)
+{
+#define SEQ_LEQ(a,b) ((int)((a)-(b)) <= 0)
+#define HAS_SEQ(type) ((type & 0x4) == 0)
+ struct ieee80211vap *vap = ni->ni_vap;
+ struct ieee80211com *ic = ni->ni_ic;
+ struct ifnet *ifp = vap->iv_ifp;
+ struct ieee80211_frame *wh;
+ struct ieee80211_key *key;
+ struct ether_header *eh;
+ int hdrspace, need_tap = 1; /* mbuf needs to be tapped */
+ uint8_t dir, type, subtype, qos;
+ uint8_t *bssid;
+ uint16_t rxseq;
+
+ if (m->m_flags & M_AMPDU_MPDU) {
+ /*
+ * Fastpath for A-MPDU reorder q resubmission. Frames
+ * w/ M_AMPDU_MPDU marked have already passed through
+ * here but were received out of order and been held on
+ * the reorder queue. When resubmitted they are marked
+ * with the M_AMPDU_MPDU flag and we can bypass most of
+ * the normal processing.
+ */
+ wh = mtod(m, struct ieee80211_frame *);
+ type = IEEE80211_FC0_TYPE_DATA;
+ dir = wh->i_fc[1] & IEEE80211_FC1_DIR_MASK;
+ subtype = IEEE80211_FC0_SUBTYPE_QOS;
+ hdrspace = ieee80211_hdrspace(ic, wh); /* XXX optimize? */
+ goto resubmit_ampdu;
+ }
+
+ KASSERT(ni != NULL, ("null node"));
+ ni->ni_inact = ni->ni_inact_reload;
+
+ type = -1; /* undefined */
+
+ if (m->m_pkthdr.len < sizeof(struct ieee80211_frame_min)) {
+ IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_ANY,
+ ni->ni_macaddr, NULL,
+ "too short (1): len %u", m->m_pkthdr.len);
+ vap->iv_stats.is_rx_tooshort++;
+ goto out;
+ }
+ /*
+ * Bit of a cheat here, we use a pointer for a 3-address
+ * frame format but don't reference fields outside
+ * ieee80211_frame_min w/o first validating the data is
+ * present.
+ */
+ wh = mtod(m, struct ieee80211_frame *);
+
+ if ((wh->i_fc[0] & IEEE80211_FC0_VERSION_MASK) !=
+ IEEE80211_FC0_VERSION_0) {
+ IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_ANY,
+ ni->ni_macaddr, NULL, "wrong version, fc %02x:%02x",
+ wh->i_fc[0], wh->i_fc[1]);
+ vap->iv_stats.is_rx_badversion++;
+ goto err;
+ }
+
+ dir = wh->i_fc[1] & IEEE80211_FC1_DIR_MASK;
+ type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
+ subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
+ if ((ic->ic_flags & IEEE80211_F_SCAN) == 0) {
+ if (dir != IEEE80211_FC1_DIR_NODS)
+ bssid = wh->i_addr1;
+ else if (type == IEEE80211_FC0_TYPE_CTL)
+ bssid = wh->i_addr1;
+ else {
+ if (m->m_pkthdr.len < sizeof(struct ieee80211_frame)) {
+ IEEE80211_DISCARD_MAC(vap,
+ IEEE80211_MSG_ANY, ni->ni_macaddr,
+ NULL, "too short (2): len %u",
+ m->m_pkthdr.len);
+ vap->iv_stats.is_rx_tooshort++;
+ goto out;
+ }
+ bssid = wh->i_addr3;
+ }
+ /*
+ * Validate the bssid.
+ */
+ if (!IEEE80211_ADDR_EQ(bssid, vap->iv_bss->ni_bssid) &&
+ !IEEE80211_ADDR_EQ(bssid, ifp->if_broadcastaddr)) {
+ /* not interested in */
+ IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_INPUT,
+ bssid, NULL, "%s", "not to bss");
+ vap->iv_stats.is_rx_wrongbss++;
+ goto out;
+ }
+ /*
+ * Data frame, cons up a node when it doesn't
+ * exist. This should probably be done after an ACL check.
+ */
+ if (type == IEEE80211_FC0_TYPE_DATA &&
+ ni == vap->iv_bss &&
+ !IEEE80211_ADDR_EQ(wh->i_addr2, ni->ni_macaddr)) {
+ /*
+ * Beware of frames that come in too early; we
+ * can receive broadcast frames and creating sta
+ * entries will blow up because there is no bss
+ * channel yet.
+ */
+ if (vap->iv_state != IEEE80211_S_RUN) {
+ IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT,
+ wh, "data", "not in RUN state (%s)",
+ ieee80211_state_name[vap->iv_state]);
+ vap->iv_stats.is_rx_badstate++;
+ goto err;
+ }
+ /*
+ * Fake up a node for this newly
+ * discovered member of the IBSS.
+ */
+ ni = ieee80211_fakeup_adhoc_node(vap, wh->i_addr2);
+ if (ni == NULL) {
+ /* NB: stat kept for alloc failure */
+ goto err;
+ }
+ }
+ IEEE80211_RSSI_LPF(ni->ni_avgrssi, rssi);
+ ni->ni_noise = nf;
+ if (HAS_SEQ(type)) {
+ uint8_t tid = ieee80211_gettid(wh);
+ if (IEEE80211_QOS_HAS_SEQ(wh) &&
+ TID_TO_WME_AC(tid) >= WME_AC_VI)
+ ic->ic_wme.wme_hipri_traffic++;
+ rxseq = le16toh(*(uint16_t *)wh->i_seq);
+ if ((ni->ni_flags & IEEE80211_NODE_HT) == 0 &&
+ (wh->i_fc[1] & IEEE80211_FC1_RETRY) &&
+ SEQ_LEQ(rxseq, ni->ni_rxseqs[tid])) {
+ /* duplicate, discard */
+ IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_INPUT,
+ bssid, "duplicate",
+ "seqno <%u,%u> fragno <%u,%u> tid %u",
+ rxseq >> IEEE80211_SEQ_SEQ_SHIFT,
+ ni->ni_rxseqs[tid] >>
+ IEEE80211_SEQ_SEQ_SHIFT,
+ rxseq & IEEE80211_SEQ_FRAG_MASK,
+ ni->ni_rxseqs[tid] &
+ IEEE80211_SEQ_FRAG_MASK,
+ tid);
+ vap->iv_stats.is_rx_dup++;
+ IEEE80211_NODE_STAT(ni, rx_dup);
+ goto out;
+ }
+ ni->ni_rxseqs[tid] = rxseq;
+ }
+ }
+
+ switch (type) {
+ case IEEE80211_FC0_TYPE_DATA:
+ hdrspace = ieee80211_hdrspace(ic, wh);
+ if (m->m_len < hdrspace &&
+ (m = m_pullup(m, hdrspace)) == NULL) {
+ IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_ANY,
+ ni->ni_macaddr, NULL,
+ "data too short: expecting %u", hdrspace);
+ vap->iv_stats.is_rx_tooshort++;
+ goto out; /* XXX */
+ }
+ if (dir != IEEE80211_FC1_DIR_NODS) {
+ IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT,
+ wh, "data", "incorrect dir 0x%x", dir);
+ vap->iv_stats.is_rx_wrongdir++;
+ goto out;
+ }
+ /* XXX no power-save support */
+
+ /*
+ * Handle A-MPDU re-ordering. If the frame is to be
+ * processed directly then ieee80211_ampdu_reorder
+ * will return 0; otherwise it has consumed the mbuf
+ * and we should do nothing more with it.
+ */
+ if ((m->m_flags & M_AMPDU) &&
+ ieee80211_ampdu_reorder(ni, m) != 0) {
+ m = NULL;
+ goto out;
+ }
+ resubmit_ampdu:
+
+ /*
+ * Handle privacy requirements. Note that we
+ * must not be preempted from here until after
+ * we (potentially) call ieee80211_crypto_demic;
+ * otherwise we may violate assumptions in the
+ * crypto cipher modules used to do delayed update
+ * of replay sequence numbers.
+ */
+ if (wh->i_fc[1] & IEEE80211_FC1_WEP) {
+ if ((vap->iv_flags & IEEE80211_F_PRIVACY) == 0) {
+ /*
+ * Discard encrypted frames when privacy is off.
+ */
+ IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT,
+ wh, "WEP", "%s", "PRIVACY off");
+ vap->iv_stats.is_rx_noprivacy++;
+ IEEE80211_NODE_STAT(ni, rx_noprivacy);
+ goto out;
+ }
+ key = ieee80211_crypto_decap(ni, m, hdrspace);
+ if (key == NULL) {
+ /* NB: stats+msgs handled in crypto_decap */
+ IEEE80211_NODE_STAT(ni, rx_wepfail);
+ goto out;
+ }
+ wh = mtod(m, struct ieee80211_frame *);
+ wh->i_fc[1] &= ~IEEE80211_FC1_WEP;
+ } else {
+ /* XXX M_WEP and IEEE80211_F_PRIVACY */
+ key = NULL;
+ }
+
+ /*
+ * Save QoS bits for use below--before we strip the header.
+ */
+ if (subtype == IEEE80211_FC0_SUBTYPE_QOS) {
+ qos = (dir == IEEE80211_FC1_DIR_DSTODS) ?
+ ((struct ieee80211_qosframe_addr4 *)wh)->i_qos[0] :
+ ((struct ieee80211_qosframe *)wh)->i_qos[0];
+ } else
+ qos = 0;
+
+ /*
+ * Next up, any fragmentation.
+ */
+ if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
+ m = ieee80211_defrag(ni, m, hdrspace);
+ if (m == NULL) {
+ /* Fragment dropped or frame not complete yet */
+ goto out;
+ }
+ }
+ wh = NULL; /* no longer valid, catch any uses */
+
+ /*
+ * Next strip any MSDU crypto bits.
+ */
+ if (key != NULL && !ieee80211_crypto_demic(vap, key, m, 0)) {
+ IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_INPUT,
+ ni->ni_macaddr, "data", "%s", "demic error");
+ vap->iv_stats.is_rx_demicfail++;
+ IEEE80211_NODE_STAT(ni, rx_demicfail);
+ goto out;
+ }
+
+ /* copy to listener after decrypt */
+ if (ieee80211_radiotap_active_vap(vap))
+ ieee80211_radiotap_rx(vap, m);
+ need_tap = 0;
+
+ /*
+ * Finally, strip the 802.11 header.
+ */
+ m = ieee80211_decap(vap, m, hdrspace);
+ if (m == NULL) {
+ /* XXX mask bit to check for both */
+ /* don't count Null data frames as errors */
+ if (subtype == IEEE80211_FC0_SUBTYPE_NODATA ||
+ subtype == IEEE80211_FC0_SUBTYPE_QOS_NULL)
+ goto out;
+ IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_INPUT,
+ ni->ni_macaddr, "data", "%s", "decap error");
+ vap->iv_stats.is_rx_decap++;
+ IEEE80211_NODE_STAT(ni, rx_decap);
+ goto err;
+ }
+ eh = mtod(m, struct ether_header *);
+ if (!ieee80211_node_is_authorized(ni)) {
+ /*
+ * Deny any non-PAE frames received prior to
+ * authorization. For open/shared-key
+ * authentication the port is marked authorized
+ * after authentication completes. For 802.1x
+ * the port is not marked authorized by the
+ * authenticator until the handshake has completed.
+ */
+ if (eh->ether_type != htons(ETHERTYPE_PAE)) {
+ IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_INPUT,
+ eh->ether_shost, "data",
+ "unauthorized port: ether type 0x%x len %u",
+ eh->ether_type, m->m_pkthdr.len);
+ vap->iv_stats.is_rx_unauth++;
+ IEEE80211_NODE_STAT(ni, rx_unauth);
+ goto err;
+ }
+ } else {
+ /*
+ * When denying unencrypted frames, discard
+ * any non-PAE frames received without encryption.
+ */
+ if ((vap->iv_flags & IEEE80211_F_DROPUNENC) &&
+ (key == NULL && (m->m_flags & M_WEP) == 0) &&
+ eh->ether_type != htons(ETHERTYPE_PAE)) {
+ /*
+ * Drop unencrypted frames.
+ */
+ vap->iv_stats.is_rx_unencrypted++;
+ IEEE80211_NODE_STAT(ni, rx_unencrypted);
+ goto out;
+ }
+ }
+ /* XXX require HT? */
+ if (qos & IEEE80211_QOS_AMSDU) {
+ m = ieee80211_decap_amsdu(ni, m);
+ if (m == NULL)
+ return IEEE80211_FC0_TYPE_DATA;
+ } else {
+#ifdef IEEE80211_SUPPORT_SUPERG
+ m = ieee80211_decap_fastframe(vap, ni, m);
+ if (m == NULL)
+ return IEEE80211_FC0_TYPE_DATA;
+#endif
+ }
+ if (dir == IEEE80211_FC1_DIR_DSTODS && ni->ni_wdsvap != NULL)
+ ieee80211_deliver_data(ni->ni_wdsvap, ni, m);
+ else
+ ieee80211_deliver_data(vap, ni, m);
+ return IEEE80211_FC0_TYPE_DATA;
+
+ case IEEE80211_FC0_TYPE_MGT:
+ vap->iv_stats.is_rx_mgmt++;
+ IEEE80211_NODE_STAT(ni, rx_mgmt);
+ if (dir != IEEE80211_FC1_DIR_NODS) {
+ IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT,
+ wh, "data", "incorrect dir 0x%x", dir);
+ vap->iv_stats.is_rx_wrongdir++;
+ goto err;
+ }
+ if (m->m_pkthdr.len < sizeof(struct ieee80211_frame)) {
+ IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_ANY,
+ ni->ni_macaddr, "mgt", "too short: len %u",
+ m->m_pkthdr.len);
+ vap->iv_stats.is_rx_tooshort++;
+ goto out;
+ }
+#ifdef IEEE80211_DEBUG
+ if ((ieee80211_msg_debug(vap) && doprint(vap, subtype)) ||
+ ieee80211_msg_dumppkts(vap)) {
+ if_printf(ifp, "received %s from %s rssi %d\n",
+ ieee80211_mgt_subtype_name[subtype >>
+ IEEE80211_FC0_SUBTYPE_SHIFT],
+ ether_sprintf(wh->i_addr2), rssi);
+ }
+#endif
+ if (wh->i_fc[1] & IEEE80211_FC1_WEP) {
+ IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT,
+ wh, NULL, "%s", "WEP set but not permitted");
+ vap->iv_stats.is_rx_mgtdiscard++; /* XXX */
+ goto out;
+ }
+ vap->iv_recv_mgmt(ni, m, subtype, rssi, nf);
+ goto out;
+
+ case IEEE80211_FC0_TYPE_CTL:
+ vap->iv_stats.is_rx_ctl++;
+ IEEE80211_NODE_STAT(ni, rx_ctrl);
+ vap->iv_recv_ctl(ni, m, subtype);
+ goto out;
+
+ default:
+ IEEE80211_DISCARD(vap, IEEE80211_MSG_ANY,
+ wh, "bad", "frame type 0x%x", type);
+ /* should not come here */
+ break;
+ }
+err:
+ ifp->if_ierrors++;
+out:
+ if (m != NULL) {
+ if (need_tap && ieee80211_radiotap_active_vap(vap))
+ ieee80211_radiotap_rx(vap, m);
+ m_freem(m);
+ }
+ return type;
+#undef SEQ_LEQ
+}
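The duplicate filter in the data path above keys on SEQ_LEQ: a frame with the
RETRY bit set whose sequence state is not newer than the cached ni_rxseqs
entry is counted as a duplicate and dropped. A small sketch of the comparison,
assuming (for illustration) the packed seq+frag field is treated as a plain
16-bit counter:

#include <assert.h>
#include <stdint.h>

#define SEQ_LEQ(a, b)	((int)((a) - (b)) <= 0)

int
main(void)
{
	uint16_t last = 0x1230;		/* cached ni_rxseqs[tid] state */

	assert(SEQ_LEQ(0x1230, last));	/* retry of the same MPDU: drop */
	assert(SEQ_LEQ(0x1220, last));	/* stale sequence number: drop */
	assert(!SEQ_LEQ(0x1240, last));	/* newer: accept and re-cache */
	return 0;
}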
+
+static int
+is11bclient(const uint8_t *rates, const uint8_t *xrates)
+{
+ /* 802.11b rates in 500 kb/s units: 2, 4, 11, 22 (1/2/5.5/11 Mb/s) */
+ static const uint32_t brates = (1<<2*1)|(1<<2*2)|(1<<11)|(1<<2*11);
+ int i;
+
+ /* NB: the 11b clients we care about will not have xrates */
+ if (xrates != NULL || rates == NULL)
+ return 0;
+ for (i = 0; i < rates[1]; i++) {
+ int r = rates[2+i] & IEEE80211_RATE_VAL;
+ if (r > 2*11 || ((1<<r) & brates) == 0)
+ return 0;
+ }
+ return 1;
+}
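The brates mask packs the four 802.11b rates, expressed in 500 kb/s units
(2, 4, 11, 22, i.e. 1/2/5.5/11 Mb/s), into single bit positions so each rate
test is one shift and AND. An illustrative standalone check of that
classification (simplified: the basic-rate flag masking and the r > 22 test
are omitted):

#include <assert.h>
#include <stdint.h>

int
main(void)
{
	static const uint32_t brates =
	    (1 << 2) | (1 << 4) | (1 << 11) | (1 << 22);
	/* Rate sets as they appear on the wire: {id, len, rates...}. */
	uint8_t pure11b[] = { 1, 4, 2, 4, 11, 22 };	/* 1/2/5.5/11 Mb/s */
	uint8_t with11g[] = { 1, 5, 2, 4, 11, 22, 12 };	/* plus 6 Mb/s */
	int i, is11b;

	is11b = 1;
	for (i = 0; i < pure11b[1]; i++)
		if (((1 << pure11b[2 + i]) & brates) == 0)
			is11b = 0;
	assert(is11b == 1);		/* all rates are 11b rates */

	is11b = 1;
	for (i = 0; i < with11g[1]; i++)
		if (((1 << with11g[2 + i]) & brates) == 0)
			is11b = 0;
	assert(is11b == 0);		/* 6 Mb/s (rate 12) is not */
	return 0;
}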
+
+static void
+adhoc_recv_mgmt(struct ieee80211_node *ni, struct mbuf *m0,
+ int subtype, int rssi, int nf)
+{
+ struct ieee80211vap *vap = ni->ni_vap;
+ struct ieee80211com *ic = ni->ni_ic;
+ struct ieee80211_frame *wh;
+ uint8_t *frm, *efrm, *sfrm;
+ uint8_t *ssid, *rates, *xrates;
+
+ wh = mtod(m0, struct ieee80211_frame *);
+ frm = (uint8_t *)&wh[1];
+ efrm = mtod(m0, uint8_t *) + m0->m_len;
+ switch (subtype) {
+ case IEEE80211_FC0_SUBTYPE_PROBE_RESP:
+ case IEEE80211_FC0_SUBTYPE_BEACON: {
+ struct ieee80211_scanparams scan;
+ /*
+ * We process beacon/probe response
+ * frames to discover neighbors.
+ */
+ if (ieee80211_parse_beacon(ni, m0, &scan) != 0)
+ return;
+ /*
+ * Count frame now that we know it's to be processed.
+ */
+ if (subtype == IEEE80211_FC0_SUBTYPE_BEACON) {
+ vap->iv_stats.is_rx_beacon++; /* XXX remove */
+ IEEE80211_NODE_STAT(ni, rx_beacons);
+ } else
+ IEEE80211_NODE_STAT(ni, rx_proberesp);
+ /*
+ * If scanning, just pass information to the scan module.
+ */
+ if (ic->ic_flags & IEEE80211_F_SCAN) {
+ if (ic->ic_flags_ext & IEEE80211_FEXT_PROBECHAN) {
+ /*
+ * Actively scanning a channel marked passive;
+ * send a probe request now that we know there
+ * is 802.11 traffic present.
+ *
+ * XXX check if the beacon we recv'd gives
+ * us what we need and suppress the probe req
+ */
+ ieee80211_probe_curchan(vap, 1);
+ ic->ic_flags_ext &= ~IEEE80211_FEXT_PROBECHAN;
+ }
+ ieee80211_add_scan(vap, &scan, wh, subtype, rssi, nf);
+ return;
+ }
+ if (scan.capinfo & IEEE80211_CAPINFO_IBSS) {
+ if (!IEEE80211_ADDR_EQ(wh->i_addr2, ni->ni_macaddr)) {
+ /*
+ * Create a new entry in the neighbor table.
+ */
+ ni = ieee80211_add_neighbor(vap, wh, &scan);
+ } else if (ni->ni_capinfo == 0) {
+ /*
+ * Update faked node created on transmit.
+ * Note this also updates the tsf.
+ */
+ ieee80211_init_neighbor(ni, wh, &scan);
+ } else {
+ /*
+ * Record tsf for potential resync.
+ */
+ memcpy(ni->ni_tstamp.data, scan.tstamp,
+ sizeof(ni->ni_tstamp));
+ }
+ if (ni != NULL) {
+ IEEE80211_RSSI_LPF(ni->ni_avgrssi, rssi);
+ ni->ni_noise = nf;
+ }
+ }
+ break;
+ }
+
+ case IEEE80211_FC0_SUBTYPE_PROBE_REQ:
+ if (vap->iv_state != IEEE80211_S_RUN) {
+ IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT,
+ wh, NULL, "wrong state %s",
+ ieee80211_state_name[vap->iv_state]);
+ vap->iv_stats.is_rx_mgtdiscard++;
+ return;
+ }
+ if (IEEE80211_IS_MULTICAST(wh->i_addr2)) {
+ /* frame must be directed */
+ IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT,
+ wh, NULL, "%s", "not unicast");
+ vap->iv_stats.is_rx_mgtdiscard++; /* XXX stat */
+ return;
+ }
+
+ /*
+ * prreq frame format
+ * [tlv] ssid
+ * [tlv] supported rates
+ * [tlv] extended supported rates
+ */
+ ssid = rates = xrates = NULL;
+ sfrm = frm;
+ while (efrm - frm > 1) {
+ IEEE80211_VERIFY_LENGTH(efrm - frm, frm[1] + 2, return);
+ switch (*frm) {
+ case IEEE80211_ELEMID_SSID:
+ ssid = frm;
+ break;
+ case IEEE80211_ELEMID_RATES:
+ rates = frm;
+ break;
+ case IEEE80211_ELEMID_XRATES:
+ xrates = frm;
+ break;
+ }
+ frm += frm[1] + 2;
+ }
+ IEEE80211_VERIFY_ELEMENT(rates, IEEE80211_RATE_MAXSIZE, return);
+ if (xrates != NULL)
+ IEEE80211_VERIFY_ELEMENT(xrates,
+ IEEE80211_RATE_MAXSIZE - rates[1], return);
+ IEEE80211_VERIFY_ELEMENT(ssid, IEEE80211_NWID_LEN, return);
+ IEEE80211_VERIFY_SSID(vap->iv_bss, ssid, return);
+ if ((vap->iv_flags & IEEE80211_F_HIDESSID) && ssid[1] == 0) {
+ IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT,
+ wh, NULL,
+ "%s", "no ssid with ssid suppression enabled");
+ vap->iv_stats.is_rx_ssidmismatch++; /*XXX*/
+ return;
+ }
+
+ /* XXX find a better class or define its own */
+ IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_INPUT, wh->i_addr2,
+ "%s", "recv probe req");
+ /*
+ * Some legacy 11b clients cannot hack a complete
+ * probe response frame. When the request includes
+ * only a bare-bones rate set, communicate this to
+ * the transmit side.
+ */
+ ieee80211_send_proberesp(vap, wh->i_addr2,
+ is11bclient(rates, xrates) ? IEEE80211_SEND_LEGACY_11B : 0);
+ break;
+
+ case IEEE80211_FC0_SUBTYPE_ACTION: {
+ const struct ieee80211_action *ia;
+
+ if (vap->iv_state != IEEE80211_S_RUN) {
+ IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT,
+ wh, NULL, "wrong state %s",
+ ieee80211_state_name[vap->iv_state]);
+ vap->iv_stats.is_rx_mgtdiscard++;
+ return;
+ }
+ /*
+ * action frame format:
+ * [1] category
+ * [1] action
+ * [tlv] parameters
+ */
+ IEEE80211_VERIFY_LENGTH(efrm - frm,
+ sizeof(struct ieee80211_action), return);
+ ia = (const struct ieee80211_action *) frm;
+
+ vap->iv_stats.is_rx_action++;
+ IEEE80211_NODE_STAT(ni, rx_action);
+
+ /* verify frame payloads but defer processing */
+ /* XXX maybe push this to method */
+ switch (ia->ia_category) {
+ case IEEE80211_ACTION_CAT_BA:
+ switch (ia->ia_action) {
+ case IEEE80211_ACTION_BA_ADDBA_REQUEST:
+ IEEE80211_VERIFY_LENGTH(efrm - frm,
+ sizeof(struct ieee80211_action_ba_addbarequest),
+ return);
+ break;
+ case IEEE80211_ACTION_BA_ADDBA_RESPONSE:
+ IEEE80211_VERIFY_LENGTH(efrm - frm,
+ sizeof(struct ieee80211_action_ba_addbaresponse),
+ return);
+ break;
+ case IEEE80211_ACTION_BA_DELBA:
+ IEEE80211_VERIFY_LENGTH(efrm - frm,
+ sizeof(struct ieee80211_action_ba_delba),
+ return);
+ break;
+ }
+ break;
+ case IEEE80211_ACTION_CAT_HT:
+ switch (ia->ia_action) {
+ case IEEE80211_ACTION_HT_TXCHWIDTH:
+ IEEE80211_VERIFY_LENGTH(efrm - frm,
+ sizeof(struct ieee80211_action_ht_txchwidth),
+ return);
+ break;
+ }
+ break;
+ }
+ ic->ic_recv_action(ni, wh, frm, efrm);
+ break;
+ }
+
+ case IEEE80211_FC0_SUBTYPE_AUTH:
+ case IEEE80211_FC0_SUBTYPE_ASSOC_REQ:
+ case IEEE80211_FC0_SUBTYPE_REASSOC_REQ:
+ case IEEE80211_FC0_SUBTYPE_ASSOC_RESP:
+ case IEEE80211_FC0_SUBTYPE_REASSOC_RESP:
+ case IEEE80211_FC0_SUBTYPE_DEAUTH:
+ case IEEE80211_FC0_SUBTYPE_DISASSOC:
+ IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT,
+ wh, NULL, "%s", "not handled");
+ vap->iv_stats.is_rx_mgtdiscard++;
+ return;
+
+ default:
+ IEEE80211_DISCARD(vap, IEEE80211_MSG_ANY,
+ wh, "mgt", "subtype 0x%x not handled", subtype);
+ vap->iv_stats.is_rx_badsubtype++;
+ break;
+ }
+}
+#undef IEEE80211_VERIFY_LENGTH
+#undef IEEE80211_VERIFY_ELEMENT
+
+static void
+ahdemo_recv_mgmt(struct ieee80211_node *ni, struct mbuf *m0,
+ int subtype, int rssi, int nf)
+{
+ struct ieee80211vap *vap = ni->ni_vap;
+ struct ieee80211com *ic = ni->ni_ic;
+
+ /*
+ * Process management frames when scanning; useful for doing
+ * a site-survey.
+ */
+ if (ic->ic_flags & IEEE80211_F_SCAN)
+ adhoc_recv_mgmt(ni, m0, subtype, rssi, nf);
+ else
+ vap->iv_stats.is_rx_mgtdiscard++;
+}
+
+static void
+adhoc_recv_ctl(struct ieee80211_node *ni, struct mbuf *m0, int subtype)
+{
+}
diff --git a/rtems/freebsd/net80211/ieee80211_adhoc.h b/rtems/freebsd/net80211/ieee80211_adhoc.h
new file mode 100644
index 00000000..2fad984e
--- /dev/null
+++ b/rtems/freebsd/net80211/ieee80211_adhoc.h
@@ -0,0 +1,35 @@
+/*-
+ * Copyright (c) 2007-2008 Sam Leffler, Errno Consulting
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+#ifndef _NET80211_IEEE80211_ADHOC_HH_
+#define _NET80211_IEEE80211_ADHOC_HH_
+
+/*
+ * Adhoc-mode (ibss+ahdemo) implementation definitions.
+ */
+void ieee80211_adhoc_attach(struct ieee80211com *);
+void ieee80211_adhoc_detach(struct ieee80211com *);
+#endif /* !_NET80211_IEEE80211_ADHOC_HH_ */
diff --git a/rtems/freebsd/net80211/ieee80211_ageq.c b/rtems/freebsd/net80211/ieee80211_ageq.c
new file mode 100644
index 00000000..93523cee
--- /dev/null
+++ b/rtems/freebsd/net80211/ieee80211_ageq.c
@@ -0,0 +1,239 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 2009 Sam Leffler, Errno Consulting
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * IEEE 802.11 age queue support.
+ */
+#include <rtems/freebsd/local/opt_wlan.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/kernel.h>
+
+#include <rtems/freebsd/sys/socket.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/if_media.h>
+#include <rtems/freebsd/net/ethernet.h>
+
+#include <rtems/freebsd/net80211/ieee80211_var.h>
+
+/*
+ * Initialize an ageq.
+ */
+void
+ieee80211_ageq_init(struct ieee80211_ageq *aq, int maxlen, const char *name)
+{
+ memset(aq, 0, sizeof(*aq));
+ aq->aq_maxlen = maxlen;
+ IEEE80211_AGEQ_INIT(aq, name); /* OS-dependent setup */
+}
+
+/*
+ * Cleanup an ageq initialized with ieee80211_ageq_init. Note
+ * the queue is assumed empty; this can be done with ieee80211_ageq_drain.
+ */
+void
+ieee80211_ageq_cleanup(struct ieee80211_ageq *aq)
+{
+ KASSERT(aq->aq_len == 0, ("%d frames on ageq", aq->aq_len));
+ IEEE80211_AGEQ_DESTROY(aq); /* OS-dependent cleanup */
+}
+
+/*
+ * Free an mbuf according to ageq rules: if marked as holding
+ * an 802.11 frame then also reclaim a node reference from
+ * the packet header; this handles packets q'd in the tx path.
+ */
+static void
+ageq_mfree(struct mbuf *m)
+{
+ if (m->m_flags & M_ENCAP) {
+ struct ieee80211_node *ni = (void *) m->m_pkthdr.rcvif;
+ ieee80211_free_node(ni);
+ }
+ m->m_nextpkt = NULL;
+ m_freem(m);
+}
+
+/*
+ * Free a list of mbufs using ageq rules (see above).
+ */
+void
+ieee80211_ageq_mfree(struct mbuf *m)
+{
+ struct mbuf *next;
+
+ for (; m != NULL; m = next) {
+ next = m->m_nextpkt;
+ ageq_mfree(m);
+ }
+}
+
+/*
+ * Append an mbuf to the ageq and mark it with the specified max age.
+ * If the frame is not removed before the age (in seconds) expires
+ * then it is reclaimed (along with any node reference).
+ */
+int
+ieee80211_ageq_append(struct ieee80211_ageq *aq, struct mbuf *m, int age)
+{
+ IEEE80211_AGEQ_LOCK(aq);
+ if (__predict_true(aq->aq_len < aq->aq_maxlen)) {
+ if (aq->aq_tail == NULL) {
+ aq->aq_head = m;
+ } else {
+ aq->aq_tail->m_nextpkt = m;
+ age -= M_AGE_GET(aq->aq_head);
+ }
+ KASSERT(age >= 0, ("age %d", age));
+ M_AGE_SET(m, age);
+ m->m_nextpkt = NULL;
+ aq->aq_tail = m;
+ aq->aq_len++;
+ IEEE80211_AGEQ_UNLOCK(aq);
+ return 0;
+ } else {
+ /*
+ * No space, drop and cleanup references.
+ */
+ aq->aq_drops++;
+ IEEE80211_AGEQ_UNLOCK(aq);
+ /* XXX tail drop? */
+ ageq_mfree(m);
+ return ENOSPC;
+ }
+}
+
+/*
+ * Drain/reclaim all frames from an ageq.
+ */
+void
+ieee80211_ageq_drain(struct ieee80211_ageq *aq)
+{
+ ieee80211_ageq_mfree(ieee80211_ageq_remove(aq, NULL));
+}
+
+/*
+ * Drain/reclaim frames associated with a specific node from an ageq.
+ */
+void
+ieee80211_ageq_drain_node(struct ieee80211_ageq *aq,
+ struct ieee80211_node *ni)
+{
+ ieee80211_ageq_mfree(ieee80211_ageq_remove(aq, ni));
+}
+
+/*
+ * Age frames on the age queue. Ages are stored as time
+ * deltas (in seconds) relative to the head so we can check
+ * and/or adjust only the head of the list. If a frame's age
+ * exceeds the time quantum then remove it. The list of removed
+ * frames is returned to the caller joined by m_nextpkt.
+ */
+struct mbuf *
+ieee80211_ageq_age(struct ieee80211_ageq *aq, int quanta)
+{
+ struct mbuf *head, **phead;
+ struct mbuf *m;
+
+ phead = &head;
+ if (aq->aq_len != 0) {
+ IEEE80211_AGEQ_LOCK(aq);
+ while ((m = aq->aq_head) != NULL && M_AGE_GET(m) < quanta) {
+ if ((aq->aq_head = m->m_nextpkt) == NULL)
+ aq->aq_tail = NULL;
+ KASSERT(aq->aq_len > 0, ("aq len %d", aq->aq_len));
+ aq->aq_len--;
+ /* add to private list for return */
+ *phead = m;
+ phead = &m->m_nextpkt;
+ }
+ if (m != NULL)
+ M_AGE_SUB(m, quanta);
+ IEEE80211_AGEQ_UNLOCK(aq);
+ }
+ *phead = NULL;
+ return head;
+}
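Since entries store age deltas relative to the head, only the head carries an
absolute age and only the head needs rewriting per aging pass. A worked
example of that bookkeeping, illustrative only: append frame A with age 30
(stored 30), then frame B with age 50 (stored 50 - 30 = 20); with quanta 30,
the first pass leaves A at 0 and the second pass reclaims both.

#include <assert.h>

int
main(void)
{
	int quanta = 30;
	int head_age = 30;		/* frame A: stored absolute age */
	int next_age = 50 - head_age;	/* frame B: stored delta vs. head */

	/* First pass: head has not expired yet (30 < 30 is false)... */
	assert(!(head_age < quanta));
	head_age -= quanta;		/* ...so only M_AGE_SUB(head) runs */

	/* Second pass: A expires (0 < 30) and then B does too (20 < 30). */
	assert(head_age < quanta);
	assert(next_age < quanta);

	/* B's total lifetime is head(30) + delta(20) = its requested 50. */
	assert(30 + next_age == 50);
	return 0;
}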
+
+/*
+ * Remove all frames matching the specified node identifier
+ * (NULL matches all). Frames are returned as a list joined
+ * by m_nextpkt.
+ */
+struct mbuf *
+ieee80211_ageq_remove(struct ieee80211_ageq *aq,
+ struct ieee80211_node *match)
+{
+ struct mbuf *m, **prev, *ohead;
+ struct mbuf *head, **phead;
+
+ IEEE80211_AGEQ_LOCK(aq);
+ ohead = aq->aq_head;
+ prev = &aq->aq_head;
+ phead = &head;
+ while ((m = *prev) != NULL) {
+ if (match != NULL && m->m_pkthdr.rcvif != (void *) match) {
+ prev = &m->m_nextpkt;
+ continue;
+ }
+ /*
+ * Adjust q length.
+ */
+ KASSERT(aq->aq_len > 0, ("aq len %d", aq->aq_len));
+ aq->aq_len--;
+ /*
+ * Remove from forward list; tail pointer is harder.
+ */
+ if (aq->aq_tail == m) {
+ KASSERT(m->m_nextpkt == NULL, ("not last"));
+ if (aq->aq_head == m) { /* list empty */
+ KASSERT(aq->aq_len == 0,
+ ("not empty, len %d", aq->aq_len));
+ aq->aq_tail = NULL;
+ } else { /* must be one before */
+ aq->aq_tail = (struct mbuf *)((uintptr_t)prev -
+ offsetof(struct mbuf, m_nextpkt));
+ }
+ }
+ *prev = m->m_nextpkt;
+
+ /* add to private list for return */
+ *phead = m;
+ phead = &m->m_nextpkt;
+ }
+ if (head == ohead && aq->aq_head != NULL) /* correct age */
+ M_AGE_SET(aq->aq_head, M_AGE_GET(head));
+ IEEE80211_AGEQ_UNLOCK(aq);
+
+ *phead = NULL;
+ return head;
+}
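The tail-pointer fixup above relies on prev pointing at the previous element's
m_nextpkt field, so subtracting offsetof(struct mbuf, m_nextpkt) recovers the
element itself (the usual container-of computation). A standalone sketch with
a toy node type:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

struct node {
	int val;
	struct node *nextpkt;
};

int
main(void)
{
	struct node a = { 1, NULL }, b = { 2, NULL };
	struct node **prev;
	struct node *owner;

	a.nextpkt = &b;
	prev = &a.nextpkt;	/* points into a, at its link field */

	/* Recover the containing node from the link-field address. */
	owner = (struct node *)
	    ((uintptr_t)prev - offsetof(struct node, nextpkt));
	assert(owner == &a);
	assert(owner->val == 1);
	return 0;
}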
diff --git a/rtems/freebsd/net80211/ieee80211_ageq.h b/rtems/freebsd/net80211/ieee80211_ageq.h
new file mode 100644
index 00000000..8aecae05
--- /dev/null
+++ b/rtems/freebsd/net80211/ieee80211_ageq.h
@@ -0,0 +1,54 @@
+/*-
+ * Copyright (c) 2009 Sam Leffler, Errno Consulting
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+#ifndef _NET80211_IEEE80211_STAGEQ_HH_
+#define _NET80211_IEEE80211_STAGEQ_HH_
+
+struct ieee80211_node;
+struct mbuf;
+
+struct ieee80211_ageq {
+ ieee80211_ageq_lock_t aq_lock;
+ int aq_len; /* # items on queue */
+ int aq_maxlen; /* max queue length */
+ int aq_drops; /* frames dropped */
+ struct mbuf *aq_head; /* frames linked w/ m_nextpkt */
+ struct mbuf *aq_tail; /* last frame in queue */
+};
+
+void ieee80211_ageq_init(struct ieee80211_ageq *, int maxlen,
+ const char *name);
+void ieee80211_ageq_cleanup(struct ieee80211_ageq *);
+void ieee80211_ageq_mfree(struct mbuf *);
+int ieee80211_ageq_append(struct ieee80211_ageq *, struct mbuf *,
+ int age);
+void ieee80211_ageq_drain(struct ieee80211_ageq *);
+void ieee80211_ageq_drain_node(struct ieee80211_ageq *,
+ struct ieee80211_node *);
+struct mbuf *ieee80211_ageq_age(struct ieee80211_ageq *, int quanta);
+struct mbuf *ieee80211_ageq_remove(struct ieee80211_ageq *,
+ struct ieee80211_node *match);
+#endif /* _NET80211_IEEE80211_STAGEQ_HH_ */
diff --git a/rtems/freebsd/net80211/ieee80211_amrr.c b/rtems/freebsd/net80211/ieee80211_amrr.c
new file mode 100644
index 00000000..196e426f
--- /dev/null
+++ b/rtems/freebsd/net80211/ieee80211_amrr.c
@@ -0,0 +1,319 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/* $OpenBSD: ieee80211_amrr.c,v 1.1 2006/06/17 19:07:19 damien Exp $ */
+
+/*-
+ * Copyright (c) 2010 Rui Paulo <rpaulo@FreeBSD.org>
+ * Copyright (c) 2006
+ * Damien Bergamini <damien.bergamini@free.fr>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/*-
+ * Naive implementation of the Adaptive Multi Rate Retry algorithm:
+ *
+ * "IEEE 802.11 Rate Adaptation: A Practical Approach"
+ * Mathieu Lacage, Hossein Manshaei, Thierry Turletti
+ * INRIA Sophia - Projet Planete
+ * http://www-sop.inria.fr/rapports/sophia/RR-5208.html
+ */
+#include <rtems/freebsd/local/opt_wlan.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/module.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/sysctl.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/if_media.h>
+
+#ifdef INET
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/if_ether.h>
+#endif
+
+#include <rtems/freebsd/net80211/ieee80211_var.h>
+#include <rtems/freebsd/net80211/ieee80211_amrr.h>
+#include <rtems/freebsd/net80211/ieee80211_ratectl.h>
+
+#define is_success(amn) \
+ ((amn)->amn_retrycnt < (amn)->amn_txcnt / 10)
+#define is_failure(amn) \
+ ((amn)->amn_retrycnt > (amn)->amn_txcnt / 3)
+#define is_enough(amn) \
+ ((amn)->amn_txcnt > 10)
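+
+/*
+ * NB: worked example: with amn_txcnt = 40 the window qualifies via
+ * is_enough, a retry count below 40/10 = 4 classifies as success and
+ * one above 40/3 = 13 as failure; anything in between leaves the
+ * current rate untouched.
+ */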
+
+static void amrr_setinterval(const struct ieee80211vap *, int);
+static void amrr_init(struct ieee80211vap *);
+static void amrr_deinit(struct ieee80211vap *);
+static void amrr_node_init(struct ieee80211_node *);
+static void amrr_node_deinit(struct ieee80211_node *);
+static int amrr_update(struct ieee80211_amrr *,
+ struct ieee80211_amrr_node *, struct ieee80211_node *);
+static int amrr_rate(struct ieee80211_node *, void *, uint32_t);
+static void amrr_tx_complete(const struct ieee80211vap *,
+ const struct ieee80211_node *, int,
+ void *, void *);
+static void amrr_tx_update(const struct ieee80211vap *vap,
+ const struct ieee80211_node *, void *, void *, void *);
+static void amrr_sysctlattach(struct ieee80211vap *,
+ struct sysctl_ctx_list *, struct sysctl_oid *);
+
+/* number of references from net80211 layer */
+static int nrefs = 0;
+
+static const struct ieee80211_ratectl amrr = {
+ .ir_name = "amrr",
+ .ir_attach = NULL,
+ .ir_detach = NULL,
+ .ir_init = amrr_init,
+ .ir_deinit = amrr_deinit,
+ .ir_node_init = amrr_node_init,
+ .ir_node_deinit = amrr_node_deinit,
+ .ir_rate = amrr_rate,
+ .ir_tx_complete = amrr_tx_complete,
+ .ir_tx_update = amrr_tx_update,
+ .ir_setinterval = amrr_setinterval,
+};
+IEEE80211_RATECTL_MODULE(amrr, 1);
+IEEE80211_RATECTL_ALG(amrr, IEEE80211_RATECTL_AMRR, amrr);
+
+static void
+amrr_setinterval(const struct ieee80211vap *vap, int msecs)
+{
+ struct ieee80211_amrr *amrr = vap->iv_rs;
+ int t;
+
+ if (msecs < 100)
+ msecs = 100;
+ t = msecs_to_ticks(msecs);
+ amrr->amrr_interval = (t < 1) ? 1 : t;
+}
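+
+/*
+ * NB: assuming hz = 100 (a common tick rate), msecs_to_ticks(500) is
+ * 50, so the default 500 ms interval set below becomes 50 ticks;
+ * requests under 100 ms are clamped, so amrr_setinterval(vap, 20)
+ * behaves like amrr_setinterval(vap, 100).
+ */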
+
+static void
+amrr_init(struct ieee80211vap *vap)
+{
+ struct ieee80211_amrr *amrr;
+
+ KASSERT(vap->iv_rs == NULL, ("%s called multiple times", __func__));
+
+ amrr = vap->iv_rs = malloc(sizeof(struct ieee80211_amrr),
+ M_80211_RATECTL, M_NOWAIT|M_ZERO);
+ if (amrr == NULL) {
+ if_printf(vap->iv_ifp, "couldn't alloc ratectl structure\n");
+ return;
+ }
+ amrr->amrr_min_success_threshold = IEEE80211_AMRR_MIN_SUCCESS_THRESHOLD;
+ amrr->amrr_max_success_threshold = IEEE80211_AMRR_MAX_SUCCESS_THRESHOLD;
+ amrr_setinterval(vap, 500 /* ms */);
+ amrr_sysctlattach(vap, vap->iv_sysctl, vap->iv_oid);
+}
+
+static void
+amrr_deinit(struct ieee80211vap *vap)
+{
+ free(vap->iv_rs, M_80211_RATECTL);
+}
+
+static void
+amrr_node_init(struct ieee80211_node *ni)
+{
+ const struct ieee80211_rateset *rs = &ni->ni_rates;
+ struct ieee80211vap *vap = ni->ni_vap;
+ struct ieee80211_amrr *amrr = vap->iv_rs;
+ struct ieee80211_amrr_node *amn;
+
+ if (ni->ni_rctls == NULL) {
+ ni->ni_rctls = amn = malloc(sizeof(struct ieee80211_amrr_node),
+ M_80211_RATECTL, M_NOWAIT|M_ZERO);
+ if (amn == NULL) {
+ if_printf(vap->iv_ifp, "couldn't alloc per-node ratectl "
+ "structure\n");
+ return;
+ }
+ } else
+ amn = ni->ni_rctls;
+ amn->amn_amrr = amrr;
+ amn->amn_success = 0;
+ amn->amn_recovery = 0;
+ amn->amn_txcnt = amn->amn_retrycnt = 0;
+ amn->amn_success_threshold = amrr->amrr_min_success_threshold;
+
+ /* pick initial rate */
+ for (amn->amn_rix = rs->rs_nrates - 1;
+ amn->amn_rix > 0 && (rs->rs_rates[amn->amn_rix] & IEEE80211_RATE_VAL) > 72;
+ amn->amn_rix--)
+ ;
+ ni->ni_txrate = rs->rs_rates[amn->amn_rix] & IEEE80211_RATE_VAL;
+ amn->amn_ticks = ticks;
+
+ IEEE80211_NOTE(ni->ni_vap, IEEE80211_MSG_RATECTL, ni,
+ "AMRR initial rate %d", ni->ni_txrate);
+}
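+
+/*
+ * NB: rs_rates[] entries are in 500 kb/s units, so the cap of 72 in the
+ * loop above is 36 Mb/s; e.g. with an ascending 11g rate set ending in
+ * ... 48, 72, 96, 108, the loop walks down past 108 and 96 and settles
+ * on 72 (36 Mb/s) as the initial rate.
+ */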
+
+static void
+amrr_node_deinit(struct ieee80211_node *ni)
+{
+ free(ni->ni_rctls, M_80211_RATECTL);
+}
+
+static int
+amrr_update(struct ieee80211_amrr *amrr, struct ieee80211_amrr_node *amn,
+ struct ieee80211_node *ni)
+{
+ int rix = amn->amn_rix;
+
+ KASSERT(is_enough(amn), ("txcnt %d", amn->amn_txcnt));
+
+ if (is_success(amn)) {
+ amn->amn_success++;
+ if (amn->amn_success >= amn->amn_success_threshold &&
+ rix + 1 < ni->ni_rates.rs_nrates) {
+ amn->amn_recovery = 1;
+ amn->amn_success = 0;
+ rix++;
+ IEEE80211_NOTE(ni->ni_vap, IEEE80211_MSG_RATECTL, ni,
+ "AMRR increasing rate %d (txcnt=%d retrycnt=%d)",
+ ni->ni_rates.rs_rates[rix] & IEEE80211_RATE_VAL,
+ amn->amn_txcnt, amn->amn_retrycnt);
+ } else {
+ amn->amn_recovery = 0;
+ }
+ } else if (is_failure(amn)) {
+ amn->amn_success = 0;
+ if (rix > 0) {
+ if (amn->amn_recovery) {
+ amn->amn_success_threshold *= 2;
+ if (amn->amn_success_threshold >
+ amrr->amrr_max_success_threshold)
+ amn->amn_success_threshold =
+ amrr->amrr_max_success_threshold;
+ } else {
+ amn->amn_success_threshold =
+ amrr->amrr_min_success_threshold;
+ }
+ rix--;
+ IEEE80211_NOTE(ni->ni_vap, IEEE80211_MSG_RATECTL, ni,
+ "AMRR decreasing rate %d (txcnt=%d retrycnt=%d)",
+ ni->ni_rates.rs_rates[rix] & IEEE80211_RATE_VAL,
+ amn->amn_txcnt, amn->amn_retrycnt);
+ }
+ amn->amn_recovery = 0;
+ }
+
+ /* reset counters */
+ amn->amn_txcnt = 0;
+ amn->amn_retrycnt = 0;
+
+ return rix;
+}
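+
+/*
+ * NB: each failed upward probe (a failure with amn_recovery set)
+ * doubles the success threshold, clamped at the maximum; with the
+ * defaults (min 1, max 15) successive failed probes walk it
+ * 1 -> 2 -> 4 -> 8 -> 15, while a failure outside recovery resets it
+ * to the minimum.
+ */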
+
+/*
+ * Return the rate index to use in sending a data frame.
+ * Update our internal state if it's been long enough.
+ * If the rate changes we also update ni_txrate to match.
+ */
+static int
+amrr_rate(struct ieee80211_node *ni, void *arg __unused, uint32_t iarg __unused)
+{
+ struct ieee80211_amrr_node *amn = ni->ni_rctls;
+ struct ieee80211_amrr *amrr = amn->amn_amrr;
+ int rix;
+
+ if (is_enough(amn) && (ticks - amn->amn_ticks) > amrr->amrr_interval) {
+ rix = amrr_update(amrr, amn, ni);
+ if (rix != amn->amn_rix) {
+ /* update public rate */
+ ni->ni_txrate =
+ ni->ni_rates.rs_rates[rix] & IEEE80211_RATE_VAL;
+ amn->amn_rix = rix;
+ }
+ amn->amn_ticks = ticks;
+ } else
+ rix = amn->amn_rix;
+ return rix;
+}
+
+/*
+ * Update statistics with tx complete status. Ok is non-zero
+ * if the packet is known to be ACK'd. Retries is the number of
+ * retransmissions (i.e. xmit attempts - 1).
+ */
+static void
+amrr_tx_complete(const struct ieee80211vap *vap,
+ const struct ieee80211_node *ni, int ok,
+ void *arg1, void *arg2 __unused)
+{
+ struct ieee80211_amrr_node *amn = ni->ni_rctls;
+ int retries = *(int *)arg1;
+
+ amn->amn_txcnt++;
+ if (ok)
+ amn->amn_success++;
+ amn->amn_retrycnt += retries;
+}
+
+/*
+ * Set tx count/retry statistics explicitly. Intended for
+ * drivers that poll statistics maintained in the device.
+ */
+static void
+amrr_tx_update(const struct ieee80211vap *vap, const struct ieee80211_node *ni,
+ void *arg1, void *arg2, void *arg3)
+{
+ struct ieee80211_amrr_node *amn = ni->ni_rctls;
+ int txcnt = *(int *)arg1, success = *(int *)arg2, retrycnt = *(int *)arg3;
+
+ amn->amn_txcnt = txcnt;
+ amn->amn_success = success;
+ amn->amn_retrycnt = retrycnt;
+}
+
+static int
+amrr_sysctl_interval(SYSCTL_HANDLER_ARGS)
+{
+ struct ieee80211vap *vap = arg1;
+ struct ieee80211_amrr *amrr = vap->iv_rs;
+ int msecs = ticks_to_msecs(amrr->amrr_interval);
+ int error;
+
+ error = sysctl_handle_int(oidp, &msecs, 0, req);
+ if (error || !req->newptr)
+ return error;
+ amrr_setinterval(vap, msecs);
+ return 0;
+}
+
+static void
+amrr_sysctlattach(struct ieee80211vap *vap,
+ struct sysctl_ctx_list *ctx, struct sysctl_oid *tree)
+{
+ struct ieee80211_amrr *amrr = vap->iv_rs;
+
+ SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
+ "amrr_rate_interval", CTLTYPE_INT | CTLFLAG_RW, vap,
+ 0, amrr_sysctl_interval, "I", "amrr operation interval (ms)");
+ /* XXX bounds check values */
+ SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
+ "amrr_max_sucess_threshold", CTLFLAG_RW,
+ &amrr->amrr_max_success_threshold, 0, "");
+ SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
+ "amrr_min_sucess_threshold", CTLFLAG_RW,
+ &amrr->amrr_min_success_threshold, 0, "");
+}
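+
+/*
+ * NB: the knobs attached above are run-time tunable; e.g., assuming the
+ * vap's sysctl tree is rooted at net.wlan.0:
+ *	sysctl net.wlan.0.amrr_rate_interval=1000
+ */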
diff --git a/rtems/freebsd/net80211/ieee80211_amrr.h b/rtems/freebsd/net80211/ieee80211_amrr.h
new file mode 100644
index 00000000..b425e268
--- /dev/null
+++ b/rtems/freebsd/net80211/ieee80211_amrr.h
@@ -0,0 +1,61 @@
+/* $FreeBSD$ */
+/* $OpenBSD: ieee80211_amrr.h,v 1.3 2006/06/17 19:34:31 damien Exp $ */
+
+/*-
+ * Copyright (c) 2006
+ * Damien Bergamini <damien.bergamini@free.fr>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#ifndef _NET80211_IEEE80211_AMRR_HH_
+#define _NET80211_IEEE80211_AMRR_HH_
+
+/*-
+ * Naive implementation of the Adaptive Multi Rate Retry algorithm:
+ *
+ * "IEEE 802.11 Rate Adaptation: A Practical Approach"
+ * Mathieu Lacage, Hossein Manshaei, Thierry Turletti
+ * INRIA Sophia - Projet Planete
+ * http://www-sop.inria.fr/rapports/sophia/RR-5208.html
+ */
+
+/*
+ * Rate control settings.
+ */
+struct ieee80211vap;
+
+struct ieee80211_amrr {
+ u_int amrr_min_success_threshold;
+ u_int amrr_max_success_threshold;
+ int amrr_interval; /* update interval (ticks) */
+};
+
+#define IEEE80211_AMRR_MIN_SUCCESS_THRESHOLD 1
+#define IEEE80211_AMRR_MAX_SUCCESS_THRESHOLD 15
+
+/*
+ * Rate control state for a given node.
+ */
+struct ieee80211_amrr_node {
+ struct ieee80211_amrr *amn_amrr;/* backpointer */
+ int amn_rix; /* current rate index */
+ int amn_ticks; /* time of last update */
+ /* statistics */
+ u_int amn_txcnt;
+ u_int amn_success;
+ u_int amn_success_threshold;
+ u_int amn_recovery;
+ u_int amn_retrycnt;
+};
+
+#endif /* _NET80211_IEEE80211_AMRR_HH_ */
diff --git a/rtems/freebsd/net80211/ieee80211_crypto.c b/rtems/freebsd/net80211/ieee80211_crypto.c
new file mode 100644
index 00000000..3b4e8fb9
--- /dev/null
+++ b/rtems/freebsd/net80211/ieee80211_crypto.c
@@ -0,0 +1,663 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 2001 Atsushi Onoe
+ * Copyright (c) 2002-2008 Sam Leffler, Errno Consulting
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * IEEE 802.11 generic crypto support.
+ */
+#include <rtems/freebsd/local/opt_wlan.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/mbuf.h>
+
+#include <rtems/freebsd/sys/socket.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/if_media.h>
+#include <rtems/freebsd/net/ethernet.h> /* XXX ETHER_HDR_LEN */
+
+#include <rtems/freebsd/net80211/ieee80211_var.h>
+
+MALLOC_DEFINE(M_80211_CRYPTO, "80211crypto", "802.11 crypto state");
+
+static int _ieee80211_crypto_delkey(struct ieee80211vap *,
+ struct ieee80211_key *);
+
+/*
+ * Table of registered cipher modules.
+ */
+static const struct ieee80211_cipher *ciphers[IEEE80211_CIPHER_MAX];
+
+/*
+ * Default "null" key management routines.
+ */
+static int
+null_key_alloc(struct ieee80211vap *vap, struct ieee80211_key *k,
+ ieee80211_keyix *keyix, ieee80211_keyix *rxkeyix)
+{
+ if (!(&vap->iv_nw_keys[0] <= k &&
+ k < &vap->iv_nw_keys[IEEE80211_WEP_NKID])) {
+ /*
+ * Not in the global key table, the driver should handle this
+ * by allocating a slot in the h/w key table/cache. In
+ * lieu of that, return key slot 0 for any unicast key
+ * request. We disallow the request if this is a group key.
+ * This default policy does the right thing for legacy hardware
+ * with a 4 key table. It also handles devices that pass
+ * packets through untouched when marked with the WEP bit
+ * and key index 0.
+ */
+ if (k->wk_flags & IEEE80211_KEY_GROUP)
+ return 0;
+ *keyix = 0; /* NB: use key index 0 for ucast key */
+ } else {
+ *keyix = k - vap->iv_nw_keys;
+ }
+ *rxkeyix = IEEE80211_KEYIX_NONE; /* XXX maybe *keyix? */
+ return 1;
+}
+static int
+null_key_delete(struct ieee80211vap *vap, const struct ieee80211_key *k)
+{
+ return 1;
+}
+static int
+null_key_set(struct ieee80211vap *vap, const struct ieee80211_key *k,
+ const uint8_t mac[IEEE80211_ADDR_LEN])
+{
+ return 1;
+}
+static void null_key_update(struct ieee80211vap *vap) {}
+
+/*
+ * Write-arounds for common operations.
+ */
+static __inline void
+cipher_detach(struct ieee80211_key *key)
+{
+ key->wk_cipher->ic_detach(key);
+}
+
+static __inline void *
+cipher_attach(struct ieee80211vap *vap, struct ieee80211_key *key)
+{
+ return key->wk_cipher->ic_attach(vap, key);
+}
+
+/*
+ * Wrappers for driver key management methods.
+ */
+static __inline int
+dev_key_alloc(struct ieee80211vap *vap,
+ struct ieee80211_key *key,
+ ieee80211_keyix *keyix, ieee80211_keyix *rxkeyix)
+{
+ return vap->iv_key_alloc(vap, key, keyix, rxkeyix);
+}
+
+static __inline int
+dev_key_delete(struct ieee80211vap *vap,
+ const struct ieee80211_key *key)
+{
+ return vap->iv_key_delete(vap, key);
+}
+
+static __inline int
+dev_key_set(struct ieee80211vap *vap, const struct ieee80211_key *key)
+{
+ return vap->iv_key_set(vap, key, key->wk_macaddr);
+}
+
+/*
+ * Setup crypto support for a device/shared instance.
+ */
+void
+ieee80211_crypto_attach(struct ieee80211com *ic)
+{
+ /* NB: we assume everything is pre-zero'd */
+ ciphers[IEEE80211_CIPHER_NONE] = &ieee80211_cipher_none;
+}
+
+/*
+ * Teardown crypto support.
+ */
+void
+ieee80211_crypto_detach(struct ieee80211com *ic)
+{
+}
+
+/*
+ * Setup crypto support for a vap.
+ */
+void
+ieee80211_crypto_vattach(struct ieee80211vap *vap)
+{
+ int i;
+
+ /* NB: we assume everything is pre-zero'd */
+ vap->iv_max_keyix = IEEE80211_WEP_NKID;
+ vap->iv_def_txkey = IEEE80211_KEYIX_NONE;
+ for (i = 0; i < IEEE80211_WEP_NKID; i++)
+ ieee80211_crypto_resetkey(vap, &vap->iv_nw_keys[i],
+ IEEE80211_KEYIX_NONE);
+ /*
+ * Initialize the driver key support routines to noop entries.
+ * This is useful especially for the cipher test modules.
+ */
+ vap->iv_key_alloc = null_key_alloc;
+ vap->iv_key_set = null_key_set;
+ vap->iv_key_delete = null_key_delete;
+ vap->iv_key_update_begin = null_key_update;
+ vap->iv_key_update_end = null_key_update;
+}
+
+/*
+ * Teardown crypto support for a vap.
+ */
+void
+ieee80211_crypto_vdetach(struct ieee80211vap *vap)
+{
+ ieee80211_crypto_delglobalkeys(vap);
+}
+
+/*
+ * Register a crypto cipher module.
+ */
+void
+ieee80211_crypto_register(const struct ieee80211_cipher *cip)
+{
+ if (cip->ic_cipher >= IEEE80211_CIPHER_MAX) {
+ printf("%s: cipher %s has an invalid cipher index %u\n",
+ __func__, cip->ic_name, cip->ic_cipher);
+ return;
+ }
+ if (ciphers[cip->ic_cipher] != NULL && ciphers[cip->ic_cipher] != cip) {
+ printf("%s: cipher %s registered with a different template\n",
+ __func__, cip->ic_name);
+ return;
+ }
+ ciphers[cip->ic_cipher] = cip;
+}
+
+/*
+ * Unregister a crypto cipher module.
+ */
+void
+ieee80211_crypto_unregister(const struct ieee80211_cipher *cip)
+{
+ if (cip->ic_cipher >= IEEE80211_CIPHER_MAX) {
+ printf("%s: cipher %s has an invalid cipher index %u\n",
+ __func__, cip->ic_name, cip->ic_cipher);
+ return;
+ }
+ if (ciphers[cip->ic_cipher] != NULL && ciphers[cip->ic_cipher] != cip) {
+ printf("%s: cipher %s registered with a different template\n",
+ __func__, cip->ic_name);
+ return;
+ }
+ /* NB: don't complain about not being registered */
+ /* XXX disallow if references */
+ ciphers[cip->ic_cipher] = NULL;
+}
+
+int
+ieee80211_crypto_available(u_int cipher)
+{
+ return cipher < IEEE80211_CIPHER_MAX && ciphers[cipher] != NULL;
+}
+
+/* XXX well-known names! */
+static const char *cipher_modnames[IEEE80211_CIPHER_MAX] = {
+ [IEEE80211_CIPHER_WEP] = "wlan_wep",
+ [IEEE80211_CIPHER_TKIP] = "wlan_tkip",
+ [IEEE80211_CIPHER_AES_OCB] = "wlan_aes_ocb",
+ [IEEE80211_CIPHER_AES_CCM] = "wlan_ccmp",
+ [IEEE80211_CIPHER_TKIPMIC] = "#4", /* NB: reserved */
+ [IEEE80211_CIPHER_CKIP] = "wlan_ckip",
+ [IEEE80211_CIPHER_NONE] = "wlan_none",
+};
+
+/* NB: there must be no overlap between user-supplied and device-owned flags */
+CTASSERT((IEEE80211_KEY_COMMON & IEEE80211_KEY_DEVICE) == 0);
+
+/*
+ * Establish a relationship between the specified key and cipher
+ * and, if necessary, allocate a hardware index from the driver.
+ * Note that when a fixed key index is required it must be specified.
+ *
+ * This must be the first call applied to a key; all the other key
+ * routines assume wk_cipher is set up.
+ *
+ * Locking must be handled by the caller using:
+ * ieee80211_key_update_begin(vap);
+ * ieee80211_key_update_end(vap);
+ */
+int
+ieee80211_crypto_newkey(struct ieee80211vap *vap,
+ int cipher, int flags, struct ieee80211_key *key)
+{
+ struct ieee80211com *ic = vap->iv_ic;
+ const struct ieee80211_cipher *cip;
+ ieee80211_keyix keyix, rxkeyix;
+ void *keyctx;
+ int oflags;
+
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_CRYPTO,
+ "%s: cipher %u flags 0x%x keyix %u\n",
+ __func__, cipher, flags, key->wk_keyix);
+
+ /*
+ * Validate cipher and set reference to cipher routines.
+ */
+ if (cipher >= IEEE80211_CIPHER_MAX) {
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_CRYPTO,
+ "%s: invalid cipher %u\n", __func__, cipher);
+ vap->iv_stats.is_crypto_badcipher++;
+ return 0;
+ }
+ cip = ciphers[cipher];
+ if (cip == NULL) {
+ /*
+ * Auto-load cipher module if we have a well-known name
+ * for it. It might be better to use string names rather
+ * than numbers and craft a module name based on the cipher
+ * name; e.g. wlan_cipher_<cipher-name>.
+ */
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_CRYPTO,
+ "%s: unregistered cipher %u, load module %s\n",
+ __func__, cipher, cipher_modnames[cipher]);
+ ieee80211_load_module(cipher_modnames[cipher]);
+ /*
+ * If cipher module loaded it should immediately
+ * call ieee80211_crypto_register which will fill
+ * in the entry in the ciphers array.
+ */
+ cip = ciphers[cipher];
+ if (cip == NULL) {
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_CRYPTO,
+ "%s: unable to load cipher %u, module %s\n",
+ __func__, cipher, cipher_modnames[cipher]);
+ vap->iv_stats.is_crypto_nocipher++;
+ return 0;
+ }
+ }
+
+ oflags = key->wk_flags;
+ flags &= IEEE80211_KEY_COMMON;
+ /* NB: preserve device attributes */
+ flags |= (oflags & IEEE80211_KEY_DEVICE);
+ /*
+ * If the hardware does not support the cipher then
+ * fallback to a host-based implementation.
+ */
+ if ((ic->ic_cryptocaps & (1<<cipher)) == 0) {
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_CRYPTO,
+ "%s: no h/w support for cipher %s, falling back to s/w\n",
+ __func__, cip->ic_name);
+ flags |= IEEE80211_KEY_SWCRYPT;
+ }
+ /*
+ * Hardware TKIP with software MIC is an important
+ * combination; we handle it by flagging each key,
+ * the cipher modules honor it.
+ */
+ if (cipher == IEEE80211_CIPHER_TKIP &&
+ (ic->ic_cryptocaps & IEEE80211_CRYPTO_TKIPMIC) == 0) {
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_CRYPTO,
+ "%s: no h/w support for TKIP MIC, falling back to s/w\n",
+ __func__);
+ flags |= IEEE80211_KEY_SWMIC;
+ }
+
+ /*
+ * Bind cipher to key instance. Note we do this
+ * after checking the device capabilities so the
+ * cipher module can optimize space usage based on
+ * whether or not it needs to do the cipher work.
+ */
+ if (key->wk_cipher != cip || key->wk_flags != flags) {
+ /*
+ * Fill in the flags so cipher modules can see s/w
+ * crypto requirements and potentially allocate
+ * different state and/or attach different method
+ * pointers.
+ */
+ key->wk_flags = flags;
+ keyctx = cip->ic_attach(vap, key);
+ if (keyctx == NULL) {
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_CRYPTO,
+ "%s: unable to attach cipher %s\n",
+ __func__, cip->ic_name);
+ key->wk_flags = oflags; /* restore old flags */
+ vap->iv_stats.is_crypto_attachfail++;
+ return 0;
+ }
+ cipher_detach(key);
+ key->wk_cipher = cip; /* XXX refcnt? */
+ key->wk_private = keyctx;
+ }
+
+ /*
+ * Ask the driver for a key index if we don't have one.
+ * Note that entries in the global key table always have
+ * an index; this means it's safe to call this routine
+ * for these entries just to setup the reference to the
+ * cipher template. Note also that when using software
+ * crypto we also call the driver to give us a key index.
+ */
+ if ((key->wk_flags & IEEE80211_KEY_DEVKEY) == 0) {
+ if (!dev_key_alloc(vap, key, &keyix, &rxkeyix)) {
+ /*
+ * Unable to setup driver state.
+ */
+ vap->iv_stats.is_crypto_keyfail++;
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_CRYPTO,
+ "%s: unable to setup cipher %s\n",
+ __func__, cip->ic_name);
+ return 0;
+ }
+ if (key->wk_flags != flags) {
+ /*
+ * Driver overrode flags we setup; typically because
+ * resources were unavailable to handle _this_ key.
+ * Re-attach the cipher context to allow cipher
+ * modules to handle differing requirements.
+ */
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_CRYPTO,
+ "%s: driver override for cipher %s, flags "
+ "0x%x -> 0x%x\n", __func__, cip->ic_name,
+ oflags, key->wk_flags);
+ keyctx = cip->ic_attach(vap, key);
+ if (keyctx == NULL) {
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_CRYPTO,
+ "%s: unable to attach cipher %s with "
+ "flags 0x%x\n", __func__, cip->ic_name,
+ key->wk_flags);
+ key->wk_flags = oflags; /* restore old flags */
+ vap->iv_stats.is_crypto_attachfail++;
+ return 0;
+ }
+ cipher_detach(key);
+ key->wk_cipher = cip; /* XXX refcnt? */
+ key->wk_private = keyctx;
+ }
+ key->wk_keyix = keyix;
+ key->wk_rxkeyix = rxkeyix;
+ key->wk_flags |= IEEE80211_KEY_DEVKEY;
+ }
+ return 1;
+}
+
+/*
+ * Remove the key (no locking, for internal use).
+ */
+static int
+_ieee80211_crypto_delkey(struct ieee80211vap *vap, struct ieee80211_key *key)
+{
+ KASSERT(key->wk_cipher != NULL, ("No cipher!"));
+
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_CRYPTO,
+ "%s: %s keyix %u flags 0x%x rsc %ju tsc %ju len %u\n",
+ __func__, key->wk_cipher->ic_name,
+ key->wk_keyix, key->wk_flags,
+ key->wk_keyrsc[IEEE80211_NONQOS_TID], key->wk_keytsc,
+ key->wk_keylen);
+
+ if (key->wk_flags & IEEE80211_KEY_DEVKEY) {
+ /*
+ * Remove hardware entry.
+ */
+ /* XXX key cache */
+ if (!dev_key_delete(vap, key)) {
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_CRYPTO,
+ "%s: driver did not delete key index %u\n",
+ __func__, key->wk_keyix);
+ vap->iv_stats.is_crypto_delkey++;
+ /* XXX recovery? */
+ }
+ }
+ cipher_detach(key);
+ memset(key, 0, sizeof(*key));
+ ieee80211_crypto_resetkey(vap, key, IEEE80211_KEYIX_NONE);
+ return 1;
+}
+
+/*
+ * Remove the specified key.
+ */
+int
+ieee80211_crypto_delkey(struct ieee80211vap *vap, struct ieee80211_key *key)
+{
+ int status;
+
+ ieee80211_key_update_begin(vap);
+ status = _ieee80211_crypto_delkey(vap, key);
+ ieee80211_key_update_end(vap);
+ return status;
+}
+
+/*
+ * Clear the global key table.
+ */
+void
+ieee80211_crypto_delglobalkeys(struct ieee80211vap *vap)
+{
+ int i;
+
+ ieee80211_key_update_begin(vap);
+ for (i = 0; i < IEEE80211_WEP_NKID; i++)
+ (void) _ieee80211_crypto_delkey(vap, &vap->iv_nw_keys[i]);
+ ieee80211_key_update_end(vap);
+}
+
+/*
+ * Set the contents of the specified key.
+ *
+ * Locking must be handled by the caller using:
+ * ieee80211_key_update_begin(vap);
+ * ieee80211_key_update_end(vap);
+ */
+int
+ieee80211_crypto_setkey(struct ieee80211vap *vap, struct ieee80211_key *key)
+{
+ const struct ieee80211_cipher *cip = key->wk_cipher;
+
+ KASSERT(cip != NULL, ("No cipher!"));
+
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_CRYPTO,
+ "%s: %s keyix %u flags 0x%x mac %s rsc %ju tsc %ju len %u\n",
+ __func__, cip->ic_name, key->wk_keyix,
+ key->wk_flags, ether_sprintf(key->wk_macaddr),
+ key->wk_keyrsc[IEEE80211_NONQOS_TID], key->wk_keytsc,
+ key->wk_keylen);
+
+ if ((key->wk_flags & IEEE80211_KEY_DEVKEY) == 0) {
+ /* XXX nothing allocated, should not happen */
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_CRYPTO,
+ "%s: no device key setup done; should not happen!\n",
+ __func__);
+ vap->iv_stats.is_crypto_setkey_nokey++;
+ return 0;
+ }
+ /*
+ * Give cipher a chance to validate key contents.
+ * XXX should happen before modifying state.
+ */
+ if (!cip->ic_setkey(key)) {
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_CRYPTO,
+ "%s: cipher %s rejected key index %u len %u flags 0x%x\n",
+ __func__, cip->ic_name, key->wk_keyix,
+ key->wk_keylen, key->wk_flags);
+ vap->iv_stats.is_crypto_setkey_cipher++;
+ return 0;
+ }
+ return dev_key_set(vap, key);
+}
+
+/*
+ * Add privacy headers appropriate for the specified key.
+ */
+struct ieee80211_key *
+ieee80211_crypto_encap(struct ieee80211_node *ni, struct mbuf *m)
+{
+ struct ieee80211vap *vap = ni->ni_vap;
+ struct ieee80211_key *k;
+ struct ieee80211_frame *wh;
+ const struct ieee80211_cipher *cip;
+ uint8_t keyid;
+
+ /*
+ * Multicast traffic always uses the multicast key.
+ * Otherwise if a unicast key is set we use that and
+ * it is always key index 0. When no unicast key is
+ * set we fall back to the default transmit key.
+ */
+ wh = mtod(m, struct ieee80211_frame *);
+ if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
+ IEEE80211_KEY_UNDEFINED(&ni->ni_ucastkey)) {
+ if (vap->iv_def_txkey == IEEE80211_KEYIX_NONE) {
+ IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_CRYPTO,
+ wh->i_addr1,
+ "no default transmit key (%s) deftxkey %u",
+ __func__, vap->iv_def_txkey);
+ vap->iv_stats.is_tx_nodefkey++;
+ return NULL;
+ }
+ keyid = vap->iv_def_txkey;
+ k = &vap->iv_nw_keys[vap->iv_def_txkey];
+ } else {
+ keyid = 0;
+ k = &ni->ni_ucastkey;
+ }
+ cip = k->wk_cipher;
+ return (cip->ic_encap(k, m, keyid<<6) ? k : NULL);
+}
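+
+/*
+ * NB: keyid<<6 above places the 2-bit key index in the top bits of the
+ * WEP KeyID octet; e.g. key index 2 is passed down as 2<<6 = 0x80 and
+ * the receive path recovers it with keyid >> 6 (see crypto_decap below).
+ */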
+
+/*
+ * Validate and strip privacy headers (and trailer) for a
+ * received frame that has the WEP/Privacy bit set.
+ */
+struct ieee80211_key *
+ieee80211_crypto_decap(struct ieee80211_node *ni, struct mbuf *m, int hdrlen)
+{
+#define IEEE80211_WEP_HDRLEN (IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN)
+#define IEEE80211_WEP_MINLEN \
+ (sizeof(struct ieee80211_frame) + \
+ IEEE80211_WEP_HDRLEN + IEEE80211_WEP_CRCLEN)
+ struct ieee80211vap *vap = ni->ni_vap;
+ struct ieee80211_key *k;
+ struct ieee80211_frame *wh;
+ const struct ieee80211_cipher *cip;
+ uint8_t keyid;
+
+ /* NB: this minimum size data frame could be bigger */
+ if (m->m_pkthdr.len < IEEE80211_WEP_MINLEN) {
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_ANY,
+ "%s: WEP data frame too short, len %u\n",
+ __func__, m->m_pkthdr.len);
+ vap->iv_stats.is_rx_tooshort++; /* XXX need unique stat? */
+ return NULL;
+ }
+
+ /*
+ * Locate the key. If unicast and there is no unicast
+ * key then we fall back to the key id in the header.
+ * This assumes unicast keys are only configured when
+ * the key id in the header is meaningless (typically 0).
+ */
+ wh = mtod(m, struct ieee80211_frame *);
+ m_copydata(m, hdrlen + IEEE80211_WEP_IVLEN, sizeof(keyid), &keyid);
+ if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
+ IEEE80211_KEY_UNDEFINED(&ni->ni_ucastkey))
+ k = &vap->iv_nw_keys[keyid >> 6];
+ else
+ k = &ni->ni_ucastkey;
+
+ /*
+ * Ensure the crypto header is contiguous for all decap work.
+ */
+ cip = k->wk_cipher;
+ if (m->m_len < hdrlen + cip->ic_header &&
+ (m = m_pullup(m, hdrlen + cip->ic_header)) == NULL) {
+ IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_CRYPTO, wh->i_addr2,
+ "unable to pullup %s header", cip->ic_name);
+ vap->iv_stats.is_rx_wepfail++; /* XXX */
+ return NULL;
+ }
+
+ return (cip->ic_decap(k, m, hdrlen) ? k : NULL);
+#undef IEEE80211_WEP_MINLEN
+#undef IEEE80211_WEP_HDRLEN
+}
+
+static void
+load_ucastkey(void *arg, struct ieee80211_node *ni)
+{
+ struct ieee80211vap *vap = ni->ni_vap;
+ struct ieee80211_key *k;
+
+ if (vap->iv_state != IEEE80211_S_RUN)
+ return;
+ k = &ni->ni_ucastkey;
+ if (k->wk_flags & IEEE80211_KEY_DEVKEY)
+ dev_key_set(vap, k);
+}
+
+/*
+ * Re-load all keys known to the 802.11 layer that may
+ * have hardware state backing them. This is used by
+ * drivers on resume to push keys down into the device.
+ */
+void
+ieee80211_crypto_reload_keys(struct ieee80211com *ic)
+{
+ struct ieee80211vap *vap;
+ int i;
+
+ /*
+ * Keys in the global key table of each vap.
+ */
+ /* NB: used only during resume so don't lock for now */
+ TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
+ if (vap->iv_state != IEEE80211_S_RUN)
+ continue;
+ for (i = 0; i < IEEE80211_WEP_NKID; i++) {
+ const struct ieee80211_key *k = &vap->iv_nw_keys[i];
+ if (k->wk_flags & IEEE80211_KEY_DEVKEY)
+ dev_key_set(vap, k);
+ }
+ }
+ /*
+ * Unicast keys.
+ */
+ ieee80211_iterate_nodes(&ic->ic_sta, load_ucastkey, NULL);
+}
diff --git a/rtems/freebsd/net80211/ieee80211_crypto.h b/rtems/freebsd/net80211/ieee80211_crypto.h
new file mode 100644
index 00000000..b9e8e25b
--- /dev/null
+++ b/rtems/freebsd/net80211/ieee80211_crypto.h
@@ -0,0 +1,245 @@
+/*-
+ * Copyright (c) 2001 Atsushi Onoe
+ * Copyright (c) 2002-2008 Sam Leffler, Errno Consulting
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+#ifndef _NET80211_IEEE80211_CRYPTO_HH_
+#define _NET80211_IEEE80211_CRYPTO_HH_
+
+/*
+ * 802.11 protocol crypto-related definitions.
+ */
+#define IEEE80211_KEYBUF_SIZE 16
+#define IEEE80211_MICBUF_SIZE (8+8) /* space for both tx+rx keys */
+
+/*
+ * Old WEP-style key. Deprecated.
+ */
+struct ieee80211_wepkey {
+ u_int wk_len; /* key length in bytes */
+ uint8_t wk_key[IEEE80211_KEYBUF_SIZE];
+};
+
+struct ieee80211_rsnparms {
+ uint8_t rsn_mcastcipher; /* mcast/group cipher */
+ uint8_t rsn_mcastkeylen; /* mcast key length */
+ uint8_t rsn_ucastcipher; /* selected unicast cipher */
+ uint8_t rsn_ucastkeylen; /* unicast key length */
+ uint8_t rsn_keymgmt; /* selected key mgmt algo */
+ uint16_t rsn_caps; /* capabilities */
+};
+
+struct ieee80211_cipher;
+
+/*
+ * Crypto key state. There is sufficient room for all supported
+ * ciphers (see below). The underlying ciphers are handled
+ * separately through loadable cipher modules that register with
+ * the generic crypto support. A key has a reference to an instance
+ * of the cipher; any per-key state is hung off wk_private by the
+ * cipher when it is attached. Ciphers are automatically called
+ * to detach and cleanup any such state when the key is deleted.
+ *
+ * The generic crypto support handles encap/decap of cipher-related
+ * frame contents for both hardware- and software-based implementations.
+ * A key requiring software crypto support is automatically flagged and
+ * the cipher is expected to honor this and do the necessary work.
+ * Ciphers such as TKIP may also support mixed hardware/software
+ * encrypt/decrypt and MIC processing.
+ */
+typedef uint16_t ieee80211_keyix; /* h/w key index */
+
+struct ieee80211_key {
+ uint8_t wk_keylen; /* key length in bytes */
+ uint8_t wk_pad;
+ uint16_t wk_flags;
+#define IEEE80211_KEY_XMIT 0x0001 /* key used for xmit */
+#define IEEE80211_KEY_RECV 0x0002 /* key used for recv */
+#define IEEE80211_KEY_GROUP 0x0004 /* key used for WPA group operation */
+#define IEEE80211_KEY_SWENCRYPT 0x0010 /* host-based encrypt */
+#define IEEE80211_KEY_SWDECRYPT 0x0020 /* host-based decrypt */
+#define IEEE80211_KEY_SWENMIC 0x0040 /* host-based enmic */
+#define IEEE80211_KEY_SWDEMIC 0x0080 /* host-based demic */
+#define IEEE80211_KEY_DEVKEY 0x0100 /* device key request completed */
+#define IEEE80211_KEY_CIPHER0 0x1000 /* cipher-specific action 0 */
+#define IEEE80211_KEY_CIPHER1 0x2000 /* cipher-specific action 1 */
+ ieee80211_keyix wk_keyix; /* h/w key index */
+ ieee80211_keyix wk_rxkeyix; /* optional h/w rx key index */
+ uint8_t wk_key[IEEE80211_KEYBUF_SIZE+IEEE80211_MICBUF_SIZE];
+#define wk_txmic wk_key+IEEE80211_KEYBUF_SIZE+0 /* XXX can't () right */
+#define wk_rxmic wk_key+IEEE80211_KEYBUF_SIZE+8 /* XXX can't () right */
+ /* key receive sequence counter */
+ uint64_t wk_keyrsc[IEEE80211_TID_SIZE];
+ uint64_t wk_keytsc; /* key transmit sequence counter */
+ const struct ieee80211_cipher *wk_cipher;
+ void *wk_private; /* private cipher state */
+ uint8_t wk_macaddr[IEEE80211_ADDR_LEN];
+};
+#define IEEE80211_KEY_COMMON /* common flags passed in by apps */\
+ (IEEE80211_KEY_XMIT | IEEE80211_KEY_RECV | IEEE80211_KEY_GROUP)
+#define IEEE80211_KEY_DEVICE /* flags owned by device driver */\
+ (IEEE80211_KEY_DEVKEY|IEEE80211_KEY_CIPHER0|IEEE80211_KEY_CIPHER1)
+
+#define IEEE80211_KEY_SWCRYPT \
+ (IEEE80211_KEY_SWENCRYPT | IEEE80211_KEY_SWDECRYPT)
+#define IEEE80211_KEY_SWMIC (IEEE80211_KEY_SWENMIC | IEEE80211_KEY_SWDEMIC)
+
+#define IEEE80211_KEY_BITS \
+ "\20\1XMIT\2RECV\3GROUP\5SWENCRYPT\6SWDECRYPT\7SWENMIC\10SWDEMIC" \
+ "\11DEVKEY\15CIPHER0\16CIPHER1"
+
+#define IEEE80211_KEYIX_NONE ((ieee80211_keyix) -1)
+
+/*
+ * NB: these values are ordered carefully; there are lots of
+ * implications in any reordering. Beware that 4 is used
+ * only to indicate h/w TKIP MIC support in driver capabilities;
+ * there is no separate cipher support (it's rolled into the
+ * TKIP cipher support).
+ */
+#define IEEE80211_CIPHER_WEP 0
+#define IEEE80211_CIPHER_TKIP 1
+#define IEEE80211_CIPHER_AES_OCB 2
+#define IEEE80211_CIPHER_AES_CCM 3
+#define IEEE80211_CIPHER_TKIPMIC 4 /* TKIP MIC capability */
+#define IEEE80211_CIPHER_CKIP 5
+#define IEEE80211_CIPHER_NONE 6 /* pseudo value */
+
+#define IEEE80211_CIPHER_MAX (IEEE80211_CIPHER_NONE+1)
+
+/* capability bits in ic_cryptocaps/iv_cryptocaps */
+#define IEEE80211_CRYPTO_WEP (1<<IEEE80211_CIPHER_WEP)
+#define IEEE80211_CRYPTO_TKIP (1<<IEEE80211_CIPHER_TKIP)
+#define IEEE80211_CRYPTO_AES_OCB (1<<IEEE80211_CIPHER_AES_OCB)
+#define IEEE80211_CRYPTO_AES_CCM (1<<IEEE80211_CIPHER_AES_CCM)
+#define IEEE80211_CRYPTO_TKIPMIC (1<<IEEE80211_CIPHER_TKIPMIC)
+#define IEEE80211_CRYPTO_CKIP (1<<IEEE80211_CIPHER_CKIP)
+
+#define IEEE80211_CRYPTO_BITS \
+ "\20\1WEP\2TKIP\3AES\4AES_CCM\5TKIPMIC\6CKIP"
+
+#if defined(__KERNEL__) || defined(_KERNEL)
+
+struct ieee80211com;
+struct ieee80211vap;
+struct ieee80211_node;
+struct mbuf;
+
+MALLOC_DECLARE(M_80211_CRYPTO);
+
+void ieee80211_crypto_attach(struct ieee80211com *);
+void ieee80211_crypto_detach(struct ieee80211com *);
+void ieee80211_crypto_vattach(struct ieee80211vap *);
+void ieee80211_crypto_vdetach(struct ieee80211vap *);
+int ieee80211_crypto_newkey(struct ieee80211vap *,
+ int cipher, int flags, struct ieee80211_key *);
+int ieee80211_crypto_delkey(struct ieee80211vap *,
+ struct ieee80211_key *);
+int ieee80211_crypto_setkey(struct ieee80211vap *, struct ieee80211_key *);
+void ieee80211_crypto_delglobalkeys(struct ieee80211vap *);
+void ieee80211_crypto_reload_keys(struct ieee80211com *);
+
+/*
+ * Template for a supported cipher. Ciphers register with the
+ * crypto code and are typically loaded as separate modules
+ * (the null cipher is always present).
+ * XXX may need refcnts
+ */
+struct ieee80211_cipher {
+ const char *ic_name; /* printable name */
+ u_int ic_cipher; /* IEEE80211_CIPHER_* */
+ u_int ic_header; /* size of privacy header (bytes) */
+ u_int ic_trailer; /* size of privacy trailer (bytes) */
+ u_int ic_miclen; /* size of mic trailer (bytes) */
+ void* (*ic_attach)(struct ieee80211vap *, struct ieee80211_key *);
+ void (*ic_detach)(struct ieee80211_key *);
+ int (*ic_setkey)(struct ieee80211_key *);
+ int (*ic_encap)(struct ieee80211_key *, struct mbuf *,
+ uint8_t keyid);
+ int (*ic_decap)(struct ieee80211_key *, struct mbuf *, int);
+ int (*ic_enmic)(struct ieee80211_key *, struct mbuf *, int);
+ int (*ic_demic)(struct ieee80211_key *, struct mbuf *, int);
+};
+extern const struct ieee80211_cipher ieee80211_cipher_none;
+
+#define IEEE80211_KEY_UNDEFINED(k) \
+ ((k)->wk_cipher == &ieee80211_cipher_none)
+
+void ieee80211_crypto_register(const struct ieee80211_cipher *);
+void ieee80211_crypto_unregister(const struct ieee80211_cipher *);
+int ieee80211_crypto_available(u_int cipher);
+
+struct ieee80211_key *ieee80211_crypto_encap(struct ieee80211_node *,
+ struct mbuf *);
+struct ieee80211_key *ieee80211_crypto_decap(struct ieee80211_node *,
+ struct mbuf *, int);
+
+/*
+ * Check and remove any MIC.
+ */
+static __inline int
+ieee80211_crypto_demic(struct ieee80211vap *vap, struct ieee80211_key *k,
+ struct mbuf *m, int force)
+{
+ const struct ieee80211_cipher *cip = k->wk_cipher;
+ return (cip->ic_miclen > 0 ? cip->ic_demic(k, m, force) : 1);
+}
+
+/*
+ * Add any MIC.
+ */
+static __inline int
+ieee80211_crypto_enmic(struct ieee80211vap *vap,
+ struct ieee80211_key *k, struct mbuf *m, int force)
+{
+ const struct ieee80211_cipher *cip = k->wk_cipher;
+ return (cip->ic_miclen > 0 ? cip->ic_enmic(k, m, force) : 1);
+}
+
+/*
+ * Reset key state to an unused state. The crypto
+ * key allocation mechanism ensures other state (e.g.
+ * key data) is properly set up before a key is used.
+ */
+static __inline void
+ieee80211_crypto_resetkey(struct ieee80211vap *vap,
+ struct ieee80211_key *k, ieee80211_keyix ix)
+{
+ k->wk_cipher = &ieee80211_cipher_none;
+ k->wk_private = k->wk_cipher->ic_attach(vap, k);
+ k->wk_keyix = k->wk_rxkeyix = ix;
+ k->wk_flags = IEEE80211_KEY_XMIT | IEEE80211_KEY_RECV;
+}
+
+/*
+ * Crypt-related notification methods.
+ */
+void ieee80211_notify_replay_failure(struct ieee80211vap *,
+ const struct ieee80211_frame *, const struct ieee80211_key *,
+ uint64_t rsc, int tid);
+void ieee80211_notify_michael_failure(struct ieee80211vap *,
+ const struct ieee80211_frame *, u_int keyix);
+#endif /* defined(__KERNEL__) || defined(_KERNEL) */
+#endif /* _NET80211_IEEE80211_CRYPTO_HH_ */
diff --git a/rtems/freebsd/net80211/ieee80211_crypto_ccmp.c b/rtems/freebsd/net80211/ieee80211_crypto_ccmp.c
new file mode 100644
index 00000000..d99c2057
--- /dev/null
+++ b/rtems/freebsd/net80211/ieee80211_crypto_ccmp.c
@@ -0,0 +1,636 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 2002-2008 Sam Leffler, Errno Consulting
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * IEEE 802.11i AES-CCMP crypto support.
+ *
+ * Part of this module is derived from similar code in the Host
+ * AP driver. The code is used with the consent of the author and
+ * its license is included below.
+ */
+#include <rtems/freebsd/local/opt_wlan.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/module.h>
+
+#include <rtems/freebsd/sys/socket.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/if_media.h>
+#include <rtems/freebsd/net/ethernet.h>
+
+#include <rtems/freebsd/net80211/ieee80211_var.h>
+
+#include <rtems/freebsd/crypto/rijndael/rijndael.h>
+
+#define AES_BLOCK_LEN 16
+
+struct ccmp_ctx {
+ struct ieee80211vap *cc_vap; /* for diagnostics+statistics */
+ struct ieee80211com *cc_ic;
+ rijndael_ctx cc_aes;
+};
+
+static void *ccmp_attach(struct ieee80211vap *, struct ieee80211_key *);
+static void ccmp_detach(struct ieee80211_key *);
+static int ccmp_setkey(struct ieee80211_key *);
+static int ccmp_encap(struct ieee80211_key *k, struct mbuf *, uint8_t keyid);
+static int ccmp_decap(struct ieee80211_key *, struct mbuf *, int);
+static int ccmp_enmic(struct ieee80211_key *, struct mbuf *, int);
+static int ccmp_demic(struct ieee80211_key *, struct mbuf *, int);
+
+static const struct ieee80211_cipher ccmp = {
+ .ic_name = "AES-CCM",
+ .ic_cipher = IEEE80211_CIPHER_AES_CCM,
+ .ic_header = IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN +
+ IEEE80211_WEP_EXTIVLEN,
+ .ic_trailer = IEEE80211_WEP_MICLEN,
+ .ic_miclen = 0,
+ .ic_attach = ccmp_attach,
+ .ic_detach = ccmp_detach,
+ .ic_setkey = ccmp_setkey,
+ .ic_encap = ccmp_encap,
+ .ic_decap = ccmp_decap,
+ .ic_enmic = ccmp_enmic,
+ .ic_demic = ccmp_demic,
+};
+
+static int ccmp_encrypt(struct ieee80211_key *, struct mbuf *, int hdrlen);
+static int ccmp_decrypt(struct ieee80211_key *, u_int64_t pn,
+ struct mbuf *, int hdrlen);
+
+/* number of references from net80211 layer */
+static int nrefs = 0;
+
+static void *
+ccmp_attach(struct ieee80211vap *vap, struct ieee80211_key *k)
+{
+ struct ccmp_ctx *ctx;
+
+ ctx = (struct ccmp_ctx *) malloc(sizeof(struct ccmp_ctx),
+ M_80211_CRYPTO, M_NOWAIT | M_ZERO);
+ if (ctx == NULL) {
+ vap->iv_stats.is_crypto_nomem++;
+ return NULL;
+ }
+ ctx->cc_vap = vap;
+ ctx->cc_ic = vap->iv_ic;
+ nrefs++; /* NB: we assume caller locking */
+ return ctx;
+}
+
+static void
+ccmp_detach(struct ieee80211_key *k)
+{
+ struct ccmp_ctx *ctx = k->wk_private;
+
+ free(ctx, M_80211_CRYPTO);
+ KASSERT(nrefs > 0, ("imbalanced attach/detach"));
+ nrefs--; /* NB: we assume caller locking */
+}
+
+static int
+ccmp_setkey(struct ieee80211_key *k)
+{
+ struct ccmp_ctx *ctx = k->wk_private;
+
+ if (k->wk_keylen != (128/NBBY)) {
+ IEEE80211_DPRINTF(ctx->cc_vap, IEEE80211_MSG_CRYPTO,
+ "%s: Invalid key length %u, expecting %u\n",
+ __func__, k->wk_keylen, 128/NBBY);
+ return 0;
+ }
+ if (k->wk_flags & IEEE80211_KEY_SWENCRYPT)
+ rijndael_set_key(&ctx->cc_aes, k->wk_key, k->wk_keylen*NBBY);
+ return 1;
+}
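+
+/*
+ * NB: NBBY is bits-per-byte (8), so the length check above demands
+ * 128/8 = 16-byte keys; CCMP here always keys AES-128.
+ */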
+
+/*
+ * Add privacy headers appropriate for the specified key.
+ */
+static int
+ccmp_encap(struct ieee80211_key *k, struct mbuf *m, uint8_t keyid)
+{
+ struct ccmp_ctx *ctx = k->wk_private;
+ struct ieee80211com *ic = ctx->cc_ic;
+ uint8_t *ivp;
+ int hdrlen;
+
+ hdrlen = ieee80211_hdrspace(ic, mtod(m, void *));
+
+ /*
+ * Copy down 802.11 header and add the IV, KeyID, and ExtIV.
+ */
+ M_PREPEND(m, ccmp.ic_header, M_NOWAIT);
+ if (m == NULL)
+ return 0;
+ ivp = mtod(m, uint8_t *);
+ ovbcopy(ivp + ccmp.ic_header, ivp, hdrlen);
+ ivp += hdrlen;
+
+ k->wk_keytsc++; /* XXX wrap at 48 bits */
+ ivp[0] = k->wk_keytsc >> 0; /* PN0 */
+ ivp[1] = k->wk_keytsc >> 8; /* PN1 */
+ ivp[2] = 0; /* Reserved */
+ ivp[3] = keyid | IEEE80211_WEP_EXTIV; /* KeyID | ExtID */
+ ivp[4] = k->wk_keytsc >> 16; /* PN2 */
+ ivp[5] = k->wk_keytsc >> 24; /* PN3 */
+ ivp[6] = k->wk_keytsc >> 32; /* PN4 */
+ ivp[7] = k->wk_keytsc >> 40; /* PN5 */
+
+ /*
+ * Finally, do software encrypt if needed.
+ */
+ if ((k->wk_flags & IEEE80211_KEY_SWENCRYPT) &&
+ !ccmp_encrypt(k, m, hdrlen))
+ return 0;
+
+ return 1;
+}
+
+/*
+ * Add MIC to the frame as needed.
+ */
+static int
+ccmp_enmic(struct ieee80211_key *k, struct mbuf *m, int force)
+{
+
+ return 1;
+}
+
+static __inline uint64_t
+READ_6(uint8_t b0, uint8_t b1, uint8_t b2, uint8_t b3, uint8_t b4, uint8_t b5)
+{
+ uint32_t iv32 = (b0 << 0) | (b1 << 8) | (b2 << 16) | (b3 << 24);
+ uint16_t iv16 = (b4 << 0) | (b5 << 8);
+ return (((uint64_t)iv16) << 32) | iv32;
+}
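+
+/*
+ * NB: worked example: for IV bytes PN0..PN5 = 01 02 03 04 05 06,
+ * READ_6(0x01, 0x02, 0x03, 0x04, 0x05, 0x06) == 0x060504030201, the
+ * inverse of how ccmp_encap scatters wk_keytsc into the header.
+ */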
+
+/*
+ * Validate and strip privacy headers (and trailer) for a
+ * received frame. The specified key should be correct but
+ * is also verified.
+ */
+static int
+ccmp_decap(struct ieee80211_key *k, struct mbuf *m, int hdrlen)
+{
+ struct ccmp_ctx *ctx = k->wk_private;
+ struct ieee80211vap *vap = ctx->cc_vap;
+ struct ieee80211_frame *wh;
+ uint8_t *ivp, tid;
+ uint64_t pn;
+
+ /*
+ * Header should have extended IV and sequence number;
+ * verify the former and validate the latter.
+ */
+ wh = mtod(m, struct ieee80211_frame *);
+ ivp = mtod(m, uint8_t *) + hdrlen;
+ if ((ivp[IEEE80211_WEP_IVLEN] & IEEE80211_WEP_EXTIV) == 0) {
+ /*
+ * No extended IV; discard frame.
+ */
+ IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_CRYPTO, wh->i_addr2,
+ "%s", "missing ExtIV for AES-CCM cipher");
+ vap->iv_stats.is_rx_ccmpformat++;
+ return 0;
+ }
+ tid = ieee80211_gettid(wh);
+ pn = READ_6(ivp[0], ivp[1], ivp[4], ivp[5], ivp[6], ivp[7]);
+ if (pn <= k->wk_keyrsc[tid]) {
+ /*
+ * Replay violation.
+ */
+ ieee80211_notify_replay_failure(vap, wh, k, pn, tid);
+ vap->iv_stats.is_rx_ccmpreplay++;
+ return 0;
+ }
+
+ /*
+ * Check if the device handled the decrypt in hardware.
+ * If so we just strip the header; otherwise we need to
+ * handle the decrypt in software. Note that for the
+ * latter we leave the header in place for use in the
+ * decryption work.
+ */
+ if ((k->wk_flags & IEEE80211_KEY_SWDECRYPT) &&
+ !ccmp_decrypt(k, pn, m, hdrlen))
+ return 0;
+
+ /*
+ * Copy up 802.11 header and strip crypto bits.
+ */
+ ovbcopy(mtod(m, void *), mtod(m, uint8_t *) + ccmp.ic_header, hdrlen);
+ m_adj(m, ccmp.ic_header);
+ m_adj(m, -ccmp.ic_trailer);
+
+ /*
+ * Ok to update rsc now.
+ */
+ k->wk_keyrsc[tid] = pn;
+
+ return 1;
+}
+
+/*
+ * Verify and strip MIC from the frame.
+ */
+static int
+ccmp_demic(struct ieee80211_key *k, struct mbuf *m, int force)
+{
+ return 1;
+}
+
+static __inline void
+xor_block(uint8_t *b, const uint8_t *a, size_t len)
+{
+ int i;
+ for (i = 0; i < len; i++)
+ b[i] ^= a[i];
+}
+
+/*
+ * Host AP crypt: host-based CCMP encryption implementation for Host AP driver
+ *
+ * Copyright (c) 2003-2004, Jouni Malinen <jkmaline@cc.hut.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation. See README and COPYING for
+ * more details.
+ *
+ * Alternatively, this software may be distributed under the terms of BSD
+ * license.
+ */
+
+static void
+ccmp_init_blocks(rijndael_ctx *ctx, struct ieee80211_frame *wh,
+ u_int64_t pn, size_t dlen,
+ uint8_t b0[AES_BLOCK_LEN], uint8_t aad[2 * AES_BLOCK_LEN],
+ uint8_t auth[AES_BLOCK_LEN], uint8_t s0[AES_BLOCK_LEN])
+{
+#define IS_QOS_DATA(wh) IEEE80211_QOS_HAS_SEQ(wh)
+
+ /* CCM Initial Block:
+ * Flag (Include authentication header, M=3 (8-octet MIC),
+ * L=1 (2-octet Dlen))
+ * Nonce: 0x00 | A2 | PN
+ * Dlen */
+ b0[0] = 0x59;
+ /* NB: b0[1] set below */
+ IEEE80211_ADDR_COPY(b0 + 2, wh->i_addr2);
+ b0[8] = pn >> 40;
+ b0[9] = pn >> 32;
+ b0[10] = pn >> 24;
+ b0[11] = pn >> 16;
+ b0[12] = pn >> 8;
+ b0[13] = pn >> 0;
+ b0[14] = (dlen >> 8) & 0xff;
+ b0[15] = dlen & 0xff;
+
+ /* AAD:
+ * FC with bits 4..6 and 11..13 masked to zero; 14 is always one
+ * A1 | A2 | A3
+ * SC with bits 4..15 (seq#) masked to zero
+ * A4 (if present)
+ * QC (if present)
+ */
+ aad[0] = 0; /* AAD length >> 8 */
+ /* NB: aad[1] set below */
+ aad[2] = wh->i_fc[0] & 0x8f; /* XXX magic #s */
+ aad[3] = wh->i_fc[1] & 0xc7; /* XXX magic #s */
+ /* NB: we know 3 addresses are contiguous */
+ memcpy(aad + 4, wh->i_addr1, 3 * IEEE80211_ADDR_LEN);
+ aad[22] = wh->i_seq[0] & IEEE80211_SEQ_FRAG_MASK;
+ aad[23] = 0; /* all bits masked */
+ /*
+ * Construct variable-length portion of AAD based
+ * on whether this is a 4-address frame/QOS frame.
+ * We always zero-pad to 32 bytes before running it
+ * through the cipher.
+ *
+ * We also fill in the priority bits of the CCM
+ * initial block as we know whether or not we have
+ * a QOS frame.
+ */
+ if (IEEE80211_IS_DSTODS(wh)) {
+ IEEE80211_ADDR_COPY(aad + 24,
+ ((struct ieee80211_frame_addr4 *)wh)->i_addr4);
+ if (IS_QOS_DATA(wh)) {
+ struct ieee80211_qosframe_addr4 *qwh4 =
+ (struct ieee80211_qosframe_addr4 *) wh;
+ aad[30] = qwh4->i_qos[0] & 0x0f;/* just priority bits */
+ aad[31] = 0;
+ b0[1] = aad[30];
+ aad[1] = 22 + IEEE80211_ADDR_LEN + 2;
+ } else {
+ *(uint16_t *)&aad[30] = 0;
+ b0[1] = 0;
+ aad[1] = 22 + IEEE80211_ADDR_LEN;
+ }
+ } else {
+ if (IS_QOS_DATA(wh)) {
+ struct ieee80211_qosframe *qwh =
+ (struct ieee80211_qosframe*) wh;
+ aad[24] = qwh->i_qos[0] & 0x0f; /* just priority bits */
+ aad[25] = 0;
+ b0[1] = aad[24];
+ aad[1] = 22 + 2;
+ } else {
+ *(uint16_t *)&aad[24] = 0;
+ b0[1] = 0;
+ aad[1] = 22;
+ }
+ *(uint16_t *)&aad[26] = 0;
+ *(uint32_t *)&aad[28] = 0;
+ }
+
+ /* Start with the first block and AAD */
+ rijndael_encrypt(ctx, b0, auth);
+ xor_block(auth, aad, AES_BLOCK_LEN);
+ rijndael_encrypt(ctx, auth, auth);
+ xor_block(auth, &aad[AES_BLOCK_LEN], AES_BLOCK_LEN);
+ rijndael_encrypt(ctx, auth, auth);
+ b0[0] &= 0x07;
+ b0[14] = b0[15] = 0;
+ rijndael_encrypt(ctx, b0, s0);
+#undef IS_QOS_DATA
+}
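+
+/*
+ * NB: worked example of the initial block for a non-QoS frame with
+ * hypothetical pn = 0x010203040506, dlen = 100 and A2 = 00:11:22:33:44:55:
+ *	b0 = 59 00 00 11 22 33 44 55 01 02 03 04 05 06 00 64
+ * i.e. flags | priority | A2 | PN (msb first) | dlen.
+ */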
+
+#define CCMP_ENCRYPT(_i, _b, _b0, _pos, _e, _len) do { \
+ /* Authentication */ \
+ xor_block(_b, _pos, _len); \
+ rijndael_encrypt(&ctx->cc_aes, _b, _b); \
+ /* Encryption, with counter */ \
+ _b0[14] = (_i >> 8) & 0xff; \
+ _b0[15] = _i & 0xff; \
+ rijndael_encrypt(&ctx->cc_aes, _b0, _e); \
+ xor_block(_pos, _e, _len); \
+} while (0)
+
+static int
+ccmp_encrypt(struct ieee80211_key *key, struct mbuf *m0, int hdrlen)
+{
+ struct ccmp_ctx *ctx = key->wk_private;
+ struct ieee80211_frame *wh;
+ struct mbuf *m = m0;
+ int data_len, i, space;
+ uint8_t aad[2 * AES_BLOCK_LEN], b0[AES_BLOCK_LEN], b[AES_BLOCK_LEN],
+ e[AES_BLOCK_LEN], s0[AES_BLOCK_LEN];
+ uint8_t *pos;
+
+ ctx->cc_vap->iv_stats.is_crypto_ccmp++;
+
+ wh = mtod(m, struct ieee80211_frame *);
+ data_len = m->m_pkthdr.len - (hdrlen + ccmp.ic_header);
+ ccmp_init_blocks(&ctx->cc_aes, wh, key->wk_keytsc,
+ data_len, b0, aad, b, s0);
+
+ i = 1;
+ pos = mtod(m, uint8_t *) + hdrlen + ccmp.ic_header;
+ /* NB: assumes header is entirely in first mbuf */
+ space = m->m_len - (hdrlen + ccmp.ic_header);
+ for (;;) {
+ if (space > data_len)
+ space = data_len;
+ /*
+ * Do full blocks.
+ */
+ while (space >= AES_BLOCK_LEN) {
+ CCMP_ENCRYPT(i, b, b0, pos, e, AES_BLOCK_LEN);
+ pos += AES_BLOCK_LEN, space -= AES_BLOCK_LEN;
+ data_len -= AES_BLOCK_LEN;
+ i++;
+ }
+ if (data_len <= 0) /* no more data */
+ break;
+ m = m->m_next;
+ if (m == NULL) { /* last buffer */
+ if (space != 0) {
+ /*
+ * Short last block.
+ */
+ CCMP_ENCRYPT(i, b, b0, pos, e, space);
+ }
+ break;
+ }
+ if (space != 0) {
+ uint8_t *pos_next;
+ int space_next;
+ int len, dl, sp;
+ struct mbuf *n;
+
+ /*
+ * Block straddles one or more mbufs, gather data
+ * into the block buffer b, apply the cipher, then
+ * scatter the results back into the mbuf chain.
+ * The buffer will automatically get space bytes
+ * of data at offset 0 copied in+out by the
+ * CCMP_ENCRYPT request so we must take care of
+ * the remaining data.
+ */
+ n = m;
+ dl = data_len;
+ sp = space;
+ for (;;) {
+ pos_next = mtod(n, uint8_t *);
+ len = min(dl, AES_BLOCK_LEN);
+ space_next = len > sp ? len - sp : 0;
+ if (n->m_len >= space_next) {
+ /*
+ * This mbuf has enough data; just grab
+ * what we need and stop.
+ */
+ xor_block(b+sp, pos_next, space_next);
+ break;
+ }
+ /*
+ * This mbuf's contents are insufficient,
+ * take 'em all and prepare to advance to
+ * the next mbuf.
+ */
+ xor_block(b+sp, pos_next, n->m_len);
+ sp += n->m_len, dl -= n->m_len;
+ n = n->m_next;
+ if (n == NULL)
+ break;
+ }
+
+ CCMP_ENCRYPT(i, b, b0, pos, e, space);
+
+ /* NB: just like above, but scatter data to mbufs */
+ dl = data_len;
+ sp = space;
+ for (;;) {
+ pos_next = mtod(m, uint8_t *);
+ len = min(dl, AES_BLOCK_LEN);
+ space_next = len > sp ? len - sp : 0;
+ if (m->m_len >= space_next) {
+ xor_block(pos_next, e+sp, space_next);
+ break;
+ }
+ xor_block(pos_next, e+sp, m->m_len);
+ sp += m->m_len, dl -= m->m_len;
+ m = m->m_next;
+ if (m == NULL)
+ goto done;
+ }
+ /*
+ * Do bookkeeping. m now points to the last mbuf
+ * we grabbed data from. We know we consumed a
+ * full block of data as otherwise we'd have hit
+ * the end of the mbuf chain, so deduct from data_len.
+ * Otherwise advance the block number (i) and setup
+ * pos+space to reflect contents of the new mbuf.
+ */
+ data_len -= AES_BLOCK_LEN;
+ i++;
+ pos = pos_next + space_next;
+ space = m->m_len - space_next;
+ } else {
+ /*
+ * Setup for next buffer.
+ */
+ pos = mtod(m, uint8_t *);
+ space = m->m_len;
+ }
+ }
+done:
+ /* tack on MIC */
+ xor_block(b, s0, ccmp.ic_trailer);
+ return m_append(m0, ccmp.ic_trailer, b);
+}
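+/*
+ * NB: the MIC appended above is the final CBC-MAC value XOR'd with
+ * s0 (the encrypted counter-0 block prepared by ccmp_init_blocks),
+ * truncated to ic_trailer bytes.
+ */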
+#undef CCMP_ENCRYPT
+
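+/*
+ * NB: decryption runs the CCM steps in the opposite order: the
+ * counter keystream is applied first to recover the plaintext,
+ * which is then folded into the CBC-MAC state (_a) so the MIC
+ * can be compared at the end.
+ */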
+#define CCMP_DECRYPT(_i, _b, _b0, _pos, _a, _len) do { \
+ /* Decrypt, with counter */ \
+ _b0[14] = (_i >> 8) & 0xff; \
+ _b0[15] = _i & 0xff; \
+ rijndael_encrypt(&ctx->cc_aes, _b0, _b); \
+ xor_block(_pos, _b, _len); \
+ /* Authentication */ \
+ xor_block(_a, _pos, _len); \
+ rijndael_encrypt(&ctx->cc_aes, _a, _a); \
+} while (0)
+
+static int
+ccmp_decrypt(struct ieee80211_key *key, u_int64_t pn, struct mbuf *m, int hdrlen)
+{
+ struct ccmp_ctx *ctx = key->wk_private;
+ struct ieee80211vap *vap = ctx->cc_vap;
+ struct ieee80211_frame *wh;
+ uint8_t aad[2 * AES_BLOCK_LEN];
+ uint8_t b0[AES_BLOCK_LEN], b[AES_BLOCK_LEN], a[AES_BLOCK_LEN];
+ uint8_t mic[AES_BLOCK_LEN];
+ size_t data_len;
+ int i;
+ uint8_t *pos;
+ u_int space;
+
+ ctx->cc_vap->iv_stats.is_crypto_ccmp++;
+
+ wh = mtod(m, struct ieee80211_frame *);
+ data_len = m->m_pkthdr.len - (hdrlen + ccmp.ic_header + ccmp.ic_trailer);
+ ccmp_init_blocks(&ctx->cc_aes, wh, pn, data_len, b0, aad, a, b);
+ m_copydata(m, m->m_pkthdr.len - ccmp.ic_trailer, ccmp.ic_trailer, mic);
+ xor_block(mic, b, ccmp.ic_trailer);
+
+ i = 1;
+ pos = mtod(m, uint8_t *) + hdrlen + ccmp.ic_header;
+ space = m->m_len - (hdrlen + ccmp.ic_header);
+ for (;;) {
+ if (space > data_len)
+ space = data_len;
+ while (space >= AES_BLOCK_LEN) {
+ CCMP_DECRYPT(i, b, b0, pos, a, AES_BLOCK_LEN);
+ pos += AES_BLOCK_LEN, space -= AES_BLOCK_LEN;
+ data_len -= AES_BLOCK_LEN;
+ i++;
+ }
+ if (data_len <= 0) /* no more data */
+ break;
+ m = m->m_next;
+ if (m == NULL) { /* last buffer */
+ if (space != 0) /* short last block */
+ CCMP_DECRYPT(i, b, b0, pos, a, space);
+ break;
+ }
+ if (space != 0) {
+ uint8_t *pos_next;
+ u_int space_next;
+ u_int len;
+
+ /*
+ * Block straddles buffers, split references. We
+ * do not handle splits that require >2 buffers
+ * since rx'd frames are never badly fragmented
+ * because drivers typically recv in clusters.
+ */
+ pos_next = mtod(m, uint8_t *);
+ len = min(data_len, AES_BLOCK_LEN);
+ space_next = len > space ? len - space : 0;
+ KASSERT(m->m_len >= space_next,
+ ("not enough data in following buffer, "
+ "m_len %u need %u\n", m->m_len, space_next));
+
+ xor_block(b+space, pos_next, space_next);
+ CCMP_DECRYPT(i, b, b0, pos, a, space);
+ xor_block(pos_next, b+space, space_next);
+ data_len -= len;
+ i++;
+
+ pos = pos_next + space_next;
+ space = m->m_len - space_next;
+ } else {
+ /*
+ * Setup for next buffer.
+ */
+ pos = mtod(m, uint8_t *);
+ space = m->m_len;
+ }
+ }
+ if (memcmp(mic, a, ccmp.ic_trailer) != 0) {
+ IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_CRYPTO, wh->i_addr2,
+ "%s", "AES-CCM decrypt failed; MIC mismatch");
+ vap->iv_stats.is_rx_ccmpmic++;
+ return 0;
+ }
+ return 1;
+}
+#undef CCMP_DECRYPT
+
+/*
+ * Module glue.
+ */
+IEEE80211_CRYPTO_MODULE(ccmp, 1);
diff --git a/rtems/freebsd/net80211/ieee80211_crypto_none.c b/rtems/freebsd/net80211/ieee80211_crypto_none.c
new file mode 100644
index 00000000..0a7b4eec
--- /dev/null
+++ b/rtems/freebsd/net80211/ieee80211_crypto_none.c
@@ -0,0 +1,146 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 2002-2008 Sam Leffler, Errno Consulting
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * IEEE 802.11 NULL crypto support.
+ */
+#include <rtems/freebsd/local/opt_wlan.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/module.h>
+
+#include <rtems/freebsd/sys/socket.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/if_media.h>
+#include <rtems/freebsd/net/ethernet.h>
+
+#include <rtems/freebsd/net80211/ieee80211_var.h>
+
+static void *none_attach(struct ieee80211vap *, struct ieee80211_key *);
+static void none_detach(struct ieee80211_key *);
+static int none_setkey(struct ieee80211_key *);
+static int none_encap(struct ieee80211_key *, struct mbuf *, uint8_t);
+static int none_decap(struct ieee80211_key *, struct mbuf *, int);
+static int none_enmic(struct ieee80211_key *, struct mbuf *, int);
+static int none_demic(struct ieee80211_key *, struct mbuf *, int);
+
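+/*
+ * NB: the null cipher is bound to key slots that are not (yet)
+ * configured; encap/decap below intentionally fail and bump the
+ * badcipher/badkeyid statistics so traffic on an unset key is
+ * dropped and accounted for.
+ */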
+const struct ieee80211_cipher ieee80211_cipher_none = {
+ .ic_name = "NONE",
+ .ic_cipher = IEEE80211_CIPHER_NONE,
+ .ic_header = 0,
+ .ic_trailer = 0,
+ .ic_miclen = 0,
+ .ic_attach = none_attach,
+ .ic_detach = none_detach,
+ .ic_setkey = none_setkey,
+ .ic_encap = none_encap,
+ .ic_decap = none_decap,
+ .ic_enmic = none_enmic,
+ .ic_demic = none_demic,
+};
+
+static void *
+none_attach(struct ieee80211vap *vap, struct ieee80211_key *k)
+{
+ return vap; /* for diagnostics+stats */
+}
+
+static void
+none_detach(struct ieee80211_key *k)
+{
+ (void) k;
+}
+
+static int
+none_setkey(struct ieee80211_key *k)
+{
+ (void) k;
+ return 1;
+}
+
+static int
+none_encap(struct ieee80211_key *k, struct mbuf *m, uint8_t keyid)
+{
+ struct ieee80211vap *vap = k->wk_private;
+#ifdef IEEE80211_DEBUG
+ struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
+#endif
+
+ /*
+ * The specified key is not set up; this can
+ * happen, for instance, when changing keys.
+ */
+ IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_CRYPTO, wh->i_addr1,
+ "key id %u is not set (encap)", keyid>>6);
+ vap->iv_stats.is_tx_badcipher++;
+ return 0;
+}
+
+static int
+none_decap(struct ieee80211_key *k, struct mbuf *m, int hdrlen)
+{
+ struct ieee80211vap *vap = k->wk_private;
+#ifdef IEEE80211_DEBUG
+ struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
+ const uint8_t *ivp = (const uint8_t *)&wh[1];
+#endif
+
+ /*
+ * The specified key is not set up; this can
+ * happen, for instance, when changing keys.
+ */
+ /* XXX useful to know dst too */
+ IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_CRYPTO, wh->i_addr2,
+ "key id %u is not set (decap)", ivp[IEEE80211_WEP_IVLEN] >> 6);
+ vap->iv_stats.is_rx_badkeyid++;
+ return 0;
+}
+
+static int
+none_enmic(struct ieee80211_key *k, struct mbuf *m, int force)
+{
+ struct ieee80211vap *vap = k->wk_private;
+
+ vap->iv_stats.is_tx_badcipher++;
+ return 0;
+}
+
+static int
+none_demic(struct ieee80211_key *k, struct mbuf *m, int force)
+{
+ struct ieee80211vap *vap = k->wk_private;
+
+ vap->iv_stats.is_rx_badkeyid++;
+ return 0;
+}
diff --git a/rtems/freebsd/net80211/ieee80211_crypto_tkip.c b/rtems/freebsd/net80211/ieee80211_crypto_tkip.c
new file mode 100644
index 00000000..7e4bec67
--- /dev/null
+++ b/rtems/freebsd/net80211/ieee80211_crypto_tkip.c
@@ -0,0 +1,1000 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 2002-2008 Sam Leffler, Errno Consulting
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * IEEE 802.11i TKIP crypto support.
+ *
+ * Part of this module is derived from similar code in the Host
+ * AP driver. The code is used with the consent of the author and
+ * its license is included below.
+ */
+#include <rtems/freebsd/local/opt_wlan.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/module.h>
+#include <rtems/freebsd/sys/endian.h>
+
+#include <rtems/freebsd/sys/socket.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/if_media.h>
+#include <rtems/freebsd/net/ethernet.h>
+
+#include <rtems/freebsd/net80211/ieee80211_var.h>
+
+static void *tkip_attach(struct ieee80211vap *, struct ieee80211_key *);
+static void tkip_detach(struct ieee80211_key *);
+static int tkip_setkey(struct ieee80211_key *);
+static int tkip_encap(struct ieee80211_key *, struct mbuf *m, uint8_t keyid);
+static int tkip_enmic(struct ieee80211_key *, struct mbuf *, int);
+static int tkip_decap(struct ieee80211_key *, struct mbuf *, int);
+static int tkip_demic(struct ieee80211_key *, struct mbuf *, int);
+
+static const struct ieee80211_cipher tkip = {
+ .ic_name = "TKIP",
+ .ic_cipher = IEEE80211_CIPHER_TKIP,
+ .ic_header = IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN +
+ IEEE80211_WEP_EXTIVLEN,
+ .ic_trailer = IEEE80211_WEP_CRCLEN,
+ .ic_miclen = IEEE80211_WEP_MICLEN,
+ .ic_attach = tkip_attach,
+ .ic_detach = tkip_detach,
+ .ic_setkey = tkip_setkey,
+ .ic_encap = tkip_encap,
+ .ic_decap = tkip_decap,
+ .ic_enmic = tkip_enmic,
+ .ic_demic = tkip_demic,
+};
+
+typedef uint8_t u8;
+typedef uint16_t u16;
+typedef uint32_t __u32;
+typedef uint32_t u32;
+
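+/*
+ * NB: tx and rx each cache the phase 1 output (ttak) and the
+ * per-packet RC4 key; rx_rsc holds the received TSC until the
+ * Michael MIC verifies, at which point tkip_demic commits it
+ * to wk_keyrsc.
+ */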
+struct tkip_ctx {
+ struct ieee80211vap *tc_vap; /* for diagnostics+statistics */
+
+ u16 tx_ttak[5];
+ int tx_phase1_done;
+ u8 tx_rc4key[16]; /* XXX for test module; make locals? */
+
+ u16 rx_ttak[5];
+ int rx_phase1_done;
+ u8 rx_rc4key[16]; /* XXX for test module; make locals? */
+ uint64_t rx_rsc; /* held until MIC verified */
+};
+
+static void michael_mic(struct tkip_ctx *, const u8 *key,
+ struct mbuf *m, u_int off, size_t data_len,
+ u8 mic[IEEE80211_WEP_MICLEN]);
+static int tkip_encrypt(struct tkip_ctx *, struct ieee80211_key *,
+ struct mbuf *, int hdr_len);
+static int tkip_decrypt(struct tkip_ctx *, struct ieee80211_key *,
+ struct mbuf *, int hdr_len);
+
+/* number of references from net80211 layer */
+static int nrefs = 0;
+
+static void *
+tkip_attach(struct ieee80211vap *vap, struct ieee80211_key *k)
+{
+ struct tkip_ctx *ctx;
+
+ ctx = (struct tkip_ctx *) malloc(sizeof(struct tkip_ctx),
+ M_80211_CRYPTO, M_NOWAIT | M_ZERO);
+ if (ctx == NULL) {
+ vap->iv_stats.is_crypto_nomem++;
+ return NULL;
+ }
+
+ ctx->tc_vap = vap;
+ nrefs++; /* NB: we assume caller locking */
+ return ctx;
+}
+
+static void
+tkip_detach(struct ieee80211_key *k)
+{
+ struct tkip_ctx *ctx = k->wk_private;
+
+ free(ctx, M_80211_CRYPTO);
+ KASSERT(nrefs > 0, ("imbalanced attach/detach"));
+ nrefs--; /* NB: we assume caller locking */
+}
+
+static int
+tkip_setkey(struct ieee80211_key *k)
+{
+ struct tkip_ctx *ctx = k->wk_private;
+
+ if (k->wk_keylen != (128/NBBY)) {
+ (void) ctx; /* XXX */
+ IEEE80211_DPRINTF(ctx->tc_vap, IEEE80211_MSG_CRYPTO,
+ "%s: Invalid key length %u, expecting %u\n",
+ __func__, k->wk_keylen, 128/NBBY);
+ return 0;
+ }
+ k->wk_keytsc = 1; /* TSC starts at 1 */
+ ctx->rx_phase1_done = 0;
+ return 1;
+}
+
+/*
+ * Add privacy headers and do any s/w encryption required.
+ */
+static int
+tkip_encap(struct ieee80211_key *k, struct mbuf *m, uint8_t keyid)
+{
+ struct tkip_ctx *ctx = k->wk_private;
+ struct ieee80211vap *vap = ctx->tc_vap;
+ struct ieee80211com *ic = vap->iv_ic;
+ uint8_t *ivp;
+ int hdrlen;
+
+ /*
+ * Handle the TKIP countermeasures requirement.
+ */
+ if (vap->iv_flags & IEEE80211_F_COUNTERM) {
+#ifdef IEEE80211_DEBUG
+ struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
+#endif
+
+ IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_CRYPTO, wh->i_addr2,
+ "discard frame due to countermeasures (%s)", __func__);
+ vap->iv_stats.is_crypto_tkipcm++;
+ return 0;
+ }
+ hdrlen = ieee80211_hdrspace(ic, mtod(m, void *));
+
+ /*
+ * Copy down 802.11 header and add the IV, KeyID, and ExtIV.
+ */
+ M_PREPEND(m, tkip.ic_header, M_NOWAIT);
+ if (m == NULL)
+ return 0;
+ ivp = mtod(m, uint8_t *);
+ memmove(ivp, ivp + tkip.ic_header, hdrlen);
+ ivp += hdrlen;
+
+ ivp[0] = k->wk_keytsc >> 8; /* TSC1 */
+ ivp[1] = (ivp[0] | 0x20) & 0x7f; /* WEP seed */
+ ivp[2] = k->wk_keytsc >> 0; /* TSC0 */
+ ivp[3] = keyid | IEEE80211_WEP_EXTIV; /* KeyID | ExtID */
+ ivp[4] = k->wk_keytsc >> 16; /* TSC2 */
+ ivp[5] = k->wk_keytsc >> 24; /* TSC3 */
+ ivp[6] = k->wk_keytsc >> 32; /* TSC4 */
+ ivp[7] = k->wk_keytsc >> 40; /* TSC5 */
+
+ /*
+ * Finally, do software encryption if needed.
+ */
+ if (k->wk_flags & IEEE80211_KEY_SWENCRYPT) {
+ if (!tkip_encrypt(ctx, k, m, hdrlen))
+ return 0;
+ /* NB: tkip_encrypt handles wk_keytsc */
+ } else
+ k->wk_keytsc++;
+
+ return 1;
+}
+
+/*
+ * Add MIC to the frame as needed.
+ */
+static int
+tkip_enmic(struct ieee80211_key *k, struct mbuf *m, int force)
+{
+ struct tkip_ctx *ctx = k->wk_private;
+
+ if (force || (k->wk_flags & IEEE80211_KEY_SWENMIC)) {
+ struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
+ struct ieee80211vap *vap = ctx->tc_vap;
+ struct ieee80211com *ic = vap->iv_ic;
+ int hdrlen;
+ uint8_t mic[IEEE80211_WEP_MICLEN];
+
+ vap->iv_stats.is_crypto_tkipenmic++;
+
+ hdrlen = ieee80211_hdrspace(ic, wh);
+
+ michael_mic(ctx, k->wk_txmic,
+ m, hdrlen, m->m_pkthdr.len - hdrlen, mic);
+ return m_append(m, tkip.ic_miclen, mic);
+ }
+ return 1;
+}
+
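+/*
+ * NB: READ_6 packs the six TSC octets into a 64-bit value with the
+ * 16-bit packet counter (IV16) in the low 16 bits and the 32-bit
+ * upper counter (IV32) in bits 16-47, matching the (u16) and >>16
+ * extractions done in tkip_decrypt.
+ */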
+static __inline uint64_t
+READ_6(uint8_t b0, uint8_t b1, uint8_t b2, uint8_t b3, uint8_t b4, uint8_t b5)
+{
+ uint32_t iv32 = (b0 << 0) | (b1 << 8) | (b2 << 16) | (b3 << 24);
+ uint16_t iv16 = (b4 << 0) | (b5 << 8);
+ return (((uint64_t)iv16) << 32) | iv32;
+}
+
+/*
+ * Validate and strip privacy headers (and trailer) for a
+ * received frame. If necessary, decrypt the frame using
+ * the specified key.
+ */
+static int
+tkip_decap(struct ieee80211_key *k, struct mbuf *m, int hdrlen)
+{
+ struct tkip_ctx *ctx = k->wk_private;
+ struct ieee80211vap *vap = ctx->tc_vap;
+ struct ieee80211_frame *wh;
+ uint8_t *ivp, tid;
+
+ /*
+ * Header should have extended IV and sequence number;
+ * verify the former and validate the latter.
+ */
+ wh = mtod(m, struct ieee80211_frame *);
+ ivp = mtod(m, uint8_t *) + hdrlen;
+ if ((ivp[IEEE80211_WEP_IVLEN] & IEEE80211_WEP_EXTIV) == 0) {
+ /*
+ * No extended IV; discard frame.
+ */
+ IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_CRYPTO, wh->i_addr2,
+ "%s", "missing ExtIV for TKIP cipher");
+ vap->iv_stats.is_rx_tkipformat++;
+ return 0;
+ }
+ /*
+ * Handle the TKIP countermeasures requirement.
+ */
+ if (vap->iv_flags & IEEE80211_F_COUNTERM) {
+ IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_CRYPTO, wh->i_addr2,
+ "discard frame due to countermeasures (%s)", __func__);
+ vap->iv_stats.is_crypto_tkipcm++;
+ return 0;
+ }
+
+ tid = ieee80211_gettid(wh);
+ ctx->rx_rsc = READ_6(ivp[2], ivp[0], ivp[4], ivp[5], ivp[6], ivp[7]);
+ if (ctx->rx_rsc <= k->wk_keyrsc[tid]) {
+ /*
+ * Replay violation; notify upper layer.
+ */
+ ieee80211_notify_replay_failure(vap, wh, k, ctx->rx_rsc, tid);
+ vap->iv_stats.is_rx_tkipreplay++;
+ return 0;
+ }
+ /*
+ * NB: We can't update the rsc in the key until MIC is verified.
+ *
+ * We assume we are not preempted between doing the check above
+ * and updating wk_keyrsc when stripping the MIC in tkip_demic.
+ * Otherwise we might process another packet and discard it as
+ * a replay.
+ */
+
+ /*
+ * Check if the device handled the decrypt in hardware.
+ * If so we just strip the header; otherwise we need to
+ * handle the decrypt in software.
+ */
+ if ((k->wk_flags & IEEE80211_KEY_SWDECRYPT) &&
+ !tkip_decrypt(ctx, k, m, hdrlen))
+ return 0;
+
+ /*
+ * Copy up 802.11 header and strip crypto bits.
+ */
+ memmove(mtod(m, uint8_t *) + tkip.ic_header, mtod(m, void *), hdrlen);
+ m_adj(m, tkip.ic_header);
+ m_adj(m, -tkip.ic_trailer);
+
+ return 1;
+}
+
+/*
+ * Verify and strip MIC from the frame.
+ */
+static int
+tkip_demic(struct ieee80211_key *k, struct mbuf *m, int force)
+{
+ struct tkip_ctx *ctx = k->wk_private;
+ struct ieee80211_frame *wh;
+ uint8_t tid;
+
+ wh = mtod(m, struct ieee80211_frame *);
+ if ((k->wk_flags & IEEE80211_KEY_SWDEMIC) || force) {
+ struct ieee80211vap *vap = ctx->tc_vap;
+ int hdrlen = ieee80211_hdrspace(vap->iv_ic, wh);
+ u8 mic[IEEE80211_WEP_MICLEN];
+ u8 mic0[IEEE80211_WEP_MICLEN];
+
+ vap->iv_stats.is_crypto_tkipdemic++;
+
+ michael_mic(ctx, k->wk_rxmic,
+ m, hdrlen, m->m_pkthdr.len - (hdrlen + tkip.ic_miclen),
+ mic);
+ m_copydata(m, m->m_pkthdr.len - tkip.ic_miclen,
+ tkip.ic_miclen, mic0);
+ if (memcmp(mic, mic0, tkip.ic_miclen)) {
+ /* NB: 802.11 layer handles statistic and debug msg */
+ ieee80211_notify_michael_failure(vap, wh,
+ k->wk_rxkeyix != IEEE80211_KEYIX_NONE ?
+ k->wk_rxkeyix : k->wk_keyix);
+ return 0;
+ }
+ }
+ /*
+ * Strip MIC from the tail.
+ */
+ m_adj(m, -tkip.ic_miclen);
+
+ /*
+ * Ok to update rsc now that MIC has been verified.
+ */
+ tid = ieee80211_gettid(wh);
+ k->wk_keyrsc[tid] = ctx->rx_rsc;
+
+ return 1;
+}
+
+/*
+ * Host AP crypt: host-based TKIP encryption implementation for Host AP driver
+ *
+ * Copyright (c) 2003-2004, Jouni Malinen <jkmaline@cc.hut.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation. See README and COPYING for
+ * more details.
+ *
+ * Alternatively, this software may be distributed under the terms of BSD
+ * license.
+ */
+
+static const __u32 crc32_table[256] = {
+ 0x00000000L, 0x77073096L, 0xee0e612cL, 0x990951baL, 0x076dc419L,
+ 0x706af48fL, 0xe963a535L, 0x9e6495a3L, 0x0edb8832L, 0x79dcb8a4L,
+ 0xe0d5e91eL, 0x97d2d988L, 0x09b64c2bL, 0x7eb17cbdL, 0xe7b82d07L,
+ 0x90bf1d91L, 0x1db71064L, 0x6ab020f2L, 0xf3b97148L, 0x84be41deL,
+ 0x1adad47dL, 0x6ddde4ebL, 0xf4d4b551L, 0x83d385c7L, 0x136c9856L,
+ 0x646ba8c0L, 0xfd62f97aL, 0x8a65c9ecL, 0x14015c4fL, 0x63066cd9L,
+ 0xfa0f3d63L, 0x8d080df5L, 0x3b6e20c8L, 0x4c69105eL, 0xd56041e4L,
+ 0xa2677172L, 0x3c03e4d1L, 0x4b04d447L, 0xd20d85fdL, 0xa50ab56bL,
+ 0x35b5a8faL, 0x42b2986cL, 0xdbbbc9d6L, 0xacbcf940L, 0x32d86ce3L,
+ 0x45df5c75L, 0xdcd60dcfL, 0xabd13d59L, 0x26d930acL, 0x51de003aL,
+ 0xc8d75180L, 0xbfd06116L, 0x21b4f4b5L, 0x56b3c423L, 0xcfba9599L,
+ 0xb8bda50fL, 0x2802b89eL, 0x5f058808L, 0xc60cd9b2L, 0xb10be924L,
+ 0x2f6f7c87L, 0x58684c11L, 0xc1611dabL, 0xb6662d3dL, 0x76dc4190L,
+ 0x01db7106L, 0x98d220bcL, 0xefd5102aL, 0x71b18589L, 0x06b6b51fL,
+ 0x9fbfe4a5L, 0xe8b8d433L, 0x7807c9a2L, 0x0f00f934L, 0x9609a88eL,
+ 0xe10e9818L, 0x7f6a0dbbL, 0x086d3d2dL, 0x91646c97L, 0xe6635c01L,
+ 0x6b6b51f4L, 0x1c6c6162L, 0x856530d8L, 0xf262004eL, 0x6c0695edL,
+ 0x1b01a57bL, 0x8208f4c1L, 0xf50fc457L, 0x65b0d9c6L, 0x12b7e950L,
+ 0x8bbeb8eaL, 0xfcb9887cL, 0x62dd1ddfL, 0x15da2d49L, 0x8cd37cf3L,
+ 0xfbd44c65L, 0x4db26158L, 0x3ab551ceL, 0xa3bc0074L, 0xd4bb30e2L,
+ 0x4adfa541L, 0x3dd895d7L, 0xa4d1c46dL, 0xd3d6f4fbL, 0x4369e96aL,
+ 0x346ed9fcL, 0xad678846L, 0xda60b8d0L, 0x44042d73L, 0x33031de5L,
+ 0xaa0a4c5fL, 0xdd0d7cc9L, 0x5005713cL, 0x270241aaL, 0xbe0b1010L,
+ 0xc90c2086L, 0x5768b525L, 0x206f85b3L, 0xb966d409L, 0xce61e49fL,
+ 0x5edef90eL, 0x29d9c998L, 0xb0d09822L, 0xc7d7a8b4L, 0x59b33d17L,
+ 0x2eb40d81L, 0xb7bd5c3bL, 0xc0ba6cadL, 0xedb88320L, 0x9abfb3b6L,
+ 0x03b6e20cL, 0x74b1d29aL, 0xead54739L, 0x9dd277afL, 0x04db2615L,
+ 0x73dc1683L, 0xe3630b12L, 0x94643b84L, 0x0d6d6a3eL, 0x7a6a5aa8L,
+ 0xe40ecf0bL, 0x9309ff9dL, 0x0a00ae27L, 0x7d079eb1L, 0xf00f9344L,
+ 0x8708a3d2L, 0x1e01f268L, 0x6906c2feL, 0xf762575dL, 0x806567cbL,
+ 0x196c3671L, 0x6e6b06e7L, 0xfed41b76L, 0x89d32be0L, 0x10da7a5aL,
+ 0x67dd4accL, 0xf9b9df6fL, 0x8ebeeff9L, 0x17b7be43L, 0x60b08ed5L,
+ 0xd6d6a3e8L, 0xa1d1937eL, 0x38d8c2c4L, 0x4fdff252L, 0xd1bb67f1L,
+ 0xa6bc5767L, 0x3fb506ddL, 0x48b2364bL, 0xd80d2bdaL, 0xaf0a1b4cL,
+ 0x36034af6L, 0x41047a60L, 0xdf60efc3L, 0xa867df55L, 0x316e8eefL,
+ 0x4669be79L, 0xcb61b38cL, 0xbc66831aL, 0x256fd2a0L, 0x5268e236L,
+ 0xcc0c7795L, 0xbb0b4703L, 0x220216b9L, 0x5505262fL, 0xc5ba3bbeL,
+ 0xb2bd0b28L, 0x2bb45a92L, 0x5cb36a04L, 0xc2d7ffa7L, 0xb5d0cf31L,
+ 0x2cd99e8bL, 0x5bdeae1dL, 0x9b64c2b0L, 0xec63f226L, 0x756aa39cL,
+ 0x026d930aL, 0x9c0906a9L, 0xeb0e363fL, 0x72076785L, 0x05005713L,
+ 0x95bf4a82L, 0xe2b87a14L, 0x7bb12baeL, 0x0cb61b38L, 0x92d28e9bL,
+ 0xe5d5be0dL, 0x7cdcefb7L, 0x0bdbdf21L, 0x86d3d2d4L, 0xf1d4e242L,
+ 0x68ddb3f8L, 0x1fda836eL, 0x81be16cdL, 0xf6b9265bL, 0x6fb077e1L,
+ 0x18b74777L, 0x88085ae6L, 0xff0f6a70L, 0x66063bcaL, 0x11010b5cL,
+ 0x8f659effL, 0xf862ae69L, 0x616bffd3L, 0x166ccf45L, 0xa00ae278L,
+ 0xd70dd2eeL, 0x4e048354L, 0x3903b3c2L, 0xa7672661L, 0xd06016f7L,
+ 0x4969474dL, 0x3e6e77dbL, 0xaed16a4aL, 0xd9d65adcL, 0x40df0b66L,
+ 0x37d83bf0L, 0xa9bcae53L, 0xdebb9ec5L, 0x47b2cf7fL, 0x30b5ffe9L,
+ 0xbdbdf21cL, 0xcabac28aL, 0x53b39330L, 0x24b4a3a6L, 0xbad03605L,
+ 0xcdd70693L, 0x54de5729L, 0x23d967bfL, 0xb3667a2eL, 0xc4614ab8L,
+ 0x5d681b02L, 0x2a6f2b94L, 0xb40bbe37L, 0xc30c8ea1L, 0x5a05df1bL,
+ 0x2d02ef8dL
+};
+
+static __inline u16 RotR1(u16 val)
+{
+ return (val >> 1) | (val << 15);
+}
+
+static __inline u8 Lo8(u16 val)
+{
+ return val & 0xff;
+}
+
+static __inline u8 Hi8(u16 val)
+{
+ return val >> 8;
+}
+
+static __inline u16 Lo16(u32 val)
+{
+ return val & 0xffff;
+}
+
+static __inline u16 Hi16(u32 val)
+{
+ return val >> 16;
+}
+
+static __inline u16 Mk16(u8 hi, u8 lo)
+{
+ return lo | (((u16) hi) << 8);
+}
+
+static __inline u16 Mk16_le(const u16 *v)
+{
+ return le16toh(*v);
+}
+
+static const u16 Sbox[256] = {
+ 0xC6A5, 0xF884, 0xEE99, 0xF68D, 0xFF0D, 0xD6BD, 0xDEB1, 0x9154,
+ 0x6050, 0x0203, 0xCEA9, 0x567D, 0xE719, 0xB562, 0x4DE6, 0xEC9A,
+ 0x8F45, 0x1F9D, 0x8940, 0xFA87, 0xEF15, 0xB2EB, 0x8EC9, 0xFB0B,
+ 0x41EC, 0xB367, 0x5FFD, 0x45EA, 0x23BF, 0x53F7, 0xE496, 0x9B5B,
+ 0x75C2, 0xE11C, 0x3DAE, 0x4C6A, 0x6C5A, 0x7E41, 0xF502, 0x834F,
+ 0x685C, 0x51F4, 0xD134, 0xF908, 0xE293, 0xAB73, 0x6253, 0x2A3F,
+ 0x080C, 0x9552, 0x4665, 0x9D5E, 0x3028, 0x37A1, 0x0A0F, 0x2FB5,
+ 0x0E09, 0x2436, 0x1B9B, 0xDF3D, 0xCD26, 0x4E69, 0x7FCD, 0xEA9F,
+ 0x121B, 0x1D9E, 0x5874, 0x342E, 0x362D, 0xDCB2, 0xB4EE, 0x5BFB,
+ 0xA4F6, 0x764D, 0xB761, 0x7DCE, 0x527B, 0xDD3E, 0x5E71, 0x1397,
+ 0xA6F5, 0xB968, 0x0000, 0xC12C, 0x4060, 0xE31F, 0x79C8, 0xB6ED,
+ 0xD4BE, 0x8D46, 0x67D9, 0x724B, 0x94DE, 0x98D4, 0xB0E8, 0x854A,
+ 0xBB6B, 0xC52A, 0x4FE5, 0xED16, 0x86C5, 0x9AD7, 0x6655, 0x1194,
+ 0x8ACF, 0xE910, 0x0406, 0xFE81, 0xA0F0, 0x7844, 0x25BA, 0x4BE3,
+ 0xA2F3, 0x5DFE, 0x80C0, 0x058A, 0x3FAD, 0x21BC, 0x7048, 0xF104,
+ 0x63DF, 0x77C1, 0xAF75, 0x4263, 0x2030, 0xE51A, 0xFD0E, 0xBF6D,
+ 0x814C, 0x1814, 0x2635, 0xC32F, 0xBEE1, 0x35A2, 0x88CC, 0x2E39,
+ 0x9357, 0x55F2, 0xFC82, 0x7A47, 0xC8AC, 0xBAE7, 0x322B, 0xE695,
+ 0xC0A0, 0x1998, 0x9ED1, 0xA37F, 0x4466, 0x547E, 0x3BAB, 0x0B83,
+ 0x8CCA, 0xC729, 0x6BD3, 0x283C, 0xA779, 0xBCE2, 0x161D, 0xAD76,
+ 0xDB3B, 0x6456, 0x744E, 0x141E, 0x92DB, 0x0C0A, 0x486C, 0xB8E4,
+ 0x9F5D, 0xBD6E, 0x43EF, 0xC4A6, 0x39A8, 0x31A4, 0xD337, 0xF28B,
+ 0xD532, 0x8B43, 0x6E59, 0xDAB7, 0x018C, 0xB164, 0x9CD2, 0x49E0,
+ 0xD8B4, 0xACFA, 0xF307, 0xCF25, 0xCAAF, 0xF48E, 0x47E9, 0x1018,
+ 0x6FD5, 0xF088, 0x4A6F, 0x5C72, 0x3824, 0x57F1, 0x73C7, 0x9751,
+ 0xCB23, 0xA17C, 0xE89C, 0x3E21, 0x96DD, 0x61DC, 0x0D86, 0x0F85,
+ 0xE090, 0x7C42, 0x71C4, 0xCCAA, 0x90D8, 0x0605, 0xF701, 0x1C12,
+ 0xC2A3, 0x6A5F, 0xAEF9, 0x69D0, 0x1791, 0x9958, 0x3A27, 0x27B9,
+ 0xD938, 0xEB13, 0x2BB3, 0x2233, 0xD2BB, 0xA970, 0x0789, 0x33A7,
+ 0x2DB6, 0x3C22, 0x1592, 0xC920, 0x8749, 0xAAFF, 0x5078, 0xA57A,
+ 0x038F, 0x59F8, 0x0980, 0x1A17, 0x65DA, 0xD731, 0x84C6, 0xD0B8,
+ 0x82C3, 0x29B0, 0x5A77, 0x1E11, 0x7BCB, 0xA8FC, 0x6DD6, 0x2C3A,
+};
+
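+/*
+ * NB: the reference TKIP S-box is a pair of 16-bit tables in which
+ * the second is the byte swap of the first; only one table is stored
+ * here and _S_ byte-swaps the high-byte lookup to recover the other,
+ * halving the table footprint.
+ */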
+static __inline u16 _S_(u16 v)
+{
+ u16 t = Sbox[Hi8(v)];
+ return Sbox[Lo8(v)] ^ ((t << 8) | (t >> 8));
+}
+
+#define PHASE1_LOOP_COUNT 8
+
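+/*
+ * NB: phase 1 depends only on the temporal key, the transmitter
+ * address and the high 32 bits of the TSC (IV32), so callers cache
+ * its output (TTAK) and recompute it only when IV32 changes.
+ */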
+static void tkip_mixing_phase1(u16 *TTAK, const u8 *TK, const u8 *TA, u32 IV32)
+{
+ int i, j;
+
+ /* Initialize the 80-bit TTAK from TSC (IV32) and TA[0..5] */
+ TTAK[0] = Lo16(IV32);
+ TTAK[1] = Hi16(IV32);
+ TTAK[2] = Mk16(TA[1], TA[0]);
+ TTAK[3] = Mk16(TA[3], TA[2]);
+ TTAK[4] = Mk16(TA[5], TA[4]);
+
+ for (i = 0; i < PHASE1_LOOP_COUNT; i++) {
+ j = 2 * (i & 1);
+ TTAK[0] += _S_(TTAK[4] ^ Mk16(TK[1 + j], TK[0 + j]));
+ TTAK[1] += _S_(TTAK[0] ^ Mk16(TK[5 + j], TK[4 + j]));
+ TTAK[2] += _S_(TTAK[1] ^ Mk16(TK[9 + j], TK[8 + j]));
+ TTAK[3] += _S_(TTAK[2] ^ Mk16(TK[13 + j], TK[12 + j]));
+ TTAK[4] += _S_(TTAK[3] ^ Mk16(TK[1 + j], TK[0 + j])) + i;
+ }
+}
+
+#ifndef _BYTE_ORDER
+#error "Don't know native byte order"
+#endif
+
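+/*
+ * NB: phase 2 folds the low 16 bits of the TSC (IV16) into the cached
+ * TTAK to yield the 128-bit per-packet RC4 key; WEPSeed[0..2] double
+ * as the cleartext WEP IV and the masking of WEPSeed[1] avoids the
+ * known weak-IV key classes.
+ */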
+static void tkip_mixing_phase2(u8 *WEPSeed, const u8 *TK, const u16 *TTAK,
+ u16 IV16)
+{
+ /* Make temporary area overlap WEP seed so that the final copy can be
+ * avoided on little endian hosts. */
+ u16 *PPK = (u16 *) &WEPSeed[4];
+
+ /* Step 1 - make copy of TTAK and bring in TSC */
+ PPK[0] = TTAK[0];
+ PPK[1] = TTAK[1];
+ PPK[2] = TTAK[2];
+ PPK[3] = TTAK[3];
+ PPK[4] = TTAK[4];
+ PPK[5] = TTAK[4] + IV16;
+
+ /* Step 2 - 96-bit bijective mixing using S-box */
+ PPK[0] += _S_(PPK[5] ^ Mk16_le((const u16 *) &TK[0]));
+ PPK[1] += _S_(PPK[0] ^ Mk16_le((const u16 *) &TK[2]));
+ PPK[2] += _S_(PPK[1] ^ Mk16_le((const u16 *) &TK[4]));
+ PPK[3] += _S_(PPK[2] ^ Mk16_le((const u16 *) &TK[6]));
+ PPK[4] += _S_(PPK[3] ^ Mk16_le((const u16 *) &TK[8]));
+ PPK[5] += _S_(PPK[4] ^ Mk16_le((const u16 *) &TK[10]));
+
+ PPK[0] += RotR1(PPK[5] ^ Mk16_le((const u16 *) &TK[12]));
+ PPK[1] += RotR1(PPK[0] ^ Mk16_le((const u16 *) &TK[14]));
+ PPK[2] += RotR1(PPK[1]);
+ PPK[3] += RotR1(PPK[2]);
+ PPK[4] += RotR1(PPK[3]);
+ PPK[5] += RotR1(PPK[4]);
+
+ /* Step 3 - bring in last of TK bits, assign 24-bit WEP IV value
+ * WEPSeed[0..2] is transmitted as WEP IV */
+ WEPSeed[0] = Hi8(IV16);
+ WEPSeed[1] = (Hi8(IV16) | 0x20) & 0x7F;
+ WEPSeed[2] = Lo8(IV16);
+ WEPSeed[3] = Lo8((PPK[5] ^ Mk16_le((const u16 *) &TK[0])) >> 1);
+
+#if _BYTE_ORDER == _BIG_ENDIAN
+ {
+ int i;
+ for (i = 0; i < 6; i++)
+ PPK[i] = (PPK[i] << 8) | (PPK[i] >> 8);
+ }
+#endif
+}
+
+static void
+wep_encrypt(u8 *key, struct mbuf *m0, u_int off, size_t data_len,
+ uint8_t icv[IEEE80211_WEP_CRCLEN])
+{
+ u32 i, j, k, crc;
+ size_t buflen;
+ u8 S[256];
+ u8 *pos;
+ struct mbuf *m;
+#define S_SWAP(a,b) do { u8 t = S[a]; S[a] = S[b]; S[b] = t; } while(0)
+
+ /* Setup RC4 state */
+ for (i = 0; i < 256; i++)
+ S[i] = i;
+ j = 0;
+ for (i = 0; i < 256; i++) {
+ j = (j + S[i] + key[i & 0x0f]) & 0xff;
+ S_SWAP(i, j);
+ }
+
+ /* Compute CRC32 over unencrypted data and apply RC4 to data */
+ crc = ~0;
+ i = j = 0;
+ m = m0;
+ pos = mtod(m, uint8_t *) + off;
+ buflen = m->m_len - off;
+ for (;;) {
+ if (buflen > data_len)
+ buflen = data_len;
+ data_len -= buflen;
+ for (k = 0; k < buflen; k++) {
+ crc = crc32_table[(crc ^ *pos) & 0xff] ^ (crc >> 8);
+ i = (i + 1) & 0xff;
+ j = (j + S[i]) & 0xff;
+ S_SWAP(i, j);
+ *pos++ ^= S[(S[i] + S[j]) & 0xff];
+ }
+ m = m->m_next;
+ if (m == NULL) {
+ KASSERT(data_len == 0,
+ ("out of buffers with data_len %zu\n", data_len));
+ break;
+ }
+ pos = mtod(m, uint8_t *);
+ buflen = m->m_len;
+ }
+ crc = ~crc;
+
+ /* Append little-endian CRC32 and encrypt it to produce ICV */
+ icv[0] = crc;
+ icv[1] = crc >> 8;
+ icv[2] = crc >> 16;
+ icv[3] = crc >> 24;
+ for (k = 0; k < IEEE80211_WEP_CRCLEN; k++) {
+ i = (i + 1) & 0xff;
+ j = (j + S[i]) & 0xff;
+ S_SWAP(i, j);
+ icv[k] ^= S[(S[i] + S[j]) & 0xff];
+ }
+}
+
+static int
+wep_decrypt(u8 *key, struct mbuf *m, u_int off, size_t data_len)
+{
+ u32 i, j, k, crc;
+ u8 S[256];
+ u8 *pos, icv[4];
+ size_t buflen;
+
+ /* Setup RC4 state */
+ for (i = 0; i < 256; i++)
+ S[i] = i;
+ j = 0;
+ for (i = 0; i < 256; i++) {
+ j = (j + S[i] + key[i & 0x0f]) & 0xff;
+ S_SWAP(i, j);
+ }
+
+ /* Apply RC4 to data and compute CRC32 over decrypted data */
+ crc = ~0;
+ i = j = 0;
+ pos = mtod(m, uint8_t *) + off;
+ buflen = m->m_len - off;
+ for (;;) {
+ if (buflen > data_len)
+ buflen = data_len;
+ data_len -= buflen;
+ for (k = 0; k < buflen; k++) {
+ i = (i + 1) & 0xff;
+ j = (j + S[i]) & 0xff;
+ S_SWAP(i, j);
+ *pos ^= S[(S[i] + S[j]) & 0xff];
+ crc = crc32_table[(crc ^ *pos) & 0xff] ^ (crc >> 8);
+ pos++;
+ }
+ m = m->m_next;
+ if (m == NULL) {
+ KASSERT(data_len == 0,
+ ("out of buffers with data_len %zu\n", data_len));
+ break;
+ }
+ pos = mtod(m, uint8_t *);
+ buflen = m->m_len;
+ }
+ crc = ~crc;
+
+ /* Encrypt little-endian CRC32 and verify that it matches the
+ * received ICV */
+ icv[0] = crc;
+ icv[1] = crc >> 8;
+ icv[2] = crc >> 16;
+ icv[3] = crc >> 24;
+ for (k = 0; k < 4; k++) {
+ i = (i + 1) & 0xff;
+ j = (j + S[i]) & 0xff;
+ S_SWAP(i, j);
+ if ((icv[k] ^ S[(S[i] + S[j]) & 0xff]) != *pos++) {
+ /* ICV mismatch - drop frame */
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+
+static __inline u32 rotl(u32 val, int bits)
+{
+ return (val << bits) | (val >> (32 - bits));
+}
+
+
+static __inline u32 rotr(u32 val, int bits)
+{
+ return (val >> bits) | (val << (32 - bits));
+}
+
+
+static __inline u32 xswap(u32 val)
+{
+ return ((val & 0x00ff00ff) << 8) | ((val & 0xff00ff00) >> 8);
+}
+
+
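+/*
+ * NB: michael_block is the unkeyed Michael round: after each 32-bit
+ * message word is XOR'd into l, the 64-bit state (l,r) is mixed with
+ * rotates, a byte swap and modulo-2^32 additions.
+ */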
+#define michael_block(l, r) \
+do { \
+ r ^= rotl(l, 17); \
+ l += r; \
+ r ^= xswap(l); \
+ l += r; \
+ r ^= rotl(l, 3); \
+ l += r; \
+ r ^= rotr(l, 2); \
+ l += r; \
+} while (0)
+
+
+static __inline u32 get_le32_split(u8 b0, u8 b1, u8 b2, u8 b3)
+{
+ return b0 | (b1 << 8) | (b2 << 16) | (b3 << 24);
+}
+
+static __inline u32 get_le32(const u8 *p)
+{
+ return get_le32_split(p[0], p[1], p[2], p[3]);
+}
+
+
+static __inline void put_le32(u8 *p, u32 v)
+{
+ p[0] = v;
+ p[1] = v >> 8;
+ p[2] = v >> 16;
+ p[3] = v >> 24;
+}
+
+/*
+ * Craft pseudo header used to calculate the MIC.
+ */
+static void
+michael_mic_hdr(const struct ieee80211_frame *wh0, uint8_t hdr[16])
+{
+ const struct ieee80211_frame_addr4 *wh =
+ (const struct ieee80211_frame_addr4 *) wh0;
+
+ switch (wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) {
+ case IEEE80211_FC1_DIR_NODS:
+ IEEE80211_ADDR_COPY(hdr, wh->i_addr1); /* DA */
+ IEEE80211_ADDR_COPY(hdr + IEEE80211_ADDR_LEN, wh->i_addr2);
+ break;
+ case IEEE80211_FC1_DIR_TODS:
+ IEEE80211_ADDR_COPY(hdr, wh->i_addr3); /* DA */
+ IEEE80211_ADDR_COPY(hdr + IEEE80211_ADDR_LEN, wh->i_addr2);
+ break;
+ case IEEE80211_FC1_DIR_FROMDS:
+ IEEE80211_ADDR_COPY(hdr, wh->i_addr1); /* DA */
+ IEEE80211_ADDR_COPY(hdr + IEEE80211_ADDR_LEN, wh->i_addr3);
+ break;
+ case IEEE80211_FC1_DIR_DSTODS:
+ IEEE80211_ADDR_COPY(hdr, wh->i_addr3); /* DA */
+ IEEE80211_ADDR_COPY(hdr + IEEE80211_ADDR_LEN, wh->i_addr4);
+ break;
+ }
+
+ if (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_QOS) {
+ const struct ieee80211_qosframe *qwh =
+ (const struct ieee80211_qosframe *) wh;
+ hdr[12] = qwh->i_qos[0] & IEEE80211_QOS_TID;
+ } else
+ hdr[12] = 0;
+ hdr[13] = hdr[14] = hdr[15] = 0; /* reserved */
+}
+
+static void
+michael_mic(struct tkip_ctx *ctx, const u8 *key,
+ struct mbuf *m, u_int off, size_t data_len,
+ u8 mic[IEEE80211_WEP_MICLEN])
+{
+ uint8_t hdr[16];
+ u32 l, r;
+ const uint8_t *data;
+ u_int space;
+
+ michael_mic_hdr(mtod(m, struct ieee80211_frame *), hdr);
+
+ l = get_le32(key);
+ r = get_le32(key + 4);
+
+ /* Michael MIC pseudo header: DA, SA, 3 x 0, Priority */
+ l ^= get_le32(hdr);
+ michael_block(l, r);
+ l ^= get_le32(&hdr[4]);
+ michael_block(l, r);
+ l ^= get_le32(&hdr[8]);
+ michael_block(l, r);
+ l ^= get_le32(&hdr[12]);
+ michael_block(l, r);
+
+ /* first buffer has special handling */
+ data = mtod(m, const uint8_t *) + off;
+ space = m->m_len - off;
+ for (;;) {
+ if (space > data_len)
+ space = data_len;
+ /* collect 32-bit blocks from current buffer */
+ while (space >= sizeof(uint32_t)) {
+ l ^= get_le32(data);
+ michael_block(l, r);
+ data += sizeof(uint32_t), space -= sizeof(uint32_t);
+ data_len -= sizeof(uint32_t);
+ }
+ /*
+ * NB: when space is zero we make one more trip around
+ * the loop to advance to the next mbuf where there is
+ * data. This handles the case where there are 4*n
+ * bytes in an mbuf followed by <4 bytes in a later mbuf.
+ * By making an extra trip we'll drop out of the loop
+ * with m pointing at the mbuf with 3 bytes and space
+ * set as required by the remainder handling below.
+ */
+ if (data_len == 0 ||
+ (data_len < sizeof(uint32_t) && space != 0))
+ break;
+ m = m->m_next;
+ if (m == NULL) {
+ KASSERT(0, ("out of data, data_len %zu\n", data_len));
+ break;
+ }
+ if (space != 0) {
+ const uint8_t *data_next;
+ /*
+ * Block straddles buffers, split references.
+ */
+ data_next = mtod(m, const uint8_t *);
+ KASSERT(m->m_len >= sizeof(uint32_t) - space,
+ ("not enough data in following buffer, "
+ "m_len %u need %zu\n", m->m_len,
+ sizeof(uint32_t) - space));
+ switch (space) {
+ case 1:
+ l ^= get_le32_split(data[0], data_next[0],
+ data_next[1], data_next[2]);
+ data = data_next + 3;
+ space = m->m_len - 3;
+ break;
+ case 2:
+ l ^= get_le32_split(data[0], data[1],
+ data_next[0], data_next[1]);
+ data = data_next + 2;
+ space = m->m_len - 2;
+ break;
+ case 3:
+ l ^= get_le32_split(data[0], data[1],
+ data[2], data_next[0]);
+ data = data_next + 1;
+ space = m->m_len - 1;
+ break;
+ }
+ michael_block(l, r);
+ data_len -= sizeof(uint32_t);
+ } else {
+ /*
+ * Setup for next buffer.
+ */
+ data = mtod(m, const uint8_t *);
+ space = m->m_len;
+ }
+ }
+ /*
+ * Catch degenerate cases like mbuf[4*n+1 bytes] followed by
+ * mbuf[2 bytes]. I don't believe these should happen; if they
+ * do then we'll need more involved logic.
+ */
+ KASSERT(data_len <= space,
+ ("not enough data, data_len %zu space %u\n", data_len, space));
+
+ /* Last block and padding (0x5a, 4..7 x 0) */
+ switch (data_len) {
+ case 0:
+ l ^= get_le32_split(0x5a, 0, 0, 0);
+ break;
+ case 1:
+ l ^= get_le32_split(data[0], 0x5a, 0, 0);
+ break;
+ case 2:
+ l ^= get_le32_split(data[0], data[1], 0x5a, 0);
+ break;
+ case 3:
+ l ^= get_le32_split(data[0], data[1], data[2], 0x5a);
+ break;
+ }
+ michael_block(l, r);
+ /* l ^= 0; */
+ michael_block(l, r);
+
+ put_le32(mic, l);
+ put_le32(mic + 4, r);
+}
+
+static int
+tkip_encrypt(struct tkip_ctx *ctx, struct ieee80211_key *key,
+ struct mbuf *m, int hdrlen)
+{
+ struct ieee80211_frame *wh;
+ uint8_t icv[IEEE80211_WEP_CRCLEN];
+
+ ctx->tc_vap->iv_stats.is_crypto_tkip++;
+
+ wh = mtod(m, struct ieee80211_frame *);
+ if (!ctx->tx_phase1_done) {
+ tkip_mixing_phase1(ctx->tx_ttak, key->wk_key, wh->i_addr2,
+ (u32)(key->wk_keytsc >> 16));
+ ctx->tx_phase1_done = 1;
+ }
+ tkip_mixing_phase2(ctx->tx_rc4key, key->wk_key, ctx->tx_ttak,
+ (u16) key->wk_keytsc);
+
+ wep_encrypt(ctx->tx_rc4key,
+ m, hdrlen + tkip.ic_header,
+ m->m_pkthdr.len - (hdrlen + tkip.ic_header),
+ icv);
+ (void) m_append(m, IEEE80211_WEP_CRCLEN, icv); /* XXX check return */
+
+ key->wk_keytsc++;
+ if ((u16)(key->wk_keytsc) == 0)
+ ctx->tx_phase1_done = 0;
+ return 1;
+}
+
+static int
+tkip_decrypt(struct tkip_ctx *ctx, struct ieee80211_key *key,
+ struct mbuf *m, int hdrlen)
+{
+ struct ieee80211_frame *wh;
+ struct ieee80211vap *vap = ctx->tc_vap;
+ u32 iv32;
+ u16 iv16;
+ u8 tid;
+
+ vap->iv_stats.is_crypto_tkip++;
+
+ wh = mtod(m, struct ieee80211_frame *);
+ /* NB: tkip_decap already verified header and left seq in rx_rsc */
+ iv16 = (u16) ctx->rx_rsc;
+ iv32 = (u32) (ctx->rx_rsc >> 16);
+
+ tid = ieee80211_gettid(wh);
+ if (iv32 != (u32)(key->wk_keyrsc[tid] >> 16) || !ctx->rx_phase1_done) {
+ tkip_mixing_phase1(ctx->rx_ttak, key->wk_key,
+ wh->i_addr2, iv32);
+ ctx->rx_phase1_done = 1;
+ }
+ tkip_mixing_phase2(ctx->rx_rc4key, key->wk_key, ctx->rx_ttak, iv16);
+
+ /* NB: m is unstripped; deduct headers + ICV to get payload */
+ if (wep_decrypt(ctx->rx_rc4key,
+ m, hdrlen + tkip.ic_header,
+ m->m_pkthdr.len - (hdrlen + tkip.ic_header + tkip.ic_trailer))) {
+ if (iv32 != (u32)(key->wk_keyrsc[tid] >> 16)) {
+ /* Previously cached Phase1 result was already lost, so
+ * it needs to be recalculated for the next packet. */
+ ctx->rx_phase1_done = 0;
+ }
+ IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_CRYPTO, wh->i_addr2,
+ "%s", "TKIP ICV mismatch on decrypt");
+ vap->iv_stats.is_rx_tkipicv++;
+ return 0;
+ }
+ return 1;
+}
+
+/*
+ * Module glue.
+ */
+IEEE80211_CRYPTO_MODULE(tkip, 1);
diff --git a/rtems/freebsd/net80211/ieee80211_crypto_wep.c b/rtems/freebsd/net80211/ieee80211_crypto_wep.c
new file mode 100644
index 00000000..6695c354
--- /dev/null
+++ b/rtems/freebsd/net80211/ieee80211_crypto_wep.c
@@ -0,0 +1,482 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 2002-2008 Sam Leffler, Errno Consulting
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * IEEE 802.11 WEP crypto support.
+ */
+#include <rtems/freebsd/local/opt_wlan.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/module.h>
+#include <rtems/freebsd/sys/endian.h>
+
+#include <rtems/freebsd/sys/socket.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/if_media.h>
+#include <rtems/freebsd/net/ethernet.h>
+
+#include <rtems/freebsd/net80211/ieee80211_var.h>
+
+static void *wep_attach(struct ieee80211vap *, struct ieee80211_key *);
+static void wep_detach(struct ieee80211_key *);
+static int wep_setkey(struct ieee80211_key *);
+static int wep_encap(struct ieee80211_key *, struct mbuf *, uint8_t keyid);
+static int wep_decap(struct ieee80211_key *, struct mbuf *, int hdrlen);
+static int wep_enmic(struct ieee80211_key *, struct mbuf *, int);
+static int wep_demic(struct ieee80211_key *, struct mbuf *, int);
+
+static const struct ieee80211_cipher wep = {
+ .ic_name = "WEP",
+ .ic_cipher = IEEE80211_CIPHER_WEP,
+ .ic_header = IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN,
+ .ic_trailer = IEEE80211_WEP_CRCLEN,
+ .ic_miclen = 0,
+ .ic_attach = wep_attach,
+ .ic_detach = wep_detach,
+ .ic_setkey = wep_setkey,
+ .ic_encap = wep_encap,
+ .ic_decap = wep_decap,
+ .ic_enmic = wep_enmic,
+ .ic_demic = wep_demic,
+};
+
+static int wep_encrypt(struct ieee80211_key *, struct mbuf *, int hdrlen);
+static int wep_decrypt(struct ieee80211_key *, struct mbuf *, int hdrlen);
+
+struct wep_ctx {
+ struct ieee80211vap *wc_vap; /* for diagnostics+statistics */
+ struct ieee80211com *wc_ic;
+ uint32_t wc_iv; /* initial vector for crypto */
+};
+
+/* number of references from net80211 layer */
+static int nrefs = 0;
+
+static void *
+wep_attach(struct ieee80211vap *vap, struct ieee80211_key *k)
+{
+ struct wep_ctx *ctx;
+
+ ctx = (struct wep_ctx *) malloc(sizeof(struct wep_ctx),
+ M_80211_CRYPTO, M_NOWAIT | M_ZERO);
+ if (ctx == NULL) {
+ vap->iv_stats.is_crypto_nomem++;
+ return NULL;
+ }
+
+ ctx->wc_vap = vap;
+ ctx->wc_ic = vap->iv_ic;
+ get_random_bytes(&ctx->wc_iv, sizeof(ctx->wc_iv));
+ nrefs++; /* NB: we assume caller locking */
+ return ctx;
+}
+
+static void
+wep_detach(struct ieee80211_key *k)
+{
+ struct wep_ctx *ctx = k->wk_private;
+
+ free(ctx, M_80211_CRYPTO);
+ KASSERT(nrefs > 0, ("imbalanced attach/detach"));
+ nrefs--; /* NB: we assume caller locking */
+}
+
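+/*
+ * NB: any key of at least 40 bits is accepted; the RC4 key setup in
+ * wep_encrypt/wep_decrypt keys off wk_keylen so both 40- and 104-bit
+ * WEP keys work.
+ */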
+static int
+wep_setkey(struct ieee80211_key *k)
+{
+ return k->wk_keylen >= 40/NBBY;
+}
+
+/*
+ * Add privacy headers appropriate for the specified key.
+ */
+static int
+wep_encap(struct ieee80211_key *k, struct mbuf *m, uint8_t keyid)
+{
+ struct wep_ctx *ctx = k->wk_private;
+ struct ieee80211com *ic = ctx->wc_ic;
+ uint32_t iv;
+ uint8_t *ivp;
+ int hdrlen;
+
+ hdrlen = ieee80211_hdrspace(ic, mtod(m, void *));
+
+ /*
+ * Copy down 802.11 header and add the IV + KeyID.
+ */
+ M_PREPEND(m, wep.ic_header, M_NOWAIT);
+ if (m == NULL)
+ return 0;
+ ivp = mtod(m, uint8_t *);
+ ovbcopy(ivp + wep.ic_header, ivp, hdrlen);
+ ivp += hdrlen;
+
+ /*
+ * XXX
+ * IV must not duplicate during the lifetime of the key.
+ * But no mechanism to renew keys is defined in IEEE 802.11
+ * for WEP. And the IV may be duplicated at other stations
+ * because the session key itself is shared. So we use a
+ * pseudo random IV for now, though it is not the right way.
+ *
+ * NB: Rather than use a strictly random IV we select a
+ * random one to start and then increment the value for
+ * each frame. This is an explicit tradeoff between
+ * overhead and security. Given the basic insecurity of
+ * WEP this seems worthwhile.
+ */
+
+ /*
+ * Skip 'bad' IVs from Fluhrer/Mantin/Shamir:
+ * (B, 255, N) with 3 <= B < 16 and 0 <= N <= 255
+ */
+ iv = ctx->wc_iv;
+ if ((iv & 0xff00) == 0xff00) {
+ int B = (iv & 0xff0000) >> 16;
+ if (3 <= B && B < 16)
+ iv += 0x0100;
+ }
+ ctx->wc_iv = iv + 1;
+
+ /*
+ * NB: Preserve byte order of IV for packet
+ * sniffers; it doesn't matter otherwise.
+ */
+#if _BYTE_ORDER == _BIG_ENDIAN
+ ivp[0] = iv >> 0;
+ ivp[1] = iv >> 8;
+ ivp[2] = iv >> 16;
+#else
+ ivp[2] = iv >> 0;
+ ivp[1] = iv >> 8;
+ ivp[0] = iv >> 16;
+#endif
+ ivp[3] = keyid;
+
+ /*
+ * Finally, do software encryption if needed.
+ */
+ if ((k->wk_flags & IEEE80211_KEY_SWENCRYPT) &&
+ !wep_encrypt(k, m, hdrlen))
+ return 0;
+
+ return 1;
+}
+
+/*
+ * Add MIC to the frame as needed.
+ */
+static int
+wep_enmic(struct ieee80211_key *k, struct mbuf *m, int force)
+{
+
+ return 1;
+}
+
+/*
+ * Validate and strip privacy headers (and trailer) for a
+ * received frame. If necessary, decrypt the frame using
+ * the specified key.
+ */
+static int
+wep_decap(struct ieee80211_key *k, struct mbuf *m, int hdrlen)
+{
+ struct wep_ctx *ctx = k->wk_private;
+ struct ieee80211vap *vap = ctx->wc_vap;
+ struct ieee80211_frame *wh;
+
+ wh = mtod(m, struct ieee80211_frame *);
+
+ /*
+ * Check if the device handled the decrypt in hardware.
+ * If so we just strip the header; otherwise we need to
+ * handle the decrypt in software.
+ */
+ if ((k->wk_flags & IEEE80211_KEY_SWDECRYPT) &&
+ !wep_decrypt(k, m, hdrlen)) {
+ IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_CRYPTO, wh->i_addr2,
+ "%s", "WEP ICV mismatch on decrypt");
+ vap->iv_stats.is_rx_wepfail++;
+ return 0;
+ }
+
+ /*
+ * Copy up 802.11 header and strip crypto bits.
+ */
+ ovbcopy(mtod(m, void *), mtod(m, uint8_t *) + wep.ic_header, hdrlen);
+ m_adj(m, wep.ic_header);
+ m_adj(m, -wep.ic_trailer);
+
+ return 1;
+}
+
+/*
+ * Verify and strip MIC from the frame.
+ */
+static int
+wep_demic(struct ieee80211_key *k, struct mbuf *skb, int force)
+{
+ return 1;
+}
+
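+/*
+ * NB: standard little-endian CRC-32 table (the same table appears in
+ * the TKIP module); the WEP ICV is this CRC computed over the
+ * plaintext, encrypted with the trailing RC4 keystream bytes.
+ */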
+static const uint32_t crc32_table[256] = {
+ 0x00000000L, 0x77073096L, 0xee0e612cL, 0x990951baL, 0x076dc419L,
+ 0x706af48fL, 0xe963a535L, 0x9e6495a3L, 0x0edb8832L, 0x79dcb8a4L,
+ 0xe0d5e91eL, 0x97d2d988L, 0x09b64c2bL, 0x7eb17cbdL, 0xe7b82d07L,
+ 0x90bf1d91L, 0x1db71064L, 0x6ab020f2L, 0xf3b97148L, 0x84be41deL,
+ 0x1adad47dL, 0x6ddde4ebL, 0xf4d4b551L, 0x83d385c7L, 0x136c9856L,
+ 0x646ba8c0L, 0xfd62f97aL, 0x8a65c9ecL, 0x14015c4fL, 0x63066cd9L,
+ 0xfa0f3d63L, 0x8d080df5L, 0x3b6e20c8L, 0x4c69105eL, 0xd56041e4L,
+ 0xa2677172L, 0x3c03e4d1L, 0x4b04d447L, 0xd20d85fdL, 0xa50ab56bL,
+ 0x35b5a8faL, 0x42b2986cL, 0xdbbbc9d6L, 0xacbcf940L, 0x32d86ce3L,
+ 0x45df5c75L, 0xdcd60dcfL, 0xabd13d59L, 0x26d930acL, 0x51de003aL,
+ 0xc8d75180L, 0xbfd06116L, 0x21b4f4b5L, 0x56b3c423L, 0xcfba9599L,
+ 0xb8bda50fL, 0x2802b89eL, 0x5f058808L, 0xc60cd9b2L, 0xb10be924L,
+ 0x2f6f7c87L, 0x58684c11L, 0xc1611dabL, 0xb6662d3dL, 0x76dc4190L,
+ 0x01db7106L, 0x98d220bcL, 0xefd5102aL, 0x71b18589L, 0x06b6b51fL,
+ 0x9fbfe4a5L, 0xe8b8d433L, 0x7807c9a2L, 0x0f00f934L, 0x9609a88eL,
+ 0xe10e9818L, 0x7f6a0dbbL, 0x086d3d2dL, 0x91646c97L, 0xe6635c01L,
+ 0x6b6b51f4L, 0x1c6c6162L, 0x856530d8L, 0xf262004eL, 0x6c0695edL,
+ 0x1b01a57bL, 0x8208f4c1L, 0xf50fc457L, 0x65b0d9c6L, 0x12b7e950L,
+ 0x8bbeb8eaL, 0xfcb9887cL, 0x62dd1ddfL, 0x15da2d49L, 0x8cd37cf3L,
+ 0xfbd44c65L, 0x4db26158L, 0x3ab551ceL, 0xa3bc0074L, 0xd4bb30e2L,
+ 0x4adfa541L, 0x3dd895d7L, 0xa4d1c46dL, 0xd3d6f4fbL, 0x4369e96aL,
+ 0x346ed9fcL, 0xad678846L, 0xda60b8d0L, 0x44042d73L, 0x33031de5L,
+ 0xaa0a4c5fL, 0xdd0d7cc9L, 0x5005713cL, 0x270241aaL, 0xbe0b1010L,
+ 0xc90c2086L, 0x5768b525L, 0x206f85b3L, 0xb966d409L, 0xce61e49fL,
+ 0x5edef90eL, 0x29d9c998L, 0xb0d09822L, 0xc7d7a8b4L, 0x59b33d17L,
+ 0x2eb40d81L, 0xb7bd5c3bL, 0xc0ba6cadL, 0xedb88320L, 0x9abfb3b6L,
+ 0x03b6e20cL, 0x74b1d29aL, 0xead54739L, 0x9dd277afL, 0x04db2615L,
+ 0x73dc1683L, 0xe3630b12L, 0x94643b84L, 0x0d6d6a3eL, 0x7a6a5aa8L,
+ 0xe40ecf0bL, 0x9309ff9dL, 0x0a00ae27L, 0x7d079eb1L, 0xf00f9344L,
+ 0x8708a3d2L, 0x1e01f268L, 0x6906c2feL, 0xf762575dL, 0x806567cbL,
+ 0x196c3671L, 0x6e6b06e7L, 0xfed41b76L, 0x89d32be0L, 0x10da7a5aL,
+ 0x67dd4accL, 0xf9b9df6fL, 0x8ebeeff9L, 0x17b7be43L, 0x60b08ed5L,
+ 0xd6d6a3e8L, 0xa1d1937eL, 0x38d8c2c4L, 0x4fdff252L, 0xd1bb67f1L,
+ 0xa6bc5767L, 0x3fb506ddL, 0x48b2364bL, 0xd80d2bdaL, 0xaf0a1b4cL,
+ 0x36034af6L, 0x41047a60L, 0xdf60efc3L, 0xa867df55L, 0x316e8eefL,
+ 0x4669be79L, 0xcb61b38cL, 0xbc66831aL, 0x256fd2a0L, 0x5268e236L,
+ 0xcc0c7795L, 0xbb0b4703L, 0x220216b9L, 0x5505262fL, 0xc5ba3bbeL,
+ 0xb2bd0b28L, 0x2bb45a92L, 0x5cb36a04L, 0xc2d7ffa7L, 0xb5d0cf31L,
+ 0x2cd99e8bL, 0x5bdeae1dL, 0x9b64c2b0L, 0xec63f226L, 0x756aa39cL,
+ 0x026d930aL, 0x9c0906a9L, 0xeb0e363fL, 0x72076785L, 0x05005713L,
+ 0x95bf4a82L, 0xe2b87a14L, 0x7bb12baeL, 0x0cb61b38L, 0x92d28e9bL,
+ 0xe5d5be0dL, 0x7cdcefb7L, 0x0bdbdf21L, 0x86d3d2d4L, 0xf1d4e242L,
+ 0x68ddb3f8L, 0x1fda836eL, 0x81be16cdL, 0xf6b9265bL, 0x6fb077e1L,
+ 0x18b74777L, 0x88085ae6L, 0xff0f6a70L, 0x66063bcaL, 0x11010b5cL,
+ 0x8f659effL, 0xf862ae69L, 0x616bffd3L, 0x166ccf45L, 0xa00ae278L,
+ 0xd70dd2eeL, 0x4e048354L, 0x3903b3c2L, 0xa7672661L, 0xd06016f7L,
+ 0x4969474dL, 0x3e6e77dbL, 0xaed16a4aL, 0xd9d65adcL, 0x40df0b66L,
+ 0x37d83bf0L, 0xa9bcae53L, 0xdebb9ec5L, 0x47b2cf7fL, 0x30b5ffe9L,
+ 0xbdbdf21cL, 0xcabac28aL, 0x53b39330L, 0x24b4a3a6L, 0xbad03605L,
+ 0xcdd70693L, 0x54de5729L, 0x23d967bfL, 0xb3667a2eL, 0xc4614ab8L,
+ 0x5d681b02L, 0x2a6f2b94L, 0xb40bbe37L, 0xc30c8ea1L, 0x5a05df1bL,
+ 0x2d02ef8dL
+};
+
+static int
+wep_encrypt(struct ieee80211_key *key, struct mbuf *m0, int hdrlen)
+{
+#define S_SWAP(a,b) do { uint8_t t = S[a]; S[a] = S[b]; S[b] = t; } while(0)
+ struct wep_ctx *ctx = key->wk_private;
+ struct ieee80211vap *vap = ctx->wc_vap;
+ struct mbuf *m = m0;
+ uint8_t rc4key[IEEE80211_WEP_IVLEN + IEEE80211_KEYBUF_SIZE];
+ uint8_t icv[IEEE80211_WEP_CRCLEN];
+ uint32_t i, j, k, crc;
+ size_t buflen, data_len;
+ uint8_t S[256];
+ uint8_t *pos;
+ u_int off, keylen;
+
+ vap->iv_stats.is_crypto_wep++;
+
+ /* NB: this assumes the header was pulled up */
+ memcpy(rc4key, mtod(m, uint8_t *) + hdrlen, IEEE80211_WEP_IVLEN);
+ memcpy(rc4key + IEEE80211_WEP_IVLEN, key->wk_key, key->wk_keylen);
+
+ /* Setup RC4 state */
+ for (i = 0; i < 256; i++)
+ S[i] = i;
+ j = 0;
+ keylen = key->wk_keylen + IEEE80211_WEP_IVLEN;
+ for (i = 0; i < 256; i++) {
+ j = (j + S[i] + rc4key[i % keylen]) & 0xff;
+ S_SWAP(i, j);
+ }
+
+ off = hdrlen + wep.ic_header;
+ data_len = m->m_pkthdr.len - off;
+
+ /* Compute CRC32 over unencrypted data and apply RC4 to data */
+ crc = ~0;
+ i = j = 0;
+ pos = mtod(m, uint8_t *) + off;
+ buflen = m->m_len - off;
+ for (;;) {
+ if (buflen > data_len)
+ buflen = data_len;
+ data_len -= buflen;
+ for (k = 0; k < buflen; k++) {
+ crc = crc32_table[(crc ^ *pos) & 0xff] ^ (crc >> 8);
+ i = (i + 1) & 0xff;
+ j = (j + S[i]) & 0xff;
+ S_SWAP(i, j);
+ *pos++ ^= S[(S[i] + S[j]) & 0xff];
+ }
+ if (m->m_next == NULL) {
+ if (data_len != 0) { /* out of data */
+ IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_CRYPTO,
+ ether_sprintf(mtod(m0,
+ struct ieee80211_frame *)->i_addr2),
+ "out of data for WEP (data_len %zu)",
+ data_len);
+ /* XXX stat */
+ return 0;
+ }
+ break;
+ }
+ m = m->m_next;
+ pos = mtod(m, uint8_t *);
+ buflen = m->m_len;
+ }
+ crc = ~crc;
+
+ /* Append little-endian CRC32 and encrypt it to produce ICV */
+ icv[0] = crc;
+ icv[1] = crc >> 8;
+ icv[2] = crc >> 16;
+ icv[3] = crc >> 24;
+ for (k = 0; k < IEEE80211_WEP_CRCLEN; k++) {
+ i = (i + 1) & 0xff;
+ j = (j + S[i]) & 0xff;
+ S_SWAP(i, j);
+ icv[k] ^= S[(S[i] + S[j]) & 0xff];
+ }
+ return m_append(m0, IEEE80211_WEP_CRCLEN, icv);
+#undef S_SWAP
+}
+
+static int
+wep_decrypt(struct ieee80211_key *key, struct mbuf *m0, int hdrlen)
+{
+#define S_SWAP(a,b) do { uint8_t t = S[a]; S[a] = S[b]; S[b] = t; } while(0)
+ struct wep_ctx *ctx = key->wk_private;
+ struct ieee80211vap *vap = ctx->wc_vap;
+ struct mbuf *m = m0;
+ uint8_t rc4key[IEEE80211_WEP_IVLEN + IEEE80211_KEYBUF_SIZE];
+ uint8_t icv[IEEE80211_WEP_CRCLEN];
+ uint32_t i, j, k, crc;
+ size_t buflen, data_len;
+ uint8_t S[256];
+ uint8_t *pos;
+ u_int off, keylen;
+
+ vap->iv_stats.is_crypto_wep++;
+
+ /* NB: this assumes the header was pulled up */
+ memcpy(rc4key, mtod(m, uint8_t *) + hdrlen, IEEE80211_WEP_IVLEN);
+ memcpy(rc4key + IEEE80211_WEP_IVLEN, key->wk_key, key->wk_keylen);
+
+ /* Setup RC4 state */
+ for (i = 0; i < 256; i++)
+ S[i] = i;
+ j = 0;
+ keylen = key->wk_keylen + IEEE80211_WEP_IVLEN;
+ for (i = 0; i < 256; i++) {
+ j = (j + S[i] + rc4key[i % keylen]) & 0xff;
+ S_SWAP(i, j);
+ }
+
+ off = hdrlen + wep.ic_header;
+ data_len = m->m_pkthdr.len - (off + wep.ic_trailer);
+
+ /* Apply RC4 to data and compute CRC32 over decrypted data */
+ crc = ~0;
+ i = j = 0;
+ pos = mtod(m, uint8_t *) + off;
+ buflen = m->m_len - off;
+ for (;;) {
+ if (buflen > data_len)
+ buflen = data_len;
+ data_len -= buflen;
+ for (k = 0; k < buflen; k++) {
+ i = (i + 1) & 0xff;
+ j = (j + S[i]) & 0xff;
+ S_SWAP(i, j);
+ *pos ^= S[(S[i] + S[j]) & 0xff];
+ crc = crc32_table[(crc ^ *pos) & 0xff] ^ (crc >> 8);
+ pos++;
+ }
+ m = m->m_next;
+ if (m == NULL) {
+ if (data_len != 0) { /* out of data */
+ IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_CRYPTO,
+ mtod(m0, struct ieee80211_frame *)->i_addr2,
+ "out of data for WEP (data_len %zu)",
+ data_len);
+ return 0;
+ }
+ break;
+ }
+ pos = mtod(m, uint8_t *);
+ buflen = m->m_len;
+ }
+ crc = ~crc;
+
+ /* Encrypt little-endian CRC32 and verify that it matches the
+ * received ICV */
+ icv[0] = crc;
+ icv[1] = crc >> 8;
+ icv[2] = crc >> 16;
+ icv[3] = crc >> 24;
+ for (k = 0; k < IEEE80211_WEP_CRCLEN; k++) {
+ i = (i + 1) & 0xff;
+ j = (j + S[i]) & 0xff;
+ S_SWAP(i, j);
+ /* XXX assumes ICV is contiguous in mbuf */
+ if ((icv[k] ^ S[(S[i] + S[j]) & 0xff]) != *pos++) {
+ /* ICV mismatch - drop frame */
+ return 0;
+ }
+ }
+ return 1;
+#undef S_SWAP
+}
+
+/*
+ * Module glue.
+ */
+IEEE80211_CRYPTO_MODULE(wep, 1);
diff --git a/rtems/freebsd/net80211/ieee80211_ddb.c b/rtems/freebsd/net80211/ieee80211_ddb.c
new file mode 100644
index 00000000..6435a8cb
--- /dev/null
+++ b/rtems/freebsd/net80211/ieee80211_ddb.c
@@ -0,0 +1,881 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 2007-2009 Sam Leffler, Errno Consulting
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/freebsd/local/opt_ddb.h>
+#include <rtems/freebsd/local/opt_wlan.h>
+
+#ifdef DDB
+/*
+ * IEEE 802.11 DDB support
+ */
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/socket.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/if_dl.h>
+#include <rtems/freebsd/net/if_media.h>
+#include <rtems/freebsd/net/if_types.h>
+#include <rtems/freebsd/net/ethernet.h>
+#include <rtems/freebsd/net/vnet.h>
+
+#include <rtems/freebsd/net80211/ieee80211_var.h>
+#ifdef IEEE80211_SUPPORT_TDMA
+#include <rtems/freebsd/net80211/ieee80211_tdma.h>
+#endif
+#ifdef IEEE80211_SUPPORT_MESH
+#include <rtems/freebsd/net80211/ieee80211_mesh.h>
+#endif
+
+#include <rtems/freebsd/ddb/ddb.h>
+#include <rtems/freebsd/ddb/db_sym.h>
+
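+/*
+ * NB: DB_PRINTSYM prints "prefix name : symbol", resolving the
+ * address with db_printsym so function pointers in the 802.11
+ * structures are displayed symbolically when a symbol is available.
+ */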
+#define DB_PRINTSYM(prefix, name, addr) do { \
+ db_printf("%s%-25s : ", prefix, name); \
+ db_printsym((db_addr_t) addr, DB_STGY_ANY); \
+ db_printf("\n"); \
+} while (0)
+
+static void _db_show_sta(const struct ieee80211_node *);
+static void _db_show_vap(const struct ieee80211vap *, int);
+static void _db_show_com(const struct ieee80211com *,
+ int showvaps, int showsta, int showprocs);
+
+static void _db_show_node_table(const char *tag,
+ const struct ieee80211_node_table *);
+static void _db_show_channel(const char *tag, const struct ieee80211_channel *);
+static void _db_show_ssid(const char *tag, int ix, int len, const uint8_t *);
+static void _db_show_appie(const char *tag, const struct ieee80211_appie *);
+static void _db_show_key(const char *tag, int ix, const struct ieee80211_key *);
+static void _db_show_roamparams(const char *tag, const void *arg,
+ const struct ieee80211_roamparam *rp);
+static void _db_show_txparams(const char *tag, const void *arg,
+ const struct ieee80211_txparam *tp);
+static void _db_show_ageq(const char *tag, const struct ieee80211_ageq *q);
+static void _db_show_stats(const struct ieee80211_stats *);
+#ifdef IEEE80211_SUPPORT_MESH
+static void _db_show_mesh(const struct ieee80211_mesh_state *);
+#endif
+
+DB_SHOW_COMMAND(sta, db_show_sta)
+{
+ if (!have_addr) {
+ db_printf("usage: show sta <addr>\n");
+ return;
+ }
+ _db_show_sta((const struct ieee80211_node *) addr);
+}
+
+DB_SHOW_COMMAND(statab, db_show_statab)
+{
+ if (!have_addr) {
+ db_printf("usage: show statab <addr>\n");
+ return;
+ }
+ _db_show_node_table("", (const struct ieee80211_node_table *) addr);
+}
+
+DB_SHOW_COMMAND(vap, db_show_vap)
+{
+ int i, showprocs = 0;
+
+ if (!have_addr) {
+ db_printf("usage: show vap <addr>\n");
+ return;
+ }
+ for (i = 0; modif[i] != '\0'; i++)
+ switch (modif[i]) {
+ case 'a':
+ case 'p':
+ showprocs = 1;
+ break;
+ }
+ _db_show_vap((const struct ieee80211vap *) addr, showprocs);
+}
+
+DB_SHOW_COMMAND(com, db_show_com)
+{
+ const struct ieee80211com *ic;
+ int i, showprocs = 0, showvaps = 0, showsta = 0;
+
+ if (!have_addr) {
+ db_printf("usage: show com <addr>\n");
+ return;
+ }
+ for (i = 0; modif[i] != '\0'; i++)
+ switch (modif[i]) {
+ case 'a':
+ showsta = showvaps = showprocs = 1;
+ break;
+ case 's':
+ showsta = 1;
+ break;
+ case 'v':
+ showvaps = 1;
+ break;
+ case 'p':
+ showprocs = 1;
+ break;
+ }
+
+ ic = (const struct ieee80211com *) addr;
+ _db_show_com(ic, showvaps, showsta, showprocs);
+}
+
+DB_SHOW_ALL_COMMAND(vaps, db_show_all_vaps)
+{
+ VNET_ITERATOR_DECL(vnet_iter);
+ const struct ifnet *ifp;
+ int i, showall = 0;
+
+ for (i = 0; modif[i] != '\0'; i++)
+ switch (modif[i]) {
+ case 'a':
+ showall = 1;
+ break;
+ }
+
+ VNET_FOREACH(vnet_iter) {
+ TAILQ_FOREACH(ifp, &V_ifnet, if_list)
+ if (ifp->if_type == IFT_IEEE80211) {
+ const struct ieee80211com *ic = ifp->if_l2com;
+
+ if (!showall) {
+ const struct ieee80211vap *vap;
+ db_printf("%s: com %p vaps:",
+ ifp->if_xname, ic);
+ TAILQ_FOREACH(vap, &ic->ic_vaps,
+ iv_next)
+ db_printf(" %s(%p)",
+ vap->iv_ifp->if_xname, vap);
+ db_printf("\n");
+ } else
+ _db_show_com(ic, 1, 1, 1);
+ }
+ }
+}
+
+#ifdef IEEE80211_SUPPORT_MESH
+DB_SHOW_ALL_COMMAND(mesh, db_show_mesh)
+{
+ const struct ieee80211_mesh_state *ms;
+
+ if (!have_addr) {
+ db_printf("usage: show mesh <addr>\n");
+ return;
+ }
+ ms = (const struct ieee80211_mesh_state *) addr;
+ _db_show_mesh(ms);
+}
+#endif /* IEEE80211_SUPPORT_MESH */
+
+static void
+_db_show_txampdu(const char *sep, int ix, const struct ieee80211_tx_ampdu *tap)
+{
+ db_printf("%stxampdu[%d]: %p flags %b %s\n",
+ sep, ix, tap, tap->txa_flags, IEEE80211_AGGR_BITS,
+ ieee80211_wme_acnames[tap->txa_ac]);
+ db_printf("%s token %u lastsample %d pkts %d avgpps %d qbytes %d qframes %d\n",
+ sep, tap->txa_token, tap->txa_lastsample, tap->txa_pkts,
+ tap->txa_avgpps, tap->txa_qbytes, tap->txa_qframes);
+ db_printf("%s start %u seqpending %u wnd %u attempts %d nextrequest %d\n",
+ sep, tap->txa_start, tap->txa_seqpending, tap->txa_wnd,
+ tap->txa_attempts, tap->txa_nextrequest);
+ /* XXX timer */
+}
+
+static void
+_db_show_rxampdu(const char *sep, int ix, const struct ieee80211_rx_ampdu *rap)
+{
+ int i;
+
+ db_printf("%srxampdu[%d]: %p flags 0x%x tid %u\n",
+ sep, ix, rap, rap->rxa_flags, ix /* XXX */);
+ db_printf("%s qbytes %d qframes %d seqstart %u start %u wnd %u\n",
+ sep, rap->rxa_qbytes, rap->rxa_qframes,
+ rap->rxa_seqstart, rap->rxa_start, rap->rxa_wnd);
+ db_printf("%s age %d nframes %d\n", sep,
+ rap->rxa_age, rap->rxa_nframes);
+ for (i = 0; i < IEEE80211_AGGR_BAWMAX; i++)
+ if (rap->rxa_m[i] != NULL)
+ db_printf("%s m[%2u:%4u] %p\n", sep, i,
+ IEEE80211_SEQ_ADD(rap->rxa_start, i),
+ rap->rxa_m[i]);
+}
+
+static void
+_db_show_sta(const struct ieee80211_node *ni)
+{
+ int i;
+
+ db_printf("0x%p: mac %s refcnt %d\n", ni,
+ ether_sprintf(ni->ni_macaddr), ieee80211_node_refcnt(ni));
+ db_printf("\tvap %p wdsvap %p ic %p table %p\n",
+ ni->ni_vap, ni->ni_wdsvap, ni->ni_ic, ni->ni_table);
+ db_printf("\tflags=%b\n", ni->ni_flags, IEEE80211_NODE_BITS);
+ db_printf("\tscangen %u authmode %u ath_flags 0x%x ath_defkeyix %u\n",
+ ni->ni_scangen, ni->ni_authmode,
+ ni->ni_ath_flags, ni->ni_ath_defkeyix);
+ db_printf("\tassocid 0x%x txpower %u vlan %u\n",
+ ni->ni_associd, ni->ni_txpower, ni->ni_vlan);
+ db_printf("\tjointime %d (%lu secs) challenge %p\n",
+ ni->ni_jointime, (unsigned long)(time_uptime - ni->ni_jointime),
+ ni->ni_challenge);
+ db_printf("\ties: data %p len %d\n", ni->ni_ies.data, ni->ni_ies.len);
+ db_printf("\t[wpa_ie %p rsn_ie %p wme_ie %p ath_ie %p\n",
+ ni->ni_ies.wpa_ie, ni->ni_ies.rsn_ie, ni->ni_ies.wme_ie,
+ ni->ni_ies.ath_ie);
+ db_printf("\t htcap_ie %p htinfo_ie %p]\n",
+ ni->ni_ies.htcap_ie, ni->ni_ies.htinfo_ie);
+ if (ni->ni_flags & IEEE80211_NODE_QOS) {
+ for (i = 0; i < WME_NUM_TID; i++) {
+ if (ni->ni_txseqs[i] || ni->ni_rxseqs[i])
+ db_printf("\t[%u] txseq %u rxseq %u fragno %u\n",
+ i, ni->ni_txseqs[i],
+ ni->ni_rxseqs[i] >> IEEE80211_SEQ_SEQ_SHIFT,
+ ni->ni_rxseqs[i] & IEEE80211_SEQ_FRAG_MASK);
+ }
+ }
+ db_printf("\ttxseq %u rxseq %u fragno %u rxfragstamp %u\n",
+ ni->ni_txseqs[IEEE80211_NONQOS_TID],
+ ni->ni_rxseqs[IEEE80211_NONQOS_TID] >> IEEE80211_SEQ_SEQ_SHIFT,
+ ni->ni_rxseqs[IEEE80211_NONQOS_TID] & IEEE80211_SEQ_FRAG_MASK,
+ ni->ni_rxfragstamp);
+ db_printf("\trxfrag[0] %p rxfrag[1] %p rxfrag[2] %p\n",
+ ni->ni_rxfrag[0], ni->ni_rxfrag[1], ni->ni_rxfrag[2]);
+ _db_show_key("\tucastkey", 0, &ni->ni_ucastkey);
+ db_printf("\tavgrssi 0x%x (rssi %d) noise %d\n",
+ ni->ni_avgrssi, IEEE80211_RSSI_GET(ni->ni_avgrssi),
+ ni->ni_noise);
+ db_printf("\tintval %u capinfo %b\n",
+ ni->ni_intval, ni->ni_capinfo, IEEE80211_CAPINFO_BITS);
+ db_printf("\tbssid %s", ether_sprintf(ni->ni_bssid));
+ _db_show_ssid(" essid ", 0, ni->ni_esslen, ni->ni_essid);
+ db_printf("\n");
+ _db_show_channel("\tchannel", ni->ni_chan);
+ db_printf("\n");
+ db_printf("\terp %b dtim_period %u dtim_count %u\n",
+ ni->ni_erp, IEEE80211_ERP_BITS,
+ ni->ni_dtim_period, ni->ni_dtim_count);
+
+ db_printf("\thtcap %b htparam 0x%x htctlchan %u ht2ndchan %u\n",
+ ni->ni_htcap, IEEE80211_HTCAP_BITS,
+ ni->ni_htparam, ni->ni_htctlchan, ni->ni_ht2ndchan);
+ db_printf("\thtopmode 0x%x htstbc 0x%x chw %u\n",
+ ni->ni_htopmode, ni->ni_htstbc, ni->ni_chw);
+
+ /* XXX ampdu state */
+ for (i = 0; i < WME_NUM_AC; i++)
+ if (ni->ni_tx_ampdu[i].txa_flags & IEEE80211_AGGR_SETUP)
+ _db_show_txampdu("\t", i, &ni->ni_tx_ampdu[i]);
+ for (i = 0; i < WME_NUM_TID; i++)
+ if (ni->ni_rx_ampdu[i].rxa_flags)
+ _db_show_rxampdu("\t", i, &ni->ni_rx_ampdu[i]);
+
+ db_printf("\tinact %u inact_reload %u txrate %u\n",
+ ni->ni_inact, ni->ni_inact_reload, ni->ni_txrate);
+#ifdef IEEE80211_SUPPORT_MESH
+ _db_show_ssid("\tmeshid ", 0, ni->ni_meshidlen, ni->ni_meshid);
+ db_printf(" mlstate %b mllid 0x%x mlpid 0x%x mlrcnt %u mltval %u\n",
+ ni->ni_mlstate, IEEE80211_MESH_MLSTATE_BITS,
+ ni->ni_mllid, ni->ni_mlpid, ni->ni_mlrcnt, ni->ni_mltval);
+#endif
+}
+
+#ifdef IEEE80211_SUPPORT_TDMA
+static void
+_db_show_tdma(const char *sep, const struct ieee80211_tdma_state *ts, int showprocs)
+{
+ db_printf("%stdma %p:\n", sep, ts);
+ db_printf("%s version %u slot %u bintval %u peer %p\n", sep,
+ ts->tdma_version, ts->tdma_slot, ts->tdma_bintval, ts->tdma_peer);
+ db_printf("%s slotlen %u slotcnt %u", sep,
+ ts->tdma_slotlen, ts->tdma_slotcnt);
+ db_printf(" inuse 0x%x active 0x%x count %d\n",
+ ts->tdma_inuse[0], ts->tdma_active[0], ts->tdma_count);
+ if (showprocs) {
+ DB_PRINTSYM(sep, " tdma_newstate", ts->tdma_newstate);
+ DB_PRINTSYM(sep, " tdma_recv_mgmt", ts->tdma_recv_mgmt);
+ DB_PRINTSYM(sep, " tdma_opdetach", ts->tdma_opdetach);
+ }
+}
+#endif /* IEEE80211_SUPPORT_TDMA */
+
+static void
+_db_show_vap(const struct ieee80211vap *vap, int showprocs)
+{
+ const struct ieee80211com *ic = vap->iv_ic;
+ int i;
+
+ db_printf("%p:", vap);
+ db_printf(" bss %p", vap->iv_bss);
+ db_printf(" myaddr %s", ether_sprintf(vap->iv_myaddr));
+ db_printf("\n");
+
+ db_printf("\topmode %s", ieee80211_opmode_name[vap->iv_opmode]);
+ db_printf(" state %s", ieee80211_state_name[vap->iv_state]);
+ db_printf(" ifp %p(%s)", vap->iv_ifp, vap->iv_ifp->if_xname);
+ db_printf("\n");
+
+ db_printf("\tic %p", vap->iv_ic);
+ db_printf(" media %p", &vap->iv_media);
+ db_printf(" bpf_if %p", vap->iv_rawbpf);
+ db_printf(" mgtsend %p", &vap->iv_mgtsend);
+#if 0
+ struct sysctllog *iv_sysctl; /* dynamic sysctl context */
+#endif
+ db_printf("\n");
+ db_printf("\tdebug=%b\n", vap->iv_debug, IEEE80211_MSG_BITS);
+
+ db_printf("\tflags=%b\n", vap->iv_flags, IEEE80211_F_BITS);
+ db_printf("\tflags_ext=%b\n", vap->iv_flags_ext, IEEE80211_FEXT_BITS);
+ db_printf("\tflags_ht=%b\n", vap->iv_flags_ht, IEEE80211_FHT_BITS);
+ db_printf("\tflags_ven=%b\n", vap->iv_flags_ven, IEEE80211_FVEN_BITS);
+ db_printf("\tcaps=%b\n", vap->iv_caps, IEEE80211_C_BITS);
+ db_printf("\thtcaps=%b\n", vap->iv_htcaps, IEEE80211_C_HTCAP_BITS);
+
+ _db_show_stats(&vap->iv_stats);
+
+ db_printf("\tinact_init %d", vap->iv_inact_init);
+ db_printf(" inact_auth %d", vap->iv_inact_auth);
+ db_printf(" inact_run %d", vap->iv_inact_run);
+ db_printf(" inact_probe %d", vap->iv_inact_probe);
+ db_printf("\n");
+
+ db_printf("\tdes_nssid %d", vap->iv_des_nssid);
+ if (vap->iv_des_nssid)
+ _db_show_ssid(" des_ssid[%u] ", 0,
+ vap->iv_des_ssid[0].len, vap->iv_des_ssid[0].ssid);
+ db_printf(" des_bssid %s", ether_sprintf(vap->iv_des_bssid));
+ db_printf("\n");
+ db_printf("\tdes_mode %d", vap->iv_des_mode);
+ _db_show_channel(" des_chan", vap->iv_des_chan);
+ db_printf("\n");
+#if 0
+ int iv_nicknamelen; /* XXX junk */
+ uint8_t iv_nickname[IEEE80211_NWID_LEN];
+#endif
+ db_printf("\tbgscanidle %u", vap->iv_bgscanidle);
+ db_printf(" bgscanintvl %u", vap->iv_bgscanintvl);
+ db_printf(" scanvalid %u", vap->iv_scanvalid);
+ db_printf("\n");
+ db_printf("\tscanreq_duration %u", vap->iv_scanreq_duration);
+ db_printf(" scanreq_mindwell %u", vap->iv_scanreq_mindwell);
+ db_printf(" scanreq_maxdwell %u", vap->iv_scanreq_maxdwell);
+ db_printf("\n");
+ db_printf("\tscanreq_flags 0x%x", vap->iv_scanreq_flags);
+ db_printf(" scanreq_nssid %d", vap->iv_scanreq_nssid);
+ for (i = 0; i < vap->iv_scanreq_nssid; i++)
+ _db_show_ssid(" scanreq_ssid[%u]", i,
+ vap->iv_scanreq_ssid[i].len, vap->iv_scanreq_ssid[i].ssid);
+ db_printf(" roaming %d", vap->iv_roaming);
+ db_printf("\n");
+ for (i = IEEE80211_MODE_11A; i < IEEE80211_MODE_MAX; i++)
+ if (isset(ic->ic_modecaps, i)) {
+ _db_show_roamparams("\troamparms[%s]",
+ ieee80211_phymode_name[i], &vap->iv_roamparms[i]);
+ db_printf("\n");
+ }
+
+ db_printf("\tbmissthreshold %u", vap->iv_bmissthreshold);
+ db_printf(" bmiss_max %u", vap->iv_bmiss_count);
+ db_printf(" bmiss_max %d", vap->iv_bmiss_max);
+ db_printf("\n");
+ db_printf("\tswbmiss_count %u", vap->iv_swbmiss_count);
+ db_printf(" swbmiss_period %u", vap->iv_swbmiss_period);
+ db_printf(" swbmiss %p", &vap->iv_swbmiss);
+ db_printf("\n");
+
+ db_printf("\tampdu_rxmax %d", vap->iv_ampdu_rxmax);
+ db_printf(" ampdu_density %d", vap->iv_ampdu_density);
+ db_printf(" ampdu_limit %d", vap->iv_ampdu_limit);
+ db_printf(" amsdu_limit %d", vap->iv_amsdu_limit);
+ db_printf("\n");
+
+ db_printf("\tmax_aid %u", vap->iv_max_aid);
+ db_printf(" aid_bitmap %p", vap->iv_aid_bitmap);
+ db_printf("\n");
+ db_printf("\tsta_assoc %u", vap->iv_sta_assoc);
+ db_printf(" ps_sta %u", vap->iv_ps_sta);
+ db_printf(" ps_pending %u", vap->iv_ps_pending);
+ db_printf(" tim_len %u", vap->iv_tim_len);
+ db_printf(" tim_bitmap %p", vap->iv_tim_bitmap);
+ db_printf("\n");
+ db_printf("\tdtim_period %u", vap->iv_dtim_period);
+ db_printf(" dtim_count %u", vap->iv_dtim_count);
+ db_printf(" set_tim %p", vap->iv_set_tim);
+ db_printf(" csa_count %d", vap->iv_csa_count);
+ db_printf("\n");
+
+ db_printf("\trtsthreshold %u", vap->iv_rtsthreshold);
+ db_printf(" fragthreshold %u", vap->iv_fragthreshold);
+ db_printf(" inact_timer %d", vap->iv_inact_timer);
+ db_printf("\n");
+ for (i = IEEE80211_MODE_11A; i < IEEE80211_MODE_MAX; i++)
+ if (isset(ic->ic_modecaps, i)) {
+ _db_show_txparams("\ttxparms[%s]",
+ ieee80211_phymode_name[i], &vap->iv_txparms[i]);
+ db_printf("\n");
+ }
+
+ /* application-specified IE's to attach to mgt frames */
+ _db_show_appie("\tappie_beacon", vap->iv_appie_beacon);
+ _db_show_appie("\tappie_probereq", vap->iv_appie_probereq);
+ _db_show_appie("\tappie_proberesp", vap->iv_appie_proberesp);
+ _db_show_appie("\tappie_assocreq", vap->iv_appie_assocreq);
+ _db_show_appie("\tappie_asscoresp", vap->iv_appie_assocresp);
+ _db_show_appie("\tappie_wpa", vap->iv_appie_wpa);
+ if (vap->iv_wpa_ie != NULL || vap->iv_rsn_ie != NULL) {
+ if (vap->iv_wpa_ie != NULL)
+ db_printf("\twpa_ie %p", vap->iv_wpa_ie);
+ if (vap->iv_rsn_ie != NULL)
+ db_printf("\trsn_ie %p", vap->iv_rsn_ie);
+ db_printf("\n");
+ }
+ db_printf("\tmax_keyix %u", vap->iv_max_keyix);
+ db_printf(" def_txkey %d", vap->iv_def_txkey);
+ db_printf("\n");
+ for (i = 0; i < IEEE80211_WEP_NKID; i++)
+ _db_show_key("\tnw_keys[%u]", i, &vap->iv_nw_keys[i]);
+
+ db_printf("\tauth %p(%s)", vap->iv_auth, vap->iv_auth->ia_name);
+ db_printf(" ec %p", vap->iv_ec);
+
+ db_printf(" acl %p", vap->iv_acl);
+ db_printf(" as %p", vap->iv_as);
+ db_printf("\n");
+#ifdef IEEE80211_SUPPORT_TDMA
+ if (vap->iv_tdma != NULL)
+ _db_show_tdma("\t", vap->iv_tdma, showprocs);
+#endif /* IEEE80211_SUPPORT_TDMA */
+ if (showprocs) {
+ DB_PRINTSYM("\t", "iv_key_alloc", vap->iv_key_alloc);
+ DB_PRINTSYM("\t", "iv_key_delete", vap->iv_key_delete);
+ DB_PRINTSYM("\t", "iv_key_set", vap->iv_key_set);
+ DB_PRINTSYM("\t", "iv_key_update_begin", vap->iv_key_update_begin);
+ DB_PRINTSYM("\t", "iv_key_update_end", vap->iv_key_update_end);
+ DB_PRINTSYM("\t", "iv_opdetach", vap->iv_opdetach);
+ DB_PRINTSYM("\t", "iv_input", vap->iv_input);
+ DB_PRINTSYM("\t", "iv_recv_mgmt", vap->iv_recv_mgmt);
+ DB_PRINTSYM("\t", "iv_deliver_data", vap->iv_deliver_data);
+ DB_PRINTSYM("\t", "iv_bmiss", vap->iv_bmiss);
+ DB_PRINTSYM("\t", "iv_reset", vap->iv_reset);
+ DB_PRINTSYM("\t", "iv_update_beacon", vap->iv_update_beacon);
+ DB_PRINTSYM("\t", "iv_newstate", vap->iv_newstate);
+ DB_PRINTSYM("\t", "iv_output", vap->iv_output);
+ }
+}
+
+static void
+_db_show_com(const struct ieee80211com *ic, int showvaps, int showsta, int showprocs)
+{
+ struct ieee80211vap *vap;
+
+ db_printf("%p:", ic);
+ TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next)
+ db_printf(" %s(%p)", vap->iv_ifp->if_xname, vap);
+ db_printf("\n");
+ db_printf("\tifp %p(%s)", ic->ic_ifp, ic->ic_ifp->if_xname);
+ db_printf(" comlock %p", &ic->ic_comlock);
+ db_printf("\n");
+ db_printf("\theadroom %d", ic->ic_headroom);
+ db_printf(" phytype %d", ic->ic_phytype);
+ db_printf(" opmode %s", ieee80211_opmode_name[ic->ic_opmode]);
+ db_printf("\n");
+ db_printf("\tmedia %p", &ic->ic_media);
+ db_printf(" inact %p", &ic->ic_inact);
+ db_printf("\n");
+
+ db_printf("\tflags=%b\n", ic->ic_flags, IEEE80211_F_BITS);
+ db_printf("\tflags_ext=%b\n", ic->ic_flags_ext, IEEE80211_FEXT_BITS);
+ db_printf("\tflags_ht=%b\n", ic->ic_flags_ht, IEEE80211_FHT_BITS);
+ db_printf("\tflags_ven=%b\n", ic->ic_flags_ven, IEEE80211_FVEN_BITS);
+ db_printf("\tcaps=%b\n", ic->ic_caps, IEEE80211_C_BITS);
+ db_printf("\tcryptocaps=%b\n",
+ ic->ic_cryptocaps, IEEE80211_CRYPTO_BITS);
+ db_printf("\thtcaps=%b\n", ic->ic_htcaps, IEEE80211_HTCAP_BITS);
+
+#if 0
+ uint8_t ic_modecaps[2]; /* set of mode capabilities */
+#endif
+ db_printf("\tcurmode %u", ic->ic_curmode);
+ db_printf(" promisc %u", ic->ic_promisc);
+ db_printf(" allmulti %u", ic->ic_allmulti);
+ db_printf(" nrunning %u", ic->ic_nrunning);
+ db_printf("\n");
+ db_printf("\tbintval %u", ic->ic_bintval);
+ db_printf(" lintval %u", ic->ic_lintval);
+ db_printf(" holdover %u", ic->ic_holdover);
+ db_printf(" txpowlimit %u", ic->ic_txpowlimit);
+ db_printf("\n");
+#if 0
+ struct ieee80211_rateset ic_sup_rates[IEEE80211_MODE_MAX];
+#endif
+ /*
+ * Channel state:
+ *
+ * ic_channels is the set of available channels for the device;
+ * it is set up by the driver.
+ * ic_nchans is the number of valid entries in ic_channels.
+ * ic_chan_avail is a bit vector of these channels used to check
+ * whether a channel is available w/o searching the channel table.
+ * ic_chan_active is a (potentially) constrained subset of
+ * ic_chan_avail that reflects any mode setting or user-specified
+ * limit on the set of channels to use/scan.
+ * ic_curchan is the current channel the device is set to; it may
+ * differ from ic_bsschan when we are off-channel scanning
+ * or otherwise doing background work.
+ * ic_bsschan is the channel selected for operation; it may
+ * be undefined (IEEE80211_CHAN_ANYC).
+ * ic_prevchan is a cached ``previous channel'' used to optimize
+ * lookups when switching back and forth between two channels
+ * (e.g. for dynamic turbo).
+ */
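+ /*
+ * A quick sketch of how the bit vectors are meant to be used
+ * (not part of this dump routine): they are keyed by IEEE
+ * channel number, so an availability test can skip a walk of
+ * ic_channels, e.g.
+ *
+ * if (isset(ic->ic_chan_avail, c->ic_ieee))
+ * ... c may be used on this device ...
+ */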
+ db_printf("\tnchans %d", ic->ic_nchans);
+#if 0
+ struct ieee80211_channel ic_channels[IEEE80211_CHAN_MAX];
+ uint8_t ic_chan_avail[IEEE80211_CHAN_BYTES];
+ uint8_t ic_chan_active[IEEE80211_CHAN_BYTES];
+ uint8_t ic_chan_scan[IEEE80211_CHAN_BYTES];
+#endif
+ db_printf("\n");
+ _db_show_channel("\tcurchan", ic->ic_curchan);
+ db_printf("\n");
+ _db_show_channel("\tbsschan", ic->ic_bsschan);
+ db_printf("\n");
+ _db_show_channel("\tprevchan", ic->ic_prevchan);
+ db_printf("\n");
+ db_printf("\tregdomain %p", &ic->ic_regdomain);
+ db_printf("\n");
+
+ _db_show_channel("\tcsa_newchan", ic->ic_csa_newchan);
+ db_printf(" csa_count %d", ic->ic_csa_count);
+ db_printf( "dfs %p", &ic->ic_dfs);
+ db_printf("\n");
+
+ db_printf("\tscan %p", ic->ic_scan);
+ db_printf(" lastdata %d", ic->ic_lastdata);
+ db_printf(" lastscan %d", ic->ic_lastscan);
+ db_printf("\n");
+
+ db_printf("\tmax_keyix %d", ic->ic_max_keyix);
+ db_printf(" hash_key 0x%x", ic->ic_hash_key);
+ db_printf(" wme %p", &ic->ic_wme);
+ if (!showsta)
+ db_printf(" sta %p", &ic->ic_sta);
+ db_printf("\n");
+ db_printf("\tstageq@%p:\n", &ic->ic_stageq);
+ _db_show_ageq("\t", &ic->ic_stageq);
+ if (showsta)
+ _db_show_node_table("\t", &ic->ic_sta);
+
+ db_printf("\tprotmode %d", ic->ic_protmode);
+ db_printf(" nonerpsta %u", ic->ic_nonerpsta);
+ db_printf(" longslotsta %u", ic->ic_longslotsta);
+ db_printf(" lastnonerp %d", ic->ic_lastnonerp);
+ db_printf("\n");
+ db_printf("\tsta_assoc %u", ic->ic_sta_assoc);
+ db_printf(" ht_sta_assoc %u", ic->ic_ht_sta_assoc);
+ db_printf(" ht40_sta_assoc %u", ic->ic_ht40_sta_assoc);
+ db_printf("\n");
+ db_printf("\tcurhtprotmode 0x%x", ic->ic_curhtprotmode);
+ db_printf(" htprotmode %d", ic->ic_htprotmode);
+ db_printf(" lastnonht %d", ic->ic_lastnonht);
+ db_printf("\n");
+
+ db_printf("\tsuperg %p\n", ic->ic_superg);
+
+ db_printf("\tmontaps %d th %p txchan %p rh %p rxchan %p\n",
+ ic->ic_montaps, ic->ic_th, ic->ic_txchan, ic->ic_rh, ic->ic_rxchan);
+
+ if (showprocs) {
+ DB_PRINTSYM("\t", "ic_vap_create", ic->ic_vap_create);
+ DB_PRINTSYM("\t", "ic_vap_delete", ic->ic_vap_delete);
+#if 0
+ /* operating mode attachment */
+ ieee80211vap_attach ic_vattach[IEEE80211_OPMODE_MAX];
+#endif
+ DB_PRINTSYM("\t", "ic_newassoc", ic->ic_newassoc);
+ DB_PRINTSYM("\t", "ic_getradiocaps", ic->ic_getradiocaps);
+ DB_PRINTSYM("\t", "ic_setregdomain", ic->ic_setregdomain);
+ DB_PRINTSYM("\t", "ic_send_mgmt", ic->ic_send_mgmt);
+ DB_PRINTSYM("\t", "ic_raw_xmit", ic->ic_raw_xmit);
+ DB_PRINTSYM("\t", "ic_updateslot", ic->ic_updateslot);
+ DB_PRINTSYM("\t", "ic_update_mcast", ic->ic_update_mcast);
+ DB_PRINTSYM("\t", "ic_update_promisc", ic->ic_update_promisc);
+ DB_PRINTSYM("\t", "ic_node_alloc", ic->ic_node_alloc);
+ DB_PRINTSYM("\t", "ic_node_free", ic->ic_node_free);
+ DB_PRINTSYM("\t", "ic_node_cleanup", ic->ic_node_cleanup);
+ DB_PRINTSYM("\t", "ic_node_getrssi", ic->ic_node_getrssi);
+ DB_PRINTSYM("\t", "ic_node_getsignal", ic->ic_node_getsignal);
+ DB_PRINTSYM("\t", "ic_node_getmimoinfo", ic->ic_node_getmimoinfo);
+ DB_PRINTSYM("\t", "ic_scan_start", ic->ic_scan_start);
+ DB_PRINTSYM("\t", "ic_scan_end", ic->ic_scan_end);
+ DB_PRINTSYM("\t", "ic_set_channel", ic->ic_set_channel);
+ DB_PRINTSYM("\t", "ic_scan_curchan", ic->ic_scan_curchan);
+ DB_PRINTSYM("\t", "ic_scan_mindwell", ic->ic_scan_mindwell);
+ DB_PRINTSYM("\t", "ic_recv_action", ic->ic_recv_action);
+ DB_PRINTSYM("\t", "ic_send_action", ic->ic_send_action);
+ DB_PRINTSYM("\t", "ic_addba_request", ic->ic_addba_request);
+ DB_PRINTSYM("\t", "ic_addba_response", ic->ic_addba_response);
+ DB_PRINTSYM("\t", "ic_addba_stop", ic->ic_addba_stop);
+ }
+ if (showvaps && !TAILQ_EMPTY(&ic->ic_vaps)) {
+ db_printf("\n");
+ TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next)
+ _db_show_vap(vap, showprocs);
+ }
+ if (showsta && !TAILQ_EMPTY(&ic->ic_sta.nt_node)) {
+ const struct ieee80211_node_table *nt = &ic->ic_sta;
+ const struct ieee80211_node *ni;
+
+ TAILQ_FOREACH(ni, &nt->nt_node, ni_list) {
+ db_printf("\n");
+ _db_show_sta(ni);
+ }
+ }
+}
+
+static void
+_db_show_node_table(const char *tag, const struct ieee80211_node_table *nt)
+{
+ int i;
+
+ db_printf("%s%s@%p:\n", tag, nt->nt_name, nt);
+ db_printf("%s nodelock %p", tag, &nt->nt_nodelock);
+ db_printf(" inact_init %d", nt->nt_inact_init);
+ db_printf(" scanlock %p", &nt->nt_scanlock);
+ db_printf(" scangen %u\n", nt->nt_scangen);
+ db_printf("%s keyixmax %d keyixmap %p\n",
+ tag, nt->nt_keyixmax, nt->nt_keyixmap);
+ for (i = 0; i < nt->nt_keyixmax; i++) {
+ const struct ieee80211_node *ni = nt->nt_keyixmap[i];
+ if (ni != NULL)
+ db_printf("%s [%3u] %p %s\n", tag, i, ni,
+ ether_sprintf(ni->ni_macaddr));
+ }
+}
+
+static void
+_db_show_channel(const char *tag, const struct ieee80211_channel *c)
+{
+ db_printf("%s ", tag);
+ if (c == NULL)
+ db_printf("<NULL>");
+ else if (c == IEEE80211_CHAN_ANYC)
+ db_printf("<ANY>");
+ else
+ db_printf("[%u (%u) flags=%b maxreg %d maxpow %d minpow %d state 0x%x extieee %u]",
+ c->ic_freq, c->ic_ieee,
+ c->ic_flags, IEEE80211_CHAN_BITS,
+ c->ic_maxregpower, c->ic_maxpower, c->ic_minpower,
+ c->ic_state, c->ic_extieee);
+}
+
+static void
+_db_show_ssid(const char *tag, int ix, int len, const uint8_t *ssid)
+{
+ const uint8_t *p;
+ int i;
+
+ db_printf(tag, ix);
+
+ if (len > IEEE80211_NWID_LEN)
+ len = IEEE80211_NWID_LEN;
+ /* determine printable or not */
+ for (i = 0, p = ssid; i < len; i++, p++) {
+ if (*p < ' ' || *p > 0x7e)
+ break;
+ }
+ if (i == len) {
+ db_printf("\"");
+ for (i = 0, p = ssid; i < len; i++, p++)
+ db_printf("%c", *p);
+ db_printf("\"");
+ } else {
+ db_printf("0x");
+ for (i = 0, p = ssid; i < len; i++, p++)
+ db_printf("%02x", *p);
+ }
+}
+
+static void
+_db_show_appie(const char *tag, const struct ieee80211_appie *ie)
+{
+ const uint8_t *p;
+ int i;
+
+ if (ie == NULL)
+ return;
+ db_printf("%s [0x", tag);
+ for (i = 0, p = ie->ie_data; i < ie->ie_len; i++, p++)
+ db_printf("%02x", *p);
+ db_printf("]\n");
+}
+
+static void
+_db_show_key(const char *tag, int ix, const struct ieee80211_key *wk)
+{
+ static const uint8_t zerodata[IEEE80211_KEYBUF_SIZE];
+ const struct ieee80211_cipher *cip = wk->wk_cipher;
+ int keylen = wk->wk_keylen;
+
+ db_printf(tag, ix);
+ switch (cip->ic_cipher) {
+ case IEEE80211_CIPHER_WEP:
+ /* compatibility */
+ db_printf(" wepkey %u:%s", wk->wk_keyix,
+ keylen <= 5 ? "40-bit" :
+ keylen <= 13 ? "104-bit" : "128-bit");
+ break;
+ case IEEE80211_CIPHER_TKIP:
+ if (keylen > 128/8)
+ keylen -= 128/8; /* ignore MIC for now */
+ db_printf(" TKIP %u:%u-bit", wk->wk_keyix, 8*keylen);
+ break;
+ case IEEE80211_CIPHER_AES_OCB:
+ db_printf(" AES-OCB %u:%u-bit", wk->wk_keyix, 8*keylen);
+ break;
+ case IEEE80211_CIPHER_AES_CCM:
+ db_printf(" AES-CCM %u:%u-bit", wk->wk_keyix, 8*keylen);
+ break;
+ case IEEE80211_CIPHER_CKIP:
+ db_printf(" CKIP %u:%u-bit", wk->wk_keyix, 8*keylen);
+ break;
+ case IEEE80211_CIPHER_NONE:
+ db_printf(" NULL %u:%u-bit", wk->wk_keyix, 8*keylen);
+ break;
+ default:
+ db_printf(" UNKNOWN (0x%x) %u:%u-bit",
+ cip->ic_cipher, wk->wk_keyix, 8*keylen);
+ break;
+ }
+ if (wk->wk_rxkeyix != wk->wk_keyix)
+ db_printf(" rxkeyix %u", wk->wk_rxkeyix);
+ if (memcmp(wk->wk_key, zerodata, keylen) != 0) {
+ int i;
+
+ db_printf(" <");
+ for (i = 0; i < keylen; i++)
+ db_printf("%02x", wk->wk_key[i]);
+ db_printf(">");
+ if (cip->ic_cipher != IEEE80211_CIPHER_WEP &&
+ wk->wk_keyrsc[IEEE80211_NONQOS_TID] != 0)
+ db_printf(" rsc %ju", (uintmax_t)wk->wk_keyrsc[IEEE80211_NONQOS_TID]);
+ if (cip->ic_cipher != IEEE80211_CIPHER_WEP &&
+ wk->wk_keytsc != 0)
+ db_printf(" tsc %ju", (uintmax_t)wk->wk_keytsc);
+ db_printf(" flags=%b", wk->wk_flags, IEEE80211_KEY_BITS);
+ }
+ db_printf("\n");
+}
+
+static void
+printrate(const char *tag, int v)
+{
+ if (v == IEEE80211_FIXED_RATE_NONE)
+ db_printf(" %s <none>", tag);
+ else if (v == 11)
+ db_printf(" %s 5.5", tag);
+ else if (v & IEEE80211_RATE_MCS)
+ db_printf(" %s MCS%d", tag, v &~ IEEE80211_RATE_MCS);
+ else
+ db_printf(" %s %d", tag, v/2);
+}
+
+static void
+_db_show_roamparams(const char *tag, const void *arg,
+ const struct ieee80211_roamparam *rp)
+{
+
+ db_printf(tag, arg);
+ if (rp->rssi & 1)
+ db_printf(" rssi %u.5", rp->rssi/2);
+ else
+ db_printf(" rssi %u", rp->rssi/2);
+ printrate("rate", rp->rate);
+}
+
+static void
+_db_show_txparams(const char *tag, const void *arg,
+ const struct ieee80211_txparam *tp)
+{
+
+ db_printf(tag, arg);
+ printrate("ucastrate", tp->ucastrate);
+ printrate("mcastrate", tp->mcastrate);
+ printrate("mgmtrate", tp->mgmtrate);
+ db_printf(" maxretry %d", tp->maxretry);
+}
+
+static void
+_db_show_ageq(const char *tag, const struct ieee80211_ageq *q)
+{
+ const struct mbuf *m;
+
+ db_printf("%s lock %p len %d maxlen %d drops %d head %p tail %p\n",
+ tag, &q->aq_lock, q->aq_len, q->aq_maxlen, q->aq_drops,
+ q->aq_head, q->aq_tail);
+ for (m = q->aq_head; m != NULL; m = m->m_nextpkt)
+ db_printf("%s %p (len %d, %b)\n", tag, m, m->m_len,
+ /* XXX could be either TX or RX but is mostly TX */
+ m->m_flags, IEEE80211_MBUF_TX_FLAG_BITS);
+}
+
+static void
+_db_show_stats(const struct ieee80211_stats *is)
+{
+}
+
+#ifdef IEEE80211_SUPPORT_MESH
+static void
+_db_show_mesh(const struct ieee80211_mesh_state *ms)
+{
+ struct ieee80211_mesh_route *rt;
+ int i;
+
+ _db_show_ssid(" meshid ", 0, ms->ms_idlen, ms->ms_id);
+ db_printf("nextseq %u ttl %u flags 0x%x\n", ms->ms_seq,
+ ms->ms_ttl, ms->ms_flags);
+ db_printf("routing table:\n");
+ i = 0;
+ TAILQ_FOREACH(rt, &ms->ms_routes, rt_next) {
+ db_printf("entry %d:\tdest: %6D nexthop: %6D metric: %u", i,
+ rt->rt_dest, ":", rt->rt_nexthop, ":", rt->rt_metric);
+ db_printf("\tlifetime: %u lastseq: %u priv: %p\n",
+ rt->rt_lifetime, rt->rt_lastmseq, rt->rt_priv);
+ i++;
+ }
+}
+#endif /* IEEE80211_SUPPORT_MESH */
+#endif /* DDB */
diff --git a/rtems/freebsd/net80211/ieee80211_dfs.c b/rtems/freebsd/net80211/ieee80211_dfs.c
new file mode 100644
index 00000000..90f7b51f
--- /dev/null
+++ b/rtems/freebsd/net80211/ieee80211_dfs.c
@@ -0,0 +1,379 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 2007-2008 Sam Leffler, Errno Consulting
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+#ifdef __FreeBSD__
+__FBSDID("$FreeBSD$");
+#endif
+
+/*
+ * IEEE 802.11 DFS/Radar support.
+ */
+#include <rtems/freebsd/local/opt_inet.h>
+#include <rtems/freebsd/local/opt_wlan.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/kernel.h>
+
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/sockio.h>
+#include <rtems/freebsd/sys/endian.h>
+#include <rtems/freebsd/sys/errno.h>
+#include <rtems/freebsd/sys/proc.h>
+#include <rtems/freebsd/sys/sysctl.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/if_media.h>
+
+#include <rtems/freebsd/net80211/ieee80211_var.h>
+
+MALLOC_DEFINE(M_80211_DFS, "80211dfs", "802.11 DFS state");
+
+static int ieee80211_nol_timeout = 30*60; /* 30 minutes */
+SYSCTL_INT(_net_wlan, OID_AUTO, nol_timeout, CTLFLAG_RW,
+ &ieee80211_nol_timeout, 0, "NOL timeout (secs)");
+#define NOL_TIMEOUT msecs_to_ticks(ieee80211_nol_timeout*1000)
+
+static int ieee80211_cac_timeout = 60; /* 60 seconds */
+SYSCTL_INT(_net_wlan, OID_AUTO, cac_timeout, CTLFLAG_RW,
+ &ieee80211_cac_timeout, 0, "CAC timeout (secs)");
+#define CAC_TIMEOUT msecs_to_ticks(ieee80211_cac_timeout*1000)
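+/*
+ * Worked example for the conversions above, assuming a hypothetical
+ * hz of 1000 ticks/sec: the default ieee80211_cac_timeout of 60 secs
+ * yields CAC_TIMEOUT = msecs_to_ticks(60 * 1000) = 60000 ticks, and
+ * ticks_to_secs() maps that back to 60 for the console messages below.
+ */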
+
+void
+ieee80211_dfs_attach(struct ieee80211com *ic)
+{
+ struct ieee80211_dfs_state *dfs = &ic->ic_dfs;
+
+ callout_init_mtx(&dfs->nol_timer, IEEE80211_LOCK_OBJ(ic), 0);
+ callout_init_mtx(&dfs->cac_timer, IEEE80211_LOCK_OBJ(ic), 0);
+}
+
+void
+ieee80211_dfs_detach(struct ieee80211com *ic)
+{
+ /* NB: we assume no locking is needed */
+ ieee80211_dfs_reset(ic);
+}
+
+void
+ieee80211_dfs_reset(struct ieee80211com *ic)
+{
+ struct ieee80211_dfs_state *dfs = &ic->ic_dfs;
+ int i;
+
+ /* NB: we assume no locking is needed */
+ /* NB: cac_timer should be cleared by the state machine */
+ callout_drain(&dfs->nol_timer);
+ for (i = 0; i < ic->ic_nchans; i++)
+ ic->ic_channels[i].ic_state = 0;
+ dfs->lastchan = NULL;
+}
+
+static void
+cac_timeout(void *arg)
+{
+ struct ieee80211vap *vap = arg;
+ struct ieee80211com *ic = vap->iv_ic;
+ struct ieee80211_dfs_state *dfs = &ic->ic_dfs;
+ int i;
+
+ IEEE80211_LOCK_ASSERT(ic);
+
+ if (vap->iv_state != IEEE80211_S_CAC) /* NB: just in case */
+ return;
+ /*
+ * When radar is detected during a CAC we are woken
+ * up prematurely to switch to a new channel.
+ * Check the channel to decide how to act.
+ */
+ if (IEEE80211_IS_CHAN_RADAR(ic->ic_curchan)) {
+ ieee80211_notify_cac(ic, ic->ic_curchan,
+ IEEE80211_NOTIFY_CAC_RADAR);
+
+ if_printf(vap->iv_ifp,
+ "CAC timer on channel %u (%u MHz) stopped due to radar\n",
+ ic->ic_curchan->ic_ieee, ic->ic_curchan->ic_freq);
+
+ /* XXX clobbers any existing desired channel */
+ /* NB: dfs->newchan may be NULL, that's ok */
+ vap->iv_des_chan = dfs->newchan;
+ /* XXX recursive lock need ieee80211_new_state_locked */
+ ieee80211_new_state(vap, IEEE80211_S_SCAN, 0);
+ } else {
+ if_printf(vap->iv_ifp,
+ "CAC timer on channel %u (%u MHz) expired; "
+ "no radar detected\n",
+ ic->ic_curchan->ic_ieee, ic->ic_curchan->ic_freq);
+ /*
+ * Mark all channels with the current frequency
+ * as having completed CAC; this keeps us from
+ * doing it again until we change channels.
+ */
+ for (i = 0; i < ic->ic_nchans; i++) {
+ struct ieee80211_channel *c = &ic->ic_channels[i];
+ if (c->ic_freq == ic->ic_curchan->ic_freq)
+ c->ic_state |= IEEE80211_CHANSTATE_CACDONE;
+ }
+ ieee80211_notify_cac(ic, ic->ic_curchan,
+ IEEE80211_NOTIFY_CAC_EXPIRE);
+ ieee80211_cac_completeswitch(vap);
+ }
+}
+
+/*
+ * Initiate the CAC timer. The driver is responsible
+ * for setting up the hardware to scan for radar on the
+ * channel; we just handle timing things out.
+ */
+void
+ieee80211_dfs_cac_start(struct ieee80211vap *vap)
+{
+ struct ieee80211com *ic = vap->iv_ic;
+ struct ieee80211_dfs_state *dfs = &ic->ic_dfs;
+
+ IEEE80211_LOCK_ASSERT(ic);
+
+ callout_reset(&dfs->cac_timer, CAC_TIMEOUT, cac_timeout, vap);
+ if_printf(vap->iv_ifp, "start %d second CAC timer on channel %u (%u MHz)\n",
+ ticks_to_secs(CAC_TIMEOUT),
+ ic->ic_curchan->ic_ieee, ic->ic_curchan->ic_freq);
+ ieee80211_notify_cac(ic, ic->ic_curchan, IEEE80211_NOTIFY_CAC_START);
+}
+
+/*
+ * Clear the CAC timer.
+ */
+void
+ieee80211_dfs_cac_stop(struct ieee80211vap *vap)
+{
+ struct ieee80211com *ic = vap->iv_ic;
+ struct ieee80211_dfs_state *dfs = &ic->ic_dfs;
+
+ IEEE80211_LOCK_ASSERT(ic);
+
+ /* NB: racy but not important */
+ if (callout_pending(&dfs->cac_timer)) {
+ if_printf(vap->iv_ifp, "stop CAC timer on channel %u (%u MHz)\n",
+ ic->ic_curchan->ic_ieee, ic->ic_curchan->ic_freq);
+ ieee80211_notify_cac(ic, ic->ic_curchan,
+ IEEE80211_NOTIFY_CAC_STOP);
+ }
+ callout_stop(&dfs->cac_timer);
+}
+
+void
+ieee80211_dfs_cac_clear(struct ieee80211com *ic,
+ const struct ieee80211_channel *chan)
+{
+ int i;
+
+ for (i = 0; i < ic->ic_nchans; i++) {
+ struct ieee80211_channel *c = &ic->ic_channels[i];
+ if (c->ic_freq == chan->ic_freq)
+ c->ic_state &= ~IEEE80211_CHANSTATE_CACDONE;
+ }
+}
+
+static void
+dfs_timeout(void *arg)
+{
+ struct ieee80211com *ic = arg;
+ struct ieee80211_dfs_state *dfs = &ic->ic_dfs;
+ struct ieee80211_channel *c;
+ int i, oldest, now;
+
+ IEEE80211_LOCK_ASSERT(ic);
+
+ now = oldest = ticks;
+ for (i = 0; i < ic->ic_nchans; i++) {
+ c = &ic->ic_channels[i];
+ if (IEEE80211_IS_CHAN_RADAR(c)) {
+ if (time_after_eq(now, dfs->nol_event[i]+NOL_TIMEOUT)) {
+ c->ic_state &= ~IEEE80211_CHANSTATE_RADAR;
+ if (c->ic_state & IEEE80211_CHANSTATE_NORADAR) {
+ /*
+ * NB: do this here so we get only one
+ * msg instead of one for every channel
+ * table entry.
+ */
+ if_printf(ic->ic_ifp, "radar on channel"
+ " %u (%u MHz) cleared after timeout\n",
+ c->ic_ieee, c->ic_freq);
+ /* notify user space */
+ c->ic_state &=
+ ~IEEE80211_CHANSTATE_NORADAR;
+ ieee80211_notify_radar(ic, c);
+ }
+ } else if (dfs->nol_event[i] < oldest)
+ oldest = dfs->nol_event[i];
+ }
+ }
+ if (oldest != now) {
+ /* arrange to process next channel up for a status change */
+ callout_schedule(&dfs->nol_timer, oldest + NOL_TIMEOUT - now);
+ }
+}
+
+static void
+announce_radar(struct ifnet *ifp, const struct ieee80211_channel *curchan,
+ const struct ieee80211_channel *newchan)
+{
+ if (newchan == NULL)
+ if_printf(ifp, "radar detected on channel %u (%u MHz)\n",
+ curchan->ic_ieee, curchan->ic_freq);
+ else
+ if_printf(ifp, "radar detected on channel %u (%u MHz), "
+ "moving to channel %u (%u MHz)\n",
+ curchan->ic_ieee, curchan->ic_freq,
+ newchan->ic_ieee, newchan->ic_freq);
+}
+
+/*
+ * Handle a radar detection event on a channel. The channel is
+ * added to the NOL list and we record the time of the event.
+ * Entries are aged out after NOL_TIMEOUT. If radar was
+ * detected while doing CAC we force a state/channel change.
+ * Otherwise radar triggers a channel switch using the CSA
+ * mechanism (when the channel is the bss channel).
+ */
+void
+ieee80211_dfs_notify_radar(struct ieee80211com *ic, struct ieee80211_channel *chan)
+{
+ struct ieee80211_dfs_state *dfs = &ic->ic_dfs;
+ int i, now;
+
+ IEEE80211_LOCK_ASSERT(ic);
+
+ /*
+ * Mark all entries with this frequency. Notify user
+ * space and arrange for notification when the radar
+ * indication is cleared. Then kick the NOL processing
+ * thread if not already running.
+ */
+ now = ticks;
+ for (i = 0; i < ic->ic_nchans; i++) {
+ struct ieee80211_channel *c = &ic->ic_channels[i];
+ if (c->ic_freq == chan->ic_freq) {
+ c->ic_state &= ~IEEE80211_CHANSTATE_CACDONE;
+ c->ic_state |= IEEE80211_CHANSTATE_RADAR;
+ dfs->nol_event[i] = now;
+ }
+ }
+ ieee80211_notify_radar(ic, chan);
+ chan->ic_state |= IEEE80211_CHANSTATE_NORADAR;
+ if (!callout_pending(&dfs->nol_timer))
+ callout_reset(&dfs->nol_timer, NOL_TIMEOUT, dfs_timeout, ic);
+
+ /*
+ * If radar is detected on the bss channel while
+ * doing CAC, force a state change by scheduling the
+ * callout to be dispatched asap. Otherwise, if this
+ * event is for the bss channel then we must quiet
+ * traffic and schedule a channel switch.
+ *
+ * Note this allows us to receive notification about
+ * channels other than the bss channel; not sure
+ * that can/will happen but it's simple to support.
+ */
+ if (chan == ic->ic_bsschan) {
+ /* XXX need a way to defer to user app */
+ dfs->newchan = ieee80211_dfs_pickchannel(ic);
+
+ announce_radar(ic->ic_ifp, chan, dfs->newchan);
+
+ if (callout_pending(&dfs->cac_timer))
+ callout_schedule(&dfs->cac_timer, 0);
+ else if (dfs->newchan != NULL) {
+ /* XXX mode 1, switch count 2 */
+ /* XXX calculate switch count based on max
+ switch time and beacon interval? */
+ ieee80211_csa_startswitch(ic, dfs->newchan, 1, 2);
+ } else {
+ /*
+ * Spec says to stop all transmissions and
+ * wait on the current channel for an entry
+ * on the NOL to expire.
+ */
+ /*XXX*/
+ }
+ } else {
+ /*
+ * Issue rate-limited console msgs.
+ */
+ if (dfs->lastchan != chan) {
+ dfs->lastchan = chan;
+ dfs->cureps = 0;
+ announce_radar(ic->ic_ifp, chan, NULL);
+ } else if (ppsratecheck(&dfs->lastevent, &dfs->cureps, 1)) {
+ announce_radar(ic->ic_ifp, chan, NULL);
+ }
+ }
+}
+
+struct ieee80211_channel *
+ieee80211_dfs_pickchannel(struct ieee80211com *ic)
+{
+ struct ieee80211_channel *c;
+ int i, flags;
+ uint16_t v;
+
+ /*
+ * Consult the scan cache first.
+ */
+ flags = ic->ic_curchan->ic_flags & IEEE80211_CHAN_ALL;
+ /*
+ * XXX if curchan is HT this will never find a channel
+ * XXX 'cuz we scan only legacy channels
+ */
+ c = ieee80211_scan_pickchannel(ic, flags);
+ if (c != NULL)
+ return c;
+ /*
+ * No channel found in scan cache; select a compatible
+ * one at random (skipping channels where radar has
+ * been detected).
+ */
+ get_random_bytes(&v, sizeof(v));
+ v %= ic->ic_nchans;
+ for (i = v; i < ic->ic_nchans; i++) {
+ c = &ic->ic_channels[i];
+ if (!IEEE80211_IS_CHAN_RADAR(c) &&
+ (c->ic_flags & flags) == flags)
+ return c;
+ }
+ for (i = 0; i < v; i++) {
+ c = &ic->ic_channels[i];
+ if (!IEEE80211_IS_CHAN_RADAR(c) &&
+ (c->ic_flags & flags) == flags)
+ return c;
+ }
+ if_printf(ic->ic_ifp, "HELP, no channel located to switch to!\n");
+ return NULL;
+}
diff --git a/rtems/freebsd/net80211/ieee80211_dfs.h b/rtems/freebsd/net80211/ieee80211_dfs.h
new file mode 100644
index 00000000..474b2078
--- /dev/null
+++ b/rtems/freebsd/net80211/ieee80211_dfs.h
@@ -0,0 +1,57 @@
+/*-
+ * Copyright (c) 2007-2008 Sam Leffler, Errno Consulting
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+#ifndef _NET80211_IEEE80211_DFS_HH_
+#define _NET80211_IEEE80211_DFS_HH_
+
+/*
+ * 802.11h/DFS definitions.
+ */
+
+struct ieee80211_dfs_state {
+ int nol_event[IEEE80211_CHAN_MAX]; /* radar event time (ticks), one per ic_channels entry */
+ struct callout nol_timer; /* NOL list processing */
+ struct callout cac_timer; /* CAC timer */
+ struct timeval lastevent; /* time of last radar event */
+ int cureps; /* current events/second */
+ const struct ieee80211_channel *lastchan;/* chan w/ last radar event */
+ struct ieee80211_channel *newchan; /* chan selected next */
+};
+
+void ieee80211_dfs_attach(struct ieee80211com *);
+void ieee80211_dfs_detach(struct ieee80211com *);
+
+void ieee80211_dfs_reset(struct ieee80211com *);
+
+void ieee80211_dfs_cac_start(struct ieee80211vap *);
+void ieee80211_dfs_cac_stop(struct ieee80211vap *);
+void ieee80211_dfs_cac_clear(struct ieee80211com *,
+ const struct ieee80211_channel *);
+
+void ieee80211_dfs_notify_radar(struct ieee80211com *,
+ struct ieee80211_channel *);
+struct ieee80211_channel *ieee80211_dfs_pickchannel(struct ieee80211com *);
+#endif /* _NET80211_IEEE80211_DFS_HH_ */
diff --git a/rtems/freebsd/net80211/ieee80211_freebsd.c b/rtems/freebsd/net80211/ieee80211_freebsd.c
new file mode 100644
index 00000000..3d07170b
--- /dev/null
+++ b/rtems/freebsd/net80211/ieee80211_freebsd.c
@@ -0,0 +1,831 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 2003-2009 Sam Leffler, Errno Consulting
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * IEEE 802.11 support (FreeBSD-specific code)
+ */
+#include <rtems/freebsd/local/opt_wlan.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/linker.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/module.h>
+#include <rtems/freebsd/sys/proc.h>
+#include <rtems/freebsd/sys/sysctl.h>
+
+#include <rtems/freebsd/sys/socket.h>
+
+#include <rtems/freebsd/net/bpf.h>
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/if_dl.h>
+#include <rtems/freebsd/net/if_clone.h>
+#include <rtems/freebsd/net/if_media.h>
+#include <rtems/freebsd/net/if_types.h>
+#include <rtems/freebsd/net/ethernet.h>
+#include <rtems/freebsd/net/route.h>
+#include <rtems/freebsd/net/vnet.h>
+
+#include <rtems/freebsd/net80211/ieee80211_var.h>
+#include <rtems/freebsd/net80211/ieee80211_input.h>
+
+SYSCTL_NODE(_net, OID_AUTO, wlan, CTLFLAG_RD, 0, "IEEE 80211 parameters");
+
+#ifdef IEEE80211_DEBUG
+int ieee80211_debug = 0;
+SYSCTL_INT(_net_wlan, OID_AUTO, debug, CTLFLAG_RW, &ieee80211_debug,
+ 0, "debugging printfs");
+#endif
+
+MALLOC_DEFINE(M_80211_COM, "80211com", "802.11 com state");
+
+/*
+ * Allocate/free com structure in conjunction with ifnet;
+ * these routines are registered with if_register_com_alloc
+ * below and are called automatically by the ifnet code
+ * when the ifnet of the parent device is created.
+ */
+static void *
+wlan_alloc(u_char type, struct ifnet *ifp)
+{
+ struct ieee80211com *ic;
+
+ ic = malloc(sizeof(struct ieee80211com), M_80211_COM, M_WAITOK|M_ZERO);
+ ic->ic_ifp = ifp;
+
+ return (ic);
+}
+
+static void
+wlan_free(void *ic, u_char type)
+{
+ free(ic, M_80211_COM);
+}
+
+static int
+wlan_clone_create(struct if_clone *ifc, int unit, caddr_t params)
+{
+ struct ieee80211_clone_params cp;
+ struct ieee80211vap *vap;
+ struct ieee80211com *ic;
+ struct ifnet *ifp;
+ int error;
+
+ error = copyin(params, &cp, sizeof(cp));
+ if (error)
+ return error;
+ ifp = ifunit(cp.icp_parent);
+ if (ifp == NULL)
+ return ENXIO;
+ /* XXX move printfs to DIAGNOSTIC before release */
+ if (ifp->if_type != IFT_IEEE80211) {
+ if_printf(ifp, "%s: reject, not an 802.11 device\n", __func__);
+ return ENXIO;
+ }
+ if (cp.icp_opmode >= IEEE80211_OPMODE_MAX) {
+ if_printf(ifp, "%s: invalid opmode %d\n",
+ __func__, cp.icp_opmode);
+ return EINVAL;
+ }
+ ic = ifp->if_l2com;
+ if ((ic->ic_caps & ieee80211_opcap[cp.icp_opmode]) == 0) {
+ if_printf(ifp, "%s mode not supported\n",
+ ieee80211_opmode_name[cp.icp_opmode]);
+ return EOPNOTSUPP;
+ }
+ if ((cp.icp_flags & IEEE80211_CLONE_TDMA) &&
+#ifdef IEEE80211_SUPPORT_TDMA
+ (ic->ic_caps & IEEE80211_C_TDMA) == 0
+#else
+ (1)
+#endif
+ ) {
+ if_printf(ifp, "TDMA not supported\n");
+ return EOPNOTSUPP;
+ }
+ vap = ic->ic_vap_create(ic, ifc->ifc_name, unit,
+ cp.icp_opmode, cp.icp_flags, cp.icp_bssid,
+ cp.icp_flags & IEEE80211_CLONE_MACADDR ?
+ cp.icp_macaddr : (const uint8_t *)IF_LLADDR(ifp));
+ return (vap == NULL ? EIO : 0);
+}
+
+static void
+wlan_clone_destroy(struct ifnet *ifp)
+{
+ struct ieee80211vap *vap = ifp->if_softc;
+ struct ieee80211com *ic = vap->iv_ic;
+
+ ic->ic_vap_delete(vap);
+}
+IFC_SIMPLE_DECLARE(wlan, 0);
+
+void
+ieee80211_vap_destroy(struct ieee80211vap *vap)
+{
+ if_clone_destroyif(&wlan_cloner, vap->iv_ifp);
+}
+
+int
+ieee80211_sysctl_msecs_ticks(SYSCTL_HANDLER_ARGS)
+{
+ int msecs = ticks_to_msecs(*(int *)arg1);
+ int error, t;
+
+ error = sysctl_handle_int(oidp, &msecs, 0, req);
+ if (error || !req->newptr)
+ return error;
+ t = msecs_to_ticks(msecs);
+ *(int *)arg1 = (t < 1) ? 1 : t;
+ return 0;
+}
+
+static int
+ieee80211_sysctl_inact(SYSCTL_HANDLER_ARGS)
+{
+ int inact = (*(int *)arg1) * IEEE80211_INACT_WAIT;
+ int error;
+
+ error = sysctl_handle_int(oidp, &inact, 0, req);
+ if (error || !req->newptr)
+ return error;
+ *(int *)arg1 = inact / IEEE80211_INACT_WAIT;
+ return 0;
+}
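+
+/*
+ * Worked example for the handler above (IEEE80211_INACT_WAIT is the
+ * inactivity interval length, 15 secs in ieee80211_node.h): a stored
+ * value of 20 intervals reads back as 20 * 15 = 300 secs, and writing
+ * 300 stores 300 / 15 = 20.
+ */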
+
+static int
+ieee80211_sysctl_parent(SYSCTL_HANDLER_ARGS)
+{
+ struct ieee80211com *ic = arg1;
+ const char *name = ic->ic_ifp->if_xname;
+
+ return SYSCTL_OUT(req, name, strlen(name));
+}
+
+static int
+ieee80211_sysctl_radar(SYSCTL_HANDLER_ARGS)
+{
+ struct ieee80211com *ic = arg1;
+ int t = 0, error;
+
+ error = sysctl_handle_int(oidp, &t, 0, req);
+ if (error || !req->newptr)
+ return error;
+ IEEE80211_LOCK(ic);
+ ieee80211_dfs_notify_radar(ic, ic->ic_curchan);
+ IEEE80211_UNLOCK(ic);
+ return 0;
+}
+
+void
+ieee80211_sysctl_attach(struct ieee80211com *ic)
+{
+}
+
+void
+ieee80211_sysctl_detach(struct ieee80211com *ic)
+{
+}
+
+void
+ieee80211_sysctl_vattach(struct ieee80211vap *vap)
+{
+ struct ifnet *ifp = vap->iv_ifp;
+ struct sysctl_ctx_list *ctx;
+ struct sysctl_oid *oid;
+ char num[14]; /* sufficient for 32 bits */
+
+ ctx = (struct sysctl_ctx_list *) malloc(sizeof(struct sysctl_ctx_list),
+ M_DEVBUF, M_NOWAIT | M_ZERO);
+ if (ctx == NULL) {
+ if_printf(ifp, "%s: cannot allocate sysctl context!\n",
+ __func__);
+ return;
+ }
+ sysctl_ctx_init(ctx);
+ snprintf(num, sizeof(num), "%u", ifp->if_dunit);
+ oid = SYSCTL_ADD_NODE(ctx, &SYSCTL_NODE_CHILDREN(_net, wlan),
+ OID_AUTO, num, CTLFLAG_RD, NULL, "");
+ SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
+ "%parent", CTLFLAG_RD, vap->iv_ic, 0,
+ ieee80211_sysctl_parent, "A", "parent device");
+ SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
+ "driver_caps", CTLFLAG_RW, &vap->iv_caps, 0,
+ "driver capabilities");
+#ifdef IEEE80211_DEBUG
+ vap->iv_debug = ieee80211_debug;
+ SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
+ "debug", CTLFLAG_RW, &vap->iv_debug, 0,
+ "control debugging printfs");
+#endif
+ SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
+ "bmiss_max", CTLFLAG_RW, &vap->iv_bmiss_max, 0,
+ "consecutive beacon misses before scanning");
+ /* XXX inherit from tunables */
+ SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
+ "inact_run", CTLTYPE_INT | CTLFLAG_RW, &vap->iv_inact_run, 0,
+ ieee80211_sysctl_inact, "I",
+ "station inactivity timeout (sec)");
+ SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
+ "inact_probe", CTLTYPE_INT | CTLFLAG_RW, &vap->iv_inact_probe, 0,
+ ieee80211_sysctl_inact, "I",
+ "station inactivity probe timeout (sec)");
+ SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
+ "inact_auth", CTLTYPE_INT | CTLFLAG_RW, &vap->iv_inact_auth, 0,
+ ieee80211_sysctl_inact, "I",
+ "station authentication timeout (sec)");
+ SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
+ "inact_init", CTLTYPE_INT | CTLFLAG_RW, &vap->iv_inact_init, 0,
+ ieee80211_sysctl_inact, "I",
+ "station initial state timeout (sec)");
+ if (vap->iv_htcaps & IEEE80211_HTC_HT) {
+ SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
+ "ampdu_mintraffic_bk", CTLFLAG_RW,
+ &vap->iv_ampdu_mintraffic[WME_AC_BK], 0,
+ "BK traffic tx aggr threshold (pps)");
+ SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
+ "ampdu_mintraffic_be", CTLFLAG_RW,
+ &vap->iv_ampdu_mintraffic[WME_AC_BE], 0,
+ "BE traffic tx aggr threshold (pps)");
+ SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
+ "ampdu_mintraffic_vo", CTLFLAG_RW,
+ &vap->iv_ampdu_mintraffic[WME_AC_VO], 0,
+ "VO traffic tx aggr threshold (pps)");
+ SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
+ "ampdu_mintraffic_vi", CTLFLAG_RW,
+ &vap->iv_ampdu_mintraffic[WME_AC_VI], 0,
+ "VI traffic tx aggr threshold (pps)");
+ }
+ if (vap->iv_caps & IEEE80211_C_DFS) {
+ SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
+ "radar", CTLTYPE_INT | CTLFLAG_RW, vap->iv_ic, 0,
+ ieee80211_sysctl_radar, "I", "simulate radar event");
+ }
+ vap->iv_sysctl = ctx;
+ vap->iv_oid = oid;
+}
+
+void
+ieee80211_sysctl_vdetach(struct ieee80211vap *vap)
+{
+
+ if (vap->iv_sysctl != NULL) {
+ sysctl_ctx_free(vap->iv_sysctl);
+ free(vap->iv_sysctl, M_DEVBUF);
+ vap->iv_sysctl = NULL;
+ }
+}
+
+int
+ieee80211_node_dectestref(struct ieee80211_node *ni)
+{
+ /* XXX need equivalent of atomic_dec_and_test */
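+ /*
+ * Drop our reference, then atomically detect the 0 -> 1
+ * transition: the cmpset succeeds only if the count reached
+ * zero, in which case it is left at 1 so the caller owns the
+ * final reference to reclaim, and we return true.
+ */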
+ atomic_subtract_int(&ni->ni_refcnt, 1);
+ return atomic_cmpset_int(&ni->ni_refcnt, 0, 1);
+}
+
+void
+ieee80211_drain_ifq(struct ifqueue *ifq)
+{
+ struct ieee80211_node *ni;
+ struct mbuf *m;
+
+ for (;;) {
+ IF_DEQUEUE(ifq, m);
+ if (m == NULL)
+ break;
+
+ ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
+ KASSERT(ni != NULL, ("frame w/o node"));
+ ieee80211_free_node(ni);
+ m->m_pkthdr.rcvif = NULL;
+
+ m_freem(m);
+ }
+}
+
+void
+ieee80211_flush_ifq(struct ifqueue *ifq, struct ieee80211vap *vap)
+{
+ struct ieee80211_node *ni;
+ struct mbuf *m, **mprev;
+
+ IF_LOCK(ifq);
+ mprev = &ifq->ifq_head;
+ while ((m = *mprev) != NULL) {
+ ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
+ if (ni != NULL && ni->ni_vap == vap) {
+ *mprev = m->m_nextpkt; /* remove from list */
+ ifq->ifq_len--;
+
+ m_freem(m);
+ ieee80211_free_node(ni); /* reclaim ref */
+ } else
+ mprev = &m->m_nextpkt;
+ }
+ /* recalculate tail ptr */
+ m = ifq->ifq_head;
+ for (; m != NULL && m->m_nextpkt != NULL; m = m->m_nextpkt)
+ ;
+ ifq->ifq_tail = m;
+ IF_UNLOCK(ifq);
+}
+
+/*
+ * Like MH_ALIGN (sys/mbuf.h), but for cluster-backed mbufs
+ * (m_getcl et al.): set up the data area so len bytes sit
+ * long-aligned at the end of the cluster.
+ */
+#define MC_ALIGN(m, len) \
+do { \
+ (m)->m_data += (MCLBYTES - (len)) &~ (sizeof(long) - 1); \
+} while (/* CONSTCOND */ 0)
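+
+/*
+ * Worked example, assuming MCLBYTES = 2048 and 8-byte longs: for
+ * len = 100, m_data advances by (2048 - 100) & ~7 = 1944, placing
+ * the data long-aligned near the end of the cluster with room for
+ * the full 100 bytes (2048 - 1944 = 104 bytes remain).
+ */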
+
+/*
+ * Allocate and setup a management frame of the specified
+ * size. We return the mbuf and a pointer to the start
+ * of the contiguous data area that's been reserved based
+ * on the packet length. The data area is forced to 32-bit
+ * alignment and the buffer length to a multiple of 4 bytes.
+ * This is done mainly so beacon frames (that require this)
+ * can use this interface too.
+ */
+struct mbuf *
+ieee80211_getmgtframe(uint8_t **frm, int headroom, int pktlen)
+{
+ struct mbuf *m;
+ u_int len;
+
+ /*
+ * NB: we know the mbuf routines will align the data area
+ * so we don't need to do anything special.
+ */
+ len = roundup2(headroom + pktlen, 4);
+ KASSERT(len <= MCLBYTES, ("802.11 mgt frame too large: %u", len));
+ if (len < MINCLSIZE) {
+ m = m_gethdr(M_NOWAIT, MT_DATA);
+ /*
+ * Align the data in case additional headers are added.
+ * This should only happen when a WEP header is added
+ * which only happens for shared key authentication mgt
+ * frames which all fit in MHLEN.
+ */
+ if (m != NULL)
+ MH_ALIGN(m, len);
+ } else {
+ m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
+ if (m != NULL)
+ MC_ALIGN(m, len);
+ }
+ if (m != NULL) {
+ m->m_data += headroom;
+ *frm = m->m_data;
+ }
+ return m;
+}
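+
+/*
+ * A minimal usage sketch (payload_len is hypothetical): callers
+ * reserve device headroom plus the 802.11 header and then write the
+ * frame body at the returned pointer:
+ *
+ * uint8_t *frm;
+ * struct mbuf *m = ieee80211_getmgtframe(&frm,
+ *     ic->ic_headroom + sizeof(struct ieee80211_frame), payload_len);
+ * if (m == NULL)
+ *         return ENOMEM;
+ * ... fill payload_len bytes at frm, then fix up m_len/m_pkthdr.len ...
+ */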
+
+/*
+ * Re-align the payload in the mbuf. This is mainly used (right now)
+ * to handle IP header alignment requirements on certain architectures.
+ */
+struct mbuf *
+ieee80211_realign(struct ieee80211vap *vap, struct mbuf *m, size_t align)
+{
+ int pktlen, space;
+ struct mbuf *n;
+
+ pktlen = m->m_pkthdr.len;
+ space = pktlen + align;
+ if (space < MINCLSIZE)
+ n = m_gethdr(M_DONTWAIT, MT_DATA);
+ else {
+ n = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR,
+ space <= MCLBYTES ? MCLBYTES :
+#if MJUMPAGESIZE != MCLBYTES
+ space <= MJUMPAGESIZE ? MJUMPAGESIZE :
+#endif
+ space <= MJUM9BYTES ? MJUM9BYTES : MJUM16BYTES);
+ }
+ if (__predict_true(n != NULL)) {
+ m_move_pkthdr(n, m);
+ n->m_data = (caddr_t)(ALIGN(n->m_data + align) - align);
+ m_copydata(m, 0, pktlen, mtod(n, caddr_t));
+ n->m_len = pktlen;
+ } else {
+ IEEE80211_DISCARD(vap, IEEE80211_MSG_ANY,
+ mtod(m, const struct ieee80211_frame *), NULL,
+ "%s", "no mbuf to realign");
+ vap->iv_stats.is_rx_badalign++;
+ }
+ m_freem(m);
+ return n;
+}
+
+int
+ieee80211_add_callback(struct mbuf *m,
+ void (*func)(struct ieee80211_node *, void *, int), void *arg)
+{
+ struct m_tag *mtag;
+ struct ieee80211_cb *cb;
+
+ mtag = m_tag_alloc(MTAG_ABI_NET80211, NET80211_TAG_CALLBACK,
+ sizeof(struct ieee80211_cb), M_NOWAIT);
+ if (mtag == NULL)
+ return 0;
+
+ cb = (struct ieee80211_cb *)(mtag+1);
+ cb->func = func;
+ cb->arg = arg;
+ m_tag_prepend(m, mtag);
+ m->m_flags |= M_TXCB;
+ return 1;
+}
+
+void
+ieee80211_process_callback(struct ieee80211_node *ni,
+ struct mbuf *m, int status)
+{
+ struct m_tag *mtag;
+
+ mtag = m_tag_locate(m, MTAG_ABI_NET80211, NET80211_TAG_CALLBACK, NULL);
+ if (mtag != NULL) {
+ struct ieee80211_cb *cb = (struct ieee80211_cb *)(mtag+1);
+ cb->func(ni, cb->arg, status);
+ }
+}
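+
+/*
+ * A minimal usage sketch (mgmt_tx_done is a hypothetical completion
+ * handler): the sender tags a frame before transmit and the driver
+ * fires the callback from its tx-complete path:
+ *
+ * ieee80211_add_callback(m, mgmt_tx_done, vap);
+ * ...
+ * if (m->m_flags & M_TXCB)
+ *         ieee80211_process_callback(ni, m, status);
+ */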
+
+#include <rtems/freebsd/sys/libkern.h>
+
+void
+get_random_bytes(void *p, size_t n)
+{
+ uint8_t *dp = p;
+
+ while (n > 0) {
+ uint32_t v = arc4random();
+ size_t nb = n > sizeof(uint32_t) ? sizeof(uint32_t) : n;
+ bcopy(&v, dp, n > sizeof(uint32_t) ? sizeof(uint32_t) : n);
+ dp += sizeof(uint32_t), n -= nb;
+ }
+}
+
+/*
+ * Helper function for events that pass just a single mac address.
+ */
+static void
+notify_macaddr(struct ifnet *ifp, int op, const uint8_t mac[IEEE80211_ADDR_LEN])
+{
+ struct ieee80211_join_event iev;
+
+ CURVNET_SET(ifp->if_vnet);
+ memset(&iev, 0, sizeof(iev));
+ IEEE80211_ADDR_COPY(iev.iev_addr, mac);
+ rt_ieee80211msg(ifp, op, &iev, sizeof(iev));
+ CURVNET_RESTORE();
+}
+
+void
+ieee80211_notify_node_join(struct ieee80211_node *ni, int newassoc)
+{
+ struct ieee80211vap *vap = ni->ni_vap;
+ struct ifnet *ifp = vap->iv_ifp;
+
+ CURVNET_SET_QUIET(ifp->if_vnet);
+ IEEE80211_NOTE(vap, IEEE80211_MSG_NODE, ni, "%snode join",
+ (ni == vap->iv_bss) ? "bss " : "");
+
+ if (ni == vap->iv_bss) {
+ notify_macaddr(ifp, newassoc ?
+ RTM_IEEE80211_ASSOC : RTM_IEEE80211_REASSOC, ni->ni_bssid);
+ if_link_state_change(ifp, LINK_STATE_UP);
+ } else {
+ notify_macaddr(ifp, newassoc ?
+ RTM_IEEE80211_JOIN : RTM_IEEE80211_REJOIN, ni->ni_macaddr);
+ }
+ CURVNET_RESTORE();
+}
+
+void
+ieee80211_notify_node_leave(struct ieee80211_node *ni)
+{
+ struct ieee80211vap *vap = ni->ni_vap;
+ struct ifnet *ifp = vap->iv_ifp;
+
+ CURVNET_SET_QUIET(ifp->if_vnet);
+ IEEE80211_NOTE(vap, IEEE80211_MSG_NODE, ni, "%snode leave",
+ (ni == vap->iv_bss) ? "bss " : "");
+
+ if (ni == vap->iv_bss) {
+ rt_ieee80211msg(ifp, RTM_IEEE80211_DISASSOC, NULL, 0);
+ if_link_state_change(ifp, LINK_STATE_DOWN);
+ } else {
+ /* fire off wireless event station leaving */
+ notify_macaddr(ifp, RTM_IEEE80211_LEAVE, ni->ni_macaddr);
+ }
+ CURVNET_RESTORE();
+}
+
+void
+ieee80211_notify_scan_done(struct ieee80211vap *vap)
+{
+ struct ifnet *ifp = vap->iv_ifp;
+
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_SCAN, "%s\n", "notify scan done");
+
+ /* dispatch wireless event indicating scan completed */
+ CURVNET_SET(ifp->if_vnet);
+ rt_ieee80211msg(ifp, RTM_IEEE80211_SCAN, NULL, 0);
+ CURVNET_RESTORE();
+}
+
+void
+ieee80211_notify_replay_failure(struct ieee80211vap *vap,
+ const struct ieee80211_frame *wh, const struct ieee80211_key *k,
+ u_int64_t rsc, int tid)
+{
+ struct ifnet *ifp = vap->iv_ifp;
+
+ IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_CRYPTO, wh->i_addr2,
+ "%s replay detected <rsc %ju, csc %ju, keyix %u rxkeyix %u>",
+ k->wk_cipher->ic_name, (intmax_t) rsc,
+ (intmax_t) k->wk_keyrsc[tid],
+ k->wk_keyix, k->wk_rxkeyix);
+
+ if (ifp != NULL) { /* NB: for cipher test modules */
+ struct ieee80211_replay_event iev;
+
+ IEEE80211_ADDR_COPY(iev.iev_dst, wh->i_addr1);
+ IEEE80211_ADDR_COPY(iev.iev_src, wh->i_addr2);
+ iev.iev_cipher = k->wk_cipher->ic_cipher;
+ if (k->wk_rxkeyix != IEEE80211_KEYIX_NONE)
+ iev.iev_keyix = k->wk_rxkeyix;
+ else
+ iev.iev_keyix = k->wk_keyix;
+ iev.iev_keyrsc = k->wk_keyrsc[tid];
+ iev.iev_rsc = rsc;
+ CURVNET_SET(ifp->if_vnet);
+ rt_ieee80211msg(ifp, RTM_IEEE80211_REPLAY, &iev, sizeof(iev));
+ CURVNET_RESTORE();
+ }
+}
+
+void
+ieee80211_notify_michael_failure(struct ieee80211vap *vap,
+ const struct ieee80211_frame *wh, u_int keyix)
+{
+ struct ifnet *ifp = vap->iv_ifp;
+
+ IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_CRYPTO, wh->i_addr2,
+ "michael MIC verification failed <keyix %u>", keyix);
+ vap->iv_stats.is_rx_tkipmic++;
+
+ if (ifp != NULL) { /* NB: for cipher test modules */
+ struct ieee80211_michael_event iev;
+
+ IEEE80211_ADDR_COPY(iev.iev_dst, wh->i_addr1);
+ IEEE80211_ADDR_COPY(iev.iev_src, wh->i_addr2);
+ iev.iev_cipher = IEEE80211_CIPHER_TKIP;
+ iev.iev_keyix = keyix;
+ CURVNET_SET(ifp->if_vnet);
+ rt_ieee80211msg(ifp, RTM_IEEE80211_MICHAEL, &iev, sizeof(iev));
+ CURVNET_RESTORE();
+ }
+}
+
+void
+ieee80211_notify_wds_discover(struct ieee80211_node *ni)
+{
+ struct ieee80211vap *vap = ni->ni_vap;
+ struct ifnet *ifp = vap->iv_ifp;
+
+ notify_macaddr(ifp, RTM_IEEE80211_WDS, ni->ni_macaddr);
+}
+
+void
+ieee80211_notify_csa(struct ieee80211com *ic,
+ const struct ieee80211_channel *c, int mode, int count)
+{
+ struct ifnet *ifp = ic->ic_ifp;
+ struct ieee80211_csa_event iev;
+
+ memset(&iev, 0, sizeof(iev));
+ iev.iev_flags = c->ic_flags;
+ iev.iev_freq = c->ic_freq;
+ iev.iev_ieee = c->ic_ieee;
+ iev.iev_mode = mode;
+ iev.iev_count = count;
+ rt_ieee80211msg(ifp, RTM_IEEE80211_CSA, &iev, sizeof(iev));
+}
+
+void
+ieee80211_notify_radar(struct ieee80211com *ic,
+ const struct ieee80211_channel *c)
+{
+ struct ifnet *ifp = ic->ic_ifp;
+ struct ieee80211_radar_event iev;
+
+ memset(&iev, 0, sizeof(iev));
+ iev.iev_flags = c->ic_flags;
+ iev.iev_freq = c->ic_freq;
+ iev.iev_ieee = c->ic_ieee;
+ rt_ieee80211msg(ifp, RTM_IEEE80211_RADAR, &iev, sizeof(iev));
+}
+
+void
+ieee80211_notify_cac(struct ieee80211com *ic,
+ const struct ieee80211_channel *c, enum ieee80211_notify_cac_event type)
+{
+ struct ifnet *ifp = ic->ic_ifp;
+ struct ieee80211_cac_event iev;
+
+ memset(&iev, 0, sizeof(iev));
+ iev.iev_flags = c->ic_flags;
+ iev.iev_freq = c->ic_freq;
+ iev.iev_ieee = c->ic_ieee;
+ iev.iev_type = type;
+ rt_ieee80211msg(ifp, RTM_IEEE80211_CAC, &iev, sizeof(iev));
+}
+
+void
+ieee80211_notify_node_deauth(struct ieee80211_node *ni)
+{
+ struct ieee80211vap *vap = ni->ni_vap;
+ struct ifnet *ifp = vap->iv_ifp;
+
+ IEEE80211_NOTE(vap, IEEE80211_MSG_NODE, ni, "%s", "node deauth");
+
+ notify_macaddr(ifp, RTM_IEEE80211_DEAUTH, ni->ni_macaddr);
+}
+
+void
+ieee80211_notify_node_auth(struct ieee80211_node *ni)
+{
+ struct ieee80211vap *vap = ni->ni_vap;
+ struct ifnet *ifp = vap->iv_ifp;
+
+ IEEE80211_NOTE(vap, IEEE80211_MSG_NODE, ni, "%s", "node auth");
+
+ notify_macaddr(ifp, RTM_IEEE80211_AUTH, ni->ni_macaddr);
+}
+
+void
+ieee80211_notify_country(struct ieee80211vap *vap,
+ const uint8_t bssid[IEEE80211_ADDR_LEN], const uint8_t cc[2])
+{
+ struct ifnet *ifp = vap->iv_ifp;
+ struct ieee80211_country_event iev;
+
+ memset(&iev, 0, sizeof(iev));
+ IEEE80211_ADDR_COPY(iev.iev_addr, bssid);
+ iev.iev_cc[0] = cc[0];
+ iev.iev_cc[1] = cc[1];
+ rt_ieee80211msg(ifp, RTM_IEEE80211_COUNTRY, &iev, sizeof(iev));
+}
+
+void
+ieee80211_notify_radio(struct ieee80211com *ic, int state)
+{
+ struct ifnet *ifp = ic->ic_ifp;
+ struct ieee80211_radio_event iev;
+
+ memset(&iev, 0, sizeof(iev));
+ iev.iev_state = state;
+ rt_ieee80211msg(ifp, RTM_IEEE80211_RADIO, &iev, sizeof(iev));
+}
+
+void
+ieee80211_load_module(const char *modname)
+{
+
+#ifdef notyet
+ (void)kern_kldload(curthread, modname, NULL);
+#else
+ printf("%s: load the %s module by hand for now.\n", __func__, modname);
+#endif
+}
+
+static eventhandler_tag wlan_bpfevent;
+static eventhandler_tag wlan_ifllevent;
+
+static void
+bpf_track(void *arg, struct ifnet *ifp, int dlt, int attach)
+{
+ /* NB: identify vap's by if_start */
+ if (dlt == DLT_IEEE802_11_RADIO && ifp->if_start == ieee80211_start) {
+ struct ieee80211vap *vap = ifp->if_softc;
+ /*
+ * Track bpf radiotap listener state. We mark the vap
+ * to indicate if any listener is present and the com
+ * to indicate if any listener exists on any associated
+ * vap. This flag is used by drivers to prepare radiotap
+ * state only when needed.
+ */
+ if (attach) {
+ ieee80211_syncflag_ext(vap, IEEE80211_FEXT_BPF);
+ if (vap->iv_opmode == IEEE80211_M_MONITOR)
+ atomic_add_int(&vap->iv_ic->ic_montaps, 1);
+ } else if (!bpf_peers_present(vap->iv_rawbpf)) {
+ ieee80211_syncflag_ext(vap, -IEEE80211_FEXT_BPF);
+ if (vap->iv_opmode == IEEE80211_M_MONITOR)
+ atomic_subtract_int(&vap->iv_ic->ic_montaps, 1);
+ }
+ }
+}
+
+static void
+wlan_iflladdr(void *arg __unused, struct ifnet *ifp)
+{
+ struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211vap *vap, *next;
+
+ if (ifp->if_type != IFT_IEEE80211 || ic == NULL)
+ return;
+
+ IEEE80211_LOCK(ic);
+ TAILQ_FOREACH_SAFE(vap, &ic->ic_vaps, iv_next, next) {
+ /*
+ * If the MAC address has changed on the parent and it was
+ * copied to the vap on creation then re-sync.
+ */
+ if (vap->iv_ic == ic &&
+ (vap->iv_flags_ext & IEEE80211_FEXT_UNIQMAC) == 0) {
+ IEEE80211_ADDR_COPY(vap->iv_myaddr, IF_LLADDR(ifp));
+ IEEE80211_UNLOCK(ic);
+ if_setlladdr(vap->iv_ifp, IF_LLADDR(ifp),
+ IEEE80211_ADDR_LEN);
+ IEEE80211_LOCK(ic);
+ }
+ }
+ IEEE80211_UNLOCK(ic);
+}
+
+/*
+ * Module glue.
+ *
+ * NB: the module name is "wlan" for compatibility with NetBSD.
+ */
+static int
+wlan_modevent(module_t mod, int type, void *unused)
+{
+ switch (type) {
+ case MOD_LOAD:
+ if (bootverbose)
+ printf("wlan: <802.11 Link Layer>\n");
+ wlan_bpfevent = EVENTHANDLER_REGISTER(bpf_track,
+ bpf_track, 0, EVENTHANDLER_PRI_ANY);
+ if (wlan_bpfevent == NULL)
+ return ENOMEM;
+ wlan_ifllevent = EVENTHANDLER_REGISTER(iflladdr_event,
+ wlan_iflladdr, NULL, EVENTHANDLER_PRI_ANY);
+ if (wlan_ifllevent == NULL) {
+ EVENTHANDLER_DEREGISTER(bpf_track, wlan_bpfevent);
+ return ENOMEM;
+ }
+ if_clone_attach(&wlan_cloner);
+ if_register_com_alloc(IFT_IEEE80211, wlan_alloc, wlan_free);
+ return 0;
+ case MOD_UNLOAD:
+ if_deregister_com_alloc(IFT_IEEE80211);
+ if_clone_detach(&wlan_cloner);
+ EVENTHANDLER_DEREGISTER(bpf_track, wlan_bpfevent);
+ EVENTHANDLER_DEREGISTER(iflladdr_event, wlan_ifllevent);
+ return 0;
+ }
+ return EINVAL;
+}
+
+static moduledata_t wlan_mod = {
+ "wlan",
+ wlan_modevent,
+ 0
+};
+DECLARE_MODULE(wlan, wlan_mod, SI_SUB_DRIVERS, SI_ORDER_FIRST);
+MODULE_VERSION(wlan, 1);
+MODULE_DEPEND(wlan, ether, 1, 1, 1);
diff --git a/rtems/freebsd/net80211/ieee80211_freebsd.h b/rtems/freebsd/net80211/ieee80211_freebsd.h
new file mode 100644
index 00000000..da2dd013
--- /dev/null
+++ b/rtems/freebsd/net80211/ieee80211_freebsd.h
@@ -0,0 +1,550 @@
+/*-
+ * Copyright (c) 2003-2008 Sam Leffler, Errno Consulting
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+#ifndef _NET80211_IEEE80211_FREEBSD_HH_
+#define _NET80211_IEEE80211_FREEBSD_HH_
+
+#ifdef _KERNEL
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/mutex.h>
+#include <rtems/freebsd/sys/rwlock.h>
+#include <rtems/freebsd/sys/sysctl.h>
+#include <rtems/freebsd/sys/taskqueue.h>
+
+/*
+ * Common state locking definitions.
+ */
+typedef struct {
+ char name[16]; /* e.g. "ath0_com_lock" */
+ struct mtx mtx;
+} ieee80211_com_lock_t;
+#define IEEE80211_LOCK_INIT(_ic, _name) do { \
+ ieee80211_com_lock_t *cl = &(_ic)->ic_comlock; \
+ snprintf(cl->name, sizeof(cl->name), "%s_com_lock", _name); \
+ mtx_init(&cl->mtx, cl->name, NULL, MTX_DEF | MTX_RECURSE); \
+} while (0)
+#define IEEE80211_LOCK_OBJ(_ic) (&(_ic)->ic_comlock.mtx)
+#define IEEE80211_LOCK_DESTROY(_ic) mtx_destroy(IEEE80211_LOCK_OBJ(_ic))
+#define IEEE80211_LOCK(_ic) mtx_lock(IEEE80211_LOCK_OBJ(_ic))
+#define IEEE80211_UNLOCK(_ic) mtx_unlock(IEEE80211_LOCK_OBJ(_ic))
+#define IEEE80211_LOCK_ASSERT(_ic) \
+ mtx_assert(IEEE80211_LOCK_OBJ(_ic), MA_OWNED)
+
+/*
+ * Node locking definitions.
+ */
+typedef struct {
+ char name[16]; /* e.g. "ath0_node_lock" */
+ struct mtx mtx;
+} ieee80211_node_lock_t;
+#define IEEE80211_NODE_LOCK_INIT(_nt, _name) do { \
+ ieee80211_node_lock_t *nl = &(_nt)->nt_nodelock; \
+ snprintf(nl->name, sizeof(nl->name), "%s_node_lock", _name); \
+ mtx_init(&nl->mtx, nl->name, NULL, MTX_DEF | MTX_RECURSE); \
+} while (0)
+#define IEEE80211_NODE_LOCK_OBJ(_nt) (&(_nt)->nt_nodelock.mtx)
+#define IEEE80211_NODE_LOCK_DESTROY(_nt) \
+ mtx_destroy(IEEE80211_NODE_LOCK_OBJ(_nt))
+#define IEEE80211_NODE_LOCK(_nt) \
+ mtx_lock(IEEE80211_NODE_LOCK_OBJ(_nt))
+#define IEEE80211_NODE_IS_LOCKED(_nt) \
+ mtx_owned(IEEE80211_NODE_LOCK_OBJ(_nt))
+#define IEEE80211_NODE_UNLOCK(_nt) \
+ mtx_unlock(IEEE80211_NODE_LOCK_OBJ(_nt))
+#define IEEE80211_NODE_LOCK_ASSERT(_nt) \
+ mtx_assert(IEEE80211_NODE_LOCK_OBJ(_nt), MA_OWNED)
+
+/*
+ * Node table iteration locking definitions; this protects the
+ * scan generation # used to iterate over the station table
+ * while grabbing+releasing the node lock.
+ */
+typedef struct {
+ char name[16]; /* e.g. "ath0_scan_lock" */
+ struct mtx mtx;
+} ieee80211_scan_lock_t;
+#define IEEE80211_NODE_ITERATE_LOCK_INIT(_nt, _name) do { \
+ ieee80211_scan_lock_t *sl = &(_nt)->nt_scanlock; \
+ snprintf(sl->name, sizeof(sl->name), "%s_scan_lock", _name); \
+ mtx_init(&sl->mtx, sl->name, NULL, MTX_DEF); \
+} while (0)
+#define IEEE80211_NODE_ITERATE_LOCK_OBJ(_nt) (&(_nt)->nt_scanlock.mtx)
+#define IEEE80211_NODE_ITERATE_LOCK_DESTROY(_nt) \
+ mtx_destroy(IEEE80211_NODE_ITERATE_LOCK_OBJ(_nt))
+#define IEEE80211_NODE_ITERATE_LOCK(_nt) \
+ mtx_lock(IEEE80211_NODE_ITERATE_LOCK_OBJ(_nt))
+#define IEEE80211_NODE_ITERATE_UNLOCK(_nt) \
+ mtx_unlock(IEEE80211_NODE_ITERATE_LOCK_OBJ(_nt))
+
+/*
+ * Power-save queue definitions.
+ */
+typedef struct mtx ieee80211_psq_lock_t;
+#define IEEE80211_PSQ_INIT(_psq, _name) \
+ mtx_init(&(_psq)->psq_lock, _name, "802.11 ps q", MTX_DEF)
+#define IEEE80211_PSQ_DESTROY(_psq) mtx_destroy(&(_psq)->psq_lock)
+#define IEEE80211_PSQ_LOCK(_psq) mtx_lock(&(_psq)->psq_lock)
+#define IEEE80211_PSQ_UNLOCK(_psq) mtx_unlock(&(_psq)->psq_lock)
+
+#ifndef IF_PREPEND_LIST
+#define _IF_PREPEND_LIST(ifq, mhead, mtail, mcount) do { \
+ (mtail)->m_nextpkt = (ifq)->ifq_head; \
+ if ((ifq)->ifq_tail == NULL) \
+ (ifq)->ifq_tail = (mtail); \
+ (ifq)->ifq_head = (mhead); \
+ (ifq)->ifq_len += (mcount); \
+} while (0)
+#define IF_PREPEND_LIST(ifq, mhead, mtail, mcount) do { \
+ IF_LOCK(ifq); \
+ _IF_PREPEND_LIST(ifq, mhead, mtail, mcount); \
+ IF_UNLOCK(ifq); \
+} while (0)
+#endif /* IF_PREPEND_LIST */
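+
+/*
+ * NB: IF_PREPEND_LIST splices a packet chain (mhead..mtail, mcount
+ * packets) back onto the head of an ifqueue in constant time,
+ * preserving the order of the requeued frames.
+ */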
+
+/*
+ * Age queue definitions.
+ */
+typedef struct mtx ieee80211_ageq_lock_t;
+#define IEEE80211_AGEQ_INIT(_aq, _name) \
+ mtx_init(&(_aq)->aq_lock, _name, "802.11 age q", MTX_DEF)
+#define IEEE80211_AGEQ_DESTROY(_aq) mtx_destroy(&(_aq)->aq_lock)
+#define IEEE80211_AGEQ_LOCK(_aq) mtx_lock(&(_aq)->aq_lock)
+#define IEEE80211_AGEQ_UNLOCK(_aq) mtx_unlock(&(_aq)->aq_lock)
+
+/*
+ * 802.1x MAC ACL database locking definitions.
+ */
+typedef struct mtx acl_lock_t;
+#define ACL_LOCK_INIT(_as, _name) \
+ mtx_init(&(_as)->as_lock, _name, "802.11 ACL", MTX_DEF)
+#define ACL_LOCK_DESTROY(_as) mtx_destroy(&(_as)->as_lock)
+#define ACL_LOCK(_as) mtx_lock(&(_as)->as_lock)
+#define ACL_UNLOCK(_as) mtx_unlock(&(_as)->as_lock)
+#define ACL_LOCK_ASSERT(_as) \
+ mtx_assert((&(_as)->as_lock), MA_OWNED)
+
+/*
+ * Scan table definitions.
+ */
+typedef struct mtx ieee80211_scan_table_lock_t;
+#define IEEE80211_SCAN_TABLE_LOCK_INIT(_st, _name) \
+ mtx_init(&(_st)->st_lock, _name, "802.11 scan table", MTX_DEF)
+#define IEEE80211_SCAN_TABLE_LOCK_DESTROY(_st) mtx_destroy(&(_st)->st_lock)
+#define IEEE80211_SCAN_TABLE_LOCK(_st) mtx_lock(&(_st)->st_lock)
+#define IEEE80211_SCAN_TABLE_UNLOCK(_st) mtx_unlock(&(_st)->st_lock)
+
+/*
+ * Node reference counting definitions.
+ *
+ * ieee80211_node_initref initialize the reference count to 1
+ * ieee80211_node_incref add a reference
+ * ieee80211_node_decref remove a reference
+ * ieee80211_node_dectestref remove a reference and return 1 if this
+ * is the last reference, otherwise 0
+ * ieee80211_node_refcnt reference count for printing (only)
+ */
+#include <rtems/freebsd/machine/atomic.h>
+
+#define ieee80211_node_initref(_ni) \
+ do { ((_ni)->ni_refcnt = 1); } while (0)
+#define ieee80211_node_incref(_ni) \
+ atomic_add_int(&(_ni)->ni_refcnt, 1)
+#define ieee80211_node_decref(_ni) \
+ atomic_subtract_int(&(_ni)->ni_refcnt, 1)
+struct ieee80211_node;
+int ieee80211_node_dectestref(struct ieee80211_node *ni);
+#define ieee80211_node_refcnt(_ni) (_ni)->ni_refcnt
+
+struct ifqueue;
+struct ieee80211vap;
+void ieee80211_drain_ifq(struct ifqueue *);
+void ieee80211_flush_ifq(struct ifqueue *, struct ieee80211vap *);
+
+void ieee80211_vap_destroy(struct ieee80211vap *);
+
+#define IFNET_IS_UP_RUNNING(_ifp) \
+ (((_ifp)->if_flags & IFF_UP) && \
+ ((_ifp)->if_drv_flags & IFF_DRV_RUNNING))
+
+#define msecs_to_ticks(ms) (((ms)*hz)/1000)
+#define ticks_to_msecs(t) (1000*(t) / hz)
+#define ticks_to_secs(t) ((t) / hz)
+#define time_after(a,b) ((long)(b) - (long)(a) < 0)
+#define time_before(a,b) time_after(b,a)
+#define time_after_eq(a,b) ((long)(a) - (long)(b) >= 0)
+#define time_before_eq(a,b) time_after_eq(b,a)
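+
+/*
+ * NB: the time_* macros compare tick counts by signed difference,
+ * so e.g. time_after(a, b) is true iff a is later than b even if
+ * the tick counter wrapped in between.
+ */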
+
+struct mbuf *ieee80211_getmgtframe(uint8_t **frm, int headroom, int pktlen);
+
+/* tx path usage */
+#define M_ENCAP M_PROTO1 /* 802.11 encap done */
+#define M_EAPOL M_PROTO3 /* PAE/EAPOL frame */
+#define M_PWR_SAV M_PROTO4 /* bypass PS handling */
+#define M_MORE_DATA M_PROTO5 /* more data frames to follow */
+#define M_FF M_PROTO6 /* fast frame */
+#define M_TXCB M_PROTO7 /* do tx complete callback */
+#define M_AMPDU_MPDU M_PROTO8 /* ok for A-MPDU aggregation */
+#define M_80211_TX \
+ (M_FRAG|M_FIRSTFRAG|M_LASTFRAG|M_ENCAP|M_EAPOL|M_PWR_SAV|\
+ M_MORE_DATA|M_FF|M_TXCB|M_AMPDU_MPDU)
+
+/* rx path usage */
+#define M_AMPDU M_PROTO1 /* A-MPDU subframe */
+#define M_WEP M_PROTO2 /* WEP done by hardware */
+#if 0
+#define M_AMPDU_MPDU M_PROTO8 /* A-MPDU re-order done */
+#endif
+#define M_80211_RX (M_AMPDU|M_WEP|M_AMPDU_MPDU)
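+
+/*
+ * NB: the tx and rx flag sets reuse the same scratch M_PROTOx bits
+ * (e.g. M_PROTO1 is M_ENCAP on tx but M_AMPDU on rx), so a given
+ * flag is only meaningful on the path that set it.
+ */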
+
+#define IEEE80211_MBUF_TX_FLAG_BITS \
+ "\20\1M_EXT\2M_PKTHDR\3M_EOR\4M_RDONLY\5M_ENCAP\6M_WEP\7M_EAPOL" \
+ "\10M_PWR_SAV\11M_MORE_DATA\12M_BCAST\13M_MCAST\14M_FRAG\15M_FIRSTFRAG" \
+ "\16M_LASTFRAG\17M_SKIP_FIREWALL\20M_FREELIST\21M_VLANTAG\22M_PROMISC" \
+ "\23M_NOFREE\24M_FF\25M_TXCB\26M_AMPDU_MPDU\27M_FLOWID"
+
+#define IEEE80211_MBUF_RX_FLAG_BITS \
+ "\20\1M_EXT\2M_PKTHDR\3M_EOR\4M_RDONLY\5M_AMPDU\6M_WEP\7M_PROTO3" \
+ "\10M_PROTO4\11M_PROTO5\12M_BCAST\13M_MCAST\14M_FRAG\15M_FIRSTFRAG" \
+ "\16M_LASTFRAG\17M_SKIP_FIREWALL\20M_FREELIST\21M_VLANTAG\22M_PROMISC" \
+ "\23M_NOFREE\24M_PROTO6\25M_PROTO7\26M_AMPDU_MPDU\27M_FLOWID"
+
+/*
+ * Store WME access control bits in the vlan tag.
+ * This is safe since it's done after the packet is classified
+ * (where we use any previous tag) and because it's passed
+ * directly in to the driver and there's no chance someone
+ * else will clobber them on us.
+ */
+#define M_WME_SETAC(m, ac) \
+ ((m)->m_pkthdr.ether_vtag = (ac))
+#define M_WME_GETAC(m) ((m)->m_pkthdr.ether_vtag)
+
+/*
+ * Mbufs on the power save queue are tagged with an age and
+ * timed out. We reuse the hardware checksum field in the
+ * mbuf packet header to store this data.
+ */
+#define M_AGE_SET(m,v) (m->m_pkthdr.csum_data = v)
+#define M_AGE_GET(m) (m->m_pkthdr.csum_data)
+#define M_AGE_SUB(m,adj) (m->m_pkthdr.csum_data -= adj)
+
+/*
+ * Store the 802.11 sequence number in the mbuf pkthdr; the
+ * otherwise unused TSO segment size field is borrowed to hold it.
+ */
+#define M_SEQNO_SET(m, seqno) \
+ ((m)->m_pkthdr.tso_segsz = (seqno))
+#define M_SEQNO_GET(m) ((m)->m_pkthdr.tso_segsz)
+
+#define MTAG_ABI_NET80211 1132948340 /* net80211 ABI */
+
+struct ieee80211_cb {
+ void (*func)(struct ieee80211_node *, void *, int status);
+ void *arg;
+};
+#define NET80211_TAG_CALLBACK 0 /* xmit complete callback */
+int ieee80211_add_callback(struct mbuf *m,
+ void (*func)(struct ieee80211_node *, void *, int), void *arg);
+void ieee80211_process_callback(struct ieee80211_node *, struct mbuf *, int);
+
+void get_random_bytes(void *, size_t);
+
+struct ieee80211com;
+
+void ieee80211_sysctl_attach(struct ieee80211com *);
+void ieee80211_sysctl_detach(struct ieee80211com *);
+void ieee80211_sysctl_vattach(struct ieee80211vap *);
+void ieee80211_sysctl_vdetach(struct ieee80211vap *);
+
+SYSCTL_DECL(_net_wlan);
+int ieee80211_sysctl_msecs_ticks(SYSCTL_HANDLER_ARGS);
+
+void ieee80211_load_module(const char *);
+
+/*
+ * A "policy module" is an adjunct module to net80211 that provides
+ * functionality that typically includes policy decisions. This
+ * modularity enables extensibility and vendor-supplied functionality.
+ */
+#define _IEEE80211_POLICY_MODULE(policy, name, version) \
+typedef void (*policy##_setup)(int); \
+SET_DECLARE(policy##_set, policy##_setup); \
+static int \
+wlan_##name##_modevent(module_t mod, int type, void *unused) \
+{ \
+ policy##_setup * const *iter, f; \
+ switch (type) { \
+ case MOD_LOAD: \
+ SET_FOREACH(iter, policy##_set) { \
+ f = (void*) *iter; \
+ f(type); \
+ } \
+ return 0; \
+ case MOD_UNLOAD: \
+ case MOD_QUIESCE: \
+ if (nrefs) { \
+ printf("wlan_##name: still in use (%u dynamic refs)\n",\
+ nrefs); \
+ return EBUSY; \
+ } \
+ if (type == MOD_UNLOAD) { \
+ SET_FOREACH(iter, policy##_set) { \
+ f = (void*) *iter; \
+ f(type); \
+ } \
+ } \
+ return 0; \
+ } \
+ return EINVAL; \
+} \
+static moduledata_t name##_mod = { \
+ "wlan_" #name, \
+ wlan_##name##_modevent, \
+ 0 \
+}; \
+DECLARE_MODULE(wlan_##name, name##_mod, SI_SUB_DRIVERS, SI_ORDER_FIRST);\
+MODULE_VERSION(wlan_##name, version); \
+MODULE_DEPEND(wlan_##name, wlan, 1, 1, 1)
+
+/*
+ * Crypto modules implement cipher support.
+ */
+#define IEEE80211_CRYPTO_MODULE(name, version) \
+_IEEE80211_POLICY_MODULE(crypto, name, version); \
+static void \
+name##_modevent(int type) \
+{ \
+ if (type == MOD_LOAD) \
+ ieee80211_crypto_register(&name); \
+ else \
+ ieee80211_crypto_unregister(&name); \
+} \
+TEXT_SET(crypto##_set, name##_modevent)
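+
+/*
+ * Example: IEEE80211_CRYPTO_MODULE(wep, 1) declares a "wlan_wep"
+ * module whose load/unload events run wep_modevent() to register
+ * and unregister the wep cipher with net80211.
+ */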
+
+/*
+ * Scanner modules provide scanning policy.
+ */
+#define IEEE80211_SCANNER_MODULE(name, version) \
+ _IEEE80211_POLICY_MODULE(scanner, name, version)
+
+#define IEEE80211_SCANNER_ALG(name, alg, v) \
+static void \
+name##_modevent(int type) \
+{ \
+ if (type == MOD_LOAD) \
+ ieee80211_scanner_register(alg, &v); \
+ else \
+ ieee80211_scanner_unregister(alg, &v); \
+} \
+TEXT_SET(scanner_set, name##_modevent); \
+
+/*
+ * ACL modules implement acl policy.
+ */
+#define IEEE80211_ACL_MODULE(name, alg, version) \
+_IEEE80211_POLICY_MODULE(acl, name, version); \
+static void \
+alg##_modevent(int type) \
+{ \
+ if (type == MOD_LOAD) \
+ ieee80211_aclator_register(&alg); \
+ else \
+ ieee80211_aclator_unregister(&alg); \
+} \
+TEXT_SET(acl_set, alg##_modevent); \
+
+/*
+ * Authenticator modules handle 802.1x/WPA authentication.
+ */
+#define IEEE80211_AUTH_MODULE(name, version) \
+ _IEEE80211_POLICY_MODULE(auth, name, version)
+
+#define IEEE80211_AUTH_ALG(name, alg, v) \
+static void \
+name##_modevent(int type) \
+{ \
+ if (type == MOD_LOAD) \
+ ieee80211_authenticator_register(alg, &v); \
+ else \
+ ieee80211_authenticator_unregister(alg); \
+} \
+TEXT_SET(auth_set, name##_modevent)
+
+/*
+ * Rate control modules provide tx rate control support.
+ */
+#define IEEE80211_RATECTL_MODULE(alg, version) \
+ _IEEE80211_POLICY_MODULE(ratectl, alg, version); \
+
+#define IEEE80211_RATECTL_ALG(name, alg, v) \
+static void \
+alg##_modevent(int type) \
+{ \
+ if (type == MOD_LOAD) \
+ ieee80211_ratectl_register(alg, &v); \
+ else \
+ ieee80211_ratectl_unregister(alg); \
+} \
+TEXT_SET(ratectl##_set, alg##_modevent)
+
+struct ieee80211req;
+typedef int ieee80211_ioctl_getfunc(struct ieee80211vap *,
+ struct ieee80211req *);
+SET_DECLARE(ieee80211_ioctl_getset, ieee80211_ioctl_getfunc);
+#define IEEE80211_IOCTL_GET(_name, _get) TEXT_SET(ieee80211_ioctl_getset, _get)
+
+typedef int ieee80211_ioctl_setfunc(struct ieee80211vap *,
+ struct ieee80211req *);
+SET_DECLARE(ieee80211_ioctl_setset, ieee80211_ioctl_setfunc);
+#define IEEE80211_IOCTL_SET(_name, _set) TEXT_SET(ieee80211_ioctl_setset, _set)
+#endif /* _KERNEL */
+
+/* XXX this stuff belongs elsewhere */
+/*
+ * Message formats for messages from the net80211 layer to user
+ * applications via the routing socket. These messages are appended
+ * to an if_announcemsghdr structure.
+ */
+struct ieee80211_join_event {
+ uint8_t iev_addr[6];
+};
+
+struct ieee80211_leave_event {
+ uint8_t iev_addr[6];
+};
+
+struct ieee80211_replay_event {
+ uint8_t iev_src[6]; /* src MAC */
+ uint8_t iev_dst[6]; /* dst MAC */
+ uint8_t iev_cipher; /* cipher type */
+ uint8_t iev_keyix; /* key id/index */
+ uint64_t iev_keyrsc; /* RSC from key */
+ uint64_t iev_rsc; /* RSC from frame */
+};
+
+struct ieee80211_michael_event {
+ uint8_t iev_src[6]; /* src MAC */
+ uint8_t iev_dst[6]; /* dst MAC */
+ uint8_t iev_cipher; /* cipher type */
+ uint8_t iev_keyix; /* key id/index */
+};
+
+struct ieee80211_wds_event {
+ uint8_t iev_addr[6];
+};
+
+struct ieee80211_csa_event {
+ uint32_t iev_flags; /* channel flags */
+ uint16_t iev_freq; /* setting in MHz */
+ uint8_t iev_ieee; /* IEEE channel number */
+ uint8_t iev_mode; /* CSA mode */
+ uint8_t iev_count; /* CSA count */
+};
+
+struct ieee80211_cac_event {
+ uint32_t iev_flags; /* channel flags */
+ uint16_t iev_freq; /* setting in MHz */
+ uint8_t iev_ieee; /* IEEE channel number */
+ /* XXX timestamp? */
+ uint8_t iev_type; /* IEEE80211_NOTIFY_CAC_* */
+};
+
+struct ieee80211_radar_event {
+ uint32_t iev_flags; /* channel flags */
+ uint16_t iev_freq; /* setting in MHz */
+ uint8_t iev_ieee; /* IEEE channel number */
+ /* XXX timestamp? */
+};
+
+struct ieee80211_auth_event {
+ uint8_t iev_addr[6];
+};
+
+struct ieee80211_deauth_event {
+ uint8_t iev_addr[6];
+};
+
+struct ieee80211_country_event {
+ uint8_t iev_addr[6];
+ uint8_t iev_cc[2]; /* ISO country code */
+};
+
+struct ieee80211_radio_event {
+ uint8_t iev_state; /* 1 on, 0 off */
+};
+
+#define RTM_IEEE80211_ASSOC 100 /* station associate (bss mode) */
+#define RTM_IEEE80211_REASSOC 101 /* station re-associate (bss mode) */
+#define RTM_IEEE80211_DISASSOC 102 /* station disassociate (bss mode) */
+#define RTM_IEEE80211_JOIN 103 /* station join (ap mode) */
+#define RTM_IEEE80211_LEAVE 104 /* station leave (ap mode) */
+#define RTM_IEEE80211_SCAN 105 /* scan complete, results available */
+#define RTM_IEEE80211_REPLAY 106 /* sequence counter replay detected */
+#define RTM_IEEE80211_MICHAEL 107 /* Michael MIC failure detected */
+#define RTM_IEEE80211_REJOIN 108 /* station re-associate (ap mode) */
+#define RTM_IEEE80211_WDS 109 /* WDS discovery (ap mode) */
+#define RTM_IEEE80211_CSA 110 /* Channel Switch Announcement event */
+#define RTM_IEEE80211_RADAR 111 /* radar event */
+#define RTM_IEEE80211_CAC 112 /* Channel Availability Check event */
+#define RTM_IEEE80211_DEAUTH 113 /* station deauthenticate */
+#define RTM_IEEE80211_AUTH 114 /* station authenticate (ap mode) */
+#define RTM_IEEE80211_COUNTRY 115 /* discovered country code (sta mode) */
+#define RTM_IEEE80211_RADIO 116 /* RF kill switch state change */
+
+/*
+ * Structure prepended to raw packets sent through the bpf
+ * interface when set to DLT_IEEE802_11_RADIO. This allows
+ * user applications to specify pretty much everything in
+ * an Atheros tx descriptor. XXX need to generalize.
+ *
+ * XXX cannot be more than 14 bytes as it is copied to a sockaddr's
+ * XXX sa_data area.
+ */
+struct ieee80211_bpf_params {
+ uint8_t ibp_vers; /* version */
+#define IEEE80211_BPF_VERSION 0
+ uint8_t ibp_len; /* header length in bytes */
+ uint8_t ibp_flags;
+#define IEEE80211_BPF_SHORTPRE 0x01 /* tx with short preamble */
+#define IEEE80211_BPF_NOACK 0x02 /* tx with no ack */
+#define IEEE80211_BPF_CRYPTO 0x04 /* tx with h/w encryption */
+#define IEEE80211_BPF_FCS 0x10 /* frame includes FCS */
+#define IEEE80211_BPF_DATAPAD 0x20 /* frame includes data padding */
+#define IEEE80211_BPF_RTS 0x40 /* tx with RTS/CTS */
+#define IEEE80211_BPF_CTS 0x80 /* tx with CTS only */
+ uint8_t ibp_pri; /* WME/WMM AC+tx antenna */
+ uint8_t ibp_try0; /* series 1 try count */
+ uint8_t ibp_rate0; /* series 1 IEEE tx rate */
+ uint8_t ibp_power; /* tx power (device units) */
+ uint8_t ibp_ctsrate; /* IEEE tx rate for CTS */
+ uint8_t ibp_try1; /* series 2 try count */
+ uint8_t ibp_rate1; /* series 2 IEEE tx rate */
+ uint8_t ibp_try2; /* series 3 try count */
+ uint8_t ibp_rate2; /* series 3 IEEE tx rate */
+ uint8_t ibp_try3; /* series 4 try count */
+ uint8_t ibp_rate3; /* series 4 IEEE tx rate */
+};
+#endif /* _NET80211_IEEE80211_FREEBSD_HH_ */
diff --git a/rtems/freebsd/net80211/ieee80211_hostap.c b/rtems/freebsd/net80211/ieee80211_hostap.c
new file mode 100644
index 00000000..a7ffd198
--- /dev/null
+++ b/rtems/freebsd/net80211/ieee80211_hostap.c
@@ -0,0 +1,2307 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 2007-2008 Sam Leffler, Errno Consulting
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+#ifdef __FreeBSD__
+__FBSDID("$FreeBSD$");
+#endif
+
+/*
+ * IEEE 802.11 HOSTAP mode support.
+ */
+#include <rtems/freebsd/local/opt_inet.h>
+#include <rtems/freebsd/local/opt_wlan.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/kernel.h>
+
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/sockio.h>
+#include <rtems/freebsd/sys/endian.h>
+#include <rtems/freebsd/sys/errno.h>
+#include <rtems/freebsd/sys/proc.h>
+#include <rtems/freebsd/sys/sysctl.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/if_media.h>
+#include <rtems/freebsd/net/if_llc.h>
+#include <rtems/freebsd/net/ethernet.h>
+
+#include <rtems/freebsd/net/bpf.h>
+
+#include <rtems/freebsd/net80211/ieee80211_var.h>
+#include <rtems/freebsd/net80211/ieee80211_hostap.h>
+#include <rtems/freebsd/net80211/ieee80211_input.h>
+#ifdef IEEE80211_SUPPORT_SUPERG
+#include <rtems/freebsd/net80211/ieee80211_superg.h>
+#endif
+#include <rtems/freebsd/net80211/ieee80211_wds.h>
+
+#define IEEE80211_RATE2MBS(r) (((r) & IEEE80211_RATE_VAL) / 2)
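+/* NB: IEEE rate codes count 500 kb/s units, hence the divide by 2. */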
+
+static void hostap_vattach(struct ieee80211vap *);
+static int hostap_newstate(struct ieee80211vap *, enum ieee80211_state, int);
+static int hostap_input(struct ieee80211_node *ni, struct mbuf *m,
+ int rssi, int nf);
+static void hostap_deliver_data(struct ieee80211vap *,
+ struct ieee80211_node *, struct mbuf *);
+static void hostap_recv_mgmt(struct ieee80211_node *, struct mbuf *,
+ int subtype, int rssi, int nf);
+static void hostap_recv_ctl(struct ieee80211_node *, struct mbuf *, int);
+static void hostap_recv_pspoll(struct ieee80211_node *, struct mbuf *);
+
+void
+ieee80211_hostap_attach(struct ieee80211com *ic)
+{
+ ic->ic_vattach[IEEE80211_M_HOSTAP] = hostap_vattach;
+}
+
+void
+ieee80211_hostap_detach(struct ieee80211com *ic)
+{
+}
+
+static void
+hostap_vdetach(struct ieee80211vap *vap)
+{
+}
+
+static void
+hostap_vattach(struct ieee80211vap *vap)
+{
+ vap->iv_newstate = hostap_newstate;
+ vap->iv_input = hostap_input;
+ vap->iv_recv_mgmt = hostap_recv_mgmt;
+ vap->iv_recv_ctl = hostap_recv_ctl;
+ vap->iv_opdetach = hostap_vdetach;
+ vap->iv_deliver_data = hostap_deliver_data;
+}
+
+static void
+sta_disassoc(void *arg, struct ieee80211_node *ni)
+{
+ struct ieee80211vap *vap = arg;
+
+ if (ni->ni_vap == vap && ni->ni_associd != 0) {
+ IEEE80211_SEND_MGMT(ni, IEEE80211_FC0_SUBTYPE_DISASSOC,
+ IEEE80211_REASON_ASSOC_LEAVE);
+ ieee80211_node_leave(ni);
+ }
+}
+
+static void
+sta_csa(void *arg, struct ieee80211_node *ni)
+{
+ struct ieee80211vap *vap = arg;
+
+ if (ni->ni_vap == vap && ni->ni_associd != 0)
+ if (ni->ni_inact > vap->iv_inact_init) {
+ ni->ni_inact = vap->iv_inact_init;
+ IEEE80211_NOTE(vap, IEEE80211_MSG_INACT, ni,
+ "%s: inact %u", __func__, ni->ni_inact);
+ }
+}
+
+static void
+sta_drop(void *arg, struct ieee80211_node *ni)
+{
+ struct ieee80211vap *vap = arg;
+
+ if (ni->ni_vap == vap && ni->ni_associd != 0)
+ ieee80211_node_leave(ni);
+}
+
+/*
+ * Check whether a channel change requires associated stations to
+ * re-associate so protocol state is correct. This is used when doing
+ * CSA across bands or similar (e.g. HT -> legacy).
+ */
+static int
+isbandchange(struct ieee80211com *ic)
+{
+ return ((ic->ic_bsschan->ic_flags ^ ic->ic_csa_newchan->ic_flags) &
+ (IEEE80211_CHAN_2GHZ | IEEE80211_CHAN_5GHZ | IEEE80211_CHAN_HALF |
+ IEEE80211_CHAN_QUARTER | IEEE80211_CHAN_HT)) != 0;
+}
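+
+/*
+ * Example: a CSA that moves the bss between the 2.4 and 5 GHz bands
+ * or from an HT to a legacy channel changes the flag bits tested
+ * above, so isbandchange() returns true and associated stations are
+ * dropped (see the CSA case in hostap_newstate) to force them to
+ * re-associate.
+ */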
+
+/*
+ * IEEE80211_M_HOSTAP vap state machine handler.
+ */
+static int
+hostap_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
+{
+ struct ieee80211com *ic = vap->iv_ic;
+ enum ieee80211_state ostate;
+
+ IEEE80211_LOCK_ASSERT(ic);
+
+ ostate = vap->iv_state;
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_STATE, "%s: %s -> %s (%d)\n",
+ __func__, ieee80211_state_name[ostate],
+ ieee80211_state_name[nstate], arg);
+ vap->iv_state = nstate; /* state transition */
+ if (ostate != IEEE80211_S_SCAN)
+ ieee80211_cancel_scan(vap); /* background scan */
+ switch (nstate) {
+ case IEEE80211_S_INIT:
+ switch (ostate) {
+ case IEEE80211_S_SCAN:
+ ieee80211_cancel_scan(vap);
+ break;
+ case IEEE80211_S_CAC:
+ ieee80211_dfs_cac_stop(vap);
+ break;
+ case IEEE80211_S_RUN:
+ ieee80211_iterate_nodes(&ic->ic_sta, sta_disassoc, vap);
+ break;
+ default:
+ break;
+ }
+ if (ostate != IEEE80211_S_INIT) {
+ /* NB: optimize INIT -> INIT case */
+ ieee80211_reset_bss(vap);
+ }
+ if (vap->iv_auth->ia_detach != NULL)
+ vap->iv_auth->ia_detach(vap);
+ break;
+ case IEEE80211_S_SCAN:
+ switch (ostate) {
+ case IEEE80211_S_CSA:
+ case IEEE80211_S_RUN:
+ ieee80211_iterate_nodes(&ic->ic_sta, sta_disassoc, vap);
+ /*
+ * Clear overlapping BSS state; the beacon frame
+ * will be reconstructed on transition to the RUN
+ * state and the timeout routines check if the flag
+ * is set before doing anything so this is sufficient.
+ */
+ ic->ic_flags_ext &= ~IEEE80211_FEXT_NONERP_PR;
+ ic->ic_flags_ht &= ~IEEE80211_FHT_NONHT_PR;
+ /* fall thru... */
+ case IEEE80211_S_CAC:
+ /*
+ * NB: We may get here because of a manual channel
+ * change in which case we need to stop CAC
+ * XXX no need to stop if ostate RUN but it's ok
+ */
+ ieee80211_dfs_cac_stop(vap);
+ /* fall thru... */
+ case IEEE80211_S_INIT:
+ if (vap->iv_des_chan != IEEE80211_CHAN_ANYC &&
+ !IEEE80211_IS_CHAN_RADAR(vap->iv_des_chan)) {
+ /*
+ * Already have a channel; bypass the
+ * scan and startup immediately.
+ * ieee80211_create_ibss will call back to
+ * move us to RUN state.
+ */
+ ieee80211_create_ibss(vap, vap->iv_des_chan);
+ break;
+ }
+ /*
+ * Initiate a scan. We can come here as a result
+ * of an IEEE80211_IOC_SCAN_REQ too in which case
+ * the vap will be marked with IEEE80211_FEXT_SCANREQ
+ * and the scan request parameters will be present
+ * in iv_scanreq. Otherwise we do the default.
+ */
+ if (vap->iv_flags_ext & IEEE80211_FEXT_SCANREQ) {
+ ieee80211_check_scan(vap,
+ vap->iv_scanreq_flags,
+ vap->iv_scanreq_duration,
+ vap->iv_scanreq_mindwell,
+ vap->iv_scanreq_maxdwell,
+ vap->iv_scanreq_nssid, vap->iv_scanreq_ssid);
+ vap->iv_flags_ext &= ~IEEE80211_FEXT_SCANREQ;
+ } else
+ ieee80211_check_scan_current(vap);
+ break;
+ case IEEE80211_S_SCAN:
+ /*
+ * A state change requires a reset; scan.
+ */
+ ieee80211_check_scan_current(vap);
+ break;
+ default:
+ break;
+ }
+ break;
+ case IEEE80211_S_CAC:
+ /*
+ * Start CAC on a DFS channel. We come here when starting
+ * a bss on a DFS channel (see ieee80211_create_ibss).
+ */
+ ieee80211_dfs_cac_start(vap);
+ break;
+ case IEEE80211_S_RUN:
+ if (vap->iv_flags & IEEE80211_F_WPA) {
+ /* XXX validate prerequisites */
+ }
+ switch (ostate) {
+ case IEEE80211_S_INIT:
+ /*
+ * Already have a channel; bypass the
+ * scan and startup immediately.
+ * Note that ieee80211_create_ibss will call
+ * back to do a RUN->RUN state change.
+ */
+ ieee80211_create_ibss(vap,
+ ieee80211_ht_adjust_channel(ic,
+ ic->ic_curchan, vap->iv_flags_ht));
+ /* NB: iv_bss is changed on return */
+ break;
+ case IEEE80211_S_CAC:
+ /*
+ * NB: This is the normal state change when CAC
+ * expires and no radar was detected; no need to
+ * clear the CAC timer as it's already expired.
+ */
+ /* fall thru... */
+ case IEEE80211_S_CSA:
+ /*
+ * Shorten inactivity timer of associated stations
+ * to weed out sta's that don't follow a CSA.
+ */
+ ieee80211_iterate_nodes(&ic->ic_sta, sta_csa, vap);
+ /*
+ * Update bss node channel to reflect where
+ * we landed after CSA.
+ */
+ ieee80211_node_set_chan(vap->iv_bss,
+ ieee80211_ht_adjust_channel(ic, ic->ic_curchan,
+ ieee80211_htchanflags(vap->iv_bss->ni_chan)));
+ /* XXX bypass debug msgs */
+ break;
+ case IEEE80211_S_SCAN:
+ case IEEE80211_S_RUN:
+#ifdef IEEE80211_DEBUG
+ if (ieee80211_msg_debug(vap)) {
+ struct ieee80211_node *ni = vap->iv_bss;
+ ieee80211_note(vap,
+ "synchronized with %s ssid ",
+ ether_sprintf(ni->ni_bssid));
+ ieee80211_print_essid(ni->ni_essid,
+ ni->ni_esslen);
+ /* XXX MCS/HT */
+ printf(" channel %d start %uMb\n",
+ ieee80211_chan2ieee(ic, ic->ic_curchan),
+ IEEE80211_RATE2MBS(ni->ni_txrate));
+ }
+#endif
+ break;
+ default:
+ break;
+ }
+ /*
+ * Start/stop the authenticator. We delay until here
+ * to allow configuration to happen out of order.
+ */
+ if (vap->iv_auth->ia_attach != NULL) {
+ /* XXX check failure */
+ vap->iv_auth->ia_attach(vap);
+ } else if (vap->iv_auth->ia_detach != NULL) {
+ vap->iv_auth->ia_detach(vap);
+ }
+ ieee80211_node_authorize(vap->iv_bss);
+ break;
+ case IEEE80211_S_CSA:
+ if (ostate == IEEE80211_S_RUN && isbandchange(ic)) {
+ /*
+ * On a ``band change'' silently drop associated
+ * stations as they must re-associate before they
+ * can pass traffic (as otherwise protocol state
+ * such as capabilities and the negotiated rate
+ * set may/will be wrong).
+ */
+ ieee80211_iterate_nodes(&ic->ic_sta, sta_drop, vap);
+ }
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static void
+hostap_deliver_data(struct ieee80211vap *vap,
+ struct ieee80211_node *ni, struct mbuf *m)
+{
+ struct ether_header *eh = mtod(m, struct ether_header *);
+ struct ifnet *ifp = vap->iv_ifp;
+
+ /* clear driver/net80211 flags before passing up */
+ m->m_flags &= ~(M_80211_RX | M_MCAST | M_BCAST);
+
+ KASSERT(vap->iv_opmode == IEEE80211_M_HOSTAP,
+ ("gack, opmode %d", vap->iv_opmode));
+ /*
+ * Do accounting.
+ */
+ ifp->if_ipackets++;
+ IEEE80211_NODE_STAT(ni, rx_data);
+ IEEE80211_NODE_STAT_ADD(ni, rx_bytes, m->m_pkthdr.len);
+ if (ETHER_IS_MULTICAST(eh->ether_dhost)) {
+ m->m_flags |= M_MCAST; /* XXX M_BCAST? */
+ IEEE80211_NODE_STAT(ni, rx_mcast);
+ } else
+ IEEE80211_NODE_STAT(ni, rx_ucast);
+
+ /* perform as a bridge within the AP */
+ if ((vap->iv_flags & IEEE80211_F_NOBRIDGE) == 0) {
+ struct mbuf *mcopy = NULL;
+
+ if (m->m_flags & M_MCAST) {
+ mcopy = m_dup(m, M_DONTWAIT);
+ if (mcopy == NULL)
+ ifp->if_oerrors++;
+ else
+ mcopy->m_flags |= M_MCAST;
+ } else {
+ /*
+ * Check if the destination is associated with the
+ * same vap and authorized to receive traffic.
+ * Beware of traffic destined for the vap itself;
+ * sending it will not work; just let it be delivered
+ * normally.
+ */
+ struct ieee80211_node *sta = ieee80211_find_vap_node(
+ &vap->iv_ic->ic_sta, vap, eh->ether_dhost);
+ if (sta != NULL) {
+ if (ieee80211_node_is_authorized(sta)) {
+ /*
+ * Beware of sending to ourself; this
+ * needs to happen via the normal
+ * input path.
+ */
+ if (sta != vap->iv_bss) {
+ mcopy = m;
+ m = NULL;
+ }
+ } else {
+ vap->iv_stats.is_rx_unauth++;
+ IEEE80211_NODE_STAT(sta, rx_unauth);
+ }
+ ieee80211_free_node(sta);
+ }
+ }
+ if (mcopy != NULL) {
+ int err;
+
+ err = ifp->if_transmit(ifp, mcopy);
+ if (err) {
+ /* NB: mcopy is reclaimed by if_transmit on error */
+ } else {
+ ifp->if_opackets++;
+ }
+ }
+ }
+ if (m != NULL) {
+ /*
+ * Mark frame as coming from vap's interface.
+ */
+ m->m_pkthdr.rcvif = ifp;
+ if (m->m_flags & M_MCAST) {
+ /*
+ * Spam DWDS vap's w/ multicast traffic.
+ */
+ /* XXX only if dwds in use? */
+ ieee80211_dwds_mcast(vap, m);
+ }
+ if (ni->ni_vlan != 0) {
+ /* attach vlan tag */
+ m->m_pkthdr.ether_vtag = ni->ni_vlan;
+ m->m_flags |= M_VLANTAG;
+ }
+ ifp->if_input(ifp, m);
+ }
+}
+
+/*
+ * Decide if a received management frame should be
+ * printed when debugging is enabled. This filters some
+ * of the less interesting frames that come frequently
+ * (e.g. beacons).
+ */
+static __inline int
+doprint(struct ieee80211vap *vap, int subtype)
+{
+ switch (subtype) {
+ case IEEE80211_FC0_SUBTYPE_BEACON:
+ return (vap->iv_ic->ic_flags & IEEE80211_F_SCAN);
+ case IEEE80211_FC0_SUBTYPE_PROBE_REQ:
+ return 0;
+ }
+ return 1;
+}
+
+/*
+ * Process a received frame. The node associated with the sender
+ * should be supplied. If nothing was found in the node table then
+ * the caller is assumed to supply a reference to iv_bss instead.
+ * The RSSI and a timestamp are also supplied. The RSSI data is used
+ * during AP scanning to select an AP to associate with; it can have
+ * any units so long as values have consistent units and higher values
+ * mean ``better signal''. The receive timestamp is currently not used
+ * by the 802.11 layer.
+ */
+static int
+hostap_input(struct ieee80211_node *ni, struct mbuf *m, int rssi, int nf)
+{
+#define SEQ_LEQ(a,b) ((int)((a)-(b)) <= 0)
+#define HAS_SEQ(type) ((type & 0x4) == 0)
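+/*
+ * NB: SEQ_LEQ checks that a sequence+fragment number has not advanced
+ * past a previously seen value (used below to drop duplicate retries);
+ * HAS_SEQ is true for data and management frames since control frames
+ * carry no sequence control field.
+ */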
+ struct ieee80211vap *vap = ni->ni_vap;
+ struct ieee80211com *ic = ni->ni_ic;
+ struct ifnet *ifp = vap->iv_ifp;
+ struct ieee80211_frame *wh;
+ struct ieee80211_key *key;
+ struct ether_header *eh;
+ int hdrspace, need_tap = 1; /* mbuf needs to be tapped */
+ uint8_t dir, type, subtype, qos;
+ uint8_t *bssid;
+ uint16_t rxseq;
+
+ if (m->m_flags & M_AMPDU_MPDU) {
+ /*
+ * Fastpath for A-MPDU reorder q resubmission. Frames
+ * w/ M_AMPDU_MPDU marked have already passed through
+ * here but were received out of order and held on
+ * the reorder queue. When resubmitted they are marked
+ * with the M_AMPDU_MPDU flag and we can bypass most of
+ * the normal processing.
+ */
+ wh = mtod(m, struct ieee80211_frame *);
+ type = IEEE80211_FC0_TYPE_DATA;
+ dir = wh->i_fc[1] & IEEE80211_FC1_DIR_MASK;
+ subtype = IEEE80211_FC0_SUBTYPE_QOS;
+ hdrspace = ieee80211_hdrspace(ic, wh); /* XXX optimize? */
+ goto resubmit_ampdu;
+ }
+
+ KASSERT(ni != NULL, ("null node"));
+ ni->ni_inact = ni->ni_inact_reload;
+
+ type = -1; /* undefined */
+
+ if (m->m_pkthdr.len < sizeof(struct ieee80211_frame_min)) {
+ IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_ANY,
+ ni->ni_macaddr, NULL,
+ "too short (1): len %u", m->m_pkthdr.len);
+ vap->iv_stats.is_rx_tooshort++;
+ goto out;
+ }
+ /*
+ * Bit of a cheat here, we use a pointer for a 3-address
+ * frame format but don't reference fields beyond
+ * ieee80211_frame_min w/o first validating that the data is
+ * present.
+ */
+ wh = mtod(m, struct ieee80211_frame *);
+
+ if ((wh->i_fc[0] & IEEE80211_FC0_VERSION_MASK) !=
+ IEEE80211_FC0_VERSION_0) {
+ IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_ANY,
+ ni->ni_macaddr, NULL, "wrong version, fc %02x:%02x",
+ wh->i_fc[0], wh->i_fc[1]);
+ vap->iv_stats.is_rx_badversion++;
+ goto err;
+ }
+
+ dir = wh->i_fc[1] & IEEE80211_FC1_DIR_MASK;
+ type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
+ subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
+ if ((ic->ic_flags & IEEE80211_F_SCAN) == 0) {
+ if (dir != IEEE80211_FC1_DIR_NODS)
+ bssid = wh->i_addr1;
+ else if (type == IEEE80211_FC0_TYPE_CTL)
+ bssid = wh->i_addr1;
+ else {
+ if (m->m_pkthdr.len < sizeof(struct ieee80211_frame)) {
+ IEEE80211_DISCARD_MAC(vap,
+ IEEE80211_MSG_ANY, ni->ni_macaddr,
+ NULL, "too short (2): len %u",
+ m->m_pkthdr.len);
+ vap->iv_stats.is_rx_tooshort++;
+ goto out;
+ }
+ bssid = wh->i_addr3;
+ }
+ /*
+ * Validate the bssid.
+ */
+ if (!(type == IEEE80211_FC0_TYPE_MGT &&
+ subtype == IEEE80211_FC0_SUBTYPE_BEACON) &&
+ !IEEE80211_ADDR_EQ(bssid, vap->iv_bss->ni_bssid) &&
+ !IEEE80211_ADDR_EQ(bssid, ifp->if_broadcastaddr)) {
+ /* not interested in */
+ IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_INPUT,
+ bssid, NULL, "%s", "not to bss");
+ vap->iv_stats.is_rx_wrongbss++;
+ goto out;
+ }
+
+ IEEE80211_RSSI_LPF(ni->ni_avgrssi, rssi);
+ ni->ni_noise = nf;
+ if (HAS_SEQ(type)) {
+ uint8_t tid = ieee80211_gettid(wh);
+ if (IEEE80211_QOS_HAS_SEQ(wh) &&
+ TID_TO_WME_AC(tid) >= WME_AC_VI)
+ ic->ic_wme.wme_hipri_traffic++;
+ rxseq = le16toh(*(uint16_t *)wh->i_seq);
+ if ((ni->ni_flags & IEEE80211_NODE_HT) == 0 &&
+ (wh->i_fc[1] & IEEE80211_FC1_RETRY) &&
+ SEQ_LEQ(rxseq, ni->ni_rxseqs[tid])) {
+ /* duplicate, discard */
+ IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_INPUT,
+ bssid, "duplicate",
+ "seqno <%u,%u> fragno <%u,%u> tid %u",
+ rxseq >> IEEE80211_SEQ_SEQ_SHIFT,
+ ni->ni_rxseqs[tid] >>
+ IEEE80211_SEQ_SEQ_SHIFT,
+ rxseq & IEEE80211_SEQ_FRAG_MASK,
+ ni->ni_rxseqs[tid] &
+ IEEE80211_SEQ_FRAG_MASK,
+ tid);
+ vap->iv_stats.is_rx_dup++;
+ IEEE80211_NODE_STAT(ni, rx_dup);
+ goto out;
+ }
+ ni->ni_rxseqs[tid] = rxseq;
+ }
+ }
+
+ switch (type) {
+ case IEEE80211_FC0_TYPE_DATA:
+ hdrspace = ieee80211_hdrspace(ic, wh);
+ if (m->m_len < hdrspace &&
+ (m = m_pullup(m, hdrspace)) == NULL) {
+ IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_ANY,
+ ni->ni_macaddr, NULL,
+ "data too short: expecting %u", hdrspace);
+ vap->iv_stats.is_rx_tooshort++;
+ goto out; /* XXX */
+ }
+ if (!(dir == IEEE80211_FC1_DIR_TODS ||
+ (dir == IEEE80211_FC1_DIR_DSTODS &&
+ (vap->iv_flags & IEEE80211_F_DWDS)))) {
+ if (dir != IEEE80211_FC1_DIR_DSTODS) {
+ IEEE80211_DISCARD(vap,
+ IEEE80211_MSG_INPUT, wh, "data",
+ "incorrect dir 0x%x", dir);
+ } else {
+ IEEE80211_DISCARD(vap,
+ IEEE80211_MSG_INPUT |
+ IEEE80211_MSG_WDS, wh,
+ "4-address data",
+ "%s", "DWDS not enabled");
+ }
+ vap->iv_stats.is_rx_wrongdir++;
+ goto out;
+ }
+ /* check if source STA is associated */
+ if (ni == vap->iv_bss) {
+ IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT,
+ wh, "data", "%s", "unknown src");
+ ieee80211_send_error(ni, wh->i_addr2,
+ IEEE80211_FC0_SUBTYPE_DEAUTH,
+ IEEE80211_REASON_NOT_AUTHED);
+ vap->iv_stats.is_rx_notassoc++;
+ goto err;
+ }
+ if (ni->ni_associd == 0) {
+ IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT,
+ wh, "data", "%s", "unassoc src");
+ IEEE80211_SEND_MGMT(ni,
+ IEEE80211_FC0_SUBTYPE_DISASSOC,
+ IEEE80211_REASON_NOT_ASSOCED);
+ vap->iv_stats.is_rx_notassoc++;
+ goto err;
+ }
+
+ /*
+ * Check for power save state change.
+ * XXX out-of-order A-MPDU frames?
+ */
+ if (((wh->i_fc[1] & IEEE80211_FC1_PWR_MGT) ^
+ (ni->ni_flags & IEEE80211_NODE_PWR_MGT)))
+ ieee80211_node_pwrsave(ni,
+ wh->i_fc[1] & IEEE80211_FC1_PWR_MGT);
+ /*
+ * For 4-address packets handle WDS discovery
+ * notifications. Once a WDS link is setup frames
+ * are just delivered to the WDS vap (see below).
+ */
+ if (dir == IEEE80211_FC1_DIR_DSTODS && ni->ni_wdsvap == NULL) {
+ if (!ieee80211_node_is_authorized(ni)) {
+ IEEE80211_DISCARD(vap,
+ IEEE80211_MSG_INPUT |
+ IEEE80211_MSG_WDS, wh,
+ "4-address data",
+ "%s", "unauthorized port");
+ vap->iv_stats.is_rx_unauth++;
+ IEEE80211_NODE_STAT(ni, rx_unauth);
+ goto err;
+ }
+ ieee80211_dwds_discover(ni, m);
+ return type;
+ }
+
+ /*
+ * Handle A-MPDU re-ordering. If the frame is to be
+ * processed directly then ieee80211_ampdu_reorder
+ * will return 0; otherwise it has consumed the mbuf
+ * and we should do nothing more with it.
+ */
+ if ((m->m_flags & M_AMPDU) &&
+ ieee80211_ampdu_reorder(ni, m) != 0) {
+ m = NULL;
+ goto out;
+ }
+ resubmit_ampdu:
+
+ /*
+ * Handle privacy requirements. Note that we
+ * must not be preempted from here until after
+ * we (potentially) call ieee80211_crypto_demic;
+ * otherwise we may violate assumptions in the
+ * crypto cipher modules used to do delayed update
+ * of replay sequence numbers.
+ */
+ if (wh->i_fc[1] & IEEE80211_FC1_WEP) {
+ if ((vap->iv_flags & IEEE80211_F_PRIVACY) == 0) {
+ /*
+ * Discard encrypted frames when privacy is off.
+ */
+ IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT,
+ wh, "WEP", "%s", "PRIVACY off");
+ vap->iv_stats.is_rx_noprivacy++;
+ IEEE80211_NODE_STAT(ni, rx_noprivacy);
+ goto out;
+ }
+ key = ieee80211_crypto_decap(ni, m, hdrspace);
+ if (key == NULL) {
+ /* NB: stats+msgs handled in crypto_decap */
+ IEEE80211_NODE_STAT(ni, rx_wepfail);
+ goto out;
+ }
+ wh = mtod(m, struct ieee80211_frame *);
+ wh->i_fc[1] &= ~IEEE80211_FC1_WEP;
+ } else {
+ /* XXX M_WEP and IEEE80211_F_PRIVACY */
+ key = NULL;
+ }
+
+ /*
+ * Save QoS bits for use below--before we strip the header.
+ */
+ if (subtype == IEEE80211_FC0_SUBTYPE_QOS) {
+ qos = (dir == IEEE80211_FC1_DIR_DSTODS) ?
+ ((struct ieee80211_qosframe_addr4 *)wh)->i_qos[0] :
+ ((struct ieee80211_qosframe *)wh)->i_qos[0];
+ } else
+ qos = 0;
+
+ /*
+ * Next up, any fragmentation.
+ */
+ if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
+ m = ieee80211_defrag(ni, m, hdrspace);
+ if (m == NULL) {
+ /* Fragment dropped or frame not complete yet */
+ goto out;
+ }
+ }
+ wh = NULL; /* no longer valid, catch any uses */
+
+ /*
+ * Next strip any MSDU crypto bits.
+ */
+ if (key != NULL && !ieee80211_crypto_demic(vap, key, m, 0)) {
+ IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_INPUT,
+ ni->ni_macaddr, "data", "%s", "demic error");
+ vap->iv_stats.is_rx_demicfail++;
+ IEEE80211_NODE_STAT(ni, rx_demicfail);
+ goto out;
+ }
+ /* copy to listener after decrypt */
+ if (ieee80211_radiotap_active_vap(vap))
+ ieee80211_radiotap_rx(vap, m);
+ need_tap = 0;
+ /*
+ * Finally, strip the 802.11 header.
+ */
+ m = ieee80211_decap(vap, m, hdrspace);
+ if (m == NULL) {
+ /* XXX mask bit to check for both */
+ /* don't count Null data frames as errors */
+ if (subtype == IEEE80211_FC0_SUBTYPE_NODATA ||
+ subtype == IEEE80211_FC0_SUBTYPE_QOS_NULL)
+ goto out;
+ IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_INPUT,
+ ni->ni_macaddr, "data", "%s", "decap error");
+ vap->iv_stats.is_rx_decap++;
+ IEEE80211_NODE_STAT(ni, rx_decap);
+ goto err;
+ }
+ eh = mtod(m, struct ether_header *);
+ if (!ieee80211_node_is_authorized(ni)) {
+ /*
+ * Deny any non-PAE frames received prior to
+ * authorization. For open/shared-key
+ * authentication the port is marked authorized
+ * after authentication completes. For 802.1x
+ * the port is not marked authorized by the
+ * authenticator until the handshake has completed.
+ */
+ if (eh->ether_type != htons(ETHERTYPE_PAE)) {
+ IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_INPUT,
+ eh->ether_shost, "data",
+ "unauthorized port: ether type 0x%x len %u",
+ eh->ether_type, m->m_pkthdr.len);
+ vap->iv_stats.is_rx_unauth++;
+ IEEE80211_NODE_STAT(ni, rx_unauth);
+ goto err;
+ }
+ } else {
+ /*
+ * When denying unencrypted frames, discard
+ * any non-PAE frames received without encryption.
+ */
+ if ((vap->iv_flags & IEEE80211_F_DROPUNENC) &&
+ (key == NULL && (m->m_flags & M_WEP) == 0) &&
+ eh->ether_type != htons(ETHERTYPE_PAE)) {
+ /*
+ * Drop unencrypted frames.
+ */
+ vap->iv_stats.is_rx_unencrypted++;
+ IEEE80211_NODE_STAT(ni, rx_unencrypted);
+ goto out;
+ }
+ }
+ /* XXX require HT? */
+ if (qos & IEEE80211_QOS_AMSDU) {
+ m = ieee80211_decap_amsdu(ni, m);
+ if (m == NULL)
+ return IEEE80211_FC0_TYPE_DATA;
+ } else {
+#ifdef IEEE80211_SUPPORT_SUPERG
+ m = ieee80211_decap_fastframe(vap, ni, m);
+ if (m == NULL)
+ return IEEE80211_FC0_TYPE_DATA;
+#endif
+ }
+ if (dir == IEEE80211_FC1_DIR_DSTODS && ni->ni_wdsvap != NULL)
+ ieee80211_deliver_data(ni->ni_wdsvap, ni, m);
+ else
+ hostap_deliver_data(vap, ni, m);
+ return IEEE80211_FC0_TYPE_DATA;
+
+ case IEEE80211_FC0_TYPE_MGT:
+ vap->iv_stats.is_rx_mgmt++;
+ IEEE80211_NODE_STAT(ni, rx_mgmt);
+ if (dir != IEEE80211_FC1_DIR_NODS) {
+ IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT,
+ wh, "mgt", "incorrect dir 0x%x", dir);
+ vap->iv_stats.is_rx_wrongdir++;
+ goto err;
+ }
+ if (m->m_pkthdr.len < sizeof(struct ieee80211_frame)) {
+ IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_ANY,
+ ni->ni_macaddr, "mgt", "too short: len %u",
+ m->m_pkthdr.len);
+ vap->iv_stats.is_rx_tooshort++;
+ goto out;
+ }
+ if (IEEE80211_IS_MULTICAST(wh->i_addr2)) {
+ /* ensure return frames are unicast */
+ IEEE80211_DISCARD(vap, IEEE80211_MSG_ANY,
+ wh, NULL, "source is multicast: %s",
+ ether_sprintf(wh->i_addr2));
+ vap->iv_stats.is_rx_mgtdiscard++; /* XXX stat */
+ goto out;
+ }
+#ifdef IEEE80211_DEBUG
+ if ((ieee80211_msg_debug(vap) && doprint(vap, subtype)) ||
+ ieee80211_msg_dumppkts(vap)) {
+ if_printf(ifp, "received %s from %s rssi %d\n",
+ ieee80211_mgt_subtype_name[subtype >>
+ IEEE80211_FC0_SUBTYPE_SHIFT],
+ ether_sprintf(wh->i_addr2), rssi);
+ }
+#endif
+ if (wh->i_fc[1] & IEEE80211_FC1_WEP) {
+ if (subtype != IEEE80211_FC0_SUBTYPE_AUTH) {
+ /*
+ * Only shared key auth frames with a challenge
+ * should be encrypted, discard all others.
+ */
+ IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT,
+ wh, NULL,
+ "%s", "WEP set but not permitted");
+ vap->iv_stats.is_rx_mgtdiscard++; /* XXX */
+ goto out;
+ }
+ if ((vap->iv_flags & IEEE80211_F_PRIVACY) == 0) {
+ /*
+ * Discard encrypted frames when privacy is off.
+ */
+ IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT,
+ wh, NULL, "%s", "WEP set but PRIVACY off");
+ vap->iv_stats.is_rx_noprivacy++;
+ goto out;
+ }
+ hdrspace = ieee80211_hdrspace(ic, wh);
+ key = ieee80211_crypto_decap(ni, m, hdrspace);
+ if (key == NULL) {
+ /* NB: stats+msgs handled in crypto_decap */
+ goto out;
+ }
+ wh = mtod(m, struct ieee80211_frame *);
+ wh->i_fc[1] &= ~IEEE80211_FC1_WEP;
+ }
+ /*
+ * Pass the packet to radiotap before calling iv_recv_mgmt().
+ * Otherwise iv_recv_mgmt() might pass another packet to
+ * radiotap, resulting in out of order packet captures.
+ */
+ if (ieee80211_radiotap_active_vap(vap))
+ ieee80211_radiotap_rx(vap, m);
+ need_tap = 0;
+ vap->iv_recv_mgmt(ni, m, subtype, rssi, nf);
+ goto out;
+
+ case IEEE80211_FC0_TYPE_CTL:
+ vap->iv_stats.is_rx_ctl++;
+ IEEE80211_NODE_STAT(ni, rx_ctrl);
+ vap->iv_recv_ctl(ni, m, subtype);
+ goto out;
+ default:
+ IEEE80211_DISCARD(vap, IEEE80211_MSG_ANY,
+ wh, "bad", "frame type 0x%x", type);
+ /* should not come here */
+ break;
+ }
+err:
+ ifp->if_ierrors++;
+out:
+ if (m != NULL) {
+ if (need_tap && ieee80211_radiotap_active_vap(vap))
+ ieee80211_radiotap_rx(vap, m);
+ m_freem(m);
+ }
+ return type;
+#undef SEQ_LEQ
+}
+
+static void
+hostap_auth_open(struct ieee80211_node *ni, struct ieee80211_frame *wh,
+ int rssi, int nf, uint16_t seq, uint16_t status)
+{
+ struct ieee80211vap *vap = ni->ni_vap;
+
+ KASSERT(vap->iv_state == IEEE80211_S_RUN, ("state %d", vap->iv_state));
+
+ if (ni->ni_authmode == IEEE80211_AUTH_SHARED) {
+ IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_AUTH,
+ ni->ni_macaddr, "open auth",
+ "bad sta auth mode %u", ni->ni_authmode);
+ vap->iv_stats.is_rx_bad_auth++; /* XXX */
+ /*
+ * Clear any challenge text that may be there if
+ * a previous shared key auth failed and then an
+ * open auth is attempted.
+ */
+ if (ni->ni_challenge != NULL) {
+ free(ni->ni_challenge, M_80211_NODE);
+ ni->ni_challenge = NULL;
+ }
+ /* XXX hack to workaround calling convention */
+ ieee80211_send_error(ni, wh->i_addr2,
+ IEEE80211_FC0_SUBTYPE_AUTH,
+ (seq + 1) | (IEEE80211_STATUS_ALG<<16));
+ return;
+ }
+ if (seq != IEEE80211_AUTH_OPEN_REQUEST) {
+ vap->iv_stats.is_rx_bad_auth++;
+ return;
+ }
+ /* always accept open authentication requests */
+ if (ni == vap->iv_bss) {
+ ni = ieee80211_dup_bss(vap, wh->i_addr2);
+ if (ni == NULL)
+ return;
+ } else if ((ni->ni_flags & IEEE80211_NODE_AREF) == 0)
+ (void) ieee80211_ref_node(ni);
+ /*
+ * Mark the node as referenced to reflect that its
+ * reference count has been bumped to ensure it remains
+ * after the transaction completes.
+ */
+ ni->ni_flags |= IEEE80211_NODE_AREF;
+ /*
+ * Mark the node as requiring a valid association id
+ * before outbound traffic is permitted.
+ */
+ ni->ni_flags |= IEEE80211_NODE_ASSOCID;
+
+ if (vap->iv_acl != NULL &&
+ vap->iv_acl->iac_getpolicy(vap) == IEEE80211_MACCMD_POLICY_RADIUS) {
+ /*
+ * When the ACL policy is set to RADIUS we defer the
+ * authorization to a user agent. Dispatch an event,
+ * a subsequent MLME call will decide the fate of the
+ * station. If the user agent is not present then the
+ * node will be reclaimed due to inactivity.
+ */
+ IEEE80211_NOTE_MAC(vap,
+ IEEE80211_MSG_AUTH | IEEE80211_MSG_ACL, ni->ni_macaddr,
+ "%s", "station authentication defered (radius acl)");
+ ieee80211_notify_node_auth(ni);
+ } else {
+ IEEE80211_SEND_MGMT(ni, IEEE80211_FC0_SUBTYPE_AUTH, seq + 1);
+ IEEE80211_NOTE_MAC(vap,
+ IEEE80211_MSG_DEBUG | IEEE80211_MSG_AUTH, ni->ni_macaddr,
+ "%s", "station authenticated (open)");
+ /*
+ * When 802.1x is not in use mark the port
+ * authorized at this point so traffic can flow.
+ */
+ if (ni->ni_authmode != IEEE80211_AUTH_8021X)
+ ieee80211_node_authorize(ni);
+ }
+}
+
+static void
+hostap_auth_shared(struct ieee80211_node *ni, struct ieee80211_frame *wh,
+ uint8_t *frm, uint8_t *efrm, int rssi, int nf,
+ uint16_t seq, uint16_t status)
+{
+ struct ieee80211vap *vap = ni->ni_vap;
+ uint8_t *challenge;
+ int allocbs, estatus;
+
+ KASSERT(vap->iv_state == IEEE80211_S_RUN, ("state %d", vap->iv_state));
+
+ /*
+ * NB: this can happen as we allow pre-shared key
+ * authentication to be enabled w/o wep being turned
+ * on so that configuration of these can be done
+ * in any order. It may be better to enforce the
+ * ordering in which case this check would just be
+ * for sanity/consistency.
+ */
+ if ((vap->iv_flags & IEEE80211_F_PRIVACY) == 0) {
+ IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_AUTH,
+ ni->ni_macaddr, "shared key auth",
+ "%s", " PRIVACY is disabled");
+ estatus = IEEE80211_STATUS_ALG;
+ goto bad;
+ }
+ /*
+ * Pre-shared key authentication is evil; accept
+ * it only if explicitly configured (it is supported
+ * mainly for compatibility with clients like Mac OS X).
+ */
+ if (ni->ni_authmode != IEEE80211_AUTH_AUTO &&
+ ni->ni_authmode != IEEE80211_AUTH_SHARED) {
+ IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_AUTH,
+ ni->ni_macaddr, "shared key auth",
+ "bad sta auth mode %u", ni->ni_authmode);
+ vap->iv_stats.is_rx_bad_auth++; /* XXX maybe a unique error? */
+ estatus = IEEE80211_STATUS_ALG;
+ goto bad;
+ }
+
+ challenge = NULL;
+ if (frm + 1 < efrm) {
+ if ((frm[1] + 2) > (efrm - frm)) {
+ IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_AUTH,
+ ni->ni_macaddr, "shared key auth",
+ "ie %d/%d too long",
+ frm[0], (frm[1] + 2) - (efrm - frm));
+ vap->iv_stats.is_rx_bad_auth++;
+ estatus = IEEE80211_STATUS_CHALLENGE;
+ goto bad;
+ }
+ if (*frm == IEEE80211_ELEMID_CHALLENGE)
+ challenge = frm;
+ frm += frm[1] + 2;
+ }
+ switch (seq) {
+ case IEEE80211_AUTH_SHARED_CHALLENGE:
+ case IEEE80211_AUTH_SHARED_RESPONSE:
+ if (challenge == NULL) {
+ IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_AUTH,
+ ni->ni_macaddr, "shared key auth",
+ "%s", "no challenge");
+ vap->iv_stats.is_rx_bad_auth++;
+ estatus = IEEE80211_STATUS_CHALLENGE;
+ goto bad;
+ }
+ if (challenge[1] != IEEE80211_CHALLENGE_LEN) {
+ IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_AUTH,
+ ni->ni_macaddr, "shared key auth",
+ "bad challenge len %d", challenge[1]);
+ vap->iv_stats.is_rx_bad_auth++;
+ estatus = IEEE80211_STATUS_CHALLENGE;
+ goto bad;
+ }
+ default:
+ break;
+ }
+ switch (seq) {
+ case IEEE80211_AUTH_SHARED_REQUEST:
+ if (ni == vap->iv_bss) {
+ ni = ieee80211_dup_bss(vap, wh->i_addr2);
+ if (ni == NULL) {
+ /* NB: no way to return an error */
+ return;
+ }
+ allocbs = 1;
+ } else {
+ if ((ni->ni_flags & IEEE80211_NODE_AREF) == 0)
+ (void) ieee80211_ref_node(ni);
+ allocbs = 0;
+ }
+ /*
+ * Mark the node as referenced to reflect that its
+ * reference count has been bumped to ensure it remains
+ * after the transaction completes.
+ */
+ ni->ni_flags |= IEEE80211_NODE_AREF;
+ /*
+ * Mark the node as requiring a valid association id
+ * before outbound traffic is permitted.
+ */
+ ni->ni_flags |= IEEE80211_NODE_ASSOCID;
+ IEEE80211_RSSI_LPF(ni->ni_avgrssi, rssi);
+ ni->ni_noise = nf;
+ if (!ieee80211_alloc_challenge(ni)) {
+ /* NB: don't return error so they rexmit */
+ return;
+ }
+ get_random_bytes(ni->ni_challenge,
+ IEEE80211_CHALLENGE_LEN);
+ IEEE80211_NOTE(vap, IEEE80211_MSG_DEBUG | IEEE80211_MSG_AUTH,
+ ni, "shared key %sauth request", allocbs ? "" : "re");
+ /*
+ * When the ACL policy is set to RADIUS we defer the
+ * authorization to a user agent. Dispatch an event,
+ * a subsequent MLME call will decide the fate of the
+ * station. If the user agent is not present then the
+ * node will be reclaimed due to inactivity.
+ */
+ if (vap->iv_acl != NULL &&
+ vap->iv_acl->iac_getpolicy(vap) == IEEE80211_MACCMD_POLICY_RADIUS) {
+ IEEE80211_NOTE_MAC(vap,
+ IEEE80211_MSG_AUTH | IEEE80211_MSG_ACL,
+ ni->ni_macaddr,
+ "%s", "station authentication defered (radius acl)");
+ ieee80211_notify_node_auth(ni);
+ return;
+ }
+ break;
+ case IEEE80211_AUTH_SHARED_RESPONSE:
+ if (ni == vap->iv_bss) {
+ IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_AUTH,
+ ni->ni_macaddr, "shared key response",
+ "%s", "unknown station");
+ /* NB: don't send a response */
+ return;
+ }
+ if (ni->ni_challenge == NULL) {
+ IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_AUTH,
+ ni->ni_macaddr, "shared key response",
+ "%s", "no challenge recorded");
+ vap->iv_stats.is_rx_bad_auth++;
+ estatus = IEEE80211_STATUS_CHALLENGE;
+ goto bad;
+ }
+ if (memcmp(ni->ni_challenge, &challenge[2],
+ challenge[1]) != 0) {
+ IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_AUTH,
+ ni->ni_macaddr, "shared key response",
+ "%s", "challenge mismatch");
+ vap->iv_stats.is_rx_auth_fail++;
+ estatus = IEEE80211_STATUS_CHALLENGE;
+ goto bad;
+ }
+ IEEE80211_NOTE(vap, IEEE80211_MSG_DEBUG | IEEE80211_MSG_AUTH,
+ ni, "%s", "station authenticated (shared key)");
+ ieee80211_node_authorize(ni);
+ break;
+ default:
+ IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_AUTH,
+ ni->ni_macaddr, "shared key auth",
+ "bad seq %d", seq);
+ vap->iv_stats.is_rx_bad_auth++;
+ estatus = IEEE80211_STATUS_SEQUENCE;
+ goto bad;
+ }
+ IEEE80211_SEND_MGMT(ni, IEEE80211_FC0_SUBTYPE_AUTH, seq + 1);
+ return;
+bad:
+ /*
+ * Send an error response, but only when operating as an AP.
+ */
+ /* XXX hack to work around calling convention */
+ ieee80211_send_error(ni, wh->i_addr2,
+ IEEE80211_FC0_SUBTYPE_AUTH,
+ (seq + 1) | (estatus<<16));
+}
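+
+/*
+ * NB (annotation, not in the original source): the shared key
+ * handshake handled above is the four-frame exchange from
+ * 802.11-2007 8.2.2.2:
+ *
+ * seq 1  STA -> AP  authentication request
+ * seq 2  AP -> STA  128-byte challenge text
+ * seq 3  STA -> AP  challenge echoed back, WEP-encrypted
+ * seq 4  AP -> STA  result (the SEND_MGMT of seq + 1 above)
+ *
+ * As AP we only ever consume seq 1 and seq 3; anything else lands
+ * in the "bad seq" arm of the switch.
+ */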
+
+/*
+ * Convert a WPA cipher selector OUI to an internal
+ * cipher algorithm. Where appropriate we also
+ * record any key length.
+ */
+static int
+wpa_cipher(const uint8_t *sel, uint8_t *keylen)
+{
+#define WPA_SEL(x) (((x)<<24)|WPA_OUI)
+ uint32_t w = LE_READ_4(sel);
+
+ switch (w) {
+ case WPA_SEL(WPA_CSE_NULL):
+ return IEEE80211_CIPHER_NONE;
+ case WPA_SEL(WPA_CSE_WEP40):
+ if (keylen)
+ *keylen = 40 / NBBY;
+ return IEEE80211_CIPHER_WEP;
+ case WPA_SEL(WPA_CSE_WEP104):
+ if (keylen)
+ *keylen = 104 / NBBY;
+ return IEEE80211_CIPHER_WEP;
+ case WPA_SEL(WPA_CSE_TKIP):
+ return IEEE80211_CIPHER_TKIP;
+ case WPA_SEL(WPA_CSE_CCMP):
+ return IEEE80211_CIPHER_AES_CCM;
+ }
+ return 32; /* NB: so 1<< is discarded */
+#undef WPA_SEL
+}
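+
+/*
+ * NB (illustrative sketch, not referenced by the code above): a
+ * cipher selector on the wire is the 3-byte OUI followed by a
+ * 1-byte suite type; LE_READ_4 folds it into the 32-bit word that
+ * WPA_SEL() builds, with the suite type in the top byte.
+ */
+#if 0
+static uint32_t
+wpa_sel_demo(void)
+{
+ static const uint8_t tkip_sel[4] = { 0x00, 0x50, 0xf2, 0x02 };
+ /* yields 0x02f25000, i.e. WPA_SEL(WPA_CSE_TKIP) */
+ return LE_READ_4(tkip_sel);
+}
+#endif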
+
+/*
+ * Convert a WPA key management/authentication algorithm
+ * to an internal code.
+ */
+static int
+wpa_keymgmt(const uint8_t *sel)
+{
+#define WPA_SEL(x) (((x)<<24)|WPA_OUI)
+ uint32_t w = LE_READ_4(sel);
+
+ switch (w) {
+ case WPA_SEL(WPA_ASE_8021X_UNSPEC):
+ return WPA_ASE_8021X_UNSPEC;
+ case WPA_SEL(WPA_ASE_8021X_PSK):
+ return WPA_ASE_8021X_PSK;
+ case WPA_SEL(WPA_ASE_NONE):
+ return WPA_ASE_NONE;
+ }
+ return 0; /* NB: so it is discarded */
+#undef WPA_SEL
+}
+
+/*
+ * Parse a WPA information element to collect parameters.
+ * Note that we do not validate security parameters; that
+ * is handled by the authenticator; the parsing done here
+ * is just for internal use in making operational decisions.
+ */
+static int
+ieee80211_parse_wpa(struct ieee80211vap *vap, const uint8_t *frm,
+ struct ieee80211_rsnparms *rsn, const struct ieee80211_frame *wh)
+{
+ uint8_t len = frm[1];
+ uint32_t w;
+ int n;
+
+ /*
+ * Check the length once for fixed parts: OUI, type,
+ * version, mcast cipher, and 2 selector counts.
+ * Other, variable-length data must be checked separately.
+ */
+ if ((vap->iv_flags & IEEE80211_F_WPA1) == 0) {
+ IEEE80211_DISCARD_IE(vap,
+ IEEE80211_MSG_ELEMID | IEEE80211_MSG_WPA,
+ wh, "WPA", "not WPA, flags 0x%x", vap->iv_flags);
+ return IEEE80211_REASON_IE_INVALID;
+ }
+ if (len < 14) {
+ IEEE80211_DISCARD_IE(vap,
+ IEEE80211_MSG_ELEMID | IEEE80211_MSG_WPA,
+ wh, "WPA", "too short, len %u", len);
+ return IEEE80211_REASON_IE_INVALID;
+ }
+ frm += 6, len -= 4; /* NB: len is payload only */
+ /* NB: iswpaoui already validated the OUI and type */
+ w = LE_READ_2(frm);
+ if (w != WPA_VERSION) {
+ IEEE80211_DISCARD_IE(vap,
+ IEEE80211_MSG_ELEMID | IEEE80211_MSG_WPA,
+ wh, "WPA", "bad version %u", w);
+ return IEEE80211_REASON_IE_INVALID;
+ }
+ frm += 2, len -= 2;
+
+ memset(rsn, 0, sizeof(*rsn));
+
+ /* multicast/group cipher */
+ rsn->rsn_mcastcipher = wpa_cipher(frm, &rsn->rsn_mcastkeylen);
+ frm += 4, len -= 4;
+
+ /* unicast ciphers */
+ n = LE_READ_2(frm);
+ frm += 2, len -= 2;
+ if (len < n*4+2) {
+ IEEE80211_DISCARD_IE(vap,
+ IEEE80211_MSG_ELEMID | IEEE80211_MSG_WPA,
+ wh, "WPA", "ucast cipher data too short; len %u, n %u",
+ len, n);
+ return IEEE80211_REASON_IE_INVALID;
+ }
+ w = 0;
+ for (; n > 0; n--) {
+ w |= 1<<wpa_cipher(frm, &rsn->rsn_ucastkeylen);
+ frm += 4, len -= 4;
+ }
+ if (w & (1<<IEEE80211_CIPHER_TKIP))
+ rsn->rsn_ucastcipher = IEEE80211_CIPHER_TKIP;
+ else
+ rsn->rsn_ucastcipher = IEEE80211_CIPHER_AES_CCM;
+
+ /* key management algorithms */
+ n = LE_READ_2(frm);
+ frm += 2, len -= 2;
+ if (len < n*4) {
+ IEEE80211_DISCARD_IE(vap,
+ IEEE80211_MSG_ELEMID | IEEE80211_MSG_WPA,
+ wh, "WPA", "key mgmt alg data too short; len %u, n %u",
+ len, n);
+ return IEEE80211_REASON_IE_INVALID;
+ }
+ w = 0;
+ for (; n > 0; n--) {
+ w |= wpa_keymgmt(frm);
+ frm += 4, len -= 4;
+ }
+ if (w & WPA_ASE_8021X_UNSPEC)
+ rsn->rsn_keymgmt = WPA_ASE_8021X_UNSPEC;
+ else
+ rsn->rsn_keymgmt = WPA_ASE_8021X_PSK;
+
+ if (len > 2) /* optional capabilities */
+ rsn->rsn_caps = LE_READ_2(frm);
+
+ return 0;
+}
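+
+/*
+ * NB (annotation): byte layout of the WPA IE walked above; the
+ * id/len pair and the OUI/type were validated earlier by iswpaoui():
+ *
+ * [1] element id (0xdd)   [1] length
+ * [3] OUI 00:50:f2        [1] type (1)
+ * [2] version             [4] group cipher selector
+ * [2] pairwise count n    [4n] pairwise selectors
+ * [2] akm count m         [4m] akm selectors
+ * [2] capabilities (optional)
+ *
+ * which is why the fixed part must be at least 14 bytes and the
+ * first skip is frm += 6 (id, len, OUI, type).
+ */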
+
+/*
+ * Convert an RSN cipher selector OUI to an internal
+ * cipher algorithm. Where appropriate we also
+ * record any key length.
+ */
+static int
+rsn_cipher(const uint8_t *sel, uint8_t *keylen)
+{
+#define RSN_SEL(x) (((x)<<24)|RSN_OUI)
+ uint32_t w = LE_READ_4(sel);
+
+ switch (w) {
+ case RSN_SEL(RSN_CSE_NULL):
+ return IEEE80211_CIPHER_NONE;
+ case RSN_SEL(RSN_CSE_WEP40):
+ if (keylen)
+ *keylen = 40 / NBBY;
+ return IEEE80211_CIPHER_WEP;
+ case RSN_SEL(RSN_CSE_WEP104):
+ if (keylen)
+ *keylen = 104 / NBBY;
+ return IEEE80211_CIPHER_WEP;
+ case RSN_SEL(RSN_CSE_TKIP):
+ return IEEE80211_CIPHER_TKIP;
+ case RSN_SEL(RSN_CSE_CCMP):
+ return IEEE80211_CIPHER_AES_CCM;
+ case RSN_SEL(RSN_CSE_WRAP):
+ return IEEE80211_CIPHER_AES_OCB;
+ }
+ return 32; /* NB: so 1<< is discarded */
+#undef RSN_SEL
+}
+
+/*
+ * Convert an RSN key management/authentication algorithm
+ * to an internal code.
+ */
+static int
+rsn_keymgmt(const uint8_t *sel)
+{
+#define RSN_SEL(x) (((x)<<24)|RSN_OUI)
+ uint32_t w = LE_READ_4(sel);
+
+ switch (w) {
+ case RSN_SEL(RSN_ASE_8021X_UNSPEC):
+ return RSN_ASE_8021X_UNSPEC;
+ case RSN_SEL(RSN_ASE_8021X_PSK):
+ return RSN_ASE_8021X_PSK;
+ case RSN_SEL(RSN_ASE_NONE):
+ return RSN_ASE_NONE;
+ }
+ return 0; /* NB: so it is discarded */
+#undef RSN_SEL
+}
+
+/*
+ * Parse a WPA/RSN information element to collect parameters
+ * and validate the parameters against what has been
+ * configured for the system.
+ */
+static int
+ieee80211_parse_rsn(struct ieee80211vap *vap, const uint8_t *frm,
+ struct ieee80211_rsnparms *rsn, const struct ieee80211_frame *wh)
+{
+ uint8_t len = frm[1];
+ uint32_t w;
+ int n;
+
+ /*
+ * Check the length once for fixed parts:
+ * version, mcast cipher, and 2 selector counts.
+ * Other, variable-length data must be checked separately.
+ */
+ if ((vap->iv_flags & IEEE80211_F_WPA2) == 0) {
+ IEEE80211_DISCARD_IE(vap,
+ IEEE80211_MSG_ELEMID | IEEE80211_MSG_WPA,
+ wh, "WPA", "not RSN, flags 0x%x", vap->iv_flags);
+ return IEEE80211_REASON_IE_INVALID;
+ }
+ if (len < 10) {
+ IEEE80211_DISCARD_IE(vap,
+ IEEE80211_MSG_ELEMID | IEEE80211_MSG_WPA,
+ wh, "RSN", "too short, len %u", len);
+ return IEEE80211_REASON_IE_INVALID;
+ }
+ frm += 2;
+ w = LE_READ_2(frm);
+ if (w != RSN_VERSION) {
+ IEEE80211_DISCARD_IE(vap,
+ IEEE80211_MSG_ELEMID | IEEE80211_MSG_WPA,
+ wh, "RSN", "bad version %u", w);
+ return IEEE80211_REASON_IE_INVALID;
+ }
+ frm += 2, len -= 2;
+
+ memset(rsn, 0, sizeof(*rsn));
+
+ /* multicast/group cipher */
+ rsn->rsn_mcastcipher = rsn_cipher(frm, &rsn->rsn_mcastkeylen);
+ frm += 4, len -= 4;
+
+ /* unicast ciphers */
+ n = LE_READ_2(frm);
+ frm += 2, len -= 2;
+ if (len < n*4+2) {
+ IEEE80211_DISCARD_IE(vap,
+ IEEE80211_MSG_ELEMID | IEEE80211_MSG_WPA,
+ wh, "RSN", "ucast cipher data too short; len %u, n %u",
+ len, n);
+ return IEEE80211_REASON_IE_INVALID;
+ }
+ w = 0;
+ for (; n > 0; n--) {
+ w |= 1<<rsn_cipher(frm, &rsn->rsn_ucastkeylen);
+ frm += 4, len -= 4;
+ }
+ if (w & (1<<IEEE80211_CIPHER_TKIP))
+ rsn->rsn_ucastcipher = IEEE80211_CIPHER_TKIP;
+ else
+ rsn->rsn_ucastcipher = IEEE80211_CIPHER_AES_CCM;
+
+ /* key management algorithms */
+ n = LE_READ_2(frm);
+ frm += 2, len -= 2;
+ if (len < n*4) {
+ IEEE80211_DISCARD_IE(vap,
+ IEEE80211_MSG_ELEMID | IEEE80211_MSG_WPA,
+ wh, "RSN", "key mgmt alg data too short; len %u, n %u",
+ len, n);
+ return IEEE80211_REASON_IE_INVALID;
+ }
+ w = 0;
+ for (; n > 0; n--) {
+ w |= rsn_keymgmt(frm);
+ frm += 4, len -= 4;
+ }
+ if (w & RSN_ASE_8021X_UNSPEC)
+ rsn->rsn_keymgmt = RSN_ASE_8021X_UNSPEC;
+ else
+ rsn->rsn_keymgmt = RSN_ASE_8021X_PSK;
+
+ /* optional RSN capabilities */
+ if (len > 2)
+ rsn->rsn_caps = LE_READ_2(frm);
+ /* XXXPMKID */
+
+ return 0;
+}
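+
+/*
+ * NB (annotation): the RSN IE is a native element with no vendor
+ * OUI/type prefix, so its fixed part is 4 bytes shorter than WPA's
+ * (len >= 10 vs. 14) and parsing starts right after the id/len
+ * pair; a PMKID count/list may follow the capabilities, which is
+ * what the XXXPMKID note above refers to.
+ */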
+
+/*
+ * WPA/802.11i association request processing.
+ */
+static int
+wpa_assocreq(struct ieee80211_node *ni, struct ieee80211_rsnparms *rsnparms,
+ const struct ieee80211_frame *wh, const uint8_t *wpa,
+ const uint8_t *rsn, uint16_t capinfo)
+{
+ struct ieee80211vap *vap = ni->ni_vap;
+ uint8_t reason;
+ int badwparsn;
+
+ ni->ni_flags &= ~(IEEE80211_NODE_WPS|IEEE80211_NODE_TSN);
+ if (wpa == NULL && rsn == NULL) {
+ if (vap->iv_flags_ext & IEEE80211_FEXT_WPS) {
+ /*
+ * Wi-Fi Protected Setup (WPS) permits
+ * clients to associate and pass EAPOL frames
+ * to establish initial credentials.
+ */
+ ni->ni_flags |= IEEE80211_NODE_WPS;
+ return 1;
+ }
+ if ((vap->iv_flags_ext & IEEE80211_FEXT_TSN) &&
+ (capinfo & IEEE80211_CAPINFO_PRIVACY)) {
+ /*
+ * Transitional Security Network. Permits clients
+ * to associate and use WEP while WPA is configured.
+ */
+ ni->ni_flags |= IEEE80211_NODE_TSN;
+ return 1;
+ }
+ IEEE80211_DISCARD(vap, IEEE80211_MSG_ASSOC | IEEE80211_MSG_WPA,
+ wh, NULL, "%s", "no WPA/RSN IE in association request");
+ vap->iv_stats.is_rx_assoc_badwpaie++;
+ reason = IEEE80211_REASON_IE_INVALID;
+ goto bad;
+ }
+ /* assert right association security credentials */
+ badwparsn = 0; /* NB: to silence compiler */
+ switch (vap->iv_flags & IEEE80211_F_WPA) {
+ case IEEE80211_F_WPA1:
+ badwparsn = (wpa == NULL);
+ break;
+ case IEEE80211_F_WPA2:
+ badwparsn = (rsn == NULL);
+ break;
+ case IEEE80211_F_WPA1|IEEE80211_F_WPA2:
+ badwparsn = (wpa == NULL && rsn == NULL);
+ break;
+ }
+ if (badwparsn) {
+ IEEE80211_DISCARD(vap, IEEE80211_MSG_ASSOC | IEEE80211_MSG_WPA,
+ wh, NULL,
+ "%s", "missing WPA/RSN IE in association request");
+ vap->iv_stats.is_rx_assoc_badwpaie++;
+ reason = IEEE80211_REASON_IE_INVALID;
+ goto bad;
+ }
+ /*
+ * Parse WPA/RSN information element.
+ */
+ if (wpa != NULL)
+ reason = ieee80211_parse_wpa(vap, wpa, rsnparms, wh);
+ else
+ reason = ieee80211_parse_rsn(vap, rsn, rsnparms, wh);
+ if (reason != 0) {
+ /* XXX distinguish WPA/RSN? */
+ vap->iv_stats.is_rx_assoc_badwpaie++;
+ goto bad;
+ }
+ IEEE80211_NOTE(vap, IEEE80211_MSG_ASSOC | IEEE80211_MSG_WPA, ni,
+ "%s ie: mc %u/%u uc %u/%u key %u caps 0x%x",
+ wpa != NULL ? "WPA" : "RSN",
+ rsnparms->rsn_mcastcipher, rsnparms->rsn_mcastkeylen,
+ rsnparms->rsn_ucastcipher, rsnparms->rsn_ucastkeylen,
+ rsnparms->rsn_keymgmt, rsnparms->rsn_caps);
+
+ return 1;
+bad:
+ ieee80211_node_deauth(ni, reason);
+ return 0;
+}
+
+/* XXX find a better place for definition */
+struct l2_update_frame {
+ struct ether_header eh;
+ uint8_t dsap;
+ uint8_t ssap;
+ uint8_t control;
+ uint8_t xid[3];
+} __packed;
+
+/*
+ * Deliver a TGf L2UF frame on behalf of a station.
+ * This primes any bridge when the station is roaming
+ * between APs on the same wired network.
+ */
+static void
+ieee80211_deliver_l2uf(struct ieee80211_node *ni)
+{
+ struct ieee80211vap *vap = ni->ni_vap;
+ struct ifnet *ifp = vap->iv_ifp;
+ struct mbuf *m;
+ struct l2_update_frame *l2uf;
+ struct ether_header *eh;
+
+ m = m_gethdr(M_NOWAIT, MT_DATA);
+ if (m == NULL) {
+ IEEE80211_NOTE(vap, IEEE80211_MSG_ASSOC, ni,
+ "%s", "no mbuf for l2uf frame");
+ vap->iv_stats.is_rx_nobuf++; /* XXX not right */
+ return;
+ }
+ l2uf = mtod(m, struct l2_update_frame *);
+ eh = &l2uf->eh;
+ /* dst: Broadcast address */
+ IEEE80211_ADDR_COPY(eh->ether_dhost, ifp->if_broadcastaddr);
+ /* src: associated STA */
+ IEEE80211_ADDR_COPY(eh->ether_shost, ni->ni_macaddr);
+ eh->ether_type = htons(sizeof(*l2uf) - sizeof(*eh));
+
+ l2uf->dsap = 0;
+ l2uf->ssap = 0;
+ l2uf->control = 0xf5;
+ l2uf->xid[0] = 0x81;
+ l2uf->xid[1] = 0x80;
+ l2uf->xid[2] = 0x00;
+
+ m->m_pkthdr.len = m->m_len = sizeof(*l2uf);
+ hostap_deliver_data(vap, ni, m);
+}
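+
+/*
+ * NB (illustrative sketch, not used by the code above): the same
+ * XID frame built in a flat buffer, e.g. for inspection in a unit
+ * test; sta_mac is a placeholder for the roaming station's address.
+ */
+#if 0
+static void
+l2uf_fill(uint8_t buf[ETHER_HDR_LEN + 6], const uint8_t *sta_mac)
+{
+ struct ether_header *beh = (struct ether_header *)buf;
+
+ memset(beh->ether_dhost, 0xff, ETHER_ADDR_LEN); /* broadcast */
+ memcpy(beh->ether_shost, sta_mac, ETHER_ADDR_LEN);
+ beh->ether_type = htons(6);   /* 802.3 length of LLC payload */
+ buf[ETHER_HDR_LEN + 0] = 0x00; /* dsap */
+ buf[ETHER_HDR_LEN + 1] = 0x00; /* ssap */
+ buf[ETHER_HDR_LEN + 2] = 0xf5; /* LLC XID */
+ buf[ETHER_HDR_LEN + 3] = 0x81; /* canonical TGf L2UF payload */
+ buf[ETHER_HDR_LEN + 4] = 0x80;
+ buf[ETHER_HDR_LEN + 5] = 0x00;
+}
+#endif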
+
+static void
+ratesetmismatch(struct ieee80211_node *ni, const struct ieee80211_frame *wh,
+ int reassoc, int resp, const char *tag, int rate)
+{
+ IEEE80211_NOTE_MAC(ni->ni_vap, IEEE80211_MSG_ANY, wh->i_addr2,
+ "deny %s request, %s rate set mismatch, rate/MCS %d",
+ reassoc ? "reassoc" : "assoc", tag, rate & IEEE80211_RATE_VAL);
+ IEEE80211_SEND_MGMT(ni, resp, IEEE80211_STATUS_BASIC_RATE);
+ ieee80211_node_leave(ni);
+}
+
+static void
+capinfomismatch(struct ieee80211_node *ni, const struct ieee80211_frame *wh,
+ int reassoc, int resp, const char *tag, int capinfo)
+{
+ struct ieee80211vap *vap = ni->ni_vap;
+
+ IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_ANY, wh->i_addr2,
+ "deny %s request, %s mismatch 0x%x",
+ reassoc ? "reassoc" : "assoc", tag, capinfo);
+ IEEE80211_SEND_MGMT(ni, resp, IEEE80211_STATUS_CAPINFO);
+ ieee80211_node_leave(ni);
+ vap->iv_stats.is_rx_assoc_capmismatch++;
+}
+
+static void
+htcapmismatch(struct ieee80211_node *ni, const struct ieee80211_frame *wh,
+ int reassoc, int resp)
+{
+ IEEE80211_NOTE_MAC(ni->ni_vap, IEEE80211_MSG_ANY, wh->i_addr2,
+ "deny %s request, %s missing HT ie", reassoc ? "reassoc" : "assoc");
+ /* XXX no better code */
+ IEEE80211_SEND_MGMT(ni, resp, IEEE80211_STATUS_MISSING_HT_CAPS);
+ ieee80211_node_leave(ni);
+}
+
+static void
+authalgreject(struct ieee80211_node *ni, const struct ieee80211_frame *wh,
+ int algo, int seq, int status)
+{
+ struct ieee80211vap *vap = ni->ni_vap;
+
+ IEEE80211_DISCARD(vap, IEEE80211_MSG_ANY,
+ wh, NULL, "unsupported alg %d", algo);
+ vap->iv_stats.is_rx_auth_unsupported++;
+ ieee80211_send_error(ni, wh->i_addr2, IEEE80211_FC0_SUBTYPE_AUTH,
+ seq | (status << 16));
+}
+
+static __inline int
+ishtmixed(const uint8_t *ie)
+{
+ const struct ieee80211_ie_htinfo *ht =
+ (const struct ieee80211_ie_htinfo *) ie;
+ return (ht->hi_byte2 & IEEE80211_HTINFO_OPMODE) ==
+ IEEE80211_HTINFO_OPMODE_MIXED;
+}
+
+static int
+is11bclient(const uint8_t *rates, const uint8_t *xrates)
+{
+ static const uint32_t brates = (1<<2*1)|(1<<2*2)|(1<<11)|(1<<2*11);
+ int i;
+
+ /* NB: the 11b clients we care about will not have xrates */
+ if (xrates != NULL || rates == NULL)
+ return 0;
+ for (i = 0; i < rates[1]; i++) {
+ int r = rates[2+i] & IEEE80211_RATE_VAL;
+ if (r > 2*11 || ((1<<r) & brates) == 0)
+ return 0;
+ }
+ return 1;
+}
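+
+/*
+ * NB (annotation): rate bytes are in 500 kb/s units, so the 11b set
+ * 1/2/5.5/11 Mb/s appears as 2, 4, 11 and 22 and brates above has
+ * exactly those four bits set; a request that also lists, say, 12
+ * (6 Mb/s) is not classed as an 11b-only client.
+ */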
+
+static void
+hostap_recv_mgmt(struct ieee80211_node *ni, struct mbuf *m0,
+ int subtype, int rssi, int nf)
+{
+ struct ieee80211vap *vap = ni->ni_vap;
+ struct ieee80211com *ic = ni->ni_ic;
+ struct ieee80211_frame *wh;
+ uint8_t *frm, *efrm, *sfrm;
+ uint8_t *ssid, *rates, *xrates, *wpa, *rsn, *wme, *ath, *htcap;
+ int reassoc, resp;
+ uint8_t rate;
+
+ wh = mtod(m0, struct ieee80211_frame *);
+ frm = (uint8_t *)&wh[1];
+ efrm = mtod(m0, uint8_t *) + m0->m_len;
+ switch (subtype) {
+ case IEEE80211_FC0_SUBTYPE_PROBE_RESP:
+ case IEEE80211_FC0_SUBTYPE_BEACON: {
+ struct ieee80211_scanparams scan;
+ /*
+ * We process beacon/probe response frames when scanning;
+ * otherwise we check beacon frames for overlapping non-ERP
+ * BSS in 11g and/or overlapping legacy BSS when in HT.
+ */
+ if ((ic->ic_flags & IEEE80211_F_SCAN) == 0 &&
+ subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP) {
+ vap->iv_stats.is_rx_mgtdiscard++;
+ return;
+ }
+ /* NB: accept off-channel frames */
+ if (ieee80211_parse_beacon(ni, m0, &scan) &~ IEEE80211_BPARSE_OFFCHAN)
+ return;
+ /*
+ * Count frame now that we know it's to be processed.
+ */
+ if (subtype == IEEE80211_FC0_SUBTYPE_BEACON) {
+ vap->iv_stats.is_rx_beacon++; /* XXX remove */
+ IEEE80211_NODE_STAT(ni, rx_beacons);
+ } else
+ IEEE80211_NODE_STAT(ni, rx_proberesp);
+ /*
+ * If scanning, just pass information to the scan module.
+ */
+ if (ic->ic_flags & IEEE80211_F_SCAN) {
+ if (scan.status == 0 && /* NB: on channel */
+ (ic->ic_flags_ext & IEEE80211_FEXT_PROBECHAN)) {
+ /*
+ * Actively scanning a channel marked passive;
+ * send a probe request now that we know there
+ * is 802.11 traffic present.
+ *
+ * XXX check if the beacon we recv'd gives
+ * us what we need and suppress the probe req
+ */
+ ieee80211_probe_curchan(vap, 1);
+ ic->ic_flags_ext &= ~IEEE80211_FEXT_PROBECHAN;
+ }
+ ieee80211_add_scan(vap, &scan, wh, subtype, rssi, nf);
+ return;
+ }
+ /*
+ * Check beacon for overlapping bss w/ non-ERP stations.
+ * If we detect one and protection is configured but not
+ * enabled, enable it and start a timer that'll bring us
+ * out if we stop seeing the bss.
+ */
+ if (IEEE80211_IS_CHAN_ANYG(ic->ic_curchan) &&
+ scan.status == 0 && /* NB: on-channel */
+ ((scan.erp & 0x100) == 0 || /* NB: no ERP, 11b sta*/
+ (scan.erp & IEEE80211_ERP_NON_ERP_PRESENT))) {
+ ic->ic_lastnonerp = ticks;
+ ic->ic_flags_ext |= IEEE80211_FEXT_NONERP_PR;
+ if (ic->ic_protmode != IEEE80211_PROT_NONE &&
+ (ic->ic_flags & IEEE80211_F_USEPROT) == 0) {
+ IEEE80211_NOTE_FRAME(vap,
+ IEEE80211_MSG_ASSOC, wh,
+ "non-ERP present on channel %d "
+ "(saw erp 0x%x from channel %d), "
+ "enable use of protection",
+ ic->ic_curchan->ic_ieee,
+ scan.erp, scan.chan);
+ ic->ic_flags |= IEEE80211_F_USEPROT;
+ ieee80211_notify_erp(ic);
+ }
+ }
+ /*
+ * Check beacon for non-HT station on HT channel
+ * and update HT BSS occupancy as appropriate.
+ */
+ if (IEEE80211_IS_CHAN_HT(ic->ic_curchan)) {
+ if (scan.status & IEEE80211_BPARSE_OFFCHAN) {
+ /*
+ * Off control channel; only check frames
+ * that come in the extension channel when
+ * operating w/ HT40.
+ */
+ if (!IEEE80211_IS_CHAN_HT40(ic->ic_curchan))
+ break;
+ if (scan.chan != ic->ic_curchan->ic_extieee)
+ break;
+ }
+ if (scan.htinfo == NULL) {
+ ieee80211_htprot_update(ic,
+ IEEE80211_HTINFO_OPMODE_PROTOPT |
+ IEEE80211_HTINFO_NONHT_PRESENT);
+ } else if (ishtmixed(scan.htinfo)) {
+ /* XXX? take NONHT_PRESENT from beacon? */
+ ieee80211_htprot_update(ic,
+ IEEE80211_HTINFO_OPMODE_MIXED |
+ IEEE80211_HTINFO_NONHT_PRESENT);
+ }
+ }
+ break;
+ }
+
+ case IEEE80211_FC0_SUBTYPE_PROBE_REQ:
+ if (vap->iv_state != IEEE80211_S_RUN) {
+ vap->iv_stats.is_rx_mgtdiscard++;
+ return;
+ }
+ /*
+ * prreq frame format
+ * [tlv] ssid
+ * [tlv] supported rates
+ * [tlv] extended supported rates
+ */
+ ssid = rates = xrates = NULL;
+ sfrm = frm;
+ while (efrm - frm > 1) {
+ IEEE80211_VERIFY_LENGTH(efrm - frm, frm[1] + 2, return);
+ switch (*frm) {
+ case IEEE80211_ELEMID_SSID:
+ ssid = frm;
+ break;
+ case IEEE80211_ELEMID_RATES:
+ rates = frm;
+ break;
+ case IEEE80211_ELEMID_XRATES:
+ xrates = frm;
+ break;
+ }
+ frm += frm[1] + 2;
+ }
+ IEEE80211_VERIFY_ELEMENT(rates, IEEE80211_RATE_MAXSIZE, return);
+ if (xrates != NULL)
+ IEEE80211_VERIFY_ELEMENT(xrates,
+ IEEE80211_RATE_MAXSIZE - rates[1], return);
+ IEEE80211_VERIFY_ELEMENT(ssid, IEEE80211_NWID_LEN, return);
+ IEEE80211_VERIFY_SSID(vap->iv_bss, ssid, return);
+ if ((vap->iv_flags & IEEE80211_F_HIDESSID) && ssid[1] == 0) {
+ IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT,
+ wh, NULL,
+ "%s", "no ssid with ssid suppression enabled");
+ vap->iv_stats.is_rx_ssidmismatch++; /*XXX*/
+ return;
+ }
+
+ /* XXX find a better class or define its own */
+ IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_INPUT, wh->i_addr2,
+ "%s", "recv probe req");
+ /*
+ * Some legacy 11b clients cannot hack a complete
+ * probe response frame. When the request includes
+ * only a bare-bones rate set, communicate this to
+ * the transmit side.
+ */
+ ieee80211_send_proberesp(vap, wh->i_addr2,
+ is11bclient(rates, xrates) ? IEEE80211_SEND_LEGACY_11B : 0);
+ break;
+
+ case IEEE80211_FC0_SUBTYPE_AUTH: {
+ uint16_t algo, seq, status;
+
+ if (vap->iv_state != IEEE80211_S_RUN) {
+ vap->iv_stats.is_rx_mgtdiscard++;
+ return;
+ }
+ if (!IEEE80211_ADDR_EQ(wh->i_addr3, vap->iv_bss->ni_bssid)) {
+ IEEE80211_DISCARD(vap, IEEE80211_MSG_ANY,
+ wh, NULL, "%s", "wrong bssid");
+ vap->iv_stats.is_rx_wrongbss++; /*XXX unique stat?*/
+ return;
+ }
+ /*
+ * auth frame format
+ * [2] algorithm
+ * [2] sequence
+ * [2] status
+ * [tlv*] challenge
+ */
+ IEEE80211_VERIFY_LENGTH(efrm - frm, 6, return);
+ algo = le16toh(*(uint16_t *)frm);
+ seq = le16toh(*(uint16_t *)(frm + 2));
+ status = le16toh(*(uint16_t *)(frm + 4));
+ IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_AUTH, wh->i_addr2,
+ "recv auth frame with algorithm %d seq %d", algo, seq);
+ /*
+ * Consult the ACL policy module if setup.
+ */
+ if (vap->iv_acl != NULL &&
+ !vap->iv_acl->iac_check(vap, wh->i_addr2)) {
+ IEEE80211_DISCARD(vap, IEEE80211_MSG_ACL,
+ wh, NULL, "%s", "disallowed by ACL");
+ vap->iv_stats.is_rx_acl++;
+ ieee80211_send_error(ni, wh->i_addr2,
+ IEEE80211_FC0_SUBTYPE_AUTH,
+ (seq+1) | (IEEE80211_STATUS_UNSPECIFIED<<16));
+ return;
+ }
+ if (vap->iv_flags & IEEE80211_F_COUNTERM) {
+ IEEE80211_DISCARD(vap,
+ IEEE80211_MSG_AUTH | IEEE80211_MSG_CRYPTO,
+ wh, NULL, "%s", "TKIP countermeasures enabled");
+ vap->iv_stats.is_rx_auth_countermeasures++;
+ ieee80211_send_error(ni, wh->i_addr2,
+ IEEE80211_FC0_SUBTYPE_AUTH,
+ IEEE80211_REASON_MIC_FAILURE);
+ return;
+ }
+ if (algo == IEEE80211_AUTH_ALG_SHARED)
+ hostap_auth_shared(ni, wh, frm + 6, efrm, rssi, nf,
+ seq, status);
+ else if (algo == IEEE80211_AUTH_ALG_OPEN)
+ hostap_auth_open(ni, wh, rssi, nf, seq, status);
+ else if (algo == IEEE80211_AUTH_ALG_LEAP) {
+ authalgreject(ni, wh, algo,
+ seq+1, IEEE80211_STATUS_ALG);
+ return;
+ } else {
+ /*
+ * We assume that an unknown algorithm is the result
+ * of a decryption failure on a shared key auth frame;
+ * return a status code appropriate for that instead
+ * of IEEE80211_STATUS_ALG.
+ *
+ * NB: a seq# of 4 is intentional; the decrypted
+ * frame likely has a bogus seq value.
+ */
+ authalgreject(ni, wh, algo,
+ 4, IEEE80211_STATUS_CHALLENGE);
+ return;
+ }
+ break;
+ }
+
+ case IEEE80211_FC0_SUBTYPE_ASSOC_REQ:
+ case IEEE80211_FC0_SUBTYPE_REASSOC_REQ: {
+ uint16_t capinfo, lintval;
+ struct ieee80211_rsnparms rsnparms;
+
+ if (vap->iv_state != IEEE80211_S_RUN) {
+ vap->iv_stats.is_rx_mgtdiscard++;
+ return;
+ }
+ if (!IEEE80211_ADDR_EQ(wh->i_addr3, vap->iv_bss->ni_bssid)) {
+ IEEE80211_DISCARD(vap, IEEE80211_MSG_ANY,
+ wh, NULL, "%s", "wrong bssid");
+ vap->iv_stats.is_rx_assoc_bss++;
+ return;
+ }
+ if (subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) {
+ reassoc = 1;
+ resp = IEEE80211_FC0_SUBTYPE_REASSOC_RESP;
+ } else {
+ reassoc = 0;
+ resp = IEEE80211_FC0_SUBTYPE_ASSOC_RESP;
+ }
+ if (ni == vap->iv_bss) {
+ IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_ANY, wh->i_addr2,
+ "deny %s request, sta not authenticated",
+ reassoc ? "reassoc" : "assoc");
+ ieee80211_send_error(ni, wh->i_addr2,
+ IEEE80211_FC0_SUBTYPE_DEAUTH,
+ IEEE80211_REASON_ASSOC_NOT_AUTHED);
+ vap->iv_stats.is_rx_assoc_notauth++;
+ return;
+ }
+
+ /*
+ * asreq frame format
+ * [2] capability information
+ * [2] listen interval
+ * [6*] current AP address (reassoc only)
+ * [tlv] ssid
+ * [tlv] supported rates
+ * [tlv] extended supported rates
+ * [tlv] WPA or RSN
+ * [tlv] HT capabilities
+ * [tlv] Atheros capabilities
+ */
+ IEEE80211_VERIFY_LENGTH(efrm - frm, (reassoc ? 10 : 4), return);
+ capinfo = le16toh(*(uint16_t *)frm); frm += 2;
+ lintval = le16toh(*(uint16_t *)frm); frm += 2;
+ if (reassoc)
+ frm += 6; /* ignore current AP info */
+ ssid = rates = xrates = wpa = rsn = wme = ath = htcap = NULL;
+ sfrm = frm;
+ while (efrm - frm > 1) {
+ IEEE80211_VERIFY_LENGTH(efrm - frm, frm[1] + 2, return);
+ switch (*frm) {
+ case IEEE80211_ELEMID_SSID:
+ ssid = frm;
+ break;
+ case IEEE80211_ELEMID_RATES:
+ rates = frm;
+ break;
+ case IEEE80211_ELEMID_XRATES:
+ xrates = frm;
+ break;
+ case IEEE80211_ELEMID_RSN:
+ rsn = frm;
+ break;
+ case IEEE80211_ELEMID_HTCAP:
+ htcap = frm;
+ break;
+ case IEEE80211_ELEMID_VENDOR:
+ if (iswpaoui(frm))
+ wpa = frm;
+ else if (iswmeinfo(frm))
+ wme = frm;
+#ifdef IEEE80211_SUPPORT_SUPERG
+ else if (isatherosoui(frm))
+ ath = frm;
+#endif
+ else if (vap->iv_flags_ht & IEEE80211_FHT_HTCOMPAT) {
+ if (ishtcapoui(frm) && htcap == NULL)
+ htcap = frm;
+ }
+ break;
+ }
+ frm += frm[1] + 2;
+ }
+ IEEE80211_VERIFY_ELEMENT(rates, IEEE80211_RATE_MAXSIZE, return);
+ if (xrates != NULL)
+ IEEE80211_VERIFY_ELEMENT(xrates,
+ IEEE80211_RATE_MAXSIZE - rates[1], return);
+ IEEE80211_VERIFY_ELEMENT(ssid, IEEE80211_NWID_LEN, return);
+ IEEE80211_VERIFY_SSID(vap->iv_bss, ssid, return);
+ if (htcap != NULL) {
+ IEEE80211_VERIFY_LENGTH(htcap[1],
+ htcap[0] == IEEE80211_ELEMID_VENDOR ?
+ 4 + sizeof(struct ieee80211_ie_htcap)-2 :
+ sizeof(struct ieee80211_ie_htcap)-2,
+ return); /* XXX just NULL out? */
+ }
+
+ if ((vap->iv_flags & IEEE80211_F_WPA) &&
+ !wpa_assocreq(ni, &rsnparms, wh, wpa, rsn, capinfo))
+ return;
+ /* discard challenge after association */
+ if (ni->ni_challenge != NULL) {
+ free(ni->ni_challenge, M_80211_NODE);
+ ni->ni_challenge = NULL;
+ }
+ /* NB: 802.11 spec says to ignore station's privacy bit */
+ if ((capinfo & IEEE80211_CAPINFO_ESS) == 0) {
+ capinfomismatch(ni, wh, reassoc, resp,
+ "capability", capinfo);
+ return;
+ }
+ /*
+ * Disallow re-associate w/ invalid slot time setting.
+ */
+ if (ni->ni_associd != 0 &&
+ IEEE80211_IS_CHAN_ANYG(ic->ic_bsschan) &&
+ ((ni->ni_capinfo ^ capinfo) & IEEE80211_CAPINFO_SHORT_SLOTTIME)) {
+ capinfomismatch(ni, wh, reassoc, resp,
+ "slot time", capinfo);
+ return;
+ }
+ rate = ieee80211_setup_rates(ni, rates, xrates,
+ IEEE80211_F_DOSORT | IEEE80211_F_DOFRATE |
+ IEEE80211_F_DONEGO | IEEE80211_F_DODEL);
+ if (rate & IEEE80211_RATE_BASIC) {
+ ratesetmismatch(ni, wh, reassoc, resp, "legacy", rate);
+ vap->iv_stats.is_rx_assoc_norate++;
+ return;
+ }
+ /*
+ * If constrained to 11g-only stations reject an
+ * 11b-only station. We cheat a bit here by looking
+ * at the max negotiated xmit rate and assuming anyone
+ * with a best rate <24Mb/s is an 11b station.
+ */
+ if ((vap->iv_flags & IEEE80211_F_PUREG) && rate < 48) {
+ ratesetmismatch(ni, wh, reassoc, resp, "11g", rate);
+ vap->iv_stats.is_rx_assoc_norate++;
+ return;
+ }
+ /*
+ * Do HT rate set handling and setup HT node state.
+ */
+ ni->ni_chan = vap->iv_bss->ni_chan;
+ if (IEEE80211_IS_CHAN_HT(ni->ni_chan) && htcap != NULL) {
+ rate = ieee80211_setup_htrates(ni, htcap,
+ IEEE80211_F_DOFMCS | IEEE80211_F_DONEGO |
+ IEEE80211_F_DOBRS);
+ if (rate & IEEE80211_RATE_BASIC) {
+ ratesetmismatch(ni, wh, reassoc, resp,
+ "HT", rate);
+ vap->iv_stats.is_ht_assoc_norate++;
+ return;
+ }
+ ieee80211_ht_node_init(ni);
+ ieee80211_ht_updatehtcap(ni, htcap);
+ } else if (ni->ni_flags & IEEE80211_NODE_HT)
+ ieee80211_ht_node_cleanup(ni);
+#ifdef IEEE80211_SUPPORT_SUPERG
+ else if (ni->ni_ath_flags & IEEE80211_NODE_ATH)
+ ieee80211_ff_node_cleanup(ni);
+#endif
+ /*
+ * Allow AMPDU operation only with unencrypted traffic
+ * or AES-CCM; the 11n spec only specifies these ciphers
+ * so permitting any others is undefined and can lead
+ * to interoperability problems.
+ */
+ if ((ni->ni_flags & IEEE80211_NODE_HT) &&
+ (((vap->iv_flags & IEEE80211_F_WPA) &&
+ rsnparms.rsn_ucastcipher != IEEE80211_CIPHER_AES_CCM) ||
+ (vap->iv_flags & (IEEE80211_F_WPA|IEEE80211_F_PRIVACY)) == IEEE80211_F_PRIVACY)) {
+ IEEE80211_NOTE(vap,
+ IEEE80211_MSG_ASSOC | IEEE80211_MSG_11N, ni,
+ "disallow HT use because WEP or TKIP requested, "
+ "capinfo 0x%x ucastcipher %d", capinfo,
+ rsnparms.rsn_ucastcipher);
+ ieee80211_ht_node_cleanup(ni);
+ vap->iv_stats.is_ht_assoc_downgrade++;
+ }
+ /*
+ * If constrained to 11n-only stations reject legacy stations.
+ */
+ if ((vap->iv_flags_ht & IEEE80211_FHT_PUREN) &&
+ (ni->ni_flags & IEEE80211_NODE_HT) == 0) {
+ htcapmismatch(ni, wh, reassoc, resp);
+ vap->iv_stats.is_ht_assoc_nohtcap++;
+ return;
+ }
+ IEEE80211_RSSI_LPF(ni->ni_avgrssi, rssi);
+ ni->ni_noise = nf;
+ ni->ni_intval = lintval;
+ ni->ni_capinfo = capinfo;
+ ni->ni_fhdwell = vap->iv_bss->ni_fhdwell;
+ ni->ni_fhindex = vap->iv_bss->ni_fhindex;
+ /*
+ * Store the IEs.
+ * XXX maybe better to just expand
+ */
+ if (ieee80211_ies_init(&ni->ni_ies, sfrm, efrm - sfrm)) {
+#define setie(_ie, _off) ieee80211_ies_setie(ni->ni_ies, _ie, _off)
+ if (wpa != NULL)
+ setie(wpa_ie, wpa - sfrm);
+ if (rsn != NULL)
+ setie(rsn_ie, rsn - sfrm);
+ if (htcap != NULL)
+ setie(htcap_ie, htcap - sfrm);
+ if (wme != NULL) {
+ setie(wme_ie, wme - sfrm);
+ /*
+ * Mark node as capable of QoS.
+ */
+ ni->ni_flags |= IEEE80211_NODE_QOS;
+ } else
+ ni->ni_flags &= ~IEEE80211_NODE_QOS;
+#ifdef IEEE80211_SUPPORT_SUPERG
+ if (ath != NULL) {
+ setie(ath_ie, ath - sfrm);
+ /*
+ * Parse ATH station parameters.
+ */
+ ieee80211_parse_ath(ni, ni->ni_ies.ath_ie);
+ } else
+#endif
+ ni->ni_ath_flags = 0;
+#undef setie
+ } else {
+ ni->ni_flags &= ~IEEE80211_NODE_QOS;
+ ni->ni_ath_flags = 0;
+ }
+ ieee80211_node_join(ni, resp);
+ ieee80211_deliver_l2uf(ni);
+ break;
+ }
+
+ case IEEE80211_FC0_SUBTYPE_DEAUTH:
+ case IEEE80211_FC0_SUBTYPE_DISASSOC: {
+ uint16_t reason;
+
+ if (vap->iv_state != IEEE80211_S_RUN ||
+ /* NB: can happen when in promiscuous mode */
+ !IEEE80211_ADDR_EQ(wh->i_addr1, vap->iv_myaddr)) {
+ vap->iv_stats.is_rx_mgtdiscard++;
+ break;
+ }
+ /*
+ * deauth/disassoc frame format
+ * [2] reason
+ */
+ IEEE80211_VERIFY_LENGTH(efrm - frm, 2, return);
+ reason = le16toh(*(uint16_t *)frm);
+ if (subtype == IEEE80211_FC0_SUBTYPE_DEAUTH) {
+ vap->iv_stats.is_rx_deauth++;
+ IEEE80211_NODE_STAT(ni, rx_deauth);
+ } else {
+ vap->iv_stats.is_rx_disassoc++;
+ IEEE80211_NODE_STAT(ni, rx_disassoc);
+ }
+ IEEE80211_NOTE(vap, IEEE80211_MSG_AUTH, ni,
+ "recv %s (reason %d)", ieee80211_mgt_subtype_name[subtype >>
+ IEEE80211_FC0_SUBTYPE_SHIFT], reason);
+ if (ni != vap->iv_bss)
+ ieee80211_node_leave(ni);
+ break;
+ }
+
+ case IEEE80211_FC0_SUBTYPE_ACTION:
+ if (vap->iv_state == IEEE80211_S_RUN) {
+ if (ieee80211_parse_action(ni, m0) == 0)
+ ic->ic_recv_action(ni, wh, frm, efrm);
+ } else
+ vap->iv_stats.is_rx_mgtdiscard++;
+ break;
+
+ case IEEE80211_FC0_SUBTYPE_ASSOC_RESP:
+ case IEEE80211_FC0_SUBTYPE_REASSOC_RESP:
+ default:
+ IEEE80211_DISCARD(vap, IEEE80211_MSG_ANY,
+ wh, "mgt", "subtype 0x%x not handled", subtype);
+ vap->iv_stats.is_rx_badsubtype++;
+ break;
+ }
+}
+
+static void
+hostap_recv_ctl(struct ieee80211_node *ni, struct mbuf *m, int subtype)
+{
+ switch (subtype) {
+ case IEEE80211_FC0_SUBTYPE_PS_POLL:
+ hostap_recv_pspoll(ni, m);
+ break;
+ case IEEE80211_FC0_SUBTYPE_BAR:
+ ieee80211_recv_bar(ni, m);
+ break;
+ }
+}
+
+/*
+ * Process a received ps-poll frame.
+ */
+static void
+hostap_recv_pspoll(struct ieee80211_node *ni, struct mbuf *m0)
+{
+ struct ieee80211vap *vap = ni->ni_vap;
+ struct ieee80211_frame_min *wh;
+ struct ifnet *ifp;
+ struct mbuf *m;
+ uint16_t aid;
+ int qlen;
+
+ wh = mtod(m0, struct ieee80211_frame_min *);
+ if (ni->ni_associd == 0) {
+ IEEE80211_DISCARD(vap,
+ IEEE80211_MSG_POWER | IEEE80211_MSG_DEBUG,
+ (struct ieee80211_frame *) wh, NULL,
+ "%s", "unassociated station");
+ vap->iv_stats.is_ps_unassoc++;
+ IEEE80211_SEND_MGMT(ni, IEEE80211_FC0_SUBTYPE_DEAUTH,
+ IEEE80211_REASON_NOT_ASSOCED);
+ return;
+ }
+
+ aid = le16toh(*(uint16_t *)wh->i_dur);
+ if (aid != ni->ni_associd) {
+ IEEE80211_DISCARD(vap,
+ IEEE80211_MSG_POWER | IEEE80211_MSG_DEBUG,
+ (struct ieee80211_frame *) wh, NULL,
+ "aid mismatch: sta aid 0x%x poll aid 0x%x",
+ ni->ni_associd, aid);
+ vap->iv_stats.is_ps_badaid++;
+ /*
+ * NB: We used to deauth the station but it turns out
+ * the Blackberry Curve 8230 (and perhaps other devices)
+ * sometimes send the wrong AID when WME is negotiated.
+ * Being more lenient here seems ok as we already check
+ * the station is associated and we only return frames
+ * queued for the station (i.e. we don't use the AID).
+ */
+ return;
+ }
+
+ /* Okay, take the first queued packet and put it out... */
+ m = ieee80211_node_psq_dequeue(ni, &qlen);
+ if (m == NULL) {
+ IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_POWER, wh->i_addr2,
+ "%s", "recv ps-poll, but queue empty");
+ ieee80211_send_nulldata(ieee80211_ref_node(ni));
+ vap->iv_stats.is_ps_qempty++; /* XXX node stat */
+ if (vap->iv_set_tim != NULL)
+ vap->iv_set_tim(ni, 0); /* just in case */
+ return;
+ }
+ /*
+ * If there are more packets, set the more packets bit
+ * in the packet dispatched to the station; otherwise
+ * turn off the TIM bit.
+ */
+ if (qlen != 0) {
+ IEEE80211_NOTE(vap, IEEE80211_MSG_POWER, ni,
+ "recv ps-poll, send packet, %u still queued", qlen);
+ m->m_flags |= M_MORE_DATA;
+ } else {
+ IEEE80211_NOTE(vap, IEEE80211_MSG_POWER, ni,
+ "%s", "recv ps-poll, send packet, queue empty");
+ if (vap->iv_set_tim != NULL)
+ vap->iv_set_tim(ni, 0);
+ }
+ m->m_flags |= M_PWR_SAV; /* bypass PS handling */
+
+ if (m->m_flags & M_ENCAP)
+ ifp = vap->iv_ic->ic_ifp;
+ else
+ ifp = vap->iv_ifp;
+ IF_ENQUEUE(&ifp->if_snd, m);
+ if_start(ifp);
+}
diff --git a/rtems/freebsd/net80211/ieee80211_hostap.h b/rtems/freebsd/net80211/ieee80211_hostap.h
new file mode 100644
index 00000000..fa35e220
--- /dev/null
+++ b/rtems/freebsd/net80211/ieee80211_hostap.h
@@ -0,0 +1,35 @@
+/*-
+ * Copyright (c) 2007-2008 Sam Leffler, Errno Consulting
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+#ifndef _NET80211_IEEE80211_HOSTAP_HH_
+#define _NET80211_IEEE80211_HOSTAP_HH_
+
+/*
+ * Hostap implementation definitions.
+ */
+void ieee80211_hostap_attach(struct ieee80211com *);
+void ieee80211_hostap_detach(struct ieee80211com *);
+#endif /* !_NET80211_IEEE80211_HOSTAP_HH_ */
diff --git a/rtems/freebsd/net80211/ieee80211_ht.c b/rtems/freebsd/net80211/ieee80211_ht.c
new file mode 100644
index 00000000..f97d1858
--- /dev/null
+++ b/rtems/freebsd/net80211/ieee80211_ht.c
@@ -0,0 +1,2523 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 2007-2008 Sam Leffler, Errno Consulting
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+#ifdef __FreeBSD__
+__FBSDID("$FreeBSD$");
+#endif
+
+/*
+ * IEEE 802.11n protocol support.
+ */
+
+#include <rtems/freebsd/local/opt_inet.h>
+#include <rtems/freebsd/local/opt_wlan.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/endian.h>
+
+#include <rtems/freebsd/sys/socket.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/if_media.h>
+#include <rtems/freebsd/net/ethernet.h>
+
+#include <rtems/freebsd/net80211/ieee80211_var.h>
+#include <rtems/freebsd/net80211/ieee80211_action.h>
+#include <rtems/freebsd/net80211/ieee80211_input.h>
+
+/* define here, used throughout file */
+#define MS(_v, _f) (((_v) & _f) >> _f##_S)
+#define SM(_v, _f) (((_v) << _f##_S) & _f)
+
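+/*
+ * NB (annotation): MS()/SM() extract and insert bit fields by pairing
+ * a field mask _f with a shift macro named _f##_S, e.g.
+ * MS(baparamset, IEEE80211_BAPS_BUFSIZ) pulls the buffer-size field
+ * out of an ADDBA parameter set.
+ */
+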
+const struct ieee80211_mcs_rates ieee80211_htrates[16] = {
+ { 13, 14, 27, 30 }, /* MCS 0 */
+ { 26, 29, 54, 60 }, /* MCS 1 */
+ { 39, 43, 81, 90 }, /* MCS 2 */
+ { 52, 58, 108, 120 }, /* MCS 3 */
+ { 78, 87, 162, 180 }, /* MCS 4 */
+ { 104, 116, 216, 240 }, /* MCS 5 */
+ { 117, 130, 243, 270 }, /* MCS 6 */
+ { 130, 144, 270, 300 }, /* MCS 7 */
+ { 26, 29, 54, 60 }, /* MCS 8 */
+ { 52, 58, 108, 120 }, /* MCS 9 */
+ { 78, 87, 162, 180 }, /* MCS 10 */
+ { 104, 116, 216, 240 }, /* MCS 11 */
+ { 156, 173, 324, 360 }, /* MCS 12 */
+ { 208, 231, 432, 480 }, /* MCS 13 */
+ { 234, 260, 486, 540 }, /* MCS 14 */
+ { 260, 289, 540, 600 } /* MCS 15 */
+};
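+
+/*
+ * NB (annotation): the four columns above are the PHY rate in
+ * 500 kb/s units for HT20/long GI, HT20/short GI, HT40/long GI and
+ * HT40/short GI respectively; e.g. MCS 7 at HT40 with a short guard
+ * interval is 300 * 0.5 = 150 Mb/s, which is what ht_announce()
+ * below prints.
+ */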
+
+static const struct ieee80211_htrateset ieee80211_rateset_11n =
+ { 16, {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+ 10, 11, 12, 13, 14, 15 }
+ };
+
+#ifdef IEEE80211_AMPDU_AGE
+static int ieee80211_ampdu_age = -1; /* threshold for ampdu reorder q (ms) */
+SYSCTL_PROC(_net_wlan, OID_AUTO, ampdu_age, CTLTYPE_INT | CTLFLAG_RW,
+ &ieee80211_ampdu_age, 0, ieee80211_sysctl_msecs_ticks, "I",
+ "AMPDU max reorder age (ms)");
+#endif
+
+static int ieee80211_recv_bar_ena = 1;
+SYSCTL_INT(_net_wlan, OID_AUTO, recv_bar, CTLFLAG_RW, &ieee80211_recv_bar_ena,
+ 0, "BAR frame processing (ena/dis)");
+
+static int ieee80211_addba_timeout = -1;/* timeout for ADDBA response */
+SYSCTL_PROC(_net_wlan, OID_AUTO, addba_timeout, CTLTYPE_INT | CTLFLAG_RW,
+ &ieee80211_addba_timeout, 0, ieee80211_sysctl_msecs_ticks, "I",
+ "ADDBA request timeout (ms)");
+static int ieee80211_addba_backoff = -1;/* backoff after max ADDBA requests */
+SYSCTL_PROC(_net_wlan, OID_AUTO, addba_backoff, CTLTYPE_INT | CTLFLAG_RW,
+ &ieee80211_addba_backoff, 0, ieee80211_sysctl_msecs_ticks, "I",
+ "ADDBA request backoff (ms)");
+static int ieee80211_addba_maxtries = 3;/* max ADDBA requests before backoff */
+SYSCTL_INT(_net_wlan, OID_AUTO, addba_maxtries, CTLTYPE_INT | CTLFLAG_RW,
+ &ieee80211_addba_maxtries, 0, "max ADDBA requests sent before backoff");
+
+static int ieee80211_bar_timeout = -1; /* timeout waiting for BAR response */
+static int ieee80211_bar_maxtries = 50;/* max BAR requests before DELBA */
+
+static ieee80211_recv_action_func ht_recv_action_ba_addba_request;
+static ieee80211_recv_action_func ht_recv_action_ba_addba_response;
+static ieee80211_recv_action_func ht_recv_action_ba_delba;
+static ieee80211_recv_action_func ht_recv_action_ht_mimopwrsave;
+static ieee80211_recv_action_func ht_recv_action_ht_txchwidth;
+
+static ieee80211_send_action_func ht_send_action_ba_addba;
+static ieee80211_send_action_func ht_send_action_ba_delba;
+static ieee80211_send_action_func ht_send_action_ht_txchwidth;
+
+static void
+ieee80211_ht_init(void)
+{
+ /*
+ * Set up HT parameters that depend on the clock frequency.
+ */
+#ifdef IEEE80211_AMPDU_AGE
+ ieee80211_ampdu_age = msecs_to_ticks(500);
+#endif
+ ieee80211_addba_timeout = msecs_to_ticks(250);
+ ieee80211_addba_backoff = msecs_to_ticks(10*1000);
+ ieee80211_bar_timeout = msecs_to_ticks(250);
+ /*
+ * Register action frame handlers.
+ */
+ ieee80211_recv_action_register(IEEE80211_ACTION_CAT_BA,
+ IEEE80211_ACTION_BA_ADDBA_REQUEST, ht_recv_action_ba_addba_request);
+ ieee80211_recv_action_register(IEEE80211_ACTION_CAT_BA,
+ IEEE80211_ACTION_BA_ADDBA_RESPONSE, ht_recv_action_ba_addba_response);
+ ieee80211_recv_action_register(IEEE80211_ACTION_CAT_BA,
+ IEEE80211_ACTION_BA_DELBA, ht_recv_action_ba_delba);
+ ieee80211_recv_action_register(IEEE80211_ACTION_CAT_HT,
+ IEEE80211_ACTION_HT_MIMOPWRSAVE, ht_recv_action_ht_mimopwrsave);
+ ieee80211_recv_action_register(IEEE80211_ACTION_CAT_HT,
+ IEEE80211_ACTION_HT_TXCHWIDTH, ht_recv_action_ht_txchwidth);
+
+ ieee80211_send_action_register(IEEE80211_ACTION_CAT_BA,
+ IEEE80211_ACTION_BA_ADDBA_REQUEST, ht_send_action_ba_addba);
+ ieee80211_send_action_register(IEEE80211_ACTION_CAT_BA,
+ IEEE80211_ACTION_BA_ADDBA_RESPONSE, ht_send_action_ba_addba);
+ ieee80211_send_action_register(IEEE80211_ACTION_CAT_BA,
+ IEEE80211_ACTION_BA_DELBA, ht_send_action_ba_delba);
+ ieee80211_send_action_register(IEEE80211_ACTION_CAT_HT,
+ IEEE80211_ACTION_HT_TXCHWIDTH, ht_send_action_ht_txchwidth);
+}
+SYSINIT(wlan_ht, SI_SUB_DRIVERS, SI_ORDER_FIRST, ieee80211_ht_init, NULL);
+
+static int ieee80211_ampdu_enable(struct ieee80211_node *ni,
+ struct ieee80211_tx_ampdu *tap);
+static int ieee80211_addba_request(struct ieee80211_node *ni,
+ struct ieee80211_tx_ampdu *tap,
+ int dialogtoken, int baparamset, int batimeout);
+static int ieee80211_addba_response(struct ieee80211_node *ni,
+ struct ieee80211_tx_ampdu *tap,
+ int code, int baparamset, int batimeout);
+static void ieee80211_addba_stop(struct ieee80211_node *ni,
+ struct ieee80211_tx_ampdu *tap);
+static void ieee80211_bar_response(struct ieee80211_node *ni,
+ struct ieee80211_tx_ampdu *tap, int status);
+static void ampdu_tx_stop(struct ieee80211_tx_ampdu *tap);
+static void bar_stop_timer(struct ieee80211_tx_ampdu *tap);
+static int ampdu_rx_start(struct ieee80211_node *, struct ieee80211_rx_ampdu *,
+ int baparamset, int batimeout, int baseqctl);
+static void ampdu_rx_stop(struct ieee80211_node *, struct ieee80211_rx_ampdu *);
+
+void
+ieee80211_ht_attach(struct ieee80211com *ic)
+{
+ /* setup default aggregation policy */
+ ic->ic_recv_action = ieee80211_recv_action;
+ ic->ic_send_action = ieee80211_send_action;
+ ic->ic_ampdu_enable = ieee80211_ampdu_enable;
+ ic->ic_addba_request = ieee80211_addba_request;
+ ic->ic_addba_response = ieee80211_addba_response;
+ ic->ic_addba_stop = ieee80211_addba_stop;
+ ic->ic_bar_response = ieee80211_bar_response;
+ ic->ic_ampdu_rx_start = ampdu_rx_start;
+ ic->ic_ampdu_rx_stop = ampdu_rx_stop;
+
+ ic->ic_htprotmode = IEEE80211_PROT_RTSCTS;
+ ic->ic_curhtprotmode = IEEE80211_HTINFO_OPMODE_PURE;
+}
+
+void
+ieee80211_ht_detach(struct ieee80211com *ic)
+{
+}
+
+void
+ieee80211_ht_vattach(struct ieee80211vap *vap)
+{
+
+ /* driver can override defaults */
+ vap->iv_ampdu_rxmax = IEEE80211_HTCAP_MAXRXAMPDU_8K;
+ vap->iv_ampdu_density = IEEE80211_HTCAP_MPDUDENSITY_NA;
+ vap->iv_ampdu_limit = vap->iv_ampdu_rxmax;
+ vap->iv_amsdu_limit = vap->iv_htcaps & IEEE80211_HTCAP_MAXAMSDU;
+ /* tx aggregation traffic thresholds */
+ vap->iv_ampdu_mintraffic[WME_AC_BK] = 128;
+ vap->iv_ampdu_mintraffic[WME_AC_BE] = 64;
+ vap->iv_ampdu_mintraffic[WME_AC_VO] = 32;
+ vap->iv_ampdu_mintraffic[WME_AC_VI] = 32;
+
+ if (vap->iv_htcaps & IEEE80211_HTC_HT) {
+ /*
+ * Device is HT capable; enable all HT-related
+ * facilities by default.
+ * XXX these choices may be too aggressive.
+ */
+ vap->iv_flags_ht |= IEEE80211_FHT_HT
+ | IEEE80211_FHT_HTCOMPAT
+ ;
+ if (vap->iv_htcaps & IEEE80211_HTCAP_SHORTGI20)
+ vap->iv_flags_ht |= IEEE80211_FHT_SHORTGI20;
+ /* XXX infer from channel list? */
+ if (vap->iv_htcaps & IEEE80211_HTCAP_CHWIDTH40) {
+ vap->iv_flags_ht |= IEEE80211_FHT_USEHT40;
+ if (vap->iv_htcaps & IEEE80211_HTCAP_SHORTGI40)
+ vap->iv_flags_ht |= IEEE80211_FHT_SHORTGI40;
+ }
+ /* enable RIFS if capable */
+ if (vap->iv_htcaps & IEEE80211_HTC_RIFS)
+ vap->iv_flags_ht |= IEEE80211_FHT_RIFS;
+
+ /* NB: A-MPDU and A-MSDU rx are mandated, these are tx only */
+ vap->iv_flags_ht |= IEEE80211_FHT_AMPDU_RX;
+ if (vap->iv_htcaps & IEEE80211_HTC_AMPDU)
+ vap->iv_flags_ht |= IEEE80211_FHT_AMPDU_TX;
+ vap->iv_flags_ht |= IEEE80211_FHT_AMSDU_RX;
+ if (vap->iv_htcaps & IEEE80211_HTC_AMSDU)
+ vap->iv_flags_ht |= IEEE80211_FHT_AMSDU_TX;
+ }
+ /* NB: disable default legacy WDS, too many issues right now */
+ if (vap->iv_flags_ext & IEEE80211_FEXT_WDSLEGACY)
+ vap->iv_flags_ht &= ~IEEE80211_FHT_HT;
+}
+
+void
+ieee80211_ht_vdetach(struct ieee80211vap *vap)
+{
+}
+
+static void
+ht_announce(struct ieee80211com *ic, int mode,
+ const struct ieee80211_htrateset *rs)
+{
+ struct ifnet *ifp = ic->ic_ifp;
+ int i, rate, mword;
+
+ if_printf(ifp, "%s MCS: ", ieee80211_phymode_name[mode]);
+ for (i = 0; i < rs->rs_nrates; i++) {
+ mword = ieee80211_rate2media(ic,
+ rs->rs_rates[i] | IEEE80211_RATE_MCS, mode);
+ if (IFM_SUBTYPE(mword) != IFM_IEEE80211_MCS)
+ continue;
+ rate = ieee80211_htrates[rs->rs_rates[i]].ht40_rate_400ns;
+ printf("%s%d%sMbps", (i != 0 ? " " : ""),
+ rate / 2, ((rate & 0x1) != 0 ? ".5" : ""));
+ }
+ printf("\n");
+}
+
+void
+ieee80211_ht_announce(struct ieee80211com *ic)
+{
+ if (isset(ic->ic_modecaps, IEEE80211_MODE_11NA))
+ ht_announce(ic, IEEE80211_MODE_11NA, &ieee80211_rateset_11n);
+ if (isset(ic->ic_modecaps, IEEE80211_MODE_11NG))
+ ht_announce(ic, IEEE80211_MODE_11NG, &ieee80211_rateset_11n);
+}
+
+const struct ieee80211_htrateset *
+ieee80211_get_suphtrates(struct ieee80211com *ic,
+ const struct ieee80211_channel *c)
+{
+ return &ieee80211_rateset_11n;
+}
+
+/*
+ * Receive processing.
+ */
+
+/*
+ * Decap the encapsulated A-MSDU frames and dispatch all but
+ * the last for delivery. The last frame is returned for
+ * delivery via the normal path.
+ */
+struct mbuf *
+ieee80211_decap_amsdu(struct ieee80211_node *ni, struct mbuf *m)
+{
+ struct ieee80211vap *vap = ni->ni_vap;
+ int framelen;
+ struct mbuf *n;
+
+ /* discard 802.3 header inserted by ieee80211_decap */
+ m_adj(m, sizeof(struct ether_header));
+
+ vap->iv_stats.is_amsdu_decap++;
+
+ for (;;) {
+ /*
+ * Decap the first frame, bust it apart from the
+ * remainder and deliver. We leave the last frame
+ * delivery to the caller (for consistency with other
+ * code paths, could also do it here).
+ */
+ m = ieee80211_decap1(m, &framelen);
+ if (m == NULL) {
+ IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_ANY,
+ ni->ni_macaddr, "a-msdu", "%s", "decap failed");
+ vap->iv_stats.is_amsdu_tooshort++;
+ return NULL;
+ }
+ if (m->m_pkthdr.len == framelen)
+ break;
+ n = m_split(m, framelen, M_NOWAIT);
+ if (n == NULL) {
+ IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_ANY,
+ ni->ni_macaddr, "a-msdu",
+ "%s", "unable to split encapsulated frames");
+ vap->iv_stats.is_amsdu_split++;
+ m_freem(m); /* NB: must reclaim */
+ return NULL;
+ }
+ vap->iv_deliver_data(vap, ni, m);
+
+ /*
+ * Remove frame contents; each intermediate frame
+ * is required to be aligned to a 4-byte boundary.
+ */
+ m = n;
+ m_adj(m, roundup2(framelen, 4) - framelen); /* padding */
+ }
+ return m; /* last delivered by caller */
+}
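+
+/*
+ * NB (annotation): each A-MSDU subframe is a 14-byte DA/SA/length
+ * header plus the MSDU, and every subframe except the last is padded
+ * so its successor starts on a 4-byte boundary; the
+ * roundup2(framelen, 4) - framelen adjustment above strips exactly
+ * that pad.
+ */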
+
+/*
+ * Purge all frames in the A-MPDU re-order queue.
+ */
+static void
+ampdu_rx_purge(struct ieee80211_rx_ampdu *rap)
+{
+ struct mbuf *m;
+ int i;
+
+ for (i = 0; i < rap->rxa_wnd; i++) {
+ m = rap->rxa_m[i];
+ if (m != NULL) {
+ rap->rxa_m[i] = NULL;
+ rap->rxa_qbytes -= m->m_pkthdr.len;
+ m_freem(m);
+ if (--rap->rxa_qframes == 0)
+ break;
+ }
+ }
+ KASSERT(rap->rxa_qbytes == 0 && rap->rxa_qframes == 0,
+ ("lost %u data, %u frames on ampdu rx q",
+ rap->rxa_qbytes, rap->rxa_qframes));
+}
+
+/*
+ * Start A-MPDU rx/re-order processing for the specified TID.
+ */
+static int
+ampdu_rx_start(struct ieee80211_node *ni, struct ieee80211_rx_ampdu *rap,
+ int baparamset, int batimeout, int baseqctl)
+{
+ int bufsiz = MS(baparamset, IEEE80211_BAPS_BUFSIZ);
+
+ if (rap->rxa_flags & IEEE80211_AGGR_RUNNING) {
+ /*
+ * AMPDU previously setup and not terminated with a DELBA,
+ * flush the reorder q's in case anything remains.
+ */
+ ampdu_rx_purge(rap);
+ }
+ memset(rap, 0, sizeof(*rap));
+ rap->rxa_wnd = (bufsiz == 0) ?
+ IEEE80211_AGGR_BAWMAX : min(bufsiz, IEEE80211_AGGR_BAWMAX);
+ rap->rxa_start = MS(baseqctl, IEEE80211_BASEQ_START);
+ rap->rxa_flags |= IEEE80211_AGGR_RUNNING | IEEE80211_AGGR_XCHGPEND;
+
+ return 0;
+}
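+
+/*
+ * NB (annotation): a buffer size of 0 in the ADDBA parameter set
+ * means the sender left the window choice to us, so rxa_wnd defaults
+ * to IEEE80211_AGGR_BAWMAX (the largest block ack window allowed);
+ * otherwise it is clamped to that maximum. rxa_start is seeded from
+ * the starting sequence control of the ADDBA request.
+ */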
+
+/*
+ * Stop A-MPDU rx processing for the specified TID.
+ */
+static void
+ampdu_rx_stop(struct ieee80211_node *ni, struct ieee80211_rx_ampdu *rap)
+{
+ ampdu_rx_purge(rap);
+ rap->rxa_flags &= ~(IEEE80211_AGGR_RUNNING | IEEE80211_AGGR_XCHGPEND);
+}
+
+/*
+ * Dispatch a frame from the A-MPDU reorder queue. The
+ * frame is fed back into ieee80211_input marked with an
+ * M_AMPDU_MPDU flag so it doesn't come back to us (it also
+ * permits ieee80211_input to optimize re-processing).
+ */
+static __inline void
+ampdu_dispatch(struct ieee80211_node *ni, struct mbuf *m)
+{
+ m->m_flags |= M_AMPDU_MPDU; /* bypass normal processing */
+ /* NB: rssi and noise are ignored w/ M_AMPDU_MPDU set */
+ (void) ieee80211_input(ni, m, 0, 0);
+}
+
+/*
+ * Dispatch as many frames as possible from the re-order queue.
+ * Frames will always be "at the front"; we process all frames
+ * up to the first empty slot in the window. On completion we
+ * clean up state if there are still pending frames in the current
+ * BA window. We assume the frame at slot 0 is already handled
+ * by the caller; we always start at slot 1.
+ */
+static void
+ampdu_rx_dispatch(struct ieee80211_rx_ampdu *rap, struct ieee80211_node *ni)
+{
+ struct ieee80211vap *vap = ni->ni_vap;
+ struct mbuf *m;
+ int i;
+
+ /* flush run of frames */
+ for (i = 1; i < rap->rxa_wnd; i++) {
+ m = rap->rxa_m[i];
+ if (m == NULL)
+ break;
+ rap->rxa_m[i] = NULL;
+ rap->rxa_qbytes -= m->m_pkthdr.len;
+ rap->rxa_qframes--;
+
+ ampdu_dispatch(ni, m);
+ }
+ /*
+ * If frames remain, copy the mbuf pointers down so
+ * they correspond to the offsets in the new window.
+ */
+ if (rap->rxa_qframes != 0) {
+ int n = rap->rxa_qframes, j;
+ for (j = i+1; j < rap->rxa_wnd; j++) {
+ if (rap->rxa_m[j] != NULL) {
+ rap->rxa_m[j-i] = rap->rxa_m[j];
+ rap->rxa_m[j] = NULL;
+ if (--n == 0)
+ break;
+ }
+ }
+ KASSERT(n == 0, ("lost %d frames", n));
+ vap->iv_stats.is_ampdu_rx_copy += rap->rxa_qframes;
+ }
+ /*
+ * Adjust the start of the BA window to
+ * reflect the frames just dispatched.
+ */
+ rap->rxa_start = IEEE80211_SEQ_ADD(rap->rxa_start, i);
+ vap->iv_stats.is_ampdu_rx_oor += i;
+}
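+
+/*
+ * NB (worked example): with rxa_start = 100, slot 0 handled by the
+ * caller and slots 1-3 holding seq 101-103, the loop above dispatches
+ * those three frames and stops at the empty slot i = 4; any frames
+ * at slots >= 5 are copied down by 4 and rxa_start advances to 104
+ * (IEEE80211_SEQ_ADD handles the 12-bit sequence-space wrap).
+ */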
+
+#ifdef IEEE80211_AMPDU_AGE
+/*
+ * Dispatch all frames in the A-MPDU re-order queue.
+ */
+static void
+ampdu_rx_flush(struct ieee80211_node *ni, struct ieee80211_rx_ampdu *rap)
+{
+ struct ieee80211vap *vap = ni->ni_vap;
+ struct mbuf *m;
+ int i;
+
+ for (i = 0; i < rap->rxa_wnd; i++) {
+ m = rap->rxa_m[i];
+ if (m == NULL)
+ continue;
+ rap->rxa_m[i] = NULL;
+ rap->rxa_qbytes -= m->m_pkthdr.len;
+ rap->rxa_qframes--;
+ vap->iv_stats.is_ampdu_rx_oor++;
+
+ ampdu_dispatch(ni, m);
+ if (rap->rxa_qframes == 0)
+ break;
+ }
+}
+#endif /* IEEE80211_AMPDU_AGE */
+
+/*
+ * Dispatch all frames in the A-MPDU re-order queue
+ * preceding the specified sequence number. This logic
+ * handles window moves due to a received MSDU or BAR.
+ */
+static void
+ampdu_rx_flush_upto(struct ieee80211_node *ni,
+ struct ieee80211_rx_ampdu *rap, ieee80211_seq winstart)
+{
+ struct ieee80211vap *vap = ni->ni_vap;
+ struct mbuf *m;
+ ieee80211_seq seqno;
+ int i;
+
+ /*
+ * Flush any complete MSDUs with a sequence number lower
+ * than winstart. Gaps may exist. Note that we may actually
+ * dispatch frames past winstart if a run continues; this is
+ * an optimization that avoids having to do a separate pass
+ * to dispatch frames after moving the BA window start.
+ */
+ seqno = rap->rxa_start;
+ for (i = 0; i < rap->rxa_wnd; i++) {
+ m = rap->rxa_m[i];
+ if (m != NULL) {
+ rap->rxa_m[i] = NULL;
+ rap->rxa_qbytes -= m->m_pkthdr.len;
+ rap->rxa_qframes--;
+ vap->iv_stats.is_ampdu_rx_oor++;
+
+ ampdu_dispatch(ni, m);
+ } else {
+ if (!IEEE80211_SEQ_BA_BEFORE(seqno, winstart))
+ break;
+ }
+ seqno = IEEE80211_SEQ_INC(seqno);
+ }
+ /*
+ * If frames remain, copy the mbuf pointers down so
+ * they correspond to the offsets in the new window.
+ */
+ if (rap->rxa_qframes != 0) {
+ int n = rap->rxa_qframes, j;
+
+ /* NB: this loop assumes i > 0 and/or rxa_m[0] is NULL */
+ KASSERT(rap->rxa_m[0] == NULL,
+ ("%s: BA window slot 0 occupied", __func__));
+ for (j = i+1; j < rap->rxa_wnd; j++) {
+ if (rap->rxa_m[j] != NULL) {
+ rap->rxa_m[j-i] = rap->rxa_m[j];
+ rap->rxa_m[j] = NULL;
+ if (--n == 0)
+ break;
+ }
+ }
+ KASSERT(n == 0, ("%s: lost %d frames, qframes %d off %d "
+ "BA win <%d:%d> winstart %d",
+ __func__, n, rap->rxa_qframes, i, rap->rxa_start,
+ IEEE80211_SEQ_ADD(rap->rxa_start, rap->rxa_wnd-1),
+ winstart));
+ vap->iv_stats.is_ampdu_rx_copy += rap->rxa_qframes;
+ }
+ /*
+ * Move the start of the BA window; we use the
+ * sequence number of the last MSDU that was
+ * passed up the stack+1 or winstart if stopped on
+ * a gap in the reorder buffer.
+ */
+ rap->rxa_start = seqno;
+}
+
+/*
+ * Process a received QoS data frame for an HT station. Handle
+ * A-MPDU reordering: if this frame is received out of order
+ * and falls within the BA window hold onto it. Otherwise if
+ * this frame completes a run, flush any pending frames. We
+ * return 1 if the frame is consumed and 0 if it should be
+ * processed normally by the caller.
+ */
+int
+ieee80211_ampdu_reorder(struct ieee80211_node *ni, struct mbuf *m)
+{
+#define IEEE80211_FC0_QOSDATA \
+ (IEEE80211_FC0_TYPE_DATA|IEEE80211_FC0_SUBTYPE_QOS|IEEE80211_FC0_VERSION_0)
+#define PROCESS 0 /* caller should process frame */
+#define CONSUMED 1 /* frame consumed, caller does nothing */
+ struct ieee80211vap *vap = ni->ni_vap;
+ struct ieee80211_qosframe *wh;
+ struct ieee80211_rx_ampdu *rap;
+ ieee80211_seq rxseq;
+ uint8_t tid;
+ int off;
+
+ KASSERT((m->m_flags & (M_AMPDU | M_AMPDU_MPDU)) == M_AMPDU,
+ ("!a-mpdu or already re-ordered, flags 0x%x", m->m_flags));
+ KASSERT(ni->ni_flags & IEEE80211_NODE_HT, ("not an HT sta"));
+
+ /* NB: m_len known to be sufficient */
+ wh = mtod(m, struct ieee80211_qosframe *);
+ if (wh->i_fc[0] != IEEE80211_FC0_QOSDATA) {
+ /*
+ * Not QoS data, shouldn't get here but just
+ * return it to the caller for processing.
+ */
+ return PROCESS;
+ }
+ if (IEEE80211_IS_DSTODS(wh))
+ tid = ((struct ieee80211_qosframe_addr4 *)wh)->i_qos[0];
+ else
+ tid = wh->i_qos[0];
+ tid &= IEEE80211_QOS_TID;
+ rap = &ni->ni_rx_ampdu[tid];
+ if ((rap->rxa_flags & IEEE80211_AGGR_XCHGPEND) == 0) {
+ /*
+ * No ADDBA request yet, don't touch.
+ */
+ return PROCESS;
+ }
+ rxseq = le16toh(*(uint16_t *)wh->i_seq);
+ if ((rxseq & IEEE80211_SEQ_FRAG_MASK) != 0) {
+ /*
+ * Fragments are not allowed; toss.
+ */
+ IEEE80211_DISCARD_MAC(vap,
+ IEEE80211_MSG_INPUT | IEEE80211_MSG_11N, ni->ni_macaddr,
+ "A-MPDU", "fragment, rxseq 0x%x tid %u%s", rxseq, tid,
+ wh->i_fc[1] & IEEE80211_FC1_RETRY ? " (retransmit)" : "");
+ vap->iv_stats.is_ampdu_rx_drop++;
+ IEEE80211_NODE_STAT(ni, rx_drop);
+ m_freem(m);
+ return CONSUMED;
+ }
+ rxseq >>= IEEE80211_SEQ_SEQ_SHIFT;
+ rap->rxa_nframes++;
+again:
+ if (rxseq == rap->rxa_start) {
+ /*
+ * First frame in window.
+ */
+ if (rap->rxa_qframes != 0) {
+ /*
+ * Dispatch as many packets as we can.
+ */
+ KASSERT(rap->rxa_m[0] == NULL, ("unexpected dup"));
+ ampdu_dispatch(ni, m);
+ ampdu_rx_dispatch(rap, ni);
+ return CONSUMED;
+ } else {
+ /*
+ * In order; advance window and notify
+ * caller to dispatch directly.
+ */
+ rap->rxa_start = IEEE80211_SEQ_INC(rxseq);
+ return PROCESS;
+ }
+ }
+ /*
+ * Frame is out of order; store if in the BA window.
+ */
+ /* calculate offset in BA window */
+ off = IEEE80211_SEQ_SUB(rxseq, rap->rxa_start);
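+ /* NB: IEEE80211_SEQ_SUB is modulo-4096 so off is always >= 0 */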
+ if (off < rap->rxa_wnd) {
+ /*
+ * Common case (hopefully): in the BA window.
+ * Sec 9.10.7.6 a) (D2.04 p.118 line 47)
+ */
+#ifdef IEEE80211_AMPDU_AGE
+ /*
+ * Check for frames sitting too long in the reorder queue.
+ * This should only ever happen when frames go missing and
+ * the sender does not otherwise notify us (e.g. with a
+ * BAR to move the window). Typically this happens because
+ * of vendor bugs that cause the sequence number to jump.
+ * When this happens we get a gap in the reorder queue that
+ * leaves frames sitting on the queue until they get pushed
+ * out due to window moves. When the vendor does not send
+ * a BAR this move only happens due to explicit packet sends.
+ *
+ * NB: we only track the time of the oldest frame in the
+ * reorder q; this means that if we flush we might push
+ * frames that are still "new"; if this happens then subsequent
+ * frames will result in BA window moves which cost something
+ * but is still better than a big throughput dip.
+ */
+ if (rap->rxa_qframes != 0) {
+ /* XXX honor batimeout? */
+ if (ticks - rap->rxa_age > ieee80211_ampdu_age) {
+ /*
+ * Too long since we received the first
+ * frame; flush the reorder buffer.
+ */
+ if (rap->rxa_qframes != 0) {
+ vap->iv_stats.is_ampdu_rx_age +=
+ rap->rxa_qframes;
+ ampdu_rx_flush(ni, rap);
+ }
+ rap->rxa_start = IEEE80211_SEQ_INC(rxseq);
+ return PROCESS;
+ }
+ } else {
+ /*
+ * First frame, start aging timer.
+ */
+ rap->rxa_age = ticks;
+ }
+#endif /* IEEE80211_AMPDU_AGE */
+ /* save packet */
+ if (rap->rxa_m[off] == NULL) {
+ rap->rxa_m[off] = m;
+ rap->rxa_qframes++;
+ rap->rxa_qbytes += m->m_pkthdr.len;
+ vap->iv_stats.is_ampdu_rx_reorder++;
+ } else {
+ IEEE80211_DISCARD_MAC(vap,
+ IEEE80211_MSG_INPUT | IEEE80211_MSG_11N,
+ ni->ni_macaddr, "a-mpdu duplicate",
+ "seqno %u tid %u BA win <%u:%u>",
+ rxseq, tid, rap->rxa_start,
+ IEEE80211_SEQ_ADD(rap->rxa_start, rap->rxa_wnd-1));
+ vap->iv_stats.is_rx_dup++;
+ IEEE80211_NODE_STAT(ni, rx_dup);
+ m_freem(m);
+ }
+ return CONSUMED;
+ }
+ if (off < IEEE80211_SEQ_BA_RANGE) {
+ /*
+ * Outside the BA window, but within range;
+ * flush the reorder q and move the window.
+ * Sec 9.10.7.6 b) (D2.04 p.118 line 60)
+ */
+ IEEE80211_NOTE(vap, IEEE80211_MSG_11N, ni,
+ "move BA win <%u:%u> (%u frames) rxseq %u tid %u",
+ rap->rxa_start,
+ IEEE80211_SEQ_ADD(rap->rxa_start, rap->rxa_wnd-1),
+ rap->rxa_qframes, rxseq, tid);
+ vap->iv_stats.is_ampdu_rx_move++;
+
+ /*
+ * The spec says to flush frames up to but not including:
+ * WinStart_B = rxseq - rap->rxa_wnd + 1
+ * Then insert the frame or notify the caller to process
+ * it immediately. We can safely do this by just starting
+ * over again because we know the frame will now be within
+ * the BA window.
+ */
+ /* NB: rxa_wnd known to be >0 */
+ ampdu_rx_flush_upto(ni, rap,
+ IEEE80211_SEQ_SUB(rxseq, rap->rxa_wnd-1));
+ goto again;
+ } else {
+ /*
+ * Outside the BA window and out of range; toss.
+ * Sec 9.10.7.6 c) (D2.04 p.119 line 16)
+ */
+ IEEE80211_DISCARD_MAC(vap,
+ IEEE80211_MSG_INPUT | IEEE80211_MSG_11N, ni->ni_macaddr,
+ "MPDU", "BA win <%u:%u> (%u frames) rxseq %u tid %u%s",
+ rap->rxa_start,
+ IEEE80211_SEQ_ADD(rap->rxa_start, rap->rxa_wnd-1),
+ rap->rxa_qframes, rxseq, tid,
+ wh->i_fc[1] & IEEE80211_FC1_RETRY ? " (retransmit)" : "");
+ vap->iv_stats.is_ampdu_rx_drop++;
+ IEEE80211_NODE_STAT(ni, rx_drop);
+ m_freem(m);
+ return CONSUMED;
+ }
+#undef CONSUMED
+#undef PROCESS
+#undef IEEE80211_FC0_QOSDATA
+}
+
+/*
+ * Process a BAR ctl frame. Dispatch all frames up to
+ * the sequence number of the frame. If this frame is
+ * out of range it's discarded.
+ */
+void
+ieee80211_recv_bar(struct ieee80211_node *ni, struct mbuf *m0)
+{
+ struct ieee80211vap *vap = ni->ni_vap;
+ struct ieee80211_frame_bar *wh;
+ struct ieee80211_rx_ampdu *rap;
+ ieee80211_seq rxseq;
+ int tid, off;
+
+ if (!ieee80211_recv_bar_ena) {
+#if 0
+ IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_11N,
+ ni->ni_macaddr, "BAR", "%s", "processing disabled");
+#endif
+ vap->iv_stats.is_ampdu_bar_bad++;
+ return;
+ }
+ wh = mtod(m0, struct ieee80211_frame_bar *);
+ /* XXX check basic BAR */
+ tid = MS(le16toh(wh->i_ctl), IEEE80211_BAR_TID);
+ rap = &ni->ni_rx_ampdu[tid];
+ if ((rap->rxa_flags & IEEE80211_AGGR_XCHGPEND) == 0) {
+ /*
+ * No ADDBA request yet, don't touch.
+ */
+ IEEE80211_DISCARD_MAC(vap,
+ IEEE80211_MSG_INPUT | IEEE80211_MSG_11N,
+ ni->ni_macaddr, "BAR", "no BA stream, tid %u", tid);
+ vap->iv_stats.is_ampdu_bar_bad++;
+ return;
+ }
+ vap->iv_stats.is_ampdu_bar_rx++;
+ rxseq = le16toh(wh->i_seq) >> IEEE80211_SEQ_SEQ_SHIFT;
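+ /* NB: BAR start matches the window start; nothing to dispatch */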
+ if (rxseq == rap->rxa_start)
+ return;
+ /* calculate offset in BA window */
+ off = IEEE80211_SEQ_SUB(rxseq, rap->rxa_start);
+ if (off < IEEE80211_SEQ_BA_RANGE) {
+ /*
+ * Flush the reorder q up to rxseq and move the window.
+ * Sec 9.10.7.6 a) (D2.04 p.119 line 22)
+ */
+ IEEE80211_NOTE(vap, IEEE80211_MSG_11N, ni,
+ "BAR moves BA win <%u:%u> (%u frames) rxseq %u tid %u",
+ rap->rxa_start,
+ IEEE80211_SEQ_ADD(rap->rxa_start, rap->rxa_wnd-1),
+ rap->rxa_qframes, rxseq, tid);
+ vap->iv_stats.is_ampdu_bar_move++;
+
+ ampdu_rx_flush_upto(ni, rap, rxseq);
+ if (off >= rap->rxa_wnd) {
+ /*
+ * BAR specifies a window start to the right of BA
+ * window; we must move it explicitly since
+ * ampdu_rx_flush_upto will not.
+ */
+ rap->rxa_start = rxseq;
+ }
+ } else {
+ /*
+ * Out of range; toss.
+ * Sec 9.10.7.6 b) (D2.04 p.119 line 41)
+ */
+ IEEE80211_DISCARD_MAC(vap,
+ IEEE80211_MSG_INPUT | IEEE80211_MSG_11N, ni->ni_macaddr,
+ "BAR", "BA win <%u:%u> (%u frames) rxseq %u tid %u%s",
+ rap->rxa_start,
+ IEEE80211_SEQ_ADD(rap->rxa_start, rap->rxa_wnd-1),
+ rap->rxa_qframes, rxseq, tid,
+ wh->i_fc[1] & IEEE80211_FC1_RETRY ? " (retransmit)" : "");
+ vap->iv_stats.is_ampdu_bar_oow++;
+ IEEE80211_NODE_STAT(ni, rx_drop);
+ }
+}
+
+/*
+ * Setup HT-specific state in a node. Called only
+ * when HT use is negotiated so we don't do extra
+ * work for temporary and/or legacy sta's.
+ */
+void
+ieee80211_ht_node_init(struct ieee80211_node *ni)
+{
+ struct ieee80211_tx_ampdu *tap;
+ int ac;
+
+ if (ni->ni_flags & IEEE80211_NODE_HT) {
+ /*
+ * Clean AMPDU state on re-associate. This handles the case
+ * where a station leaves w/o notifying us and then returns
+ * before node is reaped for inactivity.
+ */
+ ieee80211_ht_node_cleanup(ni);
+ }
+ for (ac = 0; ac < WME_NUM_AC; ac++) {
+ tap = &ni->ni_tx_ampdu[ac];
+ tap->txa_ac = ac;
+ tap->txa_ni = ni;
+ /* NB: further initialization deferred */
+ }
+ ni->ni_flags |= IEEE80211_NODE_HT | IEEE80211_NODE_AMPDU;
+}
+
+/*
+ * Cleanup HT-specific state in a node. Called only
+ * when HT use has been marked.
+ */
+void
+ieee80211_ht_node_cleanup(struct ieee80211_node *ni)
+{
+ struct ieee80211com *ic = ni->ni_ic;
+ int i;
+
+ KASSERT(ni->ni_flags & IEEE80211_NODE_HT, ("not an HT node"));
+
+ /* XXX optimize this */
+ for (i = 0; i < WME_NUM_AC; i++) {
+ struct ieee80211_tx_ampdu *tap = &ni->ni_tx_ampdu[i];
+ if (tap->txa_flags & IEEE80211_AGGR_SETUP)
+ ampdu_tx_stop(tap);
+ }
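+ /* NB: tx aggregation state is per-AC; rx reorder state is per-TID */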
+ for (i = 0; i < WME_NUM_TID; i++)
+ ic->ic_ampdu_rx_stop(ni, &ni->ni_rx_ampdu[i]);
+
+ ni->ni_htcap = 0;
+ ni->ni_flags &= ~IEEE80211_NODE_HT_ALL;
+}
+
+/*
+ * Age out HT resources for a station.
+ */
+void
+ieee80211_ht_node_age(struct ieee80211_node *ni)
+{
+#ifdef IEEE80211_AMPDU_AGE
+ struct ieee80211vap *vap = ni->ni_vap;
+ uint8_t tid;
+#endif
+
+ KASSERT(ni->ni_flags & IEEE80211_NODE_HT, ("not an HT sta"));
+
+#ifdef IEEE80211_AMPDU_AGE
+ for (tid = 0; tid < WME_NUM_TID; tid++) {
+ struct ieee80211_rx_ampdu *rap;
+
+ rap = &ni->ni_rx_ampdu[tid];
+ if ((rap->rxa_flags & IEEE80211_AGGR_XCHGPEND) == 0)
+ continue;
+ if (rap->rxa_qframes == 0)
+ continue;
+ /*
+ * Check for frames sitting too long in the reorder queue.
+ * See above for more details on what's happening here.
+ */
+ /* XXX honor batimeout? */
+ if (ticks - rap->rxa_age > ieee80211_ampdu_age) {
+ /*
+ * Too long since we received the first
+ * frame; flush the reorder buffer.
+ */
+ vap->iv_stats.is_ampdu_rx_age += rap->rxa_qframes;
+ ampdu_rx_flush(ni, rap);
+ }
+ }
+#endif /* IEEE80211_AMPDU_AGE */
+}
+
+static struct ieee80211_channel *
+findhtchan(struct ieee80211com *ic, struct ieee80211_channel *c, int htflags)
+{
+ return ieee80211_find_channel(ic, c->ic_freq,
+ (c->ic_flags &~ IEEE80211_CHAN_HT) | htflags);
+}
+
+/*
+ * Adjust a channel to be HT/non-HT according to the vap's configuration.
+ */
+struct ieee80211_channel *
+ieee80211_ht_adjust_channel(struct ieee80211com *ic,
+ struct ieee80211_channel *chan, int flags)
+{
+ struct ieee80211_channel *c;
+
+ if (flags & IEEE80211_FHT_HT) {
+ /* promote to HT if possible */
+ if (flags & IEEE80211_FHT_USEHT40) {
+ if (!IEEE80211_IS_CHAN_HT40(chan)) {
+ /* NB: arbitrarily pick ht40+ over ht40- */
+ c = findhtchan(ic, chan, IEEE80211_CHAN_HT40U);
+ if (c == NULL)
+ c = findhtchan(ic, chan,
+ IEEE80211_CHAN_HT40D);
+ if (c == NULL)
+ c = findhtchan(ic, chan,
+ IEEE80211_CHAN_HT20);
+ if (c != NULL)
+ chan = c;
+ }
+ } else if (!IEEE80211_IS_CHAN_HT20(chan)) {
+ c = findhtchan(ic, chan, IEEE80211_CHAN_HT20);
+ if (c != NULL)
+ chan = c;
+ }
+ } else if (IEEE80211_IS_CHAN_HT(chan)) {
+ /* demote to legacy, HT use is disabled */
+ c = ieee80211_find_channel(ic, chan->ic_freq,
+ chan->ic_flags &~ IEEE80211_CHAN_HT);
+ if (c != NULL)
+ chan = c;
+ }
+ return chan;
+}
+
+/*
+ * Setup HT-specific state for a legacy WDS peer.
+ */
+void
+ieee80211_ht_wds_init(struct ieee80211_node *ni)
+{
+ struct ieee80211vap *vap = ni->ni_vap;
+ struct ieee80211_tx_ampdu *tap;
+ int ac;
+
+ KASSERT(vap->iv_flags_ht & IEEE80211_FHT_HT, ("no HT requested"));
+
+ /* XXX check scan cache in case peer has an ap and we have info */
+ /*
+ * If setup with a legacy channel, locate an HT channel.
+ * Otherwise, if the inherited channel (from a companion
+ * AP) is suitable, use it so we share the same location
+ * for the extension channel.
+ */
+ ni->ni_chan = ieee80211_ht_adjust_channel(ni->ni_ic,
+ ni->ni_chan, ieee80211_htchanflags(ni->ni_chan));
+
+ ni->ni_htcap = 0;
+ if (vap->iv_flags_ht & IEEE80211_FHT_SHORTGI20)
+ ni->ni_htcap |= IEEE80211_HTCAP_SHORTGI20;
+ if (IEEE80211_IS_CHAN_HT40(ni->ni_chan)) {
+ ni->ni_htcap |= IEEE80211_HTCAP_CHWIDTH40;
+ ni->ni_chw = 40;
+ if (IEEE80211_IS_CHAN_HT40U(ni->ni_chan))
+ ni->ni_ht2ndchan = IEEE80211_HTINFO_2NDCHAN_ABOVE;
+ else if (IEEE80211_IS_CHAN_HT40D(ni->ni_chan))
+ ni->ni_ht2ndchan = IEEE80211_HTINFO_2NDCHAN_BELOW;
+ if (vap->iv_flags_ht & IEEE80211_FHT_SHORTGI40)
+ ni->ni_htcap |= IEEE80211_HTCAP_SHORTGI40;
+ } else {
+ ni->ni_chw = 20;
+ ni->ni_ht2ndchan = IEEE80211_HTINFO_2NDCHAN_NONE;
+ }
+ ni->ni_htctlchan = ni->ni_chan->ic_ieee;
+ if (vap->iv_flags_ht & IEEE80211_FHT_RIFS)
+ ni->ni_flags |= IEEE80211_NODE_RIFS;
+ /* XXX does it make sense to enable SMPS? */
+
+ ni->ni_htopmode = 0; /* XXX need protection state */
+ ni->ni_htstbc = 0; /* XXX need info */
+
+ for (ac = 0; ac < WME_NUM_AC; ac++) {
+ tap = &ni->ni_tx_ampdu[ac];
+ tap->txa_ac = ac;
+ }
+ /* NB: AMPDU tx/rx governed by IEEE80211_FHT_AMPDU_{TX,RX} */
+ ni->ni_flags |= IEEE80211_NODE_HT | IEEE80211_NODE_AMPDU;
+}
+
+/*
+ * Notify hostap vaps of a change in the HTINFO ie.
+ */
+static void
+htinfo_notify(struct ieee80211com *ic)
+{
+ struct ieee80211vap *vap;
+ int first = 1;
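+ /*
+ * NB: log the occupancy change once; each running hostap
+ * vap on an HT channel then gets a beacon update.
+ */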
+
+ IEEE80211_LOCK_ASSERT(ic);
+
+ TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
+ if (vap->iv_opmode != IEEE80211_M_HOSTAP)
+ continue;
+ if (vap->iv_state != IEEE80211_S_RUN ||
+ !IEEE80211_IS_CHAN_HT(vap->iv_bss->ni_chan))
+ continue;
+ if (first) {
+ IEEE80211_NOTE(vap,
+ IEEE80211_MSG_ASSOC | IEEE80211_MSG_11N,
+ vap->iv_bss,
+ "HT bss occupancy change: %d sta, %d ht, "
+ "%d ht40%s, HT protmode now 0x%x"
+ , ic->ic_sta_assoc
+ , ic->ic_ht_sta_assoc
+ , ic->ic_ht40_sta_assoc
+ , (ic->ic_flags_ht & IEEE80211_FHT_NONHT_PR) ?
+ ", non-HT sta present" : ""
+ , ic->ic_curhtprotmode);
+ first = 0;
+ }
+ ieee80211_beacon_notify(vap, IEEE80211_BEACON_HTINFO);
+ }
+}
+
+/*
+ * Calculate HT protection mode from current
+ * state and handle updates.
+ */
+static void
+htinfo_update(struct ieee80211com *ic)
+{
+ uint8_t protmode;
+
+ if (ic->ic_sta_assoc != ic->ic_ht_sta_assoc) {
+ protmode = IEEE80211_HTINFO_OPMODE_MIXED
+ | IEEE80211_HTINFO_NONHT_PRESENT;
+ } else if (ic->ic_flags_ht & IEEE80211_FHT_NONHT_PR) {
+ protmode = IEEE80211_HTINFO_OPMODE_PROTOPT
+ | IEEE80211_HTINFO_NONHT_PRESENT;
+ } else if (ic->ic_bsschan != IEEE80211_CHAN_ANYC &&
+ IEEE80211_IS_CHAN_HT40(ic->ic_bsschan) &&
+ ic->ic_sta_assoc != ic->ic_ht40_sta_assoc) {
+ protmode = IEEE80211_HTINFO_OPMODE_HT20PR;
+ } else {
+ protmode = IEEE80211_HTINFO_OPMODE_PURE;
+ }
+ if (protmode != ic->ic_curhtprotmode) {
+ ic->ic_curhtprotmode = protmode;
+ htinfo_notify(ic);
+ }
+}
+
+/*
+ * Handle an HT station joining a BSS.
+ */
+void
+ieee80211_ht_node_join(struct ieee80211_node *ni)
+{
+ struct ieee80211com *ic = ni->ni_ic;
+
+ IEEE80211_LOCK_ASSERT(ic);
+
+ if (ni->ni_flags & IEEE80211_NODE_HT) {
+ ic->ic_ht_sta_assoc++;
+ if (ni->ni_chw == 40)
+ ic->ic_ht40_sta_assoc++;
+ }
+ htinfo_update(ic);
+}
+
+/*
+ * Handle an HT station leaving a BSS.
+ */
+void
+ieee80211_ht_node_leave(struct ieee80211_node *ni)
+{
+ struct ieee80211com *ic = ni->ni_ic;
+
+ IEEE80211_LOCK_ASSERT(ic);
+
+ if (ni->ni_flags & IEEE80211_NODE_HT) {
+ ic->ic_ht_sta_assoc--;
+ if (ni->ni_chw == 40)
+ ic->ic_ht40_sta_assoc--;
+ }
+ htinfo_update(ic);
+}
+
+/*
+ * Public version of htinfo_update; used for processing
+ * beacon frames from overlapping bss.
+ *
+ * Caller can specify either IEEE80211_HTINFO_OPMODE_MIXED
+ * (on receipt of a beacon that advertises MIXED) or
+ * IEEE80211_HTINFO_OPMODE_PROTOPT (on receipt of a beacon
+ * from an overlapping legacy bss). We treat MIXED with
+ * a higher precedence than PROTOPT (i.e. we will not change
+ * PROTOPT -> MIXED; only MIXED -> PROTOPT). This
+ * corresponds to how we handle things in htinfo_update.
+ */
+void
+ieee80211_htprot_update(struct ieee80211com *ic, int protmode)
+{
+#define OPMODE(x) SM(x, IEEE80211_HTINFO_OPMODE)
+ IEEE80211_LOCK(ic);
+
+ /* track non-HT station presence */
+ KASSERT(protmode & IEEE80211_HTINFO_NONHT_PRESENT,
+ ("protmode 0x%x", protmode));
+ ic->ic_flags_ht |= IEEE80211_FHT_NONHT_PR;
+ ic->ic_lastnonht = ticks;
+
+ if (protmode != ic->ic_curhtprotmode &&
+ (OPMODE(ic->ic_curhtprotmode) != IEEE80211_HTINFO_OPMODE_MIXED ||
+ OPMODE(protmode) == IEEE80211_HTINFO_OPMODE_PROTOPT)) {
+ /* push beacon update */
+ ic->ic_curhtprotmode = protmode;
+ htinfo_notify(ic);
+ }
+ IEEE80211_UNLOCK(ic);
+#undef OPMODE
+}
+
+/*
+ * Time out presence of an overlapping bss with non-HT
+ * stations. When operating in hostap mode we listen for
+ * beacons from other stations and if we identify a non-HT
+ * station is present we update the opmode field of the
+ * HTINFO ie. To identify when all non-HT stations are
+ * gone we time out this condition.
+ */
+void
+ieee80211_ht_timeout(struct ieee80211com *ic)
+{
+ IEEE80211_LOCK_ASSERT(ic);
+
+ if ((ic->ic_flags_ht & IEEE80211_FHT_NONHT_PR) &&
+ time_after(ticks, ic->ic_lastnonht + IEEE80211_NONHT_PRESENT_AGE)) {
+#if 0
+ IEEE80211_NOTE(vap, IEEE80211_MSG_11N, ni,
+ "%s", "time out non-HT STA present on channel");
+#endif
+ ic->ic_flags_ht &= ~IEEE80211_FHT_NONHT_PR;
+ htinfo_update(ic);
+ }
+}
+
+/* unaligned little-endian access */
+#define LE_READ_2(p) \
+ ((uint16_t) \
+ ((((const uint8_t *)(p))[0] ) | \
+ (((const uint8_t *)(p))[1] << 8)))
+
+/*
+ * Process an 802.11n HT capabilities ie.
+ */
+void
+ieee80211_parse_htcap(struct ieee80211_node *ni, const uint8_t *ie)
+{
+ if (ie[0] == IEEE80211_ELEMID_VENDOR) {
+ /*
+ * Station used Vendor OUI ie to associate;
+ * mark the node so when we respond we'll use
+ * the Vendor OUI's and not the standard ie's.
+ */
+ ni->ni_flags |= IEEE80211_NODE_HTCOMPAT;
+ ie += 4;
+ } else
+ ni->ni_flags &= ~IEEE80211_NODE_HTCOMPAT;
+
+ ni->ni_htcap = LE_READ_2(ie +
+ __offsetof(struct ieee80211_ie_htcap, hc_cap));
+ ni->ni_htparam = ie[__offsetof(struct ieee80211_ie_htcap, hc_param)];
+}
+
+static void
+htinfo_parse(struct ieee80211_node *ni,
+ const struct ieee80211_ie_htinfo *htinfo)
+{
+ uint16_t w;
+
+ ni->ni_htctlchan = htinfo->hi_ctrlchannel;
+ ni->ni_ht2ndchan = SM(htinfo->hi_byte1, IEEE80211_HTINFO_2NDCHAN);
+ w = LE_READ_2(&htinfo->hi_byte2);
+ ni->ni_htopmode = SM(w, IEEE80211_HTINFO_OPMODE);
+ w = LE_READ_2(&htinfo->hi_byte45);
+ ni->ni_htstbc = SM(w, IEEE80211_HTINFO_BASIC_STBCMCS);
+}
+
+/*
+ * Parse an 802.11n HT info ie and save useful information
+ * to the node state. Note this does not effect any state
+ * changes, such as a channel width change.
+ */
+void
+ieee80211_parse_htinfo(struct ieee80211_node *ni, const uint8_t *ie)
+{
+ if (ie[0] == IEEE80211_ELEMID_VENDOR)
+ ie += 4;
+ htinfo_parse(ni, (const struct ieee80211_ie_htinfo *) ie);
+}
+
+/*
+ * Handle 11n channel switch. Use the received HT ie's to
+ * identify the right channel to use. If we cannot locate it
+ * in the channel table then fallback to legacy operation.
+ * Note that we use this information to identify the node's
+ * channel only; the caller is responsible for ensuring any
+ * required channel change is done (e.g. in sta mode when
+ * parsing the contents of a beacon frame).
+ */
+static void
+htinfo_update_chw(struct ieee80211_node *ni, int htflags)
+{
+ struct ieee80211com *ic = ni->ni_ic;
+ struct ieee80211_channel *c;
+ int chanflags;
+
+ chanflags = (ni->ni_chan->ic_flags &~ IEEE80211_CHAN_HT) | htflags;
+ if (chanflags != ni->ni_chan->ic_flags) {
+ /* XXX not right for ht40- */
+ c = ieee80211_find_channel(ic, ni->ni_chan->ic_freq, chanflags);
+ if (c == NULL && (htflags & IEEE80211_CHAN_HT40)) {
+ /*
+ * No HT40 channel entry in our table; fall back
+ * to HT20 operation. This should not happen.
+ */
+ c = findhtchan(ic, ni->ni_chan, IEEE80211_CHAN_HT20);
+#if 0
+ IEEE80211_NOTE(ni->ni_vap,
+ IEEE80211_MSG_ASSOC | IEEE80211_MSG_11N, ni,
+ "no HT40 channel (freq %u), falling back to HT20",
+ ni->ni_chan->ic_freq);
+#endif
+ /* XXX stat */
+ }
+ if (c != NULL && c != ni->ni_chan) {
+ IEEE80211_NOTE(ni->ni_vap,
+ IEEE80211_MSG_ASSOC | IEEE80211_MSG_11N, ni,
+ "switch station to HT%d channel %u/0x%x",
+ IEEE80211_IS_CHAN_HT40(c) ? 40 : 20,
+ c->ic_freq, c->ic_flags);
+ ni->ni_chan = c;
+ }
+ /* NB: caller responsible for forcing any channel change */
+ }
+ /* update node's tx channel width */
+ ni->ni_chw = IEEE80211_IS_CHAN_HT40(ni->ni_chan)? 40 : 20;
+}
+
+/*
+ * Update 11n MIMO PS state according to received htcap.
+ */
+static __inline int
+htcap_update_mimo_ps(struct ieee80211_node *ni)
+{
+ uint16_t oflags = ni->ni_flags;
+
+ switch (ni->ni_htcap & IEEE80211_HTCAP_SMPS) {
+ case IEEE80211_HTCAP_SMPS_DYNAMIC:
+ ni->ni_flags |= IEEE80211_NODE_MIMO_PS;
+ ni->ni_flags |= IEEE80211_NODE_MIMO_RTS;
+ break;
+ case IEEE80211_HTCAP_SMPS_ENA:
+ ni->ni_flags |= IEEE80211_NODE_MIMO_PS;
+ ni->ni_flags &= ~IEEE80211_NODE_MIMO_RTS;
+ break;
+ case IEEE80211_HTCAP_SMPS_OFF:
+ default: /* disable on rx of reserved value */
+ ni->ni_flags &= ~IEEE80211_NODE_MIMO_PS;
+ ni->ni_flags &= ~IEEE80211_NODE_MIMO_RTS;
+ break;
+ }
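+ /* NB: XOR yields the set of flags that changed state */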
+ return (oflags ^ ni->ni_flags);
+}
+
+/*
+ * Update short GI state according to received htcap
+ * and local settings.
+ */
+static __inline void
+htcap_update_shortgi(struct ieee80211_node *ni)
+{
+ struct ieee80211vap *vap = ni->ni_vap;
+
+ ni->ni_flags &= ~(IEEE80211_NODE_SGI20|IEEE80211_NODE_SGI40);
+ if ((ni->ni_htcap & IEEE80211_HTCAP_SHORTGI20) &&
+ (vap->iv_flags_ht & IEEE80211_FHT_SHORTGI20))
+ ni->ni_flags |= IEEE80211_NODE_SGI20;
+ if ((ni->ni_htcap & IEEE80211_HTCAP_SHORTGI40) &&
+ (vap->iv_flags_ht & IEEE80211_FHT_SHORTGI40))
+ ni->ni_flags |= IEEE80211_NODE_SGI40;
+}
+
+/*
+ * Parse and update HT-related state extracted from
+ * the HT cap and info ie's.
+ */
+void
+ieee80211_ht_updateparams(struct ieee80211_node *ni,
+ const uint8_t *htcapie, const uint8_t *htinfoie)
+{
+ struct ieee80211vap *vap = ni->ni_vap;
+ const struct ieee80211_ie_htinfo *htinfo;
+ int htflags;
+
+ ieee80211_parse_htcap(ni, htcapie);
+ if (vap->iv_htcaps & IEEE80211_HTCAP_SMPS)
+ htcap_update_mimo_ps(ni);
+ htcap_update_shortgi(ni);
+
+ if (htinfoie[0] == IEEE80211_ELEMID_VENDOR)
+ htinfoie += 4;
+ htinfo = (const struct ieee80211_ie_htinfo *) htinfoie;
+ htinfo_parse(ni, htinfo);
+
+ htflags = (vap->iv_flags_ht & IEEE80211_FHT_HT) ?
+ IEEE80211_CHAN_HT20 : 0;
+ /* NB: honor operating mode constraint */
+ if ((htinfo->hi_byte1 & IEEE80211_HTINFO_TXWIDTH_2040) &&
+ (vap->iv_flags_ht & IEEE80211_FHT_USEHT40)) {
+ if (ni->ni_ht2ndchan == IEEE80211_HTINFO_2NDCHAN_ABOVE)
+ htflags = IEEE80211_CHAN_HT40U;
+ else if (ni->ni_ht2ndchan == IEEE80211_HTINFO_2NDCHAN_BELOW)
+ htflags = IEEE80211_CHAN_HT40D;
+ }
+ htinfo_update_chw(ni, htflags);
+
+ if ((htinfo->hi_byte1 & IEEE80211_HTINFO_RIFSMODE_PERM) &&
+ (vap->iv_flags_ht & IEEE80211_FHT_RIFS))
+ ni->ni_flags |= IEEE80211_NODE_RIFS;
+ else
+ ni->ni_flags &= ~IEEE80211_NODE_RIFS;
+}
+
+/*
+ * Parse and update HT-related state extracted from the HT cap ie
+ * for a station joining an HT BSS.
+ */
+void
+ieee80211_ht_updatehtcap(struct ieee80211_node *ni, const uint8_t *htcapie)
+{
+ struct ieee80211vap *vap = ni->ni_vap;
+ int htflags;
+
+ ieee80211_parse_htcap(ni, htcapie);
+ if (vap->iv_htcaps & IEEE80211_HTCAP_SMPS)
+ htcap_update_mimo_ps(ni);
+ htcap_update_shortgi(ni);
+
+ /* NB: honor operating mode constraint */
+ /* XXX 40 MHZ intolerant */
+ htflags = (vap->iv_flags_ht & IEEE80211_FHT_HT) ?
+ IEEE80211_CHAN_HT20 : 0;
+ if ((ni->ni_htcap & IEEE80211_HTCAP_CHWIDTH40) &&
+ (vap->iv_flags_ht & IEEE80211_FHT_USEHT40)) {
+ if (IEEE80211_IS_CHAN_HT40U(vap->iv_bss->ni_chan))
+ htflags = IEEE80211_CHAN_HT40U;
+ else if (IEEE80211_IS_CHAN_HT40D(vap->iv_bss->ni_chan))
+ htflags = IEEE80211_CHAN_HT40D;
+ }
+ htinfo_update_chw(ni, htflags);
+}
+
+/*
+ * Install received HT rate set by parsing the HT cap ie.
+ */
+int
+ieee80211_setup_htrates(struct ieee80211_node *ni, const uint8_t *ie, int flags)
+{
+ struct ieee80211vap *vap = ni->ni_vap;
+ const struct ieee80211_ie_htcap *htcap;
+ struct ieee80211_htrateset *rs;
+ int i;
+
+ rs = &ni->ni_htrates;
+ memset(rs, 0, sizeof(*rs));
+ if (ie != NULL) {
+ if (ie[0] == IEEE80211_ELEMID_VENDOR)
+ ie += 4;
+ htcap = (const struct ieee80211_ie_htcap *) ie;
+ for (i = 0; i < IEEE80211_HTRATE_MAXSIZE; i++) {
+ if (isclr(htcap->hc_mcsset, i))
+ continue;
+ if (rs->rs_nrates == IEEE80211_HTRATE_MAXSIZE) {
+ IEEE80211_NOTE(vap,
+ IEEE80211_MSG_XRATE | IEEE80211_MSG_11N, ni,
+ "WARNING, HT rate set too large; only "
+ "using %u rates", IEEE80211_HTRATE_MAXSIZE);
+ vap->iv_stats.is_rx_rstoobig++;
+ break;
+ }
+ rs->rs_rates[rs->rs_nrates++] = i;
+ }
+ }
+ return ieee80211_fix_rate(ni, (struct ieee80211_rateset *) rs, flags);
+}
+
+/*
+ * Mark rates in a node's HT rate set as basic according
+ * to the information in the supplied HT info ie.
+ */
+void
+ieee80211_setup_basic_htrates(struct ieee80211_node *ni, const uint8_t *ie)
+{
+ const struct ieee80211_ie_htinfo *htinfo;
+ struct ieee80211_htrateset *rs;
+ int i, j;
+
+ if (ie[0] == IEEE80211_ELEMID_VENDOR)
+ ie += 4;
+ htinfo = (const struct ieee80211_ie_htinfo *) ie;
+ rs = &ni->ni_htrates;
+ if (rs->rs_nrates == 0) {
+ IEEE80211_NOTE(ni->ni_vap,
+ IEEE80211_MSG_XRATE | IEEE80211_MSG_11N, ni,
+ "%s", "WARNING, empty HT rate set");
+ return;
+ }
+ for (i = 0; i < IEEE80211_HTRATE_MAXSIZE; i++) {
+ if (isclr(htinfo->hi_basicmcsset, i))
+ continue;
+ for (j = 0; j < rs->rs_nrates; j++)
+ if ((rs->rs_rates[j] & IEEE80211_RATE_VAL) == i)
+ rs->rs_rates[j] |= IEEE80211_RATE_BASIC;
+ }
+}
+
+static void
+ampdu_tx_setup(struct ieee80211_tx_ampdu *tap)
+{
+ callout_init(&tap->txa_timer, CALLOUT_MPSAFE);
+ tap->txa_flags |= IEEE80211_AGGR_SETUP;
+}
+
+static void
+ampdu_tx_stop(struct ieee80211_tx_ampdu *tap)
+{
+ struct ieee80211_node *ni = tap->txa_ni;
+ struct ieee80211com *ic = ni->ni_ic;
+
+ KASSERT(tap->txa_flags & IEEE80211_AGGR_SETUP,
+ ("txa_flags 0x%x ac %d", tap->txa_flags, tap->txa_ac));
+
+ /*
+ * Stop BA stream if setup so driver has a chance
+ * to reclaim any resources it might have allocated.
+ */
+ ic->ic_addba_stop(ni, tap);
+ /*
+ * Stop any pending BAR transmit.
+ */
+ bar_stop_timer(tap);
+
+ tap->txa_lastsample = 0;
+ tap->txa_avgpps = 0;
+ /* NB: clearing NAK means we may re-send ADDBA */
+ tap->txa_flags &= ~(IEEE80211_AGGR_SETUP | IEEE80211_AGGR_NAK);
+}
+
+static void
+addba_timeout(void *arg)
+{
+ struct ieee80211_tx_ampdu *tap = arg;
+
+ /* XXX ? */
+ tap->txa_flags &= ~IEEE80211_AGGR_XCHGPEND;
+ tap->txa_attempts++;
+}
+
+static void
+addba_start_timeout(struct ieee80211_tx_ampdu *tap)
+{
+ /* XXX use CALLOUT_PENDING instead? */
+ callout_reset(&tap->txa_timer, ieee80211_addba_timeout,
+ addba_timeout, tap);
+ tap->txa_flags |= IEEE80211_AGGR_XCHGPEND;
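+ /* NB: any follow-up request is paced to no sooner than this timeout */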
+ tap->txa_nextrequest = ticks + ieee80211_addba_timeout;
+}
+
+static void
+addba_stop_timeout(struct ieee80211_tx_ampdu *tap)
+{
+ /* XXX use CALLOUT_PENDING instead? */
+ if (tap->txa_flags & IEEE80211_AGGR_XCHGPEND) {
+ callout_stop(&tap->txa_timer);
+ tap->txa_flags &= ~IEEE80211_AGGR_XCHGPEND;
+ }
+}
+
+/*
+ * Default method for requesting A-MPDU tx aggregation.
+ * We setup the specified state block and start a timer
+ * to wait for an ADDBA response frame.
+ */
+static int
+ieee80211_addba_request(struct ieee80211_node *ni,
+ struct ieee80211_tx_ampdu *tap,
+ int dialogtoken, int baparamset, int batimeout)
+{
+ int bufsiz;
+
+ /* XXX locking */
+ tap->txa_token = dialogtoken;
+ tap->txa_flags |= IEEE80211_AGGR_IMMEDIATE;
+ bufsiz = MS(baparamset, IEEE80211_BAPS_BUFSIZ);
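+ /* NB: bufsiz 0 means unspecified; fall back to the max BA window */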
+ tap->txa_wnd = (bufsiz == 0) ?
+ IEEE80211_AGGR_BAWMAX : min(bufsiz, IEEE80211_AGGR_BAWMAX);
+ addba_start_timeout(tap);
+ return 1;
+}
+
+/*
+ * Default method for processing an A-MPDU tx aggregation
+ * response. We shutdown any pending timer and update the
+ * state block according to the reply.
+ */
+static int
+ieee80211_addba_response(struct ieee80211_node *ni,
+ struct ieee80211_tx_ampdu *tap,
+ int status, int baparamset, int batimeout)
+{
+ int bufsiz, tid;
+
+ /* XXX locking */
+ addba_stop_timeout(tap);
+ if (status == IEEE80211_STATUS_SUCCESS) {
+ bufsiz = MS(baparamset, IEEE80211_BAPS_BUFSIZ);
+ /* XXX override our request? */
+ tap->txa_wnd = (bufsiz == 0) ?
+ IEEE80211_AGGR_BAWMAX : min(bufsiz, IEEE80211_AGGR_BAWMAX);
+ /* XXX AC/TID */
+ tid = MS(baparamset, IEEE80211_BAPS_TID);
+ tap->txa_flags |= IEEE80211_AGGR_RUNNING;
+ tap->txa_attempts = 0;
+ } else {
+ /* mark tid so we don't try again */
+ tap->txa_flags |= IEEE80211_AGGR_NAK;
+ }
+ return 1;
+}
+
+/*
+ * Default method for stopping A-MPDU tx aggregation.
+ * Any timer is cleared and we drain any pending frames.
+ */
+static void
+ieee80211_addba_stop(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap)
+{
+ /* XXX locking */
+ addba_stop_timeout(tap);
+ if (tap->txa_flags & IEEE80211_AGGR_RUNNING) {
+ /* XXX clear aggregation queue */
+ tap->txa_flags &= ~IEEE80211_AGGR_RUNNING;
+ }
+ tap->txa_attempts = 0;
+}
+
+/*
+ * Process a received action frame using the default aggregation
+ * policy. We intercept ADDBA-related frames and use them to
+ * update our aggregation state. All other frames are passed up
+ * for processing by ieee80211_recv_action.
+ */
+static int
+ht_recv_action_ba_addba_request(struct ieee80211_node *ni,
+ const struct ieee80211_frame *wh,
+ const uint8_t *frm, const uint8_t *efrm)
+{
+ struct ieee80211com *ic = ni->ni_ic;
+ struct ieee80211vap *vap = ni->ni_vap;
+ struct ieee80211_rx_ampdu *rap;
+ uint8_t dialogtoken;
+ uint16_t baparamset, batimeout, baseqctl;
+ uint16_t args[4];
+ int tid;
+
+ dialogtoken = frm[2];
+ baparamset = LE_READ_2(frm+3);
+ batimeout = LE_READ_2(frm+5);
+ baseqctl = LE_READ_2(frm+7);
+
+ tid = MS(baparamset, IEEE80211_BAPS_TID);
+
+ IEEE80211_NOTE(vap, IEEE80211_MSG_ACTION | IEEE80211_MSG_11N, ni,
+ "recv ADDBA request: dialogtoken %u baparamset 0x%x "
+ "(tid %d bufsiz %d) batimeout %d baseqctl %d:%d",
+ dialogtoken, baparamset,
+ tid, MS(baparamset, IEEE80211_BAPS_BUFSIZ),
+ batimeout,
+ MS(baseqctl, IEEE80211_BASEQ_START),
+ MS(baseqctl, IEEE80211_BASEQ_FRAG));
+
+ rap = &ni->ni_rx_ampdu[tid];
+
+ /* Send ADDBA response */
+ args[0] = dialogtoken;
+ /*
+ * NB: We ack only if the sta associated with HT and
+ * the ap is configured to do AMPDU rx (the latter
+ * violates the 11n spec and is mostly for testing).
+ */
+ if ((ni->ni_flags & IEEE80211_NODE_AMPDU_RX) &&
+ (vap->iv_flags_ht & IEEE80211_FHT_AMPDU_RX)) {
+ /* XXX handle ampdu_rx_start failure */
+ ic->ic_ampdu_rx_start(ni, rap,
+ baparamset, batimeout, baseqctl);
+
+ args[1] = IEEE80211_STATUS_SUCCESS;
+ } else {
+ IEEE80211_NOTE(vap, IEEE80211_MSG_ACTION | IEEE80211_MSG_11N,
+ ni, "reject ADDBA request: %s",
+ ni->ni_flags & IEEE80211_NODE_AMPDU_RX ?
+ "administratively disabled" :
+ "not negotiated for station");
+ vap->iv_stats.is_addba_reject++;
+ args[1] = IEEE80211_STATUS_UNSPECIFIED;
+ }
+ /* XXX honor rap flags? */
+ args[2] = IEEE80211_BAPS_POLICY_IMMEDIATE
+ | SM(tid, IEEE80211_BAPS_TID)
+ | SM(rap->rxa_wnd, IEEE80211_BAPS_BUFSIZ)
+ ;
+ args[3] = 0;
+ ic->ic_send_action(ni, IEEE80211_ACTION_CAT_BA,
+ IEEE80211_ACTION_BA_ADDBA_RESPONSE, args);
+ return 0;
+}
+
+static int
+ht_recv_action_ba_addba_response(struct ieee80211_node *ni,
+ const struct ieee80211_frame *wh,
+ const uint8_t *frm, const uint8_t *efrm)
+{
+ struct ieee80211com *ic = ni->ni_ic;
+ struct ieee80211vap *vap = ni->ni_vap;
+ struct ieee80211_tx_ampdu *tap;
+ uint8_t dialogtoken, policy;
+ uint16_t baparamset, batimeout, code;
+ int tid, ac, bufsiz;
+
+ dialogtoken = frm[2];
+ code = LE_READ_2(frm+3);
+ baparamset = LE_READ_2(frm+5);
+ tid = MS(baparamset, IEEE80211_BAPS_TID);
+ bufsiz = MS(baparamset, IEEE80211_BAPS_BUFSIZ);
+ policy = MS(baparamset, IEEE80211_BAPS_POLICY);
+ batimeout = LE_READ_2(frm+7);
+
+ ac = TID_TO_WME_AC(tid);
+ tap = &ni->ni_tx_ampdu[ac];
+ if ((tap->txa_flags & IEEE80211_AGGR_XCHGPEND) == 0) {
+ IEEE80211_DISCARD_MAC(vap,
+ IEEE80211_MSG_ACTION | IEEE80211_MSG_11N,
+ ni->ni_macaddr, "ADDBA response",
+ "no pending ADDBA, tid %d dialogtoken %u "
+ "code %d", tid, dialogtoken, code);
+ vap->iv_stats.is_addba_norequest++;
+ return 0;
+ }
+ if (dialogtoken != tap->txa_token) {
+ IEEE80211_DISCARD_MAC(vap,
+ IEEE80211_MSG_ACTION | IEEE80211_MSG_11N,
+ ni->ni_macaddr, "ADDBA response",
+ "dialogtoken mismatch: waiting for %d, "
+ "received %d, tid %d code %d",
+ tap->txa_token, dialogtoken, tid, code);
+ vap->iv_stats.is_addba_badtoken++;
+ return 0;
+ }
+ /* NB: assumes IEEE80211_AGGR_IMMEDIATE is 1 */
+ if (policy != (tap->txa_flags & IEEE80211_AGGR_IMMEDIATE)) {
+ IEEE80211_DISCARD_MAC(vap,
+ IEEE80211_MSG_ACTION | IEEE80211_MSG_11N,
+ ni->ni_macaddr, "ADDBA response",
+ "policy mismatch: expecting %s, "
+ "received %s, tid %d code %d",
+ tap->txa_flags & IEEE80211_AGGR_IMMEDIATE,
+ policy, tid, code);
+ vap->iv_stats.is_addba_badpolicy++;
+ return 0;
+ }
+#if 0
+ /* XXX we take MIN in ieee80211_addba_response */
+ if (bufsiz > IEEE80211_AGGR_BAWMAX) {
+ IEEE80211_DISCARD_MAC(vap,
+ IEEE80211_MSG_ACTION | IEEE80211_MSG_11N,
+ ni->ni_macaddr, "ADDBA response",
+ "BA window too large: max %d, "
+ "received %d, tid %d code %d",
+ IEEE80211_AGGR_BAWMAX, bufsiz, tid, code);
+ vap->iv_stats.is_addba_badbawinsize++;
+ return 0;
+ }
+#endif
+ IEEE80211_NOTE(vap, IEEE80211_MSG_ACTION | IEEE80211_MSG_11N, ni,
+ "recv ADDBA response: dialogtoken %u code %d "
+ "baparamset 0x%x (tid %d bufsiz %d) batimeout %d",
+ dialogtoken, code, baparamset, tid, bufsiz,
+ batimeout);
+ ic->ic_addba_response(ni, tap, code, baparamset, batimeout);
+ return 0;
+}
+
+static int
+ht_recv_action_ba_delba(struct ieee80211_node *ni,
+ const struct ieee80211_frame *wh,
+ const uint8_t *frm, const uint8_t *efrm)
+{
+ struct ieee80211com *ic = ni->ni_ic;
+ struct ieee80211_rx_ampdu *rap;
+ struct ieee80211_tx_ampdu *tap;
+ uint16_t baparamset, code;
+ int tid, ac;
+
+ baparamset = LE_READ_2(frm+2);
+ code = LE_READ_2(frm+4);
+
+ tid = MS(baparamset, IEEE80211_DELBAPS_TID);
+
+ IEEE80211_NOTE(ni->ni_vap, IEEE80211_MSG_ACTION | IEEE80211_MSG_11N, ni,
+ "recv DELBA: baparamset 0x%x (tid %d initiator %d) "
+ "code %d", baparamset, tid,
+ MS(baparamset, IEEE80211_DELBAPS_INIT), code);
+
+ if ((baparamset & IEEE80211_DELBAPS_INIT) == 0) {
+ ac = TID_TO_WME_AC(tid);
+ tap = &ni->ni_tx_ampdu[ac];
+ ic->ic_addba_stop(ni, tap);
+ } else {
+ rap = &ni->ni_rx_ampdu[tid];
+ ic->ic_ampdu_rx_stop(ni, rap);
+ }
+ return 0;
+}
+
+static int
+ht_recv_action_ht_txchwidth(struct ieee80211_node *ni,
+ const struct ieee80211_frame *wh,
+ const uint8_t *frm, const uint8_t *efrm)
+{
+ int chw;
+
+ chw = (frm[2] == IEEE80211_A_HT_TXCHWIDTH_2040) ? 40 : 20;
+
+ IEEE80211_NOTE(ni->ni_vap, IEEE80211_MSG_ACTION | IEEE80211_MSG_11N, ni,
+ "%s: HT txchwidth, width %d%s",
+ __func__, chw, ni->ni_chw != chw ? "*" : "");
+ if (chw != ni->ni_chw) {
+ ni->ni_chw = chw;
+ /* XXX notify on change */
+ }
+ return 0;
+}
+
+static int
+ht_recv_action_ht_mimopwrsave(struct ieee80211_node *ni,
+ const struct ieee80211_frame *wh,
+ const uint8_t *frm, const uint8_t *efrm)
+{
+ const struct ieee80211_action_ht_mimopowersave *mps =
+ (const struct ieee80211_action_ht_mimopowersave *) frm;
+
+ /* XXX check iv_htcaps */
+ if (mps->am_control & IEEE80211_A_HT_MIMOPWRSAVE_ENA)
+ ni->ni_flags |= IEEE80211_NODE_MIMO_PS;
+ else
+ ni->ni_flags &= ~IEEE80211_NODE_MIMO_PS;
+ if (mps->am_control & IEEE80211_A_HT_MIMOPWRSAVE_MODE)
+ ni->ni_flags |= IEEE80211_NODE_MIMO_RTS;
+ else
+ ni->ni_flags &= ~IEEE80211_NODE_MIMO_RTS;
+ /* XXX notify on change */
+ IEEE80211_NOTE(ni->ni_vap, IEEE80211_MSG_ACTION | IEEE80211_MSG_11N, ni,
+ "%s: HT MIMO PS (%s%s)", __func__,
+ (ni->ni_flags & IEEE80211_NODE_MIMO_PS) ? "on" : "off",
+ (ni->ni_flags & IEEE80211_NODE_MIMO_RTS) ? "+rts" : ""
+ );
+ return 0;
+}
+
+/*
+ * Transmit processing.
+ */
+
+/*
+ * Check if A-MPDU should be requested/enabled for a stream.
+ * We require a traffic rate above a per-AC threshold and we
+ * also handle backoff from previous failed attempts.
+ *
+ * Drivers may override this method to bring in information
+ * such as link state conditions in making the decision.
+ */
+static int
+ieee80211_ampdu_enable(struct ieee80211_node *ni,
+ struct ieee80211_tx_ampdu *tap)
+{
+ struct ieee80211vap *vap = ni->ni_vap;
+
+ if (tap->txa_avgpps < vap->iv_ampdu_mintraffic[tap->txa_ac])
+ return 0;
+ /* XXX check rssi? */
+ if (tap->txa_attempts >= ieee80211_addba_maxtries &&
+ ticks < tap->txa_nextrequest) {
+ /*
+ * Don't retry too often; txa_nextrequest is the
+ * earliest time we will retry after
+ * ieee80211_addba_maxtries failed attempts.
+ */
+ return 0;
+ }
+ IEEE80211_NOTE(vap, IEEE80211_MSG_11N, ni,
+ "enable AMPDU on %s, avgpps %d pkts %d",
+ ieee80211_wme_acnames[tap->txa_ac], tap->txa_avgpps, tap->txa_pkts);
+ return 1;
+}
+
+/*
+ * Request A-MPDU tx aggregation. Setup local state and
+ * issue an ADDBA request. BA use will only happen after
+ * the other end replies with ADDBA response.
+ */
+int
+ieee80211_ampdu_request(struct ieee80211_node *ni,
+ struct ieee80211_tx_ampdu *tap)
+{
+ struct ieee80211com *ic = ni->ni_ic;
+ uint16_t args[4];
+ int tid, dialogtoken;
+ static int tokens = 0; /* XXX */
+
+ /* XXX locking */
+ if ((tap->txa_flags & IEEE80211_AGGR_SETUP) == 0) {
+ /* do deferred setup of state */
+ ampdu_tx_setup(tap);
+ }
+ /* XXX hack for not doing proper locking */
+ tap->txa_flags &= ~IEEE80211_AGGR_NAK;
+
+ dialogtoken = (tokens+1) % 63; /* XXX */
+ tid = WME_AC_TO_TID(tap->txa_ac);
+ tap->txa_start = ni->ni_txseqs[tid];
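+ /* NB: BA window left edge starts at the next seqno for this tid */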
+
+ args[0] = dialogtoken;
+ args[1] = IEEE80211_BAPS_POLICY_IMMEDIATE
+ | SM(tid, IEEE80211_BAPS_TID)
+ | SM(IEEE80211_AGGR_BAWMAX, IEEE80211_BAPS_BUFSIZ)
+ ;
+ args[2] = 0; /* batimeout */
+ /* NB: do first so there's no race against reply */
+ if (!ic->ic_addba_request(ni, tap, dialogtoken, args[1], args[2])) {
+ /* unable to setup state, don't make request */
+ IEEE80211_NOTE(ni->ni_vap, IEEE80211_MSG_11N,
+ ni, "%s: could not setup BA stream for AC %d",
+ __func__, tap->txa_ac);
+ /* defer next try so we don't slam the driver with requests */
+ tap->txa_attempts = ieee80211_addba_maxtries;
+ /* NB: check in case driver wants to override */
+ if (tap->txa_nextrequest <= ticks)
+ tap->txa_nextrequest = ticks + ieee80211_addba_backoff;
+ return 0;
+ }
+ tokens = dialogtoken; /* allocate token */
+ /* NB: after calling ic_addba_request so driver can set txa_start */
+ args[3] = SM(tap->txa_start, IEEE80211_BASEQ_START)
+ | SM(0, IEEE80211_BASEQ_FRAG)
+ ;
+ return ic->ic_send_action(ni, IEEE80211_ACTION_CAT_BA,
+ IEEE80211_ACTION_BA_ADDBA_REQUEST, args);
+}
+
+/*
+ * Terminate an AMPDU tx stream. State is reclaimed
+ * and the peer notified with a DelBA Action frame.
+ */
+void
+ieee80211_ampdu_stop(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
+ int reason)
+{
+ struct ieee80211com *ic = ni->ni_ic;
+ struct ieee80211vap *vap = ni->ni_vap;
+ uint16_t args[4];
+
+ /* XXX locking */
+ tap->txa_flags &= ~IEEE80211_AGGR_BARPEND;
+ if (IEEE80211_AMPDU_RUNNING(tap)) {
+ IEEE80211_NOTE(vap, IEEE80211_MSG_ACTION | IEEE80211_MSG_11N,
+ ni, "%s: stop BA stream for AC %d (reason %d)",
+ __func__, tap->txa_ac, reason);
+ vap->iv_stats.is_ampdu_stop++;
+
+ ic->ic_addba_stop(ni, tap);
+ args[0] = WME_AC_TO_TID(tap->txa_ac);
+ args[1] = IEEE80211_DELBAPS_INIT;
+ args[2] = reason; /* XXX reason code */
+ ic->ic_send_action(ni, IEEE80211_ACTION_CAT_BA,
+ IEEE80211_ACTION_BA_DELBA, args);
+ } else {
+ IEEE80211_NOTE(vap, IEEE80211_MSG_ACTION | IEEE80211_MSG_11N,
+ ni, "%s: BA stream for AC %d not running (reason %d)",
+ __func__, tap->txa_ac, reason);
+ vap->iv_stats.is_ampdu_stop_failed++;
+ }
+}
+
+static void
+bar_timeout(void *arg)
+{
+ struct ieee80211_tx_ampdu *tap = arg;
+ struct ieee80211_node *ni = tap->txa_ni;
+
+ KASSERT((tap->txa_flags & IEEE80211_AGGR_XCHGPEND) == 0,
+ ("bar/addba collision, flags 0x%x", tap->txa_flags));
+
+ IEEE80211_NOTE(ni->ni_vap, IEEE80211_MSG_11N,
+ ni, "%s: tid %u flags 0x%x attempts %d", __func__,
+ tap->txa_ac, tap->txa_flags, tap->txa_attempts);
+
+ /* guard against race with bar_tx_complete */
+ if ((tap->txa_flags & IEEE80211_AGGR_BARPEND) == 0)
+ return;
+ /* XXX ? */
+ if (tap->txa_attempts >= ieee80211_bar_maxtries)
+ ieee80211_ampdu_stop(ni, tap, IEEE80211_REASON_TIMEOUT);
+ else
+ ieee80211_send_bar(ni, tap, tap->txa_seqpending);
+}
+
+static void
+bar_start_timer(struct ieee80211_tx_ampdu *tap)
+{
+ callout_reset(&tap->txa_timer, ieee80211_bar_timeout, bar_timeout, tap);
+}
+
+static void
+bar_stop_timer(struct ieee80211_tx_ampdu *tap)
+{
+ callout_stop(&tap->txa_timer);
+}
+
+static void
+bar_tx_complete(struct ieee80211_node *ni, void *arg, int status)
+{
+ struct ieee80211_tx_ampdu *tap = arg;
+
+ IEEE80211_NOTE(ni->ni_vap, IEEE80211_MSG_11N,
+ ni, "%s: tid %u flags 0x%x pending %d status %d",
+ __func__, tap->txa_ac, tap->txa_flags,
+ callout_pending(&tap->txa_timer), status);
+
+ /* XXX locking */
+ if ((tap->txa_flags & IEEE80211_AGGR_BARPEND) &&
+ callout_pending(&tap->txa_timer)) {
+ struct ieee80211com *ic = ni->ni_ic;
+
+ if (status) /* ACK'd */
+ bar_stop_timer(tap);
+ ic->ic_bar_response(ni, tap, status);
+ /* NB: just let timer expire so we pace requests */
+ }
+}
+
+static void
+ieee80211_bar_response(struct ieee80211_node *ni,
+ struct ieee80211_tx_ampdu *tap, int status)
+{
+
+ if (status != 0) { /* got ACK */
+ IEEE80211_NOTE(ni->ni_vap, IEEE80211_MSG_11N,
+ ni, "BAR moves BA win <%u:%u> (%u frames) txseq %u tid %u",
+ tap->txa_start,
+ IEEE80211_SEQ_ADD(tap->txa_start, tap->txa_wnd-1),
+ tap->txa_qframes, tap->txa_seqpending,
+ WME_AC_TO_TID(tap->txa_ac));
+
+ /* NB: timer already stopped in bar_tx_complete */
+ tap->txa_start = tap->txa_seqpending;
+ tap->txa_flags &= ~IEEE80211_AGGR_BARPEND;
+ }
+}
+
+/*
+ * Transmit a BAR frame to the specified node. The
+ * BAR contents are drawn from the supplied aggregation
+ * state associated with the node.
+ *
+ * NB: we only handle immediate ACK w/ compressed bitmap.
+ */
+int
+ieee80211_send_bar(struct ieee80211_node *ni,
+ struct ieee80211_tx_ampdu *tap, ieee80211_seq seq)
+{
+#define senderr(_x, _v) do { vap->iv_stats._v++; ret = _x; goto bad; } while (0)
+ struct ieee80211vap *vap = ni->ni_vap;
+ struct ieee80211com *ic = ni->ni_ic;
+ struct ieee80211_frame_bar *bar;
+ struct mbuf *m;
+ uint16_t barctl, barseqctl;
+ uint8_t *frm;
+ int tid, ret;
+
+ if ((tap->txa_flags & IEEE80211_AGGR_RUNNING) == 0) {
+ /* no ADDBA response, should not happen */
+ /* XXX stat+msg */
+ return EINVAL;
+ }
+ /* XXX locking */
+ bar_stop_timer(tap);
+
+ ieee80211_ref_node(ni);
+
+ m = ieee80211_getmgtframe(&frm, ic->ic_headroom, sizeof(*bar));
+ if (m == NULL)
+ senderr(ENOMEM, is_tx_nobuf);
+
+ if (!ieee80211_add_callback(m, bar_tx_complete, tap)) {
+ m_freem(m);
+ senderr(ENOMEM, is_tx_nobuf); /* XXX */
+ /* NOTREACHED */
+ }
+
+ bar = mtod(m, struct ieee80211_frame_bar *);
+ bar->i_fc[0] = IEEE80211_FC0_VERSION_0 |
+ IEEE80211_FC0_TYPE_CTL | IEEE80211_FC0_SUBTYPE_BAR;
+ bar->i_fc[1] = 0;
+ IEEE80211_ADDR_COPY(bar->i_ra, ni->ni_macaddr);
+ IEEE80211_ADDR_COPY(bar->i_ta, vap->iv_myaddr);
+
+ tid = WME_AC_TO_TID(tap->txa_ac);
+ barctl = (tap->txa_flags & IEEE80211_AGGR_IMMEDIATE ?
+ 0 : IEEE80211_BAR_NOACK)
+ | IEEE80211_BAR_COMP
+ | SM(tid, IEEE80211_BAR_TID)
+ ;
+ barseqctl = SM(seq, IEEE80211_BAR_SEQ_START);
+ /* NB: known to have proper alignment */
+ bar->i_ctl = htole16(barctl);
+ bar->i_seq = htole16(barseqctl);
+ m->m_pkthdr.len = m->m_len = sizeof(struct ieee80211_frame_bar);
+
+ M_WME_SETAC(m, WME_AC_VO);
+
+ IEEE80211_NODE_STAT(ni, tx_mgmt); /* XXX tx_ctl? */
+
+ /* XXX locking */
+ /* init/bump attempts counter */
+ if ((tap->txa_flags & IEEE80211_AGGR_BARPEND) == 0)
+ tap->txa_attempts = 1;
+ else
+ tap->txa_attempts++;
+ tap->txa_seqpending = seq;
+ tap->txa_flags |= IEEE80211_AGGR_BARPEND;
+
+ IEEE80211_NOTE(vap, IEEE80211_MSG_DEBUG | IEEE80211_MSG_11N,
+ ni, "send BAR: tid %u ctl 0x%x start %u (attempt %d)",
+ tid, barctl, seq, tap->txa_attempts);
+
+ ret = ic->ic_raw_xmit(ni, m, NULL);
+ if (ret != 0) {
+ /* xmit failed, clear state flag */
+ tap->txa_flags &= ~IEEE80211_AGGR_BARPEND;
+ goto bad;
+ }
+ /* XXX hack against tx complete happening before timer is started */
+ if (tap->txa_flags & IEEE80211_AGGR_BARPEND)
+ bar_start_timer(tap);
+ return 0;
+bad:
+ ieee80211_free_node(ni);
+ return ret;
+#undef senderr
+}
+
+static int
+ht_action_output(struct ieee80211_node *ni, struct mbuf *m)
+{
+ struct ieee80211_bpf_params params;
+
+ memset(&params, 0, sizeof(params));
+ params.ibp_pri = WME_AC_VO;
+ params.ibp_rate0 = ni->ni_txparms->mgmtrate;
+ /* NB: we know all frames are unicast */
+ params.ibp_try0 = ni->ni_txparms->maxretry;
+ params.ibp_power = ni->ni_txpower;
+ return ieee80211_mgmt_output(ni, m, IEEE80211_FC0_SUBTYPE_ACTION,
+ &params);
+}
+
+#define ADDSHORT(frm, v) do { \
+ frm[0] = (v) & 0xff; \
+ frm[1] = (v) >> 8; \
+ frm += 2; \
+} while (0)
+
+/*
+ * Send an action management frame. The arguments are stuffed
+ * into a frame without inspection; the caller is assumed to
+ * prepare them carefully (e.g. based on the aggregation state).
+ */
+static int
+ht_send_action_ba_addba(struct ieee80211_node *ni,
+ int category, int action, void *arg0)
+{
+ struct ieee80211vap *vap = ni->ni_vap;
+ struct ieee80211com *ic = ni->ni_ic;
+ uint16_t *args = arg0;
+ struct mbuf *m;
+ uint8_t *frm;
+
+ IEEE80211_NOTE(vap, IEEE80211_MSG_ACTION | IEEE80211_MSG_11N, ni,
+ "send ADDBA %s: dialogtoken %d "
+ "baparamset 0x%x (tid %d) batimeout 0x%x baseqctl 0x%x",
+ (action == IEEE80211_ACTION_BA_ADDBA_REQUEST) ?
+ "request" : "response",
+ args[0], args[1], MS(args[1], IEEE80211_BAPS_TID),
+ args[2], args[3]);
+
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_NODE,
+ "ieee80211_ref_node (%s:%u) %p<%s> refcnt %d\n", __func__, __LINE__,
+ ni, ether_sprintf(ni->ni_macaddr), ieee80211_node_refcnt(ni)+1);
+ ieee80211_ref_node(ni);
+
+ m = ieee80211_getmgtframe(&frm,
+ ic->ic_headroom + sizeof(struct ieee80211_frame),
+ sizeof(uint16_t) /* action+category */
+ /* XXX max action payload */
+ + sizeof(struct ieee80211_action_ba_addbaresponse)
+ );
+ if (m != NULL) {
+ *frm++ = category;
+ *frm++ = action;
+ *frm++ = args[0]; /* dialog token */
+ ADDSHORT(frm, args[1]); /* baparamset */
+ ADDSHORT(frm, args[2]); /* batimeout */
+ if (action == IEEE80211_ACTION_BA_ADDBA_REQUEST)
+ ADDSHORT(frm, args[3]); /* baseqctl */
+ m->m_pkthdr.len = m->m_len = frm - mtod(m, uint8_t *);
+ return ht_action_output(ni, m);
+ } else {
+ vap->iv_stats.is_tx_nobuf++;
+ ieee80211_free_node(ni);
+ return ENOMEM;
+ }
+}
+
+static int
+ht_send_action_ba_delba(struct ieee80211_node *ni,
+ int category, int action, void *arg0)
+{
+ struct ieee80211vap *vap = ni->ni_vap;
+ struct ieee80211com *ic = ni->ni_ic;
+ uint16_t *args = arg0;
+ struct mbuf *m;
+ uint16_t baparamset;
+ uint8_t *frm;
+
+ baparamset = SM(args[0], IEEE80211_DELBAPS_TID)
+ | args[1]
+ ;
+ IEEE80211_NOTE(vap, IEEE80211_MSG_ACTION | IEEE80211_MSG_11N, ni,
+ "send DELBA action: tid %d, initiator %d reason %d",
+ args[0], args[1], args[2]);
+
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_NODE,
+ "ieee80211_ref_node (%s:%u) %p<%s> refcnt %d\n", __func__, __LINE__,
+ ni, ether_sprintf(ni->ni_macaddr), ieee80211_node_refcnt(ni)+1);
+ ieee80211_ref_node(ni);
+
+ m = ieee80211_getmgtframe(&frm,
+ ic->ic_headroom + sizeof(struct ieee80211_frame),
+ sizeof(uint16_t) /* action+category */
+ /* XXX max action payload */
+ + sizeof(struct ieee80211_action_ba_addbaresponse)
+ );
+ if (m != NULL) {
+ *frm++ = category;
+ *frm++ = action;
+ ADDSHORT(frm, baparamset);
+ ADDSHORT(frm, args[2]); /* reason code */
+ m->m_pkthdr.len = m->m_len = frm - mtod(m, uint8_t *);
+ return ht_action_output(ni, m);
+ } else {
+ vap->iv_stats.is_tx_nobuf++;
+ ieee80211_free_node(ni);
+ return ENOMEM;
+ }
+}
+
+static int
+ht_send_action_ht_txchwidth(struct ieee80211_node *ni,
+ int category, int action, void *arg0)
+{
+ struct ieee80211vap *vap = ni->ni_vap;
+ struct ieee80211com *ic = ni->ni_ic;
+ struct mbuf *m;
+ uint8_t *frm;
+
+ IEEE80211_NOTE(vap, IEEE80211_MSG_ACTION | IEEE80211_MSG_11N, ni,
+ "send HT txchwidth: width %d",
+ IEEE80211_IS_CHAN_HT40(ni->ni_chan) ? 40 : 20);
+
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_NODE,
+ "ieee80211_ref_node (%s:%u) %p<%s> refcnt %d\n", __func__, __LINE__,
+ ni, ether_sprintf(ni->ni_macaddr), ieee80211_node_refcnt(ni)+1);
+ ieee80211_ref_node(ni);
+
+ m = ieee80211_getmgtframe(&frm,
+ ic->ic_headroom + sizeof(struct ieee80211_frame),
+ sizeof(uint16_t) /* action+category */
+ /* XXX max action payload */
+ + sizeof(struct ieee80211_action_ba_addbaresponse)
+ );
+ if (m != NULL) {
+ *frm++ = category;
+ *frm++ = action;
+ *frm++ = IEEE80211_IS_CHAN_HT40(ni->ni_chan) ?
+ IEEE80211_A_HT_TXCHWIDTH_2040 :
+ IEEE80211_A_HT_TXCHWIDTH_20;
+ m->m_pkthdr.len = m->m_len = frm - mtod(m, uint8_t *);
+ return ht_action_output(ni, m);
+ } else {
+ vap->iv_stats.is_tx_nobuf++;
+ ieee80211_free_node(ni);
+ return ENOMEM;
+ }
+}
+#undef ADDSHORT
+
+/*
+ * Construct the MCS bit mask for inclusion
+ * in an HT information element.
+ */
+static void
+ieee80211_set_htrates(uint8_t *frm, const struct ieee80211_htrateset *rs)
+{
+ int i;
+
+ for (i = 0; i < rs->rs_nrates; i++) {
+ int r = rs->rs_rates[i] & IEEE80211_RATE_VAL;
+ if (r < IEEE80211_HTRATE_MAXSIZE) { /* XXX? */
+ /* NB: this assumes a particular implementation */
+ setbit(frm, r);
+ }
+ }
+}
+
+/*
+ * Add body of an HTCAP information element.
+ */
+static uint8_t *
+ieee80211_add_htcap_body(uint8_t *frm, struct ieee80211_node *ni)
+{
+#define ADDSHORT(frm, v) do { \
+ frm[0] = (v) & 0xff; \
+ frm[1] = (v) >> 8; \
+ frm += 2; \
+} while (0)
+ struct ieee80211vap *vap = ni->ni_vap;
+ uint16_t caps;
+ int rxmax, density;
+
+ /* HT capabilities */
+ caps = vap->iv_htcaps & 0xffff;
+ /*
+ * Note channel width depends on whether we are operating as
+ * a sta or not. When operating as a sta we are generating
+ * a request based on our desired configuration. Otherwise
+ * we are operational and the channel attributes identify
+ * how we've been setup (which might be different if a fixed
+ * channel is specified).
+ */
+ if (vap->iv_opmode == IEEE80211_M_STA) {
+ /* override 20/40 use based on config */
+ if (vap->iv_flags_ht & IEEE80211_FHT_USEHT40)
+ caps |= IEEE80211_HTCAP_CHWIDTH40;
+ else
+ caps &= ~IEEE80211_HTCAP_CHWIDTH40;
+ /* use advertised setting (XXX constrain locally?) */
+ rxmax = MS(ni->ni_htparam, IEEE80211_HTCAP_MAXRXAMPDU);
+ density = MS(ni->ni_htparam, IEEE80211_HTCAP_MPDUDENSITY);
+ } else {
+ /* override 20/40 use based on current channel */
+ if (IEEE80211_IS_CHAN_HT40(ni->ni_chan))
+ caps |= IEEE80211_HTCAP_CHWIDTH40;
+ else
+ caps &= ~IEEE80211_HTCAP_CHWIDTH40;
+ rxmax = vap->iv_ampdu_rxmax;
+ density = vap->iv_ampdu_density;
+ }
+ /* adjust short GI based on channel and config */
+ if ((vap->iv_flags_ht & IEEE80211_FHT_SHORTGI20) == 0)
+ caps &= ~IEEE80211_HTCAP_SHORTGI20;
+ if ((vap->iv_flags_ht & IEEE80211_FHT_SHORTGI40) == 0 ||
+ (caps & IEEE80211_HTCAP_CHWIDTH40) == 0)
+ caps &= ~IEEE80211_HTCAP_SHORTGI40;
+ ADDSHORT(frm, caps);
+
+ /* HT parameters */
+ *frm = SM(rxmax, IEEE80211_HTCAP_MAXRXAMPDU)
+ | SM(density, IEEE80211_HTCAP_MPDUDENSITY)
+ ;
+ frm++;
+
+ /* pre-zero remainder of ie */
+ memset(frm, 0, sizeof(struct ieee80211_ie_htcap) -
+ __offsetof(struct ieee80211_ie_htcap, hc_mcsset));
+
+ /* supported MCS set */
+ /*
+ * XXX it would be better to get the rate set from ni_htrates
+ * so we can restrict it but for sta mode ni_htrates isn't
+ * setup when we're called to form an AssocReq frame so for
+ * now we're restricted to the default HT rate set.
+ */
+ ieee80211_set_htrates(frm, &ieee80211_rateset_11n);
+
+ frm += sizeof(struct ieee80211_ie_htcap) -
+ __offsetof(struct ieee80211_ie_htcap, hc_mcsset);
+ return frm;
+#undef ADDSHORT
+}
+
+/*
+ * Add 802.11n HT capabilities information element
+ */
+uint8_t *
+ieee80211_add_htcap(uint8_t *frm, struct ieee80211_node *ni)
+{
+ frm[0] = IEEE80211_ELEMID_HTCAP;
+ frm[1] = sizeof(struct ieee80211_ie_htcap) - 2;
+ return ieee80211_add_htcap_body(frm + 2, ni);
+}
+
+/*
+ * Add Broadcom OUI wrapped standard HTCAP ie; this is
+ * used for compatibility w/ pre-draft implementations.
+ */
+uint8_t *
+ieee80211_add_htcap_vendor(uint8_t *frm, struct ieee80211_node *ni)
+{
+ frm[0] = IEEE80211_ELEMID_VENDOR;
+ frm[1] = 4 + sizeof(struct ieee80211_ie_htcap) - 2;
+ frm[2] = (BCM_OUI >> 0) & 0xff;
+ frm[3] = (BCM_OUI >> 8) & 0xff;
+ frm[4] = (BCM_OUI >> 16) & 0xff;
+ frm[5] = BCM_OUI_HTCAP;
+ return ieee80211_add_htcap_body(frm + 6, ni);
+}
+
+/*
+ * Construct the MCS bit mask of basic rates
+ * for inclusion in an HT information element.
+ */
+static void
+ieee80211_set_basic_htrates(uint8_t *frm, const struct ieee80211_htrateset *rs)
+{
+ int i;
+
+ for (i = 0; i < rs->rs_nrates; i++) {
+ int r = rs->rs_rates[i] & IEEE80211_RATE_VAL;
+ if ((rs->rs_rates[i] & IEEE80211_RATE_BASIC) &&
+ r < IEEE80211_HTRATE_MAXSIZE) {
+ /* NB: this assumes a particular implementation */
+ setbit(frm, r);
+ }
+ }
+}
+
+/*
+ * Update the HTINFO ie for a beacon frame.
+ */
+void
+ieee80211_ht_update_beacon(struct ieee80211vap *vap,
+ struct ieee80211_beacon_offsets *bo)
+{
+#define PROTMODE (IEEE80211_HTINFO_OPMODE|IEEE80211_HTINFO_NONHT_PRESENT)
+ const struct ieee80211_channel *bsschan = vap->iv_bss->ni_chan;
+ struct ieee80211com *ic = vap->iv_ic;
+ struct ieee80211_ie_htinfo *ht =
+ (struct ieee80211_ie_htinfo *) bo->bo_htinfo;
+
+ /* XXX only update on channel change */
+ ht->hi_ctrlchannel = ieee80211_chan2ieee(ic, bsschan);
+ if (vap->iv_flags_ht & IEEE80211_FHT_RIFS)
+ ht->hi_byte1 = IEEE80211_HTINFO_RIFSMODE_PERM;
+ else
+ ht->hi_byte1 = IEEE80211_HTINFO_RIFSMODE_PROH;
+ if (IEEE80211_IS_CHAN_HT40U(bsschan))
+ ht->hi_byte1 |= IEEE80211_HTINFO_2NDCHAN_ABOVE;
+ else if (IEEE80211_IS_CHAN_HT40D(bsschan))
+ ht->hi_byte1 |= IEEE80211_HTINFO_2NDCHAN_BELOW;
+ else
+ ht->hi_byte1 |= IEEE80211_HTINFO_2NDCHAN_NONE;
+ if (IEEE80211_IS_CHAN_HT40(bsschan))
+ ht->hi_byte1 |= IEEE80211_HTINFO_TXWIDTH_2040;
+
+ /* protection mode */
+ ht->hi_byte2 = (ht->hi_byte2 &~ PROTMODE) | ic->ic_curhtprotmode;
+
+ /* XXX propagate to vendor ie's */
+#undef PROTMODE
+}
+
+/*
+ * Add body of an HTINFO information element.
+ *
+ * NB: We don't use struct ieee80211_ie_htinfo because we can
+ * be called to fillin both a standard ie and a compat ie that
+ * has a vendor OUI at the front.
+ */
+static uint8_t *
+ieee80211_add_htinfo_body(uint8_t *frm, struct ieee80211_node *ni)
+{
+ struct ieee80211vap *vap = ni->ni_vap;
+ struct ieee80211com *ic = ni->ni_ic;
+
+ /* pre-zero remainder of ie */
+ memset(frm, 0, sizeof(struct ieee80211_ie_htinfo) - 2);
+
+ /* primary/control channel center */
+ *frm++ = ieee80211_chan2ieee(ic, ni->ni_chan);
+
+ if (vap->iv_flags_ht & IEEE80211_FHT_RIFS)
+ frm[0] = IEEE80211_HTINFO_RIFSMODE_PERM;
+ else
+ frm[0] = IEEE80211_HTINFO_RIFSMODE_PROH;
+ if (IEEE80211_IS_CHAN_HT40U(ni->ni_chan))
+ frm[0] |= IEEE80211_HTINFO_2NDCHAN_ABOVE;
+ else if (IEEE80211_IS_CHAN_HT40D(ni->ni_chan))
+ frm[0] |= IEEE80211_HTINFO_2NDCHAN_BELOW;
+ else
+ frm[0] |= IEEE80211_HTINFO_2NDCHAN_NONE;
+ if (IEEE80211_IS_CHAN_HT40(ni->ni_chan))
+ frm[0] |= IEEE80211_HTINFO_TXWIDTH_2040;
+
+ frm[1] = ic->ic_curhtprotmode;
+
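+ /* NB: advance past hi_byte1..hi_byte45 (5 bytes) to hi_basicmcsset */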
+ frm += 5;
+
+ /* basic MCS set */
+ ieee80211_set_basic_htrates(frm, &ni->ni_htrates);
+ frm += sizeof(struct ieee80211_ie_htinfo) -
+ __offsetof(struct ieee80211_ie_htinfo, hi_basicmcsset);
+ return frm;
+}
+
+/*
+ * Add an 802.11n HT information element.
+ */
+uint8_t *
+ieee80211_add_htinfo(uint8_t *frm, struct ieee80211_node *ni)
+{
+ frm[0] = IEEE80211_ELEMID_HTINFO;
+ frm[1] = sizeof(struct ieee80211_ie_htinfo) - 2;
+ return ieee80211_add_htinfo_body(frm + 2, ni);
+}
+
+/*
+ * Add Broadcom OUI wrapped standard HTINFO ie; this is
+ * used for compatibility w/ pre-draft implementations.
+ */
+uint8_t *
+ieee80211_add_htinfo_vendor(uint8_t *frm, struct ieee80211_node *ni)
+{
+ frm[0] = IEEE80211_ELEMID_VENDOR;
+ frm[1] = 4 + sizeof(struct ieee80211_ie_htinfo) - 2;
+ frm[2] = (BCM_OUI >> 0) & 0xff;
+ frm[3] = (BCM_OUI >> 8) & 0xff;
+ frm[4] = (BCM_OUI >> 16) & 0xff;
+ frm[5] = BCM_OUI_HTINFO;
+ return ieee80211_add_htinfo_body(frm + 6, ni);
+}
diff --git a/rtems/freebsd/net80211/ieee80211_ht.h b/rtems/freebsd/net80211/ieee80211_ht.h
new file mode 100644
index 00000000..552a4264
--- /dev/null
+++ b/rtems/freebsd/net80211/ieee80211_ht.h
@@ -0,0 +1,202 @@
+/*-
+ * Copyright (c) 2007-2008 Sam Leffler, Errno Consulting
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+#ifndef _NET80211_IEEE80211_HT_HH_
+#define _NET80211_IEEE80211_HT_HH_
+
+/*
+ * 802.11n protocol implementation definitions.
+ */
+
+#define IEEE80211_AGGR_BAWMAX 64 /* max block ack window size */
+/* threshold for aging overlapping non-HT bss */
+#define IEEE80211_NONHT_PRESENT_AGE msecs_to_ticks(60*1000)
+
+struct ieee80211_tx_ampdu {
+ struct ieee80211_node *txa_ni; /* back pointer */
+ u_short txa_flags;
+#define IEEE80211_AGGR_IMMEDIATE 0x0001 /* BA policy */
+#define IEEE80211_AGGR_XCHGPEND 0x0002 /* ADDBA response pending */
+#define IEEE80211_AGGR_RUNNING 0x0004 /* ADDBA response received */
+#define IEEE80211_AGGR_SETUP 0x0008 /* deferred state setup */
+#define IEEE80211_AGGR_NAK 0x0010 /* peer NAK'd ADDBA request */
+#define IEEE80211_AGGR_BARPEND 0x0020 /* BAR response pending */
+ uint8_t txa_ac;
+ uint8_t txa_token; /* dialog token */
+ int txa_lastsample; /* ticks @ last traffic sample */
+ int txa_pkts; /* packets over last sample interval */
+ int txa_avgpps; /* filtered traffic over window */
+ int txa_qbytes; /* data queued (bytes) */
+ short txa_qframes; /* data queued (frames) */
+ ieee80211_seq txa_start; /* BA window left edge */
+ ieee80211_seq txa_seqpending; /* new txa_start pending BAR response */
+ uint16_t txa_wnd; /* BA window size */
+ uint8_t txa_attempts; /* # ADDBA/BAR requests w/o a response */
+ int txa_nextrequest;/* soonest to make next request */
+ struct callout txa_timer;
+ void *txa_private; /* driver-private storage */
+ uint64_t txa_pad[4];
+};
+
+/* return non-zero if AMPDU tx for the TID is running */
+#define IEEE80211_AMPDU_RUNNING(tap) \
+ (((tap)->txa_flags & IEEE80211_AGGR_RUNNING) != 0)
+
+/* return non-zero if AMPDU tx for the TID is running, pending, or was NAK'd */
+#define IEEE80211_AMPDU_REQUESTED(tap) \
+ (((tap)->txa_flags & \
+ (IEEE80211_AGGR_RUNNING|IEEE80211_AGGR_XCHGPEND|IEEE80211_AGGR_NAK)) != 0)
+
+#define IEEE80211_AGGR_BITS \
+ "\20\1IMMEDIATE\2XCHGPEND\3RUNNING\4SETUP\5NAK\6BARPEND"
+
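+/*
+ * Usage sketch (illustrative): the string above is a descriptor for
+ * the FreeBSD kernel printf(9) '%b' conversion; \20 selects
+ * hexadecimal output and each \<n>NAME labels bit n (1-based), so
+ *
+ *   printf("flags %b\n", tap->txa_flags, IEEE80211_AGGR_BITS);
+ *
+ * prints, e.g., "flags 0x5<IMMEDIATE,RUNNING>" when txa_flags is 0x0005.
+ */
+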
+/*
+ * Traffic estimator support. We estimate packets/sec for
+ * each AC that is setup for AMPDU or will potentially be
+ * setup for AMPDU. The traffic rate can be used to decide
+ * when AMPDU should be setup (according to a threshold)
+ * and is available for drivers to do things like cache
+ * eviction when only a limited number of BA streams are
+ * available and more streams are requested than available.
+ */
+
+static __inline void
+ieee80211_txampdu_update_pps(struct ieee80211_tx_ampdu *tap)
+{
+ /* NB: scale factor of 2 was picked heuristically */
+ tap->txa_avgpps = ((tap->txa_avgpps << 2) -
+ tap->txa_avgpps + tap->txa_pkts) >> 2;
+}
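+
+/*
+ * Worked example (illustrative): the update above is an exponential
+ * moving average with 3/4 weight on the old estimate,
+ *
+ *   avgpps' = (4*avgpps - avgpps + pkts) / 4 = (3*avgpps + pkts) / 4
+ *
+ * so avgpps = 100 with pkts = 20 in the last interval yields
+ * (3*100 + 20) / 4 = 80 pps.
+ */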
+
+/*
+ * Count a packet towards the pps estimate.
+ */
+static __inline void
+ieee80211_txampdu_count_packet(struct ieee80211_tx_ampdu *tap)
+{
+ /* XXX bound loop/do more crude estimate? */
+ while (ticks - tap->txa_lastsample >= hz) {
+ ieee80211_txampdu_update_pps(tap);
+ /* reset to start new sample interval */
+ tap->txa_pkts = 0;
+ if (tap->txa_avgpps == 0) {
+ tap->txa_lastsample = ticks;
+ break;
+ }
+ tap->txa_lastsample += hz;
+ }
+ tap->txa_pkts++;
+}
+
+/*
+ * Get the current pps estimate. If the average is out of
+ * date due to lack of traffic then we decay the estimate
+ * to account for the idle time.
+ */
+static __inline int
+ieee80211_txampdu_getpps(struct ieee80211_tx_ampdu *tap)
+{
+ /* XXX bound loop/do more crude estimate? */
+ while (ticks - tap->txa_lastsample >= hz) {
+ ieee80211_txampdu_update_pps(tap);
+ tap->txa_pkts = 0;
+ if (tap->txa_avgpps == 0) {
+ tap->txa_lastsample = ticks;
+ break;
+ }
+ tap->txa_lastsample += hz;
+ }
+ return tap->txa_avgpps;
+}
+
+struct ieee80211_rx_ampdu {
+ int rxa_flags;
+ int rxa_qbytes; /* data queued (bytes) */
+ short rxa_qframes; /* data queued (frames) */
+ ieee80211_seq rxa_seqstart;
+ ieee80211_seq rxa_start; /* start of current BA window */
+ uint16_t rxa_wnd; /* BA window size */
+ int rxa_age; /* age of oldest frame in window */
+ int rxa_nframes; /* frames since ADDBA */
+ struct mbuf *rxa_m[IEEE80211_AGGR_BAWMAX];
+ uint64_t rxa_pad[4];
+};
+
+void ieee80211_ht_attach(struct ieee80211com *);
+void ieee80211_ht_detach(struct ieee80211com *);
+void ieee80211_ht_vattach(struct ieee80211vap *);
+void ieee80211_ht_vdetach(struct ieee80211vap *);
+
+void ieee80211_ht_announce(struct ieee80211com *);
+
+struct ieee80211_mcs_rates {
+ uint16_t ht20_rate_800ns;
+ uint16_t ht20_rate_400ns;
+ uint16_t ht40_rate_800ns;
+ uint16_t ht40_rate_400ns;
+};
+extern const struct ieee80211_mcs_rates ieee80211_htrates[16];
+const struct ieee80211_htrateset *ieee80211_get_suphtrates(
+ struct ieee80211com *, const struct ieee80211_channel *);
+
+struct ieee80211_node;
+int ieee80211_setup_htrates(struct ieee80211_node *,
+ const uint8_t *htcap, int flags);
+void ieee80211_setup_basic_htrates(struct ieee80211_node *,
+ const uint8_t *htinfo);
+struct mbuf *ieee80211_decap_amsdu(struct ieee80211_node *, struct mbuf *);
+int ieee80211_ampdu_reorder(struct ieee80211_node *, struct mbuf *);
+void ieee80211_recv_bar(struct ieee80211_node *, struct mbuf *);
+void ieee80211_ht_node_init(struct ieee80211_node *);
+void ieee80211_ht_node_cleanup(struct ieee80211_node *);
+void ieee80211_ht_node_age(struct ieee80211_node *);
+
+struct ieee80211_channel *ieee80211_ht_adjust_channel(struct ieee80211com *,
+ struct ieee80211_channel *, int);
+void ieee80211_ht_wds_init(struct ieee80211_node *);
+void ieee80211_ht_node_join(struct ieee80211_node *);
+void ieee80211_ht_node_leave(struct ieee80211_node *);
+void ieee80211_htprot_update(struct ieee80211com *, int protmode);
+void ieee80211_ht_timeout(struct ieee80211com *);
+void ieee80211_parse_htcap(struct ieee80211_node *, const uint8_t *);
+void ieee80211_parse_htinfo(struct ieee80211_node *, const uint8_t *);
+void ieee80211_ht_updateparams(struct ieee80211_node *, const uint8_t *,
+ const uint8_t *);
+void ieee80211_ht_updatehtcap(struct ieee80211_node *, const uint8_t *);
+int ieee80211_ampdu_request(struct ieee80211_node *,
+ struct ieee80211_tx_ampdu *);
+void ieee80211_ampdu_stop(struct ieee80211_node *,
+ struct ieee80211_tx_ampdu *, int);
+int ieee80211_send_bar(struct ieee80211_node *, struct ieee80211_tx_ampdu *,
+ ieee80211_seq);
+uint8_t *ieee80211_add_htcap(uint8_t *, struct ieee80211_node *);
+uint8_t *ieee80211_add_htcap_vendor(uint8_t *, struct ieee80211_node *);
+uint8_t *ieee80211_add_htinfo(uint8_t *, struct ieee80211_node *);
+uint8_t *ieee80211_add_htinfo_vendor(uint8_t *, struct ieee80211_node *);
+struct ieee80211_beacon_offsets;
+void ieee80211_ht_update_beacon(struct ieee80211vap *,
+ struct ieee80211_beacon_offsets *);
+#endif /* _NET80211_IEEE80211_HT_HH_ */
diff --git a/rtems/freebsd/net80211/ieee80211_hwmp.c b/rtems/freebsd/net80211/ieee80211_hwmp.c
new file mode 100644
index 00000000..f957071a
--- /dev/null
+++ b/rtems/freebsd/net80211/ieee80211_hwmp.c
@@ -0,0 +1,1440 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 2009 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * This software was developed by Rui Paulo under sponsorship from the
+ * FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+#include <rtems/freebsd/sys/cdefs.h>
+#ifdef __FreeBSD__
+__FBSDID("$FreeBSD$");
+#endif
+
+/*
+ * IEEE 802.11s Hybrid Wireless Mesh Protocol, HWMP.
+ *
+ * Based on March 2009, D3.0 802.11s draft spec.
+ */
+#include <rtems/freebsd/local/opt_inet.h>
+#include <rtems/freebsd/local/opt_wlan.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/kernel.h>
+
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/sockio.h>
+#include <rtems/freebsd/sys/endian.h>
+#include <rtems/freebsd/sys/errno.h>
+#include <rtems/freebsd/sys/proc.h>
+#include <rtems/freebsd/sys/sysctl.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/if_media.h>
+#include <rtems/freebsd/net/if_llc.h>
+#include <rtems/freebsd/net/ethernet.h>
+
+#include <rtems/freebsd/net/bpf.h>
+
+#include <rtems/freebsd/net80211/ieee80211_var.h>
+#include <rtems/freebsd/net80211/ieee80211_action.h>
+#include <rtems/freebsd/net80211/ieee80211_input.h>
+#include <rtems/freebsd/net80211/ieee80211_mesh.h>
+
+static void hwmp_vattach(struct ieee80211vap *);
+static void hwmp_vdetach(struct ieee80211vap *);
+static int hwmp_newstate(struct ieee80211vap *,
+ enum ieee80211_state, int);
+static int hwmp_send_action(struct ieee80211_node *,
+ const uint8_t [IEEE80211_ADDR_LEN],
+ const uint8_t [IEEE80211_ADDR_LEN],
+ uint8_t *, size_t);
+static uint8_t * hwmp_add_meshpreq(uint8_t *,
+ const struct ieee80211_meshpreq_ie *);
+static uint8_t * hwmp_add_meshprep(uint8_t *,
+ const struct ieee80211_meshprep_ie *);
+static uint8_t * hwmp_add_meshperr(uint8_t *,
+ const struct ieee80211_meshperr_ie *);
+static uint8_t * hwmp_add_meshrann(uint8_t *,
+ const struct ieee80211_meshrann_ie *);
+static void hwmp_rootmode_setup(struct ieee80211vap *);
+static void hwmp_rootmode_cb(void *);
+static void hwmp_rootmode_rann_cb(void *);
+static void hwmp_recv_preq(struct ieee80211vap *, struct ieee80211_node *,
+ const struct ieee80211_frame *,
+ const struct ieee80211_meshpreq_ie *);
+static int hwmp_send_preq(struct ieee80211_node *,
+ const uint8_t [IEEE80211_ADDR_LEN],
+ const uint8_t [IEEE80211_ADDR_LEN],
+ struct ieee80211_meshpreq_ie *);
+static void hwmp_recv_prep(struct ieee80211vap *, struct ieee80211_node *,
+ const struct ieee80211_frame *,
+ const struct ieee80211_meshprep_ie *);
+static int hwmp_send_prep(struct ieee80211_node *,
+ const uint8_t [IEEE80211_ADDR_LEN],
+ const uint8_t [IEEE80211_ADDR_LEN],
+ struct ieee80211_meshprep_ie *);
+static void hwmp_recv_perr(struct ieee80211vap *, struct ieee80211_node *,
+ const struct ieee80211_frame *,
+ const struct ieee80211_meshperr_ie *);
+static int hwmp_send_perr(struct ieee80211_node *,
+ const uint8_t [IEEE80211_ADDR_LEN],
+ const uint8_t [IEEE80211_ADDR_LEN],
+ struct ieee80211_meshperr_ie *);
+static void hwmp_recv_rann(struct ieee80211vap *, struct ieee80211_node *,
+ const struct ieee80211_frame *,
+ const struct ieee80211_meshrann_ie *);
+static int hwmp_send_rann(struct ieee80211_node *,
+ const uint8_t [IEEE80211_ADDR_LEN],
+ const uint8_t [IEEE80211_ADDR_LEN],
+ struct ieee80211_meshrann_ie *);
+static struct ieee80211_node *
+ hwmp_discover(struct ieee80211vap *,
+ const uint8_t [IEEE80211_ADDR_LEN], struct mbuf *);
+static void hwmp_peerdown(struct ieee80211_node *);
+
+static struct timeval ieee80211_hwmp_preqminint = { 0, 100000 };
+static struct timeval ieee80211_hwmp_perrminint = { 0, 100000 };
+
+/* unaligned little-endian access */
+#define LE_WRITE_2(p, v) do { \
+ ((uint8_t *)(p))[0] = (v) & 0xff; \
+ ((uint8_t *)(p))[1] = ((v) >> 8) & 0xff; \
+} while (0)
+#define LE_WRITE_4(p, v) do { \
+ ((uint8_t *)(p))[0] = (v) & 0xff; \
+ ((uint8_t *)(p))[1] = ((v) >> 8) & 0xff; \
+ ((uint8_t *)(p))[2] = ((v) >> 16) & 0xff; \
+ ((uint8_t *)(p))[3] = ((v) >> 24) & 0xff; \
+} while (0)
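+
+/*
+ * Byte-layout sketch (illustrative): the stores above are
+ * endian-independent; e.g.
+ *
+ *   uint8_t buf[4];
+ *   LE_WRITE_4(buf, 0x11223344);
+ *   // buf[0..3] == 0x44 0x33 0x22 0x11 on any host
+ */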
+
+
+/* NB: the Target Address set in a Proactive PREQ is the broadcast address. */
+static const uint8_t broadcastaddr[IEEE80211_ADDR_LEN] =
+ { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
+
+typedef uint32_t ieee80211_hwmp_seq;
+#define HWMP_SEQ_LT(a, b) ((int32_t)((a)-(b)) < 0)
+#define HWMP_SEQ_LEQ(a, b) ((int32_t)((a)-(b)) <= 0)
+#define HWMP_SEQ_GT(a, b) ((int32_t)((a)-(b)) > 0)
+#define HWMP_SEQ_GEQ(a, b) ((int32_t)((a)-(b)) >= 0)
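+
+/*
+ * Wraparound sketch (illustrative): the casts above implement serial
+ * number arithmetic, so ordering survives 32-bit wraparound; e.g.
+ *
+ *   ieee80211_hwmp_seq a = 0xfffffffe, b = 0x00000001;
+ *   HWMP_SEQ_LT(a, b);	// true: (int32_t)(a - b) == -3
+ */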
+
+/*
+ * Private extension of ieee80211_mesh_route.
+ */
+struct ieee80211_hwmp_route {
+ ieee80211_hwmp_seq hr_seq; /* last HWMP seq seen from dst*/
+ ieee80211_hwmp_seq hr_preqid; /* last PREQ ID seen from dst */
+ ieee80211_hwmp_seq hr_origseq; /* seq. no. on our latest PREQ*/
+ int hr_preqretries;
+};
+struct ieee80211_hwmp_state {
+ ieee80211_hwmp_seq hs_seq; /* next seq to be used */
+ ieee80211_hwmp_seq hs_preqid; /* next PREQ ID to be used */
+ struct timeval hs_lastpreq; /* last time we sent a PREQ */
+ struct timeval hs_lastperr; /* last time we sent a PERR */
+ int hs_rootmode; /* proactive HWMP */
+ struct callout hs_roottimer;
+ uint8_t hs_maxhops; /* max hop count */
+};
+
+SYSCTL_NODE(_net_wlan, OID_AUTO, hwmp, CTLFLAG_RD, 0,
+ "IEEE 802.11s HWMP parameters");
+static int ieee80211_hwmp_targetonly = 0;
+SYSCTL_INT(_net_wlan_hwmp, OID_AUTO, targetonly, CTLTYPE_INT | CTLFLAG_RW,
+ &ieee80211_hwmp_targetonly, 0, "Set TO bit on generated PREQs");
+static int ieee80211_hwmp_replyforward = 1;
+SYSCTL_INT(_net_wlan_hwmp, OID_AUTO, replyforward, CTLTYPE_INT | CTLFLAG_RW,
+ &ieee80211_hwmp_replyforward, 0, "Set RF bit on generated PREQs");
+static int ieee80211_hwmp_pathtimeout = -1;
+SYSCTL_PROC(_net_wlan_hwmp, OID_AUTO, pathlifetime, CTLTYPE_INT | CTLFLAG_RW,
+ &ieee80211_hwmp_pathtimeout, 0, ieee80211_sysctl_msecs_ticks, "I",
+ "path entry lifetime (ms)");
+static int ieee80211_hwmp_roottimeout = -1;
+SYSCTL_PROC(_net_wlan_hwmp, OID_AUTO, roottimeout, CTLTYPE_INT | CTLFLAG_RW,
+ &ieee80211_hwmp_roottimeout, 0, ieee80211_sysctl_msecs_ticks, "I",
+ "root PREQ timeout (ms)");
+static int ieee80211_hwmp_rootint = -1;
+SYSCTL_PROC(_net_wlan_hwmp, OID_AUTO, rootint, CTLTYPE_INT | CTLFLAG_RW,
+ &ieee80211_hwmp_rootint, 0, ieee80211_sysctl_msecs_ticks, "I",
+ "root interval (ms)");
+static int ieee80211_hwmp_rannint = -1;
+SYSCTL_PROC(_net_wlan_hwmp, OID_AUTO, rannint, CTLTYPE_INT | CTLFLAG_RW,
+ &ieee80211_hwmp_rannint, 0, ieee80211_sysctl_msecs_ticks, "I",
+ "root announcement interval (ms)");
+
+#define IEEE80211_HWMP_DEFAULT_MAXHOPS 31
+
+static ieee80211_recv_action_func hwmp_recv_action_meshpath;
+
+static struct ieee80211_mesh_proto_path mesh_proto_hwmp = {
+ .mpp_descr = "HWMP",
+ .mpp_ie = IEEE80211_MESHCONF_PATH_HWMP,
+ .mpp_discover = hwmp_discover,
+ .mpp_peerdown = hwmp_peerdown,
+ .mpp_vattach = hwmp_vattach,
+ .mpp_vdetach = hwmp_vdetach,
+ .mpp_newstate = hwmp_newstate,
+ .mpp_privlen = sizeof(struct ieee80211_hwmp_route),
+};
+SYSCTL_PROC(_net_wlan_hwmp, OID_AUTO, inact, CTLTYPE_INT | CTLFLAG_RW,
+ &mesh_proto_hwmp.mpp_inact, 0, ieee80211_sysctl_msecs_ticks, "I",
+ "mesh route inactivity timeout (ms)");
+
+
+static void
+ieee80211_hwmp_init(void)
+{
+ ieee80211_hwmp_pathtimeout = msecs_to_ticks(5*1000);
+ ieee80211_hwmp_roottimeout = msecs_to_ticks(5*1000);
+ ieee80211_hwmp_rootint = msecs_to_ticks(2*1000);
+ ieee80211_hwmp_rannint = msecs_to_ticks(1*1000);
+
+ /*
+ * Register action frame handler.
+ */
+ ieee80211_recv_action_register(IEEE80211_ACTION_CAT_MESHPATH,
+ IEEE80211_ACTION_MESHPATH_SEL, hwmp_recv_action_meshpath);
+
+ /* NB: default is 5 secs per spec */
+ mesh_proto_hwmp.mpp_inact = msecs_to_ticks(5*1000);
+
+ /*
+ * Register HWMP.
+ */
+ ieee80211_mesh_register_proto_path(&mesh_proto_hwmp);
+}
+SYSINIT(wlan_hwmp, SI_SUB_DRIVERS, SI_ORDER_SECOND, ieee80211_hwmp_init, NULL);
+
+void
+hwmp_vattach(struct ieee80211vap *vap)
+{
+ struct ieee80211_hwmp_state *hs;
+
+ KASSERT(vap->iv_opmode == IEEE80211_M_MBSS,
+ ("not a mesh vap, opmode %d", vap->iv_opmode));
+
+ hs = malloc(sizeof(struct ieee80211_hwmp_state), M_80211_VAP,
+ M_NOWAIT | M_ZERO);
+ if (hs == NULL) {
+ printf("%s: couldn't alloc HWMP state\n", __func__);
+ return;
+ }
+ hs->hs_maxhops = IEEE80211_HWMP_DEFAULT_MAXHOPS;
+ callout_init(&hs->hs_roottimer, CALLOUT_MPSAFE);
+ vap->iv_hwmp = hs;
+}
+
+void
+hwmp_vdetach(struct ieee80211vap *vap)
+{
+ struct ieee80211_hwmp_state *hs = vap->iv_hwmp;
+
+ callout_drain(&hs->hs_roottimer);
+ free(vap->iv_hwmp, M_80211_VAP);
+ vap->iv_hwmp = NULL;
+}
+
+int
+hwmp_newstate(struct ieee80211vap *vap, enum ieee80211_state ostate, int arg)
+{
+ enum ieee80211_state nstate = vap->iv_state;
+ struct ieee80211_hwmp_state *hs = vap->iv_hwmp;
+
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_STATE, "%s: %s -> %s (%d)\n",
+ __func__, ieee80211_state_name[ostate],
+ ieee80211_state_name[nstate], arg);
+
+ if (nstate != IEEE80211_S_RUN && ostate == IEEE80211_S_RUN)
+ callout_drain(&hs->hs_roottimer);
+ if (nstate == IEEE80211_S_RUN)
+ hwmp_rootmode_setup(vap);
+ return 0;
+}
+
+static int
+hwmp_recv_action_meshpath(struct ieee80211_node *ni,
+ const struct ieee80211_frame *wh,
+ const uint8_t *frm, const uint8_t *efrm)
+{
+ struct ieee80211vap *vap = ni->ni_vap;
+ struct ieee80211_meshpreq_ie preq;
+ struct ieee80211_meshprep_ie prep;
+ struct ieee80211_meshperr_ie perr;
+ struct ieee80211_meshrann_ie rann;
+ const uint8_t *iefrm = frm + 2; /* action + code */
+ int found = 0;
+
+ while (efrm - iefrm > 1) {
+ IEEE80211_VERIFY_LENGTH(efrm - iefrm, iefrm[1] + 2, return 0);
+ switch (*iefrm) {
+ case IEEE80211_ELEMID_MESHPREQ:
+ {
+ const struct ieee80211_meshpreq_ie *mpreq =
+ (const struct ieee80211_meshpreq_ie *) iefrm;
+ /* XXX > 1 target */
+ if (mpreq->preq_len !=
+ sizeof(struct ieee80211_meshpreq_ie) - 2) {
+ IEEE80211_DISCARD(vap,
+ IEEE80211_MSG_ACTION | IEEE80211_MSG_HWMP,
+ wh, NULL, "%s", "PREQ with wrong len");
+ vap->iv_stats.is_rx_mgtdiscard++;
+ break;
+ }
+ memcpy(&preq, mpreq, sizeof(preq));
+ preq.preq_id = LE_READ_4(&mpreq->preq_id);
+ preq.preq_origseq = LE_READ_4(&mpreq->preq_origseq);
+ preq.preq_lifetime = LE_READ_4(&mpreq->preq_lifetime);
+ preq.preq_metric = LE_READ_4(&mpreq->preq_metric);
+ preq.preq_targets[0].target_seq =
+ LE_READ_4(&mpreq->preq_targets[0].target_seq);
+ hwmp_recv_preq(vap, ni, wh, &preq);
+ found++;
+ break;
+ }
+ case IEEE80211_ELEMID_MESHPREP:
+ {
+ const struct ieee80211_meshprep_ie *mprep =
+ (const struct ieee80211_meshprep_ie *) iefrm;
+ if (mprep->prep_len !=
+ sizeof(struct ieee80211_meshprep_ie) - 2) {
+ IEEE80211_DISCARD(vap,
+ IEEE80211_MSG_ACTION | IEEE80211_MSG_HWMP,
+ wh, NULL, "%s", "PREP with wrong len");
+ vap->iv_stats.is_rx_mgtdiscard++;
+ break;
+ }
+ memcpy(&prep, mprep, sizeof(prep));
+ prep.prep_targetseq = LE_READ_4(&mprep->prep_targetseq);
+ prep.prep_lifetime = LE_READ_4(&mprep->prep_lifetime);
+ prep.prep_metric = LE_READ_4(&mprep->prep_metric);
+ prep.prep_origseq = LE_READ_4(&mprep->prep_origseq);
+ hwmp_recv_prep(vap, ni, wh, &prep);
+ found++;
+ break;
+ }
+ case IEEE80211_ELEMID_MESHPERR:
+ {
+ const struct ieee80211_meshperr_ie *mperr =
+ (const struct ieee80211_meshperr_ie *) iefrm;
+ /* XXX > 1 target */
+ if (mperr->perr_len !=
+ sizeof(struct ieee80211_meshperr_ie) - 2) {
+ IEEE80211_DISCARD(vap,
+ IEEE80211_MSG_ACTION | IEEE80211_MSG_HWMP,
+ wh, NULL, "%s", "PERR with wrong len");
+ vap->iv_stats.is_rx_mgtdiscard++;
+ break;
+ }
+ memcpy(&perr, mperr, sizeof(perr));
+ perr.perr_dests[0].dest_seq =
+ LE_READ_4(&mperr->perr_dests[0].dest_seq);
+ hwmp_recv_perr(vap, ni, wh, &perr);
+ found++;
+ break;
+ }
+ case IEEE80211_ELEMID_MESHRANN:
+ {
+ const struct ieee80211_meshrann_ie *mrann =
+ (const struct ieee80211_meshrann_ie *) iefrm;
+ if (mrann->rann_len !=
+ sizeof(struct ieee80211_meshrann_ie) - 2) {
+ IEEE80211_DISCARD(vap,
+ IEEE80211_MSG_ACTION | IEEE80211_MSG_HWMP,
+ wh, NULL, "%s", "RANN with wrong len");
+ vap->iv_stats.is_rx_mgtdiscard++;
+ return 1;
+ }
+ memcpy(&rann, mrann, sizeof(rann));
+ rann.rann_seq = LE_READ_4(&mrann->rann_seq);
+ rann.rann_metric = LE_READ_4(&mrann->rann_metric);
+ hwmp_recv_rann(vap, ni, wh, &rann);
+ found++;
+ break;
+ }
+ }
+ iefrm += iefrm[1] + 2;
+ }
+ if (!found) {
+ IEEE80211_DISCARD(vap,
+ IEEE80211_MSG_ACTION | IEEE80211_MSG_HWMP,
+ wh, NULL, "%s", "PATH SEL action without IE");
+ vap->iv_stats.is_rx_mgtdiscard++;
+ }
+ return 0;
+}
+
+static int
+hwmp_send_action(struct ieee80211_node *ni,
+ const uint8_t sa[IEEE80211_ADDR_LEN],
+ const uint8_t da[IEEE80211_ADDR_LEN],
+ uint8_t *ie, size_t len)
+{
+ struct ieee80211vap *vap = ni->ni_vap;
+ struct ieee80211com *ic = ni->ni_ic;
+ struct ieee80211_bpf_params params;
+ struct mbuf *m;
+ uint8_t *frm;
+
+ if (vap->iv_state == IEEE80211_S_CAC) {
+ IEEE80211_NOTE(vap, IEEE80211_MSG_OUTPUT, ni,
+ "block %s frame in CAC state", "HWMP action");
+ vap->iv_stats.is_tx_badstate++;
+ return EIO; /* XXX */
+ }
+
+ KASSERT(ni != NULL, ("null node"));
+ /*
+ * Hold a reference on the node so it doesn't go away until after
+ * the xmit is complete all the way in the driver. On error we
+ * will remove our reference.
+ */
+#ifdef IEEE80211_DEBUG_REFCNT
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_NODE,
+ "ieee80211_ref_node (%s:%u) %p<%s> refcnt %d\n",
+ __func__, __LINE__,
+ ni, ether_sprintf(ni->ni_macaddr),
+ ieee80211_node_refcnt(ni)+1);
+#endif
+ ieee80211_ref_node(ni);
+
+ m = ieee80211_getmgtframe(&frm,
+ ic->ic_headroom + sizeof(struct ieee80211_frame),
+ sizeof(struct ieee80211_action) + len
+ );
+ if (m == NULL) {
+ ieee80211_free_node(ni);
+ vap->iv_stats.is_tx_nobuf++;
+ return ENOMEM;
+ }
+ *frm++ = IEEE80211_ACTION_CAT_MESHPATH;
+ *frm++ = IEEE80211_ACTION_MESHPATH_SEL;
+ switch (*ie) {
+ case IEEE80211_ELEMID_MESHPREQ:
+ frm = hwmp_add_meshpreq(frm,
+ (struct ieee80211_meshpreq_ie *)ie);
+ break;
+ case IEEE80211_ELEMID_MESHPREP:
+ frm = hwmp_add_meshprep(frm,
+ (struct ieee80211_meshprep_ie *)ie);
+ break;
+ case IEEE80211_ELEMID_MESHPERR:
+ frm = hwmp_add_meshperr(frm,
+ (struct ieee80211_meshperr_ie *)ie);
+ break;
+ case IEEE80211_ELEMID_MESHRANN:
+ frm = hwmp_add_meshrann(frm,
+ (struct ieee80211_meshrann_ie *)ie);
+ break;
+ }
+
+ m->m_pkthdr.len = m->m_len = frm - mtod(m, uint8_t *);
+ M_PREPEND(m, sizeof(struct ieee80211_frame), M_DONTWAIT);
+ if (m == NULL) {
+ ieee80211_free_node(ni);
+ vap->iv_stats.is_tx_nobuf++;
+ return ENOMEM;
+ }
+ ieee80211_send_setup(ni, m,
+ IEEE80211_FC0_TYPE_MGT | IEEE80211_FC0_SUBTYPE_ACTION,
+ IEEE80211_NONQOS_TID, sa, da, sa);
+
+ m->m_flags |= M_ENCAP; /* mark encapsulated */
+ IEEE80211_NODE_STAT(ni, tx_mgmt);
+
+ memset(&params, 0, sizeof(params));
+ params.ibp_pri = WME_AC_VO;
+ params.ibp_rate0 = ni->ni_txparms->mgmtrate;
+ if (IEEE80211_IS_MULTICAST(da))
+ params.ibp_try0 = 1;
+ else
+ params.ibp_try0 = ni->ni_txparms->maxretry;
+ params.ibp_power = ni->ni_txpower;
+ return ic->ic_raw_xmit(ni, m, &params);
+}
+
+#define ADDSHORT(frm, v) do { \
+ frm[0] = (v) & 0xff; \
+ frm[1] = (v) >> 8; \
+ frm += 2; \
+} while (0)
+#define ADDWORD(frm, v) do { \
+ LE_WRITE_4(frm, v); \
+ frm += 4; \
+} while (0)
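+
+/*
+ * Usage sketch (illustrative): both helpers advance frm past what
+ * they write, so IE fields can be appended back to back:
+ *
+ *   uint8_t buf[6], *frm = buf;
+ *   ADDSHORT(frm, 0x1122);	// buf[0] = 0x22, buf[1] = 0x11
+ *   ADDWORD(frm, 1);		// buf[2..5] = 0x01 0x00 0x00 0x00
+ *   // frm now == buf + 6
+ */
+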
+/*
+ * Add a Mesh Path Request IE to a frame.
+ */
+static uint8_t *
+hwmp_add_meshpreq(uint8_t *frm, const struct ieee80211_meshpreq_ie *preq)
+{
+ int i;
+
+ *frm++ = IEEE80211_ELEMID_MESHPREQ;
+ *frm++ = sizeof(struct ieee80211_meshpreq_ie) - 2 +
+ (preq->preq_tcount - 1) * sizeof(*preq->preq_targets);
+ *frm++ = preq->preq_flags;
+ *frm++ = preq->preq_hopcount;
+ *frm++ = preq->preq_ttl;
+ ADDWORD(frm, preq->preq_id);
+ IEEE80211_ADDR_COPY(frm, preq->preq_origaddr); frm += 6;
+ ADDWORD(frm, preq->preq_origseq);
+ ADDWORD(frm, preq->preq_lifetime);
+ ADDWORD(frm, preq->preq_metric);
+ *frm++ = preq->preq_tcount;
+ for (i = 0; i < preq->preq_tcount; i++) {
+ *frm++ = preq->preq_targets[i].target_flags;
+ IEEE80211_ADDR_COPY(frm, preq->preq_targets[i].target_addr);
+ frm += 6;
+ ADDWORD(frm, preq->preq_targets[i].target_seq);
+ }
+ return frm;
+}
+
+/*
+ * Add a Mesh Path Reply IE to a frame.
+ */
+static uint8_t *
+hwmp_add_meshprep(uint8_t *frm, const struct ieee80211_meshprep_ie *prep)
+{
+ *frm++ = IEEE80211_ELEMID_MESHPREP;
+ *frm++ = sizeof(struct ieee80211_meshprep_ie) - 2;
+ *frm++ = prep->prep_flags;
+ *frm++ = prep->prep_hopcount;
+ *frm++ = prep->prep_ttl;
+ IEEE80211_ADDR_COPY(frm, prep->prep_targetaddr); frm += 6;
+ ADDWORD(frm, prep->prep_targetseq);
+ ADDWORD(frm, prep->prep_lifetime);
+ ADDWORD(frm, prep->prep_metric);
+ IEEE80211_ADDR_COPY(frm, prep->prep_origaddr); frm += 6;
+ ADDWORD(frm, prep->prep_origseq);
+ return frm;
+}
+
+/*
+ * Add a Mesh Path Error IE to a frame.
+ */
+static uint8_t *
+hwmp_add_meshperr(uint8_t *frm, const struct ieee80211_meshperr_ie *perr)
+{
+ int i;
+
+ *frm++ = IEEE80211_ELEMID_MESHPERR;
+ *frm++ = sizeof(struct ieee80211_meshperr_ie) - 2 +
+ (perr->perr_ndests - 1) * sizeof(*perr->perr_dests);
+ *frm++ = perr->perr_ttl;
+ *frm++ = perr->perr_ndests;
+ for (i = 0; i < perr->perr_ndests; i++) {
+ *frm++ = perr->perr_dests[i].dest_flags;
+ IEEE80211_ADDR_COPY(frm, perr->perr_dests[i].dest_addr);
+ frm += 6;
+ ADDWORD(frm, perr->perr_dests[i].dest_seq);
+ ADDSHORT(frm, perr->perr_dests[i].dest_rcode);
+ }
+ return frm;
+}
+
+/*
+ * Add a Root Announcement IE to a frame.
+ */
+static uint8_t *
+hwmp_add_meshrann(uint8_t *frm, const struct ieee80211_meshrann_ie *rann)
+{
+ *frm++ = IEEE80211_ELEMID_MESHRANN;
+ *frm++ = sizeof(struct ieee80211_meshrann_ie) - 2;
+ *frm++ = rann->rann_flags;
+ *frm++ = rann->rann_hopcount;
+ *frm++ = rann->rann_ttl;
+ IEEE80211_ADDR_COPY(frm, rann->rann_addr); frm += 6;
+ ADDWORD(frm, rann->rann_seq);
+ ADDWORD(frm, rann->rann_metric);
+ return frm;
+}
+
+static void
+hwmp_rootmode_setup(struct ieee80211vap *vap)
+{
+ struct ieee80211_hwmp_state *hs = vap->iv_hwmp;
+
+ switch (hs->hs_rootmode) {
+ case IEEE80211_HWMP_ROOTMODE_DISABLED:
+ callout_drain(&hs->hs_roottimer);
+ break;
+ case IEEE80211_HWMP_ROOTMODE_NORMAL:
+ case IEEE80211_HWMP_ROOTMODE_PROACTIVE:
+ callout_reset(&hs->hs_roottimer, ieee80211_hwmp_rootint,
+ hwmp_rootmode_cb, vap);
+ break;
+ case IEEE80211_HWMP_ROOTMODE_RANN:
+ callout_reset(&hs->hs_roottimer, ieee80211_hwmp_rannint,
+ hwmp_rootmode_rann_cb, vap);
+ break;
+ }
+}
+
+/*
+ * Send a broadcast Path Request to find all nodes on the mesh. We are
+ * called when the vap is configured as a HWMP root node.
+ */
+#define PREQ_TFLAGS(n) preq.preq_targets[n].target_flags
+#define PREQ_TADDR(n) preq.preq_targets[n].target_addr
+#define PREQ_TSEQ(n) preq.preq_targets[n].target_seq
+static void
+hwmp_rootmode_cb(void *arg)
+{
+ struct ieee80211vap *vap = (struct ieee80211vap *)arg;
+ struct ieee80211_hwmp_state *hs = vap->iv_hwmp;
+ struct ieee80211_mesh_state *ms = vap->iv_mesh;
+ struct ieee80211_meshpreq_ie preq;
+
+ IEEE80211_NOTE(vap, IEEE80211_MSG_HWMP, vap->iv_bss,
+ "%s", "send broadcast PREQ");
+
+ preq.preq_flags = IEEE80211_MESHPREQ_FLAGS_AM;
+ if (ms->ms_flags & IEEE80211_MESHFLAGS_PORTAL)
+ preq.preq_flags |= IEEE80211_MESHPREQ_FLAGS_PR;
+ if (hs->hs_rootmode == IEEE80211_HWMP_ROOTMODE_PROACTIVE)
+ preq.preq_flags |= IEEE80211_MESHPREQ_FLAGS_PP;
+ preq.preq_hopcount = 0;
+ preq.preq_ttl = ms->ms_ttl;
+ preq.preq_id = ++hs->hs_preqid;
+ IEEE80211_ADDR_COPY(preq.preq_origaddr, vap->iv_myaddr);
+ preq.preq_origseq = ++hs->hs_seq;
+ preq.preq_lifetime = ticks_to_msecs(ieee80211_hwmp_roottimeout);
+ preq.preq_metric = IEEE80211_MESHLMETRIC_INITIALVAL;
+ preq.preq_tcount = 1;
+ IEEE80211_ADDR_COPY(PREQ_TADDR(0), broadcastaddr);
+ PREQ_TFLAGS(0) = IEEE80211_MESHPREQ_TFLAGS_TO |
+ IEEE80211_MESHPREQ_TFLAGS_RF;
+ PREQ_TSEQ(0) = 0;
+ vap->iv_stats.is_hwmp_rootreqs++;
+ hwmp_send_preq(vap->iv_bss, vap->iv_myaddr, broadcastaddr, &preq);
+ hwmp_rootmode_setup(vap);
+}
+#undef PREQ_TFLAGS
+#undef PREQ_TADDR
+#undef PREQ_TSEQ
+
+/*
+ * Send a Root Announcement (RANN) to find all the nodes on the mesh. We are
+ * called when the vap is configured as a HWMP RANN root node.
+ */
+static void
+hwmp_rootmode_rann_cb(void *arg)
+{
+ struct ieee80211vap *vap = (struct ieee80211vap *)arg;
+ struct ieee80211_hwmp_state *hs = vap->iv_hwmp;
+ struct ieee80211_mesh_state *ms = vap->iv_mesh;
+ struct ieee80211_meshrann_ie rann;
+
+ IEEE80211_NOTE(vap, IEEE80211_MSG_HWMP, vap->iv_bss,
+ "%s", "send broadcast RANN");
+
+ rann.rann_flags = 0;
+ if (ms->ms_flags & IEEE80211_MESHFLAGS_PORTAL)
+ rann.rann_flags |= IEEE80211_MESHRANN_FLAGS_PR;
+ rann.rann_hopcount = 0;
+ rann.rann_ttl = ms->ms_ttl;
+ IEEE80211_ADDR_COPY(rann.rann_addr, vap->iv_myaddr);
+ rann.rann_seq = ++hs->hs_seq;
+ rann.rann_metric = IEEE80211_MESHLMETRIC_INITIALVAL;
+
+ vap->iv_stats.is_hwmp_rootrann++;
+ hwmp_send_rann(vap->iv_bss, vap->iv_myaddr, broadcastaddr, &rann);
+ hwmp_rootmode_setup(vap);
+}
+
+#define PREQ_TFLAGS(n) preq->preq_targets[n].target_flags
+#define PREQ_TADDR(n) preq->preq_targets[n].target_addr
+#define PREQ_TSEQ(n) preq->preq_targets[n].target_seq
+static void
+hwmp_recv_preq(struct ieee80211vap *vap, struct ieee80211_node *ni,
+ const struct ieee80211_frame *wh, const struct ieee80211_meshpreq_ie *preq)
+{
+ struct ieee80211_mesh_state *ms = vap->iv_mesh;
+ struct ieee80211_mesh_route *rt = NULL;
+ struct ieee80211_mesh_route *rtorig = NULL;
+ struct ieee80211_hwmp_route *hrorig;
+ struct ieee80211_hwmp_state *hs = vap->iv_hwmp;
+ struct ieee80211_meshprep_ie prep;
+
+ if (ni == vap->iv_bss ||
+ ni->ni_mlstate != IEEE80211_NODE_MESH_ESTABLISHED)
+ return;
+ /*
+ * Ignore PREQs from ourselves; this can happen when someone
+ * forwards one back to us.
+ */
+ if (IEEE80211_ADDR_EQ(vap->iv_myaddr, preq->preq_origaddr))
+ return;
+
+ IEEE80211_NOTE(vap, IEEE80211_MSG_HWMP, ni,
+ "received PREQ, source %s", ether_sprintf(preq->preq_origaddr));
+
+ /*
+ * Acceptance criteria: if the PREQ is not for us and
+ * forwarding is disabled, discard this PREQ.
+ */
+ if (!IEEE80211_ADDR_EQ(vap->iv_myaddr, PREQ_TADDR(0)) &&
+ !(ms->ms_flags & IEEE80211_MESHFLAGS_FWD)) {
+ IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_HWMP,
+ preq->preq_origaddr, NULL, "%s", "not accepting PREQ");
+ return;
+ }
+ rtorig = ieee80211_mesh_rt_find(vap, preq->preq_origaddr);
+ if (rtorig == NULL)
+ rtorig = ieee80211_mesh_rt_add(vap, preq->preq_origaddr);
+ if (rtorig == NULL) {
+ /* XXX stat */
+ return;
+ }
+ hrorig = IEEE80211_MESH_ROUTE_PRIV(rtorig, struct ieee80211_hwmp_route);
+ /*
+ * Sequence number validation.
+ */
+ if (HWMP_SEQ_LEQ(preq->preq_id, hrorig->hr_preqid) &&
+ HWMP_SEQ_LEQ(preq->preq_origseq, hrorig->hr_seq)) {
+ IEEE80211_NOTE(vap, IEEE80211_MSG_HWMP, ni,
+ "discard PREQ from %s, old seq no %u <= %u",
+ ether_sprintf(preq->preq_origaddr),
+ preq->preq_origseq, hrorig->hr_seq);
+ return;
+ }
+ hrorig->hr_preqid = preq->preq_id;
+ hrorig->hr_seq = preq->preq_origseq;
+
+ /*
+ * Check if the PREQ is addressed to us.
+ */
+ if (IEEE80211_ADDR_EQ(vap->iv_myaddr, PREQ_TADDR(0))) {
+ IEEE80211_NOTE(vap, IEEE80211_MSG_HWMP, ni,
+ "reply to %s", ether_sprintf(preq->preq_origaddr));
+ /*
+ * Build and send a PREP frame.
+ */
+ prep.prep_flags = 0;
+ prep.prep_hopcount = 0;
+ prep.prep_ttl = ms->ms_ttl;
+ IEEE80211_ADDR_COPY(prep.prep_targetaddr, vap->iv_myaddr);
+ prep.prep_targetseq = ++hs->hs_seq;
+ prep.prep_lifetime = preq->preq_lifetime;
+ prep.prep_metric = IEEE80211_MESHLMETRIC_INITIALVAL;
+ IEEE80211_ADDR_COPY(prep.prep_origaddr, preq->preq_origaddr);
+ prep.prep_origseq = preq->preq_origseq;
+ hwmp_send_prep(ni, vap->iv_myaddr, wh->i_addr2, &prep);
+ /*
+ * Build the reverse path, if we don't have it already.
+ */
+ rt = ieee80211_mesh_rt_find(vap, preq->preq_origaddr);
+ if (rt == NULL)
+ hwmp_discover(vap, preq->preq_origaddr, NULL);
+ else if ((rt->rt_flags & IEEE80211_MESHRT_FLAGS_VALID) == 0)
+ hwmp_discover(vap, rt->rt_dest, NULL);
+ return;
+ }
+ /*
+ * Proactive PREQ: reply with a proactive PREP to the
+ * root STA if requested.
+ */
+ if (IEEE80211_ADDR_EQ(PREQ_TADDR(0), broadcastaddr) &&
+ (PREQ_TFLAGS(0) &
+ (IEEE80211_MESHPREQ_TFLAGS_TO|IEEE80211_MESHPREQ_TFLAGS_RF)) ==
+ (IEEE80211_MESHPREQ_TFLAGS_TO|IEEE80211_MESHPREQ_TFLAGS_RF)) {
+ uint8_t rootmac[IEEE80211_ADDR_LEN];
+
+ IEEE80211_ADDR_COPY(rootmac, preq->preq_origaddr);
+ rt = ieee80211_mesh_rt_find(vap, rootmac);
+ if (rt == NULL) {
+ rt = ieee80211_mesh_rt_add(vap, rootmac);
+ if (rt == NULL) {
+ IEEE80211_NOTE(vap, IEEE80211_MSG_HWMP, ni,
+ "unable to add root mesh path to %s",
+ ether_sprintf(rootmac));
+ vap->iv_stats.is_mesh_rtaddfailed++;
+ return;
+ }
+ }
+ IEEE80211_NOTE(vap, IEEE80211_MSG_HWMP, ni,
+ "root mesh station @ %s", ether_sprintf(rootmac));
+
+ /*
+ * Reply with a PREP if we don't have a path to the root
+ * or if the root sent us a proactive PREQ.
+ */
+ if ((rt->rt_flags & IEEE80211_MESHRT_FLAGS_VALID) == 0 ||
+ (preq->preq_flags & IEEE80211_MESHPREQ_FLAGS_PP)) {
+ prep.prep_flags = 0;
+ prep.prep_hopcount = 0;
+ prep.prep_ttl = ms->ms_ttl;
+ IEEE80211_ADDR_COPY(prep.prep_origaddr, rootmac);
+ prep.prep_origseq = preq->preq_origseq;
+ prep.prep_lifetime = preq->preq_lifetime;
+ prep.prep_metric = IEEE80211_MESHLMETRIC_INITIALVAL;
+ IEEE80211_ADDR_COPY(prep.prep_targetaddr,
+ vap->iv_myaddr);
+ prep.prep_targetseq = ++hs->hs_seq;
+ hwmp_send_prep(vap->iv_bss, vap->iv_myaddr,
+ broadcastaddr, &prep);
+ }
+ hwmp_discover(vap, rootmac, NULL);
+ return;
+ }
+ rt = ieee80211_mesh_rt_find(vap, PREQ_TADDR(0));
+
+ /*
+ * Forwarding and Intermediate reply for PREQs with 1 target.
+ */
+ if (preq->preq_tcount == 1) {
+ struct ieee80211_meshpreq_ie ppreq; /* propagated PREQ */
+
+ memcpy(&ppreq, preq, sizeof(ppreq));
+ /*
+ * We have a valid route to this node.
+ */
+ if (rt != NULL &&
+ (rt->rt_flags & IEEE80211_MESHRT_FLAGS_VALID)) {
+ if (preq->preq_ttl > 1 &&
+ preq->preq_hopcount < hs->hs_maxhops) {
+ IEEE80211_NOTE(vap, IEEE80211_MSG_HWMP, ni,
+ "forward PREQ from %s",
+ ether_sprintf(preq->preq_origaddr));
+ /*
+ * Propagate the original PREQ.
+ */
+ ppreq.preq_hopcount += 1;
+ ppreq.preq_ttl -= 1;
+ ppreq.preq_metric +=
+ ms->ms_pmetric->mpm_metric(ni);
+ /*
+ * Set TO and unset RF bits because we are going
+ * to send a PREP next.
+ */
+ ppreq.preq_targets[0].target_flags |=
+ IEEE80211_MESHPREQ_TFLAGS_TO;
+ ppreq.preq_targets[0].target_flags &=
+ ~IEEE80211_MESHPREQ_TFLAGS_RF;
+ hwmp_send_preq(ni, vap->iv_myaddr,
+ broadcastaddr, &ppreq);
+ }
+ /*
+ * Check if we can send an intermediate Path Reply,
+ * i.e., Target Only bit is not set.
+ */
+ if (!(PREQ_TFLAGS(0) & IEEE80211_MESHPREQ_TFLAGS_TO)) {
+ struct ieee80211_meshprep_ie prep;
+
+ IEEE80211_NOTE(vap, IEEE80211_MSG_HWMP, ni,
+ "intermediate reply for PREQ from %s",
+ ether_sprintf(preq->preq_origaddr));
+ prep.prep_flags = 0;
+ prep.prep_hopcount = rt->rt_nhops + 1;
+ prep.prep_ttl = ms->ms_ttl;
+ IEEE80211_ADDR_COPY(&prep.prep_targetaddr,
+ PREQ_TADDR(0));
+ prep.prep_targetseq = hrorig->hr_seq;
+ prep.prep_lifetime = preq->preq_lifetime;
+ prep.prep_metric = rt->rt_metric +
+ ms->ms_pmetric->mpm_metric(ni);
+ IEEE80211_ADDR_COPY(&prep.prep_origaddr,
+ preq->preq_origaddr);
+ prep.prep_origseq = hrorig->hr_seq;
+ hwmp_send_prep(ni, vap->iv_myaddr,
+ broadcastaddr, &prep);
+ }
+ /*
+ * We have no information about this path,
+ * propagate the PREQ.
+ */
+ } else if (preq->preq_ttl > 1 &&
+ preq->preq_hopcount < hs->hs_maxhops) {
+ if (rt == NULL) {
+ rt = ieee80211_mesh_rt_add(vap, PREQ_TADDR(0));
+ if (rt == NULL) {
+ IEEE80211_NOTE(vap, IEEE80211_MSG_HWMP,
+ ni, "unable to add PREQ path to %s",
+ ether_sprintf(PREQ_TADDR(0)));
+ vap->iv_stats.is_mesh_rtaddfailed++;
+ return;
+ }
+ }
+ rt->rt_metric = preq->preq_metric;
+ rt->rt_lifetime = preq->preq_lifetime;
+ hrorig = IEEE80211_MESH_ROUTE_PRIV(rt,
+ struct ieee80211_hwmp_route);
+ hrorig->hr_seq = preq->preq_origseq;
+ hrorig->hr_preqid = preq->preq_id;
+
+ IEEE80211_NOTE(vap, IEEE80211_MSG_HWMP, ni,
+ "forward PREQ from %s",
+ ether_sprintf(preq->preq_origaddr));
+ ppreq.preq_hopcount += 1;
+ ppreq.preq_ttl -= 1;
+ ppreq.preq_metric += ms->ms_pmetric->mpm_metric(ni);
+ hwmp_send_preq(ni, vap->iv_myaddr, broadcastaddr,
+ &ppreq);
+ }
+ }
+
+}
+#undef PREQ_TFLAGS
+#undef PREQ_TADDR
+#undef PREQ_TSEQ
+
+static int
+hwmp_send_preq(struct ieee80211_node *ni,
+ const uint8_t sa[IEEE80211_ADDR_LEN],
+ const uint8_t da[IEEE80211_ADDR_LEN],
+ struct ieee80211_meshpreq_ie *preq)
+{
+ struct ieee80211_hwmp_state *hs = ni->ni_vap->iv_hwmp;
+
+ /*
+ * Enforce PREQ interval.
+ */
+ if (ratecheck(&hs->hs_lastpreq, &ieee80211_hwmp_preqminint) == 0)
+ return EALREADY;
+ getmicrouptime(&hs->hs_lastpreq);
+
+ /*
+ * mesh preq action frame format
+ * [6] da
+ * [6] sa
+ * [6] addr3 = sa
+ * [1] category
+ * [1] action
+ * [tlv] mesh path request
+ */
+ preq->preq_ie = IEEE80211_ELEMID_MESHPREQ;
+ return hwmp_send_action(ni, sa, da, (uint8_t *)preq,
+ sizeof(struct ieee80211_meshpreq_ie));
+}
+
+static void
+hwmp_recv_prep(struct ieee80211vap *vap, struct ieee80211_node *ni,
+ const struct ieee80211_frame *wh, const struct ieee80211_meshprep_ie *prep)
+{
+ struct ieee80211_mesh_state *ms = vap->iv_mesh;
+ struct ieee80211_hwmp_state *hs = vap->iv_hwmp;
+ struct ieee80211_mesh_route *rt = NULL;
+ struct ieee80211_hwmp_route *hr;
+ struct ieee80211com *ic = vap->iv_ic;
+ struct ifnet *ifp = vap->iv_ifp;
+ struct mbuf *m, *next;
+
+ /*
+ * Acceptance criteria: if the corresponding PREQ was not generated
+ * by us and forwarding is disabled, discard this PREP.
+ */
+ if (ni == vap->iv_bss ||
+ ni->ni_mlstate != IEEE80211_NODE_MESH_ESTABLISHED)
+ return;
+ if (!IEEE80211_ADDR_EQ(vap->iv_myaddr, prep->prep_origaddr) &&
+ !(ms->ms_flags & IEEE80211_MESHFLAGS_FWD))
+ return;
+
+ IEEE80211_NOTE(vap, IEEE80211_MSG_HWMP, ni,
+ "received PREP from %s", ether_sprintf(prep->prep_targetaddr));
+
+ rt = ieee80211_mesh_rt_find(vap, prep->prep_targetaddr);
+ if (rt == NULL) {
+ /*
+ * If we have no entry, this could be a reply to a root PREQ.
+ */
+ if (hs->hs_rootmode != IEEE80211_HWMP_ROOTMODE_DISABLED) {
+ rt = ieee80211_mesh_rt_add(vap, prep->prep_targetaddr);
+ if (rt == NULL) {
+ IEEE80211_NOTE(vap, IEEE80211_MSG_HWMP,
+ ni, "unable to add PREP path to %s",
+ ether_sprintf(prep->prep_targetaddr));
+ vap->iv_stats.is_mesh_rtaddfailed++;
+ return;
+ }
+ IEEE80211_ADDR_COPY(rt->rt_nexthop, wh->i_addr2);
+ rt->rt_nhops = prep->prep_hopcount;
+ rt->rt_lifetime = prep->prep_lifetime;
+ rt->rt_metric = prep->prep_metric;
+ rt->rt_flags |= IEEE80211_MESHRT_FLAGS_VALID;
+ IEEE80211_NOTE(vap, IEEE80211_MSG_HWMP, ni,
+ "add root path to %s nhops %d metric %d (PREP)",
+ ether_sprintf(prep->prep_targetaddr),
+ rt->rt_nhops, rt->rt_metric);
+ return;
+ }
+ return;
+ }
+ /*
+ * Sequence number validation.
+ */
+ hr = IEEE80211_MESH_ROUTE_PRIV(rt, struct ieee80211_hwmp_route);
+ if (HWMP_SEQ_LEQ(prep->prep_targetseq, hr->hr_seq)) {
+ IEEE80211_NOTE(vap, IEEE80211_MSG_HWMP, ni,
+ "discard PREP from %s, old seq no %u <= %u",
+ ether_sprintf(prep->prep_targetaddr),
+ prep->prep_targetseq, hr->hr_seq);
+ return;
+ }
+ hr->hr_seq = prep->prep_targetseq;
+ /*
+ * If it's NOT for us, propagate the PREP.
+ */
+ if (!IEEE80211_ADDR_EQ(vap->iv_myaddr, prep->prep_origaddr) &&
+ prep->prep_ttl > 1 && prep->prep_hopcount < hs->hs_maxhops) {
+ struct ieee80211_meshprep_ie pprep; /* propagated PREP */
+
+ IEEE80211_NOTE(vap, IEEE80211_MSG_HWMP, ni,
+ "propagate PREP from %s",
+ ether_sprintf(prep->prep_targetaddr));
+
+ memcpy(&pprep, prep, sizeof(pprep));
+ pprep.prep_hopcount += 1;
+ pprep.prep_ttl -= 1;
+ pprep.prep_metric += ms->ms_pmetric->mpm_metric(ni);
+ IEEE80211_ADDR_COPY(pprep.prep_targetaddr, vap->iv_myaddr);
+ hwmp_send_prep(ni, vap->iv_myaddr, broadcastaddr, &pprep);
+ }
+ hr = IEEE80211_MESH_ROUTE_PRIV(rt, struct ieee80211_hwmp_route);
+ if (rt->rt_flags & IEEE80211_MESHRT_FLAGS_PROXY) {
+ /* NB: never clobber a proxy entry */
+ IEEE80211_NOTE(vap, IEEE80211_MSG_HWMP, ni,
+ "discard PREP for %s, route is marked PROXY",
+ ether_sprintf(prep->prep_targetaddr));
+ vap->iv_stats.is_hwmp_proxy++;
+ } else if (prep->prep_origseq == hr->hr_origseq) {
+ /*
+ * Check if we already have a path to this node.
+ * If we do, check if this path reply contains a
+ * better route.
+ */
+ if ((rt->rt_flags & IEEE80211_MESHRT_FLAGS_VALID) == 0 ||
+ (prep->prep_hopcount < rt->rt_nhops ||
+ prep->prep_metric < rt->rt_metric)) {
+ IEEE80211_NOTE(vap, IEEE80211_MSG_HWMP, ni,
+ "%s path to %s, hopcount %d:%d metric %d:%d",
+ rt->rt_flags & IEEE80211_MESHRT_FLAGS_VALID ?
+ "prefer" : "update",
+ ether_sprintf(prep->prep_origaddr),
+ rt->rt_nhops, prep->prep_hopcount,
+ rt->rt_metric, prep->prep_metric);
+ IEEE80211_ADDR_COPY(rt->rt_nexthop, wh->i_addr2);
+ rt->rt_nhops = prep->prep_hopcount;
+ rt->rt_lifetime = prep->prep_lifetime;
+ rt->rt_metric = prep->prep_metric;
+ rt->rt_flags |= IEEE80211_MESHRT_FLAGS_VALID;
+ } else {
+ IEEE80211_NOTE(vap, IEEE80211_MSG_HWMP, ni,
+ "ignore PREP for %s, hopcount %d:%d metric %d:%d",
+ ether_sprintf(prep->prep_targetaddr),
+ rt->rt_nhops, prep->prep_hopcount,
+ rt->rt_metric, prep->prep_metric);
+ }
+ } else {
+ IEEE80211_NOTE(vap, IEEE80211_MSG_HWMP, ni,
+ "discard PREP for %s, wrong seqno %u != %u",
+ ether_sprintf(prep->prep_targetaddr), prep->prep_origseq,
+ hr->hr_seq);
+ vap->iv_stats.is_hwmp_wrongseq++;
+ }
+ /*
+ * Check for frames queued awaiting path discovery.
+ * XXX probably can tell exactly and avoid remove call
+ * NB: hash may have false matches, if so they will get
+ * stuck back on the stageq because there won't be
+ * a path.
+ */
+ m = ieee80211_ageq_remove(&ic->ic_stageq,
+ (struct ieee80211_node *)(uintptr_t)
+ ieee80211_mac_hash(ic, rt->rt_dest));
+ for (; m != NULL; m = next) {
+ next = m->m_nextpkt;
+ m->m_nextpkt = NULL;
+ IEEE80211_NOTE(vap, IEEE80211_MSG_HWMP, ni,
+ "flush queued frame %p len %d", m, m->m_pkthdr.len);
+ ifp->if_transmit(ifp, m);
+ }
+}
+
+static int
+hwmp_send_prep(struct ieee80211_node *ni,
+ const uint8_t sa[IEEE80211_ADDR_LEN],
+ const uint8_t da[IEEE80211_ADDR_LEN],
+ struct ieee80211_meshprep_ie *prep)
+{
+ /* NB: there's no PREP minimum interval. */
+
+ /*
+ * mesh prep action frame format
+ * [6] da
+ * [6] sa
+ * [6] addr3 = sa
+ * [1] category
+ * [1] action
+ * [tlv] mesh path reply
+ */
+ prep->prep_ie = IEEE80211_ELEMID_MESHPREP;
+ return hwmp_send_action(ni, sa, da, (uint8_t *)prep,
+ sizeof(struct ieee80211_meshprep_ie));
+}
+
+#define PERR_DFLAGS(n) perr.perr_dests[n].dest_flags
+#define PERR_DADDR(n) perr.perr_dests[n].dest_addr
+#define PERR_DSEQ(n) perr.perr_dests[n].dest_seq
+#define PERR_DRCODE(n) perr.perr_dests[n].dest_rcode
+static void
+hwmp_peerdown(struct ieee80211_node *ni)
+{
+ struct ieee80211vap *vap = ni->ni_vap;
+ struct ieee80211_mesh_state *ms = vap->iv_mesh;
+ struct ieee80211_meshperr_ie perr;
+ struct ieee80211_mesh_route *rt;
+ struct ieee80211_hwmp_route *hr;
+
+ rt = ieee80211_mesh_rt_find(vap, ni->ni_macaddr);
+ if (rt == NULL)
+ return;
+ hr = IEEE80211_MESH_ROUTE_PRIV(rt, struct ieee80211_hwmp_route);
+ IEEE80211_NOTE(vap, IEEE80211_MSG_HWMP, ni,
+ "%s", "delete route entry");
+ perr.perr_ttl = ms->ms_ttl;
+ perr.perr_ndests = 1;
+ PERR_DFLAGS(0) = 0;
+ if (hr->hr_seq == 0)
+ PERR_DFLAGS(0) |= IEEE80211_MESHPERR_DFLAGS_USN;
+ PERR_DFLAGS(0) |= IEEE80211_MESHPERR_DFLAGS_RC;
+ IEEE80211_ADDR_COPY(PERR_DADDR(0), rt->rt_dest);
+ PERR_DSEQ(0) = hr->hr_seq;
+ PERR_DRCODE(0) = IEEE80211_REASON_MESH_PERR_DEST_UNREACH;
+ /* NB: flush everything passing through peer */
+ ieee80211_mesh_rt_flush_peer(vap, ni->ni_macaddr);
+ hwmp_send_perr(vap->iv_bss, vap->iv_myaddr, broadcastaddr, &perr);
+}
+#undef PERR_DFLAGS
+#undef PERR_DADDR
+#undef PERR_DSEQ
+#undef PERR_DRCODE
+
+#define PERR_DFLAGS(n) perr->perr_dests[n].dest_flags
+#define PERR_DADDR(n) perr->perr_dests[n].dest_addr
+#define PERR_DSEQ(n) perr->perr_dests[n].dest_seq
+#define PERR_DRCODE(n) perr->perr_dests[n].dest_rcode
+static void
+hwmp_recv_perr(struct ieee80211vap *vap, struct ieee80211_node *ni,
+ const struct ieee80211_frame *wh, const struct ieee80211_meshperr_ie *perr)
+{
+ struct ieee80211_mesh_state *ms = vap->iv_mesh;
+ struct ieee80211_mesh_route *rt = NULL;
+ struct ieee80211_hwmp_route *hr;
+ struct ieee80211_meshperr_ie pperr;
+ int i, forward = 0;
+
+ /*
+ * Acceptance criteria: check if we received a PERR from a
+ * neighbor and forwarding is enabled.
+ */
+ if (ni == vap->iv_bss ||
+ ni->ni_mlstate != IEEE80211_NODE_MESH_ESTABLISHED ||
+ !(ms->ms_flags & IEEE80211_MESHFLAGS_FWD))
+ return;
+ /*
+ * Find all routing entries that match and delete them.
+ */
+ for (i = 0; i < perr->perr_ndests; i++) {
+ rt = ieee80211_mesh_rt_find(vap, PERR_DADDR(i));
+ if (rt == NULL)
+ continue;
+ hr = IEEE80211_MESH_ROUTE_PRIV(rt, struct ieee80211_hwmp_route);
+ if (!(PERR_DFLAGS(i) & IEEE80211_MESHPERR_DFLAGS_USN) &&
+ HWMP_SEQ_GEQ(PERR_DSEQ(i), hr->hr_seq)) {
+ ieee80211_mesh_rt_del(vap, rt->rt_dest);
+ ieee80211_mesh_rt_flush_peer(vap, rt->rt_dest);
+ rt = NULL;
+ forward = 1;
+ }
+ }
+ /*
+ * Propagate the PERR if we previously found it on our routing table.
+ * XXX handle ndest > 1
+ */
+ if (forward && perr->perr_ttl > 1) {
+ IEEE80211_NOTE(vap, IEEE80211_MSG_HWMP, ni,
+ "propagate PERR from %s", ether_sprintf(wh->i_addr2));
+ memcpy(&pperr, perr, sizeof(*perr));
+ pperr.perr_ttl--;
+ hwmp_send_perr(vap->iv_bss, vap->iv_myaddr, broadcastaddr,
+ &pperr);
+ }
+}
+#undef PERR_DFLAGS
+#undef PERR_DADDR
+#undef PERR_DSEQ
+#undef PERR_DRCODE
+
+static int
+hwmp_send_perr(struct ieee80211_node *ni,
+ const uint8_t sa[IEEE80211_ADDR_LEN],
+ const uint8_t da[IEEE80211_ADDR_LEN],
+ struct ieee80211_meshperr_ie *perr)
+{
+ struct ieee80211_hwmp_state *hs = ni->ni_vap->iv_hwmp;
+
+ /*
+ * Enforce PERR interval.
+ */
+ if (ratecheck(&hs->hs_lastperr, &ieee80211_hwmp_perrminint) == 0)
+ return EALREADY;
+ getmicrouptime(&hs->hs_lastperr);
+
+ /*
+ * mesh perr action frame format
+ * [6] da
+ * [6] sa
+ * [6] addr3 = sa
+ * [1] category
+ * [1] action
+ * [tlv] mesh path error
+ */
+ perr->perr_ie = IEEE80211_ELEMID_MESHPERR;
+ return hwmp_send_action(ni, sa, da, (uint8_t *)perr,
+ sizeof(struct ieee80211_meshperr_ie));
+}
+
+static void
+hwmp_recv_rann(struct ieee80211vap *vap, struct ieee80211_node *ni,
+ const struct ieee80211_frame *wh, const struct ieee80211_meshrann_ie *rann)
+{
+ struct ieee80211_mesh_state *ms = vap->iv_mesh;
+ struct ieee80211_hwmp_state *hs = vap->iv_hwmp;
+ struct ieee80211_mesh_route *rt = NULL;
+ struct ieee80211_hwmp_route *hr;
+ struct ieee80211_meshrann_ie prann;
+
+ if (ni == vap->iv_bss ||
+ ni->ni_mlstate != IEEE80211_NODE_MESH_ESTABLISHED ||
+ IEEE80211_ADDR_EQ(rann->rann_addr, vap->iv_myaddr))
+ return;
+
+ rt = ieee80211_mesh_rt_find(vap, rann->rann_addr);
+ /*
+ * Discover the path to the root mesh STA.
+ * If we already know it, propagate the RANN element.
+ */
+ if (rt == NULL) {
+ hwmp_discover(vap, rann->rann_addr, NULL);
+ return;
+ }
+ hr = IEEE80211_MESH_ROUTE_PRIV(rt, struct ieee80211_hwmp_route);
+ if (HWMP_SEQ_GT(rann->rann_seq, hr->hr_seq)) {
+ hr->hr_seq = rann->rann_seq;
+ if (rann->rann_ttl > 1 &&
+ rann->rann_hopcount < hs->hs_maxhops &&
+ (ms->ms_flags & IEEE80211_MESHFLAGS_FWD)) {
+ memcpy(&prann, rann, sizeof(prann));
+ prann.rann_hopcount += 1;
+ prann.rann_ttl -= 1;
+ prann.rann_metric += ms->ms_pmetric->mpm_metric(ni);
+ hwmp_send_rann(vap->iv_bss, vap->iv_myaddr,
+ broadcastaddr, &prann);
+ }
+ }
+}
+
+static int
+hwmp_send_rann(struct ieee80211_node *ni,
+ const uint8_t sa[IEEE80211_ADDR_LEN],
+ const uint8_t da[IEEE80211_ADDR_LEN],
+ struct ieee80211_meshrann_ie *rann)
+{
+ /*
+ * mesh rann action frame format
+ * [6] da
+ * [6] sa
+ * [6] addr3 = sa
+ * [1] category
+ * [1] action
+ * [tlv] root announcement
+ */
+ rann->rann_ie = IEEE80211_ELEMID_MESHRANN;
+ return hwmp_send_action(ni, sa, da, (uint8_t *)rann,
+ sizeof(struct ieee80211_meshrann_ie));
+}
+
+#define PREQ_TFLAGS(n) preq.preq_targets[n].target_flags
+#define PREQ_TADDR(n) preq.preq_targets[n].target_addr
+#define PREQ_TSEQ(n) preq.preq_targets[n].target_seq
+static struct ieee80211_node *
+hwmp_discover(struct ieee80211vap *vap,
+ const uint8_t dest[IEEE80211_ADDR_LEN], struct mbuf *m)
+{
+ struct ieee80211_hwmp_state *hs = vap->iv_hwmp;
+ struct ieee80211_mesh_state *ms = vap->iv_mesh;
+ struct ieee80211_mesh_route *rt = NULL;
+ struct ieee80211_hwmp_route *hr;
+ struct ieee80211_meshpreq_ie preq;
+ struct ieee80211_node *ni;
+ int sendpreq = 0;
+
+ KASSERT(vap->iv_opmode == IEEE80211_M_MBSS,
+ ("not a mesh vap, opmode %d", vap->iv_opmode));
+
+ KASSERT(!IEEE80211_ADDR_EQ(vap->iv_myaddr, dest),
+ ("%s: discovering self!", __func__));
+
+ ni = NULL;
+ if (!IEEE80211_IS_MULTICAST(dest)) {
+ rt = ieee80211_mesh_rt_find(vap, dest);
+ if (rt == NULL) {
+ rt = ieee80211_mesh_rt_add(vap, dest);
+ if (rt == NULL) {
+ IEEE80211_NOTE(vap, IEEE80211_MSG_HWMP,
+ ni, "unable to add discovery path to %s",
+ ether_sprintf(dest));
+ vap->iv_stats.is_mesh_rtaddfailed++;
+ goto done;
+ }
+ }
+ hr = IEEE80211_MESH_ROUTE_PRIV(rt,
+ struct ieee80211_hwmp_route);
+ if ((rt->rt_flags & IEEE80211_MESHRT_FLAGS_VALID) == 0) {
+ if (hr->hr_origseq == 0)
+ hr->hr_origseq = ++hs->hs_seq;
+ rt->rt_metric = IEEE80211_MESHLMETRIC_INITIALVAL;
+ rt->rt_lifetime =
+ ticks_to_msecs(ieee80211_hwmp_pathtimeout);
+ /* XXX check preq retries */
+ sendpreq = 1;
+ IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_HWMP, dest,
+ "start path discovery (src %s)",
+ m == NULL ? "<none>" : ether_sprintf(
+ mtod(m, struct ether_header *)->ether_shost));
+ /*
+ * Try to discover the path for this node.
+ */
+ preq.preq_flags = 0;
+ preq.preq_hopcount = 0;
+ preq.preq_ttl = ms->ms_ttl;
+ preq.preq_id = ++hs->hs_preqid;
+ IEEE80211_ADDR_COPY(preq.preq_origaddr, vap->iv_myaddr);
+ preq.preq_origseq = hr->hr_origseq;
+ preq.preq_lifetime = rt->rt_lifetime;
+ preq.preq_metric = rt->rt_metric;
+ preq.preq_tcount = 1;
+ IEEE80211_ADDR_COPY(PREQ_TADDR(0), dest);
+ PREQ_TFLAGS(0) = 0;
+ if (ieee80211_hwmp_targetonly)
+ PREQ_TFLAGS(0) |= IEEE80211_MESHPREQ_TFLAGS_TO;
+ if (ieee80211_hwmp_replyforward)
+ PREQ_TFLAGS(0) |= IEEE80211_MESHPREQ_TFLAGS_RF;
+ PREQ_TFLAGS(0) |= IEEE80211_MESHPREQ_TFLAGS_USN;
+ PREQ_TSEQ(0) = 0;
+ /* XXX check return value */
+ hwmp_send_preq(vap->iv_bss, vap->iv_myaddr,
+ broadcastaddr, &preq);
+ }
+ if (rt->rt_flags & IEEE80211_MESHRT_FLAGS_VALID)
+ ni = ieee80211_find_txnode(vap, rt->rt_nexthop);
+ } else {
+ ni = ieee80211_find_txnode(vap, dest);
+ /* NB: if null then we leak mbuf */
+ KASSERT(ni != NULL, ("leak mcast frame"));
+ return ni;
+ }
+done:
+ if (ni == NULL && m != NULL) {
+ if (sendpreq) {
+ struct ieee80211com *ic = vap->iv_ic;
+ /*
+ * Queue packet for transmit when path discovery
+ * completes. If discovery never completes the
+ * frame will be flushed by way of the aging timer.
+ */
+ IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_HWMP, dest,
+ "%s", "queue frame until path found");
+ m->m_pkthdr.rcvif = (void *)(uintptr_t)
+ ieee80211_mac_hash(ic, dest);
+ /* XXX age chosen randomly */
+ ieee80211_ageq_append(&ic->ic_stageq, m,
+ IEEE80211_INACT_WAIT);
+ } else {
+ IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_HWMP,
+ dest, NULL, "%s", "no valid path to this node");
+ m_freem(m);
+ }
+ }
+ return ni;
+}
+#undef PREQ_TFLAGS
+#undef PREQ_TADDR
+#undef PREQ_TSEQ
+
+static int
+hwmp_ioctl_get80211(struct ieee80211vap *vap, struct ieee80211req *ireq)
+{
+ struct ieee80211_hwmp_state *hs = vap->iv_hwmp;
+ int error;
+
+ if (vap->iv_opmode != IEEE80211_M_MBSS)
+ return ENOSYS;
+ error = 0;
+ switch (ireq->i_type) {
+ case IEEE80211_IOC_HWMP_ROOTMODE:
+ ireq->i_val = hs->hs_rootmode;
+ break;
+ case IEEE80211_IOC_HWMP_MAXHOPS:
+ ireq->i_val = hs->hs_maxhops;
+ break;
+ default:
+ return ENOSYS;
+ }
+ return error;
+}
+IEEE80211_IOCTL_GET(hwmp, hwmp_ioctl_get80211);
+
+static int
+hwmp_ioctl_set80211(struct ieee80211vap *vap, struct ieee80211req *ireq)
+{
+ struct ieee80211_hwmp_state *hs = vap->iv_hwmp;
+ int error;
+
+ if (vap->iv_opmode != IEEE80211_M_MBSS)
+ return ENOSYS;
+ error = 0;
+ switch (ireq->i_type) {
+ case IEEE80211_IOC_HWMP_ROOTMODE:
+ if (ireq->i_val < 0 || ireq->i_val > 3)
+ return EINVAL;
+ hs->hs_rootmode = ireq->i_val;
+ hwmp_rootmode_setup(vap);
+ break;
+ case IEEE80211_IOC_HWMP_MAXHOPS:
+ if (ireq->i_val <= 0 || ireq->i_val > 255)
+ return EINVAL;
+ hs->hs_maxhops = ireq->i_val;
+ break;
+ default:
+ return ENOSYS;
+ }
+ return error;
+}
+IEEE80211_IOCTL_SET(hwmp, hwmp_ioctl_set80211);
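+
+/*
+ * Userspace sketch (illustrative; assumes the generic net80211 ioctl
+ * plumbing, SIOCS80211 and struct ieee80211req, is wired up on the
+ * target, and "wlan0" is a hypothetical mesh vap name):
+ *
+ *   struct ieee80211req ireq;
+ *   memset(&ireq, 0, sizeof(ireq));
+ *   strlcpy(ireq.i_name, "wlan0", sizeof(ireq.i_name));
+ *   ireq.i_type = IEEE80211_IOC_HWMP_ROOTMODE;
+ *   ireq.i_val = IEEE80211_HWMP_ROOTMODE_NORMAL;
+ *   ioctl(s, SIOCS80211, &ireq);	// s: AF_INET datagram socket
+ */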
diff --git a/rtems/freebsd/net80211/ieee80211_input.c b/rtems/freebsd/net80211/ieee80211_input.c
new file mode 100644
index 00000000..91e76f1e
--- /dev/null
+++ b/rtems/freebsd/net80211/ieee80211_input.c
@@ -0,0 +1,852 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 2001 Atsushi Onoe
+ * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/freebsd/local/opt_wlan.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/endian.h>
+#include <rtems/freebsd/sys/kernel.h>
+
+#include <rtems/freebsd/sys/socket.h>
+
+#include <rtems/freebsd/net/ethernet.h>
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/if_llc.h>
+#include <rtems/freebsd/net/if_media.h>
+#include <rtems/freebsd/net/if_vlan_var.h>
+
+#include <rtems/freebsd/net80211/ieee80211_var.h>
+#include <rtems/freebsd/net80211/ieee80211_input.h>
+#ifdef IEEE80211_SUPPORT_MESH
+#include <rtems/freebsd/net80211/ieee80211_mesh.h>
+#endif
+
+#include <rtems/freebsd/net/bpf.h>
+
+#ifdef INET
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/net/ethernet.h>
+#endif
+
+int
+ieee80211_input_all(struct ieee80211com *ic, struct mbuf *m, int rssi, int nf)
+{
+ struct ieee80211vap *vap;
+ int type = -1;
+
+ m->m_flags |= M_BCAST; /* NB: mark for bpf tap'ing */
+
+ /* XXX locking */
+ TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
+ struct ieee80211_node *ni;
+ struct mbuf *mcopy;
+
+ /* NB: could check for IFF_UP but this is cheaper */
+ if (vap->iv_state == IEEE80211_S_INIT)
+ continue;
+ /*
+ * WDS vaps only receive directed traffic from the
+ * station at the ``far end''. That traffic should
+ * be passed through the AP vap the station is
+ * associated with, so don't spam them with mcast frames.
+ */
+ if (vap->iv_opmode == IEEE80211_M_WDS)
+ continue;
+ if (TAILQ_NEXT(vap, iv_next) != NULL) {
+ /*
+ * Packet contents are changed by ieee80211_decap
+ * so do a deep copy of the packet.
+ */
+ mcopy = m_dup(m, M_DONTWAIT);
+ if (mcopy == NULL) {
+ /* XXX stat+msg */
+ continue;
+ }
+ } else {
+ mcopy = m;
+ m = NULL;
+ }
+ ni = ieee80211_ref_node(vap->iv_bss);
+ type = ieee80211_input(ni, mcopy, rssi, nf);
+ ieee80211_free_node(ni);
+ }
+ if (m != NULL) /* no vaps, reclaim mbuf */
+ m_freem(m);
+ return type;
+}
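+
+/*
+ * Usage sketch (illustrative only, not part of the original sources):
+ * a driver's receive path typically strips its own descriptor, records
+ * the rssi and noise floor, and hands the frame up; "sc" and
+ * mydrv_rx_mbuf() below are a hypothetical softc and helper:
+ *
+ *	struct mbuf *m = mydrv_rx_mbuf(sc);
+ *	(void) ieee80211_input_all(sc->sc_ic, m, rssi, nf);
+ *
+ * The mbuf is consumed in all cases (duplicated, delivered, or freed),
+ * so the caller must not touch it afterwards.
+ */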
+
+/*
+ * This function reassembles fragments.
+ *
+ * XXX should handle 3 concurrent reassemblies per the spec.
+ */
+struct mbuf *
+ieee80211_defrag(struct ieee80211_node *ni, struct mbuf *m, int hdrspace)
+{
+ struct ieee80211vap *vap = ni->ni_vap;
+ struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
+ struct ieee80211_frame *lwh;
+ uint16_t rxseq;
+ uint8_t fragno;
+ uint8_t more_frag = wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG;
+ struct mbuf *mfrag;
+
+ KASSERT(!IEEE80211_IS_MULTICAST(wh->i_addr1), ("multicast fragm?"));
+
+ rxseq = le16toh(*(uint16_t *)wh->i_seq);
+ fragno = rxseq & IEEE80211_SEQ_FRAG_MASK;
+
+ /* Quick way out, if there's nothing to defragment */
+ if (!more_frag && fragno == 0 && ni->ni_rxfrag[0] == NULL)
+ return m;
+
+ /*
+ * Remove the frag to ensure it doesn't get reaped by the timer.
+ */
+ if (ni->ni_table == NULL) {
+ /*
+ * Should never happen. If the node is orphaned (not in
+ * the table) then input packets should not reach here.
+ * Otherwise, a concurrent request that yanks the table
+ * should be blocked by other interlocking and/or by first
+ * shutting the driver down. Regardless, be defensive
+ * here and just bail.
+ */
+ /* XXX need msg+stat */
+ m_freem(m);
+ return NULL;
+ }
+ IEEE80211_NODE_LOCK(ni->ni_table);
+ mfrag = ni->ni_rxfrag[0];
+ ni->ni_rxfrag[0] = NULL;
+ IEEE80211_NODE_UNLOCK(ni->ni_table);
+
+ /*
+ * Validate new fragment is in order and
+ * related to the previous ones.
+ */
+ if (mfrag != NULL) {
+ uint16_t last_rxseq;
+
+ lwh = mtod(mfrag, struct ieee80211_frame *);
+ last_rxseq = le16toh(*(uint16_t *)lwh->i_seq);
+ /* NB: check seq # and frag together */
+ if (rxseq != last_rxseq+1 ||
+ !IEEE80211_ADDR_EQ(wh->i_addr1, lwh->i_addr1) ||
+ !IEEE80211_ADDR_EQ(wh->i_addr2, lwh->i_addr2)) {
+ /*
+ * Unrelated fragment or no space for it,
+ * clear current fragments.
+ */
+ m_freem(mfrag);
+ mfrag = NULL;
+ }
+ }
+
+ if (mfrag == NULL) {
+ if (fragno != 0) { /* !first fragment, discard */
+ vap->iv_stats.is_rx_defrag++;
+ IEEE80211_NODE_STAT(ni, rx_defrag);
+ m_freem(m);
+ return NULL;
+ }
+ mfrag = m;
+ } else { /* concatenate */
+ m_adj(m, hdrspace); /* strip header */
+ m_cat(mfrag, m);
+ /* NB: m_cat doesn't update the packet header */
+ mfrag->m_pkthdr.len += m->m_pkthdr.len;
+ /* track last seqnum and fragno */
+ lwh = mtod(mfrag, struct ieee80211_frame *);
+ *(uint16_t *) lwh->i_seq = *(uint16_t *) wh->i_seq;
+ }
+ if (more_frag) { /* more to come, save */
+ ni->ni_rxfragstamp = ticks;
+ ni->ni_rxfrag[0] = mfrag;
+ mfrag = NULL;
+ }
+ return mfrag;
+}
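+
+/*
+ * Note (added commentary): i_seq is a 16-bit little-endian field with
+ * the fragment number in the low 4 bits (IEEE80211_SEQ_FRAG_MASK) and
+ * the sequence number in the upper 12. E.g. the raw bytes 21 00 decode
+ * to rxseq 0x0021: sequence 2, fragment 1. Consecutive fragments of
+ * one MSDU thus differ by exactly one, which is what the
+ * rxseq == last_rxseq+1 check above relies on.
+ */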
+
+void
+ieee80211_deliver_data(struct ieee80211vap *vap,
+ struct ieee80211_node *ni, struct mbuf *m)
+{
+ struct ether_header *eh = mtod(m, struct ether_header *);
+ struct ifnet *ifp = vap->iv_ifp;
+
+ /* clear driver/net80211 flags before passing up */
+ m->m_flags &= ~(M_80211_RX | M_MCAST | M_BCAST);
+
+ /* NB: see hostap_deliver_data, this path doesn't handle hostap */
+ KASSERT(vap->iv_opmode != IEEE80211_M_HOSTAP, ("gack, hostap"));
+ /*
+ * Do accounting.
+ */
+ ifp->if_ipackets++;
+ IEEE80211_NODE_STAT(ni, rx_data);
+ IEEE80211_NODE_STAT_ADD(ni, rx_bytes, m->m_pkthdr.len);
+ if (ETHER_IS_MULTICAST(eh->ether_dhost)) {
+ m->m_flags |= M_MCAST; /* XXX M_BCAST? */
+ IEEE80211_NODE_STAT(ni, rx_mcast);
+ } else
+ IEEE80211_NODE_STAT(ni, rx_ucast);
+ m->m_pkthdr.rcvif = ifp;
+
+ if (ni->ni_vlan != 0) {
+ /* attach vlan tag */
+ m->m_pkthdr.ether_vtag = ni->ni_vlan;
+ m->m_flags |= M_VLANTAG;
+ }
+ ifp->if_input(ifp, m);
+}
+
+struct mbuf *
+ieee80211_decap(struct ieee80211vap *vap, struct mbuf *m, int hdrlen)
+{
+ struct ieee80211_qosframe_addr4 wh;
+ struct ether_header *eh;
+ struct llc *llc;
+
+ KASSERT(hdrlen <= sizeof(wh),
+ ("hdrlen %d > max %zd", hdrlen, sizeof(wh)));
+
+ if (m->m_len < hdrlen + sizeof(*llc) &&
+ (m = m_pullup(m, hdrlen + sizeof(*llc))) == NULL) {
+ vap->iv_stats.is_rx_tooshort++;
+ /* XXX msg */
+ return NULL;
+ }
+ memcpy(&wh, mtod(m, caddr_t), hdrlen);
+ llc = (struct llc *)(mtod(m, caddr_t) + hdrlen);
+ if (llc->llc_dsap == LLC_SNAP_LSAP && llc->llc_ssap == LLC_SNAP_LSAP &&
+ llc->llc_control == LLC_UI && llc->llc_snap.org_code[0] == 0 &&
+ llc->llc_snap.org_code[1] == 0 && llc->llc_snap.org_code[2] == 0 &&
+ /* NB: preserve AppleTalk frames that have a native SNAP hdr */
+ !(llc->llc_snap.ether_type == htons(ETHERTYPE_AARP) ||
+ llc->llc_snap.ether_type == htons(ETHERTYPE_IPX))) {
+ m_adj(m, hdrlen + sizeof(struct llc) - sizeof(*eh));
+ llc = NULL;
+ } else {
+ m_adj(m, hdrlen - sizeof(*eh));
+ }
+ eh = mtod(m, struct ether_header *);
+ switch (wh.i_fc[1] & IEEE80211_FC1_DIR_MASK) {
+ case IEEE80211_FC1_DIR_NODS:
+ IEEE80211_ADDR_COPY(eh->ether_dhost, wh.i_addr1);
+ IEEE80211_ADDR_COPY(eh->ether_shost, wh.i_addr2);
+ break;
+ case IEEE80211_FC1_DIR_TODS:
+ IEEE80211_ADDR_COPY(eh->ether_dhost, wh.i_addr3);
+ IEEE80211_ADDR_COPY(eh->ether_shost, wh.i_addr2);
+ break;
+ case IEEE80211_FC1_DIR_FROMDS:
+ IEEE80211_ADDR_COPY(eh->ether_dhost, wh.i_addr1);
+ IEEE80211_ADDR_COPY(eh->ether_shost, wh.i_addr3);
+ break;
+ case IEEE80211_FC1_DIR_DSTODS:
+ IEEE80211_ADDR_COPY(eh->ether_dhost, wh.i_addr3);
+ IEEE80211_ADDR_COPY(eh->ether_shost, wh.i_addr4);
+ break;
+ }
+#ifdef ALIGNED_POINTER
+ if (!ALIGNED_POINTER(mtod(m, caddr_t) + sizeof(*eh), uint32_t)) {
+ m = ieee80211_realign(vap, m, sizeof(*eh));
+ if (m == NULL)
+ return NULL;
+ }
+#endif /* ALIGNED_POINTER */
+ if (llc != NULL) {
+ eh = mtod(m, struct ether_header *);
+ eh->ether_type = htons(m->m_pkthdr.len - sizeof(*eh));
+ }
+ return m;
+}
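+
+/*
+ * Note (added commentary): the SNAP match above recognizes the
+ * RFC 1042 encapsulation header aa aa 03 00 00 00 followed by the
+ * 16-bit Ethernet type; e.g. an IPv4 payload carries
+ * aa aa 03 00 00 00 08 00 right after the 802.11 header, which is
+ * rewritten here into a plain 14-byte Ethernet header.
+ */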
+
+/*
+ * Decap a frame encapsulated in a fast-frame/A-MSDU.
+ */
+struct mbuf *
+ieee80211_decap1(struct mbuf *m, int *framelen)
+{
+#define FF_LLC_SIZE (sizeof(struct ether_header) + sizeof(struct llc))
+ struct ether_header *eh;
+ struct llc *llc;
+
+ /*
+ * The frame has an 802.3 header followed by an 802.2
+ * LLC header. The encapsulated frame length is in the
+ * first header type field; save that and overwrite it
+ * with the true type field found in the second. Then
+ * copy the 802.3 header up to where it belongs and
+ * adjust the mbuf contents to remove the void.
+ */
+ if (m->m_len < FF_LLC_SIZE && (m = m_pullup(m, FF_LLC_SIZE)) == NULL)
+ return NULL;
+ eh = mtod(m, struct ether_header *); /* 802.3 header is first */
+ llc = (struct llc *)&eh[1]; /* 802.2 header follows */
+ *framelen = ntohs(eh->ether_type) /* encap'd frame size */
+ + sizeof(struct ether_header) - sizeof(struct llc);
+ eh->ether_type = llc->llc_un.type_snap.ether_type;
+ ovbcopy(eh, mtod(m, uint8_t *) + sizeof(struct llc),
+ sizeof(struct ether_header));
+ m_adj(m, sizeof(struct llc));
+ return m;
+#undef FF_LLC_SIZE
+}
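+
+/*
+ * Note (added commentary): each encapsulated subframe therefore looks
+ * like
+ *
+ *	[6] dst  [6] src  [2] length  [8] LLC/SNAP  [n] payload
+ *
+ * and the code above collapses it to a standard Ethernet frame by
+ * promoting the SNAP type into ether_type and sliding the 802.3
+ * header forward over the LLC bytes.
+ */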
+
+/*
+ * Install received rate set information in the node's state block.
+ */
+int
+ieee80211_setup_rates(struct ieee80211_node *ni,
+ const uint8_t *rates, const uint8_t *xrates, int flags)
+{
+ struct ieee80211vap *vap = ni->ni_vap;
+ struct ieee80211_rateset *rs = &ni->ni_rates;
+
+ memset(rs, 0, sizeof(*rs));
+ rs->rs_nrates = rates[1];
+ memcpy(rs->rs_rates, rates + 2, rs->rs_nrates);
+ if (xrates != NULL) {
+ uint8_t nxrates;
+ /*
+ * Tack on 11g extended supported rate element.
+ */
+ nxrates = xrates[1];
+ if (rs->rs_nrates + nxrates > IEEE80211_RATE_MAXSIZE) {
+ nxrates = IEEE80211_RATE_MAXSIZE - rs->rs_nrates;
+ IEEE80211_NOTE(vap, IEEE80211_MSG_XRATE, ni,
+ "extended rate set too large; only using "
+ "%u of %u rates", nxrates, xrates[1]);
+ vap->iv_stats.is_rx_rstoobig++;
+ }
+ memcpy(rs->rs_rates + rs->rs_nrates, xrates+2, nxrates);
+ rs->rs_nrates += nxrates;
+ }
+ return ieee80211_fix_rate(ni, rs, flags);
+}
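+
+/*
+ * Note (added commentary): "rates" and "xrates" point at raw
+ * information elements, i.e. [elemid][len][byte...]; each rate byte
+ * is in 0.5 Mb/s units with the high bit marking a basic rate, so
+ * e.g. 0x82 in the supported-rates element means 1 Mb/s, basic.
+ */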
+
+/*
+ * Send a management frame error response to the specified
+ * station. If ni is associated with the station then use
+ * it; otherwise allocate a temporary node suitable for
+ * transmitting the frame and then free the reference so
+ * it will go away as soon as the frame has been transmitted.
+ */
+void
+ieee80211_send_error(struct ieee80211_node *ni,
+ const uint8_t mac[IEEE80211_ADDR_LEN], int subtype, int arg)
+{
+ struct ieee80211vap *vap = ni->ni_vap;
+ int istmp;
+
+ if (ni == vap->iv_bss) {
+ if (vap->iv_state != IEEE80211_S_RUN) {
+ /*
+ * XXX hack until we get rid of this routine.
+ * We can be called prior to the vap reaching
+ * run state under certain conditions in which
+ * case iv_bss->ni_chan will not be setup.
+ * Check for this explicitly and just ignore
+ * the request.
+ */
+ return;
+ }
+ ni = ieee80211_tmp_node(vap, mac);
+ if (ni == NULL) {
+ /* XXX msg */
+ return;
+ }
+ istmp = 1;
+ } else
+ istmp = 0;
+ IEEE80211_SEND_MGMT(ni, subtype, arg);
+ if (istmp)
+ ieee80211_free_node(ni);
+}
+
+int
+ieee80211_alloc_challenge(struct ieee80211_node *ni)
+{
+ if (ni->ni_challenge == NULL)
+ ni->ni_challenge = (uint32_t *) malloc(IEEE80211_CHALLENGE_LEN,
+ M_80211_NODE, M_NOWAIT);
+ if (ni->ni_challenge == NULL) {
+ IEEE80211_NOTE(ni->ni_vap,
+ IEEE80211_MSG_DEBUG | IEEE80211_MSG_AUTH, ni,
+ "%s", "shared key challenge alloc failed");
+ /* XXX statistic */
+ }
+ return (ni->ni_challenge != NULL);
+}
+
+/*
+ * Parse a Beacon or ProbeResponse frame and return the
+ * useful information in an ieee80211_scanparams structure.
+ * Status is set to 0 if no problems were found; otherwise
+ * a bitmask of IEEE80211_BPARSE_* items is returned that
+ * describes the problems detected.
+ */
+int
+ieee80211_parse_beacon(struct ieee80211_node *ni, struct mbuf *m,
+ struct ieee80211_scanparams *scan)
+{
+ struct ieee80211vap *vap = ni->ni_vap;
+ struct ieee80211com *ic = ni->ni_ic;
+ struct ieee80211_frame *wh;
+ uint8_t *frm, *efrm;
+
+ wh = mtod(m, struct ieee80211_frame *);
+ frm = (uint8_t *)&wh[1];
+ efrm = mtod(m, uint8_t *) + m->m_len;
+ scan->status = 0;
+ /*
+ * beacon/probe response frame format
+ * [8] time stamp
+ * [2] beacon interval
+ * [2] capability information
+ * [tlv] ssid
+ * [tlv] supported rates
+ * [tlv] country information
+ * [tlv] channel switch announcement (CSA)
+ * [tlv] parameter set (FH/DS)
+ * [tlv] erp information
+ * [tlv] extended supported rates
+ * [tlv] WME
+ * [tlv] WPA or RSN
+ * [tlv] HT capabilities
+ * [tlv] HT information
+ * [tlv] Atheros capabilities
+ * [tlv] Mesh ID
+ * [tlv] Mesh Configuration
+ */
+ IEEE80211_VERIFY_LENGTH(efrm - frm, 12,
+ return (scan->status = IEEE80211_BPARSE_BADIELEN));
+ memset(scan, 0, sizeof(*scan));
+ scan->tstamp = frm; frm += 8;
+ scan->bintval = le16toh(*(uint16_t *)frm); frm += 2;
+ scan->capinfo = le16toh(*(uint16_t *)frm); frm += 2;
+ scan->bchan = ieee80211_chan2ieee(ic, ic->ic_curchan);
+ scan->chan = scan->bchan;
+ scan->ies = frm;
+ scan->ies_len = efrm - frm;
+
+ while (efrm - frm > 1) {
+ IEEE80211_VERIFY_LENGTH(efrm - frm, frm[1] + 2,
+ return (scan->status = IEEE80211_BPARSE_BADIELEN));
+ switch (*frm) {
+ case IEEE80211_ELEMID_SSID:
+ scan->ssid = frm;
+ break;
+ case IEEE80211_ELEMID_RATES:
+ scan->rates = frm;
+ break;
+ case IEEE80211_ELEMID_COUNTRY:
+ scan->country = frm;
+ break;
+ case IEEE80211_ELEMID_CSA:
+ scan->csa = frm;
+ break;
+ case IEEE80211_ELEMID_FHPARMS:
+ if (ic->ic_phytype == IEEE80211_T_FH) {
+ scan->fhdwell = LE_READ_2(&frm[2]);
+ scan->chan = IEEE80211_FH_CHAN(frm[4], frm[5]);
+ scan->fhindex = frm[6];
+ }
+ break;
+ case IEEE80211_ELEMID_DSPARMS:
+ /*
+ * XXX hack this since depending on phytype
+ * is problematic for multi-mode devices.
+ */
+ if (ic->ic_phytype != IEEE80211_T_FH)
+ scan->chan = frm[2];
+ break;
+ case IEEE80211_ELEMID_TIM:
+ /* XXX ATIM? */
+ scan->tim = frm;
+ scan->timoff = frm - mtod(m, uint8_t *);
+ break;
+ case IEEE80211_ELEMID_IBSSPARMS:
+ case IEEE80211_ELEMID_CFPARMS:
+ case IEEE80211_ELEMID_PWRCNSTR:
+ /* NB: avoid debugging complaints */
+ break;
+ case IEEE80211_ELEMID_XRATES:
+ scan->xrates = frm;
+ break;
+ case IEEE80211_ELEMID_ERP:
+ if (frm[1] != 1) {
+ IEEE80211_DISCARD_IE(vap,
+ IEEE80211_MSG_ELEMID, wh, "ERP",
+ "bad len %u", frm[1]);
+ vap->iv_stats.is_rx_elem_toobig++;
+ break;
+ }
+ scan->erp = frm[2] | 0x100;
+ break;
+ case IEEE80211_ELEMID_HTCAP:
+ scan->htcap = frm;
+ break;
+ case IEEE80211_ELEMID_RSN:
+ scan->rsn = frm;
+ break;
+ case IEEE80211_ELEMID_HTINFO:
+ scan->htinfo = frm;
+ break;
+#ifdef IEEE80211_SUPPORT_MESH
+ case IEEE80211_ELEMID_MESHID:
+ scan->meshid = frm;
+ break;
+ case IEEE80211_ELEMID_MESHCONF:
+ scan->meshconf = frm;
+ break;
+#endif
+ case IEEE80211_ELEMID_VENDOR:
+ if (iswpaoui(frm))
+ scan->wpa = frm;
+ else if (iswmeparam(frm) || iswmeinfo(frm))
+ scan->wme = frm;
+#ifdef IEEE80211_SUPPORT_SUPERG
+ else if (isatherosoui(frm))
+ scan->ath = frm;
+#endif
+#ifdef IEEE80211_SUPPORT_TDMA
+ else if (istdmaoui(frm))
+ scan->tdma = frm;
+#endif
+ else if (vap->iv_flags_ht & IEEE80211_FHT_HTCOMPAT) {
+ /*
+ * Accept pre-draft HT ie's if the
+ * standard ones have not been seen.
+ */
+ if (ishtcapoui(frm)) {
+ if (scan->htcap == NULL)
+ scan->htcap = frm;
+ } else if (ishtinfooui(frm)) {
+ if (scan->htinfo == NULL)
+ scan->htinfo = frm;
+ }
+ }
+ break;
+ default:
+ IEEE80211_DISCARD_IE(vap, IEEE80211_MSG_ELEMID,
+ wh, "unhandled",
+ "id %u, len %u", *frm, frm[1]);
+ vap->iv_stats.is_rx_elem_unknown++;
+ break;
+ }
+ frm += frm[1] + 2;
+ }
+ IEEE80211_VERIFY_ELEMENT(scan->rates, IEEE80211_RATE_MAXSIZE,
+ scan->status |= IEEE80211_BPARSE_RATES_INVALID);
+ if (scan->rates != NULL && scan->xrates != NULL) {
+ /*
+ * NB: don't process XRATES if RATES is missing. This
+ * avoids a potential null ptr deref and should be ok
+ * as the return code will already note RATES is missing
+ * (so callers shouldn't otherwise process the frame).
+ */
+ IEEE80211_VERIFY_ELEMENT(scan->xrates,
+ IEEE80211_RATE_MAXSIZE - scan->rates[1],
+ scan->status |= IEEE80211_BPARSE_XRATES_INVALID);
+ }
+ IEEE80211_VERIFY_ELEMENT(scan->ssid, IEEE80211_NWID_LEN,
+ scan->status |= IEEE80211_BPARSE_SSID_INVALID);
+ if (scan->chan != scan->bchan && ic->ic_phytype != IEEE80211_T_FH) {
+ /*
+ * Frame was received on a channel different from the
+ * one indicated in the DS params element id;
+ * silently discard it.
+ *
+ * NB: this can happen due to signal leakage.
+ * But we still accept the frame for an FH phy
+ * because the rssi value should be correct even
+ * for a different hop pattern in FH.
+ */
+ IEEE80211_DISCARD(vap,
+ IEEE80211_MSG_ELEMID | IEEE80211_MSG_INPUT,
+ wh, NULL, "for off-channel %u", scan->chan);
+ vap->iv_stats.is_rx_chanmismatch++;
+ scan->status |= IEEE80211_BPARSE_OFFCHAN;
+ }
+ if (!(IEEE80211_BINTVAL_MIN <= scan->bintval &&
+ scan->bintval <= IEEE80211_BINTVAL_MAX)) {
+ IEEE80211_DISCARD(vap,
+ IEEE80211_MSG_ELEMID | IEEE80211_MSG_INPUT,
+ wh, NULL, "bogus beacon interval", scan->bintval);
+ vap->iv_stats.is_rx_badbintval++;
+ scan->status |= IEEE80211_BPARSE_BINTVAL_INVALID;
+ }
+ if (scan->country != NULL) {
+ /*
+ * Validate we have at least enough data to extract
+ * the country code. Not sure if we should return an
+ * error instead of discarding the IE; consider this
+ * as being lenient since we don't depend on the data for
+ * correct operation.
+ */
+ IEEE80211_VERIFY_LENGTH(scan->country[1], 3 * sizeof(uint8_t),
+ scan->country = NULL);
+ }
+ if (scan->csa != NULL) {
+ /*
+ * Validate Channel Switch Announcement; this must
+ * be the correct length or we toss the frame.
+ */
+ IEEE80211_VERIFY_LENGTH(scan->csa[1], 3 * sizeof(uint8_t),
+ scan->status |= IEEE80211_BPARSE_CSA_INVALID);
+ }
+ /*
+ * Process HT ie's. This is complicated by our
+ * accepting both the standard ie's and the pre-draft
+ * vendor OUI ie's that some vendors still use/require.
+ */
+ if (scan->htcap != NULL) {
+ IEEE80211_VERIFY_LENGTH(scan->htcap[1],
+ scan->htcap[0] == IEEE80211_ELEMID_VENDOR ?
+ 4 + sizeof(struct ieee80211_ie_htcap)-2 :
+ sizeof(struct ieee80211_ie_htcap)-2,
+ scan->htcap = NULL);
+ }
+ if (scan->htinfo != NULL) {
+ IEEE80211_VERIFY_LENGTH(scan->htinfo[1],
+ scan->htinfo[0] == IEEE80211_ELEMID_VENDOR ?
+ 4 + sizeof(struct ieee80211_ie_htinfo)-2 :
+ sizeof(struct ieee80211_ie_htinfo)-2,
+ scan->htinfo = NULL);
+ }
+ return scan->status;
+}
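+
+/*
+ * Note (added commentary): the element walk above relies on every IE
+ * being self-describing: frm[0] is the element id and frm[1] the body
+ * length, so "frm += frm[1] + 2" steps to the next element while the
+ * IEEE80211_VERIFY_LENGTH check guards against a truncated body
+ * running past efrm.
+ */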
+
+/*
+ * Parse an Action frame. Return 0 on success, non-zero on failure.
+ */
+int
+ieee80211_parse_action(struct ieee80211_node *ni, struct mbuf *m)
+{
+ struct ieee80211vap *vap = ni->ni_vap;
+ const struct ieee80211_action *ia;
+ struct ieee80211_frame *wh;
+ uint8_t *frm, *efrm;
+
+ /*
+ * action frame format:
+ * [1] category
+ * [1] action
+ * [tlv] parameters
+ */
+ wh = mtod(m, struct ieee80211_frame *);
+ frm = (u_int8_t *)&wh[1];
+ efrm = mtod(m, u_int8_t *) + m->m_len;
+ IEEE80211_VERIFY_LENGTH(efrm - frm,
+ sizeof(struct ieee80211_action), return EINVAL);
+ ia = (const struct ieee80211_action *) frm;
+
+ vap->iv_stats.is_rx_action++;
+ IEEE80211_NODE_STAT(ni, rx_action);
+
+ /* verify frame payloads but defer processing */
+ /* XXX maybe push this to method */
+ switch (ia->ia_category) {
+ case IEEE80211_ACTION_CAT_BA:
+ switch (ia->ia_action) {
+ case IEEE80211_ACTION_BA_ADDBA_REQUEST:
+ IEEE80211_VERIFY_LENGTH(efrm - frm,
+ sizeof(struct ieee80211_action_ba_addbarequest),
+ return EINVAL);
+ break;
+ case IEEE80211_ACTION_BA_ADDBA_RESPONSE:
+ IEEE80211_VERIFY_LENGTH(efrm - frm,
+ sizeof(struct ieee80211_action_ba_addbaresponse),
+ return EINVAL);
+ break;
+ case IEEE80211_ACTION_BA_DELBA:
+ IEEE80211_VERIFY_LENGTH(efrm - frm,
+ sizeof(struct ieee80211_action_ba_delba),
+ return EINVAL);
+ break;
+ }
+ break;
+ case IEEE80211_ACTION_CAT_HT:
+ switch (ia->ia_action) {
+ case IEEE80211_ACTION_HT_TXCHWIDTH:
+ IEEE80211_VERIFY_LENGTH(efrm - frm,
+ sizeof(struct ieee80211_action_ht_txchwidth),
+ return EINVAL);
+ break;
+ case IEEE80211_ACTION_HT_MIMOPWRSAVE:
+ IEEE80211_VERIFY_LENGTH(efrm - frm,
+ sizeof(struct ieee80211_action_ht_mimopowersave),
+ return EINVAL);
+ break;
+ }
+ break;
+ }
+ return 0;
+}
+
+#ifdef IEEE80211_DEBUG
+/*
+ * Debugging support.
+ */
+void
+ieee80211_ssid_mismatch(struct ieee80211vap *vap, const char *tag,
+ uint8_t mac[IEEE80211_ADDR_LEN], uint8_t *ssid)
+{
+ printf("[%s] discard %s frame, ssid mismatch: ",
+ ether_sprintf(mac), tag);
+ ieee80211_print_essid(ssid + 2, ssid[1]);
+ printf("\n");
+}
+
+/*
+ * Return the bssid of a frame.
+ */
+static const uint8_t *
+ieee80211_getbssid(const struct ieee80211vap *vap,
+ const struct ieee80211_frame *wh)
+{
+ if (vap->iv_opmode == IEEE80211_M_STA)
+ return wh->i_addr2;
+ if ((wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) != IEEE80211_FC1_DIR_NODS)
+ return wh->i_addr1;
+ if ((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) == IEEE80211_FC0_SUBTYPE_PS_POLL)
+ return wh->i_addr1;
+ return wh->i_addr3;
+}
+
+#include <rtems/freebsd/machine/stdarg.h>
+
+void
+ieee80211_note(const struct ieee80211vap *vap, const char *fmt, ...)
+{
+ char buf[128]; /* XXX */
+ va_list ap;
+
+ va_start(ap, fmt);
+ vsnprintf(buf, sizeof(buf), fmt, ap);
+ va_end(ap);
+
+ if_printf(vap->iv_ifp, "%s", buf); /* NB: no \n */
+}
+
+void
+ieee80211_note_frame(const struct ieee80211vap *vap,
+ const struct ieee80211_frame *wh,
+ const char *fmt, ...)
+{
+ char buf[128]; /* XXX */
+ va_list ap;
+
+ va_start(ap, fmt);
+ vsnprintf(buf, sizeof(buf), fmt, ap);
+ va_end(ap);
+ if_printf(vap->iv_ifp, "[%s] %s\n",
+ ether_sprintf(ieee80211_getbssid(vap, wh)), buf);
+}
+
+void
+ieee80211_note_mac(const struct ieee80211vap *vap,
+ const uint8_t mac[IEEE80211_ADDR_LEN],
+ const char *fmt, ...)
+{
+ char buf[128]; /* XXX */
+ va_list ap;
+
+ va_start(ap, fmt);
+ vsnprintf(buf, sizeof(buf), fmt, ap);
+ va_end(ap);
+ if_printf(vap->iv_ifp, "[%s] %s\n", ether_sprintf(mac), buf);
+}
+
+void
+ieee80211_discard_frame(const struct ieee80211vap *vap,
+ const struct ieee80211_frame *wh,
+ const char *type, const char *fmt, ...)
+{
+ va_list ap;
+
+ if_printf(vap->iv_ifp, "[%s] discard ",
+ ether_sprintf(ieee80211_getbssid(vap, wh)));
+ if (type == NULL) {
+ printf("%s frame, ", ieee80211_mgt_subtype_name[
+ (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) >>
+ IEEE80211_FC0_SUBTYPE_SHIFT]);
+ } else
+ printf("%s frame, ", type);
+ va_start(ap, fmt);
+ vprintf(fmt, ap);
+ va_end(ap);
+ printf("\n");
+}
+
+void
+ieee80211_discard_ie(const struct ieee80211vap *vap,
+ const struct ieee80211_frame *wh,
+ const char *type, const char *fmt, ...)
+{
+ va_list ap;
+
+ if_printf(vap->iv_ifp, "[%s] discard ",
+ ether_sprintf(ieee80211_getbssid(vap, wh)));
+ if (type != NULL)
+ printf("%s information element, ", type);
+ else
+ printf("information element, ");
+ va_start(ap, fmt);
+ vprintf(fmt, ap);
+ va_end(ap);
+ printf("\n");
+}
+
+void
+ieee80211_discard_mac(const struct ieee80211vap *vap,
+ const uint8_t mac[IEEE80211_ADDR_LEN],
+ const char *type, const char *fmt, ...)
+{
+ va_list ap;
+
+ if_printf(vap->iv_ifp, "[%s] discard ", ether_sprintf(mac));
+ if (type != NULL)
+ printf("%s frame, ", type);
+ else
+ printf("frame, ");
+ va_start(ap, fmt);
+ vprintf(fmt, ap);
+ va_end(ap);
+ printf("\n");
+}
+#endif /* IEEE80211_DEBUG */
diff --git a/rtems/freebsd/net80211/ieee80211_input.h b/rtems/freebsd/net80211/ieee80211_input.h
new file mode 100644
index 00000000..778badb6
--- /dev/null
+++ b/rtems/freebsd/net80211/ieee80211_input.h
@@ -0,0 +1,160 @@
+/*-
+ * Copyright (c) 2007-2009 Sam Leffler, Errno Consulting
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+#ifndef _NET80211_IEEE80211_INPUT_HH_
+#define _NET80211_IEEE80211_INPUT_HH_
+
+/* Verify the existence and length of __elem or get out. */
+#define IEEE80211_VERIFY_ELEMENT(__elem, __maxlen, _action) do { \
+ if ((__elem) == NULL) { \
+ IEEE80211_DISCARD(vap, IEEE80211_MSG_ELEMID, \
+ wh, NULL, "%s", "no " #__elem ); \
+ vap->iv_stats.is_rx_elem_missing++; \
+ _action; \
+ } else if ((__elem)[1] > (__maxlen)) { \
+ IEEE80211_DISCARD(vap, IEEE80211_MSG_ELEMID, \
+ wh, NULL, "bad " #__elem " len %d", (__elem)[1]); \
+ vap->iv_stats.is_rx_elem_toobig++; \
+ _action; \
+ } \
+} while (0)
+
+#define IEEE80211_VERIFY_LENGTH(_len, _minlen, _action) do { \
+ if ((_len) < (_minlen)) { \
+ IEEE80211_DISCARD(vap, IEEE80211_MSG_ELEMID, \
+ wh, NULL, "ie too short, got %d, expected %d", \
+ (_len), (_minlen)); \
+ vap->iv_stats.is_rx_elem_toosmall++; \
+ _action; \
+ } \
+} while (0)
+
+#ifdef IEEE80211_DEBUG
+void ieee80211_ssid_mismatch(struct ieee80211vap *, const char *tag,
+ uint8_t mac[IEEE80211_ADDR_LEN], uint8_t *ssid);
+
+#define IEEE80211_VERIFY_SSID(_ni, _ssid, _action) do { \
+ if ((_ssid)[1] != 0 && \
+ ((_ssid)[1] != (_ni)->ni_esslen || \
+ memcmp((_ssid) + 2, (_ni)->ni_essid, (_ssid)[1]) != 0)) { \
+ if (ieee80211_msg_input(vap)) \
+ ieee80211_ssid_mismatch(vap, \
+ ieee80211_mgt_subtype_name[subtype >> \
+ IEEE80211_FC0_SUBTYPE_SHIFT], \
+ wh->i_addr2, _ssid); \
+ vap->iv_stats.is_rx_ssidmismatch++; \
+ _action; \
+ } \
+} while (0)
+#else /* !IEEE80211_DEBUG */
+#define IEEE80211_VERIFY_SSID(_ni, _ssid, _action) do { \
+ if ((_ssid)[1] != 0 && \
+ ((_ssid)[1] != (_ni)->ni_esslen || \
+ memcmp((_ssid) + 2, (_ni)->ni_essid, (_ssid)[1]) != 0)) { \
+ vap->iv_stats.is_rx_ssidmismatch++; \
+ _action; \
+ } \
+} while (0)
+#endif /* !IEEE80211_DEBUG */
+
+/* unaligned little-endian access */
+#define LE_READ_2(p) \
+ ((uint16_t) \
+ ((((const uint8_t *)(p))[0] ) | \
+ (((const uint8_t *)(p))[1] << 8)))
+#define LE_READ_4(p) \
+ ((uint32_t) \
+ ((((const uint8_t *)(p))[0] ) | \
+ (((const uint8_t *)(p))[1] << 8) | \
+ (((const uint8_t *)(p))[2] << 16) | \
+ (((const uint8_t *)(p))[3] << 24)))
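+
+/*
+ * Example (added commentary): LE_READ_2/LE_READ_4 assemble values
+ * byte-by-byte, so they are safe on strict-alignment machines; given
+ * the bytes 34 12, LE_READ_2 yields 0x1234, and given 00 50 f2 01,
+ * LE_READ_4 yields 0x01f25000.
+ */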
+
+static __inline int
+iswpaoui(const uint8_t *frm)
+{
+ return frm[1] > 3 && LE_READ_4(frm+2) == ((WPA_OUI_TYPE<<24)|WPA_OUI);
+}
+
+static __inline int
+iswmeoui(const uint8_t *frm)
+{
+ return frm[1] > 3 && LE_READ_4(frm+2) == ((WME_OUI_TYPE<<24)|WME_OUI);
+}
+
+static __inline int
+iswmeparam(const uint8_t *frm)
+{
+ return frm[1] > 5 && LE_READ_4(frm+2) == ((WME_OUI_TYPE<<24)|WME_OUI) &&
+ frm[6] == WME_PARAM_OUI_SUBTYPE;
+}
+
+static __inline int
+iswmeinfo(const uint8_t *frm)
+{
+ return frm[1] > 5 && LE_READ_4(frm+2) == ((WME_OUI_TYPE<<24)|WME_OUI) &&
+ frm[6] == WME_INFO_OUI_SUBTYPE;
+}
+
+static __inline int
+isatherosoui(const uint8_t *frm)
+{
+ return frm[1] > 3 && LE_READ_4(frm+2) == ((ATH_OUI_TYPE<<24)|ATH_OUI);
+}
+
+static __inline int
+istdmaoui(const uint8_t *frm)
+{
+ return frm[1] > 3 && LE_READ_4(frm+2) == ((TDMA_OUI_TYPE<<24)|TDMA_OUI);
+}
+
+static __inline int
+ishtcapoui(const uint8_t *frm)
+{
+ return frm[1] > 3 && LE_READ_4(frm+2) == ((BCM_OUI_HTCAP<<24)|BCM_OUI);
+}
+
+static __inline int
+ishtinfooui(const uint8_t *frm)
+{
+ return frm[1] > 3 && LE_READ_4(frm+2) == ((BCM_OUI_HTINFO<<24)|BCM_OUI);
+}
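+
+/*
+ * Example (added commentary): these helpers classify vendor-specific
+ * elements by their OUI+type prefix. A WPA element starts
+ * dd <len> 00 50 f2 01 ..., so LE_READ_4(frm+2) over 00 50 f2 01
+ * produces 0x01f25000, which is what (WPA_OUI_TYPE<<24)|WPA_OUI
+ * evaluates to with the byte-swapped OUI constants used by net80211.
+ */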
+
+void ieee80211_deliver_data(struct ieee80211vap *,
+ struct ieee80211_node *, struct mbuf *);
+struct mbuf *ieee80211_defrag(struct ieee80211_node *,
+ struct mbuf *, int);
+struct mbuf *ieee80211_realign(struct ieee80211vap *, struct mbuf *, size_t);
+struct mbuf *ieee80211_decap(struct ieee80211vap *, struct mbuf *, int);
+struct mbuf *ieee80211_decap1(struct mbuf *, int *);
+int ieee80211_setup_rates(struct ieee80211_node *ni,
+ const uint8_t *rates, const uint8_t *xrates, int flags);
+void ieee80211_send_error(struct ieee80211_node *,
+ const uint8_t mac[IEEE80211_ADDR_LEN], int subtype, int arg);
+int ieee80211_alloc_challenge(struct ieee80211_node *);
+int ieee80211_parse_beacon(struct ieee80211_node *, struct mbuf *,
+ struct ieee80211_scanparams *);
+int ieee80211_parse_action(struct ieee80211_node *, struct mbuf *);
+#endif /* _NET80211_IEEE80211_INPUT_HH_ */
diff --git a/rtems/freebsd/net80211/ieee80211_ioctl.c b/rtems/freebsd/net80211/ieee80211_ioctl.c
new file mode 100644
index 00000000..5ec29533
--- /dev/null
+++ b/rtems/freebsd/net80211/ieee80211_ioctl.c
@@ -0,0 +1,3349 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 2001 Atsushi Onoe
+ * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * IEEE 802.11 ioctl support (FreeBSD-specific)
+ */
+
+#include <rtems/freebsd/local/opt_inet.h>
+#include <rtems/freebsd/local/opt_ipx.h>
+#include <rtems/freebsd/local/opt_wlan.h>
+
+#include <rtems/freebsd/sys/endian.h>
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/priv.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/sockio.h>
+#include <rtems/freebsd/sys/systm.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/if_dl.h>
+#include <rtems/freebsd/net/if_media.h>
+#include <rtems/freebsd/net/ethernet.h>
+
+#ifdef INET
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/if_ether.h>
+#endif
+
+#ifdef IPX
+#include <rtems/freebsd/netipx/ipx.h>
+#include <rtems/freebsd/netipx/ipx_if.h>
+#endif
+
+#include <rtems/freebsd/net80211/ieee80211_var.h>
+#include <rtems/freebsd/net80211/ieee80211_ioctl.h>
+#include <rtems/freebsd/net80211/ieee80211_regdomain.h>
+#include <rtems/freebsd/net80211/ieee80211_input.h>
+
+#define IS_UP_AUTO(_vap) \
+ (IFNET_IS_UP_RUNNING((_vap)->iv_ifp) && \
+ (_vap)->iv_roaming == IEEE80211_ROAMING_AUTO)
+
+static const uint8_t zerobssid[IEEE80211_ADDR_LEN];
+static struct ieee80211_channel *findchannel(struct ieee80211com *,
+ int ieee, int mode);
+
+static __noinline int
+ieee80211_ioctl_getkey(struct ieee80211vap *vap, struct ieee80211req *ireq)
+{
+ struct ieee80211com *ic = vap->iv_ic;
+ struct ieee80211_node *ni;
+ struct ieee80211req_key ik;
+ struct ieee80211_key *wk;
+ const struct ieee80211_cipher *cip;
+ u_int kid;
+ int error;
+
+ if (ireq->i_len != sizeof(ik))
+ return EINVAL;
+ error = copyin(ireq->i_data, &ik, sizeof(ik));
+ if (error)
+ return error;
+ kid = ik.ik_keyix;
+ if (kid == IEEE80211_KEYIX_NONE) {
+ ni = ieee80211_find_vap_node(&ic->ic_sta, vap, ik.ik_macaddr);
+ if (ni == NULL)
+ return ENOENT;
+ wk = &ni->ni_ucastkey;
+ } else {
+ if (kid >= IEEE80211_WEP_NKID)
+ return EINVAL;
+ wk = &vap->iv_nw_keys[kid];
+ IEEE80211_ADDR_COPY(&ik.ik_macaddr, vap->iv_bss->ni_macaddr);
+ ni = NULL;
+ }
+ cip = wk->wk_cipher;
+ ik.ik_type = cip->ic_cipher;
+ ik.ik_keylen = wk->wk_keylen;
+ ik.ik_flags = wk->wk_flags & (IEEE80211_KEY_XMIT | IEEE80211_KEY_RECV);
+ if (wk->wk_keyix == vap->iv_def_txkey)
+ ik.ik_flags |= IEEE80211_KEY_DEFAULT;
+ if (priv_check(curthread, PRIV_NET80211_GETKEY) == 0) {
+ /* NB: only root can read key data */
+ ik.ik_keyrsc = wk->wk_keyrsc[IEEE80211_NONQOS_TID];
+ ik.ik_keytsc = wk->wk_keytsc;
+ memcpy(ik.ik_keydata, wk->wk_key, wk->wk_keylen);
+ if (cip->ic_cipher == IEEE80211_CIPHER_TKIP) {
+ memcpy(ik.ik_keydata+wk->wk_keylen,
+ wk->wk_key + IEEE80211_KEYBUF_SIZE,
+ IEEE80211_MICBUF_SIZE);
+ ik.ik_keylen += IEEE80211_MICBUF_SIZE;
+ }
+ } else {
+ ik.ik_keyrsc = 0;
+ ik.ik_keytsc = 0;
+ memset(ik.ik_keydata, 0, sizeof(ik.ik_keydata));
+ }
+ if (ni != NULL)
+ ieee80211_free_node(ni);
+ return copyout(&ik, ireq->i_data, sizeof(ik));
+}
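+
+/*
+ * Usage sketch (illustrative only, not part of the original sources):
+ * this handler is reached from userland via SIOCG80211 with
+ * i_type = IEEE80211_IOC_WPAKEY; the caller fills in ik_keyix (or
+ * IEEE80211_KEYIX_NONE plus ik_macaddr for a station's unicast key).
+ * Here "s" is assumed to be an open AF_INET datagram socket:
+ *
+ *	struct ieee80211req ireq;
+ *	struct ieee80211req_key ik;
+ *	memset(&ireq, 0, sizeof(ireq));
+ *	memset(&ik, 0, sizeof(ik));
+ *	ik.ik_keyix = 0;
+ *	strlcpy(ireq.i_name, "wlan0", sizeof(ireq.i_name));
+ *	ireq.i_type = IEEE80211_IOC_WPAKEY;
+ *	ireq.i_data = &ik;
+ *	ireq.i_len = sizeof(ik);
+ *	ioctl(s, SIOCG80211, &ireq);
+ */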
+
+static __noinline int
+ieee80211_ioctl_getchanlist(struct ieee80211vap *vap, struct ieee80211req *ireq)
+{
+ struct ieee80211com *ic = vap->iv_ic;
+
+ if (sizeof(ic->ic_chan_active) < ireq->i_len)
+ ireq->i_len = sizeof(ic->ic_chan_active);
+ return copyout(&ic->ic_chan_active, ireq->i_data, ireq->i_len);
+}
+
+static __noinline int
+ieee80211_ioctl_getchaninfo(struct ieee80211vap *vap, struct ieee80211req *ireq)
+{
+ struct ieee80211com *ic = vap->iv_ic;
+ int space;
+
+ space = __offsetof(struct ieee80211req_chaninfo,
+ ic_chans[ic->ic_nchans]);
+ if (space > ireq->i_len)
+ space = ireq->i_len;
+ /* XXX assumes compatible layout */
+ return copyout(&ic->ic_nchans, ireq->i_data, space);
+}
+
+static __noinline int
+ieee80211_ioctl_getwpaie(struct ieee80211vap *vap,
+ struct ieee80211req *ireq, int req)
+{
+ struct ieee80211_node *ni;
+ struct ieee80211req_wpaie2 wpaie;
+ int error;
+
+ if (ireq->i_len < IEEE80211_ADDR_LEN)
+ return EINVAL;
+ error = copyin(ireq->i_data, wpaie.wpa_macaddr, IEEE80211_ADDR_LEN);
+ if (error != 0)
+ return error;
+ ni = ieee80211_find_vap_node(&vap->iv_ic->ic_sta, vap, wpaie.wpa_macaddr);
+ if (ni == NULL)
+ return ENOENT;
+ memset(wpaie.wpa_ie, 0, sizeof(wpaie.wpa_ie));
+ if (ni->ni_ies.wpa_ie != NULL) {
+ int ielen = ni->ni_ies.wpa_ie[1] + 2;
+ if (ielen > sizeof(wpaie.wpa_ie))
+ ielen = sizeof(wpaie.wpa_ie);
+ memcpy(wpaie.wpa_ie, ni->ni_ies.wpa_ie, ielen);
+ }
+ if (req == IEEE80211_IOC_WPAIE2) {
+ memset(wpaie.rsn_ie, 0, sizeof(wpaie.rsn_ie));
+ if (ni->ni_ies.rsn_ie != NULL) {
+ int ielen = ni->ni_ies.rsn_ie[1] + 2;
+ if (ielen > sizeof(wpaie.rsn_ie))
+ ielen = sizeof(wpaie.rsn_ie);
+ memcpy(wpaie.rsn_ie, ni->ni_ies.rsn_ie, ielen);
+ }
+ if (ireq->i_len > sizeof(struct ieee80211req_wpaie2))
+ ireq->i_len = sizeof(struct ieee80211req_wpaie2);
+ } else {
+ /* compatibility op, may overwrite wpa ie */
+ /* XXX check ic_flags? */
+ if (ni->ni_ies.rsn_ie != NULL) {
+ int ielen = ni->ni_ies.rsn_ie[1] + 2;
+ if (ielen > sizeof(wpaie.wpa_ie))
+ ielen = sizeof(wpaie.wpa_ie);
+ memcpy(wpaie.wpa_ie, ni->ni_ies.rsn_ie, ielen);
+ }
+ if (ireq->i_len > sizeof(struct ieee80211req_wpaie))
+ ireq->i_len = sizeof(struct ieee80211req_wpaie);
+ }
+ ieee80211_free_node(ni);
+ return copyout(&wpaie, ireq->i_data, ireq->i_len);
+}
+
+static __noinline int
+ieee80211_ioctl_getstastats(struct ieee80211vap *vap, struct ieee80211req *ireq)
+{
+ struct ieee80211_node *ni;
+ uint8_t macaddr[IEEE80211_ADDR_LEN];
+ const int off = __offsetof(struct ieee80211req_sta_stats, is_stats);
+ int error;
+
+ if (ireq->i_len < off)
+ return EINVAL;
+ error = copyin(ireq->i_data, macaddr, IEEE80211_ADDR_LEN);
+ if (error != 0)
+ return error;
+ ni = ieee80211_find_vap_node(&vap->iv_ic->ic_sta, vap, macaddr);
+ if (ni == NULL)
+ return ENOENT;
+ if (ireq->i_len > sizeof(struct ieee80211req_sta_stats))
+ ireq->i_len = sizeof(struct ieee80211req_sta_stats);
+ /* NB: copy out only the statistics */
+ error = copyout(&ni->ni_stats, (uint8_t *) ireq->i_data + off,
+ ireq->i_len - off);
+ ieee80211_free_node(ni);
+ return error;
+}
+
+struct scanreq {
+ struct ieee80211req_scan_result *sr;
+ size_t space;
+};
+
+static size_t
+scan_space(const struct ieee80211_scan_entry *se, int *ielen)
+{
+ size_t len;
+
+ *ielen = se->se_ies.len;
+ /*
+ * NB: ie's can be no more than 255 bytes and the max 802.11
+ * packet is <3Kbytes so we are sure this doesn't overflow
+ * 16-bits; if this is a concern we can drop the ie's.
+ */
+ len = sizeof(struct ieee80211req_scan_result) + se->se_ssid[1] +
+ se->se_meshid[1] + *ielen;
+ return roundup(len, sizeof(uint32_t));
+}
+
+static void
+get_scan_space(void *arg, const struct ieee80211_scan_entry *se)
+{
+ struct scanreq *req = arg;
+ int ielen;
+
+ req->space += scan_space(se, &ielen);
+}
+
+static __noinline void
+get_scan_result(void *arg, const struct ieee80211_scan_entry *se)
+{
+ struct scanreq *req = arg;
+ struct ieee80211req_scan_result *sr;
+ int ielen, len, nr, nxr;
+ uint8_t *cp;
+
+ len = scan_space(se, &ielen);
+ if (len > req->space)
+ return;
+
+ sr = req->sr;
+ KASSERT(len <= 65535 && ielen <= 65535,
+ ("len %u ssid %u ie %u", len, se->se_ssid[1], ielen));
+ sr->isr_len = len;
+ sr->isr_ie_off = sizeof(struct ieee80211req_scan_result);
+ sr->isr_ie_len = ielen;
+ sr->isr_freq = se->se_chan->ic_freq;
+ sr->isr_flags = se->se_chan->ic_flags;
+ sr->isr_rssi = se->se_rssi;
+ sr->isr_noise = se->se_noise;
+ sr->isr_intval = se->se_intval;
+ sr->isr_capinfo = se->se_capinfo;
+ sr->isr_erp = se->se_erp;
+ IEEE80211_ADDR_COPY(sr->isr_bssid, se->se_bssid);
+ nr = min(se->se_rates[1], IEEE80211_RATE_MAXSIZE);
+ memcpy(sr->isr_rates, se->se_rates+2, nr);
+ nxr = min(se->se_xrates[1], IEEE80211_RATE_MAXSIZE - nr);
+ memcpy(sr->isr_rates+nr, se->se_xrates+2, nxr);
+ sr->isr_nrates = nr + nxr;
+
+ /* copy SSID */
+ sr->isr_ssid_len = se->se_ssid[1];
+ cp = ((uint8_t *)sr) + sr->isr_ie_off;
+ memcpy(cp, se->se_ssid+2, sr->isr_ssid_len);
+
+ /* copy mesh id */
+ cp += sr->isr_ssid_len;
+ sr->isr_meshid_len = se->se_meshid[1];
+ memcpy(cp, se->se_meshid+2, sr->isr_meshid_len);
+ cp += sr->isr_meshid_len;
+
+ if (ielen)
+ memcpy(cp, se->se_ies.data, ielen);
+
+ req->space -= len;
+ req->sr = (struct ieee80211req_scan_result *)(((uint8_t *)sr) + len);
+}
+
+static __noinline int
+ieee80211_ioctl_getscanresults(struct ieee80211vap *vap,
+ struct ieee80211req *ireq)
+{
+ struct scanreq req;
+ int error;
+
+ if (ireq->i_len < sizeof(struct scanreq))
+ return EFAULT;
+
+ error = 0;
+ req.space = 0;
+ ieee80211_scan_iterate(vap, get_scan_space, &req);
+ if (req.space > ireq->i_len)
+ req.space = ireq->i_len;
+ if (req.space > 0) {
+ size_t space;
+ void *p;
+
+ space = req.space;
+ /* XXX M_WAITOK after driver lock released */
+ p = malloc(space, M_TEMP, M_NOWAIT | M_ZERO);
+ if (p == NULL)
+ return ENOMEM;
+ req.sr = p;
+ ieee80211_scan_iterate(vap, get_scan_result, &req);
+ ireq->i_len = space - req.space;
+ error = copyout(p, ireq->i_data, ireq->i_len);
+ free(p, M_TEMP);
+ } else
+ ireq->i_len = 0;
+
+ return error;
+}
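+
+/*
+ * Note (added commentary): this is the usual two-pass pattern for
+ * variable-sized results: the first ieee80211_scan_iterate() with
+ * get_scan_space only sums the space needed, the second with
+ * get_scan_result fills a temporary buffer that is then copied out.
+ * Entries that appear between the passes are silently dropped by the
+ * per-entry space check rather than overflowing the buffer.
+ */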
+
+struct stainforeq {
+ struct ieee80211vap *vap;
+ struct ieee80211req_sta_info *si;
+ size_t space;
+};
+
+static size_t
+sta_space(const struct ieee80211_node *ni, size_t *ielen)
+{
+ *ielen = ni->ni_ies.len;
+ return roundup(sizeof(struct ieee80211req_sta_info) + *ielen,
+ sizeof(uint32_t));
+}
+
+static void
+get_sta_space(void *arg, struct ieee80211_node *ni)
+{
+ struct stainforeq *req = arg;
+ size_t ielen;
+
+ if (req->vap != ni->ni_vap)
+ return;
+ if (ni->ni_vap->iv_opmode == IEEE80211_M_HOSTAP &&
+ ni->ni_associd == 0) /* only associated stations */
+ return;
+ req->space += sta_space(ni, &ielen);
+}
+
+static __noinline void
+get_sta_info(void *arg, struct ieee80211_node *ni)
+{
+ struct stainforeq *req = arg;
+ struct ieee80211vap *vap = ni->ni_vap;
+ struct ieee80211req_sta_info *si;
+ size_t ielen, len;
+ uint8_t *cp;
+
+ if (req->vap != ni->ni_vap)
+ return;
+ if (vap->iv_opmode == IEEE80211_M_HOSTAP &&
+ ni->ni_associd == 0) /* only associated stations */
+ return;
+ if (ni->ni_chan == IEEE80211_CHAN_ANYC) /* XXX bogus entry */
+ return;
+ len = sta_space(ni, &ielen);
+ if (len > req->space)
+ return;
+ si = req->si;
+ si->isi_len = len;
+ si->isi_ie_off = sizeof(struct ieee80211req_sta_info);
+ si->isi_ie_len = ielen;
+ si->isi_freq = ni->ni_chan->ic_freq;
+ si->isi_flags = ni->ni_chan->ic_flags;
+ si->isi_state = ni->ni_flags;
+ si->isi_authmode = ni->ni_authmode;
+ vap->iv_ic->ic_node_getsignal(ni, &si->isi_rssi, &si->isi_noise);
+ vap->iv_ic->ic_node_getmimoinfo(ni, &si->isi_mimo);
+ si->isi_capinfo = ni->ni_capinfo;
+ si->isi_erp = ni->ni_erp;
+ IEEE80211_ADDR_COPY(si->isi_macaddr, ni->ni_macaddr);
+ si->isi_nrates = ni->ni_rates.rs_nrates;
+ if (si->isi_nrates > 15)
+ si->isi_nrates = 15;
+ memcpy(si->isi_rates, ni->ni_rates.rs_rates, si->isi_nrates);
+ si->isi_txrate = ni->ni_txrate;
+ if (si->isi_txrate & IEEE80211_RATE_MCS) {
+ const struct ieee80211_mcs_rates *mcs =
+ &ieee80211_htrates[ni->ni_txrate &~ IEEE80211_RATE_MCS];
+ if (IEEE80211_IS_CHAN_HT40(ni->ni_chan)) {
+ if (ni->ni_flags & IEEE80211_NODE_SGI40)
+ si->isi_txmbps = mcs->ht40_rate_800ns;
+ else
+ si->isi_txmbps = mcs->ht40_rate_400ns;
+ } else {
+ if (ni->ni_flags & IEEE80211_NODE_SGI20)
+ si->isi_txmbps = mcs->ht20_rate_800ns;
+ else
+ si->isi_txmbps = mcs->ht20_rate_400ns;
+ }
+ } else
+ si->isi_txmbps = si->isi_txrate;
+ si->isi_associd = ni->ni_associd;
+ si->isi_txpower = ni->ni_txpower;
+ si->isi_vlan = ni->ni_vlan;
+ if (ni->ni_flags & IEEE80211_NODE_QOS) {
+ memcpy(si->isi_txseqs, ni->ni_txseqs, sizeof(ni->ni_txseqs));
+ memcpy(si->isi_rxseqs, ni->ni_rxseqs, sizeof(ni->ni_rxseqs));
+ } else {
+ si->isi_txseqs[0] = ni->ni_txseqs[IEEE80211_NONQOS_TID];
+ si->isi_rxseqs[0] = ni->ni_rxseqs[IEEE80211_NONQOS_TID];
+ }
+ /* NB: leave all cases in case we relax ni_associd == 0 check */
+ if (ieee80211_node_is_authorized(ni))
+ si->isi_inact = vap->iv_inact_run;
+ else if (ni->ni_associd != 0 ||
+ (vap->iv_opmode == IEEE80211_M_WDS &&
+ (vap->iv_flags_ext & IEEE80211_FEXT_WDSLEGACY)))
+ si->isi_inact = vap->iv_inact_auth;
+ else
+ si->isi_inact = vap->iv_inact_init;
+ si->isi_inact = (si->isi_inact - ni->ni_inact) * IEEE80211_INACT_WAIT;
+ si->isi_localid = ni->ni_mllid;
+ si->isi_peerid = ni->ni_mlpid;
+ si->isi_peerstate = ni->ni_mlstate;
+
+ if (ielen) {
+ cp = ((uint8_t *)si) + si->isi_ie_off;
+ memcpy(cp, ni->ni_ies.data, ielen);
+ }
+
+ req->si = (struct ieee80211req_sta_info *)(((uint8_t *)si) + len);
+ req->space -= len;
+}
+
+static __noinline int
+getstainfo_common(struct ieee80211vap *vap, struct ieee80211req *ireq,
+ struct ieee80211_node *ni, int off)
+{
+ struct ieee80211com *ic = vap->iv_ic;
+ struct stainforeq req;
+ size_t space;
+ void *p;
+ int error;
+
+ error = 0;
+ req.space = 0;
+ req.vap = vap;
+ if (ni == NULL)
+ ieee80211_iterate_nodes(&ic->ic_sta, get_sta_space, &req);
+ else
+ get_sta_space(&req, ni);
+ if (req.space > ireq->i_len)
+ req.space = ireq->i_len;
+ if (req.space > 0) {
+ space = req.space;
+ /* XXX M_WAITOK after driver lock released */
+ p = malloc(space, M_TEMP, M_NOWAIT | M_ZERO);
+ if (p == NULL) {
+ error = ENOMEM;
+ goto bad;
+ }
+ req.si = p;
+ if (ni == NULL)
+ ieee80211_iterate_nodes(&ic->ic_sta, get_sta_info, &req);
+ else
+ get_sta_info(&req, ni);
+ ireq->i_len = space - req.space;
+ error = copyout(p, (uint8_t *) ireq->i_data+off, ireq->i_len);
+ free(p, M_TEMP);
+ } else
+ ireq->i_len = 0;
+bad:
+ if (ni != NULL)
+ ieee80211_free_node(ni);
+ return error;
+}
+
+static __noinline int
+ieee80211_ioctl_getstainfo(struct ieee80211vap *vap, struct ieee80211req *ireq)
+{
+ uint8_t macaddr[IEEE80211_ADDR_LEN];
+ const int off = __offsetof(struct ieee80211req_sta_req, info);
+ struct ieee80211_node *ni;
+ int error;
+
+ if (ireq->i_len < sizeof(struct ieee80211req_sta_req))
+ return EFAULT;
+ error = copyin(ireq->i_data, macaddr, IEEE80211_ADDR_LEN);
+ if (error != 0)
+ return error;
+ if (IEEE80211_ADDR_EQ(macaddr, vap->iv_ifp->if_broadcastaddr)) {
+ ni = NULL;
+ } else {
+ ni = ieee80211_find_vap_node(&vap->iv_ic->ic_sta, vap, macaddr);
+ if (ni == NULL)
+ return ENOENT;
+ }
+ return getstainfo_common(vap, ireq, ni, off);
+}
+
+static __noinline int
+ieee80211_ioctl_getstatxpow(struct ieee80211vap *vap, struct ieee80211req *ireq)
+{
+ struct ieee80211_node *ni;
+ struct ieee80211req_sta_txpow txpow;
+ int error;
+
+ if (ireq->i_len != sizeof(txpow))
+ return EINVAL;
+ error = copyin(ireq->i_data, &txpow, sizeof(txpow));
+ if (error != 0)
+ return error;
+ ni = ieee80211_find_vap_node(&vap->iv_ic->ic_sta, vap, txpow.it_macaddr);
+ if (ni == NULL)
+ return ENOENT;
+ txpow.it_txpow = ni->ni_txpower;
+ error = copyout(&txpow, ireq->i_data, sizeof(txpow));
+ ieee80211_free_node(ni);
+ return error;
+}
+
+static __noinline int
+ieee80211_ioctl_getwmeparam(struct ieee80211vap *vap, struct ieee80211req *ireq)
+{
+ struct ieee80211com *ic = vap->iv_ic;
+ struct ieee80211_wme_state *wme = &ic->ic_wme;
+ struct wmeParams *wmep;
+ int ac;
+
+ if ((ic->ic_caps & IEEE80211_C_WME) == 0)
+ return EINVAL;
+
+ ac = (ireq->i_len & IEEE80211_WMEPARAM_VAL);
+ if (ac >= WME_NUM_AC)
+ ac = WME_AC_BE;
+ if (ireq->i_len & IEEE80211_WMEPARAM_BSS)
+ wmep = &wme->wme_wmeBssChanParams.cap_wmeParams[ac];
+ else
+ wmep = &wme->wme_wmeChanParams.cap_wmeParams[ac];
+ switch (ireq->i_type) {
+ case IEEE80211_IOC_WME_CWMIN: /* WME: CWmin */
+ ireq->i_val = wmep->wmep_logcwmin;
+ break;
+ case IEEE80211_IOC_WME_CWMAX: /* WME: CWmax */
+ ireq->i_val = wmep->wmep_logcwmax;
+ break;
+ case IEEE80211_IOC_WME_AIFS: /* WME: AIFS */
+ ireq->i_val = wmep->wmep_aifsn;
+ break;
+ case IEEE80211_IOC_WME_TXOPLIMIT: /* WME: txops limit */
+ ireq->i_val = wmep->wmep_txopLimit;
+ break;
+ case IEEE80211_IOC_WME_ACM: /* WME: ACM (bss only) */
+ wmep = &wme->wme_wmeBssChanParams.cap_wmeParams[ac];
+ ireq->i_val = wmep->wmep_acm;
+ break;
+ case IEEE80211_IOC_WME_ACKPOLICY: /* WME: ACK policy (!bss only)*/
+ wmep = &wme->wme_wmeChanParams.cap_wmeParams[ac];
+ ireq->i_val = !wmep->wmep_noackPolicy;
+ break;
+ }
+ return 0;
+}
+
+static __noinline int
+ieee80211_ioctl_getmaccmd(struct ieee80211vap *vap, struct ieee80211req *ireq)
+{
+ const struct ieee80211_aclator *acl = vap->iv_acl;
+
+ return (acl == NULL ? EINVAL : acl->iac_getioctl(vap, ireq));
+}
+
+static __noinline int
+ieee80211_ioctl_getcurchan(struct ieee80211vap *vap, struct ieee80211req *ireq)
+{
+ struct ieee80211com *ic = vap->iv_ic;
+ struct ieee80211_channel *c;
+
+ if (ireq->i_len != sizeof(struct ieee80211_channel))
+ return EINVAL;
+ /*
+ * vaps may have different operating channels when HT is
+ * in use. When in RUN state report the vap-specific channel.
+ * Otherwise return curchan.
+ */
+ if (vap->iv_state == IEEE80211_S_RUN)
+ c = vap->iv_bss->ni_chan;
+ else
+ c = ic->ic_curchan;
+ return copyout(c, ireq->i_data, sizeof(*c));
+}
+
+static int
+getappie(const struct ieee80211_appie *aie, struct ieee80211req *ireq)
+{
+ if (aie == NULL)
+ return EINVAL;
+ /* NB: truncate, caller can check length */
+ if (ireq->i_len > aie->ie_len)
+ ireq->i_len = aie->ie_len;
+ return copyout(aie->ie_data, ireq->i_data, ireq->i_len);
+}
+
+static int
+ieee80211_ioctl_getappie(struct ieee80211vap *vap, struct ieee80211req *ireq)
+{
+ uint8_t fc0;
+
+ fc0 = ireq->i_val & 0xff;
+ if ((fc0 & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_MGT)
+ return EINVAL;
+ /* NB: could check iv_opmode and reject but hardly worth the effort */
+ switch (fc0 & IEEE80211_FC0_SUBTYPE_MASK) {
+ case IEEE80211_FC0_SUBTYPE_BEACON:
+ return getappie(vap->iv_appie_beacon, ireq);
+ case IEEE80211_FC0_SUBTYPE_PROBE_RESP:
+ return getappie(vap->iv_appie_proberesp, ireq);
+ case IEEE80211_FC0_SUBTYPE_ASSOC_RESP:
+ return getappie(vap->iv_appie_assocresp, ireq);
+ case IEEE80211_FC0_SUBTYPE_PROBE_REQ:
+ return getappie(vap->iv_appie_probereq, ireq);
+ case IEEE80211_FC0_SUBTYPE_ASSOC_REQ:
+ return getappie(vap->iv_appie_assocreq, ireq);
+ case IEEE80211_FC0_SUBTYPE_BEACON|IEEE80211_FC0_SUBTYPE_PROBE_RESP:
+ return getappie(vap->iv_appie_wpa, ireq);
+ }
+ return EINVAL;
+}
+
+static __noinline int
+ieee80211_ioctl_getregdomain(struct ieee80211vap *vap,
+ const struct ieee80211req *ireq)
+{
+ struct ieee80211com *ic = vap->iv_ic;
+
+ if (ireq->i_len != sizeof(ic->ic_regdomain))
+ return EINVAL;
+ return copyout(&ic->ic_regdomain, ireq->i_data,
+ sizeof(ic->ic_regdomain));
+}
+
+static __noinline int
+ieee80211_ioctl_getroam(struct ieee80211vap *vap,
+ const struct ieee80211req *ireq)
+{
+ size_t len = ireq->i_len;
+ /* NB: accept short requests for backwards compat */
+ if (len > sizeof(vap->iv_roamparms))
+ len = sizeof(vap->iv_roamparms);
+ return copyout(vap->iv_roamparms, ireq->i_data, len);
+}
+
+static __noinline int
+ieee80211_ioctl_gettxparams(struct ieee80211vap *vap,
+ const struct ieee80211req *ireq)
+{
+ size_t len = ireq->i_len;
+ /* NB: accept short requests for backwards compat */
+ if (len > sizeof(vap->iv_txparms))
+ len = sizeof(vap->iv_txparms);
+ return copyout(vap->iv_txparms, ireq->i_data, len);
+}
+
+static __noinline int
+ieee80211_ioctl_getdevcaps(struct ieee80211com *ic,
+ const struct ieee80211req *ireq)
+{
+ struct ieee80211_devcaps_req *dc;
+ struct ieee80211req_chaninfo *ci;
+ int maxchans, error;
+
+ maxchans = 1 + ((ireq->i_len - sizeof(struct ieee80211_devcaps_req)) /
+ sizeof(struct ieee80211_channel));
+ /* NB: require 1 so we know ic_nchans is accessible */
+ if (maxchans < 1)
+ return EINVAL;
+ /* constrain max request size, 2K channels is ~24Kbytes */
+ if (maxchans > 2048)
+ maxchans = 2048;
+ dc = (struct ieee80211_devcaps_req *)
+ malloc(IEEE80211_DEVCAPS_SIZE(maxchans), M_TEMP, M_NOWAIT | M_ZERO);
+ if (dc == NULL)
+ return ENOMEM;
+ dc->dc_drivercaps = ic->ic_caps;
+ dc->dc_cryptocaps = ic->ic_cryptocaps;
+ dc->dc_htcaps = ic->ic_htcaps;
+ ci = &dc->dc_chaninfo;
+ ic->ic_getradiocaps(ic, maxchans, &ci->ic_nchans, ci->ic_chans);
+ KASSERT(ci->ic_nchans <= maxchans,
+ ("nchans %d maxchans %d", ci->ic_nchans, maxchans));
+ ieee80211_sort_channels(ci->ic_chans, ci->ic_nchans);
+ error = copyout(dc, ireq->i_data, IEEE80211_DEVCAPS_SPACE(dc));
+ free(dc, M_TEMP);
+ return error;
+}
+
+static __noinline int
+ieee80211_ioctl_getstavlan(struct ieee80211vap *vap, struct ieee80211req *ireq)
+{
+ struct ieee80211_node *ni;
+ struct ieee80211req_sta_vlan vlan;
+ int error;
+
+ if (ireq->i_len != sizeof(vlan))
+ return EINVAL;
+ error = copyin(ireq->i_data, &vlan, sizeof(vlan));
+ if (error != 0)
+ return error;
+ if (!IEEE80211_ADDR_EQ(vlan.sv_macaddr, zerobssid)) {
+ ni = ieee80211_find_vap_node(&vap->iv_ic->ic_sta, vap,
+ vlan.sv_macaddr);
+ if (ni == NULL)
+ return ENOENT;
+ } else
+ ni = ieee80211_ref_node(vap->iv_bss);
+ vlan.sv_vlan = ni->ni_vlan;
+ error = copyout(&vlan, ireq->i_data, sizeof(vlan));
+ ieee80211_free_node(ni);
+ return error;
+}
+
+/*
+ * Dummy ioctl get handler so the linker set is defined.
+ */
+static int
+dummy_ioctl_get(struct ieee80211vap *vap, struct ieee80211req *ireq)
+{
+ return ENOSYS;
+}
+IEEE80211_IOCTL_GET(dummy, dummy_ioctl_get);
+
+static int
+ieee80211_ioctl_getdefault(struct ieee80211vap *vap, struct ieee80211req *ireq)
+{
+ ieee80211_ioctl_getfunc * const *get;
+ int error;
+
+ SET_FOREACH(get, ieee80211_ioctl_getset) {
+ error = (*get)(vap, ireq);
+ if (error != ENOSYS)
+ return error;
+ }
+ return EINVAL;
+}
+
+/*
+ * When building the kernel with -O2 on the i386 architecture, gcc
+ * seems to want to inline this function into ieee80211_ioctl()
+ * (which is the only routine that calls it). When this happens,
+ * ieee80211_ioctl() ends up consuming an additional 2K of stack
+ * space. (Exactly why it needs so much is unclear.) The problem
+ * is that it's possible for ieee80211_ioctl() to invoke other
+ * routines (including driver init functions) which could then find
+ * themselves perilously close to exhausting the stack.
+ *
+ * To avoid this, we deliberately prevent gcc from inlining this
+ * routine. Another way to avoid this is to use less aggressive
+ * optimization when compiling this file (i.e. -O instead of -O2)
+ * but special-casing the compilation of this one module in the
+ * build system would be awkward.
+ */
+static __noinline int
+ieee80211_ioctl_get80211(struct ieee80211vap *vap, u_long cmd,
+ struct ieee80211req *ireq)
+{
+#define MS(_v, _f) (((_v) & _f) >> _f##_S)
+ struct ieee80211com *ic = vap->iv_ic;
+ u_int kid, len;
+ uint8_t tmpkey[IEEE80211_KEYBUF_SIZE];
+ char tmpssid[IEEE80211_NWID_LEN];
+ int error = 0;
+
+ switch (ireq->i_type) {
+ case IEEE80211_IOC_SSID:
+ switch (vap->iv_state) {
+ case IEEE80211_S_INIT:
+ case IEEE80211_S_SCAN:
+ ireq->i_len = vap->iv_des_ssid[0].len;
+ memcpy(tmpssid, vap->iv_des_ssid[0].ssid, ireq->i_len);
+ break;
+ default:
+ ireq->i_len = vap->iv_bss->ni_esslen;
+ memcpy(tmpssid, vap->iv_bss->ni_essid, ireq->i_len);
+ break;
+ }
+ error = copyout(tmpssid, ireq->i_data, ireq->i_len);
+ break;
+ case IEEE80211_IOC_NUMSSIDS:
+ ireq->i_val = 1;
+ break;
+ case IEEE80211_IOC_WEP:
+ if ((vap->iv_flags & IEEE80211_F_PRIVACY) == 0)
+ ireq->i_val = IEEE80211_WEP_OFF;
+ else if (vap->iv_flags & IEEE80211_F_DROPUNENC)
+ ireq->i_val = IEEE80211_WEP_ON;
+ else
+ ireq->i_val = IEEE80211_WEP_MIXED;
+ break;
+ case IEEE80211_IOC_WEPKEY:
+ kid = (u_int) ireq->i_val;
+ if (kid >= IEEE80211_WEP_NKID)
+ return EINVAL;
+ len = (u_int) vap->iv_nw_keys[kid].wk_keylen;
+ /* NB: only root can read WEP keys */
+ if (priv_check(curthread, PRIV_NET80211_GETKEY) == 0) {
+ bcopy(vap->iv_nw_keys[kid].wk_key, tmpkey, len);
+ } else {
+ bzero(tmpkey, len);
+ }
+ ireq->i_len = len;
+ error = copyout(tmpkey, ireq->i_data, len);
+ break;
+ case IEEE80211_IOC_NUMWEPKEYS:
+ ireq->i_val = IEEE80211_WEP_NKID;
+ break;
+ case IEEE80211_IOC_WEPTXKEY:
+ ireq->i_val = vap->iv_def_txkey;
+ break;
+ case IEEE80211_IOC_AUTHMODE:
+ if (vap->iv_flags & IEEE80211_F_WPA)
+ ireq->i_val = IEEE80211_AUTH_WPA;
+ else
+ ireq->i_val = vap->iv_bss->ni_authmode;
+ break;
+ case IEEE80211_IOC_CHANNEL:
+ ireq->i_val = ieee80211_chan2ieee(ic, ic->ic_curchan);
+ break;
+ case IEEE80211_IOC_POWERSAVE:
+ if (vap->iv_flags & IEEE80211_F_PMGTON)
+ ireq->i_val = IEEE80211_POWERSAVE_ON;
+ else
+ ireq->i_val = IEEE80211_POWERSAVE_OFF;
+ break;
+ case IEEE80211_IOC_POWERSAVESLEEP:
+ ireq->i_val = ic->ic_lintval;
+ break;
+ case IEEE80211_IOC_RTSTHRESHOLD:
+ ireq->i_val = vap->iv_rtsthreshold;
+ break;
+ case IEEE80211_IOC_PROTMODE:
+ ireq->i_val = ic->ic_protmode;
+ break;
+ case IEEE80211_IOC_TXPOWER:
+ /*
+ * Tx power limit is the min of max regulatory
+ * power, any user-set limit, and the max the
+ * radio can do.
+ */
+ ireq->i_val = 2*ic->ic_curchan->ic_maxregpower;
+ if (ireq->i_val > ic->ic_txpowlimit)
+ ireq->i_val = ic->ic_txpowlimit;
+ if (ireq->i_val > ic->ic_curchan->ic_maxpower)
+ ireq->i_val = ic->ic_curchan->ic_maxpower;
+ break;
+ case IEEE80211_IOC_WPA:
+ switch (vap->iv_flags & IEEE80211_F_WPA) {
+ case IEEE80211_F_WPA1:
+ ireq->i_val = 1;
+ break;
+ case IEEE80211_F_WPA2:
+ ireq->i_val = 2;
+ break;
+ case IEEE80211_F_WPA1 | IEEE80211_F_WPA2:
+ ireq->i_val = 3;
+ break;
+ default:
+ ireq->i_val = 0;
+ break;
+ }
+ break;
+ case IEEE80211_IOC_CHANLIST:
+ error = ieee80211_ioctl_getchanlist(vap, ireq);
+ break;
+ case IEEE80211_IOC_ROAMING:
+ ireq->i_val = vap->iv_roaming;
+ break;
+ case IEEE80211_IOC_PRIVACY:
+ ireq->i_val = (vap->iv_flags & IEEE80211_F_PRIVACY) != 0;
+ break;
+ case IEEE80211_IOC_DROPUNENCRYPTED:
+ ireq->i_val = (vap->iv_flags & IEEE80211_F_DROPUNENC) != 0;
+ break;
+ case IEEE80211_IOC_COUNTERMEASURES:
+ ireq->i_val = (vap->iv_flags & IEEE80211_F_COUNTERM) != 0;
+ break;
+ case IEEE80211_IOC_WME:
+ ireq->i_val = (vap->iv_flags & IEEE80211_F_WME) != 0;
+ break;
+ case IEEE80211_IOC_HIDESSID:
+ ireq->i_val = (vap->iv_flags & IEEE80211_F_HIDESSID) != 0;
+ break;
+ case IEEE80211_IOC_APBRIDGE:
+ ireq->i_val = (vap->iv_flags & IEEE80211_F_NOBRIDGE) == 0;
+ break;
+ case IEEE80211_IOC_WPAKEY:
+ error = ieee80211_ioctl_getkey(vap, ireq);
+ break;
+ case IEEE80211_IOC_CHANINFO:
+ error = ieee80211_ioctl_getchaninfo(vap, ireq);
+ break;
+ case IEEE80211_IOC_BSSID:
+ if (ireq->i_len != IEEE80211_ADDR_LEN)
+ return EINVAL;
+ if (vap->iv_state == IEEE80211_S_RUN) {
+ error = copyout(vap->iv_opmode == IEEE80211_M_WDS ?
+ vap->iv_bss->ni_macaddr : vap->iv_bss->ni_bssid,
+ ireq->i_data, ireq->i_len);
+ } else
+ error = copyout(vap->iv_des_bssid, ireq->i_data,
+ ireq->i_len);
+ break;
+ case IEEE80211_IOC_WPAIE:
+ error = ieee80211_ioctl_getwpaie(vap, ireq, ireq->i_type);
+ break;
+ case IEEE80211_IOC_WPAIE2:
+ error = ieee80211_ioctl_getwpaie(vap, ireq, ireq->i_type);
+ break;
+ case IEEE80211_IOC_SCAN_RESULTS:
+ error = ieee80211_ioctl_getscanresults(vap, ireq);
+ break;
+ case IEEE80211_IOC_STA_STATS:
+ error = ieee80211_ioctl_getstastats(vap, ireq);
+ break;
+ case IEEE80211_IOC_TXPOWMAX:
+ ireq->i_val = vap->iv_bss->ni_txpower;
+ break;
+ case IEEE80211_IOC_STA_TXPOW:
+ error = ieee80211_ioctl_getstatxpow(vap, ireq);
+ break;
+ case IEEE80211_IOC_STA_INFO:
+ error = ieee80211_ioctl_getstainfo(vap, ireq);
+ break;
+ case IEEE80211_IOC_WME_CWMIN: /* WME: CWmin */
+ case IEEE80211_IOC_WME_CWMAX: /* WME: CWmax */
+ case IEEE80211_IOC_WME_AIFS: /* WME: AIFS */
+ case IEEE80211_IOC_WME_TXOPLIMIT: /* WME: txops limit */
+ case IEEE80211_IOC_WME_ACM: /* WME: ACM (bss only) */
+ case IEEE80211_IOC_WME_ACKPOLICY: /* WME: ACK policy (bss only) */
+ error = ieee80211_ioctl_getwmeparam(vap, ireq);
+ break;
+ case IEEE80211_IOC_DTIM_PERIOD:
+ ireq->i_val = vap->iv_dtim_period;
+ break;
+ case IEEE80211_IOC_BEACON_INTERVAL:
+ /* NB: get from ic_bss for station mode */
+ ireq->i_val = vap->iv_bss->ni_intval;
+ break;
+ case IEEE80211_IOC_PUREG:
+ ireq->i_val = (vap->iv_flags & IEEE80211_F_PUREG) != 0;
+ break;
+ case IEEE80211_IOC_BGSCAN:
+ ireq->i_val = (vap->iv_flags & IEEE80211_F_BGSCAN) != 0;
+ break;
+ case IEEE80211_IOC_BGSCAN_IDLE:
+ ireq->i_val = vap->iv_bgscanidle*hz/1000; /* ms */
+ break;
+ case IEEE80211_IOC_BGSCAN_INTERVAL:
+ ireq->i_val = vap->iv_bgscanintvl/hz; /* seconds */
+ break;
+ case IEEE80211_IOC_SCANVALID:
+ ireq->i_val = vap->iv_scanvalid/hz; /* seconds */
+ break;
+ case IEEE80211_IOC_FRAGTHRESHOLD:
+ ireq->i_val = vap->iv_fragthreshold;
+ break;
+ case IEEE80211_IOC_MACCMD:
+ error = ieee80211_ioctl_getmaccmd(vap, ireq);
+ break;
+ case IEEE80211_IOC_BURST:
+ ireq->i_val = (vap->iv_flags & IEEE80211_F_BURST) != 0;
+ break;
+ case IEEE80211_IOC_BMISSTHRESHOLD:
+ ireq->i_val = vap->iv_bmissthreshold;
+ break;
+ case IEEE80211_IOC_CURCHAN:
+ error = ieee80211_ioctl_getcurchan(vap, ireq);
+ break;
+ case IEEE80211_IOC_SHORTGI:
+ ireq->i_val = 0;
+ if (vap->iv_flags_ht & IEEE80211_FHT_SHORTGI20)
+ ireq->i_val |= IEEE80211_HTCAP_SHORTGI20;
+ if (vap->iv_flags_ht & IEEE80211_FHT_SHORTGI40)
+ ireq->i_val |= IEEE80211_HTCAP_SHORTGI40;
+ break;
+ case IEEE80211_IOC_AMPDU:
+ ireq->i_val = 0;
+ if (vap->iv_flags_ht & IEEE80211_FHT_AMPDU_TX)
+ ireq->i_val |= 1;
+ if (vap->iv_flags_ht & IEEE80211_FHT_AMPDU_RX)
+ ireq->i_val |= 2;
+ break;
+ case IEEE80211_IOC_AMPDU_LIMIT:
+ if (vap->iv_opmode == IEEE80211_M_HOSTAP)
+ ireq->i_val = vap->iv_ampdu_rxmax;
+ else if (vap->iv_state == IEEE80211_S_RUN)
+ ireq->i_val = MS(vap->iv_bss->ni_htparam,
+ IEEE80211_HTCAP_MAXRXAMPDU);
+ else
+ ireq->i_val = vap->iv_ampdu_limit;
+ break;
+ case IEEE80211_IOC_AMPDU_DENSITY:
+ if (vap->iv_opmode == IEEE80211_M_STA &&
+ vap->iv_state == IEEE80211_S_RUN)
+ ireq->i_val = MS(vap->iv_bss->ni_htparam,
+ IEEE80211_HTCAP_MPDUDENSITY);
+ else
+ ireq->i_val = vap->iv_ampdu_density;
+ break;
+ case IEEE80211_IOC_AMSDU:
+ ireq->i_val = 0;
+ if (vap->iv_flags_ht & IEEE80211_FHT_AMSDU_TX)
+ ireq->i_val |= 1;
+ if (vap->iv_flags_ht & IEEE80211_FHT_AMSDU_RX)
+ ireq->i_val |= 2;
+ break;
+ case IEEE80211_IOC_AMSDU_LIMIT:
+ ireq->i_val = vap->iv_amsdu_limit; /* XXX truncation? */
+ break;
+ case IEEE80211_IOC_PUREN:
+ ireq->i_val = (vap->iv_flags_ht & IEEE80211_FHT_PUREN) != 0;
+ break;
+ case IEEE80211_IOC_DOTH:
+ ireq->i_val = (vap->iv_flags & IEEE80211_F_DOTH) != 0;
+ break;
+ case IEEE80211_IOC_REGDOMAIN:
+ error = ieee80211_ioctl_getregdomain(vap, ireq);
+ break;
+ case IEEE80211_IOC_ROAM:
+ error = ieee80211_ioctl_getroam(vap, ireq);
+ break;
+ case IEEE80211_IOC_TXPARAMS:
+ error = ieee80211_ioctl_gettxparams(vap, ireq);
+ break;
+ case IEEE80211_IOC_HTCOMPAT:
+ ireq->i_val = (vap->iv_flags_ht & IEEE80211_FHT_HTCOMPAT) != 0;
+ break;
+ case IEEE80211_IOC_DWDS:
+ ireq->i_val = (vap->iv_flags & IEEE80211_F_DWDS) != 0;
+ break;
+ case IEEE80211_IOC_INACTIVITY:
+ ireq->i_val = (vap->iv_flags_ext & IEEE80211_FEXT_INACT) != 0;
+ break;
+ case IEEE80211_IOC_APPIE:
+ error = ieee80211_ioctl_getappie(vap, ireq);
+ break;
+ case IEEE80211_IOC_WPS:
+ ireq->i_val = (vap->iv_flags_ext & IEEE80211_FEXT_WPS) != 0;
+ break;
+ case IEEE80211_IOC_TSN:
+ ireq->i_val = (vap->iv_flags_ext & IEEE80211_FEXT_TSN) != 0;
+ break;
+ case IEEE80211_IOC_DFS:
+ ireq->i_val = (vap->iv_flags_ext & IEEE80211_FEXT_DFS) != 0;
+ break;
+ case IEEE80211_IOC_DOTD:
+ ireq->i_val = (vap->iv_flags_ext & IEEE80211_FEXT_DOTD) != 0;
+ break;
+ case IEEE80211_IOC_DEVCAPS:
+ error = ieee80211_ioctl_getdevcaps(ic, ireq);
+ break;
+ case IEEE80211_IOC_HTPROTMODE:
+ ireq->i_val = ic->ic_htprotmode;
+ break;
+ case IEEE80211_IOC_HTCONF:
+ if (vap->iv_flags_ht & IEEE80211_FHT_HT) {
+ ireq->i_val = 1;
+ if (vap->iv_flags_ht & IEEE80211_FHT_USEHT40)
+ ireq->i_val |= 2;
+ } else
+ ireq->i_val = 0;
+ break;
+ case IEEE80211_IOC_STA_VLAN:
+ error = ieee80211_ioctl_getstavlan(vap, ireq);
+ break;
+ case IEEE80211_IOC_SMPS:
+ if (vap->iv_opmode == IEEE80211_M_STA &&
+ vap->iv_state == IEEE80211_S_RUN) {
+ if (vap->iv_bss->ni_flags & IEEE80211_NODE_MIMO_RTS)
+ ireq->i_val = IEEE80211_HTCAP_SMPS_DYNAMIC;
+ else if (vap->iv_bss->ni_flags & IEEE80211_NODE_MIMO_PS)
+ ireq->i_val = IEEE80211_HTCAP_SMPS_ENA;
+ else
+ ireq->i_val = IEEE80211_HTCAP_SMPS_OFF;
+ } else
+ ireq->i_val = vap->iv_htcaps & IEEE80211_HTCAP_SMPS;
+ break;
+ case IEEE80211_IOC_RIFS:
+ if (vap->iv_opmode == IEEE80211_M_STA &&
+ vap->iv_state == IEEE80211_S_RUN)
+ ireq->i_val =
+ (vap->iv_bss->ni_flags & IEEE80211_NODE_RIFS) != 0;
+ else
+ ireq->i_val =
+ (vap->iv_flags_ht & IEEE80211_FHT_RIFS) != 0;
+ break;
+ default:
+ error = ieee80211_ioctl_getdefault(vap, ireq);
+ break;
+ }
+ return error;
+#undef MS
+}
+
+static __noinline int
+ieee80211_ioctl_setkey(struct ieee80211vap *vap, struct ieee80211req *ireq)
+{
+ struct ieee80211req_key ik;
+ struct ieee80211_node *ni;
+ struct ieee80211_key *wk;
+ uint16_t kid;
+ int error, i;
+
+ if (ireq->i_len != sizeof(ik))
+ return EINVAL;
+ error = copyin(ireq->i_data, &ik, sizeof(ik));
+ if (error)
+ return error;
+ /* NB: cipher support is verified by ieee80211_crypto_newkey */
+ /* NB: this also checks ik->ik_keylen > sizeof(wk->wk_key) */
+ if (ik.ik_keylen > sizeof(ik.ik_keydata))
+ return E2BIG;
+ kid = ik.ik_keyix;
+ if (kid == IEEE80211_KEYIX_NONE) {
+ /* XXX unicast keys currently must be tx/rx */
+ if (ik.ik_flags != (IEEE80211_KEY_XMIT | IEEE80211_KEY_RECV))
+ return EINVAL;
+ if (vap->iv_opmode == IEEE80211_M_STA) {
+ ni = ieee80211_ref_node(vap->iv_bss);
+ if (!IEEE80211_ADDR_EQ(ik.ik_macaddr, ni->ni_bssid)) {
+ ieee80211_free_node(ni);
+ return EADDRNOTAVAIL;
+ }
+ } else {
+ ni = ieee80211_find_vap_node(&vap->iv_ic->ic_sta, vap,
+ ik.ik_macaddr);
+ if (ni == NULL)
+ return ENOENT;
+ }
+ wk = &ni->ni_ucastkey;
+ } else {
+ if (kid >= IEEE80211_WEP_NKID)
+ return EINVAL;
+ wk = &vap->iv_nw_keys[kid];
+ /*
+ * Global slots start off w/o any assigned key index.
+ * Force one here for consistency with IEEE80211_IOC_WEPKEY.
+ */
+ if (wk->wk_keyix == IEEE80211_KEYIX_NONE)
+ wk->wk_keyix = kid;
+ ni = NULL;
+ }
+ error = 0;
+ ieee80211_key_update_begin(vap);
+ if (ieee80211_crypto_newkey(vap, ik.ik_type, ik.ik_flags, wk)) {
+ wk->wk_keylen = ik.ik_keylen;
+ /* NB: MIC presence is implied by cipher type */
+ if (wk->wk_keylen > IEEE80211_KEYBUF_SIZE)
+ wk->wk_keylen = IEEE80211_KEYBUF_SIZE;
+ for (i = 0; i < IEEE80211_TID_SIZE; i++)
+ wk->wk_keyrsc[i] = ik.ik_keyrsc;
+ wk->wk_keytsc = 0; /* new key, reset */
+ memset(wk->wk_key, 0, sizeof(wk->wk_key));
+ memcpy(wk->wk_key, ik.ik_keydata, ik.ik_keylen);
+ IEEE80211_ADDR_COPY(wk->wk_macaddr,
+ ni != NULL ? ni->ni_macaddr : ik.ik_macaddr);
+ if (!ieee80211_crypto_setkey(vap, wk))
+ error = EIO;
+ else if ((ik.ik_flags & IEEE80211_KEY_DEFAULT))
+ vap->iv_def_txkey = kid;
+ } else
+ error = ENXIO;
+ ieee80211_key_update_end(vap);
+ if (ni != NULL)
+ ieee80211_free_node(ni);
+ return error;
+}
+
+static __noinline int
+ieee80211_ioctl_delkey(struct ieee80211vap *vap, struct ieee80211req *ireq)
+{
+ struct ieee80211req_del_key dk;
+ int kid, error;
+
+ if (ireq->i_len != sizeof(dk))
+ return EINVAL;
+ error = copyin(ireq->i_data, &dk, sizeof(dk));
+ if (error)
+ return error;
+ kid = dk.idk_keyix;
+ /* XXX uint8_t -> uint16_t */
+ if (dk.idk_keyix == (uint8_t) IEEE80211_KEYIX_NONE) {
+ struct ieee80211_node *ni;
+
+ if (vap->iv_opmode == IEEE80211_M_STA) {
+ ni = ieee80211_ref_node(vap->iv_bss);
+ if (!IEEE80211_ADDR_EQ(dk.idk_macaddr, ni->ni_bssid)) {
+ ieee80211_free_node(ni);
+ return EADDRNOTAVAIL;
+ }
+ } else {
+ ni = ieee80211_find_vap_node(&vap->iv_ic->ic_sta, vap,
+ dk.idk_macaddr);
+ if (ni == NULL)
+ return ENOENT;
+ }
+ /* XXX error return */
+ ieee80211_node_delucastkey(ni);
+ ieee80211_free_node(ni);
+ } else {
+ if (kid >= IEEE80211_WEP_NKID)
+ return EINVAL;
+ /* XXX error return */
+ ieee80211_crypto_delkey(vap, &vap->iv_nw_keys[kid]);
+ }
+ return 0;
+}
+
+struct mlmeop {
+ struct ieee80211vap *vap;
+ int op;
+ int reason;
+};
+
+static void
+mlmedebug(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN],
+ int op, int reason)
+{
+#ifdef IEEE80211_DEBUG
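+ /* NB: indexed by MLME op; entries must line up with the IEEE80211_MLME_* values */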
+ static const struct {
+ int mask;
+ const char *opstr;
+ } ops[] = {
+ { 0, "op#0" },
+ { IEEE80211_MSG_IOCTL | IEEE80211_MSG_STATE |
+ IEEE80211_MSG_ASSOC, "assoc" },
+ { IEEE80211_MSG_IOCTL | IEEE80211_MSG_STATE |
+ IEEE80211_MSG_ASSOC, "disassoc" },
+ { IEEE80211_MSG_IOCTL | IEEE80211_MSG_STATE |
+ IEEE80211_MSG_AUTH, "deauth" },
+ { IEEE80211_MSG_IOCTL | IEEE80211_MSG_STATE |
+ IEEE80211_MSG_AUTH, "authorize" },
+ { IEEE80211_MSG_IOCTL | IEEE80211_MSG_STATE |
+ IEEE80211_MSG_AUTH, "unauthorize" },
+ };
+
+ if (op == IEEE80211_MLME_AUTH) {
+ IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_IOCTL |
+ IEEE80211_MSG_STATE | IEEE80211_MSG_AUTH, mac,
+ "station authenticate %s via MLME (reason %d)",
+ reason == IEEE80211_STATUS_SUCCESS ? "ACCEPT" : "REJECT",
+ reason);
+ } else if (!(IEEE80211_MLME_ASSOC <= op && op <= IEEE80211_MLME_AUTH)) {
+ IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_ANY, mac,
+ "unknown MLME request %d (reason %d)", op, reason);
+ } else if (reason == IEEE80211_STATUS_SUCCESS) {
+ IEEE80211_NOTE_MAC(vap, ops[op].mask, mac,
+ "station %s via MLME", ops[op].opstr);
+ } else {
+ IEEE80211_NOTE_MAC(vap, ops[op].mask, mac,
+ "station %s via MLME (reason %d)", ops[op].opstr, reason);
+ }
+#endif /* IEEE80211_DEBUG */
+}
+
+static void
+domlme(void *arg, struct ieee80211_node *ni)
+{
+ struct mlmeop *mop = arg;
+ struct ieee80211vap *vap = ni->ni_vap;
+
+ if (vap != mop->vap)
+ return;
+ /*
+ * NB: if ni_associd is zero then the node is already cleaned
+ * up and we don't need to do this (we're safely holding a
+ * reference but should otherwise not modify its state).
+ */
+ if (ni->ni_associd == 0)
+ return;
+ mlmedebug(vap, ni->ni_macaddr, mop->op, mop->reason);
+ if (mop->op == IEEE80211_MLME_DEAUTH) {
+ IEEE80211_SEND_MGMT(ni, IEEE80211_FC0_SUBTYPE_DEAUTH,
+ mop->reason);
+ } else {
+ IEEE80211_SEND_MGMT(ni, IEEE80211_FC0_SUBTYPE_DISASSOC,
+ mop->reason);
+ }
+ ieee80211_node_leave(ni);
+}
+
+static int
+setmlme_dropsta(struct ieee80211vap *vap,
+ const uint8_t mac[IEEE80211_ADDR_LEN], struct mlmeop *mlmeop)
+{
+ struct ieee80211com *ic = vap->iv_ic;
+ struct ieee80211_node_table *nt = &ic->ic_sta;
+ struct ieee80211_node *ni;
+ int error = 0;
+
+ /* NB: the broadcast address means do 'em all */
+ if (!IEEE80211_ADDR_EQ(mac, ic->ic_ifp->if_broadcastaddr)) {
+ IEEE80211_NODE_LOCK(nt);
+ ni = ieee80211_find_node_locked(nt, mac);
+ if (ni != NULL) {
+ domlme(mlmeop, ni);
+ ieee80211_free_node(ni);
+ } else
+ error = ENOENT;
+ IEEE80211_NODE_UNLOCK(nt);
+ } else {
+ ieee80211_iterate_nodes(nt, domlme, mlmeop);
+ }
+ return error;
+}
+
+static __noinline int
+setmlme_common(struct ieee80211vap *vap, int op,
+ const uint8_t mac[IEEE80211_ADDR_LEN], int reason)
+{
+ struct ieee80211com *ic = vap->iv_ic;
+ struct ieee80211_node_table *nt = &ic->ic_sta;
+ struct ieee80211_node *ni;
+ struct mlmeop mlmeop;
+ int error;
+
+ error = 0;
+ switch (op) {
+ case IEEE80211_MLME_DISASSOC:
+ case IEEE80211_MLME_DEAUTH:
+ switch (vap->iv_opmode) {
+ case IEEE80211_M_STA:
+ mlmedebug(vap, vap->iv_bss->ni_macaddr, op, reason);
+ /* XXX not quite right */
+ ieee80211_new_state(vap, IEEE80211_S_INIT, reason);
+ break;
+ case IEEE80211_M_HOSTAP:
+ mlmeop.vap = vap;
+ mlmeop.op = op;
+ mlmeop.reason = reason;
+ error = setmlme_dropsta(vap, mac, &mlmeop);
+ break;
+ case IEEE80211_M_WDS:
+ /* XXX user app should send raw frame? */
+ if (op != IEEE80211_MLME_DEAUTH) {
+ error = EINVAL;
+ break;
+ }
+#if 0
+ /* XXX accept any address, simplifies user code */
+ if (!IEEE80211_ADDR_EQ(mac, vap->iv_bss->ni_macaddr)) {
+ error = EINVAL;
+ break;
+ }
+#endif
+ mlmedebug(vap, vap->iv_bss->ni_macaddr, op, reason);
+ ni = ieee80211_ref_node(vap->iv_bss);
+ IEEE80211_SEND_MGMT(ni,
+ IEEE80211_FC0_SUBTYPE_DEAUTH, reason);
+ ieee80211_free_node(ni);
+ break;
+ default:
+ error = EINVAL;
+ break;
+ }
+ break;
+ case IEEE80211_MLME_AUTHORIZE:
+ case IEEE80211_MLME_UNAUTHORIZE:
+ if (vap->iv_opmode != IEEE80211_M_HOSTAP &&
+ vap->iv_opmode != IEEE80211_M_WDS) {
+ error = EINVAL;
+ break;
+ }
+ IEEE80211_NODE_LOCK(nt);
+ ni = ieee80211_find_vap_node_locked(nt, vap, mac);
+ if (ni != NULL) {
+ mlmedebug(vap, mac, op, reason);
+ if (op == IEEE80211_MLME_AUTHORIZE)
+ ieee80211_node_authorize(ni);
+ else
+ ieee80211_node_unauthorize(ni);
+ ieee80211_free_node(ni);
+ } else
+ error = ENOENT;
+ IEEE80211_NODE_UNLOCK(nt);
+ break;
+ case IEEE80211_MLME_AUTH:
+ if (vap->iv_opmode != IEEE80211_M_HOSTAP) {
+ error = EINVAL;
+ break;
+ }
+ IEEE80211_NODE_LOCK(nt);
+ ni = ieee80211_find_vap_node_locked(nt, vap, mac);
+ if (ni != NULL) {
+ mlmedebug(vap, mac, op, reason);
+ if (reason == IEEE80211_STATUS_SUCCESS) {
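+ /* NB: final arg is the auth sequence number; 2 answers the station's request */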
+ IEEE80211_SEND_MGMT(ni,
+ IEEE80211_FC0_SUBTYPE_AUTH, 2);
+ /*
+ * For shared key auth, just continue the
+ * exchange. Otherwise when 802.1x is not in
+ * use mark the port authorized at this point
+ * so traffic can flow.
+ */
+ if (ni->ni_authmode != IEEE80211_AUTH_8021X &&
+ ni->ni_challenge == NULL)
+ ieee80211_node_authorize(ni);
+ } else {
+ vap->iv_stats.is_rx_acl++;
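+ /* NB: arg packs the auth sequence (low 16 bits) and status code (high 16 bits) */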
+ ieee80211_send_error(ni, ni->ni_macaddr,
+ IEEE80211_FC0_SUBTYPE_AUTH, 2|(reason<<16));
+ ieee80211_node_leave(ni);
+ }
+ ieee80211_free_node(ni);
+ } else
+ error = ENOENT;
+ IEEE80211_NODE_UNLOCK(nt);
+ break;
+ default:
+ error = EINVAL;
+ break;
+ }
+ return error;
+}
+
+struct scanlookup {
+ const uint8_t *mac;
+ int esslen;
+ const uint8_t *essid;
+ const struct ieee80211_scan_entry *se;
+};
+
+/*
+ * Match the mac address and, if an ssid is supplied, the ssid as well.
+ */
+static void
+mlmelookup(void *arg, const struct ieee80211_scan_entry *se)
+{
+ struct scanlookup *look = arg;
+
+ if (!IEEE80211_ADDR_EQ(look->mac, se->se_macaddr))
+ return;
+ if (look->esslen != 0) {
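+ /* NB: se_ssid is a raw IE: [0] element id, [1] length, [2..] ssid bytes */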
+ if (se->se_ssid[1] != look->esslen)
+ return;
+ if (memcmp(look->essid, se->se_ssid+2, look->esslen))
+ return;
+ }
+ look->se = se;
+}
+
+static __noinline int
+setmlme_assoc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN],
+ int ssid_len, const uint8_t ssid[IEEE80211_NWID_LEN])
+{
+ struct scanlookup lookup;
+
+ /* XXX ibss/ahdemo */
+ if (vap->iv_opmode != IEEE80211_M_STA)
+ return EINVAL;
+
+ /* NB: this is racy if roaming is !manual */
+ lookup.se = NULL;
+ lookup.mac = mac;
+ lookup.esslen = ssid_len;
+ lookup.essid = ssid;
+ ieee80211_scan_iterate(vap, mlmelookup, &lookup);
+ if (lookup.se == NULL)
+ return ENOENT;
+ mlmedebug(vap, mac, IEEE80211_MLME_ASSOC, 0);
+ if (!ieee80211_sta_join(vap, lookup.se->se_chan, lookup.se))
+ return EIO; /* XXX unique but could be better */
+ return 0;
+}
+
+static __noinline int
+ieee80211_ioctl_setmlme(struct ieee80211vap *vap, struct ieee80211req *ireq)
+{
+ struct ieee80211req_mlme mlme;
+ int error;
+
+ if (ireq->i_len != sizeof(mlme))
+ return EINVAL;
+ error = copyin(ireq->i_data, &mlme, sizeof(mlme));
+ if (error)
+ return error;
+ if (mlme.im_op == IEEE80211_MLME_ASSOC)
+ return setmlme_assoc(vap, mlme.im_macaddr,
+ vap->iv_des_ssid[0].len, vap->iv_des_ssid[0].ssid);
+ else
+ return setmlme_common(vap, mlme.im_op,
+ mlme.im_macaddr, mlme.im_reason);
+}
+
+static __noinline int
+ieee80211_ioctl_macmac(struct ieee80211vap *vap, struct ieee80211req *ireq)
+{
+ uint8_t mac[IEEE80211_ADDR_LEN];
+ const struct ieee80211_aclator *acl = vap->iv_acl;
+ int error;
+
+ if (ireq->i_len != sizeof(mac))
+ return EINVAL;
+ error = copyin(ireq->i_data, mac, ireq->i_len);
+ if (error)
+ return error;
+ if (acl == NULL) {
+ acl = ieee80211_aclator_get("mac");
+ if (acl == NULL || !acl->iac_attach(vap))
+ return EINVAL;
+ vap->iv_acl = acl;
+ }
+ if (ireq->i_type == IEEE80211_IOC_ADDMAC)
+ acl->iac_add(vap, mac);
+ else
+ acl->iac_remove(vap, mac);
+ return 0;
+}
+
+static __noinline int
+ieee80211_ioctl_setmaccmd(struct ieee80211vap *vap, struct ieee80211req *ireq)
+{
+ const struct ieee80211_aclator *acl = vap->iv_acl;
+
+ switch (ireq->i_val) {
+ case IEEE80211_MACCMD_POLICY_OPEN:
+ case IEEE80211_MACCMD_POLICY_ALLOW:
+ case IEEE80211_MACCMD_POLICY_DENY:
+ case IEEE80211_MACCMD_POLICY_RADIUS:
+ if (acl == NULL) {
+ acl = ieee80211_aclator_get("mac");
+ if (acl == NULL || !acl->iac_attach(vap))
+ return EINVAL;
+ vap->iv_acl = acl;
+ }
+ acl->iac_setpolicy(vap, ireq->i_val);
+ break;
+ case IEEE80211_MACCMD_FLUSH:
+ if (acl != NULL)
+ acl->iac_flush(vap);
+ /* NB: silently ignore when not in use */
+ break;
+ case IEEE80211_MACCMD_DETACH:
+ if (acl != NULL) {
+ vap->iv_acl = NULL;
+ acl->iac_detach(vap);
+ }
+ break;
+ default:
+ if (acl == NULL)
+ return EINVAL;
+ else
+ return acl->iac_setioctl(vap, ireq);
+ }
+ return 0;
+}
+
+static __noinline int
+ieee80211_ioctl_setchanlist(struct ieee80211vap *vap, struct ieee80211req *ireq)
+{
+ struct ieee80211com *ic = vap->iv_ic;
+ uint8_t *chanlist, *list;
+ int i, nchan, maxchan, error;
+
+ if (ireq->i_len > sizeof(ic->ic_chan_active))
+ ireq->i_len = sizeof(ic->ic_chan_active);
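+ /* NB: one allocation holds the user bitmap followed by a scratch bitmap */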
+ list = malloc(ireq->i_len + IEEE80211_CHAN_BYTES, M_TEMP,
+ M_NOWAIT | M_ZERO);
+ if (list == NULL)
+ return ENOMEM;
+ error = copyin(ireq->i_data, list, ireq->i_len);
+ if (error) {
+ free(list, M_TEMP);
+ return error;
+ }
+ nchan = 0;
+ chanlist = list + ireq->i_len; /* NB: zero'd already */
+ maxchan = ireq->i_len * NBBY;
+ for (i = 0; i < ic->ic_nchans; i++) {
+ const struct ieee80211_channel *c = &ic->ic_channels[i];
+ /*
+ * Calculate the intersection of the user list and the
+ * available channels so users can do things like specify
+ * 1-255 to get all available channels.
+ */
+ if (c->ic_ieee < maxchan && isset(list, c->ic_ieee)) {
+ setbit(chanlist, c->ic_ieee);
+ nchan++;
+ }
+ }
+ if (nchan == 0) {
+ free(list, M_TEMP);
+ return EINVAL;
+ }
+ if (ic->ic_bsschan != IEEE80211_CHAN_ANYC && /* XXX */
+ isclr(chanlist, ic->ic_bsschan->ic_ieee))
+ ic->ic_bsschan = IEEE80211_CHAN_ANYC;
+ memcpy(ic->ic_chan_active, chanlist, IEEE80211_CHAN_BYTES);
+ ieee80211_scan_flush(vap);
+ free(list, M_TEMP);
+ return ENETRESET;
+}
+
+static __noinline int
+ieee80211_ioctl_setstastats(struct ieee80211vap *vap, struct ieee80211req *ireq)
+{
+ struct ieee80211_node *ni;
+ uint8_t macaddr[IEEE80211_ADDR_LEN];
+ int error;
+
+ /*
+ * NB: we could copyin ieee80211req_sta_stats so apps
+ * could make selective changes but that's overkill;
+ * just clear all stats for now.
+ */
+ if (ireq->i_len < IEEE80211_ADDR_LEN)
+ return EINVAL;
+ error = copyin(ireq->i_data, macaddr, IEEE80211_ADDR_LEN);
+ if (error != 0)
+ return error;
+ ni = ieee80211_find_vap_node(&vap->iv_ic->ic_sta, vap, macaddr);
+ if (ni == NULL)
+ return ENOENT;
+ /* XXX require ni_vap == vap? */
+ memset(&ni->ni_stats, 0, sizeof(ni->ni_stats));
+ ieee80211_free_node(ni);
+ return 0;
+}
+
+static __noinline int
+ieee80211_ioctl_setstatxpow(struct ieee80211vap *vap, struct ieee80211req *ireq)
+{
+ struct ieee80211_node *ni;
+ struct ieee80211req_sta_txpow txpow;
+ int error;
+
+ if (ireq->i_len != sizeof(txpow))
+ return EINVAL;
+ error = copyin(ireq->i_data, &txpow, sizeof(txpow));
+ if (error != 0)
+ return error;
+ ni = ieee80211_find_vap_node(&vap->iv_ic->ic_sta, vap, txpow.it_macaddr);
+ if (ni == NULL)
+ return ENOENT;
+ ni->ni_txpower = txpow.it_txpow;
+ ieee80211_free_node(ni);
+ return error;
+}
+
+static __noinline int
+ieee80211_ioctl_setwmeparam(struct ieee80211vap *vap, struct ieee80211req *ireq)
+{
+ struct ieee80211com *ic = vap->iv_ic;
+ struct ieee80211_wme_state *wme = &ic->ic_wme;
+ struct wmeParams *wmep, *chanp;
+ int isbss, ac;
+
+ if ((ic->ic_caps & IEEE80211_C_WME) == 0)
+ return EOPNOTSUPP;
+
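+ /* NB: the access class and the BSS flag are packed into i_len by the caller */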
+ isbss = (ireq->i_len & IEEE80211_WMEPARAM_BSS);
+ ac = (ireq->i_len & IEEE80211_WMEPARAM_VAL);
+ if (ac >= WME_NUM_AC)
+ ac = WME_AC_BE;
+ if (isbss) {
+ chanp = &wme->wme_bssChanParams.cap_wmeParams[ac];
+ wmep = &wme->wme_wmeBssChanParams.cap_wmeParams[ac];
+ } else {
+ chanp = &wme->wme_chanParams.cap_wmeParams[ac];
+ wmep = &wme->wme_wmeChanParams.cap_wmeParams[ac];
+ }
+ switch (ireq->i_type) {
+ case IEEE80211_IOC_WME_CWMIN: /* WME: CWmin */
+ if (isbss) {
+ wmep->wmep_logcwmin = ireq->i_val;
+ if ((wme->wme_flags & WME_F_AGGRMODE) == 0)
+ chanp->wmep_logcwmin = ireq->i_val;
+ } else {
+ wmep->wmep_logcwmin = chanp->wmep_logcwmin =
+ ireq->i_val;
+ }
+ break;
+ case IEEE80211_IOC_WME_CWMAX: /* WME: CWmax */
+ if (isbss) {
+ wmep->wmep_logcwmax = ireq->i_val;
+ if ((wme->wme_flags & WME_F_AGGRMODE) == 0)
+ chanp->wmep_logcwmax = ireq->i_val;
+ } else {
+ wmep->wmep_logcwmax = chanp->wmep_logcwmax =
+ ireq->i_val;
+ }
+ break;
+ case IEEE80211_IOC_WME_AIFS: /* WME: AIFS */
+ if (isbss) {
+ wmep->wmep_aifsn = ireq->i_val;
+ if ((wme->wme_flags & WME_F_AGGRMODE) == 0)
+ chanp->wmep_aifsn = ireq->i_val;
+ } else {
+ wmep->wmep_aifsn = chanp->wmep_aifsn = ireq->i_val;
+ }
+ break;
+ case IEEE80211_IOC_WME_TXOPLIMIT: /* WME: txops limit */
+ if (isbss) {
+ wmep->wmep_txopLimit = ireq->i_val;
+ if ((wme->wme_flags & WME_F_AGGRMODE) == 0)
+ chanp->wmep_txopLimit = ireq->i_val;
+ } else {
+ wmep->wmep_txopLimit = chanp->wmep_txopLimit =
+ ireq->i_val;
+ }
+ break;
+ case IEEE80211_IOC_WME_ACM: /* WME: ACM (bss only) */
+ wmep->wmep_acm = ireq->i_val;
+ if ((wme->wme_flags & WME_F_AGGRMODE) == 0)
+ chanp->wmep_acm = ireq->i_val;
+ break;
+ case IEEE80211_IOC_WME_ACKPOLICY: /* WME: ACK policy (!bss only) */
+ wmep->wmep_noackPolicy = chanp->wmep_noackPolicy =
+ (ireq->i_val) == 0;
+ break;
+ }
+ ieee80211_wme_updateparams(vap);
+ return 0;
+}
+
+static int
+find11gchannel(struct ieee80211com *ic, int start, int freq)
+{
+ const struct ieee80211_channel *c;
+ int i;
+
+ for (i = start+1; i < ic->ic_nchans; i++) {
+ c = &ic->ic_channels[i];
+ if (c->ic_freq == freq && IEEE80211_IS_CHAN_ANYG(c))
+ return 1;
+ }
+ /* NB: should not be needed but in case things are mis-sorted */
+ for (i = 0; i < start; i++) {
+ c = &ic->ic_channels[i];
+ if (c->ic_freq == freq && IEEE80211_IS_CHAN_ANYG(c))
+ return 1;
+ }
+ return 0;
+}
+
+static struct ieee80211_channel *
+findchannel(struct ieee80211com *ic, int ieee, int mode)
+{
+ static const u_int chanflags[IEEE80211_MODE_MAX] = {
+ [IEEE80211_MODE_AUTO] = 0,
+ [IEEE80211_MODE_11A] = IEEE80211_CHAN_A,
+ [IEEE80211_MODE_11B] = IEEE80211_CHAN_B,
+ [IEEE80211_MODE_11G] = IEEE80211_CHAN_G,
+ [IEEE80211_MODE_FH] = IEEE80211_CHAN_FHSS,
+ [IEEE80211_MODE_TURBO_A] = IEEE80211_CHAN_108A,
+ [IEEE80211_MODE_TURBO_G] = IEEE80211_CHAN_108G,
+ [IEEE80211_MODE_STURBO_A] = IEEE80211_CHAN_STURBO,
+ [IEEE80211_MODE_HALF] = IEEE80211_CHAN_HALF,
+ [IEEE80211_MODE_QUARTER] = IEEE80211_CHAN_QUARTER,
+ /* NB: handled specially below */
+ [IEEE80211_MODE_11NA] = IEEE80211_CHAN_A,
+ [IEEE80211_MODE_11NG] = IEEE80211_CHAN_G,
+ };
+ u_int modeflags;
+ int i;
+
+ modeflags = chanflags[mode];
+ for (i = 0; i < ic->ic_nchans; i++) {
+ struct ieee80211_channel *c = &ic->ic_channels[i];
+
+ if (c->ic_ieee != ieee)
+ continue;
+ if (mode == IEEE80211_MODE_AUTO) {
+ /* ignore turbo channels for autoselect */
+ if (IEEE80211_IS_CHAN_TURBO(c))
+ continue;
+ /*
+ * XXX special-case 11b/g channels so we
+ * always select the g channel if both
+ * are present.
+ * XXX prefer HT to non-HT?
+ */
+ if (!IEEE80211_IS_CHAN_B(c) ||
+ !find11gchannel(ic, i, c->ic_freq))
+ return c;
+ } else {
+ /* must check HT specially */
+ if ((mode == IEEE80211_MODE_11NA ||
+ mode == IEEE80211_MODE_11NG) &&
+ !IEEE80211_IS_CHAN_HT(c))
+ continue;
+ if ((c->ic_flags & modeflags) == modeflags)
+ return c;
+ }
+ }
+ return NULL;
+}
+
+/*
+ * Check the specified channel against any desired mode (aka netband).
+ * This is only used (presently) when operating in hostap mode
+ * to enforce consistency.
+ */
+static int
+check_mode_consistency(const struct ieee80211_channel *c, int mode)
+{
+ KASSERT(c != IEEE80211_CHAN_ANYC, ("oops, no channel"));
+
+ switch (mode) {
+ case IEEE80211_MODE_11B:
+ return (IEEE80211_IS_CHAN_B(c));
+ case IEEE80211_MODE_11G:
+ return (IEEE80211_IS_CHAN_ANYG(c) && !IEEE80211_IS_CHAN_HT(c));
+ case IEEE80211_MODE_11A:
+ return (IEEE80211_IS_CHAN_A(c) && !IEEE80211_IS_CHAN_HT(c));
+ case IEEE80211_MODE_STURBO_A:
+ return (IEEE80211_IS_CHAN_STURBO(c));
+ case IEEE80211_MODE_11NA:
+ return (IEEE80211_IS_CHAN_HTA(c));
+ case IEEE80211_MODE_11NG:
+ return (IEEE80211_IS_CHAN_HTG(c));
+ }
+ return 1;
+
+}
+
+/*
+ * Common code to set the current channel. If the device
+ * is up and running this may result in an immediate channel
+ * change or a kick of the state machine.
+ */
+static int
+setcurchan(struct ieee80211vap *vap, struct ieee80211_channel *c)
+{
+ struct ieee80211com *ic = vap->iv_ic;
+ int error;
+
+ if (c != IEEE80211_CHAN_ANYC) {
+ if (IEEE80211_IS_CHAN_RADAR(c))
+ return EBUSY; /* XXX better code? */
+ if (vap->iv_opmode == IEEE80211_M_HOSTAP) {
+ if (IEEE80211_IS_CHAN_NOHOSTAP(c))
+ return EINVAL;
+ if (!check_mode_consistency(c, vap->iv_des_mode))
+ return EINVAL;
+ } else if (vap->iv_opmode == IEEE80211_M_IBSS) {
+ if (IEEE80211_IS_CHAN_NOADHOC(c))
+ return EINVAL;
+ }
+ if (vap->iv_state == IEEE80211_S_RUN &&
+ vap->iv_bss->ni_chan == c)
+ return 0; /* NB: nothing to do */
+ }
+ vap->iv_des_chan = c;
+
+ error = 0;
+ if (vap->iv_opmode == IEEE80211_M_MONITOR &&
+ vap->iv_des_chan != IEEE80211_CHAN_ANYC) {
+ /*
+ * Monitor mode can switch directly.
+ */
+ if (IFNET_IS_UP_RUNNING(vap->iv_ifp)) {
+ /* XXX need state machine for other vap's to follow */
+ ieee80211_setcurchan(ic, vap->iv_des_chan);
+ vap->iv_bss->ni_chan = ic->ic_curchan;
+ } else
+ ic->ic_curchan = vap->iv_des_chan;
+ ic->ic_rt = ieee80211_get_ratetable(ic->ic_curchan);
+ } else {
+ /*
+ * Need to go through the state machine in case we
+ * need to reassociate or the like. The state machine
+ * will pickup the desired channel and avoid scanning.
+ */
+ if (IS_UP_AUTO(vap))
+ ieee80211_new_state(vap, IEEE80211_S_SCAN, 0);
+ else if (vap->iv_des_chan != IEEE80211_CHAN_ANYC) {
+ /*
+ * When not up+running and a real channel has
+ * been specified fix the current channel so
+ * there is immediate feedback; e.g. via ifconfig.
+ */
+ ic->ic_curchan = vap->iv_des_chan;
+ ic->ic_rt = ieee80211_get_ratetable(ic->ic_curchan);
+ }
+ }
+ return error;
+}
+
+/*
+ * Old api for setting the current channel; this is
+ * deprecated because channel numbers are ambiguous.
+ */
+static __noinline int
+ieee80211_ioctl_setchannel(struct ieee80211vap *vap,
+ const struct ieee80211req *ireq)
+{
+ struct ieee80211com *ic = vap->iv_ic;
+ struct ieee80211_channel *c;
+
+ /* XXX 0xffff overflows 16-bit signed */
+ if (ireq->i_val == 0 ||
+ ireq->i_val == (int16_t) IEEE80211_CHAN_ANY) {
+ c = IEEE80211_CHAN_ANYC;
+ } else {
+ struct ieee80211_channel *c2;
+
+ c = findchannel(ic, ireq->i_val, vap->iv_des_mode);
+ if (c == NULL) {
+ c = findchannel(ic, ireq->i_val,
+ IEEE80211_MODE_AUTO);
+ if (c == NULL)
+ return EINVAL;
+ }
+ /*
+ * Fine tune channel selection based on desired mode:
+ * if 11b is requested, find the 11b version of any
+ * 11g channel returned,
+ * if static turbo, find the turbo version of any
+ * 11a channel returned,
+ * if 11na is requested, find the ht version of any
+ * 11a channel returned,
+ * if 11ng is requested, find the ht version of any
+ * 11g channel returned,
+ * otherwise we should be ok with what we've got.
+ */
+ switch (vap->iv_des_mode) {
+ case IEEE80211_MODE_11B:
+ if (IEEE80211_IS_CHAN_ANYG(c)) {
+ c2 = findchannel(ic, ireq->i_val,
+ IEEE80211_MODE_11B);
+ /* NB: should not happen, =>'s 11g w/o 11b */
+ if (c2 != NULL)
+ c = c2;
+ }
+ break;
+ case IEEE80211_MODE_TURBO_A:
+ if (IEEE80211_IS_CHAN_A(c)) {
+ c2 = findchannel(ic, ireq->i_val,
+ IEEE80211_MODE_TURBO_A);
+ if (c2 != NULL)
+ c = c2;
+ }
+ break;
+ case IEEE80211_MODE_11NA:
+ if (IEEE80211_IS_CHAN_A(c)) {
+ c2 = findchannel(ic, ireq->i_val,
+ IEEE80211_MODE_11NA);
+ if (c2 != NULL)
+ c = c2;
+ }
+ break;
+ case IEEE80211_MODE_11NG:
+ if (IEEE80211_IS_CHAN_ANYG(c)) {
+ c2 = findchannel(ic, ireq->i_val,
+ IEEE80211_MODE_11NG);
+ if (c2 != NULL)
+ c = c2;
+ }
+ break;
+ default: /* NB: no static turboG */
+ break;
+ }
+ }
+ return setcurchan(vap, c);
+}
+
+/*
+ * New/current api for setting the current channel; a complete
+ * channel description is provided so there is no ambiguity in
+ * identifying the channel.
+ */
+static __noinline int
+ieee80211_ioctl_setcurchan(struct ieee80211vap *vap,
+ const struct ieee80211req *ireq)
+{
+ struct ieee80211com *ic = vap->iv_ic;
+ struct ieee80211_channel chan, *c;
+ int error;
+
+ if (ireq->i_len != sizeof(chan))
+ return EINVAL;
+ error = copyin(ireq->i_data, &chan, sizeof(chan));
+ if (error != 0)
+ return error;
+ /* XXX 0xffff overflows 16-bit signed */
+ if (chan.ic_freq == 0 || chan.ic_freq == IEEE80211_CHAN_ANY) {
+ c = IEEE80211_CHAN_ANYC;
+ } else {
+ c = ieee80211_find_channel(ic, chan.ic_freq, chan.ic_flags);
+ if (c == NULL)
+ return EINVAL;
+ }
+ return setcurchan(vap, c);
+}
+
+static __noinline int
+ieee80211_ioctl_setregdomain(struct ieee80211vap *vap,
+ const struct ieee80211req *ireq)
+{
+ struct ieee80211_regdomain_req *reg;
+ int nchans, error;
+
+ nchans = 1 + ((ireq->i_len - sizeof(struct ieee80211_regdomain_req)) /
+ sizeof(struct ieee80211_channel));
+ if (!(1 <= nchans && nchans <= IEEE80211_CHAN_MAX)) {
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_IOCTL,
+ "%s: bad # chans, i_len %d nchans %d\n", __func__,
+ ireq->i_len, nchans);
+ return EINVAL;
+ }
+ reg = (struct ieee80211_regdomain_req *)
+ malloc(IEEE80211_REGDOMAIN_SIZE(nchans), M_TEMP, M_NOWAIT);
+ if (reg == NULL) {
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_IOCTL,
+ "%s: no memory, nchans %d\n", __func__, nchans);
+ return ENOMEM;
+ }
+ error = copyin(ireq->i_data, reg, IEEE80211_REGDOMAIN_SIZE(nchans));
+ if (error == 0) {
+ /* NB: validate inline channel count against storage size */
+ if (reg->chaninfo.ic_nchans != nchans) {
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_IOCTL,
+ "%s: chan cnt mismatch, %d != %d\n", __func__,
+ reg->chaninfo.ic_nchans, nchans);
+ error = EINVAL;
+ } else
+ error = ieee80211_setregdomain(vap, reg);
+ }
+ free(reg, M_TEMP);
+
+ return (error == 0 ? ENETRESET : error);
+}
+
+static int
+ieee80211_ioctl_setroam(struct ieee80211vap *vap,
+ const struct ieee80211req *ireq)
+{
+ if (ireq->i_len != sizeof(vap->iv_roamparms))
+ return EINVAL;
+ /* XXX validate params */
+ /* XXX? ENETRESET to push to device? */
+ return copyin(ireq->i_data, vap->iv_roamparms,
+ sizeof(vap->iv_roamparms));
+}
+
+static int
+checkrate(const struct ieee80211_rateset *rs, int rate)
+{
+ int i;
+
+ if (rate == IEEE80211_FIXED_RATE_NONE)
+ return 1;
+ for (i = 0; i < rs->rs_nrates; i++)
+ if ((rs->rs_rates[i] & IEEE80211_RATE_VAL) == rate)
+ return 1;
+ return 0;
+}
+
+static int
+checkmcs(int mcs)
+{
+ if (mcs == IEEE80211_FIXED_RATE_NONE)
+ return 1;
+ if ((mcs & IEEE80211_RATE_MCS) == 0) /* MCS rates always have 0x80 set */
+ return 0;
+ return (mcs & 0x7f) <= 15; /* XXX could search ht rate set */
+}
+
+static __noinline int
+ieee80211_ioctl_settxparams(struct ieee80211vap *vap,
+ const struct ieee80211req *ireq)
+{
+ struct ieee80211com *ic = vap->iv_ic;
+ struct ieee80211_txparams_req parms; /* XXX stack use? */
+ struct ieee80211_txparam *src, *dst;
+ const struct ieee80211_rateset *rs;
+ int error, mode, changed, is11n, nmodes;
+
+ /* NB: accept short requests for backwards compat */
+ if (ireq->i_len > sizeof(parms))
+ return EINVAL;
+ error = copyin(ireq->i_data, &parms, ireq->i_len);
+ if (error != 0)
+ return error;
+ nmodes = ireq->i_len / sizeof(struct ieee80211_txparam);
+ changed = 0;
+ /* validate parameters and check if anything changed */
+ for (mode = IEEE80211_MODE_11A; mode < nmodes; mode++) {
+ if (isclr(ic->ic_modecaps, mode))
+ continue;
+ src = &parms.params[mode];
+ dst = &vap->iv_txparms[mode];
+ rs = &ic->ic_sup_rates[mode]; /* NB: 11n maps to legacy */
+ is11n = (mode == IEEE80211_MODE_11NA ||
+ mode == IEEE80211_MODE_11NG);
+ if (src->ucastrate != dst->ucastrate) {
+ if (!checkrate(rs, src->ucastrate) &&
+ (!is11n || !checkmcs(src->ucastrate)))
+ return EINVAL;
+ changed++;
+ }
+ if (src->mcastrate != dst->mcastrate) {
+ if (!checkrate(rs, src->mcastrate) &&
+ (!is11n || !checkmcs(src->mcastrate)))
+ return EINVAL;
+ changed++;
+ }
+ if (src->mgmtrate != dst->mgmtrate) {
+ if (!checkrate(rs, src->mgmtrate) &&
+ (!is11n || !checkmcs(src->mgmtrate)))
+ return EINVAL;
+ changed++;
+ }
+ if (src->maxretry != dst->maxretry) /* NB: no bounds */
+ changed++;
+ }
+ if (changed) {
+ /*
+ * Copy new parameters in place and notify the
+ * driver so it can push state to the device.
+ */
+ for (mode = IEEE80211_MODE_11A; mode < nmodes; mode++) {
+ if (isset(ic->ic_modecaps, mode))
+ vap->iv_txparms[mode] = parms.params[mode];
+ }
+ /*
+ * XXX could be more intelligent, e.g. don't reset
+ * if the setting is not being used.
+ */
+ return ENETRESET;
+ }
+ return 0;
+}
+
+/*
+ * Application Information Element support.
+ */
+static int
+setappie(struct ieee80211_appie **aie, const struct ieee80211req *ireq)
+{
+ struct ieee80211_appie *app = *aie;
+ struct ieee80211_appie *napp;
+ int error;
+
+ if (ireq->i_len == 0) { /* delete any existing ie */
+ if (app != NULL) {
+ *aie = NULL; /* XXX racy */
+ free(app, M_80211_NODE_IE);
+ }
+ return 0;
+ }
+ if (!(2 <= ireq->i_len && ireq->i_len <= IEEE80211_MAX_APPIE))
+ return EINVAL;
+ /*
+ * Allocate a new appie structure and copy in the user data.
+ * When done swap in the new structure. Note that we do not
+ * guard against users holding a ref to the old structure;
+ * this must be handled outside this code.
+ *
+ * XXX bad bad bad
+ */
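+ /* NB: ie_data is a flexible array at the tail of the structure, hence the single combined allocation */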
+ napp = (struct ieee80211_appie *) malloc(
+ sizeof(struct ieee80211_appie) + ireq->i_len, M_80211_NODE_IE, M_NOWAIT);
+ if (napp == NULL)
+ return ENOMEM;
+ /* XXX holding ic lock */
+ error = copyin(ireq->i_data, napp->ie_data, ireq->i_len);
+ if (error) {
+ free(napp, M_80211_NODE_IE);
+ return error;
+ }
+ napp->ie_len = ireq->i_len;
+ *aie = napp;
+ if (app != NULL)
+ free(app, M_80211_NODE_IE);
+ return 0;
+}
+
+static void
+setwparsnie(struct ieee80211vap *vap, uint8_t *ie, int space)
+{
+ /* validate data is present as best we can */
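+ /* NB: ie[0] is the element id, ie[1] the payload length */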
+ if (space == 0 || 2+ie[1] > space)
+ return;
+ if (ie[0] == IEEE80211_ELEMID_VENDOR)
+ vap->iv_wpa_ie = ie;
+ else if (ie[0] == IEEE80211_ELEMID_RSN)
+ vap->iv_rsn_ie = ie;
+}
+
+static __noinline int
+ieee80211_ioctl_setappie_locked(struct ieee80211vap *vap,
+ const struct ieee80211req *ireq, int fc0)
+{
+ int error;
+
+ IEEE80211_LOCK_ASSERT(vap->iv_ic);
+
+ switch (fc0 & IEEE80211_FC0_SUBTYPE_MASK) {
+ case IEEE80211_FC0_SUBTYPE_BEACON:
+ if (vap->iv_opmode != IEEE80211_M_HOSTAP &&
+ vap->iv_opmode != IEEE80211_M_IBSS) {
+ error = EINVAL;
+ break;
+ }
+ error = setappie(&vap->iv_appie_beacon, ireq);
+ if (error == 0)
+ ieee80211_beacon_notify(vap, IEEE80211_BEACON_APPIE);
+ break;
+ case IEEE80211_FC0_SUBTYPE_PROBE_RESP:
+ error = setappie(&vap->iv_appie_proberesp, ireq);
+ break;
+ case IEEE80211_FC0_SUBTYPE_ASSOC_RESP:
+ if (vap->iv_opmode == IEEE80211_M_HOSTAP)
+ error = setappie(&vap->iv_appie_assocresp, ireq);
+ else
+ error = EINVAL;
+ break;
+ case IEEE80211_FC0_SUBTYPE_PROBE_REQ:
+ error = setappie(&vap->iv_appie_probereq, ireq);
+ break;
+ case IEEE80211_FC0_SUBTYPE_ASSOC_REQ:
+ if (vap->iv_opmode == IEEE80211_M_STA)
+ error = setappie(&vap->iv_appie_assocreq, ireq);
+ else
+ error = EINVAL;
+ break;
+ case (IEEE80211_APPIE_WPA & IEEE80211_FC0_SUBTYPE_MASK):
+ error = setappie(&vap->iv_appie_wpa, ireq);
+ if (error == 0) {
+ /*
+ * Must split single blob of data into separate
+ * WPA and RSN ie's because they go in different
+ * locations in the mgt frames.
+ * XXX use IEEE80211_IOC_WPA2 so user code does split
+ */
+ vap->iv_wpa_ie = NULL;
+ vap->iv_rsn_ie = NULL;
+ if (vap->iv_appie_wpa != NULL) {
+ struct ieee80211_appie *appie =
+ vap->iv_appie_wpa;
+ uint8_t *data = appie->ie_data;
+
+ /* XXX ie length validate is painful, cheat */
+ setwparsnie(vap, data, appie->ie_len);
+ setwparsnie(vap, data + 2 + data[1],
+ appie->ie_len - (2 + data[1]));
+ }
+ if (vap->iv_opmode == IEEE80211_M_HOSTAP ||
+ vap->iv_opmode == IEEE80211_M_IBSS) {
+ /*
+ * Must rebuild beacon frame as the update
+ * mechanism doesn't handle WPA/RSN ie's.
+ * Could extend it but it doesn't normally
+ * change; this is just to deal with hostapd
+ * plumbing the ie after the interface is up.
+ */
+ error = ENETRESET;
+ }
+ }
+ break;
+ default:
+ error = EINVAL;
+ break;
+ }
+ return error;
+}
+
+static __noinline int
+ieee80211_ioctl_setappie(struct ieee80211vap *vap,
+ const struct ieee80211req *ireq)
+{
+ struct ieee80211com *ic = vap->iv_ic;
+ int error;
+ uint8_t fc0;
+
+ fc0 = ireq->i_val & 0xff;
+ if ((fc0 & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_MGT)
+ return EINVAL;
+ /* NB: could check iv_opmode and reject but hardly worth the effort */
+ IEEE80211_LOCK(ic);
+ error = ieee80211_ioctl_setappie_locked(vap, ireq, fc0);
+ IEEE80211_UNLOCK(ic);
+ return error;
+}
+
+static __noinline int
+ieee80211_ioctl_chanswitch(struct ieee80211vap *vap, struct ieee80211req *ireq)
+{
+ struct ieee80211com *ic = vap->iv_ic;
+ struct ieee80211_chanswitch_req csr;
+ struct ieee80211_channel *c;
+ int error;
+
+ if (ireq->i_len != sizeof(csr))
+ return EINVAL;
+ error = copyin(ireq->i_data, &csr, sizeof(csr));
+ if (error != 0)
+ return error;
+ /* XXX adhoc mode not supported */
+ if (vap->iv_opmode != IEEE80211_M_HOSTAP ||
+ (vap->iv_flags & IEEE80211_F_DOTH) == 0)
+ return EOPNOTSUPP;
+ c = ieee80211_find_channel(ic,
+ csr.csa_chan.ic_freq, csr.csa_chan.ic_flags);
+ if (c == NULL)
+ return ENOENT;
+ IEEE80211_LOCK(ic);
+ if ((ic->ic_flags & IEEE80211_F_CSAPENDING) == 0)
+ ieee80211_csa_startswitch(ic, c, csr.csa_mode, csr.csa_count);
+ else if (csr.csa_count == 0)
+ ieee80211_csa_cancelswitch(ic);
+ else
+ error = EBUSY;
+ IEEE80211_UNLOCK(ic);
+ return error;
+}
+
+static __noinline int
+ieee80211_ioctl_scanreq(struct ieee80211vap *vap, struct ieee80211req *ireq)
+{
+#define IEEE80211_IOC_SCAN_FLAGS \
+ (IEEE80211_IOC_SCAN_NOPICK | IEEE80211_IOC_SCAN_ACTIVE | \
+ IEEE80211_IOC_SCAN_PICK1ST | IEEE80211_IOC_SCAN_BGSCAN | \
+ IEEE80211_IOC_SCAN_ONCE | IEEE80211_IOC_SCAN_NOBCAST | \
+ IEEE80211_IOC_SCAN_NOJOIN | IEEE80211_IOC_SCAN_FLUSH | \
+ IEEE80211_IOC_SCAN_CHECK)
+ struct ieee80211com *ic = vap->iv_ic;
+ struct ieee80211_scan_req sr; /* XXX off stack? */
+ int error, i;
+
+ /* NB: parent must be running */
+ if ((ic->ic_ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
+ return ENXIO;
+
+ if (ireq->i_len != sizeof(sr))
+ return EINVAL;
+ error = copyin(ireq->i_data, &sr, sizeof(sr));
+ if (error != 0)
+ return error;
+ /* convert duration */
+ if (sr.sr_duration == IEEE80211_IOC_SCAN_FOREVER)
+ sr.sr_duration = IEEE80211_SCAN_FOREVER;
+ else {
+ if (sr.sr_duration < IEEE80211_IOC_SCAN_DURATION_MIN ||
+ sr.sr_duration > IEEE80211_IOC_SCAN_DURATION_MAX)
+ return EINVAL;
+ sr.sr_duration = msecs_to_ticks(sr.sr_duration);
+ if (sr.sr_duration < 1)
+ sr.sr_duration = 1;
+ }
+ /* convert min/max channel dwell */
+ if (sr.sr_mindwell != 0) {
+ sr.sr_mindwell = msecs_to_ticks(sr.sr_mindwell);
+ if (sr.sr_mindwell < 1)
+ sr.sr_mindwell = 1;
+ }
+ if (sr.sr_maxdwell != 0) {
+ sr.sr_maxdwell = msecs_to_ticks(sr.sr_maxdwell);
+ if (sr.sr_maxdwell < 1)
+ sr.sr_maxdwell = 1;
+ }
+ /* NB: silently reduce ssid count to what is supported */
+ if (sr.sr_nssid > IEEE80211_SCAN_MAX_SSID)
+ sr.sr_nssid = IEEE80211_SCAN_MAX_SSID;
+ for (i = 0; i < sr.sr_nssid; i++)
+ if (sr.sr_ssid[i].len > IEEE80211_NWID_LEN)
+ return EINVAL;
+ /* cleanse flags just in case, could reject if invalid flags */
+ sr.sr_flags &= IEEE80211_IOC_SCAN_FLAGS;
+ /*
+ * Add an implicit NOPICK if the vap is not marked UP. This
+ * allows applications to scan without joining a bss (or picking
+ * a channel and setting up a bss) and without forcing manual
+ * roaming mode--you just need to mark the parent device UP.
+ */
+ if ((vap->iv_ifp->if_flags & IFF_UP) == 0)
+ sr.sr_flags |= IEEE80211_IOC_SCAN_NOPICK;
+
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_SCAN,
+ "%s: flags 0x%x%s duration 0x%x mindwell %u maxdwell %u nssid %d\n",
+ __func__, sr.sr_flags,
+ (vap->iv_ifp->if_flags & IFF_UP) == 0 ? " (!IFF_UP)" : "",
+ sr.sr_duration, sr.sr_mindwell, sr.sr_maxdwell, sr.sr_nssid);
+ /*
+ * If we are in INIT state then the driver has never had a chance
+ * to setup hardware state to do a scan; we must use the state
+ * machine to get us up to the SCAN state but once we reach SCAN
+ * state we then want to use the supplied params. Stash the
+ * parameters in the vap and mark IEEE80211_FEXT_SCANREQ; the
+ * state machines will recognize this and use the stashed params
+ * to issue the scan request.
+ *
+ * Otherwise just invoke the scan machinery directly.
+ */
+ IEEE80211_LOCK(ic);
+ if (vap->iv_state == IEEE80211_S_INIT) {
+ /* NB: clobbers previous settings */
+ vap->iv_scanreq_flags = sr.sr_flags;
+ vap->iv_scanreq_duration = sr.sr_duration;
+ vap->iv_scanreq_nssid = sr.sr_nssid;
+ for (i = 0; i < sr.sr_nssid; i++) {
+ vap->iv_scanreq_ssid[i].len = sr.sr_ssid[i].len;
+ memcpy(vap->iv_scanreq_ssid[i].ssid, sr.sr_ssid[i].ssid,
+ sr.sr_ssid[i].len);
+ }
+ vap->iv_flags_ext |= IEEE80211_FEXT_SCANREQ;
+ IEEE80211_UNLOCK(ic);
+ ieee80211_new_state(vap, IEEE80211_S_SCAN, 0);
+ } else {
+ vap->iv_flags_ext &= ~IEEE80211_FEXT_SCANREQ;
+ IEEE80211_UNLOCK(ic);
+ /* XXX need error return codes */
+ if (sr.sr_flags & IEEE80211_IOC_SCAN_CHECK) {
+ (void) ieee80211_check_scan(vap, sr.sr_flags,
+ sr.sr_duration, sr.sr_mindwell, sr.sr_maxdwell,
+ sr.sr_nssid,
+ /* NB: cheat, we assume structures are compatible */
+ (const struct ieee80211_scan_ssid *) &sr.sr_ssid[0]);
+ } else {
+ (void) ieee80211_start_scan(vap, sr.sr_flags,
+ sr.sr_duration, sr.sr_mindwell, sr.sr_maxdwell,
+ sr.sr_nssid,
+ /* NB: cheat, we assume structures are compatible */
+ (const struct ieee80211_scan_ssid *) &sr.sr_ssid[0]);
+ }
+ }
+ return error;
+#undef IEEE80211_IOC_SCAN_FLAGS
+}
+
+static __noinline int
+ieee80211_ioctl_setstavlan(struct ieee80211vap *vap, struct ieee80211req *ireq)
+{
+ struct ieee80211_node *ni;
+ struct ieee80211req_sta_vlan vlan;
+ int error;
+
+ if (ireq->i_len != sizeof(vlan))
+ return EINVAL;
+ error = copyin(ireq->i_data, &vlan, sizeof(vlan));
+ if (error != 0)
+ return error;
+ if (!IEEE80211_ADDR_EQ(vlan.sv_macaddr, zerobssid)) {
+ ni = ieee80211_find_vap_node(&vap->iv_ic->ic_sta, vap,
+ vlan.sv_macaddr);
+ if (ni == NULL)
+ return ENOENT;
+ } else
+ ni = ieee80211_ref_node(vap->iv_bss);
+ ni->ni_vlan = vlan.sv_vlan;
+ ieee80211_free_node(ni);
+ return error;
+}
+
+static int
+isvap11g(const struct ieee80211vap *vap)
+{
+ const struct ieee80211_node *bss = vap->iv_bss;
+ return bss->ni_chan != IEEE80211_CHAN_ANYC &&
+ IEEE80211_IS_CHAN_ANYG(bss->ni_chan);
+}
+
+static int
+isvapht(const struct ieee80211vap *vap)
+{
+ const struct ieee80211_node *bss = vap->iv_bss;
+ return bss->ni_chan != IEEE80211_CHAN_ANYC &&
+ IEEE80211_IS_CHAN_HT(bss->ni_chan);
+}
+
+/*
+ * Dummy ioctl set handler so the linker set is defined.
+ */
+static int
+dummy_ioctl_set(struct ieee80211vap *vap, struct ieee80211req *ireq)
+{
+ return ENOSYS;
+}
+IEEE80211_IOCTL_SET(dummy, dummy_ioctl_set);
+
+static int
+ieee80211_ioctl_setdefault(struct ieee80211vap *vap, struct ieee80211req *ireq)
+{
+ ieee80211_ioctl_setfunc * const *set;
+ int error;
+
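+ /* try each registered handler until one claims the request (!= ENOSYS) */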
+ SET_FOREACH(set, ieee80211_ioctl_setset) {
+ error = (*set)(vap, ireq);
+ if (error != ENOSYS)
+ return error;
+ }
+ return EINVAL;
+}
+
+static __noinline int
+ieee80211_ioctl_set80211(struct ieee80211vap *vap, u_long cmd, struct ieee80211req *ireq)
+{
+ struct ieee80211com *ic = vap->iv_ic;
+ int error;
+ const struct ieee80211_authenticator *auth;
+ uint8_t tmpkey[IEEE80211_KEYBUF_SIZE];
+ char tmpssid[IEEE80211_NWID_LEN];
+ uint8_t tmpbssid[IEEE80211_ADDR_LEN];
+ struct ieee80211_key *k;
+ u_int kid;
+ uint32_t flags;
+
+ error = 0;
+ switch (ireq->i_type) {
+ case IEEE80211_IOC_SSID:
+ if (ireq->i_val != 0 ||
+ ireq->i_len > IEEE80211_NWID_LEN)
+ return EINVAL;
+ error = copyin(ireq->i_data, tmpssid, ireq->i_len);
+ if (error)
+ break;
+ memset(vap->iv_des_ssid[0].ssid, 0, IEEE80211_NWID_LEN);
+ vap->iv_des_ssid[0].len = ireq->i_len;
+ memcpy(vap->iv_des_ssid[0].ssid, tmpssid, ireq->i_len);
+ vap->iv_des_nssid = (ireq->i_len > 0);
+ error = ENETRESET;
+ break;
+ case IEEE80211_IOC_WEP:
+ switch (ireq->i_val) {
+ case IEEE80211_WEP_OFF:
+ vap->iv_flags &= ~IEEE80211_F_PRIVACY;
+ vap->iv_flags &= ~IEEE80211_F_DROPUNENC;
+ break;
+ case IEEE80211_WEP_ON:
+ vap->iv_flags |= IEEE80211_F_PRIVACY;
+ vap->iv_flags |= IEEE80211_F_DROPUNENC;
+ break;
+ case IEEE80211_WEP_MIXED:
+ vap->iv_flags |= IEEE80211_F_PRIVACY;
+ vap->iv_flags &= ~IEEE80211_F_DROPUNENC;
+ break;
+ }
+ error = ENETRESET;
+ break;
+ case IEEE80211_IOC_WEPKEY:
+ kid = (u_int) ireq->i_val;
+ if (kid >= IEEE80211_WEP_NKID)
+ return EINVAL;
+ k = &vap->iv_nw_keys[kid];
+ if (ireq->i_len == 0) {
+ /* zero-len =>'s delete any existing key */
+ (void) ieee80211_crypto_delkey(vap, k);
+ break;
+ }
+ if (ireq->i_len > sizeof(tmpkey))
+ return EINVAL;
+ memset(tmpkey, 0, sizeof(tmpkey));
+ error = copyin(ireq->i_data, tmpkey, ireq->i_len);
+ if (error)
+ break;
+ ieee80211_key_update_begin(vap);
+ k->wk_keyix = kid; /* NB: force fixed key id */
+ if (ieee80211_crypto_newkey(vap, IEEE80211_CIPHER_WEP,
+ IEEE80211_KEY_XMIT | IEEE80211_KEY_RECV, k)) {
+ k->wk_keylen = ireq->i_len;
+ memcpy(k->wk_key, tmpkey, sizeof(tmpkey));
+ IEEE80211_ADDR_COPY(k->wk_macaddr, vap->iv_myaddr);
+ if (!ieee80211_crypto_setkey(vap, k))
+ error = EINVAL;
+ } else
+ error = EINVAL;
+ ieee80211_key_update_end(vap);
+ break;
+ case IEEE80211_IOC_WEPTXKEY:
+ kid = (u_int) ireq->i_val;
+ if (kid >= IEEE80211_WEP_NKID &&
+ (uint16_t) kid != IEEE80211_KEYIX_NONE)
+ return EINVAL;
+ vap->iv_def_txkey = kid;
+ break;
+ case IEEE80211_IOC_AUTHMODE:
+ switch (ireq->i_val) {
+ case IEEE80211_AUTH_WPA:
+ case IEEE80211_AUTH_8021X: /* 802.1x */
+ case IEEE80211_AUTH_OPEN: /* open */
+ case IEEE80211_AUTH_SHARED: /* shared-key */
+ case IEEE80211_AUTH_AUTO: /* auto */
+ auth = ieee80211_authenticator_get(ireq->i_val);
+ if (auth == NULL)
+ return EINVAL;
+ break;
+ default:
+ return EINVAL;
+ }
+ switch (ireq->i_val) {
+ case IEEE80211_AUTH_WPA: /* WPA w/ 802.1x */
+ vap->iv_flags |= IEEE80211_F_PRIVACY;
+ ireq->i_val = IEEE80211_AUTH_8021X;
+ break;
+ case IEEE80211_AUTH_OPEN: /* open */
+ vap->iv_flags &= ~(IEEE80211_F_WPA|IEEE80211_F_PRIVACY);
+ break;
+ case IEEE80211_AUTH_SHARED: /* shared-key */
+ case IEEE80211_AUTH_8021X: /* 802.1x */
+ vap->iv_flags &= ~IEEE80211_F_WPA;
+ /* both require a key so mark the PRIVACY capability */
+ vap->iv_flags |= IEEE80211_F_PRIVACY;
+ break;
+ case IEEE80211_AUTH_AUTO: /* auto */
+ vap->iv_flags &= ~IEEE80211_F_WPA;
+ /* XXX PRIVACY handling? */
+ /* XXX what's the right way to do this? */
+ break;
+ }
+ /* NB: authenticator attach/detach happens on state change */
+ vap->iv_bss->ni_authmode = ireq->i_val;
+ /* XXX mixed/mode/usage? */
+ vap->iv_auth = auth;
+ error = ENETRESET;
+ break;
+ case IEEE80211_IOC_CHANNEL:
+ error = ieee80211_ioctl_setchannel(vap, ireq);
+ break;
+ case IEEE80211_IOC_POWERSAVE:
+ switch (ireq->i_val) {
+ case IEEE80211_POWERSAVE_OFF:
+ if (vap->iv_flags & IEEE80211_F_PMGTON) {
+ ieee80211_syncflag(vap, -IEEE80211_F_PMGTON);
+ error = ERESTART;
+ }
+ break;
+ case IEEE80211_POWERSAVE_ON:
+ if ((vap->iv_caps & IEEE80211_C_PMGT) == 0)
+ error = EOPNOTSUPP;
+ else if ((vap->iv_flags & IEEE80211_F_PMGTON) == 0) {
+ ieee80211_syncflag(vap, IEEE80211_F_PMGTON);
+ error = ERESTART;
+ }
+ break;
+ default:
+ error = EINVAL;
+ break;
+ }
+ break;
+ case IEEE80211_IOC_POWERSAVESLEEP:
+ if (ireq->i_val < 0)
+ return EINVAL;
+ ic->ic_lintval = ireq->i_val;
+ error = ERESTART;
+ break;
+ case IEEE80211_IOC_RTSTHRESHOLD:
+ if (!(IEEE80211_RTS_MIN <= ireq->i_val &&
+ ireq->i_val <= IEEE80211_RTS_MAX))
+ return EINVAL;
+ vap->iv_rtsthreshold = ireq->i_val;
+ error = ERESTART;
+ break;
+ case IEEE80211_IOC_PROTMODE:
+ if (ireq->i_val > IEEE80211_PROT_RTSCTS)
+ return EINVAL;
+ ic->ic_protmode = ireq->i_val;
+ /* NB: if not operating in 11g this can wait */
+ if (ic->ic_bsschan != IEEE80211_CHAN_ANYC &&
+ IEEE80211_IS_CHAN_ANYG(ic->ic_bsschan))
+ error = ERESTART;
+ break;
+ case IEEE80211_IOC_TXPOWER:
+ if ((ic->ic_caps & IEEE80211_C_TXPMGT) == 0)
+ return EOPNOTSUPP;
+ if (!(IEEE80211_TXPOWER_MIN <= ireq->i_val &&
+ ireq->i_val <= IEEE80211_TXPOWER_MAX))
+ return EINVAL;
+ ic->ic_txpowlimit = ireq->i_val;
+ error = ERESTART;
+ break;
+ case IEEE80211_IOC_ROAMING:
+ if (!(IEEE80211_ROAMING_DEVICE <= ireq->i_val &&
+ ireq->i_val <= IEEE80211_ROAMING_MANUAL))
+ return EINVAL;
+ vap->iv_roaming = ireq->i_val;
+ /* XXX reset? */
+ break;
+ case IEEE80211_IOC_PRIVACY:
+ if (ireq->i_val) {
+ /* XXX check for key state? */
+ vap->iv_flags |= IEEE80211_F_PRIVACY;
+ } else
+ vap->iv_flags &= ~IEEE80211_F_PRIVACY;
+ /* XXX ERESTART? */
+ break;
+ case IEEE80211_IOC_DROPUNENCRYPTED:
+ if (ireq->i_val)
+ vap->iv_flags |= IEEE80211_F_DROPUNENC;
+ else
+ vap->iv_flags &= ~IEEE80211_F_DROPUNENC;
+ /* XXX ERESTART? */
+ break;
+ case IEEE80211_IOC_WPAKEY:
+ error = ieee80211_ioctl_setkey(vap, ireq);
+ break;
+ case IEEE80211_IOC_DELKEY:
+ error = ieee80211_ioctl_delkey(vap, ireq);
+ break;
+ case IEEE80211_IOC_MLME:
+ error = ieee80211_ioctl_setmlme(vap, ireq);
+ break;
+ case IEEE80211_IOC_COUNTERMEASURES:
+ if (ireq->i_val) {
+ if ((vap->iv_flags & IEEE80211_F_WPA) == 0)
+ return EOPNOTSUPP;
+ vap->iv_flags |= IEEE80211_F_COUNTERM;
+ } else
+ vap->iv_flags &= ~IEEE80211_F_COUNTERM;
+ /* XXX ERESTART? */
+ break;
+ case IEEE80211_IOC_WPA:
+ if (ireq->i_val > 3)
+ return EINVAL;
+ /* XXX verify ciphers available */
+ flags = vap->iv_flags & ~IEEE80211_F_WPA;
+ switch (ireq->i_val) {
+ case 1:
+ if (!(vap->iv_caps & IEEE80211_C_WPA1))
+ return EOPNOTSUPP;
+ flags |= IEEE80211_F_WPA1;
+ break;
+ case 2:
+ if (!(vap->iv_caps & IEEE80211_C_WPA2))
+ return EOPNOTSUPP;
+ flags |= IEEE80211_F_WPA2;
+ break;
+ case 3:
+ if ((vap->iv_caps & IEEE80211_C_WPA) != IEEE80211_C_WPA)
+ return EOPNOTSUPP;
+ flags |= IEEE80211_F_WPA1 | IEEE80211_F_WPA2;
+ break;
+ default: /* Can't set any -> error */
+ return EOPNOTSUPP;
+ }
+ vap->iv_flags = flags;
+ error = ERESTART; /* NB: can change beacon frame */
+ break;
+ case IEEE80211_IOC_WME:
+ if (ireq->i_val) {
+ if ((vap->iv_caps & IEEE80211_C_WME) == 0)
+ return EOPNOTSUPP;
+ ieee80211_syncflag(vap, IEEE80211_F_WME);
+ } else
+ ieee80211_syncflag(vap, -IEEE80211_F_WME);
+ error = ERESTART; /* NB: can change beacon frame */
+ break;
+ case IEEE80211_IOC_HIDESSID:
+ if (ireq->i_val)
+ vap->iv_flags |= IEEE80211_F_HIDESSID;
+ else
+ vap->iv_flags &= ~IEEE80211_F_HIDESSID;
+ error = ERESTART; /* XXX ENETRESET? */
+ break;
+ case IEEE80211_IOC_APBRIDGE:
+ if (ireq->i_val == 0)
+ vap->iv_flags |= IEEE80211_F_NOBRIDGE;
+ else
+ vap->iv_flags &= ~IEEE80211_F_NOBRIDGE;
+ break;
+ case IEEE80211_IOC_BSSID:
+ if (ireq->i_len != sizeof(tmpbssid))
+ return EINVAL;
+ error = copyin(ireq->i_data, tmpbssid, ireq->i_len);
+ if (error)
+ break;
+ IEEE80211_ADDR_COPY(vap->iv_des_bssid, tmpbssid);
+ if (IEEE80211_ADDR_EQ(vap->iv_des_bssid, zerobssid))
+ vap->iv_flags &= ~IEEE80211_F_DESBSSID;
+ else
+ vap->iv_flags |= IEEE80211_F_DESBSSID;
+ error = ENETRESET;
+ break;
+ case IEEE80211_IOC_CHANLIST:
+ error = ieee80211_ioctl_setchanlist(vap, ireq);
+ break;
+#define OLD_IEEE80211_IOC_SCAN_REQ 23
+#ifdef OLD_IEEE80211_IOC_SCAN_REQ
+ case OLD_IEEE80211_IOC_SCAN_REQ:
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_SCAN,
+ "%s: active scan request\n", __func__);
+ /*
+ * If we are in INIT state then the driver has never
+ * had a chance to setup hardware state to do a scan;
+ * use the state machine to get us up the SCAN state.
+ * Otherwise just invoke the scan machinery to start
+ * a one-time scan.
+ */
+ if (vap->iv_state == IEEE80211_S_INIT)
+ ieee80211_new_state(vap, IEEE80211_S_SCAN, 0);
+ else
+ (void) ieee80211_start_scan(vap,
+ IEEE80211_SCAN_ACTIVE |
+ IEEE80211_SCAN_NOPICK |
+ IEEE80211_SCAN_ONCE,
+ IEEE80211_SCAN_FOREVER, 0, 0,
+ /* XXX use ioctl params */
+ vap->iv_des_nssid, vap->iv_des_ssid);
+ break;
+#endif /* OLD_IEEE80211_IOC_SCAN_REQ */
+ case IEEE80211_IOC_SCAN_REQ:
+ error = ieee80211_ioctl_scanreq(vap, ireq);
+ break;
+ case IEEE80211_IOC_SCAN_CANCEL:
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_SCAN,
+ "%s: cancel scan\n", __func__);
+ ieee80211_cancel_scan(vap);
+ break;
+ case IEEE80211_IOC_HTCONF:
+ if (ireq->i_val & 1)
+ ieee80211_syncflag_ht(vap, IEEE80211_FHT_HT);
+ else
+ ieee80211_syncflag_ht(vap, -IEEE80211_FHT_HT);
+ if (ireq->i_val & 2)
+ ieee80211_syncflag_ht(vap, IEEE80211_FHT_USEHT40);
+ else
+ ieee80211_syncflag_ht(vap, -IEEE80211_FHT_USEHT40);
+ error = ENETRESET;
+ break;
+ case IEEE80211_IOC_ADDMAC:
+ case IEEE80211_IOC_DELMAC:
+ error = ieee80211_ioctl_macmac(vap, ireq);
+ break;
+ case IEEE80211_IOC_MACCMD:
+ error = ieee80211_ioctl_setmaccmd(vap, ireq);
+ break;
+ case IEEE80211_IOC_STA_STATS:
+ error = ieee80211_ioctl_setstastats(vap, ireq);
+ break;
+ case IEEE80211_IOC_STA_TXPOW:
+ error = ieee80211_ioctl_setstatxpow(vap, ireq);
+ break;
+ case IEEE80211_IOC_WME_CWMIN: /* WME: CWmin */
+ case IEEE80211_IOC_WME_CWMAX: /* WME: CWmax */
+ case IEEE80211_IOC_WME_AIFS: /* WME: AIFS */
+ case IEEE80211_IOC_WME_TXOPLIMIT: /* WME: txops limit */
+ case IEEE80211_IOC_WME_ACM: /* WME: ACM (bss only) */
+ case IEEE80211_IOC_WME_ACKPOLICY: /* WME: ACK policy (!bss only) */
+ error = ieee80211_ioctl_setwmeparam(vap, ireq);
+ break;
+ case IEEE80211_IOC_DTIM_PERIOD:
+ if (vap->iv_opmode != IEEE80211_M_HOSTAP &&
+ vap->iv_opmode != IEEE80211_M_MBSS &&
+ vap->iv_opmode != IEEE80211_M_IBSS)
+ return EINVAL;
+ if (IEEE80211_DTIM_MIN <= ireq->i_val &&
+ ireq->i_val <= IEEE80211_DTIM_MAX) {
+ vap->iv_dtim_period = ireq->i_val;
+ error = ENETRESET; /* requires restart */
+ } else
+ error = EINVAL;
+ break;
+ case IEEE80211_IOC_BEACON_INTERVAL:
+ if (vap->iv_opmode != IEEE80211_M_HOSTAP &&
+ vap->iv_opmode != IEEE80211_M_MBSS &&
+ vap->iv_opmode != IEEE80211_M_IBSS)
+ return EINVAL;
+ if (IEEE80211_BINTVAL_MIN <= ireq->i_val &&
+ ireq->i_val <= IEEE80211_BINTVAL_MAX) {
+ ic->ic_bintval = ireq->i_val;
+ error = ENETRESET; /* requires restart */
+ } else
+ error = EINVAL;
+ break;
+ case IEEE80211_IOC_PUREG:
+ if (ireq->i_val)
+ vap->iv_flags |= IEEE80211_F_PUREG;
+ else
+ vap->iv_flags &= ~IEEE80211_F_PUREG;
+ /* NB: reset only if we're operating on an 11g channel */
+ if (isvap11g(vap))
+ error = ENETRESET;
+ break;
+ case IEEE80211_IOC_BGSCAN:
+ if (ireq->i_val) {
+ if ((vap->iv_caps & IEEE80211_C_BGSCAN) == 0)
+ return EOPNOTSUPP;
+ vap->iv_flags |= IEEE80211_F_BGSCAN;
+ } else
+ vap->iv_flags &= ~IEEE80211_F_BGSCAN;
+ break;
+ case IEEE80211_IOC_BGSCAN_IDLE:
+ if (ireq->i_val >= IEEE80211_BGSCAN_IDLE_MIN)
+ vap->iv_bgscanidle = ireq->i_val*hz/1000;
+ else
+ error = EINVAL;
+ break;
+ case IEEE80211_IOC_BGSCAN_INTERVAL:
+ if (ireq->i_val >= IEEE80211_BGSCAN_INTVAL_MIN)
+ vap->iv_bgscanintvl = ireq->i_val*hz;
+ else
+ error = EINVAL;
+ break;
+ case IEEE80211_IOC_SCANVALID:
+ if (ireq->i_val >= IEEE80211_SCAN_VALID_MIN)
+ vap->iv_scanvalid = ireq->i_val*hz;
+ else
+ error = EINVAL;
+ break;
+ case IEEE80211_IOC_FRAGTHRESHOLD:
+ if ((vap->iv_caps & IEEE80211_C_TXFRAG) == 0 &&
+ ireq->i_val != IEEE80211_FRAG_MAX)
+ return EOPNOTSUPP;
+ if (!(IEEE80211_FRAG_MIN <= ireq->i_val &&
+ ireq->i_val <= IEEE80211_FRAG_MAX))
+ return EINVAL;
+ vap->iv_fragthreshold = ireq->i_val;
+ error = ERESTART;
+ break;
+ case IEEE80211_IOC_BURST:
+ if (ireq->i_val) {
+ if ((vap->iv_caps & IEEE80211_C_BURST) == 0)
+ return EOPNOTSUPP;
+ ieee80211_syncflag(vap, IEEE80211_F_BURST);
+ } else
+ ieee80211_syncflag(vap, -IEEE80211_F_BURST);
+ error = ERESTART;
+ break;
+ case IEEE80211_IOC_BMISSTHRESHOLD:
+ if (!(IEEE80211_HWBMISS_MIN <= ireq->i_val &&
+ ireq->i_val <= IEEE80211_HWBMISS_MAX))
+ return EINVAL;
+ vap->iv_bmissthreshold = ireq->i_val;
+ error = ERESTART;
+ break;
+ case IEEE80211_IOC_CURCHAN:
+ error = ieee80211_ioctl_setcurchan(vap, ireq);
+ break;
+ case IEEE80211_IOC_SHORTGI:
+ if (ireq->i_val) {
+#define IEEE80211_HTCAP_SHORTGI \
+ (IEEE80211_HTCAP_SHORTGI20 | IEEE80211_HTCAP_SHORTGI40)
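+ /* NB: the requested SGI bits must match the device capabilities exactly */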
+ if (((ireq->i_val ^ vap->iv_htcaps) & IEEE80211_HTCAP_SHORTGI) != 0)
+ return EINVAL;
+ if (ireq->i_val & IEEE80211_HTCAP_SHORTGI20)
+ vap->iv_flags_ht |= IEEE80211_FHT_SHORTGI20;
+ if (ireq->i_val & IEEE80211_HTCAP_SHORTGI40)
+ vap->iv_flags_ht |= IEEE80211_FHT_SHORTGI40;
+#undef IEEE80211_HTCAP_SHORTGI
+ } else
+ vap->iv_flags_ht &=
+ ~(IEEE80211_FHT_SHORTGI20 | IEEE80211_FHT_SHORTGI40);
+ error = ERESTART;
+ break;
+ case IEEE80211_IOC_AMPDU:
+ if (ireq->i_val && (vap->iv_htcaps & IEEE80211_HTC_AMPDU) == 0)
+ return EINVAL;
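+ /* NB: bit 0 of i_val enables tx aggregation, bit 1 enables rx */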
+ if (ireq->i_val & 1)
+ vap->iv_flags_ht |= IEEE80211_FHT_AMPDU_TX;
+ else
+ vap->iv_flags_ht &= ~IEEE80211_FHT_AMPDU_TX;
+ if (ireq->i_val & 2)
+ vap->iv_flags_ht |= IEEE80211_FHT_AMPDU_RX;
+ else
+ vap->iv_flags_ht &= ~IEEE80211_FHT_AMPDU_RX;
+ /* NB: reset only if we're operating on an 11n channel */
+ if (isvapht(vap))
+ error = ERESTART;
+ break;
+ case IEEE80211_IOC_AMPDU_LIMIT:
+ if (!(IEEE80211_HTCAP_MAXRXAMPDU_8K <= ireq->i_val &&
+ ireq->i_val <= IEEE80211_HTCAP_MAXRXAMPDU_64K))
+ return EINVAL;
+ if (vap->iv_opmode == IEEE80211_M_HOSTAP)
+ vap->iv_ampdu_rxmax = ireq->i_val;
+ else
+ vap->iv_ampdu_limit = ireq->i_val;
+ error = ERESTART;
+ break;
+ case IEEE80211_IOC_AMPDU_DENSITY:
+ if (!(IEEE80211_HTCAP_MPDUDENSITY_NA <= ireq->i_val &&
+ ireq->i_val <= IEEE80211_HTCAP_MPDUDENSITY_16))
+ return EINVAL;
+ vap->iv_ampdu_density = ireq->i_val;
+ error = ERESTART;
+ break;
+ case IEEE80211_IOC_AMSDU:
+ if (ireq->i_val && (vap->iv_htcaps & IEEE80211_HTC_AMSDU) == 0)
+ return EINVAL;
+ if (ireq->i_val & 1)
+ vap->iv_flags_ht |= IEEE80211_FHT_AMSDU_TX;
+ else
+ vap->iv_flags_ht &= ~IEEE80211_FHT_AMSDU_TX;
+ if (ireq->i_val & 2)
+ vap->iv_flags_ht |= IEEE80211_FHT_AMSDU_RX;
+ else
+ vap->iv_flags_ht &= ~IEEE80211_FHT_AMSDU_RX;
+ /* NB: reset only if we're operating on an 11n channel */
+ if (isvapht(vap))
+ error = ERESTART;
+ break;
+ case IEEE80211_IOC_AMSDU_LIMIT:
+ /* XXX validate */
+ vap->iv_amsdu_limit = ireq->i_val; /* XXX truncation? */
+ break;
+ case IEEE80211_IOC_PUREN:
+ if (ireq->i_val) {
+ if ((vap->iv_flags_ht & IEEE80211_FHT_HT) == 0)
+ return EINVAL;
+ vap->iv_flags_ht |= IEEE80211_FHT_PUREN;
+ } else
+ vap->iv_flags_ht &= ~IEEE80211_FHT_PUREN;
+ /* NB: reset only if we're operating on an 11n channel */
+ if (isvapht(vap))
+ error = ERESTART;
+ break;
+ case IEEE80211_IOC_DOTH:
+ if (ireq->i_val) {
+#if 0
+ /* XXX no capability */
+ if ((vap->iv_caps & IEEE80211_C_DOTH) == 0)
+ return EOPNOTSUPP;
+#endif
+ vap->iv_flags |= IEEE80211_F_DOTH;
+ } else
+ vap->iv_flags &= ~IEEE80211_F_DOTH;
+ error = ENETRESET;
+ break;
+ case IEEE80211_IOC_REGDOMAIN:
+ error = ieee80211_ioctl_setregdomain(vap, ireq);
+ break;
+ case IEEE80211_IOC_ROAM:
+ error = ieee80211_ioctl_setroam(vap, ireq);
+ break;
+ case IEEE80211_IOC_TXPARAMS:
+ error = ieee80211_ioctl_settxparams(vap, ireq);
+ break;
+ case IEEE80211_IOC_HTCOMPAT:
+ if (ireq->i_val) {
+ if ((vap->iv_flags_ht & IEEE80211_FHT_HT) == 0)
+ return EOPNOTSUPP;
+ vap->iv_flags_ht |= IEEE80211_FHT_HTCOMPAT;
+ } else
+ vap->iv_flags_ht &= ~IEEE80211_FHT_HTCOMPAT;
+ /* NB: reset only if we're operating on an 11n channel */
+ if (isvapht(vap))
+ error = ERESTART;
+ break;
+ case IEEE80211_IOC_DWDS:
+ if (ireq->i_val) {
+ /* NB: DWDS only makes sense for WDS-capable devices */
+ if ((ic->ic_caps & IEEE80211_C_WDS) == 0)
+ return EOPNOTSUPP;
+ /* NB: DWDS is used only with ap+sta vaps */
+ if (vap->iv_opmode != IEEE80211_M_HOSTAP &&
+ vap->iv_opmode != IEEE80211_M_STA)
+ return EINVAL;
+ vap->iv_flags |= IEEE80211_F_DWDS;
+ if (vap->iv_opmode == IEEE80211_M_STA)
+ vap->iv_flags_ext |= IEEE80211_FEXT_4ADDR;
+ } else {
+ vap->iv_flags &= ~IEEE80211_F_DWDS;
+ if (vap->iv_opmode == IEEE80211_M_STA)
+ vap->iv_flags_ext &= ~IEEE80211_FEXT_4ADDR;
+ }
+ break;
+ case IEEE80211_IOC_INACTIVITY:
+ if (ireq->i_val)
+ vap->iv_flags_ext |= IEEE80211_FEXT_INACT;
+ else
+ vap->iv_flags_ext &= ~IEEE80211_FEXT_INACT;
+ break;
+ case IEEE80211_IOC_APPIE:
+ error = ieee80211_ioctl_setappie(vap, ireq);
+ break;
+ case IEEE80211_IOC_WPS:
+ if (ireq->i_val) {
+ if ((vap->iv_caps & IEEE80211_C_WPA) == 0)
+ return EOPNOTSUPP;
+ vap->iv_flags_ext |= IEEE80211_FEXT_WPS;
+ } else
+ vap->iv_flags_ext &= ~IEEE80211_FEXT_WPS;
+ break;
+ case IEEE80211_IOC_TSN:
+ if (ireq->i_val) {
+ if ((vap->iv_caps & IEEE80211_C_WPA) == 0)
+ return EOPNOTSUPP;
+ vap->iv_flags_ext |= IEEE80211_FEXT_TSN;
+ } else
+ vap->iv_flags_ext &= ~IEEE80211_FEXT_TSN;
+ break;
+ case IEEE80211_IOC_CHANSWITCH:
+ error = ieee80211_ioctl_chanswitch(vap, ireq);
+ break;
+ case IEEE80211_IOC_DFS:
+ if (ireq->i_val) {
+ if ((vap->iv_caps & IEEE80211_C_DFS) == 0)
+ return EOPNOTSUPP;
+ /* NB: DFS requires 11h support */
+ if ((vap->iv_flags & IEEE80211_F_DOTH) == 0)
+ return EINVAL;
+ vap->iv_flags_ext |= IEEE80211_FEXT_DFS;
+ } else
+ vap->iv_flags_ext &= ~IEEE80211_FEXT_DFS;
+ break;
+ case IEEE80211_IOC_DOTD:
+ if (ireq->i_val)
+ vap->iv_flags_ext |= IEEE80211_FEXT_DOTD;
+ else
+ vap->iv_flags_ext &= ~IEEE80211_FEXT_DOTD;
+ if (vap->iv_opmode == IEEE80211_M_STA)
+ error = ENETRESET;
+ break;
+ case IEEE80211_IOC_HTPROTMODE:
+ if (ireq->i_val > IEEE80211_PROT_RTSCTS)
+ return EINVAL;
+ ic->ic_htprotmode = ireq->i_val ?
+ IEEE80211_PROT_RTSCTS : IEEE80211_PROT_NONE;
+ /* NB: if not operating in 11n this can wait */
+ if (isvapht(vap))
+ error = ERESTART;
+ break;
+ case IEEE80211_IOC_STA_VLAN:
+ error = ieee80211_ioctl_setstavlan(vap, ireq);
+ break;
+ case IEEE80211_IOC_SMPS:
+ if ((ireq->i_val &~ IEEE80211_HTCAP_SMPS) != 0 ||
+ ireq->i_val == 0x0008) /* value of 2 is reserved */
+ return EINVAL;
+ if (ireq->i_val != IEEE80211_HTCAP_SMPS_OFF &&
+ (vap->iv_htcaps & IEEE80211_HTC_SMPS) == 0)
+ return EOPNOTSUPP;
+ vap->iv_htcaps = (vap->iv_htcaps &~ IEEE80211_HTCAP_SMPS) |
+ ireq->i_val;
+ /* NB: if not operating in 11n this can wait */
+ if (isvapht(vap))
+ error = ERESTART;
+ break;
+ case IEEE80211_IOC_RIFS:
+ if (ireq->i_val != 0) {
+ if ((vap->iv_htcaps & IEEE80211_HTC_RIFS) == 0)
+ return EOPNOTSUPP;
+ vap->iv_flags_ht |= IEEE80211_FHT_RIFS;
+ } else
+ vap->iv_flags_ht &= ~IEEE80211_FHT_RIFS;
+ /* NB: if not operating in 11n this can wait */
+ if (isvapht(vap))
+ error = ERESTART;
+ break;
+ default:
+ error = ieee80211_ioctl_setdefault(vap, ireq);
+ break;
+ }
+ /*
+ * The convention is that ENETRESET means an operation
+ * requires a complete re-initialization of the device (e.g.
+ * changing something that affects the association state).
+ * ERESTART means the request may be handled with only a
+ * reload of the hardware state. We hand ERESTART requests
+ * to the iv_reset callback so the driver can decide. If
+ * a device does not fill in iv_reset then it defaults to one
+ * that returns ENETRESET. Otherwise a driver may return
+ * ENETRESET (in which case a full reset will be done) or
+ * 0 to mean there's no need to do anything (e.g. when the
+ * change has no effect on the driver/device).
+ */
+ if (error == ERESTART)
+ error = IFNET_IS_UP_RUNNING(vap->iv_ifp) ?
+ vap->iv_reset(vap, ireq->i_type) : 0;
+ if (error == ENETRESET) {
+ /* XXX need to re-think AUTO handling */
+ if (IS_UP_AUTO(vap))
+ ieee80211_init(vap);
+ error = 0;
+ }
+ return error;
+}
+
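+/*
+ * Illustrative sketch (not part of the original source): a minimal
+ * driver iv_reset callback honoring the ERESTART/ENETRESET convention
+ * described above. The "foo" driver name is hypothetical.
+ */
+#if 0
+static int
+foo_reset(struct ieee80211vap *vap, u_long cmd)
+{
+	switch (cmd) {
+	case IEEE80211_IOC_RTSTHRESHOLD:
+		/* push the new threshold to hardware; no reinit needed */
+		return 0;
+	default:
+		/* anything else requires a full re-initialization */
+		return ENETRESET;
+	}
+}
+#endif
+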
+/*
+ * Rebuild the parent's multicast address list after an add/del
+ * of a multicast address for a vap. We have no way to tell
+ * what happened above to optimize the work, so we purge the entire
+ * list and rebuild it from scratch. This is very expensive.
+ * Note also the half-baked workaround for if_addmulti calling
+ * back to the parent device; there's no way to insert mcast
+ * entries quietly and/or cheaply.
+ */
+static void
+ieee80211_ioctl_updatemulti(struct ieee80211com *ic)
+{
+ struct ifnet *parent = ic->ic_ifp;
+ struct ieee80211vap *vap;
+ void *ioctl;
+
+ IEEE80211_LOCK(ic);
+ if_delallmulti(parent);
+ ioctl = parent->if_ioctl; /* XXX WAR if_allmulti */
+ parent->if_ioctl = NULL;
+ TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
+ struct ifnet *ifp = vap->iv_ifp;
+ struct ifmultiaddr *ifma;
+
+ TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
+ if (ifma->ifma_addr->sa_family != AF_LINK)
+ continue;
+ (void) if_addmulti(parent, ifma->ifma_addr, NULL);
+ }
+ }
+ parent->if_ioctl = ioctl;
+ ieee80211_runtask(ic, &ic->ic_mcast_task);
+ IEEE80211_UNLOCK(ic);
+}
+
+int
+ieee80211_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
+{
+ struct ieee80211vap *vap = ifp->if_softc;
+ struct ieee80211com *ic = vap->iv_ic;
+ int error = 0;
+ struct ifreq *ifr;
+ struct ifaddr *ifa; /* XXX */
+
+ switch (cmd) {
+ case SIOCSIFFLAGS:
+ IEEE80211_LOCK(ic);
+ ieee80211_syncifflag_locked(ic, IFF_PROMISC);
+ ieee80211_syncifflag_locked(ic, IFF_ALLMULTI);
+ if (ifp->if_flags & IFF_UP) {
+ /*
+ * Bring ourself up unless we're already operational.
+ * If we're the first vap and the parent is not up
+ * then it will automatically be brought up as a
+ * side-effect of bringing ourself up.
+ */
+ if (vap->iv_state == IEEE80211_S_INIT)
+ ieee80211_start_locked(vap);
+ } else if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+ /*
+ * Stop ourself. If we are the last vap to be
+ * marked down the parent will also be taken down.
+ */
+ ieee80211_stop_locked(vap);
+ }
+ IEEE80211_UNLOCK(ic);
+ /* Wait for parent ioctl handler if it was queued */
+ ieee80211_waitfor_parent(ic);
+ break;
+ case SIOCADDMULTI:
+ case SIOCDELMULTI:
+ ieee80211_ioctl_updatemulti(ic);
+ break;
+ case SIOCSIFMEDIA:
+ case SIOCGIFMEDIA:
+ ifr = (struct ifreq *)data;
+ error = ifmedia_ioctl(ifp, ifr, &vap->iv_media, cmd);
+ break;
+ case SIOCG80211:
+ error = ieee80211_ioctl_get80211(vap, cmd,
+ (struct ieee80211req *) data);
+ break;
+ case SIOCS80211:
+ error = priv_check(curthread, PRIV_NET80211_MANAGE);
+ if (error == 0)
+ error = ieee80211_ioctl_set80211(vap, cmd,
+ (struct ieee80211req *) data);
+ break;
+ case SIOCG80211STATS:
+ ifr = (struct ifreq *)data;
+ copyout(&vap->iv_stats, ifr->ifr_data, sizeof (vap->iv_stats));
+ break;
+ case SIOCSIFMTU:
+ ifr = (struct ifreq *)data;
+ if (!(IEEE80211_MTU_MIN <= ifr->ifr_mtu &&
+ ifr->ifr_mtu <= IEEE80211_MTU_MAX))
+ error = EINVAL;
+ else
+ ifp->if_mtu = ifr->ifr_mtu;
+ break;
+ case SIOCSIFADDR:
+ /*
+ * XXX Handle this directly so we can suppress if_init calls.
+ * XXX This should be done in ether_ioctl but for the moment
+ * XXX there are too many other parts of the system that
+ * XXX set IFF_UP and so suppress if_init being called when
+ * XXX it should be.
+ */
+ ifa = (struct ifaddr *) data;
+ switch (ifa->ifa_addr->sa_family) {
+#ifdef INET
+ case AF_INET:
+ if ((ifp->if_flags & IFF_UP) == 0) {
+ ifp->if_flags |= IFF_UP;
+ ifp->if_init(ifp->if_softc);
+ }
+ arp_ifinit(ifp, ifa);
+ break;
+#endif
+#ifdef IPX
+ /*
+ * XXX - This code is probably wrong,
+ * but has been copied many times.
+ */
+ case AF_IPX: {
+ struct ipx_addr *ina = &(IA_SIPX(ifa)->sipx_addr);
+
+ if (ipx_nullhost(*ina))
+ ina->x_host = *(union ipx_host *)
+ IF_LLADDR(ifp);
+ else
+ bcopy((caddr_t) ina->x_host.c_host,
+ (caddr_t) IF_LLADDR(ifp),
+ ETHER_ADDR_LEN);
+ /* fall thru... */
+ }
+#endif
+ default:
+ if ((ifp->if_flags & IFF_UP) == 0) {
+ ifp->if_flags |= IFF_UP;
+ ifp->if_init(ifp->if_softc);
+ }
+ break;
+ }
+ break;
+ /* Pass NDIS ioctls up to the driver */
+ case SIOCGDRVSPEC:
+ case SIOCSDRVSPEC:
+ case SIOCGPRIVATE_0: {
+ struct ifnet *parent = vap->iv_ic->ic_ifp;
+ error = parent->if_ioctl(parent, cmd, data);
+ break;
+ }
+ default:
+ error = ether_ioctl(ifp, cmd, data);
+ break;
+ }
+ return error;
+}
diff --git a/rtems/freebsd/net80211/ieee80211_ioctl.h b/rtems/freebsd/net80211/ieee80211_ioctl.h
new file mode 100644
index 00000000..5a20c413
--- /dev/null
+++ b/rtems/freebsd/net80211/ieee80211_ioctl.h
@@ -0,0 +1,849 @@
+/*-
+ * Copyright (c) 2001 Atsushi Onoe
+ * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+#ifndef _NET80211_IEEE80211_IOCTL_HH_
+#define _NET80211_IEEE80211_IOCTL_HH_
+
+/*
+ * IEEE 802.11 ioctls.
+ */
+#include <rtems/freebsd/net80211/_ieee80211.h>
+#include <rtems/freebsd/net80211/ieee80211.h>
+#include <rtems/freebsd/net80211/ieee80211_crypto.h>
+
+/*
+ * Per/node (station) statistics.
+ */
+struct ieee80211_nodestats {
+ uint32_t ns_rx_data; /* rx data frames */
+ uint32_t ns_rx_mgmt; /* rx management frames */
+ uint32_t ns_rx_ctrl; /* rx control frames */
+ uint32_t ns_rx_ucast; /* rx unicast frames */
+ uint32_t ns_rx_mcast; /* rx multi/broadcast frames */
+ uint64_t ns_rx_bytes; /* rx data count (bytes) */
+ uint64_t ns_rx_beacons; /* rx beacon frames */
+ uint32_t ns_rx_proberesp; /* rx probe response frames */
+
+ uint32_t ns_rx_dup; /* rx discard 'cuz dup */
+ uint32_t ns_rx_noprivacy; /* rx w/ wep but privacy off */
+ uint32_t ns_rx_wepfail; /* rx wep processing failed */
+ uint32_t ns_rx_demicfail; /* rx demic failed */
+ uint32_t ns_rx_decap; /* rx decapsulation failed */
+ uint32_t ns_rx_defrag; /* rx defragmentation failed */
+ uint32_t ns_rx_disassoc; /* rx disassociation */
+ uint32_t ns_rx_deauth; /* rx deauthentication */
+ uint32_t ns_rx_action; /* rx action */
+ uint32_t ns_rx_decryptcrc; /* rx decrypt failed on crc */
+ uint32_t ns_rx_unauth; /* rx on unauthorized port */
+ uint32_t ns_rx_unencrypted; /* rx unencrypted w/ privacy */
+ uint32_t ns_rx_drop; /* rx discard other reason */
+
+ uint32_t ns_tx_data; /* tx data frames */
+ uint32_t ns_tx_mgmt; /* tx management frames */
+ uint32_t ns_tx_ctrl; /* tx control frames */
+ uint32_t ns_tx_ucast; /* tx unicast frames */
+ uint32_t ns_tx_mcast; /* tx multi/broadcast frames */
+ uint64_t ns_tx_bytes; /* tx data count (bytes) */
+ uint32_t ns_tx_probereq; /* tx probe request frames */
+
+ uint32_t ns_tx_novlantag; /* tx discard 'cuz no tag */
+ uint32_t ns_tx_vlanmismatch; /* tx discard 'cuz bad tag */
+
+ uint32_t ns_ps_discard; /* ps discard 'cuz of age */
+
+ /* MIB-related state */
+ uint32_t ns_tx_assoc; /* [re]associations */
+ uint32_t ns_tx_assoc_fail; /* [re]association failures */
+ uint32_t ns_tx_auth; /* [re]authentications */
+ uint32_t ns_tx_auth_fail; /* [re]authentication failures*/
+ uint32_t ns_tx_deauth; /* deauthentications */
+ uint32_t ns_tx_deauth_code; /* last deauth reason */
+ uint32_t ns_tx_disassoc; /* disassociations */
+ uint32_t ns_tx_disassoc_code; /* last disassociation reason */
+ uint32_t ns_spare[8];
+};
+
+/*
+ * Summary statistics.
+ */
+struct ieee80211_stats {
+ uint32_t is_rx_badversion; /* rx frame with bad version */
+ uint32_t is_rx_tooshort; /* rx frame too short */
+ uint32_t is_rx_wrongbss; /* rx from wrong bssid */
+ uint32_t is_rx_dup; /* rx discard 'cuz dup */
+ uint32_t is_rx_wrongdir; /* rx w/ wrong direction */
+ uint32_t is_rx_mcastecho; /* rx discard 'cuz mcast echo */
+ uint32_t is_rx_notassoc; /* rx discard 'cuz sta !assoc */
+ uint32_t is_rx_noprivacy; /* rx w/ wep but privacy off */
+ uint32_t is_rx_unencrypted; /* rx w/o wep and privacy on */
+ uint32_t is_rx_wepfail; /* rx wep processing failed */
+ uint32_t is_rx_decap; /* rx decapsulation failed */
+ uint32_t is_rx_mgtdiscard; /* rx discard mgt frames */
+ uint32_t is_rx_ctl; /* rx ctrl frames */
+ uint32_t is_rx_beacon; /* rx beacon frames */
+ uint32_t is_rx_rstoobig; /* rx rate set truncated */
+ uint32_t is_rx_elem_missing; /* rx required element missing*/
+ uint32_t is_rx_elem_toobig; /* rx element too big */
+ uint32_t is_rx_elem_toosmall; /* rx element too small */
+ uint32_t is_rx_elem_unknown; /* rx element unknown */
+ uint32_t is_rx_badchan; /* rx frame w/ invalid chan */
+ uint32_t is_rx_chanmismatch; /* rx frame chan mismatch */
+ uint32_t is_rx_nodealloc; /* rx frame dropped */
+ uint32_t is_rx_ssidmismatch; /* rx frame ssid mismatch */
+ uint32_t is_rx_auth_unsupported; /* rx w/ unsupported auth alg */
+ uint32_t is_rx_auth_fail; /* rx sta auth failure */
+ uint32_t is_rx_auth_countermeasures;/* rx auth discard 'cuz CM */
+ uint32_t is_rx_assoc_bss; /* rx assoc from wrong bssid */
+ uint32_t is_rx_assoc_notauth; /* rx assoc w/o auth */
+ uint32_t is_rx_assoc_capmismatch;/* rx assoc w/ cap mismatch */
+ uint32_t is_rx_assoc_norate; /* rx assoc w/ no rate match */
+ uint32_t is_rx_assoc_badwpaie; /* rx assoc w/ bad WPA IE */
+ uint32_t is_rx_deauth; /* rx deauthentication */
+ uint32_t is_rx_disassoc; /* rx disassociation */
+ uint32_t is_rx_badsubtype; /* rx frame w/ unknown subtype*/
+ uint32_t is_rx_nobuf; /* rx failed for lack of buf */
+ uint32_t is_rx_decryptcrc; /* rx decrypt failed on crc */
+ uint32_t is_rx_ahdemo_mgt; /* rx discard ahdemo mgt frame*/
+ uint32_t is_rx_bad_auth; /* rx bad auth request */
+ uint32_t is_rx_unauth; /* rx on unauthorized port */
+ uint32_t is_rx_badkeyid; /* rx w/ incorrect keyid */
+ uint32_t is_rx_ccmpreplay; /* rx seq# violation (CCMP) */
+ uint32_t is_rx_ccmpformat; /* rx format bad (CCMP) */
+ uint32_t is_rx_ccmpmic; /* rx MIC check failed (CCMP) */
+ uint32_t is_rx_tkipreplay; /* rx seq# violation (TKIP) */
+ uint32_t is_rx_tkipformat; /* rx format bad (TKIP) */
+ uint32_t is_rx_tkipmic; /* rx MIC check failed (TKIP) */
+ uint32_t is_rx_tkipicv; /* rx ICV check failed (TKIP) */
+ uint32_t is_rx_badcipher; /* rx failed 'cuz key type */
+ uint32_t is_rx_nocipherctx; /* rx failed 'cuz key !setup */
+ uint32_t is_rx_acl; /* rx discard 'cuz acl policy */
+ uint32_t is_tx_nobuf; /* tx failed for lack of buf */
+ uint32_t is_tx_nonode; /* tx failed for no node */
+ uint32_t is_tx_unknownmgt; /* tx of unknown mgt frame */
+ uint32_t is_tx_badcipher; /* tx failed 'cuz key type */
+ uint32_t is_tx_nodefkey; /* tx failed 'cuz no defkey */
+ uint32_t is_tx_noheadroom; /* tx failed 'cuz no space */
+ uint32_t is_tx_fragframes; /* tx frames fragmented */
+ uint32_t is_tx_frags; /* tx fragments created */
+ uint32_t is_scan_active; /* active scans started */
+ uint32_t is_scan_passive; /* passive scans started */
+ uint32_t is_node_timeout; /* nodes timed out inactivity */
+ uint32_t is_crypto_nomem; /* no memory for crypto ctx */
+ uint32_t is_crypto_tkip; /* tkip crypto done in s/w */
+ uint32_t is_crypto_tkipenmic; /* tkip en-MIC done in s/w */
+ uint32_t is_crypto_tkipdemic; /* tkip de-MIC done in s/w */
+ uint32_t is_crypto_tkipcm; /* tkip counter measures */
+ uint32_t is_crypto_ccmp; /* ccmp crypto done in s/w */
+ uint32_t is_crypto_wep; /* wep crypto done in s/w */
+ uint32_t is_crypto_setkey_cipher;/* cipher rejected key */
+ uint32_t is_crypto_setkey_nokey; /* no key index for setkey */
+ uint32_t is_crypto_delkey; /* driver key delete failed */
+ uint32_t is_crypto_badcipher; /* unknown cipher */
+ uint32_t is_crypto_nocipher; /* cipher not available */
+ uint32_t is_crypto_attachfail; /* cipher attach failed */
+ uint32_t is_crypto_swfallback; /* cipher fallback to s/w */
+ uint32_t is_crypto_keyfail; /* driver key alloc failed */
+ uint32_t is_crypto_enmicfail; /* en-MIC failed */
+ uint32_t is_ibss_capmismatch; /* merge failed-cap mismatch */
+ uint32_t is_ibss_norate; /* merge failed-rate mismatch */
+ uint32_t is_ps_unassoc; /* ps-poll for unassoc. sta */
+ uint32_t is_ps_badaid; /* ps-poll w/ incorrect aid */
+ uint32_t is_ps_qempty; /* ps-poll w/ nothing to send */
+ uint32_t is_ff_badhdr; /* fast frame rx'd w/ bad hdr */
+ uint32_t is_ff_tooshort; /* fast frame rx decap error */
+ uint32_t is_ff_split; /* fast frame rx split error */
+ uint32_t is_ff_decap; /* fast frames decap'd */
+ uint32_t is_ff_encap; /* fast frames encap'd for tx */
+ uint32_t is_rx_badbintval; /* rx frame w/ bogus bintval */
+ uint32_t is_rx_demicfail; /* rx demic failed */
+ uint32_t is_rx_defrag; /* rx defragmentation failed */
+ uint32_t is_rx_mgmt; /* rx management frames */
+ uint32_t is_rx_action; /* rx action mgt frames */
+ uint32_t is_amsdu_tooshort; /* A-MSDU rx decap error */
+ uint32_t is_amsdu_split; /* A-MSDU rx split error */
+ uint32_t is_amsdu_decap; /* A-MSDU decap'd */
+ uint32_t is_amsdu_encap; /* A-MSDU encap'd for tx */
+ uint32_t is_ampdu_bar_bad; /* A-MPDU BAR before ADDBA */
+ uint32_t is_ampdu_bar_oow; /* A-MPDU BAR out of window */
+ uint32_t is_ampdu_bar_move; /* A-MPDU BAR moved window */
+ uint32_t is_ampdu_bar_rx; /* A-MPDU BAR frames handled */
+ uint32_t is_ampdu_rx_flush; /* A-MPDU frames flushed */
+ uint32_t is_ampdu_rx_oor; /* A-MPDU frames out-of-order */
+ uint32_t is_ampdu_rx_copy; /* A-MPDU frames copied down */
+ uint32_t is_ampdu_rx_drop; /* A-MPDU frames dropped */
+ uint32_t is_tx_badstate; /* tx discard state != RUN */
+ uint32_t is_tx_notassoc; /* tx failed, sta not assoc */
+ uint32_t is_tx_classify; /* tx classification failed */
+ uint32_t is_dwds_mcast; /* discard mcast over dwds */
+ uint32_t is_dwds_qdrop; /* dwds pending frame q full */
+ uint32_t is_ht_assoc_nohtcap; /* non-HT sta rejected */
+ uint32_t is_ht_assoc_downgrade; /* HT sta forced to legacy */
+ uint32_t is_ht_assoc_norate; /* HT assoc w/ rate mismatch */
+ uint32_t is_ampdu_rx_age; /* A-MPDU sent up 'cuz of age */
+ uint32_t is_ampdu_rx_move; /* A-MPDU MSDU moved window */
+ uint32_t is_addba_reject; /* ADDBA reject 'cuz disabled */
+ uint32_t is_addba_norequest; /* ADDBA response w/o ADDBA */
+ uint32_t is_addba_badtoken; /* ADDBA resp w/ wrong dialogtoken */
+ uint32_t is_addba_badpolicy; /* ADDBA resp w/ wrong policy */
+ uint32_t is_ampdu_stop; /* A-MPDU stream stopped */
+ uint32_t is_ampdu_stop_failed; /* A-MPDU stream not running */
+ uint32_t is_ampdu_rx_reorder; /* A-MPDU held for rx reorder */
+ uint32_t is_scan_bg; /* background scans started */
+ uint8_t is_rx_deauth_code; /* last rx'd deauth reason */
+ uint8_t is_rx_disassoc_code; /* last rx'd disassoc reason */
+ uint8_t is_rx_authfail_code; /* last rx'd auth fail reason */
+ uint32_t is_beacon_miss; /* beacon miss notification */
+ uint32_t is_rx_badstate; /* rx discard state != RUN */
+ uint32_t is_ff_flush; /* ff's flush'd from stageq */
+ uint32_t is_tx_ctl; /* tx ctrl frames */
+ uint32_t is_ampdu_rexmt; /* A-MPDU frames rexmt ok */
+ uint32_t is_ampdu_rexmt_fail; /* A-MPDU frames rexmt fail */
+
+ uint32_t is_mesh_wrongmesh; /* dropped 'cuz not mesh sta*/
+ uint32_t is_mesh_nolink; /* dropped 'cuz link not estab*/
+ uint32_t is_mesh_fwd_ttl; /* mesh not fwd'd 'cuz ttl 0 */
+ uint32_t is_mesh_fwd_nobuf; /* mesh not fwd'd 'cuz no mbuf*/
+ uint32_t is_mesh_fwd_tooshort; /* mesh not fwd'd 'cuz no hdr */
+ uint32_t is_mesh_fwd_disabled; /* mesh not fwd'd 'cuz disabled */
+ uint32_t is_mesh_fwd_nopath; /* mesh not fwd'd 'cuz path unknown */
+
+ uint32_t is_hwmp_wrongseq; /* wrong hwmp seq no. */
+ uint32_t is_hwmp_rootreqs; /* root PREQs sent */
+ uint32_t is_hwmp_rootrann; /* root RANNs sent */
+
+ uint32_t is_mesh_badae; /* dropped 'cuz invalid AE */
+ uint32_t is_mesh_rtaddfailed; /* route add failed */
+ uint32_t is_mesh_notproxy; /* dropped 'cuz not proxying */
+ uint32_t is_rx_badalign; /* dropped 'cuz misaligned */
+ uint32_t is_hwmp_proxy; /* PREP for proxy route */
+
+ uint32_t is_spare[11];
+};
+
+/*
+ * Max size of optional information elements. We artificially
+ * constrain this; it's limited only by the max frame size (and
+ * the max parameter size of the wireless extensions).
+ */
+#define IEEE80211_MAX_OPT_IE 256
+
+/*
+ * WPA/RSN get/set key request. Specify the key/cipher
+ * type and whether the key is to be used for sending and/or
+ * receiving. The key index should be set only when working
+ * with global keys (use IEEE80211_KEYIX_NONE for ``no index'').
+ * Otherwise a unicast/pairwise key is specified by the bssid
+ * (on a station) or mac address (on an ap). The key length
+ * must include any MIC key data; otherwise it should be no
+ * more than IEEE80211_KEYBUF_SIZE.
+ */
+struct ieee80211req_key {
+ uint8_t ik_type; /* key/cipher type */
+ uint8_t ik_pad;
+ uint16_t ik_keyix; /* key index */
+ uint8_t ik_keylen; /* key length in bytes */
+ uint8_t ik_flags;
+/* NB: IEEE80211_KEY_XMIT and IEEE80211_KEY_RECV defined elsewhere */
+#define IEEE80211_KEY_DEFAULT 0x80 /* default xmit key */
+ uint8_t ik_macaddr[IEEE80211_ADDR_LEN];
+ uint64_t ik_keyrsc; /* key receive sequence counter */
+ uint64_t ik_keytsc; /* key transmit sequence counter */
+ uint8_t ik_keydata[IEEE80211_KEYBUF_SIZE+IEEE80211_MICBUF_SIZE];
+};
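+
+/*
+ * Example (hypothetical, illustration only): filling in a request for
+ * a global default transmit key. Cipher and key-flag constants come
+ * from _ieee80211.h/ieee80211_crypto.h; the key material is made up.
+ */
+#if 0
+	struct ieee80211req_key ik;
+
+	memset(&ik, 0, sizeof(ik));
+	ik.ik_type = IEEE80211_CIPHER_WEP;
+	ik.ik_keyix = 0;		/* global key slot 0 */
+	ik.ik_keylen = 5;		/* 40-bit wep */
+	ik.ik_flags = IEEE80211_KEY_XMIT | IEEE80211_KEY_RECV |
+	    IEEE80211_KEY_DEFAULT;
+	memcpy(ik.ik_keydata, "12345", 5);
+#endif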
+
+/*
+ * Delete a key either by index or address. Set the index
+ * to IEEE80211_KEYIX_NONE when deleting a unicast key.
+ */
+struct ieee80211req_del_key {
+ uint8_t idk_keyix; /* key index */
+ uint8_t idk_macaddr[IEEE80211_ADDR_LEN];
+};
+
+/*
+ * MLME state manipulation request. IEEE80211_MLME_ASSOC
+ * only makes sense when operating as a station. The other
+ * requests can be used when operating as a station or an
+ * ap (to effect a station).
+ */
+struct ieee80211req_mlme {
+ uint8_t im_op; /* operation to perform */
+#define IEEE80211_MLME_ASSOC 1 /* associate station */
+#define IEEE80211_MLME_DISASSOC 2 /* disassociate station */
+#define IEEE80211_MLME_DEAUTH 3 /* deauthenticate station */
+#define IEEE80211_MLME_AUTHORIZE 4 /* authorize station */
+#define IEEE80211_MLME_UNAUTHORIZE 5 /* unauthorize station */
+#define IEEE80211_MLME_AUTH 6 /* authenticate station */
+ uint8_t im_ssid_len; /* length of optional ssid */
+ uint16_t im_reason; /* 802.11 reason code */
+ uint8_t im_macaddr[IEEE80211_ADDR_LEN];
+ uint8_t im_ssid[IEEE80211_NWID_LEN];
+};
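+
+/*
+ * Example (hypothetical): deauthenticating a station from an ap vap.
+ * The mac address ("sta_macaddr") and reason code are placeholders.
+ */
+#if 0
+	struct ieee80211req_mlme mlme;
+
+	memset(&mlme, 0, sizeof(mlme));
+	mlme.im_op = IEEE80211_MLME_DEAUTH;
+	mlme.im_reason = 2;		/* previous auth no longer valid */
+	memcpy(mlme.im_macaddr, sta_macaddr, IEEE80211_ADDR_LEN);
+#endif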
+
+/*
+ * MAC ACL operations.
+ */
+enum {
+ IEEE80211_MACCMD_POLICY_OPEN = 0, /* set policy: no ACL's */
+ IEEE80211_MACCMD_POLICY_ALLOW = 1, /* set policy: allow traffic */
+ IEEE80211_MACCMD_POLICY_DENY = 2, /* set policy: deny traffic */
+ IEEE80211_MACCMD_FLUSH = 3, /* flush ACL database */
+ IEEE80211_MACCMD_DETACH = 4, /* detach ACL policy */
+ IEEE80211_MACCMD_POLICY = 5, /* get ACL policy */
+ IEEE80211_MACCMD_LIST = 6, /* get ACL database */
+ IEEE80211_MACCMD_POLICY_RADIUS = 7, /* set policy: RADIUS managed */
+};
+
+struct ieee80211req_maclist {
+ uint8_t ml_macaddr[IEEE80211_ADDR_LEN];
+} __packed;
+
+/*
+ * Mesh Routing Table Operations.
+ */
+enum {
+ IEEE80211_MESH_RTCMD_LIST = 0, /* list HWMP routing table */
+ IEEE80211_MESH_RTCMD_FLUSH = 1, /* flush HWMP routing table */
+ IEEE80211_MESH_RTCMD_ADD = 2, /* add entry to the table */
+ IEEE80211_MESH_RTCMD_DELETE = 3, /* delete an entry from the table */
+};
+
+struct ieee80211req_mesh_route {
+ uint8_t imr_flags;
+#define IEEE80211_MESHRT_FLAGS_VALID 0x01
+#define IEEE80211_MESHRT_FLAGS_PROXY 0x02
+ uint8_t imr_dest[IEEE80211_ADDR_LEN];
+ uint8_t imr_nexthop[IEEE80211_ADDR_LEN];
+ uint16_t imr_nhops;
+ uint8_t imr_pad;
+ uint32_t imr_metric;
+ uint32_t imr_lifetime;
+ uint32_t imr_lastmseq;
+};
+
+/*
+ * HWMP root modes
+ */
+enum {
+ IEEE80211_HWMP_ROOTMODE_DISABLED = 0, /* disabled */
+ IEEE80211_HWMP_ROOTMODE_NORMAL = 1, /* normal PREPs */
+ IEEE80211_HWMP_ROOTMODE_PROACTIVE = 2, /* proactive PREPS */
+ IEEE80211_HWMP_ROOTMODE_RANN = 3, /* use RANN elemid */
+};
+
+
+/*
+ * Set the active channel list by IEEE channel #: each channel
+ * to be marked active is set in a bit vector. Note this list is
+ * intersected with the available channel list in calculating
+ * the set of channels actually used in scanning.
+ */
+struct ieee80211req_chanlist {
+ uint8_t ic_channels[32]; /* NB: can be variable length */
+};
+
+/*
+ * Get the active channel list info.
+ */
+struct ieee80211req_chaninfo {
+ u_int ic_nchans;
+ struct ieee80211_channel ic_chans[1]; /* NB: variable length */
+};
+#define IEEE80211_CHANINFO_SIZE(_nchan) \
+ (sizeof(struct ieee80211req_chaninfo) + \
+ (((_nchan)-1) * sizeof(struct ieee80211_channel)))
+#define IEEE80211_CHANINFO_SPACE(_ci) \
+ IEEE80211_CHANINFO_SIZE((_ci)->ic_nchans)
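+
+/*
+ * Example (sketch): the SIZE/SPACE macros above size the variable
+ * length structure; e.g. allocating room for "nchan" channels in a
+ * kernel context (M_TEMP/M_NOWAIT are illustrative choices):
+ */
+#if 0
+	struct ieee80211req_chaninfo *ci;
+
+	ci = malloc(IEEE80211_CHANINFO_SIZE(nchan), M_TEMP, M_NOWAIT);
+#endif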
+
+/*
+ * Retrieve the WPA/RSN information element for an associated station.
+ */
+struct ieee80211req_wpaie { /* old version w/ only one ie */
+ uint8_t wpa_macaddr[IEEE80211_ADDR_LEN];
+ uint8_t wpa_ie[IEEE80211_MAX_OPT_IE];
+};
+struct ieee80211req_wpaie2 {
+ uint8_t wpa_macaddr[IEEE80211_ADDR_LEN];
+ uint8_t wpa_ie[IEEE80211_MAX_OPT_IE];
+ uint8_t rsn_ie[IEEE80211_MAX_OPT_IE];
+};
+
+/*
+ * Retrieve per-node statistics.
+ */
+struct ieee80211req_sta_stats {
+ union {
+ /* NB: explicitly force 64-bit alignment */
+ uint8_t macaddr[IEEE80211_ADDR_LEN];
+ uint64_t pad;
+ } is_u;
+ struct ieee80211_nodestats is_stats;
+};
+
+/*
+ * Station information block; the mac address is used
+ * to retrieve other data like stats, unicast key, etc.
+ */
+struct ieee80211req_sta_info {
+ uint16_t isi_len; /* total length (mult of 4) */
+ uint16_t isi_ie_off; /* offset to IE data */
+ uint16_t isi_ie_len; /* IE length */
+ uint16_t isi_freq; /* MHz */
+ uint32_t isi_flags; /* channel flags */
+ uint32_t isi_state; /* state flags */
+ uint8_t isi_authmode; /* authentication algorithm */
+ int8_t isi_rssi; /* receive signal strength */
+ int8_t isi_noise; /* noise floor */
+ uint8_t isi_capinfo; /* capabilities */
+ uint8_t isi_erp; /* ERP element */
+ uint8_t isi_macaddr[IEEE80211_ADDR_LEN];
+ uint8_t isi_nrates;
+ /* negotiated rates */
+ uint8_t isi_rates[IEEE80211_RATE_MAXSIZE];
+ uint8_t isi_txrate; /* legacy/IEEE rate or MCS */
+ uint16_t isi_associd; /* assoc response */
+ uint16_t isi_txpower; /* current tx power */
+ uint16_t isi_vlan; /* vlan tag */
+ /* NB: [IEEE80211_NONQOS_TID] holds seq#'s for non-QoS stations */
+ uint16_t isi_txseqs[IEEE80211_TID_SIZE];/* tx seq #/TID */
+ uint16_t isi_rxseqs[IEEE80211_TID_SIZE];/* rx seq#/TID */
+ uint16_t isi_inact; /* inactivity timer */
+ uint16_t isi_txmbps; /* current tx rate in .5 Mb/s */
+ uint16_t isi_pad;
+ uint32_t isi_jointime; /* time of assoc/join */
+ struct ieee80211_mimo_info isi_mimo; /* MIMO info for 11n sta's */
+ /* 11s info */
+ uint16_t isi_peerid;
+ uint16_t isi_localid;
+ uint8_t isi_peerstate;
+ /* XXX frag state? */
+ /* variable length IE data */
+};
+
+/*
+ * Retrieve per-station information; to retrieve all
+ * specify a mac address of ff:ff:ff:ff:ff:ff.
+ */
+struct ieee80211req_sta_req {
+ union {
+ /* NB: explicitly force 64-bit alignment */
+ uint8_t macaddr[IEEE80211_ADDR_LEN];
+ uint64_t pad;
+ } is_u;
+ struct ieee80211req_sta_info info[1]; /* variable length */
+};
+
+/*
+ * Get/set per-station tx power cap.
+ */
+struct ieee80211req_sta_txpow {
+ uint8_t it_macaddr[IEEE80211_ADDR_LEN];
+ uint8_t it_txpow;
+};
+
+/*
+ * WME parameters manipulated with IEEE80211_IOC_WME_CWMIN
+ * through IEEE80211_IOC_WME_ACKPOLICY are set and returned
+ * using i_val and i_len. i_val holds the value itself.
+ * i_len specifies the AC and, as appropriate, the high bit
+ * specifies whether the operation is to be applied to the
+ * BSS or ourself.
+ */
+#define IEEE80211_WMEPARAM_SELF 0x0000 /* parameter applies to self */
+#define IEEE80211_WMEPARAM_BSS 0x8000 /* parameter applies to BSS */
+#define IEEE80211_WMEPARAM_VAL 0x7fff /* parameter value */
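+
+/*
+ * Example (illustrative): to set ECWmin for the AC_VO access category
+ * (3) in the BSS parameters, i_len encodes both the AC and the scope:
+ */
+#if 0
+	struct ieee80211req ireq;	/* name setup elided */
+
+	ireq.i_type = IEEE80211_IOC_WME_CWMIN;
+	ireq.i_val = 2;				/* ECWmin value */
+	ireq.i_len = 3 | IEEE80211_WMEPARAM_BSS; /* AC_VO, BSS-wide */
+#endif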
+
+/*
+ * Application Information Elements can be appended to a variety
+ * of frames with the IEEE80211_IOC_APPIE request. This request
+ * piggybacks on a normal ieee80211req; the frame type is passed
+ * in i_val as the 802.11 FC0 byte and the length of the IE data
+ * is passed in i_len. The data is referenced in i_data. If i_len
+ * is zero then any previously configured IE data is removed. At
+ * most IEEE80211_MAX_APPIE bytes of data may be appended. Note
+ * that multiple IE's can be supplied; the data is treated opaquely.
+ */
+#define IEEE80211_MAX_APPIE 1024 /* max app IE data */
+/*
+ * Hack: the WPA authenticator uses this mechanism to specify WPA
+ * ie's that are used instead of the ones normally constructed using
+ * the cipher state setup with separate ioctls. This avoids issues
+ * like the authenticator ordering ie data differently than the
+ * net80211 layer and needing to keep separate state for WPA and RSN.
+ */
+#define IEEE80211_APPIE_WPA \
+ (IEEE80211_FC0_TYPE_MGT | IEEE80211_FC0_SUBTYPE_BEACON | \
+ IEEE80211_FC0_SUBTYPE_PROBE_RESP)
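+
+/*
+ * Example (illustrative): a zero-length IEEE80211_IOC_APPIE request
+ * removes any previously configured ie data for the frame type:
+ */
+#if 0
+	struct ieee80211req ireq;	/* name setup elided */
+
+	ireq.i_type = IEEE80211_IOC_APPIE;
+	ireq.i_val = IEEE80211_APPIE_WPA;
+	ireq.i_len = 0;			/* zero length removes the ie */
+	ireq.i_data = NULL;
+#endif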
+
+/*
+ * Station mode roaming parameters. These are maintained
+ * per band/mode and control the roaming algorithm.
+ */
+struct ieee80211_roamparams_req {
+ struct ieee80211_roamparam params[IEEE80211_MODE_MAX];
+};
+
+/*
+ * Transmit parameters. These can be used to set fixed transmit
+ * rate for each operating mode when operating as client or on a
+ * per-client basis according to the capabilities of the client
+ * (e.g. an 11b client associated to an 11g ap) when operating as
+ * an ap.
+ *
+ * MCS are distinguished from legacy rates by or'ing in 0x80.
+ */
+struct ieee80211_txparams_req {
+ struct ieee80211_txparam params[IEEE80211_MODE_MAX];
+};
+
+/*
+ * Set regulatory domain state with IEEE80211_IOC_REGDOMAIN.
+ * Note this is both the regulatory description and the channel
+ * list. The get request for IEEE80211_IOC_REGDOMAIN returns
+ * only the regdomain info; the channel list is obtained
+ * separately with IEEE80211_IOC_CHANINFO.
+ */
+struct ieee80211_regdomain_req {
+ struct ieee80211_regdomain rd;
+ struct ieee80211req_chaninfo chaninfo;
+};
+#define IEEE80211_REGDOMAIN_SIZE(_nchan) \
+ (sizeof(struct ieee80211_regdomain_req) + \
+ (((_nchan)-1) * sizeof(struct ieee80211_channel)))
+#define IEEE80211_REGDOMAIN_SPACE(_req) \
+ IEEE80211_REGDOMAIN_SIZE((_req)->chaninfo.ic_nchans)
+
+/*
+ * Get driver capabilities. Driver, hardware crypto, and
+ * HT/802.11n capabilities, and a table that describes what
+ * the radio can do.
+ */
+struct ieee80211_devcaps_req {
+ uint32_t dc_drivercaps; /* general driver caps */
+ uint32_t dc_cryptocaps; /* hardware crypto support */
+ uint32_t dc_htcaps; /* HT/802.11n support */
+ struct ieee80211req_chaninfo dc_chaninfo;
+};
+#define IEEE80211_DEVCAPS_SIZE(_nchan) \
+ (sizeof(struct ieee80211_devcaps_req) + \
+ (((_nchan)-1) * sizeof(struct ieee80211_channel)))
+#define IEEE80211_DEVCAPS_SPACE(_dc) \
+ IEEE80211_DEVCAPS_SIZE((_dc)->dc_chaninfo.ic_nchans)
+
+struct ieee80211_chanswitch_req {
+ struct ieee80211_channel csa_chan; /* new channel */
+ int csa_mode; /* CSA mode */
+ int csa_count; /* beacon count to switch */
+};
+
+/*
+ * Get/set per-station vlan tag.
+ */
+struct ieee80211req_sta_vlan {
+ uint8_t sv_macaddr[IEEE80211_ADDR_LEN];
+ uint16_t sv_vlan;
+};
+
+#ifdef __FreeBSD__
+/*
+ * FreeBSD-style ioctls.
+ */
+/* the first member must be matched with struct ifreq */
+struct ieee80211req {
+ char i_name[IFNAMSIZ]; /* if_name, e.g. "wi0" */
+ uint16_t i_type; /* req type */
+ int16_t i_val; /* Index or simple value */
+ int16_t i_len; /* length of i_data */
+ void *i_data; /* Extra data */
+};
+#define SIOCS80211 _IOW('i', 234, struct ieee80211req)
+#define SIOCG80211 _IOWR('i', 235, struct ieee80211req)
+#define SIOCG80211STATS _IOWR('i', 236, struct ifreq)
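+
+/*
+ * Example (hypothetical userland sketch): fetching the current ssid
+ * with SIOCG80211. Socket setup ("s") and error handling are elided
+ * and "wlan0" is a placeholder interface name.
+ */
+#if 0
+	struct ieee80211req ireq;
+	uint8_t ssid[IEEE80211_NWID_LEN];
+
+	memset(&ireq, 0, sizeof(ireq));
+	strlcpy(ireq.i_name, "wlan0", sizeof(ireq.i_name));
+	ireq.i_type = IEEE80211_IOC_SSID;
+	ireq.i_val = -1;		/* current ssid */
+	ireq.i_data = ssid;
+	ireq.i_len = sizeof(ssid);
+	if (ioctl(s, SIOCG80211, &ireq) == 0)
+		printf("ssid: %.*s\n", ireq.i_len, ssid);
+#endif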
+
+#define IEEE80211_IOC_SSID 1
+#define IEEE80211_IOC_NUMSSIDS 2
+#define IEEE80211_IOC_WEP 3
+#define IEEE80211_WEP_NOSUP -1
+#define IEEE80211_WEP_OFF 0
+#define IEEE80211_WEP_ON 1
+#define IEEE80211_WEP_MIXED 2
+#define IEEE80211_IOC_WEPKEY 4
+#define IEEE80211_IOC_NUMWEPKEYS 5
+#define IEEE80211_IOC_WEPTXKEY 6
+#define IEEE80211_IOC_AUTHMODE 7
+#define IEEE80211_IOC_STATIONNAME 8
+#define IEEE80211_IOC_CHANNEL 9
+#define IEEE80211_IOC_POWERSAVE 10
+#define IEEE80211_POWERSAVE_NOSUP -1
+#define IEEE80211_POWERSAVE_OFF 0
+#define IEEE80211_POWERSAVE_CAM 1
+#define IEEE80211_POWERSAVE_PSP 2
+#define IEEE80211_POWERSAVE_PSP_CAM 3
+#define IEEE80211_POWERSAVE_ON IEEE80211_POWERSAVE_CAM
+#define IEEE80211_IOC_POWERSAVESLEEP 11
+#define IEEE80211_IOC_RTSTHRESHOLD 12
+#define IEEE80211_IOC_PROTMODE 13
+#define IEEE80211_PROTMODE_OFF 0
+#define IEEE80211_PROTMODE_CTS 1
+#define IEEE80211_PROTMODE_RTSCTS 2
+#define IEEE80211_IOC_TXPOWER 14 /* global tx power limit */
+#define IEEE80211_IOC_BSSID 15
+#define IEEE80211_IOC_ROAMING 16 /* roaming mode */
+#define IEEE80211_IOC_PRIVACY 17 /* privacy invoked */
+#define IEEE80211_IOC_DROPUNENCRYPTED 18 /* discard unencrypted frames */
+#define IEEE80211_IOC_WPAKEY 19
+#define IEEE80211_IOC_DELKEY 20
+#define IEEE80211_IOC_MLME 21
+/* 22 was IEEE80211_IOC_OPTIE, replaced by IEEE80211_IOC_APPIE */
+/* 23 was IEEE80211_IOC_SCAN_REQ */
+/* 24 was IEEE80211_IOC_SCAN_RESULTS */
+#define IEEE80211_IOC_COUNTERMEASURES 25 /* WPA/TKIP countermeasures */
+#define IEEE80211_IOC_WPA 26 /* WPA mode (0,1,2) */
+#define IEEE80211_IOC_CHANLIST 27 /* channel list */
+#define IEEE80211_IOC_WME 28 /* WME mode (on, off) */
+#define IEEE80211_IOC_HIDESSID 29 /* hide SSID mode (on, off) */
+#define IEEE80211_IOC_APBRIDGE 30 /* AP inter-sta bridging */
+/* 31-35,37-38 were for WPA authenticator settings */
+/* 36 was IEEE80211_IOC_DRIVER_CAPS */
+#define IEEE80211_IOC_WPAIE 39 /* WPA information element */
+#define IEEE80211_IOC_STA_STATS 40 /* per-station statistics */
+#define IEEE80211_IOC_MACCMD 41 /* MAC ACL operation */
+#define IEEE80211_IOC_CHANINFO 42 /* channel info list */
+#define IEEE80211_IOC_TXPOWMAX 43 /* max tx power for channel */
+#define IEEE80211_IOC_STA_TXPOW 44 /* per-station tx power limit */
+/* 45 was IEEE80211_IOC_STA_INFO */
+#define IEEE80211_IOC_WME_CWMIN 46 /* WME: ECWmin */
+#define IEEE80211_IOC_WME_CWMAX 47 /* WME: ECWmax */
+#define IEEE80211_IOC_WME_AIFS 48 /* WME: AIFSN */
+#define IEEE80211_IOC_WME_TXOPLIMIT 49 /* WME: txops limit */
+#define IEEE80211_IOC_WME_ACM 50 /* WME: ACM (bss only) */
+#define IEEE80211_IOC_WME_ACKPOLICY 51 /* WME: ACK policy (!bss only)*/
+#define IEEE80211_IOC_DTIM_PERIOD 52 /* DTIM period (beacons) */
+#define IEEE80211_IOC_BEACON_INTERVAL 53 /* beacon interval (ms) */
+#define IEEE80211_IOC_ADDMAC 54 /* add sta to MAC ACL table */
+#define IEEE80211_IOC_DELMAC 55 /* del sta from MAC ACL table */
+#define IEEE80211_IOC_PUREG 56 /* pure 11g (no 11b stations) */
+#define IEEE80211_IOC_FF 57 /* ATH fast frames (on, off) */
+#define IEEE80211_IOC_TURBOP 58 /* ATH turbo' (on, off) */
+#define IEEE80211_IOC_BGSCAN 59 /* bg scanning (on, off) */
+#define IEEE80211_IOC_BGSCAN_IDLE 60 /* bg scan idle threshold */
+#define IEEE80211_IOC_BGSCAN_INTERVAL 61 /* bg scan interval */
+#define IEEE80211_IOC_SCANVALID 65 /* scan cache valid threshold */
+/* 66-72 were IEEE80211_IOC_ROAM_* and IEEE80211_IOC_MCAST_RATE */
+#define IEEE80211_IOC_FRAGTHRESHOLD 73 /* tx fragmentation threshold */
+#define IEEE80211_IOC_BURST 75 /* packet bursting */
+#define IEEE80211_IOC_SCAN_RESULTS 76 /* get scan results */
+#define IEEE80211_IOC_BMISSTHRESHOLD 77 /* beacon miss threshold */
+#define IEEE80211_IOC_STA_INFO 78 /* station/neighbor info */
+#define IEEE80211_IOC_WPAIE2 79 /* WPA+RSN info elements */
+#define IEEE80211_IOC_CURCHAN 80 /* current channel */
+#define IEEE80211_IOC_SHORTGI 81 /* 802.11n half GI */
+#define IEEE80211_IOC_AMPDU 82 /* 802.11n A-MPDU (on, off) */
+#define IEEE80211_IOC_AMPDU_LIMIT 83 /* A-MPDU length limit */
+#define IEEE80211_IOC_AMPDU_DENSITY 84 /* A-MPDU density */
+#define IEEE80211_IOC_AMSDU 85 /* 802.11n A-MSDU (on, off) */
+#define IEEE80211_IOC_AMSDU_LIMIT 86 /* A-MSDU length limit */
+#define IEEE80211_IOC_PUREN 87 /* pure 11n (no legacy sta's) */
+#define IEEE80211_IOC_DOTH 88 /* 802.11h (on, off) */
+/* 89-91 were regulatory items */
+#define IEEE80211_IOC_HTCOMPAT 92 /* support pre-D1.10 HT ie's */
+#define IEEE80211_IOC_DWDS 93 /* DWDS/4-address handling */
+#define IEEE80211_IOC_INACTIVITY 94 /* sta inactivity handling */
+#define IEEE80211_IOC_APPIE 95 /* application IE's */
+#define IEEE80211_IOC_WPS 96 /* WPS operation */
+#define IEEE80211_IOC_TSN 97 /* TSN operation */
+#define IEEE80211_IOC_DEVCAPS 98 /* driver+device capabilities */
+#define IEEE80211_IOC_CHANSWITCH 99 /* start 11h channel switch */
+#define IEEE80211_IOC_DFS 100 /* DFS (on, off) */
+#define IEEE80211_IOC_DOTD 101 /* 802.11d (on, off) */
+#define IEEE80211_IOC_HTPROTMODE 102 /* HT protection (off, rts) */
+#define IEEE80211_IOC_SCAN_REQ 103 /* scan w/ specified params */
+#define IEEE80211_IOC_SCAN_CANCEL 104 /* cancel ongoing scan */
+#define IEEE80211_IOC_HTCONF 105 /* HT config (off, HT20, HT40)*/
+#define IEEE80211_IOC_REGDOMAIN 106 /* regulatory domain info */
+#define IEEE80211_IOC_ROAM 107 /* roaming params en masse */
+#define IEEE80211_IOC_TXPARAMS 108 /* tx parameters */
+#define IEEE80211_IOC_STA_VLAN 109 /* per-station vlan tag */
+#define IEEE80211_IOC_SMPS 110 /* MIMO power save */
+#define IEEE80211_IOC_RIFS 111 /* RIFS config (on, off) */
+#define IEEE80211_IOC_GREENFIELD 112 /* Greenfield (on, off) */
+#define IEEE80211_IOC_STBC 113 /* STBC Tx/RX (on, off) */
+
+#define IEEE80211_IOC_MESH_ID 170 /* mesh identifier */
+#define IEEE80211_IOC_MESH_AP 171 /* accepting peerings */
+#define IEEE80211_IOC_MESH_FWRD 172 /* forward frames */
+#define IEEE80211_IOC_MESH_PROTO 173 /* mesh protocols */
+#define IEEE80211_IOC_MESH_TTL 174 /* mesh TTL */
+#define IEEE80211_IOC_MESH_RTCMD 175 /* mesh routing table commands*/
+#define IEEE80211_IOC_MESH_PR_METRIC 176 /* mesh metric protocol */
+#define IEEE80211_IOC_MESH_PR_PATH 177 /* mesh path protocol */
+#define IEEE80211_IOC_MESH_PR_SIG 178 /* mesh sig protocol */
+#define IEEE80211_IOC_MESH_PR_CC 179 /* mesh congestion protocol */
+#define IEEE80211_IOC_MESH_PR_AUTH 180 /* mesh auth protocol */
+
+#define IEEE80211_IOC_HWMP_ROOTMODE 190 /* HWMP root mode */
+#define IEEE80211_IOC_HWMP_MAXHOPS 191 /* number of hops before drop */
+#define IEEE80211_IOC_HWMP_TTL 192 /* HWMP TTL */
+
+#define IEEE80211_IOC_TDMA_SLOT 201 /* TDMA: assigned slot */
+#define IEEE80211_IOC_TDMA_SLOTCNT 202 /* TDMA: slots in bss */
+#define IEEE80211_IOC_TDMA_SLOTLEN 203 /* TDMA: slot length (usecs) */
+#define IEEE80211_IOC_TDMA_BINTERVAL 204 /* TDMA: beacon intvl (slots) */
+
+/*
+ * Parameters for controlling a scan requested with
+ * IEEE80211_IOC_SCAN_REQ.
+ *
+ * Active scans cause ProbeRequest frames to be issued for each
+ * specified ssid and, by default, a broadcast ProbeRequest frame.
+ * The set of ssid's is specified in the request.
+ *
+ * By default the scan will cause a BSS to be joined (in station/adhoc
+ * mode) or a channel to be selected for operation (hostap mode).
+ * To disable that, specify IEEE80211_IOC_SCAN_NOPICK.
+ *
+ * If the station is currently associated to an AP then a scan request
+ * will cause the station to leave the current channel and potentially
+ * miss frames from the AP. Alternatively the station may notify the
+ * AP that it is going into power save mode before it leaves the channel.
+ * This ensures frames for the station are buffered by the AP. This is
+ * termed a ``bg scan'' and is requested with the IEEE80211_IOC_SCAN_BGSCAN
+ * flag. Background scans may take longer than foreground scans and may
+ * be preempted by traffic. If a station is not associated to an AP
+ * then a request for a background scan is automatically done in the
+ * foreground.
+ *
+ * The results of the scan request are cached by the system. This
+ * information is aged out and/or invalidated based on events like not
+ * being able to associate to an AP. To flush the current cache
+ * contents before doing a scan the IEEE80211_IOC_SCAN_FLUSH flag may
+ * be specified.
+ *
+ * By default the scan will be done until a suitable AP is located
+ * or a channel is found for use. A scan can also be constrained
+ * to be done once (IEEE80211_IOC_SCAN_ONCE) or to last for no more
+ * than a specified duration.
+ */
+struct ieee80211_scan_req {
+ int sr_flags;
+#define IEEE80211_IOC_SCAN_NOPICK 0x00001 /* scan only, no selection */
+#define IEEE80211_IOC_SCAN_ACTIVE 0x00002 /* active scan (probe req) */
+#define IEEE80211_IOC_SCAN_PICK1ST 0x00004 /* ``hey sailor'' mode */
+#define IEEE80211_IOC_SCAN_BGSCAN 0x00008 /* bg scan, exit ps at end */
+#define IEEE80211_IOC_SCAN_ONCE 0x00010 /* do one complete pass */
+#define IEEE80211_IOC_SCAN_NOBCAST 0x00020 /* don't send bcast probe req */
+#define IEEE80211_IOC_SCAN_NOJOIN 0x00040 /* no auto-sequencing */
+#define IEEE80211_IOC_SCAN_FLUSH 0x10000 /* flush scan cache first */
+#define IEEE80211_IOC_SCAN_CHECK 0x20000 /* check scan cache first */
+ u_int sr_duration; /* duration (ms) */
+#define IEEE80211_IOC_SCAN_DURATION_MIN 1
+#define IEEE80211_IOC_SCAN_DURATION_MAX 0x7fffffff
+#define IEEE80211_IOC_SCAN_FOREVER IEEE80211_IOC_SCAN_DURATION_MAX
+ u_int sr_mindwell; /* min channel dwelltime (ms) */
+ u_int sr_maxdwell; /* max channel dwelltime (ms) */
+ int sr_nssid;
+#define IEEE80211_IOC_SCAN_MAX_SSID 3
+ struct {
+ int len; /* length in bytes */
+ uint8_t ssid[IEEE80211_NWID_LEN]; /* ssid contents */
+ } sr_ssid[IEEE80211_IOC_SCAN_MAX_SSID];
+};
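+
+/*
+ * Example (illustrative): a one-shot active scan that flushes the
+ * cache first and does not auto-join; all values are placeholders.
+ */
+#if 0
+	struct ieee80211_scan_req sr;
+
+	memset(&sr, 0, sizeof(sr));
+	sr.sr_flags = IEEE80211_IOC_SCAN_ACTIVE | IEEE80211_IOC_SCAN_ONCE |
+	    IEEE80211_IOC_SCAN_NOPICK | IEEE80211_IOC_SCAN_FLUSH;
+	sr.sr_duration = IEEE80211_IOC_SCAN_FOREVER;
+	sr.sr_nssid = 0;		/* broadcast probe request only */
+#endif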
+
+/*
+ * Scan result data returned for IEEE80211_IOC_SCAN_RESULTS.
+ * Each result is a fixed size structure followed by a variable
+ * length SSID and one or more variable length information elements.
+ * The size of each variable length item is found in the fixed
+ * size structure and the entire length of the record is specified
+ * in isr_len. Result records are rounded to a multiple of 4 bytes.
+ */
+struct ieee80211req_scan_result {
+ uint16_t isr_len; /* total length (mult of 4) */
+ uint16_t isr_ie_off; /* offset to SSID+IE data */
+ uint16_t isr_ie_len; /* IE length */
+ uint16_t isr_freq; /* MHz */
+ uint16_t isr_flags; /* channel flags */
+ int8_t isr_noise;
+ int8_t isr_rssi;
+ uint8_t isr_intval; /* beacon interval */
+ uint8_t isr_capinfo; /* capabilities */
+ uint8_t isr_erp; /* ERP element */
+ uint8_t isr_bssid[IEEE80211_ADDR_LEN];
+ uint8_t isr_nrates;
+ uint8_t isr_rates[IEEE80211_RATE_MAXSIZE];
+ uint8_t isr_ssid_len; /* SSID length */
+ uint8_t isr_meshid_len; /* MESH ID length */
+ /* variable length SSID, followed by variable length MESH ID,
+ followed by IE data */
+};
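+
+/*
+ * Example (sketch): walking a buffer of scan results returned by
+ * IEEE80211_IOC_SCAN_RESULTS; "buf" and "len" are assumed to hold
+ * the ioctl output.
+ */
+#if 0
+	const uint8_t *cp = buf;
+
+	while (len >= sizeof(struct ieee80211req_scan_result)) {
+		const struct ieee80211req_scan_result *sr =
+		    (const struct ieee80211req_scan_result *)cp;
+
+		printf("%.*s\n", sr->isr_ssid_len, cp + sr->isr_ie_off);
+		cp += sr->isr_len;	/* already a multiple of 4 */
+		len -= sr->isr_len;
+	}
+#endif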
+
+/*
+ * Virtual AP cloning parameters. The parent device must
+ * be a vap-capable device. All parameters specified with
+ * the clone request are fixed for the lifetime of the vap.
+ *
+ * There are two flavors of WDS vaps: legacy and dynamic.
+ * Legacy WDS operation implements a static binding between
+ * two stations encapsulating traffic in 4-address frames.
+ * Dynamic WDS vaps are created when a station associates to
+ * an AP and sends a 4-address frame. If the AP vap is
+ * configured to support WDS then this will generate an
+ * event to user programs listening on the routing socket
+ * and a Dynamic WDS vap will be created to handle traffic
+ * to/from that station. In both cases the bssid of the
+ * peer must be specified when creating the vap.
+ *
+ * By default a vap will inherit the mac address/bssid of
+ * the underlying device. To request a unique address the
+ * IEEE80211_CLONE_BSSID flag should be supplied. This is
+ * meaningless for WDS vaps as they share the bssid of an
+ * AP vap that must otherwise exist. Note that some devices
+ * may not be able to support multiple addresses.
+ *
+ * Station mode vap's normally depend on the device to notice
+ * when the AP stops sending beacon frames. If IEEE80211_CLONE_NOBEACONS
+ * is specified the net80211 layer will do this in s/w. This
+ * is mostly useful when setting up a WDS repeater/extender where
+ * an AP vap is combined with a sta vap and the device isn't able
+ * to track beacon frames in hardware.
+ */
+struct ieee80211_clone_params {
+ char icp_parent[IFNAMSIZ]; /* parent device */
+ uint16_t icp_opmode; /* operating mode */
+ uint16_t icp_flags; /* see below */
+ uint8_t icp_bssid[IEEE80211_ADDR_LEN]; /* for WDS links */
+ uint8_t icp_macaddr[IEEE80211_ADDR_LEN];/* local address */
+};
+#define IEEE80211_CLONE_BSSID 0x0001 /* allocate unique mac/bssid */
+#define IEEE80211_CLONE_NOBEACONS 0x0002 /* don't setup beacon timers */
+#define IEEE80211_CLONE_WDSLEGACY 0x0004 /* legacy WDS processing */
+#define IEEE80211_CLONE_MACADDR 0x0008 /* use specified mac addr */
+#define IEEE80211_CLONE_TDMA 0x0010 /* operate in TDMA mode */
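+
+/*
+ * Example (hypothetical): clone parameters for creating a station vap
+ * on parent device "ath0" with a unique mac/bssid; these are normally
+ * handed to the kernel via the interface cloning ioctl.
+ */
+#if 0
+	struct ieee80211_clone_params cp;
+
+	memset(&cp, 0, sizeof(cp));
+	strlcpy(cp.icp_parent, "ath0", sizeof(cp.icp_parent));
+	cp.icp_opmode = IEEE80211_M_STA;
+	cp.icp_flags = IEEE80211_CLONE_BSSID;
+#endif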
+#endif /* __FreeBSD__ */
+
+#endif /* _NET80211_IEEE80211_IOCTL_HH_ */
diff --git a/rtems/freebsd/net80211/ieee80211_mesh.c b/rtems/freebsd/net80211/ieee80211_mesh.c
new file mode 100644
index 00000000..aa07fc20
--- /dev/null
+++ b/rtems/freebsd/net80211/ieee80211_mesh.c
@@ -0,0 +1,2755 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 2009 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * This software was developed by Rui Paulo under sponsorship from the
+ * FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+#include <rtems/freebsd/sys/cdefs.h>
+#ifdef __FreeBSD__
+__FBSDID("$FreeBSD$");
+#endif
+
+/*
+ * IEEE 802.11s Mesh Point (MBSS) support.
+ *
+ * Based on the March 2009 D3.0 802.11s draft specification.
+ */
+#include <rtems/freebsd/local/opt_inet.h>
+#include <rtems/freebsd/local/opt_wlan.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/kernel.h>
+
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/sockio.h>
+#include <rtems/freebsd/sys/endian.h>
+#include <rtems/freebsd/sys/errno.h>
+#include <rtems/freebsd/sys/proc.h>
+#include <rtems/freebsd/sys/sysctl.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/if_media.h>
+#include <rtems/freebsd/net/if_llc.h>
+#include <rtems/freebsd/net/ethernet.h>
+
+#include <rtems/freebsd/net80211/ieee80211_var.h>
+#include <rtems/freebsd/net80211/ieee80211_action.h>
+#include <rtems/freebsd/net80211/ieee80211_input.h>
+#include <rtems/freebsd/net80211/ieee80211_mesh.h>
+
+static void mesh_rt_flush_invalid(struct ieee80211vap *);
+static int mesh_select_proto_path(struct ieee80211vap *, const char *);
+static int mesh_select_proto_metric(struct ieee80211vap *, const char *);
+static void mesh_vattach(struct ieee80211vap *);
+static int mesh_newstate(struct ieee80211vap *, enum ieee80211_state, int);
+static void mesh_rt_cleanup_cb(void *);
+static void mesh_linkchange(struct ieee80211_node *,
+ enum ieee80211_mesh_mlstate);
+static void mesh_checkid(void *, struct ieee80211_node *);
+static uint32_t mesh_generateid(struct ieee80211vap *);
+static int mesh_checkpseq(struct ieee80211vap *,
+ const uint8_t [IEEE80211_ADDR_LEN], uint32_t);
+static struct ieee80211_node *
+ mesh_find_txnode(struct ieee80211vap *,
+ const uint8_t [IEEE80211_ADDR_LEN]);
+static void mesh_forward(struct ieee80211vap *, struct mbuf *,
+ const struct ieee80211_meshcntl *);
+static int mesh_input(struct ieee80211_node *, struct mbuf *, int, int);
+static void mesh_recv_mgmt(struct ieee80211_node *, struct mbuf *, int,
+ int, int);
+static void mesh_peer_timeout_setup(struct ieee80211_node *);
+static void mesh_peer_timeout_backoff(struct ieee80211_node *);
+static void mesh_peer_timeout_cb(void *);
+static __inline void
+ mesh_peer_timeout_stop(struct ieee80211_node *);
+static int mesh_verify_meshid(struct ieee80211vap *, const uint8_t *);
+static int mesh_verify_meshconf(struct ieee80211vap *, const uint8_t *);
+static int mesh_verify_meshpeer(struct ieee80211vap *, uint8_t,
+ const uint8_t *);
+uint32_t mesh_airtime_calc(struct ieee80211_node *);
+
+/*
+ * Timeout values come from the specification and are in milliseconds.
+ */
+SYSCTL_NODE(_net_wlan, OID_AUTO, mesh, CTLFLAG_RD, 0,
+ "IEEE 802.11s parameters");
+static int ieee80211_mesh_retrytimeout = -1;
+SYSCTL_PROC(_net_wlan_mesh, OID_AUTO, retrytimeout, CTLTYPE_INT | CTLFLAG_RW,
+ &ieee80211_mesh_retrytimeout, 0, ieee80211_sysctl_msecs_ticks, "I",
+ "Retry timeout (msec)");
+static int ieee80211_mesh_holdingtimeout = -1;
+SYSCTL_PROC(_net_wlan_mesh, OID_AUTO, holdingtimeout, CTLTYPE_INT | CTLFLAG_RW,
+ &ieee80211_mesh_holdingtimeout, 0, ieee80211_sysctl_msecs_ticks, "I",
+ "Holding state timeout (msec)");
+static int ieee80211_mesh_confirmtimeout = -1;
+SYSCTL_PROC(_net_wlan_mesh, OID_AUTO, confirmtimeout, CTLTYPE_INT | CTLFLAG_RW,
+ &ieee80211_mesh_confirmtimeout, 0, ieee80211_sysctl_msecs_ticks, "I",
+ "Confirm state timeout (msec)");
+static int ieee80211_mesh_maxretries = 2;
+SYSCTL_INT(_net_wlan_mesh, OID_AUTO, maxretries, CTLTYPE_INT | CTLFLAG_RW,
+ &ieee80211_mesh_maxretries, 0,
+ "Maximum retries during peer link establishment");
+
+static const uint8_t broadcastaddr[IEEE80211_ADDR_LEN] =
+ { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
+
+static ieee80211_recv_action_func mesh_recv_action_meshpeering_open;
+static ieee80211_recv_action_func mesh_recv_action_meshpeering_confirm;
+static ieee80211_recv_action_func mesh_recv_action_meshpeering_close;
+static ieee80211_recv_action_func mesh_recv_action_meshlmetric_req;
+static ieee80211_recv_action_func mesh_recv_action_meshlmetric_rep;
+
+static ieee80211_send_action_func mesh_send_action_meshpeering_open;
+static ieee80211_send_action_func mesh_send_action_meshpeering_confirm;
+static ieee80211_send_action_func mesh_send_action_meshpeering_close;
+static ieee80211_send_action_func mesh_send_action_meshlink_request;
+static ieee80211_send_action_func mesh_send_action_meshlink_reply;
+
+static const struct ieee80211_mesh_proto_metric mesh_metric_airtime = {
+ .mpm_descr = "AIRTIME",
+ .mpm_ie = IEEE80211_MESHCONF_METRIC_AIRTIME,
+ .mpm_metric = mesh_airtime_calc,
+};
+
+static struct ieee80211_mesh_proto_path mesh_proto_paths[4];
+static struct ieee80211_mesh_proto_metric mesh_proto_metrics[4];
+
+#define MESH_RT_LOCK(ms) mtx_lock(&(ms)->ms_rt_lock)
+#define MESH_RT_LOCK_ASSERT(ms) mtx_assert(&(ms)->ms_rt_lock, MA_OWNED)
+#define MESH_RT_UNLOCK(ms) mtx_unlock(&(ms)->ms_rt_lock)
+
+MALLOC_DEFINE(M_80211_MESH_RT, "80211mesh", "802.11s routing table");
+
+/*
+ * Helper functions to manipulate the Mesh routing table.
+ */
+
+static struct ieee80211_mesh_route *
+mesh_rt_find_locked(struct ieee80211_mesh_state *ms,
+ const uint8_t dest[IEEE80211_ADDR_LEN])
+{
+ struct ieee80211_mesh_route *rt;
+
+ MESH_RT_LOCK_ASSERT(ms);
+
+ TAILQ_FOREACH(rt, &ms->ms_routes, rt_next) {
+ if (IEEE80211_ADDR_EQ(dest, rt->rt_dest))
+ return rt;
+ }
+ return NULL;
+}
+
+static struct ieee80211_mesh_route *
+mesh_rt_add_locked(struct ieee80211_mesh_state *ms,
+ const uint8_t dest[IEEE80211_ADDR_LEN])
+{
+ struct ieee80211_mesh_route *rt;
+
+ KASSERT(!IEEE80211_ADDR_EQ(broadcastaddr, dest),
+ ("%s: adding broadcast to the routing table", __func__));
+
+ MESH_RT_LOCK_ASSERT(ms);
+
+ rt = malloc(ALIGN(sizeof(struct ieee80211_mesh_route)) +
+ ms->ms_ppath->mpp_privlen, M_80211_MESH_RT, M_NOWAIT | M_ZERO);
+ if (rt != NULL) {
+ IEEE80211_ADDR_COPY(rt->rt_dest, dest);
+ rt->rt_priv = (void *)ALIGN(&rt[1]);
+ rt->rt_crtime = ticks;
+ TAILQ_INSERT_TAIL(&ms->ms_routes, rt, rt_next);
+ }
+ return rt;
+}
+
+struct ieee80211_mesh_route *
+ieee80211_mesh_rt_find(struct ieee80211vap *vap,
+ const uint8_t dest[IEEE80211_ADDR_LEN])
+{
+ struct ieee80211_mesh_state *ms = vap->iv_mesh;
+ struct ieee80211_mesh_route *rt;
+
+ MESH_RT_LOCK(ms);
+ rt = mesh_rt_find_locked(ms, dest);
+ MESH_RT_UNLOCK(ms);
+ return rt;
+}
+
+struct ieee80211_mesh_route *
+ieee80211_mesh_rt_add(struct ieee80211vap *vap,
+ const uint8_t dest[IEEE80211_ADDR_LEN])
+{
+ struct ieee80211_mesh_state *ms = vap->iv_mesh;
+ struct ieee80211_mesh_route *rt;
+
+ KASSERT(ieee80211_mesh_rt_find(vap, dest) == NULL,
+ ("%s: duplicate entry in the routing table", __func__));
+ KASSERT(!IEEE80211_ADDR_EQ(vap->iv_myaddr, dest),
+ ("%s: adding self to the routing table", __func__));
+
+ MESH_RT_LOCK(ms);
+ rt = mesh_rt_add_locked(ms, dest);
+ MESH_RT_UNLOCK(ms);
+ return rt;
+}
+
+/*
+ * Add a proxy route (as needed) for the specified destination.
+ */
+void
+ieee80211_mesh_proxy_check(struct ieee80211vap *vap,
+ const uint8_t dest[IEEE80211_ADDR_LEN])
+{
+ struct ieee80211_mesh_state *ms = vap->iv_mesh;
+ struct ieee80211_mesh_route *rt;
+
+ MESH_RT_LOCK(ms);
+ rt = mesh_rt_find_locked(ms, dest);
+ if (rt == NULL) {
+ rt = mesh_rt_add_locked(ms, dest);
+ if (rt == NULL) {
+ IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_MESH, dest,
+ "%s", "unable to add proxy entry");
+ vap->iv_stats.is_mesh_rtaddfailed++;
+ } else {
+ IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_MESH, dest,
+ "%s", "add proxy entry");
+ IEEE80211_ADDR_COPY(rt->rt_nexthop, vap->iv_myaddr);
+ rt->rt_flags |= IEEE80211_MESHRT_FLAGS_VALID
+ | IEEE80211_MESHRT_FLAGS_PROXY;
+ }
+ /* XXX assert PROXY? */
+ } else if ((rt->rt_flags & IEEE80211_MESHRT_FLAGS_VALID) == 0) {
+ struct ieee80211com *ic = vap->iv_ic;
+ /*
+ * Fix existing entry created by received frames from
+ * stations that have some memory of dest. We also
+ * flush any frames held on the staging queue; delivering
+ * them is too much trouble right now.
+ */
+ IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_MESH, dest,
+ "%s", "fix proxy entry");
+ IEEE80211_ADDR_COPY(rt->rt_nexthop, vap->iv_myaddr);
+ rt->rt_flags |= IEEE80211_MESHRT_FLAGS_VALID
+ | IEEE80211_MESHRT_FLAGS_PROXY;
+ /* XXX belongs in hwmp */
+ ieee80211_ageq_drain_node(&ic->ic_stageq,
+ (void *)(uintptr_t) ieee80211_mac_hash(ic, dest));
+ /* XXX stat? */
+ }
+ MESH_RT_UNLOCK(ms);
+}
+
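+/*
+ * Unlink a routing entry and free it; the routing table lock must
+ * be held by the caller.
+ */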
+static __inline void
+mesh_rt_del(struct ieee80211_mesh_state *ms, struct ieee80211_mesh_route *rt)
+{
+ TAILQ_REMOVE(&ms->ms_routes, rt, rt_next);
+ free(rt, M_80211_MESH_RT);
+}
+
+void
+ieee80211_mesh_rt_del(struct ieee80211vap *vap,
+ const uint8_t dest[IEEE80211_ADDR_LEN])
+{
+ struct ieee80211_mesh_state *ms = vap->iv_mesh;
+ struct ieee80211_mesh_route *rt, *next;
+
+ MESH_RT_LOCK(ms);
+ TAILQ_FOREACH_SAFE(rt, &ms->ms_routes, rt_next, next) {
+ if (IEEE80211_ADDR_EQ(rt->rt_dest, dest)) {
+ mesh_rt_del(ms, rt);
+ MESH_RT_UNLOCK(ms);
+ return;
+ }
+ }
+ MESH_RT_UNLOCK(ms);
+}
+
+void
+ieee80211_mesh_rt_flush(struct ieee80211vap *vap)
+{
+ struct ieee80211_mesh_state *ms = vap->iv_mesh;
+ struct ieee80211_mesh_route *rt, *next;
+
+ if (ms == NULL)
+ return;
+ MESH_RT_LOCK(ms);
+ TAILQ_FOREACH_SAFE(rt, &ms->ms_routes, rt_next, next)
+ mesh_rt_del(ms, rt);
+ MESH_RT_UNLOCK(ms);
+}
+
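+/*
+ * Flush all routing entries that use peer as their next hop.
+ */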
+void
+ieee80211_mesh_rt_flush_peer(struct ieee80211vap *vap,
+ const uint8_t peer[IEEE80211_ADDR_LEN])
+{
+ struct ieee80211_mesh_state *ms = vap->iv_mesh;
+ struct ieee80211_mesh_route *rt, *next;
+
+ MESH_RT_LOCK(ms);
+ TAILQ_FOREACH_SAFE(rt, &ms->ms_routes, rt_next, next) {
+ if (IEEE80211_ADDR_EQ(rt->rt_nexthop, peer))
+ mesh_rt_del(ms, rt);
+ }
+ MESH_RT_UNLOCK(ms);
+}
+
+/*
+ * Flush expired routing entries, i.e. those in invalid state for
+ * some time.
+ */
+static void
+mesh_rt_flush_invalid(struct ieee80211vap *vap)
+{
+ struct ieee80211_mesh_state *ms = vap->iv_mesh;
+ struct ieee80211_mesh_route *rt, *next;
+
+ if (ms == NULL)
+ return;
+ MESH_RT_LOCK(ms);
+ TAILQ_FOREACH_SAFE(rt, &ms->ms_routes, rt_next, next) {
+ if ((rt->rt_flags & IEEE80211_MESHRT_FLAGS_VALID) == 0 &&
+ ticks - rt->rt_crtime >= ms->ms_ppath->mpp_inact)
+ mesh_rt_del(ms, rt);
+ }
+ MESH_RT_UNLOCK(ms);
+}
+
+#define N(a) (sizeof(a) / sizeof(a[0]))
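+/*
+ * Register a path selection protocol in the first inactive global
+ * slot; returns EEXIST if the name is already registered and ENOSPC
+ * if the table is full.
+ */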
+int
+ieee80211_mesh_register_proto_path(const struct ieee80211_mesh_proto_path *mpp)
+{
+ int i, firstempty = -1;
+
+ for (i = 0; i < N(mesh_proto_paths); i++) {
+ if (strncmp(mpp->mpp_descr, mesh_proto_paths[i].mpp_descr,
+ IEEE80211_MESH_PROTO_DSZ) == 0)
+ return EEXIST;
+ if (!mesh_proto_paths[i].mpp_active && firstempty == -1)
+ firstempty = i;
+ }
+ if (firstempty < 0)
+ return ENOSPC;
+ memcpy(&mesh_proto_paths[firstempty], mpp, sizeof(*mpp));
+ mesh_proto_paths[firstempty].mpp_active = 1;
+ return 0;
+}
+
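+/*
+ * Register a link metric protocol; same slot and error semantics
+ * as ieee80211_mesh_register_proto_path above.
+ */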
+int
+ieee80211_mesh_register_proto_metric(const struct
+ ieee80211_mesh_proto_metric *mpm)
+{
+ int i, firstempty = -1;
+
+ for (i = 0; i < N(mesh_proto_metrics); i++) {
+ if (strncmp(mpm->mpm_descr, mesh_proto_metrics[i].mpm_descr,
+ IEEE80211_MESH_PROTO_DSZ) == 0)
+ return EEXIST;
+ if (!mesh_proto_metrics[i].mpm_active && firstempty == -1)
+ firstempty = i;
+ }
+ if (firstempty < 0)
+ return ENOSPC;
+ memcpy(&mesh_proto_metrics[firstempty], mpm, sizeof(*mpm));
+ mesh_proto_metrics[firstempty].mpm_active = 1;
+ return 0;
+}
+
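+/*
+ * Bind a registered path selection protocol to the vap by name
+ * (case-insensitive); the metric variant below works the same way.
+ */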
+static int
+mesh_select_proto_path(struct ieee80211vap *vap, const char *name)
+{
+ struct ieee80211_mesh_state *ms = vap->iv_mesh;
+ int i;
+
+ for (i = 0; i < N(mesh_proto_paths); i++) {
+ if (strcasecmp(mesh_proto_paths[i].mpp_descr, name) == 0) {
+ ms->ms_ppath = &mesh_proto_paths[i];
+ return 0;
+ }
+ }
+ return ENOENT;
+}
+
+static int
+mesh_select_proto_metric(struct ieee80211vap *vap, const char *name)
+{
+ struct ieee80211_mesh_state *ms = vap->iv_mesh;
+ int i;
+
+ for (i = 0; i < N(mesh_proto_metrics); i++) {
+ if (strcasecmp(mesh_proto_metrics[i].mpm_descr, name) == 0) {
+ ms->ms_pmetric = &mesh_proto_metrics[i];
+ return 0;
+ }
+ }
+ return ENOENT;
+}
+#undef N
+
+static void
+ieee80211_mesh_init(void)
+{
+
+ memset(mesh_proto_paths, 0, sizeof(mesh_proto_paths));
+ memset(mesh_proto_metrics, 0, sizeof(mesh_proto_metrics));
+
+ /*
+ * Set up mesh parameters that depend on the clock frequency.
+ */
+ ieee80211_mesh_retrytimeout = msecs_to_ticks(40);
+ ieee80211_mesh_holdingtimeout = msecs_to_ticks(40);
+ ieee80211_mesh_confirmtimeout = msecs_to_ticks(40);
+
+ /*
+ * Register action frame handlers.
+ */
+ ieee80211_recv_action_register(IEEE80211_ACTION_CAT_MESHPEERING,
+ IEEE80211_ACTION_MESHPEERING_OPEN,
+ mesh_recv_action_meshpeering_open);
+ ieee80211_recv_action_register(IEEE80211_ACTION_CAT_MESHPEERING,
+ IEEE80211_ACTION_MESHPEERING_CONFIRM,
+ mesh_recv_action_meshpeering_confirm);
+ ieee80211_recv_action_register(IEEE80211_ACTION_CAT_MESHPEERING,
+ IEEE80211_ACTION_MESHPEERING_CLOSE,
+ mesh_recv_action_meshpeering_close);
+ ieee80211_recv_action_register(IEEE80211_ACTION_CAT_MESHLMETRIC,
+ IEEE80211_ACTION_MESHLMETRIC_REQ, mesh_recv_action_meshlmetric_req);
+ ieee80211_recv_action_register(IEEE80211_ACTION_CAT_MESHLMETRIC,
+ IEEE80211_ACTION_MESHLMETRIC_REP, mesh_recv_action_meshlmetric_rep);
+
+ ieee80211_send_action_register(IEEE80211_ACTION_CAT_MESHPEERING,
+ IEEE80211_ACTION_MESHPEERING_OPEN,
+ mesh_send_action_meshpeering_open);
+ ieee80211_send_action_register(IEEE80211_ACTION_CAT_MESHPEERING,
+ IEEE80211_ACTION_MESHPEERING_CONFIRM,
+ mesh_send_action_meshpeering_confirm);
+ ieee80211_send_action_register(IEEE80211_ACTION_CAT_MESHPEERING,
+ IEEE80211_ACTION_MESHPEERING_CLOSE,
+ mesh_send_action_meshpeering_close);
+ ieee80211_send_action_register(IEEE80211_ACTION_CAT_MESHLMETRIC,
+ IEEE80211_ACTION_MESHLMETRIC_REQ,
+ mesh_send_action_meshlink_request);
+ ieee80211_send_action_register(IEEE80211_ACTION_CAT_MESHLMETRIC,
+ IEEE80211_ACTION_MESHLMETRIC_REP,
+ mesh_send_action_meshlink_reply);
+
+ /*
+ * Register Airtime Link Metric.
+ */
+ ieee80211_mesh_register_proto_metric(&mesh_metric_airtime);
+}
+SYSINIT(wlan_mesh, SI_SUB_DRIVERS, SI_ORDER_FIRST, ieee80211_mesh_init, NULL);
+
+void
+ieee80211_mesh_attach(struct ieee80211com *ic)
+{
+ ic->ic_vattach[IEEE80211_M_MBSS] = mesh_vattach;
+}
+
+void
+ieee80211_mesh_detach(struct ieee80211com *ic)
+{
+}
+
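+/*
+ * Per-node callback used on vap detach: close an established peer
+ * link, stop its timer and flush frames staged for the node.
+ */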
+static void
+mesh_vdetach_peers(void *arg, struct ieee80211_node *ni)
+{
+ struct ieee80211com *ic = ni->ni_ic;
+ uint16_t args[3];
+
+ if (ni->ni_mlstate == IEEE80211_NODE_MESH_ESTABLISHED) {
+ args[0] = ni->ni_mlpid;
+ args[1] = ni->ni_mllid;
+ args[2] = IEEE80211_REASON_PEER_LINK_CANCELED;
+ ieee80211_send_action(ni,
+ IEEE80211_ACTION_CAT_MESHPEERING,
+ IEEE80211_ACTION_MESHPEERING_CLOSE,
+ args);
+ }
+ callout_drain(&ni->ni_mltimer);
+ /* XXX belongs in hwmp */
+ ieee80211_ageq_drain_node(&ic->ic_stageq,
+ (void *)(uintptr_t) ieee80211_mac_hash(ic, ni->ni_macaddr));
+}
+
+static void
+mesh_vdetach(struct ieee80211vap *vap)
+{
+ struct ieee80211_mesh_state *ms = vap->iv_mesh;
+
+ callout_drain(&ms->ms_cleantimer);
+ ieee80211_iterate_nodes(&vap->iv_ic->ic_sta, mesh_vdetach_peers,
+ NULL);
+ ieee80211_mesh_rt_flush(vap);
+ mtx_destroy(&ms->ms_rt_lock);
+ ms->ms_ppath->mpp_vdetach(vap);
+ free(vap->iv_mesh, M_80211_VAP);
+ vap->iv_mesh = NULL;
+}
+
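+/*
+ * Allocate per-vap mesh state, install the MBSS vap methods and
+ * bind the default metric (AIRTIME) and path (HWMP) protocols.
+ */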
+static void
+mesh_vattach(struct ieee80211vap *vap)
+{
+ struct ieee80211_mesh_state *ms;
+ vap->iv_newstate = mesh_newstate;
+ vap->iv_input = mesh_input;
+ vap->iv_opdetach = mesh_vdetach;
+ vap->iv_recv_mgmt = mesh_recv_mgmt;
+ ms = malloc(sizeof(struct ieee80211_mesh_state), M_80211_VAP,
+ M_NOWAIT | M_ZERO);
+ if (ms == NULL) {
+ printf("%s: couldn't alloc MBSS state\n", __func__);
+ return;
+ }
+ vap->iv_mesh = ms;
+ ms->ms_seq = 0;
+ ms->ms_flags = (IEEE80211_MESHFLAGS_AP | IEEE80211_MESHFLAGS_FWD);
+ ms->ms_ttl = IEEE80211_MESH_DEFAULT_TTL;
+ TAILQ_INIT(&ms->ms_routes);
+ mtx_init(&ms->ms_rt_lock, "MBSS", "802.11s routing table", MTX_DEF);
+ callout_init(&ms->ms_cleantimer, CALLOUT_MPSAFE);
+ mesh_select_proto_metric(vap, "AIRTIME");
+ KASSERT(ms->ms_pmetric, ("ms_pmetric == NULL"));
+ mesh_select_proto_path(vap, "HWMP");
+ KASSERT(ms->ms_ppath, ("ms_ppath == NULL"));
+ ms->ms_ppath->mpp_vattach(vap);
+}
+
+/*
+ * IEEE80211_M_MBSS vap state machine handler.
+ */
+static int
+mesh_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
+{
+ struct ieee80211_mesh_state *ms = vap->iv_mesh;
+ struct ieee80211com *ic = vap->iv_ic;
+ struct ieee80211_node *ni;
+ enum ieee80211_state ostate;
+
+ IEEE80211_LOCK_ASSERT(ic);
+
+ ostate = vap->iv_state;
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_STATE, "%s: %s -> %s (%d)\n",
+ __func__, ieee80211_state_name[ostate],
+ ieee80211_state_name[nstate], arg);
+ vap->iv_state = nstate; /* state transition */
+ if (ostate != IEEE80211_S_SCAN)
+ ieee80211_cancel_scan(vap); /* background scan */
+ ni = vap->iv_bss; /* NB: no reference held */
+ if (nstate != IEEE80211_S_RUN && ostate == IEEE80211_S_RUN)
+ callout_drain(&ms->ms_cleantimer);
+ switch (nstate) {
+ case IEEE80211_S_INIT:
+ switch (ostate) {
+ case IEEE80211_S_SCAN:
+ ieee80211_cancel_scan(vap);
+ break;
+ case IEEE80211_S_CAC:
+ ieee80211_dfs_cac_stop(vap);
+ break;
+ case IEEE80211_S_RUN:
+ ieee80211_iterate_nodes(&ic->ic_sta,
+ mesh_vdetach_peers, NULL);
+ break;
+ default:
+ break;
+ }
+ if (ostate != IEEE80211_S_INIT) {
+ /* NB: optimize INIT -> INIT case */
+ ieee80211_reset_bss(vap);
+ ieee80211_mesh_rt_flush(vap);
+ }
+ break;
+ case IEEE80211_S_SCAN:
+ switch (ostate) {
+ case IEEE80211_S_INIT:
+ if (vap->iv_des_chan != IEEE80211_CHAN_ANYC &&
+ !IEEE80211_IS_CHAN_RADAR(vap->iv_des_chan) &&
+ ms->ms_idlen != 0) {
+ /*
+ * Already have a channel and a mesh ID; bypass
+ * the scan and startup immediately.
+ */
+ ieee80211_create_ibss(vap, vap->iv_des_chan);
+ break;
+ }
+ /*
+ * Initiate a scan. We can come here as a result
+ * of an IEEE80211_IOC_SCAN_REQ too in which case
+ * the vap will be marked with IEEE80211_FEXT_SCANREQ
+ * and the scan request parameters will be present
+ * in iv_scanreq. Otherwise we do the default.
+ */
+ if (vap->iv_flags_ext & IEEE80211_FEXT_SCANREQ) {
+ ieee80211_check_scan(vap,
+ vap->iv_scanreq_flags,
+ vap->iv_scanreq_duration,
+ vap->iv_scanreq_mindwell,
+ vap->iv_scanreq_maxdwell,
+ vap->iv_scanreq_nssid, vap->iv_scanreq_ssid);
+ vap->iv_flags_ext &= ~IEEE80211_FEXT_SCANREQ;
+ } else
+ ieee80211_check_scan_current(vap);
+ break;
+ default:
+ break;
+ }
+ break;
+ case IEEE80211_S_CAC:
+ /*
+ * Start CAC on a DFS channel. We come here when starting
+ * a bss on a DFS channel (see ieee80211_create_ibss).
+ */
+ ieee80211_dfs_cac_start(vap);
+ break;
+ case IEEE80211_S_RUN:
+ switch (ostate) {
+ case IEEE80211_S_INIT:
+ /*
+ * Already have a channel; bypass the
+ * scan and startup immediately.
+ * Note that ieee80211_create_ibss will call
+ * back to do a RUN->RUN state change.
+ */
+ ieee80211_create_ibss(vap,
+ ieee80211_ht_adjust_channel(ic,
+ ic->ic_curchan, vap->iv_flags_ht));
+ /* NB: iv_bss is changed on return */
+ break;
+ case IEEE80211_S_CAC:
+ /*
+ * NB: This is the normal state change when CAC
+ * expires and no radar was detected; no need to
+ * clear the CAC timer as it's already expired.
+ */
+ /* fall thru... */
+ case IEEE80211_S_CSA:
+#if 0
+ /*
+ * Shorten inactivity timer of associated stations
+ * to weed out sta's that don't follow a CSA.
+ */
+ ieee80211_iterate_nodes(&ic->ic_sta, sta_csa, vap);
+#endif
+ /*
+ * Update bss node channel to reflect where
+ * we landed after CSA.
+ */
+ ieee80211_node_set_chan(vap->iv_bss,
+ ieee80211_ht_adjust_channel(ic, ic->ic_curchan,
+ ieee80211_htchanflags(vap->iv_bss->ni_chan)));
+ /* XXX bypass debug msgs */
+ break;
+ case IEEE80211_S_SCAN:
+ case IEEE80211_S_RUN:
+#ifdef IEEE80211_DEBUG
+ if (ieee80211_msg_debug(vap)) {
+ struct ieee80211_node *ni = vap->iv_bss;
+ ieee80211_note(vap,
+ "synchronized with %s meshid ",
+ ether_sprintf(ni->ni_meshid));
+ ieee80211_print_essid(ni->ni_meshid,
+ ni->ni_meshidlen);
+ /* XXX MCS/HT */
+ printf(" channel %d\n",
+ ieee80211_chan2ieee(ic, ic->ic_curchan));
+ }
+#endif
+ break;
+ default:
+ break;
+ }
+ ieee80211_node_authorize(vap->iv_bss);
+ callout_reset(&ms->ms_cleantimer, ms->ms_ppath->mpp_inact,
+ mesh_rt_cleanup_cb, vap);
+ break;
+ default:
+ break;
+ }
+ /* NB: ostate not nstate */
+ ms->ms_ppath->mpp_newstate(vap, ostate, arg);
+ return 0;
+}
+
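+/*
+ * Periodic callout: purge routing entries that have been invalid
+ * for too long and re-arm the cleanup timer.
+ */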
+static void
+mesh_rt_cleanup_cb(void *arg)
+{
+ struct ieee80211vap *vap = arg;
+ struct ieee80211_mesh_state *ms = vap->iv_mesh;
+
+ mesh_rt_flush_invalid(vap);
+ callout_reset(&ms->ms_cleantimer, ms->ms_ppath->mpp_inact,
+ mesh_rt_cleanup_cb, vap);
+}
+
+/*
+ * Helper function to note the Mesh Peer Link FSM change.
+ */
+static void
+mesh_linkchange(struct ieee80211_node *ni, enum ieee80211_mesh_mlstate state)
+{
+ struct ieee80211vap *vap = ni->ni_vap;
+ struct ieee80211_mesh_state *ms = vap->iv_mesh;
+#ifdef IEEE80211_DEBUG
+ static const char *meshlinkstates[] = {
+ [IEEE80211_NODE_MESH_IDLE] = "IDLE",
+ [IEEE80211_NODE_MESH_OPENSNT] = "OPEN SENT",
+ [IEEE80211_NODE_MESH_OPENRCV] = "OPEN RECEIVED",
+ [IEEE80211_NODE_MESH_CONFIRMRCV] = "CONFIRM RECEIVED",
+ [IEEE80211_NODE_MESH_ESTABLISHED] = "ESTABLISHED",
+ [IEEE80211_NODE_MESH_HOLDING] = "HOLDING"
+ };
+#endif
+ IEEE80211_NOTE(vap, IEEE80211_MSG_MESH,
+ ni, "peer link: %s -> %s",
+ meshlinkstates[ni->ni_mlstate], meshlinkstates[state]);
+
+ /* track neighbor count */
+ if (state == IEEE80211_NODE_MESH_ESTABLISHED &&
+ ni->ni_mlstate != IEEE80211_NODE_MESH_ESTABLISHED) {
+ KASSERT(ms->ms_neighbors < 65535, ("neighbor count overflow"));
+ ms->ms_neighbors++;
+ ieee80211_beacon_notify(vap, IEEE80211_BEACON_MESHCONF);
+ } else if (ni->ni_mlstate == IEEE80211_NODE_MESH_ESTABLISHED &&
+ state != IEEE80211_NODE_MESH_ESTABLISHED) {
+ KASSERT(ms->ms_neighbors > 0, ("neighbor count 0"));
+ ms->ms_neighbors--;
+ ieee80211_beacon_notify(vap, IEEE80211_BEACON_MESHCONF);
+ }
+ ni->ni_mlstate = state;
+ switch (state) {
+ case IEEE80211_NODE_MESH_HOLDING:
+ ms->ms_ppath->mpp_peerdown(ni);
+ break;
+ case IEEE80211_NODE_MESH_ESTABLISHED:
+ ieee80211_mesh_discover(vap, ni->ni_macaddr, NULL);
+ break;
+ default:
+ break;
+ }
+}
+
+/*
+ * Helper function to generate a unique local ID required for mesh
+ * peer establishment.
+ */
+static void
+mesh_checkid(void *arg, struct ieee80211_node *ni)
+{
+ uint16_t *r = arg;
+
+ if (*r == ni->ni_mllid)
+ *(uint16_t *)arg = 0;
+}
+
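+/*
+ * Draw a random 16-bit ID, rejecting values already in use as a
+ * neighbor's link ID; gives up and returns 0 after a few attempts.
+ */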
+static uint32_t
+mesh_generateid(struct ieee80211vap *vap)
+{
+ int maxiter = 4;
+ uint16_t r;
+
+ do {
+ get_random_bytes(&r, 2);
+ ieee80211_iterate_nodes(&vap->iv_ic->ic_sta, mesh_checkid, &r);
+ maxiter--;
+ } while (r == 0 && maxiter > 0);
+ return r;
+}
+
+/*
+ * Check whether we have already received this frame by comparing
+ * its mesh sequence number against the last one recorded for the
+ * transmitting source; unseen sources get a new routing entry.
+ * Returns 0 if the frame is to be accepted, 1 otherwise.
+ */
+static int
+mesh_checkpseq(struct ieee80211vap *vap,
+ const uint8_t source[IEEE80211_ADDR_LEN], uint32_t seq)
+{
+ struct ieee80211_mesh_route *rt;
+
+ rt = ieee80211_mesh_rt_find(vap, source);
+ if (rt == NULL) {
+ rt = ieee80211_mesh_rt_add(vap, source);
+ if (rt == NULL) {
+ IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_MESH, source,
+ "%s", "add mcast route failed");
+ vap->iv_stats.is_mesh_rtaddfailed++;
+ return 1;
+ }
+ IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_MESH, source,
+ "add mcast route, mesh seqno %d", seq);
+ rt->rt_lastmseq = seq;
+ return 0;
+ }
+ if (IEEE80211_MESH_SEQ_GEQ(rt->rt_lastmseq, seq)) {
+ return 1;
+ } else {
+ rt->rt_lastmseq = seq;
+ return 0;
+ }
+}
+
+/*
+ * Iterate the routing table and locate the next hop.
+ */
+static struct ieee80211_node *
+mesh_find_txnode(struct ieee80211vap *vap,
+ const uint8_t dest[IEEE80211_ADDR_LEN])
+{
+ struct ieee80211_mesh_route *rt;
+
+ rt = ieee80211_mesh_rt_find(vap, dest);
+ if (rt == NULL)
+ return NULL;
+ if ((rt->rt_flags & IEEE80211_MESHRT_FLAGS_VALID) == 0 ||
+ (rt->rt_flags & IEEE80211_MESHRT_FLAGS_PROXY)) {
+ IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_MESH, dest,
+ "%s: !valid or proxy, flags 0x%x", __func__, rt->rt_flags);
+ /* XXX stat */
+ return NULL;
+ }
+ return ieee80211_find_txnode(vap, rt->rt_nexthop);
+}
+
+/*
+ * Forward the specified frame.
+ * Decrement the TTL and set TA to our MAC address.
+ */
+static void
+mesh_forward(struct ieee80211vap *vap, struct mbuf *m,
+ const struct ieee80211_meshcntl *mc)
+{
+ struct ieee80211com *ic = vap->iv_ic;
+ struct ieee80211_mesh_state *ms = vap->iv_mesh;
+ struct ifnet *ifp = vap->iv_ifp;
+ struct ifnet *parent = ic->ic_ifp;
+ const struct ieee80211_frame *wh =
+ mtod(m, const struct ieee80211_frame *);
+ struct mbuf *mcopy;
+ struct ieee80211_meshcntl *mccopy;
+ struct ieee80211_frame *whcopy;
+ struct ieee80211_node *ni;
+ int err;
+
+ if (mc->mc_ttl == 0) {
+ IEEE80211_NOTE_FRAME(vap, IEEE80211_MSG_MESH, wh,
+ "%s", "frame not fwd'd, ttl 0");
+ vap->iv_stats.is_mesh_fwd_ttl++;
+ return;
+ }
+ if (!(ms->ms_flags & IEEE80211_MESHFLAGS_FWD)) {
+ IEEE80211_NOTE_FRAME(vap, IEEE80211_MSG_MESH, wh,
+ "%s", "frame not fwd'd, fwding disabled");
+ vap->iv_stats.is_mesh_fwd_disabled++;
+ return;
+ }
+ mcopy = m_dup(m, M_DONTWAIT);
+ if (mcopy == NULL) {
+ IEEE80211_NOTE_FRAME(vap, IEEE80211_MSG_MESH, wh,
+ "%s", "frame not fwd'd, cannot dup");
+ vap->iv_stats.is_mesh_fwd_nobuf++;
+ ifp->if_oerrors++;
+ return;
+ }
+ mcopy = m_pullup(mcopy, ieee80211_hdrspace(ic, wh) +
+ sizeof(struct ieee80211_meshcntl));
+ if (mcopy == NULL) {
+ IEEE80211_NOTE_FRAME(vap, IEEE80211_MSG_MESH, wh,
+ "%s", "frame not fwd'd, too short");
+ vap->iv_stats.is_mesh_fwd_tooshort++;
+ ifp->if_oerrors++;
+ m_freem(mcopy);
+ return;
+ }
+ whcopy = mtod(mcopy, struct ieee80211_frame *);
+ mccopy = (struct ieee80211_meshcntl *)
+ (mtod(mcopy, uint8_t *) + ieee80211_hdrspace(ic, wh));
+ /* XXX clear other bits? */
+ whcopy->i_fc[1] &= ~IEEE80211_FC1_RETRY;
+ IEEE80211_ADDR_COPY(whcopy->i_addr2, vap->iv_myaddr);
+ if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
+ ni = ieee80211_ref_node(vap->iv_bss);
+ mcopy->m_flags |= M_MCAST;
+ } else {
+ ni = mesh_find_txnode(vap, whcopy->i_addr3);
+ if (ni == NULL) {
+ IEEE80211_NOTE_FRAME(vap, IEEE80211_MSG_MESH, wh,
+ "%s", "frame not fwd'd, no path");
+ vap->iv_stats.is_mesh_fwd_nopath++;
+ m_freem(mcopy);
+ return;
+ }
+ IEEE80211_ADDR_COPY(whcopy->i_addr1, ni->ni_macaddr);
+ }
+ KASSERT(mccopy->mc_ttl > 0, ("%s called with wrong ttl", __func__));
+ mccopy->mc_ttl--;
+
+ /* XXX calculate priority so drivers can find the tx queue */
+ M_WME_SETAC(mcopy, WME_AC_BE);
+
+ /* XXX do we know m_nextpkt is NULL? */
+ mcopy->m_pkthdr.rcvif = (void *) ni;
+ err = parent->if_transmit(parent, mcopy);
+ if (err != 0) {
+ /* NB: IFQ_HANDOFF reclaims mbuf */
+ ieee80211_free_node(ni);
+ } else {
+ ifp->if_opackets++;
+ }
+}
+
+static struct mbuf *
+mesh_decap(struct ieee80211vap *vap, struct mbuf *m, int hdrlen, int meshdrlen)
+{
+#define WHDIR(wh) ((wh)->i_fc[1] & IEEE80211_FC1_DIR_MASK)
+ uint8_t b[sizeof(struct ieee80211_qosframe_addr4) +
+ sizeof(struct ieee80211_meshcntl_ae11)];
+ const struct ieee80211_qosframe_addr4 *wh;
+ const struct ieee80211_meshcntl_ae10 *mc;
+ struct ether_header *eh;
+ struct llc *llc;
+ int ae;
+
+ if (m->m_len < hdrlen + sizeof(*llc) &&
+ (m = m_pullup(m, hdrlen + sizeof(*llc))) == NULL) {
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_ANY,
+ "discard data frame: %s", "m_pullup failed");
+ vap->iv_stats.is_rx_tooshort++;
+ return NULL;
+ }
+ memcpy(b, mtod(m, caddr_t), hdrlen);
+ wh = (const struct ieee80211_qosframe_addr4 *)&b[0];
+ mc = (const struct ieee80211_meshcntl_ae10 *)&b[hdrlen - meshdrlen];
+ KASSERT(WHDIR(wh) == IEEE80211_FC1_DIR_FROMDS ||
+ WHDIR(wh) == IEEE80211_FC1_DIR_DSTODS,
+ ("bogus dir, fc 0x%x:0x%x", wh->i_fc[0], wh->i_fc[1]));
+
+ llc = (struct llc *)(mtod(m, caddr_t) + hdrlen);
+ if (llc->llc_dsap == LLC_SNAP_LSAP && llc->llc_ssap == LLC_SNAP_LSAP &&
+ llc->llc_control == LLC_UI && llc->llc_snap.org_code[0] == 0 &&
+ llc->llc_snap.org_code[1] == 0 && llc->llc_snap.org_code[2] == 0 &&
+ /* NB: preserve AppleTalk frames that have a native SNAP hdr */
+ !(llc->llc_snap.ether_type == htons(ETHERTYPE_AARP) ||
+ llc->llc_snap.ether_type == htons(ETHERTYPE_IPX))) {
+ m_adj(m, hdrlen + sizeof(struct llc) - sizeof(*eh));
+ llc = NULL;
+ } else {
+ m_adj(m, hdrlen - sizeof(*eh));
+ }
+ eh = mtod(m, struct ether_header *);
+ ae = mc->mc_flags & 3;
+ if (WHDIR(wh) == IEEE80211_FC1_DIR_FROMDS) {
+ IEEE80211_ADDR_COPY(eh->ether_dhost, wh->i_addr1);
+ if (ae == 0) {
+ IEEE80211_ADDR_COPY(eh->ether_shost, wh->i_addr3);
+ } else if (ae == 1) {
+ IEEE80211_ADDR_COPY(eh->ether_shost, mc->mc_addr4);
+ } else {
+ IEEE80211_DISCARD(vap, IEEE80211_MSG_ANY,
+ (const struct ieee80211_frame *)wh, NULL,
+ "bad AE %d", ae);
+ vap->iv_stats.is_mesh_badae++;
+ m_freem(m);
+ return NULL;
+ }
+ } else {
+ if (ae == 0) {
+ IEEE80211_ADDR_COPY(eh->ether_dhost, wh->i_addr3);
+ IEEE80211_ADDR_COPY(eh->ether_shost, wh->i_addr4);
+ } else if (ae == 2) {
+ IEEE80211_ADDR_COPY(eh->ether_dhost, mc->mc_addr4);
+ IEEE80211_ADDR_COPY(eh->ether_shost, mc->mc_addr5);
+ } else {
+ IEEE80211_DISCARD(vap, IEEE80211_MSG_ANY,
+ (const struct ieee80211_frame *)wh, NULL,
+ "bad AE %d", ae);
+ vap->iv_stats.is_mesh_badae++;
+ m_freem(m);
+ return NULL;
+ }
+ }
+#ifdef ALIGNED_POINTER
+ if (!ALIGNED_POINTER(mtod(m, caddr_t) + sizeof(*eh), uint32_t)) {
+ m = ieee80211_realign(vap, m, sizeof(*eh));
+ if (m == NULL)
+ return NULL;
+ }
+#endif /* ALIGNED_POINTER */
+ if (llc != NULL) {
+ eh = mtod(m, struct ether_header *);
+ eh->ether_type = htons(m->m_pkthdr.len - sizeof(*eh));
+ }
+ return m;
+#undef WHDIR
+}
+
+/*
+ * Return non-zero if the unicast mesh data frame should be processed
+ * locally. Frames that are not proxy'd have our address, otherwise
+ * we need to consult the routing table to look for a proxy entry.
+ */
+static __inline int
+mesh_isucastforme(struct ieee80211vap *vap, const struct ieee80211_frame *wh,
+ const struct ieee80211_meshcntl *mc)
+{
+ int ae = mc->mc_flags & 3;
+
+ KASSERT((wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) == IEEE80211_FC1_DIR_DSTODS,
+ ("bad dir 0x%x:0x%x", wh->i_fc[0], wh->i_fc[1]));
+ KASSERT(ae == 0 || ae == 2, ("bad AE %d", ae));
+ if (ae == 2) { /* ucast w/ proxy */
+ const struct ieee80211_meshcntl_ae10 *mc10 =
+ (const struct ieee80211_meshcntl_ae10 *) mc;
+ struct ieee80211_mesh_route *rt =
+ ieee80211_mesh_rt_find(vap, mc10->mc_addr4);
+ /* check for proxy route to ourself */
+ return (rt != NULL &&
+ (rt->rt_flags & IEEE80211_MESHRT_FLAGS_PROXY));
+ } else /* ucast w/o proxy */
+ return IEEE80211_ADDR_EQ(wh->i_addr3, vap->iv_myaddr);
+}
+
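+/*
+ * Process a frame received on a mesh vap: validate the 802.11
+ * header, forward mesh data as required, then decap and deliver
+ * locally; management frames are dispatched via iv_recv_mgmt.
+ */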
+static int
+mesh_input(struct ieee80211_node *ni, struct mbuf *m, int rssi, int nf)
+{
+#define SEQ_LEQ(a,b) ((int)((a)-(b)) <= 0)
+#define HAS_SEQ(type) ((type & 0x4) == 0)
+ struct ieee80211vap *vap = ni->ni_vap;
+ struct ieee80211com *ic = ni->ni_ic;
+ struct ifnet *ifp = vap->iv_ifp;
+ struct ieee80211_frame *wh;
+ const struct ieee80211_meshcntl *mc;
+ int hdrspace, meshdrlen, need_tap;
+ uint8_t dir, type, subtype, qos;
+ uint32_t seq;
+ uint8_t *addr;
+ ieee80211_seq rxseq;
+
+ KASSERT(ni != NULL, ("null node"));
+ ni->ni_inact = ni->ni_inact_reload;
+
+ need_tap = 1; /* mbuf needs to be tapped. */
+ type = -1; /* undefined */
+
+ if (m->m_pkthdr.len < sizeof(struct ieee80211_frame_min)) {
+ IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_ANY,
+ ni->ni_macaddr, NULL,
+ "too short (1): len %u", m->m_pkthdr.len);
+ vap->iv_stats.is_rx_tooshort++;
+ goto out;
+ }
+ /*
+ * Bit of a cheat here, we use a pointer for a 3-address
+ * frame format but don't reference fields beyond
+ * ieee80211_frame_min w/o first validating the data is
+ * present.
+ */
+ wh = mtod(m, struct ieee80211_frame *);
+
+ if ((wh->i_fc[0] & IEEE80211_FC0_VERSION_MASK) !=
+ IEEE80211_FC0_VERSION_0) {
+ IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_ANY,
+ ni->ni_macaddr, NULL, "wrong version %x", wh->i_fc[0]);
+ vap->iv_stats.is_rx_badversion++;
+ goto err;
+ }
+ dir = wh->i_fc[1] & IEEE80211_FC1_DIR_MASK;
+ type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
+ subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
+ if ((ic->ic_flags & IEEE80211_F_SCAN) == 0) {
+ IEEE80211_RSSI_LPF(ni->ni_avgrssi, rssi);
+ ni->ni_noise = nf;
+ if (HAS_SEQ(type)) {
+ uint8_t tid = ieee80211_gettid(wh);
+
+ if (IEEE80211_QOS_HAS_SEQ(wh) &&
+ TID_TO_WME_AC(tid) >= WME_AC_VI)
+ ic->ic_wme.wme_hipri_traffic++;
+ rxseq = le16toh(*(uint16_t *)wh->i_seq);
+ if ((ni->ni_flags & IEEE80211_NODE_HT) == 0 &&
+ (wh->i_fc[1] & IEEE80211_FC1_RETRY) &&
+ SEQ_LEQ(rxseq, ni->ni_rxseqs[tid])) {
+ /* duplicate, discard */
+ IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_INPUT,
+ wh->i_addr1, "duplicate",
+ "seqno <%u,%u> fragno <%u,%u> tid %u",
+ rxseq >> IEEE80211_SEQ_SEQ_SHIFT,
+ ni->ni_rxseqs[tid] >>
+ IEEE80211_SEQ_SEQ_SHIFT,
+ rxseq & IEEE80211_SEQ_FRAG_MASK,
+ ni->ni_rxseqs[tid] &
+ IEEE80211_SEQ_FRAG_MASK,
+ tid);
+ vap->iv_stats.is_rx_dup++;
+ IEEE80211_NODE_STAT(ni, rx_dup);
+ goto out;
+ }
+ ni->ni_rxseqs[tid] = rxseq;
+ }
+ }
+#ifdef IEEE80211_DEBUG
+ /*
+ * It's easier, but too expensive, to simulate different mesh
+ * topologies by consulting the ACL policy very early, so do this
+ * only under DEBUG.
+ *
+ * NB: this check is also done upon peering link initiation.
+ */
+ if (vap->iv_acl != NULL && !vap->iv_acl->iac_check(vap, wh->i_addr2)) {
+ IEEE80211_DISCARD(vap, IEEE80211_MSG_ACL,
+ wh, NULL, "%s", "disallowed by ACL");
+ vap->iv_stats.is_rx_acl++;
+ goto out;
+ }
+#endif
+ switch (type) {
+ case IEEE80211_FC0_TYPE_DATA:
+ if (ni == vap->iv_bss)
+ goto out;
+ if (ni->ni_mlstate != IEEE80211_NODE_MESH_ESTABLISHED) {
+ IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_MESH,
+ ni->ni_macaddr, NULL,
+ "peer link not yet established (%d)",
+ ni->ni_mlstate);
+ vap->iv_stats.is_mesh_nolink++;
+ goto out;
+ }
+ if (dir != IEEE80211_FC1_DIR_FROMDS &&
+ dir != IEEE80211_FC1_DIR_DSTODS) {
+ IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT,
+ wh, "data", "incorrect dir 0x%x", dir);
+ vap->iv_stats.is_rx_wrongdir++;
+ goto err;
+ }
+ /* pull up enough to get to the mesh control */
+ hdrspace = ieee80211_hdrspace(ic, wh);
+ if (m->m_len < hdrspace + sizeof(struct ieee80211_meshcntl) &&
+ (m = m_pullup(m, hdrspace +
+ sizeof(struct ieee80211_meshcntl))) == NULL) {
+ IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_ANY,
+ ni->ni_macaddr, NULL,
+ "data too short: expecting %u", hdrspace);
+ vap->iv_stats.is_rx_tooshort++;
+ goto out; /* XXX */
+ }
+ /*
+ * Now calculate the full extent of the headers. Note
+ * mesh_decap will pull up anything we didn't get
+ * above when it strips the 802.11 headers.
+ */
+ mc = (const struct ieee80211_meshcntl *)
+ (mtod(m, const uint8_t *) + hdrspace);
+ meshdrlen = sizeof(struct ieee80211_meshcntl) +
+ (mc->mc_flags & 3) * IEEE80211_ADDR_LEN;
+ hdrspace += meshdrlen;
+ seq = LE_READ_4(mc->mc_seq);
+ if (IEEE80211_IS_MULTICAST(wh->i_addr1))
+ addr = wh->i_addr3;
+ else
+ addr = ((struct ieee80211_qosframe_addr4 *)wh)->i_addr4;
+ if (IEEE80211_ADDR_EQ(vap->iv_myaddr, addr)) {
+ IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_INPUT,
+ addr, "data", "%s", "not to me");
+ vap->iv_stats.is_rx_wrongbss++; /* XXX kinda */
+ goto out;
+ }
+ if (mesh_checkpseq(vap, addr, seq) != 0) {
+ vap->iv_stats.is_rx_dup++;
+ goto out;
+ }
+
+ /*
+ * Potentially forward packet. See table s36 (p140)
+ * for the rules. XXX tap fwd'd packets not for us?
+ */
+ if (dir == IEEE80211_FC1_DIR_FROMDS ||
+ !mesh_isucastforme(vap, wh, mc)) {
+ mesh_forward(vap, m, mc);
+ if (dir == IEEE80211_FC1_DIR_DSTODS)
+ goto out;
+ /* NB: fall thru to deliver mcast frames locally */
+ }
+
+ /*
+ * Save QoS bits for use below--before we strip the header.
+ */
+ if (subtype == IEEE80211_FC0_SUBTYPE_QOS) {
+ qos = (dir == IEEE80211_FC1_DIR_DSTODS) ?
+ ((struct ieee80211_qosframe_addr4 *)wh)->i_qos[0] :
+ ((struct ieee80211_qosframe *)wh)->i_qos[0];
+ } else
+ qos = 0;
+ /*
+ * Next up, any fragmentation.
+ */
+ if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
+ m = ieee80211_defrag(ni, m, hdrspace);
+ if (m == NULL) {
+ /* Fragment dropped or frame not complete yet */
+ goto out;
+ }
+ }
+ wh = NULL; /* no longer valid, catch any uses */
+
+ if (ieee80211_radiotap_active_vap(vap))
+ ieee80211_radiotap_rx(vap, m);
+ need_tap = 0;
+
+ /*
+ * Finally, strip the 802.11 header.
+ */
+ m = mesh_decap(vap, m, hdrspace, meshdrlen);
+ if (m == NULL) {
+ /* XXX mask bit to check for both */
+ /* don't count Null data frames as errors */
+ if (subtype == IEEE80211_FC0_SUBTYPE_NODATA ||
+ subtype == IEEE80211_FC0_SUBTYPE_QOS_NULL)
+ goto out;
+ IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_INPUT,
+ ni->ni_macaddr, "data", "%s", "decap error");
+ vap->iv_stats.is_rx_decap++;
+ IEEE80211_NODE_STAT(ni, rx_decap);
+ goto err;
+ }
+ if (qos & IEEE80211_QOS_AMSDU) {
+ m = ieee80211_decap_amsdu(ni, m);
+ if (m == NULL)
+ return IEEE80211_FC0_TYPE_DATA;
+ }
+ ieee80211_deliver_data(vap, ni, m);
+ return type;
+ case IEEE80211_FC0_TYPE_MGT:
+ vap->iv_stats.is_rx_mgmt++;
+ IEEE80211_NODE_STAT(ni, rx_mgmt);
+ if (dir != IEEE80211_FC1_DIR_NODS) {
+ IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT,
+ wh, "mgt", "incorrect dir 0x%x", dir);
+ vap->iv_stats.is_rx_wrongdir++;
+ goto err;
+ }
+ if (m->m_pkthdr.len < sizeof(struct ieee80211_frame)) {
+ IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_ANY,
+ ni->ni_macaddr, "mgt", "too short: len %u",
+ m->m_pkthdr.len);
+ vap->iv_stats.is_rx_tooshort++;
+ goto out;
+ }
+#ifdef IEEE80211_DEBUG
+ if ((ieee80211_msg_debug(vap) &&
+ (vap->iv_ic->ic_flags & IEEE80211_F_SCAN)) ||
+ ieee80211_msg_dumppkts(vap)) {
+ if_printf(ifp, "received %s from %s rssi %d\n",
+ ieee80211_mgt_subtype_name[subtype >>
+ IEEE80211_FC0_SUBTYPE_SHIFT],
+ ether_sprintf(wh->i_addr2), rssi);
+ }
+#endif
+ if (wh->i_fc[1] & IEEE80211_FC1_WEP) {
+ IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT,
+ wh, NULL, "%s", "WEP set but not permitted");
+ vap->iv_stats.is_rx_mgtdiscard++; /* XXX */
+ goto out;
+ }
+ vap->iv_recv_mgmt(ni, m, subtype, rssi, nf);
+ goto out;
+ case IEEE80211_FC0_TYPE_CTL:
+ vap->iv_stats.is_rx_ctl++;
+ IEEE80211_NODE_STAT(ni, rx_ctrl);
+ goto out;
+ default:
+ IEEE80211_DISCARD(vap, IEEE80211_MSG_ANY,
+ wh, "bad", "frame type 0x%x", type);
+ /* should not come here */
+ break;
+ }
+err:
+ ifp->if_ierrors++;
+out:
+ if (m != NULL) {
+ if (need_tap && ieee80211_radiotap_active_vap(vap))
+ ieee80211_radiotap_rx(vap, m);
+ m_freem(m);
+ }
+ return type;
+}
+
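+/*
+ * Handle received management frames: beacons and probe responses
+ * feed neighbor discovery and auto-peering, probe requests are
+ * answered and action frames are dispatched to the registered
+ * handlers.
+ */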
+static void
+mesh_recv_mgmt(struct ieee80211_node *ni, struct mbuf *m0, int subtype,
+ int rssi, int nf)
+{
+ struct ieee80211vap *vap = ni->ni_vap;
+ struct ieee80211_mesh_state *ms = vap->iv_mesh;
+ struct ieee80211com *ic = ni->ni_ic;
+ struct ieee80211_frame *wh;
+ uint8_t *frm, *efrm;
+
+ wh = mtod(m0, struct ieee80211_frame *);
+ frm = (uint8_t *)&wh[1];
+ efrm = mtod(m0, uint8_t *) + m0->m_len;
+ switch (subtype) {
+ case IEEE80211_FC0_SUBTYPE_PROBE_RESP:
+ case IEEE80211_FC0_SUBTYPE_BEACON:
+ {
+ struct ieee80211_scanparams scan;
+ /*
+ * We process beacon/probe response
+ * frames to discover neighbors.
+ */
+ if (ieee80211_parse_beacon(ni, m0, &scan) != 0)
+ return;
+ /*
+ * Count frame now that we know it's to be processed.
+ */
+ if (subtype == IEEE80211_FC0_SUBTYPE_BEACON) {
+ vap->iv_stats.is_rx_beacon++; /* XXX remove */
+ IEEE80211_NODE_STAT(ni, rx_beacons);
+ } else
+ IEEE80211_NODE_STAT(ni, rx_proberesp);
+ /*
+ * If scanning, just pass information to the scan module.
+ */
+ if (ic->ic_flags & IEEE80211_F_SCAN) {
+ if (ic->ic_flags_ext & IEEE80211_FEXT_PROBECHAN) {
+ /*
+ * Actively scanning a channel marked passive;
+ * send a probe request now that we know there
+ * is 802.11 traffic present.
+ *
+ * XXX check if the beacon we recv'd gives
+ * us what we need and suppress the probe req
+ */
+ ieee80211_probe_curchan(vap, 1);
+ ic->ic_flags_ext &= ~IEEE80211_FEXT_PROBECHAN;
+ }
+ ieee80211_add_scan(vap, &scan, wh,
+ subtype, rssi, nf);
+ return;
+ }
+
+ /* The rest of this code assumes we are running */
+ if (vap->iv_state != IEEE80211_S_RUN)
+ return;
+ /*
+ * Ignore non-mesh STAs.
+ */
+ if ((scan.capinfo &
+ (IEEE80211_CAPINFO_ESS|IEEE80211_CAPINFO_IBSS)) ||
+ scan.meshid == NULL || scan.meshconf == NULL) {
+ IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT,
+ wh, "beacon", "%s", "not a mesh sta");
+ vap->iv_stats.is_mesh_wrongmesh++;
+ return;
+ }
+ /*
+ * Ignore STAs for other mesh networks.
+ */
+ if (memcmp(scan.meshid+2, ms->ms_id, ms->ms_idlen) != 0 ||
+ mesh_verify_meshconf(vap, scan.meshconf)) {
+ IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT,
+ wh, "beacon", "%s", "not for our mesh");
+ vap->iv_stats.is_mesh_wrongmesh++;
+ return;
+ }
+ /*
+ * Peer only based on the current ACL policy.
+ */
+ if (vap->iv_acl != NULL &&
+ !vap->iv_acl->iac_check(vap, wh->i_addr2)) {
+ IEEE80211_DISCARD(vap, IEEE80211_MSG_ACL,
+ wh, NULL, "%s", "disallowed by ACL");
+ vap->iv_stats.is_rx_acl++;
+ return;
+ }
+ /*
+ * Do neighbor discovery.
+ */
+ if (!IEEE80211_ADDR_EQ(wh->i_addr2, ni->ni_macaddr)) {
+ /*
+ * Create a new entry in the neighbor table.
+ */
+ ni = ieee80211_add_neighbor(vap, wh, &scan);
+ }
+ /*
+ * Automatically peer with discovered nodes if possible.
+ * XXX backoff on repeated failure
+ */
+ if (ni != vap->iv_bss &&
+ (ms->ms_flags & IEEE80211_MESHFLAGS_AP) &&
+ ni->ni_mlstate == IEEE80211_NODE_MESH_IDLE) {
+ uint16_t args[1];
+
+ ni->ni_mlpid = mesh_generateid(vap);
+ if (ni->ni_mlpid == 0)
+ return;
+ mesh_linkchange(ni, IEEE80211_NODE_MESH_OPENSNT);
+ args[0] = ni->ni_mlpid;
+ ieee80211_send_action(ni,
+ IEEE80211_ACTION_CAT_MESHPEERING,
+ IEEE80211_ACTION_MESHPEERING_OPEN, args);
+ ni->ni_mlrcnt = 0;
+ mesh_peer_timeout_setup(ni);
+ }
+ break;
+ }
+ case IEEE80211_FC0_SUBTYPE_PROBE_REQ:
+ {
+ uint8_t *ssid, *meshid, *rates, *xrates;
+ uint8_t *sfrm;
+
+ if (vap->iv_state != IEEE80211_S_RUN) {
+ IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT,
+ wh, NULL, "wrong state %s",
+ ieee80211_state_name[vap->iv_state]);
+ vap->iv_stats.is_rx_mgtdiscard++;
+ return;
+ }
+ if (IEEE80211_IS_MULTICAST(wh->i_addr2)) {
+ /* frame must be directed */
+ IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT,
+ wh, NULL, "%s", "not unicast");
+ vap->iv_stats.is_rx_mgtdiscard++; /* XXX stat */
+ return;
+ }
+ /*
+ * prreq frame format
+ * [tlv] ssid
+ * [tlv] supported rates
+ * [tlv] extended supported rates
+ * [tlv] mesh id
+ */
+ ssid = meshid = rates = xrates = NULL;
+ sfrm = frm;
+ while (efrm - frm > 1) {
+ IEEE80211_VERIFY_LENGTH(efrm - frm, frm[1] + 2, return);
+ switch (*frm) {
+ case IEEE80211_ELEMID_SSID:
+ ssid = frm;
+ break;
+ case IEEE80211_ELEMID_RATES:
+ rates = frm;
+ break;
+ case IEEE80211_ELEMID_XRATES:
+ xrates = frm;
+ break;
+ case IEEE80211_ELEMID_MESHID:
+ meshid = frm;
+ break;
+ }
+ frm += frm[1] + 2;
+ }
+ IEEE80211_VERIFY_ELEMENT(ssid, IEEE80211_NWID_LEN, return);
+ IEEE80211_VERIFY_ELEMENT(rates, IEEE80211_RATE_MAXSIZE, return);
+ if (xrates != NULL)
+ IEEE80211_VERIFY_ELEMENT(xrates,
+ IEEE80211_RATE_MAXSIZE - rates[1], return);
+ if (meshid != NULL)
+ IEEE80211_VERIFY_ELEMENT(meshid,
+ IEEE80211_MESHID_LEN, return);
+ /* NB: meshid, not ssid */
+ IEEE80211_VERIFY_SSID(vap->iv_bss, meshid, return);
+
+ /* XXX find a better class or define its own */
+ IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_INPUT, wh->i_addr2,
+ "%s", "recv probe req");
+ /*
+ * Some legacy 11b clients cannot hack a complete
+ * probe response frame. When the request includes
+ * only a bare-bones rate set, communicate this to
+ * the transmit side.
+ */
+ ieee80211_send_proberesp(vap, wh->i_addr2, 0);
+ break;
+ }
+ case IEEE80211_FC0_SUBTYPE_ACTION:
+ if (vap->iv_state != IEEE80211_S_RUN) {
+ vap->iv_stats.is_rx_mgtdiscard++;
+ break;
+ }
+ /*
+ * We received an action for an unknown neighbor.
+ * XXX: wait for it to beacon or create ieee80211_node?
+ */
+ if (ni == vap->iv_bss) {
+ IEEE80211_DISCARD(vap, IEEE80211_MSG_MESH,
+ wh, NULL, "%s", "unknown node");
+ vap->iv_stats.is_rx_mgtdiscard++;
+ break;
+ }
+ /*
+ * Discard if not for us.
+ */
+ if (!IEEE80211_ADDR_EQ(vap->iv_myaddr, wh->i_addr1) &&
+ !IEEE80211_IS_MULTICAST(wh->i_addr1)) {
+ IEEE80211_DISCARD(vap, IEEE80211_MSG_MESH,
+ wh, NULL, "%s", "not for me");
+ vap->iv_stats.is_rx_mgtdiscard++;
+ break;
+ }
+ /* XXX parse_action is a bit useless now */
+ if (ieee80211_parse_action(ni, m0) == 0)
+ ic->ic_recv_action(ni, wh, frm, efrm);
+ break;
+ case IEEE80211_FC0_SUBTYPE_AUTH:
+ case IEEE80211_FC0_SUBTYPE_ASSOC_REQ:
+ case IEEE80211_FC0_SUBTYPE_REASSOC_REQ:
+ case IEEE80211_FC0_SUBTYPE_ASSOC_RESP:
+ case IEEE80211_FC0_SUBTYPE_REASSOC_RESP:
+ case IEEE80211_FC0_SUBTYPE_DEAUTH:
+ case IEEE80211_FC0_SUBTYPE_DISASSOC:
+ IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT,
+ wh, NULL, "%s", "not handled");
+ vap->iv_stats.is_rx_mgtdiscard++;
+ return;
+ default:
+ IEEE80211_DISCARD(vap, IEEE80211_MSG_ANY,
+ wh, "mgt", "subtype 0x%x not handled", subtype);
+ vap->iv_stats.is_rx_badsubtype++;
+ break;
+ }
+}
+
+/*
+ * Parse meshpeering action ie's for open+confirm frames; the
+ * important bits are returned in the supplied structure.
+ */
+static const struct ieee80211_meshpeer_ie *
+mesh_parse_meshpeering_action(struct ieee80211_node *ni,
+ const struct ieee80211_frame *wh, /* XXX for VERIFY_LENGTH */
+ const uint8_t *frm, const uint8_t *efrm,
+ struct ieee80211_meshpeer_ie *mp, uint8_t subtype)
+{
+ struct ieee80211vap *vap = ni->ni_vap;
+ const struct ieee80211_meshpeer_ie *mpie;
+ const uint8_t *meshid, *meshconf, *meshpeer;
+
+ meshid = meshconf = meshpeer = NULL;
+ while (efrm - frm > 1) {
+ IEEE80211_VERIFY_LENGTH(efrm - frm, frm[1] + 2, return NULL);
+ switch (*frm) {
+ case IEEE80211_ELEMID_MESHID:
+ meshid = frm;
+ break;
+ case IEEE80211_ELEMID_MESHCONF:
+ meshconf = frm;
+ break;
+ case IEEE80211_ELEMID_MESHPEER:
+ meshpeer = frm;
+ mpie = (const struct ieee80211_meshpeer_ie *) frm;
+ memset(mp, 0, sizeof(*mp));
+ mp->peer_llinkid = LE_READ_2(&mpie->peer_llinkid);
+ /* NB: peer link ID is optional on these frames */
+ if (subtype == IEEE80211_MESH_PEER_LINK_CLOSE &&
+ mpie->peer_len == 8) {
+ mp->peer_linkid = 0;
+ mp->peer_rcode = LE_READ_2(&mpie->peer_linkid);
+ } else {
+ mp->peer_linkid = LE_READ_2(&mpie->peer_linkid);
+ mp->peer_rcode = LE_READ_2(&mpie->peer_rcode);
+ }
+ break;
+ }
+ frm += frm[1] + 2;
+ }
+
+ /*
+ * Verify the contents of the frame. Action frames with
+ * close subtype don't have a Mesh Configuration IE.
+ * If the frame fails validation, close the peer link.
+ */
+ KASSERT(meshpeer != NULL &&
+ subtype != IEEE80211_ACTION_MESHPEERING_CLOSE,
+ ("parsing close action"));
+
+ if (mesh_verify_meshid(vap, meshid) ||
+ mesh_verify_meshpeer(vap, subtype, meshpeer) ||
+ mesh_verify_meshconf(vap, meshconf)) {
+ uint16_t args[3];
+
+ IEEE80211_DISCARD(vap,
+ IEEE80211_MSG_ACTION | IEEE80211_MSG_MESH,
+ wh, NULL, "%s", "not for our mesh");
+ vap->iv_stats.is_rx_mgtdiscard++;
+ switch (ni->ni_mlstate) {
+ case IEEE80211_NODE_MESH_IDLE:
+ case IEEE80211_NODE_MESH_ESTABLISHED:
+ case IEEE80211_NODE_MESH_HOLDING:
+ /* ignore */
+ break;
+ case IEEE80211_NODE_MESH_OPENSNT:
+ case IEEE80211_NODE_MESH_OPENRCV:
+ case IEEE80211_NODE_MESH_CONFIRMRCV:
+ args[0] = ni->ni_mlpid;
+ args[1] = ni->ni_mllid;
+ args[2] = IEEE80211_REASON_PEER_LINK_CANCELED;
+ ieee80211_send_action(ni,
+ IEEE80211_ACTION_CAT_MESHPEERING,
+ IEEE80211_ACTION_MESHPEERING_CLOSE,
+ args);
+ mesh_linkchange(ni, IEEE80211_NODE_MESH_HOLDING);
+ mesh_peer_timeout_setup(ni);
+ break;
+ }
+ return NULL;
+ }
+ return (const struct ieee80211_meshpeer_ie *) mp;
+}
+
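+/*
+ * Mesh peering OPEN handler: drive the peer link FSM from the
+ * current state, answering with OPEN/CONFIRM/CLOSE as appropriate.
+ */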
+static int
+mesh_recv_action_meshpeering_open(struct ieee80211_node *ni,
+ const struct ieee80211_frame *wh,
+ const uint8_t *frm, const uint8_t *efrm)
+{
+ struct ieee80211vap *vap = ni->ni_vap;
+ struct ieee80211_meshpeer_ie ie;
+ const struct ieee80211_meshpeer_ie *meshpeer;
+ uint16_t args[3];
+
+ /* +2+2 for category + action + capabilities */
+ meshpeer = mesh_parse_meshpeering_action(ni, wh, frm+2+2, efrm, &ie,
+ IEEE80211_ACTION_MESHPEERING_OPEN);
+ if (meshpeer == NULL) {
+ return 0;
+ }
+
+ /* XXX move up */
+ IEEE80211_NOTE(vap, IEEE80211_MSG_ACTION | IEEE80211_MSG_MESH, ni,
+ "recv PEER OPEN, lid 0x%x", meshpeer->peer_llinkid);
+
+ switch (ni->ni_mlstate) {
+ case IEEE80211_NODE_MESH_IDLE:
+ mesh_linkchange(ni, IEEE80211_NODE_MESH_OPENRCV);
+ ni->ni_mllid = meshpeer->peer_llinkid;
+ ni->ni_mlpid = mesh_generateid(vap);
+ if (ni->ni_mlpid == 0)
+ return 0; /* XXX */
+ args[0] = ni->ni_mlpid;
+ /* Announce we're open too... */
+ ieee80211_send_action(ni,
+ IEEE80211_ACTION_CAT_MESHPEERING,
+ IEEE80211_ACTION_MESHPEERING_OPEN, args);
+ /* ...and confirm the link. */
+ args[0] = ni->ni_mlpid;
+ args[1] = ni->ni_mllid;
+ ieee80211_send_action(ni,
+ IEEE80211_ACTION_CAT_MESHPEERING,
+ IEEE80211_ACTION_MESHPEERING_CONFIRM,
+ args);
+ mesh_peer_timeout_setup(ni);
+ break;
+ case IEEE80211_NODE_MESH_OPENRCV:
+ /* Wrong Link ID */
+ if (ni->ni_mllid != meshpeer->peer_llinkid) {
+ args[0] = ni->ni_mllid;
+ args[1] = ni->ni_mlpid;
+ args[2] = IEEE80211_REASON_PEER_LINK_CANCELED;
+ ieee80211_send_action(ni,
+ IEEE80211_ACTION_CAT_MESHPEERING,
+ IEEE80211_ACTION_MESHPEERING_CLOSE,
+ args);
+ mesh_linkchange(ni, IEEE80211_NODE_MESH_HOLDING);
+ mesh_peer_timeout_setup(ni);
+ break;
+ }
+ /* Duplicate open, confirm again. */
+ args[0] = ni->ni_mlpid;
+ args[1] = ni->ni_mllid;
+ ieee80211_send_action(ni,
+ IEEE80211_ACTION_CAT_MESHPEERING,
+ IEEE80211_ACTION_MESHPEERING_CONFIRM,
+ args);
+ break;
+ case IEEE80211_NODE_MESH_OPENSNT:
+ ni->ni_mllid = meshpeer->peer_llinkid;
+ mesh_linkchange(ni, IEEE80211_NODE_MESH_OPENRCV);
+ args[0] = ni->ni_mlpid;
+ args[1] = ni->ni_mllid;
+ ieee80211_send_action(ni,
+ IEEE80211_ACTION_CAT_MESHPEERING,
+ IEEE80211_ACTION_MESHPEERING_CONFIRM,
+ args);
+ /* NB: don't setup/clear any timeout */
+ break;
+ case IEEE80211_NODE_MESH_CONFIRMRCV:
+ if (ni->ni_mlpid != meshpeer->peer_linkid ||
+ ni->ni_mllid != meshpeer->peer_llinkid) {
+ args[0] = ni->ni_mlpid;
+ args[1] = ni->ni_mllid;
+ args[2] = IEEE80211_REASON_PEER_LINK_CANCELED;
+ ieee80211_send_action(ni,
+ IEEE80211_ACTION_CAT_MESHPEERING,
+ IEEE80211_ACTION_MESHPEERING_CLOSE,
+ args);
+ mesh_linkchange(ni,
+ IEEE80211_NODE_MESH_HOLDING);
+ mesh_peer_timeout_setup(ni);
+ break;
+ }
+ mesh_linkchange(ni, IEEE80211_NODE_MESH_ESTABLISHED);
+ ni->ni_mllid = meshpeer->peer_llinkid;
+ args[0] = ni->ni_mlpid;
+ args[1] = ni->ni_mllid;
+ ieee80211_send_action(ni,
+ IEEE80211_ACTION_CAT_MESHPEERING,
+ IEEE80211_ACTION_MESHPEERING_CONFIRM,
+ args);
+ mesh_peer_timeout_stop(ni);
+ break;
+ case IEEE80211_NODE_MESH_ESTABLISHED:
+ if (ni->ni_mllid != meshpeer->peer_llinkid) {
+ args[0] = ni->ni_mllid;
+ args[1] = ni->ni_mlpid;
+ args[2] = IEEE80211_REASON_PEER_LINK_CANCELED;
+ ieee80211_send_action(ni,
+ IEEE80211_ACTION_CAT_MESHPEERING,
+ IEEE80211_ACTION_MESHPEERING_CLOSE,
+ args);
+ mesh_linkchange(ni, IEEE80211_NODE_MESH_HOLDING);
+ mesh_peer_timeout_setup(ni);
+ break;
+ }
+ args[0] = ni->ni_mlpid;
+ args[1] = ni->ni_mllid;
+ ieee80211_send_action(ni,
+ IEEE80211_ACTION_CAT_MESHPEERING,
+ IEEE80211_ACTION_MESHPEERING_CONFIRM,
+ args);
+ break;
+ case IEEE80211_NODE_MESH_HOLDING:
+ args[0] = ni->ni_mlpid;
+ args[1] = meshpeer->peer_llinkid;
+ args[2] = IEEE80211_REASON_MESH_MAX_RETRIES;
+ ieee80211_send_action(ni,
+ IEEE80211_ACTION_CAT_MESHPEERING,
+ IEEE80211_ACTION_MESHPEERING_CLOSE,
+ args);
+ break;
+ }
+ return 0;
+}
+
+static int
+mesh_recv_action_meshpeering_confirm(struct ieee80211_node *ni,
+ const struct ieee80211_frame *wh,
+ const uint8_t *frm, const uint8_t *efrm)
+{
+ struct ieee80211vap *vap = ni->ni_vap;
+ struct ieee80211_meshpeer_ie ie;
+ const struct ieee80211_meshpeer_ie *meshpeer;
+ uint16_t args[3];
+
+ /* +2+2+2+2 for category + action + capabilities + status code + AID */
+ meshpeer = mesh_parse_meshpeering_action(ni, wh, frm+2+2+2+2, efrm, &ie,
+ IEEE80211_ACTION_MESHPEERING_CONFIRM);
+ if (meshpeer == NULL) {
+ return 0;
+ }
+
+ IEEE80211_NOTE(vap, IEEE80211_MSG_ACTION | IEEE80211_MSG_MESH, ni,
+ "recv PEER CONFIRM, local id 0x%x, peer id 0x%x",
+ meshpeer->peer_llinkid, meshpeer->peer_linkid);
+
+ switch (ni->ni_mlstate) {
+ case IEEE80211_NODE_MESH_OPENRCV:
+ mesh_linkchange(ni, IEEE80211_NODE_MESH_ESTABLISHED);
+ mesh_peer_timeout_stop(ni);
+ break;
+ case IEEE80211_NODE_MESH_OPENSNT:
+ mesh_linkchange(ni, IEEE80211_NODE_MESH_CONFIRMRCV);
+ break;
+ case IEEE80211_NODE_MESH_HOLDING:
+ args[0] = ni->ni_mlpid;
+ args[1] = meshpeer->peer_llinkid;
+ args[2] = IEEE80211_REASON_MESH_MAX_RETRIES;
+ ieee80211_send_action(ni,
+ IEEE80211_ACTION_CAT_MESHPEERING,
+ IEEE80211_ACTION_MESHPEERING_CLOSE,
+ args);
+ break;
+ case IEEE80211_NODE_MESH_CONFIRMRCV:
+ if (ni->ni_mllid != meshpeer->peer_llinkid) {
+ args[0] = ni->ni_mlpid;
+ args[1] = ni->ni_mllid;
+ args[2] = IEEE80211_REASON_PEER_LINK_CANCELED;
+ ieee80211_send_action(ni,
+ IEEE80211_ACTION_CAT_MESHPEERING,
+ IEEE80211_ACTION_MESHPEERING_CLOSE,
+ args);
+ mesh_linkchange(ni, IEEE80211_NODE_MESH_HOLDING);
+ mesh_peer_timeout_setup(ni);
+ }
+ break;
+ default:
+ IEEE80211_DISCARD(vap,
+ IEEE80211_MSG_ACTION | IEEE80211_MSG_MESH,
+ wh, NULL, "received confirm in invalid state %d",
+ ni->ni_mlstate);
+ vap->iv_stats.is_rx_mgtdiscard++;
+ break;
+ }
+ return 0;
+}
+
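+/*
+ * Mesh peering CLOSE handler: acknowledge with a CLOSE and move to
+ * HOLDING, or back to IDLE when already HOLDING.
+ */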
+static int
+mesh_recv_action_meshpeering_close(struct ieee80211_node *ni,
+ const struct ieee80211_frame *wh,
+ const uint8_t *frm, const uint8_t *efrm)
+{
+ uint16_t args[3];
+
+ IEEE80211_NOTE(ni->ni_vap, IEEE80211_MSG_ACTION | IEEE80211_MSG_MESH,
+ ni, "%s", "recv PEER CLOSE");
+
+ switch (ni->ni_mlstate) {
+ case IEEE80211_NODE_MESH_IDLE:
+ /* ignore */
+ break;
+ case IEEE80211_NODE_MESH_OPENRCV:
+ case IEEE80211_NODE_MESH_OPENSNT:
+ case IEEE80211_NODE_MESH_CONFIRMRCV:
+ case IEEE80211_NODE_MESH_ESTABLISHED:
+ args[0] = ni->ni_mlpid;
+ args[1] = ni->ni_mllid;
+ args[2] = IEEE80211_REASON_MESH_CLOSE_RCVD;
+ ieee80211_send_action(ni,
+ IEEE80211_ACTION_CAT_MESHPEERING,
+ IEEE80211_ACTION_MESHPEERING_CLOSE,
+ args);
+ mesh_linkchange(ni, IEEE80211_NODE_MESH_HOLDING);
+ mesh_peer_timeout_setup(ni);
+ break;
+ case IEEE80211_NODE_MESH_HOLDING:
+ mesh_linkchange(ni, IEEE80211_NODE_MESH_IDLE);
+ mesh_peer_timeout_setup(ni);
+ break;
+ }
+ return 0;
+}
+
+/*
+ * Link Metric handling.
+ */
+static int
+mesh_recv_action_meshlmetric_req(struct ieee80211_node *ni,
+ const struct ieee80211_frame *wh,
+ const uint8_t *frm, const uint8_t *efrm)
+{
+ uint32_t metric;
+
+ metric = mesh_airtime_calc(ni);
+ ieee80211_send_action(ni,
+ IEEE80211_ACTION_CAT_MESHLMETRIC,
+ IEEE80211_ACTION_MESHLMETRIC_REP,
+ &metric);
+ return 0;
+}
+
+static int
+mesh_recv_action_meshlmetric_rep(struct ieee80211_node *ni,
+ const struct ieee80211_frame *wh,
+ const uint8_t *frm, const uint8_t *efrm)
+{
+ return 0;
+}
+
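+/*
+ * Transmit an action frame to the peer using the management
+ * rate/retry parameters and the voice access category.
+ */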
+static int
+mesh_send_action(struct ieee80211_node *ni, struct mbuf *m)
+{
+ struct ieee80211_bpf_params params;
+
+ memset(&params, 0, sizeof(params));
+ params.ibp_pri = WME_AC_VO;
+ params.ibp_rate0 = ni->ni_txparms->mgmtrate;
+ /* XXX ucast/mcast */
+ params.ibp_try0 = ni->ni_txparms->maxretry;
+ params.ibp_power = ni->ni_txpower;
+ return ieee80211_mgmt_output(ni, m, IEEE80211_FC0_SUBTYPE_ACTION,
+ &params);
+}
+
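+/*
+ * Append a 16-/32-bit value to a frame in little-endian byte order,
+ * advancing the cursor, e.g.:
+ *
+ *   ADDSHORT(frm, capinfo);   appends 2 bytes, LSB first
+ *   ADDWORD(frm, metric);     appends 4 bytes, LSB first
+ */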
+#define ADDSHORT(frm, v) do { \
+ frm[0] = (v) & 0xff; \
+ frm[1] = (v) >> 8; \
+ frm += 2; \
+} while (0)
+#define ADDWORD(frm, v) do { \
+ frm[0] = (v) & 0xff; \
+ frm[1] = ((v) >> 8) & 0xff; \
+ frm[2] = ((v) >> 16) & 0xff; \
+ frm[3] = ((v) >> 24) & 0xff; \
+ frm += 4; \
+} while (0)
+
+static int
+mesh_send_action_meshpeering_open(struct ieee80211_node *ni,
+ int category, int action, void *args0)
+{
+ struct ieee80211vap *vap = ni->ni_vap;
+ struct ieee80211com *ic = ni->ni_ic;
+ uint16_t *args = args0;
+ const struct ieee80211_rateset *rs;
+ struct mbuf *m;
+ uint8_t *frm;
+
+ IEEE80211_NOTE(vap, IEEE80211_MSG_ACTION | IEEE80211_MSG_MESH, ni,
+ "send PEER OPEN action: localid 0x%x", args[0]);
+
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_NODE,
+ "ieee80211_ref_node (%s:%u) %p<%s> refcnt %d\n", __func__, __LINE__,
+ ni, ether_sprintf(ni->ni_macaddr), ieee80211_node_refcnt(ni)+1);
+ ieee80211_ref_node(ni);
+
+ m = ieee80211_getmgtframe(&frm,
+ ic->ic_headroom + sizeof(struct ieee80211_frame),
+ sizeof(uint16_t) /* action+category */
+ + sizeof(uint16_t) /* capabilities */
+ + 2 + IEEE80211_RATE_SIZE
+ + 2 + (IEEE80211_RATE_MAXSIZE - IEEE80211_RATE_SIZE)
+ + 2 + IEEE80211_MESHID_LEN
+ + sizeof(struct ieee80211_meshconf_ie)
+ + sizeof(struct ieee80211_meshpeer_ie)
+ );
+ if (m != NULL) {
+ /*
+ * mesh peer open action frame format:
+ * [1] category
+ * [1] action
+ * [2] capabilities
+ * [tlv] rates
+ * [tlv] xrates
+ * [tlv] mesh id
+ * [tlv] mesh conf
+ * [tlv] mesh peer link mgmt
+ */
+ *frm++ = category;
+ *frm++ = action;
+ ADDSHORT(frm, ieee80211_getcapinfo(vap, ni->ni_chan));
+ rs = ieee80211_get_suprates(ic, ic->ic_curchan);
+ frm = ieee80211_add_rates(frm, rs);
+ frm = ieee80211_add_xrates(frm, rs);
+ frm = ieee80211_add_meshid(frm, vap);
+ frm = ieee80211_add_meshconf(frm, vap);
+ frm = ieee80211_add_meshpeer(frm, IEEE80211_MESH_PEER_LINK_OPEN,
+ args[0], 0, 0);
+ m->m_pkthdr.len = m->m_len = frm - mtod(m, uint8_t *);
+ return mesh_send_action(ni, m);
+ } else {
+ vap->iv_stats.is_tx_nobuf++;
+ ieee80211_free_node(ni);
+ return ENOMEM;
+ }
+}
+
+static int
+mesh_send_action_meshpeering_confirm(struct ieee80211_node *ni,
+ int category, int action, void *args0)
+{
+ struct ieee80211vap *vap = ni->ni_vap;
+ struct ieee80211com *ic = ni->ni_ic;
+ uint16_t *args = args0;
+ const struct ieee80211_rateset *rs;
+ struct mbuf *m;
+ uint8_t *frm;
+
+ IEEE80211_NOTE(vap, IEEE80211_MSG_ACTION | IEEE80211_MSG_MESH, ni,
+ "send PEER CONFIRM action: localid 0x%x, peerid 0x%x",
+ args[0], args[1]);
+
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_NODE,
+ "ieee80211_ref_node (%s:%u) %p<%s> refcnt %d\n", __func__, __LINE__,
+ ni, ether_sprintf(ni->ni_macaddr), ieee80211_node_refcnt(ni)+1);
+ ieee80211_ref_node(ni);
+
+ m = ieee80211_getmgtframe(&frm,
+ ic->ic_headroom + sizeof(struct ieee80211_frame),
+ sizeof(uint16_t) /* action+category */
+ + sizeof(uint16_t) /* capabilities */
+ + sizeof(uint16_t) /* status code */
+ + sizeof(uint16_t) /* AID */
+ + 2 + IEEE80211_RATE_SIZE
+ + 2 + (IEEE80211_RATE_MAXSIZE - IEEE80211_RATE_SIZE)
+ + 2 + IEEE80211_MESHID_LEN
+ + sizeof(struct ieee80211_meshconf_ie)
+ + sizeof(struct ieee80211_meshpeer_ie)
+ );
+ if (m != NULL) {
+ /*
+ * mesh peer confirm action frame format:
+ * [1] category
+ * [1] action
+ * [2] capabilities
+ * [2] status code
+ * [2] association id (peer ID)
+ * [tlv] rates
+ * [tlv] xrates
+ * [tlv] mesh id
+ * [tlv] mesh conf
+ * [tlv] mesh peer link mgmt
+ */
+ *frm++ = category;
+ *frm++ = action;
+ ADDSHORT(frm, ieee80211_getcapinfo(vap, ni->ni_chan));
+ ADDSHORT(frm, 0); /* status code */
+ ADDSHORT(frm, args[1]); /* AID */
+ rs = ieee80211_get_suprates(ic, ic->ic_curchan);
+ frm = ieee80211_add_rates(frm, rs);
+ frm = ieee80211_add_xrates(frm, rs);
+ frm = ieee80211_add_meshid(frm, vap);
+ frm = ieee80211_add_meshconf(frm, vap);
+ frm = ieee80211_add_meshpeer(frm,
+ IEEE80211_MESH_PEER_LINK_CONFIRM,
+ args[0], args[1], 0);
+ m->m_pkthdr.len = m->m_len = frm - mtod(m, uint8_t *);
+ return mesh_send_action(ni, m);
+ } else {
+ vap->iv_stats.is_tx_nobuf++;
+ ieee80211_free_node(ni);
+ return ENOMEM;
+ }
+}
+
+static int
+mesh_send_action_meshpeering_close(struct ieee80211_node *ni,
+ int category, int action, void *args0)
+{
+ struct ieee80211vap *vap = ni->ni_vap;
+ struct ieee80211com *ic = ni->ni_ic;
+ uint16_t *args = args0;
+ struct mbuf *m;
+ uint8_t *frm;
+
+ IEEE80211_NOTE(vap, IEEE80211_MSG_ACTION | IEEE80211_MSG_MESH, ni,
+ "send PEER CLOSE action: localid 0x%x, peerid 0x%x reason %d",
+ args[0], args[1], args[2]);
+
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_NODE,
+ "ieee80211_ref_node (%s:%u) %p<%s> refcnt %d\n", __func__, __LINE__,
+ ni, ether_sprintf(ni->ni_macaddr), ieee80211_node_refcnt(ni)+1);
+ ieee80211_ref_node(ni);
+
+ m = ieee80211_getmgtframe(&frm,
+ ic->ic_headroom + sizeof(struct ieee80211_frame),
+ sizeof(uint16_t) /* action+category */
+ + sizeof(uint16_t) /* reason code */
+ + 2 + IEEE80211_MESHID_LEN
+ + sizeof(struct ieee80211_meshpeer_ie)
+ );
+ if (m != NULL) {
+ /*
+ * mesh peer close action frame format:
+ * [1] category
+ * [1] action
+ * [2] reason code
+ * [tlv] mesh id
+ * [tlv] mesh peer link mgmt
+ */
+ *frm++ = category;
+ *frm++ = action;
+ ADDSHORT(frm, args[2]); /* reason code */
+ frm = ieee80211_add_meshid(frm, vap);
+ frm = ieee80211_add_meshpeer(frm,
+ IEEE80211_MESH_PEER_LINK_CLOSE,
+ args[0], args[1], args[2]);
+ m->m_pkthdr.len = m->m_len = frm - mtod(m, uint8_t *);
+ return mesh_send_action(ni, m);
+ } else {
+ vap->iv_stats.is_tx_nobuf++;
+ ieee80211_free_node(ni);
+ return ENOMEM;
+ }
+}
+
+static int
+mesh_send_action_meshlink_request(struct ieee80211_node *ni,
+ int category, int action, void *arg0)
+{
+ struct ieee80211vap *vap = ni->ni_vap;
+ struct ieee80211com *ic = ni->ni_ic;
+ struct mbuf *m;
+ uint8_t *frm;
+
+ IEEE80211_NOTE(vap, IEEE80211_MSG_ACTION | IEEE80211_MSG_MESH, ni,
+ "%s", "send LINK METRIC REQUEST action");
+
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_NODE,
+ "ieee80211_ref_node (%s:%u) %p<%s> refcnt %d\n", __func__, __LINE__,
+ ni, ether_sprintf(ni->ni_macaddr), ieee80211_node_refcnt(ni)+1);
+ ieee80211_ref_node(ni);
+
+ m = ieee80211_getmgtframe(&frm,
+ ic->ic_headroom + sizeof(struct ieee80211_frame),
+ sizeof(uint16_t) /* action+category */
+ );
+ if (m != NULL) {
+ /*
+ * mesh link metric request
+ * [1] category
+ * [1] action
+ */
+ *frm++ = category;
+ *frm++ = action;
+ m->m_pkthdr.len = m->m_len = frm - mtod(m, uint8_t *);
+ return mesh_send_action(ni, m);
+ } else {
+ vap->iv_stats.is_tx_nobuf++;
+ ieee80211_free_node(ni);
+ return ENOMEM;
+ }
+}
+
+static int
+mesh_send_action_meshlink_reply(struct ieee80211_node *ni,
+ int category, int action, void *args0)
+{
+ struct ieee80211vap *vap = ni->ni_vap;
+ struct ieee80211com *ic = ni->ni_ic;
+ uint32_t *metric = args0;
+ struct mbuf *m;
+ uint8_t *frm;
+
+ IEEE80211_NOTE(vap, IEEE80211_MSG_ACTION | IEEE80211_MSG_MESH, ni,
+ "send LINK METRIC REPLY action: metric 0x%x", *metric);
+
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_NODE,
+ "ieee80211_ref_node (%s:%u) %p<%s> refcnt %d\n", __func__, __LINE__,
+ ni, ether_sprintf(ni->ni_macaddr), ieee80211_node_refcnt(ni)+1);
+ ieee80211_ref_node(ni);
+
+ m = ieee80211_getmgtframe(&frm,
+ ic->ic_headroom + sizeof(struct ieee80211_frame),
+ sizeof(uint16_t) /* action+category */
+ + sizeof(struct ieee80211_meshlmetric_ie)
+ );
+ if (m != NULL) {
+ /*
+ * mesh link metric reply
+ * [1] category
+ * [1] action
+ * [tlv] mesh link metric
+ */
+ *frm++ = category;
+ *frm++ = action;
+ frm = ieee80211_add_meshlmetric(frm, *metric);
+ m->m_pkthdr.len = m->m_len = frm - mtod(m, uint8_t *);
+ return mesh_send_action(ni, m);
+ } else {
+ vap->iv_stats.is_tx_nobuf++;
+ ieee80211_free_node(ni);
+ return ENOMEM;
+ }
+}
+
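+/*
+ * Arm the peer link timer with a state-dependent timeout; no timer
+ * is set while IDLE.
+ */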
+static void
+mesh_peer_timeout_setup(struct ieee80211_node *ni)
+{
+ switch (ni->ni_mlstate) {
+ case IEEE80211_NODE_MESH_HOLDING:
+ ni->ni_mltval = ieee80211_mesh_holdingtimeout;
+ break;
+ case IEEE80211_NODE_MESH_CONFIRMRCV:
+ ni->ni_mltval = ieee80211_mesh_confirmtimeout;
+ break;
+ case IEEE80211_NODE_MESH_IDLE:
+ ni->ni_mltval = 0;
+ break;
+ default:
+ ni->ni_mltval = ieee80211_mesh_retrytimeout;
+ break;
+ }
+ if (ni->ni_mltval)
+ callout_reset(&ni->ni_mltimer, ni->ni_mltval,
+ mesh_peer_timeout_cb, ni);
+}
+
+/*
+ * Same as above but backs off the timer statistically by 50%.
+ */
+static void
+mesh_peer_timeout_backoff(struct ieee80211_node *ni)
+{
+ uint32_t r;
+
+ r = arc4random();
+ ni->ni_mltval += r % ni->ni_mltval;
+ callout_reset(&ni->ni_mltimer, ni->ni_mltval, mesh_peer_timeout_cb,
+ ni);
+}
+
+static __inline void
+mesh_peer_timeout_stop(struct ieee80211_node *ni)
+{
+ callout_drain(&ni->ni_mltimer);
+}
+
+/*
+ * Mesh Peer Link Management FSM timeout handling.
+ */
+static void
+mesh_peer_timeout_cb(void *arg)
+{
+ struct ieee80211_node *ni = (struct ieee80211_node *)arg;
+ uint16_t args[3];
+
+ IEEE80211_NOTE(ni->ni_vap, IEEE80211_MSG_MESH,
+ ni, "mesh link timeout, state %d, retry counter %d",
+ ni->ni_mlstate, ni->ni_mlrcnt);
+
+ switch (ni->ni_mlstate) {
+ case IEEE80211_NODE_MESH_IDLE:
+ case IEEE80211_NODE_MESH_ESTABLISHED:
+ break;
+ case IEEE80211_NODE_MESH_OPENSNT:
+ case IEEE80211_NODE_MESH_OPENRCV:
+ if (ni->ni_mlrcnt == ieee80211_mesh_maxretries) {
+ args[0] = ni->ni_mlpid;
+ args[2] = IEEE80211_REASON_MESH_MAX_RETRIES;
+ ieee80211_send_action(ni,
+ IEEE80211_ACTION_CAT_MESHPEERING,
+ IEEE80211_ACTION_MESHPEERING_CLOSE, args);
+ ni->ni_mlrcnt = 0;
+ mesh_linkchange(ni, IEEE80211_NODE_MESH_HOLDING);
+ mesh_peer_timeout_setup(ni);
+ } else {
+ args[0] = ni->ni_mlpid;
+ ieee80211_send_action(ni,
+ IEEE80211_ACTION_CAT_MESHPEERING,
+ IEEE80211_ACTION_MESHPEERING_OPEN, args);
+ ni->ni_mlrcnt++;
+ mesh_peer_timeout_backoff(ni);
+ }
+ break;
+ case IEEE80211_NODE_MESH_CONFIRMRCV:
+ if (ni->ni_mlrcnt == ieee80211_mesh_maxretries) {
+ args[0] = ni->ni_mlpid;
+ args[2] = IEEE80211_REASON_MESH_CONFIRM_TIMEOUT;
+ ieee80211_send_action(ni,
+ IEEE80211_ACTION_CAT_MESHPEERING,
+ IEEE80211_ACTION_MESHPEERING_CLOSE, args);
+ ni->ni_mlrcnt = 0;
+ mesh_linkchange(ni, IEEE80211_NODE_MESH_HOLDING);
+ mesh_peer_timeout_setup(ni);
+ } else {
+ ni->ni_mlrcnt++;
+ mesh_peer_timeout_setup(ni);
+ }
+ break;
+ case IEEE80211_NODE_MESH_HOLDING:
+ mesh_linkchange(ni, IEEE80211_NODE_MESH_IDLE);
+ break;
+ }
+}
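+
+/*
+ * NB: summary of the transitions driven by mesh_peer_timeout_cb above,
+ * derived from the switch statement for quick reference:
+ *
+ *	IDLE, ESTABLISHED  no action
+ *	OPENSNT, OPENRCV   resend OPEN and back off; after maxretries
+ *	                   send CLOSE (MESH_MAX_RETRIES), enter HOLDING
+ *	CONFIRMRCV         rearm timer; after maxretries send CLOSE
+ *	                   (MESH_CONFIRM_TIMEOUT), enter HOLDING
+ *	HOLDING            fall back to IDLE
+ */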
+
+static int
+mesh_verify_meshid(struct ieee80211vap *vap, const uint8_t *ie)
+{
+ struct ieee80211_mesh_state *ms = vap->iv_mesh;
+
+ if (ie == NULL || ie[1] != ms->ms_idlen)
+ return 1;
+ return memcmp(ms->ms_id, ie + 2, ms->ms_idlen);
+}
+
+/*
+ * Check if we are using the same algorithms for this mesh.
+ */
+static int
+mesh_verify_meshconf(struct ieee80211vap *vap, const uint8_t *ie)
+{
+ const struct ieee80211_meshconf_ie *meshconf =
+ (const struct ieee80211_meshconf_ie *) ie;
+ const struct ieee80211_mesh_state *ms = vap->iv_mesh;
+
+ if (meshconf == NULL)
+ return 1;
+ if (meshconf->conf_pselid != ms->ms_ppath->mpp_ie) {
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_MESH,
+ "unknown path selection algorithm: 0x%x\n",
+ meshconf->conf_pselid);
+ return 1;
+ }
+ if (meshconf->conf_pmetid != ms->ms_pmetric->mpm_ie) {
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_MESH,
+ "unknown path metric algorithm: 0x%x\n",
+ meshconf->conf_pmetid);
+ return 1;
+ }
+ if (meshconf->conf_ccid != 0) {
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_MESH,
+ "unknown congestion control algorithm: 0x%x\n",
+ meshconf->conf_ccid);
+ return 1;
+ }
+ if (meshconf->conf_syncid != IEEE80211_MESHCONF_SYNC_NEIGHOFF) {
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_MESH,
+ "unknown sync algorithm: 0x%x\n",
+ meshconf->conf_syncid);
+ return 1;
+ }
+ if (meshconf->conf_authid != 0) {
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_MESH,
+ "unknown auth auth algorithm: 0x%x\n",
+ meshconf->conf_pselid);
+ return 1;
+ }
+ /* Not accepting peers */
+ if (!(meshconf->conf_cap & IEEE80211_MESHCONF_CAP_AP)) {
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_MESH,
+ "not accepting peers: 0x%x\n", meshconf->conf_cap);
+ return 1;
+ }
+ return 0;
+}
+
+static int
+mesh_verify_meshpeer(struct ieee80211vap *vap, uint8_t subtype,
+ const uint8_t *ie)
+{
+ const struct ieee80211_meshpeer_ie *meshpeer =
+ (const struct ieee80211_meshpeer_ie *) ie;
+
+ if (meshpeer == NULL || meshpeer->peer_len < 6 ||
+ meshpeer->peer_len > 10)
+ return 1;
+ switch (subtype) {
+ case IEEE80211_MESH_PEER_LINK_OPEN:
+ if (meshpeer->peer_len != 6)
+ return 1;
+ break;
+ case IEEE80211_MESH_PEER_LINK_CONFIRM:
+ if (meshpeer->peer_len != 8)
+ return 1;
+ break;
+ case IEEE80211_MESH_PEER_LINK_CLOSE:
+ if (meshpeer->peer_len < 8)
+ return 1;
+ if (meshpeer->peer_len == 8 && meshpeer->peer_linkid != 0)
+ return 1;
+ if (meshpeer->peer_rcode == 0)
+ return 1;
+ break;
+ }
+ return 0;
+}
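+
+/*
+ * NB: the length checks in mesh_verify_meshpeer above mirror the IE
+ * layouts built by ieee80211_add_meshpeer below: OPEN carries the
+ * 4-octet protocol id plus the local link id (6 octets), CONFIRM adds
+ * the peer link id (8), and CLOSE adds a reason code and an optional
+ * peer link id (8 or 10).
+ */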
+
+/*
+ * Add a Mesh ID IE to a frame.
+ */
+uint8_t *
+ieee80211_add_meshid(uint8_t *frm, struct ieee80211vap *vap)
+{
+ struct ieee80211_mesh_state *ms = vap->iv_mesh;
+
+ KASSERT(vap->iv_opmode == IEEE80211_M_MBSS, ("not a mbss vap"));
+
+ *frm++ = IEEE80211_ELEMID_MESHID;
+ *frm++ = ms->ms_idlen;
+ memcpy(frm, ms->ms_id, ms->ms_idlen);
+ return frm + ms->ms_idlen;
+}
+
+/*
+ * Add a Mesh Configuration IE to a frame.
+ * For now just use HWMP routing, Airtime link metric, Null Congestion
+ * Signaling, Null Sync Protocol and Null Authentication.
+ */
+uint8_t *
+ieee80211_add_meshconf(uint8_t *frm, struct ieee80211vap *vap)
+{
+ const struct ieee80211_mesh_state *ms = vap->iv_mesh;
+
+ KASSERT(vap->iv_opmode == IEEE80211_M_MBSS, ("not a MBSS vap"));
+
+ *frm++ = IEEE80211_ELEMID_MESHCONF;
+ *frm++ = sizeof(struct ieee80211_meshconf_ie) - 2;
+ *frm++ = ms->ms_ppath->mpp_ie; /* path selection */
+ *frm++ = ms->ms_pmetric->mpm_ie; /* link metric */
+ *frm++ = IEEE80211_MESHCONF_CC_DISABLED;
+ *frm++ = IEEE80211_MESHCONF_SYNC_NEIGHOFF;
+ *frm++ = IEEE80211_MESHCONF_AUTH_DISABLED;
+ /* NB: set the number of neighbors before the rest */
+ *frm = (ms->ms_neighbors > 15 ? 15 : ms->ms_neighbors) << 1;
+ if (ms->ms_flags & IEEE80211_MESHFLAGS_PORTAL)
+ *frm |= IEEE80211_MESHCONF_FORM_MP;
+ frm += 1;
+ if (ms->ms_flags & IEEE80211_MESHFLAGS_AP)
+ *frm |= IEEE80211_MESHCONF_CAP_AP;
+ if (ms->ms_flags & IEEE80211_MESHFLAGS_FWD)
+ *frm |= IEEE80211_MESHCONF_CAP_FWRD;
+ frm += 1;
+ return frm;
+}
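+
+/*
+ * NB (illustrative byte layout): ieee80211_add_meshconf above emits a
+ * 9-octet element,
+ *
+ *	[MESHCONF][len=7][psel][pmet][cc][sync][auth][form][cap]
+ *
+ * where `form' holds the neighbor count (capped at 15) shifted left one
+ * bit, optionally OR'd with FORM_MP, and `cap' carries the CAP_AP and
+ * CAP_FWRD bits.
+ */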
+
+/*
+ * Add a Mesh Peer Management IE to a frame.
+ */
+uint8_t *
+ieee80211_add_meshpeer(uint8_t *frm, uint8_t subtype, uint16_t localid,
+ uint16_t peerid, uint16_t reason)
+{
+ /* XXX change for AH */
+ static const uint8_t meshpeerproto[4] = IEEE80211_MESH_PEER_PROTO;
+
+ KASSERT(localid != 0, ("localid == 0"));
+
+ *frm++ = IEEE80211_ELEMID_MESHPEER;
+ switch (subtype) {
+ case IEEE80211_MESH_PEER_LINK_OPEN:
+ *frm++ = 6; /* length */
+ memcpy(frm, meshpeerproto, 4);
+ frm += 4;
+ ADDSHORT(frm, localid); /* local ID */
+ break;
+ case IEEE80211_MESH_PEER_LINK_CONFIRM:
+ KASSERT(peerid != 0, ("sending peer confirm without peer id"));
+ *frm++ = 8; /* length */
+ memcpy(frm, meshpeerproto, 4);
+ frm += 4;
+ ADDSHORT(frm, localid); /* local ID */
+ ADDSHORT(frm, peerid); /* peer ID */
+ break;
+ case IEEE80211_MESH_PEER_LINK_CLOSE:
+ if (peerid)
+ *frm++ = 10; /* length */
+ else
+ *frm++ = 8; /* length */
+ memcpy(frm, meshpeerproto, 4);
+ frm += 4;
+ ADDSHORT(frm, localid); /* local ID */
+ if (peerid)
+ ADDSHORT(frm, peerid); /* peer ID */
+ ADDSHORT(frm, reason);
+ break;
+ }
+ return frm;
+}
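+
+/*
+ * NB (illustrative sketch; `llid' is a hypothetical local variable):
+ * a peer link open frame appends its IE with
+ *
+ *	frm = ieee80211_add_meshpeer(frm, IEEE80211_MESH_PEER_LINK_OPEN,
+ *	    llid, 0, 0);
+ *
+ * emitting [MESHPEER][6][00 0f ac 2a][llid], i.e. the 4-octet MPM
+ * protocol identifier followed by the 2-octet local link id.
+ */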
+
+/*
+ * Compute an Airtime Link Metric for the link with this node.
+ *
+ * Based on Draft 3.0 spec (11B.10, p.149).
+ */
+/*
+ * Max 802.11s overhead.
+ */
+#define IEEE80211_MESH_MAXOVERHEAD \
+ (sizeof(struct ieee80211_qosframe_addr4) \
+ + sizeof(struct ieee80211_meshcntl_ae11) \
+ + sizeof(struct llc) \
+ + IEEE80211_ADDR_LEN \
+ + IEEE80211_WEP_IVLEN \
+ + IEEE80211_WEP_KIDLEN \
+ + IEEE80211_WEP_CRCLEN \
+ + IEEE80211_WEP_MICLEN \
+ + IEEE80211_CRC_LEN)
+uint32_t
+mesh_airtime_calc(struct ieee80211_node *ni)
+{
+#define M_BITS 8
+#define S_FACTOR (2 * M_BITS)
+ struct ieee80211com *ic = ni->ni_ic;
+ struct ifnet *ifp = ni->ni_vap->iv_ifp;
+ static const int nbits = 8192 << M_BITS;
+ uint32_t overhead, rate, errrate;
+ uint64_t res;
+
+ /* Time to transmit a frame */
+ rate = ni->ni_txrate;
+ overhead = ieee80211_compute_duration(ic->ic_rt,
+ ifp->if_mtu + IEEE80211_MESH_MAXOVERHEAD, rate, 0) << M_BITS;
+ /* Error rate in percentage */
+ /* XXX assuming small failures are ok */
+ errrate = (((ifp->if_oerrors +
+ ifp->if_ierrors) / 100) << M_BITS) / 100;
+ res = (overhead + (nbits / rate)) *
+ ((1 << S_FACTOR) / ((1 << M_BITS) - errrate));
+
+ return (uint32_t)(res >> S_FACTOR);
+#undef M_BITS
+#undef S_FACTOR
+}
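+
+/*
+ * NB: the fixed-point arithmetic in mesh_airtime_calc implements the
+ * draft's airtime formula
+ *
+ *	ca = (O + Bt/r) * 1/(1 - ef)
+ *
+ * with O the overhead, Bt an 8192-bit test frame, r the rate and ef the
+ * error rate: overhead and nbits carry M_BITS fractional bits, errrate
+ * approximates ef scaled by 1 << M_BITS, so the product carries
+ * S_FACTOR = 2 * M_BITS fractional bits and the final shift restores an
+ * integer metric.
+ */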
+
+/*
+ * Add a Mesh Link Metric report IE to a frame.
+ */
+uint8_t *
+ieee80211_add_meshlmetric(uint8_t *frm, uint32_t metric)
+{
+ *frm++ = IEEE80211_ELEMID_MESHLINK;
+ *frm++ = 4;
+ ADDWORD(frm, metric);
+ return frm;
+}
+#undef ADDSHORT
+#undef ADDWORD
+
+/*
+ * Initialize any mesh-specific node state.
+ */
+void
+ieee80211_mesh_node_init(struct ieee80211vap *vap, struct ieee80211_node *ni)
+{
+ ni->ni_flags |= IEEE80211_NODE_QOS;
+ callout_init(&ni->ni_mltimer, CALLOUT_MPSAFE);
+}
+
+/*
+ * Cleanup any mesh-specific node state.
+ */
+void
+ieee80211_mesh_node_cleanup(struct ieee80211_node *ni)
+{
+ struct ieee80211vap *vap = ni->ni_vap;
+ struct ieee80211_mesh_state *ms = vap->iv_mesh;
+
+ callout_drain(&ni->ni_mltimer);
+ /* NB: short-circuit callbacks after mesh_vdetach */
+ if (vap->iv_mesh != NULL)
+ ms->ms_ppath->mpp_peerdown(ni);
+}
+
+void
+ieee80211_parse_meshid(struct ieee80211_node *ni, const uint8_t *ie)
+{
+ ni->ni_meshidlen = ie[1];
+ memcpy(ni->ni_meshid, ie + 2, ie[1]);
+}
+
+/*
+ * Setup mesh-specific node state on neighbor discovery.
+ */
+void
+ieee80211_mesh_init_neighbor(struct ieee80211_node *ni,
+ const struct ieee80211_frame *wh,
+ const struct ieee80211_scanparams *sp)
+{
+ ieee80211_parse_meshid(ni, sp->meshid);
+}
+
+void
+ieee80211_mesh_update_beacon(struct ieee80211vap *vap,
+ struct ieee80211_beacon_offsets *bo)
+{
+ KASSERT(vap->iv_opmode == IEEE80211_M_MBSS, ("not a MBSS vap"));
+
+ if (isset(bo->bo_flags, IEEE80211_BEACON_MESHCONF)) {
+ (void)ieee80211_add_meshconf(bo->bo_meshconf, vap);
+ clrbit(bo->bo_flags, IEEE80211_BEACON_MESHCONF);
+ }
+}
+
+static int
+mesh_ioctl_get80211(struct ieee80211vap *vap, struct ieee80211req *ireq)
+{
+ struct ieee80211_mesh_state *ms = vap->iv_mesh;
+ uint8_t tmpmeshid[IEEE80211_NWID_LEN];
+ struct ieee80211_mesh_route *rt;
+ struct ieee80211req_mesh_route *imr;
+ size_t len, off;
+ uint8_t *p;
+ int error;
+
+ if (vap->iv_opmode != IEEE80211_M_MBSS)
+ return ENOSYS;
+
+ error = 0;
+ switch (ireq->i_type) {
+ case IEEE80211_IOC_MESH_ID:
+ ireq->i_len = ms->ms_idlen;
+ memcpy(tmpmeshid, ms->ms_id, ireq->i_len);
+ error = copyout(tmpmeshid, ireq->i_data, ireq->i_len);
+ break;
+ case IEEE80211_IOC_MESH_AP:
+ ireq->i_val = (ms->ms_flags & IEEE80211_MESHFLAGS_AP) != 0;
+ break;
+ case IEEE80211_IOC_MESH_FWRD:
+ ireq->i_val = (ms->ms_flags & IEEE80211_MESHFLAGS_FWD) != 0;
+ break;
+ case IEEE80211_IOC_MESH_TTL:
+ ireq->i_val = ms->ms_ttl;
+ break;
+ case IEEE80211_IOC_MESH_RTCMD:
+ switch (ireq->i_val) {
+ case IEEE80211_MESH_RTCMD_LIST:
+ len = 0;
+ MESH_RT_LOCK(ms);
+ TAILQ_FOREACH(rt, &ms->ms_routes, rt_next) {
+ len += sizeof(*imr);
+ }
+ MESH_RT_UNLOCK(ms);
+ if (len > ireq->i_len || ireq->i_len < sizeof(*imr)) {
+ ireq->i_len = len;
+ return ENOMEM;
+ }
+ ireq->i_len = len;
+ /* XXX M_WAIT? */
+ p = malloc(len, M_TEMP, M_NOWAIT | M_ZERO);
+ if (p == NULL)
+ return ENOMEM;
+ off = 0;
+ MESH_RT_LOCK(ms);
+ TAILQ_FOREACH(rt, &ms->ms_routes, rt_next) {
+ if (off >= len)
+ break;
+ imr = (struct ieee80211req_mesh_route *)
+ (p + off);
+ imr->imr_flags = rt->rt_flags;
+ IEEE80211_ADDR_COPY(imr->imr_dest,
+ rt->rt_dest);
+ IEEE80211_ADDR_COPY(imr->imr_nexthop,
+ rt->rt_nexthop);
+ imr->imr_metric = rt->rt_metric;
+ imr->imr_nhops = rt->rt_nhops;
+ imr->imr_lifetime = rt->rt_lifetime;
+ imr->imr_lastmseq = rt->rt_lastmseq;
+ off += sizeof(*imr);
+ }
+ MESH_RT_UNLOCK(ms);
+ error = copyout(p, (uint8_t *)ireq->i_data,
+ ireq->i_len);
+ free(p, M_TEMP);
+ break;
+ case IEEE80211_MESH_RTCMD_FLUSH:
+ case IEEE80211_MESH_RTCMD_ADD:
+ case IEEE80211_MESH_RTCMD_DELETE:
+ return EINVAL;
+ default:
+ return ENOSYS;
+ }
+ break;
+ case IEEE80211_IOC_MESH_PR_METRIC:
+ len = strlen(ms->ms_pmetric->mpm_descr);
+ if (ireq->i_len < len)
+ return EINVAL;
+ ireq->i_len = len;
+ error = copyout(ms->ms_pmetric->mpm_descr,
+ (uint8_t *)ireq->i_data, len);
+ break;
+ case IEEE80211_IOC_MESH_PR_PATH:
+ len = strlen(ms->ms_ppath->mpp_descr);
+ if (ireq->i_len < len)
+ return EINVAL;
+ ireq->i_len = len;
+ error = copyout(ms->ms_ppath->mpp_descr,
+ (uint8_t *)ireq->i_data, len);
+ break;
+ default:
+ return ENOSYS;
+ }
+
+ return error;
+}
+IEEE80211_IOCTL_GET(mesh, mesh_ioctl_get80211);
+
+static int
+mesh_ioctl_set80211(struct ieee80211vap *vap, struct ieee80211req *ireq)
+{
+ struct ieee80211_mesh_state *ms = vap->iv_mesh;
+ uint8_t tmpmeshid[IEEE80211_NWID_LEN];
+ uint8_t tmpaddr[IEEE80211_ADDR_LEN];
+ char tmpproto[IEEE80211_MESH_PROTO_DSZ];
+ int error;
+
+ if (vap->iv_opmode != IEEE80211_M_MBSS)
+ return ENOSYS;
+
+ error = 0;
+ switch (ireq->i_type) {
+ case IEEE80211_IOC_MESH_ID:
+ if (ireq->i_val != 0 || ireq->i_len > IEEE80211_MESHID_LEN)
+ return EINVAL;
+ error = copyin(ireq->i_data, tmpmeshid, ireq->i_len);
+ if (error != 0)
+ break;
+ memset(ms->ms_id, 0, IEEE80211_NWID_LEN);
+ ms->ms_idlen = ireq->i_len;
+ memcpy(ms->ms_id, tmpmeshid, ireq->i_len);
+ error = ENETRESET;
+ break;
+ case IEEE80211_IOC_MESH_AP:
+ if (ireq->i_val)
+ ms->ms_flags |= IEEE80211_MESHFLAGS_AP;
+ else
+ ms->ms_flags &= ~IEEE80211_MESHFLAGS_AP;
+ error = ENETRESET;
+ break;
+ case IEEE80211_IOC_MESH_FWRD:
+ if (ireq->i_val)
+ ms->ms_flags |= IEEE80211_MESHFLAGS_FWD;
+ else
+ ms->ms_flags &= ~IEEE80211_MESHFLAGS_FWD;
+ break;
+ case IEEE80211_IOC_MESH_TTL:
+ ms->ms_ttl = (uint8_t) ireq->i_val;
+ break;
+ case IEEE80211_IOC_MESH_RTCMD:
+ switch (ireq->i_val) {
+ case IEEE80211_MESH_RTCMD_LIST:
+ return EINVAL;
+ case IEEE80211_MESH_RTCMD_FLUSH:
+ ieee80211_mesh_rt_flush(vap);
+ break;
+ case IEEE80211_MESH_RTCMD_ADD:
+ if (IEEE80211_ADDR_EQ(vap->iv_myaddr, ireq->i_data) ||
+ IEEE80211_ADDR_EQ(broadcastaddr, ireq->i_data))
+ return EINVAL;
+ error = copyin(ireq->i_data, &tmpaddr,
+ IEEE80211_ADDR_LEN);
+ if (error == 0)
+ ieee80211_mesh_discover(vap, tmpaddr, NULL);
+ break;
+ case IEEE80211_MESH_RTCMD_DELETE:
+ ieee80211_mesh_rt_del(vap, ireq->i_data);
+ break;
+ default:
+ return ENOSYS;
+ }
+ break;
+ case IEEE80211_IOC_MESH_PR_METRIC:
+ error = copyin(ireq->i_data, tmpproto, sizeof(tmpproto));
+ if (error == 0) {
+ error = mesh_select_proto_metric(vap, tmpproto);
+ if (error == 0)
+ error = ENETRESET;
+ }
+ break;
+ case IEEE80211_IOC_MESH_PR_PATH:
+ error = copyin(ireq->i_data, tmpproto, sizeof(tmpproto));
+ if (error == 0) {
+ error = mesh_select_proto_path(vap, tmpproto);
+ if (error == 0)
+ error = ENETRESET;
+ }
+ break;
+ default:
+ return ENOSYS;
+ }
+ return error;
+}
+IEEE80211_IOCTL_SET(mesh, mesh_ioctl_set80211);
diff --git a/rtems/freebsd/net80211/ieee80211_mesh.h b/rtems/freebsd/net80211/ieee80211_mesh.h
new file mode 100644
index 00000000..e90cd402
--- /dev/null
+++ b/rtems/freebsd/net80211/ieee80211_mesh.h
@@ -0,0 +1,503 @@
+/*-
+ * Copyright (c) 2009 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * This software was developed by Rui Paulo under sponsorship from the
+ * FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+#ifndef _NET80211_IEEE80211_MESH_HH_
+#define _NET80211_IEEE80211_MESH_HH_
+
+#define IEEE80211_MESH_DEFAULT_TTL 31
+
+/*
+ * NB: all structures are __packed so sizeof works on arm, et al.
+ */
+/*
+ * 802.11s Information Elements.
+ */
+/* Mesh Configuration */
+struct ieee80211_meshconf_ie {
+ uint8_t conf_ie; /* IEEE80211_ELEMID_MESHCONF */
+ uint8_t conf_len;
+ uint8_t conf_pselid; /* Active Path Sel. Proto. ID */
+ uint8_t conf_pmetid; /* Active Metric Identifier */
+ uint8_t conf_ccid; /* Congestion Control Mode ID */
+ uint8_t conf_syncid; /* Sync. Protocol ID */
+ uint8_t conf_authid; /* Auth. Protocol ID */
+ uint8_t conf_form; /* Formation Information */
+ uint8_t conf_cap;
+} __packed;
+
+/* Hybrid Wireless Mesh Protocol */
+#define IEEE80211_MESHCONF_PATH_HWMP 0x00
+/* Airtime Link Metric */
+#define IEEE80211_MESHCONF_METRIC_AIRTIME 0x00
+/* Congestion Control */
+#define IEEE80211_MESHCONF_CC_DISABLED 0x00
+#define IEEE80211_MESHCONF_CC_SIG 0x01
+/* Neighbour Offset */
+#define IEEE80211_MESHCONF_SYNC_NEIGHOFF 0x00
+#define IEEE80211_MESHCONF_AUTH_DISABLED 0x00
+/* Simultaneous Authentication of Equals */
+#define IEEE80211_MESHCONF_AUTH_SAE 0x01
+#define IEEE80211_MESHCONF_FORM_MP 0x01 /* Connected to Portal */
+#define IEEE80211_MESHCONF_FORM_NNEIGH_MASK 0x04 /* Number of Neighbours */
+#define IEEE80211_MESHCONF_CAP_AP 0x01 /* Accepting Peers */
+#define IEEE80211_MESHCONF_CAP_MCCAS 0x02 /* MCCA supported */
+#define IEEE80211_MESHCONF_CAP_MCCAE 0x04 /* MCCA enabled */
+#define IEEE80211_MESHCONF_CAP_FWRD 0x08 /* forwarding enabled */
+#define IEEE80211_MESHCONF_CAP_BTR 0x10 /* Beacon Timing Report Enab */
+#define IEEE80211_MESHCONF_CAP_TBTTA 0x20 /* TBTT Adj. Enabled */
+#define IEEE80211_MESHCONF_CAP_PSL 0x40 /* Power Save Level */
+
+/* Mesh Identifier */
+struct ieee80211_meshid_ie {
+ uint8_t id_ie; /* IEEE80211_ELEMID_MESHID */
+ uint8_t id_len;
+} __packed;
+
+/* Link Metric Report */
+struct ieee80211_meshlmetric_ie {
+ uint8_t lm_ie; /* IEEE80211_ELEMID_MESHLINK */
+ uint8_t lm_len;
+ uint32_t lm_metric;
+#define IEEE80211_MESHLMETRIC_INITIALVAL 0
+} __packed;
+
+/* Congestion Notification */
+struct ieee80211_meshcngst_ie {
+ uint8_t cngst_ie; /* IEEE80211_ELEMID_MESHCNGST */
+ uint8_t cngst_len;
+ uint16_t cngst_timer[4]; /* Expiration Timers: AC_BK,
+ AC_BE, AC_VI, AC_VO */
+} __packed;
+
+/* Peer Link Management */
+struct ieee80211_meshpeer_ie {
+ uint8_t peer_ie; /* IEEE80211_ELEMID_MESHPEER */
+ uint8_t peer_len;
+ uint8_t peer_proto[4]; /* Peer Management Protocol */
+ uint16_t peer_llinkid; /* Local Link ID */
+ uint16_t peer_linkid; /* Peer Link ID */
+ uint16_t peer_rcode;
+} __packed;
+
+enum {
+ IEEE80211_MESH_PEER_LINK_OPEN = 0,
+ IEEE80211_MESH_PEER_LINK_CONFIRM = 1,
+ IEEE80211_MESH_PEER_LINK_CLOSE = 2,
+ /* values 3-255 are reserved */
+};
+
+/* Mesh Peering Management Protocol */
+#define IEEE80211_MESH_PEER_PROTO_OUI 0x00, 0x0f, 0xac
+#define IEEE80211_MESH_PEER_PROTO_VALUE 0x2a
+#define IEEE80211_MESH_PEER_PROTO { IEEE80211_MESH_PEER_PROTO_OUI, \
+ IEEE80211_MESH_PEER_PROTO_VALUE }
+/* Abbreviated Handshake Protocol */
+#define IEEE80211_MESH_PEER_PROTO_AH_OUI 0x00, 0x0f, 0xac
+#define IEEE80211_MESH_PEER_PROTO_AH_VALUE 0x2b
+#define IEEE80211_MESH_PEER_PROTO_AH { IEEE80211_MESH_PEER_PROTO_AH_OUI, \
+ IEEE80211_MESH_PEER_PROTO_AH_VALUE }
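+
+/*
+ * NB: both protocol identifiers share the IEEE 802.11 OUI 00:0f:ac;
+ * only the final octet distinguishes MPM (0x2a) from the abbreviated
+ * handshake (0x2b).
+ */
+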
+#ifdef notyet
+/* Mesh Channel Switch Announcement */
+struct ieee80211_meshcsa_ie {
+ uint8_t csa_ie; /* IEEE80211_ELEMID_MESHCSA */
+ uint8_t csa_len;
+ uint8_t csa_mode;
+ uint8_t csa_newclass; /* New Regulatory Class */
+ uint8_t csa_newchan;
+ uint8_t csa_precvalue; /* Precedence Value */
+ uint8_t csa_count;
+} __packed;
+
+/* Mesh TIM */
+/* Equal to the non-mesh version */
+
+/* Mesh Awake Window */
+struct ieee80211_meshawakew_ie {
+ uint8_t awakew_ie; /* IEEE80211_ELEMID_MESHAWAKEW */
+ uint8_t awakew_len;
+ uint8_t awakew_windowlen; /* in TUs */
+} __packed;
+
+/* Mesh Beacon Timing */
+struct ieee80211_meshbeacont_ie {
+ uint8_t beacont_ie; /* IEEE80211_ELEMID_MESHBEACONT */
+ uint8_t beacont_len;
+ struct {
+ uint8_t mp_aid; /* Least Octet of AID */
+ uint16_t mp_btime; /* Beacon Time */
+ uint16_t mp_bint; /* Beacon Interval */
+ } __packed mp[1]; /* NB: variable size */
+} __packed;
+#endif
+
+/* Portal (MP) Announcement */
+struct ieee80211_meshpann_ie {
+ uint8_t pann_ie; /* IEEE80211_ELEMID_MESHPANN */
+ uint8_t pann_len;
+ uint8_t pann_flags;
+ uint8_t pann_hopcount;
+ uint8_t pann_ttl;
+ uint8_t pann_addr[IEEE80211_ADDR_LEN];
+ uint8_t pann_seq; /* PANN Sequence Number */
+} __packed;
+
+/* Root (MP) Announcement */
+struct ieee80211_meshrann_ie {
+ uint8_t rann_ie; /* IEEE80211_ELEMID_MESHRANN */
+ uint8_t rann_len;
+ uint8_t rann_flags;
+#define IEEE80211_MESHRANN_FLAGS_PR 0x01 /* Portal Role */
+ uint8_t rann_hopcount;
+ uint8_t rann_ttl;
+ uint8_t rann_addr[IEEE80211_ADDR_LEN];
+ uint32_t rann_seq; /* HWMP Sequence Number */
+ uint32_t rann_metric;
+} __packed;
+
+/* Mesh Path Request */
+struct ieee80211_meshpreq_ie {
+ uint8_t preq_ie; /* IEEE80211_ELEMID_MESHPREQ */
+ uint8_t preq_len;
+ uint8_t preq_flags;
+#define IEEE80211_MESHPREQ_FLAGS_PR 0x01 /* Portal Role */
+#define IEEE80211_MESHPREQ_FLAGS_AM 0x02 /* 0 = ucast / 1 = bcast */
+#define IEEE80211_MESHPREQ_FLAGS_PP 0x04 /* Proactive PREP */
+#define IEEE80211_MESHPREQ_FLAGS_AE 0x40 /* Address Extension */
+ uint8_t preq_hopcount;
+ uint8_t preq_ttl;
+ uint32_t preq_id;
+ uint8_t preq_origaddr[IEEE80211_ADDR_LEN];
+ uint32_t preq_origseq; /* HWMP Sequence Number */
+ /* NB: may have Originator Proxied Address */
+ uint32_t preq_lifetime;
+ uint32_t preq_metric;
+ uint8_t preq_tcount; /* target count */
+ struct {
+ uint8_t target_flags;
+#define IEEE80211_MESHPREQ_TFLAGS_TO 0x01 /* Target Only */
+#define IEEE80211_MESHPREQ_TFLAGS_RF 0x02 /* Reply and Forward */
+#define IEEE80211_MESHPREQ_TFLAGS_USN 0x04 /* Unknown HWMP seq number */
+ uint8_t target_addr[IEEE80211_ADDR_LEN];
+ uint32_t target_seq; /* HWMP Sequence Number */
+ } __packed preq_targets[1]; /* NB: variable size */
+} __packed;
+
+/* Mesh Path Reply */
+struct ieee80211_meshprep_ie {
+ uint8_t prep_ie; /* IEEE80211_ELEMID_MESHPREP */
+ uint8_t prep_len;
+ uint8_t prep_flags;
+ uint8_t prep_hopcount;
+ uint8_t prep_ttl;
+ uint8_t prep_targetaddr[IEEE80211_ADDR_LEN];
+ uint32_t prep_targetseq;
+ /* NB: May have Target Proxied Address */
+ uint32_t prep_lifetime;
+ uint32_t prep_metric;
+ uint8_t prep_origaddr[IEEE80211_ADDR_LEN];
+ uint32_t prep_origseq; /* HWMP Sequence Number */
+} __packed;
+
+/* Mesh Path Error */
+struct ieee80211_meshperr_ie {
+ uint8_t perr_ie; /* IEEE80211_ELEMID_MESHPERR */
+ uint8_t perr_len;
+ uint8_t perr_ttl;
+ uint8_t perr_ndests; /* Number of Destinations */
+ struct {
+ uint8_t dest_flags;
+#define IEEE80211_MESHPERR_DFLAGS_USN 0x01
+#define IEEE80211_MESHPERR_DFLAGS_RC 0x02
+ uint8_t dest_addr[IEEE80211_ADDR_LEN];
+ uint32_t dest_seq; /* HWMP Sequence Number */
+ uint16_t dest_rcode;
+ } __packed perr_dests[1]; /* NB: variable size */
+} __packed;
+
+#ifdef notyet
+/* Mesh Proxy Update */
+struct ieee80211_meshpu_ie {
+ uint8_t pu_ie; /* IEEE80211_ELEMID_MESHPU */
+ uint8_t pu_len;
+ uint8_t pu_flags;
+#define IEEE80211_MESHPU_FLAGS_MASK 0x1
+#define IEEE80211_MESHPU_FLAGS_DEL 0x0
+#define IEEE80211_MESHPU_FLAGS_ADD 0x1
+ uint8_t pu_seq; /* PU Sequence Number */
+ uint8_t pu_addr[IEEE80211_ADDR_LEN];
+ uint8_t pu_naddr; /* Number of Proxied Addresses */
+ /* NB: proxied address follows */
+} __packed;
+
+/* Mesh Proxy Update Confirmation */
+struct ieee80211_meshpuc_ie {
+ uint8_t puc_ie; /* IEEE80211_ELEMID_MESHPUC */
+ uint8_t puc_len;
+ uint8_t puc_flags;
+ uint8_t puc_seq; /* PU Sequence Number */
+ uint8_t puc_daddr[IEEE80211_ADDR_LEN];
+} __packed;
+#endif
+
+/*
+ * 802.11s Action Frames
+ */
+#define IEEE80211_ACTION_CAT_MESHPEERING 30 /* XXX Linux */
+#define IEEE80211_ACTION_CAT_MESHLMETRIC 13
+#define IEEE80211_ACTION_CAT_MESHPATH 32 /* XXX Linux */
+#define IEEE80211_ACTION_CAT_INTERWORK 15
+#define IEEE80211_ACTION_CAT_RESOURCE 16
+#define IEEE80211_ACTION_CAT_PROXY 17
+
+/*
+ * Mesh Peering Action codes.
+ */
+enum {
+ IEEE80211_ACTION_MESHPEERING_OPEN = 0,
+ IEEE80211_ACTION_MESHPEERING_CONFIRM = 1,
+ IEEE80211_ACTION_MESHPEERING_CLOSE = 2,
+ /* 3-255 reserved */
+};
+
+/*
+ * Mesh Path Selection Action code.
+ */
+enum {
+ IEEE80211_ACTION_MESHPATH_SEL = 0,
+ /* 1-255 reserved */
+};
+
+/*
+ * Mesh Link Metric Action codes.
+ */
+enum {
+ IEEE80211_ACTION_MESHLMETRIC_REQ = 0, /* Link Metric Request */
+ IEEE80211_ACTION_MESHLMETRIC_REP = 1, /* Link Metric Report */
+ /* 2-255 reserved */
+};
+
+/*
+ * Mesh Portal Announcement Action codes.
+ */
+enum {
+ IEEE80211_ACTION_MESHPANN = 0,
+ /* 1-255 reserved */
+};
+
+/*
+ * Different mesh control structures based on the AE
+ * (Address Extension) bits.
+ */
+struct ieee80211_meshcntl {
+ uint8_t mc_flags; /* Address Extension 00 */
+ uint8_t mc_ttl; /* TTL */
+ uint8_t mc_seq[4]; /* Sequence No. */
+ /* NB: more addresses may follow */
+} __packed;
+
+struct ieee80211_meshcntl_ae01 {
+ uint8_t mc_flags; /* Address Extension 01 */
+ uint8_t mc_ttl; /* TTL */
+ uint8_t mc_seq[4]; /* Sequence No. */
+ uint8_t mc_addr4[IEEE80211_ADDR_LEN];
+} __packed;
+
+struct ieee80211_meshcntl_ae10 {
+ uint8_t mc_flags; /* Address Extension 10 */
+ uint8_t mc_ttl; /* TTL */
+ uint8_t mc_seq[4]; /* Sequence No. */
+ uint8_t mc_addr4[IEEE80211_ADDR_LEN];
+ uint8_t mc_addr5[IEEE80211_ADDR_LEN];
+} __packed;
+
+struct ieee80211_meshcntl_ae11 {
+ uint8_t mc_flags; /* Address Extension 11 */
+ uint8_t mc_ttl; /* TTL */
+ uint8_t mc_seq[4]; /* Sequence No. */
+ uint8_t mc_addr4[IEEE80211_ADDR_LEN];
+ uint8_t mc_addr5[IEEE80211_ADDR_LEN];
+ uint8_t mc_addr6[IEEE80211_ADDR_LEN];
+} __packed;
+
+#ifdef _KERNEL
+MALLOC_DECLARE(M_80211_MESH_RT);
+struct ieee80211_mesh_route {
+ TAILQ_ENTRY(ieee80211_mesh_route) rt_next;
+ int rt_crtime; /* creation time */
+ uint8_t rt_dest[IEEE80211_ADDR_LEN];
+ uint8_t rt_nexthop[IEEE80211_ADDR_LEN];
+ uint32_t rt_metric; /* path metric */
+ uint16_t rt_nhops; /* number of hops */
+ uint16_t rt_flags;
+#define IEEE80211_MESHRT_FLAGS_VALID 0x01 /* path discovery complete */
+#define IEEE80211_MESHRT_FLAGS_PROXY 0x02 /* proxy entry */
+ uint32_t rt_lifetime;
+ uint32_t rt_lastmseq; /* last seq# seen dest */
+ void *rt_priv; /* private data */
+};
+#define IEEE80211_MESH_ROUTE_PRIV(rt, cast) ((cast *)rt->rt_priv)
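+/*
+ * NB (illustrative; `struct hwmp_rt' is a hypothetical name): a path
+ * selection protocol reserves mpp_privlen bytes of per-route state and
+ * recovers it with the accessor above, e.g.
+ *
+ *	struct hwmp_rt *hr = IEEE80211_MESH_ROUTE_PRIV(rt, struct hwmp_rt);
+ */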
+
+#define IEEE80211_MESH_PROTO_DSZ 12 /* description size */
+/*
+ * Mesh Path Selection Protocol.
+ */
+enum ieee80211_state;
+struct ieee80211_mesh_proto_path {
+ uint8_t mpp_active;
+ char mpp_descr[IEEE80211_MESH_PROTO_DSZ];
+ uint8_t mpp_ie;
+ struct ieee80211_node *
+ (*mpp_discover)(struct ieee80211vap *,
+ const uint8_t [IEEE80211_ADDR_LEN],
+ struct mbuf *);
+ void (*mpp_peerdown)(struct ieee80211_node *);
+ void (*mpp_vattach)(struct ieee80211vap *);
+ void (*mpp_vdetach)(struct ieee80211vap *);
+ int (*mpp_newstate)(struct ieee80211vap *,
+ enum ieee80211_state, int);
+ const size_t mpp_privlen; /* size required in the routing table
+ for private data */
+ int mpp_inact; /* inact. timeout for invalid routes
+ (ticks) */
+};
+
+/*
+ * Mesh Link Metric Report Protocol.
+ */
+struct ieee80211_mesh_proto_metric {
+ uint8_t mpm_active;
+ char mpm_descr[IEEE80211_MESH_PROTO_DSZ];
+ uint8_t mpm_ie;
+ uint32_t (*mpm_metric)(struct ieee80211_node *);
+};
+
+#ifdef notyet
+/*
+ * Mesh Authentication Protocol.
+ */
+struct ieee80211_mesh_proto_auth {
+ uint8_t mpa_ie[4];
+};
+
+struct ieee80211_mesh_proto_congestion {
+};
+
+struct ieee80211_mesh_proto_sync {
+};
+#endif
+
+typedef uint32_t ieee80211_mesh_seq;
+#define IEEE80211_MESH_SEQ_LEQ(a, b) ((int32_t)((a)-(b)) <= 0)
+#define IEEE80211_MESH_SEQ_GEQ(a, b) ((int32_t)((a)-(b)) >= 0)
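+/*
+ * NB (worked example): the casts implement serial number arithmetic so
+ * comparisons survive 32-bit wraparound: with a = 5, b = 0xfffffffe,
+ * a - b wraps to 7 and (int32_t)7 >= 0, so IEEE80211_MESH_SEQ_GEQ(a, b)
+ * correctly reports `a' as the newer sequence number.
+ */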
+
+struct ieee80211_mesh_state {
+ int ms_idlen;
+ uint8_t ms_id[IEEE80211_MESHID_LEN];
+ ieee80211_mesh_seq ms_seq; /* seq no for meshcntl */
+ uint16_t ms_neighbors;
+ uint8_t ms_ttl; /* mesh ttl set in packets */
+#define IEEE80211_MESHFLAGS_AP 0x01 /* accept peers */
+#define IEEE80211_MESHFLAGS_PORTAL 0x02 /* mesh portal role */
+#define IEEE80211_MESHFLAGS_FWD 0x04 /* forward packets */
+ uint8_t ms_flags;
+ struct mtx ms_rt_lock;
+ struct callout ms_cleantimer;
+ TAILQ_HEAD(, ieee80211_mesh_route) ms_routes;
+ struct ieee80211_mesh_proto_metric *ms_pmetric;
+ struct ieee80211_mesh_proto_path *ms_ppath;
+};
+void ieee80211_mesh_attach(struct ieee80211com *);
+void ieee80211_mesh_detach(struct ieee80211com *);
+
+struct ieee80211_mesh_route *
+ ieee80211_mesh_rt_find(struct ieee80211vap *,
+ const uint8_t [IEEE80211_ADDR_LEN]);
+struct ieee80211_mesh_route *
+ ieee80211_mesh_rt_add(struct ieee80211vap *,
+ const uint8_t [IEEE80211_ADDR_LEN]);
+void ieee80211_mesh_rt_del(struct ieee80211vap *,
+ const uint8_t [IEEE80211_ADDR_LEN]);
+void ieee80211_mesh_rt_flush(struct ieee80211vap *);
+void ieee80211_mesh_rt_flush_peer(struct ieee80211vap *,
+ const uint8_t [IEEE80211_ADDR_LEN]);
+void ieee80211_mesh_proxy_check(struct ieee80211vap *,
+ const uint8_t [IEEE80211_ADDR_LEN]);
+
+int ieee80211_mesh_register_proto_path(const
+ struct ieee80211_mesh_proto_path *);
+int ieee80211_mesh_register_proto_metric(const
+ struct ieee80211_mesh_proto_metric *);
+
+uint8_t * ieee80211_add_meshid(uint8_t *, struct ieee80211vap *);
+uint8_t * ieee80211_add_meshconf(uint8_t *, struct ieee80211vap *);
+uint8_t * ieee80211_add_meshpeer(uint8_t *, uint8_t, uint16_t, uint16_t,
+ uint16_t);
+uint8_t * ieee80211_add_meshlmetric(uint8_t *, uint32_t);
+
+void ieee80211_mesh_node_init(struct ieee80211vap *,
+ struct ieee80211_node *);
+void ieee80211_mesh_node_cleanup(struct ieee80211_node *);
+void ieee80211_parse_meshid(struct ieee80211_node *,
+ const uint8_t *);
+struct ieee80211_scanparams;
+void ieee80211_mesh_init_neighbor(struct ieee80211_node *,
+ const struct ieee80211_frame *,
+ const struct ieee80211_scanparams *);
+void ieee80211_mesh_update_beacon(struct ieee80211vap *,
+ struct ieee80211_beacon_offsets *);
+
+/*
+ * Return non-zero if proxy operation is enabled.
+ */
+static __inline int
+ieee80211_mesh_isproxyena(struct ieee80211vap *vap)
+{
+ struct ieee80211_mesh_state *ms = vap->iv_mesh;
+ return (ms->ms_flags &
+ (IEEE80211_MESHFLAGS_AP | IEEE80211_MESHFLAGS_PORTAL)) != 0;
+}
+
+/*
+ * Process an outbound frame: if a path is known to the
+ * destination then return a reference to the next hop
+ * for immediate transmission. Otherwise initiate path
+ * discovery and, if possible, queue the packet to be
+ * sent when path discovery completes.
+ */
+static __inline struct ieee80211_node *
+ieee80211_mesh_discover(struct ieee80211vap *vap,
+ const uint8_t dest[IEEE80211_ADDR_LEN], struct mbuf *m)
+{
+ struct ieee80211_mesh_state *ms = vap->iv_mesh;
+ return ms->ms_ppath->mpp_discover(vap, dest, m);
+}
+
+#endif /* _KERNEL */
+#endif /* !_NET80211_IEEE80211_MESH_HH_ */
diff --git a/rtems/freebsd/net80211/ieee80211_monitor.c b/rtems/freebsd/net80211/ieee80211_monitor.c
new file mode 100644
index 00000000..ae5850cf
--- /dev/null
+++ b/rtems/freebsd/net80211/ieee80211_monitor.c
@@ -0,0 +1,140 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 2007-2008 Sam Leffler, Errno Consulting
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+#ifdef __FreeBSD__
+__FBSDID("$FreeBSD$");
+#endif
+
+/*
+ * IEEE 802.11 Monitor mode support.
+ */
+#include <rtems/freebsd/local/opt_inet.h>
+#include <rtems/freebsd/local/opt_wlan.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/kernel.h>
+
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/sockio.h>
+#include <rtems/freebsd/sys/endian.h>
+#include <rtems/freebsd/sys/errno.h>
+#include <rtems/freebsd/sys/proc.h>
+#include <rtems/freebsd/sys/sysctl.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/if_media.h>
+#include <rtems/freebsd/net/if_llc.h>
+#include <rtems/freebsd/net/ethernet.h>
+
+#include <rtems/freebsd/net/bpf.h>
+
+#include <rtems/freebsd/net80211/ieee80211_var.h>
+#include <rtems/freebsd/net80211/ieee80211_monitor.h>
+
+static void monitor_vattach(struct ieee80211vap *);
+static int monitor_newstate(struct ieee80211vap *, enum ieee80211_state, int);
+static int monitor_input(struct ieee80211_node *ni, struct mbuf *m,
+ int rssi, int nf);
+
+void
+ieee80211_monitor_attach(struct ieee80211com *ic)
+{
+ ic->ic_vattach[IEEE80211_M_MONITOR] = monitor_vattach;
+}
+
+void
+ieee80211_monitor_detach(struct ieee80211com *ic)
+{
+}
+
+static void
+monitor_vdetach(struct ieee80211vap *vap)
+{
+}
+
+static void
+monitor_vattach(struct ieee80211vap *vap)
+{
+ vap->iv_newstate = monitor_newstate;
+ vap->iv_input = monitor_input;
+ vap->iv_opdetach = monitor_vdetach;
+}
+
+/*
+ * IEEE80211_M_MONITOR vap state machine handler.
+ */
+static int
+monitor_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
+{
+ struct ieee80211com *ic = vap->iv_ic;
+ enum ieee80211_state ostate;
+
+ IEEE80211_LOCK_ASSERT(ic);
+
+ ostate = vap->iv_state;
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_STATE, "%s: %s -> %s (%d)\n",
+ __func__, ieee80211_state_name[ostate],
+ ieee80211_state_name[nstate], arg);
+ vap->iv_state = nstate; /* state transition */
+ if (nstate == IEEE80211_S_RUN) {
+ switch (ostate) {
+ case IEEE80211_S_INIT:
+ ieee80211_create_ibss(vap, ic->ic_curchan);
+ break;
+ default:
+ break;
+ }
+ /*
+ * NB: this shouldn't be here but many people use
+ * monitor mode for raw packets; once we switch
+ * them over to adhoc demo mode remove this.
+ */
+ ieee80211_node_authorize(vap->iv_bss);
+ }
+ return 0;
+}
+
+/*
+ * Process a received frame in monitor mode.
+ */
+static int
+monitor_input(struct ieee80211_node *ni, struct mbuf *m, int rssi, int nf)
+{
+ struct ieee80211vap *vap = ni->ni_vap;
+ struct ifnet *ifp = vap->iv_ifp;
+
+ ifp->if_ipackets++;
+
+ if (ieee80211_radiotap_active_vap(vap))
+ ieee80211_radiotap_rx(vap, m);
+ m_freem(m);
+ return -1;
+}
diff --git a/rtems/freebsd/net80211/ieee80211_monitor.h b/rtems/freebsd/net80211/ieee80211_monitor.h
new file mode 100644
index 00000000..09f95dba
--- /dev/null
+++ b/rtems/freebsd/net80211/ieee80211_monitor.h
@@ -0,0 +1,35 @@
+/*-
+ * Copyright (c) 2007-2008 Sam Leffler, Errno Consulting
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+#ifndef _NET80211_IEEE80211_MONITOR_HH_
+#define _NET80211_IEEE80211_MONITOR_HH_
+
+/*
+ * Monitor implementation definitions.
+ */
+void ieee80211_monitor_attach(struct ieee80211com *);
+void ieee80211_monitor_detach(struct ieee80211com *);
+#endif /* !_NET80211_IEEE80211_MONITOR_HH_ */
diff --git a/rtems/freebsd/net80211/ieee80211_node.c b/rtems/freebsd/net80211/ieee80211_node.c
new file mode 100644
index 00000000..df341e3c
--- /dev/null
+++ b/rtems/freebsd/net80211/ieee80211_node.c
@@ -0,0 +1,2641 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 2001 Atsushi Onoe
+ * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/freebsd/local/opt_wlan.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/kernel.h>
+
+#include <rtems/freebsd/sys/socket.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/if_media.h>
+#include <rtems/freebsd/net/ethernet.h>
+
+#include <rtems/freebsd/net80211/ieee80211_var.h>
+#include <rtems/freebsd/net80211/ieee80211_input.h>
+#ifdef IEEE80211_SUPPORT_SUPERG
+#include <rtems/freebsd/net80211/ieee80211_superg.h>
+#endif
+#ifdef IEEE80211_SUPPORT_TDMA
+#include <rtems/freebsd/net80211/ieee80211_tdma.h>
+#endif
+#include <rtems/freebsd/net80211/ieee80211_wds.h>
+#include <rtems/freebsd/net80211/ieee80211_mesh.h>
+#include <rtems/freebsd/net80211/ieee80211_ratectl.h>
+
+#include <rtems/freebsd/net/bpf.h>
+
+/*
+ * IEEE80211_NODE_HASHSIZE must be a power of 2.
+ */
+CTASSERT((IEEE80211_NODE_HASHSIZE & (IEEE80211_NODE_HASHSIZE-1)) == 0);
+
+/*
+ * Association id's are managed with a bit vector.
+ */
+#define IEEE80211_AID_SET(_vap, b) \
+ ((_vap)->iv_aid_bitmap[IEEE80211_AID(b) / 32] |= \
+ (1 << (IEEE80211_AID(b) % 32)))
+#define IEEE80211_AID_CLR(_vap, b) \
+ ((_vap)->iv_aid_bitmap[IEEE80211_AID(b) / 32] &= \
+ ~(1 << (IEEE80211_AID(b) % 32)))
+#define IEEE80211_AID_ISSET(_vap, b) \
+ ((_vap)->iv_aid_bitmap[IEEE80211_AID(b) / 32] & (1 << (IEEE80211_AID(b) % 32)))
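+/*
+ * NB (illustrative; assumes IEEE80211_AID() masks off the two high bits
+ * as in ieee80211.h): association id 37 maps to word 37 / 32 = 1,
+ * bit 37 % 32 = 5 of iv_aid_bitmap.
+ */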
+
+#ifdef IEEE80211_DEBUG_REFCNT
+#define REFCNT_LOC "%s (%s:%u) %p<%s> refcnt %d\n", __func__, func, line
+#else
+#define REFCNT_LOC "%s %p<%s> refcnt %d\n", __func__
+#endif
+
+static int ieee80211_sta_join1(struct ieee80211_node *);
+
+static struct ieee80211_node *node_alloc(struct ieee80211vap *,
+ const uint8_t [IEEE80211_ADDR_LEN]);
+static void node_cleanup(struct ieee80211_node *);
+static void node_free(struct ieee80211_node *);
+static void node_age(struct ieee80211_node *);
+static int8_t node_getrssi(const struct ieee80211_node *);
+static void node_getsignal(const struct ieee80211_node *, int8_t *, int8_t *);
+static void node_getmimoinfo(const struct ieee80211_node *,
+ struct ieee80211_mimo_info *);
+
+static void _ieee80211_free_node(struct ieee80211_node *);
+
+static void ieee80211_node_table_init(struct ieee80211com *ic,
+ struct ieee80211_node_table *nt, const char *name,
+ int inact, int keymaxix);
+static void ieee80211_node_table_reset(struct ieee80211_node_table *,
+ struct ieee80211vap *);
+static void ieee80211_node_table_cleanup(struct ieee80211_node_table *nt);
+static void ieee80211_erp_timeout(struct ieee80211com *);
+
+MALLOC_DEFINE(M_80211_NODE, "80211node", "802.11 node state");
+MALLOC_DEFINE(M_80211_NODE_IE, "80211nodeie", "802.11 node ie");
+
+void
+ieee80211_node_attach(struct ieee80211com *ic)
+{
+ /* XXX really want maxlen enforced per-sta */
+ ieee80211_ageq_init(&ic->ic_stageq, ic->ic_max_keyix * 8,
+ "802.11 staging q");
+ ieee80211_node_table_init(ic, &ic->ic_sta, "station",
+ IEEE80211_INACT_INIT, ic->ic_max_keyix);
+ callout_init(&ic->ic_inact, CALLOUT_MPSAFE);
+ callout_reset(&ic->ic_inact, IEEE80211_INACT_WAIT*hz,
+ ieee80211_node_timeout, ic);
+
+ ic->ic_node_alloc = node_alloc;
+ ic->ic_node_free = node_free;
+ ic->ic_node_cleanup = node_cleanup;
+ ic->ic_node_age = node_age;
+ ic->ic_node_drain = node_age; /* NB: same as age */
+ ic->ic_node_getrssi = node_getrssi;
+ ic->ic_node_getsignal = node_getsignal;
+ ic->ic_node_getmimoinfo = node_getmimoinfo;
+
+ /*
+ * Set flags to be propagated to all vap's;
+ * these define default behaviour/configuration.
+ */
+ ic->ic_flags_ext |= IEEE80211_FEXT_INACT; /* inactivity processing */
+}
+
+void
+ieee80211_node_detach(struct ieee80211com *ic)
+{
+
+ callout_drain(&ic->ic_inact);
+ ieee80211_node_table_cleanup(&ic->ic_sta);
+ ieee80211_ageq_cleanup(&ic->ic_stageq);
+}
+
+void
+ieee80211_node_vattach(struct ieee80211vap *vap)
+{
+ /* NB: driver can override */
+ vap->iv_max_aid = IEEE80211_AID_DEF;
+
+ /* default station inactivity timer settings */
+ vap->iv_inact_init = IEEE80211_INACT_INIT;
+ vap->iv_inact_auth = IEEE80211_INACT_AUTH;
+ vap->iv_inact_run = IEEE80211_INACT_RUN;
+ vap->iv_inact_probe = IEEE80211_INACT_PROBE;
+
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_INACT,
+ "%s: init %u auth %u run %u probe %u\n", __func__,
+ vap->iv_inact_init, vap->iv_inact_auth,
+ vap->iv_inact_run, vap->iv_inact_probe);
+}
+
+void
+ieee80211_node_latevattach(struct ieee80211vap *vap)
+{
+ if (vap->iv_opmode == IEEE80211_M_HOSTAP) {
+ /* XXX should we allow max aid to be zero? */
+ if (vap->iv_max_aid < IEEE80211_AID_MIN) {
+ vap->iv_max_aid = IEEE80211_AID_MIN;
+ if_printf(vap->iv_ifp,
+ "WARNING: max aid too small, changed to %d\n",
+ vap->iv_max_aid);
+ }
+ vap->iv_aid_bitmap = (uint32_t *) malloc(
+ howmany(vap->iv_max_aid, 32) * sizeof(uint32_t),
+ M_80211_NODE, M_NOWAIT | M_ZERO);
+ if (vap->iv_aid_bitmap == NULL) {
+ /* XXX no way to recover */
+ printf("%s: no memory for AID bitmap, max aid %d!\n",
+ __func__, vap->iv_max_aid);
+ vap->iv_max_aid = 0;
+ }
+ }
+
+ ieee80211_reset_bss(vap);
+
+ vap->iv_auth = ieee80211_authenticator_get(vap->iv_bss->ni_authmode);
+}
+
+void
+ieee80211_node_vdetach(struct ieee80211vap *vap)
+{
+ struct ieee80211com *ic = vap->iv_ic;
+
+ ieee80211_node_table_reset(&ic->ic_sta, vap);
+ if (vap->iv_bss != NULL) {
+ ieee80211_free_node(vap->iv_bss);
+ vap->iv_bss = NULL;
+ }
+ if (vap->iv_aid_bitmap != NULL) {
+ free(vap->iv_aid_bitmap, M_80211_NODE);
+ vap->iv_aid_bitmap = NULL;
+ }
+}
+
+/*
+ * Port authorize/unauthorize interfaces for use by an authenticator.
+ */
+
+void
+ieee80211_node_authorize(struct ieee80211_node *ni)
+{
+ struct ieee80211vap *vap = ni->ni_vap;
+
+ ni->ni_flags |= IEEE80211_NODE_AUTH;
+ ni->ni_inact_reload = vap->iv_inact_run;
+ ni->ni_inact = ni->ni_inact_reload;
+
+ IEEE80211_NOTE(vap, IEEE80211_MSG_INACT, ni,
+ "%s: inact_reload %u", __func__, ni->ni_inact_reload);
+}
+
+void
+ieee80211_node_unauthorize(struct ieee80211_node *ni)
+{
+ struct ieee80211vap *vap = ni->ni_vap;
+
+ ni->ni_flags &= ~IEEE80211_NODE_AUTH;
+ ni->ni_inact_reload = vap->iv_inact_auth;
+ if (ni->ni_inact > ni->ni_inact_reload)
+ ni->ni_inact = ni->ni_inact_reload;
+
+ IEEE80211_NOTE(vap, IEEE80211_MSG_INACT, ni,
+ "%s: inact_reload %u inact %u", __func__,
+ ni->ni_inact_reload, ni->ni_inact);
+}
+
+/*
+ * Fix tx parameters for a node according to ``association state''.
+ */
+void
+ieee80211_node_setuptxparms(struct ieee80211_node *ni)
+{
+ struct ieee80211vap *vap = ni->ni_vap;
+ enum ieee80211_phymode mode;
+
+ if (ni->ni_flags & IEEE80211_NODE_HT) {
+ if (IEEE80211_IS_CHAN_5GHZ(ni->ni_chan))
+ mode = IEEE80211_MODE_11NA;
+ else
+ mode = IEEE80211_MODE_11NG;
+ } else { /* legacy rate handling */
+ if (IEEE80211_IS_CHAN_ST(ni->ni_chan))
+ mode = IEEE80211_MODE_STURBO_A;
+ else if (IEEE80211_IS_CHAN_HALF(ni->ni_chan))
+ mode = IEEE80211_MODE_HALF;
+ else if (IEEE80211_IS_CHAN_QUARTER(ni->ni_chan))
+ mode = IEEE80211_MODE_QUARTER;
+ /* NB: 108A should be handled as 11a */
+ else if (IEEE80211_IS_CHAN_A(ni->ni_chan))
+ mode = IEEE80211_MODE_11A;
+ else if (IEEE80211_IS_CHAN_108G(ni->ni_chan) ||
+ (ni->ni_flags & IEEE80211_NODE_ERP))
+ mode = IEEE80211_MODE_11G;
+ else
+ mode = IEEE80211_MODE_11B;
+ }
+ ni->ni_txparms = &vap->iv_txparms[mode];
+}
+
+/*
+ * Set/change the channel. The rate set is also updated so as
+ * to ensure a consistent view by drivers.
+ * XXX should be private but hostap needs it to deal with CSA
+ */
+void
+ieee80211_node_set_chan(struct ieee80211_node *ni,
+ struct ieee80211_channel *chan)
+{
+ struct ieee80211com *ic = ni->ni_ic;
+ struct ieee80211vap *vap = ni->ni_vap;
+ enum ieee80211_phymode mode;
+
+ KASSERT(chan != IEEE80211_CHAN_ANYC, ("no channel"));
+
+ ni->ni_chan = chan;
+ mode = ieee80211_chan2mode(chan);
+ if (IEEE80211_IS_CHAN_HT(chan)) {
+ /*
+ * XXX Gotta be careful here; the rate set returned by
+ * ieee80211_get_suprates is actually an HT rate
+ * set so blindly copying it will be bad. We must
+ * install the legacy rate set in ni_rates and the
+ * HT rate set in ni_htrates.
+ */
+ ni->ni_htrates = *ieee80211_get_suphtrates(ic, chan);
+ /*
+ * Setup bss tx parameters based on operating mode. We
+ * use legacy rates when operating in a mixed HT+non-HT bss
+ * and non-ERP rates in 11g for mixed ERP+non-ERP bss.
+ */
+ if (mode == IEEE80211_MODE_11NA &&
+ (vap->iv_flags_ht & IEEE80211_FHT_PUREN) == 0)
+ mode = IEEE80211_MODE_11A;
+ else if (mode == IEEE80211_MODE_11NG &&
+ (vap->iv_flags_ht & IEEE80211_FHT_PUREN) == 0)
+ mode = IEEE80211_MODE_11G;
+ if (mode == IEEE80211_MODE_11G &&
+ (vap->iv_flags & IEEE80211_F_PUREG) == 0)
+ mode = IEEE80211_MODE_11B;
+ }
+ ni->ni_txparms = &vap->iv_txparms[mode];
+ ni->ni_rates = *ieee80211_get_suprates(ic, chan);
+}
+
+static __inline void
+copy_bss(struct ieee80211_node *nbss, const struct ieee80211_node *obss)
+{
+ /* propagate useful state */
+ nbss->ni_authmode = obss->ni_authmode;
+ nbss->ni_txpower = obss->ni_txpower;
+ nbss->ni_vlan = obss->ni_vlan;
+ /* XXX statistics? */
+ /* XXX legacy WDS bssid? */
+}
+
+void
+ieee80211_create_ibss(struct ieee80211vap* vap, struct ieee80211_channel *chan)
+{
+ struct ieee80211com *ic = vap->iv_ic;
+ struct ieee80211_node *ni;
+
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_SCAN,
+ "%s: creating %s on channel %u\n", __func__,
+ ieee80211_opmode_name[vap->iv_opmode],
+ ieee80211_chan2ieee(ic, chan));
+
+ ni = ieee80211_alloc_node(&ic->ic_sta, vap, vap->iv_myaddr);
+ if (ni == NULL) {
+ /* XXX recovery? */
+ return;
+ }
+ IEEE80211_ADDR_COPY(ni->ni_bssid, vap->iv_myaddr);
+ ni->ni_esslen = vap->iv_des_ssid[0].len;
+ memcpy(ni->ni_essid, vap->iv_des_ssid[0].ssid, ni->ni_esslen);
+ if (vap->iv_bss != NULL)
+ copy_bss(ni, vap->iv_bss);
+ ni->ni_intval = ic->ic_bintval;
+ if (vap->iv_flags & IEEE80211_F_PRIVACY)
+ ni->ni_capinfo |= IEEE80211_CAPINFO_PRIVACY;
+ if (ic->ic_phytype == IEEE80211_T_FH) {
+ ni->ni_fhdwell = 200; /* XXX */
+ ni->ni_fhindex = 1;
+ }
+ if (vap->iv_opmode == IEEE80211_M_IBSS) {
+ vap->iv_flags |= IEEE80211_F_SIBSS;
+ ni->ni_capinfo |= IEEE80211_CAPINFO_IBSS; /* XXX */
+ if (vap->iv_flags & IEEE80211_F_DESBSSID)
+ IEEE80211_ADDR_COPY(ni->ni_bssid, vap->iv_des_bssid);
+ else {
+ get_random_bytes(ni->ni_bssid, IEEE80211_ADDR_LEN);
+ /* clear group bit, add local bit */
+ ni->ni_bssid[0] = (ni->ni_bssid[0] &~ 0x01) | 0x02;
+ }
+ } else if (vap->iv_opmode == IEEE80211_M_AHDEMO) {
+ if (vap->iv_flags & IEEE80211_F_DESBSSID)
+ IEEE80211_ADDR_COPY(ni->ni_bssid, vap->iv_des_bssid);
+ else
+#ifdef IEEE80211_SUPPORT_TDMA
+ if ((vap->iv_caps & IEEE80211_C_TDMA) == 0)
+#endif
+ memset(ni->ni_bssid, 0, IEEE80211_ADDR_LEN);
+#ifdef IEEE80211_SUPPORT_MESH
+ } else if (vap->iv_opmode == IEEE80211_M_MBSS) {
+ ni->ni_meshidlen = vap->iv_mesh->ms_idlen;
+ memcpy(ni->ni_meshid, vap->iv_mesh->ms_id, ni->ni_meshidlen);
+#endif
+ }
+ /*
+ * Fix the channel and related attributes.
+ */
+ /* clear DFS CAC state on previous channel */
+ if (ic->ic_bsschan != IEEE80211_CHAN_ANYC &&
+ ic->ic_bsschan->ic_freq != chan->ic_freq &&
+ IEEE80211_IS_CHAN_CACDONE(ic->ic_bsschan))
+ ieee80211_dfs_cac_clear(ic, ic->ic_bsschan);
+ ic->ic_bsschan = chan;
+ ieee80211_node_set_chan(ni, chan);
+ ic->ic_curmode = ieee80211_chan2mode(chan);
+ /*
+ * Do mode-specific setup.
+ */
+ if (IEEE80211_IS_CHAN_FULL(chan)) {
+ if (IEEE80211_IS_CHAN_ANYG(chan)) {
+ /*
+ * Use a mixed 11b/11g basic rate set.
+ */
+ ieee80211_setbasicrates(&ni->ni_rates,
+ IEEE80211_MODE_11G);
+ if (vap->iv_flags & IEEE80211_F_PUREG) {
+ /*
+ * Also mark OFDM rates basic so 11b
+ * stations do not join (WiFi compliance).
+ */
+ ieee80211_addbasicrates(&ni->ni_rates,
+ IEEE80211_MODE_11A);
+ }
+ } else if (IEEE80211_IS_CHAN_B(chan)) {
+ /*
+ * Force pure 11b rate set.
+ */
+ ieee80211_setbasicrates(&ni->ni_rates,
+ IEEE80211_MODE_11B);
+ }
+ }
+
+ (void) ieee80211_sta_join1(ieee80211_ref_node(ni));
+}
+
+/*
+ * Reset bss state on transition to the INIT state.
+ * Clear any stations from the table (they have been
+ * deauth'd) and reset the bss node (clears key, rate
+ * etc. state).
+ */
+void
+ieee80211_reset_bss(struct ieee80211vap *vap)
+{
+ struct ieee80211com *ic = vap->iv_ic;
+ struct ieee80211_node *ni, *obss;
+
+ ieee80211_node_table_reset(&ic->ic_sta, vap);
+ /* XXX multi-bss: wrong */
+ ieee80211_reset_erp(ic);
+
+ ni = ieee80211_alloc_node(&ic->ic_sta, vap, vap->iv_myaddr);
+ KASSERT(ni != NULL, ("unable to setup initial BSS node"));
+ obss = vap->iv_bss;
+ vap->iv_bss = ieee80211_ref_node(ni);
+ if (obss != NULL) {
+ copy_bss(ni, obss);
+ ni->ni_intval = ic->ic_bintval;
+ ieee80211_free_node(obss);
+ } else
+ IEEE80211_ADDR_COPY(ni->ni_bssid, vap->iv_myaddr);
+}
+
+static int
+match_ssid(const struct ieee80211_node *ni,
+ int nssid, const struct ieee80211_scan_ssid ssids[])
+{
+ int i;
+
+ for (i = 0; i < nssid; i++) {
+ if (ni->ni_esslen == ssids[i].len &&
+ memcmp(ni->ni_essid, ssids[i].ssid, ni->ni_esslen) == 0)
+ return 1;
+ }
+ return 0;
+}
+
+/*
+ * Test a node for suitability/compatibility.
+ */
+static int
+check_bss(struct ieee80211vap *vap, struct ieee80211_node *ni)
+{
+ struct ieee80211com *ic = ni->ni_ic;
+ uint8_t rate;
+
+ if (isclr(ic->ic_chan_active, ieee80211_chan2ieee(ic, ni->ni_chan)))
+ return 0;
+ if (vap->iv_opmode == IEEE80211_M_IBSS) {
+ if ((ni->ni_capinfo & IEEE80211_CAPINFO_IBSS) == 0)
+ return 0;
+ } else {
+ if ((ni->ni_capinfo & IEEE80211_CAPINFO_ESS) == 0)
+ return 0;
+ }
+ if (vap->iv_flags & IEEE80211_F_PRIVACY) {
+ if ((ni->ni_capinfo & IEEE80211_CAPINFO_PRIVACY) == 0)
+ return 0;
+ } else {
+ /* XXX does this mean privacy is supported or required? */
+ if (ni->ni_capinfo & IEEE80211_CAPINFO_PRIVACY)
+ return 0;
+ }
+ rate = ieee80211_fix_rate(ni, &ni->ni_rates,
+ IEEE80211_F_JOIN | IEEE80211_F_DONEGO | IEEE80211_F_DOFRATE);
+ if (rate & IEEE80211_RATE_BASIC)
+ return 0;
+ if (vap->iv_des_nssid != 0 &&
+ !match_ssid(ni, vap->iv_des_nssid, vap->iv_des_ssid))
+ return 0;
+ if ((vap->iv_flags & IEEE80211_F_DESBSSID) &&
+ !IEEE80211_ADDR_EQ(vap->iv_des_bssid, ni->ni_bssid))
+ return 0;
+ return 1;
+}
+
+#ifdef IEEE80211_DEBUG
+/*
+ * Display node suitability/compatibility.
+ */
+static void
+check_bss_debug(struct ieee80211vap *vap, struct ieee80211_node *ni)
+{
+ struct ieee80211com *ic = ni->ni_ic;
+ uint8_t rate;
+ int fail;
+
+ fail = 0;
+ if (isclr(ic->ic_chan_active, ieee80211_chan2ieee(ic, ni->ni_chan)))
+ fail |= 0x01;
+ if (vap->iv_opmode == IEEE80211_M_IBSS) {
+ if ((ni->ni_capinfo & IEEE80211_CAPINFO_IBSS) == 0)
+ fail |= 0x02;
+ } else {
+ if ((ni->ni_capinfo & IEEE80211_CAPINFO_ESS) == 0)
+ fail |= 0x02;
+ }
+ if (vap->iv_flags & IEEE80211_F_PRIVACY) {
+ if ((ni->ni_capinfo & IEEE80211_CAPINFO_PRIVACY) == 0)
+ fail |= 0x04;
+ } else {
+ /* XXX does this mean privacy is supported or required? */
+ if (ni->ni_capinfo & IEEE80211_CAPINFO_PRIVACY)
+ fail |= 0x04;
+ }
+ rate = ieee80211_fix_rate(ni, &ni->ni_rates,
+ IEEE80211_F_JOIN | IEEE80211_F_DONEGO | IEEE80211_F_DOFRATE);
+ if (rate & IEEE80211_RATE_BASIC)
+ fail |= 0x08;
+ if (vap->iv_des_nssid != 0 &&
+ !match_ssid(ni, vap->iv_des_nssid, vap->iv_des_ssid))
+ fail |= 0x10;
+ if ((vap->iv_flags & IEEE80211_F_DESBSSID) &&
+ !IEEE80211_ADDR_EQ(vap->iv_des_bssid, ni->ni_bssid))
+ fail |= 0x20;
+
+ printf(" %c %s", fail ? '-' : '+', ether_sprintf(ni->ni_macaddr));
+ printf(" %s%c", ether_sprintf(ni->ni_bssid), fail & 0x20 ? '!' : ' ');
+ printf(" %3d%c",
+ ieee80211_chan2ieee(ic, ni->ni_chan), fail & 0x01 ? '!' : ' ');
+ printf(" %2dM%c", (rate & IEEE80211_RATE_VAL) / 2,
+ fail & 0x08 ? '!' : ' ');
+ printf(" %4s%c",
+ (ni->ni_capinfo & IEEE80211_CAPINFO_ESS) ? "ess" :
+ (ni->ni_capinfo & IEEE80211_CAPINFO_IBSS) ? "ibss" :
+ "????",
+ fail & 0x02 ? '!' : ' ');
+ printf(" %3s%c ",
+ (ni->ni_capinfo & IEEE80211_CAPINFO_PRIVACY) ? "wep" : "no",
+ fail & 0x04 ? '!' : ' ');
+ ieee80211_print_essid(ni->ni_essid, ni->ni_esslen);
+ printf("%s\n", fail & 0x10 ? "!" : "");
+}
+#endif /* IEEE80211_DEBUG */
+
+/*
+ * Handle 802.11 ad hoc network merge. The
+ * convention, set by the Wireless Ethernet Compatibility Alliance
+ * (WECA), is that an 802.11 station will change its BSSID to match
+ * the "oldest" 802.11 ad hoc network, on the same channel, that
+ * has the station's desired SSID. The "oldest" 802.11 network
+ * sends beacons with the greatest TSF timestamp.
+ *
+ * The caller is assumed to validate TSF's before attempting a merge.
+ *
+ * Return !0 if the BSSID changed, 0 otherwise.
+ */
+int
+ieee80211_ibss_merge(struct ieee80211_node *ni)
+{
+ struct ieee80211vap *vap = ni->ni_vap;
+#ifdef IEEE80211_DEBUG
+ struct ieee80211com *ic = ni->ni_ic;
+#endif
+
+ if (ni == vap->iv_bss ||
+ IEEE80211_ADDR_EQ(ni->ni_bssid, vap->iv_bss->ni_bssid)) {
+ /* unchanged, nothing to do */
+ return 0;
+ }
+ if (!check_bss(vap, ni)) {
+ /* capabilities mismatch */
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_ASSOC,
+ "%s: merge failed, capabilities mismatch\n", __func__);
+#ifdef IEEE80211_DEBUG
+ if (ieee80211_msg_assoc(vap))
+ check_bss_debug(vap, ni);
+#endif
+ vap->iv_stats.is_ibss_capmismatch++;
+ return 0;
+ }
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_ASSOC,
+ "%s: new bssid %s: %s preamble, %s slot time%s\n", __func__,
+ ether_sprintf(ni->ni_bssid),
+ ic->ic_flags&IEEE80211_F_SHPREAMBLE ? "short" : "long",
+ ic->ic_flags&IEEE80211_F_SHSLOT ? "short" : "long",
+ ic->ic_flags&IEEE80211_F_USEPROT ? ", protection" : ""
+ );
+ return ieee80211_sta_join1(ieee80211_ref_node(ni));
+}
+
+/*
+ * Calculate HT channel promotion flags for all vaps.
+ * This assumes ni_chan has been set up for each vap.
+ */
+static int
+gethtadjustflags(struct ieee80211com *ic)
+{
+ struct ieee80211vap *vap;
+ int flags;
+
+ flags = 0;
+ /* XXX locking */
+ TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
+ if (vap->iv_state < IEEE80211_S_RUN)
+ continue;
+ switch (vap->iv_opmode) {
+ case IEEE80211_M_WDS:
+ case IEEE80211_M_STA:
+ case IEEE80211_M_AHDEMO:
+ case IEEE80211_M_HOSTAP:
+ case IEEE80211_M_IBSS:
+ case IEEE80211_M_MBSS:
+ flags |= ieee80211_htchanflags(vap->iv_bss->ni_chan);
+ break;
+ default:
+ break;
+ }
+ }
+ return flags;
+}
+
+/*
+ * Check if the current channel needs to change based on whether
+ * any vap's are using HT20/HT40. This is used to sync the state
+ * of ic_curchan after a channel width change on a running vap.
+ */
+void
+ieee80211_sync_curchan(struct ieee80211com *ic)
+{
+ struct ieee80211_channel *c;
+
+ c = ieee80211_ht_adjust_channel(ic, ic->ic_curchan, gethtadjustflags(ic));
+ if (c != ic->ic_curchan) {
+ ic->ic_curchan = c;
+ ic->ic_curmode = ieee80211_chan2mode(ic->ic_curchan);
+ ic->ic_rt = ieee80211_get_ratetable(ic->ic_curchan);
+ IEEE80211_UNLOCK(ic);
+ ic->ic_set_channel(ic);
+ ieee80211_radiotap_chan_change(ic);
+ IEEE80211_LOCK(ic);
+ }
+}
+
+/*
+ * Set up the current channel. The requested channel may be
+ * promoted if other vaps are operating with HT20/HT40.
+ */
+void
+ieee80211_setupcurchan(struct ieee80211com *ic, struct ieee80211_channel *c)
+{
+ if (ic->ic_htcaps & IEEE80211_HTC_HT) {
+ int flags = gethtadjustflags(ic);
+ /*
+ * Check for channel promotion required to support the
+	 * set of running vaps. This assumes we are called
+	 * after ni_chan is set up for each vap.
+ */
+ /* NB: this assumes IEEE80211_FHT_USEHT40 > IEEE80211_FHT_HT */
+ if (flags > ieee80211_htchanflags(c))
+ c = ieee80211_ht_adjust_channel(ic, c, flags);
+ }
+ ic->ic_bsschan = ic->ic_curchan = c;
+ ic->ic_curmode = ieee80211_chan2mode(ic->ic_curchan);
+ ic->ic_rt = ieee80211_get_ratetable(ic->ic_curchan);
+}
+
+/*
+ * Change the current channel. The channel change is guaranteed to have
+ * happened before the next state change.
+ */
+void
+ieee80211_setcurchan(struct ieee80211com *ic, struct ieee80211_channel *c)
+{
+ ieee80211_setupcurchan(ic, c);
+ ieee80211_runtask(ic, &ic->ic_chan_task);
+}
+
+/*
+ * Join the specified IBSS/BSS network. The node is assumed to
+ * be passed in with a held reference.
+ */
+static int
+ieee80211_sta_join1(struct ieee80211_node *selbs)
+{
+ struct ieee80211vap *vap = selbs->ni_vap;
+ struct ieee80211com *ic = selbs->ni_ic;
+ struct ieee80211_node *obss;
+ int canreassoc;
+
+ /*
+ * Committed to selbs, setup state.
+ */
+ obss = vap->iv_bss;
+ /*
+	 * Check if the old and new nodes have the same address, in
+	 * which case we can reassociate when operating in sta mode.
+ */
+ canreassoc = (obss != NULL &&
+ vap->iv_state == IEEE80211_S_RUN &&
+ IEEE80211_ADDR_EQ(obss->ni_macaddr, selbs->ni_macaddr));
+ vap->iv_bss = selbs; /* NB: caller assumed to bump refcnt */
+ if (obss != NULL) {
+ copy_bss(selbs, obss);
+ ieee80211_node_decref(obss); /* iv_bss reference */
+ ieee80211_free_node(obss); /* station table reference */
+ obss = NULL; /* NB: guard against later use */
+ }
+
+ /*
+ * Delete unusable rates; we've already checked
+ * that the negotiated rate set is acceptable.
+ */
+ ieee80211_fix_rate(vap->iv_bss, &vap->iv_bss->ni_rates,
+ IEEE80211_F_DODEL | IEEE80211_F_JOIN);
+
+ ieee80211_setcurchan(ic, selbs->ni_chan);
+ /*
+ * Set the erp state (mostly the slot time) to deal with
+ * the auto-select case; this should be redundant if the
+ * mode is locked.
+ */
+ ieee80211_reset_erp(ic);
+ ieee80211_wme_initparams(vap);
+
+ if (vap->iv_opmode == IEEE80211_M_STA) {
+ if (canreassoc) {
+ /* Reassociate */
+ ieee80211_new_state(vap, IEEE80211_S_ASSOC, 1);
+ } else {
+ /*
+ * Act as if we received a DEAUTH frame in case we
+ * are invoked from the RUN state. This will cause
+ * us to try to re-authenticate if we are operating
+ * as a station.
+ */
+ ieee80211_new_state(vap, IEEE80211_S_AUTH,
+ IEEE80211_FC0_SUBTYPE_DEAUTH);
+ }
+ } else
+ ieee80211_new_state(vap, IEEE80211_S_RUN, -1);
+ return 1;
+}
+
+int
+ieee80211_sta_join(struct ieee80211vap *vap, struct ieee80211_channel *chan,
+ const struct ieee80211_scan_entry *se)
+{
+ struct ieee80211com *ic = vap->iv_ic;
+ struct ieee80211_node *ni;
+
+ ni = ieee80211_alloc_node(&ic->ic_sta, vap, se->se_macaddr);
+ if (ni == NULL) {
+ /* XXX msg */
+ return 0;
+ }
+ /*
+ * Expand scan state into node's format.
+ * XXX may not need all this stuff
+ */
+ IEEE80211_ADDR_COPY(ni->ni_bssid, se->se_bssid);
+ ni->ni_esslen = se->se_ssid[1];
+ memcpy(ni->ni_essid, se->se_ssid+2, ni->ni_esslen);
+ ni->ni_tstamp.tsf = se->se_tstamp.tsf;
+ ni->ni_intval = se->se_intval;
+ ni->ni_capinfo = se->se_capinfo;
+ ni->ni_chan = chan;
+ ni->ni_timoff = se->se_timoff;
+ ni->ni_fhdwell = se->se_fhdwell;
+ ni->ni_fhindex = se->se_fhindex;
+ ni->ni_erp = se->se_erp;
+ IEEE80211_RSSI_LPF(ni->ni_avgrssi, se->se_rssi);
+ ni->ni_noise = se->se_noise;
+ if (vap->iv_opmode == IEEE80211_M_STA) {
+ /* NB: only infrastructure mode requires an associd */
+ ni->ni_flags |= IEEE80211_NODE_ASSOCID;
+ }
+
+ if (ieee80211_ies_init(&ni->ni_ies, se->se_ies.data, se->se_ies.len)) {
+ ieee80211_ies_expand(&ni->ni_ies);
+#ifdef IEEE80211_SUPPORT_SUPERG
+ if (ni->ni_ies.ath_ie != NULL)
+ ieee80211_parse_ath(ni, ni->ni_ies.ath_ie);
+#endif
+ if (ni->ni_ies.htcap_ie != NULL)
+ ieee80211_parse_htcap(ni, ni->ni_ies.htcap_ie);
+ if (ni->ni_ies.htinfo_ie != NULL)
+ ieee80211_parse_htinfo(ni, ni->ni_ies.htinfo_ie);
+#ifdef IEEE80211_SUPPORT_MESH
+ if (ni->ni_ies.meshid_ie != NULL)
+ ieee80211_parse_meshid(ni, ni->ni_ies.meshid_ie);
+#endif
+#ifdef IEEE80211_SUPPORT_TDMA
+ if (ni->ni_ies.tdma_ie != NULL)
+ ieee80211_parse_tdma(ni, ni->ni_ies.tdma_ie);
+#endif
+ }
+
+ vap->iv_dtim_period = se->se_dtimperiod;
+ vap->iv_dtim_count = 0;
+
+	/* NB: must be after ni_chan is set up */
+ ieee80211_setup_rates(ni, se->se_rates, se->se_xrates,
+ IEEE80211_F_DOSORT);
+ if (ieee80211_iserp_rateset(&ni->ni_rates))
+ ni->ni_flags |= IEEE80211_NODE_ERP;
+ ieee80211_node_setuptxparms(ni);
+ ieee80211_ratectl_node_init(ni);
+
+ return ieee80211_sta_join1(ieee80211_ref_node(ni));
+}
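+
+/*
+ * Example (a minimal sketch, not from the original source): a scan
+ * policy that has selected a candidate entry joins it as below; the
+ * se_chan field holding the entry's channel is an assumption.
+ */
+#if 0
+static int
+example_join_candidate(struct ieee80211vap *vap,
+	const struct ieee80211_scan_entry *se)
+{
+	/* returns !0 on success, 0 if no node could be allocated */
+	return ieee80211_sta_join(vap, se->se_chan, se);
+}
+#endif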
+
+/*
+ * Leave the specified IBSS/BSS network. The node is assumed to
+ * be passed in with a held reference.
+ */
+void
+ieee80211_sta_leave(struct ieee80211_node *ni)
+{
+ struct ieee80211com *ic = ni->ni_ic;
+
+ ic->ic_node_cleanup(ni);
+ ieee80211_notify_node_leave(ni);
+}
+
+/*
+ * Send a deauthenticate frame and drop the station.
+ */
+void
+ieee80211_node_deauth(struct ieee80211_node *ni, int reason)
+{
+	/* NB: bump the refcnt to be sure temporary nodes are not reclaimed */
+ ieee80211_ref_node(ni);
+ if (ni->ni_associd != 0)
+ IEEE80211_SEND_MGMT(ni, IEEE80211_FC0_SUBTYPE_DEAUTH, reason);
+ ieee80211_node_leave(ni);
+ ieee80211_free_node(ni);
+}
+
+static struct ieee80211_node *
+node_alloc(struct ieee80211vap *vap, const uint8_t macaddr[IEEE80211_ADDR_LEN])
+{
+ struct ieee80211_node *ni;
+
+ ni = (struct ieee80211_node *) malloc(sizeof(struct ieee80211_node),
+ M_80211_NODE, M_NOWAIT | M_ZERO);
+ return ni;
+}
+
+/*
+ * Initialize an ie blob with the specified data. If previous
+ * data exists, re-use the data block. As a side effect we clear
+ * all references to specific ie's; the caller is required to
+ * recalculate them.
+ */
+int
+ieee80211_ies_init(struct ieee80211_ies *ies, const uint8_t *data, int len)
+{
+ /* NB: assumes data+len are the last fields */
+ memset(ies, 0, offsetof(struct ieee80211_ies, data));
+ if (ies->data != NULL && ies->len != len) {
+ /* data size changed */
+ free(ies->data, M_80211_NODE_IE);
+ ies->data = NULL;
+ }
+ if (ies->data == NULL) {
+ ies->data = (uint8_t *) malloc(len, M_80211_NODE_IE, M_NOWAIT);
+ if (ies->data == NULL) {
+ ies->len = 0;
+ /* NB: pointers have already been zero'd above */
+ return 0;
+ }
+ }
+ memcpy(ies->data, data, len);
+ ies->len = len;
+ return 1;
+}
+
+/*
+ * Reclaim storage for an ie blob.
+ */
+void
+ieee80211_ies_cleanup(struct ieee80211_ies *ies)
+{
+ if (ies->data != NULL)
+ free(ies->data, M_80211_NODE_IE);
+}
+
+/*
+ * Expand an ie blob's data contents to fill in the individual
+ * ie pointers. The data blob is assumed to be well-formed;
+ * we don't do any validity checking of ie lengths.
+ */
+void
+ieee80211_ies_expand(struct ieee80211_ies *ies)
+{
+ uint8_t *ie;
+ int ielen;
+
+ ie = ies->data;
+ ielen = ies->len;
+ while (ielen > 0) {
+ switch (ie[0]) {
+ case IEEE80211_ELEMID_VENDOR:
+ if (iswpaoui(ie))
+ ies->wpa_ie = ie;
+ else if (iswmeoui(ie))
+ ies->wme_ie = ie;
+#ifdef IEEE80211_SUPPORT_SUPERG
+ else if (isatherosoui(ie))
+ ies->ath_ie = ie;
+#endif
+#ifdef IEEE80211_SUPPORT_TDMA
+ else if (istdmaoui(ie))
+ ies->tdma_ie = ie;
+#endif
+ break;
+ case IEEE80211_ELEMID_RSN:
+ ies->rsn_ie = ie;
+ break;
+ case IEEE80211_ELEMID_HTCAP:
+ ies->htcap_ie = ie;
+ break;
+#ifdef IEEE80211_SUPPORT_MESH
+ case IEEE80211_ELEMID_MESHID:
+ ies->meshid_ie = ie;
+ break;
+#endif
+ }
+ ielen -= 2 + ie[1];
+ ie += 2 + ie[1];
+ }
+}
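+
+/*
+ * Example (a minimal sketch, not from the original source): each ie
+ * is a TLV of [element id][length][payload], which is why the walk
+ * above advances by 2 + ie[1] bytes.  Element id 48 for RSN follows
+ * the standard numbering; the blob contents here are made up.
+ */
+#if 0
+static void
+example_ie_blob(void)
+{
+	/* one RSN element: id 48, length 2, two payload bytes */
+	static const uint8_t blob[] = { 48, 2, 0x01, 0x00 };
+	struct ieee80211_ies ies;
+
+	memset(&ies, 0, sizeof(ies));	/* data/len must start out NULL/0 */
+	if (ieee80211_ies_init(&ies, blob, sizeof(blob))) {
+		ieee80211_ies_expand(&ies);
+		/* ies.rsn_ie now points at the copy inside ies.data */
+		ieee80211_ies_cleanup(&ies);
+	}
+}
+#endif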
+
+/*
+ * Reclaim any resources in a node and reset any critical
+ * state. Typically nodes are free'd immediately after,
+ * but in some cases the storage may be reused so we need
+ * to ensure consistent state (should probably fix that).
+ */
+static void
+node_cleanup(struct ieee80211_node *ni)
+{
+#define N(a) (sizeof(a)/sizeof(a[0]))
+ struct ieee80211vap *vap = ni->ni_vap;
+ struct ieee80211com *ic = ni->ni_ic;
+ int i;
+
+ /* NB: preserve ni_table */
+ if (ni->ni_flags & IEEE80211_NODE_PWR_MGT) {
+ if (vap->iv_opmode != IEEE80211_M_STA)
+ vap->iv_ps_sta--;
+ ni->ni_flags &= ~IEEE80211_NODE_PWR_MGT;
+ IEEE80211_NOTE(vap, IEEE80211_MSG_POWER, ni,
+ "power save mode off, %u sta's in ps mode", vap->iv_ps_sta);
+ }
+ /*
+ * Cleanup any HT-related state.
+ */
+ if (ni->ni_flags & IEEE80211_NODE_HT)
+ ieee80211_ht_node_cleanup(ni);
+#ifdef IEEE80211_SUPPORT_SUPERG
+ else if (ni->ni_ath_flags & IEEE80211_NODE_ATH)
+ ieee80211_ff_node_cleanup(ni);
+#endif
+#ifdef IEEE80211_SUPPORT_MESH
+ /*
+ * Cleanup any mesh-related state.
+ */
+ if (vap->iv_opmode == IEEE80211_M_MBSS)
+ ieee80211_mesh_node_cleanup(ni);
+#endif
+ /*
+ * Clear any staging queue entries.
+ */
+ ieee80211_ageq_drain_node(&ic->ic_stageq, ni);
+
+ /*
+	 * Clear the AREF flag, which marks that the authorization
+	 * refcnt bump has happened. This is probably not needed since
+	 * the node should always be removed from the table (and thus
+	 * not found), but do it just in case.
+ * Likewise clear the ASSOCID flag as these flags are intended
+ * to be managed in tandem.
+ */
+ ni->ni_flags &= ~(IEEE80211_NODE_AREF | IEEE80211_NODE_ASSOCID);
+
+ /*
+ * Drain power save queue and, if needed, clear TIM.
+ */
+ if (ieee80211_node_psq_drain(ni) != 0 && vap->iv_set_tim != NULL)
+ vap->iv_set_tim(ni, 0);
+
+ ni->ni_associd = 0;
+ if (ni->ni_challenge != NULL) {
+ free(ni->ni_challenge, M_80211_NODE);
+ ni->ni_challenge = NULL;
+ }
+ /*
+ * Preserve SSID, WPA, and WME ie's so the bss node is
+ * reusable during a re-auth/re-assoc state transition.
+ * If we remove these data they will not be recreated
+ * because they come from a probe-response or beacon frame
+ * which cannot be expected prior to the association-response.
+ * This should not be an issue when operating in other modes
+ * as stations leaving always go through a full state transition
+ * which will rebuild this state.
+ *
+ * XXX does this leave us open to inheriting old state?
+ */
+ for (i = 0; i < N(ni->ni_rxfrag); i++)
+ if (ni->ni_rxfrag[i] != NULL) {
+ m_freem(ni->ni_rxfrag[i]);
+ ni->ni_rxfrag[i] = NULL;
+ }
+ /*
+ * Must be careful here to remove any key map entry w/o a LOR.
+ */
+ ieee80211_node_delucastkey(ni);
+#undef N
+}
+
+static void
+node_free(struct ieee80211_node *ni)
+{
+ struct ieee80211com *ic = ni->ni_ic;
+
+ ieee80211_ratectl_node_deinit(ni);
+ ic->ic_node_cleanup(ni);
+ ieee80211_ies_cleanup(&ni->ni_ies);
+ ieee80211_psq_cleanup(&ni->ni_psq);
+ free(ni, M_80211_NODE);
+}
+
+static void
+node_age(struct ieee80211_node *ni)
+{
+ struct ieee80211vap *vap = ni->ni_vap;
+
+ IEEE80211_NODE_LOCK_ASSERT(&vap->iv_ic->ic_sta);
+
+ /*
+ * Age frames on the power save queue.
+ */
+ if (ieee80211_node_psq_age(ni) != 0 &&
+ ni->ni_psq.psq_len == 0 && vap->iv_set_tim != NULL)
+ vap->iv_set_tim(ni, 0);
+ /*
+ * Age out HT resources (e.g. frames on the
+ * A-MPDU reorder queues).
+ */
+ if (ni->ni_associd != 0 && (ni->ni_flags & IEEE80211_NODE_HT))
+ ieee80211_ht_node_age(ni);
+}
+
+static int8_t
+node_getrssi(const struct ieee80211_node *ni)
+{
+ uint32_t avgrssi = ni->ni_avgrssi;
+ int32_t rssi;
+
+ if (avgrssi == IEEE80211_RSSI_DUMMY_MARKER)
+ return 0;
+ rssi = IEEE80211_RSSI_GET(avgrssi);
+ return rssi < 0 ? 0 : rssi > 127 ? 127 : rssi;
+}
+
+static void
+node_getsignal(const struct ieee80211_node *ni, int8_t *rssi, int8_t *noise)
+{
+ *rssi = node_getrssi(ni);
+ *noise = ni->ni_noise;
+}
+
+static void
+node_getmimoinfo(const struct ieee80211_node *ni,
+ struct ieee80211_mimo_info *info)
+{
+ /* XXX zero data? */
+}
+
+struct ieee80211_node *
+ieee80211_alloc_node(struct ieee80211_node_table *nt,
+ struct ieee80211vap *vap, const uint8_t macaddr[IEEE80211_ADDR_LEN])
+{
+ struct ieee80211com *ic = nt->nt_ic;
+ struct ieee80211_node *ni;
+ int hash;
+
+ ni = ic->ic_node_alloc(vap, macaddr);
+ if (ni == NULL) {
+ vap->iv_stats.is_rx_nodealloc++;
+ return NULL;
+ }
+
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_NODE,
+ "%s %p<%s> in %s table\n", __func__, ni,
+ ether_sprintf(macaddr), nt->nt_name);
+
+ IEEE80211_ADDR_COPY(ni->ni_macaddr, macaddr);
+ hash = IEEE80211_NODE_HASH(ic, macaddr);
+ ieee80211_node_initref(ni); /* mark referenced */
+ ni->ni_chan = IEEE80211_CHAN_ANYC;
+ ni->ni_authmode = IEEE80211_AUTH_OPEN;
+ ni->ni_txpower = ic->ic_txpowlimit; /* max power */
+ ni->ni_txparms = &vap->iv_txparms[ieee80211_chan2mode(ic->ic_curchan)];
+ ieee80211_crypto_resetkey(vap, &ni->ni_ucastkey, IEEE80211_KEYIX_NONE);
+ ni->ni_avgrssi = IEEE80211_RSSI_DUMMY_MARKER;
+ ni->ni_inact_reload = nt->nt_inact_init;
+ ni->ni_inact = ni->ni_inact_reload;
+ ni->ni_ath_defkeyix = 0x7fff;
+ ieee80211_psq_init(&ni->ni_psq, "unknown");
+#ifdef IEEE80211_SUPPORT_MESH
+ if (vap->iv_opmode == IEEE80211_M_MBSS)
+ ieee80211_mesh_node_init(vap, ni);
+#endif
+ IEEE80211_NODE_LOCK(nt);
+ TAILQ_INSERT_TAIL(&nt->nt_node, ni, ni_list);
+ LIST_INSERT_HEAD(&nt->nt_hash[hash], ni, ni_hash);
+ ni->ni_table = nt;
+ ni->ni_vap = vap;
+ ni->ni_ic = ic;
+ IEEE80211_NODE_UNLOCK(nt);
+
+ IEEE80211_NOTE(vap, IEEE80211_MSG_INACT, ni,
+ "%s: inact_reload %u", __func__, ni->ni_inact_reload);
+
+ ieee80211_ratectl_node_init(ni);
+
+ return ni;
+}
+
+/*
+ * Craft a temporary node suitable for sending a management frame
+ * to the specified station. We craft only as much state as we
+ * need to do the work since the node will be immediately reclaimed
+ * once the send completes.
+ */
+struct ieee80211_node *
+ieee80211_tmp_node(struct ieee80211vap *vap,
+ const uint8_t macaddr[IEEE80211_ADDR_LEN])
+{
+ struct ieee80211com *ic = vap->iv_ic;
+ struct ieee80211_node *ni;
+
+ ni = ic->ic_node_alloc(vap, macaddr);
+ if (ni != NULL) {
+ struct ieee80211_node *bss = vap->iv_bss;
+
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_NODE,
+ "%s %p<%s>\n", __func__, ni, ether_sprintf(macaddr));
+
+ ni->ni_table = NULL; /* NB: pedantic */
+ ni->ni_ic = ic; /* NB: needed to set channel */
+ ni->ni_vap = vap;
+
+ IEEE80211_ADDR_COPY(ni->ni_macaddr, macaddr);
+ IEEE80211_ADDR_COPY(ni->ni_bssid, bss->ni_bssid);
+ ieee80211_node_initref(ni); /* mark referenced */
+ /* NB: required by ieee80211_fix_rate */
+ ieee80211_node_set_chan(ni, bss->ni_chan);
+ ieee80211_crypto_resetkey(vap, &ni->ni_ucastkey,
+ IEEE80211_KEYIX_NONE);
+ ni->ni_txpower = bss->ni_txpower;
+ /* XXX optimize away */
+ ieee80211_psq_init(&ni->ni_psq, "unknown");
+
+ ieee80211_ratectl_node_init(ni);
+ } else {
+ /* XXX msg */
+ vap->iv_stats.is_rx_nodealloc++;
+ }
+ return ni;
+}
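+
+/*
+ * Example (a minimal sketch, not from the original source): a common
+ * use of a temporary node is answering a frame from an unknown
+ * sender, e.g. deauthenticating it; the reason code chosen here is
+ * only illustrative.
+ */
+#if 0
+static void
+example_deauth_unknown(struct ieee80211vap *vap,
+	const uint8_t mac[IEEE80211_ADDR_LEN])
+{
+	struct ieee80211_node *ni;
+
+	ni = ieee80211_tmp_node(vap, mac);
+	if (ni != NULL) {
+		IEEE80211_SEND_MGMT(ni, IEEE80211_FC0_SUBTYPE_DEAUTH,
+		    IEEE80211_REASON_NOT_AUTHED);
+		ieee80211_free_node(ni);	/* drop our reference */
+	}
+}
+#endif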
+
+struct ieee80211_node *
+ieee80211_dup_bss(struct ieee80211vap *vap,
+ const uint8_t macaddr[IEEE80211_ADDR_LEN])
+{
+ struct ieee80211com *ic = vap->iv_ic;
+ struct ieee80211_node *ni;
+
+ ni = ieee80211_alloc_node(&ic->ic_sta, vap, macaddr);
+ if (ni != NULL) {
+ struct ieee80211_node *bss = vap->iv_bss;
+ /*
+ * Inherit from iv_bss.
+ */
+ copy_bss(ni, bss);
+ IEEE80211_ADDR_COPY(ni->ni_bssid, bss->ni_bssid);
+ ieee80211_node_set_chan(ni, bss->ni_chan);
+ }
+ return ni;
+}
+
+/*
+ * Create a bss node for a legacy WDS vap. The far end does
+ * not associate so we just create a new node and
+ * simulate an association. The caller is responsible for
+ * installing the node as the bss node and handling any further
+ * setup work like authorizing the port.
+ */
+struct ieee80211_node *
+ieee80211_node_create_wds(struct ieee80211vap *vap,
+ const uint8_t bssid[IEEE80211_ADDR_LEN], struct ieee80211_channel *chan)
+{
+ struct ieee80211com *ic = vap->iv_ic;
+ struct ieee80211_node *ni;
+
+ /* XXX check if node already in sta table? */
+ ni = ieee80211_alloc_node(&ic->ic_sta, vap, bssid);
+ if (ni != NULL) {
+ ni->ni_wdsvap = vap;
+ IEEE80211_ADDR_COPY(ni->ni_bssid, bssid);
+ /*
+ * Inherit any manually configured settings.
+ */
+ copy_bss(ni, vap->iv_bss);
+ ieee80211_node_set_chan(ni, chan);
+ /* NB: propagate ssid so available to WPA supplicant */
+ ni->ni_esslen = vap->iv_des_ssid[0].len;
+ memcpy(ni->ni_essid, vap->iv_des_ssid[0].ssid, ni->ni_esslen);
+ /* NB: no associd for peer */
+ /*
+ * There are no management frames to use to
+ * discover neighbor capabilities, so blindly
+ * propagate the local configuration.
+ */
+ if (vap->iv_flags & IEEE80211_F_WME)
+ ni->ni_flags |= IEEE80211_NODE_QOS;
+#ifdef IEEE80211_SUPPORT_SUPERG
+ if (vap->iv_flags & IEEE80211_F_FF)
+ ni->ni_flags |= IEEE80211_NODE_FF;
+#endif
+ if ((ic->ic_htcaps & IEEE80211_HTC_HT) &&
+ (vap->iv_flags_ht & IEEE80211_FHT_HT)) {
+ /*
+ * Device is HT-capable and HT is enabled for
+ * the vap; setup HT operation. On return
+ * ni_chan will be adjusted to an HT channel.
+ */
+ ieee80211_ht_wds_init(ni);
+ } else {
+ struct ieee80211_channel *c = ni->ni_chan;
+ /*
+ * Force a legacy channel to be used.
+ */
+ c = ieee80211_find_channel(ic,
+ c->ic_freq, c->ic_flags &~ IEEE80211_CHAN_HT);
+ KASSERT(c != NULL, ("no legacy channel, %u/%x",
+ ni->ni_chan->ic_freq, ni->ni_chan->ic_flags));
+ ni->ni_chan = c;
+ }
+ }
+ return ni;
+}
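+
+/*
+ * Example (a minimal sketch, not from the original source): per the
+ * comment above, the caller installs the result as the bss node and
+ * handles further setup; ieee80211_node_authorize() stands in here
+ * for "authorizing the port".
+ */
+#if 0
+static int
+example_wds_setup(struct ieee80211vap *vap,
+	const uint8_t bssid[IEEE80211_ADDR_LEN],
+	struct ieee80211_channel *chan)
+{
+	struct ieee80211_node *ni;
+
+	ni = ieee80211_node_create_wds(vap, bssid, chan);
+	if (ni == NULL)
+		return ENOMEM;
+	/* NB: a real caller also swaps this in as vap->iv_bss */
+	ieee80211_node_authorize(ni);
+	return 0;
+}
+#endif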
+
+struct ieee80211_node *
+#ifdef IEEE80211_DEBUG_REFCNT
+ieee80211_find_node_locked_debug(struct ieee80211_node_table *nt,
+ const uint8_t macaddr[IEEE80211_ADDR_LEN], const char *func, int line)
+#else
+ieee80211_find_node_locked(struct ieee80211_node_table *nt,
+ const uint8_t macaddr[IEEE80211_ADDR_LEN])
+#endif
+{
+ struct ieee80211_node *ni;
+ int hash;
+
+ IEEE80211_NODE_LOCK_ASSERT(nt);
+
+ hash = IEEE80211_NODE_HASH(nt->nt_ic, macaddr);
+ LIST_FOREACH(ni, &nt->nt_hash[hash], ni_hash) {
+ if (IEEE80211_ADDR_EQ(ni->ni_macaddr, macaddr)) {
+ ieee80211_ref_node(ni); /* mark referenced */
+#ifdef IEEE80211_DEBUG_REFCNT
+ IEEE80211_DPRINTF(ni->ni_vap, IEEE80211_MSG_NODE,
+ "%s (%s:%u) %p<%s> refcnt %d\n", __func__,
+ func, line,
+ ni, ether_sprintf(ni->ni_macaddr),
+ ieee80211_node_refcnt(ni));
+#endif
+ return ni;
+ }
+ }
+ return NULL;
+}
+
+struct ieee80211_node *
+#ifdef IEEE80211_DEBUG_REFCNT
+ieee80211_find_node_debug(struct ieee80211_node_table *nt,
+ const uint8_t macaddr[IEEE80211_ADDR_LEN], const char *func, int line)
+#else
+ieee80211_find_node(struct ieee80211_node_table *nt,
+ const uint8_t macaddr[IEEE80211_ADDR_LEN])
+#endif
+{
+ struct ieee80211_node *ni;
+
+ IEEE80211_NODE_LOCK(nt);
+ ni = ieee80211_find_node_locked(nt, macaddr);
+ IEEE80211_NODE_UNLOCK(nt);
+ return ni;
+}
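+
+/*
+ * Example (a minimal sketch, not from the original source): every
+ * successful lookup returns a referenced node; the caller must
+ * release it with ieee80211_free_node.
+ */
+#if 0
+static int
+example_station_known(struct ieee80211com *ic,
+	const uint8_t mac[IEEE80211_ADDR_LEN])
+{
+	struct ieee80211_node *ni;
+
+	ni = ieee80211_find_node(&ic->ic_sta, mac);
+	if (ni == NULL)
+		return 0;
+	/* ... inspect ni here ... */
+	ieee80211_free_node(ni);	/* drop the lookup reference */
+	return 1;
+}
+#endif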
+
+struct ieee80211_node *
+#ifdef IEEE80211_DEBUG_REFCNT
+ieee80211_find_vap_node_locked_debug(struct ieee80211_node_table *nt,
+ const struct ieee80211vap *vap,
+ const uint8_t macaddr[IEEE80211_ADDR_LEN], const char *func, int line)
+#else
+ieee80211_find_vap_node_locked(struct ieee80211_node_table *nt,
+ const struct ieee80211vap *vap,
+ const uint8_t macaddr[IEEE80211_ADDR_LEN])
+#endif
+{
+ struct ieee80211_node *ni;
+ int hash;
+
+ IEEE80211_NODE_LOCK_ASSERT(nt);
+
+ hash = IEEE80211_NODE_HASH(nt->nt_ic, macaddr);
+ LIST_FOREACH(ni, &nt->nt_hash[hash], ni_hash) {
+ if (ni->ni_vap == vap &&
+ IEEE80211_ADDR_EQ(ni->ni_macaddr, macaddr)) {
+ ieee80211_ref_node(ni); /* mark referenced */
+#ifdef IEEE80211_DEBUG_REFCNT
+ IEEE80211_DPRINTF(ni->ni_vap, IEEE80211_MSG_NODE,
+ "%s (%s:%u) %p<%s> refcnt %d\n", __func__,
+ func, line,
+ ni, ether_sprintf(ni->ni_macaddr),
+ ieee80211_node_refcnt(ni));
+#endif
+ return ni;
+ }
+ }
+ return NULL;
+}
+
+struct ieee80211_node *
+#ifdef IEEE80211_DEBUG_REFCNT
+ieee80211_find_vap_node_debug(struct ieee80211_node_table *nt,
+ const struct ieee80211vap *vap,
+ const uint8_t macaddr[IEEE80211_ADDR_LEN], const char *func, int line)
+#else
+ieee80211_find_vap_node(struct ieee80211_node_table *nt,
+ const struct ieee80211vap *vap,
+ const uint8_t macaddr[IEEE80211_ADDR_LEN])
+#endif
+{
+ struct ieee80211_node *ni;
+
+ IEEE80211_NODE_LOCK(nt);
+ ni = ieee80211_find_vap_node_locked(nt, vap, macaddr);
+ IEEE80211_NODE_UNLOCK(nt);
+ return ni;
+}
+
+/*
+ * Fake up a node; this handles node discovery in adhoc mode.
+ * Note that for the driver's benefit we treat this like
+ * an association so the driver has an opportunity to set up
+ * its private state.
+ */
+struct ieee80211_node *
+ieee80211_fakeup_adhoc_node(struct ieee80211vap *vap,
+ const uint8_t macaddr[IEEE80211_ADDR_LEN])
+{
+ struct ieee80211_node *ni;
+
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_NODE,
+ "%s: mac<%s>\n", __func__, ether_sprintf(macaddr));
+ ni = ieee80211_dup_bss(vap, macaddr);
+ if (ni != NULL) {
+ struct ieee80211com *ic = vap->iv_ic;
+
+ /* XXX no rate negotiation; just dup */
+ ni->ni_rates = vap->iv_bss->ni_rates;
+ if (ieee80211_iserp_rateset(&ni->ni_rates))
+ ni->ni_flags |= IEEE80211_NODE_ERP;
+ if (vap->iv_opmode == IEEE80211_M_AHDEMO) {
+ /*
+ * In adhoc demo mode there are no management
+ * frames to use to discover neighbor capabilities,
+ * so blindly propagate the local configuration
+ * so we can do interesting things (e.g. use
+ * WME to disable ACK's).
+ */
+ if (vap->iv_flags & IEEE80211_F_WME)
+ ni->ni_flags |= IEEE80211_NODE_QOS;
+#ifdef IEEE80211_SUPPORT_SUPERG
+ if (vap->iv_flags & IEEE80211_F_FF)
+ ni->ni_flags |= IEEE80211_NODE_FF;
+#endif
+ }
+ ieee80211_node_setuptxparms(ni);
+ ieee80211_ratectl_node_init(ni);
+ if (ic->ic_newassoc != NULL)
+ ic->ic_newassoc(ni, 1);
+ /* XXX not right for 802.1x/WPA */
+ ieee80211_node_authorize(ni);
+ }
+ return ni;
+}
+
+void
+ieee80211_init_neighbor(struct ieee80211_node *ni,
+ const struct ieee80211_frame *wh,
+ const struct ieee80211_scanparams *sp)
+{
+ ni->ni_esslen = sp->ssid[1];
+ memcpy(ni->ni_essid, sp->ssid + 2, sp->ssid[1]);
+ IEEE80211_ADDR_COPY(ni->ni_bssid, wh->i_addr3);
+ memcpy(ni->ni_tstamp.data, sp->tstamp, sizeof(ni->ni_tstamp));
+ ni->ni_intval = sp->bintval;
+ ni->ni_capinfo = sp->capinfo;
+ ni->ni_chan = ni->ni_ic->ic_curchan;
+ ni->ni_fhdwell = sp->fhdwell;
+ ni->ni_fhindex = sp->fhindex;
+ ni->ni_erp = sp->erp;
+ ni->ni_timoff = sp->timoff;
+#ifdef IEEE80211_SUPPORT_MESH
+ if (ni->ni_vap->iv_opmode == IEEE80211_M_MBSS)
+ ieee80211_mesh_init_neighbor(ni, wh, sp);
+#endif
+ if (ieee80211_ies_init(&ni->ni_ies, sp->ies, sp->ies_len)) {
+ ieee80211_ies_expand(&ni->ni_ies);
+ if (ni->ni_ies.wme_ie != NULL)
+ ni->ni_flags |= IEEE80211_NODE_QOS;
+ else
+ ni->ni_flags &= ~IEEE80211_NODE_QOS;
+#ifdef IEEE80211_SUPPORT_SUPERG
+ if (ni->ni_ies.ath_ie != NULL)
+ ieee80211_parse_ath(ni, ni->ni_ies.ath_ie);
+#endif
+ }
+
+	/* NB: must be after ni_chan is set up */
+ ieee80211_setup_rates(ni, sp->rates, sp->xrates,
+ IEEE80211_F_DOSORT | IEEE80211_F_DOFRATE |
+ IEEE80211_F_DONEGO | IEEE80211_F_DODEL);
+}
+
+/*
+ * Do node discovery in adhoc mode on receipt of a beacon
+ * or probe response frame. Note that for the driver's
+ * benefit we treat this like an association so the
+ * driver has an opportunity to set up its private state.
+ */
+struct ieee80211_node *
+ieee80211_add_neighbor(struct ieee80211vap *vap,
+ const struct ieee80211_frame *wh,
+ const struct ieee80211_scanparams *sp)
+{
+ struct ieee80211_node *ni;
+
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_NODE,
+ "%s: mac<%s>\n", __func__, ether_sprintf(wh->i_addr2));
+ ni = ieee80211_dup_bss(vap, wh->i_addr2);/* XXX alloc_node? */
+ if (ni != NULL) {
+ struct ieee80211com *ic = vap->iv_ic;
+
+ ieee80211_init_neighbor(ni, wh, sp);
+ if (ieee80211_iserp_rateset(&ni->ni_rates))
+ ni->ni_flags |= IEEE80211_NODE_ERP;
+ ieee80211_node_setuptxparms(ni);
+ ieee80211_ratectl_node_init(ni);
+ if (ic->ic_newassoc != NULL)
+ ic->ic_newassoc(ni, 1);
+ /* XXX not right for 802.1x/WPA */
+ ieee80211_node_authorize(ni);
+ }
+ return ni;
+}
+
+#define IS_PROBEREQ(wh) \
+ ((wh->i_fc[0] & (IEEE80211_FC0_TYPE_MASK|IEEE80211_FC0_SUBTYPE_MASK)) \
+ == (IEEE80211_FC0_TYPE_MGT | IEEE80211_FC0_SUBTYPE_PROBE_REQ))
+#define IS_BCAST_PROBEREQ(wh) \
+ (IS_PROBEREQ(wh) && IEEE80211_IS_MULTICAST( \
+ ((const struct ieee80211_frame *)(wh))->i_addr3))
+
+static __inline struct ieee80211_node *
+_find_rxnode(struct ieee80211_node_table *nt,
+ const struct ieee80211_frame_min *wh)
+{
+ if (IS_BCAST_PROBEREQ(wh))
+ return NULL; /* spam bcast probe req to all vap's */
+ return ieee80211_find_node_locked(nt, wh->i_addr2);
+}
+
+/*
+ * Locate the node for the sender, track state, and then pass the
+ * (referenced) node up to the 802.11 layer for its use. Note
+ * we can return NULL if the sender is not in the table.
+ */
+struct ieee80211_node *
+#ifdef IEEE80211_DEBUG_REFCNT
+ieee80211_find_rxnode_debug(struct ieee80211com *ic,
+ const struct ieee80211_frame_min *wh, const char *func, int line)
+#else
+ieee80211_find_rxnode(struct ieee80211com *ic,
+ const struct ieee80211_frame_min *wh)
+#endif
+{
+ struct ieee80211_node_table *nt;
+ struct ieee80211_node *ni;
+
+ nt = &ic->ic_sta;
+ IEEE80211_NODE_LOCK(nt);
+ ni = _find_rxnode(nt, wh);
+ IEEE80211_NODE_UNLOCK(nt);
+
+ return ni;
+}
+
+/*
+ * Like ieee80211_find_rxnode but use the supplied h/w
+ * key index as a hint to locate the node in the key
+ * mapping table. If an entry is present at the key
+ * index we return it; otherwise do a normal lookup and
+ * update the mapping table if the station has a unicast
+ * key assigned to it.
+ */
+struct ieee80211_node *
+#ifdef IEEE80211_DEBUG_REFCNT
+ieee80211_find_rxnode_withkey_debug(struct ieee80211com *ic,
+ const struct ieee80211_frame_min *wh, ieee80211_keyix keyix,
+ const char *func, int line)
+#else
+ieee80211_find_rxnode_withkey(struct ieee80211com *ic,
+ const struct ieee80211_frame_min *wh, ieee80211_keyix keyix)
+#endif
+{
+ struct ieee80211_node_table *nt;
+ struct ieee80211_node *ni;
+
+ nt = &ic->ic_sta;
+ IEEE80211_NODE_LOCK(nt);
+ if (nt->nt_keyixmap != NULL && keyix < nt->nt_keyixmax)
+ ni = nt->nt_keyixmap[keyix];
+ else
+ ni = NULL;
+ if (ni == NULL) {
+ ni = _find_rxnode(nt, wh);
+ if (ni != NULL && nt->nt_keyixmap != NULL) {
+ /*
+ * If the station has a unicast key cache slot
+ * assigned update the key->node mapping table.
+ */
+ keyix = ni->ni_ucastkey.wk_rxkeyix;
+ /* XXX can keyixmap[keyix] != NULL? */
+ if (keyix < nt->nt_keyixmax &&
+ nt->nt_keyixmap[keyix] == NULL) {
+ IEEE80211_DPRINTF(ni->ni_vap,
+ IEEE80211_MSG_NODE,
+ "%s: add key map entry %p<%s> refcnt %d\n",
+ __func__, ni, ether_sprintf(ni->ni_macaddr),
+ ieee80211_node_refcnt(ni)+1);
+ nt->nt_keyixmap[keyix] = ieee80211_ref_node(ni);
+ }
+ }
+ } else {
+ if (IS_BCAST_PROBEREQ(wh))
+ ni = NULL; /* spam bcast probe req to all vap's */
+ else
+ ieee80211_ref_node(ni);
+ }
+ IEEE80211_NODE_UNLOCK(nt);
+
+ return ni;
+}
+#undef IS_BCAST_PROBEREQ
+#undef IS_PROBEREQ
+
+/*
+ * Return a reference to the appropriate node for sending
+ * a data frame. This handles node discovery in adhoc networks.
+ */
+struct ieee80211_node *
+#ifdef IEEE80211_DEBUG_REFCNT
+ieee80211_find_txnode_debug(struct ieee80211vap *vap,
+ const uint8_t macaddr[IEEE80211_ADDR_LEN],
+ const char *func, int line)
+#else
+ieee80211_find_txnode(struct ieee80211vap *vap,
+ const uint8_t macaddr[IEEE80211_ADDR_LEN])
+#endif
+{
+ struct ieee80211_node_table *nt = &vap->iv_ic->ic_sta;
+ struct ieee80211_node *ni;
+
+ /*
+ * The destination address should be in the node table
+ * unless this is a multicast/broadcast frame. We can
+	 * also optimize station mode operation: all frames go
+ * to the bss node.
+ */
+ /* XXX can't hold lock across dup_bss 'cuz of recursive locking */
+ IEEE80211_NODE_LOCK(nt);
+ if (vap->iv_opmode == IEEE80211_M_STA ||
+ vap->iv_opmode == IEEE80211_M_WDS ||
+ IEEE80211_IS_MULTICAST(macaddr))
+ ni = ieee80211_ref_node(vap->iv_bss);
+ else
+ ni = ieee80211_find_node_locked(nt, macaddr);
+ IEEE80211_NODE_UNLOCK(nt);
+
+ if (ni == NULL) {
+ if (vap->iv_opmode == IEEE80211_M_IBSS ||
+ vap->iv_opmode == IEEE80211_M_AHDEMO) {
+ /*
+ * In adhoc mode cons up a node for the destination.
+ * Note that we need an additional reference for the
+ * caller to be consistent with
+ * ieee80211_find_node_locked.
+ */
+ ni = ieee80211_fakeup_adhoc_node(vap, macaddr);
+ if (ni != NULL)
+ (void) ieee80211_ref_node(ni);
+ } else {
+ IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_OUTPUT, macaddr,
+ "no node, discard frame (%s)", __func__);
+ vap->iv_stats.is_tx_nonode++;
+ }
+ }
+ return ni;
+}
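+
+/*
+ * Example (a minimal sketch, not from the original source): the
+ * transmit path pairs the lookup with an explicit free on failure.
+ * driver_transmit() is a hypothetical stand-in for the encap/output
+ * path, which consumes the node reference on success.
+ */
+#if 0
+static int
+example_tx(struct ieee80211vap *vap, struct mbuf *m,
+	const uint8_t dst[IEEE80211_ADDR_LEN])
+{
+	struct ieee80211_node *ni;
+
+	ni = ieee80211_find_txnode(vap, dst);
+	if (ni == NULL) {
+		m_freem(m);
+		return ENOBUFS;
+	}
+	if (driver_transmit(vap, ni, m) != 0) {
+		ieee80211_free_node(ni);	/* reference not consumed */
+		return EIO;
+	}
+	return 0;
+}
+#endif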
+
+static void
+_ieee80211_free_node(struct ieee80211_node *ni)
+{
+ struct ieee80211_node_table *nt = ni->ni_table;
+
+ /*
+ * NB: careful about referencing the vap as it may be
+ * gone if the last reference was held by a driver.
+ * We know the com will always be present so it's safe
+ * to use ni_ic below to reclaim resources.
+ */
+#if 0
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_NODE,
+ "%s %p<%s> in %s table\n", __func__, ni,
+ ether_sprintf(ni->ni_macaddr),
+ nt != NULL ? nt->nt_name : "<gone>");
+#endif
+ if (ni->ni_associd != 0) {
+ struct ieee80211vap *vap = ni->ni_vap;
+ if (vap->iv_aid_bitmap != NULL)
+ IEEE80211_AID_CLR(vap, ni->ni_associd);
+ }
+ if (nt != NULL) {
+ TAILQ_REMOVE(&nt->nt_node, ni, ni_list);
+ LIST_REMOVE(ni, ni_hash);
+ }
+ ni->ni_ic->ic_node_free(ni);
+}
+
+void
+#ifdef IEEE80211_DEBUG_REFCNT
+ieee80211_free_node_debug(struct ieee80211_node *ni, const char *func, int line)
+#else
+ieee80211_free_node(struct ieee80211_node *ni)
+#endif
+{
+ struct ieee80211_node_table *nt = ni->ni_table;
+
+#ifdef IEEE80211_DEBUG_REFCNT
+ IEEE80211_DPRINTF(ni->ni_vap, IEEE80211_MSG_NODE,
+ "%s (%s:%u) %p<%s> refcnt %d\n", __func__, func, line, ni,
+ ether_sprintf(ni->ni_macaddr), ieee80211_node_refcnt(ni)-1);
+#endif
+ if (nt != NULL) {
+ IEEE80211_NODE_LOCK(nt);
+ if (ieee80211_node_dectestref(ni)) {
+ /*
+ * Last reference, reclaim state.
+ */
+ _ieee80211_free_node(ni);
+ } else if (ieee80211_node_refcnt(ni) == 1 &&
+ nt->nt_keyixmap != NULL) {
+ ieee80211_keyix keyix;
+ /*
+ * Check for a last reference in the key mapping table.
+ */
+ keyix = ni->ni_ucastkey.wk_rxkeyix;
+ if (keyix < nt->nt_keyixmax &&
+ nt->nt_keyixmap[keyix] == ni) {
+ IEEE80211_DPRINTF(ni->ni_vap,
+ IEEE80211_MSG_NODE,
+ "%s: %p<%s> clear key map entry", __func__,
+ ni, ether_sprintf(ni->ni_macaddr));
+ nt->nt_keyixmap[keyix] = NULL;
+ ieee80211_node_decref(ni); /* XXX needed? */
+ _ieee80211_free_node(ni);
+ }
+ }
+ IEEE80211_NODE_UNLOCK(nt);
+ } else {
+ if (ieee80211_node_dectestref(ni))
+ _ieee80211_free_node(ni);
+ }
+}
+
+/*
+ * Reclaim a unicast key and clear any key cache state.
+ */
+int
+ieee80211_node_delucastkey(struct ieee80211_node *ni)
+{
+ struct ieee80211com *ic = ni->ni_ic;
+ struct ieee80211_node_table *nt = &ic->ic_sta;
+ struct ieee80211_node *nikey;
+ ieee80211_keyix keyix;
+ int isowned, status;
+
+ /*
+ * NB: We must beware of LOR here; deleting the key
+ * can cause the crypto layer to block traffic updates
+ * which can generate a LOR against the node table lock;
+ * grab it here and stash the key index for our use below.
+ *
+ * Must also beware of recursion on the node table lock.
+ * When called from node_cleanup we may already have
+ * the node table lock held. Unfortunately there's no
+ * way to separate out this path so we must do this
+ * conditionally.
+ */
+ isowned = IEEE80211_NODE_IS_LOCKED(nt);
+ if (!isowned)
+ IEEE80211_NODE_LOCK(nt);
+ nikey = NULL;
+ status = 1; /* NB: success */
+ if (ni->ni_ucastkey.wk_keyix != IEEE80211_KEYIX_NONE) {
+ keyix = ni->ni_ucastkey.wk_rxkeyix;
+ status = ieee80211_crypto_delkey(ni->ni_vap, &ni->ni_ucastkey);
+ if (nt->nt_keyixmap != NULL && keyix < nt->nt_keyixmax) {
+ nikey = nt->nt_keyixmap[keyix];
+ nt->nt_keyixmap[keyix] = NULL;
+ }
+ }
+ if (!isowned)
+ IEEE80211_NODE_UNLOCK(nt);
+
+ if (nikey != NULL) {
+ KASSERT(nikey == ni,
+ ("key map out of sync, ni %p nikey %p", ni, nikey));
+ IEEE80211_DPRINTF(ni->ni_vap, IEEE80211_MSG_NODE,
+ "%s: delete key map entry %p<%s> refcnt %d\n",
+ __func__, ni, ether_sprintf(ni->ni_macaddr),
+ ieee80211_node_refcnt(ni)-1);
+ ieee80211_free_node(ni);
+ }
+ return status;
+}
+
+/*
+ * Reclaim a node. If this is the last reference count then
+ * do the normal free work. Otherwise remove it from the node
+ * table and mark it gone by clearing the back-reference.
+ */
+static void
+node_reclaim(struct ieee80211_node_table *nt, struct ieee80211_node *ni)
+{
+ ieee80211_keyix keyix;
+
+ IEEE80211_NODE_LOCK_ASSERT(nt);
+
+ IEEE80211_DPRINTF(ni->ni_vap, IEEE80211_MSG_NODE,
+ "%s: remove %p<%s> from %s table, refcnt %d\n",
+ __func__, ni, ether_sprintf(ni->ni_macaddr),
+ nt->nt_name, ieee80211_node_refcnt(ni)-1);
+ /*
+ * Clear any entry in the unicast key mapping table.
+ * We need to do it here so rx lookups don't find it
+ * in the mapping table even if it's not in the hash
+ * table. We cannot depend on the mapping table entry
+ * being cleared because the node may not be free'd.
+ */
+ keyix = ni->ni_ucastkey.wk_rxkeyix;
+ if (nt->nt_keyixmap != NULL && keyix < nt->nt_keyixmax &&
+ nt->nt_keyixmap[keyix] == ni) {
+ IEEE80211_DPRINTF(ni->ni_vap, IEEE80211_MSG_NODE,
+ "%s: %p<%s> clear key map entry %u\n",
+ __func__, ni, ether_sprintf(ni->ni_macaddr), keyix);
+ nt->nt_keyixmap[keyix] = NULL;
+ ieee80211_node_decref(ni); /* NB: don't need free */
+ }
+ if (!ieee80211_node_dectestref(ni)) {
+ /*
+ * Other references are present, just remove the
+ * node from the table so it cannot be found. When
+ * the references are dropped storage will be
+ * reclaimed.
+ */
+ TAILQ_REMOVE(&nt->nt_node, ni, ni_list);
+ LIST_REMOVE(ni, ni_hash);
+ ni->ni_table = NULL; /* clear reference */
+ } else
+ _ieee80211_free_node(ni);
+}
+
+/*
+ * Node table support.
+ */
+
+static void
+ieee80211_node_table_init(struct ieee80211com *ic,
+ struct ieee80211_node_table *nt,
+ const char *name, int inact, int keyixmax)
+{
+ struct ifnet *ifp = ic->ic_ifp;
+
+ nt->nt_ic = ic;
+ IEEE80211_NODE_LOCK_INIT(nt, ifp->if_xname);
+ IEEE80211_NODE_ITERATE_LOCK_INIT(nt, ifp->if_xname);
+ TAILQ_INIT(&nt->nt_node);
+ nt->nt_name = name;
+ nt->nt_scangen = 1;
+ nt->nt_inact_init = inact;
+ nt->nt_keyixmax = keyixmax;
+ if (nt->nt_keyixmax > 0) {
+ nt->nt_keyixmap = (struct ieee80211_node **) malloc(
+ keyixmax * sizeof(struct ieee80211_node *),
+ M_80211_NODE, M_NOWAIT | M_ZERO);
+ if (nt->nt_keyixmap == NULL)
+ if_printf(ic->ic_ifp,
+ "Cannot allocate key index map with %u entries\n",
+ keyixmax);
+ } else
+ nt->nt_keyixmap = NULL;
+}
+
+static void
+ieee80211_node_table_reset(struct ieee80211_node_table *nt,
+ struct ieee80211vap *match)
+{
+ struct ieee80211_node *ni, *next;
+
+ IEEE80211_NODE_LOCK(nt);
+ TAILQ_FOREACH_SAFE(ni, &nt->nt_node, ni_list, next) {
+ if (match != NULL && ni->ni_vap != match)
+ continue;
+		/* XXX can this happen? if so, needs work */
+ if (ni->ni_associd != 0) {
+ struct ieee80211vap *vap = ni->ni_vap;
+
+ if (vap->iv_auth->ia_node_leave != NULL)
+ vap->iv_auth->ia_node_leave(ni);
+ if (vap->iv_aid_bitmap != NULL)
+ IEEE80211_AID_CLR(vap, ni->ni_associd);
+ }
+ ni->ni_wdsvap = NULL; /* clear reference */
+ node_reclaim(nt, ni);
+ }
+ if (match != NULL && match->iv_opmode == IEEE80211_M_WDS) {
+ /*
+ * Make a separate pass to clear references to this vap
+ * held by DWDS entries. They will not be matched above
+ * because ni_vap will point to the ap vap but we still
+ * need to clear ni_wdsvap when the WDS vap is destroyed
+ * and/or reset.
+ */
+ TAILQ_FOREACH_SAFE(ni, &nt->nt_node, ni_list, next)
+ if (ni->ni_wdsvap == match)
+ ni->ni_wdsvap = NULL;
+ }
+ IEEE80211_NODE_UNLOCK(nt);
+}
+
+static void
+ieee80211_node_table_cleanup(struct ieee80211_node_table *nt)
+{
+ ieee80211_node_table_reset(nt, NULL);
+ if (nt->nt_keyixmap != NULL) {
+#ifdef DIAGNOSTIC
+ /* XXX verify all entries are NULL */
+ int i;
+ for (i = 0; i < nt->nt_keyixmax; i++)
+ if (nt->nt_keyixmap[i] != NULL)
+ printf("%s: %s[%u] still active\n", __func__,
+ nt->nt_name, i);
+#endif
+ free(nt->nt_keyixmap, M_80211_NODE);
+ nt->nt_keyixmap = NULL;
+ }
+ IEEE80211_NODE_ITERATE_LOCK_DESTROY(nt);
+ IEEE80211_NODE_LOCK_DESTROY(nt);
+}
+
+/*
+ * Timeout inactive stations and do related housekeeping.
+ * Note that we cannot hold the node lock while sending a
+ * frame as this would lead to a LOR. Instead we use a
+ * generation number to mark nodes that we've scanned and
+ * drop the lock and restart a scan if we have to time out
+ * a node. Since we are single-threaded by virtue of
+ * controlling the inactivity timer we can be sure this will
+ * process each node only once.
+ */
+static void
+ieee80211_timeout_stations(struct ieee80211com *ic)
+{
+ struct ieee80211_node_table *nt = &ic->ic_sta;
+ struct ieee80211vap *vap;
+ struct ieee80211_node *ni;
+ int gen = 0;
+
+ IEEE80211_NODE_ITERATE_LOCK(nt);
+ gen = ++nt->nt_scangen;
+restart:
+ IEEE80211_NODE_LOCK(nt);
+ TAILQ_FOREACH(ni, &nt->nt_node, ni_list) {
+ if (ni->ni_scangen == gen) /* previously handled */
+ continue;
+ ni->ni_scangen = gen;
+ /*
+		 * Ignore entries which have yet to receive an
+ * authentication frame. These are transient and
+ * will be reclaimed when the last reference to them
+ * goes away (when frame xmits complete).
+ */
+ vap = ni->ni_vap;
+ /*
+ * Only process stations when in RUN state. This
+		 * ensures, for example, that we don't time out an
+ * inactive station during CAC. Note that CSA state
+ * is actually handled in ieee80211_node_timeout as
+ * it applies to more than timeout processing.
+ */
+ if (vap->iv_state != IEEE80211_S_RUN)
+ continue;
+ /* XXX can vap be NULL? */
+ if ((vap->iv_opmode == IEEE80211_M_HOSTAP ||
+ vap->iv_opmode == IEEE80211_M_STA) &&
+ (ni->ni_flags & IEEE80211_NODE_AREF) == 0)
+ continue;
+ /*
+ * Free fragment if not needed anymore
+ * (last fragment older than 1s).
+ * XXX doesn't belong here, move to node_age
+ */
+ if (ni->ni_rxfrag[0] != NULL &&
+ ticks > ni->ni_rxfragstamp + hz) {
+ m_freem(ni->ni_rxfrag[0]);
+ ni->ni_rxfrag[0] = NULL;
+ }
+ if (ni->ni_inact > 0) {
+ ni->ni_inact--;
+ IEEE80211_NOTE(vap, IEEE80211_MSG_INACT, ni,
+ "%s: inact %u inact_reload %u nrates %u",
+ __func__, ni->ni_inact, ni->ni_inact_reload,
+ ni->ni_rates.rs_nrates);
+ }
+ /*
+		 * Special-case ourselves; we may be idle for extended periods
+		 * of time and, regardless, reclaiming our state is wrong.
+ * XXX run ic_node_age
+ */
+ if (ni == vap->iv_bss)
+ continue;
+ if (ni->ni_associd != 0 ||
+ (vap->iv_opmode == IEEE80211_M_IBSS ||
+ vap->iv_opmode == IEEE80211_M_AHDEMO)) {
+ /*
+ * Age/drain resources held by the station.
+ */
+ ic->ic_node_age(ni);
+ /*
+			 * Probe the station before timing it out. We
+ * send a null data frame which may not be
+ * universally supported by drivers (need it
+ * for ps-poll support so it should be...).
+ *
+ * XXX don't probe the station unless we've
+ * received a frame from them (and have
+ * some idea of the rates they are capable
+ * of); this will get fixed more properly
+ * soon with better handling of the rate set.
+ */
+ if ((vap->iv_flags_ext & IEEE80211_FEXT_INACT) &&
+ (0 < ni->ni_inact &&
+ ni->ni_inact <= vap->iv_inact_probe) &&
+ ni->ni_rates.rs_nrates != 0) {
+ IEEE80211_NOTE(vap,
+ IEEE80211_MSG_INACT | IEEE80211_MSG_NODE,
+ ni, "%s",
+ "probe station due to inactivity");
+ /*
+ * Grab a reference before unlocking the table
+ * so the node cannot be reclaimed before we
+ * send the frame. ieee80211_send_nulldata
+ * understands we've done this and reclaims the
+ * ref for us as needed.
+ */
+ ieee80211_ref_node(ni);
+ IEEE80211_NODE_UNLOCK(nt);
+ ieee80211_send_nulldata(ni);
+ /* XXX stat? */
+ goto restart;
+ }
+ }
+ if ((vap->iv_flags_ext & IEEE80211_FEXT_INACT) &&
+ ni->ni_inact <= 0) {
+ IEEE80211_NOTE(vap,
+ IEEE80211_MSG_INACT | IEEE80211_MSG_NODE, ni,
+ "station timed out due to inactivity "
+ "(refcnt %u)", ieee80211_node_refcnt(ni));
+ /*
+ * Send a deauthenticate frame and drop the station.
+ * This is somewhat complicated due to reference counts
+ * and locking. At this point a station will typically
+ * have a reference count of 1. ieee80211_node_leave
+ * will do a "free" of the node which will drop the
+ * reference count. But in the meantime a reference
+			 * will be held by the deauth frame. The actual reclaim
+ * of the node will happen either after the tx is
+ * completed or by ieee80211_node_leave.
+ *
+ * Separately we must drop the node lock before sending
+ * in case the driver takes a lock, as this can result
+ * in a LOR between the node lock and the driver lock.
+ */
+ ieee80211_ref_node(ni);
+ IEEE80211_NODE_UNLOCK(nt);
+ if (ni->ni_associd != 0) {
+ IEEE80211_SEND_MGMT(ni,
+ IEEE80211_FC0_SUBTYPE_DEAUTH,
+ IEEE80211_REASON_AUTH_EXPIRE);
+ }
+ ieee80211_node_leave(ni);
+ ieee80211_free_node(ni);
+ vap->iv_stats.is_node_timeout++;
+ goto restart;
+ }
+ }
+ IEEE80211_NODE_UNLOCK(nt);
+
+ IEEE80211_NODE_ITERATE_UNLOCK(nt);
+}
+
+/*
+ * Aggressively reclaim resources. This should be used
+ * only in a critical situation to reclaim mbuf resources.
+ */
+void
+ieee80211_drain(struct ieee80211com *ic)
+{
+ struct ieee80211_node_table *nt = &ic->ic_sta;
+ struct ieee80211vap *vap;
+ struct ieee80211_node *ni;
+
+ IEEE80211_NODE_LOCK(nt);
+ TAILQ_FOREACH(ni, &nt->nt_node, ni_list) {
+ /*
+		 * Ignore entries which have yet to receive an
+ * authentication frame. These are transient and
+ * will be reclaimed when the last reference to them
+ * goes away (when frame xmits complete).
+ */
+ vap = ni->ni_vap;
+ /*
+ * Only process stations when in RUN state. This
+		 * ensures, for example, that we don't time out an
+ * inactive station during CAC. Note that CSA state
+ * is actually handled in ieee80211_node_timeout as
+ * it applies to more than timeout processing.
+ */
+ if (vap->iv_state != IEEE80211_S_RUN)
+ continue;
+ /* XXX can vap be NULL? */
+ if ((vap->iv_opmode == IEEE80211_M_HOSTAP ||
+ vap->iv_opmode == IEEE80211_M_STA) &&
+ (ni->ni_flags & IEEE80211_NODE_AREF) == 0)
+ continue;
+ /*
+ * Free fragments.
+ * XXX doesn't belong here, move to node_drain
+ */
+ if (ni->ni_rxfrag[0] != NULL) {
+ m_freem(ni->ni_rxfrag[0]);
+ ni->ni_rxfrag[0] = NULL;
+ }
+ /*
+ * Drain resources held by the station.
+ */
+ ic->ic_node_drain(ni);
+ }
+ IEEE80211_NODE_UNLOCK(nt);
+}
+
+/*
+ * Per-ieee80211com inactivity timer callback.
+ */
+void
+ieee80211_node_timeout(void *arg)
+{
+ struct ieee80211com *ic = arg;
+
+ /*
+ * Defer timeout processing if a channel switch is pending.
+ * We typically need to be mute so not doing things that
+ * might generate frames is good to handle in one place.
+	 * Suppressing the station timeout processing may extend the
+ * lifetime of inactive stations (by not decrementing their
+ * idle counters) but this should be ok unless the CSA is
+ * active for an unusually long time.
+ */
+ if ((ic->ic_flags & IEEE80211_F_CSAPENDING) == 0) {
+ ieee80211_scan_timeout(ic);
+ ieee80211_timeout_stations(ic);
+ ieee80211_ageq_age(&ic->ic_stageq, IEEE80211_INACT_WAIT);
+
+ IEEE80211_LOCK(ic);
+ ieee80211_erp_timeout(ic);
+ ieee80211_ht_timeout(ic);
+ IEEE80211_UNLOCK(ic);
+ }
+ callout_reset(&ic->ic_inact, IEEE80211_INACT_WAIT*hz,
+ ieee80211_node_timeout, ic);
+}
+
+void
+ieee80211_iterate_nodes(struct ieee80211_node_table *nt,
+ ieee80211_iter_func *f, void *arg)
+{
+ struct ieee80211_node *ni;
+ u_int gen;
+
+ IEEE80211_NODE_ITERATE_LOCK(nt);
+ gen = ++nt->nt_scangen;
+restart:
+ IEEE80211_NODE_LOCK(nt);
+ TAILQ_FOREACH(ni, &nt->nt_node, ni_list) {
+ if (ni->ni_scangen != gen) {
+ ni->ni_scangen = gen;
+ (void) ieee80211_ref_node(ni);
+ IEEE80211_NODE_UNLOCK(nt);
+ (*f)(arg, ni);
+ ieee80211_free_node(ni);
+ goto restart;
+ }
+ }
+ IEEE80211_NODE_UNLOCK(nt);
+
+ IEEE80211_NODE_ITERATE_UNLOCK(nt);
+}
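+
+/*
+ * Example (a minimal sketch, not from the original source): a
+ * callback passed to ieee80211_iterate_nodes runs with a reference
+ * held on each node and with the table unlocked.  Counting
+ * associated stations:
+ */
+#if 0
+static void
+example_count_cb(void *arg, struct ieee80211_node *ni)
+{
+	if (ni->ni_associd != 0)
+		(*(int *)arg)++;
+}
+
+static int
+example_count_assoc(struct ieee80211com *ic)
+{
+	int count = 0;
+
+	ieee80211_iterate_nodes(&ic->ic_sta, example_count_cb, &count);
+	return count;
+}
+#endif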
+
+void
+ieee80211_dump_node(struct ieee80211_node_table *nt, struct ieee80211_node *ni)
+{
+ printf("0x%p: mac %s refcnt %d\n", ni,
+ ether_sprintf(ni->ni_macaddr), ieee80211_node_refcnt(ni));
+ printf("\tscangen %u authmode %u flags 0x%x\n",
+ ni->ni_scangen, ni->ni_authmode, ni->ni_flags);
+ printf("\tassocid 0x%x txpower %u vlan %u\n",
+ ni->ni_associd, ni->ni_txpower, ni->ni_vlan);
+ printf("\ttxseq %u rxseq %u fragno %u rxfragstamp %u\n",
+ ni->ni_txseqs[IEEE80211_NONQOS_TID],
+ ni->ni_rxseqs[IEEE80211_NONQOS_TID] >> IEEE80211_SEQ_SEQ_SHIFT,
+ ni->ni_rxseqs[IEEE80211_NONQOS_TID] & IEEE80211_SEQ_FRAG_MASK,
+ ni->ni_rxfragstamp);
+ printf("\trssi %d noise %d intval %u capinfo 0x%x\n",
+ node_getrssi(ni), ni->ni_noise,
+ ni->ni_intval, ni->ni_capinfo);
+ printf("\tbssid %s essid \"%.*s\" channel %u:0x%x\n",
+ ether_sprintf(ni->ni_bssid),
+ ni->ni_esslen, ni->ni_essid,
+ ni->ni_chan->ic_freq, ni->ni_chan->ic_flags);
+ printf("\tinact %u inact_reload %u txrate %u\n",
+ ni->ni_inact, ni->ni_inact_reload, ni->ni_txrate);
+ printf("\thtcap %x htparam %x htctlchan %u ht2ndchan %u\n",
+ ni->ni_htcap, ni->ni_htparam,
+ ni->ni_htctlchan, ni->ni_ht2ndchan);
+ printf("\thtopmode %x htstbc %x chw %u\n",
+ ni->ni_htopmode, ni->ni_htstbc, ni->ni_chw);
+}
+
+void
+ieee80211_dump_nodes(struct ieee80211_node_table *nt)
+{
+ ieee80211_iterate_nodes(nt,
+ (ieee80211_iter_func *) ieee80211_dump_node, nt);
+}
+
+static void
+ieee80211_notify_erp_locked(struct ieee80211com *ic)
+{
+ struct ieee80211vap *vap;
+
+ IEEE80211_LOCK_ASSERT(ic);
+
+ TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next)
+ if (vap->iv_opmode == IEEE80211_M_HOSTAP)
+ ieee80211_beacon_notify(vap, IEEE80211_BEACON_ERP);
+}
+
+void
+ieee80211_notify_erp(struct ieee80211com *ic)
+{
+ IEEE80211_LOCK(ic);
+ ieee80211_notify_erp_locked(ic);
+ IEEE80211_UNLOCK(ic);
+}
+
+/*
+ * Handle a station joining an 11g network.
+ */
+static void
+ieee80211_node_join_11g(struct ieee80211_node *ni)
+{
+ struct ieee80211com *ic = ni->ni_ic;
+
+ IEEE80211_LOCK_ASSERT(ic);
+
+ /*
+ * Station isn't capable of short slot time. Bump
+ * the count of long slot time stations and disable
+ * use of short slot time. Note that the actual switch
+ * over to long slot time use may not occur until the
+ * next beacon transmission (per sec. 7.3.1.4 of 11g).
+ */
+ if ((ni->ni_capinfo & IEEE80211_CAPINFO_SHORT_SLOTTIME) == 0) {
+ ic->ic_longslotsta++;
+ IEEE80211_NOTE(ni->ni_vap, IEEE80211_MSG_ASSOC, ni,
+ "station needs long slot time, count %d",
+ ic->ic_longslotsta);
+ /* XXX vap's w/ conflicting needs won't work */
+ if (!IEEE80211_IS_CHAN_108G(ic->ic_bsschan)) {
+ /*
+ * Don't force slot time when switched to turbo
+ * mode as non-ERP stations won't be present; this
+ * need only be done when on the normal G channel.
+ */
+ ieee80211_set_shortslottime(ic, 0);
+ }
+ }
+ /*
+ * If the new station is not an ERP station
+ * then bump the counter and enable protection
+ * if configured.
+ */
+ if (!ieee80211_iserp_rateset(&ni->ni_rates)) {
+ ic->ic_nonerpsta++;
+ IEEE80211_NOTE(ni->ni_vap, IEEE80211_MSG_ASSOC, ni,
+ "station is !ERP, %d non-ERP stations associated",
+ ic->ic_nonerpsta);
+ /*
+ * If station does not support short preamble
+ * then we must enable use of Barker preamble.
+ */
+ if ((ni->ni_capinfo & IEEE80211_CAPINFO_SHORT_PREAMBLE) == 0) {
+ IEEE80211_NOTE(ni->ni_vap, IEEE80211_MSG_ASSOC, ni,
+ "%s", "station needs long preamble");
+ ic->ic_flags |= IEEE80211_F_USEBARKER;
+ ic->ic_flags &= ~IEEE80211_F_SHPREAMBLE;
+ }
+ /*
+ * If protection is configured and this is the first
+ * indication we should use protection, enable it.
+ */
+ if (ic->ic_protmode != IEEE80211_PROT_NONE &&
+ ic->ic_nonerpsta == 1 &&
+ (ic->ic_flags_ext & IEEE80211_FEXT_NONERP_PR) == 0) {
+ IEEE80211_DPRINTF(ni->ni_vap, IEEE80211_MSG_ASSOC,
+ "%s: enable use of protection\n", __func__);
+ ic->ic_flags |= IEEE80211_F_USEPROT;
+ ieee80211_notify_erp_locked(ic);
+ }
+ } else
+ ni->ni_flags |= IEEE80211_NODE_ERP;
+}
+
+void
+ieee80211_node_join(struct ieee80211_node *ni, int resp)
+{
+ struct ieee80211com *ic = ni->ni_ic;
+ struct ieee80211vap *vap = ni->ni_vap;
+ int newassoc;
+
+ if (ni->ni_associd == 0) {
+ uint16_t aid;
+
+ KASSERT(vap->iv_aid_bitmap != NULL, ("no aid bitmap"));
+ /*
+ * It would be good to search the bitmap
+ * more efficiently, but this will do for now.
+ */
+ for (aid = 1; aid < vap->iv_max_aid; aid++) {
+ if (!IEEE80211_AID_ISSET(vap, aid))
+ break;
+ }
+ if (aid >= vap->iv_max_aid) {
+ IEEE80211_SEND_MGMT(ni, resp, IEEE80211_STATUS_TOOMANY);
+ ieee80211_node_leave(ni);
+ return;
+ }
+ ni->ni_associd = aid | 0xc000;
+ ni->ni_jointime = time_uptime;
+ IEEE80211_LOCK(ic);
+ IEEE80211_AID_SET(vap, ni->ni_associd);
+ vap->iv_sta_assoc++;
+ ic->ic_sta_assoc++;
+
+ if (IEEE80211_IS_CHAN_HT(ic->ic_bsschan))
+ ieee80211_ht_node_join(ni);
+ if (IEEE80211_IS_CHAN_ANYG(ic->ic_bsschan) &&
+ IEEE80211_IS_CHAN_FULL(ic->ic_bsschan))
+ ieee80211_node_join_11g(ni);
+ IEEE80211_UNLOCK(ic);
+
+ newassoc = 1;
+ } else
+ newassoc = 0;
+
+ IEEE80211_NOTE(vap, IEEE80211_MSG_ASSOC | IEEE80211_MSG_DEBUG, ni,
+ "station associated at aid %d: %s preamble, %s slot time%s%s%s%s%s%s%s%s",
+ IEEE80211_NODE_AID(ni),
+ ic->ic_flags & IEEE80211_F_SHPREAMBLE ? "short" : "long",
+ ic->ic_flags & IEEE80211_F_SHSLOT ? "short" : "long",
+ ic->ic_flags & IEEE80211_F_USEPROT ? ", protection" : "",
+ ni->ni_flags & IEEE80211_NODE_QOS ? ", QoS" : "",
+ ni->ni_flags & IEEE80211_NODE_HT ?
+ (ni->ni_chw == 40 ? ", HT40" : ", HT20") : "",
+ ni->ni_flags & IEEE80211_NODE_AMPDU ? " (+AMPDU)" : "",
+ ni->ni_flags & IEEE80211_NODE_MIMO_RTS ? " (+SMPS-DYN)" :
+ ni->ni_flags & IEEE80211_NODE_MIMO_PS ? " (+SMPS)" : "",
+ ni->ni_flags & IEEE80211_NODE_RIFS ? " (+RIFS)" : "",
+ IEEE80211_ATH_CAP(vap, ni, IEEE80211_NODE_FF) ?
+ ", fast-frames" : "",
+ IEEE80211_ATH_CAP(vap, ni, IEEE80211_NODE_TURBOP) ?
+ ", turbo" : ""
+ );
+
+ ieee80211_node_setuptxparms(ni);
+ ieee80211_ratectl_node_init(ni);
+ /* give driver a chance to setup state like ni_txrate */
+ if (ic->ic_newassoc != NULL)
+ ic->ic_newassoc(ni, newassoc);
+ IEEE80211_SEND_MGMT(ni, resp, IEEE80211_STATUS_SUCCESS);
+ /* tell the authenticator about new station */
+ if (vap->iv_auth->ia_node_join != NULL)
+ vap->iv_auth->ia_node_join(ni);
+ ieee80211_notify_node_join(ni,
+ resp == IEEE80211_FC0_SUBTYPE_ASSOC_RESP);
+}
+
+static void
+disable_protection(struct ieee80211com *ic)
+{
+ KASSERT(ic->ic_nonerpsta == 0 &&
+ (ic->ic_flags_ext & IEEE80211_FEXT_NONERP_PR) == 0,
+ ("%d non ERP stations, flags 0x%x", ic->ic_nonerpsta,
+ ic->ic_flags_ext));
+
+ ic->ic_flags &= ~IEEE80211_F_USEPROT;
+ /* XXX verify mode? */
+ if (ic->ic_caps & IEEE80211_C_SHPREAMBLE) {
+ ic->ic_flags |= IEEE80211_F_SHPREAMBLE;
+ ic->ic_flags &= ~IEEE80211_F_USEBARKER;
+ }
+ ieee80211_notify_erp_locked(ic);
+}
+
+/*
+ * Handle a station leaving an 11g network.
+ */
+static void
+ieee80211_node_leave_11g(struct ieee80211_node *ni)
+{
+ struct ieee80211com *ic = ni->ni_ic;
+
+ IEEE80211_LOCK_ASSERT(ic);
+
+ KASSERT(IEEE80211_IS_CHAN_ANYG(ic->ic_bsschan),
+ ("not in 11g, bss %u:0x%x", ic->ic_bsschan->ic_freq,
+ ic->ic_bsschan->ic_flags));
+
+ /*
+ * If a long slot station do the slot time bookkeeping.
+ */
+ if ((ni->ni_capinfo & IEEE80211_CAPINFO_SHORT_SLOTTIME) == 0) {
+ KASSERT(ic->ic_longslotsta > 0,
+ ("bogus long slot station count %d", ic->ic_longslotsta));
+ ic->ic_longslotsta--;
+ IEEE80211_NOTE(ni->ni_vap, IEEE80211_MSG_ASSOC, ni,
+ "long slot time station leaves, count now %d",
+ ic->ic_longslotsta);
+ if (ic->ic_longslotsta == 0) {
+ /*
+ * Re-enable use of short slot time if supported
+ * and not operating in IBSS mode (per spec).
+ */
+ if ((ic->ic_caps & IEEE80211_C_SHSLOT) &&
+ ic->ic_opmode != IEEE80211_M_IBSS) {
+ IEEE80211_DPRINTF(ni->ni_vap,
+ IEEE80211_MSG_ASSOC,
+ "%s: re-enable use of short slot time\n",
+ __func__);
+ ieee80211_set_shortslottime(ic, 1);
+ }
+ }
+ }
+ /*
+ * If a non-ERP station do the protection-related bookkeeping.
+ */
+ if ((ni->ni_flags & IEEE80211_NODE_ERP) == 0) {
+ KASSERT(ic->ic_nonerpsta > 0,
+ ("bogus non-ERP station count %d", ic->ic_nonerpsta));
+ ic->ic_nonerpsta--;
+ IEEE80211_NOTE(ni->ni_vap, IEEE80211_MSG_ASSOC, ni,
+ "non-ERP station leaves, count now %d%s", ic->ic_nonerpsta,
+ (ic->ic_flags_ext & IEEE80211_FEXT_NONERP_PR) ?
+ " (non-ERP sta present)" : "");
+ if (ic->ic_nonerpsta == 0 &&
+ (ic->ic_flags_ext & IEEE80211_FEXT_NONERP_PR) == 0) {
+ IEEE80211_DPRINTF(ni->ni_vap, IEEE80211_MSG_ASSOC,
+ "%s: disable use of protection\n", __func__);
+ disable_protection(ic);
+ }
+ }
+}
+
+/*
+ * Time out presence of an overlapping bss with non-ERP
+ * stations. When operating in hostap mode we listen for
+ * beacons from other stations and if we identify a non-ERP
+ * station is present we enable protection. To identify
+ * when all non-ERP stations are gone we time out this
+ * condition.
+ */
+static void
+ieee80211_erp_timeout(struct ieee80211com *ic)
+{
+
+ IEEE80211_LOCK_ASSERT(ic);
+
+ if ((ic->ic_flags_ext & IEEE80211_FEXT_NONERP_PR) &&
+ time_after(ticks, ic->ic_lastnonerp + IEEE80211_NONERP_PRESENT_AGE)) {
+#if 0
+ IEEE80211_NOTE(vap, IEEE80211_MSG_ASSOC, ni,
+ "%s", "age out non-ERP sta present on channel");
+#endif
+ ic->ic_flags_ext &= ~IEEE80211_FEXT_NONERP_PR;
+ if (ic->ic_nonerpsta == 0)
+ disable_protection(ic);
+ }
+}
+
+/*
+ * Handle bookkeeping for station deauthentication/disassociation
+ * when operating as an ap.
+ */
+void
+ieee80211_node_leave(struct ieee80211_node *ni)
+{
+ struct ieee80211com *ic = ni->ni_ic;
+ struct ieee80211vap *vap = ni->ni_vap;
+ struct ieee80211_node_table *nt = ni->ni_table;
+
+ IEEE80211_NOTE(vap, IEEE80211_MSG_ASSOC | IEEE80211_MSG_DEBUG, ni,
+ "station with aid %d leaves", IEEE80211_NODE_AID(ni));
+
+ KASSERT(vap->iv_opmode != IEEE80211_M_STA,
+ ("unexpected operating mode %u", vap->iv_opmode));
+ /*
+ * If node wasn't previously associated all
+ * we need to do is reclaim the reference.
+ */
+ /* XXX ibss mode bypasses 11g and notification */
+ if (ni->ni_associd == 0)
+ goto done;
+ /*
+ * Tell the authenticator the station is leaving.
+ * Note that we must do this before yanking the
+ * association id as the authenticator uses the
+	 * associd to locate its state block.
+ */
+ if (vap->iv_auth->ia_node_leave != NULL)
+ vap->iv_auth->ia_node_leave(ni);
+
+ IEEE80211_LOCK(ic);
+ IEEE80211_AID_CLR(vap, ni->ni_associd);
+ ni->ni_associd = 0;
+ vap->iv_sta_assoc--;
+ ic->ic_sta_assoc--;
+
+ if (IEEE80211_IS_CHAN_HT(ic->ic_bsschan))
+ ieee80211_ht_node_leave(ni);
+ if (IEEE80211_IS_CHAN_ANYG(ic->ic_bsschan) &&
+ IEEE80211_IS_CHAN_FULL(ic->ic_bsschan))
+ ieee80211_node_leave_11g(ni);
+ IEEE80211_UNLOCK(ic);
+ /*
+ * Cleanup station state. In particular clear various
+ * state that might otherwise be reused if the node
+ * is reused before the reference count goes to zero
+ * (and memory is reclaimed).
+ */
+ ieee80211_sta_leave(ni);
+done:
+ /*
+ * Remove the node from any table it's recorded in and
+ * drop the caller's reference. Removal from the table
+	 * is important to ensure the node is not reprocessed
+ * for inactivity.
+ */
+ if (nt != NULL) {
+ IEEE80211_NODE_LOCK(nt);
+ node_reclaim(nt, ni);
+ IEEE80211_NODE_UNLOCK(nt);
+ } else
+ ieee80211_free_node(ni);
+}
+
+struct rssiinfo {
+ struct ieee80211vap *vap;
+ int rssi_samples;
+ uint32_t rssi_total;
+};
+
+static void
+get_hostap_rssi(void *arg, struct ieee80211_node *ni)
+{
+ struct rssiinfo *info = arg;
+ struct ieee80211vap *vap = ni->ni_vap;
+ int8_t rssi;
+
+ if (info->vap != vap)
+ return;
+ /* only associated stations */
+ if (ni->ni_associd == 0)
+ return;
+ rssi = vap->iv_ic->ic_node_getrssi(ni);
+ if (rssi != 0) {
+ info->rssi_samples++;
+ info->rssi_total += rssi;
+ }
+}
+
+static void
+get_adhoc_rssi(void *arg, struct ieee80211_node *ni)
+{
+ struct rssiinfo *info = arg;
+ struct ieee80211vap *vap = ni->ni_vap;
+ int8_t rssi;
+
+ if (info->vap != vap)
+ return;
+ /* only neighbors */
+ /* XXX check bssid */
+ if ((ni->ni_capinfo & IEEE80211_CAPINFO_IBSS) == 0)
+ return;
+ rssi = vap->iv_ic->ic_node_getrssi(ni);
+ if (rssi != 0) {
+ info->rssi_samples++;
+ info->rssi_total += rssi;
+ }
+}
+
+#ifdef IEEE80211_SUPPORT_MESH
+static void
+get_mesh_rssi(void *arg, struct ieee80211_node *ni)
+{
+ struct rssiinfo *info = arg;
+ struct ieee80211vap *vap = ni->ni_vap;
+ int8_t rssi;
+
+ if (info->vap != vap)
+ return;
+ /* only neighbors that peered successfully */
+ if (ni->ni_mlstate != IEEE80211_NODE_MESH_ESTABLISHED)
+ return;
+ rssi = vap->iv_ic->ic_node_getrssi(ni);
+ if (rssi != 0) {
+ info->rssi_samples++;
+ info->rssi_total += rssi;
+ }
+}
+#endif /* IEEE80211_SUPPORT_MESH */
+
+int8_t
+ieee80211_getrssi(struct ieee80211vap *vap)
+{
+#define NZ(x) ((x) == 0 ? 1 : (x))
+ struct ieee80211com *ic = vap->iv_ic;
+ struct rssiinfo info;
+
+ info.rssi_total = 0;
+ info.rssi_samples = 0;
+ info.vap = vap;
+ switch (vap->iv_opmode) {
+ case IEEE80211_M_IBSS: /* average of all ibss neighbors */
+ case IEEE80211_M_AHDEMO: /* average of all neighbors */
+ ieee80211_iterate_nodes(&ic->ic_sta, get_adhoc_rssi, &info);
+ break;
+ case IEEE80211_M_HOSTAP: /* average of all associated stations */
+ ieee80211_iterate_nodes(&ic->ic_sta, get_hostap_rssi, &info);
+ break;
+#ifdef IEEE80211_SUPPORT_MESH
+ case IEEE80211_M_MBSS: /* average of all mesh neighbors */
+ ieee80211_iterate_nodes(&ic->ic_sta, get_mesh_rssi, &info);
+ break;
+#endif
+ case IEEE80211_M_MONITOR: /* XXX */
+ case IEEE80211_M_STA: /* use stats from associated ap */
+ default:
+ if (vap->iv_bss != NULL)
+ info.rssi_total = ic->ic_node_getrssi(vap->iv_bss);
+ info.rssi_samples = 1;
+ break;
+ }
+ return info.rssi_total / NZ(info.rssi_samples);
+#undef NZ
+}
+
+void
+ieee80211_getsignal(struct ieee80211vap *vap, int8_t *rssi, int8_t *noise)
+{
+
+ if (vap->iv_bss == NULL) /* NB: shouldn't happen */
+ return;
+ vap->iv_ic->ic_node_getsignal(vap->iv_bss, rssi, noise);
+ /* for non-station mode return avg'd rssi accounting */
+ if (vap->iv_opmode != IEEE80211_M_STA)
+ *rssi = ieee80211_getrssi(vap);
+}
diff --git a/rtems/freebsd/net80211/ieee80211_node.h b/rtems/freebsd/net80211/ieee80211_node.h
new file mode 100644
index 00000000..893869cb
--- /dev/null
+++ b/rtems/freebsd/net80211/ieee80211_node.h
@@ -0,0 +1,456 @@
+/*-
+ * Copyright (c) 2001 Atsushi Onoe
+ * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+#ifndef _NET80211_IEEE80211_NODE_HH_
+#define _NET80211_IEEE80211_NODE_HH_
+
+#include <rtems/freebsd/net80211/ieee80211_ioctl.h> /* for ieee80211_nodestats */
+#include <rtems/freebsd/net80211/ieee80211_ht.h> /* for aggregation state */
+
+/*
+ * Each ieee80211com instance has a single timer that fires every
+ * IEEE80211_INACT_WAIT seconds to handle "inactivity processing".
+ * This is used to do node inactivity processing when operating
+ * as an AP, adhoc or mesh mode. For inactivity processing each node
+ * has a timeout set in its ni_inact field that is decremented
+ * on each timeout and the node is reclaimed when the counter goes
+ * to zero. We use different inactivity timeout values depending
+ * on whether the node is associated and authorized (either by
+ * 802.1x or open/shared key authentication) or associated but yet
+ * to be authorized. The latter timeout is shorter to more aggressively
+ * reclaim nodes that leave part way through the 802.1x exchange.
+ */
+#define IEEE80211_INACT_WAIT 15 /* inactivity interval (secs) */
+#define IEEE80211_INACT_INIT (30/IEEE80211_INACT_WAIT) /* initial */
+#define IEEE80211_INACT_AUTH (180/IEEE80211_INACT_WAIT) /* associated but not authorized */
+#define IEEE80211_INACT_RUN (300/IEEE80211_INACT_WAIT) /* authorized */
+#define IEEE80211_INACT_PROBE (30/IEEE80211_INACT_WAIT) /* probe */
+#define IEEE80211_INACT_SCAN (300/IEEE80211_INACT_WAIT) /* scanned */
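
The inactivity defines above are in units of the IEEE80211_INACT_WAIT timer period, not seconds. A minimal standalone sketch of the arithmetic (illustrative only, not part of this port):

#include <stdio.h>

#define IEEE80211_INACT_WAIT    15      /* timer period (secs) */
#define IEEE80211_INACT_RUN     (300/IEEE80211_INACT_WAIT)

int
main(void)
{
        /* ni_inact is loaded with the reload value and decremented once
         * per timer firing; the node is reclaimed when it reaches zero. */
        int timeouts = IEEE80211_INACT_RUN;             /* 20 */
        printf("authorized sta reclaimed after %d secs idle\n",
            timeouts * IEEE80211_INACT_WAIT);           /* 300 */
        return 0;
}
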
+
+#define IEEE80211_TRANS_WAIT 2 /* mgt frame tx timer (secs) */
+
+/* threshold for aging overlapping non-ERP bss */
+#define IEEE80211_NONERP_PRESENT_AGE msecs_to_ticks(60*1000)
+
+#define IEEE80211_NODE_HASHSIZE 32 /* NB: hash size must be pow2 */
+/* simple hash is enough for variation of macaddr */
+#define IEEE80211_NODE_HASH(ic, addr) \
+ (((const uint8_t *)(addr))[IEEE80211_ADDR_LEN - 1] % \
+ IEEE80211_NODE_HASHSIZE)
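
The hash deliberately keys on the last octet of the MAC address; vendor-assigned addresses vary fastest in the low bytes, so stations spread evenly across the 32 buckets. A standalone sketch of the bucket computation (the unused ic argument is dropped here for brevity):

#include <stdio.h>
#include <stdint.h>

#define IEEE80211_ADDR_LEN      6
#define IEEE80211_NODE_HASHSIZE 32
#define NODE_HASH(addr) \
        (((const uint8_t *)(addr))[IEEE80211_ADDR_LEN - 1] % \
         IEEE80211_NODE_HASHSIZE)

int
main(void)
{
        uint8_t mac[IEEE80211_ADDR_LEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };

        printf("bucket %d\n", NODE_HASH(mac));  /* 0x55 % 32 = 21 */
        return 0;
}
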
+
+struct ieee80211_node_table;
+struct ieee80211com;
+struct ieee80211vap;
+
+/*
+ * Information element ``blob''. We use this structure
+ * to capture management frame payloads that need to be
+ * retained. Information elements within the payload that
+ * we need to consult have references recorded.
+ */
+struct ieee80211_ies {
+ /* the following are either NULL or point within data */
+ uint8_t *wpa_ie; /* captured WPA ie */
+ uint8_t *rsn_ie; /* captured RSN ie */
+ uint8_t *wme_ie; /* captured WME ie */
+ uint8_t *ath_ie; /* captured Atheros ie */
+ uint8_t *htcap_ie; /* captured HTCAP ie */
+ uint8_t *htinfo_ie; /* captured HTINFO ie */
+ uint8_t *tdma_ie; /* captured TDMA ie */
+ uint8_t *meshid_ie; /* captured MESH ID ie */
+ uint8_t *spare[4];
+ /* NB: these must be the last members of this structure */
+ uint8_t *data; /* frame data > 802.11 header */
+ int len; /* data size in bytes */
+};
+
+/*
+ * 802.11s (Mesh) Peer Link FSM state.
+ */
+enum ieee80211_mesh_mlstate {
+ IEEE80211_NODE_MESH_IDLE = 0,
+ IEEE80211_NODE_MESH_OPENSNT = 1, /* open frame sent */
+ IEEE80211_NODE_MESH_OPENRCV = 2, /* open frame received */
+ IEEE80211_NODE_MESH_CONFIRMRCV = 3, /* confirm frame received */
+ IEEE80211_NODE_MESH_ESTABLISHED = 4, /* link established */
+ IEEE80211_NODE_MESH_HOLDING = 5, /* link closing */
+};
+#define IEEE80211_MESH_MLSTATE_BITS \
+ "\20\1IDLE\2OPENSNT\2OPENRCV\3CONFIRMRCV\4ESTABLISHED\5HOLDING"
+
+/*
+ * Node specific information. Note that drivers are expected
+ * to derive from this structure to add device-specific per-node
+ * state. This is done by overriding the ic_node_* methods in
+ * the ieee80211com structure.
+ */
+struct ieee80211_node {
+ struct ieee80211vap *ni_vap; /* associated vap */
+ struct ieee80211com *ni_ic; /* copy from vap to save deref*/
+ struct ieee80211_node_table *ni_table; /* NB: may be NULL */
+ TAILQ_ENTRY(ieee80211_node) ni_list; /* list of all nodes */
+ LIST_ENTRY(ieee80211_node) ni_hash; /* hash collision list */
+ u_int ni_refcnt; /* count of held references */
+ u_int ni_scangen; /* gen# for timeout scan */
+ u_int ni_flags;
+#define IEEE80211_NODE_AUTH 0x000001 /* authorized for data */
+#define IEEE80211_NODE_QOS 0x000002 /* QoS enabled */
+#define IEEE80211_NODE_ERP 0x000004 /* ERP enabled */
+/* NB: this must have the same value as IEEE80211_FC1_PWR_MGT */
+#define IEEE80211_NODE_PWR_MGT 0x000010 /* power save mode enabled */
+#define IEEE80211_NODE_AREF 0x000020 /* authentication ref held */
+#define IEEE80211_NODE_HT 0x000040 /* HT enabled */
+#define IEEE80211_NODE_HTCOMPAT 0x000080 /* HT setup w/ vendor OUI's */
+#define IEEE80211_NODE_WPS 0x000100 /* WPS association */
+#define IEEE80211_NODE_TSN 0x000200 /* TSN association */
+#define IEEE80211_NODE_AMPDU_RX 0x000400 /* AMPDU rx enabled */
+#define IEEE80211_NODE_AMPDU_TX 0x000800 /* AMPDU tx enabled */
+#define IEEE80211_NODE_MIMO_PS 0x001000 /* MIMO power save enabled */
+#define IEEE80211_NODE_MIMO_RTS 0x002000 /* send RTS in MIMO PS */
+#define IEEE80211_NODE_RIFS 0x004000 /* RIFS enabled */
+#define IEEE80211_NODE_SGI20 0x008000 /* Short GI in HT20 enabled */
+#define IEEE80211_NODE_SGI40 0x010000 /* Short GI in HT40 enabled */
+#define IEEE80211_NODE_ASSOCID 0x020000 /* xmit requires associd */
+#define IEEE80211_NODE_AMSDU_RX 0x040000 /* AMSDU rx enabled */
+#define IEEE80211_NODE_AMSDU_TX 0x080000 /* AMSDU tx enabled */
+ uint16_t ni_associd; /* association ID */
+ uint16_t ni_vlan; /* vlan tag */
+ uint16_t ni_txpower; /* current transmit power */
+ uint8_t ni_authmode; /* authentication algorithm */
+ uint8_t ni_ath_flags; /* Atheros feature flags */
+ /* NB: These must have the same values as IEEE80211_ATHC_* */
+#define IEEE80211_NODE_TURBOP 0x0001 /* Turbo prime enable */
+#define IEEE80211_NODE_COMP 0x0002 /* Compression enable */
+#define IEEE80211_NODE_FF 0x0004 /* Fast Frame capable */
+#define IEEE80211_NODE_XR 0x0008 /* Atheros WME enable */
+#define IEEE80211_NODE_AR 0x0010 /* AR capable */
+#define IEEE80211_NODE_BOOST 0x0080 /* Dynamic Turbo boosted */
+ uint16_t ni_ath_defkeyix;/* Atheros def key index */
+ const struct ieee80211_txparam *ni_txparms;
+ uint32_t ni_jointime; /* time of join (secs) */
+ uint32_t *ni_challenge; /* shared-key challenge */
+ struct ieee80211_ies ni_ies; /* captured ie's */
+ /* tx seq per-tid */
+ ieee80211_seq ni_txseqs[IEEE80211_TID_SIZE];
+ /* rx seq previous per-tid*/
+ ieee80211_seq ni_rxseqs[IEEE80211_TID_SIZE];
+ uint32_t ni_rxfragstamp; /* time stamp of last rx frag */
+ struct mbuf *ni_rxfrag[3]; /* rx frag reassembly */
+ struct ieee80211_key ni_ucastkey; /* unicast key */
+
+ /* hardware */
+ uint32_t ni_avgrssi; /* recv ssi state */
+ int8_t ni_noise; /* noise floor */
+
+ /* header */
+ uint8_t ni_macaddr[IEEE80211_ADDR_LEN];
+ uint8_t ni_bssid[IEEE80211_ADDR_LEN];
+
+ /* beacon, probe response */
+ union {
+ uint8_t data[8];
+ u_int64_t tsf;
+ } ni_tstamp; /* from last rcv'd beacon */
+ uint16_t ni_intval; /* beacon interval */
+ uint16_t ni_capinfo; /* capabilities */
+ uint8_t ni_esslen;
+ uint8_t ni_essid[IEEE80211_NWID_LEN];
+ struct ieee80211_rateset ni_rates; /* negotiated rate set */
+ struct ieee80211_channel *ni_chan;
+ uint16_t ni_fhdwell; /* FH only */
+ uint8_t ni_fhindex; /* FH only */
+ uint16_t ni_erp; /* ERP from beacon/probe resp */
+ uint16_t ni_timoff; /* byte offset to TIM ie */
+ uint8_t ni_dtim_period; /* DTIM period */
+ uint8_t ni_dtim_count; /* DTIM count for last bcn */
+
+ /* 11s state */
+ uint8_t ni_meshidlen;
+ uint8_t ni_meshid[IEEE80211_MESHID_LEN];
+ enum ieee80211_mesh_mlstate ni_mlstate; /* peering management state */
+ uint16_t ni_mllid; /* link local ID */
+ uint16_t ni_mlpid; /* link peer ID */
+ struct callout ni_mltimer; /* link mesh timer */
+ uint8_t ni_mlrcnt; /* link mesh retry counter */
+ uint8_t ni_mltval; /* link mesh timer value */
+
+ /* 11n state */
+ uint16_t ni_htcap; /* HT capabilities */
+ uint8_t ni_htparam; /* HT params */
+ uint8_t ni_htctlchan; /* HT control channel */
+ uint8_t ni_ht2ndchan; /* HT 2nd channel */
+ uint8_t ni_htopmode; /* HT operating mode */
+ uint8_t ni_htstbc; /* HT */
+ uint8_t ni_chw; /* negotiated channel width */
+ struct ieee80211_htrateset ni_htrates; /* negotiated ht rate set */
+ struct ieee80211_tx_ampdu ni_tx_ampdu[WME_NUM_AC];
+ struct ieee80211_rx_ampdu ni_rx_ampdu[WME_NUM_TID];
+
+ /* others */
+ short ni_inact; /* inactivity mark count */
+ short ni_inact_reload;/* inactivity reload value */
+ int ni_txrate; /* legacy rate/MCS */
+ struct ieee80211_psq ni_psq; /* power save queue */
+ struct ieee80211_nodestats ni_stats; /* per-node statistics */
+
+ struct ieee80211vap *ni_wdsvap; /* associated WDS vap */
+ void *ni_rctls; /* private ratectl state */
+ uint64_t ni_spare[3];
+};
+MALLOC_DECLARE(M_80211_NODE);
+MALLOC_DECLARE(M_80211_NODE_IE);
+
+#define IEEE80211_NODE_ATH (IEEE80211_NODE_FF | IEEE80211_NODE_TURBOP)
+#define IEEE80211_NODE_AMPDU \
+ (IEEE80211_NODE_AMPDU_RX | IEEE80211_NODE_AMPDU_TX)
+#define IEEE80211_NODE_AMSDU \
+ (IEEE80211_NODE_AMSDU_RX | IEEE80211_NODE_AMSDU_TX)
+#define IEEE80211_NODE_HT_ALL \
+ (IEEE80211_NODE_HT | IEEE80211_NODE_HTCOMPAT | \
+ IEEE80211_NODE_AMPDU | IEEE80211_NODE_AMSDU | \
+ IEEE80211_NODE_MIMO_PS | IEEE80211_NODE_MIMO_RTS | \
+ IEEE80211_NODE_RIFS | IEEE80211_NODE_SGI20 | IEEE80211_NODE_SGI40)
+
+#define IEEE80211_NODE_BITS \
+ "\20\1AUTH\2QOS\3ERP\5PWR_MGT\6AREF\7HT\10HTCOMPAT\11WPS\12TSN" \
+ "\13AMPDU_RX\14AMPDU_TX\15MIMO_PS\16MIMO_RTS\17RIFS\20SGI20\21SGI40" \
+ "\22ASSOCID"
+
+#define IEEE80211_NODE_AID(ni) IEEE80211_AID(ni->ni_associd)
+
+#define IEEE80211_NODE_STAT(ni,stat) (ni->ni_stats.ns_##stat++)
+#define IEEE80211_NODE_STAT_ADD(ni,stat,v) (ni->ni_stats.ns_##stat += v)
+#define IEEE80211_NODE_STAT_SET(ni,stat,v) (ni->ni_stats.ns_##stat = v)
+
+/*
+ * Filtered rssi calculation support. The receive rssi is maintained
+ * as an average over the last 10 frames received using a low pass filter
+ * (all frames for now; this may need to be more selective). Calculations
+ * are designed such that a good compiler can optimize them. The avg
+ * rssi state should be initialized to IEEE80211_RSSI_DUMMY_MARKER and
+ * each sample incorporated with IEEE80211_RSSI_LPF. Use IEEE80211_RSSI_GET
+ * to extract the current value.
+ *
+ * Note that we assume rssi data are in the range [-127..127] and we
+ * discard values <-20. This is consistent with assumptions throughout
+ * net80211 that signal strength data are in .5 dBm units relative to
+ * the current noise floor (linear, not log).
+ */
+#define IEEE80211_RSSI_LPF_LEN 10
+#define IEEE80211_RSSI_DUMMY_MARKER 127
+/* NB: pow2 to optimize out * and / */
+#define IEEE80211_RSSI_EP_MULTIPLIER (1<<7)
+#define IEEE80211_RSSI_IN(x) ((x) * IEEE80211_RSSI_EP_MULTIPLIER)
+#define _IEEE80211_RSSI_LPF(x, y, len) \
+ (((x) != IEEE80211_RSSI_DUMMY_MARKER) ? (((x) * ((len) - 1) + (y)) / (len)) : (y))
+#define IEEE80211_RSSI_LPF(x, y) do { \
+ if ((y) >= -20) { \
+ x = _IEEE80211_RSSI_LPF((x), IEEE80211_RSSI_IN((y)), \
+ IEEE80211_RSSI_LPF_LEN); \
+ } \
+} while (0)
+#define IEEE80211_RSSI_EP_RND(x, mul) \
+ ((((x) % (mul)) >= ((mul)/2)) ? ((x) + ((mul) - 1)) / (mul) : (x)/(mul))
+#define IEEE80211_RSSI_GET(x) \
+ IEEE80211_RSSI_EP_RND(x, IEEE80211_RSSI_EP_MULTIPLIER)
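
To make the fixed-point arithmetic concrete, here is a minimal user-space restatement of the filter with the same constants and simplified names (illustrative only):

#include <stdio.h>
#include <stdint.h>

#define LEN     10              /* IEEE80211_RSSI_LPF_LEN */
#define MUL     (1 << 7)        /* IEEE80211_RSSI_EP_MULTIPLIER */
#define DUMMY   127             /* IEEE80211_RSSI_DUMMY_MARKER */

static uint32_t
lpf(uint32_t avg, int sample)
{
        if (sample < -20)               /* discard bogus readings */
                return avg;
        if (avg == DUMMY)               /* first sample primes the filter */
                return sample * MUL;
        return (avg * (LEN - 1) + sample * MUL) / LEN;
}

static int
rssi_get(uint32_t avg)                  /* round back to .5 dBm units */
{
        return (avg % MUL >= MUL / 2) ? (avg + MUL - 1) / MUL : avg / MUL;
}

int
main(void)
{
        uint32_t avg = DUMMY;

        avg = lpf(avg, 40);             /* primes to 40*128 = 5120 */
        avg = lpf(avg, 50);             /* (5120*9 + 50*128)/10 = 5248 */
        printf("filtered rssi %d\n", rssi_get(avg));    /* 5248/128 = 41 */
        return 0;
}
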
+
+static __inline struct ieee80211_node *
+ieee80211_ref_node(struct ieee80211_node *ni)
+{
+ ieee80211_node_incref(ni);
+ return ni;
+}
+
+static __inline void
+ieee80211_unref_node(struct ieee80211_node **ni)
+{
+ ieee80211_node_decref(*ni);
+ *ni = NULL; /* guard against use */
+}
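
Typical calling pattern under these helpers: every lookup returns a held reference that must be released exactly once. A hypothetical fragment (kernel context; vap and dstmac are placeholders):

        struct ieee80211_node *ni;

        ni = ieee80211_find_txnode(vap, dstmac);        /* ref held on success */
        if (ni != NULL) {
                /* ... transmit using ni ... */
                ieee80211_free_node(ni);                /* drop the reference */
        }
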
+
+struct ieee80211com;
+
+void ieee80211_node_attach(struct ieee80211com *);
+void ieee80211_node_lateattach(struct ieee80211com *);
+void ieee80211_node_detach(struct ieee80211com *);
+void ieee80211_node_vattach(struct ieee80211vap *);
+void ieee80211_node_latevattach(struct ieee80211vap *);
+void ieee80211_node_vdetach(struct ieee80211vap *);
+
+static __inline int
+ieee80211_node_is_authorized(const struct ieee80211_node *ni)
+{
+ return (ni->ni_flags & IEEE80211_NODE_AUTH);
+}
+
+void ieee80211_node_authorize(struct ieee80211_node *);
+void ieee80211_node_unauthorize(struct ieee80211_node *);
+
+void ieee80211_node_setuptxparms(struct ieee80211_node *);
+void ieee80211_node_set_chan(struct ieee80211_node *,
+ struct ieee80211_channel *);
+void ieee80211_create_ibss(struct ieee80211vap*, struct ieee80211_channel *);
+void ieee80211_reset_bss(struct ieee80211vap *);
+void ieee80211_sync_curchan(struct ieee80211com *);
+void ieee80211_setupcurchan(struct ieee80211com *,
+ struct ieee80211_channel *);
+void ieee80211_setcurchan(struct ieee80211com *, struct ieee80211_channel *);
+int ieee80211_ibss_merge(struct ieee80211_node *);
+struct ieee80211_scan_entry;
+int ieee80211_sta_join(struct ieee80211vap *, struct ieee80211_channel *,
+ const struct ieee80211_scan_entry *);
+void ieee80211_sta_leave(struct ieee80211_node *);
+void ieee80211_node_deauth(struct ieee80211_node *, int);
+
+int ieee80211_ies_init(struct ieee80211_ies *, const uint8_t *, int);
+void ieee80211_ies_cleanup(struct ieee80211_ies *);
+void ieee80211_ies_expand(struct ieee80211_ies *);
+#define ieee80211_ies_setie(_ies, _ie, _off) do { \
+ (_ies)._ie = (_ies).data + (_off); \
+} while (0)
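
ieee80211_ies_setie records an element by its offset into the saved blob, so the pointer is recomputed from data rather than aliased into a buffer that may move. A hypothetical fragment (the offset 24 is made up for illustration):

        struct ieee80211_ies *ies = &ni->ni_ies;

        /* after ieee80211_ies_init() has copied the frame payload */
        ieee80211_ies_setie(*ies, wpa_ie, 24);  /* WPA IE at offset 24 */
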
+
+/*
+ * Table of ieee80211_node instances. Each ieee80211com
+ * has one that holds associated stations (when operating
+ * as an ap) or neighbors (in ibss mode).
+ *
+ * XXX embed this in ieee80211com instead of indirect?
+ */
+struct ieee80211_node_table {
+ struct ieee80211com *nt_ic; /* back reference */
+ ieee80211_node_lock_t nt_nodelock; /* on node table */
+ TAILQ_HEAD(, ieee80211_node) nt_node; /* information of all nodes */
+ LIST_HEAD(, ieee80211_node) nt_hash[IEEE80211_NODE_HASHSIZE];
+ struct ieee80211_node **nt_keyixmap; /* key ix -> node map */
+ int nt_keyixmax; /* keyixmap size */
+ const char *nt_name; /* table name for debug msgs */
+ ieee80211_scan_lock_t nt_scanlock; /* on nt_scangen */
+ u_int nt_scangen; /* gen# for iterators */
+ int nt_inact_init; /* initial node inact setting */
+};
+
+struct ieee80211_node *ieee80211_alloc_node(struct ieee80211_node_table *,
+ struct ieee80211vap *,
+ const uint8_t macaddr[IEEE80211_ADDR_LEN]);
+struct ieee80211_node *ieee80211_tmp_node(struct ieee80211vap *,
+ const uint8_t macaddr[IEEE80211_ADDR_LEN]);
+struct ieee80211_node *ieee80211_dup_bss(struct ieee80211vap *,
+ const uint8_t macaddr[IEEE80211_ADDR_LEN]);
+struct ieee80211_node *ieee80211_node_create_wds(struct ieee80211vap *,
+ const uint8_t bssid[IEEE80211_ADDR_LEN],
+ struct ieee80211_channel *);
+#ifdef IEEE80211_DEBUG_REFCNT
+void ieee80211_free_node_debug(struct ieee80211_node *,
+ const char *func, int line);
+struct ieee80211_node *ieee80211_find_node_locked_debug(
+ struct ieee80211_node_table *,
+ const uint8_t macaddr[IEEE80211_ADDR_LEN],
+ const char *func, int line);
+struct ieee80211_node *ieee80211_find_node_debug(struct ieee80211_node_table *,
+ const uint8_t macaddr[IEEE80211_ADDR_LEN],
+ const char *func, int line);
+struct ieee80211_node *ieee80211_find_vap_node_locked_debug(
+ struct ieee80211_node_table *,
+ const struct ieee80211vap *vap,
+ const uint8_t macaddr[IEEE80211_ADDR_LEN],
+ const char *func, int line);
+struct ieee80211_node *ieee80211_find_vap_node_debug(
+ struct ieee80211_node_table *,
+ const struct ieee80211vap *vap,
+ const uint8_t macaddr[IEEE80211_ADDR_LEN],
+ const char *func, int line);
+struct ieee80211_node * ieee80211_find_rxnode_debug(struct ieee80211com *,
+ const struct ieee80211_frame_min *,
+ const char *func, int line);
+struct ieee80211_node * ieee80211_find_rxnode_withkey_debug(
+ struct ieee80211com *,
+ const struct ieee80211_frame_min *, uint16_t keyix,
+ const char *func, int line);
+struct ieee80211_node *ieee80211_find_txnode_debug(struct ieee80211vap *,
+ const uint8_t *,
+ const char *func, int line);
+#define ieee80211_free_node(ni) \
+ ieee80211_free_node_debug(ni, __func__, __LINE__)
+#define ieee80211_find_node_locked(nt, mac) \
+ ieee80211_find_node_locked_debug(nt, mac, __func__, __LINE__)
+#define ieee80211_find_node(nt, mac) \
+ ieee80211_find_node_debug(nt, mac, __func__, __LINE__)
+#define ieee80211_find_vap_node_locked(nt, vap, mac) \
+ ieee80211_find_vap_node_locked_debug(nt, vap, mac, __func__, __LINE__)
+#define ieee80211_find_vap_node(nt, vap, mac) \
+ ieee80211_find_vap_node_debug(nt, vap, mac, __func__, __LINE__)
+#define ieee80211_find_rxnode(ic, wh) \
+ ieee80211_find_rxnode_debug(ic, wh, __func__, __LINE__)
+#define ieee80211_find_rxnode_withkey(ic, wh, keyix) \
+ ieee80211_find_rxnode_withkey_debug(ic, wh, keyix, __func__, __LINE__)
+#define ieee80211_find_txnode(vap, mac) \
+ ieee80211_find_txnode_debug(vap, mac, __func__, __LINE__)
+#else
+void ieee80211_free_node(struct ieee80211_node *);
+struct ieee80211_node *ieee80211_find_node_locked(struct ieee80211_node_table *,
+ const uint8_t macaddr[IEEE80211_ADDR_LEN]);
+struct ieee80211_node *ieee80211_find_node(struct ieee80211_node_table *,
+ const uint8_t macaddr[IEEE80211_ADDR_LEN]);
+struct ieee80211_node *ieee80211_find_vap_node_locked(
+ struct ieee80211_node_table *, const struct ieee80211vap *,
+ const uint8_t macaddr[IEEE80211_ADDR_LEN]);
+struct ieee80211_node *ieee80211_find_vap_node(
+ struct ieee80211_node_table *, const struct ieee80211vap *,
+ const uint8_t macaddr[IEEE80211_ADDR_LEN]);
+struct ieee80211_node * ieee80211_find_rxnode(struct ieee80211com *,
+ const struct ieee80211_frame_min *);
+struct ieee80211_node * ieee80211_find_rxnode_withkey(struct ieee80211com *,
+ const struct ieee80211_frame_min *, uint16_t keyix);
+struct ieee80211_node *ieee80211_find_txnode(struct ieee80211vap *,
+ const uint8_t macaddr[IEEE80211_ADDR_LEN]);
+#endif
+int ieee80211_node_delucastkey(struct ieee80211_node *);
+void ieee80211_node_timeout(void *arg);
+
+typedef void ieee80211_iter_func(void *, struct ieee80211_node *);
+void ieee80211_iterate_nodes(struct ieee80211_node_table *,
+ ieee80211_iter_func *, void *);
+
+void ieee80211_notify_erp(struct ieee80211com *);
+void ieee80211_dump_node(struct ieee80211_node_table *,
+ struct ieee80211_node *);
+void ieee80211_dump_nodes(struct ieee80211_node_table *);
+
+struct ieee80211_node *ieee80211_fakeup_adhoc_node(struct ieee80211vap *,
+ const uint8_t macaddr[IEEE80211_ADDR_LEN]);
+struct ieee80211_scanparams;
+void ieee80211_init_neighbor(struct ieee80211_node *,
+ const struct ieee80211_frame *,
+ const struct ieee80211_scanparams *);
+struct ieee80211_node *ieee80211_add_neighbor(struct ieee80211vap *,
+ const struct ieee80211_frame *,
+ const struct ieee80211_scanparams *);
+void ieee80211_node_join(struct ieee80211_node *,int);
+void ieee80211_node_leave(struct ieee80211_node *);
+int8_t ieee80211_getrssi(struct ieee80211vap *);
+void ieee80211_getsignal(struct ieee80211vap *, int8_t *, int8_t *);
+#endif /* _NET80211_IEEE80211_NODE_HH_ */
diff --git a/rtems/freebsd/net80211/ieee80211_output.c b/rtems/freebsd/net80211/ieee80211_output.c
new file mode 100644
index 00000000..ee4f5b10
--- /dev/null
+++ b/rtems/freebsd/net80211/ieee80211_output.c
@@ -0,0 +1,3043 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 2001 Atsushi Onoe
+ * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/freebsd/local/opt_inet.h>
+#include <rtems/freebsd/local/opt_inet6.h>
+#include <rtems/freebsd/local/opt_wlan.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/endian.h>
+
+#include <rtems/freebsd/sys/socket.h>
+
+#include <rtems/freebsd/net/bpf.h>
+#include <rtems/freebsd/net/ethernet.h>
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/if_llc.h>
+#include <rtems/freebsd/net/if_media.h>
+#include <rtems/freebsd/net/if_vlan_var.h>
+
+#include <rtems/freebsd/net80211/ieee80211_var.h>
+#include <rtems/freebsd/net80211/ieee80211_regdomain.h>
+#ifdef IEEE80211_SUPPORT_SUPERG
+#include <rtems/freebsd/net80211/ieee80211_superg.h>
+#endif
+#ifdef IEEE80211_SUPPORT_TDMA
+#include <rtems/freebsd/net80211/ieee80211_tdma.h>
+#endif
+#include <rtems/freebsd/net80211/ieee80211_wds.h>
+#include <rtems/freebsd/net80211/ieee80211_mesh.h>
+
+#ifdef INET
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/if_ether.h>
+#include <rtems/freebsd/netinet/in_systm.h>
+#include <rtems/freebsd/netinet/ip.h>
+#endif
+#ifdef INET6
+#include <rtems/freebsd/netinet/ip6.h>
+#endif
+
+#include <rtems/freebsd/security/mac/mac_framework.h>
+
+#define ETHER_HEADER_COPY(dst, src) \
+ memcpy(dst, src, sizeof(struct ether_header))
+
+/* unaligned little-endian access */
+#define LE_WRITE_2(p, v) do { \
+ ((uint8_t *)(p))[0] = (v) & 0xff; \
+ ((uint8_t *)(p))[1] = ((v) >> 8) & 0xff; \
+} while (0)
+#define LE_WRITE_4(p, v) do { \
+ ((uint8_t *)(p))[0] = (v) & 0xff; \
+ ((uint8_t *)(p))[1] = ((v) >> 8) & 0xff; \
+ ((uint8_t *)(p))[2] = ((v) >> 16) & 0xff; \
+ ((uint8_t *)(p))[3] = ((v) >> 24) & 0xff; \
+} while (0)
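
These helpers store values one byte at a time, so they are safe at any alignment and independent of host endianness. A standalone check (illustrative only):

#include <stdio.h>
#include <stdint.h>

#define LE_WRITE_4(p, v) do { \
        ((uint8_t *)(p))[0] = (v) & 0xff; \
        ((uint8_t *)(p))[1] = ((v) >> 8) & 0xff; \
        ((uint8_t *)(p))[2] = ((v) >> 16) & 0xff; \
        ((uint8_t *)(p))[3] = ((v) >> 24) & 0xff; \
} while (0)

int
main(void)
{
        uint8_t buf[5];

        LE_WRITE_4(buf + 1, 0x11223344);        /* deliberately misaligned */
        printf("%02x %02x %02x %02x\n",
            buf[1], buf[2], buf[3], buf[4]);    /* 44 33 22 11 */
        return 0;
}
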
+
+static int ieee80211_fragment(struct ieee80211vap *, struct mbuf *,
+ u_int hdrsize, u_int ciphdrsize, u_int mtu);
+static void ieee80211_tx_mgt_cb(struct ieee80211_node *, void *, int);
+
+#ifdef IEEE80211_DEBUG
+/*
+ * Decide if an outbound management frame should be
+ * printed when debugging is enabled. This filters some
+ * of the less interesting frames that come frequently
+ * (e.g. beacons).
+ */
+static __inline int
+doprint(struct ieee80211vap *vap, int subtype)
+{
+ switch (subtype) {
+ case IEEE80211_FC0_SUBTYPE_PROBE_RESP:
+ return (vap->iv_opmode == IEEE80211_M_IBSS);
+ }
+ return 1;
+}
+#endif
+
+/*
+ * Start method for vap's. All packets from the stack come
+ * through here. We handle common processing of the packets
+ * before dispatching them to the underlying device.
+ */
+void
+ieee80211_start(struct ifnet *ifp)
+{
+#define IS_DWDS(vap) \
+ (vap->iv_opmode == IEEE80211_M_WDS && \
+ (vap->iv_flags_ext & IEEE80211_FEXT_WDSLEGACY) == 0)
+ struct ieee80211vap *vap = ifp->if_softc;
+ struct ieee80211com *ic = vap->iv_ic;
+ struct ifnet *parent = ic->ic_ifp;
+ struct ieee80211_node *ni;
+ struct mbuf *m;
+ struct ether_header *eh;
+ int error;
+
+ /* NB: parent must be up and running */
+ if (!IFNET_IS_UP_RUNNING(parent)) {
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_OUTPUT,
+ "%s: ignore queue, parent %s not up+running\n",
+ __func__, parent->if_xname);
+ /* XXX stat */
+ return;
+ }
+ if (vap->iv_state == IEEE80211_S_SLEEP) {
+ /*
+ * In power save, wakeup device for transmit.
+ */
+ ieee80211_new_state(vap, IEEE80211_S_RUN, 0);
+ return;
+ }
+ /*
+ * No data frames go out unless we're running.
+ * Note in particular this covers CAC and CSA
+ * states (though maybe we should check muting
+ * for CSA).
+ */
+ if (vap->iv_state != IEEE80211_S_RUN) {
+ IEEE80211_LOCK(ic);
+ /* re-check under the com lock to avoid races */
+ if (vap->iv_state != IEEE80211_S_RUN) {
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_OUTPUT,
+ "%s: ignore queue, in %s state\n",
+ __func__, ieee80211_state_name[vap->iv_state]);
+ vap->iv_stats.is_tx_badstate++;
+ ifp->if_drv_flags |= IFF_DRV_OACTIVE;
+ IEEE80211_UNLOCK(ic);
+ return;
+ }
+ IEEE80211_UNLOCK(ic);
+ }
+ for (;;) {
+ IFQ_DEQUEUE(&ifp->if_snd, m);
+ if (m == NULL)
+ break;
+ /*
+ * Sanitize mbuf flags for net80211 use. We cannot
+ * clear M_PWR_SAV or M_MORE_DATA because these may
+ * be set for frames that are re-submitted from the
+ * power save queue.
+ *
+ * NB: This must be done before ieee80211_classify as
+ * it marks EAPOL in frames with M_EAPOL.
+ */
+ m->m_flags &= ~(M_80211_TX - M_PWR_SAV - M_MORE_DATA);
+ /*
+ * Cancel any background scan.
+ */
+ if (ic->ic_flags & IEEE80211_F_SCAN)
+ ieee80211_cancel_anyscan(vap);
+ /*
+ * Find the node for the destination so we can do
+ * things like power save and fast frames aggregation.
+ *
+ * NB: past this point various code assumes the first
+ * mbuf has the 802.3 header present (and contiguous).
+ */
+ ni = NULL;
+ if (m->m_len < sizeof(struct ether_header) &&
+ (m = m_pullup(m, sizeof(struct ether_header))) == NULL) {
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_OUTPUT,
+ "discard frame, %s\n", "m_pullup failed");
+ vap->iv_stats.is_tx_nobuf++; /* XXX */
+ ifp->if_oerrors++;
+ continue;
+ }
+ eh = mtod(m, struct ether_header *);
+ if (ETHER_IS_MULTICAST(eh->ether_dhost)) {
+ if (IS_DWDS(vap)) {
+ /*
+ * Only unicast frames from the above go out
+ * DWDS vaps; multicast frames are handled by
+ * dispatching the frame as it comes through
+ * the AP vap (see below).
+ */
+ IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_WDS,
+ eh->ether_dhost, "mcast", "%s", "on DWDS");
+ vap->iv_stats.is_dwds_mcast++;
+ m_freem(m);
+ continue;
+ }
+ if (vap->iv_opmode == IEEE80211_M_HOSTAP) {
+ /*
+ * Spam DWDS vap's w/ multicast traffic.
+ */
+ /* XXX only if dwds in use? */
+ ieee80211_dwds_mcast(vap, m);
+ }
+ }
+#ifdef IEEE80211_SUPPORT_MESH
+ if (vap->iv_opmode != IEEE80211_M_MBSS) {
+#endif
+ ni = ieee80211_find_txnode(vap, eh->ether_dhost);
+ if (ni == NULL) {
+ /* NB: ieee80211_find_txnode does stat+msg */
+ ifp->if_oerrors++;
+ m_freem(m);
+ continue;
+ }
+ if (ni->ni_associd == 0 &&
+ (ni->ni_flags & IEEE80211_NODE_ASSOCID)) {
+ IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_OUTPUT,
+ eh->ether_dhost, NULL,
+ "sta not associated (type 0x%04x)",
+ htons(eh->ether_type));
+ vap->iv_stats.is_tx_notassoc++;
+ ifp->if_oerrors++;
+ m_freem(m);
+ ieee80211_free_node(ni);
+ continue;
+ }
+#ifdef IEEE80211_SUPPORT_MESH
+ } else {
+ if (!IEEE80211_ADDR_EQ(eh->ether_shost, vap->iv_myaddr)) {
+ /*
+ * Proxy station only if configured.
+ */
+ if (!ieee80211_mesh_isproxyena(vap)) {
+ IEEE80211_DISCARD_MAC(vap,
+ IEEE80211_MSG_OUTPUT |
+ IEEE80211_MSG_MESH,
+ eh->ether_dhost, NULL,
+ "%s", "proxy not enabled");
+ vap->iv_stats.is_mesh_notproxy++;
+ ifp->if_oerrors++;
+ m_freem(m);
+ continue;
+ }
+ ieee80211_mesh_proxy_check(vap, eh->ether_shost);
+ }
+ ni = ieee80211_mesh_discover(vap, eh->ether_dhost, m);
+ if (ni == NULL) {
+ /*
+ * NB: ieee80211_mesh_discover holds/disposes
+ * frame (e.g. queueing on path discovery).
+ */
+ ifp->if_oerrors++;
+ continue;
+ }
+ }
+#endif
+ if ((ni->ni_flags & IEEE80211_NODE_PWR_MGT) &&
+ (m->m_flags & M_PWR_SAV) == 0) {
+ /*
+ * Station in power save mode; pass the frame
+ * to the 802.11 layer and continue. We'll get
+ * the frame back when the time is right.
+ * XXX lose WDS vap linkage?
+ */
+ (void) ieee80211_pwrsave(ni, m);
+ ieee80211_free_node(ni);
+ continue;
+ }
+ /* calculate priority so drivers can find the tx queue */
+ if (ieee80211_classify(ni, m)) {
+ IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_OUTPUT,
+ eh->ether_dhost, NULL,
+ "%s", "classification failure");
+ vap->iv_stats.is_tx_classify++;
+ ifp->if_oerrors++;
+ m_freem(m);
+ ieee80211_free_node(ni);
+ continue;
+ }
+ /*
+ * Stash the node pointer. Note that we do this after
+ * any call to ieee80211_dwds_mcast because that code
+ * uses any existing value for rcvif to identify the
+ * interface it (might have been) received on.
+ */
+ m->m_pkthdr.rcvif = (void *)ni;
+
+ BPF_MTAP(ifp, m); /* 802.3 tx */
+
+ /*
+ * Check if A-MPDU tx aggregation is setup or if we
+ * should try to enable it. The sta must be associated
+ * with HT and A-MPDU enabled for use. When the policy
+ * routine decides we should enable A-MPDU we issue an
+ * ADDBA request and wait for a reply. The frame being
+ * encapsulated will go out w/o using A-MPDU, or possibly
+ * it might be collected by the driver and held for retransmission.
+ * The default ic_ampdu_enable routine handles staggering
+ * ADDBA requests in case the receiver NAK's us or we are
+ * otherwise unable to establish a BA stream.
+ */
+ if ((ni->ni_flags & IEEE80211_NODE_AMPDU_TX) &&
+ (vap->iv_flags_ht & IEEE80211_FHT_AMPDU_TX) &&
+ (m->m_flags & M_EAPOL) == 0) {
+ const int ac = M_WME_GETAC(m);
+ struct ieee80211_tx_ampdu *tap = &ni->ni_tx_ampdu[ac];
+
+ ieee80211_txampdu_count_packet(tap);
+ if (IEEE80211_AMPDU_RUNNING(tap)) {
+ /*
+ * Operational, mark frame for aggregation.
+ *
+ * XXX do tx aggregation here
+ */
+ m->m_flags |= M_AMPDU_MPDU;
+ } else if (!IEEE80211_AMPDU_REQUESTED(tap) &&
+ ic->ic_ampdu_enable(ni, tap)) {
+ /*
+ * Not negotiated yet, request service.
+ */
+ ieee80211_ampdu_request(ni, tap);
+ /* XXX hold frame for reply? */
+ }
+ }
+#ifdef IEEE80211_SUPPORT_SUPERG
+ else if (IEEE80211_ATH_CAP(vap, ni, IEEE80211_NODE_FF)) {
+ m = ieee80211_ff_check(ni, m);
+ if (m == NULL) {
+ /* NB: any ni ref held on stageq */
+ continue;
+ }
+ }
+#endif /* IEEE80211_SUPPORT_SUPERG */
+ if (__predict_true((vap->iv_caps & IEEE80211_C_8023ENCAP) == 0)) {
+ /*
+ * Encapsulate the packet in prep for transmission.
+ */
+ m = ieee80211_encap(vap, ni, m);
+ if (m == NULL) {
+ /* NB: stat+msg handled in ieee80211_encap */
+ ieee80211_free_node(ni);
+ continue;
+ }
+ }
+
+ error = parent->if_transmit(parent, m);
+ if (error != 0) {
+ /* NB: if_transmit reclaims the mbuf on error */
+ ieee80211_free_node(ni);
+ } else {
+ ifp->if_opackets++;
+ }
+ ic->ic_lastdata = ticks;
+ }
+#undef IS_DWDS
+}
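
The flag sanitization near the top of the loop relies on M_80211_TX being a superset mask: subtracting the single-bit M_PWR_SAV and M_MORE_DATA values out of it clears exactly those bits from the mask, so the &= ~ strips every net80211 tx flag except the two that must survive requeueing from the power save queue. A standalone demonstration with hypothetical flag values:

#include <stdio.h>

#define M_EAPOL         0x0100  /* values made up for illustration */
#define M_PWR_SAV       0x0200
#define M_MORE_DATA     0x0400
#define M_80211_TX      (M_EAPOL | M_PWR_SAV | M_MORE_DATA)

int
main(void)
{
        unsigned flags = M_EAPOL | M_PWR_SAV;   /* stale EAPOL + pwrsave */

        flags &= ~(M_80211_TX - M_PWR_SAV - M_MORE_DATA);
        printf("%#x\n", flags);                 /* 0x200: M_PWR_SAV kept */
        return 0;
}
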
+
+/*
+ * 802.11 output routine. This is (currently) used only to
+ * connect bpf write calls to the 802.11 layer for injecting
+ * raw 802.11 frames.
+ */
+int
+ieee80211_output(struct ifnet *ifp, struct mbuf *m,
+ struct sockaddr *dst, struct route *ro)
+{
+#define senderr(e) do { error = (e); goto bad;} while (0)
+ struct ieee80211_node *ni = NULL;
+ struct ieee80211vap *vap;
+ struct ieee80211_frame *wh;
+ int error;
+
+ if (ifp->if_drv_flags & IFF_DRV_OACTIVE) {
+ /*
+ * Short-circuit requests if the vap is marked OACTIVE
+ * as this can happen because a packet came down through
+ * ieee80211_start before the vap entered RUN state in
+ * which case it's ok to just drop the frame. This
+ * should not be necessary but callers of if_output don't
+ * check OACTIVE.
+ */
+ senderr(ENETDOWN);
+ }
+ vap = ifp->if_softc;
+ /*
+ * Hand to the 802.3 code if not tagged as
+ * a raw 802.11 frame.
+ */
+ if (dst->sa_family != AF_IEEE80211)
+ return vap->iv_output(ifp, m, dst, ro);
+#ifdef MAC
+ error = mac_ifnet_check_transmit(ifp, m);
+ if (error)
+ senderr(error);
+#endif
+ if (ifp->if_flags & IFF_MONITOR)
+ senderr(ENETDOWN);
+ if (!IFNET_IS_UP_RUNNING(ifp))
+ senderr(ENETDOWN);
+ if (vap->iv_state == IEEE80211_S_CAC) {
+ IEEE80211_DPRINTF(vap,
+ IEEE80211_MSG_OUTPUT | IEEE80211_MSG_DOTH,
+ "block %s frame in CAC state\n", "raw data");
+ vap->iv_stats.is_tx_badstate++;
+ senderr(EIO); /* XXX */
+ }
+ /* XXX bypass bridge, pfil, carp, etc. */
+
+ if (m->m_pkthdr.len < sizeof(struct ieee80211_frame_ack))
+ senderr(EIO); /* XXX */
+ wh = mtod(m, struct ieee80211_frame *);
+ if ((wh->i_fc[0] & IEEE80211_FC0_VERSION_MASK) !=
+ IEEE80211_FC0_VERSION_0)
+ senderr(EIO); /* XXX */
+
+ /* locate destination node */
+ switch (wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) {
+ case IEEE80211_FC1_DIR_NODS:
+ case IEEE80211_FC1_DIR_FROMDS:
+ ni = ieee80211_find_txnode(vap, wh->i_addr1);
+ break;
+ case IEEE80211_FC1_DIR_TODS:
+ case IEEE80211_FC1_DIR_DSTODS:
+ if (m->m_pkthdr.len < sizeof(struct ieee80211_frame))
+ senderr(EIO); /* XXX */
+ ni = ieee80211_find_txnode(vap, wh->i_addr3);
+ break;
+ default:
+ senderr(EIO); /* XXX */
+ }
+ if (ni == NULL) {
+ /*
+ * Permit packets w/ bpf params through regardless
+ * (see below about sa_len).
+ */
+ if (dst->sa_len == 0)
+ senderr(EHOSTUNREACH);
+ ni = ieee80211_ref_node(vap->iv_bss);
+ }
+
+ /*
+ * Sanitize mbuf for net80211 flags leaked from above.
+ *
+ * NB: This must be done before ieee80211_classify as
+ * it marks EAPOL in frames with M_EAPOL.
+ */
+ m->m_flags &= ~M_80211_TX;
+
+ /* calculate priority so drivers can find the tx queue */
+ /* XXX assumes an 802.3 frame */
+ if (ieee80211_classify(ni, m))
+ senderr(EIO); /* XXX */
+
+ ifp->if_opackets++;
+ IEEE80211_NODE_STAT(ni, tx_data);
+ if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
+ IEEE80211_NODE_STAT(ni, tx_mcast);
+ m->m_flags |= M_MCAST;
+ } else
+ IEEE80211_NODE_STAT(ni, tx_ucast);
+ /* NB: ieee80211_encap does not include 802.11 header */
+ IEEE80211_NODE_STAT_ADD(ni, tx_bytes, m->m_pkthdr.len);
+
+ /*
+ * NB: DLT_IEEE802_11_RADIO identifies the parameters are
+ * present by setting the sa_len field of the sockaddr (yes,
+ * this is a hack).
+ * NB: we assume sa_data is suitably aligned to cast.
+ */
+ return vap->iv_ic->ic_raw_xmit(ni, m,
+ (const struct ieee80211_bpf_params *)(dst->sa_len ?
+ dst->sa_data : NULL));
+bad:
+ if (m != NULL)
+ m_freem(m);
+ if (ni != NULL)
+ ieee80211_free_node(ni);
+ ifp->if_oerrors++;
+ return error;
+#undef senderr
+}
+
+/*
+ * Set the direction field and address fields of an outgoing
+ * frame. Note this should be called early on in constructing
+ * a frame as it sets i_fc[1]; other bits can then be or'd in.
+ */
+void
+ieee80211_send_setup(
+ struct ieee80211_node *ni,
+ struct mbuf *m,
+ int type, int tid,
+ const uint8_t sa[IEEE80211_ADDR_LEN],
+ const uint8_t da[IEEE80211_ADDR_LEN],
+ const uint8_t bssid[IEEE80211_ADDR_LEN])
+{
+#define WH4(wh) ((struct ieee80211_frame_addr4 *)wh)
+ struct ieee80211vap *vap = ni->ni_vap;
+ struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
+ ieee80211_seq seqno;
+
+ wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | type;
+ if ((type & IEEE80211_FC0_TYPE_MASK) == IEEE80211_FC0_TYPE_DATA) {
+ switch (vap->iv_opmode) {
+ case IEEE80211_M_STA:
+ wh->i_fc[1] = IEEE80211_FC1_DIR_TODS;
+ IEEE80211_ADDR_COPY(wh->i_addr1, bssid);
+ IEEE80211_ADDR_COPY(wh->i_addr2, sa);
+ IEEE80211_ADDR_COPY(wh->i_addr3, da);
+ break;
+ case IEEE80211_M_IBSS:
+ case IEEE80211_M_AHDEMO:
+ wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
+ IEEE80211_ADDR_COPY(wh->i_addr1, da);
+ IEEE80211_ADDR_COPY(wh->i_addr2, sa);
+ IEEE80211_ADDR_COPY(wh->i_addr3, bssid);
+ break;
+ case IEEE80211_M_HOSTAP:
+ wh->i_fc[1] = IEEE80211_FC1_DIR_FROMDS;
+ IEEE80211_ADDR_COPY(wh->i_addr1, da);
+ IEEE80211_ADDR_COPY(wh->i_addr2, bssid);
+ IEEE80211_ADDR_COPY(wh->i_addr3, sa);
+ break;
+ case IEEE80211_M_WDS:
+ wh->i_fc[1] = IEEE80211_FC1_DIR_DSTODS;
+ IEEE80211_ADDR_COPY(wh->i_addr1, da);
+ IEEE80211_ADDR_COPY(wh->i_addr2, vap->iv_myaddr);
+ IEEE80211_ADDR_COPY(wh->i_addr3, da);
+ IEEE80211_ADDR_COPY(WH4(wh)->i_addr4, sa);
+ break;
+ case IEEE80211_M_MBSS:
+#ifdef IEEE80211_SUPPORT_MESH
+ /* XXX add support for proxied addresses */
+ if (IEEE80211_IS_MULTICAST(da)) {
+ wh->i_fc[1] = IEEE80211_FC1_DIR_FROMDS;
+ /* XXX next hop */
+ IEEE80211_ADDR_COPY(wh->i_addr1, da);
+ IEEE80211_ADDR_COPY(wh->i_addr2,
+ vap->iv_myaddr);
+ } else {
+ wh->i_fc[1] = IEEE80211_FC1_DIR_DSTODS;
+ IEEE80211_ADDR_COPY(wh->i_addr1, da);
+ IEEE80211_ADDR_COPY(wh->i_addr2,
+ vap->iv_myaddr);
+ IEEE80211_ADDR_COPY(wh->i_addr3, da);
+ IEEE80211_ADDR_COPY(WH4(wh)->i_addr4, sa);
+ }
+#endif
+ break;
+ case IEEE80211_M_MONITOR: /* NB: to quiet compiler */
+ break;
+ }
+ } else {
+ wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
+ IEEE80211_ADDR_COPY(wh->i_addr1, da);
+ IEEE80211_ADDR_COPY(wh->i_addr2, sa);
+#ifdef IEEE80211_SUPPORT_MESH
+ if (vap->iv_opmode == IEEE80211_M_MBSS)
+ IEEE80211_ADDR_COPY(wh->i_addr3, sa);
+ else
+#endif
+ IEEE80211_ADDR_COPY(wh->i_addr3, bssid);
+ }
+ *(uint16_t *)&wh->i_dur[0] = 0;
+
+ seqno = ni->ni_txseqs[tid]++;
+ *(uint16_t *)&wh->i_seq[0] = htole16(seqno << IEEE80211_SEQ_SEQ_SHIFT);
+ M_SEQNO_SET(m, seqno);
+
+ if (IEEE80211_IS_MULTICAST(wh->i_addr1))
+ m->m_flags |= M_MCAST;
+#undef WH4
+}
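
The sequence field packing at the end of ieee80211_send_setup puts the 12-bit sequence number above the 4-bit fragment number and stores the result little-endian. A standalone check of the byte layout (htole16 elided; bytes computed by hand):

#include <stdio.h>
#include <stdint.h>

#define IEEE80211_SEQ_SEQ_SHIFT 4

int
main(void)
{
        uint16_t seqno = 0x123;                 /* 12-bit sequence number */
        uint16_t field = (uint16_t)(seqno << IEEE80211_SEQ_SEQ_SHIFT);

        printf("i_seq on the wire: %02x %02x\n",
            field & 0xff, field >> 8);          /* 30 12 */
        return 0;
}
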
+
+/*
+ * Send a management frame to the specified node. The node pointer
+ * must have a reference as the pointer will be passed to the driver
+ * and potentially held for a long time. If the frame is successfully
+ * dispatched to the driver, then it is responsible for freeing the
+ * reference (and potentially free'ing up any associated storage);
+ * otherwise deal with reclaiming any reference (on error).
+ */
+int
+ieee80211_mgmt_output(struct ieee80211_node *ni, struct mbuf *m, int type,
+ struct ieee80211_bpf_params *params)
+{
+ struct ieee80211vap *vap = ni->ni_vap;
+ struct ieee80211com *ic = ni->ni_ic;
+ struct ieee80211_frame *wh;
+
+ KASSERT(ni != NULL, ("null node"));
+
+ if (vap->iv_state == IEEE80211_S_CAC) {
+ IEEE80211_NOTE(vap, IEEE80211_MSG_OUTPUT | IEEE80211_MSG_DOTH,
+ ni, "block %s frame in CAC state",
+ ieee80211_mgt_subtype_name[
+ (type & IEEE80211_FC0_SUBTYPE_MASK) >>
+ IEEE80211_FC0_SUBTYPE_SHIFT]);
+ vap->iv_stats.is_tx_badstate++;
+ ieee80211_free_node(ni);
+ m_freem(m);
+ return EIO; /* XXX */
+ }
+
+ M_PREPEND(m, sizeof(struct ieee80211_frame), M_DONTWAIT);
+ if (m == NULL) {
+ ieee80211_free_node(ni);
+ return ENOMEM;
+ }
+
+ wh = mtod(m, struct ieee80211_frame *);
+ ieee80211_send_setup(ni, m,
+ IEEE80211_FC0_TYPE_MGT | type, IEEE80211_NONQOS_TID,
+ vap->iv_myaddr, ni->ni_macaddr, ni->ni_bssid);
+ if (params->ibp_flags & IEEE80211_BPF_CRYPTO) {
+ IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_AUTH, wh->i_addr1,
+ "encrypting frame (%s)", __func__);
+ wh->i_fc[1] |= IEEE80211_FC1_WEP;
+ }
+ m->m_flags |= M_ENCAP; /* mark encapsulated */
+
+ KASSERT(type != IEEE80211_FC0_SUBTYPE_PROBE_RESP, ("probe response?"));
+ M_WME_SETAC(m, params->ibp_pri);
+
+#ifdef IEEE80211_DEBUG
+ /* avoid printing too many frames */
+ if ((ieee80211_msg_debug(vap) && doprint(vap, type)) ||
+ ieee80211_msg_dumppkts(vap)) {
+ printf("[%s] send %s on channel %u\n",
+ ether_sprintf(wh->i_addr1),
+ ieee80211_mgt_subtype_name[
+ (type & IEEE80211_FC0_SUBTYPE_MASK) >>
+ IEEE80211_FC0_SUBTYPE_SHIFT],
+ ieee80211_chan2ieee(ic, ic->ic_curchan));
+ }
+#endif
+ IEEE80211_NODE_STAT(ni, tx_mgmt);
+
+ return ic->ic_raw_xmit(ni, m, params);
+}
+
+/*
+ * Send a null data frame to the specified node. If the station
+ * is setup for QoS then a QoS Null Data frame is constructed.
+ * If this is a WDS station then a 4-address frame is constructed.
+ *
+ * NB: the caller is assumed to have setup a node reference
+ * for use; this is necessary to deal with a race condition
+ * when probing for inactive stations. Like ieee80211_mgmt_output
+ * we must clean up any node reference on error; however we
+ * can safely just unref it as we know it will never be the
+ * last reference to the node.
+ */
+int
+ieee80211_send_nulldata(struct ieee80211_node *ni)
+{
+ struct ieee80211vap *vap = ni->ni_vap;
+ struct ieee80211com *ic = ni->ni_ic;
+ struct mbuf *m;
+ struct ieee80211_frame *wh;
+ int hdrlen;
+ uint8_t *frm;
+
+ if (vap->iv_state == IEEE80211_S_CAC) {
+ IEEE80211_NOTE(vap, IEEE80211_MSG_OUTPUT | IEEE80211_MSG_DOTH,
+ ni, "block %s frame in CAC state", "null data");
+ ieee80211_unref_node(&ni);
+ vap->iv_stats.is_tx_badstate++;
+ return EIO; /* XXX */
+ }
+
+ if (ni->ni_flags & (IEEE80211_NODE_QOS|IEEE80211_NODE_HT))
+ hdrlen = sizeof(struct ieee80211_qosframe);
+ else
+ hdrlen = sizeof(struct ieee80211_frame);
+ /* NB: only WDS vap's get 4-address frames */
+ if (vap->iv_opmode == IEEE80211_M_WDS)
+ hdrlen += IEEE80211_ADDR_LEN;
+ if (ic->ic_flags & IEEE80211_F_DATAPAD)
+ hdrlen = roundup(hdrlen, sizeof(uint32_t));
+
+ m = ieee80211_getmgtframe(&frm, ic->ic_headroom + hdrlen, 0);
+ if (m == NULL) {
+ /* XXX debug msg */
+ ieee80211_unref_node(&ni);
+ vap->iv_stats.is_tx_nobuf++;
+ return ENOMEM;
+ }
+ KASSERT(M_LEADINGSPACE(m) >= hdrlen,
+ ("leading space %zd", M_LEADINGSPACE(m)));
+ M_PREPEND(m, hdrlen, M_DONTWAIT);
+ if (m == NULL) {
+ /* NB: cannot happen */
+ ieee80211_free_node(ni);
+ return ENOMEM;
+ }
+
+ wh = mtod(m, struct ieee80211_frame *); /* NB: a little lie */
+ if (ni->ni_flags & IEEE80211_NODE_QOS) {
+ const int tid = WME_AC_TO_TID(WME_AC_BE);
+ uint8_t *qos;
+
+ ieee80211_send_setup(ni, m,
+ IEEE80211_FC0_TYPE_DATA | IEEE80211_FC0_SUBTYPE_QOS_NULL,
+ tid, vap->iv_myaddr, ni->ni_macaddr, ni->ni_bssid);
+
+ if (vap->iv_opmode == IEEE80211_M_WDS)
+ qos = ((struct ieee80211_qosframe_addr4 *) wh)->i_qos;
+ else
+ qos = ((struct ieee80211_qosframe *) wh)->i_qos;
+ qos[0] = tid & IEEE80211_QOS_TID;
+ if (ic->ic_wme.wme_wmeChanParams.cap_wmeParams[WME_AC_BE].wmep_noackPolicy)
+ qos[0] |= IEEE80211_QOS_ACKPOLICY_NOACK;
+ qos[1] = 0;
+ } else {
+ ieee80211_send_setup(ni, m,
+ IEEE80211_FC0_TYPE_DATA | IEEE80211_FC0_SUBTYPE_NODATA,
+ IEEE80211_NONQOS_TID,
+ vap->iv_myaddr, ni->ni_macaddr, ni->ni_bssid);
+ }
+ if (vap->iv_opmode != IEEE80211_M_WDS) {
+ /* NB: power management bit is never sent by an AP */
+ if ((ni->ni_flags & IEEE80211_NODE_PWR_MGT) &&
+ vap->iv_opmode != IEEE80211_M_HOSTAP)
+ wh->i_fc[1] |= IEEE80211_FC1_PWR_MGT;
+ }
+ m->m_len = m->m_pkthdr.len = hdrlen;
+ m->m_flags |= M_ENCAP; /* mark encapsulated */
+
+ M_WME_SETAC(m, WME_AC_BE);
+
+ IEEE80211_NODE_STAT(ni, tx_data);
+
+ IEEE80211_NOTE(vap, IEEE80211_MSG_DEBUG | IEEE80211_MSG_DUMPPKTS, ni,
+ "send %snull data frame on channel %u, pwr mgt %s",
+ ni->ni_flags & IEEE80211_NODE_QOS ? "QoS " : "",
+ ieee80211_chan2ieee(ic, ic->ic_curchan),
+ wh->i_fc[1] & IEEE80211_FC1_PWR_MGT ? "ena" : "dis");
+
+ return ic->ic_raw_xmit(ni, m, NULL);
+}
+
+/*
+ * Assign priority to a frame based on any vlan tag assigned
+ * to the station and/or any Diffserv setting in an IP header.
+ * Finally, if an ACM policy is setup (in station mode) it's
+ * applied.
+ */
+int
+ieee80211_classify(struct ieee80211_node *ni, struct mbuf *m)
+{
+ const struct ether_header *eh = mtod(m, struct ether_header *);
+ int v_wme_ac, d_wme_ac, ac;
+
+ /*
+ * Always promote PAE/EAPOL frames to high priority.
+ */
+ if (eh->ether_type == htons(ETHERTYPE_PAE)) {
+ /* NB: mark so others don't need to check header */
+ m->m_flags |= M_EAPOL;
+ ac = WME_AC_VO;
+ goto done;
+ }
+ /*
+ * Non-qos traffic goes to BE.
+ */
+ if ((ni->ni_flags & IEEE80211_NODE_QOS) == 0) {
+ ac = WME_AC_BE;
+ goto done;
+ }
+
+ /*
+ * If node has a vlan tag then all traffic
+ * to it must have a matching tag.
+ */
+ v_wme_ac = 0;
+ if (ni->ni_vlan != 0) {
+ if ((m->m_flags & M_VLANTAG) == 0) {
+ IEEE80211_NODE_STAT(ni, tx_novlantag);
+ return 1;
+ }
+ if (EVL_VLANOFTAG(m->m_pkthdr.ether_vtag) !=
+ EVL_VLANOFTAG(ni->ni_vlan)) {
+ IEEE80211_NODE_STAT(ni, tx_vlanmismatch);
+ return 1;
+ }
+ /* map vlan priority to AC */
+ v_wme_ac = TID_TO_WME_AC(EVL_PRIOFTAG(ni->ni_vlan));
+ }
+
+ /* XXX m_copydata may be too slow for fast path */
+#ifdef INET
+ if (eh->ether_type == htons(ETHERTYPE_IP)) {
+ uint8_t tos;
+ /*
+ * IP frame, map the DSCP bits from the TOS field.
+ */
+ /* NB: ip header may not be in first mbuf */
+ m_copydata(m, sizeof(struct ether_header) +
+ offsetof(struct ip, ip_tos), sizeof(tos), &tos);
+ tos >>= 5; /* NB: ECN + low 3 bits of DSCP */
+ d_wme_ac = TID_TO_WME_AC(tos);
+ } else {
+#endif /* INET */
+#ifdef INET6
+ if (eh->ether_type == htons(ETHERTYPE_IPV6)) {
+ uint32_t flow;
+ uint8_t tos;
+ /*
+ * IPv6 frame, map the DSCP bits from the TOS field.
+ */
+ m_copydata(m, sizeof(struct ether_header) +
+ offsetof(struct ip6_hdr, ip6_flow), sizeof(flow),
+ (caddr_t) &flow);
+ tos = (uint8_t)(ntohl(flow) >> 20);
+ tos >>= 5; /* NB: ECN + low 3 bits of DSCP */
+ d_wme_ac = TID_TO_WME_AC(tos);
+ } else {
+#endif /* INET6 */
+ d_wme_ac = WME_AC_BE;
+#ifdef INET6
+ }
+#endif
+#ifdef INET
+ }
+#endif
+ /*
+ * Use highest priority AC.
+ */
+ if (v_wme_ac > d_wme_ac)
+ ac = v_wme_ac;
+ else
+ ac = d_wme_ac;
+
+ /*
+ * Apply ACM policy.
+ */
+ if (ni->ni_vap->iv_opmode == IEEE80211_M_STA) {
+ static const int acmap[4] = {
+ WME_AC_BK, /* WME_AC_BE */
+ WME_AC_BK, /* WME_AC_BK */
+ WME_AC_BE, /* WME_AC_VI */
+ WME_AC_VI, /* WME_AC_VO */
+ };
+ struct ieee80211com *ic = ni->ni_ic;
+
+ while (ac != WME_AC_BK &&
+ ic->ic_wme.wme_wmeBssChanParams.cap_wmeParams[ac].wmep_acm)
+ ac = acmap[ac];
+ }
+done:
+ M_WME_SETAC(m, ac);
+ return 0;
+}
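
A worked restatement of the TOS handling above: the byte is shifted right 5 to keep only the IP precedence bits, which then index the usual 802.1d TID to WME access-category mapping (restated here from the standard definition; illustrative only):

#include <stdio.h>

enum { WME_AC_BE, WME_AC_BK, WME_AC_VI, WME_AC_VO };

static int
tid_to_ac(int tid)              /* 802.1d UP -> WME AC */
{
        static const int map[8] = {
                WME_AC_BE, WME_AC_BK, WME_AC_BK, WME_AC_BE,
                WME_AC_VI, WME_AC_VI, WME_AC_VO, WME_AC_VO,
        };
        return map[tid & 7];
}

int
main(void)
{
        unsigned char tos = 0xb8;       /* DSCP EF (46), a VoIP marking */

        tos >>= 5;                      /* keep IP precedence: 5 */
        printf("ac %d\n", tid_to_ac(tos));      /* 2 = WME_AC_VI */
        return 0;
}
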
+
+/*
+ * Ensure there is sufficient contiguous space to encapsulate the
+ * 802.11 data frame. If room isn't already there, arrange for it.
+ * Drivers and cipher modules assume we have done the necessary work
+ * and fail rudely if they don't find the space they need.
+ */
+struct mbuf *
+ieee80211_mbuf_adjust(struct ieee80211vap *vap, int hdrsize,
+ struct ieee80211_key *key, struct mbuf *m)
+{
+#define TO_BE_RECLAIMED (sizeof(struct ether_header) - sizeof(struct llc))
+ int needed_space = vap->iv_ic->ic_headroom + hdrsize;
+
+ if (key != NULL) {
+ /* XXX belongs in crypto code? */
+ needed_space += key->wk_cipher->ic_header;
+ /* XXX frags */
+ /*
+ * When crypto is being done in the host we must ensure
+ * the data are writable for the cipher routines; clone
+ * a writable mbuf chain.
+ * XXX handle SWMIC specially
+ */
+ if (key->wk_flags & (IEEE80211_KEY_SWENCRYPT|IEEE80211_KEY_SWENMIC)) {
+ m = m_unshare(m, M_NOWAIT);
+ if (m == NULL) {
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_OUTPUT,
+ "%s: cannot get writable mbuf\n", __func__);
+ vap->iv_stats.is_tx_nobuf++; /* XXX new stat */
+ return NULL;
+ }
+ }
+ }
+ /*
+ * We know we are called just before stripping an Ethernet
+ * header and prepending an LLC header. This means we know
+ * there will be
+ * sizeof(struct ether_header) - sizeof(struct llc)
+ * bytes recovered to which we need additional space for the
+ * 802.11 header and any crypto header.
+ */
+ /* XXX check trailing space and copy instead? */
+ if (M_LEADINGSPACE(m) < needed_space - TO_BE_RECLAIMED) {
+ struct mbuf *n = m_gethdr(M_NOWAIT, m->m_type);
+ if (n == NULL) {
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_OUTPUT,
+ "%s: cannot expand storage\n", __func__);
+ vap->iv_stats.is_tx_nobuf++;
+ m_freem(m);
+ return NULL;
+ }
+ KASSERT(needed_space <= MHLEN,
+ ("not enough room, need %u got %zu\n", needed_space, MHLEN));
+ /*
+ * Setup new mbuf to have leading space to prepend the
+ * 802.11 header and any crypto header bits that are
+ * required (the latter are added when the driver calls
+ * back to ieee80211_crypto_encap to do crypto encapsulation).
+ */
+ /* NB: must be first 'cuz it clobbers m_data */
+ m_move_pkthdr(n, m);
+ n->m_len = 0; /* NB: m_gethdr does not set */
+ n->m_data += needed_space;
+ /*
+ * Pull up Ethernet header to create the expected layout.
+ * We could use m_pullup but that's overkill (i.e. we don't
+ * need the actual data) and it cannot fail so do it inline
+ * for speed.
+ */
+ /* NB: struct ether_header is known to be contiguous */
+ n->m_len += sizeof(struct ether_header);
+ m->m_len -= sizeof(struct ether_header);
+ m->m_data += sizeof(struct ether_header);
+ /*
+ * Replace the head of the chain.
+ */
+ n->m_next = m;
+ m = n;
+ }
+ return m;
+#undef TO_BE_RECLAIMED
+}
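
A back-of-envelope check of the space calculation: stripping the 14-byte Ethernet header and prepending the 8-byte LLC/SNAP header recovers TO_BE_RECLAIMED = 6 bytes, so a QoS data frame with a CCMP header needs 26 + 8 - 6 = 28 bytes of leading space when ic_headroom is 0 (sizes asserted here rather than taken from the headers):

#include <stdio.h>

int
main(void)
{
        int ether_hdr = 14, llc_snap = 8;       /* stripped / prepended */
        int qos_hdr = 26, ccmp_hdr = 8;         /* 802.11 QoS + CCMP IV */
        int headroom = 0;                       /* driver ic_headroom */

        int needed = headroom + qos_hdr + ccmp_hdr;
        int reclaimed = ether_hdr - llc_snap;   /* TO_BE_RECLAIMED = 6 */
        printf("need %d bytes of leading space\n", needed - reclaimed);
        return 0;                               /* prints 28 */
}
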
+
+/*
+ * Return the transmit key to use in sending a unicast frame.
+ * If a unicast key is set we use that. When no unicast key is set
+ * we fall back to the default transmit key.
+ */
+static __inline struct ieee80211_key *
+ieee80211_crypto_getucastkey(struct ieee80211vap *vap,
+ struct ieee80211_node *ni)
+{
+ if (IEEE80211_KEY_UNDEFINED(&ni->ni_ucastkey)) {
+ if (vap->iv_def_txkey == IEEE80211_KEYIX_NONE ||
+ IEEE80211_KEY_UNDEFINED(&vap->iv_nw_keys[vap->iv_def_txkey]))
+ return NULL;
+ return &vap->iv_nw_keys[vap->iv_def_txkey];
+ } else {
+ return &ni->ni_ucastkey;
+ }
+}
+
+/*
+ * Return the transmit key to use in sending a multicast frame.
+ * Multicast traffic always uses the group key which is installed as
+ * the default tx key.
+ */
+static __inline struct ieee80211_key *
+ieee80211_crypto_getmcastkey(struct ieee80211vap *vap,
+ struct ieee80211_node *ni)
+{
+ if (vap->iv_def_txkey == IEEE80211_KEYIX_NONE ||
+ IEEE80211_KEY_UNDEFINED(&vap->iv_nw_keys[vap->iv_def_txkey]))
+ return NULL;
+ return &vap->iv_nw_keys[vap->iv_def_txkey];
+}
+
+/*
+ * Encapsulate an outbound data frame. The mbuf chain is updated.
+ * If an error is encountered NULL is returned. The caller is required
+ * to provide a node reference and pullup the ethernet header in the
+ * first mbuf.
+ *
+ * NB: Packet is assumed to be processed by ieee80211_classify which
+ * marked EAPOL frames w/ M_EAPOL.
+ */
+struct mbuf *
+ieee80211_encap(struct ieee80211vap *vap, struct ieee80211_node *ni,
+ struct mbuf *m)
+{
+#define WH4(wh) ((struct ieee80211_frame_addr4 *)(wh))
+ struct ieee80211com *ic = ni->ni_ic;
+#ifdef IEEE80211_SUPPORT_MESH
+ struct ieee80211_mesh_state *ms = vap->iv_mesh;
+ struct ieee80211_meshcntl_ae10 *mc;
+#endif
+ struct ether_header eh;
+ struct ieee80211_frame *wh;
+ struct ieee80211_key *key;
+ struct llc *llc;
+ int hdrsize, hdrspace, datalen, addqos, txfrag, is4addr;
+ ieee80211_seq seqno;
+ int meshhdrsize, meshae;
+ uint8_t *qos;
+
+ /*
+ * Copy existing Ethernet header to a safe place. The
+ * rest of the code assumes it's ok to strip it when
+ * reorganizing state for the final encapsulation.
+ */
+ KASSERT(m->m_len >= sizeof(eh), ("no ethernet header!"));
+ ETHER_HEADER_COPY(&eh, mtod(m, caddr_t));
+
+ /*
+ * Ensure space for additional headers. First identify
+ * transmit key to use in calculating any buffer adjustments
+ * required. This is also used below to do privacy
+ * encapsulation work. Then calculate the 802.11 header
+ * size and any padding required by the driver.
+ *
+ * Note key may be NULL if we fall back to the default
+ * transmit key and that is not set. In that case the
+ * buffer may not be expanded as needed by the cipher
+ * routines, but they will/should discard it.
+ */
+ if (vap->iv_flags & IEEE80211_F_PRIVACY) {
+ if (vap->iv_opmode == IEEE80211_M_STA ||
+ !IEEE80211_IS_MULTICAST(eh.ether_dhost) ||
+ (vap->iv_opmode == IEEE80211_M_WDS &&
+ (vap->iv_flags_ext & IEEE80211_FEXT_WDSLEGACY)))
+ key = ieee80211_crypto_getucastkey(vap, ni);
+ else
+ key = ieee80211_crypto_getmcastkey(vap, ni);
+ if (key == NULL && (m->m_flags & M_EAPOL) == 0) {
+ IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_CRYPTO,
+ eh.ether_dhost,
+ "no default transmit key (%s) deftxkey %u",
+ __func__, vap->iv_def_txkey);
+ vap->iv_stats.is_tx_nodefkey++;
+ goto bad;
+ }
+ } else
+ key = NULL;
+ /*
+ * XXX Some ap's don't handle QoS-encapsulated EAPOL
+ * frames so suppress use. This may be an issue if other
+ * ap's require all data frames to be QoS-encapsulated
+ * once negotiated in which case we'll need to make this
+ * configurable.
+ */
+ addqos = (ni->ni_flags & (IEEE80211_NODE_QOS|IEEE80211_NODE_HT)) &&
+ (m->m_flags & M_EAPOL) == 0;
+ if (addqos)
+ hdrsize = sizeof(struct ieee80211_qosframe);
+ else
+ hdrsize = sizeof(struct ieee80211_frame);
+#ifdef IEEE80211_SUPPORT_MESH
+ if (vap->iv_opmode == IEEE80211_M_MBSS) {
+ /*
+ * Mesh data frames are encapsulated according to the
+ * rules of Section 11B.8.5 (p.139 of D3.0 spec).
+ * o Group Addressed data (aka multicast) originating
+ * at the local sta are sent w/ 3-address format and
+ * address extension mode 00
+ * o Individually Addressed data (aka unicast) originating
+ * at the local sta are sent w/ 4-address format and
+ * address extension mode 00
+ * o Group Addressed data forwarded from a non-mesh sta are
+ * sent w/ 3-address format and address extension mode 01
+ * o Individually Address data from another sta are sent
+ * w/ 4-address format and address extension mode 10
+ */
+ is4addr = 0; /* NB: don't use, disable */
+ if (!IEEE80211_IS_MULTICAST(eh.ether_dhost))
+ hdrsize += IEEE80211_ADDR_LEN; /* unicast are 4-addr */
+ meshhdrsize = sizeof(struct ieee80211_meshcntl);
+ /* XXX defines for AE modes */
+ if (IEEE80211_ADDR_EQ(eh.ether_shost, vap->iv_myaddr)) {
+ if (!IEEE80211_IS_MULTICAST(eh.ether_dhost))
+ meshae = 0;
+ else
+ meshae = 4; /* NB: pseudo */
+ } else if (IEEE80211_IS_MULTICAST(eh.ether_dhost)) {
+ meshae = 1;
+ meshhdrsize += 1*IEEE80211_ADDR_LEN;
+ } else {
+ meshae = 2;
+ meshhdrsize += 2*IEEE80211_ADDR_LEN;
+ }
+ } else {
+#endif
+ /*
+ * 4-address frames need to be generated for:
+ * o packets sent through a WDS vap (IEEE80211_M_WDS)
+ * o packets sent through a vap marked for relaying
+ * (e.g. a station operating with dynamic WDS)
+ */
+ is4addr = vap->iv_opmode == IEEE80211_M_WDS ||
+ ((vap->iv_flags_ext & IEEE80211_FEXT_4ADDR) &&
+ !IEEE80211_ADDR_EQ(eh.ether_shost, vap->iv_myaddr));
+ if (is4addr)
+ hdrsize += IEEE80211_ADDR_LEN;
+ meshhdrsize = meshae = 0;
+#ifdef IEEE80211_SUPPORT_MESH
+ }
+#endif
+ /*
+ * Honor driver DATAPAD requirement.
+ */
+ if (ic->ic_flags & IEEE80211_F_DATAPAD)
+ hdrspace = roundup(hdrsize, sizeof(uint32_t));
+ else
+ hdrspace = hdrsize;
+
+ if (__predict_true((m->m_flags & M_FF) == 0)) {
+ /*
+ * Normal frame.
+ */
+ m = ieee80211_mbuf_adjust(vap, hdrspace + meshhdrsize, key, m);
+ if (m == NULL) {
+ /* NB: ieee80211_mbuf_adjust handles msgs+statistics */
+ goto bad;
+ }
+ /* NB: this could be optimized 'cuz of ieee80211_mbuf_adjust */
+ m_adj(m, sizeof(struct ether_header) - sizeof(struct llc));
+ llc = mtod(m, struct llc *);
+ llc->llc_dsap = llc->llc_ssap = LLC_SNAP_LSAP;
+ llc->llc_control = LLC_UI;
+ llc->llc_snap.org_code[0] = 0;
+ llc->llc_snap.org_code[1] = 0;
+ llc->llc_snap.org_code[2] = 0;
+ llc->llc_snap.ether_type = eh.ether_type;
+ } else {
+#ifdef IEEE80211_SUPPORT_SUPERG
+ /*
+ * Aggregated frame.
+ */
+ m = ieee80211_ff_encap(vap, m, hdrspace + meshhdrsize, key);
+ if (m == NULL)
+#endif
+ goto bad;
+ }
+ datalen = m->m_pkthdr.len; /* NB: w/o 802.11 header */
+
+ M_PREPEND(m, hdrspace + meshhdrsize, M_DONTWAIT);
+ if (m == NULL) {
+ vap->iv_stats.is_tx_nobuf++;
+ goto bad;
+ }
+ wh = mtod(m, struct ieee80211_frame *);
+ wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_DATA;
+ *(uint16_t *)wh->i_dur = 0;
+ qos = NULL; /* NB: quiet compiler */
+ if (is4addr) {
+ wh->i_fc[1] = IEEE80211_FC1_DIR_DSTODS;
+ IEEE80211_ADDR_COPY(wh->i_addr1, ni->ni_macaddr);
+ IEEE80211_ADDR_COPY(wh->i_addr2, vap->iv_myaddr);
+ IEEE80211_ADDR_COPY(wh->i_addr3, eh.ether_dhost);
+ IEEE80211_ADDR_COPY(WH4(wh)->i_addr4, eh.ether_shost);
+ } else switch (vap->iv_opmode) {
+ case IEEE80211_M_STA:
+ wh->i_fc[1] = IEEE80211_FC1_DIR_TODS;
+ IEEE80211_ADDR_COPY(wh->i_addr1, ni->ni_bssid);
+ IEEE80211_ADDR_COPY(wh->i_addr2, eh.ether_shost);
+ IEEE80211_ADDR_COPY(wh->i_addr3, eh.ether_dhost);
+ break;
+ case IEEE80211_M_IBSS:
+ case IEEE80211_M_AHDEMO:
+ wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
+ IEEE80211_ADDR_COPY(wh->i_addr1, eh.ether_dhost);
+ IEEE80211_ADDR_COPY(wh->i_addr2, eh.ether_shost);
+ /*
+ * NB: always use the bssid from iv_bss as the
+ * neighbor's may be stale after an ibss merge
+ */
+ IEEE80211_ADDR_COPY(wh->i_addr3, vap->iv_bss->ni_bssid);
+ break;
+ case IEEE80211_M_HOSTAP:
+ wh->i_fc[1] = IEEE80211_FC1_DIR_FROMDS;
+ IEEE80211_ADDR_COPY(wh->i_addr1, eh.ether_dhost);
+ IEEE80211_ADDR_COPY(wh->i_addr2, ni->ni_bssid);
+ IEEE80211_ADDR_COPY(wh->i_addr3, eh.ether_shost);
+ break;
+#ifdef IEEE80211_SUPPORT_MESH
+ case IEEE80211_M_MBSS:
+ /* NB: offset by hdrspace to deal with DATAPAD */
+ mc = (struct ieee80211_meshcntl_ae10 *)
+ (mtod(m, uint8_t *) + hdrspace);
+ switch (meshae) {
+ case 0: /* ucast, no proxy */
+ wh->i_fc[1] = IEEE80211_FC1_DIR_DSTODS;
+ IEEE80211_ADDR_COPY(wh->i_addr1, ni->ni_macaddr);
+ IEEE80211_ADDR_COPY(wh->i_addr2, vap->iv_myaddr);
+ IEEE80211_ADDR_COPY(wh->i_addr3, eh.ether_dhost);
+ IEEE80211_ADDR_COPY(WH4(wh)->i_addr4, eh.ether_shost);
+ mc->mc_flags = 0;
+ qos = ((struct ieee80211_qosframe_addr4 *) wh)->i_qos;
+ break;
+ case 4: /* mcast, no proxy */
+ wh->i_fc[1] = IEEE80211_FC1_DIR_FROMDS;
+ IEEE80211_ADDR_COPY(wh->i_addr1, eh.ether_dhost);
+ IEEE80211_ADDR_COPY(wh->i_addr2, vap->iv_myaddr);
+ IEEE80211_ADDR_COPY(wh->i_addr3, eh.ether_shost);
+ mc->mc_flags = 0; /* NB: AE is really 0 */
+ qos = ((struct ieee80211_qosframe *) wh)->i_qos;
+ break;
+ case 1: /* mcast, proxy */
+ wh->i_fc[1] = IEEE80211_FC1_DIR_FROMDS;
+ IEEE80211_ADDR_COPY(wh->i_addr1, eh.ether_dhost);
+ IEEE80211_ADDR_COPY(wh->i_addr2, vap->iv_myaddr);
+ IEEE80211_ADDR_COPY(wh->i_addr3, vap->iv_myaddr);
+ mc->mc_flags = 1;
+ IEEE80211_ADDR_COPY(mc->mc_addr4, eh.ether_shost);
+ qos = ((struct ieee80211_qosframe *) wh)->i_qos;
+ break;
+ case 2: /* ucast, proxy */
+ wh->i_fc[1] = IEEE80211_FC1_DIR_DSTODS;
+ IEEE80211_ADDR_COPY(wh->i_addr1, ni->ni_macaddr);
+ IEEE80211_ADDR_COPY(wh->i_addr2, vap->iv_myaddr);
+ /* XXX not right, need MeshDA */
+ IEEE80211_ADDR_COPY(wh->i_addr3, eh.ether_dhost);
+ /* XXX assume are MeshSA */
+ IEEE80211_ADDR_COPY(WH4(wh)->i_addr4, vap->iv_myaddr);
+ mc->mc_flags = 2;
+ IEEE80211_ADDR_COPY(mc->mc_addr4, eh.ether_dhost);
+ IEEE80211_ADDR_COPY(mc->mc_addr5, eh.ether_shost);
+ qos = ((struct ieee80211_qosframe_addr4 *) wh)->i_qos;
+ break;
+ default:
+ KASSERT(0, ("meshae %d", meshae));
+ break;
+ }
+ mc->mc_ttl = ms->ms_ttl;
+ ms->ms_seq++;
+ LE_WRITE_4(mc->mc_seq, ms->ms_seq);
+ break;
+#endif
+ case IEEE80211_M_WDS: /* NB: is4addr should always be true */
+ default:
+ goto bad;
+ }
+ if (m->m_flags & M_MORE_DATA)
+ wh->i_fc[1] |= IEEE80211_FC1_MORE_DATA;
+ if (addqos) {
+ int ac, tid;
+
+ if (is4addr) {
+ qos = ((struct ieee80211_qosframe_addr4 *) wh)->i_qos;
+ /* NB: mesh case handled earlier */
+ } else if (vap->iv_opmode != IEEE80211_M_MBSS)
+ qos = ((struct ieee80211_qosframe *) wh)->i_qos;
+ ac = M_WME_GETAC(m);
+ /* map from access class/queue to 11e header priority value */
+ tid = WME_AC_TO_TID(ac);
+ qos[0] = tid & IEEE80211_QOS_TID;
+ if (ic->ic_wme.wme_wmeChanParams.cap_wmeParams[ac].wmep_noackPolicy)
+ qos[0] |= IEEE80211_QOS_ACKPOLICY_NOACK;
+ qos[1] = 0;
+ wh->i_fc[0] |= IEEE80211_FC0_SUBTYPE_QOS;
+
+ if ((m->m_flags & M_AMPDU_MPDU) == 0) {
+ /*
+ * NB: don't assign a sequence # to potential
+ * aggregates; we expect this happens at the
+ * point the frame comes off any aggregation q
+ * as otherwise we may introduce holes in the
+ * BA sequence space and/or make window accounting
+ * more difficult.
+ *
+ * XXX may want to control this with a driver
+ * capability; this may also change when we pull
+ * aggregation up into net80211
+ */
+ seqno = ni->ni_txseqs[tid]++;
+ *(uint16_t *)wh->i_seq =
+ htole16(seqno << IEEE80211_SEQ_SEQ_SHIFT);
+ M_SEQNO_SET(m, seqno);
+ }
+ } else {
+ seqno = ni->ni_txseqs[IEEE80211_NONQOS_TID]++;
+ *(uint16_t *)wh->i_seq =
+ htole16(seqno << IEEE80211_SEQ_SEQ_SHIFT);
+ M_SEQNO_SET(m, seqno);
+ }
+
+
+ /* check if xmit fragmentation is required */
+ txfrag = (m->m_pkthdr.len > vap->iv_fragthreshold &&
+ !IEEE80211_IS_MULTICAST(wh->i_addr1) &&
+ (vap->iv_caps & IEEE80211_C_TXFRAG) &&
+ (m->m_flags & (M_FF | M_AMPDU_MPDU)) == 0);
+ if (key != NULL) {
+ /*
+ * IEEE 802.1X: send EAPOL frames always in the clear.
+ * WPA/WPA2: encrypt EAPOL keys when pairwise keys are set.
+ */
+ if ((m->m_flags & M_EAPOL) == 0 ||
+ ((vap->iv_flags & IEEE80211_F_WPA) &&
+ (vap->iv_opmode == IEEE80211_M_STA ?
+ !IEEE80211_KEY_UNDEFINED(key) :
+ !IEEE80211_KEY_UNDEFINED(&ni->ni_ucastkey)))) {
+ wh->i_fc[1] |= IEEE80211_FC1_WEP;
+ if (!ieee80211_crypto_enmic(vap, key, m, txfrag)) {
+ IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_OUTPUT,
+ eh.ether_dhost,
+ "%s", "enmic failed, discard frame");
+ vap->iv_stats.is_crypto_enmicfail++;
+ goto bad;
+ }
+ }
+ }
+ if (txfrag && !ieee80211_fragment(vap, m, hdrsize,
+ key != NULL ? key->wk_cipher->ic_header : 0, vap->iv_fragthreshold))
+ goto bad;
+
+ m->m_flags |= M_ENCAP; /* mark encapsulated */
+
+ IEEE80211_NODE_STAT(ni, tx_data);
+ if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
+ IEEE80211_NODE_STAT(ni, tx_mcast);
+ m->m_flags |= M_MCAST;
+ } else
+ IEEE80211_NODE_STAT(ni, tx_ucast);
+ IEEE80211_NODE_STAT_ADD(ni, tx_bytes, datalen);
+
+ return m;
+bad:
+ if (m != NULL)
+ m_freem(m);
+ return NULL;
+#undef WH4
+}
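+
+/*
+ * Worked example of the header sizing above (illustrative note, not
+ * part of the original source): a QoS data frame carries a 26 byte
+ * 802.11 header (24 byte base + 2 byte QoS control).  If the driver
+ * sets IEEE80211_F_DATAPAD the payload must be 32-bit aligned, so
+ * hdrspace = roundup(26, sizeof(uint32_t)) = 28, leaving 2 pad bytes
+ * between the 802.11 header and the LLC/SNAP encapsulation.
+ */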
+
+/*
+ * Fragment the frame according to the specified mtu.
+ * The size of the 802.11 header (w/o padding) is provided
+ * so we don't need to recalculate it. We create a new
+ * mbuf for each fragment and chain it through m_nextpkt;
+ * we might be able to optimize this by reusing the original
+ * packet's mbufs but that is significantly more complicated.
+ */
+static int
+ieee80211_fragment(struct ieee80211vap *vap, struct mbuf *m0,
+ u_int hdrsize, u_int ciphdrsize, u_int mtu)
+{
+ struct ieee80211_frame *wh, *whf;
+ struct mbuf *m, *prev, *next;
+ u_int totalhdrsize, fragno, fragsize, off, remainder, payload;
+
+ KASSERT(m0->m_nextpkt == NULL, ("mbuf already chained?"));
+ KASSERT(m0->m_pkthdr.len > mtu,
+ ("pktlen %u mtu %u", m0->m_pkthdr.len, mtu));
+
+ wh = mtod(m0, struct ieee80211_frame *);
+ /* NB: mark the first frag; it will be propagated below */
+ wh->i_fc[1] |= IEEE80211_FC1_MORE_FRAG;
+ totalhdrsize = hdrsize + ciphdrsize;
+ fragno = 1;
+ off = mtu - ciphdrsize;
+ remainder = m0->m_pkthdr.len - off;
+ prev = m0;
+ do {
+ fragsize = totalhdrsize + remainder;
+ if (fragsize > mtu)
+ fragsize = mtu;
+ /* XXX fragsize can be >2048! */
+ KASSERT(fragsize < MCLBYTES,
+ ("fragment size %u too big!", fragsize));
+ if (fragsize > MHLEN)
+ m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
+ else
+ m = m_gethdr(M_DONTWAIT, MT_DATA);
+ if (m == NULL)
+ goto bad;
+ /* leave room to prepend any cipher header */
+ m_align(m, fragsize - ciphdrsize);
+
+ /*
+ * Form the header in the fragment. Note that since
+ * we mark the first fragment with the MORE_FRAG bit
+ * it automatically is propagated to each fragment; we
+ * need only clear it on the last fragment (done below).
+ */
+ whf = mtod(m, struct ieee80211_frame *);
+ memcpy(whf, wh, hdrsize);
+ *(uint16_t *)&whf->i_seq[0] |= htole16(
+ (fragno & IEEE80211_SEQ_FRAG_MASK) <<
+ IEEE80211_SEQ_FRAG_SHIFT);
+ fragno++;
+
+ payload = fragsize - totalhdrsize;
+ /* NB: destination is known to be contiguous */
+ m_copydata(m0, off, payload, mtod(m, uint8_t *) + hdrsize);
+ m->m_len = hdrsize + payload;
+ m->m_pkthdr.len = hdrsize + payload;
+ m->m_flags |= M_FRAG;
+
+ /* chain up the fragment */
+ prev->m_nextpkt = m;
+ prev = m;
+
+ /* deduct fragment just formed */
+ remainder -= payload;
+ off += payload;
+ } while (remainder != 0);
+
+ /* set the last fragment */
+ m->m_flags |= M_LASTFRAG;
+ whf->i_fc[1] &= ~IEEE80211_FC1_MORE_FRAG;
+
+ /* strip first mbuf now that everything has been copied */
+ m_adj(m0, -(m0->m_pkthdr.len - (mtu - ciphdrsize)));
+ m0->m_flags |= M_FIRSTFRAG | M_FRAG;
+
+ vap->iv_stats.is_tx_fragframes++;
+ vap->iv_stats.is_tx_frags += fragno-1;
+
+ return 1;
+bad:
+ /* reclaim fragments but leave original frame for caller to free */
+ for (m = m0->m_nextpkt; m != NULL; m = next) {
+ next = m->m_nextpkt;
+ m->m_nextpkt = NULL; /* XXX paranoid */
+ m_freem(m);
+ }
+ m0->m_nextpkt = NULL;
+ return 0;
+}
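+
+/*
+ * Worked example of the fragmentation arithmetic above (illustrative,
+ * with assumed parameter values): take hdrsize = 24, an 8 byte CCMP
+ * cipher header (ciphdrsize) and a 1000 byte fragmentation threshold
+ * (mtu).  A 2400 byte frame (802.11 header included) splits so that
+ * the first fragment keeps mtu - ciphdrsize = 992 bytes of m0, the
+ * second carries payload = mtu - totalhdrsize = 968 bytes, and the
+ * third carries the remaining 440 bytes: three fragments in total,
+ * each with room left for the cipher header to be prepended later.
+ */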
+
+/*
+ * Add a supported rates element id to a frame.
+ */
+uint8_t *
+ieee80211_add_rates(uint8_t *frm, const struct ieee80211_rateset *rs)
+{
+ int nrates;
+
+ *frm++ = IEEE80211_ELEMID_RATES;
+ nrates = rs->rs_nrates;
+ if (nrates > IEEE80211_RATE_SIZE)
+ nrates = IEEE80211_RATE_SIZE;
+ *frm++ = nrates;
+ memcpy(frm, rs->rs_rates, nrates);
+ return frm + nrates;
+}
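+
+/*
+ * Byte-level example of the element emitted above (illustrative): for
+ * the 11b rate set 1/2/5.5/11 Mb/s with every rate marked basic,
+ * rs_rates holds { 0x82, 0x84, 0x8b, 0x96 } (units of 500 kb/s, 0x80
+ * flags a basic rate) and the element on the wire is
+ *
+ *	01 04 82 84 8b 96
+ *
+ * i.e. id IEEE80211_ELEMID_RATES (1) followed by a length of 4.
+ */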
+
+/*
+ * Add an extended supported rates element id to a frame.
+ */
+uint8_t *
+ieee80211_add_xrates(uint8_t *frm, const struct ieee80211_rateset *rs)
+{
+ /*
+ * Add an extended supported rates element if operating in 11g mode.
+ */
+ if (rs->rs_nrates > IEEE80211_RATE_SIZE) {
+ int nrates = rs->rs_nrates - IEEE80211_RATE_SIZE;
+ *frm++ = IEEE80211_ELEMID_XRATES;
+ *frm++ = nrates;
+ memcpy(frm, rs->rs_rates + IEEE80211_RATE_SIZE, nrates);
+ frm += nrates;
+ }
+ return frm;
+}
+
+/*
+ * Add an ssid element to a frame.
+ */
+static uint8_t *
+ieee80211_add_ssid(uint8_t *frm, const uint8_t *ssid, u_int len)
+{
+ *frm++ = IEEE80211_ELEMID_SSID;
+ *frm++ = len;
+ memcpy(frm, ssid, len);
+ return frm + len;
+}
+
+/*
+ * Add an erp element to a frame.
+ */
+static uint8_t *
+ieee80211_add_erp(uint8_t *frm, struct ieee80211com *ic)
+{
+ uint8_t erp;
+
+ *frm++ = IEEE80211_ELEMID_ERP;
+ *frm++ = 1;
+ erp = 0;
+ if (ic->ic_nonerpsta != 0)
+ erp |= IEEE80211_ERP_NON_ERP_PRESENT;
+ if (ic->ic_flags & IEEE80211_F_USEPROT)
+ erp |= IEEE80211_ERP_USE_PROTECTION;
+ if (ic->ic_flags & IEEE80211_F_USEBARKER)
+ erp |= IEEE80211_ERP_LONG_PREAMBLE;
+ *frm++ = erp;
+ return frm;
+}
+
+/*
+ * Add a CFParams element to a frame.
+ */
+static uint8_t *
+ieee80211_add_cfparms(uint8_t *frm, struct ieee80211com *ic)
+{
+#define ADDSHORT(frm, v) do { \
+ LE_WRITE_2(frm, v); \
+ frm += 2; \
+} while (0)
+ *frm++ = IEEE80211_ELEMID_CFPARMS;
+ *frm++ = 6;
+ *frm++ = 0; /* CFP count */
+ *frm++ = 2; /* CFP period */
+ ADDSHORT(frm, 0); /* CFP MaxDuration (TU) */
+ ADDSHORT(frm, 0); /* CFP CurRemaining (TU) */
+ return frm;
+#undef ADDSHORT
+}
+
+static __inline uint8_t *
+add_appie(uint8_t *frm, const struct ieee80211_appie *ie)
+{
+ memcpy(frm, ie->ie_data, ie->ie_len);
+ return frm + ie->ie_len;
+}
+
+static __inline uint8_t *
+add_ie(uint8_t *frm, const uint8_t *ie)
+{
+ memcpy(frm, ie, 2 + ie[1]);
+ return frm + 2 + ie[1];
+}
+
+#define WME_OUI_BYTES 0x00, 0x50, 0xf2
+/*
+ * Add a WME information element to a frame.
+ */
+static uint8_t *
+ieee80211_add_wme_info(uint8_t *frm, struct ieee80211_wme_state *wme)
+{
+ static const struct ieee80211_wme_info info = {
+ .wme_id = IEEE80211_ELEMID_VENDOR,
+ .wme_len = sizeof(struct ieee80211_wme_info) - 2,
+ .wme_oui = { WME_OUI_BYTES },
+ .wme_type = WME_OUI_TYPE,
+ .wme_subtype = WME_INFO_OUI_SUBTYPE,
+ .wme_version = WME_VERSION,
+ .wme_info = 0,
+ };
+ memcpy(frm, &info, sizeof(info));
+ return frm + sizeof(info);
+}
+
+/*
+ * Add a WME parameters element to a frame.
+ */
+static uint8_t *
+ieee80211_add_wme_param(uint8_t *frm, struct ieee80211_wme_state *wme)
+{
+#define SM(_v, _f) (((_v) << _f##_S) & _f)
+#define ADDSHORT(frm, v) do { \
+ LE_WRITE_2(frm, v); \
+ frm += 2; \
+} while (0)
+ /* NB: this works 'cuz a param has an info at the front */
+ static const struct ieee80211_wme_info param = {
+ .wme_id = IEEE80211_ELEMID_VENDOR,
+ .wme_len = sizeof(struct ieee80211_wme_param) - 2,
+ .wme_oui = { WME_OUI_BYTES },
+ .wme_type = WME_OUI_TYPE,
+ .wme_subtype = WME_PARAM_OUI_SUBTYPE,
+ .wme_version = WME_VERSION,
+ };
+ int i;
+
+ memcpy(frm, &param, sizeof(param));
+ frm += __offsetof(struct ieee80211_wme_info, wme_info);
+ *frm++ = wme->wme_bssChanParams.cap_info; /* AC info */
+ *frm++ = 0; /* reserved field */
+ for (i = 0; i < WME_NUM_AC; i++) {
+ const struct wmeParams *ac =
+ &wme->wme_bssChanParams.cap_wmeParams[i];
+ *frm++ = SM(i, WME_PARAM_ACI)
+ | SM(ac->wmep_acm, WME_PARAM_ACM)
+ | SM(ac->wmep_aifsn, WME_PARAM_AIFSN)
+ ;
+ *frm++ = SM(ac->wmep_logcwmax, WME_PARAM_LOGCWMAX)
+ | SM(ac->wmep_logcwmin, WME_PARAM_LOGCWMIN)
+ ;
+ ADDSHORT(frm, ac->wmep_txopLimit);
+ }
+ return frm;
+#undef SM
+#undef ADDSHORT
+}
+#undef WME_OUI_BYTES
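+
+/*
+ * Worked example of the SM() packing above (illustrative; assumes the
+ * standard WMM field layout with ACI in bits 5-6, ACM in bit 4 and
+ * AIFSN in bits 0-3 of the first byte, and ECWmax/ECWmin in the
+ * high/low nibble of the second): the default BE record (aci 0,
+ * acm 0, aifsn 3, logcwmin 4, logcwmax 10) packs to the bytes
+ * 0x03 0xa4, followed by the little-endian TXOP limit.
+ */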
+
+/*
+ * Add an 11h Power Constraint element to a frame.
+ */
+static uint8_t *
+ieee80211_add_powerconstraint(uint8_t *frm, struct ieee80211vap *vap)
+{
+ const struct ieee80211_channel *c = vap->iv_bss->ni_chan;
+ /* XXX per-vap tx power limit? */
+ int8_t limit = vap->iv_ic->ic_txpowlimit / 2;
+
+ frm[0] = IEEE80211_ELEMID_PWRCNSTR;
+ frm[1] = 1;
+ frm[2] = c->ic_maxregpower > limit ? c->ic_maxregpower - limit : 0;
+ return frm + 3;
+}
+
+/*
+ * Add an 11h Power Capability element to a frame.
+ */
+static uint8_t *
+ieee80211_add_powercapability(uint8_t *frm, const struct ieee80211_channel *c)
+{
+ frm[0] = IEEE80211_ELEMID_PWRCAP;
+ frm[1] = 2;
+ frm[2] = c->ic_minpower;
+ frm[3] = c->ic_maxpower;
+ return frm + 4;
+}
+
+/*
+ * Add an 11h Supported Channels element to a frame.
+ */
+static uint8_t *
+ieee80211_add_supportedchannels(uint8_t *frm, struct ieee80211com *ic)
+{
+ static const int ielen = 26;
+
+ frm[0] = IEEE80211_ELEMID_SUPPCHAN;
+ frm[1] = ielen;
+ /* XXX not correct */
+ memcpy(frm+2, ic->ic_chan_avail, ielen);
+ return frm + 2 + ielen;
+}
+
+/*
+ * Add an 11h Channel Switch Announcement element to a frame.
+ * Note that we use the per-vap CSA count to adjust the global
+ * counter so we can use this routine to form probe response
+ * frames and get the current count.
+ */
+static uint8_t *
+ieee80211_add_csa(uint8_t *frm, struct ieee80211vap *vap)
+{
+ struct ieee80211com *ic = vap->iv_ic;
+ struct ieee80211_csa_ie *csa = (struct ieee80211_csa_ie *) frm;
+
+ csa->csa_ie = IEEE80211_ELEMID_CSA;
+ csa->csa_len = 3;
+ csa->csa_mode = 1; /* XXX force quiet on channel */
+ csa->csa_newchan = ieee80211_chan2ieee(ic, ic->ic_csa_newchan);
+ csa->csa_count = ic->ic_csa_count - vap->iv_csa_count;
+ return frm + sizeof(*csa);
+}
+
+/*
+ * Add an 11h country information element to a frame.
+ */
+static uint8_t *
+ieee80211_add_countryie(uint8_t *frm, struct ieee80211com *ic)
+{
+
+ if (ic->ic_countryie == NULL ||
+ ic->ic_countryie_chan != ic->ic_bsschan) {
+ /*
+ * Handle lazy construction of ie. This is done on
+ * first use and after a channel change that requires
+ * re-calculation.
+ */
+ if (ic->ic_countryie != NULL)
+ free(ic->ic_countryie, M_80211_NODE_IE);
+ ic->ic_countryie = ieee80211_alloc_countryie(ic);
+ if (ic->ic_countryie == NULL)
+ return frm;
+ ic->ic_countryie_chan = ic->ic_bsschan;
+ }
+ return add_appie(frm, ic->ic_countryie);
+}
+
+/*
+ * Send a probe request frame with the specified ssid
+ * and any optional information element data.
+ */
+int
+ieee80211_send_probereq(struct ieee80211_node *ni,
+ const uint8_t sa[IEEE80211_ADDR_LEN],
+ const uint8_t da[IEEE80211_ADDR_LEN],
+ const uint8_t bssid[IEEE80211_ADDR_LEN],
+ const uint8_t *ssid, size_t ssidlen)
+{
+ struct ieee80211vap *vap = ni->ni_vap;
+ struct ieee80211com *ic = ni->ni_ic;
+ const struct ieee80211_txparam *tp;
+ struct ieee80211_bpf_params params;
+ struct ieee80211_frame *wh;
+ const struct ieee80211_rateset *rs;
+ struct mbuf *m;
+ uint8_t *frm;
+
+ if (vap->iv_state == IEEE80211_S_CAC) {
+ IEEE80211_NOTE(vap, IEEE80211_MSG_OUTPUT, ni,
+ "block %s frame in CAC state", "probe request");
+ vap->iv_stats.is_tx_badstate++;
+ return EIO; /* XXX */
+ }
+
+ /*
+ * Hold a reference on the node so it doesn't go away until after
+ * the xmit is complete all the way in the driver. On error we
+ * will remove our reference.
+ */
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_NODE,
+ "ieee80211_ref_node (%s:%u) %p<%s> refcnt %d\n",
+ __func__, __LINE__,
+ ni, ether_sprintf(ni->ni_macaddr),
+ ieee80211_node_refcnt(ni)+1);
+ ieee80211_ref_node(ni);
+
+ /*
+ * prreq frame format
+ * [tlv] ssid
+ * [tlv] supported rates
+ * [tlv] RSN (optional)
+ * [tlv] extended supported rates
+ * [tlv] WPA (optional)
+ * [tlv] user-specified ie's
+ */
+ m = ieee80211_getmgtframe(&frm,
+ ic->ic_headroom + sizeof(struct ieee80211_frame),
+ 2 + IEEE80211_NWID_LEN
+ + 2 + IEEE80211_RATE_SIZE
+ + sizeof(struct ieee80211_ie_wpa)
+ + 2 + (IEEE80211_RATE_MAXSIZE - IEEE80211_RATE_SIZE)
+ + sizeof(struct ieee80211_ie_wpa)
+ + (vap->iv_appie_probereq != NULL ?
+ vap->iv_appie_probereq->ie_len : 0)
+ );
+ if (m == NULL) {
+ vap->iv_stats.is_tx_nobuf++;
+ ieee80211_free_node(ni);
+ return ENOMEM;
+ }
+
+ frm = ieee80211_add_ssid(frm, ssid, ssidlen);
+ rs = ieee80211_get_suprates(ic, ic->ic_curchan);
+ frm = ieee80211_add_rates(frm, rs);
+ if (vap->iv_flags & IEEE80211_F_WPA2) {
+ if (vap->iv_rsn_ie != NULL)
+ frm = add_ie(frm, vap->iv_rsn_ie);
+ /* XXX else complain? */
+ }
+ frm = ieee80211_add_xrates(frm, rs);
+ if (vap->iv_flags & IEEE80211_F_WPA1) {
+ if (vap->iv_wpa_ie != NULL)
+ frm = add_ie(frm, vap->iv_wpa_ie);
+ /* XXX else complain? */
+ }
+ if (vap->iv_appie_probereq != NULL)
+ frm = add_appie(frm, vap->iv_appie_probereq);
+ m->m_pkthdr.len = m->m_len = frm - mtod(m, uint8_t *);
+
+ KASSERT(M_LEADINGSPACE(m) >= sizeof(struct ieee80211_frame),
+ ("leading space %zd", M_LEADINGSPACE(m)));
+ M_PREPEND(m, sizeof(struct ieee80211_frame), M_DONTWAIT);
+ if (m == NULL) {
+ /* NB: cannot happen */
+ ieee80211_free_node(ni);
+ return ENOMEM;
+ }
+
+ wh = mtod(m, struct ieee80211_frame *);
+ ieee80211_send_setup(ni, m,
+ IEEE80211_FC0_TYPE_MGT | IEEE80211_FC0_SUBTYPE_PROBE_REQ,
+ IEEE80211_NONQOS_TID, sa, da, bssid);
+ /* XXX power management? */
+ m->m_flags |= M_ENCAP; /* mark encapsulated */
+
+ M_WME_SETAC(m, WME_AC_BE);
+
+ IEEE80211_NODE_STAT(ni, tx_probereq);
+ IEEE80211_NODE_STAT(ni, tx_mgmt);
+
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_DEBUG | IEEE80211_MSG_DUMPPKTS,
+ "send probe req on channel %u bssid %s ssid \"%.*s\"\n",
+ ieee80211_chan2ieee(ic, ic->ic_curchan), ether_sprintf(bssid),
+ ssidlen, ssid);
+
+ memset(&params, 0, sizeof(params));
+ params.ibp_pri = M_WME_GETAC(m);
+ tp = &vap->iv_txparms[ieee80211_chan2mode(ic->ic_curchan)];
+ params.ibp_rate0 = tp->mgmtrate;
+ if (IEEE80211_IS_MULTICAST(da)) {
+ params.ibp_flags |= IEEE80211_BPF_NOACK;
+ params.ibp_try0 = 1;
+ } else
+ params.ibp_try0 = tp->maxretry;
+ params.ibp_power = ni->ni_txpower;
+ return ic->ic_raw_xmit(ni, m, &params);
+}
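+
+/*
+ * Illustrative caller (hedged sketch, not part of this file): the
+ * scan machinery typically broadcasts a probe request on the current
+ * channel roughly like this, using the first configured desired ssid:
+ */
+#if 0
+	(void) ieee80211_send_probereq(vap->iv_bss, vap->iv_myaddr,
+	    vap->iv_ifp->if_broadcastaddr, vap->iv_ifp->if_broadcastaddr,
+	    vap->iv_des_ssid[0].ssid, vap->iv_des_ssid[0].len);
+#endif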
+
+/*
+ * Calculate capability information for mgt frames.
+ */
+uint16_t
+ieee80211_getcapinfo(struct ieee80211vap *vap, struct ieee80211_channel *chan)
+{
+ struct ieee80211com *ic = vap->iv_ic;
+ uint16_t capinfo;
+
+ KASSERT(vap->iv_opmode != IEEE80211_M_STA, ("station mode"));
+
+ if (vap->iv_opmode == IEEE80211_M_HOSTAP)
+ capinfo = IEEE80211_CAPINFO_ESS;
+ else if (vap->iv_opmode == IEEE80211_M_IBSS)
+ capinfo = IEEE80211_CAPINFO_IBSS;
+ else
+ capinfo = 0;
+ if (vap->iv_flags & IEEE80211_F_PRIVACY)
+ capinfo |= IEEE80211_CAPINFO_PRIVACY;
+ if ((ic->ic_flags & IEEE80211_F_SHPREAMBLE) &&
+ IEEE80211_IS_CHAN_2GHZ(chan))
+ capinfo |= IEEE80211_CAPINFO_SHORT_PREAMBLE;
+ if (ic->ic_flags & IEEE80211_F_SHSLOT)
+ capinfo |= IEEE80211_CAPINFO_SHORT_SLOTTIME;
+ if (IEEE80211_IS_CHAN_5GHZ(chan) && (vap->iv_flags & IEEE80211_F_DOTH))
+ capinfo |= IEEE80211_CAPINFO_SPECTRUM_MGMT;
+ return capinfo;
+}
+
+/*
+ * Send a management frame. The node is for the destination (or ic_bss
+ * when in station mode). Nodes other than ic_bss have their reference
+ * count bumped to reflect our use for an indeterminate time.
+ */
+int
+ieee80211_send_mgmt(struct ieee80211_node *ni, int type, int arg)
+{
+#define HTFLAGS (IEEE80211_NODE_HT | IEEE80211_NODE_HTCOMPAT)
+#define senderr(_x, _v) do { vap->iv_stats._v++; ret = _x; goto bad; } while (0)
+ struct ieee80211vap *vap = ni->ni_vap;
+ struct ieee80211com *ic = ni->ni_ic;
+ struct ieee80211_node *bss = vap->iv_bss;
+ struct ieee80211_bpf_params params;
+ struct mbuf *m;
+ uint8_t *frm;
+ uint16_t capinfo;
+ int has_challenge, is_shared_key, ret, status;
+
+ KASSERT(ni != NULL, ("null node"));
+
+ /*
+ * Hold a reference on the node so it doesn't go away until after
+ * the xmit is complete all the way in the driver. On error we
+ * will remove our reference.
+ */
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_NODE,
+ "ieee80211_ref_node (%s:%u) %p<%s> refcnt %d\n",
+ __func__, __LINE__,
+ ni, ether_sprintf(ni->ni_macaddr),
+ ieee80211_node_refcnt(ni)+1);
+ ieee80211_ref_node(ni);
+
+ memset(&params, 0, sizeof(params));
+ switch (type) {
+
+ case IEEE80211_FC0_SUBTYPE_AUTH:
+ status = arg >> 16;
+ arg &= 0xffff;
+ has_challenge = ((arg == IEEE80211_AUTH_SHARED_CHALLENGE ||
+ arg == IEEE80211_AUTH_SHARED_RESPONSE) &&
+ ni->ni_challenge != NULL);
+
+ /*
+ * Deduce whether we're doing open authentication or
+ * shared key authentication. We do the latter if
+ * we're in the middle of a shared key authentication
+ * handshake or if we're initiating an authentication
+ * request and configured to use shared key.
+ */
+ is_shared_key = has_challenge ||
+ arg >= IEEE80211_AUTH_SHARED_RESPONSE ||
+ (arg == IEEE80211_AUTH_SHARED_REQUEST &&
+ bss->ni_authmode == IEEE80211_AUTH_SHARED);
+
+ m = ieee80211_getmgtframe(&frm,
+ ic->ic_headroom + sizeof(struct ieee80211_frame),
+ 3 * sizeof(uint16_t)
+ + (has_challenge && status == IEEE80211_STATUS_SUCCESS ?
+ sizeof(uint16_t)+IEEE80211_CHALLENGE_LEN : 0)
+ );
+ if (m == NULL)
+ senderr(ENOMEM, is_tx_nobuf);
+
+ ((uint16_t *)frm)[0] =
+ (is_shared_key) ? htole16(IEEE80211_AUTH_ALG_SHARED)
+ : htole16(IEEE80211_AUTH_ALG_OPEN);
+ ((uint16_t *)frm)[1] = htole16(arg); /* sequence number */
+ ((uint16_t *)frm)[2] = htole16(status);/* status */
+
+ if (has_challenge && status == IEEE80211_STATUS_SUCCESS) {
+ ((uint16_t *)frm)[3] =
+ htole16((IEEE80211_CHALLENGE_LEN << 8) |
+ IEEE80211_ELEMID_CHALLENGE);
+ memcpy(&((uint16_t *)frm)[4], ni->ni_challenge,
+ IEEE80211_CHALLENGE_LEN);
+ m->m_pkthdr.len = m->m_len =
+ 4 * sizeof(uint16_t) + IEEE80211_CHALLENGE_LEN;
+ if (arg == IEEE80211_AUTH_SHARED_RESPONSE) {
+ IEEE80211_NOTE(vap, IEEE80211_MSG_AUTH, ni,
+ "request encrypt frame (%s)", __func__);
+ /* mark frame for encryption */
+ params.ibp_flags |= IEEE80211_BPF_CRYPTO;
+ }
+ } else
+ m->m_pkthdr.len = m->m_len = 3 * sizeof(uint16_t);
+
+ /* XXX not right for shared key */
+ if (status == IEEE80211_STATUS_SUCCESS)
+ IEEE80211_NODE_STAT(ni, tx_auth);
+ else
+ IEEE80211_NODE_STAT(ni, tx_auth_fail);
+
+ if (vap->iv_opmode == IEEE80211_M_STA)
+ ieee80211_add_callback(m, ieee80211_tx_mgt_cb,
+ (void *) vap->iv_state);
+ break;
+
+ case IEEE80211_FC0_SUBTYPE_DEAUTH:
+ IEEE80211_NOTE(vap, IEEE80211_MSG_AUTH, ni,
+ "send station deauthenticate (reason %d)", arg);
+ m = ieee80211_getmgtframe(&frm,
+ ic->ic_headroom + sizeof(struct ieee80211_frame),
+ sizeof(uint16_t));
+ if (m == NULL)
+ senderr(ENOMEM, is_tx_nobuf);
+ *(uint16_t *)frm = htole16(arg); /* reason */
+ m->m_pkthdr.len = m->m_len = sizeof(uint16_t);
+
+ IEEE80211_NODE_STAT(ni, tx_deauth);
+ IEEE80211_NODE_STAT_SET(ni, tx_deauth_code, arg);
+
+ ieee80211_node_unauthorize(ni); /* port closed */
+ break;
+
+ case IEEE80211_FC0_SUBTYPE_ASSOC_REQ:
+ case IEEE80211_FC0_SUBTYPE_REASSOC_REQ:
+ /*
+ * asreq frame format
+ * [2] capability information
+ * [2] listen interval
+ * [6*] current AP address (reassoc only)
+ * [tlv] ssid
+ * [tlv] supported rates
+ * [tlv] extended supported rates
+ * [4] power capability (optional)
+ * [28] supported channels (optional)
+ * [tlv] HT capabilities
+ * [tlv] WME (optional)
+ * [tlv] Vendor OUI HT capabilities (optional)
+ * [tlv] Atheros capabilities (if negotiated)
+ * [tlv] AppIE's (optional)
+ */
+ m = ieee80211_getmgtframe(&frm,
+ ic->ic_headroom + sizeof(struct ieee80211_frame),
+ sizeof(uint16_t)
+ + sizeof(uint16_t)
+ + IEEE80211_ADDR_LEN
+ + 2 + IEEE80211_NWID_LEN
+ + 2 + IEEE80211_RATE_SIZE
+ + 2 + (IEEE80211_RATE_MAXSIZE - IEEE80211_RATE_SIZE)
+ + 4
+ + 2 + 26
+ + sizeof(struct ieee80211_wme_info)
+ + sizeof(struct ieee80211_ie_htcap)
+ + 4 + sizeof(struct ieee80211_ie_htcap)
+#ifdef IEEE80211_SUPPORT_SUPERG
+ + sizeof(struct ieee80211_ath_ie)
+#endif
+ + (vap->iv_appie_wpa != NULL ?
+ vap->iv_appie_wpa->ie_len : 0)
+ + (vap->iv_appie_assocreq != NULL ?
+ vap->iv_appie_assocreq->ie_len : 0)
+ );
+ if (m == NULL)
+ senderr(ENOMEM, is_tx_nobuf);
+
+ KASSERT(vap->iv_opmode == IEEE80211_M_STA,
+ ("wrong mode %u", vap->iv_opmode));
+ capinfo = IEEE80211_CAPINFO_ESS;
+ if (vap->iv_flags & IEEE80211_F_PRIVACY)
+ capinfo |= IEEE80211_CAPINFO_PRIVACY;
+ /*
+ * NB: Some 11a AP's reject the request when
+ * short preamble is set.
+ */
+ if ((ic->ic_flags & IEEE80211_F_SHPREAMBLE) &&
+ IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan))
+ capinfo |= IEEE80211_CAPINFO_SHORT_PREAMBLE;
+ if (IEEE80211_IS_CHAN_ANYG(ic->ic_curchan) &&
+ (ic->ic_caps & IEEE80211_C_SHSLOT))
+ capinfo |= IEEE80211_CAPINFO_SHORT_SLOTTIME;
+ if ((ni->ni_capinfo & IEEE80211_CAPINFO_SPECTRUM_MGMT) &&
+ (vap->iv_flags & IEEE80211_F_DOTH))
+ capinfo |= IEEE80211_CAPINFO_SPECTRUM_MGMT;
+ *(uint16_t *)frm = htole16(capinfo);
+ frm += 2;
+
+ KASSERT(bss->ni_intval != 0, ("beacon interval is zero!"));
+ *(uint16_t *)frm = htole16(howmany(ic->ic_lintval,
+ bss->ni_intval));
+ frm += 2;
+
+ if (type == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) {
+ IEEE80211_ADDR_COPY(frm, bss->ni_bssid);
+ frm += IEEE80211_ADDR_LEN;
+ }
+
+ frm = ieee80211_add_ssid(frm, ni->ni_essid, ni->ni_esslen);
+ frm = ieee80211_add_rates(frm, &ni->ni_rates);
+ if (vap->iv_flags & IEEE80211_F_WPA2) {
+ if (vap->iv_rsn_ie != NULL)
+ frm = add_ie(frm, vap->iv_rsn_ie);
+ /* XXX else complain? */
+ }
+ frm = ieee80211_add_xrates(frm, &ni->ni_rates);
+ if (capinfo & IEEE80211_CAPINFO_SPECTRUM_MGMT) {
+ frm = ieee80211_add_powercapability(frm,
+ ic->ic_curchan);
+ frm = ieee80211_add_supportedchannels(frm, ic);
+ }
+ if ((vap->iv_flags_ht & IEEE80211_FHT_HT) &&
+ ni->ni_ies.htcap_ie != NULL &&
+ ni->ni_ies.htcap_ie[0] == IEEE80211_ELEMID_HTCAP)
+ frm = ieee80211_add_htcap(frm, ni);
+ if (vap->iv_flags & IEEE80211_F_WPA1) {
+ if (vap->iv_wpa_ie != NULL)
+ frm = add_ie(frm, vap->iv_wpa_ie);
+ /* XXX else complain */
+ }
+ if ((ic->ic_flags & IEEE80211_F_WME) &&
+ ni->ni_ies.wme_ie != NULL)
+ frm = ieee80211_add_wme_info(frm, &ic->ic_wme);
+ if ((vap->iv_flags_ht & IEEE80211_FHT_HT) &&
+ ni->ni_ies.htcap_ie != NULL &&
+ ni->ni_ies.htcap_ie[0] == IEEE80211_ELEMID_VENDOR)
+ frm = ieee80211_add_htcap_vendor(frm, ni);
+#ifdef IEEE80211_SUPPORT_SUPERG
+ if (IEEE80211_ATH_CAP(vap, ni, IEEE80211_F_ATHEROS)) {
+ frm = ieee80211_add_ath(frm,
+ IEEE80211_ATH_CAP(vap, ni, IEEE80211_F_ATHEROS),
+ ((vap->iv_flags & IEEE80211_F_WPA) == 0 &&
+ ni->ni_authmode != IEEE80211_AUTH_8021X) ?
+ vap->iv_def_txkey : IEEE80211_KEYIX_NONE);
+ }
+#endif /* IEEE80211_SUPPORT_SUPERG */
+ if (vap->iv_appie_assocreq != NULL)
+ frm = add_appie(frm, vap->iv_appie_assocreq);
+ m->m_pkthdr.len = m->m_len = frm - mtod(m, uint8_t *);
+
+ ieee80211_add_callback(m, ieee80211_tx_mgt_cb,
+ (void *) vap->iv_state);
+ break;
+
+ case IEEE80211_FC0_SUBTYPE_ASSOC_RESP:
+ case IEEE80211_FC0_SUBTYPE_REASSOC_RESP:
+ /*
+ * asresp frame format
+ * [2] capability information
+ * [2] status
+ * [2] association ID
+ * [tlv] supported rates
+ * [tlv] extended supported rates
+ * [tlv] HT capabilities (standard, if STA enabled)
+ * [tlv] HT information (standard, if STA enabled)
+ * [tlv] WME (if configured and STA enabled)
+ * [tlv] HT capabilities (vendor OUI, if STA enabled)
+ * [tlv] HT information (vendor OUI, if STA enabled)
+ * [tlv] Atheros capabilities (if STA enabled)
+ * [tlv] AppIE's (optional)
+ */
+ m = ieee80211_getmgtframe(&frm,
+ ic->ic_headroom + sizeof(struct ieee80211_frame),
+ sizeof(uint16_t)
+ + sizeof(uint16_t)
+ + sizeof(uint16_t)
+ + 2 + IEEE80211_RATE_SIZE
+ + 2 + (IEEE80211_RATE_MAXSIZE - IEEE80211_RATE_SIZE)
+ + sizeof(struct ieee80211_ie_htcap) + 4
+ + sizeof(struct ieee80211_ie_htinfo) + 4
+ + sizeof(struct ieee80211_wme_param)
+#ifdef IEEE80211_SUPPORT_SUPERG
+ + sizeof(struct ieee80211_ath_ie)
+#endif
+ + (vap->iv_appie_assocresp != NULL ?
+ vap->iv_appie_assocresp->ie_len : 0)
+ );
+ if (m == NULL)
+ senderr(ENOMEM, is_tx_nobuf);
+
+ capinfo = ieee80211_getcapinfo(vap, bss->ni_chan);
+ *(uint16_t *)frm = htole16(capinfo);
+ frm += 2;
+
+ *(uint16_t *)frm = htole16(arg); /* status */
+ frm += 2;
+
+ if (arg == IEEE80211_STATUS_SUCCESS) {
+ *(uint16_t *)frm = htole16(ni->ni_associd);
+ IEEE80211_NODE_STAT(ni, tx_assoc);
+ } else
+ IEEE80211_NODE_STAT(ni, tx_assoc_fail);
+ frm += 2;
+
+ frm = ieee80211_add_rates(frm, &ni->ni_rates);
+ frm = ieee80211_add_xrates(frm, &ni->ni_rates);
+ /* NB: respond according to what we received */
+ if ((ni->ni_flags & HTFLAGS) == IEEE80211_NODE_HT) {
+ frm = ieee80211_add_htcap(frm, ni);
+ frm = ieee80211_add_htinfo(frm, ni);
+ }
+ if ((vap->iv_flags & IEEE80211_F_WME) &&
+ ni->ni_ies.wme_ie != NULL)
+ frm = ieee80211_add_wme_param(frm, &ic->ic_wme);
+ if ((ni->ni_flags & HTFLAGS) == HTFLAGS) {
+ frm = ieee80211_add_htcap_vendor(frm, ni);
+ frm = ieee80211_add_htinfo_vendor(frm, ni);
+ }
+#ifdef IEEE80211_SUPPORT_SUPERG
+ if (IEEE80211_ATH_CAP(vap, ni, IEEE80211_F_ATHEROS))
+ frm = ieee80211_add_ath(frm,
+ IEEE80211_ATH_CAP(vap, ni, IEEE80211_F_ATHEROS),
+ ((vap->iv_flags & IEEE80211_F_WPA) == 0 &&
+ ni->ni_authmode != IEEE80211_AUTH_8021X) ?
+ vap->iv_def_txkey : IEEE80211_KEYIX_NONE);
+#endif /* IEEE80211_SUPPORT_SUPERG */
+ if (vap->iv_appie_assocresp != NULL)
+ frm = add_appie(frm, vap->iv_appie_assocresp);
+ m->m_pkthdr.len = m->m_len = frm - mtod(m, uint8_t *);
+ break;
+
+ case IEEE80211_FC0_SUBTYPE_DISASSOC:
+ IEEE80211_NOTE(vap, IEEE80211_MSG_ASSOC, ni,
+ "send station disassociate (reason %d)", arg);
+ m = ieee80211_getmgtframe(&frm,
+ ic->ic_headroom + sizeof(struct ieee80211_frame),
+ sizeof(uint16_t));
+ if (m == NULL)
+ senderr(ENOMEM, is_tx_nobuf);
+ *(uint16_t *)frm = htole16(arg); /* reason */
+ m->m_pkthdr.len = m->m_len = sizeof(uint16_t);
+
+ IEEE80211_NODE_STAT(ni, tx_disassoc);
+ IEEE80211_NODE_STAT_SET(ni, tx_disassoc_code, arg);
+ break;
+
+ default:
+ IEEE80211_NOTE(vap, IEEE80211_MSG_ANY, ni,
+ "invalid mgmt frame type %u", type);
+ senderr(EINVAL, is_tx_unknownmgt);
+ /* NOTREACHED */
+ }
+
+ /* NB: force non-ProbeResp frames to the highest queue */
+ params.ibp_pri = WME_AC_VO;
+ params.ibp_rate0 = bss->ni_txparms->mgmtrate;
+ /* NB: we know all frames are unicast */
+ params.ibp_try0 = bss->ni_txparms->maxretry;
+ params.ibp_power = bss->ni_txpower;
+ return ieee80211_mgmt_output(ni, m, type, &params);
+bad:
+ ieee80211_free_node(ni);
+ return ret;
+#undef senderr
+#undef HTFLAGS
+}
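+
+/*
+ * Illustrative callers (hedged sketch, not part of this file): most
+ * subtypes take a reason/status code directly in arg, while AUTH
+ * packs the status code into the high 16 bits and the sequence
+ * number into the low 16 bits, matching the decode at the top of the
+ * AUTH case above:
+ */
+#if 0
+	(void) ieee80211_send_mgmt(ni, IEEE80211_FC0_SUBTYPE_DEAUTH,
+	    IEEE80211_REASON_AUTH_LEAVE);
+	(void) ieee80211_send_mgmt(ni, IEEE80211_FC0_SUBTYPE_AUTH,
+	    (IEEE80211_STATUS_SUCCESS << 16) | IEEE80211_AUTH_OPEN_RESPONSE);
+#endif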
+
+/*
+ * Return an mbuf with a probe response frame in it.
+ * Space is left to prepend an 802.11 header at the
+ * front, but it's left to the caller to fill in.
+ */
+struct mbuf *
+ieee80211_alloc_proberesp(struct ieee80211_node *bss, int legacy)
+{
+ struct ieee80211vap *vap = bss->ni_vap;
+ struct ieee80211com *ic = bss->ni_ic;
+ const struct ieee80211_rateset *rs;
+ struct mbuf *m;
+ uint16_t capinfo;
+ uint8_t *frm;
+
+ /*
+ * probe response frame format
+ * [8] time stamp
+ * [2] beacon interval
+ * [2] capability information
+ * [tlv] ssid
+ * [tlv] supported rates
+ * [tlv] parameter set (FH/DS)
+ * [tlv] parameter set (IBSS)
+ * [tlv] country (optional)
+ * [3] power control (optional)
+ * [5] channel switch announcement (CSA) (optional)
+ * [tlv] extended rate phy (ERP)
+ * [tlv] extended supported rates
+ * [tlv] RSN (optional)
+ * [tlv] HT capabilities
+ * [tlv] HT information
+ * [tlv] WPA (optional)
+ * [tlv] WME (optional)
+ * [tlv] Vendor OUI HT capabilities (optional)
+ * [tlv] Vendor OUI HT information (optional)
+ * [tlv] Atheros capabilities
+ * [tlv] AppIE's (optional)
+ * [tlv] Mesh ID (MBSS)
+ * [tlv] Mesh Conf (MBSS)
+ */
+ m = ieee80211_getmgtframe(&frm,
+ ic->ic_headroom + sizeof(struct ieee80211_frame),
+ 8
+ + sizeof(uint16_t)
+ + sizeof(uint16_t)
+ + 2 + IEEE80211_NWID_LEN
+ + 2 + IEEE80211_RATE_SIZE
+ + 7 /* max(7,3) */
+ + IEEE80211_COUNTRY_MAX_SIZE
+ + 3
+ + sizeof(struct ieee80211_csa_ie)
+ + 3
+ + 2 + (IEEE80211_RATE_MAXSIZE - IEEE80211_RATE_SIZE)
+ + sizeof(struct ieee80211_ie_wpa)
+ + sizeof(struct ieee80211_ie_htcap)
+ + sizeof(struct ieee80211_ie_htinfo)
+ + sizeof(struct ieee80211_ie_wpa)
+ + sizeof(struct ieee80211_wme_param)
+ + 4 + sizeof(struct ieee80211_ie_htcap)
+ + 4 + sizeof(struct ieee80211_ie_htinfo)
+#ifdef IEEE80211_SUPPORT_SUPERG
+ + sizeof(struct ieee80211_ath_ie)
+#endif
+#ifdef IEEE80211_SUPPORT_MESH
+ + 2 + IEEE80211_MESHID_LEN
+ + sizeof(struct ieee80211_meshconf_ie)
+#endif
+ + (vap->iv_appie_proberesp != NULL ?
+ vap->iv_appie_proberesp->ie_len : 0)
+ );
+ if (m == NULL) {
+ vap->iv_stats.is_tx_nobuf++;
+ return NULL;
+ }
+
+ memset(frm, 0, 8); /* timestamp should be filled later */
+ frm += 8;
+ *(uint16_t *)frm = htole16(bss->ni_intval);
+ frm += 2;
+ capinfo = ieee80211_getcapinfo(vap, bss->ni_chan);
+ *(uint16_t *)frm = htole16(capinfo);
+ frm += 2;
+
+ frm = ieee80211_add_ssid(frm, bss->ni_essid, bss->ni_esslen);
+ rs = ieee80211_get_suprates(ic, bss->ni_chan);
+ frm = ieee80211_add_rates(frm, rs);
+
+ if (IEEE80211_IS_CHAN_FHSS(bss->ni_chan)) {
+ *frm++ = IEEE80211_ELEMID_FHPARMS;
+ *frm++ = 5;
+ *frm++ = bss->ni_fhdwell & 0x00ff;
+ *frm++ = (bss->ni_fhdwell >> 8) & 0x00ff;
+ *frm++ = IEEE80211_FH_CHANSET(
+ ieee80211_chan2ieee(ic, bss->ni_chan));
+ *frm++ = IEEE80211_FH_CHANPAT(
+ ieee80211_chan2ieee(ic, bss->ni_chan));
+ *frm++ = bss->ni_fhindex;
+ } else {
+ *frm++ = IEEE80211_ELEMID_DSPARMS;
+ *frm++ = 1;
+ *frm++ = ieee80211_chan2ieee(ic, bss->ni_chan);
+ }
+
+ if (vap->iv_opmode == IEEE80211_M_IBSS) {
+ *frm++ = IEEE80211_ELEMID_IBSSPARMS;
+ *frm++ = 2;
+ *frm++ = 0; *frm++ = 0; /* TODO: ATIM window */
+ }
+ if ((vap->iv_flags & IEEE80211_F_DOTH) ||
+ (vap->iv_flags_ext & IEEE80211_FEXT_DOTD))
+ frm = ieee80211_add_countryie(frm, ic);
+ if (vap->iv_flags & IEEE80211_F_DOTH) {
+ if (IEEE80211_IS_CHAN_5GHZ(bss->ni_chan))
+ frm = ieee80211_add_powerconstraint(frm, vap);
+ if (ic->ic_flags & IEEE80211_F_CSAPENDING)
+ frm = ieee80211_add_csa(frm, vap);
+ }
+ if (IEEE80211_IS_CHAN_ANYG(bss->ni_chan))
+ frm = ieee80211_add_erp(frm, ic);
+ frm = ieee80211_add_xrates(frm, rs);
+ if (vap->iv_flags & IEEE80211_F_WPA2) {
+ if (vap->iv_rsn_ie != NULL)
+ frm = add_ie(frm, vap->iv_rsn_ie);
+ /* XXX else complain? */
+ }
+ /*
+ * NB: legacy 11b clients do not get certain ie's.
+ * The caller identifies such clients by passing
+ * a token in legacy to us. Could expand this to be
+ * any legacy client for stuff like HT ie's.
+ */
+ if (IEEE80211_IS_CHAN_HT(bss->ni_chan) &&
+ legacy != IEEE80211_SEND_LEGACY_11B) {
+ frm = ieee80211_add_htcap(frm, bss);
+ frm = ieee80211_add_htinfo(frm, bss);
+ }
+ if (vap->iv_flags & IEEE80211_F_WPA1) {
+ if (vap->iv_wpa_ie != NULL)
+ frm = add_ie(frm, vap->iv_wpa_ie);
+ /* XXX else complain? */
+ }
+ if (vap->iv_flags & IEEE80211_F_WME)
+ frm = ieee80211_add_wme_param(frm, &ic->ic_wme);
+ if (IEEE80211_IS_CHAN_HT(bss->ni_chan) &&
+ (vap->iv_flags_ht & IEEE80211_FHT_HTCOMPAT) &&
+ legacy != IEEE80211_SEND_LEGACY_11B) {
+ frm = ieee80211_add_htcap_vendor(frm, bss);
+ frm = ieee80211_add_htinfo_vendor(frm, bss);
+ }
+#ifdef IEEE80211_SUPPORT_SUPERG
+ if ((vap->iv_flags & IEEE80211_F_ATHEROS) &&
+ legacy != IEEE80211_SEND_LEGACY_11B)
+ frm = ieee80211_add_athcaps(frm, bss);
+#endif
+ if (vap->iv_appie_proberesp != NULL)
+ frm = add_appie(frm, vap->iv_appie_proberesp);
+#ifdef IEEE80211_SUPPORT_MESH
+ if (vap->iv_opmode == IEEE80211_M_MBSS) {
+ frm = ieee80211_add_meshid(frm, vap);
+ frm = ieee80211_add_meshconf(frm, vap);
+ }
+#endif
+ m->m_pkthdr.len = m->m_len = frm - mtod(m, uint8_t *);
+
+ return m;
+}
+
+/*
+ * Send a probe response frame to the specified mac address.
+ * This does not go through the normal mgt frame api so we
+ * can specify the destination address and re-use the bss node
+ * for the sta reference.
+ */
+int
+ieee80211_send_proberesp(struct ieee80211vap *vap,
+ const uint8_t da[IEEE80211_ADDR_LEN], int legacy)
+{
+ struct ieee80211_node *bss = vap->iv_bss;
+ struct ieee80211com *ic = vap->iv_ic;
+ struct ieee80211_frame *wh;
+ struct mbuf *m;
+
+ if (vap->iv_state == IEEE80211_S_CAC) {
+ IEEE80211_NOTE(vap, IEEE80211_MSG_OUTPUT, bss,
+ "block %s frame in CAC state", "probe response");
+ vap->iv_stats.is_tx_badstate++;
+ return EIO; /* XXX */
+ }
+
+ /*
+ * Hold a reference on the node so it doesn't go away until after
+ * the xmit is complete all the way in the driver. On error we
+ * will remove our reference.
+ */
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_NODE,
+ "ieee80211_ref_node (%s:%u) %p<%s> refcnt %d\n",
+ __func__, __LINE__, bss, ether_sprintf(bss->ni_macaddr),
+ ieee80211_node_refcnt(bss)+1);
+ ieee80211_ref_node(bss);
+
+ m = ieee80211_alloc_proberesp(bss, legacy);
+ if (m == NULL) {
+ ieee80211_free_node(bss);
+ return ENOMEM;
+ }
+
+ M_PREPEND(m, sizeof(struct ieee80211_frame), M_DONTWAIT);
+ KASSERT(m != NULL, ("no room for header"));
+
+ wh = mtod(m, struct ieee80211_frame *);
+ ieee80211_send_setup(bss, m,
+ IEEE80211_FC0_TYPE_MGT | IEEE80211_FC0_SUBTYPE_PROBE_RESP,
+ IEEE80211_NONQOS_TID, vap->iv_myaddr, da, bss->ni_bssid);
+ /* XXX power management? */
+ m->m_flags |= M_ENCAP; /* mark encapsulated */
+
+ M_WME_SETAC(m, WME_AC_BE);
+
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_DEBUG | IEEE80211_MSG_DUMPPKTS,
+ "send probe resp on channel %u to %s%s\n",
+ ieee80211_chan2ieee(ic, ic->ic_curchan), ether_sprintf(da),
+ legacy ? " <legacy>" : "");
+ IEEE80211_NODE_STAT(bss, tx_mgmt);
+
+ return ic->ic_raw_xmit(bss, m, NULL);
+}
+
+/*
+ * Allocate and build a RTS (Request To Send) control frame.
+ */
+struct mbuf *
+ieee80211_alloc_rts(struct ieee80211com *ic,
+ const uint8_t ra[IEEE80211_ADDR_LEN],
+ const uint8_t ta[IEEE80211_ADDR_LEN],
+ uint16_t dur)
+{
+ struct ieee80211_frame_rts *rts;
+ struct mbuf *m;
+
+ /* XXX honor ic_headroom */
+ m = m_gethdr(M_DONTWAIT, MT_DATA);
+ if (m != NULL) {
+ rts = mtod(m, struct ieee80211_frame_rts *);
+ rts->i_fc[0] = IEEE80211_FC0_VERSION_0 |
+ IEEE80211_FC0_TYPE_CTL | IEEE80211_FC0_SUBTYPE_RTS;
+ rts->i_fc[1] = IEEE80211_FC1_DIR_NODS;
+ *(u_int16_t *)rts->i_dur = htole16(dur);
+ IEEE80211_ADDR_COPY(rts->i_ra, ra);
+ IEEE80211_ADDR_COPY(rts->i_ta, ta);
+
+ m->m_pkthdr.len = m->m_len = sizeof(struct ieee80211_frame_rts);
+ }
+ return m;
+}
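+
+/*
+ * Sketch of a typical duration computation for the frame above
+ * (hedged; the exact SIFS and rate handling is driver and PHY
+ * specific): dur must cover the CTS, the pending data frame, the ACK
+ * and three SIFS intervals, e.g. using the helpers from
+ * ieee80211_phy.h:
+ *
+ *	dur = ieee80211_ack_duration(rt, rate, isshort)		(CTS)
+ *	    + ieee80211_compute_duration(rt, pktlen, rate, isshort)
+ *	    + ieee80211_ack_duration(rt, rate, isshort)		(ACK)
+ *	    + 3 * sifs;
+ */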
+
+/*
+ * Allocate and build a CTS (Clear To Send) control frame.
+ */
+struct mbuf *
+ieee80211_alloc_cts(struct ieee80211com *ic,
+ const uint8_t ra[IEEE80211_ADDR_LEN], uint16_t dur)
+{
+ struct ieee80211_frame_cts *cts;
+ struct mbuf *m;
+
+ /* XXX honor ic_headroom */
+ m = m_gethdr(M_DONTWAIT, MT_DATA);
+ if (m != NULL) {
+ cts = mtod(m, struct ieee80211_frame_cts *);
+ cts->i_fc[0] = IEEE80211_FC0_VERSION_0 |
+ IEEE80211_FC0_TYPE_CTL | IEEE80211_FC0_SUBTYPE_CTS;
+ cts->i_fc[1] = IEEE80211_FC1_DIR_NODS;
+ *(u_int16_t *)cts->i_dur = htole16(dur);
+ IEEE80211_ADDR_COPY(cts->i_ra, ra);
+
+ m->m_pkthdr.len = m->m_len = sizeof(struct ieee80211_frame_cts);
+ }
+ return m;
+}
+
+static void
+ieee80211_tx_mgt_timeout(void *arg)
+{
+ struct ieee80211_node *ni = arg;
+ struct ieee80211vap *vap = ni->ni_vap;
+
+ if (vap->iv_state != IEEE80211_S_INIT &&
+ (vap->iv_ic->ic_flags & IEEE80211_F_SCAN) == 0) {
+ /*
+ * NB: it's safe to specify a timeout as the reason here;
+ * it'll only be used in the right state.
+ */
+ ieee80211_new_state(vap, IEEE80211_S_SCAN,
+ IEEE80211_SCAN_FAIL_TIMEOUT);
+ }
+}
+
+static void
+ieee80211_tx_mgt_cb(struct ieee80211_node *ni, void *arg, int status)
+{
+ struct ieee80211vap *vap = ni->ni_vap;
+ enum ieee80211_state ostate = (enum ieee80211_state) arg;
+
+ /*
+ * Frame transmit completed; arrange timer callback. If
+ * transmit was successful we wait for a response. Otherwise
+ * we arrange an immediate callback instead of doing the
+ * callback directly since we don't know what state the driver
+ * is in (e.g. what locks it is holding). This work should
+ * not be too time-critical and not happen too often so the
+ * added overhead is acceptable.
+ *
+ * XXX what happens if !acked but response shows up before callback?
+ */
+ if (vap->iv_state == ostate)
+ callout_reset(&vap->iv_mgtsend,
+ status == 0 ? IEEE80211_TRANS_WAIT*hz : 0,
+ ieee80211_tx_mgt_timeout, ni);
+}
+
+static void
+ieee80211_beacon_construct(struct mbuf *m, uint8_t *frm,
+ struct ieee80211_beacon_offsets *bo, struct ieee80211_node *ni)
+{
+ struct ieee80211vap *vap = ni->ni_vap;
+ struct ieee80211com *ic = ni->ni_ic;
+ struct ieee80211_rateset *rs = &ni->ni_rates;
+ uint16_t capinfo;
+
+ /*
+ * beacon frame format
+ * [8] time stamp
+ * [2] beacon interval
+ * [2] capability information
+ * [tlv] ssid
+ * [tlv] supported rates
+ * [3] parameter set (DS)
+ * [8] CF parameter set (optional)
+ * [tlv] parameter set (IBSS/TIM)
+ * [tlv] country (optional)
+ * [3] power control (optional)
+ * [5] channel switch announcement (CSA) (optional)
+ * [tlv] extended rate phy (ERP)
+ * [tlv] extended supported rates
+ * [tlv] RSN parameters
+ * [tlv] HT capabilities
+ * [tlv] HT information
+ * XXX Vendor-specific OIDs (e.g. Atheros)
+ * [tlv] WPA parameters
+ * [tlv] WME parameters
+ * [tlv] Vendor OUI HT capabilities (optional)
+ * [tlv] Vendor OUI HT information (optional)
+ * [tlv] Atheros capabilities (optional)
+ * [tlv] TDMA parameters (optional)
+ * [tlv] Mesh ID (MBSS)
+ * [tlv] Mesh Conf (MBSS)
+ * [tlv] application data (optional)
+ */
+
+ memset(bo, 0, sizeof(*bo));
+
+ memset(frm, 0, 8); /* XXX timestamp is set by hardware/driver */
+ frm += 8;
+ *(uint16_t *)frm = htole16(ni->ni_intval);
+ frm += 2;
+ capinfo = ieee80211_getcapinfo(vap, ni->ni_chan);
+ bo->bo_caps = (uint16_t *)frm;
+ *(uint16_t *)frm = htole16(capinfo);
+ frm += 2;
+ *frm++ = IEEE80211_ELEMID_SSID;
+ if ((vap->iv_flags & IEEE80211_F_HIDESSID) == 0) {
+ *frm++ = ni->ni_esslen;
+ memcpy(frm, ni->ni_essid, ni->ni_esslen);
+ frm += ni->ni_esslen;
+ } else
+ *frm++ = 0;
+ frm = ieee80211_add_rates(frm, rs);
+ if (!IEEE80211_IS_CHAN_FHSS(ni->ni_chan)) {
+ *frm++ = IEEE80211_ELEMID_DSPARMS;
+ *frm++ = 1;
+ *frm++ = ieee80211_chan2ieee(ic, ni->ni_chan);
+ }
+ if (ic->ic_flags & IEEE80211_F_PCF) {
+ bo->bo_cfp = frm;
+ frm = ieee80211_add_cfparms(frm, ic);
+ }
+ bo->bo_tim = frm;
+ if (vap->iv_opmode == IEEE80211_M_IBSS) {
+ *frm++ = IEEE80211_ELEMID_IBSSPARMS;
+ *frm++ = 2;
+ *frm++ = 0; *frm++ = 0; /* TODO: ATIM window */
+ bo->bo_tim_len = 0;
+ } else if (vap->iv_opmode == IEEE80211_M_HOSTAP ||
+ vap->iv_opmode == IEEE80211_M_MBSS) {
+ /* TIM IE is the same for Mesh and Hostap */
+ struct ieee80211_tim_ie *tie = (struct ieee80211_tim_ie *) frm;
+
+ tie->tim_ie = IEEE80211_ELEMID_TIM;
+ tie->tim_len = 4; /* length */
+ tie->tim_count = 0; /* DTIM count */
+ tie->tim_period = vap->iv_dtim_period; /* DTIM period */
+ tie->tim_bitctl = 0; /* bitmap control */
+ tie->tim_bitmap[0] = 0; /* Partial Virtual Bitmap */
+ frm += sizeof(struct ieee80211_tim_ie);
+ bo->bo_tim_len = 1;
+ }
+ bo->bo_tim_trailer = frm;
+ if ((vap->iv_flags & IEEE80211_F_DOTH) ||
+ (vap->iv_flags_ext & IEEE80211_FEXT_DOTD))
+ frm = ieee80211_add_countryie(frm, ic);
+ if (vap->iv_flags & IEEE80211_F_DOTH) {
+ if (IEEE80211_IS_CHAN_5GHZ(ni->ni_chan))
+ frm = ieee80211_add_powerconstraint(frm, vap);
+ bo->bo_csa = frm;
+ if (ic->ic_flags & IEEE80211_F_CSAPENDING)
+ frm = ieee80211_add_csa(frm, vap);
+ } else
+ bo->bo_csa = frm;
+ if (IEEE80211_IS_CHAN_ANYG(ni->ni_chan)) {
+ bo->bo_erp = frm;
+ frm = ieee80211_add_erp(frm, ic);
+ }
+ frm = ieee80211_add_xrates(frm, rs);
+ if (vap->iv_flags & IEEE80211_F_WPA2) {
+ if (vap->iv_rsn_ie != NULL)
+ frm = add_ie(frm, vap->iv_rsn_ie);
+ /* XXX else complain */
+ }
+ if (IEEE80211_IS_CHAN_HT(ni->ni_chan)) {
+ frm = ieee80211_add_htcap(frm, ni);
+ bo->bo_htinfo = frm;
+ frm = ieee80211_add_htinfo(frm, ni);
+ }
+ if (vap->iv_flags & IEEE80211_F_WPA1) {
+ if (vap->iv_wpa_ie != NULL)
+ frm = add_ie(frm, vap->iv_wpa_ie);
+ /* XXX else complain */
+ }
+ if (vap->iv_flags & IEEE80211_F_WME) {
+ bo->bo_wme = frm;
+ frm = ieee80211_add_wme_param(frm, &ic->ic_wme);
+ }
+ if (IEEE80211_IS_CHAN_HT(ni->ni_chan) &&
+ (vap->iv_flags_ht & IEEE80211_FHT_HTCOMPAT)) {
+ frm = ieee80211_add_htcap_vendor(frm, ni);
+ frm = ieee80211_add_htinfo_vendor(frm, ni);
+ }
+#ifdef IEEE80211_SUPPORT_SUPERG
+ if (vap->iv_flags & IEEE80211_F_ATHEROS) {
+ bo->bo_ath = frm;
+ frm = ieee80211_add_athcaps(frm, ni);
+ }
+#endif
+#ifdef IEEE80211_SUPPORT_TDMA
+ if (vap->iv_caps & IEEE80211_C_TDMA) {
+ bo->bo_tdma = frm;
+ frm = ieee80211_add_tdma(frm, vap);
+ }
+#endif
+ if (vap->iv_appie_beacon != NULL) {
+ bo->bo_appie = frm;
+ bo->bo_appie_len = vap->iv_appie_beacon->ie_len;
+ frm = add_appie(frm, vap->iv_appie_beacon);
+ }
+#ifdef IEEE80211_SUPPORT_MESH
+ if (vap->iv_opmode == IEEE80211_M_MBSS) {
+ frm = ieee80211_add_meshid(frm, vap);
+ bo->bo_meshconf = frm;
+ frm = ieee80211_add_meshconf(frm, vap);
+ }
+#endif
+ bo->bo_tim_trailer_len = frm - bo->bo_tim_trailer;
+ bo->bo_csa_trailer_len = frm - bo->bo_csa;
+ m->m_pkthdr.len = m->m_len = frm - mtod(m, uint8_t *);
+}
+
+/*
+ * Allocate a beacon frame and fill in the appropriate bits.
+ */
+struct mbuf *
+ieee80211_beacon_alloc(struct ieee80211_node *ni,
+ struct ieee80211_beacon_offsets *bo)
+{
+ struct ieee80211vap *vap = ni->ni_vap;
+ struct ieee80211com *ic = ni->ni_ic;
+ struct ifnet *ifp = vap->iv_ifp;
+ struct ieee80211_frame *wh;
+ struct mbuf *m;
+ int pktlen;
+ uint8_t *frm;
+
+ /*
+ * beacon frame format
+ * [8] time stamp
+ * [2] beacon interval
+ * [2] capability information
+ * [tlv] ssid
+ * [tlv] supported rates
+ * [3] parameter set (DS)
+ * [8] CF parameter set (optional)
+ * [tlv] parameter set (IBSS/TIM)
+ * [tlv] country (optional)
+ * [3] power control (optional)
+ * [5] channel switch announcement (CSA) (optional)
+ * [tlv] extended rate phy (ERP)
+ * [tlv] extended supported rates
+ * [tlv] RSN parameters
+ * [tlv] HT capabilities
+ * [tlv] HT information
+ * [tlv] Vendor OUI HT capabilities (optional)
+ * [tlv] Vendor OUI HT information (optional)
+ * XXX Vendor-specific OIDs (e.g. Atheros)
+ * [tlv] WPA parameters
+ * [tlv] WME parameters
+ * [tlv] TDMA parameters (optional)
+ * [tlv] Mesh ID (MBSS)
+ * [tlv] Mesh Conf (MBSS)
+ * [tlv] application data (optional)
+ * NB: we allocate the max space required for the TIM bitmap.
+ * XXX how big is this?
+ */
+ pktlen = 8 /* time stamp */
+ + sizeof(uint16_t) /* beacon interval */
+ + sizeof(uint16_t) /* capabilities */
+ + 2 + ni->ni_esslen /* ssid */
+ + 2 + IEEE80211_RATE_SIZE /* supported rates */
+ + 2 + 1 /* DS parameters */
+ + 2 + 6 /* CF parameters */
+ + 2 + 4 + vap->iv_tim_len /* DTIM/IBSSPARMS */
+ + IEEE80211_COUNTRY_MAX_SIZE /* country */
+ + 2 + 1 /* power control */
+ + sizeof(struct ieee80211_csa_ie) /* CSA */
+ + 2 + 1 /* ERP */
+ + 2 + (IEEE80211_RATE_MAXSIZE - IEEE80211_RATE_SIZE)
+ + (vap->iv_caps & IEEE80211_C_WPA ? /* WPA 1+2 */
+ 2*sizeof(struct ieee80211_ie_wpa) : 0)
+ /* XXX conditional? */
+ + 4+2*sizeof(struct ieee80211_ie_htcap)/* HT caps */
+ + 4+2*sizeof(struct ieee80211_ie_htinfo)/* HT info */
+ + (vap->iv_caps & IEEE80211_C_WME ? /* WME */
+ sizeof(struct ieee80211_wme_param) : 0)
+#ifdef IEEE80211_SUPPORT_SUPERG
+ + sizeof(struct ieee80211_ath_ie) /* ATH */
+#endif
+#ifdef IEEE80211_SUPPORT_TDMA
+ + (vap->iv_caps & IEEE80211_C_TDMA ? /* TDMA */
+ sizeof(struct ieee80211_tdma_param) : 0)
+#endif
+#ifdef IEEE80211_SUPPORT_MESH
+ + 2 + ni->ni_meshidlen
+ + sizeof(struct ieee80211_meshconf_ie)
+#endif
+ + IEEE80211_MAX_APPIE
+ ;
+ m = ieee80211_getmgtframe(&frm,
+ ic->ic_headroom + sizeof(struct ieee80211_frame), pktlen);
+ if (m == NULL) {
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_ANY,
+ "%s: cannot get buf; size %u\n", __func__, pktlen);
+ vap->iv_stats.is_tx_nobuf++;
+ return NULL;
+ }
+ ieee80211_beacon_construct(m, frm, bo, ni);
+
+ M_PREPEND(m, sizeof(struct ieee80211_frame), M_DONTWAIT);
+ KASSERT(m != NULL, ("no space for 802.11 header?"));
+ wh = mtod(m, struct ieee80211_frame *);
+ wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
+ IEEE80211_FC0_SUBTYPE_BEACON;
+ wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
+ *(uint16_t *)wh->i_dur = 0;
+ IEEE80211_ADDR_COPY(wh->i_addr1, ifp->if_broadcastaddr);
+ IEEE80211_ADDR_COPY(wh->i_addr2, vap->iv_myaddr);
+ IEEE80211_ADDR_COPY(wh->i_addr3, ni->ni_bssid);
+ *(uint16_t *)wh->i_seq = 0;
+
+ return m;
+}
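+
+/*
+ * Illustrative driver-side usage (hedged sketch; sc_boff is a
+ * hypothetical softc field): allocate the beacon once when the bss
+ * comes up, then refresh the dynamic IEs before each TBTT.  A
+ * non-zero return from ieee80211_beacon_update means the frame
+ * length changed and any DMA mapping must be redone:
+ */
+#if 0
+	m = ieee80211_beacon_alloc(vap->iv_bss, &sc->sc_boff);
+	if (ieee80211_beacon_update(vap->iv_bss, &sc->sc_boff, m, mcast))
+		/* re-setup the beacon descriptor/DMA mapping */;
+#endif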
+
+/*
+ * Update the dynamic parts of a beacon frame based on the current state.
+ */
+int
+ieee80211_beacon_update(struct ieee80211_node *ni,
+ struct ieee80211_beacon_offsets *bo, struct mbuf *m, int mcast)
+{
+ struct ieee80211vap *vap = ni->ni_vap;
+ struct ieee80211com *ic = ni->ni_ic;
+ int len_changed = 0;
+ uint16_t capinfo;
+
+ IEEE80211_LOCK(ic);
+ /*
+ * Handle 11h channel change when we've reached the count.
+ * We must recalculate the beacon frame contents to account
+ * for the new channel. Note we do this only for the first
+ * vap that reaches this point; subsequent vaps just update
+ * their beacon state to reflect the recalculated channel.
+ */
+ if (isset(bo->bo_flags, IEEE80211_BEACON_CSA) &&
+ vap->iv_csa_count == ic->ic_csa_count) {
+ vap->iv_csa_count = 0;
+ /*
+ * Effect channel change before reconstructing the beacon
+ * frame contents as many places reference ni_chan.
+ */
+ if (ic->ic_csa_newchan != NULL)
+ ieee80211_csa_completeswitch(ic);
+ /*
+ * NB: ieee80211_beacon_construct clears all pending
+ * updates in bo_flags so we don't need to explicitly
+ * clear IEEE80211_BEACON_CSA.
+ */
+ ieee80211_beacon_construct(m,
+ mtod(m, uint8_t*) + sizeof(struct ieee80211_frame), bo, ni);
+
+ /* XXX do WME aggressive mode processing? */
+ IEEE80211_UNLOCK(ic);
+ return 1; /* just assume length changed */
+ }
+
+ /* XXX faster to recalculate entirely or just changes? */
+ capinfo = ieee80211_getcapinfo(vap, ni->ni_chan);
+ *bo->bo_caps = htole16(capinfo);
+
+ if (vap->iv_flags & IEEE80211_F_WME) {
+ struct ieee80211_wme_state *wme = &ic->ic_wme;
+
+ /*
+ * Check for aggressive mode change. When there is
+ * significant high-priority traffic in the BSS,
+ * throttle back BE traffic by using conservative
+ * parameters. Otherwise BE uses aggressive params
+ * to optimize performance of legacy/non-QoS traffic.
+ */
+ if (wme->wme_flags & WME_F_AGGRMODE) {
+ if (wme->wme_hipri_traffic >
+ wme->wme_hipri_switch_thresh) {
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_WME,
+ "%s: traffic %u, disable aggressive mode\n",
+ __func__, wme->wme_hipri_traffic);
+ wme->wme_flags &= ~WME_F_AGGRMODE;
+ ieee80211_wme_updateparams_locked(vap);
+ wme->wme_hipri_traffic =
+ wme->wme_hipri_switch_hysteresis;
+ } else
+ wme->wme_hipri_traffic = 0;
+ } else {
+ if (wme->wme_hipri_traffic <=
+ wme->wme_hipri_switch_thresh) {
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_WME,
+ "%s: traffic %u, enable aggressive mode\n",
+ __func__, wme->wme_hipri_traffic);
+ wme->wme_flags |= WME_F_AGGRMODE;
+ ieee80211_wme_updateparams_locked(vap);
+ wme->wme_hipri_traffic = 0;
+ } else
+ wme->wme_hipri_traffic =
+ wme->wme_hipri_switch_hysteresis;
+ }
+ if (isset(bo->bo_flags, IEEE80211_BEACON_WME)) {
+ (void) ieee80211_add_wme_param(bo->bo_wme, wme);
+ clrbit(bo->bo_flags, IEEE80211_BEACON_WME);
+ }
+ }
+
+ if (isset(bo->bo_flags, IEEE80211_BEACON_HTINFO)) {
+ ieee80211_ht_update_beacon(vap, bo);
+ clrbit(bo->bo_flags, IEEE80211_BEACON_HTINFO);
+ }
+#ifdef IEEE80211_SUPPORT_TDMA
+ if (vap->iv_caps & IEEE80211_C_TDMA) {
+ /*
+ * NB: the beacon is potentially updated every TBTT.
+ */
+ ieee80211_tdma_update_beacon(vap, bo);
+ }
+#endif
+#ifdef IEEE80211_SUPPORT_MESH
+ if (vap->iv_opmode == IEEE80211_M_MBSS)
+ ieee80211_mesh_update_beacon(vap, bo);
+#endif
+
+ if (vap->iv_opmode == IEEE80211_M_HOSTAP ||
+ vap->iv_opmode == IEEE80211_M_MBSS) { /* NB: no IBSS support*/
+ struct ieee80211_tim_ie *tie =
+ (struct ieee80211_tim_ie *) bo->bo_tim;
+ if (isset(bo->bo_flags, IEEE80211_BEACON_TIM)) {
+ u_int timlen, timoff, i;
+ /*
+ * ATIM/DTIM needs updating. If it fits in the
+ * current space allocated then just copy in the
+ * new bits. Otherwise we need to move any trailing
+ * data to make room. Note that we know there is
+ * contiguous space because ieee80211_beacon_alloc
+ * ensures there is space in the mbuf to write a
+ * maximal-size virtual bitmap (based on iv_max_aid).
+ */
+ /*
+ * Calculate the bitmap size and offset, copy any
+ * trailer out of the way, and then copy in the
+ * new bitmap and update the information element.
+ * Note that the tim bitmap must contain at least
+ * one byte and any offset must be even.
+ */
+ if (vap->iv_ps_pending != 0) {
+ timoff = 128; /* impossibly large */
+ for (i = 0; i < vap->iv_tim_len; i++)
+ if (vap->iv_tim_bitmap[i]) {
+ timoff = i &~ 1;
+ break;
+ }
+ KASSERT(timoff != 128, ("tim bitmap empty!"));
+ for (i = vap->iv_tim_len-1; i >= timoff; i--)
+ if (vap->iv_tim_bitmap[i])
+ break;
+ timlen = 1 + (i - timoff);
+ } else {
+ timoff = 0;
+ timlen = 1;
+ }
+ if (timlen != bo->bo_tim_len) {
+ /* copy up/down trailer */
+ int adjust = tie->tim_bitmap+timlen
+ - bo->bo_tim_trailer;
+ ovbcopy(bo->bo_tim_trailer,
+ bo->bo_tim_trailer+adjust,
+ bo->bo_tim_trailer_len);
+ bo->bo_tim_trailer += adjust;
+ bo->bo_erp += adjust;
+ bo->bo_htinfo += adjust;
+#ifdef IEEE80211_SUPPORT_SUPERG
+ bo->bo_ath += adjust;
+#endif
+#ifdef IEEE80211_SUPPORT_TDMA
+ bo->bo_tdma += adjust;
+#endif
+#ifdef IEEE80211_SUPPORT_MESH
+ bo->bo_meshconf += adjust;
+#endif
+ bo->bo_appie += adjust;
+ bo->bo_wme += adjust;
+ bo->bo_csa += adjust;
+ bo->bo_tim_len = timlen;
+
+ /* update information element */
+ tie->tim_len = 3 + timlen;
+ tie->tim_bitctl = timoff;
+ len_changed = 1;
+ }
+ memcpy(tie->tim_bitmap, vap->iv_tim_bitmap + timoff,
+ bo->bo_tim_len);
+
+ clrbit(bo->bo_flags, IEEE80211_BEACON_TIM);
+
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_POWER,
+ "%s: TIM updated, pending %u, off %u, len %u\n",
+ __func__, vap->iv_ps_pending, timoff, timlen);
+ }
+ /* count down DTIM period */
+ if (tie->tim_count == 0)
+ tie->tim_count = tie->tim_period - 1;
+ else
+ tie->tim_count--;
+ /* update state for buffered multicast frames on DTIM */
+ if (mcast && tie->tim_count == 0)
+ tie->tim_bitctl |= 1;
+ else
+ tie->tim_bitctl &= ~1;
+ if (isset(bo->bo_flags, IEEE80211_BEACON_CSA)) {
+ struct ieee80211_csa_ie *csa =
+ (struct ieee80211_csa_ie *) bo->bo_csa;
+
+ /*
+ * Insert or update CSA ie. If we're just starting
+ * to count down to the channel switch then we need
+ * to insert the CSA ie. Otherwise we just need to
+ * drop the count. The actual change happens above
+ * when the vap's count reaches the target count.
+ */
+ if (vap->iv_csa_count == 0) {
+ memmove(&csa[1], csa, bo->bo_csa_trailer_len);
+ bo->bo_erp += sizeof(*csa);
+ bo->bo_htinfo += sizeof(*csa);
+ bo->bo_wme += sizeof(*csa);
+#ifdef IEEE80211_SUPPORT_SUPERG
+ bo->bo_ath += sizeof(*csa);
+#endif
+#ifdef IEEE80211_SUPPORT_TDMA
+ bo->bo_tdma += sizeof(*csa);
+#endif
+#ifdef IEEE80211_SUPPORT_MESH
+ bo->bo_meshconf += sizeof(*csa);
+#endif
+ bo->bo_appie += sizeof(*csa);
+ bo->bo_csa_trailer_len += sizeof(*csa);
+ bo->bo_tim_trailer_len += sizeof(*csa);
+ m->m_len += sizeof(*csa);
+ m->m_pkthdr.len += sizeof(*csa);
+
+ ieee80211_add_csa(bo->bo_csa, vap);
+ } else
+ csa->csa_count--;
+ vap->iv_csa_count++;
+ /* NB: don't clear IEEE80211_BEACON_CSA */
+ }
+ if (isset(bo->bo_flags, IEEE80211_BEACON_ERP)) {
+ /*
+ * ERP element needs updating.
+ */
+ (void) ieee80211_add_erp(bo->bo_erp, ic);
+ clrbit(bo->bo_flags, IEEE80211_BEACON_ERP);
+ }
+#ifdef IEEE80211_SUPPORT_SUPERG
+ if (isset(bo->bo_flags, IEEE80211_BEACON_ATH)) {
+ ieee80211_add_athcaps(bo->bo_ath, ni);
+ clrbit(bo->bo_flags, IEEE80211_BEACON_ATH);
+ }
+#endif
+ }
+ if (isset(bo->bo_flags, IEEE80211_BEACON_APPIE)) {
+ const struct ieee80211_appie *aie = vap->iv_appie_beacon;
+ int aielen;
+ uint8_t *frm;
+
+ aielen = 0;
+ if (aie != NULL)
+ aielen += aie->ie_len;
+ if (aielen != bo->bo_appie_len) {
+ /* copy up/down trailer */
+ int adjust = aielen - bo->bo_appie_len;
+ ovbcopy(bo->bo_tim_trailer, bo->bo_tim_trailer+adjust,
+ bo->bo_tim_trailer_len);
+ bo->bo_tim_trailer += adjust;
+ bo->bo_appie += adjust;
+ bo->bo_appie_len = aielen;
+
+ len_changed = 1;
+ }
+ frm = bo->bo_appie;
+ if (aie != NULL)
+ frm = add_appie(frm, aie);
+ clrbit(bo->bo_flags, IEEE80211_BEACON_APPIE);
+ }
+ IEEE80211_UNLOCK(ic);
+
+ return len_changed;
+}
diff --git a/rtems/freebsd/net80211/ieee80211_phy.c b/rtems/freebsd/net80211/ieee80211_phy.c
new file mode 100644
index 00000000..b5ab9882
--- /dev/null
+++ b/rtems/freebsd/net80211/ieee80211_phy.c
@@ -0,0 +1,467 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 2007-2008 Sam Leffler, Errno Consulting
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * IEEE 802.11 PHY-related support.
+ */
+
+#include <rtems/freebsd/local/opt_inet.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/systm.h>
+
+#include <rtems/freebsd/sys/socket.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/if_media.h>
+
+#include <rtems/freebsd/net80211/ieee80211_var.h>
+#include <rtems/freebsd/net80211/ieee80211_phy.h>
+
+#ifdef notyet
+struct ieee80211_ds_plcp_hdr {
+ uint8_t i_signal;
+ uint8_t i_service;
+ uint16_t i_length;
+ uint16_t i_crc;
+} __packed;
+
+#endif /* notyet */
+
+/* shorthands to compact tables for readability */
+#define OFDM IEEE80211_T_OFDM
+#define CCK IEEE80211_T_CCK
+#define TURBO IEEE80211_T_TURBO
+#define HALF IEEE80211_T_OFDM_HALF
+#define QUART IEEE80211_T_OFDM_QUARTER
+#define PBCC (IEEE80211_T_OFDM_QUARTER+1) /* XXX */
+#define B(r) (0x80 | r)
+#define Mb(x) (x*1000)
+
+static struct ieee80211_rate_table ieee80211_11b_table = {
+ .rateCount = 4, /* XXX no PBCC */
+ .info = {
+/* short ctrl */
+/* Preamble dot11Rate Rate */
+ [0] = { .phy = CCK, 1000, 0x00, B(2), 0 },/* 1 Mb */
+ [1] = { .phy = CCK, 2000, 0x04, B(4), 1 },/* 2 Mb */
+ [2] = { .phy = CCK, 5500, 0x04, B(11), 1 },/* 5.5 Mb */
+ [3] = { .phy = CCK, 11000, 0x04, B(22), 1 },/* 11 Mb */
+ [4] = { .phy = PBCC, 22000, 0x04, 44, 3 } /* 22 Mb */
+ },
+};
+
+static struct ieee80211_rate_table ieee80211_11g_table = {
+ .rateCount = 12,
+ .info = {
+/* short ctrl */
+/* Preamble dot11Rate Rate */
+ [0] = { .phy = CCK, 1000, 0x00, B(2), 0 },
+ [1] = { .phy = CCK, 2000, 0x04, B(4), 1 },
+ [2] = { .phy = CCK, 5500, 0x04, B(11), 2 },
+ [3] = { .phy = CCK, 11000, 0x04, B(22), 3 },
+ [4] = { .phy = OFDM, 6000, 0x00, 12, 4 },
+ [5] = { .phy = OFDM, 9000, 0x00, 18, 4 },
+ [6] = { .phy = OFDM, 12000, 0x00, 24, 6 },
+ [7] = { .phy = OFDM, 18000, 0x00, 36, 6 },
+ [8] = { .phy = OFDM, 24000, 0x00, 48, 8 },
+ [9] = { .phy = OFDM, 36000, 0x00, 72, 8 },
+ [10] = { .phy = OFDM, 48000, 0x00, 96, 8 },
+ [11] = { .phy = OFDM, 54000, 0x00, 108, 8 }
+ },
+};
+
+static struct ieee80211_rate_table ieee80211_11a_table = {
+ .rateCount = 8,
+ .info = {
+/* short ctrl */
+/* Preamble dot11Rate Rate */
+ [0] = { .phy = OFDM, 6000, 0x00, B(12), 0 },
+ [1] = { .phy = OFDM, 9000, 0x00, 18, 0 },
+ [2] = { .phy = OFDM, 12000, 0x00, B(24), 2 },
+ [3] = { .phy = OFDM, 18000, 0x00, 36, 2 },
+ [4] = { .phy = OFDM, 24000, 0x00, B(48), 4 },
+ [5] = { .phy = OFDM, 36000, 0x00, 72, 4 },
+ [6] = { .phy = OFDM, 48000, 0x00, 96, 4 },
+ [7] = { .phy = OFDM, 54000, 0x00, 108, 4 }
+ },
+};
+
+static struct ieee80211_rate_table ieee80211_half_table = {
+ .rateCount = 8,
+ .info = {
+/* short ctrl */
+/* Preamble dot11Rate Rate */
+ [0] = { .phy = HALF, 3000, 0x00, B(6), 0 },
+ [1] = { .phy = HALF, 4500, 0x00, 9, 0 },
+ [2] = { .phy = HALF, 6000, 0x00, B(12), 2 },
+ [3] = { .phy = HALF, 9000, 0x00, 18, 2 },
+ [4] = { .phy = HALF, 12000, 0x00, B(24), 4 },
+ [5] = { .phy = HALF, 18000, 0x00, 36, 4 },
+ [6] = { .phy = HALF, 24000, 0x00, 48, 4 },
+ [7] = { .phy = HALF, 27000, 0x00, 54, 4 }
+ },
+};
+
+static struct ieee80211_rate_table ieee80211_quarter_table = {
+ .rateCount = 8,
+ .info = {
+/* short ctrl */
+/* Preamble dot11Rate Rate */
+ [0] = { .phy = QUART, 1500, 0x00, B(3), 0 },
+ [1] = { .phy = QUART, 2250, 0x00, 4, 0 },
+ [2] = { .phy = QUART, 3000, 0x00, B(9), 2 },
+ [3] = { .phy = QUART, 4500, 0x00, 9, 2 },
+ [4] = { .phy = QUART, 6000, 0x00, B(12), 4 },
+ [5] = { .phy = QUART, 9000, 0x00, 18, 4 },
+ [6] = { .phy = QUART, 12000, 0x00, 24, 4 },
+ [7] = { .phy = QUART, 13500, 0x00, 27, 4 }
+ },
+};
+
+static struct ieee80211_rate_table ieee80211_turbog_table = {
+ .rateCount = 7,
+ .info = {
+/* short ctrl */
+/* Preamble dot11Rate Rate */
+ [0] = { .phy = TURBO, 12000, 0x00, B(12), 0 },
+ [1] = { .phy = TURBO, 24000, 0x00, B(24), 1 },
+ [2] = { .phy = TURBO, 36000, 0x00, 36, 1 },
+ [3] = { .phy = TURBO, 48000, 0x00, B(48), 3 },
+ [4] = { .phy = TURBO, 72000, 0x00, 72, 3 },
+ [5] = { .phy = TURBO, 96000, 0x00, 96, 3 },
+ [6] = { .phy = TURBO, 108000, 0x00, 108, 3 }
+ },
+};
+
+static struct ieee80211_rate_table ieee80211_turboa_table = {
+ .rateCount = 8,
+ .info = {
+/* short ctrl */
+/* Preamble dot11Rate Rate */
+ [0] = { .phy = TURBO, 12000, 0x00, B(12), 0 },
+ [1] = { .phy = TURBO, 18000, 0x00, 18, 0 },
+ [2] = { .phy = TURBO, 24000, 0x00, B(24), 2 },
+ [3] = { .phy = TURBO, 36000, 0x00, 36, 2 },
+ [4] = { .phy = TURBO, 48000, 0x00, B(48), 4 },
+ [5] = { .phy = TURBO, 72000, 0x00, 72, 4 },
+ [6] = { .phy = TURBO, 96000, 0x00, 96, 4 },
+ [7] = { .phy = TURBO, 108000, 0x00, 108, 4 }
+ },
+};
+
+#undef Mb
+#undef B
+#undef OFDM
+#undef HALF
+#undef QUART
+#undef CCK
+#undef TURBO
+#undef PBCC
+
+/*
+ * Set up a rate table's reverse lookup table and fill in
+ * ack durations. The reverse lookup tables are assumed
+ * to be initialized to zero (or at least the first entry).
+ * We use this as a key that indicates whether or not
+ * we've previously set up the reverse lookup table.
+ *
+ * XXX not reentrant, but shouldn't matter
+ */
+static void
+ieee80211_setup_ratetable(struct ieee80211_rate_table *rt)
+{
+#define N(a) (sizeof(a)/sizeof(a[0]))
+#define WLAN_CTRL_FRAME_SIZE \
+ (sizeof(struct ieee80211_frame_ack) + IEEE80211_CRC_LEN)
+
+ int i;
+
+ for (i = 0; i < N(rt->rateCodeToIndex); i++)
+ rt->rateCodeToIndex[i] = (uint8_t) -1;
+ for (i = 0; i < rt->rateCount; i++) {
+ uint8_t code = rt->info[i].dot11Rate;
+ uint8_t cix = rt->info[i].ctlRateIndex;
+ uint8_t ctl_rate = rt->info[cix].dot11Rate;
+
+ rt->rateCodeToIndex[code] = i;
+ if (code & IEEE80211_RATE_BASIC) {
+ /*
+ * Map w/o basic rate bit too.
+ */
+ code &= IEEE80211_RATE_VAL;
+ rt->rateCodeToIndex[code] = i;
+ }
+
+ /*
+ * XXX for 11g the control rate to use for 5.5 and 11 Mb/s
+ * depends on whether they are marked as basic rates;
+ * the static tables are set up with an 11b-compatible
+ * 2Mb/s rate which will work but is suboptimal
+ *
+ * NB: Control rate is always less than or equal to the
+ * current rate, so control rate's reverse lookup entry
+ * has been installed and following call is safe.
+ */
+ rt->info[i].lpAckDuration = ieee80211_compute_duration(rt,
+ WLAN_CTRL_FRAME_SIZE, ctl_rate, 0);
+ rt->info[i].spAckDuration = ieee80211_compute_duration(rt,
+ WLAN_CTRL_FRAME_SIZE, ctl_rate, IEEE80211_F_SHPREAMBLE);
+ }
+
+#undef WLAN_CTRL_FRAME_SIZE
+#undef N
+}
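+
+/*
+ * For example, in the 11b table the 11 Mb/s entry has dot11Rate
+ * B(22) = 0x96; after setup both rateCodeToIndex[0x96] and
+ * rateCodeToIndex[0x16] (the same code without the basic-rate bit)
+ * resolve to index 3.
+ */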
+
+/* Set up all rate tables */
+static void
+ieee80211_phy_init(void)
+{
+#define N(arr) (int)(sizeof(arr) / sizeof(arr[0]))
+ static struct ieee80211_rate_table * const ratetables[] = {
+ &ieee80211_half_table,
+ &ieee80211_quarter_table,
+ &ieee80211_11a_table,
+ &ieee80211_11g_table,
+ &ieee80211_turbog_table,
+ &ieee80211_turboa_table,
+ &ieee80211_turboa_table,
+ &ieee80211_11a_table,
+ &ieee80211_11g_table,
+ &ieee80211_11b_table
+ };
+ int i;
+
+ for (i = 0; i < N(ratetables); ++i)
+ ieee80211_setup_ratetable(ratetables[i]);
+
+#undef N
+}
+SYSINIT(wlan_phy, SI_SUB_DRIVERS, SI_ORDER_FIRST, ieee80211_phy_init, NULL);
+
+const struct ieee80211_rate_table *
+ieee80211_get_ratetable(struct ieee80211_channel *c)
+{
+ const struct ieee80211_rate_table *rt;
+
+ /* XXX HT */
+ if (IEEE80211_IS_CHAN_HALF(c))
+ rt = &ieee80211_half_table;
+ else if (IEEE80211_IS_CHAN_QUARTER(c))
+ rt = &ieee80211_quarter_table;
+ else if (IEEE80211_IS_CHAN_HTA(c))
+ rt = &ieee80211_11a_table; /* XXX */
+ else if (IEEE80211_IS_CHAN_HTG(c))
+ rt = &ieee80211_11g_table; /* XXX */
+ else if (IEEE80211_IS_CHAN_108G(c))
+ rt = &ieee80211_turbog_table;
+ else if (IEEE80211_IS_CHAN_ST(c))
+ rt = &ieee80211_turboa_table;
+ else if (IEEE80211_IS_CHAN_TURBO(c))
+ rt = &ieee80211_turboa_table;
+ else if (IEEE80211_IS_CHAN_A(c))
+ rt = &ieee80211_11a_table;
+ else if (IEEE80211_IS_CHAN_ANYG(c))
+ rt = &ieee80211_11g_table;
+ else if (IEEE80211_IS_CHAN_B(c))
+ rt = &ieee80211_11b_table;
+ else {
+ /* NB: should not get here */
+ panic("%s: no rate table for channel; freq %u flags 0x%x\n",
+ __func__, c->ic_freq, c->ic_flags);
+ }
+ return rt;
+}
+
+/*
+ * Convert PLCP signal/rate field to 802.11 rate (.5Mbits/s)
+ *
+ * Note we do no parameter checking; this routine is mainly
+ * used to derive an 802.11 rate for constructing radiotap
+ * header data for rx frames.
+ *
+ * XXX might be a candidate for inline
+ */
+uint8_t
+ieee80211_plcp2rate(uint8_t plcp, enum ieee80211_phytype type)
+{
+ if (type == IEEE80211_T_OFDM) {
+ static const uint8_t ofdm_plcp2rate[16] = {
+ [0xb] = 12,
+ [0xf] = 18,
+ [0xa] = 24,
+ [0xe] = 36,
+ [0x9] = 48,
+ [0xd] = 72,
+ [0x8] = 96,
+ [0xc] = 108
+ };
+ return ofdm_plcp2rate[plcp & 0xf];
+ }
+ if (type == IEEE80211_T_CCK) {
+ static const uint8_t cck_plcp2rate[16] = {
+ [0xa] = 2, /* 0x0a */
+ [0x4] = 4, /* 0x14 */
+ [0x7] = 11, /* 0x37 */
+ [0xe] = 22, /* 0x6e */
+ [0xc] = 44, /* 0xdc , actually PBCC */
+ };
+ return cck_plcp2rate[plcp & 0xf];
+ }
+ return 0;
+}
+
+/*
+ * Convert 802.11 rate to PLCP signal.
+ */
+uint8_t
+ieee80211_rate2plcp(int rate, enum ieee80211_phytype type)
+{
+ /* XXX ignore type for now since rates are unique */
+ switch (rate) {
+ /* OFDM rates (cf IEEE Std 802.11a-1999, pp. 14 Table 80) */
+ case 12: return 0xb;
+ case 18: return 0xf;
+ case 24: return 0xa;
+ case 36: return 0xe;
+ case 48: return 0x9;
+ case 72: return 0xd;
+ case 96: return 0x8;
+ case 108: return 0xc;
+ /* CCK rates (IEEE Std 802.11b-1999 page 15, subclause 18.2.3.3) */
+ case 2: return 10;
+ case 4: return 20;
+ case 11: return 55;
+ case 22: return 110;
+ /* IEEE Std 802.11g-2003 page 19, subclause 19.3.2.1 */
+ case 44: return 220;
+ }
+ return 0; /* XXX unsupported/unknown rate */
+}
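+
+/*
+ * For example, ieee80211_rate2plcp(108, IEEE80211_T_OFDM) yields
+ * signal 0xc and ieee80211_plcp2rate(0xc, IEEE80211_T_OFDM) maps it
+ * back to 108, i.e. 54 Mb/s in the .5 Mb/s units used throughout.
+ */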
+
+#define CCK_SIFS_TIME 10
+#define CCK_PREAMBLE_BITS 144
+#define CCK_PLCP_BITS 48
+
+#define OFDM_SIFS_TIME 16
+#define OFDM_PREAMBLE_TIME 20
+#define OFDM_PLCP_BITS 22
+#define OFDM_SYMBOL_TIME 4
+
+#define OFDM_HALF_SIFS_TIME 32
+#define OFDM_HALF_PREAMBLE_TIME 40
+#define OFDM_HALF_PLCP_BITS 22
+#define OFDM_HALF_SYMBOL_TIME 8
+
+#define OFDM_QUARTER_SIFS_TIME 64
+#define OFDM_QUARTER_PREAMBLE_TIME 80
+#define OFDM_QUARTER_PLCP_BITS 22
+#define OFDM_QUARTER_SYMBOL_TIME 16
+
+#define TURBO_SIFS_TIME 8
+#define TURBO_PREAMBLE_TIME 14
+#define TURBO_PLCP_BITS 22
+#define TURBO_SYMBOL_TIME 4
+
+/*
+ * Compute the time to transmit a frame of length frameLen bytes
+ * using the specified rate, phy, and short preamble setting.
+ * SIFS is included.
+ */
+uint16_t
+ieee80211_compute_duration(const struct ieee80211_rate_table *rt,
+ uint32_t frameLen, uint16_t rate, int isShortPreamble)
+{
+ uint8_t rix = rt->rateCodeToIndex[rate];
+ uint32_t bitsPerSymbol, numBits, numSymbols, phyTime, txTime;
+ uint32_t kbps;
+
+ KASSERT(rix != (uint8_t)-1, ("rate %d has no info", rate));
+ kbps = rt->info[rix].rateKbps;
+ if (kbps == 0) /* XXX bandaid for channel changes */
+ return 0;
+
+ switch (rt->info[rix].phy) {
+ case IEEE80211_T_CCK:
+ phyTime = CCK_PREAMBLE_BITS + CCK_PLCP_BITS;
+ if (isShortPreamble && rt->info[rix].shortPreamble)
+ phyTime >>= 1;
+ numBits = frameLen << 3;
+ txTime = CCK_SIFS_TIME + phyTime
+ + ((numBits * 1000)/kbps);
+ break;
+ case IEEE80211_T_OFDM:
+ bitsPerSymbol = (kbps * OFDM_SYMBOL_TIME) / 1000;
+ KASSERT(bitsPerSymbol != 0, ("full rate bps"));
+
+ numBits = OFDM_PLCP_BITS + (frameLen << 3);
+ numSymbols = howmany(numBits, bitsPerSymbol);
+ txTime = OFDM_SIFS_TIME
+ + OFDM_PREAMBLE_TIME
+ + (numSymbols * OFDM_SYMBOL_TIME);
+ break;
+ case IEEE80211_T_OFDM_HALF:
+ bitsPerSymbol = (kbps * OFDM_HALF_SYMBOL_TIME) / 1000;
+ KASSERT(bitsPerSymbol != 0, ("1/4 rate bps"));
+
+ numBits = OFDM_PLCP_BITS + (frameLen << 3);
+ numSymbols = howmany(numBits, bitsPerSymbol);
+ txTime = OFDM_HALF_SIFS_TIME
+ + OFDM_HALF_PREAMBLE_TIME
+ + (numSymbols * OFDM_HALF_SYMBOL_TIME);
+ break;
+ case IEEE80211_T_OFDM_QUARTER:
+ bitsPerSymbol = (kbps * OFDM_QUARTER_SYMBOL_TIME) / 1000;
+ KASSERT(bitsPerSymbol != 0, ("1/2 rate bps"));
+
+ numBits = OFDM_PLCP_BITS + (frameLen << 3);
+ numSymbols = howmany(numBits, bitsPerSymbol);
+ txTime = OFDM_QUARTER_SIFS_TIME
+ + OFDM_QUARTER_PREAMBLE_TIME
+ + (numSymbols * OFDM_QUARTER_SYMBOL_TIME);
+ break;
+ case IEEE80211_T_TURBO:
+ /* we still save OFDM rates in kbps - so double them */
+ bitsPerSymbol = ((kbps << 1) * TURBO_SYMBOL_TIME) / 1000;
+ KASSERT(bitsPerSymbol != 0, ("turbo bps"));
+
+ numBits = TURBO_PLCP_BITS + (frameLen << 3);
+ numSymbols = howmany(numBits, bitsPerSymbol);
+ txTime = TURBO_SIFS_TIME + TURBO_PREAMBLE_TIME
+ + (numSymbols * TURBO_SYMBOL_TIME);
+ break;
+ default:
+ panic("%s: unknown phy %u (rate %u)\n", __func__,
+ rt->info[rix].phy, rate);
+ break;
+ }
+ return txTime;
+}
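+
+/*
+ * Worked example of the OFDM branch above: a 1500 byte frame at
+ * 54 Mb/s (kbps = 54000) gives
+ *   bitsPerSymbol = (54000 * OFDM_SYMBOL_TIME) / 1000 = 216
+ *   numBits       = OFDM_PLCP_BITS + (1500 << 3)      = 12022
+ *   numSymbols    = howmany(12022, 216)               = 56
+ *   txTime        = 16 + 20 + 56 * 4                  = 260 us,
+ * SIFS included.
+ */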
diff --git a/rtems/freebsd/net80211/ieee80211_phy.h b/rtems/freebsd/net80211/ieee80211_phy.h
new file mode 100644
index 00000000..af76e666
--- /dev/null
+++ b/rtems/freebsd/net80211/ieee80211_phy.h
@@ -0,0 +1,155 @@
+/*-
+ * Copyright (c) 2007-2008 Sam Leffler, Errno Consulting
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _NET80211_IEEE80211_PHY_HH_
+#define _NET80211_IEEE80211_PHY_HH_
+
+#ifdef _KERNEL
+/*
+ * IEEE 802.11 PHY-related definitions.
+ */
+
+/*
+ * Contention window (slots).
+ */
+#define IEEE80211_CW_MAX 1023 /* aCWmax */
+#define IEEE80211_CW_MIN_0 31 /* DS/CCK aCWmin, ERP aCWmin(0) */
+#define IEEE80211_CW_MIN_1 15 /* OFDM aCWmin, ERP aCWmin(1) */
+
+/*
+ * SIFS (microseconds).
+ */
+#define IEEE80211_DUR_SIFS 10 /* DS/CCK/ERP SIFS */
+#define IEEE80211_DUR_OFDM_SIFS 16 /* OFDM SIFS */
+
+/*
+ * Slot time (microseconds).
+ */
+#define IEEE80211_DUR_SLOT 20 /* DS/CCK slottime, ERP long slottime */
+#define IEEE80211_DUR_SHSLOT 9 /* ERP short slottime */
+#define IEEE80211_DUR_OFDM_SLOT 9 /* OFDM slottime */
+
+/*
+ * DIFS (microseconds).
+ */
+#define IEEE80211_DUR_DIFS(sifs, slot) ((sifs) + 2 * (slot))
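+/*
+ * For example, DS/CCK DIFS = IEEE80211_DUR_DIFS(10, 20) = 50 us,
+ * ERP short-slot DIFS = IEEE80211_DUR_DIFS(10, 9) = 28 us and
+ * OFDM DIFS = IEEE80211_DUR_DIFS(16, 9) = 34 us.
+ */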
+
+struct ieee80211_channel;
+
+struct ieee80211_rate_table {
+ int rateCount; /* NB: for proper padding */
+ uint8_t rateCodeToIndex[256]; /* back mapping */
+ struct {
+ uint8_t phy; /* CCK/OFDM/TURBO */
+ uint32_t rateKbps; /* transfer rate in kbs */
+ uint8_t shortPreamble; /* mask for enabling short
+ * preamble in CCK rate code */
+ uint8_t dot11Rate; /* value for supported rates
+ * info element of MLME */
+ uint8_t ctlRateIndex; /* index of next lower basic
+ * rate; used for dur. calcs */
+ uint16_t lpAckDuration; /* long preamble ACK dur. */
+ uint16_t spAckDuration; /* short preamble ACK dur. */
+ } info[32];
+};
+
+const struct ieee80211_rate_table *ieee80211_get_ratetable(
+ struct ieee80211_channel *);
+
+static __inline__ uint8_t
+ieee80211_ack_rate(const struct ieee80211_rate_table *rt, uint8_t rate)
+{
+ uint8_t cix = rt->info[rt->rateCodeToIndex[rate]].ctlRateIndex;
+ KASSERT(cix != (uint8_t)-1, ("rate %d has no info", rate));
+ return rt->info[cix].dot11Rate;
+}
+
+static __inline__ uint8_t
+ieee80211_ctl_rate(const struct ieee80211_rate_table *rt, uint8_t rate)
+{
+ uint8_t cix = rt->info[rt->rateCodeToIndex[rate]].ctlRateIndex;
+ KASSERT(cix != (uint8_t)-1, ("rate %d has no info", rate));
+ return rt->info[cix].dot11Rate;
+}
+
+static __inline__ enum ieee80211_phytype
+ieee80211_rate2phytype(const struct ieee80211_rate_table *rt, uint8_t rate)
+{
+ uint8_t rix = rt->rateCodeToIndex[rate];
+ KASSERT(rix != (uint8_t)-1, ("rate %d has no info", rate));
+ return rt->info[rix].phy;
+}
+
+static __inline__ int
+ieee80211_isratevalid(const struct ieee80211_rate_table *rt, uint8_t rate)
+{
+ return rt->rateCodeToIndex[rate] != (uint8_t)-1;
+}
+
+/*
+ * Calculate the ACK duration for
+ * o non-fragment data frames
+ * o management frames
+ * sent using rate, phy and short preamble setting.
+ */
+static __inline__ uint16_t
+ieee80211_ack_duration(const struct ieee80211_rate_table *rt,
+ uint8_t rate, int isShortPreamble)
+{
+ uint8_t rix = rt->rateCodeToIndex[rate];
+
+ KASSERT(rix != (uint8_t)-1, ("rate %d has no info", rate));
+ if (isShortPreamble) {
+ KASSERT(rt->info[rix].spAckDuration != 0,
+ ("shpreamble ack dur is not computed!\n"));
+ return rt->info[rix].spAckDuration;
+ } else {
+ KASSERT(rt->info[rix].lpAckDuration != 0,
+ ("lgpreamble ack dur is not computed!\n"));
+ return rt->info[rix].lpAckDuration;
+ }
+}
+
+/*
+ * Compute the time to transmit a frame of length frameLen bytes
+ * using the specified 802.11 rate code, phy, and short preamble
+ * setting.
+ *
+ * NB: SIFS is included.
+ */
+uint16_t ieee80211_compute_duration(const struct ieee80211_rate_table *,
+ uint32_t frameLen, uint16_t rate, int isShortPreamble);
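+
+/*
+ * The duration helpers compose naturally; a hypothetical helper (not
+ * part of this API) estimating the airtime of a data frame plus its
+ * ACK could be sketched as:
+ *
+ *	uint32_t airtime = ieee80211_compute_duration(rt, frameLen,
+ *	    rate, shortPreamble) +
+ *	    ieee80211_ack_duration(rt, rate, shortPreamble);
+ */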
+/*
+ * Convert PLCP signal/rate field to 802.11 rate code (.5Mbits/s)
+ */
+uint8_t ieee80211_plcp2rate(uint8_t, enum ieee80211_phytype);
+/*
+ * Convert 802.11 rate code to PLCP signal.
+ */
+uint8_t ieee80211_rate2plcp(int, enum ieee80211_phytype);
+#endif /* _KERNEL */
+#endif /* !_NET80211_IEEE80211_PHY_HH_ */
diff --git a/rtems/freebsd/net80211/ieee80211_power.c b/rtems/freebsd/net80211/ieee80211_power.c
new file mode 100644
index 00000000..60aa230e
--- /dev/null
+++ b/rtems/freebsd/net80211/ieee80211_power.c
@@ -0,0 +1,529 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 2002-2008 Sam Leffler, Errno Consulting
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * IEEE 802.11 power save support.
+ */
+#include <rtems/freebsd/local/opt_wlan.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/kernel.h>
+
+#include <rtems/freebsd/sys/socket.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/if_media.h>
+#include <rtems/freebsd/net/ethernet.h>
+
+#include <rtems/freebsd/net80211/ieee80211_var.h>
+
+#include <rtems/freebsd/net/bpf.h>
+
+static void ieee80211_update_ps(struct ieee80211vap *, int);
+static int ieee80211_set_tim(struct ieee80211_node *, int);
+
+MALLOC_DEFINE(M_80211_POWER, "80211power", "802.11 power save state");
+
+void
+ieee80211_power_attach(struct ieee80211com *ic)
+{
+}
+
+void
+ieee80211_power_detach(struct ieee80211com *ic)
+{
+}
+
+void
+ieee80211_power_vattach(struct ieee80211vap *vap)
+{
+ if (vap->iv_opmode == IEEE80211_M_HOSTAP ||
+ vap->iv_opmode == IEEE80211_M_IBSS) {
+ /* NB: driver should override */
+ vap->iv_update_ps = ieee80211_update_ps;
+ vap->iv_set_tim = ieee80211_set_tim;
+ }
+}
+
+void
+ieee80211_power_latevattach(struct ieee80211vap *vap)
+{
+ /*
+ * Allocate these only if needed. Beware that we
+ * know adhoc mode doesn't support ATIM yet...
+ */
+ if (vap->iv_opmode == IEEE80211_M_HOSTAP) {
+ vap->iv_tim_len = howmany(vap->iv_max_aid,8) * sizeof(uint8_t);
+ vap->iv_tim_bitmap = (uint8_t *) malloc(vap->iv_tim_len,
+ M_80211_POWER, M_NOWAIT | M_ZERO);
+ if (vap->iv_tim_bitmap == NULL) {
+ printf("%s: no memory for TIM bitmap!\n", __func__);
+ /* XXX good enough to keep from crashing? */
+ vap->iv_tim_len = 0;
+ }
+ }
+}
+
+void
+ieee80211_power_vdetach(struct ieee80211vap *vap)
+{
+ if (vap->iv_tim_bitmap != NULL) {
+ free(vap->iv_tim_bitmap, M_80211_POWER);
+ vap->iv_tim_bitmap = NULL;
+ }
+}
+
+void
+ieee80211_psq_init(struct ieee80211_psq *psq, const char *name)
+{
+ memset(psq, 0, sizeof(*psq));
+ psq->psq_maxlen = IEEE80211_PS_MAX_QUEUE;
+ IEEE80211_PSQ_INIT(psq, name); /* OS-dependent setup */
+}
+
+void
+ieee80211_psq_cleanup(struct ieee80211_psq *psq)
+{
+#if 0
+ psq_drain(psq); /* XXX should not be needed? */
+#else
+ KASSERT(psq->psq_len == 0, ("%d frames on ps q", psq->psq_len));
+#endif
+ IEEE80211_PSQ_DESTROY(psq); /* OS-dependent cleanup */
+}
+
+/*
+ * Return the highest priority frame in the ps queue.
+ */
+struct mbuf *
+ieee80211_node_psq_dequeue(struct ieee80211_node *ni, int *qlen)
+{
+ struct ieee80211_psq *psq = &ni->ni_psq;
+ struct ieee80211_psq_head *qhead;
+ struct mbuf *m;
+
+ IEEE80211_PSQ_LOCK(psq);
+ qhead = &psq->psq_head[0];
+again:
+ if ((m = qhead->head) != NULL) {
+ if ((qhead->head = m->m_nextpkt) == NULL)
+ qhead->tail = NULL;
+ KASSERT(qhead->len > 0, ("qhead len %d", qhead->len));
+ qhead->len--;
+ KASSERT(psq->psq_len > 0, ("psq len %d", psq->psq_len));
+ psq->psq_len--;
+ m->m_nextpkt = NULL;
+ }
+ if (m == NULL && qhead == &psq->psq_head[0]) {
+ /* Algol-68 style for loop */
+ qhead = &psq->psq_head[1];
+ goto again;
+ }
+ if (qlen != NULL)
+ *qlen = psq->psq_len;
+ IEEE80211_PSQ_UNLOCK(psq);
+ return m;
+}
+
+/*
+ * Reclaim an mbuf from the ps q. If marked with M_ENCAP
+ * we assume there is a node reference that must be reclaimed.
+ */
+static void
+psq_mfree(struct mbuf *m)
+{
+ if (m->m_flags & M_ENCAP) {
+ struct ieee80211_node *ni = (void *) m->m_pkthdr.rcvif;
+ ieee80211_free_node(ni);
+ }
+ m->m_nextpkt = NULL;
+ m_freem(m);
+}
+
+/*
+ * Clear any frames queued in the power save queue.
+ * The number of frames that were present is returned.
+ */
+static int
+psq_drain(struct ieee80211_psq *psq)
+{
+ struct ieee80211_psq_head *qhead;
+ struct mbuf *m;
+ int qlen;
+
+ IEEE80211_PSQ_LOCK(psq);
+ qlen = psq->psq_len;
+ qhead = &psq->psq_head[0];
+again:
+ while ((m = qhead->head) != NULL) {
+ qhead->head = m->m_nextpkt;
+ psq_mfree(m);
+ }
+ qhead->tail = NULL;
+ qhead->len = 0;
+ if (qhead == &psq->psq_head[0]) { /* Algol-68 style for loop */
+ qhead = &psq->psq_head[1];
+ goto again;
+ }
+ psq->psq_len = 0;
+ IEEE80211_PSQ_UNLOCK(psq);
+
+ return qlen;
+}
+
+/*
+ * Clear any frames queued in the power save queue.
+ * The number of frames that were present is returned.
+ */
+int
+ieee80211_node_psq_drain(struct ieee80211_node *ni)
+{
+ return psq_drain(&ni->ni_psq);
+}
+
+/*
+ * Age frames on the power save queue. The aging interval is
+ * 4 times the listen interval specified by the station. This
+ * number is factored into the age calculations when the frame
+ * is placed on the queue. We store ages as time differences
+ * so we can check and/or adjust only the head of the list.
+ * If a frame's age exceeds the threshold then discard it.
+ * The number of frames discarded is returned so the caller
+ * can check if it needs to adjust the tim.
+ */
+int
+ieee80211_node_psq_age(struct ieee80211_node *ni)
+{
+ struct ieee80211_psq *psq = &ni->ni_psq;
+ int discard = 0;
+
+ if (psq->psq_len != 0) {
+#ifdef IEEE80211_DEBUG
+ struct ieee80211vap *vap = ni->ni_vap;
+#endif
+ struct ieee80211_psq_head *qhead;
+ struct mbuf *m;
+
+ IEEE80211_PSQ_LOCK(psq);
+ qhead = &psq->psq_head[0];
+ again:
+ while ((m = qhead->head) != NULL &&
+ M_AGE_GET(m) < IEEE80211_INACT_WAIT) {
+ IEEE80211_NOTE(vap, IEEE80211_MSG_POWER, ni,
+ "discard frame, age %u", M_AGE_GET(m));
+ if ((qhead->head = m->m_nextpkt) == NULL)
+ qhead->tail = NULL;
+ KASSERT(qhead->len > 0, ("qhead len %d", qhead->len));
+ qhead->len--;
+ KASSERT(psq->psq_len > 0, ("psq len %d", psq->psq_len));
+ psq->psq_len--;
+ psq_mfree(m);
+ discard++;
+ }
+ if (qhead == &psq->psq_head[0]) { /* Algol-68 style for loop */
+ qhead = &psq->psq_head[1];
+ goto again;
+ }
+ if (m != NULL)
+ M_AGE_SUB(m, IEEE80211_INACT_WAIT);
+ IEEE80211_PSQ_UNLOCK(psq);
+
+ IEEE80211_NOTE(vap, IEEE80211_MSG_POWER, ni,
+ "discard %u frames for age", discard);
+ IEEE80211_NODE_STAT_ADD(ni, ps_discard, discard);
+ }
+ return discard;
+}
+
+/*
+ * Handle a change in the PS station occupancy.
+ */
+static void
+ieee80211_update_ps(struct ieee80211vap *vap, int nsta)
+{
+
+ KASSERT(vap->iv_opmode == IEEE80211_M_HOSTAP ||
+ vap->iv_opmode == IEEE80211_M_IBSS,
+ ("operating mode %u", vap->iv_opmode));
+}
+
+/*
+ * Indicate whether there are frames queued for a station in power-save mode.
+ */
+static int
+ieee80211_set_tim(struct ieee80211_node *ni, int set)
+{
+ struct ieee80211vap *vap = ni->ni_vap;
+ struct ieee80211com *ic = ni->ni_ic;
+ uint16_t aid;
+ int changed;
+
+ KASSERT(vap->iv_opmode == IEEE80211_M_HOSTAP ||
+ vap->iv_opmode == IEEE80211_M_IBSS,
+ ("operating mode %u", vap->iv_opmode));
+
+ aid = IEEE80211_AID(ni->ni_associd);
+ KASSERT(aid < vap->iv_max_aid,
+ ("bogus aid %u, max %u", aid, vap->iv_max_aid));
+
+ IEEE80211_LOCK(ic);
+ changed = (set != (isset(vap->iv_tim_bitmap, aid) != 0));
+ if (changed) {
+ if (set) {
+ setbit(vap->iv_tim_bitmap, aid);
+ vap->iv_ps_pending++;
+ } else {
+ clrbit(vap->iv_tim_bitmap, aid);
+ vap->iv_ps_pending--;
+ }
+ /* NB: we know vap is in RUN state so no need to check */
+ vap->iv_update_beacon(vap, IEEE80211_BEACON_TIM);
+ }
+ IEEE80211_UNLOCK(ic);
+
+ return changed;
+}
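+
+/*
+ * For example, for a station with AID 12 the setbit/clrbit calls above
+ * touch byte 12/8 = 1, bit 12%8 = 4 of iv_tim_bitmap; the beacon
+ * update path then recomputes the partial virtual bitmap window
+ * (timoff/timlen) from the first and last non-zero bytes.
+ */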
+
+/*
+ * Save an outbound packet for a node in power-save sleep state.
+ * The new packet is placed on the node's saved queue, and the TIM
+ * is changed, if necessary.
+ */
+int
+ieee80211_pwrsave(struct ieee80211_node *ni, struct mbuf *m)
+{
+ struct ieee80211_psq *psq = &ni->ni_psq;
+ struct ieee80211vap *vap = ni->ni_vap;
+ struct ieee80211com *ic = ni->ni_ic;
+ struct ieee80211_psq_head *qhead;
+ int qlen, age;
+
+ IEEE80211_PSQ_LOCK(psq);
+ if (psq->psq_len >= psq->psq_maxlen) {
+ psq->psq_drops++;
+ IEEE80211_PSQ_UNLOCK(psq);
+ IEEE80211_NOTE(vap, IEEE80211_MSG_ANY, ni,
+ "pwr save q overflow, drops %d (size %d)",
+ psq->psq_drops, psq->psq_len);
+#ifdef IEEE80211_DEBUG
+ if (ieee80211_msg_dumppkts(vap))
+ ieee80211_dump_pkt(ni->ni_ic, mtod(m, caddr_t),
+ m->m_len, -1, -1);
+#endif
+ psq_mfree(m);
+ return ENOSPC;
+ }
+ /*
+ * Tag the frame with its expiry time and insert it in
+ * the appropriate queue. The aging interval is 4 times
+ * the listen interval specified by the station. Frames
+ * that sit around too long are reclaimed using this
+ * information.
+ */
+ /* TU -> secs. XXX handle overflow? */
+ age = IEEE80211_TU_TO_MS((ni->ni_intval * ic->ic_bintval) << 2) / 1000;
+ /*
+ * Encapsulated frames go on the high priority queue,
+ * other stuff goes on the low priority queue. We use
+ * this to order frames returned out of the driver
+ * ahead of frames we collect in ieee80211_start.
+ */
+ if (m->m_flags & M_ENCAP)
+ qhead = &psq->psq_head[0];
+ else
+ qhead = &psq->psq_head[1];
+ if (qhead->tail == NULL) {
+ struct mbuf *mh;
+
+ qhead->head = m;
+ /*
+ * Take care to adjust age when inserting the first
+ * frame of a queue and the other queue already has
+ * frames. We need to preserve the age difference
+ * relationship so ieee80211_node_psq_age works.
+ */
+ if (qhead == &psq->psq_head[1]) {
+ mh = psq->psq_head[0].head;
+ if (mh != NULL)
+ age -= M_AGE_GET(mh);
+ } else {
+ mh = psq->psq_head[1].head;
+ if (mh != NULL) {
+ int nage = M_AGE_GET(mh) - age;
+ /* XXX is clamping to zero good 'nuf? */
+ M_AGE_SET(mh, nage < 0 ? 0 : nage);
+ }
+ }
+ } else {
+ qhead->tail->m_nextpkt = m;
+ age -= M_AGE_GET(qhead->head);
+ }
+ KASSERT(age >= 0, ("age %d", age));
+ M_AGE_SET(m, age);
+ m->m_nextpkt = NULL;
+ qhead->tail = m;
+ qhead->len++;
+ qlen = ++(psq->psq_len);
+ IEEE80211_PSQ_UNLOCK(psq);
+
+ IEEE80211_NOTE(vap, IEEE80211_MSG_POWER, ni,
+ "save frame with age %d, %u now queued", age, qlen);
+
+ if (qlen == 1 && vap->iv_set_tim != NULL)
+ vap->iv_set_tim(ni, 1);
+
+ return 0;
+}
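+
+/*
+ * Example of the aging math above: with a beacon interval of 100 TU
+ * and a listen interval of 10 beacons, a frame is tagged with an age
+ * of IEEE80211_TU_TO_MS((10 * 100) << 2) / 1000, i.e. about 4 seconds
+ * (4000 TU ~= 4096 ms), before ieee80211_node_psq_age reclaims it.
+ */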
+
+/*
+ * Move frames from the ps q to the vap's send queue
+ * and/or the driver's send queue; and kick the start
+ * method for each, as appropriate. Note we're careful
+ * to preserve packet ordering here.
+ */
+static void
+pwrsave_flushq(struct ieee80211_node *ni)
+{
+ struct ieee80211_psq *psq = &ni->ni_psq;
+ struct ieee80211vap *vap = ni->ni_vap;
+ struct ieee80211_psq_head *qhead;
+ struct ifnet *parent, *ifp;
+
+ IEEE80211_NOTE(vap, IEEE80211_MSG_POWER, ni,
+ "flush ps queue, %u packets queued", psq->psq_len);
+
+ IEEE80211_PSQ_LOCK(psq);
+ qhead = &psq->psq_head[0]; /* 802.11 frames */
+ if (qhead->head != NULL) {
+ /* XXX could dispatch through vap and check M_ENCAP */
+ parent = vap->iv_ic->ic_ifp;
+ /* XXX need different driver interface */
+ /* XXX bypasses q max and OACTIVE */
+ IF_PREPEND_LIST(&parent->if_snd, qhead->head, qhead->tail,
+ qhead->len);
+ qhead->head = qhead->tail = NULL;
+ qhead->len = 0;
+ } else
+ parent = NULL;
+
+ qhead = &psq->psq_head[1]; /* 802.3 frames */
+ if (qhead->head != NULL) {
+ ifp = vap->iv_ifp;
+ /* XXX need different driver interface */
+ /* XXX bypasses q max and OACTIVE */
+ IF_PREPEND_LIST(&ifp->if_snd, qhead->head, qhead->tail,
+ qhead->len);
+ qhead->head = qhead->tail = NULL;
+ qhead->len = 0;
+ } else
+ ifp = NULL;
+ psq->psq_len = 0;
+ IEEE80211_PSQ_UNLOCK(psq);
+
+ /* NB: do this outside the psq lock */
+ /* XXX packets might get reordered if parent is OACTIVE */
+ if (parent != NULL)
+ if_start(parent);
+ if (ifp != NULL)
+ if_start(ifp);
+}
+
+/*
+ * Handle station power-save state change.
+ */
+void
+ieee80211_node_pwrsave(struct ieee80211_node *ni, int enable)
+{
+ struct ieee80211vap *vap = ni->ni_vap;
+ int update;
+
+ update = 0;
+ if (enable) {
+ if ((ni->ni_flags & IEEE80211_NODE_PWR_MGT) == 0) {
+ vap->iv_ps_sta++;
+ update = 1;
+ }
+ ni->ni_flags |= IEEE80211_NODE_PWR_MGT;
+ IEEE80211_NOTE(vap, IEEE80211_MSG_POWER, ni,
+ "power save mode on, %u sta's in ps mode", vap->iv_ps_sta);
+
+ if (update)
+ vap->iv_update_ps(vap, vap->iv_ps_sta);
+ } else {
+ if (ni->ni_flags & IEEE80211_NODE_PWR_MGT) {
+ vap->iv_ps_sta--;
+ update = 1;
+ }
+ ni->ni_flags &= ~IEEE80211_NODE_PWR_MGT;
+ IEEE80211_NOTE(vap, IEEE80211_MSG_POWER, ni,
+ "power save mode off, %u sta's in ps mode", vap->iv_ps_sta);
+
+ /* NB: order here is intentional so TIM is clear before flush */
+ if (vap->iv_set_tim != NULL)
+ vap->iv_set_tim(ni, 0);
+ if (update) {
+ /* NB if no sta's in ps, driver should flush mc q */
+ vap->iv_update_ps(vap, vap->iv_ps_sta);
+ }
+ if (ni->ni_psq.psq_len != 0)
+ pwrsave_flushq(ni);
+ }
+}
+
+/*
+ * Handle power-save state change in station mode.
+ */
+void
+ieee80211_sta_pwrsave(struct ieee80211vap *vap, int enable)
+{
+ struct ieee80211_node *ni = vap->iv_bss;
+
+ if (!((enable != 0) ^ ((ni->ni_flags & IEEE80211_NODE_PWR_MGT) != 0)))
+ return;
+
+ IEEE80211_NOTE(vap, IEEE80211_MSG_POWER, ni,
+ "sta power save mode %s", enable ? "on" : "off");
+ if (!enable) {
+ ni->ni_flags &= ~IEEE80211_NODE_PWR_MGT;
+ ieee80211_send_nulldata(ieee80211_ref_node(ni));
+ /*
+ * Flush any queued frames; we can do this immediately
+ * because we know they'll be queued behind the null
+ * data frame we send to the ap.
+ * XXX can we use a data frame to take us out of ps?
+ */
+ if (ni->ni_psq.psq_len != 0)
+ pwrsave_flushq(ni);
+ } else {
+ ni->ni_flags |= IEEE80211_NODE_PWR_MGT;
+ ieee80211_send_nulldata(ieee80211_ref_node(ni));
+ }
+}
diff --git a/rtems/freebsd/net80211/ieee80211_power.h b/rtems/freebsd/net80211/ieee80211_power.h
new file mode 100644
index 00000000..6cb0eab8
--- /dev/null
+++ b/rtems/freebsd/net80211/ieee80211_power.h
@@ -0,0 +1,79 @@
+/*-
+ * Copyright (c) 2002-2008 Sam Leffler, Errno Consulting
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+#ifndef _NET80211_IEEE80211_POWER_HH_
+#define _NET80211_IEEE80211_POWER_HH_
+
+struct ieee80211com;
+struct ieee80211vap;
+struct ieee80211_node;
+struct mbuf;
+
+/*
+ * Power save packet queues. There are two queues, one
+ * for frames coming from the net80211 layer and the other
+ * for frames that come from the driver. Frames from the
+ * driver are expected to have M_ENCAP marked to indicate
+ * they have already been encapsulated and are treated as
+ * higher priority: they are sent first when flushing the
+ * queue on a power save state change or in response to a
+ * ps-poll frame.
+ *
+ * Note that frames sent from the high priority queue are
+ * fed directly to the driver without going through
+ * ieee80211_start again; drivers that send up encap'd
+ * frames are required to handle them when they come back.
+ */
+struct ieee80211_psq {
+ ieee80211_psq_lock_t psq_lock;
+ int psq_len;
+ int psq_maxlen;
+ int psq_drops;
+ struct ieee80211_psq_head {
+ struct mbuf *head;
+ struct mbuf *tail;
+ int len;
+ } psq_head[2]; /* 2 priorities */
+};
+
+void ieee80211_psq_init(struct ieee80211_psq *, const char *);
+void ieee80211_psq_cleanup(struct ieee80211_psq *);
+
+void ieee80211_power_attach(struct ieee80211com *);
+void ieee80211_power_detach(struct ieee80211com *);
+void ieee80211_power_vattach(struct ieee80211vap *);
+void ieee80211_power_vdetach(struct ieee80211vap *);
+void ieee80211_power_latevattach(struct ieee80211vap *);
+
+struct mbuf *ieee80211_node_psq_dequeue(struct ieee80211_node *ni, int *qlen);
+int ieee80211_node_psq_drain(struct ieee80211_node *);
+int ieee80211_node_psq_age(struct ieee80211_node *);
+int ieee80211_pwrsave(struct ieee80211_node *, struct mbuf *);
+void ieee80211_node_pwrsave(struct ieee80211_node *, int enable);
+void ieee80211_sta_pwrsave(struct ieee80211vap *, int enable);
+
+void ieee80211_power_poll(struct ieee80211com *);
+#endif /* _NET80211_IEEE80211_POWER_HH_ */
diff --git a/rtems/freebsd/net80211/ieee80211_proto.c b/rtems/freebsd/net80211/ieee80211_proto.c
new file mode 100644
index 00000000..10c7d9ca
--- /dev/null
+++ b/rtems/freebsd/net80211/ieee80211_proto.c
@@ -0,0 +1,1888 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 2001 Atsushi Onoe
+ * Copyright (c) 2002-2008 Sam Leffler, Errno Consulting
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * IEEE 802.11 protocol support.
+ */
+
+#include <rtems/freebsd/local/opt_inet.h>
+#include <rtems/freebsd/local/opt_wlan.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/systm.h>
+
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/sockio.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/if_media.h>
+#include <rtems/freebsd/net/ethernet.h> /* XXX for ether_sprintf */
+
+#include <rtems/freebsd/net80211/ieee80211_var.h>
+#include <rtems/freebsd/net80211/ieee80211_adhoc.h>
+#include <rtems/freebsd/net80211/ieee80211_sta.h>
+#include <rtems/freebsd/net80211/ieee80211_hostap.h>
+#include <rtems/freebsd/net80211/ieee80211_wds.h>
+#ifdef IEEE80211_SUPPORT_MESH
+#include <rtems/freebsd/net80211/ieee80211_mesh.h>
+#endif
+#include <rtems/freebsd/net80211/ieee80211_monitor.h>
+#include <rtems/freebsd/net80211/ieee80211_input.h>
+
+/* XXX tunables */
+#define AGGRESSIVE_MODE_SWITCH_HYSTERESIS 3 /* pkts / 100ms */
+#define HIGH_PRI_SWITCH_THRESH 10 /* pkts / 100ms */
+
+const char *ieee80211_mgt_subtype_name[] = {
+ "assoc_req", "assoc_resp", "reassoc_req", "reassoc_resp",
+ "probe_req", "probe_resp", "reserved#6", "reserved#7",
+ "beacon", "atim", "disassoc", "auth",
+ "deauth", "action", "reserved#14", "reserved#15"
+};
+const char *ieee80211_ctl_subtype_name[] = {
+ "reserved#0", "reserved#1", "reserved#2", "reserved#3",
+ "reserved#3", "reserved#5", "reserved#6", "reserved#7",
+ "reserved#8", "reserved#9", "ps_poll", "rts",
+ "cts", "ack", "cf_end", "cf_end_ack"
+};
+const char *ieee80211_opmode_name[IEEE80211_OPMODE_MAX] = {
+ "IBSS", /* IEEE80211_M_IBSS */
+ "STA", /* IEEE80211_M_STA */
+ "WDS", /* IEEE80211_M_WDS */
+ "AHDEMO", /* IEEE80211_M_AHDEMO */
+ "HOSTAP", /* IEEE80211_M_HOSTAP */
+ "MONITOR", /* IEEE80211_M_MONITOR */
+ "MBSS" /* IEEE80211_M_MBSS */
+};
+const char *ieee80211_state_name[IEEE80211_S_MAX] = {
+ "INIT", /* IEEE80211_S_INIT */
+ "SCAN", /* IEEE80211_S_SCAN */
+ "AUTH", /* IEEE80211_S_AUTH */
+ "ASSOC", /* IEEE80211_S_ASSOC */
+ "CAC", /* IEEE80211_S_CAC */
+ "RUN", /* IEEE80211_S_RUN */
+ "CSA", /* IEEE80211_S_CSA */
+ "SLEEP", /* IEEE80211_S_SLEEP */
+};
+const char *ieee80211_wme_acnames[] = {
+ "WME_AC_BE",
+ "WME_AC_BK",
+ "WME_AC_VI",
+ "WME_AC_VO",
+ "WME_UPSD",
+};
+
+static void beacon_miss(void *, int);
+static void beacon_swmiss(void *, int);
+static void parent_updown(void *, int);
+static void update_mcast(void *, int);
+static void update_promisc(void *, int);
+static void update_channel(void *, int);
+static void ieee80211_newstate_cb(void *, int);
+static int ieee80211_new_state_locked(struct ieee80211vap *,
+ enum ieee80211_state, int);
+
+static int
+null_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
+ const struct ieee80211_bpf_params *params)
+{
+ struct ifnet *ifp = ni->ni_ic->ic_ifp;
+
+ if_printf(ifp, "missing ic_raw_xmit callback, drop frame\n");
+ m_freem(m);
+ return ENETDOWN;
+}
+
+void
+ieee80211_proto_attach(struct ieee80211com *ic)
+{
+ struct ifnet *ifp = ic->ic_ifp;
+
+ /* override the 802.3 setting */
+ ifp->if_hdrlen = ic->ic_headroom
+ + sizeof(struct ieee80211_qosframe_addr4)
+ + IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN
+ + IEEE80211_WEP_EXTIVLEN;
+ /* XXX no way to recalculate on ifdetach */
+ if (ALIGN(ifp->if_hdrlen) > max_linkhdr) {
+ /* XXX sanity check... */
+ max_linkhdr = ALIGN(ifp->if_hdrlen);
+ max_hdr = max_linkhdr + max_protohdr;
+ max_datalen = MHLEN - max_hdr;
+ }
+ ic->ic_protmode = IEEE80211_PROT_CTSONLY;
+
+ TASK_INIT(&ic->ic_parent_task, 0, parent_updown, ifp);
+ TASK_INIT(&ic->ic_mcast_task, 0, update_mcast, ic);
+ TASK_INIT(&ic->ic_promisc_task, 0, update_promisc, ic);
+ TASK_INIT(&ic->ic_chan_task, 0, update_channel, ic);
+ TASK_INIT(&ic->ic_bmiss_task, 0, beacon_miss, ic);
+
+ ic->ic_wme.wme_hipri_switch_hysteresis =
+ AGGRESSIVE_MODE_SWITCH_HYSTERESIS;
+
+ /* initialize management frame handlers */
+ ic->ic_send_mgmt = ieee80211_send_mgmt;
+ ic->ic_raw_xmit = null_raw_xmit;
+
+ ieee80211_adhoc_attach(ic);
+ ieee80211_sta_attach(ic);
+ ieee80211_wds_attach(ic);
+ ieee80211_hostap_attach(ic);
+#ifdef IEEE80211_SUPPORT_MESH
+ ieee80211_mesh_attach(ic);
+#endif
+ ieee80211_monitor_attach(ic);
+}
+
+void
+ieee80211_proto_detach(struct ieee80211com *ic)
+{
+ ieee80211_monitor_detach(ic);
+#ifdef IEEE80211_SUPPORT_MESH
+ ieee80211_mesh_detach(ic);
+#endif
+ ieee80211_hostap_detach(ic);
+ ieee80211_wds_detach(ic);
+ ieee80211_adhoc_detach(ic);
+ ieee80211_sta_detach(ic);
+}
+
+static void
+null_update_beacon(struct ieee80211vap *vap, int item)
+{
+}
+
+void
+ieee80211_proto_vattach(struct ieee80211vap *vap)
+{
+ struct ieee80211com *ic = vap->iv_ic;
+ struct ifnet *ifp = vap->iv_ifp;
+ int i;
+
+ /* override the 802.3 setting */
+ ifp->if_hdrlen = ic->ic_ifp->if_hdrlen;
+
+ vap->iv_rtsthreshold = IEEE80211_RTS_DEFAULT;
+ vap->iv_fragthreshold = IEEE80211_FRAG_DEFAULT;
+ vap->iv_bmiss_max = IEEE80211_BMISS_MAX;
+ callout_init(&vap->iv_swbmiss, CALLOUT_MPSAFE);
+ callout_init(&vap->iv_mgtsend, CALLOUT_MPSAFE);
+ TASK_INIT(&vap->iv_nstate_task, 0, ieee80211_newstate_cb, vap);
+ TASK_INIT(&vap->iv_swbmiss_task, 0, beacon_swmiss, vap);
+ /*
+ * Install default tx rate handling: no fixed rate, lowest
+ * supported rate for mgmt and multicast frames. Default
+ * max retry count. These settings can be changed by the
+ * driver and/or user applications.
+ */
+ for (i = IEEE80211_MODE_11A; i < IEEE80211_MODE_MAX; i++) {
+ const struct ieee80211_rateset *rs = &ic->ic_sup_rates[i];
+
+ vap->iv_txparms[i].ucastrate = IEEE80211_FIXED_RATE_NONE;
+ if (i == IEEE80211_MODE_11NA || i == IEEE80211_MODE_11NG) {
+ vap->iv_txparms[i].mgmtrate = 0 | IEEE80211_RATE_MCS;
+ vap->iv_txparms[i].mcastrate = 0 | IEEE80211_RATE_MCS;
+ } else {
+ vap->iv_txparms[i].mgmtrate =
+ rs->rs_rates[0] & IEEE80211_RATE_VAL;
+ vap->iv_txparms[i].mcastrate =
+ rs->rs_rates[0] & IEEE80211_RATE_VAL;
+ }
+ vap->iv_txparms[i].maxretry = IEEE80211_TXMAX_DEFAULT;
+ }
+ vap->iv_roaming = IEEE80211_ROAMING_AUTO;
+
+ vap->iv_update_beacon = null_update_beacon;
+ vap->iv_deliver_data = ieee80211_deliver_data;
+
+ /* attach support for operating mode */
+ ic->ic_vattach[vap->iv_opmode](vap);
+}
+
+void
+ieee80211_proto_vdetach(struct ieee80211vap *vap)
+{
+#define FREEAPPIE(ie) do { \
+ if (ie != NULL) \
+ free(ie, M_80211_NODE_IE); \
+} while (0)
+ /*
+ * Detach operating mode module.
+ */
+ if (vap->iv_opdetach != NULL)
+ vap->iv_opdetach(vap);
+ /*
+ * This should not be needed as we detach when resetting
+ * the state, but be conservative here since the
+ * authenticator may do things like spawn kernel threads.
+ */
+ if (vap->iv_auth->ia_detach != NULL)
+ vap->iv_auth->ia_detach(vap);
+ /*
+ * Detach any ACL'ator.
+ */
+ if (vap->iv_acl != NULL)
+ vap->iv_acl->iac_detach(vap);
+
+ FREEAPPIE(vap->iv_appie_beacon);
+ FREEAPPIE(vap->iv_appie_probereq);
+ FREEAPPIE(vap->iv_appie_proberesp);
+ FREEAPPIE(vap->iv_appie_assocreq);
+ FREEAPPIE(vap->iv_appie_assocresp);
+ FREEAPPIE(vap->iv_appie_wpa);
+#undef FREEAPPIE
+}
+
+/*
+ * Simple-minded authenticator module support.
+ */
+
+#define IEEE80211_AUTH_MAX (IEEE80211_AUTH_WPA+1)
+/* XXX well-known names */
+static const char *auth_modnames[IEEE80211_AUTH_MAX] = {
+ "wlan_internal", /* IEEE80211_AUTH_NONE */
+ "wlan_internal", /* IEEE80211_AUTH_OPEN */
+ "wlan_internal", /* IEEE80211_AUTH_SHARED */
+ "wlan_xauth", /* IEEE80211_AUTH_8021X */
+ "wlan_internal", /* IEEE80211_AUTH_AUTO */
+ "wlan_xauth", /* IEEE80211_AUTH_WPA */
+};
+static const struct ieee80211_authenticator *authenticators[IEEE80211_AUTH_MAX];
+
+static const struct ieee80211_authenticator auth_internal = {
+ .ia_name = "wlan_internal",
+ .ia_attach = NULL,
+ .ia_detach = NULL,
+ .ia_node_join = NULL,
+ .ia_node_leave = NULL,
+};
+
+/*
+ * Setup internal authenticators once; they are never unregistered.
+ */
+static void
+ieee80211_auth_setup(void)
+{
+ ieee80211_authenticator_register(IEEE80211_AUTH_OPEN, &auth_internal);
+ ieee80211_authenticator_register(IEEE80211_AUTH_SHARED, &auth_internal);
+ ieee80211_authenticator_register(IEEE80211_AUTH_AUTO, &auth_internal);
+}
+SYSINIT(wlan_auth, SI_SUB_DRIVERS, SI_ORDER_FIRST, ieee80211_auth_setup, NULL);
+
+const struct ieee80211_authenticator *
+ieee80211_authenticator_get(int auth)
+{
+ if (auth >= IEEE80211_AUTH_MAX)
+ return NULL;
+ if (authenticators[auth] == NULL)
+ ieee80211_load_module(auth_modnames[auth]);
+ return authenticators[auth];
+}
+
+void
+ieee80211_authenticator_register(int type,
+ const struct ieee80211_authenticator *auth)
+{
+ if (type >= IEEE80211_AUTH_MAX)
+ return;
+ authenticators[type] = auth;
+}
+
+void
+ieee80211_authenticator_unregister(int type)
+{
+
+ if (type >= IEEE80211_AUTH_MAX)
+ return;
+ authenticators[type] = NULL;
+}
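+
+/*
+ * A hypothetical loadable authenticator module (sketch only; the
+ * wlan_xauth module is the real example) would register itself as:
+ */
+#ifdef notyet
+static const struct ieee80211_authenticator xauth_sketch = {
+	.ia_name	= "wlan_xauth",
+	.ia_attach	= NULL,		/* per-vap state would go here */
+	.ia_detach	= NULL,
+	.ia_node_join	= NULL,
+	.ia_node_leave	= NULL,
+};
+
+static void
+xauth_sketch_load(void)
+{
+	ieee80211_authenticator_register(IEEE80211_AUTH_8021X, &xauth_sketch);
+	ieee80211_authenticator_register(IEEE80211_AUTH_WPA, &xauth_sketch);
+}
+#endif /* notyet */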
+
+/*
+ * Very simple-minded ACL module support.
+ */
+/* XXX just one for now */
+static const struct ieee80211_aclator *acl = NULL;
+
+void
+ieee80211_aclator_register(const struct ieee80211_aclator *iac)
+{
+ printf("wlan: %s acl policy registered\n", iac->iac_name);
+ acl = iac;
+}
+
+void
+ieee80211_aclator_unregister(const struct ieee80211_aclator *iac)
+{
+ if (acl == iac)
+ acl = NULL;
+ printf("wlan: %s acl policy unregistered\n", iac->iac_name);
+}
+
+const struct ieee80211_aclator *
+ieee80211_aclator_get(const char *name)
+{
+ if (acl == NULL)
+ ieee80211_load_module("wlan_acl");
+ return acl != NULL && strcmp(acl->iac_name, name) == 0 ? acl : NULL;
+}
+
+void
+ieee80211_print_essid(const uint8_t *essid, int len)
+{
+ const uint8_t *p;
+ int i;
+
+ if (len > IEEE80211_NWID_LEN)
+ len = IEEE80211_NWID_LEN;
+ /* determine printable or not */
+ for (i = 0, p = essid; i < len; i++, p++) {
+ if (*p < ' ' || *p > 0x7e)
+ break;
+ }
+ if (i == len) {
+ printf("\"");
+ for (i = 0, p = essid; i < len; i++, p++)
+ printf("%c", *p);
+ printf("\"");
+ } else {
+ printf("0x");
+ for (i = 0, p = essid; i < len; i++, p++)
+ printf("%02x", *p);
+ }
+}
+
+void
+ieee80211_dump_pkt(struct ieee80211com *ic,
+ const uint8_t *buf, int len, int rate, int rssi)
+{
+ const struct ieee80211_frame *wh;
+ int i;
+
+ wh = (const struct ieee80211_frame *)buf;
+ switch (wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) {
+ case IEEE80211_FC1_DIR_NODS:
+ printf("NODS %s", ether_sprintf(wh->i_addr2));
+ printf("->%s", ether_sprintf(wh->i_addr1));
+ printf("(%s)", ether_sprintf(wh->i_addr3));
+ break;
+ case IEEE80211_FC1_DIR_TODS:
+ printf("TODS %s", ether_sprintf(wh->i_addr2));
+ printf("->%s", ether_sprintf(wh->i_addr3));
+ printf("(%s)", ether_sprintf(wh->i_addr1));
+ break;
+ case IEEE80211_FC1_DIR_FROMDS:
+ printf("FRDS %s", ether_sprintf(wh->i_addr3));
+ printf("->%s", ether_sprintf(wh->i_addr1));
+ printf("(%s)", ether_sprintf(wh->i_addr2));
+ break;
+ case IEEE80211_FC1_DIR_DSTODS:
+ printf("DSDS %s", ether_sprintf((const uint8_t *)&wh[1]));
+ printf("->%s", ether_sprintf(wh->i_addr3));
+ printf("(%s", ether_sprintf(wh->i_addr2));
+ printf("->%s)", ether_sprintf(wh->i_addr1));
+ break;
+ }
+ switch (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) {
+ case IEEE80211_FC0_TYPE_DATA:
+ printf(" data");
+ break;
+ case IEEE80211_FC0_TYPE_MGT:
+ printf(" %s", ieee80211_mgt_subtype_name[
+ (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK)
+ >> IEEE80211_FC0_SUBTYPE_SHIFT]);
+ break;
+ default:
+ printf(" type#%d", wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK);
+ break;
+ }
+ if (IEEE80211_QOS_HAS_SEQ(wh)) {
+ const struct ieee80211_qosframe *qwh =
+ (const struct ieee80211_qosframe *)buf;
+ printf(" QoS [TID %u%s]", qwh->i_qos[0] & IEEE80211_QOS_TID,
+ qwh->i_qos[0] & IEEE80211_QOS_ACKPOLICY ? " ACM" : "");
+ }
+ if (wh->i_fc[1] & IEEE80211_FC1_WEP) {
+ int off;
+
+ off = ieee80211_anyhdrspace(ic, wh);
+ printf(" WEP [IV %.02x %.02x %.02x",
+ buf[off+0], buf[off+1], buf[off+2]);
+ if (buf[off+IEEE80211_WEP_IVLEN] & IEEE80211_WEP_EXTIV)
+ printf(" %.02x %.02x %.02x",
+ buf[off+4], buf[off+5], buf[off+6]);
+ printf(" KID %u]", buf[off+IEEE80211_WEP_IVLEN] >> 6);
+ }
+ if (rate >= 0)
+ printf(" %dM", rate / 2);
+ if (rssi >= 0)
+ printf(" +%d", rssi);
+ printf("\n");
+ if (len > 0) {
+ for (i = 0; i < len; i++) {
+ if ((i & 1) == 0)
+ printf(" ");
+ printf("%02x", buf[i]);
+ }
+ printf("\n");
+ }
+}
+
+static __inline int
+findrix(const struct ieee80211_rateset *rs, int r)
+{
+ int i;
+
+ for (i = 0; i < rs->rs_nrates; i++)
+ if ((rs->rs_rates[i] & IEEE80211_RATE_VAL) == r)
+ return i;
+ return -1;
+}
+
+int
+ieee80211_fix_rate(struct ieee80211_node *ni,
+ struct ieee80211_rateset *nrs, int flags)
+{
+#define RV(v) ((v) & IEEE80211_RATE_VAL)
+ struct ieee80211vap *vap = ni->ni_vap;
+ struct ieee80211com *ic = ni->ni_ic;
+ int i, j, rix, error;
+ int okrate, badrate, fixedrate, ucastrate;
+ const struct ieee80211_rateset *srs;
+ uint8_t r;
+
+ error = 0;
+ okrate = badrate = 0;
+ ucastrate = vap->iv_txparms[ieee80211_chan2mode(ni->ni_chan)].ucastrate;
+ if (ucastrate != IEEE80211_FIXED_RATE_NONE) {
+ /*
+ * Workaround awkwardness with fixed rate. We are called
+ * to check both the legacy rate set and the HT rate set
+ * but we must apply any legacy fixed rate check only to the
+ * legacy rate set and vice versa. We cannot tell what type
+ * of rate set we've been given (legacy or HT) but we can
+ * distinguish the fixed rate type (MCS rates have 0x80 set).
+ * So to deal with this the caller communicates whether to
+ * check MCS or legacy rate using the flags and we use the
+ * type of any fixed rate to avoid applying an MCS to a
+ * legacy rate and vice versa.
+ */
+ if (ucastrate & 0x80) {
+ if (flags & IEEE80211_F_DOFRATE)
+ flags &= ~IEEE80211_F_DOFRATE;
+ } else if ((ucastrate & 0x80) == 0) {
+ if (flags & IEEE80211_F_DOFMCS)
+ flags &= ~IEEE80211_F_DOFMCS;
+ }
+ /* NB: required to make MCS match below work */
+ ucastrate &= IEEE80211_RATE_VAL;
+ }
+ fixedrate = IEEE80211_FIXED_RATE_NONE;
+ /*
+ * XXX we are called to process both MCS and legacy rates;
+ * we must use the appropriate basic rate set or chaos will
+ * ensue; for now callers that want MCS must supply
+ * IEEE80211_F_DOBRS; at some point we'll need to split this
+ * function so there are two variants, one for MCS and one
+ * for legacy rates.
+ */
+ if (flags & IEEE80211_F_DOBRS)
+ srs = (const struct ieee80211_rateset *)
+ ieee80211_get_suphtrates(ic, ni->ni_chan);
+ else
+ srs = ieee80211_get_suprates(ic, ni->ni_chan);
+ for (i = 0; i < nrs->rs_nrates; ) {
+ if (flags & IEEE80211_F_DOSORT) {
+ /*
+ * Sort rates.
+ */
+ for (j = i + 1; j < nrs->rs_nrates; j++) {
+ if (RV(nrs->rs_rates[i]) > RV(nrs->rs_rates[j])) {
+ r = nrs->rs_rates[i];
+ nrs->rs_rates[i] = nrs->rs_rates[j];
+ nrs->rs_rates[j] = r;
+ }
+ }
+ }
+ r = nrs->rs_rates[i] & IEEE80211_RATE_VAL;
+ badrate = r;
+ /*
+ * Check for fixed rate.
+ */
+ if (r == ucastrate)
+ fixedrate = r;
+ /*
+ * Check against supported rates.
+ */
+ rix = findrix(srs, r);
+ if (flags & IEEE80211_F_DONEGO) {
+ if (rix < 0) {
+ /*
+ * A rate in the node's rate set is not
+ * supported. If this is a basic rate and we
+ * are operating as a STA then this is an error.
+ * Otherwise we just discard/ignore the rate.
+ */
+ if ((flags & IEEE80211_F_JOIN) &&
+ (nrs->rs_rates[i] & IEEE80211_RATE_BASIC))
+ error++;
+ } else if ((flags & IEEE80211_F_JOIN) == 0) {
+ /*
+ * Overwrite with the supported rate
+ * value so any basic rate bit is set.
+ */
+ nrs->rs_rates[i] = srs->rs_rates[rix];
+ }
+ }
+ if ((flags & IEEE80211_F_DODEL) && rix < 0) {
+ /*
+ * Delete unacceptable rates.
+ */
+ nrs->rs_nrates--;
+ for (j = i; j < nrs->rs_nrates; j++)
+ nrs->rs_rates[j] = nrs->rs_rates[j + 1];
+ nrs->rs_rates[j] = 0;
+ continue;
+ }
+ if (rix >= 0)
+ okrate = nrs->rs_rates[i];
+ i++;
+ }
+ if (okrate == 0 || error != 0 ||
+ ((flags & (IEEE80211_F_DOFRATE|IEEE80211_F_DOFMCS)) &&
+ fixedrate != ucastrate)) {
+ IEEE80211_NOTE(vap, IEEE80211_MSG_XRATE | IEEE80211_MSG_11N, ni,
+ "%s: flags 0x%x okrate %d error %d fixedrate 0x%x "
+ "ucastrate %x\n", __func__, fixedrate, ucastrate, flags);
+ return badrate | IEEE80211_RATE_BASIC;
+ } else
+ return RV(okrate);
+#undef RV
+}
+
+/*
+ * Reset 11g-related state.
+ */
+void
+ieee80211_reset_erp(struct ieee80211com *ic)
+{
+ ic->ic_flags &= ~IEEE80211_F_USEPROT;
+ ic->ic_nonerpsta = 0;
+ ic->ic_longslotsta = 0;
+ /*
+ * Short slot time is enabled only when operating in 11g
+ * and not in an IBSS. We must also honor whether or not
+ * the driver is capable of doing it.
+ */
+ ieee80211_set_shortslottime(ic,
+ IEEE80211_IS_CHAN_A(ic->ic_curchan) ||
+ IEEE80211_IS_CHAN_HT(ic->ic_curchan) ||
+ (IEEE80211_IS_CHAN_ANYG(ic->ic_curchan) &&
+ ic->ic_opmode == IEEE80211_M_HOSTAP &&
+ (ic->ic_caps & IEEE80211_C_SHSLOT)));
+ /*
+ * Set short preamble and ERP barker-preamble flags.
+ */
+ if (IEEE80211_IS_CHAN_A(ic->ic_curchan) ||
+ (ic->ic_caps & IEEE80211_C_SHPREAMBLE)) {
+ ic->ic_flags |= IEEE80211_F_SHPREAMBLE;
+ ic->ic_flags &= ~IEEE80211_F_USEBARKER;
+ } else {
+ ic->ic_flags &= ~IEEE80211_F_SHPREAMBLE;
+ ic->ic_flags |= IEEE80211_F_USEBARKER;
+ }
+}
+
+/*
+ * Set the short slot time state and notify the driver.
+ */
+void
+ieee80211_set_shortslottime(struct ieee80211com *ic, int onoff)
+{
+ if (onoff)
+ ic->ic_flags |= IEEE80211_F_SHSLOT;
+ else
+ ic->ic_flags &= ~IEEE80211_F_SHSLOT;
+ /* notify driver */
+ if (ic->ic_updateslot != NULL)
+ ic->ic_updateslot(ic->ic_ifp);
+}
+
+/*
+ * Check if the specified rate set supports ERP.
+ * NB: the rate set is assumed to be sorted.
+ */
+int
+ieee80211_iserp_rateset(const struct ieee80211_rateset *rs)
+{
+#define N(a) (sizeof(a) / sizeof(a[0]))
+ static const int rates[] = { 2, 4, 11, 22, 12, 24, 48 };
+ int i, j;
+
+ if (rs->rs_nrates < N(rates))
+ return 0;
+ for (i = 0; i < N(rates); i++) {
+ for (j = 0; j < rs->rs_nrates; j++) {
+ int r = rs->rs_rates[j] & IEEE80211_RATE_VAL;
+ if (rates[i] == r)
+ goto next;
+ if (r > rates[i])
+ return 0;
+ }
+ return 0;
+ next:
+ ;
+ }
+ return 1;
+#undef N
+}
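
The rates table above is in 0.5 Mb/s units, so the check requires 1, 2, 5.5, 11, 6, 12, and 24 Mb/s to all be present. A small illustration with a hypothetical rate set:

    /* 1/2/5.5/6/11/12/24 Mb/s, sorted ascending, 0.5 Mb/s units */
    static const struct ieee80211_rateset rs = {
        7, { 2, 4, 11, 12, 22, 24, 48 }
    };
    int is_erp = ieee80211_iserp_rateset(&rs);  /* 1 for this set */
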
+
+/*
+ * Mark the basic rates for the rate table based on the
+ * operating mode. For real 11g we mark all the 11b rates
+ * and 6, 12, and 24 OFDM. For 11b compatibility we mark only
+ * 11b rates. There's also a pseudo 11a-mode used to mark only
+ * the basic OFDM rates.
+ */
+static void
+setbasicrates(struct ieee80211_rateset *rs,
+ enum ieee80211_phymode mode, int add)
+{
+ static const struct ieee80211_rateset basic[IEEE80211_MODE_MAX] = {
+ [IEEE80211_MODE_11A] = { 3, { 12, 24, 48 } },
+ [IEEE80211_MODE_11B] = { 2, { 2, 4 } },
+ /* NB: mixed b/g */
+ [IEEE80211_MODE_11G] = { 4, { 2, 4, 11, 22 } },
+ [IEEE80211_MODE_TURBO_A] = { 3, { 12, 24, 48 } },
+ [IEEE80211_MODE_TURBO_G] = { 4, { 2, 4, 11, 22 } },
+ [IEEE80211_MODE_STURBO_A] = { 3, { 12, 24, 48 } },
+ [IEEE80211_MODE_HALF] = { 3, { 6, 12, 24 } },
+ [IEEE80211_MODE_QUARTER] = { 3, { 3, 6, 12 } },
+ [IEEE80211_MODE_11NA] = { 3, { 12, 24, 48 } },
+ /* NB: mixed b/g */
+ [IEEE80211_MODE_11NG] = { 4, { 2, 4, 11, 22 } },
+ };
+ int i, j;
+
+ for (i = 0; i < rs->rs_nrates; i++) {
+ if (!add)
+ rs->rs_rates[i] &= IEEE80211_RATE_VAL;
+ for (j = 0; j < basic[mode].rs_nrates; j++)
+ if (basic[mode].rs_rates[j] == rs->rs_rates[i]) {
+ rs->rs_rates[i] |= IEEE80211_RATE_BASIC;
+ break;
+ }
+ }
+}
+
+/*
+ * Set the basic rates in a rate set.
+ */
+void
+ieee80211_setbasicrates(struct ieee80211_rateset *rs,
+ enum ieee80211_phymode mode)
+{
+ setbasicrates(rs, mode, 0);
+}
+
+/*
+ * Add basic rates to a rate set.
+ */
+void
+ieee80211_addbasicrates(struct ieee80211_rateset *rs,
+ enum ieee80211_phymode mode)
+{
+ setbasicrates(rs, mode, 1);
+}
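
Typical use (a sketch): stamp the 11b basic rates onto a mixed b/g rate set. Note from the implementation above that ieee80211_setbasicrates first clears any existing basic-rate bits, while ieee80211_addbasicrates preserves them.

    /* mark 1/2/5.5/11 Mb/s basic for a mixed b/g BSS */
    ieee80211_setbasicrates(&ni->ni_rates, IEEE80211_MODE_11G);
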
+
+/*
+ * WME protocol support.
+ *
+ * The default 11a/b/g/n parameters come from the Wi-Fi Alliance WMM
+ * System Interoperability Test Plan (v1.4, Appendix F) and the 802.11n
+ * Draft 2.0 Test Plan (Appendix D).
+ *
+ * Static/Dynamic Turbo mode settings come from Atheros.
+ */
+typedef struct phyParamType {
+ uint8_t aifsn;
+ uint8_t logcwmin;
+ uint8_t logcwmax;
+ uint16_t txopLimit;
+ uint8_t acm;
+} paramType;
+
+static const struct phyParamType phyParamForAC_BE[IEEE80211_MODE_MAX] = {
+ [IEEE80211_MODE_AUTO] = { 3, 4, 6, 0, 0 },
+ [IEEE80211_MODE_11A] = { 3, 4, 6, 0, 0 },
+ [IEEE80211_MODE_11B] = { 3, 4, 6, 0, 0 },
+ [IEEE80211_MODE_11G] = { 3, 4, 6, 0, 0 },
+ [IEEE80211_MODE_FH] = { 3, 4, 6, 0, 0 },
+ [IEEE80211_MODE_TURBO_A]= { 2, 3, 5, 0, 0 },
+ [IEEE80211_MODE_TURBO_G]= { 2, 3, 5, 0, 0 },
+ [IEEE80211_MODE_STURBO_A]={ 2, 3, 5, 0, 0 },
+ [IEEE80211_MODE_HALF] = { 3, 4, 6, 0, 0 },
+ [IEEE80211_MODE_QUARTER]= { 3, 4, 6, 0, 0 },
+ [IEEE80211_MODE_11NA] = { 3, 4, 6, 0, 0 },
+ [IEEE80211_MODE_11NG] = { 3, 4, 6, 0, 0 },
+};
+static const struct phyParamType phyParamForAC_BK[IEEE80211_MODE_MAX] = {
+ [IEEE80211_MODE_AUTO] = { 7, 4, 10, 0, 0 },
+ [IEEE80211_MODE_11A] = { 7, 4, 10, 0, 0 },
+ [IEEE80211_MODE_11B] = { 7, 4, 10, 0, 0 },
+ [IEEE80211_MODE_11G] = { 7, 4, 10, 0, 0 },
+ [IEEE80211_MODE_FH] = { 7, 4, 10, 0, 0 },
+ [IEEE80211_MODE_TURBO_A]= { 7, 3, 10, 0, 0 },
+ [IEEE80211_MODE_TURBO_G]= { 7, 3, 10, 0, 0 },
+ [IEEE80211_MODE_STURBO_A]={ 7, 3, 10, 0, 0 },
+ [IEEE80211_MODE_HALF] = { 7, 4, 10, 0, 0 },
+ [IEEE80211_MODE_QUARTER]= { 7, 4, 10, 0, 0 },
+ [IEEE80211_MODE_11NA] = { 7, 4, 10, 0, 0 },
+ [IEEE80211_MODE_11NG] = { 7, 4, 10, 0, 0 },
+};
+static const struct phyParamType phyParamForAC_VI[IEEE80211_MODE_MAX] = {
+ [IEEE80211_MODE_AUTO] = { 1, 3, 4, 94, 0 },
+ [IEEE80211_MODE_11A] = { 1, 3, 4, 94, 0 },
+ [IEEE80211_MODE_11B] = { 1, 3, 4, 188, 0 },
+ [IEEE80211_MODE_11G] = { 1, 3, 4, 94, 0 },
+ [IEEE80211_MODE_FH] = { 1, 3, 4, 188, 0 },
+ [IEEE80211_MODE_TURBO_A]= { 1, 2, 3, 94, 0 },
+ [IEEE80211_MODE_TURBO_G]= { 1, 2, 3, 94, 0 },
+ [IEEE80211_MODE_STURBO_A]={ 1, 2, 3, 94, 0 },
+ [IEEE80211_MODE_HALF] = { 1, 3, 4, 94, 0 },
+ [IEEE80211_MODE_QUARTER]= { 1, 3, 4, 94, 0 },
+ [IEEE80211_MODE_11NA] = { 1, 3, 4, 94, 0 },
+ [IEEE80211_MODE_11NG] = { 1, 3, 4, 94, 0 },
+};
+static const struct phyParamType phyParamForAC_VO[IEEE80211_MODE_MAX] = {
+ [IEEE80211_MODE_AUTO] = { 1, 2, 3, 47, 0 },
+ [IEEE80211_MODE_11A] = { 1, 2, 3, 47, 0 },
+ [IEEE80211_MODE_11B] = { 1, 2, 3, 102, 0 },
+ [IEEE80211_MODE_11G] = { 1, 2, 3, 47, 0 },
+ [IEEE80211_MODE_FH] = { 1, 2, 3, 102, 0 },
+ [IEEE80211_MODE_TURBO_A]= { 1, 2, 2, 47, 0 },
+ [IEEE80211_MODE_TURBO_G]= { 1, 2, 2, 47, 0 },
+ [IEEE80211_MODE_STURBO_A]={ 1, 2, 2, 47, 0 },
+ [IEEE80211_MODE_HALF] = { 1, 2, 3, 47, 0 },
+ [IEEE80211_MODE_QUARTER]= { 1, 2, 3, 47, 0 },
+ [IEEE80211_MODE_11NA] = { 1, 2, 3, 47, 0 },
+ [IEEE80211_MODE_11NG] = { 1, 2, 3, 47, 0 },
+};
+
+static const struct phyParamType bssPhyParamForAC_BE[IEEE80211_MODE_MAX] = {
+ [IEEE80211_MODE_AUTO] = { 3, 4, 10, 0, 0 },
+ [IEEE80211_MODE_11A] = { 3, 4, 10, 0, 0 },
+ [IEEE80211_MODE_11B] = { 3, 4, 10, 0, 0 },
+ [IEEE80211_MODE_11G] = { 3, 4, 10, 0, 0 },
+ [IEEE80211_MODE_FH] = { 3, 4, 10, 0, 0 },
+ [IEEE80211_MODE_TURBO_A]= { 2, 3, 10, 0, 0 },
+ [IEEE80211_MODE_TURBO_G]= { 2, 3, 10, 0, 0 },
+ [IEEE80211_MODE_STURBO_A]={ 2, 3, 10, 0, 0 },
+ [IEEE80211_MODE_HALF] = { 3, 4, 10, 0, 0 },
+ [IEEE80211_MODE_QUARTER]= { 3, 4, 10, 0, 0 },
+ [IEEE80211_MODE_11NA] = { 3, 4, 10, 0, 0 },
+ [IEEE80211_MODE_11NG] = { 3, 4, 10, 0, 0 },
+};
+static const struct phyParamType bssPhyParamForAC_VI[IEEE80211_MODE_MAX] = {
+ [IEEE80211_MODE_AUTO] = { 2, 3, 4, 94, 0 },
+ [IEEE80211_MODE_11A] = { 2, 3, 4, 94, 0 },
+ [IEEE80211_MODE_11B] = { 2, 3, 4, 188, 0 },
+ [IEEE80211_MODE_11G] = { 2, 3, 4, 94, 0 },
+ [IEEE80211_MODE_FH] = { 2, 3, 4, 188, 0 },
+ [IEEE80211_MODE_TURBO_A]= { 2, 2, 3, 94, 0 },
+ [IEEE80211_MODE_TURBO_G]= { 2, 2, 3, 94, 0 },
+ [IEEE80211_MODE_STURBO_A]={ 2, 2, 3, 94, 0 },
+ [IEEE80211_MODE_HALF] = { 2, 3, 4, 94, 0 },
+ [IEEE80211_MODE_QUARTER]= { 2, 3, 4, 94, 0 },
+ [IEEE80211_MODE_11NA] = { 2, 3, 4, 94, 0 },
+ [IEEE80211_MODE_11NG] = { 2, 3, 4, 94, 0 },
+};
+static const struct phyParamType bssPhyParamForAC_VO[IEEE80211_MODE_MAX] = {
+ [IEEE80211_MODE_AUTO] = { 2, 2, 3, 47, 0 },
+ [IEEE80211_MODE_11A] = { 2, 2, 3, 47, 0 },
+ [IEEE80211_MODE_11B] = { 2, 2, 3, 102, 0 },
+ [IEEE80211_MODE_11G] = { 2, 2, 3, 47, 0 },
+ [IEEE80211_MODE_FH] = { 2, 2, 3, 102, 0 },
+ [IEEE80211_MODE_TURBO_A]= { 1, 2, 2, 47, 0 },
+ [IEEE80211_MODE_TURBO_G]= { 1, 2, 2, 47, 0 },
+ [IEEE80211_MODE_STURBO_A]={ 1, 2, 2, 47, 0 },
+ [IEEE80211_MODE_HALF] = { 2, 2, 3, 47, 0 },
+ [IEEE80211_MODE_QUARTER]= { 2, 2, 3, 47, 0 },
+ [IEEE80211_MODE_11NA] = { 2, 2, 3, 47, 0 },
+ [IEEE80211_MODE_11NG] = { 2, 2, 3, 47, 0 },
+};
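
The logcwmin/logcwmax columns are log2 encodings (CW = 2^log - 1 slots) and txopLimit is in 32 us units; expanding the 11a AC_VI row above as a check:

    /* AC_VI, 11a: { aifsn 1, logcwmin 3, logcwmax 4, txop 94 } */
    int cwmin   = (1 << 3) - 1;   /* 7 slots */
    int cwmax   = (1 << 4) - 1;   /* 15 slots */
    int txop_us = 94 << 5;        /* 3008 us; see IEEE80211_TXOP_TO_US */
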
+
+static void
+_setifsparams(struct wmeParams *wmep, const paramType *phy)
+{
+ wmep->wmep_aifsn = phy->aifsn;
+ wmep->wmep_logcwmin = phy->logcwmin;
+ wmep->wmep_logcwmax = phy->logcwmax;
+ wmep->wmep_txopLimit = phy->txopLimit;
+}
+
+static void
+setwmeparams(struct ieee80211vap *vap, const char *type, int ac,
+ struct wmeParams *wmep, const paramType *phy)
+{
+ wmep->wmep_acm = phy->acm;
+ _setifsparams(wmep, phy);
+
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_WME,
+ "set %s (%s) [acm %u aifsn %u logcwmin %u logcwmax %u txop %u]\n",
+ ieee80211_wme_acnames[ac], type,
+ wmep->wmep_acm, wmep->wmep_aifsn, wmep->wmep_logcwmin,
+ wmep->wmep_logcwmax, wmep->wmep_txopLimit);
+}
+
+static void
+ieee80211_wme_initparams_locked(struct ieee80211vap *vap)
+{
+ struct ieee80211com *ic = vap->iv_ic;
+ struct ieee80211_wme_state *wme = &ic->ic_wme;
+ const paramType *pPhyParam, *pBssPhyParam;
+ struct wmeParams *wmep;
+ enum ieee80211_phymode mode;
+ int i;
+
+ IEEE80211_LOCK_ASSERT(ic);
+
+ if ((ic->ic_caps & IEEE80211_C_WME) == 0 || ic->ic_nrunning > 1)
+ return;
+
+ /*
+ * Select mode; we can be called early, in which case we
+ * always use auto mode. We know we'll be called when
+ * entering the RUN state with bsschan set up properly,
+ * so the state will eventually get set correctly.
+ */
+ if (ic->ic_bsschan != IEEE80211_CHAN_ANYC)
+ mode = ieee80211_chan2mode(ic->ic_bsschan);
+ else
+ mode = IEEE80211_MODE_AUTO;
+ for (i = 0; i < WME_NUM_AC; i++) {
+ switch (i) {
+ case WME_AC_BK:
+ pPhyParam = &phyParamForAC_BK[mode];
+ pBssPhyParam = &phyParamForAC_BK[mode];
+ break;
+ case WME_AC_VI:
+ pPhyParam = &phyParamForAC_VI[mode];
+ pBssPhyParam = &bssPhyParamForAC_VI[mode];
+ break;
+ case WME_AC_VO:
+ pPhyParam = &phyParamForAC_VO[mode];
+ pBssPhyParam = &bssPhyParamForAC_VO[mode];
+ break;
+ case WME_AC_BE:
+ default:
+ pPhyParam = &phyParamForAC_BE[mode];
+ pBssPhyParam = &bssPhyParamForAC_BE[mode];
+ break;
+ }
+ wmep = &wme->wme_wmeChanParams.cap_wmeParams[i];
+ if (ic->ic_opmode == IEEE80211_M_HOSTAP) {
+ setwmeparams(vap, "chan", i, wmep, pPhyParam);
+ } else {
+ setwmeparams(vap, "chan", i, wmep, pBssPhyParam);
+ }
+ wmep = &wme->wme_wmeBssChanParams.cap_wmeParams[i];
+ setwmeparams(vap, "bss ", i, wmep, pBssPhyParam);
+ }
+ /* NB: check ic_bss to avoid NULL deref on initial attach */
+ if (vap->iv_bss != NULL) {
+ /*
+ * Calculate aggressive mode switching threshold based
+ * on beacon interval. This doesn't need locking since
+ * we're only called before entering the RUN state at
+ * which point we start sending beacon frames.
+ */
+ wme->wme_hipri_switch_thresh =
+ (HIGH_PRI_SWITCH_THRESH * vap->iv_bss->ni_intval) / 100;
+ wme->wme_flags &= ~WME_F_AGGRMODE;
+ ieee80211_wme_updateparams(vap);
+ }
+}
+
+void
+ieee80211_wme_initparams(struct ieee80211vap *vap)
+{
+ struct ieee80211com *ic = vap->iv_ic;
+
+ IEEE80211_LOCK(ic);
+ ieee80211_wme_initparams_locked(vap);
+ IEEE80211_UNLOCK(ic);
+}
+
+/*
+ * Update WME parameters for ourself and the BSS.
+ */
+void
+ieee80211_wme_updateparams_locked(struct ieee80211vap *vap)
+{
+ static const paramType aggrParam[IEEE80211_MODE_MAX] = {
+ [IEEE80211_MODE_AUTO] = { 2, 4, 10, 64, 0 },
+ [IEEE80211_MODE_11A] = { 2, 4, 10, 64, 0 },
+ [IEEE80211_MODE_11B] = { 2, 5, 10, 64, 0 },
+ [IEEE80211_MODE_11G] = { 2, 4, 10, 64, 0 },
+ [IEEE80211_MODE_FH] = { 2, 5, 10, 64, 0 },
+ [IEEE80211_MODE_TURBO_A] = { 1, 3, 10, 64, 0 },
+ [IEEE80211_MODE_TURBO_G] = { 1, 3, 10, 64, 0 },
+ [IEEE80211_MODE_STURBO_A] = { 1, 3, 10, 64, 0 },
+ [IEEE80211_MODE_HALF] = { 2, 4, 10, 64, 0 },
+ [IEEE80211_MODE_QUARTER] = { 2, 4, 10, 64, 0 },
+ [IEEE80211_MODE_11NA] = { 2, 4, 10, 64, 0 }, /* XXXcheck*/
+ [IEEE80211_MODE_11NG] = { 2, 4, 10, 64, 0 }, /* XXXcheck*/
+ };
+ struct ieee80211com *ic = vap->iv_ic;
+ struct ieee80211_wme_state *wme = &ic->ic_wme;
+ const struct wmeParams *wmep;
+ struct wmeParams *chanp, *bssp;
+ enum ieee80211_phymode mode;
+ int i;
+
+ /*
+ * Set up the channel access parameters for the physical
+ * device. First populate the configured settings.
+ */
+ for (i = 0; i < WME_NUM_AC; i++) {
+ chanp = &wme->wme_chanParams.cap_wmeParams[i];
+ wmep = &wme->wme_wmeChanParams.cap_wmeParams[i];
+ chanp->wmep_aifsn = wmep->wmep_aifsn;
+ chanp->wmep_logcwmin = wmep->wmep_logcwmin;
+ chanp->wmep_logcwmax = wmep->wmep_logcwmax;
+ chanp->wmep_txopLimit = wmep->wmep_txopLimit;
+
+ chanp = &wme->wme_bssChanParams.cap_wmeParams[i];
+ wmep = &wme->wme_wmeBssChanParams.cap_wmeParams[i];
+ chanp->wmep_aifsn = wmep->wmep_aifsn;
+ chanp->wmep_logcwmin = wmep->wmep_logcwmin;
+ chanp->wmep_logcwmax = wmep->wmep_logcwmax;
+ chanp->wmep_txopLimit = wmep->wmep_txopLimit;
+ }
+
+ /*
+ * Select mode; we can be called early, in which case we
+ * always use auto mode. We know we'll be called when
+ * entering the RUN state with bsschan set up properly,
+ * so the state will eventually get set correctly.
+ */
+ if (ic->ic_bsschan != IEEE80211_CHAN_ANYC)
+ mode = ieee80211_chan2mode(ic->ic_bsschan);
+ else
+ mode = IEEE80211_MODE_AUTO;
+
+ /*
+ * This implements aggressive mode as found in certain
+ * vendors' APs. When there is significant high-priority
+ * (VI/VO) traffic in the BSS, throttle back BE traffic
+ * using conservative parameters. Otherwise BE uses
+ * aggressive parameters to optimize performance of
+ * legacy/non-QoS traffic.
+ */
+ if ((vap->iv_opmode == IEEE80211_M_HOSTAP &&
+ (wme->wme_flags & WME_F_AGGRMODE) != 0) ||
+ (vap->iv_opmode == IEEE80211_M_STA &&
+ (vap->iv_bss->ni_flags & IEEE80211_NODE_QOS) == 0) ||
+ (vap->iv_flags & IEEE80211_F_WME) == 0) {
+ chanp = &wme->wme_chanParams.cap_wmeParams[WME_AC_BE];
+ bssp = &wme->wme_bssChanParams.cap_wmeParams[WME_AC_BE];
+
+ chanp->wmep_aifsn = bssp->wmep_aifsn = aggrParam[mode].aifsn;
+ chanp->wmep_logcwmin = bssp->wmep_logcwmin =
+ aggrParam[mode].logcwmin;
+ chanp->wmep_logcwmax = bssp->wmep_logcwmax =
+ aggrParam[mode].logcwmax;
+ chanp->wmep_txopLimit = bssp->wmep_txopLimit =
+ (vap->iv_flags & IEEE80211_F_BURST) ?
+ aggrParam[mode].txopLimit : 0;
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_WME,
+ "update %s (chan+bss) [acm %u aifsn %u logcwmin %u "
+ "logcwmax %u txop %u]\n", ieee80211_wme_acnames[WME_AC_BE],
+ chanp->wmep_acm, chanp->wmep_aifsn, chanp->wmep_logcwmin,
+ chanp->wmep_logcwmax, chanp->wmep_txopLimit);
+ }
+
+ if (vap->iv_opmode == IEEE80211_M_HOSTAP &&
+ ic->ic_sta_assoc < 2 && (wme->wme_flags & WME_F_AGGRMODE) != 0) {
+ static const uint8_t logCwMin[IEEE80211_MODE_MAX] = {
+ [IEEE80211_MODE_AUTO] = 3,
+ [IEEE80211_MODE_11A] = 3,
+ [IEEE80211_MODE_11B] = 4,
+ [IEEE80211_MODE_11G] = 3,
+ [IEEE80211_MODE_FH] = 4,
+ [IEEE80211_MODE_TURBO_A] = 3,
+ [IEEE80211_MODE_TURBO_G] = 3,
+ [IEEE80211_MODE_STURBO_A] = 3,
+ [IEEE80211_MODE_HALF] = 3,
+ [IEEE80211_MODE_QUARTER] = 3,
+ [IEEE80211_MODE_11NA] = 3,
+ [IEEE80211_MODE_11NG] = 3,
+ };
+ chanp = &wme->wme_chanParams.cap_wmeParams[WME_AC_BE];
+ bssp = &wme->wme_bssChanParams.cap_wmeParams[WME_AC_BE];
+
+ chanp->wmep_logcwmin = bssp->wmep_logcwmin = logCwMin[mode];
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_WME,
+ "update %s (chan+bss) logcwmin %u\n",
+ ieee80211_wme_acnames[WME_AC_BE], chanp->wmep_logcwmin);
+ }
+ if (vap->iv_opmode == IEEE80211_M_HOSTAP) { /* XXX ibss? */
+ /*
+ * Arrange for a beacon update and bump the parameter
+ * set number so associated stations load the new values.
+ */
+ wme->wme_bssChanParams.cap_info =
+ (wme->wme_bssChanParams.cap_info+1) & WME_QOSINFO_COUNT;
+ ieee80211_beacon_notify(vap, IEEE80211_BEACON_WME);
+ }
+
+ wme->wme_update(ic);
+
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_WME,
+ "%s: WME params updated, cap_info 0x%x\n", __func__,
+ vap->iv_opmode == IEEE80211_M_STA ?
+ wme->wme_wmeChanParams.cap_info :
+ wme->wme_bssChanParams.cap_info);
+}
+
+void
+ieee80211_wme_updateparams(struct ieee80211vap *vap)
+{
+ struct ieee80211com *ic = vap->iv_ic;
+
+ if (ic->ic_caps & IEEE80211_C_WME) {
+ IEEE80211_LOCK(ic);
+ ieee80211_wme_updateparams_locked(vap);
+ IEEE80211_UNLOCK(ic);
+ }
+}
+
+static void
+parent_updown(void *arg, int npending)
+{
+ struct ifnet *parent = arg;
+
+ parent->if_ioctl(parent, SIOCSIFFLAGS, NULL);
+}
+
+static void
+update_mcast(void *arg, int npending)
+{
+ struct ieee80211com *ic = arg;
+ struct ifnet *parent = ic->ic_ifp;
+
+ ic->ic_update_mcast(parent);
+}
+
+static void
+update_promisc(void *arg, int npending)
+{
+ struct ieee80211com *ic = arg;
+ struct ifnet *parent = ic->ic_ifp;
+
+ ic->ic_update_promisc(parent);
+}
+
+static void
+update_channel(void *arg, int npending)
+{
+ struct ieee80211com *ic = arg;
+
+ ic->ic_set_channel(ic);
+ ieee80211_radiotap_chan_change(ic);
+}
+
+/*
+ * Block until the parent is in a known state. This is
+ * used after any operations that dispatch a task (e.g.
+ * to auto-configure the parent device up/down).
+ */
+void
+ieee80211_waitfor_parent(struct ieee80211com *ic)
+{
+ taskqueue_block(ic->ic_tq);
+ ieee80211_draintask(ic, &ic->ic_parent_task);
+ ieee80211_draintask(ic, &ic->ic_mcast_task);
+ ieee80211_draintask(ic, &ic->ic_promisc_task);
+ ieee80211_draintask(ic, &ic->ic_chan_task);
+ ieee80211_draintask(ic, &ic->ic_bmiss_task);
+ taskqueue_unblock(ic->ic_tq);
+}
+
+/*
+ * Start a vap running. If this is the first vap to be
+ * set running on the underlying device then we
+ * automatically bring the device up.
+ */
+void
+ieee80211_start_locked(struct ieee80211vap *vap)
+{
+ struct ifnet *ifp = vap->iv_ifp;
+ struct ieee80211com *ic = vap->iv_ic;
+ struct ifnet *parent = ic->ic_ifp;
+
+ IEEE80211_LOCK_ASSERT(ic);
+
+ IEEE80211_DPRINTF(vap,
+ IEEE80211_MSG_STATE | IEEE80211_MSG_DEBUG,
+ "start running, %d vaps running\n", ic->ic_nrunning);
+
+ if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
+ /*
+ * Mark us running. Note that it's ok to do this first;
+ * if we need to bring the parent device up we defer that
+ * to avoid dropping the com lock. We expect the device
+ * to respond to being marked up by calling back into us
+ * through ieee80211_start_all at which point we'll come
+ * back in here and complete the work.
+ */
+ ifp->if_drv_flags |= IFF_DRV_RUNNING;
+ /*
+ * We are not running; if this is the first vap to be
+ * brought up, auto-up the parent if necessary.
+ */
+ if (ic->ic_nrunning++ == 0 &&
+ (parent->if_drv_flags & IFF_DRV_RUNNING) == 0) {
+ IEEE80211_DPRINTF(vap,
+ IEEE80211_MSG_STATE | IEEE80211_MSG_DEBUG,
+ "%s: up parent %s\n", __func__, parent->if_xname);
+ parent->if_flags |= IFF_UP;
+ ieee80211_runtask(ic, &ic->ic_parent_task);
+ return;
+ }
+ }
+ /*
+ * If the parent is up and running, then kick the
+ * 802.11 state machine as appropriate.
+ */
+ if ((parent->if_drv_flags & IFF_DRV_RUNNING) &&
+ vap->iv_roaming != IEEE80211_ROAMING_MANUAL) {
+ if (vap->iv_opmode == IEEE80211_M_STA) {
+#if 0
+ /* XXX bypasses scan too easily; disable for now */
+ /*
+ * Try to be intelligent about clocking the state
+ * machine. If we're currently in RUN state then
+ * we should be able to apply any new state/parameters
+ * simply by re-associating. Otherwise we need to
+ * re-scan to select an appropriate ap.
+ */
+ if (vap->iv_state >= IEEE80211_S_RUN)
+ ieee80211_new_state_locked(vap,
+ IEEE80211_S_ASSOC, 1);
+ else
+#endif
+ ieee80211_new_state_locked(vap,
+ IEEE80211_S_SCAN, 0);
+ } else {
+ /*
+ * For monitor+wds mode there's nothing to do but
+ * start running. Otherwise if this is the first
+ * vap to be brought up, start a scan which may be
+ * preempted if the station is locked to a particular
+ * channel.
+ */
+ vap->iv_flags_ext |= IEEE80211_FEXT_REINIT;
+ if (vap->iv_opmode == IEEE80211_M_MONITOR ||
+ vap->iv_opmode == IEEE80211_M_WDS)
+ ieee80211_new_state_locked(vap,
+ IEEE80211_S_RUN, -1);
+ else
+ ieee80211_new_state_locked(vap,
+ IEEE80211_S_SCAN, 0);
+ }
+ }
+}
+
+/*
+ * Start a single vap.
+ */
+void
+ieee80211_init(void *arg)
+{
+ struct ieee80211vap *vap = arg;
+
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_STATE | IEEE80211_MSG_DEBUG,
+ "%s\n", __func__);
+
+ IEEE80211_LOCK(vap->iv_ic);
+ ieee80211_start_locked(vap);
+ IEEE80211_UNLOCK(vap->iv_ic);
+}
+
+/*
+ * Start all runnable vap's on a device.
+ */
+void
+ieee80211_start_all(struct ieee80211com *ic)
+{
+ struct ieee80211vap *vap;
+
+ IEEE80211_LOCK(ic);
+ TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
+ struct ifnet *ifp = vap->iv_ifp;
+ if (IFNET_IS_UP_RUNNING(ifp)) /* NB: avoid recursion */
+ ieee80211_start_locked(vap);
+ }
+ IEEE80211_UNLOCK(ic);
+}
+
+/*
+ * Stop a vap. We force it down using the state machine
+ * then mark its ifnet not running. If this is the last
+ * vap running on the underlying device then we close it
+ * too to ensure it will be properly initialized when the
+ * next vap is brought up.
+ */
+void
+ieee80211_stop_locked(struct ieee80211vap *vap)
+{
+ struct ieee80211com *ic = vap->iv_ic;
+ struct ifnet *ifp = vap->iv_ifp;
+ struct ifnet *parent = ic->ic_ifp;
+
+ IEEE80211_LOCK_ASSERT(ic);
+
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_STATE | IEEE80211_MSG_DEBUG,
+ "stop running, %d vaps running\n", ic->ic_nrunning);
+
+ ieee80211_new_state_locked(vap, IEEE80211_S_INIT, -1);
+ if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+ ifp->if_drv_flags &= ~IFF_DRV_RUNNING; /* mark us stopped */
+ if (--ic->ic_nrunning == 0 &&
+ (parent->if_drv_flags & IFF_DRV_RUNNING)) {
+ IEEE80211_DPRINTF(vap,
+ IEEE80211_MSG_STATE | IEEE80211_MSG_DEBUG,
+ "down parent %s\n", parent->if_xname);
+ parent->if_flags &= ~IFF_UP;
+ ieee80211_runtask(ic, &ic->ic_parent_task);
+ }
+ }
+}
+
+void
+ieee80211_stop(struct ieee80211vap *vap)
+{
+ struct ieee80211com *ic = vap->iv_ic;
+
+ IEEE80211_LOCK(ic);
+ ieee80211_stop_locked(vap);
+ IEEE80211_UNLOCK(ic);
+}
+
+/*
+ * Stop all vap's running on a device.
+ */
+void
+ieee80211_stop_all(struct ieee80211com *ic)
+{
+ struct ieee80211vap *vap;
+
+ IEEE80211_LOCK(ic);
+ TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
+ struct ifnet *ifp = vap->iv_ifp;
+ if (IFNET_IS_UP_RUNNING(ifp)) /* NB: avoid recursion */
+ ieee80211_stop_locked(vap);
+ }
+ IEEE80211_UNLOCK(ic);
+
+ ieee80211_waitfor_parent(ic);
+}
+
+/*
+ * Stop all vap's running on a device and arrange
+ * for those that were running to be resumed.
+ */
+void
+ieee80211_suspend_all(struct ieee80211com *ic)
+{
+ struct ieee80211vap *vap;
+
+ IEEE80211_LOCK(ic);
+ TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
+ struct ifnet *ifp = vap->iv_ifp;
+ if (IFNET_IS_UP_RUNNING(ifp)) { /* NB: avoid recursion */
+ vap->iv_flags_ext |= IEEE80211_FEXT_RESUME;
+ ieee80211_stop_locked(vap);
+ }
+ }
+ IEEE80211_UNLOCK(ic);
+
+ ieee80211_waitfor_parent(ic);
+}
+
+/*
+ * Start all vap's marked for resume.
+ */
+void
+ieee80211_resume_all(struct ieee80211com *ic)
+{
+ struct ieee80211vap *vap;
+
+ IEEE80211_LOCK(ic);
+ TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
+ struct ifnet *ifp = vap->iv_ifp;
+ if (!IFNET_IS_UP_RUNNING(ifp) &&
+ (vap->iv_flags_ext & IEEE80211_FEXT_RESUME)) {
+ vap->iv_flags_ext &= ~IEEE80211_FEXT_RESUME;
+ ieee80211_start_locked(vap);
+ }
+ }
+ IEEE80211_UNLOCK(ic);
+}
+
+void
+ieee80211_beacon_miss(struct ieee80211com *ic)
+{
+ IEEE80211_LOCK(ic);
+ if ((ic->ic_flags & IEEE80211_F_SCAN) == 0) {
+ /* Process in a taskq, the handler may reenter the driver */
+ ieee80211_runtask(ic, &ic->ic_bmiss_task);
+ }
+ IEEE80211_UNLOCK(ic);
+}
+
+static void
+beacon_miss(void *arg, int npending)
+{
+ struct ieee80211com *ic = arg;
+ struct ieee80211vap *vap;
+
+ /* XXX locking */
+ TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
+ /*
+ * We only pass events through for sta vap's in RUN state;
+ * may be too restrictive but for now this saves all the
+ * handlers duplicating these checks.
+ */
+ if (vap->iv_opmode == IEEE80211_M_STA &&
+ vap->iv_state >= IEEE80211_S_RUN &&
+ vap->iv_bmiss != NULL)
+ vap->iv_bmiss(vap);
+ }
+}
+
+static void
+beacon_swmiss(void *arg, int npending)
+{
+ struct ieee80211vap *vap = arg;
+
+ if (vap->iv_state != IEEE80211_S_RUN)
+ return;
+
+ /* XXX Call multiple times if npending > zero? */
+ vap->iv_bmiss(vap);
+}
+
+/*
+ * Software beacon miss handling. Check if any beacons
+ * were received in the last period. If not, post a
+ * beacon miss; otherwise reset the counter.
+ */
+void
+ieee80211_swbmiss(void *arg)
+{
+ struct ieee80211vap *vap = arg;
+ struct ieee80211com *ic = vap->iv_ic;
+
+ /* XXX sleep state? */
+ KASSERT(vap->iv_state == IEEE80211_S_RUN,
+ ("wrong state %d", vap->iv_state));
+
+ if (ic->ic_flags & IEEE80211_F_SCAN) {
+ /*
+ * If scanning just ignore and reset state. If we get a
+ * bmiss after coming out of scan because we haven't had
+ * time to receive a beacon then we should probe the AP
+ * before posting a real bmiss (unless iv_bmiss_max has
+ * been artificially lowered). A cleaner solution might
+ * be to disable the timer on scan start/end but to handle
+ * case of multiple sta vap's we'd need to disable the
+ * timers of all affected vap's.
+ */
+ vap->iv_swbmiss_count = 0;
+ } else if (vap->iv_swbmiss_count == 0) {
+ if (vap->iv_bmiss != NULL)
+ ieee80211_runtask(ic, &vap->iv_swbmiss_task);
+ } else
+ vap->iv_swbmiss_count = 0;
+ callout_reset(&vap->iv_swbmiss, vap->iv_swbmiss_period,
+ ieee80211_swbmiss, vap);
+}
+
+/*
+ * Start an 802.11h channel switch. We record the parameters,
+ * mark the operation pending, notify each vap through the
+ * beacon update mechanism so it can update the beacon frame
+ * contents, and then switch vap's to CSA state to block outbound
+ * traffic. Devices that handle CSA directly can use the state
+ * switch to do the right thing so long as they call
+ * ieee80211_csa_completeswitch when it's time to complete the
+ * channel change. Devices that depend on the net80211 layer can
+ * use ieee80211_beacon_update to handle the countdown and the
+ * channel switch.
+ */
+void
+ieee80211_csa_startswitch(struct ieee80211com *ic,
+ struct ieee80211_channel *c, int mode, int count)
+{
+ struct ieee80211vap *vap;
+
+ IEEE80211_LOCK_ASSERT(ic);
+
+ ic->ic_csa_newchan = c;
+ ic->ic_csa_mode = mode;
+ ic->ic_csa_count = count;
+ ic->ic_flags |= IEEE80211_F_CSAPENDING;
+ TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
+ if (vap->iv_opmode == IEEE80211_M_HOSTAP ||
+ vap->iv_opmode == IEEE80211_M_IBSS ||
+ vap->iv_opmode == IEEE80211_M_MBSS)
+ ieee80211_beacon_notify(vap, IEEE80211_BEACON_CSA);
+ /* switch to CSA state to block outbound traffic */
+ if (vap->iv_state == IEEE80211_S_RUN)
+ ieee80211_new_state_locked(vap, IEEE80211_S_CSA, 0);
+ }
+ ieee80211_notify_csa(ic, c, mode, count);
+}
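
A hedged example of initiating a switch, e.g. from DFS radar handling (newchan is a hypothetical target channel; mode 1 asks stations to cease transmission until the switch, count is the number of beacon intervals remaining):

    IEEE80211_LOCK(ic);
    ieee80211_csa_startswitch(ic, newchan, 1, 10);
    IEEE80211_UNLOCK(ic);
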
+
+static void
+csa_completeswitch(struct ieee80211com *ic)
+{
+ struct ieee80211vap *vap;
+
+ ic->ic_csa_newchan = NULL;
+ ic->ic_flags &= ~IEEE80211_F_CSAPENDING;
+
+ TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next)
+ if (vap->iv_state == IEEE80211_S_CSA)
+ ieee80211_new_state_locked(vap, IEEE80211_S_RUN, 0);
+}
+
+/*
+ * Complete an 802.11h channel switch started by ieee80211_csa_startswitch.
+ * We clear state and move all vap's in CSA state to RUN state
+ * so they can again transmit.
+ */
+void
+ieee80211_csa_completeswitch(struct ieee80211com *ic)
+{
+ IEEE80211_LOCK_ASSERT(ic);
+
+ KASSERT(ic->ic_flags & IEEE80211_F_CSAPENDING, ("csa not pending"));
+
+ ieee80211_setcurchan(ic, ic->ic_csa_newchan);
+ csa_completeswitch(ic);
+}
+
+/*
+ * Cancel an 802.11h channel switch started by ieee80211_csa_startswitch.
+ * We clear state and move all vap's in CSA state to RUN state
+ * so they can again transmit.
+ */
+void
+ieee80211_csa_cancelswitch(struct ieee80211com *ic)
+{
+ IEEE80211_LOCK_ASSERT(ic);
+
+ csa_completeswitch(ic);
+}
+
+/*
+ * Complete a DFS CAC started by ieee80211_dfs_cac_start.
+ * We clear state and move all vap's in CAC state to RUN state.
+ */
+void
+ieee80211_cac_completeswitch(struct ieee80211vap *vap0)
+{
+ struct ieee80211com *ic = vap0->iv_ic;
+ struct ieee80211vap *vap;
+
+ IEEE80211_LOCK(ic);
+ /*
+ * Complete CAC state change for lead vap first; then
+ * clock all the other vap's waiting.
+ */
+ KASSERT(vap0->iv_state == IEEE80211_S_CAC,
+ ("wrong state %d", vap0->iv_state));
+ ieee80211_new_state_locked(vap0, IEEE80211_S_RUN, 0);
+
+ TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next)
+ if (vap->iv_state == IEEE80211_S_CAC)
+ ieee80211_new_state_locked(vap, IEEE80211_S_RUN, 0);
+ IEEE80211_UNLOCK(ic);
+}
+
+/*
+ * Force all vap's other than the specified vap to the INIT state
+ * and mark them as waiting for a scan to complete. These vaps
+ * will be brought back up by wakeupwaiting when the scan
+ * completes and the scanning vap reaches RUN state.
+ */
+static void
+markwaiting(struct ieee80211vap *vap0)
+{
+ struct ieee80211com *ic = vap0->iv_ic;
+ struct ieee80211vap *vap;
+
+ IEEE80211_LOCK_ASSERT(ic);
+
+ /*
+ * A vap list entry cannot disappear since we are running on the
+ * taskqueue and a vap destroy will queue and drain another state
+ * change task.
+ */
+ TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
+ if (vap == vap0)
+ continue;
+ if (vap->iv_state != IEEE80211_S_INIT) {
+ /* NB: iv_newstate may drop the lock */
+ vap->iv_newstate(vap, IEEE80211_S_INIT, 0);
+ vap->iv_flags_ext |= IEEE80211_FEXT_SCANWAIT;
+ }
+ }
+}
+
+/*
+ * Wakeup all vap's waiting for a scan to complete. This is the
+ * companion to markwaiting (above) and is used to coordinate
+ * multiple vaps scanning.
+ * This is called from the state taskqueue.
+ */
+static void
+wakeupwaiting(struct ieee80211vap *vap0)
+{
+ struct ieee80211com *ic = vap0->iv_ic;
+ struct ieee80211vap *vap;
+
+ IEEE80211_LOCK_ASSERT(ic);
+
+ /*
+ * A vap list entry cannot disappear since we are running on the
+ * taskqueue and a vap destroy will queue and drain another state
+ * change task.
+ */
+ TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
+ if (vap == vap0)
+ continue;
+ if (vap->iv_flags_ext & IEEE80211_FEXT_SCANWAIT) {
+ vap->iv_flags_ext &= ~IEEE80211_FEXT_SCANWAIT;
+ /* NB: sta's cannot go INIT->RUN */
+ /* NB: iv_newstate may drop the lock */
+ vap->iv_newstate(vap,
+ vap->iv_opmode == IEEE80211_M_STA ?
+ IEEE80211_S_SCAN : IEEE80211_S_RUN, 0);
+ }
+ }
+}
+
+/*
+ * Handle post state change work common to all operating modes.
+ */
+static void
+ieee80211_newstate_cb(void *xvap, int npending)
+{
+ struct ieee80211vap *vap = xvap;
+ struct ieee80211com *ic = vap->iv_ic;
+ enum ieee80211_state nstate, ostate;
+ int arg, rc;
+
+ IEEE80211_LOCK(ic);
+ nstate = vap->iv_nstate;
+ arg = vap->iv_nstate_arg;
+
+ if (vap->iv_flags_ext & IEEE80211_FEXT_REINIT) {
+ /*
+ * We have been requested to drop back to the INIT state
+ * before proceeding to the new state.
+ */
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_STATE,
+ "%s: %s -> %s arg %d\n", __func__,
+ ieee80211_state_name[vap->iv_state],
+ ieee80211_state_name[IEEE80211_S_INIT], arg);
+ vap->iv_newstate(vap, IEEE80211_S_INIT, arg);
+ vap->iv_flags_ext &= ~IEEE80211_FEXT_REINIT;
+ }
+
+ ostate = vap->iv_state;
+ if (nstate == IEEE80211_S_SCAN && ostate != IEEE80211_S_INIT) {
+ /*
+ * SCAN was forced; e.g. on beacon miss. Force other running
+ * vap's to INIT state and mark them as waiting for the scan to
+ * complete. This ensures they don't interfere with our
+ * scanning. Since we are single-threaded, the vaps cannot
+ * transition again while we are executing.
+ *
+ * XXX not always right, assumes ap follows sta
+ */
+ markwaiting(vap);
+ }
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_STATE,
+ "%s: %s -> %s arg %d\n", __func__,
+ ieee80211_state_name[ostate], ieee80211_state_name[nstate], arg);
+
+ rc = vap->iv_newstate(vap, nstate, arg);
+ vap->iv_flags_ext &= ~IEEE80211_FEXT_STATEWAIT;
+ if (rc != 0) {
+ /* State transition failed */
+ KASSERT(rc != EINPROGRESS, ("iv_newstate was deferred"));
+ KASSERT(nstate != IEEE80211_S_INIT,
+ ("INIT state change failed"));
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_STATE,
+ "%s: %s returned error %d\n", __func__,
+ ieee80211_state_name[nstate], rc);
+ goto done;
+ }
+
+ /* No actual transition, skip post processing */
+ if (ostate == nstate)
+ goto done;
+
+ if (nstate == IEEE80211_S_RUN) {
+ /*
+ * OACTIVE may be set on the vap if the upper layer
+ * tried to transmit (e.g. IPv6 NDP) before we reach
+ * RUN state. Clear it and restart xmit.
+ *
+ * Note this can also happen as a result of SLEEP->RUN
+ * (i.e. coming out of power save mode).
+ */
+ vap->iv_ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
+ if_start(vap->iv_ifp);
+
+ /* bring up any vaps waiting on us */
+ wakeupwaiting(vap);
+ } else if (nstate == IEEE80211_S_INIT) {
+ /*
+ * Flush the scan cache if we did the last scan (XXX?)
+ * and flush any frames on send queues from this vap.
+ * Note the mgt q is used only for legacy drivers and
+ * will go away shortly.
+ */
+ ieee80211_scan_flush(vap);
+
+ /* XXX NB: cast for altq */
+ ieee80211_flush_ifq((struct ifqueue *)&ic->ic_ifp->if_snd, vap);
+ }
+done:
+ IEEE80211_UNLOCK(ic);
+}
+
+/*
+ * Public interface for initiating a state machine change.
+ * This routine single-threads the request and coordinates
+ * the scheduling of multiple vaps for the purpose of selecting
+ * an operating channel. Specifically the following scenarios
+ * are handled:
+ * o only one vap can be selecting a channel so on transition to
+ * SCAN state if another vap is already scanning then
+ * mark the caller for later processing and return without
+ * doing anything (XXX? expectations by caller of synchronous operation)
+ * o only one vap can be doing CAC of a channel so on transition to
+ * CAC state if another vap is already scanning for radar then
+ * mark the caller for later processing and return without
+ * doing anything (XXX? expectations by caller of synchronous operation)
+ * o if another vap is already running when a request is made
+ * to SCAN then an operating channel has been chosen; bypass
+ * the scan and just join the channel
+ *
+ * Note that the state change call is done through the iv_newstate
+ * method pointer so any driver routine gets invoked. The driver
+ * will normally call back into operating mode-specific
+ * ieee80211_newstate routines (below) unless it needs to completely
+ * bypass the state machine (e.g. because the firmware has its
+ * own idea of how things should work). Bypassing the net80211 layer
+ * is usually a mistake and indicates lack of proper integration
+ * with the net80211 layer.
+ */
+static int
+ieee80211_new_state_locked(struct ieee80211vap *vap,
+ enum ieee80211_state nstate, int arg)
+{
+ struct ieee80211com *ic = vap->iv_ic;
+ struct ieee80211vap *vp;
+ enum ieee80211_state ostate;
+ int nrunning, nscanning;
+
+ IEEE80211_LOCK_ASSERT(ic);
+
+ if (vap->iv_flags_ext & IEEE80211_FEXT_STATEWAIT) {
+ if (vap->iv_nstate == IEEE80211_S_INIT) {
+ /*
+ * XXX The vap is being stopped; do not allow any other
+ * state changes until this is completed.
+ */
+ return -1;
+ } else if (vap->iv_state != vap->iv_nstate) {
+#if 0
+ /* Warn if the previous state hasn't completed. */
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_STATE,
+ "%s: pending %s -> %s transition lost\n", __func__,
+ ieee80211_state_name[vap->iv_state],
+ ieee80211_state_name[vap->iv_nstate]);
+#else
+ /* XXX temporarily enable to identify issues */
+ if_printf(vap->iv_ifp,
+ "%s: pending %s -> %s transition lost\n",
+ __func__, ieee80211_state_name[vap->iv_state],
+ ieee80211_state_name[vap->iv_nstate]);
+#endif
+ }
+ }
+
+ nrunning = nscanning = 0;
+ /* XXX can track this state instead of calculating */
+ TAILQ_FOREACH(vp, &ic->ic_vaps, iv_next) {
+ if (vp != vap) {
+ if (vp->iv_state >= IEEE80211_S_RUN)
+ nrunning++;
+ /* XXX doesn't handle bg scan */
+ /* NB: CAC+AUTH+ASSOC treated like SCAN */
+ else if (vp->iv_state > IEEE80211_S_INIT)
+ nscanning++;
+ }
+ }
+ ostate = vap->iv_state;
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_STATE,
+ "%s: %s -> %s (nrunning %d nscanning %d)\n", __func__,
+ ieee80211_state_name[ostate], ieee80211_state_name[nstate],
+ nrunning, nscanning);
+ switch (nstate) {
+ case IEEE80211_S_SCAN:
+ if (ostate == IEEE80211_S_INIT) {
+ /*
+ * INIT -> SCAN happens on initial bringup.
+ */
+ KASSERT(!(nscanning && nrunning),
+ ("%d scanning and %d running", nscanning, nrunning));
+ if (nscanning) {
+ /*
+ * Someone is scanning, defer our state
+ * change until the work has completed.
+ */
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_STATE,
+ "%s: defer %s -> %s\n",
+ __func__, ieee80211_state_name[ostate],
+ ieee80211_state_name[nstate]);
+ vap->iv_flags_ext |= IEEE80211_FEXT_SCANWAIT;
+ return 0;
+ }
+ if (nrunning) {
+ /*
+ * Someone is operating; just join the channel
+ * they have chosen.
+ */
+ /* XXX kill arg? */
+ /* XXX check each opmode, adhoc? */
+ if (vap->iv_opmode == IEEE80211_M_STA)
+ nstate = IEEE80211_S_SCAN;
+ else
+ nstate = IEEE80211_S_RUN;
+#ifdef IEEE80211_DEBUG
+ if (nstate != IEEE80211_S_SCAN) {
+ IEEE80211_DPRINTF(vap,
+ IEEE80211_MSG_STATE,
+ "%s: override, now %s -> %s\n",
+ __func__,
+ ieee80211_state_name[ostate],
+ ieee80211_state_name[nstate]);
+ }
+#endif
+ }
+ }
+ break;
+ case IEEE80211_S_RUN:
+ if (vap->iv_opmode == IEEE80211_M_WDS &&
+ (vap->iv_flags_ext & IEEE80211_FEXT_WDSLEGACY) &&
+ nscanning) {
+ /*
+ * Legacy WDS with someone else scanning; don't
+ * go online until that completes as we should
+ * follow the other vap to the channel they choose.
+ */
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_STATE,
+ "%s: defer %s -> %s (legacy WDS)\n", __func__,
+ ieee80211_state_name[ostate],
+ ieee80211_state_name[nstate]);
+ vap->iv_flags_ext |= IEEE80211_FEXT_SCANWAIT;
+ return 0;
+ }
+ if (vap->iv_opmode == IEEE80211_M_HOSTAP &&
+ IEEE80211_IS_CHAN_DFS(ic->ic_bsschan) &&
+ (vap->iv_flags_ext & IEEE80211_FEXT_DFS) &&
+ !IEEE80211_IS_CHAN_CACDONE(ic->ic_bsschan)) {
+ /*
+ * This is a DFS channel, transition to CAC state
+ * instead of RUN. This allows us to initiate
+ * Channel Availability Check (CAC) as specified
+ * by 11h/DFS.
+ */
+ nstate = IEEE80211_S_CAC;
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_STATE,
+ "%s: override %s -> %s (DFS)\n", __func__,
+ ieee80211_state_name[ostate],
+ ieee80211_state_name[nstate]);
+ }
+ break;
+ case IEEE80211_S_INIT:
+ /* cancel any scan in progress */
+ ieee80211_cancel_scan(vap);
+ if (ostate == IEEE80211_S_INIT) {
+ /* XXX don't believe this */
+ /* INIT -> INIT. nothing to do */
+ vap->iv_flags_ext &= ~IEEE80211_FEXT_SCANWAIT;
+ }
+ /* fall thru... */
+ default:
+ break;
+ }
+ /* defer the state change to a thread */
+ vap->iv_nstate = nstate;
+ vap->iv_nstate_arg = arg;
+ vap->iv_flags_ext |= IEEE80211_FEXT_STATEWAIT;
+ ieee80211_runtask(ic, &vap->iv_nstate_task);
+ return EINPROGRESS;
+}
+
+int
+ieee80211_new_state(struct ieee80211vap *vap,
+ enum ieee80211_state nstate, int arg)
+{
+ struct ieee80211com *ic = vap->iv_ic;
+ int rc;
+
+ IEEE80211_LOCK(ic);
+ rc = ieee80211_new_state_locked(vap, nstate, arg);
+ IEEE80211_UNLOCK(ic);
+ return rc;
+}
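
Drivers normally interpose on iv_newstate and chain to the saved net80211 method; a minimal sketch with hypothetical driver types:

    struct mydrv_vap {
        struct ieee80211vap mv_vap;     /* base class; must come first */
        int (*mv_newstate)(struct ieee80211vap *,
                enum ieee80211_state, int);
    };

    static int
    mydrv_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate,
        int arg)
    {
        struct mydrv_vap *mvp = (struct mydrv_vap *)vap;

        /* driver-specific work for the transition goes here */
        return mvp->mv_newstate(vap, nstate, arg);  /* chain to net80211 */
    }

    /* at vap creation, after ieee80211_vap_setup(): */
    mvp->mv_newstate = vap->iv_newstate;
    vap->iv_newstate = mydrv_newstate;
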
diff --git a/rtems/freebsd/net80211/ieee80211_proto.h b/rtems/freebsd/net80211/ieee80211_proto.h
new file mode 100644
index 00000000..f81a6433
--- /dev/null
+++ b/rtems/freebsd/net80211/ieee80211_proto.h
@@ -0,0 +1,387 @@
+/*-
+ * Copyright (c) 2001 Atsushi Onoe
+ * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+#ifndef _NET80211_IEEE80211_PROTO_HH_
+#define _NET80211_IEEE80211_PROTO_HH_
+
+/*
+ * 802.11 protocol implementation definitions.
+ */
+
+enum ieee80211_state {
+ IEEE80211_S_INIT = 0, /* default state */
+ IEEE80211_S_SCAN = 1, /* scanning */
+ IEEE80211_S_AUTH = 2, /* try to authenticate */
+ IEEE80211_S_ASSOC = 3, /* try to assoc */
+ IEEE80211_S_CAC = 4, /* doing channel availability check */
+ IEEE80211_S_RUN = 5, /* operational (e.g. associated) */
+ IEEE80211_S_CSA = 6, /* channel switch announce pending */
+ IEEE80211_S_SLEEP = 7, /* power save */
+};
+#define IEEE80211_S_MAX (IEEE80211_S_SLEEP+1)
+
+#define IEEE80211_SEND_MGMT(_ni,_type,_arg) \
+ ((*(_ni)->ni_ic->ic_send_mgmt)(_ni, _type, _arg))
+
+extern const char *ieee80211_mgt_subtype_name[];
+extern const char *ieee80211_phymode_name[IEEE80211_MODE_MAX];
+extern const int ieee80211_opcap[IEEE80211_OPMODE_MAX];
+
+void ieee80211_proto_attach(struct ieee80211com *);
+void ieee80211_proto_detach(struct ieee80211com *);
+void ieee80211_proto_vattach(struct ieee80211vap *);
+void ieee80211_proto_vdetach(struct ieee80211vap *);
+
+void ieee80211_syncifflag_locked(struct ieee80211com *, int flag);
+void ieee80211_syncflag(struct ieee80211vap *, int flag);
+void ieee80211_syncflag_ht(struct ieee80211vap *, int flag);
+void ieee80211_syncflag_ext(struct ieee80211vap *, int flag);
+
+#define ieee80211_input(ni, m, rssi, nf) \
+ ((ni)->ni_vap->iv_input(ni, m, rssi, nf))
+int ieee80211_input_all(struct ieee80211com *, struct mbuf *, int, int);
+struct ieee80211_bpf_params;
+int ieee80211_mgmt_output(struct ieee80211_node *, struct mbuf *, int,
+ struct ieee80211_bpf_params *);
+int ieee80211_raw_xmit(struct ieee80211_node *, struct mbuf *,
+ const struct ieee80211_bpf_params *);
+int ieee80211_output(struct ifnet *, struct mbuf *,
+ struct sockaddr *, struct route *ro);
+void ieee80211_send_setup(struct ieee80211_node *, struct mbuf *, int, int,
+ const uint8_t [IEEE80211_ADDR_LEN], const uint8_t [IEEE80211_ADDR_LEN],
+ const uint8_t [IEEE80211_ADDR_LEN]);
+void ieee80211_start(struct ifnet *);
+int ieee80211_send_nulldata(struct ieee80211_node *);
+int ieee80211_classify(struct ieee80211_node *, struct mbuf *m);
+struct mbuf *ieee80211_mbuf_adjust(struct ieee80211vap *, int,
+ struct ieee80211_key *, struct mbuf *);
+struct mbuf *ieee80211_encap(struct ieee80211vap *, struct ieee80211_node *,
+ struct mbuf *);
+int ieee80211_send_mgmt(struct ieee80211_node *, int, int);
+struct ieee80211_appie;
+int ieee80211_send_probereq(struct ieee80211_node *ni,
+ const uint8_t sa[IEEE80211_ADDR_LEN],
+ const uint8_t da[IEEE80211_ADDR_LEN],
+ const uint8_t bssid[IEEE80211_ADDR_LEN],
+ const uint8_t *ssid, size_t ssidlen);
+/*
+ * The formation of ProbeResponse frames requires guidance to
+ * deal with legacy clients. When the client is identified as
+ * "legacy 11b" ieee80211_send_proberesp is passed this token.
+ */
+#define IEEE80211_SEND_LEGACY_11B 0x1 /* legacy 11b client */
+#define IEEE80211_SEND_LEGACY_11 0x2 /* other legacy client */
+#define IEEE80211_SEND_LEGACY 0x3 /* any legacy client */
+struct mbuf *ieee80211_alloc_proberesp(struct ieee80211_node *, int);
+int ieee80211_send_proberesp(struct ieee80211vap *,
+ const uint8_t da[IEEE80211_ADDR_LEN], int);
+struct mbuf *ieee80211_alloc_rts(struct ieee80211com *ic,
+ const uint8_t [IEEE80211_ADDR_LEN],
+ const uint8_t [IEEE80211_ADDR_LEN], uint16_t);
+struct mbuf *ieee80211_alloc_cts(struct ieee80211com *,
+ const uint8_t [IEEE80211_ADDR_LEN], uint16_t);
+
+uint8_t *ieee80211_add_rates(uint8_t *, const struct ieee80211_rateset *);
+uint8_t *ieee80211_add_xrates(uint8_t *, const struct ieee80211_rateset *);
+uint16_t ieee80211_getcapinfo(struct ieee80211vap *,
+ struct ieee80211_channel *);
+
+void ieee80211_reset_erp(struct ieee80211com *);
+void ieee80211_set_shortslottime(struct ieee80211com *, int onoff);
+int ieee80211_iserp_rateset(const struct ieee80211_rateset *);
+void ieee80211_setbasicrates(struct ieee80211_rateset *,
+ enum ieee80211_phymode);
+void ieee80211_addbasicrates(struct ieee80211_rateset *,
+ enum ieee80211_phymode);
+
+/*
+ * Return the size of the 802.11 header for a management or data frame.
+ */
+static __inline int
+ieee80211_hdrsize(const void *data)
+{
+ const struct ieee80211_frame *wh = data;
+ int size = sizeof(struct ieee80211_frame);
+
+ /* NB: we don't handle control frames */
+ KASSERT((wh->i_fc[0]&IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_CTL,
+ ("%s: control frame", __func__));
+ if (IEEE80211_IS_DSTODS(wh))
+ size += IEEE80211_ADDR_LEN;
+ if (IEEE80211_QOS_HAS_SEQ(wh))
+ size += sizeof(uint16_t);
+ return size;
+}
+
+/*
+ * Like ieee80211_hdrsize, but handles any type of frame.
+ */
+static __inline int
+ieee80211_anyhdrsize(const void *data)
+{
+ const struct ieee80211_frame *wh = data;
+
+ if ((wh->i_fc[0]&IEEE80211_FC0_TYPE_MASK) == IEEE80211_FC0_TYPE_CTL) {
+ switch (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) {
+ case IEEE80211_FC0_SUBTYPE_CTS:
+ case IEEE80211_FC0_SUBTYPE_ACK:
+ return sizeof(struct ieee80211_frame_ack);
+ case IEEE80211_FC0_SUBTYPE_BAR:
+ return sizeof(struct ieee80211_frame_bar);
+ }
+ return sizeof(struct ieee80211_frame_min);
+ } else
+ return ieee80211_hdrsize(data);
+}
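
For reference, the sizes these inlines produce for the common layouts (sizeof(struct ieee80211_frame) is 24 bytes):

    /* 3-address data/mgmt frame:  24 bytes                          */
    /* QoS data frame:             24 + 2 (QoS control) = 26 bytes   */
    /* 4-address (WDS) QoS data:   24 + 6 + 2 = 32 bytes             */
    /* ACK/CTS control frame:      sizeof(struct ieee80211_frame_ack) = 10 */
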
+
+/*
+ * Template for an in-kernel authenticator. Authenticators
+ * register with the protocol code and are typically loaded
+ * as separate modules as needed. One special authenticator
+ * is xauth; it intercepts requests so that protocols like
+ * WPA can be handled in user space.
+ */
+struct ieee80211_authenticator {
+ const char *ia_name; /* printable name */
+ int (*ia_attach)(struct ieee80211vap *);
+ void (*ia_detach)(struct ieee80211vap *);
+ void (*ia_node_join)(struct ieee80211_node *);
+ void (*ia_node_leave)(struct ieee80211_node *);
+};
+void ieee80211_authenticator_register(int type,
+ const struct ieee80211_authenticator *);
+void ieee80211_authenticator_unregister(int type);
+const struct ieee80211_authenticator *ieee80211_authenticator_get(int auth);
+
+struct ieee80211req;
+/*
+ * Template for a MAC ACL policy module. Such modules
+ * register with the protocol code and are passed the sender's
+ * address of each received auth frame for validation.
+ */
+struct ieee80211_aclator {
+ const char *iac_name; /* printable name */
+ int (*iac_attach)(struct ieee80211vap *);
+ void (*iac_detach)(struct ieee80211vap *);
+ int (*iac_check)(struct ieee80211vap *,
+ const uint8_t mac[IEEE80211_ADDR_LEN]);
+ int (*iac_add)(struct ieee80211vap *,
+ const uint8_t mac[IEEE80211_ADDR_LEN]);
+ int (*iac_remove)(struct ieee80211vap *,
+ const uint8_t mac[IEEE80211_ADDR_LEN]);
+ int (*iac_flush)(struct ieee80211vap *);
+ int (*iac_setpolicy)(struct ieee80211vap *, int);
+ int (*iac_getpolicy)(struct ieee80211vap *);
+ int (*iac_setioctl)(struct ieee80211vap *, struct ieee80211req *);
+ int (*iac_getioctl)(struct ieee80211vap *, struct ieee80211req *);
+};
+void ieee80211_aclator_register(const struct ieee80211_aclator *);
+void ieee80211_aclator_unregister(const struct ieee80211_aclator *);
+const struct ieee80211_aclator *ieee80211_aclator_get(const char *name);
+
+/* flags for ieee80211_fix_rate() */
+#define IEEE80211_F_DOSORT 0x00000001 /* sort rate list */
+#define IEEE80211_F_DOFRATE 0x00000002 /* use fixed legacy rate */
+#define IEEE80211_F_DONEGO 0x00000004 /* calc negotiated rate */
+#define IEEE80211_F_DODEL 0x00000008 /* delete ignore rate */
+#define IEEE80211_F_DOBRS 0x00000010 /* check basic rate set */
+#define IEEE80211_F_JOIN 0x00000020 /* sta joining our bss */
+#define IEEE80211_F_DOFMCS 0x00000040 /* use fixed HT rate */
+int ieee80211_fix_rate(struct ieee80211_node *,
+ struct ieee80211_rateset *, int);
+
+/*
+ * WME/WMM support.
+ */
+struct wmeParams {
+ uint8_t wmep_acm;
+ uint8_t wmep_aifsn;
+ uint8_t wmep_logcwmin; /* log2(cwmin) */
+ uint8_t wmep_logcwmax; /* log2(cwmax) */
+ uint8_t wmep_txopLimit;
+ uint8_t wmep_noackPolicy; /* 0 (ack), 1 (no ack) */
+};
+#define IEEE80211_TXOP_TO_US(_txop) ((_txop)<<5)
+#define IEEE80211_US_TO_TXOP(_us) ((_us)>>5)
+
+struct chanAccParams {
+ uint8_t cap_info; /* version of the current set */
+ struct wmeParams cap_wmeParams[WME_NUM_AC];
+};
+
+struct ieee80211_wme_state {
+ u_int wme_flags;
+#define WME_F_AGGRMODE 0x00000001 /* STATUS: WME aggressive mode */
+ u_int wme_hipri_traffic; /* VI/VO frames in beacon interval */
+ u_int wme_hipri_switch_thresh;/* aggressive mode switch thresh */
+ u_int wme_hipri_switch_hysteresis;/* aggressive mode switch hysteresis */
+
+ struct wmeParams wme_params[4]; /* from assoc resp for each AC */
+ struct chanAccParams wme_wmeChanParams; /* WME params applied to self */
+ struct chanAccParams wme_wmeBssChanParams;/* WME params bcast to stations */
+ struct chanAccParams wme_chanParams; /* params applied to self */
+ struct chanAccParams wme_bssChanParams; /* params bcast to stations */
+
+ int (*wme_update)(struct ieee80211com *);
+};
+
+void ieee80211_wme_initparams(struct ieee80211vap *);
+void ieee80211_wme_updateparams(struct ieee80211vap *);
+void ieee80211_wme_updateparams_locked(struct ieee80211vap *);
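
The wme_update callback is how parameter changes reach the hardware; a sketch of a hypothetical driver hooking it and programming its EDCA registers from the applied channel parameters:

    static int
    mydrv_wme_update(struct ieee80211com *ic)
    {
        const struct wmeParams *wmep;
        int ac;

        for (ac = 0; ac < WME_NUM_AC; ac++) {
            wmep = &ic->ic_wme.wme_chanParams.cap_wmeParams[ac];
            /* program per-AC EDCA registers from wmep here */
        }
        return 0;
    }

    /* at attach time: */
    ic->ic_wme.wme_update = mydrv_wme_update;
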
+
+/*
+ * Return the WME TID from a QoS frame. If no TID
+ * is present return the index for the "non-QoS" entry.
+ */
+static __inline uint8_t
+ieee80211_gettid(const struct ieee80211_frame *wh)
+{
+ uint8_t tid;
+
+ if (IEEE80211_QOS_HAS_SEQ(wh)) {
+ if (IEEE80211_IS_DSTODS(wh))
+ tid = ((const struct ieee80211_qosframe_addr4 *)wh)->
+ i_qos[0];
+ else
+ tid = ((const struct ieee80211_qosframe *)wh)->i_qos[0];
+ tid &= IEEE80211_QOS_TID;
+ } else
+ tid = IEEE80211_NONQOS_TID;
+ return tid;
+}
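
Typical receive-path use, given a frame header wh (a sketch; TID_TO_WME_AC is the usual net80211 mapping macro, assumed available here):

    uint8_t tid = ieee80211_gettid(wh);
    int ac = TID_TO_WME_AC(tid);    /* map TID to access category */
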
+
+void ieee80211_waitfor_parent(struct ieee80211com *);
+void ieee80211_start_locked(struct ieee80211vap *);
+void ieee80211_init(void *);
+void ieee80211_start_all(struct ieee80211com *);
+void ieee80211_stop_locked(struct ieee80211vap *);
+void ieee80211_stop(struct ieee80211vap *);
+void ieee80211_stop_all(struct ieee80211com *);
+void ieee80211_suspend_all(struct ieee80211com *);
+void ieee80211_resume_all(struct ieee80211com *);
+void ieee80211_dturbo_switch(struct ieee80211vap *, int newflags);
+void ieee80211_swbmiss(void *arg);
+void ieee80211_beacon_miss(struct ieee80211com *);
+int ieee80211_new_state(struct ieee80211vap *, enum ieee80211_state, int);
+void ieee80211_print_essid(const uint8_t *, int);
+void ieee80211_dump_pkt(struct ieee80211com *,
+ const uint8_t *, int, int, int);
+
+extern const char *ieee80211_opmode_name[];
+extern const char *ieee80211_state_name[IEEE80211_S_MAX];
+extern const char *ieee80211_wme_acnames[];
+
+/*
+ * Beacon frames constructed by ieee80211_beacon_alloc
+ * have the following structure filled in so drivers
+ * can update the frame later w/ minimal overhead.
+ */
+struct ieee80211_beacon_offsets {
+ uint8_t bo_flags[4]; /* update/state flags */
+ uint16_t *bo_caps; /* capabilities */
+ uint8_t *bo_cfp; /* start of CFParms element */
+ uint8_t *bo_tim; /* start of atim/dtim */
+ uint8_t *bo_wme; /* start of WME parameters */
+ uint8_t *bo_tdma; /* start of TDMA parameters */
+ uint8_t *bo_tim_trailer;/* start of fixed-size trailer */
+ uint16_t bo_tim_len; /* atim/dtim length in bytes */
+ uint16_t bo_tim_trailer_len;/* tim trailer length in bytes */
+ uint8_t *bo_erp; /* start of ERP element */
+ uint8_t *bo_htinfo; /* start of HT info element */
+ uint8_t *bo_ath; /* start of ATH parameters */
+ uint8_t *bo_appie; /* start of AppIE element */
+ uint16_t bo_appie_len; /* AppIE length in bytes */
+ uint16_t bo_csa_trailer_len;
+ uint8_t *bo_csa; /* start of CSA element */
+ uint8_t *bo_meshconf; /* start of MESHCONF element */
+ uint8_t *bo_spare[3];
+};
+struct mbuf *ieee80211_beacon_alloc(struct ieee80211_node *,
+ struct ieee80211_beacon_offsets *);
+
+/*
+ * Beacon frame updates are signaled through calls to iv_update_beacon
+ * with one of the IEEE80211_BEACON_* tokens defined below. For devices
+ * that construct beacon frames on the host this can trigger a rebuild
+ * or defer the processing. For devices that offload beacon frame
+ * handling this callback can be used to signal a rebuild. The bo_flags
+ * array in the ieee80211_beacon_offsets structure is intended to record
+ * deferred processing requirements; ieee80211_beacon_update uses the
+ * state to optimize work. Since this structure is owned by the driver
+ * and not visible to the 802.11 layer, drivers must supply an iv_update_beacon
+ * callback that marks the flag bits and schedules (as necessary) an update.
+ */
+enum {
+ IEEE80211_BEACON_CAPS = 0, /* capabilities */
+ IEEE80211_BEACON_TIM = 1, /* DTIM/ATIM */
+ IEEE80211_BEACON_WME = 2,
+ IEEE80211_BEACON_ERP = 3, /* Extended Rate Phy */
+ IEEE80211_BEACON_HTINFO = 4, /* HT Information */
+ IEEE80211_BEACON_APPIE = 5, /* Application IE's */
+ IEEE80211_BEACON_CFP = 6, /* CFParms */
+ IEEE80211_BEACON_CSA = 7, /* Channel Switch Announcement */
+ IEEE80211_BEACON_TDMA = 9, /* TDMA Info */
+ IEEE80211_BEACON_ATH = 10, /* ATH parameters */
+ IEEE80211_BEACON_MESHCONF = 11, /* Mesh Configuration */
+};
+int ieee80211_beacon_update(struct ieee80211_node *,
+ struct ieee80211_beacon_offsets *, struct mbuf *, int mcast);
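
A minimal sketch of an iv_update_beacon callback for a driver that builds beacons on the host (names hypothetical): it records the item in bo_flags and defers the rebuild to the beacon interrupt, where ieee80211_beacon_update consumes the flags.

    static void
    mydrv_update_beacon(struct ieee80211vap *vap, int item)
    {
        struct mydrv_vap *mvp = (struct mydrv_vap *)vap;

        setbit(mvp->mv_boff.bo_flags, item);  /* note deferred work */
        mvp->mv_beacon_dirty = 1;             /* rebuild on next beacon */
    }
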
+
+void ieee80211_csa_startswitch(struct ieee80211com *,
+ struct ieee80211_channel *, int mode, int count);
+void ieee80211_csa_completeswitch(struct ieee80211com *);
+void ieee80211_csa_cancelswitch(struct ieee80211com *);
+void ieee80211_cac_completeswitch(struct ieee80211vap *);
+
+/*
+ * Notification methods called from the 802.11 state machine.
+ * Note that while these are defined here, their implementation
+ * is OS-specific.
+ */
+void ieee80211_notify_node_join(struct ieee80211_node *, int newassoc);
+void ieee80211_notify_node_leave(struct ieee80211_node *);
+void ieee80211_notify_scan_done(struct ieee80211vap *);
+void ieee80211_notify_wds_discover(struct ieee80211_node *);
+void ieee80211_notify_csa(struct ieee80211com *,
+ const struct ieee80211_channel *, int mode, int count);
+void ieee80211_notify_radar(struct ieee80211com *,
+ const struct ieee80211_channel *);
+enum ieee80211_notify_cac_event {
+ IEEE80211_NOTIFY_CAC_START = 0, /* CAC timer started */
+ IEEE80211_NOTIFY_CAC_STOP = 1, /* CAC intentionally stopped */
+ IEEE80211_NOTIFY_CAC_RADAR = 2, /* CAC stopped due to radar detection */
+ IEEE80211_NOTIFY_CAC_EXPIRE = 3, /* CAC expired w/o radar */
+};
+void ieee80211_notify_cac(struct ieee80211com *,
+ const struct ieee80211_channel *,
+ enum ieee80211_notify_cac_event);
+void ieee80211_notify_node_deauth(struct ieee80211_node *);
+void ieee80211_notify_node_auth(struct ieee80211_node *);
+void ieee80211_notify_country(struct ieee80211vap *, const uint8_t [],
+ const uint8_t cc[2]);
+void ieee80211_notify_radio(struct ieee80211com *, int);
+#endif /* _NET80211_IEEE80211_PROTO_HH_ */
diff --git a/rtems/freebsd/net80211/ieee80211_radiotap.c b/rtems/freebsd/net80211/ieee80211_radiotap.c
new file mode 100644
index 00000000..63106b2f
--- /dev/null
+++ b/rtems/freebsd/net80211/ieee80211_radiotap.c
@@ -0,0 +1,357 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 2009 Sam Leffler, Errno Consulting
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * IEEE 802.11 radiotap support.
+ */
+#include <rtems/freebsd/local/opt_wlan.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/endian.h>
+#include <rtems/freebsd/sys/kernel.h>
+
+#include <rtems/freebsd/sys/socket.h>
+
+#include <rtems/freebsd/net/bpf.h>
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/if_llc.h>
+#include <rtems/freebsd/net/if_media.h>
+
+#include <rtems/freebsd/net80211/ieee80211_var.h>
+
+static int radiotap_offset(struct ieee80211_radiotap_header *, int);
+
+void
+ieee80211_radiotap_attach(struct ieee80211com *ic,
+ struct ieee80211_radiotap_header *th, int tlen, uint32_t tx_radiotap,
+ struct ieee80211_radiotap_header *rh, int rlen, uint32_t rx_radiotap)
+{
+#define B(_v) (1<<(_v))
+ int off;
+
+ th->it_len = htole16(roundup2(tlen, sizeof(uint32_t)));
+ th->it_present = htole32(tx_radiotap);
+ ic->ic_th = th;
+ /* calculate offset to channel data */
+ off = -1;
+ if (tx_radiotap & B(IEEE80211_RADIOTAP_CHANNEL))
+ off = radiotap_offset(th, IEEE80211_RADIOTAP_CHANNEL);
+ else if (tx_radiotap & B(IEEE80211_RADIOTAP_XCHANNEL))
+ off = radiotap_offset(th, IEEE80211_RADIOTAP_XCHANNEL);
+ if (off == -1) {
+ if_printf(ic->ic_ifp, "%s: no tx channel, radiotap 0x%x",
+ __func__, tx_radiotap);
+ /* NB: we handle this case but data will have no chan spec */
+ } else
+ ic->ic_txchan = ((uint8_t *) th) + off;
+
+ rh->it_len = htole16(roundup2(rlen, sizeof(uint32_t)));
+ rh->it_present = htole32(rx_radiotap);
+ ic->ic_rh = rh;
+ /* calculate offset to channel data */
+ off = -1;
+ if (rx_radiotap & B(IEEE80211_RADIOTAP_CHANNEL))
+ off = radiotap_offset(rh, IEEE80211_RADIOTAP_CHANNEL);
+ else if (rx_radiotap & B(IEEE80211_RADIOTAP_XCHANNEL))
+ off = radiotap_offset(rh, IEEE80211_RADIOTAP_XCHANNEL);
+ if (off == -1) {
+ if_printf(ic->ic_ifp, "%s: no rx channel, radiotap 0x%x",
+ __func__, rx_radiotap);
+ /* NB: we handle this case but data will have no chan spec */
+ } else
+ ic->ic_rxchan = ((uint8_t *) rh) + off;
+#undef B
+}
+
+void
+ieee80211_radiotap_detach(struct ieee80211com *ic)
+{
+}
+
+void
+ieee80211_radiotap_vattach(struct ieee80211vap *vap)
+{
+ struct ieee80211com *ic = vap->iv_ic;
+ struct ieee80211_radiotap_header *th = ic->ic_th;
+
+ if (th != NULL && ic->ic_rh != NULL) {
+ /* radiotap DLT for raw 802.11 frames */
+ bpfattach2(vap->iv_ifp, DLT_IEEE802_11_RADIO,
+ sizeof(struct ieee80211_frame) + le16toh(th->it_len),
+ &vap->iv_rawbpf);
+ }
+}
+
+void
+ieee80211_radiotap_vdetach(struct ieee80211vap *vap)
+{
+ /* NB: bpfattach is called by ether_ifdetach and claims all taps */
+}
+
+static void
+set_channel(void *p, const struct ieee80211_channel *c)
+{
+ struct {
+ uint16_t freq;
+ uint16_t flags;
+ } *rc = p;
+
+ rc->freq = htole16(c->ic_freq);
+ rc->flags = htole16(c->ic_flags);
+}
+
+static void
+set_xchannel(void *p, const struct ieee80211_channel *c)
+{
+ struct {
+ uint32_t flags;
+ uint16_t freq;
+ uint8_t ieee;
+ uint8_t maxpow;
+ } *rc = p;
+
+ rc->flags = htole32(c->ic_flags);
+ rc->freq = htole16(c->ic_freq);
+ rc->ieee = c->ic_ieee;
+ rc->maxpow = c->ic_maxregpower;
+}
+
+/*
+ * Update radiotap state on channel change.
+ */
+void
+ieee80211_radiotap_chan_change(struct ieee80211com *ic)
+{
+ if (ic->ic_rxchan != NULL) {
+ struct ieee80211_radiotap_header *rh = ic->ic_rh;
+
+ if (rh->it_present & htole32(1<<IEEE80211_RADIOTAP_XCHANNEL))
+ set_xchannel(ic->ic_rxchan, ic->ic_curchan);
+ else if (rh->it_present & htole32(1<<IEEE80211_RADIOTAP_CHANNEL))
+ set_channel(ic->ic_rxchan, ic->ic_curchan);
+ }
+ if (ic->ic_txchan != NULL) {
+ struct ieee80211_radiotap_header *th = ic->ic_th;
+
+ if (th->it_present & htole32(1<<IEEE80211_RADIOTAP_XCHANNEL))
+ set_xchannel(ic->ic_txchan, ic->ic_curchan);
+ else if (th->it_present & htole32(1<<IEEE80211_RADIOTAP_CHANNEL))
+ set_channel(ic->ic_txchan, ic->ic_curchan);
+ }
+}
+
+/*
+ * Distribute radiotap data (+packet) to all monitor mode
+ * vaps with an active tap other than vap0.
+ */
+static void
+spam_vaps(struct ieee80211vap *vap0, struct mbuf *m,
+ struct ieee80211_radiotap_header *rh, int len)
+{
+ struct ieee80211com *ic = vap0->iv_ic;
+ struct ieee80211vap *vap;
+
+ TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
+ if (vap != vap0 &&
+ vap->iv_opmode == IEEE80211_M_MONITOR &&
+ (vap->iv_flags_ext & IEEE80211_FEXT_BPF) &&
+ vap->iv_state != IEEE80211_S_INIT)
+ bpf_mtap2(vap->iv_rawbpf, rh, len, m);
+ }
+}
+
+/*
+ * Dispatch radiotap data for transmitted packet.
+ */
+void
+ieee80211_radiotap_tx(struct ieee80211vap *vap0, struct mbuf *m)
+{
+ struct ieee80211com *ic = vap0->iv_ic;
+ struct ieee80211_radiotap_header *th = ic->ic_th;
+ int len;
+
+ KASSERT(th != NULL, ("no tx radiotap header"));
+ len = le16toh(th->it_len);
+
+ if (vap0->iv_flags_ext & IEEE80211_FEXT_BPF)
+ bpf_mtap2(vap0->iv_rawbpf, th, len, m);
+ /*
+ * Spam monitor mode vaps.
+ */
+ if (ic->ic_montaps != 0)
+ spam_vaps(vap0, m, th, len);
+}
+
+/*
+ * Dispatch radiotap data for received packet.
+ */
+void
+ieee80211_radiotap_rx(struct ieee80211vap *vap0, struct mbuf *m)
+{
+ struct ieee80211com *ic = vap0->iv_ic;
+ struct ieee80211_radiotap_header *rh = ic->ic_rh;
+ int len;
+
+ KASSERT(rh != NULL, ("no rx radiotap header"));
+ len = le16toh(rh->it_len);
+
+ if (vap0->iv_flags_ext & IEEE80211_FEXT_BPF)
+ bpf_mtap2(vap0->iv_rawbpf, rh, len, m);
+ /*
+ * Spam monitor mode vaps with unicast frames. Multicast
+ * frames are handled by passing through ieee80211_input_all
+ * which distributes copies to the monitor mode vaps.
+ */
+ if (ic->ic_montaps != 0 && (m->m_flags & M_BCAST) == 0)
+ spam_vaps(vap0, m, rh, len);
+}
+
+/*
+ * Dispatch radiotap data for a packet received outside the normal
+ * rx processing path; this is used, for example, to handle frames
+ * received with errors that would otherwise be dropped.
+ */
+void
+ieee80211_radiotap_rx_all(struct ieee80211com *ic, struct mbuf *m)
+{
+ struct ieee80211_radiotap_header *rh = ic->ic_rh;
+ int len = le16toh(rh->it_len);
+ struct ieee80211vap *vap;
+
+ /* XXX locking? */
+ TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
+ if (ieee80211_radiotap_active_vap(vap) &&
+ vap->iv_state != IEEE80211_S_INIT)
+ bpf_mtap2(vap->iv_rawbpf, rh, len, m);
+ }
+}
+
+/*
+ * Return the offset of the specified item in the radiotap
+ * header description. If the item is not present or is not
+ * known -1 is returned.
+ */
+static int
+radiotap_offset(struct ieee80211_radiotap_header *rh, int item)
+{
+ static const struct {
+ size_t align, width;
+ } items[] = {
+ [IEEE80211_RADIOTAP_TSFT] = {
+ .align = sizeof(uint64_t),
+ .width = sizeof(uint64_t),
+ },
+ [IEEE80211_RADIOTAP_FLAGS] = {
+ .align = sizeof(uint8_t),
+ .width = sizeof(uint8_t),
+ },
+ [IEEE80211_RADIOTAP_RATE] = {
+ .align = sizeof(uint8_t),
+ .width = sizeof(uint8_t),
+ },
+ [IEEE80211_RADIOTAP_CHANNEL] = {
+ .align = sizeof(uint16_t),
+ .width = 2*sizeof(uint16_t),
+ },
+ [IEEE80211_RADIOTAP_FHSS] = {
+ .align = sizeof(uint16_t),
+ .width = sizeof(uint16_t),
+ },
+ [IEEE80211_RADIOTAP_DBM_ANTSIGNAL] = {
+ .align = sizeof(uint8_t),
+ .width = sizeof(uint8_t),
+ },
+ [IEEE80211_RADIOTAP_DBM_ANTNOISE] = {
+ .align = sizeof(uint8_t),
+ .width = sizeof(uint8_t),
+ },
+ [IEEE80211_RADIOTAP_LOCK_QUALITY] = {
+ .align = sizeof(uint16_t),
+ .width = sizeof(uint16_t),
+ },
+ [IEEE80211_RADIOTAP_TX_ATTENUATION] = {
+ .align = sizeof(uint16_t),
+ .width = sizeof(uint16_t),
+ },
+ [IEEE80211_RADIOTAP_DB_TX_ATTENUATION] = {
+ .align = sizeof(uint16_t),
+ .width = sizeof(uint16_t),
+ },
+ [IEEE80211_RADIOTAP_DBM_TX_POWER] = {
+ .align = sizeof(uint8_t),
+ .width = sizeof(uint8_t),
+ },
+ [IEEE80211_RADIOTAP_ANTENNA] = {
+ .align = sizeof(uint8_t),
+ .width = sizeof(uint8_t),
+ },
+ [IEEE80211_RADIOTAP_DB_ANTSIGNAL] = {
+ .align = sizeof(uint8_t),
+ .width = sizeof(uint8_t),
+ },
+ [IEEE80211_RADIOTAP_DB_ANTNOISE] = {
+ .align = sizeof(uint8_t),
+ .width = sizeof(uint8_t),
+ },
+ [IEEE80211_RADIOTAP_XCHANNEL] = {
+ .align = sizeof(uint32_t),
+ .width = 2*sizeof(uint32_t),
+ },
+ };
+ uint32_t present = le32toh(rh->it_present);
+ int off, i;
+
+ off = sizeof(struct ieee80211_radiotap_header);
+ for (i = 0; i < IEEE80211_RADIOTAP_EXT; i++) {
+ if ((present & (1<<i)) == 0)
+ continue;
+ if (items[i].align == 0) {
+ /* NB: unidentified element, don't guess */
+ printf("%s: unknown item %d\n", __func__, i);
+ return -1;
+ }
+ off = roundup2(off, items[i].align);
+ if (i == item) {
+ if (off + items[i].width > le16toh(rh->it_len)) {
+ /* NB: item does not fit in header data */
+ printf("%s: item %d not in header data, "
+ "off %d width %zu len %d\n", __func__, i,
+ off, items[i].width, le16toh(rh->it_len));
+ return -1;
+ }
+ return off;
+ }
+ off += items[i].width;
+ }
+ return -1;
+}
diff --git a/rtems/freebsd/net80211/ieee80211_radiotap.h b/rtems/freebsd/net80211/ieee80211_radiotap.h
new file mode 100644
index 00000000..89eac4be
--- /dev/null
+++ b/rtems/freebsd/net80211/ieee80211_radiotap.h
@@ -0,0 +1,234 @@
+/* $FreeBSD$ */
+/* $NetBSD: ieee80211_radiotap.h,v 1.16 2007/01/06 05:51:15 dyoung Exp $ */
+
+/*-
+ * Copyright (c) 2003, 2004 David Young. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of David Young may not be used to endorse or promote
+ * products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY DAVID YOUNG ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL DAVID
+ * YOUNG BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+ * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
+ * OF SUCH DAMAGE.
+ */
+#ifndef _NET80211_IEEE80211_RADIOTAP_HH_
+#define _NET80211_IEEE80211_RADIOTAP_HH_
+
+/* A generic radio capture format is desirable. It must be
+ * rigidly defined (e.g., units for fields should be given),
+ * and easily extensible.
+ *
+ * The following is an extensible radio capture format. It is
+ * based on a bitmap indicating which fields are present.
+ *
+ * I am trying to describe precisely what the application programmer
+ * should expect in the following, and for that reason I tell the
+ * units and origin of each measurement (where it applies), or else I
+ * use sufficiently weaselly language ("is a monotonically nondecreasing
+ * function of...") that I cannot set false expectations for lawyerly
+ * readers.
+ */
+#if defined(__KERNEL__) || defined(_KERNEL)
+#ifndef DLT_IEEE802_11_RADIO
+#define DLT_IEEE802_11_RADIO 127 /* 802.11 plus WLAN header */
+#endif
+#endif /* defined(__KERNEL__) || defined(_KERNEL) */
+
+#define IEEE80211_RADIOTAP_HDRLEN 64 /* XXX deprecated */
+
+/*
+ * The radio capture header precedes the 802.11 header.
+ *
+ * Note well: all radiotap fields are little-endian.
+ */
+struct ieee80211_radiotap_header {
+ uint8_t it_version; /* Version 0. Only increases
+ * for drastic changes;
+ * introduction of compatible
+ * new fields does not count.
+ */
+ uint8_t it_pad;
+ uint16_t it_len; /* length of the whole
+ * header in bytes, including
+ * it_version, it_pad,
+ * it_len, and data fields.
+ */
+ uint32_t it_present; /* A bitmap telling which
+ * fields are present. Set bit 31
+ * (0x80000000) to extend the
+ * bitmap by another 32 bits.
+ * Each extension word likewise
+ * sets bit 31 to chain a
+ * further bitmap.
+ */
+} __packed;
+
+/*
+ * Name Data type Units
+ * ---- --------- -----
+ *
+ * IEEE80211_RADIOTAP_TSFT uint64_t microseconds
+ *
+ * Value in microseconds of the MAC's 64-bit 802.11 Time
+ * Synchronization Function timer when the first bit of the
+ * MPDU arrived at the MAC. For received frames, only.
+ *
+ * IEEE80211_RADIOTAP_CHANNEL 2 x uint16_t MHz, bitmap
+ *
+ * Tx/Rx frequency in MHz, followed by flags (see below).
+ *
+ * IEEE80211_RADIOTAP_FHSS uint16_t see below
+ *
+ * For frequency-hopping radios, the hop set (first byte)
+ * and pattern (second byte).
+ *
+ * IEEE80211_RADIOTAP_RATE uint8_t 500kb/s or index
+ *
+ * Tx/Rx data rate. If bit 0x80 is set then it represents
+ * an MCS index and not an IEEE rate.
+ *
+ * IEEE80211_RADIOTAP_DBM_ANTSIGNAL int8_t decibels from
+ * one milliwatt (dBm)
+ *
+ * RF signal power at the antenna, decibel difference from
+ * one milliwatt.
+ *
+ * IEEE80211_RADIOTAP_DBM_ANTNOISE int8_t decibels from
+ * one milliwatt (dBm)
+ *
+ * RF noise power at the antenna, decibel difference from one
+ * milliwatt.
+ *
+ * IEEE80211_RADIOTAP_DB_ANTSIGNAL uint8_t decibel (dB)
+ *
+ * RF signal power at the antenna, decibel difference from an
+ * arbitrary, fixed reference.
+ *
+ * IEEE80211_RADIOTAP_DB_ANTNOISE uint8_t decibel (dB)
+ *
+ * RF noise power at the antenna, decibel difference from an
+ * arbitrary, fixed reference point.
+ *
+ * IEEE80211_RADIOTAP_LOCK_QUALITY uint16_t unitless
+ *
+ * Quality of Barker code lock. Unitless. Monotonically
+ * nondecreasing with "better" lock strength. Called "Signal
+ * Quality" in datasheets. (Is there a standard way to measure
+ * this?)
+ *
+ * IEEE80211_RADIOTAP_TX_ATTENUATION uint16_t unitless
+ *
+ * Transmit power expressed as unitless distance from max
+ * power set at factory calibration. 0 is max power.
+ * Monotonically nondecreasing with lower power levels.
+ *
+ * IEEE80211_RADIOTAP_DB_TX_ATTENUATION uint16_t decibels (dB)
+ *
+ * Transmit power expressed as decibel distance from max power
+ * set at factory calibration. 0 is max power. Monotonically
+ * nondecreasing with lower power levels.
+ *
+ * IEEE80211_RADIOTAP_DBM_TX_POWER int8_t decibels from
+ * one milliwatt (dBm)
+ *
+ * Transmit power expressed as dBm (decibels from a 1 milliwatt
+ * reference). This is the absolute power level measured at
+ * the antenna port.
+ *
+ * IEEE80211_RADIOTAP_FLAGS uint8_t bitmap
+ *
+ * Properties of transmitted and received frames. See flags
+ * defined below.
+ *
+ * IEEE80211_RADIOTAP_ANTENNA uint8_t antenna index
+ *
+ * Unitless indication of the Rx/Tx antenna for this packet.
+ * The first antenna is antenna 0.
+ *
+ * IEEE80211_RADIOTAP_XCHANNEL uint32_t bitmap
+ * uint16_t MHz
+ * uint8_t channel number
+ * int8_t .5 dBm
+ *
+ * Extended channel specification: flags (see below) followed by
+ * frequency in MHz, the corresponding IEEE channel number, and
+ * finally the maximum regulatory transmit power cap in .5 dBm
+ * units. This property supersedes IEEE80211_RADIOTAP_CHANNEL
+ * and only one of the two should be present.
+ */
+enum ieee80211_radiotap_type {
+ IEEE80211_RADIOTAP_TSFT = 0,
+ IEEE80211_RADIOTAP_FLAGS = 1,
+ IEEE80211_RADIOTAP_RATE = 2,
+ IEEE80211_RADIOTAP_CHANNEL = 3,
+ IEEE80211_RADIOTAP_FHSS = 4,
+ IEEE80211_RADIOTAP_DBM_ANTSIGNAL = 5,
+ IEEE80211_RADIOTAP_DBM_ANTNOISE = 6,
+ IEEE80211_RADIOTAP_LOCK_QUALITY = 7,
+ IEEE80211_RADIOTAP_TX_ATTENUATION = 8,
+ IEEE80211_RADIOTAP_DB_TX_ATTENUATION = 9,
+ IEEE80211_RADIOTAP_DBM_TX_POWER = 10,
+ IEEE80211_RADIOTAP_ANTENNA = 11,
+ IEEE80211_RADIOTAP_DB_ANTSIGNAL = 12,
+ IEEE80211_RADIOTAP_DB_ANTNOISE = 13,
+ /* NB: gap for netbsd definitions */
+ IEEE80211_RADIOTAP_XCHANNEL = 18,
+ IEEE80211_RADIOTAP_EXT = 31,
+};
+
+#ifndef _KERNEL
+/* channel attributes */
+#define IEEE80211_CHAN_TURBO 0x00000010 /* Turbo channel */
+#define IEEE80211_CHAN_CCK 0x00000020 /* CCK channel */
+#define IEEE80211_CHAN_OFDM 0x00000040 /* OFDM channel */
+#define IEEE80211_CHAN_2GHZ 0x00000080 /* 2 GHz spectrum channel. */
+#define IEEE80211_CHAN_5GHZ 0x00000100 /* 5 GHz spectrum channel */
+#define IEEE80211_CHAN_PASSIVE 0x00000200 /* Only passive scan allowed */
+#define IEEE80211_CHAN_DYN 0x00000400 /* Dynamic CCK-OFDM channel */
+#define IEEE80211_CHAN_GFSK 0x00000800 /* GFSK channel (FHSS PHY) */
+#define IEEE80211_CHAN_GSM 0x00001000 /* 900 MHz spectrum channel */
+#define IEEE80211_CHAN_STURBO 0x00002000 /* 11a static turbo channel only */
+#define IEEE80211_CHAN_HALF 0x00004000 /* Half rate channel */
+#define IEEE80211_CHAN_QUARTER 0x00008000 /* Quarter rate channel */
+#endif /* !_KERNEL */
+
+/* For IEEE80211_RADIOTAP_FLAGS */
+#define IEEE80211_RADIOTAP_F_CFP 0x01 /* sent/received
+ * during CFP
+ */
+#define IEEE80211_RADIOTAP_F_SHORTPRE 0x02 /* sent/received
+ * with short
+ * preamble
+ */
+#define IEEE80211_RADIOTAP_F_WEP 0x04 /* sent/received
+ * with WEP encryption
+ */
+#define IEEE80211_RADIOTAP_F_FRAG 0x08 /* sent/received
+ * with fragmentation
+ */
+#define IEEE80211_RADIOTAP_F_FCS 0x10 /* frame includes FCS */
+#define IEEE80211_RADIOTAP_F_DATAPAD 0x20 /* frame has padding between
+ * 802.11 header and payload
+ * (to 32-bit boundary)
+ */
+#define IEEE80211_RADIOTAP_F_BADFCS 0x40 /* does not pass FCS check */
+#define IEEE80211_RADIOTAP_F_SHORTGI 0x80 /* HT short GI */
+
+#endif /* !_NET80211_IEEE80211_RADIOTAP_HH_ */
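In practice a driver consumes this header by declaring a packed capture structure that starts with struct ieee80211_radiotap_header and then lists, in ascending bit order, every field it sets in it_present; ieee80211_radiotap_attach() in the .c file above then locates the channel words inside it. A hedged sketch of that layout (the struct name and field selection are hypothetical, not any particular driver's):

#include <stdint.h>

/* Stand-in for struct ieee80211_radiotap_header defined above. */
struct rt_header {
	uint8_t  it_version;
	uint8_t  it_pad;
	uint16_t it_len;	/* little-endian total length */
	uint32_t it_present;	/* little-endian bitmap */
} __attribute__((__packed__));

/* Hypothetical rx capture header: FLAGS(1), RATE(2), CHANNEL(3),
 * DBM_ANTSIGNAL(5), in bit order so offsets match radiotap_offset(). */
struct drv_rx_radiotap_header {
	struct rt_header wr_ihdr;
	uint8_t  wr_flags;
	uint8_t  wr_rate;
	uint16_t wr_chan_freq;	/* naturally 2-byte aligned at offset 10 */
	uint16_t wr_chan_flags;
	int8_t   wr_antsignal;
} __attribute__((__packed__));

#define DRV_RX_RADIOTAP_PRESENT \
	((1 << IEEE80211_RADIOTAP_FLAGS) | \
	 (1 << IEEE80211_RADIOTAP_RATE) | \
	 (1 << IEEE80211_RADIOTAP_CHANNEL) | \
	 (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL))

A matching attach call would then pass &sc->sc_rx_th.wr_ihdr, sizeof(sc->sc_rx_th) and DRV_RX_RADIOTAP_PRESENT as the rx triple of ieee80211_radiotap_attach(), per the signature at the top of ieee80211_radiotap.c (sc being the hypothetical driver softc).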
diff --git a/rtems/freebsd/net80211/ieee80211_ratectl.c b/rtems/freebsd/net80211/ieee80211_ratectl.c
new file mode 100644
index 00000000..6dd74f70
--- /dev/null
+++ b/rtems/freebsd/net80211/ieee80211_ratectl.c
@@ -0,0 +1,94 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 2010 Rui Paulo <rpaulo@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/socket.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/if_media.h>
+
+#include <rtems/freebsd/net80211/ieee80211_var.h>
+#include <rtems/freebsd/net80211/ieee80211_ratectl.h>
+
+static const struct ieee80211_ratectl *ratectls[IEEE80211_RATECTL_MAX];
+
+static const char *ratectl_modnames[IEEE80211_RATECTL_MAX] = {
+ [IEEE80211_RATECTL_AMRR] = "wlan_amrr",
+ [IEEE80211_RATECTL_RSSADAPT] = "wlan_rssadapt",
+ [IEEE80211_RATECTL_ONOE] = "wlan_onoe",
+ [IEEE80211_RATECTL_SAMPLE] = "wlan_sample",
+ [IEEE80211_RATECTL_NONE] = "wlan_none",
+};
+
+MALLOC_DEFINE(M_80211_RATECTL, "80211ratectl", "802.11 rate control");
+
+void
+ieee80211_ratectl_register(int type, const struct ieee80211_ratectl *ratectl)
+{
+ if (type >= IEEE80211_RATECTL_MAX)
+ return;
+ ratectls[type] = ratectl;
+}
+
+void
+ieee80211_ratectl_unregister(int type)
+{
+ if (type >= IEEE80211_RATECTL_MAX)
+ return;
+ ratectls[type] = NULL;
+}
+
+void
+ieee80211_ratectl_init(struct ieee80211vap *vap)
+{
+ if (vap->iv_rate == ratectls[IEEE80211_RATECTL_NONE])
+ ieee80211_ratectl_set(vap, IEEE80211_RATECTL_AMRR);
+ vap->iv_rate->ir_init(vap);
+}
+
+void
+ieee80211_ratectl_set(struct ieee80211vap *vap, int type)
+{
+ if (type >= IEEE80211_RATECTL_MAX)
+ return;
+ if (ratectls[type] == NULL) {
+ ieee80211_load_module(ratectl_modnames[type]);
+ if (ratectls[type] == NULL) {
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_RATECTL,
+ "%s: unable to load algo %u, module %s\n",
+ __func__, type, ratectl_modnames[type]);
+ vap->iv_rate = ratectls[IEEE80211_RATECTL_NONE];
+ return;
+ }
+ }
+ vap->iv_rate = ratectls[type];
+}
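ieee80211_ratectl_set() above is a lookup with a load-on-demand fallback: a missing algorithm triggers ieee80211_load_module(), and if registration still has not filled the slot the vap is pinned to the always-present "none" algorithm. A self-contained model of that control flow (names are illustrative; the stub stands in for the module loader):

#include <stddef.h>
#include <stdio.h>

#define RC_AMRR	0
#define RC_NONE	4
#define RC_MAX	5

struct algo { const char *name; };

static const struct algo amrr = { "amrr" }, none = { "none" };
static const struct algo *table[RC_MAX] = { [RC_NONE] = &none };

/* Stub for ieee80211_load_module(): loading amrr registers it. */
static void load_module(int type)
{
	if (type == RC_AMRR)
		table[RC_AMRR] = &amrr;
}

static const struct algo *rc_set(int type)
{
	if (type >= RC_MAX)
		return &none;
	if (table[type] == NULL) {
		load_module(type);
		if (table[type] == NULL)
			return &none;	/* fall back, as the kernel does */
	}
	return table[type];
}

int main(void)
{
	printf("selected: %s\n", rc_set(RC_AMRR)->name);	/* -> amrr */
	return 0;
}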
diff --git a/rtems/freebsd/net80211/ieee80211_ratectl.h b/rtems/freebsd/net80211/ieee80211_ratectl.h
new file mode 100644
index 00000000..be81781c
--- /dev/null
+++ b/rtems/freebsd/net80211/ieee80211_ratectl.h
@@ -0,0 +1,117 @@
+/*-
+ * Copyright (c) 2010 Rui Paulo <rpaulo@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+enum ieee80211_ratealgs {
+ IEEE80211_RATECTL_AMRR = 0,
+ IEEE80211_RATECTL_RSSADAPT = 1,
+ IEEE80211_RATECTL_ONOE = 2,
+ IEEE80211_RATECTL_SAMPLE = 3,
+ IEEE80211_RATECTL_NONE = 4,
+ IEEE80211_RATECTL_MAX
+};
+
+#define IEEE80211_RATECTL_TX_SUCCESS 1
+#define IEEE80211_RATECTL_TX_FAILURE 0
+
+struct ieee80211_ratectl {
+ const char *ir_name;
+ int (*ir_attach)(const struct ieee80211vap *);
+ void (*ir_detach)(const struct ieee80211vap *);
+ void (*ir_init)(struct ieee80211vap *);
+ void (*ir_deinit)(struct ieee80211vap *);
+ void (*ir_node_init)(struct ieee80211_node *);
+ void (*ir_node_deinit)(struct ieee80211_node *);
+ int (*ir_rate)(struct ieee80211_node *, void *, uint32_t);
+ void (*ir_tx_complete)(const struct ieee80211vap *,
+ const struct ieee80211_node *, int,
+ void *, void *);
+ void (*ir_tx_update)(const struct ieee80211vap *,
+ const struct ieee80211_node *,
+ void *, void *, void *);
+ void (*ir_setinterval)(const struct ieee80211vap *, int);
+};
+
+void ieee80211_ratectl_register(int, const struct ieee80211_ratectl *);
+void ieee80211_ratectl_unregister(int);
+void ieee80211_ratectl_init(struct ieee80211vap *);
+void ieee80211_ratectl_set(struct ieee80211vap *, int);
+
+MALLOC_DECLARE(M_80211_RATECTL);
+
+static void __inline
+ieee80211_ratectl_deinit(struct ieee80211vap *vap)
+{
+ vap->iv_rate->ir_deinit(vap);
+}
+
+static void __inline
+ieee80211_ratectl_node_init(struct ieee80211_node *ni)
+{
+ const struct ieee80211vap *vap = ni->ni_vap;
+
+ vap->iv_rate->ir_node_init(ni);
+}
+
+static void __inline
+ieee80211_ratectl_node_deinit(struct ieee80211_node *ni)
+{
+ const struct ieee80211vap *vap = ni->ni_vap;
+
+ vap->iv_rate->ir_node_deinit(ni);
+}
+
+static int __inline
+ieee80211_ratectl_rate(struct ieee80211_node *ni, void *arg, uint32_t iarg)
+{
+ const struct ieee80211vap *vap = ni->ni_vap;
+
+ return vap->iv_rate->ir_rate(ni, arg, iarg);
+}
+
+static void __inline
+ieee80211_ratectl_tx_complete(const struct ieee80211vap *vap,
+ const struct ieee80211_node *ni, int status, void *arg1, void *arg2)
+{
+ vap->iv_rate->ir_tx_complete(vap, ni, status, arg1, arg2);
+}
+
+static void __inline
+ieee80211_ratectl_tx_update(const struct ieee80211vap *vap,
+ const struct ieee80211_node *ni, void *arg1, void *arg2, void *arg3)
+{
+ if (vap->iv_rate->ir_tx_update == NULL)
+ return;
+ vap->iv_rate->ir_tx_update(vap, ni, arg1, arg2, arg3);
+}
+
+static void __inline
+ieee80211_ratectl_setinterval(const struct ieee80211vap *vap, int msecs)
+{
+ if (vap->iv_rate->ir_setinterval == NULL)
+ return;
+ vap->iv_rate->ir_setinterval(vap, msecs);
+}
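Note the asymmetry in the wrappers above: ir_rate, ir_init and the node hooks are invoked unconditionally, while only ir_tx_update and ir_setinterval are NULL-checked, so every registered algorithm must supply the former set (the "none" module below supplies no-op bodies rather than NULL pointers). A small model of that mandatory-versus-optional dispatch, assuming a pared-down ops table:

#include <stddef.h>
#include <stdio.h>

struct ratectl_ops {
	int  (*rate)(int pktlen);	/* mandatory, called unguarded */
	void (*setinterval)(int msecs);	/* optional, NULL-checked */
};

static int fixed_rate(int pktlen) { (void)pktlen; return 0; }

static const struct ratectl_ops ops = {
	.rate = fixed_rate,
	.setinterval = NULL,		/* algorithm opted out */
};

static void set_interval(const struct ratectl_ops *o, int msecs)
{
	if (o->setinterval == NULL)
		return;	/* same guard as ieee80211_ratectl_setinterval() */
	o->setinterval(msecs);
}

int main(void)
{
	printf("rix = %d\n", ops.rate(1500));	/* no guard: must exist */
	set_interval(&ops, 500);		/* safely ignored */
	return 0;
}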
diff --git a/rtems/freebsd/net80211/ieee80211_ratectl_none.c b/rtems/freebsd/net80211/ieee80211_ratectl_none.c
new file mode 100644
index 00000000..e72edbe2
--- /dev/null
+++ b/rtems/freebsd/net80211/ieee80211_ratectl_none.c
@@ -0,0 +1,116 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 2010 Bernhard Schmidt <bschmidt@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/freebsd/local/opt_wlan.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/module.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/sysctl.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/if_media.h>
+
+#ifdef INET
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/if_ether.h>
+#endif
+
+#include <rtems/freebsd/net80211/ieee80211_var.h>
+#include <rtems/freebsd/net80211/ieee80211_ratectl.h>
+
+static void
+none_init(struct ieee80211vap *vap)
+{
+}
+
+static void
+none_deinit(struct ieee80211vap *vap)
+{
+ free(vap->iv_rs, M_80211_RATECTL);
+}
+
+static void
+none_node_init(struct ieee80211_node *ni)
+{
+ ni->ni_txrate = ni->ni_rates.rs_rates[0] & IEEE80211_RATE_VAL;
+}
+
+static void
+none_node_deinit(struct ieee80211_node *ni)
+{
+}
+
+static int
+none_rate(struct ieee80211_node *ni, void *arg __unused, uint32_t iarg __unused)
+{
+ int rix = 0;
+
+ ni->ni_txrate = ni->ni_rates.rs_rates[rix] & IEEE80211_RATE_VAL;
+ return rix;
+}
+
+static void
+none_tx_complete(const struct ieee80211vap *vap,
+ const struct ieee80211_node *ni, int ok,
+ void *arg1, void *arg2 __unused)
+{
+}
+
+static void
+none_tx_update(const struct ieee80211vap *vap, const struct ieee80211_node *ni,
+ void *arg1, void *arg2, void *arg3)
+{
+}
+
+static void
+none_setinterval(const struct ieee80211vap *vap, int msecs)
+{
+}
+
+/* number of references from net80211 layer */
+static int nrefs = 0;
+
+static const struct ieee80211_ratectl none = {
+ .ir_name = "none",
+ .ir_attach = NULL,
+ .ir_detach = NULL,
+ .ir_init = none_init,
+ .ir_deinit = none_deinit,
+ .ir_node_init = none_node_init,
+ .ir_node_deinit = none_node_deinit,
+ .ir_rate = none_rate,
+ .ir_tx_complete = none_tx_complete,
+ .ir_tx_update = none_tx_update,
+ .ir_setinterval = none_setinterval,
+};
+IEEE80211_RATECTL_MODULE(ratectl_none, 1);
+IEEE80211_RATECTL_ALG(none, IEEE80211_RATECTL_NONE, none);
diff --git a/rtems/freebsd/net80211/ieee80211_regdomain.c b/rtems/freebsd/net80211/ieee80211_regdomain.c
new file mode 100644
index 00000000..19306f40
--- /dev/null
+++ b/rtems/freebsd/net80211/ieee80211_regdomain.c
@@ -0,0 +1,450 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 2005-2008 Sam Leffler, Errno Consulting
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * IEEE 802.11 regdomain support.
+ */
+#include <rtems/freebsd/local/opt_wlan.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/kernel.h>
+
+#include <rtems/freebsd/sys/socket.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/if_media.h>
+
+#include <rtems/freebsd/net80211/ieee80211_var.h>
+#include <rtems/freebsd/net80211/ieee80211_regdomain.h>
+
+static void
+null_getradiocaps(struct ieee80211com *ic, int maxchan,
+ int *n, struct ieee80211_channel *c)
+{
+ /* just feed back the current channel list */
+ if (maxchan > ic->ic_nchans)
+ maxchan = ic->ic_nchans;
+ memcpy(c, ic->ic_channels, maxchan*sizeof(struct ieee80211_channel));
+ *n = maxchan;
+}
+
+static int
+null_setregdomain(struct ieee80211com *ic,
+ struct ieee80211_regdomain *rd,
+ int nchans, struct ieee80211_channel chans[])
+{
+ return 0; /* accept anything */
+}
+
+void
+ieee80211_regdomain_attach(struct ieee80211com *ic)
+{
+ if (ic->ic_regdomain.regdomain == 0 &&
+ ic->ic_regdomain.country == CTRY_DEFAULT) {
+ ic->ic_regdomain.country = CTRY_UNITED_STATES; /* XXX */
+ ic->ic_regdomain.location = ' '; /* both */
+ ic->ic_regdomain.isocc[0] = 'U'; /* XXX */
+ ic->ic_regdomain.isocc[1] = 'S'; /* XXX */
+ /* NB: driver calls ieee80211_init_channels or similar */
+ }
+ ic->ic_getradiocaps = null_getradiocaps;
+ ic->ic_setregdomain = null_setregdomain;
+}
+
+void
+ieee80211_regdomain_detach(struct ieee80211com *ic)
+{
+ if (ic->ic_countryie != NULL) {
+ free(ic->ic_countryie, M_80211_NODE_IE);
+ ic->ic_countryie = NULL;
+ }
+}
+
+void
+ieee80211_regdomain_vattach(struct ieee80211vap *vap)
+{
+}
+
+void
+ieee80211_regdomain_vdetach(struct ieee80211vap *vap)
+{
+}
+
+static void
+addchan(struct ieee80211com *ic, int ieee, int flags)
+{
+ struct ieee80211_channel *c;
+
+ c = &ic->ic_channels[ic->ic_nchans++];
+ c->ic_freq = ieee80211_ieee2mhz(ieee, flags);
+ c->ic_ieee = ieee;
+ c->ic_flags = flags;
+ c->ic_extieee = 0;
+}
+
+/*
+ * Setup the channel list for the specified regulatory domain,
+ * country code, and operating modes. This interface is used
+ * when a driver does not obtain the channel list from another
+ * source (such as firmware).
+ */
+int
+ieee80211_init_channels(struct ieee80211com *ic,
+ const struct ieee80211_regdomain *rd, const uint8_t bands[])
+{
+ int i;
+
+ /* XXX just do something for now */
+ ic->ic_nchans = 0;
+ if (isset(bands, IEEE80211_MODE_11B) ||
+ isset(bands, IEEE80211_MODE_11G)) {
+ int maxchan = 11;
+ if (rd != NULL && rd->ecm)
+ maxchan = 14;
+ for (i = 1; i <= maxchan; i++) {
+ if (isset(bands, IEEE80211_MODE_11B))
+ addchan(ic, i, IEEE80211_CHAN_B);
+ if (isset(bands, IEEE80211_MODE_11G))
+ addchan(ic, i, IEEE80211_CHAN_G);
+ }
+ }
+ if (isset(bands, IEEE80211_MODE_11A)) {
+ for (i = 36; i <= 64; i += 4)
+ addchan(ic, i, IEEE80211_CHAN_A);
+ for (i = 100; i <= 140; i += 4)
+ addchan(ic, i, IEEE80211_CHAN_A);
+ for (i = 149; i <= 161; i += 4)
+ addchan(ic, i, IEEE80211_CHAN_A);
+ }
+ if (rd != NULL)
+ ic->ic_regdomain = *rd;
+
+ return 0;
+}
+
+static __inline int
+chancompar(const void *a, const void *b)
+{
+ const struct ieee80211_channel *ca = a;
+ const struct ieee80211_channel *cb = b;
+
+ return (ca->ic_freq == cb->ic_freq) ?
+ (ca->ic_flags & IEEE80211_CHAN_ALL) -
+ (cb->ic_flags & IEEE80211_CHAN_ALL) :
+ ca->ic_freq - cb->ic_freq;
+}
+
+/*
+ * Insertion sort.
+ */
+#define swap(_a, _b, _size) { \
+ uint8_t *s = _b; \
+ int i = _size; \
+ do { \
+ uint8_t tmp = *_a; \
+ *_a++ = *s; \
+ *s++ = tmp; \
+ } while (--i); \
+ _a -= _size; \
+}
+
+static void
+sort_channels(void *a, size_t n, size_t size)
+{
+ uint8_t *aa = a;
+ uint8_t *ai, *t;
+
+ KASSERT(n > 0, ("no channels"));
+ for (ai = aa+size; --n >= 1; ai += size)
+ for (t = ai; t > aa; t -= size) {
+ uint8_t *u = t - size;
+ if (chancompar(u, t) <= 0)
+ break;
+ swap(u, t, size);
+ }
+}
+#undef swap
+
+/*
+ * Order channels w/ the same frequency so that
+ * b < g < htg and a < hta. This is used to optimize
+ * channel table lookups and some user applications
+ * may also depend on it (though they should not).
+ */
+void
+ieee80211_sort_channels(struct ieee80211_channel chans[], int nchans)
+{
+ if (nchans > 0)
+ sort_channels(chans, nchans, sizeof(struct ieee80211_channel));
+}
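The comparator keys on frequency first and on the flag word second, which is what produces the b < g < htg ordering promised above: channels on the same frequency sort by ascending flag value. A small standalone illustration with qsort() and a pared-down channel type (the flag constants are illustrative):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct chan { uint16_t freq; uint32_t flags; };

/* Same ordering rule as chancompar(): freq major, flags minor. */
static int cmp(const void *a, const void *b)
{
	const struct chan *ca = a, *cb = b;

	if (ca->freq != cb->freq)
		return ca->freq - cb->freq;
	return (ca->flags > cb->flags) - (ca->flags < cb->flags);
}

int main(void)
{
	struct chan c[] = {
		{ 2412, 0x00c0 },	/* 2 GHz OFDM: "g" */
		{ 2412, 0x00a0 },	/* 2 GHz CCK:  "b" */
		{ 5180, 0x0140 },	/* 5 GHz OFDM: "a" */
	};

	qsort(c, 3, sizeof(c[0]), cmp);
	for (int i = 0; i < 3; i++)	/* b before g, then a */
		printf("%u/0x%x\n", c[i].freq, c[i].flags);
	return 0;
}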
+
+/*
+ * Allocate and construct a Country Information IE.
+ */
+struct ieee80211_appie *
+ieee80211_alloc_countryie(struct ieee80211com *ic)
+{
+#define CHAN_UNINTERESTING \
+ (IEEE80211_CHAN_TURBO | IEEE80211_CHAN_STURBO | \
+ IEEE80211_CHAN_HALF | IEEE80211_CHAN_QUARTER)
+ /* XXX what about auto? */
+ /* flag set of channels to be excluded (band added below) */
+ static const int skipflags[IEEE80211_MODE_MAX] = {
+ [IEEE80211_MODE_AUTO] = CHAN_UNINTERESTING,
+ [IEEE80211_MODE_11A] = CHAN_UNINTERESTING,
+ [IEEE80211_MODE_11B] = CHAN_UNINTERESTING,
+ [IEEE80211_MODE_11G] = CHAN_UNINTERESTING,
+ [IEEE80211_MODE_FH] = CHAN_UNINTERESTING
+ | IEEE80211_CHAN_OFDM
+ | IEEE80211_CHAN_CCK
+ | IEEE80211_CHAN_DYN,
+ [IEEE80211_MODE_TURBO_A] = CHAN_UNINTERESTING,
+ [IEEE80211_MODE_TURBO_G] = CHAN_UNINTERESTING,
+ [IEEE80211_MODE_STURBO_A] = CHAN_UNINTERESTING,
+ [IEEE80211_MODE_HALF] = IEEE80211_CHAN_TURBO
+ | IEEE80211_CHAN_STURBO,
+ [IEEE80211_MODE_QUARTER] = IEEE80211_CHAN_TURBO
+ | IEEE80211_CHAN_STURBO,
+ [IEEE80211_MODE_11NA] = CHAN_UNINTERESTING,
+ [IEEE80211_MODE_11NG] = CHAN_UNINTERESTING,
+ };
+ const struct ieee80211_regdomain *rd = &ic->ic_regdomain;
+ uint8_t nextchan, chans[IEEE80211_CHAN_BYTES], *frm;
+ struct ieee80211_appie *aie;
+ struct ieee80211_country_ie *ie;
+ int i, skip, nruns;
+
+ aie = malloc(IEEE80211_COUNTRY_MAX_SIZE, M_80211_NODE_IE,
+ M_NOWAIT | M_ZERO);
+ if (aie == NULL) {
+ if_printf(ic->ic_ifp,
+ "%s: unable to allocate memory for country ie\n", __func__);
+ /* XXX stat */
+ return NULL;
+ }
+ ie = (struct ieee80211_country_ie *) aie->ie_data;
+ ie->ie = IEEE80211_ELEMID_COUNTRY;
+ if (rd->isocc[0] == '\0') {
+ if_printf(ic->ic_ifp, "no ISO country string for cc %d; "
+ "using blanks\n", rd->country);
+ ie->cc[0] = ie->cc[1] = ' ';
+ } else {
+ ie->cc[0] = rd->isocc[0];
+ ie->cc[1] = rd->isocc[1];
+ }
+ /*
+ * Indoor/Outdoor portion of country string:
+ * 'I' indoor only
+ * 'O' outdoor only
+ * ' ' all environments
+ */
+ ie->cc[2] = (rd->location == 'I' ? 'I' :
+ rd->location == 'O' ? 'O' : ' ');
+ /*
+ * Run-length encoded channel+max tx power info.
+ */
+ frm = (uint8_t *)&ie->band[0];
+ nextchan = 0; /* NB: impossible channel # */
+ nruns = 0;
+ memset(chans, 0, sizeof(chans));
+ skip = skipflags[ieee80211_chan2mode(ic->ic_bsschan)];
+ if (IEEE80211_IS_CHAN_5GHZ(ic->ic_bsschan))
+ skip |= IEEE80211_CHAN_2GHZ;
+ else if (IEEE80211_IS_CHAN_2GHZ(ic->ic_bsschan))
+ skip |= IEEE80211_CHAN_5GHZ;
+ for (i = 0; i < ic->ic_nchans; i++) {
+ const struct ieee80211_channel *c = &ic->ic_channels[i];
+
+ if (isset(chans, c->ic_ieee)) /* suppress dup's */
+ continue;
+ if (c->ic_flags & skip) /* skip band, etc. */
+ continue;
+ setbit(chans, c->ic_ieee);
+ if (c->ic_ieee != nextchan ||
+ c->ic_maxregpower != frm[-1]) { /* new run */
+ if (nruns == IEEE80211_COUNTRY_MAX_BANDS) {
+ if_printf(ic->ic_ifp, "%s: country ie too big, "
+ "runs > max %d, truncating\n",
+ __func__, IEEE80211_COUNTRY_MAX_BANDS);
+ /* XXX stat? fail? */
+ break;
+ }
+ frm[0] = c->ic_ieee; /* starting channel # */
+ frm[1] = 1; /* # channels in run */
+ frm[2] = c->ic_maxregpower; /* tx power cap */
+ frm += 3;
+ nextchan = c->ic_ieee + 1; /* overflow? */
+ nruns++;
+ } else { /* extend run */
+ frm[-2]++;
+ nextchan++;
+ }
+ }
+ ie->len = frm - ie->cc;
+ if (ie->len & 1) { /* Zero pad to multiple of 2 */
+ ie->len++;
+ *frm++ = 0;
+ }
+ aie->ie_len = frm - aie->ie_data;
+
+ return aie;
+#undef CHAN_UNINTERESTING
+}
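The body of the IE built above is a run-length encoding: 3-byte (first channel, channel count, max power) triplets, with a run extended only while channel numbers stay consecutive and the regulatory power cap is unchanged. The same step in a standalone sketch (sample data, not real regulatory values):

#include <stdint.h>
#include <stdio.h>

struct ch { uint8_t ieee, maxregpower; };

int main(void)
{
	/* channels 1-11 at 30 dBm, then non-consecutive 36 and 40 at 17 */
	struct ch chans[] = { {1,30},{2,30},{3,30},{4,30},{5,30},{6,30},
	    {7,30},{8,30},{9,30},{10,30},{11,30},{36,17},{40,17} };
	uint8_t frm[32], *p = frm;
	int nextchan = 0;	/* impossible channel #, as above */

	for (size_t i = 0; i < sizeof(chans)/sizeof(chans[0]); i++) {
		const struct ch *c = &chans[i];

		if (c->ieee != nextchan || c->maxregpower != p[-1]) {
			p[0] = c->ieee;		/* starting channel # */
			p[1] = 1;		/* # channels in run */
			p[2] = c->maxregpower;	/* tx power cap */
			p += 3;
			nextchan = c->ieee + 1;
		} else {
			p[-2]++;		/* extend current run */
			nextchan++;
		}
	}
	/* prints: chan 1 x11 @ 30, chan 36 x1 @ 17, chan 40 x1 @ 17 */
	for (const uint8_t *q = frm; q < p; q += 3)
		printf("run: chan %u x%u @ %u dBm\n", q[0], q[1], q[2]);
	return 0;
}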
+
+static int
+allvapsdown(struct ieee80211com *ic)
+{
+ struct ieee80211vap *vap;
+
+ IEEE80211_LOCK_ASSERT(ic);
+ TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next)
+ if (vap->iv_state != IEEE80211_S_INIT)
+ return 0;
+ return 1;
+}
+
+int
+ieee80211_setregdomain(struct ieee80211vap *vap,
+ struct ieee80211_regdomain_req *reg)
+{
+ struct ieee80211com *ic = vap->iv_ic;
+ struct ieee80211_channel *c;
+ int desfreq = 0, desflags = 0; /* XXX silence gcc complaint */
+ int error, i;
+
+ if (reg->rd.location != 'I' && reg->rd.location != 'O' &&
+ reg->rd.location != ' ') {
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_IOCTL,
+ "%s: invalid location 0x%x\n", __func__, reg->rd.location);
+ return EINVAL;
+ }
+ if (reg->rd.isocc[0] == '\0' || reg->rd.isocc[1] == '\0') {
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_IOCTL,
+ "%s: invalid iso cc 0x%x:0x%x\n", __func__,
+ reg->rd.isocc[0], reg->rd.isocc[1]);
+ return EINVAL;
+ }
+ if (reg->chaninfo.ic_nchans > IEEE80211_CHAN_MAX) {
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_IOCTL,
+ "%s: too many channels %u, max %u\n", __func__,
+ reg->chaninfo.ic_nchans, IEEE80211_CHAN_MAX);
+ return EINVAL;
+ }
+ /*
+ * Calculate freq<->IEEE mapping and default max tx power
+ * for channels not setup. The driver can override these
+ * setting to reflect device properties/requirements.
+ */
+ for (i = 0; i < reg->chaninfo.ic_nchans; i++) {
+ c = &reg->chaninfo.ic_chans[i];
+ if (c->ic_freq == 0 || c->ic_flags == 0) {
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_IOCTL,
+ "%s: invalid channel spec at [%u]\n", __func__, i);
+ return EINVAL;
+ }
+ if (c->ic_maxregpower == 0) {
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_IOCTL,
+ "%s: invalid channel spec, zero maxregpower, "
+ "freq %u flags 0x%x\n", __func__,
+ c->ic_freq, c->ic_flags);
+ return EINVAL;
+ }
+ if (c->ic_ieee == 0)
+ c->ic_ieee = ieee80211_mhz2ieee(c->ic_freq,c->ic_flags);
+ if (IEEE80211_IS_CHAN_HT40(c) && c->ic_extieee == 0)
+ c->ic_extieee = ieee80211_mhz2ieee(c->ic_freq +
+ (IEEE80211_IS_CHAN_HT40U(c) ? 20 : -20),
+ c->ic_flags);
+ if (c->ic_maxpower == 0)
+ c->ic_maxpower = 2*c->ic_maxregpower;
+ }
+ IEEE80211_LOCK(ic);
+ /* XXX bandaid; a running vap will likely crash */
+ if (!allvapsdown(ic)) {
+ IEEE80211_UNLOCK(ic);
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_IOCTL,
+ "%s: reject: vaps are running\n", __func__);
+ return EBUSY;
+ }
+ error = ic->ic_setregdomain(ic, &reg->rd,
+ reg->chaninfo.ic_nchans, reg->chaninfo.ic_chans);
+ if (error != 0) {
+ IEEE80211_UNLOCK(ic);
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_IOCTL,
+ "%s: driver rejected request, error %u\n", __func__, error);
+ return error;
+ }
+ /*
+ * Commit: copy in new channel table and reset media state.
+ * On return the state machines will be clocked so all vaps
+ * will reset their state.
+ *
+ * XXX ic_bsschan is marked undefined, must have vap's in
+ * INIT state or we blow up forcing stations off
+ */
+ /*
+ * Save any desired channel for restore below. Note this
+ * needs to be done for all vaps but for now we only do
+ * the one where the ioctl is issued.
+ */
+ if (vap->iv_des_chan != IEEE80211_CHAN_ANYC) {
+ desfreq = vap->iv_des_chan->ic_freq;
+ desflags = vap->iv_des_chan->ic_flags;
+ }
+ /* regdomain parameters */
+ memcpy(&ic->ic_regdomain, &reg->rd, sizeof(reg->rd));
+ /* channel table */
+ memcpy(ic->ic_channels, reg->chaninfo.ic_chans,
+ reg->chaninfo.ic_nchans * sizeof(struct ieee80211_channel));
+ ic->ic_nchans = reg->chaninfo.ic_nchans;
+ memset(&ic->ic_channels[ic->ic_nchans], 0,
+ (IEEE80211_CHAN_MAX - ic->ic_nchans) *
+ sizeof(struct ieee80211_channel));
+ ieee80211_media_init(ic);
+
+ /*
+ * Invalidate channel-related state.
+ */
+ if (ic->ic_countryie != NULL) {
+ free(ic->ic_countryie, M_80211_NODE_IE);
+ ic->ic_countryie = NULL;
+ }
+ ieee80211_scan_flush(vap);
+ ieee80211_dfs_reset(ic);
+ if (vap->iv_des_chan != IEEE80211_CHAN_ANYC) {
+ c = ieee80211_find_channel(ic, desfreq, desflags);
+ /* NB: may be NULL if not present in new channel list */
+ vap->iv_des_chan = (c != NULL) ? c : IEEE80211_CHAN_ANYC;
+ }
+ IEEE80211_UNLOCK(ic);
+
+ return 0;
+}
diff --git a/rtems/freebsd/net80211/ieee80211_regdomain.h b/rtems/freebsd/net80211/ieee80211_regdomain.h
new file mode 100644
index 00000000..f71c1093
--- /dev/null
+++ b/rtems/freebsd/net80211/ieee80211_regdomain.h
@@ -0,0 +1,282 @@
+/*-
+ * Copyright (c) 2005-2008 Sam Leffler, Errno Consulting
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+#ifndef _NET80211_IEEE80211_REGDOMAIN_HH_
+#define _NET80211_IEEE80211_REGDOMAIN_HH_
+
+/*
+ * 802.11 regulatory domain definitions.
+ */
+
+/*
+ * ISO 3166 Country/Region Codes
+ * http://ftp.ics.uci.edu/pub/ietf/http/related/iso3166.txt
+ */
+enum ISOCountryCode {
+ CTRY_AFGHANISTAN = 4,
+ CTRY_ALBANIA = 8, /* Albania */
+ CTRY_ALGERIA = 12, /* Algeria */
+ CTRY_AMERICAN_SAMOA = 16,
+ CTRY_ANDORRA = 20,
+ CTRY_ANGOLA = 24,
+ CTRY_ANGUILLA = 660,
+ CTRY_ANTARTICA = 10,
+ CTRY_ANTIGUA = 28, /* Antigua and Barbuda */
+ CTRY_ARGENTINA = 32, /* Argentina */
+ CTRY_ARMENIA = 51, /* Armenia */
+ CTRY_ARUBA = 533, /* Aruba */
+ CTRY_AUSTRALIA = 36, /* Australia */
+ CTRY_AUSTRIA = 40, /* Austria */
+ CTRY_AZERBAIJAN = 31, /* Azerbaijan */
+ CTRY_BAHAMAS = 44, /* Bahamas */
+ CTRY_BAHRAIN = 48, /* Bahrain */
+ CTRY_BANGLADESH = 50, /* Bangladesh */
+ CTRY_BARBADOS = 52,
+ CTRY_BELARUS = 112, /* Belarus */
+ CTRY_BELGIUM = 56, /* Belgium */
+ CTRY_BELIZE = 84,
+ CTRY_BENIN = 204,
+ CTRY_BERMUDA = 60,
+ CTRY_BHUTAN = 64,
+ CTRY_BOLIVIA = 68, /* Bolivia */
+ CTRY_BOSNIA_AND_HERZEGOWINA = 70,
+ CTRY_BOTSWANA = 72,
+ CTRY_BOUVET_ISLAND = 74,
+ CTRY_BRAZIL = 76, /* Brazil */
+ CTRY_BRITISH_INDIAN_OCEAN_TERRITORY = 86,
+ CTRY_BRUNEI_DARUSSALAM = 96, /* Brunei Darussalam */
+ CTRY_BULGARIA = 100, /* Bulgaria */
+ CTRY_BURKINA_FASO = 854,
+ CTRY_BURUNDI = 108,
+ CTRY_CAMBODIA = 116,
+ CTRY_CAMEROON = 120,
+ CTRY_CANADA = 124, /* Canada */
+ CTRY_CAPE_VERDE = 132,
+ CTRY_CAYMAN_ISLANDS = 136,
+ CTRY_CENTRAL_AFRICAN_REPUBLIC = 140,
+ CTRY_CHAD = 148,
+ CTRY_CHILE = 152, /* Chile */
+ CTRY_CHINA = 156, /* People's Republic of China */
+ CTRY_CHRISTMAS_ISLAND = 162,
+ CTRY_COCOS_ISLANDS = 166,
+ CTRY_COLOMBIA = 170, /* Colombia */
+ CTRY_COMOROS = 174,
+ CTRY_CONGO = 178,
+ CTRY_COOK_ISLANDS = 184,
+ CTRY_COSTA_RICA = 188, /* Costa Rica */
+ CTRY_COTE_DIVOIRE = 384,
+ CTRY_CROATIA = 191, /* Croatia (local name: Hrvatska) */
+ CTRY_CYPRUS = 196, /* Cyprus */
+ CTRY_CZECH = 203, /* Czech Republic */
+ CTRY_DENMARK = 208, /* Denmark */
+ CTRY_DJIBOUTI = 262,
+ CTRY_DOMINICA = 212,
+ CTRY_DOMINICAN_REPUBLIC = 214, /* Dominican Republic */
+ CTRY_EAST_TIMOR = 626,
+ CTRY_ECUADOR = 218, /* Ecuador */
+ CTRY_EGYPT = 818, /* Egypt */
+ CTRY_EL_SALVADOR = 222, /* El Salvador */
+ CTRY_EQUATORIAL_GUINEA = 226,
+ CTRY_ERITREA = 232,
+ CTRY_ESTONIA = 233, /* Estonia */
+ CTRY_ETHIOPIA = 210,
+ CTRY_FALKLAND_ISLANDS = 238, /* (Malvinas) */
+ CTRY_FAEROE_ISLANDS = 234, /* Faeroe Islands */
+ CTRY_FIJI = 242,
+ CTRY_FINLAND = 246, /* Finland */
+ CTRY_FRANCE = 250, /* France */
+ CTRY_FRANCE2 = 255, /* France (Metropolitan) */
+ CTRY_FRENCH_GUIANA = 254,
+ CTRY_FRENCH_POLYNESIA = 258,
+ CTRY_FRENCH_SOUTHERN_TERRITORIES = 260,
+ CTRY_GABON = 266,
+ CTRY_GAMBIA = 270,
+ CTRY_GEORGIA = 268, /* Georgia */
+ CTRY_GERMANY = 276, /* Germany */
+ CTRY_GHANA = 288,
+ CTRY_GIBRALTAR = 292,
+ CTRY_GREECE = 300, /* Greece */
+ CTRY_GREENLAND = 304,
+ CTRY_GRENADA = 308,
+ CTRY_GUADELOUPE = 312,
+ CTRY_GUAM = 316,
+ CTRY_GUATEMALA = 320, /* Guatemala */
+ CTRY_GUINEA = 324,
+ CTRY_GUINEA_BISSAU = 624,
+ CTRY_GUYANA = 328,
+ /* XXX correct remainder */
+ CTRY_HAITI = 332,
+ CTRY_HONDURAS = 340, /* Honduras */
+ CTRY_HONG_KONG = 344, /* Hong Kong S.A.R., P.R.C. */
+ CTRY_HUNGARY = 348, /* Hungary */
+ CTRY_ICELAND = 352, /* Iceland */
+ CTRY_INDIA = 356, /* India */
+ CTRY_INDONESIA = 360, /* Indonesia */
+ CTRY_IRAN = 364, /* Iran */
+ CTRY_IRAQ = 368, /* Iraq */
+ CTRY_IRELAND = 372, /* Ireland */
+ CTRY_ISRAEL = 376, /* Israel */
+ CTRY_ITALY = 380, /* Italy */
+ CTRY_JAMAICA = 388, /* Jamaica */
+ CTRY_JAPAN = 392, /* Japan */
+ CTRY_JORDAN = 400, /* Jordan */
+ CTRY_KAZAKHSTAN = 398, /* Kazakhstan */
+ CTRY_KENYA = 404, /* Kenya */
+ CTRY_KOREA_NORTH = 408, /* North Korea */
+ CTRY_KOREA_ROC = 410, /* South Korea */
+ CTRY_KOREA_ROC2 = 411, /* South Korea */
+ CTRY_KUWAIT = 414, /* Kuwait */
+ CTRY_LATVIA = 428, /* Latvia */
+ CTRY_LEBANON = 422, /* Lebanon */
+ CTRY_LIBYA = 434, /* Libya */
+ CTRY_LIECHTENSTEIN = 438, /* Liechtenstein */
+ CTRY_LITHUANIA = 440, /* Lithuania */
+ CTRY_LUXEMBOURG = 442, /* Luxembourg */
+ CTRY_MACAU = 446, /* Macau */
+ CTRY_MACEDONIA = 807, /* the Former Yugoslav Republic of Macedonia */
+ CTRY_MALAYSIA = 458, /* Malaysia */
+ CTRY_MALTA = 470, /* Malta */
+ CTRY_MEXICO = 484, /* Mexico */
+ CTRY_MONACO = 492, /* Principality of Monaco */
+ CTRY_MOROCCO = 504, /* Morocco */
+ CTRY_NEPAL = 524, /* Nepal */
+ CTRY_NETHERLANDS = 528, /* Netherlands */
+ CTRY_NEW_ZEALAND = 554, /* New Zealand */
+ CTRY_NICARAGUA = 558, /* Nicaragua */
+ CTRY_NORWAY = 578, /* Norway */
+ CTRY_OMAN = 512, /* Oman */
+ CTRY_PAKISTAN = 586, /* Islamic Republic of Pakistan */
+ CTRY_PANAMA = 591, /* Panama */
+ CTRY_PARAGUAY = 600, /* Paraguay */
+ CTRY_PERU = 604, /* Peru */
+ CTRY_PHILIPPINES = 608, /* Republic of the Philippines */
+ CTRY_POLAND = 616, /* Poland */
+ CTRY_PORTUGAL = 620, /* Portugal */
+ CTRY_PUERTO_RICO = 630, /* Puerto Rico */
+ CTRY_QATAR = 634, /* Qatar */
+ CTRY_ROMANIA = 642, /* Romania */
+ CTRY_RUSSIA = 643, /* Russia */
+ CTRY_SAUDI_ARABIA = 682, /* Saudi Arabia */
+ CTRY_SINGAPORE = 702, /* Singapore */
+ CTRY_SLOVAKIA = 703, /* Slovak Republic */
+ CTRY_SLOVENIA = 705, /* Slovenia */
+ CTRY_SOUTH_AFRICA = 710, /* South Africa */
+ CTRY_SPAIN = 724, /* Spain */
+ CTRY_SRILANKA = 144, /* Sri Lanka */
+ CTRY_SWEDEN = 752, /* Sweden */
+ CTRY_SWITZERLAND = 756, /* Switzerland */
+ CTRY_SYRIA = 760, /* Syria */
+ CTRY_TAIWAN = 158, /* Taiwan */
+ CTRY_THAILAND = 764, /* Thailand */
+ CTRY_TRINIDAD_Y_TOBAGO = 780, /* Trinidad y Tobago */
+ CTRY_TUNISIA = 788, /* Tunisia */
+ CTRY_TURKEY = 792, /* Turkey */
+ CTRY_UAE = 784, /* U.A.E. */
+ CTRY_UKRAINE = 804, /* Ukraine */
+ CTRY_UNITED_KINGDOM = 826, /* United Kingdom */
+ CTRY_UNITED_STATES = 840, /* United States */
+ CTRY_URUGUAY = 858, /* Uruguay */
+ CTRY_UZBEKISTAN = 860, /* Uzbekistan */
+ CTRY_VENEZUELA = 862, /* Venezuela */
+ CTRY_VIET_NAM = 704, /* Viet Nam */
+ CTRY_YEMEN = 887, /* Yemen */
+ CTRY_ZIMBABWE = 716, /* Zimbabwe */
+
+ /* NB: from here down not listed in 3166; they come from Atheros */
+ CTRY_DEBUG = 0x1ff, /* debug */
+ CTRY_DEFAULT = 0, /* default */
+
+ CTRY_UNITED_STATES_FCC49 = 842, /* United States (Public Safety)*/
+ CTRY_KOREA_ROC3 = 412, /* South Korea */
+
+ CTRY_JAPAN1 = 393, /* Japan (JP1) */
+ CTRY_JAPAN2 = 394, /* Japan (JP0) */
+ CTRY_JAPAN3 = 395, /* Japan (JP1-1) */
+ CTRY_JAPAN4 = 396, /* Japan (JE1) */
+ CTRY_JAPAN5 = 397, /* Japan (JE2) */
+ CTRY_JAPAN6 = 399, /* Japan (JP6) */
+ CTRY_JAPAN7 = 4007, /* Japan (J7) */
+ CTRY_JAPAN8 = 4008, /* Japan (J8) */
+ CTRY_JAPAN9 = 4009, /* Japan (J9) */
+ CTRY_JAPAN10 = 4010, /* Japan (J10) */
+ CTRY_JAPAN11 = 4011, /* Japan (J11) */
+ CTRY_JAPAN12 = 4012, /* Japan (J12) */
+ CTRY_JAPAN13 = 4013, /* Japan (J13) */
+ CTRY_JAPAN14 = 4014, /* Japan (J14) */
+ CTRY_JAPAN15 = 4015, /* Japan (J15) */
+ CTRY_JAPAN16 = 4016, /* Japan (J16) */
+ CTRY_JAPAN17 = 4017, /* Japan (J17) */
+ CTRY_JAPAN18 = 4018, /* Japan (J18) */
+ CTRY_JAPAN19 = 4019, /* Japan (J19) */
+ CTRY_JAPAN20 = 4020, /* Japan (J20) */
+ CTRY_JAPAN21 = 4021, /* Japan (J21) */
+ CTRY_JAPAN22 = 4022, /* Japan (J22) */
+ CTRY_JAPAN23 = 4023, /* Japan (J23) */
+ CTRY_JAPAN24 = 4024, /* Japan (J24) */
+};
+
+enum RegdomainCode {
+ SKU_FCC = 0x10, /* FCC, aka United States */
+ SKU_CA = 0x20, /* North America, aka Canada */
+ SKU_ETSI = 0x30, /* Europe */
+ SKU_ETSI2 = 0x32, /* Europe w/o HT40 in 5GHz */
+ SKU_ETSI3 = 0x33, /* Europe - channel 36 */
+ SKU_FCC3 = 0x3a, /* FCC w/5470 band, 11h, DFS */
+ SKU_JAPAN = 0x40,
+ SKU_KOREA = 0x45,
+ SKU_APAC = 0x50, /* Asia Pacific */
+ SKU_APAC2 = 0x51, /* Asia Pacific w/ DFS on mid-band */
+ SKU_APAC3 = 0x5d, /* Asia Pacific w/o ISM band */
+ SKU_ROW = 0x81, /* China/Taiwan/Rest of World */
+ SKU_NONE = 0xf0, /* "Region Free" */
+ SKU_DEBUG = 0x1ff,
+
+ /* NB: from here down private */
+ SKU_SR9 = 0x0298, /* Ubiquiti SR9 (900MHz/GSM) */
+ SKU_XR9 = 0x0299, /* Ubiquiti XR9 (900MHz/GSM) */
+ SKU_GZ901 = 0x029a, /* Zcomax GZ-901 (900MHz/GSM) */
+};
+
+#if defined(__KERNEL__) || defined(_KERNEL)
+struct ieee80211com;
+void ieee80211_regdomain_attach(struct ieee80211com *);
+void ieee80211_regdomain_detach(struct ieee80211com *);
+struct ieee80211vap;
+void ieee80211_regdomain_vattach(struct ieee80211vap *);
+void ieee80211_regdomain_vdetach(struct ieee80211vap *);
+
+struct ieee80211_regdomain;
+int ieee80211_init_channels(struct ieee80211com *,
+ const struct ieee80211_regdomain *, const uint8_t bands[]);
+struct ieee80211_channel;
+void ieee80211_sort_channels(struct ieee80211_channel *chans, int nchans);
+struct ieee80211_appie;
+struct ieee80211_appie *ieee80211_alloc_countryie(struct ieee80211com *);
+struct ieee80211_regdomain_req;
+int ieee80211_setregdomain(struct ieee80211vap *,
+ struct ieee80211_regdomain_req *);
+#endif /* defined(__KERNEL__) || defined(_KERNEL) */
+#endif /* _NET80211_IEEE80211_REGDOMAIN_HH_ */
diff --git a/rtems/freebsd/net80211/ieee80211_rssadapt.c b/rtems/freebsd/net80211/ieee80211_rssadapt.c
new file mode 100644
index 00000000..b725a12b
--- /dev/null
+++ b/rtems/freebsd/net80211/ieee80211_rssadapt.c
@@ -0,0 +1,351 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/* $FreeBSD$ */
+/* $NetBSD: ieee80211_rssadapt.c,v 1.9 2005/02/26 22:45:09 perry Exp $ */
+/*-
+ * Copyright (c) 2010 Rui Paulo <rpaulo@FreeBSD.org>
+ * Copyright (c) 2003, 2004 David Young. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * 3. The name of David Young may not be used to endorse or promote
+ * products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY David Young ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL David
+ * Young BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+ * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
+ * OF SUCH DAMAGE.
+ */
+#include <rtems/freebsd/local/opt_wlan.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/module.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/sysctl.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/if_media.h>
+
+#include <rtems/freebsd/net80211/ieee80211_var.h>
+#include <rtems/freebsd/net80211/ieee80211_rssadapt.h>
+#include <rtems/freebsd/net80211/ieee80211_ratectl.h>
+
+struct rssadapt_expavgctl {
+ /* RSS threshold decay. */
+ u_int rc_decay_denom;
+ u_int rc_decay_old;
+ /* RSS threshold update. */
+ u_int rc_thresh_denom;
+ u_int rc_thresh_old;
+ /* RSS average update. */
+ u_int rc_avgrssi_denom;
+ u_int rc_avgrssi_old;
+};
+
+static struct rssadapt_expavgctl master_expavgctl = {
+ .rc_decay_denom = 16,
+ .rc_decay_old = 15,
+ .rc_thresh_denom = 8,
+ .rc_thresh_old = 4,
+ .rc_avgrssi_denom = 8,
+ .rc_avgrssi_old = 4
+};
+
+#ifdef interpolate
+#undef interpolate
+#endif
+#define interpolate(parm, old, new) ((parm##_old * (old) + \
+ (parm##_denom - parm##_old) * (new)) / \
+ parm##_denom)
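Expanded against master_expavgctl above, interpolate() is an integer exponential moving average: parm_old weights the previous value and the remainder of parm_denom weights the new sample. A quick check of the two extremes in the table, as a hedged standalone sketch:

#include <stdio.h>

/* interpolate(parm, old, new) ==
 * (parm_old * old + (parm_denom - parm_old) * new) / parm_denom */
static int interp(int denom, int keep, int old, int sample)
{
	return (keep * old + (denom - keep) * sample) / denom;
}

int main(void)
{
	/* rc_decay 15/16: sluggish, keeps 15/16 of the old threshold */
	printf("decay:   %d\n", interp(16, 15, 160, 0));	/* -> 150 */
	/* rc_avgrssi 4/8: equal-weight average of old and new */
	printf("avgrssi: %d\n", interp(8, 4, 160, 0));		/* -> 80 */
	return 0;
}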
+
+static void rssadapt_setinterval(const struct ieee80211vap *, int);
+static void rssadapt_init(struct ieee80211vap *);
+static void rssadapt_deinit(struct ieee80211vap *);
+static void rssadapt_updatestats(struct ieee80211_rssadapt_node *);
+static void rssadapt_node_init(struct ieee80211_node *);
+static void rssadapt_node_deinit(struct ieee80211_node *);
+static int rssadapt_rate(struct ieee80211_node *, void *, uint32_t);
+static void rssadapt_lower_rate(struct ieee80211_rssadapt_node *, int, int);
+static void rssadapt_raise_rate(struct ieee80211_rssadapt_node *,
+ int, int);
+static void rssadapt_tx_complete(const struct ieee80211vap *,
+ const struct ieee80211_node *, int,
+ void *, void *);
+static void rssadapt_sysctlattach(struct ieee80211vap *,
+ struct sysctl_ctx_list *, struct sysctl_oid *);
+
+/* number of references from net80211 layer */
+static int nrefs = 0;
+
+static const struct ieee80211_ratectl rssadapt = {
+ .ir_name = "rssadapt",
+ .ir_attach = NULL,
+ .ir_detach = NULL,
+ .ir_init = rssadapt_init,
+ .ir_deinit = rssadapt_deinit,
+ .ir_node_init = rssadapt_node_init,
+ .ir_node_deinit = rssadapt_node_deinit,
+ .ir_rate = rssadapt_rate,
+ .ir_tx_complete = rssadapt_tx_complete,
+ .ir_tx_update = NULL,
+ .ir_setinterval = rssadapt_setinterval,
+};
+IEEE80211_RATECTL_MODULE(rssadapt, 1);
+IEEE80211_RATECTL_ALG(rssadapt, IEEE80211_RATECTL_RSSADAPT, rssadapt);
+
+static void
+rssadapt_setinterval(const struct ieee80211vap *vap, int msecs)
+{
+ struct ieee80211_rssadapt *rs = vap->iv_rs;
+ int t;
+
+ if (msecs < 100)
+ msecs = 100;
+ t = msecs_to_ticks(msecs);
+ rs->interval = (t < 1) ? 1 : t;
+}
+
+static void
+rssadapt_init(struct ieee80211vap *vap)
+{
+ struct ieee80211_rssadapt *rs;
+
+ KASSERT(vap->iv_rs == NULL, ("%s: iv_rs already initialized",
+ __func__));
+
+ vap->iv_rs = rs = malloc(sizeof(struct ieee80211_rssadapt),
+ M_80211_RATECTL, M_NOWAIT|M_ZERO);
+ if (rs == NULL) {
+ if_printf(vap->iv_ifp, "couldn't alloc ratectl structure\n");
+ return;
+ }
+ rs->vap = vap;
+ rssadapt_setinterval(vap, 500 /* msecs */);
+ rssadapt_sysctlattach(vap, vap->iv_sysctl, vap->iv_oid);
+}
+
+static void
+rssadapt_deinit(struct ieee80211vap *vap)
+{
+ free(vap->iv_rs, M_80211_RATECTL);
+}
+
+static void
+rssadapt_updatestats(struct ieee80211_rssadapt_node *ra)
+{
+ long interval;
+
+ ra->ra_pktrate = (ra->ra_pktrate + 10*(ra->ra_nfail + ra->ra_nok))/2;
+ ra->ra_nfail = ra->ra_nok = 0;
+
+ /*
+ * A node is eligible for its rate to be raised every 1/10 to 10
+ * seconds, more eligible in proportion to recent packet rates.
+ */
+ interval = MAX(10*1000, 10*1000 / MAX(1, 10 * ra->ra_pktrate));
+ ra->ra_raise_interval = msecs_to_ticks(interval);
+}
+
+static void
+rssadapt_node_init(struct ieee80211_node *ni)
+{
+ struct ieee80211_rssadapt_node *ra;
+ struct ieee80211vap *vap = ni->ni_vap;
+ struct ieee80211_rssadapt *rsa = vap->iv_rs;
+ const struct ieee80211_rateset *rs = &ni->ni_rates;
+
+ if (ni->ni_rctls == NULL) {
+ ni->ni_rctls = ra =
+ malloc(sizeof(struct ieee80211_rssadapt_node),
+ M_80211_RATECTL, M_NOWAIT|M_ZERO);
+ if (ra == NULL) {
+ if_printf(vap->iv_ifp, "couldn't alloc per-node ratectl "
+ "structure\n");
+ return;
+ }
+ } else
+ ra = ni->ni_rctls;
+ ra->ra_rs = rsa;
+ ra->ra_rates = *rs;
+ rssadapt_updatestats(ra);
+
+ /* pick initial rate */
+ for (ra->ra_rix = rs->rs_nrates - 1;
+ ra->ra_rix > 0 && (rs->rs_rates[ra->ra_rix] & IEEE80211_RATE_VAL) > 72;
+ ra->ra_rix--)
+ ;
+ ni->ni_txrate = rs->rs_rates[ra->ra_rix] & IEEE80211_RATE_VAL;
+ ra->ra_ticks = ticks;
+
+ IEEE80211_NOTE(ni->ni_vap, IEEE80211_MSG_RATECTL, ni,
+ "RSSADAPT initial rate %d", ni->ni_txrate);
+}
+
+static void
+rssadapt_node_deinit(struct ieee80211_node *ni)
+{
+
+ free(ni->ni_rctls, M_80211_RATECTL);
+}
+
+static __inline int
+bucket(int pktlen)
+{
+ int i, top, thridx;
+
+ for (i = 0, top = IEEE80211_RSSADAPT_BKT0;
+ i < IEEE80211_RSSADAPT_BKTS;
+ i++, top <<= IEEE80211_RSSADAPT_BKTPOWER) {
+ thridx = i;
+ if (pktlen <= top)
+ break;
+ }
+ return thridx;
+}
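+
+/*
+ * Worked example: with IEEE80211_RSSADAPT_BKT0 = 128 and
+ * IEEE80211_RSSADAPT_BKTPOWER = 3 the bucket boundaries are 128,
+ * 1024 (128<<3) and 8192 (1024<<3), so bucket(64) == 0,
+ * bucket(512) == 1 and bucket(1500) == 2; any longer frame also
+ * falls in the last bucket.
+ */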
+
+static int
+rssadapt_rate(struct ieee80211_node *ni, void *arg __unused, uint32_t iarg)
+{
+ struct ieee80211_rssadapt_node *ra = ni->ni_rctls;
+ u_int pktlen = iarg;
+ const struct ieee80211_rateset *rs = &ra->ra_rates;
+ uint16_t (*thrs)[IEEE80211_RATE_SIZE];
+ int rix, rssi;
+
+ if ((ticks - ra->ra_ticks) > ra->ra_rs->interval) {
+ rssadapt_updatestats(ra);
+ ra->ra_ticks = ticks;
+ }
+
+ thrs = &ra->ra_rate_thresh[bucket(pktlen)];
+
+ /* XXX this is average rssi, should be using last value */
+ rssi = ni->ni_ic->ic_node_getrssi(ni);
+ for (rix = rs->rs_nrates-1; rix >= 0; rix--)
+ if ((*thrs)[rix] < (rssi << 8))
+ break;
+ if (rix != ra->ra_rix) {
+ /* update public rate */
+ ni->ni_txrate = ni->ni_rates.rs_rates[rix] & IEEE80211_RATE_VAL;
+ ra->ra_rix = rix;
+
+ IEEE80211_NOTE(ni->ni_vap, IEEE80211_MSG_RATECTL, ni,
+ "RSSADAPT new rate %d (pktlen %d rssi %d)",
+ ni->ni_txrate, pktlen, rssi);
+ }
+ return rix;
+}
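+
+/*
+ * NB: thresholds are kept scaled by 256 (cf. the rssi << 8 above and
+ * in the raise/lower paths below) so the interpolate() averaging
+ * retains fractional precision despite integer arithmetic.
+ */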
+
+/*
+ * Adapt the data rate to suit the conditions. When a transmitted
+ * packet is dropped after RAL_RSSADAPT_RETRY_LIMIT retransmissions,
+ * raise the RSS threshold for transmitting packets of similar length at
+ * the same data rate.
+ */
+static void
+rssadapt_lower_rate(struct ieee80211_rssadapt_node *ra, int pktlen, int rssi)
+{
+ uint16_t last_thr;
+ uint16_t (*thrs)[IEEE80211_RATE_SIZE];
+ u_int rix;
+
+ thrs = &ra->ra_rate_thresh[bucket(pktlen)];
+
+ rix = ra->ra_rix;
+ last_thr = (*thrs)[rix];
+ (*thrs)[rix] = interpolate(master_expavgctl.rc_thresh,
+ last_thr, (rssi << 8));
+
+ IEEE80211_DPRINTF(ra->ra_rs->vap, IEEE80211_MSG_RATECTL,
+ "RSSADAPT lower threshold for rate %d (last_thr %d new thr %d rssi %d)\n",
+ ra->ra_rates.rs_rates[rix + 1] & IEEE80211_RATE_VAL,
+ last_thr, (*thrs)[rix], rssi);
+}
+
+static void
+rssadapt_raise_rate(struct ieee80211_rssadapt_node *ra, int pktlen, int rssi)
+{
+ uint16_t (*thrs)[IEEE80211_RATE_SIZE];
+ uint16_t newthr, oldthr;
+ int rix;
+
+ thrs = &ra->ra_rate_thresh[bucket(pktlen)];
+
+ rix = ra->ra_rix;
+ if ((*thrs)[rix + 1] > (*thrs)[rix]) {
+ oldthr = (*thrs)[rix + 1];
+ if ((*thrs)[rix] == 0)
+ newthr = (rssi << 8);
+ else
+ newthr = (*thrs)[rix];
+ (*thrs)[rix + 1] = interpolate(master_expavgctl.rc_decay,
+ oldthr, newthr);
+
+ IEEE80211_DPRINTF(ra->ra_rs->vap, IEEE80211_MSG_RATECTL,
+ "RSSADAPT raise threshold for rate %d (oldthr %d newthr %d rssi %d)\n",
+ ra->ra_rates.rs_rates[rix + 1] & IEEE80211_RATE_VAL,
+ oldthr, newthr, rssi);
+
+ ra->ra_last_raise = ticks;
+ }
+}
+
+static void
+rssadapt_tx_complete(const struct ieee80211vap *vap,
+ const struct ieee80211_node *ni, int success, void *arg1, void *arg2)
+{
+ struct ieee80211_rssadapt_node *ra = ni->ni_rctls;
+ int pktlen = *(int *)arg1, rssi = *(int *)arg2;
+
+ if (success) {
+ ra->ra_nok++;
+ if ((ra->ra_rix + 1) < ra->ra_rates.rs_nrates &&
+ (ticks - ra->ra_last_raise) >= ra->ra_raise_interval)
+ rssadapt_raise_rate(ra, pktlen, rssi);
+ } else {
+ ra->ra_nfail++;
+ rssadapt_lower_rate(ra, pktlen, rssi);
+ }
+}
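+
+/*
+ * Usage sketch (hypothetical caller, not part of this file): the
+ * tx-complete hook expects pointers to the frame length and rssi as
+ * its opaque arg1/arg2, per the dereferences above.
+ */
+#if 0
+static void
+example_tx_done(struct ieee80211vap *vap, struct ieee80211_node *ni,
+    int ok, int pktlen, int rssi)
+{
+ rssadapt.ir_tx_complete(vap, ni, ok, &pktlen, &rssi);
+}
+#endif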
+
+static int
+rssadapt_sysctl_interval(SYSCTL_HANDLER_ARGS)
+{
+ struct ieee80211vap *vap = arg1;
+ struct ieee80211_rssadapt *rs = vap->iv_rs;
+ int msecs = ticks_to_msecs(rs->interval);
+ int error;
+
+ error = sysctl_handle_int(oidp, &msecs, 0, req);
+ if (error || !req->newptr)
+ return error;
+ rssadapt_setinterval(vap, msecs);
+ return 0;
+}
+
+static void
+rssadapt_sysctlattach(struct ieee80211vap *vap,
+ struct sysctl_ctx_list *ctx, struct sysctl_oid *tree)
+{
+
+ SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
+ "rssadapt_rate_interval", CTLTYPE_INT | CTLFLAG_RW, vap,
+ 0, rssadapt_sysctl_interval, "I", "rssadapt operation interval (ms)");
+}
diff --git a/rtems/freebsd/net80211/ieee80211_rssadapt.h b/rtems/freebsd/net80211/ieee80211_rssadapt.h
new file mode 100644
index 00000000..26211ece
--- /dev/null
+++ b/rtems/freebsd/net80211/ieee80211_rssadapt.h
@@ -0,0 +1,71 @@
+/* $FreeBSD$ */
+/* $NetBSD: ieee80211_rssadapt.h,v 1.4 2005/02/26 22:45:09 perry Exp $ */
+/*-
+ * Copyright (c) 2003, 2004 David Young. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * 3. The name of David Young may not be used to endorse or promote
+ * products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY David Young ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL David
+ * Young BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+ * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
+ * OF SUCH DAMAGE.
+ */
+#ifndef _NET80211_IEEE80211_RSSADAPT_HH_
+#define _NET80211_IEEE80211_RSSADAPT_HH_
+
+/* Data-rate adaptation loosely based on "Link Adaptation Strategy
+ * for IEEE 802.11 WLAN via Received Signal Strength Measurement"
+ * by Javier del Prado Pavon and Sunghyun Choi.
+ */
+
+/* Buckets for frames 0-128 bytes long, 129-1024, 1025-maximum. */
+#define IEEE80211_RSSADAPT_BKTS 3
+#define IEEE80211_RSSADAPT_BKT0 128
+#define IEEE80211_RSSADAPT_BKTPOWER 3 /* 2**_BKTPOWER */
+
+struct ieee80211_rssadapt {
+ const struct ieee80211vap *vap;
+ int interval; /* update interval (ticks) */
+};
+
+struct ieee80211_rssadapt_node {
+ struct ieee80211_rssadapt *ra_rs; /* backpointer */
+ struct ieee80211_rateset ra_rates; /* negotiated rates */
+ int ra_rix; /* current rate index */
+ int ra_ticks; /* time of last update */
+ int ra_last_raise; /* time of last rate raise */
+ int ra_raise_interval; /* rate raise time threshold */
+
+ /* Tx failures in this update interval */
+ uint32_t ra_nfail;
+ /* Tx successes in this update interval */
+ uint32_t ra_nok;
+ /* exponential average packets/second */
+ uint32_t ra_pktrate;
+ /* RSSI threshold for each Tx rate */
+ uint16_t ra_rate_thresh[IEEE80211_RSSADAPT_BKTS]
+ [IEEE80211_RATE_SIZE];
+};
+
+#define IEEE80211_RSSADAPT_SUCCESS 1
+#define IEEE80211_RSSADAPT_FAILURE 0
+#endif /* _NET80211_IEEE80211_RSSADAPT_HH_ */
diff --git a/rtems/freebsd/net80211/ieee80211_scan.c b/rtems/freebsd/net80211/ieee80211_scan.c
new file mode 100644
index 00000000..3645c19f
--- /dev/null
+++ b/rtems/freebsd/net80211/ieee80211_scan.c
@@ -0,0 +1,1240 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 2002-2008 Sam Leffler, Errno Consulting
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * IEEE 802.11 scanning support.
+ */
+#include <rtems/freebsd/local/opt_wlan.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/proc.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/condvar.h>
+
+#include <rtems/freebsd/sys/socket.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/if_media.h>
+#include <rtems/freebsd/net/ethernet.h>
+
+#include <rtems/freebsd/net80211/ieee80211_var.h>
+
+#include <rtems/freebsd/net/bpf.h>
+
+struct scan_state {
+ struct ieee80211_scan_state base; /* public state */
+
+ u_int ss_iflags; /* flags used internally */
+#define ISCAN_MINDWELL 0x0001 /* min dwell time reached */
+#define ISCAN_DISCARD 0x0002 /* discard rx'd frames */
+#define ISCAN_CANCEL 0x0004 /* cancel current scan */
+#define ISCAN_ABORT 0x0008 /* end the scan immediately */
+ unsigned long ss_chanmindwell; /* min dwell on curchan */
+ unsigned long ss_scanend; /* time scan must stop */
+ u_int ss_duration; /* duration for next scan */
+ struct task ss_scan_task; /* scan execution */
+ struct cv ss_scan_cv; /* scan signal */
+ struct callout ss_scan_timer; /* scan timer */
+};
+#define SCAN_PRIVATE(ss) ((struct scan_state *) ss)
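+
+/*
+ * NB: the SCAN_PRIVATE downcast is safe because base is the first
+ * member of struct scan_state, so a pointer to the public state is
+ * also a pointer to the private container.
+ */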
+
+/*
+ * Amount of time to go off-channel during a background
+ * scan. This value should be large enough to catch most
+ * ap's but short enough that we can return on-channel
+ * before our listen interval expires.
+ *
+ * XXX tunable
+ * XXX check against configured listen interval
+ */
+#define IEEE80211_SCAN_OFFCHANNEL msecs_to_ticks(150)
+
+/*
+ * Roaming-related defaults. RSSI thresholds are as returned by the
+ * driver (.5dBm). Transmit rate thresholds are IEEE rate codes (i.e.,
+ * .5 Mb/s units) or MCS.
+ */
+/* rssi thresholds */
+#define ROAM_RSSI_11A_DEFAULT 14 /* 11a bss */
+#define ROAM_RSSI_11B_DEFAULT 14 /* 11b bss */
+#define ROAM_RSSI_11BONLY_DEFAULT 14 /* 11b-only bss */
+/* transmit rate thresholds */
+#define ROAM_RATE_11A_DEFAULT 2*12 /* 11a bss */
+#define ROAM_RATE_11B_DEFAULT 2*5 /* 11b bss */
+#define ROAM_RATE_11BONLY_DEFAULT 2*1 /* 11b-only bss */
+#define ROAM_RATE_HALF_DEFAULT 2*6 /* half-width 11a/g bss */
+#define ROAM_RATE_QUARTER_DEFAULT 2*3 /* quarter-width 11a/g bss */
+#define ROAM_MCS_11N_DEFAULT (1 | IEEE80211_RATE_MCS) /* 11n bss */
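+
+/*
+ * Worked example: rate codes are in .5 Mb/s units, so
+ * ROAM_RATE_11A_DEFAULT (2*12 == 24) means roam when the tx rate
+ * drops below 12 Mb/s; ROAM_MCS_11N_DEFAULT is MCS index 1 with the
+ * IEEE80211_RATE_MCS flag marking it as an MCS, not a rate code.
+ */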
+
+static void scan_curchan(struct ieee80211_scan_state *, unsigned long);
+static void scan_mindwell(struct ieee80211_scan_state *);
+static void scan_signal(void *);
+static void scan_task(void *, int);
+
+MALLOC_DEFINE(M_80211_SCAN, "80211scan", "802.11 scan state");
+
+void
+ieee80211_scan_attach(struct ieee80211com *ic)
+{
+ struct scan_state *ss;
+
+ ss = (struct scan_state *) malloc(sizeof(struct scan_state),
+ M_80211_SCAN, M_NOWAIT | M_ZERO);
+ if (ss == NULL) {
+ ic->ic_scan = NULL;
+ return;
+ }
+ callout_init_mtx(&ss->ss_scan_timer, IEEE80211_LOCK_OBJ(ic), 0);
+ cv_init(&ss->ss_scan_cv, "scan");
+ TASK_INIT(&ss->ss_scan_task, 0, scan_task, ss);
+ ic->ic_scan = &ss->base;
+ ss->base.ss_ic = ic;
+
+ ic->ic_scan_curchan = scan_curchan;
+ ic->ic_scan_mindwell = scan_mindwell;
+}
+
+void
+ieee80211_scan_detach(struct ieee80211com *ic)
+{
+ struct ieee80211_scan_state *ss = ic->ic_scan;
+
+ if (ss != NULL) {
+ IEEE80211_LOCK(ic);
+ SCAN_PRIVATE(ss)->ss_iflags |= ISCAN_ABORT;
+ scan_signal(ss);
+ IEEE80211_UNLOCK(ic);
+ ieee80211_draintask(ic, &SCAN_PRIVATE(ss)->ss_scan_task);
+ callout_drain(&SCAN_PRIVATE(ss)->ss_scan_timer);
+ KASSERT((ic->ic_flags & IEEE80211_F_SCAN) == 0,
+ ("scan still running"));
+ if (ss->ss_ops != NULL) {
+ ss->ss_ops->scan_detach(ss);
+ ss->ss_ops = NULL;
+ }
+ ic->ic_scan = NULL;
+ free(SCAN_PRIVATE(ss), M_80211_SCAN);
+ }
+}
+
+static const struct ieee80211_roamparam defroam[IEEE80211_MODE_MAX] = {
+ [IEEE80211_MODE_11A] = { .rssi = ROAM_RSSI_11A_DEFAULT,
+ .rate = ROAM_RATE_11A_DEFAULT },
+ [IEEE80211_MODE_11G] = { .rssi = ROAM_RSSI_11B_DEFAULT,
+ .rate = ROAM_RATE_11B_DEFAULT },
+ [IEEE80211_MODE_11B] = { .rssi = ROAM_RSSI_11BONLY_DEFAULT,
+ .rate = ROAM_RATE_11BONLY_DEFAULT },
+ [IEEE80211_MODE_TURBO_A]= { .rssi = ROAM_RSSI_11A_DEFAULT,
+ .rate = ROAM_RATE_11A_DEFAULT },
+ [IEEE80211_MODE_TURBO_G]= { .rssi = ROAM_RSSI_11A_DEFAULT,
+ .rate = ROAM_RATE_11A_DEFAULT },
+ [IEEE80211_MODE_STURBO_A]={ .rssi = ROAM_RSSI_11A_DEFAULT,
+ .rate = ROAM_RATE_11A_DEFAULT },
+ [IEEE80211_MODE_HALF] = { .rssi = ROAM_RSSI_11A_DEFAULT,
+ .rate = ROAM_RATE_HALF_DEFAULT },
+ [IEEE80211_MODE_QUARTER]= { .rssi = ROAM_RSSI_11A_DEFAULT,
+ .rate = ROAM_RATE_QUARTER_DEFAULT },
+ [IEEE80211_MODE_11NA] = { .rssi = ROAM_RSSI_11A_DEFAULT,
+ .rate = ROAM_MCS_11N_DEFAULT },
+ [IEEE80211_MODE_11NG] = { .rssi = ROAM_RSSI_11B_DEFAULT,
+ .rate = ROAM_MCS_11N_DEFAULT },
+};
+
+void
+ieee80211_scan_vattach(struct ieee80211vap *vap)
+{
+ vap->iv_bgscanidle = (IEEE80211_BGSCAN_IDLE_DEFAULT*1000)/hz;
+ vap->iv_bgscanintvl = IEEE80211_BGSCAN_INTVAL_DEFAULT*hz;
+ vap->iv_scanvalid = IEEE80211_SCAN_VALID_DEFAULT*hz;
+
+ vap->iv_roaming = IEEE80211_ROAMING_AUTO;
+ memcpy(vap->iv_roamparms, defroam, sizeof(defroam));
+}
+
+void
+ieee80211_scan_vdetach(struct ieee80211vap *vap)
+{
+ struct ieee80211com *ic = vap->iv_ic;
+ struct ieee80211_scan_state *ss;
+
+ IEEE80211_LOCK(ic);
+ ss = ic->ic_scan;
+ if (ss != NULL && ss->ss_vap == vap) {
+ if (ic->ic_flags & IEEE80211_F_SCAN) {
+ SCAN_PRIVATE(ss)->ss_iflags |= ISCAN_ABORT;
+ scan_signal(ss);
+ }
+ if (ss->ss_ops != NULL) {
+ ss->ss_ops->scan_detach(ss);
+ ss->ss_ops = NULL;
+ }
+ ss->ss_vap = NULL;
+ }
+ IEEE80211_UNLOCK(ic);
+}
+
+/*
+ * Simple-minded scanner module support.
+ */
+static const char *scan_modnames[IEEE80211_OPMODE_MAX] = {
+ "wlan_scan_sta", /* IEEE80211_M_IBSS */
+ "wlan_scan_sta", /* IEEE80211_M_STA */
+ "wlan_scan_wds", /* IEEE80211_M_WDS */
+ "wlan_scan_sta", /* IEEE80211_M_AHDEMO */
+ "wlan_scan_ap", /* IEEE80211_M_HOSTAP */
+ "wlan_scan_monitor", /* IEEE80211_M_MONITOR */
+ "wlan_scan_sta", /* IEEE80211_M_MBSS */
+};
+static const struct ieee80211_scanner *scanners[IEEE80211_OPMODE_MAX];
+
+const struct ieee80211_scanner *
+ieee80211_scanner_get(enum ieee80211_opmode mode)
+{
+ if (mode >= IEEE80211_OPMODE_MAX)
+ return NULL;
+ if (scanners[mode] == NULL)
+ ieee80211_load_module(scan_modnames[mode]);
+ return scanners[mode];
+}
+
+void
+ieee80211_scanner_register(enum ieee80211_opmode mode,
+ const struct ieee80211_scanner *scan)
+{
+ if (mode >= IEEE80211_OPMODE_MAX)
+ return;
+ scanners[mode] = scan;
+}
+
+void
+ieee80211_scanner_unregister(enum ieee80211_opmode mode,
+ const struct ieee80211_scanner *scan)
+{
+ if (mode >= IEEE80211_OPMODE_MAX)
+ return;
+ if (scanners[mode] == scan)
+ scanners[mode] = NULL;
+}
+
+void
+ieee80211_scanner_unregister_all(const struct ieee80211_scanner *scan)
+{
+ int m;
+
+ for (m = 0; m < IEEE80211_OPMODE_MAX; m++)
+ if (scanners[m] == scan)
+ scanners[m] = NULL;
+}
+
+/*
+ * Update common scanner state to reflect the current
+ * operating mode. This is called when the state machine
+ * is transitioned to RUN state w/o scanning--e.g. when
+ * operating in monitor mode. The purpose of this is to
+ * ensure later callbacks find ss_ops set to properly
+ * reflect current operating mode.
+ */
+static void
+scan_update_locked(struct ieee80211vap *vap,
+ const struct ieee80211_scanner *scan)
+{
+ struct ieee80211com *ic = vap->iv_ic;
+ struct ieee80211_scan_state *ss = ic->ic_scan;
+
+ IEEE80211_LOCK_ASSERT(ic);
+
+#ifdef IEEE80211_DEBUG
+ if (ss->ss_vap != vap || ss->ss_ops != scan) {
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_SCAN,
+ "%s: current scanner is <%s:%s>, switch to <%s:%s>\n",
+ __func__,
+ ss->ss_vap != NULL ?
+ ss->ss_vap->iv_ifp->if_xname : "none",
+ ss->ss_vap != NULL ?
+ ieee80211_opmode_name[ss->ss_vap->iv_opmode] : "none",
+ vap->iv_ifp->if_xname,
+ ieee80211_opmode_name[vap->iv_opmode]);
+ }
+#endif
+ ss->ss_vap = vap;
+ if (ss->ss_ops != scan) {
+ /*
+ * Switch scanners; detach old, attach new. Special
+ * case where a single scan module implements multiple
+ * policies by using different scan ops but a common
+ * core. We assume if the old and new attach methods
+ * are identical then it's ok to just change ss_ops
+ * and not flush the internal state of the module.
+ */
+ if (scan == NULL || ss->ss_ops == NULL ||
+ ss->ss_ops->scan_attach != scan->scan_attach) {
+ if (ss->ss_ops != NULL)
+ ss->ss_ops->scan_detach(ss);
+ if (scan != NULL && !scan->scan_attach(ss)) {
+ /* XXX attach failure */
+ /* XXX stat+msg */
+ scan = NULL;
+ }
+ }
+ ss->ss_ops = scan;
+ }
+}
+
+static char
+channel_type(const struct ieee80211_channel *c)
+{
+ if (IEEE80211_IS_CHAN_ST(c))
+ return 'S';
+ if (IEEE80211_IS_CHAN_108A(c))
+ return 'T';
+ if (IEEE80211_IS_CHAN_108G(c))
+ return 'G';
+ if (IEEE80211_IS_CHAN_HT(c))
+ return 'n';
+ if (IEEE80211_IS_CHAN_A(c))
+ return 'a';
+ if (IEEE80211_IS_CHAN_ANYG(c))
+ return 'g';
+ if (IEEE80211_IS_CHAN_B(c))
+ return 'b';
+ return 'f';
+}
+
+void
+ieee80211_scan_dump_channels(const struct ieee80211_scan_state *ss)
+{
+ struct ieee80211com *ic = ss->ss_ic;
+ const char *sep;
+ int i;
+
+ sep = "";
+ for (i = ss->ss_next; i < ss->ss_last; i++) {
+ const struct ieee80211_channel *c = ss->ss_chans[i];
+
+ printf("%s%u%c", sep, ieee80211_chan2ieee(ic, c),
+ channel_type(c));
+ sep = ", ";
+ }
+}
+
+#ifdef IEEE80211_DEBUG
+static void
+scan_dump(struct ieee80211_scan_state *ss)
+{
+ struct ieee80211vap *vap = ss->ss_vap;
+
+ if_printf(vap->iv_ifp, "scan set ");
+ ieee80211_scan_dump_channels(ss);
+ printf(" dwell min %lums max %lums\n",
+ ticks_to_msecs(ss->ss_mindwell), ticks_to_msecs(ss->ss_maxdwell));
+}
+#endif /* IEEE80211_DEBUG */
+
+static void
+copy_ssid(struct ieee80211vap *vap, struct ieee80211_scan_state *ss,
+ int nssid, const struct ieee80211_scan_ssid ssids[])
+{
+ if (nssid > IEEE80211_SCAN_MAX_SSID) {
+ /* XXX printf */
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_SCAN,
+ "%s: too many ssid %d, ignoring all of them\n",
+ __func__, nssid);
+ return;
+ }
+ memcpy(ss->ss_ssid, ssids, nssid * sizeof(ssids[0]));
+ ss->ss_nssid = nssid;
+}
+
+/*
+ * Start a scan unless one is already going.
+ */
+static int
+start_scan_locked(const struct ieee80211_scanner *scan,
+ struct ieee80211vap *vap, int flags, u_int duration,
+ u_int mindwell, u_int maxdwell,
+ u_int nssid, const struct ieee80211_scan_ssid ssids[])
+{
+ struct ieee80211com *ic = vap->iv_ic;
+ struct ieee80211_scan_state *ss = ic->ic_scan;
+
+ IEEE80211_LOCK_ASSERT(ic);
+
+ if (ic->ic_flags & IEEE80211_F_CSAPENDING) {
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_SCAN,
+ "%s: scan inhibited by pending channel change\n", __func__);
+ } else if ((ic->ic_flags & IEEE80211_F_SCAN) == 0) {
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_SCAN,
+ "%s: %s scan, duration %u mindwell %u maxdwell %u, desired mode %s, %s%s%s%s%s%s\n"
+ , __func__
+ , flags & IEEE80211_SCAN_ACTIVE ? "active" : "passive"
+ , duration, mindwell, maxdwell
+ , ieee80211_phymode_name[vap->iv_des_mode]
+ , flags & IEEE80211_SCAN_FLUSH ? "flush" : "append"
+ , flags & IEEE80211_SCAN_NOPICK ? ", nopick" : ""
+ , flags & IEEE80211_SCAN_NOJOIN ? ", nojoin" : ""
+ , flags & IEEE80211_SCAN_NOBCAST ? ", nobcast" : ""
+ , flags & IEEE80211_SCAN_PICK1ST ? ", pick1st" : ""
+ , flags & IEEE80211_SCAN_ONCE ? ", once" : ""
+ );
+
+ scan_update_locked(vap, scan);
+ if (ss->ss_ops != NULL) {
+ if ((flags & IEEE80211_SCAN_NOSSID) == 0)
+ copy_ssid(vap, ss, nssid, ssids);
+
+ /* NB: top 4 bits for internal use */
+ ss->ss_flags = flags & 0xfff;
+ if (ss->ss_flags & IEEE80211_SCAN_ACTIVE)
+ vap->iv_stats.is_scan_active++;
+ else
+ vap->iv_stats.is_scan_passive++;
+ if (flags & IEEE80211_SCAN_FLUSH)
+ ss->ss_ops->scan_flush(ss);
+
+ /* NB: flush frames rx'd before 1st channel change */
+ SCAN_PRIVATE(ss)->ss_iflags |= ISCAN_DISCARD;
+ SCAN_PRIVATE(ss)->ss_duration = duration;
+ ss->ss_next = 0;
+ ss->ss_mindwell = mindwell;
+ ss->ss_maxdwell = maxdwell;
+ /* NB: scan_start must be before the scan runtask */
+ ss->ss_ops->scan_start(ss, vap);
+#ifdef IEEE80211_DEBUG
+ if (ieee80211_msg_scan(vap))
+ scan_dump(ss);
+#endif /* IEEE80211_DEBUG */
+ ic->ic_flags |= IEEE80211_F_SCAN;
+ ieee80211_runtask(ic, &SCAN_PRIVATE(ss)->ss_scan_task);
+ }
+ } else {
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_SCAN,
+ "%s: %s scan already in progress\n", __func__,
+ ss->ss_flags & IEEE80211_SCAN_ACTIVE ? "active" : "passive");
+ }
+ return (ic->ic_flags & IEEE80211_F_SCAN);
+}
+
+/*
+ * Start a scan unless one is already going.
+ */
+int
+ieee80211_start_scan(struct ieee80211vap *vap, int flags,
+ u_int duration, u_int mindwell, u_int maxdwell,
+ u_int nssid, const struct ieee80211_scan_ssid ssids[])
+{
+ struct ieee80211com *ic = vap->iv_ic;
+ const struct ieee80211_scanner *scan;
+ int result;
+
+ scan = ieee80211_scanner_get(vap->iv_opmode);
+ if (scan == NULL) {
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_SCAN,
+ "%s: no scanner support for %s mode\n",
+ __func__, ieee80211_opmode_name[vap->iv_opmode]);
+ /* XXX stat */
+ return 0;
+ }
+
+ IEEE80211_LOCK(ic);
+ result = start_scan_locked(scan, vap, flags, duration,
+ mindwell, maxdwell, nssid, ssids);
+ IEEE80211_UNLOCK(ic);
+
+ return result;
+}
+
+/*
+ * Check the scan cache for an ap/channel to use; if that
+ * fails then kick off a new scan.
+ */
+int
+ieee80211_check_scan(struct ieee80211vap *vap, int flags,
+ u_int duration, u_int mindwell, u_int maxdwell,
+ u_int nssid, const struct ieee80211_scan_ssid ssids[])
+{
+ struct ieee80211com *ic = vap->iv_ic;
+ struct ieee80211_scan_state *ss = ic->ic_scan;
+ const struct ieee80211_scanner *scan;
+ int result;
+
+ scan = ieee80211_scanner_get(vap->iv_opmode);
+ if (scan == NULL) {
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_SCAN,
+ "%s: no scanner support for %s mode\n",
+ __func__, vap->iv_opmode);
+ /* XXX stat */
+ return 0;
+ }
+
+ /*
+ * Check if there's a list of scan candidates already.
+ * XXX want more than the ap we're currently associated with
+ */
+
+ IEEE80211_LOCK(ic);
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_SCAN,
+ "%s: %s scan, %s%s%s%s%s\n"
+ , __func__
+ , flags & IEEE80211_SCAN_ACTIVE ? "active" : "passive"
+ , flags & IEEE80211_SCAN_FLUSH ? "flush" : "append"
+ , flags & IEEE80211_SCAN_NOPICK ? ", nopick" : ""
+ , flags & IEEE80211_SCAN_NOJOIN ? ", nojoin" : ""
+ , flags & IEEE80211_SCAN_PICK1ST ? ", pick1st" : ""
+ , flags & IEEE80211_SCAN_ONCE ? ", once" : ""
+ );
+
+ if (ss->ss_ops != scan) {
+ /* XXX re-use cache contents? e.g. adhoc<->sta */
+ flags |= IEEE80211_SCAN_FLUSH;
+ }
+ scan_update_locked(vap, scan);
+ if (ss->ss_ops != NULL) {
+ /* XXX verify ss_ops matches vap->iv_opmode */
+ if ((flags & IEEE80211_SCAN_NOSSID) == 0) {
+ /*
+ * Update the ssid list and mark flags so if
+ * we call start_scan it doesn't duplicate work.
+ */
+ copy_ssid(vap, ss, nssid, ssids);
+ flags |= IEEE80211_SCAN_NOSSID;
+ }
+ if ((ic->ic_flags & IEEE80211_F_SCAN) == 0 &&
+ (flags & IEEE80211_SCAN_FLUSH) == 0 &&
+ time_before(ticks, ic->ic_lastscan + vap->iv_scanvalid)) {
+ /*
+ * We're not currently scanning and the cache is
+ * deemed hot enough to consult. Lock out others
+ * by marking IEEE80211_F_SCAN while we decide if
+ * something is already in the scan cache we can
+ * use. Also discard any frames that might come
+ * in while temporarily marked as scanning.
+ */
+ SCAN_PRIVATE(ss)->ss_iflags |= ISCAN_DISCARD;
+ ic->ic_flags |= IEEE80211_F_SCAN;
+
+ /* NB: need to use supplied flags in check */
+ ss->ss_flags = flags & 0xff;
+ result = ss->ss_ops->scan_end(ss, vap);
+
+ ic->ic_flags &= ~IEEE80211_F_SCAN;
+ SCAN_PRIVATE(ss)->ss_iflags &= ~ISCAN_DISCARD;
+ if (result) {
+ ieee80211_notify_scan_done(vap);
+ IEEE80211_UNLOCK(ic);
+ return 1;
+ }
+ }
+ }
+ result = start_scan_locked(scan, vap, flags, duration,
+ mindwell, maxdwell, nssid, ssids);
+ IEEE80211_UNLOCK(ic);
+
+ return result;
+}
+
+/*
+ * Check the scan cache for an ap/channel to use; if that fails
+ * then kick off a scan using the current settings.
+ */
+int
+ieee80211_check_scan_current(struct ieee80211vap *vap)
+{
+ return ieee80211_check_scan(vap,
+ IEEE80211_SCAN_ACTIVE,
+ IEEE80211_SCAN_FOREVER, 0, 0,
+ vap->iv_des_nssid, vap->iv_des_ssid);
+}
+
+/*
+ * Restart a previous scan. If the previous scan completed
+ * then we start again using the existing channel list.
+ */
+int
+ieee80211_bg_scan(struct ieee80211vap *vap, int flags)
+{
+ struct ieee80211com *ic = vap->iv_ic;
+ struct ieee80211_scan_state *ss = ic->ic_scan;
+ const struct ieee80211_scanner *scan;
+
+ scan = ieee80211_scanner_get(vap->iv_opmode);
+ if (scan == NULL) {
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_SCAN,
+ "%s: no scanner support for %s mode\n",
+ __func__, vap->iv_opmode);
+ /* XXX stat */
+ return 0;
+ }
+
+ IEEE80211_LOCK(ic);
+ if ((ic->ic_flags & IEEE80211_F_SCAN) == 0) {
+ u_int duration;
+ /*
+ * Go off-channel for a fixed interval that is large
+ * enough to catch most ap's but short enough that
+ * we can return on-channel before our listen interval
+ * expires.
+ */
+ duration = IEEE80211_SCAN_OFFCHANNEL;
+
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_SCAN,
+ "%s: %s scan, ticks %u duration %lu\n", __func__,
+ ss->ss_flags & IEEE80211_SCAN_ACTIVE ? "active" : "passive",
+ ticks, duration);
+
+ scan_update_locked(vap, scan);
+ if (ss->ss_ops != NULL) {
+ ss->ss_vap = vap;
+ /*
+ * A background scan does not select a new sta; it
+ * just refreshes the scan cache. Also, indicate
+ * the scan logic should follow the beacon schedule:
+ * we go off-channel and scan for a while, then
+ * return to the bss channel to receive a beacon,
+ * then go off-channel again. All during this time
+ * we notify the ap we're in power save mode. When
+ * the scan is complete we leave power save mode.
+ * If any beacon indicates there are frames pending
+ * for us then we drop out of power save mode
+ * (and background scan) automatically by way of the
+ * usual sta power save logic.
+ */
+ ss->ss_flags |= IEEE80211_SCAN_NOPICK
+ | IEEE80211_SCAN_BGSCAN
+ | flags
+ ;
+ /* if previous scan completed, restart */
+ if (ss->ss_next >= ss->ss_last) {
+ if (ss->ss_flags & IEEE80211_SCAN_ACTIVE)
+ vap->iv_stats.is_scan_active++;
+ else
+ vap->iv_stats.is_scan_passive++;
+ /*
+ * NB: beware of the scan cache being flushed;
+ * if the channel list is empty use the
+ * scan_start method to populate it.
+ */
+ ss->ss_next = 0;
+ if (ss->ss_last != 0)
+ ss->ss_ops->scan_restart(ss, vap);
+ else {
+ ss->ss_ops->scan_start(ss, vap);
+#ifdef IEEE80211_DEBUG
+ if (ieee80211_msg_scan(vap))
+ scan_dump(ss);
+#endif /* IEEE80211_DEBUG */
+ }
+ }
+ /* NB: flush frames rx'd before 1st channel change */
+ SCAN_PRIVATE(ss)->ss_iflags |= ISCAN_DISCARD;
+ SCAN_PRIVATE(ss)->ss_duration = duration;
+ ss->ss_maxdwell = duration;
+ ic->ic_flags |= IEEE80211_F_SCAN;
+ ic->ic_flags_ext |= IEEE80211_FEXT_BGSCAN;
+ ieee80211_runtask(ic, &SCAN_PRIVATE(ss)->ss_scan_task);
+ } else {
+ /* XXX msg+stat */
+ }
+ } else {
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_SCAN,
+ "%s: %s scan already in progress\n", __func__,
+ ss->ss_flags & IEEE80211_SCAN_ACTIVE ? "active" : "passive");
+ }
+ IEEE80211_UNLOCK(ic);
+
+ /* NB: racy, does it matter? */
+ return (ic->ic_flags & IEEE80211_F_SCAN);
+}
+
+/*
+ * Cancel any scan currently going on for the specified vap.
+ */
+void
+ieee80211_cancel_scan(struct ieee80211vap *vap)
+{
+ struct ieee80211com *ic = vap->iv_ic;
+ struct ieee80211_scan_state *ss = ic->ic_scan;
+
+ IEEE80211_LOCK(ic);
+ if ((ic->ic_flags & IEEE80211_F_SCAN) &&
+ ss->ss_vap == vap &&
+ (SCAN_PRIVATE(ss)->ss_iflags & ISCAN_CANCEL) == 0) {
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_SCAN,
+ "%s: cancel %s scan\n", __func__,
+ ss->ss_flags & IEEE80211_SCAN_ACTIVE ?
+ "active" : "passive");
+
+ /* clear bg scan NOPICK and mark cancel request */
+ ss->ss_flags &= ~IEEE80211_SCAN_NOPICK;
+ SCAN_PRIVATE(ss)->ss_iflags |= ISCAN_CANCEL;
+ /* wake up the scan task */
+ scan_signal(ss);
+ }
+ IEEE80211_UNLOCK(ic);
+}
+
+/*
+ * Cancel any scan currently going on.
+ */
+void
+ieee80211_cancel_anyscan(struct ieee80211vap *vap)
+{
+ struct ieee80211com *ic = vap->iv_ic;
+ struct ieee80211_scan_state *ss = ic->ic_scan;
+
+ IEEE80211_LOCK(ic);
+ if ((ic->ic_flags & IEEE80211_F_SCAN) &&
+ (SCAN_PRIVATE(ss)->ss_iflags & ISCAN_CANCEL) == 0) {
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_SCAN,
+ "%s: cancel %s scan\n", __func__,
+ ss->ss_flags & IEEE80211_SCAN_ACTIVE ?
+ "active" : "passive");
+
+ /* clear bg scan NOPICK and mark cancel request */
+ ss->ss_flags &= ~IEEE80211_SCAN_NOPICK;
+ SCAN_PRIVATE(ss)->ss_iflags |= ISCAN_CANCEL;
+ /* wake up the scan task */
+ scan_signal(ss);
+ }
+ IEEE80211_UNLOCK(ic);
+}
+
+/*
+ * Public access to scan_next for drivers that manage
+ * scanning themselves (e.g. for firmware-based devices).
+ */
+void
+ieee80211_scan_next(struct ieee80211vap *vap)
+{
+ struct ieee80211com *ic = vap->iv_ic;
+ struct ieee80211_scan_state *ss = ic->ic_scan;
+
+ /* wake up the scan task */
+ IEEE80211_LOCK(ic);
+ scan_signal(ss);
+ IEEE80211_UNLOCK(ic);
+}
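+
+/*
+ * Usage sketch (hypothetical driver code, not part of this file):
+ * a device that scans in firmware would call ieee80211_scan_next
+ * from its per-channel completion interrupt/event handler.
+ */
+#if 0
+static void
+example_fw_chan_done(struct ieee80211vap *vap)
+{
+ ieee80211_scan_next(vap); /* advance to the next channel */
+}
+#endif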
+
+/*
+ * Public access to scan completion for drivers that are not able to
+ * scan single channels (e.g. for firmware-based devices); marks all
+ * channels done and signals the scan task.
+ */
+void
+ieee80211_scan_done(struct ieee80211vap *vap)
+{
+ struct ieee80211com *ic = vap->iv_ic;
+ struct ieee80211_scan_state *ss;
+
+ IEEE80211_LOCK(ic);
+ ss = ic->ic_scan;
+ ss->ss_next = ss->ss_last; /* all channels are complete */
+ scan_signal(ss);
+ IEEE80211_UNLOCK(ic);
+}
+
+/*
+ * Probe the current channel, if allowed, while scanning.
+ * If the channel is not marked passive-only then send
+ * a probe request immediately. Otherwise mark state and
+ * listen for beacons on the channel; if we receive something
+ * then we'll transmit a probe request.
+ */
+void
+ieee80211_probe_curchan(struct ieee80211vap *vap, int force)
+{
+ struct ieee80211com *ic = vap->iv_ic;
+ struct ieee80211_scan_state *ss = ic->ic_scan;
+ struct ifnet *ifp = vap->iv_ifp;
+ int i;
+
+ if ((ic->ic_curchan->ic_flags & IEEE80211_CHAN_PASSIVE) && !force) {
+ ic->ic_flags_ext |= IEEE80211_FEXT_PROBECHAN;
+ return;
+ }
+ /*
+ * Send directed probe requests followed by any
+ * broadcast probe request.
+ * XXX remove dependence on ic/vap->iv_bss
+ */
+ for (i = 0; i < ss->ss_nssid; i++)
+ ieee80211_send_probereq(vap->iv_bss,
+ vap->iv_myaddr, ifp->if_broadcastaddr,
+ ifp->if_broadcastaddr,
+ ss->ss_ssid[i].ssid, ss->ss_ssid[i].len);
+ if ((ss->ss_flags & IEEE80211_SCAN_NOBCAST) == 0)
+ ieee80211_send_probereq(vap->iv_bss,
+ vap->iv_myaddr, ifp->if_broadcastaddr,
+ ifp->if_broadcastaddr,
+ "", 0);
+}
+
+/*
+ * Scan curchan. If this is an active scan and the channel
+ * is not marked passive then send probe request frame(s).
+ * Arrange for the channel change after maxdwell ticks.
+ */
+static void
+scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell)
+{
+ struct ieee80211vap *vap = ss->ss_vap;
+
+ IEEE80211_LOCK(vap->iv_ic);
+ if (ss->ss_flags & IEEE80211_SCAN_ACTIVE)
+ ieee80211_probe_curchan(vap, 0);
+ callout_reset(&SCAN_PRIVATE(ss)->ss_scan_timer,
+ maxdwell, scan_signal, ss);
+ IEEE80211_UNLOCK(vap->iv_ic);
+}
+
+static void
+scan_signal(void *arg)
+{
+ struct ieee80211_scan_state *ss = (struct ieee80211_scan_state *) arg;
+
+ IEEE80211_LOCK_ASSERT(ss->ss_ic);
+
+ cv_signal(&SCAN_PRIVATE(ss)->ss_scan_cv);
+}
+
+/*
+ * Handle completion of the mindwell requirement; initiate a channel
+ * change to the next channel ASAP.
+ */
+static void
+scan_mindwell(struct ieee80211_scan_state *ss)
+{
+ struct ieee80211com *ic = ss->ss_ic;
+
+ IEEE80211_LOCK(ic);
+ scan_signal(ss);
+ IEEE80211_UNLOCK(ic);
+}
+
+static void
+scan_task(void *arg, int pending)
+{
+#define ISCAN_REP (ISCAN_MINDWELL | ISCAN_DISCARD)
+ struct ieee80211_scan_state *ss = (struct ieee80211_scan_state *) arg;
+ struct ieee80211vap *vap = ss->ss_vap;
+ struct ieee80211com *ic = ss->ss_ic;
+ struct ieee80211_channel *chan;
+ unsigned long maxdwell, scanend;
+ int scandone = 0;
+
+ IEEE80211_LOCK(ic);
+ if (vap == NULL || (ic->ic_flags & IEEE80211_F_SCAN) == 0 ||
+ (SCAN_PRIVATE(ss)->ss_iflags & ISCAN_ABORT)) {
+ /* Cancelled before we started */
+ goto done;
+ }
+
+ if (ss->ss_next == ss->ss_last) {
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_SCAN,
+ "%s: no channels to scan\n", __func__);
+ goto done;
+ }
+
+ if (vap->iv_opmode == IEEE80211_M_STA &&
+ vap->iv_state == IEEE80211_S_RUN) {
+ if ((vap->iv_bss->ni_flags & IEEE80211_NODE_PWR_MGT) == 0) {
+ /* Enable station power save mode */
+ ieee80211_sta_pwrsave(vap, 1);
+ /*
+ * Use a 1ms delay so the null data frame has a chance
+ * to go out.
+ * XXX Should use M_TXCB mechanism to eliminate this.
+ */
+ cv_timedwait(&SCAN_PRIVATE(ss)->ss_scan_cv,
+ IEEE80211_LOCK_OBJ(ic), hz / 1000);
+ if (SCAN_PRIVATE(ss)->ss_iflags & ISCAN_ABORT)
+ goto done;
+ }
+ }
+
+ scanend = ticks + SCAN_PRIVATE(ss)->ss_duration;
+ IEEE80211_UNLOCK(ic);
+ ic->ic_scan_start(ic); /* notify driver */
+ IEEE80211_LOCK(ic);
+
+ for (;;) {
+ scandone = (ss->ss_next >= ss->ss_last) ||
+ (SCAN_PRIVATE(ss)->ss_iflags & ISCAN_CANCEL) != 0;
+ if (scandone || (ss->ss_flags & IEEE80211_SCAN_GOTPICK) ||
+ (SCAN_PRIVATE(ss)->ss_iflags & ISCAN_ABORT) ||
+ time_after(ticks + ss->ss_mindwell, scanend))
+ break;
+
+ chan = ss->ss_chans[ss->ss_next++];
+
+ /*
+ * Watch for truncation due to the scan end time.
+ */
+ if (time_after(ticks + ss->ss_maxdwell, scanend))
+ maxdwell = scanend - ticks;
+ else
+ maxdwell = ss->ss_maxdwell;
+
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_SCAN,
+ "%s: chan %3d%c -> %3d%c [%s, dwell min %lums max %lums]\n",
+ __func__,
+ ieee80211_chan2ieee(ic, ic->ic_curchan),
+ channel_type(ic->ic_curchan),
+ ieee80211_chan2ieee(ic, chan), channel_type(chan),
+ (ss->ss_flags & IEEE80211_SCAN_ACTIVE) &&
+ (chan->ic_flags & IEEE80211_CHAN_PASSIVE) == 0 ?
+ "active" : "passive",
+ ticks_to_msecs(ss->ss_mindwell), ticks_to_msecs(maxdwell));
+
+ /*
+ * Potentially change channel and phy mode.
+ */
+ ic->ic_curchan = chan;
+ ic->ic_rt = ieee80211_get_ratetable(chan);
+ IEEE80211_UNLOCK(ic);
+ /*
+ * Perform the channel change and scan unlocked so the driver
+ * may sleep. Once set_channel returns the hardware has
+ * completed the channel change.
+ */
+ ic->ic_set_channel(ic);
+ ieee80211_radiotap_chan_change(ic);
+
+ /*
+ * Scan curchan. Drivers for "intelligent hardware"
+ * override ic_scan_curchan to tell the device to do
+ * the work. Otherwise we manage the work ourselves;
+ * sending a probe request (as needed), and arming the
+ * timeout to switch channels after maxdwell ticks.
+ *
+ * scan_curchan should only pause for the time required to
+ * prepare/initiate the hardware for the scan (if at all); the
+ * condvar below is used to sleep for the channel's dwell time
+ * and allows the wait to be signalled early on abort.
+ */
+ ic->ic_scan_curchan(ss, maxdwell);
+ IEEE80211_LOCK(ic);
+
+ SCAN_PRIVATE(ss)->ss_chanmindwell = ticks + ss->ss_mindwell;
+ /* clear mindwell lock and initial channel change flush */
+ SCAN_PRIVATE(ss)->ss_iflags &= ~ISCAN_REP;
+
+ if ((SCAN_PRIVATE(ss)->ss_iflags & (ISCAN_CANCEL|ISCAN_ABORT)))
+ continue;
+
+ /* Wait to be signalled to scan the next channel */
+ cv_wait(&SCAN_PRIVATE(ss)->ss_scan_cv, IEEE80211_LOCK_OBJ(ic));
+ }
+ if (SCAN_PRIVATE(ss)->ss_iflags & ISCAN_ABORT)
+ goto done;
+
+ IEEE80211_UNLOCK(ic);
+ ic->ic_scan_end(ic); /* notify driver */
+ IEEE80211_LOCK(ic);
+
+ /*
+ * Record scan complete time. Note that we also do
+ * this when canceled so any background scan will
+ * not be restarted for a while.
+ */
+ if (scandone)
+ ic->ic_lastscan = ticks;
+ /* return to the bss channel */
+ if (ic->ic_bsschan != IEEE80211_CHAN_ANYC &&
+ ic->ic_curchan != ic->ic_bsschan) {
+ ieee80211_setupcurchan(ic, ic->ic_bsschan);
+ IEEE80211_UNLOCK(ic);
+ ic->ic_set_channel(ic);
+ ieee80211_radiotap_chan_change(ic);
+ IEEE80211_LOCK(ic);
+ }
+ /* clear internal flags and any indication of a pick */
+ SCAN_PRIVATE(ss)->ss_iflags &= ~ISCAN_REP;
+ ss->ss_flags &= ~IEEE80211_SCAN_GOTPICK;
+
+ /*
+ * If not canceled and scan completed, do post-processing.
+ * If the callback function returns 0, then it wants to
+ * continue/restart scanning. Unfortunately we needed to
+ * notify the driver to end the scan above to avoid having
+ * rx frames alter the scan candidate list.
+ */
+ if ((SCAN_PRIVATE(ss)->ss_iflags & ISCAN_CANCEL) == 0 &&
+ !ss->ss_ops->scan_end(ss, vap) &&
+ (ss->ss_flags & IEEE80211_SCAN_ONCE) == 0 &&
+ time_before(ticks + ss->ss_mindwell, scanend)) {
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_SCAN,
+ "%s: done, restart "
+ "[ticks %u, dwell min %lu scanend %lu]\n",
+ __func__,
+ ticks, ss->ss_mindwell, scanend);
+ ss->ss_next = 0; /* reset to beginning */
+ if (ss->ss_flags & IEEE80211_SCAN_ACTIVE)
+ vap->iv_stats.is_scan_active++;
+ else
+ vap->iv_stats.is_scan_passive++;
+
+ ss->ss_ops->scan_restart(ss, vap); /* XXX? */
+ ieee80211_runtask(ic, &SCAN_PRIVATE(ss)->ss_scan_task);
+ IEEE80211_UNLOCK(ic);
+ return;
+ }
+
+ /* past here, scandone is ``true'' if not in bg mode */
+ if ((ss->ss_flags & IEEE80211_SCAN_BGSCAN) == 0)
+ scandone = 1;
+
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_SCAN,
+ "%s: %s, [ticks %u, dwell min %lu scanend %lu]\n",
+ __func__, scandone ? "done" : "stopped",
+ ticks, ss->ss_mindwell, scanend);
+
+ /*
+ * Clear the SCAN bit first in case frames are
+ * pending on the station power save queue. If
+ * we defer this then the dispatch of the frames
+ * may generate a request to cancel scanning.
+ */
+done:
+ ic->ic_flags &= ~IEEE80211_F_SCAN;
+ /*
+ * Drop out of power save mode when a scan has
+ * completed. If this scan was prematurely terminated
+ * because it is a background scan then don't notify
+ * the ap; we'll either return to scanning after we
+ * receive the beacon frame or we'll drop out of power
+ * save mode because the beacon indicates we have frames
+ * waiting for us.
+ */
+ if (scandone) {
+ ieee80211_sta_pwrsave(vap, 0);
+ if (ss->ss_next >= ss->ss_last) {
+ ieee80211_notify_scan_done(vap);
+ ic->ic_flags_ext &= ~IEEE80211_FEXT_BGSCAN;
+ }
+ }
+ SCAN_PRIVATE(ss)->ss_iflags &= ~(ISCAN_CANCEL|ISCAN_ABORT);
+ ss->ss_flags &= ~(IEEE80211_SCAN_ONCE | IEEE80211_SCAN_PICK1ST);
+ IEEE80211_UNLOCK(ic);
+#undef ISCAN_REP
+}
+
+#ifdef IEEE80211_DEBUG
+static void
+dump_country(const uint8_t *ie)
+{
+ const struct ieee80211_country_ie *cie =
+ (const struct ieee80211_country_ie *) ie;
+ int i, nbands, schan, nchan;
+
+ if (cie->len < 3) {
+ printf(" <bogus country ie, len %d>", cie->len);
+ return;
+ }
+ printf(" country [%c%c%c", cie->cc[0], cie->cc[1], cie->cc[2]);
+ nbands = (cie->len - 3) / sizeof(cie->band[0]);
+ for (i = 0; i < nbands; i++) {
+ schan = cie->band[i].schan;
+ nchan = cie->band[i].nchan;
+ if (nchan != 1)
+ printf(" %u-%u,%u", schan, schan + nchan-1,
+ cie->band[i].maxtxpwr);
+ else
+ printf(" %u,%u", schan, cie->band[i].maxtxpwr);
+ }
+ printf("]");
+}
+
+static void
+dump_probe_beacon(uint8_t subtype, int isnew,
+ const uint8_t mac[IEEE80211_ADDR_LEN],
+ const struct ieee80211_scanparams *sp, int rssi)
+{
+
+ printf("[%s] %s%s on chan %u (bss chan %u) ",
+ ether_sprintf(mac), isnew ? "new " : "",
+ ieee80211_mgt_subtype_name[subtype >> IEEE80211_FC0_SUBTYPE_SHIFT],
+ sp->chan, sp->bchan);
+ ieee80211_print_essid(sp->ssid + 2, sp->ssid[1]);
+ printf(" rssi %d\n", rssi);
+
+ if (isnew) {
+ printf("[%s] caps 0x%x bintval %u erp 0x%x",
+ ether_sprintf(mac), sp->capinfo, sp->bintval, sp->erp);
+ if (sp->country != NULL)
+ dump_country(sp->country);
+ printf("\n");
+ }
+}
+#endif /* IEEE80211_DEBUG */
+
+/*
+ * Process a beacon or probe response frame.
+ */
+void
+ieee80211_add_scan(struct ieee80211vap *vap,
+ const struct ieee80211_scanparams *sp,
+ const struct ieee80211_frame *wh,
+ int subtype, int rssi, int noise)
+{
+ struct ieee80211com *ic = vap->iv_ic;
+ struct ieee80211_scan_state *ss = ic->ic_scan;
+
+ /* XXX locking */
+ /*
+ * Frames received during startup are discarded to avoid
+ * using scan state setup on the initial entry to the timer
+ * callback. This can occur because the device may enable
+ * rx prior to our doing the initial channel change in the
+ * timer routine.
+ */
+ if (SCAN_PRIVATE(ss)->ss_iflags & ISCAN_DISCARD)
+ return;
+#ifdef IEEE80211_DEBUG
+ if (ieee80211_msg_scan(vap) && (ic->ic_flags & IEEE80211_F_SCAN))
+ dump_probe_beacon(subtype, 1, wh->i_addr2, sp, rssi);
+#endif
+ if (ss->ss_ops != NULL &&
+ ss->ss_ops->scan_add(ss, sp, wh, subtype, rssi, noise)) {
+ /*
+ * If we've reached the min dwell time terminate
+ * the timer so we'll switch to the next channel.
+ */
+ if ((SCAN_PRIVATE(ss)->ss_iflags & ISCAN_MINDWELL) == 0 &&
+ time_after_eq(ticks, SCAN_PRIVATE(ss)->ss_chanmindwell)) {
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_SCAN,
+ "%s: chan %3d%c min dwell met (%u > %lu)\n",
+ __func__,
+ ieee80211_chan2ieee(ic, ic->ic_curchan),
+ channel_type(ic->ic_curchan),
+ ticks, SCAN_PRIVATE(ss)->ss_chanmindwell);
+ SCAN_PRIVATE(ss)->ss_iflags |= ISCAN_MINDWELL;
+ /*
+ * NB: trigger at next clock tick or wait for the
+ * hardware.
+ */
+ ic->ic_scan_mindwell(ss);
+ }
+ }
+}
+
+/*
+ * Timeout/age scan cache entries; called from sta timeout
+ * timer (XXX should be self-contained).
+ */
+void
+ieee80211_scan_timeout(struct ieee80211com *ic)
+{
+ struct ieee80211_scan_state *ss = ic->ic_scan;
+
+ if (ss->ss_ops != NULL)
+ ss->ss_ops->scan_age(ss);
+}
+
+/*
+ * Mark a scan cache entry after a successful associate.
+ */
+void
+ieee80211_scan_assoc_success(struct ieee80211vap *vap, const uint8_t mac[])
+{
+ struct ieee80211_scan_state *ss = vap->iv_ic->ic_scan;
+
+ if (ss->ss_ops != NULL) {
+ IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_SCAN,
+ mac, "%s", __func__);
+ ss->ss_ops->scan_assoc_success(ss, mac);
+ }
+}
+
+/*
+ * Demerit a scan cache entry after failing to associate.
+ */
+void
+ieee80211_scan_assoc_fail(struct ieee80211vap *vap,
+ const uint8_t mac[], int reason)
+{
+ struct ieee80211_scan_state *ss = vap->iv_ic->ic_scan;
+
+ if (ss->ss_ops != NULL) {
+ IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_SCAN, mac,
+ "%s: reason %u", __func__, reason);
+ ss->ss_ops->scan_assoc_fail(ss, mac, reason);
+ }
+}
+
+/*
+ * Iterate over the contents of the scan cache.
+ */
+void
+ieee80211_scan_iterate(struct ieee80211vap *vap,
+ ieee80211_scan_iter_func *f, void *arg)
+{
+ struct ieee80211_scan_state *ss = vap->iv_ic->ic_scan;
+
+ if (ss->ss_ops != NULL)
+ ss->ss_ops->scan_iterate(ss, f, arg);
+}
+
+/*
+ * Flush the contents of the scan cache.
+ */
+void
+ieee80211_scan_flush(struct ieee80211vap *vap)
+{
+ struct ieee80211_scan_state *ss = vap->iv_ic->ic_scan;
+
+ if (ss->ss_ops != NULL && ss->ss_vap == vap) {
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_SCAN, "%s\n", __func__);
+ ss->ss_ops->scan_flush(ss);
+ }
+}
+
+/*
+ * Pick an ap/channel to use from the scan cache by way of the
+ * policy module's pickchan method; returns NULL if nothing
+ * suitable is available.
+ */
+struct ieee80211_channel *
+ieee80211_scan_pickchannel(struct ieee80211com *ic, int flags)
+{
+ struct ieee80211_scan_state *ss = ic->ic_scan;
+
+ IEEE80211_LOCK_ASSERT(ic);
+
+ if (ss == NULL || ss->ss_ops == NULL || ss->ss_vap == NULL) {
+ /* XXX printf? */
+ return NULL;
+ }
+ if (ss->ss_ops->scan_pickchan == NULL) {
+ IEEE80211_DPRINTF(ss->ss_vap, IEEE80211_MSG_SCAN,
+ "%s: scan module does not support picking a channel, "
+ "opmode %s\n", __func__, ss->ss_vap->iv_opmode);
+ return NULL;
+ }
+ return ss->ss_ops->scan_pickchan(ss, flags);
+}
diff --git a/rtems/freebsd/net80211/ieee80211_scan.h b/rtems/freebsd/net80211/ieee80211_scan.h
new file mode 100644
index 00000000..6273902d
--- /dev/null
+++ b/rtems/freebsd/net80211/ieee80211_scan.h
@@ -0,0 +1,301 @@
+/*-
+ * Copyright (c) 2005-2009 Sam Leffler, Errno Consulting
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+#ifndef _NET80211_IEEE80211_SCAN_HH_
+#define _NET80211_IEEE80211_SCAN_HH_
+
+/*
+ * 802.11 scanning support.
+ *
+ * Scanning is the procedure by which a station locates a bss to join
+ * (infrastructure/ibss mode), or a channel to use (when operating as
+ * an ap or ibss master). Scans are either "active" or "passive". An
+ * active scan causes one or more probe request frames to be sent on
+ * visiting each channel. A passive scan causes each channel in the
+ * scan set to be visited but no frames to be transmitted; the station
+ * only listens for traffic. Note that active scanning may still need
+ * to listen for traffic before sending probe request frames depending
+ * on regulatory constraints; the 802.11 layer handles this by generating
+ * a callback when scanning on a ``passive channel'' when the
+ * IEEE80211_FEXT_PROBECHAN flag is set.
+ *
+ * A scan operation involves constructing a set of channels to inspect
+ * (the scan set), visiting each channel and collecting information
+ * (e.g. what bss are present), and then analyzing the results to make
+ * decisions like which bss to join. This process needs to be as fast
+ * as possible so we do things like intelligently construct scan sets
+ * and dwell on a channel only as long as necessary. The scan code also
+ * maintains a cache of recent scan results and uses it to bypass scanning
+ * whenever possible. The scan cache is also used to enable roaming
+ * between access points when operating in infrastructure mode.
+ *
+ * Scanning is handled with pluggable modules that implement "policy"
+ * per-operating mode. The core scanning support provides an
+ * infrastructure to support these modules and exports a common api
+ * to the rest of the 802.11 layer. Policy modules decide what
+ * channels to visit, what state to record to make decisions (e.g. ap
+ * mode scanning for auto channel selection keeps significantly less
+ * state than sta mode scanning for an ap to associate to), and selects
+ * the final station/channel to return as the result of a scan.
+ *
+ * Scanning is done synchronously when initially bringing a vap to an
+ * operational state and optionally in the background to maintain the
+ * scan cache for doing roaming and rogue ap monitoring. Scanning is
+ * not tied to the 802.11 state machine that governs vaps though there
+ * is linkage to the IEEE80211_SCAN state. Only one vap at a time may
+ * be scanning; this scheduling policy is handled in ieee80211_new_state
+ * and is invisible to the scanning code.
+ */
+#define IEEE80211_SCAN_MAX IEEE80211_CHAN_MAX
+
+struct ieee80211_scanner; /* scan policy state */
+
+struct ieee80211_scan_ssid {
+ int len; /* length in bytes */
+ uint8_t ssid[IEEE80211_NWID_LEN]; /* ssid contents */
+};
+#define IEEE80211_SCAN_MAX_SSID 1 /* max # ssid's to probe */
+
+/*
+ * Scan state visible to the 802.11 layer. Scan parameters and
+ * results are stored in this data structure. The ieee80211_scan_state
+ * structure is extended with space that is maintained private to
+ * the core scanning support. We allocate one instance and link it
+ * to the ieee80211com structure; then share it between all associated
+ * vaps. We could allocate multiple of these, e.g. to hold multiple
+ * scan results, but this is sufficient for current needs.
+ */
+struct ieee80211_scan_state {
+ struct ieee80211vap *ss_vap;
+ struct ieee80211com *ss_ic;
+ const struct ieee80211_scanner *ss_ops; /* policy hookup, see below */
+ void *ss_priv; /* scanner private state */
+ uint16_t ss_flags;
+#define IEEE80211_SCAN_NOPICK 0x0001 /* scan only, no selection */
+#define IEEE80211_SCAN_ACTIVE 0x0002 /* active scan (probe req) */
+#define IEEE80211_SCAN_PICK1ST 0x0004 /* ``hey sailor'' mode */
+#define IEEE80211_SCAN_BGSCAN 0x0008 /* bg scan, exit ps at end */
+#define IEEE80211_SCAN_ONCE 0x0010 /* do one complete pass */
+#define IEEE80211_SCAN_NOBCAST 0x0020 /* no broadcast probe req */
+#define IEEE80211_SCAN_NOJOIN 0x0040 /* no auto-sequencing */
+#define IEEE80211_SCAN_GOTPICK 0x1000 /* got candidate, can stop */
+ uint8_t ss_nssid; /* # ssid's to probe/match */
+ struct ieee80211_scan_ssid ss_ssid[IEEE80211_SCAN_MAX_SSID];
+ /* ssid's to probe/match */
+ /* ordered channel set */
+ struct ieee80211_channel *ss_chans[IEEE80211_SCAN_MAX];
+ uint16_t ss_next; /* ix of next chan to scan */
+ uint16_t ss_last; /* ix+1 of last chan to scan */
+ unsigned long ss_mindwell; /* min dwell on channel */
+ unsigned long ss_maxdwell; /* max dwell on channel */
+};
+
+/*
+ * The upper 16 bits of the flags word is used to communicate
+ * information to the scanning code that is NOT recorded in
+ * ss_flags. It might be better to split this stuff out into
+ * a separate variable to avoid confusion.
+ */
+#define IEEE80211_SCAN_FLUSH 0x00010000 /* flush candidate table */
+#define IEEE80211_SCAN_NOSSID 0x80000000 /* don't update ssid list */
+
+struct ieee80211com;
+void ieee80211_scan_attach(struct ieee80211com *);
+void ieee80211_scan_detach(struct ieee80211com *);
+void ieee80211_scan_vattach(struct ieee80211vap *);
+void ieee80211_scan_vdetach(struct ieee80211vap *);
+
+void ieee80211_scan_dump_channels(const struct ieee80211_scan_state *);
+
+#define IEEE80211_SCAN_FOREVER 0x7fffffff
+int ieee80211_start_scan(struct ieee80211vap *, int flags,
+ u_int duration, u_int mindwell, u_int maxdwell,
+ u_int nssid, const struct ieee80211_scan_ssid ssids[]);
+int ieee80211_check_scan(struct ieee80211vap *, int flags,
+ u_int duration, u_int mindwell, u_int maxdwell,
+ u_int nssid, const struct ieee80211_scan_ssid ssids[]);
+int ieee80211_check_scan_current(struct ieee80211vap *);
+int ieee80211_bg_scan(struct ieee80211vap *, int);
+void ieee80211_cancel_scan(struct ieee80211vap *);
+void ieee80211_cancel_anyscan(struct ieee80211vap *);
+void ieee80211_scan_next(struct ieee80211vap *);
+void ieee80211_scan_done(struct ieee80211vap *);
+void ieee80211_probe_curchan(struct ieee80211vap *, int);
+struct ieee80211_channel *ieee80211_scan_pickchannel(struct ieee80211com *, int);
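+
+/*
+ * Usage sketch (hypothetical, assumes an active vap): kick off a
+ * one-shot active scan that flushes the cache and probes the
+ * configured ssid list.
+ */
+#if 0
+ (void) ieee80211_start_scan(vap,
+     IEEE80211_SCAN_ACTIVE | IEEE80211_SCAN_FLUSH | IEEE80211_SCAN_ONCE,
+     IEEE80211_SCAN_FOREVER, 0, 0,
+     vap->iv_des_nssid, vap->iv_des_ssid);
+#endif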
+
+struct ieee80211_scanparams;
+void ieee80211_add_scan(struct ieee80211vap *,
+ const struct ieee80211_scanparams *,
+ const struct ieee80211_frame *,
+ int subtype, int rssi, int noise);
+void ieee80211_scan_timeout(struct ieee80211com *);
+
+void ieee80211_scan_assoc_success(struct ieee80211vap *,
+ const uint8_t mac[IEEE80211_ADDR_LEN]);
+enum {
+ IEEE80211_SCAN_FAIL_TIMEOUT = 1, /* no response to mgmt frame */
+ IEEE80211_SCAN_FAIL_STATUS = 2 /* negative response to " " */
+};
+void ieee80211_scan_assoc_fail(struct ieee80211vap *,
+ const uint8_t mac[IEEE80211_ADDR_LEN], int reason);
+void ieee80211_scan_flush(struct ieee80211vap *);
+
+struct ieee80211_scan_entry;
+typedef void ieee80211_scan_iter_func(void *,
+ const struct ieee80211_scan_entry *);
+void ieee80211_scan_iterate(struct ieee80211vap *,
+ ieee80211_scan_iter_func, void *);
+enum {
+ IEEE80211_BPARSE_BADIELEN = 0x01, /* ie len past end of frame */
+ IEEE80211_BPARSE_RATES_INVALID = 0x02, /* invalid RATES ie */
+ IEEE80211_BPARSE_XRATES_INVALID = 0x04, /* invalid XRATES ie */
+ IEEE80211_BPARSE_SSID_INVALID = 0x08, /* invalid SSID ie */
+ IEEE80211_BPARSE_CHAN_INVALID = 0x10, /* invalid FH/DSPARMS chan */
+ IEEE80211_BPARSE_OFFCHAN = 0x20, /* DSPARMS chan != curchan */
+ IEEE80211_BPARSE_BINTVAL_INVALID= 0x40, /* invalid beacon interval */
+ IEEE80211_BPARSE_CSA_INVALID = 0x80, /* invalid CSA ie */
+};
+
+/*
+ * Parameters supplied when adding/updating an entry in a
+ * scan cache. Pointer variables should be set to NULL
+ * if no data is available. Pointer references can be to
+ * local data; any information that is saved will be copied.
+ * All multi-byte values must be in host byte order.
+ */
+struct ieee80211_scanparams {
+ uint8_t status; /* bitmask of IEEE80211_BPARSE_* */
+ uint8_t chan; /* channel # from FH/DSPARMS */
+ uint8_t bchan; /* curchan's channel # */
+ uint8_t fhindex;
+ uint16_t fhdwell; /* FHSS dwell interval */
+ uint16_t capinfo; /* 802.11 capabilities */
+ uint16_t erp; /* NB: 0x100 indicates ie present */
+ uint16_t bintval;
+ uint8_t timoff;
+ uint8_t *ies; /* all captured ies */
+ size_t ies_len; /* length of all captured ies */
+ uint8_t *tim;
+ uint8_t *tstamp;
+ uint8_t *country;
+ uint8_t *ssid;
+ uint8_t *rates;
+ uint8_t *xrates;
+ uint8_t *doth;
+ uint8_t *wpa;
+ uint8_t *rsn;
+ uint8_t *wme;
+ uint8_t *htcap;
+ uint8_t *htinfo;
+ uint8_t *ath;
+ uint8_t *tdma;
+ uint8_t *csa;
+ uint8_t *meshid;
+ uint8_t *meshconf;
+ uint8_t *spare[3];
+};
+
+/*
+ * Scan cache entry format used when exporting data from a policy
+ * module; this data may be represented some other way internally.
+ */
+struct ieee80211_scan_entry {
+ uint8_t se_macaddr[IEEE80211_ADDR_LEN];
+ uint8_t se_bssid[IEEE80211_ADDR_LEN];
+ /* XXX can point inside se_ies */
+ uint8_t se_ssid[2+IEEE80211_NWID_LEN];
+ uint8_t se_rates[2+IEEE80211_RATE_MAXSIZE];
+ uint8_t se_xrates[2+IEEE80211_RATE_MAXSIZE];
+ union {
+ uint8_t data[8];
+ u_int64_t tsf;
+ } se_tstamp; /* from last rcv'd beacon */
+ uint16_t se_intval; /* beacon interval (host byte order) */
+ uint16_t se_capinfo; /* capabilities (host byte order) */
+ struct ieee80211_channel *se_chan;/* channel where sta found */
+ uint16_t se_timoff; /* byte offset to TIM ie */
+ uint16_t se_fhdwell; /* FH only (host byte order) */
+ uint8_t se_fhindex; /* FH only */
+ uint8_t se_dtimperiod; /* DTIM period */
+ uint16_t se_erp; /* ERP from beacon/probe resp */
+ int8_t se_rssi; /* avg'd recv ssi */
+ int8_t se_noise; /* noise floor */
+ uint8_t se_cc[2]; /* captured country code */
+ uint8_t se_meshid[2+IEEE80211_MESHID_LEN];
+ struct ieee80211_ies se_ies; /* captured ie's */
+ u_int se_age; /* age of entry (0 on create) */
+};
+MALLOC_DECLARE(M_80211_SCAN);
+
+/*
+ * Template for an in-kernel scan policy module.
+ * Modules register with the scanning code and are
+ * typically loaded as needed.
+ */
+struct ieee80211_scanner {
+ const char *scan_name; /* printable name */
+ int (*scan_attach)(struct ieee80211_scan_state *);
+ int (*scan_detach)(struct ieee80211_scan_state *);
+ int (*scan_start)(struct ieee80211_scan_state *,
+ struct ieee80211vap *);
+ int (*scan_restart)(struct ieee80211_scan_state *,
+ struct ieee80211vap *);
+ int (*scan_cancel)(struct ieee80211_scan_state *,
+ struct ieee80211vap *);
+ int (*scan_end)(struct ieee80211_scan_state *,
+ struct ieee80211vap *);
+ int (*scan_flush)(struct ieee80211_scan_state *);
+ struct ieee80211_channel *(*scan_pickchan)(
+ struct ieee80211_scan_state *, int);
+ /* add an entry to the cache */
+ int (*scan_add)(struct ieee80211_scan_state *,
+ const struct ieee80211_scanparams *,
+ const struct ieee80211_frame *,
+ int subtype, int rssi, int noise);
+ /* age and/or purge entries in the cache */
+ void (*scan_age)(struct ieee80211_scan_state *);
+ /* note that association failed for an entry */
+ void (*scan_assoc_fail)(struct ieee80211_scan_state *,
+ const uint8_t macaddr[IEEE80211_ADDR_LEN],
+ int reason);
+ /* note that association succeeded for an entry */
+ void (*scan_assoc_success)(struct ieee80211_scan_state *,
+ const uint8_t macaddr[IEEE80211_ADDR_LEN]);
+ /* iterate over entries in the scan cache */
+ void (*scan_iterate)(struct ieee80211_scan_state *,
+ ieee80211_scan_iter_func *, void *);
+ void (*scan_spare0)(void);
+ void (*scan_spare1)(void);
+ void (*scan_spare2)(void);
+ void (*scan_spare3)(void);
+};
+void ieee80211_scanner_register(enum ieee80211_opmode,
+ const struct ieee80211_scanner *);
+void ieee80211_scanner_unregister(enum ieee80211_opmode,
+ const struct ieee80211_scanner *);
+void ieee80211_scanner_unregister_all(const struct ieee80211_scanner *);
+const struct ieee80211_scanner *ieee80211_scanner_get(enum ieee80211_opmode);
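+
+/*
+ * Usage sketch (illustrative only): a policy module fills in a
+ * struct ieee80211_scanner and registers it for each opmode it
+ * serves, e.g.:
+ *
+ * static const struct ieee80211_scanner my_scan = {
+ * .scan_name = "example",
+ * .scan_attach = my_attach,
+ * .scan_detach = my_detach,
+ * };
+ * ieee80211_scanner_register(IEEE80211_M_STA, &my_scan);
+ *
+ * where my_attach/my_detach are hypothetical callbacks; the module
+ * undoes this with ieee80211_scanner_unregister_all(&my_scan) at
+ * unload time.
+ */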
+#endif /* _NET80211_IEEE80211_SCAN_HH_ */
diff --git a/rtems/freebsd/net80211/ieee80211_scan_sta.c b/rtems/freebsd/net80211/ieee80211_scan_sta.c
new file mode 100644
index 00000000..456eed3f
--- /dev/null
+++ b/rtems/freebsd/net80211/ieee80211_scan_sta.c
@@ -0,0 +1,1928 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * IEEE 802.11 station scanning support.
+ */
+#include <rtems/freebsd/local/opt_wlan.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/module.h>
+
+#include <rtems/freebsd/sys/socket.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/if_media.h>
+#include <rtems/freebsd/net/ethernet.h>
+
+#include <rtems/freebsd/net80211/ieee80211_var.h>
+#include <rtems/freebsd/net80211/ieee80211_input.h>
+#include <rtems/freebsd/net80211/ieee80211_regdomain.h>
+#ifdef IEEE80211_SUPPORT_TDMA
+#include <rtems/freebsd/net80211/ieee80211_tdma.h>
+#endif
+#ifdef IEEE80211_SUPPORT_MESH
+#include <rtems/freebsd/net80211/ieee80211_mesh.h>
+#endif
+
+#include <rtems/freebsd/net/bpf.h>
+
+/*
+ * Parameters for managing cache entries:
+ *
+ * o a station with STA_FAILS_MAX failures is not considered
+ * when picking a candidate
+ * o a station that hasn't had an update in STA_PURGE_SCANS
+ * (background) scans is discarded
+ * o after STA_FAILS_AGE seconds we clear the failure count
+ */
+#define STA_FAILS_MAX 2 /* assoc failures before ignored */
+#define STA_FAILS_AGE (2*60) /* time before clearing fails (secs) */
+#define STA_PURGE_SCANS 2 /* age for purging entries (scans) */
+
+/* XXX tunable */
+#define STA_RSSI_MIN 8 /* min acceptable rssi */
+#define STA_RSSI_MAX 40 /* max rssi for comparison */
+
+struct sta_entry {
+ struct ieee80211_scan_entry base;
+ TAILQ_ENTRY(sta_entry) se_list;
+ LIST_ENTRY(sta_entry) se_hash;
+ uint8_t se_fails; /* failure to associate count */
+ uint8_t se_seen; /* seen during current scan */
+ uint8_t se_notseen; /* not seen in previous scans */
+ uint8_t se_flags;
+#define STA_DEMOTE11B 0x01 /* match w/ demoted 11b chan */
+ uint32_t se_avgrssi; /* LPF rssi state */
+ unsigned long se_lastupdate; /* time of last update */
+ unsigned long se_lastfail; /* time of last failure */
+ unsigned long se_lastassoc; /* time of last association */
+ u_int se_scangen; /* iterator scan gen# */
+ u_int se_countrygen; /* gen# of last cc notify */
+};
+
+#define STA_HASHSIZE 32
+/* a simple hash on the low octet of the macaddr is enough */
+#define STA_HASH(addr) \
+ (((const uint8_t *)(addr))[IEEE80211_ADDR_LEN - 1] % STA_HASHSIZE)
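+
+/*
+ * Worked example (illustrative): the hash is just the last octet of
+ * the macaddr modulo the table size, so 00:11:22:33:44:67 lands in
+ * bucket 0x67 % 32 = 7; typical address allocations vary mostly in
+ * the low octets, which gives enough spread for a cache this size.
+ */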
+
+#define MAX_IEEE_CHAN 256 /* max acceptable IEEE chan # */
+CTASSERT(MAX_IEEE_CHAN >= 256);
+
+struct sta_table {
+ ieee80211_scan_table_lock_t st_lock; /* on scan table */
+ TAILQ_HEAD(, sta_entry) st_entry; /* all entries */
+ LIST_HEAD(, sta_entry) st_hash[STA_HASHSIZE];
+ struct mtx st_scanlock; /* on st_scaniter */
+ u_int st_scaniter; /* gen# for iterator */
+ u_int st_scangen; /* scan generation # */
+ int st_newscan;
+ /* ap-related state */
+ int st_maxrssi[MAX_IEEE_CHAN];
+};
+
+static void sta_flush_table(struct sta_table *);
+/*
+ * match_bss returns a bitmask describing whether an entry is suitable
+ * for use. If non-zero the entry was deemed not suitable and its
+ * contents explain why. The following flags are or'd into this
+ * mask and can be used to figure out why the entry was rejected.
+ */
+#define MATCH_CHANNEL 0x00001 /* channel mismatch */
+#define MATCH_CAPINFO 0x00002 /* capabilities mismatch, e.g. no ess */
+#define MATCH_PRIVACY 0x00004 /* privacy mismatch */
+#define MATCH_RATE 0x00008 /* rate set mismatch */
+#define MATCH_SSID 0x00010 /* ssid mismatch */
+#define MATCH_BSSID 0x00020 /* bssid mismatch */
+#define MATCH_FAILS 0x00040 /* too many failed auth attempts */
+#define MATCH_NOTSEEN 0x00080 /* not seen in recent scans */
+#define MATCH_RSSI 0x00100 /* rssi deemed too low to use */
+#define MATCH_CC 0x00200 /* country code mismatch */
+#define MATCH_TDMA_NOIE 0x00400 /* no TDMA ie */
+#define MATCH_TDMA_NOTMASTER 0x00800 /* not TDMA master */
+#define MATCH_TDMA_NOSLOT 0x01000 /* all TDMA slots occupied */
+#define MATCH_TDMA_LOCAL 0x02000 /* local address */
+#define MATCH_TDMA_VERSION 0x04000 /* protocol version mismatch */
+#define MATCH_MESH_NOID 0x10000 /* no MESHID ie */
+#define MATCH_MESHID 0x20000 /* meshid mismatch */
+static int match_bss(struct ieee80211vap *,
+ const struct ieee80211_scan_state *, struct sta_entry *, int);
+static void adhoc_age(struct ieee80211_scan_state *);
+
+static __inline int
+isocmp(const uint8_t cc1[], const uint8_t cc2[])
+{
+ return (cc1[0] == cc2[0] && cc1[1] == cc2[1]);
+}
+
+/* number of references from net80211 layer */
+static int nrefs = 0;
+/*
+ * Module glue.
+ */
+IEEE80211_SCANNER_MODULE(sta, 1);
+
+/*
+ * Attach prior to any scanning work.
+ */
+static int
+sta_attach(struct ieee80211_scan_state *ss)
+{
+ struct sta_table *st;
+
+ st = (struct sta_table *) malloc(sizeof(struct sta_table),
+ M_80211_SCAN, M_NOWAIT | M_ZERO);
+ if (st == NULL)
+ return 0;
+ IEEE80211_SCAN_TABLE_LOCK_INIT(st, "scantable");
+ mtx_init(&st->st_scanlock, "scangen", "802.11 scangen", MTX_DEF);
+ TAILQ_INIT(&st->st_entry);
+ ss->ss_priv = st;
+ nrefs++; /* NB: we assume caller locking */
+ return 1;
+}
+
+/*
+ * Cleanup any private state.
+ */
+static int
+sta_detach(struct ieee80211_scan_state *ss)
+{
+ struct sta_table *st = ss->ss_priv;
+
+ if (st != NULL) {
+ sta_flush_table(st);
+ IEEE80211_SCAN_TABLE_LOCK_DESTROY(st);
+ mtx_destroy(&st->st_scanlock);
+ free(st, M_80211_SCAN);
+ KASSERT(nrefs > 0, ("imbalanced attach/detach"));
+ nrefs--; /* NB: we assume caller locking */
+ }
+ return 1;
+}
+
+/*
+ * Flush all per-scan state.
+ */
+static int
+sta_flush(struct ieee80211_scan_state *ss)
+{
+ struct sta_table *st = ss->ss_priv;
+
+ IEEE80211_SCAN_TABLE_LOCK(st);
+ sta_flush_table(st);
+ IEEE80211_SCAN_TABLE_UNLOCK(st);
+ ss->ss_last = 0;
+ return 0;
+}
+
+/*
+ * Flush all entries in the scan cache.
+ */
+static void
+sta_flush_table(struct sta_table *st)
+{
+ struct sta_entry *se, *next;
+
+ TAILQ_FOREACH_SAFE(se, &st->st_entry, se_list, next) {
+ TAILQ_REMOVE(&st->st_entry, se, se_list);
+ LIST_REMOVE(se, se_hash);
+ ieee80211_ies_cleanup(&se->base.se_ies);
+ free(se, M_80211_SCAN);
+ }
+ memset(st->st_maxrssi, 0, sizeof(st->st_maxrssi));
+}
+
+/*
+ * Process a beacon or probe response frame; create an
+ * entry in the scan cache or update any previous entry.
+ */
+static int
+sta_add(struct ieee80211_scan_state *ss,
+ const struct ieee80211_scanparams *sp,
+ const struct ieee80211_frame *wh,
+ int subtype, int rssi, int noise)
+{
+#define ISPROBE(_st) ((_st) == IEEE80211_FC0_SUBTYPE_PROBE_RESP)
+#define PICK1ST(_ss) \
+ ((ss->ss_flags & (IEEE80211_SCAN_PICK1ST | IEEE80211_SCAN_GOTPICK)) == \
+ IEEE80211_SCAN_PICK1ST)
+ struct sta_table *st = ss->ss_priv;
+ const uint8_t *macaddr = wh->i_addr2;
+ struct ieee80211vap *vap = ss->ss_vap;
+ struct ieee80211com *ic = vap->iv_ic;
+ struct sta_entry *se;
+ struct ieee80211_scan_entry *ise;
+ int hash;
+
+ hash = STA_HASH(macaddr);
+
+ IEEE80211_SCAN_TABLE_LOCK(st);
+ LIST_FOREACH(se, &st->st_hash[hash], se_hash)
+ if (IEEE80211_ADDR_EQ(se->base.se_macaddr, macaddr))
+ goto found;
+ se = (struct sta_entry *) malloc(sizeof(struct sta_entry),
+ M_80211_SCAN, M_NOWAIT | M_ZERO);
+ if (se == NULL) {
+ IEEE80211_SCAN_TABLE_UNLOCK(st);
+ return 0;
+ }
+ se->se_scangen = st->st_scaniter-1;
+ se->se_avgrssi = IEEE80211_RSSI_DUMMY_MARKER;
+ IEEE80211_ADDR_COPY(se->base.se_macaddr, macaddr);
+ TAILQ_INSERT_TAIL(&st->st_entry, se, se_list);
+ LIST_INSERT_HEAD(&st->st_hash[hash], se, se_hash);
+found:
+ ise = &se->base;
+ /* XXX ap beaconing multiple ssid w/ same bssid */
+ if (sp->ssid[1] != 0 &&
+ (ISPROBE(subtype) || ise->se_ssid[1] == 0))
+ memcpy(ise->se_ssid, sp->ssid, 2+sp->ssid[1]);
+ KASSERT(sp->rates[1] <= IEEE80211_RATE_MAXSIZE,
+ ("rate set too large: %u", sp->rates[1]));
+ memcpy(ise->se_rates, sp->rates, 2+sp->rates[1]);
+ if (sp->xrates != NULL) {
+ /* XXX validate xrates[1] */
+ KASSERT(sp->xrates[1] <= IEEE80211_RATE_MAXSIZE,
+ ("xrate set too large: %u", sp->xrates[1]));
+ memcpy(ise->se_xrates, sp->xrates, 2+sp->xrates[1]);
+ } else
+ ise->se_xrates[1] = 0;
+ IEEE80211_ADDR_COPY(ise->se_bssid, wh->i_addr3);
+ if ((sp->status & IEEE80211_BPARSE_OFFCHAN) == 0) {
+ /*
+ * Record rssi data using extended precision LPF filter.
+ *
+ * NB: use only on-channel data to ensure we get a good
+ * estimate of the signal we'll see when associated.
+ */
+ IEEE80211_RSSI_LPF(se->se_avgrssi, rssi);
+ ise->se_rssi = IEEE80211_RSSI_GET(se->se_avgrssi);
+ ise->se_noise = noise;
+ }
+ memcpy(ise->se_tstamp.data, sp->tstamp, sizeof(ise->se_tstamp));
+ ise->se_intval = sp->bintval;
+ ise->se_capinfo = sp->capinfo;
+#ifdef IEEE80211_SUPPORT_MESH
+ if (sp->meshid != NULL && sp->meshid[1] != 0)
+ memcpy(ise->se_meshid, sp->meshid, 2+sp->meshid[1]);
+#endif
+ /*
+ * Beware of overriding se_chan for frames seen
+ * off-channel; this can cause us to attempt an
+ * association on the wrong channel.
+ */
+ if (sp->status & IEEE80211_BPARSE_OFFCHAN) {
+ struct ieee80211_channel *c;
+ /*
+ * Off-channel, locate the home/bss channel for the sta
+ * using the value broadcast in the DSPARMS ie. We know
+ * sp->chan has this value because it's used to calculate
+ * IEEE80211_BPARSE_OFFCHAN.
+ */
+ c = ieee80211_find_channel_byieee(ic, sp->chan,
+ ic->ic_curchan->ic_flags);
+ if (c != NULL) {
+ ise->se_chan = c;
+ } else if (ise->se_chan == NULL) {
+ /* should not happen, pick something */
+ ise->se_chan = ic->ic_curchan;
+ }
+ } else
+ ise->se_chan = ic->ic_curchan;
+ ise->se_fhdwell = sp->fhdwell;
+ ise->se_fhindex = sp->fhindex;
+ ise->se_erp = sp->erp;
+ ise->se_timoff = sp->timoff;
+ if (sp->tim != NULL) {
+ const struct ieee80211_tim_ie *tim =
+ (const struct ieee80211_tim_ie *) sp->tim;
+ ise->se_dtimperiod = tim->tim_period;
+ }
+ if (sp->country != NULL) {
+ const struct ieee80211_country_ie *cie =
+ (const struct ieee80211_country_ie *) sp->country;
+ /*
+ * If 11d is enabled and we're attempting to join a bss
+ * that advertises its country code then compare our
+ * current settings to what we fetched from the country ie.
+ * If our country code is unspecified or different then
+ * dispatch an event to user space that identifies the
+ * country code so our regdomain config can be changed.
+ */
+ /* XXX only for STA mode? */
+ if ((IEEE80211_IS_CHAN_11D(ise->se_chan) ||
+ (vap->iv_flags_ext & IEEE80211_FEXT_DOTD)) &&
+ (ic->ic_regdomain.country == CTRY_DEFAULT ||
+ !isocmp(cie->cc, ic->ic_regdomain.isocc))) {
+ /* only issue one notify event per scan */
+ if (se->se_countrygen != st->st_scangen) {
+ ieee80211_notify_country(vap, ise->se_bssid,
+ cie->cc);
+ se->se_countrygen = st->st_scangen;
+ }
+ }
+ ise->se_cc[0] = cie->cc[0];
+ ise->se_cc[1] = cie->cc[1];
+ }
+ /* NB: no need to set up ie ptrs; they are not (currently) used */
+ (void) ieee80211_ies_init(&ise->se_ies, sp->ies, sp->ies_len);
+
+ /* clear failure count after STA_FAILS_AGE passes */
+ if (se->se_fails && (ticks - se->se_lastfail) > STA_FAILS_AGE*hz) {
+ se->se_fails = 0;
+ IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_SCAN, macaddr,
+ "%s: fails %u", __func__, se->se_fails);
+ }
+
+ se->se_lastupdate = ticks; /* update time */
+ se->se_seen = 1;
+ se->se_notseen = 0;
+
+ KASSERT(sizeof(sp->bchan) == 1, ("bchan size"));
+ if (rssi > st->st_maxrssi[sp->bchan])
+ st->st_maxrssi[sp->bchan] = rssi;
+
+ IEEE80211_SCAN_TABLE_UNLOCK(st);
+
+ /*
+ * If looking for a quick choice and nothing's
+ * been found check here.
+ */
+ if (PICK1ST(ss) && match_bss(vap, ss, se, IEEE80211_MSG_SCAN) == 0)
+ ss->ss_flags |= IEEE80211_SCAN_GOTPICK;
+
+ return 1;
+#undef PICK1ST
+#undef ISPROBE
+}
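+
+/*
+ * Sketch of the averaging done in sta_add() above (illustrative,
+ * not compiled): IEEE80211_RSSI_LPF keeps an exponential moving
+ * average; assuming a filter length N and ignoring the fixed-point
+ * scaling the real macro carries, each on-channel sample updates
+ * the state roughly as follows.
+ */
+#if 0
+static uint32_t
+rssi_lpf_sketch(uint32_t avg, int rssi, int primed)
+{
+ const int N = 10; /* assumed filter length */
+
+ if (!primed) /* first sample primes the filter */
+ return rssi;
+ return (avg * (N - 1) + rssi) / N;
+}
+#endif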
+
+/*
+ * Check if a channel is excluded by user request.
+ */
+static int
+isexcluded(struct ieee80211vap *vap, const struct ieee80211_channel *c)
+{
+ return (isclr(vap->iv_ic->ic_chan_active, c->ic_ieee) ||
+ (vap->iv_des_chan != IEEE80211_CHAN_ANYC &&
+ c->ic_freq != vap->iv_des_chan->ic_freq));
+}
+
+static struct ieee80211_channel *
+find11gchannel(struct ieee80211com *ic, int i, int freq)
+{
+ struct ieee80211_channel *c;
+ int j;
+
+ /*
+ * The normal ordering in the channel list is b channel
+ * immediately followed by g so optimize the search for
+ * this. We'll still do a full search just in case.
+ */
+ for (j = i+1; j < ic->ic_nchans; j++) {
+ c = &ic->ic_channels[j];
+ if (c->ic_freq == freq && IEEE80211_IS_CHAN_G(c))
+ return c;
+ }
+ for (j = 0; j < i; j++) {
+ c = &ic->ic_channels[j];
+ if (c->ic_freq == freq && IEEE80211_IS_CHAN_G(c))
+ return c;
+ }
+ return NULL;
+}
+
+static const u_int chanflags[IEEE80211_MODE_MAX] = {
+ [IEEE80211_MODE_AUTO] = IEEE80211_CHAN_B,
+ [IEEE80211_MODE_11A] = IEEE80211_CHAN_A,
+ [IEEE80211_MODE_11B] = IEEE80211_CHAN_B,
+ [IEEE80211_MODE_11G] = IEEE80211_CHAN_G,
+ [IEEE80211_MODE_FH] = IEEE80211_CHAN_FHSS,
+ /* check base channel */
+ [IEEE80211_MODE_TURBO_A] = IEEE80211_CHAN_A,
+ [IEEE80211_MODE_TURBO_G] = IEEE80211_CHAN_G,
+ [IEEE80211_MODE_STURBO_A] = IEEE80211_CHAN_ST,
+ [IEEE80211_MODE_HALF] = IEEE80211_CHAN_HALF,
+ [IEEE80211_MODE_QUARTER] = IEEE80211_CHAN_QUARTER,
+ /* check legacy */
+ [IEEE80211_MODE_11NA] = IEEE80211_CHAN_A,
+ [IEEE80211_MODE_11NG] = IEEE80211_CHAN_G,
+};
+
+static void
+add_channels(struct ieee80211vap *vap,
+ struct ieee80211_scan_state *ss,
+ enum ieee80211_phymode mode, const uint16_t freq[], int nfreq)
+{
+#define N(a) (sizeof(a) / sizeof(a[0]))
+ struct ieee80211com *ic = vap->iv_ic;
+ struct ieee80211_channel *c, *cg;
+ u_int modeflags;
+ int i;
+
+ KASSERT(mode < N(chanflags), ("Unexpected mode %u", mode));
+ modeflags = chanflags[mode];
+ for (i = 0; i < nfreq; i++) {
+ if (ss->ss_last >= IEEE80211_SCAN_MAX)
+ break;
+
+ c = ieee80211_find_channel(ic, freq[i], modeflags);
+ if (c == NULL || isexcluded(vap, c))
+ continue;
+ if (mode == IEEE80211_MODE_AUTO) {
+ /*
+ * XXX special-case 11b/g channels so we select
+ * the g channel if both are present.
+ */
+ if (IEEE80211_IS_CHAN_B(c) &&
+ (cg = find11gchannel(ic, i, c->ic_freq)) != NULL)
+ c = cg;
+ }
+ ss->ss_chans[ss->ss_last++] = c;
+ }
+#undef N
+}
+
+struct scanlist {
+ uint16_t mode;
+ uint16_t count;
+ const uint16_t *list;
+};
+
+static int
+checktable(const struct scanlist *scan, const struct ieee80211_channel *c)
+{
+ int i;
+
+ for (; scan->list != NULL; scan++) {
+ for (i = 0; i < scan->count; i++)
+ if (scan->list[i] == c->ic_freq)
+ return 1;
+ }
+ return 0;
+}
+
+static int
+onscanlist(const struct ieee80211_scan_state *ss,
+ const struct ieee80211_channel *c)
+{
+ int i;
+
+ for (i = 0; i < ss->ss_last; i++)
+ if (ss->ss_chans[i] == c)
+ return 1;
+ return 0;
+}
+
+static void
+sweepchannels(struct ieee80211_scan_state *ss, struct ieee80211vap *vap,
+ const struct scanlist table[])
+{
+ struct ieee80211com *ic = vap->iv_ic;
+ struct ieee80211_channel *c;
+ int i;
+
+ for (i = 0; i < ic->ic_nchans; i++) {
+ if (ss->ss_last >= IEEE80211_SCAN_MAX)
+ break;
+
+ c = &ic->ic_channels[i];
+ /*
+ * Ignore dynamic turbo channels; we scan them
+ * in normal mode (i.e. not boosted). Likewise
+ * for HT channels, they get scanned using
+ * legacy rates.
+ */
+ if (IEEE80211_IS_CHAN_DTURBO(c) || IEEE80211_IS_CHAN_HT(c))
+ continue;
+
+ /*
+ * If a desired mode was specified, scan only
+ * channels that satisfy that constraint.
+ */
+ if (vap->iv_des_mode != IEEE80211_MODE_AUTO &&
+ vap->iv_des_mode != ieee80211_chan2mode(c))
+ continue;
+
+ /*
+ * Skip channels excluded by user request.
+ */
+ if (isexcluded(vap, c))
+ continue;
+
+ /*
+ * Add the channel unless it is listed in the
+ * fixed scan order tables. This ensures we
+ * don't sweep back in channels we filtered out
+ * above.
+ */
+ if (checktable(table, c))
+ continue;
+
+ /* Add channel to scanning list. */
+ ss->ss_chans[ss->ss_last++] = c;
+ }
+ /*
+ * Explicitly add any desired channel if:
+ * - not already on the scan list
+ * - allowed by any desired mode constraint
+ * - there is space in the scan list
+ * This allows the channel to be used when the filtering
+ * mechanisms would otherwise elide it (e.g. HT, turbo).
+ */
+ c = vap->iv_des_chan;
+ if (c != IEEE80211_CHAN_ANYC &&
+ !onscanlist(ss, c) &&
+ (vap->iv_des_mode == IEEE80211_MODE_AUTO ||
+ vap->iv_des_mode == ieee80211_chan2mode(c)) &&
+ ss->ss_last < IEEE80211_SCAN_MAX)
+ ss->ss_chans[ss->ss_last++] = c;
+}
+
+static void
+makescanlist(struct ieee80211_scan_state *ss, struct ieee80211vap *vap,
+ const struct scanlist table[])
+{
+ const struct scanlist *scan;
+ enum ieee80211_phymode mode;
+
+ ss->ss_last = 0;
+ /*
+ * Use the table of ordered channels to construct the list
+ * of channels for scanning. Any channels in the ordered
+ * list not in the master list will be discarded.
+ */
+ for (scan = table; scan->list != NULL; scan++) {
+ mode = scan->mode;
+ if (vap->iv_des_mode != IEEE80211_MODE_AUTO) {
+ /*
+ * If a desired mode was specified, scan only
+ * channels that satisfy that constraint.
+ */
+ if (vap->iv_des_mode != mode) {
+ /*
+ * The scan table marks 2.4GHz channels as b
+ * so if the desired mode is 11g, then use
+ * the 11b channel list but upgrade the mode.
+ */
+ if (vap->iv_des_mode != IEEE80211_MODE_11G ||
+ mode != IEEE80211_MODE_11B)
+ continue;
+ mode = IEEE80211_MODE_11G; /* upgrade */
+ }
+ } else {
+ /*
+ * This lets add_channels upgrade an 11b channel
+ * to 11g if available.
+ */
+ if (mode == IEEE80211_MODE_11B)
+ mode = IEEE80211_MODE_AUTO;
+ }
+#ifdef IEEE80211_F_XR
+ /* XR does not operate on turbo channels */
+ if ((vap->iv_flags & IEEE80211_F_XR) &&
+ (mode == IEEE80211_MODE_TURBO_A ||
+ mode == IEEE80211_MODE_TURBO_G ||
+ mode == IEEE80211_MODE_STURBO_A))
+ continue;
+#endif
+ /*
+ * Add the list of the channels; any that are not
+ * in the master channel list will be discarded.
+ */
+ add_channels(vap, ss, mode, scan->list, scan->count);
+ }
+
+ /*
+ * Add the channels from the ic that are not present
+ * in the table.
+ */
+ sweepchannels(ss, vap, table);
+}
+
+static const uint16_t rcl1[] = /* 8 FCC channels: 52, 56, 60, 64, 36, 40, 44, 48 */
+{ 5260, 5280, 5300, 5320, 5180, 5200, 5220, 5240 };
+static const uint16_t rcl2[] = /* 4 MKK channels: 34, 38, 42, 46 */
+{ 5170, 5190, 5210, 5230 };
+static const uint16_t rcl3[] = /* 2.4GHz ch: 1,6,11,7,13 */
+{ 2412, 2437, 2462, 2442, 2472 };
+static const uint16_t rcl4[] = /* 5 FCC channels: 149, 153, 157, 161, 165 */
+{ 5745, 5765, 5785, 5805, 5825 };
+static const uint16_t rcl7[] = /* 11 ETSI channels: 100,104,108,112,116,120,124,128,132,136,140 */
+{ 5500, 5520, 5540, 5560, 5580, 5600, 5620, 5640, 5660, 5680, 5700 };
+static const uint16_t rcl8[] = /* 2.4GHz ch: 2,3,4,5,8,9,10,12 */
+{ 2417, 2422, 2427, 2432, 2447, 2452, 2457, 2467 };
+static const uint16_t rcl9[] = /* 2.4GHz ch: 14 */
+{ 2484 };
+static const uint16_t rcl10[] = /* Added Korean channels 2312-2372 */
+{ 2312, 2317, 2322, 2327, 2332, 2337, 2342, 2347, 2352, 2357, 2362, 2367, 2372 };
+static const uint16_t rcl11[] = /* Added Japan channels in 4.9/5.0 spectrum */
+{ 5040, 5060, 5080, 4920, 4940, 4960, 4980 };
+#ifdef ATH_TURBO_SCAN
+static const uint16_t rcl5[] = /* 3 static turbo channels */
+{ 5210, 5250, 5290 };
+static const uint16_t rcl6[] = /* 2 static turbo channels */
+{ 5760, 5800 };
+static const uint16_t rcl6x[] = /* 4 FCC3 turbo channels */
+{ 5540, 5580, 5620, 5660 };
+static const uint16_t rcl12[] = /* 2.4GHz Turbo channel 6 */
+{ 2437 };
+static const uint16_t rcl13[] = /* dynamic Turbo channels */
+{ 5200, 5240, 5280, 5765, 5805 };
+#endif /* ATH_TURBO_SCAN */
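+
+/*
+ * Sketch (illustrative, not compiled): the frequency lists above
+ * follow the usual MHz-to-IEEE-channel conventions; net80211's
+ * ieee80211_mhz2ieee() is the real conversion. For the common bands:
+ */
+#if 0
+static int
+mhz2chan_sketch(int freq)
+{
+ if (freq == 2484) /* Japan channel 14 */
+ return 14;
+ if (freq >= 2412 && freq < 2484) /* 2.4GHz: 2412 -> 1 */
+ return (freq - 2407) / 5;
+ if (freq >= 5000) /* 5GHz: 5180 -> 36 */
+ return (freq - 5000) / 5;
+ return 0; /* outside this sketch */
+}
+#endif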
+
+#define X(a) .count = sizeof(a)/sizeof(a[0]), .list = a
+
+static const struct scanlist staScanTable[] = {
+ { IEEE80211_MODE_11B, X(rcl3) },
+ { IEEE80211_MODE_11A, X(rcl1) },
+ { IEEE80211_MODE_11A, X(rcl2) },
+ { IEEE80211_MODE_11B, X(rcl8) },
+ { IEEE80211_MODE_11B, X(rcl9) },
+ { IEEE80211_MODE_11A, X(rcl4) },
+#ifdef ATH_TURBO_SCAN
+ { IEEE80211_MODE_STURBO_A, X(rcl5) },
+ { IEEE80211_MODE_STURBO_A, X(rcl6) },
+ { IEEE80211_MODE_TURBO_A, X(rcl6x) },
+ { IEEE80211_MODE_TURBO_A, X(rcl13) },
+#endif /* ATH_TURBO_SCAN */
+ { IEEE80211_MODE_11A, X(rcl7) },
+ { IEEE80211_MODE_11B, X(rcl10) },
+ { IEEE80211_MODE_11A, X(rcl11) },
+#ifdef ATH_TURBO_SCAN
+ { IEEE80211_MODE_TURBO_G, X(rcl12) },
+#endif /* ATH_TURBO_SCAN */
+ { .list = NULL }
+};
+
+/*
+ * Start a station-mode scan by populating the channel list.
+ */
+static int
+sta_start(struct ieee80211_scan_state *ss, struct ieee80211vap *vap)
+{
+ struct sta_table *st = ss->ss_priv;
+
+ makescanlist(ss, vap, staScanTable);
+
+ if (ss->ss_mindwell == 0)
+ ss->ss_mindwell = msecs_to_ticks(20); /* 20ms */
+ if (ss->ss_maxdwell == 0)
+ ss->ss_maxdwell = msecs_to_ticks(200); /* 200ms */
+
+ st->st_scangen++;
+ st->st_newscan = 1;
+
+ return 0;
+}
+
+/*
+ * Restart a scan, typically a bg scan but can
+ * also be a fg scan that came up empty.
+ */
+static int
+sta_restart(struct ieee80211_scan_state *ss, struct ieee80211vap *vap)
+{
+ struct sta_table *st = ss->ss_priv;
+
+ st->st_newscan = 1;
+ return 0;
+}
+
+/*
+ * Cancel an ongoing scan.
+ */
+static int
+sta_cancel(struct ieee80211_scan_state *ss, struct ieee80211vap *vap)
+{
+ return 0;
+}
+
+/* unaligned little-endian access */
+#define LE_READ_2(p) \
+ ((uint16_t) \
+ ((((const uint8_t *)(p))[0] ) | \
+ (((const uint8_t *)(p))[1] << 8)))
+
+/*
+ * Demote any supplied 11g channel to 11b. There should
+ * always be an 11b channel but we check anyway...
+ */
+static struct ieee80211_channel *
+demote11b(struct ieee80211vap *vap, struct ieee80211_channel *chan)
+{
+ struct ieee80211_channel *c;
+
+ if (IEEE80211_IS_CHAN_ANYG(chan) &&
+ vap->iv_des_mode == IEEE80211_MODE_AUTO) {
+ c = ieee80211_find_channel(vap->iv_ic, chan->ic_freq,
+ (chan->ic_flags &~ (IEEE80211_CHAN_PUREG | IEEE80211_CHAN_G)) |
+ IEEE80211_CHAN_B);
+ if (c != NULL)
+ chan = c;
+ }
+ return chan;
+}
+
+static int
+maxrate(const struct ieee80211_scan_entry *se)
+{
+ const struct ieee80211_ie_htcap *htcap =
+ (const struct ieee80211_ie_htcap *) se->se_ies.htcap_ie;
+ int rmax, r, i;
+ uint16_t caps;
+
+ rmax = 0;
+ if (htcap != NULL) {
+ /*
+ * HT station; inspect supported MCS and then adjust
+ * rate by channel width. Could also include short GI
+ * in this if we want to be extra accurate.
+ */
+ /* XXX assumes MCS15 is max */
+ for (i = 15; i >= 0 && isclr(htcap->hc_mcsset, i); i--)
+ ;
+ if (i >= 0) {
+ caps = LE_READ_2(&htcap->hc_cap);
+ /* XXX short/long GI */
+ if (caps & IEEE80211_HTCAP_CHWIDTH40)
+ rmax = ieee80211_htrates[i].ht40_rate_400ns;
+ else
+ rmax = ieee80211_htrates[i].ht20_rate_800ns;
+ }
+ }
+ for (i = 0; i < se->se_rates[1]; i++) {
+ r = se->se_rates[2+i] & IEEE80211_RATE_VAL;
+ if (r > rmax)
+ rmax = r;
+ }
+ for (i = 0; i < se->se_xrates[1]; i++) {
+ r = se->se_xrates[2+i] & IEEE80211_RATE_VAL;
+ if (r > rmax)
+ rmax = r;
+ }
+ return rmax;
+}
+
+/*
+ * Compare the capabilities of two entries and decide which is
+ * more desirable (return >0 if a is considered better). Note
+ * that we assume compatibility/usability has already been checked
+ * so we don't need to (e.g. validate whether privacy is supported).
+ * Used to select the best scan candidate for association in a BSS.
+ */
+static int
+sta_compare(const struct sta_entry *a, const struct sta_entry *b)
+{
+#define PREFER(_a,_b,_what) do { \
+ if (((_a) ^ (_b)) & (_what)) \
+ return ((_a) & (_what)) ? 1 : -1; \
+} while (0)
+ int maxa, maxb;
+ int8_t rssia, rssib;
+ int weight;
+
+ /* privacy support */
+ PREFER(a->base.se_capinfo, b->base.se_capinfo,
+ IEEE80211_CAPINFO_PRIVACY);
+
+ /* compare count of previous failures */
+ weight = b->se_fails - a->se_fails;
+ if (abs(weight) > 1)
+ return weight;
+
+ /*
+ * Compare rssi. If the two are considered equivalent
+ * then fall back to other criteria. We threshold the
+ * comparisons to avoid selecting an ap purely by rssi
+ * when both values may be good but one ap is otherwise
+ * more desirable (e.g. an 11b-only ap with stronger
+ * signal than an 11g ap).
+ */
+ rssia = MIN(a->base.se_rssi, STA_RSSI_MAX);
+ rssib = MIN(b->base.se_rssi, STA_RSSI_MAX);
+ if (abs(rssib - rssia) < 5) {
+ /* best/max rate preferred if signal level close enough XXX */
+ maxa = maxrate(&a->base);
+ maxb = maxrate(&b->base);
+ if (maxa != maxb)
+ return maxa - maxb;
+ /* XXX use freq for channel preference */
+ /* for now just prefer 5GHz band to all other bands */
+ PREFER(IEEE80211_IS_CHAN_5GHZ(a->base.se_chan),
+ IEEE80211_IS_CHAN_5GHZ(b->base.se_chan), 1);
+ }
+ /* all things being equal, use signal level */
+ return a->base.se_rssi - b->base.se_rssi;
+#undef PREFER
+}
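+
+/*
+ * Worked example of the ordering above (illustrative): given two
+ * usable entries, one privacy-capable with rssi 35 and one open
+ * with rssi 38, the PREFER() on IEEE80211_CAPINFO_PRIVACY decides
+ * first and the privacy-capable bss wins despite the lower rssi;
+ * rssi only matters between otherwise comparable candidates, and
+ * even then differences under 5 fall through to max rate and band.
+ */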
+
+/*
+ * Check rate set suitability and return the best supported rate.
+ * XXX inspect MCS for HT
+ */
+static int
+check_rate(struct ieee80211vap *vap, const struct ieee80211_channel *chan,
+ const struct ieee80211_scan_entry *se)
+{
+#define RV(v) ((v) & IEEE80211_RATE_VAL)
+ const struct ieee80211_rateset *srs;
+ int i, j, nrs, r, okrate, badrate, fixedrate, ucastrate;
+ const uint8_t *rs;
+
+ okrate = badrate = 0;
+
+ srs = ieee80211_get_suprates(vap->iv_ic, chan);
+ nrs = se->se_rates[1];
+ rs = se->se_rates+2;
+ /* XXX MCS */
+ ucastrate = vap->iv_txparms[ieee80211_chan2mode(chan)].ucastrate;
+ fixedrate = IEEE80211_FIXED_RATE_NONE;
+again:
+ for (i = 0; i < nrs; i++) {
+ r = RV(rs[i]);
+ badrate = r;
+ /*
+ * Check any fixed rate is included.
+ */
+ if (r == ucastrate)
+ fixedrate = r;
+ /*
+ * Check against our supported rates.
+ */
+ for (j = 0; j < srs->rs_nrates; j++)
+ if (r == RV(srs->rs_rates[j])) {
+ if (r > okrate) /* NB: track max */
+ okrate = r;
+ break;
+ }
+
+ if (j == srs->rs_nrates && (rs[i] & IEEE80211_RATE_BASIC)) {
+ /*
+ * Don't try joining a BSS if we don't support
+ * one of its basic rates.
+ */
+ okrate = 0;
+ goto back;
+ }
+ }
+ if (rs == se->se_rates+2) {
+ /* scan xrates too; sort of an algol68-style for loop */
+ nrs = se->se_xrates[1];
+ rs = se->se_xrates+2;
+ goto again;
+ }
+
+back:
+ if (okrate == 0 || ucastrate != fixedrate)
+ return badrate | IEEE80211_RATE_BASIC;
+ else
+ return RV(okrate);
+#undef RV
+}
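+
+/*
+ * Example of the rate encoding handled above (illustrative): rate
+ * bytes are in 0.5 Mb/s units with the high bit marking a basic
+ * rate, so 0x96 is basic 11 Mb/s and 0x0c is optional 6 Mb/s; a
+ * basic rate with no match in our supported set vetoes the bss,
+ * while unmatched optional rates are simply ignored.
+ */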
+
+static __inline int
+match_id(const uint8_t *ie, const uint8_t *val, int len)
+{
+ return (ie[1] == len && memcmp(ie+2, val, len) == 0);
+}
+
+static int
+match_ssid(const uint8_t *ie,
+ int nssid, const struct ieee80211_scan_ssid ssids[])
+{
+ int i;
+
+ for (i = 0; i < nssid; i++) {
+ if (match_id(ie, ssids[i].ssid, ssids[i].len))
+ return 1;
+ }
+ return 0;
+}
+
+#ifdef IEEE80211_SUPPORT_TDMA
+static int
+tdma_isfull(const struct ieee80211_tdma_param *tdma)
+{
+ int slot, slotcnt;
+
+ slotcnt = tdma->tdma_slotcnt;
+ for (slot = slotcnt-1; slot >= 0; slot--)
+ if (isclr(tdma->tdma_inuse, slot))
+ return 0;
+ return 1;
+}
+#endif /* IEEE80211_SUPPORT_TDMA */
+
+/*
+ * Test a scan candidate for suitability/compatibility.
+ */
+static int
+match_bss(struct ieee80211vap *vap,
+ const struct ieee80211_scan_state *ss, struct sta_entry *se0,
+ int debug)
+{
+ struct ieee80211com *ic = vap->iv_ic;
+ struct ieee80211_scan_entry *se = &se0->base;
+ uint8_t rate;
+ int fail;
+
+ fail = 0;
+ if (isclr(ic->ic_chan_active, ieee80211_chan2ieee(ic, se->se_chan)))
+ fail |= MATCH_CHANNEL;
+ /*
+ * NB: normally the desired mode is used to construct
+ * the channel list, but it's possible for the scan
+ * cache to include entries for stations outside this
+ * list so we check the desired mode here to weed them
+ * out.
+ */
+ if (vap->iv_des_mode != IEEE80211_MODE_AUTO &&
+ (se->se_chan->ic_flags & IEEE80211_CHAN_ALLTURBO) !=
+ chanflags[vap->iv_des_mode])
+ fail |= MATCH_CHANNEL;
+ if (vap->iv_opmode == IEEE80211_M_IBSS) {
+ if ((se->se_capinfo & IEEE80211_CAPINFO_IBSS) == 0)
+ fail |= MATCH_CAPINFO;
+#ifdef IEEE80211_SUPPORT_TDMA
+ } else if (vap->iv_opmode == IEEE80211_M_AHDEMO) {
+ /*
+ * Adhoc demo network setup shouldn't really be scanning,
+ * but just in case, skip stations operating in IBSS or
+ * BSS mode.
+ */
+ if (se->se_capinfo & (IEEE80211_CAPINFO_IBSS|IEEE80211_CAPINFO_ESS))
+ fail |= MATCH_CAPINFO;
+ /*
+ * TDMA operation cannot coexist with a normal 802.11 network;
+ * skip if IBSS or ESS capabilities are marked and require
+ * the beacon have a TDMA ie present.
+ */
+ if (vap->iv_caps & IEEE80211_C_TDMA) {
+ const struct ieee80211_tdma_param *tdma =
+ (const struct ieee80211_tdma_param *)se->se_ies.tdma_ie;
+ const struct ieee80211_tdma_state *ts = vap->iv_tdma;
+
+ if (tdma == NULL)
+ fail |= MATCH_TDMA_NOIE;
+ else if (tdma->tdma_version != ts->tdma_version)
+ fail |= MATCH_TDMA_VERSION;
+ else if (tdma->tdma_slot != 0)
+ fail |= MATCH_TDMA_NOTMASTER;
+ else if (tdma_isfull(tdma))
+ fail |= MATCH_TDMA_NOSLOT;
+#if 0
+ else if (ieee80211_local_address(se->se_macaddr))
+ fail |= MATCH_TDMA_LOCAL;
+#endif
+ }
+#endif /* IEEE80211_SUPPORT_TDMA */
+#ifdef IEEE80211_SUPPORT_MESH
+ } else if (vap->iv_opmode == IEEE80211_M_MBSS) {
+ const struct ieee80211_mesh_state *ms = vap->iv_mesh;
+ /*
+ * Mesh nodes have IBSS & ESS bits in capinfo turned off
+ * and two special ie's that must be present.
+ */
+ if (se->se_capinfo & (IEEE80211_CAPINFO_IBSS|IEEE80211_CAPINFO_ESS))
+ fail |= MATCH_CAPINFO;
+ else if (se->se_meshid[0] != IEEE80211_ELEMID_MESHID)
+ fail |= MATCH_MESH_NOID;
+ else if (ms->ms_idlen != 0 &&
+ !match_id(se->se_meshid, ms->ms_id, ms->ms_idlen))
+ fail |= MATCH_MESHID;
+#endif
+ } else {
+ if ((se->se_capinfo & IEEE80211_CAPINFO_ESS) == 0)
+ fail |= MATCH_CAPINFO;
+ /*
+ * If 11d is enabled and we're attempting to join a bss
+ * that advertises its country code then compare our
+ * current settings to what we fetched from the country ie.
+ * If our country code is unspecified or different then do
+ * not attempt to join the bss. We should have already
+ * dispatched an event to user space that identifies the
+ * new country code so our regdomain config should match.
+ */
+ if ((IEEE80211_IS_CHAN_11D(se->se_chan) ||
+ (vap->iv_flags_ext & IEEE80211_FEXT_DOTD)) &&
+ se->se_cc[0] != 0 &&
+ (ic->ic_regdomain.country == CTRY_DEFAULT ||
+ !isocmp(se->se_cc, ic->ic_regdomain.isocc)))
+ fail |= MATCH_CC;
+ }
+ if (vap->iv_flags & IEEE80211_F_PRIVACY) {
+ if ((se->se_capinfo & IEEE80211_CAPINFO_PRIVACY) == 0)
+ fail |= MATCH_PRIVACY;
+ } else {
+ /* XXX does this mean privacy is supported or required? */
+ if (se->se_capinfo & IEEE80211_CAPINFO_PRIVACY)
+ fail |= MATCH_PRIVACY;
+ }
+ se0->se_flags &= ~STA_DEMOTE11B;
+ rate = check_rate(vap, se->se_chan, se);
+ if (rate & IEEE80211_RATE_BASIC) {
+ fail |= MATCH_RATE;
+ /*
+ * An 11b-only ap will give a rate mismatch if there is an
+ * OFDM fixed tx rate for 11g. Try downgrading the channel
+ * in the scan list to 11b and retry the rate check.
+ */
+ if (IEEE80211_IS_CHAN_ANYG(se->se_chan)) {
+ rate = check_rate(vap, demote11b(vap, se->se_chan), se);
+ if ((rate & IEEE80211_RATE_BASIC) == 0) {
+ fail &= ~MATCH_RATE;
+ se0->se_flags |= STA_DEMOTE11B;
+ }
+ }
+ } else if (rate < 2*24) {
+ /*
+ * This is an 11b-only ap. Check the desired mode in
+ * case that needs to be honored (mode 11g filters out
+ * 11b-only ap's). Otherwise force any 11g channel used
+ * in scanning to be demoted.
+ *
+ * NB: we cheat a bit here by looking at the max rate;
+ * we could/should check the rates.
+ */
+ if (!(vap->iv_des_mode == IEEE80211_MODE_AUTO ||
+ vap->iv_des_mode == IEEE80211_MODE_11B))
+ fail |= MATCH_RATE;
+ else
+ se0->se_flags |= STA_DEMOTE11B;
+ }
+ if (ss->ss_nssid != 0 &&
+ !match_ssid(se->se_ssid, ss->ss_nssid, ss->ss_ssid))
+ fail |= MATCH_SSID;
+ if ((vap->iv_flags & IEEE80211_F_DESBSSID) &&
+ !IEEE80211_ADDR_EQ(vap->iv_des_bssid, se->se_bssid))
+ fail |= MATCH_BSSID;
+ if (se0->se_fails >= STA_FAILS_MAX)
+ fail |= MATCH_FAILS;
+ if (se0->se_notseen >= STA_PURGE_SCANS)
+ fail |= MATCH_NOTSEEN;
+ if (se->se_rssi < STA_RSSI_MIN)
+ fail |= MATCH_RSSI;
+#ifdef IEEE80211_DEBUG
+ if (ieee80211_msg(vap, debug)) {
+ printf(" %c %s",
+ fail & MATCH_FAILS ? '=' :
+ fail & MATCH_NOTSEEN ? '^' :
+ fail & MATCH_CC ? '$' :
+#ifdef IEEE80211_SUPPORT_TDMA
+ fail & MATCH_TDMA_NOIE ? '&' :
+ fail & MATCH_TDMA_VERSION ? 'v' :
+ fail & MATCH_TDMA_NOTMASTER ? 's' :
+ fail & MATCH_TDMA_NOSLOT ? 'f' :
+ fail & MATCH_TDMA_LOCAL ? 'l' :
+#endif
+ fail & MATCH_MESH_NOID ? 'm' :
+ fail ? '-' : '+', ether_sprintf(se->se_macaddr));
+ printf(" %s%c", ether_sprintf(se->se_bssid),
+ fail & MATCH_BSSID ? '!' : ' ');
+ printf(" %3d%c", ieee80211_chan2ieee(ic, se->se_chan),
+ fail & MATCH_CHANNEL ? '!' : ' ');
+ printf(" %+4d%c", se->se_rssi, fail & MATCH_RSSI ? '!' : ' ');
+ printf(" %2dM%c", (rate & IEEE80211_RATE_VAL) / 2,
+ fail & MATCH_RATE ? '!' : ' ');
+ printf(" %4s%c",
+ (se->se_capinfo & IEEE80211_CAPINFO_ESS) ? "ess" :
+ (se->se_capinfo & IEEE80211_CAPINFO_IBSS) ? "ibss" : "",
+ fail & MATCH_CAPINFO ? '!' : ' ');
+ printf(" %3s%c ",
+ (se->se_capinfo & IEEE80211_CAPINFO_PRIVACY) ?
+ "wep" : "no",
+ fail & MATCH_PRIVACY ? '!' : ' ');
+ ieee80211_print_essid(se->se_ssid+2, se->se_ssid[1]);
+ printf("%s\n", fail & (MATCH_SSID | MATCH_MESHID) ? "!" : "");
+ }
+#endif
+ return fail;
+}
+
+static void
+sta_update_notseen(struct sta_table *st)
+{
+ struct sta_entry *se;
+
+ IEEE80211_SCAN_TABLE_LOCK(st);
+ TAILQ_FOREACH(se, &st->st_entry, se_list) {
+ /*
+ * If seen then reset the flag and don't bump the count;
+ * otherwise bump the ``not seen'' count. Note that this
+ * ensures that stations for which we see frames while not
+ * scanning, but that are missed during this scan, are not
+ * penalized.
+ */
+ if (se->se_seen)
+ se->se_seen = 0;
+ else
+ se->se_notseen++;
+ }
+ IEEE80211_SCAN_TABLE_UNLOCK(st);
+}
+
+static void
+sta_dec_fails(struct sta_table *st)
+{
+ struct sta_entry *se;
+
+ IEEE80211_SCAN_TABLE_LOCK(st);
+ TAILQ_FOREACH(se, &st->st_entry, se_list)
+ if (se->se_fails)
+ se->se_fails--;
+ IEEE80211_SCAN_TABLE_UNLOCK(st);
+}
+
+static struct sta_entry *
+select_bss(struct ieee80211_scan_state *ss, struct ieee80211vap *vap, int debug)
+{
+ struct sta_table *st = ss->ss_priv;
+ struct sta_entry *se, *selbs = NULL;
+
+ IEEE80211_DPRINTF(vap, debug, " %s\n",
+ "macaddr bssid chan rssi rate flag wep essid");
+ IEEE80211_SCAN_TABLE_LOCK(st);
+ TAILQ_FOREACH(se, &st->st_entry, se_list) {
+ ieee80211_ies_expand(&se->base.se_ies);
+ if (match_bss(vap, ss, se, debug) == 0) {
+ if (selbs == NULL)
+ selbs = se;
+ else if (sta_compare(se, selbs) > 0)
+ selbs = se;
+ }
+ }
+ IEEE80211_SCAN_TABLE_UNLOCK(st);
+
+ return selbs;
+}
+
+/*
+ * Pick an ap or ibss network to join or find a channel
+ * to use to start an ibss network.
+ */
+static int
+sta_pick_bss(struct ieee80211_scan_state *ss, struct ieee80211vap *vap)
+{
+ struct sta_table *st = ss->ss_priv;
+ struct sta_entry *selbs;
+ struct ieee80211_channel *chan;
+
+ KASSERT(vap->iv_opmode == IEEE80211_M_STA,
+ ("wrong mode %u", vap->iv_opmode));
+
+ if (st->st_newscan) {
+ sta_update_notseen(st);
+ st->st_newscan = 0;
+ }
+ if (ss->ss_flags & IEEE80211_SCAN_NOPICK) {
+ /*
+ * Manual/background scan, don't select+join the
+ * bss, just return. The scanning framework will
+ * handle notification that this has completed.
+ */
+ ss->ss_flags &= ~IEEE80211_SCAN_NOPICK;
+ return 1;
+ }
+ /*
+ * Automatic sequencing; look for a candidate and
+ * if found join the network.
+ */
+ /* NB: unlocked read should be ok */
+ if (TAILQ_FIRST(&st->st_entry) == NULL) {
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_SCAN,
+ "%s: no scan candidate\n", __func__);
+ if (ss->ss_flags & IEEE80211_SCAN_NOJOIN)
+ return 0;
+notfound:
+ /*
+ * If nothing suitable was found decrement
+ * the failure counts so entries will be
+ * reconsidered the next time around. We
+ * really want to do this only for sta's
+ * where we've previously had some success.
+ */
+ sta_dec_fails(st);
+ st->st_newscan = 1;
+ return 0; /* restart scan */
+ }
+ selbs = select_bss(ss, vap, IEEE80211_MSG_SCAN);
+ if (ss->ss_flags & IEEE80211_SCAN_NOJOIN)
+ return (selbs != NULL);
+ if (selbs == NULL)
+ goto notfound;
+ chan = selbs->base.se_chan;
+ if (selbs->se_flags & STA_DEMOTE11B)
+ chan = demote11b(vap, chan);
+ if (!ieee80211_sta_join(vap, chan, &selbs->base))
+ goto notfound;
+ return 1; /* terminate scan */
+}
+
+/*
+ * Look up an entry in the scan cache. We assume we're
+ * called from the bottom half (or with it blocked) so
+ * that it's safe to return a reference to an entry w/o
+ * holding the lock on the table.
+ */
+static struct sta_entry *
+sta_lookup(struct sta_table *st, const uint8_t macaddr[IEEE80211_ADDR_LEN])
+{
+ struct sta_entry *se;
+ int hash = STA_HASH(macaddr);
+
+ IEEE80211_SCAN_TABLE_LOCK(st);
+ LIST_FOREACH(se, &st->st_hash[hash], se_hash)
+ if (IEEE80211_ADDR_EQ(se->base.se_macaddr, macaddr))
+ break;
+ IEEE80211_SCAN_TABLE_UNLOCK(st);
+
+ return se; /* NB: unlocked */
+}
+
+static void
+sta_roam_check(struct ieee80211_scan_state *ss, struct ieee80211vap *vap)
+{
+ struct ieee80211com *ic = vap->iv_ic;
+ struct ieee80211_node *ni = vap->iv_bss;
+ struct sta_table *st = ss->ss_priv;
+ enum ieee80211_phymode mode;
+ struct sta_entry *se, *selbs;
+ uint8_t roamRate, curRate, ucastRate;
+ int8_t roamRssi, curRssi;
+
+ se = sta_lookup(st, ni->ni_macaddr);
+ if (se == NULL) {
+ /* XXX something is wrong */
+ return;
+ }
+
+ mode = ieee80211_chan2mode(ic->ic_bsschan);
+ roamRate = vap->iv_roamparms[mode].rate;
+ roamRssi = vap->iv_roamparms[mode].rssi;
+ ucastRate = vap->iv_txparms[mode].ucastrate;
+ /* NB: the most up-to-date rssi is in the node, not the scan cache */
+ curRssi = ic->ic_node_getrssi(ni);
+ if (ucastRate == IEEE80211_FIXED_RATE_NONE) {
+ curRate = ni->ni_txrate;
+ roamRate &= IEEE80211_RATE_VAL;
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_ROAM,
+ "%s: currssi %d currate %u roamrssi %d roamrate %u\n",
+ __func__, curRssi, curRate, roamRssi, roamRate);
+ } else {
+ curRate = roamRate; /* NB: ensure compare below fails */
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_ROAM,
+ "%s: currssi %d roamrssi %d\n", __func__, curRssi, roamRssi);
+ }
+ /*
+ * Check if a new ap should be used and switch.
+ * XXX deauth current ap
+ */
+ if (curRate < roamRate || curRssi < roamRssi) {
+ if (time_after(ticks, ic->ic_lastscan + vap->iv_scanvalid)) {
+ /*
+ * Scan cache contents are too old; force a scan now
+ * if possible so we have current state to make a
+ * decision with. We don't kick off a bg scan if
+ * we're using dynamic turbo and boosted or if the
+ * channel is busy.
+ * XXX force immediate switch on scan complete
+ */
+ if (!IEEE80211_IS_CHAN_DTURBO(ic->ic_curchan) &&
+ time_after(ticks, ic->ic_lastdata + vap->iv_bgscanidle))
+ ieee80211_bg_scan(vap, 0);
+ return;
+ }
+ se->base.se_rssi = curRssi;
+ selbs = select_bss(ss, vap, IEEE80211_MSG_ROAM);
+ if (selbs != NULL && selbs != se) {
+ struct ieee80211_channel *chan;
+
+ IEEE80211_DPRINTF(vap,
+ IEEE80211_MSG_ROAM | IEEE80211_MSG_DEBUG,
+ "%s: ROAM: curRate %u, roamRate %u, "
+ "curRssi %d, roamRssi %d\n", __func__,
+ curRate, roamRate, curRssi, roamRssi);
+
+ chan = selbs->base.se_chan;
+ if (selbs->se_flags & STA_DEMOTE11B)
+ chan = demote11b(vap, chan);
+ (void) ieee80211_sta_join(vap, chan, &selbs->base);
+ }
+ }
+}
+
+/*
+ * Age entries in the scan cache.
+ * XXX also do roaming since it's convenient
+ */
+static void
+sta_age(struct ieee80211_scan_state *ss)
+{
+ struct ieee80211vap *vap = ss->ss_vap;
+
+ adhoc_age(ss);
+ /*
+ * If rate control is enabled check periodically to see if
+ * we should roam from our current connection to one that
+ * might be better. This only applies when we're operating
+ * in sta mode and automatic roaming is set.
+ * XXX defer if busy
+ * XXX repeater station
+ * XXX do when !bgscan?
+ */
+ KASSERT(vap->iv_opmode == IEEE80211_M_STA,
+ ("wrong mode %u", vap->iv_opmode));
+ if (vap->iv_roaming == IEEE80211_ROAMING_AUTO &&
+ (vap->iv_flags & IEEE80211_F_BGSCAN) &&
+ vap->iv_state >= IEEE80211_S_RUN)
+ /* XXX vap is implicit */
+ sta_roam_check(ss, vap);
+}
+
+/*
+ * Iterate over the entries in the scan cache, invoking
+ * the callback function on each one.
+ */
+static void
+sta_iterate(struct ieee80211_scan_state *ss,
+ ieee80211_scan_iter_func *f, void *arg)
+{
+ struct sta_table *st = ss->ss_priv;
+ struct sta_entry *se;
+ u_int gen;
+
+ mtx_lock(&st->st_scanlock);
+ gen = st->st_scaniter++;
+restart:
+ IEEE80211_SCAN_TABLE_LOCK(st);
+ TAILQ_FOREACH(se, &st->st_entry, se_list) {
+ if (se->se_scangen != gen) {
+ se->se_scangen = gen;
+ /* update public state */
+ se->base.se_age = ticks - se->se_lastupdate;
+ IEEE80211_SCAN_TABLE_UNLOCK(st);
+ (*f)(arg, &se->base);
+ goto restart;
+ }
+ }
+ IEEE80211_SCAN_TABLE_UNLOCK(st);
+
+ mtx_unlock(&st->st_scanlock);
+}
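+
+/*
+ * Note on the generation scheme above (illustrative): tagging each
+ * visited entry with the current iteration gen# lets sta_iterate()
+ * drop the table lock around the callback and restart the walk from
+ * the head; already-tagged entries are skipped, so each entry is
+ * delivered at most once even if the list changes while unlocked.
+ */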
+
+static void
+sta_assoc_fail(struct ieee80211_scan_state *ss,
+ const uint8_t macaddr[IEEE80211_ADDR_LEN], int reason)
+{
+ struct sta_table *st = ss->ss_priv;
+ struct sta_entry *se;
+
+ se = sta_lookup(st, macaddr);
+ if (se != NULL) {
+ se->se_fails++;
+ se->se_lastfail = ticks;
+ IEEE80211_NOTE_MAC(ss->ss_vap, IEEE80211_MSG_SCAN,
+ macaddr, "%s: reason %u fails %u",
+ __func__, reason, se->se_fails);
+ }
+}
+
+static void
+sta_assoc_success(struct ieee80211_scan_state *ss,
+ const uint8_t macaddr[IEEE80211_ADDR_LEN])
+{
+ struct sta_table *st = ss->ss_priv;
+ struct sta_entry *se;
+
+ se = sta_lookup(st, macaddr);
+ if (se != NULL) {
+#if 0
+ se->se_fails = 0;
+ IEEE80211_NOTE_MAC(ss->ss_vap, IEEE80211_MSG_SCAN,
+ macaddr, "%s: fails %u",
+ __func__, se->se_fails);
+#endif
+ se->se_lastassoc = ticks;
+ }
+}
+
+static const struct ieee80211_scanner sta_default = {
+ .scan_name = "default",
+ .scan_attach = sta_attach,
+ .scan_detach = sta_detach,
+ .scan_start = sta_start,
+ .scan_restart = sta_restart,
+ .scan_cancel = sta_cancel,
+ .scan_end = sta_pick_bss,
+ .scan_flush = sta_flush,
+ .scan_add = sta_add,
+ .scan_age = sta_age,
+ .scan_iterate = sta_iterate,
+ .scan_assoc_fail = sta_assoc_fail,
+ .scan_assoc_success = sta_assoc_success,
+};
+IEEE80211_SCANNER_ALG(sta, IEEE80211_M_STA, sta_default);
+
+/*
+ * Adhoc mode-specific support.
+ */
+
+static const uint16_t adhocWorld[] = /* 36, 40, 44, 48 */
+{ 5180, 5200, 5220, 5240 };
+static const uint16_t adhocFcc3[] = /* 36, 40, 44, 48, 145, 149, 153, 157, 161, 165 */
+{ 5180, 5200, 5220, 5240, 5725, 5745, 5765, 5785, 5805, 5825 };
+static const uint16_t adhocMkk[] = /* 34, 38, 42, 46 */
+{ 5170, 5190, 5210, 5230 };
+static const uint16_t adhoc11b[] = /* 10, 11 */
+{ 2457, 2462 };
+
+static const struct scanlist adhocScanTable[] = {
+ { IEEE80211_MODE_11B, X(adhoc11b) },
+ { IEEE80211_MODE_11A, X(adhocWorld) },
+ { IEEE80211_MODE_11A, X(adhocFcc3) },
+ { IEEE80211_MODE_11B, X(adhocMkk) },
+ { .list = NULL }
+};
+#undef X
+
+/*
+ * Start an adhoc-mode scan by populating the channel list.
+ */
+static int
+adhoc_start(struct ieee80211_scan_state *ss, struct ieee80211vap *vap)
+{
+ struct sta_table *st = ss->ss_priv;
+
+ makescanlist(ss, vap, adhocScanTable);
+
+ if (ss->ss_mindwell == 0)
+ ss->ss_mindwell = msecs_to_ticks(200); /* 200ms */
+ if (ss->ss_maxdwell == 0)
+ ss->ss_maxdwell = msecs_to_ticks(200); /* 200ms */
+
+ st->st_scangen++;
+ st->st_newscan = 1;
+
+ return 0;
+}
+
+/*
+ * Select a channel to start an adhoc network on.
+ * The channel list was populated with appropriate
+ * channels so select one that looks least occupied.
+ */
+static struct ieee80211_channel *
+adhoc_pick_channel(struct ieee80211_scan_state *ss, int flags)
+{
+ struct sta_table *st = ss->ss_priv;
+ struct sta_entry *se;
+ struct ieee80211_channel *c, *bestchan;
+ int i, bestrssi, maxrssi;
+
+ bestchan = NULL;
+ bestrssi = -1;
+
+ IEEE80211_SCAN_TABLE_LOCK(st);
+ for (i = 0; i < ss->ss_last; i++) {
+ c = ss->ss_chans[i];
+ /* never consider a channel with radar */
+ if (IEEE80211_IS_CHAN_RADAR(c))
+ continue;
+ /* skip channels disallowed by regulatory settings */
+ if (IEEE80211_IS_CHAN_NOADHOC(c))
+ continue;
+ /* check channel attributes for band compatibility */
+ if (flags != 0 && (c->ic_flags & flags) != flags)
+ continue;
+ maxrssi = 0;
+ TAILQ_FOREACH(se, &st->st_entry, se_list) {
+ if (se->base.se_chan != c)
+ continue;
+ if (se->base.se_rssi > maxrssi)
+ maxrssi = se->base.se_rssi;
+ }
+ if (bestchan == NULL || maxrssi < bestrssi)
+ bestchan = c;
+ }
+ IEEE80211_SCAN_TABLE_UNLOCK(st);
+
+ return bestchan;
+}
+
+/*
+ * Pick an ibss network to join or find a channel
+ * to use to start an ibss network.
+ */
+static int
+adhoc_pick_bss(struct ieee80211_scan_state *ss, struct ieee80211vap *vap)
+{
+ struct sta_table *st = ss->ss_priv;
+ struct sta_entry *selbs;
+ struct ieee80211_channel *chan;
+
+ KASSERT(vap->iv_opmode == IEEE80211_M_IBSS ||
+ vap->iv_opmode == IEEE80211_M_AHDEMO ||
+ vap->iv_opmode == IEEE80211_M_MBSS,
+ ("wrong opmode %u", vap->iv_opmode));
+
+ if (st->st_newscan) {
+ sta_update_notseen(st);
+ st->st_newscan = 0;
+ }
+ if (ss->ss_flags & IEEE80211_SCAN_NOPICK) {
+ /*
+ * Manual/background scan, don't select+join the
+ * bss, just return. The scanning framework will
+ * handle notification that this has completed.
+ */
+ ss->ss_flags &= ~IEEE80211_SCAN_NOPICK;
+ return 1;
+ }
+ /*
+ * Automatic sequencing; look for a candidate and
+ * if found join the network.
+ */
+ /* NB: unlocked read should be ok */
+ if (TAILQ_FIRST(&st->st_entry) == NULL) {
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_SCAN,
+ "%s: no scan candidate\n", __func__);
+ if (ss->ss_flags & IEEE80211_SCAN_NOJOIN)
+ return 0;
+notfound:
+ /* NB: never auto-start a tdma network for slot !0 */
+#ifdef IEEE80211_SUPPORT_TDMA
+ if (vap->iv_des_nssid &&
+ ((vap->iv_caps & IEEE80211_C_TDMA) == 0 ||
+ ieee80211_tdma_getslot(vap) == 0)) {
+#else
+ if (vap->iv_des_nssid) {
+#endif
+ /*
+ * No existing adhoc network to join and we have
+ * an ssid; start one up. If no channel was
+ * specified, try to select a channel.
+ */
+ if (vap->iv_des_chan == IEEE80211_CHAN_ANYC ||
+ IEEE80211_IS_CHAN_RADAR(vap->iv_des_chan)) {
+ struct ieee80211com *ic = vap->iv_ic;
+
+ chan = adhoc_pick_channel(ss, 0);
+ if (chan != NULL)
+ chan = ieee80211_ht_adjust_channel(ic,
+ chan, vap->iv_flags_ht);
+ } else
+ chan = vap->iv_des_chan;
+ if (chan != NULL) {
+ ieee80211_create_ibss(vap, chan);
+ return 1;
+ }
+ }
+ /*
+ * If nothing suitable was found decrement
+ * the failure counts so entries will be
+ * reconsidered the next time around. We
+ * really want to do this only for sta's
+ * where we've previously had some success.
+ */
+ sta_dec_fails(st);
+ st->st_newscan = 1;
+ return 0; /* restart scan */
+ }
+ selbs = select_bss(ss, vap, IEEE80211_MSG_SCAN);
+ if (ss->ss_flags & IEEE80211_SCAN_NOJOIN)
+ return (selbs != NULL);
+ if (selbs == NULL)
+ goto notfound;
+ chan = selbs->base.se_chan;
+ if (selbs->se_flags & STA_DEMOTE11B)
+ chan = demote11b(vap, chan);
+ if (!ieee80211_sta_join(vap, chan, &selbs->base))
+ goto notfound;
+ return 1; /* terminate scan */
+}
+
+/*
+ * Age entries in the scan cache.
+ */
+static void
+adhoc_age(struct ieee80211_scan_state *ss)
+{
+ struct sta_table *st = ss->ss_priv;
+ struct sta_entry *se, *next;
+
+ IEEE80211_SCAN_TABLE_LOCK(st);
+ TAILQ_FOREACH_SAFE(se, &st->st_entry, se_list, next) {
+ if (se->se_notseen > STA_PURGE_SCANS) {
+ TAILQ_REMOVE(&st->st_entry, se, se_list);
+ LIST_REMOVE(se, se_hash);
+ ieee80211_ies_cleanup(&se->base.se_ies);
+ free(se, M_80211_SCAN);
+ }
+ }
+ IEEE80211_SCAN_TABLE_UNLOCK(st);
+}
+
+static const struct ieee80211_scanner adhoc_default = {
+ .scan_name = "default",
+ .scan_attach = sta_attach,
+ .scan_detach = sta_detach,
+ .scan_start = adhoc_start,
+ .scan_restart = sta_restart,
+ .scan_cancel = sta_cancel,
+ .scan_end = adhoc_pick_bss,
+ .scan_flush = sta_flush,
+ .scan_pickchan = adhoc_pick_channel,
+ .scan_add = sta_add,
+ .scan_age = adhoc_age,
+ .scan_iterate = sta_iterate,
+ .scan_assoc_fail = sta_assoc_fail,
+ .scan_assoc_success = sta_assoc_success,
+};
+IEEE80211_SCANNER_ALG(ibss, IEEE80211_M_IBSS, adhoc_default);
+IEEE80211_SCANNER_ALG(ahdemo, IEEE80211_M_AHDEMO, adhoc_default);
+
+static void
+ap_force_promisc(struct ieee80211com *ic)
+{
+ struct ifnet *ifp = ic->ic_ifp;
+
+ IEEE80211_LOCK(ic);
+ /* set interface into promiscuous mode */
+ ifp->if_flags |= IFF_PROMISC;
+ ieee80211_runtask(ic, &ic->ic_promisc_task);
+ IEEE80211_UNLOCK(ic);
+}
+
+static void
+ap_reset_promisc(struct ieee80211com *ic)
+{
+ IEEE80211_LOCK(ic);
+ ieee80211_syncifflag_locked(ic, IFF_PROMISC);
+ IEEE80211_UNLOCK(ic);
+}
+
+static int
+ap_start(struct ieee80211_scan_state *ss, struct ieee80211vap *vap)
+{
+ struct sta_table *st = ss->ss_priv;
+
+ makescanlist(ss, vap, staScanTable);
+
+ if (ss->ss_mindwell == 0)
+ ss->ss_mindwell = msecs_to_ticks(200); /* 200ms */
+ if (ss->ss_maxdwell == 0)
+ ss->ss_maxdwell = msecs_to_ticks(200); /* 200ms */
+
+ st->st_scangen++;
+ st->st_newscan = 1;
+
+ ap_force_promisc(vap->iv_ic);
+ return 0;
+}
+
+/*
+ * Cancel an ongoing scan.
+ */
+static int
+ap_cancel(struct ieee80211_scan_state *ss, struct ieee80211vap *vap)
+{
+ ap_reset_promisc(vap->iv_ic);
+ return 0;
+}
+
+/*
+ * Pick a quiet channel to use for ap operation.
+ */
+static struct ieee80211_channel *
+ap_pick_channel(struct ieee80211_scan_state *ss, int flags)
+{
+ struct sta_table *st = ss->ss_priv;
+ struct ieee80211_channel *bestchan = NULL;
+ int i;
+
+ /* XXX select channel more intelligently, e.g. channel spread, power */
+ /* NB: use scan list order to preserve channel preference */
+ for (i = 0; i < ss->ss_last; i++) {
+ struct ieee80211_channel *chan = ss->ss_chans[i];
+ /*
+ * If the channel is unoccupied the max rssi
+ * should be zero; just take it. Otherwise
+ * track the channel with the lowest rssi and
+ * use that when all channels appear occupied.
+ */
+ if (IEEE80211_IS_CHAN_RADAR(chan))
+ continue;
+ if (IEEE80211_IS_CHAN_NOHOSTAP(chan))
+ continue;
+ /* check channel attributes for band compatibility */
+ if (flags != 0 && (chan->ic_flags & flags) != flags)
+ continue;
+ KASSERT(sizeof(chan->ic_ieee) == 1, ("ic_chan size"));
+ /* XXX channel may still have interference */
+ if (st->st_maxrssi[chan->ic_ieee] == 0) {
+ /* XXX use other considerations */
+ return chan;
+ }
+ if (bestchan == NULL ||
+ st->st_maxrssi[chan->ic_ieee] < st->st_maxrssi[bestchan->ic_ieee])
+ bestchan = chan;
+ }
+ return bestchan;
+}
+
+/*
+ * Conclude the scan: pick a quiet channel for ap operation.
+ */
+static int
+ap_end(struct ieee80211_scan_state *ss, struct ieee80211vap *vap)
+{
+ struct ieee80211com *ic = vap->iv_ic;
+ struct ieee80211_channel *bestchan;
+
+ KASSERT(vap->iv_opmode == IEEE80211_M_HOSTAP,
+ ("wrong opmode %u", vap->iv_opmode));
+ bestchan = ap_pick_channel(ss, 0);
+ if (bestchan == NULL) {
+ /* no suitable channel, should not happen */
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_SCAN,
+ "%s: no suitable channel! (should not happen)\n", __func__);
+ /* XXX print something? */
+ return 0; /* restart scan */
+ }
+ /*
+ * If this is a dynamic turbo channel, start with the unboosted one.
+ */
+ if (IEEE80211_IS_CHAN_TURBO(bestchan)) {
+ bestchan = ieee80211_find_channel(ic, bestchan->ic_freq,
+ bestchan->ic_flags & ~IEEE80211_CHAN_TURBO);
+ if (bestchan == NULL) {
+ /* should never happen ?? */
+ return 0;
+ }
+ }
+ ap_reset_promisc(ic);
+ if (ss->ss_flags & (IEEE80211_SCAN_NOPICK | IEEE80211_SCAN_NOJOIN)) {
+ /*
+ * Manual/background scan, don't select+join the
+ * bss, just return. The scanning framework will
+ * handle notification that this has completed.
+ */
+ ss->ss_flags &= ~IEEE80211_SCAN_NOPICK;
+ return 1;
+ }
+ ieee80211_create_ibss(vap,
+ ieee80211_ht_adjust_channel(ic, bestchan, vap->iv_flags_ht));
+ return 1;
+}
+
+static const struct ieee80211_scanner ap_default = {
+ .scan_name = "default",
+ .scan_attach = sta_attach,
+ .scan_detach = sta_detach,
+ .scan_start = ap_start,
+ .scan_restart = sta_restart,
+ .scan_cancel = ap_cancel,
+ .scan_end = ap_end,
+ .scan_flush = sta_flush,
+ .scan_pickchan = ap_pick_channel,
+ .scan_add = sta_add,
+ .scan_age = adhoc_age,
+ .scan_iterate = sta_iterate,
+ .scan_assoc_success = sta_assoc_success,
+ .scan_assoc_fail = sta_assoc_fail,
+};
+IEEE80211_SCANNER_ALG(ap, IEEE80211_M_HOSTAP, ap_default);
+
+#ifdef IEEE80211_SUPPORT_MESH
+/*
+ * Pick an mbss network to join or find a channel
+ * to use to start an mbss network.
+ */
+static int
+mesh_pick_bss(struct ieee80211_scan_state *ss, struct ieee80211vap *vap)
+{
+ struct sta_table *st = ss->ss_priv;
+ struct ieee80211_mesh_state *ms = vap->iv_mesh;
+ struct sta_entry *selbs;
+ struct ieee80211_channel *chan;
+
+ KASSERT(vap->iv_opmode == IEEE80211_M_MBSS,
+ ("wrong opmode %u", vap->iv_opmode));
+
+ if (st->st_newscan) {
+ sta_update_notseen(st);
+ st->st_newscan = 0;
+ }
+ if (ss->ss_flags & IEEE80211_SCAN_NOPICK) {
+ /*
+ * Manual/background scan, don't select+join the
+ * bss, just return. The scanning framework will
+ * handle notification that this has completed.
+ */
+ ss->ss_flags &= ~IEEE80211_SCAN_NOPICK;
+ return 1;
+ }
+ /*
+ * Automatic sequencing; look for a candidate and
+ * if found join the network.
+ */
+ /* NB: unlocked read should be ok */
+ if (TAILQ_FIRST(&st->st_entry) == NULL) {
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_SCAN,
+ "%s: no scan candidate\n", __func__);
+ if (ss->ss_flags & IEEE80211_SCAN_NOJOIN)
+ return 0;
+notfound:
+ if (ms->ms_idlen != 0) {
+ /*
+ * No existing mbss network to join and we have
+ * a meshid; start one up. If no channel was
+ * specified, try to select a channel.
+ */
+ if (vap->iv_des_chan == IEEE80211_CHAN_ANYC ||
+ IEEE80211_IS_CHAN_RADAR(vap->iv_des_chan)) {
+ struct ieee80211com *ic = vap->iv_ic;
+
+ chan = adhoc_pick_channel(ss, 0);
+ if (chan != NULL)
+ chan = ieee80211_ht_adjust_channel(ic,
+ chan, vap->iv_flags_ht);
+ } else
+ chan = vap->iv_des_chan;
+ if (chan != NULL) {
+ ieee80211_create_ibss(vap, chan);
+ return 1;
+ }
+ }
+ /*
+ * If nothing suitable was found decrement
+ * the failure counts so entries will be
+ * reconsidered the next time around. We
+ * really want to do this only for sta's
+ * where we've previously had some success.
+ */
+ sta_dec_fails(st);
+ st->st_newscan = 1;
+ return 0; /* restart scan */
+ }
+ selbs = select_bss(ss, vap, IEEE80211_MSG_SCAN);
+ if (ss->ss_flags & IEEE80211_SCAN_NOJOIN)
+ return (selbs != NULL);
+ if (selbs == NULL)
+ goto notfound;
+ chan = selbs->base.se_chan;
+ if (selbs->se_flags & STA_DEMOTE11B)
+ chan = demote11b(vap, chan);
+ if (!ieee80211_sta_join(vap, chan, &selbs->base))
+ goto notfound;
+ return 1; /* terminate scan */
+}
+
+static const struct ieee80211_scanner mesh_default = {
+ .scan_name = "default",
+ .scan_attach = sta_attach,
+ .scan_detach = sta_detach,
+ .scan_start = adhoc_start,
+ .scan_restart = sta_restart,
+ .scan_cancel = sta_cancel,
+ .scan_end = mesh_pick_bss,
+ .scan_flush = sta_flush,
+ .scan_pickchan = adhoc_pick_channel,
+ .scan_add = sta_add,
+ .scan_age = adhoc_age,
+ .scan_iterate = sta_iterate,
+ .scan_assoc_fail = sta_assoc_fail,
+ .scan_assoc_success = sta_assoc_success,
+};
+IEEE80211_SCANNER_ALG(mesh, IEEE80211_M_MBSS, mesh_default);
+#endif /* IEEE80211_SUPPORT_MESH */
diff --git a/rtems/freebsd/net80211/ieee80211_sta.c b/rtems/freebsd/net80211/ieee80211_sta.c
new file mode 100644
index 00000000..91934c47
--- /dev/null
+++ b/rtems/freebsd/net80211/ieee80211_sta.c
@@ -0,0 +1,1748 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 2007-2008 Sam Leffler, Errno Consulting
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+#ifdef __FreeBSD__
+__FBSDID("$FreeBSD$");
+#endif
+
+/*
+ * IEEE 802.11 Station mode support.
+ */
+#include <rtems/freebsd/local/opt_inet.h>
+#include <rtems/freebsd/local/opt_wlan.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/kernel.h>
+
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/sockio.h>
+#include <rtems/freebsd/sys/endian.h>
+#include <rtems/freebsd/sys/errno.h>
+#include <rtems/freebsd/sys/proc.h>
+#include <rtems/freebsd/sys/sysctl.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/if_media.h>
+#include <rtems/freebsd/net/if_llc.h>
+#include <rtems/freebsd/net/ethernet.h>
+
+#include <rtems/freebsd/net/bpf.h>
+
+#include <rtems/freebsd/net80211/ieee80211_var.h>
+#include <rtems/freebsd/net80211/ieee80211_sta.h>
+#include <rtems/freebsd/net80211/ieee80211_input.h>
+#ifdef IEEE80211_SUPPORT_SUPERG
+#include <rtems/freebsd/net80211/ieee80211_superg.h>
+#endif
+#include <rtems/freebsd/net80211/ieee80211_ratectl.h>
+
+#define IEEE80211_RATE2MBS(r) (((r) & IEEE80211_RATE_VAL) / 2)
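+
+/*
+ * NB: rate octets are in 0.5 Mb/s units with the basic-rate flag in
+ * the high bit; e.g. 0x96 & IEEE80211_RATE_VAL = 22, so
+ * IEEE80211_RATE2MBS(0x96) = 11 (Mb/s).
+ */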
+
+static void sta_vattach(struct ieee80211vap *);
+static void sta_beacon_miss(struct ieee80211vap *);
+static int sta_newstate(struct ieee80211vap *, enum ieee80211_state, int);
+static int sta_input(struct ieee80211_node *, struct mbuf *, int, int);
+static void sta_recv_mgmt(struct ieee80211_node *, struct mbuf *,
+ int subtype, int rssi, int nf);
+static void sta_recv_ctl(struct ieee80211_node *, struct mbuf *, int subtype);
+
+void
+ieee80211_sta_attach(struct ieee80211com *ic)
+{
+ ic->ic_vattach[IEEE80211_M_STA] = sta_vattach;
+}
+
+void
+ieee80211_sta_detach(struct ieee80211com *ic)
+{
+}
+
+static void
+sta_vdetach(struct ieee80211vap *vap)
+{
+}
+
+static void
+sta_vattach(struct ieee80211vap *vap)
+{
+ vap->iv_newstate = sta_newstate;
+ vap->iv_input = sta_input;
+ vap->iv_recv_mgmt = sta_recv_mgmt;
+ vap->iv_recv_ctl = sta_recv_ctl;
+ vap->iv_opdetach = sta_vdetach;
+ vap->iv_bmiss = sta_beacon_miss;
+}
+
+/*
+ * Handle a beacon miss event. The common code filters out
+ * spurious events that can happen when scanning and/or before
+ * reaching RUN state.
+ */
+static void
+sta_beacon_miss(struct ieee80211vap *vap)
+{
+ struct ieee80211com *ic = vap->iv_ic;
+
+ KASSERT((ic->ic_flags & IEEE80211_F_SCAN) == 0, ("scanning"));
+ KASSERT(vap->iv_state >= IEEE80211_S_RUN,
+ ("wrong state %s", ieee80211_state_name[vap->iv_state]));
+
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_STATE | IEEE80211_MSG_DEBUG,
+ "beacon miss, mode %s state %s\n",
+ ieee80211_opmode_name[vap->iv_opmode],
+ ieee80211_state_name[vap->iv_state]);
+
+ if (vap->iv_state == IEEE80211_S_CSA) {
+ /*
+ * A Channel Switch is pending; assume we missed the
+ * beacon that would've completed the process and just
+ * force the switch. If we made a mistake we'll not
+ * find the AP on the new channel and fall back to a
+ * normal scan.
+ */
+ ieee80211_csa_completeswitch(ic);
+ return;
+ }
+ if (++vap->iv_bmiss_count < vap->iv_bmiss_max) {
+ /*
+ * Send a directed probe req before falling back to a
+ * scan; if we receive a response ic_bmiss_count will
+ * be reset. Some cards mistakenly report beacon miss
+ * so this avoids the expensive scan if the ap is
+ * still there.
+ */
+ ieee80211_send_probereq(vap->iv_bss, vap->iv_myaddr,
+ vap->iv_bss->ni_bssid, vap->iv_bss->ni_bssid,
+ vap->iv_bss->ni_essid, vap->iv_bss->ni_esslen);
+ return;
+ }
+
+ callout_stop(&vap->iv_swbmiss);
+ vap->iv_bmiss_count = 0;
+ vap->iv_stats.is_beacon_miss++;
+ if (vap->iv_roaming == IEEE80211_ROAMING_AUTO) {
+#ifdef IEEE80211_SUPPORT_SUPERG
+ struct ieee80211com *ic = vap->iv_ic;
+
+ /*
+ * If we receive a beacon miss interrupt when using
+ * dynamic turbo, attempt to switch modes before
+ * reassociating.
+ */
+ if (IEEE80211_ATH_CAP(vap, vap->iv_bss, IEEE80211_NODE_TURBOP))
+ ieee80211_dturbo_switch(vap,
+ ic->ic_bsschan->ic_flags ^ IEEE80211_CHAN_TURBO);
+#endif
+ /*
+ * Try to reassociate before scanning for a new ap.
+ */
+ ieee80211_new_state(vap, IEEE80211_S_ASSOC, 1);
+ } else {
+ /*
+ * Somebody else is controlling state changes (e.g.
+ * a user-mode app); don't do anything that would
+ * confuse them, just drop into scan mode so they'll
+ * be notified of the state change and given control.
+ */
+ ieee80211_new_state(vap, IEEE80211_S_SCAN, 0);
+ }
+}
+
+/*
+ * Handle deauth with reason. We retry only for
+ * the cases where we might succeed. Otherwise
+ * we downgrade the ap and scan.
+ */
+static void
+sta_authretry(struct ieee80211vap *vap, struct ieee80211_node *ni, int reason)
+{
+ switch (reason) {
+ case IEEE80211_STATUS_SUCCESS: /* NB: MLME assoc */
+ case IEEE80211_STATUS_TIMEOUT:
+ case IEEE80211_REASON_ASSOC_EXPIRE:
+ case IEEE80211_REASON_NOT_AUTHED:
+ case IEEE80211_REASON_NOT_ASSOCED:
+ case IEEE80211_REASON_ASSOC_LEAVE:
+ case IEEE80211_REASON_ASSOC_NOT_AUTHED:
+ IEEE80211_SEND_MGMT(ni, IEEE80211_FC0_SUBTYPE_AUTH, 1);
+ break;
+ default:
+ ieee80211_scan_assoc_fail(vap, vap->iv_bss->ni_macaddr, reason);
+ if (vap->iv_roaming == IEEE80211_ROAMING_AUTO)
+ ieee80211_check_scan_current(vap);
+ break;
+ }
+}
+
+/*
+ * IEEE80211_M_STA vap state machine handler.
+ * This routine handles the main states in the 802.11 protocol.
+ */
+static int
+sta_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
+{
+ struct ieee80211com *ic = vap->iv_ic;
+ struct ieee80211_node *ni;
+ enum ieee80211_state ostate;
+
+ IEEE80211_LOCK_ASSERT(ic);
+
+ ostate = vap->iv_state;
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_STATE, "%s: %s -> %s (%d)\n",
+ __func__, ieee80211_state_name[ostate],
+ ieee80211_state_name[nstate], arg);
+ vap->iv_state = nstate; /* state transition */
+ callout_stop(&vap->iv_mgtsend); /* XXX callout_drain */
+ if (ostate != IEEE80211_S_SCAN)
+ ieee80211_cancel_scan(vap); /* background scan */
+ ni = vap->iv_bss; /* NB: no reference held */
+ if (vap->iv_flags_ext & IEEE80211_FEXT_SWBMISS)
+ callout_stop(&vap->iv_swbmiss);
+ switch (nstate) {
+ case IEEE80211_S_INIT:
+ switch (ostate) {
+ case IEEE80211_S_SLEEP:
+ /* XXX wakeup */
+ case IEEE80211_S_RUN:
+ IEEE80211_SEND_MGMT(ni,
+ IEEE80211_FC0_SUBTYPE_DISASSOC,
+ IEEE80211_REASON_ASSOC_LEAVE);
+ ieee80211_sta_leave(ni);
+ break;
+ case IEEE80211_S_ASSOC:
+ IEEE80211_SEND_MGMT(ni,
+ IEEE80211_FC0_SUBTYPE_DEAUTH,
+ IEEE80211_REASON_AUTH_LEAVE);
+ break;
+ case IEEE80211_S_SCAN:
+ ieee80211_cancel_scan(vap);
+ break;
+ default:
+ goto invalid;
+ }
+ if (ostate != IEEE80211_S_INIT) {
+ /* NB: optimize INIT -> INIT case */
+ ieee80211_reset_bss(vap);
+ }
+ if (vap->iv_auth->ia_detach != NULL)
+ vap->iv_auth->ia_detach(vap);
+ break;
+ case IEEE80211_S_SCAN:
+ switch (ostate) {
+ case IEEE80211_S_INIT:
+ /*
+ * Initiate a scan. We can come here as a result
+ * of an IEEE80211_IOC_SCAN_REQ too in which case
+ * the vap will be marked with IEEE80211_FEXT_SCANREQ
+ * and the scan request parameters will be present
+ * in iv_scanreq. Otherwise we do the default.
+ */
+ if (vap->iv_flags_ext & IEEE80211_FEXT_SCANREQ) {
+ ieee80211_check_scan(vap,
+ vap->iv_scanreq_flags,
+ vap->iv_scanreq_duration,
+ vap->iv_scanreq_mindwell,
+ vap->iv_scanreq_maxdwell,
+ vap->iv_scanreq_nssid, vap->iv_scanreq_ssid);
+ vap->iv_flags_ext &= ~IEEE80211_FEXT_SCANREQ;
+ } else
+ ieee80211_check_scan_current(vap);
+ break;
+ case IEEE80211_S_SCAN:
+ case IEEE80211_S_AUTH:
+ case IEEE80211_S_ASSOC:
+ /*
+ * These can happen either because of a timeout
+ * on an assoc/auth response or because of a
+ * change in state that requires a reset. For
+ * the former we're called with a non-zero arg
+ * that is the cause for the failure; pass this
+ * to the scan code so it can update state.
+ * Otherwise trigger a new scan unless we're in
+ * manual roaming mode in which case an application
+ * must issue an explicit scan request.
+ */
+ if (arg != 0)
+ ieee80211_scan_assoc_fail(vap,
+ vap->iv_bss->ni_macaddr, arg);
+ if (vap->iv_roaming == IEEE80211_ROAMING_AUTO)
+ ieee80211_check_scan_current(vap);
+ break;
+ case IEEE80211_S_RUN: /* beacon miss */
+ /*
+ * Beacon miss. Notify user space and, if not
+ * under control of a user application (roaming
+ * set to manual), kick off a scan to re-connect.
+ */
+ ieee80211_sta_leave(ni);
+ if (vap->iv_roaming == IEEE80211_ROAMING_AUTO)
+ ieee80211_check_scan_current(vap);
+ break;
+ default:
+ goto invalid;
+ }
+ break;
+ case IEEE80211_S_AUTH:
+ switch (ostate) {
+ case IEEE80211_S_INIT:
+ case IEEE80211_S_SCAN:
+ IEEE80211_SEND_MGMT(ni,
+ IEEE80211_FC0_SUBTYPE_AUTH, 1);
+ break;
+ case IEEE80211_S_AUTH:
+ case IEEE80211_S_ASSOC:
+ switch (arg & 0xff) {
+ case IEEE80211_FC0_SUBTYPE_AUTH:
+ /* ??? */
+ IEEE80211_SEND_MGMT(ni,
+ IEEE80211_FC0_SUBTYPE_AUTH, 2);
+ break;
+ case IEEE80211_FC0_SUBTYPE_DEAUTH:
+ sta_authretry(vap, ni, arg>>8);
+ break;
+ }
+ break;
+ case IEEE80211_S_RUN:
+ switch (arg & 0xff) {
+ case IEEE80211_FC0_SUBTYPE_AUTH:
+ IEEE80211_SEND_MGMT(ni,
+ IEEE80211_FC0_SUBTYPE_AUTH, 2);
+ vap->iv_state = ostate; /* stay RUN */
+ break;
+ case IEEE80211_FC0_SUBTYPE_DEAUTH:
+ ieee80211_sta_leave(ni);
+ if (vap->iv_roaming == IEEE80211_ROAMING_AUTO) {
+ /* try to reauth */
+ IEEE80211_SEND_MGMT(ni,
+ IEEE80211_FC0_SUBTYPE_AUTH, 1);
+ }
+ break;
+ }
+ break;
+ default:
+ goto invalid;
+ }
+ break;
+ case IEEE80211_S_ASSOC:
+ switch (ostate) {
+ case IEEE80211_S_AUTH:
+ case IEEE80211_S_ASSOC:
+ IEEE80211_SEND_MGMT(ni,
+ IEEE80211_FC0_SUBTYPE_ASSOC_REQ, 0);
+ break;
+ case IEEE80211_S_SLEEP: /* cannot happen */
+ case IEEE80211_S_RUN:
+ ieee80211_sta_leave(ni);
+ if (vap->iv_roaming == IEEE80211_ROAMING_AUTO) {
+ IEEE80211_SEND_MGMT(ni, arg ?
+ IEEE80211_FC0_SUBTYPE_REASSOC_REQ :
+ IEEE80211_FC0_SUBTYPE_ASSOC_REQ, 0);
+ }
+ break;
+ default:
+ goto invalid;
+ }
+ break;
+ case IEEE80211_S_RUN:
+ if (vap->iv_flags & IEEE80211_F_WPA) {
+ /* XXX validate prerequisites */
+ }
+ switch (ostate) {
+ case IEEE80211_S_RUN:
+ case IEEE80211_S_CSA:
+ break;
+ case IEEE80211_S_AUTH: /* when join is done in fw */
+ case IEEE80211_S_ASSOC:
+#ifdef IEEE80211_DEBUG
+ if (ieee80211_msg_debug(vap)) {
+ ieee80211_note(vap, "%s with %s ssid ",
+ (vap->iv_opmode == IEEE80211_M_STA ?
+ "associated" : "synchronized"),
+ ether_sprintf(ni->ni_bssid));
+ ieee80211_print_essid(vap->iv_bss->ni_essid,
+ ni->ni_esslen);
+ /* XXX MCS/HT */
+ printf(" channel %d start %uMb\n",
+ ieee80211_chan2ieee(ic, ic->ic_curchan),
+ IEEE80211_RATE2MBS(ni->ni_txrate));
+ }
+#endif
+ ieee80211_scan_assoc_success(vap, ni->ni_macaddr);
+ ieee80211_notify_node_join(ni,
+ arg == IEEE80211_FC0_SUBTYPE_ASSOC_RESP);
+ break;
+ case IEEE80211_S_SLEEP:
+ ieee80211_sta_pwrsave(vap, 0);
+ break;
+ default:
+ goto invalid;
+ }
+ ieee80211_sync_curchan(ic);
+ if (ostate != IEEE80211_S_RUN &&
+ (vap->iv_flags_ext & IEEE80211_FEXT_SWBMISS)) {
+ /*
+ * Start s/w beacon miss timer for devices w/o
+ * hardware support. We fudge a bit here since
+ * we're doing this in software.
+ */
+ vap->iv_swbmiss_period = IEEE80211_TU_TO_TICKS(
+ 2 * vap->iv_bmissthreshold * ni->ni_intval);
+ vap->iv_swbmiss_count = 0;
+ callout_reset(&vap->iv_swbmiss, vap->iv_swbmiss_period,
+ ieee80211_swbmiss, vap);
+ }
+ /*
+ * When 802.1x is not in use mark the port authorized
+ * at this point so traffic can flow.
+ */
+ if (ni->ni_authmode != IEEE80211_AUTH_8021X)
+ ieee80211_node_authorize(ni);
+ /*
+ * Fake association when joining an existing bss.
+ */
+ if (ic->ic_newassoc != NULL)
+ ic->ic_newassoc(vap->iv_bss, ostate != IEEE80211_S_RUN);
+ break;
+ case IEEE80211_S_CSA:
+ if (ostate != IEEE80211_S_RUN)
+ goto invalid;
+ break;
+ case IEEE80211_S_SLEEP:
+ ieee80211_sta_pwrsave(vap, 0);
+ break;
+ default:
+ invalid:
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_STATE,
+ "%s: unexpected state transition %s -> %s\n", __func__,
+ ieee80211_state_name[ostate], ieee80211_state_name[nstate]);
+ break;
+ }
+ return 0;
+}
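+
+/*
+ * Worked example for the s/w beacon miss timer armed above (assuming
+ * typical values): with ni_intval = 100 TU and iv_bmissthreshold = 7
+ * the period is 2 * 7 * 100 = 1400 TU; at 1 TU = 1024 usec that is
+ * roughly 1.43 seconds between s/w bmiss checks.
+ */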
+
+/*
+ * Return non-zero if the frame is an echo of a multicast
+ * frame sent by ourself. The dir is known to be DSTODS.
+ */
+static __inline int
+isdstods_mcastecho(struct ieee80211vap *vap, const struct ieee80211_frame *wh)
+{
+#define QWH4(wh) ((const struct ieee80211_qosframe_addr4 *)wh)
+#define WH4(wh) ((const struct ieee80211_frame_addr4 *)wh)
+ const uint8_t *sa;
+
+ KASSERT(vap->iv_opmode == IEEE80211_M_STA, ("wrong mode"));
+
+ if (!IEEE80211_IS_MULTICAST(wh->i_addr3))
+ return 0;
+ sa = IEEE80211_QOS_HAS_SEQ(wh) ? QWH4(wh)->i_addr4 : WH4(wh)->i_addr4;
+ return IEEE80211_ADDR_EQ(sa, vap->iv_myaddr);
+#undef WH4
+#undef QWH4
+}
+
+/*
+ * Return non-zero if the frame is an echo of a multicast
+ * frame sent by ourself. The dir is known to be FROMDS.
+ */
+static __inline int
+isfromds_mcastecho(struct ieee80211vap *vap, const struct ieee80211_frame *wh)
+{
+ KASSERT(vap->iv_opmode == IEEE80211_M_STA, ("wrong mode"));
+
+ if (!IEEE80211_IS_MULTICAST(wh->i_addr1))
+ return 0;
+ return IEEE80211_ADDR_EQ(wh->i_addr3, vap->iv_myaddr);
+}
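+
+/*
+ * NB: the address layout behind the two helpers above: in FROMDS
+ * frames addr1 = DA, addr2 = BSSID, addr3 = SA; in DSTODS (4-address)
+ * frames the SA is carried in addr4. A multicast DA whose SA is our
+ * own address is therefore our own frame echoed back by the AP.
+ */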
+
+/*
+ * Decide if a received management frame should be
+ * printed when debugging is enabled. This filters some
+ * of the less interesting frames that come frequently
+ * (e.g. beacons).
+ */
+static __inline int
+doprint(struct ieee80211vap *vap, int subtype)
+{
+ switch (subtype) {
+ case IEEE80211_FC0_SUBTYPE_BEACON:
+ return (vap->iv_ic->ic_flags & IEEE80211_F_SCAN);
+ case IEEE80211_FC0_SUBTYPE_PROBE_REQ:
+ return 0;
+ }
+ return 1;
+}
+
+/*
+ * Process a received frame. The node associated with the sender
+ * should be supplied. If nothing was found in the node table then
+ * the caller is assumed to supply a reference to iv_bss instead.
+ * The RSSI and a timestamp are also supplied. The RSSI data is used
+ * during AP scanning to select an AP to associate with; it can have
+ * any units so long as values have consistent units and higher values
+ * mean ``better signal''. The receive timestamp is currently not used
+ * by the 802.11 layer.
+ */
+static int
+sta_input(struct ieee80211_node *ni, struct mbuf *m, int rssi, int nf)
+{
+#define SEQ_LEQ(a,b) ((int)((a)-(b)) <= 0)
+#define HAS_SEQ(type) ((type & 0x4) == 0)
+ struct ieee80211vap *vap = ni->ni_vap;
+ struct ieee80211com *ic = ni->ni_ic;
+ struct ifnet *ifp = vap->iv_ifp;
+ struct ieee80211_frame *wh;
+ struct ieee80211_key *key;
+ struct ether_header *eh;
+ int hdrspace, need_tap = 1; /* mbuf needs to be tapped */
+ uint8_t dir, type, subtype, qos;
+ uint8_t *bssid;
+ uint16_t rxseq;
+
+ if (m->m_flags & M_AMPDU_MPDU) {
+ /*
+ * Fastpath for A-MPDU reorder q resubmission. Frames
+ * w/ M_AMPDU_MPDU marked have already passed through
+ * here but were received out of order and been held on
+ * the reorder queue. When resubmitted they are marked
+ * with the M_AMPDU_MPDU flag and we can bypass most of
+ * the normal processing.
+ */
+ wh = mtod(m, struct ieee80211_frame *);
+ type = IEEE80211_FC0_TYPE_DATA;
+ dir = wh->i_fc[1] & IEEE80211_FC1_DIR_MASK;
+ subtype = IEEE80211_FC0_SUBTYPE_QOS;
+ hdrspace = ieee80211_hdrspace(ic, wh); /* XXX optimize? */
+ goto resubmit_ampdu;
+ }
+
+ KASSERT(ni != NULL, ("null node"));
+ ni->ni_inact = ni->ni_inact_reload;
+
+ type = -1; /* undefined */
+
+ if (m->m_pkthdr.len < sizeof(struct ieee80211_frame_min)) {
+ IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_ANY,
+ ni->ni_macaddr, NULL,
+ "too short (1): len %u", m->m_pkthdr.len);
+ vap->iv_stats.is_rx_tooshort++;
+ goto out;
+ }
+ /*
+ * Bit of a cheat here, we use a pointer for a 3-address
+ * frame format but don't reference fields beyond
+ * ieee80211_frame_min w/o first validating the data is
+ * present.
+ */
+ wh = mtod(m, struct ieee80211_frame *);
+
+ if ((wh->i_fc[0] & IEEE80211_FC0_VERSION_MASK) !=
+ IEEE80211_FC0_VERSION_0) {
+ IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_ANY,
+ ni->ni_macaddr, NULL, "wrong version, fc %02x:%02x",
+ wh->i_fc[0], wh->i_fc[1]);
+ vap->iv_stats.is_rx_badversion++;
+ goto err;
+ }
+
+ dir = wh->i_fc[1] & IEEE80211_FC1_DIR_MASK;
+ type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
+ subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
+ if ((ic->ic_flags & IEEE80211_F_SCAN) == 0) {
+ bssid = wh->i_addr2;
+ if (!IEEE80211_ADDR_EQ(bssid, ni->ni_bssid)) {
+ /* not from our bss; not interested */
+ IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_INPUT,
+ bssid, NULL, "%s", "not to bss");
+ vap->iv_stats.is_rx_wrongbss++;
+ goto out;
+ }
+ IEEE80211_RSSI_LPF(ni->ni_avgrssi, rssi);
+ ni->ni_noise = nf;
+ if (HAS_SEQ(type) && !IEEE80211_IS_MULTICAST(wh->i_addr1)) {
+ uint8_t tid = ieee80211_gettid(wh);
+ if (IEEE80211_QOS_HAS_SEQ(wh) &&
+ TID_TO_WME_AC(tid) >= WME_AC_VI)
+ ic->ic_wme.wme_hipri_traffic++;
+ rxseq = le16toh(*(uint16_t *)wh->i_seq);
+ if ((ni->ni_flags & IEEE80211_NODE_HT) == 0 &&
+ (wh->i_fc[1] & IEEE80211_FC1_RETRY) &&
+ SEQ_LEQ(rxseq, ni->ni_rxseqs[tid])) {
+ /* duplicate, discard */
+ IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_INPUT,
+ bssid, "duplicate",
+ "seqno <%u,%u> fragno <%u,%u> tid %u",
+ rxseq >> IEEE80211_SEQ_SEQ_SHIFT,
+ ni->ni_rxseqs[tid] >>
+ IEEE80211_SEQ_SEQ_SHIFT,
+ rxseq & IEEE80211_SEQ_FRAG_MASK,
+ ni->ni_rxseqs[tid] &
+ IEEE80211_SEQ_FRAG_MASK,
+ tid);
+ vap->iv_stats.is_rx_dup++;
+ IEEE80211_NODE_STAT(ni, rx_dup);
+ goto out;
+ }
+ ni->ni_rxseqs[tid] = rxseq;
+ }
+ }
+
+ switch (type) {
+ case IEEE80211_FC0_TYPE_DATA:
+ hdrspace = ieee80211_hdrspace(ic, wh);
+ if (m->m_len < hdrspace &&
+ (m = m_pullup(m, hdrspace)) == NULL) {
+ IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_ANY,
+ ni->ni_macaddr, NULL,
+ "data too short: expecting %u", hdrspace);
+ vap->iv_stats.is_rx_tooshort++;
+ goto out; /* XXX */
+ }
+ /*
+ * Handle A-MPDU re-ordering. If the frame is to be
+ * processed directly then ieee80211_ampdu_reorder
+ * will return 0; otherwise it has consumed the mbuf
+ * and we should do nothing more with it.
+ */
+ if ((m->m_flags & M_AMPDU) &&
+ (dir == IEEE80211_FC1_DIR_FROMDS ||
+ dir == IEEE80211_FC1_DIR_DSTODS) &&
+ ieee80211_ampdu_reorder(ni, m) != 0) {
+ m = NULL;
+ goto out;
+ }
+ resubmit_ampdu:
+ if (dir == IEEE80211_FC1_DIR_FROMDS) {
+ if ((ifp->if_flags & IFF_SIMPLEX) &&
+ isfromds_mcastecho(vap, wh)) {
+ /*
+ * In an IEEE 802.11 network, multicast
+ * packets we send are re-broadcast by
+ * the AP; silently discard them on a
+ * SIMPLEX interface.
+ */
+ IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT,
+ wh, "data", "%s", "multicast echo");
+ vap->iv_stats.is_rx_mcastecho++;
+ goto out;
+ }
+ if ((vap->iv_flags & IEEE80211_F_DWDS) &&
+ IEEE80211_IS_MULTICAST(wh->i_addr1)) {
+ /*
+ * DWDS sta's must drop 3-address mcast frames
+ * as they will be sent separately as a 4-addr
+ * frame. Accepting the 3-addr frame will
+ * confuse the bridge into thinking the sending
+ * sta is located at the end of the WDS link.
+ */
+ IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT, wh,
+ "3-address data", "%s", "DWDS enabled");
+ vap->iv_stats.is_rx_mcastecho++;
+ goto out;
+ }
+ } else if (dir == IEEE80211_FC1_DIR_DSTODS) {
+ if ((vap->iv_flags & IEEE80211_F_DWDS) == 0) {
+ IEEE80211_DISCARD(vap,
+ IEEE80211_MSG_INPUT, wh, "4-address data",
+ "%s", "DWDS not enabled");
+ vap->iv_stats.is_rx_wrongdir++;
+ goto out;
+ }
+ if ((ifp->if_flags & IFF_SIMPLEX) &&
+ isdstods_mcastecho(vap, wh)) {
+ /*
+ * In an IEEE 802.11 network, multicast
+ * packets we send are re-broadcast by
+ * the AP; silently discard them on a
+ * SIMPLEX interface.
+ */
+ IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT, wh,
+ "4-address data", "%s", "multicast echo");
+ vap->iv_stats.is_rx_mcastecho++;
+ goto out;
+ }
+ } else {
+ IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT, wh,
+ "data", "incorrect dir 0x%x", dir);
+ vap->iv_stats.is_rx_wrongdir++;
+ goto out;
+ }
+
+ /*
+ * Handle privacy requirements. Note that we
+ * must not be preempted from here until after
+ * we (potentially) call ieee80211_crypto_demic;
+ * otherwise we may violate assumptions in the
+ * crypto cipher modules used to do delayed update
+ * of replay sequence numbers.
+ */
+ if (wh->i_fc[1] & IEEE80211_FC1_WEP) {
+ if ((vap->iv_flags & IEEE80211_F_PRIVACY) == 0) {
+ /*
+ * Discard encrypted frames when privacy is off.
+ */
+ IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT,
+ wh, "WEP", "%s", "PRIVACY off");
+ vap->iv_stats.is_rx_noprivacy++;
+ IEEE80211_NODE_STAT(ni, rx_noprivacy);
+ goto out;
+ }
+ key = ieee80211_crypto_decap(ni, m, hdrspace);
+ if (key == NULL) {
+ /* NB: stats+msgs handled in crypto_decap */
+ IEEE80211_NODE_STAT(ni, rx_wepfail);
+ goto out;
+ }
+ wh = mtod(m, struct ieee80211_frame *);
+ wh->i_fc[1] &= ~IEEE80211_FC1_WEP;
+ } else {
+ /* XXX M_WEP and IEEE80211_F_PRIVACY */
+ key = NULL;
+ }
+
+ /*
+ * Save QoS bits for use below--before we strip the header.
+ */
+ if (subtype == IEEE80211_FC0_SUBTYPE_QOS) {
+ qos = (dir == IEEE80211_FC1_DIR_DSTODS) ?
+ ((struct ieee80211_qosframe_addr4 *)wh)->i_qos[0] :
+ ((struct ieee80211_qosframe *)wh)->i_qos[0];
+ } else
+ qos = 0;
+
+ /*
+ * Next up, any fragmentation.
+ */
+ if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
+ m = ieee80211_defrag(ni, m, hdrspace);
+ if (m == NULL) {
+ /* Fragment dropped or frame not complete yet */
+ goto out;
+ }
+ }
+ wh = NULL; /* no longer valid, catch any uses */
+
+ /*
+ * Next strip any MSDU crypto bits.
+ */
+ if (key != NULL && !ieee80211_crypto_demic(vap, key, m, 0)) {
+ IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_INPUT,
+ ni->ni_macaddr, "data", "%s", "demic error");
+ vap->iv_stats.is_rx_demicfail++;
+ IEEE80211_NODE_STAT(ni, rx_demicfail);
+ goto out;
+ }
+
+ /* copy to listener after decrypt */
+ if (ieee80211_radiotap_active_vap(vap))
+ ieee80211_radiotap_rx(vap, m);
+ need_tap = 0;
+
+ /*
+ * Finally, strip the 802.11 header.
+ */
+ m = ieee80211_decap(vap, m, hdrspace);
+ if (m == NULL) {
+ /* XXX mask bit to check for both */
+ /* don't count Null data frames as errors */
+ if (subtype == IEEE80211_FC0_SUBTYPE_NODATA ||
+ subtype == IEEE80211_FC0_SUBTYPE_QOS_NULL)
+ goto out;
+ IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_INPUT,
+ ni->ni_macaddr, "data", "%s", "decap error");
+ vap->iv_stats.is_rx_decap++;
+ IEEE80211_NODE_STAT(ni, rx_decap);
+ goto err;
+ }
+ eh = mtod(m, struct ether_header *);
+ if (!ieee80211_node_is_authorized(ni)) {
+ /*
+ * Deny any non-PAE frames received prior to
+ * authorization. For open/shared-key
+ * authentication the port is marked authorized
+ * after authentication completes. For 802.1x
+ * the port is not marked authorized by the
+ * authenticator until the handshake has completed.
+ */
+ if (eh->ether_type != htons(ETHERTYPE_PAE)) {
+ IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_INPUT,
+ eh->ether_shost, "data",
+ "unauthorized port: ether type 0x%x len %u",
+ eh->ether_type, m->m_pkthdr.len);
+ vap->iv_stats.is_rx_unauth++;
+ IEEE80211_NODE_STAT(ni, rx_unauth);
+ goto err;
+ }
+ } else {
+ /*
+ * When denying unencrypted frames, discard
+ * any non-PAE frames received without encryption.
+ */
+ if ((vap->iv_flags & IEEE80211_F_DROPUNENC) &&
+ (key == NULL && (m->m_flags & M_WEP) == 0) &&
+ eh->ether_type != htons(ETHERTYPE_PAE)) {
+ /*
+ * Drop unencrypted frames.
+ */
+ vap->iv_stats.is_rx_unencrypted++;
+ IEEE80211_NODE_STAT(ni, rx_unencrypted);
+ goto out;
+ }
+ }
+ /* XXX require HT? */
+ if (qos & IEEE80211_QOS_AMSDU) {
+ m = ieee80211_decap_amsdu(ni, m);
+ if (m == NULL)
+ return IEEE80211_FC0_TYPE_DATA;
+ } else {
+#ifdef IEEE80211_SUPPORT_SUPERG
+ m = ieee80211_decap_fastframe(vap, ni, m);
+ if (m == NULL)
+ return IEEE80211_FC0_TYPE_DATA;
+#endif
+ }
+ ieee80211_deliver_data(vap, ni, m);
+ return IEEE80211_FC0_TYPE_DATA;
+
+ case IEEE80211_FC0_TYPE_MGT:
+ vap->iv_stats.is_rx_mgmt++;
+ IEEE80211_NODE_STAT(ni, rx_mgmt);
+ if (dir != IEEE80211_FC1_DIR_NODS) {
+ IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT,
+ wh, "data", "incorrect dir 0x%x", dir);
+ vap->iv_stats.is_rx_wrongdir++;
+ goto err;
+ }
+ if (m->m_pkthdr.len < sizeof(struct ieee80211_frame)) {
+ IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_ANY,
+ ni->ni_macaddr, "mgt", "too short: len %u",
+ m->m_pkthdr.len);
+ vap->iv_stats.is_rx_tooshort++;
+ goto out;
+ }
+#ifdef IEEE80211_DEBUG
+ if ((ieee80211_msg_debug(vap) && doprint(vap, subtype)) ||
+ ieee80211_msg_dumppkts(vap)) {
+ if_printf(ifp, "received %s from %s rssi %d\n",
+ ieee80211_mgt_subtype_name[subtype >>
+ IEEE80211_FC0_SUBTYPE_SHIFT],
+ ether_sprintf(wh->i_addr2), rssi);
+ }
+#endif
+ if (wh->i_fc[1] & IEEE80211_FC1_WEP) {
+ if (subtype != IEEE80211_FC0_SUBTYPE_AUTH) {
+ /*
+ * Only shared key auth frames with a challenge
+ * should be encrypted; discard all others.
+ */
+ IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT,
+ wh, ieee80211_mgt_subtype_name[subtype >>
+ IEEE80211_FC0_SUBTYPE_SHIFT],
+ "%s", "WEP set but not permitted");
+ vap->iv_stats.is_rx_mgtdiscard++; /* XXX */
+ goto out;
+ }
+ if ((vap->iv_flags & IEEE80211_F_PRIVACY) == 0) {
+ /*
+ * Discard encrypted frames when privacy is off.
+ */
+ IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT,
+ wh, "mgt", "%s", "WEP set but PRIVACY off");
+ vap->iv_stats.is_rx_noprivacy++;
+ goto out;
+ }
+ hdrspace = ieee80211_hdrspace(ic, wh);
+ key = ieee80211_crypto_decap(ni, m, hdrspace);
+ if (key == NULL) {
+ /* NB: stats+msgs handled in crypto_decap */
+ goto out;
+ }
+ wh = mtod(m, struct ieee80211_frame *);
+ wh->i_fc[1] &= ~IEEE80211_FC1_WEP;
+ }
+ vap->iv_recv_mgmt(ni, m, subtype, rssi, nf);
+ goto out;
+
+ case IEEE80211_FC0_TYPE_CTL:
+ vap->iv_stats.is_rx_ctl++;
+ IEEE80211_NODE_STAT(ni, rx_ctrl);
+ vap->iv_recv_ctl(ni, m, subtype);
+ goto out;
+
+ default:
+ IEEE80211_DISCARD(vap, IEEE80211_MSG_ANY,
+ wh, NULL, "bad frame type 0x%x", type);
+ /* should not come here */
+ break;
+ }
+err:
+ ifp->if_ierrors++;
+out:
+ if (m != NULL) {
+ if (need_tap && ieee80211_radiotap_active_vap(vap))
+ ieee80211_radiotap_rx(vap, m);
+ m_freem(m);
+ }
+ return type;
+#undef SEQ_LEQ
+}
+
+static void
+sta_auth_open(struct ieee80211_node *ni, struct ieee80211_frame *wh,
+ int rssi, int nf, uint16_t seq, uint16_t status)
+{
+ struct ieee80211vap *vap = ni->ni_vap;
+
+ if (ni->ni_authmode == IEEE80211_AUTH_SHARED) {
+ IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_AUTH,
+ ni->ni_macaddr, "open auth",
+ "bad sta auth mode %u", ni->ni_authmode);
+ vap->iv_stats.is_rx_bad_auth++; /* XXX */
+ return;
+ }
+ if (vap->iv_state != IEEE80211_S_AUTH ||
+ seq != IEEE80211_AUTH_OPEN_RESPONSE) {
+ vap->iv_stats.is_rx_bad_auth++;
+ return;
+ }
+ if (status != 0) {
+ IEEE80211_NOTE(vap, IEEE80211_MSG_DEBUG | IEEE80211_MSG_AUTH,
+ ni, "open auth failed (reason %d)", status);
+ vap->iv_stats.is_rx_auth_fail++;
+ vap->iv_stats.is_rx_authfail_code = status;
+ ieee80211_new_state(vap, IEEE80211_S_SCAN,
+ IEEE80211_SCAN_FAIL_STATUS);
+ } else
+ ieee80211_new_state(vap, IEEE80211_S_ASSOC, 0);
+}
+
+static void
+sta_auth_shared(struct ieee80211_node *ni, struct ieee80211_frame *wh,
+ uint8_t *frm, uint8_t *efrm, int rssi, int nf,
+ uint16_t seq, uint16_t status)
+{
+ struct ieee80211vap *vap = ni->ni_vap;
+ uint8_t *challenge;
+ int estatus;
+
+ /*
+ * NB: this can happen as we allow pre-shared key
+ * authentication to be enabled w/o wep being turned
+ * on so that configuration of these can be done
+ * in any order. It may be better to enforce the
+ * ordering in which case this check would just be
+ * for sanity/consistency.
+ */
+ if ((vap->iv_flags & IEEE80211_F_PRIVACY) == 0) {
+ IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_AUTH,
+ ni->ni_macaddr, "shared key auth",
+ "%s", " PRIVACY is disabled");
+ estatus = IEEE80211_STATUS_ALG;
+ goto bad;
+ }
+ /*
+ * Pre-shared key authentication is evil; accept
+ * it only if explicitly configured (it is supported
+ * mainly for compatibility with clients like OS X).
+ */
+ if (ni->ni_authmode != IEEE80211_AUTH_AUTO &&
+ ni->ni_authmode != IEEE80211_AUTH_SHARED) {
+ IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_AUTH,
+ ni->ni_macaddr, "shared key auth",
+ "bad sta auth mode %u", ni->ni_authmode);
+ vap->iv_stats.is_rx_bad_auth++; /* XXX maybe a unique error? */
+ estatus = IEEE80211_STATUS_ALG;
+ goto bad;
+ }
+
+ challenge = NULL;
+ if (frm + 1 < efrm) {
+ if ((frm[1] + 2) > (efrm - frm)) {
+ IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_AUTH,
+ ni->ni_macaddr, "shared key auth",
+ "ie %d/%d too long",
+ frm[0], (frm[1] + 2) - (efrm - frm));
+ vap->iv_stats.is_rx_bad_auth++;
+ estatus = IEEE80211_STATUS_CHALLENGE;
+ goto bad;
+ }
+ if (*frm == IEEE80211_ELEMID_CHALLENGE)
+ challenge = frm;
+ frm += frm[1] + 2;
+ }
+ switch (seq) {
+ case IEEE80211_AUTH_SHARED_CHALLENGE:
+ case IEEE80211_AUTH_SHARED_RESPONSE:
+ if (challenge == NULL) {
+ IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_AUTH,
+ ni->ni_macaddr, "shared key auth",
+ "%s", "no challenge");
+ vap->iv_stats.is_rx_bad_auth++;
+ estatus = IEEE80211_STATUS_CHALLENGE;
+ goto bad;
+ }
+ if (challenge[1] != IEEE80211_CHALLENGE_LEN) {
+ IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_AUTH,
+ ni->ni_macaddr, "shared key auth",
+ "bad challenge len %d", challenge[1]);
+ vap->iv_stats.is_rx_bad_auth++;
+ estatus = IEEE80211_STATUS_CHALLENGE;
+ goto bad;
+ }
+ default:
+ break;
+ }
+ if (vap->iv_state != IEEE80211_S_AUTH)
+ return;
+ switch (seq) {
+ case IEEE80211_AUTH_SHARED_PASS:
+ if (ni->ni_challenge != NULL) {
+ free(ni->ni_challenge, M_80211_NODE);
+ ni->ni_challenge = NULL;
+ }
+ if (status != 0) {
+ IEEE80211_NOTE_FRAME(vap,
+ IEEE80211_MSG_DEBUG | IEEE80211_MSG_AUTH, wh,
+ "shared key auth failed (reason %d)", status);
+ vap->iv_stats.is_rx_auth_fail++;
+ vap->iv_stats.is_rx_authfail_code = status;
+ return;
+ }
+ ieee80211_new_state(vap, IEEE80211_S_ASSOC, 0);
+ break;
+ case IEEE80211_AUTH_SHARED_CHALLENGE:
+ if (!ieee80211_alloc_challenge(ni))
+ return;
+ /* XXX could optimize by passing recvd challenge */
+ memcpy(ni->ni_challenge, &challenge[2], challenge[1]);
+ IEEE80211_SEND_MGMT(ni,
+ IEEE80211_FC0_SUBTYPE_AUTH, seq + 1);
+ break;
+ default:
+ IEEE80211_DISCARD(vap, IEEE80211_MSG_AUTH,
+ wh, "shared key auth", "bad seq %d", seq);
+ vap->iv_stats.is_rx_bad_auth++;
+ return;
+ }
+ return;
+bad:
+ /*
+ * Kick the state machine. This short-circuits
+ * using the mgt frame timeout to trigger the
+ * state transition.
+ */
+ if (vap->iv_state == IEEE80211_S_AUTH)
+ ieee80211_new_state(vap, IEEE80211_S_SCAN,
+ IEEE80211_SCAN_FAIL_STATUS);
+}
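+
+/*
+ * NB: the shared key exchange handled above is a 4-frame sequence:
+ *	seq 1 (REQUEST)   sta -> ap	request authentication
+ *	seq 2 (CHALLENGE) ap -> sta	128-octet challenge text
+ *	seq 3 (RESPONSE)  sta -> ap	challenge echoed, WEP-encrypted
+ *	seq 4 (PASS)	  ap -> sta	final status
+ * As a station we act on seq 2 (save the challenge, send seq 3) and
+ * on seq 4 (advance to ASSOC on status 0).
+ */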
+
+static int
+ieee80211_parse_wmeparams(struct ieee80211vap *vap, uint8_t *frm,
+ const struct ieee80211_frame *wh)
+{
+#define MS(_v, _f) (((_v) & _f) >> _f##_S)
+ struct ieee80211_wme_state *wme = &vap->iv_ic->ic_wme;
+ u_int len = frm[1], qosinfo;
+ int i;
+
+ if (len < sizeof(struct ieee80211_wme_param)-2) {
+ IEEE80211_DISCARD_IE(vap,
+ IEEE80211_MSG_ELEMID | IEEE80211_MSG_WME,
+ wh, "WME", "too short, len %u", len);
+ return -1;
+ }
+ qosinfo = frm[__offsetof(struct ieee80211_wme_param, param_qosInfo)];
+ qosinfo &= WME_QOSINFO_COUNT;
+ /* XXX do proper check for wraparound */
+ if (qosinfo == wme->wme_wmeChanParams.cap_info)
+ return 0;
+ frm += __offsetof(struct ieee80211_wme_param, params_acParams);
+ for (i = 0; i < WME_NUM_AC; i++) {
+ struct wmeParams *wmep =
+ &wme->wme_wmeChanParams.cap_wmeParams[i];
+ /* NB: ACI not used */
+ wmep->wmep_acm = MS(frm[0], WME_PARAM_ACM);
+ wmep->wmep_aifsn = MS(frm[0], WME_PARAM_AIFSN);
+ wmep->wmep_logcwmin = MS(frm[1], WME_PARAM_LOGCWMIN);
+ wmep->wmep_logcwmax = MS(frm[1], WME_PARAM_LOGCWMAX);
+ wmep->wmep_txopLimit = LE_READ_2(frm+2);
+ frm += 4;
+ }
+ wme->wme_wmeChanParams.cap_info = qosinfo;
+ return 1;
+#undef MS
+}
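+
+/*
+ * NB: MS() above extracts a bit field given a mask F and its shift
+ * F##_S; e.g. for a hypothetical mask F = 0xf0 with F_S = 4,
+ * MS(0xa5, F) = 0xa. The acm/aifsn/logcwmin/logcwmax subfields are
+ * pulled out of each AC parameter record this way.
+ */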
+
+/*
+ * Process 11h Channel Switch Announcement (CSA) ie. If this
+ * is the first CSA then initiate the switch. Otherwise we
+ * track state and trigger completion and/or cancel of the switch.
+ * XXX should be public for IBSS use
+ */
+static void
+ieee80211_parse_csaparams(struct ieee80211vap *vap, uint8_t *frm,
+ const struct ieee80211_frame *wh)
+{
+ struct ieee80211com *ic = vap->iv_ic;
+ const struct ieee80211_csa_ie *csa =
+ (const struct ieee80211_csa_ie *) frm;
+
+ KASSERT(vap->iv_state >= IEEE80211_S_RUN,
+ ("state %s", ieee80211_state_name[vap->iv_state]));
+
+ if (csa->csa_mode > 1) {
+ IEEE80211_DISCARD_IE(vap,
+ IEEE80211_MSG_ELEMID | IEEE80211_MSG_DOTH,
+ wh, "CSA", "invalid mode %u", csa->csa_mode);
+ return;
+ }
+ IEEE80211_LOCK(ic);
+ if ((ic->ic_flags & IEEE80211_F_CSAPENDING) == 0) {
+ /*
+ * Convert the channel number to a channel reference. We
+ * try first to preserve turbo attribute of the current
+ * channel then fallback. Note this will not work if the
+ * CSA specifies a channel that requires a band switch (e.g.
+ * 11a => 11g). This is intentional as 11h is defined only
+ * for 5GHz/11a and because the switch does not involve a
+ * reassociation, protocol state (capabilities, negotiated
+ * rates, etc) may/will be wrong.
+ */
+ struct ieee80211_channel *c =
+ ieee80211_find_channel_byieee(ic, csa->csa_newchan,
+ (ic->ic_bsschan->ic_flags & IEEE80211_CHAN_ALLTURBO));
+ if (c == NULL) {
+ c = ieee80211_find_channel_byieee(ic,
+ csa->csa_newchan,
+ (ic->ic_bsschan->ic_flags & IEEE80211_CHAN_ALL));
+ if (c == NULL) {
+ IEEE80211_DISCARD_IE(vap,
+ IEEE80211_MSG_ELEMID | IEEE80211_MSG_DOTH,
+ wh, "CSA", "invalid channel %u",
+ csa->csa_newchan);
+ goto done;
+ }
+ }
+#if IEEE80211_CSA_COUNT_MIN > 0
+ if (csa->csa_count < IEEE80211_CSA_COUNT_MIN) {
+ /*
+ * Require at least IEEE80211_CSA_COUNT_MIN count to
+ * reduce the risk of being redirected by a fabricated
+ * CSA. If a valid CSA is dropped we'll still get a
+ * beacon miss when the AP leaves the channel so we'll
+ * eventually follow to the new channel.
+ *
+ * NOTE: this violates the 11h spec that states that
+ * count may be any value and if 0 then a switch
+ * should happen asap.
+ */
+ IEEE80211_DISCARD_IE(vap,
+ IEEE80211_MSG_ELEMID | IEEE80211_MSG_DOTH,
+ wh, "CSA", "count %u too small, must be >= %u",
+ csa->csa_count, IEEE80211_CSA_COUNT_MIN);
+ goto done;
+ }
+#endif
+ ieee80211_csa_startswitch(ic, c, csa->csa_mode, csa->csa_count);
+ } else {
+ /*
+ * Validate this ie against the initial CSA. We require
+ * mode and channel not change and the count must be
+ * monotonically decreasing. This may be pointless and
+ * canceling the switch as a result may be too paranoid but
+ * in the worst case if we drop out of CSA because of this
+ * and the AP does move then we'll just end up taking a
+ * beacon miss and scan to find the AP.
+ *
+ * XXX may want <= on count as we also process ProbeResp
+ * frames and those may come in w/ the same count as the
+ * previous beacon; but doing so leaves us open to a stuck
+ * count until we add a dead-man timer
+ */
+ if (!(csa->csa_count < ic->ic_csa_count &&
+ csa->csa_mode == ic->ic_csa_mode &&
+ csa->csa_newchan == ieee80211_chan2ieee(ic, ic->ic_csa_newchan))) {
+ IEEE80211_NOTE_FRAME(vap, IEEE80211_MSG_DOTH, wh,
+ "CSA ie mismatch, initial ie <%d,%d,%d>, "
+ "this ie <%d,%d,%d>", ic->ic_csa_mode,
+ ic->ic_csa_newchan, ic->ic_csa_count,
+ csa->csa_mode, csa->csa_newchan, csa->csa_count);
+ ieee80211_csa_cancelswitch(ic);
+ } else {
+ if (csa->csa_count <= 1)
+ ieee80211_csa_completeswitch(ic);
+ else
+ ic->ic_csa_count = csa->csa_count;
+ }
+ }
+done:
+ IEEE80211_UNLOCK(ic);
+}
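+
+/*
+ * Example timeline (illustrative): an AP moving to another channel
+ * with mode 1 and count 3 sends CSA ie's counting 3, 2, 1 in its
+ * beacons; we start the switch on the first ie, count the rest down,
+ * complete when the count reaches 1, and cancel if the mode/channel
+ * change or the count goes backwards.
+ */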
+
+/*
+ * Return non-zero if a background scan may be continued:
+ * o bg scan is active
+ * o no channel switch is pending
+ * o there has not been any traffic recently
+ *
+ * Note we do not check if there is an administrative enable;
+ * this is only done to start the scan. We assume that any
+ * change in state will be accompanied by a request to cancel
+ * active scans which will otherwise cause this test to fail.
+ */
+static __inline int
+contbgscan(struct ieee80211vap *vap)
+{
+ struct ieee80211com *ic = vap->iv_ic;
+
+ return ((ic->ic_flags_ext & IEEE80211_FEXT_BGSCAN) &&
+ (ic->ic_flags & IEEE80211_F_CSAPENDING) == 0 &&
+ vap->iv_state == IEEE80211_S_RUN && /* XXX? */
+ time_after(ticks, ic->ic_lastdata + vap->iv_bgscanidle));
+}
+
+/*
+ * Return non-zero if a background scan may be started:
+ * o bg scanning is administratively enabled
+ * o no channel switch is pending
+ * o we are not boosted on a dynamic turbo channel
+ * o there has not been a scan recently
+ * o there has not been any traffic recently
+ */
+static __inline int
+startbgscan(struct ieee80211vap *vap)
+{
+ struct ieee80211com *ic = vap->iv_ic;
+
+ return ((vap->iv_flags & IEEE80211_F_BGSCAN) &&
+ (ic->ic_flags & IEEE80211_F_CSAPENDING) == 0 &&
+#ifdef IEEE80211_SUPPORT_SUPERG
+ !IEEE80211_IS_CHAN_DTURBO(ic->ic_curchan) &&
+#endif
+ time_after(ticks, ic->ic_lastscan + vap->iv_bgscanintvl) &&
+ time_after(ticks, ic->ic_lastdata + vap->iv_bgscanidle));
+}
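+
+/*
+ * NB: time_after() compares tick counts modulo wraparound; e.g. with
+ * hz = 1000 and iv_bgscanidle equivalent to 250 ms, the idle checks
+ * above pass only once no frames have moved for ~250 ms (and, for
+ * startbgscan, once iv_bgscanintvl has elapsed since the last scan).
+ */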
+
+static void
+sta_recv_mgmt(struct ieee80211_node *ni, struct mbuf *m0,
+ int subtype, int rssi, int nf)
+{
+#define ISPROBE(_st) ((_st) == IEEE80211_FC0_SUBTYPE_PROBE_RESP)
+#define ISREASSOC(_st) ((_st) == IEEE80211_FC0_SUBTYPE_REASSOC_RESP)
+ struct ieee80211vap *vap = ni->ni_vap;
+ struct ieee80211com *ic = ni->ni_ic;
+ struct ieee80211_frame *wh;
+ uint8_t *frm, *efrm;
+ uint8_t *rates, *xrates, *wme, *htcap, *htinfo;
+ uint8_t rate;
+
+ wh = mtod(m0, struct ieee80211_frame *);
+ frm = (uint8_t *)&wh[1];
+ efrm = mtod(m0, uint8_t *) + m0->m_len;
+ switch (subtype) {
+ case IEEE80211_FC0_SUBTYPE_PROBE_RESP:
+ case IEEE80211_FC0_SUBTYPE_BEACON: {
+ struct ieee80211_scanparams scan;
+ /*
+ * We process beacon/probe response frames:
+ * o when scanning, or
+ * o in station mode when associated (to collect state
+ * updates such as 802.11g slot time)
+ * Frames otherwise received are discarded.
+ */
+ if (!((ic->ic_flags & IEEE80211_F_SCAN) || ni->ni_associd)) {
+ vap->iv_stats.is_rx_mgtdiscard++;
+ return;
+ }
+ /* XXX probe response in sta mode when !scanning? */
+ if (ieee80211_parse_beacon(ni, m0, &scan) != 0)
+ return;
+ /*
+ * Count frame now that we know it's to be processed.
+ */
+ if (subtype == IEEE80211_FC0_SUBTYPE_BEACON) {
+ vap->iv_stats.is_rx_beacon++; /* XXX remove */
+ IEEE80211_NODE_STAT(ni, rx_beacons);
+ } else
+ IEEE80211_NODE_STAT(ni, rx_proberesp);
+ /*
+ * When operating in station mode, check for state updates.
+ * Be careful to ignore beacons received while doing a
+ * background scan. We consider only 11g/WMM stuff right now.
+ */
+ if (ni->ni_associd != 0 &&
+ ((ic->ic_flags & IEEE80211_F_SCAN) == 0 ||
+ IEEE80211_ADDR_EQ(wh->i_addr2, ni->ni_bssid))) {
+ /* record tsf of last beacon */
+ memcpy(ni->ni_tstamp.data, scan.tstamp,
+ sizeof(ni->ni_tstamp));
+ /* count beacon frame for s/w bmiss handling */
+ vap->iv_swbmiss_count++;
+ vap->iv_bmiss_count = 0;
+ if (ni->ni_erp != scan.erp) {
+ IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_ASSOC,
+ wh->i_addr2,
+ "erp change: was 0x%x, now 0x%x",
+ ni->ni_erp, scan.erp);
+ if (IEEE80211_IS_CHAN_ANYG(ic->ic_curchan) &&
+ (ni->ni_erp & IEEE80211_ERP_USE_PROTECTION))
+ ic->ic_flags |= IEEE80211_F_USEPROT;
+ else
+ ic->ic_flags &= ~IEEE80211_F_USEPROT;
+ ni->ni_erp = scan.erp;
+ /* XXX statistic */
+ /* XXX driver notification */
+ }
+ if ((ni->ni_capinfo ^ scan.capinfo) & IEEE80211_CAPINFO_SHORT_SLOTTIME) {
+ IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_ASSOC,
+ wh->i_addr2,
+ "capabilities change: was 0x%x, now 0x%x",
+ ni->ni_capinfo, scan.capinfo);
+ /*
+ * NB: we assume short preamble doesn't
+ * change dynamically
+ */
+ ieee80211_set_shortslottime(ic,
+ IEEE80211_IS_CHAN_A(ic->ic_bsschan) ||
+ (scan.capinfo & IEEE80211_CAPINFO_SHORT_SLOTTIME));
+ ni->ni_capinfo = (ni->ni_capinfo &~ IEEE80211_CAPINFO_SHORT_SLOTTIME)
+ | (scan.capinfo & IEEE80211_CAPINFO_SHORT_SLOTTIME);
+ /* XXX statistic */
+ }
+ if (scan.wme != NULL &&
+ (ni->ni_flags & IEEE80211_NODE_QOS) &&
+ ieee80211_parse_wmeparams(vap, scan.wme, wh) > 0)
+ ieee80211_wme_updateparams(vap);
+#ifdef IEEE80211_SUPPORT_SUPERG
+ if (scan.ath != NULL)
+ ieee80211_parse_athparams(ni, scan.ath, wh);
+#endif
+ if (scan.htcap != NULL && scan.htinfo != NULL &&
+ (vap->iv_flags_ht & IEEE80211_FHT_HT)) {
+ ieee80211_ht_updateparams(ni,
+ scan.htcap, scan.htinfo);
+ /* XXX state changes? */
+ }
+ if (scan.tim != NULL) {
+ struct ieee80211_tim_ie *tim =
+ (struct ieee80211_tim_ie *) scan.tim;
+#if 0
+ int aid = IEEE80211_AID(ni->ni_associd);
+ int ix = aid / NBBY;
+ int min = tim->tim_bitctl &~ 1;
+ int max = tim->tim_len + min - 4;
+ if ((tim->tim_bitctl&1) ||
+ (min <= ix && ix <= max &&
+ isset(tim->tim_bitmap - min, aid))) {
+ /*
+ * XXX Do not let bg scan kick off;
+ * we are expecting data.
+ */
+ ic->ic_lastdata = ticks;
+ ieee80211_sta_pwrsave(vap, 0);
+ }
+#endif
+ ni->ni_dtim_count = tim->tim_count;
+ ni->ni_dtim_period = tim->tim_period;
+ }
+ if (scan.csa != NULL &&
+ (vap->iv_flags & IEEE80211_F_DOTH))
+ ieee80211_parse_csaparams(vap, scan.csa, wh);
+ else if (ic->ic_flags & IEEE80211_F_CSAPENDING) {
+ /*
+ * No CSA ie or 11h disabled, but a channel
+ * switch is pending; drop out so we aren't
+ * stuck in CSA state. If the AP really is
+ * moving we'll get a beacon miss and scan.
+ */
+ IEEE80211_LOCK(ic);
+ ieee80211_csa_cancelswitch(ic);
+ IEEE80211_UNLOCK(ic);
+ }
+ /*
+ * If scanning, pass the info to the scan module.
+ * Otherwise, check if it's the right time to do
+ * a background scan. Background scanning must
+ * be enabled and we must not be operating in the
+ * turbo phase of dynamic turbo mode. Then, if
+ * it's been a while since the last background
+ * scan and no data frames have come through
+ * recently, kick off a scan. Note that this
+ * is the mechanism by which a background scan
+ * is started _and_ continued each time we
+ * return on-channel to receive a beacon from
+ * our ap.
+ */
+ if (ic->ic_flags & IEEE80211_F_SCAN) {
+ ieee80211_add_scan(vap, &scan, wh,
+ subtype, rssi, nf);
+ } else if (contbgscan(vap)) {
+ ieee80211_bg_scan(vap, 0);
+ } else if (startbgscan(vap)) {
+ vap->iv_stats.is_scan_bg++;
+#if 0
+ /* wake up if we are sleeping */
+ ieee80211_set_pwrsave(vap, 0);
+#endif
+ ieee80211_bg_scan(vap, 0);
+ }
+ return;
+ }
+ /*
+ * If scanning, just pass information to the scan module.
+ */
+ if (ic->ic_flags & IEEE80211_F_SCAN) {
+ if (ic->ic_flags_ext & IEEE80211_FEXT_PROBECHAN) {
+ /*
+ * Actively scanning a channel marked passive;
+ * send a probe request now that we know there
+ * is 802.11 traffic present.
+ *
+ * XXX check if the beacon we recv'd gives
+ * us what we need and suppress the probe req
+ */
+ ieee80211_probe_curchan(vap, 1);
+ ic->ic_flags_ext &= ~IEEE80211_FEXT_PROBECHAN;
+ }
+ ieee80211_add_scan(vap, &scan, wh, subtype, rssi, nf);
+ return;
+ }
+ break;
+ }
+
+ case IEEE80211_FC0_SUBTYPE_AUTH: {
+ uint16_t algo, seq, status;
+ /*
+ * auth frame format
+ * [2] algorithm
+ * [2] sequence
+ * [2] status
+ * [tlv*] challenge
+ */
+ IEEE80211_VERIFY_LENGTH(efrm - frm, 6, return);
+ algo = le16toh(*(uint16_t *)frm);
+ seq = le16toh(*(uint16_t *)(frm + 2));
+ status = le16toh(*(uint16_t *)(frm + 4));
+ IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_AUTH, wh->i_addr2,
+ "recv auth frame with algorithm %d seq %d", algo, seq);
+
+ if (vap->iv_flags & IEEE80211_F_COUNTERM) {
+ IEEE80211_DISCARD(vap,
+ IEEE80211_MSG_AUTH | IEEE80211_MSG_CRYPTO,
+ wh, "auth", "%s", "TKIP countermeasures enabled");
+ vap->iv_stats.is_rx_auth_countermeasures++;
+ if (vap->iv_opmode == IEEE80211_M_HOSTAP) {
+ ieee80211_send_error(ni, wh->i_addr2,
+ IEEE80211_FC0_SUBTYPE_AUTH,
+ IEEE80211_REASON_MIC_FAILURE);
+ }
+ return;
+ }
+ if (algo == IEEE80211_AUTH_ALG_SHARED)
+ sta_auth_shared(ni, wh, frm + 6, efrm, rssi, nf,
+ seq, status);
+ else if (algo == IEEE80211_AUTH_ALG_OPEN)
+ sta_auth_open(ni, wh, rssi, nf, seq, status);
+ else {
+ IEEE80211_DISCARD(vap, IEEE80211_MSG_ANY,
+ wh, "auth", "unsupported alg %d", algo);
+ vap->iv_stats.is_rx_auth_unsupported++;
+ return;
+ }
+ break;
+ }
+
+ case IEEE80211_FC0_SUBTYPE_ASSOC_RESP:
+ case IEEE80211_FC0_SUBTYPE_REASSOC_RESP: {
+ uint16_t capinfo, associd;
+ uint16_t status;
+
+ if (vap->iv_state != IEEE80211_S_ASSOC) {
+ vap->iv_stats.is_rx_mgtdiscard++;
+ return;
+ }
+
+ /*
+ * asresp frame format
+ * [2] capability information
+ * [2] status
+ * [2] association ID
+ * [tlv] supported rates
+ * [tlv] extended supported rates
+ * [tlv] WME
+ * [tlv] HT capabilities
+ * [tlv] HT info
+ */
+ IEEE80211_VERIFY_LENGTH(efrm - frm, 6, return);
+ ni = vap->iv_bss;
+ capinfo = le16toh(*(uint16_t *)frm);
+ frm += 2;
+ status = le16toh(*(uint16_t *)frm);
+ frm += 2;
+ if (status != 0) {
+ IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_ASSOC,
+ wh->i_addr2, "%sassoc failed (reason %d)",
+ ISREASSOC(subtype) ? "re" : "", status);
+ vap->iv_stats.is_rx_auth_fail++; /* XXX */
+ return;
+ }
+ associd = le16toh(*(uint16_t *)frm);
+ frm += 2;
+
+ rates = xrates = wme = htcap = htinfo = NULL;
+ while (efrm - frm > 1) {
+ IEEE80211_VERIFY_LENGTH(efrm - frm, frm[1] + 2, return);
+ switch (*frm) {
+ case IEEE80211_ELEMID_RATES:
+ rates = frm;
+ break;
+ case IEEE80211_ELEMID_XRATES:
+ xrates = frm;
+ break;
+ case IEEE80211_ELEMID_HTCAP:
+ htcap = frm;
+ break;
+ case IEEE80211_ELEMID_HTINFO:
+ htinfo = frm;
+ break;
+ case IEEE80211_ELEMID_VENDOR:
+ if (iswmeoui(frm))
+ wme = frm;
+ else if (vap->iv_flags_ht & IEEE80211_FHT_HTCOMPAT) {
+ /*
+ * Accept pre-draft HT ie's if the
+ * standard ones have not been seen.
+ */
+ if (ishtcapoui(frm)) {
+ if (htcap == NULL)
+ htcap = frm;
+ } else if (ishtinfooui(frm)) {
+ if (htinfo == NULL)
+ htinfo = frm;
+ }
+ }
+ /* XXX Atheros OUI support */
+ break;
+ }
+ frm += frm[1] + 2;
+ }
+
+ IEEE80211_VERIFY_ELEMENT(rates, IEEE80211_RATE_MAXSIZE, return);
+ if (xrates != NULL)
+ IEEE80211_VERIFY_ELEMENT(xrates,
+ IEEE80211_RATE_MAXSIZE - rates[1], return);
+ rate = ieee80211_setup_rates(ni, rates, xrates,
+ IEEE80211_F_JOIN |
+ IEEE80211_F_DOSORT | IEEE80211_F_DOFRATE |
+ IEEE80211_F_DONEGO | IEEE80211_F_DODEL);
+ if (rate & IEEE80211_RATE_BASIC) {
+ IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_ASSOC,
+ wh->i_addr2,
+ "%sassoc failed (rate set mismatch)",
+ ISREASSOC(subtype) ? "re" : "");
+ vap->iv_stats.is_rx_assoc_norate++;
+ ieee80211_new_state(vap, IEEE80211_S_SCAN,
+ IEEE80211_SCAN_FAIL_STATUS);
+ return;
+ }
+
+ ni->ni_capinfo = capinfo;
+ ni->ni_associd = associd;
+ if (ni->ni_jointime == 0)
+ ni->ni_jointime = time_uptime;
+ if (wme != NULL &&
+ ieee80211_parse_wmeparams(vap, wme, wh) >= 0) {
+ ni->ni_flags |= IEEE80211_NODE_QOS;
+ ieee80211_wme_updateparams(vap);
+ } else
+ ni->ni_flags &= ~IEEE80211_NODE_QOS;
+ /*
+ * Setup HT state according to the negotiation.
+ *
+ * NB: shouldn't need to check if HT use is enabled but some
+ * ap's send back HT ie's even when we don't indicate we
+ * are HT capable in our AssocReq.
+ */
+ if (htcap != NULL && htinfo != NULL &&
+ (vap->iv_flags_ht & IEEE80211_FHT_HT)) {
+ ieee80211_ht_node_init(ni);
+ ieee80211_ht_updateparams(ni, htcap, htinfo);
+ ieee80211_setup_htrates(ni, htcap,
+ IEEE80211_F_JOIN | IEEE80211_F_DOBRS);
+ ieee80211_setup_basic_htrates(ni, htinfo);
+ ieee80211_node_setuptxparms(ni);
+ ieee80211_ratectl_node_init(ni);
+ } else {
+#ifdef IEEE80211_SUPPORT_SUPERG
+ if (IEEE80211_ATH_CAP(vap, ni, IEEE80211_NODE_ATH))
+ ieee80211_ff_node_init(ni);
+#endif
+ }
+ /*
+ * Configure state now that we are associated.
+ *
+ * XXX may need different/additional driver callbacks?
+ */
+ if (IEEE80211_IS_CHAN_A(ic->ic_curchan) ||
+ (ni->ni_capinfo & IEEE80211_CAPINFO_SHORT_PREAMBLE)) {
+ ic->ic_flags |= IEEE80211_F_SHPREAMBLE;
+ ic->ic_flags &= ~IEEE80211_F_USEBARKER;
+ } else {
+ ic->ic_flags &= ~IEEE80211_F_SHPREAMBLE;
+ ic->ic_flags |= IEEE80211_F_USEBARKER;
+ }
+ ieee80211_set_shortslottime(ic,
+ IEEE80211_IS_CHAN_A(ic->ic_curchan) ||
+ (ni->ni_capinfo & IEEE80211_CAPINFO_SHORT_SLOTTIME));
+ /*
+ * Honor ERP protection.
+ *
+ * NB: ni_erp should be zero for non-11g operation.
+ */
+ if (IEEE80211_IS_CHAN_ANYG(ic->ic_curchan) &&
+ (ni->ni_erp & IEEE80211_ERP_USE_PROTECTION))
+ ic->ic_flags |= IEEE80211_F_USEPROT;
+ else
+ ic->ic_flags &= ~IEEE80211_F_USEPROT;
+ IEEE80211_NOTE_MAC(vap,
+ IEEE80211_MSG_ASSOC | IEEE80211_MSG_DEBUG, wh->i_addr2,
+ "%sassoc success at aid %d: %s preamble, %s slot time%s%s%s%s%s%s%s%s",
+ ISREASSOC(subtype) ? "re" : "",
+ IEEE80211_NODE_AID(ni),
+ ic->ic_flags&IEEE80211_F_SHPREAMBLE ? "short" : "long",
+ ic->ic_flags&IEEE80211_F_SHSLOT ? "short" : "long",
+ ic->ic_flags&IEEE80211_F_USEPROT ? ", protection" : "",
+ ni->ni_flags & IEEE80211_NODE_QOS ? ", QoS" : "",
+ ni->ni_flags & IEEE80211_NODE_HT ?
+ (ni->ni_chw == 40 ? ", HT40" : ", HT20") : "",
+ ni->ni_flags & IEEE80211_NODE_AMPDU ? " (+AMPDU)" : "",
+ ni->ni_flags & IEEE80211_NODE_MIMO_RTS ? " (+SMPS-DYN)" :
+ ni->ni_flags & IEEE80211_NODE_MIMO_PS ? " (+SMPS)" : "",
+ ni->ni_flags & IEEE80211_NODE_RIFS ? " (+RIFS)" : "",
+ IEEE80211_ATH_CAP(vap, ni, IEEE80211_NODE_FF) ?
+ ", fast-frames" : "",
+ IEEE80211_ATH_CAP(vap, ni, IEEE80211_NODE_TURBOP) ?
+ ", turbo" : ""
+ );
+ ieee80211_new_state(vap, IEEE80211_S_RUN, subtype);
+ break;
+ }
+
+ case IEEE80211_FC0_SUBTYPE_DEAUTH: {
+ uint16_t reason;
+
+ if (vap->iv_state == IEEE80211_S_SCAN) {
+ vap->iv_stats.is_rx_mgtdiscard++;
+ return;
+ }
+ if (!IEEE80211_ADDR_EQ(wh->i_addr1, vap->iv_myaddr)) {
+ /* NB: can happen when in promiscuous mode */
+ vap->iv_stats.is_rx_mgtdiscard++;
+ break;
+ }
+
+ /*
+ * deauth frame format
+ * [2] reason
+ */
+ IEEE80211_VERIFY_LENGTH(efrm - frm, 2, return);
+ reason = le16toh(*(uint16_t *)frm);
+
+ vap->iv_stats.is_rx_deauth++;
+ vap->iv_stats.is_rx_deauth_code = reason;
+ IEEE80211_NODE_STAT(ni, rx_deauth);
+
+ IEEE80211_NOTE(vap, IEEE80211_MSG_AUTH, ni,
+ "recv deauthenticate (reason %d)", reason);
+ ieee80211_new_state(vap, IEEE80211_S_AUTH,
+ (reason << 8) | IEEE80211_FC0_SUBTYPE_DEAUTH);
+ break;
+ }
+
+ case IEEE80211_FC0_SUBTYPE_DISASSOC: {
+ uint16_t reason;
+
+ if (vap->iv_state != IEEE80211_S_RUN &&
+ vap->iv_state != IEEE80211_S_ASSOC &&
+ vap->iv_state != IEEE80211_S_AUTH) {
+ vap->iv_stats.is_rx_mgtdiscard++;
+ return;
+ }
+ if (!IEEE80211_ADDR_EQ(wh->i_addr1, vap->iv_myaddr)) {
+ /* NB: can happen when in promiscuous mode */
+ vap->iv_stats.is_rx_mgtdiscard++;
+ break;
+ }
+
+ /*
+ * disassoc frame format
+ * [2] reason
+ */
+ IEEE80211_VERIFY_LENGTH(efrm - frm, 2, return);
+ reason = le16toh(*(uint16_t *)frm);
+
+ vap->iv_stats.is_rx_disassoc++;
+ vap->iv_stats.is_rx_disassoc_code = reason;
+ IEEE80211_NODE_STAT(ni, rx_disassoc);
+
+ IEEE80211_NOTE(vap, IEEE80211_MSG_ASSOC, ni,
+ "recv disassociate (reason %d)", reason);
+ ieee80211_new_state(vap, IEEE80211_S_ASSOC, 0);
+ break;
+ }
+
+ case IEEE80211_FC0_SUBTYPE_ACTION:
+ if (vap->iv_state == IEEE80211_S_RUN) {
+ if (ieee80211_parse_action(ni, m0) == 0)
+ ic->ic_recv_action(ni, wh, frm, efrm);
+ } else
+ vap->iv_stats.is_rx_mgtdiscard++;
+ break;
+
+ case IEEE80211_FC0_SUBTYPE_PROBE_REQ:
+ case IEEE80211_FC0_SUBTYPE_ASSOC_REQ:
+ case IEEE80211_FC0_SUBTYPE_REASSOC_REQ:
+ vap->iv_stats.is_rx_mgtdiscard++;
+ return;
+ default:
+ IEEE80211_DISCARD(vap, IEEE80211_MSG_ANY,
+ wh, "mgt", "subtype 0x%x not handled", subtype);
+ vap->iv_stats.is_rx_badsubtype++;
+ break;
+ }
+#undef ISREASSOC
+#undef ISPROBE
+}
+
+static void
+sta_recv_ctl(struct ieee80211_node *ni, struct mbuf *m0, int subtype)
+{
+}
diff --git a/rtems/freebsd/net80211/ieee80211_sta.h b/rtems/freebsd/net80211/ieee80211_sta.h
new file mode 100644
index 00000000..43316f5b
--- /dev/null
+++ b/rtems/freebsd/net80211/ieee80211_sta.h
@@ -0,0 +1,36 @@
+/*-
+ * Copyright (c) 2007-2008 Sam Leffler, Errno Consulting
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+#ifndef _NET80211_IEEE80211_STA_HH_
+#define _NET80211_IEEE80211_STA_HH_
+
+/*
+ * Station-mode implementation definitions.
+ */
+void ieee80211_sta_attach(struct ieee80211com *);
+void ieee80211_sta_detach(struct ieee80211com *);
+void ieee80211_sta_vattach(struct ieee80211vap *);
+#endif /* !_NET80211_IEEE80211_STA_HH_ */
diff --git a/rtems/freebsd/net80211/ieee80211_superg.c b/rtems/freebsd/net80211/ieee80211_superg.c
new file mode 100644
index 00000000..d578967b
--- /dev/null
+++ b/rtems/freebsd/net80211/ieee80211_superg.c
@@ -0,0 +1,902 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/freebsd/local/opt_wlan.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/endian.h>
+
+#include <rtems/freebsd/sys/socket.h>
+
+#include <rtems/freebsd/net/bpf.h>
+#include <rtems/freebsd/net/ethernet.h>
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/if_llc.h>
+#include <rtems/freebsd/net/if_media.h>
+
+#include <rtems/freebsd/net80211/ieee80211_var.h>
+#include <rtems/freebsd/net80211/ieee80211_input.h>
+#include <rtems/freebsd/net80211/ieee80211_phy.h>
+#include <rtems/freebsd/net80211/ieee80211_superg.h>
+
+/*
+ * Atheros fast-frame encapsulation format.
+ * FF max payload:
+ * 802.2 + FFHDR + HPAD + 802.3 + 802.2 + 1500 + SPAD + 802.3 + 802.2 + 1500:
+ * 8 + 4 + 4 + 14 + 8 + 1500 + 6 + 14 + 8 + 1500
+ * = 3066
+ */
+/* fast frame header is 32-bits */
+#define ATH_FF_PROTO 0x0000003f /* protocol */
+#define ATH_FF_PROTO_S 0
+#define ATH_FF_FTYPE 0x000000c0 /* frame type */
+#define ATH_FF_FTYPE_S 6
+#define ATH_FF_HLEN32 0x00000300 /* optional hdr length */
+#define ATH_FF_HLEN32_S 8
+#define ATH_FF_SEQNUM 0x001ffc00 /* sequence number */
+#define ATH_FF_SEQNUM_S 10
+#define ATH_FF_OFFSET 0xffe00000 /* offset to 2nd payload */
+#define ATH_FF_OFFSET_S 21
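+
+/*
+ * Illustrative use of the masks above (a sketch; 'seq' and 'off' are
+ * hypothetical values, not fields defined in this file):
+ *
+ *	hdr = (ATH_FF_PROTO_L2TUNNEL << ATH_FF_PROTO_S) |
+ *	    ((seq << ATH_FF_SEQNUM_S) & ATH_FF_SEQNUM) |
+ *	    ((off << ATH_FF_OFFSET_S) & ATH_FF_OFFSET);
+ *
+ * ieee80211_ff_decap below extracts a field with the inverse,
+ * (hdr & FIELD) >> FIELD_S; see its local MS() macro.
+ */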
+
+#define ATH_FF_MAX_HDR_PAD 4
+#define ATH_FF_MAX_SEP_PAD 6
+#define ATH_FF_MAX_HDR 30
+
+#define ATH_FF_PROTO_L2TUNNEL 0 /* L2 tunnel protocol */
+#define ATH_FF_ETH_TYPE 0x88bd /* Ether type for encapsulated frames */
+#define ATH_FF_SNAP_ORGCODE_0 0x00
+#define ATH_FF_SNAP_ORGCODE_1 0x03
+#define ATH_FF_SNAP_ORGCODE_2 0x7f
+
+#define ATH_FF_TXQMIN 2 /* min txq depth for staging */
+#define ATH_FF_TXQMAX 50 /* maximum # of queued frames allowed */
+#define	ATH_FF_STAGEMAX	5	/* max waiting period for staged frame */
+
+#define ETHER_HEADER_COPY(dst, src) \
+ memcpy(dst, src, sizeof(struct ether_header))
+
+static int ieee80211_ffppsmin = 2; /* pps threshold for ff aggregation */
+SYSCTL_INT(_net_wlan, OID_AUTO, ffppsmin, CTLTYPE_INT | CTLFLAG_RW,
+ &ieee80211_ffppsmin, 0, "min packet rate before fast-frame staging");
+static int ieee80211_ffagemax = -1; /* max time frames held on stage q */
+SYSCTL_PROC(_net_wlan, OID_AUTO, ffagemax, CTLTYPE_INT | CTLFLAG_RW,
+ &ieee80211_ffagemax, 0, ieee80211_sysctl_msecs_ticks, "I",
+ "max hold time for fast-frame staging (ms)");
+
+void
+ieee80211_superg_attach(struct ieee80211com *ic)
+{
+ struct ieee80211_superg *sg;
+
+ if (ic->ic_caps & IEEE80211_C_FF) {
+ sg = (struct ieee80211_superg *) malloc(
+ sizeof(struct ieee80211_superg), M_80211_VAP,
+ M_NOWAIT | M_ZERO);
+ if (sg == NULL) {
+ printf("%s: cannot allocate SuperG state block\n",
+ __func__);
+ return;
+ }
+ ic->ic_superg = sg;
+ }
+ ieee80211_ffagemax = msecs_to_ticks(150);
+}
+
+void
+ieee80211_superg_detach(struct ieee80211com *ic)
+{
+ if (ic->ic_superg != NULL) {
+ free(ic->ic_superg, M_80211_VAP);
+ ic->ic_superg = NULL;
+ }
+}
+
+void
+ieee80211_superg_vattach(struct ieee80211vap *vap)
+{
+ struct ieee80211com *ic = vap->iv_ic;
+
+ if (ic->ic_superg == NULL) /* NB: can't do fast-frames w/o state */
+ vap->iv_caps &= ~IEEE80211_C_FF;
+ if (vap->iv_caps & IEEE80211_C_FF)
+ vap->iv_flags |= IEEE80211_F_FF;
+ /* NB: we only implement sta mode */
+ if (vap->iv_opmode == IEEE80211_M_STA &&
+ (vap->iv_caps & IEEE80211_C_TURBOP))
+ vap->iv_flags |= IEEE80211_F_TURBOP;
+}
+
+void
+ieee80211_superg_vdetach(struct ieee80211vap *vap)
+{
+}
+
+#define ATH_OUI_BYTES 0x00, 0x03, 0x7f
+/*
+ * Add an Atheros advanced capability information element to a frame.
+ */
+uint8_t *
+ieee80211_add_ath(uint8_t *frm, uint8_t caps, ieee80211_keyix defkeyix)
+{
+ static const struct ieee80211_ath_ie info = {
+ .ath_id = IEEE80211_ELEMID_VENDOR,
+ .ath_len = sizeof(struct ieee80211_ath_ie) - 2,
+ .ath_oui = { ATH_OUI_BYTES },
+ .ath_oui_type = ATH_OUI_TYPE,
+ .ath_oui_subtype= ATH_OUI_SUBTYPE,
+ .ath_version = ATH_OUI_VERSION,
+ };
+ struct ieee80211_ath_ie *ath = (struct ieee80211_ath_ie *) frm;
+
+ memcpy(frm, &info, sizeof(info));
+ ath->ath_capability = caps;
+ if (defkeyix != IEEE80211_KEYIX_NONE) {
+ ath->ath_defkeyix[0] = (defkeyix & 0xff);
+ ath->ath_defkeyix[1] = ((defkeyix >> 8) & 0xff);
+ } else {
+ ath->ath_defkeyix[0] = 0xff;
+ ath->ath_defkeyix[1] = 0x7f;
+ }
+ return frm + sizeof(info);
+}
+#undef ATH_OUI_BYTES
+
+uint8_t *
+ieee80211_add_athcaps(uint8_t *frm, const struct ieee80211_node *bss)
+{
+ const struct ieee80211vap *vap = bss->ni_vap;
+
+ return ieee80211_add_ath(frm,
+ vap->iv_flags & IEEE80211_F_ATHEROS,
+ ((vap->iv_flags & IEEE80211_F_WPA) == 0 &&
+ bss->ni_authmode != IEEE80211_AUTH_8021X) ?
+ vap->iv_def_txkey : IEEE80211_KEYIX_NONE);
+}
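+
+/*
+ * Usage sketch (assumed, not taken from this file): callers composing
+ * a management frame body advance a cursor through each appended ie,
+ * e.g.
+ *
+ *	frm = ieee80211_add_athcaps(frm, vap->iv_bss);
+ */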
+
+void
+ieee80211_parse_ath(struct ieee80211_node *ni, uint8_t *ie)
+{
+ const struct ieee80211_ath_ie *ath =
+ (const struct ieee80211_ath_ie *) ie;
+
+ ni->ni_ath_flags = ath->ath_capability;
+ ni->ni_ath_defkeyix = LE_READ_2(&ath->ath_defkeyix);
+}
+
+int
+ieee80211_parse_athparams(struct ieee80211_node *ni, uint8_t *frm,
+ const struct ieee80211_frame *wh)
+{
+ struct ieee80211vap *vap = ni->ni_vap;
+ const struct ieee80211_ath_ie *ath;
+ u_int len = frm[1];
+ int capschanged;
+ uint16_t defkeyix;
+
+ if (len < sizeof(struct ieee80211_ath_ie)-2) {
+ IEEE80211_DISCARD_IE(vap,
+ IEEE80211_MSG_ELEMID | IEEE80211_MSG_SUPERG,
+ wh, "Atheros", "too short, len %u", len);
+ return -1;
+ }
+ ath = (const struct ieee80211_ath_ie *)frm;
+ capschanged = (ni->ni_ath_flags != ath->ath_capability);
+ defkeyix = LE_READ_2(ath->ath_defkeyix);
+ if (capschanged || defkeyix != ni->ni_ath_defkeyix) {
+ ni->ni_ath_flags = ath->ath_capability;
+ ni->ni_ath_defkeyix = defkeyix;
+ IEEE80211_NOTE(vap, IEEE80211_MSG_SUPERG, ni,
+ "ath ie change: new caps 0x%x defkeyix 0x%x",
+ ni->ni_ath_flags, ni->ni_ath_defkeyix);
+ }
+ if (IEEE80211_ATH_CAP(vap, ni, ATHEROS_CAP_TURBO_PRIME)) {
+ uint16_t curflags, newflags;
+
+ /*
+ * Check for turbo mode switch. Calculate flags
+ * for the new mode and effect the switch.
+ */
+ newflags = curflags = vap->iv_ic->ic_bsschan->ic_flags;
+ /* NB: BOOST is not in ic_flags, so get it from the ie */
+ if (ath->ath_capability & ATHEROS_CAP_BOOST)
+ newflags |= IEEE80211_CHAN_TURBO;
+ else
+ newflags &= ~IEEE80211_CHAN_TURBO;
+ if (newflags != curflags)
+ ieee80211_dturbo_switch(vap, newflags);
+ }
+ return capschanged;
+}
+
+/*
+ * Decap the encapsulated frame pair and dispatch the first
+ * for delivery. The second frame is returned for delivery
+ * via the normal path.
+ */
+struct mbuf *
+ieee80211_ff_decap(struct ieee80211_node *ni, struct mbuf *m)
+{
+#define FF_LLC_SIZE (sizeof(struct ether_header) + sizeof(struct llc))
+#define MS(x,f) (((x) & f) >> f##_S)
+ struct ieee80211vap *vap = ni->ni_vap;
+ struct llc *llc;
+ uint32_t ath;
+ struct mbuf *n;
+ int framelen;
+
+ /* NB: we assume caller does this check for us */
+ KASSERT(IEEE80211_ATH_CAP(vap, ni, IEEE80211_NODE_FF),
+ ("ff not negotiated"));
+ /*
+ * Check for fast-frame tunnel encapsulation.
+ */
+ if (m->m_pkthdr.len < 3*FF_LLC_SIZE)
+ return m;
+ if (m->m_len < FF_LLC_SIZE &&
+ (m = m_pullup(m, FF_LLC_SIZE)) == NULL) {
+ IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_ANY,
+ ni->ni_macaddr, "fast-frame",
+ "%s", "m_pullup(llc) failed");
+ vap->iv_stats.is_rx_tooshort++;
+ return NULL;
+ }
+ llc = (struct llc *)(mtod(m, uint8_t *) +
+ sizeof(struct ether_header));
+ if (llc->llc_snap.ether_type != htons(ATH_FF_ETH_TYPE))
+ return m;
+ m_adj(m, FF_LLC_SIZE);
+ m_copydata(m, 0, sizeof(uint32_t), (caddr_t) &ath);
+ if (MS(ath, ATH_FF_PROTO) != ATH_FF_PROTO_L2TUNNEL) {
+ IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_ANY,
+ ni->ni_macaddr, "fast-frame",
+		    "unsupported tunnel protocol, header 0x%x", ath);
+ vap->iv_stats.is_ff_badhdr++;
+ m_freem(m);
+ return NULL;
+ }
+ /* NB: skip header and alignment padding */
+ m_adj(m, roundup(sizeof(uint32_t) - 2, 4) + 2);
+
+ vap->iv_stats.is_ff_decap++;
+
+ /*
+ * Decap the first frame, bust it apart from the
+ * second and deliver; then decap the second frame
+ * and return it to the caller for normal delivery.
+ */
+ m = ieee80211_decap1(m, &framelen);
+ if (m == NULL) {
+ IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_ANY,
+ ni->ni_macaddr, "fast-frame", "%s", "first decap failed");
+ vap->iv_stats.is_ff_tooshort++;
+ return NULL;
+ }
+ n = m_split(m, framelen, M_NOWAIT);
+ if (n == NULL) {
+ IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_ANY,
+ ni->ni_macaddr, "fast-frame",
+ "%s", "unable to split encapsulated frames");
+ vap->iv_stats.is_ff_split++;
+ m_freem(m); /* NB: must reclaim */
+ return NULL;
+ }
+ /* XXX not right for WDS */
+ vap->iv_deliver_data(vap, ni, m); /* 1st of pair */
+
+ /*
+ * Decap second frame.
+ */
+ m_adj(n, roundup2(framelen, 4) - framelen); /* padding */
+ n = ieee80211_decap1(n, &framelen);
+ if (n == NULL) {
+ IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_ANY,
+ ni->ni_macaddr, "fast-frame", "%s", "second decap failed");
+ vap->iv_stats.is_ff_tooshort++;
+ }
+ /* XXX verify framelen against mbuf contents */
+ return n; /* 2nd delivered by caller */
+#undef MS
+#undef FF_LLC_SIZE
+}
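+
+/*
+ * For reference, a sketch of the tunnel layout the decap above walks,
+ * per the format comment at the top of this file:
+ *
+ *	[802.3][LLC/SNAP, type 0x88bd][32-bit FF hdr][alignment pad]
+ *	    [802.3][LLC][payload 1][pad to 4-byte boundary]
+ *	    [802.3][LLC][payload 2]
+ */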
+
+/*
+ * Do Ethernet-LLC encapsulation for each payload in a fast frame
+ * tunnel encapsulation. The frame is assumed to have an Ethernet
+ * header at the front that must be stripped before prepending the
+ * LLC followed by the Ethernet header passed in (with an Ethernet
+ * type that specifies the payload size).
+ */
+static struct mbuf *
+ff_encap1(struct ieee80211vap *vap, struct mbuf *m,
+ const struct ether_header *eh)
+{
+ struct llc *llc;
+ uint16_t payload;
+
+ /* XXX optimize by combining m_adj+M_PREPEND */
+ m_adj(m, sizeof(struct ether_header) - sizeof(struct llc));
+ llc = mtod(m, struct llc *);
+ llc->llc_dsap = llc->llc_ssap = LLC_SNAP_LSAP;
+ llc->llc_control = LLC_UI;
+ llc->llc_snap.org_code[0] = 0;
+ llc->llc_snap.org_code[1] = 0;
+ llc->llc_snap.org_code[2] = 0;
+ llc->llc_snap.ether_type = eh->ether_type;
+ payload = m->m_pkthdr.len; /* NB: w/o Ethernet header */
+
+ M_PREPEND(m, sizeof(struct ether_header), M_DONTWAIT);
+ if (m == NULL) { /* XXX cannot happen */
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_SUPERG,
+ "%s: no space for ether_header\n", __func__);
+ vap->iv_stats.is_tx_nobuf++;
+ return NULL;
+ }
+ ETHER_HEADER_COPY(mtod(m, void *), eh);
+ mtod(m, struct ether_header *)->ether_type = htons(payload);
+ return m;
+}
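+
+/*
+ * Illustrative before/after for ff_encap1 (a sketch):
+ *
+ *	in:  [ether_header, type][payload]
+ *	out: [ether_header, type = payload length][LLC/SNAP carrying
+ *	      the original type][payload]
+ *
+ * i.e. the outer header becomes 802.3-style (length in the type
+ * field) while the original Ethernet type moves to the SNAP header.
+ */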
+
+/*
+ * Fast frame encapsulation. There must be two packets
+ * chained with m_nextpkt. We do header adjustment for
+ * each, add the tunnel encapsulation, and then concatenate
+ * the mbuf chains to form a single frame for transmission.
+ */
+struct mbuf *
+ieee80211_ff_encap(struct ieee80211vap *vap, struct mbuf *m1, int hdrspace,
+ struct ieee80211_key *key)
+{
+ struct mbuf *m2;
+ struct ether_header eh1, eh2;
+ struct llc *llc;
+ struct mbuf *m;
+ int pad;
+
+ m2 = m1->m_nextpkt;
+ if (m2 == NULL) {
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_SUPERG,
+ "%s: only one frame\n", __func__);
+ goto bad;
+ }
+ m1->m_nextpkt = NULL;
+ /*
+ * Include fast frame headers in adjusting header layout.
+ */
+ KASSERT(m1->m_len >= sizeof(eh1), ("no ethernet header!"));
+ ETHER_HEADER_COPY(&eh1, mtod(m1, caddr_t));
+ m1 = ieee80211_mbuf_adjust(vap,
+ hdrspace + sizeof(struct llc) + sizeof(uint32_t) + 2 +
+ sizeof(struct ether_header),
+ key, m1);
+ if (m1 == NULL) {
+ /* NB: ieee80211_mbuf_adjust handles msgs+statistics */
+ m_freem(m2);
+ goto bad;
+ }
+
+ /*
+ * Copy second frame's Ethernet header out of line
+ * and adjust for encapsulation headers. Note that
+ * we make room for padding in case there isn't room
+ * at the end of first frame.
+ */
+ KASSERT(m2->m_len >= sizeof(eh2), ("no ethernet header!"));
+ ETHER_HEADER_COPY(&eh2, mtod(m2, caddr_t));
+ m2 = ieee80211_mbuf_adjust(vap,
+ ATH_FF_MAX_HDR_PAD + sizeof(struct ether_header),
+ NULL, m2);
+ if (m2 == NULL) {
+ /* NB: ieee80211_mbuf_adjust handles msgs+statistics */
+ goto bad;
+ }
+
+ /*
+ * Now do tunnel encapsulation. First, each
+ * frame gets a standard encapsulation.
+ */
+ m1 = ff_encap1(vap, m1, &eh1);
+ if (m1 == NULL)
+ goto bad;
+ m2 = ff_encap1(vap, m2, &eh2);
+ if (m2 == NULL)
+ goto bad;
+
+ /*
+ * Pad leading frame to a 4-byte boundary. If there
+ * is space at the end of the first frame, put it
+ * there; otherwise prepend to the front of the second
+ * frame. We know doing the second will always work
+ * because we reserve space above. We prefer appending
+ * as this typically has better DMA alignment properties.
+ */
+ for (m = m1; m->m_next != NULL; m = m->m_next)
+ ;
+ pad = roundup2(m1->m_pkthdr.len, 4) - m1->m_pkthdr.len;
+ if (pad) {
+ if (M_TRAILINGSPACE(m) < pad) { /* prepend to second */
+ m2->m_data -= pad;
+ m2->m_len += pad;
+ m2->m_pkthdr.len += pad;
+ } else { /* append to first */
+ m->m_len += pad;
+ m1->m_pkthdr.len += pad;
+ }
+ }
+
+ /*
+ * Now, stick 'em together and prepend the tunnel headers;
+ * first the Atheros tunnel header (all zero for now) and
+ * then a special fast frame LLC.
+ *
+ * XXX optimize by prepending together
+ */
+ m->m_next = m2; /* NB: last mbuf from above */
+ m1->m_pkthdr.len += m2->m_pkthdr.len;
+ M_PREPEND(m1, sizeof(uint32_t)+2, M_DONTWAIT);
+ if (m1 == NULL) { /* XXX cannot happen */
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_SUPERG,
+ "%s: no space for tunnel header\n", __func__);
+ vap->iv_stats.is_tx_nobuf++;
+ return NULL;
+ }
+ memset(mtod(m1, void *), 0, sizeof(uint32_t)+2);
+
+ M_PREPEND(m1, sizeof(struct llc), M_DONTWAIT);
+ if (m1 == NULL) { /* XXX cannot happen */
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_SUPERG,
+ "%s: no space for llc header\n", __func__);
+ vap->iv_stats.is_tx_nobuf++;
+ return NULL;
+ }
+ llc = mtod(m1, struct llc *);
+ llc->llc_dsap = llc->llc_ssap = LLC_SNAP_LSAP;
+ llc->llc_control = LLC_UI;
+ llc->llc_snap.org_code[0] = ATH_FF_SNAP_ORGCODE_0;
+ llc->llc_snap.org_code[1] = ATH_FF_SNAP_ORGCODE_1;
+ llc->llc_snap.org_code[2] = ATH_FF_SNAP_ORGCODE_2;
+ llc->llc_snap.ether_type = htons(ATH_FF_ETH_TYPE);
+
+ vap->iv_stats.is_ff_encap++;
+
+ return m1;
+bad:
+ if (m1 != NULL)
+ m_freem(m1);
+ if (m2 != NULL)
+ m_freem(m2);
+ return NULL;
+}
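+
+/*
+ * E.g. the pad computation above: a first frame of 1537 bytes gives
+ * roundup2(1537, 4) = 1540, hence 3 pad bytes, placed in the trailing
+ * space of the first chain or, failing that, ahead of the second
+ * frame.
+ */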
+
+static void
+ff_transmit(struct ieee80211_node *ni, struct mbuf *m)
+{
+ struct ieee80211vap *vap = ni->ni_vap;
+ int error;
+
+ /* encap and xmit */
+ m = ieee80211_encap(vap, ni, m);
+ if (m != NULL) {
+ struct ifnet *ifp = vap->iv_ifp;
+ struct ifnet *parent = ni->ni_ic->ic_ifp;
+
+ error = parent->if_transmit(parent, m);
+ if (error != 0) {
+ /* NB: IFQ_HANDOFF reclaims mbuf */
+ ieee80211_free_node(ni);
+ } else {
+ ifp->if_opackets++;
+ }
+ } else
+ ieee80211_free_node(ni);
+}
+
+/*
+ * Flush frames to the device; note we re-use the linked list
+ * the frames were stored on and stop at the sentinel frame
+ * (left unchanged), which may be non-NULL.
+ */
+static void
+ff_flush(struct mbuf *head, struct mbuf *last)
+{
+ struct mbuf *m, *next;
+ struct ieee80211_node *ni;
+ struct ieee80211vap *vap;
+
+ for (m = head; m != last; m = next) {
+ next = m->m_nextpkt;
+ m->m_nextpkt = NULL;
+
+ ni = (struct ieee80211_node *) m->m_pkthdr.rcvif;
+ vap = ni->ni_vap;
+
+ IEEE80211_NOTE(vap, IEEE80211_MSG_SUPERG, ni,
+ "%s: flush frame, age %u", __func__, M_AGE_GET(m));
+ vap->iv_stats.is_ff_flush++;
+
+ ff_transmit(ni, m);
+ }
+}
+
+/*
+ * Age frames on the staging queue.
+ */
+void
+ieee80211_ff_age(struct ieee80211com *ic, struct ieee80211_stageq *sq,
+ int quanta)
+{
+ struct ieee80211_superg *sg = ic->ic_superg;
+ struct mbuf *m, *head;
+ struct ieee80211_node *ni;
+ struct ieee80211_tx_ampdu *tap;
+
+ KASSERT(sq->head != NULL, ("stageq empty"));
+
+ IEEE80211_LOCK(ic);
+ head = sq->head;
+ while ((m = sq->head) != NULL && M_AGE_GET(m) < quanta) {
+ /* clear tap ref to frame */
+ ni = (struct ieee80211_node *) m->m_pkthdr.rcvif;
+ tap = &ni->ni_tx_ampdu[M_WME_GETAC(m)];
+		KASSERT(tap->txa_private == m, ("staged frame mismatch"));
+ tap->txa_private = NULL;
+
+ sq->head = m->m_nextpkt;
+ sq->depth--;
+ sg->ff_stageqdepth--;
+ }
+ if (m == NULL)
+ sq->tail = NULL;
+ else
+ M_AGE_SUB(m, quanta);
+ IEEE80211_UNLOCK(ic);
+
+ ff_flush(head, m);
+}
+
+static void
+stageq_add(struct ieee80211_stageq *sq, struct mbuf *m)
+{
+ int age = ieee80211_ffagemax;
+ if (sq->tail != NULL) {
+ sq->tail->m_nextpkt = m;
+ age -= M_AGE_GET(sq->head);
+ } else
+ sq->head = m;
+ KASSERT(age >= 0, ("age %d", age));
+ M_AGE_SET(m, age);
+ m->m_nextpkt = NULL;
+ sq->tail = m;
+ sq->depth++;
+}
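+
+/*
+ * NB (a reading of the arithmetic above): a queued frame stores its
+ * age relative to the head's expiry so that aging only decrements the
+ * head. E.g. with ieee80211_ffagemax of 15 ticks and a head with 10
+ * ticks left, a newly staged frame stores 15 - 10 = 5, expiring 5
+ * ticks after the head for a total lifetime of 15.
+ */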
+
+static void
+stageq_remove(struct ieee80211_stageq *sq, struct mbuf *mstaged)
+{
+ struct mbuf *m, *mprev;
+
+ mprev = NULL;
+ for (m = sq->head; m != NULL; m = m->m_nextpkt) {
+ if (m == mstaged) {
+ if (mprev == NULL)
+ sq->head = m->m_nextpkt;
+ else
+ mprev->m_nextpkt = m->m_nextpkt;
+ if (sq->tail == m)
+ sq->tail = mprev;
+ sq->depth--;
+ return;
+ }
+ mprev = m;
+ }
+ printf("%s: packet not found\n", __func__);
+}
+
+static uint32_t
+ff_approx_txtime(struct ieee80211_node *ni,
+ const struct mbuf *m1, const struct mbuf *m2)
+{
+ struct ieee80211com *ic = ni->ni_ic;
+ struct ieee80211vap *vap = ni->ni_vap;
+ uint32_t framelen;
+
+ /*
+	 * Approximate the frame length to be transmitted; a rough
+	 * estimate that adds the following maximal values to the
+	 * mbuf payload:
+ * - 32: 802.11 encap + CRC
+ * - 24: encryption overhead (if wep bit)
+ * - 4 + 6: fast-frame header and padding
+ * - 16: 2 LLC FF tunnel headers
+ * - 14: 1 802.3 FF tunnel header (mbuf already accounts for 2nd)
+ */
+ framelen = m1->m_pkthdr.len + 32 +
+ ATH_FF_MAX_HDR_PAD + ATH_FF_MAX_SEP_PAD + ATH_FF_MAX_HDR;
+ if (vap->iv_flags & IEEE80211_F_PRIVACY)
+ framelen += 24;
+ if (m2 != NULL)
+ framelen += m2->m_pkthdr.len;
+ return ieee80211_compute_duration(ic->ic_rt, framelen, ni->ni_txrate, 0);
+}
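+
+/*
+ * Worked example (frame sizes assumed): two 1514-byte frames with
+ * privacy enabled give 1514 + 32 + 4 + 6 + 30 + 24 + 1514 = 3124
+ * bytes handed to ieee80211_compute_duration above.
+ */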
+
+/*
+ * Check if the supplied frame can be partnered with an existing
+ * or pending frame. Return a reference to any frame that should be
+ * sent on return; otherwise return NULL.
+ */
+struct mbuf *
+ieee80211_ff_check(struct ieee80211_node *ni, struct mbuf *m)
+{
+ struct ieee80211vap *vap = ni->ni_vap;
+ struct ieee80211com *ic = ni->ni_ic;
+ struct ieee80211_superg *sg = ic->ic_superg;
+ const int pri = M_WME_GETAC(m);
+ struct ieee80211_stageq *sq;
+ struct ieee80211_tx_ampdu *tap;
+ struct mbuf *mstaged;
+ uint32_t txtime, limit;
+
+ /*
+ * Check if the supplied frame can be aggregated.
+ *
+	 * NB: we allow EAPOL frames to be aggregated with other ucast
+	 * traffic; if 802.1X EAPOL frames go out in the clear they
+	 * arguably should not be combined with encrypted frames.
+ */
+ IEEE80211_LOCK(ic);
+ tap = &ni->ni_tx_ampdu[pri];
+ mstaged = tap->txa_private; /* NB: we reuse AMPDU state */
+ ieee80211_txampdu_count_packet(tap);
+
+ /*
+ * When not in station mode never aggregate a multicast
+	 * frame; this ensures, for example, that a combined frame
+ * does not require multiple encryption keys.
+ */
+ if (vap->iv_opmode != IEEE80211_M_STA &&
+ ETHER_IS_MULTICAST(mtod(m, struct ether_header *)->ether_dhost)) {
+ /* XXX flush staged frame? */
+ IEEE80211_UNLOCK(ic);
+ return m;
+ }
+ /*
+	 * If there is no frame to combine with and the pps is
+	 * too low, do not attempt to aggregate this frame.
+ */
+ if (mstaged == NULL &&
+ ieee80211_txampdu_getpps(tap) < ieee80211_ffppsmin) {
+ IEEE80211_UNLOCK(ic);
+ return m;
+ }
+ sq = &sg->ff_stageq[pri];
+ /*
+	 * Check the txop limit to ensure the aggregate fits.
+ */
+ limit = IEEE80211_TXOP_TO_US(
+ ic->ic_wme.wme_chanParams.cap_wmeParams[pri].wmep_txopLimit);
+ if (limit != 0 &&
+ (txtime = ff_approx_txtime(ni, m, mstaged)) > limit) {
+ /*
+ * Aggregate too long, return to the caller for direct
+ * transmission. In addition, flush any pending frame
+ * before sending this one.
+ */
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_SUPERG,
+ "%s: txtime %u exceeds txop limit %u\n",
+ __func__, txtime, limit);
+
+ tap->txa_private = NULL;
+ if (mstaged != NULL)
+ stageq_remove(sq, mstaged);
+ IEEE80211_UNLOCK(ic);
+
+ if (mstaged != NULL) {
+ IEEE80211_NOTE(vap, IEEE80211_MSG_SUPERG, ni,
+ "%s: flush staged frame", __func__);
+ /* encap and xmit */
+ ff_transmit(ni, mstaged);
+ }
+ return m; /* NB: original frame */
+ }
+ /*
+	 * An aggregation candidate. If there's a frame to partner
+	 * with, combine the two and return them for processing. Otherwise
+ * save this frame and wait for a partner to show up (or
+ * the frame to be flushed). Note that staged frames also
+ * hold their node reference.
+ */
+ if (mstaged != NULL) {
+ tap->txa_private = NULL;
+ stageq_remove(sq, mstaged);
+ IEEE80211_UNLOCK(ic);
+
+ IEEE80211_NOTE(vap, IEEE80211_MSG_SUPERG, ni,
+ "%s: aggregate fast-frame", __func__);
+ /*
+ * Release the node reference; we only need
+ * the one already in mstaged.
+ */
+ KASSERT(mstaged->m_pkthdr.rcvif == (void *)ni,
+ ("rcvif %p ni %p", mstaged->m_pkthdr.rcvif, ni));
+ ieee80211_free_node(ni);
+
+ m->m_nextpkt = NULL;
+ mstaged->m_nextpkt = m;
+ mstaged->m_flags |= M_FF; /* NB: mark for encap work */
+ } else {
+ KASSERT(tap->txa_private == NULL,
+ ("txa_private %p", tap->txa_private));
+ tap->txa_private = m;
+
+ stageq_add(sq, m);
+ sg->ff_stageqdepth++;
+ IEEE80211_UNLOCK(ic);
+
+ IEEE80211_NOTE(vap, IEEE80211_MSG_SUPERG, ni,
+ "%s: stage frame, %u queued", __func__, sq->depth);
+ /* NB: mstaged is NULL */
+ }
+ return mstaged;
+}
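+
+/*
+ * Summary of the outcomes above (illustrative):
+ *	- not aggregatable (mcast, low pps, txop overflow): the
+ *	  original mbuf is returned for direct transmission;
+ *	- partner found: the staged frame is returned with this frame
+ *	  chained via m_nextpkt and M_FF set for ieee80211_ff_encap;
+ *	- no partner yet: the frame is staged and NULL is returned.
+ */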
+
+void
+ieee80211_ff_node_init(struct ieee80211_node *ni)
+{
+ /*
+ * Clean FF state on re-associate. This handles the case
+ * where a station leaves w/o notifying us and then returns
+	 * before the node is reaped for inactivity.
+ */
+ ieee80211_ff_node_cleanup(ni);
+}
+
+void
+ieee80211_ff_node_cleanup(struct ieee80211_node *ni)
+{
+ struct ieee80211com *ic = ni->ni_ic;
+ struct ieee80211_superg *sg = ic->ic_superg;
+ struct ieee80211_tx_ampdu *tap;
+ struct mbuf *m, *head;
+ int ac;
+
+ IEEE80211_LOCK(ic);
+ head = NULL;
+ for (ac = 0; ac < WME_NUM_AC; ac++) {
+ tap = &ni->ni_tx_ampdu[ac];
+ m = tap->txa_private;
+ if (m != NULL) {
+ tap->txa_private = NULL;
+ stageq_remove(&sg->ff_stageq[ac], m);
+ m->m_nextpkt = head;
+ head = m;
+ }
+ }
+ IEEE80211_UNLOCK(ic);
+
+ for (m = head; m != NULL; m = m->m_nextpkt) {
+ m_freem(m);
+ ieee80211_free_node(ni);
+ }
+}
+
+/*
+ * Switch between turbo and non-turbo operating modes.
+ * Use the specified channel flags to locate the new
+ * channel, update 802.11 state, and then call back into
+ * the driver to effect the change.
+ */
+void
+ieee80211_dturbo_switch(struct ieee80211vap *vap, int newflags)
+{
+ struct ieee80211com *ic = vap->iv_ic;
+ struct ieee80211_channel *chan;
+
+ chan = ieee80211_find_channel(ic, ic->ic_bsschan->ic_freq, newflags);
+ if (chan == NULL) { /* XXX should not happen */
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_SUPERG,
+ "%s: no channel with freq %u flags 0x%x\n",
+ __func__, ic->ic_bsschan->ic_freq, newflags);
+ return;
+ }
+
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_SUPERG,
+ "%s: %s -> %s (freq %u flags 0x%x)\n", __func__,
+ ieee80211_phymode_name[ieee80211_chan2mode(ic->ic_bsschan)],
+ ieee80211_phymode_name[ieee80211_chan2mode(chan)],
+ chan->ic_freq, chan->ic_flags);
+
+ ic->ic_bsschan = chan;
+ ic->ic_prevchan = ic->ic_curchan;
+ ic->ic_curchan = chan;
+ ic->ic_rt = ieee80211_get_ratetable(chan);
+ ic->ic_set_channel(ic);
+ ieee80211_radiotap_chan_change(ic);
+ /* NB: do not need to reset ERP state 'cuz we're in sta mode */
+}
+
+/*
+ * Return the current ``state'' of an Atheros capability.
+ * If associated in station mode report the negotiated
+ * setting. Otherwise report the current setting.
+ */
+static int
+getathcap(struct ieee80211vap *vap, int cap)
+{
+ if (vap->iv_opmode == IEEE80211_M_STA &&
+ vap->iv_state == IEEE80211_S_RUN)
+ return IEEE80211_ATH_CAP(vap, vap->iv_bss, cap) != 0;
+ else
+ return (vap->iv_flags & cap) != 0;
+}
+
+static int
+superg_ioctl_get80211(struct ieee80211vap *vap, struct ieee80211req *ireq)
+{
+ switch (ireq->i_type) {
+ case IEEE80211_IOC_FF:
+ ireq->i_val = getathcap(vap, IEEE80211_F_FF);
+ break;
+ case IEEE80211_IOC_TURBOP:
+ ireq->i_val = getathcap(vap, IEEE80211_F_TURBOP);
+ break;
+ default:
+ return ENOSYS;
+ }
+ return 0;
+}
+IEEE80211_IOCTL_GET(superg, superg_ioctl_get80211);
+
+static int
+superg_ioctl_set80211(struct ieee80211vap *vap, struct ieee80211req *ireq)
+{
+ switch (ireq->i_type) {
+ case IEEE80211_IOC_FF:
+ if (ireq->i_val) {
+ if ((vap->iv_caps & IEEE80211_C_FF) == 0)
+ return EOPNOTSUPP;
+ vap->iv_flags |= IEEE80211_F_FF;
+ } else
+ vap->iv_flags &= ~IEEE80211_F_FF;
+ return ENETRESET;
+ case IEEE80211_IOC_TURBOP:
+ if (ireq->i_val) {
+ if ((vap->iv_caps & IEEE80211_C_TURBOP) == 0)
+ return EOPNOTSUPP;
+ vap->iv_flags |= IEEE80211_F_TURBOP;
+ } else
+ vap->iv_flags &= ~IEEE80211_F_TURBOP;
+ return ENETRESET;
+ default:
+ return ENOSYS;
+ }
+ return 0;
+}
+IEEE80211_IOCTL_SET(superg, superg_ioctl_set80211);
diff --git a/rtems/freebsd/net80211/ieee80211_superg.h b/rtems/freebsd/net80211/ieee80211_superg.h
new file mode 100644
index 00000000..bda45dda
--- /dev/null
+++ b/rtems/freebsd/net80211/ieee80211_superg.h
@@ -0,0 +1,129 @@
+/*-
+ * Copyright (c) 2009 Sam Leffler, Errno Consulting
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+#ifndef _NET80211_IEEE80211_SUPERG_HH_
+#define _NET80211_IEEE80211_SUPERG_HH_
+
+/*
+ * Atheros' 802.11 SuperG protocol support.
+ */
+
+/*
+ * Atheros advanced capability information element.
+ */
+struct ieee80211_ath_ie {
+ uint8_t ath_id; /* IEEE80211_ELEMID_VENDOR */
+ uint8_t ath_len; /* length in bytes */
+ uint8_t ath_oui[3]; /* ATH_OUI */
+ uint8_t ath_oui_type; /* ATH_OUI_TYPE */
+ uint8_t ath_oui_subtype; /* ATH_OUI_SUBTYPE */
+ uint8_t ath_version; /* spec revision */
+ uint8_t ath_capability; /* capability info */
+#define ATHEROS_CAP_TURBO_PRIME 0x01 /* dynamic turbo--aka Turbo' */
+#define ATHEROS_CAP_COMPRESSION 0x02 /* data compression */
+#define ATHEROS_CAP_FAST_FRAME 0x04 /* fast (jumbo) frames */
+#define ATHEROS_CAP_XR 0x08 /* Xtended Range support */
+#define	ATHEROS_CAP_AR		0x10	/* Advanced Radar support */
+#define ATHEROS_CAP_BURST 0x20 /* Bursting - not negotiated */
+#define ATHEROS_CAP_WME 0x40 /* CWMin tuning */
+#define ATHEROS_CAP_BOOST 0x80 /* use turbo/!turbo mode */
+ uint8_t ath_defkeyix[2];
+} __packed;
+
+#define ATH_OUI_VERSION 0x00
+#define ATH_OUI_SUBTYPE 0x01
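+
+/*
+ * Example on-air encoding (hedged; ATH_OUI_TYPE shown symbolically as
+ * it is defined elsewhere): an ie advertising only fast frames with
+ * no default key would be the 11 bytes
+ *
+ *	dd 09 00 03 7f <ATH_OUI_TYPE> 01 00 04 ff 7f
+ *
+ * i.e. vendor elemid, len 9, Atheros OUI, type/subtype/version,
+ * ATHEROS_CAP_FAST_FRAME, and the 0x7fff "no key" index.
+ */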
+
+#ifdef _KERNEL
+struct ieee80211_stageq {
+ struct mbuf *head; /* frames linked w/ m_nextpkt */
+ struct mbuf *tail; /* last frame in queue */
+ int depth; /* # items on head */
+};
+
+struct ieee80211_superg {
+ /* fast-frames staging q */
+ struct ieee80211_stageq ff_stageq[WME_NUM_AC];
+ int ff_stageqdepth; /* cumulative depth */
+};
+
+void ieee80211_superg_attach(struct ieee80211com *);
+void ieee80211_superg_detach(struct ieee80211com *);
+void ieee80211_superg_vattach(struct ieee80211vap *);
+void ieee80211_superg_vdetach(struct ieee80211vap *);
+
+uint8_t *ieee80211_add_ath(uint8_t *, uint8_t, ieee80211_keyix);
+uint8_t *ieee80211_add_athcaps(uint8_t *, const struct ieee80211_node *);
+void ieee80211_parse_ath(struct ieee80211_node *, uint8_t *);
+int ieee80211_parse_athparams(struct ieee80211_node *, uint8_t *,
+ const struct ieee80211_frame *);
+
+void ieee80211_ff_node_init(struct ieee80211_node *);
+void ieee80211_ff_node_cleanup(struct ieee80211_node *);
+
+struct mbuf *ieee80211_ff_check(struct ieee80211_node *, struct mbuf *);
+void ieee80211_ff_age(struct ieee80211com *, struct ieee80211_stageq *,
+ int quanta);
+
+static __inline void
+ieee80211_ff_flush(struct ieee80211com *ic, int ac)
+{
+ struct ieee80211_superg *sg = ic->ic_superg;
+
+ if (sg != NULL && sg->ff_stageq[ac].depth)
+ ieee80211_ff_age(ic, &sg->ff_stageq[ac], 0x7fffffff);
+}
+
+static __inline void
+ieee80211_ff_age_all(struct ieee80211com *ic, int quanta)
+{
+ struct ieee80211_superg *sg = ic->ic_superg;
+
+ if (sg != NULL && sg->ff_stageqdepth) {
+ if (sg->ff_stageq[WME_AC_VO].depth)
+ ieee80211_ff_age(ic, &sg->ff_stageq[WME_AC_VO], quanta);
+ if (sg->ff_stageq[WME_AC_VI].depth)
+ ieee80211_ff_age(ic, &sg->ff_stageq[WME_AC_VI], quanta);
+ if (sg->ff_stageq[WME_AC_BE].depth)
+ ieee80211_ff_age(ic, &sg->ff_stageq[WME_AC_BE], quanta);
+ if (sg->ff_stageq[WME_AC_BK].depth)
+ ieee80211_ff_age(ic, &sg->ff_stageq[WME_AC_BK], quanta);
+ }
+}
+
+struct mbuf *ieee80211_ff_encap(struct ieee80211vap *, struct mbuf *,
+ int, struct ieee80211_key *);
+
+struct mbuf *ieee80211_ff_decap(struct ieee80211_node *, struct mbuf *);
+
+static __inline struct mbuf *
+ieee80211_decap_fastframe(struct ieee80211vap *vap, struct ieee80211_node *ni,
+ struct mbuf *m)
+{
+ return IEEE80211_ATH_CAP(vap, ni, IEEE80211_NODE_FF) ?
+ ieee80211_ff_decap(ni, m) : m;
+}
+#endif /* _KERNEL */
+#endif /* _NET80211_IEEE80211_SUPERG_HH_ */
diff --git a/rtems/freebsd/net80211/ieee80211_tdma.c b/rtems/freebsd/net80211/ieee80211_tdma.c
new file mode 100644
index 00000000..1096d4e0
--- /dev/null
+++ b/rtems/freebsd/net80211/ieee80211_tdma.c
@@ -0,0 +1,822 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 2007-2009 Sam Leffler, Errno Consulting
+ * Copyright (c) 2007-2009 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+#ifdef __FreeBSD__
+__FBSDID("$FreeBSD$");
+#endif
+
+/*
+ * IEEE 802.11 TDMA mode support.
+ */
+#include <rtems/freebsd/local/opt_inet.h>
+#include <rtems/freebsd/local/opt_tdma.h>
+#include <rtems/freebsd/local/opt_wlan.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/kernel.h>
+
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/sockio.h>
+#include <rtems/freebsd/sys/endian.h>
+#include <rtems/freebsd/sys/errno.h>
+#include <rtems/freebsd/sys/proc.h>
+#include <rtems/freebsd/sys/sysctl.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/if_media.h>
+#include <rtems/freebsd/net/if_llc.h>
+#include <rtems/freebsd/net/ethernet.h>
+
+#include <rtems/freebsd/net/bpf.h>
+
+#include <rtems/freebsd/net80211/ieee80211_var.h>
+#include <rtems/freebsd/net80211/ieee80211_tdma.h>
+#include <rtems/freebsd/net80211/ieee80211_input.h>
+
+#ifndef TDMA_SLOTLEN_DEFAULT
+#define TDMA_SLOTLEN_DEFAULT 10*1000 /* 10ms */
+#endif
+#ifndef TDMA_SLOTCNT_DEFAULT
+#define TDMA_SLOTCNT_DEFAULT 2 /* 2x (pt-to-pt) */
+#endif
+#ifndef TDMA_BINTVAL_DEFAULT
+#define TDMA_BINTVAL_DEFAULT 5 /* 5x ~= 100TU beacon intvl */
+#endif
+#ifndef TDMA_TXRATE_11B_DEFAULT
+#define TDMA_TXRATE_11B_DEFAULT 2*11
+#endif
+#ifndef TDMA_TXRATE_11G_DEFAULT
+#define TDMA_TXRATE_11G_DEFAULT 2*24
+#endif
+#ifndef TDMA_TXRATE_11A_DEFAULT
+#define TDMA_TXRATE_11A_DEFAULT 2*24
+#endif
+#ifndef TDMA_TXRATE_TURBO_DEFAULT
+#define TDMA_TXRATE_TURBO_DEFAULT 2*24
+#endif
+#ifndef TDMA_TXRATE_HALF_DEFAULT
+#define TDMA_TXRATE_HALF_DEFAULT 2*12
+#endif
+#ifndef TDMA_TXRATE_QUARTER_DEFAULT
+#define TDMA_TXRATE_QUARTER_DEFAULT 2*6
+#endif
+#ifndef TDMA_TXRATE_11NA_DEFAULT
+#define TDMA_TXRATE_11NA_DEFAULT (4 | IEEE80211_RATE_MCS)
+#endif
+#ifndef TDMA_TXRATE_11NG_DEFAULT
+#define TDMA_TXRATE_11NG_DEFAULT (4 | IEEE80211_RATE_MCS)
+#endif
+
+#define TDMA_VERSION_VALID(_version) \
+ (TDMA_VERSION_V2 <= (_version) && (_version) <= TDMA_VERSION)
+#define TDMA_SLOTCNT_VALID(_slotcnt) \
+ (2 <= (_slotcnt) && (_slotcnt) <= TDMA_MAXSLOTS)
+/* XXX magic constants */
+#define TDMA_SLOTLEN_VALID(_slotlen) \
+ (2*100 <= (_slotlen) && (unsigned)(_slotlen) <= 0xfffff)
+/* XXX probably should set a max */
+#define TDMA_BINTVAL_VALID(_bintval) (1 <= (_bintval))
+
+/*
+ * This code is not prepared to handle more than 2 slots.
+ */
+CTASSERT(TDMA_MAXSLOTS == 2);
+
+static void tdma_vdetach(struct ieee80211vap *vap);
+static int tdma_newstate(struct ieee80211vap *, enum ieee80211_state, int);
+static void tdma_beacon_miss(struct ieee80211vap *vap);
+static void tdma_recv_mgmt(struct ieee80211_node *, struct mbuf *,
+ int subtype, int rssi, int nf);
+static int tdma_update(struct ieee80211vap *vap,
+ const struct ieee80211_tdma_param *tdma, struct ieee80211_node *ni,
+ int pickslot);
+static int tdma_process_params(struct ieee80211_node *ni,
+ const u_int8_t *ie, int rssi, int nf, const struct ieee80211_frame *wh);
+
+static void
+settxparms(struct ieee80211vap *vap, enum ieee80211_phymode mode, int rate)
+{
+ vap->iv_txparms[mode].ucastrate = rate;
+ vap->iv_txparms[mode].mcastrate = rate;
+}
+
+static void
+setackpolicy(struct ieee80211com *ic, int noack)
+{
+ struct ieee80211_wme_state *wme = &ic->ic_wme;
+ int ac;
+
+ for (ac = 0; ac < WME_NUM_AC; ac++) {
+ wme->wme_chanParams.cap_wmeParams[ac].wmep_noackPolicy = noack;
+ wme->wme_wmeChanParams.cap_wmeParams[ac].wmep_noackPolicy = noack;
+ }
+}
+
+void
+ieee80211_tdma_vattach(struct ieee80211vap *vap)
+{
+ struct ieee80211_tdma_state *ts;
+
+ KASSERT(vap->iv_caps & IEEE80211_C_TDMA,
+ ("not a tdma vap, caps 0x%x", vap->iv_caps));
+
+ ts = (struct ieee80211_tdma_state *) malloc(
+ sizeof(struct ieee80211_tdma_state), M_80211_VAP, M_NOWAIT | M_ZERO);
+ if (ts == NULL) {
+ printf("%s: cannot allocate TDMA state block\n", __func__);
+		/* NB: fall back to adhoc demo mode */
+ vap->iv_caps &= ~IEEE80211_C_TDMA;
+ return;
+ }
+ /* NB: default configuration is passive so no beacons */
+ ts->tdma_version = TDMA_VERSION;
+ ts->tdma_slotlen = TDMA_SLOTLEN_DEFAULT;
+ ts->tdma_slotcnt = TDMA_SLOTCNT_DEFAULT;
+ ts->tdma_bintval = TDMA_BINTVAL_DEFAULT;
+ ts->tdma_slot = 1; /* passive operation */
+
+ /* setup default fixed rates */
+ settxparms(vap, IEEE80211_MODE_11A, TDMA_TXRATE_11A_DEFAULT);
+ settxparms(vap, IEEE80211_MODE_11B, TDMA_TXRATE_11B_DEFAULT);
+ settxparms(vap, IEEE80211_MODE_11G, TDMA_TXRATE_11G_DEFAULT);
+ settxparms(vap, IEEE80211_MODE_TURBO_A, TDMA_TXRATE_TURBO_DEFAULT);
+ settxparms(vap, IEEE80211_MODE_TURBO_G, TDMA_TXRATE_TURBO_DEFAULT);
+ settxparms(vap, IEEE80211_MODE_STURBO_A, TDMA_TXRATE_TURBO_DEFAULT);
+ settxparms(vap, IEEE80211_MODE_11NA, TDMA_TXRATE_11NA_DEFAULT);
+ settxparms(vap, IEEE80211_MODE_11NG, TDMA_TXRATE_11NG_DEFAULT);
+ settxparms(vap, IEEE80211_MODE_HALF, TDMA_TXRATE_HALF_DEFAULT);
+ settxparms(vap, IEEE80211_MODE_QUARTER, TDMA_TXRATE_QUARTER_DEFAULT);
+
+ setackpolicy(vap->iv_ic, 1); /* disable ACK's */
+
+ ts->tdma_opdetach = vap->iv_opdetach;
+ vap->iv_opdetach = tdma_vdetach;
+ ts->tdma_newstate = vap->iv_newstate;
+ vap->iv_newstate = tdma_newstate;
+ vap->iv_bmiss = tdma_beacon_miss;
+ ts->tdma_recv_mgmt = vap->iv_recv_mgmt;
+ vap->iv_recv_mgmt = tdma_recv_mgmt;
+
+ vap->iv_tdma = ts;
+}
+
+static void
+tdma_vdetach(struct ieee80211vap *vap)
+{
+ struct ieee80211_tdma_state *ts = vap->iv_tdma;
+
+ if (ts == NULL) {
+ /* NB: should not have touched any ic state */
+ return;
+ }
+ ts->tdma_opdetach(vap);
+ free(vap->iv_tdma, M_80211_VAP);
+ vap->iv_tdma = NULL;
+
+ setackpolicy(vap->iv_ic, 0); /* enable ACK's */
+}
+
+static void
+sta_leave(void *arg, struct ieee80211_node *ni)
+{
+ struct ieee80211vap *vap = arg;
+
+ if (ni->ni_vap == vap && ni != vap->iv_bss)
+ ieee80211_node_leave(ni);
+}
+
+/*
+ * TDMA state machine handler.
+ */
+static int
+tdma_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
+{
+ struct ieee80211_tdma_state *ts = vap->iv_tdma;
+ struct ieee80211com *ic = vap->iv_ic;
+ enum ieee80211_state ostate;
+ int status;
+
+ IEEE80211_LOCK_ASSERT(ic);
+
+ ostate = vap->iv_state;
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_STATE, "%s: %s -> %s (%d)\n",
+ __func__, ieee80211_state_name[ostate],
+ ieee80211_state_name[nstate], arg);
+
+ if (vap->iv_flags_ext & IEEE80211_FEXT_SWBMISS)
+ callout_stop(&vap->iv_swbmiss);
+ if (nstate == IEEE80211_S_SCAN &&
+ (ostate == IEEE80211_S_INIT || ostate == IEEE80211_S_RUN) &&
+ ts->tdma_slot != 0) {
+ /*
+ * Override adhoc behaviour when operating as a slave;
+ * we need to scan even if the channel is locked.
+ */
+ vap->iv_state = nstate; /* state transition */
+ ieee80211_cancel_scan(vap); /* background scan */
+ if (ostate == IEEE80211_S_RUN) {
+ /* purge station table; entries are stale */
+ ieee80211_iterate_nodes(&ic->ic_sta, sta_leave, vap);
+ }
+ if (vap->iv_flags_ext & IEEE80211_FEXT_SCANREQ) {
+ ieee80211_check_scan(vap,
+ vap->iv_scanreq_flags,
+ vap->iv_scanreq_duration,
+ vap->iv_scanreq_mindwell,
+ vap->iv_scanreq_maxdwell,
+ vap->iv_scanreq_nssid, vap->iv_scanreq_ssid);
+ vap->iv_flags_ext &= ~IEEE80211_FEXT_SCANREQ;
+ } else
+ ieee80211_check_scan_current(vap);
+ status = 0;
+ } else {
+ status = ts->tdma_newstate(vap, nstate, arg);
+ }
+ if (status == 0 &&
+ nstate == IEEE80211_S_RUN && ostate != IEEE80211_S_RUN &&
+ (vap->iv_flags_ext & IEEE80211_FEXT_SWBMISS) &&
+ ts->tdma_slot != 0 &&
+ vap->iv_des_chan == IEEE80211_CHAN_ANYC) {
+ /*
+ * Start s/w beacon miss timer for slave devices w/o
+ * hardware support. Note we do this only if we're
+ * not locked to a channel (i.e. roam to follow the
+ * master). The 2x is a fudge for our doing this in
+ * software.
+ */
+ vap->iv_swbmiss_period = IEEE80211_TU_TO_TICKS(
+ 2 * vap->iv_bmissthreshold * ts->tdma_bintval *
+ ((ts->tdma_slotcnt * ts->tdma_slotlen) / 1024));
+ vap->iv_swbmiss_count = 0;
+ callout_reset(&vap->iv_swbmiss, vap->iv_swbmiss_period,
+ ieee80211_swbmiss, vap);
+ }
+ return status;
+}
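+
+/*
+ * Worked example for the swbmiss period above (an assumed
+ * iv_bmissthreshold of 7; other values are the defaults): the
+ * superframe is (2 slots * 10000 us) / 1024 = 19 TU, so the period
+ * is 2 * 7 * 5 * 19 = 1330 TU, roughly 1.4 seconds.
+ */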
+
+static void
+tdma_beacon_miss(struct ieee80211vap *vap)
+{
+ struct ieee80211_tdma_state *ts = vap->iv_tdma;
+
+ KASSERT((vap->iv_ic->ic_flags & IEEE80211_F_SCAN) == 0, ("scanning"));
+ KASSERT(vap->iv_state == IEEE80211_S_RUN,
+ ("wrong state %d", vap->iv_state));
+
+ IEEE80211_DPRINTF(vap,
+ IEEE80211_MSG_STATE | IEEE80211_MSG_TDMA | IEEE80211_MSG_DEBUG,
+ "beacon miss, mode %u state %s\n",
+ vap->iv_opmode, ieee80211_state_name[vap->iv_state]);
+
+ callout_stop(&vap->iv_swbmiss);
+
+ if (ts->tdma_peer != NULL) { /* XXX? can this be null? */
+ ieee80211_notify_node_leave(vap->iv_bss);
+ ts->tdma_peer = NULL;
+ /*
+ * Treat beacon miss like an associate failure wrt the
+ * scan policy; this forces the entry in the scan cache
+ * to be ignored after several tries.
+ */
+ ieee80211_scan_assoc_fail(vap, vap->iv_bss->ni_macaddr,
+ IEEE80211_STATUS_TIMEOUT);
+ }
+#if 0
+ ts->tdma_inuse = 0; /* clear slot usage */
+#endif
+ ieee80211_new_state(vap, IEEE80211_S_SCAN, 0);
+}
+
+static void
+tdma_recv_mgmt(struct ieee80211_node *ni, struct mbuf *m0,
+ int subtype, int rssi, int nf)
+{
+ struct ieee80211com *ic = ni->ni_ic;
+ struct ieee80211vap *vap = ni->ni_vap;
+ struct ieee80211_tdma_state *ts = vap->iv_tdma;
+
+ if (subtype == IEEE80211_FC0_SUBTYPE_BEACON &&
+ (ic->ic_flags & IEEE80211_F_SCAN) == 0) {
+ struct ieee80211_frame *wh = mtod(m0, struct ieee80211_frame *);
+ struct ieee80211_scanparams scan;
+
+ if (ieee80211_parse_beacon(ni, m0, &scan) != 0)
+ return;
+ if (scan.tdma == NULL) {
+ /*
+ * TDMA stations must beacon a TDMA ie; ignore
+ * any other station.
+ * XXX detect overlapping bss and change channel
+ */
+ IEEE80211_DISCARD(vap,
+ IEEE80211_MSG_ELEMID | IEEE80211_MSG_INPUT,
+ wh, ieee80211_mgt_subtype_name[subtype >>
+ IEEE80211_FC0_SUBTYPE_SHIFT],
+ "%s", "no TDMA ie");
+ vap->iv_stats.is_rx_mgtdiscard++;
+ return;
+ }
+ if (ni == vap->iv_bss &&
+ !IEEE80211_ADDR_EQ(wh->i_addr2, ni->ni_macaddr)) {
+ /*
+ * Fake up a node for this newly
+ * discovered member of the IBSS.
+ */
+ ni = ieee80211_add_neighbor(vap, wh, &scan);
+ if (ni == NULL) {
+ /* NB: stat kept for alloc failure */
+ return;
+ }
+ }
+ /*
+ * Check for state updates.
+ */
+ if (IEEE80211_ADDR_EQ(wh->i_addr3, ni->ni_bssid)) {
+ /*
+ * Count frame now that we know it's to be processed.
+ */
+ vap->iv_stats.is_rx_beacon++;
+ IEEE80211_NODE_STAT(ni, rx_beacons);
+ /*
+ * Record tsf of last beacon. NB: this must be
+ * done before calling tdma_process_params
+ * as deeper routines reference it.
+ */
+ memcpy(&ni->ni_tstamp.data, scan.tstamp,
+ sizeof(ni->ni_tstamp.data));
+ /*
+ * Count beacon frame for s/w bmiss handling.
+ */
+ vap->iv_swbmiss_count++;
+ /*
+ * Process tdma ie. The contents are used to sync
+ * the slot timing, reconfigure the bss, etc.
+ */
+ (void) tdma_process_params(ni, scan.tdma, rssi, nf, wh);
+ return;
+ }
+ /*
+ * NB: defer remaining work to the adhoc code; this causes
+		 * 2x parsing of the frame but should happen infrequently.
+ */
+ }
+ ts->tdma_recv_mgmt(ni, m0, subtype, rssi, nf);
+}
+
+/*
+ * Update TDMA state on receipt of a beacon frame with
+ * a TDMA information element. The sender's identity
+ * is provided so we can track who our peer is. If pickslot
+ * is non-zero we scan the slot allocation state in the ie
+ * to locate a free slot for our use.
+ */
+static int
+tdma_update(struct ieee80211vap *vap, const struct ieee80211_tdma_param *tdma,
+ struct ieee80211_node *ni, int pickslot)
+{
+ struct ieee80211_tdma_state *ts = vap->iv_tdma;
+ int slot, slotlen, update;
+
+ KASSERT(vap->iv_caps & IEEE80211_C_TDMA,
+ ("not a tdma vap, caps 0x%x", vap->iv_caps));
+
+ update = 0;
+ if (tdma->tdma_slotcnt != ts->tdma_slotcnt) {
+ if (!TDMA_SLOTCNT_VALID(tdma->tdma_slotcnt)) {
+ if (ppsratecheck(&ts->tdma_lastprint, &ts->tdma_fails, 1))
+ printf("%s: bad slot cnt %u\n",
+ __func__, tdma->tdma_slotcnt);
+ return 0;
+ }
+ update |= TDMA_UPDATE_SLOTCNT;
+ }
+ slotlen = le16toh(tdma->tdma_slotlen) * 100;
+ if (slotlen != ts->tdma_slotlen) {
+ if (!TDMA_SLOTLEN_VALID(slotlen)) {
+ if (ppsratecheck(&ts->tdma_lastprint, &ts->tdma_fails, 1))
+ printf("%s: bad slot len %u\n",
+ __func__, slotlen);
+ return 0;
+ }
+ update |= TDMA_UPDATE_SLOTLEN;
+ }
+ if (tdma->tdma_bintval != ts->tdma_bintval) {
+ if (!TDMA_BINTVAL_VALID(tdma->tdma_bintval)) {
+ if (ppsratecheck(&ts->tdma_lastprint, &ts->tdma_fails, 1))
+ printf("%s: bad beacon interval %u\n",
+ __func__, tdma->tdma_bintval);
+ return 0;
+ }
+ update |= TDMA_UPDATE_BINTVAL;
+ }
+ slot = ts->tdma_slot;
+ if (pickslot) {
+ /*
+ * Pick unoccupied slot. Note we never choose slot 0.
+ */
+ for (slot = tdma->tdma_slotcnt-1; slot > 0; slot--)
+ if (isclr(tdma->tdma_inuse, slot))
+ break;
+ if (slot <= 0) {
+ printf("%s: no free slot, slotcnt %u inuse: 0x%x\n",
+ __func__, tdma->tdma_slotcnt,
+ tdma->tdma_inuse[0]);
+ /* XXX need to do something better */
+ return 0;
+ }
+ if (slot != ts->tdma_slot)
+ update |= TDMA_UPDATE_SLOT;
+ }
+ if (ni != ts->tdma_peer) {
+ /* update everything */
+ update = TDMA_UPDATE_SLOT
+ | TDMA_UPDATE_SLOTCNT
+ | TDMA_UPDATE_SLOTLEN
+ | TDMA_UPDATE_BINTVAL;
+ }
+
+ if (update) {
+ /*
+ * New/changed parameters; update runtime state.
+ */
+ /* XXX overwrites user parameters */
+ if (update & TDMA_UPDATE_SLOTCNT)
+ ts->tdma_slotcnt = tdma->tdma_slotcnt;
+ if (update & TDMA_UPDATE_SLOTLEN)
+ ts->tdma_slotlen = slotlen;
+ if (update & TDMA_UPDATE_SLOT)
+ ts->tdma_slot = slot;
+ if (update & TDMA_UPDATE_BINTVAL)
+ ts->tdma_bintval = tdma->tdma_bintval;
+ /* mark beacon to be updated before next xmit */
+ ieee80211_beacon_notify(vap, IEEE80211_BEACON_TDMA);
+
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_TDMA,
+ "%s: slot %u slotcnt %u slotlen %u us bintval %u\n",
+ __func__, ts->tdma_slot, ts->tdma_slotcnt,
+ ts->tdma_slotlen, ts->tdma_bintval);
+ }
+ /*
+ * Notify driver. Note we can be called before
+ * entering RUN state if we scanned and are
+ * joining an existing bss. In that case do not
+ * call the driver because not all necessary state
+	 * has been set up. The next beacon will do the right thing.
+ */
+ if (vap->iv_state == IEEE80211_S_RUN)
+ vap->iv_ic->ic_tdma_update(ni, tdma, update);
+ /*
+ * Dispatch join event on first beacon from new master.
+ */
+ if (ts->tdma_peer != ni) {
+ if (ts->tdma_peer != NULL)
+ ieee80211_notify_node_leave(vap->iv_bss);
+ ieee80211_notify_node_join(ni, 1);
+ /* NB: no reference, we just use the address */
+ ts->tdma_peer = ni;
+ }
+ return 1;
+}
+
+/*
+ * Process received TDMA parameters.
+ */
+static int
+tdma_process_params(struct ieee80211_node *ni, const u_int8_t *ie,
+ int rssi, int nf, const struct ieee80211_frame *wh)
+{
+ struct ieee80211vap *vap = ni->ni_vap;
+ struct ieee80211_tdma_state *ts = vap->iv_tdma;
+ const struct ieee80211_tdma_param *tdma =
+ (const struct ieee80211_tdma_param *) ie;
+ u_int len = ie[1];
+
+ KASSERT(vap->iv_caps & IEEE80211_C_TDMA,
+ ("not a tdma vap, caps 0x%x", vap->iv_caps));
+
+ if (len < sizeof(*tdma) - 2) {
+ IEEE80211_DISCARD_IE(vap,
+ IEEE80211_MSG_ELEMID | IEEE80211_MSG_TDMA,
+ wh, "tdma", "too short, len %u", len);
+ return IEEE80211_REASON_IE_INVALID;
+ }
+ if (tdma->tdma_version != ts->tdma_version) {
+ IEEE80211_DISCARD_IE(vap,
+ IEEE80211_MSG_ELEMID | IEEE80211_MSG_TDMA,
+ wh, "tdma", "bad version %u (ours %u)",
+ tdma->tdma_version, ts->tdma_version);
+ return IEEE80211_REASON_IE_INVALID;
+ }
+ /*
+ * NB: ideally we'd check against tdma_slotcnt, but that
+ * would require extra effort so do this easy check that
+ * covers the work below; more stringent checks are done
+ * before we make more extensive use of the ie contents.
+ */
+ if (tdma->tdma_slot >= TDMA_MAXSLOTS) {
+ IEEE80211_DISCARD_IE(vap,
+ IEEE80211_MSG_ELEMID | IEEE80211_MSG_TDMA,
+ wh, "tdma", "invalid slot %u", tdma->tdma_slot);
+ return IEEE80211_REASON_IE_INVALID;
+ }
+ /*
+ * Can reach here while scanning, update
+	 * We can reach here while scanning; update
+	 * operational state only in RUN state.
+ if (vap->iv_state == IEEE80211_S_RUN) {
+ if (tdma->tdma_slot != ts->tdma_slot &&
+ isclr(ts->tdma_inuse, tdma->tdma_slot)) {
+ IEEE80211_NOTE(vap, IEEE80211_MSG_TDMA, ni,
+ "discovered in slot %u", tdma->tdma_slot);
+ setbit(ts->tdma_inuse, tdma->tdma_slot);
+ /* XXX dispatch event only when operating as master */
+ if (ts->tdma_slot == 0)
+ ieee80211_notify_node_join(ni, 1);
+ }
+ setbit(ts->tdma_active, tdma->tdma_slot);
+ if (tdma->tdma_slot == ts->tdma_slot-1) {
+ /*
+ * Slave tsf synchronization to station
+ * just before us in the schedule. The driver
+ * is responsible for copying the timestamp
+ * of the received beacon into our beacon
+ * frame so the sender can calculate round
+ * trip time. We cannot do that here because
+ * we don't know how to update our beacon frame.
+ */
+ (void) tdma_update(vap, tdma, ni, 0);
+ /* XXX reschedule swbmiss timer on parameter change */
+ } else if (tdma->tdma_slot == ts->tdma_slot+1) {
+ uint64_t tstamp;
+#if 0
+ uint32_t rstamp = (uint32_t) le64toh(rs->tsf);
+ int32_t rtt;
+#endif
+ /*
+			 * Use the returned timestamp to calculate the
+			 * round-trip time.
+ */
+ memcpy(&tstamp, tdma->tdma_tstamp, 8);
+#if 0
+ /* XXX use only 15 bits of rstamp */
+ rtt = rstamp - (le64toh(tstamp) & 0x7fff);
+ if (rtt < 0)
+ rtt += 0x7fff;
+ /* XXX hack to quiet normal use */
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_DOT1X,
+ "tdma rtt %5u [rstamp %5u tstamp %llu]\n",
+ rtt, rstamp,
+ (unsigned long long) le64toh(tstamp));
+#endif
+ } else if (tdma->tdma_slot == ts->tdma_slot &&
+ le64toh(ni->ni_tstamp.tsf) > vap->iv_bss->ni_tstamp.tsf) {
+ /*
+			 * A station is using the same slot as us and has
+ * been around longer than us; we must move.
+ * Note this can happen if stations do not
+ * see each other while scanning.
+ */
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_TDMA,
+ "slot %u collision rxtsf %llu tsf %llu\n",
+ tdma->tdma_slot,
+ (unsigned long long) le64toh(ni->ni_tstamp.tsf),
+ vap->iv_bss->ni_tstamp.tsf);
+ setbit(ts->tdma_inuse, tdma->tdma_slot);
+
+ (void) tdma_update(vap, tdma, ni, 1);
+ }
+ }
+ return 0;
+}
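+
+/*
+ * Summary of the slot relations handled above (illustrative): the
+ * station in the slot before ours is our timing reference; the
+ * station in the slot after ours echoes our timestamp for round-trip
+ * estimation; a longer-lived station in our own slot forces us to
+ * pick a new slot.
+ */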
+
+int
+ieee80211_tdma_getslot(struct ieee80211vap *vap)
+{
+ struct ieee80211_tdma_state *ts = vap->iv_tdma;
+
+ KASSERT(vap->iv_caps & IEEE80211_C_TDMA,
+ ("not a tdma vap, caps 0x%x", vap->iv_caps));
+ return ts->tdma_slot;
+}
+
+/*
+ * Parse a TDMA ie on station join and use it to setup node state.
+ */
+void
+ieee80211_parse_tdma(struct ieee80211_node *ni, const uint8_t *ie)
+{
+ struct ieee80211vap *vap = ni->ni_vap;
+
+ if (vap->iv_caps & IEEE80211_C_TDMA) {
+ const struct ieee80211_tdma_param *tdma =
+ (const struct ieee80211_tdma_param *)ie;
+ struct ieee80211_tdma_state *ts = vap->iv_tdma;
+ /*
+ * Adopt TDMA configuration when joining an
+ * existing network.
+ */
+ setbit(ts->tdma_inuse, tdma->tdma_slot);
+ (void) tdma_update(vap, tdma, ni, 1);
+ /*
+ * Propagate capabilities based on the local
+ * configuration and the remote station's advertised
+ * capabilities. In particular this permits us to
+ * enable use of QoS to disable ACK's.
+ */
+ if ((vap->iv_flags & IEEE80211_F_WME) &&
+ ni->ni_ies.wme_ie != NULL)
+ ni->ni_flags |= IEEE80211_NODE_QOS;
+ }
+}
+
+#define TDMA_OUI_BYTES 0x00, 0x03, 0x7f
+/*
+ * Add a TDMA parameters element to a frame.
+ */
+uint8_t *
+ieee80211_add_tdma(uint8_t *frm, struct ieee80211vap *vap)
+{
+#define ADDSHORT(frm, v) do { \
+ frm[0] = (v) & 0xff; \
+ frm[1] = (v) >> 8; \
+ frm += 2; \
+} while (0)
+ static const struct ieee80211_tdma_param param = {
+ .tdma_id = IEEE80211_ELEMID_VENDOR,
+ .tdma_len = sizeof(struct ieee80211_tdma_param) - 2,
+ .tdma_oui = { TDMA_OUI_BYTES },
+ .tdma_type = TDMA_OUI_TYPE,
+ .tdma_subtype = TDMA_SUBTYPE_PARAM,
+ .tdma_version = TDMA_VERSION,
+ };
+ const struct ieee80211_tdma_state *ts = vap->iv_tdma;
+ uint16_t slotlen;
+
+ KASSERT(vap->iv_caps & IEEE80211_C_TDMA,
+ ("not a tdma vap, caps 0x%x", vap->iv_caps));
+
+ memcpy(frm, &param, sizeof(param));
+ frm += __offsetof(struct ieee80211_tdma_param, tdma_slot);
+ *frm++ = ts->tdma_slot;
+ *frm++ = ts->tdma_slotcnt;
+ /* NB: convert units to fit in 16-bits */
+ slotlen = ts->tdma_slotlen / 100; /* 100us units */
+ ADDSHORT(frm, slotlen);
+ *frm++ = ts->tdma_bintval;
+ *frm++ = ts->tdma_inuse[0];
+ frm += 10; /* pad+timestamp */
+ return frm;
+#undef ADDSHORT
+}
+#undef TDMA_OUI_BYTES
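+
+/*
+ * Example on-air encoding of the ie built above (hedged;
+ * TDMA_OUI_TYPE shown symbolically, remaining bytes taken from the
+ * defaults): a master in slot 0 of a 2-slot schedule with 10ms slots
+ * would emit the 24 bytes
+ *
+ *	dd 16 00 03 7f <TDMA_OUI_TYPE> 01 02 00 02 64 00 05 01 00 00
+ *	    <8-byte tstamp>
+ *
+ * where 64 00 is the little-endian slot length in 100us units.
+ */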
+
+/*
+ * Update TDMA state at TBTT.
+ */
+void
+ieee80211_tdma_update_beacon(struct ieee80211vap *vap,
+ struct ieee80211_beacon_offsets *bo)
+{
+ struct ieee80211_tdma_state *ts = vap->iv_tdma;
+
+ KASSERT(vap->iv_caps & IEEE80211_C_TDMA,
+ ("not a tdma vap, caps 0x%x", vap->iv_caps));
+
+ if (isset(bo->bo_flags, IEEE80211_BEACON_TDMA)) {
+ (void) ieee80211_add_tdma(bo->bo_tdma, vap);
+ clrbit(bo->bo_flags, IEEE80211_BEACON_TDMA);
+ }
+ if (ts->tdma_slot != 0) /* only on master */
+ return;
+ if (ts->tdma_count <= 0) {
+ /*
+ * Time to update the mask of active/inuse stations.
+ * We track stations that we've received a beacon
+ * frame from and update this mask periodically.
+ * This allows us to miss a few beacons before marking
+ * a slot free for re-use.
+ */
+ ts->tdma_inuse[0] = ts->tdma_active[0];
+ ts->tdma_active[0] = 0x01;
+ /* update next time 'round */
+ /* XXX use notify framework */
+ setbit(bo->bo_flags, IEEE80211_BEACON_TDMA);
+ /* NB: use s/w beacon miss threshold; may be too high */
+ ts->tdma_count = vap->iv_bmissthreshold-1;
+ } else
+ ts->tdma_count--;
+}
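+
+/*
+ * E.g. the mask handling above: a master in slot 0 reseeds
+ * tdma_active to 0x01 each round; a slave beaconing in slot 1 sets
+ * bit 1 (see tdma_process_params), so tdma_inuse becomes 0x03 at the
+ * next update. A slave silent for roughly bmissthreshold rounds ages
+ * out and its slot is marked free for re-use.
+ */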
+
+static int
+tdma_ioctl_get80211(struct ieee80211vap *vap, struct ieee80211req *ireq)
+{
+ struct ieee80211_tdma_state *ts = vap->iv_tdma;
+
+ if ((vap->iv_caps & IEEE80211_C_TDMA) == 0)
+ return EOPNOTSUPP;
+
+ switch (ireq->i_type) {
+ case IEEE80211_IOC_TDMA_SLOT:
+ ireq->i_val = ts->tdma_slot;
+ break;
+ case IEEE80211_IOC_TDMA_SLOTCNT:
+ ireq->i_val = ts->tdma_slotcnt;
+ break;
+ case IEEE80211_IOC_TDMA_SLOTLEN:
+ ireq->i_val = ts->tdma_slotlen;
+ break;
+ case IEEE80211_IOC_TDMA_BINTERVAL:
+ ireq->i_val = ts->tdma_bintval;
+ break;
+ default:
+ return ENOSYS;
+ }
+ return 0;
+}
+IEEE80211_IOCTL_GET(tdma, tdma_ioctl_get80211);
+
+static int
+tdma_ioctl_set80211(struct ieee80211vap *vap, struct ieee80211req *ireq)
+{
+ struct ieee80211_tdma_state *ts = vap->iv_tdma;
+
+ if ((vap->iv_caps & IEEE80211_C_TDMA) == 0)
+ return EOPNOTSUPP;
+
+ switch (ireq->i_type) {
+ case IEEE80211_IOC_TDMA_SLOT:
+ if (!(0 <= ireq->i_val && ireq->i_val <= ts->tdma_slotcnt))
+ return EINVAL;
+ if (ireq->i_val != ts->tdma_slot) {
+ ts->tdma_slot = ireq->i_val;
+ goto restart;
+ }
+ break;
+ case IEEE80211_IOC_TDMA_SLOTCNT:
+ if (!TDMA_SLOTCNT_VALID(ireq->i_val))
+ return EINVAL;
+ if (ireq->i_val != ts->tdma_slotcnt) {
+ ts->tdma_slotcnt = ireq->i_val;
+ goto restart;
+ }
+ break;
+ case IEEE80211_IOC_TDMA_SLOTLEN:
+ /*
+ * XXX
+		 * 150 ensures at least 1/8 TU
+		 * 0xfffff is the max duration for bursting
+		 * (implicit by way of the 16-bit data type for i_val)
+ */
+ if (!TDMA_SLOTLEN_VALID(ireq->i_val))
+ return EINVAL;
+ if (ireq->i_val != ts->tdma_slotlen) {
+ ts->tdma_slotlen = ireq->i_val;
+ goto restart;
+ }
+ break;
+ case IEEE80211_IOC_TDMA_BINTERVAL:
+ if (!TDMA_BINTVAL_VALID(ireq->i_val))
+ return EINVAL;
+ if (ireq->i_val != ts->tdma_bintval) {
+ ts->tdma_bintval = ireq->i_val;
+ goto restart;
+ }
+ break;
+ default:
+ return ENOSYS;
+ }
+ return 0;
+restart:
+ ieee80211_beacon_notify(vap, IEEE80211_BEACON_TDMA);
+ return ERESTART;
+}
+IEEE80211_IOCTL_SET(tdma, tdma_ioctl_set80211);
diff --git a/rtems/freebsd/net80211/ieee80211_tdma.h b/rtems/freebsd/net80211/ieee80211_tdma.h
new file mode 100644
index 00000000..989d6417
--- /dev/null
+++ b/rtems/freebsd/net80211/ieee80211_tdma.h
@@ -0,0 +1,102 @@
+/*-
+ * Copyright (c) 2007-2009 Sam Leffler, Errno Consulting
+ * Copyright (c) 2007-2009 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+#ifndef _NET80211_IEEE80211_TDMA_HH_
+#define _NET80211_IEEE80211_TDMA_HH_
+
+/*
+ * TDMA-mode implementation definitions.
+ */
+
+#define TDMA_SUBTYPE_PARAM 0x01
+#define TDMA_VERSION_V2 2
+#define TDMA_VERSION TDMA_VERSION_V2
+
+/* NB: we only support 2 right now but protocol handles up to 8 */
+#define TDMA_MAXSLOTS 2 /* max slots/sta's */
+
+#define TDMA_PARAM_LEN_V2 sizeof(struct ieee80211_tdma_param)
+
+/*
+ * TDMA information element.
+ */
+struct ieee80211_tdma_param {
+ u_int8_t tdma_id; /* IEEE80211_ELEMID_VENDOR */
+ u_int8_t tdma_len;
+ u_int8_t tdma_oui[3]; /* TDMA_OUI */
+ u_int8_t tdma_type; /* TDMA_OUI_TYPE */
+ u_int8_t tdma_subtype; /* TDMA_SUBTYPE_PARAM */
+ u_int8_t tdma_version; /* spec revision */
+ u_int8_t tdma_slot; /* station slot # [0..7] */
+ u_int8_t tdma_slotcnt; /* bss slot count [1..8] */
+ u_int16_t tdma_slotlen; /* bss slot len (100us) */
+ u_int8_t tdma_bintval; /* beacon interval (superframes) */
+ u_int8_t tdma_inuse[1]; /* slot occupancy map */
+ u_int8_t tdma_pad[2];
+ u_int8_t tdma_tstamp[8]; /* timestamp from last beacon */
+} __packed;
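+
+/*
+ * A sketch of the sanity checks a receiver might apply before trusting
+ * a received TDMA IE (illustrative only; OUI/type/subtype matching is
+ * omitted):
+ *
+ *	const struct ieee80211_tdma_param *tp =
+ *	    (const struct ieee80211_tdma_param *)ie;
+ *	if (tp->tdma_len < TDMA_PARAM_LEN_V2 - 2 ||
+ *	    tp->tdma_version != TDMA_VERSION ||
+ *	    tp->tdma_slotcnt == 0 || tp->tdma_slotcnt > TDMA_MAXSLOTS ||
+ *	    tp->tdma_slot >= tp->tdma_slotcnt)
+ *		return;			(malformed or unsupported)
+ */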
+
+#ifdef _KERNEL
+/*
+ * Implementation state.
+ */
+struct ieee80211_tdma_state {
+ u_int tdma_slotlen; /* bss slot length (us) */
+ uint8_t tdma_version; /* protocol version to use */
+ uint8_t tdma_slotcnt; /* bss slot count */
+ uint8_t tdma_bintval; /* beacon interval (slots) */
+ uint8_t tdma_slot; /* station slot # */
+ uint8_t tdma_inuse[1]; /* mask of slots in use */
+ uint8_t tdma_active[1]; /* mask of active slots */
+ int tdma_count; /* active/inuse countdown */
+ void *tdma_peer; /* peer station cookie */
+ struct timeval tdma_lastprint; /* time of last rate-limited printf */
+ int tdma_fails; /* fail count for rate-limiting */
+
+ /* parent method pointers */
+ int (*tdma_newstate)(struct ieee80211vap *, enum ieee80211_state,
+ int arg);
+ void (*tdma_recv_mgmt)(struct ieee80211_node *,
+ struct mbuf *, int, int, int);
+ void (*tdma_opdetach)(struct ieee80211vap *);
+};
+
+#define TDMA_UPDATE_SLOT 0x0001 /* tdma_slot changed */
+#define TDMA_UPDATE_SLOTCNT 0x0002 /* tdma_slotcnt changed */
+#define TDMA_UPDATE_SLOTLEN 0x0004 /* tdma_slotlen changed */
+#define TDMA_UPDATE_BINTVAL 0x0008 /* tdma_bintval changed */
+
+void ieee80211_tdma_vattach(struct ieee80211vap *);
+
+int ieee80211_tdma_getslot(struct ieee80211vap *vap);
+void ieee80211_parse_tdma(struct ieee80211_node *ni, const uint8_t *ie);
+uint8_t *ieee80211_add_tdma(uint8_t *frm, struct ieee80211vap *vap);
+struct ieee80211_beacon_offsets;
+void ieee80211_tdma_update_beacon(struct ieee80211vap *vap,
+ struct ieee80211_beacon_offsets *bo);
+#endif /* _KERNEL */
+#endif /* !_NET80211_IEEE80211_TDMA_HH_ */
diff --git a/rtems/freebsd/net80211/ieee80211_var.h b/rtems/freebsd/net80211/ieee80211_var.h
new file mode 100644
index 00000000..d0338dfc
--- /dev/null
+++ b/rtems/freebsd/net80211/ieee80211_var.h
@@ -0,0 +1,916 @@
+/*-
+ * Copyright (c) 2001 Atsushi Onoe
+ * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+#ifndef _NET80211_IEEE80211_VAR_HH_
+#define _NET80211_IEEE80211_VAR_HH_
+
+/*
+ * Definitions for IEEE 802.11 drivers.
+ */
+/* NB: portability glue must go first */
+#if defined(__NetBSD__)
+#include <rtems/freebsd/net80211/ieee80211_netbsd.h>
+#elif defined(__FreeBSD__)
+#include <rtems/freebsd/net80211/ieee80211_freebsd.h>
+#elif defined(__linux__)
+#include <rtems/freebsd/net80211/ieee80211_linux.h>
+#else
+#error "No support for your operating system!"
+#endif
+
+#include <rtems/freebsd/net80211/_ieee80211.h>
+#include <rtems/freebsd/net80211/ieee80211.h>
+#include <rtems/freebsd/net80211/ieee80211_ageq.h>
+#include <rtems/freebsd/net80211/ieee80211_crypto.h>
+#include <rtems/freebsd/net80211/ieee80211_dfs.h>
+#include <rtems/freebsd/net80211/ieee80211_ioctl.h> /* for ieee80211_stats */
+#include <rtems/freebsd/net80211/ieee80211_phy.h>
+#include <rtems/freebsd/net80211/ieee80211_power.h>
+#include <rtems/freebsd/net80211/ieee80211_node.h>
+#include <rtems/freebsd/net80211/ieee80211_proto.h>
+#include <rtems/freebsd/net80211/ieee80211_radiotap.h>
+#include <rtems/freebsd/net80211/ieee80211_scan.h>
+
+#define	IEEE80211_TXPOWER_MAX	100	/* .5 dBm (XXX units?) */
+#define IEEE80211_TXPOWER_MIN 0 /* kill radio */
+
+#define IEEE80211_DTIM_DEFAULT 1 /* default DTIM period */
+#define IEEE80211_BINTVAL_DEFAULT 100 /* default beacon interval (TU's) */
+
+#define IEEE80211_BMISS_MAX 2 /* maximum consecutive bmiss allowed */
+#define IEEE80211_HWBMISS_DEFAULT 7 /* h/w bmiss threshold (beacons) */
+
+#define IEEE80211_BGSCAN_INTVAL_MIN 15 /* min bg scan intvl (secs) */
+#define IEEE80211_BGSCAN_INTVAL_DEFAULT (5*60) /* default bg scan intvl */
+
+#define IEEE80211_BGSCAN_IDLE_MIN 100 /* min idle time (ms) */
+#define IEEE80211_BGSCAN_IDLE_DEFAULT 250 /* default idle time (ms) */
+
+#define IEEE80211_SCAN_VALID_MIN 10 /* min scan valid time (secs) */
+#define IEEE80211_SCAN_VALID_DEFAULT 60 /* default scan valid time */
+
+#define IEEE80211_PS_SLEEP 0x1 /* STA is in power saving mode */
+#define IEEE80211_PS_MAX_QUEUE 50 /* maximum saved packets */
+
+#define IEEE80211_FIXED_RATE_NONE 0xff
+#define IEEE80211_TXMAX_DEFAULT 6 /* default ucast max retries */
+
+#define IEEE80211_RTS_DEFAULT IEEE80211_RTS_MAX
+#define IEEE80211_FRAG_DEFAULT IEEE80211_FRAG_MAX
+
+#define IEEE80211_MS_TO_TU(x) (((x) * 1000) / 1024)
+#define IEEE80211_TU_TO_MS(x) (((x) * 1024) / 1000)
+#define IEEE80211_TU_TO_TICKS(x)(((x) * 1024 * hz) / (1000 * 1000))
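+/*
+ * NB: a TU is 1024us so these integer conversions are approximate,
+ * e.g. IEEE80211_MS_TO_TU(100) == 97 and IEEE80211_TU_TO_MS(100) == 102.
+ */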
+
+/*
+ * 802.11 control state is split into a common portion that maps
+ * 1-1 to a physical device and one or more "Virtual AP's" (VAP)
+ * that are bound to an ieee80211com instance and share a single
+ * underlying device. Each VAP has a corresponding OS device
+ * entity through which traffic flows and that applications use
+ * for issuing ioctls, etc.
+ */
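+
+/*
+ * A rough sketch of the resulting driver attach sequence (illustrative
+ * only; the mydrv_* names are placeholders):
+ *
+ *	ic->ic_vap_create = mydrv_vap_create;
+ *	ic->ic_vap_delete = mydrv_vap_delete;
+ *	ieee80211_ifattach(ic, macaddr);
+ *
+ * after which vaps are created on demand (e.g. ifconfig wlan create)
+ * through the ic_vap_create method.
+ */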
+
+/*
+ * Data common to one or more virtual AP's. State shared by
+ * the underlying device and the net80211 layer is exposed here;
+ * e.g. device-specific callbacks.
+ */
+struct ieee80211vap;
+typedef void (*ieee80211vap_attach)(struct ieee80211vap *);
+
+struct ieee80211_appie {
+ uint16_t ie_len; /* size of ie_data */
+ uint8_t ie_data[]; /* user-specified IE's */
+};
+
+struct ieee80211_tdma_param;
+struct ieee80211_rate_table;
+struct ieee80211_tx_ampdu;
+struct ieee80211_rx_ampdu;
+struct ieee80211_superg;
+struct ieee80211_frame;
+
+struct ieee80211com {
+ struct ifnet *ic_ifp; /* associated device */
+ ieee80211_com_lock_t ic_comlock; /* state update lock */
+ TAILQ_HEAD(, ieee80211vap) ic_vaps; /* list of vap instances */
+ int ic_headroom; /* driver tx headroom needs */
+ enum ieee80211_phytype ic_phytype; /* XXX wrong for multi-mode */
+ enum ieee80211_opmode ic_opmode; /* operation mode */
+ struct ifmedia ic_media; /* interface media config */
+ struct callout ic_inact; /* inactivity processing */
+ struct taskqueue *ic_tq; /* deferred state thread */
+ struct task ic_parent_task; /* deferred parent processing */
+ struct task ic_promisc_task;/* deferred promisc update */
+ struct task ic_mcast_task; /* deferred mcast update */
+ struct task ic_chan_task; /* deferred channel change */
+ struct task ic_bmiss_task; /* deferred beacon miss hndlr */
+
+ uint32_t ic_flags; /* state flags */
+ uint32_t ic_flags_ext; /* extended state flags */
+ uint32_t ic_flags_ht; /* HT state flags */
+ uint32_t ic_flags_ven; /* vendor state flags */
+ uint32_t ic_caps; /* capabilities */
+ uint32_t ic_htcaps; /* HT capabilities */
+ uint32_t ic_cryptocaps; /* crypto capabilities */
+ uint8_t ic_modecaps[2]; /* set of mode capabilities */
+ uint8_t ic_promisc; /* vap's needing promisc mode */
+ uint8_t ic_allmulti; /* vap's needing all multicast*/
+ uint8_t ic_nrunning; /* vap's marked running */
+ uint8_t ic_curmode; /* current mode */
+ uint16_t ic_bintval; /* beacon interval */
+ uint16_t ic_lintval; /* listen interval */
+ uint16_t ic_holdover; /* PM hold over duration */
+ uint16_t ic_txpowlimit; /* global tx power limit */
+ struct ieee80211_rateset ic_sup_rates[IEEE80211_MODE_MAX];
+
+ /*
+ * Channel state:
+ *
+ * ic_channels is the set of available channels for the device;
+	 * it is set up by the driver
+ * ic_nchans is the number of valid entries in ic_channels
+ * ic_chan_avail is a bit vector of these channels used to check
+ * whether a channel is available w/o searching the channel table.
+ * ic_chan_active is a (potentially) constrained subset of
+ * ic_chan_avail that reflects any mode setting or user-specified
+ * limit on the set of channels to use/scan
+ * ic_curchan is the current channel the device is set to; it may
+ * be different from ic_bsschan when we are off-channel scanning
+ * or otherwise doing background work
+ * ic_bsschan is the channel selected for operation; it may
+ * be undefined (IEEE80211_CHAN_ANYC)
+ * ic_prevchan is a cached ``previous channel'' used to optimize
+ * lookups when switching back+forth between two channels
+ * (e.g. for dynamic turbo)
+ */
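+/*
+ * For example, a channel's availability can be tested cheaply with
+ * isset(ic->ic_chan_avail, c), where c is the IEEE channel number,
+ * rather than searching ic_channels.
+ */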
+ int ic_nchans; /* # entries in ic_channels */
+ struct ieee80211_channel ic_channels[IEEE80211_CHAN_MAX];
+ uint8_t ic_chan_avail[IEEE80211_CHAN_BYTES];
+ uint8_t ic_chan_active[IEEE80211_CHAN_BYTES];
+ uint8_t ic_chan_scan[IEEE80211_CHAN_BYTES];
+ struct ieee80211_channel *ic_curchan; /* current channel */
+ const struct ieee80211_rate_table *ic_rt; /* table for ic_curchan */
+ struct ieee80211_channel *ic_bsschan; /* bss channel */
+ struct ieee80211_channel *ic_prevchan; /* previous channel */
+ struct ieee80211_regdomain ic_regdomain;/* regulatory data */
+ struct ieee80211_appie *ic_countryie; /* calculated country ie */
+ struct ieee80211_channel *ic_countryie_chan;
+
+ /* 802.11h/DFS state */
+ struct ieee80211_channel *ic_csa_newchan;/* channel for doing CSA */
+ short ic_csa_mode; /* mode for doing CSA */
+ short ic_csa_count; /* count for doing CSA */
+ struct ieee80211_dfs_state ic_dfs; /* DFS state */
+
+ struct ieee80211_scan_state *ic_scan; /* scan state */
+ int ic_lastdata; /* time of last data frame */
+ int ic_lastscan; /* time last scan completed */
+
+ /* NB: this is the union of all vap stations/neighbors */
+ int ic_max_keyix; /* max h/w key index */
+ struct ieee80211_node_table ic_sta; /* stations/neighbors */
+ struct ieee80211_ageq ic_stageq; /* frame staging queue */
+ uint32_t ic_hash_key; /* random key for mac hash */
+
+ /* XXX multi-bss: split out common/vap parts */
+ struct ieee80211_wme_state ic_wme; /* WME/WMM state */
+
+ /* XXX multi-bss: can per-vap be done/make sense? */
+ enum ieee80211_protmode ic_protmode; /* 802.11g protection mode */
+ uint16_t ic_nonerpsta; /* # non-ERP stations */
+ uint16_t ic_longslotsta; /* # long slot time stations */
+ uint16_t ic_sta_assoc; /* stations associated */
+ uint16_t ic_ht_sta_assoc;/* HT stations associated */
+ uint16_t ic_ht40_sta_assoc;/* HT40 stations associated */
+ uint8_t ic_curhtprotmode;/* HTINFO bss state */
+ enum ieee80211_protmode ic_htprotmode; /* HT protection mode */
+ int ic_lastnonerp; /* last time non-ERP sta noted*/
+ int ic_lastnonht; /* last time non-HT sta noted */
+
+ /* optional state for Atheros SuperG protocol extensions */
+ struct ieee80211_superg *ic_superg;
+
+ /* radiotap handling */
+ struct ieee80211_radiotap_header *ic_th;/* tx radiotap headers */
+ void *ic_txchan; /* channel state in ic_th */
+ struct ieee80211_radiotap_header *ic_rh;/* rx radiotap headers */
+ void *ic_rxchan; /* channel state in ic_rh */
+ int ic_montaps; /* active monitor mode taps */
+
+ /* virtual ap create/delete */
+ struct ieee80211vap* (*ic_vap_create)(struct ieee80211com *,
+ const char name[IFNAMSIZ], int unit,
+ int opmode, int flags,
+ const uint8_t bssid[IEEE80211_ADDR_LEN],
+ const uint8_t macaddr[IEEE80211_ADDR_LEN]);
+ void (*ic_vap_delete)(struct ieee80211vap *);
+ /* operating mode attachment */
+ ieee80211vap_attach ic_vattach[IEEE80211_OPMODE_MAX];
+ /* return hardware/radio capabilities */
+ void (*ic_getradiocaps)(struct ieee80211com *,
+ int, int *, struct ieee80211_channel []);
+ /* check and/or prepare regdomain state change */
+ int (*ic_setregdomain)(struct ieee80211com *,
+ struct ieee80211_regdomain *,
+ int, struct ieee80211_channel []);
+ /* send/recv 802.11 management frame */
+ int (*ic_send_mgmt)(struct ieee80211_node *,
+ int, int);
+ /* send raw 802.11 frame */
+ int (*ic_raw_xmit)(struct ieee80211_node *,
+ struct mbuf *,
+ const struct ieee80211_bpf_params *);
+ /* update device state for 802.11 slot time change */
+ void (*ic_updateslot)(struct ifnet *);
+ /* handle multicast state changes */
+ void (*ic_update_mcast)(struct ifnet *);
+ /* handle promiscuous mode changes */
+ void (*ic_update_promisc)(struct ifnet *);
+ /* new station association callback/notification */
+ void (*ic_newassoc)(struct ieee80211_node *, int);
+ /* TDMA update notification */
+ void (*ic_tdma_update)(struct ieee80211_node *,
+ const struct ieee80211_tdma_param *, int);
+ /* node state management */
+ struct ieee80211_node* (*ic_node_alloc)(struct ieee80211vap *,
+ const uint8_t [IEEE80211_ADDR_LEN]);
+ void (*ic_node_free)(struct ieee80211_node *);
+ void (*ic_node_cleanup)(struct ieee80211_node *);
+ void (*ic_node_age)(struct ieee80211_node *);
+ void (*ic_node_drain)(struct ieee80211_node *);
+ int8_t (*ic_node_getrssi)(const struct ieee80211_node*);
+ void (*ic_node_getsignal)(const struct ieee80211_node*,
+ int8_t *, int8_t *);
+ void (*ic_node_getmimoinfo)(
+ const struct ieee80211_node*,
+ struct ieee80211_mimo_info *);
+ /* scanning support */
+ void (*ic_scan_start)(struct ieee80211com *);
+ void (*ic_scan_end)(struct ieee80211com *);
+ void (*ic_set_channel)(struct ieee80211com *);
+ void (*ic_scan_curchan)(struct ieee80211_scan_state *,
+ unsigned long);
+ void (*ic_scan_mindwell)(struct ieee80211_scan_state *);
+
+ /*
+ * 802.11n ADDBA support. A simple/generic implementation
+	 * of A-MPDU tx aggregation is provided; the driver may
+	 * override these methods to provide its own support.
+	 * A-MPDU rx re-ordering happens automatically if the
+	 * driver passes out-of-order frames to ieee80211_input
+	 * from an associated HT station.
+ */
+ int (*ic_recv_action)(struct ieee80211_node *,
+ const struct ieee80211_frame *,
+ const uint8_t *frm, const uint8_t *efrm);
+ int (*ic_send_action)(struct ieee80211_node *,
+ int category, int action, void *);
+ /* check if A-MPDU should be enabled this station+ac */
+ int (*ic_ampdu_enable)(struct ieee80211_node *,
+ struct ieee80211_tx_ampdu *);
+ /* start/stop doing A-MPDU tx aggregation for a station */
+ int (*ic_addba_request)(struct ieee80211_node *,
+ struct ieee80211_tx_ampdu *,
+ int dialogtoken, int baparamset,
+ int batimeout);
+ int (*ic_addba_response)(struct ieee80211_node *,
+ struct ieee80211_tx_ampdu *,
+ int status, int baparamset, int batimeout);
+ void (*ic_addba_stop)(struct ieee80211_node *,
+ struct ieee80211_tx_ampdu *);
+ /* BAR response received */
+ void (*ic_bar_response)(struct ieee80211_node *,
+ struct ieee80211_tx_ampdu *, int status);
+ /* start/stop doing A-MPDU rx processing for a station */
+ int (*ic_ampdu_rx_start)(struct ieee80211_node *,
+ struct ieee80211_rx_ampdu *, int baparamset,
+ int batimeout, int baseqctl);
+ void (*ic_ampdu_rx_stop)(struct ieee80211_node *,
+ struct ieee80211_rx_ampdu *);
+ uint64_t ic_spare[8];
+};
+
+struct ieee80211_aclator;
+struct ieee80211_tdma_state;
+struct ieee80211_mesh_state;
+struct ieee80211_hwmp_state;
+
+struct ieee80211vap {
+ struct ifmedia iv_media; /* interface media config */
+ struct ifnet *iv_ifp; /* associated device */
+ struct bpf_if *iv_rawbpf; /* packet filter structure */
+ struct sysctl_ctx_list *iv_sysctl; /* dynamic sysctl context */
+ struct sysctl_oid *iv_oid; /* net.wlan.X sysctl oid */
+
+ TAILQ_ENTRY(ieee80211vap) iv_next; /* list of vap instances */
+ struct ieee80211com *iv_ic; /* back ptr to common state */
+ uint32_t iv_debug; /* debug msg flags */
+ struct ieee80211_stats iv_stats; /* statistics */
+
+ uint8_t iv_myaddr[IEEE80211_ADDR_LEN];
+ uint32_t iv_flags; /* state flags */
+ uint32_t iv_flags_ext; /* extended state flags */
+ uint32_t iv_flags_ht; /* HT state flags */
+ uint32_t iv_flags_ven; /* vendor state flags */
+ uint32_t iv_caps; /* capabilities */
+ uint32_t iv_htcaps; /* HT capabilities */
+ enum ieee80211_opmode iv_opmode; /* operation mode */
+ enum ieee80211_state iv_state; /* state machine state */
+ enum ieee80211_state iv_nstate; /* pending state */
+ int iv_nstate_arg; /* pending state arg */
+ struct task iv_nstate_task; /* deferred state processing */
+ struct task iv_swbmiss_task;/* deferred iv_bmiss call */
+ struct callout iv_mgtsend; /* mgmt frame response timer */
+ /* inactivity timer settings */
+ int iv_inact_init; /* setting for new station */
+ int iv_inact_auth; /* auth but not assoc setting */
+ int iv_inact_run; /* authorized setting */
+ int iv_inact_probe; /* inactive probe time */
+
+ int iv_des_nssid; /* # desired ssids */
+ struct ieee80211_scan_ssid iv_des_ssid[1];/* desired ssid table */
+ uint8_t iv_des_bssid[IEEE80211_ADDR_LEN];
+ struct ieee80211_channel *iv_des_chan; /* desired channel */
+ uint16_t iv_des_mode; /* desired mode */
+ int iv_nicknamelen; /* XXX junk */
+ uint8_t iv_nickname[IEEE80211_NWID_LEN];
+ u_int iv_bgscanidle; /* bg scan idle threshold */
+ u_int iv_bgscanintvl; /* bg scan min interval */
+ u_int iv_scanvalid; /* scan cache valid threshold */
+ u_int iv_scanreq_duration;
+ u_int iv_scanreq_mindwell;
+ u_int iv_scanreq_maxdwell;
+ uint16_t iv_scanreq_flags;/* held scan request params */
+ uint8_t iv_scanreq_nssid;
+ struct ieee80211_scan_ssid iv_scanreq_ssid[IEEE80211_SCAN_MAX_SSID];
+ /* sta-mode roaming state */
+ enum ieee80211_roamingmode iv_roaming; /* roaming mode */
+ struct ieee80211_roamparam iv_roamparms[IEEE80211_MODE_MAX];
+
+ uint8_t iv_bmissthreshold;
+ uint8_t iv_bmiss_count; /* current beacon miss count */
+ int iv_bmiss_max; /* max bmiss before scan */
+ uint16_t iv_swbmiss_count;/* beacons in last period */
+ uint16_t iv_swbmiss_period;/* s/w bmiss period */
+ struct callout iv_swbmiss; /* s/w beacon miss timer */
+
+ int iv_ampdu_rxmax; /* A-MPDU rx limit (bytes) */
+ int iv_ampdu_density;/* A-MPDU density */
+ int iv_ampdu_limit; /* A-MPDU tx limit (bytes) */
+ int iv_amsdu_limit; /* A-MSDU tx limit (bytes) */
+ u_int iv_ampdu_mintraffic[WME_NUM_AC];
+
+ uint32_t *iv_aid_bitmap; /* association id map */
+ uint16_t iv_max_aid;
+ uint16_t iv_sta_assoc; /* stations associated */
+ uint16_t iv_ps_sta; /* stations in power save */
+ uint16_t iv_ps_pending; /* ps sta's w/ pending frames */
+ uint16_t iv_txseq; /* mcast xmit seq# space */
+ uint16_t iv_tim_len; /* ic_tim_bitmap size (bytes) */
+ uint8_t *iv_tim_bitmap; /* power-save stations w/ data*/
+ uint8_t iv_dtim_period; /* DTIM period */
+ uint8_t iv_dtim_count; /* DTIM count from last bcn */
+ /* set/unset aid pwrsav state */
+ int iv_csa_count; /* count for doing CSA */
+
+ struct ieee80211_node *iv_bss; /* information for this node */
+ struct ieee80211_txparam iv_txparms[IEEE80211_MODE_MAX];
+ uint16_t iv_rtsthreshold;
+ uint16_t iv_fragthreshold;
+ int iv_inact_timer; /* inactivity timer wait */
+ /* application-specified IE's to attach to mgt frames */
+ struct ieee80211_appie *iv_appie_beacon;
+ struct ieee80211_appie *iv_appie_probereq;
+ struct ieee80211_appie *iv_appie_proberesp;
+ struct ieee80211_appie *iv_appie_assocreq;
+ struct ieee80211_appie *iv_appie_assocresp;
+ struct ieee80211_appie *iv_appie_wpa;
+ uint8_t *iv_wpa_ie;
+ uint8_t *iv_rsn_ie;
+ uint16_t iv_max_keyix; /* max h/w key index */
+ ieee80211_keyix iv_def_txkey; /* default/group tx key index */
+ struct ieee80211_key iv_nw_keys[IEEE80211_WEP_NKID];
+ int (*iv_key_alloc)(struct ieee80211vap *,
+ struct ieee80211_key *,
+ ieee80211_keyix *, ieee80211_keyix *);
+ int (*iv_key_delete)(struct ieee80211vap *,
+ const struct ieee80211_key *);
+ int (*iv_key_set)(struct ieee80211vap *,
+ const struct ieee80211_key *,
+ const uint8_t mac[IEEE80211_ADDR_LEN]);
+ void (*iv_key_update_begin)(struct ieee80211vap *);
+ void (*iv_key_update_end)(struct ieee80211vap *);
+
+ const struct ieee80211_authenticator *iv_auth; /* authenticator glue */
+ void *iv_ec; /* private auth state */
+
+ const struct ieee80211_aclator *iv_acl; /* acl glue */
+ void *iv_as; /* private aclator state */
+
+ const struct ieee80211_ratectl *iv_rate;
+ void *iv_rs; /* private ratectl state */
+
+ struct ieee80211_tdma_state *iv_tdma; /* tdma state */
+ struct ieee80211_mesh_state *iv_mesh; /* MBSS state */
+ struct ieee80211_hwmp_state *iv_hwmp; /* HWMP state */
+
+ /* operate-mode detach hook */
+ void (*iv_opdetach)(struct ieee80211vap *);
+ /* receive processing */
+ int (*iv_input)(struct ieee80211_node *,
+ struct mbuf *, int, int);
+ void (*iv_recv_mgmt)(struct ieee80211_node *,
+ struct mbuf *, int, int, int);
+ void (*iv_recv_ctl)(struct ieee80211_node *,
+ struct mbuf *, int);
+ void (*iv_deliver_data)(struct ieee80211vap *,
+ struct ieee80211_node *, struct mbuf *);
+#if 0
+ /* send processing */
+ int (*iv_send_mgmt)(struct ieee80211_node *,
+ int, int);
+#endif
+ /* beacon miss processing */
+ void (*iv_bmiss)(struct ieee80211vap *);
+ /* reset device state after 802.11 parameter/state change */
+ int (*iv_reset)(struct ieee80211vap *, u_long);
+ /* [schedule] beacon frame update */
+ void (*iv_update_beacon)(struct ieee80211vap *, int);
+ /* power save handling */
+ void (*iv_update_ps)(struct ieee80211vap *, int);
+ int (*iv_set_tim)(struct ieee80211_node *, int);
+ /* state machine processing */
+ int (*iv_newstate)(struct ieee80211vap *,
+ enum ieee80211_state, int);
+ /* 802.3 output method for raw frame xmit */
+ int (*iv_output)(struct ifnet *, struct mbuf *,
+ struct sockaddr *, struct route *);
+ uint64_t iv_spare[6];
+};
+MALLOC_DECLARE(M_80211_VAP);
+
+#define IEEE80211_ADDR_EQ(a1,a2) (memcmp(a1,a2,IEEE80211_ADDR_LEN) == 0)
+#define IEEE80211_ADDR_COPY(dst,src) memcpy(dst,src,IEEE80211_ADDR_LEN)
+
+/* ic_flags/iv_flags */
+#define IEEE80211_F_TURBOP 0x00000001 /* CONF: ATH Turbo enabled*/
+#define IEEE80211_F_COMP 0x00000002 /* CONF: ATH comp enabled */
+#define IEEE80211_F_FF 0x00000004 /* CONF: ATH FF enabled */
+#define IEEE80211_F_BURST 0x00000008 /* CONF: bursting enabled */
+/* NB: this is intentionally set up to be IEEE80211_CAPINFO_PRIVACY */
+#define IEEE80211_F_PRIVACY 0x00000010 /* CONF: privacy enabled */
+#define IEEE80211_F_PUREG 0x00000020 /* CONF: 11g w/o 11b sta's */
+#define IEEE80211_F_SCAN 0x00000080 /* STATUS: scanning */
+#define IEEE80211_F_ASCAN 0x00000100 /* STATUS: active scan */
+#define IEEE80211_F_SIBSS 0x00000200 /* STATUS: start IBSS */
+/* NB: this is intentionally set up to be IEEE80211_CAPINFO_SHORT_SLOTTIME */
+#define IEEE80211_F_SHSLOT 0x00000400 /* STATUS: use short slot time*/
+#define IEEE80211_F_PMGTON 0x00000800 /* CONF: Power mgmt enable */
+#define IEEE80211_F_DESBSSID 0x00001000 /* CONF: des_bssid is set */
+#define IEEE80211_F_WME 0x00002000 /* CONF: enable WME use */
+#define IEEE80211_F_BGSCAN 0x00004000 /* CONF: bg scan enabled (???)*/
+#define IEEE80211_F_SWRETRY 0x00008000 /* CONF: sw tx retry enabled */
+#define IEEE80211_F_TXPOW_FIXED 0x00010000 /* TX Power: fixed rate */
+#define IEEE80211_F_IBSSON 0x00020000 /* CONF: IBSS creation enable */
+#define IEEE80211_F_SHPREAMBLE 0x00040000 /* STATUS: use short preamble */
+#define IEEE80211_F_DATAPAD 0x00080000 /* CONF: do alignment pad */
+#define IEEE80211_F_USEPROT 0x00100000 /* STATUS: protection enabled */
+#define IEEE80211_F_USEBARKER 0x00200000 /* STATUS: use barker preamble*/
+#define IEEE80211_F_CSAPENDING 0x00400000 /* STATUS: chan switch pending*/
+#define IEEE80211_F_WPA1 0x00800000 /* CONF: WPA enabled */
+#define IEEE80211_F_WPA2 0x01000000 /* CONF: WPA2 enabled */
+#define IEEE80211_F_WPA 0x01800000 /* CONF: WPA/WPA2 enabled */
+#define IEEE80211_F_DROPUNENC 0x02000000 /* CONF: drop unencrypted */
+#define IEEE80211_F_COUNTERM 0x04000000 /* CONF: TKIP countermeasures */
+#define IEEE80211_F_HIDESSID 0x08000000 /* CONF: hide SSID in beacon */
+#define IEEE80211_F_NOBRIDGE 0x10000000 /* CONF: dis. internal bridge */
+#define IEEE80211_F_PCF 0x20000000 /* CONF: PCF enabled */
+#define IEEE80211_F_DOTH 0x40000000 /* CONF: 11h enabled */
+#define IEEE80211_F_DWDS 0x80000000 /* CONF: Dynamic WDS enabled */
+
+#define IEEE80211_F_BITS \
+ "\20\1TURBOP\2COMP\3FF\4BURST\5PRIVACY\6PUREG\10SCAN\11ASCAN\12SIBSS" \
+ "\13SHSLOT\14PMGTON\15DESBSSID\16WME\17BGSCAN\20SWRETRY\21TXPOW_FIXED" \
+ "\22IBSSON\23SHPREAMBLE\24DATAPAD\25USEPROT\26USERBARKER\27CSAPENDING" \
+ "\30WPA1\31WPA2\32DROPUNENC\33COUNTERM\34HIDESSID\35NOBRIDG\36PCF" \
+ "\37DOTH\40DWDS"
+
+/* Atheros protocol-specific flags */
+#define IEEE80211_F_ATHEROS \
+ (IEEE80211_F_FF | IEEE80211_F_COMP | IEEE80211_F_TURBOP)
+/* Check if an Atheros capability was negotiated for use */
+#define IEEE80211_ATH_CAP(vap, ni, bit) \
+ ((vap)->iv_flags & (ni)->ni_ath_flags & (bit))
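+/*
+ * e.g. IEEE80211_ATH_CAP(vap, ni, IEEE80211_F_FF) is nonzero only when
+ * fast-frames is enabled locally and advertised by the peer node.
+ */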
+
+/* ic_flags_ext/iv_flags_ext */
+#define IEEE80211_FEXT_INACT 0x00000002 /* CONF: sta inact handling */
+#define IEEE80211_FEXT_SCANWAIT 0x00000004 /* STATUS: awaiting scan */
+/* 0x00000006 reserved */
+#define IEEE80211_FEXT_BGSCAN 0x00000008 /* STATUS: complete bgscan */
+#define IEEE80211_FEXT_WPS 0x00000010 /* CONF: WPS enabled */
+#define IEEE80211_FEXT_TSN 0x00000020 /* CONF: TSN enabled */
+#define IEEE80211_FEXT_SCANREQ 0x00000040 /* STATUS: scan req params */
+#define IEEE80211_FEXT_RESUME 0x00000080 /* STATUS: start on resume */
+#define IEEE80211_FEXT_4ADDR 0x00000100 /* CONF: apply 4-addr encap */
+#define IEEE80211_FEXT_NONERP_PR 0x00000200 /* STATUS: non-ERP sta present*/
+#define IEEE80211_FEXT_SWBMISS 0x00000400 /* CONF: do bmiss in s/w */
+#define IEEE80211_FEXT_DFS 0x00000800 /* CONF: DFS enabled */
+#define IEEE80211_FEXT_DOTD 0x00001000 /* CONF: 11d enabled */
+#define IEEE80211_FEXT_STATEWAIT 0x00002000 /* STATUS: awaiting state chg */
+#define IEEE80211_FEXT_REINIT 0x00004000 /* STATUS: INIT state first */
+#define IEEE80211_FEXT_BPF 0x00008000 /* STATUS: BPF tap present */
+/* NB: immutable: should be set only when creating a vap */
+#define IEEE80211_FEXT_WDSLEGACY 0x00010000 /* CONF: legacy WDS operation */
+#define IEEE80211_FEXT_PROBECHAN 0x00020000 /* CONF: probe passive channel*/
+#define IEEE80211_FEXT_UNIQMAC 0x00040000 /* CONF: user or computed mac */
+
+#define IEEE80211_FEXT_BITS \
+ "\20\2INACT\3SCANWAIT\4BGSCAN\5WPS\6TSN\7SCANREQ\10RESUME" \
+ "\0114ADDR\12NONEPR_PR\13SWBMISS\14DFS\15DOTD\16STATEWAIT\17REINIT" \
+ "\20BPF\21WDSLEGACY\22PROBECHAN\23UNIQMAC"
+
+/* ic_flags_ht/iv_flags_ht */
+#define IEEE80211_FHT_NONHT_PR 0x00000001 /* STATUS: non-HT sta present */
+#define IEEE80211_FHT_GF 0x00040000 /* CONF: Greenfield enabled */
+#define IEEE80211_FHT_HT 0x00080000 /* CONF: HT supported */
+#define IEEE80211_FHT_AMPDU_TX 0x00100000 /* CONF: A-MPDU tx supported */
+#define IEEE80211_FHT_AMPDU_RX 0x00200000 /* CONF: A-MPDU rx supported */
+#define IEEE80211_FHT_AMSDU_TX 0x00400000 /* CONF: A-MSDU tx supported */
+#define IEEE80211_FHT_AMSDU_RX 0x00800000 /* CONF: A-MSDU rx supported */
+#define IEEE80211_FHT_USEHT40 0x01000000 /* CONF: 20/40 use enabled */
+#define IEEE80211_FHT_PUREN 0x02000000 /* CONF: 11n w/o legacy sta's */
+#define IEEE80211_FHT_SHORTGI20 0x04000000 /* CONF: short GI in HT20 */
+#define IEEE80211_FHT_SHORTGI40 0x08000000 /* CONF: short GI in HT40 */
+#define IEEE80211_FHT_HTCOMPAT 0x10000000 /* CONF: HT vendor OUI's */
+#define IEEE80211_FHT_RIFS 0x20000000 /* CONF: RIFS enabled */
+#define IEEE80211_FHT_STBC_TX 0x40000000 /* CONF: STBC tx enabled */
+#define IEEE80211_FHT_STBC_RX 0x80000000 /* CONF: STBC rx enabled */
+
+#define IEEE80211_FHT_BITS \
+ "\20\1NONHT_PR" \
+ "\23GF\24HT\25AMDPU_TX\26AMPDU_TX" \
+ "\27AMSDU_TX\30AMSDU_RX\31USEHT40\32PUREN\33SHORTGI20\34SHORTGI40" \
+ "\35HTCOMPAT\36RIFS\37STBC_TX\40STBC_RX"
+
+#define IEEE80211_FVEN_BITS "\20"
+
+/* ic_caps/iv_caps: device driver capabilities */
+/* 0x3c available */
+#define IEEE80211_C_STA 0x00000001 /* CAPABILITY: STA available */
+#define IEEE80211_C_8023ENCAP 0x00000002 /* CAPABILITY: 802.3 encap */
+#define IEEE80211_C_FF 0x00000040 /* CAPABILITY: ATH FF avail */
+#define IEEE80211_C_TURBOP 0x00000080 /* CAPABILITY: ATH Turbo avail*/
+#define IEEE80211_C_IBSS 0x00000100 /* CAPABILITY: IBSS available */
+#define IEEE80211_C_PMGT 0x00000200 /* CAPABILITY: Power mgmt */
+#define IEEE80211_C_HOSTAP 0x00000400 /* CAPABILITY: HOSTAP avail */
+#define IEEE80211_C_AHDEMO 0x00000800 /* CAPABILITY: Old Adhoc Demo */
+#define IEEE80211_C_SWRETRY 0x00001000 /* CAPABILITY: sw tx retry */
+#define IEEE80211_C_TXPMGT 0x00002000 /* CAPABILITY: tx power mgmt */
+#define IEEE80211_C_SHSLOT 0x00004000 /* CAPABILITY: short slottime */
+#define IEEE80211_C_SHPREAMBLE 0x00008000 /* CAPABILITY: short preamble */
+#define IEEE80211_C_MONITOR 0x00010000 /* CAPABILITY: monitor mode */
+#define IEEE80211_C_DFS 0x00020000 /* CAPABILITY: DFS/radar avail*/
+#define IEEE80211_C_MBSS 0x00040000 /* CAPABILITY: MBSS available */
+/* 0x780000 available */
+#define IEEE80211_C_WPA1 0x00800000 /* CAPABILITY: WPA1 avail */
+#define IEEE80211_C_WPA2 0x01000000 /* CAPABILITY: WPA2 avail */
+#define IEEE80211_C_WPA 0x01800000 /* CAPABILITY: WPA1+WPA2 avail*/
+#define IEEE80211_C_BURST 0x02000000 /* CAPABILITY: frame bursting */
+#define IEEE80211_C_WME 0x04000000 /* CAPABILITY: WME avail */
+#define IEEE80211_C_WDS 0x08000000 /* CAPABILITY: 4-addr support */
+/* 0x10000000 reserved */
+#define IEEE80211_C_BGSCAN 0x20000000 /* CAPABILITY: bg scanning */
+#define IEEE80211_C_TXFRAG 0x40000000 /* CAPABILITY: tx fragments */
+#define IEEE80211_C_TDMA 0x80000000 /* CAPABILITY: TDMA avail */
+/* XXX protection/barker? */
+
+#define IEEE80211_C_OPMODE \
+ (IEEE80211_C_STA | IEEE80211_C_IBSS | IEEE80211_C_HOSTAP | \
+ IEEE80211_C_AHDEMO | IEEE80211_C_MONITOR | IEEE80211_C_WDS | \
+ IEEE80211_C_TDMA | IEEE80211_C_MBSS)
+
+#define IEEE80211_C_BITS \
+ "\20\1STA\002803ENCAP\7FF\10TURBOP\11IBSS\12PMGT" \
+ "\13HOSTAP\14AHDEMO\15SWRETRY\16TXPMGT\17SHSLOT\20SHPREAMBLE" \
+ "\21MONITOR\22DFS\23MBSS\30WPA1\31WPA2\32BURST\33WME\34WDS\36BGSCAN" \
+ "\37TXFRAG\40TDMA"
+
+/*
+ * ic_htcaps/iv_htcaps: HT-specific device/driver capabilities
+ *
+ * NB: the low 16-bits are the 802.11 definitions, the upper
+ * 16-bits are used to define s/w/driver capabilities.
+ */
+#define IEEE80211_HTC_AMPDU 0x00010000 /* CAPABILITY: A-MPDU tx */
+#define IEEE80211_HTC_AMSDU 0x00020000 /* CAPABILITY: A-MSDU tx */
+/* NB: HT40 is implied by IEEE80211_HTCAP_CHWIDTH40 */
+#define IEEE80211_HTC_HT 0x00040000 /* CAPABILITY: HT operation */
+#define IEEE80211_HTC_SMPS 0x00080000 /* CAPABILITY: MIMO power save*/
+#define IEEE80211_HTC_RIFS 0x00100000 /* CAPABILITY: RIFS support */
+
+#define IEEE80211_C_HTCAP_BITS \
+ "\20\1LDPC\2CHWIDTH40\5GREENFIELD\6SHORTGI20\7SHORTGI40\10TXSTBC" \
+ "\21AMPDU\22AMSDU\23HT\24SMPS\25RIFS"
+
+void ieee80211_ifattach(struct ieee80211com *,
+ const uint8_t macaddr[IEEE80211_ADDR_LEN]);
+void ieee80211_ifdetach(struct ieee80211com *);
+int ieee80211_vap_setup(struct ieee80211com *, struct ieee80211vap *,
+ const char name[IFNAMSIZ], int unit, int opmode, int flags,
+ const uint8_t bssid[IEEE80211_ADDR_LEN],
+ const uint8_t macaddr[IEEE80211_ADDR_LEN]);
+int ieee80211_vap_attach(struct ieee80211vap *,
+ ifm_change_cb_t, ifm_stat_cb_t);
+void ieee80211_vap_detach(struct ieee80211vap *);
+const struct ieee80211_rateset *ieee80211_get_suprates(struct ieee80211com *ic,
+ const struct ieee80211_channel *);
+void ieee80211_announce(struct ieee80211com *);
+void ieee80211_announce_channels(struct ieee80211com *);
+void ieee80211_drain(struct ieee80211com *);
+void ieee80211_media_init(struct ieee80211com *);
+struct ieee80211com *ieee80211_find_vap(const uint8_t mac[IEEE80211_ADDR_LEN]);
+int ieee80211_media_change(struct ifnet *);
+void ieee80211_media_status(struct ifnet *, struct ifmediareq *);
+int ieee80211_ioctl(struct ifnet *, u_long, caddr_t);
+int ieee80211_rate2media(struct ieee80211com *, int,
+ enum ieee80211_phymode);
+int ieee80211_media2rate(int);
+int ieee80211_mhz2ieee(u_int, u_int);
+int ieee80211_chan2ieee(struct ieee80211com *,
+ const struct ieee80211_channel *);
+u_int ieee80211_ieee2mhz(u_int, u_int);
+struct ieee80211_channel *ieee80211_find_channel(struct ieee80211com *,
+ int freq, int flags);
+struct ieee80211_channel *ieee80211_find_channel_byieee(struct ieee80211com *,
+ int ieee, int flags);
+int ieee80211_setmode(struct ieee80211com *, enum ieee80211_phymode);
+enum ieee80211_phymode ieee80211_chan2mode(const struct ieee80211_channel *);
+uint32_t ieee80211_mac_hash(const struct ieee80211com *,
+ const uint8_t addr[IEEE80211_ADDR_LEN]);
+
+void ieee80211_radiotap_attach(struct ieee80211com *,
+ struct ieee80211_radiotap_header *th, int tlen,
+ uint32_t tx_radiotap,
+ struct ieee80211_radiotap_header *rh, int rlen,
+ uint32_t rx_radiotap);
+void ieee80211_radiotap_detach(struct ieee80211com *);
+void ieee80211_radiotap_vattach(struct ieee80211vap *);
+void ieee80211_radiotap_vdetach(struct ieee80211vap *);
+void ieee80211_radiotap_chan_change(struct ieee80211com *);
+void ieee80211_radiotap_tx(struct ieee80211vap *, struct mbuf *);
+void ieee80211_radiotap_rx(struct ieee80211vap *, struct mbuf *);
+void ieee80211_radiotap_rx_all(struct ieee80211com *, struct mbuf *);
+
+static __inline int
+ieee80211_radiotap_active(const struct ieee80211com *ic)
+{
+ return (ic->ic_flags_ext & IEEE80211_FEXT_BPF) != 0;
+}
+
+static __inline int
+ieee80211_radiotap_active_vap(const struct ieee80211vap *vap)
+{
+ return (vap->iv_flags_ext & IEEE80211_FEXT_BPF) ||
+ vap->iv_ic->ic_montaps != 0;
+}
+
+/*
+ * Enqueue a task on the state thread.
+ */
+static __inline void
+ieee80211_runtask(struct ieee80211com *ic, struct task *task)
+{
+ taskqueue_enqueue(ic->ic_tq, task);
+}
+
+/*
+ * Wait for a queued task to complete.
+ */
+static __inline void
+ieee80211_draintask(struct ieee80211com *ic, struct task *task)
+{
+ taskqueue_drain(ic->ic_tq, task);
+}
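+
+/*
+ * For example (a sketch), deferred work is queued with
+ *
+ *	ieee80211_runtask(ic, &ic->ic_mcast_task);
+ *
+ * and drained with ieee80211_draintask before any state the task
+ * references is torn down.
+ */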
+
+/*
+ * Key update synchronization methods. XXX should not be visible.
+ */
+static __inline void
+ieee80211_key_update_begin(struct ieee80211vap *vap)
+{
+ vap->iv_key_update_begin(vap);
+}
+static __inline void
+ieee80211_key_update_end(struct ieee80211vap *vap)
+{
+ vap->iv_key_update_end(vap);
+}
+
+/*
+ * XXX these need to be here for IEEE80211_F_DATAPAD
+ */
+
+/*
+ * Return the space occupied by the 802.11 header and any
+ * padding required by the driver. This works for a
+ * management or data frame.
+ */
+static __inline int
+ieee80211_hdrspace(struct ieee80211com *ic, const void *data)
+{
+ int size = ieee80211_hdrsize(data);
+ if (ic->ic_flags & IEEE80211_F_DATAPAD)
+ size = roundup(size, sizeof(uint32_t));
+ return size;
+}
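+
+/*
+ * e.g. with IEEE80211_F_DATAPAD a 26-byte QoS data header is padded
+ * to 28 bytes so the payload begins on a 32-bit boundary.
+ */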
+
+/*
+ * Like ieee80211_hdrspace, but handles any type of frame.
+ */
+static __inline int
+ieee80211_anyhdrspace(struct ieee80211com *ic, const void *data)
+{
+ int size = ieee80211_anyhdrsize(data);
+ if (ic->ic_flags & IEEE80211_F_DATAPAD)
+ size = roundup(size, sizeof(uint32_t));
+ return size;
+}
+
+/*
+ * Notify a vap that beacon state has been updated.
+ */
+static __inline void
+ieee80211_beacon_notify(struct ieee80211vap *vap, int what)
+{
+ if (vap->iv_state == IEEE80211_S_RUN)
+ vap->iv_update_beacon(vap, what);
+}
+
+/*
+ * Calculate HT channel promotion flags for a channel.
+ * XXX belongs in ieee80211_ht.h but needs IEEE80211_FHT_*
+ */
+static __inline int
+ieee80211_htchanflags(const struct ieee80211_channel *c)
+{
+ return IEEE80211_IS_CHAN_HT40(c) ?
+ IEEE80211_FHT_HT | IEEE80211_FHT_USEHT40 :
+ IEEE80211_IS_CHAN_HT(c) ? IEEE80211_FHT_HT : 0;
+}
+
+/*
+ * Debugging facilities compiled in when IEEE80211_DEBUG is defined.
+ *
+ * The intent is that any problem in the net80211 layer can be
+ * diagnosed by inspecting the statistics (dumped by the wlanstats
+ * program) and/or the msgs generated by net80211. Messages are
+ * broken into functional classes and can be controlled with the
+ * wlandebug program. Certain of these msg groups are for facilities
+ * that are no longer part of net80211 (e.g. IEEE80211_MSG_DOT1XSM).
+ */
+#define IEEE80211_MSG_11N 0x80000000 /* 11n mode debug */
+#define IEEE80211_MSG_DEBUG 0x40000000 /* IFF_DEBUG equivalent */
+#define	IEEE80211_MSG_DUMPPKTS	0x20000000	/* IFF_LINK2 equivalent */
+#define IEEE80211_MSG_CRYPTO 0x10000000 /* crypto work */
+#define IEEE80211_MSG_INPUT 0x08000000 /* input handling */
+#define IEEE80211_MSG_XRATE 0x04000000 /* rate set handling */
+#define IEEE80211_MSG_ELEMID 0x02000000 /* element id parsing */
+#define IEEE80211_MSG_NODE 0x01000000 /* node handling */
+#define IEEE80211_MSG_ASSOC 0x00800000 /* association handling */
+#define IEEE80211_MSG_AUTH 0x00400000 /* authentication handling */
+#define IEEE80211_MSG_SCAN 0x00200000 /* scanning */
+#define IEEE80211_MSG_OUTPUT 0x00100000 /* output handling */
+#define IEEE80211_MSG_STATE 0x00080000 /* state machine */
+#define IEEE80211_MSG_POWER 0x00040000 /* power save handling */
+#define IEEE80211_MSG_HWMP 0x00020000 /* hybrid mesh protocol */
+#define IEEE80211_MSG_DOT1XSM 0x00010000 /* 802.1x state machine */
+#define IEEE80211_MSG_RADIUS 0x00008000 /* 802.1x radius client */
+#define IEEE80211_MSG_RADDUMP 0x00004000 /* dump 802.1x radius packets */
+#define IEEE80211_MSG_MESH 0x00002000 /* mesh networking */
+#define IEEE80211_MSG_WPA 0x00001000 /* WPA/RSN protocol */
+#define IEEE80211_MSG_ACL 0x00000800 /* ACL handling */
+#define IEEE80211_MSG_WME 0x00000400 /* WME protocol */
+#define IEEE80211_MSG_SUPERG 0x00000200 /* Atheros SuperG protocol */
+#define IEEE80211_MSG_DOTH 0x00000100 /* 802.11h support */
+#define IEEE80211_MSG_INACT 0x00000080 /* inactivity handling */
+#define IEEE80211_MSG_ROAM 0x00000040 /* sta-mode roaming */
+#define IEEE80211_MSG_RATECTL 0x00000020 /* tx rate control */
+#define IEEE80211_MSG_ACTION 0x00000010 /* action frame handling */
+#define IEEE80211_MSG_WDS 0x00000008 /* WDS handling */
+#define IEEE80211_MSG_IOCTL 0x00000004 /* ioctl handling */
+#define IEEE80211_MSG_TDMA 0x00000002 /* TDMA handling */
+
+#define IEEE80211_MSG_ANY 0xffffffff /* anything */
+
+#define IEEE80211_MSG_BITS \
+ "\20\2TDMA\3IOCTL\4WDS\5ACTION\6RATECTL\7ROAM\10INACT\11DOTH\12SUPERG" \
+ "\13WME\14ACL\15WPA\16RADKEYS\17RADDUMP\20RADIUS\21DOT1XSM\22HWMP" \
+ "\23POWER\24STATE\25OUTPUT\26SCAN\27AUTH\30ASSOC\31NODE\32ELEMID" \
+ "\33XRATE\34INPUT\35CRYPTO\36DUPMPKTS\37DEBUG\04011N"
+
+#ifdef IEEE80211_DEBUG
+#define ieee80211_msg(_vap, _m) ((_vap)->iv_debug & (_m))
+#define IEEE80211_DPRINTF(_vap, _m, _fmt, ...) do { \
+ if (ieee80211_msg(_vap, _m)) \
+ ieee80211_note(_vap, _fmt, __VA_ARGS__); \
+} while (0)
+#define IEEE80211_NOTE(_vap, _m, _ni, _fmt, ...) do { \
+ if (ieee80211_msg(_vap, _m)) \
+ ieee80211_note_mac(_vap, (_ni)->ni_macaddr, _fmt, __VA_ARGS__);\
+} while (0)
+#define IEEE80211_NOTE_MAC(_vap, _m, _mac, _fmt, ...) do { \
+ if (ieee80211_msg(_vap, _m)) \
+ ieee80211_note_mac(_vap, _mac, _fmt, __VA_ARGS__); \
+} while (0)
+#define IEEE80211_NOTE_FRAME(_vap, _m, _wh, _fmt, ...) do { \
+ if (ieee80211_msg(_vap, _m)) \
+ ieee80211_note_frame(_vap, _wh, _fmt, __VA_ARGS__); \
+} while (0)
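+
+/*
+ * Typical usage (a sketch):
+ *
+ *	IEEE80211_DPRINTF(vap, IEEE80211_MSG_TDMA,
+ *	    "%s: assigned slot %u\n", __func__, slot);
+ *
+ * which prints only when IEEE80211_MSG_TDMA is enabled in iv_debug.
+ */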
+void ieee80211_note(const struct ieee80211vap *, const char *, ...);
+void ieee80211_note_mac(const struct ieee80211vap *,
+ const uint8_t mac[IEEE80211_ADDR_LEN], const char *, ...);
+void ieee80211_note_frame(const struct ieee80211vap *,
+ const struct ieee80211_frame *, const char *, ...);
+#define ieee80211_msg_debug(_vap) \
+ ((_vap)->iv_debug & IEEE80211_MSG_DEBUG)
+#define ieee80211_msg_dumppkts(_vap) \
+ ((_vap)->iv_debug & IEEE80211_MSG_DUMPPKTS)
+#define ieee80211_msg_input(_vap) \
+ ((_vap)->iv_debug & IEEE80211_MSG_INPUT)
+#define ieee80211_msg_radius(_vap) \
+ ((_vap)->iv_debug & IEEE80211_MSG_RADIUS)
+#define ieee80211_msg_dumpradius(_vap) \
+ ((_vap)->iv_debug & IEEE80211_MSG_RADDUMP)
+#define ieee80211_msg_dumpradkeys(_vap) \
+ ((_vap)->iv_debug & IEEE80211_MSG_RADKEYS)
+#define ieee80211_msg_scan(_vap) \
+ ((_vap)->iv_debug & IEEE80211_MSG_SCAN)
+#define ieee80211_msg_assoc(_vap) \
+ ((_vap)->iv_debug & IEEE80211_MSG_ASSOC)
+
+/*
+ * Emit a debug message about discarding a frame or information
+ * element. One format is for extracting the mac address from
+ * the frame header; the other is for when a header is not
+ * available or otherwise appropriate.
+ */
+#define IEEE80211_DISCARD(_vap, _m, _wh, _type, _fmt, ...) do { \
+ if ((_vap)->iv_debug & (_m)) \
+ ieee80211_discard_frame(_vap, _wh, _type, _fmt, __VA_ARGS__);\
+} while (0)
+#define IEEE80211_DISCARD_IE(_vap, _m, _wh, _type, _fmt, ...) do { \
+ if ((_vap)->iv_debug & (_m)) \
+ ieee80211_discard_ie(_vap, _wh, _type, _fmt, __VA_ARGS__);\
+} while (0)
+#define IEEE80211_DISCARD_MAC(_vap, _m, _mac, _type, _fmt, ...) do { \
+ if ((_vap)->iv_debug & (_m)) \
+ ieee80211_discard_mac(_vap, _mac, _type, _fmt, __VA_ARGS__);\
+} while (0)
+
+void ieee80211_discard_frame(const struct ieee80211vap *,
+ const struct ieee80211_frame *, const char *type, const char *fmt, ...);
+void ieee80211_discard_ie(const struct ieee80211vap *,
+ const struct ieee80211_frame *, const char *type, const char *fmt, ...);
+void ieee80211_discard_mac(const struct ieee80211vap *,
+ const uint8_t mac[IEEE80211_ADDR_LEN], const char *type,
+ const char *fmt, ...);
+#else
+#define IEEE80211_DPRINTF(_vap, _m, _fmt, ...)
+#define IEEE80211_NOTE(_vap, _m, _ni, _fmt, ...)
+#define IEEE80211_NOTE_FRAME(_vap, _m, _wh, _fmt, ...)
+#define IEEE80211_NOTE_MAC(_vap, _m, _mac, _fmt, ...)
+#define ieee80211_msg_dumppkts(_vap) 0
+#define ieee80211_msg(_vap, _m) 0
+
+#define IEEE80211_DISCARD(_vap, _m, _wh, _type, _fmt, ...)
+#define IEEE80211_DISCARD_IE(_vap, _m, _wh, _type, _fmt, ...)
+#define IEEE80211_DISCARD_MAC(_vap, _m, _mac, _type, _fmt, ...)
+#endif
+
+#endif /* _NET80211_IEEE80211_VAR_HH_ */
diff --git a/rtems/freebsd/net80211/ieee80211_wds.c b/rtems/freebsd/net80211/ieee80211_wds.c
new file mode 100644
index 00000000..126c9f3a
--- /dev/null
+++ b/rtems/freebsd/net80211/ieee80211_wds.c
@@ -0,0 +1,789 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 2007-2008 Sam Leffler, Errno Consulting
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+#ifdef __FreeBSD__
+__FBSDID("$FreeBSD$");
+#endif
+
+/*
+ * IEEE 802.11 WDS mode support.
+ */
+#include <rtems/freebsd/local/opt_inet.h>
+#include <rtems/freebsd/local/opt_wlan.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/kernel.h>
+
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/sockio.h>
+#include <rtems/freebsd/sys/endian.h>
+#include <rtems/freebsd/sys/errno.h>
+#include <rtems/freebsd/sys/proc.h>
+#include <rtems/freebsd/sys/sysctl.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/if_media.h>
+#include <rtems/freebsd/net/if_llc.h>
+#include <rtems/freebsd/net/ethernet.h>
+
+#include <rtems/freebsd/net/bpf.h>
+
+#include <rtems/freebsd/net80211/ieee80211_var.h>
+#include <rtems/freebsd/net80211/ieee80211_wds.h>
+#include <rtems/freebsd/net80211/ieee80211_input.h>
+#ifdef IEEE80211_SUPPORT_SUPERG
+#include <rtems/freebsd/net80211/ieee80211_superg.h>
+#endif
+
+static void wds_vattach(struct ieee80211vap *);
+static int wds_newstate(struct ieee80211vap *, enum ieee80211_state, int);
+static int wds_input(struct ieee80211_node *ni, struct mbuf *m, int, int);
+static void wds_recv_mgmt(struct ieee80211_node *, struct mbuf *,
+ int subtype, int, int);
+
+void
+ieee80211_wds_attach(struct ieee80211com *ic)
+{
+ ic->ic_vattach[IEEE80211_M_WDS] = wds_vattach;
+}
+
+void
+ieee80211_wds_detach(struct ieee80211com *ic)
+{
+}
+
+static void
+wds_vdetach(struct ieee80211vap *vap)
+{
+ if (vap->iv_bss != NULL) {
+ /* XXX locking? */
+ if (vap->iv_bss->ni_wdsvap == vap)
+ vap->iv_bss->ni_wdsvap = NULL;
+ }
+}
+
+static void
+wds_vattach(struct ieee80211vap *vap)
+{
+ vap->iv_newstate = wds_newstate;
+ vap->iv_input = wds_input;
+ vap->iv_recv_mgmt = wds_recv_mgmt;
+ vap->iv_opdetach = wds_vdetach;
+}
+
+static void
+wds_flush(struct ieee80211_node *ni)
+{
+ struct ieee80211com *ic = ni->ni_ic;
+ struct mbuf *m, *next;
+ int8_t rssi, nf;
+
+ m = ieee80211_ageq_remove(&ic->ic_stageq,
+ (void *)(uintptr_t) ieee80211_mac_hash(ic, ni->ni_macaddr));
+ if (m == NULL)
+ return;
+
+ IEEE80211_NOTE(ni->ni_vap, IEEE80211_MSG_WDS, ni,
+ "%s", "flush wds queue");
+ ic->ic_node_getsignal(ni, &rssi, &nf);
+ for (; m != NULL; m = next) {
+ next = m->m_nextpkt;
+ m->m_nextpkt = NULL;
+ ieee80211_input(ni, m, rssi, nf);
+ }
+}
+
+static int
+ieee80211_create_wds(struct ieee80211vap *vap, struct ieee80211_channel *chan)
+{
+ struct ieee80211com *ic = vap->iv_ic;
+ struct ieee80211_node_table *nt = &ic->ic_sta;
+ struct ieee80211_node *ni, *obss;
+
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_WDS,
+ "%s: creating link to %s on channel %u\n", __func__,
+ ether_sprintf(vap->iv_des_bssid), ieee80211_chan2ieee(ic, chan));
+
+ /* NB: vap create must specify the bssid for the link */
+ KASSERT(vap->iv_flags & IEEE80211_F_DESBSSID, ("no bssid"));
+ /* NB: we should only be called on RUN transition */
+ KASSERT(vap->iv_state == IEEE80211_S_RUN, ("!RUN state"));
+
+ if ((vap->iv_flags_ext & IEEE80211_FEXT_WDSLEGACY) == 0) {
+ /*
+ * Dynamic/non-legacy WDS. Reference the associated
+ * station specified by the desired bssid setup at vap
+ * create. Point ni_wdsvap at the WDS vap so 4-address
+ * frames received through the associated AP vap will
+ * be dispatched upward (e.g. to a bridge) as though
+ * they arrived on the WDS vap.
+ */
+ IEEE80211_NODE_LOCK(nt);
+ obss = NULL;
+ ni = ieee80211_find_node_locked(&ic->ic_sta, vap->iv_des_bssid);
+ if (ni == NULL) {
+ /*
+ * Node went away before we could hookup. This
+ * should be ok; no traffic will flow and a leave
+ * event will be dispatched that should cause
+ * the vap to be destroyed.
+ */
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_WDS,
+ "%s: station %s went away\n",
+ __func__, ether_sprintf(vap->iv_des_bssid));
+ /* XXX stat? */
+ } else if (ni->ni_wdsvap != NULL) {
+ /*
+ * Node already setup with a WDS vap; we cannot
+ * allow multiple references so disallow. If
+ * ni_wdsvap points at us that's ok; we should
+ * do nothing anyway.
+ */
+ /* XXX printf instead? */
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_WDS,
+ "%s: station %s in use with %s\n",
+ __func__, ether_sprintf(vap->iv_des_bssid),
+ ni->ni_wdsvap->iv_ifp->if_xname);
+ /* XXX stat? */
+ } else {
+ /*
+ * Committed to new node, setup state.
+ */
+ obss = vap->iv_bss;
+ vap->iv_bss = ni;
+ ni->ni_wdsvap = vap;
+ }
+ IEEE80211_NODE_UNLOCK(nt);
+ if (obss != NULL) {
+ /* NB: deferred to avoid recursive lock */
+ ieee80211_free_node(obss);
+ }
+ } else {
+ /*
+ * Legacy WDS vap setup.
+ */
+ /*
+		 * The far end does not associate so we just create
+		 * a new node and install it as the vap's
+ * bss node. We must simulate an association and
+ * authorize the port for traffic to flow.
+ * XXX check if node already in sta table?
+ */
+ ni = ieee80211_node_create_wds(vap, vap->iv_des_bssid, chan);
+ if (ni != NULL) {
+ obss = vap->iv_bss;
+ vap->iv_bss = ieee80211_ref_node(ni);
+ ni->ni_flags |= IEEE80211_NODE_AREF;
+ if (obss != NULL)
+ ieee80211_free_node(obss);
+ /* give driver a chance to setup state like ni_txrate */
+ if (ic->ic_newassoc != NULL)
+ ic->ic_newassoc(ni, 1);
+ /* tell the authenticator about new station */
+ if (vap->iv_auth->ia_node_join != NULL)
+ vap->iv_auth->ia_node_join(ni);
+ if (ni->ni_authmode != IEEE80211_AUTH_8021X)
+ ieee80211_node_authorize(ni);
+
+ ieee80211_notify_node_join(ni, 1 /*newassoc*/);
+ /* XXX inject l2uf frame */
+ }
+ }
+
+ /*
+	 * Flush any pending frames now that we're set up.
+ */
+ if (ni != NULL)
+ wds_flush(ni);
+ return (ni == NULL ? ENOENT : 0);
+}
+
+/*
+ * Propagate multicast frames of an ap vap to all DWDS links.
+ * The caller is assumed to have verified this frame is multicast.
+ */
+void
+ieee80211_dwds_mcast(struct ieee80211vap *vap0, struct mbuf *m)
+{
+ struct ieee80211com *ic = vap0->iv_ic;
+ struct ifnet *parent = ic->ic_ifp;
+ const struct ether_header *eh = mtod(m, const struct ether_header *);
+ struct ieee80211_node *ni;
+ struct ieee80211vap *vap;
+ struct ifnet *ifp;
+ struct mbuf *mcopy;
+ int err;
+
+ KASSERT(ETHER_IS_MULTICAST(eh->ether_dhost),
+ ("%s not mcast", ether_sprintf(eh->ether_dhost)));
+
+ /* XXX locking */
+ TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
+ /* only DWDS vaps are interesting */
+ if (vap->iv_opmode != IEEE80211_M_WDS ||
+ (vap->iv_flags_ext & IEEE80211_FEXT_WDSLEGACY))
+ continue;
+ /* if it came in this interface, don't send it back out */
+ ifp = vap->iv_ifp;
+ if (ifp == m->m_pkthdr.rcvif)
+ continue;
+ /*
+ * Duplicate the frame and send it.
+ */
+ mcopy = m_copypacket(m, M_DONTWAIT);
+ if (mcopy == NULL) {
+ ifp->if_oerrors++;
+ /* XXX stat + msg */
+ continue;
+ }
+ ni = ieee80211_find_txnode(vap, eh->ether_dhost);
+ if (ni == NULL) {
+ /* NB: ieee80211_find_txnode does stat+msg */
+ ifp->if_oerrors++;
+ m_freem(mcopy);
+ continue;
+ }
+ /* calculate priority so drivers can find the tx queue */
+ if (ieee80211_classify(ni, mcopy)) {
+ IEEE80211_DISCARD_MAC(vap,
+ IEEE80211_MSG_OUTPUT | IEEE80211_MSG_WDS,
+ eh->ether_dhost, NULL,
+ "%s", "classification failure");
+ vap->iv_stats.is_tx_classify++;
+ ifp->if_oerrors++;
+ m_freem(mcopy);
+ ieee80211_free_node(ni);
+ continue;
+ }
+
+ BPF_MTAP(ifp, m); /* 802.3 tx */
+
+ /*
+ * Encapsulate the packet in prep for transmission.
+ */
+ mcopy = ieee80211_encap(vap, ni, mcopy);
+ if (mcopy == NULL) {
+ /* NB: stat+msg handled in ieee80211_encap */
+ ieee80211_free_node(ni);
+ continue;
+ }
+ mcopy->m_flags |= M_MCAST;
+ mcopy->m_pkthdr.rcvif = (void *) ni;
+
+ err = parent->if_transmit(parent, mcopy);
+ if (err) {
+			/* NB: if_transmit reclaims mbuf on error */
+ ifp->if_oerrors++;
+ ieee80211_free_node(ni);
+ } else
+ ifp->if_opackets++;
+ }
+}
+
+/*
+ * Handle DWDS discovery on receipt of a 4-address frame in
+ * ap mode. Queue the frame and post an event for someone
+ * to plumb the necessary WDS vap for this station. Frames
+ * received before the vap is set running will then be reprocessed
+ * as if they had just arrived.
+ */
+void
+ieee80211_dwds_discover(struct ieee80211_node *ni, struct mbuf *m)
+{
+ struct ieee80211com *ic = ni->ni_ic;
+
+ /*
+ * Save the frame with an aging interval 4 times
+ * the listen interval specified by the station.
+ * Frames that sit around too long are reclaimed
+ * using this information.
+ * XXX handle overflow?
+ * XXX per/vap beacon interval?
+ */
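+	/*
+	 * NB: e.g. a 100 TU beacon interval and a listen interval of
+	 * 10 yield an age of (100*10 << 2)/1024 == 3, i.e. the frame
+	 * is reclaimed after roughly four listen intervals (~4s).
+	 */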
+ m->m_pkthdr.rcvif = (void *)(uintptr_t)
+ ieee80211_mac_hash(ic, ni->ni_macaddr);
+ (void) ieee80211_ageq_append(&ic->ic_stageq, m,
+ ((ni->ni_intval * ic->ic_lintval) << 2) / 1024);
+ ieee80211_notify_wds_discover(ni);
+}
+
+/*
+ * IEEE80211_M_WDS vap state machine handler.
+ */
+static int
+wds_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
+{
+ struct ieee80211com *ic = vap->iv_ic;
+ struct ieee80211_node *ni;
+ enum ieee80211_state ostate;
+ int error;
+
+ IEEE80211_LOCK_ASSERT(ic);
+
+ ostate = vap->iv_state;
+ IEEE80211_DPRINTF(vap, IEEE80211_MSG_STATE, "%s: %s -> %s\n", __func__,
+ ieee80211_state_name[ostate], ieee80211_state_name[nstate]);
+ vap->iv_state = nstate; /* state transition */
+ callout_stop(&vap->iv_mgtsend); /* XXX callout_drain */
+ if (ostate != IEEE80211_S_SCAN)
+ ieee80211_cancel_scan(vap); /* background scan */
+ ni = vap->iv_bss; /* NB: no reference held */
+ error = 0;
+ switch (nstate) {
+ case IEEE80211_S_INIT:
+ switch (ostate) {
+ case IEEE80211_S_SCAN:
+ ieee80211_cancel_scan(vap);
+ break;
+ default:
+ break;
+ }
+ if (ostate != IEEE80211_S_INIT) {
+ /* NB: optimize INIT -> INIT case */
+ ieee80211_reset_bss(vap);
+ }
+ break;
+ case IEEE80211_S_SCAN:
+ switch (ostate) {
+ case IEEE80211_S_INIT:
+ ieee80211_check_scan_current(vap);
+ break;
+ default:
+ break;
+ }
+ break;
+ case IEEE80211_S_RUN:
+ if (ostate == IEEE80211_S_INIT) {
+ /*
+ * Already have a channel; bypass the scan
+ * and startup immediately.
+ */
+ error = ieee80211_create_wds(vap, ic->ic_curchan);
+ }
+ break;
+ default:
+ break;
+ }
+ return error;
+}
+
+/*
+ * Process a received frame. The node associated with the sender
+ * should be supplied. If nothing was found in the node table then
+ * the caller is assumed to supply a reference to iv_bss instead.
+ * The RSSI and noise floor are also supplied. The RSSI data is used
+ * during AP scanning to select an AP to associate with; it can have
+ * any units so long as values have consistent units and higher values
+ * mean ``better signal''.
+ */
+static int
+wds_input(struct ieee80211_node *ni, struct mbuf *m, int rssi, int nf)
+{
+#define SEQ_LEQ(a,b) ((int)((a)-(b)) <= 0)
+#define HAS_SEQ(type) ((type & 0x4) == 0)
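+/*
+ * NB: SEQ_LEQ uses a signed difference so the comparison remains
+ * correct across sequence number wrap; HAS_SEQ(type) is true for
+ * management and data frames (control frames, type 0x4, carry no
+ * sequence field).
+ */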
+ struct ieee80211vap *vap = ni->ni_vap;
+ struct ieee80211com *ic = ni->ni_ic;
+ struct ifnet *ifp = vap->iv_ifp;
+ struct ieee80211_frame *wh;
+ struct ieee80211_key *key;
+ struct ether_header *eh;
+	int hdrspace, need_tap = 1;	/* mbuf needs to be tapped. */
+ uint8_t dir, type, subtype, qos;
+ uint16_t rxseq;
+
+ if (m->m_flags & M_AMPDU_MPDU) {
+ /*
+ * Fastpath for A-MPDU reorder q resubmission. Frames
+ * w/ M_AMPDU_MPDU marked have already passed through
+ * here but were received out of order and been held on
+ * the reorder queue. When resubmitted they are marked
+ * with the M_AMPDU_MPDU flag and we can bypass most of
+ * the normal processing.
+ */
+ wh = mtod(m, struct ieee80211_frame *);
+ type = IEEE80211_FC0_TYPE_DATA;
+ dir = wh->i_fc[1] & IEEE80211_FC1_DIR_MASK;
+ subtype = IEEE80211_FC0_SUBTYPE_QOS;
+ hdrspace = ieee80211_hdrspace(ic, wh); /* XXX optimize? */
+ goto resubmit_ampdu;
+ }
+
+ KASSERT(ni != NULL, ("null node"));
+
+ type = -1; /* undefined */
+
+ if (m->m_pkthdr.len < sizeof(struct ieee80211_frame_min)) {
+ IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_ANY,
+ ni->ni_macaddr, NULL,
+ "too short (1): len %u", m->m_pkthdr.len);
+ vap->iv_stats.is_rx_tooshort++;
+ goto out;
+ }
+ /*
+ * Bit of a cheat here: we use a pointer for a 3-address
+ * frame format but don't reference fields beyond
+ * ieee80211_frame_min w/o first validating that the data
+ * is present.
+ */
+ wh = mtod(m, struct ieee80211_frame *);
+
+ if ((wh->i_fc[0] & IEEE80211_FC0_VERSION_MASK) !=
+ IEEE80211_FC0_VERSION_0) {
+ IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_ANY,
+ ni->ni_macaddr, NULL, "wrong version, fc %02x:%02x",
+ wh->i_fc[0], wh->i_fc[1]);
+ vap->iv_stats.is_rx_badversion++;
+ goto err;
+ }
+
+ dir = wh->i_fc[1] & IEEE80211_FC1_DIR_MASK;
+ type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
+ subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
+
+ /* NB: WDS vaps do not scan */
+ if (m->m_pkthdr.len < sizeof(struct ieee80211_frame_addr4)) {
+ IEEE80211_DISCARD_MAC(vap,
+ IEEE80211_MSG_ANY, ni->ni_macaddr, NULL,
+ "too short (3): len %u", m->m_pkthdr.len);
+ vap->iv_stats.is_rx_tooshort++;
+ goto out;
+ }
+ /* NB: the TA is implicitly verified by finding the wds peer node */
+ if (!IEEE80211_ADDR_EQ(wh->i_addr1, vap->iv_myaddr) &&
+ !IEEE80211_ADDR_EQ(wh->i_addr1, ifp->if_broadcastaddr)) {
+ /* frame is not for us */
+ IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_INPUT,
+ wh->i_addr1, NULL, "%s", "not to bss");
+ vap->iv_stats.is_rx_wrongbss++;
+ goto out;
+ }
+ IEEE80211_RSSI_LPF(ni->ni_avgrssi, rssi);
+ ni->ni_noise = nf;
+ if (HAS_SEQ(type)) {
+ uint8_t tid = ieee80211_gettid(wh);
+ if (IEEE80211_QOS_HAS_SEQ(wh) &&
+ TID_TO_WME_AC(tid) >= WME_AC_VI)
+ ic->ic_wme.wme_hipri_traffic++;
+ rxseq = le16toh(*(uint16_t *)wh->i_seq);
+ if ((ni->ni_flags & IEEE80211_NODE_HT) == 0 &&
+ (wh->i_fc[1] & IEEE80211_FC1_RETRY) &&
+ SEQ_LEQ(rxseq, ni->ni_rxseqs[tid])) {
+ /* duplicate, discard */
+ IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_INPUT,
+ wh->i_addr1, "duplicate",
+ "seqno <%u,%u> fragno <%u,%u> tid %u",
+ rxseq >> IEEE80211_SEQ_SEQ_SHIFT,
+ ni->ni_rxseqs[tid] >> IEEE80211_SEQ_SEQ_SHIFT,
+ rxseq & IEEE80211_SEQ_FRAG_MASK,
+ ni->ni_rxseqs[tid] & IEEE80211_SEQ_FRAG_MASK,
+ tid);
+ vap->iv_stats.is_rx_dup++;
+ IEEE80211_NODE_STAT(ni, rx_dup);
+ goto out;
+ }
+ ni->ni_rxseqs[tid] = rxseq;
+ }
+ switch (type) {
+ case IEEE80211_FC0_TYPE_DATA:
+ hdrspace = ieee80211_hdrspace(ic, wh);
+ if (m->m_len < hdrspace &&
+ (m = m_pullup(m, hdrspace)) == NULL) {
+ IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_ANY,
+ ni->ni_macaddr, NULL,
+ "data too short: expecting %u", hdrspace);
+ vap->iv_stats.is_rx_tooshort++;
+ goto out; /* XXX */
+ }
+ if (dir != IEEE80211_FC1_DIR_DSTODS) {
+ IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT,
+ wh, "data", "incorrect dir 0x%x", dir);
+ vap->iv_stats.is_rx_wrongdir++;
+ goto out;
+ }
+ /*
+ * Only legacy WDS traffic should take this path.
+ */
+ if ((vap->iv_flags_ext & IEEE80211_FEXT_WDSLEGACY) == 0) {
+ IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT,
+ wh, "data", "%s", "not legacy wds");
+ vap->iv_stats.is_rx_wrongdir++;/*XXX*/
+ goto out;
+ }
+ if (!IEEE80211_IS_MULTICAST(wh->i_addr1))
+ ni->ni_inact = ni->ni_inact_reload;
+ /*
+ * Handle A-MPDU re-ordering. If the frame is to be
+ * processed directly then ieee80211_ampdu_reorder
+ * will return 0; otherwise it has consumed the mbuf
+ * and we should do nothing more with it.
+ */
+ if ((m->m_flags & M_AMPDU) &&
+ ieee80211_ampdu_reorder(ni, m) != 0) {
+ m = NULL;
+ goto out;
+ }
+ resubmit_ampdu:
+
+ /*
+ * Handle privacy requirements. Note that we
+ * must not be preempted from here until after
+ * we (potentially) call ieee80211_crypto_demic;
+ * otherwise we may violate assumptions in the
+ * crypto cipher modules used to do delayed update
+ * of replay sequence numbers.
+ */
+ if (wh->i_fc[1] & IEEE80211_FC1_WEP) {
+ if ((vap->iv_flags & IEEE80211_F_PRIVACY) == 0) {
+ /*
+ * Discard encrypted frames when privacy is off.
+ */
+ IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT,
+ wh, "WEP", "%s", "PRIVACY off");
+ vap->iv_stats.is_rx_noprivacy++;
+ IEEE80211_NODE_STAT(ni, rx_noprivacy);
+ goto out;
+ }
+ key = ieee80211_crypto_decap(ni, m, hdrspace);
+ if (key == NULL) {
+ /* NB: stats+msgs handled in crypto_decap */
+ IEEE80211_NODE_STAT(ni, rx_wepfail);
+ goto out;
+ }
+ wh = mtod(m, struct ieee80211_frame *);
+ wh->i_fc[1] &= ~IEEE80211_FC1_WEP;
+ } else {
+ /* XXX M_WEP and IEEE80211_F_PRIVACY */
+ key = NULL;
+ }
+
+ /*
+ * Save QoS bits for use below--before we strip the header.
+ */
+ if (subtype == IEEE80211_FC0_SUBTYPE_QOS) {
+ qos = (dir == IEEE80211_FC1_DIR_DSTODS) ?
+ ((struct ieee80211_qosframe_addr4 *)wh)->i_qos[0] :
+ ((struct ieee80211_qosframe *)wh)->i_qos[0];
+ } else
+ qos = 0;
+
+ /*
+ * Next up, any fragmentation.
+ */
+ if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
+ m = ieee80211_defrag(ni, m, hdrspace);
+ if (m == NULL) {
+ /* Fragment dropped or frame not complete yet */
+ goto out;
+ }
+ }
+ wh = NULL; /* no longer valid, catch any uses */
+
+ /*
+ * Next strip any MSDU crypto bits.
+ */
+ if (key != NULL && !ieee80211_crypto_demic(vap, key, m, 0)) {
+ IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_INPUT,
+ ni->ni_macaddr, "data", "%s", "demic error");
+ vap->iv_stats.is_rx_demicfail++;
+ IEEE80211_NODE_STAT(ni, rx_demicfail);
+ goto out;
+ }
+
+ /* copy to listener after decrypt */
+ if (ieee80211_radiotap_active_vap(vap))
+ ieee80211_radiotap_rx(vap, m);
+ need_tap = 0;
+
+ /*
+ * Finally, strip the 802.11 header.
+ */
+ m = ieee80211_decap(vap, m, hdrspace);
+ if (m == NULL) {
+ /* XXX mask bit to check for both */
+ /* don't count Null data frames as errors */
+ if (subtype == IEEE80211_FC0_SUBTYPE_NODATA ||
+ subtype == IEEE80211_FC0_SUBTYPE_QOS_NULL)
+ goto out;
+ IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_INPUT,
+ ni->ni_macaddr, "data", "%s", "decap error");
+ vap->iv_stats.is_rx_decap++;
+ IEEE80211_NODE_STAT(ni, rx_decap);
+ goto err;
+ }
+ eh = mtod(m, struct ether_header *);
+ if (!ieee80211_node_is_authorized(ni)) {
+ /*
+ * Deny any non-PAE frames received prior to
+ * authorization. For open/shared-key
+ * authentication the port is marked authorized
+ * after authentication completes. For 802.1x
+ * the port is not marked authorized by the
+ * authenticator until the handshake has completed.
+ */
+ if (eh->ether_type != htons(ETHERTYPE_PAE)) {
+ IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_INPUT,
+ eh->ether_shost, "data",
+ "unauthorized port: ether type 0x%x len %u",
+ eh->ether_type, m->m_pkthdr.len);
+ vap->iv_stats.is_rx_unauth++;
+ IEEE80211_NODE_STAT(ni, rx_unauth);
+ goto err;
+ }
+ } else {
+ /*
+ * When denying unencrypted frames, discard
+ * any non-PAE frames received without encryption.
+ */
+ if ((vap->iv_flags & IEEE80211_F_DROPUNENC) &&
+ (key == NULL && (m->m_flags & M_WEP) == 0) &&
+ eh->ether_type != htons(ETHERTYPE_PAE)) {
+ /*
+ * Drop unencrypted frames.
+ */
+ vap->iv_stats.is_rx_unencrypted++;
+ IEEE80211_NODE_STAT(ni, rx_unencrypted);
+ goto out;
+ }
+ }
+ /* XXX require HT? */
+ if (qos & IEEE80211_QOS_AMSDU) {
+ m = ieee80211_decap_amsdu(ni, m);
+ if (m == NULL)
+ return IEEE80211_FC0_TYPE_DATA;
+ } else {
+#ifdef IEEE80211_SUPPORT_SUPERG
+ m = ieee80211_decap_fastframe(vap, ni, m);
+ if (m == NULL)
+ return IEEE80211_FC0_TYPE_DATA;
+#endif
+ }
+ ieee80211_deliver_data(vap, ni, m);
+ return IEEE80211_FC0_TYPE_DATA;
+
+ case IEEE80211_FC0_TYPE_MGT:
+ vap->iv_stats.is_rx_mgmt++;
+ IEEE80211_NODE_STAT(ni, rx_mgmt);
+ if (dir != IEEE80211_FC1_DIR_NODS) {
+ IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT,
+ wh, "data", "incorrect dir 0x%x", dir);
+ vap->iv_stats.is_rx_wrongdir++;
+ goto err;
+ }
+ if (m->m_pkthdr.len < sizeof(struct ieee80211_frame)) {
+ IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_ANY,
+ ni->ni_macaddr, "mgt", "too short: len %u",
+ m->m_pkthdr.len);
+ vap->iv_stats.is_rx_tooshort++;
+ goto out;
+ }
+#ifdef IEEE80211_DEBUG
+ if (ieee80211_msg_debug(vap) || ieee80211_msg_dumppkts(vap)) {
+ if_printf(ifp, "received %s from %s rssi %d\n",
+ ieee80211_mgt_subtype_name[subtype >>
+ IEEE80211_FC0_SUBTYPE_SHIFT],
+ ether_sprintf(wh->i_addr2), rssi);
+ }
+#endif
+ if (wh->i_fc[1] & IEEE80211_FC1_WEP) {
+ IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT,
+ wh, NULL, "%s", "WEP set but not permitted");
+ vap->iv_stats.is_rx_mgtdiscard++; /* XXX */
+ goto out;
+ }
+ vap->iv_recv_mgmt(ni, m, subtype, rssi, nf);
+ goto out;
+
+ case IEEE80211_FC0_TYPE_CTL:
+ vap->iv_stats.is_rx_ctl++;
+ IEEE80211_NODE_STAT(ni, rx_ctrl);
+ goto out;
+
+ default:
+ IEEE80211_DISCARD(vap, IEEE80211_MSG_ANY,
+ wh, "bad", "frame type 0x%x", type);
+ /* should not come here */
+ break;
+ }
+err:
+ ifp->if_ierrors++;
+out:
+ if (m != NULL) {
+ if (need_tap && ieee80211_radiotap_active_vap(vap))
+ ieee80211_radiotap_rx(vap, m);
+ m_freem(m);
+ }
+ return type;
+#undef SEQ_LEQ
+}
+
+static void
+wds_recv_mgmt(struct ieee80211_node *ni, struct mbuf *m0,
+ int subtype, int rssi, int nf)
+{
+ struct ieee80211vap *vap = ni->ni_vap;
+ struct ieee80211com *ic = ni->ni_ic;
+ struct ieee80211_frame *wh;
+ u_int8_t *frm, *efrm;
+
+ wh = mtod(m0, struct ieee80211_frame *);
+ frm = (u_int8_t *)&wh[1];
+ efrm = mtod(m0, u_int8_t *) + m0->m_len;
+ switch (subtype) {
+ case IEEE80211_FC0_SUBTYPE_DEAUTH:
+ case IEEE80211_FC0_SUBTYPE_PROBE_RESP:
+ case IEEE80211_FC0_SUBTYPE_BEACON:
+ case IEEE80211_FC0_SUBTYPE_PROBE_REQ:
+ case IEEE80211_FC0_SUBTYPE_AUTH:
+ case IEEE80211_FC0_SUBTYPE_ASSOC_REQ:
+ case IEEE80211_FC0_SUBTYPE_REASSOC_REQ:
+ case IEEE80211_FC0_SUBTYPE_ASSOC_RESP:
+ case IEEE80211_FC0_SUBTYPE_REASSOC_RESP:
+ case IEEE80211_FC0_SUBTYPE_DISASSOC:
+ vap->iv_stats.is_rx_mgtdiscard++;
+ break;
+ case IEEE80211_FC0_SUBTYPE_ACTION:
+ if (vap->iv_state != IEEE80211_S_RUN ||
+ IEEE80211_IS_MULTICAST(wh->i_addr1)) {
+ vap->iv_stats.is_rx_mgtdiscard++;
+ break;
+ }
+ ni->ni_inact = ni->ni_inact_reload;
+ if (ieee80211_parse_action(ni, m0) == 0)
+ ic->ic_recv_action(ni, wh, frm, efrm);
+ break;
+ default:
+ IEEE80211_DISCARD(vap, IEEE80211_MSG_ANY,
+ wh, "mgt", "subtype 0x%x not handled", subtype);
+ vap->iv_stats.is_rx_badsubtype++;
+ break;
+ }
+}
diff --git a/rtems/freebsd/net80211/ieee80211_wds.h b/rtems/freebsd/net80211/ieee80211_wds.h
new file mode 100644
index 00000000..200cba27
--- /dev/null
+++ b/rtems/freebsd/net80211/ieee80211_wds.h
@@ -0,0 +1,39 @@
+/*-
+ * Copyright (c) 2007-2008 Sam Leffler, Errno Consulting
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+#ifndef _NET80211_IEEE80211_WDS_HH_
+#define _NET80211_IEEE80211_WDS_HH_
+
+/*
+ * WDS implementation definitions.
+ */
+void ieee80211_wds_attach(struct ieee80211com *);
+void ieee80211_wds_detach(struct ieee80211com *);
+
+void ieee80211_dwds_mcast(struct ieee80211vap *, struct mbuf *);
+void ieee80211_dwds_discover(struct ieee80211_node *, struct mbuf *);
+int ieee80211_node_wdsq_age(struct ieee80211_node *);
+#endif /* !_NET80211_IEEE80211_WDS_HH_ */
diff --git a/rtems/freebsd/net80211/ieee80211_xauth.c b/rtems/freebsd/net80211/ieee80211_xauth.c
new file mode 100644
index 00000000..0f3d0ed1
--- /dev/null
+++ b/rtems/freebsd/net80211/ieee80211_xauth.c
@@ -0,0 +1,78 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 2004 Video54 Technologies, Inc.
+ * Copyright (c) 2004-2008 Sam Leffler, Errno Consulting
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * External authenticator placeholder module.
+ *
+ * This support is optional; it is only used when the 802.11 layer's
+ * authentication mode is set to use 802.1x or WPA is enabled separately
+ * (for WPA-PSK). If compiled as a module this code does not need
+ * to be present unless 802.1x/WPA is in use.
+ *
+ * The authenticator hooks into the 802.11 layer. At present we use none
+ * of the available callbacks--the user mode authenticator process works
+ * entirely from messages about stations joining and leaving.
+ */
+#include <rtems/freebsd/local/opt_wlan.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/module.h>
+
+#include <rtems/freebsd/sys/socket.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/if_media.h>
+#include <rtems/freebsd/net/ethernet.h>
+#include <rtems/freebsd/net/route.h>
+
+#include <rtems/freebsd/net80211/ieee80211_var.h>
+
+/* XXX number of references from net80211 layer; needed for module code */
+static int nrefs = 0;
+
+/*
+ * One module handles everything for now. May want
+ * to split things up for embedded applications.
+ */
+static const struct ieee80211_authenticator xauth = {
+ .ia_name = "external",
+ .ia_attach = NULL,
+ .ia_detach = NULL,
+ .ia_node_join = NULL,
+ .ia_node_leave = NULL,
+};
+
+IEEE80211_AUTH_MODULE(xauth, 1);
+IEEE80211_AUTH_ALG(x8021x, IEEE80211_AUTH_8021X, xauth);
+IEEE80211_AUTH_ALG(wpa, IEEE80211_AUTH_WPA, xauth);
diff --git a/rtems/freebsd/netgraph/ng_ipfw.h b/rtems/freebsd/netgraph/ng_ipfw.h
new file mode 100644
index 00000000..936ffd88
--- /dev/null
+++ b/rtems/freebsd/netgraph/ng_ipfw.h
@@ -0,0 +1 @@
+/* EMPTY */
diff --git a/rtems/freebsd/netinet/accf_data.c b/rtems/freebsd/netinet/accf_data.c
new file mode 100644
index 00000000..6ba73d4f
--- /dev/null
+++ b/rtems/freebsd/netinet/accf_data.c
@@ -0,0 +1,68 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 2000 Alfred Perlstein <alfred@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#define ACCEPT_FILTER_MOD
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/module.h>
+#include <rtems/freebsd/sys/sysctl.h>
+#include <rtems/freebsd/sys/signalvar.h>
+#include <rtems/freebsd/sys/socketvar.h>
+
+/* accept filter that holds a socket until data arrives */
+
+static int sohasdata(struct socket *so, void *arg, int waitflag);
+
+static struct accept_filter accf_data_filter = {
+ "dataready",
+ sohasdata,
+ NULL,
+ NULL
+};
+
+static moduledata_t accf_data_mod = {
+ "accf_data",
+ accept_filt_generic_mod_event,
+ &accf_data_filter
+};
+
+DECLARE_MODULE(accf_data, accf_data_mod, SI_SUB_DRIVERS, SI_ORDER_MIDDLE);
+
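+/*
+ * A minimal userland sketch (illustrative, not part of this file) of
+ * enabling the "dataready" filter on a listening socket via
+ * SO_ACCEPTFILTER:
+ *
+ *   struct accept_filter_arg afa;
+ *
+ *   bzero(&afa, sizeof(afa));
+ *   strcpy(afa.af_name, "dataready");
+ *   setsockopt(s, SOL_SOCKET, SO_ACCEPTFILTER, &afa, sizeof(afa));
+ *
+ * accept(2) then returns the connection only once data has arrived.
+ */
+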
+static int
+sohasdata(struct socket *so, void *arg, int waitflag)
+{
+
+ if (!soreadable(so))
+ return (SU_OK);
+
+ return (SU_ISCONNECTED);
+}
diff --git a/rtems/freebsd/netinet/accf_dns.c b/rtems/freebsd/netinet/accf_dns.c
new file mode 100644
index 00000000..9278b883
--- /dev/null
+++ b/rtems/freebsd/netinet/accf_dns.c
@@ -0,0 +1,134 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*
+ * Copyright (C) 2007 David Malone <dwmalone@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#define ACCEPT_FILTER_MOD
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/module.h>
+#include <rtems/freebsd/sys/signalvar.h>
+#include <rtems/freebsd/sys/sysctl.h>
+#include <rtems/freebsd/sys/socketvar.h>
+
+/* check for full DNS request */
+static int sohasdns(struct socket *so, void *arg, int waitflag);
+
+struct packet {
+ struct mbuf *m; /* Current mbuf. */
+ struct mbuf *n; /* nextpkt mbuf. */
+ unsigned long moff; /* Offset of the beginning of m. */
+ unsigned long offset; /* Which offset we are working at. */
+ unsigned long len; /* The number of bytes we have to play with. */
+};
+
+#define DNS_OK 0
+#define DNS_WAIT -1
+#define DNS_RUN -2
+
+/* check we can skip over various parts of DNS request */
+static int skippacket(struct sockbuf *sb);
+
+static struct accept_filter accf_dns_filter = {
+ "dnsready",
+ sohasdns,
+ NULL,
+ NULL
+};
+
+static moduledata_t accf_dns_mod = {
+ "accf_dns",
+ accept_filt_generic_mod_event,
+ &accf_dns_filter
+};
+
+DECLARE_MODULE(accf_dns, accf_dns_mod, SI_SUB_DRIVERS, SI_ORDER_MIDDLE);
+
+static int
+sohasdns(struct socket *so, void *arg, int waitflag)
+{
+ struct sockbuf *sb = &so->so_rcv;
+
+ /* If the socket is full, we're ready. */
+ if (sb->sb_cc >= sb->sb_hiwat || sb->sb_mbcnt >= sb->sb_mbmax)
+ goto ready;
+
+ /* Check to see if we have a request. */
+ if (skippacket(sb) == DNS_WAIT)
+ return (SU_OK);
+
+ready:
+ return (SU_ISCONNECTED);
+}
+
+#define GET8(p, val) do { \
+ if (p->offset < p->moff) \
+ return DNS_RUN; \
+ while (p->offset >= p->moff + p->m->m_len) { \
+ p->moff += p->m->m_len; \
+ p->m = p->m->m_next; \
+ if (p->m == NULL) { \
+ p->m = p->n; \
+ p->n = p->m->m_nextpkt; \
+ } \
+ if (p->m == NULL) \
+ return DNS_WAIT; \
+ } \
+ val = *(mtod(p->m, unsigned char *) + (p->offset - p->moff)); \
+ p->offset++; \
+ } while (0)
+
+#define GET16(p, val) do { \
+ unsigned int v0, v1; \
+ GET8(p, v0); \
+ GET8(p, v1); \
+ val = v0 * 0x100 + v1; \
+ } while (0)
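+
+/*
+ * GET16 assembles a big-endian 16-bit value from two consecutive
+ * bytes, e.g. the octets 0x01 0x2c yield 0x01 * 0x100 + 0x2c = 300.
+ * skippacket() below uses it to read the 2-byte length prefix that
+ * precedes a DNS request on a stream socket (the usual DNS-over-TCP
+ * framing, which this filter is assumed to target).
+ */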
+
+static int
+skippacket(struct sockbuf *sb) {
+ unsigned long packlen;
+ struct packet q, *p = &q;
+
+ if (sb->sb_cc < 2)
+ return DNS_WAIT;
+
+ q.m = sb->sb_mb;
+ q.n = q.m->m_nextpkt;
+ q.moff = 0;
+ q.offset = 0;
+ q.len = sb->sb_cc;
+
+ GET16(p, packlen);
+ if (packlen + 2 > q.len)
+ return DNS_WAIT;
+
+ return DNS_OK;
+}
diff --git a/rtems/freebsd/netinet/accf_http.c b/rtems/freebsd/netinet/accf_http.c
new file mode 100644
index 00000000..121b34cd
--- /dev/null
+++ b/rtems/freebsd/netinet/accf_http.c
@@ -0,0 +1,351 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 2000 Paycounter, Inc.
+ * Author: Alfred Perlstein <alfred@paycounter.com>, <alfred@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#define ACCEPT_FILTER_MOD
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/module.h>
+#include <rtems/freebsd/sys/signalvar.h>
+#include <rtems/freebsd/sys/sysctl.h>
+#include <rtems/freebsd/sys/socketvar.h>
+
+/* check for GET/HEAD */
+static int sohashttpget(struct socket *so, void *arg, int waitflag);
+/* check for HTTP/1.0 or HTTP/1.1 */
+static int soparsehttpvers(struct socket *so, void *arg, int waitflag);
+/* check for end of HTTP/1.x request */
+static int soishttpconnected(struct socket *so, void *arg, int waitflag);
+/* strcmp on an mbuf chain */
+static int mbufstrcmp(struct mbuf *m, struct mbuf *npkt, int offset, char *cmp);
+/* strncmp on an mbuf chain */
+static int mbufstrncmp(struct mbuf *m, struct mbuf *npkt, int offset,
+ int max, char *cmp);
+/* socketbuffer is full */
+static int sbfull(struct sockbuf *sb);
+
+static struct accept_filter accf_http_filter = {
+ "httpready",
+ sohashttpget,
+ NULL,
+ NULL
+};
+
+static moduledata_t accf_http_mod = {
+ "accf_http",
+ accept_filt_generic_mod_event,
+ &accf_http_filter
+};
+
+DECLARE_MODULE(accf_http, accf_http_mod, SI_SUB_DRIVERS, SI_ORDER_MIDDLE);
+
+static int parse_http_version = 1;
+
+SYSCTL_NODE(_net_inet_accf, OID_AUTO, http, CTLFLAG_RW, 0,
+"HTTP accept filter");
+SYSCTL_INT(_net_inet_accf_http, OID_AUTO, parsehttpversion, CTLFLAG_RW,
+&parse_http_version, 1,
+"Parse http version so that non 1.x requests work");
+
+#ifdef ACCF_HTTP_DEBUG
+#define DPRINT(fmt, args...) \
+ do { \
+ printf("%s:%d: " fmt "\n", __func__, __LINE__, ##args); \
+ } while (0)
+#else
+#define DPRINT(fmt, args...)
+#endif
+
+static int
+sbfull(struct sockbuf *sb)
+{
+
+ DPRINT("sbfull, cc(%ld) >= hiwat(%ld): %d, "
+ "mbcnt(%ld) >= mbmax(%ld): %d",
+ sb->sb_cc, sb->sb_hiwat, sb->sb_cc >= sb->sb_hiwat,
+ sb->sb_mbcnt, sb->sb_mbmax, sb->sb_mbcnt >= sb->sb_mbmax);
+ return (sb->sb_cc >= sb->sb_hiwat || sb->sb_mbcnt >= sb->sb_mbmax);
+}
+
+/*
+ * Start at mbuf m (npkt must be provided if it exists) and, beginning
+ * at 'offset' within m, compare the characters in the mbuf chain
+ * against 'cmp'.
+ */
+static int
+mbufstrcmp(struct mbuf *m, struct mbuf *npkt, int offset, char *cmp)
+{
+ struct mbuf *n;
+
+ for (; m != NULL; m = n) {
+ n = npkt;
+ if (npkt)
+ npkt = npkt->m_nextpkt;
+ for (; m; m = m->m_next) {
+ for (; offset < m->m_len; offset++, cmp++) {
+ if (*cmp == '\0')
+ return (1);
+ else if (*cmp != *(mtod(m, char *) + offset))
+ return (0);
+ }
+ if (*cmp == '\0')
+ return (1);
+ offset = 0;
+ }
+ }
+ return (0);
+}
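+
+/*
+ * For example, with a receive buffer beginning "GET / HTTP/1.0",
+ * sohashttpget() below calls mbufstrcmp(m, m->m_nextpkt, 1, "ET ")
+ * to match the rest of the method name; the offset of 1 skips the
+ * leading 'G' it has already checked via mtod().
+ */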
+
+/*
+ * Start at mbuf m (npkt must be provided if it exists) and, beginning
+ * at 'offset' within m, compare the characters in the mbuf chain
+ * against 'cmp', stopping after at most 'max' characters.
+ */
+static int
+mbufstrncmp(struct mbuf *m, struct mbuf *npkt, int offset, int max, char *cmp)
+{
+ struct mbuf *n;
+
+ for (; m != NULL; m = n) {
+ n = npkt;
+ if (npkt)
+ npkt = npkt->m_nextpkt;
+ for (; m; m = m->m_next) {
+ for (; offset < m->m_len; offset++, cmp++, max--) {
+ if (max == 0 || *cmp == '\0')
+ return (1);
+ else if (*cmp != *(mtod(m, char *) + offset))
+ return (0);
+ }
+ if (max == 0 || *cmp == '\0')
+ return (1);
+ offset = 0;
+ }
+ }
+ return (0);
+}
+
+#define STRSETUP(sptr, slen, str) \
+ do { \
+ sptr = str; \
+ slen = sizeof(str) - 1; \
+ } while(0)
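+
+/*
+ * NB: STRSETUP applies sizeof to a string literal, which counts the
+ * terminating NUL, so e.g. STRSETUP(cmp, cmplen, "ET ") leaves
+ * cmplen = 4 - 1 = 3.
+ */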
+
+static int
+sohashttpget(struct socket *so, void *arg, int waitflag)
+{
+
+ if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) == 0 && !sbfull(&so->so_rcv)) {
+ struct mbuf *m;
+ char *cmp;
+ int cmplen, cc;
+
+ m = so->so_rcv.sb_mb;
+ cc = so->so_rcv.sb_cc - 1;
+ if (cc < 1)
+ return (SU_OK);
+ switch (*mtod(m, char *)) {
+ case 'G':
+ STRSETUP(cmp, cmplen, "ET ");
+ break;
+ case 'H':
+ STRSETUP(cmp, cmplen, "EAD ");
+ break;
+ default:
+ goto fallout;
+ }
+ if (cc < cmplen) {
+ if (mbufstrncmp(m, m->m_nextpkt, 1, cc, cmp) == 1) {
+ DPRINT("short cc (%d) but mbufstrncmp ok", cc);
+ return (SU_OK);
+ } else {
+ DPRINT("short cc (%d) mbufstrncmp failed", cc);
+ goto fallout;
+ }
+ }
+ if (mbufstrcmp(m, m->m_nextpkt, 1, cmp) == 1) {
+ DPRINT("mbufstrcmp ok");
+ if (parse_http_version == 0)
+ return (soishttpconnected(so, arg, waitflag));
+ else
+ return (soparsehttpvers(so, arg, waitflag));
+ }
+ DPRINT("mbufstrcmp bad");
+ }
+
+fallout:
+ DPRINT("fallout");
+ return (SU_ISCONNECTED);
+}
+
+static int
+soparsehttpvers(struct socket *so, void *arg, int waitflag)
+{
+ struct mbuf *m, *n;
+ int i, cc, spaces, inspaces;
+
+ if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) != 0 || sbfull(&so->so_rcv))
+ goto fallout;
+
+ m = so->so_rcv.sb_mb;
+ cc = so->so_rcv.sb_cc;
+ inspaces = spaces = 0;
+ for (m = so->so_rcv.sb_mb; m; m = n) {
+ n = m->m_nextpkt;
+ for (; m; m = m->m_next) {
+ for (i = 0; i < m->m_len; i++, cc--) {
+ switch (*(mtod(m, char *) + i)) {
+ case ' ':
+ /* tabs? '\t' */
+ if (!inspaces) {
+ spaces++;
+ inspaces = 1;
+ }
+ break;
+ case '\r':
+ case '\n':
+ DPRINT("newline");
+ goto fallout;
+ default:
+ if (spaces != 2) {
+ inspaces = 0;
+ break;
+ }
+
+ /*
+ * if we don't have enough characters
+ * left (cc < sizeof("HTTP/1.0") - 1)
+ * then see if the remaining ones
+ * are a request we can parse.
+ */
+ if (cc < sizeof("HTTP/1.0") - 1) {
+ if (mbufstrncmp(m, n, i, cc,
+ "HTTP/1.") == 1) {
+ DPRINT("ok");
+ goto readmore;
+ } else {
+ DPRINT("bad");
+ goto fallout;
+ }
+ } else if (
+ mbufstrcmp(m, n, i, "HTTP/1.0") ||
+ mbufstrcmp(m, n, i, "HTTP/1.1")) {
+ DPRINT("ok");
+ return (soishttpconnected(so,
+ arg, waitflag));
+ } else {
+ DPRINT("bad");
+ goto fallout;
+ }
+ }
+ }
+ }
+ }
+readmore:
+ DPRINT("readmore");
+ /*
+ * if we get here we have seen neither a newline nor
+ * anything we don't understand, so try again
+ */
+ soupcall_set(so, SO_RCV, soparsehttpvers, arg);
+ return (SU_OK);
+
+fallout:
+ DPRINT("fallout");
+ return (SU_ISCONNECTED);
+}
+
+#define NCHRS 3
+
+static int
+soishttpconnected(struct socket *so, void *arg, int waitflag)
+{
+ char a, b, c;
+ struct mbuf *m, *n;
+ int ccleft, copied;
+
+ DPRINT("start");
+ if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) != 0 || sbfull(&so->so_rcv))
+ goto gotit;
+
+ /*
+ * Walk the socketbuffer and copy the last NCHRS (3) bytes into
+ * a, b, and c:
+ *   copied - how much we've copied so far
+ *   ccleft - how many bytes remain in the socketbuffer
+ * Just loop over the mbufs, subtracting from 'ccleft' until only
+ * NCHRS bytes are left.
+ */
+ copied = 0;
+ ccleft = so->so_rcv.sb_cc;
+ if (ccleft < NCHRS)
+ goto readmore;
+ a = b = c = '\0';
+ for (m = so->so_rcv.sb_mb; m; m = n) {
+ n = m->m_nextpkt;
+ for (; m; m = m->m_next) {
+ ccleft -= m->m_len;
+ if (ccleft <= NCHRS) {
+ char *src;
+ int tocopy;
+
+ tocopy = (NCHRS - ccleft) - copied;
+ src = mtod(m, char *) + (m->m_len - tocopy);
+
+ while (tocopy--) {
+ switch (copied++) {
+ case 0:
+ a = *src++;
+ break;
+ case 1:
+ b = *src++;
+ break;
+ case 2:
+ c = *src++;
+ break;
+ }
+ }
+ }
+ }
+ }
+ if (c == '\n' && (b == '\n' || (b == '\r' && a == '\n'))) {
+ /* we have all request headers */
+ goto gotit;
+ }
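+ /*
+ * Worked example: a request ending in "\r\n\r\n" leaves a = '\n',
+ * b = '\r', c = '\n' and matches the test above via its second
+ * clause; one ending in "\n\n" leaves b = c = '\n' and matches via
+ * the first.  Either way the request headers are complete.
+ */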
+
+readmore:
+ soupcall_set(so, SO_RCV, soishttpconnected, arg);
+ return (SU_OK);
+
+gotit:
+ return (SU_ISCONNECTED);
+}
diff --git a/rtems/freebsd/netinet/icmp6.h b/rtems/freebsd/netinet/icmp6.h
new file mode 100644
index 00000000..f2f0577e
--- /dev/null
+++ b/rtems/freebsd/netinet/icmp6.h
@@ -0,0 +1,741 @@
+/* $FreeBSD$ */
+/* $KAME: icmp6.h,v 1.46 2001/04/27 15:09:48 itojun Exp $ */
+
+/*-
+ * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the project nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*-
+ * Copyright (c) 1982, 1986, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)ip_icmp.h 8.1 (Berkeley) 6/10/93
+ */
+
+#ifndef _NETINET_ICMP6_HH_
+#define _NETINET_ICMP6_HH_
+
+#define ICMPV6_PLD_MAXLEN 1232 /* IPV6_MMTU - sizeof(struct ip6_hdr)
+ - sizeof(struct icmp6_hdr) */
+
+struct icmp6_hdr {
+ u_int8_t icmp6_type; /* type field */
+ u_int8_t icmp6_code; /* code field */
+ u_int16_t icmp6_cksum; /* checksum field */
+ union {
+ u_int32_t icmp6_un_data32[1]; /* type-specific field */
+ u_int16_t icmp6_un_data16[2]; /* type-specific field */
+ u_int8_t icmp6_un_data8[4]; /* type-specific field */
+ } icmp6_dataun;
+} __packed;
+
+#define icmp6_data32 icmp6_dataun.icmp6_un_data32
+#define icmp6_data16 icmp6_dataun.icmp6_un_data16
+#define icmp6_data8 icmp6_dataun.icmp6_un_data8
+#define icmp6_pptr icmp6_data32[0] /* parameter prob */
+#define icmp6_mtu icmp6_data32[0] /* packet too big */
+#define icmp6_id icmp6_data16[0] /* echo request/reply */
+#define icmp6_seq icmp6_data16[1] /* echo request/reply */
+#define icmp6_maxdelay icmp6_data16[0] /* mcast group membership */
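+
+/*
+ * A minimal sketch of filling in an echo request header with the
+ * accessors above (illustrative only; 'ident' is a hypothetical
+ * variable and the checksum is left to the output path):
+ *
+ *   struct icmp6_hdr h;
+ *
+ *   h.icmp6_type = ICMP6_ECHO_REQUEST;
+ *   h.icmp6_code = 0;
+ *   h.icmp6_id = htons(ident);
+ *   h.icmp6_seq = htons(1);
+ */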
+
+#define ICMP6_DST_UNREACH 1 /* dest unreachable, codes: */
+#define ICMP6_PACKET_TOO_BIG 2 /* packet too big */
+#define ICMP6_TIME_EXCEEDED 3 /* time exceeded, code: */
+#define ICMP6_PARAM_PROB 4 /* ip6 header bad */
+
+#define ICMP6_ECHO_REQUEST 128 /* echo service */
+#define ICMP6_ECHO_REPLY 129 /* echo reply */
+#define MLD_LISTENER_QUERY 130 /* multicast listener query */
+#define MLD_LISTENER_REPORT 131 /* multicast listener report */
+#define MLD_LISTENER_DONE 132 /* multicast listener done */
+#define MLD_LISTENER_REDUCTION MLD_LISTENER_DONE /* RFC3542 definition */
+
+/* RFC2292 decls */
+#define ICMP6_MEMBERSHIP_QUERY 130 /* group membership query */
+#define ICMP6_MEMBERSHIP_REPORT 131 /* group membership report */
+#define ICMP6_MEMBERSHIP_REDUCTION 132 /* group membership termination */
+
+#ifndef _KERNEL
+/* the following are for backward compatibility with old KAME apps. */
+#define MLD6_LISTENER_QUERY MLD_LISTENER_QUERY
+#define MLD6_LISTENER_REPORT MLD_LISTENER_REPORT
+#define MLD6_LISTENER_DONE MLD_LISTENER_DONE
+#endif
+
+#define ND_ROUTER_SOLICIT 133 /* router solicitation */
+#define ND_ROUTER_ADVERT 134 /* router advertisement */
+#define ND_NEIGHBOR_SOLICIT 135 /* neighbor solicitation */
+#define ND_NEIGHBOR_ADVERT 136 /* neighbor advertisement */
+#define ND_REDIRECT 137 /* redirect */
+
+#define ICMP6_ROUTER_RENUMBERING 138 /* router renumbering */
+
+#define ICMP6_WRUREQUEST 139 /* who are you request */
+#define ICMP6_WRUREPLY 140 /* who are you reply */
+#define ICMP6_FQDN_QUERY 139 /* FQDN query */
+#define ICMP6_FQDN_REPLY 140 /* FQDN reply */
+#define ICMP6_NI_QUERY 139 /* node information request */
+#define ICMP6_NI_REPLY 140 /* node information reply */
+#define MLDV2_LISTENER_REPORT 143 /* RFC3810 listener report */
+
+/* The definitions below are experimental. TBA */
+#define MLD_MTRACE_RESP 200 /* mtrace resp (to sender) */
+#define MLD_MTRACE 201 /* mtrace messages */
+
+#ifndef _KERNEL
+#define MLD6_MTRACE_RESP MLD_MTRACE_RESP
+#define MLD6_MTRACE MLD_MTRACE
+#endif
+
+#define ICMP6_MAXTYPE 201
+
+#define ICMP6_DST_UNREACH_NOROUTE 0 /* no route to destination */
+#define ICMP6_DST_UNREACH_ADMIN 1 /* administratively prohibited */
+#define ICMP6_DST_UNREACH_NOTNEIGHBOR 2 /* not a neighbor (obsolete) */
+#define ICMP6_DST_UNREACH_BEYONDSCOPE 2 /* beyond scope of source address */
+#define ICMP6_DST_UNREACH_ADDR 3 /* address unreachable */
+#define ICMP6_DST_UNREACH_NOPORT 4 /* port unreachable */
+
+#define ICMP6_TIME_EXCEED_TRANSIT 0 /* ttl==0 in transit */
+#define ICMP6_TIME_EXCEED_REASSEMBLY 1 /* ttl==0 in reass */
+
+#define ICMP6_PARAMPROB_HEADER 0 /* erroneous header field */
+#define ICMP6_PARAMPROB_NEXTHEADER 1 /* unrecognized next header */
+#define ICMP6_PARAMPROB_OPTION 2 /* unrecognized option */
+
+#define ICMP6_INFOMSG_MASK 0x80 /* all informational messages */
+
+#define ICMP6_NI_SUBJ_IPV6 0 /* Query Subject is an IPv6 address */
+#define ICMP6_NI_SUBJ_FQDN 1 /* Query Subject is a Domain name */
+#define ICMP6_NI_SUBJ_IPV4 2 /* Query Subject is an IPv4 address */
+
+#define ICMP6_NI_SUCCESS 0 /* node information successful reply */
+#define ICMP6_NI_REFUSED 1 /* node information request is refused */
+#define ICMP6_NI_UNKNOWN 2 /* unknown Qtype */
+
+#define ICMP6_ROUTER_RENUMBERING_COMMAND 0 /* rr command */
+#define ICMP6_ROUTER_RENUMBERING_RESULT 1 /* rr result */
+#define ICMP6_ROUTER_RENUMBERING_SEQNUM_RESET 255 /* rr seq num reset */
+
+/* Used in kernel only */
+#define ND_REDIRECT_ONLINK 0 /* redirect to an on-link node */
+#define ND_REDIRECT_ROUTER 1 /* redirect to a better router */
+
+/*
+ * Multicast Listener Discovery
+ */
+struct mld_hdr {
+ struct icmp6_hdr mld_icmp6_hdr;
+ struct in6_addr mld_addr; /* multicast address */
+} __packed;
+
+/* definitions to provide backward compatibility with old KAME applications */
+#ifndef _KERNEL
+#define mld6_hdr mld_hdr
+#define mld6_type mld_type
+#define mld6_code mld_code
+#define mld6_cksum mld_cksum
+#define mld6_maxdelay mld_maxdelay
+#define mld6_reserved mld_reserved
+#define mld6_addr mld_addr
+#endif
+
+/* shortcut macro definitions */
+#define mld_type mld_icmp6_hdr.icmp6_type
+#define mld_code mld_icmp6_hdr.icmp6_code
+#define mld_cksum mld_icmp6_hdr.icmp6_cksum
+#define mld_maxdelay mld_icmp6_hdr.icmp6_data16[0]
+#define mld_reserved mld_icmp6_hdr.icmp6_data16[1]
+#define mld_v2_reserved mld_icmp6_hdr.icmp6_data16[0]
+#define mld_v2_numrecs mld_icmp6_hdr.icmp6_data16[1]
+
+/*
+ * Neighbor Discovery
+ */
+
+struct nd_router_solicit { /* router solicitation */
+ struct icmp6_hdr nd_rs_hdr;
+ /* could be followed by options */
+} __packed;
+
+#define nd_rs_type nd_rs_hdr.icmp6_type
+#define nd_rs_code nd_rs_hdr.icmp6_code
+#define nd_rs_cksum nd_rs_hdr.icmp6_cksum
+#define nd_rs_reserved nd_rs_hdr.icmp6_data32[0]
+
+struct nd_router_advert { /* router advertisement */
+ struct icmp6_hdr nd_ra_hdr;
+ u_int32_t nd_ra_reachable; /* reachable time */
+ u_int32_t nd_ra_retransmit; /* retransmit timer */
+ /* could be followed by options */
+} __packed;
+
+#define nd_ra_type nd_ra_hdr.icmp6_type
+#define nd_ra_code nd_ra_hdr.icmp6_code
+#define nd_ra_cksum nd_ra_hdr.icmp6_cksum
+#define nd_ra_curhoplimit nd_ra_hdr.icmp6_data8[0]
+#define nd_ra_flags_reserved nd_ra_hdr.icmp6_data8[1]
+#define ND_RA_FLAG_MANAGED 0x80
+#define ND_RA_FLAG_OTHER 0x40
+#define ND_RA_FLAG_HA 0x20
+
+/*
+ * Router preference values based on draft-draves-ipngwg-router-selection-01.
+ * These are non-standard definitions.
+ */
+#define ND_RA_FLAG_RTPREF_MASK 0x18 /* 00011000 */
+
+#define ND_RA_FLAG_RTPREF_HIGH 0x08 /* 00001000 */
+#define ND_RA_FLAG_RTPREF_MEDIUM 0x00 /* 00000000 */
+#define ND_RA_FLAG_RTPREF_LOW 0x18 /* 00011000 */
+#define ND_RA_FLAG_RTPREF_RSV 0x10 /* 00010000 */
+
+#define nd_ra_router_lifetime nd_ra_hdr.icmp6_data16[1]
+
+struct nd_neighbor_solicit { /* neighbor solicitation */
+ struct icmp6_hdr nd_ns_hdr;
+ struct in6_addr nd_ns_target; /* target address */
+ /* could be followed by options */
+} __packed;
+
+#define nd_ns_type nd_ns_hdr.icmp6_type
+#define nd_ns_code nd_ns_hdr.icmp6_code
+#define nd_ns_cksum nd_ns_hdr.icmp6_cksum
+#define nd_ns_reserved nd_ns_hdr.icmp6_data32[0]
+
+struct nd_neighbor_advert { /* neighbor advertisement */
+ struct icmp6_hdr nd_na_hdr;
+ struct in6_addr nd_na_target; /* target address */
+ /* could be followed by options */
+} __packed;
+
+#define nd_na_type nd_na_hdr.icmp6_type
+#define nd_na_code nd_na_hdr.icmp6_code
+#define nd_na_cksum nd_na_hdr.icmp6_cksum
+#define nd_na_flags_reserved nd_na_hdr.icmp6_data32[0]
+#if BYTE_ORDER == BIG_ENDIAN
+#define ND_NA_FLAG_ROUTER 0x80000000
+#define ND_NA_FLAG_SOLICITED 0x40000000
+#define ND_NA_FLAG_OVERRIDE 0x20000000
+#else
+#if BYTE_ORDER == LITTLE_ENDIAN
+#define ND_NA_FLAG_ROUTER 0x80
+#define ND_NA_FLAG_SOLICITED 0x40
+#define ND_NA_FLAG_OVERRIDE 0x20
+#endif
+#endif
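+
+/*
+ * NB: these flags sit in the first octet of nd_na_flags_reserved as
+ * it appears on the wire.  Reading that network-order word on a
+ * little-endian host puts the first octet in the low-order byte,
+ * which is why 0x80000000 on big-endian corresponds to 0x80 here.
+ */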
+
+struct nd_redirect { /* redirect */
+ struct icmp6_hdr nd_rd_hdr;
+ struct in6_addr nd_rd_target; /* target address */
+ struct in6_addr nd_rd_dst; /* destination address */
+ /* could be followed by options */
+} __packed;
+
+#define nd_rd_type nd_rd_hdr.icmp6_type
+#define nd_rd_code nd_rd_hdr.icmp6_code
+#define nd_rd_cksum nd_rd_hdr.icmp6_cksum
+#define nd_rd_reserved nd_rd_hdr.icmp6_data32[0]
+
+struct nd_opt_hdr { /* Neighbor discovery option header */
+ u_int8_t nd_opt_type;
+ u_int8_t nd_opt_len;
+ /* followed by option specific data*/
+} __packed;
+
+#define ND_OPT_SOURCE_LINKADDR 1
+#define ND_OPT_TARGET_LINKADDR 2
+#define ND_OPT_PREFIX_INFORMATION 3
+#define ND_OPT_REDIRECTED_HEADER 4
+#define ND_OPT_MTU 5
+
+#define ND_OPT_ROUTE_INFO 200 /* draft-ietf-ipngwg-router-preference, not officially assigned yet */
+
+struct nd_opt_prefix_info { /* prefix information */
+ u_int8_t nd_opt_pi_type;
+ u_int8_t nd_opt_pi_len;
+ u_int8_t nd_opt_pi_prefix_len;
+ u_int8_t nd_opt_pi_flags_reserved;
+ u_int32_t nd_opt_pi_valid_time;
+ u_int32_t nd_opt_pi_preferred_time;
+ u_int32_t nd_opt_pi_reserved2;
+ struct in6_addr nd_opt_pi_prefix;
+} __packed;
+
+#define ND_OPT_PI_FLAG_ONLINK 0x80
+#define ND_OPT_PI_FLAG_AUTO 0x40
+
+struct nd_opt_rd_hdr { /* redirected header */
+ u_int8_t nd_opt_rh_type;
+ u_int8_t nd_opt_rh_len;
+ u_int16_t nd_opt_rh_reserved1;
+ u_int32_t nd_opt_rh_reserved2;
+ /* followed by IP header and data */
+} __packed;
+
+struct nd_opt_mtu { /* MTU option */
+ u_int8_t nd_opt_mtu_type;
+ u_int8_t nd_opt_mtu_len;
+ u_int16_t nd_opt_mtu_reserved;
+ u_int32_t nd_opt_mtu_mtu;
+} __packed;
+
+struct nd_opt_route_info { /* route info */
+ u_int8_t nd_opt_rti_type;
+ u_int8_t nd_opt_rti_len;
+ u_int8_t nd_opt_rti_prefixlen;
+ u_int8_t nd_opt_rti_flags;
+ u_int32_t nd_opt_rti_lifetime;
+ /* prefix follows */
+} __packed;
+
+/*
+ * icmp6 namelookup
+ */
+
+struct icmp6_namelookup {
+ struct icmp6_hdr icmp6_nl_hdr;
+ u_int8_t icmp6_nl_nonce[8];
+ int32_t icmp6_nl_ttl;
+#if 0
+ u_int8_t icmp6_nl_len;
+ u_int8_t icmp6_nl_name[3];
+#endif
+ /* could be followed by options */
+} __packed;
+
+/*
+ * icmp6 node information
+ */
+struct icmp6_nodeinfo {
+ struct icmp6_hdr icmp6_ni_hdr;
+ u_int8_t icmp6_ni_nonce[8];
+ /* could be followed by reply data */
+} __packed;
+
+#define ni_type icmp6_ni_hdr.icmp6_type
+#define ni_code icmp6_ni_hdr.icmp6_code
+#define ni_cksum icmp6_ni_hdr.icmp6_cksum
+#define ni_qtype icmp6_ni_hdr.icmp6_data16[0]
+#define ni_flags icmp6_ni_hdr.icmp6_data16[1]
+
+#define NI_QTYPE_NOOP 0 /* NOOP */
+#define NI_QTYPE_SUPTYPES 1 /* Supported Qtypes */
+#define NI_QTYPE_FQDN 2 /* FQDN (draft 04) */
+#define NI_QTYPE_DNSNAME 2 /* DNS Name */
+#define NI_QTYPE_NODEADDR 3 /* Node Addresses */
+#define NI_QTYPE_IPV4ADDR 4 /* IPv4 Addresses */
+
+#if BYTE_ORDER == BIG_ENDIAN
+#define NI_SUPTYPE_FLAG_COMPRESS 0x1
+#define NI_FQDN_FLAG_VALIDTTL 0x1
+#elif BYTE_ORDER == LITTLE_ENDIAN
+#define NI_SUPTYPE_FLAG_COMPRESS 0x0100
+#define NI_FQDN_FLAG_VALIDTTL 0x0100
+#endif
+
+#ifdef NAME_LOOKUPS_04
+#if BYTE_ORDER == BIG_ENDIAN
+#define NI_NODEADDR_FLAG_LINKLOCAL 0x1
+#define NI_NODEADDR_FLAG_SITELOCAL 0x2
+#define NI_NODEADDR_FLAG_GLOBAL 0x4
+#define NI_NODEADDR_FLAG_ALL 0x8
+#define NI_NODEADDR_FLAG_TRUNCATE 0x10
+#define NI_NODEADDR_FLAG_ANYCAST 0x20 /* just experimental. not in spec */
+#elif BYTE_ORDER == LITTLE_ENDIAN
+#define NI_NODEADDR_FLAG_LINKLOCAL 0x0100
+#define NI_NODEADDR_FLAG_SITELOCAL 0x0200
+#define NI_NODEADDR_FLAG_GLOBAL 0x0400
+#define NI_NODEADDR_FLAG_ALL 0x0800
+#define NI_NODEADDR_FLAG_TRUNCATE 0x1000
+#define NI_NODEADDR_FLAG_ANYCAST 0x2000 /* just experimental. not in spec */
+#endif
+#else /* draft-ietf-ipngwg-icmp-name-lookups-05 (and later?) */
+#if BYTE_ORDER == BIG_ENDIAN
+#define NI_NODEADDR_FLAG_TRUNCATE 0x1
+#define NI_NODEADDR_FLAG_ALL 0x2
+#define NI_NODEADDR_FLAG_COMPAT 0x4
+#define NI_NODEADDR_FLAG_LINKLOCAL 0x8
+#define NI_NODEADDR_FLAG_SITELOCAL 0x10
+#define NI_NODEADDR_FLAG_GLOBAL 0x20
+#define NI_NODEADDR_FLAG_ANYCAST 0x40 /* just experimental. not in spec */
+#elif BYTE_ORDER == LITTLE_ENDIAN
+#define NI_NODEADDR_FLAG_TRUNCATE 0x0100
+#define NI_NODEADDR_FLAG_ALL 0x0200
+#define NI_NODEADDR_FLAG_COMPAT 0x0400
+#define NI_NODEADDR_FLAG_LINKLOCAL 0x0800
+#define NI_NODEADDR_FLAG_SITELOCAL 0x1000
+#define NI_NODEADDR_FLAG_GLOBAL 0x2000
+#define NI_NODEADDR_FLAG_ANYCAST 0x4000 /* just experimental. not in spec */
+#endif
+#endif
+
+struct ni_reply_fqdn {
+ u_int32_t ni_fqdn_ttl; /* TTL */
+ u_int8_t ni_fqdn_namelen; /* length in octets of the FQDN */
+ u_int8_t ni_fqdn_name[3]; /* XXX: alignment */
+} __packed;
+
+/*
+ * Router Renumbering, as in router-renum-08.txt
+ */
+struct icmp6_router_renum { /* router renumbering header */
+ struct icmp6_hdr rr_hdr;
+ u_int8_t rr_segnum;
+ u_int8_t rr_flags;
+ u_int16_t rr_maxdelay;
+ u_int32_t rr_reserved;
+} __packed;
+
+#define ICMP6_RR_FLAGS_TEST 0x80
+#define ICMP6_RR_FLAGS_REQRESULT 0x40
+#define ICMP6_RR_FLAGS_FORCEAPPLY 0x20
+#define ICMP6_RR_FLAGS_SPECSITE 0x10
+#define ICMP6_RR_FLAGS_PREVDONE 0x08
+
+#define rr_type rr_hdr.icmp6_type
+#define rr_code rr_hdr.icmp6_code
+#define rr_cksum rr_hdr.icmp6_cksum
+#define rr_seqnum rr_hdr.icmp6_data32[0]
+
+struct rr_pco_match { /* match prefix part */
+ u_int8_t rpm_code;
+ u_int8_t rpm_len;
+ u_int8_t rpm_ordinal;
+ u_int8_t rpm_matchlen;
+ u_int8_t rpm_minlen;
+ u_int8_t rpm_maxlen;
+ u_int16_t rpm_reserved;
+ struct in6_addr rpm_prefix;
+} __packed;
+
+#define RPM_PCO_ADD 1
+#define RPM_PCO_CHANGE 2
+#define RPM_PCO_SETGLOBAL 3
+#define RPM_PCO_MAX 4
+
+struct rr_pco_use { /* use prefix part */
+ u_int8_t rpu_uselen;
+ u_int8_t rpu_keeplen;
+ u_int8_t rpu_ramask;
+ u_int8_t rpu_raflags;
+ u_int32_t rpu_vltime;
+ u_int32_t rpu_pltime;
+ u_int32_t rpu_flags;
+ struct in6_addr rpu_prefix;
+} __packed;
+#define ICMP6_RR_PCOUSE_RAFLAGS_ONLINK 0x80
+#define ICMP6_RR_PCOUSE_RAFLAGS_AUTO 0x40
+
+#if BYTE_ORDER == BIG_ENDIAN
+#define ICMP6_RR_PCOUSE_FLAGS_DECRVLTIME 0x80000000
+#define ICMP6_RR_PCOUSE_FLAGS_DECRPLTIME 0x40000000
+#elif BYTE_ORDER == LITTLE_ENDIAN
+#define ICMP6_RR_PCOUSE_FLAGS_DECRVLTIME 0x80
+#define ICMP6_RR_PCOUSE_FLAGS_DECRPLTIME 0x40
+#endif
+
+struct rr_result { /* router renumbering result message */
+ u_int16_t rrr_flags;
+ u_int8_t rrr_ordinal;
+ u_int8_t rrr_matchedlen;
+ u_int32_t rrr_ifid;
+ struct in6_addr rrr_prefix;
+} __packed;
+#if BYTE_ORDER == BIG_ENDIAN
+#define ICMP6_RR_RESULT_FLAGS_OOB 0x0002
+#define ICMP6_RR_RESULT_FLAGS_FORBIDDEN 0x0001
+#elif BYTE_ORDER == LITTLE_ENDIAN
+#define ICMP6_RR_RESULT_FLAGS_OOB 0x0200
+#define ICMP6_RR_RESULT_FLAGS_FORBIDDEN 0x0100
+#endif
+
+/*
+ * icmp6 filter structures.
+ */
+
+struct icmp6_filter {
+ u_int32_t icmp6_filt[8];
+};
+
+#ifdef _KERNEL
+#define ICMP6_FILTER_SETPASSALL(filterp) \
+do { \
+ int i; u_char *p; \
+ p = (u_char *)filterp; \
+ for (i = 0; i < sizeof(struct icmp6_filter); i++) \
+ p[i] = 0xff; \
+} while (/*CONSTCOND*/ 0)
+#define ICMP6_FILTER_SETBLOCKALL(filterp) \
+ bzero(filterp, sizeof(struct icmp6_filter))
+#else /* _KERNEL */
+#define ICMP6_FILTER_SETPASSALL(filterp) \
+ memset(filterp, 0xff, sizeof(struct icmp6_filter))
+#define ICMP6_FILTER_SETBLOCKALL(filterp) \
+ memset(filterp, 0x00, sizeof(struct icmp6_filter))
+#endif /* _KERNEL */
+
+#define ICMP6_FILTER_SETPASS(type, filterp) \
+ (((filterp)->icmp6_filt[(type) >> 5]) |= (1 << ((type) & 31)))
+#define ICMP6_FILTER_SETBLOCK(type, filterp) \
+ (((filterp)->icmp6_filt[(type) >> 5]) &= ~(1 << ((type) & 31)))
+#define ICMP6_FILTER_WILLPASS(type, filterp) \
+ ((((filterp)->icmp6_filt[(type) >> 5]) & (1 << ((type) & 31))) != 0)
+#define ICMP6_FILTER_WILLBLOCK(type, filterp) \
+ ((((filterp)->icmp6_filt[(type) >> 5]) & (1 << ((type) & 31))) == 0)
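+
+/*
+ * Typical application use of the filter macros (a sketch following
+ * the RFC 3542 API; the ICMP6_FILTER socket option name is assumed
+ * from that specification):
+ *
+ *   struct icmp6_filter filt;
+ *
+ *   ICMP6_FILTER_SETBLOCKALL(&filt);
+ *   ICMP6_FILTER_SETPASS(ICMP6_ECHO_REPLY, &filt);
+ *   setsockopt(s, IPPROTO_ICMPV6, ICMP6_FILTER, &filt, sizeof(filt));
+ *
+ * Only echo replies are then delivered to the raw socket.
+ */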
+
+/*
+ * Variables related to this implementation
+ * of the internet control message protocol version 6.
+ */
+struct icmp6errstat {
+ u_quad_t icp6errs_dst_unreach_noroute;
+ u_quad_t icp6errs_dst_unreach_admin;
+ u_quad_t icp6errs_dst_unreach_beyondscope;
+ u_quad_t icp6errs_dst_unreach_addr;
+ u_quad_t icp6errs_dst_unreach_noport;
+ u_quad_t icp6errs_packet_too_big;
+ u_quad_t icp6errs_time_exceed_transit;
+ u_quad_t icp6errs_time_exceed_reassembly;
+ u_quad_t icp6errs_paramprob_header;
+ u_quad_t icp6errs_paramprob_nextheader;
+ u_quad_t icp6errs_paramprob_option;
+ u_quad_t icp6errs_redirect; /* we regard redirect as an error here */
+ u_quad_t icp6errs_unknown;
+};
+
+struct icmp6stat {
+/* statistics related to icmp6 packets generated */
+ u_quad_t icp6s_error; /* # of calls to icmp6_error */
+ u_quad_t icp6s_canterror; /* no error 'cuz old was icmp */
+ u_quad_t icp6s_toofreq; /* no error 'cuz rate limitation */
+ u_quad_t icp6s_outhist[256];
+/* statistics related to input message processed */
+ u_quad_t icp6s_badcode; /* icmp6_code out of range */
+ u_quad_t icp6s_tooshort; /* packet < sizeof(struct icmp6_hdr) */
+ u_quad_t icp6s_checksum; /* bad checksum */
+ u_quad_t icp6s_badlen; /* calculated bound mismatch */
+ /*
+ * number of responses: this member is inherited from netinet code, but
+ * for netinet6 code, it is already available in icp6s_outhist[].
+ */
+ u_quad_t icp6s_reflect;
+ u_quad_t icp6s_inhist[256];
+ u_quad_t icp6s_nd_toomanyopt; /* too many ND options */
+ struct icmp6errstat icp6s_outerrhist;
+#define icp6s_odst_unreach_noroute \
+ icp6s_outerrhist.icp6errs_dst_unreach_noroute
+#define icp6s_odst_unreach_admin icp6s_outerrhist.icp6errs_dst_unreach_admin
+#define icp6s_odst_unreach_beyondscope \
+ icp6s_outerrhist.icp6errs_dst_unreach_beyondscope
+#define icp6s_odst_unreach_addr icp6s_outerrhist.icp6errs_dst_unreach_addr
+#define icp6s_odst_unreach_noport icp6s_outerrhist.icp6errs_dst_unreach_noport
+#define icp6s_opacket_too_big icp6s_outerrhist.icp6errs_packet_too_big
+#define icp6s_otime_exceed_transit \
+ icp6s_outerrhist.icp6errs_time_exceed_transit
+#define icp6s_otime_exceed_reassembly \
+ icp6s_outerrhist.icp6errs_time_exceed_reassembly
+#define icp6s_oparamprob_header icp6s_outerrhist.icp6errs_paramprob_header
+#define icp6s_oparamprob_nextheader \
+ icp6s_outerrhist.icp6errs_paramprob_nextheader
+#define icp6s_oparamprob_option icp6s_outerrhist.icp6errs_paramprob_option
+#define icp6s_oredirect icp6s_outerrhist.icp6errs_redirect
+#define icp6s_ounknown icp6s_outerrhist.icp6errs_unknown
+ u_quad_t icp6s_pmtuchg; /* path MTU changes */
+ u_quad_t icp6s_nd_badopt; /* bad ND options */
+ u_quad_t icp6s_badns; /* bad neighbor solicitation */
+ u_quad_t icp6s_badna; /* bad neighbor advertisement */
+ u_quad_t icp6s_badrs; /* bad router solicitation */
+ u_quad_t icp6s_badra; /* bad router advertisement */
+ u_quad_t icp6s_badredirect; /* bad redirect message */
+};
+
+#ifdef _KERNEL
+/*
+ * In-kernel consumers can use these accessor macros directly to update
+ * stats.
+ */
+#define ICMP6STAT_ADD(name, val) V_icmp6stat.name += (val)
+#define ICMP6STAT_INC(name) ICMP6STAT_ADD(name, 1)
+
+/*
+ * Kernel module consumers must use this accessor macro.
+ */
+void kmod_icmp6stat_inc(int statnum);
+#define KMOD_ICMP6STAT_INC(name) \
+ kmod_icmp6stat_inc(offsetof(struct icmp6stat, name) / sizeof(u_quad_t))
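+
+/*
+ * The offsetof/sizeof arithmetic turns a field name into an index
+ * into struct icmp6stat viewed as an array of u_quad_t; e.g.
+ * KMOD_ICMP6STAT_INC(icp6s_canterror) passes index 8 / 8 = 1, since
+ * icp6s_canterror is the second u_quad_t in the structure.
+ */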
+#endif
+
+/*
+ * Names for ICMP sysctl objects
+ */
+#define ICMPV6CTL_STATS 1
+#define ICMPV6CTL_REDIRACCEPT 2 /* accept/process redirects */
+#define ICMPV6CTL_REDIRTIMEOUT 3 /* redirect cache time */
+#if 0 /*obsoleted*/
+#define ICMPV6CTL_ERRRATELIMIT 5 /* ICMPv6 error rate limitation */
+#endif
+#define ICMPV6CTL_ND6_PRUNE 6
+#define ICMPV6CTL_ND6_DELAY 8
+#define ICMPV6CTL_ND6_UMAXTRIES 9
+#define ICMPV6CTL_ND6_MMAXTRIES 10
+#define ICMPV6CTL_ND6_USELOOPBACK 11
+/*#define ICMPV6CTL_ND6_PROXYALL 12 obsoleted, do not reuse here */
+#define ICMPV6CTL_NODEINFO 13
+#define ICMPV6CTL_ERRPPSLIMIT 14 /* ICMPv6 error pps limitation */
+#define ICMPV6CTL_ND6_MAXNUDHINT 15
+#define ICMPV6CTL_MTUDISC_HIWAT 16
+#define ICMPV6CTL_MTUDISC_LOWAT 17
+#define ICMPV6CTL_ND6_DEBUG 18
+#define ICMPV6CTL_ND6_DRLIST 19
+#define ICMPV6CTL_ND6_PRLIST 20
+#define ICMPV6CTL_MLD_MAXSRCFILTER 21
+#define ICMPV6CTL_MLD_SOMAXSRC 22
+#define ICMPV6CTL_MLD_VERSION 23
+#define ICMPV6CTL_ND6_MAXQLEN 24
+#define ICMPV6CTL_MAXID 25
+
+#define RTF_PROBEMTU RTF_PROTO1
+
+#ifdef _KERNEL
+# ifdef __STDC__
+struct rtentry;
+struct rttimer;
+struct in6_multi;
+# endif
+void icmp6_paramerror(struct mbuf *, int);
+void icmp6_error(struct mbuf *, int, int, int);
+void icmp6_error2(struct mbuf *, int, int, int, struct ifnet *);
+int icmp6_input(struct mbuf **, int *, int);
+void icmp6_fasttimo(void);
+void icmp6_slowtimo(void);
+void icmp6_reflect(struct mbuf *, size_t);
+void icmp6_prepare(struct mbuf *);
+void icmp6_redirect_input(struct mbuf *, int);
+void icmp6_redirect_output(struct mbuf *, struct rtentry *);
+
+struct ip6ctlparam;
+void icmp6_mtudisc_update(struct ip6ctlparam *, int);
+
+/* XXX: is this the right place for these macros? */
+#define icmp6_ifstat_inc(ifp, tag) \
+do { \
+ if (ifp) \
+ ((struct in6_ifextra *)((ifp)->if_afdata[AF_INET6]))->icmp6_ifstat->tag++; \
+} while (/*CONSTCOND*/ 0)
+
+#define icmp6_ifoutstat_inc(ifp, type, code) \
+do { \
+ icmp6_ifstat_inc(ifp, ifs6_out_msg); \
+ if (type < ICMP6_INFOMSG_MASK) \
+ icmp6_ifstat_inc(ifp, ifs6_out_error); \
+ switch (type) { \
+ case ICMP6_DST_UNREACH: \
+ icmp6_ifstat_inc(ifp, ifs6_out_dstunreach); \
+ if (code == ICMP6_DST_UNREACH_ADMIN) \
+ icmp6_ifstat_inc(ifp, ifs6_out_adminprohib); \
+ break; \
+ case ICMP6_PACKET_TOO_BIG: \
+ icmp6_ifstat_inc(ifp, ifs6_out_pkttoobig); \
+ break; \
+ case ICMP6_TIME_EXCEEDED: \
+ icmp6_ifstat_inc(ifp, ifs6_out_timeexceed); \
+ break; \
+ case ICMP6_PARAM_PROB: \
+ icmp6_ifstat_inc(ifp, ifs6_out_paramprob); \
+ break; \
+ case ICMP6_ECHO_REQUEST: \
+ icmp6_ifstat_inc(ifp, ifs6_out_echo); \
+ break; \
+ case ICMP6_ECHO_REPLY: \
+ icmp6_ifstat_inc(ifp, ifs6_out_echoreply); \
+ break; \
+ case MLD_LISTENER_QUERY: \
+ icmp6_ifstat_inc(ifp, ifs6_out_mldquery); \
+ break; \
+ case MLD_LISTENER_REPORT: \
+ icmp6_ifstat_inc(ifp, ifs6_out_mldreport); \
+ break; \
+ case MLD_LISTENER_DONE: \
+ icmp6_ifstat_inc(ifp, ifs6_out_mlddone); \
+ break; \
+ case ND_ROUTER_SOLICIT: \
+ icmp6_ifstat_inc(ifp, ifs6_out_routersolicit); \
+ break; \
+ case ND_ROUTER_ADVERT: \
+ icmp6_ifstat_inc(ifp, ifs6_out_routeradvert); \
+ break; \
+ case ND_NEIGHBOR_SOLICIT: \
+ icmp6_ifstat_inc(ifp, ifs6_out_neighborsolicit); \
+ break; \
+ case ND_NEIGHBOR_ADVERT: \
+ icmp6_ifstat_inc(ifp, ifs6_out_neighboradvert); \
+ break; \
+ case ND_REDIRECT: \
+ icmp6_ifstat_inc(ifp, ifs6_out_redirect); \
+ break; \
+ } \
+} while (/*CONSTCOND*/ 0)
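+
+/*
+ * Note: ICMP6_INFOMSG_MASK is 0x80, so the "type < ICMP6_INFOMSG_MASK"
+ * test above counts every type below 128 -- the ICMPv6 error message
+ * range -- against ifs6_out_error; types 128 and up are informational.
+ */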
+
+VNET_DECLARE(int, icmp6_rediraccept); /* accept/process redirects */
+VNET_DECLARE(int, icmp6_redirtimeout); /* cache time for redirect routes */
+
+#define V_icmp6_rediraccept VNET(icmp6_rediraccept)
+#define V_icmp6_redirtimeout VNET(icmp6_redirtimeout)
+
+#define ICMP6_NODEINFO_FQDNOK 0x1
+#define ICMP6_NODEINFO_NODEADDROK 0x2
+#define ICMP6_NODEINFO_TMPADDROK 0x4
+#define ICMP6_NODEINFO_GLOBALOK 0x8
+#endif /* _KERNEL */
+
+#endif /* not _NETINET_ICMP6_HH_ */
diff --git a/rtems/freebsd/netinet/icmp_var.h b/rtems/freebsd/netinet/icmp_var.h
new file mode 100644
index 00000000..d55fc4d3
--- /dev/null
+++ b/rtems/freebsd/netinet/icmp_var.h
@@ -0,0 +1,108 @@
+/*-
+ * Copyright (c) 1982, 1986, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)icmp_var.h 8.1 (Berkeley) 6/10/93
+ * $FreeBSD$
+ */
+
+#ifndef _NETINET_ICMP_VAR_HH_
+#define _NETINET_ICMP_VAR_HH_
+
+
+/*
+ * Variables related to this implementation
+ * of the internet control message protocol.
+ */
+struct icmpstat {
+/* statistics related to icmp packets generated */
+ u_long icps_error; /* # of calls to icmp_error */
+ u_long icps_oldshort; /* no error 'cuz old ip too short */
+ u_long icps_oldicmp; /* no error 'cuz old was icmp */
+ u_long icps_outhist[ICMP_MAXTYPE + 1];
+/* statistics related to input messages processed */
+ u_long icps_badcode; /* icmp_code out of range */
+ u_long icps_tooshort; /* packet < ICMP_MINLEN */
+ u_long icps_checksum; /* bad checksum */
+ u_long icps_badlen; /* calculated bound mismatch */
+ u_long icps_reflect; /* number of responses */
+ u_long icps_inhist[ICMP_MAXTYPE + 1];
+ u_long icps_bmcastecho; /* b/mcast echo requests dropped */
+ u_long icps_bmcasttstamp; /* b/mcast tstamp requests dropped */
+ u_long icps_badaddr; /* bad return address */
+ u_long icps_noroute; /* no route back */
+};
+
+#ifdef _KERNEL
+/*
+ * In-kernel consumers can use these accessor macros directly to update
+ * stats.
+ */
+#define ICMPSTAT_ADD(name, val) V_icmpstat.name += (val)
+#define ICMPSTAT_INC(name) ICMPSTAT_ADD(name, 1)
+
+/*
+ * Kernel module consumers must use this accessor macro.
+ */
+void kmod_icmpstat_inc(int statnum);
+#define KMOD_ICMPSTAT_INC(name) \
+ kmod_icmpstat_inc(offsetof(struct icmpstat, name) / sizeof(u_long))
+#endif
+
+/*
+ * Names for ICMP sysctl objects
+ */
+#define ICMPCTL_MASKREPL 1 /* allow replies to netmask requests */
+#define ICMPCTL_STATS 2 /* statistics (read-only) */
+#define ICMPCTL_ICMPLIM 3
+#define ICMPCTL_MAXID 4
+
+#define ICMPCTL_NAMES { \
+ { 0, 0 }, \
+ { "maskrepl", CTLTYPE_INT }, \
+ { "stats", CTLTYPE_STRUCT }, \
+ { "icmplim", CTLTYPE_INT }, \
+}
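+
+/*
+ * Each entry's position in ICMPCTL_NAMES matches the corresponding
+ * ICMPCTL_* value above (slot 0 is unused), e.g. "maskrepl" sits at
+ * index ICMPCTL_MASKREPL.
+ */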
+
+#ifdef _KERNEL
+SYSCTL_DECL(_net_inet_icmp);
+
+VNET_DECLARE(struct icmpstat, icmpstat); /* icmp statistics. */
+#define V_icmpstat VNET(icmpstat)
+
+extern int badport_bandlim(int);
+#define BANDLIM_UNLIMITED -1
+#define BANDLIM_ICMP_UNREACH 0
+#define BANDLIM_ICMP_ECHO 1
+#define BANDLIM_ICMP_TSTAMP 2
+#define BANDLIM_RST_CLOSEDPORT 3 /* No connection, and no listeners */
+#define BANDLIM_RST_OPENPORT 4 /* No connection, listener */
+#define BANDLIM_ICMP6_UNREACH 5
+#define BANDLIM_MAX 5
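+
+/*
+ * badport_bandlim() rate-limits ICMP(v6) error responses; callers are
+ * expected to test its return value before replying, roughly:
+ *
+ *	if (badport_bandlim(BANDLIM_ICMP_UNREACH) < 0)
+ *		drop the packet instead of replying (limit exceeded)
+ */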
+#endif
+
+#endif
diff --git a/rtems/freebsd/netinet/if_atm.c b/rtems/freebsd/netinet/if_atm.c
new file mode 100644
index 00000000..a15f4b3e
--- /dev/null
+++ b/rtems/freebsd/netinet/if_atm.c
@@ -0,0 +1,366 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/* $NetBSD: if_atm.c,v 1.6 1996/10/13 02:03:01 christos Exp $ */
+
+/*-
+ *
+ * Copyright (c) 1996 Charles D. Cranor and Washington University.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Charles D. Cranor and
+ * Washington University.
+ * 4. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * IP <=> ATM address resolution.
+ */
+#include <rtems/freebsd/local/opt_inet.h>
+#include <rtems/freebsd/local/opt_inet6.h>
+#include <rtems/freebsd/local/opt_natm.h>
+
+#if defined(INET) || defined(INET6)
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/queue.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/sockio.h>
+#include <rtems/freebsd/sys/syslog.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/if_dl.h>
+#include <rtems/freebsd/net/route.h>
+#include <rtems/freebsd/net/if_atm.h>
+
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/if_atm.h>
+
+#ifdef NATM
+#include <rtems/freebsd/netnatm/natm.h>
+#endif
+
+#define SDL(s) ((struct sockaddr_dl *)s)
+
+#define GET3BYTE(V, A, L) do { \
+ (V) = ((A)[0] << 16) | ((A)[1] << 8) | (A)[2]; \
+ (A) += 3; \
+ (L) -= 3; \
+ } while (0)
+
+#define GET2BYTE(V, A, L) do { \
+ (V) = ((A)[0] << 8) | (A)[1]; \
+ (A) += 2; \
+ (L) -= 2; \
+ } while (0)
+
+#define GET1BYTE(V, A, L) do { \
+ (V) = *(A)++; \
+ (L)--; \
+ } while (0)
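+
+/*
+ * The GET*BYTE() macros consume bytes from buffer "A" (decrementing the
+ * remaining length "L") in network byte order; e.g. GET3BYTE() over the
+ * bytes { 0x01, 0x02, 0x03 } stores 0x010203 in "V" and advances the
+ * pointer by three.
+ */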
+
+
+/*
+ * atm_rtrequest: handle ATM rt request (in support of generic code)
+ * inputs: "req" = request code
+ * "rt" = route entry
+ * "info" = rt_addrinfo
+ */
+void
+atm_rtrequest(int req, struct rtentry *rt, struct rt_addrinfo *info)
+{
+ struct sockaddr *gate = rt->rt_gateway;
+ struct atmio_openvcc op;
+ struct atmio_closevcc cl;
+ u_char *addr;
+ u_int alen;
+#ifdef NATM
+ struct sockaddr_in *sin;
+ struct natmpcb *npcb = NULL;
+#endif
+ static struct sockaddr_dl null_sdl = {sizeof(null_sdl), AF_LINK};
+
+ if (rt->rt_flags & RTF_GATEWAY) /* link level requests only */
+ return;
+
+ switch (req) {
+
+ case RTM_RESOLVE: /* resolve: only happens when cloning */
+ printf("atm_rtrequest: RTM_RESOLVE request detected?\n");
+ break;
+
+ case RTM_ADD:
+ /*
+ * route added by a command (e.g. ifconfig, route, arp...).
+ *
+ * first check to see if this is not a host route, in which
+ * case we are being called via "ifconfig" to set the address.
+ */
+ if ((rt->rt_flags & RTF_HOST) == 0) {
+ rt_setgate(rt,rt_key(rt),(struct sockaddr *)&null_sdl);
+ gate = rt->rt_gateway;
+ SDL(gate)->sdl_type = rt->rt_ifp->if_type;
+ SDL(gate)->sdl_index = rt->rt_ifp->if_index;
+ break;
+ }
+
+ if (gate->sa_family != AF_LINK ||
+ gate->sa_len < sizeof(null_sdl)) {
+ log(LOG_DEBUG, "atm_rtrequest: bad gateway value");
+ break;
+ }
+
+ KASSERT(rt->rt_ifp->if_ioctl != NULL,
+ ("atm_rtrequest: null ioctl"));
+
+ /*
+ * Parse and verify the link level address as
+ * an open request
+ */
+#ifdef NATM
+ NATM_LOCK();
+#endif
+ bzero(&op, sizeof(op));
+ addr = LLADDR(SDL(gate));
+ alen = SDL(gate)->sdl_alen;
+ if (alen < 4) {
+ printf("%s: bad link-level address\n", __func__);
+ goto failed;
+ }
+
+ if (alen == 4) {
+ /* old type address */
+ GET1BYTE(op.param.flags, addr, alen);
+ GET1BYTE(op.param.vpi, addr, alen);
+ GET2BYTE(op.param.vci, addr, alen);
+ op.param.traffic = ATMIO_TRAFFIC_UBR;
+ op.param.aal = (op.param.flags & ATM_PH_AAL5) ?
+ ATMIO_AAL_5 : ATMIO_AAL_0;
+ } else {
+ /* new address */
+ op.param.aal = ATMIO_AAL_5;
+
+ GET1BYTE(op.param.flags, addr, alen);
+ op.param.flags &= ATM_PH_LLCSNAP;
+
+ GET1BYTE(op.param.vpi, addr, alen);
+ GET2BYTE(op.param.vci, addr, alen);
+
+ GET1BYTE(op.param.traffic, addr, alen);
+
+ switch (op.param.traffic) {
+
+ case ATMIO_TRAFFIC_UBR:
+ if (alen >= 3)
+ GET3BYTE(op.param.tparam.pcr,
+ addr, alen);
+ break;
+
+ case ATMIO_TRAFFIC_CBR:
+ if (alen < 3)
+ goto bad_param;
+ GET3BYTE(op.param.tparam.pcr, addr, alen);
+ break;
+
+ case ATMIO_TRAFFIC_VBR:
+ if (alen < 3 * 3)
+ goto bad_param;
+ GET3BYTE(op.param.tparam.pcr, addr, alen);
+ GET3BYTE(op.param.tparam.scr, addr, alen);
+ GET3BYTE(op.param.tparam.mbs, addr, alen);
+ break;
+
+ case ATMIO_TRAFFIC_ABR:
+ if (alen < 4 * 3 + 2 + 1 * 2 + 3)
+ goto bad_param;
+ GET3BYTE(op.param.tparam.pcr, addr, alen);
+ GET3BYTE(op.param.tparam.mcr, addr, alen);
+ GET3BYTE(op.param.tparam.icr, addr, alen);
+ GET3BYTE(op.param.tparam.tbe, addr, alen);
+ GET1BYTE(op.param.tparam.nrm, addr, alen);
+ GET1BYTE(op.param.tparam.trm, addr, alen);
+ GET2BYTE(op.param.tparam.adtf, addr, alen);
+ GET1BYTE(op.param.tparam.rif, addr, alen);
+ GET1BYTE(op.param.tparam.rdf, addr, alen);
+ GET1BYTE(op.param.tparam.cdf, addr, alen);
+ break;
+
+ default:
+ bad_param:
+ printf("%s: bad traffic params\n", __func__);
+ goto failed;
+ }
+ }
+ op.param.rmtu = op.param.tmtu = rt->rt_ifp->if_mtu;
+#ifdef NATM
+ /*
+ * let native ATM know we are using this VCI/VPI
+ * (i.e. reserve it)
+ */
+ sin = (struct sockaddr_in *) rt_key(rt);
+ if (sin->sin_family != AF_INET)
+ goto failed;
+ npcb = npcb_add(NULL, rt->rt_ifp, op.param.vci, op.param.vpi);
+ if (npcb == NULL)
+ goto failed;
+ npcb->npcb_flags |= NPCB_IP;
+ npcb->ipaddr.s_addr = sin->sin_addr.s_addr;
+ /* XXX: move npcb to llinfo when ATM ARP is ready */
+ rt->rt_llinfo = (caddr_t) npcb;
+ rt->rt_flags |= RTF_LLINFO;
+#endif
+ /*
+ * let the lower level know this circuit is active
+ */
+ op.rxhand = NULL;
+ op.param.flags |= ATMIO_FLAG_ASYNC;
+ if (rt->rt_ifp->if_ioctl(rt->rt_ifp, SIOCATMOPENVCC,
+ (caddr_t)&op) != 0) {
+ printf("atm: couldn't add VC\n");
+ goto failed;
+ }
+
+ SDL(gate)->sdl_type = rt->rt_ifp->if_type;
+ SDL(gate)->sdl_index = rt->rt_ifp->if_index;
+
+#ifdef NATM
+ NATM_UNLOCK();
+#endif
+ break;
+
+failed:
+#ifdef NATM
+ if (npcb) {
+ npcb_free(npcb, NPCB_DESTROY);
+ rt->rt_llinfo = NULL;
+ rt->rt_flags &= ~RTF_LLINFO;
+ }
+ NATM_UNLOCK();
+#endif
+ /* mark as invalid. We cannot RTM_DELETE the route from
+ * here, because the recursive call to rtrequest1 does
+ * not really work. */
+ rt->rt_flags |= RTF_REJECT;
+ break;
+
+ case RTM_DELETE:
+#ifdef NATM
+ /*
+ * tell native ATM we are done with this VC
+ */
+ if (rt->rt_flags & RTF_LLINFO) {
+ NATM_LOCK();
+ npcb_free((struct natmpcb *)rt->rt_llinfo,
+ NPCB_DESTROY);
+ rt->rt_llinfo = NULL;
+ rt->rt_flags &= ~RTF_LLINFO;
+ NATM_UNLOCK();
+ }
+#endif
+ /*
+ * tell the lower layer to disable this circuit
+ */
+		bzero(&cl, sizeof(cl));	/* zero the close request we fill in */
+ addr = LLADDR(SDL(gate));
+ addr++;
+ cl.vpi = *addr++;
+ cl.vci = *addr++ << 8;
+ cl.vci |= *addr++;
+ (void)rt->rt_ifp->if_ioctl(rt->rt_ifp, SIOCATMCLOSEVCC,
+ (caddr_t)&cl);
+ break;
+ }
+}
+
+/*
+ * atmresolve:
+ * inputs:
+ * [1] "rt" = the link level route to use (or null if need to look one up)
+ * [2] "m" = mbuf containing the data to be sent
+ * [3] "dst" = sockaddr_in (IP) address of dest.
+ * output:
+ * [4] "desten" = ATM pseudo header which we will fill in VPI/VCI info
+ * return:
+ * 0 == resolve FAILED; note that "m" gets m_freem'd in this case
+ * 1 == resolve OK; desten contains result
+ *
+ * XXX: will need more work if we wish to support ATMARP in the kernel,
+ * but this is enough for PVCs entered via the "route" command.
+ */
+int
+atmresolve(struct rtentry *rt, struct mbuf *m, struct sockaddr *dst,
+ struct atm_pseudohdr *desten)
+{
+ struct sockaddr_dl *sdl;
+
+ if (m->m_flags & (M_BCAST | M_MCAST)) {
+ log(LOG_INFO,
+ "atmresolve: BCAST/MCAST packet detected/dumped\n");
+ goto bad;
+ }
+
+ if (rt == NULL) {
+ rt = RTALLOC1(dst, 0); /* link level on table 0 XXX MRT */
+ if (rt == NULL)
+ goto bad; /* failed */
+ RT_REMREF(rt); /* don't keep LL references */
+ if ((rt->rt_flags & RTF_GATEWAY) != 0 ||
+ rt->rt_gateway->sa_family != AF_LINK) {
+ RT_UNLOCK(rt);
+ goto bad;
+ }
+ RT_UNLOCK(rt);
+ }
+
+ /*
+ * note that rt_gateway is a sockaddr_dl which contains the
+ * atm_pseudohdr data structure for this route. we currently
+ * don't need any rt_llinfo info (but will if we want to support
+ * ATM ARP [c.f. if_ether.c]).
+ */
+ sdl = SDL(rt->rt_gateway);
+
+ /*
+	 * If the address family and length are valid, the address
+	 * has been resolved; otherwise, try to resolve it.
+ */
+ if (sdl->sdl_family == AF_LINK && sdl->sdl_alen >= sizeof(*desten)) {
+ bcopy(LLADDR(sdl), desten, sizeof(*desten));
+ return (1); /* ok, go for it! */
+ }
+
+ /*
+ * we got an entry, but it doesn't have valid link address
+ * info in it (it is prob. the interface route, which has
+ * sdl_alen == 0). dump packet. (fall through to "bad").
+ */
+bad:
+ m_freem(m);
+ return (0);
+}
+#endif /* INET || INET6 */
diff --git a/rtems/freebsd/netinet/if_atm.h b/rtems/freebsd/netinet/if_atm.h
new file mode 100644
index 00000000..bd8b5143
--- /dev/null
+++ b/rtems/freebsd/netinet/if_atm.h
@@ -0,0 +1,47 @@
+/* $FreeBSD$ */
+/* $NetBSD: if_atm.h,v 1.2 1996/07/03 17:17:17 chuck Exp $ */
+
+/*-
+ *
+ * Copyright (c) 1996 Charles D. Cranor and Washington University.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Charles D. Cranor and
+ * Washington University.
+ * 4. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * if_atm.h
+ */
+
+struct atm_pseudohdr;
+struct mbuf;
+struct rtentry;
+struct sockaddr;
+
+void atm_rtrequest(int, struct rtentry *, struct rt_addrinfo *);
+int atmresolve(struct rtentry *, struct mbuf *, struct sockaddr *,
+ struct atm_pseudohdr *);
diff --git a/rtems/freebsd/netinet/if_ether.c b/rtems/freebsd/netinet/if_ether.c
new file mode 100644
index 00000000..c100bca4
--- /dev/null
+++ b/rtems/freebsd/netinet/if_ether.c
@@ -0,0 +1,859 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 1982, 1986, 1988, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)if_ether.c 8.1 (Berkeley) 6/10/93
+ */
+
+/*
+ * Ethernet address resolution protocol.
+ * TODO:
+ * add "inuse/lock" bit (or ref. count) along with valid bit
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/freebsd/local/opt_inet.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/queue.h>
+#include <rtems/freebsd/sys/sysctl.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/proc.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/syslog.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/if_dl.h>
+#include <rtems/freebsd/net/if_types.h>
+#include <rtems/freebsd/net/netisr.h>
+#include <rtems/freebsd/net/if_llc.h>
+#include <rtems/freebsd/net/ethernet.h>
+#include <rtems/freebsd/net/route.h>
+#include <rtems/freebsd/net/vnet.h>
+
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/in_var.h>
+#include <rtems/freebsd/net/if_llatbl.h>
+#include <rtems/freebsd/netinet/if_ether.h>
+#if defined(INET) || defined(INET6)
+#include <rtems/freebsd/netinet/ip_carp.h>
+#endif
+
+#include <rtems/freebsd/net/if_arc.h>
+#include <rtems/freebsd/net/iso88025.h>
+
+#include <rtems/freebsd/security/mac/mac_framework.h>
+
+#define SIN(s) ((struct sockaddr_in *)s)
+#define SDL(s) ((struct sockaddr_dl *)s)
+
+SYSCTL_DECL(_net_link_ether);
+SYSCTL_NODE(_net_link_ether, PF_INET, inet, CTLFLAG_RW, 0, "");
+SYSCTL_NODE(_net_link_ether, PF_ARP, arp, CTLFLAG_RW, 0, "");
+
+/* timer values */
+static VNET_DEFINE(int, arpt_keep) = (20*60); /* once resolved, good for 20
+ * minutes */
+static VNET_DEFINE(int, arp_maxtries) = 5;
+VNET_DEFINE(int, useloopback) = 1; /* use loopback interface for
+ * local traffic */
+static VNET_DEFINE(int, arp_proxyall) = 0;
+static VNET_DEFINE(int, arpt_down) = 20; /* keep incomplete entries for
+ * 20 seconds */
+static VNET_DEFINE(struct arpstat, arpstat); /* ARP statistics, see if_arp.h */
+
+#define V_arpt_keep VNET(arpt_keep)
+#define V_arpt_down VNET(arpt_down)
+#define V_arp_maxtries VNET(arp_maxtries)
+#define V_arp_proxyall VNET(arp_proxyall)
+#define V_arpstat VNET(arpstat)
+
+SYSCTL_VNET_INT(_net_link_ether_inet, OID_AUTO, max_age, CTLFLAG_RW,
+ &VNET_NAME(arpt_keep), 0,
+ "ARP entry lifetime in seconds");
+SYSCTL_VNET_INT(_net_link_ether_inet, OID_AUTO, maxtries, CTLFLAG_RW,
+ &VNET_NAME(arp_maxtries), 0,
+ "ARP resolution attempts before returning error");
+SYSCTL_VNET_INT(_net_link_ether_inet, OID_AUTO, useloopback, CTLFLAG_RW,
+ &VNET_NAME(useloopback), 0,
+ "Use the loopback interface for local traffic");
+SYSCTL_VNET_INT(_net_link_ether_inet, OID_AUTO, proxyall, CTLFLAG_RW,
+ &VNET_NAME(arp_proxyall), 0,
+ "Enable proxy ARP for all suitable requests");
+SYSCTL_VNET_STRUCT(_net_link_ether_arp, OID_AUTO, stats, CTLFLAG_RW,
+ &VNET_NAME(arpstat), arpstat,
+ "ARP statistics (struct arpstat, net/if_arp.h)");
+
+static void arp_init(void);
+void arprequest(struct ifnet *,
+ struct in_addr *, struct in_addr *, u_char *);
+static void arpintr(struct mbuf *);
+static void arptimer(void *);
+#ifdef INET
+static void in_arpinput(struct mbuf *);
+#endif
+
+static const struct netisr_handler arp_nh = {
+ .nh_name = "arp",
+ .nh_handler = arpintr,
+ .nh_proto = NETISR_ARP,
+ .nh_policy = NETISR_POLICY_SOURCE,
+};
+
+#ifdef AF_INET
+void arp_ifscrub(struct ifnet *ifp, uint32_t addr);
+
+/*
+ * called by in_ifscrub to remove entry from the table when
+ * the interface goes away
+ */
+void
+arp_ifscrub(struct ifnet *ifp, uint32_t addr)
+{
+ struct sockaddr_in addr4;
+
+ bzero((void *)&addr4, sizeof(addr4));
+ addr4.sin_len = sizeof(addr4);
+ addr4.sin_family = AF_INET;
+ addr4.sin_addr.s_addr = addr;
+ IF_AFDATA_LOCK(ifp);
+ lla_lookup(LLTABLE(ifp), (LLE_DELETE | LLE_IFADDR),
+ (struct sockaddr *)&addr4);
+ IF_AFDATA_UNLOCK(ifp);
+}
+#endif
+
+/*
+ * Timeout routine. Age arp_tab entries periodically.
+ */
+static void
+arptimer(void *arg)
+{
+ struct ifnet *ifp;
+ struct llentry *lle;
+
+ KASSERT(arg != NULL, ("%s: arg NULL", __func__));
+ lle = (struct llentry *)arg;
+ ifp = lle->lle_tbl->llt_ifp;
+ CURVNET_SET(ifp->if_vnet);
+ IF_AFDATA_LOCK(ifp);
+ LLE_WLOCK(lle);
+ if (lle->la_flags & LLE_STATIC)
+ LLE_WUNLOCK(lle);
+ else {
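+		/*
+		 * callout_pending() is false once the callout has fired,
+		 * and callout_active() stays true unless someone stopped
+		 * or rescheduled the timer; together they mean the entry
+		 * has genuinely expired and may be freed (see callout(9)).
+		 */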
+ if (!callout_pending(&lle->la_timer) &&
+ callout_active(&lle->la_timer)) {
+ callout_stop(&lle->la_timer);
+ LLE_REMREF(lle);
+ (void) llentry_free(lle);
+ ARPSTAT_INC(timeouts);
+ }
+#ifdef DIAGNOSTIC
+ else {
+ struct sockaddr *l3addr = L3_ADDR(lle);
+ log(LOG_INFO,
+ "arptimer issue: %p, IPv4 address: \"%s\"\n", lle,
+ inet_ntoa(
+ ((const struct sockaddr_in *)l3addr)->sin_addr));
+ }
+#endif
+ }
+ IF_AFDATA_UNLOCK(ifp);
+ CURVNET_RESTORE();
+}
+
+/*
+ * Broadcast an ARP request. Caller specifies:
+ * - arp header source ip address
+ * - arp header target ip address
+ * - arp header source ethernet address
+ */
+void
+arprequest(struct ifnet *ifp, struct in_addr *sip, struct in_addr *tip,
+ u_char *enaddr)
+{
+ struct mbuf *m;
+ struct arphdr *ah;
+ struct sockaddr sa;
+
+ if (sip == NULL) {
+ /* XXX don't believe this can happen (or explain why) */
+ /*
+ * The caller did not supply a source address, try to find
+ * a compatible one among those assigned to this interface.
+ */
+ struct ifaddr *ifa;
+
+ TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
+ if (!ifa->ifa_addr ||
+ ifa->ifa_addr->sa_family != AF_INET)
+ continue;
+ sip = &SIN(ifa->ifa_addr)->sin_addr;
+ if (0 == ((sip->s_addr ^ tip->s_addr) &
+ SIN(ifa->ifa_netmask)->sin_addr.s_addr) )
+ break; /* found it. */
+ }
+ if (sip == NULL) {
+ printf("%s: cannot find matching address\n", __func__);
+ return;
+ }
+ }
+
+ if ((m = m_gethdr(M_DONTWAIT, MT_DATA)) == NULL)
+ return;
+ m->m_len = sizeof(*ah) + 2*sizeof(struct in_addr) +
+ 2*ifp->if_data.ifi_addrlen;
+ m->m_pkthdr.len = m->m_len;
+ MH_ALIGN(m, m->m_len);
+ ah = mtod(m, struct arphdr *);
+ bzero((caddr_t)ah, m->m_len);
+#ifdef MAC
+ mac_netinet_arp_send(ifp, m);
+#endif
+ ah->ar_pro = htons(ETHERTYPE_IP);
+ ah->ar_hln = ifp->if_addrlen; /* hardware address length */
+ ah->ar_pln = sizeof(struct in_addr); /* protocol address length */
+ ah->ar_op = htons(ARPOP_REQUEST);
+ bcopy((caddr_t)enaddr, (caddr_t)ar_sha(ah), ah->ar_hln);
+ bcopy((caddr_t)sip, (caddr_t)ar_spa(ah), ah->ar_pln);
+ bcopy((caddr_t)tip, (caddr_t)ar_tpa(ah), ah->ar_pln);
+ sa.sa_family = AF_ARP;
+ sa.sa_len = 2;
+ m->m_flags |= M_BCAST;
+ (*ifp->if_output)(ifp, m, &sa, NULL);
+ ARPSTAT_INC(txrequests);
+}
+
+/*
+ * Resolve an IP address into an ethernet address.
+ * On input:
+ * ifp is the interface we use
+ * rt0 is the route to the final destination (possibly useless)
+ * m is the mbuf. May be NULL if we don't have a packet.
+ * dst is the next hop,
+ * desten is where we want the address.
+ *
+ * On success, desten is filled in and the function returns 0;
+ * If the packet must be held pending resolution, we return EWOULDBLOCK
+ * On other errors, we return the corresponding error code.
+ * Note that m_freem() handles NULL.
+ */
+int
+arpresolve(struct ifnet *ifp, struct rtentry *rt0, struct mbuf *m,
+ struct sockaddr *dst, u_char *desten, struct llentry **lle)
+{
+ struct llentry *la = 0;
+ u_int flags = 0;
+ int error, renew;
+
+ *lle = NULL;
+ if (m != NULL) {
+ if (m->m_flags & M_BCAST) {
+ /* broadcast */
+ (void)memcpy(desten,
+ ifp->if_broadcastaddr, ifp->if_addrlen);
+ return (0);
+ }
+ if (m->m_flags & M_MCAST && ifp->if_type != IFT_ARCNET) {
+ /* multicast */
+ ETHER_MAP_IP_MULTICAST(&SIN(dst)->sin_addr, desten);
+ return (0);
+ }
+ }
+ /* XXXXX
+ */
+retry:
+ IF_AFDATA_RLOCK(ifp);
+ la = lla_lookup(LLTABLE(ifp), flags, dst);
+ IF_AFDATA_RUNLOCK(ifp);
+ if ((la == NULL) && ((flags & LLE_EXCLUSIVE) == 0)
+ && ((ifp->if_flags & (IFF_NOARP | IFF_STATICARP)) == 0)) {
+ flags |= (LLE_CREATE | LLE_EXCLUSIVE);
+ IF_AFDATA_WLOCK(ifp);
+ la = lla_lookup(LLTABLE(ifp), flags, dst);
+ IF_AFDATA_WUNLOCK(ifp);
+ }
+ if (la == NULL) {
+ if (flags & LLE_CREATE)
+ log(LOG_DEBUG,
+ "arpresolve: can't allocate llinfo for %s\n",
+ inet_ntoa(SIN(dst)->sin_addr));
+ m_freem(m);
+ return (EINVAL);
+ }
+
+ if ((la->la_flags & LLE_VALID) &&
+ ((la->la_flags & LLE_STATIC) || la->la_expire > time_second)) {
+ bcopy(&la->ll_addr, desten, ifp->if_addrlen);
+ /*
+ * If entry has an expiry time and it is approaching,
+ * see if we need to send an ARP request within this
+ * arpt_down interval.
+ */
+ if (!(la->la_flags & LLE_STATIC) &&
+ time_second + la->la_preempt > la->la_expire) {
+ arprequest(ifp, NULL,
+ &SIN(dst)->sin_addr, IF_LLADDR(ifp));
+
+ la->la_preempt--;
+ }
+
+ *lle = la;
+ error = 0;
+ goto done;
+ }
+
+ if (la->la_flags & LLE_STATIC) { /* should not happen! */
+ log(LOG_DEBUG, "arpresolve: ouch, empty static llinfo for %s\n",
+ inet_ntoa(SIN(dst)->sin_addr));
+ m_freem(m);
+ error = EINVAL;
+ goto done;
+ }
+
+ renew = (la->la_asked == 0 || la->la_expire != time_second);
+ if ((renew || m != NULL) && (flags & LLE_EXCLUSIVE) == 0) {
+ flags |= LLE_EXCLUSIVE;
+ LLE_RUNLOCK(la);
+ goto retry;
+ }
+ /*
+ * There is an arptab entry, but no ethernet address
+ * response yet. Replace the held mbuf with this
+ * latest one.
+ */
+ if (m != NULL) {
+ if (la->la_hold != NULL) {
+ m_freem(la->la_hold);
+ ARPSTAT_INC(dropped);
+ }
+ la->la_hold = m;
+ if (renew == 0 && (flags & LLE_EXCLUSIVE)) {
+ flags &= ~LLE_EXCLUSIVE;
+ LLE_DOWNGRADE(la);
+ }
+
+ }
+ /*
+ * Return EWOULDBLOCK if we have tried less than arp_maxtries. It
+ * will be masked by ether_output(). Return EHOSTDOWN/EHOSTUNREACH
+ * if we have already sent arp_maxtries ARP requests. Retransmit the
+ * ARP request, but not faster than one request per second.
+ */
+ if (la->la_asked < V_arp_maxtries)
+ error = EWOULDBLOCK; /* First request. */
+ else
+ error = rt0 != NULL && (rt0->rt_flags & RTF_GATEWAY) ?
+ EHOSTUNREACH : EHOSTDOWN;
+
+ if (renew) {
+ int canceled;
+
+ LLE_ADDREF(la);
+ la->la_expire = time_second;
+ canceled = callout_reset(&la->la_timer, hz * V_arpt_down,
+ arptimer, la);
+ if (canceled)
+ LLE_REMREF(la);
+ la->la_asked++;
+ LLE_WUNLOCK(la);
+ arprequest(ifp, NULL, &SIN(dst)->sin_addr,
+ IF_LLADDR(ifp));
+ return (error);
+ }
+done:
+ if (flags & LLE_EXCLUSIVE)
+ LLE_WUNLOCK(la);
+ else
+ LLE_RUNLOCK(la);
+ return (error);
+}
+
+/*
+ * Common length and type checks are done here,
+ * then the protocol-specific routine is called.
+ */
+static void
+arpintr(struct mbuf *m)
+{
+ struct arphdr *ar;
+
+ if (m->m_len < sizeof(struct arphdr) &&
+ ((m = m_pullup(m, sizeof(struct arphdr))) == NULL)) {
+ log(LOG_ERR, "arp: runt packet -- m_pullup failed\n");
+ return;
+ }
+ ar = mtod(m, struct arphdr *);
+
+ if (ntohs(ar->ar_hrd) != ARPHRD_ETHER &&
+ ntohs(ar->ar_hrd) != ARPHRD_IEEE802 &&
+ ntohs(ar->ar_hrd) != ARPHRD_ARCNET &&
+ ntohs(ar->ar_hrd) != ARPHRD_IEEE1394) {
+ log(LOG_ERR, "arp: unknown hardware address format (0x%2D)\n",
+ (unsigned char *)&ar->ar_hrd, "");
+ m_freem(m);
+ return;
+ }
+
+ if (m->m_len < arphdr_len(ar)) {
+ if ((m = m_pullup(m, arphdr_len(ar))) == NULL) {
+ log(LOG_ERR, "arp: runt packet\n");
+ m_freem(m);
+ return;
+ }
+ ar = mtod(m, struct arphdr *);
+ }
+
+ ARPSTAT_INC(received);
+ switch (ntohs(ar->ar_pro)) {
+#ifdef INET
+ case ETHERTYPE_IP:
+ in_arpinput(m);
+ return;
+#endif
+ }
+ m_freem(m);
+}
+
+#ifdef INET
+/*
+ * ARP for Internet protocols on 10 Mb/s Ethernet.
+ * Algorithm is that given in RFC 826.
+ * In addition, a sanity check is performed on the sender
+ * protocol address, to catch impersonators.
+ * We no longer handle negotiations for use of trailer protocol:
+ * Formerly, ARP replied for protocol type ETHERTYPE_TRAIL sent
+ * along with IP replies if we wanted trailers sent to us,
+ * and also sent them in response to IP replies.
+ * This allowed either end to announce the desire to receive
+ * trailer packets.
+ * We no longer reply to requests for ETHERTYPE_TRAIL protocol either,
+ * but formerly didn't normally send requests.
+ */
+static int log_arp_wrong_iface = 1;
+static int log_arp_movements = 1;
+static int log_arp_permanent_modify = 1;
+
+SYSCTL_INT(_net_link_ether_inet, OID_AUTO, log_arp_wrong_iface, CTLFLAG_RW,
+ &log_arp_wrong_iface, 0,
+ "log arp packets arriving on the wrong interface");
+SYSCTL_INT(_net_link_ether_inet, OID_AUTO, log_arp_movements, CTLFLAG_RW,
+ &log_arp_movements, 0,
+ "log arp replies from MACs different than the one in the cache");
+SYSCTL_INT(_net_link_ether_inet, OID_AUTO, log_arp_permanent_modify, CTLFLAG_RW,
+ &log_arp_permanent_modify, 0,
+ "log arp replies from MACs different than the one in the permanent arp entry");
+
+
+static void
+in_arpinput(struct mbuf *m)
+{
+ struct arphdr *ah;
+ struct ifnet *ifp = m->m_pkthdr.rcvif;
+ struct llentry *la = NULL;
+ struct rtentry *rt;
+ struct ifaddr *ifa;
+ struct in_ifaddr *ia;
+ struct mbuf *hold;
+ struct sockaddr sa;
+ struct in_addr isaddr, itaddr, myaddr;
+ u_int8_t *enaddr = NULL;
+ int op, flags;
+ int req_len;
+ int bridged = 0, is_bridge = 0;
+ int carp_match = 0;
+ struct sockaddr_in sin;
+ sin.sin_len = sizeof(struct sockaddr_in);
+ sin.sin_family = AF_INET;
+ sin.sin_addr.s_addr = 0;
+
+ if (ifp->if_bridge)
+ bridged = 1;
+ if (ifp->if_type == IFT_BRIDGE)
+ is_bridge = 1;
+
+ req_len = arphdr_len2(ifp->if_addrlen, sizeof(struct in_addr));
+ if (m->m_len < req_len && (m = m_pullup(m, req_len)) == NULL) {
+ log(LOG_ERR, "in_arp: runt packet -- m_pullup failed\n");
+ return;
+ }
+
+ ah = mtod(m, struct arphdr *);
+ op = ntohs(ah->ar_op);
+ (void)memcpy(&isaddr, ar_spa(ah), sizeof (isaddr));
+ (void)memcpy(&itaddr, ar_tpa(ah), sizeof (itaddr));
+
+ if (op == ARPOP_REPLY)
+ ARPSTAT_INC(rxreplies);
+
+ /*
+ * For a bridge, we want to check the address irrespective
+ * of the receive interface. (This will change slightly
+ * when we have clusters of interfaces).
+	 * If the interface does not match, but the receiving interface
+ * is part of carp, we call carp_iamatch to see if this is a
+ * request for the virtual host ip.
+ * XXX: This is really ugly!
+ */
+ IN_IFADDR_RLOCK();
+ LIST_FOREACH(ia, INADDR_HASH(itaddr.s_addr), ia_hash) {
+ if (((bridged && ia->ia_ifp->if_bridge != NULL) ||
+ ia->ia_ifp == ifp) &&
+ itaddr.s_addr == ia->ia_addr.sin_addr.s_addr) {
+ ifa_ref(&ia->ia_ifa);
+ IN_IFADDR_RUNLOCK();
+ goto match;
+ }
+ if (ifp->if_carp != NULL &&
+ (*carp_iamatch_p)(ifp, ia, &isaddr, &enaddr) &&
+ itaddr.s_addr == ia->ia_addr.sin_addr.s_addr) {
+ carp_match = 1;
+ ifa_ref(&ia->ia_ifa);
+ IN_IFADDR_RUNLOCK();
+ goto match;
+ }
+ }
+ LIST_FOREACH(ia, INADDR_HASH(isaddr.s_addr), ia_hash)
+ if (((bridged && ia->ia_ifp->if_bridge != NULL) ||
+ ia->ia_ifp == ifp) &&
+ isaddr.s_addr == ia->ia_addr.sin_addr.s_addr) {
+ ifa_ref(&ia->ia_ifa);
+ IN_IFADDR_RUNLOCK();
+ goto match;
+ }
+
+#define BDG_MEMBER_MATCHES_ARP(addr, ifp, ia) \
+ (ia->ia_ifp->if_bridge == ifp->if_softc && \
+ !bcmp(IF_LLADDR(ia->ia_ifp), IF_LLADDR(ifp), ifp->if_addrlen) && \
+ addr == ia->ia_addr.sin_addr.s_addr)
+ /*
+ * Check the case when bridge shares its MAC address with
+ * some of its children, so packets are claimed by bridge
+ * itself (bridge_input() does it first), but they are really
+ * meant to be destined to the bridge member.
+ */
+ if (is_bridge) {
+ LIST_FOREACH(ia, INADDR_HASH(itaddr.s_addr), ia_hash) {
+ if (BDG_MEMBER_MATCHES_ARP(itaddr.s_addr, ifp, ia)) {
+ ifa_ref(&ia->ia_ifa);
+ ifp = ia->ia_ifp;
+ IN_IFADDR_RUNLOCK();
+ goto match;
+ }
+ }
+ }
+#undef BDG_MEMBER_MATCHES_ARP
+ IN_IFADDR_RUNLOCK();
+
+ /*
+ * No match, use the first inet address on the receive interface
+ * as a dummy address for the rest of the function.
+ */
+ IF_ADDR_LOCK(ifp);
+ TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link)
+ if (ifa->ifa_addr->sa_family == AF_INET) {
+ ia = ifatoia(ifa);
+ ifa_ref(ifa);
+ IF_ADDR_UNLOCK(ifp);
+ goto match;
+ }
+ IF_ADDR_UNLOCK(ifp);
+
+ /*
+ * If bridging, fall back to using any inet address.
+ */
+ IN_IFADDR_RLOCK();
+ if (!bridged || (ia = TAILQ_FIRST(&V_in_ifaddrhead)) == NULL) {
+ IN_IFADDR_RUNLOCK();
+ goto drop;
+ }
+ ifa_ref(&ia->ia_ifa);
+ IN_IFADDR_RUNLOCK();
+match:
+ if (!enaddr)
+ enaddr = (u_int8_t *)IF_LLADDR(ifp);
+ myaddr = ia->ia_addr.sin_addr;
+ ifa_free(&ia->ia_ifa);
+ if (!bcmp(ar_sha(ah), enaddr, ifp->if_addrlen))
+ goto drop; /* it's from me, ignore it. */
+ if (!bcmp(ar_sha(ah), ifp->if_broadcastaddr, ifp->if_addrlen)) {
+ log(LOG_ERR,
+ "arp: link address is broadcast for IP address %s!\n",
+ inet_ntoa(isaddr));
+ goto drop;
+ }
+ /*
+ * Warn if another host is using the same IP address, but only if the
+ * IP address isn't 0.0.0.0, which is used for DHCP only, in which
+ * case we suppress the warning to avoid false positive complaints of
+ * potential misconfiguration.
+ */
+ if (!bridged && isaddr.s_addr == myaddr.s_addr && myaddr.s_addr != 0) {
+ log(LOG_ERR,
+ "arp: %*D is using my IP address %s on %s!\n",
+ ifp->if_addrlen, (u_char *)ar_sha(ah), ":",
+ inet_ntoa(isaddr), ifp->if_xname);
+ itaddr = myaddr;
+ ARPSTAT_INC(dupips);
+ goto reply;
+ }
+ if (ifp->if_flags & IFF_STATICARP)
+ goto reply;
+
+ bzero(&sin, sizeof(sin));
+ sin.sin_len = sizeof(struct sockaddr_in);
+ sin.sin_family = AF_INET;
+ sin.sin_addr = isaddr;
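+	/*
+	 * Only ask lla_lookup() to create a new cache entry when the
+	 * request targets one of our own addresses (itaddr == myaddr);
+	 * for other targets an existing entry is only updated.
+	 */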
+ flags = (itaddr.s_addr == myaddr.s_addr) ? LLE_CREATE : 0;
+ flags |= LLE_EXCLUSIVE;
+ IF_AFDATA_LOCK(ifp);
+ la = lla_lookup(LLTABLE(ifp), flags, (struct sockaddr *)&sin);
+ IF_AFDATA_UNLOCK(ifp);
+ if (la != NULL) {
+ /* the following is not an error when doing bridging */
+ if (!bridged && la->lle_tbl->llt_ifp != ifp && !carp_match) {
+ if (log_arp_wrong_iface)
+ log(LOG_ERR, "arp: %s is on %s "
+ "but got reply from %*D on %s\n",
+ inet_ntoa(isaddr),
+ la->lle_tbl->llt_ifp->if_xname,
+ ifp->if_addrlen, (u_char *)ar_sha(ah), ":",
+ ifp->if_xname);
+ LLE_WUNLOCK(la);
+ goto reply;
+ }
+ if ((la->la_flags & LLE_VALID) &&
+ bcmp(ar_sha(ah), &la->ll_addr, ifp->if_addrlen)) {
+ if (la->la_flags & LLE_STATIC) {
+ LLE_WUNLOCK(la);
+ log(LOG_ERR,
+ "arp: %*D attempts to modify permanent "
+ "entry for %s on %s\n",
+ ifp->if_addrlen, (u_char *)ar_sha(ah), ":",
+ inet_ntoa(isaddr), ifp->if_xname);
+ goto reply;
+ }
+ if (log_arp_movements) {
+ log(LOG_INFO, "arp: %s moved from %*D "
+ "to %*D on %s\n",
+ inet_ntoa(isaddr),
+ ifp->if_addrlen,
+ (u_char *)&la->ll_addr, ":",
+ ifp->if_addrlen, (u_char *)ar_sha(ah), ":",
+ ifp->if_xname);
+ }
+ }
+
+ if (ifp->if_addrlen != ah->ar_hln) {
+ LLE_WUNLOCK(la);
+ log(LOG_WARNING,
+ "arp from %*D: addr len: new %d, i/f %d (ignored)",
+ ifp->if_addrlen, (u_char *) ar_sha(ah), ":",
+ ah->ar_hln, ifp->if_addrlen);
+ goto reply;
+ }
+ (void)memcpy(&la->ll_addr, ar_sha(ah), ifp->if_addrlen);
+ la->la_flags |= LLE_VALID;
+
+ if (!(la->la_flags & LLE_STATIC)) {
+ int canceled;
+
+ LLE_ADDREF(la);
+ la->la_expire = time_second + V_arpt_keep;
+ canceled = callout_reset(&la->la_timer,
+ hz * V_arpt_keep, arptimer, la);
+ if (canceled)
+ LLE_REMREF(la);
+ }
+ la->la_asked = 0;
+ la->la_preempt = V_arp_maxtries;
+ hold = la->la_hold;
+ if (hold != NULL) {
+ la->la_hold = NULL;
+ memcpy(&sa, L3_ADDR(la), sizeof(sa));
+ }
+ LLE_WUNLOCK(la);
+ if (hold != NULL)
+ (*ifp->if_output)(ifp, hold, &sa, NULL);
+ }
+reply:
+ if (op != ARPOP_REQUEST)
+ goto drop;
+ ARPSTAT_INC(rxrequests);
+
+ if (itaddr.s_addr == myaddr.s_addr) {
+		/* Shortcut: the receiving interface is the target. */
+ (void)memcpy(ar_tha(ah), ar_sha(ah), ah->ar_hln);
+ (void)memcpy(ar_sha(ah), enaddr, ah->ar_hln);
+ } else {
+ struct llentry *lle = NULL;
+
+ sin.sin_addr = itaddr;
+ IF_AFDATA_LOCK(ifp);
+ lle = lla_lookup(LLTABLE(ifp), 0, (struct sockaddr *)&sin);
+ IF_AFDATA_UNLOCK(ifp);
+
+ if ((lle != NULL) && (lle->la_flags & LLE_PUB)) {
+ (void)memcpy(ar_tha(ah), ar_sha(ah), ah->ar_hln);
+ (void)memcpy(ar_sha(ah), &lle->ll_addr, ah->ar_hln);
+ LLE_RUNLOCK(lle);
+ } else {
+
+ if (lle != NULL)
+ LLE_RUNLOCK(lle);
+
+ if (!V_arp_proxyall)
+ goto drop;
+
+ sin.sin_addr = itaddr;
+ /* XXX MRT use table 0 for arp reply */
+ rt = in_rtalloc1((struct sockaddr *)&sin, 0, 0UL, 0);
+ if (!rt)
+ goto drop;
+
+ /*
+ * Don't send proxies for nodes on the same interface
+ * as this one came out of, or we'll get into a fight
+ * over who claims what Ether address.
+ */
+ if (!rt->rt_ifp || rt->rt_ifp == ifp) {
+ RTFREE_LOCKED(rt);
+ goto drop;
+ }
+ RTFREE_LOCKED(rt);
+
+ (void)memcpy(ar_tha(ah), ar_sha(ah), ah->ar_hln);
+ (void)memcpy(ar_sha(ah), enaddr, ah->ar_hln);
+
+ /*
+ * Also check that the node which sent the ARP packet
+			 * is on the interface we expect it to be on. This
+ * avoids ARP chaos if an interface is connected to the
+ * wrong network.
+ */
+ sin.sin_addr = isaddr;
+
+ /* XXX MRT use table 0 for arp checks */
+ rt = in_rtalloc1((struct sockaddr *)&sin, 0, 0UL, 0);
+ if (!rt)
+ goto drop;
+ if (rt->rt_ifp != ifp) {
+ log(LOG_INFO, "arp_proxy: ignoring request"
+ " from %s via %s, expecting %s\n",
+ inet_ntoa(isaddr), ifp->if_xname,
+ rt->rt_ifp->if_xname);
+ RTFREE_LOCKED(rt);
+ goto drop;
+ }
+ RTFREE_LOCKED(rt);
+
+#ifdef DEBUG_PROXY
+ printf("arp: proxying for %s\n",
+ inet_ntoa(itaddr));
+#endif
+ }
+ }
+
+ if (itaddr.s_addr == myaddr.s_addr &&
+ IN_LINKLOCAL(ntohl(itaddr.s_addr))) {
+ /* RFC 3927 link-local IPv4; always reply by broadcast. */
+#ifdef DEBUG_LINKLOCAL
+ printf("arp: sending reply for link-local addr %s\n",
+ inet_ntoa(itaddr));
+#endif
+ m->m_flags |= M_BCAST;
+ m->m_flags &= ~M_MCAST;
+ } else {
+ /* default behaviour; never reply by broadcast. */
+ m->m_flags &= ~(M_BCAST|M_MCAST);
+ }
+ (void)memcpy(ar_tpa(ah), ar_spa(ah), ah->ar_pln);
+ (void)memcpy(ar_spa(ah), &itaddr, ah->ar_pln);
+ ah->ar_op = htons(ARPOP_REPLY);
+ ah->ar_pro = htons(ETHERTYPE_IP); /* let's be sure! */
+ m->m_len = sizeof(*ah) + (2 * ah->ar_pln) + (2 * ah->ar_hln);
+ m->m_pkthdr.len = m->m_len;
+ sa.sa_family = AF_ARP;
+ sa.sa_len = 2;
+ (*ifp->if_output)(ifp, m, &sa, NULL);
+ ARPSTAT_INC(txreplies);
+ return;
+
+drop:
+ m_freem(m);
+}
+#endif
+
+void
+arp_ifinit(struct ifnet *ifp, struct ifaddr *ifa)
+{
+ struct llentry *lle;
+
+ if (ntohl(IA_SIN(ifa)->sin_addr.s_addr) != INADDR_ANY) {
+ arprequest(ifp, &IA_SIN(ifa)->sin_addr,
+ &IA_SIN(ifa)->sin_addr, IF_LLADDR(ifp));
+ /*
+		 * The interface address is entered as a static entry
+		 * because the arp utility shows such an L2 entry as
+		 * permanent.
+ */
+ IF_AFDATA_LOCK(ifp);
+ lle = lla_lookup(LLTABLE(ifp), (LLE_CREATE | LLE_IFADDR | LLE_STATIC),
+ (struct sockaddr *)IA_SIN(ifa));
+ IF_AFDATA_UNLOCK(ifp);
+ if (lle == NULL)
+ log(LOG_INFO, "arp_ifinit: cannot create arp "
+ "entry for interface address\n");
+ else
+ LLE_RUNLOCK(lle);
+ }
+ ifa->ifa_rtrequest = NULL;
+}
+
+void
+arp_ifinit2(struct ifnet *ifp, struct ifaddr *ifa, u_char *enaddr)
+{
+ if (ntohl(IA_SIN(ifa)->sin_addr.s_addr) != INADDR_ANY)
+ arprequest(ifp, &IA_SIN(ifa)->sin_addr,
+ &IA_SIN(ifa)->sin_addr, enaddr);
+ ifa->ifa_rtrequest = NULL;
+}
+
+static void
+arp_init(void)
+{
+
+ netisr_register(&arp_nh);
+}
+SYSINIT(arp, SI_SUB_PROTO_DOMAIN, SI_ORDER_ANY, arp_init, 0);
diff --git a/rtems/freebsd/netinet/if_ether.h b/rtems/freebsd/netinet/if_ether.h
new file mode 100644
index 00000000..dcdb235c
--- /dev/null
+++ b/rtems/freebsd/netinet/if_ether.h
@@ -0,0 +1,122 @@
+/*-
+ * Copyright (c) 1982, 1986, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)if_ether.h 8.3 (Berkeley) 5/2/95
+ * $FreeBSD$
+ */
+
+#ifndef _NETINET_IF_ETHER_HH_
+#define _NETINET_IF_ETHER_HH_
+
+#include <rtems/freebsd/net/ethernet.h>
+#include <rtems/freebsd/net/if_arp.h>
+
+/*
+ * Macro to map an IP multicast address to an Ethernet multicast address.
+ * The high-order 25 bits of the Ethernet address are statically assigned,
+ * and the low-order 23 bits are taken from the low end of the IP address.
+ */
+#define ETHER_MAP_IP_MULTICAST(ipaddr, enaddr) \
+ /* struct in_addr *ipaddr; */ \
+ /* u_char enaddr[ETHER_ADDR_LEN]; */ \
+{ \
+ (enaddr)[0] = 0x01; \
+ (enaddr)[1] = 0x00; \
+ (enaddr)[2] = 0x5e; \
+ (enaddr)[3] = ((u_char *)ipaddr)[1] & 0x7f; \
+ (enaddr)[4] = ((u_char *)ipaddr)[2]; \
+ (enaddr)[5] = ((u_char *)ipaddr)[3]; \
+}
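+/*
+ * Example: the group 224.0.0.251 maps to 01:00:5e:00:00:fb -- the fixed
+ * 01:00:5e prefix followed by the low-order 23 bits of the IP address.
+ */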
+/*
+ * Macro to map an IP6 multicast address to an Ethernet multicast address.
+ * The high-order 16 bits of the Ethernet address are statically assigned,
+ * and the low-order 32 bits are taken from the low end of the IP6 address.
+ */
+#define ETHER_MAP_IPV6_MULTICAST(ip6addr, enaddr) \
+/* struct in6_addr *ip6addr; */ \
+/* u_char enaddr[ETHER_ADDR_LEN]; */ \
+{ \
+ (enaddr)[0] = 0x33; \
+ (enaddr)[1] = 0x33; \
+ (enaddr)[2] = ((u_char *)ip6addr)[12]; \
+ (enaddr)[3] = ((u_char *)ip6addr)[13]; \
+ (enaddr)[4] = ((u_char *)ip6addr)[14]; \
+ (enaddr)[5] = ((u_char *)ip6addr)[15]; \
+}
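+/*
+ * Example: ff02::1 (all-nodes) maps to 33:33:00:00:00:01 -- the fixed
+ * 33:33 prefix followed by the last four bytes of the IPv6 address.
+ */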
+
+/*
+ * Ethernet Address Resolution Protocol.
+ *
+ * See RFC 826 for protocol description. Structure below is adapted
+ * to resolving internet addresses. Field names used correspond to
+ * RFC 826.
+ */
+struct ether_arp {
+ struct arphdr ea_hdr; /* fixed-size header */
+ u_char arp_sha[ETHER_ADDR_LEN]; /* sender hardware address */
+ u_char arp_spa[4]; /* sender protocol address */
+ u_char arp_tha[ETHER_ADDR_LEN]; /* target hardware address */
+ u_char arp_tpa[4]; /* target protocol address */
+};
+#define arp_hrd ea_hdr.ar_hrd
+#define arp_pro ea_hdr.ar_pro
+#define arp_hln ea_hdr.ar_hln
+#define arp_pln ea_hdr.ar_pln
+#define arp_op ea_hdr.ar_op
+
+struct sockaddr_inarp {
+ u_char sin_len;
+ u_char sin_family;
+ u_short sin_port;
+ struct in_addr sin_addr;
+ struct in_addr sin_srcaddr;
+ u_short sin_tos;
+ u_short sin_other;
+#define SIN_PROXY 1
+};
+/*
+ * IP and ethernet specific routing flags
+ */
+#define RTF_USETRAILERS RTF_PROTO1 /* use trailers */
+#define RTF_ANNOUNCE RTF_PROTO2 /* announce new arp entry */
+
+#ifdef _KERNEL
+extern u_char ether_ipmulticast_min[ETHER_ADDR_LEN];
+extern u_char ether_ipmulticast_max[ETHER_ADDR_LEN];
+
+struct llentry;
+struct ifaddr;
+
+int arpresolve(struct ifnet *ifp, struct rtentry *rt,
+ struct mbuf *m, struct sockaddr *dst, u_char *desten,
+ struct llentry **lle);
+void arp_ifinit(struct ifnet *, struct ifaddr *);
+void arp_ifinit2(struct ifnet *, struct ifaddr *, u_char *);
+#endif
+
+#endif
diff --git a/rtems/freebsd/netinet/igmp.c b/rtems/freebsd/netinet/igmp.c
new file mode 100644
index 00000000..37f97e45
--- /dev/null
+++ b/rtems/freebsd/netinet/igmp.c
@@ -0,0 +1,3655 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 2007-2009 Bruce Simpson.
+ * Copyright (c) 1988 Stephen Deering.
+ * Copyright (c) 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Stephen Deering of Stanford University.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)igmp.c 8.1 (Berkeley) 7/19/93
+ */
+
+/*
+ * Internet Group Management Protocol (IGMP) routines.
+ * [RFC1112, RFC2236, RFC3376]
+ *
+ * Written by Steve Deering, Stanford, May 1988.
+ * Modified by Rosen Sharma, Stanford, Aug 1994.
+ * Modified by Bill Fenner, Xerox PARC, Feb 1995.
+ * Modified to fully comply to IGMPv2 by Bill Fenner, Oct 1995.
+ * Significantly rewritten for IGMPv3, VIMAGE, and SMP by Bruce Simpson.
+ *
+ * MULTICAST Revision: 3.5.1.4
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/module.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/protosw.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/sysctl.h>
+#include <rtems/freebsd/sys/ktr.h>
+#include <rtems/freebsd/sys/condvar.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/netisr.h>
+#include <rtems/freebsd/net/vnet.h>
+
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/in_var.h>
+#include <rtems/freebsd/netinet/in_systm.h>
+#include <rtems/freebsd/netinet/ip.h>
+#include <rtems/freebsd/netinet/ip_var.h>
+#include <rtems/freebsd/netinet/ip_options.h>
+#include <rtems/freebsd/netinet/igmp.h>
+#include <rtems/freebsd/netinet/igmp_var.h>
+
+#include <rtems/freebsd/machine/in_cksum.h>
+
+#include <rtems/freebsd/security/mac/mac_framework.h>
+
+#ifndef KTR_IGMPV3
+#define KTR_IGMPV3 KTR_INET
+#endif
+
+static struct igmp_ifinfo *
+ igi_alloc_locked(struct ifnet *);
+static void igi_delete_locked(const struct ifnet *);
+static void igmp_dispatch_queue(struct ifqueue *, int, const int);
+static void igmp_fasttimo_vnet(void);
+static void igmp_final_leave(struct in_multi *, struct igmp_ifinfo *);
+static int igmp_handle_state_change(struct in_multi *,
+ struct igmp_ifinfo *);
+static int igmp_initial_join(struct in_multi *, struct igmp_ifinfo *);
+static int igmp_input_v1_query(struct ifnet *, const struct ip *,
+ const struct igmp *);
+static int igmp_input_v2_query(struct ifnet *, const struct ip *,
+ const struct igmp *);
+static int igmp_input_v3_query(struct ifnet *, const struct ip *,
+ /*const*/ struct igmpv3 *);
+static int igmp_input_v3_group_query(struct in_multi *,
+ struct igmp_ifinfo *, int, /*const*/ struct igmpv3 *);
+static int igmp_input_v1_report(struct ifnet *, /*const*/ struct ip *,
+ /*const*/ struct igmp *);
+static int igmp_input_v2_report(struct ifnet *, /*const*/ struct ip *,
+ /*const*/ struct igmp *);
+static void igmp_intr(struct mbuf *);
+static int igmp_isgroupreported(const struct in_addr);
+static struct mbuf *
+ igmp_ra_alloc(void);
+#ifdef KTR
+static char * igmp_rec_type_to_str(const int);
+#endif
+static void igmp_set_version(struct igmp_ifinfo *, const int);
+static void igmp_slowtimo_vnet(void);
+static int igmp_v1v2_queue_report(struct in_multi *, const int);
+static void igmp_v1v2_process_group_timer(struct in_multi *, const int);
+static void igmp_v1v2_process_querier_timers(struct igmp_ifinfo *);
+static void igmp_v2_update_group(struct in_multi *, const int);
+static void igmp_v3_cancel_link_timers(struct igmp_ifinfo *);
+static void igmp_v3_dispatch_general_query(struct igmp_ifinfo *);
+static struct mbuf *
+ igmp_v3_encap_report(struct ifnet *, struct mbuf *);
+static int igmp_v3_enqueue_group_record(struct ifqueue *,
+ struct in_multi *, const int, const int, const int);
+static int igmp_v3_enqueue_filter_change(struct ifqueue *,
+ struct in_multi *);
+static void igmp_v3_process_group_timers(struct igmp_ifinfo *,
+ struct ifqueue *, struct ifqueue *, struct in_multi *,
+ const int);
+static int igmp_v3_merge_state_changes(struct in_multi *,
+ struct ifqueue *);
+static void igmp_v3_suppress_group_record(struct in_multi *);
+static int sysctl_igmp_default_version(SYSCTL_HANDLER_ARGS);
+static int sysctl_igmp_gsr(SYSCTL_HANDLER_ARGS);
+static int sysctl_igmp_ifinfo(SYSCTL_HANDLER_ARGS);
+
+static const struct netisr_handler igmp_nh = {
+ .nh_name = "igmp",
+ .nh_handler = igmp_intr,
+ .nh_proto = NETISR_IGMP,
+ .nh_policy = NETISR_POLICY_SOURCE,
+};
+
+/*
+ * System-wide globals.
+ *
+ * Unlocked access to these is OK, except for the global IGMP output
+ * queue. The IGMP subsystem lock ends up being system-wide for the moment,
+ * because all VIMAGEs have to share a global output queue, as netisrs
+ * themselves are not virtualized.
+ *
+ * Locking:
+ * * The permitted lock order is: IN_MULTI_LOCK, IGMP_LOCK, IF_ADDR_LOCK.
+ * Any may be taken independently; if any are held at the same
+ * time, the above lock order must be followed.
+ * * All output is delegated to the netisr.
+ * Now that Giant has been eliminated, the netisr may be inlined.
+ * * IN_MULTI_LOCK covers in_multi.
+ * * IGMP_LOCK covers igmp_ifinfo and any global variables in this file,
+ * including the output queue.
+ * * IF_ADDR_LOCK covers if_multiaddrs, which is used for a variety of
+ * per-link state iterators.
+ * * igmp_ifinfo is valid as long as PF_INET is attached to the interface,
+ * therefore it is not refcounted.
+ * We allow unlocked reads of igmp_ifinfo when accessed via in_multi.
+ *
+ * Reference counting
+ * * IGMP acquires its own reference every time an in_multi is passed to
+ * it and the group is being joined for the first time.
+ * * IGMP releases its reference(s) on in_multi in a deferred way,
+ * because the operations which process the release run as part of
+ * a loop whose control variables are directly affected by the release
+ * (that, and not recursing on the IF_ADDR_LOCK).
+ *
+ * VIMAGE: Each in_multi corresponds to an ifp, and each ifp corresponds
+ * to a vnet in ifp->if_vnet.
+ *
+ * SMPng: XXX We may potentially race operations on ifma_protospec.
+ * The problem is that we currently lack a clean way of taking the
+ * IF_ADDR_LOCK() between the ifnet and in layers w/o recursing,
+ * as anything which modifies ifma needs to be covered by that lock.
+ * So check for ifma_protospec being NULL before proceeding.
+ */
+struct mtx igmp_mtx;
+
+struct mbuf *m_raopt; /* Router Alert option */
+MALLOC_DEFINE(M_IGMP, "igmp", "igmp state");
+
+/*
+ * VIMAGE-wide globals.
+ *
+ * The IGMPv3 timers themselves need to run per-image, however,
+ * protosw timers run globally (see tcp).
+ * An ifnet can only be in one vimage at a time, and the loopback
+ * ifnet, loif, is itself virtualized.
+ * It would otherwise be possible to seriously hose IGMP state,
+ * and create inconsistencies in upstream multicast routing, if you have
+ * multiple VIMAGEs running on the same link joining different multicast
+ * groups, UNLESS the "primary IP address" is different. This is because
+ * IGMP for IPv4 does not force link-local addresses to be used for each
+ * node, unlike MLD for IPv6.
+ * Obviously the IGMPv3 per-interface state has per-vimage granularity
+ * also as a result.
+ *
+ * FUTURE: Stop using IFP_TO_IA/INADDR_ANY, and use source address selection
+ * policy to control the address used by IGMP on the link.
+ */
+static VNET_DEFINE(int, interface_timers_running); /* IGMPv3 general
+ * query response */
+static VNET_DEFINE(int, state_change_timers_running); /* IGMPv3 state-change
+ * retransmit */
+static VNET_DEFINE(int, current_state_timers_running); /* IGMPv1/v2 host
+ * report; IGMPv3 g/sg
+ * query response */
+
+#define V_interface_timers_running VNET(interface_timers_running)
+#define V_state_change_timers_running VNET(state_change_timers_running)
+#define V_current_state_timers_running VNET(current_state_timers_running)
+
+static VNET_DEFINE(LIST_HEAD(, igmp_ifinfo), igi_head);
+static VNET_DEFINE(struct igmpstat, igmpstat) = {
+ .igps_version = IGPS_VERSION_3,
+ .igps_len = sizeof(struct igmpstat),
+};
+static VNET_DEFINE(struct timeval, igmp_gsrdelay) = {10, 0};
+
+#define V_igi_head VNET(igi_head)
+#define V_igmpstat VNET(igmpstat)
+#define V_igmp_gsrdelay VNET(igmp_gsrdelay)
+
+static VNET_DEFINE(int, igmp_recvifkludge) = 1;
+static VNET_DEFINE(int, igmp_sendra) = 1;
+static VNET_DEFINE(int, igmp_sendlocal) = 1;
+static VNET_DEFINE(int, igmp_v1enable) = 1;
+static VNET_DEFINE(int, igmp_v2enable) = 1;
+static VNET_DEFINE(int, igmp_legacysupp);
+static VNET_DEFINE(int, igmp_default_version) = IGMP_VERSION_3;
+
+#define V_igmp_recvifkludge VNET(igmp_recvifkludge)
+#define V_igmp_sendra VNET(igmp_sendra)
+#define V_igmp_sendlocal VNET(igmp_sendlocal)
+#define V_igmp_v1enable VNET(igmp_v1enable)
+#define V_igmp_v2enable VNET(igmp_v2enable)
+#define V_igmp_legacysupp VNET(igmp_legacysupp)
+#define V_igmp_default_version VNET(igmp_default_version)
+
+/*
+ * Virtualized sysctls.
+ */
+SYSCTL_VNET_STRUCT(_net_inet_igmp, IGMPCTL_STATS, stats, CTLFLAG_RW,
+ &VNET_NAME(igmpstat), igmpstat, "");
+SYSCTL_VNET_INT(_net_inet_igmp, OID_AUTO, recvifkludge, CTLFLAG_RW,
+ &VNET_NAME(igmp_recvifkludge), 0,
+ "Rewrite IGMPv1/v2 reports from 0.0.0.0 to contain subnet address");
+SYSCTL_VNET_INT(_net_inet_igmp, OID_AUTO, sendra, CTLFLAG_RW,
+ &VNET_NAME(igmp_sendra), 0,
+ "Send IP Router Alert option in IGMPv2/v3 messages");
+SYSCTL_VNET_INT(_net_inet_igmp, OID_AUTO, sendlocal, CTLFLAG_RW,
+ &VNET_NAME(igmp_sendlocal), 0,
+ "Send IGMP membership reports for 224.0.0.0/24 groups");
+SYSCTL_VNET_INT(_net_inet_igmp, OID_AUTO, v1enable, CTLFLAG_RW,
+ &VNET_NAME(igmp_v1enable), 0,
+ "Enable backwards compatibility with IGMPv1");
+SYSCTL_VNET_INT(_net_inet_igmp, OID_AUTO, v2enable, CTLFLAG_RW,
+ &VNET_NAME(igmp_v2enable), 0,
+ "Enable backwards compatibility with IGMPv2");
+SYSCTL_VNET_INT(_net_inet_igmp, OID_AUTO, legacysupp, CTLFLAG_RW,
+ &VNET_NAME(igmp_legacysupp), 0,
+ "Allow v1/v2 reports to suppress v3 group responses");
+SYSCTL_VNET_PROC(_net_inet_igmp, OID_AUTO, default_version,
+ CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
+ &VNET_NAME(igmp_default_version), 0, sysctl_igmp_default_version, "I",
+ "Default version of IGMP to run on each interface");
+SYSCTL_VNET_PROC(_net_inet_igmp, OID_AUTO, gsrdelay,
+ CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
+ &VNET_NAME(igmp_gsrdelay.tv_sec), 0, sysctl_igmp_gsr, "I",
+ "Rate limit for IGMPv3 Group-and-Source queries in seconds");
+
+/*
+ * Non-virtualized sysctls.
+ */
+SYSCTL_NODE(_net_inet_igmp, OID_AUTO, ifinfo, CTLFLAG_RD | CTLFLAG_MPSAFE,
+ sysctl_igmp_ifinfo, "Per-interface IGMPv3 state");
+
+static __inline void
+igmp_save_context(struct mbuf *m, struct ifnet *ifp)
+{
+
+#ifdef VIMAGE
+ m->m_pkthdr.header = ifp->if_vnet;
+#endif /* VIMAGE */
+ m->m_pkthdr.flowid = ifp->if_index;
+}
+
+static __inline void
+igmp_scrub_context(struct mbuf *m)
+{
+
+ m->m_pkthdr.header = NULL;
+ m->m_pkthdr.flowid = 0;
+}
+
+#ifdef KTR
+static __inline char *
+inet_ntoa_haddr(in_addr_t haddr)
+{
+ struct in_addr ia;
+
+ ia.s_addr = htonl(haddr);
+ return (inet_ntoa(ia));
+}
+#endif
+
+/*
+ * Restore context from a queued IGMP output chain.
+ * Return saved ifindex.
+ *
+ * VIMAGE: The assertion is there to make sure that we
+ * actually called CURVNET_SET() with what's in the mbuf chain.
+ */
+static __inline uint32_t
+igmp_restore_context(struct mbuf *m)
+{
+
+#ifdef notyet
+#if defined(VIMAGE) && defined(INVARIANTS)
+ KASSERT(curvnet == (m->m_pkthdr.header),
+ ("%s: called when curvnet was not restored", __func__));
+#endif
+#endif
+ return (m->m_pkthdr.flowid);
+}
+
+/*
+ * Retrieve or set default IGMP version.
+ *
+ * VIMAGE: Assume curvnet set by caller.
+ * SMPng: NOTE: Serialized by IGMP lock.
+ */
+static int
+sysctl_igmp_default_version(SYSCTL_HANDLER_ARGS)
+{
+ int error;
+ int new;
+
+ error = sysctl_wire_old_buffer(req, sizeof(int));
+ if (error)
+ return (error);
+
+ IGMP_LOCK();
+
+ new = V_igmp_default_version;
+
+ error = sysctl_handle_int(oidp, &new, 0, req);
+ if (error || !req->newptr)
+ goto out_locked;
+
+ if (new < IGMP_VERSION_1 || new > IGMP_VERSION_3) {
+ error = EINVAL;
+ goto out_locked;
+ }
+
+ CTR2(KTR_IGMPV3, "change igmp_default_version from %d to %d",
+ V_igmp_default_version, new);
+
+ V_igmp_default_version = new;
+
+out_locked:
+ IGMP_UNLOCK();
+ return (error);
+}
+
+/*
+ * Retrieve or set threshold between group-source queries in seconds.
+ *
+ * VIMAGE: Assume curvnet set by caller.
+ * SMPng: NOTE: Serialized by IGMP lock.
+ */
+static int
+sysctl_igmp_gsr(SYSCTL_HANDLER_ARGS)
+{
+ int error;
+ int i;
+
+ error = sysctl_wire_old_buffer(req, sizeof(int));
+ if (error)
+ return (error);
+
+ IGMP_LOCK();
+
+ i = V_igmp_gsrdelay.tv_sec;
+
+ error = sysctl_handle_int(oidp, &i, 0, req);
+ if (error || !req->newptr)
+ goto out_locked;
+
+ if (i < -1 || i >= 60) {
+ error = EINVAL;
+ goto out_locked;
+ }
+
+ CTR2(KTR_IGMPV3, "change igmp_gsrdelay from %d to %d",
+ V_igmp_gsrdelay.tv_sec, i);
+ V_igmp_gsrdelay.tv_sec = i;
+
+out_locked:
+ IGMP_UNLOCK();
+ return (error);
+}
+
+/*
+ * Expose struct igmp_ifinfo to userland, keyed by ifindex.
+ * For use by ifmcstat(8).
+ *
+ * SMPng: NOTE: Does an unlocked ifindex space read.
+ * VIMAGE: Assume curvnet set by caller. The node handler itself
+ * is not directly virtualized.
+ */
+static int
+sysctl_igmp_ifinfo(SYSCTL_HANDLER_ARGS)
+{
+ int *name;
+ int error;
+ u_int namelen;
+ struct ifnet *ifp;
+ struct igmp_ifinfo *igi;
+
+ name = (int *)arg1;
+ namelen = arg2;
+
+ if (req->newptr != NULL)
+ return (EPERM);
+
+ if (namelen != 1)
+ return (EINVAL);
+
+ error = sysctl_wire_old_buffer(req, sizeof(struct igmp_ifinfo));
+ if (error)
+ return (error);
+
+ IN_MULTI_LOCK();
+ IGMP_LOCK();
+
+ if (name[0] <= 0 || name[0] > V_if_index) {
+ error = ENOENT;
+ goto out_locked;
+ }
+
+ error = ENOENT;
+
+ ifp = ifnet_byindex(name[0]);
+ if (ifp == NULL)
+ goto out_locked;
+
+ LIST_FOREACH(igi, &V_igi_head, igi_link) {
+ if (ifp == igi->igi_ifp) {
+ error = SYSCTL_OUT(req, igi,
+ sizeof(struct igmp_ifinfo));
+ break;
+ }
+ }
+
+out_locked:
+ IGMP_UNLOCK();
+ IN_MULTI_UNLOCK();
+ return (error);
+}
+
+/*
+ * Dispatch an entire queue of pending packet chains
+ * using the netisr.
+ * VIMAGE: Assumes the vnet pointer has been set.
+ */
+static void
+igmp_dispatch_queue(struct ifqueue *ifq, int limit, const int loop)
+{
+ struct mbuf *m;
+
+ for (;;) {
+ _IF_DEQUEUE(ifq, m);
+ if (m == NULL)
+ break;
+ CTR3(KTR_IGMPV3, "%s: dispatch %p from %p", __func__, ifq, m);
+ if (loop)
+ m->m_flags |= M_IGMP_LOOP;
+ netisr_dispatch(NETISR_IGMP, m);
+ if (--limit == 0)
+ break;
+ }
+}
+
+/*
+ * Filter outgoing IGMP report state by group.
+ *
+ * Reports are ALWAYS suppressed for ALL-HOSTS (224.0.0.1).
+ * If the net.inet.igmp.sendlocal sysctl is 0, then IGMP reports are
+ * disabled for all groups in the 224.0.0.0/24 link-local scope. However,
+ * this may break certain IGMP snooping switches which rely on the old
+ * report behaviour.
+ *
+ * Return zero if the given group is one for which IGMP reports
+ * should be suppressed, or non-zero if reports should be issued.
+ */
+static __inline int
+igmp_isgroupreported(const struct in_addr addr)
+{
+
+ if (in_allhosts(addr) ||
+ ((!V_igmp_sendlocal && IN_LOCAL_GROUP(ntohl(addr.s_addr)))))
+ return (0);
+
+ return (1);
+}
+
+/*
+ * Construct a Router Alert option to use in outgoing packets.
+ */
+static struct mbuf *
+igmp_ra_alloc(void)
+{
+ struct mbuf *m;
+ struct ipoption *p;
+
+ MGET(m, M_DONTWAIT, MT_DATA);
+ if (m == NULL)
+ return (NULL);
+ p = mtod(m, struct ipoption *);
+ p->ipopt_dst.s_addr = INADDR_ANY;
+ p->ipopt_list[0] = IPOPT_RA; /* Router Alert Option */
+ p->ipopt_list[1] = 0x04; /* 4 bytes long */
+ p->ipopt_list[2] = IPOPT_EOL; /* End of IP option list */
+ p->ipopt_list[3] = 0x00; /* pad byte */
+ m->m_len = sizeof(p->ipopt_dst) + p->ipopt_list[1];
+
+ return (m);
+}
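+
+/*
+ * On the wire, the option built above is the 4-byte sequence
+ * 0x94 0x04 0x00 0x00: type IPOPT_RA (copied flag set, option 20),
+ * length 4, and a 2-octet value of zero, as specified by RFC 2113.
+ */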
+
+/*
+ * Attach IGMP when PF_INET is attached to an interface.
+ */
+struct igmp_ifinfo *
+igmp_domifattach(struct ifnet *ifp)
+{
+ struct igmp_ifinfo *igi;
+
+ CTR3(KTR_IGMPV3, "%s: called for ifp %p(%s)",
+ __func__, ifp, ifp->if_xname);
+
+ IGMP_LOCK();
+
+ igi = igi_alloc_locked(ifp);
+ if (!(ifp->if_flags & IFF_MULTICAST))
+ igi->igi_flags |= IGIF_SILENT;
+
+ IGMP_UNLOCK();
+
+ return (igi);
+}
+
+/*
+ * VIMAGE: assume curvnet set by caller.
+ */
+static struct igmp_ifinfo *
+igi_alloc_locked(/*const*/ struct ifnet *ifp)
+{
+ struct igmp_ifinfo *igi;
+
+ IGMP_LOCK_ASSERT();
+
+ igi = malloc(sizeof(struct igmp_ifinfo), M_IGMP, M_NOWAIT|M_ZERO);
+ if (igi == NULL)
+ goto out;
+
+ igi->igi_ifp = ifp;
+ igi->igi_version = V_igmp_default_version;
+ igi->igi_flags = 0;
+ igi->igi_rv = IGMP_RV_INIT;
+ igi->igi_qi = IGMP_QI_INIT;
+ igi->igi_qri = IGMP_QRI_INIT;
+ igi->igi_uri = IGMP_URI_INIT;
+
+ SLIST_INIT(&igi->igi_relinmhead);
+
+ /*
+ * Responses to general queries are subject to bounds.
+ */
+ IFQ_SET_MAXLEN(&igi->igi_gq, IGMP_MAX_RESPONSE_PACKETS);
+
+ LIST_INSERT_HEAD(&V_igi_head, igi, igi_link);
+
+ CTR2(KTR_IGMPV3, "allocate igmp_ifinfo for ifp %p(%s)",
+ ifp, ifp->if_xname);
+
+out:
+ return (igi);
+}
+
+/*
+ * Hook for ifdetach.
+ *
+ * NOTE: Some finalization tasks need to run before the protocol domain
+ * is detached, but also before the link layer does its cleanup.
+ *
+ * SMPNG: igmp_ifdetach() needs to take IF_ADDR_LOCK().
+ * XXX This is also bitten by unlocked ifma_protospec access.
+ */
+void
+igmp_ifdetach(struct ifnet *ifp)
+{
+ struct igmp_ifinfo *igi;
+ struct ifmultiaddr *ifma;
+ struct in_multi *inm, *tinm;
+
+ CTR3(KTR_IGMPV3, "%s: called for ifp %p(%s)", __func__, ifp,
+ ifp->if_xname);
+
+ IGMP_LOCK();
+
+ igi = ((struct in_ifinfo *)ifp->if_afdata[AF_INET])->ii_igmp;
+ if (igi->igi_version == IGMP_VERSION_3) {
+ IF_ADDR_LOCK(ifp);
+ TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
+ if (ifma->ifma_addr->sa_family != AF_INET ||
+ ifma->ifma_protospec == NULL)
+ continue;
+#if 0
+ KASSERT(ifma->ifma_protospec != NULL,
+ ("%s: ifma_protospec is NULL", __func__));
+#endif
+ inm = (struct in_multi *)ifma->ifma_protospec;
+ if (inm->inm_state == IGMP_LEAVING_MEMBER) {
+ SLIST_INSERT_HEAD(&igi->igi_relinmhead,
+ inm, inm_nrele);
+ }
+ inm_clear_recorded(inm);
+ }
+ IF_ADDR_UNLOCK(ifp);
+ /*
+ * Free the in_multi reference(s) for this IGMP lifecycle.
+ */
+ SLIST_FOREACH_SAFE(inm, &igi->igi_relinmhead, inm_nrele,
+ tinm) {
+ SLIST_REMOVE_HEAD(&igi->igi_relinmhead, inm_nrele);
+ inm_release_locked(inm);
+ }
+ }
+
+ IGMP_UNLOCK();
+}
+
+/*
+ * Hook for domifdetach.
+ */
+void
+igmp_domifdetach(struct ifnet *ifp)
+{
+ struct igmp_ifinfo *igi;
+
+ CTR3(KTR_IGMPV3, "%s: called for ifp %p(%s)",
+ __func__, ifp, ifp->if_xname);
+
+ IGMP_LOCK();
+
+ igi = ((struct in_ifinfo *)ifp->if_afdata[AF_INET])->ii_igmp;
+ igi_delete_locked(ifp);
+
+ IGMP_UNLOCK();
+}
+
+static void
+igi_delete_locked(const struct ifnet *ifp)
+{
+ struct igmp_ifinfo *igi, *tigi;
+
+ CTR3(KTR_IGMPV3, "%s: freeing igmp_ifinfo for ifp %p(%s)",
+ __func__, ifp, ifp->if_xname);
+
+ IGMP_LOCK_ASSERT();
+
+ LIST_FOREACH_SAFE(igi, &V_igi_head, igi_link, tigi) {
+ if (igi->igi_ifp == ifp) {
+ /*
+ * Free deferred General Query responses.
+ */
+ _IF_DRAIN(&igi->igi_gq);
+
+ LIST_REMOVE(igi, igi_link);
+
+ KASSERT(SLIST_EMPTY(&igi->igi_relinmhead),
+ ("%s: there are dangling in_multi references",
+ __func__));
+
+ free(igi, M_IGMP);
+ return;
+ }
+ }
+
+#ifdef INVARIANTS
+ panic("%s: igmp_ifinfo not found for ifp %p\n", __func__, ifp);
+#endif
+}
+
+/*
+ * Process a received IGMPv1 query.
+ * Return non-zero if the message should be dropped.
+ *
+ * VIMAGE: The curvnet pointer is derived from the input ifp.
+ */
+static int
+igmp_input_v1_query(struct ifnet *ifp, const struct ip *ip,
+ const struct igmp *igmp)
+{
+ struct ifmultiaddr *ifma;
+ struct igmp_ifinfo *igi;
+ struct in_multi *inm;
+
+ /*
+ * IGMPv1 Host Membership Queries SHOULD always be addressed to
+ * 224.0.0.1. They are always treated as General Queries.
+ * igmp_group is always ignored. Do not drop it as a userland
+ * daemon may wish to see it.
+ * XXX SMPng: unlocked increments in igmpstat assumed atomic.
+ */
+ if (!in_allhosts(ip->ip_dst) || !in_nullhost(igmp->igmp_group)) {
+ IGMPSTAT_INC(igps_rcv_badqueries);
+ return (0);
+ }
+ IGMPSTAT_INC(igps_rcv_gen_queries);
+
+ IN_MULTI_LOCK();
+ IGMP_LOCK();
+
+ igi = ((struct in_ifinfo *)ifp->if_afdata[AF_INET])->ii_igmp;
+ KASSERT(igi != NULL, ("%s: no igmp_ifinfo for ifp %p", __func__, ifp));
+
+ if (igi->igi_flags & IGIF_LOOPBACK) {
+ CTR2(KTR_IGMPV3, "ignore v1 query on IGIF_LOOPBACK ifp %p(%s)",
+ ifp, ifp->if_xname);
+ goto out_locked;
+ }
+
+ /*
+ * Switch to IGMPv1 host compatibility mode.
+ */
+ igmp_set_version(igi, IGMP_VERSION_1);
+
+ CTR2(KTR_IGMPV3, "process v1 query on ifp %p(%s)", ifp, ifp->if_xname);
+
+ /*
+ * Start the timers in all of our group records
+ * for the interface on which the query arrived,
+ * except those which are already running.
+ */
+ IF_ADDR_LOCK(ifp);
+ TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
+ if (ifma->ifma_addr->sa_family != AF_INET ||
+ ifma->ifma_protospec == NULL)
+ continue;
+ inm = (struct in_multi *)ifma->ifma_protospec;
+ if (inm->inm_timer != 0)
+ continue;
+ switch (inm->inm_state) {
+ case IGMP_NOT_MEMBER:
+ case IGMP_SILENT_MEMBER:
+ break;
+ case IGMP_G_QUERY_PENDING_MEMBER:
+ case IGMP_SG_QUERY_PENDING_MEMBER:
+ case IGMP_REPORTING_MEMBER:
+ case IGMP_IDLE_MEMBER:
+ case IGMP_LAZY_MEMBER:
+ case IGMP_SLEEPING_MEMBER:
+ case IGMP_AWAKENING_MEMBER:
+ inm->inm_state = IGMP_REPORTING_MEMBER;
+ inm->inm_timer = IGMP_RANDOM_DELAY(
+ IGMP_V1V2_MAX_RI * PR_FASTHZ);
+ V_current_state_timers_running = 1;
+ break;
+ case IGMP_LEAVING_MEMBER:
+ break;
+ }
+ }
+ IF_ADDR_UNLOCK(ifp);
+
+out_locked:
+ IGMP_UNLOCK();
+ IN_MULTI_UNLOCK();
+
+ return (0);
+}
+
+/*
+ * Process a received IGMPv2 general or group-specific query.
+ */
+static int
+igmp_input_v2_query(struct ifnet *ifp, const struct ip *ip,
+ const struct igmp *igmp)
+{
+ struct ifmultiaddr *ifma;
+ struct igmp_ifinfo *igi;
+ struct in_multi *inm;
+ int is_general_query;
+ uint16_t timer;
+
+ is_general_query = 0;
+
+ /*
+ * Validate address fields upfront.
+ * XXX SMPng: unlocked increments in igmpstat assumed atomic.
+ */
+ if (in_nullhost(igmp->igmp_group)) {
+ /*
+ * IGMPv2 General Query.
+ * If this was not sent to the all-hosts group, ignore it.
+ */
+ if (!in_allhosts(ip->ip_dst))
+ return (0);
+ IGMPSTAT_INC(igps_rcv_gen_queries);
+ is_general_query = 1;
+ } else {
+ /* IGMPv2 Group-Specific Query. */
+ IGMPSTAT_INC(igps_rcv_group_queries);
+ }
+
+ IN_MULTI_LOCK();
+ IGMP_LOCK();
+
+ igi = ((struct in_ifinfo *)ifp->if_afdata[AF_INET])->ii_igmp;
+ KASSERT(igi != NULL, ("%s: no igmp_ifinfo for ifp %p", __func__, ifp));
+
+ if (igi->igi_flags & IGIF_LOOPBACK) {
+ CTR2(KTR_IGMPV3, "ignore v2 query on IGIF_LOOPBACK ifp %p(%s)",
+ ifp, ifp->if_xname);
+ goto out_locked;
+ }
+
+ /*
+ * Ignore v2 query if in v1 Compatibility Mode.
+ */
+ if (igi->igi_version == IGMP_VERSION_1)
+ goto out_locked;
+
+ igmp_set_version(igi, IGMP_VERSION_2);
+
+ timer = igmp->igmp_code * PR_FASTHZ / IGMP_TIMER_SCALE;
+ if (timer == 0)
+ timer = 1;
+
+ if (is_general_query) {
+ /*
+ * For each reporting group joined on this
+ * interface, kick the report timer.
+ */
+ CTR2(KTR_IGMPV3, "process v2 general query on ifp %p(%s)",
+ ifp, ifp->if_xname);
+ IF_ADDR_LOCK(ifp);
+ TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
+ if (ifma->ifma_addr->sa_family != AF_INET ||
+ ifma->ifma_protospec == NULL)
+ continue;
+ inm = (struct in_multi *)ifma->ifma_protospec;
+ igmp_v2_update_group(inm, timer);
+ }
+ IF_ADDR_UNLOCK(ifp);
+ } else {
+ /*
+ * Group-specific IGMPv2 query, we need only
+ * look up the single group to process it.
+ */
+ inm = inm_lookup(ifp, igmp->igmp_group);
+ if (inm != NULL) {
+ CTR3(KTR_IGMPV3, "process v2 query %s on ifp %p(%s)",
+ inet_ntoa(igmp->igmp_group), ifp, ifp->if_xname);
+ igmp_v2_update_group(inm, timer);
+ }
+ }
+
+out_locked:
+ IGMP_UNLOCK();
+ IN_MULTI_UNLOCK();
+
+ return (0);
+}
+
+/*
+ * Update the report timer on a group in response to an IGMPv2 query.
+ *
+ * If we are becoming the reporting member for this group, start the timer.
+ * If we already are the reporting member for this group, and timer is
+ * below the threshold, reset it.
+ *
+ * We may be updating the group for the first time since we switched
+ * to IGMPv3. If we are, then we must clear any recorded source lists,
+ * and transition to REPORTING state; the group timer is overloaded
+ * for group and group-source query responses.
+ *
+ * Unlike IGMPv3, the delay per group should be jittered
+ * to avoid bursts of IGMPv2 reports.
+ */
+static void
+igmp_v2_update_group(struct in_multi *inm, const int timer)
+{
+
+ CTR4(KTR_IGMPV3, "%s: %s/%s timer=%d", __func__,
+ inet_ntoa(inm->inm_addr), inm->inm_ifp->if_xname, timer);
+
+ IN_MULTI_LOCK_ASSERT();
+
+ switch (inm->inm_state) {
+ case IGMP_NOT_MEMBER:
+ case IGMP_SILENT_MEMBER:
+ break;
+ case IGMP_REPORTING_MEMBER:
+ if (inm->inm_timer != 0 &&
+ inm->inm_timer <= timer) {
+ CTR1(KTR_IGMPV3, "%s: REPORTING and timer running, "
+ "skipping.", __func__);
+ break;
+ }
+ /* FALLTHROUGH */
+ case IGMP_SG_QUERY_PENDING_MEMBER:
+ case IGMP_G_QUERY_PENDING_MEMBER:
+ case IGMP_IDLE_MEMBER:
+ case IGMP_LAZY_MEMBER:
+ case IGMP_AWAKENING_MEMBER:
+ CTR1(KTR_IGMPV3, "%s: ->REPORTING", __func__);
+ inm->inm_state = IGMP_REPORTING_MEMBER;
+ inm->inm_timer = IGMP_RANDOM_DELAY(timer);
+ V_current_state_timers_running = 1;
+ break;
+ case IGMP_SLEEPING_MEMBER:
+ CTR1(KTR_IGMPV3, "%s: ->AWAKENING", __func__);
+ inm->inm_state = IGMP_AWAKENING_MEMBER;
+ break;
+ case IGMP_LEAVING_MEMBER:
+ break;
+ }
+}
+
+/*
+ * Process a received IGMPv3 general, group-specific or
+ * group-and-source-specific query.
+ * Assumes m has already been pulled up to the full IGMP message length.
+ * Return 0 if successful, otherwise an appropriate error code is returned.
+ */
+static int
+igmp_input_v3_query(struct ifnet *ifp, const struct ip *ip,
+ /*const*/ struct igmpv3 *igmpv3)
+{
+ struct igmp_ifinfo *igi;
+ struct in_multi *inm;
+ int is_general_query;
+ uint32_t maxresp, nsrc, qqi;
+ uint16_t timer;
+ uint8_t qrv;
+
+ is_general_query = 0;
+
+ CTR2(KTR_IGMPV3, "process v3 query on ifp %p(%s)", ifp, ifp->if_xname);
+
+ maxresp = igmpv3->igmp_code; /* in 1/10ths of a second */
+ if (maxresp >= 128) {
+ maxresp = IGMP_MANT(igmpv3->igmp_code) <<
+ (IGMP_EXP(igmpv3->igmp_code) + 3);
+ }
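+ /*
+ * Worked example: RFC 3376, Section 4.1.1 treats codes >= 128 as
+ * 1-3-4 bit floating point, decoding to (mant | 0x10) << (exp + 3)
+ * tenths of a second; a Max Resp Code of 0x8A (exp 0, mant 10)
+ * thus means 208 tenths, i.e. 20.8s. Whether the implicit leading
+ * bit is folded into IGMP_MANT() depends on the macro definitions
+ * in igmp.h.
+ */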
+
+ /*
+ * Robustness must never be less than 2 for on-wire IGMPv3.
+ * FUTURE: Check if ifp has IGIF_LOOPBACK set, as we will make
+ * an exception for interfaces whose IGMPv3 state changes
+ * are redirected to loopback (e.g. MANET).
+ */
+ qrv = IGMP_QRV(igmpv3->igmp_misc);
+ if (qrv < 2) {
+ CTR3(KTR_IGMPV3, "%s: clamping qrv %d to %d", __func__,
+ qrv, IGMP_RV_INIT);
+ qrv = IGMP_RV_INIT;
+ }
+
+ qqi = igmpv3->igmp_qqi;
+ if (qqi >= 128) {
+ qqi = IGMP_MANT(igmpv3->igmp_qqi) <<
+ (IGMP_EXP(igmpv3->igmp_qqi) + 3);
+ }
+
+ timer = maxresp * PR_FASTHZ / IGMP_TIMER_SCALE;
+ if (timer == 0)
+ timer = 1;
+
+ nsrc = ntohs(igmpv3->igmp_numsrc);
+
+ /*
+ * Validate address fields and versions upfront before
+ * accepting v3 query.
+ * XXX SMPng: Unlocked access to igmpstat counters here.
+ */
+ if (in_nullhost(igmpv3->igmp_group)) {
+ /*
+ * IGMPv3 General Query.
+ *
+ * General Queries SHOULD be directed to 224.0.0.1.
+ * A general query with a source list has undefined
+ * behaviour; discard it.
+ */
+ IGMPSTAT_INC(igps_rcv_gen_queries);
+ if (!in_allhosts(ip->ip_dst) || nsrc > 0) {
+ IGMPSTAT_INC(igps_rcv_badqueries);
+ return (0);
+ }
+ is_general_query = 1;
+ } else {
+ /* Group or group-source specific query. */
+ if (nsrc == 0)
+ IGMPSTAT_INC(igps_rcv_group_queries);
+ else
+ IGMPSTAT_INC(igps_rcv_gsr_queries);
+ }
+
+ IN_MULTI_LOCK();
+ IGMP_LOCK();
+
+ igi = ((struct in_ifinfo *)ifp->if_afdata[AF_INET])->ii_igmp;
+ KASSERT(igi != NULL, ("%s: no igmp_ifinfo for ifp %p", __func__, ifp));
+
+ if (igi->igi_flags & IGIF_LOOPBACK) {
+ CTR2(KTR_IGMPV3, "ignore v3 query on IGIF_LOOPBACK ifp %p(%s)",
+ ifp, ifp->if_xname);
+ goto out_locked;
+ }
+
+ /*
+ * Discard the v3 query if we're in Compatibility Mode.
+ * The RFC does not spell this out clearly, but hosts need to stay
+ * in compatibility mode until the Older Version Querier Present
+ * timer expires.
+ */
+ if (igi->igi_version != IGMP_VERSION_3) {
+ CTR3(KTR_IGMPV3, "ignore v3 query in v%d mode on ifp %p(%s)",
+ igi->igi_version, ifp, ifp->if_xname);
+ goto out_locked;
+ }
+
+ igmp_set_version(igi, IGMP_VERSION_3);
+ igi->igi_rv = qrv;
+ igi->igi_qi = qqi;
+ igi->igi_qri = maxresp;
+
+ CTR4(KTR_IGMPV3, "%s: qrv %d qi %d qri %d", __func__, qrv, qqi,
+ maxresp);
+
+ if (is_general_query) {
+ /*
+ * Schedule a current-state report on this ifp for
+ * all groups, possibly containing source lists.
+ * If there is a pending General Query response
+ * scheduled earlier than the selected delay, do
+ * not schedule any other reports.
+ * Otherwise, reset the interface timer.
+ */
+ CTR2(KTR_IGMPV3, "process v3 general query on ifp %p(%s)",
+ ifp, ifp->if_xname);
+ if (igi->igi_v3_timer == 0 || igi->igi_v3_timer >= timer) {
+ igi->igi_v3_timer = IGMP_RANDOM_DELAY(timer);
+ V_interface_timers_running = 1;
+ }
+ } else {
+ /*
+ * Group-source-specific queries are throttled on
+ * a per-group basis to defeat denial-of-service attempts.
+ * Queries for groups we are not a member of on this
+ * link are simply ignored.
+ */
+ inm = inm_lookup(ifp, igmpv3->igmp_group);
+ if (inm == NULL)
+ goto out_locked;
+ if (nsrc > 0) {
+ if (!ratecheck(&inm->inm_lastgsrtv,
+ &V_igmp_gsrdelay)) {
+ CTR1(KTR_IGMPV3, "%s: GS query throttled.",
+ __func__);
+ IGMPSTAT_INC(igps_drop_gsr_queries);
+ goto out_locked;
+ }
+ }
+ CTR3(KTR_IGMPV3, "process v3 %s query on ifp %p(%s)",
+ inet_ntoa(igmpv3->igmp_group), ifp, ifp->if_xname);
+ /*
+ * If there is a pending General Query response
+ * scheduled sooner than the selected delay, no
+ * further report need be scheduled.
+ * Otherwise, prepare to respond to the
+ * group-specific or group-and-source query.
+ */
+ if (igi->igi_v3_timer == 0 || igi->igi_v3_timer >= timer)
+ igmp_input_v3_group_query(inm, igi, timer, igmpv3);
+ }
+
+out_locked:
+ IGMP_UNLOCK();
+ IN_MULTI_UNLOCK();
+
+ return (0);
+}
+
+/*
+ * Process a received IGMPv3 group-specific or group-and-source-specific
+ * query.
+ * Return <0 if any error occurred. Currently this is ignored.
+ */
+static int
+igmp_input_v3_group_query(struct in_multi *inm, struct igmp_ifinfo *igi,
+ int timer, /*const*/ struct igmpv3 *igmpv3)
+{
+ int retval;
+ uint16_t nsrc;
+
+ IN_MULTI_LOCK_ASSERT();
+ IGMP_LOCK_ASSERT();
+
+ retval = 0;
+
+ switch (inm->inm_state) {
+ case IGMP_NOT_MEMBER:
+ case IGMP_SILENT_MEMBER:
+ case IGMP_SLEEPING_MEMBER:
+ case IGMP_LAZY_MEMBER:
+ case IGMP_AWAKENING_MEMBER:
+ case IGMP_IDLE_MEMBER:
+ case IGMP_LEAVING_MEMBER:
+ return (retval);
+ case IGMP_REPORTING_MEMBER:
+ case IGMP_G_QUERY_PENDING_MEMBER:
+ case IGMP_SG_QUERY_PENDING_MEMBER:
+ break;
+ }
+
+ nsrc = ntohs(igmpv3->igmp_numsrc);
+
+ /*
+ * Deal with group-specific queries upfront.
+ * If any group query is already pending, purge any recorded
+ * source-list state if it exists, and schedule a query response
+ * for this group-specific query.
+ */
+ if (nsrc == 0) {
+ if (inm->inm_state == IGMP_G_QUERY_PENDING_MEMBER ||
+ inm->inm_state == IGMP_SG_QUERY_PENDING_MEMBER) {
+ inm_clear_recorded(inm);
+ timer = min(inm->inm_timer, timer);
+ }
+ inm->inm_state = IGMP_G_QUERY_PENDING_MEMBER;
+ inm->inm_timer = IGMP_RANDOM_DELAY(timer);
+ V_current_state_timers_running = 1;
+ return (retval);
+ }
+
+ /*
+ * Deal with the case where a group-and-source-specific query has
+ * been received but a group-specific query is already pending.
+ */
+ if (inm->inm_state == IGMP_G_QUERY_PENDING_MEMBER) {
+ timer = min(inm->inm_timer, timer);
+ inm->inm_timer = IGMP_RANDOM_DELAY(timer);
+ V_current_state_timers_running = 1;
+ return (retval);
+ }
+
+ /*
+ * Finally, deal with the case where a group-and-source-specific
+ * query has been received, whether or not a response to a previous
+ * g-s-r query is already pending.
+ * In this case, we need to parse the source-list which the Querier
+ * has provided us with and check if we have any source list filter
+ * entries at T1 for these sources. If we do not, there is no need
+ * to schedule a report and the query may be dropped.
+ * If we do, we must record them and schedule a current-state
+ * report for those sources.
+ * FIXME: Handling source lists larger than 1 mbuf requires that
+ * we pass the mbuf chain pointer down to this function, and use
+ * m_getptr() to walk the chain.
+ */
+ if (inm->inm_nsrc > 0) {
+ const struct in_addr *ap;
+ int i, nrecorded;
+
+ ap = (const struct in_addr *)(igmpv3 + 1);
+ nrecorded = 0;
+ for (i = 0; i < nsrc; i++, ap++) {
+ retval = inm_record_source(inm, ap->s_addr);
+ if (retval < 0)
+ break;
+ nrecorded += retval;
+ }
+ if (nrecorded > 0) {
+ CTR1(KTR_IGMPV3,
+ "%s: schedule response to SG query", __func__);
+ inm->inm_state = IGMP_SG_QUERY_PENDING_MEMBER;
+ inm->inm_timer = IGMP_RANDOM_DELAY(timer);
+ V_current_state_timers_running = 1;
+ }
+ }
+
+ return (retval);
+}
+
+/*
+ * Process a received IGMPv1 host membership report.
+ *
+ * NOTE: 0.0.0.0 workaround breaks const correctness.
+ */
+static int
+igmp_input_v1_report(struct ifnet *ifp, /*const*/ struct ip *ip,
+ /*const*/ struct igmp *igmp)
+{
+ struct in_ifaddr *ia;
+ struct in_multi *inm;
+
+ IGMPSTAT_INC(igps_rcv_reports);
+
+ if (ifp->if_flags & IFF_LOOPBACK)
+ return (0);
+
+ if (!IN_MULTICAST(ntohl(igmp->igmp_group.s_addr)) ||
+ !in_hosteq(igmp->igmp_group, ip->ip_dst)) {
+ IGMPSTAT_INC(igps_rcv_badreports);
+ return (EINVAL);
+ }
+
+ /*
+ * RFC 3376, Section 4.2.13, 9.2, 9.3:
+ * Booting clients may use the source address 0.0.0.0. Some
+ * IGMP daemons may not know how to use IP_RECVIF to determine
+ * the interface upon which this message was received.
+ * Replace 0.0.0.0 with the subnet address if told to do so.
+ */
+ if (V_igmp_recvifkludge && in_nullhost(ip->ip_src)) {
+ IFP_TO_IA(ifp, ia);
+ if (ia != NULL) {
+ ip->ip_src.s_addr = htonl(ia->ia_subnet);
+ ifa_free(&ia->ia_ifa);
+ }
+ }
+
+ CTR3(KTR_IGMPV3, "process v1 report %s on ifp %p(%s)",
+ inet_ntoa(igmp->igmp_group), ifp, ifp->if_xname);
+
+ /*
+ * IGMPv1 report suppression.
+ * If we are a member of this group, and our membership should be
+ * reported, stop our group timer and transition to the 'lazy' state.
+ */
+ IN_MULTI_LOCK();
+ inm = inm_lookup(ifp, igmp->igmp_group);
+ if (inm != NULL) {
+ struct igmp_ifinfo *igi;
+
+ igi = inm->inm_igi;
+ if (igi == NULL) {
+ KASSERT(igi != NULL,
+ ("%s: no igi for ifp %p", __func__, ifp));
+ goto out_locked;
+ }
+
+ IGMPSTAT_INC(igps_rcv_ourreports);
+
+ /*
+ * If we are in IGMPv3 host mode, do not allow the
+ * other host's IGMPv1 report to suppress our reports
+ * unless explicitly configured to do so.
+ */
+ if (igi->igi_version == IGMP_VERSION_3) {
+ if (V_igmp_legacysupp)
+ igmp_v3_suppress_group_record(inm);
+ goto out_locked;
+ }
+
+ inm->inm_timer = 0;
+
+ switch (inm->inm_state) {
+ case IGMP_NOT_MEMBER:
+ case IGMP_SILENT_MEMBER:
+ break;
+ case IGMP_IDLE_MEMBER:
+ case IGMP_LAZY_MEMBER:
+ case IGMP_AWAKENING_MEMBER:
+ CTR3(KTR_IGMPV3,
+ "report suppressed for %s on ifp %p(%s)",
+ inet_ntoa(igmp->igmp_group), ifp, ifp->if_xname);
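+ /* FALLTHROUGH */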
+ case IGMP_SLEEPING_MEMBER:
+ inm->inm_state = IGMP_SLEEPING_MEMBER;
+ break;
+ case IGMP_REPORTING_MEMBER:
+ CTR3(KTR_IGMPV3,
+ "report suppressed for %s on ifp %p(%s)",
+ inet_ntoa(igmp->igmp_group), ifp, ifp->if_xname);
+ if (igi->igi_version == IGMP_VERSION_1)
+ inm->inm_state = IGMP_LAZY_MEMBER;
+ else if (igi->igi_version == IGMP_VERSION_2)
+ inm->inm_state = IGMP_SLEEPING_MEMBER;
+ break;
+ case IGMP_G_QUERY_PENDING_MEMBER:
+ case IGMP_SG_QUERY_PENDING_MEMBER:
+ case IGMP_LEAVING_MEMBER:
+ break;
+ }
+ }
+
+out_locked:
+ IN_MULTI_UNLOCK();
+
+ return (0);
+}
+
+/*
+ * Process a received IGMPv2 host membership report.
+ *
+ * NOTE: 0.0.0.0 workaround breaks const correctness.
+ */
+static int
+igmp_input_v2_report(struct ifnet *ifp, /*const*/ struct ip *ip,
+ /*const*/ struct igmp *igmp)
+{
+ struct in_ifaddr *ia;
+ struct in_multi *inm;
+
+ /*
+ * Make sure we don't hear our own membership report. Fast
+ * leave requires knowing that we are the only member of a
+ * group.
+ */
+ IFP_TO_IA(ifp, ia);
+ if (ia != NULL && in_hosteq(ip->ip_src, IA_SIN(ia)->sin_addr)) {
+ ifa_free(&ia->ia_ifa);
+ return (0);
+ }
+
+ IGMPSTAT_INC(igps_rcv_reports);
+
+ if (ifp->if_flags & IFF_LOOPBACK) {
+ if (ia != NULL)
+ ifa_free(&ia->ia_ifa);
+ return (0);
+ }
+
+ if (!IN_MULTICAST(ntohl(igmp->igmp_group.s_addr)) ||
+ !in_hosteq(igmp->igmp_group, ip->ip_dst)) {
+ if (ia != NULL)
+ ifa_free(&ia->ia_ifa);
+ IGMPSTAT_INC(igps_rcv_badreports);
+ return (EINVAL);
+ }
+
+ /*
+ * RFC 3376, Section 4.2.13, 9.2, 9.3:
+ * Booting clients may use the source address 0.0.0.0. Some
+ * IGMP daemons may not know how to use IP_RECVIF to determine
+ * the interface upon which this message was received.
+ * Replace 0.0.0.0 with the subnet address if told to do so.
+ */
+ if (V_igmp_recvifkludge && in_nullhost(ip->ip_src)) {
+ if (ia != NULL)
+ ip->ip_src.s_addr = htonl(ia->ia_subnet);
+ }
+ if (ia != NULL)
+ ifa_free(&ia->ia_ifa);
+
+ CTR3(KTR_IGMPV3, "process v2 report %s on ifp %p(%s)",
+ inet_ntoa(igmp->igmp_group), ifp, ifp->if_xname);
+
+ /*
+ * IGMPv2 report suppression.
+ * If we are a member of this group, and our membership should be
+ * reported, and our group timer is pending or about to be reset,
+ * stop our group timer by transitioning to the 'lazy' state.
+ */
+ IN_MULTI_LOCK();
+ inm = inm_lookup(ifp, igmp->igmp_group);
+ if (inm != NULL) {
+ struct igmp_ifinfo *igi;
+
+ igi = inm->inm_igi;
+ KASSERT(igi != NULL, ("%s: no igi for ifp %p", __func__, ifp));
+
+ IGMPSTAT_INC(igps_rcv_ourreports);
+
+ /*
+ * If we are in IGMPv3 host mode, do not allow the
+ * other host's IGMPv2 report to suppress our reports
+ * unless explicitly configured to do so.
+ */
+ if (igi->igi_version == IGMP_VERSION_3) {
+ if (V_igmp_legacysupp)
+ igmp_v3_suppress_group_record(inm);
+ goto out_locked;
+ }
+
+ inm->inm_timer = 0;
+
+ switch (inm->inm_state) {
+ case IGMP_NOT_MEMBER:
+ case IGMP_SILENT_MEMBER:
+ case IGMP_SLEEPING_MEMBER:
+ break;
+ case IGMP_REPORTING_MEMBER:
+ case IGMP_IDLE_MEMBER:
+ case IGMP_AWAKENING_MEMBER:
+ CTR3(KTR_IGMPV3,
+ "report suppressed for %s on ifp %p(%s)",
+ inet_ntoa(igmp->igmp_group), ifp, ifp->if_xname);
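+ /* FALLTHROUGH */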
+ case IGMP_LAZY_MEMBER:
+ inm->inm_state = IGMP_LAZY_MEMBER;
+ break;
+ case IGMP_G_QUERY_PENDING_MEMBER:
+ case IGMP_SG_QUERY_PENDING_MEMBER:
+ case IGMP_LEAVING_MEMBER:
+ break;
+ }
+ }
+
+out_locked:
+ IN_MULTI_UNLOCK();
+
+ return (0);
+}
+
+void
+igmp_input(struct mbuf *m, int off)
+{
+ int iphlen;
+ struct ifnet *ifp;
+ struct igmp *igmp;
+ struct ip *ip;
+ int igmplen;
+ int minlen;
+ int queryver;
+
+ CTR3(KTR_IGMPV3, "%s: called w/mbuf (%p,%d)", __func__, m, off);
+
+ ifp = m->m_pkthdr.rcvif;
+
+ IGMPSTAT_INC(igps_rcv_total);
+
+ ip = mtod(m, struct ip *);
+ iphlen = off;
+ igmplen = ip->ip_len;
+
+ /*
+ * Validate lengths.
+ */
+ if (igmplen < IGMP_MINLEN) {
+ IGMPSTAT_INC(igps_rcv_tooshort);
+ m_freem(m);
+ return;
+ }
+
+ /*
+ * Always pullup to the minimum size for v1/v2 or v3
+ * to amortize calls to m_pullup().
+ */
+ minlen = iphlen;
+ if (igmplen >= IGMP_V3_QUERY_MINLEN)
+ minlen += IGMP_V3_QUERY_MINLEN;
+ else
+ minlen += IGMP_MINLEN;
+ if ((m->m_flags & M_EXT || m->m_len < minlen) &&
+ (m = m_pullup(m, minlen)) == NULL) {
+ IGMPSTAT_INC(igps_rcv_tooshort);
+ return;
+ }
+ ip = mtod(m, struct ip *);
+
+ /*
+ * Validate checksum.
+ */
+ m->m_data += iphlen;
+ m->m_len -= iphlen;
+ igmp = mtod(m, struct igmp *);
+ if (in_cksum(m, igmplen)) {
+ IGMPSTAT_INC(igps_rcv_badsum);
+ m_freem(m);
+ return;
+ }
+ m->m_data -= iphlen;
+ m->m_len += iphlen;
+
+ /*
+ * IGMP control traffic is link-scope, and must have a TTL of 1.
+ * DVMRP traffic (e.g. mrinfo, mtrace) is an exception;
+ * probe packets may come from beyond the LAN.
+ */
+ if (igmp->igmp_type != IGMP_DVMRP && ip->ip_ttl != 1) {
+ IGMPSTAT_INC(igps_rcv_badttl);
+ m_freem(m);
+ return;
+ }
+
+ switch (igmp->igmp_type) {
+ case IGMP_HOST_MEMBERSHIP_QUERY:
+ if (igmplen == IGMP_MINLEN) {
+ if (igmp->igmp_code == 0)
+ queryver = IGMP_VERSION_1;
+ else
+ queryver = IGMP_VERSION_2;
+ } else if (igmplen >= IGMP_V3_QUERY_MINLEN) {
+ queryver = IGMP_VERSION_3;
+ } else {
+ IGMPSTAT_INC(igps_rcv_tooshort);
+ m_freem(m);
+ return;
+ }
+
+ switch (queryver) {
+ case IGMP_VERSION_1:
+ IGMPSTAT_INC(igps_rcv_v1v2_queries);
+ if (!V_igmp_v1enable)
+ break;
+ if (igmp_input_v1_query(ifp, ip, igmp) != 0) {
+ m_freem(m);
+ return;
+ }
+ break;
+
+ case IGMP_VERSION_2:
+ IGMPSTAT_INC(igps_rcv_v1v2_queries);
+ if (!V_igmp_v2enable)
+ break;
+ if (igmp_input_v2_query(ifp, ip, igmp) != 0) {
+ m_freem(m);
+ return;
+ }
+ break;
+
+ case IGMP_VERSION_3: {
+ struct igmpv3 *igmpv3;
+ uint16_t igmpv3len;
+ int srclen;
+ int nsrc;
+
+ IGMPSTAT_INC(igps_rcv_v3_queries);
+ igmpv3 = (struct igmpv3 *)igmp;
+ /*
+ * Validate length based on source count.
+ */
+ nsrc = ntohs(igmpv3->igmp_numsrc);
+ srclen = sizeof(struct in_addr) * nsrc;
+ if (igmplen < IGMP_V3_QUERY_MINLEN + srclen) {
+ IGMPSTAT_INC(igps_rcv_tooshort);
+ m_freem(m);
+ return;
+ }
+ /*
+ * m_pullup() may modify m, so pullup in
+ * this scope.
+ */
+ igmpv3len = iphlen + IGMP_V3_QUERY_MINLEN +
+ srclen;
+ if ((m->m_flags & M_EXT ||
+ m->m_len < igmpv3len) &&
+ (m = m_pullup(m, igmpv3len)) == NULL) {
+ IGMPSTAT_INC(igps_rcv_tooshort);
+ return;
+ }
+ igmpv3 = (struct igmpv3 *)(mtod(m, uint8_t *)
+ + iphlen);
+ if (igmp_input_v3_query(ifp, ip, igmpv3) != 0) {
+ m_freem(m);
+ return;
+ }
+ }
+ break;
+ }
+ break;
+
+ case IGMP_v1_HOST_MEMBERSHIP_REPORT:
+ if (!V_igmp_v1enable)
+ break;
+ if (igmp_input_v1_report(ifp, ip, igmp) != 0) {
+ m_freem(m);
+ return;
+ }
+ break;
+
+ case IGMP_v2_HOST_MEMBERSHIP_REPORT:
+ if (!V_igmp_v2enable)
+ break;
+ if (!ip_checkrouteralert(m))
+ IGMPSTAT_INC(igps_rcv_nora);
+ if (igmp_input_v2_report(ifp, ip, igmp) != 0) {
+ m_freem(m);
+ return;
+ }
+ break;
+
+ case IGMP_v3_HOST_MEMBERSHIP_REPORT:
+ /*
+ * Hosts do not need to process IGMPv3 membership reports,
+ * as report suppression is no longer required.
+ */
+ if (!ip_checkrouteralert(m))
+ IGMPSTAT_INC(igps_rcv_nora);
+ break;
+
+ default:
+ break;
+ }
+
+ /*
+ * Pass all valid IGMP packets up to any process(es) listening on a
+ * raw IGMP socket.
+ */
+ rip_input(m, off);
+}
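+
+/*
+ * Summary of the query classification performed by igmp_input() above:
+ *
+ *	igmplen == IGMP_MINLEN, igmp_code == 0	-> IGMPv1 query
+ *	igmplen == IGMP_MINLEN, igmp_code != 0	-> IGMPv2 query
+ *	igmplen >= IGMP_V3_QUERY_MINLEN		-> IGMPv3 query
+ *
+ * Any other query length is counted as too short and dropped.
+ */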
+
+
+/*
+ * Fast timeout handler (global).
+ * VIMAGE: Timeout handlers are expected to service all vimages.
+ */
+void
+igmp_fasttimo(void)
+{
+ VNET_ITERATOR_DECL(vnet_iter);
+
+ VNET_LIST_RLOCK_NOSLEEP();
+ VNET_FOREACH(vnet_iter) {
+ CURVNET_SET(vnet_iter);
+ igmp_fasttimo_vnet();
+ CURVNET_RESTORE();
+ }
+ VNET_LIST_RUNLOCK_NOSLEEP();
+}
+
+/*
+ * Fast timeout handler (per-vnet).
+ * Sends are shuffled off to a netisr to deal with Giant.
+ *
+ * VIMAGE: Assume caller has set up our curvnet.
+ */
+static void
+igmp_fasttimo_vnet(void)
+{
+ struct ifqueue scq; /* State-change packets */
+ struct ifqueue qrq; /* Query response packets */
+ struct ifnet *ifp;
+ struct igmp_ifinfo *igi;
+ struct ifmultiaddr *ifma, *tifma;
+ struct in_multi *inm;
+ int loop, uri_fasthz;
+
+ loop = 0;
+ uri_fasthz = 0;
+
+ /*
+ * Quick check to see if any work needs to be done, in order to
+ * minimize the overhead of fasttimo processing.
+ * SMPng: XXX Unlocked reads.
+ */
+ if (!V_current_state_timers_running &&
+ !V_interface_timers_running &&
+ !V_state_change_timers_running)
+ return;
+
+ IN_MULTI_LOCK();
+ IGMP_LOCK();
+
+ /*
+ * IGMPv3 General Query response timer processing.
+ */
+ if (V_interface_timers_running) {
+ CTR1(KTR_IGMPV3, "%s: interface timers running", __func__);
+
+ V_interface_timers_running = 0;
+ LIST_FOREACH(igi, &V_igi_head, igi_link) {
+ if (igi->igi_v3_timer == 0) {
+ /* Do nothing. */
+ } else if (--igi->igi_v3_timer == 0) {
+ igmp_v3_dispatch_general_query(igi);
+ } else {
+ V_interface_timers_running = 1;
+ }
+ }
+ }
+
+ if (!V_current_state_timers_running &&
+ !V_state_change_timers_running)
+ goto out_locked;
+
+ V_current_state_timers_running = 0;
+ V_state_change_timers_running = 0;
+
+ CTR1(KTR_IGMPV3, "%s: state change timers running", __func__);
+
+ /*
+ * IGMPv1/v2/v3 host report and state-change timer processing.
+ * Note: Processing a v3 group timer may remove a node.
+ */
+ LIST_FOREACH(igi, &V_igi_head, igi_link) {
+ ifp = igi->igi_ifp;
+
+ if (igi->igi_version == IGMP_VERSION_3) {
+ loop = (igi->igi_flags & IGIF_LOOPBACK) ? 1 : 0;
+ uri_fasthz = IGMP_RANDOM_DELAY(igi->igi_uri *
+ PR_FASTHZ);
+
+ memset(&qrq, 0, sizeof(struct ifqueue));
+ IFQ_SET_MAXLEN(&qrq, IGMP_MAX_G_GS_PACKETS);
+
+ memset(&scq, 0, sizeof(struct ifqueue));
+ IFQ_SET_MAXLEN(&scq, IGMP_MAX_STATE_CHANGE_PACKETS);
+ }
+
+ IF_ADDR_LOCK(ifp);
+ TAILQ_FOREACH_SAFE(ifma, &ifp->if_multiaddrs, ifma_link,
+ tifma) {
+ if (ifma->ifma_addr->sa_family != AF_INET ||
+ ifma->ifma_protospec == NULL)
+ continue;
+ inm = (struct in_multi *)ifma->ifma_protospec;
+ switch (igi->igi_version) {
+ case IGMP_VERSION_1:
+ case IGMP_VERSION_2:
+ igmp_v1v2_process_group_timer(inm,
+ igi->igi_version);
+ break;
+ case IGMP_VERSION_3:
+ igmp_v3_process_group_timers(igi, &qrq,
+ &scq, inm, uri_fasthz);
+ break;
+ }
+ }
+ IF_ADDR_UNLOCK(ifp);
+
+ if (igi->igi_version == IGMP_VERSION_3) {
+ struct in_multi *tinm;
+
+ igmp_dispatch_queue(&qrq, 0, loop);
+ igmp_dispatch_queue(&scq, 0, loop);
+
+ /*
+ * Free the in_multi reference(s) for this
+ * IGMP lifecycle.
+ */
+ SLIST_FOREACH_SAFE(inm, &igi->igi_relinmhead,
+ inm_nrele, tinm) {
+ SLIST_REMOVE_HEAD(&igi->igi_relinmhead,
+ inm_nrele);
+ inm_release_locked(inm);
+ }
+ }
+ }
+
+out_locked:
+ IGMP_UNLOCK();
+ IN_MULTI_UNLOCK();
+}
+
+/*
+ * Update host report group timer for IGMPv1/v2.
+ * Will update the global pending timer flags.
+ */
+static void
+igmp_v1v2_process_group_timer(struct in_multi *inm, const int version)
+{
+ int report_timer_expired;
+
+ IN_MULTI_LOCK_ASSERT();
+ IGMP_LOCK_ASSERT();
+
+ if (inm->inm_timer == 0) {
+ report_timer_expired = 0;
+ } else if (--inm->inm_timer == 0) {
+ report_timer_expired = 1;
+ } else {
+ V_current_state_timers_running = 1;
+ return;
+ }
+
+ switch (inm->inm_state) {
+ case IGMP_NOT_MEMBER:
+ case IGMP_SILENT_MEMBER:
+ case IGMP_IDLE_MEMBER:
+ case IGMP_LAZY_MEMBER:
+ case IGMP_SLEEPING_MEMBER:
+ case IGMP_AWAKENING_MEMBER:
+ break;
+ case IGMP_REPORTING_MEMBER:
+ if (report_timer_expired) {
+ inm->inm_state = IGMP_IDLE_MEMBER;
+ (void)igmp_v1v2_queue_report(inm,
+ (version == IGMP_VERSION_2) ?
+ IGMP_v2_HOST_MEMBERSHIP_REPORT :
+ IGMP_v1_HOST_MEMBERSHIP_REPORT);
+ }
+ break;
+ case IGMP_G_QUERY_PENDING_MEMBER:
+ case IGMP_SG_QUERY_PENDING_MEMBER:
+ case IGMP_LEAVING_MEMBER:
+ break;
+ }
+}
+
+/*
+ * Update a group's timers for IGMPv3.
+ * Will update the global pending timer flags.
+ * Note: Unlocked read from igi.
+ */
+static void
+igmp_v3_process_group_timers(struct igmp_ifinfo *igi,
+ struct ifqueue *qrq, struct ifqueue *scq,
+ struct in_multi *inm, const int uri_fasthz)
+{
+ int query_response_timer_expired;
+ int state_change_retransmit_timer_expired;
+
+ IN_MULTI_LOCK_ASSERT();
+ IGMP_LOCK_ASSERT();
+
+ query_response_timer_expired = 0;
+ state_change_retransmit_timer_expired = 0;
+
+ /*
+ * During a transition from v1/v2 compatibility mode back to v3,
+ * a group record in REPORTING state may still have its group
+ * timer active. This is a no-op in this function; it is easier
+ * to deal with it here than to complicate the slow-timeout path.
+ */
+ if (inm->inm_timer == 0) {
+ query_response_timer_expired = 0;
+ } else if (--inm->inm_timer == 0) {
+ query_response_timer_expired = 1;
+ } else {
+ V_current_state_timers_running = 1;
+ }
+
+ if (inm->inm_sctimer == 0) {
+ state_change_retransmit_timer_expired = 0;
+ } else if (--inm->inm_sctimer == 0) {
+ state_change_retransmit_timer_expired = 1;
+ } else {
+ V_state_change_timers_running = 1;
+ }
+
+ /* We are in fasttimo, so be quick about it. */
+ if (!state_change_retransmit_timer_expired &&
+ !query_response_timer_expired)
+ return;
+
+ switch (inm->inm_state) {
+ case IGMP_NOT_MEMBER:
+ case IGMP_SILENT_MEMBER:
+ case IGMP_SLEEPING_MEMBER:
+ case IGMP_LAZY_MEMBER:
+ case IGMP_AWAKENING_MEMBER:
+ case IGMP_IDLE_MEMBER:
+ break;
+ case IGMP_G_QUERY_PENDING_MEMBER:
+ case IGMP_SG_QUERY_PENDING_MEMBER:
+ /*
+ * Respond to a previously pending Group-Specific
+ * or Group-and-Source-Specific query by enqueueing
+ * the appropriate Current-State report for
+ * immediate transmission.
+ */
+ if (query_response_timer_expired) {
+ int retval;
+
+ retval = igmp_v3_enqueue_group_record(qrq, inm, 0, 1,
+ (inm->inm_state == IGMP_SG_QUERY_PENDING_MEMBER));
+ CTR2(KTR_IGMPV3, "%s: enqueue record = %d",
+ __func__, retval);
+ inm->inm_state = IGMP_REPORTING_MEMBER;
+ /* XXX Clear recorded sources for next time. */
+ inm_clear_recorded(inm);
+ }
+ /* FALLTHROUGH */
+ case IGMP_REPORTING_MEMBER:
+ case IGMP_LEAVING_MEMBER:
+ if (state_change_retransmit_timer_expired) {
+ /*
+ * State-change retransmission timer fired.
+ * If there are any further pending retransmissions,
+ * set the global pending state-change flag, and
+ * reset the timer.
+ */
+ if (--inm->inm_scrv > 0) {
+ inm->inm_sctimer = uri_fasthz;
+ V_state_change_timers_running = 1;
+ }
+ /*
+ * Retransmit the previously computed state-change
+ * report. If there are no further pending
+ * retransmissions, the mbuf queue will be consumed.
+ * Update T0 state to T1 as we have now sent
+ * a state-change.
+ */
+ (void)igmp_v3_merge_state_changes(inm, scq);
+
+ inm_commit(inm);
+ CTR3(KTR_IGMPV3, "%s: T1 -> T0 for %s/%s", __func__,
+ inet_ntoa(inm->inm_addr), inm->inm_ifp->if_xname);
+
+ /*
+ * If we are leaving the group for good, make sure
+ * we release IGMP's reference to it.
+ * This release must be deferred using a SLIST,
+ * as we are called from a loop which traverses
+ * the in_ifmultiaddr TAILQ.
+ */
+ if (inm->inm_state == IGMP_LEAVING_MEMBER &&
+ inm->inm_scrv == 0) {
+ inm->inm_state = IGMP_NOT_MEMBER;
+ SLIST_INSERT_HEAD(&igi->igi_relinmhead,
+ inm, inm_nrele);
+ }
+ }
+ break;
+ }
+}
+
+
+/*
+ * Suppress a group's pending response to a group or source/group query.
+ *
+ * Do NOT suppress state changes. This leads to IGMPv3 inconsistency.
+ * Do NOT update ST1/ST0 as this operation merely suppresses
+ * the currently pending group record.
+ * Do NOT suppress the response to a general query. It is possible but
+ * it would require adding another state or flag.
+ */
+static void
+igmp_v3_suppress_group_record(struct in_multi *inm)
+{
+
+ IN_MULTI_LOCK_ASSERT();
+
+ KASSERT(inm->inm_igi->igi_version == IGMP_VERSION_3,
+ ("%s: not IGMPv3 mode on link", __func__));
+
+ if (inm->inm_state != IGMP_G_QUERY_PENDING_MEMBER &&
+ inm->inm_state != IGMP_SG_QUERY_PENDING_MEMBER)
+ return;
+
+ if (inm->inm_state == IGMP_SG_QUERY_PENDING_MEMBER)
+ inm_clear_recorded(inm);
+
+ inm->inm_timer = 0;
+ inm->inm_state = IGMP_REPORTING_MEMBER;
+}
+
+/*
+ * Switch to a different IGMP version on the given interface,
+ * as per Section 7.2.1.
+ */
+static void
+igmp_set_version(struct igmp_ifinfo *igi, const int version)
+{
+ int old_version_timer;
+
+ IGMP_LOCK_ASSERT();
+
+ CTR4(KTR_IGMPV3, "%s: switching to v%d on ifp %p(%s)", __func__,
+ version, igi->igi_ifp, igi->igi_ifp->if_xname);
+
+ if (version == IGMP_VERSION_1 || version == IGMP_VERSION_2) {
+ /*
+ * Compute the "Older Version Querier Present" timer as per
+ * Section 8.12.
+ */
+ old_version_timer = igi->igi_rv * igi->igi_qi + igi->igi_qri;
+ old_version_timer *= PR_SLOWHZ;
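+ /*
+ * Worked example: with the RFC 3376 defaults of Robustness
+ * Variable 2, Query Interval 125s and Query Response
+ * Interval 10s, this computes 2 * 125 + 10 = 260 seconds'
+ * worth of PR_SLOWHZ ticks (assuming igi_qi and igi_qri are
+ * kept in seconds here).
+ */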
+
+ if (version == IGMP_VERSION_1) {
+ igi->igi_v1_timer = old_version_timer;
+ igi->igi_v2_timer = 0;
+ } else if (version == IGMP_VERSION_2) {
+ igi->igi_v1_timer = 0;
+ igi->igi_v2_timer = old_version_timer;
+ }
+ }
+
+ if (igi->igi_v1_timer == 0 && igi->igi_v2_timer > 0) {
+ if (igi->igi_version != IGMP_VERSION_2) {
+ igi->igi_version = IGMP_VERSION_2;
+ igmp_v3_cancel_link_timers(igi);
+ }
+ } else if (igi->igi_v1_timer > 0) {
+ if (igi->igi_version != IGMP_VERSION_1) {
+ igi->igi_version = IGMP_VERSION_1;
+ igmp_v3_cancel_link_timers(igi);
+ }
+ }
+}
+
+/*
+ * Cancel pending IGMPv3 timers for the given link and all groups
+ * joined on it; state-change, general-query, and group-query timers.
+ *
+ * Only ever called on a transition from v3 to Compatibility mode. Kill
+ * the timers stone dead (this may be expensive for large numbers of
+ * groups); they will be restarted by Compatibility Mode query processing
+ * if it deems that necessary.
+ */
+static void
+igmp_v3_cancel_link_timers(struct igmp_ifinfo *igi)
+{
+ struct ifmultiaddr *ifma;
+ struct ifnet *ifp;
+ struct in_multi *inm;
+
+ CTR3(KTR_IGMPV3, "%s: cancel v3 timers on ifp %p(%s)", __func__,
+ igi->igi_ifp, igi->igi_ifp->if_xname);
+
+ IN_MULTI_LOCK_ASSERT();
+ IGMP_LOCK_ASSERT();
+
+ /*
+ * Stop the v3 General Query Response on this link stone dead.
+ * If fasttimo is woken up due to V_interface_timers_running,
+ * the flag will be cleared if there are no pending link timers.
+ */
+ igi->igi_v3_timer = 0;
+
+ /*
+ * Now clear the current-state and state-change report timers
+ * for all memberships scoped to this link.
+ */
+ ifp = igi->igi_ifp;
+ IF_ADDR_LOCK(ifp);
+ TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
+ if (ifma->ifma_addr->sa_family != AF_INET ||
+ ifma->ifma_protospec == NULL)
+ continue;
+ inm = (struct in_multi *)ifma->ifma_protospec;
+ switch (inm->inm_state) {
+ case IGMP_NOT_MEMBER:
+ case IGMP_SILENT_MEMBER:
+ case IGMP_IDLE_MEMBER:
+ case IGMP_LAZY_MEMBER:
+ case IGMP_SLEEPING_MEMBER:
+ case IGMP_AWAKENING_MEMBER:
+ /*
+ * These states are either not relevant in v3 mode,
+ * or are unreported. Do nothing.
+ */
+ break;
+ case IGMP_LEAVING_MEMBER:
+ /*
+ * If we are leaving the group and switching to
+ * compatibility mode, we need to release the final
+ * reference held for issuing the INCLUDE {}, and
+ * transition to REPORTING to ensure the host leave
+ * message is sent upstream to the old querier --
+ * transitioning to NOT_MEMBER would lose the leave and race.
+ *
+ * SMPNG: Must drop and re-acquire IF_ADDR_LOCK
+ * around inm_release_locked(), as it is not
+ * a recursive mutex.
+ */
+ IF_ADDR_UNLOCK(ifp);
+ inm_release_locked(inm);
+ IF_ADDR_LOCK(ifp);
+ /* FALLTHROUGH */
+ case IGMP_G_QUERY_PENDING_MEMBER:
+ case IGMP_SG_QUERY_PENDING_MEMBER:
+ inm_clear_recorded(inm);
+ /* FALLTHROUGH */
+ case IGMP_REPORTING_MEMBER:
+ inm->inm_state = IGMP_REPORTING_MEMBER;
+ break;
+ }
+ /*
+ * Always clear state-change and group report timers.
+ * Free any pending IGMPv3 state-change records.
+ */
+ inm->inm_sctimer = 0;
+ inm->inm_timer = 0;
+ _IF_DRAIN(&inm->inm_scq);
+ }
+ IF_ADDR_UNLOCK(ifp);
+}
+
+/*
+ * Update the Older Version Querier Present timers for a link.
+ * See Section 7.2.1 of RFC 3376.
+ */
+static void
+igmp_v1v2_process_querier_timers(struct igmp_ifinfo *igi)
+{
+
+ IGMP_LOCK_ASSERT();
+
+ if (igi->igi_v1_timer == 0 && igi->igi_v2_timer == 0) {
+ /*
+ * IGMPv1 and IGMPv2 Querier Present timers expired.
+ *
+ * Revert to IGMPv3.
+ */
+ if (igi->igi_version != IGMP_VERSION_3) {
+ CTR5(KTR_IGMPV3,
+ "%s: transition from v%d -> v%d on %p(%s)",
+ __func__, igi->igi_version, IGMP_VERSION_3,
+ igi->igi_ifp, igi->igi_ifp->if_xname);
+ igi->igi_version = IGMP_VERSION_3;
+ }
+ } else if (igi->igi_v1_timer == 0 && igi->igi_v2_timer > 0) {
+ /*
+ * IGMPv1 Querier Present timer expired,
+ * IGMPv2 Querier Present timer running.
+ * If IGMPv2 was disabled since last timeout,
+ * revert to IGMPv3.
+ * If IGMPv2 is enabled, revert to IGMPv2.
+ */
+ if (!V_igmp_v2enable) {
+ CTR5(KTR_IGMPV3,
+ "%s: transition from v%d -> v%d on %p(%s)",
+ __func__, igi->igi_version, IGMP_VERSION_3,
+ igi->igi_ifp, igi->igi_ifp->if_xname);
+ igi->igi_v2_timer = 0;
+ igi->igi_version = IGMP_VERSION_3;
+ } else {
+ --igi->igi_v2_timer;
+ if (igi->igi_version != IGMP_VERSION_2) {
+ CTR5(KTR_IGMPV3,
+ "%s: transition from v%d -> v%d on %p(%s)",
+ __func__, igi->igi_version, IGMP_VERSION_2,
+ igi->igi_ifp, igi->igi_ifp->if_xname);
+ igi->igi_version = IGMP_VERSION_2;
+ }
+ }
+ } else if (igi->igi_v1_timer > 0) {
+ /*
+ * IGMPv1 Querier Present timer running.
+ * Stop IGMPv2 timer if running.
+ *
+ * If IGMPv1 was disabled since last timeout,
+ * revert to IGMPv3.
+ * If IGMPv1 is enabled, reset IGMPv2 timer if running.
+ */
+ if (!V_igmp_v1enable) {
+ CTR5(KTR_IGMPV3,
+ "%s: transition from v%d -> v%d on %p(%s)",
+ __func__, igi->igi_version, IGMP_VERSION_3,
+ igi->igi_ifp, igi->igi_ifp->if_xname);
+ igi->igi_v1_timer = 0;
+ igi->igi_version = IGMP_VERSION_3;
+ } else {
+ --igi->igi_v1_timer;
+ }
+ if (igi->igi_v2_timer > 0) {
+ CTR3(KTR_IGMPV3,
+ "%s: cancel v2 timer on %p(%s)",
+ __func__, igi->igi_ifp, igi->igi_ifp->if_xname);
+ igi->igi_v2_timer = 0;
+ }
+ }
+}
+
+/*
+ * Global slowtimo handler.
+ * VIMAGE: Timeout handlers are expected to service all vimages.
+ */
+void
+igmp_slowtimo(void)
+{
+ VNET_ITERATOR_DECL(vnet_iter);
+
+ VNET_LIST_RLOCK_NOSLEEP();
+ VNET_FOREACH(vnet_iter) {
+ CURVNET_SET(vnet_iter);
+ igmp_slowtimo_vnet();
+ CURVNET_RESTORE();
+ }
+ VNET_LIST_RUNLOCK_NOSLEEP();
+}
+
+/*
+ * Per-vnet slowtimo handler.
+ */
+static void
+igmp_slowtimo_vnet(void)
+{
+ struct igmp_ifinfo *igi;
+
+ IGMP_LOCK();
+
+ LIST_FOREACH(igi, &V_igi_head, igi_link) {
+ igmp_v1v2_process_querier_timers(igi);
+ }
+
+ IGMP_UNLOCK();
+}
+
+/*
+ * Dispatch an IGMPv1/v2 host report or leave message.
+ * These are always small enough to fit inside a single mbuf.
+ */
+static int
+igmp_v1v2_queue_report(struct in_multi *inm, const int type)
+{
+ struct ifnet *ifp;
+ struct igmp *igmp;
+ struct ip *ip;
+ struct mbuf *m;
+
+ IN_MULTI_LOCK_ASSERT();
+ IGMP_LOCK_ASSERT();
+
+ ifp = inm->inm_ifp;
+
+ MGETHDR(m, M_DONTWAIT, MT_DATA);
+ if (m == NULL)
+ return (ENOMEM);
+ MH_ALIGN(m, sizeof(struct ip) + sizeof(struct igmp));
+
+ m->m_pkthdr.len = sizeof(struct ip) + sizeof(struct igmp);
+
+ m->m_data += sizeof(struct ip);
+ m->m_len = sizeof(struct igmp);
+
+ igmp = mtod(m, struct igmp *);
+ igmp->igmp_type = type;
+ igmp->igmp_code = 0;
+ igmp->igmp_group = inm->inm_addr;
+ igmp->igmp_cksum = 0;
+ igmp->igmp_cksum = in_cksum(m, sizeof(struct igmp));
+
+ m->m_data -= sizeof(struct ip);
+ m->m_len += sizeof(struct ip);
+
+ ip = mtod(m, struct ip *);
+ ip->ip_tos = 0;
+ ip->ip_len = sizeof(struct ip) + sizeof(struct igmp);
+ ip->ip_off = 0;
+ ip->ip_p = IPPROTO_IGMP;
+ ip->ip_src.s_addr = INADDR_ANY;
+
+ if (type == IGMP_HOST_LEAVE_MESSAGE)
+ ip->ip_dst.s_addr = htonl(INADDR_ALLRTRS_GROUP);
+ else
+ ip->ip_dst = inm->inm_addr;
+
+ igmp_save_context(m, ifp);
+
+ m->m_flags |= M_IGMPV2;
+ if (inm->inm_igi->igi_flags & IGIF_LOOPBACK)
+ m->m_flags |= M_IGMP_LOOP;
+
+ CTR2(KTR_IGMPV3, "%s: netisr_dispatch(NETISR_IGMP, %p)", __func__, m);
+ netisr_dispatch(NETISR_IGMP, m);
+
+ return (0);
+}
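+
+/*
+ * The report built above is a single mbuf laid out as
+ * [struct ip][struct igmp], with ip_len, ip_p and the IGMP checksum
+ * filled in here; the remaining IP header fields (and the Router
+ * Alert option, if enabled) are expected to be completed later on
+ * the netisr output path.
+ */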
+
+/*
+ * Process a state change from the upper layer for the given IPv4 group.
+ *
+ * Each socket holds a reference on the in_multi in its own ip_moptions.
+ * The socket layer will have made the necessary updates to the group
+ * state; it is now up to IGMP to issue a state change report if there
+ * has been any change between T0 (when the last state-change was issued)
+ * and T1 (now).
+ *
+ * We use the IGMPv3 state machine at group level. The IGMP module,
+ * however, decides which IGMP protocol version to speak.
+ * A state change *from* INCLUDE {} always means an initial join.
+ * A state change *to* INCLUDE {} always means a final leave.
+ *
+ * FUTURE: If IGIF_V3LITE is enabled for this interface, then we can
+ * save ourselves a bunch of work; any exclusive mode groups need not
+ * compute source filter lists.
+ *
+ * VIMAGE: curvnet should have been set by caller, as this routine
+ * is called from the socket option handlers.
+ */
+int
+igmp_change_state(struct in_multi *inm)
+{
+ struct igmp_ifinfo *igi;
+ struct ifnet *ifp;
+ int error;
+
+ IN_MULTI_LOCK_ASSERT();
+
+ error = 0;
+
+ /*
+ * Try to detect if the upper layer just asked us to change state
+ * for an interface which has now gone away.
+ */
+ KASSERT(inm->inm_ifma != NULL, ("%s: no ifma", __func__));
+ ifp = inm->inm_ifma->ifma_ifp;
+ if (ifp != NULL) {
+ /*
+ * Sanity check that netinet's notion of ifp is the
+ * same as net's.
+ */
+ KASSERT(inm->inm_ifp == ifp, ("%s: bad ifp", __func__));
+ }
+
+ IGMP_LOCK();
+
+ igi = ((struct in_ifinfo *)ifp->if_afdata[AF_INET])->ii_igmp;
+ KASSERT(igi != NULL, ("%s: no igmp_ifinfo for ifp %p", __func__, ifp));
+
+ /*
+ * If we detect a state transition to or from MCAST_UNDEFINED
+ * for this group, then we are starting or finishing an IGMP
+ * life cycle for this group.
+ */
+ if (inm->inm_st[1].iss_fmode != inm->inm_st[0].iss_fmode) {
+ CTR3(KTR_IGMPV3, "%s: inm transition %d -> %d", __func__,
+ inm->inm_st[0].iss_fmode, inm->inm_st[1].iss_fmode);
+ if (inm->inm_st[0].iss_fmode == MCAST_UNDEFINED) {
+ CTR1(KTR_IGMPV3, "%s: initial join", __func__);
+ error = igmp_initial_join(inm, igi);
+ goto out_locked;
+ } else if (inm->inm_st[1].iss_fmode == MCAST_UNDEFINED) {
+ CTR1(KTR_IGMPV3, "%s: final leave", __func__);
+ igmp_final_leave(inm, igi);
+ goto out_locked;
+ }
+ } else {
+ CTR1(KTR_IGMPV3, "%s: filter set change", __func__);
+ }
+
+ error = igmp_handle_state_change(inm, igi);
+
+out_locked:
+ IGMP_UNLOCK();
+ return (error);
+}
+
+/*
+ * Perform the initial join for an IGMP group.
+ *
+ * When joining a group:
+ * If the group should have its IGMP traffic suppressed, do nothing.
+ * IGMPv1 starts sending IGMPv1 host membership reports.
+ * IGMPv2 starts sending IGMPv2 host membership reports.
+ * IGMPv3 will schedule an IGMPv3 state-change report containing the
+ * initial state of the membership.
+ */
+static int
+igmp_initial_join(struct in_multi *inm, struct igmp_ifinfo *igi)
+{
+ struct ifnet *ifp;
+ struct ifqueue *ifq;
+ int error, retval, syncstates;
+
+ CTR4(KTR_IGMPV3, "%s: initial join %s on ifp %p(%s)",
+ __func__, inet_ntoa(inm->inm_addr), inm->inm_ifp,
+ inm->inm_ifp->if_xname);
+
+ error = 0;
+ syncstates = 1;
+
+ ifp = inm->inm_ifp;
+
+ IN_MULTI_LOCK_ASSERT();
+ IGMP_LOCK_ASSERT();
+
+ KASSERT(igi && igi->igi_ifp == ifp, ("%s: inconsistent ifp", __func__));
+
+ /*
+ * Groups joined on loopback or marked as 'not reported',
+ * e.g. 224.0.0.1, enter the IGMP_SILENT_MEMBER state and
+ * are never reported in any IGMP protocol exchanges.
+ * All other groups enter the appropriate IGMP state machine
+ * for the version in use on this link.
+ * A link marked as IGIF_SILENT causes IGMP to be completely
+ * disabled for the link.
+ */
+ if ((ifp->if_flags & IFF_LOOPBACK) ||
+ (igi->igi_flags & IGIF_SILENT) ||
+ !igmp_isgroupreported(inm->inm_addr)) {
+ CTR1(KTR_IGMPV3,
+"%s: not kicking state machine for silent group", __func__);
+ inm->inm_state = IGMP_SILENT_MEMBER;
+ inm->inm_timer = 0;
+ } else {
+ /*
+ * Deal with overlapping in_multi lifecycle.
+ * If this group was LEAVING, then make sure
+ * we drop the reference we picked up to keep the
+ * group around for the final INCLUDE {} enqueue.
+ */
+ if (igi->igi_version == IGMP_VERSION_3 &&
+ inm->inm_state == IGMP_LEAVING_MEMBER)
+ inm_release_locked(inm);
+
+ inm->inm_state = IGMP_REPORTING_MEMBER;
+
+ switch (igi->igi_version) {
+ case IGMP_VERSION_1:
+ case IGMP_VERSION_2:
+ inm->inm_state = IGMP_IDLE_MEMBER;
+ error = igmp_v1v2_queue_report(inm,
+ (igi->igi_version == IGMP_VERSION_2) ?
+ IGMP_v2_HOST_MEMBERSHIP_REPORT :
+ IGMP_v1_HOST_MEMBERSHIP_REPORT);
+ if (error == 0) {
+ inm->inm_timer = IGMP_RANDOM_DELAY(
+ IGMP_V1V2_MAX_RI * PR_FASTHZ);
+ V_current_state_timers_running = 1;
+ }
+ break;
+
+ case IGMP_VERSION_3:
+ /*
+ * Defer update of T0 to T1, until the first copy
+ * of the state change has been transmitted.
+ */
+ syncstates = 0;
+
+ /*
+ * Immediately enqueue a State-Change Report for
+ * this interface, freeing any previous reports.
+ * Don't kick the timers if there is nothing to do,
+ * or if an error occurred.
+ */
+ ifq = &inm->inm_scq;
+ _IF_DRAIN(ifq);
+ retval = igmp_v3_enqueue_group_record(ifq, inm, 1,
+ 0, 0);
+ CTR2(KTR_IGMPV3, "%s: enqueue record = %d",
+ __func__, retval);
+ if (retval <= 0) {
+ error = -retval;
+ break;
+ }
+
+ /*
+ * Schedule transmission of pending state-change
+ * report up to RV times for this link. The timer
+ * will fire at the next igmp_fasttimo (~200ms),
+ * giving us an opportunity to merge the reports.
+ */
+ if (igi->igi_flags & IGIF_LOOPBACK) {
+ inm->inm_scrv = 1;
+ } else {
+ KASSERT(igi->igi_rv > 1,
+ ("%s: invalid robustness %d", __func__,
+ igi->igi_rv));
+ inm->inm_scrv = igi->igi_rv;
+ }
+ inm->inm_sctimer = 1;
+ V_state_change_timers_running = 1;
+
+ error = 0;
+ break;
+ }
+ }
+
+ /*
+ * Only update the T0 state if state change is atomic,
+ * i.e. we don't need to wait for a timer to fire before we
+ * can consider the state change to have been communicated.
+ */
+ if (syncstates) {
+ inm_commit(inm);
+ CTR3(KTR_IGMPV3, "%s: T1 -> T0 for %s/%s", __func__,
+ inet_ntoa(inm->inm_addr), inm->inm_ifp->if_xname);
+ }
+
+ return (error);
+}
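+
+/*
+ * Worked example for the v1/v2 join path above, assuming PR_FASTHZ is
+ * 5 fast ticks per second (the stock value): inm_timer is armed with
+ * IGMP_RANDOM_DELAY(IGMP_V1V2_MAX_RI * PR_FASTHZ), a uniform pick from
+ * [1, 10 * 5] = [1, 50] ticks, so the unsolicited report goes out
+ * after 0.2 to 10 seconds, within the RFC 1112 report delay bound.
+ */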
+
+/*
+ * Issue an intermediate state change during the IGMP life-cycle.
+ */
+static int
+igmp_handle_state_change(struct in_multi *inm, struct igmp_ifinfo *igi)
+{
+ struct ifnet *ifp;
+ int retval;
+
+ CTR4(KTR_IGMPV3, "%s: state change for %s on ifp %p(%s)",
+ __func__, inet_ntoa(inm->inm_addr), inm->inm_ifp,
+ inm->inm_ifp->if_xname);
+
+ ifp = inm->inm_ifp;
+
+ IN_MULTI_LOCK_ASSERT();
+ IGMP_LOCK_ASSERT();
+
+ KASSERT(igi && igi->igi_ifp == ifp, ("%s: inconsistent ifp", __func__));
+
+ if ((ifp->if_flags & IFF_LOOPBACK) ||
+ (igi->igi_flags & IGIF_SILENT) ||
+ !igmp_isgroupreported(inm->inm_addr) ||
+ (igi->igi_version != IGMP_VERSION_3)) {
+ if (!igmp_isgroupreported(inm->inm_addr)) {
+ CTR1(KTR_IGMPV3,
+"%s: not kicking state machine for silent group", __func__);
+ }
+ CTR1(KTR_IGMPV3, "%s: nothing to do", __func__);
+ inm_commit(inm);
+ CTR3(KTR_IGMPV3, "%s: T1 -> T0 for %s/%s", __func__,
+ inet_ntoa(inm->inm_addr), inm->inm_ifp->if_xname);
+ return (0);
+ }
+
+ _IF_DRAIN(&inm->inm_scq);
+
+ retval = igmp_v3_enqueue_group_record(&inm->inm_scq, inm, 1, 0, 0);
+ CTR2(KTR_IGMPV3, "%s: enqueue record = %d", __func__, retval);
+ if (retval <= 0)
+ return (-retval);
+
+ /*
+ * If record(s) were enqueued, start the state-change
+ * report timer for this group.
+ */
+ inm->inm_scrv = ((igi->igi_flags & IGIF_LOOPBACK) ? 1 : igi->igi_rv);
+ inm->inm_sctimer = 1;
+ V_state_change_timers_running = 1;
+
+ return (0);
+}
+
+/*
+ * Perform the final leave for an IGMP group.
+ *
+ * When leaving a group:
+ * IGMPv1 does nothing.
+ * IGMPv2 sends a host leave message, if and only if we are the reporter.
+ * IGMPv3 enqueues a state-change report containing a transition
+ * to INCLUDE {} for immediate transmission.
+ */
+static void
+igmp_final_leave(struct in_multi *inm, struct igmp_ifinfo *igi)
+{
+ int syncstates;
+
+ syncstates = 1;
+
+ CTR4(KTR_IGMPV3, "%s: final leave %s on ifp %p(%s)",
+ __func__, inet_ntoa(inm->inm_addr), inm->inm_ifp,
+ inm->inm_ifp->if_xname);
+
+ IN_MULTI_LOCK_ASSERT();
+ IGMP_LOCK_ASSERT();
+
+ switch (inm->inm_state) {
+ case IGMP_NOT_MEMBER:
+ case IGMP_SILENT_MEMBER:
+ case IGMP_LEAVING_MEMBER:
+ /* Already leaving or left; do nothing. */
+ CTR1(KTR_IGMPV3,
+"%s: not kicking state machine for silent group", __func__);
+ break;
+ case IGMP_REPORTING_MEMBER:
+ case IGMP_IDLE_MEMBER:
+ case IGMP_G_QUERY_PENDING_MEMBER:
+ case IGMP_SG_QUERY_PENDING_MEMBER:
+ if (igi->igi_version == IGMP_VERSION_2) {
+#ifdef INVARIANTS
+ if (inm->inm_state == IGMP_G_QUERY_PENDING_MEMBER ||
+ inm->inm_state == IGMP_SG_QUERY_PENDING_MEMBER)
+ panic("%s: IGMPv3 state reached, not IGMPv3 mode",
+ __func__);
+#endif
+ igmp_v1v2_queue_report(inm, IGMP_HOST_LEAVE_MESSAGE);
+ inm->inm_state = IGMP_NOT_MEMBER;
+ } else if (igi->igi_version == IGMP_VERSION_3) {
+ /*
+ * Stop group timer and all pending reports.
+ * Immediately enqueue a state-change report
+ * TO_IN {} to be sent on the next fast timeout,
+ * giving us an opportunity to merge reports.
+ */
+ _IF_DRAIN(&inm->inm_scq);
+ inm->inm_timer = 0;
+ if (igi->igi_flags & IGIF_LOOPBACK) {
+ inm->inm_scrv = 1;
+ } else {
+ inm->inm_scrv = igi->igi_rv;
+ }
+ CTR4(KTR_IGMPV3, "%s: Leaving %s/%s with %d "
+ "pending retransmissions.", __func__,
+ inet_ntoa(inm->inm_addr),
+ inm->inm_ifp->if_xname, inm->inm_scrv);
+ if (inm->inm_scrv == 0) {
+ inm->inm_state = IGMP_NOT_MEMBER;
+ inm->inm_sctimer = 0;
+ } else {
+ int retval;
+
+ inm_acquire_locked(inm);
+
+ retval = igmp_v3_enqueue_group_record(
+ &inm->inm_scq, inm, 1, 0, 0);
+ KASSERT(retval != 0,
+ ("%s: enqueue record = %d", __func__,
+ retval));
+
+ inm->inm_state = IGMP_LEAVING_MEMBER;
+ inm->inm_sctimer = 1;
+ V_state_change_timers_running = 1;
+ syncstates = 0;
+ }
+ break;
+ }
+ break;
+ case IGMP_LAZY_MEMBER:
+ case IGMP_SLEEPING_MEMBER:
+ case IGMP_AWAKENING_MEMBER:
+ /* Our reports are suppressed; do nothing. */
+ break;
+ }
+
+ if (syncstates) {
+ inm_commit(inm);
+ CTR3(KTR_IGMPV3, "%s: T1 -> T0 for %s/%s", __func__,
+ inet_ntoa(inm->inm_addr), inm->inm_ifp->if_xname);
+ inm->inm_st[1].iss_fmode = MCAST_UNDEFINED;
+ CTR3(KTR_IGMPV3, "%s: T1 now MCAST_UNDEFINED for %s/%s",
+ __func__, inet_ntoa(inm->inm_addr), inm->inm_ifp->if_xname);
+ }
+}
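+
+/*
+ * Worked example, assuming the default Robustness Variable of 2
+ * (IGMP_RV_INIT): a final leave on a non-loopback IGMPv3 link sets
+ * inm_scrv = 2, so the TO_IN {} record enqueued above is retransmitted
+ * twice, one fast timeout (~200ms) apart, before the group reaches
+ * IGMP_NOT_MEMBER and the reference taken here is finally released.
+ */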
+
+/*
+ * Enqueue an IGMPv3 group record to the given output queue.
+ *
+ * XXX This function could do with having the allocation code
+ * split out, and the multiple-tree-walks coalesced into a single
+ * routine as has been done in igmp_v3_enqueue_filter_change().
+ *
+ * If is_state_change is zero, a current-state record is appended.
+ * If is_state_change is non-zero, a state-change report is appended.
+ *
+ * If is_group_query is non-zero, an mbuf packet chain is allocated.
+ * If is_group_query is zero, and there is a packet at the tail of the
+ * queue with enough free space for the record, it will be appended to
+ * that packet; otherwise a new mbuf packet chain is allocated.
+ *
+ * If is_source_query is non-zero, each source is checked to see if
+ * it was recorded for a Group-Source query, and will be omitted if
+ * it is not both in-mode and recorded.
+ *
+ * The function will attempt to allocate leading space in the packet
+ * for the IP/IGMP header to be prepended without fragmenting the chain.
+ *
+ * If successful the size of all data appended to the queue is returned,
+ * otherwise an error code less than zero is returned, or zero if
+ * no record(s) were appended.
+ */
+static int
+igmp_v3_enqueue_group_record(struct ifqueue *ifq, struct in_multi *inm,
+ const int is_state_change, const int is_group_query,
+ const int is_source_query)
+{
+ struct igmp_grouprec ig;
+ struct igmp_grouprec *pig;
+ struct ifnet *ifp;
+ struct ip_msource *ims, *nims;
+ struct mbuf *m0, *m, *md;
+ int error, is_filter_list_change;
+ int minrec0len, m0srcs, msrcs, nbytes, off;
+ int record_has_sources;
+ int now;
+ int type;
+ in_addr_t naddr;
+ uint8_t mode;
+
+ IN_MULTI_LOCK_ASSERT();
+
+ error = 0;
+ ifp = inm->inm_ifp;
+ is_filter_list_change = 0;
+ m = NULL;
+ m0 = NULL;
+ m0srcs = 0;
+ msrcs = 0;
+ nbytes = 0;
+ nims = NULL;
+ record_has_sources = 1;
+ pig = NULL;
+ type = IGMP_DO_NOTHING;
+ mode = inm->inm_st[1].iss_fmode;
+
+ /*
+ * If we did not transition out of ASM mode during t0->t1,
+ * and there are no source nodes to process, we can skip
+ * the generation of source records.
+ */
+ if (inm->inm_st[0].iss_asm > 0 && inm->inm_st[1].iss_asm > 0 &&
+ inm->inm_nsrc == 0)
+ record_has_sources = 0;
+
+ if (is_state_change) {
+ /*
+ * Queue a state change record.
+ * If the mode did not change, and there are non-ASM
+ * listeners or source filters present,
+ * we potentially need to issue two records for the group.
+ * If we are transitioning to MCAST_UNDEFINED, we need
+ * not send any sources.
+ * If there are ASM listeners, and there was no filter
+ * mode transition of any kind, do nothing.
+ */
+ if (mode != inm->inm_st[0].iss_fmode) {
+ if (mode == MCAST_EXCLUDE) {
+ CTR1(KTR_IGMPV3, "%s: change to EXCLUDE",
+ __func__);
+ type = IGMP_CHANGE_TO_EXCLUDE_MODE;
+ } else {
+ CTR1(KTR_IGMPV3, "%s: change to INCLUDE",
+ __func__);
+ type = IGMP_CHANGE_TO_INCLUDE_MODE;
+ if (mode == MCAST_UNDEFINED)
+ record_has_sources = 0;
+ }
+ } else {
+ if (record_has_sources) {
+ is_filter_list_change = 1;
+ } else {
+ type = IGMP_DO_NOTHING;
+ }
+ }
+ } else {
+ /*
+ * Queue a current state record.
+ */
+ if (mode == MCAST_EXCLUDE) {
+ type = IGMP_MODE_IS_EXCLUDE;
+ } else if (mode == MCAST_INCLUDE) {
+ type = IGMP_MODE_IS_INCLUDE;
+ KASSERT(inm->inm_st[1].iss_asm == 0,
+ ("%s: inm %p is INCLUDE but ASM count is %d",
+ __func__, inm, inm->inm_st[1].iss_asm));
+ }
+ }
+
+ /*
+ * Generate the filter list changes using a separate function.
+ */
+ if (is_filter_list_change)
+ return (igmp_v3_enqueue_filter_change(ifq, inm));
+
+ if (type == IGMP_DO_NOTHING) {
+ CTR3(KTR_IGMPV3, "%s: nothing to do for %s/%s",
+ __func__, inet_ntoa(inm->inm_addr),
+ inm->inm_ifp->if_xname);
+ return (0);
+ }
+
+ /*
+ * If any sources are present, we must be able to fit at least
+ * one in the trailing space of the tail packet's mbuf,
+ * ideally more.
+ */
+ minrec0len = sizeof(struct igmp_grouprec);
+ if (record_has_sources)
+ minrec0len += sizeof(in_addr_t);
+
+ CTR4(KTR_IGMPV3, "%s: queueing %s for %s/%s", __func__,
+ igmp_rec_type_to_str(type), inet_ntoa(inm->inm_addr),
+ inm->inm_ifp->if_xname);
+
+ /*
+ * Check if we have a packet in the tail of the queue for this
+ * group into which the first group record for this group will fit.
+ * Otherwise allocate a new packet.
+ * Always allocate leading space for IP+RA_OPT+IGMP+REPORT.
+ * Note: Group records for G/GSR query responses MUST be sent
+ * in their own packet.
+ */
+ m0 = ifq->ifq_tail;
+ if (!is_group_query &&
+ m0 != NULL &&
+ (m0->m_pkthdr.PH_vt.vt_nrecs + 1 <= IGMP_V3_REPORT_MAXRECS) &&
+ (m0->m_pkthdr.len + minrec0len) <
+ (ifp->if_mtu - IGMP_LEADINGSPACE)) {
+ m0srcs = (ifp->if_mtu - m0->m_pkthdr.len -
+ sizeof(struct igmp_grouprec)) / sizeof(in_addr_t);
+ m = m0;
+ CTR1(KTR_IGMPV3, "%s: use existing packet", __func__);
+ } else {
+ if (_IF_QFULL(ifq)) {
+ CTR1(KTR_IGMPV3, "%s: outbound queue full", __func__);
+ return (-ENOMEM);
+ }
+ m = NULL;
+ m0srcs = (ifp->if_mtu - IGMP_LEADINGSPACE -
+ sizeof(struct igmp_grouprec)) / sizeof(in_addr_t);
+ if (!is_state_change && !is_group_query) {
+ m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
+ if (m)
+ m->m_data += IGMP_LEADINGSPACE;
+ }
+ if (m == NULL) {
+ m = m_gethdr(M_DONTWAIT, MT_DATA);
+ if (m)
+ MH_ALIGN(m, IGMP_LEADINGSPACE);
+ }
+ if (m == NULL)
+ return (-ENOMEM);
+
+ igmp_save_context(m, ifp);
+
+ CTR1(KTR_IGMPV3, "%s: allocated first packet", __func__);
+ }
+
+ /*
+ * Append group record.
+ * If we have sources, we don't know how many yet.
+ */
+ ig.ig_type = type;
+ ig.ig_datalen = 0;
+ ig.ig_numsrc = 0;
+ ig.ig_group = inm->inm_addr;
+ if (!m_append(m, sizeof(struct igmp_grouprec), (void *)&ig)) {
+ if (m != m0)
+ m_freem(m);
+ CTR1(KTR_IGMPV3, "%s: m_append() failed.", __func__);
+ return (-ENOMEM);
+ }
+ nbytes += sizeof(struct igmp_grouprec);
+
+ /*
+ * Append as many sources as will fit in the first packet.
+ * If we are appending to a new packet, the chain allocation
+ * may potentially use clusters; use m_getptr() in this case.
+ * If we are appending to an existing packet, we need to obtain
+ * a pointer to the group record after m_append(), in case a new
+ * mbuf was allocated.
+ * Only append sources which are in-mode at t1. If we are
+ * transitioning to MCAST_UNDEFINED state on the group, do not
+ * include source entries.
+ * Only report recorded sources in our filter set when responding
+ * to a group-source query.
+ */
+ if (record_has_sources) {
+ if (m == m0) {
+ md = m_last(m);
+ pig = (struct igmp_grouprec *)(mtod(md, uint8_t *) +
+ md->m_len - nbytes);
+ } else {
+ md = m_getptr(m, 0, &off);
+ pig = (struct igmp_grouprec *)(mtod(md, uint8_t *) +
+ off);
+ }
+ msrcs = 0;
+ RB_FOREACH_SAFE(ims, ip_msource_tree, &inm->inm_srcs, nims) {
+ CTR2(KTR_IGMPV3, "%s: visit node %s", __func__,
+ inet_ntoa_haddr(ims->ims_haddr));
+ now = ims_get_mode(inm, ims, 1);
+ CTR2(KTR_IGMPV3, "%s: node is %d", __func__, now);
+ if ((now != mode) ||
+ (now == mode && mode == MCAST_UNDEFINED)) {
+ CTR1(KTR_IGMPV3, "%s: skip node", __func__);
+ continue;
+ }
+ if (is_source_query && ims->ims_stp == 0) {
+ CTR1(KTR_IGMPV3, "%s: skip unrecorded node",
+ __func__);
+ continue;
+ }
+ CTR1(KTR_IGMPV3, "%s: append node", __func__);
+ naddr = htonl(ims->ims_haddr);
+ if (!m_append(m, sizeof(in_addr_t), (void *)&naddr)) {
+ if (m != m0)
+ m_freem(m);
+ CTR1(KTR_IGMPV3, "%s: m_append() failed.",
+ __func__);
+ return (-ENOMEM);
+ }
+ nbytes += sizeof(in_addr_t);
+ ++msrcs;
+ if (msrcs == m0srcs)
+ break;
+ }
+ CTR2(KTR_IGMPV3, "%s: msrcs is %d this packet", __func__,
+ msrcs);
+ pig->ig_numsrc = htons(msrcs);
+ nbytes += (msrcs * sizeof(in_addr_t));
+ }
+
+ if (is_source_query && msrcs == 0) {
+ CTR1(KTR_IGMPV3, "%s: no recorded sources to report", __func__);
+ if (m != m0)
+ m_freem(m);
+ return (0);
+ }
+
+ /*
+ * We are good to go with first packet.
+ */
+ if (m != m0) {
+ CTR1(KTR_IGMPV3, "%s: enqueueing first packet", __func__);
+ m->m_pkthdr.PH_vt.vt_nrecs = 1;
+ _IF_ENQUEUE(ifq, m);
+ } else
+ m->m_pkthdr.PH_vt.vt_nrecs++;
+
+ /*
+ * No further work needed if no source list in packet(s).
+ */
+ if (!record_has_sources)
+ return (nbytes);
+
+ /*
+ * Whilst sources remain to be announced, we need to allocate
+ * a new packet and fill out as many sources as will fit.
+ * Always try for a cluster first.
+ */
+ while (nims != NULL) {
+ if (_IF_QFULL(ifq)) {
+ CTR1(KTR_IGMPV3, "%s: outbound queue full", __func__);
+ return (-ENOMEM);
+ }
+ m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
+ if (m)
+ m->m_data += IGMP_LEADINGSPACE;
+ if (m == NULL) {
+ m = m_gethdr(M_DONTWAIT, MT_DATA);
+ if (m)
+ MH_ALIGN(m, IGMP_LEADINGSPACE);
+ }
+ if (m == NULL)
+ return (-ENOMEM);
+ igmp_save_context(m, ifp);
+ md = m_getptr(m, 0, &off);
+ pig = (struct igmp_grouprec *)(mtod(md, uint8_t *) + off);
+ CTR1(KTR_IGMPV3, "%s: allocated next packet", __func__);
+
+ if (!m_append(m, sizeof(struct igmp_grouprec), (void *)&ig)) {
+ if (m != m0)
+ m_freem(m);
+ CTR1(KTR_IGMPV3, "%s: m_append() failed.", __func__);
+ return (-ENOMEM);
+ }
+ m->m_pkthdr.PH_vt.vt_nrecs = 1;
+ nbytes += sizeof(struct igmp_grouprec);
+
+ m0srcs = (ifp->if_mtu - IGMP_LEADINGSPACE -
+ sizeof(struct igmp_grouprec)) / sizeof(in_addr_t);
+
+ msrcs = 0;
+ RB_FOREACH_FROM(ims, ip_msource_tree, nims) {
+ CTR2(KTR_IGMPV3, "%s: visit node %s", __func__,
+ inet_ntoa_haddr(ims->ims_haddr));
+ now = ims_get_mode(inm, ims, 1);
+ if ((now != mode) ||
+ (now == mode && mode == MCAST_UNDEFINED)) {
+ CTR1(KTR_IGMPV3, "%s: skip node", __func__);
+ continue;
+ }
+ if (is_source_query && ims->ims_stp == 0) {
+ CTR1(KTR_IGMPV3, "%s: skip unrecorded node",
+ __func__);
+ continue;
+ }
+ CTR1(KTR_IGMPV3, "%s: append node", __func__);
+ naddr = htonl(ims->ims_haddr);
+ if (!m_append(m, sizeof(in_addr_t), (void *)&naddr)) {
+ if (m != m0)
+ m_freem(m);
+ CTR1(KTR_IGMPV3, "%s: m_append() failed.",
+ __func__);
+ return (-ENOMEM);
+ }
+ ++msrcs;
+ if (msrcs == m0srcs)
+ break;
+ }
+ pig->ig_numsrc = htons(msrcs);
+ nbytes += (msrcs * sizeof(in_addr_t));
+
+ CTR1(KTR_IGMPV3, "%s: enqueueing next packet", __func__);
+ _IF_ENQUEUE(ifq, m);
+ }
+
+ return (nbytes);
+}
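+
+/*
+ * Minimal sketch of the source-packing arithmetic used above, assuming
+ * the structure sizes from igmp.h/igmp_var.h (20-byte ip header, 4-byte
+ * Router Alert option, 8-byte report header, 8-byte group record,
+ * 4-byte sources); the helper name is illustrative, not part of the
+ * original file.
+ */
+#if 0
+static int
+igmp_v3_max_srcs(u_int mtu)
+{
+
+	/* e.g. (1500 - 32 - 8) / 4 = 365 for a stock Ethernet MTU. */
+	return ((mtu - IGMP_LEADINGSPACE - sizeof(struct igmp_grouprec)) /
+	    sizeof(in_addr_t));
+}
+#endif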
+
+/*
+ * Type used to mark record pass completion.
+ * We exploit the fact that the current filter modes on each
+ * ip_msource node can be cast directly to this type.
+ */
+typedef enum {
+ REC_NONE = 0x00, /* MCAST_UNDEFINED */
+ REC_ALLOW = 0x01, /* MCAST_INCLUDE */
+ REC_BLOCK = 0x02, /* MCAST_EXCLUDE */
+ REC_FULL = REC_ALLOW | REC_BLOCK
+} rectype_t;
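+
+/*
+ * Worked example of the cast, assuming the usual MCAST_UNDEFINED=0,
+ * MCAST_INCLUDE=1, MCAST_EXCLUDE=2 values: a source in-mode INCLUDE at
+ * t1 casts to REC_ALLOW (0x01) and EXCLUDE to REC_BLOCK (0x02), while a
+ * source UNDEFINED at t1 is reported as (~mode & REC_FULL), the inverse
+ * of the group filter mode, so a source dropped from an INCLUDE group
+ * becomes a BLOCK_OLD candidate and vice versa.
+ */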
+
+/*
+ * Enqueue an IGMPv3 filter list change to the given output queue.
+ *
+ * Source list filter state is held in an RB-tree. When the filter list
+ * for a group is changed without changing its mode, we need to compute
+ * the deltas between T0 and T1 for each source in the filter set,
+ * and enqueue the appropriate ALLOW_NEW/BLOCK_OLD records.
+ *
+ * As we may potentially queue two record types, and the entire R-B tree
+ * needs to be walked at once, we break this out into its own function
+ * so we can generate a tightly packed queue of packets.
+ *
+ * XXX This could be written to only use one tree walk, although that makes
+ * serializing into the mbuf chains a bit harder. For now we do two walks
+ * which makes things easier on us, and it may or may not be harder on
+ * the L2 cache.
+ *
+ * If successful the size of all data appended to the queue is returned,
+ * otherwise an error code less than zero is returned, or zero if
+ * no record(s) were appended.
+ */
+static int
+igmp_v3_enqueue_filter_change(struct ifqueue *ifq, struct in_multi *inm)
+{
+ static const int MINRECLEN =
+ sizeof(struct igmp_grouprec) + sizeof(in_addr_t);
+ struct ifnet *ifp;
+ struct igmp_grouprec ig;
+ struct igmp_grouprec *pig;
+ struct ip_msource *ims, *nims;
+ struct mbuf *m, *m0, *md;
+ in_addr_t naddr;
+ int m0srcs, nbytes, npbytes, off, rsrcs, schanged;
+ int nallow, nblock;
+ uint8_t mode, now, then;
+ rectype_t crt, drt, nrt;
+
+ IN_MULTI_LOCK_ASSERT();
+
+ if (inm->inm_nsrc == 0 ||
+ (inm->inm_st[0].iss_asm > 0 && inm->inm_st[1].iss_asm > 0))
+ return (0);
+
+ ifp = inm->inm_ifp; /* interface */
+ mode = inm->inm_st[1].iss_fmode; /* filter mode at t1 */
+ crt = REC_NONE; /* current group record type */
+ drt = REC_NONE; /* mask of completed group record types */
+ nrt = REC_NONE; /* record type for current node */
+ m0srcs = 0; /* # source which will fit in current mbuf chain */
+ nbytes = 0; /* # of bytes appended to group's state-change queue */
+ npbytes = 0; /* # of bytes appended this packet */
+ rsrcs = 0; /* # sources encoded in current record */
+ schanged = 0; /* # nodes encoded in overall filter change */
+ nallow = 0; /* # of source entries in ALLOW_NEW */
+ nblock = 0; /* # of source entries in BLOCK_OLD */
+ nims = NULL; /* next tree node pointer */
+
+ /*
+ * For each possible filter record mode.
+ * The first kind of source we encounter tells us which
+ * is the first kind of record we start appending.
+ * If a node transitioned to UNDEFINED at t1, its mode is treated
+ * as the inverse of the group's filter mode.
+ */
+ while (drt != REC_FULL) {
+ do {
+ m0 = ifq->ifq_tail;
+ if (m0 != NULL &&
+ (m0->m_pkthdr.PH_vt.vt_nrecs + 1 <=
+ IGMP_V3_REPORT_MAXRECS) &&
+ (m0->m_pkthdr.len + MINRECLEN) <
+ (ifp->if_mtu - IGMP_LEADINGSPACE)) {
+ m = m0;
+ m0srcs = (ifp->if_mtu - m0->m_pkthdr.len -
+ sizeof(struct igmp_grouprec)) /
+ sizeof(in_addr_t);
+ CTR1(KTR_IGMPV3,
+ "%s: use previous packet", __func__);
+ } else {
+ m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
+ if (m)
+ m->m_data += IGMP_LEADINGSPACE;
+ if (m == NULL) {
+ m = m_gethdr(M_DONTWAIT, MT_DATA);
+ if (m)
+ MH_ALIGN(m, IGMP_LEADINGSPACE);
+ }
+ if (m == NULL) {
+ CTR1(KTR_IGMPV3,
+ "%s: m_get*() failed", __func__);
+ return (-ENOMEM);
+ }
+ m->m_pkthdr.PH_vt.vt_nrecs = 0;
+ igmp_save_context(m, ifp);
+ m0srcs = (ifp->if_mtu - IGMP_LEADINGSPACE -
+ sizeof(struct igmp_grouprec)) /
+ sizeof(in_addr_t);
+ npbytes = 0;
+ CTR1(KTR_IGMPV3,
+ "%s: allocated new packet", __func__);
+ }
+ /*
+ * Append the IGMP group record header to the
+ * current packet's data area.
+ * Recalculate pointer to free space for next
+ * group record, in case m_append() allocated
+ * a new mbuf or cluster.
+ */
+ memset(&ig, 0, sizeof(ig));
+ ig.ig_group = inm->inm_addr;
+ if (!m_append(m, sizeof(ig), (void *)&ig)) {
+ if (m != m0)
+ m_freem(m);
+ CTR1(KTR_IGMPV3,
+ "%s: m_append() failed", __func__);
+ return (-ENOMEM);
+ }
+ npbytes += sizeof(struct igmp_grouprec);
+ if (m != m0) {
+ /* new packet; offset in chain */
+ md = m_getptr(m, npbytes -
+ sizeof(struct igmp_grouprec), &off);
+ pig = (struct igmp_grouprec *)(mtod(md,
+ uint8_t *) + off);
+ } else {
+ /* current packet; offset from last append */
+ md = m_last(m);
+ pig = (struct igmp_grouprec *)(mtod(md,
+ uint8_t *) + md->m_len -
+ sizeof(struct igmp_grouprec));
+ }
+ /*
+ * Begin walking the tree for this record type
+ * pass, or continue from where we left off
+ * previously if we had to allocate a new packet.
+ * Only report deltas in-mode at t1.
+ * We need not report included sources as allowed
+ * if we are in inclusive mode on the group;
+ * however, the converse is not true.
+ */
+ rsrcs = 0;
+ if (nims == NULL)
+ nims = RB_MIN(ip_msource_tree, &inm->inm_srcs);
+ RB_FOREACH_FROM(ims, ip_msource_tree, nims) {
+ CTR2(KTR_IGMPV3, "%s: visit node %s",
+ __func__, inet_ntoa_haddr(ims->ims_haddr));
+ now = ims_get_mode(inm, ims, 1);
+ then = ims_get_mode(inm, ims, 0);
+ CTR3(KTR_IGMPV3, "%s: mode: t0 %d, t1 %d",
+ __func__, then, now);
+ if (now == then) {
+ CTR1(KTR_IGMPV3,
+ "%s: skip unchanged", __func__);
+ continue;
+ }
+ if (mode == MCAST_EXCLUDE &&
+ now == MCAST_INCLUDE) {
+ CTR1(KTR_IGMPV3,
+ "%s: skip IN src on EX group",
+ __func__);
+ continue;
+ }
+ nrt = (rectype_t)now;
+ if (nrt == REC_NONE)
+ nrt = (rectype_t)(~mode & REC_FULL);
+ if (schanged++ == 0) {
+ crt = nrt;
+ } else if (crt != nrt)
+ continue;
+ naddr = htonl(ims->ims_haddr);
+ if (!m_append(m, sizeof(in_addr_t),
+ (void *)&naddr)) {
+ if (m != m0)
+ m_freem(m);
+ CTR1(KTR_IGMPV3,
+ "%s: m_append() failed", __func__);
+ return (-ENOMEM);
+ }
+ nallow += !!(crt == REC_ALLOW);
+ nblock += !!(crt == REC_BLOCK);
+ if (++rsrcs == m0srcs)
+ break;
+ }
+ /*
+ * If we did not append any tree nodes on this
+ * pass, back out of allocations.
+ */
+ if (rsrcs == 0) {
+ npbytes -= sizeof(struct igmp_grouprec);
+ if (m != m0) {
+ CTR1(KTR_IGMPV3,
+ "%s: m_free(m)", __func__);
+ m_freem(m);
+ } else {
+ CTR1(KTR_IGMPV3,
+ "%s: m_adj(m, -ig)", __func__);
+ m_adj(m, -((int)sizeof(
+ struct igmp_grouprec)));
+ }
+ continue;
+ }
+ npbytes += (rsrcs * sizeof(in_addr_t));
+ if (crt == REC_ALLOW)
+ pig->ig_type = IGMP_ALLOW_NEW_SOURCES;
+ else if (crt == REC_BLOCK)
+ pig->ig_type = IGMP_BLOCK_OLD_SOURCES;
+ pig->ig_numsrc = htons(rsrcs);
+ /*
+ * Count the new group record, and enqueue this
+ * packet if it wasn't already queued.
+ */
+ m->m_pkthdr.PH_vt.vt_nrecs++;
+ if (m != m0)
+ _IF_ENQUEUE(ifq, m);
+ nbytes += npbytes;
+ } while (nims != NULL);
+ drt |= crt;
+ crt = (~crt & REC_FULL);
+ }
+
+ CTR3(KTR_IGMPV3, "%s: queued %d ALLOW_NEW, %d BLOCK_OLD", __func__,
+ nallow, nblock);
+
+ return (nbytes);
+}
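+
+/*
+ * Worked example of the pass bookkeeping above: the first delta seen
+ * fixes crt, say REC_ALLOW; once that walk completes, drt |= crt marks
+ * ALLOW records done and crt = (~crt & REC_FULL) flips to REC_BLOCK
+ * for the second pass, after which drt == REC_FULL ends the loop.
+ */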
+
+static int
+igmp_v3_merge_state_changes(struct in_multi *inm, struct ifqueue *ifscq)
+{
+ struct ifqueue *gq;
+ struct mbuf *m; /* pending state-change */
+ struct mbuf *m0; /* copy of pending state-change */
+ struct mbuf *mt; /* last state-change in packet */
+ int docopy, domerge;
+ u_int recslen;
+
+ docopy = 0;
+ domerge = 0;
+ recslen = 0;
+
+ IN_MULTI_LOCK_ASSERT();
+ IGMP_LOCK_ASSERT();
+
+ /*
+ * If there are further pending retransmissions, make a writable
+ * copy of each queued state-change message before merging.
+ */
+ if (inm->inm_scrv > 0)
+ docopy = 1;
+
+ gq = &inm->inm_scq;
+#ifdef KTR
+ if (gq->ifq_head == NULL) {
+ CTR2(KTR_IGMPV3, "%s: WARNING: queue for inm %p is empty",
+ __func__, inm);
+ }
+#endif
+
+ m = gq->ifq_head;
+ while (m != NULL) {
+ /*
+ * Only merge the report into the current packet if
+ * there is sufficient space to do so; an IGMPv3 report
+ * packet may only contain 65,535 group records.
+ * Always use a simple mbuf chain concatenation to do this,
+ * as large state changes for single groups may have
+ * allocated clusters.
+ */
+ domerge = 0;
+ mt = ifscq->ifq_tail;
+ if (mt != NULL) {
+ recslen = m_length(m, NULL);
+
+ if ((mt->m_pkthdr.PH_vt.vt_nrecs +
+ m->m_pkthdr.PH_vt.vt_nrecs <=
+ IGMP_V3_REPORT_MAXRECS) &&
+ (mt->m_pkthdr.len + recslen <=
+ (inm->inm_ifp->if_mtu - IGMP_LEADINGSPACE)))
+ domerge = 1;
+ }
+
+ if (!domerge && _IF_QFULL(gq)) {
+ CTR2(KTR_IGMPV3,
+ "%s: outbound queue full, skipping whole packet %p",
+ __func__, m);
+ mt = m->m_nextpkt;
+ if (!docopy)
+ m_freem(m);
+ m = mt;
+ continue;
+ }
+
+ if (!docopy) {
+ CTR2(KTR_IGMPV3, "%s: dequeueing %p", __func__, m);
+ _IF_DEQUEUE(gq, m0);
+ m = m0->m_nextpkt;
+ } else {
+ CTR2(KTR_IGMPV3, "%s: copying %p", __func__, m);
+ m0 = m_dup(m, M_NOWAIT);
+ if (m0 == NULL)
+ return (ENOMEM);
+ m0->m_nextpkt = NULL;
+ m = m->m_nextpkt;
+ }
+
+ if (!domerge) {
+ CTR3(KTR_IGMPV3, "%s: queueing %p to ifscq %p)",
+ __func__, m0, ifscq);
+ _IF_ENQUEUE(ifscq, m0);
+ } else {
+ struct mbuf *mtl; /* last mbuf of packet mt */
+
+ CTR3(KTR_IGMPV3, "%s: merging %p with ifscq tail %p)",
+ __func__, m0, mt);
+
+ mtl = m_last(mt);
+ m0->m_flags &= ~M_PKTHDR;
+ mt->m_pkthdr.len += recslen;
+ mt->m_pkthdr.PH_vt.vt_nrecs +=
+ m0->m_pkthdr.PH_vt.vt_nrecs;
+
+ mtl->m_next = m0;
+ }
+ }
+
+ return (0);
+}
+
+/*
+ * Respond to a pending IGMPv3 General Query.
+ */
+static void
+igmp_v3_dispatch_general_query(struct igmp_ifinfo *igi)
+{
+ struct ifmultiaddr *ifma, *tifma;
+ struct ifnet *ifp;
+ struct in_multi *inm;
+ int retval, loop;
+
+ IN_MULTI_LOCK_ASSERT();
+ IGMP_LOCK_ASSERT();
+
+ KASSERT(igi->igi_version == IGMP_VERSION_3,
+ ("%s: called when version %d", __func__, igi->igi_version));
+
+ ifp = igi->igi_ifp;
+
+ IF_ADDR_LOCK(ifp);
+ TAILQ_FOREACH_SAFE(ifma, &ifp->if_multiaddrs, ifma_link, tifma) {
+ if (ifma->ifma_addr->sa_family != AF_INET ||
+ ifma->ifma_protospec == NULL)
+ continue;
+
+ inm = (struct in_multi *)ifma->ifma_protospec;
+ KASSERT(ifp == inm->inm_ifp,
+ ("%s: inconsistent ifp", __func__));
+
+ switch (inm->inm_state) {
+ case IGMP_NOT_MEMBER:
+ case IGMP_SILENT_MEMBER:
+ break;
+ case IGMP_REPORTING_MEMBER:
+ case IGMP_IDLE_MEMBER:
+ case IGMP_LAZY_MEMBER:
+ case IGMP_SLEEPING_MEMBER:
+ case IGMP_AWAKENING_MEMBER:
+ inm->inm_state = IGMP_REPORTING_MEMBER;
+ retval = igmp_v3_enqueue_group_record(&igi->igi_gq,
+ inm, 0, 0, 0);
+ CTR2(KTR_IGMPV3, "%s: enqueue record = %d",
+ __func__, retval);
+ break;
+ case IGMP_G_QUERY_PENDING_MEMBER:
+ case IGMP_SG_QUERY_PENDING_MEMBER:
+ case IGMP_LEAVING_MEMBER:
+ break;
+ }
+ }
+ IF_ADDR_UNLOCK(ifp);
+
+ loop = (igi->igi_flags & IGIF_LOOPBACK) ? 1 : 0;
+ igmp_dispatch_queue(&igi->igi_gq, IGMP_MAX_RESPONSE_BURST, loop);
+
+ /*
+ * Slew transmission of bursts over 500ms intervals.
+ */
+ if (igi->igi_gq.ifq_head != NULL) {
+ igi->igi_v3_timer = 1 + IGMP_RANDOM_DELAY(
+ IGMP_RESPONSE_BURST_INTERVAL);
+ V_interface_timers_running = 1;
+ }
+}
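+
+/*
+ * Pacing sketch, assuming PR_FASTHZ is 5: each pass transmits at most
+ * IGMP_MAX_RESPONSE_BURST (4) packets, and if the queue is non-empty
+ * the next burst fires after 1 + IGMP_RANDOM_DELAY(PR_FASTHZ / 2),
+ * i.e. 2 or 3 fast ticks (400-600ms), slewing a large general-query
+ * response across several burst intervals.
+ */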
+
+/*
+ * Transmit the next pending IGMP message in the output queue.
+ *
+ * We get called from netisr_processqueue(). A mutex private to igmpoq
+ * will be acquired and released around this routine.
+ *
+ * VIMAGE: Needs to store/restore vnet pointer on a per-mbuf-chain basis.
+ * MRT: Nothing needs to be done, as IGMP traffic is always local to
+ * a link and uses a link-scope multicast address.
+ */
+static void
+igmp_intr(struct mbuf *m)
+{
+ struct ip_moptions imo;
+ struct ifnet *ifp;
+ struct mbuf *ipopts, *m0;
+ int error;
+ uint32_t ifindex;
+
+ CTR2(KTR_IGMPV3, "%s: transmit %p", __func__, m);
+
+ /*
+ * Set VNET image pointer from enqueued mbuf chain
+ * before doing anything else. Whilst we use interface
+ * indexes to guard against interface detach, they are
+ * unique to each VIMAGE and must be retrieved.
+ */
+ CURVNET_SET((struct vnet *)(m->m_pkthdr.header));
+ ifindex = igmp_restore_context(m);
+
+ /*
+ * Check if the ifnet still exists. This limits the scope of
+ * any race in the absence of a global ifp lock for low cost
+ * (an array lookup).
+ */
+ ifp = ifnet_byindex(ifindex);
+ if (ifp == NULL) {
+ CTR3(KTR_IGMPV3, "%s: dropped %p as ifindex %u went away.",
+ __func__, m, ifindex);
+ m_freem(m);
+ IPSTAT_INC(ips_noroute);
+ goto out;
+ }
+
+ ipopts = V_igmp_sendra ? m_raopt : NULL;
+
+ imo.imo_multicast_ttl = 1;
+ imo.imo_multicast_vif = -1;
+ imo.imo_multicast_loop = (V_ip_mrouter != NULL);
+
+ /*
+ * If the user requested that IGMP traffic be explicitly
+ * redirected to the loopback interface (e.g. they are running a
+ * MANET interface and the routing protocol needs to see the
+ * updates), handle this now.
+ */
+ if (m->m_flags & M_IGMP_LOOP)
+ imo.imo_multicast_ifp = V_loif;
+ else
+ imo.imo_multicast_ifp = ifp;
+
+ if (m->m_flags & M_IGMPV2) {
+ m0 = m;
+ } else {
+ m0 = igmp_v3_encap_report(ifp, m);
+ if (m0 == NULL) {
+ CTR2(KTR_IGMPV3, "%s: dropped %p", __func__, m);
+ m_freem(m);
+ IPSTAT_INC(ips_odropped);
+ goto out;
+ }
+ }
+
+ igmp_scrub_context(m0);
+ m->m_flags &= ~(M_PROTOFLAGS);
+ m0->m_pkthdr.rcvif = V_loif;
+#ifdef MAC
+ mac_netinet_igmp_send(ifp, m0);
+#endif
+ error = ip_output(m0, ipopts, NULL, 0, &imo, NULL);
+ if (error) {
+ CTR3(KTR_IGMPV3, "%s: ip_output(%p) = %d", __func__, m0, error);
+ goto out;
+ }
+
+ IGMPSTAT_INC(igps_snd_reports);
+
+out:
+ /*
+ * We must restore the existing vnet pointer before
+ * continuing as we are run from netisr context.
+ */
+ CURVNET_RESTORE();
+}
+
+/*
+ * Encapsulate an IGMPv3 report.
+ *
+ * The internal mbuf flag M_IGMPV3_HDR is used to indicate that the mbuf
+ * chain has already had its IP/IGMPv3 header prepended. In this case
+ * the function will not attempt to prepend; the lengths and checksums
+ * will however be re-computed.
+ *
+ * Returns a pointer to the new mbuf chain head, or NULL if the
+ * allocation failed.
+ */
+static struct mbuf *
+igmp_v3_encap_report(struct ifnet *ifp, struct mbuf *m)
+{
+ struct igmp_report *igmp;
+ struct ip *ip;
+ int hdrlen, igmpreclen;
+
+ KASSERT((m->m_flags & M_PKTHDR),
+ ("%s: mbuf chain %p is !M_PKTHDR", __func__, m));
+
+ igmpreclen = m_length(m, NULL);
+ hdrlen = sizeof(struct ip) + sizeof(struct igmp_report);
+
+ if (m->m_flags & M_IGMPV3_HDR) {
+ igmpreclen -= hdrlen;
+ } else {
+ M_PREPEND(m, hdrlen, M_DONTWAIT);
+ if (m == NULL)
+ return (NULL);
+ m->m_flags |= M_IGMPV3_HDR;
+ }
+
+ CTR2(KTR_IGMPV3, "%s: igmpreclen is %d", __func__, igmpreclen);
+
+ m->m_data += sizeof(struct ip);
+ m->m_len -= sizeof(struct ip);
+
+ igmp = mtod(m, struct igmp_report *);
+ igmp->ir_type = IGMP_v3_HOST_MEMBERSHIP_REPORT;
+ igmp->ir_rsv1 = 0;
+ igmp->ir_rsv2 = 0;
+ igmp->ir_numgrps = htons(m->m_pkthdr.PH_vt.vt_nrecs);
+ igmp->ir_cksum = 0;
+ igmp->ir_cksum = in_cksum(m, sizeof(struct igmp_report) + igmpreclen);
+ m->m_pkthdr.PH_vt.vt_nrecs = 0;
+
+ m->m_data -= sizeof(struct ip);
+ m->m_len += sizeof(struct ip);
+
+ ip = mtod(m, struct ip *);
+ ip->ip_tos = IPTOS_PREC_INTERNETCONTROL;
+ ip->ip_len = hdrlen + igmpreclen;
+ ip->ip_off = IP_DF;
+ ip->ip_p = IPPROTO_IGMP;
+ ip->ip_sum = 0;
+
+ ip->ip_src.s_addr = INADDR_ANY;
+
+ if (m->m_flags & M_IGMP_LOOP) {
+ struct in_ifaddr *ia;
+
+ IFP_TO_IA(ifp, ia);
+ if (ia != NULL) {
+ ip->ip_src = ia->ia_addr.sin_addr;
+ ifa_free(&ia->ia_ifa);
+ }
+ }
+
+ ip->ip_dst.s_addr = htonl(INADDR_ALLRPTS_GROUP);
+
+ return (m);
+}
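+
+/*
+ * Size sketch, assuming a 20-byte struct ip and the 8-byte report
+ * header: a report carrying one group record with two sources has
+ * hdrlen = 20 + 8 = 28 and igmpreclen = 8 + 2 * 4 = 16, so ip_len is
+ * 44 bytes here; the Router Alert option is added later by ip_output()
+ * from the ipopts chain passed in igmp_intr().
+ */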
+
+#ifdef KTR
+static char *
+igmp_rec_type_to_str(const int type)
+{
+
+ switch (type) {
+ case IGMP_CHANGE_TO_EXCLUDE_MODE:
+ return "TO_EX";
+ case IGMP_CHANGE_TO_INCLUDE_MODE:
+ return "TO_IN";
+ case IGMP_MODE_IS_EXCLUDE:
+ return "MODE_EX";
+ case IGMP_MODE_IS_INCLUDE:
+ return "MODE_IN";
+ case IGMP_ALLOW_NEW_SOURCES:
+ return "ALLOW_NEW";
+ case IGMP_BLOCK_OLD_SOURCES:
+ return "BLOCK_OLD";
+ default:
+ break;
+ }
+ return "unknown";
+}
+#endif
+
+static void
+igmp_init(void *unused __unused)
+{
+
+ CTR1(KTR_IGMPV3, "%s: initializing", __func__);
+
+ IGMP_LOCK_INIT();
+
+ m_raopt = igmp_ra_alloc();
+
+ netisr_register(&igmp_nh);
+}
+SYSINIT(igmp_init, SI_SUB_PSEUDO, SI_ORDER_MIDDLE, igmp_init, NULL);
+
+static void
+igmp_uninit(void *unused __unused)
+{
+
+ CTR1(KTR_IGMPV3, "%s: tearing down", __func__);
+
+ netisr_unregister(&igmp_nh);
+
+ m_free(m_raopt);
+ m_raopt = NULL;
+
+ IGMP_LOCK_DESTROY();
+}
+SYSUNINIT(igmp_uninit, SI_SUB_PSEUDO, SI_ORDER_MIDDLE, igmp_uninit, NULL);
+
+static void
+vnet_igmp_init(const void *unused __unused)
+{
+
+ CTR1(KTR_IGMPV3, "%s: initializing", __func__);
+
+ LIST_INIT(&V_igi_head);
+}
+VNET_SYSINIT(vnet_igmp_init, SI_SUB_PSEUDO, SI_ORDER_ANY, vnet_igmp_init,
+ NULL);
+
+static void
+vnet_igmp_uninit(const void *unused __unused)
+{
+
+ CTR1(KTR_IGMPV3, "%s: tearing down", __func__);
+
+ KASSERT(LIST_EMPTY(&V_igi_head),
+ ("%s: igi list not empty; ifnets not detached?", __func__));
+}
+VNET_SYSUNINIT(vnet_igmp_uninit, SI_SUB_PSEUDO, SI_ORDER_ANY,
+ vnet_igmp_uninit, NULL);
+
+static int
+igmp_modevent(module_t mod, int type, void *unused __unused)
+{
+
+ switch (type) {
+ case MOD_LOAD:
+ case MOD_UNLOAD:
+ break;
+ default:
+ return (EOPNOTSUPP);
+ }
+ return (0);
+}
+
+static moduledata_t igmp_mod = {
+ "igmp",
+ igmp_modevent,
+ 0
+};
+DECLARE_MODULE(igmp, igmp_mod, SI_SUB_PSEUDO, SI_ORDER_ANY);
diff --git a/rtems/freebsd/netinet/igmp.h b/rtems/freebsd/netinet/igmp.h
new file mode 100644
index 00000000..3c7df0d3
--- /dev/null
+++ b/rtems/freebsd/netinet/igmp.h
@@ -0,0 +1,148 @@
+/*-
+ * Copyright (c) 1988 Stephen Deering.
+ * Copyright (c) 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Stephen Deering of Stanford University.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)igmp.h 8.1 (Berkeley) 6/10/93
+ * $FreeBSD$
+ */
+
+#ifndef _NETINET_IGMP_HH_
+#define _NETINET_IGMP_HH_
+
+/*
+ * Internet Group Management Protocol (IGMP) definitions.
+ *
+ * Written by Steve Deering, Stanford, May 1988.
+ *
+ * MULTICAST Revision: 3.5.1.2
+ */
+
+/* Minimum length of any IGMP protocol message. */
+#define IGMP_MINLEN 8
+
+/*
+ * IGMPv1/v2 query and host report format.
+ */
+struct igmp {
+ u_char igmp_type; /* version & type of IGMP message */
+ u_char igmp_code; /* subtype for routing msgs */
+ u_short igmp_cksum; /* IP-style checksum */
+ struct in_addr igmp_group; /* group address being reported */
+}; /* (zero for queries) */
+
+/*
+ * IGMP v3 query format.
+ */
+struct igmpv3 {
+ u_char igmp_type; /* version & type of IGMP message */
+ u_char igmp_code; /* subtype for routing msgs */
+ u_short igmp_cksum; /* IP-style checksum */
+ struct in_addr igmp_group; /* group address being reported */
+ /* (zero for queries) */
+ u_char igmp_misc; /* reserved/suppress/robustness */
+ u_char igmp_qqi; /* querier's query interval */
+ u_short igmp_numsrc; /* number of sources */
+ /*struct in_addr igmp_sources[1];*/ /* source addresses */
+};
+#define IGMP_V3_QUERY_MINLEN 12
+#define IGMP_EXP(x) (((x) >> 4) & 0x07)
+#define IGMP_MANT(x) ((x) & 0x0f)
+#define IGMP_QRESV(x) (((x) >> 4) & 0x0f)
+#define IGMP_SFLAG(x) (((x) >> 3) & 0x01)
+#define IGMP_QRV(x) ((x) & 0x07)
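+
+/*
+ * Decoding sketch for the exponent/mantissa fields above, following
+ * the RFC 3376 rule that codes >= 128 encode (mant | 0x10) << (exp + 3)
+ * tenths of a second; the helper name is illustrative only.
+ */
+#if 0
+static int
+igmp_decode_maxresp(u_char code)
+{
+
+	if (code >= 128)
+		return ((IGMP_MANT(code) | 0x10) << (IGMP_EXP(code) + 3));
+	return (code);		/* small values are literal tenths */
+}
+/* igmp_decode_maxresp(0x8a): mant 0xa, exp 0 -> 208 tenths = 20.8s. */
+#endif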
+
+struct igmp_grouprec {
+ u_char ig_type; /* record type */
+ u_char ig_datalen; /* length of auxiliary data */
+ u_short ig_numsrc; /* number of sources */
+ struct in_addr ig_group; /* group address being reported */
+ /*struct in_addr ig_sources[1];*/ /* source addresses */
+};
+#define IGMP_GRPREC_HDRLEN 8
+
+/*
+ * IGMPv3 host membership report header.
+ */
+struct igmp_report {
+ u_char ir_type; /* IGMP_v3_HOST_MEMBERSHIP_REPORT */
+ u_char ir_rsv1; /* must be zero */
+ u_short ir_cksum; /* checksum */
+ u_short ir_rsv2; /* must be zero */
+ u_short ir_numgrps; /* number of group records */
+ /*struct igmp_grouprec ir_groups[1];*/ /* group records */
+};
+#define IGMP_V3_REPORT_MINLEN 8
+#define IGMP_V3_REPORT_MAXRECS 65535
+
+/*
+ * Message types, including version number.
+ */
+#define IGMP_HOST_MEMBERSHIP_QUERY 0x11 /* membership query */
+#define IGMP_v1_HOST_MEMBERSHIP_REPORT 0x12 /* Ver. 1 membership report */
+#define IGMP_DVMRP 0x13 /* DVMRP routing message */
+#define IGMP_PIM 0x14 /* PIMv1 message (historic) */
+#define IGMP_v2_HOST_MEMBERSHIP_REPORT 0x16 /* Ver. 2 membership report */
+#define IGMP_HOST_LEAVE_MESSAGE 0x17 /* Leave-group message */
+#define IGMP_MTRACE_REPLY 0x1e /* mtrace(8) reply */
+#define IGMP_MTRACE_QUERY 0x1f /* mtrace(8) probe */
+#define IGMP_v3_HOST_MEMBERSHIP_REPORT 0x22 /* Ver. 3 membership report */
+
+/*
+ * IGMPv3 report modes.
+ */
+#define IGMP_DO_NOTHING 0 /* don't send a record */
+#define IGMP_MODE_IS_INCLUDE 1 /* MODE_IN */
+#define IGMP_MODE_IS_EXCLUDE 2 /* MODE_EX */
+#define IGMP_CHANGE_TO_INCLUDE_MODE 3 /* TO_IN */
+#define IGMP_CHANGE_TO_EXCLUDE_MODE 4 /* TO_EX */
+#define IGMP_ALLOW_NEW_SOURCES 5 /* ALLOW_NEW */
+#define IGMP_BLOCK_OLD_SOURCES 6 /* BLOCK_OLD */
+
+/*
+ * IGMPv3 query types.
+ */
+#define IGMP_V3_GENERAL_QUERY 1
+#define IGMP_V3_GROUP_QUERY 2
+#define IGMP_V3_GROUP_SOURCE_QUERY 3
+
+/*
+ * Maximum report interval for IGMP v1/v2 host membership reports [RFC 1112]
+ */
+#define IGMP_V1V2_MAX_RI 10
+#define IGMP_MAX_HOST_REPORT_DELAY IGMP_V1V2_MAX_RI
+
+/*
+ * IGMP_TIMER_SCALE denotes that the igmp code field specifies
+ * time in tenths of a second.
+ */
+#define IGMP_TIMER_SCALE 10
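+
+/*
+ * Example: an IGMPv1/v2 query carrying igmp_code 100 advertises a
+ * maximum response time of 100 / IGMP_TIMER_SCALE = 10 seconds.
+ */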
+
+#endif /* _NETINET_IGMP_HH_ */
diff --git a/rtems/freebsd/netinet/igmp_var.h b/rtems/freebsd/netinet/igmp_var.h
new file mode 100644
index 00000000..e1abe6ab
--- /dev/null
+++ b/rtems/freebsd/netinet/igmp_var.h
@@ -0,0 +1,225 @@
+/*-
+ * Copyright (c) 1988 Stephen Deering.
+ * Copyright (c) 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Stephen Deering of Stanford University.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: @(#)igmp_var.h 8.1 (Berkeley) 7/19/93
+ * $FreeBSD$
+ */
+
+#ifndef _NETINET_IGMP_VAR_HH_
+#define _NETINET_IGMP_VAR_HH_
+
+/*
+ * Internet Group Management Protocol (IGMP),
+ * implementation-specific definitions.
+ *
+ * Written by Steve Deering, Stanford, May 1988.
+ *
+ * MULTICAST Revision: 3.5.1.3
+ */
+
+#ifndef BURN_BRIDGES
+/*
+ * Pre-IGMPV3 igmpstat structure.
+ */
+struct oigmpstat {
+ u_int igps_rcv_total; /* total IGMP messages received */
+ u_int igps_rcv_tooshort; /* received with too few bytes */
+ u_int igps_rcv_badsum; /* received with bad checksum */
+ u_int igps_rcv_queries; /* received membership queries */
+ u_int igps_rcv_badqueries; /* received invalid queries */
+ u_int igps_rcv_reports; /* received membership reports */
+ u_int igps_rcv_badreports; /* received invalid reports */
+ u_int igps_rcv_ourreports; /* received reports for our groups */
+ u_int igps_snd_reports; /* sent membership reports */
+ u_int igps_rcv_toolong; /* received with too many bytes */
+};
+#endif
+
+/*
+ * IGMPv3 protocol statistics.
+ */
+struct igmpstat {
+ /*
+ * Structure header (to insulate ABI changes).
+ */
+ uint32_t igps_version; /* version of this structure */
+ uint32_t igps_len; /* length of this structure */
+ /*
+ * Message statistics.
+ */
+ uint64_t igps_rcv_total; /* total IGMP messages received */
+ uint64_t igps_rcv_tooshort; /* received with too few bytes */
+ uint64_t igps_rcv_badttl; /* received with ttl other than 1 */
+ uint64_t igps_rcv_badsum; /* received with bad checksum */
+ /*
+ * Query statistics.
+ */
+ uint64_t igps_rcv_v1v2_queries; /* received IGMPv1/IGMPv2 queries */
+ uint64_t igps_rcv_v3_queries; /* received IGMPv3 queries */
+ uint64_t igps_rcv_badqueries; /* received invalid queries */
+ uint64_t igps_rcv_gen_queries; /* received general queries */
+ uint64_t igps_rcv_group_queries;/* received group queries */
+ uint64_t igps_rcv_gsr_queries; /* received group-source queries */
+ uint64_t igps_drop_gsr_queries; /* dropped group-source queries */
+ /*
+ * Report statistics.
+ */
+ uint64_t igps_rcv_reports; /* received membership reports */
+ uint64_t igps_rcv_badreports; /* received invalid reports */
+ uint64_t igps_rcv_ourreports; /* received reports for our groups */
+ uint64_t igps_rcv_nora; /* received w/o Router Alert option */
+ uint64_t igps_snd_reports; /* sent membership reports */
+ /*
+ * Padding for future additions.
+ */
+ uint64_t __igps_pad[4];
+};
+#define IGPS_VERSION_3 3 /* as of FreeBSD 8.x */
+#define IGPS_VERSION3_LEN 168
+
+#ifdef _KERNEL
+#define IGMPSTAT_ADD(name, val) V_igmpstat.name += (val)
+#define IGMPSTAT_INC(name) IGMPSTAT_ADD(name, 1)
+#endif
+
+#ifdef CTASSERT
+CTASSERT(sizeof(struct igmpstat) == 168);
+#endif
+
+#ifdef _KERNEL
+#define IGMP_RANDOM_DELAY(X) (random() % (X) + 1)
+
+#define IGMP_MAX_STATE_CHANGES 24 /* Max pending changes per group */
+
+/*
+ * IGMP per-group states.
+ */
+#define IGMP_NOT_MEMBER 0 /* Can garbage collect in_multi */
+#define IGMP_SILENT_MEMBER 1 /* Do not perform IGMP for group */
+#define IGMP_REPORTING_MEMBER 2 /* IGMPv1/2/3 we are reporter */
+#define IGMP_IDLE_MEMBER 3 /* IGMPv1/2 we reported last */
+#define IGMP_LAZY_MEMBER 4 /* IGMPv1/2 other member reporting */
+#define IGMP_SLEEPING_MEMBER 5 /* IGMPv1/2 start query response */
+#define IGMP_AWAKENING_MEMBER 6 /* IGMPv1/2 group timer will start */
+#define IGMP_G_QUERY_PENDING_MEMBER 7 /* IGMPv3 group query pending */
+#define IGMP_SG_QUERY_PENDING_MEMBER 8 /* IGMPv3 source query pending */
+#define IGMP_LEAVING_MEMBER 9 /* IGMPv3 dying gasp (pending last */
+ /* retransmission of INCLUDE {}) */
+
+/*
+ * IGMP version tag.
+ */
+#define IGMP_VERSION_NONE 0 /* Invalid */
+#define IGMP_VERSION_1 1
+#define IGMP_VERSION_2 2
+#define IGMP_VERSION_3 3 /* Default */
+
+/*
+ * IGMPv3 protocol control variables.
+ */
+#define IGMP_RV_INIT 2 /* Robustness Variable */
+#define IGMP_RV_MIN 1
+#define IGMP_RV_MAX 7
+
+#define IGMP_QI_INIT 125 /* Query Interval (s) */
+#define IGMP_QI_MIN 1
+#define IGMP_QI_MAX 255
+
+#define IGMP_QRI_INIT 10 /* Query Response Interval (s) */
+#define IGMP_QRI_MIN 1
+#define IGMP_QRI_MAX 255
+
+#define IGMP_URI_INIT 3 /* Unsolicited Report Interval (s) */
+#define IGMP_URI_MIN 0
+#define IGMP_URI_MAX 10
+
+#define IGMP_MAX_G_GS_PACKETS 8 /* # of packets to answer G/GS */
+#define IGMP_MAX_STATE_CHANGE_PACKETS 8 /* # of packets per state change */
+#define IGMP_MAX_RESPONSE_PACKETS 16 /* # of packets for general query */
+#define IGMP_MAX_RESPONSE_BURST 4 /* # of responses to send at once */
+#define IGMP_RESPONSE_BURST_INTERVAL (PR_FASTHZ / 2) /* 500ms */
+
+/*
+ * IGMP-specific mbuf flags.
+ */
+#define M_IGMPV2 M_PROTO1 /* Packet is IGMPv2 */
+#define M_IGMPV3_HDR M_PROTO2 /* Packet has IGMPv3 headers */
+#define M_GROUPREC M_PROTO3 /* mbuf chain is a group record */
+#define M_IGMP_LOOP M_PROTO4 /* transmit on loif, not real ifp */
+
+/*
+ * Default amount of leading space for IGMPv3 to allocate at the
+ * beginning of its mbuf packet chains, to avoid fragmentation and
+ * unnecessary allocation of leading mbufs.
+ */
+#define RAOPT_LEN 4 /* Length of IP Router Alert option */
+#define IGMP_LEADINGSPACE \
+ (sizeof(struct ip) + RAOPT_LEN + sizeof(struct igmp_report))
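+
+/*
+ * Worked example, assuming a 20-byte struct ip and the 8-byte struct
+ * igmp_report: IGMP_LEADINGSPACE = 20 + 4 + 8 = 32 bytes, which is the
+ * headroom igmp.c reserves (via MH_ALIGN() or an m_data offset) at the
+ * front of every report chain so the headers prepend without an extra
+ * mbuf allocation.
+ */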
+
+/*
+ * Subsystem lock macros.
+ * The IGMP lock is only taken within the IGMP subsystem itself.
+ * Currently it is system-wide.
+ * VIMAGE: The lock could be pushed to per-VIMAGE granularity in future.
+ */
+#define IGMP_LOCK_INIT() mtx_init(&igmp_mtx, "igmp_mtx", NULL, MTX_DEF)
+#define IGMP_LOCK_DESTROY() mtx_destroy(&igmp_mtx)
+#define IGMP_LOCK() mtx_lock(&igmp_mtx)
+#define IGMP_LOCK_ASSERT() mtx_assert(&igmp_mtx, MA_OWNED)
+#define IGMP_UNLOCK() mtx_unlock(&igmp_mtx)
+#define IGMP_UNLOCK_ASSERT() mtx_assert(&igmp_mtx, MA_NOTOWNED)
+
+struct igmp_ifinfo;
+
+int igmp_change_state(struct in_multi *);
+void igmp_fasttimo(void);
+struct igmp_ifinfo *
+ igmp_domifattach(struct ifnet *);
+void igmp_domifdetach(struct ifnet *);
+void igmp_ifdetach(struct ifnet *);
+void igmp_input(struct mbuf *, int);
+void igmp_slowtimo(void);
+
+SYSCTL_DECL(_net_inet_igmp);
+
+#endif /* _KERNEL */
+
+/*
+ * Names for IGMP sysctl objects
+ */
+#define IGMPCTL_STATS 1 /* statistics (read-only) */
+#define IGMPCTL_MAXID 2
+
+#define IGMPCTL_NAMES { \
+ { 0, 0 }, \
+ { "stats", CTLTYPE_STRUCT } \
+}
+#endif
diff --git a/rtems/freebsd/netinet/in.c b/rtems/freebsd/netinet/in.c
new file mode 100644
index 00000000..95b8d3cf
--- /dev/null
+++ b/rtems/freebsd/netinet/in.c
@@ -0,0 +1,1601 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 1982, 1986, 1991, 1993
+ * The Regents of the University of California. All rights reserved.
+ * Copyright (C) 2001 WIDE Project. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)in.c 8.4 (Berkeley) 1/9/95
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/freebsd/local/opt_mpath.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/sockio.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/priv.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/jail.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/proc.h>
+#include <rtems/freebsd/sys/sysctl.h>
+#include <rtems/freebsd/sys/syslog.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/if_var.h>
+#include <rtems/freebsd/net/if_dl.h>
+#include <rtems/freebsd/net/if_llatbl.h>
+#include <rtems/freebsd/net/if_types.h>
+#include <rtems/freebsd/net/route.h>
+#include <rtems/freebsd/net/vnet.h>
+
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/in_var.h>
+#include <rtems/freebsd/netinet/in_pcb.h>
+#include <rtems/freebsd/netinet/ip_var.h>
+#include <rtems/freebsd/netinet/igmp_var.h>
+#include <rtems/freebsd/netinet/udp.h>
+#include <rtems/freebsd/netinet/udp_var.h>
+
+static int in_mask2len(struct in_addr *);
+static void in_len2mask(struct in_addr *, int);
+static int in_lifaddr_ioctl(struct socket *, u_long, caddr_t,
+ struct ifnet *, struct thread *);
+
+static int in_addprefix(struct in_ifaddr *, int);
+static int in_scrubprefix(struct in_ifaddr *);
+static void in_socktrim(struct sockaddr_in *);
+static int in_ifinit(struct ifnet *,
+ struct in_ifaddr *, struct sockaddr_in *, int);
+static void in_purgemaddrs(struct ifnet *);
+
+static VNET_DEFINE(int, subnetsarelocal);
+#define V_subnetsarelocal VNET(subnetsarelocal)
+SYSCTL_VNET_INT(_net_inet_ip, OID_AUTO, subnets_are_local, CTLFLAG_RW,
+ &VNET_NAME(subnetsarelocal), 0,
+ "Treat all subnets as directly connected");
+static VNET_DEFINE(int, sameprefixcarponly);
+#define V_sameprefixcarponly VNET(sameprefixcarponly)
+SYSCTL_VNET_INT(_net_inet_ip, OID_AUTO, same_prefix_carp_only, CTLFLAG_RW,
+ &VNET_NAME(sameprefixcarponly), 0,
+ "Refuse to create same prefixes on different interfaces");
+
+VNET_DECLARE(struct inpcbinfo, ripcbinfo);
+#define V_ripcbinfo VNET(ripcbinfo)
+
+/*
+ * Return 1 if an internet address is for a ``local'' host
+ * (one to which we have a connection). If subnetsarelocal
+ * is true, this includes other subnets of the local net.
+ * Otherwise, it includes only the directly-connected (sub)nets.
+ */
+int
+in_localaddr(struct in_addr in)
+{
+ register u_long i = ntohl(in.s_addr);
+ register struct in_ifaddr *ia;
+
+ IN_IFADDR_RLOCK();
+ if (V_subnetsarelocal) {
+ TAILQ_FOREACH(ia, &V_in_ifaddrhead, ia_link) {
+ if ((i & ia->ia_netmask) == ia->ia_net) {
+ IN_IFADDR_RUNLOCK();
+ return (1);
+ }
+ }
+ } else {
+ TAILQ_FOREACH(ia, &V_in_ifaddrhead, ia_link) {
+ if ((i & ia->ia_subnetmask) == ia->ia_subnet) {
+ IN_IFADDR_RUNLOCK();
+ return (1);
+ }
+ }
+ }
+ IN_IFADDR_RUNLOCK();
+ return (0);
+}
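+
+/*
+ * Usage sketch, assuming an interface configured as 192.0.2.1/24 and
+ * subnetsarelocal left at 0: the subnet comparison above makes
+ * 192.0.2.77 local (same /24) while 198.51.100.1 is not.
+ */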
+
+/*
+ * Return 1 if an internet address is for the local host and configured
+ * on one of its interfaces.
+ */
+int
+in_localip(struct in_addr in)
+{
+ struct in_ifaddr *ia;
+
+ IN_IFADDR_RLOCK();
+ LIST_FOREACH(ia, INADDR_HASH(in.s_addr), ia_hash) {
+ if (IA_SIN(ia)->sin_addr.s_addr == in.s_addr) {
+ IN_IFADDR_RUNLOCK();
+ return (1);
+ }
+ }
+ IN_IFADDR_RUNLOCK();
+ return (0);
+}
+
+/*
+ * Determine whether an IP address is in a reserved set of addresses
+ * that may not be forwarded, or whether datagrams to that destination
+ * may be forwarded.
+ */
+int
+in_canforward(struct in_addr in)
+{
+ register u_long i = ntohl(in.s_addr);
+ register u_long net;
+
+ if (IN_EXPERIMENTAL(i) || IN_MULTICAST(i) || IN_LINKLOCAL(i))
+ return (0);
+ if (IN_CLASSA(i)) {
+ net = i & IN_CLASSA_NET;
+ if (net == 0 || net == (IN_LOOPBACKNET << IN_CLASSA_NSHIFT))
+ return (0);
+ }
+ return (1);
+}
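+
+/*
+ * Examples: 224.0.0.1 (multicast), 240.0.0.1 (experimental),
+ * 169.254.1.1 (link-local) and any class A address in net 0 or net
+ * 127 all return 0 here; an ordinary unicast address such as 10.1.2.3
+ * returns 1.
+ */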
+
+/*
+ * Trim a mask in a sockaddr
+ */
+static void
+in_socktrim(struct sockaddr_in *ap)
+{
+ register char *cplim = (char *) &ap->sin_addr;
+ register char *cp = (char *) (&ap->sin_addr + 1);
+
+ ap->sin_len = 0;
+ while (--cp >= cplim)
+ if (*cp) {
+ (ap)->sin_len = cp - (char *) (ap) + 1;
+ break;
+ }
+}
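+
+/*
+ * Example, assuming the usual sockaddr_in layout with sin_addr at
+ * offset 4: for a 255.255.255.0 mask the last non-zero byte sits at
+ * offset 6, so in_socktrim() sets sin_len to 7 and the trailing zero
+ * bytes are omitted from routing-socket output.
+ */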
+
+static int
+in_mask2len(struct in_addr *mask)
+{
+ int x, y;
+ u_char *p;
+
+ p = (u_char *)mask;
+ for (x = 0; x < sizeof(*mask); x++) {
+ if (p[x] != 0xff)
+ break;
+ }
+ y = 0;
+ if (x < sizeof(*mask)) {
+ for (y = 0; y < 8; y++) {
+ if ((p[x] & (0x80 >> y)) == 0)
+ break;
+ }
+ }
+ return (x * 8 + y);
+}
+
+static void
+in_len2mask(struct in_addr *mask, int len)
+{
+ int i;
+ u_char *p;
+
+ p = (u_char *)mask;
+ bzero(mask, sizeof(*mask));
+ for (i = 0; i < len / 8; i++)
+ p[i] = 0xff;
+ if (len % 8)
+ p[i] = (0xff00 >> (len % 8)) & 0xff;
+}
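+
+/*
+ * Round-trip sketch for the two helpers above; the function name is
+ * illustrative and not part of this file.
+ */
+#if 0
+static void
+in_masklen_example(void)
+{
+	struct in_addr mask;
+
+	in_len2mask(&mask, 21);	/* 2 full bytes + 5 bits: 255.255.248.0 */
+	KASSERT(in_mask2len(&mask) == 21, ("mask round-trip failed"));
+}
+#endif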
+
+/*
+ * Generic internet control operations (ioctl's).
+ *
+ * ifp is NULL if not an interface-specific ioctl.
+ */
+/* ARGSUSED */
+int
+in_control(struct socket *so, u_long cmd, caddr_t data, struct ifnet *ifp,
+ struct thread *td)
+{
+ register struct ifreq *ifr = (struct ifreq *)data;
+ register struct in_ifaddr *ia, *iap;
+ register struct ifaddr *ifa;
+ struct in_addr allhosts_addr;
+ struct in_addr dst;
+ struct in_ifinfo *ii;
+ struct in_aliasreq *ifra = (struct in_aliasreq *)data;
+ struct sockaddr_in oldaddr;
+ int error, hostIsNew, iaIsNew, maskIsNew;
+ int iaIsFirst;
+
+ ia = NULL;
+ iaIsFirst = 0;
+ iaIsNew = 0;
+ allhosts_addr.s_addr = htonl(INADDR_ALLHOSTS_GROUP);
+
+ /*
+ * Filter out ioctls we implement directly; forward the rest on to
+ * in_lifaddr_ioctl() and ifp->if_ioctl().
+ */
+ switch (cmd) {
+ case SIOCAIFADDR:
+ case SIOCDIFADDR:
+ case SIOCGIFADDR:
+ case SIOCGIFBRDADDR:
+ case SIOCGIFDSTADDR:
+ case SIOCGIFNETMASK:
+ case SIOCSIFADDR:
+ case SIOCSIFBRDADDR:
+ case SIOCSIFDSTADDR:
+ case SIOCSIFNETMASK:
+ break;
+
+ case SIOCALIFADDR:
+ if (td != NULL) {
+ error = priv_check(td, PRIV_NET_ADDIFADDR);
+ if (error)
+ return (error);
+ }
+ if (ifp == NULL)
+ return (EINVAL);
+ return in_lifaddr_ioctl(so, cmd, data, ifp, td);
+
+ case SIOCDLIFADDR:
+ if (td != NULL) {
+ error = priv_check(td, PRIV_NET_DELIFADDR);
+ if (error)
+ return (error);
+ }
+ if (ifp == NULL)
+ return (EINVAL);
+ return in_lifaddr_ioctl(so, cmd, data, ifp, td);
+
+ case SIOCGLIFADDR:
+ if (ifp == NULL)
+ return (EINVAL);
+ return in_lifaddr_ioctl(so, cmd, data, ifp, td);
+
+ default:
+ if (ifp == NULL || ifp->if_ioctl == NULL)
+ return (EOPNOTSUPP);
+ return ((*ifp->if_ioctl)(ifp, cmd, data));
+ }
+
+ if (ifp == NULL)
+ return (EADDRNOTAVAIL);
+
+ /*
+ * Security checks before we get involved in any work.
+ */
+ switch (cmd) {
+ case SIOCAIFADDR:
+ case SIOCSIFADDR:
+ case SIOCSIFBRDADDR:
+ case SIOCSIFNETMASK:
+ case SIOCSIFDSTADDR:
+ if (td != NULL) {
+ error = priv_check(td, PRIV_NET_ADDIFADDR);
+ if (error)
+ return (error);
+ }
+ break;
+
+ case SIOCDIFADDR:
+ if (td != NULL) {
+ error = priv_check(td, PRIV_NET_DELIFADDR);
+ if (error)
+ return (error);
+ }
+ break;
+ }
+
+ /*
+ * Find address for this interface, if it exists.
+ *
+ * If an alias address was specified, find that one instead of the
+ * first one on the interface, if possible.
+ */
+ dst = ((struct sockaddr_in *)&ifr->ifr_addr)->sin_addr;
+ IN_IFADDR_RLOCK();
+ LIST_FOREACH(iap, INADDR_HASH(dst.s_addr), ia_hash) {
+ if (iap->ia_ifp == ifp &&
+ iap->ia_addr.sin_addr.s_addr == dst.s_addr) {
+ if (td == NULL || prison_check_ip4(td->td_ucred,
+ &dst) == 0)
+ ia = iap;
+ break;
+ }
+ }
+ if (ia != NULL)
+ ifa_ref(&ia->ia_ifa);
+ IN_IFADDR_RUNLOCK();
+ if (ia == NULL) {
+ IF_ADDR_LOCK(ifp);
+ TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
+ iap = ifatoia(ifa);
+ if (iap->ia_addr.sin_family == AF_INET) {
+ if (td != NULL &&
+ prison_check_ip4(td->td_ucred,
+ &iap->ia_addr.sin_addr) != 0)
+ continue;
+ ia = iap;
+ break;
+ }
+ }
+ if (ia != NULL)
+ ifa_ref(&ia->ia_ifa);
+ IF_ADDR_UNLOCK(ifp);
+ }
+ if (ia == NULL)
+ iaIsFirst = 1;
+
+ error = 0;
+ switch (cmd) {
+ case SIOCAIFADDR:
+ case SIOCDIFADDR:
+ if (ifra->ifra_addr.sin_family == AF_INET) {
+ struct in_ifaddr *oia;
+
+ IN_IFADDR_RLOCK();
+ for (oia = ia; ia; ia = TAILQ_NEXT(ia, ia_link)) {
+ if (ia->ia_ifp == ifp &&
+ ia->ia_addr.sin_addr.s_addr ==
+ ifra->ifra_addr.sin_addr.s_addr)
+ break;
+ }
+ if (ia != NULL && ia != oia)
+ ifa_ref(&ia->ia_ifa);
+ if (oia != NULL && ia != oia)
+ ifa_free(&oia->ia_ifa);
+ IN_IFADDR_RUNLOCK();
+ if ((ifp->if_flags & IFF_POINTOPOINT)
+ && (cmd == SIOCAIFADDR)
+ && (ifra->ifra_dstaddr.sin_addr.s_addr
+ == INADDR_ANY)) {
+ error = EDESTADDRREQ;
+ goto out;
+ }
+ }
+ if (cmd == SIOCDIFADDR && ia == NULL) {
+ error = EADDRNOTAVAIL;
+ goto out;
+ }
+ /* FALLTHROUGH */
+ case SIOCSIFADDR:
+ case SIOCSIFNETMASK:
+ case SIOCSIFDSTADDR:
+ if (ia == NULL) {
+ ia = (struct in_ifaddr *)
+ malloc(sizeof *ia, M_IFADDR, M_NOWAIT |
+ M_ZERO);
+ if (ia == NULL) {
+ error = ENOBUFS;
+ goto out;
+ }
+
+ ifa = &ia->ia_ifa;
+ ifa_init(ifa);
+ ifa->ifa_addr = (struct sockaddr *)&ia->ia_addr;
+ ifa->ifa_dstaddr = (struct sockaddr *)&ia->ia_dstaddr;
+ ifa->ifa_netmask = (struct sockaddr *)&ia->ia_sockmask;
+
+ ia->ia_sockmask.sin_len = 8;
+ ia->ia_sockmask.sin_family = AF_INET;
+ if (ifp->if_flags & IFF_BROADCAST) {
+ ia->ia_broadaddr.sin_len = sizeof(ia->ia_addr);
+ ia->ia_broadaddr.sin_family = AF_INET;
+ }
+ ia->ia_ifp = ifp;
+
+ ifa_ref(ifa); /* if_addrhead */
+ IF_ADDR_LOCK(ifp);
+ TAILQ_INSERT_TAIL(&ifp->if_addrhead, ifa, ifa_link);
+ IF_ADDR_UNLOCK(ifp);
+ ifa_ref(ifa); /* in_ifaddrhead */
+ IN_IFADDR_WLOCK();
+ TAILQ_INSERT_TAIL(&V_in_ifaddrhead, ia, ia_link);
+ IN_IFADDR_WUNLOCK();
+ iaIsNew = 1;
+ }
+ break;
+
+ case SIOCSIFBRDADDR:
+ case SIOCGIFADDR:
+ case SIOCGIFNETMASK:
+ case SIOCGIFDSTADDR:
+ case SIOCGIFBRDADDR:
+ if (ia == NULL) {
+ error = EADDRNOTAVAIL;
+ goto out;
+ }
+ break;
+ }
+
+ /*
+ * Most paths in this switch return directly or via out. Only paths
+ * that remove the address break in order to hit common removal code.
+ */
+ switch (cmd) {
+ case SIOCGIFADDR:
+ *((struct sockaddr_in *)&ifr->ifr_addr) = ia->ia_addr;
+ goto out;
+
+ case SIOCGIFBRDADDR:
+ if ((ifp->if_flags & IFF_BROADCAST) == 0) {
+ error = EINVAL;
+ goto out;
+ }
+ *((struct sockaddr_in *)&ifr->ifr_dstaddr) = ia->ia_broadaddr;
+ goto out;
+
+ case SIOCGIFDSTADDR:
+ if ((ifp->if_flags & IFF_POINTOPOINT) == 0) {
+ error = EINVAL;
+ goto out;
+ }
+ *((struct sockaddr_in *)&ifr->ifr_dstaddr) = ia->ia_dstaddr;
+ goto out;
+
+ case SIOCGIFNETMASK:
+ *((struct sockaddr_in *)&ifr->ifr_addr) = ia->ia_sockmask;
+ goto out;
+
+ case SIOCSIFDSTADDR:
+ if ((ifp->if_flags & IFF_POINTOPOINT) == 0) {
+ error = EINVAL;
+ goto out;
+ }
+ oldaddr = ia->ia_dstaddr;
+ ia->ia_dstaddr = *(struct sockaddr_in *)&ifr->ifr_dstaddr;
+ if (ifp->if_ioctl != NULL) {
+ error = (*ifp->if_ioctl)(ifp, SIOCSIFDSTADDR,
+ (caddr_t)ia);
+ if (error) {
+ ia->ia_dstaddr = oldaddr;
+ goto out;
+ }
+ }
+ if (ia->ia_flags & IFA_ROUTE) {
+ ia->ia_ifa.ifa_dstaddr = (struct sockaddr *)&oldaddr;
+ rtinit(&(ia->ia_ifa), (int)RTM_DELETE, RTF_HOST);
+ ia->ia_ifa.ifa_dstaddr =
+ (struct sockaddr *)&ia->ia_dstaddr;
+ rtinit(&(ia->ia_ifa), (int)RTM_ADD, RTF_HOST|RTF_UP);
+ }
+ goto out;
+
+ case SIOCSIFBRDADDR:
+ if ((ifp->if_flags & IFF_BROADCAST) == 0) {
+ error = EINVAL;
+ goto out;
+ }
+ ia->ia_broadaddr = *(struct sockaddr_in *)&ifr->ifr_broadaddr;
+ goto out;
+
+ case SIOCSIFADDR:
+ error = in_ifinit(ifp, ia,
+ (struct sockaddr_in *) &ifr->ifr_addr, 1);
+ if (error != 0 && iaIsNew)
+ break;
+ if (error == 0) {
+ ii = ((struct in_ifinfo *)ifp->if_afdata[AF_INET]);
+ if (iaIsFirst &&
+ (ifp->if_flags & IFF_MULTICAST) != 0) {
+ error = in_joingroup(ifp, &allhosts_addr,
+ NULL, &ii->ii_allhosts);
+ }
+ EVENTHANDLER_INVOKE(ifaddr_event, ifp);
+ }
+ error = 0;
+ goto out;
+
+ case SIOCSIFNETMASK:
+ ia->ia_sockmask.sin_addr = ifra->ifra_addr.sin_addr;
+ ia->ia_subnetmask = ntohl(ia->ia_sockmask.sin_addr.s_addr);
+ goto out;
+
+ case SIOCAIFADDR:
+ maskIsNew = 0;
+ hostIsNew = 1;
+ error = 0;
+ if (ia->ia_addr.sin_family == AF_INET) {
+ if (ifra->ifra_addr.sin_len == 0) {
+ ifra->ifra_addr = ia->ia_addr;
+ hostIsNew = 0;
+ } else if (ifra->ifra_addr.sin_addr.s_addr ==
+ ia->ia_addr.sin_addr.s_addr)
+ hostIsNew = 0;
+ }
+ if (ifra->ifra_mask.sin_len) {
+ /*
+ * QL: XXX
+ * Need to scrub the prefix here in case
+ * the issued command is SIOCAIFADDR with
+ * the same address, but with a different
+ * prefix length. And if the prefix length
+ * is the same as before, then the call is
+ * unnecessarily executed here.
+ */
+ in_ifscrub(ifp, ia);
+ ia->ia_sockmask = ifra->ifra_mask;
+ ia->ia_sockmask.sin_family = AF_INET;
+ ia->ia_subnetmask =
+ ntohl(ia->ia_sockmask.sin_addr.s_addr);
+ maskIsNew = 1;
+ }
+ if ((ifp->if_flags & IFF_POINTOPOINT) &&
+ (ifra->ifra_dstaddr.sin_family == AF_INET)) {
+ in_ifscrub(ifp, ia);
+ ia->ia_dstaddr = ifra->ifra_dstaddr;
+ maskIsNew = 1; /* We lie; but the effect's the same */
+ }
+ if (ifra->ifra_addr.sin_family == AF_INET &&
+ (hostIsNew || maskIsNew))
+ error = in_ifinit(ifp, ia, &ifra->ifra_addr, 0);
+ if (error != 0 && iaIsNew)
+ goto out;
+
+ if ((ifp->if_flags & IFF_BROADCAST) &&
+ (ifra->ifra_broadaddr.sin_family == AF_INET))
+ ia->ia_broadaddr = ifra->ifra_broadaddr;
+ if (error == 0) {
+ ii = ((struct in_ifinfo *)ifp->if_afdata[AF_INET]);
+ if (iaIsFirst &&
+ (ifp->if_flags & IFF_MULTICAST) != 0) {
+ error = in_joingroup(ifp, &allhosts_addr,
+ NULL, &ii->ii_allhosts);
+ }
+ EVENTHANDLER_INVOKE(ifaddr_event, ifp);
+ }
+ goto out;
+
+ case SIOCDIFADDR:
+ /*
+ * in_ifscrub kills the interface route.
+ */
+ in_ifscrub(ifp, ia);
+
+ /*
+ * in_ifadown gets rid of all the rest of
+ * the routes. This is not quite the right
+ * thing to do, but at least if we are running
+ * a routing process they will come back.
+ */
+ in_ifadown(&ia->ia_ifa, 1);
+ EVENTHANDLER_INVOKE(ifaddr_event, ifp);
+ error = 0;
+ break;
+
+ default:
+ panic("in_control: unsupported ioctl");
+ }
+
+ IF_ADDR_LOCK(ifp);
+ /* Re-check that ia is still part of the list. */
+ TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
+ if (ifa == &ia->ia_ifa)
+ break;
+ }
+ if (ifa == NULL) {
+ /*
+ * If we lost the race with another thread, there is no need to
+ * try it again for the next loop as there is no other exit
+ * path between here and out.
+ */
+ IF_ADDR_UNLOCK(ifp);
+ error = EADDRNOTAVAIL;
+ goto out;
+ }
+ TAILQ_REMOVE(&ifp->if_addrhead, &ia->ia_ifa, ifa_link);
+ IF_ADDR_UNLOCK(ifp);
+ ifa_free(&ia->ia_ifa); /* if_addrhead */
+
+ IN_IFADDR_WLOCK();
+ TAILQ_REMOVE(&V_in_ifaddrhead, ia, ia_link);
+ if (ia->ia_addr.sin_family == AF_INET) {
+ struct in_ifaddr *if_ia;
+
+ LIST_REMOVE(ia, ia_hash);
+ IN_IFADDR_WUNLOCK();
+ /*
+ * If this is the last IPv4 address configured on this
+ * interface, leave the all-hosts group.
+ * No state-change report need be transmitted.
+ */
+ if_ia = NULL;
+ IFP_TO_IA(ifp, if_ia);
+ if (if_ia == NULL) {
+ ii = ((struct in_ifinfo *)ifp->if_afdata[AF_INET]);
+ IN_MULTI_LOCK();
+ if (ii->ii_allhosts) {
+ (void)in_leavegroup_locked(ii->ii_allhosts,
+ NULL);
+ ii->ii_allhosts = NULL;
+ }
+ IN_MULTI_UNLOCK();
+ } else
+ ifa_free(&if_ia->ia_ifa);
+ } else
+ IN_IFADDR_WUNLOCK();
+ ifa_free(&ia->ia_ifa); /* in_ifaddrhead */
+out:
+ if (ia != NULL)
+ ifa_free(&ia->ia_ifa);
+ return (error);
+}
+
+/*
+ * SIOC[GAD]LIFADDR.
+ * SIOCGLIFADDR: get first address. (?!?)
+ * SIOCGLIFADDR with IFLR_PREFIX:
+ * get first address that matches the specified prefix.
+ * SIOCALIFADDR: add the specified address.
+ * SIOCALIFADDR with IFLR_PREFIX:
+ * EINVAL since we can't deduce hostid part of the address.
+ * SIOCDLIFADDR: delete the specified address.
+ * SIOCDLIFADDR with IFLR_PREFIX:
+ * delete the first address that matches the specified prefix.
+ * return values:
+ * EINVAL on invalid parameters
+ * EADDRNOTAVAIL on prefix match failed/specified address not found
+ * other values may be returned from in_ioctl()
+ */
+static int
+in_lifaddr_ioctl(struct socket *so, u_long cmd, caddr_t data,
+ struct ifnet *ifp, struct thread *td)
+{
+ struct if_laddrreq *iflr = (struct if_laddrreq *)data;
+ struct ifaddr *ifa;
+
+ /* sanity checks */
+ if (data == NULL || ifp == NULL) {
+ panic("invalid argument to in_lifaddr_ioctl");
+ /*NOTREACHED*/
+ }
+
+ switch (cmd) {
+ case SIOCGLIFADDR:
+ /* address must be specified on GET with IFLR_PREFIX */
+ if ((iflr->flags & IFLR_PREFIX) == 0)
+ break;
+ /*FALLTHROUGH*/
+ case SIOCALIFADDR:
+ case SIOCDLIFADDR:
+ /* address must be specified on ADD and DELETE */
+ if (iflr->addr.ss_family != AF_INET)
+ return (EINVAL);
+ if (iflr->addr.ss_len != sizeof(struct sockaddr_in))
+ return (EINVAL);
+ /* XXX need improvement */
+ if (iflr->dstaddr.ss_family
+ && iflr->dstaddr.ss_family != AF_INET)
+ return (EINVAL);
+ if (iflr->dstaddr.ss_family
+ && iflr->dstaddr.ss_len != sizeof(struct sockaddr_in))
+ return (EINVAL);
+ break;
+ default: /*shouldn't happen*/
+ return (EOPNOTSUPP);
+ }
+ if (sizeof(struct in_addr) * 8 < iflr->prefixlen)
+ return (EINVAL);
+
+ switch (cmd) {
+ case SIOCALIFADDR:
+ {
+ struct in_aliasreq ifra;
+
+ if (iflr->flags & IFLR_PREFIX)
+ return (EINVAL);
+
+ /* copy args to in_aliasreq, perform ioctl(SIOCAIFADDR). */
+ bzero(&ifra, sizeof(ifra));
+ bcopy(iflr->iflr_name, ifra.ifra_name,
+ sizeof(ifra.ifra_name));
+
+ bcopy(&iflr->addr, &ifra.ifra_addr, iflr->addr.ss_len);
+
+ if (iflr->dstaddr.ss_family) { /*XXX*/
+ bcopy(&iflr->dstaddr, &ifra.ifra_dstaddr,
+ iflr->dstaddr.ss_len);
+ }
+
+ ifra.ifra_mask.sin_family = AF_INET;
+ ifra.ifra_mask.sin_len = sizeof(struct sockaddr_in);
+ in_len2mask(&ifra.ifra_mask.sin_addr, iflr->prefixlen);
+
+ return (in_control(so, SIOCAIFADDR, (caddr_t)&ifra, ifp, td));
+ }
+ case SIOCGLIFADDR:
+ case SIOCDLIFADDR:
+ {
+ struct in_ifaddr *ia;
+ struct in_addr mask, candidate, match;
+ struct sockaddr_in *sin;
+
+ bzero(&mask, sizeof(mask));
+ bzero(&match, sizeof(match));
+ if (iflr->flags & IFLR_PREFIX) {
+ /* lookup a prefix rather than address. */
+ in_len2mask(&mask, iflr->prefixlen);
+
+ sin = (struct sockaddr_in *)&iflr->addr;
+ match.s_addr = sin->sin_addr.s_addr;
+ match.s_addr &= mask.s_addr;
+
+ /* if you set extra bits, that's wrong */
+ if (match.s_addr != sin->sin_addr.s_addr)
+ return (EINVAL);
+
+ } else {
+ /* on getting an address, take the 1st match */
+ /* on deleting an address, do exact match */
+ if (cmd != SIOCGLIFADDR) {
+ in_len2mask(&mask, 32);
+ sin = (struct sockaddr_in *)&iflr->addr;
+ match.s_addr = sin->sin_addr.s_addr;
+ }
+ }
+
+ TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
+ if (ifa->ifa_addr->sa_family != AF_INET)
+ continue;
+ if (match.s_addr == 0)
+ break;
+ candidate.s_addr = ((struct sockaddr_in *)ifa->ifa_addr)->sin_addr.s_addr;
+ candidate.s_addr &= mask.s_addr;
+ if (candidate.s_addr == match.s_addr)
+ break;
+ }
+ if (ifa == NULL)
+ return (EADDRNOTAVAIL);
+ ia = (struct in_ifaddr *)ifa;
+
+ if (cmd == SIOCGLIFADDR) {
+ /* fill in the if_laddrreq structure */
+ bcopy(&ia->ia_addr, &iflr->addr, ia->ia_addr.sin_len);
+
+ if ((ifp->if_flags & IFF_POINTOPOINT) != 0) {
+ bcopy(&ia->ia_dstaddr, &iflr->dstaddr,
+ ia->ia_dstaddr.sin_len);
+ } else
+ bzero(&iflr->dstaddr, sizeof(iflr->dstaddr));
+
+ iflr->prefixlen =
+ in_mask2len(&ia->ia_sockmask.sin_addr);
+
+ iflr->flags = 0; /*XXX*/
+
+ return (0);
+ } else {
+ struct in_aliasreq ifra;
+
+ /* fill in_aliasreq and do ioctl(SIOCDIFADDR) */
+ bzero(&ifra, sizeof(ifra));
+ bcopy(iflr->iflr_name, ifra.ifra_name,
+ sizeof(ifra.ifra_name));
+
+ bcopy(&ia->ia_addr, &ifra.ifra_addr,
+ ia->ia_addr.sin_len);
+ if ((ifp->if_flags & IFF_POINTOPOINT) != 0) {
+ bcopy(&ia->ia_dstaddr, &ifra.ifra_dstaddr,
+ ia->ia_dstaddr.sin_len);
+ }
+ bcopy(&ia->ia_sockmask, &ifra.ifra_mask,
+ ia->ia_sockmask.sin_len);
+
+ return (in_control(so, SIOCDIFADDR, (caddr_t)&ifra,
+ ifp, td));
+ }
+ }
+ }
+
+ return (EOPNOTSUPP); /*just for safety*/
+}
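+
+/*
+ * Example (illustrative only): how a userland caller might add
+ * 192.0.2.1/24 via SIOCALIFADDR.  The descriptor s, the interface
+ * name and the address are hypothetical.
+ *
+ *	struct if_laddrreq iflr;
+ *	struct sockaddr_in *sin;
+ *
+ *	memset(&iflr, 0, sizeof(iflr));
+ *	strlcpy(iflr.iflr_name, "em0", sizeof(iflr.iflr_name));
+ *	sin = (struct sockaddr_in *)&iflr.addr;
+ *	sin->sin_len = sizeof(*sin);
+ *	sin->sin_family = AF_INET;
+ *	sin->sin_addr.s_addr = inet_addr("192.0.2.1");
+ *	iflr.prefixlen = 24;		(expanded into ifra_mask above)
+ *	ioctl(s, SIOCALIFADDR, &iflr);	(IFLR_PREFIX must be clear for ADD)
+ */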
+
+/*
+ * Delete any existing route for an interface.
+ */
+void
+in_ifscrub(struct ifnet *ifp, struct in_ifaddr *ia)
+{
+
+ in_scrubprefix(ia);
+}
+
+/*
+ * Initialize an interface's internet address
+ * and routing table entry.
+ */
+static int
+in_ifinit(struct ifnet *ifp, struct in_ifaddr *ia, struct sockaddr_in *sin,
+ int scrub)
+{
+ register u_long i = ntohl(sin->sin_addr.s_addr);
+ struct sockaddr_in oldaddr;
+ int s = splimp(), flags = RTF_UP, error = 0;
+
+ oldaddr = ia->ia_addr;
+ if (oldaddr.sin_family == AF_INET)
+ LIST_REMOVE(ia, ia_hash);
+ ia->ia_addr = *sin;
+ if (ia->ia_addr.sin_family == AF_INET) {
+ IN_IFADDR_WLOCK();
+ LIST_INSERT_HEAD(INADDR_HASH(ia->ia_addr.sin_addr.s_addr),
+ ia, ia_hash);
+ IN_IFADDR_WUNLOCK();
+ }
+ /*
+ * Give the interface a chance to initialize
+ * if this is its first address,
+ * and to validate the address if necessary.
+ */
+ if (ifp->if_ioctl != NULL) {
+ error = (*ifp->if_ioctl)(ifp, SIOCSIFADDR, (caddr_t)ia);
+ if (error) {
+ splx(s);
+ /* LIST_REMOVE(ia, ia_hash) is done in in_control */
+ ia->ia_addr = oldaddr;
+ IN_IFADDR_WLOCK();
+ if (ia->ia_addr.sin_family == AF_INET)
+ LIST_INSERT_HEAD(INADDR_HASH(
+ ia->ia_addr.sin_addr.s_addr), ia, ia_hash);
+ else
+ /*
+ * If oldaddr family is not AF_INET (e.g.
+ * the interface has just been created), in_control
+ * does not call LIST_REMOVE, and we end up
+ * with bogus ia entries in the hash.
+ */
+ LIST_REMOVE(ia, ia_hash);
+ IN_IFADDR_WUNLOCK();
+ return (error);
+ }
+ }
+ splx(s);
+ if (scrub) {
+ ia->ia_ifa.ifa_addr = (struct sockaddr *)&oldaddr;
+ in_ifscrub(ifp, ia);
+ ia->ia_ifa.ifa_addr = (struct sockaddr *)&ia->ia_addr;
+ }
+ if (IN_CLASSA(i))
+ ia->ia_netmask = IN_CLASSA_NET;
+ else if (IN_CLASSB(i))
+ ia->ia_netmask = IN_CLASSB_NET;
+ else
+ ia->ia_netmask = IN_CLASSC_NET;
+ /*
+ * The subnet mask usually includes at least the standard network part,
+ * but may be smaller in the case of supernetting.
+ * If it is set, we believe it.
+ */
+ if (ia->ia_subnetmask == 0) {
+ ia->ia_subnetmask = ia->ia_netmask;
+ ia->ia_sockmask.sin_addr.s_addr = htonl(ia->ia_subnetmask);
+ } else
+ ia->ia_netmask &= ia->ia_subnetmask;
+ ia->ia_net = i & ia->ia_netmask;
+ ia->ia_subnet = i & ia->ia_subnetmask;
+ in_socktrim(&ia->ia_sockmask);
+ /*
+ * XXX: carp(4) does not have interface route
+ */
+ if (ifp->if_type == IFT_CARP)
+ return (0);
+ /*
+ * Add route for the network.
+ */
+ ia->ia_ifa.ifa_metric = ifp->if_metric;
+ if (ifp->if_flags & IFF_BROADCAST) {
+ ia->ia_broadaddr.sin_addr.s_addr =
+ htonl(ia->ia_subnet | ~ia->ia_subnetmask);
+ ia->ia_netbroadcast.s_addr =
+ htonl(ia->ia_net | ~ ia->ia_netmask);
+ } else if (ifp->if_flags & IFF_LOOPBACK) {
+ ia->ia_dstaddr = ia->ia_addr;
+ flags |= RTF_HOST;
+ } else if (ifp->if_flags & IFF_POINTOPOINT) {
+ if (ia->ia_dstaddr.sin_family != AF_INET)
+ return (0);
+ flags |= RTF_HOST;
+ }
+ if ((error = in_addprefix(ia, flags)) != 0)
+ return (error);
+
+ if (ia->ia_addr.sin_addr.s_addr == INADDR_ANY)
+ return (0);
+
+ if (ifp->if_flags & IFF_POINTOPOINT) {
+ if (ia->ia_dstaddr.sin_addr.s_addr == ia->ia_addr.sin_addr.s_addr)
+ return (0);
+ }
+
+ /*
+ * add a loopback route to self
+ */
+ if (V_useloopback && !(ifp->if_flags & IFF_LOOPBACK)) {
+ struct route ia_ro;
+
+ bzero(&ia_ro, sizeof(ia_ro));
+ *((struct sockaddr_in *)(&ia_ro.ro_dst)) = ia->ia_addr;
+ rtalloc_ign_fib(&ia_ro, 0, 0);
+ if ((ia_ro.ro_rt != NULL) && (ia_ro.ro_rt->rt_ifp != NULL) &&
+ (ia_ro.ro_rt->rt_ifp == V_loif)) {
+ RT_LOCK(ia_ro.ro_rt);
+ RT_ADDREF(ia_ro.ro_rt);
+ RTFREE_LOCKED(ia_ro.ro_rt);
+ } else
+ error = ifa_add_loopback_route((struct ifaddr *)ia,
+ (struct sockaddr *)&ia->ia_addr);
+ if (error == 0)
+ ia->ia_flags |= IFA_RTSELF;
+ if (ia_ro.ro_rt != NULL)
+ RTFREE(ia_ro.ro_rt);
+ }
+
+ return (error);
+}
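+
+/*
+ * Worked example (illustrative) for the classful defaults above: for
+ * 192.0.2.1 (i == 0xc0000201), IN_CLASSC() is true, so ia_netmask
+ * defaults to IN_CLASSC_NET (0xffffff00).  With no subnet mask
+ * configured, ia_subnetmask inherits that value, ia_net and ia_subnet
+ * both become 0xc0000200, and on an IFF_BROADCAST interface the
+ * directed broadcast address becomes 192.0.2.255.
+ */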
+
+#define rtinitflags(x) \
+ ((((x)->ia_ifp->if_flags & (IFF_LOOPBACK | IFF_POINTOPOINT)) != 0) \
+ ? RTF_HOST : 0)
+
+/*
+ * Generate a routing message when inserting or deleting
+ * an interface address alias.
+ */
+static void
+in_addralias_rtmsg(int cmd, struct in_addr *prefix, struct in_ifaddr *target)
+{
+ struct route pfx_ro;
+ struct sockaddr_in *pfx_addr;
+ struct rtentry msg_rt;
+
+ /* QL: XXX
+ * This is a bit questionable because there is no
+ * additional route entry added/deleted for an address
+ * alias. Therefore this route report is inaccurate.
+ */
+ bzero(&pfx_ro, sizeof(pfx_ro));
+ pfx_addr = (struct sockaddr_in *)(&pfx_ro.ro_dst);
+ pfx_addr->sin_len = sizeof(*pfx_addr);
+ pfx_addr->sin_family = AF_INET;
+ pfx_addr->sin_addr = *prefix;
+ rtalloc_ign_fib(&pfx_ro, 0, 0);
+ if (pfx_ro.ro_rt != NULL) {
+ msg_rt = *pfx_ro.ro_rt;
+
+ /* QL: XXX
+ * Point the gateway to the new interface
+ * address as if a new prefix route entry has
+ * been added through the new address alias.
+ * All other parts of the rtentry are accurate,
+ * e.g., rt_key, rt_mask, rt_ifp etc.
+ */
+ msg_rt.rt_gateway =
+ (struct sockaddr *)&target->ia_addr;
+ rt_newaddrmsg(cmd,
+ (struct ifaddr *)target,
+ 0, &msg_rt);
+ RTFREE(pfx_ro.ro_rt);
+ }
+ return;
+}
+
+/*
+ * Check if we have a route for the given prefix already or add one accordingly.
+ */
+static int
+in_addprefix(struct in_ifaddr *target, int flags)
+{
+ struct in_ifaddr *ia;
+ struct in_addr prefix, mask, p, m;
+ int error;
+
+ if ((flags & RTF_HOST) != 0) {
+ prefix = target->ia_dstaddr.sin_addr;
+ mask.s_addr = 0;
+ } else {
+ prefix = target->ia_addr.sin_addr;
+ mask = target->ia_sockmask.sin_addr;
+ prefix.s_addr &= mask.s_addr;
+ }
+
+ IN_IFADDR_RLOCK();
+ TAILQ_FOREACH(ia, &V_in_ifaddrhead, ia_link) {
+ if (rtinitflags(ia)) {
+ p = ia->ia_addr.sin_addr;
+
+ if (prefix.s_addr != p.s_addr)
+ continue;
+ } else {
+ p = ia->ia_addr.sin_addr;
+ m = ia->ia_sockmask.sin_addr;
+ p.s_addr &= m.s_addr;
+
+ if (prefix.s_addr != p.s_addr ||
+ mask.s_addr != m.s_addr)
+ continue;
+ }
+
+ /*
+ * If we got a matching prefix route inserted by another
+ * interface address, we are done here.
+ */
+ if (ia->ia_flags & IFA_ROUTE) {
+#ifdef RADIX_MPATH
+ if (ia->ia_addr.sin_addr.s_addr ==
+ target->ia_addr.sin_addr.s_addr) {
+ IN_IFADDR_RUNLOCK();
+ return (EEXIST);
+ } else
+ break;
+#endif
+ if (V_sameprefixcarponly &&
+ target->ia_ifp->if_type != IFT_CARP &&
+ ia->ia_ifp->if_type != IFT_CARP) {
+ IN_IFADDR_RUNLOCK();
+ return (EEXIST);
+ } else {
+ in_addralias_rtmsg(RTM_ADD, &prefix, target);
+ IN_IFADDR_RUNLOCK();
+ return (0);
+ }
+ }
+ }
+ IN_IFADDR_RUNLOCK();
+
+ /*
+ * No one seems to have this prefix route, so we try to insert it.
+ */
+ error = rtinit(&target->ia_ifa, (int)RTM_ADD, flags);
+ if (!error)
+ target->ia_flags |= IFA_ROUTE;
+ return (error);
+}
+
+extern void arp_ifscrub(struct ifnet *ifp, uint32_t addr);
+
+/*
+ * If no other address in the system can serve a route to the same
+ * prefix, remove the route. Otherwise, hand the route over to that
+ * address.
+ */
+static int
+in_scrubprefix(struct in_ifaddr *target)
+{
+ struct in_ifaddr *ia;
+ struct in_addr prefix, mask, p;
+ int error = 0;
+ struct sockaddr_in prefix0, mask0;
+
+ /*
+ * Remove the loopback route to the interface address.
+ * The "useloopback" setting is not consulted because if the
+ * user configures an interface address, turns off this
+ * setting, and then tries to delete that interface address,
+ * checking the current setting of "useloopback" would leave
+ * that interface address loopback route untouched, which
+ * would be wrong. Therefore the interface address loopback route
+ * deletion is unconditional.
+ */
+ if ((target->ia_addr.sin_addr.s_addr != INADDR_ANY) &&
+ !(target->ia_ifp->if_flags & IFF_LOOPBACK) &&
+ (target->ia_flags & IFA_RTSELF)) {
+ struct route ia_ro;
+ int freeit = 0;
+
+ bzero(&ia_ro, sizeof(ia_ro));
+ *((struct sockaddr_in *)(&ia_ro.ro_dst)) = target->ia_addr;
+ rtalloc_ign_fib(&ia_ro, 0, 0);
+ if ((ia_ro.ro_rt != NULL) && (ia_ro.ro_rt->rt_ifp != NULL) &&
+ (ia_ro.ro_rt->rt_ifp == V_loif)) {
+ RT_LOCK(ia_ro.ro_rt);
+ if (ia_ro.ro_rt->rt_refcnt <= 1)
+ freeit = 1;
+ else
+ RT_REMREF(ia_ro.ro_rt);
+ RTFREE_LOCKED(ia_ro.ro_rt);
+ }
+ if (freeit)
+ error = ifa_del_loopback_route((struct ifaddr *)target,
+ (struct sockaddr *)&target->ia_addr);
+ if (error == 0)
+ target->ia_flags &= ~IFA_RTSELF;
+ /* remove arp cache */
+ arp_ifscrub(target->ia_ifp, IA_SIN(target)->sin_addr.s_addr);
+ }
+
+ if (rtinitflags(target))
+ prefix = target->ia_dstaddr.sin_addr;
+ else {
+ prefix = target->ia_addr.sin_addr;
+ mask = target->ia_sockmask.sin_addr;
+ prefix.s_addr &= mask.s_addr;
+ }
+
+ if ((target->ia_flags & IFA_ROUTE) == 0) {
+ in_addralias_rtmsg(RTM_DELETE, &prefix, target);
+ return (0);
+ }
+
+ IN_IFADDR_RLOCK();
+ TAILQ_FOREACH(ia, &V_in_ifaddrhead, ia_link) {
+ if (rtinitflags(ia))
+ p = ia->ia_dstaddr.sin_addr;
+ else {
+ p = ia->ia_addr.sin_addr;
+ p.s_addr &= ia->ia_sockmask.sin_addr.s_addr;
+ }
+
+ if (prefix.s_addr != p.s_addr)
+ continue;
+
+ /*
+ * If we got a matching prefix address, move IFA_ROUTE and
+ * the route itself to it. Make sure that routing daemons
+ * get a heads-up.
+ *
+ * XXX: a special case for carp(4) interface - this should
+ * be more generally specified as an interface that
+ * doesn't support such action.
+ */
+ if ((ia->ia_flags & IFA_ROUTE) == 0
+ && (ia->ia_ifp->if_type != IFT_CARP)
+ ) {
+ IN_IFADDR_RUNLOCK();
+ rtinit(&(target->ia_ifa), (int)RTM_DELETE,
+ rtinitflags(target));
+ target->ia_flags &= ~IFA_ROUTE;
+
+ error = rtinit(&ia->ia_ifa, (int)RTM_ADD,
+ rtinitflags(ia) | RTF_UP);
+ if (error == 0)
+ ia->ia_flags |= IFA_ROUTE;
+ return (error);
+ }
+ }
+ IN_IFADDR_RUNLOCK();
+
+ /*
+ * remove all L2 entries on the given prefix
+ */
+ bzero(&prefix0, sizeof(prefix0));
+ prefix0.sin_len = sizeof(prefix0);
+ prefix0.sin_family = AF_INET;
+ prefix0.sin_addr.s_addr = target->ia_subnet;
+ bzero(&mask0, sizeof(mask0));
+ mask0.sin_len = sizeof(mask0);
+ mask0.sin_family = AF_INET;
+ mask0.sin_addr.s_addr = target->ia_subnetmask;
+ lltable_prefix_free(AF_INET, (struct sockaddr *)&prefix0,
+ (struct sockaddr *)&mask0);
+
+ /*
+ * As no one seems to have this prefix, we can remove the route.
+ */
+ rtinit(&(target->ia_ifa), (int)RTM_DELETE, rtinitflags(target));
+ target->ia_flags &= ~IFA_ROUTE;
+ return (0);
+}
+
+#undef rtinitflags
+
+/*
+ * Return 1 if the address might be a local broadcast address.
+ */
+int
+in_broadcast(struct in_addr in, struct ifnet *ifp)
+{
+ register struct ifaddr *ifa;
+ u_long t;
+
+ if (in.s_addr == INADDR_BROADCAST ||
+ in.s_addr == INADDR_ANY)
+ return (1);
+ if ((ifp->if_flags & IFF_BROADCAST) == 0)
+ return (0);
+ t = ntohl(in.s_addr);
+ /*
+ * Look through the list of addresses for a match
+ * with a broadcast address.
+ */
+#define ia ((struct in_ifaddr *)ifa)
+ TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link)
+ if (ifa->ifa_addr->sa_family == AF_INET &&
+ (in.s_addr == ia->ia_broadaddr.sin_addr.s_addr ||
+ in.s_addr == ia->ia_netbroadcast.s_addr ||
+ /*
+ * Check for old-style (host 0) broadcast.
+ */
+ t == ia->ia_subnet || t == ia->ia_net) &&
+ /*
+ * Check for an all-ones subnet mask. These
+ * only exist when an interface gets a secondary
+ * address.
+ */
+ ia->ia_subnetmask != (u_long)0xffffffff)
+ return (1);
+ return (0);
+#undef ia
+}
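+
+/*
+ * Examples (illustrative) for the checks above: with 192.0.2.1/24 on
+ * an IFF_BROADCAST interface, in_broadcast() returns 1 for
+ * 255.255.255.255 and INADDR_ANY, for the directed broadcast address
+ * 192.0.2.255, and for the old-style host-0 form 192.0.2.0; the final
+ * test rejects broadcast matching for an address with an all-ones
+ * (/32) subnet mask.
+ */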
+
+/*
+ * On interface removal, clean up IPv4 data structures hung off of the ifnet.
+ */
+void
+in_ifdetach(struct ifnet *ifp)
+{
+
+ in_pcbpurgeif0(&V_ripcbinfo, ifp);
+ in_pcbpurgeif0(&V_udbinfo, ifp);
+ in_purgemaddrs(ifp);
+}
+
+/*
+ * Delete all IPv4 multicast address records, and associated link-layer
+ * multicast address records, associated with ifp.
+ * XXX It looks like domifdetach runs AFTER the link layer cleanup.
+ * XXX This should not race with ifma_protospec being set during
+ * a new allocation, if it does, we have bigger problems.
+ */
+static void
+in_purgemaddrs(struct ifnet *ifp)
+{
+ LIST_HEAD(,in_multi) purgeinms;
+ struct in_multi *inm, *tinm;
+ struct ifmultiaddr *ifma;
+
+ LIST_INIT(&purgeinms);
+ IN_MULTI_LOCK();
+
+ /*
+ * Extract list of in_multi associated with the detaching ifp
+ * which the PF_INET layer is about to release.
+ * We need to do this as IF_ADDR_LOCK() may be re-acquired
+ * by code further down.
+ */
+ IF_ADDR_LOCK(ifp);
+ TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
+ if (ifma->ifma_addr->sa_family != AF_INET ||
+ ifma->ifma_protospec == NULL)
+ continue;
+#if 0
+ KASSERT(ifma->ifma_protospec != NULL,
+ ("%s: ifma_protospec is NULL", __func__));
+#endif
+ inm = (struct in_multi *)ifma->ifma_protospec;
+ LIST_INSERT_HEAD(&purgeinms, inm, inm_link);
+ }
+ IF_ADDR_UNLOCK(ifp);
+
+ LIST_FOREACH_SAFE(inm, &purgeinms, inm_link, tinm) {
+ LIST_REMOVE(inm, inm_link);
+ inm_release_locked(inm);
+ }
+ igmp_ifdetach(ifp);
+
+ IN_MULTI_UNLOCK();
+}
+
+#include <rtems/freebsd/net/if_dl.h>
+#include <rtems/freebsd/netinet/if_ether.h>
+
+struct in_llentry {
+ struct llentry base;
+ struct sockaddr_in l3_addr4;
+};
+
+static struct llentry *
+in_lltable_new(const struct sockaddr *l3addr, u_int flags)
+{
+ struct in_llentry *lle;
+
+ lle = malloc(sizeof(struct in_llentry), M_LLTABLE, M_DONTWAIT | M_ZERO);
+ if (lle == NULL) /* NB: caller generates msg */
+ return NULL;
+
+ callout_init(&lle->base.la_timer, CALLOUT_MPSAFE);
+ /*
+ * For IPv4 this will trigger "arpresolve" to generate
+ * an ARP request.
+ */
+ lle->base.la_expire = time_second; /* mark expired */
+ lle->l3_addr4 = *(const struct sockaddr_in *)l3addr;
+ lle->base.lle_refcnt = 1;
+ LLE_LOCK_INIT(&lle->base);
+ return &lle->base;
+}
+
+/*
+ * Deletes an address from the address table.
+ * This function is called by the timer functions
+ * such as arptimer() and nd6_llinfo_timer(), and
+ * the caller does the locking.
+ */
+static void
+in_lltable_free(struct lltable *llt, struct llentry *lle)
+{
+ LLE_WUNLOCK(lle);
+ LLE_LOCK_DESTROY(lle);
+ free(lle, M_LLTABLE);
+}
+
+#define IN_ARE_MASKED_ADDR_EQUAL(d, a, m) ( \
+ (((ntohl((d)->sin_addr.s_addr) ^ (a)->sin_addr.s_addr) & (m)->sin_addr.s_addr)) == 0 )
+
+static void
+in_lltable_prefix_free(struct lltable *llt,
+ const struct sockaddr *prefix,
+ const struct sockaddr *mask)
+{
+ const struct sockaddr_in *pfx = (const struct sockaddr_in *)prefix;
+ const struct sockaddr_in *msk = (const struct sockaddr_in *)mask;
+ struct llentry *lle, *next;
+ register int i;
+
+ for (i=0; i < LLTBL_HASHTBL_SIZE; i++) {
+ LIST_FOREACH_SAFE(lle, &llt->lle_head[i], lle_next, next) {
+
+ if (IN_ARE_MASKED_ADDR_EQUAL((struct sockaddr_in *)L3_ADDR(lle),
+ pfx, msk)) {
+ int canceled;
+
+ canceled = callout_drain(&lle->la_timer);
+ LLE_WLOCK(lle);
+ if (canceled)
+ LLE_REMREF(lle);
+ llentry_free(lle);
+ }
+ }
+ }
+}
+
+static int
+in_lltable_rtcheck(struct ifnet *ifp, u_int flags, const struct sockaddr *l3addr)
+{
+ struct rtentry *rt;
+
+ KASSERT(l3addr->sa_family == AF_INET,
+ ("sin_family %d", l3addr->sa_family));
+
+ /* XXX rtalloc1 should take a const param */
+ rt = rtalloc1(__DECONST(struct sockaddr *, l3addr), 0, 0);
+ if (rt == NULL || (!(flags & LLE_PUB) &&
+ ((rt->rt_flags & RTF_GATEWAY) ||
+ (rt->rt_ifp != ifp)))) {
+#ifdef DIAGNOSTIC
+ log(LOG_INFO, "IPv4 address: \"%s\" is not on the network\n",
+ inet_ntoa(((const struct sockaddr_in *)l3addr)->sin_addr));
+#endif
+ if (rt != NULL)
+ RTFREE_LOCKED(rt);
+ return (EINVAL);
+ }
+ RTFREE_LOCKED(rt);
+ return 0;
+}
+
+/*
+ * Return NULL if not found or marked for deletion.
+ * If found return lle read locked.
+ */
+static struct llentry *
+in_lltable_lookup(struct lltable *llt, u_int flags, const struct sockaddr *l3addr)
+{
+ const struct sockaddr_in *sin = (const struct sockaddr_in *)l3addr;
+ struct ifnet *ifp = llt->llt_ifp;
+ struct llentry *lle;
+ struct llentries *lleh;
+ u_int hashkey;
+
+ IF_AFDATA_LOCK_ASSERT(ifp);
+ KASSERT(l3addr->sa_family == AF_INET,
+ ("sin_family %d", l3addr->sa_family));
+
+ hashkey = sin->sin_addr.s_addr;
+ lleh = &llt->lle_head[LLATBL_HASH(hashkey, LLTBL_HASHMASK)];
+ LIST_FOREACH(lle, lleh, lle_next) {
+ struct sockaddr_in *sa2 = (struct sockaddr_in *)L3_ADDR(lle);
+ if (lle->la_flags & LLE_DELETED)
+ continue;
+ if (sa2->sin_addr.s_addr == sin->sin_addr.s_addr)
+ break;
+ }
+ if (lle == NULL) {
+#ifdef DIAGNOSTIC
+ if (flags & LLE_DELETE)
+ log(LOG_INFO, "interface address is missing from cache = %p in delete\n", lle);
+#endif
+ if (!(flags & LLE_CREATE))
+ return (NULL);
+ /*
+ * A route that covers the given address must have
+ * been installed first, because we are doing a
+ * resolution; verify this.
+ */
+ if (!(flags & LLE_IFADDR) &&
+ in_lltable_rtcheck(ifp, flags, l3addr) != 0)
+ goto done;
+
+ lle = in_lltable_new(l3addr, flags);
+ if (lle == NULL) {
+ log(LOG_INFO, "lla_lookup: new lle malloc failed\n");
+ goto done;
+ }
+ lle->la_flags = flags & ~LLE_CREATE;
+ if ((flags & (LLE_CREATE | LLE_IFADDR)) == (LLE_CREATE | LLE_IFADDR)) {
+ bcopy(IF_LLADDR(ifp), &lle->ll_addr, ifp->if_addrlen);
+ lle->la_flags |= (LLE_VALID | LLE_STATIC);
+ }
+
+ lle->lle_tbl = llt;
+ lle->lle_head = lleh;
+ LIST_INSERT_HEAD(lleh, lle, lle_next);
+ } else if (flags & LLE_DELETE) {
+ if (!(lle->la_flags & LLE_IFADDR) || (flags & LLE_IFADDR)) {
+ LLE_WLOCK(lle);
+ lle->la_flags = LLE_DELETED;
+ LLE_WUNLOCK(lle);
+#ifdef DIAGNOSTIC
+ log(LOG_INFO, "ifaddr cache = %p is deleted\n", lle);
+#endif
+ }
+ lle = (void *)-1;
+
+ }
+ if (LLE_IS_VALID(lle)) {
+ if (flags & LLE_EXCLUSIVE)
+ LLE_WLOCK(lle);
+ else
+ LLE_RLOCK(lle);
+ }
+done:
+ return (lle);
+}
+
+static int
+in_lltable_dump(struct lltable *llt, struct sysctl_req *wr)
+{
+#define SIN(lle) ((struct sockaddr_in *) L3_ADDR(lle))
+ struct ifnet *ifp = llt->llt_ifp;
+ struct llentry *lle;
+ /* XXX stack use */
+ struct {
+ struct rt_msghdr rtm;
+ struct sockaddr_inarp sin;
+ struct sockaddr_dl sdl;
+ } arpc;
+ int error, i;
+
+ LLTABLE_LOCK_ASSERT();
+
+ error = 0;
+ for (i = 0; i < LLTBL_HASHTBL_SIZE; i++) {
+ LIST_FOREACH(lle, &llt->lle_head[i], lle_next) {
+ struct sockaddr_dl *sdl;
+
+ /* skip deleted entries */
+ if ((lle->la_flags & LLE_DELETED) == LLE_DELETED)
+ continue;
+ /* Skip if jailed and not a valid IP of the prison. */
+ if (prison_if(wr->td->td_ucred, L3_ADDR(lle)) != 0)
+ continue;
+ /*
+ * produce a msg made of:
+ * struct rt_msghdr;
+ * struct sockaddr_inarp; (IPv4)
+ * struct sockaddr_dl;
+ */
+ bzero(&arpc, sizeof(arpc));
+ arpc.rtm.rtm_msglen = sizeof(arpc);
+ arpc.rtm.rtm_version = RTM_VERSION;
+ arpc.rtm.rtm_type = RTM_GET;
+ arpc.rtm.rtm_flags = RTF_UP;
+ arpc.rtm.rtm_addrs = RTA_DST | RTA_GATEWAY;
+ arpc.sin.sin_family = AF_INET;
+ arpc.sin.sin_len = sizeof(arpc.sin);
+ arpc.sin.sin_addr.s_addr = SIN(lle)->sin_addr.s_addr;
+
+ /* publish */
+ if (lle->la_flags & LLE_PUB) {
+ arpc.rtm.rtm_flags |= RTF_ANNOUNCE;
+ /* proxy only */
+ if (lle->la_flags & LLE_PROXY)
+ arpc.sin.sin_other = SIN_PROXY;
+ }
+
+ sdl = &arpc.sdl;
+ sdl->sdl_family = AF_LINK;
+ sdl->sdl_len = sizeof(*sdl);
+ sdl->sdl_index = ifp->if_index;
+ sdl->sdl_type = ifp->if_type;
+ if ((lle->la_flags & LLE_VALID) == LLE_VALID) {
+ sdl->sdl_alen = ifp->if_addrlen;
+ bcopy(&lle->ll_addr, LLADDR(sdl), ifp->if_addrlen);
+ } else {
+ sdl->sdl_alen = 0;
+ bzero(LLADDR(sdl), ifp->if_addrlen);
+ }
+
+ arpc.rtm.rtm_rmx.rmx_expire =
+ lle->la_flags & LLE_STATIC ? 0 : lle->la_expire;
+ arpc.rtm.rtm_flags |= (RTF_HOST | RTF_LLDATA);
+ if (lle->la_flags & LLE_STATIC)
+ arpc.rtm.rtm_flags |= RTF_STATIC;
+ arpc.rtm.rtm_index = ifp->if_index;
+ error = SYSCTL_OUT(wr, &arpc, sizeof(arpc));
+ if (error)
+ break;
+ }
+ }
+ return error;
+#undef SIN
+}
+
+void *
+in_domifattach(struct ifnet *ifp)
+{
+ struct in_ifinfo *ii;
+ struct lltable *llt;
+
+ ii = malloc(sizeof(struct in_ifinfo), M_IFADDR, M_WAITOK|M_ZERO);
+
+ llt = lltable_init(ifp, AF_INET);
+ if (llt != NULL) {
+ llt->llt_new = in_lltable_new;
+ llt->llt_free = in_lltable_free;
+ llt->llt_prefix_free = in_lltable_prefix_free;
+ llt->llt_rtcheck = in_lltable_rtcheck;
+ llt->llt_lookup = in_lltable_lookup;
+ llt->llt_dump = in_lltable_dump;
+ }
+ ii->ii_llt = llt;
+
+ ii->ii_igmp = igmp_domifattach(ifp);
+
+ return ii;
+}
+
+void
+in_domifdetach(struct ifnet *ifp, void *aux)
+{
+ struct in_ifinfo *ii = (struct in_ifinfo *)aux;
+
+ igmp_domifdetach(ifp);
+ lltable_free(ii->ii_llt);
+ free(ii, M_IFADDR);
+}
diff --git a/rtems/freebsd/netinet/in.h b/rtems/freebsd/netinet/in.h
new file mode 100644
index 00000000..f342930b
--- /dev/null
+++ b/rtems/freebsd/netinet/in.h
@@ -0,0 +1,794 @@
+/*-
+ * Copyright (c) 1982, 1986, 1990, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)in.h 8.3 (Berkeley) 1/3/94
+ * $FreeBSD$
+ */
+
+#ifndef _NETINET_IN_HH_
+#define _NETINET_IN_HH_
+
+#include <rtems/freebsd/sys/cdefs.h>
+#include <rtems/freebsd/sys/_types.h>
+#include <rtems/freebsd/machine/endian.h>
+
+/* Protocols common to RFC 1700, POSIX, and X/Open. */
+#define IPPROTO_IP 0 /* dummy for IP */
+#define IPPROTO_ICMP 1 /* control message protocol */
+#define IPPROTO_TCP 6 /* tcp */
+#define IPPROTO_UDP 17 /* user datagram protocol */
+
+#define INADDR_ANY (u_int32_t)0x00000000
+#define INADDR_BROADCAST (u_int32_t)0xffffffff /* must be masked */
+
+#ifndef _UINT8_T_DECLARED
+typedef __uint8_t uint8_t;
+#define _UINT8_T_DECLARED
+#endif
+
+#ifndef _UINT16_T_DECLARED
+typedef __uint16_t uint16_t;
+#define _UINT16_T_DECLARED
+#endif
+
+#ifndef _UINT32_T_DECLARED
+typedef __uint32_t uint32_t;
+#define _UINT32_T_DECLARED
+#endif
+
+#ifndef _IN_ADDR_T_DECLARED
+typedef uint32_t in_addr_t;
+#define _IN_ADDR_T_DECLARED
+#endif
+
+#ifndef _IN_PORT_T_DECLARED
+typedef uint16_t in_port_t;
+#define _IN_PORT_T_DECLARED
+#endif
+
+#ifndef _SA_FAMILY_T_DECLARED
+typedef __sa_family_t sa_family_t;
+#define _SA_FAMILY_T_DECLARED
+#endif
+
+/* Internet address (a structure for historical reasons). */
+#ifndef _STRUCT_IN_ADDR_DECLARED
+struct in_addr {
+ in_addr_t s_addr;
+};
+#define _STRUCT_IN_ADDR_DECLARED
+#endif
+
+#ifndef _SOCKLEN_T_DECLARED
+typedef __socklen_t socklen_t;
+#define _SOCKLEN_T_DECLARED
+#endif
+
+/* Avoid collision with original definition in sys/socket.h. */
+#ifndef _STRUCT_SOCKADDR_STORAGE_DECLARED
+/*
+ * RFC 2553: protocol-independent placeholder for socket addresses
+ */
+#define _SS_MAXSIZE 128U
+#define _SS_ALIGNSIZE (sizeof(__int64_t))
+#define _SS_PAD1SIZE (_SS_ALIGNSIZE - sizeof(unsigned char) - \
+ sizeof(sa_family_t))
+#define _SS_PAD2SIZE (_SS_MAXSIZE - sizeof(unsigned char) - \
+ sizeof(sa_family_t) - _SS_PAD1SIZE - _SS_ALIGNSIZE)
+
+struct sockaddr_storage {
+ unsigned char ss_len; /* address length */
+ sa_family_t ss_family; /* address family */
+ char __ss_pad1[_SS_PAD1SIZE];
+ __int64_t __ss_align; /* force desired struct alignment */
+ char __ss_pad2[_SS_PAD2SIZE];
+};
+#define _STRUCT_SOCKADDR_STORAGE_DECLARED
+#endif
+
+/* Socket address, internet style. */
+struct sockaddr_in {
+ uint8_t sin_len;
+ sa_family_t sin_family;
+ in_port_t sin_port;
+ struct in_addr sin_addr;
+ char sin_zero[8];
+};
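+
+/*
+ * Example (illustrative): unlike the POSIX sockaddr, the BSD variant
+ * carries an explicit length, so sin_len must be set along with the
+ * family; ports and addresses are stored in network byte order:
+ *
+ *	struct sockaddr_in sin;
+ *
+ *	memset(&sin, 0, sizeof(sin));	(also clears sin_zero)
+ *	sin.sin_len = sizeof(sin);
+ *	sin.sin_family = AF_INET;
+ *	sin.sin_port = htons(80);
+ *	sin.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
+ */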
+
+#if !defined(_KERNEL) && __BSD_VISIBLE
+
+#ifndef _BYTEORDER_PROTOTYPED
+#define _BYTEORDER_PROTOTYPED
+__BEGIN_DECLS
+uint32_t htonl(uint32_t);
+uint16_t htons(uint16_t);
+uint32_t ntohl(uint32_t);
+uint16_t ntohs(uint16_t);
+__END_DECLS
+#endif
+
+#ifndef _BYTEORDER_FUNC_DEFINED
+#define _BYTEORDER_FUNC_DEFINED
+#define htonl(x) __htonl(x)
+#define htons(x) __htons(x)
+#define ntohl(x) __ntohl(x)
+#define ntohs(x) __ntohs(x)
+#endif
+
+#endif /* !_KERNEL && __BSD_VISIBLE */
+
+#if __POSIX_VISIBLE >= 200112
+#define IPPROTO_RAW 255 /* raw IP packet */
+#define INET_ADDRSTRLEN 16
+#endif
+
+#if __BSD_VISIBLE
+/*
+ * Constants and structures defined by the internet system,
+ * Per RFC 790, September 1981, and numerous additions.
+ */
+
+/*
+ * Protocols (RFC 1700)
+ */
+#define IPPROTO_HOPOPTS 0 /* IP6 hop-by-hop options */
+#define IPPROTO_IGMP 2 /* group mgmt protocol */
+#define IPPROTO_GGP 3 /* gateway^2 (deprecated) */
+#define IPPROTO_IPV4 4 /* IPv4 encapsulation */
+#define IPPROTO_IPIP IPPROTO_IPV4 /* for compatibility */
+#define IPPROTO_ST 7 /* Stream protocol II */
+#define IPPROTO_EGP 8 /* exterior gateway protocol */
+#define IPPROTO_PIGP 9 /* private interior gateway */
+#define IPPROTO_RCCMON 10 /* BBN RCC Monitoring */
+#define IPPROTO_NVPII 11 /* network voice protocol*/
+#define IPPROTO_PUP 12 /* pup */
+#define IPPROTO_ARGUS 13 /* Argus */
+#define IPPROTO_EMCON 14 /* EMCON */
+#define IPPROTO_XNET 15 /* Cross Net Debugger */
+#define IPPROTO_CHAOS 16 /* Chaos*/
+#define IPPROTO_MUX 18 /* Multiplexing */
+#define IPPROTO_MEAS 19 /* DCN Measurement Subsystems */
+#define IPPROTO_HMP 20 /* Host Monitoring */
+#define IPPROTO_PRM 21 /* Packet Radio Measurement */
+#define IPPROTO_IDP 22 /* xns idp */
+#define IPPROTO_TRUNK1 23 /* Trunk-1 */
+#define IPPROTO_TRUNK2 24 /* Trunk-2 */
+#define IPPROTO_LEAF1 25 /* Leaf-1 */
+#define IPPROTO_LEAF2 26 /* Leaf-2 */
+#define IPPROTO_RDP 27 /* Reliable Data */
+#define IPPROTO_IRTP 28 /* Reliable Transaction */
+#define IPPROTO_TP 29 /* tp-4 w/ class negotiation */
+#define IPPROTO_BLT 30 /* Bulk Data Transfer */
+#define IPPROTO_NSP 31 /* Network Services */
+#define IPPROTO_INP 32 /* Merit Internodal */
+#define IPPROTO_SEP 33 /* Sequential Exchange */
+#define IPPROTO_3PC 34 /* Third Party Connect */
+#define IPPROTO_IDPR 35 /* InterDomain Policy Routing */
+#define IPPROTO_XTP 36 /* XTP */
+#define IPPROTO_DDP 37 /* Datagram Delivery */
+#define IPPROTO_CMTP 38 /* Control Message Transport */
+#define IPPROTO_TPXX 39 /* TP++ Transport */
+#define IPPROTO_IL 40 /* IL transport protocol */
+#define IPPROTO_IPV6 41 /* IP6 header */
+#define IPPROTO_SDRP 42 /* Source Demand Routing */
+#define IPPROTO_ROUTING 43 /* IP6 routing header */
+#define IPPROTO_FRAGMENT 44 /* IP6 fragmentation header */
+#define IPPROTO_IDRP 45 /* InterDomain Routing*/
+#define IPPROTO_RSVP 46 /* resource reservation */
+#define IPPROTO_GRE 47 /* General Routing Encap. */
+#define IPPROTO_MHRP 48 /* Mobile Host Routing */
+#define IPPROTO_BHA 49 /* BHA */
+#define IPPROTO_ESP 50 /* IP6 Encap Sec. Payload */
+#define IPPROTO_AH 51 /* IP6 Auth Header */
+#define IPPROTO_INLSP 52 /* Integ. Net Layer Security */
+#define IPPROTO_SWIPE 53 /* IP with encryption */
+#define IPPROTO_NHRP 54 /* Next Hop Resolution */
+#define IPPROTO_MOBILE 55 /* IP Mobility */
+#define IPPROTO_TLSP 56 /* Transport Layer Security */
+#define IPPROTO_SKIP 57 /* SKIP */
+#define IPPROTO_ICMPV6 58 /* ICMP6 */
+#define IPPROTO_NONE 59 /* IP6 no next header */
+#define IPPROTO_DSTOPTS 60 /* IP6 destination option */
+#define IPPROTO_AHIP 61 /* any host internal protocol */
+#define IPPROTO_CFTP 62 /* CFTP */
+#define IPPROTO_HELLO 63 /* "hello" routing protocol */
+#define IPPROTO_SATEXPAK 64 /* SATNET/Backroom EXPAK */
+#define IPPROTO_KRYPTOLAN 65 /* Kryptolan */
+#define IPPROTO_RVD 66 /* Remote Virtual Disk */
+#define IPPROTO_IPPC 67 /* Pluribus Packet Core */
+#define IPPROTO_ADFS 68 /* Any distributed FS */
+#define IPPROTO_SATMON 69 /* Satnet Monitoring */
+#define IPPROTO_VISA 70 /* VISA Protocol */
+#define IPPROTO_IPCV 71 /* Packet Core Utility */
+#define IPPROTO_CPNX 72 /* Comp. Prot. Net. Executive */
+#define IPPROTO_CPHB 73 /* Comp. Prot. HeartBeat */
+#define IPPROTO_WSN 74 /* Wang Span Network */
+#define IPPROTO_PVP 75 /* Packet Video Protocol */
+#define IPPROTO_BRSATMON 76 /* BackRoom SATNET Monitoring */
+#define IPPROTO_ND 77 /* Sun net disk proto (temp.) */
+#define IPPROTO_WBMON 78 /* WIDEBAND Monitoring */
+#define IPPROTO_WBEXPAK 79 /* WIDEBAND EXPAK */
+#define IPPROTO_EON 80 /* ISO cnlp */
+#define IPPROTO_VMTP 81 /* VMTP */
+#define IPPROTO_SVMTP 82 /* Secure VMTP */
+#define IPPROTO_VINES 83 /* Banyan VINES */
+#define IPPROTO_TTP 84 /* TTP */
+#define IPPROTO_IGP 85 /* NSFNET-IGP */
+#define IPPROTO_DGP 86 /* dissimilar gateway prot. */
+#define IPPROTO_TCF 87 /* TCF */
+#define IPPROTO_IGRP 88 /* Cisco/GXS IGRP */
+#define IPPROTO_OSPFIGP 89 /* OSPFIGP */
+#define IPPROTO_SRPC 90 /* Sprite RPC protocol */
+#define IPPROTO_LARP 91 /* Locus Address Resolution */
+#define IPPROTO_MTP 92 /* Multicast Transport */
+#define IPPROTO_AX25 93 /* AX.25 Frames */
+#define IPPROTO_IPEIP 94 /* IP encapsulated in IP */
+#define IPPROTO_MICP 95 /* Mobile Int.ing control */
+#define IPPROTO_SCCSP 96 /* Semaphore Comm. security */
+#define IPPROTO_ETHERIP 97 /* Ethernet IP encapsulation */
+#define IPPROTO_ENCAP 98 /* encapsulation header */
+#define IPPROTO_APES 99 /* any private encr. scheme */
+#define IPPROTO_GMTP 100 /* GMTP*/
+#define IPPROTO_IPCOMP 108 /* payload compression (IPComp) */
+#define IPPROTO_SCTP 132 /* SCTP */
+/* 101-254: Partly Unassigned */
+#define IPPROTO_PIM 103 /* Protocol Independent Mcast */
+#define IPPROTO_CARP 112 /* CARP */
+#define IPPROTO_PGM 113 /* PGM */
+#define IPPROTO_PFSYNC 240 /* PFSYNC */
+/* 255: Reserved */
+/* BSD Private, local use, namespace incursion, no longer used */
+#define IPPROTO_OLD_DIVERT 254 /* OLD divert pseudo-proto */
+#define IPPROTO_MAX 256
+
+/* last return value of *_input(), meaning "all work for this pkt is done". */
+#define IPPROTO_DONE 257
+
+/* Only used internally, so can be outside the range of valid IP protocols. */
+#define IPPROTO_DIVERT 258 /* divert pseudo-protocol */
+
+/*
+ * Defined to avoid confusion. The master value is defined by
+ * PROTO_SPACER in sys/protosw.h.
+ */
+#define IPPROTO_SPACER 32767 /* spacer for loadable protos */
+
+/*
+ * Local port number conventions:
+ *
+ * When a user does a bind(2) or connect(2) with a port number of zero,
+ * a non-conflicting local port address is chosen.
+ * The default range is IPPORT_HIFIRSTAUTO through
+ * IPPORT_HILASTAUTO, although that is settable by sysctl.
+ *
+ * A user may set the IPPROTO_IP option IP_PORTRANGE to change this
+ * default assignment range.
+ *
+ * The value IP_PORTRANGE_DEFAULT causes the default behavior.
+ *
+ * The value IP_PORTRANGE_HIGH changes the range of candidate port numbers
+ * into the "high" range. These are reserved for client outbound connections
+ * which do not want to be filtered by any firewalls.
+ *
+ * The value IP_PORTRANGE_LOW changes the range to the "low" area
+ * that is (by convention) restricted to privileged processes. This
+ * convention is based on "vouchsafe" principles only. It is only secure
+ * if you trust the remote host to restrict these ports.
+ *
+ * The default range of ports and the high range can be changed by
+ * sysctl(3). (net.inet.ip.port{hi,low}{first,last}_auto)
+ *
+ * Changing those values has bad security implications if you are
+ * using a stateless firewall that is allowing packets outside of that
+ * range in order to allow transparent outgoing connections.
+ *
+ * Such a firewall configuration will generally depend on the use of these
+ * default values. If you change them, you may find your Security
+ * Administrator looking for you with a heavy object.
+ *
+ * For a slightly more orthodox text view on this:
+ *
+ * ftp://ftp.isi.edu/in-notes/iana/assignments/port-numbers
+ *
+ * port numbers are divided into three ranges:
+ *
+ * 0 - 1023 Well Known Ports
+ * 1024 - 49151 Registered Ports
+ * 49152 - 65535 Dynamic and/or Private Ports
+ *
+ */
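+
+/*
+ * Example (illustrative): a client that wants its local port chosen
+ * from the "high" range would request, before bind(2) or connect(2):
+ *
+ *	int range = IP_PORTRANGE_HIGH;
+ *
+ *	setsockopt(s, IPPROTO_IP, IP_PORTRANGE, &range, sizeof(range));
+ */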
+
+/*
+ * Ports < IPPORT_RESERVED are reserved for
+ * privileged processes (e.g. root). (IP_PORTRANGE_LOW)
+ */
+#define IPPORT_RESERVED 1024
+
+/*
+ * Default local port range, used by IP_PORTRANGE_DEFAULT
+ */
+#define IPPORT_EPHEMERALFIRST 10000
+#define IPPORT_EPHEMERALLAST 65535
+
+/*
+ * Dynamic port range, used by IP_PORTRANGE_HIGH.
+ */
+#define IPPORT_HIFIRSTAUTO 49152
+#define IPPORT_HILASTAUTO 65535
+
+/*
+ * Scanning for a free reserved port returns a value below IPPORT_RESERVED,
+ * but higher than IPPORT_RESERVEDSTART. Traditionally the start value was
+ * 512, but that conflicts with some well-known services that firewalls may
+ * have a fit over if we use them.
+ */
+#define IPPORT_RESERVEDSTART 600
+
+#define IPPORT_MAX 65535
+
+/*
+ * Definitions of bits in internet address integers.
+ * On subnets, the decomposition of addresses to host and net parts
+ * is done according to subnet mask, not the masks here.
+ */
+#define IN_CLASSA(i) (((u_int32_t)(i) & 0x80000000) == 0)
+#define IN_CLASSA_NET 0xff000000
+#define IN_CLASSA_NSHIFT 24
+#define IN_CLASSA_HOST 0x00ffffff
+#define IN_CLASSA_MAX 128
+
+#define IN_CLASSB(i) (((u_int32_t)(i) & 0xc0000000) == 0x80000000)
+#define IN_CLASSB_NET 0xffff0000
+#define IN_CLASSB_NSHIFT 16
+#define IN_CLASSB_HOST 0x0000ffff
+#define IN_CLASSB_MAX 65536
+
+#define IN_CLASSC(i) (((u_int32_t)(i) & 0xe0000000) == 0xc0000000)
+#define IN_CLASSC_NET 0xffffff00
+#define IN_CLASSC_NSHIFT 8
+#define IN_CLASSC_HOST 0x000000ff
+
+#define IN_CLASSD(i) (((u_int32_t)(i) & 0xf0000000) == 0xe0000000)
+#define IN_CLASSD_NET 0xf0000000 /* These ones aren't really */
+#define IN_CLASSD_NSHIFT 28 /* net and host fields, but */
+#define IN_CLASSD_HOST 0x0fffffff /* routing needn't know. */
+#define IN_MULTICAST(i) IN_CLASSD(i)
+
+#define IN_EXPERIMENTAL(i) (((u_int32_t)(i) & 0xf0000000) == 0xf0000000)
+#define IN_BADCLASS(i) (((u_int32_t)(i) & 0xf0000000) == 0xf0000000)
+
+#define IN_LINKLOCAL(i) (((u_int32_t)(i) & 0xffff0000) == 0xa9fe0000)
+#define IN_LOOPBACK(i) (((u_int32_t)(i) & 0xff000000) == 0x7f000000)
+#define IN_ZERONET(i) (((u_int32_t)(i) & 0xff000000) == 0)
+
+#define IN_PRIVATE(i) ((((u_int32_t)(i) & 0xff000000) == 0x0a000000) || \
+ (((u_int32_t)(i) & 0xfff00000) == 0xac100000) || \
+ (((u_int32_t)(i) & 0xffff0000) == 0xc0a80000))
+
+#define IN_LOCAL_GROUP(i) (((u_int32_t)(i) & 0xffffff00) == 0xe0000000)
+
+#define IN_ANY_LOCAL(i) (IN_LINKLOCAL(i) || IN_LOCAL_GROUP(i))
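+
+/*
+ * Examples (illustrative) for the classifiers above, which take
+ * host-order addresses: IN_MULTICAST(0xe0000001) (224.0.0.1),
+ * IN_PRIVATE(0x0a000001) (10.0.0.1) and IN_LINKLOCAL(0xa9fe0001)
+ * (169.254.0.1) are all true; the last also satisfies IN_ANY_LOCAL().
+ */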
+
+#define INADDR_LOOPBACK (u_int32_t)0x7f000001
+#ifndef _KERNEL
+#define INADDR_NONE 0xffffffff /* -1 return */
+#endif
+
+#define INADDR_UNSPEC_GROUP (u_int32_t)0xe0000000 /* 224.0.0.0 */
+#define INADDR_ALLHOSTS_GROUP (u_int32_t)0xe0000001 /* 224.0.0.1 */
+#define INADDR_ALLRTRS_GROUP (u_int32_t)0xe0000002 /* 224.0.0.2 */
+#define INADDR_ALLRPTS_GROUP (u_int32_t)0xe0000016 /* 224.0.0.22, IGMPv3 */
+#define INADDR_CARP_GROUP (u_int32_t)0xe0000012 /* 224.0.0.18 */
+#define INADDR_PFSYNC_GROUP (u_int32_t)0xe00000f0 /* 224.0.0.240 */
+#define INADDR_ALLMDNS_GROUP (u_int32_t)0xe00000fb /* 224.0.0.251 */
+#define INADDR_MAX_LOCAL_GROUP (u_int32_t)0xe00000ff /* 224.0.0.255 */
+
+#define IN_LOOPBACKNET 127 /* official! */
+
+/*
+ * Options for use with [gs]etsockopt at the IP level.
+ * First word of comment is data type; bool is stored in int.
+ */
+#define IP_OPTIONS 1 /* buf/ip_opts; set/get IP options */
+#define IP_HDRINCL 2 /* int; header is included with data */
+#define IP_TOS 3 /* int; IP type of service and preced. */
+#define IP_TTL 4 /* int; IP time to live */
+#define IP_RECVOPTS 5 /* bool; receive all IP opts w/dgram */
+#define IP_RECVRETOPTS 6 /* bool; receive IP opts for response */
+#define IP_RECVDSTADDR 7 /* bool; receive IP dst addr w/dgram */
+#define IP_SENDSRCADDR IP_RECVDSTADDR /* cmsg_type to set src addr */
+#define IP_RETOPTS 8 /* ip_opts; set/get IP options */
+#define IP_MULTICAST_IF 9 /* struct in_addr *or* struct ip_mreqn;
+ * set/get IP multicast i/f */
+#define IP_MULTICAST_TTL 10 /* u_char; set/get IP multicast ttl */
+#define IP_MULTICAST_LOOP 11 /* u_char; set/get IP multicast loopback */
+#define IP_ADD_MEMBERSHIP 12 /* ip_mreq; add an IP group membership */
+#define IP_DROP_MEMBERSHIP 13 /* ip_mreq; drop an IP group membership */
+#define IP_MULTICAST_VIF 14 /* set/get IP mcast virt. iface */
+#define IP_RSVP_ON 15 /* enable RSVP in kernel */
+#define IP_RSVP_OFF 16 /* disable RSVP in kernel */
+#define IP_RSVP_VIF_ON 17 /* set RSVP per-vif socket */
+#define IP_RSVP_VIF_OFF 18 /* unset RSVP per-vif socket */
+#define IP_PORTRANGE 19 /* int; range to choose for unspec port */
+#define IP_RECVIF 20 /* bool; receive reception if w/dgram */
+/* for IPSEC */
+#define IP_IPSEC_POLICY 21 /* int; set/get security policy */
+#define IP_FAITH 22 /* bool; accept FAITH'ed connections */
+
+#define IP_ONESBCAST 23 /* bool: send all-ones broadcast */
+#define IP_BINDANY 24 /* bool: allow bind to any address */
+
+/*
+ * Options for controlling the firewall and dummynet.
+ * Historical options (from 40 to 64) will eventually be
+ * replaced by only two options, IP_FW3 and IP_DUMMYNET3.
+ */
+#define IP_FW_TABLE_ADD 40 /* add entry */
+#define IP_FW_TABLE_DEL 41 /* delete entry */
+#define IP_FW_TABLE_FLUSH 42 /* flush table */
+#define IP_FW_TABLE_GETSIZE 43 /* get table size */
+#define IP_FW_TABLE_LIST 44 /* list table contents */
+
+#define IP_FW3 48 /* generic ipfw v.3 sockopts */
+#define IP_DUMMYNET3 49 /* generic dummynet v.3 sockopts */
+
+#define IP_FW_ADD 50 /* add a firewall rule to chain */
+#define IP_FW_DEL 51 /* delete a firewall rule from chain */
+#define IP_FW_FLUSH 52 /* flush firewall rule chain */
+#define IP_FW_ZERO 53 /* clear single/all firewall counter(s) */
+#define IP_FW_GET 54 /* get entire firewall rule chain */
+#define IP_FW_RESETLOG 55 /* reset logging counters */
+
+#define IP_FW_NAT_CFG 56 /* add/config a nat rule */
+#define IP_FW_NAT_DEL 57 /* delete a nat rule */
+#define IP_FW_NAT_GET_CONFIG 58 /* get configuration of a nat rule */
+#define IP_FW_NAT_GET_LOG 59 /* get log of a nat rule */
+
+#define IP_DUMMYNET_CONFIGURE 60 /* add/configure a dummynet pipe */
+#define IP_DUMMYNET_DEL 61 /* delete a dummynet pipe from chain */
+#define IP_DUMMYNET_FLUSH 62 /* flush dummynet */
+#define IP_DUMMYNET_GET 64 /* get entire dummynet pipes */
+
+#define IP_RECVTTL 65 /* bool; receive IP TTL w/dgram */
+#define IP_MINTTL 66 /* minimum TTL for packet or drop */
+#define IP_DONTFRAG 67 /* don't fragment packet */
+
+/* IPv4 Source Filter Multicast API [RFC3678] */
+#define IP_ADD_SOURCE_MEMBERSHIP 70 /* join a source-specific group */
+#define IP_DROP_SOURCE_MEMBERSHIP 71 /* drop a single source */
+#define IP_BLOCK_SOURCE 72 /* block a source */
+#define IP_UNBLOCK_SOURCE 73 /* unblock a source */
+
+/* The following option is private; do not use it from user applications. */
+#define IP_MSFILTER 74 /* set/get filter list */
+
+/* Protocol Independent Multicast API [RFC3678] */
+#define MCAST_JOIN_GROUP 80 /* join an any-source group */
+#define MCAST_LEAVE_GROUP 81 /* leave all sources for group */
+#define MCAST_JOIN_SOURCE_GROUP 82 /* join a source-specific group */
+#define MCAST_LEAVE_SOURCE_GROUP 83 /* leave a single source */
+#define MCAST_BLOCK_SOURCE 84 /* block a source */
+#define MCAST_UNBLOCK_SOURCE 85 /* unblock a source */
+
+/*
+ * Defaults and limits for options
+ */
+#define IP_DEFAULT_MULTICAST_TTL 1 /* normally limit m'casts to 1 hop */
+#define IP_DEFAULT_MULTICAST_LOOP 1 /* normally hear sends if a member */
+
+/*
+ * The imo_membership vector for each socket is now dynamically allocated at
+ * run-time, bounded by USHRT_MAX, and is reallocated when needed, sized
+ * according to a power-of-two increment.
+ */
+#define IP_MIN_MEMBERSHIPS 31
+#define IP_MAX_MEMBERSHIPS 4095
+#define IP_MAX_SOURCE_FILTER 1024 /* XXX to be unused */
+
+/*
+ * Default resource limits for IPv4 multicast source filtering.
+ * These may be modified by sysctl.
+ */
+#define IP_MAX_GROUP_SRC_FILTER 512 /* sources per group */
+#define IP_MAX_SOCK_SRC_FILTER 128 /* sources per socket/group */
+#define IP_MAX_SOCK_MUTE_FILTER 128 /* XXX no longer used */
+
+/*
+ * Argument structure for IP_ADD_MEMBERSHIP and IP_DROP_MEMBERSHIP.
+ */
+struct ip_mreq {
+ struct in_addr imr_multiaddr; /* IP multicast address of group */
+ struct in_addr imr_interface; /* local IP address of interface */
+};
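+
+/*
+ * Example (illustrative): joining an any-source group with the legacy
+ * API; the group address and socket are hypothetical:
+ *
+ *	struct ip_mreq mr;
+ *
+ *	mr.imr_multiaddr.s_addr = inet_addr("239.1.1.1");
+ *	mr.imr_interface.s_addr = htonl(INADDR_ANY);
+ *	setsockopt(s, IPPROTO_IP, IP_ADD_MEMBERSHIP, &mr, sizeof(mr));
+ */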
+
+/*
+ * Modified argument structure for IP_MULTICAST_IF, obtained from Linux.
+ * This is used to specify an interface index for multicast sends, as
+ * the IPv4 legacy APIs do not support this (unless IP_SENDIF is available).
+ */
+struct ip_mreqn {
+ struct in_addr imr_multiaddr; /* IP multicast address of group */
+ struct in_addr imr_address; /* local IP address of interface */
+ int imr_ifindex; /* Interface index; cast to uint32_t */
+};
+
+/*
+ * Argument structure for IPv4 Multicast Source Filter APIs. [RFC3678]
+ */
+struct ip_mreq_source {
+ struct in_addr imr_multiaddr; /* IP multicast address of group */
+ struct in_addr imr_sourceaddr; /* IP address of source */
+ struct in_addr imr_interface; /* local IP address of interface */
+};
+
+/*
+ * Argument structures for Protocol-Independent Multicast Source
+ * Filter APIs. [RFC3678]
+ */
+struct group_req {
+ uint32_t gr_interface; /* interface index */
+ struct sockaddr_storage gr_group; /* group address */
+};
+
+struct group_source_req {
+ uint32_t gsr_interface; /* interface index */
+ struct sockaddr_storage gsr_group; /* group address */
+ struct sockaddr_storage gsr_source; /* source address */
+};
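+
+/*
+ * Example (illustrative): a source-specific join with the
+ * protocol-independent API; interface, group and source are
+ * hypothetical:
+ *
+ *	struct group_source_req gsr;
+ *	struct sockaddr_in *grp = (struct sockaddr_in *)&gsr.gsr_group;
+ *	struct sockaddr_in *src = (struct sockaddr_in *)&gsr.gsr_source;
+ *
+ *	memset(&gsr, 0, sizeof(gsr));
+ *	gsr.gsr_interface = if_nametoindex("em0");
+ *	grp->sin_len = src->sin_len = sizeof(*grp);
+ *	grp->sin_family = src->sin_family = AF_INET;
+ *	grp->sin_addr.s_addr = inet_addr("232.1.1.1");
+ *	src->sin_addr.s_addr = inet_addr("192.0.2.7");
+ *	setsockopt(s, IPPROTO_IP, MCAST_JOIN_SOURCE_GROUP,
+ *	    &gsr, sizeof(gsr));
+ */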
+
+#ifndef __MSFILTERREQ_DEFINED
+#define __MSFILTERREQ_DEFINED
+/*
+ * The following structure is private; do not use it from user applications.
+ * It is used to communicate IP_MSFILTER/IPV6_MSFILTER information between
+ * the RFC 3678 libc functions and the kernel.
+ */
+struct __msfilterreq {
+ uint32_t msfr_ifindex; /* interface index */
+ uint32_t msfr_fmode; /* filter mode for group */
+ uint32_t msfr_nsrcs; /* # of sources in msfr_srcs */
+ struct sockaddr_storage msfr_group; /* group address */
+ struct sockaddr_storage *msfr_srcs; /* pointer to the first member
+ * of a contiguous array of
+ * sources to filter in full.
+ */
+};
+#endif
+
+struct sockaddr;
+
+/*
+ * Advanced (Full-state) APIs [RFC3678]
+ * The RFC specifies uint_t for the 6th argument to [sg]etsourcefilter().
+ * We use uint32_t here to be consistent.
+ */
+int setipv4sourcefilter(int, struct in_addr, struct in_addr, uint32_t,
+ uint32_t, struct in_addr *);
+int getipv4sourcefilter(int, struct in_addr, struct in_addr, uint32_t *,
+ uint32_t *, struct in_addr *);
+int setsourcefilter(int, uint32_t, struct sockaddr *, socklen_t,
+ uint32_t, uint32_t, struct sockaddr_storage *);
+int getsourcefilter(int, uint32_t, struct sockaddr *, socklen_t,
+ uint32_t *, uint32_t *, struct sockaddr_storage *);
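+
+/*
+ * Example (illustrative): excluding one sender via the full-state API;
+ * grp is an AF_INET group sockaddr and src the excluded source, both
+ * set up beforehand and hypothetical here:
+ *
+ *	struct sockaddr_storage src;
+ *
+ *	setsourcefilter(s, if_nametoindex("em0"),
+ *	    (struct sockaddr *)&grp, sizeof(struct sockaddr_in),
+ *	    MCAST_EXCLUDE, 1, &src);
+ */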
+
+/*
+ * Filter modes; also used to represent per-socket filter mode internally.
+ */
+#define MCAST_UNDEFINED 0 /* fmode: not yet defined */
+#define MCAST_INCLUDE 1 /* fmode: include these source(s) */
+#define MCAST_EXCLUDE 2 /* fmode: exclude these source(s) */
+
+/*
+ * Argument for IP_PORTRANGE:
+ * - which range to search when port is unspecified at bind() or connect()
+ */
+#define IP_PORTRANGE_DEFAULT 0 /* default range */
+#define IP_PORTRANGE_HIGH 1 /* "high" - request firewall bypass */
+#define IP_PORTRANGE_LOW 2 /* "low" - vouchsafe security */
+
+/*
+ * Definitions for inet sysctl operations.
+ *
+ * Third level is protocol number.
+ * Fourth level is desired variable within that protocol.
+ */
+#define IPPROTO_MAXID (IPPROTO_AH + 1) /* don't list to IPPROTO_MAX */
+
+#define CTL_IPPROTO_NAMES { \
+ { "ip", CTLTYPE_NODE }, \
+ { "icmp", CTLTYPE_NODE }, \
+ { "igmp", CTLTYPE_NODE }, \
+ { "ggp", CTLTYPE_NODE }, \
+ { 0, 0 }, \
+ { 0, 0 }, \
+ { "tcp", CTLTYPE_NODE }, \
+ { 0, 0 }, \
+ { "egp", CTLTYPE_NODE }, \
+ { 0, 0 }, \
+ { 0, 0 }, \
+ { 0, 0 }, \
+ { "pup", CTLTYPE_NODE }, \
+ { 0, 0 }, \
+ { 0, 0 }, \
+ { 0, 0 }, \
+ { 0, 0 }, \
+ { "udp", CTLTYPE_NODE }, \
+ { 0, 0 }, \
+ { 0, 0 }, \
+ { 0, 0 }, \
+ { 0, 0 }, \
+ { "idp", CTLTYPE_NODE }, \
+ { 0, 0 }, \
+ { 0, 0 }, \
+ { 0, 0 }, \
+ { 0, 0 }, \
+ { 0, 0 }, \
+ { 0, 0 }, \
+ { 0, 0 }, \
+ { 0, 0 }, \
+ { 0, 0 }, \
+ { 0, 0 }, \
+ { 0, 0 }, \
+ { 0, 0 }, \
+ { 0, 0 }, \
+ { 0, 0 }, \
+ { 0, 0 }, \
+ { 0, 0 }, \
+ { 0, 0 }, \
+ { 0, 0 }, \
+ { 0, 0 }, \
+ { 0, 0 }, \
+ { 0, 0 }, \
+ { 0, 0 }, \
+ { 0, 0 }, \
+ { 0, 0 }, \
+ { 0, 0 }, \
+ { 0, 0 }, \
+ { 0, 0 }, \
+ { 0, 0 }, \
+ { "ipsec", CTLTYPE_NODE }, \
+ { 0, 0 }, \
+ { 0, 0 }, \
+ { 0, 0 }, \
+ { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, \
+ { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, \
+ { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, \
+ { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, \
+ { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, \
+ { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, \
+ { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, \
+ { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, \
+ { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, \
+ { 0, 0 }, \
+ { 0, 0 }, \
+ { 0, 0 }, \
+ { "pim", CTLTYPE_NODE }, \
+}
+
+/*
+ * Names for IP sysctl objects
+ */
+#define IPCTL_FORWARDING 1 /* act as router */
+#define IPCTL_SENDREDIRECTS 2 /* may send redirects when forwarding */
+#define IPCTL_DEFTTL 3 /* default TTL */
+#ifdef notyet
+#define IPCTL_DEFMTU 4 /* default MTU */
+#endif
+#define IPCTL_RTEXPIRE 5 /* cloned route expiration time */
+#define IPCTL_RTMINEXPIRE 6 /* min value for expiration time */
+#define IPCTL_RTMAXCACHE 7 /* trigger level for dynamic expire */
+#define IPCTL_SOURCEROUTE 8 /* may perform source routes */
+#define IPCTL_DIRECTEDBROADCAST 9 /* may re-broadcast received packets */
+#define IPCTL_INTRQMAXLEN 10 /* max length of netisr queue */
+#define IPCTL_INTRQDROPS 11 /* number of netisr q drops */
+#define IPCTL_STATS 12 /* ipstat structure */
+#define IPCTL_ACCEPTSOURCEROUTE 13 /* may accept source routed packets */
+#define IPCTL_FASTFORWARDING 14 /* use fast IP forwarding code */
+#define	IPCTL_KEEPFAITH		15	/* FAITH IPv4->IPv6 translator ctl */
+#define IPCTL_GIF_TTL 16 /* default TTL for gif encap packet */
+#define IPCTL_MAXID 17
+
+#define IPCTL_NAMES { \
+ { 0, 0 }, \
+ { "forwarding", CTLTYPE_INT }, \
+ { "redirect", CTLTYPE_INT }, \
+ { "ttl", CTLTYPE_INT }, \
+ { "mtu", CTLTYPE_INT }, \
+ { "rtexpire", CTLTYPE_INT }, \
+ { "rtminexpire", CTLTYPE_INT }, \
+ { "rtmaxcache", CTLTYPE_INT }, \
+ { "sourceroute", CTLTYPE_INT }, \
+ { "directed-broadcast", CTLTYPE_INT }, \
+ { "intr-queue-maxlen", CTLTYPE_INT }, \
+ { "intr-queue-drops", CTLTYPE_INT }, \
+ { "stats", CTLTYPE_STRUCT }, \
+ { "accept_sourceroute", CTLTYPE_INT }, \
+ { "fastforwarding", CTLTYPE_INT }, \
+}
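+
+/*
+ * Illustrative sketch (not from the original sources): reading one of
+ * the objects named above from userland via sysctlbyname(3).
+ *
+ *	int fwd;
+ *	size_t len = sizeof(fwd);
+ *
+ *	if (sysctlbyname("net.inet.ip.forwarding", &fwd, &len,
+ *	    NULL, 0) != 0)
+ *		err(1, "sysctlbyname");
+ */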
+
+#endif /* __BSD_VISIBLE */
+
+#ifdef _KERNEL
+
+struct ifnet; struct mbuf; /* forward declarations for Standard C */
+
+int in_broadcast(struct in_addr, struct ifnet *);
+int in_canforward(struct in_addr);
+int in_localaddr(struct in_addr);
+int in_localip(struct in_addr);
+int inet_aton(const char *, struct in_addr *); /* in libkern */
+char *inet_ntoa(struct in_addr); /* in libkern */
+char *inet_ntoa_r(struct in_addr ina, char *buf); /* in libkern */
+char *inet_ntop(int, const void *, char *, socklen_t); /* in libkern */
+int inet_pton(int af, const char *, void *); /* in libkern */
+void in_ifdetach(struct ifnet *);
+
+#define in_hosteq(s, t) ((s).s_addr == (t).s_addr)
+#define in_nullhost(x) ((x).s_addr == INADDR_ANY)
+#define in_allhosts(x) ((x).s_addr == htonl(INADDR_ALLHOSTS_GROUP))
+
+#define satosin(sa) ((struct sockaddr_in *)(sa))
+#define sintosa(sin) ((struct sockaddr *)(sin))
+#define ifatoia(ifa) ((struct in_ifaddr *)(ifa))
+
+/*
+ * Historically, BSD keeps ip_len and ip_off in host format
+ * when doing layer 3 processing, and this often requires
+ * translating the format back and forth.
+ * To make the process explicit, we define a couple of macros
+ * that also take into account the fact that at some point
+ * we may want to keep those fields always in net format.
+ */
+
+#if (BYTE_ORDER == BIG_ENDIAN) || defined(HAVE_NET_IPLEN)
+#define SET_NET_IPLEN(p) do {} while (0)
+#define SET_HOST_IPLEN(p) do {} while (0)
+#else
+#define SET_NET_IPLEN(p) do { \
+ struct ip *h_ip = (p); \
+ h_ip->ip_len = htons(h_ip->ip_len); \
+ h_ip->ip_off = htons(h_ip->ip_off); \
+ } while (0)
+
+#define SET_HOST_IPLEN(p) do { \
+ struct ip *h_ip = (p); \
+ h_ip->ip_len = ntohs(h_ip->ip_len); \
+ h_ip->ip_off = ntohs(h_ip->ip_off); \
+ } while (0)
+#endif /* !HAVE_NET_IPLEN */
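+
+/*
+ * Illustrative sketch (not from the original sources): a hypothetical
+ * layer-3 consumer that needs ip_len/ip_off in host order while it
+ * inspects the header, and restores network order before passing the
+ * packet on.
+ *
+ *	struct ip *ip = mtod(m, struct ip *);
+ *
+ *	SET_HOST_IPLEN(ip);
+ *	(examine ip->ip_len and ip->ip_off in host order here)
+ *	SET_NET_IPLEN(ip);
+ */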
+
+#endif /* _KERNEL */
+
+/* INET6 stuff */
+#if __POSIX_VISIBLE >= 200112
+#define __KAME_NETINET_IN_HH_INCLUDED_
+#include <rtems/freebsd/netinet6/in6.h>
+#undef __KAME_NETINET_IN_HH_INCLUDED_
+#endif
+
+#endif /* !_NETINET_IN_HH_*/
diff --git a/rtems/freebsd/netinet/in_cksum.c b/rtems/freebsd/netinet/in_cksum.c
new file mode 100644
index 00000000..c4d113a8
--- /dev/null
+++ b/rtems/freebsd/netinet/in_cksum.c
@@ -0,0 +1,148 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 1988, 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)in_cksum.c 8.1 (Berkeley) 6/10/93
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/mbuf.h>
+
+/*
+ * Checksum routine for Internet Protocol family headers (Portable Version).
+ *
+ * This routine is very heavily used in the network
+ * code and should be modified for each CPU to be as fast as possible.
+ */
+
+#define ADDCARRY(x) (x > 65535 ? x -= 65535 : x)
+#define REDUCE {l_util.l = sum; sum = l_util.s[0] + l_util.s[1]; ADDCARRY(sum);}
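+
+/*
+ * Worked example of the fold (editorial): if the 32-bit accumulator
+ * holds sum = 0x0002345f, REDUCE adds its two 16-bit halves
+ * (0x0002 + 0x345f = 0x3461) and ADDCARRY folds any result above
+ * 65535 back in, so the ones'-complement sum always fits in 16 bits.
+ */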
+
+int
+in_cksum(struct mbuf *m, int len)
+{
+ register u_short *w;
+ register int sum = 0;
+ register int mlen = 0;
+ int byte_swapped = 0;
+
+ union {
+ char c[2];
+ u_short s;
+ } s_util;
+ union {
+ u_short s[2];
+ long l;
+ } l_util;
+
+ for (;m && len; m = m->m_next) {
+ if (m->m_len == 0)
+ continue;
+ w = mtod(m, u_short *);
+ if (mlen == -1) {
+ /*
+ * The first byte of this mbuf is the continuation
+ * of a word spanning between this mbuf and the
+ * last mbuf.
+ *
+ * s_util.c[0] is already saved when scanning previous
+ * mbuf.
+ */
+ s_util.c[1] = *(char *)w;
+ sum += s_util.s;
+ w = (u_short *)((char *)w + 1);
+ mlen = m->m_len - 1;
+ len--;
+ } else
+ mlen = m->m_len;
+ if (len < mlen)
+ mlen = len;
+ len -= mlen;
+ /*
+ * Force to even boundary.
+ */
+ if ((1 & (int) w) && (mlen > 0)) {
+ REDUCE;
+ sum <<= 8;
+ s_util.c[0] = *(u_char *)w;
+ w = (u_short *)((char *)w + 1);
+ mlen--;
+ byte_swapped = 1;
+ }
+ /*
+ * Unroll the loop to make overhead from
+ * branches &c small.
+ */
+ while ((mlen -= 32) >= 0) {
+ sum += w[0]; sum += w[1]; sum += w[2]; sum += w[3];
+ sum += w[4]; sum += w[5]; sum += w[6]; sum += w[7];
+ sum += w[8]; sum += w[9]; sum += w[10]; sum += w[11];
+ sum += w[12]; sum += w[13]; sum += w[14]; sum += w[15];
+ w += 16;
+ }
+ mlen += 32;
+ while ((mlen -= 8) >= 0) {
+ sum += w[0]; sum += w[1]; sum += w[2]; sum += w[3];
+ w += 4;
+ }
+ mlen += 8;
+ if (mlen == 0 && byte_swapped == 0)
+ continue;
+ REDUCE;
+ while ((mlen -= 2) >= 0) {
+ sum += *w++;
+ }
+ if (byte_swapped) {
+ REDUCE;
+ sum <<= 8;
+ byte_swapped = 0;
+ if (mlen == -1) {
+ s_util.c[1] = *(char *)w;
+ sum += s_util.s;
+ mlen = 0;
+ } else
+ mlen = -1;
+ } else if (mlen == -1)
+ s_util.c[0] = *(char *)w;
+ }
+ if (len)
+ printf("cksum: out of data\n");
+ if (mlen == -1) {
+		/*
+		 * The last mbuf has an odd number of bytes.  Follow the
+		 * standard (the odd byte may be shifted left by 8 bits
+		 * or not, as determined by the endianness of the machine).
+		 */
+ s_util.c[1] = 0;
+ sum += s_util.s;
+ }
+ REDUCE;
+ return (~sum & 0xffff);
+}
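+
+/*
+ * Illustrative sketch (not from the original sources): verifying a
+ * received IPv4 header the way an input path typically would.  The
+ * checksum of a valid header, including its own checksum field, folds
+ * to zero.
+ *
+ *	struct ip *ip = mtod(m, struct ip *);
+ *	int hlen = ip->ip_hl << 2;
+ *
+ *	if (in_cksum(m, hlen) != 0) {
+ *		m_freem(m);	(drop the corrupt header)
+ *		return;
+ *	}
+ */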
diff --git a/rtems/freebsd/netinet/in_gif.c b/rtems/freebsd/netinet/in_gif.c
new file mode 100644
index 00000000..6b9d8508
--- /dev/null
+++ b/rtems/freebsd/netinet/in_gif.c
@@ -0,0 +1,469 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/* $KAME: in_gif.c,v 1.54 2001/05/14 14:02:16 itojun Exp $ */
+
+/*-
+ * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the project nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/freebsd/local/opt_mrouting.h>
+#include <rtems/freebsd/local/opt_inet.h>
+#include <rtems/freebsd/local/opt_inet6.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/sockio.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/errno.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/sysctl.h>
+#include <rtems/freebsd/sys/protosw.h>
+#include <rtems/freebsd/sys/malloc.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/route.h>
+#include <rtems/freebsd/net/vnet.h>
+
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/in_systm.h>
+#include <rtems/freebsd/netinet/ip.h>
+#include <rtems/freebsd/netinet/ip_var.h>
+#include <rtems/freebsd/netinet/in_gif.h>
+#include <rtems/freebsd/netinet/in_var.h>
+#include <rtems/freebsd/netinet/ip_encap.h>
+#include <rtems/freebsd/netinet/ip_ecn.h>
+
+#ifdef INET6
+#include <rtems/freebsd/netinet/ip6.h>
+#endif
+
+#ifdef MROUTING
+#include <rtems/freebsd/netinet/ip_mroute.h>
+#endif /* MROUTING */
+
+#include <rtems/freebsd/net/if_gif.h>
+
+static int gif_validate4(const struct ip *, struct gif_softc *,
+ struct ifnet *);
+
+extern struct domain inetdomain;
+struct protosw in_gif_protosw = {
+ .pr_type = SOCK_RAW,
+ .pr_domain = &inetdomain,
+ .pr_protocol = 0/* IPPROTO_IPV[46] */,
+ .pr_flags = PR_ATOMIC|PR_ADDR,
+ .pr_input = in_gif_input,
+ .pr_output = (pr_output_t*)rip_output,
+ .pr_ctloutput = rip_ctloutput,
+ .pr_usrreqs = &rip_usrreqs
+};
+
+VNET_DEFINE(int, ip_gif_ttl) = GIF_TTL;
+#define V_ip_gif_ttl VNET(ip_gif_ttl)
+SYSCTL_VNET_INT(_net_inet_ip, IPCTL_GIF_TTL, gifttl, CTLFLAG_RW,
+ &VNET_NAME(ip_gif_ttl), 0, "");
+
+int
+in_gif_output(struct ifnet *ifp, int family, struct mbuf *m)
+{
+ struct gif_softc *sc = ifp->if_softc;
+ struct sockaddr_in *dst = (struct sockaddr_in *)&sc->gif_ro.ro_dst;
+ struct sockaddr_in *sin_src = (struct sockaddr_in *)sc->gif_psrc;
+ struct sockaddr_in *sin_dst = (struct sockaddr_in *)sc->gif_pdst;
+ struct ip iphdr; /* capsule IP header, host byte ordered */
+ struct etherip_header eiphdr;
+ int error, len, proto;
+ u_int8_t tos;
+
+ GIF_LOCK_ASSERT(sc);
+
+ if (sin_src == NULL || sin_dst == NULL ||
+ sin_src->sin_family != AF_INET ||
+ sin_dst->sin_family != AF_INET) {
+ m_freem(m);
+ return EAFNOSUPPORT;
+ }
+
+ switch (family) {
+#ifdef INET
+ case AF_INET:
+ {
+ struct ip *ip;
+
+ proto = IPPROTO_IPV4;
+ if (m->m_len < sizeof(*ip)) {
+ m = m_pullup(m, sizeof(*ip));
+ if (!m)
+ return ENOBUFS;
+ }
+ ip = mtod(m, struct ip *);
+ tos = ip->ip_tos;
+ break;
+ }
+#endif /* INET */
+#ifdef INET6
+ case AF_INET6:
+ {
+ struct ip6_hdr *ip6;
+ proto = IPPROTO_IPV6;
+ if (m->m_len < sizeof(*ip6)) {
+ m = m_pullup(m, sizeof(*ip6));
+ if (!m)
+ return ENOBUFS;
+ }
+ ip6 = mtod(m, struct ip6_hdr *);
+ tos = (ntohl(ip6->ip6_flow) >> 20) & 0xff;
+ break;
+ }
+#endif /* INET6 */
+ case AF_LINK:
+ proto = IPPROTO_ETHERIP;
+
+ /*
+ * GIF_SEND_REVETHIP (disabled by default) intentionally
+	 * sends an EtherIP packet with a reversed version field in
+ * the header. This is a knob for backward compatibility
+ * with FreeBSD 7.2R or prior.
+ */
+ if ((sc->gif_options & GIF_SEND_REVETHIP)) {
+ eiphdr.eip_ver = 0;
+ eiphdr.eip_resvl = ETHERIP_VERSION;
+ eiphdr.eip_resvh = 0;
+ } else {
+ eiphdr.eip_ver = ETHERIP_VERSION;
+ eiphdr.eip_resvl = 0;
+ eiphdr.eip_resvh = 0;
+ }
+ /* prepend Ethernet-in-IP header */
+ M_PREPEND(m, sizeof(struct etherip_header), M_DONTWAIT);
+ if (m && m->m_len < sizeof(struct etherip_header))
+ m = m_pullup(m, sizeof(struct etherip_header));
+ if (m == NULL)
+ return ENOBUFS;
+ bcopy(&eiphdr, mtod(m, struct etherip_header *),
+ sizeof(struct etherip_header));
+ break;
+
+ default:
+#ifdef DEBUG
+ printf("in_gif_output: warning: unknown family %d passed\n",
+ family);
+#endif
+ m_freem(m);
+ return EAFNOSUPPORT;
+ }
+
+ bzero(&iphdr, sizeof(iphdr));
+ iphdr.ip_src = sin_src->sin_addr;
+ /* bidirectional configured tunnel mode */
+ if (sin_dst->sin_addr.s_addr != INADDR_ANY)
+ iphdr.ip_dst = sin_dst->sin_addr;
+ else {
+ m_freem(m);
+ return ENETUNREACH;
+ }
+ iphdr.ip_p = proto;
+ /* version will be set in ip_output() */
+ iphdr.ip_ttl = V_ip_gif_ttl;
+ iphdr.ip_len = m->m_pkthdr.len + sizeof(struct ip);
+ ip_ecn_ingress((ifp->if_flags & IFF_LINK1) ? ECN_ALLOWED : ECN_NOCARE,
+ &iphdr.ip_tos, &tos);
+
+ /* prepend new IP header */
+ len = sizeof(struct ip);
+#ifndef __NO_STRICT_ALIGNMENT
+ if (family == AF_LINK)
+ len += ETHERIP_ALIGN;
+#endif
+ M_PREPEND(m, len, M_DONTWAIT);
+ if (m != NULL && m->m_len < len)
+ m = m_pullup(m, len);
+ if (m == NULL) {
+ printf("ENOBUFS in in_gif_output %d\n", __LINE__);
+ return ENOBUFS;
+ }
+#ifndef __NO_STRICT_ALIGNMENT
+ if (family == AF_LINK) {
+ len = mtod(m, vm_offset_t) & 3;
+ KASSERT(len == 0 || len == ETHERIP_ALIGN,
+ ("in_gif_output: unexpected misalignment"));
+ m->m_data += len;
+ m->m_len -= ETHERIP_ALIGN;
+ }
+#endif
+ bcopy(&iphdr, mtod(m, struct ip *), sizeof(struct ip));
+
+ M_SETFIB(m, sc->gif_fibnum);
+
+ if (dst->sin_family != sin_dst->sin_family ||
+ dst->sin_addr.s_addr != sin_dst->sin_addr.s_addr) {
+ /* cache route doesn't match */
+ bzero(dst, sizeof(*dst));
+ dst->sin_family = sin_dst->sin_family;
+ dst->sin_len = sizeof(struct sockaddr_in);
+ dst->sin_addr = sin_dst->sin_addr;
+ if (sc->gif_ro.ro_rt) {
+ RTFREE(sc->gif_ro.ro_rt);
+ sc->gif_ro.ro_rt = NULL;
+ }
+#if 0
+ GIF2IFP(sc)->if_mtu = GIF_MTU;
+#endif
+ }
+
+ if (sc->gif_ro.ro_rt == NULL) {
+ in_rtalloc_ign(&sc->gif_ro, 0, sc->gif_fibnum);
+ if (sc->gif_ro.ro_rt == NULL) {
+ m_freem(m);
+ return ENETUNREACH;
+ }
+
+ /* if it constitutes infinite encapsulation, punt. */
+ if (sc->gif_ro.ro_rt->rt_ifp == ifp) {
+ m_freem(m);
+ return ENETUNREACH; /* XXX */
+ }
+#if 0
+ ifp->if_mtu = sc->gif_ro.ro_rt->rt_ifp->if_mtu
+ - sizeof(struct ip);
+#endif
+ }
+
+ error = ip_output(m, NULL, &sc->gif_ro, 0, NULL, NULL);
+
+ if (!(GIF2IFP(sc)->if_flags & IFF_LINK0) &&
+ sc->gif_ro.ro_rt != NULL) {
+ RTFREE(sc->gif_ro.ro_rt);
+ sc->gif_ro.ro_rt = NULL;
+ }
+
+ return (error);
+}
+
+void
+in_gif_input(struct mbuf *m, int off)
+{
+ struct ifnet *gifp = NULL;
+ struct gif_softc *sc;
+ struct ip *ip;
+ int af;
+ u_int8_t otos;
+ int proto;
+
+ ip = mtod(m, struct ip *);
+ proto = ip->ip_p;
+
+ sc = (struct gif_softc *)encap_getarg(m);
+ if (sc == NULL) {
+ m_freem(m);
+ KMOD_IPSTAT_INC(ips_nogif);
+ return;
+ }
+
+ gifp = GIF2IFP(sc);
+ if (gifp == NULL || (gifp->if_flags & IFF_UP) == 0) {
+ m_freem(m);
+ KMOD_IPSTAT_INC(ips_nogif);
+ return;
+ }
+
+ otos = ip->ip_tos;
+ m_adj(m, off);
+
+ switch (proto) {
+#ifdef INET
+ case IPPROTO_IPV4:
+ {
+ struct ip *ip;
+ af = AF_INET;
+ if (m->m_len < sizeof(*ip)) {
+ m = m_pullup(m, sizeof(*ip));
+ if (!m)
+ return;
+ }
+ ip = mtod(m, struct ip *);
+ if (ip_ecn_egress((gifp->if_flags & IFF_LINK1) ?
+ ECN_ALLOWED : ECN_NOCARE,
+ &otos, &ip->ip_tos) == 0) {
+ m_freem(m);
+ return;
+ }
+ break;
+ }
+#endif
+#ifdef INET6
+ case IPPROTO_IPV6:
+ {
+ struct ip6_hdr *ip6;
+ u_int8_t itos, oitos;
+
+ af = AF_INET6;
+ if (m->m_len < sizeof(*ip6)) {
+ m = m_pullup(m, sizeof(*ip6));
+ if (!m)
+ return;
+ }
+ ip6 = mtod(m, struct ip6_hdr *);
+ itos = oitos = (ntohl(ip6->ip6_flow) >> 20) & 0xff;
+ if (ip_ecn_egress((gifp->if_flags & IFF_LINK1) ?
+ ECN_ALLOWED : ECN_NOCARE,
+ &otos, &itos) == 0) {
+ m_freem(m);
+ return;
+ }
+ if (itos != oitos) {
+ ip6->ip6_flow &= ~htonl(0xff << 20);
+ ip6->ip6_flow |= htonl((u_int32_t)itos << 20);
+ }
+ break;
+ }
+#endif /* INET6 */
+ case IPPROTO_ETHERIP:
+ af = AF_LINK;
+ break;
+
+ default:
+ KMOD_IPSTAT_INC(ips_nogif);
+ m_freem(m);
+ return;
+ }
+ gif_input(m, af, gifp);
+ return;
+}
+
+/*
+ * Validate the outer address.
+ */
+static int
+gif_validate4(const struct ip *ip, struct gif_softc *sc, struct ifnet *ifp)
+{
+ struct sockaddr_in *src, *dst;
+ struct in_ifaddr *ia4;
+
+ src = (struct sockaddr_in *)sc->gif_psrc;
+ dst = (struct sockaddr_in *)sc->gif_pdst;
+
+ /* check for address match */
+ if (src->sin_addr.s_addr != ip->ip_dst.s_addr ||
+ dst->sin_addr.s_addr != ip->ip_src.s_addr)
+ return 0;
+
+ /* martian filters on outer source - NOT done in ip_input! */
+ if (IN_MULTICAST(ntohl(ip->ip_src.s_addr)))
+ return 0;
+ switch ((ntohl(ip->ip_src.s_addr) & 0xff000000) >> 24) {
+ case 0: case 127: case 255:
+ return 0;
+ }
+
+ /* reject packets with broadcast on source */
+ /* XXXRW: should use hash lists? */
+ IN_IFADDR_RLOCK();
+ TAILQ_FOREACH(ia4, &V_in_ifaddrhead, ia_link) {
+ if ((ia4->ia_ifa.ifa_ifp->if_flags & IFF_BROADCAST) == 0)
+ continue;
+ if (ip->ip_src.s_addr == ia4->ia_broadaddr.sin_addr.s_addr) {
+ IN_IFADDR_RUNLOCK();
+ return 0;
+ }
+ }
+ IN_IFADDR_RUNLOCK();
+
+ /* ingress filters on outer source */
+ if ((GIF2IFP(sc)->if_flags & IFF_LINK2) == 0 && ifp) {
+ struct sockaddr_in sin;
+ struct rtentry *rt;
+
+ bzero(&sin, sizeof(sin));
+ sin.sin_family = AF_INET;
+ sin.sin_len = sizeof(struct sockaddr_in);
+ sin.sin_addr = ip->ip_src;
+ /* XXX MRT check for the interface we would use on output */
+ rt = in_rtalloc1((struct sockaddr *)&sin, 0,
+ 0UL, sc->gif_fibnum);
+ if (!rt || rt->rt_ifp != ifp) {
+#if 0
+ log(LOG_WARNING, "%s: packet from 0x%x dropped "
+ "due to ingress filter\n", if_name(GIF2IFP(sc)),
+ (u_int32_t)ntohl(sin.sin_addr.s_addr));
+#endif
+ if (rt)
+ RTFREE_LOCKED(rt);
+ return 0;
+ }
+ RTFREE_LOCKED(rt);
+ }
+
+ return 32 * 2;
+}
+
+/*
+ * We know the interface is IFF_UP, the outer address is available, and
+ * the outer family matched the physical address family; see
+ * gif_encapcheck().
+ */
+int
+gif_encapcheck4(const struct mbuf *m, int off, int proto, void *arg)
+{
+ struct ip ip;
+ struct gif_softc *sc;
+ struct ifnet *ifp;
+
+ /* sanity check done in caller */
+ sc = (struct gif_softc *)arg;
+
+ /* LINTED const cast */
+ m_copydata(m, 0, sizeof(ip), (caddr_t)&ip);
+ ifp = ((m->m_flags & M_PKTHDR) != 0) ? m->m_pkthdr.rcvif : NULL;
+
+ return gif_validate4(&ip, sc, ifp);
+}
+
+int
+in_gif_attach(struct gif_softc *sc)
+{
+ sc->encap_cookie4 = encap_attach_func(AF_INET, -1, gif_encapcheck,
+ &in_gif_protosw, sc);
+ if (sc->encap_cookie4 == NULL)
+ return EEXIST;
+ return 0;
+}
+
+int
+in_gif_detach(struct gif_softc *sc)
+{
+ int error;
+
+ error = encap_detach(sc->encap_cookie4);
+ if (error == 0)
+ sc->encap_cookie4 = NULL;
+ return error;
+}
diff --git a/rtems/freebsd/netinet/in_gif.h b/rtems/freebsd/netinet/in_gif.h
new file mode 100644
index 00000000..1e42b01f
--- /dev/null
+++ b/rtems/freebsd/netinet/in_gif.h
@@ -0,0 +1,45 @@
+/* $FreeBSD$ */
+/* $KAME: in_gif.h,v 1.5 2000/04/14 08:36:02 itojun Exp $ */
+
+/*-
+ * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the project nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _NETINET_IN_GIF_HH_
+#define _NETINET_IN_GIF_HH_
+
+#define GIF_TTL 30
+
+struct gif_softc;
+void in_gif_input(struct mbuf *, int);
+int in_gif_output(struct ifnet *, int, struct mbuf *);
+int gif_encapcheck4(const struct mbuf *, int, int, void *);
+int in_gif_attach(struct gif_softc *);
+int in_gif_detach(struct gif_softc *);
+
+#endif /*_NETINET_IN_GIF_HH_*/
diff --git a/rtems/freebsd/netinet/in_mcast.c b/rtems/freebsd/netinet/in_mcast.c
new file mode 100644
index 00000000..6887bc69
--- /dev/null
+++ b/rtems/freebsd/netinet/in_mcast.c
@@ -0,0 +1,2902 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 2007-2009 Bruce Simpson.
+ * Copyright (c) 2005 Robert N. M. Watson.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * IPv4 multicast socket, group, and socket option processing module.
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/protosw.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/socketvar.h>
+#include <rtems/freebsd/sys/protosw.h>
+#include <rtems/freebsd/sys/sysctl.h>
+#include <rtems/freebsd/sys/ktr.h>
+#include <rtems/freebsd/sys/tree.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/if_dl.h>
+#include <rtems/freebsd/net/route.h>
+#include <rtems/freebsd/net/vnet.h>
+
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/in_systm.h>
+#include <rtems/freebsd/netinet/in_pcb.h>
+#include <rtems/freebsd/netinet/in_var.h>
+#include <rtems/freebsd/netinet/ip_var.h>
+#include <rtems/freebsd/netinet/igmp_var.h>
+
+#ifndef KTR_IGMPV3
+#define KTR_IGMPV3 KTR_INET
+#endif
+
+#ifndef __SOCKUNION_DECLARED
+union sockunion {
+ struct sockaddr_storage ss;
+ struct sockaddr sa;
+ struct sockaddr_dl sdl;
+ struct sockaddr_in sin;
+};
+typedef union sockunion sockunion_t;
+#define __SOCKUNION_DECLARED
+#endif /* __SOCKUNION_DECLARED */
+
+static MALLOC_DEFINE(M_INMFILTER, "in_mfilter",
+ "IPv4 multicast PCB-layer source filter");
+static MALLOC_DEFINE(M_IPMADDR, "in_multi", "IPv4 multicast group");
+static MALLOC_DEFINE(M_IPMOPTS, "ip_moptions", "IPv4 multicast options");
+static MALLOC_DEFINE(M_IPMSOURCE, "ip_msource",
+ "IPv4 multicast IGMP-layer source filter");
+
+/*
+ * Locking:
+ * - Lock order is: Giant, INP_WLOCK, IN_MULTI_LOCK, IGMP_LOCK, IF_ADDR_LOCK.
+ * - The IF_ADDR_LOCK is implicitly taken by inm_lookup() earlier, however
+ * it can be taken by code in net/if.c also.
+ * - ip_moptions and in_mfilter are covered by the INP_WLOCK.
+ *
+ * struct in_multi is covered by IN_MULTI_LOCK. There isn't strictly
+ * any need for in_multi itself to be virtualized -- it is bound to an ifp
+ * anyway no matter what happens.
+ */
+struct mtx in_multi_mtx;
+MTX_SYSINIT(in_multi_mtx, &in_multi_mtx, "in_multi_mtx", MTX_DEF);
+
+/*
+ * Functions with non-static linkage defined in this file should be
+ * declared in in_var.h:
+ * imo_multi_filter()
+ * in_addmulti()
+ * in_delmulti()
+ * in_joingroup()
+ * in_joingroup_locked()
+ * in_leavegroup()
+ * in_leavegroup_locked()
+ * and ip_var.h:
+ * inp_freemoptions()
+ * inp_getmoptions()
+ * inp_setmoptions()
+ *
+ * XXX: Both carp and pf need to use the legacy (*,G) KPIs in_addmulti()
+ * and in_delmulti().
+ */
+static void imf_commit(struct in_mfilter *);
+static int imf_get_source(struct in_mfilter *imf,
+ const struct sockaddr_in *psin,
+ struct in_msource **);
+static struct in_msource *
+ imf_graft(struct in_mfilter *, const uint8_t,
+ const struct sockaddr_in *);
+static void imf_leave(struct in_mfilter *);
+static int imf_prune(struct in_mfilter *, const struct sockaddr_in *);
+static void imf_purge(struct in_mfilter *);
+static void imf_rollback(struct in_mfilter *);
+static void imf_reap(struct in_mfilter *);
+static int imo_grow(struct ip_moptions *);
+static size_t imo_match_group(const struct ip_moptions *,
+ const struct ifnet *, const struct sockaddr *);
+static struct in_msource *
+ imo_match_source(const struct ip_moptions *, const size_t,
+ const struct sockaddr *);
+static void ims_merge(struct ip_msource *ims,
+ const struct in_msource *lims, const int rollback);
+static int in_getmulti(struct ifnet *, const struct in_addr *,
+ struct in_multi **);
+static int inm_get_source(struct in_multi *inm, const in_addr_t haddr,
+ const int noalloc, struct ip_msource **pims);
+static int inm_is_ifp_detached(const struct in_multi *);
+static int inm_merge(struct in_multi *, /*const*/ struct in_mfilter *);
+static void inm_purge(struct in_multi *);
+static void inm_reap(struct in_multi *);
+static struct ip_moptions *
+ inp_findmoptions(struct inpcb *);
+static int inp_get_source_filters(struct inpcb *, struct sockopt *);
+static int inp_join_group(struct inpcb *, struct sockopt *);
+static int inp_leave_group(struct inpcb *, struct sockopt *);
+static struct ifnet *
+ inp_lookup_mcast_ifp(const struct inpcb *,
+ const struct sockaddr_in *, const struct in_addr);
+static int inp_block_unblock_source(struct inpcb *, struct sockopt *);
+static int inp_set_multicast_if(struct inpcb *, struct sockopt *);
+static int inp_set_source_filters(struct inpcb *, struct sockopt *);
+static int sysctl_ip_mcast_filters(SYSCTL_HANDLER_ARGS);
+
+SYSCTL_NODE(_net_inet_ip, OID_AUTO, mcast, CTLFLAG_RW, 0, "IPv4 multicast");
+
+static u_long in_mcast_maxgrpsrc = IP_MAX_GROUP_SRC_FILTER;
+SYSCTL_ULONG(_net_inet_ip_mcast, OID_AUTO, maxgrpsrc,
+ CTLFLAG_RW | CTLFLAG_TUN, &in_mcast_maxgrpsrc, 0,
+ "Max source filters per group");
+TUNABLE_ULONG("net.inet.ip.mcast.maxgrpsrc", &in_mcast_maxgrpsrc);
+
+static u_long in_mcast_maxsocksrc = IP_MAX_SOCK_SRC_FILTER;
+SYSCTL_ULONG(_net_inet_ip_mcast, OID_AUTO, maxsocksrc,
+ CTLFLAG_RW | CTLFLAG_TUN, &in_mcast_maxsocksrc, 0,
+ "Max source filters per socket");
+TUNABLE_ULONG("net.inet.ip.mcast.maxsocksrc", &in_mcast_maxsocksrc);
+
+int in_mcast_loop = IP_DEFAULT_MULTICAST_LOOP;
+SYSCTL_INT(_net_inet_ip_mcast, OID_AUTO, loop, CTLFLAG_RW | CTLFLAG_TUN,
+ &in_mcast_loop, 0, "Loopback multicast datagrams by default");
+TUNABLE_INT("net.inet.ip.mcast.loop", &in_mcast_loop);
+
+SYSCTL_NODE(_net_inet_ip_mcast, OID_AUTO, filters,
+ CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_ip_mcast_filters,
+ "Per-interface stack-wide source filters");
+
+/*
+ * Inline function which wraps assertions for a valid ifp.
+ * The ifnet layer will set the ifma's ifp pointer to NULL if the ifp
+ * is detached.
+ */
+static int __inline
+inm_is_ifp_detached(const struct in_multi *inm)
+{
+ struct ifnet *ifp;
+
+ KASSERT(inm->inm_ifma != NULL, ("%s: no ifma", __func__));
+ ifp = inm->inm_ifma->ifma_ifp;
+ if (ifp != NULL) {
+ /*
+ * Sanity check that netinet's notion of ifp is the
+ * same as net's.
+ */
+ KASSERT(inm->inm_ifp == ifp, ("%s: bad ifp", __func__));
+ }
+
+ return (ifp == NULL);
+}
+
+/*
+ * Initialize an in_mfilter structure to a known state at t0, t1
+ * with an empty source filter list.
+ */
+static __inline void
+imf_init(struct in_mfilter *imf, const int st0, const int st1)
+{
+ memset(imf, 0, sizeof(struct in_mfilter));
+ RB_INIT(&imf->imf_sources);
+ imf->imf_st[0] = st0;
+ imf->imf_st[1] = st1;
+}
+
+/*
+ * Resize the ip_moptions vector to the next power-of-two minus 1.
+ * May be called with locks held; do not sleep.
+ */
+static int
+imo_grow(struct ip_moptions *imo)
+{
+ struct in_multi **nmships;
+ struct in_multi **omships;
+ struct in_mfilter *nmfilters;
+ struct in_mfilter *omfilters;
+ size_t idx;
+ size_t newmax;
+ size_t oldmax;
+
+ nmships = NULL;
+ nmfilters = NULL;
+ omships = imo->imo_membership;
+ omfilters = imo->imo_mfilters;
+ oldmax = imo->imo_max_memberships;
+ newmax = ((oldmax + 1) * 2) - 1;
+
+ if (newmax <= IP_MAX_MEMBERSHIPS) {
+ nmships = (struct in_multi **)realloc(omships,
+ sizeof(struct in_multi *) * newmax, M_IPMOPTS, M_NOWAIT);
+ nmfilters = (struct in_mfilter *)realloc(omfilters,
+ sizeof(struct in_mfilter) * newmax, M_INMFILTER, M_NOWAIT);
+ if (nmships != NULL && nmfilters != NULL) {
+ /* Initialize newly allocated source filter heads. */
+ for (idx = oldmax; idx < newmax; idx++) {
+ imf_init(&nmfilters[idx], MCAST_UNDEFINED,
+ MCAST_EXCLUDE);
+ }
+ imo->imo_max_memberships = newmax;
+ imo->imo_membership = nmships;
+ imo->imo_mfilters = nmfilters;
+ }
+ }
+
+ if (nmships == NULL || nmfilters == NULL) {
+ if (nmships != NULL)
+ free(nmships, M_IPMOPTS);
+ if (nmfilters != NULL)
+ free(nmfilters, M_INMFILTER);
+ return (ETOOMANYREFS);
+ }
+
+ return (0);
+}
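+
+/*
+ * Worked example of the growth formula (editorial): newmax is
+ * ((oldmax + 1) * 2) - 1, so successive calls step through
+ * 3 -> 7 -> 15 -> 31 -> ... until newmax would exceed
+ * IP_MAX_MEMBERSHIPS, at which point ETOOMANYREFS is returned.
+ */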
+
+/*
+ * Find an IPv4 multicast group entry for this ip_moptions instance
+ * which matches the specified group, and optionally an interface.
+ * Return its index into the array, or -1 if not found.
+ */
+static size_t
+imo_match_group(const struct ip_moptions *imo, const struct ifnet *ifp,
+ const struct sockaddr *group)
+{
+ const struct sockaddr_in *gsin;
+ struct in_multi **pinm;
+ int idx;
+ int nmships;
+
+ gsin = (const struct sockaddr_in *)group;
+
+ /* The imo_membership array may be lazy allocated. */
+ if (imo->imo_membership == NULL || imo->imo_num_memberships == 0)
+ return (-1);
+
+ nmships = imo->imo_num_memberships;
+ pinm = &imo->imo_membership[0];
+ for (idx = 0; idx < nmships; idx++, pinm++) {
+ if (*pinm == NULL)
+ continue;
+ if ((ifp == NULL || ((*pinm)->inm_ifp == ifp)) &&
+ in_hosteq((*pinm)->inm_addr, gsin->sin_addr)) {
+ break;
+ }
+ }
+ if (idx >= nmships)
+ idx = -1;
+
+ return (idx);
+}
+
+/*
+ * Find an IPv4 multicast source entry for this imo which matches
+ * the given group index for this socket, and source address.
+ *
+ * NOTE: This does not check if the entry is in-mode, merely if
+ * it exists, which may not be the desired behaviour.
+ */
+static struct in_msource *
+imo_match_source(const struct ip_moptions *imo, const size_t gidx,
+ const struct sockaddr *src)
+{
+ struct ip_msource find;
+ struct in_mfilter *imf;
+ struct ip_msource *ims;
+ const sockunion_t *psa;
+
+ KASSERT(src->sa_family == AF_INET, ("%s: !AF_INET", __func__));
+ KASSERT(gidx != -1 && gidx < imo->imo_num_memberships,
+ ("%s: invalid index %d\n", __func__, (int)gidx));
+
+ /* The imo_mfilters array may be lazy allocated. */
+ if (imo->imo_mfilters == NULL)
+ return (NULL);
+ imf = &imo->imo_mfilters[gidx];
+
+ /* Source trees are keyed in host byte order. */
+ psa = (const sockunion_t *)src;
+ find.ims_haddr = ntohl(psa->sin.sin_addr.s_addr);
+ ims = RB_FIND(ip_msource_tree, &imf->imf_sources, &find);
+
+ return ((struct in_msource *)ims);
+}
+
+/*
+ * Perform filtering for multicast datagrams on a socket by group and source.
+ *
+ * Returns 0 if a datagram should be allowed through, or various error codes
+ * if the socket was not a member of the group, or the source was muted, etc.
+ */
+int
+imo_multi_filter(const struct ip_moptions *imo, const struct ifnet *ifp,
+ const struct sockaddr *group, const struct sockaddr *src)
+{
+ size_t gidx;
+ struct in_msource *ims;
+ int mode;
+
+ KASSERT(ifp != NULL, ("%s: null ifp", __func__));
+
+ gidx = imo_match_group(imo, ifp, group);
+ if (gidx == -1)
+ return (MCAST_NOTGMEMBER);
+
+ /*
+ * Check if the source was included in an (S,G) join.
+ * Allow reception on exclusive memberships by default,
+ * reject reception on inclusive memberships by default.
+ * Exclude source only if an in-mode exclude filter exists.
+ * Include source only if an in-mode include filter exists.
+ * NOTE: We are comparing group state here at IGMP t1 (now)
+ * with socket-layer t0 (since last downcall).
+ */
+ mode = imo->imo_mfilters[gidx].imf_st[1];
+ ims = imo_match_source(imo, gidx, src);
+
+ if ((ims == NULL && mode == MCAST_INCLUDE) ||
+ (ims != NULL && ims->imsl_st[0] != mode))
+ return (MCAST_NOTSMEMBER);
+
+ return (MCAST_PASS);
+}
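+
+/*
+ * Illustrative sketch (not from the original sources): how a datagram
+ * input path might consult the per-socket filter before delivery.
+ * imo, ifp, and the group/source sockaddr_in structures are assumed
+ * to have been set up by the caller.
+ *
+ *	if (imo_multi_filter(imo, ifp, (struct sockaddr *)&group,
+ *	    (struct sockaddr *)&src) != MCAST_PASS) {
+ *		(do not deliver this datagram to the socket)
+ *	}
+ */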
+
+/*
+ * Find and return a reference to an in_multi record for (ifp, group),
+ * and bump its reference count.
+ * If one does not exist, try to allocate it, and update link-layer multicast
+ * filters on ifp to listen for group.
+ * Assumes the IN_MULTI lock is held across the call.
+ * Return 0 if successful, otherwise return an appropriate error code.
+ */
+static int
+in_getmulti(struct ifnet *ifp, const struct in_addr *group,
+ struct in_multi **pinm)
+{
+ struct sockaddr_in gsin;
+ struct ifmultiaddr *ifma;
+ struct in_ifinfo *ii;
+ struct in_multi *inm;
+ int error;
+
+ IN_MULTI_LOCK_ASSERT();
+
+ ii = (struct in_ifinfo *)ifp->if_afdata[AF_INET];
+
+ inm = inm_lookup(ifp, *group);
+ if (inm != NULL) {
+ /*
+ * If we already joined this group, just bump the
+ * refcount and return it.
+ */
+ KASSERT(inm->inm_refcount >= 1,
+ ("%s: bad refcount %d", __func__, inm->inm_refcount));
+ ++inm->inm_refcount;
+ *pinm = inm;
+ return (0);
+ }
+
+ memset(&gsin, 0, sizeof(gsin));
+ gsin.sin_family = AF_INET;
+ gsin.sin_len = sizeof(struct sockaddr_in);
+ gsin.sin_addr = *group;
+
+ /*
+ * Check if a link-layer group is already associated
+ * with this network-layer group on the given ifnet.
+ */
+ error = if_addmulti(ifp, (struct sockaddr *)&gsin, &ifma);
+ if (error != 0)
+ return (error);
+
+ /* XXX ifma_protospec must be covered by IF_ADDR_LOCK */
+ IF_ADDR_LOCK(ifp);
+
+ /*
+ * If something other than netinet is occupying the link-layer
+ * group, print a meaningful error message and back out of
+ * the allocation.
+ * Otherwise, bump the refcount on the existing network-layer
+ * group association and return it.
+ */
+ if (ifma->ifma_protospec != NULL) {
+ inm = (struct in_multi *)ifma->ifma_protospec;
+#ifdef INVARIANTS
+ KASSERT(ifma->ifma_addr != NULL, ("%s: no ifma_addr",
+ __func__));
+ KASSERT(ifma->ifma_addr->sa_family == AF_INET,
+ ("%s: ifma not AF_INET", __func__));
+ KASSERT(inm != NULL, ("%s: no ifma_protospec", __func__));
+ if (inm->inm_ifma != ifma || inm->inm_ifp != ifp ||
+ !in_hosteq(inm->inm_addr, *group))
+ panic("%s: ifma %p is inconsistent with %p (%s)",
+ __func__, ifma, inm, inet_ntoa(*group));
+#endif
+ ++inm->inm_refcount;
+ *pinm = inm;
+ IF_ADDR_UNLOCK(ifp);
+ return (0);
+ }
+
+ IF_ADDR_LOCK_ASSERT(ifp);
+
+ /*
+ * A new in_multi record is needed; allocate and initialize it.
+ * We DO NOT perform an IGMP join as the in_ layer may need to
+ * push an initial source list down to IGMP to support SSM.
+ *
+ * The initial source filter state is INCLUDE, {} as per the RFC.
+ */
+ inm = malloc(sizeof(*inm), M_IPMADDR, M_NOWAIT | M_ZERO);
+ if (inm == NULL) {
+ if_delmulti_ifma(ifma);
+ IF_ADDR_UNLOCK(ifp);
+ return (ENOMEM);
+ }
+ inm->inm_addr = *group;
+ inm->inm_ifp = ifp;
+ inm->inm_igi = ii->ii_igmp;
+ inm->inm_ifma = ifma;
+ inm->inm_refcount = 1;
+ inm->inm_state = IGMP_NOT_MEMBER;
+
+ /*
+ * Pending state-changes per group are subject to a bounds check.
+ */
+ IFQ_SET_MAXLEN(&inm->inm_scq, IGMP_MAX_STATE_CHANGES);
+
+ inm->inm_st[0].iss_fmode = MCAST_UNDEFINED;
+ inm->inm_st[1].iss_fmode = MCAST_UNDEFINED;
+ RB_INIT(&inm->inm_srcs);
+
+ ifma->ifma_protospec = inm;
+
+ *pinm = inm;
+
+ IF_ADDR_UNLOCK(ifp);
+ return (0);
+}
+
+/*
+ * Drop a reference to an in_multi record.
+ *
+ * If the refcount drops to 0, free the in_multi record and
+ * delete the underlying link-layer membership.
+ */
+void
+inm_release_locked(struct in_multi *inm)
+{
+ struct ifmultiaddr *ifma;
+
+ IN_MULTI_LOCK_ASSERT();
+
+ CTR2(KTR_IGMPV3, "%s: refcount is %d", __func__, inm->inm_refcount);
+
+ if (--inm->inm_refcount > 0) {
+ CTR2(KTR_IGMPV3, "%s: refcount is now %d", __func__,
+ inm->inm_refcount);
+ return;
+ }
+
+ CTR2(KTR_IGMPV3, "%s: freeing inm %p", __func__, inm);
+
+ ifma = inm->inm_ifma;
+
+ /* XXX this access is not covered by IF_ADDR_LOCK */
+ CTR2(KTR_IGMPV3, "%s: purging ifma %p", __func__, ifma);
+ KASSERT(ifma->ifma_protospec == inm,
+ ("%s: ifma_protospec != inm", __func__));
+ ifma->ifma_protospec = NULL;
+
+ inm_purge(inm);
+
+ free(inm, M_IPMADDR);
+
+ if_delmulti_ifma(ifma);
+}
+
+/*
+ * Clear recorded source entries for a group.
+ * Used by the IGMP code. Caller must hold the IN_MULTI lock.
+ * FIXME: Should reap.
+ */
+void
+inm_clear_recorded(struct in_multi *inm)
+{
+ struct ip_msource *ims;
+
+ IN_MULTI_LOCK_ASSERT();
+
+ RB_FOREACH(ims, ip_msource_tree, &inm->inm_srcs) {
+ if (ims->ims_stp) {
+ ims->ims_stp = 0;
+ --inm->inm_st[1].iss_rec;
+ }
+ }
+ KASSERT(inm->inm_st[1].iss_rec == 0,
+ ("%s: iss_rec %d not 0", __func__, inm->inm_st[1].iss_rec));
+}
+
+/*
+ * Record a source as pending for a Source-Group IGMPv3 query.
+ * This lives here as it modifies the shared tree.
+ *
+ * inm is the group descriptor.
+ * naddr is the address of the source to record in network-byte order.
+ *
+ * If the net.inet.igmp.sgalloc sysctl is non-zero, we will
+ * lazy-allocate a source node in response to an SG query.
+ * Otherwise, no allocation is performed. This saves some memory
+ * with the trade-off that the source will not be reported to the
+ * router if joined in the window between the query response and
+ * the group actually being joined on the local host.
+ *
+ * VIMAGE: XXX: Currently the igmp_sgalloc feature has been removed.
+ * This turns off the allocation of a recorded source entry if
+ * the group has not been joined.
+ *
+ * Return 0 if the source didn't exist or was already marked as recorded.
+ * Return 1 if the source was marked as recorded by this function.
+ * Return <0 if any error occurred (negated errno code).
+ */
+int
+inm_record_source(struct in_multi *inm, const in_addr_t naddr)
+{
+ struct ip_msource find;
+ struct ip_msource *ims, *nims;
+
+ IN_MULTI_LOCK_ASSERT();
+
+ find.ims_haddr = ntohl(naddr);
+ ims = RB_FIND(ip_msource_tree, &inm->inm_srcs, &find);
+ if (ims && ims->ims_stp)
+ return (0);
+ if (ims == NULL) {
+ if (inm->inm_nsrc == in_mcast_maxgrpsrc)
+ return (-ENOSPC);
+ nims = malloc(sizeof(struct ip_msource), M_IPMSOURCE,
+ M_NOWAIT | M_ZERO);
+ if (nims == NULL)
+ return (-ENOMEM);
+ nims->ims_haddr = find.ims_haddr;
+ RB_INSERT(ip_msource_tree, &inm->inm_srcs, nims);
+ ++inm->inm_nsrc;
+ ims = nims;
+ }
+
+ /*
+ * Mark the source as recorded and update the recorded
+ * source count.
+ */
+ ++ims->ims_stp;
+ ++inm->inm_st[1].iss_rec;
+
+ return (1);
+}
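+
+/*
+ * Illustrative sketch (not from the original sources): how a
+ * query-processing caller might consume the convention above.
+ *
+ *	retval = inm_record_source(inm, naddr);
+ *	if (retval < 0)
+ *		(allocation failed; -retval is the errno code)
+ *	else if (retval > 0)
+ *		(source newly recorded; a report should be scheduled)
+ */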
+
+/*
+ * Return a pointer to an in_msource owned by an in_mfilter,
+ * given its source address.
+ * Lazy-allocate if needed. If this is a new entry its filter state is
+ * undefined at t0.
+ *
+ * imf is the filter set being modified.
+ * psin points to the source address to look up, in network byte-order.
+ *
+ * SMPng: May be called with locks held; malloc must not block.
+ */
+static int
+imf_get_source(struct in_mfilter *imf, const struct sockaddr_in *psin,
+ struct in_msource **plims)
+{
+ struct ip_msource find;
+ struct ip_msource *ims, *nims;
+ struct in_msource *lims;
+ int error;
+
+ error = 0;
+ ims = NULL;
+ lims = NULL;
+
+ /* key is host byte order */
+ find.ims_haddr = ntohl(psin->sin_addr.s_addr);
+ ims = RB_FIND(ip_msource_tree, &imf->imf_sources, &find);
+ lims = (struct in_msource *)ims;
+ if (lims == NULL) {
+ if (imf->imf_nsrc == in_mcast_maxsocksrc)
+ return (ENOSPC);
+ nims = malloc(sizeof(struct in_msource), M_INMFILTER,
+ M_NOWAIT | M_ZERO);
+ if (nims == NULL)
+ return (ENOMEM);
+ lims = (struct in_msource *)nims;
+ lims->ims_haddr = find.ims_haddr;
+ lims->imsl_st[0] = MCAST_UNDEFINED;
+ RB_INSERT(ip_msource_tree, &imf->imf_sources, nims);
+ ++imf->imf_nsrc;
+ }
+
+ *plims = lims;
+
+ return (error);
+}
+
+/*
+ * Graft a source entry into an existing socket-layer filter set,
+ * maintaining any required invariants and checking allocations.
+ *
+ * The source is marked as being in the new filter mode at t1.
+ *
+ * Return the pointer to the new node, otherwise return NULL.
+ */
+static struct in_msource *
+imf_graft(struct in_mfilter *imf, const uint8_t st1,
+ const struct sockaddr_in *psin)
+{
+ struct ip_msource *nims;
+ struct in_msource *lims;
+
+ nims = malloc(sizeof(struct in_msource), M_INMFILTER,
+ M_NOWAIT | M_ZERO);
+ if (nims == NULL)
+ return (NULL);
+ lims = (struct in_msource *)nims;
+ lims->ims_haddr = ntohl(psin->sin_addr.s_addr);
+ lims->imsl_st[0] = MCAST_UNDEFINED;
+ lims->imsl_st[1] = st1;
+ RB_INSERT(ip_msource_tree, &imf->imf_sources, nims);
+ ++imf->imf_nsrc;
+
+ return (lims);
+}
+
+/*
+ * Prune a source entry from an existing socket-layer filter set,
+ * maintaining any required invariants and checking allocations.
+ *
+ * The source is marked as being left at t1, it is not freed.
+ *
+ * Return 0 if no error occurred, otherwise return an errno value.
+ */
+static int
+imf_prune(struct in_mfilter *imf, const struct sockaddr_in *psin)
+{
+ struct ip_msource find;
+ struct ip_msource *ims;
+ struct in_msource *lims;
+
+ /* key is host byte order */
+ find.ims_haddr = ntohl(psin->sin_addr.s_addr);
+ ims = RB_FIND(ip_msource_tree, &imf->imf_sources, &find);
+ if (ims == NULL)
+ return (ENOENT);
+ lims = (struct in_msource *)ims;
+ lims->imsl_st[1] = MCAST_UNDEFINED;
+ return (0);
+}
+
+/*
+ * Revert socket-layer filter set deltas at t1 to t0 state.
+ */
+static void
+imf_rollback(struct in_mfilter *imf)
+{
+ struct ip_msource *ims, *tims;
+ struct in_msource *lims;
+
+ RB_FOREACH_SAFE(ims, ip_msource_tree, &imf->imf_sources, tims) {
+ lims = (struct in_msource *)ims;
+ if (lims->imsl_st[0] == lims->imsl_st[1]) {
+ /* no change at t1 */
+ continue;
+ } else if (lims->imsl_st[0] != MCAST_UNDEFINED) {
+ /* revert change to existing source at t1 */
+ lims->imsl_st[1] = lims->imsl_st[0];
+ } else {
+ /* revert source added t1 */
+ CTR2(KTR_IGMPV3, "%s: free ims %p", __func__, ims);
+ RB_REMOVE(ip_msource_tree, &imf->imf_sources, ims);
+ free(ims, M_INMFILTER);
+ imf->imf_nsrc--;
+ }
+ }
+ imf->imf_st[1] = imf->imf_st[0];
+}
+
+/*
+ * Mark socket-layer filter set as INCLUDE {} at t1.
+ */
+static void
+imf_leave(struct in_mfilter *imf)
+{
+ struct ip_msource *ims;
+ struct in_msource *lims;
+
+ RB_FOREACH(ims, ip_msource_tree, &imf->imf_sources) {
+ lims = (struct in_msource *)ims;
+ lims->imsl_st[1] = MCAST_UNDEFINED;
+ }
+ imf->imf_st[1] = MCAST_INCLUDE;
+}
+
+/*
+ * Mark socket-layer filter set deltas as committed.
+ */
+static void
+imf_commit(struct in_mfilter *imf)
+{
+ struct ip_msource *ims;
+ struct in_msource *lims;
+
+ RB_FOREACH(ims, ip_msource_tree, &imf->imf_sources) {
+ lims = (struct in_msource *)ims;
+ lims->imsl_st[0] = lims->imsl_st[1];
+ }
+ imf->imf_st[0] = imf->imf_st[1];
+}
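+
+/*
+ * The imf_* helpers above form a two-phase commit: callers stage
+ * deltas at t1, push them down to IGMP, and then either commit or
+ * roll back.  A sketch of the pattern (editorial; error handling
+ * abbreviated, the real socket-option handlers appear later in this
+ * file):
+ *
+ *	imf_graft(imf, MCAST_INCLUDE, &ssin);	stage a delta at t1
+ *	error = inm_merge(inm, imf);		merge into group state
+ *	if (error == 0)
+ *		error = igmp_change_state(inm);	IGMP downcall
+ *	if (error)
+ *		imf_rollback(imf);		revert t1 to t0
+ *	else
+ *		imf_commit(imf);		make t1 the new t0
+ *	imf_reap(imf);				drop unreferenced sources
+ */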
+
+/*
+ * Reap unreferenced sources from socket-layer filter set.
+ */
+static void
+imf_reap(struct in_mfilter *imf)
+{
+ struct ip_msource *ims, *tims;
+ struct in_msource *lims;
+
+ RB_FOREACH_SAFE(ims, ip_msource_tree, &imf->imf_sources, tims) {
+ lims = (struct in_msource *)ims;
+ if ((lims->imsl_st[0] == MCAST_UNDEFINED) &&
+ (lims->imsl_st[1] == MCAST_UNDEFINED)) {
+ CTR2(KTR_IGMPV3, "%s: free lims %p", __func__, ims);
+ RB_REMOVE(ip_msource_tree, &imf->imf_sources, ims);
+ free(ims, M_INMFILTER);
+ imf->imf_nsrc--;
+ }
+ }
+}
+
+/*
+ * Purge socket-layer filter set.
+ */
+static void
+imf_purge(struct in_mfilter *imf)
+{
+ struct ip_msource *ims, *tims;
+
+ RB_FOREACH_SAFE(ims, ip_msource_tree, &imf->imf_sources, tims) {
+ CTR2(KTR_IGMPV3, "%s: free ims %p", __func__, ims);
+ RB_REMOVE(ip_msource_tree, &imf->imf_sources, ims);
+ free(ims, M_INMFILTER);
+ imf->imf_nsrc--;
+ }
+ imf->imf_st[0] = imf->imf_st[1] = MCAST_UNDEFINED;
+ KASSERT(RB_EMPTY(&imf->imf_sources),
+ ("%s: imf_sources not empty", __func__));
+}
+
+/*
+ * Look up a source filter entry for a multicast group.
+ *
+ * inm is the group descriptor to work with.
+ * haddr is the host-byte-order IPv4 address to look up.
+ * noalloc may be non-zero to suppress allocation of sources.
+ * *pims will be set to the address of the retrieved or allocated source.
+ *
+ * SMPng: NOTE: may be called with locks held.
+ * Return 0 if successful, otherwise return a non-zero error code.
+ */
+static int
+inm_get_source(struct in_multi *inm, const in_addr_t haddr,
+ const int noalloc, struct ip_msource **pims)
+{
+ struct ip_msource find;
+ struct ip_msource *ims, *nims;
+#ifdef KTR
+ struct in_addr ia;
+#endif
+
+ find.ims_haddr = haddr;
+ ims = RB_FIND(ip_msource_tree, &inm->inm_srcs, &find);
+ if (ims == NULL && !noalloc) {
+ if (inm->inm_nsrc == in_mcast_maxgrpsrc)
+ return (ENOSPC);
+ nims = malloc(sizeof(struct ip_msource), M_IPMSOURCE,
+ M_NOWAIT | M_ZERO);
+ if (nims == NULL)
+ return (ENOMEM);
+ nims->ims_haddr = haddr;
+ RB_INSERT(ip_msource_tree, &inm->inm_srcs, nims);
+ ++inm->inm_nsrc;
+ ims = nims;
+#ifdef KTR
+ ia.s_addr = htonl(haddr);
+ CTR3(KTR_IGMPV3, "%s: allocated %s as %p", __func__,
+ inet_ntoa(ia), ims);
+#endif
+ }
+
+ *pims = ims;
+ return (0);
+}
+
+/*
+ * Merge socket-layer source into IGMP-layer source.
+ * If rollback is non-zero, perform the inverse of the merge.
+ */
+static void
+ims_merge(struct ip_msource *ims, const struct in_msource *lims,
+ const int rollback)
+{
+ int n = rollback ? -1 : 1;
+#ifdef KTR
+ struct in_addr ia;
+
+ ia.s_addr = htonl(ims->ims_haddr);
+#endif
+
+ if (lims->imsl_st[0] == MCAST_EXCLUDE) {
+ CTR3(KTR_IGMPV3, "%s: t1 ex -= %d on %s",
+ __func__, n, inet_ntoa(ia));
+ ims->ims_st[1].ex -= n;
+ } else if (lims->imsl_st[0] == MCAST_INCLUDE) {
+ CTR3(KTR_IGMPV3, "%s: t1 in -= %d on %s",
+ __func__, n, inet_ntoa(ia));
+ ims->ims_st[1].in -= n;
+ }
+
+ if (lims->imsl_st[1] == MCAST_EXCLUDE) {
+ CTR3(KTR_IGMPV3, "%s: t1 ex += %d on %s",
+ __func__, n, inet_ntoa(ia));
+ ims->ims_st[1].ex += n;
+ } else if (lims->imsl_st[1] == MCAST_INCLUDE) {
+ CTR3(KTR_IGMPV3, "%s: t1 in += %d on %s",
+ __func__, n, inet_ntoa(ia));
+ ims->ims_st[1].in += n;
+ }
+}
+
+/*
+ * Atomically update the global in_multi state, when a membership's
+ * filter list is being updated in any way.
+ *
+ * imf is the per-inpcb-membership group filter pointer.
+ * A fake imf may be passed for in-kernel consumers.
+ *
+ * XXX This is a candidate for a set-symmetric-difference style loop
+ * which would eliminate the repeated lookup from root of ims nodes,
+ * as they share the same key space.
+ *
+ * If any error occurred this function will back out of refcounts
+ * and return a non-zero value.
+ */
+static int
+inm_merge(struct in_multi *inm, /*const*/ struct in_mfilter *imf)
+{
+ struct ip_msource *ims, *nims;
+ struct in_msource *lims;
+ int schanged, error;
+ int nsrc0, nsrc1;
+
+ schanged = 0;
+ error = 0;
+ nsrc1 = nsrc0 = 0;
+
+ /*
+ * Update the source filters first, as this may fail.
+ * Maintain count of in-mode filters at t0, t1. These are
+ * used to work out if we transition into ASM mode or not.
+ * Maintain a count of source filters whose state was
+ * actually modified by this operation.
+ */
+ RB_FOREACH(ims, ip_msource_tree, &imf->imf_sources) {
+ lims = (struct in_msource *)ims;
+ if (lims->imsl_st[0] == imf->imf_st[0]) nsrc0++;
+ if (lims->imsl_st[1] == imf->imf_st[1]) nsrc1++;
+ if (lims->imsl_st[0] == lims->imsl_st[1]) continue;
+ error = inm_get_source(inm, lims->ims_haddr, 0, &nims);
+ ++schanged;
+ if (error)
+ break;
+ ims_merge(nims, lims, 0);
+ }
+ if (error) {
+ struct ip_msource *bims;
+
+ RB_FOREACH_REVERSE_FROM(ims, ip_msource_tree, nims) {
+ lims = (struct in_msource *)ims;
+ if (lims->imsl_st[0] == lims->imsl_st[1])
+ continue;
+ (void)inm_get_source(inm, lims->ims_haddr, 1, &bims);
+ if (bims == NULL)
+ continue;
+ ims_merge(bims, lims, 1);
+ }
+ goto out_reap;
+ }
+
+ CTR3(KTR_IGMPV3, "%s: imf filters in-mode: %d at t0, %d at t1",
+ __func__, nsrc0, nsrc1);
+
+ /* Handle transition between INCLUDE {n} and INCLUDE {} on socket. */
+ if (imf->imf_st[0] == imf->imf_st[1] &&
+ imf->imf_st[1] == MCAST_INCLUDE) {
+ if (nsrc1 == 0) {
+ CTR1(KTR_IGMPV3, "%s: --in on inm at t1", __func__);
+ --inm->inm_st[1].iss_in;
+ }
+ }
+
+ /* Handle filter mode transition on socket. */
+ if (imf->imf_st[0] != imf->imf_st[1]) {
+ CTR3(KTR_IGMPV3, "%s: imf transition %d to %d",
+ __func__, imf->imf_st[0], imf->imf_st[1]);
+
+ if (imf->imf_st[0] == MCAST_EXCLUDE) {
+ CTR1(KTR_IGMPV3, "%s: --ex on inm at t1", __func__);
+ --inm->inm_st[1].iss_ex;
+ } else if (imf->imf_st[0] == MCAST_INCLUDE) {
+ CTR1(KTR_IGMPV3, "%s: --in on inm at t1", __func__);
+ --inm->inm_st[1].iss_in;
+ }
+
+ if (imf->imf_st[1] == MCAST_EXCLUDE) {
+ CTR1(KTR_IGMPV3, "%s: ex++ on inm at t1", __func__);
+ inm->inm_st[1].iss_ex++;
+ } else if (imf->imf_st[1] == MCAST_INCLUDE && nsrc1 > 0) {
+ CTR1(KTR_IGMPV3, "%s: in++ on inm at t1", __func__);
+ inm->inm_st[1].iss_in++;
+ }
+ }
+
+ /*
+ * Track inm filter state in terms of listener counts.
+ * If there are any exclusive listeners, stack-wide
+ * membership is exclusive.
+ * Otherwise, if only inclusive listeners, stack-wide is inclusive.
+ * If no listeners remain, state is undefined at t1,
+ * and the IGMP lifecycle for this group should finish.
+ */
+ if (inm->inm_st[1].iss_ex > 0) {
+ CTR1(KTR_IGMPV3, "%s: transition to EX", __func__);
+ inm->inm_st[1].iss_fmode = MCAST_EXCLUDE;
+ } else if (inm->inm_st[1].iss_in > 0) {
+ CTR1(KTR_IGMPV3, "%s: transition to IN", __func__);
+ inm->inm_st[1].iss_fmode = MCAST_INCLUDE;
+ } else {
+ CTR1(KTR_IGMPV3, "%s: transition to UNDEF", __func__);
+ inm->inm_st[1].iss_fmode = MCAST_UNDEFINED;
+ }
+
+ /* Decrement ASM listener count on transition out of ASM mode. */
+ if (imf->imf_st[0] == MCAST_EXCLUDE && nsrc0 == 0) {
+ if ((imf->imf_st[1] != MCAST_EXCLUDE) ||
+		    (imf->imf_st[1] == MCAST_EXCLUDE && nsrc1 > 0)) {
+			CTR1(KTR_IGMPV3, "%s: --asm on inm at t1", __func__);
+			--inm->inm_st[1].iss_asm;
+		}
+	}
+
+ /* Increment ASM listener count on transition to ASM mode. */
+ if (imf->imf_st[1] == MCAST_EXCLUDE && nsrc1 == 0) {
+ CTR1(KTR_IGMPV3, "%s: asm++ on inm at t1", __func__);
+ inm->inm_st[1].iss_asm++;
+ }
+
+ CTR3(KTR_IGMPV3, "%s: merged imf %p to inm %p", __func__, imf, inm);
+ inm_print(inm);
+
+out_reap:
+ if (schanged > 0) {
+ CTR1(KTR_IGMPV3, "%s: sources changed; reaping", __func__);
+ inm_reap(inm);
+ }
+ return (error);
+}
+
+/*
+ * Mark an in_multi's filter set deltas as committed.
+ * Called by IGMP after a state change has been enqueued.
+ */
+void
+inm_commit(struct in_multi *inm)
+{
+ struct ip_msource *ims;
+
+ CTR2(KTR_IGMPV3, "%s: commit inm %p", __func__, inm);
+ CTR1(KTR_IGMPV3, "%s: pre commit:", __func__);
+ inm_print(inm);
+
+ RB_FOREACH(ims, ip_msource_tree, &inm->inm_srcs) {
+ ims->ims_st[0] = ims->ims_st[1];
+ }
+ inm->inm_st[0] = inm->inm_st[1];
+}
+
+/*
+ * Reap unreferenced nodes from an in_multi's filter set.
+ */
+static void
+inm_reap(struct in_multi *inm)
+{
+ struct ip_msource *ims, *tims;
+
+ RB_FOREACH_SAFE(ims, ip_msource_tree, &inm->inm_srcs, tims) {
+ if (ims->ims_st[0].ex > 0 || ims->ims_st[0].in > 0 ||
+ ims->ims_st[1].ex > 0 || ims->ims_st[1].in > 0 ||
+ ims->ims_stp != 0)
+ continue;
+ CTR2(KTR_IGMPV3, "%s: free ims %p", __func__, ims);
+ RB_REMOVE(ip_msource_tree, &inm->inm_srcs, ims);
+ free(ims, M_IPMSOURCE);
+ inm->inm_nsrc--;
+ }
+}
+
+/*
+ * Purge all source nodes from an in_multi's filter set.
+ */
+static void
+inm_purge(struct in_multi *inm)
+{
+ struct ip_msource *ims, *tims;
+
+ RB_FOREACH_SAFE(ims, ip_msource_tree, &inm->inm_srcs, tims) {
+ CTR2(KTR_IGMPV3, "%s: free ims %p", __func__, ims);
+ RB_REMOVE(ip_msource_tree, &inm->inm_srcs, ims);
+ free(ims, M_IPMSOURCE);
+ inm->inm_nsrc--;
+ }
+}
+
+/*
+ * Join a multicast group; unlocked entry point.
+ *
+ * SMPng: XXX: in_joingroup() is called from in_control() when Giant
+ * is not held. Fortunately, ifp is unlikely to have been detached
+ * at this point, so we assume it's OK to recurse.
+ */
+int
+in_joingroup(struct ifnet *ifp, const struct in_addr *gina,
+ /*const*/ struct in_mfilter *imf, struct in_multi **pinm)
+{
+ int error;
+
+ IN_MULTI_LOCK();
+ error = in_joingroup_locked(ifp, gina, imf, pinm);
+ IN_MULTI_UNLOCK();
+
+ return (error);
+}
+
+/*
+ * Join a multicast group; real entry point.
+ *
+ * Only preserves atomicity at inm level.
+ * NOTE: imf argument cannot be const due to sys/tree.h limitations.
+ *
+ * If the IGMP downcall fails, the group is not joined, and an error
+ * code is returned.
+ */
+int
+in_joingroup_locked(struct ifnet *ifp, const struct in_addr *gina,
+ /*const*/ struct in_mfilter *imf, struct in_multi **pinm)
+{
+ struct in_mfilter timf;
+ struct in_multi *inm;
+ int error;
+
+ IN_MULTI_LOCK_ASSERT();
+
+ CTR4(KTR_IGMPV3, "%s: join %s on %p(%s))", __func__,
+ inet_ntoa(*gina), ifp, ifp->if_xname);
+
+ error = 0;
+ inm = NULL;
+
+ /*
+ * If no imf was specified (i.e. kernel consumer),
+ * fake one up and assume it is an ASM join.
+ */
+ if (imf == NULL) {
+ imf_init(&timf, MCAST_UNDEFINED, MCAST_EXCLUDE);
+ imf = &timf;
+ }
+
+ error = in_getmulti(ifp, gina, &inm);
+ if (error) {
+ CTR1(KTR_IGMPV3, "%s: in_getmulti() failure", __func__);
+ return (error);
+ }
+
+ CTR1(KTR_IGMPV3, "%s: merge inm state", __func__);
+ error = inm_merge(inm, imf);
+ if (error) {
+ CTR1(KTR_IGMPV3, "%s: failed to merge inm state", __func__);
+ goto out_inm_release;
+ }
+
+ CTR1(KTR_IGMPV3, "%s: doing igmp downcall", __func__);
+ error = igmp_change_state(inm);
+ if (error) {
+ CTR1(KTR_IGMPV3, "%s: failed to update source", __func__);
+ goto out_inm_release;
+ }
+
+out_inm_release:
+ if (error) {
+ CTR2(KTR_IGMPV3, "%s: dropping ref on %p", __func__, inm);
+ inm_release_locked(inm);
+ } else {
+ *pinm = inm;
+ }
+
+ return (error);
+}
+
+/*
+ * Leave a multicast group; unlocked entry point.
+ */
+int
+in_leavegroup(struct in_multi *inm, /*const*/ struct in_mfilter *imf)
+{
+ struct ifnet *ifp;
+ int error;
+
+ ifp = inm->inm_ifp;
+
+ IN_MULTI_LOCK();
+ error = in_leavegroup_locked(inm, imf);
+ IN_MULTI_UNLOCK();
+
+ return (error);
+}
+
+/*
+ * Leave a multicast group; real entry point.
+ * All source filters will be expunged.
+ *
+ * Only preserves atomicity at inm level.
+ *
+ * Holding the write lock for the INP which contains imf
+ * is highly advisable. We can't assert for it as imf does not
+ * contain a back-pointer to the owning inp.
+ *
+ * Note: This is not the same as inm_release(*) as this function also
+ * makes a state change downcall into IGMP.
+ */
+int
+in_leavegroup_locked(struct in_multi *inm, /*const*/ struct in_mfilter *imf)
+{
+ struct in_mfilter timf;
+ int error;
+
+ error = 0;
+
+ IN_MULTI_LOCK_ASSERT();
+
+ CTR5(KTR_IGMPV3, "%s: leave inm %p, %s/%s, imf %p", __func__,
+ inm, inet_ntoa(inm->inm_addr),
+ (inm_is_ifp_detached(inm) ? "null" : inm->inm_ifp->if_xname),
+ imf);
+
+ /*
+ * If no imf was specified (i.e. kernel consumer),
+ * fake one up and assume it is an ASM leave.
+ */
+ if (imf == NULL) {
+ imf_init(&timf, MCAST_EXCLUDE, MCAST_UNDEFINED);
+ imf = &timf;
+ }
+
+ /*
+ * Begin state merge transaction at IGMP layer.
+ *
+ * As this particular invocation should not cause any memory
+ * to be allocated, and there is no opportunity to roll back
+ * the transaction, it MUST NOT fail.
+ */
+ CTR1(KTR_IGMPV3, "%s: merge inm state", __func__);
+ error = inm_merge(inm, imf);
+ KASSERT(error == 0, ("%s: failed to merge inm state", __func__));
+
+ CTR1(KTR_IGMPV3, "%s: doing igmp downcall", __func__);
+ error = igmp_change_state(inm);
+ if (error)
+ CTR1(KTR_IGMPV3, "%s: failed igmp downcall", __func__);
+
+ CTR2(KTR_IGMPV3, "%s: dropping ref on %p", __func__, inm);
+ inm_release_locked(inm);
+
+ return (error);
+}
+
+/*#ifndef BURN_BRIDGES*/
+/*
+ * Join an IPv4 multicast group in (*,G) exclusive mode.
+ * The group must be a 224.0.0.0/24 link-scope group.
+ * This KPI is for legacy kernel consumers only.
+ */
+struct in_multi *
+in_addmulti(struct in_addr *ap, struct ifnet *ifp)
+{
+ struct in_multi *pinm;
+ int error;
+
+ KASSERT(IN_LOCAL_GROUP(ntohl(ap->s_addr)),
+ ("%s: %s not in 224.0.0.0/24", __func__, inet_ntoa(*ap)));
+
+ error = in_joingroup(ifp, ap, NULL, &pinm);
+ if (error != 0)
+ pinm = NULL;
+
+ return (pinm);
+}
+
+/*
+ * Leave an IPv4 multicast group, assumed to be in exclusive (*,G) mode.
+ * This KPI is for legacy kernel consumers only.
+ */
+void
+in_delmulti(struct in_multi *inm)
+{
+
+ (void)in_leavegroup(inm, NULL);
+}
+/*#endif*/
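+
+/*
+ * Illustrative use of the legacy KPI above by a kernel consumer
+ * (a hypothetical sketch, not taken from the original sources):
+ *
+ * struct in_addr allhosts;
+ * struct in_multi *inm;
+ *
+ * allhosts.s_addr = htonl(INADDR_ALLHOSTS_GROUP); (224.0.0.1)
+ * inm = in_addmulti(&allhosts, ifp);
+ * ...
+ * if (inm != NULL)
+ * in_delmulti(inm);
+ */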
+
+/*
+ * Block or unblock an ASM multicast source on an inpcb.
+ * This implements the delta-based API described in RFC 3678.
+ *
+ * The delta-based API applies only to exclusive-mode memberships.
+ * An IGMP downcall will be performed.
+ *
+ * SMPng: NOTE: Must take Giant as a join may create a new ifma.
+ *
+ * Return 0 if successful, otherwise return an appropriate error code.
+ */
+static int
+inp_block_unblock_source(struct inpcb *inp, struct sockopt *sopt)
+{
+ struct group_source_req gsr;
+ sockunion_t *gsa, *ssa;
+ struct ifnet *ifp;
+ struct in_mfilter *imf;
+ struct ip_moptions *imo;
+ struct in_msource *ims;
+ struct in_multi *inm;
+ size_t idx;
+ uint16_t fmode;
+ int error, doblock;
+
+ ifp = NULL;
+ error = 0;
+ doblock = 0;
+
+ memset(&gsr, 0, sizeof(struct group_source_req));
+ gsa = (sockunion_t *)&gsr.gsr_group;
+ ssa = (sockunion_t *)&gsr.gsr_source;
+
+ switch (sopt->sopt_name) {
+ case IP_BLOCK_SOURCE:
+ case IP_UNBLOCK_SOURCE: {
+ struct ip_mreq_source mreqs;
+
+ error = sooptcopyin(sopt, &mreqs,
+ sizeof(struct ip_mreq_source),
+ sizeof(struct ip_mreq_source));
+ if (error)
+ return (error);
+
+ gsa->sin.sin_family = AF_INET;
+ gsa->sin.sin_len = sizeof(struct sockaddr_in);
+ gsa->sin.sin_addr = mreqs.imr_multiaddr;
+
+ ssa->sin.sin_family = AF_INET;
+ ssa->sin.sin_len = sizeof(struct sockaddr_in);
+ ssa->sin.sin_addr = mreqs.imr_sourceaddr;
+
+ if (!in_nullhost(mreqs.imr_interface))
+ INADDR_TO_IFP(mreqs.imr_interface, ifp);
+
+ if (sopt->sopt_name == IP_BLOCK_SOURCE)
+ doblock = 1;
+
+ CTR3(KTR_IGMPV3, "%s: imr_interface = %s, ifp = %p",
+ __func__, inet_ntoa(mreqs.imr_interface), ifp);
+ break;
+ }
+
+ case MCAST_BLOCK_SOURCE:
+ case MCAST_UNBLOCK_SOURCE:
+ error = sooptcopyin(sopt, &gsr,
+ sizeof(struct group_source_req),
+ sizeof(struct group_source_req));
+ if (error)
+ return (error);
+
+ if (gsa->sin.sin_family != AF_INET ||
+ gsa->sin.sin_len != sizeof(struct sockaddr_in))
+ return (EINVAL);
+
+ if (ssa->sin.sin_family != AF_INET ||
+ ssa->sin.sin_len != sizeof(struct sockaddr_in))
+ return (EINVAL);
+
+ if (gsr.gsr_interface == 0 || V_if_index < gsr.gsr_interface)
+ return (EADDRNOTAVAIL);
+
+ ifp = ifnet_byindex(gsr.gsr_interface);
+
+ if (sopt->sopt_name == MCAST_BLOCK_SOURCE)
+ doblock = 1;
+ break;
+
+ default:
+ CTR2(KTR_IGMPV3, "%s: unknown sopt_name %d",
+ __func__, sopt->sopt_name);
+ return (EOPNOTSUPP);
+ break;
+ }
+
+ if (!IN_MULTICAST(ntohl(gsa->sin.sin_addr.s_addr)))
+ return (EINVAL);
+
+ /*
+ * Check if we are actually a member of this group.
+ */
+ imo = inp_findmoptions(inp);
+ idx = imo_match_group(imo, ifp, &gsa->sa);
+ if (idx == -1 || imo->imo_mfilters == NULL) {
+ error = EADDRNOTAVAIL;
+ goto out_inp_locked;
+ }
+
+ KASSERT(imo->imo_mfilters != NULL,
+ ("%s: imo_mfilters not allocated", __func__));
+ imf = &imo->imo_mfilters[idx];
+ inm = imo->imo_membership[idx];
+
+ /*
+ * Attempting to use the delta-based API on a
+ * non-exclusive-mode membership is an error.
+ */
+ fmode = imf->imf_st[0];
+ if (fmode != MCAST_EXCLUDE) {
+ error = EINVAL;
+ goto out_inp_locked;
+ }
+
+ /*
+ * Deal with error cases up-front:
+ * Asked to block, but already blocked; or
+ * Asked to unblock, but nothing to unblock.
+ * If adding a new block entry, allocate it.
+ */
+ ims = imo_match_source(imo, idx, &ssa->sa);
+ if ((ims != NULL && doblock) || (ims == NULL && !doblock)) {
+ CTR3(KTR_IGMPV3, "%s: source %s %spresent", __func__,
+ inet_ntoa(ssa->sin.sin_addr), doblock ? "" : "not ");
+ error = EADDRNOTAVAIL;
+ goto out_inp_locked;
+ }
+
+ INP_WLOCK_ASSERT(inp);
+
+ /*
+ * Begin state merge transaction at socket layer.
+ */
+ if (doblock) {
+ CTR2(KTR_IGMPV3, "%s: %s source", __func__, "block");
+ ims = imf_graft(imf, fmode, &ssa->sin);
+ if (ims == NULL)
+ error = ENOMEM;
+ } else {
+ CTR2(KTR_IGMPV3, "%s: %s source", __func__, "allow");
+ error = imf_prune(imf, &ssa->sin);
+ }
+
+ if (error) {
+ CTR1(KTR_IGMPV3, "%s: merge imf state failed", __func__);
+ goto out_imf_rollback;
+ }
+
+ /*
+ * Begin state merge transaction at IGMP layer.
+ */
+ IN_MULTI_LOCK();
+
+ CTR1(KTR_IGMPV3, "%s: merge inm state", __func__);
+ error = inm_merge(inm, imf);
+ if (error) {
+ CTR1(KTR_IGMPV3, "%s: failed to merge inm state", __func__);
+ goto out_in_multi_locked;
+ }
+
+ CTR1(KTR_IGMPV3, "%s: doing igmp downcall", __func__);
+ error = igmp_change_state(inm);
+ if (error)
+ CTR1(KTR_IGMPV3, "%s: failed igmp downcall", __func__);
+
+out_in_multi_locked:
+
+ IN_MULTI_UNLOCK();
+
+out_imf_rollback:
+ if (error)
+ imf_rollback(imf);
+ else
+ imf_commit(imf);
+
+ imf_reap(imf);
+
+out_inp_locked:
+ INP_WUNLOCK(inp);
+ return (error);
+}
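+
+/*
+ * Userland view of the delta-based API handled above (an illustrative
+ * sketch; the socket s, group and source addresses are hypothetical):
+ *
+ * struct ip_mreq_source mr;
+ *
+ * memset(&mr, 0, sizeof(mr));
+ * mr.imr_multiaddr.s_addr = inet_addr("239.1.1.1");
+ * mr.imr_sourceaddr.s_addr = inet_addr("192.0.2.1");
+ * mr.imr_interface.s_addr = INADDR_ANY;
+ * setsockopt(s, IPPROTO_IP, IP_BLOCK_SOURCE, &mr, sizeof(mr));
+ *
+ * The membership must already exist in exclusive mode, or the handler
+ * above fails with EINVAL.
+ */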
+
+/*
+ * Given an inpcb, return its multicast options structure pointer. Accepts
+ * an unlocked inpcb pointer, but will return it locked. May sleep.
+ *
+ * SMPng: NOTE: Potentially calls malloc(M_WAITOK) with Giant held.
+ * SMPng: NOTE: Returns with the INP write lock held.
+ */
+static struct ip_moptions *
+inp_findmoptions(struct inpcb *inp)
+{
+ struct ip_moptions *imo;
+ struct in_multi **immp;
+ struct in_mfilter *imfp;
+ size_t idx;
+
+ INP_WLOCK(inp);
+ if (inp->inp_moptions != NULL)
+ return (inp->inp_moptions);
+
+ INP_WUNLOCK(inp);
+
+ imo = malloc(sizeof(*imo), M_IPMOPTS, M_WAITOK);
+ immp = malloc(sizeof(*immp) * IP_MIN_MEMBERSHIPS, M_IPMOPTS,
+ M_WAITOK | M_ZERO);
+ imfp = malloc(sizeof(struct in_mfilter) * IP_MIN_MEMBERSHIPS,
+ M_INMFILTER, M_WAITOK);
+
+ imo->imo_multicast_ifp = NULL;
+ imo->imo_multicast_addr.s_addr = INADDR_ANY;
+ imo->imo_multicast_vif = -1;
+ imo->imo_multicast_ttl = IP_DEFAULT_MULTICAST_TTL;
+ imo->imo_multicast_loop = in_mcast_loop;
+ imo->imo_num_memberships = 0;
+ imo->imo_max_memberships = IP_MIN_MEMBERSHIPS;
+ imo->imo_membership = immp;
+
+ /* Initialize per-group source filters. */
+ for (idx = 0; idx < IP_MIN_MEMBERSHIPS; idx++)
+ imf_init(&imfp[idx], MCAST_UNDEFINED, MCAST_EXCLUDE);
+ imo->imo_mfilters = imfp;
+
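+ /*
+ * Re-check under the INP write lock: another thread may have
+ * installed inp_moptions while the lock was dropped for the
+ * M_WAITOK allocations above; if so, discard our copies and
+ * use the existing structure.
+ */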
+ INP_WLOCK(inp);
+ if (inp->inp_moptions != NULL) {
+ free(imfp, M_INMFILTER);
+ free(immp, M_IPMOPTS);
+ free(imo, M_IPMOPTS);
+ return (inp->inp_moptions);
+ }
+ inp->inp_moptions = imo;
+ return (imo);
+}
+
+/*
+ * Discard the IP multicast options (and source filters).
+ *
+ * SMPng: NOTE: assumes INP write lock is held.
+ */
+void
+inp_freemoptions(struct ip_moptions *imo)
+{
+ struct in_mfilter *imf;
+ size_t idx, nmships;
+
+ KASSERT(imo != NULL, ("%s: ip_moptions is NULL", __func__));
+
+ nmships = imo->imo_num_memberships;
+ for (idx = 0; idx < nmships; ++idx) {
+ imf = imo->imo_mfilters ? &imo->imo_mfilters[idx] : NULL;
+ if (imf)
+ imf_leave(imf);
+ (void)in_leavegroup(imo->imo_membership[idx], imf);
+ if (imf)
+ imf_purge(imf);
+ }
+
+ if (imo->imo_mfilters)
+ free(imo->imo_mfilters, M_INMFILTER);
+ free(imo->imo_membership, M_IPMOPTS);
+ free(imo, M_IPMOPTS);
+}
+
+/*
+ * Atomically get source filters on a socket for an IPv4 multicast group.
+ * Called with INP lock held; returns with lock released.
+ */
+static int
+inp_get_source_filters(struct inpcb *inp, struct sockopt *sopt)
+{
+ struct __msfilterreq msfr;
+ sockunion_t *gsa;
+ struct ifnet *ifp;
+ struct ip_moptions *imo;
+ struct in_mfilter *imf;
+ struct ip_msource *ims;
+ struct in_msource *lims;
+ struct sockaddr_in *psin;
+ struct sockaddr_storage *ptss;
+ struct sockaddr_storage *tss;
+ int error;
+ size_t idx, nsrcs, ncsrcs;
+
+ INP_WLOCK_ASSERT(inp);
+
+ imo = inp->inp_moptions;
+ KASSERT(imo != NULL, ("%s: null ip_moptions", __func__));
+
+ INP_WUNLOCK(inp);
+
+ error = sooptcopyin(sopt, &msfr, sizeof(struct __msfilterreq),
+ sizeof(struct __msfilterreq));
+ if (error)
+ return (error);
+
+ if (msfr.msfr_ifindex == 0 || V_if_index < msfr.msfr_ifindex)
+ return (EINVAL);
+
+ ifp = ifnet_byindex(msfr.msfr_ifindex);
+ if (ifp == NULL)
+ return (EINVAL);
+
+ INP_WLOCK(inp);
+
+ /*
+ * Lookup group on the socket.
+ */
+ gsa = (sockunion_t *)&msfr.msfr_group;
+ idx = imo_match_group(imo, ifp, &gsa->sa);
+ if (idx == -1 || imo->imo_mfilters == NULL) {
+ INP_WUNLOCK(inp);
+ return (EADDRNOTAVAIL);
+ }
+ imf = &imo->imo_mfilters[idx];
+
+ /*
+ * Ignore memberships which are in limbo.
+ */
+ if (imf->imf_st[1] == MCAST_UNDEFINED) {
+ INP_WUNLOCK(inp);
+ return (EAGAIN);
+ }
+ msfr.msfr_fmode = imf->imf_st[1];
+
+ /*
+ * If the user specified a buffer, copy out the source filter
+ * entries to userland gracefully.
+ * We only copy out the number of entries which userland
+ * has asked for, but we always tell userland how big the
+ * buffer really needs to be.
+ */
+ tss = NULL;
+ if (msfr.msfr_srcs != NULL && msfr.msfr_nsrcs > 0) {
+ /* Clamp the request to the per-socket source limit. */
+ if (msfr.msfr_nsrcs > in_mcast_maxsocksrc)
+ msfr.msfr_nsrcs = in_mcast_maxsocksrc;
+ tss = malloc(sizeof(struct sockaddr_storage) * msfr.msfr_nsrcs,
+ M_TEMP, M_NOWAIT | M_ZERO);
+ if (tss == NULL) {
+ INP_WUNLOCK(inp);
+ return (ENOBUFS);
+ }
+ }
+
+ /*
+ * Count number of sources in-mode at t0.
+ * If buffer space exists and remains, copy out source entries.
+ */
+ nsrcs = msfr.msfr_nsrcs;
+ ncsrcs = 0;
+ ptss = tss;
+ RB_FOREACH(ims, ip_msource_tree, &imf->imf_sources) {
+ lims = (struct in_msource *)ims;
+ if (lims->imsl_st[0] == MCAST_UNDEFINED ||
+ lims->imsl_st[0] != imf->imf_st[0])
+ continue;
+ ++ncsrcs;
+ if (tss != NULL && nsrcs > 0) {
+ psin = (struct sockaddr_in *)ptss;
+ psin->sin_family = AF_INET;
+ psin->sin_len = sizeof(struct sockaddr_in);
+ psin->sin_addr.s_addr = htonl(lims->ims_haddr);
+ psin->sin_port = 0;
+ ++ptss;
+ --nsrcs;
+ }
+ }
+
+ INP_WUNLOCK(inp);
+
+ if (tss != NULL) {
+ error = copyout(tss, msfr.msfr_srcs,
+ sizeof(struct sockaddr_storage) * msfr.msfr_nsrcs);
+ free(tss, M_TEMP);
+ if (error)
+ return (error);
+ }
+
+ msfr.msfr_nsrcs = ncsrcs;
+ error = sooptcopyout(sopt, &msfr, sizeof(struct __msfilterreq));
+
+ return (error);
+}
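+
+/*
+ * Userland counterpart (illustrative note): because the handler above
+ * always writes the true in-mode source count back into msfr_nsrcs, an
+ * application may size its buffer in two passes -- first with
+ * msfr_srcs == NULL and msfr_nsrcs == 0 to learn the count, then again
+ * with a vector of that many sockaddr_storage slots.
+ */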
+
+/*
+ * Return the IP multicast options in response to user getsockopt().
+ */
+int
+inp_getmoptions(struct inpcb *inp, struct sockopt *sopt)
+{
+ struct ip_mreqn mreqn;
+ struct ip_moptions *imo;
+ struct ifnet *ifp;
+ struct in_ifaddr *ia;
+ int error, optval;
+ u_char coptval;
+
+ INP_WLOCK(inp);
+ imo = inp->inp_moptions;
+ /*
+ * If the socket is neither of type SOCK_RAW nor SOCK_DGRAM,
+ * or is a divert socket, reject it.
+ */
+ if (inp->inp_socket->so_proto->pr_protocol == IPPROTO_DIVERT ||
+ (inp->inp_socket->so_proto->pr_type != SOCK_RAW &&
+ inp->inp_socket->so_proto->pr_type != SOCK_DGRAM)) {
+ INP_WUNLOCK(inp);
+ return (EOPNOTSUPP);
+ }
+
+ error = 0;
+ switch (sopt->sopt_name) {
+ case IP_MULTICAST_VIF:
+ if (imo != NULL)
+ optval = imo->imo_multicast_vif;
+ else
+ optval = -1;
+ INP_WUNLOCK(inp);
+ error = sooptcopyout(sopt, &optval, sizeof(int));
+ break;
+
+ case IP_MULTICAST_IF:
+ memset(&mreqn, 0, sizeof(struct ip_mreqn));
+ if (imo != NULL) {
+ ifp = imo->imo_multicast_ifp;
+ if (!in_nullhost(imo->imo_multicast_addr)) {
+ mreqn.imr_address = imo->imo_multicast_addr;
+ } else if (ifp != NULL) {
+ mreqn.imr_ifindex = ifp->if_index;
+ IFP_TO_IA(ifp, ia);
+ if (ia != NULL) {
+ mreqn.imr_address =
+ IA_SIN(ia)->sin_addr;
+ ifa_free(&ia->ia_ifa);
+ }
+ }
+ }
+ INP_WUNLOCK(inp);
+ if (sopt->sopt_valsize == sizeof(struct ip_mreqn)) {
+ error = sooptcopyout(sopt, &mreqn,
+ sizeof(struct ip_mreqn));
+ } else {
+ error = sooptcopyout(sopt, &mreqn.imr_address,
+ sizeof(struct in_addr));
+ }
+ break;
+
+ case IP_MULTICAST_TTL:
+ if (imo == NULL)
+ optval = coptval = IP_DEFAULT_MULTICAST_TTL;
+ else
+ optval = coptval = imo->imo_multicast_ttl;
+ INP_WUNLOCK(inp);
+ if (sopt->sopt_valsize == sizeof(u_char))
+ error = sooptcopyout(sopt, &coptval, sizeof(u_char));
+ else
+ error = sooptcopyout(sopt, &optval, sizeof(int));
+ break;
+
+ case IP_MULTICAST_LOOP:
+ if (imo == NULL)
+ optval = coptval = IP_DEFAULT_MULTICAST_LOOP;
+ else
+ optval = coptval = imo->imo_multicast_loop;
+ INP_WUNLOCK(inp);
+ if (sopt->sopt_valsize == sizeof(u_char))
+ error = sooptcopyout(sopt, &coptval, sizeof(u_char));
+ else
+ error = sooptcopyout(sopt, &optval, sizeof(int));
+ break;
+
+ case IP_MSFILTER:
+ if (imo == NULL) {
+ error = EADDRNOTAVAIL;
+ INP_WUNLOCK(inp);
+ } else {
+ error = inp_get_source_filters(inp, sopt);
+ }
+ break;
+
+ default:
+ INP_WUNLOCK(inp);
+ error = ENOPROTOOPT;
+ break;
+ }
+
+ INP_UNLOCK_ASSERT(inp);
+
+ return (error);
+}
+
+/*
+ * Look up the ifnet to use for a multicast group membership,
+ * given the IPv4 address of an interface, and the IPv4 group address.
+ *
+ * This routine exists to support legacy multicast applications
+ * which do not understand that multicast memberships are scoped to
+ * specific physical links in the networking stack, or which need
+ * to join link-scope groups before IPv4 addresses are configured.
+ *
+ * If inp is non-NULL, use this socket's current FIB number for any
+ * required FIB lookup.
+ * If ina is INADDR_ANY, look up the group address in the unicast FIB,
+ * and use its ifp; usually, this points to the default next-hop.
+ *
+ * If the FIB lookup fails, attempt to use the first non-loopback
+ * interface with multicast capability in the system as a
+ * last resort. The legacy IPv4 ASM API requires that we do
+ * this in order to allow groups to be joined when the routing
+ * table has not yet been populated during boot.
+ *
+ * Returns NULL if no ifp could be found.
+ *
+ * SMPng: TODO: Acquire the appropriate locks for INADDR_TO_IFP.
+ * FUTURE: Implement IPv4 source-address selection.
+ */
+static struct ifnet *
+inp_lookup_mcast_ifp(const struct inpcb *inp,
+ const struct sockaddr_in *gsin, const struct in_addr ina)
+{
+ struct ifnet *ifp;
+
+ KASSERT(gsin->sin_family == AF_INET, ("%s: not AF_INET", __func__));
+ KASSERT(IN_MULTICAST(ntohl(gsin->sin_addr.s_addr)),
+ ("%s: not multicast", __func__));
+
+ ifp = NULL;
+ if (!in_nullhost(ina)) {
+ INADDR_TO_IFP(ina, ifp);
+ } else {
+ struct route ro;
+
+ ro.ro_rt = NULL;
+ memcpy(&ro.ro_dst, gsin, sizeof(struct sockaddr_in));
+ in_rtalloc_ign(&ro, 0, inp ? inp->inp_inc.inc_fibnum : 0);
+ if (ro.ro_rt != NULL) {
+ ifp = ro.ro_rt->rt_ifp;
+ KASSERT(ifp != NULL, ("%s: null ifp", __func__));
+ RTFREE(ro.ro_rt);
+ } else {
+ struct in_ifaddr *ia;
+ struct ifnet *mifp;
+
+ mifp = NULL;
+ IN_IFADDR_RLOCK();
+ TAILQ_FOREACH(ia, &V_in_ifaddrhead, ia_link) {
+ mifp = ia->ia_ifp;
+ if (!(mifp->if_flags & IFF_LOOPBACK) &&
+ (mifp->if_flags & IFF_MULTICAST)) {
+ ifp = mifp;
+ break;
+ }
+ }
+ IN_IFADDR_RUNLOCK();
+ }
+ }
+
+ return (ifp);
+}
+
+/*
+ * Join an IPv4 multicast group, possibly with a source.
+ */
+static int
+inp_join_group(struct inpcb *inp, struct sockopt *sopt)
+{
+ struct group_source_req gsr;
+ sockunion_t *gsa, *ssa;
+ struct ifnet *ifp;
+ struct in_mfilter *imf;
+ struct ip_moptions *imo;
+ struct in_multi *inm;
+ struct in_msource *lims;
+ size_t idx;
+ int error, is_new;
+
+ ifp = NULL;
+ imf = NULL;
+ error = 0;
+ is_new = 0;
+
+ memset(&gsr, 0, sizeof(struct group_source_req));
+ gsa = (sockunion_t *)&gsr.gsr_group;
+ gsa->ss.ss_family = AF_UNSPEC;
+ ssa = (sockunion_t *)&gsr.gsr_source;
+ ssa->ss.ss_family = AF_UNSPEC;
+
+ switch (sopt->sopt_name) {
+ case IP_ADD_MEMBERSHIP:
+ case IP_ADD_SOURCE_MEMBERSHIP: {
+ struct ip_mreq_source mreqs;
+
+ if (sopt->sopt_name == IP_ADD_MEMBERSHIP) {
+ error = sooptcopyin(sopt, &mreqs,
+ sizeof(struct ip_mreq),
+ sizeof(struct ip_mreq));
+ /*
+ * The ip_mreq just copied in overlays the start of an
+ * ip_mreq_source, leaving its imr_interface in the
+ * imr_sourceaddr slot; move it into the proper field
+ * and clear the source address.
+ */
+ mreqs.imr_interface = mreqs.imr_sourceaddr;
+ mreqs.imr_sourceaddr.s_addr = INADDR_ANY;
+ } else if (sopt->sopt_name == IP_ADD_SOURCE_MEMBERSHIP) {
+ error = sooptcopyin(sopt, &mreqs,
+ sizeof(struct ip_mreq_source),
+ sizeof(struct ip_mreq_source));
+ }
+ if (error)
+ return (error);
+
+ gsa->sin.sin_family = AF_INET;
+ gsa->sin.sin_len = sizeof(struct sockaddr_in);
+ gsa->sin.sin_addr = mreqs.imr_multiaddr;
+
+ if (sopt->sopt_name == IP_ADD_SOURCE_MEMBERSHIP) {
+ ssa->sin.sin_family = AF_INET;
+ ssa->sin.sin_len = sizeof(struct sockaddr_in);
+ ssa->sin.sin_addr = mreqs.imr_sourceaddr;
+ }
+
+ if (!IN_MULTICAST(ntohl(gsa->sin.sin_addr.s_addr)))
+ return (EINVAL);
+
+ ifp = inp_lookup_mcast_ifp(inp, &gsa->sin,
+ mreqs.imr_interface);
+ CTR3(KTR_IGMPV3, "%s: imr_interface = %s, ifp = %p",
+ __func__, inet_ntoa(mreqs.imr_interface), ifp);
+ break;
+ }
+
+ case MCAST_JOIN_GROUP:
+ case MCAST_JOIN_SOURCE_GROUP:
+ if (sopt->sopt_name == MCAST_JOIN_GROUP) {
+ error = sooptcopyin(sopt, &gsr,
+ sizeof(struct group_req),
+ sizeof(struct group_req));
+ } else if (sopt->sopt_name == MCAST_JOIN_SOURCE_GROUP) {
+ error = sooptcopyin(sopt, &gsr,
+ sizeof(struct group_source_req),
+ sizeof(struct group_source_req));
+ }
+ if (error)
+ return (error);
+
+ if (gsa->sin.sin_family != AF_INET ||
+ gsa->sin.sin_len != sizeof(struct sockaddr_in))
+ return (EINVAL);
+
+ /*
+ * Overwrite the port field if present, as the sockaddr
+ * being copied in may be matched with a binary comparison.
+ */
+ gsa->sin.sin_port = 0;
+ if (sopt->sopt_name == MCAST_JOIN_SOURCE_GROUP) {
+ if (ssa->sin.sin_family != AF_INET ||
+ ssa->sin.sin_len != sizeof(struct sockaddr_in))
+ return (EINVAL);
+ ssa->sin.sin_port = 0;
+ }
+
+ if (!IN_MULTICAST(ntohl(gsa->sin.sin_addr.s_addr)))
+ return (EINVAL);
+
+ if (gsr.gsr_interface == 0 || V_if_index < gsr.gsr_interface)
+ return (EADDRNOTAVAIL);
+ ifp = ifnet_byindex(gsr.gsr_interface);
+ break;
+
+ default:
+ CTR2(KTR_IGMPV3, "%s: unknown sopt_name %d",
+ __func__, sopt->sopt_name);
+ return (EOPNOTSUPP);
+ break;
+ }
+
+ if (ifp == NULL || (ifp->if_flags & IFF_MULTICAST) == 0)
+ return (EADDRNOTAVAIL);
+
+ imo = inp_findmoptions(inp);
+ idx = imo_match_group(imo, ifp, &gsa->sa);
+ if (idx == -1) {
+ is_new = 1;
+ } else {
+ inm = imo->imo_membership[idx];
+ imf = &imo->imo_mfilters[idx];
+ if (ssa->ss.ss_family != AF_UNSPEC) {
+ /*
+ * MCAST_JOIN_SOURCE_GROUP on an exclusive membership
+ * is an error. On an existing inclusive membership,
+ * it just adds the source to the filter list.
+ */
+ if (imf->imf_st[1] != MCAST_INCLUDE) {
+ error = EINVAL;
+ goto out_inp_locked;
+ }
+ /* Throw out duplicates. */
+ lims = imo_match_source(imo, idx, &ssa->sa);
+ if (lims != NULL) {
+ error = EADDRNOTAVAIL;
+ goto out_inp_locked;
+ }
+ } else {
+ /*
+ * MCAST_JOIN_GROUP on an existing inclusive
+ * membership is an error; if you want to change
+ * filter mode, you must use the userland API
+ * setsourcefilter().
+ */
+ if (imf->imf_st[1] == MCAST_INCLUDE) {
+ error = EINVAL;
+ goto out_inp_locked;
+ }
+ /*
+ * MCAST_JOIN_GROUP on an existing exclusive
+ * membership is an error; return EADDRINUSE
+ * to preserve 4.4BSD API idempotence, and to
+ * avoid a tedious detour to the code below.
+ * NOTE: This is bending RFC 3678 a bit.
+ */
+ if (imf->imf_st[1] == MCAST_EXCLUDE) {
+ error = EADDRINUSE;
+ goto out_inp_locked;
+ }
+ }
+ }
+
+ /*
+ * Begin state merge transaction at socket layer.
+ */
+ INP_WLOCK_ASSERT(inp);
+
+ if (is_new) {
+ if (imo->imo_num_memberships == imo->imo_max_memberships) {
+ error = imo_grow(imo);
+ if (error)
+ goto out_inp_locked;
+ }
+ /*
+ * Allocate the new slot upfront so we can deal with
+ * grafting the new source filter in same code path
+ * as for join-source on existing membership.
+ */
+ idx = imo->imo_num_memberships;
+ imo->imo_membership[idx] = NULL;
+ imo->imo_num_memberships++;
+ KASSERT(imo->imo_mfilters != NULL,
+ ("%s: imf_mfilters vector was not allocated", __func__));
+ imf = &imo->imo_mfilters[idx];
+ KASSERT(RB_EMPTY(&imf->imf_sources),
+ ("%s: imf_sources not empty", __func__));
+ }
+
+ /*
+ * Graft new source into filter list for this inpcb's
+ * membership of the group. The in_multi may not have
+ * been allocated yet if this is a new membership, however,
+ * the in_mfilter slot will be allocated and must be initialized.
+ */
+ if (ssa->ss.ss_family != AF_UNSPEC) {
+ /* Membership starts in IN mode */
+ if (is_new) {
+ CTR1(KTR_IGMPV3, "%s: new join w/source", __func__);
+ imf_init(imf, MCAST_UNDEFINED, MCAST_INCLUDE);
+ } else {
+ CTR2(KTR_IGMPV3, "%s: %s source", __func__, "allow");
+ }
+ lims = imf_graft(imf, MCAST_INCLUDE, &ssa->sin);
+ if (lims == NULL) {
+ CTR1(KTR_IGMPV3, "%s: merge imf state failed",
+ __func__);
+ error = ENOMEM;
+ goto out_imo_free;
+ }
+ } else {
+ /* No address specified; membership starts in EX mode */
+ if (is_new) {
+ CTR1(KTR_IGMPV3, "%s: new join w/o source", __func__);
+ imf_init(imf, MCAST_UNDEFINED, MCAST_EXCLUDE);
+ }
+ }
+
+ /*
+ * Begin state merge transaction at IGMP layer.
+ */
+ IN_MULTI_LOCK();
+
+ if (is_new) {
+ error = in_joingroup_locked(ifp, &gsa->sin.sin_addr, imf,
+ &inm);
+ if (error)
+ goto out_in_multi_locked;
+ imo->imo_membership[idx] = inm;
+ } else {
+ CTR1(KTR_IGMPV3, "%s: merge inm state", __func__);
+ error = inm_merge(inm, imf);
+ if (error) {
+ CTR1(KTR_IGMPV3, "%s: failed to merge inm state",
+ __func__);
+ goto out_in_multi_locked;
+ }
+ CTR1(KTR_IGMPV3, "%s: doing igmp downcall", __func__);
+ error = igmp_change_state(inm);
+ if (error) {
+ CTR1(KTR_IGMPV3, "%s: failed igmp downcall",
+ __func__);
+ goto out_in_multi_locked;
+ }
+ }
+
+out_in_multi_locked:
+
+ IN_MULTI_UNLOCK();
+
+ INP_WLOCK_ASSERT(inp);
+ if (error) {
+ imf_rollback(imf);
+ if (is_new)
+ imf_purge(imf);
+ else
+ imf_reap(imf);
+ } else {
+ imf_commit(imf);
+ }
+
+out_imo_free:
+ if (error && is_new) {
+ imo->imo_membership[idx] = NULL;
+ --imo->imo_num_memberships;
+ }
+
+out_inp_locked:
+ INP_WUNLOCK(inp);
+ return (error);
+}
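+
+/*
+ * Userland sketch of the protocol-independent join handled above
+ * (illustrative; socket s, interface name and group are hypothetical):
+ *
+ * struct group_req gr;
+ * struct sockaddr_in *gsin = (struct sockaddr_in *)&gr.gr_group;
+ *
+ * memset(&gr, 0, sizeof(gr));
+ * gr.gr_interface = if_nametoindex("em0");
+ * gsin->sin_family = AF_INET;
+ * gsin->sin_len = sizeof(struct sockaddr_in);
+ * gsin->sin_addr.s_addr = inet_addr("239.1.1.1");
+ * setsockopt(s, IPPROTO_IP, MCAST_JOIN_GROUP, &gr, sizeof(gr));
+ */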
+
+/*
+ * Leave an IPv4 multicast group on an inpcb, possibly with a source.
+ */
+static int
+inp_leave_group(struct inpcb *inp, struct sockopt *sopt)
+{
+ struct group_source_req gsr;
+ struct ip_mreq_source mreqs;
+ sockunion_t *gsa, *ssa;
+ struct ifnet *ifp;
+ struct in_mfilter *imf;
+ struct ip_moptions *imo;
+ struct in_msource *ims;
+ struct in_multi *inm;
+ size_t idx;
+ int error, is_final;
+
+ ifp = NULL;
+ error = 0;
+ is_final = 1;
+
+ memset(&gsr, 0, sizeof(struct group_source_req));
+ gsa = (sockunion_t *)&gsr.gsr_group;
+ gsa->ss.ss_family = AF_UNSPEC;
+ ssa = (sockunion_t *)&gsr.gsr_source;
+ ssa->ss.ss_family = AF_UNSPEC;
+
+ switch (sopt->sopt_name) {
+ case IP_DROP_MEMBERSHIP:
+ case IP_DROP_SOURCE_MEMBERSHIP:
+ if (sopt->sopt_name == IP_DROP_MEMBERSHIP) {
+ error = sooptcopyin(sopt, &mreqs,
+ sizeof(struct ip_mreq),
+ sizeof(struct ip_mreq));
+ /*
+ * Swap interface and sourceaddr arguments,
+ * as ip_mreq and ip_mreq_source are laid
+ * out differently.
+ */
+ mreqs.imr_interface = mreqs.imr_sourceaddr;
+ mreqs.imr_sourceaddr.s_addr = INADDR_ANY;
+ } else if (sopt->sopt_name == IP_DROP_SOURCE_MEMBERSHIP) {
+ error = sooptcopyin(sopt, &mreqs,
+ sizeof(struct ip_mreq_source),
+ sizeof(struct ip_mreq_source));
+ }
+ if (error)
+ return (error);
+
+ gsa->sin.sin_family = AF_INET;
+ gsa->sin.sin_len = sizeof(struct sockaddr_in);
+ gsa->sin.sin_addr = mreqs.imr_multiaddr;
+
+ if (sopt->sopt_name == IP_DROP_SOURCE_MEMBERSHIP) {
+ ssa->sin.sin_family = AF_INET;
+ ssa->sin.sin_len = sizeof(struct sockaddr_in);
+ ssa->sin.sin_addr = mreqs.imr_sourceaddr;
+ }
+
+ /*
+ * Attempt to look up hinted ifp from interface address.
+ * Fallthrough with null ifp iff lookup fails, to
+ * preserve 4.4BSD mcast API idempotence.
+ * XXX NOTE WELL: The RFC 3678 API is preferred because
+ * using an IPv4 address as a key is racy.
+ */
+ if (!in_nullhost(mreqs.imr_interface))
+ INADDR_TO_IFP(mreqs.imr_interface, ifp);
+
+ CTR3(KTR_IGMPV3, "%s: imr_interface = %s, ifp = %p",
+ __func__, inet_ntoa(mreqs.imr_interface), ifp);
+
+ break;
+
+ case MCAST_LEAVE_GROUP:
+ case MCAST_LEAVE_SOURCE_GROUP:
+ if (sopt->sopt_name == MCAST_LEAVE_GROUP) {
+ error = sooptcopyin(sopt, &gsr,
+ sizeof(struct group_req),
+ sizeof(struct group_req));
+ } else if (sopt->sopt_name == MCAST_LEAVE_SOURCE_GROUP) {
+ error = sooptcopyin(sopt, &gsr,
+ sizeof(struct group_source_req),
+ sizeof(struct group_source_req));
+ }
+ if (error)
+ return (error);
+
+ if (gsa->sin.sin_family != AF_INET ||
+ gsa->sin.sin_len != sizeof(struct sockaddr_in))
+ return (EINVAL);
+
+ if (sopt->sopt_name == MCAST_LEAVE_SOURCE_GROUP) {
+ if (ssa->sin.sin_family != AF_INET ||
+ ssa->sin.sin_len != sizeof(struct sockaddr_in))
+ return (EINVAL);
+ }
+
+ if (gsr.gsr_interface == 0 || V_if_index < gsr.gsr_interface)
+ return (EADDRNOTAVAIL);
+
+ ifp = ifnet_byindex(gsr.gsr_interface);
+
+ if (ifp == NULL)
+ return (EADDRNOTAVAIL);
+ break;
+
+ default:
+ CTR2(KTR_IGMPV3, "%s: unknown sopt_name %d",
+ __func__, sopt->sopt_name);
+ return (EOPNOTSUPP);
+ break;
+ }
+
+ if (!IN_MULTICAST(ntohl(gsa->sin.sin_addr.s_addr)))
+ return (EINVAL);
+
+ /*
+ * Find the membership in the membership array.
+ */
+ imo = inp_findmoptions(inp);
+ idx = imo_match_group(imo, ifp, &gsa->sa);
+ if (idx == -1) {
+ error = EADDRNOTAVAIL;
+ goto out_inp_locked;
+ }
+ inm = imo->imo_membership[idx];
+ imf = &imo->imo_mfilters[idx];
+
+ if (ssa->ss.ss_family != AF_UNSPEC)
+ is_final = 0;
+
+ /*
+ * Begin state merge transaction at socket layer.
+ */
+ INP_WLOCK_ASSERT(inp);
+
+ /*
+ * If we were instructed only to leave a given source, do so.
+ * MCAST_LEAVE_SOURCE_GROUP is only valid for inclusive memberships.
+ */
+ if (is_final) {
+ imf_leave(imf);
+ } else {
+ if (imf->imf_st[0] == MCAST_EXCLUDE) {
+ error = EADDRNOTAVAIL;
+ goto out_inp_locked;
+ }
+ ims = imo_match_source(imo, idx, &ssa->sa);
+ if (ims == NULL) {
+ CTR3(KTR_IGMPV3, "%s: source %s %spresent", __func__,
+ inet_ntoa(ssa->sin.sin_addr), "not ");
+ error = EADDRNOTAVAIL;
+ goto out_inp_locked;
+ }
+ CTR2(KTR_IGMPV3, "%s: %s source", __func__, "block");
+ error = imf_prune(imf, &ssa->sin);
+ if (error) {
+ CTR1(KTR_IGMPV3, "%s: merge imf state failed",
+ __func__);
+ goto out_inp_locked;
+ }
+ }
+
+ /*
+ * Begin state merge transaction at IGMP layer.
+ */
+ IN_MULTI_LOCK();
+
+ if (is_final) {
+ /*
+ * Give up the multicast address record to which
+ * the membership points.
+ */
+ (void)in_leavegroup_locked(inm, imf);
+ } else {
+ CTR1(KTR_IGMPV3, "%s: merge inm state", __func__);
+ error = inm_merge(inm, imf);
+ if (error) {
+ CTR1(KTR_IGMPV3, "%s: failed to merge inm state",
+ __func__);
+ goto out_in_multi_locked;
+ }
+
+ CTR1(KTR_IGMPV3, "%s: doing igmp downcall", __func__);
+ error = igmp_change_state(inm);
+ if (error) {
+ CTR1(KTR_IGMPV3, "%s: failed igmp downcall",
+ __func__);
+ }
+ }
+
+out_in_multi_locked:
+
+ IN_MULTI_UNLOCK();
+
+ if (error)
+ imf_rollback(imf);
+ else
+ imf_commit(imf);
+
+ imf_reap(imf);
+
+ if (is_final) {
+ /* Remove the gap in the membership and filter array. */
+ for (++idx; idx < imo->imo_num_memberships; ++idx) {
+ imo->imo_membership[idx-1] = imo->imo_membership[idx];
+ imo->imo_mfilters[idx-1] = imo->imo_mfilters[idx];
+ }
+ imo->imo_num_memberships--;
+ }
+
+out_inp_locked:
+ INP_WUNLOCK(inp);
+ return (error);
+}
+
+/*
+ * Select the interface for transmitting IPv4 multicast datagrams.
+ *
+ * Either an instance of struct in_addr or an instance of struct ip_mreqn
+ * may be passed to this socket option. An address of INADDR_ANY or an
+ * interface index of 0 is used to remove a previous selection.
+ * When no interface is selected, one is chosen for every send.
+ */
+static int
+inp_set_multicast_if(struct inpcb *inp, struct sockopt *sopt)
+{
+ struct in_addr addr;
+ struct ip_mreqn mreqn;
+ struct ifnet *ifp;
+ struct ip_moptions *imo;
+ int error;
+
+ if (sopt->sopt_valsize == sizeof(struct ip_mreqn)) {
+ /*
+ * An interface index was specified using the
+ * Linux-derived ip_mreqn structure.
+ */
+ error = sooptcopyin(sopt, &mreqn, sizeof(struct ip_mreqn),
+ sizeof(struct ip_mreqn));
+ if (error)
+ return (error);
+
+ if (mreqn.imr_ifindex < 0 || V_if_index < mreqn.imr_ifindex)
+ return (EINVAL);
+
+ if (mreqn.imr_ifindex == 0) {
+ ifp = NULL;
+ } else {
+ ifp = ifnet_byindex(mreqn.imr_ifindex);
+ if (ifp == NULL)
+ return (EADDRNOTAVAIL);
+ }
+ } else {
+ /*
+ * An interface was specified by IPv4 address.
+ * This is the traditional BSD usage.
+ */
+ error = sooptcopyin(sopt, &addr, sizeof(struct in_addr),
+ sizeof(struct in_addr));
+ if (error)
+ return (error);
+ if (in_nullhost(addr)) {
+ ifp = NULL;
+ } else {
+ INADDR_TO_IFP(addr, ifp);
+ if (ifp == NULL)
+ return (EADDRNOTAVAIL);
+ }
+ CTR3(KTR_IGMPV3, "%s: ifp = %p, addr = %s", __func__, ifp,
+ inet_ntoa(addr));
+ }
+
+ /* Reject interfaces which do not support multicast. */
+ if (ifp != NULL && (ifp->if_flags & IFF_MULTICAST) == 0)
+ return (EOPNOTSUPP);
+
+ imo = inp_findmoptions(inp);
+ imo->imo_multicast_ifp = ifp;
+ imo->imo_multicast_addr.s_addr = INADDR_ANY;
+ INP_WUNLOCK(inp);
+
+ return (0);
+}
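+
+/*
+ * The two encodings accepted above, as seen from userland (illustrative
+ * sketch; socket s, address and interface name are hypothetical):
+ *
+ * struct in_addr ia; (traditional BSD form)
+ * ia.s_addr = inet_addr("192.0.2.2");
+ * setsockopt(s, IPPROTO_IP, IP_MULTICAST_IF, &ia, sizeof(ia));
+ *
+ * struct ip_mreqn mr; (Linux-derived form)
+ * memset(&mr, 0, sizeof(mr));
+ * mr.imr_ifindex = if_nametoindex("em0");
+ * setsockopt(s, IPPROTO_IP, IP_MULTICAST_IF, &mr, sizeof(mr));
+ *
+ * As the code above shows, the dispatch is purely on option length.
+ */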
+
+/*
+ * Atomically set source filters on a socket for an IPv4 multicast group.
+ *
+ * SMPng: NOTE: Potentially calls malloc(M_WAITOK) with Giant held.
+ */
+static int
+inp_set_source_filters(struct inpcb *inp, struct sockopt *sopt)
+{
+ struct __msfilterreq msfr;
+ sockunion_t *gsa;
+ struct ifnet *ifp;
+ struct in_mfilter *imf;
+ struct ip_moptions *imo;
+ struct in_multi *inm;
+ size_t idx;
+ int error;
+
+ error = sooptcopyin(sopt, &msfr, sizeof(struct __msfilterreq),
+ sizeof(struct __msfilterreq));
+ if (error)
+ return (error);
+
+ if (msfr.msfr_nsrcs > in_mcast_maxsocksrc ||
+ (msfr.msfr_fmode != MCAST_EXCLUDE &&
+ msfr.msfr_fmode != MCAST_INCLUDE))
+ return (EINVAL);
+
+ if (msfr.msfr_group.ss_family != AF_INET ||
+ msfr.msfr_group.ss_len != sizeof(struct sockaddr_in))
+ return (EINVAL);
+
+ gsa = (sockunion_t *)&msfr.msfr_group;
+ if (!IN_MULTICAST(ntohl(gsa->sin.sin_addr.s_addr)))
+ return (EINVAL);
+
+ gsa->sin.sin_port = 0; /* ignore port */
+
+ if (msfr.msfr_ifindex == 0 || V_if_index < msfr.msfr_ifindex)
+ return (EADDRNOTAVAIL);
+
+ ifp = ifnet_byindex(msfr.msfr_ifindex);
+ if (ifp == NULL)
+ return (EADDRNOTAVAIL);
+
+ /*
+ * Take the INP write lock.
+ * Check if this socket is a member of this group.
+ */
+ imo = inp_findmoptions(inp);
+ idx = imo_match_group(imo, ifp, &gsa->sa);
+ if (idx == -1 || imo->imo_mfilters == NULL) {
+ error = EADDRNOTAVAIL;
+ goto out_inp_locked;
+ }
+ inm = imo->imo_membership[idx];
+ imf = &imo->imo_mfilters[idx];
+
+ /*
+ * Begin state merge transaction at socket layer.
+ */
+ INP_WLOCK_ASSERT(inp);
+
+ imf->imf_st[1] = msfr.msfr_fmode;
+
+ /*
+ * Apply any new source filters, if present.
+ * Make a copy of the user-space source vector so
+ * that we may copy them with a single copyin. This
+ * allows us to deal with page faults up-front.
+ */
+ if (msfr.msfr_nsrcs > 0) {
+ struct in_msource *lims;
+ struct sockaddr_in *psin;
+ struct sockaddr_storage *kss, *pkss;
+ int i;
+
+ INP_WUNLOCK(inp);
+
+ CTR2(KTR_IGMPV3, "%s: loading %lu source list entries",
+ __func__, (unsigned long)msfr.msfr_nsrcs);
+ kss = malloc(sizeof(struct sockaddr_storage) * msfr.msfr_nsrcs,
+ M_TEMP, M_WAITOK);
+ error = copyin(msfr.msfr_srcs, kss,
+ sizeof(struct sockaddr_storage) * msfr.msfr_nsrcs);
+ if (error) {
+ free(kss, M_TEMP);
+ return (error);
+ }
+
+ INP_WLOCK(inp);
+
+ /*
+ * Mark all source filters as UNDEFINED at t1.
+ * Restore new group filter mode, as imf_leave()
+ * will set it to INCLUDE.
+ */
+ imf_leave(imf);
+ imf->imf_st[1] = msfr.msfr_fmode;
+
+ /*
+ * Update socket layer filters at t1, lazy-allocating
+ * new entries. This saves a bunch of memory at the
+ * cost of one RB_FIND() per source entry; duplicate
+ * entries in the msfr_nsrcs vector are ignored.
+ * If we encounter an error, rollback transaction.
+ *
+ * XXX This too could be replaced with a set-symmetric
+ * difference like loop to avoid walking from root
+ * every time, as the key space is common.
+ */
+ for (i = 0, pkss = kss; i < msfr.msfr_nsrcs; i++, pkss++) {
+ psin = (struct sockaddr_in *)pkss;
+ if (psin->sin_family != AF_INET) {
+ error = EAFNOSUPPORT;
+ break;
+ }
+ if (psin->sin_len != sizeof(struct sockaddr_in)) {
+ error = EINVAL;
+ break;
+ }
+ error = imf_get_source(imf, psin, &lims);
+ if (error)
+ break;
+ lims->imsl_st[1] = imf->imf_st[1];
+ }
+ free(kss, M_TEMP);
+ }
+
+ if (error)
+ goto out_imf_rollback;
+
+ INP_WLOCK_ASSERT(inp);
+ IN_MULTI_LOCK();
+
+ /*
+ * Begin state merge transaction at IGMP layer.
+ */
+ CTR1(KTR_IGMPV3, "%s: merge inm state", __func__);
+ error = inm_merge(inm, imf);
+ if (error) {
+ CTR1(KTR_IGMPV3, "%s: failed to merge inm state", __func__);
+ goto out_in_multi_locked;
+ }
+
+ CTR1(KTR_IGMPV3, "%s: doing igmp downcall", __func__);
+ error = igmp_change_state(inm);
+ if (error)
+ CTR1(KTR_IGMPV3, "%s: failed igmp downcall", __func__);
+
+out_in_multi_locked:
+
+ IN_MULTI_UNLOCK();
+
+out_imf_rollback:
+ if (error)
+ imf_rollback(imf);
+ else
+ imf_commit(imf);
+
+ imf_reap(imf);
+
+out_inp_locked:
+ INP_WUNLOCK(inp);
+ return (error);
+}
+
+/*
+ * Set the IP multicast options in response to user setsockopt().
+ *
+ * Many of the socket options handled in this function duplicate the
+ * functionality of socket options in the regular unicast API. However,
+ * it is not possible to merge the duplicate code, because the idempotence
+ * of the IPv4 multicast part of the BSD Sockets API must be preserved;
+ * the effects of these options must be treated as separate and distinct.
+ *
+ * SMPng: XXX: Unlocked read of inp_socket believed OK.
+ * FUTURE: The IP_MULTICAST_VIF option may be eliminated if MROUTING
+ * is refactored to no longer use vifs.
+ */
+int
+inp_setmoptions(struct inpcb *inp, struct sockopt *sopt)
+{
+ struct ip_moptions *imo;
+ int error;
+
+ error = 0;
+
+ /*
+ * If the socket is neither of type SOCK_RAW nor SOCK_DGRAM,
+ * or is a divert socket, reject it.
+ */
+ if (inp->inp_socket->so_proto->pr_protocol == IPPROTO_DIVERT ||
+ (inp->inp_socket->so_proto->pr_type != SOCK_RAW &&
+ inp->inp_socket->so_proto->pr_type != SOCK_DGRAM))
+ return (EOPNOTSUPP);
+
+ switch (sopt->sopt_name) {
+ case IP_MULTICAST_VIF: {
+ int vifi;
+ /*
+ * Select a multicast VIF for transmission.
+ * Only useful if multicast forwarding is active.
+ */
+ if (legal_vif_num == NULL) {
+ error = EOPNOTSUPP;
+ break;
+ }
+ error = sooptcopyin(sopt, &vifi, sizeof(int), sizeof(int));
+ if (error)
+ break;
+ if (!legal_vif_num(vifi) && (vifi != -1)) {
+ error = EINVAL;
+ break;
+ }
+ imo = inp_findmoptions(inp);
+ imo->imo_multicast_vif = vifi;
+ INP_WUNLOCK(inp);
+ break;
+ }
+
+ case IP_MULTICAST_IF:
+ error = inp_set_multicast_if(inp, sopt);
+ break;
+
+ case IP_MULTICAST_TTL: {
+ u_char ttl;
+
+ /*
+ * Set the IP time-to-live for outgoing multicast packets.
+ * The original multicast API required a char argument,
+ * which is inconsistent with the rest of the socket API.
+ * We allow either a char or an int.
+ */
+ if (sopt->sopt_valsize == sizeof(u_char)) {
+ error = sooptcopyin(sopt, &ttl, sizeof(u_char),
+ sizeof(u_char));
+ if (error)
+ break;
+ } else {
+ u_int ittl;
+
+ error = sooptcopyin(sopt, &ittl, sizeof(u_int),
+ sizeof(u_int));
+ if (error)
+ break;
+ if (ittl > 255) {
+ error = EINVAL;
+ break;
+ }
+ ttl = (u_char)ittl;
+ }
+ imo = inp_findmoptions(inp);
+ imo->imo_multicast_ttl = ttl;
+ INP_WUNLOCK(inp);
+ break;
+ }
+
+ case IP_MULTICAST_LOOP: {
+ u_char loop;
+
+ /*
+ * Set the loopback flag for outgoing multicast packets.
+ * Must be zero or one. The original multicast API required a
+ * char argument, which is inconsistent with the rest
+ * of the socket API. We allow either a char or an int.
+ */
+ if (sopt->sopt_valsize == sizeof(u_char)) {
+ error = sooptcopyin(sopt, &loop, sizeof(u_char),
+ sizeof(u_char));
+ if (error)
+ break;
+ } else {
+ u_int iloop;
+
+ error = sooptcopyin(sopt, &iloop, sizeof(u_int),
+ sizeof(u_int));
+ if (error)
+ break;
+ loop = (u_char)iloop;
+ }
+ imo = inp_findmoptions(inp);
+ imo->imo_multicast_loop = !!loop;
+ INP_WUNLOCK(inp);
+ break;
+ }
+
+ case IP_ADD_MEMBERSHIP:
+ case IP_ADD_SOURCE_MEMBERSHIP:
+ case MCAST_JOIN_GROUP:
+ case MCAST_JOIN_SOURCE_GROUP:
+ error = inp_join_group(inp, sopt);
+ break;
+
+ case IP_DROP_MEMBERSHIP:
+ case IP_DROP_SOURCE_MEMBERSHIP:
+ case MCAST_LEAVE_GROUP:
+ case MCAST_LEAVE_SOURCE_GROUP:
+ error = inp_leave_group(inp, sopt);
+ break;
+
+ case IP_BLOCK_SOURCE:
+ case IP_UNBLOCK_SOURCE:
+ case MCAST_BLOCK_SOURCE:
+ case MCAST_UNBLOCK_SOURCE:
+ error = inp_block_unblock_source(inp, sopt);
+ break;
+
+ case IP_MSFILTER:
+ error = inp_set_source_filters(inp, sopt);
+ break;
+
+ default:
+ error = EOPNOTSUPP;
+ break;
+ }
+
+ INP_UNLOCK_ASSERT(inp);
+
+ return (error);
+}
+
+/*
+ * Expose IGMP's multicast filter mode and source list(s) to userland,
+ * keyed by (ifindex, group).
+ * The filter mode is written out as a uint32_t, followed by
+ * 0..n of struct in_addr.
+ * For use by ifmcstat(8).
+ * SMPng: NOTE: unlocked read of ifindex space.
+ */
+static int
+sysctl_ip_mcast_filters(SYSCTL_HANDLER_ARGS)
+{
+ struct in_addr src, group;
+ struct ifnet *ifp;
+ struct ifmultiaddr *ifma;
+ struct in_multi *inm;
+ struct ip_msource *ims;
+ int *name;
+ int retval;
+ u_int namelen;
+ uint32_t fmode, ifindex;
+
+ name = (int *)arg1;
+ namelen = arg2;
+
+ if (req->newptr != NULL)
+ return (EPERM);
+
+ if (namelen != 2)
+ return (EINVAL);
+
+ ifindex = name[0];
+ if (ifindex <= 0 || ifindex > V_if_index) {
+ CTR2(KTR_IGMPV3, "%s: ifindex %u out of range",
+ __func__, ifindex);
+ return (ENOENT);
+ }
+
+ group.s_addr = name[1];
+ if (!IN_MULTICAST(ntohl(group.s_addr))) {
+ CTR2(KTR_IGMPV3, "%s: group %s is not multicast",
+ __func__, inet_ntoa(group));
+ return (EINVAL);
+ }
+
+ ifp = ifnet_byindex(ifindex);
+ if (ifp == NULL) {
+ CTR2(KTR_IGMPV3, "%s: no ifp for ifindex %u",
+ __func__, ifindex);
+ return (ENOENT);
+ }
+
+ retval = sysctl_wire_old_buffer(req,
+ sizeof(uint32_t) + (in_mcast_maxgrpsrc * sizeof(struct in_addr)));
+ if (retval)
+ return (retval);
+
+ IN_MULTI_LOCK();
+
+ IF_ADDR_LOCK(ifp);
+ TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
+ if (ifma->ifma_addr->sa_family != AF_INET ||
+ ifma->ifma_protospec == NULL)
+ continue;
+ inm = (struct in_multi *)ifma->ifma_protospec;
+ if (!in_hosteq(inm->inm_addr, group))
+ continue;
+ fmode = inm->inm_st[1].iss_fmode;
+ retval = SYSCTL_OUT(req, &fmode, sizeof(uint32_t));
+ if (retval != 0)
+ break;
+ RB_FOREACH(ims, ip_msource_tree, &inm->inm_srcs) {
+#ifdef KTR
+ struct in_addr ina;
+ ina.s_addr = htonl(ims->ims_haddr);
+ CTR2(KTR_IGMPV3, "%s: visit node %s", __func__,
+ inet_ntoa(ina));
+#endif
+ /*
+ * Only copy-out sources which are in-mode.
+ */
+ if (fmode != ims_get_mode(inm, ims, 1)) {
+ CTR1(KTR_IGMPV3, "%s: skip non-in-mode",
+ __func__);
+ continue;
+ }
+ src.s_addr = htonl(ims->ims_haddr);
+ retval = SYSCTL_OUT(req, &src, sizeof(struct in_addr));
+ if (retval != 0)
+ break;
+ }
+ }
+ IF_ADDR_UNLOCK(ifp);
+
+ IN_MULTI_UNLOCK();
+
+ return (retval);
+}
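+
+/*
+ * Consumer note (illustrative): ifmcstat(8) invokes this handler with a
+ * two-integer name vector, { ifindex, group address }, and parses the
+ * uint32_t filter mode followed by the vector of in-mode sources
+ * emitted above.
+ */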
+
+#ifdef KTR
+
+static const char *inm_modestrs[] = { "un", "in", "ex" };
+
+static const char *
+inm_mode_str(const int mode)
+{
+
+ if (mode >= MCAST_UNDEFINED && mode <= MCAST_EXCLUDE)
+ return (inm_modestrs[mode]);
+ return ("??");
+}
+
+static const char *inm_statestrs[] = {
+ "not-member",
+ "silent",
+ "idle",
+ "lazy",
+ "sleeping",
+ "awakening",
+ "query-pending",
+ "sg-query-pending",
+ "leaving"
+};
+
+static const char *
+inm_state_str(const int state)
+{
+
+ if (state >= IGMP_NOT_MEMBER && state <= IGMP_LEAVING_MEMBER)
+ return (inm_statestrs[state]);
+ return ("??");
+}
+
+/*
+ * Dump an in_multi structure to the console.
+ */
+void
+inm_print(const struct in_multi *inm)
+{
+ int t;
+
+ if ((ktr_mask & KTR_IGMPV3) == 0)
+ return;
+
+ printf("%s: --- begin inm %p ---\n", __func__, inm);
+ printf("addr %s ifp %p(%s) ifma %p\n",
+ inet_ntoa(inm->inm_addr),
+ inm->inm_ifp,
+ inm->inm_ifp->if_xname,
+ inm->inm_ifma);
+ printf("timer %u state %s refcount %u scq.len %u\n",
+ inm->inm_timer,
+ inm_state_str(inm->inm_state),
+ inm->inm_refcount,
+ inm->inm_scq.ifq_len);
+ printf("igi %p nsrc %lu sctimer %u scrv %u\n",
+ inm->inm_igi,
+ inm->inm_nsrc,
+ inm->inm_sctimer,
+ inm->inm_scrv);
+ for (t = 0; t < 2; t++) {
+ printf("t%d: fmode %s asm %u ex %u in %u rec %u\n", t,
+ inm_mode_str(inm->inm_st[t].iss_fmode),
+ inm->inm_st[t].iss_asm,
+ inm->inm_st[t].iss_ex,
+ inm->inm_st[t].iss_in,
+ inm->inm_st[t].iss_rec);
+ }
+ printf("%s: --- end inm %p ---\n", __func__, inm);
+}
+
+#else /* !KTR */
+
+void
+inm_print(const struct in_multi *inm)
+{
+
+}
+
+#endif /* KTR */
+
+RB_GENERATE(ip_msource_tree, ip_msource, ims_link, ip_msource_cmp);
diff --git a/rtems/freebsd/netinet/in_pcb.c b/rtems/freebsd/netinet/in_pcb.c
new file mode 100644
index 00000000..f98a90ef
--- /dev/null
+++ b/rtems/freebsd/netinet/in_pcb.c
@@ -0,0 +1,1958 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 1982, 1986, 1991, 1993, 1995
+ * The Regents of the University of California.
+ * Copyright (c) 2007-2009 Robert N. M. Watson
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)in_pcb.c 8.4 (Berkeley) 5/24/95
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/freebsd/local/opt_ddb.h>
+#include <rtems/freebsd/local/opt_ipsec.h>
+#include <rtems/freebsd/local/opt_inet6.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/domain.h>
+#include <rtems/freebsd/sys/protosw.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/socketvar.h>
+#include <rtems/freebsd/sys/priv.h>
+#include <rtems/freebsd/sys/proc.h>
+#include <rtems/freebsd/sys/jail.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/sysctl.h>
+
+#ifdef DDB
+#include <rtems/freebsd/ddb/ddb.h>
+#endif
+
+#include <rtems/freebsd/vm/uma.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/if_types.h>
+#include <rtems/freebsd/net/route.h>
+#include <rtems/freebsd/net/vnet.h>
+
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/in_pcb.h>
+#include <rtems/freebsd/netinet/in_var.h>
+#include <rtems/freebsd/netinet/ip_var.h>
+#include <rtems/freebsd/netinet/tcp_var.h>
+#include <rtems/freebsd/netinet/udp.h>
+#include <rtems/freebsd/netinet/udp_var.h>
+#ifdef INET6
+#include <rtems/freebsd/netinet/ip6.h>
+#include <rtems/freebsd/netinet6/ip6_var.h>
+#endif /* INET6 */
+
+
+#ifdef IPSEC
+#include <rtems/freebsd/netipsec/ipsec.h>
+#include <rtems/freebsd/netipsec/key.h>
+#endif /* IPSEC */
+
+#include <rtems/freebsd/security/mac/mac_framework.h>
+
+/*
+ * These configure the range of local port addresses assigned to
+ * "unspecified" outgoing connections/packets/whatever.
+ */
+VNET_DEFINE(int, ipport_lowfirstauto) = IPPORT_RESERVED - 1; /* 1023 */
+VNET_DEFINE(int, ipport_lowlastauto) = IPPORT_RESERVEDSTART; /* 600 */
+VNET_DEFINE(int, ipport_firstauto) = IPPORT_EPHEMERALFIRST; /* 10000 */
+VNET_DEFINE(int, ipport_lastauto) = IPPORT_EPHEMERALLAST; /* 65535 */
+VNET_DEFINE(int, ipport_hifirstauto) = IPPORT_HIFIRSTAUTO; /* 49152 */
+VNET_DEFINE(int, ipport_hilastauto) = IPPORT_HILASTAUTO; /* 65535 */
+
+/*
+ * Reserved ports accessible only to root. There are significant
+ * security considerations that must be accounted for when changing these,
+ * but the security benefits can be great. Please be careful.
+ */
+VNET_DEFINE(int, ipport_reservedhigh) = IPPORT_RESERVED - 1; /* 1023 */
+VNET_DEFINE(int, ipport_reservedlow);
+
+/* Variables dealing with random ephemeral port allocation. */
+VNET_DEFINE(int, ipport_randomized) = 1; /* user controlled via sysctl */
+VNET_DEFINE(int, ipport_randomcps) = 10; /* user controlled via sysctl */
+VNET_DEFINE(int, ipport_randomtime) = 45; /* user controlled via sysctl */
+VNET_DEFINE(int, ipport_stoprandom); /* toggled by ipport_tick */
+VNET_DEFINE(int, ipport_tcpallocs);
+static VNET_DEFINE(int, ipport_tcplastcount);
+
+#define V_ipport_tcplastcount VNET(ipport_tcplastcount)
+
+#define RANGECHK(var, min, max) \
+ if ((var) < (min)) { (var) = (min); } \
+ else if ((var) > (max)) { (var) = (max); }
+
+static void in_pcbremlists(struct inpcb *inp);
+
+static int
+sysctl_net_ipport_check(SYSCTL_HANDLER_ARGS)
+{
+ int error;
+
+#ifdef VIMAGE
+ error = vnet_sysctl_handle_int(oidp, arg1, arg2, req);
+#else
+ error = sysctl_handle_int(oidp, arg1, arg2, req);
+#endif
+ if (error == 0) {
+ RANGECHK(V_ipport_lowfirstauto, 1, IPPORT_RESERVED - 1);
+ RANGECHK(V_ipport_lowlastauto, 1, IPPORT_RESERVED - 1);
+ RANGECHK(V_ipport_firstauto, IPPORT_RESERVED, IPPORT_MAX);
+ RANGECHK(V_ipport_lastauto, IPPORT_RESERVED, IPPORT_MAX);
+ RANGECHK(V_ipport_hifirstauto, IPPORT_RESERVED, IPPORT_MAX);
+ RANGECHK(V_ipport_hilastauto, IPPORT_RESERVED, IPPORT_MAX);
+ }
+ return (error);
+}
+
+#undef RANGECHK
+
+SYSCTL_NODE(_net_inet_ip, IPPROTO_IP, portrange, CTLFLAG_RW, 0, "IP Ports");
+
+SYSCTL_VNET_PROC(_net_inet_ip_portrange, OID_AUTO, lowfirst,
+ CTLTYPE_INT|CTLFLAG_RW, &VNET_NAME(ipport_lowfirstauto), 0,
+ &sysctl_net_ipport_check, "I", "");
+SYSCTL_VNET_PROC(_net_inet_ip_portrange, OID_AUTO, lowlast,
+ CTLTYPE_INT|CTLFLAG_RW, &VNET_NAME(ipport_lowlastauto), 0,
+ &sysctl_net_ipport_check, "I", "");
+SYSCTL_VNET_PROC(_net_inet_ip_portrange, OID_AUTO, first,
+ CTLTYPE_INT|CTLFLAG_RW, &VNET_NAME(ipport_firstauto), 0,
+ &sysctl_net_ipport_check, "I", "");
+SYSCTL_VNET_PROC(_net_inet_ip_portrange, OID_AUTO, last,
+ CTLTYPE_INT|CTLFLAG_RW, &VNET_NAME(ipport_lastauto), 0,
+ &sysctl_net_ipport_check, "I", "");
+SYSCTL_VNET_PROC(_net_inet_ip_portrange, OID_AUTO, hifirst,
+ CTLTYPE_INT|CTLFLAG_RW, &VNET_NAME(ipport_hifirstauto), 0,
+ &sysctl_net_ipport_check, "I", "");
+SYSCTL_VNET_PROC(_net_inet_ip_portrange, OID_AUTO, hilast,
+ CTLTYPE_INT|CTLFLAG_RW, &VNET_NAME(ipport_hilastauto), 0,
+ &sysctl_net_ipport_check, "I", "");
+SYSCTL_VNET_INT(_net_inet_ip_portrange, OID_AUTO, reservedhigh,
+ CTLFLAG_RW|CTLFLAG_SECURE, &VNET_NAME(ipport_reservedhigh), 0, "");
+SYSCTL_VNET_INT(_net_inet_ip_portrange, OID_AUTO, reservedlow,
+ CTLFLAG_RW|CTLFLAG_SECURE, &VNET_NAME(ipport_reservedlow), 0, "");
+SYSCTL_VNET_INT(_net_inet_ip_portrange, OID_AUTO, randomized, CTLFLAG_RW,
+ &VNET_NAME(ipport_randomized), 0, "Enable random port allocation");
+SYSCTL_VNET_INT(_net_inet_ip_portrange, OID_AUTO, randomcps, CTLFLAG_RW,
+ &VNET_NAME(ipport_randomcps), 0, "Maximum number of random port "
+ "allocations before switching to a sequental one");
+SYSCTL_VNET_INT(_net_inet_ip_portrange, OID_AUTO, randomtime, CTLFLAG_RW,
+ &VNET_NAME(ipport_randomtime), 0,
+ "Minimum time to keep sequental port "
+ "allocation before switching to a random one");
+
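+/*
+ * Example (illustrative): the ephemeral range consulted by
+ * in_pcbbind_setup() below can be tuned at runtime, e.g.:
+ *
+ * sysctl net.inet.ip.portrange.first=20000
+ * sysctl net.inet.ip.portrange.last=30000
+ */
+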
+/*
+ * in_pcb.c: manage the Protocol Control Blocks.
+ *
+ * NOTE: It is assumed that most of these functions will be called with
+ * the pcbinfo lock held, and often, the inpcb lock held, as these utility
+ * functions often modify hash chains or addresses in pcbs.
+ */
+
+/*
+ * Allocate a PCB and associate it with the socket.
+ * On success return with the PCB locked.
+ */
+int
+in_pcballoc(struct socket *so, struct inpcbinfo *pcbinfo)
+{
+ struct inpcb *inp;
+ int error;
+
+ INP_INFO_WLOCK_ASSERT(pcbinfo);
+ error = 0;
+ inp = uma_zalloc(pcbinfo->ipi_zone, M_NOWAIT);
+ if (inp == NULL)
+ return (ENOBUFS);
+ bzero(inp, inp_zero_size);
+ inp->inp_pcbinfo = pcbinfo;
+ inp->inp_socket = so;
+ inp->inp_cred = crhold(so->so_cred);
+ inp->inp_inc.inc_fibnum = so->so_fibnum;
+#ifdef MAC
+ error = mac_inpcb_init(inp, M_NOWAIT);
+ if (error != 0)
+ goto out;
+ mac_inpcb_create(so, inp);
+#endif
+#ifdef IPSEC
+ error = ipsec_init_policy(so, &inp->inp_sp);
+ if (error != 0) {
+#ifdef MAC
+ mac_inpcb_destroy(inp);
+#endif
+ goto out;
+ }
+#endif /*IPSEC*/
+#ifdef INET6
+ if (INP_SOCKAF(so) == AF_INET6) {
+ inp->inp_vflag |= INP_IPV6PROTO;
+ if (V_ip6_v6only)
+ inp->inp_flags |= IN6P_IPV6_V6ONLY;
+ }
+#endif
+ LIST_INSERT_HEAD(pcbinfo->ipi_listhead, inp, inp_list);
+ pcbinfo->ipi_count++;
+ so->so_pcb = (caddr_t)inp;
+#ifdef INET6
+ if (V_ip6_auto_flowlabel)
+ inp->inp_flags |= IN6P_AUTOFLOWLABEL;
+#endif
+ INP_WLOCK(inp);
+ inp->inp_gencnt = ++pcbinfo->ipi_gencnt;
+ inp->inp_refcount = 1; /* Reference from the inpcbinfo */
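+ /*
+ * The "out" label below is only compiled in (and only reachable)
+ * when the MAC or IPSEC initialization above can fail; on that
+ * path the partially constructed inpcb is torn down.
+ */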
+#if defined(IPSEC) || defined(MAC)
+out:
+ if (error != 0) {
+ crfree(inp->inp_cred);
+ uma_zfree(pcbinfo->ipi_zone, inp);
+ }
+#endif
+ return (error);
+}
+
+int
+in_pcbbind(struct inpcb *inp, struct sockaddr *nam, struct ucred *cred)
+{
+ int anonport, error;
+
+ INP_INFO_WLOCK_ASSERT(inp->inp_pcbinfo);
+ INP_WLOCK_ASSERT(inp);
+
+ if (inp->inp_lport != 0 || inp->inp_laddr.s_addr != INADDR_ANY)
+ return (EINVAL);
+ anonport = inp->inp_lport == 0 && (nam == NULL ||
+ ((struct sockaddr_in *)nam)->sin_port == 0);
+ error = in_pcbbind_setup(inp, nam, &inp->inp_laddr.s_addr,
+ &inp->inp_lport, cred);
+ if (error)
+ return (error);
+ if (in_pcbinshash(inp) != 0) {
+ inp->inp_laddr.s_addr = INADDR_ANY;
+ inp->inp_lport = 0;
+ return (EAGAIN);
+ }
+ if (anonport)
+ inp->inp_flags |= INP_ANONPORT;
+ return (0);
+}
+
+/*
+ * Set up a bind operation on a PCB, performing port allocation
+ * as required, but do not actually modify the PCB. Callers can
+ * either complete the bind by setting inp_laddr/inp_lport and
+ * calling in_pcbinshash(), or they can just use the resulting
+ * port and address to authorise the sending of a once-off packet.
+ *
+ * On error, the values of *laddrp and *lportp are not changed.
+ */
+int
+in_pcbbind_setup(struct inpcb *inp, struct sockaddr *nam, in_addr_t *laddrp,
+ u_short *lportp, struct ucred *cred)
+{
+ struct socket *so = inp->inp_socket;
+ unsigned short *lastport;
+ struct sockaddr_in *sin;
+ struct inpcbinfo *pcbinfo = inp->inp_pcbinfo;
+ struct in_addr laddr;
+ u_short lport = 0;
+ int wild = 0, reuseport = (so->so_options & SO_REUSEPORT);
+ int error;
+ int dorandom;
+
+ /*
+ * Because no actual state changes occur here, a global write lock on
+ * the pcbinfo isn't required.
+ */
+ INP_INFO_LOCK_ASSERT(pcbinfo);
+ INP_LOCK_ASSERT(inp);
+
+ if (TAILQ_EMPTY(&V_in_ifaddrhead)) /* XXX broken! */
+ return (EADDRNOTAVAIL);
+ laddr.s_addr = *laddrp;
+ if (nam != NULL && laddr.s_addr != INADDR_ANY)
+ return (EINVAL);
+ if ((so->so_options & (SO_REUSEADDR|SO_REUSEPORT)) == 0)
+ wild = INPLOOKUP_WILDCARD;
+ if (nam == NULL) {
+ if ((error = prison_local_ip4(cred, &laddr)) != 0)
+ return (error);
+ } else {
+ sin = (struct sockaddr_in *)nam;
+ if (nam->sa_len != sizeof (*sin))
+ return (EINVAL);
+#ifdef notdef
+ /*
+ * We should check the family, but old programs
+ * incorrectly fail to initialize it.
+ */
+ if (sin->sin_family != AF_INET)
+ return (EAFNOSUPPORT);
+#endif
+ error = prison_local_ip4(cred, &sin->sin_addr);
+ if (error)
+ return (error);
+ if (sin->sin_port != *lportp) {
+ /* Don't allow the port to change. */
+ if (*lportp != 0)
+ return (EINVAL);
+ lport = sin->sin_port;
+ }
+ /* NB: lport is left as 0 if the port isn't being changed. */
+ if (IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) {
+ /*
+ * Treat SO_REUSEADDR as SO_REUSEPORT for multicast;
+ * allow complete duplication of binding if
+ * SO_REUSEPORT is set, or if SO_REUSEADDR is set
+ * and a multicast address is bound on both
+ * new and duplicated sockets.
+ */
+ if (so->so_options & SO_REUSEADDR)
+ reuseport = SO_REUSEADDR|SO_REUSEPORT;
+ } else if (sin->sin_addr.s_addr != INADDR_ANY) {
+ sin->sin_port = 0; /* yech... */
+ bzero(&sin->sin_zero, sizeof(sin->sin_zero));
+ /*
+ * Is the address a local IP address?
+ * If INP_BINDANY is set, then the socket may be bound
+ * to any endpoint address, local or not.
+ */
+ if ((inp->inp_flags & INP_BINDANY) == 0 &&
+ ifa_ifwithaddr_check((struct sockaddr *)sin) == 0)
+ return (EADDRNOTAVAIL);
+ }
+ laddr = sin->sin_addr;
+ if (lport) {
+ struct inpcb *t;
+ struct tcptw *tw;
+
+ /* GROSS */
+ if (ntohs(lport) <= V_ipport_reservedhigh &&
+ ntohs(lport) >= V_ipport_reservedlow &&
+ priv_check_cred(cred, PRIV_NETINET_RESERVEDPORT,
+ 0))
+ return (EACCES);
+ if (!IN_MULTICAST(ntohl(sin->sin_addr.s_addr)) &&
+ priv_check_cred(inp->inp_cred,
+ PRIV_NETINET_REUSEPORT, 0) != 0) {
+ t = in_pcblookup_local(pcbinfo, sin->sin_addr,
+ lport, INPLOOKUP_WILDCARD, cred);
+ /*
+ * XXX
+ * This entire block sorely needs a rewrite.
+ */
+ if (t &&
+ ((t->inp_flags & INP_TIMEWAIT) == 0) &&
+ (so->so_type != SOCK_STREAM ||
+ ntohl(t->inp_faddr.s_addr) == INADDR_ANY) &&
+ (ntohl(sin->sin_addr.s_addr) != INADDR_ANY ||
+ ntohl(t->inp_laddr.s_addr) != INADDR_ANY ||
+ (t->inp_socket->so_options &
+ SO_REUSEPORT) == 0) &&
+ (inp->inp_cred->cr_uid !=
+ t->inp_cred->cr_uid))
+ return (EADDRINUSE);
+ }
+ t = in_pcblookup_local(pcbinfo, sin->sin_addr,
+ lport, wild, cred);
+ if (t && (t->inp_flags & INP_TIMEWAIT)) {
+ /*
+ * XXXRW: If an incpb has had its timewait
+ * state recycled, we treat the address as
+ * being in use (for now). This is better
+ * than a panic, but not desirable.
+ */
+ tw = intotw(inp);
+ if (tw == NULL ||
+ (reuseport & tw->tw_so_options) == 0)
+ return (EADDRINUSE);
+ } else if (t &&
+ (reuseport & t->inp_socket->so_options) == 0) {
+#ifdef INET6
+ if (ntohl(sin->sin_addr.s_addr) !=
+ INADDR_ANY ||
+ ntohl(t->inp_laddr.s_addr) !=
+ INADDR_ANY ||
+ INP_SOCKAF(so) ==
+ INP_SOCKAF(t->inp_socket))
+#endif
+ return (EADDRINUSE);
+ }
+ }
+ }
+ if (*lportp != 0)
+ lport = *lportp;
+ if (lport == 0) {
+ u_short first, last, aux;
+ int count;
+
+ if (inp->inp_flags & INP_HIGHPORT) {
+ first = V_ipport_hifirstauto; /* sysctl */
+ last = V_ipport_hilastauto;
+ lastport = &pcbinfo->ipi_lasthi;
+ } else if (inp->inp_flags & INP_LOWPORT) {
+ error = priv_check_cred(cred,
+ PRIV_NETINET_RESERVEDPORT, 0);
+ if (error)
+ return error;
+ first = V_ipport_lowfirstauto; /* 1023 */
+ last = V_ipport_lowlastauto; /* 600 */
+ lastport = &pcbinfo->ipi_lastlow;
+ } else {
+ first = V_ipport_firstauto; /* sysctl */
+ last = V_ipport_lastauto;
+ lastport = &pcbinfo->ipi_lastport;
+ }
+ /*
+ * For UDP, use random port allocation as long as the user
+ * allows it. For TCP (and other, as yet unknown) connections,
+ * use random port allocation only if the user allows it AND
+ * ipport_tick() allows it.
+ */
+ if (V_ipport_randomized &&
+ (!V_ipport_stoprandom || pcbinfo == &V_udbinfo))
+ dorandom = 1;
+ else
+ dorandom = 0;
+ /*
+ * It makes no sense to do random port allocation if
+ * we have only one port available.
+ */
+ if (first == last)
+ dorandom = 0;
+ /* Make sure not to include UDP port allocations in the count. */
+ if (pcbinfo != &V_udbinfo)
+ V_ipport_tcpallocs++;
+ /*
+ * Instead of having two loops further down, one counting up and one
+ * counting down, make sure that first is always <= last and use a
+ * single code path implementing all the logic.
+ */
+ if (first > last) {
+ aux = first;
+ first = last;
+ last = aux;
+ }
+
+ if (dorandom)
+ *lastport = first +
+ (arc4random() % (last - first));
+
+ count = last - first;
+
+ do {
+ if (count-- < 0) /* completely used? */
+ return (EADDRNOTAVAIL);
+ ++*lastport;
+ if (*lastport < first || *lastport > last)
+ *lastport = first;
+ lport = htons(*lastport);
+ } while (in_pcblookup_local(pcbinfo, laddr,
+ lport, wild, cred));
+ }
+ *laddrp = laddr.s_addr;
+ *lportp = lport;
+ return (0);
+}
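+
+/*
+ * Editorial sketch (not part of the upstream sources): the ephemeral port
+ * selection above reduces to the loop below. Optionally pick a random
+ * starting point in [first, last], then probe sequentially with wraparound
+ * until a free port is found or the whole range has been tried. The
+ * pick_ephemeral_port() name and the port_in_use() predicate are
+ * hypothetical; the predicate stands in for in_pcblookup_local().
+ */
+#if 0
+static int
+pick_ephemeral_port(u_short first, u_short last, u_short *lastport,
+ int dorandom, int (*port_in_use)(u_short))
+{
+ int count = last - first; /* number of candidates to try */
+
+ if (dorandom)
+ *lastport = first + (arc4random() % (last - first));
+ do {
+ if (count-- < 0) /* entire range exhausted */
+ return (EADDRNOTAVAIL);
+ ++*lastport;
+ if (*lastport < first || *lastport > last)
+ *lastport = first;
+ } while (port_in_use(*lastport));
+ return (0);
+}
+#endif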
+
+/*
+ * Connect from a socket to a specified address.
+ * Both address and port must be specified in argument sin.
+ * If we don't have a local address for this socket yet,
+ * then pick one.
+ */
+int
+in_pcbconnect(struct inpcb *inp, struct sockaddr *nam, struct ucred *cred)
+{
+ u_short lport, fport;
+ in_addr_t laddr, faddr;
+ int anonport, error;
+
+ INP_INFO_WLOCK_ASSERT(inp->inp_pcbinfo);
+ INP_WLOCK_ASSERT(inp);
+
+ lport = inp->inp_lport;
+ laddr = inp->inp_laddr.s_addr;
+ anonport = (lport == 0);
+ error = in_pcbconnect_setup(inp, nam, &laddr, &lport, &faddr, &fport,
+ NULL, cred);
+ if (error)
+ return (error);
+
+ /* Do the initial binding of the local address if required. */
+ if (inp->inp_laddr.s_addr == INADDR_ANY && inp->inp_lport == 0) {
+ inp->inp_lport = lport;
+ inp->inp_laddr.s_addr = laddr;
+ if (in_pcbinshash(inp) != 0) {
+ inp->inp_laddr.s_addr = INADDR_ANY;
+ inp->inp_lport = 0;
+ return (EAGAIN);
+ }
+ }
+
+ /* Commit the remaining changes. */
+ inp->inp_lport = lport;
+ inp->inp_laddr.s_addr = laddr;
+ inp->inp_faddr.s_addr = faddr;
+ inp->inp_fport = fport;
+ in_pcbrehash(inp);
+
+ if (anonport)
+ inp->inp_flags |= INP_ANONPORT;
+ return (0);
+}
+
+/*
+ * Do proper source address selection on an unbound socket in case
+ * of connect. Take jails into account as well.
+ */
+static int
+in_pcbladdr(struct inpcb *inp, struct in_addr *faddr, struct in_addr *laddr,
+ struct ucred *cred)
+{
+ struct ifaddr *ifa;
+ struct sockaddr *sa;
+ struct sockaddr_in *sin;
+ struct route sro;
+ int error;
+
+ KASSERT(laddr != NULL, ("%s: laddr NULL", __func__));
+
+ /*
+ * Bypass source address selection and use the primary jail IP
+ * if requested.
+ */
+ if (cred != NULL && !prison_saddrsel_ip4(cred, laddr))
+ return (0);
+
+ error = 0;
+ bzero(&sro, sizeof(sro));
+
+ sin = (struct sockaddr_in *)&sro.ro_dst;
+ sin->sin_family = AF_INET;
+ sin->sin_len = sizeof(struct sockaddr_in);
+ sin->sin_addr.s_addr = faddr->s_addr;
+
+ /*
+ * If the route is known, our source address is taken from the
+ * outgoing interface; otherwise punt.
+ *
+ * First, find the route to the destination.
+ */
+ if ((inp->inp_socket->so_options & SO_DONTROUTE) == 0)
+ in_rtalloc_ign(&sro, 0, inp->inp_inc.inc_fibnum);
+
+ /*
+ * If we found a route, use the address corresponding to
+ * the outgoing interface.
+ *
+ * Otherwise assume faddr is reachable on a directly connected
+ * network and try to find a corresponding interface to take
+ * the source address from.
+ */
+ if (sro.ro_rt == NULL || sro.ro_rt->rt_ifp == NULL) {
+ struct in_ifaddr *ia;
+ struct ifnet *ifp;
+
+ ia = ifatoia(ifa_ifwithdstaddr((struct sockaddr *)sin));
+ if (ia == NULL)
+ ia = ifatoia(ifa_ifwithnet((struct sockaddr *)sin, 0));
+ if (ia == NULL) {
+ error = ENETUNREACH;
+ goto done;
+ }
+
+ if (cred == NULL || !prison_flag(cred, PR_IP4)) {
+ laddr->s_addr = ia->ia_addr.sin_addr.s_addr;
+ ifa_free(&ia->ia_ifa);
+ goto done;
+ }
+
+ ifp = ia->ia_ifp;
+ ifa_free(&ia->ia_ifa);
+ ia = NULL;
+ IF_ADDR_LOCK(ifp);
+ TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
+ sa = ifa->ifa_addr;
+ if (sa->sa_family != AF_INET)
+ continue;
+ sin = (struct sockaddr_in *)sa;
+ if (prison_check_ip4(cred, &sin->sin_addr) == 0) {
+ ia = (struct in_ifaddr *)ifa;
+ break;
+ }
+ }
+ if (ia != NULL) {
+ laddr->s_addr = ia->ia_addr.sin_addr.s_addr;
+ IF_ADDR_UNLOCK(ifp);
+ goto done;
+ }
+ IF_ADDR_UNLOCK(ifp);
+
+ /* 3. As a last resort return the 'default' jail address. */
+ error = prison_get_ip4(cred, laddr);
+ goto done;
+ }
+
+ /*
+ * If the outgoing interface on the route found is not
+ * a loopback interface, use the address from that interface.
+ * In the case of jails, perform these three steps:
+ * 1. check if the interface address belongs to the jail. If so use it.
+ * 2. check if we have any address on the outgoing interface
+ * belonging to this jail. If so use it.
+ * 3. as a last resort return the 'default' jail address.
+ */
+ if ((sro.ro_rt->rt_ifp->if_flags & IFF_LOOPBACK) == 0) {
+ struct in_ifaddr *ia;
+ struct ifnet *ifp;
+
+ /* If not jailed, use the default returned. */
+ if (cred == NULL || !prison_flag(cred, PR_IP4)) {
+ ia = (struct in_ifaddr *)sro.ro_rt->rt_ifa;
+ laddr->s_addr = ia->ia_addr.sin_addr.s_addr;
+ goto done;
+ }
+
+ /* Jailed. */
+ /* 1. Check if the iface address belongs to the jail. */
+ sin = (struct sockaddr_in *)sro.ro_rt->rt_ifa->ifa_addr;
+ if (prison_check_ip4(cred, &sin->sin_addr) == 0) {
+ ia = (struct in_ifaddr *)sro.ro_rt->rt_ifa;
+ laddr->s_addr = ia->ia_addr.sin_addr.s_addr;
+ goto done;
+ }
+
+ /*
+ * 2. Check if we have any address on the outgoing interface
+ * belonging to this jail.
+ */
+ ia = NULL;
+ ifp = sro.ro_rt->rt_ifp;
+ IF_ADDR_LOCK(ifp);
+ TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
+ sa = ifa->ifa_addr;
+ if (sa->sa_family != AF_INET)
+ continue;
+ sin = (struct sockaddr_in *)sa;
+ if (prison_check_ip4(cred, &sin->sin_addr) == 0) {
+ ia = (struct in_ifaddr *)ifa;
+ break;
+ }
+ }
+ if (ia != NULL) {
+ laddr->s_addr = ia->ia_addr.sin_addr.s_addr;
+ IF_ADDR_UNLOCK(ifp);
+ goto done;
+ }
+ IF_ADDR_UNLOCK(ifp);
+
+ /* 3. As a last resort return the 'default' jail address. */
+ error = prison_get_ip4(cred, laddr);
+ goto done;
+ }
+
+ /*
+ * The outgoing interface is marked with the loopback flag, so the
+ * route points back to ourselves.
+ * Try to find the interface of the destination address and take
+ * the source address from there; that interface is not necessarily
+ * a loopback interface.
+ * In the case of jails, check that the address belongs to the jail,
+ * and if none can be found, fall back to the 'default' jail address.
+ */
+ if ((sro.ro_rt->rt_ifp->if_flags & IFF_LOOPBACK) != 0) {
+ struct sockaddr_in sain;
+ struct in_ifaddr *ia;
+
+ bzero(&sain, sizeof(struct sockaddr_in));
+ sain.sin_family = AF_INET;
+ sain.sin_len = sizeof(struct sockaddr_in);
+ sain.sin_addr.s_addr = faddr->s_addr;
+
+ ia = ifatoia(ifa_ifwithdstaddr(sintosa(&sain)));
+ if (ia == NULL)
+ ia = ifatoia(ifa_ifwithnet(sintosa(&sain), 0));
+ if (ia == NULL)
+ ia = ifatoia(ifa_ifwithaddr(sintosa(&sain)));
+
+ if (cred == NULL || !prison_flag(cred, PR_IP4)) {
+ if (ia == NULL) {
+ error = ENETUNREACH;
+ goto done;
+ }
+ laddr->s_addr = ia->ia_addr.sin_addr.s_addr;
+ ifa_free(&ia->ia_ifa);
+ goto done;
+ }
+
+ /* Jailed. */
+ if (ia != NULL) {
+ struct ifnet *ifp;
+
+ ifp = ia->ia_ifp;
+ ifa_free(&ia->ia_ifa);
+ ia = NULL;
+ IF_ADDR_LOCK(ifp);
+ TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
+ sa = ifa->ifa_addr;
+ if (sa->sa_family != AF_INET)
+ continue;
+ sin = (struct sockaddr_in *)sa;
+ if (prison_check_ip4(cred,
+ &sin->sin_addr) == 0) {
+ ia = (struct in_ifaddr *)ifa;
+ break;
+ }
+ }
+ if (ia != NULL) {
+ laddr->s_addr = ia->ia_addr.sin_addr.s_addr;
+ IF_ADDR_UNLOCK(ifp);
+ goto done;
+ }
+ IF_ADDR_UNLOCK(ifp);
+ }
+
+ /* 3. As a last resort return the 'default' jail address. */
+ error = prison_get_ip4(cred, laddr);
+ goto done;
+ }
+
+done:
+ if (sro.ro_rt != NULL)
+ RTFREE(sro.ro_rt);
+ return (error);
+}
+
+/*
+ * Set up for a connect from a socket to the specified address.
+ * On entry, *laddrp and *lportp should contain the current local
+ * address and port for the PCB; these are updated to the values
+ * that should be placed in inp_laddr and inp_lport to complete
+ * the connect.
+ *
+ * On success, *faddrp and *fportp will be set to the remote address
+ * and port. These are not updated in the error case.
+ *
+ * If the operation fails because the connection already exists,
+ * *oinpp will be set to the PCB of that connection so that the
+ * caller can decide to override it. In all other cases, *oinpp
+ * is set to NULL.
+ */
+int
+in_pcbconnect_setup(struct inpcb *inp, struct sockaddr *nam,
+ in_addr_t *laddrp, u_short *lportp, in_addr_t *faddrp, u_short *fportp,
+ struct inpcb **oinpp, struct ucred *cred)
+{
+ struct sockaddr_in *sin = (struct sockaddr_in *)nam;
+ struct in_ifaddr *ia;
+ struct inpcb *oinp;
+ struct in_addr laddr, faddr;
+ u_short lport, fport;
+ int error;
+
+ /*
+ * Because a global state change doesn't actually occur here, a read
+ * lock is sufficient.
+ */
+ INP_INFO_LOCK_ASSERT(inp->inp_pcbinfo);
+ INP_LOCK_ASSERT(inp);
+
+ if (oinpp != NULL)
+ *oinpp = NULL;
+ if (nam->sa_len != sizeof (*sin))
+ return (EINVAL);
+ if (sin->sin_family != AF_INET)
+ return (EAFNOSUPPORT);
+ if (sin->sin_port == 0)
+ return (EADDRNOTAVAIL);
+ laddr.s_addr = *laddrp;
+ lport = *lportp;
+ faddr = sin->sin_addr;
+ fport = sin->sin_port;
+
+ if (!TAILQ_EMPTY(&V_in_ifaddrhead)) {
+ /*
+ * If the destination address is INADDR_ANY,
+ * use the primary local address.
+ * If the supplied address is INADDR_BROADCAST,
+ * and the primary interface supports broadcast,
+ * choose the broadcast address for that interface.
+ */
+ if (faddr.s_addr == INADDR_ANY) {
+ IN_IFADDR_RLOCK();
+ faddr =
+ IA_SIN(TAILQ_FIRST(&V_in_ifaddrhead))->sin_addr;
+ IN_IFADDR_RUNLOCK();
+ if (cred != NULL &&
+ (error = prison_get_ip4(cred, &faddr)) != 0)
+ return (error);
+ } else if (faddr.s_addr == (u_long)INADDR_BROADCAST) {
+ IN_IFADDR_RLOCK();
+ if (TAILQ_FIRST(&V_in_ifaddrhead)->ia_ifp->if_flags &
+ IFF_BROADCAST)
+ faddr = satosin(&TAILQ_FIRST(
+ &V_in_ifaddrhead)->ia_broadaddr)->sin_addr;
+ IN_IFADDR_RUNLOCK();
+ }
+ }
+ if (laddr.s_addr == INADDR_ANY) {
+ error = in_pcbladdr(inp, &faddr, &laddr, cred);
+ /*
+ * If the destination address is multicast and an outgoing
+ * interface has been set as a multicast option, prefer the
+ * address of that interface as our source address.
+ */
+ if (IN_MULTICAST(ntohl(faddr.s_addr)) &&
+ inp->inp_moptions != NULL) {
+ struct ip_moptions *imo;
+ struct ifnet *ifp;
+
+ imo = inp->inp_moptions;
+ if (imo->imo_multicast_ifp != NULL) {
+ ifp = imo->imo_multicast_ifp;
+ IN_IFADDR_RLOCK();
+ TAILQ_FOREACH(ia, &V_in_ifaddrhead, ia_link)
+ if (ia->ia_ifp == ifp)
+ break;
+ if (ia == NULL) {
+ IN_IFADDR_RUNLOCK();
+ error = EADDRNOTAVAIL;
+ } else {
+ laddr = ia->ia_addr.sin_addr;
+ IN_IFADDR_RUNLOCK();
+ error = 0;
+ }
+ }
+ }
+ if (error)
+ return (error);
+ }
+ oinp = in_pcblookup_hash(inp->inp_pcbinfo, faddr, fport, laddr, lport,
+ 0, NULL);
+ if (oinp != NULL) {
+ if (oinpp != NULL)
+ *oinpp = oinp;
+ return (EADDRINUSE);
+ }
+ if (lport == 0) {
+ error = in_pcbbind_setup(inp, NULL, &laddr.s_addr, &lport,
+ cred);
+ if (error)
+ return (error);
+ }
+ *laddrp = laddr.s_addr;
+ *lportp = lport;
+ *faddrp = faddr.s_addr;
+ *fportp = fport;
+ return (0);
+}
+
+void
+in_pcbdisconnect(struct inpcb *inp)
+{
+
+ INP_INFO_WLOCK_ASSERT(inp->inp_pcbinfo);
+ INP_WLOCK_ASSERT(inp);
+
+ inp->inp_faddr.s_addr = INADDR_ANY;
+ inp->inp_fport = 0;
+ in_pcbrehash(inp);
+}
+
+/*
+ * in_pcbdetach() is responsible for disassociating a socket from an inpcb.
+ * For most protocols, this will be invoked immediately prior to calling
+ * in_pcbfree(). However, with TCP the inpcb may significantly outlive the
+ * socket, in which case in_pcbfree() is deferred.
+ */
+void
+in_pcbdetach(struct inpcb *inp)
+{
+
+ KASSERT(inp->inp_socket != NULL, ("%s: inp_socket == NULL", __func__));
+
+ inp->inp_socket->so_pcb = NULL;
+ inp->inp_socket = NULL;
+}
+
+/*
+ * in_pcbfree_internal() frees an inpcb that has been detached from its
+ * socket, and whose reference count has reached 0. It will also remove the
+ * inpcb from any global lists it might remain on.
+ */
+static void
+in_pcbfree_internal(struct inpcb *inp)
+{
+ struct inpcbinfo *ipi = inp->inp_pcbinfo;
+
+ KASSERT(inp->inp_socket == NULL, ("%s: inp_socket != NULL", __func__));
+ KASSERT(inp->inp_refcount == 0, ("%s: refcount !0", __func__));
+
+ INP_INFO_WLOCK_ASSERT(ipi);
+ INP_WLOCK_ASSERT(inp);
+
+#ifdef IPSEC
+ if (inp->inp_sp != NULL)
+ ipsec_delete_pcbpolicy(inp);
+#endif /* IPSEC */
+ inp->inp_gencnt = ++ipi->ipi_gencnt;
+ in_pcbremlists(inp);
+#ifdef INET6
+ if (inp->inp_vflag & INP_IPV6PROTO) {
+ ip6_freepcbopts(inp->in6p_outputopts);
+ if (inp->in6p_moptions != NULL)
+ ip6_freemoptions(inp->in6p_moptions);
+ }
+#endif
+ if (inp->inp_options)
+ (void)m_free(inp->inp_options);
+ if (inp->inp_moptions != NULL)
+ inp_freemoptions(inp->inp_moptions);
+ inp->inp_vflag = 0;
+ crfree(inp->inp_cred);
+
+#ifdef MAC
+ mac_inpcb_destroy(inp);
+#endif
+ INP_WUNLOCK(inp);
+ uma_zfree(ipi->ipi_zone, inp);
+}
+
+/*
+ * in_pcbref() bumps the reference count on an inpcb in order to maintain
+ * stability of an inpcb pointer despite the inpcb lock being released. This
+ * is used in TCP when the inpcbinfo lock needs to be acquired or upgraded,
+ * but where the inpcb lock is already held.
+ *
+ * While the inpcb will not be freed, releasing the inpcb lock means that the
+ * connection's state may change, so the caller should be careful to
+ * revalidate any cached state on reacquiring the lock. Drop the reference
+ * using in_pcbrele().
+ */
+void
+in_pcbref(struct inpcb *inp)
+{
+
+ INP_WLOCK_ASSERT(inp);
+
+ KASSERT(inp->inp_refcount > 0, ("%s: refcount 0", __func__));
+
+ inp->inp_refcount++;
+}
+
+/*
+ * Drop a refcount on an inpcb elevated using in_pcbref(); because a call to
+ * in_pcbfree() may have been made between in_pcbref() and in_pcbrele(), we
+ * return a flag indicating whether or not the inpcb remains valid. If it is
+ * valid, we return with the inpcb lock held.
+ */
+int
+in_pcbrele(struct inpcb *inp)
+{
+#ifdef INVARIANTS
+ struct inpcbinfo *ipi = inp->inp_pcbinfo;
+#endif
+
+ KASSERT(inp->inp_refcount > 0, ("%s: refcount 0", __func__));
+
+ INP_INFO_WLOCK_ASSERT(ipi);
+ INP_WLOCK_ASSERT(inp);
+
+ inp->inp_refcount--;
+ if (inp->inp_refcount > 0)
+ return (0);
+ in_pcbfree_internal(inp);
+ return (1);
+}
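+
+/*
+ * Editorial sketch (not part of the upstream sources): the canonical
+ * in_pcbref()/in_pcbrele() pattern keeps an inpcb pointer stable while its
+ * lock is dropped, e.g. so the pcbinfo lock can be taken in the required
+ * order. The fragment assumes inp is write-locked on entry; any cached
+ * connection state must be revalidated after relocking.
+ */
+#if 0
+ struct inpcbinfo *pcbinfo = inp->inp_pcbinfo;
+
+ in_pcbref(inp); /* pin the inpcb across the unlock */
+ INP_WUNLOCK(inp); /* drop so the info lock can be taken first */
+ INP_INFO_WLOCK(pcbinfo);
+ INP_WLOCK(inp);
+ if (in_pcbrele(inp)) {
+ /* Ours was the last reference: the inpcb has been freed. */
+ INP_INFO_WUNLOCK(pcbinfo);
+ return;
+ }
+ /* The inpcb is still valid and write-locked; revalidate state here. */
+#endif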
+
+/*
+ * Unconditionally schedule an inpcb to be freed by decrementing its
+ * reference count, which should occur only after the inpcb has been detached
+ * from its socket. If another thread holds a temporary reference (acquired
+ * using in_pcbref()) then the free is deferred until that reference is
+ * released using in_pcbrele(), but the inpcb is still unlocked.
+ */
+void
+in_pcbfree(struct inpcb *inp)
+{
+#ifdef INVARIANTS
+ struct inpcbinfo *ipi = inp->inp_pcbinfo;
+#endif
+
+ KASSERT(inp->inp_socket == NULL, ("%s: inp_socket != NULL",
+ __func__));
+
+ INP_INFO_WLOCK_ASSERT(ipi);
+ INP_WLOCK_ASSERT(inp);
+
+ if (!in_pcbrele(inp))
+ INP_WUNLOCK(inp);
+}
+
+/*
+ * in_pcbdrop() removes an inpcb from hashed lists, releasing its address and
+ * port reservation, and preventing it from being returned by inpcb lookups.
+ *
+ * It is used by TCP to mark an inpcb as unused and avoid future packet
+ * delivery or event notification when a socket remains open but TCP has
+ * closed. This might occur as a result of a shutdown()-initiated TCP close
+ * or a RST on the wire, and allows the port binding to be reused while still
+ * maintaining the invariant that so_pcb always points to a valid inpcb until
+ * in_pcbdetach().
+ *
+ * XXXRW: An inp_lport of 0 is used to indicate that the inpcb is not on hash
+ * lists, but can lead to confusing netstat output, as open sockets with
+ * closed TCP connections will no longer appear to have their bound port
+ * number. An explicit flag would be better, as it would allow us to leave
+ * the port number intact after the connection is dropped.
+ *
+ * XXXRW: Possibly in_pcbdrop() should also prevent future notifications by
+ * in_pcbnotifyall() and in_pcbpurgeif0()?
+ */
+void
+in_pcbdrop(struct inpcb *inp)
+{
+
+ INP_INFO_WLOCK_ASSERT(inp->inp_pcbinfo);
+ INP_WLOCK_ASSERT(inp);
+
+ inp->inp_flags |= INP_DROPPED;
+ if (inp->inp_flags & INP_INHASHLIST) {
+ struct inpcbport *phd = inp->inp_phd;
+
+ LIST_REMOVE(inp, inp_hash);
+ LIST_REMOVE(inp, inp_portlist);
+ if (LIST_FIRST(&phd->phd_pcblist) == NULL) {
+ LIST_REMOVE(phd, phd_hash);
+ free(phd, M_PCB);
+ }
+ inp->inp_flags &= ~INP_INHASHLIST;
+ }
+}
+
+/*
+ * Common routines to return the socket addresses associated with inpcbs.
+ */
+struct sockaddr *
+in_sockaddr(in_port_t port, struct in_addr *addr_p)
+{
+ struct sockaddr_in *sin;
+
+ sin = malloc(sizeof *sin, M_SONAME,
+ M_WAITOK | M_ZERO);
+ sin->sin_family = AF_INET;
+ sin->sin_len = sizeof(*sin);
+ sin->sin_addr = *addr_p;
+ sin->sin_port = port;
+
+ return (struct sockaddr *)sin;
+}
+
+int
+in_getsockaddr(struct socket *so, struct sockaddr **nam)
+{
+ struct inpcb *inp;
+ struct in_addr addr;
+ in_port_t port;
+
+ inp = sotoinpcb(so);
+ KASSERT(inp != NULL, ("in_getsockaddr: inp == NULL"));
+
+ INP_RLOCK(inp);
+ port = inp->inp_lport;
+ addr = inp->inp_laddr;
+ INP_RUNLOCK(inp);
+
+ *nam = in_sockaddr(port, &addr);
+ return 0;
+}
+
+int
+in_getpeeraddr(struct socket *so, struct sockaddr **nam)
+{
+ struct inpcb *inp;
+ struct in_addr addr;
+ in_port_t port;
+
+ inp = sotoinpcb(so);
+ KASSERT(inp != NULL, ("in_getpeeraddr: inp == NULL"));
+
+ INP_RLOCK(inp);
+ port = inp->inp_fport;
+ addr = inp->inp_faddr;
+ INP_RUNLOCK(inp);
+
+ *nam = in_sockaddr(port, &addr);
+ return 0;
+}
+
+void
+in_pcbnotifyall(struct inpcbinfo *pcbinfo, struct in_addr faddr, int errno,
+ struct inpcb *(*notify)(struct inpcb *, int))
+{
+ struct inpcb *inp, *inp_temp;
+
+ INP_INFO_WLOCK(pcbinfo);
+ LIST_FOREACH_SAFE(inp, pcbinfo->ipi_listhead, inp_list, inp_temp) {
+ INP_WLOCK(inp);
+#ifdef INET6
+ if ((inp->inp_vflag & INP_IPV4) == 0) {
+ INP_WUNLOCK(inp);
+ continue;
+ }
+#endif
+ if (inp->inp_faddr.s_addr != faddr.s_addr ||
+ inp->inp_socket == NULL) {
+ INP_WUNLOCK(inp);
+ continue;
+ }
+ if ((*notify)(inp, errno))
+ INP_WUNLOCK(inp);
+ }
+ INP_INFO_WUNLOCK(pcbinfo);
+}
+
+void
+in_pcbpurgeif0(struct inpcbinfo *pcbinfo, struct ifnet *ifp)
+{
+ struct inpcb *inp;
+ struct ip_moptions *imo;
+ int i, gap;
+
+ INP_INFO_RLOCK(pcbinfo);
+ LIST_FOREACH(inp, pcbinfo->ipi_listhead, inp_list) {
+ INP_WLOCK(inp);
+ imo = inp->inp_moptions;
+ if ((inp->inp_vflag & INP_IPV4) &&
+ imo != NULL) {
+ /*
+ * Unselect the outgoing interface if it is being
+ * detached.
+ */
+ if (imo->imo_multicast_ifp == ifp)
+ imo->imo_multicast_ifp = NULL;
+
+ /*
+ * Drop multicast group membership if we joined
+ * through the interface being detached.
+ */
+ for (i = 0, gap = 0; i < imo->imo_num_memberships;
+ i++) {
+ if (imo->imo_membership[i]->inm_ifp == ifp) {
+ in_delmulti(imo->imo_membership[i]);
+ gap++;
+ } else if (gap != 0)
+ imo->imo_membership[i - gap] =
+ imo->imo_membership[i];
+ }
+ imo->imo_num_memberships -= gap;
+ }
+ INP_WUNLOCK(inp);
+ }
+ INP_INFO_RUNLOCK(pcbinfo);
+}
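+
+/*
+ * Editorial worked example (not part of the upstream sources): if the
+ * membership array holds [A, B, C] and only B was joined through the
+ * detached interface, B is dropped, gap becomes 1, C is shifted into B's
+ * slot, and imo_num_memberships falls from 3 to 2, leaving [A, C].
+ */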
+
+/*
+ * Lookup a PCB based on the local address and port.
+ */
+#define INP_LOOKUP_MAPPED_PCB_COST 3
+struct inpcb *
+in_pcblookup_local(struct inpcbinfo *pcbinfo, struct in_addr laddr,
+ u_short lport, int wild_okay, struct ucred *cred)
+{
+ struct inpcb *inp;
+#ifdef INET6
+ int matchwild = 3 + INP_LOOKUP_MAPPED_PCB_COST;
+#else
+ int matchwild = 3;
+#endif
+ int wildcard;
+
+ INP_INFO_LOCK_ASSERT(pcbinfo);
+
+ if (!wild_okay) {
+ struct inpcbhead *head;
+ /*
+ * Look for an unconnected (wildcard foreign addr) PCB that
+ * matches the local address and port we're looking for.
+ */
+ head = &pcbinfo->ipi_hashbase[INP_PCBHASH(INADDR_ANY, lport,
+ 0, pcbinfo->ipi_hashmask)];
+ LIST_FOREACH(inp, head, inp_hash) {
+#ifdef INET6
+ /* XXX inp locking */
+ if ((inp->inp_vflag & INP_IPV4) == 0)
+ continue;
+#endif
+ if (inp->inp_faddr.s_addr == INADDR_ANY &&
+ inp->inp_laddr.s_addr == laddr.s_addr &&
+ inp->inp_lport == lport) {
+ /*
+ * Found?
+ */
+ if (cred == NULL ||
+ prison_equal_ip4(cred->cr_prison,
+ inp->inp_cred->cr_prison))
+ return (inp);
+ }
+ }
+ /*
+ * Not found.
+ */
+ return (NULL);
+ } else {
+ struct inpcbporthead *porthash;
+ struct inpcbport *phd;
+ struct inpcb *match = NULL;
+ /*
+ * Best fit PCB lookup.
+ *
+ * First see if this local port is in use by looking on the
+ * port hash list.
+ */
+ porthash = &pcbinfo->ipi_porthashbase[INP_PCBPORTHASH(lport,
+ pcbinfo->ipi_porthashmask)];
+ LIST_FOREACH(phd, porthash, phd_hash) {
+ if (phd->phd_port == lport)
+ break;
+ }
+ if (phd != NULL) {
+ /*
+ * Port is in use by one or more PCBs. Look for best
+ * fit.
+ */
+ LIST_FOREACH(inp, &phd->phd_pcblist, inp_portlist) {
+ wildcard = 0;
+ if (cred != NULL &&
+ !prison_equal_ip4(inp->inp_cred->cr_prison,
+ cred->cr_prison))
+ continue;
+#ifdef INET6
+ /* XXX inp locking */
+ if ((inp->inp_vflag & INP_IPV4) == 0)
+ continue;
+ /*
+ * We never select the PCB that has
+ * INP_IPV6 flag and is bound to :: if
+ * we have another PCB which is bound
+ * to 0.0.0.0. If a PCB has the
+ * INP_IPV6 flag, then we set its cost
+ * higher than IPv4 only PCBs.
+ *
+ * Note that this case only happens
+ * when a socket is bound to ::, under
+ * the condition that the use of the
+ * mapped address is allowed.
+ */
+ if ((inp->inp_vflag & INP_IPV6) != 0)
+ wildcard += INP_LOOKUP_MAPPED_PCB_COST;
+#endif
+ if (inp->inp_faddr.s_addr != INADDR_ANY)
+ wildcard++;
+ if (inp->inp_laddr.s_addr != INADDR_ANY) {
+ if (laddr.s_addr == INADDR_ANY)
+ wildcard++;
+ else if (inp->inp_laddr.s_addr != laddr.s_addr)
+ continue;
+ } else {
+ if (laddr.s_addr != INADDR_ANY)
+ wildcard++;
+ }
+ if (wildcard < matchwild) {
+ match = inp;
+ matchwild = wildcard;
+ if (matchwild == 0)
+ break;
+ }
+ }
+ }
+ return (match);
+ }
+}
+#undef INP_LOOKUP_MAPPED_PCB_COST
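+
+/*
+ * Editorial worked example (not part of the upstream sources): for a
+ * wildcard lookup with laddr 10.0.0.1, a PCB bound to 10.0.0.1 with a
+ * wildcard foreign address scores wildcard = 0 (exact match, the search
+ * stops); a PCB bound to INADDR_ANY scores 1; an INP_IPV6-flagged PCB
+ * bound to :: scores 1 + INP_LOOKUP_MAPPED_PCB_COST = 4, so an IPv4-only
+ * wildcard PCB is always preferred over a mapped-address one.
+ */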
+
+/*
+ * Lookup PCB in hash list.
+ */
+struct inpcb *
+in_pcblookup_hash(struct inpcbinfo *pcbinfo, struct in_addr faddr,
+ u_int fport_arg, struct in_addr laddr, u_int lport_arg, int wildcard,
+ struct ifnet *ifp)
+{
+ struct inpcbhead *head;
+ struct inpcb *inp, *tmpinp;
+ u_short fport = fport_arg, lport = lport_arg;
+
+ INP_INFO_LOCK_ASSERT(pcbinfo);
+
+ /*
+ * First look for an exact match.
+ */
+ tmpinp = NULL;
+ head = &pcbinfo->ipi_hashbase[INP_PCBHASH(faddr.s_addr, lport, fport,
+ pcbinfo->ipi_hashmask)];
+ LIST_FOREACH(inp, head, inp_hash) {
+#ifdef INET6
+ /* XXX inp locking */
+ if ((inp->inp_vflag & INP_IPV4) == 0)
+ continue;
+#endif
+ if (inp->inp_faddr.s_addr == faddr.s_addr &&
+ inp->inp_laddr.s_addr == laddr.s_addr &&
+ inp->inp_fport == fport &&
+ inp->inp_lport == lport) {
+ /*
+ * XXX We should be able to directly return
+ * the inp here, without any checks.
+ * Well unless both bound with SO_REUSEPORT?
+ */
+ if (prison_flag(inp->inp_cred, PR_IP4))
+ return (inp);
+ if (tmpinp == NULL)
+ tmpinp = inp;
+ }
+ }
+ if (tmpinp != NULL)
+ return (tmpinp);
+
+ /*
+ * Then look for a wildcard match, if requested.
+ */
+ if (wildcard == INPLOOKUP_WILDCARD) {
+ struct inpcb *local_wild = NULL, *local_exact = NULL;
+#ifdef INET6
+ struct inpcb *local_wild_mapped = NULL;
+#endif
+ struct inpcb *jail_wild = NULL;
+ int injail;
+
+ /*
+ * Order of socket selection - we always prefer jails.
+ * 1. jailed, non-wild.
+ * 2. jailed, wild.
+ * 3. non-jailed, non-wild.
+ * 4. non-jailed, wild.
+ */
+
+ head = &pcbinfo->ipi_hashbase[INP_PCBHASH(INADDR_ANY, lport,
+ 0, pcbinfo->ipi_hashmask)];
+ LIST_FOREACH(inp, head, inp_hash) {
+#ifdef INET6
+ /* XXX inp locking */
+ if ((inp->inp_vflag & INP_IPV4) == 0)
+ continue;
+#endif
+ if (inp->inp_faddr.s_addr != INADDR_ANY ||
+ inp->inp_lport != lport)
+ continue;
+
+ /* XXX inp locking */
+ if (ifp && ifp->if_type == IFT_FAITH &&
+ (inp->inp_flags & INP_FAITH) == 0)
+ continue;
+
+ injail = prison_flag(inp->inp_cred, PR_IP4);
+ if (injail) {
+ if (prison_check_ip4(inp->inp_cred,
+ &laddr) != 0)
+ continue;
+ } else {
+ if (local_exact != NULL)
+ continue;
+ }
+
+ if (inp->inp_laddr.s_addr == laddr.s_addr) {
+ if (injail)
+ return (inp);
+ else
+ local_exact = inp;
+ } else if (inp->inp_laddr.s_addr == INADDR_ANY) {
+#ifdef INET6
+ /* XXX inp locking, NULL check */
+ if (inp->inp_vflag & INP_IPV6PROTO)
+ local_wild_mapped = inp;
+ else
+#endif /* INET6 */
+ if (injail)
+ jail_wild = inp;
+ else
+ local_wild = inp;
+ }
+ } /* LIST_FOREACH */
+ if (jail_wild != NULL)
+ return (jail_wild);
+ if (local_exact != NULL)
+ return (local_exact);
+ if (local_wild != NULL)
+ return (local_wild);
+#ifdef INET6
+ if (local_wild_mapped != NULL)
+ return (local_wild_mapped);
+#endif /* defined(INET6) */
+ } /* if (wildcard == INPLOOKUP_WILDCARD) */
+
+ return (NULL);
+}
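+
+/*
+ * Editorial worked example (not part of the upstream sources): with a
+ * jailed socket bound to 10.0.0.1:80, a jailed wildcard *:80, and a
+ * non-jailed *:80 all present, a wildcard lookup for laddr 10.0.0.1 and
+ * lport 80 returns the jailed exact binding immediately; were it absent,
+ * the jailed wildcard would still beat any non-jailed binding, exact or
+ * wild, per the ordering above.
+ */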
+
+/*
+ * Insert PCB onto various hash lists.
+ */
+int
+in_pcbinshash(struct inpcb *inp)
+{
+ struct inpcbhead *pcbhash;
+ struct inpcbporthead *pcbporthash;
+ struct inpcbinfo *pcbinfo = inp->inp_pcbinfo;
+ struct inpcbport *phd;
+ u_int32_t hashkey_faddr;
+
+ INP_INFO_WLOCK_ASSERT(pcbinfo);
+ INP_WLOCK_ASSERT(inp);
+ KASSERT((inp->inp_flags & INP_INHASHLIST) == 0,
+ ("in_pcbinshash: INP_INHASHLIST"));
+
+#ifdef INET6
+ if (inp->inp_vflag & INP_IPV6)
+ hashkey_faddr = inp->in6p_faddr.s6_addr32[3] /* XXX */;
+ else
+#endif /* INET6 */
+ hashkey_faddr = inp->inp_faddr.s_addr;
+
+ pcbhash = &pcbinfo->ipi_hashbase[INP_PCBHASH(hashkey_faddr,
+ inp->inp_lport, inp->inp_fport, pcbinfo->ipi_hashmask)];
+
+ pcbporthash = &pcbinfo->ipi_porthashbase[
+ INP_PCBPORTHASH(inp->inp_lport, pcbinfo->ipi_porthashmask)];
+
+ /*
+ * Go through port list and look for a head for this lport.
+ */
+ LIST_FOREACH(phd, pcbporthash, phd_hash) {
+ if (phd->phd_port == inp->inp_lport)
+ break;
+ }
+ /*
+ * If none exists, malloc one and tack it on.
+ */
+ if (phd == NULL) {
+ phd = malloc(sizeof(struct inpcbport), M_PCB, M_NOWAIT);
+ if (phd == NULL) {
+ return (ENOBUFS); /* XXX */
+ }
+ phd->phd_port = inp->inp_lport;
+ LIST_INIT(&phd->phd_pcblist);
+ LIST_INSERT_HEAD(pcbporthash, phd, phd_hash);
+ }
+ inp->inp_phd = phd;
+ LIST_INSERT_HEAD(&phd->phd_pcblist, inp, inp_portlist);
+ LIST_INSERT_HEAD(pcbhash, inp, inp_hash);
+ inp->inp_flags |= INP_INHASHLIST;
+ return (0);
+}
+
+/*
+ * Move PCB to the proper hash bucket when { faddr, fport } have been
+ * changed. NOTE: This does not handle the case of the lport changing (the
+ * hashed port list would have to be updated as well), so the lport must
+ * not change after in_pcbinshash() has been called.
+ */
+void
+in_pcbrehash(struct inpcb *inp)
+{
+ struct inpcbinfo *pcbinfo = inp->inp_pcbinfo;
+ struct inpcbhead *head;
+ u_int32_t hashkey_faddr;
+
+ INP_INFO_WLOCK_ASSERT(pcbinfo);
+ INP_WLOCK_ASSERT(inp);
+ KASSERT(inp->inp_flags & INP_INHASHLIST,
+ ("in_pcbrehash: !INP_INHASHLIST"));
+
+#ifdef INET6
+ if (inp->inp_vflag & INP_IPV6)
+ hashkey_faddr = inp->in6p_faddr.s6_addr32[3] /* XXX */;
+ else
+#endif /* INET6 */
+ hashkey_faddr = inp->inp_faddr.s_addr;
+
+ head = &pcbinfo->ipi_hashbase[INP_PCBHASH(hashkey_faddr,
+ inp->inp_lport, inp->inp_fport, pcbinfo->ipi_hashmask)];
+
+ LIST_REMOVE(inp, inp_hash);
+ LIST_INSERT_HEAD(head, inp, inp_hash);
+}
+
+/*
+ * Remove PCB from various lists.
+ */
+static void
+in_pcbremlists(struct inpcb *inp)
+{
+ struct inpcbinfo *pcbinfo = inp->inp_pcbinfo;
+
+ INP_INFO_WLOCK_ASSERT(pcbinfo);
+ INP_WLOCK_ASSERT(inp);
+
+ inp->inp_gencnt = ++pcbinfo->ipi_gencnt;
+ if (inp->inp_flags & INP_INHASHLIST) {
+ struct inpcbport *phd = inp->inp_phd;
+
+ LIST_REMOVE(inp, inp_hash);
+ LIST_REMOVE(inp, inp_portlist);
+ if (LIST_FIRST(&phd->phd_pcblist) == NULL) {
+ LIST_REMOVE(phd, phd_hash);
+ free(phd, M_PCB);
+ }
+ inp->inp_flags &= ~INP_INHASHLIST;
+ }
+ LIST_REMOVE(inp, inp_list);
+ pcbinfo->ipi_count--;
+}
+
+/*
+ * A set label operation has occurred at the socket layer, propagate the
+ * label change into the in_pcb for the socket.
+ */
+void
+in_pcbsosetlabel(struct socket *so)
+{
+#ifdef MAC
+ struct inpcb *inp;
+
+ inp = sotoinpcb(so);
+ KASSERT(inp != NULL, ("in_pcbsosetlabel: so->so_pcb == NULL"));
+
+ INP_WLOCK(inp);
+ SOCK_LOCK(so);
+ mac_inpcb_sosetlabel(so, inp);
+ SOCK_UNLOCK(so);
+ INP_WUNLOCK(inp);
+#endif
+}
+
+/*
+ * ipport_tick runs once per second, determining if random port allocation
+ * should be continued. If more than ipport_randomcps ports have been
+ * allocated in the last second, then we return to sequential port
+ * allocation. We return to random allocation only once we drop below
+ * ipport_randomcps for at least ipport_randomtime seconds.
+ */
+void
+ipport_tick(void *xtp)
+{
+ VNET_ITERATOR_DECL(vnet_iter);
+
+ VNET_LIST_RLOCK_NOSLEEP();
+ VNET_FOREACH(vnet_iter) {
+ CURVNET_SET(vnet_iter); /* XXX appease INVARIANTS here */
+ if (V_ipport_tcpallocs <=
+ V_ipport_tcplastcount + V_ipport_randomcps) {
+ if (V_ipport_stoprandom > 0)
+ V_ipport_stoprandom--;
+ } else
+ V_ipport_stoprandom = V_ipport_randomtime;
+ V_ipport_tcplastcount = V_ipport_tcpallocs;
+ CURVNET_RESTORE();
+ }
+ VNET_LIST_RUNLOCK_NOSLEEP();
+ callout_reset(&ipport_tick_callout, hz, ipport_tick, NULL);
+}
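+
+/*
+ * Editorial worked example (not part of the upstream sources): assuming
+ * ipport_randomcps = 10 and ipport_randomtime = 45, any second in which
+ * more than 10 TCP ports are allocated sets ipport_stoprandom to 45;
+ * allocation then remains sequential until 45 consecutive seconds pass at
+ * or below that rate, ipport_stoprandom being decremented once per tick.
+ */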
+
+void
+inp_wlock(struct inpcb *inp)
+{
+
+ INP_WLOCK(inp);
+}
+
+void
+inp_wunlock(struct inpcb *inp)
+{
+
+ INP_WUNLOCK(inp);
+}
+
+void
+inp_rlock(struct inpcb *inp)
+{
+
+ INP_RLOCK(inp);
+}
+
+void
+inp_runlock(struct inpcb *inp)
+{
+
+ INP_RUNLOCK(inp);
+}
+
+#ifdef INVARIANTS
+void
+inp_lock_assert(struct inpcb *inp)
+{
+
+ INP_WLOCK_ASSERT(inp);
+}
+
+void
+inp_unlock_assert(struct inpcb *inp)
+{
+
+ INP_UNLOCK_ASSERT(inp);
+}
+#endif
+
+void
+inp_apply_all(void (*func)(struct inpcb *, void *), void *arg)
+{
+ struct inpcb *inp;
+
+ INP_INFO_RLOCK(&V_tcbinfo);
+ LIST_FOREACH(inp, V_tcbinfo.ipi_listhead, inp_list) {
+ INP_WLOCK(inp);
+ func(inp, arg);
+ INP_WUNLOCK(inp);
+ }
+ INP_INFO_RUNLOCK(&V_tcbinfo);
+}
+
+struct socket *
+inp_inpcbtosocket(struct inpcb *inp)
+{
+
+ INP_WLOCK_ASSERT(inp);
+ return (inp->inp_socket);
+}
+
+struct tcpcb *
+inp_inpcbtotcpcb(struct inpcb *inp)
+{
+
+ INP_WLOCK_ASSERT(inp);
+ return ((struct tcpcb *)inp->inp_ppcb);
+}
+
+int
+inp_ip_tos_get(const struct inpcb *inp)
+{
+
+ return (inp->inp_ip_tos);
+}
+
+void
+inp_ip_tos_set(struct inpcb *inp, int val)
+{
+
+ inp->inp_ip_tos = val;
+}
+
+void
+inp_4tuple_get(struct inpcb *inp, uint32_t *laddr, uint16_t *lp,
+ uint32_t *faddr, uint16_t *fp)
+{
+
+ INP_LOCK_ASSERT(inp);
+ *laddr = inp->inp_laddr.s_addr;
+ *faddr = inp->inp_faddr.s_addr;
+ *lp = inp->inp_lport;
+ *fp = inp->inp_fport;
+}
+
+struct inpcb *
+so_sotoinpcb(struct socket *so)
+{
+
+ return (sotoinpcb(so));
+}
+
+struct tcpcb *
+so_sototcpcb(struct socket *so)
+{
+
+ return (sototcpcb(so));
+}
+
+#ifdef DDB
+static void
+db_print_indent(int indent)
+{
+ int i;
+
+ for (i = 0; i < indent; i++)
+ db_printf(" ");
+}
+
+static void
+db_print_inconninfo(struct in_conninfo *inc, const char *name, int indent)
+{
+ char faddr_str[48], laddr_str[48];
+
+ db_print_indent(indent);
+ db_printf("%s at %p\n", name, inc);
+
+ indent += 2;
+
+#ifdef INET6
+ if (inc->inc_flags & INC_ISIPV6) {
+ /* IPv6. */
+ ip6_sprintf(laddr_str, &inc->inc6_laddr);
+ ip6_sprintf(faddr_str, &inc->inc6_faddr);
+ } else {
+#endif
+ /* IPv4. */
+ inet_ntoa_r(inc->inc_laddr, laddr_str);
+ inet_ntoa_r(inc->inc_faddr, faddr_str);
+#ifdef INET6
+ }
+#endif
+ db_print_indent(indent);
+ db_printf("inc_laddr %s inc_lport %u\n", laddr_str,
+ ntohs(inc->inc_lport));
+ db_print_indent(indent);
+ db_printf("inc_faddr %s inc_fport %u\n", faddr_str,
+ ntohs(inc->inc_fport));
+}
+
+static void
+db_print_inpflags(int inp_flags)
+{
+ int comma;
+
+ comma = 0;
+ if (inp_flags & INP_RECVOPTS) {
+ db_printf("%sINP_RECVOPTS", comma ? ", " : "");
+ comma = 1;
+ }
+ if (inp_flags & INP_RECVRETOPTS) {
+ db_printf("%sINP_RECVRETOPTS", comma ? ", " : "");
+ comma = 1;
+ }
+ if (inp_flags & INP_RECVDSTADDR) {
+ db_printf("%sINP_RECVDSTADDR", comma ? ", " : "");
+ comma = 1;
+ }
+ if (inp_flags & INP_HDRINCL) {
+ db_printf("%sINP_HDRINCL", comma ? ", " : "");
+ comma = 1;
+ }
+ if (inp_flags & INP_HIGHPORT) {
+ db_printf("%sINP_HIGHPORT", comma ? ", " : "");
+ comma = 1;
+ }
+ if (inp_flags & INP_LOWPORT) {
+ db_printf("%sINP_LOWPORT", comma ? ", " : "");
+ comma = 1;
+ }
+ if (inp_flags & INP_ANONPORT) {
+ db_printf("%sINP_ANONPORT", comma ? ", " : "");
+ comma = 1;
+ }
+ if (inp_flags & INP_RECVIF) {
+ db_printf("%sINP_RECVIF", comma ? ", " : "");
+ comma = 1;
+ }
+ if (inp_flags & INP_MTUDISC) {
+ db_printf("%sINP_MTUDISC", comma ? ", " : "");
+ comma = 1;
+ }
+ if (inp_flags & INP_FAITH) {
+ db_printf("%sINP_FAITH", comma ? ", " : "");
+ comma = 1;
+ }
+ if (inp_flags & INP_RECVTTL) {
+ db_printf("%sINP_RECVTTL", comma ? ", " : "");
+ comma = 1;
+ }
+ if (inp_flags & INP_DONTFRAG) {
+ db_printf("%sINP_DONTFRAG", comma ? ", " : "");
+ comma = 1;
+ }
+ if (inp_flags & IN6P_IPV6_V6ONLY) {
+ db_printf("%sIN6P_IPV6_V6ONLY", comma ? ", " : "");
+ comma = 1;
+ }
+ if (inp_flags & IN6P_PKTINFO) {
+ db_printf("%sIN6P_PKTINFO", comma ? ", " : "");
+ comma = 1;
+ }
+ if (inp_flags & IN6P_HOPLIMIT) {
+ db_printf("%sIN6P_HOPLIMIT", comma ? ", " : "");
+ comma = 1;
+ }
+ if (inp_flags & IN6P_HOPOPTS) {
+ db_printf("%sIN6P_HOPOPTS", comma ? ", " : "");
+ comma = 1;
+ }
+ if (inp_flags & IN6P_DSTOPTS) {
+ db_printf("%sIN6P_DSTOPTS", comma ? ", " : "");
+ comma = 1;
+ }
+ if (inp_flags & IN6P_RTHDR) {
+ db_printf("%sIN6P_RTHDR", comma ? ", " : "");
+ comma = 1;
+ }
+ if (inp_flags & IN6P_RTHDRDSTOPTS) {
+ db_printf("%sIN6P_RTHDRDSTOPTS", comma ? ", " : "");
+ comma = 1;
+ }
+ if (inp_flags & IN6P_TCLASS) {
+ db_printf("%sIN6P_TCLASS", comma ? ", " : "");
+ comma = 1;
+ }
+ if (inp_flags & IN6P_AUTOFLOWLABEL) {
+ db_printf("%sIN6P_AUTOFLOWLABEL", comma ? ", " : "");
+ comma = 1;
+ }
+ if (inp_flags & INP_TIMEWAIT) {
+ db_printf("%sINP_TIMEWAIT", comma ? ", " : "");
+ comma = 1;
+ }
+ if (inp_flags & INP_ONESBCAST) {
+ db_printf("%sINP_ONESBCAST", comma ? ", " : "");
+ comma = 1;
+ }
+ if (inp_flags & INP_DROPPED) {
+ db_printf("%sINP_DROPPED", comma ? ", " : "");
+ comma = 1;
+ }
+ if (inp_flags & INP_SOCKREF) {
+ db_printf("%sINP_SOCKREF", comma ? ", " : "");
+ comma = 1;
+ }
+ if (inp_flags & IN6P_RFC2292) {
+ db_printf("%sIN6P_RFC2292", comma ? ", " : "");
+ comma = 1;
+ }
+ if (inp_flags & IN6P_MTU) {
+ db_printf("IN6P_MTU%s", comma ? ", " : "");
+ comma = 1;
+ }
+}
+
+static void
+db_print_inpvflag(u_char inp_vflag)
+{
+ int comma;
+
+ comma = 0;
+ if (inp_vflag & INP_IPV4) {
+ db_printf("%sINP_IPV4", comma ? ", " : "");
+ comma = 1;
+ }
+ if (inp_vflag & INP_IPV6) {
+ db_printf("%sINP_IPV6", comma ? ", " : "");
+ comma = 1;
+ }
+ if (inp_vflag & INP_IPV6PROTO) {
+ db_printf("%sINP_IPV6PROTO", comma ? ", " : "");
+ comma = 1;
+ }
+}
+
+static void
+db_print_inpcb(struct inpcb *inp, const char *name, int indent)
+{
+
+ db_print_indent(indent);
+ db_printf("%s at %p\n", name, inp);
+
+ indent += 2;
+
+ db_print_indent(indent);
+ db_printf("inp_flow: 0x%x\n", inp->inp_flow);
+
+ db_print_inconninfo(&inp->inp_inc, "inp_conninfo", indent);
+
+ db_print_indent(indent);
+ db_printf("inp_ppcb: %p inp_pcbinfo: %p inp_socket: %p\n",
+ inp->inp_ppcb, inp->inp_pcbinfo, inp->inp_socket);
+
+ db_print_indent(indent);
+ db_printf("inp_label: %p inp_flags: 0x%x (",
+ inp->inp_label, inp->inp_flags);
+ db_print_inpflags(inp->inp_flags);
+ db_printf(")\n");
+
+ db_print_indent(indent);
+ db_printf("inp_sp: %p inp_vflag: 0x%x (", inp->inp_sp,
+ inp->inp_vflag);
+ db_print_inpvflag(inp->inp_vflag);
+ db_printf(")\n");
+
+ db_print_indent(indent);
+ db_printf("inp_ip_ttl: %d inp_ip_p: %d inp_ip_minttl: %d\n",
+ inp->inp_ip_ttl, inp->inp_ip_p, inp->inp_ip_minttl);
+
+ db_print_indent(indent);
+#ifdef INET6
+ if (inp->inp_vflag & INP_IPV6) {
+ db_printf("in6p_options: %p in6p_outputopts: %p "
+ "in6p_moptions: %p\n", inp->in6p_options,
+ inp->in6p_outputopts, inp->in6p_moptions);
+ db_printf("in6p_icmp6filt: %p in6p_cksum %d "
+ "in6p_hops %u\n", inp->in6p_icmp6filt, inp->in6p_cksum,
+ inp->in6p_hops);
+ } else
+#endif
+ {
+ db_printf("inp_ip_tos: %d inp_ip_options: %p "
+ "inp_ip_moptions: %p\n", inp->inp_ip_tos,
+ inp->inp_options, inp->inp_moptions);
+ }
+
+ db_print_indent(indent);
+ db_printf("inp_phd: %p inp_gencnt: %ju\n", inp->inp_phd,
+ (uintmax_t)inp->inp_gencnt);
+}
+
+DB_SHOW_COMMAND(inpcb, db_show_inpcb)
+{
+ struct inpcb *inp;
+
+ if (!have_addr) {
+ db_printf("usage: show inpcb <addr>\n");
+ return;
+ }
+ inp = (struct inpcb *)addr;
+
+ db_print_inpcb(inp, "inpcb", 0);
+}
+#endif
diff --git a/rtems/freebsd/netinet/in_pcb.h b/rtems/freebsd/netinet/in_pcb.h
new file mode 100644
index 00000000..516edd50
--- /dev/null
+++ b/rtems/freebsd/netinet/in_pcb.h
@@ -0,0 +1,525 @@
+/*-
+ * Copyright (c) 1982, 1986, 1990, 1993
+ * The Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)in_pcb.h 8.1 (Berkeley) 6/10/93
+ * $FreeBSD$
+ */
+
+#ifndef _NETINET_IN_PCB_HH_
+#define _NETINET_IN_PCB_HH_
+
+#include <rtems/freebsd/sys/queue.h>
+#include <rtems/freebsd/sys/_lock.h>
+#include <rtems/freebsd/sys/_mutex.h>
+#include <rtems/freebsd/sys/_rwlock.h>
+
+#ifdef _KERNEL
+#include <rtems/freebsd/sys/rwlock.h>
+#include <rtems/freebsd/net/vnet.h>
+#endif
+
+#define in6pcb inpcb /* for KAME src sync over BSD*'s */
+#define in6p_sp inp_sp /* for KAME src sync over BSD*'s */
+struct inpcbpolicy;
+
+/*
+ * struct inpcb is the common protocol control block structure used in most
+ * IP transport protocols.
+ *
+ * Pointers to local and foreign host table entries, local and foreign socket
+ * numbers, and pointers up (to a socket structure) and down (to a
+ * protocol-specific control block) are stored here.
+ */
+LIST_HEAD(inpcbhead, inpcb);
+LIST_HEAD(inpcbporthead, inpcbport);
+typedef u_quad_t inp_gen_t;
+
+/*
+ * A PCB bound to the AF_INET6 null (unspecified) laddr can receive AF_INET
+ * input packets, so the AF_INET6 null laddr is also used as the AF_INET
+ * null laddr, by means of the following structure.
+ */
+struct in_addr_4in6 {
+ u_int32_t ia46_pad32[3];
+ struct in_addr ia46_addr4;
+};
+
+/*
+ * NOTE: ipv6 addrs should be 64-bit aligned, per RFC 2553. in_conninfo has
+ * some extra padding to accomplish this.
+ */
+struct in_endpoints {
+ u_int16_t ie_fport; /* foreign port */
+ u_int16_t ie_lport; /* local port */
+ /* protocol dependent part, local and foreign addr */
+ union {
+ /* foreign host table entry */
+ struct in_addr_4in6 ie46_foreign;
+ struct in6_addr ie6_foreign;
+ } ie_dependfaddr;
+ union {
+ /* local host table entry */
+ struct in_addr_4in6 ie46_local;
+ struct in6_addr ie6_local;
+ } ie_dependladdr;
+};
+#define ie_faddr ie_dependfaddr.ie46_foreign.ia46_addr4
+#define ie_laddr ie_dependladdr.ie46_local.ia46_addr4
+#define ie6_faddr ie_dependfaddr.ie6_foreign
+#define ie6_laddr ie_dependladdr.ie6_local
+
+/*
+ * XXX The defines for inc_* are hacks and should be changed to direct
+ * references.
+ */
+struct in_conninfo {
+ u_int8_t inc_flags;
+ u_int8_t inc_len;
+ u_int16_t inc_fibnum; /* XXX was pad, 16 bits is plenty */
+ /* protocol dependent part */
+ struct in_endpoints inc_ie;
+};
+
+/*
+ * Flags for inc_flags.
+ */
+#define INC_ISIPV6 0x01
+
+#define inc_isipv6 inc_flags /* temp compatibility */
+#define inc_fport inc_ie.ie_fport
+#define inc_lport inc_ie.ie_lport
+#define inc_faddr inc_ie.ie_faddr
+#define inc_laddr inc_ie.ie_laddr
+#define inc6_faddr inc_ie.ie6_faddr
+#define inc6_laddr inc_ie.ie6_laddr
+
+struct icmp6_filter;
+
+/*-
+ * struct inpcb captures the network layer state for TCP, UDP, and raw IPv4
+ * and IPv6 sockets. In the case of TCP, further per-connection state is
+ * hung off of inp_ppcb most of the time. Almost all fields of struct inpcb
+ * are static after creation or protected by a per-inpcb rwlock, inp_lock. A
+ * few fields also require the global pcbinfo lock for the inpcb to be held,
+ * when modified, such as the global connection lists and hashes, as well as
+ * binding information (which affects which hash a connection is on). This
+ * model means that connections can be looked up without holding the
+ * per-connection lock, which is important for performance when attempting to
+ * find the connection for a packet given its IP and port tuple. Writing
+ * to these fields requires that write locks be held on both the inpcb
+ * and global locks.
+ *
+ * Key:
+ * (c) - Constant after initialization
+ * (i) - Protected by the inpcb lock
+ * (p) - Protected by the pcbinfo lock for the inpcb
+ * (s) - Protected by another subsystem's locks
+ * (x) - Undefined locking
+ *
+ * A few other notes:
+ *
+ * When a read lock is held, stability of the field is guaranteed; to write
+ * to a field, a write lock must generally be held.
+ *
+ * netinet/netinet6-layer code should not assume that the inp_socket pointer
+ * is safe to dereference without inp_lock being held, even for protocols
+ * other than TCP (where the inpcb persists during TIMEWAIT even after the
+ * socket has been freed), or there may be close(2)-related races.
+ *
+ * The inp_vflag field is overloaded, and would otherwise ideally be (c).
+ */
+struct inpcb {
+ LIST_ENTRY(inpcb) inp_hash; /* (i/p) hash list */
+ LIST_ENTRY(inpcb) inp_list; /* (i/p) list for all PCBs for proto */
+ void *inp_ppcb; /* (i) pointer to per-protocol pcb */
+ struct inpcbinfo *inp_pcbinfo; /* (c) PCB list info */
+ struct socket *inp_socket; /* (i) back pointer to socket */
+ struct ucred *inp_cred; /* (c) cache of socket cred */
+ u_int32_t inp_flow; /* (i) IPv6 flow information */
+ int inp_flags; /* (i) generic IP/datagram flags */
+ int inp_flags2; /* (i) generic IP/datagram flags #2*/
+ u_char inp_vflag; /* (i) IP version flag (v4/v6) */
+ u_char inp_ip_ttl; /* (i) time to live proto */
+ u_char inp_ip_p; /* (c) protocol proto */
+ u_char inp_ip_minttl; /* (i) minimum TTL or drop */
+ uint32_t inp_flowid; /* (x) flow id / queue id */
+ u_int inp_refcount; /* (i) refcount */
+ void *inp_pspare[4]; /* (x) rtentry / general use */
+ u_int inp_ispare[4]; /* general use */
+
+ /* Local and foreign ports, local and foreign addr. */
+ struct in_conninfo inp_inc; /* (i/p) list for PCB's local port */
+
+ /* MAC and IPSEC policy information. */
+ struct label *inp_label; /* (i) MAC label */
+ struct inpcbpolicy *inp_sp; /* (s) for IPSEC */
+
+ /* Protocol-dependent part; options. */
+ struct {
+ u_char inp4_ip_tos; /* (i) type of service proto */
+ struct mbuf *inp4_options; /* (i) IP options */
+ struct ip_moptions *inp4_moptions; /* (i) IP mcast options */
+ } inp_depend4;
+ struct {
+ /* (i) IP options */
+ struct mbuf *inp6_options;
+ /* (i) IP6 options for outgoing packets */
+ struct ip6_pktopts *inp6_outputopts;
+ /* (i) IP multicast options */
+ struct ip6_moptions *inp6_moptions;
+ /* (i) ICMPv6 code type filter */
+ struct icmp6_filter *inp6_icmp6filt;
+ /* (i) IPV6_CHECKSUM setsockopt */
+ int inp6_cksum;
+ short inp6_hops;
+ } inp_depend6;
+ LIST_ENTRY(inpcb) inp_portlist; /* (i/p) */
+ struct inpcbport *inp_phd; /* (i/p) head of this list */
+#define inp_zero_size offsetof(struct inpcb, inp_gencnt)
+ inp_gen_t inp_gencnt; /* (c) generation count */
+ struct llentry *inp_lle; /* cached L2 information */
+ struct rtentry *inp_rt; /* cached L3 information */
+ struct rwlock inp_lock;
+};
+#define inp_fport inp_inc.inc_fport
+#define inp_lport inp_inc.inc_lport
+#define inp_faddr inp_inc.inc_faddr
+#define inp_laddr inp_inc.inc_laddr
+#define inp_ip_tos inp_depend4.inp4_ip_tos
+#define inp_options inp_depend4.inp4_options
+#define inp_moptions inp_depend4.inp4_moptions
+
+#define in6p_faddr inp_inc.inc6_faddr
+#define in6p_laddr inp_inc.inc6_laddr
+#define in6p_hops inp_depend6.inp6_hops /* default hop limit */
+#define in6p_flowinfo inp_flow
+#define in6p_options inp_depend6.inp6_options
+#define in6p_outputopts inp_depend6.inp6_outputopts
+#define in6p_moptions inp_depend6.inp6_moptions
+#define in6p_icmp6filt inp_depend6.inp6_icmp6filt
+#define in6p_cksum inp_depend6.inp6_cksum
+
+#define inp_vnet inp_pcbinfo->ipi_vnet
+
+/*
+ * The range of the generation count, as used in this implementation, is 9e18.
+ * We would have to create 300 billion connections per second for this number
+ * to roll over in a year. This seems sufficiently unlikely that we simply
+ * don't concern ourselves with that possibility.
+ */
+
+/*
+ * Interface exported to userland by various protocols which use inpcbs. Hack
+ * alert -- only define if struct xsocket is in scope.
+ */
+#ifdef _SYS_SOCKETVAR_HH_
+struct xinpcb {
+ size_t xi_len; /* length of this structure */
+ struct inpcb xi_inp;
+ struct xsocket xi_socket;
+ u_quad_t xi_alignment_hack;
+};
+
+struct xinpgen {
+ size_t xig_len; /* length of this structure */
+ u_int xig_count; /* number of PCBs at this time */
+ inp_gen_t xig_gen; /* generation count at this time */
+ so_gen_t xig_sogen; /* socket generation count at this time */
+};
+#endif /* _SYS_SOCKETVAR_HH_ */
+
+struct inpcbport {
+ LIST_ENTRY(inpcbport) phd_hash;
+ struct inpcbhead phd_pcblist;
+ u_short phd_port;
+};
+
+/*
+ * Global data structure for each high-level protocol (UDP, TCP, ...) in both
+ * IPv4 and IPv6. Holds inpcb lists and information for managing them.
+ */
+struct inpcbinfo {
+ /*
+ * Global list of inpcbs on the protocol.
+ */
+ struct inpcbhead *ipi_listhead;
+ u_int ipi_count;
+
+ /*
+ * Global hash of inpcbs, hashed by local and foreign addresses and
+ * port numbers.
+ */
+ struct inpcbhead *ipi_hashbase;
+ u_long ipi_hashmask;
+
+ /*
+ * Global hash of inpcbs, hashed by only local port number.
+ */
+ struct inpcbporthead *ipi_porthashbase;
+ u_long ipi_porthashmask;
+
+ /*
+ * Fields associated with port lookup and allocation.
+ */
+ u_short ipi_lastport;
+ u_short ipi_lastlow;
+ u_short ipi_lasthi;
+
+ /*
+ * UMA zone from which inpcbs are allocated for this protocol.
+ */
+ struct uma_zone *ipi_zone;
+
+ /*
+ * Generation count--incremented each time a connection is allocated
+ * or freed.
+ */
+ u_quad_t ipi_gencnt;
+ struct rwlock ipi_lock;
+
+ /*
+ * Pointer to network stack instance
+ */
+ struct vnet *ipi_vnet;
+
+ /*
+ * general use 2
+ */
+ void *ipi_pspare[2];
+};
+
+#define INP_LOCK_INIT(inp, d, t) \
+ rw_init_flags(&(inp)->inp_lock, (t), RW_RECURSE | RW_DUPOK)
+#define INP_LOCK_DESTROY(inp) rw_destroy(&(inp)->inp_lock)
+#define INP_RLOCK(inp) rw_rlock(&(inp)->inp_lock)
+#define INP_WLOCK(inp) rw_wlock(&(inp)->inp_lock)
+#define INP_TRY_RLOCK(inp) rw_try_rlock(&(inp)->inp_lock)
+#define INP_TRY_WLOCK(inp) rw_try_wlock(&(inp)->inp_lock)
+#define INP_RUNLOCK(inp) rw_runlock(&(inp)->inp_lock)
+#define INP_WUNLOCK(inp) rw_wunlock(&(inp)->inp_lock)
+#define INP_TRY_UPGRADE(inp) rw_try_upgrade(&(inp)->inp_lock)
+#define INP_DOWNGRADE(inp) rw_downgrade(&(inp)->inp_lock)
+#define INP_WLOCKED(inp) rw_wowned(&(inp)->inp_lock)
+#define INP_LOCK_ASSERT(inp) rw_assert(&(inp)->inp_lock, RA_LOCKED)
+#define INP_RLOCK_ASSERT(inp) rw_assert(&(inp)->inp_lock, RA_RLOCKED)
+#define INP_WLOCK_ASSERT(inp) rw_assert(&(inp)->inp_lock, RA_WLOCKED)
+#define INP_UNLOCK_ASSERT(inp) rw_assert(&(inp)->inp_lock, RA_UNLOCKED)
+
+#ifdef _KERNEL
+/*
+ * These locking functions are for inpcb consumers outside of sys/netinet,
+ * more specifically, they were added for the benefit of TOE drivers. The
+ * macros are reserved for use by the stack.
+ */
+void inp_wlock(struct inpcb *);
+void inp_wunlock(struct inpcb *);
+void inp_rlock(struct inpcb *);
+void inp_runlock(struct inpcb *);
+
+#ifdef INVARIANTS
+void inp_lock_assert(struct inpcb *);
+void inp_unlock_assert(struct inpcb *);
+#else
+static __inline void
+inp_lock_assert(struct inpcb *inp __unused)
+{
+}
+
+static __inline void
+inp_unlock_assert(struct inpcb *inp __unused)
+{
+}
+
+#endif
+
+void inp_apply_all(void (*func)(struct inpcb *, void *), void *arg);
+int inp_ip_tos_get(const struct inpcb *inp);
+void inp_ip_tos_set(struct inpcb *inp, int val);
+struct socket *
+ inp_inpcbtosocket(struct inpcb *inp);
+struct tcpcb *
+ inp_inpcbtotcpcb(struct inpcb *inp);
+void inp_4tuple_get(struct inpcb *inp, uint32_t *laddr, uint16_t *lp,
+ uint32_t *faddr, uint16_t *fp);
+
+#endif /* _KERNEL */
+
+#define INP_INFO_LOCK_INIT(ipi, d) \
+ rw_init_flags(&(ipi)->ipi_lock, (d), RW_RECURSE)
+#define INP_INFO_LOCK_DESTROY(ipi) rw_destroy(&(ipi)->ipi_lock)
+#define INP_INFO_RLOCK(ipi) rw_rlock(&(ipi)->ipi_lock)
+#define INP_INFO_WLOCK(ipi) rw_wlock(&(ipi)->ipi_lock)
+#define INP_INFO_TRY_RLOCK(ipi) rw_try_rlock(&(ipi)->ipi_lock)
+#define INP_INFO_TRY_WLOCK(ipi) rw_try_wlock(&(ipi)->ipi_lock)
+#define INP_INFO_TRY_UPGRADE(ipi) rw_try_upgrade(&(ipi)->ipi_lock)
+#define INP_INFO_RUNLOCK(ipi) rw_runlock(&(ipi)->ipi_lock)
+#define INP_INFO_WUNLOCK(ipi) rw_wunlock(&(ipi)->ipi_lock)
+#define INP_INFO_LOCK_ASSERT(ipi) rw_assert(&(ipi)->ipi_lock, RA_LOCKED)
+#define INP_INFO_RLOCK_ASSERT(ipi) rw_assert(&(ipi)->ipi_lock, RA_RLOCKED)
+#define INP_INFO_WLOCK_ASSERT(ipi) rw_assert(&(ipi)->ipi_lock, RA_WLOCKED)
+#define INP_INFO_UNLOCK_ASSERT(ipi) rw_assert(&(ipi)->ipi_lock, RA_UNLOCKED)
+
+#define INP_PCBHASH(faddr, lport, fport, mask) \
+ (((faddr) ^ ((faddr) >> 16) ^ ntohs((lport) ^ (fport))) & (mask))
+#define INP_PCBPORTHASH(lport, mask) \
+ (ntohs((lport)) & (mask))
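+
+/*
+ * Editorial sketch (not part of the upstream sources): bucket selection
+ * for a connection's inpcb, as done in in_pcbinshash(). Addresses and
+ * ports are kept in network byte order; the two 16-bit halves of the
+ * foreign address are folded together, xor'ed with the ports, and masked
+ * down to the table size (a power of two minus one). The fragment assumes
+ * inp and pcbinfo are in scope.
+ */
+#if 0
+ head = &pcbinfo->ipi_hashbase[INP_PCBHASH(inp->inp_faddr.s_addr,
+ inp->inp_lport, inp->inp_fport, pcbinfo->ipi_hashmask)];
+#endif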
+
+/*
+ * Flags for inp_vflags -- historically version flags only
+ */
+#define INP_IPV4 0x1
+#define INP_IPV6 0x2
+#define INP_IPV6PROTO 0x4 /* opened under IPv6 protocol */
+
+/*
+ * Flags for inp_flags.
+ */
+#define INP_RECVOPTS 0x00000001 /* receive incoming IP options */
+#define INP_RECVRETOPTS 0x00000002 /* receive IP options for reply */
+#define INP_RECVDSTADDR 0x00000004 /* receive IP dst address */
+#define INP_HDRINCL 0x00000008 /* user supplies entire IP header */
+#define INP_HIGHPORT 0x00000010 /* user wants "high" port binding */
+#define INP_LOWPORT 0x00000020 /* user wants "low" port binding */
+#define INP_ANONPORT 0x00000040 /* port chosen for user */
+#define INP_RECVIF 0x00000080 /* receive incoming interface */
+#define INP_MTUDISC 0x00000100 /* user can do MTU discovery */
+#define INP_FAITH 0x00000200 /* accept FAITH'ed connections */
+#define INP_RECVTTL 0x00000400 /* receive incoming IP TTL */
+#define INP_DONTFRAG 0x00000800 /* don't fragment packet */
+#define INP_BINDANY 0x00001000 /* allow bind to any address */
+#define INP_INHASHLIST 0x00002000 /* in_pcbinshash() has been called */
+#define IN6P_IPV6_V6ONLY 0x00008000 /* restrict AF_INET6 socket for v6 */
+#define IN6P_PKTINFO 0x00010000 /* receive IP6 dst and I/F */
+#define IN6P_HOPLIMIT 0x00020000 /* receive hoplimit */
+#define IN6P_HOPOPTS 0x00040000 /* receive hop-by-hop options */
+#define IN6P_DSTOPTS 0x00080000 /* receive dst options after rthdr */
+#define IN6P_RTHDR 0x00100000 /* receive routing header */
+#define IN6P_RTHDRDSTOPTS 0x00200000 /* receive dstoptions before rthdr */
+#define IN6P_TCLASS 0x00400000 /* receive traffic class value */
+#define IN6P_AUTOFLOWLABEL 0x00800000 /* attach flowlabel automatically */
+#define INP_TIMEWAIT 0x01000000 /* in TIMEWAIT, ppcb is tcptw */
+#define INP_ONESBCAST 0x02000000 /* send all-ones broadcast */
+#define INP_DROPPED 0x04000000 /* protocol drop flag */
+#define INP_SOCKREF 0x08000000 /* strong socket reference */
+#define INP_SW_FLOWID 0x10000000 /* software generated flow id */
+#define INP_HW_FLOWID 0x20000000 /* hardware generated flow id */
+#define IN6P_RFC2292 0x40000000 /* used RFC2292 API on the socket */
+#define IN6P_MTU 0x80000000 /* receive path MTU */
+
+#define INP_CONTROLOPTS (INP_RECVOPTS|INP_RECVRETOPTS|INP_RECVDSTADDR|\
+ INP_RECVIF|INP_RECVTTL|\
+ IN6P_PKTINFO|IN6P_HOPLIMIT|IN6P_HOPOPTS|\
+ IN6P_DSTOPTS|IN6P_RTHDR|IN6P_RTHDRDSTOPTS|\
+ IN6P_TCLASS|IN6P_AUTOFLOWLABEL|IN6P_RFC2292|\
+ IN6P_MTU)
+
+/*
+ * Flags for inp_flags2.
+ */
+#define INP_LLE_VALID 0x00000001 /* cached lle is valid */
+#define INP_RT_VALID 0x00000002 /* cached rtentry is valid */
+
+#define INPLOOKUP_WILDCARD 1
+#define sotoinpcb(so) ((struct inpcb *)(so)->so_pcb)
+#define sotoin6pcb(so) sotoinpcb(so) /* for KAME src sync over BSD*'s */
+
+#define INP_SOCKAF(so) so->so_proto->pr_domain->dom_family
+
+#define INP_CHECK_SOCKAF(so, af) (INP_SOCKAF(so) == af)
+
+#ifdef _KERNEL
+VNET_DECLARE(int, ipport_reservedhigh);
+VNET_DECLARE(int, ipport_reservedlow);
+VNET_DECLARE(int, ipport_lowfirstauto);
+VNET_DECLARE(int, ipport_lowlastauto);
+VNET_DECLARE(int, ipport_firstauto);
+VNET_DECLARE(int, ipport_lastauto);
+VNET_DECLARE(int, ipport_hifirstauto);
+VNET_DECLARE(int, ipport_hilastauto);
+VNET_DECLARE(int, ipport_randomized);
+VNET_DECLARE(int, ipport_randomcps);
+VNET_DECLARE(int, ipport_randomtime);
+VNET_DECLARE(int, ipport_stoprandom);
+VNET_DECLARE(int, ipport_tcpallocs);
+
+#define V_ipport_reservedhigh VNET(ipport_reservedhigh)
+#define V_ipport_reservedlow VNET(ipport_reservedlow)
+#define V_ipport_lowfirstauto VNET(ipport_lowfirstauto)
+#define V_ipport_lowlastauto VNET(ipport_lowlastauto)
+#define V_ipport_firstauto VNET(ipport_firstauto)
+#define V_ipport_lastauto VNET(ipport_lastauto)
+#define V_ipport_hifirstauto VNET(ipport_hifirstauto)
+#define V_ipport_hilastauto VNET(ipport_hilastauto)
+#define V_ipport_randomized VNET(ipport_randomized)
+#define V_ipport_randomcps VNET(ipport_randomcps)
+#define V_ipport_randomtime VNET(ipport_randomtime)
+#define V_ipport_stoprandom VNET(ipport_stoprandom)
+#define V_ipport_tcpallocs VNET(ipport_tcpallocs)
+
+extern struct callout ipport_tick_callout;
+
+void in_pcbpurgeif0(struct inpcbinfo *, struct ifnet *);
+int in_pcballoc(struct socket *, struct inpcbinfo *);
+int in_pcbbind(struct inpcb *, struct sockaddr *, struct ucred *);
+int in_pcbbind_setup(struct inpcb *, struct sockaddr *, in_addr_t *,
+ u_short *, struct ucred *);
+int in_pcbconnect(struct inpcb *, struct sockaddr *, struct ucred *);
+int in_pcbconnect_setup(struct inpcb *, struct sockaddr *, in_addr_t *,
+ u_short *, in_addr_t *, u_short *, struct inpcb **,
+ struct ucred *);
+void in_pcbdetach(struct inpcb *);
+void in_pcbdisconnect(struct inpcb *);
+void in_pcbdrop(struct inpcb *);
+void in_pcbfree(struct inpcb *);
+int in_pcbinshash(struct inpcb *);
+struct inpcb *
+ in_pcblookup_local(struct inpcbinfo *,
+ struct in_addr, u_short, int, struct ucred *);
+struct inpcb *
+ in_pcblookup_hash(struct inpcbinfo *, struct in_addr, u_int,
+ struct in_addr, u_int, int, struct ifnet *);
+#ifndef __rtems__
+void in_pcbnotifyall(struct inpcbinfo *pcbinfo, struct in_addr,
+ int, struct inpcb *(*)(struct inpcb *, int));
+#else
+void in_pcbnotifyall(struct inpcbinfo *pcbinfo, struct in_addr faddr, int errno,
+ struct inpcb *(*notify)(struct inpcb *, int));
+#endif
+void in_pcbref(struct inpcb *);
+void in_pcbrehash(struct inpcb *);
+int in_pcbrele(struct inpcb *);
+void in_pcbsetsolabel(struct socket *so);
+int in_getpeeraddr(struct socket *so, struct sockaddr **nam);
+int in_getsockaddr(struct socket *so, struct sockaddr **nam);
+struct sockaddr *
+ in_sockaddr(in_port_t port, struct in_addr *addr);
+void in_pcbsosetlabel(struct socket *so);
+void ipport_tick(void *xtp);
+#endif /* _KERNEL */
+
+#endif /* !_NETINET_IN_PCB_HH_ */
diff --git a/rtems/freebsd/netinet/in_proto.c b/rtems/freebsd/netinet/in_proto.c
new file mode 100644
index 00000000..8e8d737e
--- /dev/null
+++ b/rtems/freebsd/netinet/in_proto.c
@@ -0,0 +1,400 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 1982, 1986, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)in_proto.c 8.2 (Berkeley) 2/9/95
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/freebsd/local/opt_ipx.h>
+#include <rtems/freebsd/local/opt_mrouting.h>
+#include <rtems/freebsd/local/opt_ipsec.h>
+#include <rtems/freebsd/local/opt_inet6.h>
+#include <rtems/freebsd/local/opt_pf.h>
+#include <rtems/freebsd/local/opt_sctp.h>
+#include <rtems/freebsd/local/opt_mpath.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/domain.h>
+#include <rtems/freebsd/sys/proc.h>
+#include <rtems/freebsd/sys/protosw.h>
+#include <rtems/freebsd/sys/queue.h>
+#include <rtems/freebsd/sys/sysctl.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/route.h>
+#ifdef RADIX_MPATH
+#include <rtems/freebsd/net/radix_mpath.h>
+#endif
+#include <rtems/freebsd/net/vnet.h>
+
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/in_systm.h>
+#include <rtems/freebsd/netinet/in_var.h>
+#include <rtems/freebsd/netinet/ip.h>
+#include <rtems/freebsd/netinet/ip_var.h>
+#include <rtems/freebsd/netinet/ip_icmp.h>
+#include <rtems/freebsd/netinet/igmp_var.h>
+#include <rtems/freebsd/netinet/tcp.h>
+#include <rtems/freebsd/netinet/tcp_timer.h>
+#include <rtems/freebsd/netinet/tcp_var.h>
+#include <rtems/freebsd/netinet/udp.h>
+#include <rtems/freebsd/netinet/udp_var.h>
+#include <rtems/freebsd/netinet/ip_encap.h>
+
+/*
+ * TCP/IP protocol family: IP, ICMP, UDP, TCP.
+ */
+
+static struct pr_usrreqs nousrreqs;
+
+#ifdef IPSEC
+#include <rtems/freebsd/netipsec/ipsec.h>
+#endif /* IPSEC */
+
+#ifdef SCTP
+#include <rtems/freebsd/netinet/in_pcb.h>
+#include <rtems/freebsd/netinet/sctp_pcb.h>
+#include <rtems/freebsd/netinet/sctp.h>
+#include <rtems/freebsd/netinet/sctp_var.h>
+#endif /* SCTP */
+
+#ifdef DEV_PFSYNC
+#include <rtems/freebsd/net/pfvar.h>
+#include <rtems/freebsd/net/if_pfsync.h>
+#endif
+
+extern struct domain inetdomain;
+
+/* Spacer for loadable protocols. */
+#define IPPROTOSPACER \
+{ \
+ .pr_domain = &inetdomain, \
+ .pr_protocol = PROTO_SPACER, \
+ .pr_usrreqs = &nousrreqs \
+}
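+
+/*
+ * Illustrative note (not part of the original file): a loadable protocol
+ * later claims one of these spacer slots at run time, e.g.
+ *
+ *	error = pf_proto_register(PF_INET, &my_protosw);
+ *
+ * where my_protosw is a hypothetical, fully initialized struct protosw.
+ * PROTO_SPACER marks the slot as replaceable for such registrations.
+ */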
+
+struct protosw inetsw[] = {
+{
+ .pr_type = 0,
+ .pr_domain = &inetdomain,
+ .pr_protocol = IPPROTO_IP,
+ .pr_init = ip_init,
+#ifdef VIMAGE
+ .pr_destroy = ip_destroy,
+#endif
+ .pr_slowtimo = ip_slowtimo,
+ .pr_drain = ip_drain,
+ .pr_usrreqs = &nousrreqs
+},
+{
+ .pr_type = SOCK_DGRAM,
+ .pr_domain = &inetdomain,
+ .pr_protocol = IPPROTO_UDP,
+ .pr_flags = PR_ATOMIC|PR_ADDR,
+ .pr_input = udp_input,
+ .pr_ctlinput = udp_ctlinput,
+ .pr_ctloutput = udp_ctloutput,
+ .pr_init = udp_init,
+#ifdef VIMAGE
+ .pr_destroy = udp_destroy,
+#endif
+ .pr_usrreqs = &udp_usrreqs
+},
+{
+ .pr_type = SOCK_STREAM,
+ .pr_domain = &inetdomain,
+ .pr_protocol = IPPROTO_TCP,
+ .pr_flags = PR_CONNREQUIRED|PR_IMPLOPCL|PR_WANTRCVD,
+ .pr_input = tcp_input,
+ .pr_ctlinput = tcp_ctlinput,
+ .pr_ctloutput = tcp_ctloutput,
+ .pr_init = tcp_init,
+#ifdef VIMAGE
+ .pr_destroy = tcp_destroy,
+#endif
+ .pr_slowtimo = tcp_slowtimo,
+ .pr_drain = tcp_drain,
+ .pr_usrreqs = &tcp_usrreqs
+},
+#ifdef SCTP
+{
+ .pr_type = SOCK_DGRAM,
+ .pr_domain = &inetdomain,
+ .pr_protocol = IPPROTO_SCTP,
+ .pr_flags = PR_WANTRCVD,
+ .pr_input = sctp_input,
+ .pr_ctlinput = sctp_ctlinput,
+ .pr_ctloutput = sctp_ctloutput,
+ .pr_init = sctp_init,
+#ifdef VIMAGE
+ .pr_destroy = sctp_finish,
+#endif
+ .pr_drain = sctp_drain,
+ .pr_usrreqs = &sctp_usrreqs
+},
+{
+ .pr_type = SOCK_SEQPACKET,
+ .pr_domain = &inetdomain,
+ .pr_protocol = IPPROTO_SCTP,
+ .pr_flags = PR_WANTRCVD,
+ .pr_input = sctp_input,
+ .pr_ctlinput = sctp_ctlinput,
+ .pr_ctloutput = sctp_ctloutput,
+ .pr_drain = sctp_drain,
+ .pr_usrreqs = &sctp_usrreqs
+},
+{
+ .pr_type = SOCK_STREAM,
+ .pr_domain = &inetdomain,
+ .pr_protocol = IPPROTO_SCTP,
+ .pr_flags = PR_WANTRCVD,
+ .pr_input = sctp_input,
+ .pr_ctlinput = sctp_ctlinput,
+ .pr_ctloutput = sctp_ctloutput,
+ .pr_drain = sctp_drain,
+ .pr_usrreqs = &sctp_usrreqs
+},
+#endif /* SCTP */
+{
+ .pr_type = SOCK_RAW,
+ .pr_domain = &inetdomain,
+ .pr_protocol = IPPROTO_RAW,
+ .pr_flags = PR_ATOMIC|PR_ADDR,
+ .pr_input = rip_input,
+ .pr_ctlinput = rip_ctlinput,
+ .pr_ctloutput = rip_ctloutput,
+ .pr_usrreqs = &rip_usrreqs
+},
+{
+ .pr_type = SOCK_RAW,
+ .pr_domain = &inetdomain,
+ .pr_protocol = IPPROTO_ICMP,
+ .pr_flags = PR_ATOMIC|PR_ADDR|PR_LASTHDR,
+ .pr_input = icmp_input,
+ .pr_ctloutput = rip_ctloutput,
+ .pr_usrreqs = &rip_usrreqs
+},
+{
+ .pr_type = SOCK_RAW,
+ .pr_domain = &inetdomain,
+ .pr_protocol = IPPROTO_IGMP,
+ .pr_flags = PR_ATOMIC|PR_ADDR|PR_LASTHDR,
+ .pr_input = igmp_input,
+ .pr_ctloutput = rip_ctloutput,
+ .pr_fasttimo = igmp_fasttimo,
+ .pr_slowtimo = igmp_slowtimo,
+ .pr_usrreqs = &rip_usrreqs
+},
+{
+ .pr_type = SOCK_RAW,
+ .pr_domain = &inetdomain,
+ .pr_protocol = IPPROTO_RSVP,
+ .pr_flags = PR_ATOMIC|PR_ADDR|PR_LASTHDR,
+ .pr_input = rsvp_input,
+ .pr_ctloutput = rip_ctloutput,
+ .pr_usrreqs = &rip_usrreqs
+},
+#ifdef IPSEC
+{
+ .pr_type = SOCK_RAW,
+ .pr_domain = &inetdomain,
+ .pr_protocol = IPPROTO_AH,
+ .pr_flags = PR_ATOMIC|PR_ADDR,
+ .pr_input = ah4_input,
+ .pr_ctlinput = ah4_ctlinput,
+ .pr_usrreqs = &nousrreqs
+},
+{
+ .pr_type = SOCK_RAW,
+ .pr_domain = &inetdomain,
+ .pr_protocol = IPPROTO_ESP,
+ .pr_flags = PR_ATOMIC|PR_ADDR,
+ .pr_input = esp4_input,
+ .pr_ctlinput = esp4_ctlinput,
+ .pr_usrreqs = &nousrreqs
+},
+{
+ .pr_type = SOCK_RAW,
+ .pr_domain = &inetdomain,
+ .pr_protocol = IPPROTO_IPCOMP,
+ .pr_flags = PR_ATOMIC|PR_ADDR,
+ .pr_input = ipcomp4_input,
+ .pr_usrreqs = &nousrreqs
+},
+#endif /* IPSEC */
+{
+ .pr_type = SOCK_RAW,
+ .pr_domain = &inetdomain,
+ .pr_protocol = IPPROTO_IPV4,
+ .pr_flags = PR_ATOMIC|PR_ADDR|PR_LASTHDR,
+ .pr_input = encap4_input,
+ .pr_ctloutput = rip_ctloutput,
+ .pr_init = encap_init,
+ .pr_usrreqs = &rip_usrreqs
+},
+{
+ .pr_type = SOCK_RAW,
+ .pr_domain = &inetdomain,
+ .pr_protocol = IPPROTO_MOBILE,
+ .pr_flags = PR_ATOMIC|PR_ADDR|PR_LASTHDR,
+ .pr_input = encap4_input,
+ .pr_ctloutput = rip_ctloutput,
+ .pr_init = encap_init,
+ .pr_usrreqs = &rip_usrreqs
+},
+{
+ .pr_type = SOCK_RAW,
+ .pr_domain = &inetdomain,
+ .pr_protocol = IPPROTO_ETHERIP,
+ .pr_flags = PR_ATOMIC|PR_ADDR|PR_LASTHDR,
+ .pr_input = encap4_input,
+ .pr_ctloutput = rip_ctloutput,
+ .pr_init = encap_init,
+ .pr_usrreqs = &rip_usrreqs
+},
+{
+ .pr_type = SOCK_RAW,
+ .pr_domain = &inetdomain,
+ .pr_protocol = IPPROTO_GRE,
+ .pr_flags = PR_ATOMIC|PR_ADDR|PR_LASTHDR,
+ .pr_input = encap4_input,
+ .pr_ctloutput = rip_ctloutput,
+ .pr_init = encap_init,
+ .pr_usrreqs = &rip_usrreqs
+},
+#ifdef INET6
+{
+ .pr_type = SOCK_RAW,
+ .pr_domain = &inetdomain,
+ .pr_protocol = IPPROTO_IPV6,
+ .pr_flags = PR_ATOMIC|PR_ADDR|PR_LASTHDR,
+ .pr_input = encap4_input,
+ .pr_ctloutput = rip_ctloutput,
+ .pr_init = encap_init,
+ .pr_usrreqs = &rip_usrreqs
+},
+#endif
+{
+ .pr_type = SOCK_RAW,
+ .pr_domain = &inetdomain,
+ .pr_protocol = IPPROTO_PIM,
+ .pr_flags = PR_ATOMIC|PR_ADDR|PR_LASTHDR,
+ .pr_input = encap4_input,
+ .pr_ctloutput = rip_ctloutput,
+ .pr_usrreqs = &rip_usrreqs
+},
+#ifdef DEV_PFSYNC
+{
+ .pr_type = SOCK_RAW,
+ .pr_domain = &inetdomain,
+ .pr_protocol = IPPROTO_PFSYNC,
+ .pr_flags = PR_ATOMIC|PR_ADDR,
+ .pr_input = pfsync_input,
+ .pr_ctloutput = rip_ctloutput,
+ .pr_usrreqs = &rip_usrreqs
+},
+#endif /* DEV_PFSYNC */
+/* Spacer n-times for loadable protocols. */
+IPPROTOSPACER,
+IPPROTOSPACER,
+IPPROTOSPACER,
+IPPROTOSPACER,
+IPPROTOSPACER,
+IPPROTOSPACER,
+IPPROTOSPACER,
+IPPROTOSPACER,
+/* raw wildcard */
+{
+ .pr_type = SOCK_RAW,
+ .pr_domain = &inetdomain,
+ .pr_flags = PR_ATOMIC|PR_ADDR,
+ .pr_input = rip_input,
+ .pr_ctloutput = rip_ctloutput,
+ .pr_init = rip_init,
+#ifdef VIMAGE
+ .pr_destroy = rip_destroy,
+#endif
+ .pr_usrreqs = &rip_usrreqs
+},
+};
+
+extern int in_inithead(void **, int);
+extern int in_detachhead(void **, int);
+
+struct domain inetdomain = {
+ .dom_family = AF_INET,
+ .dom_name = "internet",
+ .dom_protosw = inetsw,
+ .dom_protoswNPROTOSW = &inetsw[sizeof(inetsw)/sizeof(inetsw[0])],
+#ifdef RADIX_MPATH
+ .dom_rtattach = rn4_mpath_inithead,
+#else
+ .dom_rtattach = in_inithead,
+#endif
+#ifdef VIMAGE
+ .dom_rtdetach = in_detachhead,
+#endif
+ .dom_rtoffset = 32,
+ .dom_maxrtkey = sizeof(struct sockaddr_in),
+ .dom_ifattach = in_domifattach,
+ .dom_ifdetach = in_domifdetach
+};
+
+VNET_DOMAIN_SET(inet);
+
+SYSCTL_NODE(_net, PF_INET, inet, CTLFLAG_RW, 0,
+ "Internet Family");
+
+SYSCTL_NODE(_net_inet, IPPROTO_IP, ip, CTLFLAG_RW, 0, "IP");
+SYSCTL_NODE(_net_inet, IPPROTO_ICMP, icmp, CTLFLAG_RW, 0, "ICMP");
+SYSCTL_NODE(_net_inet, IPPROTO_UDP, udp, CTLFLAG_RW, 0, "UDP");
+SYSCTL_NODE(_net_inet, IPPROTO_TCP, tcp, CTLFLAG_RW, 0, "TCP");
+#ifdef SCTP
+SYSCTL_NODE(_net_inet, IPPROTO_SCTP, sctp, CTLFLAG_RW, 0, "SCTP");
+#endif
+SYSCTL_NODE(_net_inet, IPPROTO_IGMP, igmp, CTLFLAG_RW, 0, "IGMP");
+#ifdef IPSEC
+/* XXX no protocol # to use, pick something "reserved" */
+SYSCTL_NODE(_net_inet, 253, ipsec, CTLFLAG_RW, 0, "IPSEC");
+SYSCTL_NODE(_net_inet, IPPROTO_AH, ah, CTLFLAG_RW, 0, "AH");
+SYSCTL_NODE(_net_inet, IPPROTO_ESP, esp, CTLFLAG_RW, 0, "ESP");
+SYSCTL_NODE(_net_inet, IPPROTO_IPCOMP, ipcomp, CTLFLAG_RW, 0, "IPCOMP");
+SYSCTL_NODE(_net_inet, IPPROTO_IPIP, ipip, CTLFLAG_RW, 0, "IPIP");
+#endif /* IPSEC */
+SYSCTL_NODE(_net_inet, IPPROTO_RAW, raw, CTLFLAG_RW, 0, "RAW");
+#ifdef DEV_PFSYNC
+SYSCTL_NODE(_net_inet, IPPROTO_PFSYNC, pfsync, CTLFLAG_RW, 0, "PFSYNC");
+#endif
diff --git a/rtems/freebsd/netinet/in_rmx.c b/rtems/freebsd/netinet/in_rmx.c
new file mode 100644
index 00000000..48e209e3
--- /dev/null
+++ b/rtems/freebsd/netinet/in_rmx.c
@@ -0,0 +1,516 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright 1994, 1995 Massachusetts Institute of Technology
+ *
+ * Permission to use, copy, modify, and distribute this software and
+ * its documentation for any purpose and without fee is hereby
+ * granted, provided that both the above copyright notice and this
+ * permission notice appear in all copies, that both the above
+ * copyright notice and this permission notice appear in all
+ * supporting documentation, and that the name of M.I.T. not be used
+ * in advertising or publicity pertaining to distribution of the
+ * software without specific, written prior permission. M.I.T. makes
+ * no representations about the suitability of this software for any
+ * purpose. It is provided "as is" without express or implied
+ * warranty.
+ *
+ * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''. M.I.T. DISCLAIMS
+ * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT
+ * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * This code does two things necessary for the enhanced TCP metrics to
+ * function in a useful manner:
+ * 1) It marks all non-host routes as `cloning', thus ensuring that
+ * every actual reference to such a route actually gets turned
+ * into a reference to a host route to the specific destination
+ * requested.
+ * 2) When such routes lose all their references, it arranges for them
+ * to be deleted in some random collection of circumstances, so that
+ * a large quantity of stale routing data is not kept in kernel memory
+ * indefinitely. See in_rtqtimo() below for the exact mechanism.
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/sysctl.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/syslog.h>
+#include <rtems/freebsd/sys/callout.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/route.h>
+#include <rtems/freebsd/net/vnet.h>
+
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/in_var.h>
+#include <rtems/freebsd/netinet/ip_var.h>
+
+extern int in_inithead(void **head, int off);
+#ifdef VIMAGE
+extern int in_detachhead(void **head, int off);
+#endif
+
+#define RTPRF_OURS RTF_PROTO3 /* set on routes we manage */
+
+/*
+ * Do what we need to do when inserting a route.
+ */
+static struct radix_node *
+in_addroute(void *v_arg, void *n_arg, struct radix_node_head *head,
+ struct radix_node *treenodes)
+{
+ struct rtentry *rt = (struct rtentry *)treenodes;
+ struct sockaddr_in *sin = (struct sockaddr_in *)rt_key(rt);
+
+ RADIX_NODE_HEAD_WLOCK_ASSERT(head);
+ /*
+ * A little bit of help for both IP output and input:
+ * For host routes, we make sure that RTF_BROADCAST
+ * is set for anything that looks like a broadcast address.
+ * This way, we can avoid an expensive call to in_broadcast()
+ * in ip_output() most of the time (because the route passed
+ * to ip_output() is almost always a host route).
+ *
+ * We also do the same for local addresses, with the thought
+ * that this might one day be used to speed up ip_input().
+ *
+ * We also mark routes to multicast addresses as such, because
+ * it's easy to do and might be useful (but this is much more
+ * dubious since it's so easy to inspect the address).
+ */
+ if (rt->rt_flags & RTF_HOST) {
+ if (in_broadcast(sin->sin_addr, rt->rt_ifp)) {
+ rt->rt_flags |= RTF_BROADCAST;
+ } else if (satosin(rt->rt_ifa->ifa_addr)->sin_addr.s_addr ==
+ sin->sin_addr.s_addr) {
+ rt->rt_flags |= RTF_LOCAL;
+ }
+ }
+ if (IN_MULTICAST(ntohl(sin->sin_addr.s_addr)))
+ rt->rt_flags |= RTF_MULTICAST;
+
+ if (!rt->rt_rmx.rmx_mtu && rt->rt_ifp)
+ rt->rt_rmx.rmx_mtu = rt->rt_ifp->if_mtu;
+
+ return (rn_addroute(v_arg, n_arg, head, treenodes));
+}
+
+/*
+ * This code is the inverse of in_clsroute: on first reference, if we
+ * were managing the route, stop doing so and set the expiration timer
+ * back off again.
+ */
+static struct radix_node *
+in_matroute(void *v_arg, struct radix_node_head *head)
+{
+ struct radix_node *rn = rn_match(v_arg, head);
+ struct rtentry *rt = (struct rtentry *)rn;
+
+ if (rt) {
+ RT_LOCK(rt);
+ if (rt->rt_flags & RTPRF_OURS) {
+ rt->rt_flags &= ~RTPRF_OURS;
+ rt->rt_rmx.rmx_expire = 0;
+ }
+ RT_UNLOCK(rt);
+ }
+	return (rn);
+}
+
+static VNET_DEFINE(int, rtq_reallyold) = 60*60; /* one hour is "really old" */
+#define V_rtq_reallyold VNET(rtq_reallyold)
+SYSCTL_VNET_INT(_net_inet_ip, IPCTL_RTEXPIRE, rtexpire, CTLFLAG_RW,
+ &VNET_NAME(rtq_reallyold), 0,
+ "Default expiration time on dynamically learned routes");
+
+/* never automatically crank down to less */
+static VNET_DEFINE(int, rtq_minreallyold) = 10;
+#define V_rtq_minreallyold VNET(rtq_minreallyold)
+SYSCTL_VNET_INT(_net_inet_ip, IPCTL_RTMINEXPIRE, rtminexpire, CTLFLAG_RW,
+ &VNET_NAME(rtq_minreallyold), 0,
+ "Minimum time to attempt to hold onto dynamically learned routes");
+
+/* 128 cached routes is "too many" */
+static VNET_DEFINE(int, rtq_toomany) = 128;
+#define V_rtq_toomany VNET(rtq_toomany)
+SYSCTL_VNET_INT(_net_inet_ip, IPCTL_RTMAXCACHE, rtmaxcache, CTLFLAG_RW,
+ &VNET_NAME(rtq_toomany), 0,
+ "Upper limit on dynamically learned routes");
+
+/*
+ * On last reference drop, mark the route as belonging to us so that it
+ * can be timed out.
+ */
+static void
+in_clsroute(struct radix_node *rn, struct radix_node_head *head)
+{
+ struct rtentry *rt = (struct rtentry *)rn;
+
+ RT_LOCK_ASSERT(rt);
+
+ if (!(rt->rt_flags & RTF_UP))
+ return; /* prophylactic measures */
+
+ if (rt->rt_flags & RTPRF_OURS)
+ return;
+
+ if (!(rt->rt_flags & RTF_DYNAMIC))
+ return;
+
+ /*
+ * If rtq_reallyold is 0, just delete the route without
+ * waiting for a timeout cycle to kill it.
+ */
+ if (V_rtq_reallyold != 0) {
+ rt->rt_flags |= RTPRF_OURS;
+ rt->rt_rmx.rmx_expire = time_uptime + V_rtq_reallyold;
+ } else {
+ rtexpunge(rt);
+ }
+}
+
+struct rtqk_arg {
+ struct radix_node_head *rnh;
+ int draining;
+ int killed;
+ int found;
+ int updating;
+ time_t nextstop;
+};
+
+/*
+ * Get rid of old routes. When draining, this deletes everything, even when
+ * the timeout is not expired yet. When updating, this makes sure that
+ * nothing has a timeout longer than the current value of rtq_reallyold.
+ */
+static int
+in_rtqkill(struct radix_node *rn, void *rock)
+{
+ struct rtqk_arg *ap = rock;
+ struct rtentry *rt = (struct rtentry *)rn;
+ int err;
+
+ RADIX_NODE_HEAD_WLOCK_ASSERT(ap->rnh);
+
+ if (rt->rt_flags & RTPRF_OURS) {
+ ap->found++;
+
+ if (ap->draining || rt->rt_rmx.rmx_expire <= time_uptime) {
+ if (rt->rt_refcnt > 0)
+ panic("rtqkill route really not free");
+
+ err = in_rtrequest(RTM_DELETE,
+ (struct sockaddr *)rt_key(rt),
+ rt->rt_gateway, rt_mask(rt),
+ rt->rt_flags | RTF_RNH_LOCKED, 0,
+ rt->rt_fibnum);
+ if (err) {
+ log(LOG_WARNING, "in_rtqkill: error %d\n", err);
+ } else {
+ ap->killed++;
+ }
+ } else {
+ if (ap->updating &&
+ (rt->rt_rmx.rmx_expire - time_uptime >
+ V_rtq_reallyold)) {
+ rt->rt_rmx.rmx_expire =
+ time_uptime + V_rtq_reallyold;
+ }
+ ap->nextstop = lmin(ap->nextstop,
+ rt->rt_rmx.rmx_expire);
+ }
+ }
+
+	return (0);
+}
+
+#define RTQ_TIMEOUT 60*10 /* run no less than once every ten minutes */
+static VNET_DEFINE(int, rtq_timeout) = RTQ_TIMEOUT;
+static VNET_DEFINE(struct callout, rtq_timer);
+
+#define V_rtq_timeout VNET(rtq_timeout)
+#define V_rtq_timer VNET(rtq_timer)
+
+static void in_rtqtimo_one(void *rock);
+
+static void
+in_rtqtimo(void *rock)
+{
+	int fibnum;
+	void *newrock;
+	struct timeval atv;
+
+	CURVNET_SET((struct vnet *) rock);
+
+ for (fibnum = 0; fibnum < rt_numfibs; fibnum++) {
+ newrock = rt_tables_get_rnh(fibnum, AF_INET);
+ if (newrock != NULL)
+ in_rtqtimo_one(newrock);
+ }
+ atv.tv_usec = 0;
+ atv.tv_sec = V_rtq_timeout;
+ callout_reset(&V_rtq_timer, tvtohz(&atv), in_rtqtimo, rock);
+ CURVNET_RESTORE();
+}
+
+static void
+in_rtqtimo_one(void *rock)
+{
+ struct radix_node_head *rnh = rock;
+ struct rtqk_arg arg;
+ static time_t last_adjusted_timeout = 0;
+
+ arg.found = arg.killed = 0;
+ arg.rnh = rnh;
+ arg.nextstop = time_uptime + V_rtq_timeout;
+ arg.draining = arg.updating = 0;
+ RADIX_NODE_HEAD_LOCK(rnh);
+ rnh->rnh_walktree(rnh, in_rtqkill, &arg);
+ RADIX_NODE_HEAD_UNLOCK(rnh);
+
+ /*
+ * Attempt to be somewhat dynamic about this:
+ * If there are ``too many'' routes sitting around taking up space,
+ * then crank down the timeout, and see if we can't make some more
+ * go away. However, we make sure that we will never adjust more
+ * than once in rtq_timeout seconds, to keep from cranking down too
+ * hard.
+ */
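+	/*
+	 * Worked example (illustrative): with the defaults above, a
+	 * persistently over-full cache decays V_rtq_reallyold by a
+	 * factor of 2/3 per step: 3600 -> 2400 -> 1600 -> 1066 -> ...,
+	 * at most one step per rtq_timeout interval, clamped below at
+	 * V_rtq_minreallyold (10).
+	 */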
+ if ((arg.found - arg.killed > V_rtq_toomany) &&
+ (time_uptime - last_adjusted_timeout >= V_rtq_timeout) &&
+ V_rtq_reallyold > V_rtq_minreallyold) {
+ V_rtq_reallyold = 2 * V_rtq_reallyold / 3;
+ if (V_rtq_reallyold < V_rtq_minreallyold) {
+ V_rtq_reallyold = V_rtq_minreallyold;
+ }
+
+ last_adjusted_timeout = time_uptime;
+#ifdef DIAGNOSTIC
+ log(LOG_DEBUG, "in_rtqtimo: adjusted rtq_reallyold to %d\n",
+ V_rtq_reallyold);
+#endif
+ arg.found = arg.killed = 0;
+ arg.updating = 1;
+ RADIX_NODE_HEAD_LOCK(rnh);
+ rnh->rnh_walktree(rnh, in_rtqkill, &arg);
+ RADIX_NODE_HEAD_UNLOCK(rnh);
+ }
+}
+
+void
+in_rtqdrain(void)
+{
+ VNET_ITERATOR_DECL(vnet_iter);
+ struct radix_node_head *rnh;
+ struct rtqk_arg arg;
+ int fibnum;
+
+ VNET_LIST_RLOCK_NOSLEEP();
+ VNET_FOREACH(vnet_iter) {
+ CURVNET_SET(vnet_iter);
+
+		for (fibnum = 0; fibnum < rt_numfibs; fibnum++) {
+ rnh = rt_tables_get_rnh(fibnum, AF_INET);
+ arg.found = arg.killed = 0;
+ arg.rnh = rnh;
+ arg.nextstop = 0;
+ arg.draining = 1;
+ arg.updating = 0;
+ RADIX_NODE_HEAD_LOCK(rnh);
+ rnh->rnh_walktree(rnh, in_rtqkill, &arg);
+ RADIX_NODE_HEAD_UNLOCK(rnh);
+ }
+ CURVNET_RESTORE();
+ }
+ VNET_LIST_RUNLOCK_NOSLEEP();
+}
+
+static int _in_rt_was_here;
+/*
+ * Initialize our routing tree.
+ */
+int
+in_inithead(void **head, int off)
+{
+ struct radix_node_head *rnh;
+
+ /* XXX MRT
+ * This can be called from vfs_export.c too in which case 'off'
+ * will be 0. We know the correct value so just use that and
+ * return directly if it was 0.
+ * This is a hack that replaces an even worse hack on a bad hack
+ * on a bad design. After RELENG_7 this should be fixed but that
+ * will change the ABI, so for now do it this way.
+ */
+ if (!rn_inithead(head, 32))
+		return (0);
+
+ if (off == 0) /* XXX MRT see above */
+		return (1);	/* only do the rest for a real routing table */
+
+ rnh = *head;
+ rnh->rnh_addaddr = in_addroute;
+ rnh->rnh_matchaddr = in_matroute;
+ rnh->rnh_close = in_clsroute;
+	if (_in_rt_was_here == 0) {
+ callout_init(&V_rtq_timer, CALLOUT_MPSAFE);
+ callout_reset(&V_rtq_timer, 1, in_rtqtimo, curvnet);
+ _in_rt_was_here = 1;
+ }
+	return (1);
+}
+
+#ifdef VIMAGE
+int
+in_detachhead(void **head, int off)
+{
+
+ callout_drain(&V_rtq_timer);
+ return (1);
+}
+#endif
+
+/*
+ * This zaps old routes when the interface goes down or interface
+ * address is deleted. In the latter case, it deletes static routes
+ * that point to this address. If we don't do this, we may end up
+ * using the old address in the future. The ones we always want to
+ * get rid of are things like ARP entries, since the user might down
+ * the interface, walk over to a completely different network, and
+ * plug back in.
+ */
+struct in_ifadown_arg {
+ struct ifaddr *ifa;
+ int del;
+};
+
+static int
+in_ifadownkill(struct radix_node *rn, void *xap)
+{
+ struct in_ifadown_arg *ap = xap;
+ struct rtentry *rt = (struct rtentry *)rn;
+
+ RT_LOCK(rt);
+ if (rt->rt_ifa == ap->ifa &&
+ (ap->del || !(rt->rt_flags & RTF_STATIC))) {
+ /*
+		 * Acquire a reference so that the route can later be
+		 * freed, as the refcount would otherwise be 0 here in
+		 * at least the ap->del case.
+ */
+ RT_ADDREF(rt);
+ /*
+ * Disconnect it from the tree and permit protocols
+ * to cleanup.
+ */
+ rtexpunge(rt);
+ /*
+ * At this point it is an rttrash node, and in case
+ * the above is the only reference we must free it.
+		 * If we do not, no one will have a pointer to it and
+		 * the rtentry will be leaked forever.
+		 * In case someone else holds a reference, we are fine,
+		 * as we only decrement the refcount. In that case, if
+		 * the other entity only calls RT_REMREF, we will still
+		 * be leaking, but at least we tried.
+ */
+ RTFREE_LOCKED(rt);
+ return (0);
+ }
+ RT_UNLOCK(rt);
+	return (0);
+}
+
+int
+in_ifadown(struct ifaddr *ifa, int delete)
+{
+ struct in_ifadown_arg arg;
+ struct radix_node_head *rnh;
+ int fibnum;
+
+ if (ifa->ifa_addr->sa_family != AF_INET)
+		return (1);
+
+	for (fibnum = 0; fibnum < rt_numfibs; fibnum++) {
+ rnh = rt_tables_get_rnh(fibnum, AF_INET);
+ arg.ifa = ifa;
+ arg.del = delete;
+ RADIX_NODE_HEAD_LOCK(rnh);
+ rnh->rnh_walktree(rnh, in_ifadownkill, &arg);
+ RADIX_NODE_HEAD_UNLOCK(rnh);
+ ifa->ifa_flags &= ~IFA_ROUTE; /* XXXlocking? */
+ }
+	return (0);
+}
+
+/*
+ * inet versions of the rt functions. These have FIB extensions and for
+ * now simply wrap the _fib variants; eventually this order will be
+ * reversed.
+ */
+void
+in_rtalloc_ign(struct route *ro, u_long ignflags, u_int fibnum)
+{
+ rtalloc_ign_fib(ro, ignflags, fibnum);
+}
+
+int
+in_rtrequest(int req, struct sockaddr *dst, struct sockaddr *gateway,
+    struct sockaddr *netmask, int flags, struct rtentry **ret_nrt,
+    u_int fibnum)
+{
+ return (rtrequest_fib(req, dst, gateway, netmask,
+ flags, ret_nrt, fibnum));
+}
+
+struct rtentry *
+in_rtalloc1(struct sockaddr *dst, int report, u_long ignflags, u_int fibnum)
+{
+ return (rtalloc1_fib(dst, report, ignflags, fibnum));
+}
+
+void
+in_rtredirect(struct sockaddr *dst, struct sockaddr *gateway,
+    struct sockaddr *netmask, int flags, struct sockaddr *src,
+    u_int fibnum)
+{
+ rtredirect_fib(dst, gateway, netmask, flags, src, fibnum);
+}
+
+void
+in_rtalloc(struct route *ro, u_int fibnum)
+{
+ rtalloc_ign_fib(ro, 0UL, fibnum);
+}
+
+#if 0
+int in_rt_getifa(struct rt_addrinfo *, u_int fibnum);
+int in_rtioctl(u_long, caddr_t, u_int);
+int in_rtrequest1(int, struct rt_addrinfo *, struct rtentry **, u_int);
+#endif
diff --git a/rtems/freebsd/netinet/in_systm.h b/rtems/freebsd/netinet/in_systm.h
new file mode 100644
index 00000000..a55d196e
--- /dev/null
+++ b/rtems/freebsd/netinet/in_systm.h
@@ -0,0 +1,58 @@
+/*-
+ * Copyright (c) 1982, 1986, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)in_systm.h 8.1 (Berkeley) 6/10/93
+ * $FreeBSD$
+ */
+
+#ifndef _NETINET_IN_SYSTM_HH_
+#define _NETINET_IN_SYSTM_HH_
+
+/*
+ * Miscellaneous internetwork
+ * definitions for kernel.
+ */
+
+/*
+ * Network types.
+ *
+ * Internally the system keeps counters in the headers with the bytes
+ * swapped so that VAX instructions will work on them. It reverses
+ * the bytes before transmission at each protocol level. The n_ types
+ * represent the types with the bytes in ``high-ender'' order.
+ */
+typedef u_int16_t n_short; /* short as received from the net */
+typedef u_int32_t n_long; /* long as received from the net */
+
+typedef u_int32_t n_time; /* ms since 00:00 GMT, byte rev */
+
+#ifdef _KERNEL
+uint32_t iptime(void);
+#endif
+
+#endif /* _NETINET_IN_SYSTM_HH_ */
diff --git a/rtems/freebsd/netinet/in_var.h b/rtems/freebsd/netinet/in_var.h
new file mode 100644
index 00000000..6c57eef8
--- /dev/null
+++ b/rtems/freebsd/netinet/in_var.h
@@ -0,0 +1,475 @@
+/*-
+ * Copyright (c) 1985, 1986, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)in_var.h 8.2 (Berkeley) 1/9/95
+ * $FreeBSD$
+ */
+
+#ifndef _NETINET_IN_VAR_HH_
+#define _NETINET_IN_VAR_HH_
+
+#include <rtems/freebsd/sys/queue.h>
+#include <rtems/freebsd/sys/fnv_hash.h>
+#include <rtems/freebsd/sys/tree.h>
+
+struct igmp_ifinfo;
+struct in_multi;
+struct lltable;
+
+/*
+ * IPv4 per-interface state.
+ */
+struct in_ifinfo {
+ struct lltable *ii_llt; /* ARP state */
+ struct igmp_ifinfo *ii_igmp; /* IGMP state */
+ struct in_multi *ii_allhosts; /* 224.0.0.1 membership */
+};
+
+/*
+ * Interface address, Internet version. One of these structures
+ * is allocated for each Internet address on an interface.
+ * The ifaddr structure contains the protocol-independent part
+ * of the structure and is assumed to be first.
+ */
+struct in_ifaddr {
+ struct ifaddr ia_ifa; /* protocol-independent info */
+#define ia_ifp ia_ifa.ifa_ifp
+#define ia_flags ia_ifa.ifa_flags
+ /* ia_{,sub}net{,mask} in host order */
+ u_long ia_net; /* network number of interface */
+ u_long ia_netmask; /* mask of net part */
+ u_long ia_subnet; /* subnet number, including net */
+ u_long ia_subnetmask; /* mask of subnet part */
+ struct in_addr ia_netbroadcast; /* to recognize net broadcasts */
+ LIST_ENTRY(in_ifaddr) ia_hash; /* entry in bucket of inet addresses */
+ TAILQ_ENTRY(in_ifaddr) ia_link; /* list of internet addresses */
+ struct sockaddr_in ia_addr; /* reserve space for interface name */
+ struct sockaddr_in ia_dstaddr; /* reserve space for broadcast addr */
+#define ia_broadaddr ia_dstaddr
+ struct sockaddr_in ia_sockmask; /* reserve space for general netmask */
+};
+
+struct in_aliasreq {
+ char ifra_name[IFNAMSIZ]; /* if name, e.g. "en0" */
+ struct sockaddr_in ifra_addr;
+ struct sockaddr_in ifra_broadaddr;
+#define ifra_dstaddr ifra_broadaddr
+ struct sockaddr_in ifra_mask;
+};
+/*
+ * Given a pointer to an in_ifaddr (ifaddr),
+ * return a pointer to the addr as a sockaddr_in.
+ */
+#define IA_SIN(ia) (&(((struct in_ifaddr *)(ia))->ia_addr))
+#define IA_DSTSIN(ia) (&(((struct in_ifaddr *)(ia))->ia_dstaddr))
+
+#define	IN_LNAOF(in, ifa) \
+	(ntohl((in).s_addr) & ~((struct in_ifaddr *)(ifa))->ia_subnetmask)
+
+#ifdef _KERNEL
+extern u_char inetctlerrmap[];
+
+#define LLTABLE(ifp) \
+ ((struct in_ifinfo *)(ifp)->if_afdata[AF_INET])->ii_llt
+/*
+ * Hash table for IP addresses.
+ */
+TAILQ_HEAD(in_ifaddrhead, in_ifaddr);
+LIST_HEAD(in_ifaddrhashhead, in_ifaddr);
+
+VNET_DECLARE(struct in_ifaddrhashhead *, in_ifaddrhashtbl);
+VNET_DECLARE(struct in_ifaddrhead, in_ifaddrhead);
+VNET_DECLARE(u_long, in_ifaddrhmask); /* mask for hash table */
+
+#define V_in_ifaddrhashtbl VNET(in_ifaddrhashtbl)
+#define V_in_ifaddrhead VNET(in_ifaddrhead)
+#define V_in_ifaddrhmask VNET(in_ifaddrhmask)
+
+#define INADDR_NHASH_LOG2 9
+#define INADDR_NHASH (1 << INADDR_NHASH_LOG2)
+#define INADDR_HASHVAL(x) fnv_32_buf((&(x)), sizeof(x), FNV1_32_INIT)
+#define INADDR_HASH(x) \
+ (&V_in_ifaddrhashtbl[INADDR_HASHVAL(x) & V_in_ifaddrhmask])
+
+extern struct rwlock in_ifaddr_lock;
+
+#define IN_IFADDR_LOCK_ASSERT() rw_assert(&in_ifaddr_lock, RA_LOCKED)
+#define IN_IFADDR_RLOCK() rw_rlock(&in_ifaddr_lock)
+#define IN_IFADDR_RLOCK_ASSERT() rw_assert(&in_ifaddr_lock, RA_RLOCKED)
+#define IN_IFADDR_RUNLOCK() rw_runlock(&in_ifaddr_lock)
+#define IN_IFADDR_WLOCK() rw_wlock(&in_ifaddr_lock)
+#define IN_IFADDR_WLOCK_ASSERT() rw_assert(&in_ifaddr_lock, RA_WLOCKED)
+#define IN_IFADDR_WUNLOCK() rw_wunlock(&in_ifaddr_lock)
+
+/*
+ * Macro for finding the internet address structure (in_ifaddr)
+ * corresponding to one of our IP addresses (in_addr).
+ */
+#define INADDR_TO_IFADDR(addr, ia) \
+ /* struct in_addr addr; */ \
+ /* struct in_ifaddr *ia; */ \
+do { \
+\
+ LIST_FOREACH(ia, INADDR_HASH((addr).s_addr), ia_hash) \
+ if (IA_SIN(ia)->sin_addr.s_addr == (addr).s_addr) \
+ break; \
+} while (0)
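+
+/*
+ * Usage sketch (illustrative, not part of the original file); the hash
+ * table is protected by in_ifaddr_lock, so a typical reader does:
+ *
+ *	struct in_ifaddr *ia;
+ *
+ *	IN_IFADDR_RLOCK();
+ *	INADDR_TO_IFADDR(ip->ip_dst, ia);
+ *	if (ia != NULL)
+ *		... use ia ...
+ *	IN_IFADDR_RUNLOCK();
+ */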
+
+/*
+ * Macro for finding the interface (ifnet structure) corresponding to one
+ * of our IP addresses.
+ */
+#define INADDR_TO_IFP(addr, ifp) \
+ /* struct in_addr addr; */ \
+ /* struct ifnet *ifp; */ \
+{ \
+ struct in_ifaddr *ia; \
+\
+ INADDR_TO_IFADDR(addr, ia); \
+ (ifp) = (ia == NULL) ? NULL : ia->ia_ifp; \
+}
+
+/*
+ * Macro for finding the internet address structure (in_ifaddr) corresponding
+ * to a given interface (ifnet structure).
+ */
+#define IFP_TO_IA(ifp, ia) \
+ /* struct ifnet *ifp; */ \
+ /* struct in_ifaddr *ia; */ \
+{ \
+ for ((ia) = TAILQ_FIRST(&V_in_ifaddrhead); \
+ (ia) != NULL && (ia)->ia_ifp != (ifp); \
+ (ia) = TAILQ_NEXT((ia), ia_link)) \
+ continue; \
+ if ((ia) != NULL) \
+ ifa_ref(&(ia)->ia_ifa); \
+}
+#endif /* _KERNEL */
+
+/*
+ * IP datagram reassembly.
+ */
+#define IPREASS_NHASH_LOG2 6
+#define IPREASS_NHASH (1 << IPREASS_NHASH_LOG2)
+#define IPREASS_HMASK (IPREASS_NHASH - 1)
+#define IPREASS_HASH(x,y) \
+ (((((x) & 0xF) | ((((x) >> 8) & 0xF) << 4)) ^ (y)) & IPREASS_HMASK)
+
+/*
+ * Legacy IPv4 IGMP per-link structure.
+ */
+struct router_info {
+ struct ifnet *rti_ifp;
+ int rti_type; /* type of router which is querier on this interface */
+ int rti_time; /* # of slow timeouts since last old query */
+ SLIST_ENTRY(router_info) rti_list;
+};
+
+/*
+ * Per-interface IGMP router version information.
+ */
+struct igmp_ifinfo {
+ LIST_ENTRY(igmp_ifinfo) igi_link;
+ struct ifnet *igi_ifp; /* interface this instance belongs to */
+ uint32_t igi_version; /* IGMPv3 Host Compatibility Mode */
+ uint32_t igi_v1_timer; /* IGMPv1 Querier Present timer (s) */
+ uint32_t igi_v2_timer; /* IGMPv2 Querier Present timer (s) */
+ uint32_t igi_v3_timer; /* IGMPv3 General Query (interface) timer (s)*/
+ uint32_t igi_flags; /* IGMP per-interface flags */
+ uint32_t igi_rv; /* IGMPv3 Robustness Variable */
+ uint32_t igi_qi; /* IGMPv3 Query Interval (s) */
+ uint32_t igi_qri; /* IGMPv3 Query Response Interval (s) */
+ uint32_t igi_uri; /* IGMPv3 Unsolicited Report Interval (s) */
+ SLIST_HEAD(,in_multi) igi_relinmhead; /* released groups */
+ struct ifqueue igi_gq; /* queue of general query responses */
+};
+
+#define IGIF_SILENT 0x00000001 /* Do not use IGMP on this ifp */
+#define IGIF_LOOPBACK 0x00000002 /* Send IGMP reports to loopback */
+
+/*
+ * IPv4 multicast IGMP-layer source entry.
+ */
+struct ip_msource {
+ RB_ENTRY(ip_msource) ims_link; /* RB tree links */
+ in_addr_t ims_haddr; /* host byte order */
+ struct ims_st {
+ uint16_t ex; /* # of exclusive members */
+ uint16_t in; /* # of inclusive members */
+ } ims_st[2]; /* state at t0, t1 */
+ uint8_t ims_stp; /* pending query */
+};
+
+/*
+ * IPv4 multicast PCB-layer source entry.
+ */
+struct in_msource {
+ RB_ENTRY(ip_msource) ims_link; /* RB tree links */
+ in_addr_t ims_haddr; /* host byte order */
+ uint8_t imsl_st[2]; /* state before/at commit */
+};
+
+RB_HEAD(ip_msource_tree, ip_msource); /* define struct ip_msource_tree */
+
+static __inline int
+ip_msource_cmp(const struct ip_msource *a, const struct ip_msource *b)
+{
+
+ if (a->ims_haddr < b->ims_haddr)
+ return (-1);
+ if (a->ims_haddr == b->ims_haddr)
+ return (0);
+ return (1);
+}
+RB_PROTOTYPE(ip_msource_tree, ip_msource, ims_link, ip_msource_cmp);
+
+/*
+ * IPv4 multicast PCB-layer group filter descriptor.
+ */
+struct in_mfilter {
+ struct ip_msource_tree imf_sources; /* source list for (S,G) */
+ u_long imf_nsrc; /* # of source entries */
+ uint8_t imf_st[2]; /* state before/at commit */
+};
+
+/*
+ * IPv4 group descriptor.
+ *
+ * For every entry on an ifnet's if_multiaddrs list which represents
+ * an IP multicast group, there is one of these structures.
+ *
+ * If any source filters are present, then a node will exist in the RB-tree
+ * to permit fast lookup by source whenever an operation takes place.
+ * This permits pre-order traversal when we issue reports.
+ * Source filter trees are kept separately from the socket layer to
+ * greatly simplify locking.
+ *
+ * When IGMPv3 is active, inm_timer is the response to group query timer.
+ * The state-change timer inm_sctimer is separate; whenever state changes
+ * for the group the state change record is generated and transmitted,
+ * and kept if retransmissions are necessary.
+ *
+ * FUTURE: inm_link is now only used when groups are being purged
+ * on a detaching ifnet. It could be demoted to a SLIST_ENTRY, but
+ * because it is at the very start of the struct, we can't do this
+ * w/o breaking the ABI for ifmcstat.
+ */
+struct in_multi {
+ LIST_ENTRY(in_multi) inm_link; /* to-be-released by in_ifdetach */
+ struct in_addr inm_addr; /* IP multicast address, convenience */
+ struct ifnet *inm_ifp; /* back pointer to ifnet */
+ struct ifmultiaddr *inm_ifma; /* back pointer to ifmultiaddr */
+ u_int inm_timer; /* IGMPv1/v2 group / v3 query timer */
+ u_int inm_state; /* state of the membership */
+ void *inm_rti; /* unused, legacy field */
+ u_int inm_refcount; /* reference count */
+
+ /* New fields for IGMPv3 follow. */
+ struct igmp_ifinfo *inm_igi; /* IGMP info */
+ SLIST_ENTRY(in_multi) inm_nrele; /* to-be-released by IGMP */
+ struct ip_msource_tree inm_srcs; /* tree of sources */
+ u_long inm_nsrc; /* # of tree entries */
+
+ struct ifqueue inm_scq; /* queue of pending
+ * state-change packets */
+ struct timeval inm_lastgsrtv; /* Time of last G-S-R query */
+ uint16_t inm_sctimer; /* state-change timer */
+ uint16_t inm_scrv; /* state-change rexmit count */
+
+ /*
+ * SSM state counters which track state at T0 (the time the last
+ * state-change report's RV timer went to zero) and T1
+ * (time of pending report, i.e. now).
+ * Used for computing IGMPv3 state-change reports. Several refcounts
+ * are maintained here to optimize for common use-cases.
+ */
+ struct inm_st {
+ uint16_t iss_fmode; /* IGMP filter mode */
+ uint16_t iss_asm; /* # of ASM listeners */
+ uint16_t iss_ex; /* # of exclusive members */
+ uint16_t iss_in; /* # of inclusive members */
+ uint16_t iss_rec; /* # of recorded sources */
+ } inm_st[2]; /* state at t0, t1 */
+};
+
+/*
+ * Helper function to derive the filter mode on a source entry
+ * from its internal counters. Predicates are:
+ * A source is only excluded if all listeners exclude it.
+ * A source is only included if no listeners exclude it,
+ * and at least one listener includes it.
+ * May be used by ifmcstat(8).
+ */
+static __inline uint8_t
+ims_get_mode(const struct in_multi *inm, const struct ip_msource *ims,
+ uint8_t t)
+{
+
+ t = !!t;
+ if (inm->inm_st[t].iss_ex > 0 &&
+ inm->inm_st[t].iss_ex == ims->ims_st[t].ex)
+ return (MCAST_EXCLUDE);
+ else if (ims->ims_st[t].in > 0 && ims->ims_st[t].ex == 0)
+ return (MCAST_INCLUDE);
+ return (MCAST_UNDEFINED);
+}
+
+#ifdef _KERNEL
+
+#ifdef SYSCTL_DECL
+SYSCTL_DECL(_net_inet);
+SYSCTL_DECL(_net_inet_ip);
+SYSCTL_DECL(_net_inet_raw);
+#endif
+
+/*
+ * Lock macros for IPv4 layer multicast address lists. IPv4 lock goes
+ * before link layer multicast locks in the lock order. In most cases,
+ * consumers of IN_*_MULTI() macros should acquire the locks before
+ * calling them; users of the in_{add,del}multi() functions should not.
+ */
+extern struct mtx in_multi_mtx;
+#define IN_MULTI_LOCK() mtx_lock(&in_multi_mtx)
+#define IN_MULTI_UNLOCK() mtx_unlock(&in_multi_mtx)
+#define IN_MULTI_LOCK_ASSERT() mtx_assert(&in_multi_mtx, MA_OWNED)
+#define IN_MULTI_UNLOCK_ASSERT() mtx_assert(&in_multi_mtx, MA_NOTOWNED)
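+
+/*
+ * Illustrative lock-order sketch (not part of the original file),
+ * mirroring inm_lookup() below:
+ *
+ *	IN_MULTI_LOCK();
+ *	IF_ADDR_LOCK(ifp);
+ *	inm = inm_lookup_locked(ifp, ina);
+ *	IF_ADDR_UNLOCK(ifp);
+ *	IN_MULTI_UNLOCK();
+ */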
+
+/*
+ * Function for looking up an in_multi record for an IPv4 multicast address
+ * on a given interface. ifp must be valid. If no record found, return NULL.
+ * The IN_MULTI_LOCK and IF_ADDR_LOCK on ifp must be held.
+ */
+static __inline struct in_multi *
+inm_lookup_locked(struct ifnet *ifp, const struct in_addr ina)
+{
+ struct ifmultiaddr *ifma;
+ struct in_multi *inm;
+
+ IN_MULTI_LOCK_ASSERT();
+ IF_ADDR_LOCK_ASSERT(ifp);
+
+ inm = NULL;
+ TAILQ_FOREACH(ifma, &((ifp)->if_multiaddrs), ifma_link) {
+ if (ifma->ifma_addr->sa_family == AF_INET) {
+ inm = (struct in_multi *)ifma->ifma_protospec;
+ if (inm->inm_addr.s_addr == ina.s_addr)
+ break;
+ inm = NULL;
+ }
+ }
+ return (inm);
+}
+
+/*
+ * Wrapper for inm_lookup_locked().
+ * The IF_ADDR_LOCK will be taken on ifp and released on return.
+ */
+static __inline struct in_multi *
+inm_lookup(struct ifnet *ifp, const struct in_addr ina)
+{
+ struct in_multi *inm;
+
+ IN_MULTI_LOCK_ASSERT();
+ IF_ADDR_LOCK(ifp);
+ inm = inm_lookup_locked(ifp, ina);
+ IF_ADDR_UNLOCK(ifp);
+
+ return (inm);
+}
+
+/* Acquire an in_multi record. */
+static __inline void
+inm_acquire_locked(struct in_multi *inm)
+{
+
+ IN_MULTI_LOCK_ASSERT();
+ ++inm->inm_refcount;
+}
+
+/*
+ * Return values for imo_multi_filter().
+ */
+#define MCAST_PASS 0 /* Pass */
+#define MCAST_NOTGMEMBER 1 /* This host not a member of group */
+#define MCAST_NOTSMEMBER 2 /* This host excluded source */
+#define MCAST_MUTED 3 /* [deprecated] */
+
+struct rtentry;
+struct route;
+struct ip_moptions;
+
+int imo_multi_filter(const struct ip_moptions *, const struct ifnet *,
+ const struct sockaddr *, const struct sockaddr *);
+void inm_commit(struct in_multi *);
+void inm_clear_recorded(struct in_multi *);
+void inm_print(const struct in_multi *);
+int inm_record_source(struct in_multi *inm, const in_addr_t);
+void inm_release(struct in_multi *);
+void inm_release_locked(struct in_multi *);
+struct in_multi *
+ in_addmulti(struct in_addr *, struct ifnet *);
+void in_delmulti(struct in_multi *);
+int in_joingroup(struct ifnet *, const struct in_addr *,
+ /*const*/ struct in_mfilter *, struct in_multi **);
+int in_joingroup_locked(struct ifnet *, const struct in_addr *,
+ /*const*/ struct in_mfilter *, struct in_multi **);
+int in_leavegroup(struct in_multi *, /*const*/ struct in_mfilter *);
+int in_leavegroup_locked(struct in_multi *,
+ /*const*/ struct in_mfilter *);
+int in_control(struct socket *, u_long, caddr_t, struct ifnet *,
+ struct thread *);
+void in_rtqdrain(void);
+void ip_input(struct mbuf *);
+int in_ifadown(struct ifaddr *ifa, int);
+void in_ifscrub(struct ifnet *, struct in_ifaddr *);
+struct mbuf *ip_fastforward(struct mbuf *);
+void *in_domifattach(struct ifnet *);
+void in_domifdetach(struct ifnet *, void *);
+
+/* XXX */
+void in_rtalloc_ign(struct route *ro, u_long ignflags, u_int fibnum);
+void in_rtalloc(struct route *ro, u_int fibnum);
+struct rtentry *in_rtalloc1(struct sockaddr *, int, u_long, u_int);
+void in_rtredirect(struct sockaddr *, struct sockaddr *,
+ struct sockaddr *, int, struct sockaddr *, u_int);
+int in_rtrequest(int, struct sockaddr *,
+ struct sockaddr *, struct sockaddr *, int, struct rtentry **, u_int);
+
+#if 0
+int in_rt_getifa(struct rt_addrinfo *, u_int fibnum);
+int in_rtioctl(u_long, caddr_t, u_int);
+int in_rtrequest1(int, struct rt_addrinfo *, struct rtentry **, u_int);
+#endif
+#endif /* _KERNEL */
+
+/* INET6 stuff */
+#include <rtems/freebsd/netinet6/in6_var.h>
+
+#endif /* _NETINET_IN_VAR_HH_ */
diff --git a/rtems/freebsd/netinet/ip.h b/rtems/freebsd/netinet/ip.h
new file mode 100644
index 00000000..e08b23a6
--- /dev/null
+++ b/rtems/freebsd/netinet/ip.h
@@ -0,0 +1,196 @@
+/*-
+ * Copyright (c) 1982, 1986, 1993
+ * The Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)ip.h 8.2 (Berkeley) 6/1/94
+ * $FreeBSD$
+ */
+
+#ifndef _NETINET_IP_HH_
+#define _NETINET_IP_HH_
+
+#include <rtems/freebsd/sys/cdefs.h>
+
+/*
+ * Definitions for internet protocol version 4.
+ *
+ * Per RFC 791, September 1981.
+ */
+#define IPVERSION 4
+
+/*
+ * Structure of an internet header, naked of options.
+ */
+struct ip {
+#if BYTE_ORDER == LITTLE_ENDIAN
+ u_int ip_hl:4, /* header length */
+ ip_v:4; /* version */
+#endif
+#if BYTE_ORDER == BIG_ENDIAN
+ u_int ip_v:4, /* version */
+ ip_hl:4; /* header length */
+#endif
+ u_char ip_tos; /* type of service */
+ u_short ip_len; /* total length */
+ u_short ip_id; /* identification */
+ u_short ip_off; /* fragment offset field */
+#define IP_RF 0x8000 /* reserved fragment flag */
+#define IP_DF 0x4000 /* dont fragment flag */
+#define IP_MF 0x2000 /* more fragments flag */
+#define IP_OFFMASK 0x1fff /* mask for fragmenting bits */
+ u_char ip_ttl; /* time to live */
+ u_char ip_p; /* protocol */
+ u_short ip_sum; /* checksum */
+ struct in_addr ip_src,ip_dst; /* source and dest address */
+} __packed __aligned(4);
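+
+/*
+ * Illustrative note (not part of the original file): ip_hl counts
+ * 32-bit words, so the header length in bytes is (ip->ip_hl << 2);
+ * the minimum legal value is 5 (a 20-byte header with no options).
+ */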
+
+#define IP_MAXPACKET 65535 /* maximum packet size */
+
+/*
+ * Definitions for IP type of service (ip_tos).
+ */
+#define IPTOS_LOWDELAY 0x10
+#define IPTOS_THROUGHPUT 0x08
+#define IPTOS_RELIABILITY 0x04
+#define IPTOS_MINCOST 0x02
+
+/*
+ * Definitions for IP precedence (also in ip_tos) (hopefully unused).
+ */
+#define IPTOS_PREC_NETCONTROL 0xe0
+#define IPTOS_PREC_INTERNETCONTROL 0xc0
+#define IPTOS_PREC_CRITIC_ECP 0xa0
+#define IPTOS_PREC_FLASHOVERRIDE 0x80
+#define IPTOS_PREC_FLASH 0x60
+#define IPTOS_PREC_IMMEDIATE 0x40
+#define IPTOS_PREC_PRIORITY 0x20
+#define IPTOS_PREC_ROUTINE 0x00
+
+/*
+ * ECN (Explicit Congestion Notification) codepoints in RFC3168 mapped to the
+ * lower 2 bits of the TOS field.
+ */
+#define IPTOS_ECN_NOTECT 0x00 /* not-ECT */
+#define IPTOS_ECN_ECT1 0x01 /* ECN-capable transport (1) */
+#define IPTOS_ECN_ECT0 0x02 /* ECN-capable transport (0) */
+#define IPTOS_ECN_CE 0x03 /* congestion experienced */
+#define IPTOS_ECN_MASK 0x03 /* ECN field mask */
+
+/*
+ * Definitions for options.
+ */
+#define IPOPT_COPIED(o) ((o)&0x80)
+#define IPOPT_CLASS(o) ((o)&0x60)
+#define IPOPT_NUMBER(o) ((o)&0x1f)
+
+#define IPOPT_CONTROL 0x00
+#define IPOPT_RESERVED1 0x20
+#define IPOPT_DEBMEAS 0x40
+#define IPOPT_RESERVED2 0x60
+
+#define IPOPT_EOL 0 /* end of option list */
+#define IPOPT_NOP 1 /* no operation */
+
+#define IPOPT_RR 7 /* record packet route */
+#define IPOPT_TS 68 /* timestamp */
+#define IPOPT_SECURITY 130 /* provide s,c,h,tcc */
+#define IPOPT_LSRR 131 /* loose source route */
+#define IPOPT_ESO 133 /* extended security */
+#define	IPOPT_CIPSO		134		/* commercial security */
+#define IPOPT_SATID 136 /* satnet id */
+#define IPOPT_SSRR 137 /* strict source route */
+#define IPOPT_RA 148 /* router alert */
+
+/*
+ * Offsets to fields in options other than EOL and NOP.
+ */
+#define IPOPT_OPTVAL 0 /* option ID */
+#define IPOPT_OLEN 1 /* option length */
+#define IPOPT_OFFSET 2 /* offset within option */
+#define IPOPT_MINOFF 4 /* min value of above */
+
+/*
+ * Time stamp option structure.
+ */
+struct ip_timestamp {
+ u_char ipt_code; /* IPOPT_TS */
+ u_char ipt_len; /* size of structure (variable) */
+ u_char ipt_ptr; /* index of current entry */
+#if BYTE_ORDER == LITTLE_ENDIAN
+ u_int ipt_flg:4, /* flags, see below */
+ ipt_oflw:4; /* overflow counter */
+#endif
+#if BYTE_ORDER == BIG_ENDIAN
+ u_int ipt_oflw:4, /* overflow counter */
+ ipt_flg:4; /* flags, see below */
+#endif
+ union ipt_timestamp {
+ uint32_t ipt_time[1]; /* network format */
+ struct ipt_ta {
+ struct in_addr ipt_addr;
+ uint32_t ipt_time; /* network format */
+ } ipt_ta[1];
+ } ipt_timestamp;
+};
+
+/* Flag bits for ipt_flg. */
+#define IPOPT_TS_TSONLY 0 /* timestamps only */
+#define IPOPT_TS_TSANDADDR 1 /* timestamps and addresses */
+#define IPOPT_TS_PRESPEC 3 /* specified modules only */
+
+/* Bits for security (not byte swapped). */
+#define IPOPT_SECUR_UNCLASS 0x0000
+#define IPOPT_SECUR_CONFID 0xf135
+#define IPOPT_SECUR_EFTO 0x789a
+#define IPOPT_SECUR_MMMM 0xbc4d
+#define IPOPT_SECUR_RESTR 0xaf13
+#define IPOPT_SECUR_SECRET 0xd788
+#define IPOPT_SECUR_TOPSECRET 0x6bc5
+
+/*
+ * Internet implementation parameters.
+ */
+#define MAXTTL 255 /* maximum time to live (seconds) */
+#define IPDEFTTL 64 /* default ttl, from RFC 1340 */
+#define IPFRAGTTL 60 /* time to live for frags, slowhz */
+#define IPTTLDEC 1 /* subtracted when forwarding */
+#define IP_MSS 576 /* default maximum segment size */
+
+/*
+ * This is the real IPv4 pseudo header, used for computing the TCP and UDP
+ * checksums. For the Internet checksum, struct ipovly can be used instead.
+ * For stronger checksums, the real thing must be used.
+ */
+struct ippseudo {
+ struct in_addr ippseudo_src; /* source internet address */
+ struct in_addr ippseudo_dst; /* destination internet address */
+ u_char ippseudo_pad; /* pad, must be zero */
+ u_char ippseudo_p; /* protocol */
+ u_short ippseudo_len; /* protocol length */
+};
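+
+/*
+ * Usage sketch (illustrative, not part of the original file): for a
+ * UDP checksum the pseudo header is filled from the enclosing IP
+ * header; udp_len below is a hypothetical name for the UDP header
+ * plus data length:
+ *
+ *	struct ippseudo ipp;
+ *
+ *	ipp.ippseudo_src = ip->ip_src;
+ *	ipp.ippseudo_dst = ip->ip_dst;
+ *	ipp.ippseudo_pad = 0;
+ *	ipp.ippseudo_p = IPPROTO_UDP;
+ *	ipp.ippseudo_len = htons(udp_len);
+ */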
+#endif /* _NETINET_IP_HH_ */
diff --git a/rtems/freebsd/netinet/ip6.h b/rtems/freebsd/netinet/ip6.h
new file mode 100644
index 00000000..26270d25
--- /dev/null
+++ b/rtems/freebsd/netinet/ip6.h
@@ -0,0 +1,352 @@
+/* $FreeBSD$ */
+/* $KAME: ip6.h,v 1.18 2001/03/29 05:34:30 itojun Exp $ */
+
+/*-
+ * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the project nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*-
+ * Copyright (c) 1982, 1986, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)ip.h 8.1 (Berkeley) 6/10/93
+ */
+
+#ifndef _NETINET_IP6_HH_
+#define _NETINET_IP6_HH_
+
+/*
+ * Definition for internet protocol version 6.
+ * RFC 2460
+ */
+
+struct ip6_hdr {
+ union {
+ struct ip6_hdrctl {
+ u_int32_t ip6_un1_flow; /* 20 bits of flow-ID */
+ u_int16_t ip6_un1_plen; /* payload length */
+ u_int8_t ip6_un1_nxt; /* next header */
+ u_int8_t ip6_un1_hlim; /* hop limit */
+ } ip6_un1;
+ u_int8_t ip6_un2_vfc; /* 4 bits version, top 4 bits class */
+ } ip6_ctlun;
+ struct in6_addr ip6_src; /* source address */
+ struct in6_addr ip6_dst; /* destination address */
+} __packed;
+
+#define ip6_vfc ip6_ctlun.ip6_un2_vfc
+#define ip6_flow ip6_ctlun.ip6_un1.ip6_un1_flow
+#define ip6_plen ip6_ctlun.ip6_un1.ip6_un1_plen
+#define ip6_nxt ip6_ctlun.ip6_un1.ip6_un1_nxt
+#define ip6_hlim ip6_ctlun.ip6_un1.ip6_un1_hlim
+#define ip6_hops ip6_ctlun.ip6_un1.ip6_un1_hlim
+
+#define IPV6_VERSION 0x60
+#define IPV6_VERSION_MASK 0xf0
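+
+/*
+ * Illustrative sketch (not part of the original header): a receiver can
+ * validate the version nibble using the two macros above, e.g.:
+ *
+ *	if ((ip6->ip6_vfc & IPV6_VERSION_MASK) != IPV6_VERSION)
+ *		goto bad;
+ */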
+
+#if BYTE_ORDER == BIG_ENDIAN
+#define IPV6_FLOWINFO_MASK 0x0fffffff /* flow info (28 bits) */
+#define IPV6_FLOWLABEL_MASK 0x000fffff /* flow label (20 bits) */
+#else
+#if BYTE_ORDER == LITTLE_ENDIAN
+#define IPV6_FLOWINFO_MASK 0xffffff0f /* flow info (28 bits) */
+#define IPV6_FLOWLABEL_MASK 0xffff0f00 /* flow label (20 bits) */
+#endif /* LITTLE_ENDIAN */
+#endif
+#if 1
+/* ECN bits proposed by Sally Floyd */
+#define IP6TOS_CE 0x01 /* congestion experienced */
+#define IP6TOS_ECT 0x02 /* ECN-capable transport */
+#endif
+
+/*
+ * Extension Headers
+ */
+
+struct ip6_ext {
+ u_int8_t ip6e_nxt;
+ u_int8_t ip6e_len;
+} __packed;
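+
+/*
+ * Illustrative sketch (not from the original file): generic extension
+ * headers (hop-by-hop, destination options, routing) share this layout,
+ * and ip6e_len counts 8-octet units beyond the first 8 octets, so a
+ * chain walker (bounds checks omitted) advances by (ip6e_len + 1) << 3:
+ *
+ *	struct ip6_ext *ext = (struct ip6_ext *)(ip6 + 1);
+ *	u_int8_t nxt = ip6->ip6_nxt;
+ *
+ *	while (nxt == IPPROTO_HOPOPTS || nxt == IPPROTO_DSTOPTS ||
+ *	    nxt == IPPROTO_ROUTING) {
+ *		nxt = ext->ip6e_nxt;
+ *		ext = (struct ip6_ext *)((caddr_t)ext +
+ *		    ((ext->ip6e_len + 1) << 3));
+ *	}
+ */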
+
+/* Hop-by-Hop options header */
+/* XXX should we pad it to force alignment on an 8-byte boundary? */
+struct ip6_hbh {
+ u_int8_t ip6h_nxt; /* next header */
+ u_int8_t ip6h_len; /* length in units of 8 octets */
+ /* followed by options */
+} __packed;
+
+/* Destination options header */
+/* XXX should we pad it to force alignment on an 8-byte boundary? */
+struct ip6_dest {
+ u_int8_t ip6d_nxt; /* next header */
+ u_int8_t ip6d_len; /* length in units of 8 octets */
+ /* followed by options */
+} __packed;
+
+/* Option types and related macros */
+#define IP6OPT_PAD1 0x00 /* 00 0 00000 */
+#define IP6OPT_PADN 0x01 /* 00 0 00001 */
+#define IP6OPT_JUMBO 0xC2 /* 11 0 00010 = 194 */
+#define IP6OPT_NSAP_ADDR 0xC3 /* 11 0 00011 */
+#define IP6OPT_TUNNEL_LIMIT 0x04 /* 00 0 00100 */
+#ifndef _KERNEL
+#define IP6OPT_RTALERT 0x05 /* 00 0 00101 (KAME definition) */
+#endif
+#define IP6OPT_ROUTER_ALERT 0x05 /* 00 0 00101 (RFC3542, recommended) */
+
+#define IP6OPT_RTALERT_LEN 4
+#define IP6OPT_RTALERT_MLD 0 /* Datagram contains an MLD message */
+#define IP6OPT_RTALERT_RSVP 1 /* Datagram contains an RSVP message */
+#define IP6OPT_RTALERT_ACTNET 2 /* contains an Active Networks msg */
+#define IP6OPT_MINLEN 2
+
+#define IP6OPT_EID 0x8a /* 10 0 01010 */
+
+#define IP6OPT_TYPE(o) ((o) & 0xC0)
+#define IP6OPT_TYPE_SKIP 0x00
+#define IP6OPT_TYPE_DISCARD 0x40
+#define IP6OPT_TYPE_FORCEICMP 0x80
+#define IP6OPT_TYPE_ICMP 0xC0
+
+#define IP6OPT_MUTABLE 0x20
+
+/* IPv6 options: common part */
+struct ip6_opt {
+ u_int8_t ip6o_type;
+ u_int8_t ip6o_len;
+} __packed;
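+
+/*
+ * Illustrative sketch (not from the original file): options inside a
+ * hop-by-hop or destination options header are TLV encoded, and
+ * IP6OPT_PAD1 is the only option without a length byte.  Unknown options
+ * are skipped, dropped, or answered with ICMP6 according to
+ * IP6OPT_TYPE() of the type octet; a minimal walker (bounds checks
+ * omitted) that stops on anything it may not skip:
+ *
+ *	u_int8_t *p = opts, *end = opts + optslen;
+ *
+ *	while (p < end) {
+ *		if (*p == IP6OPT_PAD1) {
+ *			p++;
+ *			continue;
+ *		}
+ *		if (IP6OPT_TYPE(*p) != IP6OPT_TYPE_SKIP)
+ *			break;
+ *		p += 2 + p[1];
+ *	}
+ */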
+
+/* Jumbo Payload Option */
+struct ip6_opt_jumbo {
+ u_int8_t ip6oj_type;
+ u_int8_t ip6oj_len;
+ u_int8_t ip6oj_jumbo_len[4];
+} __packed;
+#define IP6OPT_JUMBO_LEN 6
+
+/* NSAP Address Option */
+struct ip6_opt_nsap {
+ u_int8_t ip6on_type;
+ u_int8_t ip6on_len;
+ u_int8_t ip6on_src_nsap_len;
+ u_int8_t ip6on_dst_nsap_len;
+ /* followed by source NSAP */
+ /* followed by destination NSAP */
+} __packed;
+
+/* Tunnel Limit Option */
+struct ip6_opt_tunnel {
+ u_int8_t ip6ot_type;
+ u_int8_t ip6ot_len;
+ u_int8_t ip6ot_encap_limit;
+} __packed;
+
+/* Router Alert Option */
+struct ip6_opt_router {
+ u_int8_t ip6or_type;
+ u_int8_t ip6or_len;
+ u_int8_t ip6or_value[2];
+} __packed;
+/* Router alert values (in network byte order) */
+#if BYTE_ORDER == BIG_ENDIAN
+#define IP6_ALERT_MLD 0x0000
+#define IP6_ALERT_RSVP 0x0001
+#define IP6_ALERT_AN 0x0002
+#else
+#if BYTE_ORDER == LITTLE_ENDIAN
+#define IP6_ALERT_MLD 0x0000
+#define IP6_ALERT_RSVP 0x0100
+#define IP6_ALERT_AN 0x0200
+#endif /* LITTLE_ENDIAN */
+#endif
+
+/* Routing header */
+struct ip6_rthdr {
+ u_int8_t ip6r_nxt; /* next header */
+ u_int8_t ip6r_len; /* length in units of 8 octets */
+ u_int8_t ip6r_type; /* routing type */
+ u_int8_t ip6r_segleft; /* segments left */
+ /* followed by routing type specific data */
+} __packed;
+
+/* Type 0 Routing header, deprecated by RFC 5095. */
+struct ip6_rthdr0 {
+ u_int8_t ip6r0_nxt; /* next header */
+ u_int8_t ip6r0_len; /* length in units of 8 octets */
+ u_int8_t ip6r0_type; /* always zero */
+ u_int8_t ip6r0_segleft; /* segments left */
+ u_int32_t ip6r0_reserved; /* reserved field */
+ /* followed by up to 127 struct in6_addr */
+} __packed;
+
+/* Fragment header */
+struct ip6_frag {
+ u_int8_t ip6f_nxt; /* next header */
+ u_int8_t ip6f_reserved; /* reserved field */
+ u_int16_t ip6f_offlg; /* offset, reserved, and flag */
+ u_int32_t ip6f_ident; /* identification */
+} __packed;
+
+#if BYTE_ORDER == BIG_ENDIAN
+#define IP6F_OFF_MASK 0xfff8 /* mask out offset from _offlg */
+#define IP6F_RESERVED_MASK 0x0006 /* reserved bits in ip6f_offlg */
+#define IP6F_MORE_FRAG 0x0001 /* more-fragments flag */
+#else /* BYTE_ORDER == LITTLE_ENDIAN */
+#define IP6F_OFF_MASK 0xf8ff /* mask out offset from _offlg */
+#define IP6F_RESERVED_MASK 0x0600 /* reserved bits in ip6f_offlg */
+#define IP6F_MORE_FRAG 0x0100 /* more-fragments flag */
+#endif /* BYTE_ORDER == LITTLE_ENDIAN */
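+
+/*
+ * Illustrative sketch (not from the original file): the three masks above
+ * are byte-order adjusted so that they can be applied to ip6f_offlg while
+ * it is still in network byte order, e.g.:
+ *
+ *	u_int16_t fragoff = ntohs(ip6f->ip6f_offlg & IP6F_OFF_MASK);
+ *	int morefrag = (ip6f->ip6f_offlg & IP6F_MORE_FRAG) != 0;
+ */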
+
+/*
+ * Internet implementation parameters.
+ */
+#define IPV6_MAXHLIM 255 /* maximum hoplimit */
+#define IPV6_DEFHLIM 64 /* default hlim */
+#define IPV6_FRAGTTL 120 /* ttl for fragment packets, in slowtimo tick */
+#define IPV6_HLIMDEC 1 /* subtracted when forwarding */
+
+#define IPV6_MMTU 1280 /* minimal MTU and reassembly. 1024 + 256 */
+#define IPV6_MAXPACKET 65535 /* ip6 max packet size without Jumbo payload*/
+#define IPV6_MAXOPTHDR 2048 /* max option header size, 256 64-bit words */
+
+#ifdef _KERNEL
+/*
+ * IP6_EXTHDR_CHECK ensures that the region between the IP6 header and the
+ * target header (including the IPv6 header itself, extension headers and
+ * TCP/UDP/ICMP6 headers) is contiguous. KAME requires drivers
+ * to store incoming data in one internal mbuf or in one or more external
+ * mbufs (never in two or more internal mbufs). Thus, the third case is
+ * never supposed to match but is handled just in case.
+ */
+
+#define IP6_EXTHDR_CHECK(m, off, hlen, ret) \
+do { \
+ if ((m)->m_next != NULL) { \
+ if (((m)->m_flags & M_LOOP) && \
+ ((m)->m_len < (off) + (hlen)) && \
+ (((m) = m_pullup((m), (off) + (hlen))) == NULL)) { \
+ V_ip6stat.ip6s_exthdrtoolong++; \
+ return ret; \
+ } else if ((m)->m_flags & M_EXT) { \
+ if ((m)->m_len < (off) + (hlen)) { \
+ V_ip6stat.ip6s_exthdrtoolong++; \
+ m_freem(m); \
+ return ret; \
+ } \
+ } else { \
+ if ((m)->m_len < (off) + (hlen)) { \
+ V_ip6stat.ip6s_exthdrtoolong++; \
+ m_freem(m); \
+ return ret; \
+ } \
+ } \
+ } else { \
+ if ((m)->m_len < (off) + (hlen)) { \
+ V_ip6stat.ip6s_tooshort++; \
+ in6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_truncated); \
+ m_freem(m); \
+ return ret; \
+ } \
+ } \
+} while (/*CONSTCOND*/ 0)
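+
+/*
+ * Illustrative usage sketch (not from the original file): a transport
+ * input routine invokes the check before casting into the mbuf data,
+ * passing the value it must return on failure:
+ *
+ *	IP6_EXTHDR_CHECK(m, off, sizeof(struct tcphdr), IPPROTO_DONE);
+ *	th = (struct tcphdr *)(mtod(m, caddr_t) + off);
+ */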
+
+/*
+ * IP6_EXTHDR_GET ensures that the intermediate protocol header (at offset
+ * "off", of length "len") is located in a single mbuf, in a contiguous
+ * region of memory.
+ * A pointer to the region is returned in the pointer variable "val",
+ * cast to type "typ".
+ * IP6_EXTHDR_GET0 does the same, except that it aligns the header at the
+ * very top of the mbuf; GET0 is therefore more likely than GET to make a
+ * memory copy.
+ *
+ * XXX we're now testing this, needs m_pulldown()
+ */
+#define IP6_EXTHDR_GET(val, typ, m, off, len) \
+do { \
+ struct mbuf *t; \
+ int tmp; \
+ if ((m)->m_len >= (off) + (len)) \
+ (val) = (typ)(mtod((m), caddr_t) + (off)); \
+ else { \
+ t = m_pulldown((m), (off), (len), &tmp); \
+ if (t) { \
+ if (t->m_len < tmp + (len)) \
+ panic("m_pulldown malfunction"); \
+ (val) = (typ)(mtod(t, caddr_t) + tmp); \
+ } else { \
+ (val) = (typ)NULL; \
+ (m) = NULL; \
+ } \
+ } \
+} while (/*CONSTCOND*/ 0)
+
+#define IP6_EXTHDR_GET0(val, typ, m, off, len) \
+do { \
+ struct mbuf *t; \
+ if ((off) == 0) \
+ (val) = (typ)mtod(m, caddr_t); \
+ else { \
+ t = m_pulldown((m), (off), (len), NULL); \
+ if (t) { \
+ if (t->m_len < (len)) \
+ panic("m_pulldown malfunction"); \
+ (val) = (typ)mtod(t, caddr_t); \
+ } else { \
+ (val) = (typ)NULL; \
+ (m) = NULL; \
+ } \
+ } \
+} while (/*CONSTCOND*/ 0)
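+
+/*
+ * Illustrative usage sketch (not from the original file); carp6_input()
+ * in ip_carp.c uses this same pattern.  On failure m_pulldown() has
+ * already freed the chain and "m" is set to NULL:
+ *
+ *	struct icmp6_hdr *icmp6;
+ *
+ *	IP6_EXTHDR_GET(icmp6, struct icmp6_hdr *, m, off, sizeof(*icmp6));
+ *	if (icmp6 == NULL)
+ *		return (IPPROTO_DONE);
+ */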
+
+#endif /*_KERNEL*/
+
+#endif /* not _NETINET_IP6_HH_ */
diff --git a/rtems/freebsd/netinet/ip_carp.c b/rtems/freebsd/netinet/ip_carp.c
new file mode 100644
index 00000000..e38b6942
--- /dev/null
+++ b/rtems/freebsd/netinet/ip_carp.c
@@ -0,0 +1,2427 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*
+ * Copyright (c) 2002 Michael Shalayeff. All rights reserved.
+ * Copyright (c) 2003 Ryan McBride. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR OR HIS RELATIVES BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF MIND, USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/freebsd/local/opt_bpf.h>
+#include <rtems/freebsd/local/opt_inet.h>
+#include <rtems/freebsd/local/opt_inet6.h>
+
+#include <rtems/freebsd/sys/types.h>
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/conf.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/limits.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/module.h>
+#include <rtems/freebsd/sys/time.h>
+#include <rtems/freebsd/sys/priv.h>
+#include <rtems/freebsd/sys/proc.h>
+#include <rtems/freebsd/sys/protosw.h>
+#include <rtems/freebsd/sys/sysctl.h>
+#include <rtems/freebsd/sys/syslog.h>
+#include <rtems/freebsd/sys/signalvar.h>
+#include <rtems/freebsd/sys/filio.h>
+#include <rtems/freebsd/sys/sockio.h>
+
+#include <rtems/freebsd/sys/socket.h>
+#ifndef __rtems__
+#include <rtems/freebsd/sys/vnode.h>
+#endif
+
+#include <rtems/freebsd/machine/stdarg.h>
+
+#include <rtems/freebsd/net/bpf.h>
+#include <rtems/freebsd/net/ethernet.h>
+#include <rtems/freebsd/net/fddi.h>
+#include <rtems/freebsd/net/iso88025.h>
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/if_clone.h>
+#include <rtems/freebsd/net/if_dl.h>
+#include <rtems/freebsd/net/if_types.h>
+#include <rtems/freebsd/net/route.h>
+#include <rtems/freebsd/net/vnet.h>
+
+#ifdef INET
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/in_var.h>
+#include <rtems/freebsd/netinet/in_systm.h>
+#include <rtems/freebsd/netinet/ip.h>
+#include <rtems/freebsd/netinet/ip_var.h>
+#include <rtems/freebsd/netinet/if_ether.h>
+#include <rtems/freebsd/machine/in_cksum.h>
+#endif
+
+#ifdef INET6
+#include <rtems/freebsd/netinet/icmp6.h>
+#include <rtems/freebsd/netinet/ip6.h>
+#include <rtems/freebsd/netinet6/ip6protosw.h>
+#include <rtems/freebsd/netinet6/ip6_var.h>
+#include <rtems/freebsd/netinet6/scope6_var.h>
+#include <rtems/freebsd/netinet6/nd6.h>
+#endif
+
+#include <rtems/freebsd/crypto/sha1.h>
+#include <rtems/freebsd/netinet/ip_carp.h>
+
+#define CARP_IFNAME "carp"
+static MALLOC_DEFINE(M_CARP, "CARP", "CARP interfaces");
+SYSCTL_DECL(_net_inet_carp);
+
+struct carp_softc {
+ struct ifnet *sc_ifp; /* Interface clue */
+ struct ifnet *sc_carpdev; /* Pointer to parent interface */
+ struct in_ifaddr *sc_ia; /* primary iface address */
+ struct ip_moptions sc_imo;
+#ifdef INET6
+ struct in6_ifaddr *sc_ia6; /* primary iface address v6 */
+ struct ip6_moptions sc_im6o;
+#endif /* INET6 */
+ TAILQ_ENTRY(carp_softc) sc_list;
+
+ enum { INIT = 0, BACKUP, MASTER } sc_state;
+
+ int sc_flags_backup;
+ int sc_suppress;
+
+ int sc_sendad_errors;
+#define CARP_SENDAD_MAX_ERRORS 3
+ int sc_sendad_success;
+#define CARP_SENDAD_MIN_SUCCESS 3
+
+ int sc_vhid;
+ int sc_advskew;
+ int sc_naddrs;
+ int sc_naddrs6;
+ int sc_advbase; /* seconds */
+ int sc_init_counter;
+ u_int64_t sc_counter;
+
+ /* authentication */
+#define CARP_HMAC_PAD 64
+ unsigned char sc_key[CARP_KEY_LEN];
+ unsigned char sc_pad[CARP_HMAC_PAD];
+ SHA1_CTX sc_sha1;
+
+ struct callout sc_ad_tmo; /* advertisement timeout */
+ struct callout sc_md_tmo; /* master down timeout */
+ struct callout sc_md6_tmo; /* master down timeout */
+
+ LIST_ENTRY(carp_softc) sc_next; /* Interface clue */
+};
+#define SC2IFP(sc) ((sc)->sc_ifp)
+
+int carp_suppress_preempt = 0;
+int carp_opts[CARPCTL_MAXID] = { 0, 1, 0, 1, 0, 0 }; /* XXX for now */
+SYSCTL_NODE(_net_inet, IPPROTO_CARP, carp, CTLFLAG_RW, 0, "CARP");
+SYSCTL_INT(_net_inet_carp, CARPCTL_ALLOW, allow, CTLFLAG_RW,
+ &carp_opts[CARPCTL_ALLOW], 0, "Accept incoming CARP packets");
+SYSCTL_INT(_net_inet_carp, CARPCTL_PREEMPT, preempt, CTLFLAG_RW,
+ &carp_opts[CARPCTL_PREEMPT], 0, "high-priority backup preemption mode");
+SYSCTL_INT(_net_inet_carp, CARPCTL_LOG, log, CTLFLAG_RW,
+ &carp_opts[CARPCTL_LOG], 0, "log bad carp packets");
+SYSCTL_INT(_net_inet_carp, CARPCTL_ARPBALANCE, arpbalance, CTLFLAG_RW,
+ &carp_opts[CARPCTL_ARPBALANCE], 0, "balance arp responses");
+SYSCTL_INT(_net_inet_carp, OID_AUTO, suppress_preempt, CTLFLAG_RD,
+ &carp_suppress_preempt, 0, "Preemption is suppressed");
+
+struct carpstats carpstats;
+SYSCTL_STRUCT(_net_inet_carp, CARPCTL_STATS, stats, CTLFLAG_RW,
+ &carpstats, carpstats,
+ "CARP statistics (struct carpstats, netinet/ip_carp.h)");
+
+struct carp_if {
+ TAILQ_HEAD(, carp_softc) vhif_vrs;
+ int vhif_nvrs;
+
+ struct ifnet *vhif_ifp;
+ struct mtx vhif_mtx;
+};
+
+#define CARP_INET 0
+#define CARP_INET6 1
+static int proto_reg[] = {-1, -1};
+
+/* Get carp_if from softc. Valid after carp_set_addr{,6}. */
+#define SC2CIF(sc) ((struct carp_if *)(sc)->sc_carpdev->if_carp)
+
+/* lock per carp_if queue */
+#define CARP_LOCK_INIT(cif) mtx_init(&(cif)->vhif_mtx, "carp_if", \
+ NULL, MTX_DEF)
+#define CARP_LOCK_DESTROY(cif) mtx_destroy(&(cif)->vhif_mtx)
+#define CARP_LOCK_ASSERT(cif) mtx_assert(&(cif)->vhif_mtx, MA_OWNED)
+#define CARP_LOCK(cif) mtx_lock(&(cif)->vhif_mtx)
+#define CARP_UNLOCK(cif) mtx_unlock(&(cif)->vhif_mtx)
+
+#define CARP_SCLOCK(sc) mtx_lock(&SC2CIF(sc)->vhif_mtx)
+#define CARP_SCUNLOCK(sc) mtx_unlock(&SC2CIF(sc)->vhif_mtx)
+#define CARP_SCLOCK_ASSERT(sc) mtx_assert(&SC2CIF(sc)->vhif_mtx, MA_OWNED)
+
+#define CARP_LOG(...) do { \
+ if (carp_opts[CARPCTL_LOG] > 0) \
+ log(LOG_INFO, __VA_ARGS__); \
+} while (0)
+
+#define CARP_DEBUG(...) do { \
+ if (carp_opts[CARPCTL_LOG] > 1) \
+ log(LOG_DEBUG, __VA_ARGS__); \
+} while (0)
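+
+/* CARP_LOG fires when net.inet.carp.log > 0, CARP_DEBUG when it is > 1. */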
+
+static void carp_hmac_prepare(struct carp_softc *);
+static void carp_hmac_generate(struct carp_softc *, u_int32_t *,
+ unsigned char *);
+static int carp_hmac_verify(struct carp_softc *, u_int32_t *,
+ unsigned char *);
+static void carp_setroute(struct carp_softc *, int);
+static void carp_input_c(struct mbuf *, struct carp_header *, sa_family_t);
+static int carp_clone_create(struct if_clone *, int, caddr_t);
+static void carp_clone_destroy(struct ifnet *);
+static void carpdetach(struct carp_softc *, int);
+static int carp_prepare_ad(struct mbuf *, struct carp_softc *,
+ struct carp_header *);
+static void carp_send_ad_all(void);
+static void carp_send_ad(void *);
+static void carp_send_ad_locked(struct carp_softc *);
+static void carp_send_arp(struct carp_softc *);
+static void carp_master_down(void *);
+static void carp_master_down_locked(struct carp_softc *);
+static int carp_ioctl(struct ifnet *, u_long, caddr_t);
+static int carp_looutput(struct ifnet *, struct mbuf *, struct sockaddr *,
+ struct route *);
+static void carp_start(struct ifnet *);
+static void carp_setrun(struct carp_softc *, sa_family_t);
+static void carp_set_state(struct carp_softc *, int);
+static int carp_addrcount(struct carp_if *, struct in_ifaddr *, int);
+enum { CARP_COUNT_MASTER, CARP_COUNT_RUNNING };
+
+static void carp_multicast_cleanup(struct carp_softc *);
+static int carp_set_addr(struct carp_softc *, struct sockaddr_in *);
+static int carp_del_addr(struct carp_softc *, struct sockaddr_in *);
+static void carp_carpdev_state_locked(struct carp_if *);
+static void carp_sc_state_locked(struct carp_softc *);
+#ifdef INET6
+static void carp_send_na(struct carp_softc *);
+static int carp_set_addr6(struct carp_softc *, struct sockaddr_in6 *);
+static int carp_del_addr6(struct carp_softc *, struct sockaddr_in6 *);
+static void carp_multicast6_cleanup(struct carp_softc *);
+#endif
+
+static LIST_HEAD(, carp_softc) carpif_list;
+static struct mtx carp_mtx;
+IFC_SIMPLE_DECLARE(carp, 0);
+
+static eventhandler_tag if_detach_event_tag;
+
+static __inline u_int16_t
+carp_cksum(struct mbuf *m, int len)
+{
+ return (in_cksum(m, len));
+}
+
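+/*
+ * carp_hmac_prepare() precomputes SHA1(key ^ ipad || version || type ||
+ * vhid || sorted addresses), the inner-hash prefix of an RFC 2104 HMAC,
+ * so that carp_hmac_generate() below only has to hash the 64-bit counter
+ * and apply the outer pad for each advertisement.
+ */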
+static void
+carp_hmac_prepare(struct carp_softc *sc)
+{
+ u_int8_t version = CARP_VERSION, type = CARP_ADVERTISEMENT;
+ u_int8_t vhid = sc->sc_vhid & 0xff;
+ struct ifaddr *ifa;
+ int i, found;
+#ifdef INET
+ struct in_addr last, cur, in;
+#endif
+#ifdef INET6
+ struct in6_addr last6, cur6, in6;
+#endif
+
+ if (sc->sc_carpdev)
+ CARP_SCLOCK(sc);
+
+ /* XXX: possible race here */
+
+ /* compute ipad from key */
+ bzero(sc->sc_pad, sizeof(sc->sc_pad));
+ bcopy(sc->sc_key, sc->sc_pad, sizeof(sc->sc_key));
+ for (i = 0; i < sizeof(sc->sc_pad); i++)
+ sc->sc_pad[i] ^= 0x36;
+
+ /* precompute first part of inner hash */
+ SHA1Init(&sc->sc_sha1);
+ SHA1Update(&sc->sc_sha1, sc->sc_pad, sizeof(sc->sc_pad));
+ SHA1Update(&sc->sc_sha1, (void *)&version, sizeof(version));
+ SHA1Update(&sc->sc_sha1, (void *)&type, sizeof(type));
+ SHA1Update(&sc->sc_sha1, (void *)&vhid, sizeof(vhid));
+#ifdef INET
+ cur.s_addr = 0;
+ do {
+ found = 0;
+ last = cur;
+ cur.s_addr = 0xffffffff;
+ IF_ADDR_LOCK(SC2IFP(sc));
+ TAILQ_FOREACH(ifa, &SC2IFP(sc)->if_addrlist, ifa_list) {
+ in.s_addr = ifatoia(ifa)->ia_addr.sin_addr.s_addr;
+ if (ifa->ifa_addr->sa_family == AF_INET &&
+ ntohl(in.s_addr) > ntohl(last.s_addr) &&
+ ntohl(in.s_addr) < ntohl(cur.s_addr)) {
+ cur.s_addr = in.s_addr;
+ found++;
+ }
+ }
+ IF_ADDR_UNLOCK(SC2IFP(sc));
+ if (found)
+ SHA1Update(&sc->sc_sha1, (void *)&cur, sizeof(cur));
+ } while (found);
+#endif /* INET */
+#ifdef INET6
+ memset(&cur6, 0, sizeof(cur6));
+ do {
+ found = 0;
+ last6 = cur6;
+ memset(&cur6, 0xff, sizeof(cur6));
+ IF_ADDR_LOCK(SC2IFP(sc));
+ TAILQ_FOREACH(ifa, &SC2IFP(sc)->if_addrlist, ifa_list) {
+ in6 = ifatoia6(ifa)->ia_addr.sin6_addr;
+ if (IN6_IS_SCOPE_EMBED(&in6))
+ in6.s6_addr16[1] = 0;
+ if (ifa->ifa_addr->sa_family == AF_INET6 &&
+ memcmp(&in6, &last6, sizeof(in6)) > 0 &&
+ memcmp(&in6, &cur6, sizeof(in6)) < 0) {
+ cur6 = in6;
+ found++;
+ }
+ }
+ IF_ADDR_UNLOCK(SC2IFP(sc));
+ if (found)
+ SHA1Update(&sc->sc_sha1, (void *)&cur6, sizeof(cur6));
+ } while (found);
+#endif /* INET6 */
+
+	/*
+	 * convert ipad to opad: sc_pad currently holds key ^ 0x36, so
+	 * XORing with (0x36 ^ 0x5c) leaves key ^ 0x5c in place.
+	 */
+ for (i = 0; i < sizeof(sc->sc_pad); i++)
+ sc->sc_pad[i] ^= 0x36 ^ 0x5c;
+
+ if (sc->sc_carpdev)
+ CARP_SCUNLOCK(sc);
+}
+
+static void
+carp_hmac_generate(struct carp_softc *sc, u_int32_t counter[2],
+ unsigned char md[20])
+{
+ SHA1_CTX sha1ctx;
+
+ /* fetch first half of inner hash */
+ bcopy(&sc->sc_sha1, &sha1ctx, sizeof(sha1ctx));
+
+ SHA1Update(&sha1ctx, (void *)counter, sizeof(sc->sc_counter));
+ SHA1Final(md, &sha1ctx);
+
+ /* outer hash */
+ SHA1Init(&sha1ctx);
+ SHA1Update(&sha1ctx, sc->sc_pad, sizeof(sc->sc_pad));
+ SHA1Update(&sha1ctx, md, 20);
+ SHA1Final(md, &sha1ctx);
+}
+
+static int
+carp_hmac_verify(struct carp_softc *sc, u_int32_t counter[2],
+ unsigned char md[20])
+{
+ unsigned char md2[20];
+
+ CARP_SCLOCK_ASSERT(sc);
+
+ carp_hmac_generate(sc, counter, md2);
+
+ return (bcmp(md, md2, sizeof(md2)));
+}
+
+static void
+carp_setroute(struct carp_softc *sc, int cmd)
+{
+ struct ifaddr *ifa;
+ int s;
+
+ if (sc->sc_carpdev)
+ CARP_SCLOCK_ASSERT(sc);
+
+ s = splnet();
+ TAILQ_FOREACH(ifa, &SC2IFP(sc)->if_addrlist, ifa_list) {
+ if (ifa->ifa_addr->sa_family == AF_INET &&
+ sc->sc_carpdev != NULL) {
+ int count = carp_addrcount(
+ (struct carp_if *)sc->sc_carpdev->if_carp,
+ ifatoia(ifa), CARP_COUNT_MASTER);
+
+ if ((cmd == RTM_ADD && count == 1) ||
+ (cmd == RTM_DELETE && count == 0))
+ rtinit(ifa, cmd, RTF_UP | RTF_HOST);
+ }
+ }
+ splx(s);
+}
+
+static int
+carp_clone_create(struct if_clone *ifc, int unit, caddr_t params)
+{
+ struct carp_softc *sc;
+ struct ifnet *ifp;
+
+ sc = malloc(sizeof(*sc), M_CARP, M_WAITOK|M_ZERO);
+ ifp = SC2IFP(sc) = if_alloc(IFT_ETHER);
+ if (ifp == NULL) {
+ free(sc, M_CARP);
+ return (ENOSPC);
+ }
+
+ sc->sc_flags_backup = 0;
+ sc->sc_suppress = 0;
+ sc->sc_advbase = CARP_DFLTINTV;
+ sc->sc_vhid = -1; /* required setting */
+ sc->sc_advskew = 0;
+ sc->sc_init_counter = 1;
+ sc->sc_naddrs = sc->sc_naddrs6 = 0; /* M_ZERO? */
+ sc->sc_imo.imo_membership = (struct in_multi **)malloc(
+ (sizeof(struct in_multi *) * IP_MIN_MEMBERSHIPS), M_CARP,
+ M_WAITOK);
+ sc->sc_imo.imo_mfilters = NULL;
+ sc->sc_imo.imo_max_memberships = IP_MIN_MEMBERSHIPS;
+ sc->sc_imo.imo_multicast_vif = -1;
+#ifdef INET6
+ sc->sc_im6o.im6o_membership = (struct in6_multi **)malloc(
+ (sizeof(struct in6_multi *) * IPV6_MIN_MEMBERSHIPS), M_CARP,
+ M_WAITOK);
+ sc->sc_im6o.im6o_mfilters = NULL;
+ sc->sc_im6o.im6o_max_memberships = IPV6_MIN_MEMBERSHIPS;
+ sc->sc_im6o.im6o_multicast_hlim = CARP_DFLTTL;
+#endif
+
+ callout_init(&sc->sc_ad_tmo, CALLOUT_MPSAFE);
+ callout_init(&sc->sc_md_tmo, CALLOUT_MPSAFE);
+ callout_init(&sc->sc_md6_tmo, CALLOUT_MPSAFE);
+
+ ifp->if_softc = sc;
+ if_initname(ifp, CARP_IFNAME, unit);
+ ifp->if_mtu = ETHERMTU;
+ ifp->if_flags = IFF_LOOPBACK;
+ ifp->if_ioctl = carp_ioctl;
+ ifp->if_output = carp_looutput;
+ ifp->if_start = carp_start;
+ ifp->if_type = IFT_CARP;
+ ifp->if_snd.ifq_maxlen = ifqmaxlen;
+ ifp->if_hdrlen = 0;
+ if_attach(ifp);
+ bpfattach(SC2IFP(sc), DLT_NULL, sizeof(u_int32_t));
+ mtx_lock(&carp_mtx);
+ LIST_INSERT_HEAD(&carpif_list, sc, sc_next);
+ mtx_unlock(&carp_mtx);
+ return (0);
+}
+
+static void
+carp_clone_destroy(struct ifnet *ifp)
+{
+ struct carp_softc *sc = ifp->if_softc;
+
+ if (sc->sc_carpdev)
+ CARP_SCLOCK(sc);
+ carpdetach(sc, 1); /* Returns unlocked. */
+
+ mtx_lock(&carp_mtx);
+ LIST_REMOVE(sc, sc_next);
+ mtx_unlock(&carp_mtx);
+ bpfdetach(ifp);
+ if_detach(ifp);
+ if_free_type(ifp, IFT_ETHER);
+ free(sc->sc_imo.imo_membership, M_CARP);
+#ifdef INET6
+ free(sc->sc_im6o.im6o_membership, M_CARP);
+#endif
+ free(sc, M_CARP);
+}
+
+/*
+ * This function can be called on the CARP interface destroy path, and
+ * also when the underlying interface is removed. We differentiate
+ * these two cases. In the latter case we do not clean up our multicast
+ * memberships, since they have already been freed. Also, in the latter
+ * case we do not release the lock on return, because the function will
+ * be called once more, for another CARP instance on the same
+ * interface.
+ */
+static void
+carpdetach(struct carp_softc *sc, int unlock)
+{
+ struct carp_if *cif;
+
+ callout_stop(&sc->sc_ad_tmo);
+ callout_stop(&sc->sc_md_tmo);
+ callout_stop(&sc->sc_md6_tmo);
+
+ if (sc->sc_suppress)
+ carp_suppress_preempt--;
+ sc->sc_suppress = 0;
+
+ if (sc->sc_sendad_errors >= CARP_SENDAD_MAX_ERRORS)
+ carp_suppress_preempt--;
+ sc->sc_sendad_errors = 0;
+
+ carp_set_state(sc, INIT);
+ SC2IFP(sc)->if_flags &= ~IFF_UP;
+ carp_setrun(sc, 0);
+ if (unlock)
+ carp_multicast_cleanup(sc);
+#ifdef INET6
+ carp_multicast6_cleanup(sc);
+#endif
+
+ if (sc->sc_carpdev != NULL) {
+ cif = (struct carp_if *)sc->sc_carpdev->if_carp;
+ CARP_LOCK_ASSERT(cif);
+ TAILQ_REMOVE(&cif->vhif_vrs, sc, sc_list);
+ if (!--cif->vhif_nvrs) {
+ ifpromisc(sc->sc_carpdev, 0);
+ sc->sc_carpdev->if_carp = NULL;
+ CARP_LOCK_DESTROY(cif);
+ free(cif, M_CARP);
+ } else if (unlock)
+ CARP_UNLOCK(cif);
+ sc->sc_carpdev = NULL;
+ }
+}
+
+/* Detach an interface from the carp. */
+static void
+carp_ifdetach(void *arg __unused, struct ifnet *ifp)
+{
+ struct carp_if *cif = (struct carp_if *)ifp->if_carp;
+ struct carp_softc *sc, *nextsc;
+
+ if (cif == NULL)
+ return;
+
+ /*
+	 * XXX: At the end of the for() loop the lock will have been destroyed.
+ */
+ CARP_LOCK(cif);
+ for (sc = TAILQ_FIRST(&cif->vhif_vrs); sc; sc = nextsc) {
+ nextsc = TAILQ_NEXT(sc, sc_list);
+ carpdetach(sc, 0);
+ }
+}
+
+/*
+ * Process an input packet.
+ * The checks are ordered differently than in the RFC; this seems
+ * either more efficient or the only order possible.
+ */
+void
+carp_input(struct mbuf *m, int hlen)
+{
+ struct ip *ip = mtod(m, struct ip *);
+ struct carp_header *ch;
+ int iplen, len;
+
+ CARPSTATS_INC(carps_ipackets);
+
+ if (!carp_opts[CARPCTL_ALLOW]) {
+ m_freem(m);
+ return;
+ }
+
+ /* check if received on a valid carp interface */
+ if (m->m_pkthdr.rcvif->if_carp == NULL) {
+ CARPSTATS_INC(carps_badif);
+ CARP_DEBUG("carp_input: packet received on non-carp "
+ "interface: %s\n",
+ m->m_pkthdr.rcvif->if_xname);
+ m_freem(m);
+ return;
+ }
+
+ /* verify that the IP TTL is 255. */
+ if (ip->ip_ttl != CARP_DFLTTL) {
+ CARPSTATS_INC(carps_badttl);
+ CARP_DEBUG("carp_input: received ttl %d != 255 on %s\n",
+ ip->ip_ttl,
+ m->m_pkthdr.rcvif->if_xname);
+ m_freem(m);
+ return;
+ }
+
+ iplen = ip->ip_hl << 2;
+
+ if (m->m_pkthdr.len < iplen + sizeof(*ch)) {
+ CARPSTATS_INC(carps_badlen);
+ CARP_DEBUG("carp_input: received len %zd < "
+ "sizeof(struct carp_header) on %s\n",
+ m->m_len - sizeof(struct ip),
+ m->m_pkthdr.rcvif->if_xname);
+ m_freem(m);
+ return;
+ }
+
+ if (iplen + sizeof(*ch) < m->m_len) {
+ if ((m = m_pullup(m, iplen + sizeof(*ch))) == NULL) {
+ CARPSTATS_INC(carps_hdrops);
+ CARP_DEBUG("carp_input: pullup failed\n");
+ return;
+ }
+ ip = mtod(m, struct ip *);
+ }
+ ch = (struct carp_header *)((char *)ip + iplen);
+
+ /*
+	 * verify that the received packet is long enough to
+	 * hold the full CARP header
+ */
+ len = iplen + sizeof(*ch);
+ if (len > m->m_pkthdr.len) {
+ CARPSTATS_INC(carps_badlen);
+ CARP_DEBUG("carp_input: packet too short %d on %s\n",
+ m->m_pkthdr.len,
+ m->m_pkthdr.rcvif->if_xname);
+ m_freem(m);
+ return;
+ }
+
+ if ((m = m_pullup(m, len)) == NULL) {
+ CARPSTATS_INC(carps_hdrops);
+ return;
+ }
+ ip = mtod(m, struct ip *);
+ ch = (struct carp_header *)((char *)ip + iplen);
+
+ /* verify the CARP checksum */
+ m->m_data += iplen;
+ if (carp_cksum(m, len - iplen)) {
+ CARPSTATS_INC(carps_badsum);
+ CARP_DEBUG("carp_input: checksum failed on %s\n",
+ m->m_pkthdr.rcvif->if_xname);
+ m_freem(m);
+ return;
+ }
+ m->m_data -= iplen;
+
+ carp_input_c(m, ch, AF_INET);
+}
+
+#ifdef INET6
+int
+carp6_input(struct mbuf **mp, int *offp, int proto)
+{
+ struct mbuf *m = *mp;
+ struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *);
+ struct carp_header *ch;
+ u_int len;
+
+ CARPSTATS_INC(carps_ipackets6);
+
+ if (!carp_opts[CARPCTL_ALLOW]) {
+ m_freem(m);
+ return (IPPROTO_DONE);
+ }
+
+ /* check if received on a valid carp interface */
+ if (m->m_pkthdr.rcvif->if_carp == NULL) {
+ CARPSTATS_INC(carps_badif);
+ CARP_DEBUG("carp6_input: packet received on non-carp "
+ "interface: %s\n",
+ m->m_pkthdr.rcvif->if_xname);
+ m_freem(m);
+ return (IPPROTO_DONE);
+ }
+
+ /* verify that the IP TTL is 255 */
+ if (ip6->ip6_hlim != CARP_DFLTTL) {
+ CARPSTATS_INC(carps_badttl);
+ CARP_DEBUG("carp6_input: received ttl %d != 255 on %s\n",
+ ip6->ip6_hlim,
+ m->m_pkthdr.rcvif->if_xname);
+ m_freem(m);
+ return (IPPROTO_DONE);
+ }
+
+ /* verify that we have a complete carp packet */
+ len = m->m_len;
+ IP6_EXTHDR_GET(ch, struct carp_header *, m, *offp, sizeof(*ch));
+ if (ch == NULL) {
+ CARPSTATS_INC(carps_badlen);
+ CARP_DEBUG("carp6_input: packet size %u too small\n", len);
+ return (IPPROTO_DONE);
+ }
+
+ /* verify the CARP checksum */
+ m->m_data += *offp;
+ if (carp_cksum(m, sizeof(*ch))) {
+ CARPSTATS_INC(carps_badsum);
+ CARP_DEBUG("carp6_input: checksum failed, on %s\n",
+ m->m_pkthdr.rcvif->if_xname);
+ m_freem(m);
+ return (IPPROTO_DONE);
+ }
+ m->m_data -= *offp;
+
+ carp_input_c(m, ch, AF_INET6);
+ return (IPPROTO_DONE);
+}
+#endif /* INET6 */
+
+static void
+carp_input_c(struct mbuf *m, struct carp_header *ch, sa_family_t af)
+{
+ struct ifnet *ifp = m->m_pkthdr.rcvif;
+ struct carp_softc *sc;
+ u_int64_t tmp_counter;
+ struct timeval sc_tv, ch_tv;
+
+ /* verify that the VHID is valid on the receiving interface */
+ CARP_LOCK(ifp->if_carp);
+ TAILQ_FOREACH(sc, &((struct carp_if *)ifp->if_carp)->vhif_vrs, sc_list)
+ if (sc->sc_vhid == ch->carp_vhid)
+ break;
+
+ if (!sc || !((SC2IFP(sc)->if_flags & IFF_UP) &&
+ (SC2IFP(sc)->if_drv_flags & IFF_DRV_RUNNING))) {
+ CARPSTATS_INC(carps_badvhid);
+ CARP_UNLOCK(ifp->if_carp);
+ m_freem(m);
+ return;
+ }
+
+ getmicrotime(&SC2IFP(sc)->if_lastchange);
+ SC2IFP(sc)->if_ipackets++;
+ SC2IFP(sc)->if_ibytes += m->m_pkthdr.len;
+
+ if (bpf_peers_present(SC2IFP(sc)->if_bpf)) {
+ struct ip *ip = mtod(m, struct ip *);
+ uint32_t af1 = af;
+
+ /* BPF wants net byte order */
+ ip->ip_len = htons(ip->ip_len + (ip->ip_hl << 2));
+ ip->ip_off = htons(ip->ip_off);
+ bpf_mtap2(SC2IFP(sc)->if_bpf, &af1, sizeof(af1), m);
+ }
+
+ /* verify the CARP version. */
+ if (ch->carp_version != CARP_VERSION) {
+ CARPSTATS_INC(carps_badver);
+ SC2IFP(sc)->if_ierrors++;
+ CARP_UNLOCK(ifp->if_carp);
+ CARP_DEBUG("%s; invalid version %d\n",
+ SC2IFP(sc)->if_xname,
+ ch->carp_version);
+ m_freem(m);
+ return;
+ }
+
+ /* verify the hash */
+ if (carp_hmac_verify(sc, ch->carp_counter, ch->carp_md)) {
+ CARPSTATS_INC(carps_badauth);
+ SC2IFP(sc)->if_ierrors++;
+ CARP_UNLOCK(ifp->if_carp);
+ CARP_DEBUG("%s: incorrect hash\n", SC2IFP(sc)->if_xname);
+ m_freem(m);
+ return;
+ }
+
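+	/* Reassemble the 64-bit counter from the two 32-bit words. */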
+ tmp_counter = ntohl(ch->carp_counter[0]);
+ tmp_counter = tmp_counter<<32;
+ tmp_counter += ntohl(ch->carp_counter[1]);
+
+ /* XXX Replay protection goes here */
+
+ sc->sc_init_counter = 0;
+ sc->sc_counter = tmp_counter;
+
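+	/*
+	 * The effective advertisement interval is advbase seconds plus
+	 * advskew/256 seconds; while preemption is suppressed, compare as
+	 * if our advskew were at least 240.
+	 */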
+ sc_tv.tv_sec = sc->sc_advbase;
+ if (carp_suppress_preempt && sc->sc_advskew < 240)
+ sc_tv.tv_usec = 240 * 1000000 / 256;
+ else
+ sc_tv.tv_usec = sc->sc_advskew * 1000000 / 256;
+ ch_tv.tv_sec = ch->carp_advbase;
+ ch_tv.tv_usec = ch->carp_advskew * 1000000 / 256;
+
+ switch (sc->sc_state) {
+ case INIT:
+ break;
+ case MASTER:
+ /*
+		 * If we receive an advertisement from a master that is going
+		 * to advertise more frequently than we do, go into BACKUP state.
+ */
+ if (timevalcmp(&sc_tv, &ch_tv, >) ||
+ timevalcmp(&sc_tv, &ch_tv, ==)) {
+ callout_stop(&sc->sc_ad_tmo);
+ CARP_LOG("%s: MASTER -> BACKUP "
+ "(more frequent advertisement received)\n",
+ SC2IFP(sc)->if_xname);
+ carp_set_state(sc, BACKUP);
+ carp_setrun(sc, 0);
+ carp_setroute(sc, RTM_DELETE);
+ }
+ break;
+ case BACKUP:
+ /*
+ * If we're pre-empting masters who advertise slower than us,
+ * and this one claims to be slower, treat him as down.
+ */
+ if (carp_opts[CARPCTL_PREEMPT] &&
+ timevalcmp(&sc_tv, &ch_tv, <)) {
+ CARP_LOG("%s: BACKUP -> MASTER "
+ "(preempting a slower master)\n",
+ SC2IFP(sc)->if_xname);
+ carp_master_down_locked(sc);
+ break;
+ }
+
+ /*
+ * If the master is going to advertise at such a low frequency
+		 * that he's guaranteed to time out, we might as well just
+ * treat him as timed out now.
+ */
+ sc_tv.tv_sec = sc->sc_advbase * 3;
+ if (timevalcmp(&sc_tv, &ch_tv, <)) {
+ CARP_LOG("%s: BACKUP -> MASTER "
+ "(master timed out)\n",
+ SC2IFP(sc)->if_xname);
+ carp_master_down_locked(sc);
+ break;
+ }
+
+ /*
+ * Otherwise, we reset the counter and wait for the next
+ * advertisement.
+ */
+ carp_setrun(sc, af);
+ break;
+ }
+
+ CARP_UNLOCK(ifp->if_carp);
+
+ m_freem(m);
+ return;
+}
+
+static int
+carp_prepare_ad(struct mbuf *m, struct carp_softc *sc, struct carp_header *ch)
+{
+ struct m_tag *mtag;
+ struct ifnet *ifp = SC2IFP(sc);
+
+ if (sc->sc_init_counter) {
+ /* this could also be seconds since unix epoch */
+ sc->sc_counter = arc4random();
+ sc->sc_counter = sc->sc_counter << 32;
+ sc->sc_counter += arc4random();
+ } else
+ sc->sc_counter++;
+
+ ch->carp_counter[0] = htonl((sc->sc_counter>>32)&0xffffffff);
+ ch->carp_counter[1] = htonl(sc->sc_counter&0xffffffff);
+
+ carp_hmac_generate(sc, ch->carp_counter, ch->carp_md);
+
+ /* Tag packet for carp_output */
+ mtag = m_tag_get(PACKET_TAG_CARP, sizeof(struct ifnet *), M_NOWAIT);
+ if (mtag == NULL) {
+ m_freem(m);
+ SC2IFP(sc)->if_oerrors++;
+ return (ENOMEM);
+ }
+ bcopy(&ifp, (caddr_t)(mtag + 1), sizeof(struct ifnet *));
+ m_tag_prepend(m, mtag);
+
+ return (0);
+}
+
+static void
+carp_send_ad_all(void)
+{
+ struct carp_softc *sc;
+
+ mtx_lock(&carp_mtx);
+ LIST_FOREACH(sc, &carpif_list, sc_next) {
+ if (sc->sc_carpdev == NULL)
+ continue;
+ CARP_SCLOCK(sc);
+ if ((SC2IFP(sc)->if_flags & IFF_UP) &&
+ (SC2IFP(sc)->if_drv_flags & IFF_DRV_RUNNING) &&
+ sc->sc_state == MASTER)
+ carp_send_ad_locked(sc);
+ CARP_SCUNLOCK(sc);
+ }
+ mtx_unlock(&carp_mtx);
+}
+
+static void
+carp_send_ad(void *v)
+{
+ struct carp_softc *sc = v;
+
+ CARP_SCLOCK(sc);
+ carp_send_ad_locked(sc);
+ CARP_SCUNLOCK(sc);
+}
+
+static void
+carp_send_ad_locked(struct carp_softc *sc)
+{
+ struct carp_header ch;
+ struct timeval tv;
+ struct carp_header *ch_ptr;
+ struct mbuf *m;
+ int len, advbase, advskew;
+
+ CARP_SCLOCK_ASSERT(sc);
+
+ /* bow out if we've lost our UPness or RUNNINGuiness */
+ if (!((SC2IFP(sc)->if_flags & IFF_UP) &&
+ (SC2IFP(sc)->if_drv_flags & IFF_DRV_RUNNING))) {
+ advbase = 255;
+ advskew = 255;
+ } else {
+ advbase = sc->sc_advbase;
+ if (!carp_suppress_preempt || sc->sc_advskew > 240)
+ advskew = sc->sc_advskew;
+ else
+ advskew = 240;
+ tv.tv_sec = advbase;
+ tv.tv_usec = advskew * 1000000 / 256;
+ }
+
+ ch.carp_version = CARP_VERSION;
+ ch.carp_type = CARP_ADVERTISEMENT;
+ ch.carp_vhid = sc->sc_vhid;
+ ch.carp_advbase = advbase;
+ ch.carp_advskew = advskew;
+ ch.carp_authlen = 7; /* XXX DEFINE */
+ ch.carp_pad1 = 0; /* must be zero */
+ ch.carp_cksum = 0;
+
+#ifdef INET
+ if (sc->sc_ia) {
+ struct ip *ip;
+
+ MGETHDR(m, M_DONTWAIT, MT_HEADER);
+ if (m == NULL) {
+ SC2IFP(sc)->if_oerrors++;
+ CARPSTATS_INC(carps_onomem);
+ /* XXX maybe less ? */
+ if (advbase != 255 || advskew != 255)
+ callout_reset(&sc->sc_ad_tmo, tvtohz(&tv),
+ carp_send_ad, sc);
+ return;
+ }
+ len = sizeof(*ip) + sizeof(ch);
+ m->m_pkthdr.len = len;
+ m->m_pkthdr.rcvif = NULL;
+ m->m_len = len;
+ MH_ALIGN(m, m->m_len);
+ m->m_flags |= M_MCAST;
+ ip = mtod(m, struct ip *);
+ ip->ip_v = IPVERSION;
+ ip->ip_hl = sizeof(*ip) >> 2;
+ ip->ip_tos = IPTOS_LOWDELAY;
+ ip->ip_len = len;
+ ip->ip_id = ip_newid();
+ ip->ip_off = IP_DF;
+ ip->ip_ttl = CARP_DFLTTL;
+ ip->ip_p = IPPROTO_CARP;
+ ip->ip_sum = 0;
+ ip->ip_src.s_addr = sc->sc_ia->ia_addr.sin_addr.s_addr;
+ ip->ip_dst.s_addr = htonl(INADDR_CARP_GROUP);
+
+ ch_ptr = (struct carp_header *)(&ip[1]);
+ bcopy(&ch, ch_ptr, sizeof(ch));
+ if (carp_prepare_ad(m, sc, ch_ptr))
+ return;
+
+ m->m_data += sizeof(*ip);
+ ch_ptr->carp_cksum = carp_cksum(m, len - sizeof(*ip));
+ m->m_data -= sizeof(*ip);
+
+ getmicrotime(&SC2IFP(sc)->if_lastchange);
+ SC2IFP(sc)->if_opackets++;
+ SC2IFP(sc)->if_obytes += len;
+ CARPSTATS_INC(carps_opackets);
+
+ if (ip_output(m, NULL, NULL, IP_RAWOUTPUT, &sc->sc_imo, NULL)) {
+ SC2IFP(sc)->if_oerrors++;
+ if (sc->sc_sendad_errors < INT_MAX)
+ sc->sc_sendad_errors++;
+ if (sc->sc_sendad_errors == CARP_SENDAD_MAX_ERRORS) {
+ carp_suppress_preempt++;
+ if (carp_suppress_preempt == 1) {
+ CARP_SCUNLOCK(sc);
+ carp_send_ad_all();
+ CARP_SCLOCK(sc);
+ }
+ }
+ sc->sc_sendad_success = 0;
+ } else {
+ if (sc->sc_sendad_errors >= CARP_SENDAD_MAX_ERRORS) {
+ if (++sc->sc_sendad_success >=
+ CARP_SENDAD_MIN_SUCCESS) {
+ carp_suppress_preempt--;
+ sc->sc_sendad_errors = 0;
+ }
+ } else
+ sc->sc_sendad_errors = 0;
+ }
+ }
+#endif /* INET */
+#ifdef INET6
+ if (sc->sc_ia6) {
+ struct ip6_hdr *ip6;
+
+ MGETHDR(m, M_DONTWAIT, MT_HEADER);
+ if (m == NULL) {
+ SC2IFP(sc)->if_oerrors++;
+ CARPSTATS_INC(carps_onomem);
+ /* XXX maybe less ? */
+ if (advbase != 255 || advskew != 255)
+ callout_reset(&sc->sc_ad_tmo, tvtohz(&tv),
+ carp_send_ad, sc);
+ return;
+ }
+ len = sizeof(*ip6) + sizeof(ch);
+ m->m_pkthdr.len = len;
+ m->m_pkthdr.rcvif = NULL;
+ m->m_len = len;
+ MH_ALIGN(m, m->m_len);
+ m->m_flags |= M_MCAST;
+ ip6 = mtod(m, struct ip6_hdr *);
+ bzero(ip6, sizeof(*ip6));
+ ip6->ip6_vfc |= IPV6_VERSION;
+ ip6->ip6_hlim = CARP_DFLTTL;
+ ip6->ip6_nxt = IPPROTO_CARP;
+ bcopy(&sc->sc_ia6->ia_addr.sin6_addr, &ip6->ip6_src,
+ sizeof(struct in6_addr));
+ /* set the multicast destination */
+
+ ip6->ip6_dst.s6_addr16[0] = htons(0xff02);
+ ip6->ip6_dst.s6_addr8[15] = 0x12;
+ if (in6_setscope(&ip6->ip6_dst, sc->sc_carpdev, NULL) != 0) {
+ SC2IFP(sc)->if_oerrors++;
+ m_freem(m);
+ CARP_DEBUG("%s: in6_setscope failed\n", __func__);
+ return;
+ }
+
+ ch_ptr = (struct carp_header *)(&ip6[1]);
+ bcopy(&ch, ch_ptr, sizeof(ch));
+ if (carp_prepare_ad(m, sc, ch_ptr))
+ return;
+
+ m->m_data += sizeof(*ip6);
+ ch_ptr->carp_cksum = carp_cksum(m, len - sizeof(*ip6));
+ m->m_data -= sizeof(*ip6);
+
+ getmicrotime(&SC2IFP(sc)->if_lastchange);
+ SC2IFP(sc)->if_opackets++;
+ SC2IFP(sc)->if_obytes += len;
+ CARPSTATS_INC(carps_opackets6);
+
+ if (ip6_output(m, NULL, NULL, 0, &sc->sc_im6o, NULL, NULL)) {
+ SC2IFP(sc)->if_oerrors++;
+ if (sc->sc_sendad_errors < INT_MAX)
+ sc->sc_sendad_errors++;
+ if (sc->sc_sendad_errors == CARP_SENDAD_MAX_ERRORS) {
+ carp_suppress_preempt++;
+ if (carp_suppress_preempt == 1) {
+ CARP_SCUNLOCK(sc);
+ carp_send_ad_all();
+ CARP_SCLOCK(sc);
+ }
+ }
+ sc->sc_sendad_success = 0;
+ } else {
+ if (sc->sc_sendad_errors >= CARP_SENDAD_MAX_ERRORS) {
+ if (++sc->sc_sendad_success >=
+ CARP_SENDAD_MIN_SUCCESS) {
+ carp_suppress_preempt--;
+ sc->sc_sendad_errors = 0;
+ }
+ } else
+ sc->sc_sendad_errors = 0;
+ }
+ }
+#endif /* INET6 */
+
+ if (advbase != 255 || advskew != 255)
+ callout_reset(&sc->sc_ad_tmo, tvtohz(&tv),
+ carp_send_ad, sc);
+}
+
+/*
+ * Broadcast a gratuitous ARP request containing
+ * the virtual router MAC address for each IP address
+ * associated with the virtual router.
+ */
+static void
+carp_send_arp(struct carp_softc *sc)
+{
+ struct ifaddr *ifa;
+
+ TAILQ_FOREACH(ifa, &SC2IFP(sc)->if_addrlist, ifa_list) {
+
+ if (ifa->ifa_addr->sa_family != AF_INET)
+ continue;
+
+/* arprequest(sc->sc_carpdev, &in, &in, IF_LLADDR(sc->sc_ifp)); */
+ arp_ifinit2(sc->sc_carpdev, ifa, IF_LLADDR(sc->sc_ifp));
+
+ DELAY(1000); /* XXX */
+ }
+}
+
+#ifdef INET6
+static void
+carp_send_na(struct carp_softc *sc)
+{
+ struct ifaddr *ifa;
+ struct in6_addr *in6;
+ static struct in6_addr mcast = IN6ADDR_LINKLOCAL_ALLNODES_INIT;
+
+ TAILQ_FOREACH(ifa, &SC2IFP(sc)->if_addrlist, ifa_list) {
+
+ if (ifa->ifa_addr->sa_family != AF_INET6)
+ continue;
+
+ in6 = &ifatoia6(ifa)->ia_addr.sin6_addr;
+ nd6_na_output(sc->sc_carpdev, &mcast, in6,
+ ND_NA_FLAG_OVERRIDE, 1, NULL);
+ DELAY(1000); /* XXX */
+ }
+}
+#endif /* INET6 */
+
+static int
+carp_addrcount(struct carp_if *cif, struct in_ifaddr *ia, int type)
+{
+ struct carp_softc *vh;
+ struct ifaddr *ifa;
+ int count = 0;
+
+ CARP_LOCK_ASSERT(cif);
+
+ TAILQ_FOREACH(vh, &cif->vhif_vrs, sc_list) {
+ if ((type == CARP_COUNT_RUNNING &&
+ (SC2IFP(vh)->if_flags & IFF_UP) &&
+ (SC2IFP(vh)->if_drv_flags & IFF_DRV_RUNNING)) ||
+ (type == CARP_COUNT_MASTER && vh->sc_state == MASTER)) {
+ IF_ADDR_LOCK(SC2IFP(vh));
+ TAILQ_FOREACH(ifa, &SC2IFP(vh)->if_addrlist,
+ ifa_list) {
+ if (ifa->ifa_addr->sa_family == AF_INET &&
+ ia->ia_addr.sin_addr.s_addr ==
+ ifatoia(ifa)->ia_addr.sin_addr.s_addr)
+ count++;
+ }
+ IF_ADDR_UNLOCK(SC2IFP(vh));
+ }
+ }
+ return (count);
+}
+
+int
+carp_iamatch(struct ifnet *ifp, struct in_ifaddr *ia,
+ struct in_addr *isaddr, u_int8_t **enaddr)
+{
+ struct carp_if *cif;
+ struct carp_softc *vh;
+ int index, count = 0;
+ struct ifaddr *ifa;
+
+ cif = ifp->if_carp;
+ CARP_LOCK(cif);
+
+ if (carp_opts[CARPCTL_ARPBALANCE]) {
+ /*
+ * XXX proof of concept implementation.
+		 * We use the source IP to decide which virtual host should
+		 * handle the request. If we're master of that virtual host,
+		 * we respond; otherwise we just drop the ARP packet on
+		 * the floor.
+ */
+ count = carp_addrcount(cif, ia, CARP_COUNT_RUNNING);
+ if (count == 0) {
+ /* should never reach this */
+ CARP_UNLOCK(cif);
+ return (0);
+ }
+
+ /* this should be a hash, like pf_hash() */
+ index = ntohl(isaddr->s_addr) % count;
+ count = 0;
+
+ TAILQ_FOREACH(vh, &cif->vhif_vrs, sc_list) {
+ if ((SC2IFP(vh)->if_flags & IFF_UP) &&
+ (SC2IFP(vh)->if_drv_flags & IFF_DRV_RUNNING)) {
+ IF_ADDR_LOCK(SC2IFP(vh));
+ TAILQ_FOREACH(ifa, &SC2IFP(vh)->if_addrlist,
+ ifa_list) {
+ if (ifa->ifa_addr->sa_family ==
+ AF_INET &&
+ ia->ia_addr.sin_addr.s_addr ==
+ ifatoia(ifa)->ia_addr.sin_addr.s_addr) {
+ if (count == index) {
+ if (vh->sc_state ==
+ MASTER) {
+ *enaddr = IF_LLADDR(vh->sc_ifp);
+ IF_ADDR_UNLOCK(SC2IFP(vh));
+ CARP_UNLOCK(cif);
+ return (1);
+ } else {
+ IF_ADDR_UNLOCK(SC2IFP(vh));
+ CARP_UNLOCK(cif);
+ return (0);
+ }
+ }
+ count++;
+ }
+ }
+ IF_ADDR_UNLOCK(SC2IFP(vh));
+ }
+ }
+ } else {
+ TAILQ_FOREACH(vh, &cif->vhif_vrs, sc_list) {
+ if ((SC2IFP(vh)->if_flags & IFF_UP) &&
+ (SC2IFP(vh)->if_drv_flags & IFF_DRV_RUNNING) &&
+ ia->ia_ifp == SC2IFP(vh) &&
+ vh->sc_state == MASTER) {
+ *enaddr = IF_LLADDR(vh->sc_ifp);
+ CARP_UNLOCK(cif);
+ return (1);
+ }
+ }
+ }
+ CARP_UNLOCK(cif);
+ return (0);
+}
+
+#ifdef INET6
+struct ifaddr *
+carp_iamatch6(struct ifnet *ifp, struct in6_addr *taddr)
+{
+ struct carp_if *cif;
+ struct carp_softc *vh;
+ struct ifaddr *ifa;
+
+ cif = ifp->if_carp;
+ CARP_LOCK(cif);
+ TAILQ_FOREACH(vh, &cif->vhif_vrs, sc_list) {
+ IF_ADDR_LOCK(SC2IFP(vh));
+ TAILQ_FOREACH(ifa, &SC2IFP(vh)->if_addrlist, ifa_list) {
+ if (IN6_ARE_ADDR_EQUAL(taddr,
+ &ifatoia6(ifa)->ia_addr.sin6_addr) &&
+ (SC2IFP(vh)->if_flags & IFF_UP) &&
+ (SC2IFP(vh)->if_drv_flags & IFF_DRV_RUNNING) &&
+ vh->sc_state == MASTER) {
+ ifa_ref(ifa);
+ IF_ADDR_UNLOCK(SC2IFP(vh));
+ CARP_UNLOCK(cif);
+ return (ifa);
+ }
+ }
+ IF_ADDR_UNLOCK(SC2IFP(vh));
+ }
+ CARP_UNLOCK(cif);
+
+ return (NULL);
+}
+
+caddr_t
+carp_macmatch6(struct ifnet *ifp, struct mbuf *m, const struct in6_addr *taddr)
+{
+ struct m_tag *mtag;
+ struct carp_if *cif;
+ struct carp_softc *sc;
+ struct ifaddr *ifa;
+
+ cif = ifp->if_carp;
+ CARP_LOCK(cif);
+ TAILQ_FOREACH(sc, &cif->vhif_vrs, sc_list) {
+ IF_ADDR_LOCK(SC2IFP(sc));
+ TAILQ_FOREACH(ifa, &SC2IFP(sc)->if_addrlist, ifa_list) {
+ if (IN6_ARE_ADDR_EQUAL(taddr,
+ &ifatoia6(ifa)->ia_addr.sin6_addr) &&
+ (SC2IFP(sc)->if_flags & IFF_UP) &&
+ (SC2IFP(sc)->if_drv_flags & IFF_DRV_RUNNING)) {
+ struct ifnet *ifp = SC2IFP(sc);
+ mtag = m_tag_get(PACKET_TAG_CARP,
+ sizeof(struct ifnet *), M_NOWAIT);
+ if (mtag == NULL) {
+ /* better a bit than nothing */
+ IF_ADDR_UNLOCK(SC2IFP(sc));
+ CARP_UNLOCK(cif);
+ return (IF_LLADDR(sc->sc_ifp));
+ }
+ bcopy(&ifp, (caddr_t)(mtag + 1),
+ sizeof(struct ifnet *));
+ m_tag_prepend(m, mtag);
+
+ IF_ADDR_UNLOCK(SC2IFP(sc));
+ CARP_UNLOCK(cif);
+ return (IF_LLADDR(sc->sc_ifp));
+ }
+ }
+ IF_ADDR_UNLOCK(SC2IFP(sc));
+ }
+ CARP_UNLOCK(cif);
+
+ return (NULL);
+}
+#endif
+
+struct ifnet *
+carp_forus(struct ifnet *ifp, u_char *dhost)
+{
+ struct carp_if *cif;
+ struct carp_softc *vh;
+ u_int8_t *ena = dhost;
+
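+	/* CARP virtual MAC addresses are 00:00:5e:00:01:<vhid>. */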
+ if (ena[0] || ena[1] || ena[2] != 0x5e || ena[3] || ena[4] != 1)
+ return (NULL);
+
+ cif = ifp->if_carp;
+ CARP_LOCK(cif);
+ TAILQ_FOREACH(vh, &cif->vhif_vrs, sc_list)
+ if ((SC2IFP(vh)->if_flags & IFF_UP) &&
+ (SC2IFP(vh)->if_drv_flags & IFF_DRV_RUNNING) &&
+ vh->sc_state == MASTER &&
+ !bcmp(dhost, IF_LLADDR(vh->sc_ifp), ETHER_ADDR_LEN)) {
+ CARP_UNLOCK(cif);
+ return (SC2IFP(vh));
+ }
+
+ CARP_UNLOCK(cif);
+ return (NULL);
+}
+
+static void
+carp_master_down(void *v)
+{
+ struct carp_softc *sc = v;
+
+ CARP_SCLOCK(sc);
+ carp_master_down_locked(sc);
+ CARP_SCUNLOCK(sc);
+}
+
+static void
+carp_master_down_locked(struct carp_softc *sc)
+{
+ if (sc->sc_carpdev)
+ CARP_SCLOCK_ASSERT(sc);
+
+ switch (sc->sc_state) {
+ case INIT:
+ printf("%s: master_down event in INIT state\n",
+ SC2IFP(sc)->if_xname);
+ break;
+ case MASTER:
+ break;
+ case BACKUP:
+ carp_set_state(sc, MASTER);
+ carp_send_ad_locked(sc);
+ carp_send_arp(sc);
+#ifdef INET6
+ carp_send_na(sc);
+#endif /* INET6 */
+ carp_setrun(sc, 0);
+ carp_setroute(sc, RTM_ADD);
+ break;
+ }
+}
+
+/*
+ * When in backup state, af indicates whether to reset the master down timer
+ * for v4 or v6. If it's set to zero, reset whichever timers are already pending.
+ */
+static void
+carp_setrun(struct carp_softc *sc, sa_family_t af)
+{
+ struct timeval tv;
+
+ if (sc->sc_carpdev == NULL) {
+ SC2IFP(sc)->if_drv_flags &= ~IFF_DRV_RUNNING;
+ carp_set_state(sc, INIT);
+ return;
+ } else
+ CARP_SCLOCK_ASSERT(sc);
+
+ if (SC2IFP(sc)->if_flags & IFF_UP &&
+ sc->sc_vhid > 0 && (sc->sc_naddrs || sc->sc_naddrs6) &&
+ sc->sc_carpdev->if_link_state == LINK_STATE_UP)
+ SC2IFP(sc)->if_drv_flags |= IFF_DRV_RUNNING;
+ else {
+ SC2IFP(sc)->if_drv_flags &= ~IFF_DRV_RUNNING;
+ carp_setroute(sc, RTM_DELETE);
+ return;
+ }
+
+ switch (sc->sc_state) {
+ case INIT:
+ if (carp_opts[CARPCTL_PREEMPT] && !carp_suppress_preempt) {
+ carp_send_ad_locked(sc);
+ carp_send_arp(sc);
+#ifdef INET6
+ carp_send_na(sc);
+#endif /* INET6 */
+ CARP_LOG("%s: INIT -> MASTER (preempting)\n",
+ SC2IFP(sc)->if_xname);
+ carp_set_state(sc, MASTER);
+ carp_setroute(sc, RTM_ADD);
+ } else {
+ CARP_LOG("%s: INIT -> BACKUP\n", SC2IFP(sc)->if_xname);
+ carp_set_state(sc, BACKUP);
+ carp_setroute(sc, RTM_DELETE);
+ carp_setrun(sc, 0);
+ }
+ break;
+ case BACKUP:
+ callout_stop(&sc->sc_ad_tmo);
+ tv.tv_sec = 3 * sc->sc_advbase;
+ tv.tv_usec = sc->sc_advskew * 1000000 / 256;
+ switch (af) {
+#ifdef INET
+ case AF_INET:
+ callout_reset(&sc->sc_md_tmo, tvtohz(&tv),
+ carp_master_down, sc);
+ break;
+#endif /* INET */
+#ifdef INET6
+ case AF_INET6:
+ callout_reset(&sc->sc_md6_tmo, tvtohz(&tv),
+ carp_master_down, sc);
+ break;
+#endif /* INET6 */
+ default:
+ if (sc->sc_naddrs)
+ callout_reset(&sc->sc_md_tmo, tvtohz(&tv),
+ carp_master_down, sc);
+ if (sc->sc_naddrs6)
+ callout_reset(&sc->sc_md6_tmo, tvtohz(&tv),
+ carp_master_down, sc);
+ break;
+ }
+ break;
+ case MASTER:
+ tv.tv_sec = sc->sc_advbase;
+ tv.tv_usec = sc->sc_advskew * 1000000 / 256;
+ callout_reset(&sc->sc_ad_tmo, tvtohz(&tv),
+ carp_send_ad, sc);
+ break;
+ }
+}
+
+static void
+carp_multicast_cleanup(struct carp_softc *sc)
+{
+ struct ip_moptions *imo = &sc->sc_imo;
+ u_int16_t n = imo->imo_num_memberships;
+
+ /* Clean up our own multicast memberships */
+ while (n-- > 0) {
+ if (imo->imo_membership[n] != NULL) {
+ in_delmulti(imo->imo_membership[n]);
+ imo->imo_membership[n] = NULL;
+ }
+ }
+ KASSERT(imo->imo_mfilters == NULL,
+ ("%s: imo_mfilters != NULL", __func__));
+ imo->imo_num_memberships = 0;
+ imo->imo_multicast_ifp = NULL;
+}
+
+#ifdef INET6
+static void
+carp_multicast6_cleanup(struct carp_softc *sc)
+{
+ struct ip6_moptions *im6o = &sc->sc_im6o;
+ u_int16_t n = im6o->im6o_num_memberships;
+
+ while (n-- > 0) {
+ if (im6o->im6o_membership[n] != NULL) {
+ in6_mc_leave(im6o->im6o_membership[n], NULL);
+ im6o->im6o_membership[n] = NULL;
+ }
+ }
+ KASSERT(im6o->im6o_mfilters == NULL,
+ ("%s: im6o_mfilters != NULL", __func__));
+ im6o->im6o_num_memberships = 0;
+ im6o->im6o_multicast_ifp = NULL;
+}
+#endif
+
+static int
+carp_set_addr(struct carp_softc *sc, struct sockaddr_in *sin)
+{
+ struct ifnet *ifp;
+ struct carp_if *cif;
+ struct in_ifaddr *ia, *ia_if;
+ struct ip_moptions *imo = &sc->sc_imo;
+ struct in_addr addr;
+ u_long iaddr = htonl(sin->sin_addr.s_addr);
+ int own, error;
+
+ if (sin->sin_addr.s_addr == 0) {
+ if (!(SC2IFP(sc)->if_flags & IFF_UP))
+ carp_set_state(sc, INIT);
+ if (sc->sc_naddrs)
+ SC2IFP(sc)->if_flags |= IFF_UP;
+ if (sc->sc_carpdev)
+ CARP_SCLOCK(sc);
+ carp_setrun(sc, 0);
+ if (sc->sc_carpdev)
+ CARP_SCUNLOCK(sc);
+ return (0);
+ }
+
+	/* we have to do it by hand to make sure we won't match on ourselves */
+ ia_if = NULL; own = 0;
+ IN_IFADDR_RLOCK();
+ TAILQ_FOREACH(ia, &V_in_ifaddrhead, ia_link) {
+ /* and, yeah, we need a multicast-capable iface too */
+ if (ia->ia_ifp != SC2IFP(sc) &&
+ (ia->ia_ifp->if_flags & IFF_MULTICAST) &&
+ (iaddr & ia->ia_subnetmask) == ia->ia_subnet) {
+ if (!ia_if)
+ ia_if = ia;
+ if (sin->sin_addr.s_addr ==
+ ia->ia_addr.sin_addr.s_addr)
+ own++;
+ }
+ }
+
+ if (!ia_if) {
+ IN_IFADDR_RUNLOCK();
+ return (EADDRNOTAVAIL);
+ }
+
+ ia = ia_if;
+ ifa_ref(&ia->ia_ifa);
+ IN_IFADDR_RUNLOCK();
+
+ ifp = ia->ia_ifp;
+
+ if (ifp == NULL || (ifp->if_flags & IFF_MULTICAST) == 0 ||
+ (imo->imo_multicast_ifp && imo->imo_multicast_ifp != ifp)) {
+ ifa_free(&ia->ia_ifa);
+ return (EADDRNOTAVAIL);
+ }
+
+ if (imo->imo_num_memberships == 0) {
+ addr.s_addr = htonl(INADDR_CARP_GROUP);
+ if ((imo->imo_membership[0] = in_addmulti(&addr, ifp)) ==
+ NULL) {
+ ifa_free(&ia->ia_ifa);
+ return (ENOBUFS);
+ }
+ imo->imo_num_memberships++;
+ imo->imo_multicast_ifp = ifp;
+ imo->imo_multicast_ttl = CARP_DFLTTL;
+ imo->imo_multicast_loop = 0;
+ }
+
+ if (!ifp->if_carp) {
+
+ cif = malloc(sizeof(*cif), M_CARP,
+ M_WAITOK|M_ZERO);
+ if (!cif) {
+ error = ENOBUFS;
+ goto cleanup;
+ }
+ if ((error = ifpromisc(ifp, 1))) {
+ free(cif, M_CARP);
+ goto cleanup;
+ }
+
+ CARP_LOCK_INIT(cif);
+ CARP_LOCK(cif);
+ cif->vhif_ifp = ifp;
+ TAILQ_INIT(&cif->vhif_vrs);
+ ifp->if_carp = cif;
+
+ } else {
+ struct carp_softc *vr;
+
+ cif = (struct carp_if *)ifp->if_carp;
+ CARP_LOCK(cif);
+ TAILQ_FOREACH(vr, &cif->vhif_vrs, sc_list)
+ if (vr != sc && vr->sc_vhid == sc->sc_vhid) {
+ CARP_UNLOCK(cif);
+ error = EEXIST;
+ goto cleanup;
+ }
+ }
+ sc->sc_ia = ia;
+ sc->sc_carpdev = ifp;
+
+ { /* XXX prevent endless loop if already in queue */
+ struct carp_softc *vr, *after = NULL;
+ int myself = 0;
+ cif = (struct carp_if *)ifp->if_carp;
+
+ /* XXX: cif should not change, right? So we still hold the lock */
+ CARP_LOCK_ASSERT(cif);
+
+ TAILQ_FOREACH(vr, &cif->vhif_vrs, sc_list) {
+ if (vr == sc)
+ myself = 1;
+ if (vr->sc_vhid < sc->sc_vhid)
+ after = vr;
+ }
+
+ if (!myself) {
+ /* We're trying to keep things in order */
+ if (after == NULL) {
+ TAILQ_INSERT_TAIL(&cif->vhif_vrs, sc, sc_list);
+ } else {
+ TAILQ_INSERT_AFTER(&cif->vhif_vrs, after, sc, sc_list);
+ }
+ cif->vhif_nvrs++;
+ }
+ }
+
+ sc->sc_naddrs++;
+ SC2IFP(sc)->if_flags |= IFF_UP;
+ if (own)
+ sc->sc_advskew = 0;
+ carp_sc_state_locked(sc);
+ carp_setrun(sc, 0);
+
+ CARP_UNLOCK(cif);
+ ifa_free(&ia->ia_ifa); /* XXXRW: should hold reference for softc. */
+
+ return (0);
+
+cleanup:
+ in_delmulti(imo->imo_membership[--imo->imo_num_memberships]);
+ ifa_free(&ia->ia_ifa);
+ return (error);
+}
+
+static int
+carp_del_addr(struct carp_softc *sc, struct sockaddr_in *sin)
+{
+ int error = 0;
+
+ if (!--sc->sc_naddrs) {
+ struct carp_if *cif = (struct carp_if *)sc->sc_carpdev->if_carp;
+ struct ip_moptions *imo = &sc->sc_imo;
+
+ CARP_LOCK(cif);
+ callout_stop(&sc->sc_ad_tmo);
+ SC2IFP(sc)->if_flags &= ~IFF_UP;
+ SC2IFP(sc)->if_drv_flags &= ~IFF_DRV_RUNNING;
+ sc->sc_vhid = -1;
+ in_delmulti(imo->imo_membership[--imo->imo_num_memberships]);
+ imo->imo_multicast_ifp = NULL;
+ TAILQ_REMOVE(&cif->vhif_vrs, sc, sc_list);
+ if (!--cif->vhif_nvrs) {
+ sc->sc_carpdev->if_carp = NULL;
+ CARP_LOCK_DESTROY(cif);
+ free(cif, M_CARP);
+ } else {
+ CARP_UNLOCK(cif);
+ }
+ }
+
+ return (error);
+}
+
+#ifdef INET6
+static int
+carp_set_addr6(struct carp_softc *sc, struct sockaddr_in6 *sin6)
+{
+ struct ifnet *ifp;
+ struct carp_if *cif;
+ struct in6_ifaddr *ia, *ia_if;
+ struct ip6_moptions *im6o = &sc->sc_im6o;
+ struct in6_addr in6;
+ int own, error;
+
+ error = 0;
+
+ if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
+ if (!(SC2IFP(sc)->if_flags & IFF_UP))
+ carp_set_state(sc, INIT);
+ if (sc->sc_naddrs6)
+ SC2IFP(sc)->if_flags |= IFF_UP;
+ if (sc->sc_carpdev)
+ CARP_SCLOCK(sc);
+ carp_setrun(sc, 0);
+ if (sc->sc_carpdev)
+ CARP_SCUNLOCK(sc);
+ return (0);
+ }
+
+	/* we have to do it by hand to make sure we won't match on ourselves */
+ ia_if = NULL; own = 0;
+ IN6_IFADDR_RLOCK();
+ TAILQ_FOREACH(ia, &V_in6_ifaddrhead, ia_link) {
+ int i;
+
+ for (i = 0; i < 4; i++) {
+ if ((sin6->sin6_addr.s6_addr32[i] &
+ ia->ia_prefixmask.sin6_addr.s6_addr32[i]) !=
+ (ia->ia_addr.sin6_addr.s6_addr32[i] &
+ ia->ia_prefixmask.sin6_addr.s6_addr32[i]))
+ break;
+ }
+ /* and, yeah, we need a multicast-capable iface too */
+ if (ia->ia_ifp != SC2IFP(sc) &&
+ (ia->ia_ifp->if_flags & IFF_MULTICAST) &&
+ (i == 4)) {
+ if (!ia_if)
+ ia_if = ia;
+ if (IN6_ARE_ADDR_EQUAL(&sin6->sin6_addr,
+ &ia->ia_addr.sin6_addr))
+ own++;
+ }
+ }
+
+ if (!ia_if) {
+ IN6_IFADDR_RUNLOCK();
+ return (EADDRNOTAVAIL);
+ }
+ ia = ia_if;
+ ifa_ref(&ia->ia_ifa);
+ IN6_IFADDR_RUNLOCK();
+ ifp = ia->ia_ifp;
+
+ if (ifp == NULL || (ifp->if_flags & IFF_MULTICAST) == 0 ||
+ (im6o->im6o_multicast_ifp && im6o->im6o_multicast_ifp != ifp)) {
+ ifa_free(&ia->ia_ifa);
+ return (EADDRNOTAVAIL);
+ }
+
+ if (!sc->sc_naddrs6) {
+ struct in6_multi *in6m;
+
+ im6o->im6o_multicast_ifp = ifp;
+
+ /* join CARP multicast address */
+ bzero(&in6, sizeof(in6));
+ in6.s6_addr16[0] = htons(0xff02);
+ in6.s6_addr8[15] = 0x12;
+ if (in6_setscope(&in6, ifp, NULL) != 0)
+ goto cleanup;
+ in6m = NULL;
+ error = in6_mc_join(ifp, &in6, NULL, &in6m, 0);
+ if (error)
+ goto cleanup;
+ im6o->im6o_membership[0] = in6m;
+ im6o->im6o_num_memberships++;
+
+ /* join solicited multicast address */
+ bzero(&in6, sizeof(in6));
+ in6.s6_addr16[0] = htons(0xff02);
+ in6.s6_addr32[1] = 0;
+ in6.s6_addr32[2] = htonl(1);
+ in6.s6_addr32[3] = sin6->sin6_addr.s6_addr32[3];
+ in6.s6_addr8[12] = 0xff;
+ if (in6_setscope(&in6, ifp, NULL) != 0)
+ goto cleanup;
+ in6m = NULL;
+ error = in6_mc_join(ifp, &in6, NULL, &in6m, 0);
+ if (error)
+ goto cleanup;
+ im6o->im6o_membership[1] = in6m;
+ im6o->im6o_num_memberships++;
+ }
+
+ if (!ifp->if_carp) {
+ cif = malloc(sizeof(*cif), M_CARP,
+ M_WAITOK|M_ZERO);
+ if (!cif) {
+ error = ENOBUFS;
+ goto cleanup;
+ }
+ if ((error = ifpromisc(ifp, 1))) {
+ free(cif, M_CARP);
+ goto cleanup;
+ }
+
+ CARP_LOCK_INIT(cif);
+ CARP_LOCK(cif);
+ cif->vhif_ifp = ifp;
+ TAILQ_INIT(&cif->vhif_vrs);
+ ifp->if_carp = cif;
+
+ } else {
+ struct carp_softc *vr;
+
+ cif = (struct carp_if *)ifp->if_carp;
+ CARP_LOCK(cif);
+ TAILQ_FOREACH(vr, &cif->vhif_vrs, sc_list)
+ if (vr != sc && vr->sc_vhid == sc->sc_vhid) {
+ CARP_UNLOCK(cif);
+ error = EINVAL;
+ goto cleanup;
+ }
+ }
+ sc->sc_ia6 = ia;
+ sc->sc_carpdev = ifp;
+
+ { /* XXX prevent endless loop if already in queue */
+ struct carp_softc *vr, *after = NULL;
+ int myself = 0;
+ cif = (struct carp_if *)ifp->if_carp;
+ CARP_LOCK_ASSERT(cif);
+
+ TAILQ_FOREACH(vr, &cif->vhif_vrs, sc_list) {
+ if (vr == sc)
+ myself = 1;
+ if (vr->sc_vhid < sc->sc_vhid)
+ after = vr;
+ }
+
+ if (!myself) {
+ /* We're trying to keep things in order */
+ if (after == NULL) {
+ TAILQ_INSERT_TAIL(&cif->vhif_vrs, sc, sc_list);
+ } else {
+ TAILQ_INSERT_AFTER(&cif->vhif_vrs, after, sc, sc_list);
+ }
+ cif->vhif_nvrs++;
+ }
+ }
+
+ sc->sc_naddrs6++;
+ SC2IFP(sc)->if_flags |= IFF_UP;
+ if (own)
+ sc->sc_advskew = 0;
+ carp_sc_state_locked(sc);
+ carp_setrun(sc, 0);
+
+ CARP_UNLOCK(cif);
+ ifa_free(&ia->ia_ifa); /* XXXRW: should hold reference for softc. */
+
+ return (0);
+
+cleanup:
+ if (!sc->sc_naddrs6)
+ carp_multicast6_cleanup(sc);
+ ifa_free(&ia->ia_ifa);
+ return (error);
+}
+
+static int
+carp_del_addr6(struct carp_softc *sc, struct sockaddr_in6 *sin6)
+{
+ int error = 0;
+
+ if (!--sc->sc_naddrs6) {
+ struct carp_if *cif = (struct carp_if *)sc->sc_carpdev->if_carp;
+
+ CARP_LOCK(cif);
+ callout_stop(&sc->sc_ad_tmo);
+ SC2IFP(sc)->if_flags &= ~IFF_UP;
+ SC2IFP(sc)->if_drv_flags &= ~IFF_DRV_RUNNING;
+ sc->sc_vhid = -1;
+ carp_multicast6_cleanup(sc);
+ TAILQ_REMOVE(&cif->vhif_vrs, sc, sc_list);
+ if (!--cif->vhif_nvrs) {
+ CARP_LOCK_DESTROY(cif);
+ sc->sc_carpdev->if_carp = NULL;
+ free(cif, M_CARP);
+ } else
+ CARP_UNLOCK(cif);
+ }
+
+ return (error);
+}
+#endif /* INET6 */
+
+static int
+carp_ioctl(struct ifnet *ifp, u_long cmd, caddr_t addr)
+{
+ struct carp_softc *sc = ifp->if_softc, *vr;
+ struct carpreq carpr;
+ struct ifaddr *ifa;
+ struct ifreq *ifr;
+ struct ifaliasreq *ifra;
+ int locked = 0, error = 0;
+
+ ifa = (struct ifaddr *)addr;
+ ifra = (struct ifaliasreq *)addr;
+ ifr = (struct ifreq *)addr;
+
+ switch (cmd) {
+ case SIOCSIFADDR:
+ switch (ifa->ifa_addr->sa_family) {
+#ifdef INET
+ case AF_INET:
+ SC2IFP(sc)->if_flags |= IFF_UP;
+ bcopy(ifa->ifa_addr, ifa->ifa_dstaddr,
+ sizeof(struct sockaddr));
+ error = carp_set_addr(sc, satosin(ifa->ifa_addr));
+ break;
+#endif /* INET */
+#ifdef INET6
+ case AF_INET6:
+ SC2IFP(sc)->if_flags |= IFF_UP;
+ error = carp_set_addr6(sc, satosin6(ifa->ifa_addr));
+ break;
+#endif /* INET6 */
+ default:
+ error = EAFNOSUPPORT;
+ break;
+ }
+ break;
+
+ case SIOCAIFADDR:
+ switch (ifa->ifa_addr->sa_family) {
+#ifdef INET
+ case AF_INET:
+ SC2IFP(sc)->if_flags |= IFF_UP;
+ bcopy(ifa->ifa_addr, ifa->ifa_dstaddr,
+ sizeof(struct sockaddr));
+ error = carp_set_addr(sc, satosin(&ifra->ifra_addr));
+ break;
+#endif /* INET */
+#ifdef INET6
+ case AF_INET6:
+ SC2IFP(sc)->if_flags |= IFF_UP;
+ error = carp_set_addr6(sc, satosin6(&ifra->ifra_addr));
+ break;
+#endif /* INET6 */
+ default:
+ error = EAFNOSUPPORT;
+ break;
+ }
+ break;
+
+ case SIOCDIFADDR:
+ switch (ifa->ifa_addr->sa_family) {
+#ifdef INET
+ case AF_INET:
+ error = carp_del_addr(sc, satosin(&ifra->ifra_addr));
+ break;
+#endif /* INET */
+#ifdef INET6
+ case AF_INET6:
+ error = carp_del_addr6(sc, satosin6(&ifra->ifra_addr));
+ break;
+#endif /* INET6 */
+ default:
+ error = EAFNOSUPPORT;
+ break;
+ }
+ break;
+
+ case SIOCSIFFLAGS:
+ if (sc->sc_carpdev) {
+ locked = 1;
+ CARP_SCLOCK(sc);
+ }
+ if (sc->sc_state != INIT && !(ifr->ifr_flags & IFF_UP)) {
+ callout_stop(&sc->sc_ad_tmo);
+ callout_stop(&sc->sc_md_tmo);
+ callout_stop(&sc->sc_md6_tmo);
+ if (sc->sc_state == MASTER)
+ carp_send_ad_locked(sc);
+ carp_set_state(sc, INIT);
+ carp_setrun(sc, 0);
+ } else if (sc->sc_state == INIT && (ifr->ifr_flags & IFF_UP)) {
+ SC2IFP(sc)->if_flags |= IFF_UP;
+ carp_setrun(sc, 0);
+ }
+ break;
+
+ case SIOCSVH:
+ error = priv_check(curthread, PRIV_NETINET_CARP);
+ if (error)
+ break;
+ if ((error = copyin(ifr->ifr_data, &carpr, sizeof carpr)))
+ break;
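+ /*
+ * From here on, error doubles as a counter: it starts at 1
+ * and is decremented once for each parameter group (vhid,
+ * advbase/advskew) the request actually carried, so a
+ * request that sets nothing falls through to EINVAL below.
+ */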
+ error = 1;
+ if (sc->sc_carpdev) {
+ locked = 1;
+ CARP_SCLOCK(sc);
+ }
+ if (sc->sc_state != INIT && carpr.carpr_state != sc->sc_state) {
+ switch (carpr.carpr_state) {
+ case BACKUP:
+ callout_stop(&sc->sc_ad_tmo);
+ carp_set_state(sc, BACKUP);
+ carp_setrun(sc, 0);
+ carp_setroute(sc, RTM_DELETE);
+ break;
+ case MASTER:
+ carp_master_down_locked(sc);
+ break;
+ default:
+ break;
+ }
+ }
+ if (carpr.carpr_vhid > 0) {
+ if (carpr.carpr_vhid > 255) {
+ error = EINVAL;
+ break;
+ }
+ if (sc->sc_carpdev) {
+ struct carp_if *cif;
+ cif = (struct carp_if *)sc->sc_carpdev->if_carp;
+ TAILQ_FOREACH(vr, &cif->vhif_vrs, sc_list)
+ if (vr != sc &&
+ vr->sc_vhid == carpr.carpr_vhid) {
+ error = EEXIST;
+ break;
+ }
+ if (error == EEXIST)
+ break;
+ }
+ sc->sc_vhid = carpr.carpr_vhid;
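+ /*
+ * Rewrite the link-level address to the IEEE-assigned
+ * virtual router MAC 00:00:5e:00:01:<vhid> (the same
+ * block VRRP uses), so every host sharing this vhid
+ * answers to one MAC address.
+ */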
+ IF_LLADDR(sc->sc_ifp)[0] = 0;
+ IF_LLADDR(sc->sc_ifp)[1] = 0;
+ IF_LLADDR(sc->sc_ifp)[2] = 0x5e;
+ IF_LLADDR(sc->sc_ifp)[3] = 0;
+ IF_LLADDR(sc->sc_ifp)[4] = 1;
+ IF_LLADDR(sc->sc_ifp)[5] = sc->sc_vhid;
+ error--;
+ }
+ if (carpr.carpr_advbase > 0 || carpr.carpr_advskew > 0) {
+ if (carpr.carpr_advskew >= 255) {
+ error = EINVAL;
+ break;
+ }
+ if (carpr.carpr_advbase > 255) {
+ error = EINVAL;
+ break;
+ }
+ sc->sc_advbase = carpr.carpr_advbase;
+ sc->sc_advskew = carpr.carpr_advskew;
+ error--;
+ }
+ bcopy(carpr.carpr_key, sc->sc_key, sizeof(sc->sc_key));
+ if (error > 0)
+ error = EINVAL;
+ else {
+ error = 0;
+ carp_setrun(sc, 0);
+ }
+ break;
+
+ case SIOCGVH:
+ /* XXX: lockless read */
+ bzero(&carpr, sizeof(carpr));
+ carpr.carpr_state = sc->sc_state;
+ carpr.carpr_vhid = sc->sc_vhid;
+ carpr.carpr_advbase = sc->sc_advbase;
+ carpr.carpr_advskew = sc->sc_advskew;
+ error = priv_check(curthread, PRIV_NETINET_CARP);
+ if (error == 0)
+ bcopy(sc->sc_key, carpr.carpr_key,
+ sizeof(carpr.carpr_key));
+ error = copyout(&carpr, ifr->ifr_data, sizeof(carpr));
+ break;
+
+ default:
+ error = EINVAL;
+ }
+
+ if (locked)
+ CARP_SCUNLOCK(sc);
+
+ carp_hmac_prepare(sc);
+
+ return (error);
+}
+
+/*
+ * XXX: this is looutput. We should eventually use it from there.
+ */
+static int
+carp_looutput(struct ifnet *ifp, struct mbuf *m, struct sockaddr *dst,
+ struct route *ro)
+{
+ u_int32_t af;
+ struct rtentry *rt = NULL;
+
+ M_ASSERTPKTHDR(m); /* check if we have the packet header */
+
+ if (ro != NULL)
+ rt = ro->ro_rt;
+ if (rt && rt->rt_flags & (RTF_REJECT|RTF_BLACKHOLE)) {
+ m_freem(m);
+ return (rt->rt_flags & RTF_BLACKHOLE ? 0 :
+ rt->rt_flags & RTF_HOST ? EHOSTUNREACH : ENETUNREACH);
+ }
+
+ ifp->if_opackets++;
+ ifp->if_obytes += m->m_pkthdr.len;
+
+ /* BPF writes need to be handled specially. */
+ if (dst->sa_family == AF_UNSPEC) {
+ bcopy(dst->sa_data, &af, sizeof(af));
+ dst->sa_family = af;
+ }
+
+#if 1 /* XXX */
+ switch (dst->sa_family) {
+ case AF_INET:
+ case AF_INET6:
+ case AF_IPX:
+ case AF_APPLETALK:
+ break;
+ default:
+ printf("carp_looutput: af=%d unexpected\n", dst->sa_family);
+ m_freem(m);
+ return (EAFNOSUPPORT);
+ }
+#endif
+ return(if_simloop(ifp, m, dst->sa_family, 0));
+}
+
+/*
+ * Start output on carp interface. This function should never be called.
+ */
+static void
+carp_start(struct ifnet *ifp)
+{
+#ifdef DEBUG
+ printf("%s: start called\n", ifp->if_xname);
+#endif
+}
+
+int
+carp_output(struct ifnet *ifp, struct mbuf *m, struct sockaddr *sa,
+ struct rtentry *rt)
+{
+ struct m_tag *mtag;
+ struct carp_softc *sc;
+ struct ifnet *carp_ifp;
+
+ if (!sa)
+ return (0);
+
+ switch (sa->sa_family) {
+#ifdef INET
+ case AF_INET:
+ break;
+#endif /* INET */
+#ifdef INET6
+ case AF_INET6:
+ break;
+#endif /* INET6 */
+ default:
+ return (0);
+ }
+
+ mtag = m_tag_find(m, PACKET_TAG_CARP, NULL);
+ if (mtag == NULL)
+ return (0);
+
+ bcopy(mtag + 1, &carp_ifp, sizeof(struct ifnet *));
+ sc = carp_ifp->if_softc;
+
+ /* Set the source MAC address to Virtual Router MAC Address */
+ switch (ifp->if_type) {
+ case IFT_ETHER:
+ case IFT_L2VLAN: {
+ struct ether_header *eh;
+
+ eh = mtod(m, struct ether_header *);
+ eh->ether_shost[0] = 0;
+ eh->ether_shost[1] = 0;
+ eh->ether_shost[2] = 0x5e;
+ eh->ether_shost[3] = 0;
+ eh->ether_shost[4] = 1;
+ eh->ether_shost[5] = sc->sc_vhid;
+ }
+ break;
+ case IFT_FDDI: {
+ struct fddi_header *fh;
+
+ fh = mtod(m, struct fddi_header *);
+ fh->fddi_shost[0] = 0;
+ fh->fddi_shost[1] = 0;
+ fh->fddi_shost[2] = 0x5e;
+ fh->fddi_shost[3] = 0;
+ fh->fddi_shost[4] = 1;
+ fh->fddi_shost[5] = sc->sc_vhid;
+ }
+ break;
+ case IFT_ISO88025: {
+ struct iso88025_header *th;
+ th = mtod(m, struct iso88025_header *);
+ th->iso88025_shost[0] = 3;
+ th->iso88025_shost[1] = 0;
+ th->iso88025_shost[2] = 0x40 >> (sc->sc_vhid - 1);
+ th->iso88025_shost[3] = 0x40000 >> (sc->sc_vhid - 1);
+ th->iso88025_shost[4] = 0;
+ th->iso88025_shost[5] = 0;
+ }
+ break;
+ default:
+ printf("%s: carp is not supported for this interface type\n",
+ ifp->if_xname);
+ return (EOPNOTSUPP);
+ }
+
+ return (0);
+}
+
+static void
+carp_set_state(struct carp_softc *sc, int state)
+{
+ int link_state;
+
+ if (sc->sc_carpdev)
+ CARP_SCLOCK_ASSERT(sc);
+
+ if (sc->sc_state == state)
+ return;
+
+ sc->sc_state = state;
+ switch (state) {
+ case BACKUP:
+ link_state = LINK_STATE_DOWN;
+ break;
+ case MASTER:
+ link_state = LINK_STATE_UP;
+ break;
+ default:
+ link_state = LINK_STATE_UNKNOWN;
+ break;
+ }
+ if_link_state_change(SC2IFP(sc), link_state);
+}
+
+void
+carp_carpdev_state(struct ifnet *ifp)
+{
+ struct carp_if *cif;
+
+ cif = ifp->if_carp;
+ CARP_LOCK(cif);
+ carp_carpdev_state_locked(cif);
+ CARP_UNLOCK(cif);
+}
+
+static void
+carp_carpdev_state_locked(struct carp_if *cif)
+{
+ struct carp_softc *sc;
+
+ TAILQ_FOREACH(sc, &cif->vhif_vrs, sc_list)
+ carp_sc_state_locked(sc);
+}
+
+static void
+carp_sc_state_locked(struct carp_softc *sc)
+{
+ CARP_SCLOCK_ASSERT(sc);
+
+ if (sc->sc_carpdev->if_link_state != LINK_STATE_UP ||
+ !(sc->sc_carpdev->if_flags & IFF_UP)) {
+ sc->sc_flags_backup = SC2IFP(sc)->if_flags;
+ SC2IFP(sc)->if_flags &= ~IFF_UP;
+ SC2IFP(sc)->if_drv_flags &= ~IFF_DRV_RUNNING;
+ callout_stop(&sc->sc_ad_tmo);
+ callout_stop(&sc->sc_md_tmo);
+ callout_stop(&sc->sc_md6_tmo);
+ carp_set_state(sc, INIT);
+ carp_setrun(sc, 0);
+ if (!sc->sc_suppress) {
+ carp_suppress_preempt++;
+ if (carp_suppress_preempt == 1) {
+ CARP_SCUNLOCK(sc);
+ carp_send_ad_all();
+ CARP_SCLOCK(sc);
+ }
+ }
+ sc->sc_suppress = 1;
+ } else {
+ SC2IFP(sc)->if_flags |= sc->sc_flags_backup;
+ carp_set_state(sc, INIT);
+ carp_setrun(sc, 0);
+ if (sc->sc_suppress)
+ carp_suppress_preempt--;
+ sc->sc_suppress = 0;
+ }
+
+ return;
+}
+
+#ifdef INET
+extern struct domain inetdomain;
+static struct protosw in_carp_protosw = {
+ .pr_type = SOCK_RAW,
+ .pr_domain = &inetdomain,
+ .pr_protocol = IPPROTO_CARP,
+ .pr_flags = PR_ATOMIC|PR_ADDR,
+ .pr_input = carp_input,
+ .pr_output = (pr_output_t *)rip_output,
+ .pr_ctloutput = rip_ctloutput,
+ .pr_usrreqs = &rip_usrreqs
+};
+#endif
+
+#ifdef INET6
+extern struct domain inet6domain;
+static struct ip6protosw in6_carp_protosw = {
+ .pr_type = SOCK_RAW,
+ .pr_domain = &inet6domain,
+ .pr_protocol = IPPROTO_CARP,
+ .pr_flags = PR_ATOMIC|PR_ADDR,
+ .pr_input = carp6_input,
+ .pr_output = rip6_output,
+ .pr_ctloutput = rip6_ctloutput,
+ .pr_usrreqs = &rip6_usrreqs
+};
+#endif
+
+static void
+carp_mod_cleanup(void)
+{
+
+ if (if_detach_event_tag == NULL)
+ return;
+ EVENTHANDLER_DEREGISTER(ifnet_departure_event, if_detach_event_tag);
+ if_clone_detach(&carp_cloner);
+#ifdef INET
+ if (proto_reg[CARP_INET] == 0) {
+ (void)ipproto_unregister(IPPROTO_CARP);
+ pf_proto_unregister(PF_INET, IPPROTO_CARP, SOCK_RAW);
+ proto_reg[CARP_INET] = -1;
+ }
+ carp_iamatch_p = NULL;
+#endif
+#ifdef INET6
+ if (proto_reg[CARP_INET6] == 0) {
+ (void)ip6proto_unregister(IPPROTO_CARP);
+ pf_proto_unregister(PF_INET6, IPPROTO_CARP, SOCK_RAW);
+ proto_reg[CARP_INET6] = -1;
+ }
+ carp_iamatch6_p = NULL;
+ carp_macmatch6_p = NULL;
+#endif
+ carp_linkstate_p = NULL;
+ carp_forus_p = NULL;
+ carp_output_p = NULL;
+ mtx_destroy(&carp_mtx);
+}
+
+static int
+carp_mod_load(void)
+{
+ int err;
+
+ if_detach_event_tag = EVENTHANDLER_REGISTER(ifnet_departure_event,
+ carp_ifdetach, NULL, EVENTHANDLER_PRI_ANY);
+ if (if_detach_event_tag == NULL)
+ return (ENOMEM);
+ mtx_init(&carp_mtx, "carp_mtx", NULL, MTX_DEF);
+ LIST_INIT(&carpif_list);
+ if_clone_attach(&carp_cloner);
+ carp_linkstate_p = carp_carpdev_state;
+ carp_forus_p = carp_forus;
+ carp_output_p = carp_output;
+#ifdef INET6
+ carp_iamatch6_p = carp_iamatch6;
+ carp_macmatch6_p = carp_macmatch6;
+ proto_reg[CARP_INET6] = pf_proto_register(PF_INET6,
+ (struct protosw *)&in6_carp_protosw);
+ if (proto_reg[CARP_INET6] != 0) {
+ printf("carp: error %d attaching to PF_INET6\n",
+ proto_reg[CARP_INET6]);
+ carp_mod_cleanup();
+ return (EINVAL);
+ }
+ err = ip6proto_register(IPPROTO_CARP);
+ if (err) {
+ printf("carp: error %d registering with INET6\n", err);
+ carp_mod_cleanup();
+ return (EINVAL);
+ }
+#endif
+#ifdef INET
+ carp_iamatch_p = carp_iamatch;
+ proto_reg[CARP_INET] = pf_proto_register(PF_INET, &in_carp_protosw);
+ if (proto_reg[CARP_INET] != 0) {
+ printf("carp: error %d attaching to PF_INET\n",
+ proto_reg[CARP_INET]);
+ carp_mod_cleanup();
+ return (EINVAL);
+ }
+ err = ipproto_register(IPPROTO_CARP);
+ if (err) {
+ printf("carp: error %d registering with INET\n", err);
+ carp_mod_cleanup();
+ return (EINVAL);
+ }
+#endif
+ return 0;
+}
+
+static int
+carp_modevent(module_t mod, int type, void *data)
+{
+ switch (type) {
+ case MOD_LOAD:
+ return carp_mod_load();
+ /* NOTREACHED */
+ case MOD_UNLOAD:
+ /*
+ * XXX: For now, disallow module unloading by default due to
+ * a race condition where a thread may dereference one of the
+ * function pointer hooks after the module has been
+ * unloaded, during processing of a packet, causing a panic.
+ */
+#ifdef CARPMOD_CAN_UNLOAD
+ carp_mod_cleanup();
+#else
+ return (EBUSY);
+#endif
+ break;
+
+ default:
+ return (EINVAL);
+ }
+
+ return (0);
+}
+
+static moduledata_t carp_mod = {
+ "carp",
+ carp_modevent,
+ 0
+};
+
+DECLARE_MODULE(carp, carp_mod, SI_SUB_PROTO_DOMAIN, SI_ORDER_ANY);
diff --git a/rtems/freebsd/netinet/ip_carp.h b/rtems/freebsd/netinet/ip_carp.h
new file mode 100644
index 00000000..2f2b4f28
--- /dev/null
+++ b/rtems/freebsd/netinet/ip_carp.h
@@ -0,0 +1,191 @@
+/* $FreeBSD$ */
+/* $OpenBSD: ip_carp.h,v 1.8 2004/07/29 22:12:15 mcbride Exp $ */
+
+/*
+ * Copyright (c) 2002 Michael Shalayeff. All rights reserved.
+ * Copyright (c) 2003 Ryan McBride. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR OR HIS RELATIVES BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF MIND, USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _IP_CARP_H
+#define _IP_CARP_H
+
+/*
+ * The CARP header layout is as follows:
+ *
+ * 0 1 2 3
+ * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |Version| Type | VirtualHostID | AdvSkew | Auth Len |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Reserved | AdvBase | Checksum |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Counter (1) |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Counter (2) |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | SHA-1 HMAC (1) |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | SHA-1 HMAC (2) |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | SHA-1 HMAC (3) |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | SHA-1 HMAC (4) |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | SHA-1 HMAC (5) |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ */
+
+struct carp_header {
+#if BYTE_ORDER == LITTLE_ENDIAN
+ u_int8_t carp_type:4,
+ carp_version:4;
+#endif
+#if BYTE_ORDER == BIG_ENDIAN
+ u_int8_t carp_version:4,
+ carp_type:4;
+#endif
+ u_int8_t carp_vhid; /* virtual host id */
+ u_int8_t carp_advskew; /* advertisement skew */
+ u_int8_t carp_authlen; /* size of counter+md, 32bit chunks */
+ u_int8_t carp_pad1; /* reserved */
+ u_int8_t carp_advbase; /* advertisement interval */
+ u_int16_t carp_cksum;
+ u_int32_t carp_counter[2];
+ unsigned char carp_md[20]; /* SHA1 HMAC */
+} __packed;
+
+#ifdef CTASSERT
+CTASSERT(sizeof(struct carp_header) == 36);
+#endif
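+
+/*
+ * Illustrative sketch (not part of the original sources): filling in
+ * the fixed part of an advertisement with the macros defined below.
+ * A vhid of 1 and an advskew of 0 are hypothetical example values;
+ * carp_authlen is 7 because the authenticated data is the two
+ * 32-bit counter words plus the five 32-bit words of the SHA-1 HMAC.
+ * The counter and HMAC themselves are computed by the sender (see
+ * carp_hmac_prepare() in ip_carp.c).
+ *
+ * struct carp_header ch;
+ *
+ * bzero(&ch, sizeof(ch));
+ * ch.carp_version = CARP_VERSION;
+ * ch.carp_type = CARP_ADVERTISEMENT;
+ * ch.carp_vhid = 1;
+ * ch.carp_advbase = CARP_DFLTINTV;
+ * ch.carp_advskew = 0;
+ * ch.carp_authlen = 7;
+ */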
+
+#define CARP_DFLTTL 255
+
+/* carp_version */
+#define CARP_VERSION 2
+
+/* carp_type */
+#define CARP_ADVERTISEMENT 0x01
+
+#define CARP_KEY_LEN 20 /* a sha1 hash of a passphrase */
+
+/* carp_advbase */
+#define CARP_DFLTINTV 1
+
+/*
+ * Statistics.
+ */
+struct carpstats {
+ uint64_t carps_ipackets; /* total input packets, IPv4 */
+ uint64_t carps_ipackets6; /* total input packets, IPv6 */
+ uint64_t carps_badif; /* wrong interface */
+ uint64_t carps_badttl; /* TTL is not CARP_DFLTTL */
+ uint64_t carps_hdrops; /* packets shorter than hdr */
+ uint64_t carps_badsum; /* bad checksum */
+ uint64_t carps_badver; /* bad (incl unsupp) version */
+ uint64_t carps_badlen; /* data length does not match */
+ uint64_t carps_badauth; /* bad authentication */
+ uint64_t carps_badvhid; /* bad VHID */
+ uint64_t carps_badaddrs; /* bad address list */
+
+ uint64_t carps_opackets; /* total output packets, IPv4 */
+ uint64_t carps_opackets6; /* total output packets, IPv6 */
+ uint64_t carps_onomem; /* no memory for an mbuf */
+ uint64_t carps_ostates; /* total state updates sent */
+
+ uint64_t carps_preempt; /* if enabled, preemptions */
+};
+
+#ifdef _KERNEL
+#define CARPSTATS_ADD(name, val) carpstats.name += (val)
+#define CARPSTATS_INC(name) CARPSTATS_ADD(name, 1)
+#endif
+
+/*
+ * Configuration structure for the SIOCSVH and SIOCGVH ioctls.
+ */
+struct carpreq {
+ int carpr_state;
+#define CARP_STATES "INIT", "BACKUP", "MASTER"
+#define CARP_MAXSTATE 2
+ int carpr_vhid;
+ int carpr_advskew;
+ int carpr_advbase;
+ unsigned char carpr_key[CARP_KEY_LEN];
+};
+#define SIOCSVH _IOWR('i', 245, struct ifreq)
+#define SIOCGVH _IOWR('i', 246, struct ifreq)
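+
+/*
+ * Illustrative sketch (not part of the original sources): setting the
+ * parameters above from userland. The carpreq is passed indirectly:
+ * ifr_data points at it and carp_ioctl() copies it in with copyin().
+ * The interface name and all values here are hypothetical.
+ *
+ * struct carpreq cr;
+ * struct ifreq ifr;
+ * int s = socket(AF_INET, SOCK_DGRAM, 0);
+ *
+ * memset(&cr, 0, sizeof(cr));
+ * cr.carpr_vhid = 1;
+ * cr.carpr_advbase = CARP_DFLTINTV;
+ * cr.carpr_advskew = 100;
+ * strlcpy((char *)cr.carpr_key, "examplepass", CARP_KEY_LEN);
+ * memset(&ifr, 0, sizeof(ifr));
+ * strlcpy(ifr.ifr_name, "carp0", sizeof(ifr.ifr_name));
+ * ifr.ifr_data = (caddr_t)&cr;
+ * if (ioctl(s, SIOCSVH, &ifr) == -1)
+ * err(1, "SIOCSVH");
+ */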
+
+/*
+ * Names for CARP sysctl objects
+ */
+#define CARPCTL_ALLOW 1 /* accept incoming CARP packets */
+#define CARPCTL_PREEMPT 2 /* high-pri backup preemption mode */
+#define CARPCTL_LOG 3 /* log bad packets */
+#define CARPCTL_STATS 4 /* statistics (read-only) */
+#define CARPCTL_ARPBALANCE 5 /* balance arp responses */
+#define CARPCTL_MAXID 6
+
+#define CARPCTL_NAMES { \
+ { 0, 0 }, \
+ { "allow", CTLTYPE_INT }, \
+ { "preempt", CTLTYPE_INT }, \
+ { "log", CTLTYPE_INT }, \
+ { "stats", CTLTYPE_STRUCT }, \
+ { "arpbalance", CTLTYPE_INT }, \
+}
+
+#ifdef _KERNEL
+void carp_carpdev_state(struct ifnet *);
+void carp_input (struct mbuf *, int);
+int carp6_input (struct mbuf **, int *, int);
+int carp_output (struct ifnet *, struct mbuf *, struct sockaddr *,
+ struct rtentry *);
+int carp_iamatch (struct ifnet *, struct in_ifaddr *, struct in_addr *,
+ u_int8_t **);
+struct ifaddr *carp_iamatch6(struct ifnet *, struct in6_addr *);
+caddr_t carp_macmatch6(struct ifnet *, struct mbuf *, const struct in6_addr *);
+struct ifnet *carp_forus (struct ifnet *, u_char *);
+
+/* These are external networking stack hooks for CARP */
+/* net/if.c */
+extern void (*carp_linkstate_p)(struct ifnet *);
+/* net/if_bridge.c net/if_ethersubr.c */
+extern struct ifnet *(*carp_forus_p)(struct ifnet *, u_char *);
+/* net/if_ethersubr.c */
+extern int (*carp_output_p)(struct ifnet *, struct mbuf *,
+ struct sockaddr *, struct rtentry *);
+#ifdef INET
+/* netinet/if_ether.c */
+extern int (*carp_iamatch_p)(struct ifnet *, struct in_ifaddr *,
+ struct in_addr *, u_int8_t **);
+#endif
+#ifdef INET6
+/* netinet6/nd6_nbr.c */
+extern struct ifaddr *(*carp_iamatch6_p)(struct ifnet *, struct in6_addr *);
+extern caddr_t (*carp_macmatch6_p)(struct ifnet *, struct mbuf *,
+ const struct in6_addr *);
+#endif
+#endif
+#endif /* _IP_CARP_H */
diff --git a/rtems/freebsd/netinet/ip_divert.c b/rtems/freebsd/netinet/ip_divert.c
new file mode 100644
index 00000000..d7dde8e9
--- /dev/null
+++ b/rtems/freebsd/netinet/ip_divert.c
@@ -0,0 +1,818 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 1982, 1986, 1988, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#if !defined(KLD_MODULE)
+#include <rtems/freebsd/local/opt_inet.h>
+#include <rtems/freebsd/local/opt_sctp.h>
+#ifndef INET
+#error "IPDIVERT requires INET."
+#endif
+#endif
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/module.h>
+#include <rtems/freebsd/sys/priv.h>
+#include <rtems/freebsd/sys/proc.h>
+#include <rtems/freebsd/sys/protosw.h>
+#include <rtems/freebsd/sys/rwlock.h>
+#include <rtems/freebsd/sys/signalvar.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/socketvar.h>
+#include <rtems/freebsd/sys/sx.h>
+#include <rtems/freebsd/sys/sysctl.h>
+#include <rtems/freebsd/sys/systm.h>
+
+#include <rtems/freebsd/vm/uma.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/netisr.h>
+#include <rtems/freebsd/net/route.h>
+#include <rtems/freebsd/net/vnet.h>
+
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/in_pcb.h>
+#include <rtems/freebsd/netinet/in_systm.h>
+#include <rtems/freebsd/netinet/in_var.h>
+#include <rtems/freebsd/netinet/ip.h>
+#include <rtems/freebsd/netinet/ip_var.h>
+#ifdef SCTP
+#include <rtems/freebsd/netinet/sctp_crc32.h>
+#endif
+
+#include <rtems/freebsd/security/mac/mac_framework.h>
+
+/*
+ * Divert sockets
+ */
+
+/*
+ * Allocate enough space to hold a full IP packet
+ */
+#define DIVSNDQ (65536 + 100)
+#define DIVRCVQ (65536 + 100)
+
+/*
+ * Divert sockets work in conjunction with ipfw or other packet
+ * filters; see the divert(4) manpage for features.
+ * Packets are selected by the packet filter and tagged with an
+ * MTAG_IPFW_RULE tag carrying the 'divert port' number (as set by
+ * the packet filter) and information on the matching filter rule for
+ * subsequent reinjection. The divert_port is used to put the packet
+ * on the corresponding divert socket, while the rule number is passed
+ * up (at least partially) as the sin_port in the struct sockaddr.
+ *
+ * Packets written to the divert socket carry in sin_addr a
+ * destination address, and in sin_port the number of the filter rule
+ * after which to continue processing.
+ * If the destination address is INADDR_ANY, the packet is treated
+ * as outgoing and sent to ip_output(); otherwise it is treated as
+ * incoming and sent to ip_input().
+ * Further, sin_zero carries some information on the interface,
+ * which can be used in the reinject -- see comments in the code.
+ *
+ * On reinjection, processing in ip_input() and ip_output()
+ * will be exactly the same as for the original packet, except that
+ * packet filter processing will start at the rule number after the one
+ * written in the sin_port (ipfw does not allow a rule #0, so sin_port=0
+ * will apply the entire ruleset to the packet).
+ */
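+
+/*
+ * Illustrative sketch (not part of the original sources): the
+ * userland half of the protocol described above. A daemon binds a
+ * divert socket to the port used in the ipfw divert rule, reads each
+ * diverted packet together with the sockaddr_in described above, and
+ * writes it back unchanged to reinject it in its original direction.
+ * Port 8668 is a hypothetical example value.
+ *
+ * struct sockaddr_in sin;
+ * socklen_t slen;
+ * char buf[65535];
+ * ssize_t n;
+ * int s = socket(PF_INET, SOCK_RAW, IPPROTO_DIVERT);
+ *
+ * memset(&sin, 0, sizeof(sin));
+ * sin.sin_family = AF_INET;
+ * sin.sin_port = htons(8668);
+ * sin.sin_addr.s_addr = INADDR_ANY;
+ * bind(s, (struct sockaddr *)&sin, sizeof(sin));
+ * for (;;) {
+ * slen = sizeof(sin);
+ * n = recvfrom(s, buf, sizeof(buf), 0,
+ * (struct sockaddr *)&sin, &slen);
+ * if (n < 0)
+ * break;
+ * sendto(s, buf, n, 0, (struct sockaddr *)&sin, slen);
+ * }
+ */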
+
+/* Internal variables. */
+static VNET_DEFINE(struct inpcbhead, divcb);
+static VNET_DEFINE(struct inpcbinfo, divcbinfo);
+
+#define V_divcb VNET(divcb)
+#define V_divcbinfo VNET(divcbinfo)
+
+static u_long div_sendspace = DIVSNDQ; /* XXX sysctl ? */
+static u_long div_recvspace = DIVRCVQ; /* XXX sysctl ? */
+
+static eventhandler_tag ip_divert_event_tag;
+
+/*
+ * Initialize divert connection block queue.
+ */
+static void
+div_zone_change(void *tag)
+{
+
+ uma_zone_set_max(V_divcbinfo.ipi_zone, maxsockets);
+}
+
+static int
+div_inpcb_init(void *mem, int size, int flags)
+{
+ struct inpcb *inp = mem;
+
+ INP_LOCK_INIT(inp, "inp", "divinp");
+ return (0);
+}
+
+static void
+div_inpcb_fini(void *mem, int size)
+{
+ struct inpcb *inp = mem;
+
+ INP_LOCK_DESTROY(inp);
+}
+
+static void
+div_init(void)
+{
+
+ INP_INFO_LOCK_INIT(&V_divcbinfo, "div");
+ LIST_INIT(&V_divcb);
+ V_divcbinfo.ipi_listhead = &V_divcb;
+#ifdef VIMAGE
+ V_divcbinfo.ipi_vnet = curvnet;
+#endif
+ /*
+ * XXX We don't use the hash list for divert IP, but it's easier
+ * to allocate a one entry hash list than it is to check all
+ * over the place for hashbase == NULL.
+ */
+ V_divcbinfo.ipi_hashbase = hashinit(1, M_PCB, &V_divcbinfo.ipi_hashmask);
+ V_divcbinfo.ipi_porthashbase = hashinit(1, M_PCB,
+ &V_divcbinfo.ipi_porthashmask);
+ V_divcbinfo.ipi_zone = uma_zcreate("divcb", sizeof(struct inpcb),
+ NULL, NULL, div_inpcb_init, div_inpcb_fini, UMA_ALIGN_PTR,
+ UMA_ZONE_NOFREE);
+ uma_zone_set_max(V_divcbinfo.ipi_zone, maxsockets);
+}
+
+static void
+div_destroy(void)
+{
+
+ INP_INFO_LOCK_DESTROY(&V_divcbinfo);
+ uma_zdestroy(V_divcbinfo.ipi_zone);
+ hashdestroy(V_divcbinfo.ipi_hashbase, M_PCB, V_divcbinfo.ipi_hashmask);
+ hashdestroy(V_divcbinfo.ipi_porthashbase, M_PCB,
+ V_divcbinfo.ipi_porthashmask);
+}
+
+/*
+ * IPPROTO_DIVERT is not in the real IP protocol number space; this
+ * function should never be called. Just in case, drop any packets.
+ */
+static void
+div_input(struct mbuf *m, int off)
+{
+
+ KMOD_IPSTAT_INC(ips_noproto);
+ m_freem(m);
+}
+
+/*
+ * Divert a packet by passing it up to the divert socket bound to
+ * the divert port carried in the packet's ipfw rule tag.
+ *
+ * Set up the generic address and protocol structures for the
+ * divert socket, then pass them along with the mbuf chain.
+ */
+static void
+divert_packet(struct mbuf *m, int incoming)
+{
+ struct ip *ip;
+ struct inpcb *inp;
+ struct socket *sa;
+ u_int16_t nport;
+ struct sockaddr_in divsrc;
+ struct m_tag *mtag;
+
+ mtag = m_tag_locate(m, MTAG_IPFW_RULE, 0, NULL);
+ if (mtag == NULL) {
+ m_freem(m);
+ return;
+ }
+ /* Make sure at least the IP header is contiguous in the mbuf */
+ if (m->m_len < sizeof(struct ip) &&
+ (m = m_pullup(m, sizeof(struct ip))) == 0)
+ return;
+ ip = mtod(m, struct ip *);
+
+ /* Delayed checksums are currently not compatible with divert. */
+ if (m->m_pkthdr.csum_flags & CSUM_DELAY_DATA) {
+ ip->ip_len = ntohs(ip->ip_len);
+ in_delayed_cksum(m);
+ m->m_pkthdr.csum_flags &= ~CSUM_DELAY_DATA;
+ ip->ip_len = htons(ip->ip_len);
+ }
+#ifdef SCTP
+ if (m->m_pkthdr.csum_flags & CSUM_SCTP) {
+ ip->ip_len = ntohs(ip->ip_len);
+ sctp_delayed_cksum(m, (uint32_t)(ip->ip_hl << 2));
+ m->m_pkthdr.csum_flags &= ~CSUM_SCTP;
+ ip->ip_len = htons(ip->ip_len);
+ }
+#endif
+ bzero(&divsrc, sizeof(divsrc));
+ divsrc.sin_len = sizeof(divsrc);
+ divsrc.sin_family = AF_INET;
+ /* record matching rule, in host format */
+ divsrc.sin_port = ((struct ipfw_rule_ref *)(mtag+1))->rulenum;
+ /*
+ * Record receive interface address, if any.
+ * But only for incoming packets.
+ */
+ if (incoming) {
+ struct ifaddr *ifa;
+ struct ifnet *ifp;
+
+ /* Sanity check */
+ M_ASSERTPKTHDR(m);
+
+ /* Find IP address for receive interface */
+ ifp = m->m_pkthdr.rcvif;
+ if_addr_rlock(ifp);
+ TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
+ if (ifa->ifa_addr->sa_family != AF_INET)
+ continue;
+ divsrc.sin_addr =
+ ((struct sockaddr_in *) ifa->ifa_addr)->sin_addr;
+ break;
+ }
+ if_addr_runlock(ifp);
+ }
+ /*
+ * Record the incoming interface name whenever we have one.
+ */
+ if (m->m_pkthdr.rcvif) {
+ /*
+ * Hide the actual interface name in there in the
+ * sin_zero array. XXX This needs to be moved to a
+ * different sockaddr type for divert, e.g.
+ * sockaddr_div with multiple fields like
+ * sockaddr_dl. Presently we have only 7 bytes,
+ * but that will do for now: most interface names
+ * are 4 characters or fewer, plus at most 2 for
+ * the unit number.
+ * There is probably a faster way of doing this,
+ * possibly taking it from the sockaddr_dl on the iface.
+ * This solves the problem of a P2P link and a LAN interface
+ * having the same address, which can result in the wrong
+ * interface being assigned to the packet when fed back
+ * into the divert socket. Theoretically if the daemon saves
+ * and re-uses the sockaddr_in as suggested in the man pages,
+ * this iface name will come along for the ride.
+ * (see div_output for the other half of this.)
+ */
+ strlcpy(divsrc.sin_zero, m->m_pkthdr.rcvif->if_xname,
+ sizeof(divsrc.sin_zero));
+ }
+
+ /* Put packet on socket queue, if any */
+ sa = NULL;
+ nport = htons((u_int16_t)(((struct ipfw_rule_ref *)(mtag+1))->info));
+ INP_INFO_RLOCK(&V_divcbinfo);
+ LIST_FOREACH(inp, &V_divcb, inp_list) {
+ /* XXX why does only one socket match? */
+ if (inp->inp_lport == nport) {
+ INP_RLOCK(inp);
+ sa = inp->inp_socket;
+ SOCKBUF_LOCK(&sa->so_rcv);
+ if (sbappendaddr_locked(&sa->so_rcv,
+ (struct sockaddr *)&divsrc, m,
+ (struct mbuf *)0) == 0) {
+ SOCKBUF_UNLOCK(&sa->so_rcv);
+ sa = NULL; /* force mbuf reclaim below */
+ } else
+ sorwakeup_locked(sa);
+ INP_RUNLOCK(inp);
+ break;
+ }
+ }
+ INP_INFO_RUNLOCK(&V_divcbinfo);
+ if (sa == NULL) {
+ m_freem(m);
+ KMOD_IPSTAT_INC(ips_noproto);
+ KMOD_IPSTAT_DEC(ips_delivered);
+ }
+}
+
+/*
+ * Deliver packet back into the IP processing machinery.
+ *
+ * If no address specified, or address is 0.0.0.0, send to ip_output();
+ * otherwise, send to ip_input() and mark as having been received on
+ * the interface with that address.
+ */
+static int
+div_output(struct socket *so, struct mbuf *m, struct sockaddr_in *sin,
+ struct mbuf *control)
+{
+ struct m_tag *mtag;
+ struct ipfw_rule_ref *dt;
+ int error = 0;
+ struct mbuf *options;
+
+ /*
+ * An mbuf may not have come from userland, but we pretend
+ * that it has.
+ */
+ m->m_pkthdr.rcvif = NULL;
+ m->m_nextpkt = NULL;
+ M_SETFIB(m, so->so_fibnum);
+
+ if (control)
+ m_freem(control); /* XXX */
+
+ mtag = m_tag_locate(m, MTAG_IPFW_RULE, 0, NULL);
+ if (mtag == NULL) {
+ /* this should be normal */
+ mtag = m_tag_alloc(MTAG_IPFW_RULE, 0,
+ sizeof(struct ipfw_rule_ref), M_NOWAIT | M_ZERO);
+ if (mtag == NULL) {
+ error = ENOBUFS;
+ goto cantsend;
+ }
+ m_tag_prepend(m, mtag);
+ }
+ dt = (struct ipfw_rule_ref *)(mtag+1);
+
+ /* Loopback avoidance and state recovery */
+ if (sin) {
+ int i;
+
+ /* Set the starting point. We provide a non-zero slot,
+ * but a non-matching chain_id, so the lookup skips that
+ * info and uses the rulenum/rule_id instead.
+ */
+ dt->slot = 1; /* dummy, chain_id is invalid */
+ dt->chain_id = 0;
+ dt->rulenum = sin->sin_port+1; /* host format ? */
+ dt->rule_id = 0;
+ /*
+ * Find receive interface with the given name, stuffed
+ * (if it exists) in the sin_zero[] field.
+ * The name is user supplied data so don't trust its size
+ * or that it is zero terminated.
+ */
+ for (i = 0; i < sizeof(sin->sin_zero) && sin->sin_zero[i]; i++)
+ ;
+ if ( i > 0 && i < sizeof(sin->sin_zero))
+ m->m_pkthdr.rcvif = ifunit(sin->sin_zero);
+ }
+
+ /* Reinject packet into the system as incoming or outgoing */
+ if (!sin || sin->sin_addr.s_addr == 0) {
+ struct ip *const ip = mtod(m, struct ip *);
+ struct inpcb *inp;
+
+ dt->info |= IPFW_IS_DIVERT | IPFW_INFO_OUT;
+ inp = sotoinpcb(so);
+ INP_RLOCK(inp);
+ /*
+ * Don't allow both user specified and setsockopt options,
+ * and don't allow packet length sizes that will crash
+ */
+ if (((ip->ip_hl != (sizeof (*ip) >> 2)) && inp->inp_options) ||
+ ((u_short)ntohs(ip->ip_len) > m->m_pkthdr.len)) {
+ error = EINVAL;
+ INP_RUNLOCK(inp);
+ m_freem(m);
+ } else {
+ /* Convert fields to host order for ip_output() */
+ ip->ip_len = ntohs(ip->ip_len);
+ ip->ip_off = ntohs(ip->ip_off);
+
+ /* Send packet to output processing */
+ KMOD_IPSTAT_INC(ips_rawout); /* XXX */
+
+#ifdef MAC
+ mac_inpcb_create_mbuf(inp, m);
+#endif
+ /*
+ * Get ready to inject the packet into ip_output().
+ * Just in case socket options were specified on the
+ * divert socket, we duplicate them. This is done
+ * to avoid having to hold the PCB locks over the call
+ * to ip_output(), as doing this results in a number of
+ * lock ordering complexities.
+ *
+ * Note that we set the multicast options argument for
+ * ip_output() to NULL since it should be invariant that
+ * they are not present.
+ */
+ KASSERT(inp->inp_moptions == NULL,
+ ("multicast options set on a divert socket"));
+ options = NULL;
+ /*
+ * XXXCSJP: It is unclear to me whether or not it makes
+ * sense for divert sockets to have options. However,
+ * for now we will duplicate them with the INP locks
+ * held so we can use them in ip_output() without
+ * requiring a reference to the pcb.
+ */
+ if (inp->inp_options != NULL) {
+ options = m_dup(inp->inp_options, M_DONTWAIT);
+ if (options == NULL)
+ error = ENOBUFS;
+ }
+ INP_RUNLOCK(inp);
+ if (error == ENOBUFS) {
+ m_freem(m);
+ return (error);
+ }
+ error = ip_output(m, options, NULL,
+ ((so->so_options & SO_DONTROUTE) ?
+ IP_ROUTETOIF : 0) | IP_ALLOWBROADCAST |
+ IP_RAWOUTPUT, NULL, NULL);
+ if (options != NULL)
+ m_freem(options);
+ }
+ } else {
+ dt->info |= IPFW_IS_DIVERT | IPFW_INFO_IN;
+ if (m->m_pkthdr.rcvif == NULL) {
+ /*
+ * No luck with the name, check by IP address.
+ * Clear the port and the ifname to make sure
+ * there are no distractions for ifa_ifwithaddr.
+ */
+ struct ifaddr *ifa;
+
+ bzero(sin->sin_zero, sizeof(sin->sin_zero));
+ sin->sin_port = 0;
+ ifa = ifa_ifwithaddr((struct sockaddr *) sin);
+ if (ifa == NULL) {
+ error = EADDRNOTAVAIL;
+ goto cantsend;
+ }
+ m->m_pkthdr.rcvif = ifa->ifa_ifp;
+ ifa_free(ifa);
+ }
+#ifdef MAC
+ mac_socket_create_mbuf(so, m);
+#endif
+ /* Send packet to input processing via netisr */
+ netisr_queue_src(NETISR_IP, (uintptr_t)so, m);
+ }
+
+ return error;
+
+cantsend:
+ m_freem(m);
+ return error;
+}
+
+static int
+div_attach(struct socket *so, int proto, struct thread *td)
+{
+ struct inpcb *inp;
+ int error;
+
+ inp = sotoinpcb(so);
+ KASSERT(inp == NULL, ("div_attach: inp != NULL"));
+ if (td != NULL) {
+ error = priv_check(td, PRIV_NETINET_DIVERT);
+ if (error)
+ return (error);
+ }
+ error = soreserve(so, div_sendspace, div_recvspace);
+ if (error)
+ return error;
+ INP_INFO_WLOCK(&V_divcbinfo);
+ error = in_pcballoc(so, &V_divcbinfo);
+ if (error) {
+ INP_INFO_WUNLOCK(&V_divcbinfo);
+ return error;
+ }
+ inp = (struct inpcb *)so->so_pcb;
+ INP_INFO_WUNLOCK(&V_divcbinfo);
+ inp->inp_ip_p = proto;
+ inp->inp_vflag |= INP_IPV4;
+ inp->inp_flags |= INP_HDRINCL;
+ INP_WUNLOCK(inp);
+ return 0;
+}
+
+static void
+div_detach(struct socket *so)
+{
+ struct inpcb *inp;
+
+ inp = sotoinpcb(so);
+ KASSERT(inp != NULL, ("div_detach: inp == NULL"));
+ INP_INFO_WLOCK(&V_divcbinfo);
+ INP_WLOCK(inp);
+ in_pcbdetach(inp);
+ in_pcbfree(inp);
+ INP_INFO_WUNLOCK(&V_divcbinfo);
+}
+
+static int
+div_bind(struct socket *so, struct sockaddr *nam, struct thread *td)
+{
+ struct inpcb *inp;
+ int error;
+
+ inp = sotoinpcb(so);
+ KASSERT(inp != NULL, ("div_bind: inp == NULL"));
+ /*
+ * in_pcbbind assumes that nam is a sockaddr_in and requires
+ * a valid address. Since divert sockets don't carry one, we
+ * need to make sure the address is filled in properly.
+ * XXX -- divert should not be abusing in_pcbbind
+ * and should probably have its own family.
+ */
+ if (nam->sa_family != AF_INET)
+ return EAFNOSUPPORT;
+ ((struct sockaddr_in *)nam)->sin_addr.s_addr = INADDR_ANY;
+ INP_INFO_WLOCK(&V_divcbinfo);
+ INP_WLOCK(inp);
+ error = in_pcbbind(inp, nam, td->td_ucred);
+ INP_WUNLOCK(inp);
+ INP_INFO_WUNLOCK(&V_divcbinfo);
+ return error;
+}
+
+static int
+div_shutdown(struct socket *so)
+{
+ struct inpcb *inp;
+
+ inp = sotoinpcb(so);
+ KASSERT(inp != NULL, ("div_shutdown: inp == NULL"));
+ INP_WLOCK(inp);
+ socantsendmore(so);
+ INP_WUNLOCK(inp);
+ return 0;
+}
+
+static int
+div_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *nam,
+ struct mbuf *control, struct thread *td)
+{
+
+ /* Packet must have a header (but that's about it) */
+ if (m->m_len < sizeof (struct ip) &&
+ (m = m_pullup(m, sizeof (struct ip))) == 0) {
+ KMOD_IPSTAT_INC(ips_toosmall);
+ m_freem(m);
+ return EINVAL;
+ }
+
+ /* Send packet */
+ return div_output(so, m, (struct sockaddr_in *)nam, control);
+}
+
+static void
+div_ctlinput(int cmd, struct sockaddr *sa, void *vip)
+{
+ struct in_addr faddr;
+
+ faddr = ((struct sockaddr_in *)sa)->sin_addr;
+ if (sa->sa_family != AF_INET || faddr.s_addr == INADDR_ANY)
+ return;
+ if (PRC_IS_REDIRECT(cmd))
+ return;
+}
+
+static int
+div_pcblist(SYSCTL_HANDLER_ARGS)
+{
+ int error, i, n;
+ struct inpcb *inp, **inp_list;
+ inp_gen_t gencnt;
+ struct xinpgen xig;
+
+ /*
+ * The process of preparing the TCB list is too time-consuming and
+ * resource-intensive to repeat on every request.
+ */
+ if (req->oldptr == 0) {
+ n = V_divcbinfo.ipi_count;
+ n += imax(n / 8, 10);
+ req->oldidx = 2 * (sizeof xig) + n * sizeof(struct xinpcb);
+ return 0;
+ }
+
+ if (req->newptr != 0)
+ return EPERM;
+
+ /*
+ * OK, now we're committed to doing something.
+ */
+ INP_INFO_RLOCK(&V_divcbinfo);
+ gencnt = V_divcbinfo.ipi_gencnt;
+ n = V_divcbinfo.ipi_count;
+ INP_INFO_RUNLOCK(&V_divcbinfo);
+
+ error = sysctl_wire_old_buffer(req,
+ 2 * sizeof(xig) + n*sizeof(struct xinpcb));
+ if (error != 0)
+ return (error);
+
+ xig.xig_len = sizeof xig;
+ xig.xig_count = n;
+ xig.xig_gen = gencnt;
+ xig.xig_sogen = so_gencnt;
+ error = SYSCTL_OUT(req, &xig, sizeof xig);
+ if (error)
+ return error;
+
+ inp_list = malloc(n * sizeof *inp_list, M_TEMP, M_WAITOK);
+ if (inp_list == 0)
+ return ENOMEM;
+
+ INP_INFO_RLOCK(&V_divcbinfo);
+ for (inp = LIST_FIRST(V_divcbinfo.ipi_listhead), i = 0; inp && i < n;
+ inp = LIST_NEXT(inp, inp_list)) {
+ INP_WLOCK(inp);
+ if (inp->inp_gencnt <= gencnt &&
+ cr_canseeinpcb(req->td->td_ucred, inp) == 0) {
+ in_pcbref(inp);
+ inp_list[i++] = inp;
+ }
+ INP_WUNLOCK(inp);
+ }
+ INP_INFO_RUNLOCK(&V_divcbinfo);
+ n = i;
+
+ error = 0;
+ for (i = 0; i < n; i++) {
+ inp = inp_list[i];
+ INP_RLOCK(inp);
+ if (inp->inp_gencnt <= gencnt) {
+ struct xinpcb xi;
+ bzero(&xi, sizeof(xi));
+ xi.xi_len = sizeof xi;
+ /* XXX should avoid extra copy */
+ bcopy(inp, &xi.xi_inp, sizeof *inp);
+ if (inp->inp_socket)
+ sotoxsocket(inp->inp_socket, &xi.xi_socket);
+ INP_RUNLOCK(inp);
+ error = SYSCTL_OUT(req, &xi, sizeof xi);
+ } else
+ INP_RUNLOCK(inp);
+ }
+ INP_INFO_WLOCK(&V_divcbinfo);
+ for (i = 0; i < n; i++) {
+ inp = inp_list[i];
+ INP_WLOCK(inp);
+ if (!in_pcbrele(inp))
+ INP_WUNLOCK(inp);
+ }
+ INP_INFO_WUNLOCK(&V_divcbinfo);
+
+ if (!error) {
+ /*
+ * Give the user an updated idea of our state.
+ * If the generation differs from what we told
+ * her before, she knows that something happened
+ * while we were processing this request, and it
+ * might be necessary to retry.
+ */
+ INP_INFO_RLOCK(&V_divcbinfo);
+ xig.xig_gen = V_divcbinfo.ipi_gencnt;
+ xig.xig_sogen = so_gencnt;
+ xig.xig_count = V_divcbinfo.ipi_count;
+ INP_INFO_RUNLOCK(&V_divcbinfo);
+ error = SYSCTL_OUT(req, &xig, sizeof xig);
+ }
+ free(inp_list, M_TEMP);
+ return error;
+}
+
+#ifdef SYSCTL_NODE
+SYSCTL_NODE(_net_inet, IPPROTO_DIVERT, divert, CTLFLAG_RW, 0, "IPDIVERT");
+SYSCTL_PROC(_net_inet_divert, OID_AUTO, pcblist, CTLFLAG_RD, 0, 0,
+ div_pcblist, "S,xinpcb", "List of active divert sockets");
+#endif
+
+struct pr_usrreqs div_usrreqs = {
+ .pru_attach = div_attach,
+ .pru_bind = div_bind,
+ .pru_control = in_control,
+ .pru_detach = div_detach,
+ .pru_peeraddr = in_getpeeraddr,
+ .pru_send = div_send,
+ .pru_shutdown = div_shutdown,
+ .pru_sockaddr = in_getsockaddr,
+ .pru_sosetlabel = in_pcbsosetlabel
+};
+
+struct protosw div_protosw = {
+ .pr_type = SOCK_RAW,
+ .pr_protocol = IPPROTO_DIVERT,
+ .pr_flags = PR_ATOMIC|PR_ADDR,
+ .pr_input = div_input,
+ .pr_ctlinput = div_ctlinput,
+ .pr_ctloutput = ip_ctloutput,
+ .pr_init = div_init,
+#ifdef VIMAGE
+ .pr_destroy = div_destroy,
+#endif
+ .pr_usrreqs = &div_usrreqs
+};
+
+static int
+div_modevent(module_t mod, int type, void *unused)
+{
+ int err = 0;
+#ifndef VIMAGE
+ int n;
+#endif
+
+ switch (type) {
+ case MOD_LOAD:
+ /*
+ * Protocol will be initialized by pf_proto_register().
+ * We don't have to register ip_protox because we are not
+ * a true IP protocol that goes over the wire.
+ */
+ err = pf_proto_register(PF_INET, &div_protosw);
+ if (err != 0)
+ return (err);
+ ip_divert_ptr = divert_packet;
+ ip_divert_event_tag = EVENTHANDLER_REGISTER(maxsockets_change,
+ div_zone_change, NULL, EVENTHANDLER_PRI_ANY);
+ break;
+ case MOD_QUIESCE:
+ /*
+ * IPDIVERT normally may not be unloaded because of potential
+ * race conditions. Tell kldunload we can't be unloaded
+ * unless the unload is forced.
+ */
+ err = EPERM;
+ break;
+ case MOD_UNLOAD:
+#ifdef VIMAGE
+ err = EPERM;
+ break;
+#else
+ /*
+ * Forced unload.
+ *
+ * Module ipdivert can only be unloaded if no sockets are
+ * connected. Maybe this can be changed later to forcefully
+ * disconnect any open sockets.
+ *
+ * XXXRW: Note that there is a slight race here, as a new
+ * socket open request could be spinning on the lock and then
+ * we destroy the lock.
+ */
+ INP_INFO_WLOCK(&V_divcbinfo);
+ n = V_divcbinfo.ipi_count;
+ if (n != 0) {
+ err = EBUSY;
+ INP_INFO_WUNLOCK(&V_divcbinfo);
+ break;
+ }
+ ip_divert_ptr = NULL;
+ err = pf_proto_unregister(PF_INET, IPPROTO_DIVERT, SOCK_RAW);
+ INP_INFO_WUNLOCK(&V_divcbinfo);
+ div_destroy();
+ EVENTHANDLER_DEREGISTER(maxsockets_change, ip_divert_event_tag);
+ break;
+#endif /* !VIMAGE */
+ default:
+ err = EOPNOTSUPP;
+ break;
+ }
+ return err;
+}
+
+static moduledata_t ipdivertmod = {
+ "ipdivert",
+ div_modevent,
+ 0
+};
+
+DECLARE_MODULE(ipdivert, ipdivertmod, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_ANY);
+MODULE_DEPEND(ipdivert, ipfw, 2, 2, 2);
+MODULE_VERSION(ipdivert, 1);
diff --git a/rtems/freebsd/netinet/ip_divert.h b/rtems/freebsd/netinet/ip_divert.h
new file mode 100644
index 00000000..eb9b33d4
--- /dev/null
+++ b/rtems/freebsd/netinet/ip_divert.h
@@ -0,0 +1,55 @@
+/*-
+ * Copyright (c) 2003 Sam Leffler, Errno Consulting
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
+ * redistribution must be conditioned upon including a substantially
+ * similar Disclaimer requirement for further binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
+ * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
+ * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
+ * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGES.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _NETINET_IP_DIVERT_HH_
+#define _NETINET_IP_DIVERT_HH_
+
+/*
+ * divert has no custom kernel-userland API.
+ *
+ * All communication occurs through a sockaddr_in socket where
+ *
+ * kernel-->userland
+ * sin_port = matching rule, host format;
+ * sin_addr = IN: first address of the incoming interface;
+ * OUT: INADDR_ANY
+ * sin_zero = if fits, the interface name (max 7 bytes + NUL)
+ *
+ * userland->kernel
+ * sin_port = restart-rule - 1, host order
+ * (we restart at sin_port + 1)
+ * sin_addr = IN: address of the incoming interface;
+ * OUT: INADDR_ANY
+ */
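+/*
+ * Illustrative note (not part of the original sources): a daemon that
+ * writes back the sockaddr_in exactly as received reinjects the
+ * packet in its original direction, restarting at the rule after the
+ * one that diverted it. To force the packet out instead, overwrite
+ * the address before the sendto():
+ *
+ * sin.sin_addr.s_addr = INADDR_ANY;
+ * sin.sin_port = 0;
+ *
+ * where sin_port = 0 restarts filtering at the top of the ruleset.
+ */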
+#endif /* _NETINET_IP_DIVERT_HH_ */
diff --git a/rtems/freebsd/netinet/ip_dummynet.h b/rtems/freebsd/netinet/ip_dummynet.h
new file mode 100644
index 00000000..0bbc3263
--- /dev/null
+++ b/rtems/freebsd/netinet/ip_dummynet.h
@@ -0,0 +1,263 @@
+/*-
+ * Copyright (c) 1998-2010 Luigi Rizzo, Universita` di Pisa
+ * Portions Copyright (c) 2000 Akamba Corp.
+ * All rights reserved
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _IP_DUMMYNET_H
+#define _IP_DUMMYNET_H
+
+/*
+ * Definition of the kernel-userland API for dummynet.
+ *
+ * Setsockopt() and getsockopt() pass a batch of objects, each
+ * of them starting with a "struct dn_id" which should fully identify
+ * the object and its relation with others in the sequence.
+ * The first object in each request should have
+ * type= DN_CMD_*, id = DN_API_VERSION.
+ * For other objects, type and subtype specify the object, len indicates
+ * the total length including the header, and 'id' identifies the specific
+ * object.
+ *
+ * Most objects are numbered with an identifier in the range 1..65535.
+ * DN_MAX_ID indicates the first value outside the range.
+ */
+
+#define DN_API_VERSION 12500000
+#define DN_MAX_ID 0x10000
+
+struct dn_id {
+ uint16_t len; /* total obj len including this header */
+ uint8_t type;
+ uint8_t subtype;
+ uint32_t id; /* generic id */
+};
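+
+/*
+ * Illustrative sketch (not part of the original sources): every
+ * request starts with one of these headers. A flush command, for
+ * example, is a single dn_id written over a raw IP socket; the
+ * sockopt name IP_DUMMYNET3 is assumed here, matching the userland
+ * side of this API in FreeBSD 8 ipfw:
+ *
+ * struct dn_id oid;
+ *
+ * memset(&oid, 0, sizeof(oid));
+ * oid.len = sizeof(oid);
+ * oid.type = DN_CMD_FLUSH;
+ * oid.id = DN_API_VERSION;
+ * setsockopt(s, IPPROTO_IP, IP_DUMMYNET3, &oid, sizeof(oid));
+ */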
+
+/*
+ * These values are in the type field of struct dn_id.
+ * To preserve the ABI, never rearrange the list or delete
+ * entries with the exception of DN_LAST
+ */
+enum {
+ DN_NONE = 0,
+ DN_LINK = 1,
+ DN_FS,
+ DN_SCH,
+ DN_SCH_I,
+ DN_QUEUE,
+ DN_DELAY_LINE,
+ DN_PROFILE,
+ DN_FLOW, /* struct dn_flow */
+ DN_TEXT, /* opaque text is the object */
+
+ DN_CMD_CONFIG = 0x80, /* objects follow */
+ DN_CMD_DELETE, /* subtype + list of entries */
+ DN_CMD_GET, /* subtype + list of entries */
+ DN_CMD_FLUSH,
+ /* for compatibility with FreeBSD 7.2/8 */
+ DN_COMPAT_PIPE,
+ DN_COMPAT_QUEUE,
+ DN_GET_COMPAT,
+
+ /* special commands for emulation of sysctl variables */
+ DN_SYSCTL_GET,
+ DN_SYSCTL_SET,
+
+ DN_LAST,
+} ;
+
+enum { /* subtype for schedulers, flowset and the like */
+ DN_SCHED_UNKNOWN = 0,
+ DN_SCHED_FIFO = 1,
+ DN_SCHED_WF2QP = 2,
+ /* others are in individual modules */
+} ;
+
+enum { /* user flags */
+ DN_HAVE_MASK = 0x0001, /* fs or sched has a mask */
+ DN_NOERROR = 0x0002, /* do not report errors */
+ DN_QHT_HASH = 0x0004, /* qht is a hash table */
+ DN_QSIZE_BYTES = 0x0008, /* queue size is in bytes */
+ DN_HAS_PROFILE = 0x0010, /* a link has a profile */
+ DN_IS_RED = 0x0020,
+ DN_IS_GENTLE_RED= 0x0040,
+ DN_PIPE_CMD = 0x1000, /* pipe config... */
+};
+
+/*
+ * link template.
+ */
+struct dn_link {
+ struct dn_id oid;
+
+ /*
+ * Userland sets bw and delay in bits/s and milliseconds.
+ * The kernel converts this back and forth to bits/tick and ticks.
+ * XXX what about burst ?
+ */
+ int32_t link_nr;
+ int bandwidth; /* bit/s or bits/tick. */
+ int delay; /* ms and ticks */
+ uint64_t burst; /* scaled. bits*Hz XXX */
+} ;
+
+/*
+ * A flowset, which is a template for flows. Contains parameters
+ * from the command line: id, target scheduler, queue sizes, plr,
+ * flow masks, buckets for the flow hash, and possibly scheduler-
+ * specific parameters (weight, quantum and so on).
+ */
+struct dn_fs {
+ struct dn_id oid;
+ uint32_t fs_nr; /* the flowset number */
+ uint32_t flags; /* userland flags */
+ int qsize ; /* queue size in slots or bytes */
+ int32_t plr; /* PLR, pkt loss rate (2^31-1 means 100%) */
+ uint32_t buckets; /* buckets used for the queue hash table */
+
+ struct ipfw_flow_id flow_mask ;
+ uint32_t sched_nr; /* the scheduler we attach to */
+ /* generic scheduler parameters. Leave them at -1 if unset.
+ * Now we use 0: weight, 1: lmax, 2: priority
+ */
+ int par[4];
+
+ /* RED/GRED parameters.
+ * weight and probabilities are in the range 0..1 represented
+ * in fixed point arithmetic with SCALE_RED decimal bits.
+ */
+#define SCALE_RED 16
+#define SCALE(x) ( (x) << SCALE_RED )
+#define SCALE_VAL(x) ( (x) >> SCALE_RED )
+#define SCALE_MUL(x,y) ( ( (x) * (y) ) >> SCALE_RED )
+ int w_q ; /* queue weight (scaled) */
+ int max_th ; /* maximum threshold for queue (scaled) */
+ int min_th ; /* minimum threshold for queue (scaled) */
+ int max_p ; /* maximum value for p_b (scaled) */
+
+};
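+
+/*
+ * Illustrative example (not part of the original sources): with
+ * SCALE_RED = 16 the values are 16.16 fixed point, so a queue weight
+ * of 0.002 is stored as SCALE(1) / 500 = 65536 / 500 = 131, and a
+ * product of two scaled values goes through SCALE_MUL() to shed the
+ * extra 2^16 factor:
+ *
+ * int w_q = SCALE(1) / 500;
+ * int prod = SCALE_MUL(w_q, max_p);
+ */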
+
+/*
+ * dn_flow collects flow_id and stats for queues and scheduler
+ * instances, and is used to pass this information to userland.
+ * oid.type/oid.subtype describe the object, oid.id is the number
+ * of the parent object.
+ */
+struct dn_flow {
+ struct dn_id oid;
+ struct ipfw_flow_id fid;
+ uint64_t tot_pkts; /* statistics counters */
+ uint64_t tot_bytes;
+ uint32_t length; /* Queue length, in packets */
+ uint32_t len_bytes; /* Queue length, in bytes */
+ uint32_t drops;
+};
+
+
+/*
+ * Scheduler template, mostly indicating the name, number,
+ * sched_mask and buckets.
+ */
+struct dn_sch {
+ struct dn_id oid;
+ uint32_t sched_nr; /* N, scheduler number */
+ uint32_t buckets; /* number of buckets for the instances */
+ uint32_t flags; /* have_mask, ... */
+
+ char name[16]; /* null terminated */
+ /* mask to select the appropriate scheduler instance */
+ struct ipfw_flow_id sched_mask; /* M */
+};
+
+
+/* A delay profile is attached to a link.
+ * Note that a profile, as any other object, cannot be longer than 2^16
+ */
+#define ED_MAX_SAMPLES_NO 1024
+struct dn_profile {
+ struct dn_id oid;
+ /* fields to simulate a delay profile */
+#define ED_MAX_NAME_LEN 32
+ char name[ED_MAX_NAME_LEN];
+ int link_nr;
+ int loss_level;
+ int bandwidth; // XXX use link bandwidth?
+ int samples_no; /* actual length of samples[] */
+ int samples[ED_MAX_SAMPLES_NO]; /* may be shorter */
+};
+
+
+
+/*
+ * Overall structure of dummynet
+
+In dummynet, packets are selected with the firewall rules, and passed
+to two different objects: PIPE or QUEUE (bad name).
+
+A QUEUE defines a classifier, which groups packets into flows
+according to a 'mask', puts them into independent queues (one
+per flow) with configurable size and queue management policy,
+and passes flows to a scheduler:
+
+ (flow_mask|sched_mask) sched_mask
+ +---------+ weight Wx +-------------+
+ | |->-[flow]-->--| |-+
+ -->--| QUEUE x | ... | | |
+ | |->-[flow]-->--| SCHEDuler N | |
+ +---------+ | | |
+ ... | +--[LINK N]-->--
+ +---------+ weight Wy | | +--[LINK N]-->--
+ | |->-[flow]-->--| | |
+ -->--| QUEUE y | ... | | |
+ | |->-[flow]-->--| | |
+ +---------+ +-------------+ |
+ +-------------+
+
+Many QUEUE objects can connect to the same scheduler, each
+QUEUE object can have its own set of parameters.
+
+In turn, the SCHEDuler 'forks' multiple instances according
+to a 'sched_mask', each instance manages its own set of queues
+and transmits on a private instance of a configurable LINK.
+
+A PIPE is a simplified version of the above, where there
+is no flow_mask, and each scheduler instance handles a single queue.
+
+The following data structures (visible from userland) describe
+the objects used by dummynet:
+
+ + dn_link, contains the main configuration parameters related
+ to delay and bandwidth;
+ + dn_profile describes a delay profile;
+ + dn_flow describes the flow status (flow id, statistics)
+
+ + dn_sch describes a scheduler
+ + dn_fs describes a flowset (msk, weight, queue parameters)
+
+ *
+ */
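+
+/*
+ * Illustrative example (not part of the original sources): the
+ * ipfw(8) commands that create the objects above. A pipe bundles a
+ * link, a scheduler and a flowset; a queue is a flowset attached to
+ * an existing pipe:
+ *
+ * ipfw pipe 1 config bw 1Mbit/s delay 50ms
+ * ipfw queue 5 config pipe 1 weight 10 mask src-ip 0x000000ff
+ * ipfw add queue 5 ip from any to any
+ */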
+
+#endif /* _IP_DUMMYNET_H */
diff --git a/rtems/freebsd/netinet/ip_ecn.c b/rtems/freebsd/netinet/ip_ecn.c
new file mode 100644
index 00000000..aa86e9b0
--- /dev/null
+++ b/rtems/freebsd/netinet/ip_ecn.c
@@ -0,0 +1,194 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/* $KAME: ip_ecn.c,v 1.12 2002/01/07 11:34:47 kjc Exp $ */
+
+/*-
+ * Copyright (C) 1999 WIDE Project.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the project nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+/*
+ * ECN consideration on tunnel ingress/egress operation.
+ * http://www.aciri.org/floyd/papers/draft-ipsec-ecn-00.txt
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/freebsd/local/opt_inet.h>
+#include <rtems/freebsd/local/opt_inet6.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/errno.h>
+
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/in_systm.h>
+#include <rtems/freebsd/netinet/ip.h>
+#ifdef INET6
+#include <rtems/freebsd/netinet/ip6.h>
+#endif
+
+#include <rtems/freebsd/netinet/ip_ecn.h>
+#ifdef INET6
+#include <rtems/freebsd/netinet6/ip6_ecn.h>
+#endif
+
+/*
+ * ECN and TOS (or TCLASS) processing rules at tunnel encapsulation and
+ * decapsulation from RFC3168:
+ *
+ * Outer Hdr at Inner Hdr at
+ * Encapsulator Decapsulator
+ * Header fields: -------------------- ------------
+ * DS Field copied from inner hdr no change
+ * ECN Field constructed by (I) constructed by (E)
+ *
+ * ECN_ALLOWED (full functionality):
+ * (I) if the ECN field in the inner header is set to CE, then set the
+ * ECN field in the outer header to ECT(0).
+ * otherwise, copy the ECN field to the outer header.
+ *
+ * (E) if the ECN field in the outer header is set to CE and the ECN
+ * field of the inner header is not-ECT, drop the packet.
+ * if the ECN field in the inner header is set to ECT(0) or ECT(1)
+ * and the ECN field in the outer header is set to CE, then copy CE to
+ * the inner header. otherwise, make no change to the inner header.
+ *
+ * ECN_FORBIDDEN (limited functionality):
+ * (I) set the ECN field to not-ECT in the outer header.
+ *
+ * (E) if the ECN field in the outer header is set to CE, drop the packet.
+ * otherwise, make no change to the ECN field in the inner header.
+ *
+ * the drop rule is for backward compatibility and protection against
+ * erasure of CE.
+ */
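+
+/*
+ * Worked example (illustrative, not part of the original sources),
+ * using the IPTOS_ECN_* codepoints from netinet/ip.h (not-ECT = 0x00,
+ * ECT(1) = 0x01, ECT(0) = 0x02, CE = 0x03):
+ *
+ *	inner TOS 0x03 (CE), mode ECN_ALLOWED:
+ *	  ingress: outer = 0x03 & ~IPTOS_ECN_ECT1 = 0x02, i.e. ECT(0)
+ *	  egress:  outer CE + inner ECT(0) -> inner becomes CE, packet kept
+ *	  egress:  outer CE + inner not-ECT -> return 0, caller drops packet
+ */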
+
+/*
+ * modify outer ECN (TOS) field on ingress operation (tunnel encapsulation).
+ */
+void
+ip_ecn_ingress(int mode, u_int8_t *outer, const u_int8_t *inner)
+{
+
+ if (!outer || !inner)
+ panic("NULL pointer passed to ip_ecn_ingress");
+
+ *outer = *inner;
+ switch (mode) {
+ case ECN_ALLOWED: /* ECN allowed */
+ /*
+ * full-functionality: if the inner is CE, set ECT(0)
+ * to the outer. otherwise, copy the ECN field.
+ */
+ if ((*inner & IPTOS_ECN_MASK) == IPTOS_ECN_CE)
+ *outer &= ~IPTOS_ECN_ECT1;
+ break;
+ case ECN_FORBIDDEN: /* ECN forbidden */
+ /*
+ * limited-functionality: set not-ECT to the outer
+ */
+ *outer &= ~IPTOS_ECN_MASK;
+ break;
+ case ECN_NOCARE: /* no consideration to ECN */
+ break;
+ }
+}
+
+/*
+ * modify inner ECN (TOS) field on egress operation (tunnel decapsulation).
+ * the caller should drop the packet if the return value is 0.
+ */
+int
+ip_ecn_egress(int mode, const u_int8_t *outer, u_int8_t *inner)
+{
+
+ if (!outer || !inner)
+ panic("NULL pointer passed to ip_ecn_egress");
+
+ switch (mode) {
+ case ECN_ALLOWED:
+ /*
+ * full-functionality: if the outer is CE and the inner is
+ * not-ECT, should drop it. otherwise, copy CE.
+ */
+ if ((*outer & IPTOS_ECN_MASK) == IPTOS_ECN_CE) {
+ if ((*inner & IPTOS_ECN_MASK) == IPTOS_ECN_NOTECT)
+ return (0);
+ *inner |= IPTOS_ECN_CE;
+ }
+ break;
+ case ECN_FORBIDDEN: /* ECN forbidden */
+ /*
+ * limited-functionality: if the outer is CE, should drop it.
+ * otherwise, leave the inner.
+ */
+ if ((*outer & IPTOS_ECN_MASK) == IPTOS_ECN_CE)
+ return (0);
+ break;
+ case ECN_NOCARE: /* no consideration to ECN */
+ break;
+ }
+ return (1);
+}
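+
+/*
+ * Minimal caller sketch (illustrative only; "m", "outer_ip" and
+ * "inner_ip" are hypothetical locals of a decapsulating protocol
+ * handler, not names used in this file).  The packet is dropped on a
+ * zero return so that a CE mark set by the network is never silently
+ * erased:
+ *
+ *	if (ip_ecn_egress(ECN_ALLOWED, &outer_ip->ip_tos,
+ *	    &inner_ip->ip_tos) == 0) {
+ *		m_freem(m);
+ *		return;
+ *	}
+ */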
+
+#ifdef INET6
+void
+ip6_ecn_ingress(int mode, u_int32_t *outer, const u_int32_t *inner)
+{
+ u_int8_t outer8, inner8;
+
+ if (!outer || !inner)
+ panic("NULL pointer passed to ip6_ecn_ingress");
+
+ inner8 = (ntohl(*inner) >> 20) & 0xff;
+ ip_ecn_ingress(mode, &outer8, &inner8);
+ *outer &= ~htonl(0xff << 20);
+ *outer |= htonl((u_int32_t)outer8 << 20);
+}
+
+int
+ip6_ecn_egress(int mode, const u_int32_t *outer, u_int32_t *inner)
+{
+ u_int8_t outer8, inner8, oinner8;
+
+ if (!outer || !inner)
+ panic("NULL pointer passed to ip6_ecn_egress");
+
+ outer8 = (ntohl(*outer) >> 20) & 0xff;
+ inner8 = oinner8 = (ntohl(*inner) >> 20) & 0xff;
+ if (ip_ecn_egress(mode, &outer8, &inner8) == 0)
+ return (0);
+ if (inner8 != oinner8) {
+ *inner &= ~htonl(0xff << 20);
+ *inner |= htonl((u_int32_t)inner8 << 20);
+ }
+ return (1);
+}
+#endif
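+
+/*
+ * Explanatory note (added): in both routines above the shift by 20
+ * extracts the 8-bit Traffic Class, which occupies bits 20..27 of the
+ * ip6_flow word once it is converted to host byte order:
+ *
+ *	tclass = (ntohl(ip6->ip6_flow) >> 20) & 0xff;
+ *
+ * so the IPv6 cases can reuse the 8-bit IPv4 helpers unchanged.
+ */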
diff --git a/rtems/freebsd/netinet/ip_ecn.h b/rtems/freebsd/netinet/ip_ecn.h
new file mode 100644
index 00000000..591df2ef
--- /dev/null
+++ b/rtems/freebsd/netinet/ip_ecn.h
@@ -0,0 +1,53 @@
+/* $FreeBSD$ */
+/* $KAME: ip_ecn.h,v 1.8 2002/01/07 11:34:47 kjc Exp $ */
+
+/*-
+ * Copyright (C) 1999 WIDE Project.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the project nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+/*
+ * ECN consideration on tunnel ingress/egress operation.
+ * http://www.aciri.org/floyd/papers/draft-ipsec-ecn-00.txt
+ */
+
+#ifndef _NETINET_IP_ECN_HH_
+#define _NETINET_IP_ECN_HH_
+
+#if defined(_KERNEL) && !defined(_LKM)
+#include <rtems/freebsd/local/opt_inet.h>
+#endif
+
+#define ECN_ALLOWED 1 /* ECN allowed */
+#define ECN_FORBIDDEN 0 /* ECN forbidden */
+#define ECN_NOCARE (-1) /* no consideration to ECN */
+
+#ifdef _KERNEL
+extern void ip_ecn_ingress(int, u_int8_t *, const u_int8_t *);
+extern int ip_ecn_egress(int, const u_int8_t *, u_int8_t *);
+#endif
+#endif
diff --git a/rtems/freebsd/netinet/ip_encap.c b/rtems/freebsd/netinet/ip_encap.c
new file mode 100644
index 00000000..cbef5df1
--- /dev/null
+++ b/rtems/freebsd/netinet/ip_encap.c
@@ -0,0 +1,465 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/* $KAME: ip_encap.c,v 1.41 2001/03/15 08:35:08 itojun Exp $ */
+
+/*-
+ * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the project nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+/*
+ * My grandfather said that there's a devil inside tunnelling technology...
+ *
+ * We have surprisingly many protocols that want packets with IP protocol
+ * #4 or #41. Here's a list of protocols that want protocol #41:
+ * RFC1933 configured tunnel
+ * RFC1933 automatic tunnel
+ * RFC2401 IPsec tunnel
+ * RFC2473 IPv6 generic packet tunnelling
+ * RFC2529 6over4 tunnel
+ * mobile-ip6 (uses RFC2473)
+ * RFC3056 6to4 tunnel
+ * isatap tunnel
+ * Here's a list of protocols that want protocol #4:
+ * RFC1853 IPv4-in-IPv4 tunnelling
+ * RFC2003 IPv4 encapsulation within IPv4
+ * RFC2344 reverse tunnelling for mobile-ip4
+ * RFC2401 IPsec tunnel
+ * Well, what can I say. They impose different en/decapsulation mechanisms
+ * from each other, so they need separate protocol handlers. The only one
+ * we can easily determine by protocol # is IPsec, which always has
+ * AH/ESP/IPComp header right after outer IP header.
+ *
+ * So, clearly good old protosw does not work for protocols #4 and #41.
+ * The code will let you match protocol via src/dst address pair.
+ */
+/* XXX is M_NETADDR correct? */
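+
+/*
+ * Illustrative registration sketch (not part of this file): a tunnel
+ * driver would typically attach by address pair and later detach with
+ * the returned cookie.  "mysrc", "mymask", "mydst", "my_protosw" and
+ * "sc" are hypothetical:
+ *
+ *	const struct encaptab *cookie;
+ *
+ *	cookie = encap_attach(AF_INET, IPPROTO_IPV4,
+ *	    (const struct sockaddr *)&mysrc,
+ *	    (const struct sockaddr *)&mymask,
+ *	    (const struct sockaddr *)&mydst,
+ *	    (const struct sockaddr *)&mymask,
+ *	    &my_protosw, sc);
+ *	...
+ *	encap_detach(cookie);
+ */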
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/freebsd/local/opt_mrouting.h>
+#include <rtems/freebsd/local/opt_inet.h>
+#include <rtems/freebsd/local/opt_inet6.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/sockio.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/errno.h>
+#include <rtems/freebsd/sys/protosw.h>
+#include <rtems/freebsd/sys/queue.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/route.h>
+
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/in_systm.h>
+#include <rtems/freebsd/netinet/ip.h>
+#include <rtems/freebsd/netinet/ip_var.h>
+#include <rtems/freebsd/netinet/ip_encap.h>
+
+#ifdef INET6
+#include <rtems/freebsd/netinet/ip6.h>
+#include <rtems/freebsd/netinet6/ip6_var.h>
+#include <rtems/freebsd/netinet6/ip6protosw.h>
+#endif
+
+#include <rtems/freebsd/machine/stdarg.h>
+
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/malloc.h>
+static MALLOC_DEFINE(M_NETADDR, "encap_export_host", "Export host address structure");
+
+static void encap_add(struct encaptab *);
+static int mask_match(const struct encaptab *, const struct sockaddr *,
+ const struct sockaddr *);
+static void encap_fillarg(struct mbuf *, const struct encaptab *);
+
+/*
+ * All global variables in ip_encap.c are locked using encapmtx.
+ */
+static struct mtx encapmtx;
+MTX_SYSINIT(encapmtx, &encapmtx, "encapmtx", MTX_DEF);
+LIST_HEAD(, encaptab) encaptab = LIST_HEAD_INITIALIZER(encaptab);
+
+/*
+ * We currently keep encap_init() for source code compatibility reasons --
+ * it's referenced by KAME pieces in netinet6.
+ */
+void
+encap_init(void)
+{
+}
+
+#ifdef INET
+void
+encap4_input(struct mbuf *m, int off)
+{
+ struct ip *ip;
+ int proto;
+ struct sockaddr_in s, d;
+ const struct protosw *psw;
+ struct encaptab *ep, *match;
+ int prio, matchprio;
+
+ ip = mtod(m, struct ip *);
+ proto = ip->ip_p;
+
+ bzero(&s, sizeof(s));
+ s.sin_family = AF_INET;
+ s.sin_len = sizeof(struct sockaddr_in);
+ s.sin_addr = ip->ip_src;
+ bzero(&d, sizeof(d));
+ d.sin_family = AF_INET;
+ d.sin_len = sizeof(struct sockaddr_in);
+ d.sin_addr = ip->ip_dst;
+
+ match = NULL;
+ matchprio = 0;
+ mtx_lock(&encapmtx);
+ LIST_FOREACH(ep, &encaptab, chain) {
+ if (ep->af != AF_INET)
+ continue;
+ if (ep->proto >= 0 && ep->proto != proto)
+ continue;
+ if (ep->func)
+ prio = (*ep->func)(m, off, proto, ep->arg);
+ else {
+ /*
+ * it's inbound traffic, we need to match in reverse
+ * order
+ */
+ prio = mask_match(ep, (struct sockaddr *)&d,
+ (struct sockaddr *)&s);
+ }
+
+ /*
+ * We prioritize the matches by using bit length of the
+ * matches. mask_match() and user-supplied matching function
+ * should return the bit length of the matches (for example,
+ * if both src/dst are matched for IPv4, 64 should be returned).
+ * 0 or negative return value means "it did not match".
+ *
+		 * The question is, since we have two "mask" portions, we
+		 * cannot really define a total order between entries.
+ * For example, which of these should be preferred?
+ * mask_match() returns 48 (32 + 16) for both of them.
+ * src=3ffe::/16, dst=3ffe:501::/32
+ * src=3ffe:501::/32, dst=3ffe::/16
+ *
+ * We need to loop through all the possible candidates
+ * to get the best match - the search takes O(n) for
+ * n attachments (i.e. interfaces).
+ */
+ if (prio <= 0)
+ continue;
+ if (prio > matchprio) {
+ matchprio = prio;
+ match = ep;
+ }
+ }
+ mtx_unlock(&encapmtx);
+
+ if (match) {
+ /* found a match, "match" has the best one */
+ psw = match->psw;
+ if (psw && psw->pr_input) {
+ encap_fillarg(m, match);
+ (*psw->pr_input)(m, off);
+ } else
+ m_freem(m);
+ return;
+ }
+
+ /* last resort: inject to raw socket */
+ rip_input(m, off);
+}
+#endif
+
+#ifdef INET6
+int
+encap6_input(struct mbuf **mp, int *offp, int proto)
+{
+ struct mbuf *m = *mp;
+ struct ip6_hdr *ip6;
+ struct sockaddr_in6 s, d;
+ const struct ip6protosw *psw;
+ struct encaptab *ep, *match;
+ int prio, matchprio;
+
+ ip6 = mtod(m, struct ip6_hdr *);
+
+ bzero(&s, sizeof(s));
+ s.sin6_family = AF_INET6;
+ s.sin6_len = sizeof(struct sockaddr_in6);
+ s.sin6_addr = ip6->ip6_src;
+ bzero(&d, sizeof(d));
+ d.sin6_family = AF_INET6;
+ d.sin6_len = sizeof(struct sockaddr_in6);
+ d.sin6_addr = ip6->ip6_dst;
+
+ match = NULL;
+ matchprio = 0;
+ mtx_lock(&encapmtx);
+ LIST_FOREACH(ep, &encaptab, chain) {
+ if (ep->af != AF_INET6)
+ continue;
+ if (ep->proto >= 0 && ep->proto != proto)
+ continue;
+ if (ep->func)
+ prio = (*ep->func)(m, *offp, proto, ep->arg);
+ else {
+ /*
+ * it's inbound traffic, we need to match in reverse
+ * order
+ */
+ prio = mask_match(ep, (struct sockaddr *)&d,
+ (struct sockaddr *)&s);
+ }
+
+ /* see encap4_input() for issues here */
+ if (prio <= 0)
+ continue;
+ if (prio > matchprio) {
+ matchprio = prio;
+ match = ep;
+ }
+ }
+ mtx_unlock(&encapmtx);
+
+ if (match) {
+ /* found a match */
+ psw = (const struct ip6protosw *)match->psw;
+ if (psw && psw->pr_input) {
+ encap_fillarg(m, match);
+ return (*psw->pr_input)(mp, offp, proto);
+ } else {
+ m_freem(m);
+ return IPPROTO_DONE;
+ }
+ }
+
+ /* last resort: inject to raw socket */
+ return rip6_input(mp, offp, proto);
+}
+#endif
+
+/*lint -sem(encap_add, custodial(1)) */
+static void
+encap_add(struct encaptab *ep)
+{
+
+ mtx_assert(&encapmtx, MA_OWNED);
+ LIST_INSERT_HEAD(&encaptab, ep, chain);
+}
+
+/*
+ * sp (src ptr) is always my side, and dp (dst ptr) is always remote side.
+ * length of mask (sm and dm) is assumed to be the same as sp/dp.
+ * Return value will be necessary as input (cookie) for encap_detach().
+ */
+const struct encaptab *
+encap_attach(int af, int proto, const struct sockaddr *sp,
+ const struct sockaddr *sm, const struct sockaddr *dp,
+ const struct sockaddr *dm, const struct protosw *psw, void *arg)
+{
+ struct encaptab *ep;
+
+ /* sanity check on args */
+ if (sp->sa_len > sizeof(ep->src) || dp->sa_len > sizeof(ep->dst))
+ return (NULL);
+ if (sp->sa_len != dp->sa_len)
+ return (NULL);
+ if (af != sp->sa_family || af != dp->sa_family)
+ return (NULL);
+
+	/* check if anyone has already attached with exactly the same config */
+ mtx_lock(&encapmtx);
+ LIST_FOREACH(ep, &encaptab, chain) {
+ if (ep->af != af)
+ continue;
+ if (ep->proto != proto)
+ continue;
+ if (ep->src.ss_len != sp->sa_len ||
+ bcmp(&ep->src, sp, sp->sa_len) != 0 ||
+ bcmp(&ep->srcmask, sm, sp->sa_len) != 0)
+ continue;
+ if (ep->dst.ss_len != dp->sa_len ||
+ bcmp(&ep->dst, dp, dp->sa_len) != 0 ||
+ bcmp(&ep->dstmask, dm, dp->sa_len) != 0)
+ continue;
+
+ mtx_unlock(&encapmtx);
+ return (NULL);
+ }
+
+ ep = malloc(sizeof(*ep), M_NETADDR, M_NOWAIT); /*XXX*/
+ if (ep == NULL) {
+ mtx_unlock(&encapmtx);
+ return (NULL);
+ }
+ bzero(ep, sizeof(*ep));
+
+ ep->af = af;
+ ep->proto = proto;
+ bcopy(sp, &ep->src, sp->sa_len);
+ bcopy(sm, &ep->srcmask, sp->sa_len);
+ bcopy(dp, &ep->dst, dp->sa_len);
+ bcopy(dm, &ep->dstmask, dp->sa_len);
+ ep->psw = psw;
+ ep->arg = arg;
+
+ encap_add(ep);
+ mtx_unlock(&encapmtx);
+ return (ep);
+}
+
+const struct encaptab *
+encap_attach_func(int af, int proto,
+ int (*func)(const struct mbuf *, int, int, void *),
+ const struct protosw *psw, void *arg)
+{
+ struct encaptab *ep;
+
+ /* sanity check on args */
+ if (!func)
+ return (NULL);
+
+ ep = malloc(sizeof(*ep), M_NETADDR, M_NOWAIT); /*XXX*/
+ if (ep == NULL)
+ return (NULL);
+ bzero(ep, sizeof(*ep));
+
+ ep->af = af;
+ ep->proto = proto;
+ ep->func = func;
+ ep->psw = psw;
+ ep->arg = arg;
+
+ mtx_lock(&encapmtx);
+ encap_add(ep);
+ mtx_unlock(&encapmtx);
+ return (ep);
+}
+
+int
+encap_detach(const struct encaptab *cookie)
+{
+ const struct encaptab *ep = cookie;
+ struct encaptab *p;
+
+ mtx_lock(&encapmtx);
+ LIST_FOREACH(p, &encaptab, chain) {
+ if (p == ep) {
+ LIST_REMOVE(p, chain);
+ mtx_unlock(&encapmtx);
+ free(p, M_NETADDR); /*XXX*/
+ return 0;
+ }
+ }
+ mtx_unlock(&encapmtx);
+
+ return EINVAL;
+}
+
+static int
+mask_match(const struct encaptab *ep, const struct sockaddr *sp,
+ const struct sockaddr *dp)
+{
+ struct sockaddr_storage s;
+ struct sockaddr_storage d;
+ int i;
+ const u_int8_t *p, *q;
+ u_int8_t *r;
+ int matchlen;
+
+ if (sp->sa_len > sizeof(s) || dp->sa_len > sizeof(d))
+ return 0;
+ if (sp->sa_family != ep->af || dp->sa_family != ep->af)
+ return 0;
+ if (sp->sa_len != ep->src.ss_len || dp->sa_len != ep->dst.ss_len)
+ return 0;
+
+ matchlen = 0;
+
+ p = (const u_int8_t *)sp;
+ q = (const u_int8_t *)&ep->srcmask;
+ r = (u_int8_t *)&s;
+	for (i = 0; i < sp->sa_len; i++) {
+ r[i] = p[i] & q[i];
+ /* XXX estimate */
+ matchlen += (q[i] ? 8 : 0);
+ }
+
+ p = (const u_int8_t *)dp;
+ q = (const u_int8_t *)&ep->dstmask;
+ r = (u_int8_t *)&d;
+	for (i = 0; i < dp->sa_len; i++) {
+ r[i] = p[i] & q[i];
+ /* XXX rough estimate */
+ matchlen += (q[i] ? 8 : 0);
+ }
+
+ /* need to overwrite len/family portion as we don't compare them */
+ s.ss_len = sp->sa_len;
+ s.ss_family = sp->sa_family;
+ d.ss_len = dp->sa_len;
+ d.ss_family = dp->sa_family;
+
+ if (bcmp(&s, &ep->src, ep->src.ss_len) == 0 &&
+ bcmp(&d, &ep->dst, ep->dst.ss_len) == 0) {
+ return matchlen;
+ } else
+ return 0;
+}
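+
+/*
+ * Worked example (explanatory, not from the original sources): for an
+ * AF_INET attachment whose source and destination masks are both
+ * 255.255.255.0, each side has three nonzero mask bytes, so a packet
+ * matching both sides yields matchlen = 24 + 24 = 48.  A func-based
+ * attachment is expected to return a comparable bit count.
+ */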
+
+static void
+encap_fillarg(struct mbuf *m, const struct encaptab *ep)
+{
+ struct m_tag *tag;
+
+ tag = m_tag_get(PACKET_TAG_ENCAP, sizeof (void*), M_NOWAIT);
+ if (tag) {
+ *(void**)(tag+1) = ep->arg;
+ m_tag_prepend(m, tag);
+ }
+}
+
+void *
+encap_getarg(struct mbuf *m)
+{
+ void *p = NULL;
+ struct m_tag *tag;
+
+ tag = m_tag_find(m, PACKET_TAG_ENCAP, NULL);
+ if (tag) {
+ p = *(void**)(tag+1);
+ m_tag_delete(m, tag);
+ }
+ return p;
+}
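+
+/*
+ * Illustrative consumer sketch (hypothetical handler, not from this
+ * file): the pr_input routine named in the protosw passed to
+ * encap_attach*() recovers its private argument like so:
+ *
+ *	static void
+ *	mytun_input(struct mbuf *m, int off)
+ *	{
+ *		struct mytun_softc *sc = encap_getarg(m);
+ *
+ *		if (sc == NULL) {
+ *			m_freem(m);
+ *			return;
+ *		}
+ *		... decapsulate using sc ...
+ *	}
+ *
+ * Note that encap_getarg() deletes the tag, so it may be called at
+ * most once per packet.
+ */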
diff --git a/rtems/freebsd/netinet/ip_encap.h b/rtems/freebsd/netinet/ip_encap.h
new file mode 100644
index 00000000..44dd1a0d
--- /dev/null
+++ b/rtems/freebsd/netinet/ip_encap.h
@@ -0,0 +1,64 @@
+/* $FreeBSD$ */
+/* $KAME: ip_encap.h,v 1.7 2000/03/25 07:23:37 sumikawa Exp $ */
+
+/*-
+ * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the project nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _NETINET_IP_ENCAP_HH_
+#define _NETINET_IP_ENCAP_HH_
+
+#ifdef _KERNEL
+
+struct encaptab {
+ LIST_ENTRY(encaptab) chain;
+ int af;
+ int proto; /* -1: don't care, I'll check myself */
+ struct sockaddr_storage src; /* my addr */
+ struct sockaddr_storage srcmask;
+ struct sockaddr_storage dst; /* remote addr */
+ struct sockaddr_storage dstmask;
+ int (*func)(const struct mbuf *, int, int, void *);
+ const struct protosw *psw; /* only pr_input will be used */
+ void *arg; /* passed via m->m_pkthdr.aux */
+};
+
+void encap_init(void);
+void encap4_input(struct mbuf *, int);
+int encap6_input(struct mbuf **, int *, int);
+const struct encaptab *encap_attach(int, int, const struct sockaddr *,
+ const struct sockaddr *, const struct sockaddr *,
+ const struct sockaddr *, const struct protosw *, void *);
+const struct encaptab *encap_attach_func(int, int,
+ int (*)(const struct mbuf *, int, int, void *),
+ const struct protosw *, void *);
+int encap_detach(const struct encaptab *);
+void *encap_getarg(struct mbuf *);
+#endif
+
+#endif /*_NETINET_IP_ENCAP_HH_*/
diff --git a/rtems/freebsd/netinet/ip_fastfwd.c b/rtems/freebsd/netinet/ip_fastfwd.c
new file mode 100644
index 00000000..83a86fc6
--- /dev/null
+++ b/rtems/freebsd/netinet/ip_fastfwd.c
@@ -0,0 +1,619 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 2003 Andre Oppermann, Internet Business Solutions AG
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * ip_fastforward gets its speed from processing the forwarded packet to
+ * completion (if_output on the other side) without any queues or netisr's.
+ * The receiving interface DMAs the packet into memory, the upper half of
+ * the driver calls ip_fastforward, we do our routing table lookup and directly
+ * send it off to the outgoing interface, which DMAs the packet to the
+ * network card. The only part of the packet we touch with the CPU is the
+ * IP header (unless there are complex firewall rules touching other parts
+ * of the packet, but that is up to you). We are essentially limited by bus
+ * bandwidth and how fast the network card/driver can set up receives and
+ * transmits.
+ *
+ * We handle basic errors, IP header errors, checksum errors,
+ * destination unreachable, fragmentation and fragmentation needed and
+ * report them via ICMP to the sender.
+ *
+ * Otherwise, if something is not pure IPv4 unicast forwarding, we fall back
+ * to the normal ip_input processing path. We should only be called from
+ * interfaces connected to the outside world.
+ *
+ * Firewalling is fully supported, including divert, ipfw fwd, and
+ * ipfilter ipnat address rewriting.
+ *
+ * IPSEC is not supported if this host is a tunnel broker. IPSEC is
+ * supported for connections to/from local host.
+ *
+ * We try to do the least expensive (in CPU ops) checks and operations
+ * first to catch junk with as little overhead as possible.
+ *
+ * We take full advantage of hardware support for IP checksum and
+ * fragmentation offloading.
+ *
+ * We don't do ICMP redirects in the fast forwarding path. I have had my
+ * own cases where two core routers with the Zebra routing suite would send
+ * millions of ICMP redirects to connected hosts if the destination router
+ * was not the default gateway. In one case it was filling the routing
+ * table of a host with approximately 300,000 cloned redirect entries until
+ * it ran out of kernel memory. However the networking code proved very
+ * robust and it didn't crash or fail in other ways.
+ */
+
+/*
+ * Many thanks to Matt Thomas of NetBSD for the basic structure of
+ * ip_flow.c, which is being followed here.
+ */
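+
+/*
+ * Usage note (added): the fast path is off by default and is enabled
+ * with the sysctl defined below, e.g.
+ *
+ *	sysctl net.inet.ip.fastforwarding=1
+ *
+ * Plain net.inet.ip.forwarding must be enabled as well; the first
+ * check in ip_fastforward() tests both.
+ */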
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/freebsd/local/opt_ipfw.h>
+#include <rtems/freebsd/local/opt_ipstealth.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/protosw.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/sysctl.h>
+
+#include <rtems/freebsd/net/pfil.h>
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/if_types.h>
+#include <rtems/freebsd/net/if_var.h>
+#include <rtems/freebsd/net/if_dl.h>
+#include <rtems/freebsd/net/route.h>
+#include <rtems/freebsd/net/vnet.h>
+
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/in_systm.h>
+#include <rtems/freebsd/netinet/in_var.h>
+#include <rtems/freebsd/netinet/ip.h>
+#include <rtems/freebsd/netinet/ip_var.h>
+#include <rtems/freebsd/netinet/ip_icmp.h>
+#include <rtems/freebsd/netinet/ip_options.h>
+
+#include <rtems/freebsd/machine/in_cksum.h>
+
+static VNET_DEFINE(int, ipfastforward_active);
+#define V_ipfastforward_active VNET(ipfastforward_active)
+
+SYSCTL_VNET_INT(_net_inet_ip, OID_AUTO, fastforwarding, CTLFLAG_RW,
+ &VNET_NAME(ipfastforward_active), 0, "Enable fast IP forwarding");
+
+static struct sockaddr_in *
+ip_findroute(struct route *ro, struct in_addr dest, struct mbuf *m)
+{
+ struct sockaddr_in *dst;
+ struct rtentry *rt;
+
+ /*
+ * Find route to destination.
+ */
+ bzero(ro, sizeof(*ro));
+ dst = (struct sockaddr_in *)&ro->ro_dst;
+ dst->sin_family = AF_INET;
+ dst->sin_len = sizeof(*dst);
+ dst->sin_addr.s_addr = dest.s_addr;
+ in_rtalloc_ign(ro, 0, M_GETFIB(m));
+
+ /*
+ * Route there and interface still up?
+ */
+ rt = ro->ro_rt;
+ if (rt && (rt->rt_flags & RTF_UP) &&
+ (rt->rt_ifp->if_flags & IFF_UP) &&
+ (rt->rt_ifp->if_drv_flags & IFF_DRV_RUNNING)) {
+ if (rt->rt_flags & RTF_GATEWAY)
+ dst = (struct sockaddr_in *)rt->rt_gateway;
+ } else {
+ IPSTAT_INC(ips_noroute);
+ IPSTAT_INC(ips_cantforward);
+ if (rt)
+ RTFREE(rt);
+ icmp_error(m, ICMP_UNREACH, ICMP_UNREACH_HOST, 0, 0);
+ return NULL;
+ }
+ return dst;
+}
+
+/*
+ * Try to forward a packet based on the destination address.
+ * This is a fast path optimized for the plain forwarding case.
+ * If the packet is handled (and consumed) here then we return NULL;
+ * otherwise the mbuf is returned and the packet should be delivered
+ * to ip_input for full processing.
+ */
+struct mbuf *
+ip_fastforward(struct mbuf *m)
+{
+ struct ip *ip;
+ struct mbuf *m0 = NULL;
+ struct route ro;
+ struct sockaddr_in *dst = NULL;
+ struct ifnet *ifp;
+ struct in_addr odest, dest;
+ u_short sum, ip_len;
+ int error = 0;
+ int hlen, mtu;
+#ifdef IPFIREWALL_FORWARD
+ struct m_tag *fwd_tag;
+#endif
+
+ /*
+ * Are we active and forwarding packets?
+ */
+ if (!V_ipfastforward_active || !V_ipforwarding)
+ return m;
+
+ M_ASSERTVALID(m);
+ M_ASSERTPKTHDR(m);
+
+ bzero(&ro, sizeof(ro));
+
+ /*
+ * Step 1: check for packet drop conditions (and sanity checks)
+ */
+
+ /*
+ * Is entire packet big enough?
+ */
+ if (m->m_pkthdr.len < sizeof(struct ip)) {
+ IPSTAT_INC(ips_tooshort);
+ goto drop;
+ }
+
+ /*
+ * Is first mbuf large enough for ip header and is header present?
+ */
+ if (m->m_len < sizeof (struct ip) &&
+ (m = m_pullup(m, sizeof (struct ip))) == NULL) {
+ IPSTAT_INC(ips_toosmall);
+ return NULL; /* mbuf already free'd */
+ }
+
+ ip = mtod(m, struct ip *);
+
+ /*
+ * Is it IPv4?
+ */
+ if (ip->ip_v != IPVERSION) {
+ IPSTAT_INC(ips_badvers);
+ goto drop;
+ }
+
+ /*
+ * Is IP header length correct and is it in first mbuf?
+ */
+ hlen = ip->ip_hl << 2;
+ if (hlen < sizeof(struct ip)) { /* minimum header length */
+ IPSTAT_INC(ips_badhlen);
+ goto drop;
+ }
+ if (hlen > m->m_len) {
+ if ((m = m_pullup(m, hlen)) == NULL) {
+ IPSTAT_INC(ips_badhlen);
+ return NULL; /* mbuf already free'd */
+ }
+ ip = mtod(m, struct ip *);
+ }
+
+ /*
+ * Checksum correct?
+ */
+ if (m->m_pkthdr.csum_flags & CSUM_IP_CHECKED)
+ sum = !(m->m_pkthdr.csum_flags & CSUM_IP_VALID);
+ else {
+ if (hlen == sizeof(struct ip))
+ sum = in_cksum_hdr(ip);
+ else
+ sum = in_cksum(m, hlen);
+ }
+ if (sum) {
+ IPSTAT_INC(ips_badsum);
+ goto drop;
+ }
+
+ /*
+ * Remember that we have checked the IP header and found it valid.
+ */
+ m->m_pkthdr.csum_flags |= (CSUM_IP_CHECKED | CSUM_IP_VALID);
+
+ ip_len = ntohs(ip->ip_len);
+
+ /*
+ * Is IP length longer than packet we have got?
+ */
+ if (m->m_pkthdr.len < ip_len) {
+ IPSTAT_INC(ips_tooshort);
+ goto drop;
+ }
+
+ /*
+ * Is packet longer than IP header tells us? If yes, truncate packet.
+ */
+ if (m->m_pkthdr.len > ip_len) {
+ if (m->m_len == m->m_pkthdr.len) {
+ m->m_len = ip_len;
+ m->m_pkthdr.len = ip_len;
+ } else
+ m_adj(m, ip_len - m->m_pkthdr.len);
+ }
+
+ /*
+ * Is packet from or to 127/8?
+ */
+ if ((ntohl(ip->ip_dst.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET ||
+ (ntohl(ip->ip_src.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET) {
+ IPSTAT_INC(ips_badaddr);
+ goto drop;
+ }
+
+#ifdef ALTQ
+ /*
+ * Is packet dropped by traffic conditioner?
+ */
+ if (altq_input != NULL && (*altq_input)(m, AF_INET) == 0)
+ goto drop;
+#endif
+
+ /*
+ * Step 2: fallback conditions to normal ip_input path processing
+ */
+
+ /*
+ * Only IP packets without options
+ */
+ if (ip->ip_hl != (sizeof(struct ip) >> 2)) {
+ if (ip_doopts == 1)
+ return m;
+ else if (ip_doopts == 2) {
+ icmp_error(m, ICMP_UNREACH, ICMP_UNREACH_FILTER_PROHIB,
+ 0, 0);
+ return NULL; /* mbuf already free'd */
+ }
+ /* else ignore IP options and continue */
+ }
+
+ /*
+ * Only unicast IP, not from loopback, no L2 or IP broadcast,
+ * no multicast, no INADDR_ANY
+ *
+ * XXX: Probably some of these checks could be direct drop
+ * conditions. However it is not clear whether there are some
+	 * hacks or obscure behaviours which make it necessary to
+	 * let ip_input handle it. We play it safe here and let ip_input
+ * deal with it until it is proven that we can directly drop it.
+ */
+ if ((m->m_flags & (M_BCAST|M_MCAST)) ||
+ (m->m_pkthdr.rcvif->if_flags & IFF_LOOPBACK) ||
+ ntohl(ip->ip_src.s_addr) == (u_long)INADDR_BROADCAST ||
+ ntohl(ip->ip_dst.s_addr) == (u_long)INADDR_BROADCAST ||
+ IN_MULTICAST(ntohl(ip->ip_src.s_addr)) ||
+ IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) ||
+ IN_LINKLOCAL(ntohl(ip->ip_src.s_addr)) ||
+ IN_LINKLOCAL(ntohl(ip->ip_dst.s_addr)) ||
+ ip->ip_src.s_addr == INADDR_ANY ||
+ ip->ip_dst.s_addr == INADDR_ANY )
+ return m;
+
+ /*
+ * Is it for a local address on this host?
+ */
+ if (in_localip(ip->ip_dst))
+ return m;
+
+ IPSTAT_INC(ips_total);
+
+ /*
+ * Step 3: incoming packet firewall processing
+ */
+
+ /*
+ * Convert to host representation
+ */
+ ip->ip_len = ntohs(ip->ip_len);
+ ip->ip_off = ntohs(ip->ip_off);
+
+ odest.s_addr = dest.s_addr = ip->ip_dst.s_addr;
+
+ /*
+ * Run through list of ipfilter hooks for input packets
+ */
+ if (!PFIL_HOOKED(&V_inet_pfil_hook))
+ goto passin;
+
+ if (pfil_run_hooks(
+ &V_inet_pfil_hook, &m, m->m_pkthdr.rcvif, PFIL_IN, NULL) ||
+ m == NULL)
+ goto drop;
+
+ M_ASSERTVALID(m);
+ M_ASSERTPKTHDR(m);
+
+ ip = mtod(m, struct ip *); /* m may have changed by pfil hook */
+ dest.s_addr = ip->ip_dst.s_addr;
+
+ /*
+ * Destination address changed?
+ */
+ if (odest.s_addr != dest.s_addr) {
+ /*
+ * Is it now for a local address on this host?
+ */
+ if (in_localip(dest))
+ goto forwardlocal;
+ /*
+ * Go on with new destination address
+ */
+ }
+#ifdef IPFIREWALL_FORWARD
+ if (m->m_flags & M_FASTFWD_OURS) {
+ /*
+ * ipfw changed it for a local address on this host.
+ */
+ goto forwardlocal;
+ }
+#endif /* IPFIREWALL_FORWARD */
+
+passin:
+ /*
+ * Step 4: decrement TTL and look up route
+ */
+
+ /*
+ * Check TTL
+ */
+#ifdef IPSTEALTH
+ if (!V_ipstealth) {
+#endif
+ if (ip->ip_ttl <= IPTTLDEC) {
+ icmp_error(m, ICMP_TIMXCEED, ICMP_TIMXCEED_INTRANS, 0, 0);
+ return NULL; /* mbuf already free'd */
+ }
+
+ /*
+ * Decrement the TTL and incrementally change the IP header checksum.
+ * Don't bother doing this with hw checksum offloading, it's faster
+ * doing it right here.
+ */
+ ip->ip_ttl -= IPTTLDEC;
+ if (ip->ip_sum >= (u_int16_t) ~htons(IPTTLDEC << 8))
+ ip->ip_sum -= ~htons(IPTTLDEC << 8);
+ else
+ ip->ip_sum += htons(IPTTLDEC << 8);
+#ifdef IPSTEALTH
+ }
+#endif
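+
+	/*
+	 * Explanatory note (added): the branch above is the incremental
+	 * checksum update of RFC 1624, HC' = HC + ~m + m', applied to
+	 * the one 16-bit word that changed: decrementing the TTL (the
+	 * high byte of the TTL/protocol word) by IPTTLDEC amounts to
+	 * adding htons(IPTTLDEC << 8) to the one's-complement sum, with
+	 * the comparison guarding the 16-bit wraparound.
+	 */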
+
+ /*
+ * Find route to destination.
+ */
+ if ((dst = ip_findroute(&ro, dest, m)) == NULL)
+ return NULL; /* icmp unreach already sent */
+ ifp = ro.ro_rt->rt_ifp;
+
+ /*
+ * Immediately drop blackholed traffic, and directed broadcasts
+ * for either the all-ones or all-zero subnet addresses on
+ * locally attached networks.
+ */
+ if ((ro.ro_rt->rt_flags & (RTF_BLACKHOLE|RTF_BROADCAST)) != 0)
+ goto drop;
+
+ /*
+ * Step 5: outgoing firewall packet processing
+ */
+
+ /*
+ * Run through list of hooks for output packets.
+ */
+ if (!PFIL_HOOKED(&V_inet_pfil_hook))
+ goto passout;
+
+ if (pfil_run_hooks(&V_inet_pfil_hook, &m, ifp, PFIL_OUT, NULL) || m == NULL) {
+ goto drop;
+ }
+
+ M_ASSERTVALID(m);
+ M_ASSERTPKTHDR(m);
+
+ ip = mtod(m, struct ip *);
+ dest.s_addr = ip->ip_dst.s_addr;
+
+ /*
+ * Destination address changed?
+ */
+#ifndef IPFIREWALL_FORWARD
+ if (odest.s_addr != dest.s_addr) {
+#else
+ fwd_tag = m_tag_find(m, PACKET_TAG_IPFORWARD, NULL);
+ if (odest.s_addr != dest.s_addr || fwd_tag != NULL) {
+#endif /* IPFIREWALL_FORWARD */
+ /*
+ * Is it now for a local address on this host?
+ */
+#ifndef IPFIREWALL_FORWARD
+ if (in_localip(dest)) {
+#else
+ if (m->m_flags & M_FASTFWD_OURS || in_localip(dest)) {
+#endif /* IPFIREWALL_FORWARD */
+forwardlocal:
+ /*
+ * Return packet for processing by ip_input().
+ * Keep host byte order as expected at ip_input's
+ * "ours"-label.
+ */
+ m->m_flags |= M_FASTFWD_OURS;
+ if (ro.ro_rt)
+ RTFREE(ro.ro_rt);
+ return m;
+ }
+ /*
+ * Redo route lookup with new destination address
+ */
+#ifdef IPFIREWALL_FORWARD
+ if (fwd_tag) {
+ dest.s_addr = ((struct sockaddr_in *)
+ (fwd_tag + 1))->sin_addr.s_addr;
+ m_tag_delete(m, fwd_tag);
+ }
+#endif /* IPFIREWALL_FORWARD */
+ RTFREE(ro.ro_rt);
+ if ((dst = ip_findroute(&ro, dest, m)) == NULL)
+ return NULL; /* icmp unreach already sent */
+ ifp = ro.ro_rt->rt_ifp;
+ }
+
+passout:
+ /*
+ * Step 6: send off the packet
+ */
+
+ /*
+	 * Check if route is damped (when ARP is unable to resolve)
+ */
+ if ((ro.ro_rt->rt_flags & RTF_REJECT) &&
+ (ro.ro_rt->rt_rmx.rmx_expire == 0 ||
+ time_uptime < ro.ro_rt->rt_rmx.rmx_expire)) {
+ icmp_error(m, ICMP_UNREACH, ICMP_UNREACH_HOST, 0, 0);
+ goto consumed;
+ }
+
+#ifndef ALTQ
+ /*
+ * Check if there is enough space in the interface queue
+ */
+ if ((ifp->if_snd.ifq_len + ip->ip_len / ifp->if_mtu + 1) >=
+ ifp->if_snd.ifq_maxlen) {
+ IPSTAT_INC(ips_odropped);
+		/* would send source quench here but that is deprecated */
+ goto drop;
+ }
+#endif
+
+ /*
+ * Check if media link state of interface is not down
+ */
+ if (ifp->if_link_state == LINK_STATE_DOWN) {
+ icmp_error(m, ICMP_UNREACH, ICMP_UNREACH_HOST, 0, 0);
+ goto consumed;
+ }
+
+ /*
+ * Check if packet fits MTU or if hardware will fragment for us
+ */
+ if (ro.ro_rt->rt_rmx.rmx_mtu)
+ mtu = min(ro.ro_rt->rt_rmx.rmx_mtu, ifp->if_mtu);
+ else
+ mtu = ifp->if_mtu;
+
+ if (ip->ip_len <= mtu ||
+ (ifp->if_hwassist & CSUM_FRAGMENT && (ip->ip_off & IP_DF) == 0)) {
+ /*
+ * Restore packet header fields to original values
+ */
+ ip->ip_len = htons(ip->ip_len);
+ ip->ip_off = htons(ip->ip_off);
+ /*
+ * Send off the packet via outgoing interface
+ */
+ error = (*ifp->if_output)(ifp, m,
+ (struct sockaddr *)dst, &ro);
+ } else {
+ /*
+ * Handle EMSGSIZE with icmp reply needfrag for TCP MTU discovery
+ */
+ if (ip->ip_off & IP_DF) {
+ IPSTAT_INC(ips_cantfrag);
+ icmp_error(m, ICMP_UNREACH, ICMP_UNREACH_NEEDFRAG,
+ 0, mtu);
+ goto consumed;
+ } else {
+ /*
+ * We have to fragment the packet
+ */
+ m->m_pkthdr.csum_flags |= CSUM_IP;
+ /*
+ * ip_fragment expects ip_len and ip_off in host byte
+ * order but returns all packets in network byte order
+ */
+ if (ip_fragment(ip, &m, mtu, ifp->if_hwassist,
+ (~ifp->if_hwassist & CSUM_DELAY_IP))) {
+ goto drop;
+ }
+ KASSERT(m != NULL, ("null mbuf and no error"));
+ /*
+ * Send off the fragments via outgoing interface
+ */
+ error = 0;
+ do {
+ m0 = m->m_nextpkt;
+ m->m_nextpkt = NULL;
+
+ error = (*ifp->if_output)(ifp, m,
+ (struct sockaddr *)dst, &ro);
+ if (error)
+ break;
+ } while ((m = m0) != NULL);
+ if (error) {
+ /* Reclaim remaining fragments */
+ for (m = m0; m; m = m0) {
+ m0 = m->m_nextpkt;
+ m_freem(m);
+ }
+ } else
+ IPSTAT_INC(ips_fragmented);
+ }
+ }
+
+ if (error != 0)
+ IPSTAT_INC(ips_odropped);
+ else {
+ ro.ro_rt->rt_rmx.rmx_pksent++;
+ IPSTAT_INC(ips_forward);
+ IPSTAT_INC(ips_fastforward);
+ }
+consumed:
+ RTFREE(ro.ro_rt);
+ return NULL;
+drop:
+ if (m)
+ m_freem(m);
+ if (ro.ro_rt)
+ RTFREE(ro.ro_rt);
+ return NULL;
+}
diff --git a/rtems/freebsd/netinet/ip_fw.h b/rtems/freebsd/netinet/ip_fw.h
new file mode 100644
index 00000000..cf5d8d03
--- /dev/null
+++ b/rtems/freebsd/netinet/ip_fw.h
@@ -0,0 +1,579 @@
+/*-
+ * Copyright (c) 2002-2009 Luigi Rizzo, Universita` di Pisa
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _IPFW2_H
+#define _IPFW2_H
+
+/*
+ * The default rule number. By the design of ip_fw, the default rule
+ * is the last one, so its number can also serve as the highest number
+ * allowed for a rule. The ip_fw code relies on both meanings of this
+ * constant.
+ */
+#define IPFW_DEFAULT_RULE 65535
+
+/*
+ * The number of ipfw tables. The maximum allowed table number is the
+ * (IPFW_TABLES_MAX - 1).
+ */
+#define IPFW_TABLES_MAX 128
+
+/*
+ * Most commands (queue, pipe, tag, untag, limit...) can have a 16-bit
+ * argument between 1 and 65534. The value 0 is unused, the value
+ * 65535 (IP_FW_TABLEARG) is used to represent 'tablearg', i.e. the
+ * result of the most recent table() lookup.
+ * Note that 16bit is only a historical limit, resulting from
+ * the use of a 16-bit field for that value. In reality, we can have
+ * 2^32 pipes, queues, tag values and so on, and use 0 as a tablearg.
+ */
+#define IPFW_ARG_MIN 1
+#define IPFW_ARG_MAX 65534
+#define IP_FW_TABLEARG 65535 /* XXX should use 0 */
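+
+/*
+ * Illustrative tablearg use (example rules, not from this header):
+ *
+ *	ipfw table 1 add 10.0.0.0/24 2
+ *	ipfw add 100 pipe tablearg ip from table(1) to any
+ *
+ * A packet from 10.0.0.0/24 looks up the value 2 and is passed to
+ * pipe 2; inside the kernel the action argument holds IP_FW_TABLEARG
+ * (65535) until the table lookup result replaces it.
+ */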
+
+/*
+ * The kernel representation of ipfw rules is made of a list of
+ * 'instructions' (for all practical purposes equivalent to BPF
+ * instructions), which specify which fields of the packet
+ * (or its metadata) should be analysed.
+ *
+ * Each instruction is stored in a structure which begins with
+ * "ipfw_insn", and can contain extra fields depending on the
+ * instruction type (listed below).
+ * Note that the code is written so that individual instructions
+ * have a size which is a multiple of 32 bits. This means that, if
+ * such structures contain pointers or other 64-bit entities,
+ * (there is just one instance now) they may end up unaligned on
+ * 64-bit architectures, so they must be handled with care.
+ *
+ * "enum ipfw_opcodes" are the opcodes supported. We can have up
+ * to 256 different opcodes. When adding new opcodes, they should
+ * be appended to the end of the opcode list before O_LAST_OPCODE,
+ * this will prevent the ABI from being broken, otherwise users
+ * will have to recompile ipfw(8) when they update the kernel.
+ */
+
+enum ipfw_opcodes { /* arguments (4 byte each) */
+ O_NOP,
+
+ O_IP_SRC, /* u32 = IP */
+ O_IP_SRC_MASK, /* ip = IP/mask */
+ O_IP_SRC_ME, /* none */
+ O_IP_SRC_SET, /* u32=base, arg1=len, bitmap */
+
+ O_IP_DST, /* u32 = IP */
+ O_IP_DST_MASK, /* ip = IP/mask */
+ O_IP_DST_ME, /* none */
+ O_IP_DST_SET, /* u32=base, arg1=len, bitmap */
+
+ O_IP_SRCPORT, /* (n)port list:mask 4 byte ea */
+ O_IP_DSTPORT, /* (n)port list:mask 4 byte ea */
+ O_PROTO, /* arg1=protocol */
+
+ O_MACADDR2, /* 2 mac addr:mask */
+ O_MAC_TYPE, /* same as srcport */
+
+ O_LAYER2, /* none */
+ O_IN, /* none */
+ O_FRAG, /* none */
+
+ O_RECV, /* none */
+ O_XMIT, /* none */
+ O_VIA, /* none */
+
+ O_IPOPT, /* arg1 = 2*u8 bitmap */
+ O_IPLEN, /* arg1 = len */
+ O_IPID, /* arg1 = id */
+
+ O_IPTOS, /* arg1 = id */
+ O_IPPRECEDENCE, /* arg1 = precedence << 5 */
+ O_IPTTL, /* arg1 = TTL */
+
+ O_IPVER, /* arg1 = version */
+ O_UID, /* u32 = id */
+ O_GID, /* u32 = id */
+ O_ESTAB, /* none (tcp established) */
+ O_TCPFLAGS, /* arg1 = 2*u8 bitmap */
+ O_TCPWIN, /* arg1 = desired win */
+ O_TCPSEQ, /* u32 = desired seq. */
+ O_TCPACK, /* u32 = desired seq. */
+ O_ICMPTYPE, /* u32 = icmp bitmap */
+ O_TCPOPTS, /* arg1 = 2*u8 bitmap */
+
+ O_VERREVPATH, /* none */
+ O_VERSRCREACH, /* none */
+
+ O_PROBE_STATE, /* none */
+ O_KEEP_STATE, /* none */
+ O_LIMIT, /* ipfw_insn_limit */
+ O_LIMIT_PARENT, /* dyn_type, not an opcode. */
+
+ /*
+ * These are really 'actions'.
+ */
+
+ O_LOG, /* ipfw_insn_log */
+ O_PROB, /* u32 = match probability */
+
+ O_CHECK_STATE, /* none */
+ O_ACCEPT, /* none */
+ O_DENY, /* none */
+ O_REJECT, /* arg1=icmp arg (same as deny) */
+ O_COUNT, /* none */
+ O_SKIPTO, /* arg1=next rule number */
+ O_PIPE, /* arg1=pipe number */
+ O_QUEUE, /* arg1=queue number */
+ O_DIVERT, /* arg1=port number */
+ O_TEE, /* arg1=port number */
+ O_FORWARD_IP, /* fwd sockaddr */
+ O_FORWARD_MAC, /* fwd mac */
+ O_NAT, /* nope */
+ O_REASS, /* none */
+
+ /*
+ * More opcodes.
+ */
+ O_IPSEC, /* has ipsec history */
+ O_IP_SRC_LOOKUP, /* arg1=table number, u32=value */
+ O_IP_DST_LOOKUP, /* arg1=table number, u32=value */
+ O_ANTISPOOF, /* none */
+ O_JAIL, /* u32 = id */
+ O_ALTQ, /* u32 = altq classif. qid */
+ O_DIVERTED, /* arg1=bitmap (1:loop, 2:out) */
+ O_TCPDATALEN, /* arg1 = tcp data len */
+ O_IP6_SRC, /* address without mask */
+ O_IP6_SRC_ME, /* my addresses */
+ O_IP6_SRC_MASK, /* address with the mask */
+ O_IP6_DST,
+ O_IP6_DST_ME,
+ O_IP6_DST_MASK,
+ O_FLOW6ID, /* for flow id tag in the ipv6 pkt */
+ O_ICMP6TYPE, /* icmp6 packet type filtering */
+ O_EXT_HDR, /* filtering for ipv6 extension header */
+ O_IP6,
+
+ /*
+ * actions for ng_ipfw
+ */
+ O_NETGRAPH, /* send to ng_ipfw */
+ O_NGTEE, /* copy to ng_ipfw */
+
+ O_IP4,
+
+ O_UNREACH6, /* arg1=icmpv6 code arg (deny) */
+
+ O_TAG, /* arg1=tag number */
+ O_TAGGED, /* arg1=tag number */
+
+ O_SETFIB, /* arg1=FIB number */
+ O_FIB, /* arg1=FIB desired fib number */
+
+ O_LAST_OPCODE /* not an opcode! */
+};
+
+/*
+ * The extension headers are filtered only for presence using a bit
+ * vector with a flag for each header.
+ */
+#define EXT_FRAGMENT 0x1
+#define EXT_HOPOPTS 0x2
+#define EXT_ROUTING 0x4
+#define EXT_AH 0x8
+#define EXT_ESP 0x10
+#define EXT_DSTOPTS 0x20
+#define EXT_RTHDR0 0x40
+#define EXT_RTHDR2 0x80
+
+/*
+ * Template for instructions.
+ *
+ * ipfw_insn is used for all instructions which require no operands,
+ * a single 16-bit value (arg1), or a couple of 8-bit values.
+ *
+ * For other instructions which require different/larger arguments
+ * we have derived structures, ipfw_insn_*.
+ *
+ * The size of the instruction (in 32-bit words) is in the low
+ * 6 bits of "len". The 2 remaining bits are used to implement
+ * NOT and OR on individual instructions. Given a type, you can
+ * compute the length to be put in "len" using F_INSN_SIZE(t)
+ *
+ * F_NOT negates the match result of the instruction.
+ *
+ * F_OR is used to build or blocks. By default, instructions
+ * are evaluated as part of a logical AND. An "or" block
+ * { X or Y or Z } contains F_OR set in all but the last
+ * instruction of the block. A match will cause the code
+ * to skip past the last instruction of the block.
+ *
+ * NOTA BENE: in a couple of places we assume that
+ * sizeof(ipfw_insn) == sizeof(u_int32_t)
+ * this needs to be fixed.
+ *
+ */
+typedef struct _ipfw_insn { /* template for instructions */
+ u_int8_t opcode;
+ u_int8_t len; /* number of 32-bit words */
+#define F_NOT 0x80
+#define F_OR 0x40
+#define F_LEN_MASK 0x3f
+#define F_LEN(cmd) ((cmd)->len & F_LEN_MASK)
+
+ u_int16_t arg1;
+} ipfw_insn;
+
+/*
+ * The F_INSN_SIZE(type) computes the size, in 4-byte words, of
+ * a given type.
+ */
+#define F_INSN_SIZE(t) ((sizeof (t))/sizeof(u_int32_t))
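+
+/*
+ * Worked example (explanatory): a bare ipfw_insn is one 32-bit word,
+ * so F_INSN_SIZE(ipfw_insn) == 1, while an address/mask match needs
+ * F_INSN_SIZE(ipfw_insn_ip) == 3 (the opcode word plus two struct
+ * in_addr words); that count is what gets stored in the low 6 bits
+ * of "len".
+ */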
+
+/*
+ * This is used to store an array of 16-bit entries (ports etc.)
+ */
+typedef struct _ipfw_insn_u16 {
+ ipfw_insn o;
+ u_int16_t ports[2]; /* there may be more */
+} ipfw_insn_u16;
+
+/*
+ * This is used to store an array of 32-bit entries
+ * (uid, single IPv4 addresses etc.)
+ */
+typedef struct _ipfw_insn_u32 {
+ ipfw_insn o;
+ u_int32_t d[1]; /* one or more */
+} ipfw_insn_u32;
+
+/*
+ * This is used to store IP addr-mask pairs.
+ */
+typedef struct _ipfw_insn_ip {
+ ipfw_insn o;
+ struct in_addr addr;
+ struct in_addr mask;
+} ipfw_insn_ip;
+
+/*
+ * This is used to forward to a given address (ip).
+ */
+typedef struct _ipfw_insn_sa {
+ ipfw_insn o;
+ struct sockaddr_in sa;
+} ipfw_insn_sa;
+
+/*
+ * This is used for MAC addr-mask pairs.
+ */
+typedef struct _ipfw_insn_mac {
+ ipfw_insn o;
+ u_char addr[12]; /* dst[6] + src[6] */
+ u_char mask[12]; /* dst[6] + src[6] */
+} ipfw_insn_mac;
+
+/*
+ * This is used for interface match rules (recv xx, xmit xx).
+ */
+typedef struct _ipfw_insn_if {
+ ipfw_insn o;
+ union {
+ struct in_addr ip;
+ int glob;
+ } p;
+ char name[IFNAMSIZ];
+} ipfw_insn_if;
+
+/*
+ * This is used for storing an altq queue id number.
+ */
+typedef struct _ipfw_insn_altq {
+ ipfw_insn o;
+ u_int32_t qid;
+} ipfw_insn_altq;
+
+/*
+ * This is used for limit rules.
+ */
+typedef struct _ipfw_insn_limit {
+ ipfw_insn o;
+ u_int8_t _pad;
+ u_int8_t limit_mask; /* combination of DYN_* below */
+#define DYN_SRC_ADDR 0x1
+#define DYN_SRC_PORT 0x2
+#define DYN_DST_ADDR 0x4
+#define DYN_DST_PORT 0x8
+
+ u_int16_t conn_limit;
+} ipfw_insn_limit;
+
+/*
+ * This is used for log instructions.
+ */
+typedef struct _ipfw_insn_log {
+ ipfw_insn o;
+ u_int32_t max_log; /* how many do we log -- 0 = all */
+ u_int32_t log_left; /* how many left to log */
+} ipfw_insn_log;
+
+/*
+ * Data structures required by both ipfw(8) and ipfw(4) but not part of the
+ * management API are protected by IPFW_INTERNAL.
+ */
+#ifdef IPFW_INTERNAL
+/* Server pool support (LSNAT). */
+struct cfg_spool {
+ LIST_ENTRY(cfg_spool) _next; /* chain of spool instances */
+ struct in_addr addr;
+ u_short port;
+};
+#endif
+
+/* Redirect modes id. */
+#define REDIR_ADDR 0x01
+#define REDIR_PORT 0x02
+#define REDIR_PROTO 0x04
+
+#ifdef IPFW_INTERNAL
+/* Nat redirect configuration. */
+struct cfg_redir {
+ LIST_ENTRY(cfg_redir) _next; /* chain of redir instances */
+ u_int16_t mode; /* type of redirect mode */
+ struct in_addr laddr; /* local ip address */
+ struct in_addr paddr; /* public ip address */
+ struct in_addr raddr; /* remote ip address */
+ u_short lport; /* local port */
+ u_short pport; /* public port */
+ u_short rport; /* remote port */
+ u_short pport_cnt; /* number of public ports */
+ u_short rport_cnt; /* number of remote ports */
+ int proto; /* protocol: tcp/udp */
+ struct alias_link **alink;
+ /* num of entry in spool chain */
+ u_int16_t spool_cnt;
+ /* chain of spool instances */
+ LIST_HEAD(spool_chain, cfg_spool) spool_chain;
+};
+#endif
+
+#define NAT_BUF_LEN 1024
+
+#ifdef IPFW_INTERNAL
+/* Nat configuration data struct. */
+struct cfg_nat {
+ /* chain of nat instances */
+ LIST_ENTRY(cfg_nat) _next;
+ int id; /* nat id */
+ struct in_addr ip; /* nat ip address */
+ char if_name[IF_NAMESIZE]; /* interface name */
+ int mode; /* aliasing mode */
+ struct libalias *lib; /* libalias instance */
+ /* number of entry in spool chain */
+ int redir_cnt;
+ /* chain of redir instances */
+ LIST_HEAD(redir_chain, cfg_redir) redir_chain;
+};
+#endif
+
+#define SOF_NAT sizeof(struct cfg_nat)
+#define SOF_REDIR sizeof(struct cfg_redir)
+#define SOF_SPOOL sizeof(struct cfg_spool)
+
+/* Nat command. */
+typedef struct _ipfw_insn_nat {
+ ipfw_insn o;
+ struct cfg_nat *nat;
+} ipfw_insn_nat;
+
+/* Apply ipv6 mask on ipv6 addr */
+#define APPLY_MASK(addr,mask) \
+ (addr)->__u6_addr.__u6_addr32[0] &= (mask)->__u6_addr.__u6_addr32[0]; \
+ (addr)->__u6_addr.__u6_addr32[1] &= (mask)->__u6_addr.__u6_addr32[1]; \
+ (addr)->__u6_addr.__u6_addr32[2] &= (mask)->__u6_addr.__u6_addr32[2]; \
+ (addr)->__u6_addr.__u6_addr32[3] &= (mask)->__u6_addr.__u6_addr32[3];
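+
+/*
+ * Usage note (added): APPLY_MASK expands to four statements without a
+ * do { } while (0) wrapper, so it must be braced when used as the body
+ * of an if/else (operand names below are hypothetical):
+ *
+ *	if (is_ipv6) {
+ *		APPLY_MASK(&id->dst_ip6, &mask->dst_ip6);
+ *	}
+ */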
+
+/* Structure for ipv6 */
+typedef struct _ipfw_insn_ip6 {
+ ipfw_insn o;
+ struct in6_addr addr6;
+ struct in6_addr mask6;
+} ipfw_insn_ip6;
+
+/* Used to support icmp6 types */
+typedef struct _ipfw_insn_icmp6 {
+ ipfw_insn o;
+	uint32_t d[7]; /* XXX This number is related to the netinet/icmp6.h
+			* define ICMP6_MAXTYPE
+			* as follows: n = ICMP6_MAXTYPE/32 + 1
+			* Actually it is 203
+ */
+} ipfw_insn_icmp6;
+
+/*
+ * Here we have the structure representing an ipfw rule.
+ *
+ * It starts with a general area (with link fields and counters)
+ * followed by an array of one or more instructions, which the code
+ * accesses as an array of 32-bit values.
+ *
+ * Given a rule pointer r:
+ *
+ * r->cmd is the start of the first instruction.
+ * ACTION_PTR(r) is the start of the first action (things to do
+ * once a rule matched).
+ *
+ * When assembling instructions, remember the following:
+ *
+ * + if a rule has a "keep-state" (or "limit") option, then the
+ * first instruction (at r->cmd) MUST BE an O_PROBE_STATE
+ * + if a rule has a "log" option, then the first action
+ * (at ACTION_PTR(r)) MUST be O_LOG
+ * + if a rule has an "altq" option, it comes after "log"
+ * + if a rule has an O_TAG option, it comes after "log" and "altq"
+ *
+ * NOTE: we use a simple linked list of rules because we never need
+ * to delete a rule without scanning the list. We do not use
+ * queue(3) macros for portability and readability.
+ */
+
+struct ip_fw {
+ struct ip_fw *x_next; /* linked list of rules */
+ struct ip_fw *next_rule; /* ptr to next [skipto] rule */
+ /* 'next_rule' is used to pass up 'set_disable' status */
+
+ uint16_t act_ofs; /* offset of action in 32-bit units */
+ uint16_t cmd_len; /* # of 32-bit words in cmd */
+ uint16_t rulenum; /* rule number */
+ uint8_t set; /* rule set (0..31) */
+#define RESVD_SET 31 /* set for default and persistent rules */
+ uint8_t _pad; /* padding */
+ uint32_t id; /* rule id */
+
+ /* These fields are present in all rules. */
+ uint64_t pcnt; /* Packet counter */
+ uint64_t bcnt; /* Byte counter */
+ uint32_t timestamp; /* tv_sec of last match */
+
+ ipfw_insn cmd[1]; /* storage for commands */
+};
+
+#define ACTION_PTR(rule) \
+ (ipfw_insn *)( (u_int32_t *)((rule)->cmd) + ((rule)->act_ofs) )
+
+#define RULESIZE(rule) (sizeof(struct ip_fw) + \
+ ((struct ip_fw *)(rule))->cmd_len * 4 - 4)
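+
+/*
+ * Illustrative traversal sketch (not from this header): the match
+ * microinstructions run from r->cmd up to the action, stepping by
+ * F_LEN() words, which is how an interpreter walks a rule:
+ *
+ *	ipfw_insn *cmd;
+ *	int l;
+ *
+ *	for (l = rule->act_ofs, cmd = rule->cmd; l > 0;
+ *	    l -= F_LEN(cmd), cmd += F_LEN(cmd)) {
+ *		... evaluate cmd->opcode against the packet ...
+ *	}
+ *	... actions start at ACTION_PTR(rule) ...
+ */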
+
+#if 1 // should be moved to in.h
+/*
+ * This structure is used as a flow mask and a flow id for various
+ * parts of the code.
+ * addr_type is used in userland and kernel to mark the address type.
+ * fib is used in the kernel to record the fib in use.
+ * _flags is used in the kernel to store tcp flags for dynamic rules.
+ */
+struct ipfw_flow_id {
+ uint32_t dst_ip;
+ uint32_t src_ip;
+ uint16_t dst_port;
+ uint16_t src_port;
+ uint8_t fib;
+ uint8_t proto;
+ uint8_t _flags; /* protocol-specific flags */
+ uint8_t addr_type; /* 4=ip4, 6=ip6, 1=ether ? */
+ struct in6_addr dst_ip6;
+ struct in6_addr src_ip6;
+ uint32_t flow_id6;
+ uint32_t extra; /* queue/pipe or frag_id */
+};
+#endif
+
+#define IS_IP6_FLOW_ID(id) ((id)->addr_type == 6)
+
+/*
+ * Dynamic ipfw rule.
+ */
+typedef struct _ipfw_dyn_rule ipfw_dyn_rule;
+
+struct _ipfw_dyn_rule {
+ ipfw_dyn_rule *next; /* linked list of rules. */
+ struct ip_fw *rule; /* pointer to rule */
+ /* 'rule' is used to pass up the rule number (from the parent) */
+
+ ipfw_dyn_rule *parent; /* pointer to parent rule */
+ u_int64_t pcnt; /* packet match counter */
+ u_int64_t bcnt; /* byte match counter */
+ struct ipfw_flow_id id; /* (masked) flow id */
+ u_int32_t expire; /* expire time */
+ u_int32_t bucket; /* which bucket in hash table */
+ u_int32_t state; /* state of this rule (typically a
+ * combination of TCP flags)
+ */
+ u_int32_t ack_fwd; /* most recent ACKs in forward */
+ u_int32_t ack_rev; /* and reverse directions (used */
+ /* to generate keepalives) */
+ u_int16_t dyn_type; /* rule type */
+ u_int16_t count; /* refcount */
+};
+
+/*
+ * Definitions for IP option names.
+ */
+#define IP_FW_IPOPT_LSRR 0x01
+#define IP_FW_IPOPT_SSRR 0x02
+#define IP_FW_IPOPT_RR 0x04
+#define IP_FW_IPOPT_TS 0x08
+
+/*
+ * Definitions for TCP option names.
+ */
+#define IP_FW_TCPOPT_MSS 0x01
+#define IP_FW_TCPOPT_WINDOW 0x02
+#define IP_FW_TCPOPT_SACK 0x04
+#define IP_FW_TCPOPT_TS 0x08
+#define IP_FW_TCPOPT_CC 0x10
+
+#define ICMP_REJECT_RST 0x100 /* fake ICMP code (send a TCP RST) */
+#define ICMP6_UNREACH_RST 0x100 /* fake ICMPv6 code (send a TCP RST) */
+
+/*
+ * These are used for lookup tables.
+ */
+typedef struct _ipfw_table_entry {
+ in_addr_t addr; /* network address */
+ u_int32_t value; /* value */
+ u_int16_t tbl; /* table number */
+ u_int8_t masklen; /* mask length */
+} ipfw_table_entry;
+
+typedef struct _ipfw_table {
+ u_int32_t size; /* size of entries in bytes */
+ u_int32_t cnt; /* # of entries */
+ u_int16_t tbl; /* table number */
+ ipfw_table_entry ent[0]; /* entries */
+} ipfw_table;
+
+#endif /* _IPFW2_H */
diff --git a/rtems/freebsd/netinet/ip_gre.c b/rtems/freebsd/netinet/ip_gre.c
new file mode 100644
index 00000000..affea34c
--- /dev/null
+++ b/rtems/freebsd/netinet/ip_gre.c
@@ -0,0 +1,336 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/* $NetBSD: ip_gre.c,v 1.29 2003/09/05 23:02:43 itojun Exp $ */
+
+/*-
+ * Copyright (c) 1998 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Heiko W.Rupp <hwr@pilhuhn.de>
+ *
+ * IPv6-over-GRE contributed by Gert Doering <gert@greenie.muc.de>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the NetBSD
+ * Foundation, Inc. and its contributors.
+ * 4. Neither the name of The NetBSD Foundation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * De-encapsulate tunneled packets and send them on.  The output half
+ * is in net/if_gre.[ch].  This currently handles IPPROTO_GRE and
+ * IPPROTO_MOBILE.
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/freebsd/local/opt_inet.h>
+#include <rtems/freebsd/local/opt_atalk.h>
+#include <rtems/freebsd/local/opt_inet6.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/socketvar.h>
+#include <rtems/freebsd/sys/protosw.h>
+#include <rtems/freebsd/sys/errno.h>
+#include <rtems/freebsd/sys/time.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/syslog.h>
+#include <rtems/freebsd/net/bpf.h>
+#include <rtems/freebsd/net/ethernet.h>
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/netisr.h>
+#include <rtems/freebsd/net/route.h>
+#include <rtems/freebsd/net/raw_cb.h>
+
+#ifdef INET
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/in_var.h>
+#include <rtems/freebsd/netinet/in_systm.h>
+#include <rtems/freebsd/netinet/ip.h>
+#include <rtems/freebsd/netinet/ip_var.h>
+#include <rtems/freebsd/netinet/ip_gre.h>
+#include <rtems/freebsd/machine/in_cksum.h>
+#else
+#error ip_gre input without IP?
+#endif
+
+#ifdef NETATALK
+#include <rtems/freebsd/netatalk/at.h>
+#include <rtems/freebsd/netatalk/at_var.h>
+#include <rtems/freebsd/netatalk/at_extern.h>
+#endif
+
+/* Needs IP headers. */
+#include <rtems/freebsd/net/if_gre.h>
+
+#include <rtems/freebsd/machine/stdarg.h>
+
+#if 1
+void gre_inet_ntoa(struct in_addr in); /* XXX */
+#endif
+
+static struct gre_softc *gre_lookup(struct mbuf *, u_int8_t);
+
+static struct mbuf *gre_input2(struct mbuf *, int, u_char);
+
+/*
+ * De-encapsulate a packet and feed it back through ip_input() (this
+ * routine is called whenever IP gets a packet with protocol type
+ * IPPROTO_GRE and a local destination address).
+ * This really is simple.
+ */
+void
+gre_input(struct mbuf *m, int off)
+{
+ int proto;
+
+ proto = (mtod(m, struct ip *))->ip_p;
+
+ m = gre_input2(m, off, proto);
+
+	/*
+	 * If no matching tunnel that is up is found, we inject the
+	 * mbuf into the raw ip socket to see if anyone picks it up.
+	 */
+ if (m != NULL)
+ rip_input(m, off);
+}
+
+/*
+ * Decapsulate. Does the real work and is called from gre_input()
+ * (above). Returns an mbuf back if packet is not yet processed,
+ * and NULL if it needs no further processing. proto is the protocol
+ * number of the "calling" foo_input() routine.
+ */
+static struct mbuf *
+gre_input2(struct mbuf *m, int hlen, u_char proto)
+{
+ struct greip *gip;
+ int isr;
+ struct gre_softc *sc;
+ u_int16_t flags;
+ u_int32_t af;
+
+ if ((sc = gre_lookup(m, proto)) == NULL) {
+ /* No matching tunnel or tunnel is down. */
+ return (m);
+ }
+
+ if (m->m_len < sizeof(*gip)) {
+ m = m_pullup(m, sizeof(*gip));
+ if (m == NULL)
+ return (NULL);
+ }
+ gip = mtod(m, struct greip *);
+
+ GRE2IFP(sc)->if_ipackets++;
+ GRE2IFP(sc)->if_ibytes += m->m_pkthdr.len;
+
+ switch (proto) {
+ case IPPROTO_GRE:
+ hlen += sizeof(struct gre_h);
+
+		/* Process GRE flags, as the packet can be of variable length. */
+ flags = ntohs(gip->gi_flags);
+
+ /* Checksum & Offset are present */
+ if ((flags & GRE_CP) | (flags & GRE_RP))
+ hlen += 4;
+ /* We don't support routing fields (variable length) */
+ if (flags & GRE_RP)
+ return (m);
+ if (flags & GRE_KP)
+ hlen += 4;
+ if (flags & GRE_SP)
+ hlen += 4;
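+		/*
+		 * Example: with GRE_CP and GRE_KP set (checksum and key
+		 * present), hlen has grown by sizeof(struct gre_h) + 4 + 4
+		 * beyond the IP header length passed in by the caller.
+		 */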
+
+ switch (ntohs(gip->gi_ptype)) { /* ethertypes */
+ case WCCP_PROTOCOL_TYPE:
+ if (sc->wccp_ver == WCCP_V2)
+ hlen += 4;
+ /* FALLTHROUGH */
+ case ETHERTYPE_IP: /* shouldn't need a schednetisr(), */
+ isr = NETISR_IP;/* as we are in ip_input */
+ af = AF_INET;
+ break;
+#ifdef INET6
+ case ETHERTYPE_IPV6:
+ isr = NETISR_IPV6;
+ af = AF_INET6;
+ break;
+#endif
+#ifdef NETATALK
+ case ETHERTYPE_ATALK:
+ isr = NETISR_ATALK1;
+ af = AF_APPLETALK;
+ break;
+#endif
+ default:
+ /* Others not yet supported. */
+ return (m);
+ }
+ break;
+ default:
+ /* Others not yet supported. */
+ return (m);
+ }
+
+ if (hlen > m->m_pkthdr.len) {
+ m_freem(m);
+ return (NULL);
+ }
+ /* Unlike NetBSD, in FreeBSD m_adj() adjusts m->m_pkthdr.len as well */
+ m_adj(m, hlen);
+
+ if (bpf_peers_present(GRE2IFP(sc)->if_bpf)) {
+ bpf_mtap2(GRE2IFP(sc)->if_bpf, &af, sizeof(af), m);
+ }
+
+ m->m_pkthdr.rcvif = GRE2IFP(sc);
+
+ netisr_queue(isr, m);
+
+ /* Packet is done, no further processing needed. */
+ return (NULL);
+}
+
+/*
+ * Input routine for IPPROTO_MOBILE.
+ * This is a little bit different from the other modes, as the
+ * encapsulating header was not prepended, but instead inserted
+ * between the IP header and the payload.
+ */
+
+void
+gre_mobile_input(struct mbuf *m, int hlen)
+{
+ struct ip *ip;
+ struct mobip_h *mip;
+ struct gre_softc *sc;
+ int msiz;
+
+ if ((sc = gre_lookup(m, IPPROTO_MOBILE)) == NULL) {
+ /* No matching tunnel or tunnel is down. */
+ m_freem(m);
+ return;
+ }
+
+ if (m->m_len < sizeof(*mip)) {
+ m = m_pullup(m, sizeof(*mip));
+ if (m == NULL)
+ return;
+ }
+ ip = mtod(m, struct ip *);
+ mip = mtod(m, struct mobip_h *);
+
+ GRE2IFP(sc)->if_ipackets++;
+ GRE2IFP(sc)->if_ibytes += m->m_pkthdr.len;
+
+ if (ntohs(mip->mh.proto) & MOB_HH_SBIT) {
+ msiz = MOB_HH_SIZ_L;
+ mip->mi.ip_src.s_addr = mip->mh.osrc;
+ } else
+ msiz = MOB_HH_SIZ_S;
+
+ if (m->m_len < (ip->ip_hl << 2) + msiz) {
+ m = m_pullup(m, (ip->ip_hl << 2) + msiz);
+ if (m == NULL)
+ return;
+ ip = mtod(m, struct ip *);
+ mip = mtod(m, struct mobip_h *);
+ }
+
+ mip->mi.ip_dst.s_addr = mip->mh.odst;
+ mip->mi.ip_p = (ntohs(mip->mh.proto) >> 8);
+
+ if (gre_in_cksum((u_int16_t *)&mip->mh, msiz) != 0) {
+ m_freem(m);
+ return;
+ }
+
+ bcopy((caddr_t)(ip) + (ip->ip_hl << 2) + msiz, (caddr_t)(ip) +
+ (ip->ip_hl << 2), m->m_len - msiz - (ip->ip_hl << 2));
+ m->m_len -= msiz;
+ m->m_pkthdr.len -= msiz;
+
+	/*
+	 * On FreeBSD, rip_input() supplies us with ip->ip_len already
+	 * converted into host byte order and decreased by the length of
+	 * the IP header; however, ip_input() expects this field in the
+	 * original format (network byte order and the full size of the
+	 * IP packet), so adjust it accordingly.
+	 */
+ ip->ip_len = htons(ip->ip_len + sizeof(struct ip) - msiz);
+
+ ip->ip_sum = 0;
+ ip->ip_sum = in_cksum(m, (ip->ip_hl << 2));
+
+ if (bpf_peers_present(GRE2IFP(sc)->if_bpf)) {
+ u_int32_t af = AF_INET;
+ bpf_mtap2(GRE2IFP(sc)->if_bpf, &af, sizeof(af), m);
+ }
+
+ m->m_pkthdr.rcvif = GRE2IFP(sc);
+
+ netisr_queue(NETISR_IP, m);
+}
+
+/*
+ * Find the gre interface associated with our src/dst/proto set.
+ *
+ * XXXRW: Need some sort of drain/refcount mechanism so that the softc
+ * reference remains valid after it's returned from gre_lookup(). Right
+ * now, I'm thinking it should be reference-counted with a gre_dropref()
+ * when the caller is done with the softc. This is complicated by how
+ * to handle destroying the gre softc; probably using a gre_drain() in
+ * in_gre.c during destroy.
+ */
+static struct gre_softc *
+gre_lookup(struct mbuf *m, u_int8_t proto)
+{
+ struct ip *ip = mtod(m, struct ip *);
+ struct gre_softc *sc;
+
+ mtx_lock(&gre_mtx);
+ for (sc = LIST_FIRST(&gre_softc_list); sc != NULL;
+ sc = LIST_NEXT(sc, sc_list)) {
+ if ((sc->g_dst.s_addr == ip->ip_src.s_addr) &&
+ (sc->g_src.s_addr == ip->ip_dst.s_addr) &&
+ (sc->g_proto == proto) &&
+ ((GRE2IFP(sc)->if_flags & IFF_UP) != 0)) {
+ mtx_unlock(&gre_mtx);
+ return (sc);
+ }
+ }
+ mtx_unlock(&gre_mtx);
+
+ return (NULL);
+}
diff --git a/rtems/freebsd/netinet/ip_gre.h b/rtems/freebsd/netinet/ip_gre.h
new file mode 100644
index 00000000..1fb67d93
--- /dev/null
+++ b/rtems/freebsd/netinet/ip_gre.h
@@ -0,0 +1,43 @@
+/* $NetBSD: ip_gre.h,v 1.5 2002/06/09 16:33:40 itojun Exp $ */
+/* $FreeBSD$ */
+
+/*-
+ * Copyright (c) 1998 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Heiko W.Rupp <hwr@pilhuhn.de>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the NetBSD
+ * Foundation, Inc. and its contributors.
+ * 4. Neither the name of The NetBSD Foundation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifdef _KERNEL
+void gre_input(struct mbuf *, int);
+void gre_mobile_input(struct mbuf *, int);
+#endif /* _KERNEL */
diff --git a/rtems/freebsd/netinet/ip_icmp.c b/rtems/freebsd/netinet/ip_icmp.c
new file mode 100644
index 00000000..73d8d728
--- /dev/null
+++ b/rtems/freebsd/netinet/ip_icmp.c
@@ -0,0 +1,986 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 1982, 1986, 1988, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)ip_icmp.c 8.2 (Berkeley) 1/4/94
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/freebsd/local/opt_ipsec.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/protosw.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/time.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/sysctl.h>
+#include <rtems/freebsd/sys/syslog.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/if_types.h>
+#include <rtems/freebsd/net/route.h>
+#include <rtems/freebsd/net/vnet.h>
+
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/in_pcb.h>
+#include <rtems/freebsd/netinet/in_systm.h>
+#include <rtems/freebsd/netinet/in_var.h>
+#include <rtems/freebsd/netinet/ip.h>
+#include <rtems/freebsd/netinet/ip_icmp.h>
+#include <rtems/freebsd/netinet/ip_var.h>
+#include <rtems/freebsd/netinet/ip_options.h>
+#include <rtems/freebsd/netinet/tcp.h>
+#include <rtems/freebsd/netinet/tcp_var.h>
+#include <rtems/freebsd/netinet/tcpip.h>
+#include <rtems/freebsd/netinet/icmp_var.h>
+
+#ifdef IPSEC
+#include <rtems/freebsd/netipsec/ipsec.h>
+#include <rtems/freebsd/netipsec/key.h>
+#endif
+
+#include <rtems/freebsd/machine/in_cksum.h>
+
+#include <rtems/freebsd/security/mac/mac_framework.h>
+
+/*
+ * ICMP routines: error generation, receive packet processing, routines
+ * to turn packets back around to the originator, and host table
+ * maintenance routines.
+ */
+VNET_DEFINE(struct icmpstat, icmpstat);
+SYSCTL_VNET_STRUCT(_net_inet_icmp, ICMPCTL_STATS, stats, CTLFLAG_RW,
+ &VNET_NAME(icmpstat), icmpstat, "");
+
+static VNET_DEFINE(int, icmpmaskrepl) = 0;
+#define V_icmpmaskrepl VNET(icmpmaskrepl)
+SYSCTL_VNET_INT(_net_inet_icmp, ICMPCTL_MASKREPL, maskrepl, CTLFLAG_RW,
+ &VNET_NAME(icmpmaskrepl), 0,
+ "Reply to ICMP Address Mask Request packets.");
+
+static VNET_DEFINE(u_int, icmpmaskfake) = 0;
+#define V_icmpmaskfake VNET(icmpmaskfake)
+SYSCTL_VNET_UINT(_net_inet_icmp, OID_AUTO, maskfake, CTLFLAG_RW,
+ &VNET_NAME(icmpmaskfake), 0,
+ "Fake reply to ICMP Address Mask Request packets.");
+
+static VNET_DEFINE(int, drop_redirect) = 0;
+#define V_drop_redirect VNET(drop_redirect)
+SYSCTL_VNET_INT(_net_inet_icmp, OID_AUTO, drop_redirect, CTLFLAG_RW,
+ &VNET_NAME(drop_redirect), 0,
+ "Ignore ICMP redirects");
+
+static VNET_DEFINE(int, log_redirect) = 0;
+#define V_log_redirect VNET(log_redirect)
+SYSCTL_VNET_INT(_net_inet_icmp, OID_AUTO, log_redirect, CTLFLAG_RW,
+ &VNET_NAME(log_redirect), 0,
+ "Log ICMP redirects to the console");
+
+static VNET_DEFINE(int, icmplim) = 200;
+#define V_icmplim VNET(icmplim)
+SYSCTL_VNET_INT(_net_inet_icmp, ICMPCTL_ICMPLIM, icmplim, CTLFLAG_RW,
+ &VNET_NAME(icmplim), 0,
+ "Maximum number of ICMP responses per second");
+
+static VNET_DEFINE(int, icmplim_output) = 1;
+#define V_icmplim_output VNET(icmplim_output)
+SYSCTL_VNET_INT(_net_inet_icmp, OID_AUTO, icmplim_output, CTLFLAG_RW,
+ &VNET_NAME(icmplim_output), 0,
+ "Enable rate limiting of ICMP responses");
+
+static VNET_DEFINE(char, reply_src[IFNAMSIZ]);
+#define V_reply_src VNET(reply_src)
+SYSCTL_VNET_STRING(_net_inet_icmp, OID_AUTO, reply_src, CTLFLAG_RW,
+ &VNET_NAME(reply_src), IFNAMSIZ,
+ "icmp reply source for non-local packets.");
+
+static VNET_DEFINE(int, icmp_rfi) = 0;
+#define V_icmp_rfi VNET(icmp_rfi)
+SYSCTL_VNET_INT(_net_inet_icmp, OID_AUTO, reply_from_interface, CTLFLAG_RW,
+ &VNET_NAME(icmp_rfi), 0,
+ "ICMP reply from incoming interface for non-local packets");
+
+static VNET_DEFINE(int, icmp_quotelen) = 8;
+#define V_icmp_quotelen VNET(icmp_quotelen)
+SYSCTL_VNET_INT(_net_inet_icmp, OID_AUTO, quotelen, CTLFLAG_RW,
+ &VNET_NAME(icmp_quotelen), 0,
+ "Number of bytes from original packet to quote in ICMP reply");
+
+/*
+ * ICMP broadcast echo sysctl
+ */
+static VNET_DEFINE(int, icmpbmcastecho) = 0;
+#define V_icmpbmcastecho VNET(icmpbmcastecho)
+SYSCTL_VNET_INT(_net_inet_icmp, OID_AUTO, bmcastecho, CTLFLAG_RW,
+ &VNET_NAME(icmpbmcastecho), 0,
+ "");
+
+
+#ifdef ICMPPRINTFS
+int icmpprintfs = 0;
+#endif
+
+static void icmp_reflect(struct mbuf *);
+static void icmp_send(struct mbuf *, struct mbuf *);
+
+extern struct protosw inetsw[];
+
+/*
+ * Kernel module interface for updating icmpstat. The argument is an index
+ * into icmpstat treated as an array of u_long. While this encodes the
+ * general layout of icmpstat into the caller, it doesn't encode its
+ * location, so that future changes to add, for example, per-CPU stats
+ * support won't cause binary compatibility problems for kernel modules.
+ */
+void
+kmod_icmpstat_inc(int statnum)
+{
+
+ (*((u_long *)&V_icmpstat + statnum))++;
+}
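+
+/*
+ * For illustration, FreeBSD's ICMPSTAT_INC() macro (icmp_var.h) derives
+ * the index argument this way; a usage sketch, not a definition:
+ *
+ *	kmod_icmpstat_inc(
+ *	    offsetof(struct icmpstat, icps_error) / sizeof(u_long));
+ */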
+
+/*
+ * Generate an error packet of type error
+ * in response to bad packet ip.
+ */
+void
+icmp_error(struct mbuf *n, int type, int code, uint32_t dest, int mtu)
+{
+ register struct ip *oip = mtod(n, struct ip *), *nip;
+ register unsigned oiphlen = oip->ip_hl << 2;
+ register struct icmp *icp;
+ register struct mbuf *m;
+ unsigned icmplen, icmpelen, nlen;
+
+ KASSERT((u_int)type <= ICMP_MAXTYPE, ("%s: illegal ICMP type", __func__));
+#ifdef ICMPPRINTFS
+ if (icmpprintfs)
+ printf("icmp_error(%p, %x, %d)\n", oip, type, code);
+#endif
+ if (type != ICMP_REDIRECT)
+ ICMPSTAT_INC(icps_error);
+	/*
+	 * Don't send an error:
+	 *  if the original packet was encrypted;
+	 *  if it is not the first fragment of the message;
+	 *  in response to a multicast or broadcast packet;
+	 *  if the old packet's protocol was an ICMP error message.
+	 */
+ if (n->m_flags & M_DECRYPTED)
+ goto freeit;
+ if (oip->ip_off & ~(IP_MF|IP_DF))
+ goto freeit;
+ if (n->m_flags & (M_BCAST|M_MCAST))
+ goto freeit;
+ if (oip->ip_p == IPPROTO_ICMP && type != ICMP_REDIRECT &&
+ n->m_len >= oiphlen + ICMP_MINLEN &&
+ !ICMP_INFOTYPE(((struct icmp *)((caddr_t)oip + oiphlen))->icmp_type)) {
+ ICMPSTAT_INC(icps_oldicmp);
+ goto freeit;
+ }
+	/* Drop if IP header plus 8 bytes is not contiguous in first mbuf. */
+ if (oiphlen + 8 > n->m_len)
+ goto freeit;
+	/*
+	 * Calculate the length to quote from the original packet and
+	 * prevent the ICMP mbuf from overflowing.
+	 * Unfortunately this is non-trivial, since ip_forward()
+	 * sends us truncated packets.
+	 */
+ nlen = m_length(n, NULL);
+ if (oip->ip_p == IPPROTO_TCP) {
+ struct tcphdr *th;
+ int tcphlen;
+
+ if (oiphlen + sizeof(struct tcphdr) > n->m_len &&
+ n->m_next == NULL)
+ goto stdreply;
+ if (n->m_len < oiphlen + sizeof(struct tcphdr) &&
+ ((n = m_pullup(n, oiphlen + sizeof(struct tcphdr))) == NULL))
+ goto freeit;
+ th = (struct tcphdr *)((caddr_t)oip + oiphlen);
+ tcphlen = th->th_off << 2;
+ if (tcphlen < sizeof(struct tcphdr))
+ goto freeit;
+ if (oip->ip_len < oiphlen + tcphlen)
+ goto freeit;
+ if (oiphlen + tcphlen > n->m_len && n->m_next == NULL)
+ goto stdreply;
+ if (n->m_len < oiphlen + tcphlen &&
+ ((n = m_pullup(n, oiphlen + tcphlen)) == NULL))
+ goto freeit;
+ icmpelen = max(tcphlen, min(V_icmp_quotelen, oip->ip_len - oiphlen));
+ } else
+stdreply: icmpelen = max(8, min(V_icmp_quotelen, oip->ip_len - oiphlen));
+
+ icmplen = min(oiphlen + icmpelen, nlen);
+ if (icmplen < sizeof(struct ip))
+ goto freeit;
+
+ if (MHLEN > sizeof(struct ip) + ICMP_MINLEN + icmplen)
+ m = m_gethdr(M_DONTWAIT, MT_DATA);
+ else
+ m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
+ if (m == NULL)
+ goto freeit;
+#ifdef MAC
+ mac_netinet_icmp_reply(n, m);
+#endif
+ icmplen = min(icmplen, M_TRAILINGSPACE(m) - sizeof(struct ip) - ICMP_MINLEN);
+ m_align(m, ICMP_MINLEN + icmplen);
+ m->m_len = ICMP_MINLEN + icmplen;
+
+ /* XXX MRT make the outgoing packet use the same FIB
+ * that was associated with the incoming packet
+ */
+ M_SETFIB(m, M_GETFIB(n));
+ icp = mtod(m, struct icmp *);
+ ICMPSTAT_INC(icps_outhist[type]);
+ icp->icmp_type = type;
+ if (type == ICMP_REDIRECT)
+ icp->icmp_gwaddr.s_addr = dest;
+ else {
+ icp->icmp_void = 0;
+ /*
+ * The following assignments assume an overlay with the
+ * just zeroed icmp_void field.
+ */
+ if (type == ICMP_PARAMPROB) {
+ icp->icmp_pptr = code;
+ code = 0;
+ } else if (type == ICMP_UNREACH &&
+ code == ICMP_UNREACH_NEEDFRAG && mtu) {
+ icp->icmp_nextmtu = htons(mtu);
+ }
+ }
+ icp->icmp_code = code;
+
+ /*
+ * Copy the quotation into ICMP message and
+ * convert quoted IP header back to network representation.
+ */
+ m_copydata(n, 0, icmplen, (caddr_t)&icp->icmp_ip);
+ nip = &icp->icmp_ip;
+ nip->ip_len = htons(nip->ip_len);
+ nip->ip_off = htons(nip->ip_off);
+
+	/*
+	 * Set up the ICMP message mbuf and copy the old IP header
+	 * (without options) in front of the ICMP message.
+	 * If the original mbuf was meant to bypass the firewall, the
+	 * error reply should bypass as well.
+	 */
+ m->m_flags |= n->m_flags & M_SKIP_FIREWALL;
+ m->m_data -= sizeof(struct ip);
+ m->m_len += sizeof(struct ip);
+ m->m_pkthdr.len = m->m_len;
+ m->m_pkthdr.rcvif = n->m_pkthdr.rcvif;
+ nip = mtod(m, struct ip *);
+ bcopy((caddr_t)oip, (caddr_t)nip, sizeof(struct ip));
+ nip->ip_len = m->m_len;
+ nip->ip_v = IPVERSION;
+ nip->ip_hl = 5;
+ nip->ip_p = IPPROTO_ICMP;
+ nip->ip_tos = 0;
+ icmp_reflect(m);
+
+freeit:
+ m_freem(n);
+}
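+
+/*
+ * Usage sketch: a transport protocol with no listener on the destination
+ * port would typically respond with
+ *
+ *	icmp_error(m, ICMP_UNREACH, ICMP_UNREACH_PORT, 0, 0);
+ *
+ * the dest argument only matters for ICMP_REDIRECT and mtu only for
+ * ICMP_UNREACH_NEEDFRAG, so both are passed as zero here.
+ */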
+
+/*
+ * Process a received ICMP message.
+ */
+void
+icmp_input(struct mbuf *m, int off)
+{
+ struct icmp *icp;
+ struct in_ifaddr *ia;
+ struct ip *ip = mtod(m, struct ip *);
+ struct sockaddr_in icmpsrc, icmpdst, icmpgw;
+ int hlen = off;
+ int icmplen = ip->ip_len;
+ int i, code;
+ void (*ctlfunc)(int, struct sockaddr *, void *);
+ int fibnum;
+
+	/*
+	 * Locate the icmp structure in the mbuf, and check that it is
+	 * not corrupted and of at least minimum length.
+	 */
+#ifdef ICMPPRINTFS
+ if (icmpprintfs) {
+ char buf[4 * sizeof "123"];
+ strcpy(buf, inet_ntoa(ip->ip_src));
+ printf("icmp_input from %s to %s, len %d\n",
+ buf, inet_ntoa(ip->ip_dst), icmplen);
+ }
+#endif
+ if (icmplen < ICMP_MINLEN) {
+ ICMPSTAT_INC(icps_tooshort);
+ goto freeit;
+ }
+ i = hlen + min(icmplen, ICMP_ADVLENMIN);
+ if (m->m_len < i && (m = m_pullup(m, i)) == NULL) {
+ ICMPSTAT_INC(icps_tooshort);
+ return;
+ }
+ ip = mtod(m, struct ip *);
+ m->m_len -= hlen;
+ m->m_data += hlen;
+ icp = mtod(m, struct icmp *);
+ if (in_cksum(m, icmplen)) {
+ ICMPSTAT_INC(icps_checksum);
+ goto freeit;
+ }
+ m->m_len += hlen;
+ m->m_data -= hlen;
+
+ if (m->m_pkthdr.rcvif && m->m_pkthdr.rcvif->if_type == IFT_FAITH) {
+		/*
+		 * Deliver only very specific ICMP types.
+		 */
+ switch (icp->icmp_type) {
+ case ICMP_UNREACH:
+ case ICMP_TIMXCEED:
+ break;
+ default:
+ goto freeit;
+ }
+ }
+
+#ifdef ICMPPRINTFS
+ if (icmpprintfs)
+ printf("icmp_input, type %d code %d\n", icp->icmp_type,
+ icp->icmp_code);
+#endif
+
+ /*
+ * Message type specific processing.
+ */
+ if (icp->icmp_type > ICMP_MAXTYPE)
+ goto raw;
+
+ /* Initialize */
+ bzero(&icmpsrc, sizeof(icmpsrc));
+ icmpsrc.sin_len = sizeof(struct sockaddr_in);
+ icmpsrc.sin_family = AF_INET;
+ bzero(&icmpdst, sizeof(icmpdst));
+ icmpdst.sin_len = sizeof(struct sockaddr_in);
+ icmpdst.sin_family = AF_INET;
+ bzero(&icmpgw, sizeof(icmpgw));
+ icmpgw.sin_len = sizeof(struct sockaddr_in);
+ icmpgw.sin_family = AF_INET;
+
+ ICMPSTAT_INC(icps_inhist[icp->icmp_type]);
+ code = icp->icmp_code;
+ switch (icp->icmp_type) {
+
+ case ICMP_UNREACH:
+ switch (code) {
+ case ICMP_UNREACH_NET:
+ case ICMP_UNREACH_HOST:
+ case ICMP_UNREACH_SRCFAIL:
+ case ICMP_UNREACH_NET_UNKNOWN:
+ case ICMP_UNREACH_HOST_UNKNOWN:
+ case ICMP_UNREACH_ISOLATED:
+ case ICMP_UNREACH_TOSNET:
+ case ICMP_UNREACH_TOSHOST:
+ case ICMP_UNREACH_HOST_PRECEDENCE:
+ case ICMP_UNREACH_PRECEDENCE_CUTOFF:
+ code = PRC_UNREACH_NET;
+ break;
+
+ case ICMP_UNREACH_NEEDFRAG:
+ code = PRC_MSGSIZE;
+ break;
+
+ /*
+ * RFC 1122, Sections 3.2.2.1 and 4.2.3.9.
+ * Treat subcodes 2,3 as immediate RST
+ */
+ case ICMP_UNREACH_PROTOCOL:
+ case ICMP_UNREACH_PORT:
+ code = PRC_UNREACH_PORT;
+ break;
+
+ case ICMP_UNREACH_NET_PROHIB:
+ case ICMP_UNREACH_HOST_PROHIB:
+ case ICMP_UNREACH_FILTER_PROHIB:
+ code = PRC_UNREACH_ADMIN_PROHIB;
+ break;
+
+ default:
+ goto badcode;
+ }
+ goto deliver;
+
+ case ICMP_TIMXCEED:
+ if (code > 1)
+ goto badcode;
+ code += PRC_TIMXCEED_INTRANS;
+ goto deliver;
+
+ case ICMP_PARAMPROB:
+ if (code > 1)
+ goto badcode;
+ code = PRC_PARAMPROB;
+ goto deliver;
+
+ case ICMP_SOURCEQUENCH:
+ if (code)
+ goto badcode;
+ code = PRC_QUENCH;
+ deliver:
+ /*
+ * Problem with datagram; advise higher level routines.
+ */
+ if (icmplen < ICMP_ADVLENMIN || icmplen < ICMP_ADVLEN(icp) ||
+ icp->icmp_ip.ip_hl < (sizeof(struct ip) >> 2)) {
+ ICMPSTAT_INC(icps_badlen);
+ goto freeit;
+ }
+ icp->icmp_ip.ip_len = ntohs(icp->icmp_ip.ip_len);
+ /* Discard ICMP's in response to multicast packets */
+ if (IN_MULTICAST(ntohl(icp->icmp_ip.ip_dst.s_addr)))
+ goto badcode;
+#ifdef ICMPPRINTFS
+ if (icmpprintfs)
+ printf("deliver to protocol %d\n", icp->icmp_ip.ip_p);
+#endif
+ icmpsrc.sin_addr = icp->icmp_ip.ip_dst;
+ /*
+ * XXX if the packet contains [IPv4 AH TCP], we can't make a
+ * notification to TCP layer.
+ */
+ ctlfunc = inetsw[ip_protox[icp->icmp_ip.ip_p]].pr_ctlinput;
+ if (ctlfunc)
+ (*ctlfunc)(code, (struct sockaddr *)&icmpsrc,
+ (void *)&icp->icmp_ip);
+ break;
+
+ badcode:
+ ICMPSTAT_INC(icps_badcode);
+ break;
+
+ case ICMP_ECHO:
+ if (!V_icmpbmcastecho
+ && (m->m_flags & (M_MCAST | M_BCAST)) != 0) {
+ ICMPSTAT_INC(icps_bmcastecho);
+ break;
+ }
+ icp->icmp_type = ICMP_ECHOREPLY;
+ if (badport_bandlim(BANDLIM_ICMP_ECHO) < 0)
+ goto freeit;
+ else
+ goto reflect;
+
+ case ICMP_TSTAMP:
+ if (!V_icmpbmcastecho
+ && (m->m_flags & (M_MCAST | M_BCAST)) != 0) {
+ ICMPSTAT_INC(icps_bmcasttstamp);
+ break;
+ }
+ if (icmplen < ICMP_TSLEN) {
+ ICMPSTAT_INC(icps_badlen);
+ break;
+ }
+ icp->icmp_type = ICMP_TSTAMPREPLY;
+ icp->icmp_rtime = iptime();
+ icp->icmp_ttime = icp->icmp_rtime; /* bogus, do later! */
+ if (badport_bandlim(BANDLIM_ICMP_TSTAMP) < 0)
+ goto freeit;
+ else
+ goto reflect;
+
+ case ICMP_MASKREQ:
+ if (V_icmpmaskrepl == 0)
+ break;
+		/*
+		 * We are not able to respond with the all-ones broadcast
+		 * unless we receive it over a point-to-point interface.
+		 */
+ if (icmplen < ICMP_MASKLEN)
+ break;
+ switch (ip->ip_dst.s_addr) {
+
+ case INADDR_BROADCAST:
+ case INADDR_ANY:
+ icmpdst.sin_addr = ip->ip_src;
+ break;
+
+ default:
+ icmpdst.sin_addr = ip->ip_dst;
+ }
+ ia = (struct in_ifaddr *)ifaof_ifpforaddr(
+ (struct sockaddr *)&icmpdst, m->m_pkthdr.rcvif);
+ if (ia == NULL)
+ break;
+ if (ia->ia_ifp == NULL) {
+ ifa_free(&ia->ia_ifa);
+ break;
+ }
+ icp->icmp_type = ICMP_MASKREPLY;
+ if (V_icmpmaskfake == 0)
+ icp->icmp_mask = ia->ia_sockmask.sin_addr.s_addr;
+ else
+ icp->icmp_mask = V_icmpmaskfake;
+ if (ip->ip_src.s_addr == 0) {
+ if (ia->ia_ifp->if_flags & IFF_BROADCAST)
+ ip->ip_src = satosin(&ia->ia_broadaddr)->sin_addr;
+ else if (ia->ia_ifp->if_flags & IFF_POINTOPOINT)
+ ip->ip_src = satosin(&ia->ia_dstaddr)->sin_addr;
+ }
+ ifa_free(&ia->ia_ifa);
+reflect:
+ ip->ip_len += hlen; /* since ip_input deducts this */
+ ICMPSTAT_INC(icps_reflect);
+ ICMPSTAT_INC(icps_outhist[icp->icmp_type]);
+ icmp_reflect(m);
+ return;
+
+ case ICMP_REDIRECT:
+ if (V_log_redirect) {
+ u_long src, dst, gw;
+
+ src = ntohl(ip->ip_src.s_addr);
+ dst = ntohl(icp->icmp_ip.ip_dst.s_addr);
+ gw = ntohl(icp->icmp_gwaddr.s_addr);
+ printf("icmp redirect from %d.%d.%d.%d: "
+ "%d.%d.%d.%d => %d.%d.%d.%d\n",
+ (int)(src >> 24), (int)((src >> 16) & 0xff),
+ (int)((src >> 8) & 0xff), (int)(src & 0xff),
+ (int)(dst >> 24), (int)((dst >> 16) & 0xff),
+ (int)((dst >> 8) & 0xff), (int)(dst & 0xff),
+ (int)(gw >> 24), (int)((gw >> 16) & 0xff),
+ (int)((gw >> 8) & 0xff), (int)(gw & 0xff));
+ }
+ /*
+ * RFC1812 says we must ignore ICMP redirects if we
+ * are acting as router.
+ */
+ if (V_drop_redirect || V_ipforwarding)
+ break;
+ if (code > 3)
+ goto badcode;
+ if (icmplen < ICMP_ADVLENMIN || icmplen < ICMP_ADVLEN(icp) ||
+ icp->icmp_ip.ip_hl < (sizeof(struct ip) >> 2)) {
+ ICMPSTAT_INC(icps_badlen);
+ break;
+ }
+ /*
+ * Short circuit routing redirects to force
+ * immediate change in the kernel's routing
+ * tables. The message is also handed to anyone
+ * listening on a raw socket (e.g. the routing
+ * daemon for use in updating its tables).
+ */
+ icmpgw.sin_addr = ip->ip_src;
+ icmpdst.sin_addr = icp->icmp_gwaddr;
+#ifdef ICMPPRINTFS
+ if (icmpprintfs) {
+ char buf[4 * sizeof "123"];
+ strcpy(buf, inet_ntoa(icp->icmp_ip.ip_dst));
+
+ printf("redirect dst %s to %s\n",
+ buf, inet_ntoa(icp->icmp_gwaddr));
+ }
+#endif
+ icmpsrc.sin_addr = icp->icmp_ip.ip_dst;
+ for ( fibnum = 0; fibnum < rt_numfibs; fibnum++) {
+ in_rtredirect((struct sockaddr *)&icmpsrc,
+ (struct sockaddr *)&icmpdst,
+ (struct sockaddr *)0, RTF_GATEWAY | RTF_HOST,
+ (struct sockaddr *)&icmpgw, fibnum);
+ }
+ pfctlinput(PRC_REDIRECT_HOST, (struct sockaddr *)&icmpsrc);
+#ifdef IPSEC
+ key_sa_routechange((struct sockaddr *)&icmpsrc);
+#endif
+ break;
+
+ /*
+ * No kernel processing for the following;
+ * just fall through to send to raw listener.
+ */
+ case ICMP_ECHOREPLY:
+ case ICMP_ROUTERADVERT:
+ case ICMP_ROUTERSOLICIT:
+ case ICMP_TSTAMPREPLY:
+ case ICMP_IREQREPLY:
+ case ICMP_MASKREPLY:
+ default:
+ break;
+ }
+
+raw:
+ rip_input(m, off);
+ return;
+
+freeit:
+ m_freem(m);
+}
+
+/*
+ * Reflect the ip packet back to the source
+ */
+static void
+icmp_reflect(struct mbuf *m)
+{
+ struct ip *ip = mtod(m, struct ip *);
+ struct ifaddr *ifa;
+ struct ifnet *ifp;
+ struct in_ifaddr *ia;
+ struct in_addr t;
+ struct mbuf *opts = 0;
+ int optlen = (ip->ip_hl << 2) - sizeof(struct ip);
+
+ if (IN_MULTICAST(ntohl(ip->ip_src.s_addr)) ||
+ IN_EXPERIMENTAL(ntohl(ip->ip_src.s_addr)) ||
+ IN_ZERONET(ntohl(ip->ip_src.s_addr)) ) {
+ m_freem(m); /* Bad return address */
+ ICMPSTAT_INC(icps_badaddr);
+		goto done;	/* ip_output() will check for broadcast */
+ }
+
+ t = ip->ip_dst;
+ ip->ip_dst = ip->ip_src;
+
+ /*
+ * Source selection for ICMP replies:
+ *
+ * If the incoming packet was addressed directly to one of our
+ * own addresses, use dst as the src for the reply.
+ */
+ IN_IFADDR_RLOCK();
+ LIST_FOREACH(ia, INADDR_HASH(t.s_addr), ia_hash) {
+ if (t.s_addr == IA_SIN(ia)->sin_addr.s_addr) {
+ t = IA_SIN(ia)->sin_addr;
+ IN_IFADDR_RUNLOCK();
+ goto match;
+ }
+ }
+ IN_IFADDR_RUNLOCK();
+
+ /*
+ * If the incoming packet was addressed to one of our broadcast
+ * addresses, use the first non-broadcast address which corresponds
+ * to the incoming interface.
+ */
+ ifp = m->m_pkthdr.rcvif;
+ if (ifp != NULL && ifp->if_flags & IFF_BROADCAST) {
+ IF_ADDR_LOCK(ifp);
+ TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
+ if (ifa->ifa_addr->sa_family != AF_INET)
+ continue;
+ ia = ifatoia(ifa);
+ if (satosin(&ia->ia_broadaddr)->sin_addr.s_addr ==
+ t.s_addr) {
+ t = IA_SIN(ia)->sin_addr;
+ IF_ADDR_UNLOCK(ifp);
+ goto match;
+ }
+ }
+ IF_ADDR_UNLOCK(ifp);
+ }
+	/*
+	 * If the packet was transiting through us, use the address of
+	 * the interface the packet came in through.  If that interface
+	 * doesn't have a suitable IP address, the normal selection
+	 * criteria apply.
+	 */
+ if (V_icmp_rfi && ifp != NULL) {
+ IF_ADDR_LOCK(ifp);
+ TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
+ if (ifa->ifa_addr->sa_family != AF_INET)
+ continue;
+ ia = ifatoia(ifa);
+ t = IA_SIN(ia)->sin_addr;
+ IF_ADDR_UNLOCK(ifp);
+ goto match;
+ }
+ IF_ADDR_UNLOCK(ifp);
+ }
+	/*
+	 * If the incoming packet was not addressed directly to us, use
+	 * the interface designated for ICMP replies by the sysctl
+	 * net.inet.icmp.reply_src (not set by default).  Otherwise
+	 * continue with normal source selection.
+	 */
+ if (V_reply_src[0] != '\0' && (ifp = ifunit(V_reply_src))) {
+ IF_ADDR_LOCK(ifp);
+ TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
+ if (ifa->ifa_addr->sa_family != AF_INET)
+ continue;
+ ia = ifatoia(ifa);
+ t = IA_SIN(ia)->sin_addr;
+ IF_ADDR_UNLOCK(ifp);
+ goto match;
+ }
+ IF_ADDR_UNLOCK(ifp);
+ }
+ /*
+ * If the packet was transiting through us, use the address of
+ * the interface that is the closest to the packet source.
+ * When we don't have a route back to the packet source, stop here
+ * and drop the packet.
+ */
+ ia = ip_rtaddr(ip->ip_dst, M_GETFIB(m));
+ if (ia == NULL) {
+ m_freem(m);
+ ICMPSTAT_INC(icps_noroute);
+ goto done;
+ }
+ t = IA_SIN(ia)->sin_addr;
+ ifa_free(&ia->ia_ifa);
+match:
+#ifdef MAC
+ mac_netinet_icmp_replyinplace(m);
+#endif
+ ip->ip_src = t;
+ ip->ip_ttl = V_ip_defttl;
+
+ if (optlen > 0) {
+ register u_char *cp;
+ int opt, cnt;
+ u_int len;
+
+ /*
+ * Retrieve any source routing from the incoming packet;
+ * add on any record-route or timestamp options.
+ */
+ cp = (u_char *) (ip + 1);
+ if ((opts = ip_srcroute(m)) == 0 &&
+ (opts = m_gethdr(M_DONTWAIT, MT_DATA))) {
+ opts->m_len = sizeof(struct in_addr);
+ mtod(opts, struct in_addr *)->s_addr = 0;
+ }
+ if (opts) {
+#ifdef ICMPPRINTFS
+ if (icmpprintfs)
+ printf("icmp_reflect optlen %d rt %d => ",
+ optlen, opts->m_len);
+#endif
+ for (cnt = optlen; cnt > 0; cnt -= len, cp += len) {
+ opt = cp[IPOPT_OPTVAL];
+ if (opt == IPOPT_EOL)
+ break;
+ if (opt == IPOPT_NOP)
+ len = 1;
+ else {
+ if (cnt < IPOPT_OLEN + sizeof(*cp))
+ break;
+ len = cp[IPOPT_OLEN];
+ if (len < IPOPT_OLEN + sizeof(*cp) ||
+ len > cnt)
+ break;
+ }
+ /*
+ * Should check for overflow, but it "can't happen"
+ */
+ if (opt == IPOPT_RR || opt == IPOPT_TS ||
+ opt == IPOPT_SECURITY) {
+ bcopy((caddr_t)cp,
+ mtod(opts, caddr_t) + opts->m_len, len);
+ opts->m_len += len;
+ }
+ }
+ /* Terminate & pad, if necessary */
+ cnt = opts->m_len % 4;
+ if (cnt) {
+ for (; cnt < 4; cnt++) {
+ *(mtod(opts, caddr_t) + opts->m_len) =
+ IPOPT_EOL;
+ opts->m_len++;
+ }
+ }
+#ifdef ICMPPRINTFS
+ if (icmpprintfs)
+ printf("%d\n", opts->m_len);
+#endif
+ }
+ /*
+ * Now strip out original options by copying rest of first
+ * mbuf's data back, and adjust the IP length.
+ */
+ ip->ip_len -= optlen;
+ ip->ip_v = IPVERSION;
+ ip->ip_hl = 5;
+ m->m_len -= optlen;
+ if (m->m_flags & M_PKTHDR)
+ m->m_pkthdr.len -= optlen;
+ optlen += sizeof(struct ip);
+ bcopy((caddr_t)ip + optlen, (caddr_t)(ip + 1),
+ (unsigned)(m->m_len - sizeof(struct ip)));
+ }
+ m_tag_delete_nonpersistent(m);
+ m->m_flags &= ~(M_BCAST|M_MCAST);
+ icmp_send(m, opts);
+done:
+ if (opts)
+ (void)m_free(opts);
+}
+
+/*
+ * Send an icmp packet back to the ip level,
+ * after supplying a checksum.
+ */
+static void
+icmp_send(struct mbuf *m, struct mbuf *opts)
+{
+ register struct ip *ip = mtod(m, struct ip *);
+ register int hlen;
+ register struct icmp *icp;
+
+ hlen = ip->ip_hl << 2;
+ m->m_data += hlen;
+ m->m_len -= hlen;
+ icp = mtod(m, struct icmp *);
+ icp->icmp_cksum = 0;
+ icp->icmp_cksum = in_cksum(m, ip->ip_len - hlen);
+ m->m_data -= hlen;
+ m->m_len += hlen;
+ m->m_pkthdr.rcvif = (struct ifnet *)0;
+#ifdef ICMPPRINTFS
+ if (icmpprintfs) {
+ char buf[4 * sizeof "123"];
+ strcpy(buf, inet_ntoa(ip->ip_dst));
+ printf("icmp_send dst %s src %s\n",
+ buf, inet_ntoa(ip->ip_src));
+ }
+#endif
+ (void) ip_output(m, opts, NULL, 0, NULL, NULL);
+}
+
+/*
+ * Return milliseconds since 00:00 GMT in network format.
+ */
+uint32_t
+iptime(void)
+{
+ struct timeval atv;
+ u_long t;
+
+ getmicrotime(&atv);
+ t = (atv.tv_sec % (24*60*60)) * 1000 + atv.tv_usec / 1000;
+ return (htonl(t));
+}
+
+/*
+ * Return the next larger or smaller MTU plateau (table from RFC 1191)
+ * given current value MTU. If DIR is less than zero, a larger plateau
+ * is returned; otherwise, a smaller value is returned.
+ */
+int
+ip_next_mtu(int mtu, int dir)
+{
+ static int mtutab[] = {
+ 65535, 32000, 17914, 8166, 4352, 2002, 1492, 1280, 1006, 508,
+ 296, 68, 0
+ };
+ int i, size;
+
+ size = (sizeof mtutab) / (sizeof mtutab[0]);
+ if (dir >= 0) {
+ for (i = 0; i < size; i++)
+ if (mtu > mtutab[i])
+ return mtutab[i];
+ } else {
+ for (i = size - 1; i >= 0; i--)
+ if (mtu < mtutab[i])
+ return mtutab[i];
+ if (mtu == mtutab[0])
+ return mtutab[0];
+ }
+ return 0;
+}
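+
+/*
+ * Worked example against the table above: ip_next_mtu(1500, 0) steps
+ * down and returns 1492, while ip_next_mtu(1500, -1) steps up and
+ * returns 2002.
+ */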
+
+
+/*
+ * badport_bandlim() - check for ICMP bandwidth limit
+ *
+ * Return 0 if it is ok to send an ICMP error response, -1 if we have
+ * hit our bandwidth limit and it is not ok.
+ *
+ * If icmplim is <= 0, the feature is disabled and 0 is returned.
+ *
+ * For now we separate the TCP and UDP subsystems w/ different 'which'
+ * values. We may eventually remove this separation (and simplify the
+ * code further).
+ *
+ * Note that the printing of the error message is delayed so we can
+ * properly print the icmp error rate that the system was trying to do
+ * (i.e. 22000/100 pps, etc...). This can cause long delays in printing
+ * the 'final' error, but it doesn't make sense to solve the printing
+ * delay with more complex code.
+ */
+
+int
+badport_bandlim(int which)
+{
+
+#define N(a) (sizeof (a) / sizeof (a[0]))
+ static struct rate {
+ const char *type;
+ struct timeval lasttime;
+ int curpps;
+ } rates[BANDLIM_MAX+1] = {
+ { "icmp unreach response" },
+ { "icmp ping response" },
+ { "icmp tstamp response" },
+ { "closed port RST response" },
+ { "open port RST response" },
+ { "icmp6 unreach response" }
+ };
+
+ /*
+ * Return ok status if feature disabled or argument out of range.
+ */
+ if (V_icmplim > 0 && (u_int) which < N(rates)) {
+ struct rate *r = &rates[which];
+ int opps = r->curpps;
+
+ if (!ppsratecheck(&r->lasttime, &r->curpps, V_icmplim))
+ return -1; /* discard packet */
+ /*
+ * If we've dropped below the threshold after having
+ * rate-limited traffic print the message. This preserves
+ * the previous behaviour at the expense of added complexity.
+ */
+ if (V_icmplim_output && opps > V_icmplim)
+ log(LOG_NOTICE, "Limiting %s from %d to %d packets/sec\n",
+ r->type, opps, V_icmplim);
+ }
+ return 0; /* okay to send packet */
+#undef N
+}
diff --git a/rtems/freebsd/netinet/ip_icmp.h b/rtems/freebsd/netinet/ip_icmp.h
new file mode 100644
index 00000000..05579242
--- /dev/null
+++ b/rtems/freebsd/netinet/ip_icmp.h
@@ -0,0 +1,214 @@
+/*-
+ * Copyright (c) 1982, 1986, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)ip_icmp.h 8.1 (Berkeley) 6/10/93
+ * $FreeBSD$
+ */
+
+#ifndef _NETINET_IP_ICMP_HH_
+#define _NETINET_IP_ICMP_HH_
+
+/*
+ * Internet Control Message Protocol Definitions.
+ * Per RFC 792, September 1981.
+ */
+
+/*
+ * Internal structure of an ICMP Router Advertisement.
+ */
+struct icmp_ra_addr {
+ u_int32_t ira_addr;
+ u_int32_t ira_preference;
+};
+
+/*
+ * Structure of an icmp header.
+ */
+struct icmphdr {
+ u_char icmp_type; /* type of message, see below */
+ u_char icmp_code; /* type sub code */
+ u_short icmp_cksum; /* ones complement cksum of struct */
+};
+
+/*
+ * Structure of an icmp packet.
+ *
+ * XXX: should start with a struct icmphdr.
+ */
+struct icmp {
+ u_char icmp_type; /* type of message, see below */
+ u_char icmp_code; /* type sub code */
+ u_short icmp_cksum; /* ones complement cksum of struct */
+ union {
+ u_char ih_pptr; /* ICMP_PARAMPROB */
+ struct in_addr ih_gwaddr; /* ICMP_REDIRECT */
+ struct ih_idseq {
+ uint16_t icd_id; /* network format */
+ uint16_t icd_seq; /* network format */
+ } ih_idseq;
+ int ih_void;
+
+ /* ICMP_UNREACH_NEEDFRAG -- Path MTU Discovery (RFC1191) */
+ struct ih_pmtu {
+ uint16_t ipm_void; /* network format */
+ uint16_t ipm_nextmtu; /* network format */
+ } ih_pmtu;
+
+ struct ih_rtradv {
+ u_char irt_num_addrs;
+ u_char irt_wpa;
+ u_int16_t irt_lifetime;
+ } ih_rtradv;
+ } icmp_hun;
+#define icmp_pptr icmp_hun.ih_pptr
+#define icmp_gwaddr icmp_hun.ih_gwaddr
+#define icmp_id icmp_hun.ih_idseq.icd_id
+#define icmp_seq icmp_hun.ih_idseq.icd_seq
+#define icmp_void icmp_hun.ih_void
+#define icmp_pmvoid icmp_hun.ih_pmtu.ipm_void
+#define icmp_nextmtu icmp_hun.ih_pmtu.ipm_nextmtu
+#define icmp_num_addrs icmp_hun.ih_rtradv.irt_num_addrs
+#define icmp_wpa icmp_hun.ih_rtradv.irt_wpa
+#define icmp_lifetime icmp_hun.ih_rtradv.irt_lifetime
+ union {
+ struct id_ts { /* ICMP Timestamp */
+ /*
+ * The next 3 fields are in network format,
+ * milliseconds since 00:00 GMT
+ */
+ uint32_t its_otime; /* Originate */
+ uint32_t its_rtime; /* Receive */
+ uint32_t its_ttime; /* Transmit */
+ } id_ts;
+ struct id_ip {
+ struct ip idi_ip;
+ /* options and then 64 bits of data */
+ } id_ip;
+ struct icmp_ra_addr id_radv;
+ u_int32_t id_mask;
+ char id_data[1];
+ } icmp_dun;
+#define icmp_otime icmp_dun.id_ts.its_otime
+#define icmp_rtime icmp_dun.id_ts.its_rtime
+#define icmp_ttime icmp_dun.id_ts.its_ttime
+#define icmp_ip icmp_dun.id_ip.idi_ip
+#define icmp_radv icmp_dun.id_radv
+#define icmp_mask icmp_dun.id_mask
+#define icmp_data icmp_dun.id_data
+};
+
+/*
+ * Lower bounds on packet lengths for various types.
+ * For the error-advice packets we must first ensure that the packet
+ * is large enough to contain the returned IP header.  Only then can
+ * we check whether 64 bits of packet data have been returned, since
+ * we need to check the returned IP header length.
+ */
+#define ICMP_MINLEN 8 /* abs minimum */
+#define ICMP_TSLEN (8 + 3 * sizeof (uint32_t)) /* timestamp */
+#define ICMP_MASKLEN 12 /* address mask */
+#define ICMP_ADVLENMIN (8 + sizeof (struct ip) + 8) /* min */
+#define ICMP_ADVLEN(p) (8 + ((p)->icmp_ip.ip_hl << 2) + 8)
+ /* N.B.: must separately check that ip_hl >= 5 */
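+/*
+ * E.g., with an option-less 20-byte struct ip the minimum advice
+ * length works out to ICMP_ADVLENMIN = 8 + 20 + 8 = 36 bytes, while
+ * ICMP_ADVLEN(p) grows with the quoted header's ip_hl.
+ */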
+
+/*
+ * Definition of type and code field values.
+ */
+#define ICMP_ECHOREPLY 0 /* echo reply */
+#define ICMP_UNREACH 3 /* dest unreachable, codes: */
+#define ICMP_UNREACH_NET 0 /* bad net */
+#define ICMP_UNREACH_HOST 1 /* bad host */
+#define ICMP_UNREACH_PROTOCOL 2 /* bad protocol */
+#define ICMP_UNREACH_PORT 3 /* bad port */
+#define ICMP_UNREACH_NEEDFRAG 4 /* IP_DF caused drop */
+#define ICMP_UNREACH_SRCFAIL 5 /* src route failed */
+#define ICMP_UNREACH_NET_UNKNOWN 6 /* unknown net */
+#define ICMP_UNREACH_HOST_UNKNOWN 7 /* unknown host */
+#define ICMP_UNREACH_ISOLATED 8 /* src host isolated */
+#define ICMP_UNREACH_NET_PROHIB 9 /* prohibited access */
+#define ICMP_UNREACH_HOST_PROHIB 10 /* ditto */
+#define ICMP_UNREACH_TOSNET 11 /* bad tos for net */
+#define ICMP_UNREACH_TOSHOST 12 /* bad tos for host */
+#define ICMP_UNREACH_FILTER_PROHIB 13 /* admin prohib */
+#define ICMP_UNREACH_HOST_PRECEDENCE 14 /* host prec vio. */
+#define ICMP_UNREACH_PRECEDENCE_CUTOFF 15 /* prec cutoff */
+#define ICMP_SOURCEQUENCH 4 /* packet lost, slow down */
+#define ICMP_REDIRECT 5 /* shorter route, codes: */
+#define ICMP_REDIRECT_NET 0 /* for network */
+#define ICMP_REDIRECT_HOST 1 /* for host */
+#define ICMP_REDIRECT_TOSNET 2 /* for tos and net */
+#define ICMP_REDIRECT_TOSHOST 3 /* for tos and host */
+#define ICMP_ALTHOSTADDR 6 /* alternate host address */
+#define ICMP_ECHO 8 /* echo service */
+#define ICMP_ROUTERADVERT 9 /* router advertisement */
+#define ICMP_ROUTERADVERT_NORMAL 0 /* normal advertisement */
+#define ICMP_ROUTERADVERT_NOROUTE_COMMON 16 /* selective routing */
+#define ICMP_ROUTERSOLICIT 10 /* router solicitation */
+#define ICMP_TIMXCEED 11 /* time exceeded, code: */
+#define ICMP_TIMXCEED_INTRANS 0 /* ttl==0 in transit */
+#define ICMP_TIMXCEED_REASS 1 /* ttl==0 in reass */
+#define ICMP_PARAMPROB 12 /* ip header bad */
+#define ICMP_PARAMPROB_ERRATPTR 0 /* error at param ptr */
+#define ICMP_PARAMPROB_OPTABSENT 1 /* req. opt. absent */
+#define ICMP_PARAMPROB_LENGTH 2 /* bad length */
+#define ICMP_TSTAMP 13 /* timestamp request */
+#define ICMP_TSTAMPREPLY 14 /* timestamp reply */
+#define ICMP_IREQ 15 /* information request */
+#define ICMP_IREQREPLY 16 /* information reply */
+#define ICMP_MASKREQ 17 /* address mask request */
+#define ICMP_MASKREPLY 18 /* address mask reply */
+#define ICMP_TRACEROUTE 30 /* traceroute */
+#define ICMP_DATACONVERR 31 /* data conversion error */
+#define ICMP_MOBILE_REDIRECT 32 /* mobile host redirect */
+#define ICMP_IPV6_WHEREAREYOU 33 /* IPv6 where-are-you */
+#define ICMP_IPV6_IAMHERE 34 /* IPv6 i-am-here */
+#define ICMP_MOBILE_REGREQUEST 35 /* mobile registration req */
+#define ICMP_MOBILE_REGREPLY 36 /* mobile registration reply */
+#define ICMP_SKIP 39 /* SKIP */
+#define ICMP_PHOTURIS 40 /* Photuris */
+#define ICMP_PHOTURIS_UNKNOWN_INDEX 1 /* unknown sec index */
+#define ICMP_PHOTURIS_AUTH_FAILED 2 /* auth failed */
+#define ICMP_PHOTURIS_DECRYPT_FAILED 3 /* decrypt failed */
+
+#define ICMP_MAXTYPE 40
+
+#define ICMP_INFOTYPE(type) \
+ ((type) == ICMP_ECHOREPLY || (type) == ICMP_ECHO || \
+ (type) == ICMP_ROUTERADVERT || (type) == ICMP_ROUTERSOLICIT || \
+ (type) == ICMP_TSTAMP || (type) == ICMP_TSTAMPREPLY || \
+ (type) == ICMP_IREQ || (type) == ICMP_IREQREPLY || \
+ (type) == ICMP_MASKREQ || (type) == ICMP_MASKREPLY)
+
+#ifdef _KERNEL
+void icmp_error(struct mbuf *, int, int, uint32_t, int);
+void icmp_input(struct mbuf *, int);
+int ip_next_mtu(int, int);
+#endif
+
+#endif
diff --git a/rtems/freebsd/netinet/ip_id.c b/rtems/freebsd/netinet/ip_id.c
new file mode 100644
index 00000000..9a52f2f3
--- /dev/null
+++ b/rtems/freebsd/netinet/ip_id.c
@@ -0,0 +1,211 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+
+/*-
+ * Copyright (c) 2008 Michael J. Silbersack.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * IP ID generation is a fascinating topic.
+ *
+ * In order to avoid ID collisions during packet reassembly, common sense
+ * dictates that the period between reuse of IDs be as large as possible.
+ * This leads to the classic implementation of a system-wide counter, thereby
+ * ensuring that IDs repeat only once every 2^16 packets.
+ *
+ * Subsequent security researchers have pointed out that using a global
+ * counter makes ID values predictable. This predictability allows traffic
+ * analysis, idle scanning, and even packet injection in specific cases.
+ * These results suggest that IP IDs should be as random as possible.
+ *
+ * The "searchable queues" algorithm used in this IP ID implementation was
+ * proposed by Amit Klein. It is a compromise between the above two
+ * viewpoints that has provable behavior that can be tuned to the user's
+ * requirements.
+ *
+ * The basic concept is that we supplement a standard random number generator
+ * with a queue of the last L IDs that we have handed out to ensure that all
+ * IDs have a period of at least L.
+ *
+ * To efficiently implement this idea, we keep two data structures: a
+ * circular array of IDs of size L and a bitstring of 65536 bits.
+ *
+ * To start, we ask the RNG for a new ID. A quick index into the bitstring
+ * is used to determine if this is a recently used value. The process is
+ * repeated until a value is returned that is not in the bitstring.
+ *
+ * Having found a usable ID, we remove the ID stored at the current position
+ * in the queue from the bitstring and replace it with our new ID. Our new
+ * ID is then added to the bitstring and the queue pointer is incremented.
+ *
+ * The lower limit of 512 was chosen because there doesn't seem to be much
+ * point to having a smaller value. The upper limit of 32768 was chosen for
+ * two reasons. First, every step above 32768 decreases the entropy. Taken
+ * to an extreme, 65533 would offer 1 bit of entropy. Second, the number of
+ * attempts it takes the algorithm to find an unused ID drastically
+ * increases, killing performance. The default value of 8192 was chosen
+ * because it provides a good tradeoff between randomness and non-repetition.
+ *
+ * With L=8192, the queue will use 16K of memory. The bitstring always
+ * uses 8K of memory. No memory is allocated until the use of random ids is
+ * enabled.
+ */
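+
+/*
+ * The per-ID update step, in outline (the real code below in
+ * ip_randomid() adds locking and allocation-failure handling):
+ *
+ *	do {
+ *		new_id = arc4random() & 0xffff;
+ *	} while (bit_test(id_bits, new_id) || new_id == 0);
+ *	bit_clear(id_bits, id_array[array_ptr]);
+ *	bit_set(id_bits, new_id);
+ *	id_array[array_ptr] = new_id;
+ *	array_ptr = (array_ptr + 1) % array_size;
+ */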
+
+#include <rtems/freebsd/sys/types.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/time.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/libkern.h>
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/mutex.h>
+#include <rtems/freebsd/sys/random.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/sysctl.h>
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/ip_var.h>
+#include <rtems/freebsd/sys/bitstring.h>
+
+static MALLOC_DEFINE(M_IPID, "ipid", "randomized ip id state");
+
+static u_int16_t *id_array = NULL;
+static bitstr_t *id_bits = NULL;
+static int array_ptr = 0;
+static int array_size = 8192;
+static int random_id_collisions = 0;
+static int random_id_total = 0;
+static struct mtx ip_id_mtx;
+
+static void ip_initid(void);
+static int sysctl_ip_id_change(SYSCTL_HANDLER_ARGS);
+
+MTX_SYSINIT(ip_id_mtx, &ip_id_mtx, "ip_id_mtx", MTX_DEF);
+
+SYSCTL_DECL(_net_inet_ip);
+SYSCTL_PROC(_net_inet_ip, OID_AUTO, random_id_period, CTLTYPE_INT|CTLFLAG_RW,
+ &array_size, 0, sysctl_ip_id_change, "IU", "IP ID Array size");
+SYSCTL_INT(_net_inet_ip, OID_AUTO, random_id_collisions, CTLFLAG_RD,
+ &random_id_collisions, 0, "Count of IP ID collisions");
+SYSCTL_INT(_net_inet_ip, OID_AUTO, random_id_total, CTLFLAG_RD,
+ &random_id_total, 0, "Count of IP IDs created");
+
+static int
+sysctl_ip_id_change(SYSCTL_HANDLER_ARGS)
+{
+ int error, new;
+
+ new = array_size;
+ error = sysctl_handle_int(oidp, &new, 0, req);
+ if (error == 0 && req->newptr) {
+ if (new >= 512 && new <= 32768) {
+ mtx_lock(&ip_id_mtx);
+ array_size = new;
+ ip_initid();
+ mtx_unlock(&ip_id_mtx);
+ } else
+ error = EINVAL;
+ }
+ return (error);
+}
+
+/*
+ * ip_initid() runs with a mutex held and may execute in a network context.
+ * As a result, it uses M_NOWAIT.  Ideally, we would always do this
+ * allocation from the sysctl context and have it be an invariant that if
+ * this random ID allocation mode is selected, the buffers are present.  This
+ * would also avoid potential network-context failures of IP ID generation.
+ */
+static void
+ip_initid(void)
+{
+
+ mtx_assert(&ip_id_mtx, MA_OWNED);
+
+ if (id_array != NULL) {
+ free(id_array, M_IPID);
+ free(id_bits, M_IPID);
+ }
+ random_id_collisions = 0;
+ random_id_total = 0;
+ array_ptr = 0;
+ id_array = (u_int16_t *) malloc(array_size * sizeof(u_int16_t),
+ M_IPID, M_NOWAIT | M_ZERO);
+ id_bits = (bitstr_t *) malloc(bitstr_size(65536), M_IPID,
+ M_NOWAIT | M_ZERO);
+ if (id_array == NULL || id_bits == NULL) {
+ /* Neither or both. */
+ if (id_array != NULL) {
+ free(id_array, M_IPID);
+ id_array = NULL;
+ }
+ if (id_bits != NULL) {
+ free(id_bits, M_IPID);
+ id_bits = NULL;
+ }
+ }
+}
+
+u_int16_t
+ip_randomid(void)
+{
+ u_int16_t new_id;
+
+ mtx_lock(&ip_id_mtx);
+ if (id_array == NULL)
+ ip_initid();
+
+ /*
+ * Fail gracefully; return a fixed id if memory allocation failed;
+ * ideally we wouldn't do allocation in this context in order to
+ * avoid the possibility of this failure mode.
+ */
+ if (id_array == NULL) {
+ mtx_unlock(&ip_id_mtx);
+ return (1);
+ }
+
+ /*
+ * To avoid a conflict with the zeros that the array is initially
+ * filled with, we never hand out an id of zero.
+ */
+ new_id = 0;
+ do {
+ if (new_id != 0)
+ random_id_collisions++;
+ arc4rand(&new_id, sizeof(new_id), 0);
+ } while (bit_test(id_bits, new_id) || new_id == 0);
+ bit_clear(id_bits, id_array[array_ptr]);
+ bit_set(id_bits, new_id);
+ id_array[array_ptr] = new_id;
+ array_ptr++;
+ if (array_ptr == array_size)
+ array_ptr = 0;
+ random_id_total++;
+ mtx_unlock(&ip_id_mtx);
+ return (new_id);
+}
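+
+/*
+ * Callers normally reach this through a wrapper rather than directly;
+ * a sketch of the selection done at packet-output time (the exact
+ * ip_newid() macro lives in ip_var.h):
+ *
+ *	ip->ip_id = V_ip_do_randomid ? ip_randomid() : htons(V_ip_id++);
+ */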
diff --git a/rtems/freebsd/netinet/ip_input.c b/rtems/freebsd/netinet/ip_input.c
new file mode 100644
index 00000000..b3f347a0
--- /dev/null
+++ b/rtems/freebsd/netinet/ip_input.c
@@ -0,0 +1,1794 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 1982, 1986, 1988, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)ip_input.c 8.2 (Berkeley) 1/4/94
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/freebsd/local/opt_bootp.h>
+#include <rtems/freebsd/local/opt_ipfw.h>
+#include <rtems/freebsd/local/opt_ipstealth.h>
+#include <rtems/freebsd/local/opt_ipsec.h>
+#include <rtems/freebsd/local/opt_route.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/callout.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/domain.h>
+#include <rtems/freebsd/sys/protosw.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/time.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/rwlock.h>
+#include <rtems/freebsd/sys/syslog.h>
+#include <rtems/freebsd/sys/sysctl.h>
+
+#include <rtems/freebsd/net/pfil.h>
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/if_types.h>
+#include <rtems/freebsd/net/if_var.h>
+#include <rtems/freebsd/net/if_dl.h>
+#include <rtems/freebsd/net/route.h>
+#include <rtems/freebsd/net/netisr.h>
+#include <rtems/freebsd/net/vnet.h>
+#include <rtems/freebsd/net/flowtable.h>
+
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/in_systm.h>
+#include <rtems/freebsd/netinet/in_var.h>
+#include <rtems/freebsd/netinet/ip.h>
+#include <rtems/freebsd/netinet/in_pcb.h>
+#include <rtems/freebsd/netinet/ip_var.h>
+#include <rtems/freebsd/netinet/ip_fw.h>
+#include <rtems/freebsd/netinet/ip_icmp.h>
+#include <rtems/freebsd/netinet/ip_options.h>
+#include <rtems/freebsd/machine/in_cksum.h>
+#include <rtems/freebsd/netinet/ip_carp.h>
+#ifdef IPSEC
+#include <rtems/freebsd/netinet/ip_ipsec.h>
+#endif /* IPSEC */
+
+#include <rtems/freebsd/sys/socketvar.h>
+
+#include <rtems/freebsd/security/mac/mac_framework.h>
+
+#ifdef CTASSERT
+CTASSERT(sizeof(struct ip) == 20);
+#endif
+
+struct rwlock in_ifaddr_lock;
+RW_SYSINIT(in_ifaddr_lock, &in_ifaddr_lock, "in_ifaddr_lock");
+
+VNET_DEFINE(int, rsvp_on);
+
+VNET_DEFINE(int, ipforwarding);
+SYSCTL_VNET_INT(_net_inet_ip, IPCTL_FORWARDING, forwarding, CTLFLAG_RW,
+ &VNET_NAME(ipforwarding), 0,
+ "Enable IP forwarding between interfaces");
+
+static VNET_DEFINE(int, ipsendredirects) = 1; /* XXX */
+#define V_ipsendredirects VNET(ipsendredirects)
+SYSCTL_VNET_INT(_net_inet_ip, IPCTL_SENDREDIRECTS, redirect, CTLFLAG_RW,
+ &VNET_NAME(ipsendredirects), 0,
+ "Enable sending IP redirects");
+
+VNET_DEFINE(int, ip_defttl) = IPDEFTTL;
+SYSCTL_VNET_INT(_net_inet_ip, IPCTL_DEFTTL, ttl, CTLFLAG_RW,
+ &VNET_NAME(ip_defttl), 0,
+ "Maximum TTL on IP packets");
+
+static VNET_DEFINE(int, ip_keepfaith);
+#define V_ip_keepfaith VNET(ip_keepfaith)
+SYSCTL_VNET_INT(_net_inet_ip, IPCTL_KEEPFAITH, keepfaith, CTLFLAG_RW,
+ &VNET_NAME(ip_keepfaith), 0,
+ "Enable packet capture for FAITH IPv4->IPv6 translater daemon");
+
+static VNET_DEFINE(int, ip_sendsourcequench);
+#define V_ip_sendsourcequench VNET(ip_sendsourcequench)
+SYSCTL_VNET_INT(_net_inet_ip, OID_AUTO, sendsourcequench, CTLFLAG_RW,
+ &VNET_NAME(ip_sendsourcequench), 0,
+ "Enable the transmission of source quench packets");
+
+VNET_DEFINE(int, ip_do_randomid);
+SYSCTL_VNET_INT(_net_inet_ip, OID_AUTO, random_id, CTLFLAG_RW,
+ &VNET_NAME(ip_do_randomid), 0,
+ "Assign random ip_id values");
+
+/*
+ * XXX - Setting ip_checkinterface mostly implements the receive side of
+ * the Strong ES model described in RFC 1122, but since the routing table
+ * and transmit implementation do not implement the Strong ES model,
+ * setting this to 1 results in an odd hybrid.
+ *
+ * XXX - ip_checkinterface currently must be disabled if you use ipnat
+ * to translate the destination address to another local interface.
+ *
+ * XXX - ip_checkinterface must be disabled if you add IP aliases
+ * to the loopback interface instead of the interface where the
+ * packets for those addresses are received.
+ */
+static VNET_DEFINE(int, ip_checkinterface);
+#define V_ip_checkinterface VNET(ip_checkinterface)
+SYSCTL_VNET_INT(_net_inet_ip, OID_AUTO, check_interface, CTLFLAG_RW,
+ &VNET_NAME(ip_checkinterface), 0,
+ "Verify packet arrives on correct interface");
+
+VNET_DEFINE(struct pfil_head, inet_pfil_hook); /* Packet filter hooks */
+
+static struct netisr_handler ip_nh = {
+ .nh_name = "ip",
+ .nh_handler = ip_input,
+ .nh_proto = NETISR_IP,
+ .nh_policy = NETISR_POLICY_FLOW,
+};
+
+extern struct domain inetdomain;
+extern struct protosw inetsw[];
+u_char ip_protox[IPPROTO_MAX];
+VNET_DEFINE(struct in_ifaddrhead, in_ifaddrhead); /* first inet address */
+VNET_DEFINE(struct in_ifaddrhashhead *, in_ifaddrhashtbl); /* inet addr hash table */
+VNET_DEFINE(u_long, in_ifaddrhmask); /* mask for hash table */
+
+VNET_DEFINE(struct ipstat, ipstat);
+SYSCTL_VNET_STRUCT(_net_inet_ip, IPCTL_STATS, stats, CTLFLAG_RW,
+ &VNET_NAME(ipstat), ipstat,
+ "IP statistics (struct ipstat, netinet/ip_var.h)");
+
+static VNET_DEFINE(uma_zone_t, ipq_zone);
+static VNET_DEFINE(TAILQ_HEAD(ipqhead, ipq), ipq[IPREASS_NHASH]);
+static struct mtx ipqlock;
+
+#define V_ipq_zone VNET(ipq_zone)
+#define V_ipq VNET(ipq)
+
+#define IPQ_LOCK() mtx_lock(&ipqlock)
+#define IPQ_UNLOCK() mtx_unlock(&ipqlock)
+#define IPQ_LOCK_INIT() mtx_init(&ipqlock, "ipqlock", NULL, MTX_DEF)
+#define IPQ_LOCK_ASSERT() mtx_assert(&ipqlock, MA_OWNED)
+
+static void maxnipq_update(void);
+static void ipq_zone_change(void *);
+static void ip_drain_locked(void);
+
+static VNET_DEFINE(int, maxnipq); /* Administrative limit on # reass queues. */
+static VNET_DEFINE(int, nipq); /* Total # of reass queues */
+#define V_maxnipq VNET(maxnipq)
+#define V_nipq VNET(nipq)
+SYSCTL_VNET_INT(_net_inet_ip, OID_AUTO, fragpackets, CTLFLAG_RD,
+ &VNET_NAME(nipq), 0,
+ "Current number of IPv4 fragment reassembly queue entries");
+
+static VNET_DEFINE(int, maxfragsperpacket);
+#define V_maxfragsperpacket VNET(maxfragsperpacket)
+SYSCTL_VNET_INT(_net_inet_ip, OID_AUTO, maxfragsperpacket, CTLFLAG_RW,
+ &VNET_NAME(maxfragsperpacket), 0,
+ "Maximum number of IPv4 fragments allowed per packet");
+
+struct callout ipport_tick_callout;
+
+#ifdef IPCTL_DEFMTU
+SYSCTL_INT(_net_inet_ip, IPCTL_DEFMTU, mtu, CTLFLAG_RW,
+ &ip_mtu, 0, "Default MTU");
+#endif
+
+#ifdef IPSTEALTH
+VNET_DEFINE(int, ipstealth);
+SYSCTL_VNET_INT(_net_inet_ip, OID_AUTO, stealth, CTLFLAG_RW,
+ &VNET_NAME(ipstealth), 0,
+ "IP stealth mode, no TTL decrementation on forwarding");
+#endif
+
+#ifdef FLOWTABLE
+static VNET_DEFINE(int, ip_output_flowtable_size) = 2048;
+VNET_DEFINE(struct flowtable *, ip_ft);
+#define V_ip_output_flowtable_size VNET(ip_output_flowtable_size)
+
+SYSCTL_VNET_INT(_net_inet_ip, OID_AUTO, output_flowtable_size, CTLFLAG_RDTUN,
+ &VNET_NAME(ip_output_flowtable_size), 2048,
+ "number of entries in the per-cpu output flow caches");
+#endif
+
+VNET_DEFINE(int, fw_one_pass) = 1;
+
+static void ip_freef(struct ipqhead *, struct ipq *);
+
+/*
+ * Kernel module interface for updating ipstat. The argument is an index
+ * into ipstat treated as an array of u_long. While this encodes the general
+ * layout of ipstat into the caller, it doesn't encode its location, so that
+ * future changes to add, for example, per-CPU stats support won't cause
+ * binary compatibility problems for kernel modules.
+ */
+void
+kmod_ipstat_inc(int statnum)
+{
+
+ (*((u_long *)&V_ipstat + statnum))++;
+}
+
+void
+kmod_ipstat_dec(int statnum)
+{
+
+ (*((u_long *)&V_ipstat + statnum))--;
+}
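+
+/*
+ * Illustrative usage (hypothetical caller): a module derives the index
+ * from the field's offset instead of hard-coding a position, which is
+ * what keeps the struct layout out of the module binary:
+ *
+ *	kmod_ipstat_inc(offsetof(struct ipstat, ips_total) /
+ *	    sizeof(u_long));
+ */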
+
+static int
+sysctl_netinet_intr_queue_maxlen(SYSCTL_HANDLER_ARGS)
+{
+ int error, qlimit;
+
+ netisr_getqlimit(&ip_nh, &qlimit);
+ error = sysctl_handle_int(oidp, &qlimit, 0, req);
+ if (error || !req->newptr)
+ return (error);
+ if (qlimit < 1)
+ return (EINVAL);
+ return (netisr_setqlimit(&ip_nh, qlimit));
+}
+SYSCTL_PROC(_net_inet_ip, IPCTL_INTRQMAXLEN, intr_queue_maxlen,
+ CTLTYPE_INT|CTLFLAG_RW, 0, 0, sysctl_netinet_intr_queue_maxlen, "I",
+ "Maximum size of the IP input queue");
+
+static int
+sysctl_netinet_intr_queue_drops(SYSCTL_HANDLER_ARGS)
+{
+ u_int64_t qdrops_long;
+ int error, qdrops;
+
+ netisr_getqdrops(&ip_nh, &qdrops_long);
+ qdrops = qdrops_long;
+ error = sysctl_handle_int(oidp, &qdrops, 0, req);
+ if (error || !req->newptr)
+ return (error);
+ if (qdrops != 0)
+ return (EINVAL);
+ netisr_clearqdrops(&ip_nh);
+ return (0);
+}
+
+SYSCTL_PROC(_net_inet_ip, IPCTL_INTRQDROPS, intr_queue_drops,
+ CTLTYPE_INT|CTLFLAG_RD, 0, 0, sysctl_netinet_intr_queue_drops, "I",
+ "Number of packets dropped from the IP input queue");
+
+/*
+ * IP initialization: fill in IP protocol switch table.
+ * All protocols not implemented in kernel go to raw IP protocol handler.
+ */
+void
+ip_init(void)
+{
+ struct protosw *pr;
+ int i;
+
+ V_ip_id = time_second & 0xffff;
+
+ TAILQ_INIT(&V_in_ifaddrhead);
+ V_in_ifaddrhashtbl = hashinit(INADDR_NHASH, M_IFADDR, &V_in_ifaddrhmask);
+
+ /* Initialize IP reassembly queue. */
+ for (i = 0; i < IPREASS_NHASH; i++)
+ TAILQ_INIT(&V_ipq[i]);
+ V_maxnipq = nmbclusters / 32;
+ V_maxfragsperpacket = 16;
+ V_ipq_zone = uma_zcreate("ipq", sizeof(struct ipq), NULL, NULL, NULL,
+ NULL, UMA_ALIGN_PTR, 0);
+ maxnipq_update();
+
+ /* Initialize packet filter hooks. */
+ V_inet_pfil_hook.ph_type = PFIL_TYPE_AF;
+ V_inet_pfil_hook.ph_af = AF_INET;
+ if ((i = pfil_head_register(&V_inet_pfil_hook)) != 0)
+ printf("%s: WARNING: unable to register pfil hook, "
+ "error %d\n", __func__, i);
+
+#ifdef FLOWTABLE
+ if (TUNABLE_INT_FETCH("net.inet.ip.output_flowtable_size",
+ &V_ip_output_flowtable_size)) {
+ if (V_ip_output_flowtable_size < 256)
+ V_ip_output_flowtable_size = 256;
+ if (!powerof2(V_ip_output_flowtable_size)) {
+ printf("flowtable must be power of 2 size\n");
+ V_ip_output_flowtable_size = 2048;
+ }
+ } else {
+ /*
+ * round up to the next power of 2
+ */
+ V_ip_output_flowtable_size = 1 << fls((1024 + maxusers * 64)-1);
+ }
+ V_ip_ft = flowtable_alloc("ipv4", V_ip_output_flowtable_size, FL_PCPU);
+#endif
+
+ /* Skip initialization of globals for non-default instances. */
+ if (!IS_DEFAULT_VNET(curvnet))
+ return;
+
+ pr = pffindproto(PF_INET, IPPROTO_RAW, SOCK_RAW);
+ if (pr == NULL)
+ panic("ip_init: PF_INET not found");
+
+ /* Initialize the entire ip_protox[] array to IPPROTO_RAW. */
+ for (i = 0; i < IPPROTO_MAX; i++)
+ ip_protox[i] = pr - inetsw;
+ /*
+ * Cycle through IP protocols and put them into the appropriate place
+ * in ip_protox[].
+ */
+ for (pr = inetdomain.dom_protosw;
+ pr < inetdomain.dom_protoswNPROTOSW; pr++)
+ if (pr->pr_domain->dom_family == PF_INET &&
+ pr->pr_protocol && pr->pr_protocol != IPPROTO_RAW) {
+ /* Be careful to only index valid IP protocols. */
+ if (pr->pr_protocol < IPPROTO_MAX)
+ ip_protox[pr->pr_protocol] = pr - inetsw;
+ }
+
+ /* Start ipport_tick. */
+ callout_init(&ipport_tick_callout, CALLOUT_MPSAFE);
+ callout_reset(&ipport_tick_callout, 1, ipport_tick, NULL);
+ EVENTHANDLER_REGISTER(shutdown_pre_sync, ip_fini, NULL,
+ SHUTDOWN_PRI_DEFAULT);
+ EVENTHANDLER_REGISTER(nmbclusters_change, ipq_zone_change,
+ NULL, EVENTHANDLER_PRI_ANY);
+
+ /* Initialize various other remaining things. */
+ IPQ_LOCK_INIT();
+ netisr_register(&ip_nh);
+}
+
+#ifdef VIMAGE
+void
+ip_destroy(void)
+{
+
+ /* Cleanup in_ifaddr hash table; should be empty. */
+ hashdestroy(V_in_ifaddrhashtbl, M_IFADDR, V_in_ifaddrhmask);
+
+ IPQ_LOCK();
+ ip_drain_locked();
+ IPQ_UNLOCK();
+
+ uma_zdestroy(V_ipq_zone);
+}
+#endif
+
+void
+ip_fini(void *xtp)
+{
+
+ callout_stop(&ipport_tick_callout);
+}
+
+/*
+ * Ip input routine. Checksum and byte swap header. If fragmented
+ * try to reassemble. Process options. Pass to next level.
+ */
+void
+ip_input(struct mbuf *m)
+{
+ struct ip *ip = NULL;
+ struct in_ifaddr *ia = NULL;
+ struct ifaddr *ifa;
+ struct ifnet *ifp;
+ int checkif, hlen = 0;
+ u_short sum;
+ int dchg = 0; /* dest changed after fw */
+ struct in_addr odst; /* original dst address */
+
+ M_ASSERTPKTHDR(m);
+
+ if (m->m_flags & M_FASTFWD_OURS) {
+ /*
+ * Firewall or NAT changed destination to local.
+ * We expect ip_len and ip_off to be in host byte order.
+ */
+ m->m_flags &= ~M_FASTFWD_OURS;
+ /* Set up some basics that will be used later. */
+ ip = mtod(m, struct ip *);
+ hlen = ip->ip_hl << 2;
+ goto ours;
+ }
+
+ IPSTAT_INC(ips_total);
+
+ if (m->m_pkthdr.len < sizeof(struct ip))
+ goto tooshort;
+
+ if (m->m_len < sizeof (struct ip) &&
+ (m = m_pullup(m, sizeof (struct ip))) == NULL) {
+ IPSTAT_INC(ips_toosmall);
+ return;
+ }
+ ip = mtod(m, struct ip *);
+
+ if (ip->ip_v != IPVERSION) {
+ IPSTAT_INC(ips_badvers);
+ goto bad;
+ }
+
+ hlen = ip->ip_hl << 2;
+ if (hlen < sizeof(struct ip)) { /* minimum header length */
+ IPSTAT_INC(ips_badhlen);
+ goto bad;
+ }
+ if (hlen > m->m_len) {
+ if ((m = m_pullup(m, hlen)) == NULL) {
+ IPSTAT_INC(ips_badhlen);
+ return;
+ }
+ ip = mtod(m, struct ip *);
+ }
+
+ /* 127/8 must not appear on wire - RFC1122 */
+ ifp = m->m_pkthdr.rcvif;
+ if ((ntohl(ip->ip_dst.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET ||
+ (ntohl(ip->ip_src.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET) {
+ if ((ifp->if_flags & IFF_LOOPBACK) == 0) {
+ IPSTAT_INC(ips_badaddr);
+ goto bad;
+ }
+ }
+
+ if (m->m_pkthdr.csum_flags & CSUM_IP_CHECKED) {
+ sum = !(m->m_pkthdr.csum_flags & CSUM_IP_VALID);
+ } else {
+ if (hlen == sizeof(struct ip)) {
+ sum = in_cksum_hdr(ip);
+ } else {
+ sum = in_cksum(m, hlen);
+ }
+ }
+ if (sum) {
+ IPSTAT_INC(ips_badsum);
+ goto bad;
+ }
+
+#ifdef ALTQ
+ if (altq_input != NULL && (*altq_input)(m, AF_INET) == 0)
+ /* packet is dropped by traffic conditioner */
+ return;
+#endif
+
+ /*
+ * Convert fields to host representation.
+ */
+ ip->ip_len = ntohs(ip->ip_len);
+ if (ip->ip_len < hlen) {
+ IPSTAT_INC(ips_badlen);
+ goto bad;
+ }
+ ip->ip_off = ntohs(ip->ip_off);
+
+ /*
+ * Check that the amount of data in the buffers
+ * is at least as much as the IP header would have us expect.
+ * Trim mbufs if longer than we expect.
+ * Drop packet if shorter than we expect.
+ */
+ if (m->m_pkthdr.len < ip->ip_len) {
+tooshort:
+ IPSTAT_INC(ips_tooshort);
+ goto bad;
+ }
+ if (m->m_pkthdr.len > ip->ip_len) {
+ if (m->m_len == m->m_pkthdr.len) {
+ m->m_len = ip->ip_len;
+ m->m_pkthdr.len = ip->ip_len;
+ } else
+ m_adj(m, ip->ip_len - m->m_pkthdr.len);
+ }
+#ifdef IPSEC
+ /*
+ * Bypass packet filtering for packets from a tunnel (gif).
+ */
+ if (ip_ipsec_filtertunnel(m))
+ goto passin;
+#endif /* IPSEC */
+
+ /*
+ * Run through list of hooks for input packets.
+ *
+ * NB: Beware of the destination address changing (e.g.
+ * by NAT rewriting). When this happens, tell
+ * ip_forward to do the right thing.
+ */
+
+ /* Jump over all PFIL processing if hooks are not active. */
+ if (!PFIL_HOOKED(&V_inet_pfil_hook))
+ goto passin;
+
+ odst = ip->ip_dst;
+ if (pfil_run_hooks(&V_inet_pfil_hook, &m, ifp, PFIL_IN, NULL) != 0)
+ return;
+ if (m == NULL) /* consumed by filter */
+ return;
+
+ ip = mtod(m, struct ip *);
+ dchg = (odst.s_addr != ip->ip_dst.s_addr);
+ ifp = m->m_pkthdr.rcvif;
+
+#ifdef IPFIREWALL_FORWARD
+ if (m->m_flags & M_FASTFWD_OURS) {
+ m->m_flags &= ~M_FASTFWD_OURS;
+ goto ours;
+ }
+ if ((dchg = (m_tag_find(m, PACKET_TAG_IPFORWARD, NULL) != NULL)) != 0) {
+ /*
+ * Directly ship the packet on. This allows forwarding
+ * packets originally destined to us to some other directly
+ * connected host.
+ */
+ ip_forward(m, dchg);
+ return;
+ }
+#endif /* IPFIREWALL_FORWARD */
+
+passin:
+ /*
+ * Process options and, if not destined for us,
+ * ship it on. ip_dooptions returns 1 when an
+ * error was detected (causing an icmp message
+ * to be sent and the original packet to be freed).
+ */
+ if (hlen > sizeof (struct ip) && ip_dooptions(m, 0))
+ return;
+
+ /*
+ * Greedy RSVP: snatch any PATH packet of the RSVP protocol, no
+ * matter whether it is destined for another node or is a multicast
+ * packet, and prevent it from being forwarded anywhere else. Only
+ * grab the packet if the RSVP daemon is actually running.
+ */
+ if (V_rsvp_on && ip->ip_p == IPPROTO_RSVP)
+ goto ours;
+
+ /*
+ * Check our list of addresses, to see if the packet is for us.
+ * If we don't have any addresses, assume any unicast packet
+ * we receive might be for us (and let the upper layers deal
+ * with it).
+ */
+ if (TAILQ_EMPTY(&V_in_ifaddrhead) &&
+ (m->m_flags & (M_MCAST|M_BCAST)) == 0)
+ goto ours;
+
+ /*
+ * Enable a consistency check between the destination address
+ * and the arrival interface for a unicast packet (the RFC 1122
+ * strong ES model) if IP forwarding is disabled and the packet
+ * is not locally generated and the packet is not subject to
+ * 'ipfw fwd'.
+ *
+ * XXX - Checking also should be disabled if the destination
+ * address is ipnat'ed to a different interface.
+ *
+ * XXX - Checking is incompatible with IP aliases added
+ * to the loopback interface instead of the interface where
+ * the packets are received.
+ *
+ * XXX - This is the case for carp vhost IPs as well so we
+ * insert a workaround. If the packet got here, we already
+ * checked with carp_iamatch() and carp_forus().
+ */
+ checkif = V_ip_checkinterface && (V_ipforwarding == 0) &&
+ ifp != NULL && ((ifp->if_flags & IFF_LOOPBACK) == 0) &&
+ ifp->if_carp == NULL && (dchg == 0);
+
+ /*
+ * Check for exact addresses in the hash bucket.
+ */
+ /* IN_IFADDR_RLOCK(); */
+ LIST_FOREACH(ia, INADDR_HASH(ip->ip_dst.s_addr), ia_hash) {
+ /*
+ * If the address matches, verify that the packet
+ * arrived via the correct interface if checking is
+ * enabled.
+ */
+ if (IA_SIN(ia)->sin_addr.s_addr == ip->ip_dst.s_addr &&
+ (!checkif || ia->ia_ifp == ifp)) {
+ ifa_ref(&ia->ia_ifa);
+ /* IN_IFADDR_RUNLOCK(); */
+ goto ours;
+ }
+ }
+ /* IN_IFADDR_RUNLOCK(); */
+
+ /*
+ * Check for broadcast addresses.
+ *
+ * Only accept broadcast packets that arrive via the matching
+ * interface. Forwarded directed broadcasts are handled by
+ * ip_forward() and ether_output(); for SIMPLEX interfaces,
+ * ether_output() also loops them back into the stack.
+ */
+ if (ifp != NULL && ifp->if_flags & IFF_BROADCAST) {
+ IF_ADDR_LOCK(ifp);
+ TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
+ if (ifa->ifa_addr->sa_family != AF_INET)
+ continue;
+ ia = ifatoia(ifa);
+ if (satosin(&ia->ia_broadaddr)->sin_addr.s_addr ==
+ ip->ip_dst.s_addr) {
+ ifa_ref(ifa);
+ IF_ADDR_UNLOCK(ifp);
+ goto ours;
+ }
+ if (ia->ia_netbroadcast.s_addr == ip->ip_dst.s_addr) {
+ ifa_ref(ifa);
+ IF_ADDR_UNLOCK(ifp);
+ goto ours;
+ }
+#ifdef BOOTP_COMPAT
+ if (IA_SIN(ia)->sin_addr.s_addr == INADDR_ANY) {
+ ifa_ref(ifa);
+ IF_ADDR_UNLOCK(ifp);
+ goto ours;
+ }
+#endif
+ }
+ IF_ADDR_UNLOCK(ifp);
+ ia = NULL;
+ }
+ /* RFC 3927 2.7: Do not forward datagrams for 169.254.0.0/16. */
+ if (IN_LINKLOCAL(ntohl(ip->ip_dst.s_addr))) {
+ IPSTAT_INC(ips_cantforward);
+ m_freem(m);
+ return;
+ }
+ if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr))) {
+ if (V_ip_mrouter) {
+ /*
+ * If we are acting as a multicast router, all
+ * incoming multicast packets are passed to the
+ * kernel-level multicast forwarding function.
+ * The packet is returned (relatively) intact; if
+ * ip_mforward() returns a non-zero value, the packet
+ * must be discarded, else it may be accepted below.
+ */
+ if (ip_mforward && ip_mforward(ip, ifp, m, 0) != 0) {
+ IPSTAT_INC(ips_cantforward);
+ m_freem(m);
+ return;
+ }
+
+ /*
+ * The process-level routing daemon needs to receive
+ * all multicast IGMP packets, whether or not this
+ * host belongs to their destination groups.
+ */
+ if (ip->ip_p == IPPROTO_IGMP)
+ goto ours;
+ IPSTAT_INC(ips_forward);
+ }
+ /*
+ * Assume the packet is for us, to avoid prematurely taking
+ * a lock on the in_multi hash. Protocols must perform
+ * their own filtering and update statistics accordingly.
+ */
+ goto ours;
+ }
+ if (ip->ip_dst.s_addr == (u_long)INADDR_BROADCAST)
+ goto ours;
+ if (ip->ip_dst.s_addr == INADDR_ANY)
+ goto ours;
+
+ /*
+ * FAITH (Firewall Aided Internet Translator)
+ */
+ if (ifp && ifp->if_type == IFT_FAITH) {
+ if (V_ip_keepfaith) {
+ if (ip->ip_p == IPPROTO_TCP || ip->ip_p == IPPROTO_ICMP)
+ goto ours;
+ }
+ m_freem(m);
+ return;
+ }
+
+ /*
+ * Not for us; forward if possible and desirable.
+ */
+ if (V_ipforwarding == 0) {
+ IPSTAT_INC(ips_cantforward);
+ m_freem(m);
+ } else {
+#ifdef IPSEC
+ if (ip_ipsec_fwd(m))
+ goto bad;
+#endif /* IPSEC */
+ ip_forward(m, dchg);
+ }
+ return;
+
+ours:
+#ifdef IPSTEALTH
+ /*
+ * IPSTEALTH: Process non-routing options only
+ * if the packet is destined for us.
+ */
+ if (V_ipstealth && hlen > sizeof (struct ip) && ip_dooptions(m, 1)) {
+ if (ia != NULL)
+ ifa_free(&ia->ia_ifa);
+ return;
+ }
+#endif /* IPSTEALTH */
+
+ /* Count the packet in the ip address stats */
+ if (ia != NULL) {
+ ia->ia_ifa.if_ipackets++;
+ ia->ia_ifa.if_ibytes += m->m_pkthdr.len;
+ ifa_free(&ia->ia_ifa);
+ }
+
+ /*
+ * Attempt reassembly; if it succeeds, proceed.
+ * ip_reass() will return a different mbuf.
+ */
+ if (ip->ip_off & (IP_MF | IP_OFFMASK)) {
+ m = ip_reass(m);
+ if (m == NULL)
+ return;
+ ip = mtod(m, struct ip *);
+ /* Get the header length of the reassembled packet */
+ hlen = ip->ip_hl << 2;
+ }
+
+ /*
+ * Upper-layer protocols expect the packet length to exclude
+ * the IP header.
+ */
+ ip->ip_len -= hlen;
+
+#ifdef IPSEC
+ /*
+ * Enforce IPsec policy checking if we are seeing the last header.
+ * Note that we do not visit this with protocols that have
+ * pcb-layer code, like UDP/TCP/raw IP.
+ */
+ if (ip_ipsec_input(m))
+ goto bad;
+#endif /* IPSEC */
+
+ /*
+ * Switch out to protocol's input routine.
+ */
+ IPSTAT_INC(ips_delivered);
+
+ (*inetsw[ip_protox[ip->ip_p]].pr_input)(m, hlen);
+ return;
+bad:
+ m_freem(m);
+}
+
+/*
+ * After maxnipq has been updated, propagate the change to UMA. The UMA zone
+ * max has slightly different semantics than the sysctl: UMA treats 0 as
+ * "unlimited", whereas the sysctl uses -1 for unlimited and 0 to forbid
+ * further fragment queue allocation.
+ */
+static void
+maxnipq_update(void)
+{
+
+ /*
+ * -1 for unlimited allocation.
+ */
+ if (V_maxnipq < 0)
+ uma_zone_set_max(V_ipq_zone, 0);
+ /*
+ * Positive number for specific bound.
+ */
+ if (V_maxnipq > 0)
+ uma_zone_set_max(V_ipq_zone, V_maxnipq);
+ /*
+ * Zero specifies no further fragment queue allocation -- set the
+ * bound very low, but rely on implementation elsewhere to actually
+ * prevent allocation and reclaim current queues.
+ */
+ if (V_maxnipq == 0)
+ uma_zone_set_max(V_ipq_zone, 1);
+}
+
+static void
+ipq_zone_change(void *tag)
+{
+
+ if (V_maxnipq > 0 && V_maxnipq < (nmbclusters / 32)) {
+ V_maxnipq = nmbclusters / 32;
+ maxnipq_update();
+ }
+}
+
+static int
+sysctl_maxnipq(SYSCTL_HANDLER_ARGS)
+{
+ int error, i;
+
+ i = V_maxnipq;
+ error = sysctl_handle_int(oidp, &i, 0, req);
+ if (error || !req->newptr)
+ return (error);
+
+ /*
+ * XXXRW: Might be a good idea to sanity check the argument and place
+ * an extreme upper bound.
+ */
+ if (i < -1)
+ return (EINVAL);
+ V_maxnipq = i;
+ maxnipq_update();
+ return (0);
+}
+
+SYSCTL_PROC(_net_inet_ip, OID_AUTO, maxfragpackets, CTLTYPE_INT|CTLFLAG_RW,
+ NULL, 0, sysctl_maxnipq, "I",
+ "Maximum number of IPv4 fragment reassembly queue entries");
+
+/*
+ * Take incoming datagram fragment and try to reassemble it into
+ * a whole datagram. If the argument is the first fragment or one
+ * in between, the function will return NULL and store the mbuf
+ * in the fragment chain. If the argument is the last fragment,
+ * the packet will be reassembled and the pointer to the new
+ * mbuf returned for further processing. Only m_tags attached
+ * to the first packet/fragment are preserved.
+ * The IP header is *NOT* adjusted out of iplen.
+ */
+struct mbuf *
+ip_reass(struct mbuf *m)
+{
+ struct ip *ip;
+ struct mbuf *p, *q, *nq, *t;
+ struct ipq *fp = NULL;
+ struct ipqhead *head;
+ int i, hlen, next;
+ u_int8_t ecn, ecn0;
+ u_short hash;
+
+ /* If maxnipq or maxfragsperpacket are 0, never accept fragments. */
+ if (V_maxnipq == 0 || V_maxfragsperpacket == 0) {
+ IPSTAT_INC(ips_fragments);
+ IPSTAT_INC(ips_fragdropped);
+ m_freem(m);
+ return (NULL);
+ }
+
+ ip = mtod(m, struct ip *);
+ hlen = ip->ip_hl << 2;
+
+ hash = IPREASS_HASH(ip->ip_src.s_addr, ip->ip_id);
+ head = &V_ipq[hash];
+ IPQ_LOCK();
+
+ /*
+ * Look for queue of fragments
+ * of this datagram.
+ */
+ TAILQ_FOREACH(fp, head, ipq_list)
+ if (ip->ip_id == fp->ipq_id &&
+ ip->ip_src.s_addr == fp->ipq_src.s_addr &&
+ ip->ip_dst.s_addr == fp->ipq_dst.s_addr &&
+#ifdef MAC
+ mac_ipq_match(m, fp) &&
+#endif
+ ip->ip_p == fp->ipq_p)
+ goto found;
+
+ fp = NULL;
+
+ /*
+ * Attempt to trim the number of allocated fragment queues if it
+ * exceeds the administrative limit.
+ */
+ if ((V_nipq > V_maxnipq) && (V_maxnipq > 0)) {
+ /*
+ * drop something from the tail of the current queue
+ * before proceeding further
+ */
+ struct ipq *q = TAILQ_LAST(head, ipqhead);
+ if (q == NULL) { /* gak */
+ for (i = 0; i < IPREASS_NHASH; i++) {
+ struct ipq *r = TAILQ_LAST(&V_ipq[i], ipqhead);
+ if (r) {
+ IPSTAT_ADD(ips_fragtimeout,
+ r->ipq_nfrags);
+ ip_freef(&V_ipq[i], r);
+ break;
+ }
+ }
+ } else {
+ IPSTAT_ADD(ips_fragtimeout, q->ipq_nfrags);
+ ip_freef(head, q);
+ }
+ }
+
+found:
+ /*
+ * Adjust ip_len to exclude the header and convert this
+ * fragment's offset to bytes.
+ */
+ ip->ip_len -= hlen;
+ if (ip->ip_off & IP_MF) {
+ /*
+ * Make sure that fragments have a data length
+ * that's a non-zero multiple of 8 bytes.
+ */
+ if (ip->ip_len == 0 || (ip->ip_len & 0x7) != 0) {
+ IPSTAT_INC(ips_toosmall); /* XXX */
+ goto dropfrag;
+ }
+ m->m_flags |= M_FRAG;
+ } else
+ m->m_flags &= ~M_FRAG;
+ ip->ip_off <<= 3;
+
+ /*
+ * Count the fragment and stash the IP header in the mbuf
+ * packet header for the reassembly code below.
+ */
+ IPSTAT_INC(ips_fragments);
+ m->m_pkthdr.header = ip;
+
+ /* Previous ip_reass() started here. */
+ /*
+ * Leaving the IP header in the mbuf data would confuse the
+ * code below, so step past it.
+ */
+ m->m_data += hlen;
+ m->m_len -= hlen;
+
+ /*
+ * If first fragment to arrive, create a reassembly queue.
+ */
+ if (fp == NULL) {
+ fp = uma_zalloc(V_ipq_zone, M_NOWAIT);
+ if (fp == NULL)
+ goto dropfrag;
+#ifdef MAC
+ if (mac_ipq_init(fp, M_NOWAIT) != 0) {
+ uma_zfree(V_ipq_zone, fp);
+ fp = NULL;
+ goto dropfrag;
+ }
+ mac_ipq_create(m, fp);
+#endif
+ TAILQ_INSERT_HEAD(head, fp, ipq_list);
+ V_nipq++;
+ fp->ipq_nfrags = 1;
+ fp->ipq_ttl = IPFRAGTTL;
+ fp->ipq_p = ip->ip_p;
+ fp->ipq_id = ip->ip_id;
+ fp->ipq_src = ip->ip_src;
+ fp->ipq_dst = ip->ip_dst;
+ fp->ipq_frags = m;
+ m->m_nextpkt = NULL;
+ goto done;
+ } else {
+ fp->ipq_nfrags++;
+#ifdef MAC
+ mac_ipq_update(m, fp);
+#endif
+ }
+
+#define GETIP(m) ((struct ip*)((m)->m_pkthdr.header))
+
+ /*
+ * Handle ECN by comparing this segment with the first one;
+ * if CE is set anywhere, do not lose it, and drop the packet
+ * if CE and not-ECT are mixed within the same datagram.
+ */
+ ecn = ip->ip_tos & IPTOS_ECN_MASK;
+ ecn0 = GETIP(fp->ipq_frags)->ip_tos & IPTOS_ECN_MASK;
+ if (ecn == IPTOS_ECN_CE) {
+ if (ecn0 == IPTOS_ECN_NOTECT)
+ goto dropfrag;
+ if (ecn0 != IPTOS_ECN_CE)
+ GETIP(fp->ipq_frags)->ip_tos |= IPTOS_ECN_CE;
+ }
+ if (ecn == IPTOS_ECN_NOTECT && ecn0 != IPTOS_ECN_NOTECT)
+ goto dropfrag;
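+
+ /*
+ * Summary of the checks above (illustrative; derived from this
+ * code, not the full RFC 3168 matrix):
+ *
+ *	first \ this	not-ECT	ECT	CE
+ *	not-ECT		accept	accept	drop
+ *	ECT		drop	accept	accept, mark first CE
+ *	CE		drop	accept	accept
+ */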
+
+ /*
+ * Find a segment which begins after this one does.
+ */
+ for (p = NULL, q = fp->ipq_frags; q; p = q, q = q->m_nextpkt)
+ if (GETIP(q)->ip_off > ip->ip_off)
+ break;
+
+ /*
+ * If there is a preceding segment, it may provide some of
+ * our data already. If so, drop the data from the incoming
+ * segment. If it provides all of our data, drop us; otherwise,
+ * stick the new segment in the proper place.
+ *
+ * If some of the data is dropped from the incoming segment,
+ * then its checksum is invalidated.
+ */
+ if (p) {
+ i = GETIP(p)->ip_off + GETIP(p)->ip_len - ip->ip_off;
+ if (i > 0) {
+ if (i >= ip->ip_len)
+ goto dropfrag;
+ m_adj(m, i);
+ m->m_pkthdr.csum_flags = 0;
+ ip->ip_off += i;
+ ip->ip_len -= i;
+ }
+ m->m_nextpkt = p->m_nextpkt;
+ p->m_nextpkt = m;
+ } else {
+ m->m_nextpkt = fp->ipq_frags;
+ fp->ipq_frags = m;
+ }
+
+ /*
+ * While we overlap succeeding segments, trim them or,
+ * if they are completely covered, dequeue them.
+ */
+ for (; q != NULL && ip->ip_off + ip->ip_len > GETIP(q)->ip_off;
+ q = nq) {
+ i = (ip->ip_off + ip->ip_len) - GETIP(q)->ip_off;
+ if (i < GETIP(q)->ip_len) {
+ GETIP(q)->ip_len -= i;
+ GETIP(q)->ip_off += i;
+ m_adj(q, i);
+ q->m_pkthdr.csum_flags = 0;
+ break;
+ }
+ nq = q->m_nextpkt;
+ m->m_nextpkt = nq;
+ IPSTAT_INC(ips_fragdropped);
+ fp->ipq_nfrags--;
+ m_freem(q);
+ }
+
+ /*
+ * Check for complete reassembly and perform per-packet
+ * fragment limiting.
+ *
+ * Fragment limiting is performed here so that the nth frag has
+ * a chance to complete the packet before we drop the packet.
+ * As a result, n+1 frags are actually allowed per packet, but
+ * only n will ever be stored. (n = maxfragsperpacket.)
+ */
+ next = 0;
+ for (p = NULL, q = fp->ipq_frags; q; p = q, q = q->m_nextpkt) {
+ if (GETIP(q)->ip_off != next) {
+ if (fp->ipq_nfrags > V_maxfragsperpacket) {
+ IPSTAT_ADD(ips_fragdropped, fp->ipq_nfrags);
+ ip_freef(head, fp);
+ }
+ goto done;
+ }
+ next += GETIP(q)->ip_len;
+ }
+ /* Make sure the last packet didn't have the IP_MF flag */
+ if (p->m_flags & M_FRAG) {
+ if (fp->ipq_nfrags > V_maxfragsperpacket) {
+ IPSTAT_ADD(ips_fragdropped, fp->ipq_nfrags);
+ ip_freef(head, fp);
+ }
+ goto done;
+ }
+
+ /*
+ * Reassembly is complete. Make sure the packet is a sane size.
+ */
+ q = fp->ipq_frags;
+ ip = GETIP(q);
+ if (next + (ip->ip_hl << 2) > IP_MAXPACKET) {
+ IPSTAT_INC(ips_toolong);
+ IPSTAT_ADD(ips_fragdropped, fp->ipq_nfrags);
+ ip_freef(head, fp);
+ goto done;
+ }
+
+ /*
+ * Concatenate fragments.
+ */
+ m = q;
+ t = m->m_next;
+ m->m_next = NULL;
+ m_cat(m, t);
+ nq = q->m_nextpkt;
+ q->m_nextpkt = NULL;
+ for (q = nq; q != NULL; q = nq) {
+ nq = q->m_nextpkt;
+ q->m_nextpkt = NULL;
+ m->m_pkthdr.csum_flags &= q->m_pkthdr.csum_flags;
+ m->m_pkthdr.csum_data += q->m_pkthdr.csum_data;
+ m_cat(m, q);
+ }
+ /*
+ * In order to do checksumming faster, we do the 'end-around carry'
+ * here (and not inside the for loop), though this implies we are
+ * not going to reassemble more than 64k fragments.
+ */
+ m->m_pkthdr.csum_data =
+ (m->m_pkthdr.csum_data & 0xffff) + (m->m_pkthdr.csum_data >> 16);
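+ /*
+ * Worked example (illustrative): in one's-complement arithmetic
+ * the carry out of bit 15 is folded back in, e.g.
+ *
+ *	csum_data = 0x1fffe
+ *	(0x1fffe & 0xffff) + (0x1fffe >> 16) = 0xfffe + 0x1 = 0xffff
+ *
+ * A single fold suffices because at most ~64k fragment sums are
+ * accumulated, so the high half cannot itself overflow 16 bits.
+ */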
+#ifdef MAC
+ mac_ipq_reassemble(fp, m);
+ mac_ipq_destroy(fp);
+#endif
+
+ /*
+ * Create header for new ip packet by modifying header of first
+ * packet; dequeue and discard fragment reassembly header.
+ * Make header visible.
+ */
+ ip->ip_len = (ip->ip_hl << 2) + next;
+ ip->ip_src = fp->ipq_src;
+ ip->ip_dst = fp->ipq_dst;
+ TAILQ_REMOVE(head, fp, ipq_list);
+ V_nipq--;
+ uma_zfree(V_ipq_zone, fp);
+ m->m_len += (ip->ip_hl << 2);
+ m->m_data -= (ip->ip_hl << 2);
+ /* some debugging cruft by sklower, below, will go away soon */
+ if (m->m_flags & M_PKTHDR) /* XXX this should be done elsewhere */
+ m_fixhdr(m);
+ IPSTAT_INC(ips_reassembled);
+ IPQ_UNLOCK();
+ return (m);
+
+dropfrag:
+ IPSTAT_INC(ips_fragdropped);
+ if (fp != NULL)
+ fp->ipq_nfrags--;
+ m_freem(m);
+done:
+ IPQ_UNLOCK();
+ return (NULL);
+
+#undef GETIP
+}
+
+/*
+ * Free a fragment reassembly header and all
+ * associated datagrams.
+ */
+static void
+ip_freef(struct ipqhead *fhp, struct ipq *fp)
+{
+ struct mbuf *q;
+
+ IPQ_LOCK_ASSERT();
+
+ while (fp->ipq_frags) {
+ q = fp->ipq_frags;
+ fp->ipq_frags = q->m_nextpkt;
+ m_freem(q);
+ }
+ TAILQ_REMOVE(fhp, fp, ipq_list);
+ uma_zfree(V_ipq_zone, fp);
+ V_nipq--;
+}
+
+/*
+ * IP timer processing;
+ * if a timer expires on a reassembly
+ * queue, discard it.
+ */
+void
+ip_slowtimo(void)
+{
+ VNET_ITERATOR_DECL(vnet_iter);
+ struct ipq *fp;
+ int i;
+
+ VNET_LIST_RLOCK_NOSLEEP();
+ IPQ_LOCK();
+ VNET_FOREACH(vnet_iter) {
+ CURVNET_SET(vnet_iter);
+ for (i = 0; i < IPREASS_NHASH; i++) {
+ for (fp = TAILQ_FIRST(&V_ipq[i]); fp;) {
+ struct ipq *fpp;
+
+ fpp = fp;
+ fp = TAILQ_NEXT(fp, ipq_list);
+ if (--fpp->ipq_ttl == 0) {
+ IPSTAT_ADD(ips_fragtimeout,
+ fpp->ipq_nfrags);
+ ip_freef(&V_ipq[i], fpp);
+ }
+ }
+ }
+ /*
+ * If we are over the maximum number of fragments
+ * (due to the limit being lowered), drain off
+ * enough to get down to the new limit.
+ */
+ if (V_maxnipq >= 0 && V_nipq > V_maxnipq) {
+ for (i = 0; i < IPREASS_NHASH; i++) {
+ while (V_nipq > V_maxnipq &&
+ !TAILQ_EMPTY(&V_ipq[i])) {
+ IPSTAT_ADD(ips_fragdropped,
+ TAILQ_FIRST(&V_ipq[i])->ipq_nfrags);
+ ip_freef(&V_ipq[i],
+ TAILQ_FIRST(&V_ipq[i]));
+ }
+ }
+ }
+ CURVNET_RESTORE();
+ }
+ IPQ_UNLOCK();
+ VNET_LIST_RUNLOCK_NOSLEEP();
+}
+
+/*
+ * Drain off all datagram fragments.
+ */
+static void
+ip_drain_locked(void)
+{
+ int i;
+
+ IPQ_LOCK_ASSERT();
+
+ for (i = 0; i < IPREASS_NHASH; i++) {
+ while (!TAILQ_EMPTY(&V_ipq[i])) {
+ IPSTAT_ADD(ips_fragdropped,
+ TAILQ_FIRST(&V_ipq[i])->ipq_nfrags);
+ ip_freef(&V_ipq[i], TAILQ_FIRST(&V_ipq[i]));
+ }
+ }
+}
+
+void
+ip_drain(void)
+{
+ VNET_ITERATOR_DECL(vnet_iter);
+
+ VNET_LIST_RLOCK_NOSLEEP();
+ IPQ_LOCK();
+ VNET_FOREACH(vnet_iter) {
+ CURVNET_SET(vnet_iter);
+ ip_drain_locked();
+ CURVNET_RESTORE();
+ }
+ IPQ_UNLOCK();
+ VNET_LIST_RUNLOCK_NOSLEEP();
+ in_rtqdrain();
+}
+
+/*
+ * The protocol to be inserted into ip_protox[] must already be registered
+ * in inetsw[], either statically or through pf_proto_register().
+ */
+int
+ipproto_register(short ipproto)
+{
+ struct protosw *pr;
+
+ /* Sanity checks. */
+ if (ipproto <= 0 || ipproto >= IPPROTO_MAX)
+ return (EPROTONOSUPPORT);
+
+ /*
+ * The protocol slot must not be occupied by another protocol
+ * already. A slot whose index still points at IPPROTO_RAW is unused.
+ */
+ pr = pffindproto(PF_INET, IPPROTO_RAW, SOCK_RAW);
+ if (pr == NULL)
+ return (EPFNOSUPPORT);
+ if (ip_protox[ipproto] != pr - inetsw) /* IPPROTO_RAW */
+ return (EEXIST);
+
+ /* Find the protocol position in inetsw[] and set the index. */
+ for (pr = inetdomain.dom_protosw;
+ pr < inetdomain.dom_protoswNPROTOSW; pr++) {
+ if (pr->pr_domain->dom_family == PF_INET &&
+ pr->pr_protocol && pr->pr_protocol == ipproto) {
+ ip_protox[pr->pr_protocol] = pr - inetsw;
+ return (0);
+ }
+ }
+ return (EPROTONOSUPPORT);
+}
+
+int
+ipproto_unregister(short ipproto)
+{
+ struct protosw *pr;
+
+ /* Sanity checks. */
+ if (ipproto <= 0 || ipproto >= IPPROTO_MAX)
+ return (EPROTONOSUPPORT);
+
+ /* Check if the protocol was indeed registered. */
+ pr = pffindproto(PF_INET, IPPROTO_RAW, SOCK_RAW);
+ if (pr == NULL)
+ return (EPFNOSUPPORT);
+ if (ip_protox[ipproto] == pr - inetsw) /* IPPROTO_RAW */
+ return (ENOENT);
+
+ /* Reset the protocol slot to IPPROTO_RAW. */
+ ip_protox[ipproto] = pr - inetsw;
+ return (0);
+}
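+
+/*
+ * Illustrative sketch (hypothetical module; IPPROTO_MYPROTO and
+ * my_protosw are invented names): a loadable protocol first adds its
+ * protosw entry via pf_proto_register() and then claims its slot:
+ *
+ *	error = pf_proto_register(PF_INET, &my_protosw);
+ *	if (error == 0)
+ *		error = ipproto_register(IPPROTO_MYPROTO);
+ */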
+
+/*
+ * Given address of next destination (final or next hop), return (referenced)
+ * internet address info of interface to be used to get there.
+ */
+struct in_ifaddr *
+ip_rtaddr(struct in_addr dst, u_int fibnum)
+{
+ struct route sro;
+ struct sockaddr_in *sin;
+ struct in_ifaddr *ia;
+
+ bzero(&sro, sizeof(sro));
+ sin = (struct sockaddr_in *)&sro.ro_dst;
+ sin->sin_family = AF_INET;
+ sin->sin_len = sizeof(*sin);
+ sin->sin_addr = dst;
+ in_rtalloc_ign(&sro, 0, fibnum);
+
+ if (sro.ro_rt == NULL)
+ return (NULL);
+
+ ia = ifatoia(sro.ro_rt->rt_ifa);
+ ifa_ref(&ia->ia_ifa);
+ RTFREE(sro.ro_rt);
+ return (ia);
+}
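+
+/*
+ * Usage note (illustrative): ip_rtaddr() returns a referenced ifaddr,
+ * so every non-NULL result must be released, as ip_forward() does below:
+ *
+ *	ia = ip_rtaddr(dst, fibnum);
+ *	if (ia != NULL) {
+ *		... use ia ...
+ *		ifa_free(&ia->ia_ifa);
+ *	}
+ */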
+
+u_char inetctlerrmap[PRC_NCMDS] = {
+ 0, 0, 0, 0,
+ 0, EMSGSIZE, EHOSTDOWN, EHOSTUNREACH,
+ EHOSTUNREACH, EHOSTUNREACH, ECONNREFUSED, ECONNREFUSED,
+ EMSGSIZE, EHOSTUNREACH, 0, 0,
+ 0, 0, EHOSTUNREACH, 0,
+ ENOPROTOOPT, ECONNREFUSED
+};
+
+/*
+ * Forward a packet. If some error occurs return the sender
+ * an icmp packet. Note we can't always generate a meaningful
+ * icmp message because icmp doesn't have a large enough repertoire
+ * of codes and types.
+ *
+ * If not forwarding, just drop the packet. This could be confusing
+ * if ipforwarding was zero but some routing protocol was advancing
+ * us as a gateway to somewhere. However, we must let the routing
+ * protocol deal with that.
+ *
+ * The srcrt parameter indicates whether the packet is being forwarded
+ * via a source route.
+ */
+void
+ip_forward(struct mbuf *m, int srcrt)
+{
+ struct ip *ip = mtod(m, struct ip *);
+ struct in_ifaddr *ia;
+ struct mbuf *mcopy;
+ struct in_addr dest;
+ struct route ro;
+ int error, type = 0, code = 0, mtu = 0;
+
+ if (m->m_flags & (M_BCAST|M_MCAST) || in_canforward(ip->ip_dst) == 0) {
+ IPSTAT_INC(ips_cantforward);
+ m_freem(m);
+ return;
+ }
+#ifdef IPSTEALTH
+ if (!V_ipstealth) {
+#endif
+ if (ip->ip_ttl <= IPTTLDEC) {
+ icmp_error(m, ICMP_TIMXCEED, ICMP_TIMXCEED_INTRANS,
+ 0, 0);
+ return;
+ }
+#ifdef IPSTEALTH
+ }
+#endif
+
+ ia = ip_rtaddr(ip->ip_dst, M_GETFIB(m));
+#ifndef IPSEC
+ /*
+ * 'ia' may be NULL if there is no route for this destination.
+ * In the IPsec case, don't discard it just yet; pass it to
+ * ip_output in case there is an outgoing IPsec policy.
+ */
+ if (!srcrt && ia == NULL) {
+ icmp_error(m, ICMP_UNREACH, ICMP_UNREACH_HOST, 0, 0);
+ return;
+ }
+#endif
+
+ /*
+ * Save the IP header and at most 8 bytes of the payload,
+ * in case we need to generate an ICMP message to the src.
+ *
+ * XXX this can be optimized a lot by saving the data in a local
+ * buffer on the stack (72 bytes at most), and only allocating the
+ * mbuf if really necessary. The vast majority of the packets
+ * are forwarded without having to send an ICMP back (either
+ * because unnecessary, or because rate limited), so we are
+ * really wasting a lot of work here.
+ *
+ * We don't use m_copy() because it might return a reference
+ * to a shared cluster. Both this function and ip_output()
+ * assume exclusive access to the IP header in `m', so any
+ * data in a cluster may change before we reach icmp_error().
+ */
+ MGETHDR(mcopy, M_DONTWAIT, m->m_type);
+ if (mcopy != NULL && !m_dup_pkthdr(mcopy, m, M_DONTWAIT)) {
+ /*
+ * It's probably ok if the pkthdr dup fails (because
+ * the deep copy of the tag chain failed), but for now
+ * be conservative and just discard the copy since
+ * code below may some day want the tags.
+ */
+ m_free(mcopy);
+ mcopy = NULL;
+ }
+ if (mcopy != NULL) {
+ mcopy->m_len = min(ip->ip_len, M_TRAILINGSPACE(mcopy));
+ mcopy->m_pkthdr.len = mcopy->m_len;
+ m_copydata(m, 0, mcopy->m_len, mtod(mcopy, caddr_t));
+ }
+
+#ifdef IPSTEALTH
+ if (!V_ipstealth) {
+#endif
+ ip->ip_ttl -= IPTTLDEC;
+#ifdef IPSTEALTH
+ }
+#endif
+
+ /*
+ * If forwarding a packet using the same interface it came in on,
+ * we should perhaps send a redirect to the sender to shortcut a hop.
+ * Only send redirect if source is sending directly to us,
+ * and if packet was not source routed (or has any options).
+ * Also, don't send redirect if forwarding using a default route
+ * or a route modified by a redirect.
+ */
+ dest.s_addr = 0;
+ if (!srcrt && V_ipsendredirects &&
+ ia != NULL && ia->ia_ifp == m->m_pkthdr.rcvif) {
+ struct sockaddr_in *sin;
+ struct rtentry *rt;
+
+ bzero(&ro, sizeof(ro));
+ sin = (struct sockaddr_in *)&ro.ro_dst;
+ sin->sin_family = AF_INET;
+ sin->sin_len = sizeof(*sin);
+ sin->sin_addr = ip->ip_dst;
+ in_rtalloc_ign(&ro, 0, M_GETFIB(m));
+
+ rt = ro.ro_rt;
+
+ if (rt && (rt->rt_flags & (RTF_DYNAMIC|RTF_MODIFIED)) == 0 &&
+ satosin(rt_key(rt))->sin_addr.s_addr != 0) {
+#define RTA(rt) ((struct in_ifaddr *)(rt->rt_ifa))
+ u_long src = ntohl(ip->ip_src.s_addr);
+
+ if (RTA(rt) &&
+ (src & RTA(rt)->ia_subnetmask) == RTA(rt)->ia_subnet) {
+ if (rt->rt_flags & RTF_GATEWAY)
+ dest.s_addr = satosin(rt->rt_gateway)->sin_addr.s_addr;
+ else
+ dest.s_addr = ip->ip_dst.s_addr;
+ /* Router Requirements (RFC 1812) says to send only host redirects */
+ type = ICMP_REDIRECT;
+ code = ICMP_REDIRECT_HOST;
+ }
+ }
+ if (rt)
+ RTFREE(rt);
+ }
+
+ /*
+ * Try to cache the route MTU from ip_output so we can consider it for
+ * the ICMP_UNREACH_NEEDFRAG "Next-Hop MTU" field described in RFC1191.
+ */
+ bzero(&ro, sizeof(ro));
+
+ error = ip_output(m, NULL, &ro, IP_FORWARDING, NULL, NULL);
+
+ if (error == EMSGSIZE && ro.ro_rt)
+ mtu = ro.ro_rt->rt_rmx.rmx_mtu;
+ if (ro.ro_rt)
+ RTFREE(ro.ro_rt);
+
+ if (error)
+ IPSTAT_INC(ips_cantforward);
+ else {
+ IPSTAT_INC(ips_forward);
+ if (type)
+ IPSTAT_INC(ips_redirectsent);
+ else {
+ if (mcopy)
+ m_freem(mcopy);
+ if (ia != NULL)
+ ifa_free(&ia->ia_ifa);
+ return;
+ }
+ }
+ if (mcopy == NULL) {
+ if (ia != NULL)
+ ifa_free(&ia->ia_ifa);
+ return;
+ }
+
+ switch (error) {
+
+ case 0: /* forwarded, but need redirect */
+ /* type, code set above */
+ break;
+
+ case ENETUNREACH:
+ case EHOSTUNREACH:
+ case ENETDOWN:
+ case EHOSTDOWN:
+ default:
+ type = ICMP_UNREACH;
+ code = ICMP_UNREACH_HOST;
+ break;
+
+ case EMSGSIZE:
+ type = ICMP_UNREACH;
+ code = ICMP_UNREACH_NEEDFRAG;
+
+#ifdef IPSEC
+ /*
+ * If IPsec is configured for this path,
+ * override any MTU value possibly set by ip_output.
+ */
+ mtu = ip_ipsec_mtu(mcopy, mtu);
+#endif /* IPSEC */
+ /*
+ * If the MTU was set before, make sure we are below the
+ * interface MTU.
+ * If the MTU wasn't set before, use the interface MTU or
+ * fall back to the next smaller MTU step compared to the
+ * current packet size.
+ */
+ if (mtu != 0) {
+ if (ia != NULL)
+ mtu = min(mtu, ia->ia_ifp->if_mtu);
+ } else {
+ if (ia != NULL)
+ mtu = ia->ia_ifp->if_mtu;
+ else
+ mtu = ip_next_mtu(ip->ip_len, 0);
+ }
+ IPSTAT_INC(ips_cantfrag);
+ break;
+
+ case ENOBUFS:
+ /*
+ * RFC 1812 (Requirements for IP Version 4 Routers) requires
+ * that a router not generate ICMP_SOURCEQUENCH.
+ * Source quench could be a big problem under DoS attacks,
+ * or if the underlying interface is rate-limited.
+ * Those who need source quench packets may re-enable them
+ * via the net.inet.ip.sendsourcequench sysctl.
+ */
+ if (V_ip_sendsourcequench == 0) {
+ m_freem(mcopy);
+ if (ia != NULL)
+ ifa_free(&ia->ia_ifa);
+ return;
+ } else {
+ type = ICMP_SOURCEQUENCH;
+ code = 0;
+ }
+ break;
+
+ case EACCES: /* ipfw denied packet */
+ m_freem(mcopy);
+ if (ia != NULL)
+ ifa_free(&ia->ia_ifa);
+ return;
+ }
+ if (ia != NULL)
+ ifa_free(&ia->ia_ifa);
+ icmp_error(mcopy, type, code, dest.s_addr, mtu);
+}
+
+void
+ip_savecontrol(struct inpcb *inp, struct mbuf **mp, struct ip *ip,
+ struct mbuf *m)
+{
+
+ if (inp->inp_socket->so_options & (SO_BINTIME | SO_TIMESTAMP)) {
+ struct bintime bt;
+
+ bintime(&bt);
+ if (inp->inp_socket->so_options & SO_BINTIME) {
+ *mp = sbcreatecontrol((caddr_t) &bt, sizeof(bt),
+ SCM_BINTIME, SOL_SOCKET);
+ if (*mp)
+ mp = &(*mp)->m_next;
+ }
+ if (inp->inp_socket->so_options & SO_TIMESTAMP) {
+ struct timeval tv;
+
+ bintime2timeval(&bt, &tv);
+ *mp = sbcreatecontrol((caddr_t) &tv, sizeof(tv),
+ SCM_TIMESTAMP, SOL_SOCKET);
+ if (*mp)
+ mp = &(*mp)->m_next;
+ }
+ }
+ if (inp->inp_flags & INP_RECVDSTADDR) {
+ *mp = sbcreatecontrol((caddr_t) &ip->ip_dst,
+ sizeof(struct in_addr), IP_RECVDSTADDR, IPPROTO_IP);
+ if (*mp)
+ mp = &(*mp)->m_next;
+ }
+ if (inp->inp_flags & INP_RECVTTL) {
+ *mp = sbcreatecontrol((caddr_t) &ip->ip_ttl,
+ sizeof(u_char), IP_RECVTTL, IPPROTO_IP);
+ if (*mp)
+ mp = &(*mp)->m_next;
+ }
+#ifdef notyet
+ /* XXX
+ * Moving these out of udp_input() made them even more broken
+ * than they already were.
+ */
+ /* options were tossed already */
+ if (inp->inp_flags & INP_RECVOPTS) {
+ *mp = sbcreatecontrol((caddr_t) opts_deleted_above,
+ sizeof(struct in_addr), IP_RECVOPTS, IPPROTO_IP);
+ if (*mp)
+ mp = &(*mp)->m_next;
+ }
+ /* ip_srcroute doesn't do what we want here, need to fix */
+ if (inp->inp_flags & INP_RECVRETOPTS) {
+ *mp = sbcreatecontrol((caddr_t) ip_srcroute(m),
+ sizeof(struct in_addr), IP_RECVRETOPTS, IPPROTO_IP);
+ if (*mp)
+ mp = &(*mp)->m_next;
+ }
+#endif
+ if (inp->inp_flags & INP_RECVIF) {
+ struct ifnet *ifp;
+ struct sdlbuf {
+ struct sockaddr_dl sdl;
+ u_char pad[32];
+ } sdlbuf;
+ struct sockaddr_dl *sdp;
+ struct sockaddr_dl *sdl2 = &sdlbuf.sdl;
+
+ if (((ifp = m->m_pkthdr.rcvif))
+ && ( ifp->if_index && (ifp->if_index <= V_if_index))) {
+ sdp = (struct sockaddr_dl *)ifp->if_addr->ifa_addr;
+ /*
+ * Change our mind and don't try to copy.
+ */
+ if ((sdp->sdl_family != AF_LINK)
+ || (sdp->sdl_len > sizeof(sdlbuf))) {
+ goto makedummy;
+ }
+ bcopy(sdp, sdl2, sdp->sdl_len);
+ } else {
+makedummy:
+ sdl2->sdl_len
+ = offsetof(struct sockaddr_dl, sdl_data[0]);
+ sdl2->sdl_family = AF_LINK;
+ sdl2->sdl_index = 0;
+ sdl2->sdl_nlen = sdl2->sdl_alen = sdl2->sdl_slen = 0;
+ }
+ *mp = sbcreatecontrol((caddr_t) sdl2, sdl2->sdl_len,
+ IP_RECVIF, IPPROTO_IP);
+ if (*mp)
+ mp = &(*mp)->m_next;
+ }
+}
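+
+/*
+ * Illustrative sketch (user-space side): the control messages built
+ * above are only produced when the matching socket option is on, e.g.
+ * for the destination address of a UDP datagram:
+ *
+ *	int on = 1;
+ *	setsockopt(s, IPPROTO_IP, IP_RECVDSTADDR, &on, sizeof(on));
+ *	// the address then arrives as a cmsg via recvmsg(2)
+ */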
+
+/*
+ * XXXRW: Multicast routing code in ip_mroute.c is generally MPSAFE, but the
+ * ip_rsvp and ip_rsvp_on variables need to be interlocked with rsvp_on
+ * locking. This code remains in ip_input.c as ip_mroute.c is optionally
+ * compiled.
+ */
+static VNET_DEFINE(int, ip_rsvp_on);
+VNET_DEFINE(struct socket *, ip_rsvpd);
+
+#define V_ip_rsvp_on VNET(ip_rsvp_on)
+
+int
+ip_rsvp_init(struct socket *so)
+{
+
+ if (so->so_type != SOCK_RAW ||
+ so->so_proto->pr_protocol != IPPROTO_RSVP)
+ return EOPNOTSUPP;
+
+ if (V_ip_rsvpd != NULL)
+ return EADDRINUSE;
+
+ V_ip_rsvpd = so;
+ /*
+ * This may seem silly, but we need to be sure we don't over-increment
+ * the RSVP counter, in case something slips up.
+ */
+ if (!V_ip_rsvp_on) {
+ V_ip_rsvp_on = 1;
+ V_rsvp_on++;
+ }
+
+ return 0;
+}
+
+int
+ip_rsvp_done(void)
+{
+
+ V_ip_rsvpd = NULL;
+ /*
+ * This may seem silly, but we need to be sure we don't over-decrement
+ * the RSVP counter, in case something slips up.
+ */
+ if (V_ip_rsvp_on) {
+ V_ip_rsvp_on = 0;
+ V_rsvp_on--;
+ }
+ return 0;
+}
+
+void
+rsvp_input(struct mbuf *m, int off) /* XXX must fixup manually */
+{
+
+ if (rsvp_input_p) { /* call the real one if loaded */
+ rsvp_input_p(m, off);
+ return;
+ }
+
+ /*
+ * We can still get packets with rsvp_on == 0 if there is a local
+ * member of the group to which the RSVP packet is addressed. In
+ * that case we want to throw the packet away.
+ */
+
+ if (!V_rsvp_on) {
+ m_freem(m);
+ return;
+ }
+
+ if (V_ip_rsvpd != NULL) {
+ rip_input(m, off);
+ return;
+ }
+ /* Drop the packet */
+ m_freem(m);
+}
diff --git a/rtems/freebsd/netinet/ip_ipsec.c b/rtems/freebsd/netinet/ip_ipsec.c
new file mode 100644
index 00000000..dc415d35
--- /dev/null
+++ b/rtems/freebsd/netinet/ip_ipsec.c
@@ -0,0 +1,424 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 1982, 1986, 1988, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/freebsd/local/opt_ipsec.h>
+#include <rtems/freebsd/local/opt_sctp.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/errno.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/protosw.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/socketvar.h>
+#include <rtems/freebsd/sys/sysctl.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/route.h>
+#include <rtems/freebsd/net/vnet.h>
+
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/in_systm.h>
+#include <rtems/freebsd/netinet/in_var.h>
+#include <rtems/freebsd/netinet/ip.h>
+#include <rtems/freebsd/netinet/in_pcb.h>
+#include <rtems/freebsd/netinet/ip_var.h>
+#include <rtems/freebsd/netinet/ip_options.h>
+#include <rtems/freebsd/netinet/ip_ipsec.h>
+#ifdef SCTP
+#include <rtems/freebsd/netinet/sctp_crc32.h>
+#endif
+
+#include <rtems/freebsd/machine/in_cksum.h>
+
+#ifdef IPSEC
+#include <rtems/freebsd/netipsec/ipsec.h>
+#include <rtems/freebsd/netipsec/xform.h>
+#include <rtems/freebsd/netipsec/key.h>
+#endif /*IPSEC*/
+
+extern struct protosw inetsw[];
+
+#ifdef IPSEC
+#ifdef IPSEC_FILTERTUNNEL
+static VNET_DEFINE(int, ip4_ipsec_filtertunnel) = 1;
+#else
+static VNET_DEFINE(int, ip4_ipsec_filtertunnel) = 0;
+#endif
+#define V_ip4_ipsec_filtertunnel VNET(ip4_ipsec_filtertunnel)
+
+SYSCTL_DECL(_net_inet_ipsec);
+SYSCTL_VNET_INT(_net_inet_ipsec, OID_AUTO, filtertunnel,
+ CTLFLAG_RW, &VNET_NAME(ip4_ipsec_filtertunnel), 0,
+ "If set filter packets from an IPsec tunnel.");
+#endif /* IPSEC */
+
+/*
+ * Check if we have to jump over firewall processing for this packet.
+ * Called from ip_input().
+ * 1 = jump over firewall, 0 = packet goes through firewall.
+ */
+int
+ip_ipsec_filtertunnel(struct mbuf *m)
+{
+#if defined(IPSEC)
+
+ /*
+ * Bypass packet filtering for packets from a tunnel.
+ */
+ if (!V_ip4_ipsec_filtertunnel &&
+ m_tag_find(m, PACKET_TAG_IPSEC_IN_DONE, NULL) != NULL)
+ return 1;
+#endif
+ return 0;
+}
+
+/*
+ * Check if this packet has an active SA and needs to be dropped instead
+ * of forwarded.
+ * Called from ip_input().
+ * 1 = drop packet, 0 = forward packet.
+ */
+int
+ip_ipsec_fwd(struct mbuf *m)
+{
+#ifdef IPSEC
+ struct m_tag *mtag;
+ struct tdb_ident *tdbi;
+ struct secpolicy *sp;
+ int s, error;
+
+ mtag = m_tag_find(m, PACKET_TAG_IPSEC_IN_DONE, NULL);
+ s = splnet();
+ if (mtag != NULL) {
+ tdbi = (struct tdb_ident *)(mtag + 1);
+ sp = ipsec_getpolicy(tdbi, IPSEC_DIR_INBOUND);
+ } else {
+ sp = ipsec_getpolicybyaddr(m, IPSEC_DIR_INBOUND,
+ IP_FORWARDING, &error);
+ }
+ if (sp == NULL) { /* NB: can happen if error */
+ splx(s);
+ /*XXX error stat???*/
+ DPRINTF(("ip_input: no SP for forwarding\n")); /*XXX*/
+ return 1;
+ }
+
+ /*
+ * Check security policy against packet attributes.
+ */
+ error = ipsec_in_reject(sp, m);
+ KEY_FREESP(&sp);
+ splx(s);
+ if (error) {
+ IPSTAT_INC(ips_cantforward);
+ return 1;
+ }
+#endif /* IPSEC */
+ return 0;
+}
+
+/*
+ * Check if the protocol type has no further header, and if so do the
+ * IPsec policy check or rejection right now. Protocols with further
+ * headers get their IPsec treatment within the protocol-specific
+ * processing.
+ * Called from ip_input().
+ * 1 = drop packet, 0 = continue processing packet.
+ */
+int
+ip_ipsec_input(struct mbuf *m)
+{
+#ifdef IPSEC
+ struct ip *ip = mtod(m, struct ip *);
+ struct m_tag *mtag;
+ struct tdb_ident *tdbi;
+ struct secpolicy *sp;
+ int s, error;
+ /*
+ * Enforce IPsec policy checking if we are seeing the last header.
+ * Note that we do not visit this with protocols that have
+ * pcb-layer code, like UDP/TCP/raw IP.
+ */
+ if ((inetsw[ip_protox[ip->ip_p]].pr_flags & PR_LASTHDR) != 0) {
+ /*
+ * Check if the packet has already had IPsec processing
+ * done. If so, then just pass it along. This tag gets
+ * set during AH, ESP, etc. input handling, before the
+ * packet is returned to the ip input queue for delivery.
+ */
+ mtag = m_tag_find(m, PACKET_TAG_IPSEC_IN_DONE, NULL);
+ s = splnet();
+ if (mtag != NULL) {
+ tdbi = (struct tdb_ident *)(mtag + 1);
+ sp = ipsec_getpolicy(tdbi, IPSEC_DIR_INBOUND);
+ } else {
+ sp = ipsec_getpolicybyaddr(m, IPSEC_DIR_INBOUND,
+ IP_FORWARDING, &error);
+ }
+ if (sp != NULL) {
+ /*
+ * Check security policy against packet attributes.
+ */
+ error = ipsec_in_reject(sp, m);
+ KEY_FREESP(&sp);
+ } else {
+ /* XXX error stat??? */
+ error = EINVAL;
+ DPRINTF(("ip_input: no SP, packet discarded\n"));/*XXX*/
+ return 1;
+ }
+ splx(s);
+ if (error)
+ return 1;
+ }
+#endif /* IPSEC */
+ return 0;
+}
+
+/*
+ * Compute the MTU for a forwarded packet that gets IPSEC encapsulated.
+ * Called from ip_forward().
+ * Returns MTU suggestion for ICMP needfrag reply.
+ */
+int
+ip_ipsec_mtu(struct mbuf *m, int mtu)
+{
+ /*
+ * If the packet is routed over an IPsec tunnel, tell the
+ * originator the tunnel MTU.
+ * tunnel MTU = if MTU - sizeof(IP) - ESP/AH hdrsiz
+ * XXX quickhack!!!
+ */
+ struct secpolicy *sp = NULL;
+ int ipsecerror;
+ int ipsechdr;
+ struct route *ro;
+ sp = ipsec_getpolicybyaddr(m,
+ IPSEC_DIR_OUTBOUND,
+ IP_FORWARDING,
+ &ipsecerror);
+ if (sp != NULL) {
+ /* count IPsec header size */
+ ipsechdr = ipsec_hdrsiz(m, IPSEC_DIR_OUTBOUND, NULL);
+
+ /*
+ * find the correct route for outer IPv4
+ * header, compute tunnel MTU.
+ */
+ if (sp->req != NULL &&
+ sp->req->sav != NULL &&
+ sp->req->sav->sah != NULL) {
+ ro = &sp->req->sav->sah->route_cache.sa_route;
+ if (ro->ro_rt && ro->ro_rt->rt_ifp) {
+ mtu =
+ ro->ro_rt->rt_rmx.rmx_mtu ?
+ ro->ro_rt->rt_rmx.rmx_mtu :
+ ro->ro_rt->rt_ifp->if_mtu;
+ mtu -= ipsechdr;
+ }
+ }
+ KEY_FREESP(&sp);
+ }
+ return mtu;
+}
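+
+/*
+ * Worked example (numbers hypothetical): with a route MTU of 1500 and
+ * ipsec_hdrsiz() estimating 57 bytes of ESP/outer-header overhead, the
+ * function above would suggest 1500 - 57 = 1443 as the next-hop MTU for
+ * the ICMP needfrag reply.
+ */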
+
+/*
+ * Check the outbound security policy for a packet and, if required,
+ * perform the IPsec-related processing.
+ * Called from ip_output().
+ * 1 = drop packet, 0 = continue processing packet,
+ * -1 = packet was reinjected; stop processing packet.
+ */
+int
+ip_ipsec_output(struct mbuf **m, struct inpcb *inp, int *flags, int *error,
+ struct ifnet **ifp)
+{
+#ifdef IPSEC
+ struct secpolicy *sp = NULL;
+ struct ip *ip = mtod(*m, struct ip *);
+ struct tdb_ident *tdbi;
+ struct m_tag *mtag;
+ int s;
+ /*
+ * Check the security policy (SP) for the packet and, if
+ * required, do IPsec-related processing. There are two
+ * cases here; the first time a packet is sent through,
+ * it will be untagged and handled by ipsec4_checkpolicy.
+ * If the packet is resubmitted to ip_output (e.g. after
+ * AH, ESP, etc. processing), there will be a tag to bypass
+ * the lookup and related policy checking.
+ */
+ mtag = m_tag_find(*m, PACKET_TAG_IPSEC_PENDING_TDB, NULL);
+ s = splnet();
+ if (mtag != NULL) {
+ tdbi = (struct tdb_ident *)(mtag + 1);
+ sp = ipsec_getpolicy(tdbi, IPSEC_DIR_OUTBOUND);
+ if (sp == NULL)
+ *error = -EINVAL; /* force silent drop */
+ m_tag_delete(*m, mtag);
+ } else {
+ sp = ipsec4_checkpolicy(*m, IPSEC_DIR_OUTBOUND, *flags,
+ error, inp);
+ }
+ /*
+ * There are four return cases:
+ * sp != NULL apply IPsec policy
+ * sp == NULL, error == 0 no IPsec handling needed
+ * sp == NULL, error == -EINVAL discard packet w/o error
+ * sp == NULL, error != 0 discard packet, report error
+ */
+ if (sp != NULL) {
+ /* Loop detection, check if ipsec processing already done */
+ KASSERT(sp->req != NULL, ("ip_output: no ipsec request"));
+ for (mtag = m_tag_first(*m); mtag != NULL;
+ mtag = m_tag_next(*m, mtag)) {
+ if (mtag->m_tag_cookie != MTAG_ABI_COMPAT)
+ continue;
+ if (mtag->m_tag_id != PACKET_TAG_IPSEC_OUT_DONE &&
+ mtag->m_tag_id != PACKET_TAG_IPSEC_OUT_CRYPTO_NEEDED)
+ continue;
+ /*
+ * Check if policy has an SA associated with it.
+ * This can happen when an SP has yet to acquire
+ * an SA; e.g. on first reference. If it occurs,
+ * then we let ipsec4_process_packet do its thing.
+ */
+ if (sp->req->sav == NULL)
+ break;
+ tdbi = (struct tdb_ident *)(mtag + 1);
+ if (tdbi->spi == sp->req->sav->spi &&
+ tdbi->proto == sp->req->sav->sah->saidx.proto &&
+ bcmp(&tdbi->dst, &sp->req->sav->sah->saidx.dst,
+ sizeof (union sockaddr_union)) == 0) {
+ /*
+ * No IPsec processing is needed, free
+ * reference to SP.
+ *
+ * NB: null pointer to avoid free at
+ * done: below.
+ */
+ KEY_FREESP(&sp), sp = NULL;
+ splx(s);
+ goto done;
+ }
+ }
+
+ /*
+ * Do delayed checksums now because we send before
+ * this is done in the normal processing path.
+ */
+ if ((*m)->m_pkthdr.csum_flags & CSUM_DELAY_DATA) {
+ in_delayed_cksum(*m);
+ (*m)->m_pkthdr.csum_flags &= ~CSUM_DELAY_DATA;
+ }
+#ifdef SCTP
+ if ((*m)->m_pkthdr.csum_flags & CSUM_SCTP) {
+ sctp_delayed_cksum(*m, (uint32_t)(ip->ip_hl << 2));
+ (*m)->m_pkthdr.csum_flags &= ~CSUM_SCTP;
+ }
+#endif
+ ip->ip_len = htons(ip->ip_len);
+ ip->ip_off = htons(ip->ip_off);
+
+ /* NB: callee frees mbuf */
+ *error = ipsec4_process_packet(*m, sp->req, *flags, 0);
+ if (*error == EJUSTRETURN) {
+ /*
+ * We had a SP with a level of 'use' and no SA. We
+ * will just continue to process the packet without
+ * IPsec processing and return without error.
+ */
+ *error = 0;
+ ip->ip_len = ntohs(ip->ip_len);
+ ip->ip_off = ntohs(ip->ip_off);
+ goto done;
+ }
+ /*
+ * Preserve KAME behaviour: ENOENT can be returned
+ * when an SA acquire is in progress. Don't propagate
+ * this to user-level; it confuses applications.
+ *
+ * XXX this will go away when the SADB is redone.
+ */
+ if (*error == ENOENT)
+ *error = 0;
+ splx(s);
+ goto reinjected;
+ } else { /* sp == NULL */
+ splx(s);
+
+ if (*error != 0) {
+ /*
+ * Hack: -EINVAL is used to signal that a packet
+ * should be silently discarded. This is typically
+ * because we asked key management for an SA and
+ * it was delayed (e.g. kicked up to IKE).
+ */
+ if (*error == -EINVAL)
+ *error = 0;
+ goto bad;
+ } else {
+ /* No IPsec processing for this packet. */
+ }
+#ifdef notyet
+ /*
+ * If deferred crypto processing is needed, check that
+ * the interface supports it.
+ */
+ mtag = m_tag_find(*m, PACKET_TAG_IPSEC_OUT_CRYPTO_NEEDED, NULL);
+ if (mtag != NULL && ifp != NULL &&
+ ((*ifp)->if_capenable & IFCAP_IPSEC) == 0) {
+ /* notify IPsec to do its own crypto */
+ ipsp_skipcrypto_unmark((struct tdb_ident *)(mtag + 1));
+ *error = EHOSTUNREACH;
+ goto bad;
+ }
+#endif
+ }
+done:
+ if (sp != NULL)
+ KEY_FREESP(&sp);
+ return 0;
+reinjected:
+ if (sp != NULL)
+ KEY_FREESP(&sp);
+ return -1;
+bad:
+ if (sp != NULL)
+ KEY_FREESP(&sp);
+ return 1;
+#endif /* IPSEC */
+ return 0;
+}
diff --git a/rtems/freebsd/netinet/ip_ipsec.h b/rtems/freebsd/netinet/ip_ipsec.h
new file mode 100644
index 00000000..c4de1652
--- /dev/null
+++ b/rtems/freebsd/netinet/ip_ipsec.h
@@ -0,0 +1,41 @@
+/*-
+ * Copyright (c) 1982, 1986, 1988, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _NETINET_IP_IPSEC_HH_
+#define _NETINET_IP_IPSEC_HH_
+
+int ip_ipsec_filtertunnel(struct mbuf *);
+int ip_ipsec_fwd(struct mbuf *);
+int ip_ipsec_input(struct mbuf *);
+int ip_ipsec_mtu(struct mbuf *, int);
+int ip_ipsec_output(struct mbuf **, struct inpcb *, int *, int *,
+ struct ifnet **);
+#endif
diff --git a/rtems/freebsd/netinet/ip_mroute.c b/rtems/freebsd/netinet/ip_mroute.c
new file mode 100644
index 00000000..b568f7a0
--- /dev/null
+++ b/rtems/freebsd/netinet/ip_mroute.c
@@ -0,0 +1,2952 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 1989 Stephen Deering
+ * Copyright (c) 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Stephen Deering of Stanford University.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)ip_mroute.c 8.2 (Berkeley) 11/15/93
+ */
+
+/*
+ * IP multicast forwarding procedures
+ *
+ * Written by David Waitzman, BBN Labs, August 1988.
+ * Modified by Steve Deering, Stanford, February 1989.
+ * Modified by Mark J. Steiglitz, Stanford, May, 1991
+ * Modified by Van Jacobson, LBL, January 1993
+ * Modified by Ajit Thyagarajan, PARC, August 1993
+ * Modified by Bill Fenner, PARC, April 1995
+ * Modified by Ahmed Helmy, SGI, June 1996
+ * Modified by George Edmond Eddy (Rusty), ISI, February 1998
+ * Modified by Pavlin Radoslavov, USC/ISI, May 1998, August 1999, October 2000
+ * Modified by Hitoshi Asaeda, WIDE, August 2000
+ * Modified by Pavlin Radoslavov, ICSI, October 2002
+ *
+ * MROUTING Revision: 3.5
+ * and PIM-SMv2 and PIM-DM support, advanced API support,
+ * bandwidth metering and signaling
+ */
+
+/*
+ * TODO: Prefix functions with ipmf_.
+ * TODO: Maintain a refcount on if_allmulti() in ifnet or in the protocol
+ * domain attachment (if_afdata) so we can track consumers of that service.
+ * TODO: Deprecate routing socket path for SIOCGETSGCNT and SIOCGETVIFCNT,
+ * move it to socket options.
+ * TODO: Cleanup LSRR removal further.
+ * TODO: Push RSVP stubs into raw_ip.c.
+ * TODO: Use bitstring.h for vif set.
+ * TODO: Fix mrt6_ioctl dangling ref when dynamically loaded.
+ * TODO: Sync ip6_mroute.c with this file.
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/freebsd/local/opt_inet.h>
+#include <rtems/freebsd/local/opt_mrouting.h>
+
+#define _PIM_VT 1
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/stddef.h>
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/ktr.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/module.h>
+#include <rtems/freebsd/sys/priv.h>
+#include <rtems/freebsd/sys/protosw.h>
+#include <rtems/freebsd/sys/signalvar.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/socketvar.h>
+#include <rtems/freebsd/sys/sockio.h>
+#include <rtems/freebsd/sys/sx.h>
+#include <rtems/freebsd/sys/sysctl.h>
+#include <rtems/freebsd/sys/syslog.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/time.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/netisr.h>
+#include <rtems/freebsd/net/route.h>
+#include <rtems/freebsd/net/vnet.h>
+
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/igmp.h>
+#include <rtems/freebsd/netinet/in_systm.h>
+#include <rtems/freebsd/netinet/in_var.h>
+#include <rtems/freebsd/netinet/ip.h>
+#include <rtems/freebsd/netinet/ip_encap.h>
+#include <rtems/freebsd/netinet/ip_mroute.h>
+#include <rtems/freebsd/netinet/ip_var.h>
+#include <rtems/freebsd/netinet/ip_options.h>
+#include <rtems/freebsd/netinet/pim.h>
+#include <rtems/freebsd/netinet/pim_var.h>
+#include <rtems/freebsd/netinet/udp.h>
+
+#include <rtems/freebsd/machine/in_cksum.h>
+
+#include <rtems/freebsd/security/mac/mac_framework.h>
+
+#ifndef KTR_IPMF
+#define KTR_IPMF KTR_INET
+#endif
+
+#define VIFI_INVALID ((vifi_t) -1)
+#define M_HASCL(m) ((m)->m_flags & M_EXT)
+
+static VNET_DEFINE(uint32_t, last_tv_sec); /* last time we processed this */
+#define V_last_tv_sec VNET(last_tv_sec)
+
+static MALLOC_DEFINE(M_MRTABLE, "mroutetbl", "multicast forwarding cache");
+
+/*
+ * Locking. We use two locks: one for the virtual interface table and
+ * one for the forwarding table. These locks may be nested in which case
+ * the VIF lock must always be taken first. Note that each lock is used
+ * to cover not only the specific data structure but also related data
+ * structures.
+ */
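+
+/*
+ * The nesting order is visible in add_mfc() and X_ip_mforward() below,
+ * both of which take VIF_LOCK() and then MFC_LOCK().
+ */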
+
+static struct mtx mrouter_mtx;
+#define MROUTER_LOCK() mtx_lock(&mrouter_mtx)
+#define MROUTER_UNLOCK() mtx_unlock(&mrouter_mtx)
+#define MROUTER_LOCK_ASSERT() mtx_assert(&mrouter_mtx, MA_OWNED)
+#define MROUTER_LOCK_INIT() \
+ mtx_init(&mrouter_mtx, "IPv4 multicast forwarding", NULL, MTX_DEF)
+#define MROUTER_LOCK_DESTROY() mtx_destroy(&mrouter_mtx)
+
+static int ip_mrouter_cnt; /* # of vnets with active mrouters */
+static int ip_mrouter_unloading; /* Allow no more V_ip_mrouter sockets */
+
+static VNET_DEFINE(struct mrtstat, mrtstat);
+#define V_mrtstat VNET(mrtstat)
+SYSCTL_VNET_STRUCT(_net_inet_ip, OID_AUTO, mrtstat, CTLFLAG_RW,
+ &VNET_NAME(mrtstat), mrtstat,
+ "IPv4 Multicast Forwarding Statistics (struct mrtstat, "
+ "netinet/ip_mroute.h)");
+
+static VNET_DEFINE(u_long, mfchash);
+#define V_mfchash VNET(mfchash)
+#define MFCHASH(a, g) \
+ ((((a).s_addr >> 20) ^ ((a).s_addr >> 10) ^ (a).s_addr ^ \
+ ((g).s_addr >> 20) ^ ((g).s_addr >> 10) ^ (g).s_addr) & V_mfchash)
+#define MFCHASHSIZE 256
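+
+/*
+ * MFCHASH() XOR-folds shifted copies of the origin and group addresses
+ * and masks the result with V_mfchash, which hashinit_flags() in
+ * ip_mrouter_init() sets to the table size minus one, so addresses that
+ * differ only in their high-order bits still spread across buckets.
+ */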
+
+static u_long mfchashsize; /* Hash size */
+static VNET_DEFINE(u_char *, nexpire); /* 0..mfchashsize-1 */
+#define V_nexpire VNET(nexpire)
+static VNET_DEFINE(LIST_HEAD(mfchashhdr, mfc)*, mfchashtbl);
+#define V_mfchashtbl VNET(mfchashtbl)
+
+static struct mtx mfc_mtx;
+#define MFC_LOCK() mtx_lock(&mfc_mtx)
+#define MFC_UNLOCK() mtx_unlock(&mfc_mtx)
+#define MFC_LOCK_ASSERT() mtx_assert(&mfc_mtx, MA_OWNED)
+#define MFC_LOCK_INIT() \
+ mtx_init(&mfc_mtx, "IPv4 multicast forwarding cache", NULL, MTX_DEF)
+#define MFC_LOCK_DESTROY() mtx_destroy(&mfc_mtx)
+
+static VNET_DEFINE(vifi_t, numvifs);
+#define V_numvifs VNET(numvifs)
+static VNET_DEFINE(struct vif, viftable[MAXVIFS]);
+#define V_viftable VNET(viftable)
+SYSCTL_VNET_OPAQUE(_net_inet_ip, OID_AUTO, viftable, CTLFLAG_RD,
+ &VNET_NAME(viftable), sizeof(V_viftable), "S,vif[MAXVIFS]",
+ "IPv4 Multicast Interfaces (struct vif[MAXVIFS], netinet/ip_mroute.h)");
+
+static struct mtx vif_mtx;
+#define VIF_LOCK() mtx_lock(&vif_mtx)
+#define VIF_UNLOCK() mtx_unlock(&vif_mtx)
+#define VIF_LOCK_ASSERT() mtx_assert(&vif_mtx, MA_OWNED)
+#define VIF_LOCK_INIT() \
+ mtx_init(&vif_mtx, "IPv4 multicast interfaces", NULL, MTX_DEF)
+#define VIF_LOCK_DESTROY() mtx_destroy(&vif_mtx)
+
+static eventhandler_tag if_detach_event_tag = NULL;
+
+static VNET_DEFINE(struct callout, expire_upcalls_ch);
+#define V_expire_upcalls_ch VNET(expire_upcalls_ch)
+
+#define EXPIRE_TIMEOUT (hz / 4) /* 4x / second */
+#define UPCALL_EXPIRE 6 /* number of timeouts */
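+
+/*
+ * An unserviced upcall entry therefore lives for UPCALL_EXPIRE
+ * expirations of EXPIRE_TIMEOUT ticks each, i.e. 6 * (hz / 4) ticks,
+ * or about 1.5 seconds.
+ */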
+
+/*
+ * Bandwidth meter variables and constants
+ */
+static MALLOC_DEFINE(M_BWMETER, "bwmeter", "multicast upcall bw meters");
+/*
+ * Pending timeouts are stored in a hash table, the key being the
+ * expiration time. Periodically, the entries are analysed and processed.
+ */
+#define BW_METER_BUCKETS 1024
+static VNET_DEFINE(struct bw_meter*, bw_meter_timers[BW_METER_BUCKETS]);
+#define V_bw_meter_timers VNET(bw_meter_timers)
+static VNET_DEFINE(struct callout, bw_meter_ch);
+#define V_bw_meter_ch VNET(bw_meter_ch)
+#define BW_METER_PERIOD (hz) /* periodical handling of bw meters */
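+
+/*
+ * With BW_METER_PERIOD equal to hz, the timer handler advances one hash
+ * bin per second, so BW_METER_BUCKETS (1024) bins cover roughly 17
+ * minutes of pending expirations before the hash wraps.
+ */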
+
+/*
+ * Pending upcalls are stored in a vector which is flushed when
+ * full, or periodically.
+ */
+static VNET_DEFINE(struct bw_upcall, bw_upcalls[BW_UPCALLS_MAX]);
+#define V_bw_upcalls VNET(bw_upcalls)
+static VNET_DEFINE(u_int, bw_upcalls_n); /* # of pending upcalls */
+#define V_bw_upcalls_n VNET(bw_upcalls_n)
+static VNET_DEFINE(struct callout, bw_upcalls_ch);
+#define V_bw_upcalls_ch VNET(bw_upcalls_ch)
+
+#define BW_UPCALLS_PERIOD (hz) /* periodical flush of bw upcalls */
+
+static VNET_DEFINE(struct pimstat, pimstat);
+#define V_pimstat VNET(pimstat)
+
+SYSCTL_NODE(_net_inet, IPPROTO_PIM, pim, CTLFLAG_RW, 0, "PIM");
+SYSCTL_VNET_STRUCT(_net_inet_pim, PIMCTL_STATS, stats, CTLFLAG_RD,
+ &VNET_NAME(pimstat), pimstat,
+ "PIM Statistics (struct pimstat, netinet/pim_var.h)");
+
+static u_long pim_squelch_wholepkt = 0;
+SYSCTL_ULONG(_net_inet_pim, OID_AUTO, squelch_wholepkt, CTLFLAG_RW,
+ &pim_squelch_wholepkt, 0,
+ "Disable IGMP_WHOLEPKT notifications if rendezvous point is unspecified");
+
+extern struct domain inetdomain;
+static const struct protosw in_pim_protosw = {
+ .pr_type = SOCK_RAW,
+ .pr_domain = &inetdomain,
+ .pr_protocol = IPPROTO_PIM,
+ .pr_flags = PR_ATOMIC|PR_ADDR|PR_LASTHDR,
+ .pr_input = pim_input,
+ .pr_output = (pr_output_t*)rip_output,
+ .pr_ctloutput = rip_ctloutput,
+ .pr_usrreqs = &rip_usrreqs
+};
+static const struct encaptab *pim_encap_cookie;
+
+static int pim_encapcheck(const struct mbuf *, int, int, void *);
+
+/*
+ * Note: the PIM Register encapsulation adds the following in front of a
+ * data packet:
+ *
+ * struct pim_encap_hdr {
+ * struct ip ip;
+ * struct pim_encap_pimhdr pim;
+ * }
+ *
+ */
+
+struct pim_encap_pimhdr {
+ struct pim pim;
+ uint32_t flags;
+};
+#define PIM_ENCAP_TTL 64
+
+static struct ip pim_encap_iphdr = {
+#if BYTE_ORDER == LITTLE_ENDIAN
+ sizeof(struct ip) >> 2,
+ IPVERSION,
+#else
+ IPVERSION,
+ sizeof(struct ip) >> 2,
+#endif
+ 0, /* tos */
+ sizeof(struct ip), /* total length */
+ 0, /* id */
+ 0, /* frag offset */
+ PIM_ENCAP_TTL,
+ IPPROTO_PIM,
+ 0, /* checksum */
+};
+
+static struct pim_encap_pimhdr pim_encap_pimhdr = {
+ {
+ PIM_MAKE_VT(PIM_VERSION, PIM_REGISTER), /* PIM vers and message type */
+ 0, /* reserved */
+ 0, /* checksum */
+ },
+ 0 /* flags */
+};
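+
+/*
+ * The register encapsulation therefore prepends sizeof(struct ip) +
+ * sizeof(struct pim_encap_pimhdr) bytes (20 + 8 = 28) to each data
+ * packet forwarded to the rendezvous point.
+ */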
+
+static VNET_DEFINE(vifi_t, reg_vif_num) = VIFI_INVALID;
+#define V_reg_vif_num VNET(reg_vif_num)
+static VNET_DEFINE(struct ifnet, multicast_register_if);
+#define V_multicast_register_if VNET(multicast_register_if)
+
+/*
+ * Private variables.
+ */
+
+static u_long X_ip_mcast_src(int);
+static int X_ip_mforward(struct ip *, struct ifnet *, struct mbuf *,
+ struct ip_moptions *);
+static int X_ip_mrouter_done(void);
+static int X_ip_mrouter_get(struct socket *, struct sockopt *);
+static int X_ip_mrouter_set(struct socket *, struct sockopt *);
+static int X_legal_vif_num(int);
+static int X_mrt_ioctl(u_long, caddr_t, int);
+
+static int add_bw_upcall(struct bw_upcall *);
+static int add_mfc(struct mfcctl2 *);
+static int add_vif(struct vifctl *);
+static void bw_meter_prepare_upcall(struct bw_meter *, struct timeval *);
+static void bw_meter_process(void);
+static void bw_meter_receive_packet(struct bw_meter *, int,
+ struct timeval *);
+static void bw_upcalls_send(void);
+static int del_bw_upcall(struct bw_upcall *);
+static int del_mfc(struct mfcctl2 *);
+static int del_vif(vifi_t);
+static int del_vif_locked(vifi_t);
+static void expire_bw_meter_process(void *);
+static void expire_bw_upcalls_send(void *);
+static void expire_mfc(struct mfc *);
+static void expire_upcalls(void *);
+static void free_bw_list(struct bw_meter *);
+static int get_sg_cnt(struct sioc_sg_req *);
+static int get_vif_cnt(struct sioc_vif_req *);
+static void if_detached_event(void *, struct ifnet *);
+static int ip_mdq(struct mbuf *, struct ifnet *, struct mfc *, vifi_t);
+static int ip_mrouter_init(struct socket *, int);
+static __inline struct mfc *
+ mfc_find(struct in_addr *, struct in_addr *);
+static void phyint_send(struct ip *, struct vif *, struct mbuf *);
+static struct mbuf *
+ pim_register_prepare(struct ip *, struct mbuf *);
+static int pim_register_send(struct ip *, struct vif *,
+ struct mbuf *, struct mfc *);
+static int pim_register_send_rp(struct ip *, struct vif *,
+ struct mbuf *, struct mfc *);
+static int pim_register_send_upcall(struct ip *, struct vif *,
+ struct mbuf *, struct mfc *);
+static void schedule_bw_meter(struct bw_meter *, struct timeval *);
+static void send_packet(struct vif *, struct mbuf *);
+static int set_api_config(uint32_t *);
+static int set_assert(int);
+static int socket_send(struct socket *, struct mbuf *,
+ struct sockaddr_in *);
+static void unschedule_bw_meter(struct bw_meter *);
+
+/*
+ * Kernel multicast forwarding API capabilities and setup.
+ * If more API capabilities are added to the kernel, they should be
+ * recorded in `mrt_api_support'.
+ */
+#define MRT_API_VERSION 0x0305
+
+static const int mrt_api_version = MRT_API_VERSION;
+static const uint32_t mrt_api_support = (MRT_MFC_FLAGS_DISABLE_WRONGVIF |
+ MRT_MFC_FLAGS_BORDER_VIF |
+ MRT_MFC_RP |
+ MRT_MFC_BW_UPCALL);
+static VNET_DEFINE(uint32_t, mrt_api_config);
+#define V_mrt_api_config VNET(mrt_api_config)
+static VNET_DEFINE(int, pim_assert_enabled);
+#define V_pim_assert_enabled VNET(pim_assert_enabled)
+static struct timeval pim_assert_interval = { 3, 0 }; /* Rate limit */
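+
+/*
+ * Together with ratecheck() in ip_mdq(), pim_assert_interval limits
+ * IGMPMSG_WRONGVIF upcalls to at most one per mfc entry every 3 seconds.
+ */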
+
+/*
+ * Find a route for a given origin IP address and multicast group address.
+ * Entries whose mfc_stall queue is non-empty are still awaiting
+ * resolution by the routing daemon and are skipped, so callers only
+ * ever see fully installed routes.
+ * Statistics must be updated by the caller.
+ */
+static __inline struct mfc *
+mfc_find(struct in_addr *o, struct in_addr *g)
+{
+ struct mfc *rt;
+
+ MFC_LOCK_ASSERT();
+
+ LIST_FOREACH(rt, &V_mfchashtbl[MFCHASH(*o, *g)], mfc_hash) {
+ if (in_hosteq(rt->mfc_origin, *o) &&
+ in_hosteq(rt->mfc_mcastgrp, *g) &&
+ TAILQ_EMPTY(&rt->mfc_stall))
+ break;
+ }
+
+ return (rt);
+}
+
+/*
+ * Handle MRT setsockopt commands to modify the multicast forwarding tables.
+ */
+static int
+X_ip_mrouter_set(struct socket *so, struct sockopt *sopt)
+{
+ int error, optval;
+ vifi_t vifi;
+ struct vifctl vifc;
+ struct mfcctl2 mfc;
+ struct bw_upcall bw_upcall;
+ uint32_t i;
+
+ if (so != V_ip_mrouter && sopt->sopt_name != MRT_INIT)
+ return EPERM;
+
+ error = 0;
+ switch (sopt->sopt_name) {
+ case MRT_INIT:
+ error = sooptcopyin(sopt, &optval, sizeof optval, sizeof optval);
+ if (error)
+ break;
+ error = ip_mrouter_init(so, optval);
+ break;
+
+ case MRT_DONE:
+ error = ip_mrouter_done();
+ break;
+
+ case MRT_ADD_VIF:
+ error = sooptcopyin(sopt, &vifc, sizeof vifc, sizeof vifc);
+ if (error)
+ break;
+ error = add_vif(&vifc);
+ break;
+
+ case MRT_DEL_VIF:
+ error = sooptcopyin(sopt, &vifi, sizeof vifi, sizeof vifi);
+ if (error)
+ break;
+ error = del_vif(vifi);
+ break;
+
+ case MRT_ADD_MFC:
+ case MRT_DEL_MFC:
+ /*
+ * select data size depending on API version.
+ */
+ if (sopt->sopt_name == MRT_ADD_MFC &&
+ V_mrt_api_config & MRT_API_FLAGS_ALL) {
+ error = sooptcopyin(sopt, &mfc, sizeof(struct mfcctl2),
+ sizeof(struct mfcctl2));
+ } else {
+ error = sooptcopyin(sopt, &mfc, sizeof(struct mfcctl),
+ sizeof(struct mfcctl));
+ bzero((caddr_t)&mfc + sizeof(struct mfcctl),
+ sizeof(mfc) - sizeof(struct mfcctl));
+ }
+ if (error)
+ break;
+ if (sopt->sopt_name == MRT_ADD_MFC)
+ error = add_mfc(&mfc);
+ else
+ error = del_mfc(&mfc);
+ break;
+
+ case MRT_ASSERT:
+ error = sooptcopyin(sopt, &optval, sizeof optval, sizeof optval);
+ if (error)
+ break;
+ set_assert(optval);
+ break;
+
+ case MRT_API_CONFIG:
+ error = sooptcopyin(sopt, &i, sizeof i, sizeof i);
+ if (!error)
+ error = set_api_config(&i);
+ if (!error)
+ error = sooptcopyout(sopt, &i, sizeof i);
+ break;
+
+ case MRT_ADD_BW_UPCALL:
+ case MRT_DEL_BW_UPCALL:
+ error = sooptcopyin(sopt, &bw_upcall, sizeof bw_upcall,
+ sizeof bw_upcall);
+ if (error)
+ break;
+ if (sopt->sopt_name == MRT_ADD_BW_UPCALL)
+ error = add_bw_upcall(&bw_upcall);
+ else
+ error = del_bw_upcall(&bw_upcall);
+ break;
+
+ default:
+ error = EOPNOTSUPP;
+ break;
+ }
+ return error;
+}
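+
+/*
+ * Illustrative userland view of the interface above (a sketch, not part
+ * of this file): a routing daemon enables forwarding with
+ *
+ *	int v = 1;
+ *	int s = socket(AF_INET, SOCK_RAW, IPPROTO_IGMP);
+ *	setsockopt(s, IPPROTO_IP, MRT_INIT, &v, sizeof(v));
+ *
+ * (ip_mrouter_init() below requires a raw IGMP socket and version 1)
+ * and disables it again with MRT_DONE on the same socket.
+ */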
+
+/*
+ * Handle MRT getsockopt commands
+ */
+static int
+X_ip_mrouter_get(struct socket *so, struct sockopt *sopt)
+{
+ int error;
+
+ switch (sopt->sopt_name) {
+ case MRT_VERSION:
+ error = sooptcopyout(sopt, &mrt_api_version, sizeof mrt_api_version);
+ break;
+
+ case MRT_ASSERT:
+ error = sooptcopyout(sopt, &V_pim_assert_enabled,
+ sizeof V_pim_assert_enabled);
+ break;
+
+ case MRT_API_SUPPORT:
+ error = sooptcopyout(sopt, &mrt_api_support, sizeof mrt_api_support);
+ break;
+
+ case MRT_API_CONFIG:
+ error = sooptcopyout(sopt, &V_mrt_api_config, sizeof V_mrt_api_config);
+ break;
+
+ default:
+ error = EOPNOTSUPP;
+ break;
+ }
+ return error;
+}
+
+/*
+ * Handle ioctl commands to obtain information from the cache
+ */
+static int
+X_mrt_ioctl(u_long cmd, caddr_t data, int fibnum __unused)
+{
+ int error = 0;
+
+ /*
+ * Currently the only function calling this ioctl routine is rtioctl().
+ * Typically, only root can create the raw socket in order to execute
+ * this ioctl method; however, the request might come from a prison.
+ */
+ error = priv_check(curthread, PRIV_NETINET_MROUTE);
+ if (error)
+ return (error);
+ switch (cmd) {
+ case SIOCGETVIFCNT:
+ error = get_vif_cnt((struct sioc_vif_req *)data);
+ break;
+
+ case SIOCGETSGCNT:
+ error = get_sg_cnt((struct sioc_sg_req *)data);
+ break;
+
+ default:
+ error = EINVAL;
+ break;
+ }
+ return error;
+}
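+
+/*
+ * Illustrative userland usage (a sketch, not part of this file): a
+ * daemon can read the per-(S,G) counters over its raw IGMP socket:
+ *
+ *	struct sioc_sg_req sg;
+ *	sg.src.s_addr = source; sg.grp.s_addr = group;
+ *	if (ioctl(sock, SIOCGETSGCNT, &sg) == 0)
+ *		printf("pkts %lu\n", (u_long)sg.pktcnt);
+ *
+ * priv_check() above restricts these requests to privileged callers.
+ */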
+
+/*
+ * returns the packet, byte, rpf-failure count for the source group provided
+ */
+static int
+get_sg_cnt(struct sioc_sg_req *req)
+{
+ struct mfc *rt;
+
+ MFC_LOCK();
+ rt = mfc_find(&req->src, &req->grp);
+ if (rt == NULL) {
+ MFC_UNLOCK();
+ req->pktcnt = req->bytecnt = req->wrong_if = 0xffffffff;
+ return EADDRNOTAVAIL;
+ }
+ req->pktcnt = rt->mfc_pkt_cnt;
+ req->bytecnt = rt->mfc_byte_cnt;
+ req->wrong_if = rt->mfc_wrong_if;
+ MFC_UNLOCK();
+ return 0;
+}
+
+/*
+ * returns the input and output packet and byte counts on the vif provided
+ */
+static int
+get_vif_cnt(struct sioc_vif_req *req)
+{
+ vifi_t vifi = req->vifi;
+
+ VIF_LOCK();
+ if (vifi >= V_numvifs) {
+ VIF_UNLOCK();
+ return EINVAL;
+ }
+
+ req->icount = V_viftable[vifi].v_pkt_in;
+ req->ocount = V_viftable[vifi].v_pkt_out;
+ req->ibytes = V_viftable[vifi].v_bytes_in;
+ req->obytes = V_viftable[vifi].v_bytes_out;
+ VIF_UNLOCK();
+
+ return 0;
+}
+
+static void
+if_detached_event(void *arg __unused, struct ifnet *ifp)
+{
+ vifi_t vifi;
+ int i;
+
+ MROUTER_LOCK();
+
+ if (V_ip_mrouter == NULL) {
+ MROUTER_UNLOCK();
+ return;
+ }
+
+ VIF_LOCK();
+ MFC_LOCK();
+
+ /*
+ * Tear down multicast forwarder state associated with this ifnet.
+ * 1. Walk the vif list, matching vifs against this ifnet.
+ * 2. Walk the multicast forwarding cache (mfc) looking for
+ * inner matches with this vif's index.
+ * 3. Expire any matching multicast forwarding cache entries.
+ * 4. Free vif state. This should disable ALLMULTI on the interface.
+ */
+ for (vifi = 0; vifi < V_numvifs; vifi++) {
+ if (V_viftable[vifi].v_ifp != ifp)
+ continue;
+ for (i = 0; i < mfchashsize; i++) {
+ struct mfc *rt, *nrt;
+ for (rt = LIST_FIRST(&V_mfchashtbl[i]); rt; rt = nrt) {
+ nrt = LIST_NEXT(rt, mfc_hash);
+ if (rt->mfc_parent == vifi) {
+ expire_mfc(rt);
+ }
+ }
+ }
+ del_vif_locked(vifi);
+ }
+
+ MFC_UNLOCK();
+ VIF_UNLOCK();
+
+ MROUTER_UNLOCK();
+}
+
+/*
+ * Enable multicast forwarding.
+ */
+static int
+ip_mrouter_init(struct socket *so, int version)
+{
+
+ CTR3(KTR_IPMF, "%s: so_type %d, pr_protocol %d", __func__,
+ so->so_type, so->so_proto->pr_protocol);
+
+ if (so->so_type != SOCK_RAW || so->so_proto->pr_protocol != IPPROTO_IGMP)
+ return EOPNOTSUPP;
+
+ if (version != 1)
+ return ENOPROTOOPT;
+
+ MROUTER_LOCK();
+
+ if (ip_mrouter_unloading) {
+ MROUTER_UNLOCK();
+ return ENOPROTOOPT;
+ }
+
+ if (V_ip_mrouter != NULL) {
+ MROUTER_UNLOCK();
+ return EADDRINUSE;
+ }
+
+ V_mfchashtbl = hashinit_flags(mfchashsize, M_MRTABLE, &V_mfchash,
+ HASH_NOWAIT);
+
+ callout_reset(&V_expire_upcalls_ch, EXPIRE_TIMEOUT, expire_upcalls,
+ curvnet);
+ callout_reset(&V_bw_upcalls_ch, BW_UPCALLS_PERIOD, expire_bw_upcalls_send,
+ curvnet);
+ callout_reset(&V_bw_meter_ch, BW_METER_PERIOD, expire_bw_meter_process,
+ curvnet);
+
+ V_ip_mrouter = so;
+ ip_mrouter_cnt++;
+
+ MROUTER_UNLOCK();
+
+ CTR1(KTR_IPMF, "%s: done", __func__);
+
+ return 0;
+}
+
+/*
+ * Disable multicast forwarding.
+ */
+static int
+X_ip_mrouter_done(void)
+{
+ vifi_t vifi;
+ int i;
+ struct ifnet *ifp;
+ struct ifreq ifr;
+
+ MROUTER_LOCK();
+
+ if (V_ip_mrouter == NULL) {
+ MROUTER_UNLOCK();
+ return EINVAL;
+ }
+
+ /*
+ * Detach/disable hooks to the rest of the system.
+ */
+ V_ip_mrouter = NULL;
+ ip_mrouter_cnt--;
+ V_mrt_api_config = 0;
+
+ VIF_LOCK();
+
+ /*
+ * For each phyint in use, disable promiscuous reception of all IP
+ * multicasts.
+ */
+ for (vifi = 0; vifi < V_numvifs; vifi++) {
+ if (!in_nullhost(V_viftable[vifi].v_lcl_addr) &&
+ !(V_viftable[vifi].v_flags & (VIFF_TUNNEL | VIFF_REGISTER))) {
+ struct sockaddr_in *so = (struct sockaddr_in *)&(ifr.ifr_addr);
+
+ so->sin_len = sizeof(struct sockaddr_in);
+ so->sin_family = AF_INET;
+ so->sin_addr.s_addr = INADDR_ANY;
+ ifp = V_viftable[vifi].v_ifp;
+ if_allmulti(ifp, 0);
+ }
+ }
+ bzero((caddr_t)V_viftable, sizeof(V_viftable));
+ V_numvifs = 0;
+ V_pim_assert_enabled = 0;
+
+ VIF_UNLOCK();
+
+ callout_stop(&V_expire_upcalls_ch);
+ callout_stop(&V_bw_upcalls_ch);
+ callout_stop(&V_bw_meter_ch);
+
+ MFC_LOCK();
+
+ /*
+ * Free all multicast forwarding cache entries.
+ * Do not use hashdestroy(), as we must perform other cleanup.
+ */
+ for (i = 0; i < mfchashsize; i++) {
+ struct mfc *rt, *nrt;
+ for (rt = LIST_FIRST(&V_mfchashtbl[i]); rt; rt = nrt) {
+ nrt = LIST_NEXT(rt, mfc_hash);
+ expire_mfc(rt);
+ }
+ }
+ free(V_mfchashtbl, M_MRTABLE);
+ V_mfchashtbl = NULL;
+
+ bzero(V_nexpire, sizeof(V_nexpire[0]) * mfchashsize);
+
+ V_bw_upcalls_n = 0;
+ bzero(V_bw_meter_timers, sizeof(V_bw_meter_timers));
+
+ MFC_UNLOCK();
+
+ V_reg_vif_num = VIFI_INVALID;
+
+ MROUTER_UNLOCK();
+
+ CTR1(KTR_IPMF, "%s: done", __func__);
+
+ return 0;
+}
+
+/*
+ * Set PIM assert processing global
+ */
+static int
+set_assert(int i)
+{
+ if ((i != 1) && (i != 0))
+ return EINVAL;
+
+ V_pim_assert_enabled = i;
+
+ return 0;
+}
+
+/*
+ * Configure API capabilities
+ */
+static int
+set_api_config(uint32_t *apival)
+{
+ int i;
+
+ /*
+ * We can set the API capabilities only if it is the first operation
+ * after MRT_INIT. I.e.:
+ * - there are no vifs installed
+ * - pim_assert is not enabled
+ * - the MFC table is empty
+ */
+ if (V_numvifs > 0) {
+ *apival = 0;
+ return EPERM;
+ }
+ if (V_pim_assert_enabled) {
+ *apival = 0;
+ return EPERM;
+ }
+
+ MFC_LOCK();
+
+ for (i = 0; i < mfchashsize; i++) {
+ if (LIST_FIRST(&V_mfchashtbl[i]) != NULL) {
+ MFC_UNLOCK();
+ *apival = 0;
+ return EPERM;
+ }
+ }
+
+ MFC_UNLOCK();
+
+ V_mrt_api_config = *apival & mrt_api_support;
+ *apival = V_mrt_api_config;
+
+ return 0;
+}
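+
+/*
+ * NB: the value latched in V_mrt_api_config persists until
+ * X_ip_mrouter_done() clears it, so a daemon must renegotiate its API
+ * capabilities after every MRT_DONE/MRT_INIT cycle.
+ */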
+
+/*
+ * Add a vif to the vif table
+ */
+static int
+add_vif(struct vifctl *vifcp)
+{
+ struct vif *vifp = V_viftable + vifcp->vifc_vifi;
+ struct sockaddr_in sin = {sizeof sin, AF_INET};
+ struct ifaddr *ifa;
+ struct ifnet *ifp;
+ int error;
+
+ VIF_LOCK();
+ if (vifcp->vifc_vifi >= MAXVIFS) {
+ VIF_UNLOCK();
+ return EINVAL;
+ }
+ /* rate limiting is no longer supported by this code */
+ if (vifcp->vifc_rate_limit != 0) {
+ log(LOG_ERR, "rate limiting is no longer supported\n");
+ VIF_UNLOCK();
+ return EINVAL;
+ }
+ if (!in_nullhost(vifp->v_lcl_addr)) {
+ VIF_UNLOCK();
+ return EADDRINUSE;
+ }
+ if (in_nullhost(vifcp->vifc_lcl_addr)) {
+ VIF_UNLOCK();
+ return EADDRNOTAVAIL;
+ }
+
+ /* Find the interface with an address in AF_INET family */
+ if (vifcp->vifc_flags & VIFF_REGISTER) {
+ /*
+ * XXX: Because VIFF_REGISTER does not really need a valid
+ * local interface (e.g. it could be 127.0.0.2), we don't
+ * check its address.
+ */
+ ifp = NULL;
+ } else {
+ sin.sin_addr = vifcp->vifc_lcl_addr;
+ ifa = ifa_ifwithaddr((struct sockaddr *)&sin);
+ if (ifa == NULL) {
+ VIF_UNLOCK();
+ return EADDRNOTAVAIL;
+ }
+ ifp = ifa->ifa_ifp;
+ ifa_free(ifa);
+ }
+
+ if ((vifcp->vifc_flags & VIFF_TUNNEL) != 0) {
+ CTR1(KTR_IPMF, "%s: tunnels are no longer supported", __func__);
+ VIF_UNLOCK();
+ return EOPNOTSUPP;
+ } else if (vifcp->vifc_flags & VIFF_REGISTER) {
+ ifp = &V_multicast_register_if;
+ CTR2(KTR_IPMF, "%s: add register vif for ifp %p", __func__, ifp);
+ if (V_reg_vif_num == VIFI_INVALID) {
+ if_initname(&V_multicast_register_if, "register_vif", 0);
+ V_multicast_register_if.if_flags = IFF_LOOPBACK;
+ V_reg_vif_num = vifcp->vifc_vifi;
+ }
+ } else { /* Make sure the interface supports multicast */
+ if ((ifp->if_flags & IFF_MULTICAST) == 0) {
+ VIF_UNLOCK();
+ return EOPNOTSUPP;
+ }
+
+ /* Enable promiscuous reception of all IP multicasts from the if */
+ error = if_allmulti(ifp, 1);
+ if (error) {
+ VIF_UNLOCK();
+ return error;
+ }
+ }
+
+ vifp->v_flags = vifcp->vifc_flags;
+ vifp->v_threshold = vifcp->vifc_threshold;
+ vifp->v_lcl_addr = vifcp->vifc_lcl_addr;
+ vifp->v_rmt_addr = vifcp->vifc_rmt_addr;
+ vifp->v_ifp = ifp;
+ /* initialize per vif pkt counters */
+ vifp->v_pkt_in = 0;
+ vifp->v_pkt_out = 0;
+ vifp->v_bytes_in = 0;
+ vifp->v_bytes_out = 0;
+ bzero(&vifp->v_route, sizeof(vifp->v_route));
+
+ /* Adjust numvifs up if the vifi is higher than numvifs */
+ if (V_numvifs <= vifcp->vifc_vifi)
+ V_numvifs = vifcp->vifc_vifi + 1;
+
+ VIF_UNLOCK();
+
+ CTR4(KTR_IPMF, "%s: add vif %d laddr %s thresh %x", __func__,
+ (int)vifcp->vifc_vifi, inet_ntoa(vifcp->vifc_lcl_addr),
+ (int)vifcp->vifc_threshold);
+
+ return 0;
+}
+
+/*
+ * Delete a vif from the vif table
+ */
+static int
+del_vif_locked(vifi_t vifi)
+{
+ struct vif *vifp;
+
+ VIF_LOCK_ASSERT();
+
+ if (vifi >= V_numvifs) {
+ return EINVAL;
+ }
+ vifp = &V_viftable[vifi];
+ if (in_nullhost(vifp->v_lcl_addr)) {
+ return EADDRNOTAVAIL;
+ }
+
+ if (!(vifp->v_flags & (VIFF_TUNNEL | VIFF_REGISTER)))
+ if_allmulti(vifp->v_ifp, 0);
+
+ if (vifp->v_flags & VIFF_REGISTER)
+ V_reg_vif_num = VIFI_INVALID;
+
+ bzero((caddr_t)vifp, sizeof (*vifp));
+
+ CTR2(KTR_IPMF, "%s: delete vif %d", __func__, (int)vifi);
+
+ /* Adjust numvifs down */
+ for (vifi = V_numvifs; vifi > 0; vifi--)
+ if (!in_nullhost(V_viftable[vifi-1].v_lcl_addr))
+ break;
+ V_numvifs = vifi;
+
+ return 0;
+}
+
+static int
+del_vif(vifi_t vifi)
+{
+ int cc;
+
+ VIF_LOCK();
+ cc = del_vif_locked(vifi);
+ VIF_UNLOCK();
+
+ return cc;
+}
+
+/*
+ * update an mfc entry without resetting counters and S,G addresses.
+ */
+static void
+update_mfc_params(struct mfc *rt, struct mfcctl2 *mfccp)
+{
+ int i;
+
+ rt->mfc_parent = mfccp->mfcc_parent;
+ for (i = 0; i < V_numvifs; i++) {
+ rt->mfc_ttls[i] = mfccp->mfcc_ttls[i];
+ rt->mfc_flags[i] = mfccp->mfcc_flags[i] & V_mrt_api_config &
+ MRT_MFC_FLAGS_ALL;
+ }
+ /* set the RP address */
+ if (V_mrt_api_config & MRT_MFC_RP)
+ rt->mfc_rp = mfccp->mfcc_rp;
+ else
+ rt->mfc_rp.s_addr = INADDR_ANY;
+}
+
+/*
+ * fully initialize an mfc entry from the parameter.
+ */
+static void
+init_mfc_params(struct mfc *rt, struct mfcctl2 *mfccp)
+{
+ rt->mfc_origin = mfccp->mfcc_origin;
+ rt->mfc_mcastgrp = mfccp->mfcc_mcastgrp;
+
+ update_mfc_params(rt, mfccp);
+
+ /* initialize pkt counters per src-grp */
+ rt->mfc_pkt_cnt = 0;
+ rt->mfc_byte_cnt = 0;
+ rt->mfc_wrong_if = 0;
+ timevalclear(&rt->mfc_last_assert);
+}
+
+static void
+expire_mfc(struct mfc *rt)
+{
+ struct rtdetq *rte, *nrte;
+
+ free_bw_list(rt->mfc_bw_meter);
+
+ TAILQ_FOREACH_SAFE(rte, &rt->mfc_stall, rte_link, nrte) {
+ m_freem(rte->m);
+ TAILQ_REMOVE(&rt->mfc_stall, rte, rte_link);
+ free(rte, M_MRTABLE);
+ }
+
+ LIST_REMOVE(rt, mfc_hash);
+ free(rt, M_MRTABLE);
+}
+
+/*
+ * Add an mfc entry
+ */
+static int
+add_mfc(struct mfcctl2 *mfccp)
+{
+ struct mfc *rt;
+ struct rtdetq *rte, *nrte;
+ u_long hash = 0;
+ u_short nstl;
+
+ VIF_LOCK();
+ MFC_LOCK();
+
+ rt = mfc_find(&mfccp->mfcc_origin, &mfccp->mfcc_mcastgrp);
+
+ /* If an entry already exists, just update the fields */
+ if (rt) {
+ CTR4(KTR_IPMF, "%s: update mfc orig %s group %lx parent %x",
+ __func__, inet_ntoa(mfccp->mfcc_origin),
+ (u_long)ntohl(mfccp->mfcc_mcastgrp.s_addr),
+ mfccp->mfcc_parent);
+ update_mfc_params(rt, mfccp);
+ MFC_UNLOCK();
+ VIF_UNLOCK();
+ return (0);
+ }
+
+ /*
+ * Find the entry for which the upcall was made and update it.
+ */
+ nstl = 0;
+ hash = MFCHASH(mfccp->mfcc_origin, mfccp->mfcc_mcastgrp);
+ LIST_FOREACH(rt, &V_mfchashtbl[hash], mfc_hash) {
+ if (in_hosteq(rt->mfc_origin, mfccp->mfcc_origin) &&
+ in_hosteq(rt->mfc_mcastgrp, mfccp->mfcc_mcastgrp) &&
+ !TAILQ_EMPTY(&rt->mfc_stall)) {
+ CTR5(KTR_IPMF,
+ "%s: add mfc orig %s group %lx parent %x qh %p",
+ __func__, inet_ntoa(mfccp->mfcc_origin),
+ (u_long)ntohl(mfccp->mfcc_mcastgrp.s_addr),
+ mfccp->mfcc_parent,
+ TAILQ_FIRST(&rt->mfc_stall));
+ if (nstl++)
+ CTR1(KTR_IPMF, "%s: multiple matches", __func__);
+
+ init_mfc_params(rt, mfccp);
+ rt->mfc_expire = 0; /* Don't clean this guy up */
+ V_nexpire[hash]--;
+
+ /* Free queued packets, but attempt to forward them first. */
+ TAILQ_FOREACH_SAFE(rte, &rt->mfc_stall, rte_link, nrte) {
+ if (rte->ifp != NULL)
+ ip_mdq(rte->m, rte->ifp, rt, -1);
+ m_freem(rte->m);
+ TAILQ_REMOVE(&rt->mfc_stall, rte, rte_link);
+ rt->mfc_nstall--;
+ free(rte, M_MRTABLE);
+ }
+ }
+ }
+
+ /*
+ * It is possible that an entry is being inserted without an upcall
+ */
+ if (nstl == 0) {
+ CTR1(KTR_IPMF, "%s: adding mfc w/o upcall", __func__);
+ LIST_FOREACH(rt, &V_mfchashtbl[hash], mfc_hash) {
+ if (in_hosteq(rt->mfc_origin, mfccp->mfcc_origin) &&
+ in_hosteq(rt->mfc_mcastgrp, mfccp->mfcc_mcastgrp)) {
+ init_mfc_params(rt, mfccp);
+ if (rt->mfc_expire)
+ V_nexpire[hash]--;
+ rt->mfc_expire = 0;
+ break; /* XXX */
+ }
+ }
+
+ if (rt == NULL) { /* no upcall, so make a new entry */
+ rt = (struct mfc *)malloc(sizeof(*rt), M_MRTABLE, M_NOWAIT);
+ if (rt == NULL) {
+ MFC_UNLOCK();
+ VIF_UNLOCK();
+ return (ENOBUFS);
+ }
+
+ init_mfc_params(rt, mfccp);
+ TAILQ_INIT(&rt->mfc_stall);
+ rt->mfc_nstall = 0;
+
+ rt->mfc_expire = 0;
+ rt->mfc_bw_meter = NULL;
+
+ /* insert new entry at head of hash chain */
+ LIST_INSERT_HEAD(&V_mfchashtbl[hash], rt, mfc_hash);
+ }
+ }
+
+ MFC_UNLOCK();
+ VIF_UNLOCK();
+
+ return (0);
+}
+
+/*
+ * Delete an mfc entry
+ */
+static int
+del_mfc(struct mfcctl2 *mfccp)
+{
+ struct in_addr origin;
+ struct in_addr mcastgrp;
+ struct mfc *rt;
+
+ origin = mfccp->mfcc_origin;
+ mcastgrp = mfccp->mfcc_mcastgrp;
+
+ CTR3(KTR_IPMF, "%s: delete mfc orig %s group %lx", __func__,
+ inet_ntoa(origin), (u_long)ntohl(mcastgrp.s_addr));
+
+ MFC_LOCK();
+
+ rt = mfc_find(&origin, &mcastgrp);
+ if (rt == NULL) {
+ MFC_UNLOCK();
+ return EADDRNOTAVAIL;
+ }
+
+ /*
+ * free the bw_meter entries
+ */
+ free_bw_list(rt->mfc_bw_meter);
+ rt->mfc_bw_meter = NULL;
+
+ LIST_REMOVE(rt, mfc_hash);
+ free(rt, M_MRTABLE);
+
+ MFC_UNLOCK();
+
+ return (0);
+}
+
+/*
+ * Send a message to the routing daemon on the multicast routing socket.
+ */
+static int
+socket_send(struct socket *s, struct mbuf *mm, struct sockaddr_in *src)
+{
+ if (s) {
+ SOCKBUF_LOCK(&s->so_rcv);
+ if (sbappendaddr_locked(&s->so_rcv, (struct sockaddr *)src, mm,
+ NULL) != 0) {
+ sorwakeup_locked(s);
+ return 0;
+ }
+ SOCKBUF_UNLOCK(&s->so_rcv);
+ }
+ m_freem(mm);
+ return -1;
+}
+
+/*
+ * IP multicast forwarding function. This function assumes that the packet
+ * pointed to by "ip" has arrived on (or is about to be sent to) the interface
+ * pointed to by "ifp", and the packet is to be relayed to other networks
+ * that have members of the packet's destination IP multicast group.
+ *
+ * The packet is returned unscathed to the caller, unless it is
+ * erroneous, in which case a non-zero return value tells the caller to
+ * discard it.
+ */
+
+#define TUNNEL_LEN 12 /* # bytes of IP option for tunnel encapsulation */
+
+static int
+X_ip_mforward(struct ip *ip, struct ifnet *ifp, struct mbuf *m,
+ struct ip_moptions *imo)
+{
+ struct mfc *rt;
+ int error;
+ vifi_t vifi;
+
+ CTR3(KTR_IPMF, "ip_mforward: delete mfc orig %s group %lx ifp %p",
+ inet_ntoa(ip->ip_src), (u_long)ntohl(ip->ip_dst.s_addr), ifp);
+
+ if (ip->ip_hl < (sizeof(struct ip) + TUNNEL_LEN) >> 2 ||
+ ((u_char *)(ip + 1))[1] != IPOPT_LSRR ) {
+ /*
+ * Packet arrived via a physical interface or
+ * an encapsulated tunnel or a register_vif.
+ */
+ } else {
+ /*
+ * Packet arrived through a source-route tunnel.
+ * Source-route tunnels are no longer supported.
+ */
+ return (1);
+ }
+
+ VIF_LOCK();
+ MFC_LOCK();
+ if (imo && ((vifi = imo->imo_multicast_vif) < V_numvifs)) {
+ if (ip->ip_ttl < MAXTTL)
+ ip->ip_ttl++; /* compensate for -1 in *_send routines */
+ error = ip_mdq(m, ifp, NULL, vifi);
+ MFC_UNLOCK();
+ VIF_UNLOCK();
+ return error;
+ }
+
+ /*
+ * Don't forward a packet with time-to-live of zero or one,
+ * or a packet destined to a local-only group.
+ */
+ if (ip->ip_ttl <= 1 || IN_LOCAL_GROUP(ntohl(ip->ip_dst.s_addr))) {
+ MFC_UNLOCK();
+ VIF_UNLOCK();
+ return 0;
+ }
+
+ /*
+ * Determine forwarding vifs from the forwarding cache table
+ */
+ MRTSTAT_INC(mrts_mfc_lookups);
+ rt = mfc_find(&ip->ip_src, &ip->ip_dst);
+
+ /* Entry exists, so forward if necessary */
+ if (rt != NULL) {
+ error = ip_mdq(m, ifp, rt, -1);
+ MFC_UNLOCK();
+ VIF_UNLOCK();
+ return error;
+ } else {
+ /*
+ * If we don't have a route for the packet's origin, make a copy of
+ * the packet and send a message to the routing daemon.
+ */
+
+ struct mbuf *mb0;
+ struct rtdetq *rte;
+ u_long hash;
+ int hlen = ip->ip_hl << 2;
+
+ MRTSTAT_INC(mrts_mfc_misses);
+ MRTSTAT_INC(mrts_no_route);
+ CTR2(KTR_IPMF, "ip_mforward: no mfc for (%s,%lx)",
+ inet_ntoa(ip->ip_src), (u_long)ntohl(ip->ip_dst.s_addr));
+
+ /*
+ * Allocate mbufs early so that we don't do extra work if we are
+ * just going to fail anyway. Make sure to pullup the header so
+ * that other people can't step on it.
+ */
+ rte = (struct rtdetq *)malloc((sizeof *rte), M_MRTABLE,
+ M_NOWAIT|M_ZERO);
+ if (rte == NULL) {
+ MFC_UNLOCK();
+ VIF_UNLOCK();
+ return ENOBUFS;
+ }
+
+ mb0 = m_copypacket(m, M_DONTWAIT);
+ if (mb0 && (M_HASCL(mb0) || mb0->m_len < hlen))
+ mb0 = m_pullup(mb0, hlen);
+ if (mb0 == NULL) {
+ free(rte, M_MRTABLE);
+ MFC_UNLOCK();
+ VIF_UNLOCK();
+ return ENOBUFS;
+ }
+
+ /* Is there an upcall waiting for this flow? */
+ hash = MFCHASH(ip->ip_src, ip->ip_dst);
+ LIST_FOREACH(rt, &V_mfchashtbl[hash], mfc_hash) {
+ if (in_hosteq(ip->ip_src, rt->mfc_origin) &&
+ in_hosteq(ip->ip_dst, rt->mfc_mcastgrp) &&
+ !TAILQ_EMPTY(&rt->mfc_stall))
+ break;
+ }
+
+ if (rt == NULL) {
+ int i;
+ struct igmpmsg *im;
+ struct sockaddr_in k_igmpsrc = { sizeof k_igmpsrc, AF_INET };
+ struct mbuf *mm;
+
+ /*
+ * Locate the vifi for the incoming interface for this packet.
+ * If none found, drop packet.
+ */
+ for (vifi = 0; vifi < V_numvifs &&
+ V_viftable[vifi].v_ifp != ifp; vifi++)
+ ;
+ if (vifi >= V_numvifs) /* vif not found, drop packet */
+ goto non_fatal;
+
+ /* no upcall, so make a new entry */
+ rt = (struct mfc *)malloc(sizeof(*rt), M_MRTABLE, M_NOWAIT);
+ if (rt == NULL)
+ goto fail;
+
+ /* Make a copy of the header to send to the user level process */
+ mm = m_copy(mb0, 0, hlen);
+ if (mm == NULL)
+ goto fail1;
+
+ /*
+ * Send message to routing daemon to install
+ * a route into the kernel table
+ */
+
+ im = mtod(mm, struct igmpmsg *);
+ im->im_msgtype = IGMPMSG_NOCACHE;
+ im->im_mbz = 0;
+ im->im_vif = vifi;
+
+ MRTSTAT_INC(mrts_upcalls);
+
+ k_igmpsrc.sin_addr = ip->ip_src;
+ if (socket_send(V_ip_mrouter, mm, &k_igmpsrc) < 0) {
+ CTR0(KTR_IPMF, "ip_mforward: socket queue full");
+ MRTSTAT_INC(mrts_upq_sockfull);
+fail1:
+ free(rt, M_MRTABLE);
+fail:
+ free(rte, M_MRTABLE);
+ m_freem(mb0);
+ MFC_UNLOCK();
+ VIF_UNLOCK();
+ return ENOBUFS;
+ }
+
+ /* insert new entry at head of hash chain */
+ rt->mfc_origin.s_addr = ip->ip_src.s_addr;
+ rt->mfc_mcastgrp.s_addr = ip->ip_dst.s_addr;
+ rt->mfc_expire = UPCALL_EXPIRE;
+ V_nexpire[hash]++;
+ for (i = 0; i < V_numvifs; i++) {
+ rt->mfc_ttls[i] = 0;
+ rt->mfc_flags[i] = 0;
+ }
+ rt->mfc_parent = -1;
+
+ /* clear the RP address */
+ rt->mfc_rp.s_addr = INADDR_ANY;
+ rt->mfc_bw_meter = NULL;
+
+ /* initialize pkt counters per src-grp */
+ rt->mfc_pkt_cnt = 0;
+ rt->mfc_byte_cnt = 0;
+ rt->mfc_wrong_if = 0;
+ timevalclear(&rt->mfc_last_assert);
+
+ TAILQ_INIT(&rt->mfc_stall);
+ rt->mfc_nstall = 0;
+
+ /* link into table */
+ LIST_INSERT_HEAD(&V_mfchashtbl[hash], rt, mfc_hash);
+ TAILQ_INSERT_HEAD(&rt->mfc_stall, rte, rte_link);
+ rt->mfc_nstall++;
+
+ } else {
+ /* determine if queue has overflowed */
+ if (rt->mfc_nstall > MAX_UPQ) {
+ MRTSTAT_INC(mrts_upq_ovflw);
+non_fatal:
+ free(rte, M_MRTABLE);
+ m_freem(mb0);
+ MFC_UNLOCK();
+ VIF_UNLOCK();
+ return (0);
+ }
+ TAILQ_INSERT_TAIL(&rt->mfc_stall, rte, rte_link);
+ rt->mfc_nstall++;
+ }
+
+ rte->m = mb0;
+ rte->ifp = ifp;
+
+ MFC_UNLOCK();
+ VIF_UNLOCK();
+
+ return 0;
+ }
+}
+
+/*
+ * Clean up cache entries whose upcalls were not serviced.
+ */
+static void
+expire_upcalls(void *arg)
+{
+ int i;
+
+ CURVNET_SET((struct vnet *) arg);
+
+ MFC_LOCK();
+
+ for (i = 0; i < mfchashsize; i++) {
+ struct mfc *rt, *nrt;
+
+ if (V_nexpire[i] == 0)
+ continue;
+
+ for (rt = LIST_FIRST(&V_mfchashtbl[i]); rt; rt = nrt) {
+ nrt = LIST_NEXT(rt, mfc_hash);
+
+ if (TAILQ_EMPTY(&rt->mfc_stall))
+ continue;
+
+ if (rt->mfc_expire == 0 || --rt->mfc_expire > 0)
+ continue;
+
+ /*
+ * free the bw_meter entries
+ */
+ while (rt->mfc_bw_meter != NULL) {
+ struct bw_meter *x = rt->mfc_bw_meter;
+
+ rt->mfc_bw_meter = x->bm_mfc_next;
+ free(x, M_BWMETER);
+ }
+
+ MRTSTAT_INC(mrts_cache_cleanups);
+ CTR3(KTR_IPMF, "%s: expire (%lx, %lx)", __func__,
+ (u_long)ntohl(rt->mfc_origin.s_addr),
+ (u_long)ntohl(rt->mfc_mcastgrp.s_addr));
+
+ expire_mfc(rt);
+ }
+ }
+
+ MFC_UNLOCK();
+
+ callout_reset(&V_expire_upcalls_ch, EXPIRE_TIMEOUT, expire_upcalls,
+ curvnet);
+
+ CURVNET_RESTORE();
+}
+
+/*
+ * Packet forwarding routine once an entry in the cache has been made.
+ */
+static int
+ip_mdq(struct mbuf *m, struct ifnet *ifp, struct mfc *rt, vifi_t xmt_vif)
+{
+ struct ip *ip = mtod(m, struct ip *);
+ vifi_t vifi;
+ int plen = ip->ip_len;
+
+ VIF_LOCK_ASSERT();
+
+ /*
+ * If xmt_vif is not -1, send on only the requested vif.
+ *
+ * (Since vifi_t is u_short, -1 becomes MAXUSHORT, which is > numvifs.)
+ */
+ if (xmt_vif < V_numvifs) {
+ if (V_viftable[xmt_vif].v_flags & VIFF_REGISTER)
+ pim_register_send(ip, V_viftable + xmt_vif, m, rt);
+ else
+ phyint_send(ip, V_viftable + xmt_vif, m);
+ return 1;
+ }
+
+ /*
+ * Don't forward if it didn't arrive from the parent vif for its origin.
+ */
+ vifi = rt->mfc_parent;
+ if ((vifi >= V_numvifs) || (V_viftable[vifi].v_ifp != ifp)) {
+ CTR4(KTR_IPMF, "%s: rx on wrong ifp %p (vifi %d, v_ifp %p)",
+ __func__, ifp, (int)vifi, V_viftable[vifi].v_ifp);
+ MRTSTAT_INC(mrts_wrong_if);
+ ++rt->mfc_wrong_if;
+ /*
+ * If we are doing PIM assert processing, send a message
+ * to the routing daemon.
+ *
+ * XXX: A PIM-SM router needs the WRONGVIF detection so it
+ * can complete the SPT switch, regardless of the type
+ * of the iif (broadcast media, GRE tunnel, etc).
+ */
+ if (V_pim_assert_enabled && (vifi < V_numvifs) &&
+ V_viftable[vifi].v_ifp) {
+
+ if (ifp == &V_multicast_register_if)
+ PIMSTAT_INC(pims_rcv_registers_wrongiif);
+
+ /* Get vifi for the incoming packet */
+ for (vifi = 0; vifi < V_numvifs && V_viftable[vifi].v_ifp != ifp;
+ vifi++)
+ ;
+ if (vifi >= V_numvifs)
+ return 0; /* The iif is not found: ignore the packet. */
+
+ if (rt->mfc_flags[vifi] & MRT_MFC_FLAGS_DISABLE_WRONGVIF)
+ return 0; /* WRONGVIF disabled: ignore the packet */
+
+ if (ratecheck(&rt->mfc_last_assert, &pim_assert_interval)) {
+ struct sockaddr_in k_igmpsrc = { sizeof k_igmpsrc, AF_INET };
+ struct igmpmsg *im;
+ int hlen = ip->ip_hl << 2;
+ struct mbuf *mm = m_copy(m, 0, hlen);
+
+ if (mm && (M_HASCL(mm) || mm->m_len < hlen))
+ mm = m_pullup(mm, hlen);
+ if (mm == NULL)
+ return ENOBUFS;
+
+ im = mtod(mm, struct igmpmsg *);
+ im->im_msgtype = IGMPMSG_WRONGVIF;
+ im->im_mbz = 0;
+ im->im_vif = vifi;
+
+ MRTSTAT_INC(mrts_upcalls);
+
+ k_igmpsrc.sin_addr = im->im_src;
+ if (socket_send(V_ip_mrouter, mm, &k_igmpsrc) < 0) {
+ CTR1(KTR_IPMF, "%s: socket queue full", __func__);
+ MRTSTAT_INC(mrts_upq_sockfull);
+ return ENOBUFS;
+ }
+ }
+ }
+ return 0;
+ }
+
+ /* If I sourced this packet, it counts as output, else it was input. */
+ if (in_hosteq(ip->ip_src, V_viftable[vifi].v_lcl_addr)) {
+ V_viftable[vifi].v_pkt_out++;
+ V_viftable[vifi].v_bytes_out += plen;
+ } else {
+ V_viftable[vifi].v_pkt_in++;
+ V_viftable[vifi].v_bytes_in += plen;
+ }
+ rt->mfc_pkt_cnt++;
+ rt->mfc_byte_cnt += plen;
+
+ /*
+ * For each vif, decide if a copy of the packet should be forwarded.
+ * Forward if:
+ * - the ttl exceeds the vif's threshold
+ * - there are group members downstream on interface
+ */
+ for (vifi = 0; vifi < V_numvifs; vifi++)
+ if ((rt->mfc_ttls[vifi] > 0) && (ip->ip_ttl > rt->mfc_ttls[vifi])) {
+ V_viftable[vifi].v_pkt_out++;
+ V_viftable[vifi].v_bytes_out += plen;
+ if (V_viftable[vifi].v_flags & VIFF_REGISTER)
+ pim_register_send(ip, V_viftable + vifi, m, rt);
+ else
+ phyint_send(ip, V_viftable + vifi, m);
+ }
+
+ /*
+ * Perform upcall-related bw measuring.
+ */
+ if (rt->mfc_bw_meter != NULL) {
+ struct bw_meter *x;
+ struct timeval now;
+
+ microtime(&now);
+ MFC_LOCK_ASSERT();
+ for (x = rt->mfc_bw_meter; x != NULL; x = x->bm_mfc_next)
+ bw_meter_receive_packet(x, plen, &now);
+ }
+
+ return 0;
+}
+
+/*
+ * Check if a vif number is legal/ok. This is used by in_mcast.c.
+ */
+static int
+X_legal_vif_num(int vif)
+{
+ int ret;
+
+ ret = 0;
+ if (vif < 0)
+ return (ret);
+
+ VIF_LOCK();
+ if (vif < V_numvifs)
+ ret = 1;
+ VIF_UNLOCK();
+
+ return (ret);
+}
+
+/*
+ * Return the local address used by this vif
+ */
+static u_long
+X_ip_mcast_src(int vifi)
+{
+ in_addr_t addr;
+
+ addr = INADDR_ANY;
+ if (vifi < 0)
+ return (addr);
+
+ VIF_LOCK();
+ if (vifi < V_numvifs)
+ addr = V_viftable[vifi].v_lcl_addr.s_addr;
+ VIF_UNLOCK();
+
+ return (addr);
+}
+
+static void
+phyint_send(struct ip *ip, struct vif *vifp, struct mbuf *m)
+{
+ struct mbuf *mb_copy;
+ int hlen = ip->ip_hl << 2;
+
+ VIF_LOCK_ASSERT();
+
+ /*
+ * Make a new reference to the packet; make sure that
+ * the IP header is actually copied, not just referenced,
+ * so that ip_output() only scribbles on the copy.
+ */
+ mb_copy = m_copypacket(m, M_DONTWAIT);
+ if (mb_copy && (M_HASCL(mb_copy) || mb_copy->m_len < hlen))
+ mb_copy = m_pullup(mb_copy, hlen);
+ if (mb_copy == NULL)
+ return;
+
+ send_packet(vifp, mb_copy);
+}
+
+static void
+send_packet(struct vif *vifp, struct mbuf *m)
+{
+ struct ip_moptions imo;
+ struct in_multi *imm[2];
+ int error;
+
+ VIF_LOCK_ASSERT();
+
+ imo.imo_multicast_ifp = vifp->v_ifp;
+ imo.imo_multicast_ttl = mtod(m, struct ip *)->ip_ttl - 1;
+ imo.imo_multicast_loop = 1;
+ imo.imo_multicast_vif = -1;
+ imo.imo_num_memberships = 0;
+ imo.imo_max_memberships = 2;
+ imo.imo_membership = &imm[0];
+
+ /*
+ * Re-entrancy should not be a problem here, because
+ * the packets that we send out and are looped back at us
+ * should get rejected because they appear to come from
+ * the loopback interface, thus preventing looping.
+ */
+ error = ip_output(m, NULL, &vifp->v_route, IP_FORWARDING, &imo, NULL);
+ CTR3(KTR_IPMF, "%s: vif %td err %d", __func__,
+ (ptrdiff_t)(vifp - V_viftable), error);
+}
+
+/*
+ * Stubs for old RSVP socket shim implementation.
+ */
+
+static int
+X_ip_rsvp_vif(struct socket *so __unused, struct sockopt *sopt __unused)
+{
+
+ return (EOPNOTSUPP);
+}
+
+static void
+X_ip_rsvp_force_done(struct socket *so __unused)
+{
+
+}
+
+static void
+X_rsvp_input(struct mbuf *m, int off __unused)
+{
+
+ if (!V_rsvp_on)
+ m_freem(m);
+}
+
+/*
+ * Code for bandwidth monitors
+ */
+
+/*
+ * Define common interface for timeval-related methods
+ */
+#define BW_TIMEVALCMP(tvp, uvp, cmp) timevalcmp((tvp), (uvp), cmp)
+#define BW_TIMEVALDECR(vvp, uvp) timevalsub((vvp), (uvp))
+#define BW_TIMEVALADD(vvp, uvp) timevaladd((vvp), (uvp))
+
+static uint32_t
+compute_bw_meter_flags(struct bw_upcall *req)
+{
+ uint32_t flags = 0;
+
+ if (req->bu_flags & BW_UPCALL_UNIT_PACKETS)
+ flags |= BW_METER_UNIT_PACKETS;
+ if (req->bu_flags & BW_UPCALL_UNIT_BYTES)
+ flags |= BW_METER_UNIT_BYTES;
+ if (req->bu_flags & BW_UPCALL_GEQ)
+ flags |= BW_METER_GEQ;
+ if (req->bu_flags & BW_UPCALL_LEQ)
+ flags |= BW_METER_LEQ;
+
+ return flags;
+}
+
+/*
+ * Add a bw_meter entry
+ */
+static int
+add_bw_upcall(struct bw_upcall *req)
+{
+ struct mfc *mfc;
+ struct timeval delta = { BW_UPCALL_THRESHOLD_INTERVAL_MIN_SEC,
+ BW_UPCALL_THRESHOLD_INTERVAL_MIN_USEC };
+ struct timeval now;
+ struct bw_meter *x;
+ uint32_t flags;
+
+ if (!(V_mrt_api_config & MRT_MFC_BW_UPCALL))
+ return EOPNOTSUPP;
+
+ /* Test if the flags are valid */
+ if (!(req->bu_flags & (BW_UPCALL_UNIT_PACKETS | BW_UPCALL_UNIT_BYTES)))
+ return EINVAL;
+ if (!(req->bu_flags & (BW_UPCALL_GEQ | BW_UPCALL_LEQ)))
+ return EINVAL;
+ if ((req->bu_flags & (BW_UPCALL_GEQ | BW_UPCALL_LEQ))
+ == (BW_UPCALL_GEQ | BW_UPCALL_LEQ))
+ return EINVAL;
+
+ /* Test if the threshold time interval is valid */
+ if (BW_TIMEVALCMP(&req->bu_threshold.b_time, &delta, <))
+ return EINVAL;
+
+ flags = compute_bw_meter_flags(req);
+
+ /*
+ * Check whether we already have the same bw_meter entry
+ */
+ MFC_LOCK();
+ mfc = mfc_find(&req->bu_src, &req->bu_dst);
+ if (mfc == NULL) {
+ MFC_UNLOCK();
+ return EADDRNOTAVAIL;
+ }
+ for (x = mfc->mfc_bw_meter; x != NULL; x = x->bm_mfc_next) {
+ if ((BW_TIMEVALCMP(&x->bm_threshold.b_time,
+ &req->bu_threshold.b_time, ==)) &&
+ (x->bm_threshold.b_packets == req->bu_threshold.b_packets) &&
+ (x->bm_threshold.b_bytes == req->bu_threshold.b_bytes) &&
+ (x->bm_flags & BW_METER_USER_FLAGS) == flags) {
+ MFC_UNLOCK();
+ return 0; /* XXX Already installed */
+ }
+ }
+
+ /* Allocate the new bw_meter entry */
+ x = (struct bw_meter *)malloc(sizeof(*x), M_BWMETER, M_NOWAIT);
+ if (x == NULL) {
+ MFC_UNLOCK();
+ return ENOBUFS;
+ }
+
+ /* Set the new bw_meter entry */
+ x->bm_threshold.b_time = req->bu_threshold.b_time;
+ microtime(&now);
+ x->bm_start_time = now;
+ x->bm_threshold.b_packets = req->bu_threshold.b_packets;
+ x->bm_threshold.b_bytes = req->bu_threshold.b_bytes;
+ x->bm_measured.b_packets = 0;
+ x->bm_measured.b_bytes = 0;
+ x->bm_flags = flags;
+ x->bm_time_next = NULL;
+ x->bm_time_hash = BW_METER_BUCKETS;
+
+ /* Add the new bw_meter entry to the front of entries for this MFC */
+ x->bm_mfc = mfc;
+ x->bm_mfc_next = mfc->mfc_bw_meter;
+ mfc->mfc_bw_meter = x;
+ schedule_bw_meter(x, &now);
+ MFC_UNLOCK();
+
+ return 0;
+}
+
+static void
+free_bw_list(struct bw_meter *list)
+{
+ while (list != NULL) {
+ struct bw_meter *x = list;
+
+ list = list->bm_mfc_next;
+ unschedule_bw_meter(x);
+ free(x, M_BWMETER);
+ }
+}
+
+/*
+ * Delete one or multiple bw_meter entries
+ */
+static int
+del_bw_upcall(struct bw_upcall *req)
+{
+ struct mfc *mfc;
+ struct bw_meter *x;
+
+ if (!(V_mrt_api_config & MRT_MFC_BW_UPCALL))
+ return EOPNOTSUPP;
+
+ MFC_LOCK();
+
+ /* Find the corresponding MFC entry */
+ mfc = mfc_find(&req->bu_src, &req->bu_dst);
+ if (mfc == NULL) {
+ MFC_UNLOCK();
+ return EADDRNOTAVAIL;
+ } else if (req->bu_flags & BW_UPCALL_DELETE_ALL) {
+ /*
+ * Delete all bw_meter entries for this mfc
+ */
+ struct bw_meter *list;
+
+ list = mfc->mfc_bw_meter;
+ mfc->mfc_bw_meter = NULL;
+ free_bw_list(list);
+ MFC_UNLOCK();
+ return 0;
+ } else { /* Delete a single bw_meter entry */
+ struct bw_meter *prev;
+ uint32_t flags = 0;
+
+ flags = compute_bw_meter_flags(req);
+
+ /* Find the bw_meter entry to delete */
+ for (prev = NULL, x = mfc->mfc_bw_meter; x != NULL;
+ prev = x, x = x->bm_mfc_next) {
+ if ((BW_TIMEVALCMP(&x->bm_threshold.b_time,
+ &req->bu_threshold.b_time, ==)) &&
+ (x->bm_threshold.b_packets == req->bu_threshold.b_packets) &&
+ (x->bm_threshold.b_bytes == req->bu_threshold.b_bytes) &&
+ (x->bm_flags & BW_METER_USER_FLAGS) == flags)
+ break;
+ }
+ if (x != NULL) { /* Delete entry from the list for this MFC */
+ if (prev != NULL)
+ prev->bm_mfc_next = x->bm_mfc_next; /* remove from middle*/
+ else
+ x->bm_mfc->mfc_bw_meter = x->bm_mfc_next;/* new head of list */
+
+ unschedule_bw_meter(x);
+ MFC_UNLOCK();
+ /* Free the bw_meter entry */
+ free(x, M_BWMETER);
+ return 0;
+ } else {
+ MFC_UNLOCK();
+ return EINVAL;
+ }
+ }
+ /* NOTREACHED */
+}
+
+/*
+ * Perform bandwidth measurement processing that may result in an upcall
+ */
+static void
+bw_meter_receive_packet(struct bw_meter *x, int plen, struct timeval *nowp)
+{
+ struct timeval delta;
+
+ MFC_LOCK_ASSERT();
+
+ delta = *nowp;
+ BW_TIMEVALDECR(&delta, &x->bm_start_time);
+
+ if (x->bm_flags & BW_METER_GEQ) {
+ /*
+ * Processing for ">=" type of bw_meter entry
+ */
+ if (BW_TIMEVALCMP(&delta, &x->bm_threshold.b_time, >)) {
+ /* Reset the bw_meter entry */
+ x->bm_start_time = *nowp;
+ x->bm_measured.b_packets = 0;
+ x->bm_measured.b_bytes = 0;
+ x->bm_flags &= ~BW_METER_UPCALL_DELIVERED;
+ }
+
+ /* Record that a packet is received */
+ x->bm_measured.b_packets++;
+ x->bm_measured.b_bytes += plen;
+
+ /*
+ * Test if we should deliver an upcall
+ */
+ if (!(x->bm_flags & BW_METER_UPCALL_DELIVERED)) {
+ if (((x->bm_flags & BW_METER_UNIT_PACKETS) &&
+ (x->bm_measured.b_packets >= x->bm_threshold.b_packets)) ||
+ ((x->bm_flags & BW_METER_UNIT_BYTES) &&
+ (x->bm_measured.b_bytes >= x->bm_threshold.b_bytes))) {
+ /* Prepare an upcall for delivery */
+ bw_meter_prepare_upcall(x, nowp);
+ x->bm_flags |= BW_METER_UPCALL_DELIVERED;
+ }
+ }
+ } else if (x->bm_flags & BW_METER_LEQ) {
+ /*
+ * Processing for "<=" type of bw_meter entry
+ */
+ if (BW_TIMEVALCMP(&delta, &x->bm_threshold.b_time, >)) {
+ /*
+ * We are behind time with the multicast forwarding table
+ * scanning for "<=" type of bw_meter entries, so test now
+ * if we should deliver an upcall.
+ */
+ if (((x->bm_flags & BW_METER_UNIT_PACKETS) &&
+ (x->bm_measured.b_packets <= x->bm_threshold.b_packets)) ||
+ ((x->bm_flags & BW_METER_UNIT_BYTES) &&
+ (x->bm_measured.b_bytes <= x->bm_threshold.b_bytes))) {
+ /* Prepare an upcall for delivery */
+ bw_meter_prepare_upcall(x, nowp);
+ }
+ /* Reschedule the bw_meter entry */
+ unschedule_bw_meter(x);
+ schedule_bw_meter(x, nowp);
+ }
+
+ /* Record that a packet is received */
+ x->bm_measured.b_packets++;
+ x->bm_measured.b_bytes += plen;
+
+ /*
+ * Test if we should restart the measuring interval
+ */
+ if ((x->bm_flags & BW_METER_UNIT_PACKETS &&
+ x->bm_measured.b_packets <= x->bm_threshold.b_packets) ||
+ (x->bm_flags & BW_METER_UNIT_BYTES &&
+ x->bm_measured.b_bytes <= x->bm_threshold.b_bytes)) {
+ /* Don't restart the measuring interval */
+ } else {
+ /* Do restart the measuring interval */
+ /*
+ * XXX: note that we don't unschedule and schedule, because this
+ * might be too much overhead per packet. Instead, when we process
+ * all entries for a given timer hash bin, we check whether it is
+ * really a timeout. If not, we reschedule at that time.
+ */
+ x->bm_start_time = *nowp;
+ x->bm_measured.b_packets = 0;
+ x->bm_measured.b_bytes = 0;
+ x->bm_flags &= ~BW_METER_UPCALL_DELIVERED;
+ }
+ }
+}
+
+/*
+ * Prepare a bandwidth-related upcall
+ */
+static void
+bw_meter_prepare_upcall(struct bw_meter *x, struct timeval *nowp)
+{
+ struct timeval delta;
+ struct bw_upcall *u;
+
+ MFC_LOCK_ASSERT();
+
+ /*
+ * Compute the measured time interval
+ */
+ delta = *nowp;
+ BW_TIMEVALDECR(&delta, &x->bm_start_time);
+
+ /*
+ * If there are too many pending upcalls, deliver them now
+ */
+ if (V_bw_upcalls_n >= BW_UPCALLS_MAX)
+ bw_upcalls_send();
+
+ /*
+ * Set the bw_upcall entry
+ */
+ u = &V_bw_upcalls[V_bw_upcalls_n++];
+ u->bu_src = x->bm_mfc->mfc_origin;
+ u->bu_dst = x->bm_mfc->mfc_mcastgrp;
+ u->bu_threshold.b_time = x->bm_threshold.b_time;
+ u->bu_threshold.b_packets = x->bm_threshold.b_packets;
+ u->bu_threshold.b_bytes = x->bm_threshold.b_bytes;
+ u->bu_measured.b_time = delta;
+ u->bu_measured.b_packets = x->bm_measured.b_packets;
+ u->bu_measured.b_bytes = x->bm_measured.b_bytes;
+ u->bu_flags = 0;
+ if (x->bm_flags & BW_METER_UNIT_PACKETS)
+ u->bu_flags |= BW_UPCALL_UNIT_PACKETS;
+ if (x->bm_flags & BW_METER_UNIT_BYTES)
+ u->bu_flags |= BW_UPCALL_UNIT_BYTES;
+ if (x->bm_flags & BW_METER_GEQ)
+ u->bu_flags |= BW_UPCALL_GEQ;
+ if (x->bm_flags & BW_METER_LEQ)
+ u->bu_flags |= BW_UPCALL_LEQ;
+}
+
+/*
+ * Send the pending bandwidth-related upcalls
+ */
+static void
+bw_upcalls_send(void)
+{
+ struct mbuf *m;
+ int len = V_bw_upcalls_n * sizeof(V_bw_upcalls[0]);
+ struct sockaddr_in k_igmpsrc = { sizeof k_igmpsrc, AF_INET };
+ static struct igmpmsg igmpmsg = { 0, /* unused1 */
+ 0, /* unused2 */
+ IGMPMSG_BW_UPCALL,/* im_msgtype */
+ 0, /* im_mbz */
+ 0, /* im_vif */
+ 0, /* unused3 */
+ { 0 }, /* im_src */
+ { 0 } }; /* im_dst */
+
+ MFC_LOCK_ASSERT();
+
+ if (V_bw_upcalls_n == 0)
+ return; /* No pending upcalls */
+
+ V_bw_upcalls_n = 0;
+
+ /*
+ * Allocate a new mbuf, initialize it with the header and
+ * the payload for the pending calls.
+ */
+ MGETHDR(m, M_DONTWAIT, MT_DATA);
+ if (m == NULL) {
+ log(LOG_WARNING, "bw_upcalls_send: cannot allocate mbuf\n");
+ return;
+ }
+
+ m->m_len = m->m_pkthdr.len = 0;
+ m_copyback(m, 0, sizeof(struct igmpmsg), (caddr_t)&igmpmsg);
+ m_copyback(m, sizeof(struct igmpmsg), len, (caddr_t)&V_bw_upcalls[0]);
+
+ /*
+ * Send the upcalls
+ * XXX do we need to set the address in k_igmpsrc ?
+ */
+ MRTSTAT_INC(mrts_upcalls);
+ if (socket_send(V_ip_mrouter, m, &k_igmpsrc) < 0) {
+ log(LOG_WARNING, "bw_upcalls_send: ip_mrouter socket queue full\n");
+ MRTSTAT_INC(mrts_upq_sockfull);
+ }
+}
+
+/*
+ * Compute the timeout hash value for the bw_meter entries
+ */
+#define BW_METER_TIMEHASH(bw_meter, hash) \
+ do { \
+ struct timeval next_timeval = (bw_meter)->bm_start_time; \
+ \
+ BW_TIMEVALADD(&next_timeval, &(bw_meter)->bm_threshold.b_time); \
+ (hash) = next_timeval.tv_sec; \
+ if (next_timeval.tv_usec) \
+ (hash)++; /* XXX: make sure we don't timeout early */ \
+ (hash) %= BW_METER_BUCKETS; \
+ } while (0)
+
+/*
+ * Schedule a timer to periodically process a bw_meter entry of type "<="
+ * by linking the entry into the proper hash bucket.
+ */
+static void
+schedule_bw_meter(struct bw_meter *x, struct timeval *nowp)
+{
+ int time_hash;
+
+ MFC_LOCK_ASSERT();
+
+ if (!(x->bm_flags & BW_METER_LEQ))
+ return; /* XXX: we schedule timers only for "<=" entries */
+
+ /*
+ * Reset the bw_meter entry
+ */
+ x->bm_start_time = *nowp;
+ x->bm_measured.b_packets = 0;
+ x->bm_measured.b_bytes = 0;
+ x->bm_flags &= ~BW_METER_UPCALL_DELIVERED;
+
+ /*
+ * Compute the timeout hash value and insert the entry
+ */
+ BW_METER_TIMEHASH(x, time_hash);
+ x->bm_time_next = V_bw_meter_timers[time_hash];
+ V_bw_meter_timers[time_hash] = x;
+ x->bm_time_hash = time_hash;
+}
+
+/*
+ * Unschedule the periodic timer that processes a bw_meter entry of type "<="
+ * by removing the entry from the proper hash bucket.
+ */
+static void
+unschedule_bw_meter(struct bw_meter *x)
+{
+ int time_hash;
+ struct bw_meter *prev, *tmp;
+
+ MFC_LOCK_ASSERT();
+
+ if (!(x->bm_flags & BW_METER_LEQ))
+ return; /* XXX: we schedule timers only for "<=" entries */
+
+ /*
+ * Compute the timeout hash value and delete the entry
+ */
+ time_hash = x->bm_time_hash;
+ if (time_hash >= BW_METER_BUCKETS)
+ return; /* Entry was not scheduled */
+
+ for (prev = NULL, tmp = V_bw_meter_timers[time_hash];
+ tmp != NULL; prev = tmp, tmp = tmp->bm_time_next)
+ if (tmp == x)
+ break;
+
+ if (tmp == NULL)
+ panic("unschedule_bw_meter: bw_meter entry not found");
+
+ if (prev != NULL)
+ prev->bm_time_next = x->bm_time_next;
+ else
+ V_bw_meter_timers[time_hash] = x->bm_time_next;
+
+ x->bm_time_next = NULL;
+ x->bm_time_hash = BW_METER_BUCKETS;
+}
+
+
+/*
+ * Process all "<=" type of bw_meter that should be processed now,
+ * and for each entry prepare an upcall if necessary. Each processed
+ * entry is rescheduled again for the (periodic) processing.
+ *
+ * This is run periodically (once per second normally). On each round,
+ * all the potentially matching entries are in the hash slot that we are
+ * looking at.
+ */
+static void
+bw_meter_process(void)
+{
+ uint32_t loops;
+ int i;
+ struct timeval now, process_endtime;
+
+ microtime(&now);
+ if (V_last_tv_sec == now.tv_sec)
+ return; /* nothing to do */
+
+ loops = now.tv_sec - V_last_tv_sec;
+ V_last_tv_sec = now.tv_sec;
+ if (loops > BW_METER_BUCKETS)
+ loops = BW_METER_BUCKETS;
+
+ MFC_LOCK();
+ /*
+ * Process all bins of bw_meter entries from the one after the last
+ * processed to the current one. On entry, i points to the last bucket
+ * visited, so we need to increment i at the beginning of the loop.
+ */
+ for (i = (now.tv_sec - loops) % BW_METER_BUCKETS; loops > 0; loops--) {
+ struct bw_meter *x, *tmp_list;
+
+ if (++i >= BW_METER_BUCKETS)
+ i = 0;
+
+ /* Disconnect the list of bw_meter entries from the bin */
+ tmp_list = V_bw_meter_timers[i];
+ V_bw_meter_timers[i] = NULL;
+
+ /* Process the list of bw_meter entries */
+ while (tmp_list != NULL) {
+ x = tmp_list;
+ tmp_list = tmp_list->bm_time_next;
+
+ /* Test if the time interval is over */
+ process_endtime = x->bm_start_time;
+ BW_TIMEVALADD(&process_endtime, &x->bm_threshold.b_time);
+ if (BW_TIMEVALCMP(&process_endtime, &now, >)) {
+ /* Not yet: reschedule, but don't reset */
+ int time_hash;
+
+ BW_METER_TIMEHASH(x, time_hash);
+ if (time_hash == i && process_endtime.tv_sec == now.tv_sec) {
+ /*
+ * XXX: somehow the bin processing is a bit ahead of time.
+ * Put the entry in the next bin.
+ */
+ if (++time_hash >= BW_METER_BUCKETS)
+ time_hash = 0;
+ }
+ x->bm_time_next = V_bw_meter_timers[time_hash];
+ V_bw_meter_timers[time_hash] = x;
+ x->bm_time_hash = time_hash;
+
+ continue;
+ }
+
+ /*
+ * Test if we should deliver an upcall
+ */
+ if (((x->bm_flags & BW_METER_UNIT_PACKETS) &&
+ (x->bm_measured.b_packets <= x->bm_threshold.b_packets)) ||
+ ((x->bm_flags & BW_METER_UNIT_BYTES) &&
+ (x->bm_measured.b_bytes <= x->bm_threshold.b_bytes))) {
+ /* Prepare an upcall for delivery */
+ bw_meter_prepare_upcall(x, &now);
+ }
+
+ /*
+ * Reschedule for next processing
+ */
+ schedule_bw_meter(x, &now);
+ }
+ }
+
+ /* Send all upcalls that are pending delivery */
+ bw_upcalls_send();
+
+ MFC_UNLOCK();
+}
+
+/*
+ * A periodic function for sending all upcalls that are pending delivery
+ */
+static void
+expire_bw_upcalls_send(void *arg)
+{
+ CURVNET_SET((struct vnet *) arg);
+
+ MFC_LOCK();
+ bw_upcalls_send();
+ MFC_UNLOCK();
+
+ callout_reset(&V_bw_upcalls_ch, BW_UPCALLS_PERIOD, expire_bw_upcalls_send,
+ curvnet);
+ CURVNET_RESTORE();
+}
+
+/*
+ * A periodic function that scans the multicast forwarding
+ * table and processes all "<=" bw_meter entries.
+ */
+static void
+expire_bw_meter_process(void *arg)
+{
+ CURVNET_SET((struct vnet *) arg);
+
+ if (V_mrt_api_config & MRT_MFC_BW_UPCALL)
+ bw_meter_process();
+
+ callout_reset(&V_bw_meter_ch, BW_METER_PERIOD, expire_bw_meter_process,
+ curvnet);
+ CURVNET_RESTORE();
+}
+
+/*
+ * End of bandwidth monitoring code
+ */
+
+/*
+ * Send the packet up to the user daemon, or, when so configured, do the
+ * PIM Register kernel encapsulation and send it to the RP.
+ */
+static int
+pim_register_send(struct ip *ip, struct vif *vifp, struct mbuf *m,
+ struct mfc *rt)
+{
+ struct mbuf *mb_copy, *mm;
+
+ /*
+ * Do not send IGMP_WHOLEPKT notifications to userland if the
+ * rendezvous point was unspecified and we were told not to.
+ */
+ if (pim_squelch_wholepkt != 0 && (V_mrt_api_config & MRT_MFC_RP) &&
+ in_nullhost(rt->mfc_rp))
+ return 0;
+
+ mb_copy = pim_register_prepare(ip, m);
+ if (mb_copy == NULL)
+ return ENOBUFS;
+
+ /*
+ * Send all the fragments. Note that the mbuf for each fragment
+ * is freed by the sending machinery.
+ */
+ for (mm = mb_copy; mm; mm = mb_copy) {
+ mb_copy = mm->m_nextpkt;
+ mm->m_nextpkt = 0;
+ mm = m_pullup(mm, sizeof(struct ip));
+ if (mm != NULL) {
+ ip = mtod(mm, struct ip *);
+ if ((V_mrt_api_config & MRT_MFC_RP) && !in_nullhost(rt->mfc_rp)) {
+ pim_register_send_rp(ip, vifp, mm, rt);
+ } else {
+ pim_register_send_upcall(ip, vifp, mm, rt);
+ }
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Return a copy of the data packet that is ready for PIM Register
+ * encapsulation.
+ * XXX: Note that in the returned copy the IP header is valid.
+ */
+static struct mbuf *
+pim_register_prepare(struct ip *ip, struct mbuf *m)
+{
+ struct mbuf *mb_copy = NULL;
+ int mtu;
+
+ /* Take care of delayed checksums */
+ if (m->m_pkthdr.csum_flags & CSUM_DELAY_DATA) {
+ in_delayed_cksum(m);
+ m->m_pkthdr.csum_flags &= ~CSUM_DELAY_DATA;
+ }
+
+ /*
+ * Copy the old packet & pullup its IP header into the
+ * new mbuf so we can modify it.
+ */
+ mb_copy = m_copypacket(m, M_DONTWAIT);
+ if (mb_copy == NULL)
+ return NULL;
+ mb_copy = m_pullup(mb_copy, ip->ip_hl << 2);
+ if (mb_copy == NULL)
+ return NULL;
+
+ /* take care of the TTL */
+ ip = mtod(mb_copy, struct ip *);
+ --ip->ip_ttl;
+
+ /* Compute the MTU after the PIM Register encapsulation */
+ mtu = 0xffff - sizeof(pim_encap_iphdr) - sizeof(pim_encap_pimhdr);
+
+ if (ip->ip_len <= mtu) {
+ /* Turn the IP header into a valid one */
+ ip->ip_len = htons(ip->ip_len);
+ ip->ip_off = htons(ip->ip_off);
+ ip->ip_sum = 0;
+ ip->ip_sum = in_cksum(mb_copy, ip->ip_hl << 2);
+ } else {
+ /* Fragment the packet */
+ if (ip_fragment(ip, &mb_copy, mtu, 0, CSUM_DELAY_IP) != 0) {
+ m_freem(mb_copy);
+ return NULL;
+ }
+ }
+ return mb_copy;
+}
+
+/*
+ * Send an upcall with the data packet to the user-level process.
+ */
+static int
+pim_register_send_upcall(struct ip *ip, struct vif *vifp,
+ struct mbuf *mb_copy, struct mfc *rt)
+{
+ struct mbuf *mb_first;
+ int len = ntohs(ip->ip_len);
+ struct igmpmsg *im;
+ struct sockaddr_in k_igmpsrc = { sizeof k_igmpsrc, AF_INET };
+
+ VIF_LOCK_ASSERT();
+
+ /*
+ * Add a new mbuf with an upcall header
+ */
+ MGETHDR(mb_first, M_DONTWAIT, MT_DATA);
+ if (mb_first == NULL) {
+ m_freem(mb_copy);
+ return ENOBUFS;
+ }
+ mb_first->m_data += max_linkhdr;
+ mb_first->m_pkthdr.len = len + sizeof(struct igmpmsg);
+ mb_first->m_len = sizeof(struct igmpmsg);
+ mb_first->m_next = mb_copy;
+
+ /* Send message to routing daemon */
+ im = mtod(mb_first, struct igmpmsg *);
+ im->im_msgtype = IGMPMSG_WHOLEPKT;
+ im->im_mbz = 0;
+ im->im_vif = vifp - V_viftable;
+ im->im_src = ip->ip_src;
+ im->im_dst = ip->ip_dst;
+
+ k_igmpsrc.sin_addr = ip->ip_src;
+
+ MRTSTAT_INC(mrts_upcalls);
+
+ if (socket_send(V_ip_mrouter, mb_first, &k_igmpsrc) < 0) {
+ CTR1(KTR_IPMF, "%s: socket queue full", __func__);
+ MRTSTAT_INC(mrts_upq_sockfull);
+ return ENOBUFS;
+ }
+
+ /* Keep statistics */
+ PIMSTAT_INC(pims_snd_registers_msgs);
+ PIMSTAT_ADD(pims_snd_registers_bytes, len);
+
+ return 0;
+}
+
+/*
+ * Encapsulate the data packet in PIM Register message and send it to the RP.
+ */
+static int
+pim_register_send_rp(struct ip *ip, struct vif *vifp, struct mbuf *mb_copy,
+ struct mfc *rt)
+{
+ struct mbuf *mb_first;
+ struct ip *ip_outer;
+ struct pim_encap_pimhdr *pimhdr;
+ int len = ntohs(ip->ip_len);
+ vifi_t vifi = rt->mfc_parent;
+
+ VIF_LOCK_ASSERT();
+
+ if ((vifi >= V_numvifs) || in_nullhost(V_viftable[vifi].v_lcl_addr)) {
+ m_freem(mb_copy);
+ return EADDRNOTAVAIL; /* The iif vif is invalid */
+ }
+
+ /*
+ * Add a new mbuf with the encapsulating header
+ */
+ MGETHDR(mb_first, M_DONTWAIT, MT_DATA);
+ if (mb_first == NULL) {
+ m_freem(mb_copy);
+ return ENOBUFS;
+ }
+ mb_first->m_data += max_linkhdr;
+ mb_first->m_len = sizeof(pim_encap_iphdr) + sizeof(pim_encap_pimhdr);
+ mb_first->m_next = mb_copy;
+
+ mb_first->m_pkthdr.len = len + mb_first->m_len;
+
+ /*
+ * Fill in the encapsulating IP and PIM header
+ */
+ ip_outer = mtod(mb_first, struct ip *);
+ *ip_outer = pim_encap_iphdr;
+ ip_outer->ip_id = ip_newid();
+ ip_outer->ip_len = len + sizeof(pim_encap_iphdr) + sizeof(pim_encap_pimhdr);
+ ip_outer->ip_src = V_viftable[vifi].v_lcl_addr;
+ ip_outer->ip_dst = rt->mfc_rp;
+ /*
+ * Copy the inner header TOS to the outer header, and take care of the
+ * IP_DF bit.
+ */
+ ip_outer->ip_tos = ip->ip_tos;
+ if (ntohs(ip->ip_off) & IP_DF)
+ ip_outer->ip_off |= IP_DF;
+ pimhdr = (struct pim_encap_pimhdr *)((caddr_t)ip_outer
+ + sizeof(pim_encap_iphdr));
+ *pimhdr = pim_encap_pimhdr;
+ /* If the iif crosses a border, set the Border-bit */
+ if (rt->mfc_flags[vifi] & MRT_MFC_FLAGS_BORDER_VIF & V_mrt_api_config)
+ pimhdr->flags |= htonl(PIM_BORDER_REGISTER);
+
+ mb_first->m_data += sizeof(pim_encap_iphdr);
+ pimhdr->pim.pim_cksum = in_cksum(mb_first, sizeof(pim_encap_pimhdr));
+ mb_first->m_data -= sizeof(pim_encap_iphdr);
+
+ send_packet(vifp, mb_first);
+
+ /* Keep statistics */
+ PIMSTAT_INC(pims_snd_registers_msgs);
+ PIMSTAT_ADD(pims_snd_registers_bytes, len);
+
+ return 0;
+}
+
+/*
+ * pim_encapcheck() is called by the encap4_input() path at runtime to
+ * determine if a packet is for PIM, which allows PIM to be dynamically
+ * loaded into the kernel.
+ */
+static int
+pim_encapcheck(const struct mbuf *m, int off, int proto, void *arg)
+{
+
+#ifdef DIAGNOSTIC
+ KASSERT(proto == IPPROTO_PIM, ("not for IPPROTO_PIM"));
+#endif
+ if (proto != IPPROTO_PIM)
+ return 0; /* not for us; reject the datagram. */
+
+ return 64; /* claim the datagram. */
+}
+
+/*
+ * PIM-SMv2 and PIM-DM message processing.
+ * Receives and verifies the PIM control messages, and passes them
+ * up to the listening socket, using rip_input().
+ * The only message with special processing is the PIM_REGISTER message
+ * (used by PIM-SM): the PIM header is stripped off, and the inner packet
+ * is passed to if_simloop().
+ */
+void
+pim_input(struct mbuf *m, int off)
+{
+ struct ip *ip = mtod(m, struct ip *);
+ struct pim *pim;
+ int minlen;
+ int datalen = ip->ip_len;
+ int ip_tos;
+ int iphlen = off;
+
+ /* Keep statistics */
+ PIMSTAT_INC(pims_rcv_total_msgs);
+ PIMSTAT_ADD(pims_rcv_total_bytes, datalen);
+
+ /*
+ * Validate lengths
+ */
+ if (datalen < PIM_MINLEN) {
+ PIMSTAT_INC(pims_rcv_tooshort);
+ CTR3(KTR_IPMF, "%s: short packet (%d) from %s",
+ __func__, datalen, inet_ntoa(ip->ip_src));
+ m_freem(m);
+ return;
+ }
+
+ /*
+ * If the packet is at least as big as a REGISTER, go ahead
+ * and grab the PIM REGISTER header size, to avoid another
+ * possible m_pullup() later.
+ *
+ * PIM_MINLEN == pimhdr + u_int32_t == 4 + 4 = 8
+ * PIM_REG_MINLEN == pimhdr + reghdr + encap_iphdr == 4 + 4 + 20 = 28
+ */
+ minlen = iphlen + (datalen >= PIM_REG_MINLEN ? PIM_REG_MINLEN : PIM_MINLEN);
+ /*
+ * Get the IP and PIM headers in contiguous memory, and
+ * possibly the PIM REGISTER header.
+ */
+ if ((m->m_flags & M_EXT || m->m_len < minlen) &&
+ (m = m_pullup(m, minlen)) == 0) {
+ CTR1(KTR_IPMF, "%s: m_pullup() failed", __func__);
+ return;
+ }
+
+ /* m_pullup() may have given us a new mbuf so reset ip. */
+ ip = mtod(m, struct ip *);
+ ip_tos = ip->ip_tos;
+
+ /* adjust mbuf to point to the PIM header */
+ m->m_data += iphlen;
+ m->m_len -= iphlen;
+ pim = mtod(m, struct pim *);
+
+ /*
+ * Validate checksum. If PIM REGISTER, exclude the data packet.
+ *
+ * XXX: some older PIMv2 implementations don't make this distinction,
+ * so for compatibility reasons perform the checksum over part of the
+ * message first and, on error, over the whole message.
+ */
+ if (PIM_VT_T(pim->pim_vt) == PIM_REGISTER && in_cksum(m, PIM_MINLEN) == 0) {
+ /* do nothing, checksum okay */
+ } else if (in_cksum(m, datalen)) {
+ PIMSTAT_INC(pims_rcv_badsum);
+ CTR1(KTR_IPMF, "%s: invalid checksum", __func__);
+ m_freem(m);
+ return;
+ }
+
+ /* PIM version check */
+ if (PIM_VT_V(pim->pim_vt) < PIM_VERSION) {
+ PIMSTAT_INC(pims_rcv_badversion);
+ CTR3(KTR_IPMF, "%s: bad version %d expect %d", __func__,
+ (int)PIM_VT_V(pim->pim_vt), PIM_VERSION);
+ m_freem(m);
+ return;
+ }
+
+ /* restore mbuf back to the outer IP */
+ m->m_data -= iphlen;
+ m->m_len += iphlen;
+
+ if (PIM_VT_T(pim->pim_vt) == PIM_REGISTER) {
+ /*
+ * Since this is a REGISTER, we'll make a copy of the register
+ * headers ip + pim + u_int32 + encap_ip, to be passed up to the
+ * routing daemon.
+ */
+ struct sockaddr_in dst = { sizeof(dst), AF_INET };
+ struct mbuf *mcp;
+ struct ip *encap_ip;
+ u_int32_t *reghdr;
+ struct ifnet *vifp;
+
+ VIF_LOCK();
+ if ((V_reg_vif_num >= V_numvifs) || (V_reg_vif_num == VIFI_INVALID)) {
+ VIF_UNLOCK();
+ CTR2(KTR_IPMF, "%s: register vif not set: %d", __func__,
+ (int)V_reg_vif_num);
+ m_freem(m);
+ return;
+ }
+ /* XXX need refcnt? */
+ vifp = V_viftable[V_reg_vif_num].v_ifp;
+ VIF_UNLOCK();
+
+ /*
+ * Validate length
+ */
+ if (datalen < PIM_REG_MINLEN) {
+ PIMSTAT_INC(pims_rcv_tooshort);
+ PIMSTAT_INC(pims_rcv_badregisters);
+ CTR1(KTR_IPMF, "%s: register packet size too small", __func__);
+ m_freem(m);
+ return;
+ }
+
+ reghdr = (u_int32_t *)(pim + 1);
+ encap_ip = (struct ip *)(reghdr + 1);
+
+ CTR3(KTR_IPMF, "%s: register: encap ip src %s len %d",
+ __func__, inet_ntoa(encap_ip->ip_src), ntohs(encap_ip->ip_len));
+
+ /* verify the version number of the inner packet */
+ if (encap_ip->ip_v != IPVERSION) {
+ PIMSTAT_INC(pims_rcv_badregisters);
+ CTR1(KTR_IPMF, "%s: bad encap ip version", __func__);
+ m_freem(m);
+ return;
+ }
+
+ /* verify the inner packet is destined to a mcast group */
+ if (!IN_MULTICAST(ntohl(encap_ip->ip_dst.s_addr))) {
+ PIMSTAT_INC(pims_rcv_badregisters);
+ CTR2(KTR_IPMF, "%s: bad encap ip dest %s", __func__,
+ inet_ntoa(encap_ip->ip_dst));
+ m_freem(m);
+ return;
+ }
+
+ /* If a NULL_REGISTER, pass it to the daemon */
+ if ((ntohl(*reghdr) & PIM_NULL_REGISTER))
+ goto pim_input_to_daemon;
+
+ /*
+ * Copy the TOS from the outer IP header to the inner IP header.
+ */
+ if (encap_ip->ip_tos != ip_tos) {
+ /* Outer TOS -> inner TOS */
+ encap_ip->ip_tos = ip_tos;
+ /* Recompute the inner header checksum. Sigh... */
+
+ /* adjust mbuf to point to the inner IP header */
+ m->m_data += (iphlen + PIM_MINLEN);
+ m->m_len -= (iphlen + PIM_MINLEN);
+
+ encap_ip->ip_sum = 0;
+ encap_ip->ip_sum = in_cksum(m, encap_ip->ip_hl << 2);
+
+ /* restore mbuf to point back to the outer IP header */
+ m->m_data -= (iphlen + PIM_MINLEN);
+ m->m_len += (iphlen + PIM_MINLEN);
+ }
+
+ /*
+ * Decapsulate the inner IP packet and loopback to forward it
+ * as a normal multicast packet. Also, make a copy of the
+ * outer_iphdr + pimhdr + reghdr + encap_iphdr
+ * to pass to the daemon later, so it can take the appropriate
+ * actions (e.g., send back PIM_REGISTER_STOP).
+ * XXX: here m->m_data points to the outer IP header.
+ */
+ mcp = m_copy(m, 0, iphlen + PIM_REG_MINLEN);
+ if (mcp == NULL) {
+ CTR1(KTR_IPMF, "%s: m_copy() failed", __func__);
+ m_freem(m);
+ return;
+ }
+
+ /* Keep statistics */
+ /* XXX: registers_bytes include only the encap. mcast pkt */
+ PIMSTAT_INC(pims_rcv_registers_msgs);
+ PIMSTAT_ADD(pims_rcv_registers_bytes, ntohs(encap_ip->ip_len));
+
+ /*
+ * forward the inner ip packet; point m_data at the inner ip.
+ */
+ m_adj(m, iphlen + PIM_MINLEN);
+
+ CTR4(KTR_IPMF,
+ "%s: forward decap'd REGISTER: src %lx dst %lx vif %d",
+ __func__,
+ (u_long)ntohl(encap_ip->ip_src.s_addr),
+ (u_long)ntohl(encap_ip->ip_dst.s_addr),
+ (int)V_reg_vif_num);
+
+ /* NB: vifp was collected above; can it change on us? */
+ if_simloop(vifp, m, dst.sin_family, 0);
+
+ /* prepare the register head to send to the mrouting daemon */
+ m = mcp;
+ }
+
+pim_input_to_daemon:
+ /*
+ * Pass the PIM message up to the daemon; if it is a Register message,
+ * pass the 'head' only up to the daemon. This includes the
+ * outer IP header, PIM header, PIM-Register header and the
+ * inner IP header.
+ * XXX: the outer IP header pkt size of a Register is not adjusted to
+ * reflect the fact that the inner multicast data is truncated.
+ */
+ rip_input(m, iphlen);
+
+ return;
+}
+
+static int
+sysctl_mfctable(SYSCTL_HANDLER_ARGS)
+{
+ struct mfc *rt;
+ int error, i;
+
+ if (req->newptr)
+ return (EPERM);
+ if (V_mfchashtbl == NULL) /* XXX unlocked */
+ return (0);
+ error = sysctl_wire_old_buffer(req, 0);
+ if (error)
+ return (error);
+
+ MFC_LOCK();
+ for (i = 0; i < mfchashsize; i++) {
+ LIST_FOREACH(rt, &V_mfchashtbl[i], mfc_hash) {
+ error = SYSCTL_OUT(req, rt, sizeof(struct mfc));
+ if (error)
+ goto out_locked;
+ }
+ }
+out_locked:
+ MFC_UNLOCK();
+ return (error);
+}
+
+SYSCTL_NODE(_net_inet_ip, OID_AUTO, mfctable, CTLFLAG_RD, sysctl_mfctable,
+ "IPv4 Multicast Forwarding Table (struct *mfc[mfchashsize], "
+ "netinet/ip_mroute.h)");
+
+static void
+vnet_mroute_init(const void *unused __unused)
+{
+
+ MALLOC(V_nexpire, u_char *, mfchashsize, M_MRTABLE, M_WAITOK|M_ZERO);
+ bzero(V_bw_meter_timers, sizeof(V_bw_meter_timers));
+ callout_init(&V_expire_upcalls_ch, CALLOUT_MPSAFE);
+ callout_init(&V_bw_upcalls_ch, CALLOUT_MPSAFE);
+ callout_init(&V_bw_meter_ch, CALLOUT_MPSAFE);
+}
+
+VNET_SYSINIT(vnet_mroute_init, SI_SUB_PSEUDO, SI_ORDER_MIDDLE, vnet_mroute_init,
+ NULL);
+
+static void
+vnet_mroute_uninit(const void *unused __unused)
+{
+
+ FREE(V_nexpire, M_MRTABLE);
+ V_nexpire = NULL;
+}
+
+VNET_SYSUNINIT(vnet_mroute_uninit, SI_SUB_PSEUDO, SI_ORDER_MIDDLE,
+ vnet_mroute_uninit, NULL);
+
+static int
+ip_mroute_modevent(module_t mod, int type, void *unused)
+{
+
+ switch (type) {
+ case MOD_LOAD:
+ MROUTER_LOCK_INIT();
+
+ if_detach_event_tag = EVENTHANDLER_REGISTER(ifnet_departure_event,
+ if_detached_event, NULL, EVENTHANDLER_PRI_ANY);
+ if (if_detach_event_tag == NULL) {
+ printf("ip_mroute: unable to ifnet_deperture_even handler\n");
+ MROUTER_LOCK_DESTROY();
+ return (EINVAL);
+ }
+
+ MFC_LOCK_INIT();
+ VIF_LOCK_INIT();
+
+ mfchashsize = MFCHASHSIZE;
+#ifndef __rtems__
+ if (TUNABLE_ULONG_FETCH("net.inet.ip.mfchashsize", &mfchashsize) &&
+ !powerof2(mfchashsize)) {
+ printf("WARNING: %s not a power of 2; using default\n",
+ "net.inet.ip.mfchashsize");
+ mfchashsize = MFCHASHSIZE;
+ }
+#endif
+
+ pim_squelch_wholepkt = 0;
+ TUNABLE_ULONG_FETCH("net.inet.pim.squelch_wholepkt",
+ &pim_squelch_wholepkt);
+
+ pim_encap_cookie = encap_attach_func(AF_INET, IPPROTO_PIM,
+ pim_encapcheck, &in_pim_protosw, NULL);
+ if (pim_encap_cookie == NULL) {
+ printf("ip_mroute: unable to attach pim encap\n");
+ VIF_LOCK_DESTROY();
+ MFC_LOCK_DESTROY();
+ MROUTER_LOCK_DESTROY();
+ return (EINVAL);
+ }
+
+ ip_mcast_src = X_ip_mcast_src;
+ ip_mforward = X_ip_mforward;
+ ip_mrouter_done = X_ip_mrouter_done;
+ ip_mrouter_get = X_ip_mrouter_get;
+ ip_mrouter_set = X_ip_mrouter_set;
+
+ ip_rsvp_force_done = X_ip_rsvp_force_done;
+ ip_rsvp_vif = X_ip_rsvp_vif;
+
+ legal_vif_num = X_legal_vif_num;
+ mrt_ioctl = X_mrt_ioctl;
+ rsvp_input_p = X_rsvp_input;
+ break;
+
+ case MOD_UNLOAD:
+ /*
+ * Typically module unload happens after the user-level
+ * process has shut down the kernel services (the check
+ * below ensures someone can't just yank the module out
+ * from under a running process). But if the module is
+ * just loaded and then unloaded w/o starting up a user
+ * process we still need to clean up.
+ */
+ MROUTER_LOCK();
+ if (ip_mrouter_cnt != 0) {
+ MROUTER_UNLOCK();
+ return (EINVAL);
+ }
+ ip_mrouter_unloading = 1;
+ MROUTER_UNLOCK();
+
+ EVENTHANDLER_DEREGISTER(ifnet_departure_event, if_detach_event_tag);
+
+ if (pim_encap_cookie) {
+ encap_detach(pim_encap_cookie);
+ pim_encap_cookie = NULL;
+ }
+
+ ip_mcast_src = NULL;
+ ip_mforward = NULL;
+ ip_mrouter_done = NULL;
+ ip_mrouter_get = NULL;
+ ip_mrouter_set = NULL;
+
+ ip_rsvp_force_done = NULL;
+ ip_rsvp_vif = NULL;
+
+ legal_vif_num = NULL;
+ mrt_ioctl = NULL;
+ rsvp_input_p = NULL;
+
+ VIF_LOCK_DESTROY();
+ MFC_LOCK_DESTROY();
+ MROUTER_LOCK_DESTROY();
+ break;
+
+ default:
+ return EOPNOTSUPP;
+ }
+ return 0;
+}
+
+static moduledata_t ip_mroutemod = {
+ "ip_mroute",
+ ip_mroute_modevent,
+ 0
+};
+
+DECLARE_MODULE(ip_mroute, ip_mroutemod, SI_SUB_PSEUDO, SI_ORDER_ANY);
diff --git a/rtems/freebsd/netinet/ip_mroute.h b/rtems/freebsd/netinet/ip_mroute.h
new file mode 100644
index 00000000..3bc7f52f
--- /dev/null
+++ b/rtems/freebsd/netinet/ip_mroute.h
@@ -0,0 +1,359 @@
+/*-
+ * Copyright (c) 1989 Stephen Deering.
+ * Copyright (c) 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Stephen Deering of Stanford University.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)ip_mroute.h 8.1 (Berkeley) 6/10/93
+ * $FreeBSD$
+ */
+
+#ifndef _NETINET_IP_MROUTE_HH_
+#define _NETINET_IP_MROUTE_HH_
+
+/*
+ * Definitions for IP multicast forwarding.
+ *
+ * Written by David Waitzman, BBN Labs, August 1988.
+ * Modified by Steve Deering, Stanford, February 1989.
+ * Modified by Ajit Thyagarajan, PARC, August 1993.
+ * Modified by Ajit Thyagarajan, PARC, August 1994.
+ * Modified by Ahmed Helmy, SGI, June 1996.
+ * Modified by Pavlin Radoslavov, ICSI, October 2002.
+ *
+ * MROUTING Revision: 3.3.1.3
+ * and PIM-SMv2 and PIM-DM support, advanced API support,
+ * bandwidth metering and signaling.
+ */
+
+/*
+ * Multicast Routing set/getsockopt commands.
+ */
+#define MRT_INIT 100 /* initialize forwarder */
+#define MRT_DONE 101 /* shut down forwarder */
+#define MRT_ADD_VIF 102 /* create virtual interface */
+#define MRT_DEL_VIF 103 /* delete virtual interface */
+#define MRT_ADD_MFC 104 /* insert forwarding cache entry */
+#define MRT_DEL_MFC 105 /* delete forwarding cache entry */
+#define MRT_VERSION 106 /* get kernel version number */
+#define MRT_ASSERT 107 /* enable assert processing */
+#define MRT_PIM MRT_ASSERT /* enable PIM processing */
+#define MRT_API_SUPPORT 109 /* supported MRT API */
+#define MRT_API_CONFIG 110 /* config MRT API */
+#define MRT_ADD_BW_UPCALL 111 /* create bandwidth monitor */
+#define MRT_DEL_BW_UPCALL 112 /* delete bandwidth monitor */
+
+/*
+ * Types and macros for handling bitmaps with one bit per virtual interface.
+ */
+#define MAXVIFS 32
+typedef u_long vifbitmap_t;
+typedef u_short vifi_t; /* type of a vif index */
+#define ALL_VIFS (vifi_t)-1
+
+#define VIFM_SET(n, m) ((m) |= (1 << (n)))
+#define VIFM_CLR(n, m) ((m) &= ~(1 << (n)))
+#define VIFM_ISSET(n, m) ((m) & (1 << (n)))
+#define VIFM_CLRALL(m) ((m) = 0x00000000)
+#define VIFM_COPY(mfrom, mto) ((mto) = (mfrom))
+#define VIFM_SAME(m1, m2) ((m1) == (m2))
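+
+/*
+ * Example (editor's sketch, not part of the original sources): marking
+ * and testing vifs in a bitmap with the macros above.
+ *
+ *	vifbitmap_t map;
+ *
+ *	VIFM_CLRALL(map);
+ *	VIFM_SET(1, map);
+ *	VIFM_SET(3, map);
+ *	if (VIFM_ISSET(3, map))
+ *		VIFM_CLR(3, map);
+ */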
+
+struct mfc;
+
+/*
+ * Argument structure for MRT_ADD_VIF.
+ * (MRT_DEL_VIF takes a single vifi_t argument.)
+ */
+struct vifctl {
+ vifi_t vifc_vifi; /* the index of the vif to be added */
+ u_char vifc_flags; /* VIFF_ flags defined below */
+ u_char vifc_threshold; /* min ttl required to forward on vif */
+ u_int vifc_rate_limit; /* max rate */
+ struct in_addr vifc_lcl_addr; /* local interface address */
+ struct in_addr vifc_rmt_addr; /* remote address (tunnels only) */
+};
+
+#define VIFF_TUNNEL 0x1 /* no-op; retained for old source */
+#define VIFF_SRCRT 0x2 /* no-op; retained for old source */
+#define VIFF_REGISTER 0x4 /* used for PIM Register encap/decap */
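+
+/*
+ * Usage sketch (editor's addition, not from the original sources): a
+ * routing daemon that has already issued MRT_INIT on its raw IGMP socket
+ * "s" installs a physical interface as a vif; the vif index and address
+ * below are hypothetical.
+ *
+ *	struct vifctl vc;
+ *
+ *	memset(&vc, 0, sizeof(vc));
+ *	vc.vifc_vifi = 0;			(hypothetical vif index)
+ *	vc.vifc_flags = 0;			(physical, not VIFF_REGISTER)
+ *	vc.vifc_threshold = 1;			(min ttl to forward)
+ *	vc.vifc_lcl_addr.s_addr = inet_addr("192.0.2.1");
+ *	if (setsockopt(s, IPPROTO_IP, MRT_ADD_VIF, &vc, sizeof(vc)) < 0)
+ *		err(1, "MRT_ADD_VIF");
+ */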
+
+/*
+ * Argument structure for MRT_ADD_MFC and MRT_DEL_MFC
+ * XXX if you change this, make sure to change struct mfcctl2 as well.
+ */
+struct mfcctl {
+ struct in_addr mfcc_origin; /* ip origin of mcasts */
+ struct in_addr mfcc_mcastgrp; /* multicast group associated*/
+ vifi_t mfcc_parent; /* incoming vif */
+ u_char mfcc_ttls[MAXVIFS]; /* forwarding ttls on vifs */
+};
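+
+/*
+ * Usage sketch (editor's addition): installing a forwarding cache entry
+ * for a (source, group) pair on the same socket "s"; the addresses and
+ * vif numbers are hypothetical.
+ *
+ *	struct mfcctl mc;
+ *
+ *	memset(&mc, 0, sizeof(mc));
+ *	mc.mfcc_origin.s_addr = inet_addr("192.0.2.2");
+ *	mc.mfcc_mcastgrp.s_addr = inet_addr("239.1.1.1");
+ *	mc.mfcc_parent = 0;			(incoming vif)
+ *	mc.mfcc_ttls[1] = 1;			(forward on vif 1, ttl >= 1)
+ *	setsockopt(s, IPPROTO_IP, MRT_ADD_MFC, &mc, sizeof(mc));
+ */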
+
+/*
+ * The new argument structure for MRT_ADD_MFC and MRT_DEL_MFC overlays
+ * and extends the old struct mfcctl.
+ */
+struct mfcctl2 {
+ /* the mfcctl fields */
+ struct in_addr mfcc_origin; /* ip origin of mcasts */
+ struct in_addr mfcc_mcastgrp; /* multicast group associated*/
+ vifi_t mfcc_parent; /* incoming vif */
+ u_char mfcc_ttls[MAXVIFS]; /* forwarding ttls on vifs */
+
+ /* extension fields */
+ uint8_t mfcc_flags[MAXVIFS]; /* the MRT_MFC_FLAGS_* flags */
+ struct in_addr mfcc_rp; /* the RP address */
+};
+/*
+ * The advanced-API flags.
+ *
+ * The MRT_MFC_FLAGS_XXX API flags are also used as flags
+ * for the mfcc_flags field.
+ */
+#define MRT_MFC_FLAGS_DISABLE_WRONGVIF (1 << 0) /* disable WRONGVIF signals */
+#define MRT_MFC_FLAGS_BORDER_VIF (1 << 1) /* border vif */
+#define MRT_MFC_RP (1 << 8) /* enable RP address */
+#define MRT_MFC_BW_UPCALL (1 << 9) /* enable bw upcalls */
+#define MRT_MFC_FLAGS_ALL (MRT_MFC_FLAGS_DISABLE_WRONGVIF | \
+ MRT_MFC_FLAGS_BORDER_VIF)
+#define MRT_API_FLAGS_ALL (MRT_MFC_FLAGS_ALL | \
+ MRT_MFC_RP | \
+ MRT_MFC_BW_UPCALL)
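+
+/*
+ * Usage sketch (editor's addition): the advanced-API features must be
+ * enabled with MRT_API_CONFIG before use, e.g. to allow bandwidth
+ * upcalls:
+ *
+ *	uint32_t v = MRT_MFC_BW_UPCALL;
+ *
+ *	if (setsockopt(s, IPPROTO_IP, MRT_API_CONFIG, &v, sizeof(v)) < 0)
+ *		err(1, "MRT_API_CONFIG");
+ */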
+
+/*
+ * Structure for installing or delivering an upcall if the
+ * measured bandwidth is above or below a threshold.
+ *
+ * User programs (e.g. daemons) may have a need to know when the
+ * bandwidth used by some data flow is above or below some threshold.
+ * This interface allows the userland to specify the threshold (in
+ * bytes and/or packets) and the measurement interval. A flow is
+ * all packets with the same source and destination IP address.
+ * At the moment the code is only used for multicast destinations
+ * but there is nothing that prevents its use for unicast.
+ *
+ * The measurement interval cannot be shorter than some Tmin (currently, 3s).
+ * The threshold is set in packets and/or bytes per_interval.
+ *
+ * Measurement works as follows:
+ *
+ * For >= measurements:
+ * The first packet marks the start of a measurement interval.
+ * During an interval we count packets and bytes, and when we
+ * pass the threshold we deliver an upcall and we are done.
+ * The first packet after the end of the interval resets the
+ * count and restarts the measurement.
+ *
+ * For <= measurements:
+ * We start a timer to fire at the end of the interval, and
+ * then for each incoming packet we count packets and bytes.
+ * When the timer fires, we compare the value with the threshold,
+ * schedule an upcall if we are below, and restart the measurement
+ * (reschedule timer and zero counters).
+ */
+
+struct bw_data {
+ struct timeval b_time;
+ uint64_t b_packets;
+ uint64_t b_bytes;
+};
+
+struct bw_upcall {
+ struct in_addr bu_src; /* source address */
+ struct in_addr bu_dst; /* destination address */
+ uint32_t bu_flags; /* misc flags (see below) */
+#define BW_UPCALL_UNIT_PACKETS (1 << 0) /* threshold (in packets) */
+#define BW_UPCALL_UNIT_BYTES (1 << 1) /* threshold (in bytes) */
+#define BW_UPCALL_GEQ (1 << 2) /* upcall if bw >= threshold */
+#define BW_UPCALL_LEQ (1 << 3) /* upcall if bw <= threshold */
+#define BW_UPCALL_DELETE_ALL (1 << 4) /* delete all upcalls for s,d*/
+ struct bw_data bu_threshold; /* the bw threshold */
+ struct bw_data bu_measured; /* the measured bw */
+};
+
+/* max. number of upcalls to deliver together */
+#define BW_UPCALLS_MAX 128
+/* min. threshold time interval for bandwidth measurement */
+#define BW_UPCALL_THRESHOLD_INTERVAL_MIN_SEC 3
+#define BW_UPCALL_THRESHOLD_INTERVAL_MIN_USEC 0
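+
+/*
+ * Usage sketch (editor's addition): request an upcall when the
+ * hypothetical flow 192.0.2.2 -> 239.1.1.1 falls to 10 packets or fewer
+ * per 5s interval. Assumes MRT_API_CONFIG enabled MRT_MFC_BW_UPCALL and
+ * an MFC entry exists for the pair.
+ *
+ *	struct bw_upcall bu;
+ *
+ *	memset(&bu, 0, sizeof(bu));
+ *	bu.bu_src.s_addr = inet_addr("192.0.2.2");
+ *	bu.bu_dst.s_addr = inet_addr("239.1.1.1");
+ *	bu.bu_flags = BW_UPCALL_UNIT_PACKETS | BW_UPCALL_LEQ;
+ *	bu.bu_threshold.b_time.tv_sec = 5;	(>= the 3s minimum above)
+ *	bu.bu_threshold.b_packets = 10;
+ *	setsockopt(s, IPPROTO_IP, MRT_ADD_BW_UPCALL, &bu, sizeof(bu));
+ */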
+
+/*
+ * The kernel's multicast routing statistics.
+ */
+struct mrtstat {
+ u_long mrts_mfc_lookups; /* # forw. cache hash table hits */
+ u_long mrts_mfc_misses; /* # forw. cache hash table misses */
+ u_long mrts_upcalls; /* # calls to multicast routing daemon */
+ u_long mrts_no_route; /* no route for packet's origin */
+ u_long mrts_bad_tunnel; /* malformed tunnel options */
+ u_long mrts_cant_tunnel; /* no room for tunnel options */
+ u_long mrts_wrong_if; /* arrived on wrong interface */
+ u_long mrts_upq_ovflw; /* upcall Q overflow */
+ u_long mrts_cache_cleanups; /* # entries with no upcalls */
+ u_long mrts_drop_sel; /* pkts dropped selectively */
+ u_long mrts_q_overflow; /* pkts dropped - Q overflow */
+ u_long mrts_pkt2large; /* pkts dropped - size > BKT SIZE */
+ u_long mrts_upq_sockfull; /* upcalls dropped - socket full */
+};
+
+#ifdef _KERNEL
+#define MRTSTAT_ADD(name, val) V_mrtstat.name += (val)
+#define MRTSTAT_INC(name) MRTSTAT_ADD(name, 1)
+#endif
+
+/*
+ * Argument structure used by mrouted to get src-grp pkt counts
+ */
+struct sioc_sg_req {
+ struct in_addr src;
+ struct in_addr grp;
+ u_long pktcnt;
+ u_long bytecnt;
+ u_long wrong_if;
+};
+
+/*
+ * Argument structure used by mrouted to get vif pkt counts
+ */
+struct sioc_vif_req {
+ vifi_t vifi; /* vif number */
+ u_long icount; /* Input packet count on vif */
+ u_long ocount; /* Output packet count on vif */
+ u_long ibytes; /* Input byte count on vif */
+ u_long obytes; /* Output byte count on vif */
+};
+
+
+/*
+ * The kernel's virtual-interface structure.
+ */
+struct vif {
+ u_char v_flags; /* VIFF_ flags defined above */
+ u_char v_threshold; /* min ttl required to forward on vif*/
+ struct in_addr v_lcl_addr; /* local interface address */
+ struct in_addr v_rmt_addr; /* remote address (tunnels only) */
+ struct ifnet *v_ifp; /* pointer to interface */
+ u_long v_pkt_in; /* # pkts in on interface */
+ u_long v_pkt_out; /* # pkts out on interface */
+ u_long v_bytes_in; /* # bytes in on interface */
+ u_long v_bytes_out; /* # bytes out on interface */
+ struct route v_route; /* cached route */
+};
+
+#ifdef _KERNEL
+/*
+ * The kernel's multicast forwarding cache entry structure
+ */
+struct mfc {
+ LIST_ENTRY(mfc) mfc_hash;
+ struct in_addr mfc_origin; /* IP origin of mcasts */
+ struct in_addr mfc_mcastgrp; /* multicast group associated*/
+ vifi_t mfc_parent; /* incoming vif */
+ u_char mfc_ttls[MAXVIFS]; /* forwarding ttls on vifs */
+ u_long mfc_pkt_cnt; /* pkt count for src-grp */
+ u_long mfc_byte_cnt; /* byte count for src-grp */
+ u_long mfc_wrong_if; /* wrong if for src-grp */
+ int mfc_expire; /* time to clean entry up */
+ struct timeval mfc_last_assert; /* last time I sent an assert*/
+ uint8_t mfc_flags[MAXVIFS]; /* the MRT_MFC_FLAGS_* flags */
+ struct in_addr mfc_rp; /* the RP address */
+ struct bw_meter *mfc_bw_meter; /* list of bandwidth meters */
+ u_long mfc_nstall; /* # of packets awaiting mfc */
+ TAILQ_HEAD(, rtdetq) mfc_stall; /* q of packets awaiting mfc */
+};
+#endif /* _KERNEL */
+
+/*
+ * Struct used to communicate from the kernel to the multicast router;
+ * note the convenient similarity to an IP packet.
+ */
+struct igmpmsg {
+ uint32_t unused1;
+ uint32_t unused2;
+ u_char im_msgtype; /* what type of message */
+#define IGMPMSG_NOCACHE 1 /* no MFC in the kernel */
+#define IGMPMSG_WRONGVIF 2 /* packet came from wrong interface */
+#define IGMPMSG_WHOLEPKT 3 /* PIM pkt for user level encap. */
+#define IGMPMSG_BW_UPCALL 4 /* BW monitoring upcall */
+ u_char im_mbz; /* must be zero */
+ u_char im_vif; /* vif rec'd on */
+ u_char unused3;
+ struct in_addr im_src, im_dst;
+};
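+
+/*
+ * Reception sketch (editor's addition): the daemon reads whole packets
+ * from its IGMP socket; since im_mbz overlays the IP protocol field, a
+ * zero there marks a kernel upcall rather than a real IGMP packet.
+ * "buf" and the two handlers are hypothetical daemon-side names.
+ *
+ *	struct igmpmsg *im = (struct igmpmsg *)buf;
+ *
+ *	if (im->im_mbz == 0) {
+ *		switch (im->im_msgtype) {
+ *		case IGMPMSG_NOCACHE:
+ *			add_mfc(im->im_src, im->im_dst);
+ *			break;
+ *		case IGMPMSG_BW_UPCALL:
+ *			handle_bw_upcalls((struct bw_upcall *)(im + 1));
+ *			break;
+ *		}
+ *	}
+ */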
+
+#ifdef _KERNEL
+/*
+ * Argument structure used for pkt info. while upcall is made
+ */
+struct rtdetq {
+ TAILQ_ENTRY(rtdetq) rte_link;
+ struct mbuf *m; /* A copy of the packet */
+ struct ifnet *ifp; /* Interface pkt came in on */
+ vifi_t xmt_vif; /* Saved copy of imo_multicast_vif */
+};
+#define MAX_UPQ 4 /* max. no of pkts in upcall Q */
+#endif /* _KERNEL */
+
+/*
+ * Structure for measuring the bandwidth and sending an upcall if the
+ * measured bandwidth is above or below a threshold.
+ */
+struct bw_meter {
+ struct bw_meter *bm_mfc_next; /* next bw meter (same mfc) */
+ struct bw_meter *bm_time_next; /* next bw meter (same time) */
+ uint32_t bm_time_hash; /* the time hash value */
+ struct mfc *bm_mfc; /* the corresponding mfc */
+ uint32_t bm_flags; /* misc flags (see below) */
+#define BW_METER_UNIT_PACKETS (1 << 0) /* threshold (in packets) */
+#define BW_METER_UNIT_BYTES (1 << 1) /* threshold (in bytes) */
+#define BW_METER_GEQ (1 << 2) /* upcall if bw >= threshold */
+#define BW_METER_LEQ (1 << 3) /* upcall if bw <= threshold */
+#define BW_METER_USER_FLAGS (BW_METER_UNIT_PACKETS | \
+ BW_METER_UNIT_BYTES | \
+ BW_METER_GEQ | \
+ BW_METER_LEQ)
+
+#define BW_METER_UPCALL_DELIVERED (1 << 24) /* upcall was delivered */
+
+ struct bw_data bm_threshold; /* the upcall threshold */
+ struct bw_data bm_measured; /* the measured bw */
+ struct timeval bm_start_time; /* abs. time */
+};
+
+#ifdef _KERNEL
+
+struct sockopt;
+
+extern int (*ip_mrouter_set)(struct socket *, struct sockopt *);
+extern int (*ip_mrouter_get)(struct socket *, struct sockopt *);
+extern int (*ip_mrouter_done)(void);
+extern int (*mrt_ioctl)(u_long, caddr_t, int);
+
+#endif /* _KERNEL */
+
+#endif /* _NETINET_IP_MROUTE_HH_ */
diff --git a/rtems/freebsd/netinet/ip_options.c b/rtems/freebsd/netinet/ip_options.c
new file mode 100644
index 00000000..22f5b56a
--- /dev/null
+++ b/rtems/freebsd/netinet/ip_options.c
@@ -0,0 +1,747 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*
+ * Copyright (c) 1982, 1986, 1988, 1993
+ * The Regents of the University of California.
+ * Copyright (c) 2005 Andre Oppermann, Internet Business Solutions AG.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/freebsd/local/opt_ipstealth.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/domain.h>
+#include <rtems/freebsd/sys/protosw.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/time.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/syslog.h>
+#include <rtems/freebsd/sys/sysctl.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/if_types.h>
+#include <rtems/freebsd/net/if_var.h>
+#include <rtems/freebsd/net/if_dl.h>
+#include <rtems/freebsd/net/route.h>
+#include <rtems/freebsd/net/netisr.h>
+#include <rtems/freebsd/net/vnet.h>
+
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/in_systm.h>
+#include <rtems/freebsd/netinet/in_var.h>
+#include <rtems/freebsd/netinet/ip.h>
+#include <rtems/freebsd/netinet/in_pcb.h>
+#include <rtems/freebsd/netinet/ip_var.h>
+#include <rtems/freebsd/netinet/ip_options.h>
+#include <rtems/freebsd/netinet/ip_icmp.h>
+#include <rtems/freebsd/machine/in_cksum.h>
+
+#include <rtems/freebsd/sys/socketvar.h>
+
+#include <rtems/freebsd/security/mac/mac_framework.h>
+
+static int ip_dosourceroute = 0;
+SYSCTL_INT(_net_inet_ip, IPCTL_SOURCEROUTE, sourceroute, CTLFLAG_RW,
+ &ip_dosourceroute, 0, "Enable forwarding source routed IP packets");
+
+static int ip_acceptsourceroute = 0;
+SYSCTL_INT(_net_inet_ip, IPCTL_ACCEPTSOURCEROUTE, accept_sourceroute,
+ CTLFLAG_RW, &ip_acceptsourceroute, 0,
+ "Enable accepting source routed IP packets");
+
+int ip_doopts = 1; /* 0 = ignore, 1 = process, 2 = reject */
+SYSCTL_INT(_net_inet_ip, OID_AUTO, process_options, CTLFLAG_RW,
+ &ip_doopts, 0, "Enable IP options processing ([LS]SRR, RR, TS)");
+
+static void save_rte(struct mbuf *m, u_char *, struct in_addr);
+
+/*
+ * Do option processing on a datagram, possibly discarding it if bad options
+ * are encountered, or forwarding it if source-routed.
+ *
+ * The pass argument is used when operating in the IPSTEALTH mode to tell
+ * what options to process: [LS]SRR (pass 0) or the others (pass 1). The
+ * reason for as many as two passes is that when doing IPSTEALTH, non-routing
+ * options should be processed only if the packet is for us.
+ *
+ * Returns 1 if packet has been forwarded/freed, 0 if the packet should be
+ * processed further.
+ */
+int
+ip_dooptions(struct mbuf *m, int pass)
+{
+ struct ip *ip = mtod(m, struct ip *);
+ u_char *cp;
+ struct in_ifaddr *ia;
+ int opt, optlen, cnt, off, code, type = ICMP_PARAMPROB, forward = 0;
+ struct in_addr *sin, dst;
+ uint32_t ntime;
+ struct sockaddr_in ipaddr = { sizeof(ipaddr), AF_INET };
+
+ /* Ignore or reject packets with IP options. */
+ if (ip_doopts == 0)
+ return 0;
+ else if (ip_doopts == 2) {
+ type = ICMP_UNREACH;
+ code = ICMP_UNREACH_FILTER_PROHIB;
+ goto bad;
+ }
+
+ dst = ip->ip_dst;
+ cp = (u_char *)(ip + 1);
+ cnt = (ip->ip_hl << 2) - sizeof (struct ip);
+ for (; cnt > 0; cnt -= optlen, cp += optlen) {
+ opt = cp[IPOPT_OPTVAL];
+ if (opt == IPOPT_EOL)
+ break;
+ if (opt == IPOPT_NOP)
+ optlen = 1;
+ else {
+ if (cnt < IPOPT_OLEN + sizeof(*cp)) {
+ code = &cp[IPOPT_OLEN] - (u_char *)ip;
+ goto bad;
+ }
+ optlen = cp[IPOPT_OLEN];
+ if (optlen < IPOPT_OLEN + sizeof(*cp) || optlen > cnt) {
+ code = &cp[IPOPT_OLEN] - (u_char *)ip;
+ goto bad;
+ }
+ }
+ switch (opt) {
+
+ default:
+ break;
+
+ /*
+ * Source routing with record. Find interface with current
+ * destination address. If none on this machine then drop if
+ * strictly routed, or do nothing if loosely routed. Record
+ * interface address and bring up next address component. If
+ * strictly routed make sure next address is on directly
+ * accessible net.
+ */
+ case IPOPT_LSRR:
+ case IPOPT_SSRR:
+#ifdef IPSTEALTH
+ if (V_ipstealth && pass > 0)
+ break;
+#endif
+ if (optlen < IPOPT_OFFSET + sizeof(*cp)) {
+ code = &cp[IPOPT_OLEN] - (u_char *)ip;
+ goto bad;
+ }
+ if ((off = cp[IPOPT_OFFSET]) < IPOPT_MINOFF) {
+ code = &cp[IPOPT_OFFSET] - (u_char *)ip;
+ goto bad;
+ }
+ ipaddr.sin_addr = ip->ip_dst;
+ if (ifa_ifwithaddr_check((struct sockaddr *)&ipaddr)
+ == 0) {
+ if (opt == IPOPT_SSRR) {
+ type = ICMP_UNREACH;
+ code = ICMP_UNREACH_SRCFAIL;
+ goto bad;
+ }
+ if (!ip_dosourceroute)
+ goto nosourcerouting;
+ /*
+ * Loose routing, and not at next destination
+ * yet; nothing to do except forward.
+ */
+ break;
+ }
+ off--; /* 0 origin */
+ if (off > optlen - (int)sizeof(struct in_addr)) {
+ /*
+ * End of source route. Should be for us.
+ */
+ if (!ip_acceptsourceroute)
+ goto nosourcerouting;
+ save_rte(m, cp, ip->ip_src);
+ break;
+ }
+#ifdef IPSTEALTH
+ if (V_ipstealth)
+ goto dropit;
+#endif
+ if (!ip_dosourceroute) {
+ if (V_ipforwarding) {
+ char buf[16]; /* aaa.bbb.ccc.ddd\0 */
+ /*
+ * Acting as a router, so generate
+ * ICMP
+ */
+nosourcerouting:
+ strcpy(buf, inet_ntoa(ip->ip_dst));
+ log(LOG_WARNING,
+ "attempted source route from %s to %s\n",
+ inet_ntoa(ip->ip_src), buf);
+ type = ICMP_UNREACH;
+ code = ICMP_UNREACH_SRCFAIL;
+ goto bad;
+ } else {
+ /*
+ * Not acting as a router, so
+ * silently drop.
+ */
+#ifdef IPSTEALTH
+dropit:
+#endif
+ IPSTAT_INC(ips_cantforward);
+ m_freem(m);
+ return (1);
+ }
+ }
+
+ /*
+ * locate outgoing interface
+ */
+ (void)memcpy(&ipaddr.sin_addr, cp + off,
+ sizeof(ipaddr.sin_addr));
+
+ if (opt == IPOPT_SSRR) {
+#define INA struct in_ifaddr *
+#define SA struct sockaddr *
+ if ((ia = (INA)ifa_ifwithdstaddr((SA)&ipaddr)) == NULL)
+ ia = (INA)ifa_ifwithnet((SA)&ipaddr, 0);
+ } else
+/* XXX MRT 0 for routing */
+ ia = ip_rtaddr(ipaddr.sin_addr, M_GETFIB(m));
+ if (ia == NULL) {
+ type = ICMP_UNREACH;
+ code = ICMP_UNREACH_SRCFAIL;
+ goto bad;
+ }
+ ip->ip_dst = ipaddr.sin_addr;
+ (void)memcpy(cp + off, &(IA_SIN(ia)->sin_addr),
+ sizeof(struct in_addr));
+ ifa_free(&ia->ia_ifa);
+ cp[IPOPT_OFFSET] += sizeof(struct in_addr);
+ /*
+ * Let ip_intr's mcast routing check handle mcast pkts
+ */
+ forward = !IN_MULTICAST(ntohl(ip->ip_dst.s_addr));
+ break;
+
+ case IPOPT_RR:
+#ifdef IPSTEALTH
+ if (V_ipstealth && pass == 0)
+ break;
+#endif
+ if (optlen < IPOPT_OFFSET + sizeof(*cp)) {
+ code = &cp[IPOPT_OFFSET] - (u_char *)ip;
+ goto bad;
+ }
+ if ((off = cp[IPOPT_OFFSET]) < IPOPT_MINOFF) {
+ code = &cp[IPOPT_OFFSET] - (u_char *)ip;
+ goto bad;
+ }
+ /*
+ * If no space remains, ignore.
+ */
+ off--; /* 0 origin */
+ if (off > optlen - (int)sizeof(struct in_addr))
+ break;
+ (void)memcpy(&ipaddr.sin_addr, &ip->ip_dst,
+ sizeof(ipaddr.sin_addr));
+ /*
+ * Locate outgoing interface; if we're the
+ * destination, use the incoming interface (should be
+ * same).
+ */
+ if ((ia = (INA)ifa_ifwithaddr((SA)&ipaddr)) == NULL &&
+ (ia = ip_rtaddr(ipaddr.sin_addr, M_GETFIB(m))) == NULL) {
+ type = ICMP_UNREACH;
+ code = ICMP_UNREACH_HOST;
+ goto bad;
+ }
+ (void)memcpy(cp + off, &(IA_SIN(ia)->sin_addr),
+ sizeof(struct in_addr));
+ ifa_free(&ia->ia_ifa);
+ cp[IPOPT_OFFSET] += sizeof(struct in_addr);
+ break;
+
+ case IPOPT_TS:
+#ifdef IPSTEALTH
+ if (V_ipstealth && pass == 0)
+ break;
+#endif
+ code = cp - (u_char *)ip;
+ if (optlen < 4 || optlen > 40) {
+ code = &cp[IPOPT_OLEN] - (u_char *)ip;
+ goto bad;
+ }
+ if ((off = cp[IPOPT_OFFSET]) < 5) {
+ code = &cp[IPOPT_OLEN] - (u_char *)ip;
+ goto bad;
+ }
+ if (off > optlen - (int)sizeof(int32_t)) {
+ cp[IPOPT_OFFSET + 1] += (1 << 4);
+ if ((cp[IPOPT_OFFSET + 1] & 0xf0) == 0) {
+ code = &cp[IPOPT_OFFSET] - (u_char *)ip;
+ goto bad;
+ }
+ break;
+ }
+ off--; /* 0 origin */
+ sin = (struct in_addr *)(cp + off);
+ switch (cp[IPOPT_OFFSET + 1] & 0x0f) {
+
+ case IPOPT_TS_TSONLY:
+ break;
+
+ case IPOPT_TS_TSANDADDR:
+ if (off + sizeof(uint32_t) +
+ sizeof(struct in_addr) > optlen) {
+ code = &cp[IPOPT_OFFSET] - (u_char *)ip;
+ goto bad;
+ }
+ ipaddr.sin_addr = dst;
+ ia = (INA)ifaof_ifpforaddr((SA)&ipaddr,
+ m->m_pkthdr.rcvif);
+ if (ia == NULL)
+ continue;
+ (void)memcpy(sin, &IA_SIN(ia)->sin_addr,
+ sizeof(struct in_addr));
+ ifa_free(&ia->ia_ifa);
+ cp[IPOPT_OFFSET] += sizeof(struct in_addr);
+ off += sizeof(struct in_addr);
+ break;
+
+ case IPOPT_TS_PRESPEC:
+ if (off + sizeof(uint32_t) +
+ sizeof(struct in_addr) > optlen) {
+ code = &cp[IPOPT_OFFSET] - (u_char *)ip;
+ goto bad;
+ }
+ (void)memcpy(&ipaddr.sin_addr, sin,
+ sizeof(struct in_addr));
+ if (ifa_ifwithaddr_check((SA)&ipaddr) == 0)
+ continue;
+ cp[IPOPT_OFFSET] += sizeof(struct in_addr);
+ off += sizeof(struct in_addr);
+ break;
+
+ default:
+ code = &cp[IPOPT_OFFSET + 1] - (u_char *)ip;
+ goto bad;
+ }
+ ntime = iptime();
+ (void)memcpy(cp + off, &ntime, sizeof(uint32_t));
+ cp[IPOPT_OFFSET] += sizeof(uint32_t);
+ }
+ }
+ if (forward && V_ipforwarding) {
+ ip_forward(m, 1);
+ return (1);
+ }
+ return (0);
+bad:
+ icmp_error(m, type, code, 0, 0);
+ IPSTAT_INC(ips_badoptions);
+ return (1);
+}
+
+/*
+ * Save incoming source route for use in replies, to be picked up later by
+ * ip_srcroute if the receiver is interested.
+ */
+static void
+save_rte(struct mbuf *m, u_char *option, struct in_addr dst)
+{
+ unsigned olen;
+ struct ipopt_tag *opts;
+
+ opts = (struct ipopt_tag *)m_tag_get(PACKET_TAG_IPOPTIONS,
+ sizeof(struct ipopt_tag), M_NOWAIT);
+ if (opts == NULL)
+ return;
+
+ olen = option[IPOPT_OLEN];
+ if (olen > sizeof(opts->ip_srcrt) - (1 + sizeof(dst))) {
+ m_tag_free((struct m_tag *)opts);
+ return;
+ }
+ bcopy(option, opts->ip_srcrt.srcopt, olen);
+ opts->ip_nhops = (olen - IPOPT_OFFSET - 1) / sizeof(struct in_addr);
+ opts->ip_srcrt.dst = dst;
+ m_tag_prepend(m, (struct m_tag *)opts);
+}
+
+/*
+ * Retrieve incoming source route for use in replies, in the same form used
+ * by setsockopt. The first hop is placed before the options and will be
+ * removed later.
+ */
+struct mbuf *
+ip_srcroute(struct mbuf *m0)
+{
+ struct in_addr *p, *q;
+ struct mbuf *m;
+ struct ipopt_tag *opts;
+
+ opts = (struct ipopt_tag *)m_tag_find(m0, PACKET_TAG_IPOPTIONS, NULL);
+ if (opts == NULL)
+ return (NULL);
+
+ if (opts->ip_nhops == 0)
+ return (NULL);
+ m = m_get(M_DONTWAIT, MT_DATA);
+ if (m == NULL)
+ return (NULL);
+
+#define OPTSIZ (sizeof(opts->ip_srcrt.nop) + sizeof(opts->ip_srcrt.srcopt))
+
+ /* length is (nhops+1)*sizeof(addr) + sizeof(nop + srcrt header) */
+ m->m_len = opts->ip_nhops * sizeof(struct in_addr) +
+ sizeof(struct in_addr) + OPTSIZ;
+
+ /*
+ * First, save first hop for return route.
+ */
+ p = &(opts->ip_srcrt.route[opts->ip_nhops - 1]);
+ *(mtod(m, struct in_addr *)) = *p--;
+
+ /*
+ * Copy option fields and padding (nop) to mbuf.
+ */
+ opts->ip_srcrt.nop = IPOPT_NOP;
+ opts->ip_srcrt.srcopt[IPOPT_OFFSET] = IPOPT_MINOFF;
+ (void)memcpy(mtod(m, caddr_t) + sizeof(struct in_addr),
+ &(opts->ip_srcrt.nop), OPTSIZ);
+ q = (struct in_addr *)(mtod(m, caddr_t) +
+ sizeof(struct in_addr) + OPTSIZ);
+#undef OPTSIZ
+ /*
+ * Record return path as an IP source route, reversing the path
+ * (pointers are now aligned).
+ */
+ while (p >= opts->ip_srcrt.route) {
+ *q++ = *p--;
+ }
+ /*
+ * Last hop goes to final destination.
+ */
+ *q = opts->ip_srcrt.dst;
+ m_tag_delete(m0, (struct m_tag *)opts);
+ return (m);
+}
+
+/*
+ * Strip out IP options, at a higher-level protocol in the kernel. The second
+ * argument is the buffer to which the options would be moved; it is
+ * currently ignored, and nothing is returned.
+ *
+ * XXX should be deleted; last arg currently ignored.
+ */
+void
+ip_stripoptions(struct mbuf *m, struct mbuf *mopt)
+{
+ int i;
+ struct ip *ip = mtod(m, struct ip *);
+ caddr_t opts;
+ int olen;
+
+ olen = (ip->ip_hl << 2) - sizeof (struct ip);
+ opts = (caddr_t)(ip + 1);
+ i = m->m_len - (sizeof (struct ip) + olen);
+ bcopy(opts + olen, opts, (unsigned)i);
+ m->m_len -= olen;
+ if (m->m_flags & M_PKTHDR)
+ m->m_pkthdr.len -= olen;
+ ip->ip_v = IPVERSION;
+ ip->ip_hl = sizeof(struct ip) >> 2;
+}
+
+/*
+ * Insert IP options into preformed packet. Adjust IP destination as
+ * required for IP source routing, as indicated by a non-zero in_addr at the
+ * start of the options.
+ *
+ * XXX This routine assumes that the packet has no options in place.
+ */
+struct mbuf *
+ip_insertoptions(struct mbuf *m, struct mbuf *opt, int *phlen)
+{
+ struct ipoption *p = mtod(opt, struct ipoption *);
+ struct mbuf *n;
+ struct ip *ip = mtod(m, struct ip *);
+ unsigned optlen;
+
+ optlen = opt->m_len - sizeof(p->ipopt_dst);
+ if (optlen + ip->ip_len > IP_MAXPACKET) {
+ *phlen = 0;
+ return (m); /* XXX should fail */
+ }
+ if (p->ipopt_dst.s_addr)
+ ip->ip_dst = p->ipopt_dst;
+ if (m->m_flags & M_EXT || m->m_data - optlen < m->m_pktdat) {
+ MGETHDR(n, M_DONTWAIT, MT_DATA);
+ if (n == NULL) {
+ *phlen = 0;
+ return (m);
+ }
+ M_MOVE_PKTHDR(n, m);
+ n->m_pkthdr.rcvif = NULL;
+ n->m_pkthdr.len += optlen;
+ m->m_len -= sizeof(struct ip);
+ m->m_data += sizeof(struct ip);
+ n->m_next = m;
+ m = n;
+ m->m_len = optlen + sizeof(struct ip);
+ m->m_data += max_linkhdr;
+ bcopy(ip, mtod(m, void *), sizeof(struct ip));
+ } else {
+ m->m_data -= optlen;
+ m->m_len += optlen;
+ m->m_pkthdr.len += optlen;
+ bcopy(ip, mtod(m, void *), sizeof(struct ip));
+ }
+ ip = mtod(m, struct ip *);
+ bcopy(p->ipopt_list, ip + 1, optlen);
+ *phlen = sizeof(struct ip) + optlen;
+ ip->ip_v = IPVERSION;
+ ip->ip_hl = *phlen >> 2;
+ ip->ip_len += optlen;
+ return (m);
+}
+
+/*
+ * Copy options from ip to jp, omitting those not copied during
+ * fragmentation.
+ */
+int
+ip_optcopy(struct ip *ip, struct ip *jp)
+{
+ u_char *cp, *dp;
+ int opt, optlen, cnt;
+
+ cp = (u_char *)(ip + 1);
+ dp = (u_char *)(jp + 1);
+ cnt = (ip->ip_hl << 2) - sizeof (struct ip);
+ for (; cnt > 0; cnt -= optlen, cp += optlen) {
+ opt = cp[0];
+ if (opt == IPOPT_EOL)
+ break;
+ if (opt == IPOPT_NOP) {
+ /* Preserve for IP mcast tunnel's LSRR alignment. */
+ *dp++ = IPOPT_NOP;
+ optlen = 1;
+ continue;
+ }
+
+ KASSERT(cnt >= IPOPT_OLEN + sizeof(*cp),
+ ("ip_optcopy: malformed ipv4 option"));
+ optlen = cp[IPOPT_OLEN];
+ KASSERT(optlen >= IPOPT_OLEN + sizeof(*cp) && optlen <= cnt,
+ ("ip_optcopy: malformed ipv4 option"));
+
+ /* Bogus lengths should have been caught by ip_dooptions. */
+ if (optlen > cnt)
+ optlen = cnt;
+ if (IPOPT_COPIED(opt)) {
+ bcopy(cp, dp, optlen);
+ dp += optlen;
+ }
+ }
+ for (optlen = dp - (u_char *)(jp+1); optlen & 0x3; optlen++)
+ *dp++ = IPOPT_EOL;
+ return (optlen);
+}
+
+/*
+ * Set up IP options in pcb for insertion in output packets. Store in mbuf
+ * with pointer in pcbopt, adding pseudo-option with destination address if
+ * source routed.
+ */
+int
+ip_pcbopts(struct inpcb *inp, int optname, struct mbuf *m)
+{
+ int cnt, optlen;
+ u_char *cp;
+ struct mbuf **pcbopt;
+ u_char opt;
+
+ INP_WLOCK_ASSERT(inp);
+
+ pcbopt = &inp->inp_options;
+
+ /* turn off any old options */
+ if (*pcbopt)
+ (void)m_free(*pcbopt);
+ *pcbopt = 0;
+ if (m == NULL || m->m_len == 0) {
+ /*
+ * Only turning off any previous options.
+ */
+ if (m != NULL)
+ (void)m_free(m);
+ return (0);
+ }
+
+ if (m->m_len % sizeof(int32_t))
+ goto bad;
+ /*
+ * IP first-hop destination address will be stored before actual
+ * options; move other options back and clear it when none present.
+ */
+ if (m->m_data + m->m_len + sizeof(struct in_addr) >= &m->m_dat[MLEN])
+ goto bad;
+ cnt = m->m_len;
+ m->m_len += sizeof(struct in_addr);
+ cp = mtod(m, u_char *) + sizeof(struct in_addr);
+ bcopy(mtod(m, void *), cp, (unsigned)cnt);
+ bzero(mtod(m, void *), sizeof(struct in_addr));
+
+ for (; cnt > 0; cnt -= optlen, cp += optlen) {
+ opt = cp[IPOPT_OPTVAL];
+ if (opt == IPOPT_EOL)
+ break;
+ if (opt == IPOPT_NOP)
+ optlen = 1;
+ else {
+ if (cnt < IPOPT_OLEN + sizeof(*cp))
+ goto bad;
+ optlen = cp[IPOPT_OLEN];
+ if (optlen < IPOPT_OLEN + sizeof(*cp) || optlen > cnt)
+ goto bad;
+ }
+ switch (opt) {
+
+ default:
+ break;
+
+ case IPOPT_LSRR:
+ case IPOPT_SSRR:
+ /*
+ * User process specifies route as:
+ *
+ * ->A->B->C->D
+ *
+ * D must be our final destination (but we can't
+ * check that since we may not have connected yet).
+ * A is first hop destination, which doesn't appear
+ * in actual IP option, but is stored before the
+ * options.
+ */
+ /* XXX-BZ PRIV_NETINET_SETHDROPTS? */
+ if (optlen < IPOPT_MINOFF - 1 + sizeof(struct in_addr))
+ goto bad;
+ m->m_len -= sizeof(struct in_addr);
+ cnt -= sizeof(struct in_addr);
+ optlen -= sizeof(struct in_addr);
+ cp[IPOPT_OLEN] = optlen;
+ /*
+ * Move first hop before start of options.
+ */
+ bcopy((caddr_t)&cp[IPOPT_OFFSET+1], mtod(m, caddr_t),
+ sizeof(struct in_addr));
+ /*
+ * Then copy rest of options back
+ * to close up the deleted entry.
+ */
+ bcopy((&cp[IPOPT_OFFSET+1] + sizeof(struct in_addr)),
+ &cp[IPOPT_OFFSET+1],
+ (unsigned)cnt - (IPOPT_MINOFF - 1));
+ break;
+ }
+ }
+ if (m->m_len > MAX_IPOPTLEN + sizeof(struct in_addr))
+ goto bad;
+ *pcbopt = m;
+ return (0);
+
+bad:
+ (void)m_free(m);
+ return (EINVAL);
+}
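+
+/*
+ * Usage sketch (editor's addition): the userland buffer that reaches
+ * ip_pcbopts() via setsockopt(IP_OPTIONS) for a two-hop loose source
+ * route A -> D; a leading NOP pads the 11-byte option to a 32-bit
+ * multiple, and the addresses are hypothetical.
+ *
+ *	u_char opt[1 + 3 + 2 * sizeof(struct in_addr)];
+ *
+ *	opt[0] = IPOPT_NOP;
+ *	opt[1 + IPOPT_OPTVAL] = IPOPT_LSRR;
+ *	opt[1 + IPOPT_OLEN] = sizeof(opt) - 1;
+ *	opt[1 + IPOPT_OFFSET] = IPOPT_MINOFF;
+ *	memcpy(&opt[4], &first_hop, sizeof(struct in_addr));
+ *	memcpy(&opt[8], &final_dst, sizeof(struct in_addr));
+ *	setsockopt(s, IPPROTO_IP, IP_OPTIONS, opt, sizeof(opt));
+ */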
+
+/*
+ * Check for the presence of the IP Router Alert option [RFC2113]
+ * in the header of an IPv4 datagram.
+ *
+ * This call is not intended for use from the forwarding path; it is here
+ * so that protocol domains may check for the presence of the option.
+ * Given how FreeBSD's IPv4 stack is currently structured, the Router Alert
+ * option does not have much relevance to the implementation, though this
+ * may change in future.
+ * Router alert options SHOULD be passed if running in IPSTEALTH mode and
+ * we are not the endpoint.
+ * Length checks on individual options should already have been performed
+ * by ip_dooptions(); therefore they are folded under INVARIANTS here.
+ *
+ * Return zero if not present or options are invalid, non-zero if present.
+ */
+int
+ip_checkrouteralert(struct mbuf *m)
+{
+ struct ip *ip = mtod(m, struct ip *);
+ u_char *cp;
+ int opt, optlen, cnt, found_ra;
+
+ found_ra = 0;
+ cp = (u_char *)(ip + 1);
+ cnt = (ip->ip_hl << 2) - sizeof (struct ip);
+ for (; cnt > 0; cnt -= optlen, cp += optlen) {
+ opt = cp[IPOPT_OPTVAL];
+ if (opt == IPOPT_EOL)
+ break;
+ if (opt == IPOPT_NOP)
+ optlen = 1;
+ else {
+#ifdef INVARIANTS
+ if (cnt < IPOPT_OLEN + sizeof(*cp))
+ break;
+#endif
+ optlen = cp[IPOPT_OLEN];
+#ifdef INVARIANTS
+ if (optlen < IPOPT_OLEN + sizeof(*cp) || optlen > cnt)
+ break;
+#endif
+ }
+ switch (opt) {
+ case IPOPT_RA:
+#ifdef INVARIANTS
+ if (optlen != IPOPT_OFFSET + sizeof(uint16_t) ||
+ (*((uint16_t *)&cp[IPOPT_OFFSET]) != 0))
+ break;
+ else
+#endif
+ found_ra = 1;
+ break;
+ default:
+ break;
+ }
+ }
+
+ return (found_ra);
+}
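+
+/*
+ * Usage sketch (editor's addition): a protocol input path that requires
+ * the Router Alert option could reject datagrams lacking it:
+ *
+ *	if (!ip_checkrouteralert(m)) {
+ *		m_freem(m);
+ *		return;
+ *	}
+ */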
diff --git a/rtems/freebsd/netinet/ip_options.h b/rtems/freebsd/netinet/ip_options.h
new file mode 100644
index 00000000..9c08004d
--- /dev/null
+++ b/rtems/freebsd/netinet/ip_options.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 1982, 1986, 1993
+ * The Regents of the University of California.
+ * Copyright (c) 2005 Andre Oppermann, Internet Business Solutions AG.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _NETINET_IP_OPTIONS_HH_
+#define _NETINET_IP_OPTIONS_HH_
+
+struct ipoptrt {
+ struct in_addr dst; /* final destination */
+ char nop; /* one NOP to align */
+ char srcopt[IPOPT_OFFSET + 1]; /* OPTVAL, OLEN and OFFSET */
+ struct in_addr route[MAX_IPOPTLEN/sizeof(struct in_addr)];
+};
+
+struct ipopt_tag {
+ struct m_tag tag; /* m_tag */
+ int ip_nhops;
+ struct ipoptrt ip_srcrt;
+};
+
+extern int ip_doopts; /* process or ignore IP options */
+
+int ip_checkrouteralert(struct mbuf *);
+int ip_dooptions(struct mbuf *, int);
+struct mbuf *ip_insertoptions(struct mbuf *, struct mbuf *, int *);
+int ip_optcopy(struct ip *, struct ip *);
+int ip_pcbopts(struct inpcb *, int, struct mbuf *);
+void ip_stripoptions(struct mbuf *, struct mbuf *);
+struct mbuf *ip_srcroute(struct mbuf *);
+
+#endif /* !_NETINET_IP_OPTIONS_HH_ */
diff --git a/rtems/freebsd/netinet/ip_output.c b/rtems/freebsd/netinet/ip_output.c
new file mode 100644
index 00000000..81a26e28
--- /dev/null
+++ b/rtems/freebsd/netinet/ip_output.c
@@ -0,0 +1,1284 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 1982, 1986, 1988, 1990, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)ip_output.c 8.3 (Berkeley) 1/21/94
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/freebsd/local/opt_ipfw.h>
+#include <rtems/freebsd/local/opt_ipsec.h>
+#include <rtems/freebsd/local/opt_route.h>
+#include <rtems/freebsd/local/opt_mbuf_stress_test.h>
+#include <rtems/freebsd/local/opt_mpath.h>
+#include <rtems/freebsd/local/opt_sctp.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/priv.h>
+#include <rtems/freebsd/sys/proc.h>
+#include <rtems/freebsd/sys/protosw.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/socketvar.h>
+#include <rtems/freebsd/sys/sysctl.h>
+#include <rtems/freebsd/sys/ucred.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/if_llatbl.h>
+#include <rtems/freebsd/net/netisr.h>
+#include <rtems/freebsd/net/pfil.h>
+#include <rtems/freebsd/net/route.h>
+#include <rtems/freebsd/net/flowtable.h>
+#ifdef RADIX_MPATH
+#include <rtems/freebsd/net/radix_mpath.h>
+#endif
+#include <rtems/freebsd/net/vnet.h>
+
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/in_systm.h>
+#include <rtems/freebsd/netinet/ip.h>
+#include <rtems/freebsd/netinet/in_pcb.h>
+#include <rtems/freebsd/netinet/in_var.h>
+#include <rtems/freebsd/netinet/ip_var.h>
+#include <rtems/freebsd/netinet/ip_options.h>
+#ifdef SCTP
+#include <rtems/freebsd/netinet/sctp.h>
+#include <rtems/freebsd/netinet/sctp_crc32.h>
+#endif
+
+#ifdef IPSEC
+#include <rtems/freebsd/netinet/ip_ipsec.h>
+#include <rtems/freebsd/netipsec/ipsec.h>
+#endif /* IPSEC*/
+
+#include <rtems/freebsd/machine/in_cksum.h>
+
+#include <rtems/freebsd/security/mac/mac_framework.h>
+
+#define print_ip(x, a, y) printf("%s %d.%d.%d.%d%s",\
+ x, (ntohl(a.s_addr)>>24)&0xFF,\
+ (ntohl(a.s_addr)>>16)&0xFF,\
+ (ntohl(a.s_addr)>>8)&0xFF,\
+ (ntohl(a.s_addr))&0xFF, y);
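+/*
+ * e.g. print_ip("dst", ip->ip_dst, "\n") prints "dst 10.0.0.1\n"
+ * when ip_dst holds 10.0.0.1.
+ */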
+
+VNET_DEFINE(u_short, ip_id);
+
+#ifdef MBUF_STRESS_TEST
+int mbuf_frag_size = 0;
+SYSCTL_INT(_net_inet_ip, OID_AUTO, mbuf_frag_size, CTLFLAG_RW,
+ &mbuf_frag_size, 0, "Fragment outgoing mbufs to this size");
+#endif
+
+static void ip_mloopback
+ (struct ifnet *, struct mbuf *, struct sockaddr_in *, int);
+
+
+extern int in_mcast_loop;
+extern struct protosw inetsw[];
+
+/*
+ * IP output. The packet in mbuf chain m contains a skeletal IP
+ * header (with len, off, ttl, proto, tos, src, dst).
+ * The mbuf chain containing the packet will be freed.
+ * The mbuf opt, if present, will not be freed.
+ * In the IP forwarding case, the packet will arrive with options already
+ * inserted, so must have a NULL opt pointer.
+ */
+int
+ip_output(struct mbuf *m, struct mbuf *opt, struct route *ro, int flags,
+ struct ip_moptions *imo, struct inpcb *inp)
+{
+ struct ip *ip;
+ struct ifnet *ifp = NULL; /* keep compiler happy */
+ struct mbuf *m0;
+ int hlen = sizeof (struct ip);
+ int mtu;
+ int len, error = 0;
+ int nortfree = 0;
+ struct sockaddr_in *dst = NULL; /* keep compiler happy */
+ struct in_ifaddr *ia = NULL;
+ int isbroadcast, sw_csum;
+ struct route iproute;
+ struct rtentry *rte; /* cache for ro->ro_rt */
+ struct in_addr odst;
+#ifdef IPFIREWALL_FORWARD
+ struct m_tag *fwd_tag = NULL;
+#endif
+#ifdef IPSEC
+ int no_route_but_check_spd = 0;
+#endif
+ M_ASSERTPKTHDR(m);
+
+ if (inp != NULL) {
+ INP_LOCK_ASSERT(inp);
+ M_SETFIB(m, inp->inp_inc.inc_fibnum);
+ if (inp->inp_flags & (INP_HW_FLOWID|INP_SW_FLOWID)) {
+ m->m_pkthdr.flowid = inp->inp_flowid;
+ m->m_flags |= M_FLOWID;
+ }
+ }
+
+ if (ro == NULL) {
+ ro = &iproute;
+ bzero(ro, sizeof (*ro));
+
+#ifdef FLOWTABLE
+ {
+ struct flentry *fle;
+
+ /*
+ * The flow table returns route entries valid for up to 30
+ * seconds; we rely on the remainder of ip_output() taking no
+		 * longer than that for the stability of ro_rt.  The
+ * flow ID assignment must have happened before this point.
+ */
+ if ((fle = flowtable_lookup_mbuf(V_ip_ft, m, AF_INET)) != NULL) {
+ flow_to_route(fle, ro);
+ nortfree = 1;
+ }
+ }
+#endif
+ }
+
+ if (opt) {
+ len = 0;
+ m = ip_insertoptions(m, opt, &len);
+ if (len != 0)
+ hlen = len;
+ }
+ ip = mtod(m, struct ip *);
+
+ /*
+ * Fill in IP header. If we are not allowing fragmentation,
+ * then the ip_id field is meaningless, but we don't set it
+ * to zero. Doing so causes various problems when devices along
+ * the path (routers, load balancers, firewalls, etc.) illegally
+ * disable DF on our packet. Note that a 16-bit counter
+ * will wrap around in less than 10 seconds at 100 Mbit/s on a
+ * medium with MTU 1500. See Steven M. Bellovin, "A Technique
+ * for Counting NATted Hosts", Proc. IMW'02, available at
+ * <http://www.cs.columbia.edu/~smb/papers/fnat.pdf>.
+ */
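+	/*
+	 * A quick check of that figure: 100 Mbit/s over 1500-byte packets
+	 * is 100e6 / (1500 * 8) ~= 8333 packets/s, and 65536 IDs at 8333
+	 * packets/s last ~= 7.9 seconds before the 16-bit counter wraps.
+	 */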
+ if ((flags & (IP_FORWARDING|IP_RAWOUTPUT)) == 0) {
+ ip->ip_v = IPVERSION;
+ ip->ip_hl = hlen >> 2;
+ ip->ip_id = ip_newid();
+ IPSTAT_INC(ips_localout);
+ } else {
+ hlen = ip->ip_hl << 2;
+ }
+
+ dst = (struct sockaddr_in *)&ro->ro_dst;
+again:
+ /*
+ * If there is a cached route,
+ * check that it is to the same destination
+ * and is still up. If not, free it and try again.
+ * The address family should also be checked in case of sharing the
+ * cache with IPv6.
+ */
+ rte = ro->ro_rt;
+ if (rte && ((rte->rt_flags & RTF_UP) == 0 ||
+ rte->rt_ifp == NULL ||
+ !RT_LINK_IS_UP(rte->rt_ifp) ||
+ dst->sin_family != AF_INET ||
+ dst->sin_addr.s_addr != ip->ip_dst.s_addr)) {
+ if (!nortfree)
+ RTFREE(rte);
+ rte = ro->ro_rt = (struct rtentry *)NULL;
+ ro->ro_lle = (struct llentry *)NULL;
+ }
+#ifdef IPFIREWALL_FORWARD
+ if (rte == NULL && fwd_tag == NULL) {
+#else
+ if (rte == NULL) {
+#endif
+ bzero(dst, sizeof(*dst));
+ dst->sin_family = AF_INET;
+ dst->sin_len = sizeof(*dst);
+ dst->sin_addr = ip->ip_dst;
+ }
+ /*
+ * If routing to interface only, short circuit routing lookup.
+ * The use of an all-ones broadcast address implies this; an
+ * interface is specified by the broadcast address of an interface,
+ * or the destination address of a ptp interface.
+ */
+ if (flags & IP_SENDONES) {
+ if ((ia = ifatoia(ifa_ifwithbroadaddr(sintosa(dst)))) == NULL &&
+ (ia = ifatoia(ifa_ifwithdstaddr(sintosa(dst)))) == NULL) {
+ IPSTAT_INC(ips_noroute);
+ error = ENETUNREACH;
+ goto bad;
+ }
+ ip->ip_dst.s_addr = INADDR_BROADCAST;
+ dst->sin_addr = ip->ip_dst;
+ ifp = ia->ia_ifp;
+ ip->ip_ttl = 1;
+ isbroadcast = 1;
+ } else if (flags & IP_ROUTETOIF) {
+ if ((ia = ifatoia(ifa_ifwithdstaddr(sintosa(dst)))) == NULL &&
+ (ia = ifatoia(ifa_ifwithnet(sintosa(dst), 0))) == NULL) {
+ IPSTAT_INC(ips_noroute);
+ error = ENETUNREACH;
+ goto bad;
+ }
+ ifp = ia->ia_ifp;
+ ip->ip_ttl = 1;
+ isbroadcast = in_broadcast(dst->sin_addr, ifp);
+ } else if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) &&
+ imo != NULL && imo->imo_multicast_ifp != NULL) {
+ /*
+ * Bypass the normal routing lookup for multicast
+ * packets if the interface is specified.
+ */
+ ifp = imo->imo_multicast_ifp;
+ IFP_TO_IA(ifp, ia);
+ isbroadcast = 0; /* fool gcc */
+ } else {
+ /*
+ * We want to do any cloning requested by the link layer,
+ * as this is probably required in all cases for correct
+ * operation (as it is for ARP).
+ */
+ if (rte == NULL) {
+#ifdef RADIX_MPATH
+ rtalloc_mpath_fib(ro,
+ ntohl(ip->ip_src.s_addr ^ ip->ip_dst.s_addr),
+ inp ? inp->inp_inc.inc_fibnum : M_GETFIB(m));
+#else
+ in_rtalloc_ign(ro, 0,
+ inp ? inp->inp_inc.inc_fibnum : M_GETFIB(m));
+#endif
+ rte = ro->ro_rt;
+ }
+ if (rte == NULL ||
+ rte->rt_ifp == NULL ||
+ !RT_LINK_IS_UP(rte->rt_ifp)) {
+#ifdef IPSEC
+ /*
+ * There is no route for this packet, but it is
+ * possible that a matching SPD entry exists.
+ */
+ no_route_but_check_spd = 1;
+ mtu = 0; /* Silence GCC warning. */
+ goto sendit;
+#endif
+ IPSTAT_INC(ips_noroute);
+ error = EHOSTUNREACH;
+ goto bad;
+ }
+ ia = ifatoia(rte->rt_ifa);
+ ifa_ref(&ia->ia_ifa);
+ ifp = rte->rt_ifp;
+ rte->rt_rmx.rmx_pksent++;
+ if (rte->rt_flags & RTF_GATEWAY)
+ dst = (struct sockaddr_in *)rte->rt_gateway;
+ if (rte->rt_flags & RTF_HOST)
+ isbroadcast = (rte->rt_flags & RTF_BROADCAST);
+ else
+ isbroadcast = in_broadcast(dst->sin_addr, ifp);
+ }
+ /*
+ * Calculate MTU. If we have a route that is up, use that,
+ * otherwise use the interface's MTU.
+ */
+ if (rte != NULL && (rte->rt_flags & (RTF_UP|RTF_HOST))) {
+ /*
+ * This case can happen if the user changed the MTU
+ * of an interface after enabling IP on it. Because
+ * most netifs don't keep track of routes pointing to
+ * them, there is no way for one to update all its
+ * routes when the MTU is changed.
+ */
+ if (rte->rt_rmx.rmx_mtu > ifp->if_mtu)
+ rte->rt_rmx.rmx_mtu = ifp->if_mtu;
+ mtu = rte->rt_rmx.rmx_mtu;
+ } else {
+ mtu = ifp->if_mtu;
+ }
+ if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr))) {
+ m->m_flags |= M_MCAST;
+ /*
+ * IP destination address is multicast. Make sure "dst"
+ * still points to the address in "ro". (It may have been
+ * changed to point to a gateway address, above.)
+ */
+ dst = (struct sockaddr_in *)&ro->ro_dst;
+ /*
+ * See if the caller provided any multicast options
+ */
+ if (imo != NULL) {
+ ip->ip_ttl = imo->imo_multicast_ttl;
+ if (imo->imo_multicast_vif != -1)
+ ip->ip_src.s_addr =
+ ip_mcast_src ?
+ ip_mcast_src(imo->imo_multicast_vif) :
+ INADDR_ANY;
+ } else
+ ip->ip_ttl = IP_DEFAULT_MULTICAST_TTL;
+ /*
+ * Confirm that the outgoing interface supports multicast.
+ */
+ if ((imo == NULL) || (imo->imo_multicast_vif == -1)) {
+ if ((ifp->if_flags & IFF_MULTICAST) == 0) {
+ IPSTAT_INC(ips_noroute);
+ error = ENETUNREACH;
+ goto bad;
+ }
+ }
+ /*
+ * If source address not specified yet, use address
+ * of outgoing interface.
+ */
+ if (ip->ip_src.s_addr == INADDR_ANY) {
+ /* Interface may have no addresses. */
+ if (ia != NULL)
+ ip->ip_src = IA_SIN(ia)->sin_addr;
+ }
+
+ if ((imo == NULL && in_mcast_loop) ||
+ (imo && imo->imo_multicast_loop)) {
+ /*
+ * Loop back multicast datagram if not expressly
+ * forbidden to do so, even if we are not a member
+ * of the group; ip_input() will filter it later,
+ * thus deferring a hash lookup and mutex acquisition
+ * at the expense of a cheap copy using m_copym().
+ */
+ ip_mloopback(ifp, m, dst, hlen);
+ } else {
+ /*
+ * If we are acting as a multicast router, perform
+ * multicast forwarding as if the packet had just
+ * arrived on the interface to which we are about
+ * to send. The multicast forwarding function
+ * recursively calls this function, using the
+ * IP_FORWARDING flag to prevent infinite recursion.
+ *
+ * Multicasts that are looped back by ip_mloopback(),
+ * above, will be forwarded by the ip_input() routine,
+ * if necessary.
+ */
+ if (V_ip_mrouter && (flags & IP_FORWARDING) == 0) {
+ /*
+ * If rsvp daemon is not running, do not
+ * set ip_moptions. This ensures that the packet
+ * is multicast and not just sent down one link
+ * as prescribed by rsvpd.
+ */
+ if (!V_rsvp_on)
+ imo = NULL;
+ if (ip_mforward &&
+ ip_mforward(ip, ifp, m, imo) != 0) {
+ m_freem(m);
+ goto done;
+ }
+ }
+ }
+
+ /*
+ * Multicasts with a time-to-live of zero may be looped-
+ * back, above, but must not be transmitted on a network.
+ * Also, multicasts addressed to the loopback interface
+ * are not sent -- the above call to ip_mloopback() will
+ * loop back a copy. ip_input() will drop the copy if
+ * this host does not belong to the destination group on
+ * the loopback interface.
+ */
+ if (ip->ip_ttl == 0 || ifp->if_flags & IFF_LOOPBACK) {
+ m_freem(m);
+ goto done;
+ }
+
+ goto sendit;
+ }
+
+ /*
+ * If the source address is not specified yet, use the address
+	 * of the outgoing interface.
+ */
+ if (ip->ip_src.s_addr == INADDR_ANY) {
+ /* Interface may have no addresses. */
+ if (ia != NULL) {
+ ip->ip_src = IA_SIN(ia)->sin_addr;
+ }
+ }
+
+ /*
+ * Verify that we have any chance at all of being able to queue the
+ * packet or packet fragments, unless ALTQ is enabled on the given
+	 * interface, in which case the packet drop should be done by queueing.
+ */
+#ifdef ALTQ
+ if ((!ALTQ_IS_ENABLED(&ifp->if_snd)) &&
+ ((ifp->if_snd.ifq_len + ip->ip_len / mtu + 1) >=
+ ifp->if_snd.ifq_maxlen))
+#else
+ if ((ifp->if_snd.ifq_len + ip->ip_len / mtu + 1) >=
+ ifp->if_snd.ifq_maxlen)
+#endif /* ALTQ */
+ {
+ error = ENOBUFS;
+ IPSTAT_INC(ips_odropped);
+ ifp->if_snd.ifq_drops += (ip->ip_len / ifp->if_mtu + 1);
+ goto bad;
+ }
+
+ /*
+ * Look for broadcast address and
+ * verify user is allowed to send
+ * such a packet.
+ */
+ if (isbroadcast) {
+ if ((ifp->if_flags & IFF_BROADCAST) == 0) {
+ error = EADDRNOTAVAIL;
+ goto bad;
+ }
+ if ((flags & IP_ALLOWBROADCAST) == 0) {
+ error = EACCES;
+ goto bad;
+ }
+ /* don't allow broadcast messages to be fragmented */
+ if (ip->ip_len > mtu) {
+ error = EMSGSIZE;
+ goto bad;
+ }
+ m->m_flags |= M_BCAST;
+ } else {
+ m->m_flags &= ~M_BCAST;
+ }
+
+sendit:
+#ifdef IPSEC
+ switch(ip_ipsec_output(&m, inp, &flags, &error, &ifp)) {
+ case 1:
+ goto bad;
+ case -1:
+ goto done;
+ case 0:
+ default:
+ break; /* Continue with packet processing. */
+ }
+ /*
+ * Check if there was a route for this packet; return error if not.
+ */
+ if (no_route_but_check_spd) {
+ IPSTAT_INC(ips_noroute);
+ error = EHOSTUNREACH;
+ goto bad;
+ }
+ /* Update variables that are affected by ipsec4_output(). */
+ ip = mtod(m, struct ip *);
+ hlen = ip->ip_hl << 2;
+#endif /* IPSEC */
+
+ /* Jump over all PFIL processing if hooks are not active. */
+ if (!PFIL_HOOKED(&V_inet_pfil_hook))
+ goto passout;
+
+ /* Run through list of hooks for output packets. */
+ odst.s_addr = ip->ip_dst.s_addr;
+ error = pfil_run_hooks(&V_inet_pfil_hook, &m, ifp, PFIL_OUT, inp);
+ if (error != 0 || m == NULL)
+ goto done;
+
+ ip = mtod(m, struct ip *);
+
+ /* See if destination IP address was changed by packet filter. */
+ if (odst.s_addr != ip->ip_dst.s_addr) {
+ m->m_flags |= M_SKIP_FIREWALL;
+ /* If destination is now ourself drop to ip_input(). */
+ if (in_localip(ip->ip_dst)) {
+ m->m_flags |= M_FASTFWD_OURS;
+ if (m->m_pkthdr.rcvif == NULL)
+ m->m_pkthdr.rcvif = V_loif;
+ if (m->m_pkthdr.csum_flags & CSUM_DELAY_DATA) {
+ m->m_pkthdr.csum_flags |=
+ CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
+ m->m_pkthdr.csum_data = 0xffff;
+ }
+ m->m_pkthdr.csum_flags |=
+ CSUM_IP_CHECKED | CSUM_IP_VALID;
+#ifdef SCTP
+ if (m->m_pkthdr.csum_flags & CSUM_SCTP)
+ m->m_pkthdr.csum_flags |= CSUM_SCTP_VALID;
+#endif
+ error = netisr_queue(NETISR_IP, m);
+ goto done;
+ } else
+ goto again; /* Redo the routing table lookup. */
+ }
+
+#ifdef IPFIREWALL_FORWARD
+ /* See if local, if yes, send it to netisr with IP_FASTFWD_OURS. */
+ if (m->m_flags & M_FASTFWD_OURS) {
+ if (m->m_pkthdr.rcvif == NULL)
+ m->m_pkthdr.rcvif = V_loif;
+ if (m->m_pkthdr.csum_flags & CSUM_DELAY_DATA) {
+ m->m_pkthdr.csum_flags |=
+ CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
+ m->m_pkthdr.csum_data = 0xffff;
+ }
+#ifdef SCTP
+ if (m->m_pkthdr.csum_flags & CSUM_SCTP)
+ m->m_pkthdr.csum_flags |= CSUM_SCTP_VALID;
+#endif
+ m->m_pkthdr.csum_flags |=
+ CSUM_IP_CHECKED | CSUM_IP_VALID;
+
+ error = netisr_queue(NETISR_IP, m);
+ goto done;
+ }
+ /* Or forward to some other address? */
+ fwd_tag = m_tag_find(m, PACKET_TAG_IPFORWARD, NULL);
+ if (fwd_tag) {
+ dst = (struct sockaddr_in *)&ro->ro_dst;
+ bcopy((fwd_tag+1), dst, sizeof(struct sockaddr_in));
+ m->m_flags |= M_SKIP_FIREWALL;
+ m_tag_delete(m, fwd_tag);
+ goto again;
+ }
+#endif /* IPFIREWALL_FORWARD */
+
+passout:
+ /* 127/8 must not appear on wire - RFC1122. */
+ if ((ntohl(ip->ip_dst.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET ||
+ (ntohl(ip->ip_src.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET) {
+ if ((ifp->if_flags & IFF_LOOPBACK) == 0) {
+ IPSTAT_INC(ips_badaddr);
+ error = EADDRNOTAVAIL;
+ goto bad;
+ }
+ }
+
+ m->m_pkthdr.csum_flags |= CSUM_IP;
+ sw_csum = m->m_pkthdr.csum_flags & ~ifp->if_hwassist;
+ if (sw_csum & CSUM_DELAY_DATA) {
+ in_delayed_cksum(m);
+ sw_csum &= ~CSUM_DELAY_DATA;
+ }
+#ifdef SCTP
+ if (sw_csum & CSUM_SCTP) {
+ sctp_delayed_cksum(m, (uint32_t)(ip->ip_hl << 2));
+ sw_csum &= ~CSUM_SCTP;
+ }
+#endif
+ m->m_pkthdr.csum_flags &= ifp->if_hwassist;
+
+ /*
+ * If small enough for interface, or the interface will take
+ * care of the fragmentation for us, we can just send directly.
+ */
+ if (ip->ip_len <= mtu ||
+ (m->m_pkthdr.csum_flags & ifp->if_hwassist & CSUM_TSO) != 0 ||
+ ((ip->ip_off & IP_DF) == 0 && (ifp->if_hwassist & CSUM_FRAGMENT))) {
+ ip->ip_len = htons(ip->ip_len);
+ ip->ip_off = htons(ip->ip_off);
+ ip->ip_sum = 0;
+ if (sw_csum & CSUM_DELAY_IP)
+ ip->ip_sum = in_cksum(m, hlen);
+
+ /*
+ * Record statistics for this interface address.
+ * With CSUM_TSO the byte/packet count will be slightly
+ * incorrect because we count the IP+TCP headers only
+ * once instead of for every generated packet.
+ */
+ if (!(flags & IP_FORWARDING) && ia) {
+ if (m->m_pkthdr.csum_flags & CSUM_TSO)
+ ia->ia_ifa.if_opackets +=
+ m->m_pkthdr.len / m->m_pkthdr.tso_segsz;
+ else
+ ia->ia_ifa.if_opackets++;
+ ia->ia_ifa.if_obytes += m->m_pkthdr.len;
+ }
+#ifdef MBUF_STRESS_TEST
+ if (mbuf_frag_size && m->m_pkthdr.len > mbuf_frag_size)
+ m = m_fragment(m, M_DONTWAIT, mbuf_frag_size);
+#endif
+ /*
+ * Reset layer specific mbuf flags
+ * to avoid confusing lower layers.
+ */
+ m->m_flags &= ~(M_PROTOFLAGS);
+ error = (*ifp->if_output)(ifp, m,
+ (struct sockaddr *)dst, ro);
+ goto done;
+ }
+
+ /* Balk when DF bit is set or the interface didn't support TSO. */
+ if ((ip->ip_off & IP_DF) || (m->m_pkthdr.csum_flags & CSUM_TSO)) {
+ error = EMSGSIZE;
+ IPSTAT_INC(ips_cantfrag);
+ goto bad;
+ }
+
+ /*
+ * Too large for interface; fragment if possible. If successful,
+ * on return, m will point to a list of packets to be sent.
+ */
+ error = ip_fragment(ip, &m, mtu, ifp->if_hwassist, sw_csum);
+ if (error)
+ goto bad;
+ for (; m; m = m0) {
+ m0 = m->m_nextpkt;
+ m->m_nextpkt = 0;
+ if (error == 0) {
+ /* Record statistics for this interface address. */
+ if (ia != NULL) {
+ ia->ia_ifa.if_opackets++;
+ ia->ia_ifa.if_obytes += m->m_pkthdr.len;
+ }
+ /*
+ * Reset layer specific mbuf flags
+			 * to avoid confusing lower layers.
+ */
+ m->m_flags &= ~(M_PROTOFLAGS);
+
+ error = (*ifp->if_output)(ifp, m,
+ (struct sockaddr *)dst, ro);
+ } else
+ m_freem(m);
+ }
+
+ if (error == 0)
+ IPSTAT_INC(ips_fragmented);
+
+done:
+ if (ro == &iproute && ro->ro_rt && !nortfree) {
+ RTFREE(ro->ro_rt);
+ }
+ if (ia != NULL)
+ ifa_free(&ia->ia_ifa);
+ return (error);
+bad:
+ m_freem(m);
+ goto done;
+}
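+
+/*
+ * Minimal caller sketch (illustrative; 'laddr' and 'faddr' are
+ * hypothetical locals and error handling is elided).  A transport
+ * protocol fills in the skeletal header fields named above and
+ * hands the chain off:
+ *
+ *	struct ip *ip = mtod(m, struct ip *);
+ *	ip->ip_len = m->m_pkthdr.len;	   host order; swapped on output
+ *	ip->ip_off = 0;
+ *	ip->ip_ttl = V_ip_defttl;
+ *	ip->ip_p = IPPROTO_UDP;
+ *	ip->ip_tos = 0;
+ *	ip->ip_src = laddr;
+ *	ip->ip_dst = faddr;
+ *	error = ip_output(m, inp->inp_options, NULL, 0,
+ *	    inp->inp_moptions, inp);
+ */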
+
+/*
+ * Create a chain of fragments which fit the given mtu. m_frag points to the
+ * mbuf to be fragmented; on return it points to the chain with the fragments.
+ * Return 0 if no error. If error, m_frag may contain a partially built
+ * chain of fragments that should be freed by the caller.
+ *
+ * if_hwassist_flags is the hw offload capabilities (see if_data.ifi_hwassist)
+ * sw_csum contains the delayed checksums flags (e.g., CSUM_DELAY_IP).
+ */
+int
+ip_fragment(struct ip *ip, struct mbuf **m_frag, int mtu,
+ u_long if_hwassist_flags, int sw_csum)
+{
+ int error = 0;
+ int hlen = ip->ip_hl << 2;
+ int len = (mtu - hlen) & ~7; /* size of payload in each fragment */
+ int off;
+ struct mbuf *m0 = *m_frag; /* the original packet */
+ int firstlen;
+ struct mbuf **mnext;
+ int nfrags;
+
+ if (ip->ip_off & IP_DF) { /* Fragmentation not allowed */
+ IPSTAT_INC(ips_cantfrag);
+ return EMSGSIZE;
+ }
+
+ /*
+ * Must be able to put at least 8 bytes per fragment.
+ */
+ if (len < 8)
+ return EMSGSIZE;
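+	/*
+	 * Example: with an mtu of 1500 and a 20-byte header, len =
+	 * 1480 & ~7 = 1480.  IP carries fragment offsets in 8-byte
+	 * units (see the ">> 3" below), hence the rounding of len to
+	 * a multiple of 8.
+	 */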
+
+ /*
+ * If the interface will not calculate checksums on
+ * fragmented packets, then do it here.
+ */
+ if (m0->m_pkthdr.csum_flags & CSUM_DELAY_DATA &&
+ (if_hwassist_flags & CSUM_IP_FRAGS) == 0) {
+ in_delayed_cksum(m0);
+ m0->m_pkthdr.csum_flags &= ~CSUM_DELAY_DATA;
+ }
+#ifdef SCTP
+ if (m0->m_pkthdr.csum_flags & CSUM_SCTP &&
+ (if_hwassist_flags & CSUM_IP_FRAGS) == 0) {
+ sctp_delayed_cksum(m0, hlen);
+ m0->m_pkthdr.csum_flags &= ~CSUM_SCTP;
+ }
+#endif
+ if (len > PAGE_SIZE) {
+ /*
+ * Fragment large datagrams such that each segment
+ * contains a multiple of PAGE_SIZE amount of data,
+ * plus headers. This enables a receiver to perform
+ * page-flipping zero-copy optimizations.
+ *
+ * XXX When does this help given that sender and receiver
+ * could have different page sizes, and also mtu could
+ * be less than the receiver's page size ?
+ */
+ int newlen;
+ struct mbuf *m;
+
+ for (m = m0, off = 0; m && (off+m->m_len) <= mtu; m = m->m_next)
+ off += m->m_len;
+
+ /*
+ * firstlen (off - hlen) must be aligned on an
+ * 8-byte boundary
+ */
+ if (off < hlen)
+ goto smart_frag_failure;
+ off = ((off - hlen) & ~7) + hlen;
+ newlen = (~PAGE_MASK) & mtu;
+ if ((newlen + sizeof (struct ip)) > mtu) {
+			/* we failed, fall back to the default */
+smart_frag_failure:
+ newlen = len;
+ off = hlen + len;
+ }
+ len = newlen;
+
+ } else {
+ off = hlen + len;
+ }
+
+ firstlen = off - hlen;
+ mnext = &m0->m_nextpkt; /* pointer to next packet */
+
+ /*
+ * Loop through length of segment after first fragment,
+ * make new header and copy data of each part and link onto chain.
+ * Here, m0 is the original packet, m is the fragment being created.
+ * The fragments are linked off the m_nextpkt of the original
+ * packet, which after processing serves as the first fragment.
+ */
+ for (nfrags = 1; off < ip->ip_len; off += len, nfrags++) {
+ struct ip *mhip; /* ip header on the fragment */
+ struct mbuf *m;
+ int mhlen = sizeof (struct ip);
+
+ MGETHDR(m, M_DONTWAIT, MT_DATA);
+ if (m == NULL) {
+ error = ENOBUFS;
+ IPSTAT_INC(ips_odropped);
+ goto done;
+ }
+ m->m_flags |= (m0->m_flags & M_MCAST) | M_FRAG;
+ /*
+ * In the first mbuf, leave room for the link header, then
+ * copy the original IP header including options. The payload
+ * goes into an additional mbuf chain returned by m_copym().
+ */
+ m->m_data += max_linkhdr;
+ mhip = mtod(m, struct ip *);
+ *mhip = *ip;
+ if (hlen > sizeof (struct ip)) {
+ mhlen = ip_optcopy(ip, mhip) + sizeof (struct ip);
+ mhip->ip_v = IPVERSION;
+ mhip->ip_hl = mhlen >> 2;
+ }
+ m->m_len = mhlen;
+ /* XXX do we need to add ip->ip_off below ? */
+ mhip->ip_off = ((off - hlen) >> 3) + ip->ip_off;
+ if (off + len >= ip->ip_len) { /* last fragment */
+ len = ip->ip_len - off;
+ m->m_flags |= M_LASTFRAG;
+ } else
+ mhip->ip_off |= IP_MF;
+ mhip->ip_len = htons((u_short)(len + mhlen));
+ m->m_next = m_copym(m0, off, len, M_DONTWAIT);
+ if (m->m_next == NULL) { /* copy failed */
+ m_free(m);
+ error = ENOBUFS; /* ??? */
+ IPSTAT_INC(ips_odropped);
+ goto done;
+ }
+ m->m_pkthdr.len = mhlen + len;
+ m->m_pkthdr.rcvif = NULL;
+#ifdef MAC
+ mac_netinet_fragment(m0, m);
+#endif
+ m->m_pkthdr.csum_flags = m0->m_pkthdr.csum_flags;
+ mhip->ip_off = htons(mhip->ip_off);
+ mhip->ip_sum = 0;
+ if (sw_csum & CSUM_DELAY_IP)
+ mhip->ip_sum = in_cksum(m, mhlen);
+ *mnext = m;
+ mnext = &m->m_nextpkt;
+ }
+ IPSTAT_ADD(ips_ofragments, nfrags);
+
+ /* set first marker for fragment chain */
+ m0->m_flags |= M_FIRSTFRAG | M_FRAG;
+ m0->m_pkthdr.csum_data = nfrags;
+
+ /*
+ * Update first fragment by trimming what's been copied out
+ * and updating header.
+ */
+ m_adj(m0, hlen + firstlen - ip->ip_len);
+ m0->m_pkthdr.len = hlen + firstlen;
+ ip->ip_len = htons((u_short)m0->m_pkthdr.len);
+ ip->ip_off |= IP_MF;
+ ip->ip_off = htons(ip->ip_off);
+ ip->ip_sum = 0;
+ if (sw_csum & CSUM_DELAY_IP)
+ ip->ip_sum = in_cksum(m0, hlen);
+
+done:
+ *m_frag = m0;
+ return error;
+}
+
+void
+in_delayed_cksum(struct mbuf *m)
+{
+ struct ip *ip;
+ u_short csum, offset;
+
+ ip = mtod(m, struct ip *);
+ offset = ip->ip_hl << 2 ;
+ csum = in_cksum_skip(m, ip->ip_len, offset);
+ if (m->m_pkthdr.csum_flags & CSUM_UDP && csum == 0)
+ csum = 0xffff;
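+	/*
+	 * Per RFC 768 a transmitted UDP checksum of zero means "no
+	 * checksum", so a computed zero is sent as all ones instead.
+	 */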
+ offset += m->m_pkthdr.csum_data; /* checksum offset */
+
+ if (offset + sizeof(u_short) > m->m_len) {
+ printf("delayed m_pullup, m->len: %d off: %d p: %d\n",
+ m->m_len, offset, ip->ip_p);
+ /*
+ * XXX
+ * this shouldn't happen, but if it does, the
+ * correct behavior may be to insert the checksum
+ * in the appropriate next mbuf in the chain.
+ */
+ return;
+ }
+ *(u_short *)(m->m_data + offset) = csum;
+}
+
+/*
+ * IP socket option processing.
+ */
+int
+ip_ctloutput(struct socket *so, struct sockopt *sopt)
+{
+ struct inpcb *inp = sotoinpcb(so);
+ int error, optval;
+
+ error = optval = 0;
+ if (sopt->sopt_level != IPPROTO_IP) {
+ if ((sopt->sopt_level == SOL_SOCKET) &&
+ (sopt->sopt_name == SO_SETFIB)) {
+ inp->inp_inc.inc_fibnum = so->so_fibnum;
+ return (0);
+ }
+ return (EINVAL);
+ }
+
+ switch (sopt->sopt_dir) {
+ case SOPT_SET:
+ switch (sopt->sopt_name) {
+ case IP_OPTIONS:
+#ifdef notyet
+ case IP_RETOPTS:
+#endif
+ {
+ struct mbuf *m;
+ if (sopt->sopt_valsize > MLEN) {
+ error = EMSGSIZE;
+ break;
+ }
+ MGET(m, sopt->sopt_td ? M_WAIT : M_DONTWAIT, MT_DATA);
+ if (m == NULL) {
+ error = ENOBUFS;
+ break;
+ }
+ m->m_len = sopt->sopt_valsize;
+ error = sooptcopyin(sopt, mtod(m, char *), m->m_len,
+ m->m_len);
+ if (error) {
+ m_free(m);
+ break;
+ }
+ INP_WLOCK(inp);
+ error = ip_pcbopts(inp, sopt->sopt_name, m);
+ INP_WUNLOCK(inp);
+ return (error);
+ }
+
+ case IP_BINDANY:
+ if (sopt->sopt_td != NULL) {
+ error = priv_check(sopt->sopt_td,
+ PRIV_NETINET_BINDANY);
+ if (error)
+ break;
+ }
+ /* FALLTHROUGH */
+ case IP_TOS:
+ case IP_TTL:
+ case IP_MINTTL:
+ case IP_RECVOPTS:
+ case IP_RECVRETOPTS:
+ case IP_RECVDSTADDR:
+ case IP_RECVTTL:
+ case IP_RECVIF:
+ case IP_FAITH:
+ case IP_ONESBCAST:
+ case IP_DONTFRAG:
+ error = sooptcopyin(sopt, &optval, sizeof optval,
+ sizeof optval);
+ if (error)
+ break;
+
+ switch (sopt->sopt_name) {
+ case IP_TOS:
+ inp->inp_ip_tos = optval;
+ break;
+
+ case IP_TTL:
+ inp->inp_ip_ttl = optval;
+ break;
+
+ case IP_MINTTL:
+ if (optval >= 0 && optval <= MAXTTL)
+ inp->inp_ip_minttl = optval;
+ else
+ error = EINVAL;
+ break;
+
+#define OPTSET(bit) do { \
+ INP_WLOCK(inp); \
+ if (optval) \
+ inp->inp_flags |= bit; \
+ else \
+ inp->inp_flags &= ~bit; \
+ INP_WUNLOCK(inp); \
+} while (0)
+
+ case IP_RECVOPTS:
+ OPTSET(INP_RECVOPTS);
+ break;
+
+ case IP_RECVRETOPTS:
+ OPTSET(INP_RECVRETOPTS);
+ break;
+
+ case IP_RECVDSTADDR:
+ OPTSET(INP_RECVDSTADDR);
+ break;
+
+ case IP_RECVTTL:
+ OPTSET(INP_RECVTTL);
+ break;
+
+ case IP_RECVIF:
+ OPTSET(INP_RECVIF);
+ break;
+
+ case IP_FAITH:
+ OPTSET(INP_FAITH);
+ break;
+
+ case IP_ONESBCAST:
+ OPTSET(INP_ONESBCAST);
+ break;
+ case IP_DONTFRAG:
+ OPTSET(INP_DONTFRAG);
+ break;
+ case IP_BINDANY:
+ OPTSET(INP_BINDANY);
+ break;
+ }
+ break;
+#undef OPTSET
+
+ /*
+ * Multicast socket options are processed by the in_mcast
+ * module.
+ */
+ case IP_MULTICAST_IF:
+ case IP_MULTICAST_VIF:
+ case IP_MULTICAST_TTL:
+ case IP_MULTICAST_LOOP:
+ case IP_ADD_MEMBERSHIP:
+ case IP_DROP_MEMBERSHIP:
+ case IP_ADD_SOURCE_MEMBERSHIP:
+ case IP_DROP_SOURCE_MEMBERSHIP:
+ case IP_BLOCK_SOURCE:
+ case IP_UNBLOCK_SOURCE:
+ case IP_MSFILTER:
+ case MCAST_JOIN_GROUP:
+ case MCAST_LEAVE_GROUP:
+ case MCAST_JOIN_SOURCE_GROUP:
+ case MCAST_LEAVE_SOURCE_GROUP:
+ case MCAST_BLOCK_SOURCE:
+ case MCAST_UNBLOCK_SOURCE:
+ error = inp_setmoptions(inp, sopt);
+ break;
+
+ case IP_PORTRANGE:
+ error = sooptcopyin(sopt, &optval, sizeof optval,
+ sizeof optval);
+ if (error)
+ break;
+
+ INP_WLOCK(inp);
+ switch (optval) {
+ case IP_PORTRANGE_DEFAULT:
+ inp->inp_flags &= ~(INP_LOWPORT);
+ inp->inp_flags &= ~(INP_HIGHPORT);
+ break;
+
+ case IP_PORTRANGE_HIGH:
+ inp->inp_flags &= ~(INP_LOWPORT);
+ inp->inp_flags |= INP_HIGHPORT;
+ break;
+
+ case IP_PORTRANGE_LOW:
+ inp->inp_flags &= ~(INP_HIGHPORT);
+ inp->inp_flags |= INP_LOWPORT;
+ break;
+
+ default:
+ error = EINVAL;
+ break;
+ }
+ INP_WUNLOCK(inp);
+ break;
+
+#ifdef IPSEC
+ case IP_IPSEC_POLICY:
+ {
+ caddr_t req;
+ struct mbuf *m;
+
+ if ((error = soopt_getm(sopt, &m)) != 0) /* XXX */
+ break;
+ if ((error = soopt_mcopyin(sopt, m)) != 0) /* XXX */
+ break;
+ req = mtod(m, caddr_t);
+ error = ipsec_set_policy(inp, sopt->sopt_name, req,
+ m->m_len, (sopt->sopt_td != NULL) ?
+ sopt->sopt_td->td_ucred : NULL);
+ m_freem(m);
+ break;
+ }
+#endif /* IPSEC */
+
+ default:
+ error = ENOPROTOOPT;
+ break;
+ }
+ break;
+
+ case SOPT_GET:
+ switch (sopt->sopt_name) {
+ case IP_OPTIONS:
+ case IP_RETOPTS:
+ if (inp->inp_options)
+ error = sooptcopyout(sopt,
+ mtod(inp->inp_options,
+ char *),
+ inp->inp_options->m_len);
+ else
+ sopt->sopt_valsize = 0;
+ break;
+
+ case IP_TOS:
+ case IP_TTL:
+ case IP_MINTTL:
+ case IP_RECVOPTS:
+ case IP_RECVRETOPTS:
+ case IP_RECVDSTADDR:
+ case IP_RECVTTL:
+ case IP_RECVIF:
+ case IP_PORTRANGE:
+ case IP_FAITH:
+ case IP_ONESBCAST:
+ case IP_DONTFRAG:
+ case IP_BINDANY:
+ switch (sopt->sopt_name) {
+
+ case IP_TOS:
+ optval = inp->inp_ip_tos;
+ break;
+
+ case IP_TTL:
+ optval = inp->inp_ip_ttl;
+ break;
+
+ case IP_MINTTL:
+ optval = inp->inp_ip_minttl;
+ break;
+
+#define OPTBIT(bit) (inp->inp_flags & bit ? 1 : 0)
+
+ case IP_RECVOPTS:
+ optval = OPTBIT(INP_RECVOPTS);
+ break;
+
+ case IP_RECVRETOPTS:
+ optval = OPTBIT(INP_RECVRETOPTS);
+ break;
+
+ case IP_RECVDSTADDR:
+ optval = OPTBIT(INP_RECVDSTADDR);
+ break;
+
+ case IP_RECVTTL:
+ optval = OPTBIT(INP_RECVTTL);
+ break;
+
+ case IP_RECVIF:
+ optval = OPTBIT(INP_RECVIF);
+ break;
+
+ case IP_PORTRANGE:
+ if (inp->inp_flags & INP_HIGHPORT)
+ optval = IP_PORTRANGE_HIGH;
+ else if (inp->inp_flags & INP_LOWPORT)
+ optval = IP_PORTRANGE_LOW;
+ else
+ optval = 0;
+ break;
+
+ case IP_FAITH:
+ optval = OPTBIT(INP_FAITH);
+ break;
+
+ case IP_ONESBCAST:
+ optval = OPTBIT(INP_ONESBCAST);
+ break;
+ case IP_DONTFRAG:
+ optval = OPTBIT(INP_DONTFRAG);
+ break;
+ case IP_BINDANY:
+ optval = OPTBIT(INP_BINDANY);
+ break;
+ }
+ error = sooptcopyout(sopt, &optval, sizeof optval);
+ break;
+
+ /*
+ * Multicast socket options are processed by the in_mcast
+ * module.
+ */
+ case IP_MULTICAST_IF:
+ case IP_MULTICAST_VIF:
+ case IP_MULTICAST_TTL:
+ case IP_MULTICAST_LOOP:
+ case IP_MSFILTER:
+ error = inp_getmoptions(inp, sopt);
+ break;
+
+#ifdef IPSEC
+ case IP_IPSEC_POLICY:
+ {
+ struct mbuf *m = NULL;
+ caddr_t req = NULL;
+ size_t len = 0;
+
+ if (m != 0) {
+ req = mtod(m, caddr_t);
+ len = m->m_len;
+ }
+ error = ipsec_get_policy(sotoinpcb(so), req, len, &m);
+ if (error == 0)
+ error = soopt_mcopyout(sopt, m); /* XXX */
+ if (error == 0)
+ m_freem(m);
+ break;
+ }
+#endif /* IPSEC */
+
+ default:
+ error = ENOPROTOOPT;
+ break;
+ }
+ break;
+ }
+ return (error);
+}
+
+/*
+ * Routine called from ip_output() to loop back a copy of an IP multicast
+ * packet to the input queue of a specified interface. Note that this
+ * calls the output routine of the loopback "driver", but with an interface
+ * pointer that might NOT be a loopback interface -- evil, but easier than
+ * replicating that code here.
+ */
+static void
+ip_mloopback(struct ifnet *ifp, struct mbuf *m, struct sockaddr_in *dst,
+ int hlen)
+{
+ register struct ip *ip;
+ struct mbuf *copym;
+
+ /*
+ * Make a deep copy of the packet because we're going to
+	 * modify the packet in order to generate checksums.
+ */
+ copym = m_dup(m, M_DONTWAIT);
+ if (copym != NULL && (copym->m_flags & M_EXT || copym->m_len < hlen))
+ copym = m_pullup(copym, hlen);
+ if (copym != NULL) {
+ /* If needed, compute the checksum and mark it as valid. */
+ if (copym->m_pkthdr.csum_flags & CSUM_DELAY_DATA) {
+ in_delayed_cksum(copym);
+ copym->m_pkthdr.csum_flags &= ~CSUM_DELAY_DATA;
+ copym->m_pkthdr.csum_flags |=
+ CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
+ copym->m_pkthdr.csum_data = 0xffff;
+ }
+ /*
+ * We don't bother to fragment if the IP length is greater
+ * than the interface's MTU. Can this possibly matter?
+ */
+ ip = mtod(copym, struct ip *);
+ ip->ip_len = htons(ip->ip_len);
+ ip->ip_off = htons(ip->ip_off);
+ ip->ip_sum = 0;
+ ip->ip_sum = in_cksum(copym, hlen);
+#if 1 /* XXX */
+ if (dst->sin_family != AF_INET) {
+ printf("ip_mloopback: bad address family %d\n",
+ dst->sin_family);
+ dst->sin_family = AF_INET;
+ }
+#endif
+ if_simloop(ifp, copym, dst->sin_family, 0);
+ }
+}
diff --git a/rtems/freebsd/netinet/ip_var.h b/rtems/freebsd/netinet/ip_var.h
new file mode 100644
index 00000000..884f1e3c
--- /dev/null
+++ b/rtems/freebsd/netinet/ip_var.h
@@ -0,0 +1,315 @@
+/*-
+ * Copyright (c) 1982, 1986, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)ip_var.h 8.2 (Berkeley) 1/9/95
+ * $FreeBSD$
+ */
+
+#ifndef _NETINET_IP_VAR_HH_
+#define _NETINET_IP_VAR_HH_
+
+#include <rtems/freebsd/sys/queue.h>
+
+/*
+ * Overlay for ip header used by other protocols (tcp, udp).
+ */
+struct ipovly {
+ u_char ih_x1[9]; /* (unused) */
+ u_char ih_pr; /* protocol */
+ u_short ih_len; /* protocol length */
+ struct in_addr ih_src; /* source internet address */
+ struct in_addr ih_dst; /* destination internet address */
+};
+
+#ifdef _KERNEL
+/*
+ * Ip reassembly queue structure. Each fragment
+ * being reassembled is attached to one of these structures.
+ * They are timed out after ipq_ttl drops to 0, and may also
+ * be reclaimed if memory becomes tight.
+ */
+struct ipq {
+ TAILQ_ENTRY(ipq) ipq_list; /* to other reass headers */
+ u_char ipq_ttl; /* time for reass q to live */
+ u_char ipq_p; /* protocol of this fragment */
+ u_short ipq_id; /* sequence id for reassembly */
+ struct mbuf *ipq_frags; /* to ip headers of fragments */
+ struct in_addr ipq_src,ipq_dst;
+ u_char ipq_nfrags; /* # frags in this packet */
+ struct label *ipq_label; /* MAC label */
+};
+#endif /* _KERNEL */
+
+/*
+ * Structure stored in mbuf in inpcb.ip_options
+ * and passed to ip_output when ip options are in use.
+ * The actual length of the options (including ipopt_dst)
+ * is in m_len.
+ */
+#define MAX_IPOPTLEN 40
+
+struct ipoption {
+ struct in_addr ipopt_dst; /* first-hop dst if source routed */
+ char ipopt_list[MAX_IPOPTLEN]; /* options proper */
+};
+
+/*
+ * Structure attached to inpcb.ip_moptions and
+ * passed to ip_output when IP multicast options are in use.
+ * This structure is lazy-allocated.
+ */
+struct ip_moptions {
+ struct ifnet *imo_multicast_ifp; /* ifp for outgoing multicasts */
+ struct in_addr imo_multicast_addr; /* ifindex/addr on MULTICAST_IF */
+ u_long imo_multicast_vif; /* vif num outgoing multicasts */
+ u_char imo_multicast_ttl; /* TTL for outgoing multicasts */
+ u_char imo_multicast_loop; /* 1 => hear sends if a member */
+ u_short imo_num_memberships; /* no. memberships this socket */
+ u_short imo_max_memberships; /* max memberships this socket */
+ struct in_multi **imo_membership; /* group memberships */
+ struct in_mfilter *imo_mfilters; /* source filters */
+};
+
+struct ipstat {
+ u_long ips_total; /* total packets received */
+ u_long ips_badsum; /* checksum bad */
+ u_long ips_tooshort; /* packet too short */
+ u_long ips_toosmall; /* not enough data */
+ u_long ips_badhlen; /* ip header length < data size */
+ u_long ips_badlen; /* ip length < ip header length */
+ u_long ips_fragments; /* fragments received */
+ u_long ips_fragdropped; /* frags dropped (dups, out of space) */
+ u_long ips_fragtimeout; /* fragments timed out */
+ u_long ips_forward; /* packets forwarded */
+ u_long ips_fastforward; /* packets fast forwarded */
+ u_long ips_cantforward; /* packets rcvd for unreachable dest */
+ u_long ips_redirectsent; /* packets forwarded on same net */
+ u_long ips_noproto; /* unknown or unsupported protocol */
+ u_long ips_delivered; /* datagrams delivered to upper level*/
+ u_long ips_localout; /* total ip packets generated here */
+ u_long ips_odropped; /* lost packets due to nobufs, etc. */
+ u_long ips_reassembled; /* total packets reassembled ok */
+ u_long ips_fragmented; /* datagrams successfully fragmented */
+ u_long ips_ofragments; /* output fragments created */
+ u_long ips_cantfrag; /* don't fragment flag was set, etc. */
+ u_long ips_badoptions; /* error in option processing */
+ u_long ips_noroute; /* packets discarded due to no route */
+ u_long ips_badvers; /* ip version != 4 */
+ u_long ips_rawout; /* total raw ip packets generated */
+ u_long ips_toolong; /* ip length > max ip packet size */
+ u_long ips_notmember; /* multicasts for unregistered grps */
+ u_long ips_nogif; /* no match gif found */
+ u_long ips_badaddr; /* invalid address on header */
+};
+
+#ifdef _KERNEL
+
+#include <rtems/freebsd/net/vnet.h>
+
+/*
+ * In-kernel consumers can use these accessor macros directly to update
+ * stats.
+ */
+#define IPSTAT_ADD(name, val) V_ipstat.name += (val)
+#define IPSTAT_SUB(name, val) V_ipstat.name -= (val)
+#define IPSTAT_INC(name) IPSTAT_ADD(name, 1)
+#define IPSTAT_DEC(name) IPSTAT_SUB(name, 1)
+
+/*
+ * Kernel module consumers must use this accessor macro.
+ */
+void kmod_ipstat_inc(int statnum);
+#define KMOD_IPSTAT_INC(name) \
+ kmod_ipstat_inc(offsetof(struct ipstat, name) / sizeof(u_long))
+void kmod_ipstat_dec(int statnum);
+#define KMOD_IPSTAT_DEC(name) \
+ kmod_ipstat_dec(offsetof(struct ipstat, name) / sizeof(u_long))
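+/*
+ * For example, KMOD_IPSTAT_INC(ips_total) expands to kmod_ipstat_inc(0):
+ * the offsetof/sizeof quotient is the index of the u_long field within
+ * struct ipstat.
+ */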
+
+/* flags passed to ip_output as last parameter */
+#define IP_FORWARDING 0x1 /* most of ip header exists */
+#define IP_RAWOUTPUT 0x2 /* raw ip header exists */
+#define IP_SENDONES 0x4 /* send all-ones broadcast */
+#define IP_SENDTOIF 0x8 /* send on specific ifnet */
+#define IP_ROUTETOIF SO_DONTROUTE /* 0x10 bypass routing tables */
+#define IP_ALLOWBROADCAST SO_BROADCAST /* 0x20 can send broadcast packets */
+
+/*
+ * mbuf flag used by ip_fastfwd
+ */
+#define M_FASTFWD_OURS M_PROTO1 /* changed dst to local */
+
+#ifdef __NO_STRICT_ALIGNMENT
+#define IP_HDR_ALIGNED_P(ip) 1
+#else
+#define IP_HDR_ALIGNED_P(ip) ((((intptr_t) (ip)) & 3) == 0)
+#endif
+
+struct ip;
+struct inpcb;
+struct route;
+struct sockopt;
+
+VNET_DECLARE(struct ipstat, ipstat);
+VNET_DECLARE(u_short, ip_id); /* ip packet ctr, for ids */
+VNET_DECLARE(int, ip_defttl); /* default IP ttl */
+VNET_DECLARE(int, ipforwarding); /* ip forwarding */
+#ifdef IPSTEALTH
+VNET_DECLARE(int, ipstealth); /* stealth forwarding */
+#endif
+extern u_char ip_protox[];
+VNET_DECLARE(struct socket *, ip_rsvpd); /* reservation protocol daemon*/
+VNET_DECLARE(struct socket *, ip_mrouter); /* multicast routing daemon */
+extern int (*legal_vif_num)(int);
+extern u_long (*ip_mcast_src)(int);
+VNET_DECLARE(int, rsvp_on);
+extern struct pr_usrreqs rip_usrreqs;
+
+#define V_ipstat VNET(ipstat)
+#define V_ip_id VNET(ip_id)
+#define V_ip_defttl VNET(ip_defttl)
+#define V_ipforwarding VNET(ipforwarding)
+#ifdef IPSTEALTH
+#define V_ipstealth VNET(ipstealth)
+#endif
+#define V_ip_rsvpd VNET(ip_rsvpd)
+#define V_ip_mrouter VNET(ip_mrouter)
+#define V_rsvp_on VNET(rsvp_on)
+
+void inp_freemoptions(struct ip_moptions *);
+int inp_getmoptions(struct inpcb *, struct sockopt *);
+int inp_setmoptions(struct inpcb *, struct sockopt *);
+
+int ip_ctloutput(struct socket *, struct sockopt *sopt);
+void ip_drain(void);
+void ip_fini(void *xtp);
+int ip_fragment(struct ip *ip, struct mbuf **m_frag, int mtu,
+ u_long if_hwassist_flags, int sw_csum);
+void ip_forward(struct mbuf *m, int srcrt);
+void ip_init(void);
+#ifdef VIMAGE
+void ip_destroy(void);
+#endif
+extern int
+ (*ip_mforward)(struct ip *, struct ifnet *, struct mbuf *,
+ struct ip_moptions *);
+int ip_output(struct mbuf *,
+ struct mbuf *, struct route *, int, struct ip_moptions *,
+ struct inpcb *);
+int ipproto_register(short);
+int ipproto_unregister(short);
+struct mbuf *
+ ip_reass(struct mbuf *);
+struct in_ifaddr *
+ ip_rtaddr(struct in_addr, u_int fibnum);
+void ip_savecontrol(struct inpcb *, struct mbuf **, struct ip *,
+ struct mbuf *);
+void ip_slowtimo(void);
+u_int16_t ip_randomid(void);
+int rip_ctloutput(struct socket *, struct sockopt *);
+void rip_ctlinput(int, struct sockaddr *, void *);
+void rip_init(void);
+#ifdef VIMAGE
+void rip_destroy(void);
+#endif
+void rip_input(struct mbuf *, int);
+int rip_output(struct mbuf *, struct socket *, u_long);
+void ipip_input(struct mbuf *, int);
+void rsvp_input(struct mbuf *, int);
+int ip_rsvp_init(struct socket *);
+int ip_rsvp_done(void);
+extern int (*ip_rsvp_vif)(struct socket *, struct sockopt *);
+extern void (*ip_rsvp_force_done)(struct socket *);
+extern void (*rsvp_input_p)(struct mbuf *m, int off);
+
+VNET_DECLARE(struct pfil_head, inet_pfil_hook); /* packet filter hooks */
+#define V_inet_pfil_hook VNET(inet_pfil_hook)
+
+void in_delayed_cksum(struct mbuf *m);
+
+/* Hooks for ipfw, dummynet, divert etc. Most are declared in raw_ip.c */
+/*
+ * Reference to an ipfw or packet filter rule that can be carried
+ * outside critical sections.
+ * A rule is identified by rulenum:rule_id which is ordered.
+ * In version chain_id the rule can be found in slot 'slot', so
+ * we don't need a lookup if chain_id == chain->id.
+ *
+ * On exit from the firewall this structure refers to the rule after
+ * the matching one (slot points to the new rule; rulenum:rule_id-1
+ * is the matching rule), and additional info (e.g. info often contains
+ * the insn argument or tablearg in the low 16 bits, in host format).
+ * On entry, the structure is valid if slot>0, and refers to the starting
+ * rules. 'info' contains the reason for reinject, e.g. divert port,
+ * divert direction, and so on.
+ */
+struct ipfw_rule_ref {
+ uint32_t slot; /* slot for matching rule */
+ uint32_t rulenum; /* matching rule number */
+ uint32_t rule_id; /* matching rule id */
+ uint32_t chain_id; /* ruleset id */
+ uint32_t info; /* see below */
+};
+
+enum {
+ IPFW_INFO_MASK = 0x0000ffff,
+ IPFW_INFO_OUT = 0x00000000, /* outgoing, just for convenience */
+ IPFW_INFO_IN = 0x80000000, /* incoming, overloads dir */
+ IPFW_ONEPASS = 0x40000000, /* One-pass, do not reinject */
+ IPFW_IS_MASK = 0x30000000, /* which source ? */
+ IPFW_IS_DIVERT = 0x20000000,
+ IPFW_IS_DUMMYNET =0x10000000,
+	IPFW_IS_PIPE =	 0x08000000,	/* pipe = 1, queue = 0 */
+};
+#define MTAG_IPFW 1148380143 /* IPFW-tagged cookie */
+#define MTAG_IPFW_RULE 1262273568 /* rule reference */
+
+struct ip_fw_args;
+typedef int (*ip_fw_chk_ptr_t)(struct ip_fw_args *args);
+typedef int (*ip_fw_ctl_ptr_t)(struct sockopt *);
+VNET_DECLARE(ip_fw_chk_ptr_t, ip_fw_chk_ptr);
+VNET_DECLARE(ip_fw_ctl_ptr_t, ip_fw_ctl_ptr);
+#define V_ip_fw_chk_ptr VNET(ip_fw_chk_ptr)
+#define V_ip_fw_ctl_ptr VNET(ip_fw_ctl_ptr)
+
+/* Divert hooks. */
+extern void (*ip_divert_ptr)(struct mbuf *m, int incoming);
+/* ng_ipfw hooks -- XXX make it the same as divert and dummynet */
+extern int (*ng_ipfw_input_p)(struct mbuf **, int,
+ struct ip_fw_args *, int);
+
+extern int (*ip_dn_ctl_ptr)(struct sockopt *);
+extern int (*ip_dn_io_ptr)(struct mbuf **, int, struct ip_fw_args *);
+
+VNET_DECLARE(int, ip_do_randomid);
+#define V_ip_do_randomid VNET(ip_do_randomid)
+#define ip_newid() ((V_ip_do_randomid != 0) ? ip_randomid() : \
+ htons(V_ip_id++))
+
+#endif /* _KERNEL */
+
+#endif /* !_NETINET_IP_VAR_HH_ */
diff --git a/rtems/freebsd/netinet/ipfw/dn_heap.c b/rtems/freebsd/netinet/ipfw/dn_heap.c
new file mode 100644
index 00000000..eef52ba1
--- /dev/null
+++ b/rtems/freebsd/netinet/ipfw/dn_heap.c
@@ -0,0 +1,552 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 1998-2002,2010 Luigi Rizzo, Universita` di Pisa
+ * All rights reserved
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * Binary heap and hash tables, used in dummynet
+ *
+ * $FreeBSD$
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+#include <rtems/freebsd/sys/param.h>
+#ifdef _KERNEL
+__FBSDID("$FreeBSD$");
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/netinet/ipfw/dn_heap.h>
+#ifndef log
+#define log(x, arg...)
+#endif
+
+#else /* !_KERNEL */
+
+#include <rtems/freebsd/stdio.h>
+#include <rtems/freebsd/dn_test.h>
+#include <rtems/freebsd/strings.h>
+#include <rtems/freebsd/stdlib.h>
+
+#include "dn_heap.h"
+#define log(x, arg...) fprintf(stderr, ## arg)
+#define panic(x...) fprintf(stderr, ## x), exit(1)
+#define MALLOC_DEFINE(a, b, c)
+static void *my_malloc(int s) { return malloc(s); }
+static void my_free(void *p) { free(p); }
+#define malloc(s, t, w) my_malloc(s)
+#define free(p, t) my_free(p)
+#endif /* !_KERNEL */
+
+MALLOC_DEFINE(M_DN_HEAP, "dummynet", "dummynet heap");
+
+/*
+ * Heap management functions.
+ *
+ * In the heap, first node is element 0. Children of i are 2i+1 and 2i+2.
+ * Some macros help finding parent/children so we can optimize them.
+ *
+ * heap_init() is called to expand the heap when needed.
+ * Increment size in blocks of 16 entries.
+ * Returns 1 on error, 0 on success
+ */
+#define HEAP_FATHER(x) ( ( (x) - 1 ) / 2 )
+#define HEAP_LEFT(x) ( (x)+(x) + 1 )
+#define HEAP_SWAP(a, b, buffer) { buffer = a ; a = b ; b = buffer ; }
+#define HEAP_INCREMENT 15
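+
+/*
+ * For example, with the macros above the parent of node 5 is
+ * HEAP_FATHER(5) = 2 and its children are HEAP_LEFT(5) = 11 and
+ * HEAP_LEFT(5) + 1 = 12.
+ */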
+
+static int
+heap_resize(struct dn_heap *h, unsigned int new_size)
+{
+ struct dn_heap_entry *p;
+
+ if (h->size >= new_size ) /* have enough room */
+ return 0;
+#if 1	/* round up to the next power of 2, minus 1 */
+ new_size |= new_size >> 1;
+ new_size |= new_size >> 2;
+ new_size |= new_size >> 4;
+ new_size |= new_size >> 8;
+ new_size |= new_size >> 16;
+#else
+ new_size = (new_size + HEAP_INCREMENT ) & ~HEAP_INCREMENT;
+#endif
+ p = malloc(new_size * sizeof(*p), M_DN_HEAP, M_NOWAIT);
+ if (p == NULL) {
+ printf("--- %s, resize %d failed\n", __func__, new_size );
+ return 1; /* error */
+ }
+ if (h->size > 0) {
+ bcopy(h->p, p, h->size * sizeof(*p) );
+ free(h->p, M_DN_HEAP);
+ }
+ h->p = p;
+ h->size = new_size;
+ return 0;
+}
+
+int
+heap_init(struct dn_heap *h, int size, int ofs)
+{
+ if (heap_resize(h, size))
+ return 1;
+ h->elements = 0;
+ h->ofs = ofs;
+ return 0;
+}
+
+/*
+ * Insert element in heap. Normally, p != NULL, we insert p in
+ * a new position and bubble up. If p == NULL, then the element is
+ * already in place, and key is the position where to start the
+ * bubble-up.
+ * Returns 1 on failure (cannot allocate new heap entry)
+ *
+ * If ofs > 0 the position (index, int) of the element in the heap is
+ * also stored in the element itself at the given offset in bytes.
+ */
+#define SET_OFFSET(h, i) do { \
+ if (h->ofs > 0) \
+ *((int32_t *)((char *)(h->p[i].object) + h->ofs)) = i; \
+ } while (0)
+/*
+ * RESET_OFFSET is used for sanity checks. It sets the index stored
+ * at offset ofs to an invalid value.
+ */
+#define RESET_OFFSET(h, i) do { \
+ if (h->ofs > 0) \
+ *((int32_t *)((char *)(h->p[i].object) + h->ofs)) = -16; \
+ } while (0)
+
+int
+heap_insert(struct dn_heap *h, uint64_t key1, void *p)
+{
+ int son = h->elements;
+
+ //log("%s key %llu p %p\n", __FUNCTION__, key1, p);
+ if (p == NULL) { /* data already there, set starting point */
+ son = key1;
+ } else { /* insert new element at the end, possibly resize */
+ son = h->elements;
+ if (son == h->size) /* need resize... */
+ // XXX expand by 16 or so
+ if (heap_resize(h, h->elements+16) )
+ return 1; /* failure... */
+ h->p[son].object = p;
+ h->p[son].key = key1;
+ h->elements++;
+ }
+ /* make sure that son >= father along the path */
+ while (son > 0) {
+ int father = HEAP_FATHER(son);
+ struct dn_heap_entry tmp;
+
+ if (DN_KEY_LT( h->p[father].key, h->p[son].key ) )
+ break; /* found right position */
+ /* son smaller than father, swap and repeat */
+ HEAP_SWAP(h->p[son], h->p[father], tmp);
+ SET_OFFSET(h, son);
+ son = father;
+ }
+ SET_OFFSET(h, son);
+ return 0;
+}
+
+/*
+ * remove top element from heap, or obj if obj != NULL
+ */
+void
+heap_extract(struct dn_heap *h, void *obj)
+{
+ int child, father, max = h->elements - 1;
+
+ if (max < 0) {
+ printf("--- %s: empty heap 0x%p\n", __FUNCTION__, h);
+ return;
+ }
+ if (obj == NULL)
+ father = 0; /* default: move up smallest child */
+ else { /* extract specific element, index is at offset */
+ if (h->ofs <= 0)
+ panic("%s: extract from middle not set on %p\n",
+ __FUNCTION__, h);
+ father = *((int *)((char *)obj + h->ofs));
+ if (father < 0 || father >= h->elements) {
+ panic("%s: father %d out of bound 0..%d\n",
+ __FUNCTION__, father, h->elements);
+ }
+ }
+ /*
+ * below, father is the index of the empty element, which
+ * we replace at each step with the smallest child until we
+ * reach the bottom level.
+ */
+ // XXX why removing RESET_OFFSET increases runtime by 10% ?
+ RESET_OFFSET(h, father);
+ while ( (child = HEAP_LEFT(father)) <= max ) {
+ if (child != max &&
+ DN_KEY_LT(h->p[child+1].key, h->p[child].key) )
+ child++; /* take right child, otherwise left */
+ h->p[father] = h->p[child];
+ SET_OFFSET(h, father);
+ father = child;
+ }
+ h->elements--;
+ if (father != max) {
+ /*
+ * Fill hole with last entry and bubble up,
+ * reusing the insert code
+ */
+ h->p[father] = h->p[max];
+ heap_insert(h, father, NULL);
+ }
+}
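+
+/*
+ * Usage sketch (illustrative; 'struct evt' and its fields are
+ * hypothetical).  The int32_t field named by 'ofs' is what
+ * SET_OFFSET() keeps up to date, and is what allows heap_extract()
+ * to remove an element from the middle of the heap:
+ *
+ *	struct evt { uint64_t when; int32_t heap_pos; };
+ *	struct dn_heap h;
+ *
+ *	bzero(&h, sizeof(h));
+ *	heap_init(&h, 16, offsetof(struct evt, heap_pos));
+ *	heap_insert(&h, e->when, e);	   add e, bubble it up
+ *	e = h.p[0].object;		   smallest key is at the root
+ *	heap_extract(&h, NULL);		   pop the root
+ *	heap_extract(&h, e);		   or remove e from the middle
+ *	heap_free(&h);
+ */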
+
+#if 0
+/*
+ * change object position and update references
+ * XXX this one is never used!
+ */
+static void
+heap_move(struct dn_heap *h, uint64_t new_key, void *object)
+{
+ int temp, i, max = h->elements-1;
+ struct dn_heap_entry *p, buf;
+
+ if (h->ofs <= 0)
+ panic("cannot move items on this heap");
+ p = h->p; /* shortcut */
+
+ i = *((int *)((char *)object + h->ofs));
+ if (DN_KEY_LT(new_key, p[i].key) ) { /* must move up */
+ p[i].key = new_key;
+ for (; i>0 &&
+ DN_KEY_LT(new_key, p[(temp = HEAP_FATHER(i))].key);
+ i = temp ) { /* bubble up */
+ HEAP_SWAP(p[i], p[temp], buf);
+ SET_OFFSET(h, i);
+ }
+ } else { /* must move down */
+ p[i].key = new_key;
+ while ( (temp = HEAP_LEFT(i)) <= max ) {
+ /* found left child */
+ if (temp != max &&
+ DN_KEY_LT(p[temp+1].key, p[temp].key))
+ temp++; /* select child with min key */
+			if (DN_KEY_LT(p[temp].key, new_key)) {
+ /* go down */
+ HEAP_SWAP(p[i], p[temp], buf);
+ SET_OFFSET(h, i);
+ } else
+ break;
+ i = temp;
+ }
+ }
+ SET_OFFSET(h, i);
+}
+#endif /* heap_move, unused */
+
+/*
+ * heapify() will reorganize data inside an array to maintain the
+ * heap property. It is needed when we delete a bunch of entries.
+ */
+static void
+heapify(struct dn_heap *h)
+{
+ int i;
+
+ for (i = 0; i < h->elements; i++ )
+ heap_insert(h, i , NULL);
+}
+
+int
+heap_scan(struct dn_heap *h, int (*fn)(void *, uintptr_t),
+ uintptr_t arg)
+{
+ int i, ret, found;
+
+ for (i = found = 0 ; i < h->elements ;) {
+ ret = fn(h->p[i].object, arg);
+ if (ret & HEAP_SCAN_DEL) {
+ h->elements-- ;
+ h->p[i] = h->p[h->elements] ;
+ found++ ;
+ } else
+ i++ ;
+ if (ret & HEAP_SCAN_END)
+ break;
+ }
+ if (found)
+ heapify(h);
+ return found;
+}
+
+/*
+ * cleanup the heap and free data structure
+ */
+void
+heap_free(struct dn_heap *h)
+{
+ if (h->size >0 )
+ free(h->p, M_DN_HEAP);
+ bzero(h, sizeof(*h) );
+}
+
+/*
+ * hash table support.
+ */
+
+struct dn_ht {
+	int buckets;		/* how many buckets, really buckets - 1 */
+ int entries; /* how many entries */
+ int ofs; /* offset of link field */
+ uint32_t (*hash)(uintptr_t, int, void *arg);
+ int (*match)(void *_el, uintptr_t key, int, void *);
+ void *(*newh)(uintptr_t, int, void *);
+ void **ht; /* bucket heads */
+};
+/*
+ * Initialize, allocating bucket pointers inline.
+ * Recycle previous record if possible.
+ * If the 'newh' function is not supplied, we assume that the
+ * key passed to ht_find is the same object to be stored in.
+ */
+struct dn_ht *
+dn_ht_init(struct dn_ht *ht, int buckets, int ofs,
+ uint32_t (*h)(uintptr_t, int, void *),
+ int (*match)(void *, uintptr_t, int, void *),
+ void *(*newh)(uintptr_t, int, void *))
+{
+ int l;
+
+	/*
+	 * Notes on rounding the bucket count to a power of two.
+	 * Given the original bucket count, we compute the nearest lower
+	 * and higher powers of two, minus 1 (b_min and b_max
+	 * respectively), because this value will be ANDed with the
+	 * index returned by the hash function.
+	 * To choose between the two, the original count is compared
+	 * with b_min: if it exceeds 4/3 * b_min we round up to b_max,
+	 * otherwise down to b_min. This ratio approximates rounding to
+	 * the nearest power of two, favoring the larger size when the
+	 * gap between the two powers is relatively big (see the worked
+	 * example below).
+	 * Rounding to a power of two avoids a modulo operation when
+	 * selecting the bucket: ht->buckets stores the bucket count - 1,
+	 * so the bucket is computed as (hash & ht->buckets) instead of
+	 * (hash % count).
+	 */
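+	/*
+	 * Worked example (illustrative, matching the code below): for
+	 * buckets = 9 the bit-folding gives b_max = 15 and b_min = 7
+	 * (table sizes 16 and 8); since 7 * 4/3 = 9 is not greater
+	 * than 9 we keep mask 7, i.e. 8 buckets. For buckets = 12 the
+	 * same threshold of 9 is exceeded, so we round up to mask 15,
+	 * i.e. 16 buckets.
+	 */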
+ int b_min; /* min buckets */
+ int b_max; /* max buckets */
+ int b_ori; /* original buckets */
+
+ if (h == NULL || match == NULL) {
+ printf("--- missing hash or match function");
+ return NULL;
+ }
+ if (buckets < 1 || buckets > 65536)
+ return NULL;
+
+ b_ori = buckets;
+	/* round up to the next power of 2, minus 1 */
+ buckets |= buckets >> 1;
+ buckets |= buckets >> 2;
+ buckets |= buckets >> 4;
+ buckets |= buckets >> 8;
+ buckets |= buckets >> 16;
+
+ b_max = buckets; /* Next power */
+ b_min = buckets >> 1; /* Previous power */
+
+ /* Calculate the 'nearest' bucket size */
+ if (b_min * 4000 / 3000 < b_ori)
+ buckets = b_max;
+ else
+ buckets = b_min;
+
+ if (ht) { /* see if we can reuse */
+ if (buckets <= ht->buckets) {
+ ht->buckets = buckets;
+ } else {
+ /* free pointers if not allocated inline */
+ if (ht->ht != (void *)(ht + 1))
+ free(ht->ht, M_DN_HEAP);
+ free(ht, M_DN_HEAP);
+ ht = NULL;
+ }
+ }
+ if (ht == NULL) {
+		/* Allocate buckets + 1 entries because 'buckets' is used
+		 * as a mask to AND with the index returned by the hash
+		 * function.
+		 */
+ l = sizeof(*ht) + (buckets + 1) * sizeof(void **);
+ ht = malloc(l, M_DN_HEAP, M_NOWAIT | M_ZERO);
+ }
+ if (ht) {
+ ht->ht = (void **)(ht + 1);
+ ht->buckets = buckets;
+ ht->ofs = ofs;
+ ht->hash = h;
+ ht->match = match;
+ ht->newh = newh;
+ }
+ return ht;
+}
+
+/* dummy callback for dn_ht_free to unlink all */
+static int
+do_del(void *obj, void *arg)
+{
+ return DNHT_SCAN_DEL;
+}
+
+void
+dn_ht_free(struct dn_ht *ht, int flags)
+{
+ if (ht == NULL)
+ return;
+ if (flags & DNHT_REMOVE) {
+ (void)dn_ht_scan(ht, do_del, NULL);
+ } else {
+ if (ht->ht && ht->ht != (void *)(ht + 1))
+ free(ht->ht, M_DN_HEAP);
+ free(ht, M_DN_HEAP);
+ }
+}
+
+int
+dn_ht_entries(struct dn_ht *ht)
+{
+ return ht ? ht->entries : 0;
+}
+
+/* lookup and optionally create or delete element */
+void *
+dn_ht_find(struct dn_ht *ht, uintptr_t key, int flags, void *arg)
+{
+ int i;
+ void **pp, *p;
+
+ if (ht == NULL) /* easy on an empty hash */
+ return NULL;
+ i = (ht->buckets == 1) ? 0 :
+ (ht->hash(key, flags, arg) & ht->buckets);
+
+ for (pp = &ht->ht[i]; (p = *pp); pp = (void **)((char *)p + ht->ofs)) {
+ if (flags & DNHT_MATCH_PTR) {
+ if (key == (uintptr_t)p)
+ break;
+ } else if (ht->match(p, key, flags, arg)) /* found match */
+ break;
+ }
+ if (p) {
+ if (flags & DNHT_REMOVE) {
+ /* link in the next element */
+ *pp = *(void **)((char *)p + ht->ofs);
+ *(void **)((char *)p + ht->ofs) = NULL;
+ ht->entries--;
+ }
+ } else if (flags & DNHT_INSERT) {
+ // printf("%s before calling new, bucket %d ofs %d\n",
+ // __FUNCTION__, i, ht->ofs);
+ p = ht->newh ? ht->newh(key, flags, arg) : (void *)key;
+ // printf("%s newh returns %p\n", __FUNCTION__, p);
+ if (p) {
+ ht->entries++;
+ *(void **)((char *)p + ht->ofs) = ht->ht[i];
+ ht->ht[i] = p;
+ }
+ }
+ return p;
+}
+
+/*
+ * do a scan with the option to delete the object. Extract next before
+ * running the callback because the element may be destroyed there.
+ */
+int
+dn_ht_scan(struct dn_ht *ht, int (*fn)(void *, void *), void *arg)
+{
+ int i, ret, found = 0;
+ void **curp, *cur, *next;
+
+ if (ht == NULL || fn == NULL)
+ return 0;
+ for (i = 0; i <= ht->buckets; i++) {
+ curp = &ht->ht[i];
+ while ( (cur = *curp) != NULL) {
+ next = *(void **)((char *)cur + ht->ofs);
+ ret = fn(cur, arg);
+ if (ret & DNHT_SCAN_DEL) {
+ found++;
+ ht->entries--;
+ *curp = next;
+ } else {
+ curp = (void **)((char *)cur + ht->ofs);
+ }
+ if (ret & DNHT_SCAN_END)
+ return found;
+ }
+ }
+ return found;
+}
+
+/*
+ * Similar to dn_ht_scan(), except that the scan is performed only
+ * on the bucket 'bucket'. The function stores back a valid bucket
+ * number if the original one is out of range.
+ */
+int
+dn_ht_scan_bucket(struct dn_ht *ht, int *bucket, int (*fn)(void *, void *),
+ void *arg)
+{
+ int i, ret, found = 0;
+ void **curp, *cur, *next;
+
+ if (ht == NULL || fn == NULL)
+ return 0;
+ if (*bucket > ht->buckets)
+ *bucket = 0;
+ i = *bucket;
+
+ curp = &ht->ht[i];
+ while ( (cur = *curp) != NULL) {
+ next = *(void **)((char *)cur + ht->ofs);
+ ret = fn(cur, arg);
+ if (ret & DNHT_SCAN_DEL) {
+ found++;
+ ht->entries--;
+ *curp = next;
+ } else {
+ curp = (void **)((char *)cur + ht->ofs);
+ }
+ if (ret & DNHT_SCAN_END)
+ return found;
+ }
+ return found;
+}
+
diff --git a/rtems/freebsd/netinet/ipfw/dn_heap.h b/rtems/freebsd/netinet/ipfw/dn_heap.h
new file mode 100644
index 00000000..c95473ad
--- /dev/null
+++ b/rtems/freebsd/netinet/ipfw/dn_heap.h
@@ -0,0 +1,191 @@
+/*-
+ * Copyright (c) 1998-2010 Luigi Rizzo, Universita` di Pisa
+ * All rights reserved
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * Binary heap and hash tables, header file
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _IP_DN_HEAP_H
+#define _IP_DN_HEAP_H
+
+#define DN_KEY_LT(a,b) ((int64_t)((a)-(b)) < 0)
+#define DN_KEY_LEQ(a,b) ((int64_t)((a)-(b)) <= 0)
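+
+/*
+ * The signed 64-bit difference makes these comparisons robust to key
+ * wraparound. For instance (illustrative), with
+ * a = 0xffffffffffffffffULL and b = 2, (int64_t)(a - b) is -3, so
+ * DN_KEY_LT(a, b) is true and 'a' is treated as slightly before 'b'
+ * rather than far after it.
+ */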
+
+/*
+ * This module implements a binary heap supporting random extraction.
+ *
+ * A heap entry contains a uint64_t key and a pointer to an object.
+ * DN_KEY_LT(a,b) returns true if key 'a' is smaller than 'b'
+ *
+ * The heap is a struct dn_heap plus a dynamically allocated
+ * array of dn_heap_entry entries. 'size' represents the size of
+ * the array, 'elements' count entries in use. The topmost
+ * element has the smallest key.
+ * The heap supports ordered insert, and extract from the top.
+ * To extract an object from the middle of the heap, the object
+ * must reserve an 'int32_t' to store its position in the heap
+ * itself, and the offset of this field must be passed as an
+ * argument to heap_init() -- use -1 if the feature is not used.
+ */
+struct dn_heap_entry {
+ uint64_t key; /* sorting key, smallest comes first */
+ void *object; /* object pointer */
+};
+
+struct dn_heap {
+ int size; /* the size of the array */
+ int elements; /* elements in use */
+ int ofs; /* offset in the object of heap index */
+ struct dn_heap_entry *p; /* array of "size" entries */
+};
+
+enum {
+ HEAP_SCAN_DEL = 1,
+ HEAP_SCAN_END = 2,
+};
+
+/*
+ * heap_init() reinitializes the heap setting the size and the offset
+ * of the index for random extraction (use -1 if not used).
+ * The 'elements' counter is set to 0.
+ *
+ * SET_HEAP_OFS() indicates where, in the object, is stored the index
+ * for random extractions from the heap.
+ *
+ * heap_free() frees the memory associated to a heap.
+ *
+ * heap_insert() adds a key-pointer pair to the heap
+ *
+ * HEAP_TOP() returns a pointer to the top element of the heap,
+ * but makes no checks on its existence (XXX should we change?)
+ *
+ * heap_extract() removes the entry at the top, returning the pointer.
+ * (the key should have been read before).
+ *
+ * heap_scan() invokes a callback on each entry of the heap.
+ * The callback can return a combination of HEAP_SCAN_DEL and
+ * HEAP_SCAN_END. HEAP_SCAN_DEL means the current element must
+ * be removed, and HEAP_SCAN_END means to terminate the scan.
+ * heap_scan() returns the number of elements removed.
+ * Because the order is not guaranteed, we should use heap_scan()
+ * only as a last resort mechanism.
+ */
+#define HEAP_TOP(h) ((h)->p)
+#define SET_HEAP_OFS(h, n) do { (h)->ofs = n; } while (0)
+int heap_init(struct dn_heap *h, int size, int ofs);
+int heap_insert(struct dn_heap *h, uint64_t key1, void *p);
+void heap_extract(struct dn_heap *h, void *obj);
+void heap_free(struct dn_heap *h);
+int heap_scan(struct dn_heap *, int (*)(void *, uintptr_t), uintptr_t);
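+
+/*
+ * Minimal usage sketch (kept under #if 0, not compiled). The object
+ * layout, sizes and keys below are assumptions for the example only;
+ * offsetof() would need <stddef.h> in a standalone build.
+ */
+#if 0
+struct my_ev {
+	int32_t heap_pos;	/* position kept up to date by the heap */
+};
+
+static void
+heap_example(void)
+{
+	struct dn_heap h;
+	struct my_ev ev;
+
+	bzero(&h, sizeof(h));
+	heap_init(&h, 16, offsetof(struct my_ev, heap_pos));
+	heap_insert(&h, 100, &ev);	/* key 100 */
+	if (h.elements > 0) {
+		struct my_ev *top = HEAP_TOP(&h)->object;
+
+		heap_extract(&h, NULL);	/* pop the smallest key */
+		(void)top;
+	}
+	heap_free(&h);
+}
+#endif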
+
+/*------------------------------------------------------
+ * This module implements a generic hash table with support for
+ * running callbacks on the entire table. To avoid allocating
+ * memory during hash table operations, objects must reserve
+ * space for a link field. XXX if the heap is moderately full,
+ * an SLIST suffices, and we can tolerate the cost of a hash
+ * computation on each removal.
+ *
+ * dn_ht_init() initializes the table, setting the number of
+ * buckets, the offset of the link field, the main callbacks.
+ * Callbacks are:
+ *
+ * hash(key, flags, arg) called to return a bucket index.
+ * match(obj, key, flags, arg) called to determine if key
+ * matches the current 'obj' in the heap
+ * newh(key, flags, arg) optional, used to allocate a new
+ * object during insertions.
+ *
+ * dn_ht_free() frees the table or unlinks its elements:
+ * DNHT_REMOVE unlinks the elements, 0 frees the table.
+ * You need two calls to do both.
+ *
+ * dn_ht_find() is the main lookup function, which can also be
+ * used to insert or delete elements in the hash table.
+ * The final 'arg' is passed to all callbacks.
+ *
+ * dn_ht_scan() is used to invoke a callback on all entries of
+ * the heap, or possibly on just one bucket. The callback
+ * is invoked with a pointer to the object, and must return
+ * one of DNHT_SCAN_DEL or DNHT_SCAN_END to request the
+ * removal of the object from the heap and the end of the
+ * scan, respectively.
+ *
+ * dn_ht_scan_bucket() is similar to dn_ht_scan(), except that it
+ * scans only the specified bucket of the table. The bucket is an
+ * in-out parameter: a valid bucket number is stored back if the
+ * original one is invalid.
+ *
+ * A combination of flags can be used to modify the operation
+ * of the dn_ht_find(), and of the callbacks:
+ *
+ * DNHT_KEY_IS_OBJ means the key is the object pointer.
+ * It is usually of interest for the hash and match functions.
+ *
+ * DNHT_MATCH_PTR during a lookup, match pointers instead
+ * of calling match(). Normally used when removing specific
+ * entries. Does not imply KEY_IS_OBJ as the latter _is_ used
+ * by the match function.
+ *
+ * DNHT_INSERT insert the element if not found.
+ * Calls newh() to allocate a new object unless
+ * DNHT_KEY_IS_OBJ is set.
+ *
+ * DNHT_UNIQUE only insert if object not found.
+ * XXX should it imply DNHT_INSERT ?
+ *
+ * DNHT_REMOVE remove objects if we find them.
+ */
+struct dn_ht; /* should be opaque */
+
+struct dn_ht *dn_ht_init(struct dn_ht *, int buckets, int ofs,
+ uint32_t (*hash)(uintptr_t, int, void *),
+ int (*match)(void *, uintptr_t, int, void *),
+ void *(*newh)(uintptr_t, int, void *));
+void dn_ht_free(struct dn_ht *, int flags);
+
+void *dn_ht_find(struct dn_ht *, uintptr_t, int, void *);
+int dn_ht_scan(struct dn_ht *, int (*)(void *, void *), void *);
+int dn_ht_scan_bucket(struct dn_ht *, int * , int (*)(void *, void *), void *);
+int dn_ht_entries(struct dn_ht *);
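+
+/*
+ * Lookup-or-insert sketch (kept under #if 0, not compiled). The
+ * callbacks, the 'link' field and the hash multiplier are assumptions
+ * made only for this example.
+ */
+#if 0
+struct my_flow {
+	void *link;		/* chaining field used by the table */
+	uint32_t id;
+};
+
+static uint32_t
+my_hash(uintptr_t key, int flags, void *arg)
+{
+	return (uint32_t)key * 2654435761u;	/* multiplicative hash */
+}
+
+static int
+my_match(void *obj, uintptr_t key, int flags, void *arg)
+{
+	return ((struct my_flow *)obj)->id == (uint32_t)key;
+}
+
+static void *
+my_new(uintptr_t key, int flags, void *arg)
+{
+	struct my_flow *f;
+
+	f = malloc(sizeof(*f), M_DN_HEAP, M_NOWAIT | M_ZERO);
+	if (f != NULL)
+		f->id = (uint32_t)key;
+	return f;
+}
+
+/*
+ * ht = dn_ht_init(NULL, 64, offsetof(struct my_flow, link),
+ *	my_hash, my_match, my_new);
+ * f = dn_ht_find(ht, id, DNHT_INSERT, NULL); // lookup, create if missing
+ * dn_ht_free(ht, DNHT_REMOVE);	// first unlink all entries
+ * dn_ht_free(ht, 0);		// then free the table itself
+ *				// (objects are freed separately)
+ */
+#endif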
+
+enum { /* flags values.
+ * first two are returned by the scan callback to indicate
+ * to delete the matching element or to end the scan
+ */
+ DNHT_SCAN_DEL = 0x0001,
+ DNHT_SCAN_END = 0x0002,
+ DNHT_KEY_IS_OBJ = 0x0004, /* key is the obj pointer */
+ DNHT_MATCH_PTR = 0x0008, /* match by pointer, not match() */
+ DNHT_INSERT = 0x0010, /* insert if not found */
+ DNHT_UNIQUE = 0x0020, /* report error if already there */
+ DNHT_REMOVE = 0x0040, /* remove on find or dn_ht_free */
+};
+
+#endif /* _IP_DN_HEAP_H */
diff --git a/rtems/freebsd/netinet/ipfw/dn_sched.h b/rtems/freebsd/netinet/ipfw/dn_sched.h
new file mode 100644
index 00000000..fe54b020
--- /dev/null
+++ b/rtems/freebsd/netinet/ipfw/dn_sched.h
@@ -0,0 +1,189 @@
+/*
+ * Copyright (c) 2010 Riccardo Panicucci, Luigi Rizzo, Universita` di Pisa
+ * All rights reserved
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * The API to write a packet scheduling algorithm for dummynet.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _DN_SCHED_H
+#define _DN_SCHED_H
+
+#define DN_MULTIQUEUE 0x01
+/*
+ * Descriptor for a scheduling algorithm.
+ * Contains all function pointers for a given scheduler
+ * This is typically created when a module is loaded, and stored
+ * in a global list of schedulers.
+ */
+struct dn_alg {
+ uint32_t type; /* the scheduler type */
+ const char *name; /* scheduler name */
+ uint32_t flags; /* DN_MULTIQUEUE if supports multiple queues */
+
+ /*
+ * The following define the size of 3 optional data structures
+ * that may need to be allocated at runtime, and are appended
+ * to each of the base data structures: scheduler, sched.inst,
+ * and queue. We don't have a per-flowset structure.
+ */
+ /* + parameters attached to the template, e.g.
+ * default queue sizes, weights, quantum size, and so on;
+ */
+ size_t schk_datalen;
+
+ /* + per-instance parameters, such as timestamps,
+ * containers for queues, etc;
+ */
+ size_t si_datalen;
+
+ size_t q_datalen; /* per-queue parameters (e.g. S,F) */
+
+ /*
+ * Methods implemented by the scheduler:
+ * enqueue enqueue packet 'm' on scheduler 's', queue 'q'.
+ * q is NULL for !MULTIQUEUE.
+ *	Return 0 on success, 1 on drop (packet consumed anyway).
+ * Note that q should be interpreted only as a hint
+ * on the flow that the mbuf belongs to: while a
+ * scheduler will normally enqueue m into q, it is ok
+ * to leave q alone and put the mbuf elsewhere.
+ * This function is called in two cases:
+ * - when a new packet arrives to the scheduler;
+ * - when a scheduler is reconfigured. In this case the
+ * call is issued by the new_queue callback, with a
+ * non empty queue (q) and m pointing to the first
+ * mbuf in the queue. For this reason, the function
+ * should internally check for (m != q->mq.head)
+ * before calling dn_enqueue().
+ *
+ * dequeue Called when scheduler instance 's' can
+ * dequeue a packet. Return NULL if none are available.
+ * XXX what about non work-conserving ?
+ *
+ * config called on 'sched X config ...', normally writes
+ * in the area of size sch_arg
+ *
+ * destroy called on 'sched delete', frees everything
+ * in sch_arg (other parts are handled by more specific
+ * functions)
+ *
+ * new_sched called when a new instance is created, e.g.
+ * to create the local queue for !MULTIQUEUE, set V or
+ * copy parameters for WFQ, and so on.
+ *
+ * free_sched called when deleting an instance, cleans
+ * extra data in the per-instance area.
+ *
+ * new_fsk called when a flowset is linked to a scheduler,
+ * e.g. to validate parameters such as weights etc.
+ * free_fsk when a flowset is unlinked from a scheduler.
+ * (probably unnecessary)
+ *
+ * new_queue called to set the per-queue parameters,
+ * e.g. S and F, adjust sum of weights in the parent, etc.
+ *
+ *	The new_queue callback is normally called when a new
+ *	queue is created. In some cases (such as a scheduler
+ *	change or reconfiguration) it can be called with a
+ *	non-empty queue; in that case the callback should
+ *	eventually call the enqueue function, passing as m the
+ *	first element in the queue.
+ *
+ * free_queue actions related to a queue removal, e.g. undo
+ * all the above. If the queue has data in it, also remove
+ * from the scheduler. This can e.g. happen during a reconfigure.
+ */
+ int (*enqueue)(struct dn_sch_inst *, struct dn_queue *,
+ struct mbuf *);
+ struct mbuf * (*dequeue)(struct dn_sch_inst *);
+
+ int (*config)(struct dn_schk *);
+ int (*destroy)(struct dn_schk*);
+ int (*new_sched)(struct dn_sch_inst *);
+ int (*free_sched)(struct dn_sch_inst *);
+ int (*new_fsk)(struct dn_fsk *f);
+ int (*free_fsk)(struct dn_fsk *f);
+ int (*new_queue)(struct dn_queue *q);
+ int (*free_queue)(struct dn_queue *q);
+
+ /* run-time fields */
+ int ref_count; /* XXX number of instances in the system */
+ SLIST_ENTRY(dn_alg) next; /* Next scheduler in the list */
+};
+
+/* MSVC does not support initializers so we need this ugly macro */
+#ifdef _WIN32
+#define _SI(fld)
+#else
+#define _SI(fld) fld
+#endif
+
+/*
+ * Additionally, dummynet exports some functions and macros
+ * to be used by schedulers:
+ */
+
+void dn_free_pkts(struct mbuf *mnext);
+int dn_enqueue(struct dn_queue *q, struct mbuf* m, int drop);
+/* bound a variable between min and max */
+int ipdn_bound_var(int *v, int dflt, int lo, int hi, const char *msg);
+
+/*
+ * Extract the head of a queue, update stats. Must be the very last
+ * thing done on a dequeue as the queue itself may go away.
+ */
+static __inline struct mbuf*
+dn_dequeue(struct dn_queue *q)
+{
+ struct mbuf *m = q->mq.head;
+ if (m == NULL)
+ return NULL;
+ q->mq.head = m->m_nextpkt;
+ q->ni.length--;
+ q->ni.len_bytes -= m->m_pkthdr.len;
+ if (q->_si) {
+ q->_si->ni.length--;
+ q->_si->ni.len_bytes -= m->m_pkthdr.len;
+ }
+ if (q->ni.length == 0) /* queue is now idle */
+ q->q_time = dn_cfg.curr_time;
+ return m;
+}
+
+int dn_sched_modevent(module_t mod, int cmd, void *arg);
+
+#define DECLARE_DNSCHED_MODULE(name, dnsched) \
+ static moduledata_t name##_mod = { \
+ #name, dn_sched_modevent, dnsched \
+ }; \
+ DECLARE_MODULE(name, name##_mod, \
+ SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_ANY); \
+ MODULE_DEPEND(name, dummynet, 3, 3, 3);
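+/*
+ * For example, the FIFO scheduler later in this patch instantiates
+ * itself with DECLARE_DNSCHED_MODULE(dn_fifo, &fifo_desc).
+ */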
+#endif /* _DN_SCHED_H */
diff --git a/rtems/freebsd/netinet/ipfw/dn_sched_fifo.c b/rtems/freebsd/netinet/ipfw/dn_sched_fifo.c
new file mode 100644
index 00000000..041d9afb
--- /dev/null
+++ b/rtems/freebsd/netinet/ipfw/dn_sched_fifo.c
@@ -0,0 +1,122 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*
+ * Copyright (c) 2010 Riccardo Panicucci, Universita` di Pisa
+ * All rights reserved
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * $FreeBSD$
+ */
+
+#ifdef _KERNEL
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/socketvar.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/module.h>
+#include <rtems/freebsd/net/if.h> /* IFNAMSIZ */
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/ip_var.h> /* ipfw_rule_ref */
+#include <rtems/freebsd/netinet/ip_fw.h> /* flow_id */
+#include <rtems/freebsd/netinet/ip_dummynet.h>
+#include <rtems/freebsd/netinet/ipfw/dn_heap.h>
+#include <rtems/freebsd/netinet/ipfw/ip_dn_private.h>
+#include <rtems/freebsd/netinet/ipfw/dn_sched.h>
+#else
+#include <rtems/freebsd/dn_test.h>
+#endif
+
+/*
+ * This file implements a FIFO scheduler for a single queue.
+ * The queue is allocated as part of the scheduler instance,
+ * and a single flowset in the template stores the queue
+ * size and policy.
+ * Enqueue and dequeue use the default library functions.
+ */
+static int
+fifo_enqueue(struct dn_sch_inst *si, struct dn_queue *q, struct mbuf *m)
+{
+ /* XXX if called with q != NULL and m=NULL, this is a
+ * re-enqueue from an existing scheduler, which we should
+ * handle.
+ */
+ return dn_enqueue((struct dn_queue *)(si+1), m, 0);
+}
+
+static struct mbuf *
+fifo_dequeue(struct dn_sch_inst *si)
+{
+ return dn_dequeue((struct dn_queue *)(si + 1));
+}
+
+static int
+fifo_new_sched(struct dn_sch_inst *si)
+{
+ /* This scheduler instance contains the queue */
+ struct dn_queue *q = (struct dn_queue *)(si + 1);
+
+ set_oid(&q->ni.oid, DN_QUEUE, sizeof(*q));
+ q->_si = si;
+ q->fs = si->sched->fs;
+ return 0;
+}
+
+static int
+fifo_free_sched(struct dn_sch_inst *si)
+{
+ struct dn_queue *q = (struct dn_queue *)(si + 1);
+ dn_free_pkts(q->mq.head);
+ bzero(q, sizeof(*q));
+ return 0;
+}
+
+/*
+ * FIFO scheduler descriptor
+ * contains the type of the scheduler, the name, the size of extra
+ * data structures, and function pointers.
+ */
+static struct dn_alg fifo_desc = {
+ _SI( .type = ) DN_SCHED_FIFO,
+ _SI( .name = ) "FIFO",
+ _SI( .flags = ) 0,
+
+ _SI( .schk_datalen = ) 0,
+ _SI( .si_datalen = ) sizeof(struct dn_queue),
+ _SI( .q_datalen = ) 0,
+
+ _SI( .enqueue = ) fifo_enqueue,
+ _SI( .dequeue = ) fifo_dequeue,
+ _SI( .config = ) NULL,
+ _SI( .destroy = ) NULL,
+ _SI( .new_sched = ) fifo_new_sched,
+ _SI( .free_sched = ) fifo_free_sched,
+ _SI( .new_fsk = ) NULL,
+ _SI( .free_fsk = ) NULL,
+ _SI( .new_queue = ) NULL,
+ _SI( .free_queue = ) NULL,
+};
+
+DECLARE_DNSCHED_MODULE(dn_fifo, &fifo_desc);
diff --git a/rtems/freebsd/netinet/ipfw/dn_sched_prio.c b/rtems/freebsd/netinet/ipfw/dn_sched_prio.c
new file mode 100644
index 00000000..df91f8a0
--- /dev/null
+++ b/rtems/freebsd/netinet/ipfw/dn_sched_prio.c
@@ -0,0 +1,231 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*
+ * Copyright (c) 2010 Riccardo Panicucci, Universita` di Pisa
+ * All rights reserved
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * $FreeBSD$
+ */
+#ifdef _KERNEL
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/socketvar.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/module.h>
+#include <rtems/freebsd/net/if.h> /* IFNAMSIZ */
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/ip_var.h> /* ipfw_rule_ref */
+#include <rtems/freebsd/netinet/ip_fw.h> /* flow_id */
+#include <rtems/freebsd/netinet/ip_dummynet.h>
+#include <rtems/freebsd/netinet/ipfw/dn_heap.h>
+#include <rtems/freebsd/netinet/ipfw/ip_dn_private.h>
+#include <rtems/freebsd/netinet/ipfw/dn_sched.h>
+#else
+#include <rtems/freebsd/dn_test.h>
+#endif
+
+#define DN_SCHED_PRIO 5 //XXX
+
+#if !defined(_KERNEL) || !defined(__linux__)
+#define test_bit(ix, pData) ((*pData) & (1<<(ix)))
+#define __set_bit(ix, pData) (*pData) |= (1<<(ix))
+#define __clear_bit(ix, pData) (*pData) &= ~(1<<(ix))
+#endif
+
+#ifdef __MIPSEL__
+#define __clear_bit(ix, pData) (*pData) &= ~(1<<(ix))
+#endif
+
+/* Bitmap type; its width also bounds the array of queue pointers. */
+#define BITMAP_T unsigned long
+#define MAXPRIO (sizeof(BITMAP_T) * 8)
+
+/*
+ * The scheduler instance contains an array of pointers to queues,
+ * one for each priority, and a bitmap listing backlogged queues.
+ */
+struct prio_si {
+	BITMAP_T bitmap;			/* bitmap of backlogged priorities */
+	struct dn_queue *q_array[MAXPRIO];	/* array of queue pointers */
+};
+
+/*
+ * If a queue with the same priority is already backlogged, use
+ * that one instead of the queue passed as argument.
+ */
+static int
+prio_enqueue(struct dn_sch_inst *_si, struct dn_queue *q, struct mbuf *m)
+{
+ struct prio_si *si = (struct prio_si *)(_si + 1);
+ int prio = q->fs->fs.par[0];
+
+ if (test_bit(prio, &si->bitmap) == 0) {
+ /* No queue with this priority, insert */
+ __set_bit(prio, &si->bitmap);
+ si->q_array[prio] = q;
+ } else { /* use the existing queue */
+ q = si->q_array[prio];
+ }
+ if (dn_enqueue(q, m, 0))
+ return 1;
+ return 0;
+}
+
+/*
+ * Packets are dequeued only from the highest priority queue.
+ * ffs() returns the position of the lowest set bit in the bitmap;
+ * that position minus 1 is the array index holding the pointer to
+ * the highest priority queue (see the worked example below).
+ * After the dequeue, if the queue becomes empty its index is removed
+ * from the bitmap.
+ * The scheduler is idle if the bitmap is empty.
+ *
+ * NOTE: highest priority is 0, lowest is sched->max_prio_q
+ */
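+/*
+ * Worked example (illustrative): with bitmap = 0x28 (binary 101000)
+ * the queues at priorities 3 and 5 are backlogged; ffs() returns 4,
+ * so prio = 3 and q_array[3] is served first.
+ */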
+static struct mbuf *
+prio_dequeue(struct dn_sch_inst *_si)
+{
+ struct prio_si *si = (struct prio_si *)(_si + 1);
+ struct mbuf *m;
+ struct dn_queue *q;
+ int prio;
+
+ if (si->bitmap == 0) /* scheduler idle */
+ return NULL;
+
+ prio = ffs(si->bitmap) - 1;
+
+ /* Take the highest priority queue in the scheduler */
+ q = si->q_array[prio];
+ // assert(q)
+
+ m = dn_dequeue(q);
+ if (q->mq.head == NULL) {
+ /* Queue is now empty, remove from scheduler
+ * and mark it
+ */
+ si->q_array[prio] = NULL;
+ __clear_bit(prio, &si->bitmap);
+ }
+ return m;
+}
+
+static int
+prio_new_sched(struct dn_sch_inst *_si)
+{
+ struct prio_si *si = (struct prio_si *)(_si + 1);
+
+ bzero(si->q_array, sizeof(si->q_array));
+ si->bitmap = 0;
+
+ return 0;
+}
+
+static int
+prio_new_fsk(struct dn_fsk *fs)
+{
+	/* Check that the priority is between 0 and MAXPRIO-1 */
+ ipdn_bound_var(&fs->fs.par[0], 0, 0, MAXPRIO - 1, "PRIO priority");
+ return 0;
+}
+
+static int
+prio_new_queue(struct dn_queue *q)
+{
+ struct prio_si *si = (struct prio_si *)(q->_si + 1);
+ int prio = q->fs->fs.par[0];
+ struct dn_queue *oldq;
+
+ q->ni.oid.subtype = DN_SCHED_PRIO;
+
+ if (q->mq.head == NULL)
+ return 0;
+
+	/* The queue is non-empty: insert it in the scheduler or append
+	 * its mbufs to the queue already at this priority. This partly
+	 * duplicates prio_enqueue().
+	 */
+ if (test_bit(prio, &si->bitmap) == 0) {
+ /* No queue with this priority, insert */
+ __set_bit(prio, &si->bitmap);
+ si->q_array[prio] = q;
+ } else if ( (oldq = si->q_array[prio]) != q) {
+ /* must append to the existing queue.
+ * can simply append q->mq.head to q2->...
+ * and add the counters to those of q2
+ */
+ oldq->mq.tail->m_nextpkt = q->mq.head;
+ oldq->mq.tail = q->mq.tail;
+ oldq->ni.length += q->ni.length;
+ q->ni.length = 0;
+ oldq->ni.len_bytes += q->ni.len_bytes;
+ q->ni.len_bytes = 0;
+ q->mq.tail = q->mq.head = NULL;
+ }
+ return 0;
+}
+
+static int
+prio_free_queue(struct dn_queue *q)
+{
+ int prio = q->fs->fs.par[0];
+ struct prio_si *si = (struct prio_si *)(q->_si + 1);
+
+ if (si->q_array[prio] == q) {
+ si->q_array[prio] = NULL;
+ __clear_bit(prio, &si->bitmap);
+ }
+ return 0;
+}
+
+
+static struct dn_alg prio_desc = {
+ _SI( .type = ) DN_SCHED_PRIO,
+ _SI( .name = ) "PRIO",
+ _SI( .flags = ) DN_MULTIQUEUE,
+
+ /* we need extra space in the si and the queue */
+ _SI( .schk_datalen = ) 0,
+ _SI( .si_datalen = ) sizeof(struct prio_si),
+ _SI( .q_datalen = ) 0,
+
+ _SI( .enqueue = ) prio_enqueue,
+ _SI( .dequeue = ) prio_dequeue,
+
+ _SI( .config = ) NULL,
+ _SI( .destroy = ) NULL,
+ _SI( .new_sched = ) prio_new_sched,
+ _SI( .free_sched = ) NULL,
+
+ _SI( .new_fsk = ) prio_new_fsk,
+ _SI( .free_fsk = ) NULL,
+
+ _SI( .new_queue = ) prio_new_queue,
+ _SI( .free_queue = ) prio_free_queue,
+};
+
+
+DECLARE_DNSCHED_MODULE(dn_prio, &prio_desc);
diff --git a/rtems/freebsd/netinet/ipfw/dn_sched_qfq.c b/rtems/freebsd/netinet/ipfw/dn_sched_qfq.c
new file mode 100644
index 00000000..e7a3c98b
--- /dev/null
+++ b/rtems/freebsd/netinet/ipfw/dn_sched_qfq.c
@@ -0,0 +1,866 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*
+ * Copyright (c) 2010 Fabio Checconi, Luigi Rizzo, Paolo Valente
+ * All rights reserved
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * $FreeBSD$
+ */
+
+#ifdef _KERNEL
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/socketvar.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/module.h>
+#include <rtems/freebsd/net/if.h> /* IFNAMSIZ */
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/ip_var.h> /* ipfw_rule_ref */
+#include <rtems/freebsd/netinet/ip_fw.h> /* flow_id */
+#include <rtems/freebsd/netinet/ip_dummynet.h>
+#include <rtems/freebsd/netinet/ipfw/dn_heap.h>
+#include <rtems/freebsd/netinet/ipfw/ip_dn_private.h>
+#include <rtems/freebsd/netinet/ipfw/dn_sched.h>
+#else
+#include <rtems/freebsd/dn_test.h>
+#endif
+
+#ifdef QFQ_DEBUG
+struct qfq_sched;
+static void dump_sched(struct qfq_sched *q, const char *msg);
+#define NO(x) x
+#else
+#define NO(x)
+#endif
+#define DN_SCHED_QFQ 4 // XXX Where?
+typedef unsigned long bitmap;
+
+/*
+ * bitmaps ops are critical. Some linux versions have __fls
+ * and the bitmap ops. Some machines have ffs
+ */
+#if defined(_WIN32)
+int fls(unsigned int n)
+{
+ int i = 0;
+ for (i = 0; n > 0; n >>= 1, i++)
+ ;
+ return i;
+}
+#endif
+
+#if !defined(_KERNEL) || defined( __FreeBSD__ ) || defined(_WIN32)
+static inline unsigned long __fls(unsigned long word)
+{
+ return fls(word) - 1;
+}
+#endif
+
+#if !defined(_KERNEL) || !defined(__linux__)
+#ifdef QFQ_DEBUG
+int test_bit(int ix, bitmap *p)
+{
+ if (ix < 0 || ix > 31)
+ D("bad index %d", ix);
+ return *p & (1<<ix);
+}
+void __set_bit(int ix, bitmap *p)
+{
+ if (ix < 0 || ix > 31)
+ D("bad index %d", ix);
+ *p |= (1<<ix);
+}
+void __clear_bit(int ix, bitmap *p)
+{
+ if (ix < 0 || ix > 31)
+ D("bad index %d", ix);
+ *p &= ~(1<<ix);
+}
+#else /* !QFQ_DEBUG */
+/* XXX do we have fast version, or leave it to the compiler ? */
+#define test_bit(ix, pData) ((*pData) & (1<<(ix)))
+#define __set_bit(ix, pData) (*pData) |= (1<<(ix))
+#define __clear_bit(ix, pData) (*pData) &= ~(1<<(ix))
+#endif /* !QFQ_DEBUG */
+#endif /* !__linux__ */
+
+#ifdef __MIPSEL__
+#define __clear_bit(ix, pData) (*pData) &= ~(1<<(ix))
+#endif
+
+/*-------------------------------------------*/
+/*
+
+Virtual time computations.
+
+S, F and V are all computed in fixed point arithmetic with
+FRAC_BITS decimal bits.
+
+ QFQ_MAX_INDEX is the maximum index allowed for a group. We need
+ one bit per index.
+ QFQ_MAX_WSHIFT is the maximum power of two supported as a weight.
+ The layout of the bits is as below:
+
+ [ MTU_SHIFT ][ FRAC_BITS ]
+ [ MAX_INDEX ][ MIN_SLOT_SHIFT ]
+ ^.__grp->index = 0
+ *.__grp->slot_shift
+
+ where MIN_SLOT_SHIFT is derived by difference from the others.
+
+The max group index corresponds to Lmax/w_min, where
+Lmax=1<<MTU_SHIFT, w_min = 1 .
+From this, and knowing how many groups (MAX_INDEX) we want,
+we can derive the shift corresponding to each group.
+
+Because we often need to compute
+ F = S + len/w_i and V = V + len/wsum
+instead of storing w_i we store the value
+ inv_w = (1<<FRAC_BITS)/w_i
+so we can do F = S + len * inv_w * wsum.
+We use W_TOT in the formulas so we can easily move between
+static and adaptive weight sum.
+
+The per-scheduler-instance data contain all the data structures
+for the scheduler: bitmaps and bucket lists.
+
+ */
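+/*
+ * Numeric sketch (illustrative): with FRAC_BITS = 30 and a weight
+ * w_i = 4, inv_w = (1 << 30) / 4 = 0x10000000. A 1500-byte packet
+ * then advances the flow's finish time by 1500 * inv_w = 1500 << 28,
+ * expressed in the same fixed-point scale as V.
+ */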
+/*
+ * Maximum number of consecutive slots occupied by backlogged classes
+ * inside a group. This is approx lmax/lmin + 5.
+ * XXX check because it poses constraints on MAX_INDEX
+ */
+#define QFQ_MAX_SLOTS 32
+/*
+ * Shifts used for class<->group mapping. Class weights are
+ * in the range [1, QFQ_MAX_WEIGHT]; we map each class i to the
+ * group with the smallest index that can support the L_i / r_i
+ * configured for the class.
+ *
+ * grp->index is the index of the group; and grp->slot_shift
+ * is the shift for the corresponding (scaled) sigma_i.
+ *
+ * When computing the group index, we do (len<<FP_SHIFT)/weight,
+ * then compute an FLS (which is like a log2()), and if the result
+ * is below the MAX_INDEX region we use 0 (which is the same as
+ * using a larger len).
+ */
+#define QFQ_MAX_INDEX 19
+#define QFQ_MAX_WSHIFT 16 /* log2(max_weight) */
+
+#define QFQ_MAX_WEIGHT (1<<QFQ_MAX_WSHIFT)
+#define QFQ_MAX_WSUM (2*QFQ_MAX_WEIGHT)
+//#define IWSUM (q->i_wsum)
+#define IWSUM ((1<<FRAC_BITS)/QFQ_MAX_WSUM)
+
+#define FRAC_BITS 30 /* fixed point arithmetic */
+#define ONE_FP (1UL << FRAC_BITS)
+
+#define QFQ_MTU_SHIFT 11 /* log2(max_len) */
+#define QFQ_MIN_SLOT_SHIFT (FRAC_BITS + QFQ_MTU_SHIFT - QFQ_MAX_INDEX)
+
+/*
+ * Possible group states, also indexes for the bitmaps array in
+ * struct qfq_sched. We rely on ER, IR, EB, IB being numbered 0..3
+ */
+enum qfq_state { ER, IR, EB, IB, QFQ_MAX_STATE };
+
+struct qfq_group;
+/*
+ * additional queue info. Some of this info should come from
+ * the flowset; we copy it here for faster processing.
+ * This is an overlay of the struct dn_queue
+ */
+struct qfq_class {
+ struct dn_queue _q;
+ uint64_t S, F; /* flow timestamps (exact) */
+ struct qfq_class *next; /* Link for the slot list. */
+
+ /* group we belong to. In principle we would need the index,
+ * which is log_2(lmax/weight), but we never reference it
+ * directly, only the group.
+ */
+ struct qfq_group *grp;
+
+ /* these are copied from the flowset. */
+ uint32_t inv_w; /* ONE_FP/weight */
+ uint32_t lmax; /* Max packet size for this flow. */
+};
+
+/* Group descriptor, see the paper for details.
+ * Basically this contains the bucket lists
+ */
+struct qfq_group {
+ uint64_t S, F; /* group timestamps (approx). */
+ unsigned int slot_shift; /* Slot shift. */
+ unsigned int index; /* Group index. */
+ unsigned int front; /* Index of the front slot. */
+ bitmap full_slots; /* non-empty slots */
+
+ /* Array of lists of active classes. */
+ struct qfq_class *slots[QFQ_MAX_SLOTS];
+};
+
+/* scheduler instance descriptor. */
+struct qfq_sched {
+ uint64_t V; /* Precise virtual time. */
+ uint32_t wsum; /* weight sum */
+ NO(uint32_t i_wsum; /* ONE_FP/w_sum */
+ uint32_t _queued; /* debugging */
+ uint32_t loops; /* debugging */)
+ bitmap bitmaps[QFQ_MAX_STATE]; /* Group bitmaps. */
+ struct qfq_group groups[QFQ_MAX_INDEX + 1]; /* The groups. */
+};
+
+/*---- support functions ----------------------------*/
+
+/* Generic comparison function, handling wraparound. */
+static inline int qfq_gt(uint64_t a, uint64_t b)
+{
+ return (int64_t)(a - b) > 0;
+}
+
+/* Round a precise timestamp to its slotted value. */
+static inline uint64_t qfq_round_down(uint64_t ts, unsigned int shift)
+{
+ return ts & ~((1ULL << shift) - 1);
+}
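+
+/*
+ * E.g. (illustrative) with shift = 3, qfq_round_down(29, 3) = 24:
+ * the low 'shift' bits are cleared, mapping a precise timestamp to
+ * the start of its slot. qfq_gt() above likewise relies on a signed
+ * difference, so it stays correct across 64-bit wraparound.
+ */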
+
+/* return the pointer to the group with lowest index in the bitmap */
+static inline struct qfq_group *qfq_ffs(struct qfq_sched *q,
+ unsigned long bitmap)
+{
+ int index = ffs(bitmap) - 1; // zero-based
+ return &q->groups[index];
+}
+
+/*
+ * Calculate a flow index, given its weight and maximum packet length.
+ * index = log_2(maxlen/weight) but we need to apply the scaling.
+ * This is used only once at flow creation.
+ */
+static int qfq_calc_index(uint32_t inv_w, unsigned int maxlen)
+{
+	uint64_t slot_size = (uint64_t)maxlen * inv_w;
+ unsigned long size_map;
+ int index = 0;
+
+ size_map = (unsigned long)(slot_size >> QFQ_MIN_SLOT_SHIFT);
+ if (!size_map)
+ goto out;
+
+ index = __fls(size_map) + 1; // basically a log_2()
+ index -= !(slot_size - (1ULL << (index + QFQ_MIN_SLOT_SHIFT - 1)));
+
+ if (index < 0)
+ index = 0;
+
+out:
+ ND("W = %d, L = %d, I = %d\n", ONE_FP/inv_w, maxlen, index);
+ return index;
+}
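+/*
+ * Worked example (illustrative): maxlen = 1500 and weight 1 give
+ * inv_w = ONE_FP, so slot_size = 1500 << 30. With QFQ_MIN_SLOT_SHIFT
+ * = 30 + 11 - 19 = 22, size_map = 1500 << 8 and __fls() yields 18,
+ * so index starts at 19; slot_size is not exactly 2^40, so the
+ * correction subtracts nothing and the flow lands in group 19,
+ * the one with the largest slot size.
+ */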
+/*---- end support functions ----*/
+
+/*-------- API calls --------------------------------*/
+/*
+ * Validate and copy parameters from flowset.
+ */
+static int
+qfq_new_queue(struct dn_queue *_q)
+{
+ struct qfq_sched *q = (struct qfq_sched *)(_q->_si + 1);
+ struct qfq_class *cl = (struct qfq_class *)_q;
+ int i;
+ uint32_t w; /* approximated weight */
+
+ /* import parameters from the flowset. They should be correct
+ * already.
+ */
+ w = _q->fs->fs.par[0];
+ cl->lmax = _q->fs->fs.par[1];
+ if (!w || w > QFQ_MAX_WEIGHT) {
+ w = 1;
+ D("rounding weight to 1");
+ }
+ cl->inv_w = ONE_FP/w;
+ w = ONE_FP/cl->inv_w;
+ if (q->wsum + w > QFQ_MAX_WSUM)
+ return EINVAL;
+
+ i = qfq_calc_index(cl->inv_w, cl->lmax);
+ cl->grp = &q->groups[i];
+ q->wsum += w;
+ // XXX cl->S = q->V; ?
+ // XXX compute q->i_wsum
+ return 0;
+}
+
+/* remove an empty queue */
+static int
+qfq_free_queue(struct dn_queue *_q)
+{
+ struct qfq_sched *q = (struct qfq_sched *)(_q->_si + 1);
+ struct qfq_class *cl = (struct qfq_class *)_q;
+ if (cl->inv_w) {
+ q->wsum -= ONE_FP/cl->inv_w;
+		cl->inv_w = 0;	/* reset weight to avoid running this twice */
+ }
+ return 0;
+}
+
+/* Calculate a mask to mimic what would be ffs_from(). */
+static inline unsigned long
+mask_from(unsigned long bitmap, int from)
+{
+ return bitmap & ~((1UL << from) - 1);
+}
+
+/*
+ * The state computation relies on ER=0, IR=1, EB=2, IB=3
+ * First compute eligibility comparing grp->S, q->V,
+ * then check if someone is blocking us and possibly add EB
+ */
+static inline unsigned int
+qfq_calc_state(struct qfq_sched *q, struct qfq_group *grp)
+{
+ /* if S > V we are not eligible */
+ unsigned int state = qfq_gt(grp->S, q->V);
+ unsigned long mask = mask_from(q->bitmaps[ER], grp->index);
+ struct qfq_group *next;
+
+ if (mask) {
+ next = qfq_ffs(q, mask);
+ if (qfq_gt(grp->F, next->F))
+ state |= EB;
+ }
+
+ return state;
+}
+
+/*
+ * In principle
+ * q->bitmaps[dst] |= q->bitmaps[src] & mask;
+ * q->bitmaps[src] &= ~mask;
+ * but we should make sure that src != dst
+ */
+static inline void
+qfq_move_groups(struct qfq_sched *q, unsigned long mask, int src, int dst)
+{
+ q->bitmaps[dst] |= q->bitmaps[src] & mask;
+ q->bitmaps[src] &= ~mask;
+}
+
+static inline void
+qfq_unblock_groups(struct qfq_sched *q, int index, uint64_t old_finish)
+{
+ unsigned long mask = mask_from(q->bitmaps[ER], index + 1);
+ struct qfq_group *next;
+
+ if (mask) {
+ next = qfq_ffs(q, mask);
+ if (!qfq_gt(next->F, old_finish))
+ return;
+ }
+
+ mask = (1UL << index) - 1;
+ qfq_move_groups(q, mask, EB, ER);
+ qfq_move_groups(q, mask, IB, IR);
+}
+
+/*
+ * perhaps
+ *
+ old_V ^= q->V;
+ old_V >>= QFQ_MIN_SLOT_SHIFT;
+ if (old_V) {
+ ...
+ }
+ *
+ */
+static inline void
+qfq_make_eligible(struct qfq_sched *q, uint64_t old_V)
+{
+ unsigned long mask, vslot, old_vslot;
+
+ vslot = q->V >> QFQ_MIN_SLOT_SHIFT;
+ old_vslot = old_V >> QFQ_MIN_SLOT_SHIFT;
+
+ if (vslot != old_vslot) {
+ mask = (2UL << (__fls(vslot ^ old_vslot))) - 1;
+ qfq_move_groups(q, mask, IR, ER);
+ qfq_move_groups(q, mask, IB, EB);
+ }
+}
+
+/*
+ * XXX we should make sure that slot becomes less than 32.
+ * This is guaranteed by the input values.
+ * roundedS is always cl->S rounded on grp->slot_shift bits.
+ */
+static inline void
+qfq_slot_insert(struct qfq_group *grp, struct qfq_class *cl, uint64_t roundedS)
+{
+ uint64_t slot = (roundedS - grp->S) >> grp->slot_shift;
+ unsigned int i = (grp->front + slot) % QFQ_MAX_SLOTS;
+
+ cl->next = grp->slots[i];
+ grp->slots[i] = cl;
+ __set_bit(slot, &grp->full_slots);
+}
+
+/*
+ * remove the entry from the slot
+ */
+static inline void
+qfq_front_slot_remove(struct qfq_group *grp)
+{
+ struct qfq_class **h = &grp->slots[grp->front];
+
+ *h = (*h)->next;
+ if (!*h)
+ __clear_bit(0, &grp->full_slots);
+}
+
+/*
+ * Returns the first full queue in a group. As a side effect,
+ * adjust the bucket list so the first non-empty bucket is at
+ * position 0 in full_slots.
+ */
+static inline struct qfq_class *
+qfq_slot_scan(struct qfq_group *grp)
+{
+ int i;
+
+ ND("grp %d full %x", grp->index, grp->full_slots);
+ if (!grp->full_slots)
+ return NULL;
+
+ i = ffs(grp->full_slots) - 1; // zero-based
+ if (i > 0) {
+ grp->front = (grp->front + i) % QFQ_MAX_SLOTS;
+ grp->full_slots >>= i;
+ }
+
+ return grp->slots[grp->front];
+}
+
+/*
+ * adjust the bucket list. When the start time of a group decreases,
+ * we move the index down (modulo QFQ_MAX_SLOTS) so we don't need to
+ * move the objects. The mask of occupied slots must be shifted
+ * because we use ffs() to find the first non-empty slot.
+ * This covers decreases in the group's start time, but what about
+ * increases of the start time ?
+ * Here too we should make sure that i is less than 32
+ */
+static inline void
+qfq_slot_rotate(struct qfq_sched *q, struct qfq_group *grp, uint64_t roundedS)
+{
+ unsigned int i = (grp->S - roundedS) >> grp->slot_shift;
+
+ grp->full_slots <<= i;
+ grp->front = (grp->front - i) % QFQ_MAX_SLOTS;
+}
+
+
+static inline void
+qfq_update_eligible(struct qfq_sched *q, uint64_t old_V)
+{
+ bitmap ineligible;
+
+ ineligible = q->bitmaps[IR] | q->bitmaps[IB];
+ if (ineligible) {
+ if (!q->bitmaps[ER]) {
+ struct qfq_group *grp;
+ grp = qfq_ffs(q, ineligible);
+ if (qfq_gt(grp->S, q->V))
+ q->V = grp->S;
+ }
+ qfq_make_eligible(q, old_V);
+ }
+}
+
+/*
+ * Updates the class, returns true if also the group needs to be updated.
+ */
+static inline int
+qfq_update_class(struct qfq_sched *q, struct qfq_group *grp,
+ struct qfq_class *cl)
+{
+
+ cl->S = cl->F;
+ if (cl->_q.mq.head == NULL) {
+ qfq_front_slot_remove(grp);
+ } else {
+ unsigned int len;
+ uint64_t roundedS;
+
+ len = cl->_q.mq.head->m_pkthdr.len;
+ cl->F = cl->S + (uint64_t)len * cl->inv_w;
+ roundedS = qfq_round_down(cl->S, grp->slot_shift);
+ if (roundedS == grp->S)
+ return 0;
+
+ qfq_front_slot_remove(grp);
+ qfq_slot_insert(grp, cl, roundedS);
+ }
+ return 1;
+}
+
+static struct mbuf *
+qfq_dequeue(struct dn_sch_inst *si)
+{
+ struct qfq_sched *q = (struct qfq_sched *)(si + 1);
+ struct qfq_group *grp;
+ struct qfq_class *cl;
+ struct mbuf *m;
+ uint64_t old_V;
+
+ NO(q->loops++;)
+ if (!q->bitmaps[ER]) {
+ NO(if (q->queued)
+ dump_sched(q, "start dequeue");)
+ return NULL;
+ }
+
+ grp = qfq_ffs(q, q->bitmaps[ER]);
+
+ cl = grp->slots[grp->front];
+ /* extract from the first bucket in the bucket list */
+ m = dn_dequeue(&cl->_q);
+
+ if (!m) {
+ D("BUG/* non-workconserving leaf */");
+ return NULL;
+ }
+ NO(q->queued--;)
+ old_V = q->V;
+ q->V += (uint64_t)m->m_pkthdr.len * IWSUM;
+ ND("m is %p F 0x%llx V now 0x%llx", m, cl->F, q->V);
+
+ if (qfq_update_class(q, grp, cl)) {
+ uint64_t old_F = grp->F;
+ cl = qfq_slot_scan(grp);
+ if (!cl) { /* group gone, remove from ER */
+ __clear_bit(grp->index, &q->bitmaps[ER]);
+ // grp->S = grp->F + 1; // XXX debugging only
+ } else {
+ uint64_t roundedS = qfq_round_down(cl->S, grp->slot_shift);
+ unsigned int s;
+
+ if (grp->S == roundedS)
+ goto skip_unblock;
+ grp->S = roundedS;
+ grp->F = roundedS + (2ULL << grp->slot_shift);
+ /* remove from ER and put in the new set */
+ __clear_bit(grp->index, &q->bitmaps[ER]);
+ s = qfq_calc_state(q, grp);
+ __set_bit(grp->index, &q->bitmaps[s]);
+ }
+ /* we need to unblock even if the group has gone away */
+ qfq_unblock_groups(q, grp->index, old_F);
+ }
+
+skip_unblock:
+ qfq_update_eligible(q, old_V);
+ NO(if (!q->bitmaps[ER] && q->queued)
+ dump_sched(q, "end dequeue");)
+
+ return m;
+}
+
+/*
+ * Assign a reasonable start time for a new flow k in group i.
+ * Admissible values for \hat(F) are multiples of \sigma_i
+ * no greater than V+\sigma_i . Larger values mean that
+ * we had a wraparound so we consider the timestamp to be stale.
+ *
+ * If F is not stale and F >= V then we set S = F.
+ * Otherwise we should assign S = V, but this may violate
+ * the ordering in ER. So, if we have groups in ER, set S to
+ * the F_j of the first group j which would be blocking us.
+ * We are guaranteed not to move S backward because
+ * otherwise our group i would still be blocked.
+ */
+static inline void
+qfq_update_start(struct qfq_sched *q, struct qfq_class *cl)
+{
+ unsigned long mask;
+ uint32_t limit, roundedF;
+ int slot_shift = cl->grp->slot_shift;
+
+ roundedF = qfq_round_down(cl->F, slot_shift);
+ limit = qfq_round_down(q->V, slot_shift) + (1UL << slot_shift);
+
+ if (!qfq_gt(cl->F, q->V) || qfq_gt(roundedF, limit)) {
+ /* timestamp was stale */
+ mask = mask_from(q->bitmaps[ER], cl->grp->index);
+ if (mask) {
+ struct qfq_group *next = qfq_ffs(q, mask);
+ if (qfq_gt(roundedF, next->F)) {
+ cl->S = next->F;
+ return;
+ }
+ }
+ cl->S = q->V;
+ } else { /* timestamp is not stale */
+ cl->S = cl->F;
+ }
+}
+
+static int
+qfq_enqueue(struct dn_sch_inst *si, struct dn_queue *_q, struct mbuf *m)
+{
+ struct qfq_sched *q = (struct qfq_sched *)(si + 1);
+ struct qfq_group *grp;
+ struct qfq_class *cl = (struct qfq_class *)_q;
+ uint64_t roundedS;
+ int s;
+
+ NO(q->loops++;)
+ DX(4, "len %d flow %p inv_w 0x%x grp %d", m->m_pkthdr.len,
+ _q, cl->inv_w, cl->grp->index);
+ /* XXX verify that the packet obeys the parameters */
+ if (m != _q->mq.head) {
+ if (dn_enqueue(_q, m, 0)) /* packet was dropped */
+ return 1;
+ NO(q->queued++;)
+ if (m != _q->mq.head)
+ return 0;
+ }
+	/* If we reach this point, queue q was idle */
+ grp = cl->grp;
+ qfq_update_start(q, cl); /* adjust start time */
+ /* compute new finish time and rounded start. */
+ cl->F = cl->S + (uint64_t)(m->m_pkthdr.len) * cl->inv_w;
+ roundedS = qfq_round_down(cl->S, grp->slot_shift);
+
+ /*
+ * insert cl in the correct bucket.
+ * If cl->S >= grp->S we don't need to adjust the
+ * bucket list and simply go to the insertion phase.
+ * Otherwise grp->S is decreasing, we must make room
+ * in the bucket list, and also recompute the group state.
+ * Finally, if there were no flows in this group and nobody
+ * was in ER make sure to adjust V.
+ */
+ if (grp->full_slots) {
+ if (!qfq_gt(grp->S, cl->S))
+ goto skip_update;
+ /* create a slot for this cl->S */
+ qfq_slot_rotate(q, grp, roundedS);
+ /* group was surely ineligible, remove */
+ __clear_bit(grp->index, &q->bitmaps[IR]);
+ __clear_bit(grp->index, &q->bitmaps[IB]);
+ } else if (!q->bitmaps[ER] && qfq_gt(roundedS, q->V))
+ q->V = roundedS;
+
+ grp->S = roundedS;
+ grp->F = roundedS + (2ULL << grp->slot_shift); // i.e. 2\sigma_i
+ s = qfq_calc_state(q, grp);
+ __set_bit(grp->index, &q->bitmaps[s]);
+ ND("new state %d 0x%x", s, q->bitmaps[s]);
+ ND("S %llx F %llx V %llx", cl->S, cl->F, q->V);
+skip_update:
+ qfq_slot_insert(grp, cl, roundedS);
+
+ return 0;
+}
+
+
+#if 0
+static inline void
+qfq_slot_remove(struct qfq_sched *q, struct qfq_group *grp,
+ struct qfq_class *cl, struct qfq_class **pprev)
+{
+ unsigned int i, offset;
+ uint64_t roundedS;
+
+ roundedS = qfq_round_down(cl->S, grp->slot_shift);
+ offset = (roundedS - grp->S) >> grp->slot_shift;
+ i = (grp->front + offset) % QFQ_MAX_SLOTS;
+
+#ifdef notyet
+ if (!pprev) {
+ pprev = &grp->slots[i];
+ while (*pprev && *pprev != cl)
+ pprev = &(*pprev)->next;
+ }
+#endif
+
+ *pprev = cl->next;
+ if (!grp->slots[i])
+ __clear_bit(offset, &grp->full_slots);
+}
+
+/*
+ * called to forcibly destroy a queue.
+ * If the queue is not in the front bucket, or if it has
+ * other queues in the front bucket, we can simply remove
+ * the queue with no other side effects.
+ * Otherwise we must propagate the event up.
+ * XXX description to be completed.
+ */
+static void
+qfq_deactivate_class(struct qfq_sched *q, struct qfq_class *cl,
+ struct qfq_class **pprev)
+{
+ struct qfq_group *grp = &q->groups[cl->index];
+ unsigned long mask;
+ uint64_t roundedS;
+ int s;
+
+ cl->F = cl->S; // not needed if the class goes away.
+ qfq_slot_remove(q, grp, cl, pprev);
+
+ if (!grp->full_slots) {
+ /* nothing left in the group, remove from all sets.
+ * Do ER last because if we were blocking other groups
+ * we must unblock them.
+ */
+ __clear_bit(grp->index, &q->bitmaps[IR]);
+ __clear_bit(grp->index, &q->bitmaps[EB]);
+ __clear_bit(grp->index, &q->bitmaps[IB]);
+
+ if (test_bit(grp->index, &q->bitmaps[ER]) &&
+ !(q->bitmaps[ER] & ~((1UL << grp->index) - 1))) {
+ mask = q->bitmaps[ER] & ((1UL << grp->index) - 1);
+ if (mask)
+ mask = ~((1UL << __fls(mask)) - 1);
+ else
+ mask = ~0UL;
+ qfq_move_groups(q, mask, EB, ER);
+ qfq_move_groups(q, mask, IB, IR);
+ }
+ __clear_bit(grp->index, &q->bitmaps[ER]);
+ } else if (!grp->slots[grp->front]) {
+ cl = qfq_slot_scan(grp);
+ roundedS = qfq_round_down(cl->S, grp->slot_shift);
+ if (grp->S != roundedS) {
+ __clear_bit(grp->index, &q->bitmaps[ER]);
+ __clear_bit(grp->index, &q->bitmaps[IR]);
+ __clear_bit(grp->index, &q->bitmaps[EB]);
+ __clear_bit(grp->index, &q->bitmaps[IB]);
+ grp->S = roundedS;
+ grp->F = roundedS + (2ULL << grp->slot_shift);
+ s = qfq_calc_state(q, grp);
+ __set_bit(grp->index, &q->bitmaps[s]);
+ }
+ }
+ qfq_update_eligible(q, q->V);
+}
+#endif
+
+static int
+qfq_new_fsk(struct dn_fsk *f)
+{
+ ipdn_bound_var(&f->fs.par[0], 1, 1, QFQ_MAX_WEIGHT, "qfq weight");
+ ipdn_bound_var(&f->fs.par[1], 1500, 1, 2000, "qfq maxlen");
+ ND("weight %d len %d\n", f->fs.par[0], f->fs.par[1]);
+ return 0;
+}
+
+/*
+ * initialize a new scheduler instance
+ */
+static int
+qfq_new_sched(struct dn_sch_inst *si)
+{
+ struct qfq_sched *q = (struct qfq_sched *)(si + 1);
+ struct qfq_group *grp;
+ int i;
+
+ for (i = 0; i <= QFQ_MAX_INDEX; i++) {
+ grp = &q->groups[i];
+ grp->index = i;
+ grp->slot_shift = QFQ_MTU_SHIFT + FRAC_BITS -
+ (QFQ_MAX_INDEX - i);
+ }
+ return 0;
+}
+
+/*
+ * QFQ scheduler descriptor
+ */
+static struct dn_alg qfq_desc = {
+ _SI( .type = ) DN_SCHED_QFQ,
+ _SI( .name = ) "QFQ",
+ _SI( .flags = ) DN_MULTIQUEUE,
+
+ _SI( .schk_datalen = ) 0,
+ _SI( .si_datalen = ) sizeof(struct qfq_sched),
+ _SI( .q_datalen = ) sizeof(struct qfq_class) - sizeof(struct dn_queue),
+
+ _SI( .enqueue = ) qfq_enqueue,
+ _SI( .dequeue = ) qfq_dequeue,
+
+ _SI( .config = ) NULL,
+ _SI( .destroy = ) NULL,
+ _SI( .new_sched = ) qfq_new_sched,
+ _SI( .free_sched = ) NULL,
+ _SI( .new_fsk = ) qfq_new_fsk,
+ _SI( .free_fsk = ) NULL,
+ _SI( .new_queue = ) qfq_new_queue,
+ _SI( .free_queue = ) qfq_free_queue,
+};
+
+DECLARE_DNSCHED_MODULE(dn_qfq, &qfq_desc);
+
+#ifdef QFQ_DEBUG
+static void
+dump_groups(struct qfq_sched *q, uint32_t mask)
+{
+ int i, j;
+
+ for (i = 0; i < QFQ_MAX_INDEX + 1; i++) {
+ struct qfq_group *g = &q->groups[i];
+
+ if (0 == (mask & (1<<i)))
+ continue;
+ for (j = 0; j < QFQ_MAX_SLOTS; j++) {
+ if (g->slots[j])
+ D(" bucket %d %p", j, g->slots[j]);
+ }
+ D("full_slots 0x%x", g->full_slots);
+ D(" %2d S 0x%20llx F 0x%llx %c", i,
+ g->S, g->F,
+ mask & (1<<i) ? '1' : '0');
+ }
+}
+
+static void
+dump_sched(struct qfq_sched *q, const char *msg)
+{
+ D("--- in %s: ---", msg);
+ ND("loops %d queued %d V 0x%llx", q->loops, q->queued, q->V);
+ D(" ER 0x%08x", q->bitmaps[ER]);
+ D(" EB 0x%08x", q->bitmaps[EB]);
+ D(" IR 0x%08x", q->bitmaps[IR]);
+ D(" IB 0x%08x", q->bitmaps[IB]);
+ dump_groups(q, 0xffffffff);
+}
+#endif /* QFQ_DEBUG */
diff --git a/rtems/freebsd/netinet/ipfw/dn_sched_rr.c b/rtems/freebsd/netinet/ipfw/dn_sched_rr.c
new file mode 100644
index 00000000..ef0ba342
--- /dev/null
+++ b/rtems/freebsd/netinet/ipfw/dn_sched_rr.c
@@ -0,0 +1,309 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*
+ * Copyright (c) 2010 Riccardo Panicucci, Universita` di Pisa
+ * All rights reserved
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * $FreeBSD$
+ */
+
+#ifdef _KERNEL
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/socketvar.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/module.h>
+#include <rtems/freebsd/net/if.h> /* IFNAMSIZ */
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/ip_var.h> /* ipfw_rule_ref */
+#include <rtems/freebsd/netinet/ip_fw.h> /* flow_id */
+#include <rtems/freebsd/netinet/ip_dummynet.h>
+#include <rtems/freebsd/netinet/ipfw/dn_heap.h>
+#include <rtems/freebsd/netinet/ipfw/ip_dn_private.h>
+#include <rtems/freebsd/netinet/ipfw/dn_sched.h>
+#else
+#include <rtems/freebsd/dn_test.h>
+#endif
+
+#define DN_SCHED_RR 3 // XXX Where?
+
+struct rr_queue {
+ struct dn_queue q; /* Standard queue */
+ int status; /* 1: queue is in the list */
+ int credit; /* Number of bytes to transmit */
+ int quantum; /* quantum * C */
+	struct rr_queue *qnext;	/* next queue in the round-robin list */
+};
+
+/* struct rr_schk contains global config parameters
+ * and is right after dn_schk
+ */
+struct rr_schk {
+ int min_q; /* Min quantum */
+ int max_q; /* Max quantum */
+ int q_bytes; /* Bytes per quantum */
+};
+
+/* per-instance round robin list, right after dn_sch_inst */
+struct rr_si {
+ struct rr_queue *head, *tail; /* Pointer to current queue */
+};
+
+/* Append a queue to the rr list */
+static inline void
+rr_append(struct rr_queue *q, struct rr_si *si)
+{
+ q->status = 1; /* mark as in-rr_list */
+ q->credit = q->quantum; /* initialize credit */
+
+ /* append to the tail */
+ if (si->head == NULL)
+ si->head = q;
+ else
+ si->tail->qnext = q;
+ si->tail = q; /* advance the tail pointer */
+ q->qnext = si->head; /* make it circular */
+}
+
+/* Remove the head queue from circular list. */
+static inline void
+rr_remove_head(struct rr_si *si)
+{
+ if (si->head == NULL)
+ return; /* empty queue */
+ si->head->status = 0;
+
+ if (si->head == si->tail) {
+ si->head = si->tail = NULL;
+ return;
+ }
+
+ si->head = si->head->qnext;
+ si->tail->qnext = si->head;
+}
+
+/* Remove a queue from circular list.
+ * XXX see if it can be merged with remove_queue()
+ */
+static inline void
+remove_queue_q(struct rr_queue *q, struct rr_si *si)
+{
+ struct rr_queue *prev;
+
+ if (q->status != 1)
+ return;
+ if (q == si->head) {
+ rr_remove_head(si);
+ return;
+ }
+
+ for (prev = si->head; prev; prev = prev->qnext) {
+ if (prev->qnext != q)
+ continue;
+ prev->qnext = q->qnext;
+ if (q == si->tail)
+ si->tail = prev;
+ q->status = 0;
+ break;
+ }
+}
+
+
+static inline void
+next_pointer(struct rr_si *si)
+{
+ if (si->head == NULL)
+ return; /* empty queue */
+
+ si->head = si->head->qnext;
+ si->tail = si->tail->qnext;
+}
+
+static int
+rr_enqueue(struct dn_sch_inst *_si, struct dn_queue *q, struct mbuf *m)
+{
+ struct rr_si *si;
+ struct rr_queue *rrq;
+
+ if (m != q->mq.head) {
+ if (dn_enqueue(q, m, 0)) /* packet was dropped */
+ return 1;
+ if (m != q->mq.head)
+ return 0;
+ }
+
+	/* If we reach this point, queue q was idle */
+ si = (struct rr_si *)(_si + 1);
+ rrq = (struct rr_queue *)q;
+
+ if (rrq->status == 1) /* Queue is already in the queue list */
+ return 0;
+
+ /* Insert the queue in the queue list */
+ rr_append(rrq, si);
+
+ return 0;
+}
+
+static struct mbuf *
+rr_dequeue(struct dn_sch_inst *_si)
+{
+ /* Access scheduler instance private data */
+ struct rr_si *si = (struct rr_si *)(_si + 1);
+ struct rr_queue *rrq;
+ uint64_t len;
+
+ while ( (rrq = si->head) ) {
+ struct mbuf *m = rrq->q.mq.head;
+ if ( m == NULL) {
+ /* empty queue, remove from list */
+ rr_remove_head(si);
+ continue;
+ }
+ len = m->m_pkthdr.len;
+
+ if (len > rrq->credit) {
+ /* Packet too big */
+ rrq->credit += rrq->quantum;
+ /* Try next queue */
+ next_pointer(si);
+ } else {
+ rrq->credit -= len;
+ return dn_dequeue(&rrq->q);
+ }
+ }
+
+	/* no packet to dequeue */
+ return NULL;
+}
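+
+/*
+ * Illustrative note (hypothetical numbers, not from the code): the
+ * credit/quantum mechanism above is a deficit-round-robin scheme.
+ * With quantum = 1500, a queue whose head packet is 4000 bytes is
+ * skipped twice (credit 1500 -> 3000 -> 4500) and served on the
+ * third visit, leaving a residual credit of 500 bytes.
+ */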
+
+static int
+rr_config(struct dn_schk *_schk)
+{
+ struct rr_schk *schk = (struct rr_schk *)(_schk + 1);
+ ND("called");
+
+ /* use reasonable quantums (64..2k bytes, default 1500) */
+ schk->min_q = 64;
+ schk->max_q = 2048;
+ schk->q_bytes = 1500; /* quantum */
+
+ return 0;
+}
+
+static int
+rr_new_sched(struct dn_sch_inst *_si)
+{
+ struct rr_si *si = (struct rr_si *)(_si + 1);
+
+ ND("called");
+ si->head = si->tail = NULL;
+
+ return 0;
+}
+
+static int
+rr_free_sched(struct dn_sch_inst *_si)
+{
+ ND("called");
+ /* Nothing to do? */
+ return 0;
+}
+
+static int
+rr_new_fsk(struct dn_fsk *fs)
+{
+ struct rr_schk *schk = (struct rr_schk *)(fs->sched + 1);
+ /* par[0] is the weight, par[1] is the quantum step */
+ ipdn_bound_var(&fs->fs.par[0], 1,
+ 1, 65536, "RR weight");
+ ipdn_bound_var(&fs->fs.par[1], schk->q_bytes,
+ schk->min_q, schk->max_q, "RR quantum");
+ return 0;
+}
+
+static int
+rr_new_queue(struct dn_queue *_q)
+{
+ struct rr_queue *q = (struct rr_queue *)_q;
+
+ _q->ni.oid.subtype = DN_SCHED_RR;
+
+ q->quantum = _q->fs->fs.par[0] * _q->fs->fs.par[1];
+ ND("called, q->quantum %d", q->quantum);
+ q->credit = q->quantum;
+ q->status = 0;
+
+ if (_q->mq.head != NULL) {
+ /* Queue NOT empty, insert in the queue list */
+ rr_append(q, (struct rr_si *)(_q->_si + 1));
+ }
+ return 0;
+}
+
+static int
+rr_free_queue(struct dn_queue *_q)
+{
+ struct rr_queue *q = (struct rr_queue *)_q;
+
+ ND("called");
+ if (q->status == 1) {
+ struct rr_si *si = (struct rr_si *)(_q->_si + 1);
+ remove_queue_q(q, si);
+ }
+ return 0;
+}
+
+/*
+ * RR scheduler descriptor
+ * contains the type of the scheduler, the name, the size of the
+ * structures and function pointers.
+ */
+static struct dn_alg rr_desc = {
+ _SI( .type = ) DN_SCHED_RR,
+ _SI( .name = ) "RR",
+ _SI( .flags = ) DN_MULTIQUEUE,
+
+ _SI( .schk_datalen = ) 0,
+ _SI( .si_datalen = ) sizeof(struct rr_si),
+ _SI( .q_datalen = ) sizeof(struct rr_queue) - sizeof(struct dn_queue),
+
+ _SI( .enqueue = ) rr_enqueue,
+ _SI( .dequeue = ) rr_dequeue,
+
+ _SI( .config = ) rr_config,
+ _SI( .destroy = ) NULL,
+ _SI( .new_sched = ) rr_new_sched,
+ _SI( .free_sched = ) rr_free_sched,
+ _SI( .new_fsk = ) rr_new_fsk,
+ _SI( .free_fsk = ) NULL,
+ _SI( .new_queue = ) rr_new_queue,
+ _SI( .free_queue = ) rr_free_queue,
+};
+
+
+DECLARE_DNSCHED_MODULE(dn_rr, &rr_desc);
diff --git a/rtems/freebsd/netinet/ipfw/dn_sched_wf2q.c b/rtems/freebsd/netinet/ipfw/dn_sched_wf2q.c
new file mode 100644
index 00000000..8393e8c9
--- /dev/null
+++ b/rtems/freebsd/netinet/ipfw/dn_sched_wf2q.c
@@ -0,0 +1,375 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*
+ * Copyright (c) 2010 Riccardo Panicucci, Universita` di Pisa
+ * Copyright (c) 2000-2002 Luigi Rizzo, Universita` di Pisa
+ * All rights reserved
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * $FreeBSD$
+ */
+
+#ifdef _KERNEL
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/socketvar.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/module.h>
+#include <rtems/freebsd/net/if.h> /* IFNAMSIZ */
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/ip_var.h> /* ipfw_rule_ref */
+#include <rtems/freebsd/netinet/ip_fw.h> /* flow_id */
+#include <rtems/freebsd/netinet/ip_dummynet.h>
+#include <rtems/freebsd/netinet/ipfw/dn_heap.h>
+#include <rtems/freebsd/netinet/ipfw/ip_dn_private.h>
+#include <rtems/freebsd/netinet/ipfw/dn_sched.h>
+#else
+#include <rtems/freebsd/dn_test.h>
+#endif
+
+#ifndef MAX64
+#define MAX64(x,y) (((( (int64_t) ( (y)-(x) )) > 0 ) ? (y) : (x)))
+#endif
+
+/*
+ * timestamps are computed on 64 bit using fixed point arithmetic.
+ * LMAX_BITS, WMAX_BITS are the max number of bits for the packet len
+ * and sum of weights, respectively. FRAC_BITS is the number of
+ * fractional bits. We want FRAC_BITS >> WMAX_BITS to avoid too large
+ * errors when computing the inverse, FRAC_BITS < 32 so we can do 1/w
+ * using an unsigned 32-bit division, and to avoid wraparounds we need
+ * LMAX_BITS + WMAX_BITS + FRAC_BITS << 64
+ * As an example
+ * FRAC_BITS = 26, LMAX_BITS=14, WMAX_BITS = 19
+ */
+#ifndef FRAC_BITS
+#define FRAC_BITS 28 /* shift for fixed point arithmetic */
+#define ONE_FP (1UL << FRAC_BITS)
+#endif
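+
+/*
+ * Worked example of the fixed point arithmetic (illustrative values,
+ * assuming FRAC_BITS = 28 as defined above):
+ *	ONE_FP = 1 << 28 = 268435456
+ *	weight w = 10 -> inv_w = ONE_FP / w = 26843545 (about 0.1)
+ * so a 1500 byte packet advances the finish time by
+ *	len * inv_w = 1500 * 26843545,
+ * i.e. roughly 150 virtual time units scaled by 2^28.
+ */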
+
+/*
+ * Private information for the scheduler instance:
+ * sch_heap (key is Finish time) returns the next queue to serve
+ * ne_heap (key is Start time) stores not-eligible queues
+ * idle_heap (key=start/finish time) stores idle flows. It must
+ * support extract-from-middle.
+ * A flow is only in 1 of the three heaps.
+ * XXX todo: use a more efficient data structure, e.g. a tree sorted
+ * by F with min_subtree(S) in each node
+ */
+struct wf2qp_si {
+ struct dn_heap sch_heap; /* top extract - key Finish time */
+ struct dn_heap ne_heap; /* top extract - key Start time */
+ struct dn_heap idle_heap; /* random extract - key Start=Finish time */
+ uint64_t V; /* virtual time */
+ uint32_t inv_wsum; /* inverse of sum of weights */
+ uint32_t wsum; /* sum of weights */
+};
+
+struct wf2qp_queue {
+ struct dn_queue _q;
+ uint64_t S, F; /* start time, finish time */
+ uint32_t inv_w; /* ONE_FP / weight */
+ int32_t heap_pos; /* position (index) of struct in heap */
+};
+
+/*
+ * This file implements a WF2Q+ scheduler as it has been in dummynet
+ * since 2000.
+ * The scheduler supports per-flow queues and has O(log N) complexity.
+ *
+ * WF2Q+ needs to drain entries from the idle heap so that we
+ * can keep the sum of weights up to date. We can do it whenever
+ * we get a chance, or periodically, or following some other
+ * strategy. The function idle_check() drains at most N elements
+ * from the idle heap.
+ */
+static void
+idle_check(struct wf2qp_si *si, int n, int force)
+{
+ struct dn_heap *h = &si->idle_heap;
+ while (n-- > 0 && h->elements > 0 &&
+ (force || DN_KEY_LT(HEAP_TOP(h)->key, si->V))) {
+ struct dn_queue *q = HEAP_TOP(h)->object;
+ struct wf2qp_queue *alg_fq = (struct wf2qp_queue *)q;
+
+ heap_extract(h, NULL);
+ /* XXX to let the flowset delete the queue we should
+ * mark it as 'unused' by the scheduler.
+ */
+ alg_fq->S = alg_fq->F + 1; /* Mark timestamp as invalid. */
+ si->wsum -= q->fs->fs.par[0]; /* adjust sum of weights */
+ if (si->wsum > 0)
+ si->inv_wsum = ONE_FP/si->wsum;
+ }
+}
+
+static int
+wf2qp_enqueue(struct dn_sch_inst *_si, struct dn_queue *q, struct mbuf *m)
+{
+ struct dn_fsk *fs = q->fs;
+ struct wf2qp_si *si = (struct wf2qp_si *)(_si + 1);
+ struct wf2qp_queue *alg_fq;
+ uint64_t len = m->m_pkthdr.len;
+
+ if (m != q->mq.head) {
+ if (dn_enqueue(q, m, 0)) /* packet was dropped */
+ return 1;
+ if (m != q->mq.head) /* queue was already busy */
+ return 0;
+ }
+
+	/* If we reach this point, queue q was idle */
+ alg_fq = (struct wf2qp_queue *)q;
+
+ if (DN_KEY_LT(alg_fq->F, alg_fq->S)) {
+		/* F < S means timestamps are invalid -> brand new queue. */
+ alg_fq->S = si->V; /* init start time */
+ si->wsum += fs->fs.par[0]; /* add weight of new queue. */
+ si->inv_wsum = ONE_FP/si->wsum;
+ } else { /* if it was idle then it was in the idle heap */
+ heap_extract(&si->idle_heap, q);
+ alg_fq->S = MAX64(alg_fq->F, si->V); /* compute new S */
+ }
+ alg_fq->F = alg_fq->S + len * alg_fq->inv_w;
+
+ /* if nothing is backlogged, make sure this flow is eligible */
+ if (si->ne_heap.elements == 0 && si->sch_heap.elements == 0)
+ si->V = MAX64(alg_fq->S, si->V);
+
+ /*
+	 * Look at eligibility. A flow is not eligible if S>V (when
+ * this happens, it means that there is some other flow already
+ * scheduled for the same pipe, so the sch_heap cannot be
+ * empty). If the flow is not eligible we just store it in the
+ * ne_heap. Otherwise, we store in the sch_heap.
+ * Note that for all flows in sch_heap (SCH), S_i <= V,
+ * and for all flows in ne_heap (NEH), S_i > V.
+ * So when we need to compute max(V, min(S_i)) forall i in
+ * SCH+NEH, we only need to look into NEH.
+ */
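+	/*
+	 * Illustrative case (hypothetical numbers): with V = 100, a flow
+	 * waking up with S = 130 goes into ne_heap; once V advances past
+	 * 130, the dequeue path moves it into sch_heap.
+	 */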
+ if (DN_KEY_LT(si->V, alg_fq->S)) {
+ /* S>V means flow Not eligible. */
+ if (si->sch_heap.elements == 0)
+ D("++ ouch! not eligible but empty scheduler!");
+ heap_insert(&si->ne_heap, alg_fq->S, q);
+ } else {
+ heap_insert(&si->sch_heap, alg_fq->F, q);
+ }
+ return 0;
+}
+
+/* XXX invariant: sch > 0 || V >= min(S in neh) */
+static struct mbuf *
+wf2qp_dequeue(struct dn_sch_inst *_si)
+{
+ /* Access scheduler instance private data */
+ struct wf2qp_si *si = (struct wf2qp_si *)(_si + 1);
+ struct mbuf *m;
+ struct dn_queue *q;
+ struct dn_heap *sch = &si->sch_heap;
+ struct dn_heap *neh = &si->ne_heap;
+ struct wf2qp_queue *alg_fq;
+
+ if (sch->elements == 0 && neh->elements == 0) {
+ /* we have nothing to do. We could kill the idle heap
+ * altogether and reset V
+ */
+ idle_check(si, 0x7fffffff, 1);
+ si->V = 0;
+ si->wsum = 0; /* should be set already */
+ return NULL; /* quick return if nothing to do */
+ }
+ idle_check(si, 1, 0); /* drain something from the idle heap */
+
+ /* make sure at least one element is eligible, bumping V
+ * and moving entries that have become eligible.
+ * We need to repeat the first part twice, before and
+ * after extracting the candidate, or enqueue() will
+ * find the data structure in a wrong state.
+ */
+ m = NULL;
+ for(;;) {
+ /*
+ * Compute V = max(V, min(S_i)). Remember that all elements
+ * in sch have by definition S_i <= V so if sch is not empty,
+ * V is surely the max and we must not update it. Conversely,
+ * if sch is empty we only need to look at neh.
+ * We don't need to move the queues, as it will be done at the
+ * next enqueue
+ */
+ if (sch->elements == 0 && neh->elements > 0) {
+ si->V = MAX64(si->V, HEAP_TOP(neh)->key);
+ }
+ while (neh->elements > 0 &&
+ DN_KEY_LEQ(HEAP_TOP(neh)->key, si->V)) {
+ q = HEAP_TOP(neh)->object;
+ alg_fq = (struct wf2qp_queue *)q;
+ heap_extract(neh, NULL);
+ heap_insert(sch, alg_fq->F, q);
+ }
+ if (m) /* pkt found in previous iteration */
+ break;
+ /* ok we have at least one eligible pkt */
+ q = HEAP_TOP(sch)->object;
+ alg_fq = (struct wf2qp_queue *)q;
+ m = dn_dequeue(q);
+ heap_extract(sch, NULL); /* Remove queue from heap. */
+ si->V += (uint64_t)(m->m_pkthdr.len) * si->inv_wsum;
+ alg_fq->S = alg_fq->F; /* Update start time. */
+ if (q->mq.head == 0) { /* not backlogged any more. */
+ heap_insert(&si->idle_heap, alg_fq->F, q);
+ } else { /* Still backlogged. */
+ /* Update F, store in neh or sch */
+ uint64_t len = q->mq.head->m_pkthdr.len;
+ alg_fq->F += len * alg_fq->inv_w;
+ if (DN_KEY_LEQ(alg_fq->S, si->V)) {
+ heap_insert(sch, alg_fq->F, q);
+ } else {
+ heap_insert(neh, alg_fq->S, q);
+ }
+ }
+ }
+ return m;
+}
+
+static int
+wf2qp_new_sched(struct dn_sch_inst *_si)
+{
+ struct wf2qp_si *si = (struct wf2qp_si *)(_si + 1);
+ int ofs = offsetof(struct wf2qp_queue, heap_pos);
+
+ /* all heaps support extract from middle */
+ if (heap_init(&si->idle_heap, 16, ofs) ||
+ heap_init(&si->sch_heap, 16, ofs) ||
+ heap_init(&si->ne_heap, 16, ofs)) {
+ heap_free(&si->ne_heap);
+ heap_free(&si->sch_heap);
+ heap_free(&si->idle_heap);
+ return ENOMEM;
+ }
+ return 0;
+}
+
+static int
+wf2qp_free_sched(struct dn_sch_inst *_si)
+{
+ struct wf2qp_si *si = (struct wf2qp_si *)(_si + 1);
+
+ heap_free(&si->sch_heap);
+ heap_free(&si->ne_heap);
+ heap_free(&si->idle_heap);
+
+ return 0;
+}
+
+static int
+wf2qp_new_fsk(struct dn_fsk *fs)
+{
+ ipdn_bound_var(&fs->fs.par[0], 1,
+ 1, 100, "WF2Q+ weight");
+ return 0;
+}
+
+static int
+wf2qp_new_queue(struct dn_queue *_q)
+{
+ struct wf2qp_queue *q = (struct wf2qp_queue *)_q;
+
+ _q->ni.oid.subtype = DN_SCHED_WF2QP;
+ q->F = 0; /* not strictly necessary */
+ q->S = q->F + 1; /* mark timestamp as invalid. */
+ q->inv_w = ONE_FP / _q->fs->fs.par[0];
+ if (_q->mq.head != NULL) {
+ wf2qp_enqueue(_q->_si, _q, _q->mq.head);
+ }
+ return 0;
+}
+
+/*
+ * Called when the infrastructure removes a queue (e.g. flowset
+ * is reconfigured). Nothing to do if we did not 'own' the queue,
+ * otherwise remove it from the right heap and adjust the sum
+ * of weights.
+ */
+static int
+wf2qp_free_queue(struct dn_queue *q)
+{
+ struct wf2qp_queue *alg_fq = (struct wf2qp_queue *)q;
+ struct wf2qp_si *si = (struct wf2qp_si *)(q->_si + 1);
+
+ if (alg_fq->S >= alg_fq->F + 1)
+ return 0; /* nothing to do, not in any heap */
+ si->wsum -= q->fs->fs.par[0];
+ if (si->wsum > 0)
+ si->inv_wsum = ONE_FP/si->wsum;
+
+ /* extract from the heap. XXX TODO we may need to adjust V
+ * to make sure the invariants hold.
+ */
+ if (q->mq.head == NULL) {
+ heap_extract(&si->idle_heap, q);
+ } else if (DN_KEY_LT(si->V, alg_fq->S)) {
+ heap_extract(&si->ne_heap, q);
+ } else {
+ heap_extract(&si->sch_heap, q);
+ }
+ return 0;
+}
+
+/*
+ * WF2Q+ scheduler descriptor
+ * contains the type of the scheduler, the name, the size of the
+ * structures and function pointers.
+ */
+static struct dn_alg wf2qp_desc = {
+ _SI( .type = ) DN_SCHED_WF2QP,
+ _SI( .name = ) "WF2Q+",
+ _SI( .flags = ) DN_MULTIQUEUE,
+
+ /* we need extra space in the si and the queue */
+ _SI( .schk_datalen = ) 0,
+ _SI( .si_datalen = ) sizeof(struct wf2qp_si),
+ _SI( .q_datalen = ) sizeof(struct wf2qp_queue) -
+ sizeof(struct dn_queue),
+
+ _SI( .enqueue = ) wf2qp_enqueue,
+ _SI( .dequeue = ) wf2qp_dequeue,
+
+ _SI( .config = ) NULL,
+ _SI( .destroy = ) NULL,
+ _SI( .new_sched = ) wf2qp_new_sched,
+ _SI( .free_sched = ) wf2qp_free_sched,
+
+ _SI( .new_fsk = ) wf2qp_new_fsk,
+ _SI( .free_fsk = ) NULL,
+
+ _SI( .new_queue = ) wf2qp_new_queue,
+ _SI( .free_queue = ) wf2qp_free_queue,
+};
+
+
+DECLARE_DNSCHED_MODULE(dn_wf2qp, &wf2qp_desc);
diff --git a/rtems/freebsd/netinet/ipfw/ip_dn_glue.c b/rtems/freebsd/netinet/ipfw/ip_dn_glue.c
new file mode 100644
index 00000000..dc0daa1a
--- /dev/null
+++ b/rtems/freebsd/netinet/ipfw/ip_dn_glue.c
@@ -0,0 +1,847 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 2010 Riccardo Panicucci, Universita` di Pisa
+ * All rights reserved
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * $FreeBSD$
+ *
+ * Binary compatibility support for /sbin/ipfw RELENG_7 and RELENG_8
+ */
+
+#include <rtems/freebsd/local/opt_inet6.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/module.h>
+#include <rtems/freebsd/sys/priv.h>
+#include <rtems/freebsd/sys/proc.h>
+#include <rtems/freebsd/sys/rwlock.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/socketvar.h>
+#include <rtems/freebsd/sys/time.h>
+#include <rtems/freebsd/sys/taskqueue.h>
+#include <rtems/freebsd/net/if.h> /* IFNAMSIZ, struct ifaddr, ifq head, lock.h mutex.h */
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/ip_var.h> /* ip_output(), IP_FORWARDING */
+#include <rtems/freebsd/netinet/ip_fw.h>
+#include <rtems/freebsd/netinet/ipfw/ip_fw_private.h>
+#include <rtems/freebsd/netinet/ipfw/dn_heap.h>
+#include <rtems/freebsd/netinet/ip_dummynet.h>
+#include <rtems/freebsd/netinet/ipfw/ip_dn_private.h>
+#include <rtems/freebsd/netinet/ipfw/dn_sched.h>
+
+/* FREEBSD7.2 ip_dummynet.h r191715*/
+
+struct dn_heap_entry7 {
+ int64_t key; /* sorting key. Topmost element is smallest one */
+ void *object; /* object pointer */
+};
+
+struct dn_heap7 {
+ int size;
+ int elements;
+ int offset; /* XXX if > 0 this is the offset of direct ptr to obj */
+ struct dn_heap_entry7 *p; /* really an array of "size" entries */
+};
+
+/* Common to 7.2 and 8 */
+struct dn_flow_set {
+ SLIST_ENTRY(dn_flow_set) next; /* linked list in a hash slot */
+
+ u_short fs_nr ; /* flow_set number */
+ u_short flags_fs;
+#define DNOLD_HAVE_FLOW_MASK 0x0001
+#define DNOLD_IS_RED 0x0002
+#define DNOLD_IS_GENTLE_RED 0x0004
+#define DNOLD_QSIZE_IS_BYTES 0x0008 /* queue size is measured in bytes */
+#define DNOLD_NOERROR 0x0010 /* do not report ENOBUFS on drops */
+#define DNOLD_HAS_PROFILE 0x0020 /* the pipe has a delay profile. */
+#define DNOLD_IS_PIPE 0x4000
+#define DNOLD_IS_QUEUE 0x8000
+
+ struct dn_pipe7 *pipe ; /* pointer to parent pipe */
+ u_short parent_nr ; /* parent pipe#, 0 if local to a pipe */
+
+ int weight ; /* WFQ queue weight */
+ int qsize ; /* queue size in slots or bytes */
+ int plr ; /* pkt loss rate (2^31-1 means 100%) */
+
+ struct ipfw_flow_id flow_mask ;
+
+ /* hash table of queues onto this flow_set */
+ int rq_size ; /* number of slots */
+ int rq_elements ; /* active elements */
+ struct dn_flow_queue7 **rq; /* array of rq_size entries */
+
+ u_int32_t last_expired ; /* do not expire too frequently */
+ int backlogged ; /* #active queues for this flowset */
+
+ /* RED parameters */
+#define SCALE_RED 16
+#define SCALE(x) ( (x) << SCALE_RED )
+#define SCALE_VAL(x) ( (x) >> SCALE_RED )
+#define SCALE_MUL(x,y) ( ( (x) * (y) ) >> SCALE_RED )
+ int w_q ; /* queue weight (scaled) */
+ int max_th ; /* maximum threshold for queue (scaled) */
+ int min_th ; /* minimum threshold for queue (scaled) */
+ int max_p ; /* maximum value for p_b (scaled) */
+ u_int c_1 ; /* max_p/(max_th-min_th) (scaled) */
+ u_int c_2 ; /* max_p*min_th/(max_th-min_th) (scaled) */
+ u_int c_3 ; /* for GRED, (1-max_p)/max_th (scaled) */
+ u_int c_4 ; /* for GRED, 1 - 2*max_p (scaled) */
+ u_int * w_q_lookup ; /* lookup table for computing (1-w_q)^t */
+ u_int lookup_depth ; /* depth of lookup table */
+ int lookup_step ; /* granularity inside the lookup table */
+ int lookup_weight ; /* equal to (1-w_q)^t / (1-w_q)^(t+1) */
+ int avg_pkt_size ; /* medium packet size */
+ int max_pkt_size ; /* max packet size */
+};
+SLIST_HEAD(dn_flow_set_head, dn_flow_set);
+
+#define DN_IS_PIPE 0x4000
+#define DN_IS_QUEUE 0x8000
+struct dn_flow_queue7 {
+ struct dn_flow_queue7 *next ;
+ struct ipfw_flow_id id ;
+
+ struct mbuf *head, *tail ; /* queue of packets */
+ u_int len ;
+ u_int len_bytes ;
+
+ u_long numbytes;
+
+ u_int64_t tot_pkts ; /* statistics counters */
+ u_int64_t tot_bytes ;
+ u_int32_t drops ;
+
+ int hash_slot ; /* debugging/diagnostic */
+
+ /* RED parameters */
+ int avg ; /* average queue length est. (scaled) */
+ int count ; /* arrivals since last RED drop */
+ int random ; /* random value (scaled) */
+ u_int32_t q_time; /* start of queue idle time */
+
+ /* WF2Q+ support */
+ struct dn_flow_set *fs ; /* parent flow set */
+ int heap_pos ; /* position (index) of struct in heap */
+ int64_t sched_time ; /* current time when queue enters ready_heap */
+
+ int64_t S,F ; /* start time, finish time */
+};
+
+struct dn_pipe7 { /* a pipe */
+ SLIST_ENTRY(dn_pipe7) next; /* linked list in a hash slot */
+
+ int pipe_nr ; /* number */
+ int bandwidth; /* really, bytes/tick. */
+ int delay ; /* really, ticks */
+
+ struct mbuf *head, *tail ; /* packets in delay line */
+
+ /* WF2Q+ */
+ struct dn_heap7 scheduler_heap ; /* top extract - key Finish time*/
+ struct dn_heap7 not_eligible_heap; /* top extract- key Start time */
+ struct dn_heap7 idle_heap ; /* random extract - key Start=Finish time */
+
+ int64_t V ; /* virtual time */
+ int sum; /* sum of weights of all active sessions */
+
+ int numbytes;
+
+ int64_t sched_time ; /* time pipe was scheduled in ready_heap */
+
+ /*
+ * When the tx clock come from an interface (if_name[0] != '\0'), its name
+ * is stored below, whereas the ifp is filled when the rule is configured.
+ */
+ char if_name[IFNAMSIZ];
+ struct ifnet *ifp ;
+ int ready ; /* set if ifp != NULL and we got a signal from it */
+
+ struct dn_flow_set fs ; /* used with fixed-rate flows */
+};
+SLIST_HEAD(dn_pipe_head7, dn_pipe7);
+
+
+/* FREEBSD8 ip_dummynet.h r196045 */
+struct dn_flow_queue8 {
+ struct dn_flow_queue8 *next ;
+ struct ipfw_flow_id id ;
+
+ struct mbuf *head, *tail ; /* queue of packets */
+ u_int len ;
+ u_int len_bytes ;
+
+ uint64_t numbytes ; /* credit for transmission (dynamic queues) */
+ int64_t extra_bits; /* extra bits simulating unavailable channel */
+
+ u_int64_t tot_pkts ; /* statistics counters */
+ u_int64_t tot_bytes ;
+ u_int32_t drops ;
+
+ int hash_slot ; /* debugging/diagnostic */
+
+ /* RED parameters */
+ int avg ; /* average queue length est. (scaled) */
+ int count ; /* arrivals since last RED drop */
+ int random ; /* random value (scaled) */
+ int64_t idle_time; /* start of queue idle time */
+
+ /* WF2Q+ support */
+ struct dn_flow_set *fs ; /* parent flow set */
+ int heap_pos ; /* position (index) of struct in heap */
+ int64_t sched_time ; /* current time when queue enters ready_heap */
+
+ int64_t S,F ; /* start time, finish time */
+};
+
+struct dn_pipe8 { /* a pipe */
+ SLIST_ENTRY(dn_pipe8) next; /* linked list in a hash slot */
+
+ int pipe_nr ; /* number */
+ int bandwidth; /* really, bytes/tick. */
+ int delay ; /* really, ticks */
+
+ struct mbuf *head, *tail ; /* packets in delay line */
+
+ /* WF2Q+ */
+ struct dn_heap7 scheduler_heap ; /* top extract - key Finish time*/
+ struct dn_heap7 not_eligible_heap; /* top extract- key Start time */
+ struct dn_heap7 idle_heap ; /* random extract - key Start=Finish time */
+
+ int64_t V ; /* virtual time */
+ int sum; /* sum of weights of all active sessions */
+
+ /* Same as in dn_flow_queue, numbytes can become large */
+ int64_t numbytes; /* bits I can transmit (more or less). */
+ uint64_t burst; /* burst size, scaled: bits * hz */
+
+ int64_t sched_time ; /* time pipe was scheduled in ready_heap */
+ int64_t idle_time; /* start of pipe idle time */
+
+ char if_name[IFNAMSIZ];
+ struct ifnet *ifp ;
+ int ready ; /* set if ifp != NULL and we got a signal from it */
+
+ struct dn_flow_set fs ; /* used with fixed-rate flows */
+
+ /* fields to simulate a delay profile */
+#define ED_MAX_NAME_LEN 32
+ char name[ED_MAX_NAME_LEN];
+ int loss_level;
+ int samples_no;
+ int *samples;
+};
+
+#define ED_MAX_SAMPLES_NO 1024
+struct dn_pipe_max8 {
+ struct dn_pipe8 pipe;
+ int samples[ED_MAX_SAMPLES_NO];
+};
+SLIST_HEAD(dn_pipe_head8, dn_pipe8);
+
+/*
+ * Changes from 7.2 to 8:
+ * dn_pipe:
+ * numbytes from int to int64_t
+ * add burst (int64_t)
+ * add idle_time (int64_t)
+ * add profile
+ * add struct dn_pipe_max
+ * add flag DN_HAS_PROFILE
+ *
+ * dn_flow_queue
+ * numbytes from u_long to int64_t
+ * add extra_bits (int64_t)
+ * q_time from u_int32_t to int64_t and name idle_time
+ *
+ * dn_flow_set unchanged
+ *
+ */
+
+/* NOTE:XXX copied from dummynet.c */
+#define O_NEXT(p, len) ((void *)((char *)p + len))
+static void
+oid_fill(struct dn_id *oid, int len, int type, uintptr_t id)
+{
+ oid->len = len;
+ oid->type = type;
+ oid->subtype = 0;
+ oid->id = id;
+}
+/* make room in the buffer and move the pointer forward */
+static void *
+o_next(struct dn_id **o, int len, int type)
+{
+ struct dn_id *ret = *o;
+ oid_fill(ret, len, type, 0);
+ *o = O_NEXT(*o, len);
+ return ret;
+}
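+
+/*
+ * Sketch of the buffer layout built by dn_compat_configure() below
+ * using o_next(); the profile part is only present for FreeBSD 8
+ * requests that carry samples:
+ *
+ *	base -> [dn_id header][dn_sch][dn_link][dn_fs][dn_profile]
+ */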
+
+
+static size_t pipesize7 = sizeof(struct dn_pipe7);
+static size_t pipesize8 = sizeof(struct dn_pipe8);
+static size_t pipesizemax8 = sizeof(struct dn_pipe_max8);
+
+/* Indicate the 'ipfw' version
+ * 1: from FreeBSD 7.2
+ * 0: from FreeBSD 8
+ * -1: unknown (for now unused)
+ *
+ * It is updated when an IP_DUMMYNET_DEL or IP_DUMMYNET_CONFIGURE request
+ * arrives.
+ * NOTE: if an IP_DUMMYNET_GET arrives and the 'ipfw' version is unknown,
+ * it is assumed to be the FreeBSD 8 version.
+ */
+static int is7 = 0;
+
+static int
+convertflags2new(int src)
+{
+ int dst = 0;
+
+ if (src & DNOLD_HAVE_FLOW_MASK)
+ dst |= DN_HAVE_MASK;
+ if (src & DNOLD_QSIZE_IS_BYTES)
+ dst |= DN_QSIZE_BYTES;
+ if (src & DNOLD_NOERROR)
+ dst |= DN_NOERROR;
+ if (src & DNOLD_IS_RED)
+ dst |= DN_IS_RED;
+ if (src & DNOLD_IS_GENTLE_RED)
+ dst |= DN_IS_GENTLE_RED;
+ if (src & DNOLD_HAS_PROFILE)
+ dst |= DN_HAS_PROFILE;
+
+ return dst;
+}
+
+static int
+convertflags2old(int src)
+{
+ int dst = 0;
+
+ if (src & DN_HAVE_MASK)
+ dst |= DNOLD_HAVE_FLOW_MASK;
+ if (src & DN_IS_RED)
+ dst |= DNOLD_IS_RED;
+ if (src & DN_IS_GENTLE_RED)
+ dst |= DNOLD_IS_GENTLE_RED;
+ if (src & DN_NOERROR)
+ dst |= DNOLD_NOERROR;
+ if (src & DN_HAS_PROFILE)
+ dst |= DNOLD_HAS_PROFILE;
+ if (src & DN_QSIZE_BYTES)
+ dst |= DNOLD_QSIZE_IS_BYTES;
+
+ return dst;
+}
+
+static int
+dn_compat_del(void *v)
+{
+ struct dn_pipe7 *p = (struct dn_pipe7 *) v;
+ struct dn_pipe8 *p8 = (struct dn_pipe8 *) v;
+ struct {
+ struct dn_id oid;
+ uintptr_t a[1]; /* add more if we want a list */
+ } cmd;
+
+ /* XXX DN_API_VERSION ??? */
+ oid_fill((void *)&cmd, sizeof(cmd), DN_CMD_DELETE, DN_API_VERSION);
+
+ if (is7) {
+ if (p->pipe_nr == 0 && p->fs.fs_nr == 0)
+ return EINVAL;
+ if (p->pipe_nr != 0 && p->fs.fs_nr != 0)
+ return EINVAL;
+ } else {
+ if (p8->pipe_nr == 0 && p8->fs.fs_nr == 0)
+ return EINVAL;
+ if (p8->pipe_nr != 0 && p8->fs.fs_nr != 0)
+ return EINVAL;
+ }
+
+ if (p->pipe_nr != 0) { /* pipe x delete */
+ cmd.a[0] = p->pipe_nr;
+ cmd.oid.subtype = DN_LINK;
+ } else { /* queue x delete */
+ cmd.oid.subtype = DN_FS;
+ cmd.a[0] = (is7) ? p->fs.fs_nr : p8->fs.fs_nr;
+ }
+
+ return do_config(&cmd, cmd.oid.len);
+}
+
+static int
+dn_compat_config_queue(struct dn_fs *fs, void* v)
+{
+ struct dn_pipe7 *p7 = (struct dn_pipe7 *)v;
+ struct dn_pipe8 *p8 = (struct dn_pipe8 *)v;
+ struct dn_flow_set *f;
+
+ if (is7)
+ f = &p7->fs;
+ else
+ f = &p8->fs;
+
+ fs->fs_nr = f->fs_nr;
+ fs->sched_nr = f->parent_nr;
+ fs->flow_mask = f->flow_mask;
+ fs->buckets = f->rq_size;
+ fs->qsize = f->qsize;
+ fs->plr = f->plr;
+ fs->par[0] = f->weight;
+ fs->flags = convertflags2new(f->flags_fs);
+ if (fs->flags & DN_IS_GENTLE_RED || fs->flags & DN_IS_RED) {
+ fs->w_q = f->w_q;
+ fs->max_th = f->max_th;
+ fs->min_th = f->min_th;
+ fs->max_p = f->max_p;
+ }
+
+ return 0;
+}
+
+static int
+dn_compat_config_pipe(struct dn_sch *sch, struct dn_link *p,
+ struct dn_fs *fs, void* v)
+{
+ struct dn_pipe7 *p7 = (struct dn_pipe7 *)v;
+ struct dn_pipe8 *p8 = (struct dn_pipe8 *)v;
+ int i = p7->pipe_nr;
+
+ sch->sched_nr = i;
+ sch->oid.subtype = 0;
+ p->link_nr = i;
+ fs->fs_nr = i + 2*DN_MAX_ID;
+ fs->sched_nr = i + DN_MAX_ID;
+
+ /* Common to 7 and 8 */
+ p->bandwidth = p7->bandwidth;
+ p->delay = p7->delay;
+ if (!is7) {
+ /* FreeBSD 8 has burst */
+ p->burst = p8->burst;
+ }
+
+ /* fill the fifo flowset */
+ dn_compat_config_queue(fs, v);
+ fs->fs_nr = i + 2*DN_MAX_ID;
+ fs->sched_nr = i + DN_MAX_ID;
+
+ /* Move scheduler related parameter from fs to sch */
+ sch->buckets = fs->buckets; /*XXX*/
+ fs->buckets = 0;
+ if (fs->flags & DN_HAVE_MASK) {
+ sch->flags |= DN_HAVE_MASK;
+ fs->flags &= ~DN_HAVE_MASK;
+ sch->sched_mask = fs->flow_mask;
+ bzero(&fs->flow_mask, sizeof(struct ipfw_flow_id));
+ }
+
+ return 0;
+}
+
+static int
+dn_compat_config_profile(struct dn_profile *pf, struct dn_link *p,
+ void *v)
+{
+ struct dn_pipe8 *p8 = (struct dn_pipe8 *)v;
+
+ p8->samples = &(((struct dn_pipe_max8 *)p8)->samples[0]);
+
+ pf->link_nr = p->link_nr;
+ pf->loss_level = p8->loss_level;
+// pf->bandwidth = p->bandwidth; //XXX bandwidth redundant?
+ pf->samples_no = p8->samples_no;
+ strncpy(pf->name, p8->name,sizeof(pf->name));
+ bcopy(p8->samples, pf->samples, sizeof(pf->samples));
+
+ return 0;
+}
+
+/*
+ * If p->pipe_nr != 0 the command is 'pipe x config', so we need to
+ * create the three main structs; otherwise only a flowset is created.
+ */
+static int
+dn_compat_configure(void *v)
+{
+ struct dn_id *buf = NULL, *base;
+ struct dn_sch *sch = NULL;
+ struct dn_link *p = NULL;
+ struct dn_fs *fs = NULL;
+ struct dn_profile *pf = NULL;
+ int lmax;
+ int error;
+
+ struct dn_pipe7 *p7 = (struct dn_pipe7 *)v;
+ struct dn_pipe8 *p8 = (struct dn_pipe8 *)v;
+
+	int i;	/* number of the object to configure */
+
+ lmax = sizeof(struct dn_id); /* command header */
+ lmax += sizeof(struct dn_sch) + sizeof(struct dn_link) +
+ sizeof(struct dn_fs) + sizeof(struct dn_profile);
+
+ base = buf = malloc(lmax, M_DUMMYNET, M_WAIT|M_ZERO);
+ o_next(&buf, sizeof(struct dn_id), DN_CMD_CONFIG);
+ base->id = DN_API_VERSION;
+
+ /* pipe_nr is the same in p7 and p8 */
+ i = p7->pipe_nr;
+ if (i != 0) { /* pipe config */
+ sch = o_next(&buf, sizeof(*sch), DN_SCH);
+ p = o_next(&buf, sizeof(*p), DN_LINK);
+ fs = o_next(&buf, sizeof(*fs), DN_FS);
+
+ error = dn_compat_config_pipe(sch, p, fs, v);
+ if (error) {
+ free(buf, M_DUMMYNET);
+ return error;
+ }
+ if (!is7 && p8->samples_no > 0) {
+			/* Add profiles */
+ pf = o_next(&buf, sizeof(*pf), DN_PROFILE);
+ error = dn_compat_config_profile(pf, p, v);
+ if (error) {
+ free(buf, M_DUMMYNET);
+ return error;
+ }
+ }
+ } else { /* queue config */
+ fs = o_next(&buf, sizeof(*fs), DN_FS);
+ error = dn_compat_config_queue(fs, v);
+ if (error) {
+ free(buf, M_DUMMYNET);
+ return error;
+ }
+ }
+ error = do_config(base, (char *)buf - (char *)base);
+
+ if (buf)
+ free(buf, M_DUMMYNET);
+ return error;
+}
+
+int
+dn_compat_calc_size(struct dn_parms dn_cfg)
+{
+ int need = 0;
+ /* XXX use FreeBSD 8 struct size */
+ /* NOTE:
+ * - half scheduler: schk_count/2
+ * - all flowset: fsk_count
+ * - all flowset queues: queue_count
+ * - all pipe queue: si_count
+ */
+ need += dn_cfg.schk_count * sizeof(struct dn_pipe8) / 2;
+ need += dn_cfg.fsk_count * sizeof(struct dn_flow_set);
+ need += dn_cfg.si_count * sizeof(struct dn_flow_queue8);
+ need += dn_cfg.queue_count * sizeof(struct dn_flow_queue8);
+
+ return need;
+}
+
+int
+dn_c_copy_q (void *_ni, void *arg)
+{
+ struct copy_args *a = arg;
+ struct dn_flow_queue7 *fq7 = (struct dn_flow_queue7 *)*a->start;
+ struct dn_flow_queue8 *fq8 = (struct dn_flow_queue8 *)*a->start;
+ struct dn_flow *ni = (struct dn_flow *)_ni;
+ int size = 0;
+
+ /* XXX hash slot not set */
+ /* No difference between 7.2/8 */
+ fq7->len = ni->length;
+ fq7->len_bytes = ni->len_bytes;
+ fq7->id = ni->fid;
+
+ if (is7) {
+ size = sizeof(struct dn_flow_queue7);
+ fq7->tot_pkts = ni->tot_pkts;
+ fq7->tot_bytes = ni->tot_bytes;
+ fq7->drops = ni->drops;
+ } else {
+ size = sizeof(struct dn_flow_queue8);
+ fq8->tot_pkts = ni->tot_pkts;
+ fq8->tot_bytes = ni->tot_bytes;
+ fq8->drops = ni->drops;
+ }
+
+ *a->start += size;
+ return 0;
+}
+
+int
+dn_c_copy_pipe(struct dn_schk *s, struct copy_args *a, int nq)
+{
+ struct dn_link *l = &s->link;
+ struct dn_fsk *f = s->fs;
+
+ struct dn_pipe7 *pipe7 = (struct dn_pipe7 *)*a->start;
+ struct dn_pipe8 *pipe8 = (struct dn_pipe8 *)*a->start;
+ struct dn_flow_set *fs;
+ int size = 0;
+
+ if (is7) {
+ fs = &pipe7->fs;
+ size = sizeof(struct dn_pipe7);
+ } else {
+ fs = &pipe8->fs;
+ size = sizeof(struct dn_pipe8);
+ }
+
+	/* These 4 fields are the same in pipe7 and pipe8 */
+ pipe7->next.sle_next = (struct dn_pipe7 *)DN_IS_PIPE;
+ pipe7->bandwidth = l->bandwidth;
+ pipe7->delay = l->delay;
+ pipe7->pipe_nr = l->link_nr - DN_MAX_ID;
+
+ if (!is7) {
+ if (s->profile) {
+ struct dn_profile *pf = s->profile;
+ strncpy(pipe8->name, pf->name, sizeof(pf->name));
+ pipe8->loss_level = pf->loss_level;
+ pipe8->samples_no = pf->samples_no;
+ }
+ pipe8->burst = div64(l->burst , 8 * hz);
+ }
+
+ fs->flow_mask = s->sch.sched_mask;
+ fs->rq_size = s->sch.buckets ? s->sch.buckets : 1;
+
+ fs->parent_nr = l->link_nr - DN_MAX_ID;
+ fs->qsize = f->fs.qsize;
+ fs->plr = f->fs.plr;
+ fs->w_q = f->fs.w_q;
+ fs->max_th = f->max_th;
+ fs->min_th = f->min_th;
+ fs->max_p = f->fs.max_p;
+ fs->rq_elements = nq;
+
+ fs->flags_fs = convertflags2old(f->fs.flags);
+
+ *a->start += size;
+ return 0;
+}
+
+
+int
+dn_compat_copy_pipe(struct copy_args *a, void *_o)
+{
+ int have = a->end - *a->start;
+ int need = 0;
+ int pipe_size = sizeof(struct dn_pipe8);
+ int queue_size = sizeof(struct dn_flow_queue8);
+ int n_queue = 0; /* number of queues */
+
+ struct dn_schk *s = (struct dn_schk *)_o;
+ /* calculate needed space:
+ * - struct dn_pipe
+ * - if there are instances, dn_queue * n_instances
+ */
+ n_queue = (s->sch.flags & DN_HAVE_MASK ? dn_ht_entries(s->siht) :
+ (s->siht ? 1 : 0));
+ need = pipe_size + queue_size * n_queue;
+ if (have < need) {
+ D("have %d < need %d", have, need);
+ return 1;
+ }
+ /* copy pipe */
+ dn_c_copy_pipe(s, a, n_queue);
+
+ /* copy queues */
+ if (s->sch.flags & DN_HAVE_MASK)
+ dn_ht_scan(s->siht, dn_c_copy_q, a);
+ else if (s->siht)
+ dn_c_copy_q(s->siht, a);
+ return 0;
+}
+
+int
+dn_c_copy_fs(struct dn_fsk *f, struct copy_args *a, int nq)
+{
+ struct dn_flow_set *fs = (struct dn_flow_set *)*a->start;
+
+ fs->next.sle_next = (struct dn_flow_set *)DN_IS_QUEUE;
+ fs->fs_nr = f->fs.fs_nr;
+ fs->qsize = f->fs.qsize;
+ fs->plr = f->fs.plr;
+ fs->w_q = f->fs.w_q;
+ fs->max_th = f->max_th;
+ fs->min_th = f->min_th;
+ fs->max_p = f->fs.max_p;
+ fs->flow_mask = f->fs.flow_mask;
+ fs->rq_elements = nq;
+ fs->rq_size = (f->fs.buckets ? f->fs.buckets : 1);
+ fs->parent_nr = f->fs.sched_nr;
+ fs->weight = f->fs.par[0];
+
+ fs->flags_fs = convertflags2old(f->fs.flags);
+ *a->start += sizeof(struct dn_flow_set);
+ return 0;
+}
+
+int
+dn_compat_copy_queue(struct copy_args *a, void *_o)
+{
+ int have = a->end - *a->start;
+ int need = 0;
+ int fs_size = sizeof(struct dn_flow_set);
+ int queue_size = sizeof(struct dn_flow_queue8);
+
+ struct dn_fsk *fs = (struct dn_fsk *)_o;
+ int n_queue = 0; /* number of queues */
+
+ n_queue = (fs->fs.flags & DN_HAVE_MASK ? dn_ht_entries(fs->qht) :
+ (fs->qht ? 1 : 0));
+
+ need = fs_size + queue_size * n_queue;
+ if (have < need) {
+ D("have < need");
+ return 1;
+ }
+
+ /* copy flowset */
+ dn_c_copy_fs(fs, a, n_queue);
+
+ /* copy queues */
+ if (fs->fs.flags & DN_HAVE_MASK)
+ dn_ht_scan(fs->qht, dn_c_copy_q, a);
+ else if (fs->qht)
+ dn_c_copy_q(fs->qht, a);
+
+ return 0;
+}
+
+int
+copy_data_helper_compat(void *_o, void *_arg)
+{
+ struct copy_args *a = _arg;
+
+ if (a->type == DN_COMPAT_PIPE) {
+ struct dn_schk *s = _o;
+ if (s->sch.oid.subtype != 1 || s->sch.sched_nr <= DN_MAX_ID) {
+ return 0; /* not old type */
+ }
+ /* copy pipe parameters, and if instance exists, copy
+ * other parameters and eventually queues.
+ */
+ if(dn_compat_copy_pipe(a, _o))
+ return DNHT_SCAN_END;
+ } else if (a->type == DN_COMPAT_QUEUE) {
+ struct dn_fsk *fs = _o;
+ if (fs->fs.fs_nr >= DN_MAX_ID)
+ return 0;
+ if (dn_compat_copy_queue(a, _o))
+ return DNHT_SCAN_END;
+ }
+ return 0;
+}
+
+/* Main function to manage old requests */
+int
+ip_dummynet_compat(struct sockopt *sopt)
+{
+ int error=0;
+ void *v = NULL;
+ struct dn_id oid;
+
+	/* Length of the data, used to detect the ipfw version... */
+ int len = sopt->sopt_valsize;
+
+ /* len can be 0 if command was dummynet_flush */
+ if (len == pipesize7) {
+ D("setting compatibility with FreeBSD 7.2");
+ is7 = 1;
+ }
+ else if (len == pipesize8 || len == pipesizemax8) {
+ D("setting compatibility with FreeBSD 8");
+ is7 = 0;
+ }
+
+ switch (sopt->sopt_name) {
+ default:
+ printf("dummynet: -- unknown option %d", sopt->sopt_name);
+ error = EINVAL;
+ break;
+
+ case IP_DUMMYNET_FLUSH:
+ oid_fill(&oid, sizeof(oid), DN_CMD_FLUSH, DN_API_VERSION);
+ do_config(&oid, oid.len);
+ break;
+
+ case IP_DUMMYNET_DEL:
+ v = malloc(len, M_TEMP, M_WAITOK);
+ error = sooptcopyin(sopt, v, len, len);
+ if (error)
+ break;
+ error = dn_compat_del(v);
+ free(v, M_DUMMYNET);
+ break;
+
+ case IP_DUMMYNET_CONFIGURE:
+ v = malloc(len, M_TEMP, M_WAITOK);
+ error = sooptcopyin(sopt, v, len, len);
+ if (error)
+ break;
+ error = dn_compat_configure(v);
+ free(v, M_DUMMYNET);
+ break;
+
+ case IP_DUMMYNET_GET: {
+ void *buf;
+ int ret;
+ int original_size = sopt->sopt_valsize;
+ int size;
+
+ ret = dummynet_get(sopt, &buf);
+ if (ret)
+			return 0;	/* XXX ? */
+ size = sopt->sopt_valsize;
+ sopt->sopt_valsize = original_size;
+ D("size=%d, buf=%p", size, buf);
+ ret = sooptcopyout(sopt, buf, size);
+ if (ret)
+ printf(" %s ERROR sooptcopyout\n", __FUNCTION__);
+ if (buf)
+ free(buf, M_DUMMYNET);
+ }
+ }
+
+ return error;
+}
+
+
diff --git a/rtems/freebsd/netinet/ipfw/ip_dn_io.c b/rtems/freebsd/netinet/ipfw/ip_dn_io.c
new file mode 100644
index 00000000..be579e9b
--- /dev/null
+++ b/rtems/freebsd/netinet/ipfw/ip_dn_io.c
@@ -0,0 +1,796 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 2010 Luigi Rizzo, Riccardo Panicucci, Universita` di Pisa
+ * All rights reserved
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * Dummynet portions related to packet handling.
+ */
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/freebsd/local/opt_inet6.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/module.h>
+#include <rtems/freebsd/sys/priv.h>
+#include <rtems/freebsd/sys/proc.h>
+#include <rtems/freebsd/sys/rwlock.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/time.h>
+#include <rtems/freebsd/sys/sysctl.h>
+#include <rtems/freebsd/net/if.h> /* IFNAMSIZ, struct ifaddr, ifq head, lock.h mutex.h */
+#include <rtems/freebsd/net/netisr.h>
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/ip.h> /* ip_len, ip_off */
+#include <rtems/freebsd/netinet/ip_var.h> /* ip_output(), IP_FORWARDING */
+#include <rtems/freebsd/netinet/ip_fw.h>
+#include <rtems/freebsd/netinet/ipfw/ip_fw_private.h>
+#include <rtems/freebsd/netinet/ipfw/dn_heap.h>
+#include <rtems/freebsd/netinet/ip_dummynet.h>
+#include <rtems/freebsd/netinet/ipfw/ip_dn_private.h>
+#include <rtems/freebsd/netinet/ipfw/dn_sched.h>
+
+#include <rtems/freebsd/netinet/if_ether.h> /* various ether_* routines */
+
+#include <rtems/freebsd/netinet/ip6.h> /* for ip6_input, ip6_output prototypes */
+#include <rtems/freebsd/netinet6/ip6_var.h>
+
+/*
+ * We keep a private variable for the simulation time, but we could
+ * probably use an existing one ("softticks" in sys/kern/kern_timeout.c)
+ * instead of dn_cfg.curr_time
+ */
+
+struct dn_parms dn_cfg;
+
+static long tick_last; /* Last tick duration (usec). */
+static long tick_delta; /* Last vs standard tick diff (usec). */
+static long tick_delta_sum; /* Accumulated tick difference (usec).*/
+static long tick_adjustment; /* Tick adjustments done. */
+static long tick_lost; /* Lost(coalesced) ticks number. */
+/* Adjusted vs non-adjusted curr_time difference (ticks). */
+static long tick_diff;
+
+static unsigned long io_pkt;
+static unsigned long io_pkt_fast;
+static unsigned long io_pkt_drop;
+
+/*
+ * We use a heap to store entities for which we have pending timer events.
+ * The heap is checked at every tick and all entities with expired events
+ * are extracted.
+ */
+
+MALLOC_DEFINE(M_DUMMYNET, "dummynet", "dummynet heap");
+
+extern void (*bridge_dn_p)(struct mbuf *, struct ifnet *);
+
+#ifdef SYSCTL_NODE
+
+SYSBEGIN(f4)
+
+SYSCTL_DECL(_net_inet);
+SYSCTL_DECL(_net_inet_ip);
+SYSCTL_NODE(_net_inet_ip, OID_AUTO, dummynet, CTLFLAG_RW, 0, "Dummynet");
+
+/* parameters */
+SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, hash_size,
+ CTLFLAG_RW, &dn_cfg.hash_size, 0, "Default hash table size");
+SYSCTL_LONG(_net_inet_ip_dummynet, OID_AUTO, pipe_slot_limit,
+ CTLFLAG_RW, &dn_cfg.slot_limit, 0,
+ "Upper limit in slots for pipe queue.");
+SYSCTL_LONG(_net_inet_ip_dummynet, OID_AUTO, pipe_byte_limit,
+ CTLFLAG_RW, &dn_cfg.byte_limit, 0,
+ "Upper limit in bytes for pipe queue.");
+SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, io_fast,
+ CTLFLAG_RW, &dn_cfg.io_fast, 0, "Enable fast dummynet io.");
+SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, debug,
+ CTLFLAG_RW, &dn_cfg.debug, 0, "Dummynet debug level");
+SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, expire,
+ CTLFLAG_RW, &dn_cfg.expire, 0, "Expire empty queues/pipes");
+SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, expire_cycle,
+ CTLFLAG_RD, &dn_cfg.expire_cycle, 0, "Expire cycle for queues/pipes");
+
+/* RED parameters */
+SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, red_lookup_depth,
+ CTLFLAG_RD, &dn_cfg.red_lookup_depth, 0, "Depth of RED lookup table");
+SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, red_avg_pkt_size,
+ CTLFLAG_RD, &dn_cfg.red_avg_pkt_size, 0, "RED Medium packet size");
+SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, red_max_pkt_size,
+ CTLFLAG_RD, &dn_cfg.red_max_pkt_size, 0, "RED Max packet size");
+
+/* time adjustment */
+SYSCTL_LONG(_net_inet_ip_dummynet, OID_AUTO, tick_delta,
+ CTLFLAG_RD, &tick_delta, 0, "Last vs standard tick difference (usec).");
+SYSCTL_LONG(_net_inet_ip_dummynet, OID_AUTO, tick_delta_sum,
+ CTLFLAG_RD, &tick_delta_sum, 0, "Accumulated tick difference (usec).");
+SYSCTL_LONG(_net_inet_ip_dummynet, OID_AUTO, tick_adjustment,
+ CTLFLAG_RD, &tick_adjustment, 0, "Tick adjustments done.");
+SYSCTL_LONG(_net_inet_ip_dummynet, OID_AUTO, tick_diff,
+ CTLFLAG_RD, &tick_diff, 0,
+ "Adjusted vs non-adjusted curr_time difference (ticks).");
+SYSCTL_LONG(_net_inet_ip_dummynet, OID_AUTO, tick_lost,
+ CTLFLAG_RD, &tick_lost, 0,
+ "Number of ticks coalesced by dummynet taskqueue.");
+
+/* statistics */
+SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, schk_count,
+ CTLFLAG_RD, &dn_cfg.schk_count, 0, "Number of schedulers");
+SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, si_count,
+ CTLFLAG_RD, &dn_cfg.si_count, 0, "Number of scheduler instances");
+SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, fsk_count,
+ CTLFLAG_RD, &dn_cfg.fsk_count, 0, "Number of flowsets");
+SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, queue_count,
+ CTLFLAG_RD, &dn_cfg.queue_count, 0, "Number of queues");
+SYSCTL_ULONG(_net_inet_ip_dummynet, OID_AUTO, io_pkt,
+ CTLFLAG_RD, &io_pkt, 0,
+ "Number of packets passed to dummynet.");
+SYSCTL_ULONG(_net_inet_ip_dummynet, OID_AUTO, io_pkt_fast,
+ CTLFLAG_RD, &io_pkt_fast, 0,
+ "Number of packets bypassed dummynet scheduler.");
+SYSCTL_ULONG(_net_inet_ip_dummynet, OID_AUTO, io_pkt_drop,
+ CTLFLAG_RD, &io_pkt_drop, 0,
+ "Number of packets dropped by dummynet.");
+
+SYSEND
+
+#endif
+
+static void dummynet_send(struct mbuf *);
+
+/*
+ * Packets processed by dummynet have an mbuf tag associated with
+ * them that carries their dummynet state.
+ * Outside dummynet, only the 'rule' field is relevant, and it must
+ * be at the beginning of the structure.
+ */
+struct dn_pkt_tag {
+ struct ipfw_rule_ref rule; /* matching rule */
+
+ /* second part, dummynet specific */
+ int dn_dir; /* action when packet comes out.*/
+ /* see ip_fw_private.h */
+ uint64_t output_time; /* when the pkt is due for delivery*/
+ struct ifnet *ifp; /* interface, for ip_output */
+ struct _ip6dn_args ip6opt; /* XXX ipv6 options */
+};
+
+/*
+ * Return the mbuf tag holding the dummynet state (it should
+ * be the first one on the list).
+ */
+static struct dn_pkt_tag *
+dn_tag_get(struct mbuf *m)
+{
+ struct m_tag *mtag = m_tag_first(m);
+ KASSERT(mtag != NULL &&
+ mtag->m_tag_cookie == MTAG_ABI_COMPAT &&
+ mtag->m_tag_id == PACKET_TAG_DUMMYNET,
+ ("packet on dummynet queue w/o dummynet tag!"));
+ return (struct dn_pkt_tag *)(mtag+1);
+}
+
+static inline void
+mq_append(struct mq *q, struct mbuf *m)
+{
+ if (q->head == NULL)
+ q->head = m;
+ else
+ q->tail->m_nextpkt = m;
+ q->tail = m;
+ m->m_nextpkt = NULL;
+}
+
+/*
+ * Dispose of a list of packets. Use a function so that, if we need
+ * to do more work, this is a central point to do it.
+ */
+void dn_free_pkts(struct mbuf *mnext)
+{
+ struct mbuf *m;
+
+ while ((m = mnext) != NULL) {
+ mnext = m->m_nextpkt;
+ FREE_PKT(m);
+ }
+}
+
+static int
+red_drops (struct dn_queue *q, int len)
+{
+ /*
+ * RED algorithm
+ *
+ * RED calculates the average queue size (avg) using a low-pass filter
+ * with an exponential weighted (w_q) moving average:
+ * avg <- (1-w_q) * avg + w_q * q_size
+ * where q_size is the queue length (measured in bytes or packets).
+ *
+ * If q_size == 0, we compute the idle time for the link, and set
+ * avg = (1 - w_q)^(idle/s)
+ * where s is the time needed for transmitting a medium-sized packet.
+ *
+ * Now, if avg < min_th the packet is enqueued.
+ * If avg > max_th the packet is dropped. Otherwise, the packet is
+ * dropped with probability P function of avg.
+ */
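+
+	/*
+	 * Worked example with illustrative (unscaled) numbers:
+	 * w_q = 0.002, min_th = 5, max_th = 15, max_p = 0.1.
+	 * With avg = 10, halfway between the thresholds, the linear
+	 * dropping function below gives
+	 *	p_b = max_p * (avg - min_th) / (max_th - min_th)
+	 *	    = 0.1 * 5 / 10 = 0.05,
+	 * further scaled by q->count in the final test.
+	 */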
+
+ struct dn_fsk *fs = q->fs;
+ int64_t p_b = 0;
+
+ /* Queue in bytes or packets? */
+ uint32_t q_size = (fs->fs.flags & DN_QSIZE_BYTES) ?
+ q->ni.len_bytes : q->ni.length;
+
+ /* Average queue size estimation. */
+ if (q_size != 0) {
+ /* Queue is not empty, avg <- avg + (q_size - avg) * w_q */
+ int diff = SCALE(q_size) - q->avg;
+ int64_t v = SCALE_MUL((int64_t)diff, (int64_t)fs->w_q);
+
+ q->avg += (int)v;
+ } else {
+ /*
+		 * Queue is empty; compute for how long the queue has been
+		 * empty and use a lookup table for computing
+		 * (1 - w_q)^(idle_time/s), where s is the time to send a
+		 * (small) packet.
+ * XXX check wraps...
+ */
+ if (q->avg) {
+ u_int t = div64((dn_cfg.curr_time - q->q_time), fs->lookup_step);
+
+ q->avg = (t < fs->lookup_depth) ?
+ SCALE_MUL(q->avg, fs->w_q_lookup[t]) : 0;
+ }
+ }
+
+	/* Should we drop? */
+ if (q->avg < fs->min_th) {
+ q->count = -1;
+ return (0); /* accept packet */
+ }
+ if (q->avg >= fs->max_th) { /* average queue >= max threshold */
+ if (fs->fs.flags & DN_IS_GENTLE_RED) {
+ /*
+ * According to Gentle-RED, if avg is greater than
+ * max_th the packet is dropped with a probability
+ * p_b = c_3 * avg - c_4
+ * where c_3 = (1 - max_p) / max_th
+ * c_4 = 1 - 2 * max_p
+ */
+ p_b = SCALE_MUL((int64_t)fs->c_3, (int64_t)q->avg) -
+ fs->c_4;
+ } else {
+ q->count = -1;
+ return (1);
+ }
+ } else if (q->avg > fs->min_th) {
+ /*
+ * We compute p_b using the linear dropping function
+ * p_b = c_1 * avg - c_2
+ * where c_1 = max_p / (max_th - min_th)
+ * c_2 = max_p * min_th / (max_th - min_th)
+ */
+ p_b = SCALE_MUL((int64_t)fs->c_1, (int64_t)q->avg) - fs->c_2;
+ }
+
+ if (fs->fs.flags & DN_QSIZE_BYTES)
+ p_b = div64((p_b * len) , fs->max_pkt_size);
+ if (++q->count == 0)
+ q->random = random() & 0xffff;
+ else {
+ /*
+ * q->count counts packets arrived since last drop, so a greater
+ * value of q->count means a greater packet drop probability.
+ */
+ if (SCALE_MUL(p_b, SCALE((int64_t)q->count)) > q->random) {
+ q->count = 0;
+ /* After a drop we calculate a new random value. */
+ q->random = random() & 0xffff;
+ return (1); /* drop */
+ }
+ }
+ /* End of RED algorithm. */
+
+	return (0);	/* accept */
+}
+
+/*
+ * Enqueue a packet in q, subject to space and queue management policy
+ * (whose parameters are in q->fs).
+ * Update stats for the queue and the scheduler.
+ * Return 0 on success, 1 on drop. The packet is consumed in any case.
+ */
+int
+dn_enqueue(struct dn_queue *q, struct mbuf* m, int drop)
+{
+ struct dn_fs *f;
+ struct dn_flow *ni; /* stats for scheduler instance */
+ uint64_t len;
+
+ if (q->fs == NULL || q->_si == NULL) {
+ printf("%s fs %p si %p, dropping\n",
+ __FUNCTION__, q->fs, q->_si);
+ FREE_PKT(m);
+ return 1;
+ }
+ f = &(q->fs->fs);
+ ni = &q->_si->ni;
+ len = m->m_pkthdr.len;
+ /* Update statistics, then check reasons to drop pkt. */
+ q->ni.tot_bytes += len;
+ q->ni.tot_pkts++;
+ ni->tot_bytes += len;
+ ni->tot_pkts++;
+ if (drop)
+ goto drop;
+ if (f->plr && random() < f->plr)
+ goto drop;
+ if (f->flags & DN_IS_RED && red_drops(q, m->m_pkthdr.len))
+ goto drop;
+ if (f->flags & DN_QSIZE_BYTES) {
+ if (q->ni.len_bytes > f->qsize)
+ goto drop;
+ } else if (q->ni.length >= f->qsize) {
+ goto drop;
+ }
+ mq_append(&q->mq, m);
+ q->ni.length++;
+ q->ni.len_bytes += len;
+ ni->length++;
+ ni->len_bytes += len;
+ return 0;
+
+drop:
+ io_pkt_drop++;
+ q->ni.drops++;
+ ni->drops++;
+ FREE_PKT(m);
+ return 1;
+}
+
+/*
+ * Fetch packets from the delay line which are due now. If there are
+ * leftover packets, reinsert the delay line in the heap.
+ * Runs under scheduler lock.
+ */
+static void
+transmit_event(struct mq *q, struct delay_line *dline, uint64_t now)
+{
+ struct mbuf *m;
+ struct dn_pkt_tag *pkt = NULL;
+
+ dline->oid.subtype = 0; /* not in heap */
+ while ((m = dline->mq.head) != NULL) {
+ pkt = dn_tag_get(m);
+ if (!DN_KEY_LEQ(pkt->output_time, now))
+ break;
+ dline->mq.head = m->m_nextpkt;
+ mq_append(q, m);
+ }
+ if (m != NULL) {
+ dline->oid.subtype = 1; /* in heap */
+ heap_insert(&dn_cfg.evheap, pkt->output_time, dline);
+ }
+}
+
+/*
+ * Convert the additional MAC overheads/delays into an equivalent
+ * number of bits for the given data rate. The samples are
+ * in milliseconds so we need to divide by 1000.
+ */
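+
+/*
+ * Example (illustrative numbers): a 2 ms sample on a link whose
+ * bandwidth field is 1000000 bit/s charges the packet an extra
+ *	2 * 1000000 / 1000 = 2000 bits.
+ */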
+static uint64_t
+extra_bits(struct mbuf *m, struct dn_schk *s)
+{
+ int index;
+ uint64_t bits;
+ struct dn_profile *pf = s->profile;
+
+ if (!pf || pf->samples_no == 0)
+ return 0;
+ index = random() % pf->samples_no;
+ bits = div64((uint64_t)pf->samples[index] * s->link.bandwidth, 1000);
+ if (index >= pf->loss_level) {
+ struct dn_pkt_tag *dt = dn_tag_get(m);
+ if (dt)
+ dt->dn_dir = DIR_DROP;
+ }
+ return bits;
+}
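+
+/*
+ * Example (illustrative): with a profile sample of 2 ms and
+ * link.bandwidth = 1000000 bit/s, the charge is
+ * 2 * 1000000 / 1000 = 2000 bits, i.e. the scheduler treats the
+ * packet as if it were 250 bytes longer.
+ */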
+
+/*
+ * Send traffic from a scheduler instance due by 'now'.
+ * Return a pointer to the head of the queue.
+ */
+static struct mbuf *
+serve_sched(struct mq *q, struct dn_sch_inst *si, uint64_t now)
+{
+ struct mq def_q;
+ struct dn_schk *s = si->sched;
+ struct mbuf *m = NULL;
+ int delay_line_idle = (si->dline.mq.head == NULL);
+ int done, bw;
+
+ if (q == NULL) {
+ q = &def_q;
+ q->head = NULL;
+ }
+
+ bw = s->link.bandwidth;
+ si->kflags &= ~DN_ACTIVE;
+
+ if (bw > 0)
+ si->credit += (now - si->sched_time) * bw;
+ else
+ si->credit = 0;
+ si->sched_time = now;
+ done = 0;
+ while (si->credit >= 0 && (m = s->fp->dequeue(si)) != NULL) {
+ uint64_t len_scaled;
+ done++;
+ len_scaled = (bw == 0) ? 0 : hz *
+ (m->m_pkthdr.len * 8 + extra_bits(m, s));
+ si->credit -= len_scaled;
+ /* Move the packet into the delay line */
+ dn_tag_get(m)->output_time += s->link.delay;
+ mq_append(&si->dline.mq, m);
+ }
+ /*
+ * If credit >= 0 the instance is idle, mark time.
+ * Otherwise put back in the heap, and adjust the output
+ * time of the last inserted packet, m, which was too early.
+ */
+ if (si->credit >= 0) {
+ si->idle_time = now;
+ } else {
+ uint64_t t;
+ KASSERT (bw > 0, ("bw=0 and credit<0 ?"));
+ t = div64(bw - 1 - si->credit, bw);
+ if (m)
+ dn_tag_get(m)->output_time += t;
+ si->kflags |= DN_ACTIVE;
+ heap_insert(&dn_cfg.evheap, now + t, si);
+ }
+ if (delay_line_idle && done)
+ transmit_event(q, &si->dline, now);
+ return q->head;
+}
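+
+/*
+ * Units in serve_sched() (a sketch, assuming hz ticks per second):
+ * credit grows by bandwidth [bit/s] per tick, and each packet costs
+ * hz * len_bits, so both sides are in the same unit. E.g. with
+ * hz = 1000 and bandwidth = 1 Mbit/s, a 1500-byte packet costs
+ * 1000 * 12000 = 12e6 units while credit grows by 1e6 per tick:
+ * the packet consumes 12 ticks, i.e. its 12 ms wire time.
+ */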
+
+/*
+ * The timer handler for dummynet. Time is computed in ticks, but
+ * the code is tolerant of the actual rate at which this is called.
+ * Once complete, the function reschedules itself for the next tick.
+ */
+void
+dummynet_task(void *context, int pending)
+{
+ struct timeval t;
+ struct mq q = { NULL, NULL }; /* queue to accumulate results */
+
+ DN_BH_WLOCK();
+
+ /* Update number of lost (coalesced) ticks. */
+ tick_lost += pending - 1;
+
+ getmicrouptime(&t);
+ /* Last tick duration (usec). */
+ tick_last = (t.tv_sec - dn_cfg.prev_t.tv_sec) * 1000000 +
+ (t.tv_usec - dn_cfg.prev_t.tv_usec);
+ /* Last tick vs standard tick difference (usec). */
+ tick_delta = (tick_last * hz - 1000000) / hz;
+ /* Accumulated tick difference (usec). */
+ tick_delta_sum += tick_delta;
+
+ dn_cfg.prev_t = t;
+
+ /*
+ * Adjust curr_time if the accumulated tick difference is
+ * greater than the 'standard' tick. Since curr_time should
+ * be monotonically increasing, we do positive adjustments
+ * as required, and throttle curr_time in case of negative
+ * adjustment.
+ */
+ dn_cfg.curr_time++;
+ if (tick_delta_sum - tick >= 0) {
+ int diff = tick_delta_sum / tick;
+
+ dn_cfg.curr_time += diff;
+ tick_diff += diff;
+ tick_delta_sum %= tick;
+ tick_adjustment++;
+ } else if (tick_delta_sum + tick <= 0) {
+ dn_cfg.curr_time--;
+ tick_diff--;
+ tick_delta_sum += tick;
+ tick_adjustment++;
+ }
+
+ /* serve pending events, accumulate in q */
+ for (;;) {
+ struct dn_id *p; /* generic parameter to handler */
+
+ if (dn_cfg.evheap.elements == 0 ||
+ DN_KEY_LT(dn_cfg.curr_time, HEAP_TOP(&dn_cfg.evheap)->key))
+ break;
+ p = HEAP_TOP(&dn_cfg.evheap)->object;
+ heap_extract(&dn_cfg.evheap, NULL);
+
+ if (p->type == DN_SCH_I) {
+ serve_sched(&q, (struct dn_sch_inst *)p, dn_cfg.curr_time);
+ } else { /* extracted a delay line */
+ transmit_event(&q, (struct delay_line *)p, dn_cfg.curr_time);
+ }
+ }
+ if (dn_cfg.expire && ++dn_cfg.expire_cycle >= dn_cfg.expire) {
+ dn_cfg.expire_cycle = 0;
+ dn_drain_scheduler();
+ dn_drain_queue();
+ }
+
+ DN_BH_WUNLOCK();
+ dn_reschedule();
+ if (q.head != NULL)
+ dummynet_send(q.head);
+}
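+
+/*
+ * Example of the drift compensation above (illustrative, hz = 1000 so
+ * the standard tick is 1000 us): if the last tick actually lasted
+ * 1010 us, tick_delta = (1010 * 1000 - 1000000) / 1000 = 10 us.
+ * Once tick_delta_sum accumulates a full tick (1000 us), curr_time
+ * is advanced by one extra unit and tick_delta_sum reduced mod tick.
+ */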
+
+/*
+ * forward a chain of packets to the proper destination.
+ * This runs outside the dummynet lock.
+ */
+static void
+dummynet_send(struct mbuf *m)
+{
+ struct mbuf *n;
+
+ for (; m != NULL; m = n) {
+ struct ifnet *ifp = NULL; /* gcc 3.4.6 complains */
+ struct m_tag *tag;
+ int dst;
+
+ n = m->m_nextpkt;
+ m->m_nextpkt = NULL;
+ tag = m_tag_first(m);
+ if (tag == NULL) { /* should not happen */
+ dst = DIR_DROP;
+ } else {
+ struct dn_pkt_tag *pkt = dn_tag_get(m);
+ /* extract the dummynet info, rename the tag
+ * to carry reinject info.
+ */
+ dst = pkt->dn_dir;
+ ifp = pkt->ifp;
+ tag->m_tag_cookie = MTAG_IPFW_RULE;
+ tag->m_tag_id = 0;
+ }
+
+ switch (dst) {
+ case DIR_OUT:
+ SET_HOST_IPLEN(mtod(m, struct ip *));
+ ip_output(m, NULL, NULL, IP_FORWARDING, NULL, NULL);
+ break;
+
+ case DIR_IN:
+ /* put header in network format for ip_input() */
+ //SET_NET_IPLEN(mtod(m, struct ip *));
+ netisr_dispatch(NETISR_IP, m);
+ break;
+
+#ifdef INET6
+ case DIR_IN | PROTO_IPV6:
+ netisr_dispatch(NETISR_IPV6, m);
+ break;
+
+ case DIR_OUT | PROTO_IPV6:
+ SET_HOST_IPLEN(mtod(m, struct ip *));
+ ip6_output(m, NULL, NULL, IPV6_FORWARDING, NULL, NULL, NULL);
+ break;
+#endif
+
+ case DIR_FWD | PROTO_IFB: /* DN_TO_IFB_FWD: */
+ if (bridge_dn_p != NULL)
+ ((*bridge_dn_p)(m, ifp));
+ else
+ printf("dummynet: if_bridge not loaded\n");
+
+ break;
+
+ case DIR_IN | PROTO_LAYER2: /* DN_TO_ETH_DEMUX: */
+ /*
+ * The Ethernet code assumes the Ethernet header is
+ * contiguous in the first mbuf header.
+ * Ensure this is true.
+ */
+ if (m->m_len < ETHER_HDR_LEN &&
+ (m = m_pullup(m, ETHER_HDR_LEN)) == NULL) {
+ printf("dummynet/ether: pullup failed, "
+ "dropping packet\n");
+ break;
+ }
+ ether_demux(m->m_pkthdr.rcvif, m);
+ break;
+
+ case DIR_OUT | PROTO_LAYER2: /* DN_TO_ETH_OUT: */
+ ether_output_frame(ifp, m);
+ break;
+
+ case DIR_DROP:
+ /* packet was marked for loss (e.g. by extra_bits()); drop it */
+ FREE_PKT(m);
+ break;
+
+ default:
+ printf("dummynet: bad switch %d!\n", dst);
+ FREE_PKT(m);
+ break;
+ }
+ }
+}
+
+static inline int
+tag_mbuf(struct mbuf *m, int dir, struct ip_fw_args *fwa)
+{
+ struct dn_pkt_tag *dt;
+ struct m_tag *mtag;
+
+ mtag = m_tag_get(PACKET_TAG_DUMMYNET,
+ sizeof(*dt), M_NOWAIT | M_ZERO);
+ if (mtag == NULL)
+ return 1; /* Cannot allocate packet header. */
+ m_tag_prepend(m, mtag); /* Attach to mbuf chain. */
+ dt = (struct dn_pkt_tag *)(mtag + 1);
+ dt->rule = fwa->rule;
+ dt->rule.info &= IPFW_ONEPASS; /* only keep this info */
+ dt->dn_dir = dir;
+ dt->ifp = fwa->oif;
+ /* dt->output_time is updated as the packet moves through */
+ dt->output_time = dn_cfg.curr_time;
+ return 0;
+}
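+
+/*
+ * Layout note: m_tag_get() allocates the m_tag header and the
+ * requested payload contiguously, so the dn_pkt_tag sits right after
+ * the header, hence the (mtag + 1) cast above.
+ */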
+
+
+/*
+ * dummynet hook for packets.
+ * We use the argument to locate the flowset fs and the sched_set sch
+ * associated with it. Then we apply flow_mask and sched_mask to
+ * determine the queue and scheduler instances.
+ *
+ * dir where shall we send the packet after dummynet.
+ * *m0 the mbuf with the packet
+ * ifp the 'ifp' parameter from the caller.
+ * NULL in ip_input, destination interface in ip_output,
+ */
+int
+dummynet_io(struct mbuf **m0, int dir, struct ip_fw_args *fwa)
+{
+ struct mbuf *m = *m0;
+ struct dn_fsk *fs = NULL;
+ struct dn_sch_inst *si;
+ struct dn_queue *q = NULL; /* default */
+
+ int fs_id = (fwa->rule.info & IPFW_INFO_MASK) +
+ ((fwa->rule.info & IPFW_IS_PIPE) ? 2*DN_MAX_ID : 0);
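+ /* If IPFW_IS_PIPE is set, the traffic was sent to a pipe, whose
+ * FIFO flowset lives at fs_nr + 2*DN_MAX_ID (see the numbering
+ * scheme described before config_link() in ip_dummynet.c).
+ */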
+ DN_BH_WLOCK();
+ io_pkt++;
+ /* we could actually tag outside the lock, but who cares... */
+ if (tag_mbuf(m, dir, fwa))
+ goto dropit;
+ if (dn_cfg.busy) {
+ /* if the upper half is busy doing something expensive,
+ * let's queue the packet and move on
+ */
+ mq_append(&dn_cfg.pending, m);
+ m = *m0 = NULL; /* consumed */
+ goto done; /* already active, nothing to do */
+ }
+ /* XXX locate_flowset could be optimised with a direct ref. */
+ fs = dn_ht_find(dn_cfg.fshash, fs_id, 0, NULL);
+ if (fs == NULL)
+ goto dropit; /* This queue/pipe does not exist! */
+ if (fs->sched == NULL) /* should not happen */
+ goto dropit;
+ /* find scheduler instance, possibly applying sched_mask */
+ si = ipdn_si_find(fs->sched, &(fwa->f_id));
+ if (si == NULL)
+ goto dropit;
+ /*
+ * If the scheduler supports multiple queues, find the right one
+ * (otherwise it will be ignored by enqueue).
+ */
+ if (fs->sched->fp->flags & DN_MULTIQUEUE) {
+ q = ipdn_q_find(fs, si, &(fwa->f_id));
+ if (q == NULL)
+ goto dropit;
+ }
+ if (fs->sched->fp->enqueue(si, q, m)) {
+ /* packet was dropped by enqueue() */
+ m = *m0 = NULL;
+ goto dropit;
+ }
+
+ if (si->kflags & DN_ACTIVE) {
+ m = *m0 = NULL; /* consumed */
+ goto done; /* already active, nothing to do */
+ }
+
+ /* compute the initial allowance */
+ if (si->idle_time < dn_cfg.curr_time) {
+ /* Do this only on the first packet on an idle pipe */
+ struct dn_link *p = &fs->sched->link;
+
+ si->sched_time = dn_cfg.curr_time;
+ si->credit = dn_cfg.io_fast ? p->bandwidth : 0;
+ if (p->burst) {
+ uint64_t burst = (dn_cfg.curr_time - si->idle_time) * p->bandwidth;
+ if (burst > p->burst)
+ burst = p->burst;
+ si->credit += burst;
+ }
+ }
+ /* pass through scheduler and delay line */
+ m = serve_sched(NULL, si, dn_cfg.curr_time);
+
+ /* optimization -- pass it back to ipfw for immediate send */
+ /* XXX Don't call dummynet_send() if the scheduler returned the
+ * packet just enqueued. This avoids a lock order reversal.
+ */
+ if (/*dn_cfg.io_fast &&*/ m == *m0 && (dir & PROTO_LAYER2) == 0 ) {
+ /* fast io: rename the tag to carry reinject info. */
+ struct m_tag *tag = m_tag_first(m);
+
+ tag->m_tag_cookie = MTAG_IPFW_RULE;
+ tag->m_tag_id = 0;
+ io_pkt_fast++;
+ if (m->m_nextpkt != NULL) {
+ printf("dummynet: fast io: pkt chain detected!\n");
+ m->m_nextpkt = NULL;
+ }
+ m = NULL;
+ } else {
+ *m0 = NULL;
+ }
+done:
+ DN_BH_WUNLOCK();
+ if (m)
+ dummynet_send(m);
+ return 0;
+
+dropit:
+ io_pkt_drop++;
+ DN_BH_WUNLOCK();
+ if (m)
+ FREE_PKT(m);
+ *m0 = NULL;
+ return (fs && (fs->fs.flags & DN_NOERROR)) ? 0 : ENOBUFS;
+}
diff --git a/rtems/freebsd/netinet/ipfw/ip_dn_private.h b/rtems/freebsd/netinet/ipfw/ip_dn_private.h
new file mode 100644
index 00000000..270f1881
--- /dev/null
+++ b/rtems/freebsd/netinet/ipfw/ip_dn_private.h
@@ -0,0 +1,402 @@
+/*-
+ * Copyright (c) 2010 Luigi Rizzo, Riccardo Panicucci, Universita` di Pisa
+ * All rights reserved
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * internal dummynet APIs.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _IP_DN_PRIVATE_H
+#define _IP_DN_PRIVATE_H
+
+/* debugging support
+ * use ND() to remove debugging, D() to print a line,
+ * DX(level, ...) to print above a certain level
+ * If you redefine D() you are expected to redefine all.
+ */
+#ifndef D
+#define ND(fmt, ...) do {} while (0)
+#define D1(fmt, ...) do {} while (0)
+#define D(fmt, ...) printf("%-10s " fmt "\n", \
+ __FUNCTION__, ## __VA_ARGS__)
+#define DX(lev, fmt, ...) do { \
+ if (dn_cfg.debug > lev) D(fmt, ## __VA_ARGS__); } while (0)
+#endif
+
+MALLOC_DECLARE(M_DUMMYNET);
+
+#ifndef FREE_PKT
+#define FREE_PKT(m) m_freem(m)
+#endif
+
+#ifndef __linux__
+#define div64(a, b) ((int64_t)(a) / (int64_t)(b))
+#endif
+
+#define DN_LOCK_INIT() do { \
+ mtx_init(&dn_cfg.uh_mtx, "dn_uh", NULL, MTX_DEF); \
+ mtx_init(&dn_cfg.bh_mtx, "dn_bh", NULL, MTX_DEF); \
+ } while (0)
+#define DN_LOCK_DESTROY() do { \
+ mtx_destroy(&dn_cfg.uh_mtx); \
+ mtx_destroy(&dn_cfg.bh_mtx); \
+ } while (0)
+#if 0 /* not used yet */
+#define DN_UH_RLOCK() mtx_lock(&dn_cfg.uh_mtx)
+#define DN_UH_RUNLOCK() mtx_unlock(&dn_cfg.uh_mtx)
+#define DN_UH_WLOCK() mtx_lock(&dn_cfg.uh_mtx)
+#define DN_UH_WUNLOCK() mtx_unlock(&dn_cfg.uh_mtx)
+#define DN_UH_LOCK_ASSERT() mtx_assert(&dn_cfg.uh_mtx, MA_OWNED)
+#endif
+
+#define DN_BH_RLOCK() mtx_lock(&dn_cfg.uh_mtx)
+#define DN_BH_RUNLOCK() mtx_unlock(&dn_cfg.uh_mtx)
+#define DN_BH_WLOCK() mtx_lock(&dn_cfg.uh_mtx)
+#define DN_BH_WUNLOCK() mtx_unlock(&dn_cfg.uh_mtx)
+#define DN_BH_LOCK_ASSERT() mtx_assert(&dn_cfg.uh_mtx, MA_OWNED)
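+
+/* Note: the DN_BH_* wrappers above also map to uh_mtx, so in this
+ * version both halves serialize on a single mutex and bh_mtx, while
+ * initialized, is not used by these macros.
+ */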
+
+SLIST_HEAD(dn_schk_head, dn_schk);
+SLIST_HEAD(dn_sch_inst_head, dn_sch_inst);
+SLIST_HEAD(dn_fsk_head, dn_fsk);
+SLIST_HEAD(dn_queue_head, dn_queue);
+SLIST_HEAD(dn_alg_head, dn_alg);
+
+struct mq { /* a basic queue of packets*/
+ struct mbuf *head, *tail;
+};
+
+static inline void
+set_oid(struct dn_id *o, int type, int len)
+{
+ o->type = type;
+ o->len = len;
+ o->subtype = 0;
+}
+
+/*
+ * configuration and global data for a dummynet instance
+ *
+ * When a configuration is modified from userland, 'id' is incremented
+ * so we can use the value to check for stale pointers.
+ */
+struct dn_parms {
+ uint32_t id; /* configuration version */
+
+ /* defaults (sysctl-accessible) */
+ int red_lookup_depth;
+ int red_avg_pkt_size;
+ int red_max_pkt_size;
+ int hash_size;
+ int max_hash_size;
+ long byte_limit; /* max queue sizes */
+ long slot_limit;
+
+ int io_fast;
+ int debug;
+
+ /* timekeeping */
+ struct timeval prev_t; /* last time dummynet_tick ran */
+ struct dn_heap evheap; /* scheduled events */
+
+ /* counters of objects -- used for reporting space */
+ int schk_count;
+ int si_count;
+ int fsk_count;
+ int queue_count;
+
+ /* ticks and other stuff */
+ uint64_t curr_time;
+ /* flowsets and schedulers are in hash tables, with 'hash_size'
+ * buckets. fshash is looked up at every packet arrival
+ * so better be generous if we expect many entries.
+ */
+ struct dn_ht *fshash;
+ struct dn_ht *schedhash;
+ /* list of flowsets without a scheduler -- use sch_chain */
+ struct dn_fsk_head fsu; /* list of unlinked flowsets */
+ struct dn_alg_head schedlist; /* list of algorithms */
+
+ /* Store the fs/sch to scan when draining. The value is the
+ * bucket number of the hash table. Expire can be disabled
+ * with net.inet.ip.dummynet.expire=0, or it happens every
+ * expire ticks.
+ */
+ int drain_fs;
+ int drain_sch;
+ uint32_t expire;
+ uint32_t expire_cycle; /* tick count */
+
+ /* if the upper half is busy doing something long,
+ * can set the busy flag and we will enqueue packets in
+ * a queue for later processing.
+ */
+ int busy;
+ struct mq pending;
+
+#ifdef _KERNEL
+ /*
+ * This file is normally used in the kernel, unless we do
+ * some userland tests, in which case we do not need a mtx.
+ * uh_mtx arbitrates between system calls and also
+ * protects fshash, schedhash and fsunlinked.
+ * These structures are readonly for the lower half.
+ * bh_mtx protects all other structures which may be
+ * modified upon packet arrivals
+ */
+#if defined( __linux__ ) || defined( _WIN32 )
+ spinlock_t uh_mtx;
+ spinlock_t bh_mtx;
+#else
+ struct mtx uh_mtx;
+ struct mtx bh_mtx;
+#endif
+
+#endif /* _KERNEL */
+};
+
+/*
+ * Delay line, contains all packets on output from a link.
+ * Every scheduler instance has one.
+ */
+struct delay_line {
+ struct dn_id oid;
+ struct dn_sch_inst *si;
+ struct mq mq;
+};
+
+/*
+ * The kernel side of a flowset. It is linked in a hash table
+ * of flowsets, and in a list of children of their parent scheduler.
+ * qht is either the queue or (if HAVE_MASK) a hash table of queues.
+ * Note that the mask to use is the (flow_mask|sched_mask), which
+ * changes as we attach/detach schedulers. So we store it here.
+ *
+ * XXX If we want to add scheduler-specific parameters, we need to
+ * put them in external storage because the scheduler may not be
+ * available when the fsk is created.
+ */
+struct dn_fsk { /* kernel side of a flowset */
+ struct dn_fs fs;
+ SLIST_ENTRY(dn_fsk) fsk_next; /* hash chain for fshash */
+
+ struct ipfw_flow_id fsk_mask;
+
+ /* qht is a hash table of queues, or just a single queue
+ * a bit in fs.flags tells us which one
+ */
+ struct dn_ht *qht;
+ struct dn_schk *sched; /* Sched we are linked to */
+ SLIST_ENTRY(dn_fsk) sch_chain; /* list of fsk attached to sched */
+
+ /* bucket index used by drain routine to drain queues for this
+ * flowset
+ */
+ int drain_bucket;
+ /* Parameters related to RED / GRED */
+ /* original values are in dn_fs */
+ int w_q ; /* queue weight (scaled) */
+ int max_th ; /* maximum threshold for queue (scaled) */
+ int min_th ; /* minimum threshold for queue (scaled) */
+ int max_p ; /* maximum value for p_b (scaled) */
+
+ u_int c_1 ; /* max_p/(max_th-min_th) (scaled) */
+ u_int c_2 ; /* max_p*min_th/(max_th-min_th) (scaled) */
+ u_int c_3 ; /* for GRED, (1-max_p)/max_th (scaled) */
+ u_int c_4 ; /* for GRED, 1 - 2*max_p (scaled) */
+ u_int * w_q_lookup ; /* lookup table for computing (1-w_q)^t */
+ u_int lookup_depth ; /* depth of lookup table */
+ int lookup_step ; /* granularity inside the lookup table */
+ int lookup_weight ; /* equal to (1-w_q)^t / (1-w_q)^(t+1) */
+ int avg_pkt_size ; /* average packet size */
+ int max_pkt_size ; /* max packet size */
+};
+
+/*
+ * A queue is created as a child of a flowset unless it belongs to
+ * a !MULTIQUEUE scheduler. It is normally in a hash table in the
+ * flowset. fs always points to the parent flowset.
+ * si normally points to the sch_inst, unless the flowset has been
+ * detached from the scheduler -- in this case si == NULL and we
+ * should not enqueue.
+ */
+struct dn_queue {
+ struct dn_flow ni; /* oid, flow_id, stats */
+ struct mq mq; /* packets queue */
+ struct dn_sch_inst *_si; /* owner scheduler instance */
+ SLIST_ENTRY(dn_queue) q_next; /* hash chain list for qht */
+ struct dn_fsk *fs; /* parent flowset. */
+
+ /* RED parameters */
+ int avg; /* average queue length est. (scaled) */
+ int count; /* arrivals since last RED drop */
+ int random; /* random value (scaled) */
+ uint64_t q_time; /* start of queue idle time */
+
+};
+
+/*
+ * The kernel side of a scheduler. Contains the userland config,
+ * a link, pointer to extra config arguments from command line,
+ * kernel flags, and a pointer to the scheduler methods.
+ * It is stored in a hash table, and holds a list of all
+ * flowsets and scheduler instances.
+ * XXX sch must be at the beginning, see schk_hash().
+ */
+struct dn_schk {
+ struct dn_sch sch;
+ struct dn_alg *fp; /* Pointer to scheduler functions */
+ struct dn_link link; /* The link, embedded */
+ struct dn_profile *profile; /* delay profile, if any */
+ struct dn_id *cfg; /* extra config arguments */
+
+ SLIST_ENTRY(dn_schk) schk_next; /* hash chain for schedhash */
+
+ struct dn_fsk_head fsk_list; /* all fsk linked to me */
+ struct dn_fsk *fs; /* Flowset for !MULTIQUEUE */
+
+ /* bucket index used by the drain routine to drain the scheduler
+ * instance for this flowset.
+ */
+ int drain_bucket;
+
+ /* Hash table of all instances (through sch.sched_mask)
+ * or single instance if no mask. Always valid.
+ */
+ struct dn_ht *siht;
+};
+
+
+/*
+ * Scheduler instance.
+ * Contains variables and all queues relative to this instance.
+ * This struct is created at runtime.
+ */
+struct dn_sch_inst {
+ struct dn_flow ni; /* oid, flowid and stats */
+ SLIST_ENTRY(dn_sch_inst) si_next; /* hash chain for siht */
+ struct delay_line dline;
+ struct dn_schk *sched; /* the template */
+ int kflags; /* DN_ACTIVE */
+
+ int64_t credit; /* bits I can transmit (more or less). */
+ uint64_t sched_time; /* time link was scheduled in ready_heap */
+ uint64_t idle_time; /* start of scheduler instance idle time */
+
+ /* q_count is the number of queues that this instance is using.
+ * The counter is incremented or decremented when
+ * a reference from the queue is created or deleted.
+ * It is used to make sure that a scheduler instance can be safely
+ * deleted by the drain routine. See notes below.
+ */
+ int q_count;
+
+};
+
+/*
+ * NOTE about object drain.
+ * The system will automatically (XXX check when) drain queues and
+ * scheduler instances when they are idle.
+ * A queue is idle when it has no packets; an instance is idle when
+ * it is not in the evheap heap, and the corresponding delay line is empty.
+ * A queue can be safely deleted when it is idle, because the scheduler
+ * function xxx_free_queue() removes any references to it.
+ * An instance can only be deleted when no queues reference it. To be sure
+ * of that, a counter (q_count) stores the number of queues that are pointing
+ * to the instance.
+ *
+ * XXX
+ * Order of scan:
+ * - take all flowsets in a bucket of the flowset hash table
+ * - take all queues in a bucket for the flowset
+ * - increment the queue bucket
+ * - scan next flowset bucket
+ * Nothing is done if a bucket contains no entries.
+ *
+ * The same scheme is used for scheduler instances.
+ */
+
+
+/* kernel-side flags. Linux has DN_DELETE in fcntl.h
+ */
+enum {
+ /* 1 and 2 are reserved for the SCAN flags */
+ DN_DESTROY = 0x0004, /* destroy */
+ DN_DELETE_FS = 0x0008, /* destroy flowset */
+ DN_DETACH = 0x0010,
+ DN_ACTIVE = 0x0020, /* object is in evheap */
+ DN_F_DLINE = 0x0040, /* object is a delay line */
+ DN_F_SCHI = 0x00C0, /* object is a sched.instance */
+ DN_QHT_IS_Q = 0x0100, /* in flowset, qht is a single queue */
+};
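+
+/* Note: DN_F_SCHI (0x00C0) includes the DN_F_DLINE bit (0x0040), so a
+ * plain bit test on 0x0040 matches both; telling the two apart needs
+ * a comparison of the whole value.
+ */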
+
+extern struct dn_parms dn_cfg;
+
+int dummynet_io(struct mbuf **, int , struct ip_fw_args *);
+void dummynet_task(void *context, int pending);
+void dn_reschedule(void);
+
+struct dn_queue *ipdn_q_find(struct dn_fsk *, struct dn_sch_inst *,
+ struct ipfw_flow_id *);
+struct dn_sch_inst *ipdn_si_find(struct dn_schk *, struct ipfw_flow_id *);
+
+/*
+ * copy_range is a template for requests for ranges of pipes/queues/scheds.
+ * The number of ranges is variable and can be derived by o.len.
+ * As a default, we use a small number of entries so that the struct
+ * fits easily on the stack and is sufficient for most common requests.
+ */
+#define DEFAULT_RANGES 5
+struct copy_range {
+ struct dn_id o;
+ uint32_t r[ 2 * DEFAULT_RANGES ];
+};
+
+struct copy_args {
+ char **start;
+ char *end;
+ int flags;
+ int type;
+ struct copy_range *extra; /* extra filtering */
+};
+
+struct sockopt;
+int ip_dummynet_compat(struct sockopt *sopt);
+int dummynet_get(struct sockopt *sopt, void **compat);
+int dn_c_copy_q (void *_ni, void *arg);
+int dn_c_copy_pipe(struct dn_schk *s, struct copy_args *a, int nq);
+int dn_c_copy_fs(struct dn_fsk *f, struct copy_args *a, int nq);
+int dn_compat_copy_queue(struct copy_args *a, void *_o);
+int dn_compat_copy_pipe(struct copy_args *a, void *_o);
+int copy_data_helper_compat(void *_o, void *_arg);
+int dn_compat_calc_size(struct dn_parms dn_cfg);
+int do_config(void *p, int l);
+
+/* function to drain idle object */
+void dn_drain_scheduler(void);
+void dn_drain_queue(void);
+
+#endif /* _IP_DN_PRIVATE_H */
diff --git a/rtems/freebsd/netinet/ipfw/ip_dummynet.c b/rtems/freebsd/netinet/ipfw/ip_dummynet.c
new file mode 100644
index 00000000..0849154c
--- /dev/null
+++ b/rtems/freebsd/netinet/ipfw/ip_dummynet.c
@@ -0,0 +1,2297 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 1998-2002,2010 Luigi Rizzo, Universita` di Pisa
+ * Portions Copyright (c) 2000 Akamba Corp.
+ * All rights reserved
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * Configuration and internal object management for dummynet.
+ */
+
+#include <rtems/freebsd/local/opt_inet6.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/module.h>
+#include <rtems/freebsd/sys/priv.h>
+#include <rtems/freebsd/sys/proc.h>
+#include <rtems/freebsd/sys/rwlock.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/socketvar.h>
+#include <rtems/freebsd/sys/time.h>
+#include <rtems/freebsd/sys/taskqueue.h>
+#include <rtems/freebsd/net/if.h> /* IFNAMSIZ, struct ifaddr, ifq head, lock.h mutex.h */
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/ip_var.h> /* ip_output(), IP_FORWARDING */
+#include <rtems/freebsd/netinet/ip_fw.h>
+#include <rtems/freebsd/netinet/ipfw/ip_fw_private.h>
+#include <rtems/freebsd/netinet/ipfw/dn_heap.h>
+#include <rtems/freebsd/netinet/ip_dummynet.h>
+#include <rtems/freebsd/netinet/ipfw/ip_dn_private.h>
+#include <rtems/freebsd/netinet/ipfw/dn_sched.h>
+
+/* which objects to copy */
+#define DN_C_LINK 0x01
+#define DN_C_SCH 0x02
+#define DN_C_FLOW 0x04
+#define DN_C_FS 0x08
+#define DN_C_QUEUE 0x10
+
+/* argument passed to schk_new() when creating a new scheduler */
+struct schk_new_arg {
+ struct dn_alg *fp;
+ struct dn_sch *sch;
+};
+
+/*---- callout hooks. ----*/
+static struct callout dn_timeout;
+static struct task dn_task;
+static struct taskqueue *dn_tq = NULL;
+
+static void
+dummynet(void * __unused unused)
+{
+
+ taskqueue_enqueue(dn_tq, &dn_task);
+}
+
+void
+dn_reschedule(void)
+{
+ callout_reset(&dn_timeout, 1, dummynet, NULL);
+}
+/*----- end of callout hooks -----*/
+
+/* Return a scheduler descriptor given the type or name. */
+static struct dn_alg *
+find_sched_type(int type, char *name)
+{
+ struct dn_alg *d;
+
+ SLIST_FOREACH(d, &dn_cfg.schedlist, next) {
+ if (d->type == type || (name && !strcmp(d->name, name)))
+ return d;
+ }
+ return NULL; /* not found */
+}
+
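+/*
+ * Bound the variable *v: if it is below 'lo' reset it to the default
+ * 'dflt', if above 'hi' clamp it to 'hi'; log the adjustment when a
+ * 'msg' is supplied. Return the (possibly adjusted) value.
+ */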
+int
+ipdn_bound_var(int *v, int dflt, int lo, int hi, const char *msg)
+{
+ int oldv = *v;
+ const char *op = NULL;
+ if (oldv < lo) {
+ *v = dflt;
+ op = "Bump";
+ } else if (oldv > hi) {
+ *v = hi;
+ op = "Clamp";
+ } else
+ return *v;
+ if (op && msg)
+ printf("%s %s to %d (was %d)\n", op, msg, *v, oldv);
+ return *v;
+}
+
+/*---- flow_id mask, hash and compare functions ---*/
+/*
+ * The flow_id includes the 5-tuple, the queue/pipe number
+ * which we store in the extra area in host order,
+ * and for ipv6 also the flow_id6.
+ * XXX see if we want the tos byte (can store in 'flags')
+ */
+static struct ipfw_flow_id *
+flow_id_mask(struct ipfw_flow_id *mask, struct ipfw_flow_id *id)
+{
+ int is_v6 = IS_IP6_FLOW_ID(id);
+
+ id->dst_port &= mask->dst_port;
+ id->src_port &= mask->src_port;
+ id->proto &= mask->proto;
+ id->extra &= mask->extra;
+ if (is_v6) {
+ APPLY_MASK(&id->dst_ip6, &mask->dst_ip6);
+ APPLY_MASK(&id->src_ip6, &mask->src_ip6);
+ id->flow_id6 &= mask->flow_id6;
+ } else {
+ id->dst_ip &= mask->dst_ip;
+ id->src_ip &= mask->src_ip;
+ }
+ return id;
+}
+
+/* computes an OR of two masks, result in dst and also returned */
+static struct ipfw_flow_id *
+flow_id_or(struct ipfw_flow_id *src, struct ipfw_flow_id *dst)
+{
+ int is_v6 = IS_IP6_FLOW_ID(dst);
+
+ dst->dst_port |= src->dst_port;
+ dst->src_port |= src->src_port;
+ dst->proto |= src->proto;
+ dst->extra |= src->extra;
+ if (is_v6) {
+#define OR_MASK(_d, _s) \
+ (_d)->__u6_addr.__u6_addr32[0] |= (_s)->__u6_addr.__u6_addr32[0]; \
+ (_d)->__u6_addr.__u6_addr32[1] |= (_s)->__u6_addr.__u6_addr32[1]; \
+ (_d)->__u6_addr.__u6_addr32[2] |= (_s)->__u6_addr.__u6_addr32[2]; \
+ (_d)->__u6_addr.__u6_addr32[3] |= (_s)->__u6_addr.__u6_addr32[3];
+ OR_MASK(&dst->dst_ip6, &src->dst_ip6);
+ OR_MASK(&dst->src_ip6, &src->src_ip6);
+#undef OR_MASK
+ dst->flow_id6 |= src->flow_id6;
+ } else {
+ dst->dst_ip |= src->dst_ip;
+ dst->src_ip |= src->src_ip;
+ }
+ return dst;
+}
+
+static int
+nonzero_mask(struct ipfw_flow_id *m)
+{
+ if (m->dst_port || m->src_port || m->proto || m->extra)
+ return 1;
+ if (IS_IP6_FLOW_ID(m)) {
+ return
+ m->dst_ip6.__u6_addr.__u6_addr32[0] ||
+ m->dst_ip6.__u6_addr.__u6_addr32[1] ||
+ m->dst_ip6.__u6_addr.__u6_addr32[2] ||
+ m->dst_ip6.__u6_addr.__u6_addr32[3] ||
+ m->src_ip6.__u6_addr.__u6_addr32[0] ||
+ m->src_ip6.__u6_addr.__u6_addr32[1] ||
+ m->src_ip6.__u6_addr.__u6_addr32[2] ||
+ m->src_ip6.__u6_addr.__u6_addr32[3] ||
+ m->flow_id6;
+ } else {
+ return m->dst_ip || m->src_ip;
+ }
+}
+
+/* XXX we may want a better hash function */
+static uint32_t
+flow_id_hash(struct ipfw_flow_id *id)
+{
+ uint32_t i;
+
+ if (IS_IP6_FLOW_ID(id)) {
+ uint32_t *d = (uint32_t *)&id->dst_ip6;
+ uint32_t *s = (uint32_t *)&id->src_ip6;
+ i = (d[0] ) ^ (d[1]) ^
+ (d[2] ) ^ (d[3]) ^
+ (d[0] >> 15) ^ (d[1] >> 15) ^
+ (d[2] >> 15) ^ (d[3] >> 15) ^
+ (s[0] << 1) ^ (s[1] << 1) ^
+ (s[2] << 1) ^ (s[3] << 1) ^
+ (s[0] << 16) ^ (s[1] << 16) ^
+ (s[2] << 16) ^ (s[3] << 16) ^
+ (id->dst_port << 1) ^ (id->src_port) ^
+ (id->extra) ^
+ (id->proto ) ^ (id->flow_id6);
+ } else {
+ i = (id->dst_ip) ^ (id->dst_ip >> 15) ^
+ (id->src_ip << 1) ^ (id->src_ip >> 16) ^
+ (id->extra) ^
+ (id->dst_port << 1) ^ (id->src_port) ^ (id->proto);
+ }
+ return i;
+}
+
+/* Like bcmp, returns 0 if ids match, 1 otherwise. */
+static int
+flow_id_cmp(struct ipfw_flow_id *id1, struct ipfw_flow_id *id2)
+{
+ int is_v6 = IS_IP6_FLOW_ID(id1);
+
+ if (!is_v6) {
+ if (IS_IP6_FLOW_ID(id2))
+ return 1; /* different address families */
+
+ return (id1->dst_ip == id2->dst_ip &&
+ id1->src_ip == id2->src_ip &&
+ id1->dst_port == id2->dst_port &&
+ id1->src_port == id2->src_port &&
+ id1->proto == id2->proto &&
+ id1->extra == id2->extra) ? 0 : 1;
+ }
+ /* the ipv6 case */
+ return (
+ !bcmp(&id1->dst_ip6,&id2->dst_ip6, sizeof(id1->dst_ip6)) &&
+ !bcmp(&id1->src_ip6,&id2->src_ip6, sizeof(id1->src_ip6)) &&
+ id1->dst_port == id2->dst_port &&
+ id1->src_port == id2->src_port &&
+ id1->proto == id2->proto &&
+ id1->extra == id2->extra &&
+ id1->flow_id6 == id2->flow_id6) ? 0 : 1;
+}
+/*--------- end of flow-id mask, hash and compare ---------*/
+
+/*--- support functions for the qht hashtable ----
+ * Entries are hashed by flow-id
+ */
+static uint32_t
+q_hash(uintptr_t key, int flags, void *arg)
+{
+ /* compute the hash slot from the flow id */
+ struct ipfw_flow_id *id = (flags & DNHT_KEY_IS_OBJ) ?
+ &((struct dn_queue *)key)->ni.fid :
+ (struct ipfw_flow_id *)key;
+
+ return flow_id_hash(id);
+}
+
+static int
+q_match(void *obj, uintptr_t key, int flags, void *arg)
+{
+ struct dn_queue *o = (struct dn_queue *)obj;
+ struct ipfw_flow_id *id2;
+
+ if (flags & DNHT_KEY_IS_OBJ) {
+ /* compare pointers */
+ id2 = &((struct dn_queue *)key)->ni.fid;
+ } else {
+ id2 = (struct ipfw_flow_id *)key;
+ }
+ return (0 == flow_id_cmp(&o->ni.fid, id2));
+}
+
+/*
+ * create a new queue instance for the given 'key'.
+ */
+static void *
+q_new(uintptr_t key, int flags, void *arg)
+{
+ struct dn_queue *q, *template = arg;
+ struct dn_fsk *fs = template->fs;
+ int size = sizeof(*q) + fs->sched->fp->q_datalen;
+
+ q = malloc(size, M_DUMMYNET, M_NOWAIT | M_ZERO);
+ if (q == NULL) {
+ D("no memory for new queue");
+ return NULL;
+ }
+
+ set_oid(&q->ni.oid, DN_QUEUE, size);
+ if (fs->fs.flags & DN_QHT_HASH)
+ q->ni.fid = *(struct ipfw_flow_id *)key;
+ q->fs = fs;
+ q->_si = template->_si;
+ q->_si->q_count++;
+
+ if (fs->sched->fp->new_queue)
+ fs->sched->fp->new_queue(q);
+ dn_cfg.queue_count++;
+ return q;
+}
+
+/*
+ * Notify schedulers that a queue is going away.
+ * If (flags & DN_DESTROY), also free the packets.
+ * The version for callbacks is called q_delete_cb().
+ */
+static void
+dn_delete_queue(struct dn_queue *q, int flags)
+{
+ struct dn_fsk *fs = q->fs;
+
+ // D("fs %p si %p\n", fs, q->_si);
+ /* notify the parent scheduler that the queue is going away */
+ if (fs && fs->sched->fp->free_queue)
+ fs->sched->fp->free_queue(q);
+ q->_si->q_count--;
+ q->_si = NULL;
+ if (flags & DN_DESTROY) {
+ if (q->mq.head)
+ dn_free_pkts(q->mq.head);
+ bzero(q, sizeof(*q)); // safety
+ free(q, M_DUMMYNET);
+ dn_cfg.queue_count--;
+ }
+}
+
+static int
+q_delete_cb(void *q, void *arg)
+{
+ int flags = (int)(uintptr_t)arg;
+ dn_delete_queue(q, flags);
+ return (flags & DN_DESTROY) ? DNHT_SCAN_DEL : 0;
+}
+
+/*
+ * calls dn_delete_queue/q_delete_cb on all queues,
+ * which notifies the parent scheduler and possibly drains packets.
+ * flags & DN_DESTROY: drains queues and destroy qht;
+ */
+static void
+qht_delete(struct dn_fsk *fs, int flags)
+{
+ ND("fs %d start flags %d qht %p",
+ fs->fs.fs_nr, flags, fs->qht);
+ if (!fs->qht)
+ return;
+ if (fs->fs.flags & DN_QHT_HASH) {
+ dn_ht_scan(fs->qht, q_delete_cb, (void *)(uintptr_t)flags);
+ if (flags & DN_DESTROY) {
+ dn_ht_free(fs->qht, 0);
+ fs->qht = NULL;
+ }
+ } else {
+ dn_delete_queue((struct dn_queue *)(fs->qht), flags);
+ if (flags & DN_DESTROY)
+ fs->qht = NULL;
+ }
+}
+
+/*
+ * Find and possibly create the queue for a MULTIQUEUE scheduler.
+ * We never call it for !MULTIQUEUE (the queue is in the sch_inst).
+ */
+struct dn_queue *
+ipdn_q_find(struct dn_fsk *fs, struct dn_sch_inst *si,
+ struct ipfw_flow_id *id)
+{
+ struct dn_queue template;
+
+ template._si = si;
+ template.fs = fs;
+
+ if (fs->fs.flags & DN_QHT_HASH) {
+ struct ipfw_flow_id masked_id;
+ if (fs->qht == NULL) {
+ fs->qht = dn_ht_init(NULL, fs->fs.buckets,
+ offsetof(struct dn_queue, q_next),
+ q_hash, q_match, q_new);
+ if (fs->qht == NULL)
+ return NULL;
+ }
+ masked_id = *id;
+ flow_id_mask(&fs->fsk_mask, &masked_id);
+ return dn_ht_find(fs->qht, (uintptr_t)&masked_id,
+ DNHT_INSERT, &template);
+ } else {
+ if (fs->qht == NULL)
+ fs->qht = q_new(0, 0, &template);
+ return (struct dn_queue *)fs->qht;
+ }
+}
+/*--- end of queue hash table ---*/
+
+/*--- support functions for the sch_inst hashtable ----
+ *
+ * These are hashed by flow-id
+ */
+static uint32_t
+si_hash(uintptr_t key, int flags, void *arg)
+{
+ /* compute the hash slot from the flow id */
+ struct ipfw_flow_id *id = (flags & DNHT_KEY_IS_OBJ) ?
+ &((struct dn_sch_inst *)key)->ni.fid :
+ (struct ipfw_flow_id *)key;
+
+ return flow_id_hash(id);
+}
+
+static int
+si_match(void *obj, uintptr_t key, int flags, void *arg)
+{
+ struct dn_sch_inst *o = obj;
+ struct ipfw_flow_id *id2;
+
+ id2 = (flags & DNHT_KEY_IS_OBJ) ?
+ &((struct dn_sch_inst *)key)->ni.fid :
+ (struct ipfw_flow_id *)key;
+ return flow_id_cmp(&o->ni.fid, id2) == 0;
+}
+
+/*
+ * create a new instance for the given 'key'
+ * Allocate memory for instance, delay line and scheduler private data.
+ */
+static void *
+si_new(uintptr_t key, int flags, void *arg)
+{
+ struct dn_schk *s = arg;
+ struct dn_sch_inst *si;
+ int l = sizeof(*si) + s->fp->si_datalen;
+
+ si = malloc(l, M_DUMMYNET, M_NOWAIT | M_ZERO);
+ if (si == NULL)
+ goto error;
+ /* Set length only for the part passed up to userland. */
+ set_oid(&si->ni.oid, DN_SCH_I, sizeof(struct dn_flow));
+ set_oid(&(si->dline.oid), DN_DELAY_LINE,
+ sizeof(struct delay_line));
+ /* mark si and dline as outside the event queue */
+ si->ni.oid.id = si->dline.oid.id = -1;
+
+ si->sched = s;
+ si->dline.si = si;
+
+ if (s->fp->new_sched && s->fp->new_sched(si)) {
+ D("new_sched error");
+ goto error;
+ }
+ if (s->sch.flags & DN_HAVE_MASK)
+ si->ni.fid = *(struct ipfw_flow_id *)key;
+
+ dn_cfg.si_count++;
+ return si;
+
+error:
+ if (si) {
+ bzero(si, sizeof(*si)); // safety
+ free(si, M_DUMMYNET);
+ }
+ return NULL;
+}
+
+/*
+ * Callback from siht to delete all scheduler instances. Remove
+ * si and delay line from the system heap, destroy all queues.
+ * We assume that all flowset have been notified and do not
+ * point to us anymore.
+ */
+static int
+si_destroy(void *_si, void *arg)
+{
+ struct dn_sch_inst *si = _si;
+ struct dn_schk *s = si->sched;
+ struct delay_line *dl = &si->dline;
+
+ if (dl->oid.subtype) /* remove delay line from event heap */
+ heap_extract(&dn_cfg.evheap, dl);
+ dn_free_pkts(dl->mq.head); /* drain delay line */
+ if (si->kflags & DN_ACTIVE) /* remove si from event heap */
+ heap_extract(&dn_cfg.evheap, si);
+ if (s->fp->free_sched)
+ s->fp->free_sched(si);
+ bzero(si, sizeof(*si)); /* safety */
+ free(si, M_DUMMYNET);
+ dn_cfg.si_count--;
+ return DNHT_SCAN_DEL;
+}
+
+/*
+ * Find the scheduler instance for this packet. If we need to apply
+ * a mask, do so on a local copy of the flow_id to preserve the original.
+ * Assume siht is always initialized if we have a mask.
+ */
+struct dn_sch_inst *
+ipdn_si_find(struct dn_schk *s, struct ipfw_flow_id *id)
+{
+
+ if (s->sch.flags & DN_HAVE_MASK) {
+ struct ipfw_flow_id id_t = *id;
+ flow_id_mask(&s->sch.sched_mask, &id_t);
+ return dn_ht_find(s->siht, (uintptr_t)&id_t,
+ DNHT_INSERT, s);
+ }
+ if (!s->siht)
+ s->siht = si_new(0, 0, s);
+ return (struct dn_sch_inst *)s->siht;
+}
+
+/* callback to flush credit for the scheduler instance */
+static int
+si_reset_credit(void *_si, void *arg)
+{
+ struct dn_sch_inst *si = _si;
+ struct dn_link *p = &si->sched->link;
+
+ si->credit = p->burst + (dn_cfg.io_fast ? p->bandwidth : 0);
+ return 0;
+}
+
+static void
+schk_reset_credit(struct dn_schk *s)
+{
+ if (s->sch.flags & DN_HAVE_MASK)
+ dn_ht_scan(s->siht, si_reset_credit, NULL);
+ else if (s->siht)
+ si_reset_credit(s->siht, NULL);
+}
+/*---- end of sch_inst hashtable ---------------------*/
+
+/*-------------------------------------------------------
+ * flowset hash (fshash) support. Entries are hashed by fs_nr.
+ * New allocations are put in the fsunlinked list, from which
+ * they are removed when they point to a specific scheduler.
+ */
+static uint32_t
+fsk_hash(uintptr_t key, int flags, void *arg)
+{
+ uint32_t i = !(flags & DNHT_KEY_IS_OBJ) ? key :
+ ((struct dn_fsk *)key)->fs.fs_nr;
+
+ return ( (i>>8)^(i>>4)^i );
+}
+
+static int
+fsk_match(void *obj, uintptr_t key, int flags, void *arg)
+{
+ struct dn_fsk *fs = obj;
+ int i = !(flags & DNHT_KEY_IS_OBJ) ? key :
+ ((struct dn_fsk *)key)->fs.fs_nr;
+
+ return (fs->fs.fs_nr == i);
+}
+
+static void *
+fsk_new(uintptr_t key, int flags, void *arg)
+{
+ struct dn_fsk *fs;
+
+ fs = malloc(sizeof(*fs), M_DUMMYNET, M_NOWAIT | M_ZERO);
+ if (fs) {
+ set_oid(&fs->fs.oid, DN_FS, sizeof(fs->fs));
+ dn_cfg.fsk_count++;
+ fs->drain_bucket = 0;
+ SLIST_INSERT_HEAD(&dn_cfg.fsu, fs, sch_chain);
+ }
+ return fs;
+}
+
+/*
+ * detach flowset from its current scheduler. Flags as follows:
+ * DN_DETACH removes from the fsk_list
+ * DN_DESTROY deletes individual queues
+ * DN_DELETE_FS destroys the flowset (otherwise goes in unlinked).
+ */
+static void
+fsk_detach(struct dn_fsk *fs, int flags)
+{
+ if (flags & DN_DELETE_FS)
+ flags |= DN_DESTROY;
+ ND("fs %d from sched %d flags %s %s %s",
+ fs->fs.fs_nr, fs->fs.sched_nr,
+ (flags & DN_DELETE_FS) ? "DEL_FS":"",
+ (flags & DN_DESTROY) ? "DEL":"",
+ (flags & DN_DETACH) ? "DET":"");
+ if (flags & DN_DETACH) { /* detach from the list */
+ struct dn_fsk_head *h;
+ h = fs->sched ? &fs->sched->fsk_list : &dn_cfg.fsu;
+ SLIST_REMOVE(h, fs, dn_fsk, sch_chain);
+ }
+ /* Free the RED parameters, they will be recomputed on
+ * subsequent attach if needed.
+ */
+ if (fs->w_q_lookup)
+ free(fs->w_q_lookup, M_DUMMYNET);
+ fs->w_q_lookup = NULL;
+ qht_delete(fs, flags);
+ if (fs->sched && fs->sched->fp->free_fsk)
+ fs->sched->fp->free_fsk(fs);
+ fs->sched = NULL;
+ if (flags & DN_DELETE_FS) {
+ bzero(fs, sizeof(fs)); /* safety */
+ free(fs, M_DUMMYNET);
+ dn_cfg.fsk_count--;
+ } else {
+ SLIST_INSERT_HEAD(&dn_cfg.fsu, fs, sch_chain);
+ }
+}
+
+/*
+ * Detach or destroy all flowsets in a list.
+ * flags specifies what to do:
+ * DN_DESTROY: flush all queues
+ * DN_DELETE_FS: DN_DESTROY + destroy flowset
+ * DN_DELETE_FS implies DN_DESTROY
+ */
+static void
+fsk_detach_list(struct dn_fsk_head *h, int flags)
+{
+ struct dn_fsk *fs;
+ int n = 0; /* only for stats */
+
+ ND("head %p flags %x", h, flags);
+ while ((fs = SLIST_FIRST(h))) {
+ SLIST_REMOVE_HEAD(h, sch_chain);
+ n++;
+ fsk_detach(fs, flags);
+ }
+ ND("done %d flowsets", n);
+}
+
+/*
+ * called on 'queue X delete' -- removes the flowset from fshash,
+ * deletes all queues for the flowset, and removes the flowset.
+ */
+static int
+delete_fs(int i, int locked)
+{
+ struct dn_fsk *fs;
+ int err = 0;
+
+ if (!locked)
+ DN_BH_WLOCK();
+ fs = dn_ht_find(dn_cfg.fshash, i, DNHT_REMOVE, NULL);
+ ND("fs %d found %p", i, fs);
+ if (fs) {
+ fsk_detach(fs, DN_DETACH | DN_DELETE_FS);
+ err = 0;
+ } else
+ err = EINVAL;
+ if (!locked)
+ DN_BH_WUNLOCK();
+ return err;
+}
+
+/*----- end of flowset hashtable support -------------*/
+
+/*------------------------------------------------------------
+ * Scheduler hash. When searching by index we pass sched_nr,
+ * otherwise we pass struct dn_sch * which is the first field in
+ * struct dn_schk so we can cast between the two. We use this trick
+ * in the create phase, when only the template is available (but it
+ * should be fixed).
+ */
+static uint32_t
+schk_hash(uintptr_t key, int flags, void *_arg)
+{
+ uint32_t i = !(flags & DNHT_KEY_IS_OBJ) ? key :
+ ((struct dn_schk *)key)->sch.sched_nr;
+ return ( (i>>8)^(i>>4)^i );
+}
+
+static int
+schk_match(void *obj, uintptr_t key, int flags, void *_arg)
+{
+ struct dn_schk *s = (struct dn_schk *)obj;
+ int i = !(flags & DNHT_KEY_IS_OBJ) ? key :
+ ((struct dn_schk *)key)->sch.sched_nr;
+ return (s->sch.sched_nr == i);
+}
+
+/*
+ * Create the entry and initialize the instance hash table if needed.
+ * Leave s->fp unset so we can tell whether a dn_ht_find() returns
+ * a new object or a previously existing one.
+ */
+static void *
+schk_new(uintptr_t key, int flags, void *arg)
+{
+ struct schk_new_arg *a = arg;
+ struct dn_schk *s;
+ int l = sizeof(*s) +a->fp->schk_datalen;
+
+ s = malloc(l, M_DUMMYNET, M_NOWAIT | M_ZERO);
+ if (s == NULL)
+ return NULL;
+ set_oid(&s->link.oid, DN_LINK, sizeof(s->link));
+ s->sch = *a->sch; // copy initial values
+ s->link.link_nr = s->sch.sched_nr;
+ SLIST_INIT(&s->fsk_list);
+ /* initialize the hash table or create the single instance */
+ s->fp = a->fp; /* si_new needs this */
+ s->drain_bucket = 0;
+ if (s->sch.flags & DN_HAVE_MASK) {
+ s->siht = dn_ht_init(NULL, s->sch.buckets,
+ offsetof(struct dn_sch_inst, si_next),
+ si_hash, si_match, si_new);
+ if (s->siht == NULL) {
+ free(s, M_DUMMYNET);
+ return NULL;
+ }
+ }
+ s->fp = NULL; /* mark as a new scheduler */
+ dn_cfg.schk_count++;
+ return s;
+}
+
+/*
+ * Callback for sched delete. Notify all attached flowsets to
+ * detach from the scheduler, destroy the internal flowset, and
+ * all instances. The scheduler goes away too.
+ * arg is 0 (only detach flowsets and destroy instances)
+ * DN_DESTROY (detach & delete queues, delete schk)
+ * or DN_DELETE_FS (delete queues and flowsets, delete schk)
+ */
+static int
+schk_delete_cb(void *obj, void *arg)
+{
+ struct dn_schk *s = obj;
+#if 0
+ int a = (int)arg;
+ ND("sched %d arg %s%s",
+ s->sch.sched_nr,
+ a&DN_DESTROY ? "DEL ":"",
+ a&DN_DELETE_FS ? "DEL_FS":"");
+#endif
+ fsk_detach_list(&s->fsk_list, arg ? DN_DESTROY : 0);
+ /* no more flowset pointing to us now */
+ if (s->sch.flags & DN_HAVE_MASK)
+ dn_ht_scan(s->siht, si_destroy, NULL);
+ else if (s->siht)
+ si_destroy(s->siht, NULL);
+ if (s->profile) {
+ free(s->profile, M_DUMMYNET);
+ s->profile = NULL;
+ }
+ s->siht = NULL;
+ if (s->fp->destroy)
+ s->fp->destroy(s);
+ bzero(s, sizeof(*s)); // safety
+ free(obj, M_DUMMYNET);
+ dn_cfg.schk_count--;
+ return DNHT_SCAN_DEL;
+}
+
+/*
+ * called on a 'sched X delete' command. Deletes a single scheduler.
+ * This is done by removing from the schedhash, unlinking all
+ * flowsets and deleting their traffic.
+ */
+static int
+delete_schk(int i)
+{
+ struct dn_schk *s;
+
+ s = dn_ht_find(dn_cfg.schedhash, i, DNHT_REMOVE, NULL);
+ ND("%d %p", i, s);
+ if (!s)
+ return EINVAL;
+ delete_fs(i + DN_MAX_ID, 1); /* first delete internal fs */
+ /* then detach flowsets, delete traffic */
+ schk_delete_cb(s, (void*)(uintptr_t)DN_DESTROY);
+ return 0;
+}
+/*--- end of schk hashtable support ---*/
+
+static int
+copy_obj(char **start, char *end, void *_o, const char *msg, int i)
+{
+ struct dn_id *o = _o;
+ int have = end - *start;
+
+ if (have < o->len || o->len == 0 || o->type == 0) {
+ D("(WARN) type %d %s %d have %d need %d",
+ o->type, msg, i, have, o->len);
+ return 1;
+ }
+ ND("type %d %s %d len %d", o->type, msg, i, o->len);
+ bcopy(_o, *start, o->len);
+ if (o->type == DN_LINK) {
+ /* Adjust burst parameter for link */
+ struct dn_link *l = (struct dn_link *)*start;
+ l->burst = div64(l->burst, 8 * hz);
+ } else if (o->type == DN_SCH) {
+ /* Set id->id to the number of instances */
+ struct dn_schk *s = _o;
+ struct dn_id *id = (struct dn_id *)(*start);
+ id->id = (s->sch.flags & DN_HAVE_MASK) ?
+ dn_ht_entries(s->siht) : (s->siht ? 1 : 0);
+ }
+ *start += o->len;
+ return 0;
+}
+
+/* Specific function to copy a queue.
+ * Copies only the user-visible part of a queue (which is in
+ * a struct dn_flow), and sets len accordingly.
+ */
+static int
+copy_obj_q(char **start, char *end, void *_o, const char *msg, int i)
+{
+ struct dn_id *o = _o;
+ int have = end - *start;
+ int len = sizeof(struct dn_flow); /* see above comment */
+
+ if (have < len || o->len == 0 || o->type != DN_QUEUE) {
+ D("ERROR type %d %s %d have %d need %d",
+ o->type, msg, i, have, len);
+ return 1;
+ }
+ ND("type %d %s %d len %d", o->type, msg, i, len);
+ bcopy(_o, *start, len);
+ ((struct dn_id*)(*start))->len = len;
+ *start += len;
+ return 0;
+}
+
+static int
+copy_q_cb(void *obj, void *arg)
+{
+ struct dn_queue *q = obj;
+ struct copy_args *a = arg;
+ struct dn_flow *ni = (struct dn_flow *)(*a->start);
+ if (copy_obj_q(a->start, a->end, &q->ni, "queue", -1))
+ return DNHT_SCAN_END;
+ ni->oid.type = DN_FLOW; /* override the DN_QUEUE */
+ ni->oid.id = si_hash((uintptr_t)&ni->fid, 0, NULL);
+ return 0;
+}
+
+static int
+copy_q(struct copy_args *a, struct dn_fsk *fs, int flags)
+{
+ if (!fs->qht)
+ return 0;
+ if (fs->fs.flags & DN_QHT_HASH)
+ dn_ht_scan(fs->qht, copy_q_cb, a);
+ else
+ copy_q_cb(fs->qht, a);
+ return 0;
+}
+
+/*
+ * This routine only copies the initial part of a profile? XXX
+ */
+static int
+copy_profile(struct copy_args *a, struct dn_profile *p)
+{
+ int have = a->end - *a->start;
+ /* XXX here we check for max length */
+ int profile_len = sizeof(struct dn_profile) -
+ ED_MAX_SAMPLES_NO*sizeof(int);
+
+ if (p == NULL)
+ return 0;
+ if (have < profile_len) {
+ D("error have %d need %d", have, profile_len);
+ return 1;
+ }
+ bcopy(p, *a->start, profile_len);
+ ((struct dn_id *)(*a->start))->len = profile_len;
+ *a->start += profile_len;
+ return 0;
+}
+
+static int
+copy_flowset(struct copy_args *a, struct dn_fsk *fs, int flags)
+{
+ struct dn_fs *ufs = (struct dn_fs *)(*a->start);
+ if (!fs)
+ return 0;
+ ND("flowset %d", fs->fs.fs_nr);
+ if (copy_obj(a->start, a->end, &fs->fs, "flowset", fs->fs.fs_nr))
+ return DNHT_SCAN_END;
+ ufs->oid.id = (fs->fs.flags & DN_QHT_HASH) ?
+ dn_ht_entries(fs->qht) : (fs->qht ? 1 : 0);
+ if (flags) { /* copy queues */
+ copy_q(a, fs, 0);
+ }
+ return 0;
+}
+
+static int
+copy_si_cb(void *obj, void *arg)
+{
+ struct dn_sch_inst *si = obj;
+ struct copy_args *a = arg;
+ struct dn_flow *ni = (struct dn_flow *)(*a->start);
+ if (copy_obj(a->start, a->end, &si->ni, "inst",
+ si->sched->sch.sched_nr))
+ return DNHT_SCAN_END;
+ ni->oid.type = DN_FLOW; /* override the DN_SCH_I */
+ ni->oid.id = si_hash((uintptr_t)si, DNHT_KEY_IS_OBJ, NULL);
+ return 0;
+}
+
+static int
+copy_si(struct copy_args *a, struct dn_schk *s, int flags)
+{
+ if (s->sch.flags & DN_HAVE_MASK)
+ dn_ht_scan(s->siht, copy_si_cb, a);
+ else if (s->siht)
+ copy_si_cb(s->siht, a);
+ return 0;
+}
+
+/*
+ * compute a list of children of a scheduler and copy up
+ */
+static int
+copy_fsk_list(struct copy_args *a, struct dn_schk *s, int flags)
+{
+ struct dn_fsk *fs;
+ struct dn_id *o;
+ uint32_t *p;
+
+ int n = 0, space = sizeof(*o);
+ SLIST_FOREACH(fs, &s->fsk_list, sch_chain) {
+ if (fs->fs.fs_nr < DN_MAX_ID)
+ n++;
+ }
+ space += n * sizeof(uint32_t);
+ DX(3, "sched %d has %d flowsets", s->sch.sched_nr, n);
+ if (a->end - *(a->start) < space)
+ return DNHT_SCAN_END;
+ o = (struct dn_id *)(*(a->start));
+ o->len = space;
+ *a->start += o->len;
+ o->type = DN_TEXT;
+ p = (uint32_t *)(o+1);
+ SLIST_FOREACH(fs, &s->fsk_list, sch_chain)
+ if (fs->fs.fs_nr < DN_MAX_ID)
+ *p++ = fs->fs.fs_nr;
+ return 0;
+}
+
+static int
+copy_data_helper(void *_o, void *_arg)
+{
+ struct copy_args *a = _arg;
+ uint32_t *r = a->extra->r; /* start of first range */
+ uint32_t *lim; /* first invalid pointer */
+ int n;
+
+ lim = (uint32_t *)((char *)(a->extra) + a->extra->o.len);
+
+ if (a->type == DN_LINK || a->type == DN_SCH) {
+ /* pipe|sched show, we receive a dn_schk */
+ struct dn_schk *s = _o;
+
+ n = s->sch.sched_nr;
+ if (a->type == DN_SCH && n >= DN_MAX_ID)
+ return 0; /* not a scheduler */
+ if (a->type == DN_LINK && n <= DN_MAX_ID)
+ return 0; /* not a pipe */
+
+ /* see if the object is within one of our ranges */
+ for (;r < lim; r += 2) {
+ if (n < r[0] || n > r[1])
+ continue;
+ /* Found a valid entry, copy and we are done */
+ if (a->flags & DN_C_LINK) {
+ if (copy_obj(a->start, a->end,
+ &s->link, "link", n))
+ return DNHT_SCAN_END;
+ if (copy_profile(a, s->profile))
+ return DNHT_SCAN_END;
+ if (copy_flowset(a, s->fs, 0))
+ return DNHT_SCAN_END;
+ }
+ if (a->flags & DN_C_SCH) {
+ if (copy_obj(a->start, a->end,
+ &s->sch, "sched", n))
+ return DNHT_SCAN_END;
+ /* list all attached flowsets */
+ if (copy_fsk_list(a, s, 0))
+ return DNHT_SCAN_END;
+ }
+ if (a->flags & DN_C_FLOW)
+ copy_si(a, s, 0);
+ break;
+ }
+ } else if (a->type == DN_FS) {
+ /* queue show, skip internal flowsets */
+ struct dn_fsk *fs = _o;
+
+ n = fs->fs.fs_nr;
+ if (n >= DN_MAX_ID)
+ return 0;
+ /* see if the object is within one of our ranges */
+ for (;r < lim; r += 2) {
+ if (n < r[0] || n > r[1])
+ continue;
+ if (copy_flowset(a, fs, 0))
+ return DNHT_SCAN_END;
+ copy_q(a, fs, 0);
+ break; /* we are done */
+ }
+ }
+ return 0;
+}
+
+static inline struct dn_schk *
+locate_scheduler(int i)
+{
+ return dn_ht_find(dn_cfg.schedhash, i, 0, NULL);
+}
+
+/*
+ * red parameters are in fixed point arithmetic.
+ */
+static int
+config_red(struct dn_fsk *fs)
+{
+ int64_t s, idle, weight, w0;
+ int t, i;
+
+ fs->w_q = fs->fs.w_q;
+ fs->max_p = fs->fs.max_p;
+ D("called");
+ /* Doing stuff that was in userland */
+ i = fs->sched->link.bandwidth;
+ s = (i <= 0) ? 0 :
+ hz * dn_cfg.red_avg_pkt_size * 8 * SCALE(1) / i;
+
+ idle = div64((s * 3) , fs->w_q); /* s, fs->w_q scaled; idle not scaled */
+ fs->lookup_step = div64(idle , dn_cfg.red_lookup_depth);
+ /* fs->lookup_step not scaled, */
+ if (!fs->lookup_step)
+ fs->lookup_step = 1;
+ w0 = weight = SCALE(1) - fs->w_q; //fs->w_q scaled
+
+ for (t = fs->lookup_step; t > 1; --t)
+ weight = SCALE_MUL(weight, w0);
+ fs->lookup_weight = (int)(weight); // scaled
+
+ /* Now doing stuff that was in kerneland */
+ fs->min_th = SCALE(fs->fs.min_th);
+ fs->max_th = SCALE(fs->fs.max_th);
+
+ fs->c_1 = fs->max_p / (fs->fs.max_th - fs->fs.min_th);
+ fs->c_2 = SCALE_MUL(fs->c_1, SCALE(fs->fs.min_th));
+
+ if (fs->fs.flags & DN_IS_GENTLE_RED) {
+ fs->c_3 = (SCALE(1) - fs->max_p) / fs->fs.max_th;
+ fs->c_4 = SCALE(1) - 2 * fs->max_p;
+ }
+
+ /* If the lookup table already exists, free it and create it again. */
+ if (fs->w_q_lookup) {
+ free(fs->w_q_lookup, M_DUMMYNET);
+ fs->w_q_lookup = NULL;
+ }
+ if (dn_cfg.red_lookup_depth == 0) {
+ printf("\ndummynet: net.inet.ip.dummynet.red_lookup_depth"
+ "must be > 0\n");
+ fs->fs.flags &= ~DN_IS_RED;
+ fs->fs.flags &= ~DN_IS_GENTLE_RED;
+ return (EINVAL);
+ }
+ fs->lookup_depth = dn_cfg.red_lookup_depth;
+ fs->w_q_lookup = (u_int *)malloc(fs->lookup_depth * sizeof(int),
+ M_DUMMYNET, M_NOWAIT);
+ if (fs->w_q_lookup == NULL) {
+ printf("dummynet: sorry, cannot allocate red lookup table\n");
+ fs->fs.flags &= ~DN_IS_RED;
+ fs->fs.flags &= ~DN_IS_GENTLE_RED;
+ return(ENOSPC);
+ }
+
+ /* Fill the lookup table with (1 - w_q)^x */
+ fs->w_q_lookup[0] = SCALE(1) - fs->w_q;
+
+ for (i = 1; i < fs->lookup_depth; i++)
+ fs->w_q_lookup[i] =
+ SCALE_MUL(fs->w_q_lookup[i - 1], fs->lookup_weight);
+
+ if (dn_cfg.red_avg_pkt_size < 1)
+ dn_cfg.red_avg_pkt_size = 512;
+ fs->avg_pkt_size = dn_cfg.red_avg_pkt_size;
+ if (dn_cfg.red_max_pkt_size < 1)
+ dn_cfg.red_max_pkt_size = 1500;
+ fs->max_pkt_size = dn_cfg.red_max_pkt_size;
+ D("exit");
+ return 0;
+}
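+
+/*
+ * Shape of the lookup table built above (a sketch): w_q_lookup[i]
+ * approximates (1 - w_q)^(1 + i * lookup_step) in fixed point, since
+ * entry 0 is (1 - w_q) and each step multiplies by
+ * lookup_weight = (1 - w_q)^lookup_step. red_drops() indexes it with
+ * the scaled idle time to decay q->avg after the queue was empty.
+ */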
+
+/* Scan all flowset attached to this scheduler and update red */
+static void
+update_red(struct dn_schk *s)
+{
+ struct dn_fsk *fs;
+ SLIST_FOREACH(fs, &s->fsk_list, sch_chain) {
+ if (fs && (fs->fs.flags & DN_IS_RED))
+ config_red(fs);
+ }
+}
+
+/* attach flowset to scheduler s, possibly requeue */
+static void
+fsk_attach(struct dn_fsk *fs, struct dn_schk *s)
+{
+ ND("remove fs %d from fsunlinked, link to sched %d",
+ fs->fs.fs_nr, s->sch.sched_nr);
+ SLIST_REMOVE(&dn_cfg.fsu, fs, dn_fsk, sch_chain);
+ fs->sched = s;
+ SLIST_INSERT_HEAD(&s->fsk_list, fs, sch_chain);
+ if (s->fp->new_fsk)
+ s->fp->new_fsk(fs);
+ /* XXX compute fsk_mask */
+ fs->fsk_mask = fs->fs.flow_mask;
+ if (fs->sched->sch.flags & DN_HAVE_MASK)
+ flow_id_or(&fs->sched->sch.sched_mask, &fs->fsk_mask);
+ if (fs->qht) {
+ /*
+ * we must drain qht according to the old
+ * type, and reinsert according to the new one.
+ * The requeue is complex -- in general we need to
+ * reclassify every single packet.
+ * For the time being, let's hope qht is never set
+ * when we reach this point.
+ */
+ D("XXX TODO requeue from fs %d to sch %d",
+ fs->fs.fs_nr, s->sch.sched_nr);
+ fs->qht = NULL;
+ }
+ /* set the new type for qht */
+ if (nonzero_mask(&fs->fsk_mask))
+ fs->fs.flags |= DN_QHT_HASH;
+ else
+ fs->fs.flags &= ~DN_QHT_HASH;
+
+ /* XXX config_red() can fail... */
+ if (fs->fs.flags & DN_IS_RED)
+ config_red(fs);
+}
+
+/* update all flowsets which may refer to this scheduler */
+static void
+update_fs(struct dn_schk *s)
+{
+ struct dn_fsk *fs, *tmp;
+
+ SLIST_FOREACH_SAFE(fs, &dn_cfg.fsu, sch_chain, tmp) {
+ if (s->sch.sched_nr != fs->fs.sched_nr) {
+ D("fs %d for sch %d not %d still unlinked",
+ fs->fs.fs_nr, fs->fs.sched_nr,
+ s->sch.sched_nr);
+ continue;
+ }
+ fsk_attach(fs, s);
+ }
+}
+
+/*
+ * Configuration -- to preserve backward compatibility we use
+ * the following scheme (N is 65536)
+ * NUMBER SCHED LINK FLOWSET
+ * 1 .. N-1 (1)WFQ (2)WFQ (3)queue
+ * N+1 .. 2N-1 (4)FIFO (5)FIFO (6)FIFO for sched 1..N-1
+ * 2N+1 .. 3N-1 -- -- (7)FIFO for sched N+1..2N-1
+ *
+ * "pipe i config" configures #1, #2 and #3
+ * "sched i config" configures #1 and possibly #6
+ * "queue i config" configures #3
+ * #1 is configured with 'pipe i config' or 'sched i config'
+ * #2 is configured with 'pipe i config', and created if not
+ * existing with 'sched i config'
+ * #3 is configured with 'queue i config'
+ * #4 is automatically configured after #1, can only be FIFO
+ * #5 is automatically configured after #2
+ * #6 is automatically created when #1 is !MULTIQUEUE,
+ * and can be updated.
+ * #7 is automatically configured after #2
+ */
+
+/*
+ * configure a link (and its FIFO instance)
+ */
+static int
+config_link(struct dn_link *p, struct dn_id *arg)
+{
+ int i;
+
+ if (p->oid.len != sizeof(*p)) {
+ D("invalid pipe len %d", p->oid.len);
+ return EINVAL;
+ }
+ i = p->link_nr;
+ if (i <= 0 || i >= DN_MAX_ID)
+ return EINVAL;
+ /*
+ * The config program passes parameters as follows:
+ * bw = bits/second (0 means no limits),
+ * delay = ms, must be translated into ticks.
+ * qsize = slots/bytes
+ * burst ???
+ */
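+ /*
+ * Worked example (illustrative): with hz = 1000, the statements
+ * below turn a delay of 25 ms into (25 * 1000) / 1000 = 25 ticks,
+ * and scale a 1500 byte burst to 1500 * 8 * 1000; the factor
+ * 8 * hz makes the burst commensurable with the credit the
+ * scheduler accrues from the bandwidth (bits/s) each tick.
+ */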
+ p->delay = (p->delay * hz) / 1000;
+ /* Scale burst size: bytes -> bits * hz */
+ p->burst *= 8 * hz;
+
+ DN_BH_WLOCK();
+ /* do it twice, base link and FIFO link */
+ for (; i < 2*DN_MAX_ID; i += DN_MAX_ID) {
+ struct dn_schk *s = locate_scheduler(i);
+ if (s == NULL) {
+ DN_BH_WUNLOCK();
+ D("sched %d not found", i);
+ return EINVAL;
+ }
+ /* remove the profile if it exists */
+ if (s->profile) {
+ free(s->profile, M_DUMMYNET);
+ s->profile = NULL;
+ }
+ /* copy all parameters */
+ s->link.oid = p->oid;
+ s->link.link_nr = i;
+ s->link.delay = p->delay;
+ if (s->link.bandwidth != p->bandwidth) {
+ /* XXX bandwidth changes, need to update red params */
+ s->link.bandwidth = p->bandwidth;
+ update_red(s);
+ }
+ s->link.burst = p->burst;
+ schk_reset_credit(s);
+ }
+ dn_cfg.id++;
+ DN_BH_WUNLOCK();
+ return 0;
+}
+
+/*
+ * configure a flowset. Can be called from inside with locked=1.
+ */
+static struct dn_fsk *
+config_fs(struct dn_fs *nfs, struct dn_id *arg, int locked)
+{
+ int i;
+ struct dn_fsk *fs;
+
+ if (nfs->oid.len != sizeof(*nfs)) {
+ D("invalid flowset len %d", nfs->oid.len);
+ return NULL;
+ }
+ i = nfs->fs_nr;
+ if (i <= 0 || i >= 3*DN_MAX_ID)
+ return NULL;
+ ND("flowset %d", i);
+ /* XXX other sanity checks */
+ if (nfs->flags & DN_QSIZE_BYTES) {
+ ipdn_bound_var(&nfs->qsize, 16384,
+ 1500, dn_cfg.byte_limit, NULL); // "queue byte size");
+ } else {
+ ipdn_bound_var(&nfs->qsize, 50,
+ 1, dn_cfg.slot_limit, NULL); // "queue slot size");
+ }
+ if (nfs->flags & DN_HAVE_MASK) {
+ /* make sure we have some buckets */
+ ipdn_bound_var(&nfs->buckets, dn_cfg.hash_size,
+ 1, dn_cfg.max_hash_size, "flowset buckets");
+ } else {
+ nfs->buckets = 1; /* we only need 1 */
+ }
+ if (!locked)
+ DN_BH_WLOCK();
+ do { /* exit with break when done */
+ struct dn_schk *s;
+ int flags = nfs->sched_nr ? DNHT_INSERT : 0;
+ int j;
+ int oldc = dn_cfg.fsk_count;
+ fs = dn_ht_find(dn_cfg.fshash, i, flags, NULL);
+ if (fs == NULL) {
+ D("missing sched for flowset %d", i);
+ break;
+ }
+ /* grab some defaults from the existing one */
+ if (nfs->sched_nr == 0) /* reuse */
+ nfs->sched_nr = fs->fs.sched_nr;
+ for (j = 0; j < sizeof(nfs->par)/sizeof(nfs->par[0]); j++) {
+ if (nfs->par[j] == -1) /* reuse */
+ nfs->par[j] = fs->fs.par[j];
+ }
+ if (bcmp(&fs->fs, nfs, sizeof(*nfs)) == 0) {
+ ND("flowset %d unchanged", i);
+ break; /* no change, nothing to do */
+ }
+ if (oldc != dn_cfg.fsk_count) /* new item */
+ dn_cfg.id++;
+ s = locate_scheduler(nfs->sched_nr);
+ /* detach from old scheduler if needed, preserving
+ * queues if we need to reattach. Then update the
+ * configuration, and possibly attach to the new sched.
+ */
+ DX(2, "fs %d changed sched %d@%p to %d@%p",
+ fs->fs.fs_nr,
+ fs->fs.sched_nr, fs->sched, nfs->sched_nr, s);
+ if (fs->sched) {
+ int flags = s ? DN_DETACH : (DN_DETACH | DN_DESTROY);
+ flags |= DN_DESTROY; /* XXX temporary */
+ fsk_detach(fs, flags);
+ }
+ fs->fs = *nfs; /* copy configuration */
+ if (s != NULL)
+ fsk_attach(fs, s);
+ } while (0);
+ if (!locked)
+ DN_BH_WUNLOCK();
+ return fs;
+}
+
+/*
+ * config/reconfig a scheduler and its FIFO variant.
+ * For !MULTIQUEUE schedulers, also set up the flowset.
+ *
+ * On reconfigurations (detected because s->fp is set),
+ * detach existing flowsets preserving traffic, preserve link,
+ * and delete the old scheduler creating a new one.
+ */
+static int
+config_sched(struct dn_sch *_nsch, struct dn_id *arg)
+{
+ struct dn_schk *s;
+ struct schk_new_arg a; /* argument for schk_new */
+ int i;
+ struct dn_link p; /* copy of oldlink */
+ struct dn_profile *pf = NULL; /* copy of old link profile */
+ /* Used to preserve the mask parameter */
+ struct ipfw_flow_id new_mask;
+ int new_buckets = 0;
+ int new_flags = 0;
+ int pipe_cmd;
+ int err = ENOMEM;
+
+ a.sch = _nsch;
+ if (a.sch->oid.len != sizeof(*a.sch)) {
+ D("bad sched len %d", a.sch->oid.len);
+ return EINVAL;
+ }
+ i = a.sch->sched_nr;
+ if (i <= 0 || i >= DN_MAX_ID)
+ return EINVAL;
+ /* make sure we have some buckets */
+ if (a.sch->flags & DN_HAVE_MASK)
+ ipdn_bound_var(&a.sch->buckets, dn_cfg.hash_size,
+ 1, dn_cfg.max_hash_size, "sched buckets");
+ /* XXX other sanity checks */
+ bzero(&p, sizeof(p));
+
+ pipe_cmd = a.sch->flags & DN_PIPE_CMD;
+ a.sch->flags &= ~DN_PIPE_CMD; //XXX do it even if it is not set?
+ if (pipe_cmd) {
+ /* Copy mask parameter */
+ new_mask = a.sch->sched_mask;
+ new_buckets = a.sch->buckets;
+ new_flags = a.sch->flags;
+ }
+ DN_BH_WLOCK();
+again: /* run twice, for wfq and fifo */
+ /*
+ * lookup the type. If not supplied, use the previous one
+ * or default to WF2Q+. Otherwise, return an error.
+ */
+ dn_cfg.id++;
+ a.fp = find_sched_type(a.sch->oid.subtype, a.sch->name);
+ if (a.fp != NULL) {
+ /* found. Lookup or create entry */
+ s = dn_ht_find(dn_cfg.schedhash, i, DNHT_INSERT, &a);
+ } else if (a.sch->oid.subtype == 0 && !a.sch->name[0]) {
+ /* No type given. Search for an existing scheduler or retry with WF2Q+ */
+ s = dn_ht_find(dn_cfg.schedhash, i, 0, &a);
+ if (s != NULL) {
+ a.fp = s->fp;
+ /* Scheduler exists, skip to FIFO scheduler
+ * if command was pipe config...
+ */
+ if (pipe_cmd)
+ goto next;
+ } else {
+ /* New scheduler, create a wf2q+ with no mask
+ * if command was pipe config...
+ */
+ if (pipe_cmd) {
+ /* clear mask parameter */
+ bzero(&a.sch->sched_mask, sizeof(new_mask));
+ a.sch->buckets = 0;
+ a.sch->flags &= ~DN_HAVE_MASK;
+ }
+ a.sch->oid.subtype = DN_SCHED_WF2QP;
+ goto again;
+ }
+ } else {
+ D("invalid scheduler type %d %s",
+ a.sch->oid.subtype, a.sch->name);
+ err = EINVAL;
+ goto error;
+ }
+ /* normalize name and subtype */
+ a.sch->oid.subtype = a.fp->type;
+ bzero(a.sch->name, sizeof(a.sch->name));
+ strlcpy(a.sch->name, a.fp->name, sizeof(a.sch->name));
+ if (s == NULL) {
+ D("cannot allocate scheduler %d", i);
+ goto error;
+ }
+ /* restore existing link if any */
+ if (p.link_nr) {
+ s->link = p;
+ if (!pf || pf->link_nr != p.link_nr) { /* no saved value */
+ s->profile = NULL; /* XXX maybe not needed */
+ } else {
+ s->profile = malloc(sizeof(struct dn_profile),
+ M_DUMMYNET, M_NOWAIT | M_ZERO);
+ if (s->profile == NULL) {
+ D("cannot allocate profile");
+ goto error; //XXX
+ }
+ bcopy(pf, s->profile, sizeof(*pf));
+ }
+ }
+ p.link_nr = 0;
+ if (s->fp == NULL) {
+ DX(2, "sched %d new type %s", i, a.fp->name);
+ } else if (s->fp != a.fp ||
+ bcmp(a.sch, &s->sch, sizeof(*a.sch)) ) {
+ /* already existing. */
+ DX(2, "sched %d type changed from %s to %s",
+ i, s->fp->name, a.fp->name);
+ DX(4, " type/sub %d/%d -> %d/%d",
+ s->sch.oid.type, s->sch.oid.subtype,
+ a.sch->oid.type, a.sch->oid.subtype);
+ if (s->link.link_nr == 0)
+ D("XXX WARNING link 0 for sched %d", i);
+ p = s->link; /* preserve link */
+ if (s->profile) {/* preserve profile */
+ if (!pf)
+ pf = malloc(sizeof(*pf),
+ M_DUMMYNET, M_NOWAIT | M_ZERO);
+ if (pf) /* XXX should issue a warning otherwise */
+ bcopy(s->profile, pf, sizeof(*pf));
+ }
+ /* remove from the hash */
+ dn_ht_find(dn_cfg.schedhash, i, DNHT_REMOVE, NULL);
+ /* Detach flowsets, preserve queues. */
+ // schk_delete_cb(s, NULL);
+ // XXX temporarily, kill queues
+ schk_delete_cb(s, (void *)DN_DESTROY);
+ goto again;
+ } else {
+ DX(4, "sched %d unchanged type %s", i, a.fp->name);
+ }
+ /* complete initialization */
+ s->sch = *a.sch;
+ s->fp = a.fp;
+ s->cfg = arg;
+ // XXX schk_reset_credit(s);
+ /* create the internal flowset if needed,
+ * trying to reuse existing ones if available
+ */
+ if (!(s->fp->flags & DN_MULTIQUEUE) && !s->fs) {
+ s->fs = dn_ht_find(dn_cfg.fshash, i, 0, NULL);
+ if (!s->fs) {
+ struct dn_fs fs;
+ bzero(&fs, sizeof(fs));
+ set_oid(&fs.oid, DN_FS, sizeof(fs));
+ fs.fs_nr = i + DN_MAX_ID;
+ fs.sched_nr = i;
+ s->fs = config_fs(&fs, NULL, 1 /* locked */);
+ }
+ if (!s->fs) {
+ schk_delete_cb(s, (void *)DN_DESTROY);
+ D("error creating internal fs for %d", i);
+ goto error;
+ }
+ }
+ /* call init function after the flowset is created */
+ if (s->fp->config)
+ s->fp->config(s);
+ update_fs(s);
+next:
+ if (i < DN_MAX_ID) { /* now configure the FIFO instance */
+ i += DN_MAX_ID;
+ if (pipe_cmd) {
+ /* Restore mask parameter for FIFO */
+ a.sch->sched_mask = new_mask;
+ a.sch->buckets = new_buckets;
+ a.sch->flags = new_flags;
+ } else {
+ /* sched config shouldn't modify the FIFO scheduler */
+ if (dn_ht_find(dn_cfg.schedhash, i, 0, &a) != NULL) {
+ /* FIFO already exists, don't touch it */
+ err = 0; /* and this is not an error */
+ goto error;
+ }
+ }
+ a.sch->sched_nr = i;
+ a.sch->oid.subtype = DN_SCHED_FIFO;
+ bzero(a.sch->name, sizeof(a.sch->name));
+ goto again;
+ }
+ err = 0;
+error:
+ DN_BH_WUNLOCK();
+ if (pf)
+ free(pf, M_DUMMYNET);
+ return err;
+}
+
+/*
+ * attach a profile to a link
+ */
+static int
+config_profile(struct dn_profile *pf, struct dn_id *arg)
+{
+ struct dn_schk *s;
+ int i, olen, err = 0;
+
+ if (pf->oid.len < sizeof(*pf)) {
+ D("short profile len %d", pf->oid.len);
+ return EINVAL;
+ }
+ i = pf->link_nr;
+ if (i <= 0 || i >= DN_MAX_ID)
+ return EINVAL;
+ /* XXX other sanity checks */
+ DN_BH_WLOCK();
+ for (; i < 2*DN_MAX_ID; i += DN_MAX_ID) {
+ s = locate_scheduler(i);
+
+ if (s == NULL) {
+ err = EINVAL;
+ break;
+ }
+ dn_cfg.id++;
+ /*
+ * If we had a profile and the new one does not fit,
+ * or it is deleted, then we need to free memory.
+ */
+ if (s->profile && (pf->samples_no == 0 ||
+ s->profile->oid.len < pf->oid.len)) {
+ free(s->profile, M_DUMMYNET);
+ s->profile = NULL;
+ }
+ if (pf->samples_no == 0)
+ continue;
+ /*
+ * new profile, possibly allocate memory
+ * and copy data.
+ */
+ if (s->profile == NULL)
+ s->profile = malloc(pf->oid.len,
+ M_DUMMYNET, M_NOWAIT | M_ZERO);
+ if (s->profile == NULL) {
+ D("no memory for profile %d", i);
+ err = ENOMEM;
+ break;
+ }
+ /* preserve larger length XXX double check */
+ olen = s->profile->oid.len;
+ if (olen < pf->oid.len)
+ olen = pf->oid.len;
+ bcopy(pf, s->profile, pf->oid.len);
+ s->profile->oid.len = olen;
+ }
+ DN_BH_WUNLOCK();
+ return err;
+}
+
+/*
+ * Delete all objects.
+ */
+static void
+dummynet_flush(void)
+{
+
+ /* delete all schedulers and related links/queues/flowsets */
+ dn_ht_scan(dn_cfg.schedhash, schk_delete_cb,
+ (void *)(uintptr_t)DN_DELETE_FS);
+ /* delete all remaining (unlinked) flowsets */
+ DX(4, "still %d unlinked fs", dn_cfg.fsk_count);
+ dn_ht_free(dn_cfg.fshash, DNHT_REMOVE);
+ fsk_detach_list(&dn_cfg.fsu, DN_DELETE_FS);
+ /* Reinitialize system heap... */
+ heap_init(&dn_cfg.evheap, 16, offsetof(struct dn_id, id));
+}
+
+/*
+ * Main handler for configuration. We are guaranteed to be called
+ * with an oid which is at least a dn_id.
+ * - the first object is the command (config, delete, flush, ...)
+ * - config_link must be issued after the corresponding config_sched
+ * - parameters (DN_TEXT) for an object must precede the object
+ *   they refer to, and are processed on a config_sched.
+ */
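+/*
+ * Illustrative request layout (hypothetical values, for exposition
+ * only): a buffer configuring one pipe could carry, back to back,
+ *
+ *	struct dn_id   hdr;    type DN_CMD_CONFIG, id DN_API_VERSION
+ *	struct dn_sch  sch;    type DN_SCH,  sched_nr = 5
+ *	struct dn_link p;      type DN_LINK, link_nr  = 5
+ *	struct dn_fs   fs;     type DN_FS,   fs_nr    = 5
+ *
+ * and do_config() below walks the chain using each object's oid.len.
+ */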
+int
+do_config(void *p, int l)
+{
+ struct dn_id *next, *o;
+ int err = 0, err2 = 0;
+ struct dn_id *arg = NULL;
+ uintptr_t *a;
+
+ o = p;
+ if (o->id != DN_API_VERSION) {
+ D("invalid api version got %d need %d",
+ o->id, DN_API_VERSION);
+ return EINVAL;
+ }
+ for (; l >= sizeof(*o); o = next) {
+ struct dn_id *prev = arg;
+ if (o->len < sizeof(*o) || l < o->len) {
+ D("bad len o->len %d len %d", o->len, l);
+ err = EINVAL;
+ break;
+ }
+ l -= o->len;
+ next = (struct dn_id *)((char *)o + o->len);
+ err = 0;
+ switch (o->type) {
+ default:
+ D("cmd %d not implemented", o->type);
+ break;
+#ifdef EMULATE_SYSCTL
+ /* sysctl emulation.
+ * if we recognize the command, jump to the correct
+ * handler and return
+ */
+ case DN_SYSCTL_SET:
+ err = kesysctl_emu_set(p, l);
+ return err;
+#endif
+ case DN_CMD_CONFIG: /* simply a header */
+ break;
+
+ case DN_CMD_DELETE:
+ /* the argument is in the first uintptr_t after o */
+ a = (uintptr_t *)(o+1);
+ if (o->len < sizeof(*o) + sizeof(*a)) {
+ err = EINVAL;
+ break;
+ }
+ switch (o->subtype) {
+ case DN_LINK:
+ /* delete base and derived schedulers */
+ DN_BH_WLOCK();
+ err = delete_schk(*a);
+ err2 = delete_schk(*a + DN_MAX_ID);
+ DN_BH_WUNLOCK();
+ if (!err)
+ err = err2;
+ break;
+
+ case DN_FS:
+ err = (*a < 1 || *a >= DN_MAX_ID) ?
+ EINVAL : delete_fs(*a, 0);
+ break;
+
+ default:
+ D("invalid delete type %d",
+ o->subtype);
+ err = EINVAL;
+ break;
+ }
+ break;
+
+ case DN_CMD_FLUSH:
+ DN_BH_WLOCK();
+ dummynet_flush();
+ DN_BH_WUNLOCK();
+ break;
+ case DN_TEXT: /* store the argument for the next block */
+ prev = NULL;
+ arg = o;
+ break;
+ case DN_LINK:
+ err = config_link((struct dn_link *)o, arg);
+ break;
+ case DN_PROFILE:
+ err = config_profile((struct dn_profile *)o, arg);
+ break;
+ case DN_SCH:
+ err = config_sched((struct dn_sch *)o, arg);
+ break;
+ case DN_FS:
+ err = (NULL==config_fs((struct dn_fs *)o, arg, 0));
+ break;
+ }
+ if (prev)
+ arg = NULL;
+ if (err != 0)
+ break;
+ }
+ return err;
+}
+
+static int
+compute_space(struct dn_id *cmd, struct copy_args *a)
+{
+ int x = 0, need = 0;
+ int profile_size = sizeof(struct dn_profile) -
+ ED_MAX_SAMPLES_NO*sizeof(int);
+
+ /* NOTE about compute space:
+ * NP = dn_cfg.schk_count
+ * NSI = dn_cfg.si_count
+ * NF = dn_cfg.fsk_count
+ * NQ = dn_cfg.queue_count
+ * - ipfw pipe show
+ * (NP/2)*(dn_link + dn_sch + dn_id + dn_fs) only half scheduler
+ * link, scheduler template, flowset
+ * integrated in scheduler and header
+ * for flowset list
+ *    (NSI)*(dn_flow) all scheduler instances (including
+ *                       the queue instance)
+ * - ipfw sched show
+ * (NP/2)*(dn_link + dn_sch + dn_id + dn_fs) only half scheduler
+ * link, scheduler template, flowset
+ * integrated in scheduler and header
+ * for flowset list
+ * (NSI * dn_flow) all scheduler instances
+ * (NF * sizeof(uint_32)) space for flowset list linked to scheduler
+ * (NQ * dn_queue) all queue [XXXfor now not listed]
+ * - ipfw queue show
+ * (NF * dn_fs) all flowset
+ * (NQ * dn_queue) all queues
+ */
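+ /*
+ * Worked example (illustrative): for 'ipfw pipe show' with two
+ * pipes (schk_count = 4, counting the FIFO companions), two
+ * flowsets and three scheduler instances, the DN_LINK branch
+ * below asks for 2 * (dn_fs + profile_size) plus
+ * 2 * sizeof(uint32_t), and the flag-driven terms then add
+ * 2 * dn_sch, 2 * dn_id, 2 * dn_link and 3 * dn_flow.
+ */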
+ switch (cmd->subtype) {
+ default:
+ return -1;
+ /* XXX where do LINK and SCH differ ? */
+ /* 'ipfw sched show' could list all queues associated with
+ * a scheduler. This feature is disabled for now.
+ */
+ case DN_LINK: /* pipe show */
+ x = DN_C_LINK | DN_C_SCH | DN_C_FLOW;
+ need += dn_cfg.schk_count *
+ (sizeof(struct dn_fs) + profile_size) / 2;
+ need += dn_cfg.fsk_count * sizeof(uint32_t);
+ break;
+ case DN_SCH: /* sched show */
+ need += dn_cfg.schk_count *
+ (sizeof(struct dn_fs) + profile_size) / 2;
+ need += dn_cfg.fsk_count * sizeof(uint32_t);
+ x = DN_C_SCH | DN_C_LINK | DN_C_FLOW;
+ break;
+ case DN_FS: /* queue show */
+ x = DN_C_FS | DN_C_QUEUE;
+ break;
+ case DN_GET_COMPAT: /* compatibility mode */
+ need = dn_compat_calc_size(dn_cfg);
+ break;
+ }
+ a->flags = x;
+ if (x & DN_C_SCH) {
+ need += dn_cfg.schk_count * sizeof(struct dn_sch) / 2;
+ /* NOTE also, each fs might be attached to a sched */
+ need += dn_cfg.schk_count * sizeof(struct dn_id) / 2;
+ }
+ if (x & DN_C_FS)
+ need += dn_cfg.fsk_count * sizeof(struct dn_fs);
+ if (x & DN_C_LINK) {
+ need += dn_cfg.schk_count * sizeof(struct dn_link) / 2;
+ }
+ /*
+ * When exporting a queue to userland, only pass up the
+ * struct dn_flow, which is the only visible part.
+ */
+
+ if (x & DN_C_QUEUE)
+ need += dn_cfg.queue_count * sizeof(struct dn_flow);
+ if (x & DN_C_FLOW)
+ need += dn_cfg.si_count * (sizeof(struct dn_flow));
+ return need;
+}
+
+/*
+ * If compat != NULL dummynet_get is called in compatibility mode.
+ * *compat will be the pointer to the buffer to pass to ipfw
+ */
+int
+dummynet_get(struct sockopt *sopt, void **compat)
+{
+ int have, i, need, error;
+ char *start = NULL, *buf;
+ size_t sopt_valsize;
+ struct dn_id *cmd;
+ struct copy_args a;
+ struct copy_range r;
+ int l = sizeof(struct dn_id);
+
+ bzero(&a, sizeof(a));
+ bzero(&r, sizeof(r));
+
+ /* save and restore original sopt_valsize around copyin */
+ sopt_valsize = sopt->sopt_valsize;
+
+ cmd = &r.o;
+
+ if (!compat) {
+ /* copy at least an oid, and possibly a full object */
+ error = sooptcopyin(sopt, cmd, sizeof(r), sizeof(*cmd));
+ sopt->sopt_valsize = sopt_valsize;
+ if (error)
+ goto done;
+ l = cmd->len;
+#ifdef EMULATE_SYSCTL
+ /* sysctl emulation. */
+ if (cmd->type == DN_SYSCTL_GET)
+ return kesysctl_emu_get(sopt);
+#endif
+ if (l > sizeof(r)) {
+ /* request larger than default, allocate buffer */
+ cmd = malloc(l, M_DUMMYNET, M_WAITOK);
+ if (cmd == NULL)
+ return ENOMEM; //XXX
+ error = sooptcopyin(sopt, cmd, l, l);
+ sopt->sopt_valsize = sopt_valsize;
+ if (error)
+ goto done;
+ }
+ } else { /* compatibility */
+ error = 0;
+ cmd->type = DN_CMD_GET;
+ cmd->len = sizeof(struct dn_id);
+ cmd->subtype = DN_GET_COMPAT;
+ // cmd->id = sopt_valsize;
+ D("compatibility mode");
+ }
+ a.extra = (struct copy_range *)cmd;
+ if (cmd->len == sizeof(*cmd)) { /* no range, create a default */
+ uint32_t *rp = (uint32_t *)(cmd + 1);
+ cmd->len += 2* sizeof(uint32_t);
+ rp[0] = 1;
+ rp[1] = DN_MAX_ID - 1;
+ if (cmd->subtype == DN_LINK) {
+ rp[0] += DN_MAX_ID;
+ rp[1] += DN_MAX_ID;
+ }
+ }
+ /* Count the space needed (under lock) and allocate it (outside
+ * the lock). Exit with the lock held if we manage to get a
+ * large enough buffer. Try a few times, then give up.
+ */
+ for (have = 0, i = 0; i < 10; i++) {
+ DN_BH_WLOCK();
+ need = compute_space(cmd, &a);
+
+ /* if there is a range, ignore value from compute_space() */
+ if (l > sizeof(*cmd))
+ need = sopt_valsize - sizeof(*cmd);
+
+ if (need < 0) {
+ DN_BH_WUNLOCK();
+ error = EINVAL;
+ goto done;
+ }
+ need += sizeof(*cmd);
+ cmd->id = need;
+ if (have >= need)
+ break;
+
+ DN_BH_WUNLOCK();
+ if (start)
+ free(start, M_DUMMYNET);
+ start = NULL;
+ if (need > sopt_valsize)
+ break;
+
+ have = need;
+ start = malloc(have, M_DUMMYNET, M_WAITOK | M_ZERO);
+ if (start == NULL) {
+ error = ENOMEM;
+ goto done;
+ }
+ }
+
+ if (start == NULL) {
+ if (compat) {
+ *compat = NULL;
+ error = 1; // XXX
+ } else {
+ error = sooptcopyout(sopt, cmd, sizeof(*cmd));
+ }
+ goto done;
+ }
+ ND("have %d:%d sched %d, %d:%d links %d, %d:%d flowsets %d, "
+ "%d:%d si %d, %d:%d queues %d",
+ dn_cfg.schk_count, sizeof(struct dn_sch), DN_SCH,
+ dn_cfg.schk_count, sizeof(struct dn_link), DN_LINK,
+ dn_cfg.fsk_count, sizeof(struct dn_fs), DN_FS,
+ dn_cfg.si_count, sizeof(struct dn_flow), DN_SCH_I,
+ dn_cfg.queue_count, sizeof(struct dn_queue), DN_QUEUE);
+ sopt->sopt_valsize = sopt_valsize;
+ a.type = cmd->subtype;
+
+ if (compat == NULL) {
+ bcopy(cmd, start, sizeof(*cmd));
+ ((struct dn_id*)(start))->len = sizeof(struct dn_id);
+ buf = start + sizeof(*cmd);
+ } else
+ buf = start;
+ a.start = &buf;
+ a.end = start + have;
+ /* start copying other objects */
+ if (compat) {
+ a.type = DN_COMPAT_PIPE;
+ dn_ht_scan(dn_cfg.schedhash, copy_data_helper_compat, &a);
+ a.type = DN_COMPAT_QUEUE;
+ dn_ht_scan(dn_cfg.fshash, copy_data_helper_compat, &a);
+ } else if (a.type == DN_FS) {
+ dn_ht_scan(dn_cfg.fshash, copy_data_helper, &a);
+ } else {
+ dn_ht_scan(dn_cfg.schedhash, copy_data_helper, &a);
+ }
+ DN_BH_WUNLOCK();
+
+ if (compat) {
+ *compat = start;
+ sopt->sopt_valsize = buf - start;
+ /* free() is done by ip_dummynet_compat() */
+ start = NULL; //XXX hack
+ } else {
+ error = sooptcopyout(sopt, start, buf - start);
+ }
+done:
+ if (cmd && cmd != &r.o)
+ free(cmd, M_DUMMYNET);
+ if (start)
+ free(start, M_DUMMYNET);
+ return error;
+}
+
+/* Callback called on scheduler instance to delete it if idle */
+static int
+drain_scheduler_cb(void *_si, void *arg)
+{
+ struct dn_sch_inst *si = _si;
+
+ if ((si->kflags & DN_ACTIVE) || si->dline.mq.head != NULL)
+ return 0;
+
+ if (si->sched->fp->flags & DN_MULTIQUEUE) {
+ if (si->q_count == 0)
+ return si_destroy(si, NULL);
+ else
+ return 0;
+ } else { /* !DN_MULTIQUEUE */
+ if ((si+1)->ni.length == 0)
+ return si_destroy(si, NULL);
+ else
+ return 0;
+ }
+ return 0; /* unreachable */
+}
+
+/* Callback called on scheduler to check if it has instances */
+static int
+drain_scheduler_sch_cb(void *_s, void *arg)
+{
+ struct dn_schk *s = _s;
+
+ if (s->sch.flags & DN_HAVE_MASK) {
+ dn_ht_scan_bucket(s->siht, &s->drain_bucket,
+ drain_scheduler_cb, NULL);
+ s->drain_bucket++;
+ } else {
+ if (s->siht) {
+ if (drain_scheduler_cb(s->siht, NULL) == DNHT_SCAN_DEL)
+ s->siht = NULL;
+ }
+ }
+ return 0;
+}
+
+/* Called every tick, try to delete a 'bucket' of schedulers */
+void
+dn_drain_scheduler(void)
+{
+ dn_ht_scan_bucket(dn_cfg.schedhash, &dn_cfg.drain_sch,
+ drain_scheduler_sch_cb, NULL);
+ dn_cfg.drain_sch++;
+}
+
+/* Callback called on a queue to delete it if idle */
+static int
+drain_queue_cb(void *_q, void *arg)
+{
+ struct dn_queue *q = _q;
+
+ if (q->ni.length == 0) {
+ dn_delete_queue(q, DN_DESTROY);
+ return DNHT_SCAN_DEL; /* queue is deleted */
+ }
+
+ return 0; /* queue isn't deleted */
+}
+
+/* Callback called on a flowset to check whether it has queues */
+static int
+drain_queue_fs_cb(void *_fs, void *arg)
+{
+ struct dn_fsk *fs = _fs;
+
+ if (fs->fs.flags & DN_QHT_HASH) {
+ /* Flowset has a hash table for queues */
+ dn_ht_scan_bucket(fs->qht, &fs->drain_bucket,
+ drain_queue_cb, NULL);
+ fs->drain_bucket++;
+ } else {
+ /* No hash table for this flowset, null the pointer
+ * if the queue is deleted
+ */
+ if (fs->qht) {
+ if (drain_queue_cb(fs->qht, NULL) == DNHT_SCAN_DEL)
+ fs->qht = NULL;
+ }
+ }
+ return 0;
+}
+
+/* Called every tick, try to delete a 'bucket' of queues */
+void
+dn_drain_queue(void)
+{
+ /* scan a bucket of flowsets */
+ dn_ht_scan_bucket(dn_cfg.fshash, &dn_cfg.drain_fs,
+ drain_queue_fs_cb, NULL);
+ dn_cfg.drain_fs++;
+}
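+
+/*
+ * Note (illustrative): with the default hash_size of 64, each of the
+ * two drain routines above visits one bucket per tick, so every
+ * object is examined roughly once every 64 ticks; the ever-growing
+ * drain_fs / drain_sch counters are presumably reduced modulo the
+ * table size inside dn_ht_scan_bucket().
+ */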
+
+/*
+ * Handler for the various dummynet socket options
+ */
+static int
+ip_dn_ctl(struct sockopt *sopt)
+{
+ void *p = NULL;
+ int error, l;
+
+ error = priv_check(sopt->sopt_td, PRIV_NETINET_DUMMYNET);
+ if (error)
+ return (error);
+
+ /* Disallow sets in really-really secure mode. */
+ if (sopt->sopt_dir == SOPT_SET) {
+ error = securelevel_ge(sopt->sopt_td->td_ucred, 3);
+ if (error)
+ return (error);
+ }
+
+ switch (sopt->sopt_name) {
+ default :
+ D("dummynet: unknown option %d", sopt->sopt_name);
+ error = EINVAL;
+ break;
+
+ case IP_DUMMYNET_FLUSH:
+ case IP_DUMMYNET_CONFIGURE:
+ case IP_DUMMYNET_DEL: /* remove a pipe or queue */
+ case IP_DUMMYNET_GET:
+ D("dummynet: compat option %d", sopt->sopt_name);
+ error = ip_dummynet_compat(sopt);
+ break;
+
+ case IP_DUMMYNET3 :
+ if (sopt->sopt_dir == SOPT_GET) {
+ error = dummynet_get(sopt, NULL);
+ break;
+ }
+ l = sopt->sopt_valsize;
+ if (l < sizeof(struct dn_id) || l > 12000) {
+ D("argument len %d invalid", l);
+ break;
+ }
+ p = malloc(l, M_TEMP, M_WAITOK); // XXX can it fail ?
+ error = sooptcopyin(sopt, p, l, l);
+ if (error)
+ break ;
+ error = do_config(p, l);
+ break;
+ }
+
+ if (p != NULL)
+ free(p, M_TEMP);
+
+ return error ;
+}
+
+
+static void
+ip_dn_init(void)
+{
+ static int init_done = 0;
+
+ if (init_done)
+ return;
+ init_done = 1;
+ if (bootverbose)
+ printf("DUMMYNET with IPv6 initialized (100131)\n");
+
+ /* Set defaults here. MSVC does not accept initializers,
+ * and this is also useful for vimages
+ */
+ /* queue limits */
+ dn_cfg.slot_limit = 100; /* Foot shooting limit for queues. */
+ dn_cfg.byte_limit = 1024 * 1024;
+ dn_cfg.expire = 1;
+
+ /* RED parameters */
+ dn_cfg.red_lookup_depth = 256; /* default lookup table depth */
+ dn_cfg.red_avg_pkt_size = 512; /* default medium packet size */
+ dn_cfg.red_max_pkt_size = 1500; /* default max packet size */
+
+ /* hash tables */
+ dn_cfg.max_hash_size = 1024; /* max in the hash tables */
+ dn_cfg.hash_size = 64; /* default hash size */
+
+ /* create hash tables for schedulers and flowsets.
+ * In both we search by key and by pointer.
+ */
+ dn_cfg.schedhash = dn_ht_init(NULL, dn_cfg.hash_size,
+ offsetof(struct dn_schk, schk_next),
+ schk_hash, schk_match, schk_new);
+ dn_cfg.fshash = dn_ht_init(NULL, dn_cfg.hash_size,
+ offsetof(struct dn_fsk, fsk_next),
+ fsk_hash, fsk_match, fsk_new);
+
+ /* bucket index to drain object */
+ dn_cfg.drain_fs = 0;
+ dn_cfg.drain_sch = 0;
+
+ heap_init(&dn_cfg.evheap, 16, offsetof(struct dn_id, id));
+ SLIST_INIT(&dn_cfg.fsu);
+ SLIST_INIT(&dn_cfg.schedlist);
+
+ DN_LOCK_INIT();
+ ip_dn_ctl_ptr = ip_dn_ctl;
+ ip_dn_io_ptr = dummynet_io;
+
+ TASK_INIT(&dn_task, 0, dummynet_task, NULL);
+ dn_tq = taskqueue_create_fast("dummynet", M_NOWAIT,
+ taskqueue_thread_enqueue, &dn_tq);
+ taskqueue_start_threads(&dn_tq, 1, PI_NET, "dummynet");
+
+ callout_init(&dn_timeout, CALLOUT_MPSAFE);
+ callout_reset(&dn_timeout, 1, dummynet, NULL);
+
+ /* Initialize curr_time adjustment mechanics. */
+ getmicrouptime(&dn_cfg.prev_t);
+}
+
+#ifdef KLD_MODULE
+static void
+ip_dn_destroy(void)
+{
+ callout_drain(&dn_timeout);
+
+ DN_BH_WLOCK();
+ ip_dn_ctl_ptr = NULL;
+ ip_dn_io_ptr = NULL;
+
+ dummynet_flush();
+ DN_BH_WUNLOCK();
+ taskqueue_drain(dn_tq, &dn_task);
+ taskqueue_free(dn_tq);
+
+ dn_ht_free(dn_cfg.schedhash, 0);
+ dn_ht_free(dn_cfg.fshash, 0);
+ heap_free(&dn_cfg.evheap);
+
+ DN_LOCK_DESTROY();
+}
+#endif /* KLD_MODULE */
+
+static int
+dummynet_modevent(module_t mod, int type, void *data)
+{
+
+ if (type == MOD_LOAD) {
+ if (ip_dn_io_ptr) {
+ printf("DUMMYNET already loaded\n");
+ return EEXIST ;
+ }
+ ip_dn_init();
+ return 0;
+ } else if (type == MOD_UNLOAD) {
+#if !defined(KLD_MODULE)
+ printf("dummynet statically compiled, cannot unload\n");
+ return EINVAL ;
+#else
+ ip_dn_destroy();
+ return 0;
+#endif
+ } else
+ return EOPNOTSUPP;
+}
+
+/* modevent helpers for the modules */
+static int
+load_dn_sched(struct dn_alg *d)
+{
+ struct dn_alg *s;
+
+ if (d == NULL)
+ return 1; /* error */
+ ip_dn_init(); /* just in case, we need the lock */
+
+ /* Check that the mandatory functions exist */
+ if (d->enqueue == NULL || d->dequeue == NULL) {
+ D("missing enqueue or dequeue for %s", d->name);
+ return 1;
+ }
+
+ /* Check whether the scheduler already exists */
+ DN_BH_WLOCK();
+ SLIST_FOREACH(s, &dn_cfg.schedlist, next) {
+ if (strcmp(s->name, d->name) == 0) {
+ D("%s already loaded", d->name);
+ break; /* scheduler already exists */
+ }
+ }
+ if (s == NULL)
+ SLIST_INSERT_HEAD(&dn_cfg.schedlist, d, next);
+ DN_BH_WUNLOCK();
+ D("dn_sched %s %sloaded", d->name, s ? "not ":"");
+ return s ? 1 : 0;
+}
+
+static int
+unload_dn_sched(struct dn_alg *s)
+{
+ struct dn_alg *tmp, *r;
+ int err = EINVAL;
+
+ D("called for %s", s->name);
+
+ DN_BH_WLOCK();
+ SLIST_FOREACH_SAFE(r, &dn_cfg.schedlist, next, tmp) {
+ if (strcmp(s->name, r->name) != 0)
+ continue;
+ D("ref_count = %d", r->ref_count);
+ err = (r->ref_count != 0) ? EBUSY : 0;
+ if (err == 0)
+ SLIST_REMOVE(&dn_cfg.schedlist, r, dn_alg, next);
+ break;
+ }
+ DN_BH_WUNLOCK();
+ D("dn_sched %s %sunloaded", s->name, err ? "not ":"");
+ return err;
+}
+
+int
+dn_sched_modevent(module_t mod, int cmd, void *arg)
+{
+ struct dn_alg *sch = arg;
+
+ if (cmd == MOD_LOAD)
+ return load_dn_sched(sch);
+ else if (cmd == MOD_UNLOAD)
+ return unload_dn_sched(sch);
+ else
+ return EINVAL;
+}
+
+static moduledata_t dummynet_mod = {
+ "dummynet", dummynet_modevent, NULL
+};
+
+DECLARE_MODULE(dummynet, dummynet_mod,
+ SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_ANY-1);
+MODULE_DEPEND(dummynet, ipfw, 2, 2, 2);
+MODULE_VERSION(dummynet, 1);
+/* end of file */
diff --git a/rtems/freebsd/netinet/ipfw/ip_fw2.c b/rtems/freebsd/netinet/ipfw/ip_fw2.c
new file mode 100644
index 00000000..cc96fd68
--- /dev/null
+++ b/rtems/freebsd/netinet/ipfw/ip_fw2.c
@@ -0,0 +1,2495 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 2002-2009 Luigi Rizzo, Universita` di Pisa
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * The FreeBSD IP packet firewall, main file
+ */
+
+#if !defined(KLD_MODULE)
+#include <rtems/freebsd/local/opt_ipfw.h>
+#include <rtems/freebsd/local/opt_ipdivert.h>
+#include <rtems/freebsd/local/opt_ipdn.h>
+#include <rtems/freebsd/local/opt_inet.h>
+#ifndef INET
+#error IPFIREWALL requires INET.
+#endif /* INET */
+#endif
+#include <rtems/freebsd/local/opt_inet6.h>
+#include <rtems/freebsd/local/opt_ipsec.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/condvar.h>
+#include <rtems/freebsd/sys/eventhandler.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/jail.h>
+#include <rtems/freebsd/sys/module.h>
+#include <rtems/freebsd/sys/priv.h>
+#include <rtems/freebsd/sys/proc.h>
+#include <rtems/freebsd/sys/rwlock.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/socketvar.h>
+#include <rtems/freebsd/sys/sysctl.h>
+#include <rtems/freebsd/sys/syslog.h>
+#include <rtems/freebsd/sys/ucred.h>
+#include <rtems/freebsd/net/ethernet.h> /* for ETHERTYPE_IP */
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/route.h>
+#include <rtems/freebsd/net/pf_mtag.h>
+#include <rtems/freebsd/net/vnet.h>
+
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/in_var.h>
+#include <rtems/freebsd/netinet/in_pcb.h>
+#include <rtems/freebsd/netinet/ip.h>
+#include <rtems/freebsd/netinet/ip_var.h>
+#include <rtems/freebsd/netinet/ip_icmp.h>
+#include <rtems/freebsd/netinet/ip_fw.h>
+#include <rtems/freebsd/netinet/ipfw/ip_fw_private.h>
+#include <rtems/freebsd/netinet/ip_carp.h>
+#include <rtems/freebsd/netinet/pim.h>
+#include <rtems/freebsd/netinet/tcp_var.h>
+#include <rtems/freebsd/netinet/udp.h>
+#include <rtems/freebsd/netinet/udp_var.h>
+#include <rtems/freebsd/netinet/sctp.h>
+
+#include <rtems/freebsd/netinet/ip6.h>
+#include <rtems/freebsd/netinet/icmp6.h>
+#ifdef INET6
+#include <rtems/freebsd/netinet6/scope6_var.h>
+#include <rtems/freebsd/netinet6/ip6_var.h>
+#endif
+
+#include <rtems/freebsd/machine/in_cksum.h> /* XXX for in_cksum */
+
+#ifdef MAC
+#include <rtems/freebsd/security/mac/mac_framework.h>
+#endif
+
+/*
+ * static variables followed by global ones.
+ * All ipfw global variables are here.
+ */
+
+/* ipfw_vnet_ready controls when we are open for business */
+static VNET_DEFINE(int, ipfw_vnet_ready) = 0;
+#define V_ipfw_vnet_ready VNET(ipfw_vnet_ready)
+
+static VNET_DEFINE(int, fw_deny_unknown_exthdrs);
+#define V_fw_deny_unknown_exthdrs VNET(fw_deny_unknown_exthdrs)
+
+#ifdef IPFIREWALL_DEFAULT_TO_ACCEPT
+static int default_to_accept = 1;
+#else
+static int default_to_accept;
+#endif
+
+VNET_DEFINE(int, autoinc_step);
+
+/*
+ * Each rule belongs to one of 32 different sets (0..31).
+ * The variable set_disable contains one bit per set.
+ * If the bit is set, all rules in the corresponding set
+ * are disabled. Set RESVD_SET(31) is reserved for the default rule
+ * and rules that are not deleted by the flush command,
+ * and CANNOT be disabled.
+ * Rules in set RESVD_SET can only be deleted individually.
+ */
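+/*
+ * Example (illustrative): disabling and re-enabling rule set 5 is a
+ * single bit flip,
+ *
+ *	V_set_disable |=  (1 << 5);	disable set 5
+ *	V_set_disable &= ~(1 << 5);	re-enable set 5
+ *
+ * bit RESVD_SET (31) must never be set.
+ */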
+VNET_DEFINE(u_int32_t, set_disable);
+#define V_set_disable VNET(set_disable)
+
+VNET_DEFINE(int, fw_verbose);
+/* counter for ipfw_log(NULL...) */
+VNET_DEFINE(u_int64_t, norule_counter);
+VNET_DEFINE(int, verbose_limit);
+
+/* layer3_chain contains the list of rules for layer 3 */
+VNET_DEFINE(struct ip_fw_chain, layer3_chain);
+
+ipfw_nat_t *ipfw_nat_ptr = NULL;
+struct cfg_nat *(*lookup_nat_ptr)(struct nat_list *, int);
+ipfw_nat_cfg_t *ipfw_nat_cfg_ptr;
+ipfw_nat_cfg_t *ipfw_nat_del_ptr;
+ipfw_nat_cfg_t *ipfw_nat_get_cfg_ptr;
+ipfw_nat_cfg_t *ipfw_nat_get_log_ptr;
+
+#ifdef SYSCTL_NODE
+uint32_t dummy_def = IPFW_DEFAULT_RULE;
+uint32_t dummy_tables_max = IPFW_TABLES_MAX;
+
+SYSBEGIN(f3)
+
+SYSCTL_NODE(_net_inet_ip, OID_AUTO, fw, CTLFLAG_RW, 0, "Firewall");
+SYSCTL_VNET_INT(_net_inet_ip_fw, OID_AUTO, one_pass,
+ CTLFLAG_RW | CTLFLAG_SECURE3, &VNET_NAME(fw_one_pass), 0,
+ "Only do a single pass through ipfw when using dummynet(4)");
+SYSCTL_VNET_INT(_net_inet_ip_fw, OID_AUTO, autoinc_step,
+ CTLFLAG_RW, &VNET_NAME(autoinc_step), 0,
+ "Rule number auto-increment step");
+SYSCTL_VNET_INT(_net_inet_ip_fw, OID_AUTO, verbose,
+ CTLFLAG_RW | CTLFLAG_SECURE3, &VNET_NAME(fw_verbose), 0,
+ "Log matches to ipfw rules");
+SYSCTL_VNET_INT(_net_inet_ip_fw, OID_AUTO, verbose_limit,
+ CTLFLAG_RW, &VNET_NAME(verbose_limit), 0,
+ "Set upper limit of matches of ipfw rules logged");
+SYSCTL_UINT(_net_inet_ip_fw, OID_AUTO, default_rule, CTLFLAG_RD,
+ &dummy_def, 0,
+ "The default/max possible rule number.");
+SYSCTL_UINT(_net_inet_ip_fw, OID_AUTO, tables_max, CTLFLAG_RD,
+ &dummy_tables_max, 0,
+ "The maximum number of tables.");
+SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, default_to_accept, CTLFLAG_RDTUN,
+ &default_to_accept, 0,
+ "Make the default rule accept all packets.");
+TUNABLE_INT("net.inet.ip.fw.default_to_accept", &default_to_accept);
+SYSCTL_VNET_INT(_net_inet_ip_fw, OID_AUTO, static_count,
+ CTLFLAG_RD, &VNET_NAME(layer3_chain.n_rules), 0,
+ "Number of static rules");
+
+#ifdef INET6
+SYSCTL_DECL(_net_inet6_ip6);
+SYSCTL_NODE(_net_inet6_ip6, OID_AUTO, fw, CTLFLAG_RW, 0, "Firewall");
+SYSCTL_VNET_INT(_net_inet6_ip6_fw, OID_AUTO, deny_unknown_exthdrs,
+ CTLFLAG_RW | CTLFLAG_SECURE, &VNET_NAME(fw_deny_unknown_exthdrs), 0,
+ "Deny packets with unknown IPv6 Extension Headers");
+#endif /* INET6 */
+
+SYSEND
+
+#endif /* SYSCTL_NODE */
+
+
+/*
+ * Some macros used in the various matching options.
+ * L3HDR maps an ipv4 pointer into a layer3 header pointer of type T
+ * Other macros just cast void * into the appropriate type
+ */
+#define L3HDR(T, ip) ((T *)((u_int32_t *)(ip) + (ip)->ip_hl))
+#define TCP(p) ((struct tcphdr *)(p))
+#define SCTP(p) ((struct sctphdr *)(p))
+#define UDP(p) ((struct udphdr *)(p))
+#define ICMP(p) ((struct icmphdr *)(p))
+#define ICMP6(p) ((struct icmp6_hdr *)(p))
+
+static __inline int
+icmptype_match(struct icmphdr *icmp, ipfw_insn_u32 *cmd)
+{
+ int type = icmp->icmp_type;
+
+ return (type <= ICMP_MAXTYPE && (cmd->d[0] & (1<<type)) );
+}
+
+#define TT ( (1 << ICMP_ECHO) | (1 << ICMP_ROUTERSOLICIT) | \
+ (1 << ICMP_TSTAMP) | (1 << ICMP_IREQ) | (1 << ICMP_MASKREQ) )
+
+static int
+is_icmp_query(struct icmphdr *icmp)
+{
+ int type = icmp->icmp_type;
+
+ return (type <= ICMP_MAXTYPE && (TT & (1<<type)) );
+}
+#undef TT
+
+/*
+ * The following checks use two arrays of 8 or 16 bits to store the
+ * bits that we want set or clear, respectively. They are in the
+ * low and high half of cmd->arg1 or cmd->d[0].
+ *
+ * We scan options and store the bits we find set. We succeed if
+ *
+ * (want_set & ~bits) == 0 && (want_clear & ~bits) == want_clear
+ *
+ * The code is sometimes optimized not to store additional variables.
+ */
+
+static int
+flags_match(ipfw_insn *cmd, u_int8_t bits)
+{
+ u_char want_clear;
+ bits = ~bits;
+
+ if ( ((cmd->arg1 & 0xff) & bits) != 0)
+ return 0; /* some bits we want set were clear */
+ want_clear = (cmd->arg1 >> 8) & 0xff;
+ if ( (want_clear & bits) != want_clear)
+ return 0; /* some bits we want clear were set */
+ return 1;
+}
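+
+#if 0
+/*
+ * Illustrative sketch (not compiled in): how flags_match() evaluates
+ * a hypothetical "tcpflags syn,!ack" check. The low byte of arg1
+ * holds the bits that must be set, the high byte those that must be
+ * clear.
+ */
+static void
+flags_match_example(void)
+{
+ ipfw_insn cmd;
+
+ cmd.arg1 = TH_SYN | (TH_ACK << 8);
+ flags_match(&cmd, TH_SYN); /* 1: pure SYN matches */
+ flags_match(&cmd, TH_SYN | TH_ACK); /* 0: ACK must be clear */
+ flags_match(&cmd, TH_ACK); /* 0: SYN must be set */
+}
+#endif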
+
+static int
+ipopts_match(struct ip *ip, ipfw_insn *cmd)
+{
+ int optlen, bits = 0;
+ u_char *cp = (u_char *)(ip + 1);
+ int x = (ip->ip_hl << 2) - sizeof (struct ip);
+
+ for (; x > 0; x -= optlen, cp += optlen) {
+ int opt = cp[IPOPT_OPTVAL];
+
+ if (opt == IPOPT_EOL)
+ break;
+ if (opt == IPOPT_NOP)
+ optlen = 1;
+ else {
+ optlen = cp[IPOPT_OLEN];
+ if (optlen <= 0 || optlen > x)
+ return 0; /* invalid or truncated */
+ }
+ switch (opt) {
+
+ default:
+ break;
+
+ case IPOPT_LSRR:
+ bits |= IP_FW_IPOPT_LSRR;
+ break;
+
+ case IPOPT_SSRR:
+ bits |= IP_FW_IPOPT_SSRR;
+ break;
+
+ case IPOPT_RR:
+ bits |= IP_FW_IPOPT_RR;
+ break;
+
+ case IPOPT_TS:
+ bits |= IP_FW_IPOPT_TS;
+ break;
+ }
+ }
+ return (flags_match(cmd, bits));
+}
+
+static int
+tcpopts_match(struct tcphdr *tcp, ipfw_insn *cmd)
+{
+ int optlen, bits = 0;
+ u_char *cp = (u_char *)(tcp + 1);
+ int x = (tcp->th_off << 2) - sizeof(struct tcphdr);
+
+ for (; x > 0; x -= optlen, cp += optlen) {
+ int opt = cp[0];
+ if (opt == TCPOPT_EOL)
+ break;
+ if (opt == TCPOPT_NOP)
+ optlen = 1;
+ else {
+ optlen = cp[1];
+ if (optlen <= 0)
+ break;
+ }
+
+ switch (opt) {
+
+ default:
+ break;
+
+ case TCPOPT_MAXSEG:
+ bits |= IP_FW_TCPOPT_MSS;
+ break;
+
+ case TCPOPT_WINDOW:
+ bits |= IP_FW_TCPOPT_WINDOW;
+ break;
+
+ case TCPOPT_SACK_PERMITTED:
+ case TCPOPT_SACK:
+ bits |= IP_FW_TCPOPT_SACK;
+ break;
+
+ case TCPOPT_TIMESTAMP:
+ bits |= IP_FW_TCPOPT_TS;
+ break;
+
+ }
+ }
+ return (flags_match(cmd, bits));
+}
+
+static int
+iface_match(struct ifnet *ifp, ipfw_insn_if *cmd)
+{
+ if (ifp == NULL) /* no iface with this packet, match fails */
+ return 0;
+ /* Check by name or by IP address */
+ if (cmd->name[0] != '\0') { /* match by name */
+ /* Check name */
+ if (cmd->p.glob) {
+ if (fnmatch(cmd->name, ifp->if_xname, 0) == 0)
+ return(1);
+ } else {
+ if (strncmp(ifp->if_xname, cmd->name, IFNAMSIZ) == 0)
+ return(1);
+ }
+ } else {
+#ifdef __FreeBSD__ /* and OSX too ? */
+ struct ifaddr *ia;
+
+ if_addr_rlock(ifp);
+ TAILQ_FOREACH(ia, &ifp->if_addrhead, ifa_link) {
+ if (ia->ifa_addr->sa_family != AF_INET)
+ continue;
+ if (cmd->p.ip.s_addr == ((struct sockaddr_in *)
+ (ia->ifa_addr))->sin_addr.s_addr) {
+ if_addr_runlock(ifp);
+ return(1); /* match */
+ }
+ }
+ if_addr_runlock(ifp);
+#endif /* __FreeBSD__ */
+ }
+ return(0); /* no match, fail ... */
+}
+
+/*
+ * The verify_path function checks if a route to the src exists and
+ * if it is reachable via ifp (when provided).
+ *
+ * The 'verrevpath' option checks that the interface that an IP packet
+ * arrives on is the same interface that traffic destined for the
+ * packet's source address would be routed out of.
+ * The 'versrcreach' option just checks that the source address is
+ * reachable via any route (except default) in the routing table.
+ * These two are a measure to block forged packets. This is also
+ * commonly known as "anti-spoofing" or Unicast Reverse Path
+ * Forwarding (Unicast RPF) in Cisco-ese. The names of the knobs
+ * are purposely reminiscent of the Cisco IOS commands,
+ *
+ * ip verify unicast reverse-path
+ * ip verify unicast source reachable-via any
+ *
+ * which implements the same functionality. But note that the syntax
+ * is misleading, and the check may be performed on all IP packets
+ * whether unicast, multicast, or broadcast.
+ */
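+/*
+ * Typical userland usage (illustrative):
+ *
+ *	ipfw add 100 deny ip from any to any not verrevpath in
+ *	ipfw add 110 deny ip from any to any not versrcreach
+ *
+ * the first drops packets that did not arrive on the interface their
+ * source address would be routed out of, the second drops packets
+ * whose source has no route at all (other than the default).
+ */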
+static int
+verify_path(struct in_addr src, struct ifnet *ifp, u_int fib)
+{
+#ifndef __FreeBSD__
+ return 0;
+#else
+ struct route ro;
+ struct sockaddr_in *dst;
+
+ bzero(&ro, sizeof(ro));
+
+ dst = (struct sockaddr_in *)&(ro.ro_dst);
+ dst->sin_family = AF_INET;
+ dst->sin_len = sizeof(*dst);
+ dst->sin_addr = src;
+ in_rtalloc_ign(&ro, 0, fib);
+
+ if (ro.ro_rt == NULL)
+ return 0;
+
+ /*
+ * If ifp is provided, check for equality with rtentry.
+ * We should use rt->rt_ifa->ifa_ifp, instead of rt->rt_ifp,
+ * in order to pass packets injected back by if_simloop():
+ * if useloopback == 1 routing entry (via lo0) for our own address
+ * may exist, so we need to handle routing asymmetry.
+ */
+ if (ifp != NULL && ro.ro_rt->rt_ifa->ifa_ifp != ifp) {
+ RTFREE(ro.ro_rt);
+ return 0;
+ }
+
+ /* if no ifp provided, check if rtentry is not default route */
+ if (ifp == NULL &&
+ satosin(rt_key(ro.ro_rt))->sin_addr.s_addr == INADDR_ANY) {
+ RTFREE(ro.ro_rt);
+ return 0;
+ }
+
+ /* or if this is a blackhole/reject route */
+ if (ifp == NULL && ro.ro_rt->rt_flags & (RTF_REJECT|RTF_BLACKHOLE)) {
+ RTFREE(ro.ro_rt);
+ return 0;
+ }
+
+ /* found valid route */
+ RTFREE(ro.ro_rt);
+ return 1;
+#endif /* __FreeBSD__ */
+}
+
+#ifdef INET6
+/*
+ * ipv6 specific rules here...
+ */
+static __inline int
+icmp6type_match (int type, ipfw_insn_u32 *cmd)
+{
+ return (type <= ICMP6_MAXTYPE && (cmd->d[type/32] & (1<<(type%32)) ) );
+}
+
+static int
+flow6id_match( int curr_flow, ipfw_insn_u32 *cmd )
+{
+ int i;
+ for (i=0; i <= cmd->o.arg1; ++i )
+ if (curr_flow == cmd->d[i] )
+ return 1;
+ return 0;
+}
+
+/* support for IP6_*_ME opcodes */
+static int
+search_ip6_addr_net (struct in6_addr * ip6_addr)
+{
+ struct ifnet *mdc;
+ struct ifaddr *mdc2;
+ struct in6_ifaddr *fdm;
+ struct in6_addr copia;
+
+ TAILQ_FOREACH(mdc, &V_ifnet, if_link) {
+ if_addr_rlock(mdc);
+ TAILQ_FOREACH(mdc2, &mdc->if_addrhead, ifa_link) {
+ if (mdc2->ifa_addr->sa_family == AF_INET6) {
+ fdm = (struct in6_ifaddr *)mdc2;
+ copia = fdm->ia_addr.sin6_addr;
+ /* clear the embedded scope_id before comparing */
+ in6_clearscope(&copia);
+ if (IN6_ARE_ADDR_EQUAL(ip6_addr, &copia)) {
+ if_addr_runlock(mdc);
+ return 1;
+ }
+ }
+ }
+ if_addr_runlock(mdc);
+ }
+ return 0;
+}
+
+static int
+verify_path6(struct in6_addr *src, struct ifnet *ifp)
+{
+ struct route_in6 ro;
+ struct sockaddr_in6 *dst;
+
+ bzero(&ro, sizeof(ro));
+
+ dst = (struct sockaddr_in6 * )&(ro.ro_dst);
+ dst->sin6_family = AF_INET6;
+ dst->sin6_len = sizeof(*dst);
+ dst->sin6_addr = *src;
+ /* XXX MRT 0 for ipv6 at this time */
+ rtalloc_ign((struct route *)&ro, 0);
+
+ if (ro.ro_rt == NULL)
+ return 0;
+
+ /*
+ * if ifp is provided, check for equality with rtentry
+ * We should use rt->rt_ifa->ifa_ifp, instead of rt->rt_ifp,
+ * to support the case of sending packets to an address of our own.
+ * (where the former interface is the first argument of if_simloop()
+ * (=ifp), the latter is lo0)
+ */
+ if (ifp != NULL && ro.ro_rt->rt_ifa->ifa_ifp != ifp) {
+ RTFREE(ro.ro_rt);
+ return 0;
+ }
+
+ /* if no ifp provided, check if rtentry is not default route */
+ if (ifp == NULL &&
+ IN6_IS_ADDR_UNSPECIFIED(&satosin6(rt_key(ro.ro_rt))->sin6_addr)) {
+ RTFREE(ro.ro_rt);
+ return 0;
+ }
+
+ /* or if this is a blackhole/reject route */
+ if (ifp == NULL && ro.ro_rt->rt_flags & (RTF_REJECT|RTF_BLACKHOLE)) {
+ RTFREE(ro.ro_rt);
+ return 0;
+ }
+
+ /* found valid route */
+ RTFREE(ro.ro_rt);
+ return 1;
+
+}
+
+static int
+is_icmp6_query(int icmp6_type)
+{
+ if ((icmp6_type <= ICMP6_MAXTYPE) &&
+ (icmp6_type == ICMP6_ECHO_REQUEST ||
+ icmp6_type == ICMP6_MEMBERSHIP_QUERY ||
+ icmp6_type == ICMP6_WRUREQUEST ||
+ icmp6_type == ICMP6_FQDN_QUERY ||
+ icmp6_type == ICMP6_NI_QUERY))
+ return (1);
+
+ return (0);
+}
+
+static void
+send_reject6(struct ip_fw_args *args, int code, u_int hlen, struct ip6_hdr *ip6)
+{
+ struct mbuf *m;
+
+ m = args->m;
+ if (code == ICMP6_UNREACH_RST && args->f_id.proto == IPPROTO_TCP) {
+ struct tcphdr *tcp;
+ tcp = (struct tcphdr *)((char *)ip6 + hlen);
+
+ if ((tcp->th_flags & TH_RST) == 0) {
+ struct mbuf *m0;
+ m0 = ipfw_send_pkt(args->m, &(args->f_id),
+ ntohl(tcp->th_seq), ntohl(tcp->th_ack),
+ tcp->th_flags | TH_RST);
+ if (m0 != NULL)
+ ip6_output(m0, NULL, NULL, 0, NULL, NULL,
+ NULL);
+ }
+ FREE_PKT(m);
+ } else if (code != ICMP6_UNREACH_RST) { /* Send an ICMPv6 unreach. */
+#if 0
+ /*
+ * Unlike above, the mbufs need to line up with the ip6 hdr,
+ * as the contents are read. We need to m_adj() the
+ * needed amount.
+ * The mbuf will however be thrown away so we can adjust it.
+ * Remember we did an m_pullup on it already so we
+ * can make some assumptions about contiguousness.
+ */
+ if (args->L3offset)
+ m_adj(m, args->L3offset);
+#endif
+ icmp6_error(m, ICMP6_DST_UNREACH, code, 0);
+ } else
+ FREE_PKT(m);
+
+ args->m = NULL;
+}
+
+#endif /* INET6 */
+
+
+/*
+ * sends a reject message, consuming the mbuf passed as an argument.
+ */
+static void
+send_reject(struct ip_fw_args *args, int code, int iplen, struct ip *ip)
+{
+
+#if 0
+ /* XXX When ip is not guaranteed to be at mtod() we will
+ * need to account for this.
+ * The mbuf will however be thrown away so we can adjust it.
+ * Remember we did an m_pullup on it already so we
+ * can make some assumptions about contiguousness.
+ */
+ if (args->L3offset)
+ m_adj(m, args->L3offset);
+#endif
+ if (code != ICMP_REJECT_RST) { /* Send an ICMP unreach */
+ /* We need the IP header in host order for icmp_error(). */
+ SET_HOST_IPLEN(ip);
+ icmp_error(args->m, ICMP_UNREACH, code, 0L, 0);
+ } else if (args->f_id.proto == IPPROTO_TCP) {
+ struct tcphdr *const tcp =
+ L3HDR(struct tcphdr, mtod(args->m, struct ip *));
+ if ( (tcp->th_flags & TH_RST) == 0) {
+ struct mbuf *m;
+ m = ipfw_send_pkt(args->m, &(args->f_id),
+ ntohl(tcp->th_seq), ntohl(tcp->th_ack),
+ tcp->th_flags | TH_RST);
+ if (m != NULL)
+ ip_output(m, NULL, NULL, 0, NULL, NULL);
+ }
+ FREE_PKT(args->m);
+ } else
+ FREE_PKT(args->m);
+ args->m = NULL;
+}
+
+/*
+ * Support for uid/gid/jail lookup. These tests are expensive
+ * (because we may need to look into the list of active sockets)
+ * so we cache the results. ugid_lookupp is 0 if we have not
+ * yet done a lookup, 1 if we succeeded, and -1 if we tried
+ * and failed. The function always returns the match value.
+ * We could actually spare the variable and use *uc, setting
+ * it to (void *)check_uidgid if we have no info, NULL if
+ * we tried and failed, or any other value if successful.
+ */
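+/*
+ * Summary of the cache states used below:
+ *
+ *	*ugid_lookupp ==  0	no lookup yet; use the supplied inp
+ *				if any, else search the PCB hash
+ *	*ugid_lookupp ==  1	*uc holds a referenced credential
+ *	*ugid_lookupp == -1	lookup failed once, never retried
+ *				for this packet
+ */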
+static int
+check_uidgid(ipfw_insn_u32 *insn, int proto, struct ifnet *oif,
+ struct in_addr dst_ip, u_int16_t dst_port, struct in_addr src_ip,
+ u_int16_t src_port, int *ugid_lookupp,
+ struct ucred **uc, struct inpcb *inp)
+{
+#ifndef __FreeBSD__
+ return cred_check(insn, proto, oif,
+ dst_ip, dst_port, src_ip, src_port,
+ (struct bsd_ucred *)uc, ugid_lookupp, ((struct mbuf *)inp)->m_skb);
+#else /* FreeBSD */
+ struct inpcbinfo *pi;
+ int wildcard;
+ struct inpcb *pcb;
+ int match;
+
+ /*
+ * Check to see if the UDP or TCP stack supplied us with
+ * the PCB. If so, rather than holding a lock and looking
+ * up the PCB, we can use the one that was supplied.
+ */
+ if (inp && *ugid_lookupp == 0) {
+ INP_LOCK_ASSERT(inp);
+ if (inp->inp_socket != NULL) {
+ *uc = crhold(inp->inp_cred);
+ *ugid_lookupp = 1;
+ } else
+ *ugid_lookupp = -1;
+ }
+ /*
+ * If we have already been here and the packet has no
+ * PCB entry associated with it, then we can safely
+ * assume that there is no match.
+ */
+ if (*ugid_lookupp == -1)
+ return (0);
+ if (proto == IPPROTO_TCP) {
+ wildcard = 0;
+ pi = &V_tcbinfo;
+ } else if (proto == IPPROTO_UDP) {
+ wildcard = INPLOOKUP_WILDCARD;
+ pi = &V_udbinfo;
+ } else
+ return 0;
+ match = 0;
+ if (*ugid_lookupp == 0) {
+ INP_INFO_RLOCK(pi);
+ pcb = (oif) ?
+ in_pcblookup_hash(pi,
+ dst_ip, htons(dst_port),
+ src_ip, htons(src_port),
+ wildcard, oif) :
+ in_pcblookup_hash(pi,
+ src_ip, htons(src_port),
+ dst_ip, htons(dst_port),
+ wildcard, NULL);
+ if (pcb != NULL) {
+ *uc = crhold(pcb->inp_cred);
+ *ugid_lookupp = 1;
+ }
+ INP_INFO_RUNLOCK(pi);
+ if (*ugid_lookupp == 0) {
+ /*
+ * We tried and failed, set the variable to -1
+ * so we will not try again on this packet.
+ */
+ *ugid_lookupp = -1;
+ return (0);
+ }
+ }
+ if (insn->o.opcode == O_UID)
+ match = ((*uc)->cr_uid == (uid_t)insn->d[0]);
+ else if (insn->o.opcode == O_GID)
+ match = groupmember((gid_t)insn->d[0], *uc);
+ else if (insn->o.opcode == O_JAIL)
+ match = ((*uc)->cr_prison->pr_id == (int)insn->d[0]);
+ return match;
+#endif /* __FreeBSD__ */
+}
+
+/*
+ * Helper function to set args with info on the rule after the matching
+ * one. slot is precise, whereas we guess rule_id as they are
+ * assigned sequentially.
+ */
+static inline void
+set_match(struct ip_fw_args *args, int slot,
+ struct ip_fw_chain *chain)
+{
+ args->rule.chain_id = chain->id;
+ args->rule.slot = slot + 1; /* we use 0 as a marker */
+ args->rule.rule_id = 1 + chain->map[slot]->id;
+ args->rule.rulenum = chain->map[slot]->rulenum;
+}
+
+/*
+ * The main check routine for the firewall.
+ *
+ * All arguments are in args so we can modify them and return them
+ * back to the caller.
+ *
+ * Parameters:
+ *
+ * args->m (in/out) The packet; we set to NULL when/if we nuke it.
+ * Starts with the IP header.
+ * args->eh (in) Mac header if present, NULL for layer3 packet.
+ * args->L3offset Number of bytes bypassed if we came from L2.
+ * e.g. often sizeof(eh) ** NOTYET **
+ * args->oif Outgoing interface, NULL if packet is incoming.
+ * The incoming interface is in the mbuf. (in)
+ * args->divert_rule (in/out)
+ * Skip up to the first rule past this rule number;
+ * upon return, non-zero port number for divert or tee.
+ *
+ * args->rule Pointer to the last matching rule (in/out)
+ * args->next_hop Socket we are forwarding to (out).
+ * args->f_id Addresses grabbed from the packet (out)
+ * args->rule.info a cookie depending on rule action
+ *
+ * Return value:
+ *
+ * IP_FW_PASS the packet must be accepted
+ * IP_FW_DENY the packet must be dropped
+ * IP_FW_DIVERT divert packet, port in m_tag
+ * IP_FW_TEE tee packet, port in m_tag
+ * IP_FW_DUMMYNET to dummynet, pipe in args->cookie
+ * IP_FW_NETGRAPH into netgraph, cookie args->cookie
+ * args->rule contains the matching rule,
+ * args->rule.info has additional information.
+ *
+ */
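+#if 0
+/*
+ * Illustrative caller sketch (hypothetical -- the real hooks live in
+ * the pfil glue): only args.m, and args.oif on the outbound path,
+ * need to be filled in for a basic check.
+ */
+static int
+ipfw_chk_example(struct mbuf **m0, struct ifnet *oif)
+{
+ struct ip_fw_args args;
+
+ bzero(&args, sizeof(args));
+ args.m = *m0; /* packet, starting at the IP header */
+ args.oif = oif; /* NULL on the inbound path */
+
+ switch (ipfw_chk(&args)) {
+ case IP_FW_PASS:
+ *m0 = args.m; /* may have been reallocated */
+ return (0);
+ default: /* treat everything else as deny here */
+ if (args.m != NULL)
+ FREE_PKT(args.m);
+ *m0 = NULL;
+ return (EACCES);
+ }
+}
+#endif
+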
+int
+ipfw_chk(struct ip_fw_args *args)
+{
+
+ /*
+ * Local variables holding state while processing a packet:
+ *
+ * IMPORTANT NOTE: to speed up the processing of rules, there
+ * are some assumptions on the values of the variables, which
+ * are documented here. Should you change them, please check
+ * the implementation of the various instructions to make sure
+ * that they still work.
+ *
+ * args->eh The MAC header. It is non-null for a layer2
+ * packet, it is NULL for a layer-3 packet.
+ * **notyet**
+ * args->L3offset Offset in the packet to the L3 (IP or equiv.) header.
+ *
+ * m | args->m Pointer to the mbuf, as received from the caller.
+ * It may change if ipfw_chk() does an m_pullup, or if it
+ * consumes the packet because it calls send_reject().
+ * XXX This has to change, so that ipfw_chk() never modifies
+ * or consumes the buffer.
+ * ip is the beginning of the ip(4 or 6) header.
+ * Calculated by adding the L3offset to the start of data.
+ * (Until we start using L3offset, the packet is
+ * supposed to start with the ip header).
+ */
+ struct mbuf *m = args->m;
+ struct ip *ip = mtod(m, struct ip *);
+
+ /*
+ * For rules which contain uid/gid or jail constraints, cache
+ * a copy of the users credentials after the pcb lookup has been
+ * executed. This will speed up the processing of rules with
+ * these types of constraints, as well as decrease contention
+ * on pcb related locks.
+ */
+#ifndef __FreeBSD__
+ struct bsd_ucred ucred_cache;
+#else
+ struct ucred *ucred_cache = NULL;
+#endif
+ int ucred_lookup = 0;
+
+ /*
+ * oif | args->oif If NULL, ipfw_chk has been called on the
+ * inbound path (ether_input, ip_input).
+ * If non-NULL, ipfw_chk has been called on the outbound path
+ * (ether_output, ip_output).
+ */
+ struct ifnet *oif = args->oif;
+
+ int f_pos = 0; /* index of current rule in the array */
+ int retval = 0;
+
+ /*
+ * hlen The length of the IP header.
+ */
+ u_int hlen = 0; /* hlen >0 means we have an IP pkt */
+
+ /*
+ * offset The offset of a fragment. offset != 0 means that
+ * we have a fragment at this offset of an IPv4 packet.
+ * offset == 0 means that (if this is an IPv4 packet)
+ * this is the first or only fragment.
+ * For IPv6 offset == 0 means there is no Fragment Header.
+ * If offset != 0 for IPv6 always use correct mask to
+ * get the correct offset because we add IP6F_MORE_FRAG
+ *	to be able to detect the first fragment which would
+ * otherwise have offset = 0.
+ */
+ u_short offset = 0;
+
+ /*
+ * Local copies of addresses. They are only valid if we have
+ * an IP packet.
+ *
+ * proto The protocol. Set to 0 for non-ip packets,
+ * or to the protocol read from the packet otherwise.
+ * proto != 0 means that we have an IPv4 packet.
+ *
+ * src_port, dst_port port numbers, in HOST format. Only
+ * valid for TCP and UDP packets.
+ *
+ * src_ip, dst_ip ip addresses, in NETWORK format.
+ * Only valid for IPv4 packets.
+ */
+ uint8_t proto;
+ uint16_t src_port = 0, dst_port = 0; /* NOTE: host format */
+ struct in_addr src_ip, dst_ip; /* NOTE: network format */
+ uint16_t iplen=0;
+ int pktlen;
+ uint16_t etype = 0; /* Host order stored ether type */
+
+ /*
+ * dyn_dir = MATCH_UNKNOWN when rules unchecked,
+ * MATCH_NONE when checked and not matched (q = NULL),
+ * MATCH_FORWARD or MATCH_REVERSE otherwise (q != NULL)
+ */
+ int dyn_dir = MATCH_UNKNOWN;
+ ipfw_dyn_rule *q = NULL;
+ struct ip_fw_chain *chain = &V_layer3_chain;
+
+ /*
+ * We store in ulp a pointer to the upper layer protocol header.
+ * In the ipv4 case this is easy to determine from the header,
+ * but for ipv6 we might have some additional headers in the middle.
+ * ulp is NULL if not found.
+ */
+ void *ulp = NULL; /* upper layer protocol pointer. */
+
+ /* XXX ipv6 variables */
+ int is_ipv6 = 0;
+ uint8_t icmp6_type = 0;
+ uint16_t ext_hd = 0; /* bits vector for extension header filtering */
+ /* end of ipv6 variables */
+
+ int is_ipv4 = 0;
+
+ int done = 0; /* flag to exit the outer loop */
+
+ if (m->m_flags & M_SKIP_FIREWALL || (! V_ipfw_vnet_ready))
+ return (IP_FW_PASS); /* accept */
+
+ dst_ip.s_addr = 0; /* make sure it is initialized */
+ src_ip.s_addr = 0; /* make sure it is initialized */
+ pktlen = m->m_pkthdr.len;
+ args->f_id.fib = M_GETFIB(m); /* note: mbuf not altered */
+ proto = args->f_id.proto = 0; /* mark f_id invalid */
+ /* XXX 0 is a valid proto: IP/IPv6 Hop-by-Hop Option */
+
+/*
+ * PULLUP_TO(len, p, T) makes sure that len + sizeof(T) is contiguous,
+ * then it sets p to point at the offset "len" in the mbuf. WARNING: the
+ * pointer might become stale after other pullups (but we never use it
+ * this way).
+ */
+#define PULLUP_TO(_len, p, T) \
+do { \
+ int x = (_len) + sizeof(T); \
+ if ((m)->m_len < x) { \
+ args->m = m = m_pullup(m, x); \
+ if (m == NULL) \
+ goto pullup_failed; \
+ } \
+ p = (mtod(m, char *) + (_len)); \
+} while (0)
+
+ /*
+ * if we have an ether header, fetch the ethertype.
+ */
+ if (args->eh)
+ etype = ntohs(args->eh->ether_type);
+
+ /* Identify IP packets and fill up variables. */
+ if (pktlen >= sizeof(struct ip6_hdr) &&
+ (args->eh == NULL || etype == ETHERTYPE_IPV6) && ip->ip_v == 6) {
+ struct ip6_hdr *ip6 = (struct ip6_hdr *)ip;
+ is_ipv6 = 1;
+ args->f_id.addr_type = 6;
+ hlen = sizeof(struct ip6_hdr);
+ proto = ip6->ip6_nxt;
+
+ /* Search extension headers to find upper layer protocols */
+ while (ulp == NULL) {
+ switch (proto) {
+ case IPPROTO_ICMPV6:
+ PULLUP_TO(hlen, ulp, struct icmp6_hdr);
+ icmp6_type = ICMP6(ulp)->icmp6_type;
+ break;
+
+ case IPPROTO_TCP:
+ PULLUP_TO(hlen, ulp, struct tcphdr);
+ dst_port = TCP(ulp)->th_dport;
+ src_port = TCP(ulp)->th_sport;
+ /* save flags for dynamic rules */
+ args->f_id._flags = TCP(ulp)->th_flags;
+ break;
+
+ case IPPROTO_SCTP:
+ PULLUP_TO(hlen, ulp, struct sctphdr);
+ src_port = SCTP(ulp)->src_port;
+ dst_port = SCTP(ulp)->dest_port;
+ break;
+
+ case IPPROTO_UDP:
+ PULLUP_TO(hlen, ulp, struct udphdr);
+ dst_port = UDP(ulp)->uh_dport;
+ src_port = UDP(ulp)->uh_sport;
+ break;
+
+ case IPPROTO_HOPOPTS: /* RFC 2460 */
+ PULLUP_TO(hlen, ulp, struct ip6_hbh);
+ ext_hd |= EXT_HOPOPTS;
+ hlen += (((struct ip6_hbh *)ulp)->ip6h_len + 1) << 3;
+ proto = ((struct ip6_hbh *)ulp)->ip6h_nxt;
+ ulp = NULL;
+ break;
+
+ case IPPROTO_ROUTING: /* RFC 2460 */
+ PULLUP_TO(hlen, ulp, struct ip6_rthdr);
+ switch (((struct ip6_rthdr *)ulp)->ip6r_type) {
+ case 0:
+ ext_hd |= EXT_RTHDR0;
+ break;
+ case 2:
+ ext_hd |= EXT_RTHDR2;
+ break;
+ default:
+ printf("IPFW2: IPV6 - Unknown Routing "
+ "Header type(%d)\n",
+ ((struct ip6_rthdr *)ulp)->ip6r_type);
+ if (V_fw_deny_unknown_exthdrs)
+ return (IP_FW_DENY);
+ break;
+ }
+ ext_hd |= EXT_ROUTING;
+ hlen += (((struct ip6_rthdr *)ulp)->ip6r_len + 1) << 3;
+ proto = ((struct ip6_rthdr *)ulp)->ip6r_nxt;
+ ulp = NULL;
+ break;
+
+ case IPPROTO_FRAGMENT: /* RFC 2460 */
+ PULLUP_TO(hlen, ulp, struct ip6_frag);
+ ext_hd |= EXT_FRAGMENT;
+ hlen += sizeof (struct ip6_frag);
+ proto = ((struct ip6_frag *)ulp)->ip6f_nxt;
+ offset = ((struct ip6_frag *)ulp)->ip6f_offlg &
+ IP6F_OFF_MASK;
+ /* Add IP6F_MORE_FRAG so that the offset of
+ * the first fragment is != 0. */
+ offset |= ((struct ip6_frag *)ulp)->ip6f_offlg &
+ IP6F_MORE_FRAG;
+ if (offset == 0) {
+ printf("IPFW2: IPV6 - Invalid Fragment "
+ "Header\n");
+ if (V_fw_deny_unknown_exthdrs)
+ return (IP_FW_DENY);
+ break;
+ }
+ args->f_id.extra =
+ ntohl(((struct ip6_frag *)ulp)->ip6f_ident);
+ ulp = NULL;
+ break;
+
+ case IPPROTO_DSTOPTS: /* RFC 2460 */
+ PULLUP_TO(hlen, ulp, struct ip6_hbh);
+ ext_hd |= EXT_DSTOPTS;
+ hlen += (((struct ip6_hbh *)ulp)->ip6h_len + 1) << 3;
+ proto = ((struct ip6_hbh *)ulp)->ip6h_nxt;
+ ulp = NULL;
+ break;
+
+ case IPPROTO_AH: /* RFC 2402 */
+ PULLUP_TO(hlen, ulp, struct ip6_ext);
+ ext_hd |= EXT_AH;
+ hlen += (((struct ip6_ext *)ulp)->ip6e_len + 2) << 2;
+ proto = ((struct ip6_ext *)ulp)->ip6e_nxt;
+ ulp = NULL;
+ break;
+
+ case IPPROTO_ESP: /* RFC 2406 */
+ PULLUP_TO(hlen, ulp, uint32_t); /* SPI, Seq# */
+ /* Anything past Seq# is variable length and
+ * data past this ext. header is encrypted. */
+ ext_hd |= EXT_ESP;
+ break;
+
+ case IPPROTO_NONE: /* RFC 2460 */
+				/*
+				 * Packet ends here. The IPv6 header has
+				 * already been pulled up; any octets
+				 * following this header must be ignored.
+				 */
+ ulp = ip; /* non-NULL to get out of loop. */
+ break;
+
+ case IPPROTO_OSPFIGP:
+ /* XXX OSPF header check? */
+ PULLUP_TO(hlen, ulp, struct ip6_ext);
+ break;
+
+ case IPPROTO_PIM:
+ /* XXX PIM header check? */
+ PULLUP_TO(hlen, ulp, struct pim);
+ break;
+
+ case IPPROTO_CARP:
+ PULLUP_TO(hlen, ulp, struct carp_header);
+ if (((struct carp_header *)ulp)->carp_version !=
+ CARP_VERSION)
+ return (IP_FW_DENY);
+ if (((struct carp_header *)ulp)->carp_type !=
+ CARP_ADVERTISEMENT)
+ return (IP_FW_DENY);
+ break;
+
+ case IPPROTO_IPV6: /* RFC 2893 */
+ PULLUP_TO(hlen, ulp, struct ip6_hdr);
+ break;
+
+ case IPPROTO_IPV4: /* RFC 2893 */
+ PULLUP_TO(hlen, ulp, struct ip);
+ break;
+
+ default:
+ printf("IPFW2: IPV6 - Unknown Extension "
+ "Header(%d), ext_hd=%x\n", proto, ext_hd);
+ if (V_fw_deny_unknown_exthdrs)
+ return (IP_FW_DENY);
+ PULLUP_TO(hlen, ulp, struct ip6_ext);
+ break;
+ } /*switch */
+ }
+ ip = mtod(m, struct ip *);
+ ip6 = (struct ip6_hdr *)ip;
+ args->f_id.src_ip6 = ip6->ip6_src;
+ args->f_id.dst_ip6 = ip6->ip6_dst;
+ args->f_id.src_ip = 0;
+ args->f_id.dst_ip = 0;
+ args->f_id.flow_id6 = ntohl(ip6->ip6_flow);
+ } else if (pktlen >= sizeof(struct ip) &&
+ (args->eh == NULL || etype == ETHERTYPE_IP) && ip->ip_v == 4) {
+ is_ipv4 = 1;
+ hlen = ip->ip_hl << 2;
+ args->f_id.addr_type = 4;
+
+ /*
+ * Collect parameters into local variables for faster matching.
+ */
+ proto = ip->ip_p;
+ src_ip = ip->ip_src;
+ dst_ip = ip->ip_dst;
+ offset = ntohs(ip->ip_off) & IP_OFFMASK;
+ iplen = ntohs(ip->ip_len);
+ pktlen = iplen < pktlen ? iplen : pktlen;
+
+ if (offset == 0) {
+ switch (proto) {
+ case IPPROTO_TCP:
+ PULLUP_TO(hlen, ulp, struct tcphdr);
+ dst_port = TCP(ulp)->th_dport;
+ src_port = TCP(ulp)->th_sport;
+ /* save flags for dynamic rules */
+ args->f_id._flags = TCP(ulp)->th_flags;
+ break;
+
+ case IPPROTO_UDP:
+ PULLUP_TO(hlen, ulp, struct udphdr);
+ dst_port = UDP(ulp)->uh_dport;
+ src_port = UDP(ulp)->uh_sport;
+ break;
+
+ case IPPROTO_ICMP:
+ PULLUP_TO(hlen, ulp, struct icmphdr);
+ //args->f_id.flags = ICMP(ulp)->icmp_type;
+ break;
+
+ default:
+ break;
+ }
+ }
+
+ ip = mtod(m, struct ip *);
+ args->f_id.src_ip = ntohl(src_ip.s_addr);
+ args->f_id.dst_ip = ntohl(dst_ip.s_addr);
+ }
+#undef PULLUP_TO
+ if (proto) { /* we may have port numbers, store them */
+ args->f_id.proto = proto;
+ args->f_id.src_port = src_port = ntohs(src_port);
+ args->f_id.dst_port = dst_port = ntohs(dst_port);
+ }
+
+ IPFW_RLOCK(chain);
+ if (! V_ipfw_vnet_ready) { /* shutting down, leave NOW. */
+ IPFW_RUNLOCK(chain);
+ return (IP_FW_PASS); /* accept */
+ }
+ if (args->rule.slot) {
+ /*
+ * Packet has already been tagged as a result of a previous
+ * match on rule args->rule aka args->rule_id (PIPE, QUEUE,
+ * REASS, NETGRAPH, DIVERT/TEE...)
+ * Validate the slot and continue from the next one
+ * if still present, otherwise do a lookup.
+ */
+ f_pos = (args->rule.chain_id == chain->id) ?
+ args->rule.slot :
+ ipfw_find_rule(chain, args->rule.rulenum,
+ args->rule.rule_id);
+ } else {
+ f_pos = 0;
+ }
+
+ /*
+ * Now scan the rules, and parse microinstructions for each rule.
+ * We have two nested loops and an inner switch. Sometimes we
+ * need to break out of one or both loops, or re-enter one of
+ * the loops with updated variables. Loop variables are:
+ *
+ * f_pos (outer loop) points to the current rule.
+ * On output it points to the matching rule.
+ * done (outer loop) is used as a flag to break the loop.
+ * l (inner loop) residual length of current rule.
+ * cmd points to the current microinstruction.
+ *
+ * We break the inner loop by setting l=0 and possibly
+ * cmdlen=0 if we don't want to advance cmd.
+ * We break the outer loop by setting done=1
+ * We can restart the inner loop by setting l>0 and f_pos, f, cmd
+ * as needed.
+ */
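+	/*
+	 * For example, a terminal action such as O_DENY sets
+	 * "retval = IP_FW_DENY; l = 0; done = 1;" to leave both loops,
+	 * while O_SKIPTO re-enters the inner loop by setting f_pos, f,
+	 * l = f->cmd_len, cmd = f->cmd and clearing cmdlen.
+	 */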
+ for (; f_pos < chain->n_rules; f_pos++) {
+ ipfw_insn *cmd;
+ uint32_t tablearg = 0;
+ int l, cmdlen, skip_or; /* skip rest of OR block */
+ struct ip_fw *f;
+
+ f = chain->map[f_pos];
+ if (V_set_disable & (1 << f->set) )
+ continue;
+
+ skip_or = 0;
+ for (l = f->cmd_len, cmd = f->cmd ; l > 0 ;
+ l -= cmdlen, cmd += cmdlen) {
+ int match;
+
+ /*
+ * check_body is a jump target used when we find a
+ * CHECK_STATE, and need to jump to the body of
+ * the target rule.
+ */
+
+/* check_body: */
+ cmdlen = F_LEN(cmd);
+ /*
+ * An OR block (insn_1 || .. || insn_n) has the
+ * F_OR bit set in all but the last instruction.
+ * The first match will set "skip_or", and cause
+ * the following instructions to be skipped until
+ * past the one with the F_OR bit clear.
+ */
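+			/*
+			 * For example, a rule with two alternative match
+			 * instructions joined by "or" has F_OR set on the
+			 * first one; once it matches, skip_or makes us
+			 * skip ahead until past the instruction with
+			 * F_OR clear.
+			 */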
+ if (skip_or) { /* skip this instruction */
+ if ((cmd->len & F_OR) == 0)
+ skip_or = 0; /* next one is good */
+ continue;
+ }
+ match = 0; /* set to 1 if we succeed */
+
+ switch (cmd->opcode) {
+ /*
+ * The first set of opcodes compares the packet's
+ * fields with some pattern, setting 'match' if a
+ * match is found. At the end of the loop there is
+ * logic to deal with F_NOT and F_OR flags associated
+ * with the opcode.
+ */
+ case O_NOP:
+ match = 1;
+ break;
+
+ case O_FORWARD_MAC:
+ printf("ipfw: opcode %d unimplemented\n",
+ cmd->opcode);
+ break;
+
+ case O_GID:
+ case O_UID:
+ case O_JAIL:
+				/*
+				 * We only check offset == 0 && proto != 0,
+				 * as this ensures that we have a packet
+				 * with the port info.
+				 */
+				if (offset != 0)
+ break;
+ if (is_ipv6) /* XXX to be fixed later */
+ break;
+ if (proto == IPPROTO_TCP ||
+ proto == IPPROTO_UDP)
+ match = check_uidgid(
+ (ipfw_insn_u32 *)cmd,
+ proto, oif,
+ dst_ip, dst_port,
+ src_ip, src_port, &ucred_lookup,
+#ifdef __FreeBSD__
+ &ucred_cache, args->inp);
+#else
+ (void *)&ucred_cache,
+ (struct inpcb *)args->m);
+#endif
+ break;
+
+ case O_RECV:
+ match = iface_match(m->m_pkthdr.rcvif,
+ (ipfw_insn_if *)cmd);
+ break;
+
+ case O_XMIT:
+ match = iface_match(oif, (ipfw_insn_if *)cmd);
+ break;
+
+ case O_VIA:
+ match = iface_match(oif ? oif :
+ m->m_pkthdr.rcvif, (ipfw_insn_if *)cmd);
+ break;
+
+ case O_MACADDR2:
+ if (args->eh != NULL) { /* have MAC header */
+ u_int32_t *want = (u_int32_t *)
+ ((ipfw_insn_mac *)cmd)->addr;
+ u_int32_t *mask = (u_int32_t *)
+ ((ipfw_insn_mac *)cmd)->mask;
+ u_int32_t *hdr = (u_int32_t *)args->eh;
+
+ match =
+ ( want[0] == (hdr[0] & mask[0]) &&
+ want[1] == (hdr[1] & mask[1]) &&
+ want[2] == (hdr[2] & mask[2]) );
+ }
+ break;
+
+ case O_MAC_TYPE:
+ if (args->eh != NULL) {
+ u_int16_t *p =
+ ((ipfw_insn_u16 *)cmd)->ports;
+ int i;
+
+ for (i = cmdlen - 1; !match && i>0;
+ i--, p += 2)
+ match = (etype >= p[0] &&
+ etype <= p[1]);
+ }
+ break;
+
+ case O_FRAG:
+ match = (offset != 0);
+ break;
+
+ case O_IN: /* "out" is "not in" */
+ match = (oif == NULL);
+ break;
+
+ case O_LAYER2:
+ match = (args->eh != NULL);
+ break;
+
+ case O_DIVERTED:
+ {
+				/* For diverted packets, args->rule.info
+				 * contains the divert port (in host format),
+				 * plus bits for reason and direction.
+				 */
+ uint32_t i = args->rule.info;
+ match = (i&IPFW_IS_MASK) == IPFW_IS_DIVERT &&
+ cmd->arg1 & ((i & IPFW_INFO_IN) ? 1 : 2);
+ }
+ break;
+
+ case O_PROTO:
+				/*
+				 * We do not allow an arg of 0, so
+				 * checking "proto" alone suffices.
+				 */
+ match = (proto == cmd->arg1);
+ break;
+
+ case O_IP_SRC:
+ match = is_ipv4 &&
+ (((ipfw_insn_ip *)cmd)->addr.s_addr ==
+ src_ip.s_addr);
+ break;
+
+ case O_IP_SRC_LOOKUP:
+ case O_IP_DST_LOOKUP:
+ if (is_ipv4) {
+ uint32_t key =
+ (cmd->opcode == O_IP_DST_LOOKUP) ?
+ dst_ip.s_addr : src_ip.s_addr;
+ uint32_t v = 0;
+
+ if (cmdlen > F_INSN_SIZE(ipfw_insn_u32)) {
+ /* generic lookup. The key must be
+ * in 32bit big-endian format.
+ */
+ v = ((ipfw_insn_u32 *)cmd)->d[1];
+ if (v == 0)
+ key = dst_ip.s_addr;
+ else if (v == 1)
+ key = src_ip.s_addr;
+ else if (v == 6) /* dscp */
+ key = (ip->ip_tos >> 2) & 0x3f;
+ else if (offset != 0)
+ break;
+ else if (proto != IPPROTO_TCP &&
+ proto != IPPROTO_UDP)
+ break;
+ else if (v == 2)
+ key = htonl(dst_port);
+ else if (v == 3)
+ key = htonl(src_port);
+ else if (v == 4 || v == 5) {
+ check_uidgid(
+ (ipfw_insn_u32 *)cmd,
+ proto, oif,
+ dst_ip, dst_port,
+ src_ip, src_port, &ucred_lookup,
+#ifdef __FreeBSD__
+ &ucred_cache, args->inp);
+ if (v == 4 /* O_UID */)
+ key = ucred_cache->cr_uid;
+ else if (v == 5 /* O_JAIL */)
+ key = ucred_cache->cr_prison->pr_id;
+#else /* !__FreeBSD__ */
+ (void *)&ucred_cache,
+ (struct inpcb *)args->m);
+					if (v == 4 /* O_UID */)
+ key = ucred_cache.uid;
+ else if (v == 5 /* O_JAIL */)
+ key = ucred_cache.xid;
+#endif /* !__FreeBSD__ */
+ key = htonl(key);
+ } else
+ break;
+ }
+ match = ipfw_lookup_table(chain,
+ cmd->arg1, key, &v);
+ if (!match)
+ break;
+ if (cmdlen == F_INSN_SIZE(ipfw_insn_u32))
+ match =
+ ((ipfw_insn_u32 *)cmd)->d[0] == v;
+ else
+ tablearg = v;
+ }
+ break;
+
+ case O_IP_SRC_MASK:
+ case O_IP_DST_MASK:
+ if (is_ipv4) {
+ uint32_t a =
+ (cmd->opcode == O_IP_DST_MASK) ?
+ dst_ip.s_addr : src_ip.s_addr;
+ uint32_t *p = ((ipfw_insn_u32 *)cmd)->d;
+ int i = cmdlen-1;
+
+ for (; !match && i>0; i-= 2, p+= 2)
+ match = (p[0] == (a & p[1]));
+ }
+ break;
+
+ case O_IP_SRC_ME:
+ if (is_ipv4) {
+ struct ifnet *tif;
+
+ INADDR_TO_IFP(src_ip, tif);
+ match = (tif != NULL);
+ break;
+ }
+#ifdef INET6
+ /* FALLTHROUGH */
+ case O_IP6_SRC_ME:
+				match = is_ipv6 &&
+				    search_ip6_addr_net(&args->f_id.src_ip6);
+#endif
+ break;
+
+ case O_IP_DST_SET:
+ case O_IP_SRC_SET:
+ if (is_ipv4) {
+ u_int32_t *d = (u_int32_t *)(cmd+1);
+ u_int32_t addr =
+ cmd->opcode == O_IP_DST_SET ?
+ args->f_id.dst_ip :
+ args->f_id.src_ip;
+
+ if (addr < d[0])
+ break;
+ addr -= d[0]; /* subtract base */
+ match = (addr < cmd->arg1) &&
+ ( d[ 1 + (addr>>5)] &
+ (1<<(addr & 0x1f)) );
+ }
+ break;
+
+ case O_IP_DST:
+ match = is_ipv4 &&
+ (((ipfw_insn_ip *)cmd)->addr.s_addr ==
+ dst_ip.s_addr);
+ break;
+
+ case O_IP_DST_ME:
+ if (is_ipv4) {
+ struct ifnet *tif;
+
+ INADDR_TO_IFP(dst_ip, tif);
+ match = (tif != NULL);
+ break;
+ }
+#ifdef INET6
+ /* FALLTHROUGH */
+ case O_IP6_DST_ME:
+				match = is_ipv6 &&
+				    search_ip6_addr_net(&args->f_id.dst_ip6);
+#endif
+ break;
+
+ case O_IP_SRCPORT:
+ case O_IP_DSTPORT:
+ /*
+ * offset == 0 && proto != 0 is enough
+ * to guarantee that we have a
+ * packet with port info.
+ */
+ if ((proto==IPPROTO_UDP || proto==IPPROTO_TCP)
+ && offset == 0) {
+ u_int16_t x =
+ (cmd->opcode == O_IP_SRCPORT) ?
+ src_port : dst_port ;
+ u_int16_t *p =
+ ((ipfw_insn_u16 *)cmd)->ports;
+ int i;
+
+ for (i = cmdlen - 1; !match && i>0;
+ i--, p += 2)
+ match = (x>=p[0] && x<=p[1]);
+ }
+ break;
+
+ case O_ICMPTYPE:
+ match = (offset == 0 && proto==IPPROTO_ICMP &&
+ icmptype_match(ICMP(ulp), (ipfw_insn_u32 *)cmd) );
+ break;
+
+#ifdef INET6
+ case O_ICMP6TYPE:
+ match = is_ipv6 && offset == 0 &&
+ proto==IPPROTO_ICMPV6 &&
+ icmp6type_match(
+ ICMP6(ulp)->icmp6_type,
+ (ipfw_insn_u32 *)cmd);
+ break;
+#endif /* INET6 */
+
+ case O_IPOPT:
+ match = (is_ipv4 &&
+ ipopts_match(ip, cmd) );
+ break;
+
+ case O_IPVER:
+ match = (is_ipv4 &&
+ cmd->arg1 == ip->ip_v);
+ break;
+
+ case O_IPID:
+ case O_IPLEN:
+ case O_IPTTL:
+ if (is_ipv4) { /* only for IP packets */
+ uint16_t x;
+ uint16_t *p;
+ int i;
+
+ if (cmd->opcode == O_IPLEN)
+ x = iplen;
+ else if (cmd->opcode == O_IPTTL)
+ x = ip->ip_ttl;
+ else /* must be IPID */
+ x = ntohs(ip->ip_id);
+ if (cmdlen == 1) {
+ match = (cmd->arg1 == x);
+ break;
+ }
+ /* otherwise we have ranges */
+ p = ((ipfw_insn_u16 *)cmd)->ports;
+ i = cmdlen - 1;
+ for (; !match && i>0; i--, p += 2)
+ match = (x >= p[0] && x <= p[1]);
+ }
+ break;
+
+ case O_IPPRECEDENCE:
+ match = (is_ipv4 &&
+ (cmd->arg1 == (ip->ip_tos & 0xe0)) );
+ break;
+
+ case O_IPTOS:
+ match = (is_ipv4 &&
+ flags_match(cmd, ip->ip_tos));
+ break;
+
+ case O_TCPDATALEN:
+ if (proto == IPPROTO_TCP && offset == 0) {
+ struct tcphdr *tcp;
+ uint16_t x;
+ uint16_t *p;
+ int i;
+
+ tcp = TCP(ulp);
+ x = iplen -
+ ((ip->ip_hl + tcp->th_off) << 2);
+ if (cmdlen == 1) {
+ match = (cmd->arg1 == x);
+ break;
+ }
+ /* otherwise we have ranges */
+ p = ((ipfw_insn_u16 *)cmd)->ports;
+ i = cmdlen - 1;
+ for (; !match && i>0; i--, p += 2)
+ match = (x >= p[0] && x <= p[1]);
+ }
+ break;
+
+ case O_TCPFLAGS:
+ match = (proto == IPPROTO_TCP && offset == 0 &&
+ flags_match(cmd, TCP(ulp)->th_flags));
+ break;
+
+ case O_TCPOPTS:
+ match = (proto == IPPROTO_TCP && offset == 0 &&
+ tcpopts_match(TCP(ulp), cmd));
+ break;
+
+ case O_TCPSEQ:
+ match = (proto == IPPROTO_TCP && offset == 0 &&
+ ((ipfw_insn_u32 *)cmd)->d[0] ==
+ TCP(ulp)->th_seq);
+ break;
+
+ case O_TCPACK:
+ match = (proto == IPPROTO_TCP && offset == 0 &&
+ ((ipfw_insn_u32 *)cmd)->d[0] ==
+ TCP(ulp)->th_ack);
+ break;
+
+ case O_TCPWIN:
+ match = (proto == IPPROTO_TCP && offset == 0 &&
+ cmd->arg1 == TCP(ulp)->th_win);
+ break;
+
+ case O_ESTAB:
+				/* reject packets which have SYN only */
+				/* XXX should I also check for TH_ACK? */
+ match = (proto == IPPROTO_TCP && offset == 0 &&
+ (TCP(ulp)->th_flags &
+ (TH_RST | TH_ACK | TH_SYN)) != TH_SYN);
+ break;
+
+ case O_ALTQ: {
+ struct pf_mtag *at;
+ ipfw_insn_altq *altq = (ipfw_insn_altq *)cmd;
+
+ match = 1;
+ at = pf_find_mtag(m);
+ if (at != NULL && at->qid != 0)
+ break;
+ at = pf_get_mtag(m);
+ if (at == NULL) {
+ /*
+ * Let the packet fall back to the
+ * default ALTQ.
+ */
+ break;
+ }
+ at->qid = altq->qid;
+ if (is_ipv4)
+ at->af = AF_INET;
+ else
+ at->af = AF_LINK;
+ at->hdr = ip;
+ break;
+ }
+
+ case O_LOG:
+ ipfw_log(f, hlen, args, m,
+ oif, offset, tablearg, ip);
+ match = 1;
+ break;
+
+ case O_PROB:
+ match = (random()<((ipfw_insn_u32 *)cmd)->d[0]);
+ break;
+
+ case O_VERREVPATH:
+ /* Outgoing packets automatically pass/match */
+ match = ((oif != NULL) ||
+ (m->m_pkthdr.rcvif == NULL) ||
+ (
+#ifdef INET6
+ is_ipv6 ?
+ verify_path6(&(args->f_id.src_ip6),
+ m->m_pkthdr.rcvif) :
+#endif
+ verify_path(src_ip, m->m_pkthdr.rcvif,
+ args->f_id.fib)));
+ break;
+
+ case O_VERSRCREACH:
+ /* Outgoing packets automatically pass/match */
+				match = (hlen > 0 && ((oif != NULL) || (
+#ifdef INET6
+				    is_ipv6 ?
+					verify_path6(&(args->f_id.src_ip6),
+					    NULL) :
+#endif
+				    verify_path(src_ip, NULL, args->f_id.fib))));
+ break;
+
+ case O_ANTISPOOF:
+ /* Outgoing packets automatically pass/match */
+ if (oif == NULL && hlen > 0 &&
+ ( (is_ipv4 && in_localaddr(src_ip))
+#ifdef INET6
+ || (is_ipv6 &&
+ in6_localaddr(&(args->f_id.src_ip6)))
+#endif
+ ))
+ match =
+#ifdef INET6
+ is_ipv6 ? verify_path6(
+ &(args->f_id.src_ip6),
+ m->m_pkthdr.rcvif) :
+#endif
+ verify_path(src_ip,
+ m->m_pkthdr.rcvif,
+ args->f_id.fib);
+ else
+ match = 1;
+ break;
+
+ case O_IPSEC:
+#ifdef IPSEC
+ match = (m_tag_find(m,
+ PACKET_TAG_IPSEC_IN_DONE, NULL) != NULL);
+#endif
+ /* otherwise no match */
+ break;
+
+#ifdef INET6
+ case O_IP6_SRC:
+ match = is_ipv6 &&
+ IN6_ARE_ADDR_EQUAL(&args->f_id.src_ip6,
+ &((ipfw_insn_ip6 *)cmd)->addr6);
+ break;
+
+ case O_IP6_DST:
+ match = is_ipv6 &&
+ IN6_ARE_ADDR_EQUAL(&args->f_id.dst_ip6,
+ &((ipfw_insn_ip6 *)cmd)->addr6);
+ break;
+ case O_IP6_SRC_MASK:
+ case O_IP6_DST_MASK:
+ if (is_ipv6) {
+ int i = cmdlen - 1;
+ struct in6_addr p;
+ struct in6_addr *d =
+ &((ipfw_insn_ip6 *)cmd)->addr6;
+
+ for (; !match && i > 0; d += 2,
+ i -= F_INSN_SIZE(struct in6_addr)
+ * 2) {
+ p = (cmd->opcode ==
+ O_IP6_SRC_MASK) ?
+ args->f_id.src_ip6:
+ args->f_id.dst_ip6;
+ APPLY_MASK(&p, &d[1]);
+ match =
+ IN6_ARE_ADDR_EQUAL(&d[0],
+ &p);
+ }
+ }
+ break;
+
+ case O_FLOW6ID:
+ match = is_ipv6 &&
+ flow6id_match(args->f_id.flow_id6,
+ (ipfw_insn_u32 *) cmd);
+ break;
+
+ case O_EXT_HDR:
+ match = is_ipv6 &&
+ (ext_hd & ((ipfw_insn *) cmd)->arg1);
+ break;
+
+ case O_IP6:
+ match = is_ipv6;
+ break;
+#endif
+
+ case O_IP4:
+ match = is_ipv4;
+ break;
+
+ case O_TAG: {
+ struct m_tag *mtag;
+ uint32_t tag = (cmd->arg1 == IP_FW_TABLEARG) ?
+ tablearg : cmd->arg1;
+
+ /* Packet is already tagged with this tag? */
+ mtag = m_tag_locate(m, MTAG_IPFW, tag, NULL);
+
+				/* When the F_NOT flag is present this is an
+				 * `untag' action: remove the mtag from the
+				 * mbuf and reset `match' to zero (`match'
+				 * is inverted later).
+				 * Otherwise allocate a new mtag and prepend
+				 * it to the mbuf.
+				 */
+ if (cmd->len & F_NOT) { /* `untag' action */
+ if (mtag != NULL)
+ m_tag_delete(m, mtag);
+ match = 0;
+ } else if (mtag == NULL) {
+ if ((mtag = m_tag_alloc(MTAG_IPFW,
+ tag, 0, M_NOWAIT)) != NULL)
+ m_tag_prepend(m, mtag);
+ match = 1;
+ }
+ break;
+ }
+
+ case O_FIB: /* try match the specified fib */
+ if (args->f_id.fib == cmd->arg1)
+ match = 1;
+ break;
+
+ case O_TAGGED: {
+ struct m_tag *mtag;
+ uint32_t tag = (cmd->arg1 == IP_FW_TABLEARG) ?
+ tablearg : cmd->arg1;
+
+ if (cmdlen == 1) {
+ match = m_tag_locate(m, MTAG_IPFW,
+ tag, NULL) != NULL;
+ break;
+ }
+
+ /* we have ranges */
+ for (mtag = m_tag_first(m);
+ mtag != NULL && !match;
+ mtag = m_tag_next(m, mtag)) {
+ uint16_t *p;
+ int i;
+
+ if (mtag->m_tag_cookie != MTAG_IPFW)
+ continue;
+
+ p = ((ipfw_insn_u16 *)cmd)->ports;
+ i = cmdlen - 1;
+ for(; !match && i > 0; i--, p += 2)
+ match =
+ mtag->m_tag_id >= p[0] &&
+ mtag->m_tag_id <= p[1];
+ }
+ break;
+ }
+
+ /*
+ * The second set of opcodes represents 'actions',
+ * i.e. the terminal part of a rule once the packet
+ * matches all previous patterns.
+ * Typically there is only one action for each rule,
+ * and the opcode is stored at the end of the rule
+ * (but there are exceptions -- see below).
+ *
+ * In general, here we set retval and terminate the
+ * outer loop (would be a 'break 3' in some language,
+ * but we need to set l=0, done=1)
+ *
+ * Exceptions:
+ * O_COUNT and O_SKIPTO actions:
+ * instead of terminating, we jump to the next rule
+ * (setting l=0), or to the SKIPTO target (setting
+ * f/f_len, cmd and l as needed), respectively.
+ *
+ * O_TAG, O_LOG and O_ALTQ action parameters:
+ * perform some action and set match = 1;
+ *
+ * O_LIMIT and O_KEEP_STATE: these opcodes are
+ * not real 'actions', and are stored right
+ * before the 'action' part of the rule.
+ * These opcodes try to install an entry in the
+ * state tables; if successful, we continue with
+ * the next opcode (match=1; break;), otherwise
+ * the packet must be dropped (set retval,
+ * break loops with l=0, done=1)
+ *
+ * O_PROBE_STATE and O_CHECK_STATE: these opcodes
+ * cause a lookup of the state table, and a jump
+ * to the 'action' part of the parent rule
+ * if an entry is found, or
+ * (CHECK_STATE only) a jump to the next rule if
+ * the entry is not found.
+ * The result of the lookup is cached so that
+ * further instances of these opcodes become NOPs.
+ * The jump to the next rule is done by setting
+ * l=0, cmdlen=0.
+ */
+ case O_LIMIT:
+ case O_KEEP_STATE:
+ if (ipfw_install_state(f,
+ (ipfw_insn_limit *)cmd, args, tablearg)) {
+ /* error or limit violation */
+ retval = IP_FW_DENY;
+ l = 0; /* exit inner loop */
+ done = 1; /* exit outer loop */
+ }
+ match = 1;
+ break;
+
+ case O_PROBE_STATE:
+ case O_CHECK_STATE:
+ /*
+ * dynamic rules are checked at the first
+ * keep-state or check-state occurrence,
+ * with the result being stored in dyn_dir.
+ * The compiler introduces a PROBE_STATE
+ * instruction for us when we have a
+ * KEEP_STATE (because PROBE_STATE needs
+ * to be run first).
+ */
+ if (dyn_dir == MATCH_UNKNOWN &&
+ (q = ipfw_lookup_dyn_rule(&args->f_id,
+ &dyn_dir, proto == IPPROTO_TCP ?
+ TCP(ulp) : NULL))
+ != NULL) {
+ /*
+ * Found dynamic entry, update stats
+ * and jump to the 'action' part of
+ * the parent rule by setting
+ * f, cmd, l and clearing cmdlen.
+ */
+ q->pcnt++;
+ q->bcnt += pktlen;
+ /* XXX we would like to have f_pos
+ * readily accessible in the dynamic
+ * rule, instead of having to
+					 * look up q->rule.
+ */
+ f = q->rule;
+ f_pos = ipfw_find_rule(chain,
+ f->rulenum, f->id);
+ cmd = ACTION_PTR(f);
+ l = f->cmd_len - f->act_ofs;
+ ipfw_dyn_unlock();
+ cmdlen = 0;
+ match = 1;
+ break;
+ }
+ /*
+ * Dynamic entry not found. If CHECK_STATE,
+ * skip to next rule, if PROBE_STATE just
+ * ignore and continue with next opcode.
+ */
+ if (cmd->opcode == O_CHECK_STATE)
+ l = 0; /* exit inner loop */
+ match = 1;
+ break;
+
+ case O_ACCEPT:
+ retval = 0; /* accept */
+ l = 0; /* exit inner loop */
+ done = 1; /* exit outer loop */
+ break;
+
+ case O_PIPE:
+ case O_QUEUE:
+ set_match(args, f_pos, chain);
+ args->rule.info = (cmd->arg1 == IP_FW_TABLEARG) ?
+ tablearg : cmd->arg1;
+ if (cmd->opcode == O_PIPE)
+ args->rule.info |= IPFW_IS_PIPE;
+ if (V_fw_one_pass)
+ args->rule.info |= IPFW_ONEPASS;
+ retval = IP_FW_DUMMYNET;
+ l = 0; /* exit inner loop */
+ done = 1; /* exit outer loop */
+ break;
+
+ case O_DIVERT:
+ case O_TEE:
+ if (args->eh) /* not on layer 2 */
+ break;
+ /* otherwise this is terminal */
+ l = 0; /* exit inner loop */
+ done = 1; /* exit outer loop */
+ retval = (cmd->opcode == O_DIVERT) ?
+ IP_FW_DIVERT : IP_FW_TEE;
+ set_match(args, f_pos, chain);
+ args->rule.info = (cmd->arg1 == IP_FW_TABLEARG) ?
+ tablearg : cmd->arg1;
+ break;
+
+ case O_COUNT:
+ f->pcnt++; /* update stats */
+ f->bcnt += pktlen;
+ f->timestamp = time_uptime;
+ l = 0; /* exit inner loop */
+ break;
+
+ case O_SKIPTO:
+ f->pcnt++; /* update stats */
+ f->bcnt += pktlen;
+ f->timestamp = time_uptime;
+				/* If possible use cached f_pos (in f->next_rule),
+				 * whose version is written in f->x_next
+				 * (horrible hacks to avoid changing the ABI).
+				 */
+ if (cmd->arg1 != IP_FW_TABLEARG &&
+ (uintptr_t)f->x_next == chain->id) {
+ f_pos = (uintptr_t)f->next_rule;
+ } else {
+ int i = (cmd->arg1 == IP_FW_TABLEARG) ?
+ tablearg : cmd->arg1;
+ /* make sure we do not jump backward */
+ if (i <= f->rulenum)
+ i = f->rulenum + 1;
+ f_pos = ipfw_find_rule(chain, i, 0);
+ /* update the cache */
+ if (cmd->arg1 != IP_FW_TABLEARG) {
+ f->next_rule =
+ (void *)(uintptr_t)f_pos;
+ f->x_next =
+ (void *)(uintptr_t)chain->id;
+ }
+ }
+				/*
+				 * Skip disabled rules, and re-enter
+				 * the inner loop with the correct
+				 * f_pos, f, l and cmd.
+				 * Also clear cmdlen and skip_or.
+				 */
+ for (; f_pos < chain->n_rules - 1 &&
+ (V_set_disable &
+ (1 << chain->map[f_pos]->set));
+ f_pos++)
+ ;
+ /* Re-enter the inner loop at the skipto rule. */
+ f = chain->map[f_pos];
+ l = f->cmd_len;
+ cmd = f->cmd;
+ match = 1;
+ cmdlen = 0;
+ skip_or = 0;
+ continue;
+ break; /* not reached */
+
+ case O_REJECT:
+ /*
+ * Drop the packet and send a reject notice
+ * if the packet is not ICMP (or is an ICMP
+ * query), and it is not multicast/broadcast.
+ */
+ if (hlen > 0 && is_ipv4 && offset == 0 &&
+ (proto != IPPROTO_ICMP ||
+ is_icmp_query(ICMP(ulp))) &&
+ !(m->m_flags & (M_BCAST|M_MCAST)) &&
+ !IN_MULTICAST(ntohl(dst_ip.s_addr))) {
+ send_reject(args, cmd->arg1, iplen, ip);
+ m = args->m;
+ }
+ /* FALLTHROUGH */
+#ifdef INET6
+ case O_UNREACH6:
+ if (hlen > 0 && is_ipv6 &&
+ ((offset & IP6F_OFF_MASK) == 0) &&
+ (proto != IPPROTO_ICMPV6 ||
+ (is_icmp6_query(icmp6_type) == 1)) &&
+ !(m->m_flags & (M_BCAST|M_MCAST)) &&
+ !IN6_IS_ADDR_MULTICAST(&args->f_id.dst_ip6)) {
+ send_reject6(
+ args, cmd->arg1, hlen,
+ (struct ip6_hdr *)ip);
+ m = args->m;
+ }
+ /* FALLTHROUGH */
+#endif
+ case O_DENY:
+ retval = IP_FW_DENY;
+ l = 0; /* exit inner loop */
+ done = 1; /* exit outer loop */
+ break;
+
+ case O_FORWARD_IP:
+ if (args->eh) /* not valid on layer2 pkts */
+ break;
+ if (!q || dyn_dir == MATCH_FORWARD) {
+ struct sockaddr_in *sa;
+ sa = &(((ipfw_insn_sa *)cmd)->sa);
+ if (sa->sin_addr.s_addr == INADDR_ANY) {
+ bcopy(sa, &args->hopstore,
+ sizeof(*sa));
+ args->hopstore.sin_addr.s_addr =
+ htonl(tablearg);
+ args->next_hop = &args->hopstore;
+ } else {
+ args->next_hop = sa;
+ }
+ }
+ retval = IP_FW_PASS;
+ l = 0; /* exit inner loop */
+ done = 1; /* exit outer loop */
+ break;
+
+ case O_NETGRAPH:
+ case O_NGTEE:
+ set_match(args, f_pos, chain);
+ args->rule.info = (cmd->arg1 == IP_FW_TABLEARG) ?
+ tablearg : cmd->arg1;
+ if (V_fw_one_pass)
+ args->rule.info |= IPFW_ONEPASS;
+ retval = (cmd->opcode == O_NETGRAPH) ?
+ IP_FW_NETGRAPH : IP_FW_NGTEE;
+ l = 0; /* exit inner loop */
+ done = 1; /* exit outer loop */
+ break;
+
+ case O_SETFIB:
+ f->pcnt++; /* update stats */
+ f->bcnt += pktlen;
+ f->timestamp = time_uptime;
+ M_SETFIB(m, cmd->arg1);
+ args->f_id.fib = cmd->arg1;
+ l = 0; /* exit inner loop */
+ break;
+
+ case O_NAT:
+ if (!IPFW_NAT_LOADED) {
+ retval = IP_FW_DENY;
+ } else {
+ struct cfg_nat *t;
+ int nat_id;
+
+ set_match(args, f_pos, chain);
+ t = ((ipfw_insn_nat *)cmd)->nat;
+ if (t == NULL) {
+ nat_id = (cmd->arg1 == IP_FW_TABLEARG) ?
+ tablearg : cmd->arg1;
+ t = (*lookup_nat_ptr)(&chain->nat, nat_id);
+
+ if (t == NULL) {
+ retval = IP_FW_DENY;
+ l = 0; /* exit inner loop */
+ done = 1; /* exit outer loop */
+ break;
+ }
+ if (cmd->arg1 != IP_FW_TABLEARG)
+ ((ipfw_insn_nat *)cmd)->nat = t;
+ }
+ retval = ipfw_nat_ptr(args, t, m);
+ }
+ l = 0; /* exit inner loop */
+ done = 1; /* exit outer loop */
+ break;
+
+ case O_REASS: {
+ int ip_off;
+
+ f->pcnt++;
+ f->bcnt += pktlen;
+ l = 0; /* in any case exit inner loop */
+ ip_off = ntohs(ip->ip_off);
+
+ /* if not fragmented, go to next rule */
+ if ((ip_off & (IP_MF | IP_OFFMASK)) == 0)
+ break;
+ /*
+ * ip_reass() expects len & off in host
+ * byte order.
+ */
+ SET_HOST_IPLEN(ip);
+
+ args->m = m = ip_reass(m);
+
+ /*
+ * do IP header checksum fixup.
+ */
+ if (m == NULL) { /* fragment got swallowed */
+ retval = IP_FW_DENY;
+ } else { /* good, packet complete */
+ int hlen;
+
+ ip = mtod(m, struct ip *);
+ hlen = ip->ip_hl << 2;
+ SET_NET_IPLEN(ip);
+ ip->ip_sum = 0;
+ if (hlen == sizeof(struct ip))
+ ip->ip_sum = in_cksum_hdr(ip);
+ else
+ ip->ip_sum = in_cksum(m, hlen);
+ retval = IP_FW_REASS;
+ set_match(args, f_pos, chain);
+ }
+ done = 1; /* exit outer loop */
+ break;
+ }
+
+ default:
+ panic("-- unknown opcode %d\n", cmd->opcode);
+ } /* end of switch() on opcodes */
+ /*
+ * if we get here with l=0, then match is irrelevant.
+ */
+
+ if (cmd->len & F_NOT)
+ match = !match;
+
+ if (match) {
+ if (cmd->len & F_OR)
+ skip_or = 1;
+ } else {
+ if (!(cmd->len & F_OR)) /* not an OR block, */
+ break; /* try next rule */
+ }
+
+ } /* end of inner loop, scan opcodes */
+
+ if (done)
+ break;
+
+/* next_rule:; */ /* try next rule */
+
+ } /* end of outer for, scan rules */
+
+ if (done) {
+ struct ip_fw *rule = chain->map[f_pos];
+ /* Update statistics */
+ rule->pcnt++;
+ rule->bcnt += pktlen;
+ rule->timestamp = time_uptime;
+ } else {
+ retval = IP_FW_DENY;
+ printf("ipfw: ouch!, skip past end of rules, denying packet\n");
+ }
+ IPFW_RUNLOCK(chain);
+#ifdef __FreeBSD__
+ if (ucred_cache != NULL)
+ crfree(ucred_cache);
+#endif
+ return (retval);
+
+pullup_failed:
+ if (V_fw_verbose)
+ printf("ipfw: pullup failed\n");
+ return (IP_FW_DENY);
+}
+
+/*
+ * Module and VNET glue
+ */
+
+/*
+ * Stuff that must be initialized only on boot or module load.
+ */
+static int
+ipfw_init(void)
+{
+ int error = 0;
+
+ ipfw_dyn_attach();
+ /*
+ * Only print out this stuff the first time around,
+ * when called from the sysinit code.
+ */
+ printf("ipfw2 "
+#ifdef INET6
+ "(+ipv6) "
+#endif
+ "initialized, divert %s, nat %s, "
+ "rule-based forwarding "
+#ifdef IPFIREWALL_FORWARD
+ "enabled, "
+#else
+ "disabled, "
+#endif
+ "default to %s, logging ",
+#ifdef IPDIVERT
+ "enabled",
+#else
+ "loadable",
+#endif
+#ifdef IPFIREWALL_NAT
+ "enabled",
+#else
+ "loadable",
+#endif
+ default_to_accept ? "accept" : "deny");
+
+ /*
+ * Note: V_xxx variables can be accessed here but the vnet specific
+ * initializer may not have been called yet for the VIMAGE case.
+ * Tuneables will have been processed. We will print out values for
+ * the default vnet.
+ * XXX This should all be rationalized AFTER 8.0
+ */
+ if (V_fw_verbose == 0)
+ printf("disabled\n");
+ else if (V_verbose_limit == 0)
+ printf("unlimited\n");
+ else
+ printf("limited to %d packets/entry by default\n",
+ V_verbose_limit);
+
+ ipfw_log_bpf(1); /* init */
+ return (error);
+}
+
+/*
+ * Called for the removal of the last instance only on module unload.
+ */
+static void
+ipfw_destroy(void)
+{
+
+ ipfw_log_bpf(0); /* uninit */
+ ipfw_dyn_detach();
+ printf("IP firewall unloaded\n");
+}
+
+/*
+ * Stuff that must be initialized for every instance
+ * (including the first of course).
+ */
+static int
+vnet_ipfw_init(const void *unused)
+{
+ int error;
+ struct ip_fw *rule = NULL;
+ struct ip_fw_chain *chain;
+
+ chain = &V_layer3_chain;
+
+ /* First set up some values that are compile time options */
+ V_autoinc_step = 100; /* bounded to 1..1000 in add_rule() */
+ V_fw_deny_unknown_exthdrs = 1;
+#ifdef IPFIREWALL_VERBOSE
+ V_fw_verbose = 1;
+#endif
+#ifdef IPFIREWALL_VERBOSE_LIMIT
+ V_verbose_limit = IPFIREWALL_VERBOSE_LIMIT;
+#endif
+#ifdef IPFIREWALL_NAT
+ LIST_INIT(&chain->nat);
+#endif
+
+ /* insert the default rule and create the initial map */
+ chain->n_rules = 1;
+ chain->static_len = sizeof(struct ip_fw);
+ chain->map = malloc(sizeof(struct ip_fw *), M_IPFW, M_NOWAIT | M_ZERO);
+ if (chain->map)
+ rule = malloc(chain->static_len, M_IPFW, M_NOWAIT | M_ZERO);
+ if (rule == NULL) {
+ if (chain->map)
+ free(chain->map, M_IPFW);
+ printf("ipfw2: ENOSPC initializing default rule "
+ "(support disabled)\n");
+ return (ENOSPC);
+ }
+ error = ipfw_init_tables(chain);
+ if (error) {
+ panic("init_tables"); /* XXX Marko fix this ! */
+ }
+
+ /* fill and insert the default rule */
+ rule->act_ofs = 0;
+ rule->rulenum = IPFW_DEFAULT_RULE;
+ rule->cmd_len = 1;
+ rule->set = RESVD_SET;
+ rule->cmd[0].len = 1;
+ rule->cmd[0].opcode = default_to_accept ? O_ACCEPT : O_DENY;
+ chain->rules = chain->default_rule = chain->map[0] = rule;
+ chain->id = rule->id = 1;
+
+ IPFW_LOCK_INIT(chain);
+ ipfw_dyn_init();
+
+	V_ipfw_vnet_ready = 1;		/* Open for business */
+
+ /*
+ * Hook the sockopt handler, and the layer2 (V_ip_fw_chk_ptr)
+ * and pfil hooks for ipv4 and ipv6. Even if the latter two fail
+ * we still keep the module alive because the sockopt and
+ * layer2 paths are still useful.
+ * ipfw[6]_hook return 0 on success, ENOENT on failure,
+ * so we can ignore the exact return value and just set a flag.
+ *
+ * Note that V_fw[6]_enable are manipulated by a SYSCTL_PROC so
+ * changes in the underlying (per-vnet) variables trigger
+ * immediate hook()/unhook() calls.
+ * In layer2 we have the same behaviour, except that V_ether_ipfw
+ * is checked on each packet because there are no pfil hooks.
+ */
+ V_ip_fw_ctl_ptr = ipfw_ctl;
+ V_ip_fw_chk_ptr = ipfw_chk;
+ error = ipfw_attach_hooks(1);
+ return (error);
+}
+
+/*
+ * Called for the removal of each instance.
+ */
+static int
+vnet_ipfw_uninit(const void *unused)
+{
+ struct ip_fw *reap, *rule;
+ struct ip_fw_chain *chain = &V_layer3_chain;
+ int i;
+
+ V_ipfw_vnet_ready = 0; /* tell new callers to go away */
+ /*
+ * disconnect from ipv4, ipv6, layer2 and sockopt.
+ * Then grab, release and grab again the WLOCK so we make
+ * sure the update is propagated and nobody will be in.
+ */
+ (void)ipfw_attach_hooks(0 /* detach */);
+ V_ip_fw_chk_ptr = NULL;
+ V_ip_fw_ctl_ptr = NULL;
+ IPFW_UH_WLOCK(chain);
+ IPFW_UH_WUNLOCK(chain);
+ IPFW_UH_WLOCK(chain);
+
+ IPFW_WLOCK(chain);
+ IPFW_WUNLOCK(chain);
+ IPFW_WLOCK(chain);
+
+ ipfw_dyn_uninit(0); /* run the callout_drain */
+ ipfw_destroy_tables(chain);
+ reap = NULL;
+ for (i = 0; i < chain->n_rules; i++) {
+ rule = chain->map[i];
+ rule->x_next = reap;
+ reap = rule;
+ }
+ if (chain->map)
+ free(chain->map, M_IPFW);
+ IPFW_WUNLOCK(chain);
+ IPFW_UH_WUNLOCK(chain);
+ if (reap != NULL)
+ ipfw_reap_rules(reap);
+ IPFW_LOCK_DESTROY(chain);
+ ipfw_dyn_uninit(1); /* free the remaining parts */
+ return 0;
+}
+
+/*
+ * Module event handler.
+ * In general we have the choice of handling most of these events by the
+ * event handler or by the (VNET_)SYS(UN)INIT handlers. I have chosen to
+ * use the SYSINIT handlers as they are more capable of expressing the
+ * flow of control during module and vnet operations, so this is just
+ * a skeleton. Note there is no SYSINIT equivalent of the module
+ * SHUTDOWN handler, but we don't have anything to do in that case anyhow.
+ */
+static int
+ipfw_modevent(module_t mod, int type, void *unused)
+{
+ int err = 0;
+
+ switch (type) {
+ case MOD_LOAD:
+ /* Called once at module load or
+ * system boot if compiled in. */
+ break;
+ case MOD_QUIESCE:
+ /* Called before unload. May veto unloading. */
+ break;
+ case MOD_UNLOAD:
+ /* Called during unload. */
+ break;
+ case MOD_SHUTDOWN:
+ /* Called during system shutdown. */
+ break;
+ default:
+ err = EOPNOTSUPP;
+ break;
+ }
+ return err;
+}
+
+static moduledata_t ipfwmod = {
+ "ipfw",
+ ipfw_modevent,
+ 0
+};
+
+/* Define startup order. */
+#define IPFW_SI_SUB_FIREWALL SI_SUB_PROTO_IFATTACHDOMAIN
+#define IPFW_MODEVENT_ORDER (SI_ORDER_ANY - 255) /* On boot slot in here. */
+#define IPFW_MODULE_ORDER (IPFW_MODEVENT_ORDER + 1) /* A little later. */
+#define IPFW_VNET_ORDER (IPFW_MODEVENT_ORDER + 2) /* Later still. */
+
+DECLARE_MODULE(ipfw, ipfwmod, IPFW_SI_SUB_FIREWALL, IPFW_MODEVENT_ORDER);
+MODULE_VERSION(ipfw, 2);
+/* should declare some dependencies here */
+
+/*
+ * Starting up. Done in order after ipfwmod() has been called.
+ * VNET_SYSINIT is also called for each existing vnet and each new vnet.
+ */
+SYSINIT(ipfw_init, IPFW_SI_SUB_FIREWALL, IPFW_MODULE_ORDER,
+ ipfw_init, NULL);
+VNET_SYSINIT(vnet_ipfw_init, IPFW_SI_SUB_FIREWALL, IPFW_VNET_ORDER,
+ vnet_ipfw_init, NULL);
+
+/*
+ * Closing up shop. These are done in REVERSE ORDER, but still
+ * after ipfwmod() has been called. Not called on reboot.
+ * VNET_SYSUNINIT is also called for each exiting vnet as it exits,
+ * or when the module is unloaded.
+ */
+SYSUNINIT(ipfw_destroy, IPFW_SI_SUB_FIREWALL, IPFW_MODULE_ORDER,
+ ipfw_destroy, NULL);
+VNET_SYSUNINIT(vnet_ipfw_uninit, IPFW_SI_SUB_FIREWALL, IPFW_VNET_ORDER,
+ vnet_ipfw_uninit, NULL);
+/* end of file */
diff --git a/rtems/freebsd/netinet/ipfw/ip_fw_log.c b/rtems/freebsd/netinet/ipfw/ip_fw_log.c
new file mode 100644
index 00000000..e3249d02
--- /dev/null
+++ b/rtems/freebsd/netinet/ipfw/ip_fw_log.c
@@ -0,0 +1,451 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 2002-2009 Luigi Rizzo, Universita` di Pisa
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * Logging support for ipfw
+ */
+
+#if !defined(KLD_MODULE)
+#include <rtems/freebsd/local/opt_ipfw.h>
+#include <rtems/freebsd/local/opt_ipdivert.h>
+#include <rtems/freebsd/local/opt_ipdn.h>
+#include <rtems/freebsd/local/opt_inet.h>
+#ifndef INET
+#error IPFIREWALL requires INET.
+#endif /* INET */
+#endif
+#include <rtems/freebsd/local/opt_inet6.h>
+#include <rtems/freebsd/local/opt_ipsec.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/sysctl.h>
+#include <rtems/freebsd/sys/syslog.h>
+#include <rtems/freebsd/net/ethernet.h> /* for ETHERTYPE_IP */
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/vnet.h>
+#include <rtems/freebsd/net/if_types.h> /* for IFT_ETHER */
+#include <rtems/freebsd/net/bpf.h> /* for BPF */
+
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/ip.h>
+#include <rtems/freebsd/netinet/ip_icmp.h>
+#include <rtems/freebsd/netinet/ip_var.h>
+#include <rtems/freebsd/netinet/ip_fw.h>
+#include <rtems/freebsd/netinet/ipfw/ip_fw_private.h>
+#include <rtems/freebsd/netinet/tcp_var.h>
+#include <rtems/freebsd/netinet/udp.h>
+
+#include <rtems/freebsd/netinet/ip6.h>
+#include <rtems/freebsd/netinet/icmp6.h>
+#ifdef INET6
+#include <rtems/freebsd/netinet6/in6_var.h> /* ip6_sprintf() */
+#endif
+
+#ifdef MAC
+#include <rtems/freebsd/security/mac/mac_framework.h>
+#endif
+
+/*
+ * L3HDR maps an ipv4 pointer into a layer3 header pointer of type T
+ * Other macros just cast void * into the appropriate type
+ */
+#define L3HDR(T, ip) ((T *)((u_int32_t *)(ip) + (ip)->ip_hl))
+#define TCP(p) ((struct tcphdr *)(p))
+#define SCTP(p) ((struct sctphdr *)(p))
+#define UDP(p) ((struct udphdr *)(p))
+#define ICMP(p) ((struct icmphdr *)(p))
+#define ICMP6(p) ((struct icmp6_hdr *)(p))
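+/*
+ * For example, L3HDR(struct tcphdr, ip) skips ip->ip_hl 32-bit words
+ * (the IPv4 header including any options) and yields a struct tcphdr
+ * pointer to the start of the transport header.
+ */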
+
+#define SNPARGS(buf, len) buf + len, sizeof(buf) > len ? sizeof(buf) - len : 0
+#define SNP(buf) buf, sizeof(buf)
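+/*
+ * SNPARGS(buf, len) expands to a pointer at offset len into buf plus
+ * the space remaining, so successive snprintf(SNPARGS(buf, len), ...)
+ * calls can append safely; see the TCP/UDP cases in ipfw_log() below.
+ */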
+
+#ifdef WITHOUT_BPF
+void
+ipfw_log_bpf(int onoff)
+{
+}
+#else /* !WITHOUT_BPF */
+static struct ifnet *log_if; /* hook to attach to bpf */
+
+/* we use this dummy function for all ifnet callbacks */
+static int
+log_dummy(struct ifnet *ifp, u_long cmd, caddr_t addr)
+{
+ return EINVAL;
+}
+
+static int
+ipfw_log_output(struct ifnet *ifp, struct mbuf *m,
+ struct sockaddr *dst, struct route *ro)
+{
+ if (m != NULL)
+ m_freem(m);
+ return EINVAL;
+}
+
+static void
+ipfw_log_start(struct ifnet* ifp)
+{
+ panic("ipfw_log_start() must not be called");
+}
+
+static const u_char ipfwbroadcastaddr[6] =
+ { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
+
+void
+ipfw_log_bpf(int onoff)
+{
+ struct ifnet *ifp;
+
+ if (onoff) {
+ if (log_if)
+ return;
+ ifp = if_alloc(IFT_ETHER);
+ if (ifp == NULL)
+ return;
+ if_initname(ifp, "ipfw", 0);
+ ifp->if_mtu = 65536;
+ ifp->if_flags = IFF_UP | IFF_SIMPLEX | IFF_MULTICAST;
+ ifp->if_init = (void *)log_dummy;
+ ifp->if_ioctl = log_dummy;
+ ifp->if_start = ipfw_log_start;
+ ifp->if_output = ipfw_log_output;
+ ifp->if_addrlen = 6;
+ ifp->if_hdrlen = 14;
+ if_attach(ifp);
+ ifp->if_broadcastaddr = ipfwbroadcastaddr;
+ ifp->if_baudrate = IF_Mbps(10);
+ bpfattach(ifp, DLT_EN10MB, 14);
+ log_if = ifp;
+ } else {
+ if (log_if) {
+ ether_ifdetach(log_if);
+ if_free(log_if);
+ }
+ log_if = NULL;
+ }
+}
+#endif /* !WITHOUT_BPF */
+
+/*
+ * We enter here when we have a rule with O_LOG.
+ * XXX this function alone takes about 2Kbytes of code!
+ */
+void
+ipfw_log(struct ip_fw *f, u_int hlen, struct ip_fw_args *args,
+ struct mbuf *m, struct ifnet *oif, u_short offset, uint32_t tablearg,
+ struct ip *ip)
+{
+ char *action;
+ int limit_reached = 0;
+ char action2[40], proto[128], fragment[32];
+
+ if (V_fw_verbose == 0) {
+#ifndef WITHOUT_BPF
+
+ if (log_if == NULL || log_if->if_bpf == NULL)
+ return;
+
+ if (args->eh) /* layer2, use orig hdr */
+ BPF_MTAP2(log_if, args->eh, ETHER_HDR_LEN, m);
+ else
+ /* Add fake header. Later we will store
+ * more info in the header.
+ */
+ BPF_MTAP2(log_if, "DDDDDDSSSSSS\x08\x00", ETHER_HDR_LEN, m);
+#endif /* !WITHOUT_BPF */
+ return;
+ }
+ /* the old 'log' function */
+ fragment[0] = '\0';
+ proto[0] = '\0';
+
+ if (f == NULL) { /* bogus pkt */
+ if (V_verbose_limit != 0 && V_norule_counter >= V_verbose_limit)
+ return;
+ V_norule_counter++;
+ if (V_norule_counter == V_verbose_limit)
+ limit_reached = V_verbose_limit;
+ action = "Refuse";
+ } else { /* O_LOG is the first action, find the real one */
+ ipfw_insn *cmd = ACTION_PTR(f);
+ ipfw_insn_log *l = (ipfw_insn_log *)cmd;
+
+ if (l->max_log != 0 && l->log_left == 0)
+ return;
+ l->log_left--;
+ if (l->log_left == 0)
+ limit_reached = l->max_log;
+ cmd += F_LEN(cmd); /* point to first action */
+ if (cmd->opcode == O_ALTQ) {
+ ipfw_insn_altq *altq = (ipfw_insn_altq *)cmd;
+
+ snprintf(SNPARGS(action2, 0), "Altq %d",
+ altq->qid);
+ cmd += F_LEN(cmd);
+ }
+ if (cmd->opcode == O_PROB)
+ cmd += F_LEN(cmd);
+
+ if (cmd->opcode == O_TAG)
+ cmd += F_LEN(cmd);
+
+ action = action2;
+ switch (cmd->opcode) {
+ case O_DENY:
+ action = "Deny";
+ break;
+
+ case O_REJECT:
+ if (cmd->arg1==ICMP_REJECT_RST)
+ action = "Reset";
+ else if (cmd->arg1==ICMP_UNREACH_HOST)
+ action = "Reject";
+ else
+ snprintf(SNPARGS(action2, 0), "Unreach %d",
+ cmd->arg1);
+ break;
+
+ case O_UNREACH6:
+ if (cmd->arg1==ICMP6_UNREACH_RST)
+ action = "Reset";
+ else
+ snprintf(SNPARGS(action2, 0), "Unreach %d",
+ cmd->arg1);
+ break;
+
+ case O_ACCEPT:
+ action = "Accept";
+ break;
+ case O_COUNT:
+ action = "Count";
+ break;
+ case O_DIVERT:
+ snprintf(SNPARGS(action2, 0), "Divert %d",
+ cmd->arg1);
+ break;
+ case O_TEE:
+ snprintf(SNPARGS(action2, 0), "Tee %d",
+ cmd->arg1);
+ break;
+ case O_SETFIB:
+ snprintf(SNPARGS(action2, 0), "SetFib %d",
+ cmd->arg1);
+ break;
+ case O_SKIPTO:
+ snprintf(SNPARGS(action2, 0), "SkipTo %d",
+ cmd->arg1);
+ break;
+ case O_PIPE:
+ snprintf(SNPARGS(action2, 0), "Pipe %d",
+ cmd->arg1);
+ break;
+ case O_QUEUE:
+ snprintf(SNPARGS(action2, 0), "Queue %d",
+ cmd->arg1);
+ break;
+ case O_FORWARD_IP: {
+ ipfw_insn_sa *sa = (ipfw_insn_sa *)cmd;
+ int len;
+ struct in_addr dummyaddr;
+ if (sa->sa.sin_addr.s_addr == INADDR_ANY)
+ dummyaddr.s_addr = htonl(tablearg);
+ else
+ dummyaddr.s_addr = sa->sa.sin_addr.s_addr;
+
+ len = snprintf(SNPARGS(action2, 0), "Forward to %s",
+ inet_ntoa(dummyaddr));
+
+ if (sa->sa.sin_port)
+ snprintf(SNPARGS(action2, len), ":%d",
+ sa->sa.sin_port);
+ }
+ break;
+ case O_NETGRAPH:
+ snprintf(SNPARGS(action2, 0), "Netgraph %d",
+ cmd->arg1);
+ break;
+ case O_NGTEE:
+ snprintf(SNPARGS(action2, 0), "Ngtee %d",
+ cmd->arg1);
+ break;
+ case O_NAT:
+ action = "Nat";
+ break;
+ case O_REASS:
+ action = "Reass";
+ break;
+ default:
+ action = "UNKNOWN";
+ break;
+ }
+ }
+
+ if (hlen == 0) { /* non-ip */
+ snprintf(SNPARGS(proto, 0), "MAC");
+
+ } else {
+ int len;
+#ifdef INET6
+ char src[INET6_ADDRSTRLEN + 2], dst[INET6_ADDRSTRLEN + 2];
+#else
+ char src[INET_ADDRSTRLEN], dst[INET_ADDRSTRLEN];
+#endif
+ struct icmphdr *icmp;
+ struct tcphdr *tcp;
+ struct udphdr *udp;
+#ifdef INET6
+ struct ip6_hdr *ip6 = NULL;
+ struct icmp6_hdr *icmp6;
+#endif
+ src[0] = '\0';
+ dst[0] = '\0';
+#ifdef INET6
+ if (IS_IP6_FLOW_ID(&(args->f_id))) {
+ char ip6buf[INET6_ADDRSTRLEN];
+ snprintf(src, sizeof(src), "[%s]",
+ ip6_sprintf(ip6buf, &args->f_id.src_ip6));
+ snprintf(dst, sizeof(dst), "[%s]",
+ ip6_sprintf(ip6buf, &args->f_id.dst_ip6));
+
+ ip6 = (struct ip6_hdr *)ip;
+ tcp = (struct tcphdr *)(((char *)ip) + hlen);
+ udp = (struct udphdr *)(((char *)ip) + hlen);
+ } else
+#endif
+ {
+ tcp = L3HDR(struct tcphdr, ip);
+ udp = L3HDR(struct udphdr, ip);
+
+ inet_ntoa_r(ip->ip_src, src);
+ inet_ntoa_r(ip->ip_dst, dst);
+ }
+
+ switch (args->f_id.proto) {
+ case IPPROTO_TCP:
+ len = snprintf(SNPARGS(proto, 0), "TCP %s", src);
+ if (offset == 0)
+ snprintf(SNPARGS(proto, len), ":%d %s:%d",
+ ntohs(tcp->th_sport),
+ dst,
+ ntohs(tcp->th_dport));
+ else
+ snprintf(SNPARGS(proto, len), " %s", dst);
+ break;
+
+ case IPPROTO_UDP:
+ len = snprintf(SNPARGS(proto, 0), "UDP %s", src);
+ if (offset == 0)
+ snprintf(SNPARGS(proto, len), ":%d %s:%d",
+ ntohs(udp->uh_sport),
+ dst,
+ ntohs(udp->uh_dport));
+ else
+ snprintf(SNPARGS(proto, len), " %s", dst);
+ break;
+
+ case IPPROTO_ICMP:
+ icmp = L3HDR(struct icmphdr, ip);
+ if (offset == 0)
+ len = snprintf(SNPARGS(proto, 0),
+ "ICMP:%u.%u ",
+ icmp->icmp_type, icmp->icmp_code);
+ else
+ len = snprintf(SNPARGS(proto, 0), "ICMP ");
+ len += snprintf(SNPARGS(proto, len), "%s", src);
+ snprintf(SNPARGS(proto, len), " %s", dst);
+ break;
+#ifdef INET6
+ case IPPROTO_ICMPV6:
+ icmp6 = (struct icmp6_hdr *)(((char *)ip) + hlen);
+ if (offset == 0)
+ len = snprintf(SNPARGS(proto, 0),
+ "ICMPv6:%u.%u ",
+ icmp6->icmp6_type, icmp6->icmp6_code);
+ else
+ len = snprintf(SNPARGS(proto, 0), "ICMPv6 ");
+ len += snprintf(SNPARGS(proto, len), "%s", src);
+ snprintf(SNPARGS(proto, len), " %s", dst);
+ break;
+#endif
+ default:
+ len = snprintf(SNPARGS(proto, 0), "P:%d %s",
+ args->f_id.proto, src);
+ snprintf(SNPARGS(proto, len), " %s", dst);
+ break;
+ }
+
+#ifdef INET6
+ if (IS_IP6_FLOW_ID(&(args->f_id))) {
+ if (offset & (IP6F_OFF_MASK | IP6F_MORE_FRAG))
+ snprintf(SNPARGS(fragment, 0),
+ " (frag %08x:%d@%d%s)",
+ args->f_id.extra,
+ ntohs(ip6->ip6_plen) - hlen,
+ ntohs(offset & IP6F_OFF_MASK) << 3,
+ (offset & IP6F_MORE_FRAG) ? "+" : "");
+ } else
+#endif
+ {
+ int ipoff, iplen;
+ ipoff = ntohs(ip->ip_off);
+ iplen = ntohs(ip->ip_len);
+ if (ipoff & (IP_MF | IP_OFFMASK))
+ snprintf(SNPARGS(fragment, 0),
+ " (frag %d:%d@%d%s)",
+ ntohs(ip->ip_id), iplen - (ip->ip_hl << 2),
+ offset << 3,
+ (ipoff & IP_MF) ? "+" : "");
+ }
+ }
+#ifdef __FreeBSD__
+ if (oif || m->m_pkthdr.rcvif)
+ log(LOG_SECURITY | LOG_INFO,
+ "ipfw: %d %s %s %s via %s%s\n",
+ f ? f->rulenum : -1,
+ action, proto, oif ? "out" : "in",
+ oif ? oif->if_xname : m->m_pkthdr.rcvif->if_xname,
+ fragment);
+ else
+#endif
+ log(LOG_SECURITY | LOG_INFO,
+ "ipfw: %d %s %s [no if info]%s\n",
+ f ? f->rulenum : -1,
+ action, proto, fragment);
+ if (limit_reached)
+ log(LOG_SECURITY | LOG_NOTICE,
+ "ipfw: limit %d reached on entry %d\n",
+ limit_reached, f ? f->rulenum : -1);
+}
+/* end of file */
diff --git a/rtems/freebsd/netinet/ipfw/ip_fw_nat.c b/rtems/freebsd/netinet/ipfw/ip_fw_nat.c
new file mode 100644
index 00000000..12efd1ab
--- /dev/null
+++ b/rtems/freebsd/netinet/ipfw/ip_fw_nat.c
@@ -0,0 +1,606 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 2008 Paolo Pisati
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/eventhandler.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/module.h>
+#include <rtems/freebsd/sys/rwlock.h>
+
+#define IPFW_INTERNAL /* Access to protected data structures in ip_fw.h. */
+
+#include <rtems/freebsd/netinet/libalias/alias.h>
+#include <rtems/freebsd/netinet/libalias/alias_local.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/ip.h>
+#include <rtems/freebsd/netinet/ip_var.h>
+#include <rtems/freebsd/netinet/ip_fw.h>
+#include <rtems/freebsd/netinet/ipfw/ip_fw_private.h>
+#include <rtems/freebsd/netinet/tcp.h>
+#include <rtems/freebsd/netinet/udp.h>
+
+#include <rtems/freebsd/machine/in_cksum.h> /* XXX for in_cksum */
+
+static VNET_DEFINE(eventhandler_tag, ifaddr_event_tag);
+#define V_ifaddr_event_tag VNET(ifaddr_event_tag)
+
+static void
+ifaddr_change(void *arg __unused, struct ifnet *ifp)
+{
+ struct cfg_nat *ptr;
+ struct ifaddr *ifa;
+ struct ip_fw_chain *chain;
+
+ chain = &V_layer3_chain;
+ IPFW_WLOCK(chain);
+ /* Check every nat entry... */
+ LIST_FOREACH(ptr, &chain->nat, _next) {
+ /* ...using nic 'ifp->if_xname' as dynamic alias address. */
+ if (strncmp(ptr->if_name, ifp->if_xname, IF_NAMESIZE) != 0)
+ continue;
+ if_addr_rlock(ifp);
+ TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
+ if (ifa->ifa_addr == NULL)
+ continue;
+ if (ifa->ifa_addr->sa_family != AF_INET)
+ continue;
+ ptr->ip = ((struct sockaddr_in *)
+ (ifa->ifa_addr))->sin_addr;
+ LibAliasSetAddress(ptr->lib, ptr->ip);
+ }
+ if_addr_runlock(ifp);
+ }
+ IPFW_WUNLOCK(chain);
+}
+
+/*
+ * delete the pointers for nat entry ix, or all of them if ix < 0
+ */
+static void
+flush_nat_ptrs(struct ip_fw_chain *chain, const int ix)
+{
+ int i;
+ ipfw_insn_nat *cmd;
+
+ IPFW_WLOCK_ASSERT(chain);
+ for (i = 0; i < chain->n_rules; i++) {
+ cmd = (ipfw_insn_nat *)ACTION_PTR(chain->map[i]);
+ /* XXX skip log and the like ? */
+ if (cmd->o.opcode == O_NAT && cmd->nat != NULL &&
+ (ix < 0 || cmd->nat->id == ix))
+ cmd->nat = NULL;
+ }
+}
+
+static void
+del_redir_spool_cfg(struct cfg_nat *n, struct redir_chain *head)
+{
+ struct cfg_redir *r, *tmp_r;
+ struct cfg_spool *s, *tmp_s;
+ int i, num;
+
+ LIST_FOREACH_SAFE(r, head, _next, tmp_r) {
+		num = 1; /* Number of alias_links to delete. */
+ switch (r->mode) {
+ case REDIR_PORT:
+ num = r->pport_cnt;
+ /* FALLTHROUGH */
+ case REDIR_ADDR:
+ case REDIR_PROTO:
+			/* Delete all libalias redirect entries. */
+ for (i = 0; i < num; i++)
+ LibAliasRedirectDelete(n->lib, r->alink[i]);
+ /* Del spool cfg if any. */
+ LIST_FOREACH_SAFE(s, &r->spool_chain, _next, tmp_s) {
+ LIST_REMOVE(s, _next);
+ free(s, M_IPFW);
+ }
+ free(r->alink, M_IPFW);
+ LIST_REMOVE(r, _next);
+ free(r, M_IPFW);
+ break;
+ default:
+ printf("unknown redirect mode: %u\n", r->mode);
+ /* XXX - panic?!?!? */
+ break;
+ }
+ }
+}
+
+static int
+add_redir_spool_cfg(char *buf, struct cfg_nat *ptr)
+{
+ struct cfg_redir *r, *ser_r;
+ struct cfg_spool *s, *ser_s;
+ int cnt, off, i;
+
+ for (cnt = 0, off = 0; cnt < ptr->redir_cnt; cnt++) {
+ ser_r = (struct cfg_redir *)&buf[off];
+ r = malloc(SOF_REDIR, M_IPFW, M_WAITOK | M_ZERO);
+ memcpy(r, ser_r, SOF_REDIR);
+ LIST_INIT(&r->spool_chain);
+ off += SOF_REDIR;
+ r->alink = malloc(sizeof(struct alias_link *) * r->pport_cnt,
+ M_IPFW, M_WAITOK | M_ZERO);
+ switch (r->mode) {
+ case REDIR_ADDR:
+ r->alink[0] = LibAliasRedirectAddr(ptr->lib, r->laddr,
+ r->paddr);
+ break;
+ case REDIR_PORT:
+ for (i = 0 ; i < r->pport_cnt; i++) {
+ /* If remotePort is all ports, set it to 0. */
+ u_short remotePortCopy = r->rport + i;
+ if (r->rport_cnt == 1 && r->rport == 0)
+ remotePortCopy = 0;
+ r->alink[i] = LibAliasRedirectPort(ptr->lib,
+ r->laddr, htons(r->lport + i), r->raddr,
+ htons(remotePortCopy), r->paddr,
+ htons(r->pport + i), r->proto);
+ if (r->alink[i] == NULL) {
+ r->alink[0] = NULL;
+ break;
+ }
+ }
+ break;
+ case REDIR_PROTO:
+ r->alink[0] = LibAliasRedirectProto(ptr->lib ,r->laddr,
+ r->raddr, r->paddr, r->proto);
+ break;
+ default:
+ printf("unknown redirect mode: %u\n", r->mode);
+ break;
+ }
+ /* XXX perhaps return an error instead of panic ? */
+ if (r->alink[0] == NULL)
+ panic("LibAliasRedirect* returned NULL");
+ /* LSNAT handling. */
+ for (i = 0; i < r->spool_cnt; i++) {
+ ser_s = (struct cfg_spool *)&buf[off];
+			s = malloc(SOF_SPOOL, M_IPFW, M_WAITOK | M_ZERO);
+ memcpy(s, ser_s, SOF_SPOOL);
+ LibAliasAddServer(ptr->lib, r->alink[0],
+ s->addr, htons(s->port));
+ off += SOF_SPOOL;
+ /* Hook spool entry. */
+ LIST_INSERT_HEAD(&r->spool_chain, s, _next);
+ }
+ /* And finally hook this redir entry. */
+ LIST_INSERT_HEAD(&ptr->redir_chain, r, _next);
+ }
+ return (1);
+}
+
+static int
+ipfw_nat(struct ip_fw_args *args, struct cfg_nat *t, struct mbuf *m)
+{
+ struct mbuf *mcl;
+ struct ip *ip;
+ /* XXX - libalias duct tape */
+ int ldt, retval;
+ char *c;
+
+ ldt = 0;
+ retval = 0;
+ mcl = m_megapullup(m, m->m_pkthdr.len);
+ if (mcl == NULL) {
+ args->m = NULL;
+ return (IP_FW_DENY);
+ }
+ ip = mtod(mcl, struct ip *);
+
+ /*
+ * XXX - Libalias checksum offload 'duct tape':
+ *
+ * locally generated packets have only pseudo-header checksum
+ * calculated and libalias will break it[1], so mark them for
+ * later fix. Moreover there are cases when libalias modifies
+ * tcp packet data[2], mark them for later fix too.
+ *
+ * [1] libalias was never meant to run in kernel, so it does
+ * not have any knowledge about checksum offloading, and
+ * expects a packet with a full internet checksum.
+ * Unfortunately, packets generated locally will have just the
+ * pseudo header calculated, and when libalias tries to adjust
+ * the checksum it will actually compute a wrong value.
+ *
+ * [2] when libalias modifies tcp's data content, full TCP
+ * checksum has to be recomputed: the problem is that
+ * libalias does not have any idea about checksum offloading.
+ * To work around this, we do not do checksumming in LibAlias,
+ * but only mark the packets in th_x2 field. If we receive a
+ * marked packet, we calculate correct checksum for it
+ * aware of offloading. Why such a terrible hack instead of
+ * recalculating checksum for each packet?
+ * Because the previous checksum was not checked!
+ * Recalculating checksums for EVERY packet will hide ALL
+ * transmission errors. Yes, marked packets still suffer from
+ * this problem. But, sigh, natd(8) has this problem, too.
+ *
+ * TODO: -make libalias mbuf aware (so
+ * it can handle delayed checksum and tso)
+ */
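+	/*
+	 * In short: ldt is set when the packet's transport checksum
+	 * must be recomputed after libalias has modified the payload;
+	 * the actual fixup happens further below.
+	 */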
+
+ if (mcl->m_pkthdr.rcvif == NULL &&
+ mcl->m_pkthdr.csum_flags & CSUM_DELAY_DATA)
+ ldt = 1;
+
+ c = mtod(mcl, char *);
+ if (args->oif == NULL)
+ retval = LibAliasIn(t->lib, c,
+ mcl->m_len + M_TRAILINGSPACE(mcl));
+ else
+ retval = LibAliasOut(t->lib, c,
+ mcl->m_len + M_TRAILINGSPACE(mcl));
+ if (retval == PKT_ALIAS_RESPOND) {
+ m->m_flags |= M_SKIP_FIREWALL;
+ retval = PKT_ALIAS_OK;
+ }
+ if (retval != PKT_ALIAS_OK &&
+ retval != PKT_ALIAS_FOUND_HEADER_FRAGMENT) {
+		/* XXX - should I add some logging? */
+ m_free(mcl);
+ args->m = NULL;
+ return (IP_FW_DENY);
+ }
+ mcl->m_pkthdr.len = mcl->m_len = ntohs(ip->ip_len);
+
+ /*
+ * XXX - libalias checksum offload
+ * 'duct tape' (see above)
+ */
+
+ if ((ip->ip_off & htons(IP_OFFMASK)) == 0 &&
+ ip->ip_p == IPPROTO_TCP) {
+ struct tcphdr *th;
+
+ th = (struct tcphdr *)(ip + 1);
+ if (th->th_x2)
+ ldt = 1;
+ }
+
+ if (ldt) {
+ struct tcphdr *th;
+ struct udphdr *uh;
+ u_short cksum;
+
+ ip->ip_len = ntohs(ip->ip_len);
+ cksum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
+ htons(ip->ip_p + ip->ip_len - (ip->ip_hl << 2)));
+
+ switch (ip->ip_p) {
+ case IPPROTO_TCP:
+ th = (struct tcphdr *)(ip + 1);
+ /*
+ * Maybe it was set in
+ * libalias...
+ */
+ th->th_x2 = 0;
+ th->th_sum = cksum;
+ mcl->m_pkthdr.csum_data =
+ offsetof(struct tcphdr, th_sum);
+ break;
+ case IPPROTO_UDP:
+ uh = (struct udphdr *)(ip + 1);
+ uh->uh_sum = cksum;
+ mcl->m_pkthdr.csum_data =
+ offsetof(struct udphdr, uh_sum);
+ break;
+ }
+ /* No hw checksum offloading: do it ourselves */
+ if ((mcl->m_pkthdr.csum_flags & CSUM_DELAY_DATA) == 0) {
+ in_delayed_cksum(mcl);
+ mcl->m_pkthdr.csum_flags &= ~CSUM_DELAY_DATA;
+ }
+ ip->ip_len = htons(ip->ip_len);
+ }
+ args->m = mcl;
+ return (IP_FW_NAT);
+}
+
+static struct cfg_nat *
+lookup_nat(struct nat_list *l, int nat_id)
+{
+ struct cfg_nat *res;
+
+ LIST_FOREACH(res, l, _next) {
+ if (res->id == nat_id)
+ break;
+ }
+ return res;
+}
+
+static int
+ipfw_nat_cfg(struct sockopt *sopt)
+{
+ struct cfg_nat *ptr, *ser_n;
+ char *buf;
+ struct ip_fw_chain *chain = &V_layer3_chain;
+
+ buf = malloc(NAT_BUF_LEN, M_IPFW, M_WAITOK | M_ZERO);
+ sooptcopyin(sopt, buf, NAT_BUF_LEN, sizeof(struct cfg_nat));
+ ser_n = (struct cfg_nat *)buf;
+
+ /* XXX should we validate that ser_n->id > 0? */
+ /*
+ * Find/create nat rule.
+ */
+ IPFW_WLOCK(chain);
+ ptr = lookup_nat(&chain->nat, ser_n->id);
+ if (ptr == NULL) {
+ /* New rule: allocate and init new instance. */
+ ptr = malloc(sizeof(struct cfg_nat),
+ M_IPFW, M_NOWAIT | M_ZERO);
+ if (ptr == NULL) {
+ IPFW_WUNLOCK(chain);
+ free(buf, M_IPFW);
+ return (ENOSPC);
+ }
+ ptr->lib = LibAliasInit(NULL);
+ if (ptr->lib == NULL) {
+ IPFW_WUNLOCK(chain);
+ free(ptr, M_IPFW);
+ free(buf, M_IPFW);
+ return (EINVAL);
+ }
+ LIST_INIT(&ptr->redir_chain);
+ } else {
+ /* Entry already present: temporarily unhook it. */
+ LIST_REMOVE(ptr, _next);
+ flush_nat_ptrs(chain, ser_n->id);
+ }
+ IPFW_WUNLOCK(chain);
+
+ /*
+ * Basic nat configuration.
+ */
+ ptr->id = ser_n->id;
+ /*
+ * XXX - what if this rule doesn't NAT any IP and only
+ * redirects? Do we set the alias address to 0.0.0.0?
+ */
+ ptr->ip = ser_n->ip;
+ ptr->redir_cnt = ser_n->redir_cnt;
+ ptr->mode = ser_n->mode;
+ LibAliasSetMode(ptr->lib, ser_n->mode, ser_n->mode);
+ LibAliasSetAddress(ptr->lib, ptr->ip);
+ memcpy(ptr->if_name, ser_n->if_name, IF_NAMESIZE);
+
+ /*
+ * Redir and LSNAT configuration.
+ */
+ /* Delete old cfgs. */
+ del_redir_spool_cfg(ptr, &ptr->redir_chain);
+ /* Add new entries. */
+ add_redir_spool_cfg(&buf[(sizeof(struct cfg_nat))], ptr);
+ free(buf, M_IPFW);
+ IPFW_WLOCK(chain);
+ LIST_INSERT_HEAD(&chain->nat, ptr, _next);
+ IPFW_WUNLOCK(chain);
+ return (0);
+}
+
+static int
+ipfw_nat_del(struct sockopt *sopt)
+{
+ struct cfg_nat *ptr;
+ struct ip_fw_chain *chain = &V_layer3_chain;
+ int i;
+
+ sooptcopyin(sopt, &i, sizeof i, sizeof i);
+ /* XXX validate i */
+ IPFW_WLOCK(chain);
+ ptr = lookup_nat(&chain->nat, i);
+ if (ptr == NULL) {
+ IPFW_WUNLOCK(chain);
+ return (EINVAL);
+ }
+ LIST_REMOVE(ptr, _next);
+ flush_nat_ptrs(chain, i);
+ IPFW_WUNLOCK(chain);
+ del_redir_spool_cfg(ptr, &ptr->redir_chain);
+ LibAliasUninit(ptr->lib);
+ free(ptr, M_IPFW);
+ return (0);
+}
+
+static int
+ipfw_nat_get_cfg(struct sockopt *sopt)
+{
+ uint8_t *data;
+ struct cfg_nat *n;
+ struct cfg_redir *r;
+ struct cfg_spool *s;
+ int nat_cnt, off;
+ struct ip_fw_chain *chain;
+ int err = ENOSPC;
+
+ chain = &V_layer3_chain;
+ nat_cnt = 0;
+ off = sizeof(nat_cnt);
+
+ data = malloc(NAT_BUF_LEN, M_IPFW, M_WAITOK | M_ZERO);
+ IPFW_RLOCK(chain);
+ /* Serialize all the data. */
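+ /*
+ * Buffer layout (sketch of what the loop below produces):
+ * [nat_cnt][cfg_nat 1][cfg_redir 1.1][cfg_spool 1.1.*]...[cfg_nat 2]...
+ * nat_cnt itself is written back at offset 0 once the scan succeeds.
+ */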
+ LIST_FOREACH(n, &chain->nat, _next) {
+ nat_cnt++;
+ if (off + SOF_NAT >= NAT_BUF_LEN)
+ goto nospace;
+ bcopy(n, &data[off], SOF_NAT);
+ off += SOF_NAT;
+ LIST_FOREACH(r, &n->redir_chain, _next) {
+ if (off + SOF_REDIR >= NAT_BUF_LEN)
+ goto nospace;
+ bcopy(r, &data[off], SOF_REDIR);
+ off += SOF_REDIR;
+ LIST_FOREACH(s, &r->spool_chain, _next) {
+ if (off + SOF_SPOOL >= NAT_BUF_LEN)
+ goto nospace;
+ bcopy(s, &data[off], SOF_SPOOL);
+ off += SOF_SPOOL;
+ }
+ }
+ }
+ err = 0; /* all good */
+nospace:
+ IPFW_RUNLOCK(chain);
+ if (err == 0) {
+ bcopy(&nat_cnt, data, sizeof(nat_cnt));
+ sooptcopyout(sopt, data, NAT_BUF_LEN);
+ } else {
+ printf("serialized data buffer not big enough:"
+ "please increase NAT_BUF_LEN\n");
+ }
+ free(data, M_IPFW);
+ return (err);
+}
+
+static int
+ipfw_nat_get_log(struct sockopt *sopt)
+{
+ uint8_t *data;
+ struct cfg_nat *ptr;
+ int i, size;
+ struct ip_fw_chain *chain;
+
+ chain = &V_layer3_chain;
+
+ IPFW_RLOCK(chain);
+ /* one pass to count, one to copy the data */
+ i = 0;
+ LIST_FOREACH(ptr, &chain->nat, _next) {
+ if (ptr->lib->logDesc == NULL)
+ continue;
+ i++;
+ }
+ size = i * (LIBALIAS_BUF_SIZE + sizeof(int));
+ data = malloc(size, M_IPFW, M_NOWAIT | M_ZERO);
+ if (data == NULL) {
+ IPFW_RUNLOCK(chain);
+ return (ENOSPC);
+ }
+ i = 0;
+ LIST_FOREACH(ptr, &chain->nat, _next) {
+ if (ptr->lib->logDesc == NULL)
+ continue;
+ bcopy(&ptr->id, &data[i], sizeof(int));
+ i += sizeof(int);
+ bcopy(ptr->lib->logDesc, &data[i], LIBALIAS_BUF_SIZE);
+ i += LIBALIAS_BUF_SIZE;
+ }
+ IPFW_RUNLOCK(chain);
+ sooptcopyout(sopt, data, size);
+ free(data, M_IPFW);
+ return (0);
+}
+
+static void
+ipfw_nat_init(void)
+{
+
+ IPFW_WLOCK(&V_layer3_chain);
+ /* init ipfw hooks */
+ ipfw_nat_ptr = ipfw_nat;
+ lookup_nat_ptr = lookup_nat;
+ ipfw_nat_cfg_ptr = ipfw_nat_cfg;
+ ipfw_nat_del_ptr = ipfw_nat_del;
+ ipfw_nat_get_cfg_ptr = ipfw_nat_get_cfg;
+ ipfw_nat_get_log_ptr = ipfw_nat_get_log;
+ IPFW_WUNLOCK(&V_layer3_chain);
+ V_ifaddr_event_tag = EVENTHANDLER_REGISTER(
+ ifaddr_event, ifaddr_change,
+ NULL, EVENTHANDLER_PRI_ANY);
+}
+
+static void
+ipfw_nat_destroy(void)
+{
+ struct cfg_nat *ptr, *ptr_temp;
+ struct ip_fw_chain *chain;
+
+ chain = &V_layer3_chain;
+ IPFW_WLOCK(chain);
+ LIST_FOREACH_SAFE(ptr, &chain->nat, _next, ptr_temp) {
+ LIST_REMOVE(ptr, _next);
+ del_redir_spool_cfg(ptr, &ptr->redir_chain);
+ LibAliasUninit(ptr->lib);
+ free(ptr, M_IPFW);
+ }
+ EVENTHANDLER_DEREGISTER(ifaddr_event, V_ifaddr_event_tag);
+ flush_nat_ptrs(chain, -1 /* flush all */);
+ /* deregister ipfw_nat */
+ ipfw_nat_ptr = NULL;
+ lookup_nat_ptr = NULL;
+ ipfw_nat_cfg_ptr = NULL;
+ ipfw_nat_del_ptr = NULL;
+ ipfw_nat_get_cfg_ptr = NULL;
+ ipfw_nat_get_log_ptr = NULL;
+ IPFW_WUNLOCK(chain);
+}
+
+static int
+ipfw_nat_modevent(module_t mod, int type, void *unused)
+{
+ int err = 0;
+
+ switch (type) {
+ case MOD_LOAD:
+ ipfw_nat_init();
+ break;
+
+ case MOD_UNLOAD:
+ ipfw_nat_destroy();
+ break;
+
+ default:
+ return EOPNOTSUPP;
+ }
+ return err;
+}
+
+static moduledata_t ipfw_nat_mod = {
+ "ipfw_nat",
+ ipfw_nat_modevent,
+ 0
+};
+
+DECLARE_MODULE(ipfw_nat, ipfw_nat_mod, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_ANY);
+MODULE_DEPEND(ipfw_nat, libalias, 1, 1, 1);
+MODULE_DEPEND(ipfw_nat, ipfw, 2, 2, 2);
+MODULE_VERSION(ipfw_nat, 1);
+/* end of file */
diff --git a/rtems/freebsd/netinet/ipfw/ip_fw_pfil.c b/rtems/freebsd/netinet/ipfw/ip_fw_pfil.c
new file mode 100644
index 00000000..abc29b2e
--- /dev/null
+++ b/rtems/freebsd/netinet/ipfw/ip_fw_pfil.c
@@ -0,0 +1,417 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 2004 Andre Oppermann, Internet Business Solutions AG
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#if !defined(KLD_MODULE)
+#include <rtems/freebsd/local/opt_ipfw.h>
+#include <rtems/freebsd/local/opt_ipdn.h>
+#include <rtems/freebsd/local/opt_inet.h>
+#ifndef INET
+#error IPFIREWALL requires INET.
+#endif /* INET */
+#endif /* KLD_MODULE */
+#include <rtems/freebsd/local/opt_inet6.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/module.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/rwlock.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/sysctl.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/route.h>
+#include <rtems/freebsd/net/pfil.h>
+#include <rtems/freebsd/net/vnet.h>
+
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/in_systm.h>
+#include <rtems/freebsd/netinet/ip.h>
+#include <rtems/freebsd/netinet/ip_var.h>
+#include <rtems/freebsd/netinet/ip_fw.h>
+#include <rtems/freebsd/netinet/ipfw/ip_fw_private.h>
+#include <rtems/freebsd/netgraph/ng_ipfw.h>
+
+#include <rtems/freebsd/machine/in_cksum.h>
+
+static VNET_DEFINE(int, fw_enable) = 1;
+#define V_fw_enable VNET(fw_enable)
+
+#ifdef INET6
+static VNET_DEFINE(int, fw6_enable) = 1;
+#define V_fw6_enable VNET(fw6_enable)
+#endif
+
+int ipfw_chg_hook(SYSCTL_HANDLER_ARGS);
+
+/* Forward declarations. */
+static int ipfw_divert(struct mbuf **, int, struct ipfw_rule_ref *, int);
+
+#ifdef SYSCTL_NODE
+
+SYSBEGIN(f1)
+
+SYSCTL_DECL(_net_inet_ip_fw);
+SYSCTL_VNET_PROC(_net_inet_ip_fw, OID_AUTO, enable,
+ CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_SECURE3, &VNET_NAME(fw_enable), 0,
+ ipfw_chg_hook, "I", "Enable ipfw");
+#ifdef INET6
+SYSCTL_DECL(_net_inet6_ip6_fw);
+SYSCTL_VNET_PROC(_net_inet6_ip6_fw, OID_AUTO, enable,
+ CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_SECURE3, &VNET_NAME(fw6_enable), 0,
+ ipfw_chg_hook, "I", "Enable ipfw+6");
+#endif /* INET6 */
+
+SYSEND
+
+#endif /* SYSCTL_NODE */
+
+/*
+ * The pfil hook that passes packets to ipfw_chk and then to
+ * dummynet, divert, netgraph or other modules.
+ * The packet may be consumed.
+ */
+int
+ipfw_check_hook(void *arg, struct mbuf **m0, struct ifnet *ifp, int dir,
+ struct inpcb *inp)
+{
+ struct ip_fw_args args;
+ struct m_tag *tag;
+ int ipfw;
+ int ret;
+
+ /* all the processing now uses ip_len in net format */
+ if (mtod(*m0, struct ip *)->ip_v == 4)
+ SET_NET_IPLEN(mtod(*m0, struct ip *));
+
+ /* convert dir to IPFW values */
+ dir = (dir == PFIL_IN) ? DIR_IN : DIR_OUT;
+ bzero(&args, sizeof(args));
+
+again:
+ /*
+ * Extract and remove the tag if present. If we are left
+ * with onepass, optimize the outgoing path.
+ */
+ tag = m_tag_locate(*m0, MTAG_IPFW_RULE, 0, NULL);
+ if (tag != NULL) {
+ args.rule = *((struct ipfw_rule_ref *)(tag+1));
+ m_tag_delete(*m0, tag);
+ if (args.rule.info & IPFW_ONEPASS) {
+ SET_HOST_IPLEN(mtod(*m0, struct ip *));
+ return 0;
+ }
+ }
+
+ args.m = *m0;
+ args.oif = dir == DIR_OUT ? ifp : NULL;
+ args.inp = inp;
+
+ ipfw = ipfw_chk(&args);
+ *m0 = args.m;
+
+ KASSERT(*m0 != NULL || ipfw == IP_FW_DENY, ("%s: m0 is NULL",
+ __func__));
+
+ /* breaking out of the switch means drop */
+ ret = 0; /* default return value for pass */
+ switch (ipfw) {
+ case IP_FW_PASS:
+ /* next_hop may be set by ipfw_chk */
+ if (args.next_hop == NULL)
+ break; /* pass */
+#ifndef IPFIREWALL_FORWARD
+ ret = EACCES;
+#else
+ {
+ struct m_tag *fwd_tag;
+
+ /* Incoming packets should not be tagged so we do not
+ * m_tag_find. Outgoing packets may be tagged, so we
+ * reuse the tag if present.
+ */
+ fwd_tag = (dir == DIR_IN) ? NULL :
+ m_tag_find(*m0, PACKET_TAG_IPFORWARD, NULL);
+ if (fwd_tag != NULL) {
+ m_tag_unlink(*m0, fwd_tag);
+ } else {
+ fwd_tag = m_tag_get(PACKET_TAG_IPFORWARD,
+ sizeof(struct sockaddr_in), M_NOWAIT);
+ if (fwd_tag == NULL) {
+ ret = EACCES;
+ break; /* i.e. drop */
+ }
+ }
+ bcopy(args.next_hop, (fwd_tag+1), sizeof(struct sockaddr_in));
+ m_tag_prepend(*m0, fwd_tag);
+
+ if (in_localip(args.next_hop->sin_addr))
+ (*m0)->m_flags |= M_FASTFWD_OURS;
+ }
+#endif
+ break;
+
+ case IP_FW_DENY:
+ ret = EACCES;
+ break; /* i.e. drop */
+
+ case IP_FW_DUMMYNET:
+ ret = EACCES;
+ if (ip_dn_io_ptr == NULL)
+ break; /* i.e. drop */
+ if (mtod(*m0, struct ip *)->ip_v == 4)
+ ret = ip_dn_io_ptr(m0, dir, &args);
+ else if (mtod(*m0, struct ip *)->ip_v == 6)
+ ret = ip_dn_io_ptr(m0, dir | PROTO_IPV6, &args);
+ else
+ break; /* drop it */
+ /*
+ * XXX should read the return value.
+ * dummynet normally eats the packet and sets *m0=NULL
+ * unless the packet can be sent immediately. In this
+ * case args is updated and we should re-run the
+ * check without clearing args.
+ */
+ if (*m0 != NULL)
+ goto again;
+ break;
+
+ case IP_FW_TEE:
+ case IP_FW_DIVERT:
+ if (ip_divert_ptr == NULL) {
+ ret = EACCES;
+ break; /* i.e. drop */
+ }
+ ret = ipfw_divert(m0, dir, &args.rule,
+ (ipfw == IP_FW_TEE) ? 1 : 0);
+ /* continue processing for the original packet (tee). */
+ if (*m0)
+ goto again;
+ break;
+
+ case IP_FW_NGTEE:
+ case IP_FW_NETGRAPH:
+ if (ng_ipfw_input_p == NULL) {
+ ret = EACCES;
+ break; /* i.e. drop */
+ }
+ ret = ng_ipfw_input_p(m0, dir, &args,
+ (ipfw == IP_FW_NGTEE) ? 1 : 0);
+ if (ipfw == IP_FW_NGTEE) /* ignore errors for NGTEE */
+ goto again; /* continue with packet */
+ break;
+
+ case IP_FW_NAT:
+ /* honor one-pass in case of successful nat */
+ if (V_fw_one_pass)
+ break; /* ret is already 0 */
+ goto again;
+
+ case IP_FW_REASS:
+ goto again; /* continue with packet */
+
+ default:
+ KASSERT(0, ("%s: unknown retval", __func__));
+ }
+
+ if (ret != 0) {
+ if (*m0)
+ FREE_PKT(*m0);
+ *m0 = NULL;
+ }
+ if (*m0 && mtod(*m0, struct ip *)->ip_v == 4)
+ SET_HOST_IPLEN(mtod(*m0, struct ip *));
+ return ret;
+}
+
+/* Do the divert; return 1 on error, 0 on success. */
+static int
+ipfw_divert(struct mbuf **m0, int incoming, struct ipfw_rule_ref *rule,
+ int tee)
+{
+ /*
+ * ipfw_chk() has already tagged the packet with the divert tag.
+ * If tee is set, copy packet and return original.
+ * If not tee, consume packet and send it to divert socket.
+ */
+ struct mbuf *clone;
+ struct ip *ip;
+ struct m_tag *tag;
+
+ /* Cloning needed for tee? */
+ if (tee == 0) {
+ clone = *m0; /* use the original mbuf */
+ *m0 = NULL;
+ } else {
+ clone = m_dup(*m0, M_DONTWAIT);
+ /* If we cannot duplicate the mbuf, we sacrifice the divert
+ * chain and continue with the tee-ed packet.
+ */
+ if (clone == NULL)
+ return 1;
+ }
+
+ /*
+ * Divert listeners can normally handle non-fragmented packets,
+ * but we can only reassemble in the non-tee case.
+ * This means that listeners on a tee rule may get fragments,
+ * and have to live with that.
+ * Note that we now have the 'reass' ipfw option so if we care
+ * we can do it before a 'tee'.
+ */
+ ip = mtod(clone, struct ip *);
+ if (!tee && ntohs(ip->ip_off) & (IP_MF | IP_OFFMASK)) {
+ int hlen;
+ struct mbuf *reass;
+
+ SET_HOST_IPLEN(ip); /* ip_reass wants host order */
+ reass = ip_reass(clone); /* Reassemble packet. */
+ if (reass == NULL) /* consumed by ip_reass */
+ return 0; /* not an error */
+ /*
+ * IP header checksum fixup after reassembly and leave header
+ * in network byte order.
+ */
+ ip = mtod(reass, struct ip *);
+ hlen = ip->ip_hl << 2;
+ SET_NET_IPLEN(ip);
+ ip->ip_sum = 0;
+ if (hlen == sizeof(struct ip))
+ ip->ip_sum = in_cksum_hdr(ip);
+ else
+ ip->ip_sum = in_cksum(reass, hlen);
+ clone = reass;
+ }
+ /* attach a tag to the packet with the reinject info */
+ tag = m_tag_alloc(MTAG_IPFW_RULE, 0,
+ sizeof(struct ipfw_rule_ref), M_NOWAIT);
+ if (tag == NULL) {
+ FREE_PKT(clone);
+ return 1;
+ }
+ *((struct ipfw_rule_ref *)(tag+1)) = *rule;
+ m_tag_prepend(clone, tag);
+
+ /* Do the dirty job... */
+ ip_divert_ptr(clone, incoming);
+ return 0;
+}
+
+/*
+ * attach or detach hooks for a given protocol family
+ */
+static int
+ipfw_hook(int onoff, int pf)
+{
+ struct pfil_head *pfh;
+
+ pfh = pfil_head_get(PFIL_TYPE_AF, pf);
+ if (pfh == NULL)
+ return ENOENT;
+
+ (void) (onoff ? pfil_add_hook : pfil_remove_hook)
+ (ipfw_check_hook, NULL, PFIL_IN | PFIL_OUT | PFIL_WAITOK, pfh);
+
+ return 0;
+}
+
+int
+ipfw_attach_hooks(int arg)
+{
+ int error = 0;
+
+ if (arg == 0) /* detach */
+ ipfw_hook(0, AF_INET);
+ else if (V_fw_enable && ipfw_hook(1, AF_INET) != 0) {
+ error = ENOENT; /* see ip_fw_pfil.c::ipfw_hook() */
+ printf("ipfw_hook() error\n");
+ }
+#ifdef INET6
+ if (arg == 0) /* detach */
+ ipfw_hook(0, AF_INET6);
+ else if (V_fw6_enable && ipfw_hook(1, AF_INET6) != 0) {
+ error = ENOENT;
+ printf("ipfw6_hook() error\n");
+ }
+#endif
+ return error;
+}
+
+int
+ipfw_chg_hook(SYSCTL_HANDLER_ARGS)
+{
+ int enable;
+ int oldenable;
+ int error;
+ int af;
+
+ if (arg1 == &VNET_NAME(fw_enable)) {
+ enable = V_fw_enable;
+ af = AF_INET;
+ }
+#ifdef INET6
+ else if (arg1 == &VNET_NAME(fw6_enable)) {
+ enable = V_fw6_enable;
+ af = AF_INET6;
+ }
+#endif
+ else
+ return (EINVAL);
+
+ oldenable = enable;
+
+ error = sysctl_handle_int(oidp, &enable, 0, req);
+
+ if (error)
+ return (error);
+
+ enable = (enable) ? 1 : 0;
+
+ if (enable == oldenable)
+ return (0);
+
+ error = ipfw_hook(enable, af);
+ if (error)
+ return (error);
+ if (af == AF_INET)
+ V_fw_enable = enable;
+#ifdef INET6
+ else if (af == AF_INET6)
+ V_fw6_enable = enable;
+#endif
+
+ return (0);
+}
+/* end of file */
diff --git a/rtems/freebsd/netinet/ipfw/ip_fw_private.h b/rtems/freebsd/netinet/ipfw/ip_fw_private.h
new file mode 100644
index 00000000..c29ae0ad
--- /dev/null
+++ b/rtems/freebsd/netinet/ipfw/ip_fw_private.h
@@ -0,0 +1,301 @@
+/*-
+ * Copyright (c) 2002-2009 Luigi Rizzo, Universita` di Pisa
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _IPFW2_PRIVATE_H
+#define _IPFW2_PRIVATE_H
+
+/*
+ * Internal constants and data structures used by ipfw components
+ * and not meant to be exported outside the kernel.
+ */
+
+#ifdef _KERNEL
+
+/*
+ * For platforms that do not have SYSCTL support, we wrap the
+ * SYSCTL_* into a function (one per file) to collect the values
+ * into an array at module initialization. The wrapping macros,
+ * SYSBEGIN() and SYSEND, are empty in the default case.
+ */
+#ifndef SYSBEGIN
+#define SYSBEGIN(x)
+#endif
+#ifndef SYSEND
+#define SYSEND
+#endif
+
+/* Return values from ipfw_chk() */
+enum {
+ IP_FW_PASS = 0,
+ IP_FW_DENY,
+ IP_FW_DIVERT,
+ IP_FW_TEE,
+ IP_FW_DUMMYNET,
+ IP_FW_NETGRAPH,
+ IP_FW_NGTEE,
+ IP_FW_NAT,
+ IP_FW_REASS,
+};
+
+/*
+ * Structure for collecting parameters to dummynet for ip6_output forwarding
+ */
+struct _ip6dn_args {
+ struct ip6_pktopts *opt_or;
+ struct route_in6 ro_or;
+ int flags_or;
+ struct ip6_moptions *im6o_or;
+ struct ifnet *origifp_or;
+ struct ifnet *ifp_or;
+ struct sockaddr_in6 dst_or;
+ u_long mtu_or;
+ struct route_in6 ro_pmtu_or;
+};
+
+
+/*
+ * Arguments for calling ipfw_chk() and dummynet_io(). We put them
+ * all into a structure because this way it is easier and more
+ * efficient to pass variables around and extend the interface.
+ */
+struct ip_fw_args {
+ struct mbuf *m; /* the mbuf chain */
+ struct ifnet *oif; /* output interface */
+ struct sockaddr_in *next_hop; /* forward address */
+
+ /*
+ * On return, it points to the matching rule.
+ * On entry, rule.slot > 0 means the info is valid and
+ * contains the starting rule for an ipfw search.
+ * If chain_id == chain->id && slot > 0 then jump to that slot.
+ * Otherwise, we locate the first rule >= rulenum:rule_id
+ */
+ struct ipfw_rule_ref rule; /* match/restart info */
+
+ struct ether_header *eh; /* for bridged packets */
+
+ struct ipfw_flow_id f_id; /* grabbed from IP header */
+ //uint32_t cookie; /* a cookie depending on rule action */
+ struct inpcb *inp;
+
+ struct _ip6dn_args dummypar; /* dummynet->ip6_output */
+ struct sockaddr_in hopstore; /* store here if cannot use a pointer */
+};
+
+MALLOC_DECLARE(M_IPFW);
+
+/*
+ * Hooks sometimes need to know the direction of the packet
+ * (divert, dummynet, netgraph, ...).
+ * We use a generic definition here: bits 0-1 indicate the
+ * direction, bit 2 indicates layer 2 vs. layer 3, and bits 3-4
+ * indicate the specific protocol (if necessary).
+ */
+enum {
+ DIR_MASK = 0x3,
+ DIR_OUT = 0,
+ DIR_IN = 1,
+ DIR_FWD = 2,
+ DIR_DROP = 3,
+ PROTO_LAYER2 = 0x4, /* set for layer 2 */
+ /* PROTO_DEFAULT = 0, */
+ PROTO_IPV4 = 0x08,
+ PROTO_IPV6 = 0x10,
+ PROTO_IFB = 0x0c, /* layer2 + ifbridge */
+ /* PROTO_OLDBDG = 0x14, unused, old bridge */
+};
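+
+/*
+ * Example (sketch): an IPv6 packet handed to dummynet from the output
+ * path is tagged (DIR_OUT | PROTO_IPV6), as ipfw_check_hook() does;
+ * a consumer recovers the bare direction with (dir & DIR_MASK).
+ */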
+
+/* wrapper for freeing a packet, in case we need to do more work */
+#ifndef FREE_PKT
+#if defined(__linux__) || defined(_WIN32)
+#define FREE_PKT(m) netisr_dispatch(-1, m)
+#else
+#define FREE_PKT(m) m_freem(m)
+#endif
+#endif /* !FREE_PKT */
+
+/*
+ * Function prototypes.
+ */
+
+/* attach (arg = 1) or detach (arg = 0) hooks */
+int ipfw_attach_hooks(int);
+#ifdef NOTYET
+void ipfw_nat_destroy(void);
+#endif
+
+/* In ip_fw_log.c */
+struct ip;
+void ipfw_log_bpf(int);
+void ipfw_log(struct ip_fw *f, u_int hlen, struct ip_fw_args *args,
+ struct mbuf *m, struct ifnet *oif, u_short offset, uint32_t tablearg,
+ struct ip *ip);
+VNET_DECLARE(u_int64_t, norule_counter);
+#define V_norule_counter VNET(norule_counter)
+VNET_DECLARE(int, verbose_limit);
+#define V_verbose_limit VNET(verbose_limit)
+
+/* In ip_fw_dynamic.c */
+
+enum { /* result for matching dynamic rules */
+ MATCH_REVERSE = 0,
+ MATCH_FORWARD,
+ MATCH_NONE,
+ MATCH_UNKNOWN,
+};
+
+/*
+ * The lock for dynamic rules is only used once outside the file,
+ * and only to release the result of lookup_dyn_rule().
+ * Eventually we may implement it with a callback on the function.
+ */
+void ipfw_dyn_unlock(void);
+
+struct tcphdr;
+struct mbuf *ipfw_send_pkt(struct mbuf *, struct ipfw_flow_id *,
+ u_int32_t, u_int32_t, int);
+int ipfw_install_state(struct ip_fw *rule, ipfw_insn_limit *cmd,
+ struct ip_fw_args *args, uint32_t tablearg);
+ipfw_dyn_rule *ipfw_lookup_dyn_rule(struct ipfw_flow_id *pkt,
+ int *match_direction, struct tcphdr *tcp);
+void ipfw_remove_dyn_children(struct ip_fw *rule);
+void ipfw_get_dynamic(char **bp, const char *ep);
+
+void ipfw_dyn_attach(void); /* uma_zcreate .... */
+void ipfw_dyn_detach(void); /* uma_zdestroy ... */
+void ipfw_dyn_init(void); /* per-vnet initialization */
+void ipfw_dyn_uninit(int); /* per-vnet deinitialization */
+int ipfw_dyn_len(void);
+
+/* common variables */
+VNET_DECLARE(int, fw_one_pass);
+#define V_fw_one_pass VNET(fw_one_pass)
+
+VNET_DECLARE(int, fw_verbose);
+#define V_fw_verbose VNET(fw_verbose)
+
+VNET_DECLARE(struct ip_fw_chain, layer3_chain);
+#define V_layer3_chain VNET(layer3_chain)
+
+VNET_DECLARE(u_int32_t, set_disable);
+#define V_set_disable VNET(set_disable)
+
+VNET_DECLARE(int, autoinc_step);
+#define V_autoinc_step VNET(autoinc_step)
+
+struct ip_fw_chain {
+ struct ip_fw *rules; /* list of rules */
+ struct ip_fw *reap; /* list of rules to reap */
+ struct ip_fw *default_rule;
+ int n_rules; /* number of static rules */
+ int static_len; /* total len of static rules */
+ struct ip_fw **map; /* array of rule ptrs to ease lookup */
+ LIST_HEAD(nat_list, cfg_nat) nat; /* list of nat entries */
+ struct radix_node_head *tables[IPFW_TABLES_MAX];
+#if defined( __linux__ ) || defined( _WIN32 )
+ spinlock_t rwmtx;
+ spinlock_t uh_lock;
+#else
+ struct rwlock rwmtx;
+ struct rwlock uh_lock; /* lock for upper half */
+#endif
+ uint32_t id; /* ruleset id */
+};
+
+struct sockopt; /* used by tcp_var.h */
+
+/*
+ * The lock is heavily used by ip_fw2.c (the main file) and ip_fw_nat.c
+ * so the variable and the macros must be here.
+ */
+
+#define IPFW_LOCK_INIT(_chain) do { \
+ rw_init(&(_chain)->rwmtx, "IPFW static rules"); \
+ rw_init(&(_chain)->uh_lock, "IPFW UH lock"); \
+ } while (0)
+
+#define IPFW_LOCK_DESTROY(_chain) do { \
+ rw_destroy(&(_chain)->rwmtx); \
+ rw_destroy(&(_chain)->uh_lock); \
+ } while (0)
+
+#define IPFW_WLOCK_ASSERT(_chain) rw_assert(&(_chain)->rwmtx, RA_WLOCKED)
+
+#define IPFW_RLOCK(p) rw_rlock(&(p)->rwmtx)
+#define IPFW_RUNLOCK(p) rw_runlock(&(p)->rwmtx)
+#define IPFW_WLOCK(p) rw_wlock(&(p)->rwmtx)
+#define IPFW_WUNLOCK(p) rw_wunlock(&(p)->rwmtx)
+
+#define IPFW_UH_RLOCK(p) rw_rlock(&(p)->uh_lock)
+#define IPFW_UH_RUNLOCK(p) rw_runlock(&(p)->uh_lock)
+#define IPFW_UH_WLOCK(p) rw_wlock(&(p)->uh_lock)
+#define IPFW_UH_WUNLOCK(p) rw_wunlock(&(p)->uh_lock)
+
+/* In ip_fw_sockopt.c */
+int ipfw_find_rule(struct ip_fw_chain *chain, uint32_t key, uint32_t id);
+int ipfw_add_rule(struct ip_fw_chain *chain, struct ip_fw *input_rule);
+int ipfw_ctl(struct sockopt *sopt);
+int ipfw_chk(struct ip_fw_args *args);
+void ipfw_reap_rules(struct ip_fw *head);
+
+/* In ip_fw_pfil */
+int ipfw_check_hook(void *arg, struct mbuf **m0, struct ifnet *ifp, int dir,
+ struct inpcb *inp);
+
+/* In ip_fw_table.c */
+struct radix_node;
+int ipfw_lookup_table(struct ip_fw_chain *ch, uint16_t tbl, in_addr_t addr,
+ uint32_t *val);
+int ipfw_init_tables(struct ip_fw_chain *ch);
+void ipfw_destroy_tables(struct ip_fw_chain *ch);
+int ipfw_flush_table(struct ip_fw_chain *ch, uint16_t tbl);
+int ipfw_add_table_entry(struct ip_fw_chain *ch, uint16_t tbl, in_addr_t addr,
+ uint8_t mlen, uint32_t value);
+int ipfw_dump_table_entry(struct radix_node *rn, void *arg);
+int ipfw_del_table_entry(struct ip_fw_chain *ch, uint16_t tbl, in_addr_t addr,
+ uint8_t mlen);
+int ipfw_count_table(struct ip_fw_chain *ch, uint32_t tbl, uint32_t *cnt);
+int ipfw_dump_table(struct ip_fw_chain *ch, ipfw_table *tbl);
+
+/* In ip_fw_nat.c -- XXX to be moved to ip_var.h */
+
+extern struct cfg_nat *(*lookup_nat_ptr)(struct nat_list *, int);
+
+typedef int ipfw_nat_t(struct ip_fw_args *, struct cfg_nat *, struct mbuf *);
+typedef int ipfw_nat_cfg_t(struct sockopt *);
+
+extern ipfw_nat_t *ipfw_nat_ptr;
+#define IPFW_NAT_LOADED (ipfw_nat_ptr != NULL)
+
+extern ipfw_nat_cfg_t *ipfw_nat_cfg_ptr;
+extern ipfw_nat_cfg_t *ipfw_nat_del_ptr;
+extern ipfw_nat_cfg_t *ipfw_nat_get_cfg_ptr;
+extern ipfw_nat_cfg_t *ipfw_nat_get_log_ptr;
+
+#endif /* _KERNEL */
+#endif /* _IPFW2_PRIVATE_H */
diff --git a/rtems/freebsd/netinet/ipfw/ip_fw_sockopt.c b/rtems/freebsd/netinet/ipfw/ip_fw_sockopt.c
new file mode 100644
index 00000000..72f81154
--- /dev/null
+++ b/rtems/freebsd/netinet/ipfw/ip_fw_sockopt.c
@@ -0,0 +1,1345 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 2002-2009 Luigi Rizzo, Universita` di Pisa
+ *
+ * Supported by: Valeria Paoli
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * Sockopt support for ipfw. The routines here implement
+ * the upper half of the ipfw code.
+ */
+
+#if !defined(KLD_MODULE)
+#include <rtems/freebsd/local/opt_ipfw.h>
+#include <rtems/freebsd/local/opt_ipdivert.h>
+#include <rtems/freebsd/local/opt_ipdn.h>
+#include <rtems/freebsd/local/opt_inet.h>
+#ifndef INET
+#error IPFIREWALL requires INET.
+#endif /* INET */
+#endif
+#include <rtems/freebsd/local/opt_inet6.h>
+#include <rtems/freebsd/local/opt_ipsec.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/mbuf.h> /* struct m_tag used by nested headers */
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/priv.h>
+#include <rtems/freebsd/sys/proc.h>
+#include <rtems/freebsd/sys/rwlock.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/socketvar.h>
+#include <rtems/freebsd/sys/sysctl.h>
+#include <rtems/freebsd/sys/syslog.h>
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/route.h>
+#include <rtems/freebsd/net/vnet.h>
+
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/ip_var.h> /* hooks */
+#include <rtems/freebsd/netinet/ip_fw.h>
+#include <rtems/freebsd/netinet/ipfw/ip_fw_private.h>
+
+#ifdef MAC
+#include <rtems/freebsd/security/mac/mac_framework.h>
+#endif
+
+MALLOC_DEFINE(M_IPFW, "IpFw/IpAcct", "IpFw/IpAcct chain's");
+
+/*
+ * static variables followed by global ones (none in this file)
+ */
+
+/*
+ * Find the smallest rule >= key, id.
+ * We could use bsearch() but the search is so simple that we code it directly.
+ */
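+/*
+ * Example: with rule numbers { 100, 200, 200, 65535 }, key 200 and
+ * id 0 yield slot 1, the first of the two rules numbered 200;
+ * key 150 also yields slot 1, the smallest rule >= 150.
+ */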
+int
+ipfw_find_rule(struct ip_fw_chain *chain, uint32_t key, uint32_t id)
+{
+ int i, lo, hi;
+ struct ip_fw *r;
+
+ for (lo = 0, hi = chain->n_rules - 1; lo < hi;) {
+ i = (lo + hi) / 2;
+ r = chain->map[i];
+ if (r->rulenum < key)
+ lo = i + 1; /* continue from the next one */
+ else if (r->rulenum > key)
+ hi = i; /* this might be good */
+ else if (r->id < id)
+ lo = i + 1; /* continue from the next one */
+ else /* r->id >= id */
+ hi = i; /* this might be good */
+ }
+ return hi;
+}
+
+/*
+ * Allocate a new map; on success, return with the chain locked.
+ * 'extra' is the number of entries to add or delete.
+ */
+static struct ip_fw **
+get_map(struct ip_fw_chain *chain, int extra, int locked)
+{
+
+ for (;;) {
+ struct ip_fw **map;
+ int i;
+
+ i = chain->n_rules + extra;
+ map = malloc(i * sizeof(struct ip_fw *), M_IPFW,
+ locked ? M_NOWAIT : M_WAITOK);
+ if (map == NULL) {
+ printf("%s: cannot allocate map\n", __FUNCTION__);
+ return NULL;
+ }
+ if (!locked)
+ IPFW_UH_WLOCK(chain);
+ if (i >= chain->n_rules + extra) /* good */
+ return map;
+ /* otherwise we lost the race, free and retry */
+ if (!locked)
+ IPFW_UH_WUNLOCK(chain);
+ free(map, M_IPFW);
+ }
+}
+
+/*
+ * Swap the maps. Must be called with IPFW_UH_WLOCK held.
+ */
+static struct ip_fw **
+swap_map(struct ip_fw_chain *chain, struct ip_fw **new_map, int new_len)
+{
+ struct ip_fw **old_map;
+
+ IPFW_WLOCK(chain);
+ chain->id++;
+ chain->n_rules = new_len;
+ old_map = chain->map;
+ chain->map = new_map;
+ IPFW_WUNLOCK(chain);
+ return old_map;
+}
+
+/*
+ * Add a new rule to the list. Copy the rule into a malloc'ed area,
+ * possibly assign it a rule number, and link it into the chain.
+ * Update the rule number in the input struct so the caller knows it as well.
+ * XXX DO NOT USE FOR THE DEFAULT RULE.
+ * Must be called without the IPFW_UH lock held.
+ */
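+/*
+ * Example: adding a rule with rulenum == 0 right after an existing
+ * rule 500 auto-assigns it 500 + V_autoinc_step (the step is clamped
+ * to [1, 1000] below).
+ */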
+int
+ipfw_add_rule(struct ip_fw_chain *chain, struct ip_fw *input_rule)
+{
+ struct ip_fw *rule;
+ int i, l, insert_before;
+ struct ip_fw **map; /* the new array of pointers */
+
+ if (chain->rules == NULL || input_rule->rulenum > IPFW_DEFAULT_RULE-1)
+ return (EINVAL);
+
+ l = RULESIZE(input_rule);
+ rule = malloc(l, M_IPFW, M_WAITOK | M_ZERO);
+ if (rule == NULL)
+ return (ENOSPC);
+ /* get_map returns with IPFW_UH_WLOCK if successful */
+ map = get_map(chain, 1, 0 /* not locked */);
+ if (map == NULL) {
+ free(rule, M_IPFW);
+ return ENOSPC;
+ }
+
+ bcopy(input_rule, rule, l);
+ /* clear fields not settable from userland */
+ rule->x_next = NULL;
+ rule->next_rule = NULL;
+ rule->pcnt = 0;
+ rule->bcnt = 0;
+ rule->timestamp = 0;
+
+ if (V_autoinc_step < 1)
+ V_autoinc_step = 1;
+ else if (V_autoinc_step > 1000)
+ V_autoinc_step = 1000;
+ /* find the insertion point, we will insert before */
+ insert_before = rule->rulenum ? rule->rulenum + 1 : IPFW_DEFAULT_RULE;
+ i = ipfw_find_rule(chain, insert_before, 0);
+ /* duplicate first part */
+ if (i > 0)
+ bcopy(chain->map, map, i * sizeof(struct ip_fw *));
+ map[i] = rule;
+ /* duplicate remaining part, we always have the default rule */
+ bcopy(chain->map + i, map + i + 1,
+ sizeof(struct ip_fw *) *(chain->n_rules - i));
+ if (rule->rulenum == 0) {
+ /* write back the number */
+ rule->rulenum = i > 0 ? map[i-1]->rulenum : 0;
+ if (rule->rulenum < IPFW_DEFAULT_RULE - V_autoinc_step)
+ rule->rulenum += V_autoinc_step;
+ input_rule->rulenum = rule->rulenum;
+ }
+
+ rule->id = chain->id + 1;
+ map = swap_map(chain, map, chain->n_rules + 1);
+ chain->static_len += l;
+ IPFW_UH_WUNLOCK(chain);
+ if (map)
+ free(map, M_IPFW);
+ return (0);
+}
+
+/*
+ * Reclaim storage associated with a list of rules. This is
+ * typically the list created using remove_rule.
+ * A NULL pointer on input is handled correctly.
+ */
+void
+ipfw_reap_rules(struct ip_fw *head)
+{
+ struct ip_fw *rule;
+
+ while ((rule = head) != NULL) {
+ head = head->x_next;
+ free(rule, M_IPFW);
+ }
+}
+
+/*
+ * Used by del_entry() to check if a rule should be kept.
+ * Returns 1 if the rule must be kept, 0 otherwise.
+ *
+ * Called with cmd = {0,1,5}.
+ * cmd == 0 matches on rule numbers, excluding rules in RESVD_SET if n == 0;
+ * cmd == 1 matches on set numbers only, rule numbers are ignored;
+ * cmd == 5 matches on rule and set numbers.
+ *
+ * n == 0 is a wildcard for rule numbers; there is no wildcard for sets.
+ *
+ * Rules to keep are
+ * (default || reserved || !match_set || !match_number)
+ * where
+ * default ::= (rule->rulenum == IPFW_DEFAULT_RULE)
+ * // the default rule is always protected
+ *
+ * reserved ::= (cmd == 0 && n == 0 && rule->set == RESVD_SET)
+ * // RESVD_SET is protected only if cmd == 0 and n == 0 ("ipfw flush")
+ *
+ * match_set ::= (cmd == 0 || rule->set == set)
+ * // set number is ignored for cmd == 0
+ *
+ * match_number ::= (cmd == 1 || n == 0 || n == rule->rulenum)
+ * // number is ignored for cmd == 1 or n == 0
+ *
+ */
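+/*
+ * Example: keep_rule(rule, 0, 0, 100) ("delete rules numbered 100")
+ * keeps every rule except those numbered 100; the default rule is
+ * always kept.
+ */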
+static int
+keep_rule(struct ip_fw *rule, uint8_t cmd, uint8_t set, uint32_t n)
+{
+ return
+ (rule->rulenum == IPFW_DEFAULT_RULE) ||
+ (cmd == 0 && n == 0 && rule->set == RESVD_SET) ||
+ !(cmd == 0 || rule->set == set) ||
+ !(cmd == 1 || n == 0 || n == rule->rulenum);
+}
+
+/**
+ * Remove all rules with given number, or do set manipulation.
+ * Assumes chain != NULL && *chain != NULL.
+ *
+ * The argument is a uint32_t. The low 16 bits are the rule or set number;
+ * the next 8 bits are the new set; the top 8 bits indicate the command:
+ *
+ * 0 delete rules numbered "rulenum"
+ * 1 delete rules in set "rulenum"
+ * 2 move rules "rulenum" to set "new_set"
+ * 3 move rules from set "rulenum" to set "new_set"
+ * 4 swap sets "rulenum" and "new_set"
+ * 5 delete rules "rulenum" and set "new_set"
+ */
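+/*
+ * For example (illustrative values): arg = (2 << 24) | (7 << 16) | 100
+ * decodes to cmd 2, new_set 7, rulenum 100, i.e. "move rules numbered
+ * 100 to set 7".
+ */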
+static int
+del_entry(struct ip_fw_chain *chain, uint32_t arg)
+{
+ struct ip_fw *rule;
+ uint32_t num; /* rule number or old_set */
+ uint8_t cmd, new_set;
+ int start, end, i, ofs, n;
+ struct ip_fw **map = NULL;
+ int error = 0;
+
+ num = arg & 0xffff;
+ cmd = (arg >> 24) & 0xff;
+ new_set = (arg >> 16) & 0xff;
+
+ if (cmd > 5 || new_set > RESVD_SET)
+ return EINVAL;
+ if (cmd == 0 || cmd == 2 || cmd == 5) {
+ if (num >= IPFW_DEFAULT_RULE)
+ return EINVAL;
+ } else {
+ if (num > RESVD_SET) /* old_set */
+ return EINVAL;
+ }
+
+ IPFW_UH_WLOCK(chain); /* arbitrate writers */
+ chain->reap = NULL; /* prepare for deletions */
+
+ switch (cmd) {
+ case 0: /* delete rules "num" (num == 0 matches all) */
+ case 1: /* delete all rules in set N */
+ case 5: /* delete rules with number N and set "new_set". */
+
+ /*
+ * Locate first rule to delete (start), the rule after
+ * the last one to delete (end), and count how many
+ * rules to delete (n). Always use keep_rule() to
+ * determine which rules to keep.
+ */
+ n = 0;
+ if (cmd == 1) {
+ /* look for a specific set including RESVD_SET.
+ * Must scan the entire range, ignore num.
+ */
+ new_set = num;
+ for (start = -1, end = i = 0; i < chain->n_rules; i++) {
+ if (keep_rule(chain->map[i], cmd, new_set, 0))
+ continue;
+ if (start < 0)
+ start = i;
+ end = i;
+ n++;
+ }
+ end++; /* first non-matching */
+ } else {
+ /* Optimized search on rule numbers */
+ start = ipfw_find_rule(chain, num, 0);
+ for (end = start; end < chain->n_rules; end++) {
+ rule = chain->map[end];
+ if (num > 0 && rule->rulenum != num)
+ break;
+ if (!keep_rule(rule, cmd, new_set, num))
+ n++;
+ }
+ }
+
+ if (n == 0) {
+ /* A flush request (arg == 0) on an empty ruleset
+ * returns with no error. By contrast,
+ * if there is no match on a specific request,
+ * we return EINVAL.
+ */
+ error = (arg == 0) ? 0 : EINVAL;
+ break;
+ }
+
+ /* We have something to delete. Allocate the new map */
+ map = get_map(chain, -n, 1 /* locked */);
+ if (map == NULL) {
+ error = EINVAL;
+ break;
+ }
+
+ /* 1. bcopy the initial part of the map */
+ if (start > 0)
+ bcopy(chain->map, map, start * sizeof(struct ip_fw *));
+ /* 2. copy active rules between start and end */
+ for (i = ofs = start; i < end; i++) {
+ rule = chain->map[i];
+ if (keep_rule(rule, cmd, new_set, num))
+ map[ofs++] = rule;
+ }
+ /* 3. copy the final part of the map */
+ bcopy(chain->map + end, map + ofs,
+ (chain->n_rules - end) * sizeof(struct ip_fw *));
+ /* 4. swap the maps (under the write lock) */
+ map = swap_map(chain, map, chain->n_rules - n);
+ /* 5. now remove the rules deleted from the old map */
+ for (i = start; i < end; i++) {
+ int l;
+ rule = map[i];
+ if (keep_rule(rule, cmd, new_set, num))
+ continue;
+ l = RULESIZE(rule);
+ chain->static_len -= l;
+ ipfw_remove_dyn_children(rule);
+ rule->x_next = chain->reap;
+ chain->reap = rule;
+ }
+ break;
+
+ /*
+ * In the next 3 cases the loop stops at (n_rules - 1)
+ * because the default rule is never eligible.
+ */
+
+ case 2: /* move rules with given RULE number to new set */
+ for (i = 0; i < chain->n_rules - 1; i++) {
+ rule = chain->map[i];
+ if (rule->rulenum == num)
+ rule->set = new_set;
+ }
+ break;
+
+ case 3: /* move rules with given SET number to new set */
+ for (i = 0; i < chain->n_rules - 1; i++) {
+ rule = chain->map[i];
+ if (rule->set == num)
+ rule->set = new_set;
+ }
+ break;
+
+ case 4: /* swap two sets */
+ for (i = 0; i < chain->n_rules - 1; i++) {
+ rule = chain->map[i];
+ if (rule->set == num)
+ rule->set = new_set;
+ else if (rule->set == new_set)
+ rule->set = num;
+ }
+ break;
+ }
+
+ rule = chain->reap;
+ chain->reap = NULL;
+ IPFW_UH_WUNLOCK(chain);
+ ipfw_reap_rules(rule);
+ if (map)
+ free(map, M_IPFW);
+ return error;
+}
+
+/*
+ * Clear counters for a specific rule.
+ * Normally run under IPFW_UH_RLOCK, but these are idempotent ops
+ * so we only care that rules do not disappear.
+ */
+static void
+clear_counters(struct ip_fw *rule, int log_only)
+{
+ ipfw_insn_log *l = (ipfw_insn_log *)ACTION_PTR(rule);
+
+ if (log_only == 0) {
+ rule->bcnt = rule->pcnt = 0;
+ rule->timestamp = 0;
+ }
+ if (l->o.opcode == O_LOG)
+ l->log_left = l->max_log;
+}
+
+/**
+ * Reset some or all counters on firewall rules.
+ * The argument `arg' is a u_int32_t. The low 16 bits are the rule number,
+ * the next 8 bits are the set number, the top 8 bits are the command:
+ *	0	work with rules from all sets;
+ *	1	work with rules only from the specified set.
+ * The rule number is zero if we want to clear all entries.
+ * log_only is 1 if we only want to reset logs, zero otherwise.
+ */
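+/*
+ * For example (illustrative values): arg = (1 << 24) | (3 << 16) | 200
+ * with log_only == 0 clears the counters of rules numbered 200 in
+ * set 3 only.
+ */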
+static int
+zero_entry(struct ip_fw_chain *chain, u_int32_t arg, int log_only)
+{
+ struct ip_fw *rule;
+ char *msg;
+ int i;
+
+ uint16_t rulenum = arg & 0xffff;
+ uint8_t set = (arg >> 16) & 0xff;
+ uint8_t cmd = (arg >> 24) & 0xff;
+
+ if (cmd > 1)
+ return (EINVAL);
+ if (cmd == 1 && set > RESVD_SET)
+ return (EINVAL);
+
+ IPFW_UH_RLOCK(chain);
+ if (rulenum == 0) {
+ V_norule_counter = 0;
+ for (i = 0; i < chain->n_rules; i++) {
+ rule = chain->map[i];
+ /* Skip rules not in our set. */
+ if (cmd == 1 && rule->set != set)
+ continue;
+ clear_counters(rule, log_only);
+ }
+ msg = log_only ? "All logging counts reset" :
+ "Accounting cleared";
+ } else {
+ int cleared = 0;
+ for (i = 0; i < chain->n_rules; i++) {
+ rule = chain->map[i];
+ if (rule->rulenum == rulenum) {
+ if (cmd == 0 || rule->set == set)
+ clear_counters(rule, log_only);
+ cleared = 1;
+ }
+ if (rule->rulenum > rulenum)
+ break;
+ }
+ if (!cleared) { /* we did not find any matching rules */
+ IPFW_UH_RUNLOCK(chain);
+ return (EINVAL);
+ }
+ msg = log_only ? "logging count reset" : "cleared";
+ }
+ IPFW_UH_RUNLOCK(chain);
+
+ if (V_fw_verbose) {
+ int lev = LOG_SECURITY | LOG_NOTICE;
+
+ if (rulenum)
+ log(lev, "ipfw: Entry %d %s.\n", rulenum, msg);
+ else
+ log(lev, "ipfw: %s.\n", msg);
+ }
+ return (0);
+}
+
+/*
+ * Check validity of the structure before insert.
+ * Rules are simple, so this mostly needs to check rule sizes.
+ */
+static int
+check_ipfw_struct(struct ip_fw *rule, int size)
+{
+ int l, cmdlen = 0;
+ int have_action=0;
+ ipfw_insn *cmd;
+
+ if (size < sizeof(*rule)) {
+ printf("ipfw: rule too short\n");
+ return (EINVAL);
+ }
+ /* first, check for valid size */
+ l = RULESIZE(rule);
+ if (l != size) {
+ printf("ipfw: size mismatch (have %d want %d)\n", size, l);
+ return (EINVAL);
+ }
+ if (rule->act_ofs >= rule->cmd_len) {
+ printf("ipfw: bogus action offset (%u > %u)\n",
+ rule->act_ofs, rule->cmd_len - 1);
+ return (EINVAL);
+ }
+ /*
+ * Now go for the individual checks. Very simple ones, basically only
+ * instruction sizes.
+ */
+ for (l = rule->cmd_len, cmd = rule->cmd ;
+ l > 0 ; l -= cmdlen, cmd += cmdlen) {
+ cmdlen = F_LEN(cmd);
+ if (cmdlen > l) {
+ printf("ipfw: opcode %d size truncated\n",
+ cmd->opcode);
+ return EINVAL;
+ }
+ switch (cmd->opcode) {
+ case O_PROBE_STATE:
+ case O_KEEP_STATE:
+ case O_PROTO:
+ case O_IP_SRC_ME:
+ case O_IP_DST_ME:
+ case O_LAYER2:
+ case O_IN:
+ case O_FRAG:
+ case O_DIVERTED:
+ case O_IPOPT:
+ case O_IPTOS:
+ case O_IPPRECEDENCE:
+ case O_IPVER:
+ case O_TCPWIN:
+ case O_TCPFLAGS:
+ case O_TCPOPTS:
+ case O_ESTAB:
+ case O_VERREVPATH:
+ case O_VERSRCREACH:
+ case O_ANTISPOOF:
+ case O_IPSEC:
+#ifdef INET6
+ case O_IP6_SRC_ME:
+ case O_IP6_DST_ME:
+ case O_EXT_HDR:
+ case O_IP6:
+#endif
+ case O_IP4:
+ case O_TAG:
+ if (cmdlen != F_INSN_SIZE(ipfw_insn))
+ goto bad_size;
+ break;
+
+ case O_FIB:
+ if (cmdlen != F_INSN_SIZE(ipfw_insn))
+ goto bad_size;
+ if (cmd->arg1 >= rt_numfibs) {
+ printf("ipfw: invalid fib number %d\n",
+ cmd->arg1);
+ return EINVAL;
+ }
+ break;
+
+ case O_SETFIB:
+ if (cmdlen != F_INSN_SIZE(ipfw_insn))
+ goto bad_size;
+ if (cmd->arg1 >= rt_numfibs) {
+ printf("ipfw: invalid fib number %d\n",
+ cmd->arg1);
+ return EINVAL;
+ }
+ goto check_action;
+
+ case O_UID:
+ case O_GID:
+ case O_JAIL:
+ case O_IP_SRC:
+ case O_IP_DST:
+ case O_TCPSEQ:
+ case O_TCPACK:
+ case O_PROB:
+ case O_ICMPTYPE:
+ if (cmdlen != F_INSN_SIZE(ipfw_insn_u32))
+ goto bad_size;
+ break;
+
+ case O_LIMIT:
+ if (cmdlen != F_INSN_SIZE(ipfw_insn_limit))
+ goto bad_size;
+ break;
+
+ case O_LOG:
+ if (cmdlen != F_INSN_SIZE(ipfw_insn_log))
+ goto bad_size;
+
+ ((ipfw_insn_log *)cmd)->log_left =
+ ((ipfw_insn_log *)cmd)->max_log;
+
+ break;
+
+ case O_IP_SRC_MASK:
+ case O_IP_DST_MASK:
+ /* only odd command lengths */
+ if (!(cmdlen & 1) || cmdlen > 31)
+ goto bad_size;
+ break;
+
+ case O_IP_SRC_SET:
+ case O_IP_DST_SET:
+ if (cmd->arg1 == 0 || cmd->arg1 > 256) {
+ printf("ipfw: invalid set size %d\n",
+ cmd->arg1);
+ return EINVAL;
+ }
+ if (cmdlen != F_INSN_SIZE(ipfw_insn_u32) +
+ (cmd->arg1+31)/32 )
+ goto bad_size;
+ break;
+
+ case O_IP_SRC_LOOKUP:
+ case O_IP_DST_LOOKUP:
+ if (cmd->arg1 >= IPFW_TABLES_MAX) {
+ printf("ipfw: invalid table number %d\n",
+ cmd->arg1);
+ return (EINVAL);
+ }
+ if (cmdlen != F_INSN_SIZE(ipfw_insn) &&
+ cmdlen != F_INSN_SIZE(ipfw_insn_u32) + 1 &&
+ cmdlen != F_INSN_SIZE(ipfw_insn_u32))
+ goto bad_size;
+ break;
+
+ case O_MACADDR2:
+ if (cmdlen != F_INSN_SIZE(ipfw_insn_mac))
+ goto bad_size;
+ break;
+
+ case O_NOP:
+ case O_IPID:
+ case O_IPTTL:
+ case O_IPLEN:
+ case O_TCPDATALEN:
+ case O_TAGGED:
+ if (cmdlen < 1 || cmdlen > 31)
+ goto bad_size;
+ break;
+
+ case O_MAC_TYPE:
+ case O_IP_SRCPORT:
+ case O_IP_DSTPORT: /* XXX artificial limit, 30 port pairs */
+ if (cmdlen < 2 || cmdlen > 31)
+ goto bad_size;
+ break;
+
+ case O_RECV:
+ case O_XMIT:
+ case O_VIA:
+ if (cmdlen != F_INSN_SIZE(ipfw_insn_if))
+ goto bad_size;
+ break;
+
+ case O_ALTQ:
+ if (cmdlen != F_INSN_SIZE(ipfw_insn_altq))
+ goto bad_size;
+ break;
+
+ case O_PIPE:
+ case O_QUEUE:
+ if (cmdlen != F_INSN_SIZE(ipfw_insn))
+ goto bad_size;
+ goto check_action;
+
+ case O_FORWARD_IP:
+#ifdef IPFIREWALL_FORWARD
+ if (cmdlen != F_INSN_SIZE(ipfw_insn_sa))
+ goto bad_size;
+ goto check_action;
+#else
+ return EINVAL;
+#endif
+
+ case O_DIVERT:
+ case O_TEE:
+ if (ip_divert_ptr == NULL)
+ return EINVAL;
+ else
+ goto check_size;
+ case O_NETGRAPH:
+ case O_NGTEE:
+ if (ng_ipfw_input_p == NULL)
+ return EINVAL;
+ else
+ goto check_size;
+ case O_NAT:
+ if (!IPFW_NAT_LOADED)
+ return EINVAL;
+ if (cmdlen != F_INSN_SIZE(ipfw_insn_nat))
+ goto bad_size;
+ goto check_action;
+ case O_FORWARD_MAC: /* XXX not implemented yet */
+ case O_CHECK_STATE:
+ case O_COUNT:
+ case O_ACCEPT:
+ case O_DENY:
+ case O_REJECT:
+#ifdef INET6
+ case O_UNREACH6:
+#endif
+ case O_SKIPTO:
+ case O_REASS:
+check_size:
+ if (cmdlen != F_INSN_SIZE(ipfw_insn))
+ goto bad_size;
+check_action:
+ if (have_action) {
+ printf("ipfw: opcode %d, multiple actions"
+ " not allowed\n",
+ cmd->opcode);
+ return EINVAL;
+ }
+ have_action = 1;
+ if (l != cmdlen) {
+ printf("ipfw: opcode %d, action must be"
+ " last opcode\n",
+ cmd->opcode);
+ return EINVAL;
+ }
+ break;
+#ifdef INET6
+ case O_IP6_SRC:
+ case O_IP6_DST:
+ if (cmdlen != F_INSN_SIZE(struct in6_addr) +
+ F_INSN_SIZE(ipfw_insn))
+ goto bad_size;
+ break;
+
+ case O_FLOW6ID:
+ if (cmdlen != F_INSN_SIZE(ipfw_insn_u32) +
+ ((ipfw_insn_u32 *)cmd)->o.arg1)
+ goto bad_size;
+ break;
+
+ case O_IP6_SRC_MASK:
+ case O_IP6_DST_MASK:
+ if (!(cmdlen & 1) || cmdlen > 127)
+ goto bad_size;
+ break;
+ case O_ICMP6TYPE:
+ if (cmdlen != F_INSN_SIZE(ipfw_insn_icmp6))
+ goto bad_size;
+ break;
+#endif
+
+ default:
+ switch (cmd->opcode) {
+#ifndef INET6
+ case O_IP6_SRC_ME:
+ case O_IP6_DST_ME:
+ case O_EXT_HDR:
+ case O_IP6:
+ case O_UNREACH6:
+ case O_IP6_SRC:
+ case O_IP6_DST:
+ case O_FLOW6ID:
+ case O_IP6_SRC_MASK:
+ case O_IP6_DST_MASK:
+ case O_ICMP6TYPE:
+ printf("ipfw: no IPv6 support in kernel\n");
+ return EPROTONOSUPPORT;
+#endif
+ default:
+ printf("ipfw: opcode %d, unknown opcode\n",
+ cmd->opcode);
+ return EINVAL;
+ }
+ }
+ }
+ if (have_action == 0) {
+ printf("ipfw: missing action\n");
+ return EINVAL;
+ }
+ return 0;
+
+bad_size:
+ printf("ipfw: opcode %d size %d wrong\n",
+ cmd->opcode, cmdlen);
+ return EINVAL;
+}
+
+
+/*
+ * Translation of requests for compatibility with FreeBSD 7.2/8.
+ * A static variable tells us if we have an old client from userland,
+ * and if necessary we translate requests and responses between the
+ * two formats.
+ */
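+/*
+ * (A 7.2 client is detected in ipfw_ctl() below: an IP_FW_ADD whose
+ * sockopt size matches RULESIZE7(rule) flips is7 to 1.)
+ */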
+static int is7 = 0;
+
+struct ip_fw7 {
+ struct ip_fw7 *next; /* linked list of rules */
+ struct ip_fw7 *next_rule; /* ptr to next [skipto] rule */
+ /* 'next_rule' is used to pass up 'set_disable' status */
+
+ uint16_t act_ofs; /* offset of action in 32-bit units */
+ uint16_t cmd_len; /* # of 32-bit words in cmd */
+ uint16_t rulenum; /* rule number */
+ uint8_t set; /* rule set (0..31) */
+ // #define RESVD_SET 31 /* set for default and persistent rules */
+ uint8_t _pad; /* padding */
+ // uint32_t id; /* rule id, only in v.8 */
+ /* These fields are present in all rules. */
+ uint64_t pcnt; /* Packet counter */
+ uint64_t bcnt; /* Byte counter */
+ uint32_t timestamp; /* tv_sec of last match */
+
+ ipfw_insn cmd[1]; /* storage for commands */
+};
+
+int convert_rule_to_7(struct ip_fw *rule);
+int convert_rule_to_8(struct ip_fw *rule);
+
+#ifndef RULESIZE7
+#define RULESIZE7(rule) (sizeof(struct ip_fw7) + \
+ ((struct ip_fw7 *)(rule))->cmd_len * 4 - 4)
+#endif
+
+
+/*
+ * Copy the static and dynamic rules to the supplied buffer
+ * and return the amount of space actually used.
+ * Must be run under IPFW_UH_RLOCK
+ */
+static size_t
+ipfw_getrules(struct ip_fw_chain *chain, void *buf, size_t space)
+{
+ char *bp = buf;
+ char *ep = bp + space;
+ struct ip_fw *rule, *dst;
+ int l, i;
+ time_t boot_seconds;
+
+ boot_seconds = boottime.tv_sec;
+ for (i = 0; i < chain->n_rules; i++) {
+ rule = chain->map[i];
+
+ if (is7) {
+ /* Convert rule to FreeBSD 7.2 format */
+ l = RULESIZE7(rule);
+ if (bp + l + sizeof(uint32_t) <= ep) {
+ int error;
+ bcopy(rule, bp, l + sizeof(uint32_t));
+ error = convert_rule_to_7((struct ip_fw *) bp);
+ if (error)
+ return 0; /*XXX correct? */
+ /*
+ * XXX HACK. Store the disable mask in the "next"
+ * pointer in a wild attempt to keep the ABI the same.
+ * Why do we do this on EVERY rule?
+ */
+ bcopy(&V_set_disable,
+ &(((struct ip_fw7 *)bp)->next_rule),
+ sizeof(V_set_disable));
+ if (((struct ip_fw7 *)bp)->timestamp)
+ ((struct ip_fw7 *)bp)->timestamp += boot_seconds;
+ bp += l;
+ }
+ continue; /* go to next rule */
+ }
+
+ /* normal mode, don't touch rules */
+ l = RULESIZE(rule);
+ if (bp + l > ep) { /* should not happen */
+ printf("overflow dumping static rules\n");
+ break;
+ }
+ dst = (struct ip_fw *)bp;
+ bcopy(rule, dst, l);
+ /*
+ * XXX HACK. Store the disable mask in the "next"
+ * pointer in a wild attempt to keep the ABI the same.
+ * Why do we do this on EVERY rule?
+ */
+ bcopy(&V_set_disable, &dst->next_rule, sizeof(V_set_disable));
+ if (dst->timestamp)
+ dst->timestamp += boot_seconds;
+ bp += l;
+ }
+ ipfw_get_dynamic(&bp, ep); /* protected by the dynamic lock */
+ return (bp - (char *)buf);
+}
+
+
+/**
+ * {set|get}sockopt parser.
+ */
+int
+ipfw_ctl(struct sockopt *sopt)
+{
+#define RULE_MAXSIZE (256*sizeof(u_int32_t))
+ int error;
+ size_t size;
+ struct ip_fw *buf, *rule;
+ struct ip_fw_chain *chain;
+ u_int32_t rulenum[2];
+
+ error = priv_check(sopt->sopt_td, PRIV_NETINET_IPFW);
+ if (error)
+ return (error);
+
+ /*
+ * Disallow modifications in really-really secure mode, but still allow
+ * the logging counters to be reset.
+ */
+ if (sopt->sopt_name == IP_FW_ADD ||
+ (sopt->sopt_dir == SOPT_SET && sopt->sopt_name != IP_FW_RESETLOG)) {
+ error = securelevel_ge(sopt->sopt_td->td_ucred, 3);
+ if (error)
+ return (error);
+ }
+
+ chain = &V_layer3_chain;
+ error = 0;
+
+ switch (sopt->sopt_name) {
+ case IP_FW_GET:
+ /*
+ * Pass up a copy of the current rules. Static rules
+ * come first (the last of which has number IPFW_DEFAULT_RULE),
+ * followed by a possibly empty list of dynamic rules.
+ * The last dynamic rule has NULL in the "next" field.
+ *
+ * Note that the calculated size is used to bound the
+ * amount of data returned to the user. The rule set may
+ * change between calculating the size and returning the
+ * data in which case we'll just return what fits.
+ */
+ for (;;) {
+ int len = 0, want;
+
+ size = chain->static_len;
+ size += ipfw_dyn_len();
+ if (size >= sopt->sopt_valsize)
+ break;
+ buf = malloc(size, M_TEMP, M_WAITOK);
+ if (buf == NULL)
+ break;
+ IPFW_UH_RLOCK(chain);
+ /* check again how much space we need */
+ want = chain->static_len + ipfw_dyn_len();
+ if (size >= want)
+ len = ipfw_getrules(chain, buf, size);
+ IPFW_UH_RUNLOCK(chain);
+ if (size >= want)
+ error = sooptcopyout(sopt, buf, len);
+ free(buf, M_TEMP);
+ if (size >= want)
+ break;
+ }
+ break;
+
+ case IP_FW_FLUSH:
+ /* locking is done within del_entry() */
+ error = del_entry(chain, 0); /* special case, rule=0, cmd=0 means all */
+ break;
+
+ case IP_FW_ADD:
+ rule = malloc(RULE_MAXSIZE, M_TEMP, M_WAITOK);
+ error = sooptcopyin(sopt, rule, RULE_MAXSIZE,
+ sizeof(struct ip_fw7) );
+
+ /*
+ * If the size of the commands equals RULESIZE7 then we assume
+ * a FreeBSD 7.2 binary is talking to us (set is7=1).
+ * is7 is persistent, so the next 'ipfw list' command
+ * will use this format.
+ * NOTE: if the wrong version is guessed (this can happen if
+ * the first ipfw command is 'ipfw [pipe] list')
+ * the ipfw binary may crash or loop infinitely...
+ */
+ if (sopt->sopt_valsize == RULESIZE7(rule)) {
+ is7 = 1;
+ error = convert_rule_to_8(rule);
+ if (error) {
+ free(rule, M_TEMP);
+ return (error);
+ }
+ error = check_ipfw_struct(rule, RULESIZE(rule));
+ } else {
+ is7 = 0;
+ if (error == 0)
+ error = check_ipfw_struct(rule, sopt->sopt_valsize);
+ }
+ if (error == 0) {
+ /* locking is done within ipfw_add_rule() */
+ error = ipfw_add_rule(chain, rule);
+ size = RULESIZE(rule);
+ if (!error && sopt->sopt_dir == SOPT_GET) {
+ if (is7) {
+ error = convert_rule_to_7(rule);
+ size = RULESIZE7(rule);
+ if (error) {
+ free(rule, M_TEMP);
+ return (error);
+ }
+ }
+ error = sooptcopyout(sopt, rule, size);
+ }
+ }
+ free(rule, M_TEMP);
+ break;
+
+ case IP_FW_DEL:
+ /*
+ * IP_FW_DEL is used for deleting single rules or sets,
+ * and (ab)used to atomically manipulate sets. Argument size
+ * is used to distinguish between the two:
+ * sizeof(u_int32_t)
+ * delete single rule or set of rules,
+ * or reassign rules (or sets) to a different set.
+ * 2*sizeof(u_int32_t)
+ * atomic disable/enable sets.
+ * first u_int32_t contains sets to be disabled,
+ * second u_int32_t contains sets to be enabled.
+ */
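+ /*
+ * For example (illustrative values): a single u_int32_t of
+ * (4 << 24) | (7 << 16) | 3 asks del_entry() to swap sets 3 and 7.
+ */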
+ error = sooptcopyin(sopt, rulenum,
+ 2*sizeof(u_int32_t), sizeof(u_int32_t));
+ if (error)
+ break;
+ size = sopt->sopt_valsize;
+ if (size == sizeof(u_int32_t) && rulenum[0] != 0) {
+ /* delete or reassign, locking done in del_entry() */
+ error = del_entry(chain, rulenum[0]);
+ } else if (size == 2*sizeof(u_int32_t)) { /* set enable/disable */
+ IPFW_UH_WLOCK(chain);
+ V_set_disable =
+ (V_set_disable | rulenum[0]) & ~rulenum[1] &
+ ~(1<<RESVD_SET); /* set RESVD_SET always enabled */
+ IPFW_UH_WUNLOCK(chain);
+ } else
+ error = EINVAL;
+ break;
+
+ case IP_FW_ZERO:
+ case IP_FW_RESETLOG: /* argument is an u_int_32, the rule number */
+ rulenum[0] = 0;
+ if (sopt->sopt_val != 0) {
+ error = sooptcopyin(sopt, rulenum,
+ sizeof(u_int32_t), sizeof(u_int32_t));
+ if (error)
+ break;
+ }
+ error = zero_entry(chain, rulenum[0],
+ sopt->sopt_name == IP_FW_RESETLOG);
+ break;
+
+ /*--- TABLE manipulations are protected by the IPFW_LOCK ---*/
+ case IP_FW_TABLE_ADD:
+ {
+ ipfw_table_entry ent;
+
+ error = sooptcopyin(sopt, &ent,
+ sizeof(ent), sizeof(ent));
+ if (error)
+ break;
+ error = ipfw_add_table_entry(chain, ent.tbl,
+ ent.addr, ent.masklen, ent.value);
+ }
+ break;
+
+ case IP_FW_TABLE_DEL:
+ {
+ ipfw_table_entry ent;
+
+ error = sooptcopyin(sopt, &ent,
+ sizeof(ent), sizeof(ent));
+ if (error)
+ break;
+ error = ipfw_del_table_entry(chain, ent.tbl,
+ ent.addr, ent.masklen);
+ }
+ break;
+
+ case IP_FW_TABLE_FLUSH:
+ {
+ u_int16_t tbl;
+
+ error = sooptcopyin(sopt, &tbl,
+ sizeof(tbl), sizeof(tbl));
+ if (error)
+ break;
+ IPFW_WLOCK(chain);
+ error = ipfw_flush_table(chain, tbl);
+ IPFW_WUNLOCK(chain);
+ }
+ break;
+
+ case IP_FW_TABLE_GETSIZE:
+ {
+ u_int32_t tbl, cnt;
+
+ if ((error = sooptcopyin(sopt, &tbl, sizeof(tbl),
+ sizeof(tbl))))
+ break;
+ IPFW_RLOCK(chain);
+ error = ipfw_count_table(chain, tbl, &cnt);
+ IPFW_RUNLOCK(chain);
+ if (error)
+ break;
+ error = sooptcopyout(sopt, &cnt, sizeof(cnt));
+ }
+ break;
+
+ case IP_FW_TABLE_LIST:
+ {
+ ipfw_table *tbl;
+
+ if (sopt->sopt_valsize < sizeof(*tbl)) {
+ error = EINVAL;
+ break;
+ }
+ size = sopt->sopt_valsize;
+ tbl = malloc(size, M_TEMP, M_WAITOK);
+ error = sooptcopyin(sopt, tbl, size, sizeof(*tbl));
+ if (error) {
+ free(tbl, M_TEMP);
+ break;
+ }
+ tbl->size = (size - sizeof(*tbl)) /
+ sizeof(ipfw_table_entry);
+ IPFW_RLOCK(chain);
+ error = ipfw_dump_table(chain, tbl);
+ IPFW_RUNLOCK(chain);
+ if (error) {
+ free(tbl, M_TEMP);
+ break;
+ }
+ error = sooptcopyout(sopt, tbl, size);
+ free(tbl, M_TEMP);
+ }
+ break;
+
+ /*--- NAT operations are protected by the IPFW_LOCK ---*/
+ case IP_FW_NAT_CFG:
+ if (IPFW_NAT_LOADED)
+ error = ipfw_nat_cfg_ptr(sopt);
+ else {
+ printf("IP_FW_NAT_CFG: %s\n",
+ "ipfw_nat not present, please load it");
+ error = EINVAL;
+ }
+ break;
+
+ case IP_FW_NAT_DEL:
+ if (IPFW_NAT_LOADED)
+ error = ipfw_nat_del_ptr(sopt);
+ else {
+ printf("IP_FW_NAT_DEL: %s\n",
+ "ipfw_nat not present, please load it");
+ error = EINVAL;
+ }
+ break;
+
+ case IP_FW_NAT_GET_CONFIG:
+ if (IPFW_NAT_LOADED)
+ error = ipfw_nat_get_cfg_ptr(sopt);
+ else {
+ printf("IP_FW_NAT_GET_CFG: %s\n",
+ "ipfw_nat not present, please load it");
+ error = EINVAL;
+ }
+ break;
+
+ case IP_FW_NAT_GET_LOG:
+ if (IPFW_NAT_LOADED)
+ error = ipfw_nat_get_log_ptr(sopt);
+ else {
+ printf("IP_FW_NAT_GET_LOG: %s\n",
+ "ipfw_nat not present, please load it");
+ error = EINVAL;
+ }
+ break;
+
+ default:
+ printf("ipfw: ipfw_ctl invalid option %d\n", sopt->sopt_name);
+ error = EINVAL;
+ }
+
+ return (error);
+#undef RULE_MAXSIZE
+}
+
+
+#define RULE_MAXSIZE (256*sizeof(u_int32_t))
+
+/* Functions to convert rules 7.2 <==> 8.0 */
+int
+convert_rule_to_7(struct ip_fw *rule)
+{
+ /* Used to modify original rule */
+ struct ip_fw7 *rule7 = (struct ip_fw7 *)rule;
+ /* copy of original rule, version 8 */
+ struct ip_fw *tmp;
+
+ /* Used to copy commands */
+ ipfw_insn *ccmd, *dst;
+ int ll = 0, ccmdlen = 0;
+
+ tmp = malloc(RULE_MAXSIZE, M_TEMP, M_NOWAIT | M_ZERO);
+ if (tmp == NULL) {
+ return (ENOMEM);
+ }
+ bcopy(rule, tmp, RULE_MAXSIZE);
+
+ /* Copy fields */
+ rule7->_pad = tmp->_pad;
+ rule7->set = tmp->set;
+ rule7->rulenum = tmp->rulenum;
+ rule7->cmd_len = tmp->cmd_len;
+ rule7->act_ofs = tmp->act_ofs;
+ rule7->next_rule = (struct ip_fw7 *)tmp->next_rule;
+ rule7->next = (struct ip_fw7 *)tmp->x_next;
+ rule7->pcnt = tmp->pcnt;
+ rule7->bcnt = tmp->bcnt;
+ rule7->timestamp = tmp->timestamp;
+
+ /* Copy commands */
+ for (ll = tmp->cmd_len, ccmd = tmp->cmd, dst = rule7->cmd ;
+ ll > 0 ; ll -= ccmdlen, ccmd += ccmdlen, dst += ccmdlen) {
+ ccmdlen = F_LEN(ccmd);
+
+ bcopy(ccmd, dst, F_LEN(ccmd)*sizeof(uint32_t));
+
+ if (dst->opcode > O_NAT)
+ /* O_REASS does not exist in version 7.2, so
+ * decrement opcodes that come after it.
+ */
+ dst->opcode--;
+
+ if (ccmdlen > ll) {
+ printf("ipfw: opcode %d size truncated\n",
+ ccmd->opcode);
+ free(tmp, M_TEMP);
+ return (EINVAL);
+ }
+ }
+ free(tmp, M_TEMP);
+
+ return 0;
+}
+
+int
+convert_rule_to_8(struct ip_fw *rule)
+{
+ /* Used to modify original rule */
+ struct ip_fw7 *rule7 = (struct ip_fw7 *) rule;
+
+ /* Used to copy commands */
+ ipfw_insn *ccmd, *dst;
+ int ll = 0, ccmdlen = 0;
+
+ /* Copy of original rule */
+ struct ip_fw7 *tmp = malloc(RULE_MAXSIZE, M_TEMP, M_NOWAIT | M_ZERO);
+ if (tmp == NULL) {
+ return (ENOMEM);
+ }
+
+ bcopy(rule7, tmp, RULE_MAXSIZE);
+
+ for (ll = tmp->cmd_len, ccmd = tmp->cmd, dst = rule->cmd ;
+ ll > 0 ; ll -= ccmdlen, ccmd += ccmdlen, dst += ccmdlen) {
+ ccmdlen = F_LEN(ccmd);
+
+ bcopy(ccmd, dst, F_LEN(ccmd)*sizeof(uint32_t));
+
+ if (dst->opcode > O_NAT)
+ /* O_REASS does not exist in version 7.2, so
+ * increment opcodes that come after it.
+ */
+ dst->opcode++;
+
+ if (ccmdlen > ll) {
+ printf("ipfw: opcode %d size truncated\n",
+ ccmd->opcode);
+ free(tmp, M_TEMP);
+ return (EINVAL);
+ }
+ }
+
+ rule->_pad = tmp->_pad;
+ rule->set = tmp->set;
+ rule->rulenum = tmp->rulenum;
+ rule->cmd_len = tmp->cmd_len;
+ rule->act_ofs = tmp->act_ofs;
+ rule->next_rule = (struct ip_fw *)tmp->next_rule;
+ rule->x_next = (struct ip_fw *)tmp->next;
+ rule->id = 0; /* XXX is 0 an acceptable default? */
+ rule->pcnt = tmp->pcnt;
+ rule->bcnt = tmp->bcnt;
+ rule->timestamp = tmp->timestamp;
+
+ free(tmp, M_TEMP);
+ return 0;
+}
+
+/* end of file */
diff --git a/rtems/freebsd/netinet/ipfw/ip_fw_table.c b/rtems/freebsd/netinet/ipfw/ip_fw_table.c
new file mode 100644
index 00000000..e0c81668
--- /dev/null
+++ b/rtems/freebsd/netinet/ipfw/ip_fw_table.c
@@ -0,0 +1,288 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 2004 Ruslan Ermilov and Vsevolod Lobko.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * Lookup table support for ipfw
+ *
+ * Lookup tables are implemented (at the moment) using the radix
+ * tree used for routing tables. Tables store key-value entries, where
+ * keys are network prefixes (addr/masklen), and values are integers.
+ * As a degenerate case we can interpret keys as 32-bit integers
+ * (with a /32 mask).
+ *
+ * The table is protected by the IPFW lock even for manipulation coming
+ * from userland, because operations are typically fast.
+ */
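+
+/*
+ * A minimal userland sketch (an assumption, mirroring the
+ * ipfw_table_entry handling in ip_fw_sockopt.c): insert
+ * 192.0.2.0/24 with value 42 into table 1 through the
+ * IP_FW_TABLE_ADD socket option. The socket "s" is assumed.
+ */
+#if 0
+ ipfw_table_entry ent;
+
+ memset(&ent, 0, sizeof(ent));
+ ent.tbl = 1;
+ ent.addr = inet_addr("192.0.2.0");
+ ent.masklen = 24;
+ ent.value = 42;
+ setsockopt(s, IPPROTO_IP, IP_FW_TABLE_ADD, &ent, sizeof(ent));
+#endif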
+
+#if !defined(KLD_MODULE)
+#include <rtems/freebsd/local/opt_ipfw.h>
+#include <rtems/freebsd/local/opt_ipdivert.h>
+#include <rtems/freebsd/local/opt_ipdn.h>
+#include <rtems/freebsd/local/opt_inet.h>
+#ifndef INET
+#error IPFIREWALL requires INET.
+#endif /* INET */
+#endif
+#include <rtems/freebsd/local/opt_inet6.h>
+#include <rtems/freebsd/local/opt_ipsec.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/rwlock.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/net/if.h> /* ip_fw.h requires IFNAMSIZ */
+#include <rtems/freebsd/net/radix.h>
+#include <rtems/freebsd/net/route.h>
+#include <rtems/freebsd/net/vnet.h>
+
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/ip_var.h> /* struct ipfw_rule_ref */
+#include <rtems/freebsd/netinet/ip_fw.h>
+#include <rtems/freebsd/sys/queue.h> /* LIST_HEAD */
+#include <rtems/freebsd/netinet/ipfw/ip_fw_private.h>
+
+#ifdef MAC
+#include <rtems/freebsd/security/mac/mac_framework.h>
+#endif
+
+MALLOC_DEFINE(M_IPFW_TBL, "ipfw_tbl", "IpFw tables");
+
+struct table_entry {
+ struct radix_node rn[2];
+ struct sockaddr_in addr, mask;
+ u_int32_t value;
+};
+
+/*
+ * The radix code expects addr and mask to be array of bytes,
+ * with the first byte being the length of the array. rn_inithead
+ * is called with the offset in bits of the lookup key within the
+ * array. If we use a sockaddr_in as the underlying type,
+ * sin_len is conveniently located at offset 0, sin_addr is at
+ * offset 4 and normally aligned.
+ * But for portability, let's avoid such assumptions and make the code explicit.
+ */
+#define KEY_LEN(v) *((uint8_t *)&(v))
+#define KEY_OFS (8*offsetof(struct sockaddr_in, sin_addr))
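+
+/*
+ * Worked example of the encoding above and the mask arithmetic used
+ * below: for mlen == 24 the mask stored by ipfw_add_table_entry() is
+ * htonl(~((1 << (32 - 24)) - 1)) == htonl(0xffffff00), and mlen == 0
+ * yields an all-zero mask (match everything). KEY_LEN is set to 8
+ * because sin_addr ends 8 bytes into struct sockaddr_in.
+ */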
+
+int
+ipfw_add_table_entry(struct ip_fw_chain *ch, uint16_t tbl, in_addr_t addr,
+ uint8_t mlen, uint32_t value)
+{
+ struct radix_node_head *rnh;
+ struct table_entry *ent;
+ struct radix_node *rn;
+
+ if (tbl >= IPFW_TABLES_MAX)
+ return (EINVAL);
+ rnh = ch->tables[tbl];
+ ent = malloc(sizeof(*ent), M_IPFW_TBL, M_NOWAIT | M_ZERO);
+ if (ent == NULL)
+ return (ENOMEM);
+ ent->value = value;
+ KEY_LEN(ent->addr) = KEY_LEN(ent->mask) = 8;
+ ent->mask.sin_addr.s_addr = htonl(mlen ? ~((1 << (32 - mlen)) - 1) : 0);
+ ent->addr.sin_addr.s_addr = addr & ent->mask.sin_addr.s_addr;
+ IPFW_WLOCK(ch);
+ rn = rnh->rnh_addaddr(&ent->addr, &ent->mask, rnh, (void *)ent);
+ if (rn == NULL) {
+ IPFW_WUNLOCK(ch);
+ free(ent, M_IPFW_TBL);
+ return (EEXIST);
+ }
+ IPFW_WUNLOCK(ch);
+ return (0);
+}
+
+int
+ipfw_del_table_entry(struct ip_fw_chain *ch, uint16_t tbl, in_addr_t addr,
+ uint8_t mlen)
+{
+ struct radix_node_head *rnh;
+ struct table_entry *ent;
+ struct sockaddr_in sa, mask;
+
+ if (tbl >= IPFW_TABLES_MAX)
+ return (EINVAL);
+ rnh = ch->tables[tbl];
+ KEY_LEN(sa) = KEY_LEN(mask) = 8;
+ mask.sin_addr.s_addr = htonl(mlen ? ~((1 << (32 - mlen)) - 1) : 0);
+ sa.sin_addr.s_addr = addr & mask.sin_addr.s_addr;
+ IPFW_WLOCK(ch);
+ ent = (struct table_entry *)rnh->rnh_deladdr(&sa, &mask, rnh);
+ if (ent == NULL) {
+ IPFW_WUNLOCK(ch);
+ return (ESRCH);
+ }
+ IPFW_WUNLOCK(ch);
+ free(ent, M_IPFW_TBL);
+ return (0);
+}
+
+static int
+flush_table_entry(struct radix_node *rn, void *arg)
+{
+ struct radix_node_head * const rnh = arg;
+ struct table_entry *ent;
+
+ ent = (struct table_entry *)
+ rnh->rnh_deladdr(rn->rn_key, rn->rn_mask, rnh);
+ if (ent != NULL)
+ free(ent, M_IPFW_TBL);
+ return (0);
+}
+
+int
+ipfw_flush_table(struct ip_fw_chain *ch, uint16_t tbl)
+{
+ struct radix_node_head *rnh;
+
+ IPFW_WLOCK_ASSERT(ch);
+
+ if (tbl >= IPFW_TABLES_MAX)
+ return (EINVAL);
+ rnh = ch->tables[tbl];
+ KASSERT(rnh != NULL, ("NULL IPFW table"));
+ rnh->rnh_walktree(rnh, flush_table_entry, rnh);
+ return (0);
+}
+
+void
+ipfw_destroy_tables(struct ip_fw_chain *ch)
+{
+ uint16_t tbl;
+ struct radix_node_head *rnh;
+
+ IPFW_WLOCK_ASSERT(ch);
+
+ for (tbl = 0; tbl < IPFW_TABLES_MAX; tbl++) {
+ ipfw_flush_table(ch, tbl);
+ rnh = ch->tables[tbl];
+ rn_detachhead((void **)&rnh);
+ }
+}
+
+int
+ipfw_init_tables(struct ip_fw_chain *ch)
+{
+ int i;
+ uint16_t j;
+
+ for (i = 0; i < IPFW_TABLES_MAX; i++) {
+ if (!rn_inithead((void **)&ch->tables[i], KEY_OFS)) {
+ for (j = 0; j < i; j++) {
+ (void) ipfw_flush_table(ch, j);
+ }
+ return (ENOMEM);
+ }
+ }
+ return (0);
+}
+
+int
+ipfw_lookup_table(struct ip_fw_chain *ch, uint16_t tbl, in_addr_t addr,
+ uint32_t *val)
+{
+ struct radix_node_head *rnh;
+ struct table_entry *ent;
+ struct sockaddr_in sa;
+
+ if (tbl >= IPFW_TABLES_MAX)
+ return (0);
+ rnh = ch->tables[tbl];
+ KEY_LEN(sa) = 8;
+ sa.sin_addr.s_addr = addr;
+ ent = (struct table_entry *)(rnh->rnh_lookup(&sa, NULL, rnh));
+ if (ent != NULL) {
+ *val = ent->value;
+ return (1);
+ }
+ return (0);
+}
+
+static int
+count_table_entry(struct radix_node *rn, void *arg)
+{
+ u_int32_t * const cnt = arg;
+
+ (*cnt)++;
+ return (0);
+}
+
+int
+ipfw_count_table(struct ip_fw_chain *ch, uint32_t tbl, uint32_t *cnt)
+{
+ struct radix_node_head *rnh;
+
+ if (tbl >= IPFW_TABLES_MAX)
+ return (EINVAL);
+ rnh = ch->tables[tbl];
+ *cnt = 0;
+ rnh->rnh_walktree(rnh, count_table_entry, cnt);
+ return (0);
+}
+
+static int
+dump_table_entry(struct radix_node *rn, void *arg)
+{
+ struct table_entry * const n = (struct table_entry *)rn;
+ ipfw_table * const tbl = arg;
+ ipfw_table_entry *ent;
+
+ if (tbl->cnt == tbl->size)
+ return (1);
+ ent = &tbl->ent[tbl->cnt];
+ ent->tbl = tbl->tbl;
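+ /*
+ * Recover the prefix length from the stored mask, e.g. a /24
+ * mask is 0xffffff00, ffs() of that is 9, and 33 - 9 == 24.
+ */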
+ if (in_nullhost(n->mask.sin_addr))
+ ent->masklen = 0;
+ else
+ ent->masklen = 33 - ffs(ntohl(n->mask.sin_addr.s_addr));
+ ent->addr = n->addr.sin_addr.s_addr;
+ ent->value = n->value;
+ tbl->cnt++;
+ return (0);
+}
+
+int
+ipfw_dump_table(struct ip_fw_chain *ch, ipfw_table *tbl)
+{
+ struct radix_node_head *rnh;
+
+ if (tbl->tbl >= IPFW_TABLES_MAX)
+ return (EINVAL);
+ rnh = ch->tables[tbl->tbl];
+ tbl->cnt = 0;
+ rnh->rnh_walktree(rnh, dump_table_entry, tbl);
+ return (0);
+}
+/* end of file */
diff --git a/rtems/freebsd/netinet/libalias/alias.c b/rtems/freebsd/netinet/libalias/alias.c
new file mode 100644
index 00000000..cc873f68
--- /dev/null
+++ b/rtems/freebsd/netinet/libalias/alias.c
@@ -0,0 +1,1793 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 2001 Charles Mott <cm@linktel.net>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ Alias.c provides supervisory control for the functions of the
+ packet aliasing software. It consists of routines to monitor
+ TCP connection state, protocol-specific aliasing routines,
+ fragment handling and the following outside world functional
+ interfaces: SaveFragmentPtr, GetFragmentPtr, FragmentAliasIn,
+ PacketAliasIn and PacketAliasOut.
+
+ The other C program files are briefly described. The data
+ structure framework which holds information needed to translate
+ packets is encapsulated in alias_db.c. Data is accessed by
+ function calls, so other segments of the program need not know
+ about the underlying data structures. Alias_ftp.c contains
+ special code for modifying the ftp PORT command used to establish
+ data connections, while alias_irc.c does the same for IRC
+ DCC. Alias_util.c contains a few utility routines.
+
+ Version 1.0 August, 1996 (cjm)
+
+ Version 1.1 August 20, 1996 (cjm)
+ PPP host accepts incoming connections for ports 0 to 1023.
+ (Gary Roberts pointed out the need to handle incoming
+ connections.)
+
+ Version 1.2 September 7, 1996 (cjm)
+ Fragment handling error in alias_db.c corrected.
+ (Tom Torrance helped fix this problem.)
+
+ Version 1.4 September 16, 1996 (cjm)
+ - A more generalized method for handling incoming
+ connections, without the 0-1023 restriction, is
+ implemented in alias_db.c
+ - Improved ICMP support in alias.c. Traceroute
+ packet streams can now be correctly aliased.
+ - TCP connection closing logic simplified in
+ alias.c and now allows for additional 1 minute
+ "grace period" after FIN or RST is observed.
+
+ Version 1.5 September 17, 1996 (cjm)
+ Corrected error in handling incoming UDP packets with 0 checksum.
+ (Tom Torrance helped fix this problem.)
+
+ Version 1.6 September 18, 1996 (cjm)
+ Simplified ICMP aliasing scheme. Should now support
+ traceroute from Win95 as well as FreeBSD.
+
+ Version 1.7 January 9, 1997 (cjm)
+ - Out-of-order fragment handling.
+ - IP checksum error fixed for ftp transfers
+ from aliasing host.
+ - Integer return codes added to all
+ aliasing/de-aliasing functions.
+ - Some obsolete comments cleaned up.
+ - Differential checksum computations for
+ IP header (TCP, UDP and ICMP were already
+ differential).
+
+ Version 2.1 May 1997 (cjm)
+ - Added support for outgoing ICMP error
+ messages.
+ - Added two functions PacketAliasIn2()
+ and PacketAliasOut2() for dynamic address
+ control (e.g. round-robin allocation of
+ incoming packets).
+
+ Version 2.2 July 1997 (cjm)
+ - Rationalized API function names to begin
+ with "PacketAlias..."
+ - Eliminated PacketAliasIn2() and
+ PacketAliasOut2() as poorly conceived.
+
+ Version 2.3 Dec 1998 (dillon)
+ - Major bounds checking additions, see FreeBSD/CVS
+
+ Version 3.1 May, 2000 (salander)
+ - Added hooks to handle PPTP.
+
+ Version 3.2 July, 2000 (salander and satoh)
+ - Added PacketUnaliasOut routine.
+ - Added hooks to handle RTSP/RTP.
+
+ See HISTORY file for additional revisions.
+*/
+
+#ifdef _KERNEL
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/sysctl.h>
+#else
+#include <rtems/freebsd/sys/types.h>
+#include <rtems/freebsd/stdlib.h>
+#include <rtems/freebsd/stdio.h>
+#include <rtems/freebsd/ctype.h>
+#include <rtems/freebsd/dlfcn.h>
+#include <rtems/freebsd/errno.h>
+#include <rtems/freebsd/string.h>
+#endif
+
+#include <rtems/freebsd/netinet/in_systm.h>
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/ip.h>
+#include <rtems/freebsd/netinet/ip_icmp.h>
+#include <rtems/freebsd/netinet/tcp.h>
+#include <rtems/freebsd/netinet/udp.h>
+
+#ifdef _KERNEL
+#include <rtems/freebsd/netinet/libalias/alias.h>
+#include <rtems/freebsd/netinet/libalias/alias_local.h>
+#include <rtems/freebsd/netinet/libalias/alias_mod.h>
+#else
+#include <rtems/freebsd/err.h>
+#include <rtems/freebsd/local/alias.h>
+#include <rtems/freebsd/local/alias_local.h>
+#include <rtems/freebsd/local/alias_mod.h>
+#endif
+
+/*
+ * Define libalias SYSCTL Node
+ */
+#ifdef SYSCTL_NODE
+
+SYSCTL_DECL(_net_inet);
+SYSCTL_DECL(_net_inet_ip);
+SYSCTL_NODE(_net_inet_ip, OID_AUTO, alias, CTLFLAG_RW, NULL, "Libalias sysctl API");
+
+#endif
+
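+/*
+ * Sum the two 16-bit words of a 32-bit quantity (e.g. an IP address)
+ * as they sit in memory, for use in the incremental checksum
+ * arithmetic throughout this file.
+ */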
+static __inline int
+twowords(void *p)
+{
+ uint8_t *c = p;
+
+#if BYTE_ORDER == LITTLE_ENDIAN
+ uint16_t s1 = ((uint16_t)c[1] << 8) + (uint16_t)c[0];
+ uint16_t s2 = ((uint16_t)c[3] << 8) + (uint16_t)c[2];
+#else
+ uint16_t s1 = ((uint16_t)c[0] << 8) + (uint16_t)c[1];
+ uint16_t s2 = ((uint16_t)c[2] << 8) + (uint16_t)c[3];
+#endif
+ return (s1 + s2);
+}
+
+/* TCP Handling Routines
+
+ TcpMonitorIn() -- These routines monitor TCP connections, and
+ TcpMonitorOut() delete a link when a connection is closed.
+
+These routines look for SYN, FIN and RST flags to determine when TCP
+connections open and close. When a TCP connection closes, the data
+structure containing packet aliasing information is deleted after
+a timeout period.
+*/
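+
+/*
+ * The transitions implemented below, applied independently to each
+ * direction of a link:
+ *
+ * NOT_CONNECTED --SYN--> CONNECTED --FIN|RST--> DISCONNECTED
+ * NOT_CONNECTED --RST--> DISCONNECTED
+ */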
+
+/* Local prototypes */
+static void TcpMonitorIn(u_char, struct alias_link *);
+
+static void TcpMonitorOut(u_char, struct alias_link *);
+
+
+static void
+TcpMonitorIn(u_char th_flags, struct alias_link *lnk)
+{
+
+ switch (GetStateIn(lnk)) {
+ case ALIAS_TCP_STATE_NOT_CONNECTED:
+ if (th_flags & TH_RST)
+ SetStateIn(lnk, ALIAS_TCP_STATE_DISCONNECTED);
+ else if (th_flags & TH_SYN)
+ SetStateIn(lnk, ALIAS_TCP_STATE_CONNECTED);
+ break;
+ case ALIAS_TCP_STATE_CONNECTED:
+ if (th_flags & (TH_FIN | TH_RST))
+ SetStateIn(lnk, ALIAS_TCP_STATE_DISCONNECTED);
+ break;
+ }
+}
+
+static void
+TcpMonitorOut(u_char th_flags, struct alias_link *lnk)
+{
+
+ switch (GetStateOut(lnk)) {
+ case ALIAS_TCP_STATE_NOT_CONNECTED:
+ if (th_flags & TH_RST)
+ SetStateOut(lnk, ALIAS_TCP_STATE_DISCONNECTED);
+ else if (th_flags & TH_SYN)
+ SetStateOut(lnk, ALIAS_TCP_STATE_CONNECTED);
+ break;
+ case ALIAS_TCP_STATE_CONNECTED:
+ if (th_flags & (TH_FIN | TH_RST))
+ SetStateOut(lnk, ALIAS_TCP_STATE_DISCONNECTED);
+ break;
+ }
+}
+
+
+
+
+
+/* Protocol Specific Packet Aliasing Routines
+
+ IcmpAliasIn(), IcmpAliasIn1(), IcmpAliasIn2()
+ IcmpAliasOut(), IcmpAliasOut1(), IcmpAliasOut2()
+ ProtoAliasIn(), ProtoAliasOut()
+ UdpAliasIn(), UdpAliasOut()
+ TcpAliasIn(), TcpAliasOut()
+
+These routines handle protocol specific details of packet aliasing.
+One may observe a certain amount of repetitive arithmetic in these
+functions, the purpose of which is to compute a revised checksum
+without actually summing over the entire data packet, which could be
+unnecessarily time consuming.
+
+The purpose of the packet aliasing routines is to replace the source
+address of the outgoing packet and then correctly put it back for
+any incoming packets. For TCP and UDP, ports are also re-mapped.
+
+For ICMP echo/timestamp requests and replies, the following scheme
+is used: the ID number is replaced by an alias for the outgoing
+packet.
+
+ICMP error messages are handled by looking at the IP fragment
+in the data section of the message.
+
+For TCP and UDP protocols, a port number is chosen for an outgoing
+packet, and then incoming packets are identified by IP address and
+port numbers. For TCP packets, there is additional logic in the event
+that sequence and ACK numbers have been altered (as in the case for
+FTP data port commands).
+
+The port numbers used by the packet aliasing module are not true
+ports in the Unix sense. No sockets are actually bound to ports.
+They are more correctly thought of as placeholders.
+
+All packets go through the aliasing mechanism, whether they come from
+the gateway machine or other machines on a local area network.
+*/
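+
+/*
+ * A minimal sketch of the incremental update used throughout this
+ * file (cf. RFC 1624): when a 16-bit field changes, accumulate the
+ * difference (old - new) and fold it into the existing ones'
+ * complement checksum instead of recomputing the whole sum. The
+ * ADJUST_CHECKSUM() macro used below performs the fold-and-store
+ * step. "old_field", "new_field" and "cksum" are placeholder names.
+ */
+#if 0
+ int accumulate;
+
+ accumulate = old_field; /* 16-bit value being replaced */
+ accumulate -= new_field; /* its replacement */
+ ADJUST_CHECKSUM(accumulate, cksum); /* fold and store */
+#endif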
+
+
+/* Local prototypes */
+static int IcmpAliasIn1(struct libalias *, struct ip *);
+static int IcmpAliasIn2(struct libalias *, struct ip *);
+static int IcmpAliasIn(struct libalias *, struct ip *);
+
+static int IcmpAliasOut1(struct libalias *, struct ip *, int create);
+static int IcmpAliasOut2(struct libalias *, struct ip *);
+static int IcmpAliasOut(struct libalias *, struct ip *, int create);
+
+static int ProtoAliasIn(struct libalias *la, struct in_addr ip_src,
+ struct in_addr *ip_dst, u_char ip_p, u_short *ip_sum);
+static int ProtoAliasOut(struct libalias *la, struct in_addr *ip_src,
+ struct in_addr ip_dst, u_char ip_p, u_short *ip_sum,
+ int create);
+
+static int UdpAliasIn(struct libalias *, struct ip *);
+static int UdpAliasOut(struct libalias *, struct ip *, int, int create);
+
+static int TcpAliasIn(struct libalias *, struct ip *);
+static int TcpAliasOut(struct libalias *, struct ip *, int, int create);
+
+
+static int
+IcmpAliasIn1(struct libalias *la, struct ip *pip)
+{
+
+ LIBALIAS_LOCK_ASSERT(la);
+/*
+ De-alias incoming echo and timestamp replies.
+ Alias incoming echo and timestamp requests.
+*/
+ struct alias_link *lnk;
+ struct icmp *ic;
+
+ ic = (struct icmp *)ip_next(pip);
+
+/* Get source address from ICMP data field and restore original data */
+ lnk = FindIcmpIn(la, pip->ip_src, pip->ip_dst, ic->icmp_id, 1);
+ if (lnk != NULL) {
+ u_short original_id;
+ int accumulate;
+
+ original_id = GetOriginalPort(lnk);
+
+/* Adjust ICMP checksum */
+ accumulate = ic->icmp_id;
+ accumulate -= original_id;
+ ADJUST_CHECKSUM(accumulate, ic->icmp_cksum);
+
+/* Put original sequence number back in */
+ ic->icmp_id = original_id;
+
+/* Put original address back into IP header */
+ {
+ struct in_addr original_address;
+
+ original_address = GetOriginalAddress(lnk);
+ DifferentialChecksum(&pip->ip_sum,
+ &original_address, &pip->ip_dst, 2);
+ pip->ip_dst = original_address;
+ }
+
+ return (PKT_ALIAS_OK);
+ }
+ return (PKT_ALIAS_IGNORED);
+}
+
+static int
+IcmpAliasIn2(struct libalias *la, struct ip *pip)
+{
+
+ LIBALIAS_LOCK_ASSERT(la);
+/*
+ Alias incoming ICMP error messages containing
+ IP header and first 64 bits of datagram.
+*/
+ struct ip *ip;
+ struct icmp *ic, *ic2;
+ struct udphdr *ud;
+ struct tcphdr *tc;
+ struct alias_link *lnk;
+
+ ic = (struct icmp *)ip_next(pip);
+ ip = &ic->icmp_ip;
+
+ ud = (struct udphdr *)ip_next(ip);
+ tc = (struct tcphdr *)ip_next(ip);
+ ic2 = (struct icmp *)ip_next(ip);
+
+ if (ip->ip_p == IPPROTO_UDP)
+ lnk = FindUdpTcpIn(la, ip->ip_dst, ip->ip_src,
+ ud->uh_dport, ud->uh_sport,
+ IPPROTO_UDP, 0);
+ else if (ip->ip_p == IPPROTO_TCP)
+ lnk = FindUdpTcpIn(la, ip->ip_dst, ip->ip_src,
+ tc->th_dport, tc->th_sport,
+ IPPROTO_TCP, 0);
+ else if (ip->ip_p == IPPROTO_ICMP) {
+ if (ic2->icmp_type == ICMP_ECHO || ic2->icmp_type == ICMP_TSTAMP)
+ lnk = FindIcmpIn(la, ip->ip_dst, ip->ip_src, ic2->icmp_id, 0);
+ else
+ lnk = NULL;
+ } else
+ lnk = NULL;
+
+ if (lnk != NULL) {
+ if (ip->ip_p == IPPROTO_UDP || ip->ip_p == IPPROTO_TCP) {
+ int accumulate, accumulate2;
+ struct in_addr original_address;
+ u_short original_port;
+
+ original_address = GetOriginalAddress(lnk);
+ original_port = GetOriginalPort(lnk);
+
+/* Adjust ICMP checksum */
+ accumulate = twowords(&ip->ip_src);
+ accumulate -= twowords(&original_address);
+ accumulate += ud->uh_sport;
+ accumulate -= original_port;
+ accumulate2 = accumulate;
+ accumulate2 += ip->ip_sum;
+ ADJUST_CHECKSUM(accumulate, ip->ip_sum);
+ accumulate2 -= ip->ip_sum;
+ ADJUST_CHECKSUM(accumulate2, ic->icmp_cksum);
+
+/* Un-alias address in IP header */
+ DifferentialChecksum(&pip->ip_sum,
+ &original_address, &pip->ip_dst, 2);
+ pip->ip_dst = original_address;
+
+/* Un-alias address and port number of original IP packet
+fragment contained in ICMP data section */
+ ip->ip_src = original_address;
+ ud->uh_sport = original_port;
+ } else if (ip->ip_p == IPPROTO_ICMP) {
+ int accumulate, accumulate2;
+ struct in_addr original_address;
+ u_short original_id;
+
+ original_address = GetOriginalAddress(lnk);
+ original_id = GetOriginalPort(lnk);
+
+/* Adjust ICMP checksum */
+ accumulate = twowords(&ip->ip_src);
+ accumulate -= twowords(&original_address);
+ accumulate += ic2->icmp_id;
+ accumulate -= original_id;
+ accumulate2 = accumulate;
+ accumulate2 += ip->ip_sum;
+ ADJUST_CHECKSUM(accumulate, ip->ip_sum);
+ accumulate2 -= ip->ip_sum;
+ ADJUST_CHECKSUM(accumulate2, ic->icmp_cksum);
+
+/* Un-alias address in IP header */
+ DifferentialChecksum(&pip->ip_sum,
+ &original_address, &pip->ip_dst, 2);
+ pip->ip_dst = original_address;
+
+/* Un-alias address of original IP packet and sequence number of
+ embedded ICMP datagram */
+ ip->ip_src = original_address;
+ ic2->icmp_id = original_id;
+ }
+ return (PKT_ALIAS_OK);
+ }
+ return (PKT_ALIAS_IGNORED);
+}
+
+
+static int
+IcmpAliasIn(struct libalias *la, struct ip *pip)
+{
+ int iresult;
+ struct icmp *ic;
+
+ LIBALIAS_LOCK_ASSERT(la);
+/* Return if proxy-only mode is enabled */
+ if (la->packetAliasMode & PKT_ALIAS_PROXY_ONLY)
+ return (PKT_ALIAS_OK);
+
+ ic = (struct icmp *)ip_next(pip);
+
+ iresult = PKT_ALIAS_IGNORED;
+ switch (ic->icmp_type) {
+ case ICMP_ECHOREPLY:
+ case ICMP_TSTAMPREPLY:
+ if (ic->icmp_code == 0) {
+ iresult = IcmpAliasIn1(la, pip);
+ }
+ break;
+ case ICMP_UNREACH:
+ case ICMP_SOURCEQUENCH:
+ case ICMP_TIMXCEED:
+ case ICMP_PARAMPROB:
+ iresult = IcmpAliasIn2(la, pip);
+ break;
+ case ICMP_ECHO:
+ case ICMP_TSTAMP:
+ iresult = IcmpAliasIn1(la, pip);
+ break;
+ }
+ return (iresult);
+}
+
+
+static int
+IcmpAliasOut1(struct libalias *la, struct ip *pip, int create)
+{
+/*
+ Alias outgoing echo and timestamp requests.
+ De-alias outgoing echo and timestamp replies.
+*/
+ struct alias_link *lnk;
+ struct icmp *ic;
+
+ LIBALIAS_LOCK_ASSERT(la);
+ ic = (struct icmp *)ip_next(pip);
+
+/* Save overwritten data for when echo packet returns */
+ lnk = FindIcmpOut(la, pip->ip_src, pip->ip_dst, ic->icmp_id, create);
+ if (lnk != NULL) {
+ u_short alias_id;
+ int accumulate;
+
+ alias_id = GetAliasPort(lnk);
+
+/* Since data field is being modified, adjust ICMP checksum */
+ accumulate = ic->icmp_id;
+ accumulate -= alias_id;
+ ADJUST_CHECKSUM(accumulate, ic->icmp_cksum);
+
+/* Alias sequence number */
+ ic->icmp_id = alias_id;
+
+/* Change source address */
+ {
+ struct in_addr alias_address;
+
+ alias_address = GetAliasAddress(lnk);
+ DifferentialChecksum(&pip->ip_sum,
+ &alias_address, &pip->ip_src, 2);
+ pip->ip_src = alias_address;
+ }
+
+ return (PKT_ALIAS_OK);
+ }
+ return (PKT_ALIAS_IGNORED);
+}
+
+
+static int
+IcmpAliasOut2(struct libalias *la, struct ip *pip)
+{
+/*
+ Alias outgoing ICMP error messages containing
+ IP header and first 64 bits of datagram.
+*/
+ struct ip *ip;
+ struct icmp *ic, *ic2;
+ struct udphdr *ud;
+ struct tcphdr *tc;
+ struct alias_link *lnk;
+
+ LIBALIAS_LOCK_ASSERT(la);
+ ic = (struct icmp *)ip_next(pip);
+ ip = &ic->icmp_ip;
+
+ ud = (struct udphdr *)ip_next(ip);
+ tc = (struct tcphdr *)ip_next(ip);
+ ic2 = (struct icmp *)ip_next(ip);
+
+ if (ip->ip_p == IPPROTO_UDP)
+ lnk = FindUdpTcpOut(la, ip->ip_dst, ip->ip_src,
+ ud->uh_dport, ud->uh_sport,
+ IPPROTO_UDP, 0);
+ else if (ip->ip_p == IPPROTO_TCP)
+ lnk = FindUdpTcpOut(la, ip->ip_dst, ip->ip_src,
+ tc->th_dport, tc->th_sport,
+ IPPROTO_TCP, 0);
+ else if (ip->ip_p == IPPROTO_ICMP) {
+ if (ic2->icmp_type == ICMP_ECHO || ic2->icmp_type == ICMP_TSTAMP)
+ lnk = FindIcmpOut(la, ip->ip_dst, ip->ip_src, ic2->icmp_id, 0);
+ else
+ lnk = NULL;
+ } else
+ lnk = NULL;
+
+ if (lnk != NULL) {
+ if (ip->ip_p == IPPROTO_UDP || ip->ip_p == IPPROTO_TCP) {
+ int accumulate;
+ struct in_addr alias_address;
+ u_short alias_port;
+
+ alias_address = GetAliasAddress(lnk);
+ alias_port = GetAliasPort(lnk);
+
+/* Adjust ICMP checksum */
+ accumulate = twowords(&ip->ip_dst);
+ accumulate -= twowords(&alias_address);
+ accumulate += ud->uh_dport;
+ accumulate -= alias_port;
+ ADJUST_CHECKSUM(accumulate, ic->icmp_cksum);
+
+/*
+ * Alias address in IP header if it comes from the host
+ * the original TCP/UDP packet was destined for.
+ */
+ if (pip->ip_src.s_addr == ip->ip_dst.s_addr) {
+ DifferentialChecksum(&pip->ip_sum,
+ &alias_address, &pip->ip_src, 2);
+ pip->ip_src = alias_address;
+ }
+/* Alias address and port number of original IP packet
+fragment contained in ICMP data section */
+ ip->ip_dst = alias_address;
+ ud->uh_dport = alias_port;
+ } else if (ip->ip_p == IPPROTO_ICMP) {
+ int accumulate;
+ struct in_addr alias_address;
+ u_short alias_id;
+
+ alias_address = GetAliasAddress(lnk);
+ alias_id = GetAliasPort(lnk);
+
+/* Adjust ICMP checksum */
+ accumulate = twowords(&ip->ip_dst);
+ accumulate -= twowords(&alias_address);
+ accumulate += ic2->icmp_id;
+ accumulate -= alias_id;
+ ADJUST_CHECKSUM(accumulate, ic->icmp_cksum);
+
+/*
+ * Alias address in IP header if it comes from the host
+ * the original ICMP message was destined for.
+ */
+ if (pip->ip_src.s_addr == ip->ip_dst.s_addr) {
+ DifferentialChecksum(&pip->ip_sum,
+ &alias_address, &pip->ip_src, 2);
+ pip->ip_src = alias_address;
+ }
+/* Alias address of original IP packet and sequence number of
+ embedded ICMP datagram */
+ ip->ip_dst = alias_address;
+ ic2->icmp_id = alias_id;
+ }
+ return (PKT_ALIAS_OK);
+ }
+ return (PKT_ALIAS_IGNORED);
+}
+
+
+static int
+IcmpAliasOut(struct libalias *la, struct ip *pip, int create)
+{
+ int iresult;
+ struct icmp *ic;
+
+ LIBALIAS_LOCK_ASSERT(la);
+
+/* Return if proxy-only mode is enabled */
+ if (la->packetAliasMode & PKT_ALIAS_PROXY_ONLY)
+ return (PKT_ALIAS_OK);
+
+ ic = (struct icmp *)ip_next(pip);
+
+ iresult = PKT_ALIAS_IGNORED;
+ switch (ic->icmp_type) {
+ case ICMP_ECHO:
+ case ICMP_TSTAMP:
+ if (ic->icmp_code == 0) {
+ iresult = IcmpAliasOut1(la, pip, create);
+ }
+ break;
+ case ICMP_UNREACH:
+ case ICMP_SOURCEQUENCH:
+ case ICMP_TIMXCEED:
+ case ICMP_PARAMPROB:
+ iresult = IcmpAliasOut2(la, pip);
+ break;
+ case ICMP_ECHOREPLY:
+ case ICMP_TSTAMPREPLY:
+ iresult = IcmpAliasOut1(la, pip, create);
+ }
+ return (iresult);
+}
+
+static int
+ProtoAliasIn(struct libalias *la, struct in_addr ip_src,
+ struct in_addr *ip_dst, u_char ip_p, u_short *ip_sum)
+{
+/*
+ Handle incoming IP packets: the only action taken here is to
+ restore the original destination IP address, pointing the
+ packet back at our inside machine.
+*/
+ struct alias_link *lnk;
+
+ LIBALIAS_LOCK_ASSERT(la);
+/* Return if proxy-only mode is enabled */
+ if (la->packetAliasMode & PKT_ALIAS_PROXY_ONLY)
+ return (PKT_ALIAS_OK);
+
+ lnk = FindProtoIn(la, ip_src, *ip_dst, ip_p);
+ if (lnk != NULL) {
+ struct in_addr original_address;
+
+ original_address = GetOriginalAddress(lnk);
+
+/* Restore original IP address */
+ DifferentialChecksum(ip_sum,
+ &original_address, ip_dst, 2);
+ *ip_dst = original_address;
+
+ return (PKT_ALIAS_OK);
+ }
+ return (PKT_ALIAS_IGNORED);
+}
+
+static int
+ProtoAliasOut(struct libalias *la, struct in_addr *ip_src,
+ struct in_addr ip_dst, u_char ip_p, u_short *ip_sum, int create)
+{
+/*
+ Handle outgoing IP packets: the only action taken here is to
+ alias the source IP address of the packet.
+*/
+ struct alias_link *lnk;
+
+ LIBALIAS_LOCK_ASSERT(la);
+ (void)create;
+
+/* Return if proxy-only mode is enabled */
+ if (la->packetAliasMode & PKT_ALIAS_PROXY_ONLY)
+ return (PKT_ALIAS_OK);
+
+ lnk = FindProtoOut(la, *ip_src, ip_dst, ip_p);
+ if (lnk != NULL) {
+ struct in_addr alias_address;
+
+ alias_address = GetAliasAddress(lnk);
+
+/* Change source address */
+ DifferentialChecksum(ip_sum,
+ &alias_address, ip_src, 2);
+ *ip_src = alias_address;
+
+ return (PKT_ALIAS_OK);
+ }
+ return (PKT_ALIAS_IGNORED);
+}
+
+
+static int
+UdpAliasIn(struct libalias *la, struct ip *pip)
+{
+ struct udphdr *ud;
+ struct alias_link *lnk;
+
+ LIBALIAS_LOCK_ASSERT(la);
+
+ ud = (struct udphdr *)ip_next(pip);
+
+ lnk = FindUdpTcpIn(la, pip->ip_src, pip->ip_dst,
+ ud->uh_sport, ud->uh_dport,
+ IPPROTO_UDP, !(la->packetAliasMode & PKT_ALIAS_PROXY_ONLY));
+ if (lnk != NULL) {
+ struct in_addr alias_address;
+ struct in_addr original_address;
+ struct in_addr proxy_address;
+ u_short alias_port;
+ u_short proxy_port;
+ int accumulate;
+ int error;
+ struct alias_data ad = {
+ .lnk = lnk,
+ .oaddr = &original_address,
+ .aaddr = &alias_address,
+ .aport = &alias_port,
+ .sport = &ud->uh_sport,
+ .dport = &ud->uh_dport,
+ .maxpktsize = 0
+ };
+
+ alias_address = GetAliasAddress(lnk);
+ original_address = GetOriginalAddress(lnk);
+ proxy_address = GetProxyAddress(lnk);
+ alias_port = ud->uh_dport;
+ ud->uh_dport = GetOriginalPort(lnk);
+ proxy_port = GetProxyPort(lnk);
+
+ /* Walk out chain. */
+ error = find_handler(IN, UDP, la, pip, &ad);
+ /* If we cannot figure out the packet, ignore it. */
+ if (error < 0)
+ return (PKT_ALIAS_IGNORED);
+
+/* If UDP checksum is not zero, then adjust since destination port */
+/* is being unaliased and destination address is being altered. */
+ if (ud->uh_sum != 0) {
+ accumulate = alias_port;
+ accumulate -= ud->uh_dport;
+ accumulate += twowords(&alias_address);
+ accumulate -= twowords(&original_address);
+
+/* If this is a proxy packet, modify checksum because of source change.*/
+ if (proxy_port != 0) {
+ accumulate += ud->uh_sport;
+ accumulate -= proxy_port;
+ }
+
+ if (proxy_address.s_addr != 0) {
+ accumulate += twowords(&pip->ip_src);
+ accumulate -= twowords(&proxy_address);
+ }
+
+ ADJUST_CHECKSUM(accumulate, ud->uh_sum);
+ }
+/* XXX: Could the two ifs below be combined into one? */
+/* Restore source port and/or address in case of proxying*/
+
+ if (proxy_port != 0)
+ ud->uh_sport = proxy_port;
+
+ if (proxy_address.s_addr != 0) {
+ DifferentialChecksum(&pip->ip_sum,
+ &proxy_address, &pip->ip_src, 2);
+ pip->ip_src = proxy_address;
+ }
+
+/* Restore original IP address */
+ DifferentialChecksum(&pip->ip_sum,
+ &original_address, &pip->ip_dst, 2);
+ pip->ip_dst = original_address;
+
+ return (PKT_ALIAS_OK);
+ }
+ return (PKT_ALIAS_IGNORED);
+}
+
+static int
+UdpAliasOut(struct libalias *la, struct ip *pip, int maxpacketsize, int create)
+{
+ struct udphdr *ud;
+ struct alias_link *lnk;
+ struct in_addr dest_address;
+ struct in_addr proxy_server_address;
+ u_short dest_port;
+ u_short proxy_server_port;
+ int proxy_type;
+ int error;
+
+ LIBALIAS_LOCK_ASSERT(la);
+
+/* Return if proxy-only mode is enabled and no proxy rule was found. */
+ ud = (struct udphdr *)ip_next(pip);
+ proxy_type = ProxyCheck(la, &proxy_server_address,
+ &proxy_server_port, pip->ip_src, pip->ip_dst,
+ ud->uh_dport, pip->ip_p);
+ if (proxy_type == 0 && (la->packetAliasMode & PKT_ALIAS_PROXY_ONLY))
+ return (PKT_ALIAS_OK);
+
+/* If this is a transparent proxy, save original destination,
+ * then alter the destination and adjust checksums */
+ dest_port = ud->uh_dport;
+ dest_address = pip->ip_dst;
+
+ if (proxy_type != 0) {
+ int accumulate;
+
+ accumulate = twowords(&pip->ip_dst);
+ accumulate -= twowords(&proxy_server_address);
+
+ ADJUST_CHECKSUM(accumulate, pip->ip_sum);
+
+ if (ud->uh_sum != 0) {
+ accumulate = twowords(&pip->ip_dst);
+ accumulate -= twowords(&proxy_server_address);
+ accumulate += ud->uh_dport;
+ accumulate -= proxy_server_port;
+ ADJUST_CHECKSUM(accumulate, ud->uh_sum);
+ }
+ pip->ip_dst = proxy_server_address;
+ ud->uh_dport = proxy_server_port;
+ }
+ lnk = FindUdpTcpOut(la, pip->ip_src, pip->ip_dst,
+ ud->uh_sport, ud->uh_dport,
+ IPPROTO_UDP, create);
+ if (lnk != NULL) {
+ u_short alias_port;
+ struct in_addr alias_address;
+ struct alias_data ad = {
+ .lnk = lnk,
+ .oaddr = NULL,
+ .aaddr = &alias_address,
+ .aport = &alias_port,
+ .sport = &ud->uh_sport,
+ .dport = &ud->uh_dport,
+ .maxpktsize = 0
+ };
+
+/* Save original destination address, if this is a proxy packet.
+ * Also modify packet to include destination encoding. This may
+ * change the size of IP header. */
+ if (proxy_type != 0) {
+ SetProxyPort(lnk, dest_port);
+ SetProxyAddress(lnk, dest_address);
+ ProxyModify(la, lnk, pip, maxpacketsize, proxy_type);
+ ud = (struct udphdr *)ip_next(pip);
+ }
+
+ alias_address = GetAliasAddress(lnk);
+ alias_port = GetAliasPort(lnk);
+
+ /* Walk out chain. */
+ error = find_handler(OUT, UDP, la, pip, &ad);
+
+/* If UDP checksum is not zero, adjust since source port is */
+/* being aliased and source address is being altered */
+ if (ud->uh_sum != 0) {
+ int accumulate;
+
+ accumulate = ud->uh_sport;
+ accumulate -= alias_port;
+ accumulate += twowords(&pip->ip_src);
+ accumulate -= twowords(&alias_address);
+ ADJUST_CHECKSUM(accumulate, ud->uh_sum);
+ }
+/* Put alias port in UDP header */
+ ud->uh_sport = alias_port;
+
+/* Change source address */
+ DifferentialChecksum(&pip->ip_sum,
+ &alias_address, &pip->ip_src, 2);
+ pip->ip_src = alias_address;
+
+ return (PKT_ALIAS_OK);
+ }
+ return (PKT_ALIAS_IGNORED);
+}
+
+
+
+static int
+TcpAliasIn(struct libalias *la, struct ip *pip)
+{
+ struct tcphdr *tc;
+ struct alias_link *lnk;
+
+ LIBALIAS_LOCK_ASSERT(la);
+ tc = (struct tcphdr *)ip_next(pip);
+
+ lnk = FindUdpTcpIn(la, pip->ip_src, pip->ip_dst,
+ tc->th_sport, tc->th_dport,
+ IPPROTO_TCP,
+ !(la->packetAliasMode & PKT_ALIAS_PROXY_ONLY));
+ if (lnk != NULL) {
+ struct in_addr alias_address;
+ struct in_addr original_address;
+ struct in_addr proxy_address;
+ u_short alias_port;
+ u_short proxy_port;
+ int accumulate, error;
+
+ /*
+ * Many variables are initialized a bit further below, but the
+ * PPTP input handler (aliashandlepptpin) needs the destination
+ * port as it arrived in the packet, not the original one; see
+ * below [*].
+ */
+
+ struct alias_data ad = {
+ .lnk = lnk,
+ .oaddr = NULL,
+ .aaddr = NULL,
+ .aport = NULL,
+ .sport = &tc->th_sport,
+ .dport = &tc->th_dport,
+ .maxpktsize = 0
+ };
+
+ /* Walk out chain. */
+ error = find_handler(IN, TCP, la, pip, &ad);
+
+ alias_address = GetAliasAddress(lnk);
+ original_address = GetOriginalAddress(lnk);
+ proxy_address = GetProxyAddress(lnk);
+ alias_port = tc->th_dport;
+ tc->th_dport = GetOriginalPort(lnk);
+ proxy_port = GetProxyPort(lnk);
+
+ /*
+ * See [*] above: if anyone adds a find_handler call AFTER this
+ * aliashandlepptpin point, alias_data must be redone as well.
+ * Uncommenting the block below should be enough.
+ */
+#if 0
+ struct alias_data ad = {
+ .lnk = lnk,
+ .oaddr = &original_address,
+ .aaddr = &alias_address,
+ .aport = &alias_port,
+ .sport = &ud->uh_sport,
+ .dport = &ud->uh_dport,
+ .maxpktsize = 0
+ };
+
+ /* Walk out chain. */
+ error = find_handler(la, pip, &ad);
+ if (error == EHDNOF)
+ printf("Protocol handler not found\n");
+#endif
+
+/* Adjust TCP checksum since destination port is being unaliased */
+/* and destination address is being altered. */
+ accumulate = alias_port;
+ accumulate -= tc->th_dport;
+ accumulate += twowords(&alias_address);
+ accumulate -= twowords(&original_address);
+
+/* If this is a proxy, then modify the TCP source port and
+ checksum accumulation */
+ if (proxy_port != 0) {
+ accumulate += tc->th_sport;
+ tc->th_sport = proxy_port;
+ accumulate -= tc->th_sport;
+ accumulate += twowords(&pip->ip_src);
+ accumulate -= twowords(&proxy_address);
+ }
+/* See if ACK number needs to be modified */
+ if (GetAckModified(lnk) == 1) {
+ int delta;
+
+ tc = (struct tcphdr *)ip_next(pip);
+ delta = GetDeltaAckIn(tc->th_ack, lnk);
+ if (delta != 0) {
+ accumulate += twowords(&tc->th_ack);
+ tc->th_ack = htonl(ntohl(tc->th_ack) - delta);
+ accumulate -= twowords(&tc->th_ack);
+ }
+ }
+ ADJUST_CHECKSUM(accumulate, tc->th_sum);
+
+/* Restore original IP address */
+ accumulate = twowords(&pip->ip_dst);
+ pip->ip_dst = original_address;
+ accumulate -= twowords(&pip->ip_dst);
+
+/* If this is a transparent proxy packet, then modify the source
+ address */
+ if (proxy_address.s_addr != 0) {
+ accumulate += twowords(&pip->ip_src);
+ pip->ip_src = proxy_address;
+ accumulate -= twowords(&pip->ip_src);
+ }
+ ADJUST_CHECKSUM(accumulate, pip->ip_sum);
+
+/* Monitor TCP connection state */
+ tc = (struct tcphdr *)ip_next(pip);
+ TcpMonitorIn(tc->th_flags, lnk);
+
+ return (PKT_ALIAS_OK);
+ }
+ return (PKT_ALIAS_IGNORED);
+}
+
+static int
+TcpAliasOut(struct libalias *la, struct ip *pip, int maxpacketsize, int create)
+{
+ int proxy_type, error;
+ u_short dest_port;
+ u_short proxy_server_port;
+ struct in_addr dest_address;
+ struct in_addr proxy_server_address;
+ struct tcphdr *tc;
+ struct alias_link *lnk;
+
+ LIBALIAS_LOCK_ASSERT(la);
+ tc = (struct tcphdr *)ip_next(pip);
+
+ if (create)
+ proxy_type = ProxyCheck(la, &proxy_server_address,
+ &proxy_server_port, pip->ip_src, pip->ip_dst,
+ tc->th_dport, pip->ip_p);
+ else
+ proxy_type = 0;
+
+ if (proxy_type == 0 && (la->packetAliasMode & PKT_ALIAS_PROXY_ONLY))
+ return (PKT_ALIAS_OK);
+
+/* If this is a transparent proxy, save original destination,
+ then alter the destination and adjust checksums */
+ dest_port = tc->th_dport;
+ dest_address = pip->ip_dst;
+ if (proxy_type != 0) {
+ int accumulate;
+
+ accumulate = tc->th_dport;
+ tc->th_dport = proxy_server_port;
+ accumulate -= tc->th_dport;
+ accumulate += twowords(&pip->ip_dst);
+ accumulate -= twowords(&proxy_server_address);
+ ADJUST_CHECKSUM(accumulate, tc->th_sum);
+
+ accumulate = twowords(&pip->ip_dst);
+ pip->ip_dst = proxy_server_address;
+ accumulate -= twowords(&pip->ip_dst);
+ ADJUST_CHECKSUM(accumulate, pip->ip_sum);
+ }
+ lnk = FindUdpTcpOut(la, pip->ip_src, pip->ip_dst,
+ tc->th_sport, tc->th_dport,
+ IPPROTO_TCP, create);
+ if (lnk != NULL) {
+ u_short alias_port;
+ struct in_addr alias_address;
+ int accumulate;
+ struct alias_data ad = {
+ .lnk = lnk,
+ .oaddr = NULL,
+ .aaddr = &alias_address,
+ .aport = &alias_port,
+ .sport = &tc->th_sport,
+ .dport = &tc->th_dport,
+ .maxpktsize = maxpacketsize
+ };
+
+/* Save original destination address, if this is a proxy packet.
+ Also modify packet to include destination encoding. This may
+ change the size of IP header. */
+ if (proxy_type != 0) {
+ SetProxyPort(lnk, dest_port);
+ SetProxyAddress(lnk, dest_address);
+ ProxyModify(la, lnk, pip, maxpacketsize, proxy_type);
+ tc = (struct tcphdr *)ip_next(pip);
+ }
+/* Get alias address and port */
+ alias_port = GetAliasPort(lnk);
+ alias_address = GetAliasAddress(lnk);
+
+/* Monitor TCP connection state */
+ tc = (struct tcphdr *)ip_next(pip);
+ TcpMonitorOut(tc->th_flags, lnk);
+
+ /* Walk out chain. */
+ error = find_handler(OUT, TCP, la, pip, &ad);
+
+/* Adjust TCP checksum since source port is being aliased */
+/* and source address is being altered */
+ accumulate = tc->th_sport;
+ tc->th_sport = alias_port;
+ accumulate -= tc->th_sport;
+ accumulate += twowords(&pip->ip_src);
+ accumulate -= twowords(&alias_address);
+
+/* Modify sequence number if necessary */
+ if (GetAckModified(lnk) == 1) {
+ int delta;
+
+ tc = (struct tcphdr *)ip_next(pip);
+ delta = GetDeltaSeqOut(tc->th_seq, lnk);
+ if (delta != 0) {
+ accumulate += twowords(&tc->th_seq);
+ tc->th_seq = htonl(ntohl(tc->th_seq) + delta);
+ accumulate -= twowords(&tc->th_seq);
+ }
+ }
+ ADJUST_CHECKSUM(accumulate, tc->th_sum);
+
+/* Change source address */
+ accumulate = twowords(&pip->ip_src);
+ pip->ip_src = alias_address;
+ accumulate -= twowords(&pip->ip_src);
+ ADJUST_CHECKSUM(accumulate, pip->ip_sum);
+
+ return (PKT_ALIAS_OK);
+ }
+ return (PKT_ALIAS_IGNORED);
+}
+
+
+
+
+/* Fragment Handling
+
+ FragmentIn()
+ FragmentOut()
+
+The packet aliasing module has a limited ability to handle IP
+fragments. If the ICMP, TCP or UDP header is in the first fragment
+received, then the ID number of the IP packet is saved, and other
+fragments are identified by their ID number and the IP address
+they were sent from. Pointers to unresolved fragments can also be
+saved and recalled when a header fragment is seen.
+*/
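+
+/*
+ * A minimal sketch (an assumed calling pattern, not prescribed by
+ * this file) of how the pieces fit together; "pkt", "maxlen" and
+ * "frag" are placeholder names:
+ */
+#if 0
+ char *frag;
+
+ switch (LibAliasIn(la, pkt, maxlen)) {
+ case PKT_ALIAS_UNRESOLVED_FRAGMENT:
+ /* No header fragment seen yet: queue this one. */
+ LibAliasSaveFragment(la, pkt);
+ break;
+ case PKT_ALIAS_FOUND_HEADER_FRAGMENT:
+ /* Header de-aliased: fix up any queued fragments. */
+ while ((frag = LibAliasGetFragment(la, pkt)) != NULL)
+ LibAliasFragmentIn(la, pkt, frag);
+ break;
+ }
+#endif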
+
+/* Local prototypes */
+static int FragmentIn(struct libalias *la, struct in_addr ip_src,
+ struct in_addr *ip_dst, u_short ip_id, u_short *ip_sum);
+static int FragmentOut(struct libalias *, struct in_addr *ip_src,
+ u_short *ip_sum);
+
+static int
+FragmentIn(struct libalias *la, struct in_addr ip_src, struct in_addr *ip_dst,
+ u_short ip_id, u_short *ip_sum)
+{
+ struct alias_link *lnk;
+
+ LIBALIAS_LOCK_ASSERT(la);
+ lnk = FindFragmentIn2(la, ip_src, *ip_dst, ip_id);
+ if (lnk != NULL) {
+ struct in_addr original_address;
+
+ GetFragmentAddr(lnk, &original_address);
+ DifferentialChecksum(ip_sum,
+ &original_address, ip_dst, 2);
+ *ip_dst = original_address;
+
+ return (PKT_ALIAS_OK);
+ }
+ return (PKT_ALIAS_UNRESOLVED_FRAGMENT);
+}
+
+static int
+FragmentOut(struct libalias *la, struct in_addr *ip_src, u_short *ip_sum)
+{
+ struct in_addr alias_address;
+
+ LIBALIAS_LOCK_ASSERT(la);
+ alias_address = FindAliasAddress(la, *ip_src);
+ DifferentialChecksum(ip_sum,
+ &alias_address, ip_src, 2);
+ *ip_src = alias_address;
+
+ return (PKT_ALIAS_OK);
+}
+
+
+
+
+
+
+/* Outside World Access
+
+ PacketAliasSaveFragment()
+ PacketAliasGetFragment()
+ PacketAliasFragmentIn()
+ PacketAliasIn()
+ PacketAliasOut()
+ PacketUnaliasOut()
+
+(prototypes in alias.h)
+*/
+
+int
+LibAliasSaveFragment(struct libalias *la, char *ptr)
+{
+ int iresult;
+ struct alias_link *lnk;
+ struct ip *pip;
+
+ LIBALIAS_LOCK(la);
+ pip = (struct ip *)ptr;
+ lnk = AddFragmentPtrLink(la, pip->ip_src, pip->ip_id);
+ iresult = PKT_ALIAS_ERROR;
+ if (lnk != NULL) {
+ SetFragmentPtr(lnk, ptr);
+ iresult = PKT_ALIAS_OK;
+ }
+ LIBALIAS_UNLOCK(la);
+ return (iresult);
+}
+
+char *
+LibAliasGetFragment(struct libalias *la, char *ptr)
+{
+ struct alias_link *lnk;
+ char *fptr;
+ struct ip *pip;
+
+ LIBALIAS_LOCK(la);
+ pip = (struct ip *)ptr;
+ lnk = FindFragmentPtr(la, pip->ip_src, pip->ip_id);
+ if (lnk != NULL) {
+ GetFragmentPtr(lnk, &fptr);
+ SetFragmentPtr(lnk, NULL);
+ SetExpire(lnk, 0); /* Deletes link */
+ } else
+ fptr = NULL;
+
+ LIBALIAS_UNLOCK(la);
+ return (fptr);
+}
+
+void
+LibAliasFragmentIn(struct libalias *la, char *ptr, /* Points to correctly
+ * de-aliased header
+ * fragment */
+ char *ptr_fragment /* Points to fragment which must be
+ * de-aliased */
+)
+{
+ struct ip *pip;
+ struct ip *fpip;
+
+ LIBALIAS_LOCK(la);
+ (void)la;
+ pip = (struct ip *)ptr;
+ fpip = (struct ip *)ptr_fragment;
+
+ DifferentialChecksum(&fpip->ip_sum,
+ &pip->ip_dst, &fpip->ip_dst, 2);
+ fpip->ip_dst = pip->ip_dst;
+ LIBALIAS_UNLOCK(la);
+}
+
+/* Local prototypes */
+static int
+LibAliasOutLocked(struct libalias *la, char *ptr,
+ int maxpacketsize, int create);
+static int
+LibAliasInLocked(struct libalias *la, char *ptr,
+ int maxpacketsize);
+
+int
+LibAliasIn(struct libalias *la, char *ptr, int maxpacketsize)
+{
+ int res;
+
+ LIBALIAS_LOCK(la);
+ res = LibAliasInLocked(la, ptr, maxpacketsize);
+ LIBALIAS_UNLOCK(la);
+ return (res);
+}
+
+static int
+LibAliasInLocked(struct libalias *la, char *ptr, int maxpacketsize)
+{
+ struct in_addr alias_addr;
+ struct ip *pip;
+ int iresult;
+
+ if (la->packetAliasMode & PKT_ALIAS_REVERSE) {
+ la->packetAliasMode &= ~PKT_ALIAS_REVERSE;
+ iresult = LibAliasOutLocked(la, ptr, maxpacketsize, 1);
+ la->packetAliasMode |= PKT_ALIAS_REVERSE;
+ goto getout;
+ }
+ HouseKeeping(la);
+ ClearCheckNewLink(la);
+ pip = (struct ip *)ptr;
+ alias_addr = pip->ip_dst;
+
+ /* Defense against mangled packets */
+ if (ntohs(pip->ip_len) > maxpacketsize
+ || (pip->ip_hl << 2) > maxpacketsize) {
+ iresult = PKT_ALIAS_IGNORED;
+ goto getout;
+ }
+
+ iresult = PKT_ALIAS_IGNORED;
+ if ((ntohs(pip->ip_off) & IP_OFFMASK) == 0) {
+ switch (pip->ip_p) {
+ case IPPROTO_ICMP:
+ iresult = IcmpAliasIn(la, pip);
+ break;
+ case IPPROTO_UDP:
+ iresult = UdpAliasIn(la, pip);
+ break;
+ case IPPROTO_TCP:
+ iresult = TcpAliasIn(la, pip);
+ break;
+#ifdef _KERNEL
+ case IPPROTO_SCTP:
+ iresult = SctpAlias(la, pip, SN_TO_LOCAL);
+ break;
+#endif
+ case IPPROTO_GRE: {
+ int error;
+ struct alias_data ad = {
+ .lnk = NULL,
+ .oaddr = NULL,
+ .aaddr = NULL,
+ .aport = NULL,
+ .sport = NULL,
+ .dport = NULL,
+ .maxpktsize = 0
+ };
+
+ /* Walk out chain. */
+ error = find_handler(IN, IP, la, pip, &ad);
+ if (error == 0)
+ iresult = PKT_ALIAS_OK;
+ else
+ iresult = ProtoAliasIn(la, pip->ip_src,
+ &pip->ip_dst, pip->ip_p, &pip->ip_sum);
+ }
+ break;
+ default:
+ iresult = ProtoAliasIn(la, pip->ip_src, &pip->ip_dst,
+ pip->ip_p, &pip->ip_sum);
+ break;
+ }
+
+ if (ntohs(pip->ip_off) & IP_MF) {
+ struct alias_link *lnk;
+
+ lnk = FindFragmentIn1(la, pip->ip_src, alias_addr, pip->ip_id);
+ if (lnk != NULL) {
+ iresult = PKT_ALIAS_FOUND_HEADER_FRAGMENT;
+ SetFragmentAddr(lnk, pip->ip_dst);
+ } else {
+ iresult = PKT_ALIAS_ERROR;
+ }
+ }
+ } else {
+ iresult = FragmentIn(la, pip->ip_src, &pip->ip_dst, pip->ip_id,
+ &pip->ip_sum);
+ }
+
+getout:
+ return (iresult);
+}
+
+
+
+/* Unregistered (RFC 1918 private) address ranges */
+
+/* 10.0.0.0 -> 10.255.255.255 */
+#define UNREG_ADDR_A_LOWER 0x0a000000
+#define UNREG_ADDR_A_UPPER 0x0affffff
+
+/* 172.16.0.0 -> 172.31.255.255 */
+#define UNREG_ADDR_B_LOWER 0xac100000
+#define UNREG_ADDR_B_UPPER 0xac1fffff
+
+/* 192.168.0.0 -> 192.168.255.255 */
+#define UNREG_ADDR_C_LOWER 0xc0a80000
+#define UNREG_ADDR_C_UPPER 0xc0a8ffff
+
+int
+LibAliasOut(struct libalias *la, char *ptr, int maxpacketsize)
+{
+ int res;
+
+ LIBALIAS_LOCK(la);
+ res = LibAliasOutLocked(la, ptr, maxpacketsize, 1);
+ LIBALIAS_UNLOCK(la);
+ return (res);
+}
+
+int
+LibAliasOutTry(struct libalias *la, char *ptr, int maxpacketsize, int create)
+{
+ int res;
+
+ LIBALIAS_LOCK(la);
+ res = LibAliasOutLocked(la, ptr, maxpacketsize, create);
+ LIBALIAS_UNLOCK(la);
+ return (res);
+}
+
+static int
+LibAliasOutLocked(struct libalias *la, char *ptr, /* valid IP packet */
+ int maxpacketsize, /* How much the packet data may grow (FTP
+ * and IRC inline changes) */
+ int create /* Create new entries ? */
+)
+{
+ int iresult;
+ struct in_addr addr_save;
+ struct ip *pip;
+
+ if (la->packetAliasMode & PKT_ALIAS_REVERSE) {
+ la->packetAliasMode &= ~PKT_ALIAS_REVERSE;
+ iresult = LibAliasInLocked(la, ptr, maxpacketsize);
+ la->packetAliasMode |= PKT_ALIAS_REVERSE;
+ goto getout;
+ }
+ HouseKeeping(la);
+ ClearCheckNewLink(la);
+ pip = (struct ip *)ptr;
+
+ /* Defense against mangled packets */
+ if (ntohs(pip->ip_len) > maxpacketsize
+ || (pip->ip_hl << 2) > maxpacketsize) {
+ iresult = PKT_ALIAS_IGNORED;
+ goto getout;
+ }
+
+ addr_save = GetDefaultAliasAddress(la);
+ if (la->packetAliasMode & PKT_ALIAS_UNREGISTERED_ONLY) {
+ u_long addr;
+ int iclass;
+
+ iclass = 0;
+ addr = ntohl(pip->ip_src.s_addr);
+ if (addr >= UNREG_ADDR_C_LOWER && addr <= UNREG_ADDR_C_UPPER)
+ iclass = 3;
+ else if (addr >= UNREG_ADDR_B_LOWER && addr <= UNREG_ADDR_B_UPPER)
+ iclass = 2;
+ else if (addr >= UNREG_ADDR_A_LOWER && addr <= UNREG_ADDR_A_UPPER)
+ iclass = 1;
+
+ if (iclass == 0) {
+ SetDefaultAliasAddress(la, pip->ip_src);
+ }
+ } else if (la->packetAliasMode & PKT_ALIAS_PROXY_ONLY) {
+ SetDefaultAliasAddress(la, pip->ip_src);
+ }
+ iresult = PKT_ALIAS_IGNORED;
+ if ((ntohs(pip->ip_off) & IP_OFFMASK) == 0) {
+ switch (pip->ip_p) {
+ case IPPROTO_ICMP:
+ iresult = IcmpAliasOut(la, pip, create);
+ break;
+ case IPPROTO_UDP:
+ iresult = UdpAliasOut(la, pip, maxpacketsize, create);
+ break;
+ case IPPROTO_TCP:
+ iresult = TcpAliasOut(la, pip, maxpacketsize, create);
+ break;
+#ifdef _KERNEL
+ case IPPROTO_SCTP:
+ iresult = SctpAlias(la, pip, SN_TO_GLOBAL);
+ break;
+#endif
+ case IPPROTO_GRE: {
+ int error;
+ struct alias_data ad = {
+ .lnk = NULL,
+ .oaddr = NULL,
+ .aaddr = NULL,
+ .aport = NULL,
+ .sport = NULL,
+ .dport = NULL,
+ .maxpktsize = 0
+ };
+ /* Walk out chain. */
+ error = find_handler(OUT, IP, la, pip, &ad);
+ if (error == 0)
+ iresult = PKT_ALIAS_OK;
+ else
+ iresult = ProtoAliasOut(la, &pip->ip_src,
+ pip->ip_dst, pip->ip_p, &pip->ip_sum, create);
+ }
+ break;
+ default:
+ iresult = ProtoAliasOut(la, &pip->ip_src,
+ pip->ip_dst, pip->ip_p, &pip->ip_sum, create);
+ break;
+ }
+ } else {
+ iresult = FragmentOut(la, &pip->ip_src, &pip->ip_sum);
+ }
+
+ SetDefaultAliasAddress(la, addr_save);
+getout:
+ return (iresult);
+}
+
+int
+LibAliasUnaliasOut(struct libalias *la, char *ptr, /* valid IP packet */
+ int maxpacketsize /* for error checking */
+)
+{
+ struct ip *pip;
+ struct icmp *ic;
+ struct udphdr *ud;
+ struct tcphdr *tc;
+ struct alias_link *lnk;
+ int iresult = PKT_ALIAS_IGNORED;
+
+ LIBALIAS_LOCK(la);
+ pip = (struct ip *)ptr;
+
+ /* Defense against mangled packets */
+ if (ntohs(pip->ip_len) > maxpacketsize
+ || (pip->ip_hl << 2) > maxpacketsize)
+ goto getout;
+
+ ud = (struct udphdr *)ip_next(pip);
+ tc = (struct tcphdr *)ip_next(pip);
+ ic = (struct icmp *)ip_next(pip);
+
+ /* Find a link */
+ if (pip->ip_p == IPPROTO_UDP)
+ lnk = FindUdpTcpIn(la, pip->ip_dst, pip->ip_src,
+ ud->uh_dport, ud->uh_sport,
+ IPPROTO_UDP, 0);
+ else if (pip->ip_p == IPPROTO_TCP)
+ lnk = FindUdpTcpIn(la, pip->ip_dst, pip->ip_src,
+ tc->th_dport, tc->th_sport,
+ IPPROTO_TCP, 0);
+ else if (pip->ip_p == IPPROTO_ICMP)
+ lnk = FindIcmpIn(la, pip->ip_dst, pip->ip_src, ic->icmp_id, 0);
+ else
+ lnk = NULL;
+
+ /* Change it from an aliased packet to an unaliased packet */
+ if (lnk != NULL) {
+ if (pip->ip_p == IPPROTO_UDP || pip->ip_p == IPPROTO_TCP) {
+ int accumulate;
+ struct in_addr original_address;
+ u_short original_port;
+
+ original_address = GetOriginalAddress(lnk);
+ original_port = GetOriginalPort(lnk);
+
+ /* Adjust TCP/UDP checksum */
+ accumulate = twowords(&pip->ip_src);
+ accumulate -= twowords(&original_address);
+
+ if (pip->ip_p == IPPROTO_UDP) {
+ accumulate += ud->uh_sport;
+ accumulate -= original_port;
+ ADJUST_CHECKSUM(accumulate, ud->uh_sum);
+ } else {
+ accumulate += tc->th_sport;
+ accumulate -= original_port;
+ ADJUST_CHECKSUM(accumulate, tc->th_sum);
+ }
+
+ /* Adjust IP checksum */
+ DifferentialChecksum(&pip->ip_sum,
+ &original_address, &pip->ip_src, 2);
+
+ /* Un-alias source address and port number */
+ pip->ip_src = original_address;
+ if (pip->ip_p == IPPROTO_UDP)
+ ud->uh_sport = original_port;
+ else
+ tc->th_sport = original_port;
+
+ iresult = PKT_ALIAS_OK;
+
+ } else if (pip->ip_p == IPPROTO_ICMP) {
+
+ int accumulate;
+ struct in_addr original_address;
+ u_short original_id;
+
+ original_address = GetOriginalAddress(lnk);
+ original_id = GetOriginalPort(lnk);
+
+ /* Adjust ICMP checksum */
+ accumulate = twowords(&pip->ip_src);
+ accumulate -= twowords(&original_address);
+ accumulate += ic->icmp_id;
+ accumulate -= original_id;
+ ADJUST_CHECKSUM(accumulate, ic->icmp_cksum);
+
+ /* Adjust IP checksum */
+ DifferentialChecksum(&pip->ip_sum,
+ &original_address, &pip->ip_src, 2);
+
+ /* Un-alias source address and port number */
+ pip->ip_src = original_address;
+ ic->icmp_id = original_id;
+
+ iresult = PKT_ALIAS_OK;
+ }
+ }
+getout:
+ LIBALIAS_UNLOCK(la);
+ return (iresult);
+
+}
+
+#ifndef _KERNEL
+
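+/*
+ * /etc/libalias.conf lists one module per line; lines whose first
+ * non-blank character is '#' are comments. A hypothetical example
+ * (the paths are assumptions):
+ *
+ * # protocol helpers
+ * /usr/lib/libalias_ftp.so
+ * /usr/lib/libalias_irc.so
+ */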
+int
+LibAliasRefreshModules(void)
+{
+ char buf[256], conf[] = "/etc/libalias.conf";
+ FILE *fd;
+ int i, len;
+
+ fd = fopen(conf, "r");
+ if (fd == NULL)
+ err(1, "fopen(%s)", conf);
+
+ LibAliasUnLoadAllModule();
+
+ for (;;) {
+ if (fgets(buf, 256, fd) == NULL)
+ break;
+ len = strlen(buf);
+ if (len > 1) {
+ for (i = 0; i < len; i++)
+ if (!isspace(buf[i]))
+ break;
+ if (buf[i] == '#')
+ continue;
+ buf[len - 1] = '\0';
+ LibAliasLoadModule(buf);
+ }
+ }
+ fclose(fd);
+ return (0);
+}
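+
+/*
+ * Illustrative /etc/libalias.conf contents matching the parser above
+ * (one shared-object path per line; lines whose first non-blank
+ * character is '#' are ignored; the paths shown are assumptions):
+ *
+ *	# protocol helper modules
+ *	/usr/lib/libalias_ftp.so
+ *	/usr/lib/libalias_irc.so
+ */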
+
+int
+LibAliasLoadModule(char *path)
+{
+ struct dll *t;
+ void *handle;
+ struct proto_handler *m;
+ const char *error;
+ moduledata_t *p;
+
+ handle = dlopen (path, RTLD_LAZY);
+ if (!handle) {
+ fprintf(stderr, "%s\n", dlerror());
+ return (EINVAL);
+ }
+
+ p = dlsym(handle, "alias_mod");
+	if ((error = dlerror()) != NULL) {
+		fprintf(stderr, "%s\n", error);
+		dlclose(handle);
+		return (EINVAL);
+	}
+
+ t = malloc(sizeof(struct dll));
+	if (t == NULL) {
+		dlclose(handle);
+		return (ENOMEM);
+	}
+ strncpy(t->name, p->name, DLL_LEN);
+ t->handle = handle;
+	if (attach_dll(t) == EEXIST) {
+		free(t);
+		fprintf(stderr, "dll conflict\n");
+		dlclose(handle);
+		return (EEXIST);
+	}
+
+ m = dlsym(t->handle, "handlers");
+ if ((error = dlerror()) != NULL) {
+ fprintf(stderr, "%s\n", error);
+ return (EINVAL);
+ }
+
+ LibAliasAttachHandlers(m);
+ return (0);
+}
+
+int
+LibAliasUnLoadAllModule(void)
+{
+ struct dll *t;
+ struct proto_handler *p;
+
+	/* Detach all handlers, then unload all modules. */
+ while ((p = first_handler()) != NULL) {
+ detach_handler(p);
+ }
+ while ((t = walk_dll_chain()) != NULL) {
+ dlclose(t->handle);
+ free(t);
+ }
+ return (1);
+}
+
+#endif
+
+#ifdef _KERNEL
+/*
+ * m_megapullup() - this function is a big hack.
+ * Thankfully, it's only used in ng_nat and ipfw+nat.
+ *
+ * It allocates an mbuf with a cluster and copies the specified part of the
+ * chain into the cluster, so that it is all contiguous and can be accessed
+ * via a plain (char *) pointer. This is required because libalias doesn't
+ * know how to handle mbuf chains.
+ *
+ * On success, m_megapullup returns an mbuf (possibly with a cluster)
+ * containing the input packet; on failure it returns NULL. The input
+ * packet is always consumed.
+ */
+struct mbuf *
+m_megapullup(struct mbuf *m, int len)
+{
+ struct mbuf *mcl;
+
+ if (len > m->m_pkthdr.len)
+ goto bad;
+
+	/* Do not reallocate the packet if it is sequential,
+	 * writable and has some extra space for expansion.
+	 * XXX: The constant of 100 bytes is completely empirical. */
+#define RESERVE 100
+ if (m->m_next == NULL && M_WRITABLE(m) && M_TRAILINGSPACE(m) >= RESERVE)
+ return (m);
+
+ if (len <= MCLBYTES - RESERVE) {
+ mcl = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
+ } else if (len < MJUM16BYTES) {
+ int size;
+ if (len <= MJUMPAGESIZE - RESERVE) {
+ size = MJUMPAGESIZE;
+ } else if (len <= MJUM9BYTES - RESERVE) {
+ size = MJUM9BYTES;
+ } else {
+ size = MJUM16BYTES;
+		}
+ mcl = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, size);
+ } else {
+ goto bad;
+ }
+ if (mcl == NULL)
+ goto bad;
+
+ m_move_pkthdr(mcl, m);
+ m_copydata(m, 0, len, mtod(mcl, caddr_t));
+ mcl->m_len = mcl->m_pkthdr.len = len;
+ m_freem(m);
+
+ return (mcl);
+bad:
+ m_freem(m);
+ return (NULL);
+}
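+
+/*
+ * Caller-side sketch (illustrative; the locals and error handling are
+ * assumptions, but this is the intended pattern for ng_nat/ipfw+nat
+ * style consumers):
+ *
+ *	if ((m = m_megapullup(m, m->m_pkthdr.len)) == NULL)
+ *		return (ENOMEM);	/- chain already freed on failure
+ *	ptr = mtod(m, char *);		/- contiguous packet for libalias
+ */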
+#endif
diff --git a/rtems/freebsd/netinet/libalias/alias.h b/rtems/freebsd/netinet/libalias/alias.h
new file mode 100644
index 00000000..b845811e
--- /dev/null
+++ b/rtems/freebsd/netinet/libalias/alias.h
@@ -0,0 +1,232 @@
+/* lint -save -library Flexelint comment for external headers */
+
+/*-
+ * Copyright (c) 2001 Charles Mott <cm@linktel.net>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * Alias.h defines the outside world interfaces for the packet aliasing
+ * software.
+ *
+ * This software is placed into the public domain with no restrictions on its
+ * distribution.
+ */
+
+#ifndef _ALIAS_HH_
+#define _ALIAS_HH_
+
+#include <rtems/freebsd/netinet/in_systm.h>
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/ip.h>
+
+#define LIBALIAS_BUF_SIZE 128
+#ifdef _KERNEL
+/*
+ * The kernel version of libalias does not support these features.
+ */
+#define NO_FW_PUNCH
+#define NO_USE_SOCKETS
+#endif
+
+/*
+ * The external interface to libalias, the packet aliasing engine.
+ *
+ * There are two sets of functions:
+ *
+ * PacketAlias*() the old API which doesn't take an instance pointer
+ * and therefore can only have one packet engine at a time.
+ *
+ * LibAlias*() the new API which takes as first argument a pointer to
+ * the instance of the packet aliasing engine.
+ *
+ * The functions otherwise correspond to each other one for one, except
+ * for the LibAliasUnaliasOut()/PacketUnaliasOut() function, which was
+ * misnamed in the old API.
+ */
+
+/*
+ * The instance structure
+ */
+struct libalias;
+
+/*
+ * An anonymous structure, a pointer to which is returned from
+ * PacketAliasRedirectAddr(), PacketAliasRedirectPort() or
+ * PacketAliasRedirectProto(), passed to PacketAliasAddServer(),
+ * and freed by PacketAliasRedirectDelete().
+ */
+struct alias_link;
+
+/* Initialization and control functions. */
+struct libalias *LibAliasInit(struct libalias *);
+void LibAliasSetAddress(struct libalias *, struct in_addr _addr);
+void LibAliasSetFWBase(struct libalias *, unsigned int _base, unsigned int _num);
+void LibAliasSetSkinnyPort(struct libalias *, unsigned int _port);
+unsigned int
+ LibAliasSetMode(struct libalias *, unsigned int _flags, unsigned int _mask);
+void LibAliasUninit(struct libalias *);
+
+/* Packet Handling functions. */
+int LibAliasIn (struct libalias *, char *_ptr, int _maxpacketsize);
+int LibAliasOut(struct libalias *, char *_ptr, int _maxpacketsize);
+int LibAliasOutTry(struct libalias *, char *_ptr, int _maxpacketsize, int _create);
+int LibAliasUnaliasOut(struct libalias *, char *_ptr, int _maxpacketsize);
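+
+/*
+ * Minimal usage sketch (illustrative only; error handling and the
+ * packet source are assumed, and send_packet() is hypothetical):
+ *
+ *	struct libalias *la = LibAliasInit(NULL);
+ *	LibAliasSetAddress(la, alias_addr);	- public-side address
+ *	- buf holds a contiguous outgoing IP packet of size bufsize:
+ *	if (LibAliasOut(la, buf, bufsize) == PKT_ALIAS_OK)
+ *		send_packet(buf);
+ *	LibAliasUninit(la);
+ */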
+
+/* Port and address redirection functions. */
+
+int
+LibAliasAddServer(struct libalias *, struct alias_link *_lnk,
+ struct in_addr _addr, unsigned short _port);
+struct alias_link *
+LibAliasRedirectAddr(struct libalias *, struct in_addr _src_addr,
+ struct in_addr _alias_addr);
+int LibAliasRedirectDynamic(struct libalias *, struct alias_link *_lnk);
+void LibAliasRedirectDelete(struct libalias *, struct alias_link *_lnk);
+struct alias_link *
+LibAliasRedirectPort(struct libalias *, struct in_addr _src_addr,
+ unsigned short _src_port, struct in_addr _dst_addr,
+ unsigned short _dst_port, struct in_addr _alias_addr,
+ unsigned short _alias_port, unsigned char _proto);
+struct alias_link *
+LibAliasRedirectProto(struct libalias *, struct in_addr _src_addr,
+ struct in_addr _dst_addr, struct in_addr _alias_addr,
+ unsigned char _proto);
+
+/* Fragment Handling functions. */
+void LibAliasFragmentIn(struct libalias *, char *_ptr, char *_ptr_fragment);
+char *LibAliasGetFragment(struct libalias *, char *_ptr);
+int LibAliasSaveFragment(struct libalias *, char *_ptr);
+
+/* Miscellaneous functions. */
+int LibAliasCheckNewLink(struct libalias *);
+unsigned short
+ LibAliasInternetChecksum(struct libalias *, unsigned short *_ptr, int _nbytes);
+void LibAliasSetTarget(struct libalias *, struct in_addr _target_addr);
+
+/* Transparent proxying routines. */
+int LibAliasProxyRule(struct libalias *, const char *_cmd);
+
+/* Module handling API */
+int LibAliasLoadModule(char *);
+int LibAliasUnLoadAllModule(void);
+int LibAliasRefreshModules(void);
+
+/* Mbuf helper function. */
+struct mbuf *m_megapullup(struct mbuf *, int);
+
+/*
+ * Mode flags and other constants.
+ */
+
+
+/* Mode flags, set using PacketAliasSetMode() */
+
+/*
+ * If PKT_ALIAS_LOG is set, a message will be printed to /var/log/alias.log
+ * every time a link is created or deleted. This is useful for debugging.
+ */
+#define PKT_ALIAS_LOG 0x01
+
+/*
+ * If PKT_ALIAS_DENY_INCOMING is set, then incoming connections (e.g. to ftp,
+ * telnet or web servers) will be prevented by the aliasing mechanism.
+ */
+#define PKT_ALIAS_DENY_INCOMING 0x02
+
+/*
+ * If PKT_ALIAS_SAME_PORTS is set, an attempt is made to send packets from
+ * the same port they originated on. This allows e.g. rsh to work *99% of
+ * the time*, but _not_ 100% (it will be slightly flaky instead of not
+ * working at all). This mode bit is set by PacketAliasInit(), so it is
+ * a default mode of operation.
+ */
+#define PKT_ALIAS_SAME_PORTS 0x04
+
+/*
+ * If PKT_ALIAS_USE_SOCKETS is set, then when partially specified links (e.g.
+ * destination port and/or address is zero), the packet aliasing engine will
+ * attempt to allocate a socket for the aliasing port it chooses. This will
+ * avoid interference with the host machine. Fully specified links do not
+ * require this. This bit is set after a call to PacketAliasInit(), so it is
+ * a default mode of operation.
+ */
+#ifndef NO_USE_SOCKETS
+#define PKT_ALIAS_USE_SOCKETS 0x08
+#endif
+/*-
+ * If PKT_ALIAS_UNREGISTERED_ONLY is set, then only packets with
+ * unregistered (private) source addresses will be aliased. Private
+ * addresses are those in the following ranges:
+ *
+ * 10.0.0.0 -> 10.255.255.255
+ * 172.16.0.0 -> 172.31.255.255
+ * 192.168.0.0 -> 192.168.255.255
+ */
+#define PKT_ALIAS_UNREGISTERED_ONLY 0x10
+
+/*
+ * If PKT_ALIAS_RESET_ON_ADDR_CHANGE is set, then the table of dynamic
+ * aliasing links will be reset whenever PacketAliasSetAddress() changes the
+ * default aliasing address. If the default aliasing address is left
+ * unchanged by this function call, then the table of dynamic aliasing links
+ * will be left intact. This bit is set after a call to PacketAliasInit().
+ */
+#define PKT_ALIAS_RESET_ON_ADDR_CHANGE 0x20
+
+#ifndef NO_FW_PUNCH
+/*
+ * If PKT_ALIAS_PUNCH_FW is set, active FTP and IRC DCC connections will
+ * create a 'hole' in the firewall to allow the transfers to work. The
+ * ipfw rule number that the hole is created with is controlled by
+ * PacketAliasSetFWBase(). The hole will be attached to that
+ * particular alias_link, so when the link goes away the hole is deleted.
+ */
+#define PKT_ALIAS_PUNCH_FW 0x100
+#endif
+
+/*
+ * If PKT_ALIAS_PROXY_ONLY is set, then NAT will be disabled and only
+ * transparent proxying is performed.
+ */
+#define PKT_ALIAS_PROXY_ONLY 0x40
+
+/*
+ * If PKT_ALIAS_REVERSE is set, the actions of PacketAliasIn() and
+ * PacketAliasOut() are reversed.
+ */
+#define PKT_ALIAS_REVERSE 0x80
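+
+/*
+ * Example of adjusting individual mode bits (illustrative): enable
+ * logging and refuse unsolicited incoming connections while leaving
+ * all other mode bits untouched:
+ *
+ *	LibAliasSetMode(la, PKT_ALIAS_LOG | PKT_ALIAS_DENY_INCOMING,
+ *	    PKT_ALIAS_LOG | PKT_ALIAS_DENY_INCOMING);
+ */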
+
+/* Function return codes. */
+#define PKT_ALIAS_ERROR -1
+#define PKT_ALIAS_OK 1
+#define PKT_ALIAS_IGNORED 2
+#define PKT_ALIAS_UNRESOLVED_FRAGMENT 3
+#define PKT_ALIAS_FOUND_HEADER_FRAGMENT 4
+
+#endif /* !_ALIAS_HH_ */
+
+/* lint -restore */
diff --git a/rtems/freebsd/netinet/libalias/alias_cuseeme.c b/rtems/freebsd/netinet/libalias/alias_cuseeme.c
new file mode 100644
index 00000000..f66318a8
--- /dev/null
+++ b/rtems/freebsd/netinet/libalias/alias_cuseeme.c
@@ -0,0 +1,230 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 1998 Brian Somers <brian@Awfulhak.org>
+ * with the aid of code written by
+ * Junichi SATOH <junichi@astec.co.jp> 1996, 1997.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#ifdef _KERNEL
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/module.h>
+#else
+#include <rtems/freebsd/errno.h>
+#include <rtems/freebsd/sys/types.h>
+#include <rtems/freebsd/stdio.h>
+#endif
+
+#include <rtems/freebsd/netinet/in_systm.h>
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/ip.h>
+#include <rtems/freebsd/netinet/udp.h>
+
+#ifdef _KERNEL
+#include <rtems/freebsd/netinet/libalias/alias.h>
+#include <rtems/freebsd/netinet/libalias/alias_local.h>
+#include <rtems/freebsd/netinet/libalias/alias_mod.h>
+#else
+#include <rtems/freebsd/local/alias_local.h>
+#include <rtems/freebsd/local/alias_mod.h>
+#endif
+
+#define CUSEEME_PORT_NUMBER 7648
+
+static void
+AliasHandleCUSeeMeOut(struct libalias *la, struct ip *pip,
+ struct alias_link *lnk);
+
+static void
+AliasHandleCUSeeMeIn(struct libalias *la, struct ip *pip,
+ struct in_addr original_addr);
+
+static int
+fingerprint(struct libalias *la, struct alias_data *ah)
+{
+
+ if (ah->dport == NULL || ah->oaddr == NULL)
+ return (-1);
+ if (ntohs(*ah->dport) == CUSEEME_PORT_NUMBER)
+ return (0);
+ return (-1);
+}
+
+static int
+protohandlerin(struct libalias *la, struct ip *pip, struct alias_data *ah)
+{
+
+ AliasHandleCUSeeMeIn(la, pip, *ah->oaddr);
+ return (0);
+}
+
+static int
+protohandlerout(struct libalias *la, struct ip *pip, struct alias_data *ah)
+{
+
+ AliasHandleCUSeeMeOut(la, pip, ah->lnk);
+ return (0);
+}
+
+/* Kernel module definition. */
+struct proto_handler handlers[] = {
+ {
+ .pri = 120,
+ .dir = OUT,
+ .proto = UDP,
+ .fingerprint = &fingerprint,
+ .protohandler = &protohandlerout
+ },
+ {
+ .pri = 120,
+ .dir = IN,
+ .proto = UDP,
+ .fingerprint = &fingerprint,
+ .protohandler = &protohandlerin
+ },
+ { EOH }
+};
+
+static int
+mod_handler(module_t mod, int type, void *data)
+{
+ int error;
+
+ switch (type) {
+ case MOD_LOAD:
+ error = 0;
+ LibAliasAttachHandlers(handlers);
+ break;
+ case MOD_UNLOAD:
+ error = 0;
+ LibAliasDetachHandlers(handlers);
+ break;
+ default:
+ error = EINVAL;
+ }
+ return (error);
+}
+
+#ifdef _KERNEL
+static
+#endif
+moduledata_t
+alias_mod = {
+ "alias_cuseeme", mod_handler, NULL
+};
+
+#ifdef _KERNEL
+DECLARE_MODULE(alias_cuseeme, alias_mod, SI_SUB_DRIVERS, SI_ORDER_SECOND);
+MODULE_VERSION(alias_cuseeme, 1);
+MODULE_DEPEND(alias_cuseeme, libalias, 1, 1, 1);
+#endif
+
+/* CU-SeeMe Data Header */
+struct cu_header {
+ u_int16_t dest_family;
+ u_int16_t dest_port;
+ u_int32_t dest_addr;
+ int16_t family;
+ u_int16_t port;
+ u_int32_t addr;
+ u_int32_t seq;
+ u_int16_t msg;
+ u_int16_t data_type;
+ u_int16_t packet_len;
+};
+
+/* Open Continue Header */
+struct oc_header {
+ u_int16_t client_count; /* Number of client info structs */
+ u_int32_t seq_no;
+ char user_name [20];
+ char reserved [4]; /* flags, version stuff, etc */
+};
+
+/* client info structures */
+struct client_info {
+ u_int32_t address;/* Client address */
+ char reserved [8]; /* Flags, pruning bitfield, packet
+ * counts etc */
+};
+
+static void
+AliasHandleCUSeeMeOut(struct libalias *la, struct ip *pip, struct alias_link *lnk)
+{
+ struct udphdr *ud = ip_next(pip);
+
+ if (ntohs(ud->uh_ulen) - sizeof(struct udphdr) >= sizeof(struct cu_header)) {
+ struct cu_header *cu;
+ struct alias_link *cu_lnk;
+
+ cu = udp_next(ud);
+ if (cu->addr)
+ cu->addr = (u_int32_t) GetAliasAddress(lnk).s_addr;
+
+ cu_lnk = FindUdpTcpOut(la, pip->ip_src, GetDestAddress(lnk),
+ ud->uh_dport, 0, IPPROTO_UDP, 1);
+
+#ifndef NO_FW_PUNCH
+ if (cu_lnk)
+ PunchFWHole(cu_lnk);
+#endif
+ }
+}
+
+static void
+AliasHandleCUSeeMeIn(struct libalias *la, struct ip *pip, struct in_addr original_addr)
+{
+ struct in_addr alias_addr;
+ struct udphdr *ud;
+ struct cu_header *cu;
+ struct oc_header *oc;
+ struct client_info *ci;
+ char *end;
+ int i;
+
+ (void)la;
+ alias_addr.s_addr = pip->ip_dst.s_addr;
+ ud = ip_next(pip);
+ cu = udp_next(ud);
+ oc = (struct oc_header *)(cu + 1);
+ ci = (struct client_info *)(oc + 1);
+ end = (char *)ud + ntohs(ud->uh_ulen);
+
+ if ((char *)oc <= end) {
+ if (cu->dest_addr)
+ cu->dest_addr = (u_int32_t) original_addr.s_addr;
+ if (ntohs(cu->data_type) == 101)
+ /* Find and change our address */
+ for (i = 0; (char *)(ci + 1) <= end && i < oc->client_count; i++, ci++)
+ if (ci->address == (u_int32_t) alias_addr.s_addr) {
+ ci->address = (u_int32_t) original_addr.s_addr;
+ break;
+ }
+ }
+}
diff --git a/rtems/freebsd/netinet/libalias/alias_db.c b/rtems/freebsd/netinet/libalias/alias_db.c
new file mode 100644
index 00000000..bd72d55c
--- /dev/null
+++ b/rtems/freebsd/netinet/libalias/alias_db.c
@@ -0,0 +1,2940 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 2001 Charles Mott <cm@linktel.net>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ Alias_db.c encapsulates all data structures used for storing
+ packet aliasing data. Other parts of the aliasing software
+ access data through functions provided in this file.
+
+ Data storage is based on the notion of a "link", which is
+ established for ICMP echo/reply packets, UDP datagrams and
+ TCP stream connections. A link stores the original source
+ and destination addresses. For UDP and TCP, it also stores
+ source and destination port numbers, as well as an alias
+ port number. Links are also used to store information about
+ fragments.
+
+ There is a facility for sweeping through and deleting old
+ links as new packets are sent through. A simple timeout is
+ used for ICMP and UDP links. TCP links are left alone unless
+ there is an incomplete connection, in which case the link
+ can be deleted after a certain amount of time.
+
+
+ Initial version: August, 1996 (cjm)
+
+ Version 1.4: September 16, 1996 (cjm)
+ Facility for handling incoming links added.
+
+ Version 1.6: September 18, 1996 (cjm)
+ ICMP data handling simplified.
+
+ Version 1.7: January 9, 1997 (cjm)
+ Fragment handling simplified.
+ Saves pointers for unresolved fragments.
+ Permits links for unspecified remote ports
+ or unspecified remote addresses.
+ Fixed bug which did not properly zero port
+ table entries after a link was deleted.
+ Cleaned up some obsolete comments.
+
+ Version 1.8: January 14, 1997 (cjm)
+ Fixed data type error in StartPoint().
+ (This error did not exist prior to v1.7
+ and was discovered and fixed by Ari Suutari)
+
+ Version 1.9: February 1, 1997
+	Optionally, connections initiated from the packet aliasing host
+	machine will not have their port number aliased unless it
+ conflicts with an aliasing port already being used. (cjm)
+
+	All options that were earlier #ifdef'ed are now available through
+ a new interface, SetPacketAliasMode(). This allows run time
+ control (which is now available in PPP+pktAlias through the
+ 'alias' keyword). (ee)
+
+ Added ability to create an alias port without
+ either destination address or port specified.
+ port type = ALIAS_PORT_UNKNOWN_DEST_ALL (ee)
+
+ Removed K&R style function headers
+ and general cleanup. (ee)
+
+	Added packetAliasMode to replace compiler #defines (ee)
+
+ Allocates sockets for partially specified
+ ports if ALIAS_USE_SOCKETS defined. (cjm)
+
+ Version 2.0: March, 1997
+ SetAliasAddress() will now clean up alias links
+ if the aliasing address is changed. (cjm)
+
+ PacketAliasPermanentLink() function added to support permanent
+ links. (J. Fortes suggested the need for this.)
+ Examples:
+
+ (192.168.0.1, port 23) <-> alias port 6002, unknown dest addr/port
+
+ (192.168.0.2, port 21) <-> alias port 3604, known dest addr
+ unknown dest port
+
+ These permanent links allow for incoming connections to
+ machines on the local network. They can be given with a
+ user-chosen amount of specificity, with increasing specificity
+ meaning more security. (cjm)
+
+ Quite a bit of rework to the basic engine. The portTable[]
+	array, which kept track of which ports were in use, was replaced
+ by a table/linked list structure. (cjm)
+
+ SetExpire() function added. (cjm)
+
+	DeleteLink() no longer frees memory associated with a pointer
+ to a fragment (this bug was first recognized by E. Eklund in
+ v1.9).
+
+ Version 2.1: May, 1997 (cjm)
+ Packet aliasing engine reworked so that it can handle
+ multiple external addresses rather than just a single
+ host address.
+
+ PacketAliasRedirectPort() and PacketAliasRedirectAddr()
+ added to the API. The first function is a more generalized
+ version of PacketAliasPermanentLink(). The second function
+ implements static network address translation.
+
+ Version 3.2: July, 2000 (salander and satoh)
+ Added FindNewPortGroup to get contiguous range of port values.
+
+ Added QueryUdpTcpIn and QueryUdpTcpOut to look for an aliasing
+ link but not actually add one.
+
+ Added FindRtspOut, which is closely derived from FindUdpTcpOut,
+ except that the alias port (from FindNewPortGroup) is provided
+ as input.
+
+ See HISTORY file for additional revisions.
+*/
+
+#ifdef _KERNEL
+#include <rtems/freebsd/machine/stdarg.h>
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/module.h>
+#include <rtems/freebsd/sys/rwlock.h>
+#include <rtems/freebsd/sys/syslog.h>
+#else
+#include <rtems/freebsd/stdarg.h>
+#include <rtems/freebsd/stdlib.h>
+#include <rtems/freebsd/stdio.h>
+#include <rtems/freebsd/sys/errno.h>
+#include <rtems/freebsd/sys/time.h>
+#include <rtems/freebsd/unistd.h>
+#endif
+
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/netinet/tcp.h>
+
+#ifdef _KERNEL
+#include <rtems/freebsd/netinet/libalias/alias.h>
+#include <rtems/freebsd/netinet/libalias/alias_local.h>
+#include <rtems/freebsd/netinet/libalias/alias_mod.h>
+#include <rtems/freebsd/net/if.h>
+#else
+#include <rtems/freebsd/local/alias.h>
+#include <rtems/freebsd/local/alias_local.h>
+#include <rtems/freebsd/local/alias_mod.h>
+#endif
+
+static LIST_HEAD(, libalias) instancehead = LIST_HEAD_INITIALIZER(instancehead);
+
+
+/*
+ Constants (note: constants are also defined
+ near relevant functions or structs)
+*/
+
+/* Parameters used for cleanup of expired links */
+/* NOTE: ALIAS_CLEANUP_INTERVAL_SECS must be less than LINK_TABLE_OUT_SIZE */
+#define ALIAS_CLEANUP_INTERVAL_SECS 64
+#define ALIAS_CLEANUP_MAX_SPOKES (LINK_TABLE_OUT_SIZE/5)
+
+/* Timeouts (in seconds) for different link types */
+#define ICMP_EXPIRE_TIME 60
+#define UDP_EXPIRE_TIME 60
+#define PROTO_EXPIRE_TIME 60
+#define FRAGMENT_ID_EXPIRE_TIME 10
+#define FRAGMENT_PTR_EXPIRE_TIME 30
+
+/* TCP link expire time for different cases */
+/* When the link has been used and closed - minimal grace time to
+ allow ACKs and potential re-connect in FTP (XXX - is this allowed?) */
+#ifndef TCP_EXPIRE_DEAD
+#define TCP_EXPIRE_DEAD 10
+#endif
+
+/* When the link has been used and closed on one side - the other side
+ is allowed to still send data */
+#ifndef TCP_EXPIRE_SINGLEDEAD
+#define TCP_EXPIRE_SINGLEDEAD 90
+#endif
+
+/* When the link isn't yet up */
+#ifndef TCP_EXPIRE_INITIAL
+#define TCP_EXPIRE_INITIAL 300
+#endif
+
+/* When the link is up */
+#ifndef TCP_EXPIRE_CONNECTED
+#define TCP_EXPIRE_CONNECTED 86400
+#endif
+
+
+/* Dummy port number codes used for FindLinkIn/Out() and AddLink().
+ These constants can be anything except zero, which indicates an
+ unknown port number. */
+
+#define NO_DEST_PORT 1
+#define NO_SRC_PORT 1
+
+
+
+/* Data Structures
+
+ The fundamental data structure used in this program is
+ "struct alias_link". Whenever a TCP connection is made,
+ a UDP datagram is sent out, or an ICMP echo request is made,
+ a link record is made (if it has not already been created).
+ The link record is identified by the source address/port
+ and the destination address/port. In the case of an ICMP
+   echo request, the source port is treated as equivalent to
+   the 16-bit ID number of the ICMP packet.
+
+ The link record also can store some auxiliary data. For
+ TCP connections that have had sequence and acknowledgment
+ modifications, data space is available to track these changes.
+   A state field is used to keep track of changes to the TCP
+ connection state. ID numbers of fragments can also be
+ stored in the auxiliary space. Pointers to unresolved
+ fragments can also be stored.
+
+ The link records support two independent chainings. Lookup
+   tables for input and output hold the initial pointers to
+   the link chains. On input, the lookup table indexes on alias
+ port and link type. On output, the lookup table indexes on
+ source address, destination address, source port, destination
+ port and link type.
+*/
+
+struct ack_data_record { /* used to save changes to ACK/sequence
+ * numbers */
+ u_long ack_old;
+ u_long ack_new;
+ int delta;
+ int active;
+};
+
+struct tcp_state { /* Information about TCP connection */
+ int in; /* State for outside -> inside */
+ int out; /* State for inside -> outside */
+ int index; /* Index to ACK data array */
+	int ack_modified;	/* Indicates whether ACK and
+				 * sequence numbers have
+				 * been modified */
+};
+
+#define N_LINK_TCP_DATA 3 /* Number of distinct ACK number changes
+ * saved for a modified TCP stream */
+struct tcp_dat {
+ struct tcp_state state;
+ struct ack_data_record ack[N_LINK_TCP_DATA];
+ int fwhole; /* Which firewall record is used for this
+ * hole? */
+};
+
+struct server { /* LSNAT server pool (circular list) */
+ struct in_addr addr;
+ u_short port;
+ struct server *next;
+};
+
+struct alias_link { /* Main data structure */
+ struct libalias *la;
+ struct in_addr src_addr; /* Address and port information */
+ struct in_addr dst_addr;
+ struct in_addr alias_addr;
+ struct in_addr proxy_addr;
+ u_short src_port;
+ u_short dst_port;
+ u_short alias_port;
+ u_short proxy_port;
+ struct server *server;
+
+ int link_type; /* Type of link: TCP, UDP, ICMP,
+ * proto, frag */
+
+/* values for link_type */
+#define LINK_ICMP IPPROTO_ICMP
+#define LINK_UDP IPPROTO_UDP
+#define LINK_TCP IPPROTO_TCP
+#define LINK_FRAGMENT_ID (IPPROTO_MAX + 1)
+#define LINK_FRAGMENT_PTR (IPPROTO_MAX + 2)
+#define LINK_ADDR (IPPROTO_MAX + 3)
+#define LINK_PPTP (IPPROTO_MAX + 4)
+
+ int flags; /* indicates special characteristics */
+ int pflags; /* protocol-specific flags */
+
+/* flag bits */
+#define LINK_UNKNOWN_DEST_PORT 0x01
+#define LINK_UNKNOWN_DEST_ADDR 0x02
+#define LINK_PERMANENT 0x04
+#define LINK_PARTIALLY_SPECIFIED 0x03 /* logical-or of first two bits */
+#define LINK_UNFIREWALLED 0x08
+
+ int timestamp; /* Time link was last accessed */
+ int expire_time; /* Expire time for link */
+#ifndef NO_USE_SOCKETS
+ int sockfd; /* socket descriptor */
+#endif
+ LIST_ENTRY (alias_link) list_out; /* Linked list of
+ * pointers for */
+ LIST_ENTRY (alias_link) list_in; /* input and output
+ * lookup tables */
+
+ union { /* Auxiliary data */
+ char *frag_ptr;
+ struct in_addr frag_addr;
+ struct tcp_dat *tcp;
+ } data;
+};
+
+/* Clean up procedure. */
+static void finishoff(void);
+
+/* Kernel module definition. */
+#ifdef _KERNEL
+MALLOC_DEFINE(M_ALIAS, "libalias", "packet aliasing");
+
+MODULE_VERSION(libalias, 1);
+
+static int
+alias_mod_handler(module_t mod, int type, void *data)
+{
+ int error;
+
+ switch (type) {
+ case MOD_LOAD:
+ error = 0;
+ handler_chain_init();
+ break;
+ case MOD_QUIESCE:
+ case MOD_UNLOAD:
+ handler_chain_destroy();
+ finishoff();
+ error = 0;
+ break;
+ default:
+ error = EINVAL;
+ }
+
+ return (error);
+}
+
+static moduledata_t alias_mod = {
+ "alias", alias_mod_handler, NULL
+};
+
+DECLARE_MODULE(alias, alias_mod, SI_SUB_DRIVERS, SI_ORDER_SECOND);
+#endif
+
+/* Internal utility routines (used only in alias_db.c)
+
+Lookup table starting points:
+ StartPointIn() -- link table initial search point for
+ incoming packets
+ StartPointOut() -- link table initial search point for
+ outgoing packets
+
+Miscellaneous:
+ SeqDiff() -- difference between two TCP sequences
+ ShowAliasStats() -- send alias statistics to a monitor file
+*/
+
+
+/* Local prototypes */
+static u_int StartPointIn(struct in_addr, u_short, int);
+
+static u_int
+StartPointOut(struct in_addr, struct in_addr,
+ u_short, u_short, int);
+
+static int SeqDiff(u_long, u_long);
+
+#ifndef NO_FW_PUNCH
+/* Firewall control */
+static void InitPunchFW(struct libalias *);
+static void UninitPunchFW(struct libalias *);
+static void ClearFWHole(struct alias_link *);
+
+#endif
+
+/* Log file control */
+static void ShowAliasStats(struct libalias *);
+static int InitPacketAliasLog(struct libalias *);
+static void UninitPacketAliasLog(struct libalias *);
+
+void SctpShowAliasStats(struct libalias *la);
+
+static u_int
+StartPointIn(struct in_addr alias_addr,
+ u_short alias_port,
+ int link_type)
+{
+ u_int n;
+
+ n = alias_addr.s_addr;
+ if (link_type != LINK_PPTP)
+ n += alias_port;
+ n += link_type;
+ return (n % LINK_TABLE_IN_SIZE);
+}
+
+
+static u_int
+StartPointOut(struct in_addr src_addr, struct in_addr dst_addr,
+ u_short src_port, u_short dst_port, int link_type)
+{
+ u_int n;
+
+ n = src_addr.s_addr;
+ n += dst_addr.s_addr;
+ if (link_type != LINK_PPTP) {
+ n += src_port;
+ n += dst_port;
+ }
+ n += link_type;
+
+ return (n % LINK_TABLE_OUT_SIZE);
+}
+
+
+static int
+SeqDiff(u_long x, u_long y)
+{
+/* Return the difference between two TCP sequence numbers */
+
+/*
+ This function is encapsulated in case there are any unusual
+ arithmetic conditions that need to be considered.
+*/
+
+ return (ntohl(y) - ntohl(x));
+}
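+
+/*
+ * Example: with x = htonl(0xfffffff0) and y = htonl(0x00000010),
+ * the unsigned subtraction wraps around to 0x20, so SeqDiff()
+ * returns +32, the correct forward distance across a sequence
+ * number wraparound.
+ */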
+
+#ifdef _KERNEL
+
+static void
+AliasLog(char *str, const char *format, ...)
+{
+ va_list ap;
+
+ va_start(ap, format);
+ vsnprintf(str, LIBALIAS_BUF_SIZE, format, ap);
+ va_end(ap);
+}
+#else
+static void
+AliasLog(FILE *stream, const char *format, ...)
+{
+ va_list ap;
+
+ va_start(ap, format);
+ vfprintf(stream, format, ap);
+ va_end(ap);
+ fflush(stream);
+}
+#endif
+
+static void
+ShowAliasStats(struct libalias *la)
+{
+
+ LIBALIAS_LOCK_ASSERT(la);
+/* Used for debugging */
+ if (la->logDesc) {
+ int tot = la->icmpLinkCount + la->udpLinkCount +
+ (la->sctpLinkCount>>1) + /* sctp counts half associations */
+ la->tcpLinkCount + la->pptpLinkCount +
+ la->protoLinkCount + la->fragmentIdLinkCount +
+ la->fragmentPtrLinkCount;
+
+ AliasLog(la->logDesc,
+ "icmp=%u, udp=%u, tcp=%u, sctp=%u, pptp=%u, proto=%u, frag_id=%u frag_ptr=%u / tot=%u",
+ la->icmpLinkCount,
+ la->udpLinkCount,
+ la->tcpLinkCount,
+ la->sctpLinkCount>>1, /* sctp counts half associations */
+ la->pptpLinkCount,
+ la->protoLinkCount,
+ la->fragmentIdLinkCount,
+ la->fragmentPtrLinkCount, tot);
+#ifndef _KERNEL
+ AliasLog(la->logDesc, " (sock=%u)\n", la->sockCount);
+#endif
+ }
+}
+
+void SctpShowAliasStats(struct libalias *la)
+{
+
+ ShowAliasStats(la);
+}
+
+
+/* Internal routines for finding, deleting and adding links
+
+Port Allocation:
+ GetNewPort() -- find and reserve new alias port number
+ GetSocket() -- try to allocate a socket for a given port
+
+Link creation and deletion:
+ CleanupAliasData() - remove all link chains from lookup table
+ IncrementalCleanup() - look for stale links in a single chain
+ DeleteLink() - remove link
+ AddLink() - add link
+ ReLink() - change link
+
+Link search:
+ FindLinkOut() - find link for outgoing packets
+ FindLinkIn() - find link for incoming packets
+
+Port search:
+ FindNewPortGroup() - find an available group of ports
+*/
+
+/* Local prototypes */
+static int GetNewPort(struct libalias *, struct alias_link *, int);
+#ifndef NO_USE_SOCKETS
+static u_short GetSocket(struct libalias *, u_short, int *, int);
+#endif
+static void CleanupAliasData(struct libalias *);
+
+static void IncrementalCleanup(struct libalias *);
+
+static void DeleteLink(struct alias_link *);
+
+static struct alias_link *
+AddLink(struct libalias *, struct in_addr, struct in_addr, struct in_addr,
+ u_short, u_short, int, int);
+
+static struct alias_link *
+ReLink(struct alias_link *,
+ struct in_addr, struct in_addr, struct in_addr,
+ u_short, u_short, int, int);
+
+static struct alias_link *
+ FindLinkOut (struct libalias *, struct in_addr, struct in_addr, u_short, u_short, int, int);
+
+static struct alias_link *
+ FindLinkIn (struct libalias *, struct in_addr, struct in_addr, u_short, u_short, int, int);
+
+
+#define ALIAS_PORT_BASE 0x08000
+#define ALIAS_PORT_MASK 0x07fff
+#define ALIAS_PORT_MASK_EVEN 0x07ffe
+#define GET_NEW_PORT_MAX_ATTEMPTS 20
+
+#define GET_ALIAS_PORT -1
+#define GET_ALIAS_ID GET_ALIAS_PORT
+
+#define FIND_EVEN_ALIAS_BASE 1
+
+/* GetNewPort() allocates port numbers. Note that if a port number
+ is already in use, that does not mean that it cannot be used by
+ another link concurrently. This is because GetNewPort() looks for
+ unused triplets: (dest addr, dest port, alias port). */
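+
+/* Example (illustrative addresses): alias port 4000 can be in use
+   simultaneously for
+	(dest 198.51.100.7:80, alias port 4000) and
+	(dest 203.0.113.9:80,  alias port 4000)
+   because the (dest addr, dest port, alias port) triplets differ. */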
+
+static int
+GetNewPort(struct libalias *la, struct alias_link *lnk, int alias_port_param)
+{
+ int i;
+ int max_trials;
+ u_short port_sys;
+ u_short port_net;
+
+ LIBALIAS_LOCK_ASSERT(la);
+/*
+ Description of alias_port_param for GetNewPort(). When
+ this parameter is zero or positive, it precisely specifies
+ the port number. GetNewPort() will return this number
+    without checking whether it is in use.
+
+    When this parameter is GET_ALIAS_PORT, it requests a randomly
+    selected port number.
+*/
+
+ if (alias_port_param == GET_ALIAS_PORT) {
+ /*
+ * The aliasing port is automatically selected by one of
+ * two methods below:
+ */
+ max_trials = GET_NEW_PORT_MAX_ATTEMPTS;
+
+ if (la->packetAliasMode & PKT_ALIAS_SAME_PORTS) {
+ /*
+ * When the PKT_ALIAS_SAME_PORTS option is chosen,
+ * the first try will be the actual source port. If
+ * this is already in use, the remainder of the
+ * trials will be random.
+ */
+ port_net = lnk->src_port;
+ port_sys = ntohs(port_net);
+ } else {
+ /* First trial and all subsequent are random. */
+ port_sys = arc4random() & ALIAS_PORT_MASK;
+ port_sys += ALIAS_PORT_BASE;
+ port_net = htons(port_sys);
+ }
+ } else if (alias_port_param >= 0 && alias_port_param < 0x10000) {
+ lnk->alias_port = (u_short) alias_port_param;
+ return (0);
+ } else {
+#ifdef LIBALIAS_DEBUG
+ fprintf(stderr, "PacketAlias/GetNewPort(): ");
+ fprintf(stderr, "input parameter error\n");
+#endif
+ return (-1);
+ }
+
+
+/* Port number search */
+ for (i = 0; i < max_trials; i++) {
+ int go_ahead;
+ struct alias_link *search_result;
+
+ search_result = FindLinkIn(la, lnk->dst_addr, lnk->alias_addr,
+ lnk->dst_port, port_net,
+ lnk->link_type, 0);
+
+ if (search_result == NULL)
+ go_ahead = 1;
+ else if (!(lnk->flags & LINK_PARTIALLY_SPECIFIED)
+ && (search_result->flags & LINK_PARTIALLY_SPECIFIED))
+ go_ahead = 1;
+ else
+ go_ahead = 0;
+
+ if (go_ahead) {
+#ifndef NO_USE_SOCKETS
+ if ((la->packetAliasMode & PKT_ALIAS_USE_SOCKETS)
+ && (lnk->flags & LINK_PARTIALLY_SPECIFIED)
+ && ((lnk->link_type == LINK_TCP) ||
+ (lnk->link_type == LINK_UDP))) {
+ if (GetSocket(la, port_net, &lnk->sockfd, lnk->link_type)) {
+ lnk->alias_port = port_net;
+ return (0);
+ }
+ } else {
+#endif
+ lnk->alias_port = port_net;
+ return (0);
+#ifndef NO_USE_SOCKETS
+ }
+#endif
+ }
+ port_sys = arc4random() & ALIAS_PORT_MASK;
+ port_sys += ALIAS_PORT_BASE;
+ port_net = htons(port_sys);
+ }
+
+#ifdef LIBALIAS_DEBUG
+	fprintf(stderr, "PacketAlias/GetNewPort(): ");
+ fprintf(stderr, "could not find free port\n");
+#endif
+
+ return (-1);
+}
+
+#ifndef NO_USE_SOCKETS
+static u_short
+GetSocket(struct libalias *la, u_short port_net, int *sockfd, int link_type)
+{
+ int err;
+ int sock;
+ struct sockaddr_in sock_addr;
+
+ LIBALIAS_LOCK_ASSERT(la);
+ if (link_type == LINK_TCP)
+ sock = socket(AF_INET, SOCK_STREAM, 0);
+ else if (link_type == LINK_UDP)
+ sock = socket(AF_INET, SOCK_DGRAM, 0);
+ else {
+#ifdef LIBALIAS_DEBUG
+ fprintf(stderr, "PacketAlias/GetSocket(): ");
+ fprintf(stderr, "incorrect link type\n");
+#endif
+ return (0);
+ }
+
+ if (sock < 0) {
+#ifdef LIBALIAS_DEBUG
+ fprintf(stderr, "PacketAlias/GetSocket(): ");
+ fprintf(stderr, "socket() error %d\n", *sockfd);
+#endif
+ return (0);
+ }
+ sock_addr.sin_family = AF_INET;
+ sock_addr.sin_addr.s_addr = htonl(INADDR_ANY);
+ sock_addr.sin_port = port_net;
+
+ err = bind(sock,
+ (struct sockaddr *)&sock_addr,
+ sizeof(sock_addr));
+ if (err == 0) {
+ la->sockCount++;
+ *sockfd = sock;
+ return (1);
+ } else {
+ close(sock);
+ return (0);
+ }
+}
+#endif
+
+/* FindNewPortGroup() returns a base port number for an available
+ range of contiguous port numbers. Note that if a port number
+ is already in use, that does not mean that it cannot be used by
+ another link concurrently. This is because FindNewPortGroup()
+ looks for unused triplets: (dest addr, dest port, alias port). */
+
+int
+FindNewPortGroup(struct libalias *la,
+ struct in_addr dst_addr,
+ struct in_addr alias_addr,
+ u_short src_port,
+ u_short dst_port,
+ u_short port_count,
+ u_char proto,
+ u_char align)
+{
+ int i, j;
+ int max_trials;
+ u_short port_sys;
+ int link_type;
+
+ LIBALIAS_LOCK_ASSERT(la);
+ /*
+ * Get link_type from protocol
+ */
+
+ switch (proto) {
+ case IPPROTO_UDP:
+ link_type = LINK_UDP;
+ break;
+ case IPPROTO_TCP:
+ link_type = LINK_TCP;
+ break;
+ default:
+ return (0);
+ break;
+ }
+
+ /*
+ * The aliasing port is automatically selected by one of two
+ * methods below:
+ */
+ max_trials = GET_NEW_PORT_MAX_ATTEMPTS;
+
+ if (la->packetAliasMode & PKT_ALIAS_SAME_PORTS) {
+ /*
+ * When the ALIAS_SAME_PORTS option is chosen, the first
+ * try will be the actual source port. If this is already
+ * in use, the remainder of the trials will be random.
+ */
+ port_sys = ntohs(src_port);
+
+ } else {
+
+ /* First trial and all subsequent are random. */
+ if (align == FIND_EVEN_ALIAS_BASE)
+ port_sys = arc4random() & ALIAS_PORT_MASK_EVEN;
+ else
+ port_sys = arc4random() & ALIAS_PORT_MASK;
+
+ port_sys += ALIAS_PORT_BASE;
+ }
+
+/* Port number search */
+ for (i = 0; i < max_trials; i++) {
+
+ struct alias_link *search_result;
+
+ for (j = 0; j < port_count; j++)
+ if (0 != (search_result = FindLinkIn(la, dst_addr, alias_addr,
+ dst_port, htons(port_sys + j),
+ link_type, 0)))
+ break;
+
+ /* Found a good range, return base */
+ if (j == port_count)
+ return (htons(port_sys));
+
+ /* Find a new base to try */
+ if (align == FIND_EVEN_ALIAS_BASE)
+ port_sys = arc4random() & ALIAS_PORT_MASK_EVEN;
+ else
+ port_sys = arc4random() & ALIAS_PORT_MASK;
+
+ port_sys += ALIAS_PORT_BASE;
+ }
+
+#ifdef LIBALIAS_DEBUG
+ fprintf(stderr, "PacketAlias/FindNewPortGroup(): ");
+ fprintf(stderr, "could not find free port(s)\n");
+#endif
+
+ return (0);
+}
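+
+/*
+ * Illustrative call (arguments assumed): reserve two contiguous UDP
+ * ports starting on an even base, e.g. for an even-aligned
+ * RTP/RTCP-style port pair:
+ *
+ *	base = FindNewPortGroup(la, dst_addr, alias_addr, src_port,
+ *	    dst_port, 2, IPPROTO_UDP, FIND_EVEN_ALIAS_BASE);
+ *	- base == 0 means no free range was found
+ */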
+
+static void
+CleanupAliasData(struct libalias *la)
+{
+ struct alias_link *lnk;
+ int i;
+
+ LIBALIAS_LOCK_ASSERT(la);
+ for (i = 0; i < LINK_TABLE_OUT_SIZE; i++) {
+ lnk = LIST_FIRST(&la->linkTableOut[i]);
+ while (lnk != NULL) {
+ struct alias_link *link_next = LIST_NEXT(lnk, list_out);
+ DeleteLink(lnk);
+ lnk = link_next;
+ }
+ }
+
+ la->cleanupIndex = 0;
+}
+
+
+static void
+IncrementalCleanup(struct libalias *la)
+{
+ struct alias_link *lnk, *lnk_tmp;
+
+ LIBALIAS_LOCK_ASSERT(la);
+ LIST_FOREACH_SAFE(lnk, &la->linkTableOut[la->cleanupIndex++],
+ list_out, lnk_tmp) {
+ if (la->timeStamp - lnk->timestamp > lnk->expire_time)
+ DeleteLink(lnk);
+ }
+
+ if (la->cleanupIndex == LINK_TABLE_OUT_SIZE)
+ la->cleanupIndex = 0;
+}
+
+static void
+DeleteLink(struct alias_link *lnk)
+{
+ struct libalias *la = lnk->la;
+
+ LIBALIAS_LOCK_ASSERT(la);
+/* Don't do anything if the link is marked permanent */
+ if (la->deleteAllLinks == 0 && lnk->flags & LINK_PERMANENT)
+ return;
+
+#ifndef NO_FW_PUNCH
+/* Delete associated firewall hole, if any */
+ ClearFWHole(lnk);
+#endif
+
+/* Free memory allocated for LSNAT server pool */
+ if (lnk->server != NULL) {
+ struct server *head, *curr, *next;
+
+ head = curr = lnk->server;
+ do {
+ next = curr->next;
+ free(curr);
+ } while ((curr = next) != head);
+ }
+/* Adjust output table pointers */
+ LIST_REMOVE(lnk, list_out);
+
+/* Adjust input table pointers */
+ LIST_REMOVE(lnk, list_in);
+#ifndef NO_USE_SOCKETS
+/* Close socket, if one has been allocated */
+ if (lnk->sockfd != -1) {
+ la->sockCount--;
+ close(lnk->sockfd);
+ }
+#endif
+/* Link-type dependent cleanup */
+ switch (lnk->link_type) {
+ case LINK_ICMP:
+ la->icmpLinkCount--;
+ break;
+ case LINK_UDP:
+ la->udpLinkCount--;
+ break;
+ case LINK_TCP:
+ la->tcpLinkCount--;
+ free(lnk->data.tcp);
+ break;
+ case LINK_PPTP:
+ la->pptpLinkCount--;
+ break;
+ case LINK_FRAGMENT_ID:
+ la->fragmentIdLinkCount--;
+ break;
+ case LINK_FRAGMENT_PTR:
+ la->fragmentPtrLinkCount--;
+ if (lnk->data.frag_ptr != NULL)
+ free(lnk->data.frag_ptr);
+ break;
+ case LINK_ADDR:
+ break;
+ default:
+ la->protoLinkCount--;
+ break;
+ }
+
+/* Free memory */
+ free(lnk);
+
+/* Write statistics, if logging enabled */
+ if (la->packetAliasMode & PKT_ALIAS_LOG) {
+ ShowAliasStats(la);
+ }
+}
+
+
+static struct alias_link *
+AddLink(struct libalias *la, struct in_addr src_addr,
+ struct in_addr dst_addr,
+ struct in_addr alias_addr,
+ u_short src_port,
+ u_short dst_port,
+    int alias_port_param,	/* if less than zero, the alias port
+				 * is chosen automatically; if zero
+				 * or greater, it is used as the
+				 * alias port */
+    int link_type)
+{
+	u_int start_point;
+	struct alias_link *lnk;
+
+ LIBALIAS_LOCK_ASSERT(la);
+ lnk = malloc(sizeof(struct alias_link));
+ if (lnk != NULL) {
+ /* Basic initialization */
+ lnk->la = la;
+ lnk->src_addr = src_addr;
+ lnk->dst_addr = dst_addr;
+ lnk->alias_addr = alias_addr;
+ lnk->proxy_addr.s_addr = INADDR_ANY;
+ lnk->src_port = src_port;
+ lnk->dst_port = dst_port;
+ lnk->proxy_port = 0;
+ lnk->server = NULL;
+ lnk->link_type = link_type;
+#ifndef NO_USE_SOCKETS
+ lnk->sockfd = -1;
+#endif
+ lnk->flags = 0;
+ lnk->pflags = 0;
+ lnk->timestamp = la->timeStamp;
+
+ /* Expiration time */
+ switch (link_type) {
+ case LINK_ICMP:
+ lnk->expire_time = ICMP_EXPIRE_TIME;
+ break;
+ case LINK_UDP:
+ lnk->expire_time = UDP_EXPIRE_TIME;
+ break;
+ case LINK_TCP:
+ lnk->expire_time = TCP_EXPIRE_INITIAL;
+ break;
+ case LINK_PPTP:
+ lnk->flags |= LINK_PERMANENT; /* no timeout. */
+ break;
+ case LINK_FRAGMENT_ID:
+ lnk->expire_time = FRAGMENT_ID_EXPIRE_TIME;
+ break;
+ case LINK_FRAGMENT_PTR:
+ lnk->expire_time = FRAGMENT_PTR_EXPIRE_TIME;
+ break;
+ case LINK_ADDR:
+ break;
+ default:
+ lnk->expire_time = PROTO_EXPIRE_TIME;
+ break;
+ }
+
+ /* Determine alias flags */
+ if (dst_addr.s_addr == INADDR_ANY)
+ lnk->flags |= LINK_UNKNOWN_DEST_ADDR;
+ if (dst_port == 0)
+ lnk->flags |= LINK_UNKNOWN_DEST_PORT;
+
+ /* Determine alias port */
+ if (GetNewPort(la, lnk, alias_port_param) != 0) {
+ free(lnk);
+ return (NULL);
+ }
+ /* Link-type dependent initialization */
+ switch (link_type) {
+ struct tcp_dat *aux_tcp;
+
+ case LINK_ICMP:
+ la->icmpLinkCount++;
+ break;
+ case LINK_UDP:
+ la->udpLinkCount++;
+ break;
+ case LINK_TCP:
+ aux_tcp = malloc(sizeof(struct tcp_dat));
+ if (aux_tcp != NULL) {
+ int i;
+
+ la->tcpLinkCount++;
+ aux_tcp->state.in = ALIAS_TCP_STATE_NOT_CONNECTED;
+ aux_tcp->state.out = ALIAS_TCP_STATE_NOT_CONNECTED;
+ aux_tcp->state.index = 0;
+ aux_tcp->state.ack_modified = 0;
+ for (i = 0; i < N_LINK_TCP_DATA; i++)
+ aux_tcp->ack[i].active = 0;
+ aux_tcp->fwhole = -1;
+ lnk->data.tcp = aux_tcp;
+ } else {
+#ifdef LIBALIAS_DEBUG
+ fprintf(stderr, "PacketAlias/AddLink: ");
+ fprintf(stderr, " cannot allocate auxiliary TCP data\n");
+#endif
+ free(lnk);
+ return (NULL);
+ }
+ break;
+ case LINK_PPTP:
+ la->pptpLinkCount++;
+ break;
+ case LINK_FRAGMENT_ID:
+ la->fragmentIdLinkCount++;
+ break;
+ case LINK_FRAGMENT_PTR:
+ la->fragmentPtrLinkCount++;
+ break;
+ case LINK_ADDR:
+ break;
+ default:
+ la->protoLinkCount++;
+ break;
+ }
+
+ /* Set up pointers for output lookup table */
+ start_point = StartPointOut(src_addr, dst_addr,
+ src_port, dst_port, link_type);
+ LIST_INSERT_HEAD(&la->linkTableOut[start_point], lnk, list_out);
+
+ /* Set up pointers for input lookup table */
+ start_point = StartPointIn(alias_addr, lnk->alias_port, link_type);
+ LIST_INSERT_HEAD(&la->linkTableIn[start_point], lnk, list_in);
+ } else {
+#ifdef LIBALIAS_DEBUG
+ fprintf(stderr, "PacketAlias/AddLink(): ");
+ fprintf(stderr, "malloc() call failed.\n");
+#endif
+ }
+ if (la->packetAliasMode & PKT_ALIAS_LOG) {
+ ShowAliasStats(la);
+ }
+ return (lnk);
+}
+
+static struct alias_link *
+ReLink(struct alias_link *old_lnk,
+ struct in_addr src_addr,
+ struct in_addr dst_addr,
+ struct in_addr alias_addr,
+ u_short src_port,
+ u_short dst_port,
+    int alias_port_param,	/* if less than zero, the alias port
+				 * is chosen automatically; if zero
+				 * or greater, it is used as the
+				 * alias port */
+    int link_type)
+{
+	struct alias_link *new_lnk;
+	struct libalias *la = old_lnk->la;
+
+ LIBALIAS_LOCK_ASSERT(la);
+ new_lnk = AddLink(la, src_addr, dst_addr, alias_addr,
+ src_port, dst_port, alias_port_param,
+ link_type);
+#ifndef NO_FW_PUNCH
+ if (new_lnk != NULL &&
+ old_lnk->link_type == LINK_TCP &&
+ old_lnk->data.tcp->fwhole > 0) {
+ PunchFWHole(new_lnk);
+ }
+#endif
+ DeleteLink(old_lnk);
+ return (new_lnk);
+}
+
+static struct alias_link *
+_FindLinkOut(struct libalias *la, struct in_addr src_addr,
+ struct in_addr dst_addr,
+ u_short src_port,
+ u_short dst_port,
+ int link_type,
+ int replace_partial_links)
+{
+ u_int i;
+ struct alias_link *lnk;
+
+ LIBALIAS_LOCK_ASSERT(la);
+ i = StartPointOut(src_addr, dst_addr, src_port, dst_port, link_type);
+ LIST_FOREACH(lnk, &la->linkTableOut[i], list_out) {
+ if (lnk->dst_addr.s_addr == dst_addr.s_addr &&
+ lnk->src_addr.s_addr == src_addr.s_addr &&
+ lnk->src_port == src_port &&
+ lnk->dst_port == dst_port &&
+ lnk->link_type == link_type &&
+ lnk->server == NULL) {
+ lnk->timestamp = la->timeStamp;
+ break;
+ }
+ }
+
+/* Search for partially specified links. */
+ if (lnk == NULL && replace_partial_links) {
+ if (dst_port != 0 && dst_addr.s_addr != INADDR_ANY) {
+ lnk = _FindLinkOut(la, src_addr, dst_addr, src_port, 0,
+ link_type, 0);
+ if (lnk == NULL)
+ lnk = _FindLinkOut(la, src_addr, la->nullAddress, src_port,
+ dst_port, link_type, 0);
+ }
+ if (lnk == NULL &&
+ (dst_port != 0 || dst_addr.s_addr != INADDR_ANY)) {
+ lnk = _FindLinkOut(la, src_addr, la->nullAddress, src_port, 0,
+ link_type, 0);
+ }
+ if (lnk != NULL) {
+ lnk = ReLink(lnk,
+ src_addr, dst_addr, lnk->alias_addr,
+ src_port, dst_port, lnk->alias_port,
+ link_type);
+ }
+ }
+ return (lnk);
+}
+
+static struct alias_link *
+FindLinkOut(struct libalias *la, struct in_addr src_addr,
+ struct in_addr dst_addr,
+ u_short src_port,
+ u_short dst_port,
+ int link_type,
+ int replace_partial_links)
+{
+ struct alias_link *lnk;
+
+ LIBALIAS_LOCK_ASSERT(la);
+ lnk = _FindLinkOut(la, src_addr, dst_addr, src_port, dst_port,
+ link_type, replace_partial_links);
+
+ if (lnk == NULL) {
+ /*
+ * The following allows permanent links to be specified as
+ * using the default source address (i.e. device interface
+ * address) without knowing in advance what that address
+ * is.
+ */
+ if (la->aliasAddress.s_addr != INADDR_ANY &&
+ src_addr.s_addr == la->aliasAddress.s_addr) {
+ lnk = _FindLinkOut(la, la->nullAddress, dst_addr, src_port, dst_port,
+ link_type, replace_partial_links);
+ }
+ }
+ return (lnk);
+}
+
+
+static struct alias_link *
+_FindLinkIn(struct libalias *la, struct in_addr dst_addr,
+ struct in_addr alias_addr,
+ u_short dst_port,
+ u_short alias_port,
+ int link_type,
+ int replace_partial_links)
+{
+ int flags_in;
+ u_int start_point;
+ struct alias_link *lnk;
+ struct alias_link *lnk_fully_specified;
+ struct alias_link *lnk_unknown_all;
+ struct alias_link *lnk_unknown_dst_addr;
+ struct alias_link *lnk_unknown_dst_port;
+
+ LIBALIAS_LOCK_ASSERT(la);
+/* Initialize pointers */
+ lnk_fully_specified = NULL;
+ lnk_unknown_all = NULL;
+ lnk_unknown_dst_addr = NULL;
+ lnk_unknown_dst_port = NULL;
+
+/* If either the dest addr or port is unknown, the search
+ loop will have to know about this. */
+
+ flags_in = 0;
+ if (dst_addr.s_addr == INADDR_ANY)
+ flags_in |= LINK_UNKNOWN_DEST_ADDR;
+ if (dst_port == 0)
+ flags_in |= LINK_UNKNOWN_DEST_PORT;
+
+/* Search loop */
+ start_point = StartPointIn(alias_addr, alias_port, link_type);
+ LIST_FOREACH(lnk, &la->linkTableIn[start_point], list_in) {
+ int flags;
+
+ flags = flags_in | lnk->flags;
+ if (!(flags & LINK_PARTIALLY_SPECIFIED)) {
+ if (lnk->alias_addr.s_addr == alias_addr.s_addr
+ && lnk->alias_port == alias_port
+ && lnk->dst_addr.s_addr == dst_addr.s_addr
+ && lnk->dst_port == dst_port
+ && lnk->link_type == link_type) {
+ lnk_fully_specified = lnk;
+ break;
+ }
+ } else if ((flags & LINK_UNKNOWN_DEST_ADDR)
+ && (flags & LINK_UNKNOWN_DEST_PORT)) {
+ if (lnk->alias_addr.s_addr == alias_addr.s_addr
+ && lnk->alias_port == alias_port
+ && lnk->link_type == link_type) {
+ if (lnk_unknown_all == NULL)
+ lnk_unknown_all = lnk;
+ }
+ } else if (flags & LINK_UNKNOWN_DEST_ADDR) {
+ if (lnk->alias_addr.s_addr == alias_addr.s_addr
+ && lnk->alias_port == alias_port
+ && lnk->link_type == link_type
+ && lnk->dst_port == dst_port) {
+ if (lnk_unknown_dst_addr == NULL)
+ lnk_unknown_dst_addr = lnk;
+ }
+ } else if (flags & LINK_UNKNOWN_DEST_PORT) {
+ if (lnk->alias_addr.s_addr == alias_addr.s_addr
+ && lnk->alias_port == alias_port
+ && lnk->link_type == link_type
+ && lnk->dst_addr.s_addr == dst_addr.s_addr) {
+ if (lnk_unknown_dst_port == NULL)
+ lnk_unknown_dst_port = lnk;
+ }
+ }
+ }
+
+
+
+ if (lnk_fully_specified != NULL) {
+ lnk_fully_specified->timestamp = la->timeStamp;
+ lnk = lnk_fully_specified;
+ } else if (lnk_unknown_dst_port != NULL)
+ lnk = lnk_unknown_dst_port;
+ else if (lnk_unknown_dst_addr != NULL)
+ lnk = lnk_unknown_dst_addr;
+ else if (lnk_unknown_all != NULL)
+ lnk = lnk_unknown_all;
+ else
+ return (NULL);
+
+ if (replace_partial_links &&
+ (lnk->flags & LINK_PARTIALLY_SPECIFIED || lnk->server != NULL)) {
+ struct in_addr src_addr;
+ u_short src_port;
+
+ if (lnk->server != NULL) { /* LSNAT link */
+ src_addr = lnk->server->addr;
+ src_port = lnk->server->port;
+ lnk->server = lnk->server->next;
+ } else {
+ src_addr = lnk->src_addr;
+ src_port = lnk->src_port;
+ }
+
+ if (link_type == LINK_SCTP) {
+ lnk->src_addr = src_addr;
+ lnk->src_port = src_port;
+ return(lnk);
+ }
+ lnk = ReLink(lnk,
+ src_addr, dst_addr, alias_addr,
+ src_port, dst_port, alias_port,
+ link_type);
+ }
+ return (lnk);
+}
+
+static struct alias_link *
+FindLinkIn(struct libalias *la, struct in_addr dst_addr,
+ struct in_addr alias_addr,
+ u_short dst_port,
+ u_short alias_port,
+ int link_type,
+ int replace_partial_links)
+{
+ struct alias_link *lnk;
+
+ LIBALIAS_LOCK_ASSERT(la);
+ lnk = _FindLinkIn(la, dst_addr, alias_addr, dst_port, alias_port,
+ link_type, replace_partial_links);
+
+ if (lnk == NULL) {
+ /*
+ * The following allows permanent links to be specified as
+ * using the default aliasing address (i.e. device
+ * interface address) without knowing in advance what that
+ * address is.
+ */
+ if (la->aliasAddress.s_addr != INADDR_ANY &&
+ alias_addr.s_addr == la->aliasAddress.s_addr) {
+ lnk = _FindLinkIn(la, dst_addr, la->nullAddress, dst_port, alias_port,
+ link_type, replace_partial_links);
+ }
+ }
+ return (lnk);
+}
+
+
+
+
+/* External routines for finding/adding links
+
+-- "external" means outside alias_db.c, but within alias*.c --
+
+ FindIcmpIn(), FindIcmpOut()
+ FindFragmentIn1(), FindFragmentIn2()
+ AddFragmentPtrLink(), FindFragmentPtr()
+ FindProtoIn(), FindProtoOut()
+ FindUdpTcpIn(), FindUdpTcpOut()
+ AddPptp(), FindPptpOutByCallId(), FindPptpInByCallId(),
+ FindPptpOutByPeerCallId(), FindPptpInByPeerCallId()
+ FindOriginalAddress(), FindAliasAddress()
+
+(prototypes in alias_local.h)
+*/
+
+
+struct alias_link *
+FindIcmpIn(struct libalias *la, struct in_addr dst_addr,
+ struct in_addr alias_addr,
+ u_short id_alias,
+ int create)
+{
+ struct alias_link *lnk;
+
+ LIBALIAS_LOCK_ASSERT(la);
+ lnk = FindLinkIn(la, dst_addr, alias_addr,
+ NO_DEST_PORT, id_alias,
+ LINK_ICMP, 0);
+ if (lnk == NULL && create && !(la->packetAliasMode & PKT_ALIAS_DENY_INCOMING)) {
+ struct in_addr target_addr;
+
+ target_addr = FindOriginalAddress(la, alias_addr);
+ lnk = AddLink(la, target_addr, dst_addr, alias_addr,
+ id_alias, NO_DEST_PORT, id_alias,
+ LINK_ICMP);
+ }
+ return (lnk);
+}
+
+
+struct alias_link *
+FindIcmpOut(struct libalias *la, struct in_addr src_addr,
+ struct in_addr dst_addr,
+ u_short id,
+ int create)
+{
+ struct alias_link *lnk;
+
+ LIBALIAS_LOCK_ASSERT(la);
+ lnk = FindLinkOut(la, src_addr, dst_addr,
+ id, NO_DEST_PORT,
+ LINK_ICMP, 0);
+ if (lnk == NULL && create) {
+ struct in_addr alias_addr;
+
+ alias_addr = FindAliasAddress(la, src_addr);
+ lnk = AddLink(la, src_addr, dst_addr, alias_addr,
+ id, NO_DEST_PORT, GET_ALIAS_ID,
+ LINK_ICMP);
+ }
+ return (lnk);
+}
+
+
+struct alias_link *
+FindFragmentIn1(struct libalias *la, struct in_addr dst_addr,
+ struct in_addr alias_addr,
+ u_short ip_id)
+{
+ struct alias_link *lnk;
+
+ LIBALIAS_LOCK_ASSERT(la);
+ lnk = FindLinkIn(la, dst_addr, alias_addr,
+ NO_DEST_PORT, ip_id,
+ LINK_FRAGMENT_ID, 0);
+
+ if (lnk == NULL) {
+ lnk = AddLink(la, la->nullAddress, dst_addr, alias_addr,
+ NO_SRC_PORT, NO_DEST_PORT, ip_id,
+ LINK_FRAGMENT_ID);
+ }
+ return (lnk);
+}
+
+
+/* FindFragmentIn2() doesn't add a link if one is not found. */
+struct alias_link *
+FindFragmentIn2(struct libalias *la, struct in_addr dst_addr,
+    struct in_addr alias_addr,
+    u_short ip_id)
+{
+
+ LIBALIAS_LOCK_ASSERT(la);
+ return FindLinkIn(la, dst_addr, alias_addr,
+ NO_DEST_PORT, ip_id,
+ LINK_FRAGMENT_ID, 0);
+}
+
+
+struct alias_link *
+AddFragmentPtrLink(struct libalias *la, struct in_addr dst_addr,
+ u_short ip_id)
+{
+
+ LIBALIAS_LOCK_ASSERT(la);
+ return AddLink(la, la->nullAddress, dst_addr, la->nullAddress,
+ NO_SRC_PORT, NO_DEST_PORT, ip_id,
+ LINK_FRAGMENT_PTR);
+}
+
+
+struct alias_link *
+FindFragmentPtr(struct libalias *la, struct in_addr dst_addr,
+ u_short ip_id)
+{
+
+ LIBALIAS_LOCK_ASSERT(la);
+ return FindLinkIn(la, dst_addr, la->nullAddress,
+ NO_DEST_PORT, ip_id,
+ LINK_FRAGMENT_PTR, 0);
+}
+
+
+struct alias_link *
+FindProtoIn(struct libalias *la, struct in_addr dst_addr,
+ struct in_addr alias_addr,
+ u_char proto)
+{
+ struct alias_link *lnk;
+
+ LIBALIAS_LOCK_ASSERT(la);
+ lnk = FindLinkIn(la, dst_addr, alias_addr,
+ NO_DEST_PORT, 0,
+ proto, 1);
+
+ if (lnk == NULL && !(la->packetAliasMode & PKT_ALIAS_DENY_INCOMING)) {
+ struct in_addr target_addr;
+
+ target_addr = FindOriginalAddress(la, alias_addr);
+ lnk = AddLink(la, target_addr, dst_addr, alias_addr,
+ NO_SRC_PORT, NO_DEST_PORT, 0,
+ proto);
+ }
+ return (lnk);
+}
+
+
+struct alias_link *
+FindProtoOut(struct libalias *la, struct in_addr src_addr,
+ struct in_addr dst_addr,
+ u_char proto)
+{
+ struct alias_link *lnk;
+
+ LIBALIAS_LOCK_ASSERT(la);
+ lnk = FindLinkOut(la, src_addr, dst_addr,
+ NO_SRC_PORT, NO_DEST_PORT,
+ proto, 1);
+
+ if (lnk == NULL) {
+ struct in_addr alias_addr;
+
+ alias_addr = FindAliasAddress(la, src_addr);
+ lnk = AddLink(la, src_addr, dst_addr, alias_addr,
+ NO_SRC_PORT, NO_DEST_PORT, 0,
+ proto);
+ }
+ return (lnk);
+}
+
+
+struct alias_link *
+FindUdpTcpIn(struct libalias *la, struct in_addr dst_addr,
+ struct in_addr alias_addr,
+ u_short dst_port,
+ u_short alias_port,
+ u_char proto,
+ int create)
+{
+ int link_type;
+ struct alias_link *lnk;
+
+ LIBALIAS_LOCK_ASSERT(la);
+ switch (proto) {
+ case IPPROTO_UDP:
+ link_type = LINK_UDP;
+ break;
+ case IPPROTO_TCP:
+ link_type = LINK_TCP;
+ break;
+ default:
+ return (NULL);
+ }
+
+ lnk = FindLinkIn(la, dst_addr, alias_addr,
+ dst_port, alias_port,
+ link_type, create);
+
+ if (lnk == NULL && create && !(la->packetAliasMode & PKT_ALIAS_DENY_INCOMING)) {
+ struct in_addr target_addr;
+
+ target_addr = FindOriginalAddress(la, alias_addr);
+ lnk = AddLink(la, target_addr, dst_addr, alias_addr,
+ alias_port, dst_port, alias_port,
+ link_type);
+ }
+ return (lnk);
+}
+
+
+struct alias_link *
+FindUdpTcpOut(struct libalias *la, struct in_addr src_addr,
+ struct in_addr dst_addr,
+ u_short src_port,
+ u_short dst_port,
+ u_char proto,
+ int create)
+{
+ int link_type;
+ struct alias_link *lnk;
+
+ LIBALIAS_LOCK_ASSERT(la);
+ switch (proto) {
+ case IPPROTO_UDP:
+ link_type = LINK_UDP;
+ break;
+ case IPPROTO_TCP:
+ link_type = LINK_TCP;
+ break;
+ default:
+ return (NULL);
+ }
+
+ lnk = FindLinkOut(la, src_addr, dst_addr, src_port, dst_port, link_type, create);
+
+ if (lnk == NULL && create) {
+ struct in_addr alias_addr;
+
+ alias_addr = FindAliasAddress(la, src_addr);
+ lnk = AddLink(la, src_addr, dst_addr, alias_addr,
+ src_port, dst_port, GET_ALIAS_PORT,
+ link_type);
+ }
+ return (lnk);
+}
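+
+/*
+ * Editorial usage sketch (not part of the original source): a typical
+ * caller resolves the outgoing link for a TCP packet and rewrites the
+ * source endpoint roughly like this; "pip" and "tc" stand for the IP
+ * and TCP headers of the packet being aliased, and the checksum
+ * adjustments are omitted.
+ *
+ *	struct alias_link *lnk;
+ *
+ *	lnk = FindUdpTcpOut(la, pip->ip_src, pip->ip_dst,
+ *	    tc->th_sport, tc->th_dport, IPPROTO_TCP, 1);
+ *	if (lnk != NULL) {
+ *		tc->th_sport = GetAliasPort(lnk);
+ *		pip->ip_src = GetAliasAddress(lnk);
+ *	}
+ */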
+
+
+struct alias_link *
+AddPptp(struct libalias *la, struct in_addr src_addr,
+ struct in_addr dst_addr,
+ struct in_addr alias_addr,
+ u_int16_t src_call_id)
+{
+ struct alias_link *lnk;
+
+ LIBALIAS_LOCK_ASSERT(la);
+ lnk = AddLink(la, src_addr, dst_addr, alias_addr,
+ src_call_id, 0, GET_ALIAS_PORT,
+ LINK_PPTP);
+
+ return (lnk);
+}
+
+
+struct alias_link *
+FindPptpOutByCallId(struct libalias *la, struct in_addr src_addr,
+ struct in_addr dst_addr,
+ u_int16_t src_call_id)
+{
+ u_int i;
+ struct alias_link *lnk;
+
+ LIBALIAS_LOCK_ASSERT(la);
+ i = StartPointOut(src_addr, dst_addr, 0, 0, LINK_PPTP);
+ LIST_FOREACH(lnk, &la->linkTableOut[i], list_out)
+ if (lnk->link_type == LINK_PPTP &&
+ lnk->src_addr.s_addr == src_addr.s_addr &&
+ lnk->dst_addr.s_addr == dst_addr.s_addr &&
+ lnk->src_port == src_call_id)
+ break;
+
+ return (lnk);
+}
+
+
+struct alias_link *
+FindPptpOutByPeerCallId(struct libalias *la, struct in_addr src_addr,
+ struct in_addr dst_addr,
+ u_int16_t dst_call_id)
+{
+ u_int i;
+ struct alias_link *lnk;
+
+ LIBALIAS_LOCK_ASSERT(la);
+ i = StartPointOut(src_addr, dst_addr, 0, 0, LINK_PPTP);
+ LIST_FOREACH(lnk, &la->linkTableOut[i], list_out)
+ if (lnk->link_type == LINK_PPTP &&
+ lnk->src_addr.s_addr == src_addr.s_addr &&
+ lnk->dst_addr.s_addr == dst_addr.s_addr &&
+ lnk->dst_port == dst_call_id)
+ break;
+
+ return (lnk);
+}
+
+
+struct alias_link *
+FindPptpInByCallId(struct libalias *la, struct in_addr dst_addr,
+ struct in_addr alias_addr,
+ u_int16_t dst_call_id)
+{
+ u_int i;
+ struct alias_link *lnk;
+
+ LIBALIAS_LOCK_ASSERT(la);
+ i = StartPointIn(alias_addr, 0, LINK_PPTP);
+ LIST_FOREACH(lnk, &la->linkTableIn[i], list_in)
+ if (lnk->link_type == LINK_PPTP &&
+ lnk->dst_addr.s_addr == dst_addr.s_addr &&
+ lnk->alias_addr.s_addr == alias_addr.s_addr &&
+ lnk->dst_port == dst_call_id)
+ break;
+
+ return (lnk);
+}
+
+
+struct alias_link *
+FindPptpInByPeerCallId(struct libalias *la, struct in_addr dst_addr,
+ struct in_addr alias_addr,
+ u_int16_t alias_call_id)
+{
+ struct alias_link *lnk;
+
+ LIBALIAS_LOCK_ASSERT(la);
+ lnk = FindLinkIn(la, dst_addr, alias_addr,
+ 0 /* any */ , alias_call_id,
+ LINK_PPTP, 0);
+
+
+ return (lnk);
+}
+
+
+struct alias_link *
+FindRtspOut(struct libalias *la, struct in_addr src_addr,
+ struct in_addr dst_addr,
+ u_short src_port,
+ u_short alias_port,
+ u_char proto)
+{
+ int link_type;
+ struct alias_link *lnk;
+
+ LIBALIAS_LOCK_ASSERT(la);
+ switch (proto) {
+ case IPPROTO_UDP:
+ link_type = LINK_UDP;
+ break;
+ case IPPROTO_TCP:
+ link_type = LINK_TCP;
+ break;
+ default:
+ return (NULL);
+ }
+
+ lnk = FindLinkOut(la, src_addr, dst_addr, src_port, 0, link_type, 1);
+
+ if (lnk == NULL) {
+ struct in_addr alias_addr;
+
+ alias_addr = FindAliasAddress(la, src_addr);
+ lnk = AddLink(la, src_addr, dst_addr, alias_addr,
+ src_port, 0, alias_port,
+ link_type);
+ }
+ return (lnk);
+}
+
+
+struct in_addr
+FindOriginalAddress(struct libalias *la, struct in_addr alias_addr)
+{
+ struct alias_link *lnk;
+
+ LIBALIAS_LOCK_ASSERT(la);
+ lnk = FindLinkIn(la, la->nullAddress, alias_addr,
+ 0, 0, LINK_ADDR, 0);
+ if (lnk == NULL) {
+ la->newDefaultLink = 1;
+ if (la->targetAddress.s_addr == INADDR_ANY)
+ return (alias_addr);
+ else if (la->targetAddress.s_addr == INADDR_NONE)
+ return (la->aliasAddress.s_addr != INADDR_ANY) ?
+ la->aliasAddress : alias_addr;
+ else
+ return (la->targetAddress);
+ } else {
+ if (lnk->server != NULL) { /* LSNAT link */
+ struct in_addr src_addr;
+
+ src_addr = lnk->server->addr;
+ lnk->server = lnk->server->next;
+ return (src_addr);
+ } else if (lnk->src_addr.s_addr == INADDR_ANY)
+ return (la->aliasAddress.s_addr != INADDR_ANY) ?
+ la->aliasAddress : alias_addr;
+ else
+ return (lnk->src_addr);
+ }
+}
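+
+/*
+ * Worked example (editorial): for an LSNAT link the server list is a
+ * circular ring and lnk->server is advanced on every hit, so with a
+ * hypothetical pool of 10.0.0.1, 10.0.0.2 and 10.0.0.3, three
+ * successive lookups on the same alias address return 10.0.0.1,
+ * 10.0.0.2 and 10.0.0.3 in turn -- a simple round-robin balancer.
+ */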
+
+
+struct in_addr
+FindAliasAddress(struct libalias *la, struct in_addr original_addr)
+{
+ struct alias_link *lnk;
+
+ LIBALIAS_LOCK_ASSERT(la);
+ lnk = FindLinkOut(la, original_addr, la->nullAddress,
+ 0, 0, LINK_ADDR, 0);
+ if (lnk == NULL) {
+ return (la->aliasAddress.s_addr != INADDR_ANY) ?
+ la->aliasAddress : original_addr;
+ } else {
+ if (lnk->alias_addr.s_addr == INADDR_ANY)
+ return (la->aliasAddress.s_addr != INADDR_ANY) ?
+ la->aliasAddress : original_addr;
+ else
+ return (lnk->alias_addr);
+ }
+}
+
+
+/* External routines for getting or changing link data
+ (external to alias_db.c, but internal to alias*.c)
+
+ SetFragmentData(), GetFragmentData()
+ SetFragmentPtr(), GetFragmentPtr()
+ SetStateIn(), SetStateOut(), GetStateIn(), GetStateOut()
+ GetOriginalAddress(), GetDestAddress(), GetAliasAddress()
+ GetOriginalPort(), GetAliasPort()
+ SetAckModified(), GetAckModified()
+ GetDeltaAckIn(), GetDeltaSeqOut(), AddSeq()
+ SetProtocolFlags(), GetProtocolFlags()
+ SetDestCallId()
+*/
+
+
+void
+SetFragmentAddr(struct alias_link *lnk, struct in_addr src_addr)
+{
+ lnk->data.frag_addr = src_addr;
+}
+
+
+void
+GetFragmentAddr(struct alias_link *lnk, struct in_addr *src_addr)
+{
+ *src_addr = lnk->data.frag_addr;
+}
+
+
+void
+SetFragmentPtr(struct alias_link *lnk, char *fptr)
+{
+ lnk->data.frag_ptr = fptr;
+}
+
+
+void
+GetFragmentPtr(struct alias_link *lnk, char **fptr)
+{
+ *fptr = lnk->data.frag_ptr;
+}
+
+
+void
+SetStateIn(struct alias_link *lnk, int state)
+{
+ /* TCP input state */
+ switch (state) {
+ case ALIAS_TCP_STATE_DISCONNECTED:
+ if (lnk->data.tcp->state.out != ALIAS_TCP_STATE_CONNECTED)
+ lnk->expire_time = TCP_EXPIRE_DEAD;
+ else
+ lnk->expire_time = TCP_EXPIRE_SINGLEDEAD;
+ break;
+ case ALIAS_TCP_STATE_CONNECTED:
+ if (lnk->data.tcp->state.out == ALIAS_TCP_STATE_CONNECTED)
+ lnk->expire_time = TCP_EXPIRE_CONNECTED;
+ break;
+ default:
+#ifdef _KERNEL
+ panic("libalias:SetStateIn() unknown state");
+#else
+ abort();
+#endif
+ }
+ lnk->data.tcp->state.in = state;
+}
+
+
+void
+SetStateOut(struct alias_link *lnk, int state)
+{
+ /* TCP output state */
+ switch (state) {
+ case ALIAS_TCP_STATE_DISCONNECTED:
+ if (lnk->data.tcp->state.in != ALIAS_TCP_STATE_CONNECTED)
+ lnk->expire_time = TCP_EXPIRE_DEAD;
+ else
+ lnk->expire_time = TCP_EXPIRE_SINGLEDEAD;
+ break;
+ case ALIAS_TCP_STATE_CONNECTED:
+ if (lnk->data.tcp->state.in == ALIAS_TCP_STATE_CONNECTED)
+ lnk->expire_time = TCP_EXPIRE_CONNECTED;
+ break;
+ default:
+#ifdef _KERNEL
+ panic("libalias:SetStateOut() unknown state");
+#else
+ abort();
+#endif
+ }
+ lnk->data.tcp->state.out = state;
+}
+
+
+int
+GetStateIn(struct alias_link *lnk)
+{
+ /* TCP input state */
+ return (lnk->data.tcp->state.in);
+}
+
+
+int
+GetStateOut(struct alias_link *lnk)
+{
+ /* TCP output state */
+ return (lnk->data.tcp->state.out);
+}
+
+
+struct in_addr
+GetOriginalAddress(struct alias_link *lnk)
+{
+ if (lnk->src_addr.s_addr == INADDR_ANY)
+ return (lnk->la->aliasAddress);
+ else
+ return (lnk->src_addr);
+}
+
+
+struct in_addr
+GetDestAddress(struct alias_link *lnk)
+{
+ return (lnk->dst_addr);
+}
+
+
+struct in_addr
+GetAliasAddress(struct alias_link *lnk)
+{
+ if (lnk->alias_addr.s_addr == INADDR_ANY)
+ return (lnk->la->aliasAddress);
+ else
+ return (lnk->alias_addr);
+}
+
+
+struct in_addr
+GetDefaultAliasAddress(struct libalias *la)
+{
+
+ LIBALIAS_LOCK_ASSERT(la);
+ return (la->aliasAddress);
+}
+
+
+void
+SetDefaultAliasAddress(struct libalias *la, struct in_addr alias_addr)
+{
+
+ LIBALIAS_LOCK_ASSERT(la);
+ la->aliasAddress = alias_addr;
+}
+
+
+u_short
+GetOriginalPort(struct alias_link *lnk)
+{
+ return (lnk->src_port);
+}
+
+
+u_short
+GetAliasPort(struct alias_link *lnk)
+{
+ return (lnk->alias_port);
+}
+
+#ifndef NO_FW_PUNCH
+static u_short
+GetDestPort(struct alias_link *lnk)
+{
+ return (lnk->dst_port);
+}
+
+#endif
+
+void
+SetAckModified(struct alias_link *lnk)
+{
+/* Indicate that ACK numbers have been modified in a TCP connection */
+ lnk->data.tcp->state.ack_modified = 1;
+}
+
+
+struct in_addr
+GetProxyAddress(struct alias_link *lnk)
+{
+ return (lnk->proxy_addr);
+}
+
+
+void
+SetProxyAddress(struct alias_link *lnk, struct in_addr addr)
+{
+ lnk->proxy_addr = addr;
+}
+
+
+u_short
+GetProxyPort(struct alias_link *lnk)
+{
+ return (lnk->proxy_port);
+}
+
+
+void
+SetProxyPort(struct alias_link *lnk, u_short port)
+{
+ lnk->proxy_port = port;
+}
+
+
+int
+GetAckModified(struct alias_link *lnk)
+{
+/* See if ACK numbers have been modified */
+ return (lnk->data.tcp->state.ack_modified);
+}
+
+/* XXX ip free */
+int
+GetDeltaAckIn(u_long ack, struct alias_link *lnk)
+{
+/*
+Find out how much the ACK number has been altered for an incoming
+TCP packet. To do this, a circular list of ACK numbers where the TCP
+packet size was altered is searched.
+*/
+
+ int i;
+ int delta, ack_diff_min;
+
+ delta = 0;
+ ack_diff_min = -1;
+ for (i = 0; i < N_LINK_TCP_DATA; i++) {
+ struct ack_data_record x;
+
+ x = lnk->data.tcp->ack[i];
+ if (x.active == 1) {
+ int ack_diff;
+
+ ack_diff = SeqDiff(x.ack_new, ack);
+ if (ack_diff >= 0) {
+ if (ack_diff_min >= 0) {
+ if (ack_diff < ack_diff_min) {
+ delta = x.delta;
+ ack_diff_min = ack_diff;
+ }
+ } else {
+ delta = x.delta;
+ ack_diff_min = ack_diff;
+ }
+ }
+ }
+ }
+ return (delta);
+}
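+
+/*
+ * Worked example (editorial): suppose a rewritten payload grew by 5
+ * bytes, so AddSeq() recorded ack_old = S + dlen, ack_new = ack_old + 5
+ * and delta = 5 for an outgoing segment starting at sequence number S
+ * (assuming no earlier delta). When the peer later acknowledges past
+ * ack_new, that record has the smallest non-negative SeqDiff(), and
+ * GetDeltaAckIn() returns 5: the amount by which the incoming ACK must
+ * be reduced to match the original, unmodified stream.
+ */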
+
+/* XXX ip free */
+int
+GetDeltaSeqOut(u_long seq, struct alias_link *lnk)
+{
+/*
+Find out how much the sequence number has been altered for an outgoing
+TCP packet. To do this, a circular list of ACK numbers where the TCP
+packet size was altered is searched.
+*/
+
+ int i;
+ int delta, seq_diff_min;
+
+ delta = 0;
+ seq_diff_min = -1;
+ for (i = 0; i < N_LINK_TCP_DATA; i++) {
+ struct ack_data_record x;
+
+ x = lnk->data.tcp->ack[i];
+ if (x.active == 1) {
+ int seq_diff;
+
+ seq_diff = SeqDiff(x.ack_old, seq);
+ if (seq_diff >= 0) {
+ if (seq_diff_min >= 0) {
+ if (seq_diff < seq_diff_min) {
+ delta = x.delta;
+ seq_diff_min = seq_diff;
+ }
+ } else {
+ delta = x.delta;
+ seq_diff_min = seq_diff;
+ }
+ }
+ }
+ }
+ return (delta);
+}
+
+/* XXX ip free */
+void
+AddSeq(struct alias_link *lnk, int delta, u_int ip_hl, u_short ip_len,
+ u_long th_seq, u_int th_off)
+{
+/*
+When a TCP packet has been altered in length, save this
+information in a circular list. If enough packets have
+been altered, then this list will begin to overwrite itself.
+*/
+
+ struct ack_data_record x;
+ int hlen, tlen, dlen;
+ int i;
+
+ hlen = (ip_hl + th_off) << 2;
+ tlen = ntohs(ip_len);
+ dlen = tlen - hlen;
+
+ x.ack_old = htonl(ntohl(th_seq) + dlen);
+ x.ack_new = htonl(ntohl(th_seq) + dlen + delta);
+ x.delta = delta;
+ x.active = 1;
+
+ i = lnk->data.tcp->state.index;
+ lnk->data.tcp->ack[i] = x;
+
+ i++;
+ if (i == N_LINK_TCP_DATA)
+ lnk->data.tcp->state.index = 0;
+ else
+ lnk->data.tcp->state.index = i;
+}
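+
+/*
+ * Editorial sketch: protocol handlers that change the payload length
+ * pair AddSeq() with GetDeltaSeqOut(); compare NewFtpMessage() in
+ * alias_ftp.c. With new_dlen/old_dlen as hypothetical names for the
+ * data lengths after and before the rewrite:
+ *
+ *	delta = GetDeltaSeqOut(tc->th_seq, lnk);
+ *	AddSeq(lnk, delta + new_dlen - old_dlen, pip->ip_hl,
+ *	    pip->ip_len, tc->th_seq, tc->th_off);
+ */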
+
+void
+SetExpire(struct alias_link *lnk, int expire)
+{
+ if (expire == 0) {
+ lnk->flags &= ~LINK_PERMANENT;
+ DeleteLink(lnk);
+ } else if (expire == -1) {
+ lnk->flags |= LINK_PERMANENT;
+ } else if (expire > 0) {
+ lnk->expire_time = expire;
+ } else {
+#ifdef LIBALIAS_DEBUG
+ fprintf(stderr, "PacketAlias/SetExpire(): ");
+ fprintf(stderr, "error in expire parameter\n");
+#endif
+ }
+}
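+
+/*
+ * Usage note (editorial): the expire argument is overloaded --
+ *
+ *	SetExpire(lnk, 0);	delete the link immediately
+ *	SetExpire(lnk, -1);	make the link permanent
+ *	SetExpire(lnk, 3600);	let the link idle out after 3600 seconds
+ *
+ * Any other negative value is ignored (and reported under
+ * LIBALIAS_DEBUG).
+ */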
+
+void
+ClearCheckNewLink(struct libalias *la)
+{
+
+ LIBALIAS_LOCK_ASSERT(la);
+ la->newDefaultLink = 0;
+}
+
+void
+SetProtocolFlags(struct alias_link *lnk, int pflags)
+{
+
+ lnk->pflags = pflags;
+}
+
+int
+GetProtocolFlags(struct alias_link *lnk)
+{
+
+ return (lnk->pflags);
+}
+
+void
+SetDestCallId(struct alias_link *lnk, u_int16_t cid)
+{
+ struct libalias *la = lnk->la;
+
+ LIBALIAS_LOCK_ASSERT(la);
+ la->deleteAllLinks = 1;
+ ReLink(lnk, lnk->src_addr, lnk->dst_addr, lnk->alias_addr,
+ lnk->src_port, cid, lnk->alias_port, lnk->link_type);
+ la->deleteAllLinks = 0;
+}
+
+
+/* Miscellaneous Functions
+
+ HouseKeeping()
+ InitPacketAliasLog()
+ UninitPacketAliasLog()
+*/
+
+/*
+ Whenever an outgoing or incoming packet is handled, HouseKeeping()
+ is called to find and remove timed-out aliasing links. Logic exists
+ to sweep through the entire table and linked list structure
+ every 60 seconds.
+
+ (prototype in alias_local.h)
+*/
+
+void
+HouseKeeping(struct libalias *la)
+{
+ int i, n;
+#ifndef _KERNEL
+ struct timeval tv;
+ struct timezone tz;
+#endif
+
+ LIBALIAS_LOCK_ASSERT(la);
+ /*
+ * Save system time (seconds) in global variable timeStamp for use
+	 * by other functions. This is done so as not to waste time with
+	 * unnecessary system calls.
+ */
+#ifdef _KERNEL
+ la->timeStamp = time_uptime;
+#else
+ gettimeofday(&tv, &tz);
+ la->timeStamp = tv.tv_sec;
+#endif
+
+ /* Compute number of spokes (output table link chains) to cover */
+ n = LINK_TABLE_OUT_SIZE * (la->timeStamp - la->lastCleanupTime);
+ n /= ALIAS_CLEANUP_INTERVAL_SECS;
+
+ /* Handle different cases */
+ if (n > 0) {
+ if (n > ALIAS_CLEANUP_MAX_SPOKES)
+ n = ALIAS_CLEANUP_MAX_SPOKES;
+ la->lastCleanupTime = la->timeStamp;
+ for (i = 0; i < n; i++)
+ IncrementalCleanup(la);
+ } else if (n < 0) {
+#ifdef LIBALIAS_DEBUG
+ fprintf(stderr, "PacketAlias/HouseKeeping(): ");
+ fprintf(stderr, "something unexpected in time values\n");
+#endif
+ la->lastCleanupTime = la->timeStamp;
+ }
+}
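+
+/*
+ * Worked example (editorial): if LINK_TABLE_OUT_SIZE were 4001 and
+ * ALIAS_CLEANUP_INTERVAL_SECS were 60 (illustrative values; the real
+ * ones are defined earlier in this file), a call made 3 seconds after
+ * the previous cleanup gives n = 4001 * 3 / 60 = 200 spokes, capped at
+ * ALIAS_CLEANUP_MAX_SPOKES if larger, so the table is swept
+ * incrementally instead of in one long stall.
+ */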
+
+/* Init the log file and enable logging */
+static int
+InitPacketAliasLog(struct libalias *la)
+{
+
+ LIBALIAS_LOCK_ASSERT(la);
+ if (~la->packetAliasMode & PKT_ALIAS_LOG) {
+#ifdef _KERNEL
+		la->logDesc = malloc(LIBALIAS_BUF_SIZE);
+#else
+		la->logDesc = fopen("/var/log/alias.log", "w");
+#endif
+		if (la->logDesc == NULL)
+			return (ENOMEM);	/* log initialization failed */
+#ifndef _KERNEL
+		fprintf(la->logDesc, "PacketAlias/InitPacketAliasLog: Packet alias logging enabled.\n");
+#endif
+ la->packetAliasMode |= PKT_ALIAS_LOG;
+ }
+
+ return (1);
+}
+
+/* Close the log-file and disable logging. */
+static void
+UninitPacketAliasLog(struct libalias *la)
+{
+
+ LIBALIAS_LOCK_ASSERT(la);
+ if (la->logDesc) {
+#ifdef _KERNEL
+ free(la->logDesc);
+#else
+ fclose(la->logDesc);
+#endif
+ la->logDesc = NULL;
+ }
+ la->packetAliasMode &= ~PKT_ALIAS_LOG;
+}
+
+/* Outside world interfaces
+
+-- "outside world" means other than alias*.c routines --
+
+ PacketAliasRedirectPort()
+ PacketAliasAddServer()
+ PacketAliasRedirectProto()
+ PacketAliasRedirectAddr()
+ PacketAliasRedirectDynamic()
+ PacketAliasRedirectDelete()
+ PacketAliasSetAddress()
+ PacketAliasInit()
+ PacketAliasUninit()
+ PacketAliasSetMode()
+
+(prototypes in alias.h)
+*/
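+
+/*
+ * Minimal userland usage sketch (editorial; error handling omitted and
+ * the address literal is an assumption for the example):
+ *
+ *	struct libalias *la;
+ *	struct in_addr pub;
+ *
+ *	la = LibAliasInit(NULL);
+ *	inet_aton("203.0.113.1", &pub);
+ *	LibAliasSetAddress(la, pub);
+ *	... feed packets through LibAliasOut()/LibAliasIn() ...
+ *	LibAliasUninit(la);
+ */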
+
+/* Redirection from a specific public addr:port to a
+ private addr:port */
+struct alias_link *
+LibAliasRedirectPort(struct libalias *la, struct in_addr src_addr, u_short src_port,
+ struct in_addr dst_addr, u_short dst_port,
+ struct in_addr alias_addr, u_short alias_port,
+ u_char proto)
+{
+ int link_type;
+ struct alias_link *lnk;
+
+ LIBALIAS_LOCK(la);
+ switch (proto) {
+ case IPPROTO_UDP:
+ link_type = LINK_UDP;
+ break;
+ case IPPROTO_TCP:
+ link_type = LINK_TCP;
+ break;
+ case IPPROTO_SCTP:
+ link_type = LINK_SCTP;
+ break;
+ default:
+#ifdef LIBALIAS_DEBUG
+ fprintf(stderr, "PacketAliasRedirectPort(): ");
+ fprintf(stderr, "only SCTP, TCP and UDP protocols allowed\n");
+#endif
+ lnk = NULL;
+ goto getout;
+ }
+
+ lnk = AddLink(la, src_addr, dst_addr, alias_addr,
+ src_port, dst_port, alias_port,
+ link_type);
+
+ if (lnk != NULL) {
+ lnk->flags |= LINK_PERMANENT;
+ }
+#ifdef LIBALIAS_DEBUG
+ else {
+ fprintf(stderr, "PacketAliasRedirectPort(): "
+ "call to AddLink() failed\n");
+ }
+#endif
+
+getout:
+ LIBALIAS_UNLOCK(la);
+ return (lnk);
+}
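+
+/*
+ * Usage sketch (editorial): forward inbound TCP connections that reach
+ * the alias address "pub" (as in the sketch above) on port 8022 to
+ * 10.0.0.5:22; addresses and ports are assumptions for the example, an
+ * INADDR_ANY remote address with port 0 matches any peer, and ports
+ * are in network byte order.
+ *
+ *	struct in_addr priv, any;
+ *
+ *	inet_aton("10.0.0.5", &priv);
+ *	any.s_addr = INADDR_ANY;
+ *	(void)LibAliasRedirectPort(la, priv, htons(22),
+ *	    any, 0, pub, htons(8022), IPPROTO_TCP);
+ */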
+
+/* Add server to the pool of servers */
+int
+LibAliasAddServer(struct libalias *la, struct alias_link *lnk, struct in_addr addr, u_short port)
+{
+ struct server *server;
+ int res;
+
+ LIBALIAS_LOCK(la);
+ (void)la;
+
+ server = malloc(sizeof(struct server));
+
+ if (server != NULL) {
+ struct server *head;
+
+ server->addr = addr;
+ server->port = port;
+
+ head = lnk->server;
+ if (head == NULL)
+ server->next = server;
+ else {
+ struct server *s;
+
+			for (s = head; s->next != head; s = s->next)
+				;	/* walk to the tail of the circular list */
+ s->next = server;
+ server->next = head;
+ }
+ lnk->server = server;
+ res = 0;
+ } else
+ res = -1;
+
+ LIBALIAS_UNLOCK(la);
+ return (res);
+}
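+
+/*
+ * Usage sketch (editorial): LSNAT-style load balancing -- create one
+ * permanent redirect and attach a pool of servers to it; inbound
+ * connections then rotate through the pool (see FindOriginalAddress()).
+ * srv1/srv2 are hypothetical struct in_addr values, and pub/any are as
+ * in the sketches above.
+ *
+ *	lnk = LibAliasRedirectPort(la, srv1, htons(80), any, 0,
+ *	    pub, htons(80), IPPROTO_TCP);
+ *	LibAliasAddServer(la, lnk, srv1, htons(80));
+ *	LibAliasAddServer(la, lnk, srv2, htons(80));
+ */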
+
+/* Redirect packets of a given IP protocol from a specific
+ public address to a private address */
+struct alias_link *
+LibAliasRedirectProto(struct libalias *la, struct in_addr src_addr,
+ struct in_addr dst_addr,
+ struct in_addr alias_addr,
+ u_char proto)
+{
+ struct alias_link *lnk;
+
+ LIBALIAS_LOCK(la);
+ lnk = AddLink(la, src_addr, dst_addr, alias_addr,
+ NO_SRC_PORT, NO_DEST_PORT, 0,
+ proto);
+
+ if (lnk != NULL) {
+ lnk->flags |= LINK_PERMANENT;
+ }
+#ifdef LIBALIAS_DEBUG
+ else {
+ fprintf(stderr, "PacketAliasRedirectProto(): "
+ "call to AddLink() failed\n");
+ }
+#endif
+
+ LIBALIAS_UNLOCK(la);
+ return (lnk);
+}
+
+/* Static address translation */
+struct alias_link *
+LibAliasRedirectAddr(struct libalias *la, struct in_addr src_addr,
+ struct in_addr alias_addr)
+{
+ struct alias_link *lnk;
+
+ LIBALIAS_LOCK(la);
+ lnk = AddLink(la, src_addr, la->nullAddress, alias_addr,
+ 0, 0, 0,
+ LINK_ADDR);
+
+ if (lnk != NULL) {
+ lnk->flags |= LINK_PERMANENT;
+ }
+#ifdef LIBALIAS_DEBUG
+ else {
+ fprintf(stderr, "PacketAliasRedirectAddr(): "
+ "call to AddLink() failed\n");
+ }
+#endif
+
+ LIBALIAS_UNLOCK(la);
+ return (lnk);
+}
+
+
+/* Mark the aliasing link dynamic */
+int
+LibAliasRedirectDynamic(struct libalias *la, struct alias_link *lnk)
+{
+ int res;
+
+ LIBALIAS_LOCK(la);
+ (void)la;
+
+ if (lnk->flags & LINK_PARTIALLY_SPECIFIED)
+ res = -1;
+ else {
+ lnk->flags &= ~LINK_PERMANENT;
+ res = 0;
+ }
+ LIBALIAS_UNLOCK(la);
+ return (res);
+}
+
+
+void
+LibAliasRedirectDelete(struct libalias *la, struct alias_link *lnk)
+{
+/* This is a dangerous function to put in the API,
+ because an invalid pointer can crash the program. */
+
+ LIBALIAS_LOCK(la);
+ la->deleteAllLinks = 1;
+ DeleteLink(lnk);
+ la->deleteAllLinks = 0;
+ LIBALIAS_UNLOCK(la);
+}
+
+
+void
+LibAliasSetAddress(struct libalias *la, struct in_addr addr)
+{
+
+ LIBALIAS_LOCK(la);
+ if (la->packetAliasMode & PKT_ALIAS_RESET_ON_ADDR_CHANGE
+ && la->aliasAddress.s_addr != addr.s_addr)
+ CleanupAliasData(la);
+
+ la->aliasAddress = addr;
+ LIBALIAS_UNLOCK(la);
+}
+
+
+void
+LibAliasSetTarget(struct libalias *la, struct in_addr target_addr)
+{
+
+ LIBALIAS_LOCK(la);
+ la->targetAddress = target_addr;
+ LIBALIAS_UNLOCK(la);
+}
+
+static void
+finishoff(void)
+{
+
+ while (!LIST_EMPTY(&instancehead))
+ LibAliasUninit(LIST_FIRST(&instancehead));
+}
+
+struct libalias *
+LibAliasInit(struct libalias *la)
+{
+ int i;
+#ifndef _KERNEL
+ struct timeval tv;
+ struct timezone tz;
+#endif
+
+ if (la == NULL) {
+		la = calloc(1, sizeof(*la));
+		if (la == NULL)
+			return (NULL);
+
+#ifndef _KERNEL /* kernel cleans up on module unload */
+ if (LIST_EMPTY(&instancehead))
+ atexit(finishoff);
+#endif
+ LIST_INSERT_HEAD(&instancehead, la, instancelist);
+
+#ifdef _KERNEL
+ la->timeStamp = time_uptime;
+ la->lastCleanupTime = time_uptime;
+#else
+ gettimeofday(&tv, &tz);
+ la->timeStamp = tv.tv_sec;
+ la->lastCleanupTime = tv.tv_sec;
+#endif
+
+ for (i = 0; i < LINK_TABLE_OUT_SIZE; i++)
+ LIST_INIT(&la->linkTableOut[i]);
+ for (i = 0; i < LINK_TABLE_IN_SIZE; i++)
+ LIST_INIT(&la->linkTableIn[i]);
+#ifdef _KERNEL
+ AliasSctpInit(la);
+#endif
+ LIBALIAS_LOCK_INIT(la);
+ LIBALIAS_LOCK(la);
+ } else {
+ LIBALIAS_LOCK(la);
+ la->deleteAllLinks = 1;
+ CleanupAliasData(la);
+ la->deleteAllLinks = 0;
+#ifdef _KERNEL
+ AliasSctpTerm(la);
+ AliasSctpInit(la);
+#endif
+ }
+
+ la->aliasAddress.s_addr = INADDR_ANY;
+ la->targetAddress.s_addr = INADDR_ANY;
+
+ la->icmpLinkCount = 0;
+ la->udpLinkCount = 0;
+ la->tcpLinkCount = 0;
+ la->sctpLinkCount = 0;
+ la->pptpLinkCount = 0;
+ la->protoLinkCount = 0;
+ la->fragmentIdLinkCount = 0;
+ la->fragmentPtrLinkCount = 0;
+ la->sockCount = 0;
+
+ la->cleanupIndex = 0;
+
+ la->packetAliasMode = PKT_ALIAS_SAME_PORTS
+#ifndef NO_USE_SOCKETS
+ | PKT_ALIAS_USE_SOCKETS
+#endif
+ | PKT_ALIAS_RESET_ON_ADDR_CHANGE;
+#ifndef NO_FW_PUNCH
+ la->fireWallFD = -1;
+#endif
+#ifndef _KERNEL
+ LibAliasRefreshModules();
+#endif
+ LIBALIAS_UNLOCK(la);
+ return (la);
+}
+
+void
+LibAliasUninit(struct libalias *la)
+{
+
+ LIBALIAS_LOCK(la);
+#ifdef _KERNEL
+ AliasSctpTerm(la);
+#endif
+ la->deleteAllLinks = 1;
+ CleanupAliasData(la);
+ la->deleteAllLinks = 0;
+ UninitPacketAliasLog(la);
+#ifndef NO_FW_PUNCH
+ UninitPunchFW(la);
+#endif
+ LIST_REMOVE(la, instancelist);
+ LIBALIAS_UNLOCK(la);
+ LIBALIAS_LOCK_DESTROY(la);
+ free(la);
+}
+
+/* Change mode for some operations */
+unsigned int
+LibAliasSetMode(
+ struct libalias *la,
+ unsigned int flags, /* Which state to bring flags to */
+ unsigned int mask /* Mask of which flags to affect (use 0 to
+ * do a probe for flag values) */
+)
+{
+ int res = -1;
+
+ LIBALIAS_LOCK(la);
+/* Enable logging? */
+ if (flags & mask & PKT_ALIAS_LOG) {
+ /* Do the enable */
+ if (InitPacketAliasLog(la) == ENOMEM)
+ goto getout;
+ } else
+/* _Disable_ logging? */
+ if (~flags & mask & PKT_ALIAS_LOG) {
+ UninitPacketAliasLog(la);
+ }
+#ifndef NO_FW_PUNCH
+/* Start punching holes in the firewall? */
+ if (flags & mask & PKT_ALIAS_PUNCH_FW) {
+ InitPunchFW(la);
+ } else
+/* Stop punching holes in the firewall? */
+ if (~flags & mask & PKT_ALIAS_PUNCH_FW) {
+ UninitPunchFW(la);
+ }
+#endif
+
+/* Other flags can be set/cleared without special action */
+ la->packetAliasMode = (flags & mask) | (la->packetAliasMode & ~mask);
+ res = la->packetAliasMode;
+getout:
+ LIBALIAS_UNLOCK(la);
+ return (res);
+}
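+
+/*
+ * Usage sketch (editorial): a zero mask changes nothing and returns the
+ * current mode, i.e. a probe, while passing the same bit in both flags
+ * and mask sets that one flag without disturbing the others:
+ *
+ *	unsigned int mode;
+ *
+ *	mode = LibAliasSetMode(la, 0, 0);
+ *	LibAliasSetMode(la, PKT_ALIAS_DENY_INCOMING,
+ *	    PKT_ALIAS_DENY_INCOMING);
+ */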
+
+
+int
+LibAliasCheckNewLink(struct libalias *la)
+{
+ int res;
+
+ LIBALIAS_LOCK(la);
+ res = la->newDefaultLink;
+ LIBALIAS_UNLOCK(la);
+ return (res);
+}
+
+
+#ifndef NO_FW_PUNCH
+
+/*****************
+ Code to support firewall punching. This shouldn't really be in this
+ file, but making variables global is evil too.
+ ****************/
+
+/* Firewall include files */
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/netinet/ip_fw.h>
+#include <rtems/freebsd/string.h>
+#include <rtems/freebsd/err.h>
+
+/*
+ * helper function, updates the pointer to cmd with the length
+ * of the current command, and also cleans up the first word of
+ * the new command in case it has been clobbered before.
+ */
+static ipfw_insn *
+next_cmd(ipfw_insn * cmd)
+{
+ cmd += F_LEN(cmd);
+ bzero(cmd, sizeof(*cmd));
+ return (cmd);
+}
+
+/*
+ * A function to fill simple commands of size 1.
+ * Existing flags are preserved.
+ */
+static ipfw_insn *
+fill_cmd(ipfw_insn * cmd, enum ipfw_opcodes opcode, int size,
+ int flags, u_int16_t arg)
+{
+ cmd->opcode = opcode;
+ cmd->len = ((cmd->len | flags) & (F_NOT | F_OR)) | (size & F_LEN_MASK);
+ cmd->arg1 = arg;
+ return next_cmd(cmd);
+}
+
+static ipfw_insn *
+fill_ip(ipfw_insn * cmd1, enum ipfw_opcodes opcode, u_int32_t addr)
+{
+ ipfw_insn_ip *cmd = (ipfw_insn_ip *) cmd1;
+
+ cmd->addr.s_addr = addr;
+ return fill_cmd(cmd1, opcode, F_INSN_SIZE(ipfw_insn_u32), 0, 0);
+}
+
+static ipfw_insn *
+fill_one_port(ipfw_insn * cmd1, enum ipfw_opcodes opcode, u_int16_t port)
+{
+ ipfw_insn_u16 *cmd = (ipfw_insn_u16 *) cmd1;
+
+ cmd->ports[0] = cmd->ports[1] = port;
+ return fill_cmd(cmd1, opcode, F_INSN_SIZE(ipfw_insn_u16), 0, 0);
+}
+
+static int
+fill_rule(void *buf, int bufsize, int rulenum,
+ enum ipfw_opcodes action, int proto,
+ struct in_addr sa, u_int16_t sp, struct in_addr da, u_int16_t dp)
+{
+ struct ip_fw *rule = (struct ip_fw *)buf;
+ ipfw_insn *cmd = (ipfw_insn *) rule->cmd;
+
+ bzero(buf, bufsize);
+ rule->rulenum = rulenum;
+
+ cmd = fill_cmd(cmd, O_PROTO, F_INSN_SIZE(ipfw_insn), 0, proto);
+ cmd = fill_ip(cmd, O_IP_SRC, sa.s_addr);
+ cmd = fill_one_port(cmd, O_IP_SRCPORT, sp);
+ cmd = fill_ip(cmd, O_IP_DST, da.s_addr);
+ cmd = fill_one_port(cmd, O_IP_DSTPORT, dp);
+
+ rule->act_ofs = (u_int32_t *) cmd - (u_int32_t *) rule->cmd;
+ cmd = fill_cmd(cmd, action, F_INSN_SIZE(ipfw_insn), 0, 0);
+
+ rule->cmd_len = (u_int32_t *) cmd - (u_int32_t *) rule->cmd;
+
+ return ((char *)cmd - (char *)buf);
+}
+
+static void ClearAllFWHoles(struct libalias *la);
+
+
+#define fw_setfield(la, field, num) \
+do { \
+ (field)[(num) - la->fireWallBaseNum] = 1; \
+} /*lint -save -e717 */ while(0)/* lint -restore */
+
+#define fw_clrfield(la, field, num) \
+do { \
+ (field)[(num) - la->fireWallBaseNum] = 0; \
+} /*lint -save -e717 */ while(0)/* lint -restore */
+
+#define fw_tstfield(la, field, num) ((field)[(num) - la->fireWallBaseNum])
+
+static void
+InitPunchFW(struct libalias *la)
+{
+
+ LIBALIAS_LOCK_ASSERT(la);
+ la->fireWallField = malloc(la->fireWallNumNums);
+ if (la->fireWallField) {
+ memset(la->fireWallField, 0, la->fireWallNumNums);
+ if (la->fireWallFD < 0) {
+ la->fireWallFD = socket(AF_INET, SOCK_RAW, IPPROTO_RAW);
+ }
+ ClearAllFWHoles(la);
+ la->fireWallActiveNum = la->fireWallBaseNum;
+ }
+}
+
+static void
+UninitPunchFW(struct libalias *la)
+{
+
+ LIBALIAS_LOCK_ASSERT(la);
+ ClearAllFWHoles(la);
+ if (la->fireWallFD >= 0)
+ close(la->fireWallFD);
+ la->fireWallFD = -1;
+ if (la->fireWallField)
+ free(la->fireWallField);
+ la->fireWallField = NULL;
+ la->packetAliasMode &= ~PKT_ALIAS_PUNCH_FW;
+}
+
+/* Make a certain link go through the firewall */
+void
+PunchFWHole(struct alias_link *lnk)
+{
+ struct libalias *la;
+ int r; /* Result code */
+ struct ip_fw rule; /* On-the-fly built rule */
+ int fwhole; /* Where to punch hole */
+
+	la = lnk->la;
+	LIBALIAS_LOCK_ASSERT(la);
+
+/* Don't do anything unless we are asked to */
+ if (!(la->packetAliasMode & PKT_ALIAS_PUNCH_FW) ||
+ la->fireWallFD < 0 ||
+ lnk->link_type != LINK_TCP)
+ return;
+
+ memset(&rule, 0, sizeof rule);
+
+/** Build rule **/
+
+ /* Find empty slot */
+ for (fwhole = la->fireWallActiveNum;
+ fwhole < la->fireWallBaseNum + la->fireWallNumNums &&
+ fw_tstfield(la, la->fireWallField, fwhole);
+ fwhole++);
+ if (fwhole == la->fireWallBaseNum + la->fireWallNumNums) {
+ for (fwhole = la->fireWallBaseNum;
+ fwhole < la->fireWallActiveNum &&
+ fw_tstfield(la, la->fireWallField, fwhole);
+ fwhole++);
+ if (fwhole == la->fireWallActiveNum) {
+ /* No rule point empty - we can't punch more holes. */
+ la->fireWallActiveNum = la->fireWallBaseNum;
+#ifdef LIBALIAS_DEBUG
+ fprintf(stderr, "libalias: Unable to create firewall hole!\n");
+#endif
+ return;
+ }
+ }
+ /* Start next search at next position */
+ la->fireWallActiveNum = fwhole + 1;
+
+ /*
+ * generate two rules of the form
+ *
+	 * add fwhole accept tcp from OAddr OPort to DAddr DPort
+	 * add fwhole accept tcp from DAddr DPort to OAddr OPort
+ */
+ if (GetOriginalPort(lnk) != 0 && GetDestPort(lnk) != 0) {
+ u_int32_t rulebuf[255];
+ int i;
+
+ i = fill_rule(rulebuf, sizeof(rulebuf), fwhole,
+ O_ACCEPT, IPPROTO_TCP,
+ GetOriginalAddress(lnk), ntohs(GetOriginalPort(lnk)),
+ GetDestAddress(lnk), ntohs(GetDestPort(lnk)));
+ r = setsockopt(la->fireWallFD, IPPROTO_IP, IP_FW_ADD, rulebuf, i);
+ if (r)
+ err(1, "alias punch inbound(1) setsockopt(IP_FW_ADD)");
+
+ i = fill_rule(rulebuf, sizeof(rulebuf), fwhole,
+ O_ACCEPT, IPPROTO_TCP,
+ GetDestAddress(lnk), ntohs(GetDestPort(lnk)),
+ GetOriginalAddress(lnk), ntohs(GetOriginalPort(lnk)));
+ r = setsockopt(la->fireWallFD, IPPROTO_IP, IP_FW_ADD, rulebuf, i);
+ if (r)
+ err(1, "alias punch inbound(2) setsockopt(IP_FW_ADD)");
+ }
+
+/* Indicate hole applied */
+ lnk->data.tcp->fwhole = fwhole;
+ fw_setfield(la, la->fireWallField, fwhole);
+}
+
+/* Remove a hole in a firewall associated with a particular alias
+ lnk. Calling this too often is harmless. */
+static void
+ClearFWHole(struct alias_link *lnk)
+{
+ struct libalias *la;
+
+	la = lnk->la;
+	LIBALIAS_LOCK_ASSERT(la);
+ if (lnk->link_type == LINK_TCP) {
+ int fwhole = lnk->data.tcp->fwhole; /* Where is the firewall
+ * hole? */
+ struct ip_fw rule;
+
+ if (fwhole < 0)
+ return;
+
+ memset(&rule, 0, sizeof rule); /* useless for ipfw2 */
+ while (!setsockopt(la->fireWallFD, IPPROTO_IP, IP_FW_DEL,
+ &fwhole, sizeof fwhole));
+ fw_clrfield(la, la->fireWallField, fwhole);
+ lnk->data.tcp->fwhole = -1;
+ }
+}
+
+/* Clear out the entire range dedicated to firewall holes. */
+static void
+ClearAllFWHoles(struct libalias *la)
+{
+ struct ip_fw rule; /* On-the-fly built rule */
+ int i;
+
+ LIBALIAS_LOCK_ASSERT(la);
+ if (la->fireWallFD < 0)
+ return;
+
+ memset(&rule, 0, sizeof rule);
+ for (i = la->fireWallBaseNum; i < la->fireWallBaseNum + la->fireWallNumNums; i++) {
+ int r = i;
+
+ while (!setsockopt(la->fireWallFD, IPPROTO_IP, IP_FW_DEL, &r, sizeof r));
+ }
+ /* XXX: third arg correct here ? /phk */
+ memset(la->fireWallField, 0, la->fireWallNumNums);
+}
+
+#endif
+
+void
+LibAliasSetFWBase(struct libalias *la, unsigned int base, unsigned int num)
+{
+
+ LIBALIAS_LOCK(la);
+#ifndef NO_FW_PUNCH
+ la->fireWallBaseNum = base;
+ la->fireWallNumNums = num;
+#endif
+ LIBALIAS_UNLOCK(la);
+}
+
+void
+LibAliasSetSkinnyPort(struct libalias *la, unsigned int port)
+{
+
+ LIBALIAS_LOCK(la);
+ la->skinnyPort = port;
+ LIBALIAS_UNLOCK(la);
+}
+
+/*
+ * Find the address to redirect incoming packets
+ */
+struct in_addr
+FindSctpRedirectAddress(struct libalias *la, struct sctp_nat_msg *sm)
+{
+ struct alias_link *lnk;
+ struct in_addr redir;
+
+ LIBALIAS_LOCK_ASSERT(la);
+ lnk = FindLinkIn(la, sm->ip_hdr->ip_src, sm->ip_hdr->ip_dst,
+	    sm->sctp_hdr->dest_port, sm->sctp_hdr->dest_port, LINK_SCTP, 1);
+	if (lnk != NULL) {
+		return (lnk->src_addr);	/* port redirect */
+	} else {
+		redir = FindOriginalAddress(la, sm->ip_hdr->ip_dst);
+		if (redir.s_addr == la->aliasAddress.s_addr ||
+		    redir.s_addr == la->targetAddress.s_addr) { /* No address found */
+			lnk = FindLinkIn(la, sm->ip_hdr->ip_src, sm->ip_hdr->ip_dst,
+			    NO_DEST_PORT, 0, LINK_SCTP, 1);
+			if (lnk != NULL)
+				return (lnk->src_addr);	/* redirect proto */
+		}
+		return (redir);	/* address redirect */
+	}
+ }
+}
diff --git a/rtems/freebsd/netinet/libalias/alias_dummy.c b/rtems/freebsd/netinet/libalias/alias_dummy.c
new file mode 100644
index 00000000..2df81551
--- /dev/null
+++ b/rtems/freebsd/netinet/libalias/alias_dummy.c
@@ -0,0 +1,155 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 2005 Paolo Pisati <piso@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * Alias_dummy is just an empty skeleton used to demonstrate how to write
+ * a module for libalias that will run unaltered in userland or in
+ * kernel land.
+ */
+
+#ifdef _KERNEL
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/module.h>
+#else
+#include <rtems/freebsd/errno.h>
+#include <rtems/freebsd/sys/types.h>
+#include <rtems/freebsd/stdio.h>
+#endif
+
+#include <rtems/freebsd/netinet/in_systm.h>
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/ip.h>
+#include <rtems/freebsd/netinet/udp.h>
+
+#ifdef _KERNEL
+#include <rtems/freebsd/netinet/libalias/alias_local.h>
+#include <rtems/freebsd/netinet/libalias/alias_mod.h>
+#else
+#include <rtems/freebsd/local/alias_local.h>
+#include <rtems/freebsd/local/alias_mod.h>
+#endif
+
+static void
+AliasHandleDummy(struct libalias *la, struct ip *ip, struct alias_data *ah);
+
+static int
+fingerprint(struct libalias *la, struct alias_data *ah)
+{
+
+ /*
+	 * Check here all the data that will be used later; if any field
+	 * is empty/NULL, return -1.
+ */
+ if (ah->dport == NULL || ah->sport == NULL || ah->lnk == NULL ||
+ ah->maxpktsize == 0)
+ return (-1);
+ /*
+ * Fingerprint the incoming packet, if it matches any conditions
+ * return an OK value.
+ */
+ if (ntohs(*ah->dport) == 123
+ || ntohs(*ah->sport) == 456)
+ return (0); /* I know how to handle it. */
+ return (-1); /* I don't recognize this packet. */
+}
+
+/*
+ * Wrap in this general purpose function, the real function used to alias the
+ * packets.
+ */
+
+static int
+protohandler(struct libalias *la, struct ip *pip, struct alias_data *ah)
+{
+
+ AliasHandleDummy(la, pip, ah);
+ return (0);
+}
+
+/*
+ * NOTA BENE: the next variable MUST NOT be renamed in any case if you want
+ * your module to work in userland, cause it's used to find and use all
+ * the protocol handlers present in every module.
+ * So WATCH OUT, your module needs this variables and it needs it with
+ * ITS EXACT NAME: handlers.
+ */
+
+struct proto_handler handlers [] = {
+ {
+ .pri = 666,
+ .dir = IN|OUT,
+ .proto = UDP|TCP,
+ .fingerprint = &fingerprint,
+ .protohandler = &protohandler
+ },
+ { EOH }
+};
+
+static int
+mod_handler(module_t mod, int type, void *data)
+{
+ int error;
+
+ switch (type) {
+ case MOD_LOAD:
+ error = 0;
+ LibAliasAttachHandlers(handlers);
+ break;
+ case MOD_UNLOAD:
+ error = 0;
+ LibAliasDetachHandlers(handlers);
+ break;
+ default:
+ error = EINVAL;
+ }
+ return (error);
+}
+
+#ifdef _KERNEL
+static
+#endif
+moduledata_t alias_mod = {
+ "alias_dummy", mod_handler, NULL
+};
+
+#ifdef _KERNEL
+DECLARE_MODULE(alias_dummy, alias_mod, SI_SUB_DRIVERS, SI_ORDER_SECOND);
+MODULE_VERSION(alias_dummy, 1);
+MODULE_DEPEND(alias_dummy, libalias, 1, 1, 1);
+#endif
+
+static void
+AliasHandleDummy(struct libalias *la, struct ip *ip, struct alias_data *ah)
+{
+ ; /* Dummy. */
+}
+
diff --git a/rtems/freebsd/netinet/libalias/alias_ftp.c b/rtems/freebsd/netinet/libalias/alias_ftp.c
new file mode 100644
index 00000000..b279f5d2
--- /dev/null
+++ b/rtems/freebsd/netinet/libalias/alias_ftp.c
@@ -0,0 +1,696 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 2001 Charles Mott <cm@linktel.net>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ Alias_ftp.c performs special processing for FTP sessions under
+ TCP. Specifically, when a PORT/EPRT command from the client
+ side or 227/229 reply from the server is sent, it is intercepted
+ and modified. The address is changed to the gateway machine
+ and an aliasing port is used.
+
+ For this routine to work, the message must fit entirely into a
+ single TCP packet. This is typically the case, but exceptions
+ can easily be envisioned under the actual specifications.
+
+ Probably the most troubling aspect of the approach taken here is
+ that the new message will typically be a different length, and
+ this causes a certain amount of bookkeeping to keep track of the
+ changes of sequence and acknowledgment numbers, since the client
+ machine is totally unaware of the modification to the TCP stream.
+
+
+ References: RFC 959, RFC 2428.
+
+ Initial version: August, 1996 (cjm)
+
+ Version 1.6
+ Brian Somers and Martin Renters identified an IP checksum
+ error for modified IP packets.
+
+ Version 1.7: January 9, 1996 (cjm)
+ Differential checksum computation for change
+ in IP packet length.
+
+ Version 2.1: May, 1997 (cjm)
+ Very minor changes to conform with
+ local/global/function naming conventions
+ within the packet aliasing module.
+
+ Version 3.1: May, 2000 (eds)
+ Add support for passive mode, alias the 227 replies.
+
+ See HISTORY file for record of revisions.
+*/
+
+/* Includes */
+#ifdef _KERNEL
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/ctype.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/module.h>
+#else
+#include <rtems/freebsd/ctype.h>
+#include <rtems/freebsd/errno.h>
+#include <rtems/freebsd/sys/types.h>
+#include <rtems/freebsd/stdio.h>
+#include <rtems/freebsd/string.h>
+#endif
+
+#include <rtems/freebsd/netinet/in_systm.h>
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/ip.h>
+#include <rtems/freebsd/netinet/tcp.h>
+
+#ifdef _KERNEL
+#include <rtems/freebsd/netinet/libalias/alias.h>
+#include <rtems/freebsd/netinet/libalias/alias_local.h>
+#include <rtems/freebsd/netinet/libalias/alias_mod.h>
+#else
+#include <rtems/freebsd/local/alias_local.h>
+#include <rtems/freebsd/local/alias_mod.h>
+#endif
+
+#define FTP_CONTROL_PORT_NUMBER 21
+
+static void
+AliasHandleFtpOut(struct libalias *, struct ip *, struct alias_link *,
+ int maxpacketsize);
+
+static int
+fingerprint(struct libalias *la, struct alias_data *ah)
+{
+
+ if (ah->dport == NULL || ah->sport == NULL || ah->lnk == NULL ||
+ ah->maxpktsize == 0)
+ return (-1);
+ if (ntohs(*ah->dport) == FTP_CONTROL_PORT_NUMBER
+ || ntohs(*ah->sport) == FTP_CONTROL_PORT_NUMBER)
+ return (0);
+ return (-1);
+}
+
+static int
+protohandler(struct libalias *la, struct ip *pip, struct alias_data *ah)
+{
+
+ AliasHandleFtpOut(la, pip, ah->lnk, ah->maxpktsize);
+ return (0);
+}
+
+struct proto_handler handlers[] = {
+ {
+ .pri = 80,
+ .dir = OUT,
+ .proto = TCP,
+ .fingerprint = &fingerprint,
+ .protohandler = &protohandler
+ },
+ { EOH }
+};
+
+static int
+mod_handler(module_t mod, int type, void *data)
+{
+ int error;
+
+ switch (type) {
+ case MOD_LOAD:
+ error = 0;
+ LibAliasAttachHandlers(handlers);
+ break;
+ case MOD_UNLOAD:
+ error = 0;
+ LibAliasDetachHandlers(handlers);
+ break;
+ default:
+ error = EINVAL;
+ }
+ return (error);
+}
+
+#ifdef _KERNEL
+static
+#endif
+moduledata_t alias_mod = {
+ "alias_ftp", mod_handler, NULL
+};
+
+#ifdef _KERNEL
+DECLARE_MODULE(alias_ftp, alias_mod, SI_SUB_DRIVERS, SI_ORDER_SECOND);
+MODULE_VERSION(alias_ftp, 1);
+MODULE_DEPEND(alias_ftp, libalias, 1, 1, 1);
+#endif
+
+#define MAX_MESSAGE_SIZE 128
+
+/* FTP protocol flags. */
+#define WAIT_CRLF 0x01
+
+enum ftp_message_type {
+ FTP_PORT_COMMAND,
+ FTP_EPRT_COMMAND,
+ FTP_227_REPLY,
+ FTP_229_REPLY,
+ FTP_UNKNOWN_MESSAGE
+};
+
+static int ParseFtpPortCommand(struct libalias *la, char *, int);
+static int ParseFtpEprtCommand(struct libalias *la, char *, int);
+static int ParseFtp227Reply(struct libalias *la, char *, int);
+static int ParseFtp229Reply(struct libalias *la, char *, int);
+static void NewFtpMessage(struct libalias *la, struct ip *, struct alias_link *, int, int);
+
+static void
+AliasHandleFtpOut(
+ struct libalias *la,
+ struct ip *pip, /* IP packet to examine/patch */
+ struct alias_link *lnk, /* The link to go through (aliased port) */
+ int maxpacketsize /* The maximum size this packet can grow to
+ (including headers) */ )
+{
+ int hlen, tlen, dlen, pflags;
+ char *sptr;
+ struct tcphdr *tc;
+ int ftp_message_type;
+
+/* Calculate data length of TCP packet */
+ tc = (struct tcphdr *)ip_next(pip);
+ hlen = (pip->ip_hl + tc->th_off) << 2;
+ tlen = ntohs(pip->ip_len);
+ dlen = tlen - hlen;
+
+/* Place string pointer and beginning of data */
+ sptr = (char *)pip;
+ sptr += hlen;
+
+/*
+ * Check that data length is not too long and previous message was
+ * properly terminated with CRLF.
+ */
+ pflags = GetProtocolFlags(lnk);
+ if (dlen <= MAX_MESSAGE_SIZE && !(pflags & WAIT_CRLF)) {
+ ftp_message_type = FTP_UNKNOWN_MESSAGE;
+
+ if (ntohs(tc->th_dport) == FTP_CONTROL_PORT_NUMBER) {
+/*
+ * When aliasing a client, check for the PORT/EPRT command.
+ */
+ if (ParseFtpPortCommand(la, sptr, dlen))
+ ftp_message_type = FTP_PORT_COMMAND;
+ else if (ParseFtpEprtCommand(la, sptr, dlen))
+ ftp_message_type = FTP_EPRT_COMMAND;
+ } else {
+/*
+ * When aliasing a server, check for the 227/229 reply.
+ */
+ if (ParseFtp227Reply(la, sptr, dlen))
+ ftp_message_type = FTP_227_REPLY;
+ else if (ParseFtp229Reply(la, sptr, dlen)) {
+ ftp_message_type = FTP_229_REPLY;
+ la->true_addr.s_addr = pip->ip_src.s_addr;
+ }
+ }
+
+ if (ftp_message_type != FTP_UNKNOWN_MESSAGE)
+ NewFtpMessage(la, pip, lnk, maxpacketsize, ftp_message_type);
+ }
+/* Track the msgs which are CRLF term'd for PORT/PASV FW breach */
+
+ if (dlen) { /* only if there's data */
+ sptr = (char *)pip; /* start over at beginning */
+ tlen = ntohs(pip->ip_len); /* recalc tlen, pkt may
+ * have grown */
+ if (sptr[tlen - 2] == '\r' && sptr[tlen - 1] == '\n')
+ pflags &= ~WAIT_CRLF;
+ else
+ pflags |= WAIT_CRLF;
+ SetProtocolFlags(lnk, pflags);
+ }
+}
+
+static int
+ParseFtpPortCommand(struct libalias *la, char *sptr, int dlen)
+{
+ char ch;
+ int i, state;
+ u_int32_t addr;
+ u_short port;
+ u_int8_t octet;
+
+ /* Format: "PORT A,D,D,R,PO,RT". */
+
+ /* Return if data length is too short. */
+ if (dlen < 18)
+ return (0);
+
+ if (strncasecmp("PORT ", sptr, 5))
+ return (0);
+
+ addr = port = octet = 0;
+ state = 0;
+ for (i = 5; i < dlen; i++) {
+ ch = sptr[i];
+ switch (state) {
+		case 0:
+			if (isspace(ch))
+				break;
+			else
+				state++;
+			/* FALLTHROUGH */
+ case 1:
+ case 3:
+ case 5:
+ case 7:
+ case 9:
+ case 11:
+ if (isdigit(ch)) {
+ octet = ch - '0';
+ state++;
+ } else
+ return (0);
+ break;
+ case 2:
+ case 4:
+ case 6:
+ case 8:
+ if (isdigit(ch))
+ octet = 10 * octet + ch - '0';
+ else if (ch == ',') {
+ addr = (addr << 8) + octet;
+ state++;
+ } else
+ return (0);
+ break;
+ case 10:
+ case 12:
+ if (isdigit(ch))
+ octet = 10 * octet + ch - '0';
+ else if (ch == ',' || state == 12) {
+ port = (port << 8) + octet;
+ state++;
+ } else
+ return (0);
+ break;
+ }
+ }
+
+ if (state == 13) {
+ la->true_addr.s_addr = htonl(addr);
+ la->true_port = port;
+ return (1);
+ } else
+ return (0);
+}
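+
+/*
+ * Worked example (editorial): "PORT 192,168,0,5,4,1\r\n" drives the
+ * state machine to state 13, leaving true_addr = 192.168.0.5 and
+ * true_port = 4 * 256 + 1 = 1025.
+ */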
+
+static int
+ParseFtpEprtCommand(struct libalias *la, char *sptr, int dlen)
+{
+ char ch, delim;
+ int i, state;
+ u_int32_t addr;
+ u_short port;
+ u_int8_t octet;
+
+ /* Format: "EPRT |1|A.D.D.R|PORT|". */
+
+ /* Return if data length is too short. */
+ if (dlen < 18)
+ return (0);
+
+ if (strncasecmp("EPRT ", sptr, 5))
+ return (0);
+
+ addr = port = octet = 0;
+ delim = '|'; /* XXX gcc -Wuninitialized */
+ state = 0;
+ for (i = 5; i < dlen; i++) {
+ ch = sptr[i];
+ switch (state) {
+ case 0:
+ if (!isspace(ch)) {
+ delim = ch;
+ state++;
+ }
+ break;
+ case 1:
+ if (ch == '1') /* IPv4 address */
+ state++;
+ else
+ return (0);
+ break;
+ case 2:
+ if (ch == delim)
+ state++;
+ else
+ return (0);
+ break;
+ case 3:
+ case 5:
+ case 7:
+ case 9:
+ if (isdigit(ch)) {
+ octet = ch - '0';
+ state++;
+ } else
+ return (0);
+ break;
+ case 4:
+ case 6:
+ case 8:
+ case 10:
+ if (isdigit(ch))
+ octet = 10 * octet + ch - '0';
+ else if (ch == '.' || state == 10) {
+ addr = (addr << 8) + octet;
+ state++;
+ } else
+ return (0);
+ break;
+ case 11:
+ if (isdigit(ch)) {
+ port = ch - '0';
+ state++;
+ } else
+ return (0);
+ break;
+ case 12:
+ if (isdigit(ch))
+ port = 10 * port + ch - '0';
+ else if (ch == delim)
+ state++;
+ else
+ return (0);
+ break;
+ }
+ }
+
+ if (state == 13) {
+ la->true_addr.s_addr = htonl(addr);
+ la->true_port = port;
+ return (1);
+ } else
+ return (0);
+}
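+
+/*
+ * Worked example (editorial): "EPRT |1|192.168.0.5|1025|\r\n" likewise
+ * ends in state 13 with true_addr = 192.168.0.5 and true_port = 1025;
+ * unlike PORT, the port is a plain decimal number rather than two
+ * 8-bit fields.
+ */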
+
+static int
+ParseFtp227Reply(struct libalias *la, char *sptr, int dlen)
+{
+ char ch;
+ int i, state;
+ u_int32_t addr;
+ u_short port;
+ u_int8_t octet;
+
+ /* Format: "227 Entering Passive Mode (A,D,D,R,PO,RT)" */
+
+ /* Return if data length is too short. */
+ if (dlen < 17)
+ return (0);
+
+ if (strncmp("227 ", sptr, 4))
+ return (0);
+
+ addr = port = octet = 0;
+
+ state = 0;
+ for (i = 4; i < dlen; i++) {
+ ch = sptr[i];
+ switch (state) {
+ case 0:
+ if (ch == '(')
+ state++;
+ break;
+ case 1:
+ case 3:
+ case 5:
+ case 7:
+ case 9:
+ case 11:
+ if (isdigit(ch)) {
+ octet = ch - '0';
+ state++;
+ } else
+ return (0);
+ break;
+ case 2:
+ case 4:
+ case 6:
+ case 8:
+ if (isdigit(ch))
+ octet = 10 * octet + ch - '0';
+ else if (ch == ',') {
+ addr = (addr << 8) + octet;
+ state++;
+ } else
+ return (0);
+ break;
+ case 10:
+ case 12:
+ if (isdigit(ch))
+ octet = 10 * octet + ch - '0';
+ else if (ch == ',' || (state == 12 && ch == ')')) {
+ port = (port << 8) + octet;
+ state++;
+ } else
+ return (0);
+ break;
+ }
+ }
+
+ if (state == 13) {
+ la->true_port = port;
+ la->true_addr.s_addr = htonl(addr);
+ return (1);
+ } else
+ return (0);
+}
+
+static int
+ParseFtp229Reply(struct libalias *la, char *sptr, int dlen)
+{
+ char ch, delim;
+ int i, state;
+ u_short port;
+
+ /* Format: "229 Entering Extended Passive Mode (|||PORT|)" */
+
+ /* Return if data length is too short. */
+ if (dlen < 11)
+ return (0);
+
+ if (strncmp("229 ", sptr, 4))
+ return (0);
+
+ port = 0;
+ delim = '|'; /* XXX gcc -Wuninitialized */
+
+ state = 0;
+ for (i = 4; i < dlen; i++) {
+ ch = sptr[i];
+ switch (state) {
+ case 0:
+ if (ch == '(')
+ state++;
+ break;
+ case 1:
+ delim = ch;
+ state++;
+ break;
+ case 2:
+ case 3:
+ if (ch == delim)
+ state++;
+ else
+ return (0);
+ break;
+ case 4:
+ if (isdigit(ch)) {
+ port = ch - '0';
+ state++;
+ } else
+ return (0);
+ break;
+ case 5:
+ if (isdigit(ch))
+ port = 10 * port + ch - '0';
+ else if (ch == delim)
+ state++;
+ else
+ return (0);
+ break;
+ case 6:
+ if (ch == ')')
+ state++;
+ else
+ return (0);
+ break;
+ }
+ }
+
+ if (state == 7) {
+ la->true_port = port;
+ return (1);
+ } else
+ return (0);
+}
+
+static void
+NewFtpMessage(struct libalias *la, struct ip *pip,
+ struct alias_link *lnk,
+ int maxpacketsize,
+ int ftp_message_type)
+{
+ struct alias_link *ftp_lnk;
+
+/* Security checks. */
+ if (pip->ip_src.s_addr != la->true_addr.s_addr)
+ return;
+
+ if (la->true_port < IPPORT_RESERVED)
+ return;
+
+/* Establish link to address and port found in FTP control message. */
+ ftp_lnk = FindUdpTcpOut(la, la->true_addr, GetDestAddress(lnk),
+ htons(la->true_port), 0, IPPROTO_TCP, 1);
+
+ if (ftp_lnk != NULL) {
+ int slen, hlen, tlen, dlen;
+ struct tcphdr *tc;
+
+#ifndef NO_FW_PUNCH
+ /* Punch hole in firewall */
+ PunchFWHole(ftp_lnk);
+#endif
+
+/* Calculate data length of TCP packet */
+ tc = (struct tcphdr *)ip_next(pip);
+ hlen = (pip->ip_hl + tc->th_off) << 2;
+ tlen = ntohs(pip->ip_len);
+ dlen = tlen - hlen;
+
+/* Create new FTP message. */
+ {
+ char stemp[MAX_MESSAGE_SIZE + 1];
+ char *sptr;
+ u_short alias_port;
+ u_char *ptr;
+ int a1, a2, a3, a4, p1, p2;
+ struct in_addr alias_address;
+
+/* Decompose alias address into quad format */
+ alias_address = GetAliasAddress(lnk);
+			ptr = (u_char *)&alias_address.s_addr;
+ a1 = *ptr++;
+ a2 = *ptr++;
+ a3 = *ptr++;
+ a4 = *ptr;
+
+ alias_port = GetAliasPort(ftp_lnk);
+
+/* Prepare new command */
+ switch (ftp_message_type) {
+ case FTP_PORT_COMMAND:
+ case FTP_227_REPLY:
+ /* Decompose alias port into pair format. */
+				ptr = (u_char *)&alias_port;
+ p1 = *ptr++;
+ p2 = *ptr;
+
+ if (ftp_message_type == FTP_PORT_COMMAND) {
+ /* Generate PORT command string. */
+ sprintf(stemp, "PORT %d,%d,%d,%d,%d,%d\r\n",
+ a1, a2, a3, a4, p1, p2);
+ } else {
+ /* Generate 227 reply string. */
+ sprintf(stemp,
+ "227 Entering Passive Mode (%d,%d,%d,%d,%d,%d)\r\n",
+ a1, a2, a3, a4, p1, p2);
+ }
+ break;
+ case FTP_EPRT_COMMAND:
+ /* Generate EPRT command string. */
+ sprintf(stemp, "EPRT |1|%d.%d.%d.%d|%d|\r\n",
+ a1, a2, a3, a4, ntohs(alias_port));
+ break;
+ case FTP_229_REPLY:
+ /* Generate 229 reply string. */
+ sprintf(stemp, "229 Entering Extended Passive Mode (|||%d|)\r\n",
+ ntohs(alias_port));
+ break;
+ }
+
+/* Save string length for IP header modification */
+ slen = strlen(stemp);
+
+/* Copy modified buffer into IP packet. */
+ sptr = (char *)pip;
+ sptr += hlen;
+ strncpy(sptr, stemp, maxpacketsize - hlen);
+ }
+
+/* Save information regarding modified seq and ack numbers */
+ {
+ int delta;
+
+ SetAckModified(lnk);
+ tc = (struct tcphdr *)ip_next(pip);
+ delta = GetDeltaSeqOut(tc->th_seq, lnk);
+ AddSeq(lnk, delta + slen - dlen, pip->ip_hl,
+ pip->ip_len, tc->th_seq, tc->th_off);
+ }
+
+/* Revise IP header */
+ {
+ u_short new_len;
+
+ new_len = htons(hlen + slen);
+ DifferentialChecksum(&pip->ip_sum,
+ &new_len,
+ &pip->ip_len,
+ 1);
+ pip->ip_len = new_len;
+ }
+
+/* Compute TCP checksum for revised packet */
+ tc->th_sum = 0;
+#ifdef _KERNEL
+ tc->th_x2 = 1;
+#else
+ tc->th_sum = TcpChecksum(pip);
+#endif
+ } else {
+#ifdef LIBALIAS_DEBUG
+ fprintf(stderr,
+ "PacketAlias/HandleFtpOut: Cannot allocate FTP data port\n");
+#endif
+ }
+}
diff --git a/rtems/freebsd/netinet/libalias/alias_irc.c b/rtems/freebsd/netinet/libalias/alias_irc.c
new file mode 100644
index 00000000..fd2f0763
--- /dev/null
+++ b/rtems/freebsd/netinet/libalias/alias_irc.c
@@ -0,0 +1,490 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 2001 Charles Mott <cm@linktel.net>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/* Alias_irc.c intercepts packets containing IRC CTCP commands, and
+ changes DCC commands to export a port on the aliasing host instead
+ of an aliased host.
+
+ For this routine to work, the DCC command must fit entirely into a
+ single TCP packet. This will usually happen, but is not
+ guaranteed.
+
+ The interception is likely to change the length of the packet.
+ The handling of this is copied more-or-less verbatim from
+   alias_ftp.c.
+
+ Initial version: Eivind Eklund <perhaps@yes.no> (ee) 97-01-29
+
+ Version 2.1: May, 1997 (cjm)
+ Very minor changes to conform with
+ local/global/function naming conventions
+	within the packet aliasing module.
+*/
+
+/* Includes */
+#ifdef _KERNEL
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/ctype.h>
+#include <rtems/freebsd/sys/limits.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/module.h>
+#else
+#include <rtems/freebsd/ctype.h>
+#include <rtems/freebsd/errno.h>
+#include <rtems/freebsd/sys/types.h>
+#include <rtems/freebsd/stdio.h>
+#include <rtems/freebsd/stdlib.h>
+#include <rtems/freebsd/string.h>
+#include <rtems/freebsd/limits.h>
+#endif
+
+#include <rtems/freebsd/netinet/in_systm.h>
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/ip.h>
+#include <rtems/freebsd/netinet/tcp.h>
+
+#ifdef _KERNEL
+#include <rtems/freebsd/netinet/libalias/alias.h>
+#include <rtems/freebsd/netinet/libalias/alias_local.h>
+#include <rtems/freebsd/netinet/libalias/alias_mod.h>
+#else
+#include <rtems/freebsd/local/alias_local.h>
+#include <rtems/freebsd/local/alias_mod.h>
+#endif
+
+#define IRC_CONTROL_PORT_NUMBER_1 6667
+#define IRC_CONTROL_PORT_NUMBER_2 6668
+
+#define PKTSIZE (IP_MAXPACKET + 1)
+char *newpacket;
+
+/* Local defines */
+#define DBprintf(a)
+
+static void
+AliasHandleIrcOut(struct libalias *, struct ip *, struct alias_link *,
+ int maxpacketsize);
+
+static int
+fingerprint(struct libalias *la, struct alias_data *ah)
+{
+
+	if (ah->dport == NULL || ah->sport == NULL || ah->lnk == NULL ||
+ ah->maxpktsize == 0)
+ return (-1);
+ if (ntohs(*ah->dport) == IRC_CONTROL_PORT_NUMBER_1
+ || ntohs(*ah->dport) == IRC_CONTROL_PORT_NUMBER_2)
+ return (0);
+ return (-1);
+}
+
+static int
+protohandler(struct libalias *la, struct ip *pip, struct alias_data *ah)
+{
+
+ newpacket = malloc(PKTSIZE);
+ if (newpacket) {
+ AliasHandleIrcOut(la, pip, ah->lnk, ah->maxpktsize);
+ free(newpacket);
+ }
+ return (0);
+}
+
+struct proto_handler handlers[] = {
+ {
+ .pri = 90,
+ .dir = OUT,
+ .proto = TCP,
+ .fingerprint = &fingerprint,
+ .protohandler = &protohandler
+ },
+ { EOH }
+};
+
+static int
+mod_handler(module_t mod, int type, void *data)
+{
+ int error;
+
+ switch (type) {
+ case MOD_LOAD:
+ error = 0;
+ LibAliasAttachHandlers(handlers);
+ break;
+ case MOD_UNLOAD:
+ error = 0;
+ LibAliasDetachHandlers(handlers);
+ break;
+ default:
+ error = EINVAL;
+ }
+ return (error);
+}
+
+#ifdef _KERNEL
+static
+#endif
+moduledata_t alias_mod = {
+ "alias_irc", mod_handler, NULL
+};
+
+/* Kernel module definition. */
+#ifdef _KERNEL
+DECLARE_MODULE(alias_irc, alias_mod, SI_SUB_DRIVERS, SI_ORDER_SECOND);
+MODULE_VERSION(alias_irc, 1);
+MODULE_DEPEND(alias_irc, libalias, 1, 1, 1);
+#endif
+
+static void
+AliasHandleIrcOut(struct libalias *la,
+ struct ip *pip, /* IP packet to examine */
+ struct alias_link *lnk, /* Which link are we on? */
+ int maxsize /* Maximum size of IP packet including
+ * headers */
+)
+{
+ int hlen, tlen, dlen;
+ struct in_addr true_addr;
+ u_short true_port;
+ char *sptr;
+ struct tcphdr *tc;
+ int i; /* Iterator through the source */
+
+/* Calculate data length of TCP packet */
+ tc = (struct tcphdr *)ip_next(pip);
+ hlen = (pip->ip_hl + tc->th_off) << 2;
+ tlen = ntohs(pip->ip_len);
+ dlen = tlen - hlen;
+
+ /*
+ * Return if data length is too short - assume an entire PRIVMSG in
+ * each packet.
+ */
+ if (dlen < (int)sizeof(":A!a@n.n PRIVMSG A :aDCC 1 1a") - 1)
+ return;
+
+/* Place string pointer at beginning of data */
+ sptr = (char *)pip;
+ sptr += hlen;
+ maxsize -= hlen; /* We're interested in maximum size of
+ * data, not packet */
+
+ /* Search for a CTCP command [Note 1] */
+ for (i = 0; i < dlen; i++) {
+ if (sptr[i] == '\001')
+ goto lFOUND_CTCP;
+ }
+	return;			/* No CTCP commands in packet */
+ /* Handle CTCP commands - the buffer may have to be copied */
+lFOUND_CTCP:
+ {
+ unsigned int copyat = i;
+ unsigned int iCopy = 0; /* How much data have we written to
+ * copy-back string? */
+ unsigned long org_addr; /* Original IP address */
+ unsigned short org_port; /* Original source port
+ * address */
+
+lCTCP_START:
+ if (i >= dlen || iCopy >= PKTSIZE)
+ goto lPACKET_DONE;
+ newpacket[iCopy++] = sptr[i++]; /* Copy the CTCP start
+ * character */
+ /* Start of a CTCP */
+ if (i + 4 >= dlen) /* Too short for DCC */
+ goto lBAD_CTCP;
+ if (sptr[i + 0] != 'D')
+ goto lBAD_CTCP;
+ if (sptr[i + 1] != 'C')
+ goto lBAD_CTCP;
+ if (sptr[i + 2] != 'C')
+ goto lBAD_CTCP;
+ if (sptr[i + 3] != ' ')
+ goto lBAD_CTCP;
+ /* We have a DCC command - handle it! */
+ i += 4; /* Skip "DCC " */
+ if (iCopy + 4 > PKTSIZE)
+ goto lPACKET_DONE;
+ newpacket[iCopy++] = 'D';
+ newpacket[iCopy++] = 'C';
+ newpacket[iCopy++] = 'C';
+ newpacket[iCopy++] = ' ';
+
+ DBprintf(("Found DCC\n"));
+ /*
+ * Skip any extra spaces (should not occur according to
+		 * protocol, but DCC breaks CTCP protocol anyway)
+ */
+ while (sptr[i] == ' ') {
+ if (++i >= dlen) {
+ DBprintf(("DCC packet terminated in just spaces\n"));
+ goto lPACKET_DONE;
+ }
+ }
+
+ DBprintf(("Transferring command...\n"));
+ while (sptr[i] != ' ') {
+ newpacket[iCopy++] = sptr[i];
+ if (++i >= dlen || iCopy >= PKTSIZE) {
+ DBprintf(("DCC packet terminated during command\n"));
+ goto lPACKET_DONE;
+ }
+ }
+ /* Copy _one_ space */
+ if (i + 1 < dlen && iCopy < PKTSIZE)
+ newpacket[iCopy++] = sptr[i++];
+
+ DBprintf(("Done command - removing spaces\n"));
+ /*
+ * Skip any extra spaces (should not occur according to
+		 * protocol, but DCC breaks CTCP protocol anyway)
+ */
+ while (sptr[i] == ' ') {
+ if (++i >= dlen) {
+ DBprintf(("DCC packet terminated in just spaces (post-command)\n"));
+ goto lPACKET_DONE;
+ }
+ }
+
+ DBprintf(("Transferring filename...\n"));
+ while (sptr[i] != ' ') {
+ newpacket[iCopy++] = sptr[i];
+ if (++i >= dlen || iCopy >= PKTSIZE) {
+ DBprintf(("DCC packet terminated during filename\n"));
+ goto lPACKET_DONE;
+ }
+ }
+ /* Copy _one_ space */
+ if (i + 1 < dlen && iCopy < PKTSIZE)
+ newpacket[iCopy++] = sptr[i++];
+
+ DBprintf(("Done filename - removing spaces\n"));
+ /*
+ * Skip any extra spaces (should not occur according to
+		 * protocol, but DCC breaks CTCP protocol anyway)
+ */
+ while (sptr[i] == ' ') {
+ if (++i >= dlen) {
+ DBprintf(("DCC packet terminated in just spaces (post-filename)\n"));
+ goto lPACKET_DONE;
+ }
+ }
+
+ DBprintf(("Fetching IP address\n"));
+ /* Fetch IP address */
+ org_addr = 0;
+ while (i < dlen && isdigit(sptr[i])) {
+ if (org_addr > ULONG_MAX / 10UL) { /* Terminate on overflow */
+ DBprintf(("DCC Address overflow (org_addr == 0x%08lx, next char %c\n", org_addr, sptr[i]));
+ goto lBAD_CTCP;
+ }
+ org_addr *= 10;
+ org_addr += sptr[i++] - '0';
+ }
+ DBprintf(("Skipping space\n"));
+ if (i + 1 >= dlen || sptr[i] != ' ') {
+ DBprintf(("Overflow (%d >= %d) or bad character (%02x) terminating IP address\n", i + 1, dlen, sptr[i]));
+ goto lBAD_CTCP;
+ }
+ /*
+ * Skip any extra spaces (should not occur according to
+ * protocol, but DCC breaks CTCP protocol anyway, so we
+		 * might as well play it safe)
+ */
+ while (sptr[i] == ' ') {
+ if (++i >= dlen) {
+ DBprintf(("Packet failure - space overflow.\n"));
+ goto lPACKET_DONE;
+ }
+ }
+ DBprintf(("Fetching port number\n"));
+ /* Fetch source port */
+ org_port = 0;
+ while (i < dlen && isdigit(sptr[i])) {
+ if (org_port > 6554) { /* Terminate on overflow
+						 * (65536/10 rounded up) */
+ DBprintf(("DCC: port number overflow\n"));
+ goto lBAD_CTCP;
+ }
+ org_port *= 10;
+ org_port += sptr[i++] - '0';
+ }
+ /* Skip illegal addresses (or early termination) */
+ if (i >= dlen || (sptr[i] != '\001' && sptr[i] != ' ')) {
+ DBprintf(("Bad port termination\n"));
+ goto lBAD_CTCP;
+ }
+ DBprintf(("Got IP %lu and port %u\n", org_addr, (unsigned)org_port));
+
+ /* We've got the address and port - now alias it */
+ {
+ struct alias_link *dcc_lnk;
+ struct in_addr destaddr;
+
+
+ true_port = htons(org_port);
+ true_addr.s_addr = htonl(org_addr);
+ destaddr.s_addr = 0;
+
+ /* Sanity/Security checking */
+ if (!org_addr || !org_port ||
+ pip->ip_src.s_addr != true_addr.s_addr ||
+ org_port < IPPORT_RESERVED)
+ goto lBAD_CTCP;
+
+ /*
+ * Steal the FTP_DATA_PORT - it doesn't really
+ * matter, and this would probably allow it through
+ * at least _some_ firewalls.
+ */
+ dcc_lnk = FindUdpTcpOut(la, true_addr, destaddr,
+ true_port, 0,
+ IPPROTO_TCP, 1);
+ DBprintf(("Got a DCC link\n"));
+ if (dcc_lnk) {
+ struct in_addr alias_address; /* Address from aliasing */
+ u_short alias_port; /* Port given by
+ * aliasing */
+ int n;
+
+#ifndef NO_FW_PUNCH
+ /* Generate firewall hole as appropriate */
+ PunchFWHole(dcc_lnk);
+#endif
+
+ alias_address = GetAliasAddress(lnk);
+ n = snprintf(&newpacket[iCopy],
+ PKTSIZE - iCopy,
+ "%lu ", (u_long) htonl(alias_address.s_addr));
+ if (n < 0) {
+ DBprintf(("DCC packet construct failure.\n"));
+ goto lBAD_CTCP;
+ }
+ if ((iCopy += n) >= PKTSIZE) { /* Truncated/fit exactly
+ * - bad news */
+ DBprintf(("DCC constructed packet overflow.\n"));
+ goto lBAD_CTCP;
+ }
+ alias_port = GetAliasPort(dcc_lnk);
+ n = snprintf(&newpacket[iCopy],
+ PKTSIZE - iCopy,
+ "%u", htons(alias_port));
+ if (n < 0) {
+ DBprintf(("DCC packet construct failure.\n"));
+ goto lBAD_CTCP;
+ }
+ iCopy += n;
+ /*
+ * Done - truncated cases will be taken
+ * care of by lBAD_CTCP
+ */
+ DBprintf(("Aliased IP %lu and port %u\n", alias_address.s_addr, (unsigned)alias_port));
+ }
+ }
+ /*
+ * An uninteresting CTCP - state entered right after '\001'
+ * has been pushed. Also used to copy the rest of a DCC,
+ * after IP address and port has been handled
+ */
+lBAD_CTCP:
+ for (; i < dlen && iCopy < PKTSIZE; i++, iCopy++) {
+ newpacket[iCopy] = sptr[i]; /* Copy CTCP unchanged */
+ if (sptr[i] == '\001') {
+ goto lNORMAL_TEXT;
+ }
+ }
+ goto lPACKET_DONE;
+ /* Normal text */
+lNORMAL_TEXT:
+ for (; i < dlen && iCopy < PKTSIZE; i++, iCopy++) {
+ newpacket[iCopy] = sptr[i]; /* Copy CTCP unchanged */
+ if (sptr[i] == '\001') {
+ goto lCTCP_START;
+ }
+ }
+ /* Handle the end of a packet */
+lPACKET_DONE:
+ iCopy = iCopy > maxsize - copyat ? maxsize - copyat : iCopy;
+ memcpy(sptr + copyat, newpacket, iCopy);
+
+/* Save information regarding modified seq and ack numbers */
+ {
+ int delta;
+
+ SetAckModified(lnk);
+ tc = (struct tcphdr *)ip_next(pip);
+ delta = GetDeltaSeqOut(tc->th_seq, lnk);
+ AddSeq(lnk, delta + copyat + iCopy - dlen, pip->ip_hl,
+ pip->ip_len, tc->th_seq, tc->th_off);
+ }
+
+ /* Revise IP header */
+ {
+ u_short new_len;
+
+ new_len = htons(hlen + iCopy + copyat);
+ DifferentialChecksum(&pip->ip_sum,
+ &new_len,
+ &pip->ip_len,
+ 1);
+ pip->ip_len = new_len;
+ }
+
+ /* Compute TCP checksum for revised packet */
+ tc->th_sum = 0;
+#ifdef _KERNEL
+ tc->th_x2 = 1;
+#else
+ tc->th_sum = TcpChecksum(pip);
+#endif
+ return;
+ }
+}
+
+/* Notes:
+ [Note 1]
+ The initial search will most often fail; it could be replaced with a 32-bit specific search.
+ Such a search would be done for 32-bit unsigned value V:
+ V ^= 0x01010101; (Search is for null bytes)
+ if( ((V-0x01010101)^V) & 0x80808080 ) {
+   (found a null byte which was a 0x01 byte)
+ }
+   To assert that the processor is 32-bit, do
+ extern int ircdccar[32]; (32 bits)
+ extern int ircdccar[CHAR_BIT*sizeof(unsigned int)];
+ which will generate a type-error on all but 32-bit machines.
+
+ [Note 2] This routine really ought to be replaced with one that
+   creates a transparent proxy on the aliasing host, to allow arbitrary
+ changes in the TCP stream. This should not be too difficult given
+ this base; I (ee) will try to do this some time later.
+ */
diff --git a/rtems/freebsd/netinet/libalias/alias_local.h b/rtems/freebsd/netinet/libalias/alias_local.h
new file mode 100644
index 00000000..c6e2b0b8
--- /dev/null
+++ b/rtems/freebsd/netinet/libalias/alias_local.h
@@ -0,0 +1,397 @@
+/*-
+ * Copyright (c) 2001 Charles Mott <cm@linktel.net>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * Alias_local.h contains the function prototypes for alias.c,
+ * alias_db.c, alias_util.c, alias_ftp.c and alias_irc.c (as well
+ * as any future add-ons).  It also includes macros, globals and
+ * struct definitions shared by more than one alias*.c file.
+ *
+ * This include file is intended to be used only within the aliasing
+ * software. Outside world interfaces are defined in alias.h
+ *
+ * This software is placed into the public domain with no restrictions
+ * on its distribution.
+ *
+ * Initial version: August, 1996 (cjm)
+ *
+ * <updated several times by original author and Eivind Eklund>
+ */
+
+#ifndef _ALIAS_LOCAL_HH_
+#define _ALIAS_LOCAL_HH_
+
+#include <rtems/freebsd/sys/types.h>
+#include <rtems/freebsd/sys/sysctl.h>
+
+#ifdef _KERNEL
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/mutex.h>
+
+/* XXX: LibAliasSetTarget() uses this constant. */
+#define INADDR_NONE 0xffffffff
+
+#include <rtems/freebsd/netinet/libalias/alias_sctp.h>
+#else
+#include <rtems/freebsd/local/alias_sctp.h>
+#endif
+
+/* Sizes of input and output link tables */
+#define LINK_TABLE_OUT_SIZE 4001
+#define LINK_TABLE_IN_SIZE 4001
+
+struct proxy_entry;
+
+struct libalias {
+ LIST_ENTRY(libalias) instancelist;
+
+ int packetAliasMode; /* Mode flags */
+ /* - documented in alias.h */
+
+ struct in_addr aliasAddress; /* Address written onto source */
+ /* field of IP packet. */
+
+ struct in_addr targetAddress; /* IP address incoming packets */
+ /* are sent to if no aliasing */
+ /* link already exists */
+
+ struct in_addr nullAddress; /* Used as a dummy parameter for */
+ /* some function calls */
+
+ LIST_HEAD (, alias_link) linkTableOut[LINK_TABLE_OUT_SIZE];
+ /* Lookup table of pointers to */
+ /* chains of link records. Each */
+
+ LIST_HEAD (, alias_link) linkTableIn[LINK_TABLE_IN_SIZE];
+ /* link record is doubly indexed */
+ /* into input and output lookup */
+ /* tables. */
+
+ /* Link statistics */
+ int icmpLinkCount;
+ int udpLinkCount;
+ int tcpLinkCount;
+ int pptpLinkCount;
+ int protoLinkCount;
+ int fragmentIdLinkCount;
+ int fragmentPtrLinkCount;
+ int sockCount;
+
+ int cleanupIndex; /* Index to chain of link table */
+ /* being inspected for old links */
+
+ int timeStamp; /* System time in seconds for */
+ /* current packet */
+
+	int lastCleanupTime;	/* Last time IncrementalCleanup() */
+				/* was called */
+
+ int deleteAllLinks; /* If equal to zero, DeleteLink() */
+ /* will not remove permanent links */
+
+ /* log descriptor */
+#ifdef _KERNEL
+ char *logDesc;
+#else
+ FILE *logDesc;
+#endif
+ /* statistics monitoring */
+
+ int newDefaultLink; /* Indicates if a new aliasing */
+ /* link has been created after a */
+ /* call to PacketAliasIn/Out(). */
+
+#ifndef NO_FW_PUNCH
+ int fireWallFD; /* File descriptor to be able to */
+ /* control firewall. Opened by */
+ /* PacketAliasSetMode on first */
+ /* setting the PKT_ALIAS_PUNCH_FW */
+ /* flag. */
+ int fireWallBaseNum; /* The first firewall entry
+ * free for our use */
+ int fireWallNumNums; /* How many entries can we
+ * use? */
+ int fireWallActiveNum; /* Which entry did we last
+ * use? */
+ char *fireWallField; /* bool array for entries */
+#endif
+
+ unsigned int skinnyPort; /* TCP port used by the Skinny */
+ /* protocol. */
+
+ struct proxy_entry *proxyList;
+
+ struct in_addr true_addr; /* in network byte order. */
+ u_short true_port; /* in host byte order. */
+
+ /*
+ * sctp code support
+ */
+
+ /* counts associations that have progressed to UP and not yet removed */
+ int sctpLinkCount;
+#ifdef _KERNEL
+ /* timing queue for keeping track of association timeouts */
+ struct sctp_nat_timer sctpNatTimer;
+
+ /* size of hash table used in this instance */
+ u_int sctpNatTableSize;
+
+/*
+ * local look up table sorted by l_vtag/l_port
+ */
+ LIST_HEAD(sctpNatTableL, sctp_nat_assoc) *sctpTableLocal;
+/*
+ * global look up table sorted by g_vtag/g_port
+ */
+ LIST_HEAD(sctpNatTableG, sctp_nat_assoc) *sctpTableGlobal;
+
+ /*
+ * avoid races in libalias: every public function has to use it.
+ */
+ struct mtx mutex;
+#endif
+};
+
+/* Macros */
+
+#ifdef _KERNEL
+#define LIBALIAS_LOCK_INIT(l) \
+ mtx_init(&l->mutex, "per-instance libalias mutex", NULL, MTX_DEF)
+#define LIBALIAS_LOCK_ASSERT(l) mtx_assert(&l->mutex, MA_OWNED)
+#define LIBALIAS_LOCK(l) mtx_lock(&l->mutex)
+#define LIBALIAS_UNLOCK(l) mtx_unlock(&l->mutex)
+#define LIBALIAS_LOCK_DESTROY(l) mtx_destroy(&l->mutex)
+#else
+#define LIBALIAS_LOCK_INIT(l)
+#define LIBALIAS_LOCK_ASSERT(l)
+#define LIBALIAS_LOCK(l)
+#define LIBALIAS_UNLOCK(l)
+#define LIBALIAS_LOCK_DESTROY(l)
+#endif
+
+/*
+ * The following macro is used to update an
+ * internet checksum. "delta" is a 32-bit
+ * accumulation of all the changes to the
+ * checksum (adding in new 16-bit words and
+ * subtracting out old words), and "cksum"
+ * is the checksum value to be updated.
+ */
+#define ADJUST_CHECKSUM(acc, cksum) \
+ do { \
+ acc += cksum; \
+ if (acc < 0) { \
+ acc = -acc; \
+ acc = (acc >> 16) + (acc & 0xffff); \
+ acc += acc >> 16; \
+ cksum = (u_short) ~acc; \
+ } else { \
+ acc = (acc >> 16) + (acc & 0xffff); \
+ acc += acc >> 16; \
+ cksum = (u_short) acc; \
+ } \
+ } while (0)
+
+
+/* Prototypes */
+
+/*
+ * SctpFunction prototypes
+ *
+ */
+void AliasSctpInit(struct libalias *la);
+void AliasSctpTerm(struct libalias *la);
+int SctpAlias(struct libalias *la, struct ip *ip, int direction);
+
+/*
+ * We do not calculate TCP checksums when libalias is a kernel
+ * module, since it has no idea about checksum offloading.
+ * If TCP data has changed, then we just set checksum to zero,
+ * and the caller must recalculate it.
+ * Should libalias ever edit UDP data, the same approach
+ * should be used.
+ */
+#ifndef _KERNEL
+u_short IpChecksum(struct ip *_pip);
+u_short TcpChecksum(struct ip *_pip);
+#endif
+void
+DifferentialChecksum(u_short * _cksum, void * _new, void * _old, int _n);
+
+/* Internal data access */
+struct alias_link *
+FindIcmpIn(struct libalias *la, struct in_addr _dst_addr, struct in_addr _alias_addr,
+ u_short _id_alias, int _create);
+struct alias_link *
+FindIcmpOut(struct libalias *la, struct in_addr _src_addr, struct in_addr _dst_addr,
+ u_short _id, int _create);
+struct alias_link *
+FindFragmentIn1(struct libalias *la, struct in_addr _dst_addr, struct in_addr _alias_addr,
+ u_short _ip_id);
+struct alias_link *
+FindFragmentIn2(struct libalias *la, struct in_addr _dst_addr, struct in_addr _alias_addr,
+ u_short _ip_id);
+struct alias_link *
+ AddFragmentPtrLink(struct libalias *la, struct in_addr _dst_addr, u_short _ip_id);
+struct alias_link *
+ FindFragmentPtr(struct libalias *la, struct in_addr _dst_addr, u_short _ip_id);
+struct alias_link *
+FindProtoIn(struct libalias *la, struct in_addr _dst_addr, struct in_addr _alias_addr,
+ u_char _proto);
+struct alias_link *
+FindProtoOut(struct libalias *la, struct in_addr _src_addr, struct in_addr _dst_addr,
+ u_char _proto);
+struct alias_link *
+FindUdpTcpIn(struct libalias *la, struct in_addr _dst_addr, struct in_addr _alias_addr,
+ u_short _dst_port, u_short _alias_port, u_char _proto, int _create);
+struct alias_link *
+FindUdpTcpOut(struct libalias *la, struct in_addr _src_addr, struct in_addr _dst_addr,
+ u_short _src_port, u_short _dst_port, u_char _proto, int _create);
+struct alias_link *
+AddPptp(struct libalias *la, struct in_addr _src_addr, struct in_addr _dst_addr,
+ struct in_addr _alias_addr, u_int16_t _src_call_id);
+struct alias_link *
+FindPptpOutByCallId(struct libalias *la, struct in_addr _src_addr,
+ struct in_addr _dst_addr, u_int16_t _src_call_id);
+struct alias_link *
+FindPptpInByCallId(struct libalias *la, struct in_addr _dst_addr,
+ struct in_addr _alias_addr, u_int16_t _dst_call_id);
+struct alias_link *
+FindPptpOutByPeerCallId(struct libalias *la, struct in_addr _src_addr,
+ struct in_addr _dst_addr, u_int16_t _dst_call_id);
+struct alias_link *
+FindPptpInByPeerCallId(struct libalias *la, struct in_addr _dst_addr,
+ struct in_addr _alias_addr, u_int16_t _alias_call_id);
+struct alias_link *
+FindRtspOut(struct libalias *la, struct in_addr _src_addr, struct in_addr _dst_addr,
+ u_short _src_port, u_short _alias_port, u_char _proto);
+struct in_addr
+ FindOriginalAddress(struct libalias *la, struct in_addr _alias_addr);
+struct in_addr
+ FindAliasAddress(struct libalias *la, struct in_addr _original_addr);
+struct in_addr
+FindSctpRedirectAddress(struct libalias *la, struct sctp_nat_msg *sm);
+
+/* External data access/modification */
+int
+FindNewPortGroup(struct libalias *la, struct in_addr _dst_addr, struct in_addr _alias_addr,
+ u_short _src_port, u_short _dst_port, u_short _port_count,
+ u_char _proto, u_char _align);
+void GetFragmentAddr(struct alias_link *_lnk, struct in_addr *_src_addr);
+void SetFragmentAddr(struct alias_link *_lnk, struct in_addr _src_addr);
+void GetFragmentPtr(struct alias_link *_lnk, char **_fptr);
+void SetFragmentPtr(struct alias_link *_lnk, char *fptr);
+void SetStateIn(struct alias_link *_lnk, int _state);
+void SetStateOut(struct alias_link *_lnk, int _state);
+int GetStateIn (struct alias_link *_lnk);
+int GetStateOut(struct alias_link *_lnk);
+struct in_addr
+ GetOriginalAddress(struct alias_link *_lnk);
+struct in_addr
+ GetDestAddress(struct alias_link *_lnk);
+struct in_addr
+ GetAliasAddress(struct alias_link *_lnk);
+struct in_addr
+ GetDefaultAliasAddress(struct libalias *la);
+void SetDefaultAliasAddress(struct libalias *la, struct in_addr _alias_addr);
+u_short GetOriginalPort(struct alias_link *_lnk);
+u_short GetAliasPort(struct alias_link *_lnk);
+struct in_addr
+ GetProxyAddress(struct alias_link *_lnk);
+void SetProxyAddress(struct alias_link *_lnk, struct in_addr _addr);
+u_short GetProxyPort(struct alias_link *_lnk);
+void SetProxyPort(struct alias_link *_lnk, u_short _port);
+void SetAckModified(struct alias_link *_lnk);
+int GetAckModified(struct alias_link *_lnk);
+int GetDeltaAckIn(u_long, struct alias_link *_lnk);
+int GetDeltaSeqOut(u_long, struct alias_link *lnk);
+void AddSeq(struct alias_link *lnk, int delta, u_int ip_hl,
+ u_short ip_len, u_long th_seq, u_int th_off);
+void SetExpire (struct alias_link *_lnk, int _expire);
+void ClearCheckNewLink(struct libalias *la);
+void SetProtocolFlags(struct alias_link *_lnk, int _pflags);
+int GetProtocolFlags(struct alias_link *_lnk);
+void SetDestCallId(struct alias_link *_lnk, u_int16_t _cid);
+
+#ifndef NO_FW_PUNCH
+void PunchFWHole(struct alias_link *_lnk);
+
+#endif
+
+/* Housekeeping function */
+void HouseKeeping(struct libalias *);
+
+/* TCP specific routines */
+/* lint -save -library Suppress flexelint warnings */
+
+/* Transparent proxy routines */
+int
+ProxyCheck(struct libalias *la, struct in_addr *proxy_server_addr,
+ u_short * proxy_server_port, struct in_addr src_addr,
+ struct in_addr dst_addr, u_short dst_port, u_char ip_p);
+void
+ProxyModify(struct libalias *la, struct alias_link *_lnk, struct ip *_pip,
+ int _maxpacketsize, int _proxy_type);
+
+enum alias_tcp_state {
+ ALIAS_TCP_STATE_NOT_CONNECTED,
+ ALIAS_TCP_STATE_CONNECTED,
+ ALIAS_TCP_STATE_DISCONNECTED
+};
+
+#if defined(_NETINET_IP_HH_)
+static __inline void *
+ip_next(struct ip *iphdr)
+{
+ char *p = (char *)iphdr;
+ return (&p[iphdr->ip_hl * 4]);
+}
+#endif
+
+#if defined(_NETINET_TCP_HH_)
+static __inline void *
+tcp_next(struct tcphdr *tcphdr)
+{
+ char *p = (char *)tcphdr;
+ return (&p[tcphdr->th_off * 4]);
+}
+#endif
+
+#if defined(_NETINET_UDP_HH_)
+static __inline void *
+udp_next(struct udphdr *udphdr)
+{
+ return ((void *)(udphdr + 1));
+}
+#endif
+
+#endif /* !_ALIAS_LOCAL_HH_ */
diff --git a/rtems/freebsd/netinet/libalias/alias_mod.c b/rtems/freebsd/netinet/libalias/alias_mod.c
new file mode 100644
index 00000000..820dac39
--- /dev/null
+++ b/rtems/freebsd/netinet/libalias/alias_mod.c
@@ -0,0 +1,292 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 2005 Paolo Pisati <piso@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#ifdef _KERNEL
+#include <rtems/freebsd/sys/libkern.h>
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/rwlock.h>
+#else
+#include <rtems/freebsd/stdio.h>
+#include <rtems/freebsd/string.h>
+#include <rtems/freebsd/sys/types.h>
+#include <rtems/freebsd/errno.h>
+#endif
+
+#include <rtems/freebsd/netinet/in_systm.h>
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/ip.h>
+
+#ifdef _KERNEL
+#include <rtems/freebsd/netinet/libalias/alias_local.h>
+#include <rtems/freebsd/netinet/libalias/alias_mod.h>
+#else
+#include <rtems/freebsd/local/alias_local.h>
+#include <rtems/freebsd/local/alias_mod.h>
+#endif
+
+/* Protocol and userland module handler chains. */
+LIST_HEAD(handler_chain, proto_handler) handler_chain = LIST_HEAD_INITIALIZER(handler_chain);
+#ifdef _KERNEL
+struct rwlock handler_rw;
+#endif
+SLIST_HEAD(dll_chain, dll) dll_chain = SLIST_HEAD_INITIALIZER(dll_chain);
+
+#ifdef _KERNEL
+
+#define LIBALIAS_RWLOCK_INIT() \
+ rw_init(&handler_rw, "Libalias_modules_rwlock")
+#define LIBALIAS_RWLOCK_DESTROY() rw_destroy(&handler_rw)
+#define LIBALIAS_WLOCK_ASSERT() \
+ rw_assert(&handler_rw, RA_WLOCKED)
+
+static __inline void
+LIBALIAS_RLOCK(void)
+{
+ rw_rlock(&handler_rw);
+}
+
+static __inline void
+LIBALIAS_RUNLOCK(void)
+{
+ rw_runlock(&handler_rw);
+}
+
+static __inline void
+LIBALIAS_WLOCK(void)
+{
+ rw_wlock(&handler_rw);
+}
+
+static __inline void
+LIBALIAS_WUNLOCK(void)
+{
+ rw_wunlock(&handler_rw);
+}
+
+static void
+_handler_chain_init(void)
+{
+
+ if (!rw_initialized(&handler_rw))
+ LIBALIAS_RWLOCK_INIT();
+}
+
+static void
+_handler_chain_destroy(void)
+{
+
+ if (rw_initialized(&handler_rw))
+ LIBALIAS_RWLOCK_DESTROY();
+}
+
+#else
+#define LIBALIAS_RWLOCK_INIT() ;
+#define LIBALIAS_RWLOCK_DESTROY() ;
+#define LIBALIAS_WLOCK_ASSERT() ;
+#define LIBALIAS_RLOCK() ;
+#define LIBALIAS_RUNLOCK() ;
+#define LIBALIAS_WLOCK() ;
+#define LIBALIAS_WUNLOCK() ;
+#define _handler_chain_init() ;
+#define _handler_chain_destroy() ;
+#endif
+
+void
+handler_chain_init(void)
+{
+ _handler_chain_init();
+}
+
+void
+handler_chain_destroy(void)
+{
+ _handler_chain_destroy();
+}
+
+static int
+_attach_handler(struct proto_handler *p)
+{
+ struct proto_handler *b;
+
+ LIBALIAS_WLOCK_ASSERT();
+ b = NULL;
+ LIST_FOREACH(b, &handler_chain, entries) {
+ if ((b->pri == p->pri) &&
+ (b->dir == p->dir) &&
+ (b->proto == p->proto))
+ return (EEXIST); /* Priority conflict. */
+ if (b->pri > p->pri) {
+ LIST_INSERT_BEFORE(b, p, entries);
+ return (0);
+ }
+ }
+	/* End of list reached or right position found; insert here. */
+ if (b)
+ LIST_INSERT_AFTER(b, p, entries);
+ else
+ LIST_INSERT_HEAD(&handler_chain, p, entries);
+ return (0);
+}
+
+static int
+_detach_handler(struct proto_handler *p)
+{
+ struct proto_handler *b, *b_tmp;
+
+ LIBALIAS_WLOCK_ASSERT();
+ LIST_FOREACH_SAFE(b, &handler_chain, entries, b_tmp) {
+ if (b == p) {
+ LIST_REMOVE(b, entries);
+ return (0);
+ }
+ }
+ return (ENOENT); /* Handler not found. */
+}
+
+int
+LibAliasAttachHandlers(struct proto_handler *_p)
+{
+ int i, error;
+
+ LIBALIAS_WLOCK();
+ error = -1;
+ for (i = 0; 1; i++) {
+ if (*((int *)&_p[i]) == EOH)
+ break;
+ error = _attach_handler(&_p[i]);
+ if (error != 0)
+ break;
+ }
+ LIBALIAS_WUNLOCK();
+ return (error);
+}
+
+int
+LibAliasDetachHandlers(struct proto_handler *_p)
+{
+ int i, error;
+
+ LIBALIAS_WLOCK();
+ error = -1;
+ for (i = 0; 1; i++) {
+ if (*((int *)&_p[i]) == EOH)
+ break;
+ error = _detach_handler(&_p[i]);
+ if (error != 0)
+ break;
+ }
+ LIBALIAS_WUNLOCK();
+ return (error);
+}
+
+int
+detach_handler(struct proto_handler *_p)
+{
+ int error;
+
+ LIBALIAS_WLOCK();
+	error = _detach_handler(_p);
+ LIBALIAS_WUNLOCK();
+ return (error);
+}
+
+int
+find_handler(int8_t dir, int8_t proto, struct libalias *la, __unused struct ip *pip,
+ struct alias_data *ad)
+{
+ struct proto_handler *p;
+ int error;
+
+ LIBALIAS_RLOCK();
+ error = ENOENT;
+ LIST_FOREACH(p, &handler_chain, entries) {
+ if ((p->dir & dir) && (p->proto & proto))
+ if (p->fingerprint(la, ad) == 0) {
+ error = p->protohandler(la, pip, ad);
+ break;
+ }
+ }
+ LIBALIAS_RUNLOCK();
+ return (error);
+}
+
+struct proto_handler *
+first_handler(void)
+{
+
+ return (LIST_FIRST(&handler_chain));
+}
+
+/* Dll manipulation code - this code is not thread safe... */
+
+int
+attach_dll(struct dll *p)
+{
+ struct dll *b;
+
+ SLIST_FOREACH(b, &dll_chain, next) {
+ if (!strncmp(b->name, p->name, DLL_LEN))
+ return (EEXIST); /* Dll name conflict. */
+ }
+ SLIST_INSERT_HEAD(&dll_chain, p, next);
+ return (0);
+}
+
+void *
+detach_dll(char *p)
+{
+ struct dll *b, *b_tmp;
+ void *error;
+
+ b = NULL;
+ error = NULL;
+ SLIST_FOREACH_SAFE(b, &dll_chain, next, b_tmp)
+ if (!strncmp(b->name, p, DLL_LEN)) {
+ SLIST_REMOVE(&dll_chain, b, dll, next);
+ error = b;
+ break;
+ }
+ return (error);
+}
+
+struct dll *
+walk_dll_chain(void)
+{
+ struct dll *t;
+
+ t = SLIST_FIRST(&dll_chain);
+ if (t == NULL)
+ return (NULL);
+ SLIST_REMOVE_HEAD(&dll_chain, next);
+ return (t);
+}
diff --git a/rtems/freebsd/netinet/libalias/alias_mod.h b/rtems/freebsd/netinet/libalias/alias_mod.h
new file mode 100644
index 00000000..f5f98cc3
--- /dev/null
+++ b/rtems/freebsd/netinet/libalias/alias_mod.h
@@ -0,0 +1,163 @@
+/*-
+ * Copyright (c) 2005 Paolo Pisati <piso@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * Alias_mod.h defines the outside world interfaces for the packet aliasing
+ * modular framework
+ */
+
+#ifndef _ALIAS_MOD_HH_
+#define _ALIAS_MOD_HH_
+
+#ifdef _KERNEL
+MALLOC_DECLARE(M_ALIAS);
+
+/* Use kernel allocator. */
+#if defined(_SYS_MALLOC_HH_)
+#ifndef __rtems__
+#define malloc(x) malloc(x, M_ALIAS, M_NOWAIT|M_ZERO)
+#define calloc(x, n) malloc(x*n)
+#define free(x) free(x, M_ALIAS)
+#else /* __rtems__ */
+#define malloc(x) _bsd_malloc(x, M_ALIAS, M_NOWAIT|M_ZERO)
+#define calloc(x, n) malloc(x*n)
+#define free(x) _bsd_free(x, M_ALIAS)
+#endif /* __rtems__ */
+#endif
+#endif
+
+/* Protocol handlers struct & function. */
+
+/* Packet flow direction. */
+#define IN 1
+#define OUT 2
+
+/* Working protocol. */
+#define IP 1
+#define TCP 2
+#define UDP 4
+
+/*
+ * Data passed to a protocol handler module; it must be filled in
+ * right before calling find_handler() to determine which
+ * module is eligible to be called.
+ */
+
+struct alias_data {
+ struct alias_link *lnk;
+ struct in_addr *oaddr; /* Original address. */
+ struct in_addr *aaddr; /* Alias address. */
+ uint16_t *aport; /* Alias port. */
+ uint16_t *sport, *dport; /* Source & destination port */
+ uint16_t maxpktsize; /* Max packet size. */
+};
+
+/*
+ * This structure contains all the information necessary to make
+ * a protocol handler correctly work.
+ */
+
+struct proto_handler {
+ u_int pri; /* Handler priority. */
+ int16_t dir; /* Flow direction. */
+ uint8_t proto; /* Working protocol. */
+	int (*fingerprint)(struct libalias *,	/* Fingerprint function. */
+	    struct alias_data *);
+	int (*protohandler)(struct libalias *,	/* Aliasing function. */
+	    struct ip *, struct alias_data *);
+ LIST_ENTRY(proto_handler) entries;
+};
+
+
+/*
+ * Used only in userland when libalias needs to keep track of all
+ * modules loaded. In kernel land (kld mode) we don't need to care
+ * about libalias modules because kld does it for us.
+ */
+
+#define DLL_LEN 32
+struct dll {
+ char name[DLL_LEN]; /* Name of module. */
+ void *handle; /*
+ * Ptr to shared obj obtained through
+ * dlopen() - use this ptr to get access
+ * to any symbols from a loaded module
+ * via dlsym().
+ */
+ SLIST_ENTRY(dll) next;
+};
+
+/* Functions used with protocol handlers. */
+
+void handler_chain_init(void);
+void handler_chain_destroy(void);
+int LibAliasAttachHandlers(struct proto_handler *);
+int LibAliasDetachHandlers(struct proto_handler *);
+int detach_handler(struct proto_handler *);
+int find_handler(int8_t, int8_t, struct libalias *,
+ struct ip *, struct alias_data *);
+struct proto_handler *first_handler(void);
+
+/* Functions used with dll module. */
+
+void dll_chain_init(void);
+void dll_chain_destroy(void);
+int attach_dll(struct dll *);
+void *detach_dll(char *);
+struct dll *walk_dll_chain(void);
+
+/* End of handlers. */
+#define EOH -1
+
+/*
+ * Some defines borrowed from sys/module.h used to compile a kld
+ * in userland as a shared lib.
+ */
+
+#ifndef _KERNEL
+typedef enum modeventtype {
+ MOD_LOAD,
+ MOD_UNLOAD,
+ MOD_SHUTDOWN,
+ MOD_QUIESCE
+} modeventtype_t;
+
+typedef struct module *module_t;
+typedef int (*modeventhand_t)(module_t, int /* modeventtype_t */, void *);
+
+/*
+ * Struct for registering modules statically via SYSINIT.
+ */
+typedef struct moduledata {
+ const char *name; /* module name */
+ modeventhand_t evhand; /* event handler */
+ void *priv; /* extra data */
+} moduledata_t;
+#endif
+
+#endif /* !_ALIAS_MOD_HH_ */
diff --git a/rtems/freebsd/netinet/libalias/alias_nbt.c b/rtems/freebsd/netinet/libalias/alias_nbt.c
new file mode 100644
index 00000000..a9f04559
--- /dev/null
+++ b/rtems/freebsd/netinet/libalias/alias_nbt.c
@@ -0,0 +1,855 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Written by Atsushi Murai <amurai@spec.co.jp>
+ * Copyright (c) 1998, System Planning and Engineering Co.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ * TODO:
+ *	o Clean up.
+ *	o Consider word alignment on other platforms.
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/*
+  alias_nbt.c performs special processing for NetBIOS over TCP/IP
+  sessions carried over UDP.
+
+ Initial version: May, 1998 (Atsushi Murai <amurai@spec.co.jp>)
+
+ See HISTORY file for record of revisions.
+*/
+
+/* Includes */
+#ifdef _KERNEL
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/module.h>
+#else
+#include <rtems/freebsd/errno.h>
+#include <rtems/freebsd/sys/types.h>
+#include <rtems/freebsd/stdio.h>
+#include <rtems/freebsd/strings.h>
+#endif
+
+#include <rtems/freebsd/netinet/in_systm.h>
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/ip.h>
+#include <rtems/freebsd/netinet/udp.h>
+
+#ifdef _KERNEL
+#include <rtems/freebsd/netinet/libalias/alias_local.h>
+#include <rtems/freebsd/netinet/libalias/alias_mod.h>
+#else
+#include <rtems/freebsd/local/alias_local.h>
+#include <rtems/freebsd/local/alias_mod.h>
+#endif
+
+#define NETBIOS_NS_PORT_NUMBER 137
+#define NETBIOS_DGM_PORT_NUMBER 138
+
+static int
+AliasHandleUdpNbt(struct libalias *, struct ip *, struct alias_link *,
+ struct in_addr *, u_short);
+
+static int
+AliasHandleUdpNbtNS(struct libalias *, struct ip *, struct alias_link *,
+ struct in_addr *, u_short *, struct in_addr *, u_short *);
+static int
+fingerprint1(struct libalias *la, struct alias_data *ah)
+{
+
+ if (ah->dport == NULL || ah->sport == NULL || ah->lnk == NULL ||
+ ah->aaddr == NULL || ah->aport == NULL)
+ return (-1);
+ if (ntohs(*ah->dport) == NETBIOS_DGM_PORT_NUMBER
+ || ntohs(*ah->sport) == NETBIOS_DGM_PORT_NUMBER)
+ return (0);
+ return (-1);
+}
+
+static int
+protohandler1(struct libalias *la, struct ip *pip, struct alias_data *ah)
+{
+
+ return (AliasHandleUdpNbt(la, pip, ah->lnk, ah->aaddr, *ah->aport));
+}
+
+static int
+fingerprint2(struct libalias *la, struct alias_data *ah)
+{
+
+ if (ah->dport == NULL || ah->sport == NULL || ah->lnk == NULL ||
+ ah->aaddr == NULL || ah->aport == NULL)
+ return (-1);
+ if (ntohs(*ah->dport) == NETBIOS_NS_PORT_NUMBER
+ || ntohs(*ah->sport) == NETBIOS_NS_PORT_NUMBER)
+ return (0);
+ return (-1);
+}
+
+static int
+protohandler2in(struct libalias *la, struct ip *pip, struct alias_data *ah)
+{
+
+ AliasHandleUdpNbtNS(la, pip, ah->lnk, ah->aaddr, ah->aport,
+ ah->oaddr, ah->dport);
+ return (0);
+}
+
+static int
+protohandler2out(struct libalias *la, struct ip *pip, struct alias_data *ah)
+{
+
+ return (AliasHandleUdpNbtNS(la, pip, ah->lnk, &pip->ip_src, ah->sport,
+ ah->aaddr, ah->aport));
+}
+
+/* Kernel module definition. */
+struct proto_handler handlers[] = {
+ {
+ .pri = 130,
+ .dir = IN|OUT,
+ .proto = UDP,
+ .fingerprint = &fingerprint1,
+ .protohandler = &protohandler1
+ },
+ {
+ .pri = 140,
+ .dir = IN,
+ .proto = UDP,
+ .fingerprint = &fingerprint2,
+ .protohandler = &protohandler2in
+ },
+ {
+ .pri = 140,
+ .dir = OUT,
+ .proto = UDP,
+ .fingerprint = &fingerprint2,
+ .protohandler = &protohandler2out
+ },
+ { EOH }
+};
+
+static int
+mod_handler(module_t mod, int type, void *data)
+{
+ int error;
+
+ switch (type) {
+ case MOD_LOAD:
+ error = 0;
+ LibAliasAttachHandlers(handlers);
+ break;
+ case MOD_UNLOAD:
+ error = 0;
+ LibAliasDetachHandlers(handlers);
+ break;
+ default:
+ error = EINVAL;
+ }
+ return (error);
+}
+
+#ifdef _KERNEL
+static
+#endif
+moduledata_t alias_mod = {
+ "alias_nbt", mod_handler, NULL
+};
+
+#ifdef _KERNEL
+DECLARE_MODULE(alias_nbt, alias_mod, SI_SUB_DRIVERS, SI_ORDER_SECOND);
+MODULE_VERSION(alias_nbt, 1);
+MODULE_DEPEND(alias_nbt, libalias, 1, 1, 1);
+#endif
+
+typedef struct {
+ struct in_addr oldaddr;
+ u_short oldport;
+ struct in_addr newaddr;
+ u_short newport;
+ u_short *uh_sum;
+} NBTArguments;
+
+typedef struct {
+ unsigned char type;
+ unsigned char flags;
+ u_short id;
+ struct in_addr source_ip;
+ u_short source_port;
+ u_short len;
+ u_short offset;
+} NbtDataHeader;
+
+#define OpQuery 0
+#define OpUnknown 4
+#define OpRegist 5
+#define OpRelease 6
+#define OpWACK 7
+#define OpRefresh 8
+typedef struct {
+ u_short nametrid;
+ u_short dir: 1, opcode:4, nmflags:7, rcode:4;
+ u_short qdcount;
+ u_short ancount;
+ u_short nscount;
+ u_short arcount;
+} NbtNSHeader;
+
+#define FMT_ERR 0x1
+#define SRV_ERR 0x2
+#define IMP_ERR 0x4
+#define RFS_ERR 0x5
+#define ACT_ERR 0x6
+#define CFT_ERR 0x7
+
+
+#ifdef LIBALIAS_DEBUG
+static void
+PrintRcode(u_char rcode)
+{
+
+	switch (rcode) {
+	case FMT_ERR:
+		printf("\nFormat Error.");
+		break;
+	case SRV_ERR:
+		printf("\nServer failure.");
+		break;
+	case IMP_ERR:
+		printf("\nUnsupported request error.\n");
+		break;
+	case RFS_ERR:
+		printf("\nRefused error.\n");
+		break;
+	case ACT_ERR:
+		printf("\nActive error.\n");
+		break;
+	case CFT_ERR:
+		printf("\nName in conflict error.\n");
+		break;
+	default:
+		printf("\n?%c?=%0x\n", '?', rcode);
+		break;
+	}
+}
+
+#endif
+
+
+/* Handling Name field */
+static u_char *
+AliasHandleName(u_char * p, char *pmax)
+{
+
+ u_char *s;
+ u_char c;
+ int compress;
+
+ /* Following length field */
+
+ if (p == NULL || (char *)p >= pmax)
+ return (NULL);
+
+ if (*p & 0xc0) {
+ p = p + 2;
+ if ((char *)p > pmax)
+ return (NULL);
+ return ((u_char *) p);
+ }
+ while ((*p & 0x3f) != 0x00) {
+ s = p + 1;
+ if (*p == 0x20)
+ compress = 1;
+ else
+ compress = 0;
+
+ /* Get next length field */
+ p = (u_char *) (p + (*p & 0x3f) + 1);
+ if ((char *)p > pmax) {
+ p = NULL;
+ break;
+ }
+#ifdef LIBALIAS_DEBUG
+ printf(":");
+#endif
+ while (s < p) {
+ if (compress == 1) {
+ c = (u_char) (((((*s & 0x0f) << 4) | (*(s + 1) & 0x0f)) - 0x11));
+#ifdef LIBALIAS_DEBUG
+ if (isprint(c))
+ printf("%c", c);
+ else
+ printf("<0x%02x>", c);
+#endif
+ s += 2;
+ } else {
+#ifdef LIBALIAS_DEBUG
+ printf("%c", *s);
+#endif
+ s++;
+ }
+ }
+#ifdef LIBALIAS_DEBUG
+ printf(":");
+ fflush(stdout);
+#endif
+ }
+
+	/* Step past the end of the Name field */
+ if (p == NULL || (char *)p >= pmax)
+ p = NULL;
+ else
+ p++;
+ return ((u_char *) p);
+}
+
+/*
+ * NetBIOS Datagram Handler (IP/UDP)
+ */
+#define DGM_DIRECT_UNIQ 0x10
+#define DGM_DIRECT_GROUP 0x11
+#define DGM_BROADCAST 0x12
+#define DGM_ERROR 0x13
+#define DGM_QUERY 0x14
+#define DGM_POSITIVE_RES 0x15
+#define DGM_NEGATIVE_RES 0x16
+
+static int
+AliasHandleUdpNbt(
+ struct libalias *la,
+ struct ip *pip, /* IP packet to examine/patch */
+ struct alias_link *lnk,
+ struct in_addr *alias_address,
+ u_short alias_port
+)
+{
+ struct udphdr *uh;
+ NbtDataHeader *ndh;
+ u_char *p = NULL;
+ char *pmax;
+
+ (void)la;
+ (void)lnk;
+
+ /* Calculate data length of UDP packet */
+ uh = (struct udphdr *)ip_next(pip);
+ pmax = (char *)uh + ntohs(uh->uh_ulen);
+
+ ndh = (NbtDataHeader *)udp_next(uh);
+ if ((char *)(ndh + 1) > pmax)
+ return (-1);
+#ifdef LIBALIAS_DEBUG
+ printf("\nType=%02x,", ndh->type);
+#endif
+ switch (ndh->type) {
+ case DGM_DIRECT_UNIQ:
+ case DGM_DIRECT_GROUP:
+ case DGM_BROADCAST:
+ p = (u_char *) ndh + 14;
+ p = AliasHandleName(p, pmax); /* Source Name */
+ p = AliasHandleName(p, pmax); /* Destination Name */
+ break;
+ case DGM_ERROR:
+ p = (u_char *) ndh + 11;
+ break;
+ case DGM_QUERY:
+ case DGM_POSITIVE_RES:
+ case DGM_NEGATIVE_RES:
+ p = (u_char *) ndh + 10;
+ p = AliasHandleName(p, pmax); /* Destination Name */
+ break;
+ }
+ if (p == NULL || (char *)p > pmax)
+ p = NULL;
+#ifdef LIBALIAS_DEBUG
+ printf("%s:%d-->", inet_ntoa(ndh->source_ip), ntohs(ndh->source_port));
+#endif
+ /* Doing an IP address and Port number Translation */
+ if (uh->uh_sum != 0) {
+ int acc;
+ u_short *sptr;
+
+ acc = ndh->source_port;
+ acc -= alias_port;
+ sptr = (u_short *) & (ndh->source_ip);
+ acc += *sptr++;
+ acc += *sptr;
+ sptr = (u_short *) alias_address;
+ acc -= *sptr++;
+ acc -= *sptr;
+ ADJUST_CHECKSUM(acc, uh->uh_sum);
+ }
+ ndh->source_ip = *alias_address;
+ ndh->source_port = alias_port;
+#ifdef LIBALIAS_DEBUG
+ printf("%s:%d\n", inet_ntoa(ndh->source_ip), ntohs(ndh->source_port));
+ fflush(stdout);
+#endif
+ return ((p == NULL) ? -1 : 0);
+}
+
+/* Question Section */
+#define QS_TYPE_NB 0x0020
+#define QS_TYPE_NBSTAT 0x0021
+#define QS_CLAS_IN 0x0001
+typedef struct {
+ u_short type; /* The type of Request */
+ u_short class; /* The class of Request */
+} NBTNsQuestion;
+
+static u_char *
+AliasHandleQuestion(
+ u_short count,
+ NBTNsQuestion * q,
+ char *pmax,
+ NBTArguments * nbtarg)
+{
+
+ (void)nbtarg;
+
+ while (count != 0) {
+		/* Name Field */
+ q = (NBTNsQuestion *) AliasHandleName((u_char *) q, pmax);
+
+ if (q == NULL || (char *)(q + 1) > pmax) {
+ q = NULL;
+ break;
+ }
+		/* Type and Class field */
+ switch (ntohs(q->type)) {
+ case QS_TYPE_NB:
+ case QS_TYPE_NBSTAT:
+ q = q + 1;
+ break;
+ default:
+#ifdef LIBALIAS_DEBUG
+ printf("\nUnknown Type on Question %0x\n", ntohs(q->type));
+#endif
+ break;
+ }
+ count--;
+ }
+
+	/* Step past the end of the Question Section */
+ return ((u_char *) q);
+}
+
+/* Resource Record */
+#define RR_TYPE_A 0x0001
+#define RR_TYPE_NS 0x0002
+#define RR_TYPE_NULL 0x000a
+#define RR_TYPE_NB 0x0020
+#define RR_TYPE_NBSTAT 0x0021
+#define RR_CLAS_IN 0x0001
+#define SizeOfNsResource 8
+typedef struct {
+ u_short type;
+ u_short class;
+ unsigned int ttl;
+ u_short rdlen;
+} NBTNsResource;
+
+#define SizeOfNsRNB 6
+typedef struct {
+ u_short g: 1 , ont:2, resv:13;
+ struct in_addr addr;
+} NBTNsRNB;
+
+static u_char *
+AliasHandleResourceNB(
+ NBTNsResource * q,
+ char *pmax,
+ NBTArguments * nbtarg)
+{
+ NBTNsRNB *nb;
+ u_short bcount;
+
+ if (q == NULL || (char *)(q + 1) > pmax)
+ return (NULL);
+	/* Check the length */
+ bcount = ntohs(q->rdlen);
+
+ /* Forward to Resource NB position */
+ nb = (NBTNsRNB *) ((u_char *) q + SizeOfNsResource);
+
+ /* Processing all in_addr array */
+#ifdef LIBALIAS_DEBUG
+ printf("NB rec[%s", inet_ntoa(nbtarg->oldaddr));
+ printf("->%s, %dbytes] ", inet_ntoa(nbtarg->newaddr), bcount);
+#endif
+ while (nb != NULL && bcount != 0) {
+ if ((char *)(nb + 1) > pmax) {
+ nb = NULL;
+ break;
+ }
+#ifdef LIBALIAS_DEBUG
+ printf("<%s>", inet_ntoa(nb->addr));
+#endif
+ if (!bcmp(&nbtarg->oldaddr, &nb->addr, sizeof(struct in_addr))) {
+ if (*nbtarg->uh_sum != 0) {
+ int acc;
+ u_short *sptr;
+
+ sptr = (u_short *) & (nb->addr);
+ acc = *sptr++;
+ acc += *sptr;
+ sptr = (u_short *) & (nbtarg->newaddr);
+ acc -= *sptr++;
+ acc -= *sptr;
+ ADJUST_CHECKSUM(acc, *nbtarg->uh_sum);
+ }
+ nb->addr = nbtarg->newaddr;
+#ifdef LIBALIAS_DEBUG
+ printf("O");
+#endif
+ }
+#ifdef LIBALIAS_DEBUG
+ else {
+ printf(".");
+ }
+#endif
+ nb = (NBTNsRNB *) ((u_char *) nb + SizeOfNsRNB);
+ bcount -= SizeOfNsRNB;
+ }
+ if (nb == NULL || (char *)(nb + 1) > pmax) {
+ nb = NULL;
+ }
+ return ((u_char *) nb);
+}
+
+#define SizeOfResourceA 6
+typedef struct {
+ struct in_addr addr;
+} NBTNsResourceA;
+
+static u_char *
+AliasHandleResourceA(
+ NBTNsResource * q,
+ char *pmax,
+ NBTArguments * nbtarg)
+{
+ NBTNsResourceA *a;
+ u_short bcount;
+
+ if (q == NULL || (char *)(q + 1) > pmax)
+ return (NULL);
+
+ /* Forward to Resource A position */
+ a = (NBTNsResourceA *) ((u_char *) q + sizeof(NBTNsResource));
+
+	/* Check the length */
+ bcount = ntohs(q->rdlen);
+
+ /* Processing all in_addr array */
+#ifdef LIBALIAS_DEBUG
+ printf("Arec [%s", inet_ntoa(nbtarg->oldaddr));
+ printf("->%s]", inet_ntoa(nbtarg->newaddr));
+#endif
+ while (bcount != 0) {
+ if (a == NULL || (char *)(a + 1) > pmax)
+ return (NULL);
+#ifdef LIBALIAS_DEBUG
+ printf("..%s", inet_ntoa(a->addr));
+#endif
+ if (!bcmp(&nbtarg->oldaddr, &a->addr, sizeof(struct in_addr))) {
+ if (*nbtarg->uh_sum != 0) {
+ int acc;
+ u_short *sptr;
+
+ sptr = (u_short *) & (a->addr); /* Old */
+ acc = *sptr++;
+ acc += *sptr;
+ sptr = (u_short *) & nbtarg->newaddr; /* New */
+ acc -= *sptr++;
+ acc -= *sptr;
+ ADJUST_CHECKSUM(acc, *nbtarg->uh_sum);
+ }
+ a->addr = nbtarg->newaddr;
+ }
+ a++; /* XXXX */
+ bcount -= SizeOfResourceA;
+ }
+ if (a == NULL || (char *)(a + 1) > pmax)
+ a = NULL;
+ return ((u_char *) a);
+}
+
+typedef struct {
+ u_short opcode:4, flags:8, resv:4;
+} NBTNsResourceNULL;
+
+static u_char *
+AliasHandleResourceNULL(
+ NBTNsResource * q,
+ char *pmax,
+ NBTArguments * nbtarg)
+{
+ NBTNsResourceNULL *n;
+ u_short bcount;
+
+ (void)nbtarg;
+
+ if (q == NULL || (char *)(q + 1) > pmax)
+ return (NULL);
+
+ /* Forward to Resource NULL position */
+ n = (NBTNsResourceNULL *) ((u_char *) q + sizeof(NBTNsResource));
+
+	/* Check the length */
+ bcount = ntohs(q->rdlen);
+
+ /* Processing all in_addr array */
+ while (bcount != 0) {
+ if ((char *)(n + 1) > pmax) {
+ n = NULL;
+ break;
+ }
+ n++;
+ bcount -= sizeof(NBTNsResourceNULL);
+ }
+ if ((char *)(n + 1) > pmax)
+ n = NULL;
+
+ return ((u_char *) n);
+}
+
+static u_char *
+AliasHandleResourceNS(
+ NBTNsResource * q,
+ char *pmax,
+ NBTArguments * nbtarg)
+{
+ NBTNsResourceNULL *n;
+ u_short bcount;
+
+ (void)nbtarg;
+
+ if (q == NULL || (char *)(q + 1) > pmax)
+ return (NULL);
+
+ /* Forward to Resource NULL position */
+ n = (NBTNsResourceNULL *) ((u_char *) q + sizeof(NBTNsResource));
+
+	/* Check the length */
+ bcount = ntohs(q->rdlen);
+
+	/* Resource Record Name Field */
+ q = (NBTNsResource *) AliasHandleName((u_char *) n, pmax); /* XXX */
+
+ if (q == NULL || (char *)((u_char *) n + bcount) > pmax)
+ return (NULL);
+ else
+ return ((u_char *) n + bcount);
+}
+
+typedef struct {
+ u_short numnames;
+} NBTNsResourceNBSTAT;
+
+static u_char *
+AliasHandleResourceNBSTAT(
+ NBTNsResource * q,
+ char *pmax,
+ NBTArguments * nbtarg)
+{
+ NBTNsResourceNBSTAT *n;
+ u_short bcount;
+
+ (void)nbtarg;
+
+ if (q == NULL || (char *)(q + 1) > pmax)
+ return (NULL);
+
+ /* Forward to Resource NBSTAT position */
+ n = (NBTNsResourceNBSTAT *) ((u_char *) q + sizeof(NBTNsResource));
+
+	/* Check the length */
+ bcount = ntohs(q->rdlen);
+
+ if (q == NULL || (char *)((u_char *) n + bcount) > pmax)
+ return (NULL);
+ else
+ return ((u_char *) n + bcount);
+}
+
+static u_char *
+AliasHandleResource(
+ u_short count,
+ NBTNsResource * q,
+ char *pmax,
+    NBTArguments * nbtarg)
+{
+ while (count != 0) {
+		/* Resource Record Name Field */
+ q = (NBTNsResource *) AliasHandleName((u_char *) q, pmax);
+
+ if (q == NULL || (char *)(q + 1) > pmax)
+ break;
+#ifdef LIBALIAS_DEBUG
+ printf("type=%02x, count=%d\n", ntohs(q->type), count);
+#endif
+
+		/* Type and Class field */
+ switch (ntohs(q->type)) {
+ case RR_TYPE_NB:
+ q = (NBTNsResource *) AliasHandleResourceNB(
+ q,
+ pmax,
+ nbtarg
+ );
+ break;
+ case RR_TYPE_A:
+ q = (NBTNsResource *) AliasHandleResourceA(
+ q,
+ pmax,
+ nbtarg
+ );
+ break;
+ case RR_TYPE_NS:
+ q = (NBTNsResource *) AliasHandleResourceNS(
+ q,
+ pmax,
+ nbtarg
+ );
+ break;
+ case RR_TYPE_NULL:
+ q = (NBTNsResource *) AliasHandleResourceNULL(
+ q,
+ pmax,
+ nbtarg
+ );
+ break;
+ case RR_TYPE_NBSTAT:
+ q = (NBTNsResource *) AliasHandleResourceNBSTAT(
+ q,
+ pmax,
+ nbtarg
+ );
+ break;
+ default:
+#ifdef LIBALIAS_DEBUG
+ printf(
+ "\nUnknown Type of Resource %0x\n",
+ ntohs(q->type)
+ );
+ fflush(stdout);
+#endif
+ break;
+ }
+ count--;
+ }
+ return ((u_char *) q);
+}
+
+static int
+AliasHandleUdpNbtNS(
+ struct libalias *la,
+ struct ip *pip, /* IP packet to examine/patch */
+ struct alias_link *lnk,
+ struct in_addr *alias_address,
+ u_short * alias_port,
+ struct in_addr *original_address,
+ u_short * original_port)
+{
+ struct udphdr *uh;
+ NbtNSHeader *nsh;
+ u_char *p;
+ char *pmax;
+ NBTArguments nbtarg;
+
+ (void)la;
+ (void)lnk;
+
+ /* Set up Common Parameter */
+ nbtarg.oldaddr = *alias_address;
+ nbtarg.oldport = *alias_port;
+ nbtarg.newaddr = *original_address;
+ nbtarg.newport = *original_port;
+
+ /* Calculate data length of UDP packet */
+ uh = (struct udphdr *)ip_next(pip);
+ nbtarg.uh_sum = &(uh->uh_sum);
+ nsh = (NbtNSHeader *)udp_next(uh);
+ p = (u_char *) (nsh + 1);
+ pmax = (char *)uh + ntohs(uh->uh_ulen);
+
+ if ((char *)(nsh + 1) > pmax)
+ return (-1);
+
+#ifdef LIBALIAS_DEBUG
+ printf(" [%s] ID=%02x, op=%01x, flag=%02x, rcode=%01x, qd=%04x"
+ ", an=%04x, ns=%04x, ar=%04x, [%d]-->",
+ nsh->dir ? "Response" : "Request",
+ nsh->nametrid,
+ nsh->opcode,
+ nsh->nmflags,
+ nsh->rcode,
+ ntohs(nsh->qdcount),
+ ntohs(nsh->ancount),
+ ntohs(nsh->nscount),
+ ntohs(nsh->arcount),
+ (u_char *) p - (u_char *) nsh
+ );
+#endif
+
+ /* Question Entries */
+ if (ntohs(nsh->qdcount) != 0) {
+ p = AliasHandleQuestion(
+ ntohs(nsh->qdcount),
+ (NBTNsQuestion *) p,
+ pmax,
+ &nbtarg
+ );
+ }
+ /* Answer Resource Records */
+ if (ntohs(nsh->ancount) != 0) {
+ p = AliasHandleResource(
+ ntohs(nsh->ancount),
+ (NBTNsResource *) p,
+ pmax,
+ &nbtarg
+ );
+ }
+	/* Authority Resource Records */
+ if (ntohs(nsh->nscount) != 0) {
+ p = AliasHandleResource(
+ ntohs(nsh->nscount),
+ (NBTNsResource *) p,
+ pmax,
+ &nbtarg
+ );
+ }
+	/* Additional Resource Records */
+ if (ntohs(nsh->arcount) != 0) {
+ p = AliasHandleResource(
+ ntohs(nsh->arcount),
+ (NBTNsResource *) p,
+ pmax,
+ &nbtarg
+ );
+ }
+#ifdef LIBALIAS_DEBUG
+ PrintRcode(nsh->rcode);
+#endif
+ return ((p == NULL) ? -1 : 0);
+}
diff --git a/rtems/freebsd/netinet/libalias/alias_pptp.c b/rtems/freebsd/netinet/libalias/alias_pptp.c
new file mode 100644
index 00000000..032464cc
--- /dev/null
+++ b/rtems/freebsd/netinet/libalias/alias_pptp.c
@@ -0,0 +1,525 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*
+ * alias_pptp.c
+ *
+ * Copyright (c) 2000 Whistle Communications, Inc.
+ * All rights reserved.
+ *
+ * Subject to the following obligations and disclaimer of warranty, use and
+ * redistribution of this software, in source or object code forms, with or
+ * without modifications are expressly permitted by Whistle Communications;
+ * provided, however, that:
+ * 1. Any and all reproductions of the source or object code must include the
+ * copyright notice above and the following disclaimer of warranties; and
+ * 2. No rights are granted, in any manner or form, to use Whistle
+ * Communications, Inc. trademarks, including the mark "WHISTLE
+ * COMMUNICATIONS" on advertising, endorsements, or otherwise except as
+ * such appears in the above copyright notice or in the software.
+ *
+ * THIS SOFTWARE IS BEING PROVIDED BY WHISTLE COMMUNICATIONS "AS IS", AND
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, WHISTLE COMMUNICATIONS MAKES NO
+ * REPRESENTATIONS OR WARRANTIES, EXPRESS OR IMPLIED, REGARDING THIS SOFTWARE,
+ * INCLUDING WITHOUT LIMITATION, ANY AND ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT.
+ * WHISTLE COMMUNICATIONS DOES NOT WARRANT, GUARANTEE, OR MAKE ANY
+ * REPRESENTATIONS REGARDING THE USE OF, OR THE RESULTS OF THE USE OF THIS
+ * SOFTWARE IN TERMS OF ITS CORRECTNESS, ACCURACY, RELIABILITY OR OTHERWISE.
+ * IN NO EVENT SHALL WHISTLE COMMUNICATIONS BE LIABLE FOR ANY DAMAGES
+ * RESULTING FROM OR ARISING OUT OF ANY USE OF THIS SOFTWARE, INCLUDING
+ * WITHOUT LIMITATION, ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
+ * PUNITIVE, OR CONSEQUENTIAL DAMAGES, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES, LOSS OF USE, DATA OR PROFITS, HOWEVER CAUSED AND UNDER ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF WHISTLE COMMUNICATIONS IS ADVISED OF THE POSSIBILITY
+ * OF SUCH DAMAGE.
+ *
+ * Author: Erik Salander <erik@whistle.com>
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/* Includes */
+#ifdef _KERNEL
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/limits.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/module.h>
+#else
+#include <rtems/freebsd/errno.h>
+#include <rtems/freebsd/limits.h>
+#include <rtems/freebsd/sys/types.h>
+#include <rtems/freebsd/stdio.h>
+#endif
+
+#include <rtems/freebsd/netinet/tcp.h>
+
+#ifdef _KERNEL
+#include <rtems/freebsd/netinet/libalias/alias.h>
+#include <rtems/freebsd/netinet/libalias/alias_local.h>
+#include <rtems/freebsd/netinet/libalias/alias_mod.h>
+#else
+#include <rtems/freebsd/local/alias.h>
+#include <rtems/freebsd/local/alias_local.h>
+#include <rtems/freebsd/local/alias_mod.h>
+#endif
+
+#define PPTP_CONTROL_PORT_NUMBER 1723
+
+static void
+AliasHandlePptpOut(struct libalias *, struct ip *, struct alias_link *);
+
+static void
+AliasHandlePptpIn(struct libalias *, struct ip *, struct alias_link *);
+
+static int
+AliasHandlePptpGreOut(struct libalias *, struct ip *);
+
+static int
+AliasHandlePptpGreIn(struct libalias *, struct ip *);
+
+static int
+fingerprint(struct libalias *la, struct alias_data *ah)
+{
+
+ if (ah->dport == NULL || ah->sport == NULL || ah->lnk == NULL)
+ return (-1);
+ if (ntohs(*ah->dport) == PPTP_CONTROL_PORT_NUMBER
+ || ntohs(*ah->sport) == PPTP_CONTROL_PORT_NUMBER)
+ return (0);
+ return (-1);
+}
+
+static int
+fingerprintgre(struct libalias *la, struct alias_data *ah)
+{
+
+ return (0);
+}
+
+static int
+protohandlerin(struct libalias *la, struct ip *pip, struct alias_data *ah)
+{
+
+ AliasHandlePptpIn(la, pip, ah->lnk);
+ return (0);
+}
+
+static int
+protohandlerout(struct libalias *la, struct ip *pip, struct alias_data *ah)
+{
+
+ AliasHandlePptpOut(la, pip, ah->lnk);
+ return (0);
+}
+
+static int
+protohandlergrein(struct libalias *la, struct ip *pip, struct alias_data *ah)
+{
+
+ if (la->packetAliasMode & PKT_ALIAS_PROXY_ONLY ||
+ AliasHandlePptpGreIn(la, pip) == 0)
+ return (0);
+ return (-1);
+}
+
+static int
+protohandlergreout(struct libalias *la, struct ip *pip, struct alias_data *ah)
+{
+
+ if (AliasHandlePptpGreOut(la, pip) == 0)
+ return (0);
+ return (-1);
+}
+
+/* Kernel module definition. */
+struct proto_handler handlers[] = {
+ {
+ .pri = 200,
+ .dir = IN,
+ .proto = TCP,
+ .fingerprint = &fingerprint,
+ .protohandler = &protohandlerin
+ },
+ {
+ .pri = 210,
+ .dir = OUT,
+ .proto = TCP,
+ .fingerprint = &fingerprint,
+ .protohandler = &protohandlerout
+ },
+/*
+ * WATCH OUT!!! These two handlers need a priority of INT_MAX (the highest
+ * possible) because fingerprintgre() above always matches, so they must be
+ * the last handlers in the chain.
+ */
+ {
+ .pri = INT_MAX,
+ .dir = IN,
+ .proto = IP,
+ .fingerprint = &fingerprintgre,
+ .protohandler = &protohandlergrein
+ },
+ {
+ .pri = INT_MAX,
+ .dir = OUT,
+ .proto = IP,
+ .fingerprint = &fingerprintgre,
+ .protohandler = &protohandlergreout
+ },
+ { EOH }
+};
+static int
+mod_handler(module_t mod, int type, void *data)
+{
+ int error;
+
+ switch (type) {
+ case MOD_LOAD:
+ error = 0;
+ LibAliasAttachHandlers(handlers);
+ break;
+ case MOD_UNLOAD:
+ error = 0;
+ LibAliasDetachHandlers(handlers);
+ break;
+ default:
+ error = EINVAL;
+ }
+ return (error);
+}
+
+#ifdef _KERNEL
+static
+#endif
+moduledata_t alias_mod = {
+ "alias_pptp", mod_handler, NULL
+};
+
+#ifdef _KERNEL
+DECLARE_MODULE(alias_pptp, alias_mod, SI_SUB_DRIVERS, SI_ORDER_SECOND);
+MODULE_VERSION(alias_pptp, 1);
+MODULE_DEPEND(alias_pptp, libalias, 1, 1, 1);
+#endif
+
+/*
+  Alias_pptp.c performs special processing for PPTP sessions under TCP.
+  Specifically, it watches PPTP control messages and aliases the Call ID or
+  the Peer's Call ID in the appropriate messages.  Note that PPTP requires
+  "de-aliasing" of incoming packets; this differs from the other TCP
+  applications that are currently aliased (i.e., FTP, IRC and RTSP).
+
+  For Call IDs encountered for the first time, a PPTP alias link is created.
+  The PPTP alias link uses the Call ID in place of the original port number.
+  An alias Call ID is created.
+
+  For this routine to work, the PPTP control messages must fit entirely
+  into a single TCP packet.  This is typically the case, but is not
+  required by the spec.
+
+  Unlike some of the other TCP applications that are aliased (i.e., FTP,
+  IRC and RTSP), the PPTP control messages that need to be aliased are
+  guaranteed to remain the same length, because the aliased Call ID is a
+  fixed-length field.
+
+  Reference: RFC 2637
+
+  Initial version: May, 2000 (eds)
+*/
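+
+/*
+ * Editorial illustration (not part of the original source): the net effect
+ * of the aliasing described above for an outgoing OutCallRequest, with
+ * made-up addresses and Call IDs:
+ *
+ *	before NAT:  10.0.0.2 -> 198.51.100.1, Call ID 0x1234
+ *	after  NAT:  <alias addr> -> 198.51.100.1, Call ID = GetAliasPort(pptp_lnk)
+ *
+ * Subsequent GRE data packets are then matched on the (aliased) Call ID and
+ * rewritten by AliasHandlePptpGreIn()/AliasHandlePptpGreOut(), much as a
+ * port number would be for ordinary TCP or UDP traffic.
+ */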
+
+/*
+ * PPTP definitions
+ */
+
+struct grehdr { /* Enhanced GRE header. */
+ u_int16_t gh_flags; /* Flags. */
+ u_int16_t gh_protocol; /* Protocol type. */
+ u_int16_t gh_length; /* Payload length. */
+ u_int16_t gh_call_id; /* Call ID. */
+ u_int32_t gh_seq_no; /* Sequence number (optional). */
+ u_int32_t gh_ack_no; /* Acknowledgment number
+ * (optional). */
+};
+typedef struct grehdr GreHdr;
+
+/* The PPTP protocol ID used in the GRE 'proto' field. */
+#define PPTP_GRE_PROTO 0x880b
+
+/* Bits that must be set a certain way in all PPTP/GRE packets. */
+#define PPTP_INIT_VALUE ((0x2001 << 16) | PPTP_GRE_PROTO)
+#define PPTP_INIT_MASK 0xef7fffff
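+
+/*
+ * Editorial note (not part of the original source): the two macros above
+ * treat the first 32 bits of the GRE header (gh_flags and gh_protocol) as a
+ * single word.  PPTP_INIT_VALUE requires the key-present bit and version 1
+ * (flag word 0x2001) together with protocol type 0x880b, while
+ * PPTP_INIT_MASK ignores the sequence-present and acknowledgment-present
+ * bits, which vary with whether the optional gh_seq_no/gh_ack_no fields are
+ * carried.
+ */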
+
+#define PPTP_MAGIC 0x1a2b3c4d
+#define PPTP_CTRL_MSG_TYPE 1
+
+enum {
+ PPTP_StartCtrlConnRequest = 1,
+ PPTP_StartCtrlConnReply = 2,
+ PPTP_StopCtrlConnRequest = 3,
+ PPTP_StopCtrlConnReply = 4,
+ PPTP_EchoRequest = 5,
+ PPTP_EchoReply = 6,
+ PPTP_OutCallRequest = 7,
+ PPTP_OutCallReply = 8,
+ PPTP_InCallRequest = 9,
+ PPTP_InCallReply = 10,
+ PPTP_InCallConn = 11,
+ PPTP_CallClearRequest = 12,
+ PPTP_CallDiscNotify = 13,
+ PPTP_WanErrorNotify = 14,
+ PPTP_SetLinkInfo = 15
+};
+
+/* Message structures */
+struct pptpMsgHead {
+ u_int16_t length; /* total length */
+ u_int16_t msgType;/* PPTP message type */
+ u_int32_t magic; /* magic cookie */
+ u_int16_t type; /* control message type */
+ u_int16_t resv0; /* reserved */
+};
+typedef struct pptpMsgHead *PptpMsgHead;
+
+struct pptpCodes {
+ u_int8_t resCode;/* Result Code */
+ u_int8_t errCode;/* Error Code */
+};
+typedef struct pptpCodes *PptpCode;
+
+struct pptpCallIds {
+ u_int16_t cid1; /* Call ID field #1 */
+ u_int16_t cid2; /* Call ID field #2 */
+};
+typedef struct pptpCallIds *PptpCallId;
+
+static PptpCallId AliasVerifyPptp(struct ip *, u_int16_t *);
+
+
+static void
+AliasHandlePptpOut(struct libalias *la,
+ struct ip *pip, /* IP packet to examine/patch */
+ struct alias_link *lnk)
+{ /* The PPTP control link */
+ struct alias_link *pptp_lnk;
+ PptpCallId cptr;
+ PptpCode codes;
+ u_int16_t ctl_type; /* control message type */
+ struct tcphdr *tc;
+
+ /* Verify valid PPTP control message */
+ if ((cptr = AliasVerifyPptp(pip, &ctl_type)) == NULL)
+ return;
+
+ /* Modify certain PPTP messages */
+ switch (ctl_type) {
+ case PPTP_OutCallRequest:
+ case PPTP_OutCallReply:
+ case PPTP_InCallRequest:
+ case PPTP_InCallReply:
+ /*
+ * Establish PPTP link for address and Call ID found in
+ * control message.
+ */
+ pptp_lnk = AddPptp(la, GetOriginalAddress(lnk), GetDestAddress(lnk),
+ GetAliasAddress(lnk), cptr->cid1);
+ break;
+ case PPTP_CallClearRequest:
+ case PPTP_CallDiscNotify:
+ /*
+ * Find PPTP link for address and Call ID found in control
+ * message.
+ */
+ pptp_lnk = FindPptpOutByCallId(la, GetOriginalAddress(lnk),
+ GetDestAddress(lnk),
+ cptr->cid1);
+ break;
+ default:
+ return;
+ }
+
+ if (pptp_lnk != NULL) {
+ int accumulate = cptr->cid1;
+
+ /* alias the Call Id */
+ cptr->cid1 = GetAliasPort(pptp_lnk);
+
+ /* Compute TCP checksum for revised packet */
+ tc = (struct tcphdr *)ip_next(pip);
+ accumulate -= cptr->cid1;
+ ADJUST_CHECKSUM(accumulate, tc->th_sum);
+
+ switch (ctl_type) {
+ case PPTP_OutCallReply:
+ case PPTP_InCallReply:
+ codes = (PptpCode) (cptr + 1);
+			if (codes->resCode == 1)
+				/* Connection established; note the Peer's
+				 * Call ID. */
+				SetDestCallId(pptp_lnk, cptr->cid2);
+			else
+				SetExpire(pptp_lnk, 0);	/* Connection refused. */
+ break;
+ case PPTP_CallDiscNotify: /* Connection closed. */
+ SetExpire(pptp_lnk, 0);
+ break;
+ }
+ }
+}
+
+static void
+AliasHandlePptpIn(struct libalias *la,
+ struct ip *pip, /* IP packet to examine/patch */
+ struct alias_link *lnk)
+{ /* The PPTP control link */
+ struct alias_link *pptp_lnk;
+ PptpCallId cptr;
+ u_int16_t *pcall_id;
+ u_int16_t ctl_type; /* control message type */
+ struct tcphdr *tc;
+
+ /* Verify valid PPTP control message */
+ if ((cptr = AliasVerifyPptp(pip, &ctl_type)) == NULL)
+ return;
+
+ /* Modify certain PPTP messages */
+ switch (ctl_type) {
+ case PPTP_InCallConn:
+ case PPTP_WanErrorNotify:
+ case PPTP_SetLinkInfo:
+ pcall_id = &cptr->cid1;
+ break;
+ case PPTP_OutCallReply:
+ case PPTP_InCallReply:
+ pcall_id = &cptr->cid2;
+ break;
+ case PPTP_CallDiscNotify: /* Connection closed. */
+ pptp_lnk = FindPptpInByCallId(la, GetDestAddress(lnk),
+ GetAliasAddress(lnk),
+ cptr->cid1);
+ if (pptp_lnk != NULL)
+ SetExpire(pptp_lnk, 0);
+ return;
+ default:
+ return;
+ }
+
+ /* Find PPTP link for address and Call ID found in PPTP Control Msg */
+ pptp_lnk = FindPptpInByPeerCallId(la, GetDestAddress(lnk),
+ GetAliasAddress(lnk),
+ *pcall_id);
+
+ if (pptp_lnk != NULL) {
+ int accumulate = *pcall_id;
+
+ /* De-alias the Peer's Call Id. */
+ *pcall_id = GetOriginalPort(pptp_lnk);
+
+ /* Compute TCP checksum for modified packet */
+ tc = (struct tcphdr *)ip_next(pip);
+ accumulate -= *pcall_id;
+ ADJUST_CHECKSUM(accumulate, tc->th_sum);
+
+ if (ctl_type == PPTP_OutCallReply || ctl_type == PPTP_InCallReply) {
+ PptpCode codes = (PptpCode) (cptr + 1);
+
+			if (codes->resCode == 1)
+				/* Connection established; note the Call ID. */
+				SetDestCallId(pptp_lnk, cptr->cid1);
+			else
+				SetExpire(pptp_lnk, 0);	/* Connection refused. */
+ }
+ }
+}
+
+static PptpCallId
+AliasVerifyPptp(struct ip *pip, u_int16_t * ptype)
+{ /* IP packet to examine/patch */
+ int hlen, tlen, dlen;
+ PptpMsgHead hptr;
+ struct tcphdr *tc;
+
+ /* Calculate some lengths */
+ tc = (struct tcphdr *)ip_next(pip);
+ hlen = (pip->ip_hl + tc->th_off) << 2;
+ tlen = ntohs(pip->ip_len);
+ dlen = tlen - hlen;
+
+ /* Verify data length */
+ if (dlen < (int)(sizeof(struct pptpMsgHead) + sizeof(struct pptpCallIds)))
+ return (NULL);
+
+ /* Move up to PPTP message header */
+ hptr = (PptpMsgHead) tcp_next(tc);
+
+ /* Return the control message type */
+ *ptype = ntohs(hptr->type);
+
+ /* Verify PPTP Control Message */
+ if ((ntohs(hptr->msgType) != PPTP_CTRL_MSG_TYPE) ||
+ (ntohl(hptr->magic) != PPTP_MAGIC))
+ return (NULL);
+
+ /* Verify data length. */
+ if ((*ptype == PPTP_OutCallReply || *ptype == PPTP_InCallReply) &&
+ (dlen < (int)(sizeof(struct pptpMsgHead) + sizeof(struct pptpCallIds) +
+ sizeof(struct pptpCodes))))
+ return (NULL);
+ else
+ return (PptpCallId) (hptr + 1);
+}
+
+static int
+AliasHandlePptpGreOut(struct libalias *la, struct ip *pip)
+{
+ GreHdr *gr;
+ struct alias_link *lnk;
+
+ gr = (GreHdr *) ip_next(pip);
+
+ /* Check GRE header bits. */
+ if ((ntohl(*((u_int32_t *) gr)) & PPTP_INIT_MASK) != PPTP_INIT_VALUE)
+ return (-1);
+
+ lnk = FindPptpOutByPeerCallId(la, pip->ip_src, pip->ip_dst, gr->gh_call_id);
+ if (lnk != NULL) {
+ struct in_addr alias_addr = GetAliasAddress(lnk);
+
+ /* Change source IP address. */
+ DifferentialChecksum(&pip->ip_sum,
+ &alias_addr, &pip->ip_src, 2);
+ pip->ip_src = alias_addr;
+ }
+ return (0);
+}
+
+static int
+AliasHandlePptpGreIn(struct libalias *la, struct ip *pip)
+{
+ GreHdr *gr;
+ struct alias_link *lnk;
+
+ gr = (GreHdr *) ip_next(pip);
+
+ /* Check GRE header bits. */
+ if ((ntohl(*((u_int32_t *) gr)) & PPTP_INIT_MASK) != PPTP_INIT_VALUE)
+ return (-1);
+
+ lnk = FindPptpInByPeerCallId(la, pip->ip_src, pip->ip_dst, gr->gh_call_id);
+ if (lnk != NULL) {
+ struct in_addr src_addr = GetOriginalAddress(lnk);
+
+ /* De-alias the Peer's Call Id. */
+ gr->gh_call_id = GetOriginalPort(lnk);
+
+ /* Restore original IP address. */
+ DifferentialChecksum(&pip->ip_sum,
+ &src_addr, &pip->ip_dst, 2);
+ pip->ip_dst = src_addr;
+ }
+ return (0);
+}
diff --git a/rtems/freebsd/netinet/libalias/alias_proxy.c b/rtems/freebsd/netinet/libalias/alias_proxy.c
new file mode 100644
index 00000000..5fafc94d
--- /dev/null
+++ b/rtems/freebsd/netinet/libalias/alias_proxy.c
@@ -0,0 +1,870 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 2001 Charles Mott <cm@linktel.net>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/* file: alias_proxy.c
+
+ This file encapsulates special operations related to transparent
+ proxy redirection. This is where packets with a particular destination,
+ usually tcp port 80, are redirected to a proxy server.
+
+ When packets are proxied, the destination address and port are
+ modified. In certain cases, it is necessary to somehow encode
+ the original address/port info into the packet. Two methods are
+ presently supported: addition of a [DEST addr port] string at the
+ beginning of a tcp stream, or inclusion of an optional field
+ in the IP header.
+
+ There is one public API function:
+
+ PacketAliasProxyRule() -- Adds and deletes proxy
+ rules.
+
+ Rules are stored in a linear linked list, so lookup efficiency
+ won't be too good for large lists.
+
+
+ Initial development: April, 1998 (cjm)
+*/
+
+
+/* System includes */
+#ifdef _KERNEL
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/ctype.h>
+#include <rtems/freebsd/sys/libkern.h>
+#include <rtems/freebsd/sys/limits.h>
+#else
+#include <rtems/freebsd/sys/types.h>
+#include <rtems/freebsd/ctype.h>
+#include <rtems/freebsd/stdio.h>
+#include <rtems/freebsd/stdlib.h>
+#include <rtems/freebsd/netdb.h>
+#include <rtems/freebsd/string.h>
+#endif
+
+#include <rtems/freebsd/netinet/tcp.h>
+
+#ifdef _KERNEL
+#include <rtems/freebsd/netinet/libalias/alias.h>
+#include <rtems/freebsd/netinet/libalias/alias_local.h>
+#include <rtems/freebsd/netinet/libalias/alias_mod.h>
+#else
+#include <rtems/freebsd/arpa/inet.h>
+#include <rtems/freebsd/local/alias.h> /* Public API functions for libalias */
+#include <rtems/freebsd/local/alias_local.h> /* Functions used by alias*.c */
+#endif
+
+/*
+ Data structures
+ */
+
+/*
+ * A linked list of arbitrary length, based on struct proxy_entry is
+ * used to store proxy rules.
+ */
+struct proxy_entry {
+ struct libalias *la;
+#define PROXY_TYPE_ENCODE_NONE 1
+#define PROXY_TYPE_ENCODE_TCPSTREAM 2
+#define PROXY_TYPE_ENCODE_IPHDR 3
+ int rule_index;
+ int proxy_type;
+ u_char proto;
+ u_short proxy_port;
+ u_short server_port;
+
+ struct in_addr server_addr;
+
+ struct in_addr src_addr;
+ struct in_addr src_mask;
+
+ struct in_addr dst_addr;
+ struct in_addr dst_mask;
+
+ struct proxy_entry *next;
+ struct proxy_entry *last;
+};
+
+
+
+/*
+ File scope variables
+*/
+
+
+
+/* Local (static) functions:
+
+ IpMask() -- Utility function for creating IP
+ masks from integer (1-32) specification.
+ IpAddr() -- Utility function for converting string
+ to IP address
+ IpPort() -- Utility function for converting string
+ to port number
+ RuleAdd() -- Adds an element to the rule list.
+ RuleDelete() -- Removes an element from the rule list.
+ RuleNumberDelete() -- Removes all elements from the rule list
+ having a certain rule number.
+ ProxyEncodeTcpStream() -- Adds [DEST x.x.x.x xxxx] to the beginning
+ of a TCP stream.
+ ProxyEncodeIpHeader() -- Adds an IP option indicating the true
+ destination of a proxied IP packet
+*/
+
+static int IpMask(int, struct in_addr *);
+static int IpAddr(char *, struct in_addr *);
+static int IpPort(char *, int, int *);
+static void RuleAdd(struct libalias *la, struct proxy_entry *);
+static void RuleDelete(struct proxy_entry *);
+static int RuleNumberDelete(struct libalias *la, int);
+static void ProxyEncodeTcpStream(struct alias_link *, struct ip *, int);
+static void ProxyEncodeIpHeader(struct ip *, int);
+
+static int
+IpMask(int nbits, struct in_addr *mask)
+{
+ int i;
+ u_int imask;
+
+ if (nbits < 0 || nbits > 32)
+ return (-1);
+
+ imask = 0;
+ for (i = 0; i < nbits; i++)
+ imask = (imask >> 1) + 0x80000000;
+ mask->s_addr = htonl(imask);
+
+ return (0);
+}
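+
+/*
+ * Example (editorial): IpMask(24, &m) stores htonl(0xffffff00), i.e.
+ * 255.255.255.0; IpMask(0, &m) stores the all-zero "match anything" mask
+ * that LibAliasProxyRule() uses as the default for the src/dst fields.
+ */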
+
+static int
+IpAddr(char *s, struct in_addr *addr)
+{
+ if (inet_aton(s, addr) == 0)
+ return (-1);
+ else
+ return (0);
+}
+
+static int
+IpPort(char *s, int proto, int *port)
+{
+ int n;
+
+ n = sscanf(s, "%d", port);
+ if (n != 1)
+#ifndef _KERNEL /* XXX: we accept only numeric ports in kernel */
+ {
+ struct servent *se;
+
+ if (proto == IPPROTO_TCP)
+ se = getservbyname(s, "tcp");
+ else if (proto == IPPROTO_UDP)
+ se = getservbyname(s, "udp");
+ else
+ return (-1);
+
+ if (se == NULL)
+ return (-1);
+
+ *port = (u_int) ntohs(se->s_port);
+ }
+#else
+ return (-1);
+#endif
+ return (0);
+}
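+
+/*
+ * Example (editorial): IpPort("80", IPPROTO_TCP, &p) stores 80.  In the
+ * userland build, IpPort("http", IPPROTO_TCP, &p) also succeeds via
+ * getservbyname(); in the kernel build only numeric ports are accepted, as
+ * the XXX comment above notes.
+ */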
+
+static void
+RuleAdd(struct libalias *la, struct proxy_entry *entry)
+{
+ int rule_index;
+ struct proxy_entry *ptr;
+ struct proxy_entry *ptr_last;
+
+ LIBALIAS_LOCK_ASSERT(la);
+
+	/* Set the back pointer before any early return so that RuleDelete()
+	 * can always find the owning instance. */
+	entry->la = la;
+	if (la->proxyList == NULL) {
+		la->proxyList = entry;
+		entry->last = NULL;
+		entry->next = NULL;
+		return;
+	}
+
+ rule_index = entry->rule_index;
+ ptr = la->proxyList;
+ ptr_last = NULL;
+ while (ptr != NULL) {
+ if (ptr->rule_index >= rule_index) {
+ if (ptr_last == NULL) {
+ entry->next = la->proxyList;
+ entry->last = NULL;
+ la->proxyList->last = entry;
+ la->proxyList = entry;
+ return;
+ }
+			/* Insert entry between ptr_last and ptr. */
+			entry->last = ptr_last;
+			entry->next = ptr;
+			ptr_last->next = entry;
+			ptr->last = entry;
+ return;
+ }
+ ptr_last = ptr;
+ ptr = ptr->next;
+ }
+
+ ptr_last->next = entry;
+ entry->last = ptr_last;
+ entry->next = NULL;
+}
+
+static void
+RuleDelete(struct proxy_entry *entry)
+{
+ struct libalias *la;
+
+ la = entry->la;
+ LIBALIAS_LOCK_ASSERT(la);
+ if (entry->last != NULL)
+ entry->last->next = entry->next;
+ else
+ la->proxyList = entry->next;
+
+ if (entry->next != NULL)
+ entry->next->last = entry->last;
+
+ free(entry);
+}
+
+static int
+RuleNumberDelete(struct libalias *la, int rule_index)
+{
+ int err;
+ struct proxy_entry *ptr;
+
+ LIBALIAS_LOCK_ASSERT(la);
+ err = -1;
+ ptr = la->proxyList;
+ while (ptr != NULL) {
+ struct proxy_entry *ptr_next;
+
+ ptr_next = ptr->next;
+ if (ptr->rule_index == rule_index) {
+ err = 0;
+ RuleDelete(ptr);
+ }
+ ptr = ptr_next;
+ }
+
+ return (err);
+}
+
+static void
+ProxyEncodeTcpStream(struct alias_link *lnk,
+ struct ip *pip,
+ int maxpacketsize)
+{
+ int slen;
+ char buffer[40];
+ struct tcphdr *tc;
+
+/* Compute pointer to tcp header */
+ tc = (struct tcphdr *)ip_next(pip);
+
+/* Don't modify if once already modified */
+
+ if (GetAckModified(lnk))
+ return;
+
+/* Translate destination address and port to string form */
+ snprintf(buffer, sizeof(buffer) - 2, "[DEST %s %d]",
+ inet_ntoa(GetProxyAddress(lnk)), (u_int) ntohs(GetProxyPort(lnk)));
+
+/* Pad string out to a multiple of two in length */
+ slen = strlen(buffer);
+ switch (slen % 2) {
+ case 0:
+ strcat(buffer, " \n");
+ slen += 2;
+ break;
+ case 1:
+ strcat(buffer, "\n");
+ slen += 1;
+ }
+
+/* Check for packet overflow */
+ if ((int)(ntohs(pip->ip_len) + strlen(buffer)) > maxpacketsize)
+ return;
+
+/* Shift existing TCP data and insert destination string */
+ {
+ int dlen;
+ int hlen;
+ char *p;
+
+ hlen = (pip->ip_hl + tc->th_off) << 2;
+ dlen = ntohs(pip->ip_len) - hlen;
+
+/* Modify first packet that has data in it */
+
+ if (dlen == 0)
+ return;
+
+ p = (char *)pip;
+ p += hlen;
+
+ bcopy(p, p + slen, dlen);
+ memcpy(p, buffer, slen);
+ }
+
+/* Save information about modified sequence number */
+ {
+ int delta;
+
+ SetAckModified(lnk);
+ tc = (struct tcphdr *)ip_next(pip);
+ delta = GetDeltaSeqOut(tc->th_seq, lnk);
+ AddSeq(lnk, delta + slen, pip->ip_hl, pip->ip_len, tc->th_seq,
+ tc->th_off);
+ }
+
+/* Update IP header packet length and checksum */
+ {
+ int accumulate;
+
+ accumulate = pip->ip_len;
+ pip->ip_len = htons(ntohs(pip->ip_len) + slen);
+ accumulate -= pip->ip_len;
+
+ ADJUST_CHECKSUM(accumulate, pip->ip_sum);
+ }
+
+/* Update the TCP checksum.  Use TcpChecksum() since so many things have
+   already changed. */
+
+ tc->th_sum = 0;
+#ifdef _KERNEL
+ tc->th_x2 = 1;
+#else
+ tc->th_sum = TcpChecksum(pip);
+#endif
+}
+
+static void
+ProxyEncodeIpHeader(struct ip *pip,
+ int maxpacketsize)
+{
+#define OPTION_LEN_BYTES 8
+#define OPTION_LEN_INT16 4
+#define OPTION_LEN_INT32 2
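+
+/*
+ * Editorial note: the option assembled below occupies OPTION_LEN_BYTES (8)
+ * bytes:
+ *
+ *	byte  0     0x64 (copied flag clear, class 3 "reserved", option 4)
+ *	byte  1     option length (8)
+ *	bytes 2-5   copied from pip->ip_dst
+ *	bytes 6-7   copied from the TCP source-port field
+ */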
+ u_char option[OPTION_LEN_BYTES];
+
+#ifdef LIBALIAS_DEBUG
+ fprintf(stdout, " ip cksum 1 = %x\n", (u_int) IpChecksum(pip));
+ fprintf(stdout, "tcp cksum 1 = %x\n", (u_int) TcpChecksum(pip));
+#endif
+
+ (void)maxpacketsize;
+
+/* Check to see that there is room to add an IP option */
+ if (pip->ip_hl > (0x0f - OPTION_LEN_INT32))
+ return;
+
+/* Build option and copy into packet */
+ {
+ u_char *ptr;
+ struct tcphdr *tc;
+
+ ptr = (u_char *) pip;
+ ptr += 20;
+ memcpy(ptr + OPTION_LEN_BYTES, ptr, ntohs(pip->ip_len) - 20);
+
+ option[0] = 0x64; /* class: 3 (reserved), option 4 */
+ option[1] = OPTION_LEN_BYTES;
+
+ memcpy(&option[2], (u_char *) & pip->ip_dst, 4);
+
+ tc = (struct tcphdr *)ip_next(pip);
+ memcpy(&option[6], (u_char *) & tc->th_sport, 2);
+
+ memcpy(ptr, option, 8);
+ }
+
+/* Update checksum, header length and packet length */
+ {
+ int i;
+ int accumulate;
+ u_short *sptr;
+
+ sptr = (u_short *) option;
+ accumulate = 0;
+ for (i = 0; i < OPTION_LEN_INT16; i++)
+ accumulate -= *(sptr++);
+
+ sptr = (u_short *) pip;
+ accumulate += *sptr;
+ pip->ip_hl += OPTION_LEN_INT32;
+ accumulate -= *sptr;
+
+ accumulate += pip->ip_len;
+ pip->ip_len = htons(ntohs(pip->ip_len) + OPTION_LEN_BYTES);
+ accumulate -= pip->ip_len;
+
+ ADJUST_CHECKSUM(accumulate, pip->ip_sum);
+ }
+#undef OPTION_LEN_BYTES
+#undef OPTION_LEN_INT16
+#undef OPTION_LEN_INT32
+#ifdef LIBALIAS_DEBUG
+ fprintf(stdout, " ip cksum 2 = %x\n", (u_int) IpChecksum(pip));
+ fprintf(stdout, "tcp cksum 2 = %x\n", (u_int) TcpChecksum(pip));
+#endif
+}
+
+
+/* Functions used by other packet alias source files
+
+ ProxyCheck() -- Checks whether an outgoing packet should
+ be proxied.
+ ProxyModify() -- Encodes the original destination address/port
+ for a packet which is to be redirected to
+ a proxy server.
+*/
+
+int
+ProxyCheck(struct libalias *la, struct in_addr *proxy_server_addr,
+ u_short * proxy_server_port, struct in_addr src_addr,
+ struct in_addr dst_addr, u_short dst_port, u_char ip_p)
+{
+ struct proxy_entry *ptr;
+
+ LIBALIAS_LOCK_ASSERT(la);
+
+ ptr = la->proxyList;
+ while (ptr != NULL) {
+ u_short proxy_port;
+
+ proxy_port = ptr->proxy_port;
+ if ((dst_port == proxy_port || proxy_port == 0)
+ && ip_p == ptr->proto
+ && src_addr.s_addr != ptr->server_addr.s_addr) {
+ struct in_addr src_addr_masked;
+ struct in_addr dst_addr_masked;
+
+ src_addr_masked.s_addr = src_addr.s_addr & ptr->src_mask.s_addr;
+ dst_addr_masked.s_addr = dst_addr.s_addr & ptr->dst_mask.s_addr;
+
+ if ((src_addr_masked.s_addr == ptr->src_addr.s_addr)
+ && (dst_addr_masked.s_addr == ptr->dst_addr.s_addr)) {
+ if ((*proxy_server_port = ptr->server_port) == 0)
+ *proxy_server_port = dst_port;
+ *proxy_server_addr = ptr->server_addr;
+ return (ptr->proxy_type);
+ }
+ }
+ ptr = ptr->next;
+ }
+
+ return (0);
+}
+
+void
+ProxyModify(struct libalias *la, struct alias_link *lnk,
+ struct ip *pip,
+ int maxpacketsize,
+ int proxy_type)
+{
+
+ LIBALIAS_LOCK_ASSERT(la);
+ (void)la;
+
+ switch (proxy_type) {
+ case PROXY_TYPE_ENCODE_IPHDR:
+ ProxyEncodeIpHeader(pip, maxpacketsize);
+ break;
+
+ case PROXY_TYPE_ENCODE_TCPSTREAM:
+ ProxyEncodeTcpStream(lnk, pip, maxpacketsize);
+ break;
+ }
+}
+
+
+/*
+ Public API functions
+*/
+
+int
+LibAliasProxyRule(struct libalias *la, const char *cmd)
+{
+/*
+ * This function takes command strings of the form:
+ *
+ * server <addr>[:<port>]
+ * [port <port>]
+ * [rule n]
+ * [proto tcp|udp]
+ * [src <addr>[/n]]
+ * [dst <addr>[/n]]
+ * [type encode_tcp_stream|encode_ip_hdr|no_encode]
+ *
+ * delete <rule number>
+ *
+ * Subfields can be in arbitrary order. Port numbers and addresses
+ * must be in either numeric or symbolic form. An optional rule number
+ * is used to control the order in which rules are searched. If two
+ * rules have the same number, then search order cannot be guaranteed,
+ * and the rules should be disjoint. If no rule number is specified,
+ * then 0 is used, and group 0 rules are always checked before any
+ * others.
+ */
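+/*
+ * Usage example (editorial; the addresses and ports are made up):
+ *
+ *	LibAliasProxyRule(la, "server 10.0.0.5:3128 port 80 proto tcp");
+ *		redirects outbound HTTP to a transparent proxy at 10.0.0.5:3128
+ *	LibAliasProxyRule(la, "delete 0");
+ *		removes all rules with rule number 0
+ */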
+ int i, n, len, ret;
+ int cmd_len;
+ int token_count;
+ int state;
+ char *token;
+ char buffer[256];
+ char str_port[sizeof(buffer)];
+ char str_server_port[sizeof(buffer)];
+ char *res = buffer;
+
+ int rule_index;
+ int proto;
+ int proxy_type;
+ int proxy_port;
+ int server_port;
+ struct in_addr server_addr;
+ struct in_addr src_addr, src_mask;
+ struct in_addr dst_addr, dst_mask;
+ struct proxy_entry *proxy_entry;
+
+ LIBALIAS_LOCK(la);
+ ret = 0;
+/* Copy command line into a buffer */
+ cmd += strspn(cmd, " \t");
+ cmd_len = strlen(cmd);
+ if (cmd_len > (int)(sizeof(buffer) - 1)) {
+ ret = -1;
+ goto getout;
+ }
+ strcpy(buffer, cmd);
+
+/* Convert to lower case */
+ len = strlen(buffer);
+ for (i = 0; i < len; i++)
+ buffer[i] = tolower((unsigned char)buffer[i]);
+
+/* Set up default values */
+ rule_index = 0;
+ proxy_type = PROXY_TYPE_ENCODE_NONE;
+ proto = IPPROTO_TCP;
+ proxy_port = 0;
+ server_addr.s_addr = 0;
+ server_port = 0;
+ src_addr.s_addr = 0;
+ IpMask(0, &src_mask);
+ dst_addr.s_addr = 0;
+ IpMask(0, &dst_mask);
+
+ str_port[0] = 0;
+ str_server_port[0] = 0;
+
+/* Parse command string with state machine */
+#define STATE_READ_KEYWORD 0
+#define STATE_READ_TYPE 1
+#define STATE_READ_PORT 2
+#define STATE_READ_SERVER 3
+#define STATE_READ_RULE 4
+#define STATE_READ_DELETE 5
+#define STATE_READ_PROTO 6
+#define STATE_READ_SRC 7
+#define STATE_READ_DST 8
+ state = STATE_READ_KEYWORD;
+ token = strsep(&res, " \t");
+ token_count = 0;
+ while (token != NULL) {
+ token_count++;
+ switch (state) {
+ case STATE_READ_KEYWORD:
+ if (strcmp(token, "type") == 0)
+ state = STATE_READ_TYPE;
+ else if (strcmp(token, "port") == 0)
+ state = STATE_READ_PORT;
+ else if (strcmp(token, "server") == 0)
+ state = STATE_READ_SERVER;
+ else if (strcmp(token, "rule") == 0)
+ state = STATE_READ_RULE;
+ else if (strcmp(token, "delete") == 0)
+ state = STATE_READ_DELETE;
+ else if (strcmp(token, "proto") == 0)
+ state = STATE_READ_PROTO;
+ else if (strcmp(token, "src") == 0)
+ state = STATE_READ_SRC;
+ else if (strcmp(token, "dst") == 0)
+ state = STATE_READ_DST;
+ else {
+ ret = -1;
+ goto getout;
+ }
+ break;
+
+ case STATE_READ_TYPE:
+ if (strcmp(token, "encode_ip_hdr") == 0)
+ proxy_type = PROXY_TYPE_ENCODE_IPHDR;
+ else if (strcmp(token, "encode_tcp_stream") == 0)
+ proxy_type = PROXY_TYPE_ENCODE_TCPSTREAM;
+ else if (strcmp(token, "no_encode") == 0)
+ proxy_type = PROXY_TYPE_ENCODE_NONE;
+ else {
+ ret = -1;
+ goto getout;
+ }
+ state = STATE_READ_KEYWORD;
+ break;
+
+ case STATE_READ_PORT:
+ strcpy(str_port, token);
+ state = STATE_READ_KEYWORD;
+ break;
+
+ case STATE_READ_SERVER:
+ {
+ int err;
+ char *p;
+ char s[sizeof(buffer)];
+
+ p = token;
+ while (*p != ':' && *p != 0)
+ p++;
+
+ if (*p != ':') {
+ err = IpAddr(token, &server_addr);
+ if (err) {
+ ret = -1;
+ goto getout;
+ }
+ } else {
+ *p = ' ';
+
+ n = sscanf(token, "%s %s", s, str_server_port);
+ if (n != 2) {
+ ret = -1;
+ goto getout;
+ }
+
+ err = IpAddr(s, &server_addr);
+ if (err) {
+ ret = -1;
+ goto getout;
+ }
+ }
+ }
+ state = STATE_READ_KEYWORD;
+ break;
+
+ case STATE_READ_RULE:
+ n = sscanf(token, "%d", &rule_index);
+ if (n != 1 || rule_index < 0) {
+ ret = -1;
+ goto getout;
+ }
+ state = STATE_READ_KEYWORD;
+ break;
+
+ case STATE_READ_DELETE:
+ {
+ int err;
+ int rule_to_delete;
+
+ if (token_count != 2) {
+ ret = -1;
+ goto getout;
+ }
+
+ n = sscanf(token, "%d", &rule_to_delete);
+ if (n != 1) {
+ ret = -1;
+ goto getout;
+ }
+ err = RuleNumberDelete(la, rule_to_delete);
+ if (err)
+				ret = -1;
+			else
+				ret = 0;
+ goto getout;
+ }
+
+ case STATE_READ_PROTO:
+ if (strcmp(token, "tcp") == 0)
+ proto = IPPROTO_TCP;
+ else if (strcmp(token, "udp") == 0)
+ proto = IPPROTO_UDP;
+ else {
+ ret = -1;
+ goto getout;
+ }
+ state = STATE_READ_KEYWORD;
+ break;
+
+ case STATE_READ_SRC:
+ case STATE_READ_DST:
+ {
+ int err;
+ char *p;
+ struct in_addr mask;
+ struct in_addr addr;
+
+ p = token;
+ while (*p != '/' && *p != 0)
+ p++;
+
+ if (*p != '/') {
+ IpMask(32, &mask);
+ err = IpAddr(token, &addr);
+ if (err) {
+ ret = -1;
+ goto getout;
+ }
+ } else {
+ int nbits;
+ char s[sizeof(buffer)];
+
+ *p = ' ';
+ n = sscanf(token, "%s %d", s, &nbits);
+ if (n != 2) {
+ ret = -1;
+ goto getout;
+ }
+
+ err = IpAddr(s, &addr);
+ if (err) {
+ ret = -1;
+ goto getout;
+ }
+
+ err = IpMask(nbits, &mask);
+ if (err) {
+ ret = -1;
+ goto getout;
+ }
+ }
+
+ if (state == STATE_READ_SRC) {
+ src_addr = addr;
+ src_mask = mask;
+ } else {
+ dst_addr = addr;
+ dst_mask = mask;
+ }
+ }
+ state = STATE_READ_KEYWORD;
+ break;
+
+ default:
+ ret = -1;
+ goto getout;
+ break;
+ }
+
+ do {
+ token = strsep(&res, " \t");
+ } while (token != NULL && !*token);
+ }
+#undef STATE_READ_KEYWORD
+#undef STATE_READ_TYPE
+#undef STATE_READ_PORT
+#undef STATE_READ_SERVER
+#undef STATE_READ_RULE
+#undef STATE_READ_DELETE
+#undef STATE_READ_PROTO
+#undef STATE_READ_SRC
+#undef STATE_READ_DST
+
+/* Convert port strings to numbers.  This needs to be done after
+   the string is parsed, because the protocol might not be designated
+   before the ports (which might be symbolic entries in /etc/services) */
+
+ if (strlen(str_port) != 0) {
+ int err;
+
+ err = IpPort(str_port, proto, &proxy_port);
+ if (err) {
+ ret = -1;
+ goto getout;
+ }
+ } else {
+ proxy_port = 0;
+ }
+
+ if (strlen(str_server_port) != 0) {
+ int err;
+
+ err = IpPort(str_server_port, proto, &server_port);
+ if (err) {
+ ret = -1;
+ goto getout;
+ }
+ } else {
+ server_port = 0;
+ }
+
+/* Check that at least the server address has been defined */
+ if (server_addr.s_addr == 0) {
+ ret = -1;
+ goto getout;
+ }
+
+/* Add to linked list */
+ proxy_entry = malloc(sizeof(struct proxy_entry));
+ if (proxy_entry == NULL) {
+ ret = -1;
+ goto getout;
+ }
+
+ proxy_entry->proxy_type = proxy_type;
+ proxy_entry->rule_index = rule_index;
+ proxy_entry->proto = proto;
+ proxy_entry->proxy_port = htons(proxy_port);
+ proxy_entry->server_port = htons(server_port);
+ proxy_entry->server_addr = server_addr;
+ proxy_entry->src_addr.s_addr = src_addr.s_addr & src_mask.s_addr;
+ proxy_entry->dst_addr.s_addr = dst_addr.s_addr & dst_mask.s_addr;
+ proxy_entry->src_mask = src_mask;
+ proxy_entry->dst_mask = dst_mask;
+
+ RuleAdd(la, proxy_entry);
+
+getout:
+ LIBALIAS_UNLOCK(la);
+ return (ret);
+}
diff --git a/rtems/freebsd/netinet/libalias/alias_sctp.c b/rtems/freebsd/netinet/libalias/alias_sctp.c
new file mode 100644
index 00000000..4ebac8ff
--- /dev/null
+++ b/rtems/freebsd/netinet/libalias/alias_sctp.c
@@ -0,0 +1,2700 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 2008
+ * Swinburne University of Technology, Melbourne, Australia.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS "AS IS" AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * Alias_sctp forms part of the libalias kernel module to handle
+ * Network Address Translation (NAT) for the SCTP protocol.
+ *
+ * This software was developed by David A. Hayes and Jason But
+ *
+ * The design is outlined in CAIA technical report number 080618A
+ * (D. Hayes and J. But, "Alias_sctp Version 0.1: SCTP NAT implementation in IPFW")
+ *
+ * Development is part of the CAIA SONATA project,
+ * proposed by Jason But and Grenville Armitage:
+ * http://caia.swin.edu.au/urp/sonata/
+ *
+ *
+ * This project has been made possible in part by a grant from
+ * the Cisco University Research Program Fund at Community
+ * Foundation Silicon Valley.
+ *
+ */
+/** @mainpage
+ * Alias_sctp is part of the SONATA (http://caia.swin.edu.au/urp/sonata) project
+ * to develop and release a BSD licensed implementation of a Network Address
+ * Translation (NAT) module that supports the Stream Control Transmission
+ * Protocol (SCTP).
+ *
+ * Traditional address and port number look ups are inadequate for SCTP's
+ * operation due to both processing requirements and issues with multi-homing.
+ * Alias_sctp integrates with FreeBSD's ipfw/libalias NAT system.
+ *
+ * Version 0.2 features include:
+ * - Support for global multi-homing
+ * - Support for ASCONF modification from Internet Draft
+ * (draft-stewart-behave-sctpnat-04, R. Stewart and M. Tuexen, "Stream control
+ * transmission protocol (SCTP) network address translation," Jul. 2008) to
+ * provide support for multi-homed privately addressed hosts
+ * - Support for forwarding of T-flagged packets
+ * - Generation and delivery of AbortM/ErrorM packets upon detection of NAT
+ * collisions
+ * - Per-port forwarding rules
+ * - Dynamically controllable logging and statistics
+ * - Dynamic management of timers
+ * - Dynamic control of hash-table size
+ */
+
+/* $FreeBSD$ */
+
+#ifdef _KERNEL
+#include <rtems/freebsd/machine/stdarg.h>
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/module.h>
+#include <rtems/freebsd/sys/syslog.h>
+#include <rtems/freebsd/netinet/libalias/alias_sctp.h>
+#include <rtems/freebsd/netinet/libalias/alias.h>
+#include <rtems/freebsd/netinet/libalias/alias_local.h>
+#include <rtems/freebsd/netinet/sctp_crc32.h>
+#include <rtems/freebsd/machine/in_cksum.h>
+#else
+#include <rtems/freebsd/local/alias_sctp.h>
+#include <rtems/freebsd/arpa/inet.h>
+#include <rtems/freebsd/local/alias.h>
+#include <rtems/freebsd/local/alias_local.h>
+#include <rtems/freebsd/machine/in_cksum.h>
+#include <rtems/freebsd/sys/libkern.h>
+#endif //#ifdef _KERNEL
+
+/* ----------------------------------------------------------------------
+ * FUNCTION PROTOTYPES
+ * ----------------------------------------------------------------------
+ */
+/* Packet Parsing Functions */
+static int sctp_PktParser(struct libalias *la, int direction, struct ip *pip,
+ struct sctp_nat_msg *sm, struct sctp_nat_assoc **passoc);
+static int GetAsconfVtags(struct libalias *la, struct sctp_nat_msg *sm,
+ uint32_t *l_vtag, uint32_t *g_vtag, int direction);
+static int IsASCONFack(struct libalias *la, struct sctp_nat_msg *sm, int direction);
+
+static void AddGlobalIPAddresses(struct sctp_nat_msg *sm, struct sctp_nat_assoc *assoc, int direction);
+static int Add_Global_Address_to_List(struct sctp_nat_assoc *assoc, struct sctp_GlobalAddress *G_addr);
+static void RmGlobalIPAddresses(struct sctp_nat_msg *sm, struct sctp_nat_assoc *assoc, int direction);
+static int IsADDorDEL(struct libalias *la, struct sctp_nat_msg *sm, int direction);
+
+/* State Machine Functions */
+static int ProcessSctpMsg(struct libalias *la, int direction, \
+ struct sctp_nat_msg *sm, struct sctp_nat_assoc *assoc);
+
+static int ID_process(struct libalias *la, int direction,\
+ struct sctp_nat_assoc *assoc, struct sctp_nat_msg *sm);
+static int INi_process(struct libalias *la, int direction,\
+ struct sctp_nat_assoc *assoc, struct sctp_nat_msg *sm);
+static int INa_process(struct libalias *la, int direction,\
+ struct sctp_nat_assoc *assoc, struct sctp_nat_msg *sm);
+static int UP_process(struct libalias *la, int direction,\
+ struct sctp_nat_assoc *assoc, struct sctp_nat_msg *sm);
+static int CL_process(struct libalias *la, int direction,\
+ struct sctp_nat_assoc *assoc, struct sctp_nat_msg *sm);
+static void TxAbortErrorM(struct libalias *la, struct sctp_nat_msg *sm,\
+ struct sctp_nat_assoc *assoc, int sndrply, int direction);
+
+/* Hash Table Functions */
+static struct sctp_nat_assoc*
+FindSctpLocal(struct libalias *la, struct in_addr l_addr, struct in_addr g_addr, uint32_t l_vtag, uint16_t l_port, uint16_t g_port);
+static struct sctp_nat_assoc*
+FindSctpGlobal(struct libalias *la, struct in_addr g_addr, uint32_t g_vtag, uint16_t g_port, uint16_t l_port, int *partial_match);
+static struct sctp_nat_assoc*
+FindSctpGlobalClash(struct libalias *la, struct sctp_nat_assoc *Cassoc);
+static struct sctp_nat_assoc*
+FindSctpLocalT(struct libalias *la, struct in_addr g_addr, uint32_t l_vtag, uint16_t g_port, uint16_t l_port);
+static struct sctp_nat_assoc*
+FindSctpGlobalT(struct libalias *la, struct in_addr g_addr, uint32_t g_vtag, uint16_t l_port, uint16_t g_port);
+
+static int AddSctpAssocLocal(struct libalias *la, struct sctp_nat_assoc *assoc, struct in_addr g_addr);
+static int AddSctpAssocGlobal(struct libalias *la, struct sctp_nat_assoc *assoc);
+static void RmSctpAssoc(struct libalias *la, struct sctp_nat_assoc *assoc);
+static void freeGlobalAddressList(struct sctp_nat_assoc *assoc);
+
+/* Timer Queue Functions */
+static void sctp_AddTimeOut(struct libalias *la, struct sctp_nat_assoc *assoc);
+static void sctp_RmTimeOut(struct libalias *la, struct sctp_nat_assoc *assoc);
+static void sctp_ResetTimeOut(struct libalias *la, struct sctp_nat_assoc *assoc, int newexp);
+void sctp_CheckTimers(struct libalias *la);
+
+
+/* Logging Functions */
+static void logsctperror(char* errormsg, uint32_t vtag, int error, int direction);
+static void logsctpparse(int direction, struct sctp_nat_msg *sm);
+static void logsctpassoc(struct sctp_nat_assoc *assoc, char *s);
+static void logTimerQ(struct libalias *la);
+static void logSctpGlobal(struct libalias *la);
+static void logSctpLocal(struct libalias *la);
+#ifdef _KERNEL
+static void SctpAliasLog(const char *format, ...);
+#endif
+
+/** @defgroup external External code changes and modifications
+ *
+ * Some changes have been made to files external to alias_sctp.(c|h). These
+ * changes are primarily due to code needing to call static functions within
+ * those files or to perform extra functionality that can only be performed
+ * within these files.
+ */
+/** @ingroup external
+ * @brief Log current statistics for the libalias instance
+ *
+ * This function is defined in alias_db.c, since it calls static functions in
+ * this file
+ *
+ * Calls the higher level ShowAliasStats() in alias_db.c which logs all current
+ * statistics about the libalias instance - including SCTP statistics
+ *
+ * @param la Pointer to the libalias instance
+ */
+void SctpShowAliasStats(struct libalias *la);
+
+#ifdef _KERNEL
+
+MALLOC_DEFINE(M_SCTPNAT, "sctpnat", "sctp nat dbs");
+/* Use kernel allocator. */
+#ifdef _SYS_MALLOC_HH_
+#define sn_malloc(x) malloc(x, M_SCTPNAT, M_NOWAIT|M_ZERO)
+#define sn_calloc(n,x) sn_malloc(x * n)
+#define sn_free(x) free(x, M_SCTPNAT)
+#endif// #ifdef _SYS_MALLOC_HH_
+
+#else //#ifdef _KERNEL
+#define sn_malloc(x) malloc(x)
+#define sn_calloc(n, x) calloc(n, x)
+#define sn_free(x) free(x)
+
+#endif //#ifdef _KERNEL
+
+/** @defgroup packet_parser SCTP Packet Parsing
+ *
+ * Macros to:
+ * - Return pointers to the first and next SCTP chunks within an SCTP Packet
+ * - Define possible return values of the packet parsing process
+ * - SCTP message types for storing in the sctp_nat_msg structure @{
+ */
+
+#define SN_SCTP_FIRSTCHUNK(sctphead) (struct sctp_chunkhdr *)(((char *)sctphead) + sizeof(struct sctphdr))
+/**< Returns a pointer to the first chunk in an SCTP packet given a pointer to the SCTP header */
+
+#define SN_SCTP_NEXTCHUNK(chunkhead) (struct sctp_chunkhdr *)(((char *)chunkhead) + SCTP_SIZE32(ntohs(chunkhead->chunk_length)))
+/**< Returns a pointer to the next chunk in an SCTP packet given a pointer to the current chunk */
+
+#define SN_SCTP_NEXTPARAM(param) (struct sctp_paramhdr *)(((char *)param) + SCTP_SIZE32(ntohs(param->param_length)))
+/**< Returns a pointer to the next parameter in an SCTP packet given a pointer to the current parameter */
+
+#define SN_MIN_CHUNK_SIZE 4 /**< Smallest possible SCTP chunk size in bytes */
+#define SN_MIN_PARAM_SIZE 4 /**< Smallest possible SCTP param size in bytes */
+#define SN_VTAG_PARAM_SIZE 12 /**< Size of SCTP ASCONF vtag param in bytes */
+#define SN_ASCONFACK_PARAM_SIZE 8 /**< Size of SCTP ASCONF ACK param in bytes */
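+
+/*
+ * Editorial sketch (assumed typical usage of the macros above; not part of
+ * the original source): walking the chunks of an SCTP packet.
+ *
+ *	struct sctp_chunkhdr *ch = SN_SCTP_FIRSTCHUNK(sctp_hdr);
+ *	while (bytes_left >= SN_MIN_CHUNK_SIZE) {
+ *		... inspect ch->chunk_type ...
+ *		bytes_left -= SCTP_SIZE32(ntohs(ch->chunk_length));
+ *		ch = SN_SCTP_NEXTCHUNK(ch);
+ *	}
+ */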
+
+/* Packet parsing return codes */
+#define SN_PARSE_OK 0 /**< Packet parsed for SCTP messages */
+#define SN_PARSE_ERROR_IPSHL 1 /**< Packet parsing error - IP and SCTP common header len */
+#define SN_PARSE_ERROR_AS_MALLOC 2 /**< Packet parsing error - assoc malloc */
+#define SN_PARSE_ERROR_CHHL 3 /**< Packet parsing error - Chunk header len */
+#define SN_PARSE_ERROR_DIR 4 /**< Packet parsing error - Direction */
+#define SN_PARSE_ERROR_VTAG 5 /**< Packet parsing error - Vtag */
+#define SN_PARSE_ERROR_CHUNK 6 /**< Packet parsing error - Chunk */
+#define SN_PARSE_ERROR_PORT 7 /**< Packet parsing error - Port=0 */
+#define SN_PARSE_ERROR_LOOKUP 8 /**< Packet parsing error - Lookup */
+#define SN_PARSE_ERROR_PARTIALLOOKUP 9 /**< Packet parsing error - partial lookup only found */
+#define SN_PARSE_ERROR_LOOKUP_ABORT 10 /**< Packet parsing error - Lookup - but abort packet */
+
+/* Alias_sctp performs its processing based on a number of key messages */
+#define SN_SCTP_ABORT 0x0000 /**< a packet containing an ABORT chunk */
+#define SN_SCTP_INIT 0x0001 /**< a packet containing an INIT chunk */
+#define SN_SCTP_INITACK 0x0002 /**< a packet containing an INIT-ACK chunk */
+#define SN_SCTP_SHUTCOMP 0x0010 /**< a packet containing a SHUTDOWN-COMPLETE chunk */
+#define SN_SCTP_SHUTACK 0x0020 /**< a packet containing a SHUTDOWN-ACK chunk */
+#define SN_SCTP_ASCONF 0x0100 /**< a packet containing an ASCONF chunk */
+#define SN_SCTP_ASCONFACK 0x0200 /**< a packet containing an ASCONF-ACK chunk */
+#define SN_SCTP_OTHER 0xFFFF /**< a packet containing a chunk that is not of interest */
+
+/** @}
+ * @defgroup state_machine SCTP NAT State Machine
+ *
+ * Defines the various states an association can be within the NAT @{
+ */
+#define SN_ID 0x0000 /**< Idle state */
+#define SN_INi 0x0010 /**< Initialising, waiting for InitAck state */
+#define SN_INa 0x0020 /**< Initialising, waiting for AddIpAck state */
+#define SN_UP 0x0100 /**< Association in UP state */
+#define SN_CL 0x1000 /**< Closing state */
+#define SN_RM 0x2000 /**< Removing state */
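+
+/*
+ * Editorial sketch of the usual transitions (inferred from the state names
+ * and the *_process() handlers declared above; not stated in the original):
+ *
+ *	SN_ID --INIT--> SN_INi --INIT-ACK--> SN_UP --SHUTDOWN-ACK--> SN_CL
+ *	SN_ID --ASCONF(AddIP)--> SN_INa --ASCONF-ACK--> SN_UP
+ *	SN_CL --SHUTDOWN-COMPLETE--> SN_RM (entry freed when its timer expires)
+ */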
+
+/** @}
+ * @defgroup Logging Logging Functionality
+ *
+ * Define various log levels and a macro to call specified log functions only if
+ * the current log level (sysctl_log_level) matches the specified level @{
+ */
+#define SN_LOG_LOW 0
+#define SN_LOG_EVENT 1
+#define SN_LOG_INFO 2
+#define SN_LOG_DETAIL 3
+#define SN_LOG_DEBUG 4
+#define SN_LOG_DEBUG_MAX 5
+
+#define SN_LOG(level, action) if (sysctl_log_level >= level) { action; } /**< Perform log action ONLY if the current log level meets the specified log level */
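+
+/*
+ * Usage example (editorial): run the (possibly expensive) log action only
+ * when the current log level is high enough, e.g.
+ *
+ *	SN_LOG(SN_LOG_DETAIL, logsctpassoc(assoc, "expired "));
+ */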
+
+/** @}
+ * @defgroup Hash Hash Table Macros and Functions
+ *
+ * Defines minimum/maximum/default values for the hash table size @{
+ */
+#define SN_MIN_HASH_SIZE 101 /**< Minimum hash table size (set to stop users choosing stupid values) */
+#define SN_MAX_HASH_SIZE 1000001 /**< Maximum hash table size (NB must be less than max int) */
+#define SN_DEFAULT_HASH_SIZE 2003 /**< A reasonable default size for the hash tables */
+
+#define SN_LOCAL_TBL 0x01 /**< assoc in local table */
+#define SN_GLOBAL_TBL 0x02 /**< assoc in global table */
+#define SN_BOTH_TBL 0x03 /**< assoc in both tables */
+#define SN_WAIT_TOLOCAL 0x10 /**< assoc waiting for TOLOCAL asconf ACK*/
+#define SN_WAIT_TOGLOBAL 0x20 /**< assoc waiting for TOLOCAL asconf ACK*/
+#define SN_NULL_TBL 0x00 /**< assoc in No table */
+#define SN_MAX_GLOBAL_ADDRESSES 100 /**< absolute maximum global address count*/
+
+#define SN_ADD_OK 0 /**< Association added to the table */
+#define SN_ADD_CLASH 1 /**< Clash when trying to add the assoc. info to the table */
+
+#define SN_TABLE_HASH(vtag, port, size) (((u_int) vtag + (u_int) port) % (u_int) size) /**< Calculate the hash table lookup position */
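+
+/*
+ * Example (editorial): with the default table size of 2003, an association
+ * with vtag 0x12345678 and port 5001 hashes to
+ * (0x12345678 + 5001) % 2003 = 1448.
+ */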
+
+/** @}
+ * @defgroup Timer Timer Queue Macros and Functions
+ *
+ * Timer macros set minimum/maximum timeout values and calculate timer expiry
+ * times for the provided libalias instance @{
+ */
+#define SN_MIN_TIMER 1
+#define SN_MAX_TIMER 600
+#define SN_TIMER_QUEUE_SIZE (SN_MAX_TIMER + 2)
+
+#define SN_I_T(la) (la->timeStamp + sysctl_init_timer) /**< INIT State expiration time in seconds */
+#define SN_U_T(la) (la->timeStamp + sysctl_up_timer) /**< UP State expiration time in seconds */
+#define SN_C_T(la) (la->timeStamp + sysctl_shutdown_timer) /**< CL State expiration time in seconds */
+#define SN_X_T(la) (la->timeStamp + sysctl_holddown_timer) /**< Wait after a shutdown complete in seconds */
+
+/** @}
+ * @defgroup sysctl SysCtl Variable and callback function declarations
+ *
+ * Sysctl variables to modify NAT functionality in real-time along with associated functions
+ * to manage modifications to the sysctl variables @{
+ */
+
+/* Callbacks */
+int sysctl_chg_loglevel(SYSCTL_HANDLER_ARGS);
+int sysctl_chg_timer(SYSCTL_HANDLER_ARGS);
+int sysctl_chg_hashtable_size(SYSCTL_HANDLER_ARGS);
+int sysctl_chg_error_on_ootb(SYSCTL_HANDLER_ARGS);
+int sysctl_chg_accept_global_ootb_addip(SYSCTL_HANDLER_ARGS);
+int sysctl_chg_initialising_chunk_proc_limit(SYSCTL_HANDLER_ARGS);
+int sysctl_chg_chunk_proc_limit(SYSCTL_HANDLER_ARGS);
+int sysctl_chg_param_proc_limit(SYSCTL_HANDLER_ARGS);
+int sysctl_chg_track_global_addresses(SYSCTL_HANDLER_ARGS);
+
+/* Sysctl variables */
+/** @brief net.inet.ip.alias.sctp.log_level */
+static u_int sysctl_log_level = 0; /**< Stores the current level of logging */
+/** @brief net.inet.ip.alias.sctp.init_timer */
+static u_int sysctl_init_timer = 15; /**< Seconds to hold an association in the table waiting for an INIT-ACK or AddIP-ACK */
+/** @brief net.inet.ip.alias.sctp.up_timer */
+static u_int sysctl_up_timer = 300; /**< Seconds to hold an association in the table while no packets are transmitted */
+/** @brief net.inet.ip.alias.sctp.shutdown_timer */
+static u_int sysctl_shutdown_timer = 15; /**< Seconds to hold an association in the table waiting for a SHUTDOWN-COMPLETE */
+/** @brief net.inet.ip.alias.sctp.holddown_timer */
+static u_int sysctl_holddown_timer = 0; /**< Seconds to hold an association in the table after it has been shutdown (to allow for lost SHUTDOWN-COMPLETEs) */
+/** @brief net.inet.ip.alias.sctp.hashtable_size */
+static u_int sysctl_hashtable_size = SN_DEFAULT_HASH_SIZE; /**< Sets the hash table size for any NEW NAT instances (existing instances retain their existing hash table) */
+/** @brief net.inet.ip.alias.sctp.error_on_ootb */
+static u_int sysctl_error_on_ootb = 1; /**< NAT response to receipt of OOTB packet
+				  (0 - No response, 1 - NAT will send ErrorM only to local side,
+				  2 - NAT will send local ErrorM and global ErrorM if there was a partial association match,
+				  3 - NAT will send ErrorM to both local and global) */
+/** @brief net.inet.ip.alias.sctp.accept_global_ootb_addip */
+static u_int sysctl_accept_global_ootb_addip = 0; /**< NAT response to receipt of global OOTB AddIP (0 - No response, 1 - NAT will accept OOTB global AddIP messages for processing (Security risk)) */
+/** @brief net.inet.ip.alias.sctp.initialising_chunk_proc_limit */
+static u_int sysctl_initialising_chunk_proc_limit = 2; /**< A limit on the number of chunks that should be searched if there is no matching association (DoS prevention) */
+/** @brief net.inet.ip.alias.sctp.param_proc_limit */
+static u_int sysctl_chunk_proc_limit = 5; /**< A limit on the number of chunks that should be searched (DoS prevention) */
+/** @brief net.inet.ip.alias.sctp.param_proc_limit */
+static u_int sysctl_param_proc_limit = 25; /**< A limit on the number of parameters (in chunks) that should be searched (DoS prevention) */
+/** @brief net.inet.ip.alias.sctp.track_global_addresses */
+static u_int sysctl_track_global_addresses = 0; /**< Configures the global address tracking option within the NAT (0 - Global tracking is disabled, > 0 - enables tracking but limits the number of global IP addresses to this value).
+						   If set to >= 1, the NAT will track that many global IP addresses; this may reduce look-up table conflicts but increases processing. */
+
+#define SN_NO_ERROR_ON_OOTB 0 /**< Send no errorM on out of the blue packets */
+#define SN_LOCAL_ERROR_ON_OOTB 1 /**< Send only local errorM on out of the blue packets */
+#define SN_LOCALandPARTIAL_ERROR_ON_OOTB 2 /**< Send local errorM and global errorM for out of the blue packets only if partial match found */
+#define SN_ERROR_ON_OOTB 3 /**< Send errorM on out of the blue packets */
+
+#ifdef SYSCTL_NODE
+
+SYSCTL_DECL(_net_inet);
+SYSCTL_DECL(_net_inet_ip);
+SYSCTL_DECL(_net_inet_ip_alias);
+
+SYSCTL_NODE(_net_inet_ip_alias, OID_AUTO, sctp, CTLFLAG_RW, NULL, "SCTP NAT");
+
+SYSCTL_PROC(_net_inet_ip_alias_sctp, OID_AUTO, log_level, CTLTYPE_UINT | CTLFLAG_RW,
+ &sysctl_log_level, 0, sysctl_chg_loglevel, "IU",
+ "Level of detail (0 - default, 1 - event, 2 - info, 3 - detail, 4 - debug, 5 - max debug)");
+SYSCTL_PROC(_net_inet_ip_alias_sctp, OID_AUTO, init_timer, CTLTYPE_UINT | CTLFLAG_RW,
+ &sysctl_init_timer, 0, sysctl_chg_timer, "IU",
+ "Timeout value (s) while waiting for (INIT-ACK|AddIP-ACK)");
+SYSCTL_PROC(_net_inet_ip_alias_sctp, OID_AUTO, up_timer, CTLTYPE_UINT | CTLFLAG_RW,
+ &sysctl_up_timer, 0, sysctl_chg_timer, "IU",
+ "Timeout value (s) to keep an association up with no traffic");
+SYSCTL_PROC(_net_inet_ip_alias_sctp, OID_AUTO, shutdown_timer, CTLTYPE_UINT | CTLFLAG_RW,
+ &sysctl_shutdown_timer, 0, sysctl_chg_timer, "IU",
+ "Timeout value (s) while waiting for SHUTDOWN-COMPLETE");
+SYSCTL_PROC(_net_inet_ip_alias_sctp, OID_AUTO, holddown_timer, CTLTYPE_UINT | CTLFLAG_RW,
+ &sysctl_holddown_timer, 0, sysctl_chg_timer, "IU",
+ "Hold association in table for this many seconds after receiving a SHUTDOWN-COMPLETE");
+SYSCTL_PROC(_net_inet_ip_alias_sctp, OID_AUTO, hashtable_size, CTLTYPE_UINT | CTLFLAG_RW,
+ &sysctl_hashtable_size, 0, sysctl_chg_hashtable_size, "IU",
+ "Size of hash tables used for NAT lookups (100 < prime_number > 1000001)");
+SYSCTL_PROC(_net_inet_ip_alias_sctp, OID_AUTO, error_on_ootb, CTLTYPE_UINT | CTLFLAG_RW,
+ &sysctl_error_on_ootb, 0, sysctl_chg_error_on_ootb, "IU",
+ "ErrorM sent on receipt of ootb packet:\n\t0 - none,\n\t1 - to local only,\n\t2 - to local and global if a partial association match,\n\t3 - to local and global (DoS risk)");
+SYSCTL_PROC(_net_inet_ip_alias_sctp, OID_AUTO, accept_global_ootb_addip, CTLTYPE_UINT | CTLFLAG_RW,
+ &sysctl_accept_global_ootb_addip, 0, sysctl_chg_accept_global_ootb_addip, "IU",
+ "NAT response to receipt of global OOTB AddIP:\n\t0 - No response,\n\t1 - NAT will accept OOTB global AddIP messages for processing (Security risk)");
+SYSCTL_PROC(_net_inet_ip_alias_sctp, OID_AUTO, initialising_chunk_proc_limit, CTLTYPE_UINT | CTLFLAG_RW,
+ &sysctl_initialising_chunk_proc_limit, 0, sysctl_chg_initialising_chunk_proc_limit, "IU",
+ "Number of chunks that should be processed if there is no current association found:\n\t > 0 (A high value is a DoS risk)");
+SYSCTL_PROC(_net_inet_ip_alias_sctp, OID_AUTO, chunk_proc_limit, CTLTYPE_UINT | CTLFLAG_RW,
+ &sysctl_chunk_proc_limit, 0, sysctl_chg_chunk_proc_limit, "IU",
+ "Number of chunks that should be processed to find key chunk:\n\t>= initialising_chunk_proc_limit (A high value is a DoS risk)");
+SYSCTL_PROC(_net_inet_ip_alias_sctp, OID_AUTO, param_proc_limit, CTLTYPE_UINT | CTLFLAG_RW,
+ &sysctl_param_proc_limit, 0, sysctl_chg_param_proc_limit, "IU",
+ "Number of parameters (in a chunk) that should be processed to find key parameters:\n\t> 1 (A high value is a DoS risk)");
+SYSCTL_PROC(_net_inet_ip_alias_sctp, OID_AUTO, track_global_addresses, CTLTYPE_UINT | CTLFLAG_RW,
+ &sysctl_track_global_addresses, 0, sysctl_chg_track_global_addresses, "IU",
+ "Configures the global address tracking option within the NAT:\n\t0 - Global tracking is disabled,\n\t> 0 - enables tracking but limits the number of global IP addresses to this value");
+
+#endif /* SYSCTL_NODE */
+
+/** @}
+ * @ingroup sysctl
+ * @brief sysctl callback for changing net.inet.ip.alias.sctp.log_level
+ *
+ * Updates the variable sysctl_log_level to the provided value and ensures
+ * it is in the valid range (SN_LOG_LOW -> SN_LOG_DEBUG_MAX)
+ */
+int sysctl_chg_loglevel(SYSCTL_HANDLER_ARGS)
+{
+ u_int level = *(u_int *)arg1;
+ int error;
+
+ error = sysctl_handle_int(oidp, &level, 0, req);
+ if (error) return (error);
+
+	/* Clamp the requested level into the valid range before storing it. */
+	level = (level > SN_LOG_DEBUG_MAX) ? SN_LOG_DEBUG_MAX : level;
+	sysctl_log_level = (level < SN_LOG_LOW) ? SN_LOG_LOW : level;
+
+ return (0);
+}
+
+/** @ingroup sysctl
+ * @brief sysctl callback for changing net.inet.ip.alias.sctp.(init_timer|up_timer|shutdown_timer)
+ *
+ * Updates the timer-based sysctl variables. The new values are sanity-checked
+ * to make sure that they are within the range SN_MIN_TIMER-SN_MAX_TIMER. The
+ * holddown timer is allowed to be 0.
+ */
+int sysctl_chg_timer(SYSCTL_HANDLER_ARGS)
+{
+ u_int timer = *(u_int *)arg1;
+ int error;
+
+ error = sysctl_handle_int(oidp, &timer, 0, req);
+ if (error) return (error);
+
+ timer = (timer > SN_MAX_TIMER)?(SN_MAX_TIMER):(timer);
+
+ if (((u_int *)arg1) != &sysctl_holddown_timer)
+ {
+ timer = (timer < SN_MIN_TIMER)?(SN_MIN_TIMER):(timer);
+ }
+
+ *(u_int *)arg1 = timer;
+
+ return (0);
+}
+
+/** @ingroup sysctl
+ * @brief sysctl callback for changing net.inet.ip.alias.sctp.hashtable_size
+ *
+ * Updates the hashtable_size sysctl variable. The new value should be a prime
+ * number. We sanity check to ensure that the size is within the range
+ * SN_MIN_HASH_SIZE-SN_MAX_HASH_SIZE. We then check the provided number to see
+ * if it is prime. We approximate by checking that (2,3,5,7,11) are not factors,
+ * incrementing the user provided value until we find a suitable number.
+ */
+int sysctl_chg_hashtable_size(SYSCTL_HANDLER_ARGS)
+{
+ u_int size = *(u_int *)arg1;
+ int error;
+
+ error = sysctl_handle_int(oidp, &size, 0, req);
+ if (error) return (error);
+
+ size = (size < SN_MIN_HASH_SIZE)?(SN_MIN_HASH_SIZE):((size > SN_MAX_HASH_SIZE)?(SN_MAX_HASH_SIZE):(size));
+
+ size |= 0x00000001; /* make odd */
+
+ for(;(((size % 3) == 0) || ((size % 5) == 0) || ((size % 7) == 0) || ((size % 11) == 0)); size+=2);
+ sysctl_hashtable_size = size;
+
+ return (0);
+}
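+
+/*
+ * Worked example (editorial): a requested size of 1000 is first made odd
+ * (1001), rejected because 7 divides 1001, and advanced to 1003, which
+ * passes the (3,5,7,11) filter and is accepted even though 1003 = 17 * 59;
+ * hence the "approximate" primality check described above.
+ */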
+
+/** @ingroup sysctl
+ * @brief sysctl callback for changing net.inet.ip.alias.sctp.error_on_ootb
+ *
+ * Updates the error_on_ootb sysctl variable.
+ * If set to 0, no ErrorM will be sent if there is a look up table clash
+ * If set to 1, an ErrorM is sent only to the local side
+ * If set to 2, an ErrorM is sent to the local side and global side if there is
+ * a partial association match
+ * If set to 3, an ErrorM is sent to both local and global sides (DoS risk).
+ */
+int sysctl_chg_error_on_ootb(SYSCTL_HANDLER_ARGS)
+{
+ u_int flag = *(u_int *)arg1;
+ int error;
+
+ error = sysctl_handle_int(oidp, &flag, 0, req);
+ if (error) return (error);
+
+ sysctl_error_on_ootb = (flag > SN_ERROR_ON_OOTB) ? SN_ERROR_ON_OOTB: flag;
+
+ return (0);
+}
+
+/** @ingroup sysctl
+ * @brief sysctl callback for changing net.inet.ip.alias.sctp.accept_global_ootb_addip
+ *
+ * If set to 1 the NAT will accept ootb global addip messages for processing (Security risk)
+ * Default is 0, only responding to local ootb AddIP messages
+ */
+int sysctl_chg_accept_global_ootb_addip(SYSCTL_HANDLER_ARGS)
+{
+ u_int flag = *(u_int *)arg1;
+ int error;
+
+ error = sysctl_handle_int(oidp, &flag, 0, req);
+ if (error) return (error);
+
+ sysctl_accept_global_ootb_addip = (flag == 1) ? 1: 0;
+
+ return (0);
+}
+
+/** @ingroup sysctl
+ * @brief sysctl callback for changing net.inet.ip.alias.sctp.initialising_chunk_proc_limit
+ *
+ * Updates the initialising_chunk_proc_limit sysctl variable. Number of chunks
+ * that should be processed if there is no current association found: > 0 (A
+ * high value is a DoS risk)
+ */
+int sysctl_chg_initialising_chunk_proc_limit(SYSCTL_HANDLER_ARGS)
+{
+ u_int proclimit = *(u_int *)arg1;
+ int error;
+
+ error = sysctl_handle_int(oidp, &proclimit, 0, req);
+ if (error) return (error);
+
+ sysctl_initialising_chunk_proc_limit = (proclimit < 1) ? 1: proclimit;
+ sysctl_chunk_proc_limit =
+ (sysctl_chunk_proc_limit < sysctl_initialising_chunk_proc_limit) ? sysctl_initialising_chunk_proc_limit : sysctl_chunk_proc_limit;
+
+ return (0);
+}
+
+/** @ingroup sysctl
+ * @brief sysctl callback for changing net.inet.ip.alias.sctp.chunk_proc_limit
+ *
+ * Updates the chunk_proc_limit sysctl variable: the number of chunks that
+ * should be processed to find the key chunk. Must be
+ * >= initialising_chunk_proc_limit (a high value is a DoS risk).
+ */
+int sysctl_chg_chunk_proc_limit(SYSCTL_HANDLER_ARGS)
+{
+ u_int proclimit = *(u_int *)arg1;
+ int error;
+
+ error = sysctl_handle_int(oidp, &proclimit, 0, req);
+ if (error) return (error);
+
+ sysctl_chunk_proc_limit =
+ (proclimit < sysctl_initialising_chunk_proc_limit) ? sysctl_initialising_chunk_proc_limit : proclimit;
+
+ return (0);
+}
+
+
+/** @ingroup sysctl
+ * @brief sysctl callback for changing net.inet.ip.alias.sctp.param_proc_limit
+ *
+ * Updates the param_proc_limit sysctl variable: the number of parameters that
+ * should be processed to find the key parameters. Must be
+ * > 1 (a high value is a DoS risk).
+ */
+int sysctl_chg_param_proc_limit(SYSCTL_HANDLER_ARGS)
+{
+ u_int proclimit = *(u_int *)arg1;
+ int error;
+
+ error = sysctl_handle_int(oidp, &proclimit, 0, req);
+ if (error) return (error);
+
+ sysctl_param_proc_limit =
+ (proclimit < 2) ? 2 : proclimit;
+
+ return (0);
+}
+
+/** @ingroup sysctl
+ * @brief sysctl callback for changing net.inet.ip.alias.sctp.track_global_addresses
+ *
+ * Configures the global address tracking option within the NAT (0 - global
+ * tracking is disabled, > 0 - enables tracking but limits the number of global
+ * IP addresses to this value).
+ */
+int sysctl_chg_track_global_addresses(SYSCTL_HANDLER_ARGS)
+{
+ u_int num_to_track = *(u_int *)arg1;
+ int error;
+
+ error = sysctl_handle_int(oidp, &num_to_track, 0, req);
+ if (error) return (error);
+
+ sysctl_track_global_addresses = (num_to_track > SN_MAX_GLOBAL_ADDRESSES) ? SN_MAX_GLOBAL_ADDRESSES : num_to_track;
+
+ return (0);
+}
+
+
+/* ----------------------------------------------------------------------
+ * CODE BEGINS HERE
+ * ----------------------------------------------------------------------
+ */
+/**
+ * @brief Initialises the SCTP NAT Implementation
+ *
+ * Creates the look-up tables and the timer queue and initialises all state
+ * variables
+ *
+ * @param la Pointer to the relevant libalias instance
+ */
+void AliasSctpInit(struct libalias *la)
+{
+ /* Initialise association tables*/
+ int i;
+ la->sctpNatTableSize = sysctl_hashtable_size;
+ SN_LOG(SN_LOG_EVENT,
+ SctpAliasLog("Initialising SCTP NAT Instance (hash_table_size:%d)\n", la->sctpNatTableSize));
+ la->sctpTableLocal = sn_calloc(la->sctpNatTableSize, sizeof(struct sctpNatTableL));
+ la->sctpTableGlobal = sn_calloc(la->sctpNatTableSize, sizeof(struct sctpNatTableG));
+ la->sctpNatTimer.TimerQ = sn_calloc(SN_TIMER_QUEUE_SIZE, sizeof(struct sctpTimerQ));
+ /* Initialise hash table */
+ for (i = 0; i < la->sctpNatTableSize; i++) {
+ LIST_INIT(&la->sctpTableLocal[i]);
+ LIST_INIT(&la->sctpTableGlobal[i]);
+ }
+
+ /* Initialise circular timer Q*/
+ for (i = 0; i < SN_TIMER_QUEUE_SIZE; i++)
+ LIST_INIT(&la->sctpNatTimer.TimerQ[i]);
+#ifdef _KERNEL
+ la->sctpNatTimer.loc_time=time_uptime; /* la->timeStamp is not set yet */
+#else
+ la->sctpNatTimer.loc_time=la->timeStamp;
+#endif
+ la->sctpNatTimer.cur_loc = 0;
+ la->sctpLinkCount = 0;
+}
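+
+/*
+ * Illustrative usage sketch only. It assumes the standard libalias entry
+ * points and that instance creation/destruction invoke
+ * AliasSctpInit()/AliasSctpTerm() for SCTP-aware builds:
+ */
+#if 0
+	struct libalias *la;
+
+	la = LibAliasInit(NULL);	/* sets up the SCTP NAT tables */
+	/* ... pass packets through LibAliasIn()/LibAliasOut() ... */
+	LibAliasUninit(la);		/* tears the SCTP NAT state down */
+#endif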
+
+/**
+ * @brief Cleans-up the SCTP NAT Implementation prior to unloading
+ *
+ * Removes all entries from the timer queue, freeing associations as it goes.
+ * We then free the memory allocated to the look-up tables and the timer queue.
+ *
+ * NOTE: We do not need to traverse the look-up tables as each association
+ * will always have an entry in the timer queue, freeing this memory
+ * once will free all memory allocated to entries in the look-up tables
+ *
+ * @param la Pointer to the relevant libalias instance
+ */
+void AliasSctpTerm(struct libalias *la)
+{
+ struct sctp_nat_assoc *assoc1, *assoc2;
+ int i;
+
+ LIBALIAS_LOCK_ASSERT(la);
+ SN_LOG(SN_LOG_EVENT,
+ SctpAliasLog("Removing SCTP NAT Instance\n"));
+ for (i = 0; i < SN_TIMER_QUEUE_SIZE; i++) {
+ assoc1 = LIST_FIRST(&la->sctpNatTimer.TimerQ[i]);
+ while (assoc1 != NULL) {
+ freeGlobalAddressList(assoc1);
+ assoc2 = LIST_NEXT(assoc1, timer_Q);
+ sn_free(assoc1);
+ assoc1 = assoc2;
+ }
+ }
+
+ sn_free(la->sctpTableLocal);
+ sn_free(la->sctpTableGlobal);
+ sn_free(la->sctpNatTimer.TimerQ);
+}
+
+/**
+ * @brief Handles SCTP packets passed from libalias
+ *
+ * This function needs to actually NAT/drop packets and possibly create and
+ * send AbortM or ErrorM packets in response. The process involves:
+ * - Validating the direction parameter passed by the caller
+ * - Checking and handling any expired timers for the NAT
+ * - Calling sctp_PktParser() to parse the packet
+ * - Calling ProcessSctpMsg() to decide the appropriate outcome and to update
+ * the NAT tables
+ * - Based on the return code either:
+ * - NAT the packet
+ * - Construct and send an ErrorM|AbortM packet
+ * - Mark the association for removal from the tables
+ * - Potentially remove the association from all lookup tables
+ * - Return the appropriate result to libalias
+ *
+ * @param la Pointer to the relevant libalias instance
+ * @param pip Pointer to IP packet to process
+ * @param direction SN_TO_LOCAL | SN_TO_GLOBAL
+ *
+ * @return PKT_ALIAS_OK | PKT_ALIAS_IGNORE | PKT_ALIAS_ERROR
+ */
+int
+SctpAlias(struct libalias *la, struct ip *pip, int direction)
+{
+ int rtnval;
+ struct sctp_nat_msg msg;
+ struct sctp_nat_assoc *assoc = NULL;
+
+ if ((direction != SN_TO_LOCAL) && (direction != SN_TO_GLOBAL)) {
+ SctpAliasLog("ERROR: Invalid direction\n");
+ return(PKT_ALIAS_ERROR);
+ }
+
+ sctp_CheckTimers(la); /* Check timers */
+
+ /* Parse the packet */
+ rtnval = sctp_PktParser(la, direction, pip, &msg, &assoc); //using *char (change to mbuf when we get code from Paolo)
+ switch (rtnval) {
+ case SN_PARSE_OK:
+ break;
+ case SN_PARSE_ERROR_CHHL:
+ /* Not an error if there is a chunk length parsing error and this is a fragmented packet */
+ if (ntohs(pip->ip_off) & IP_MF) {
+ rtnval = SN_PARSE_OK;
+ break;
+ }
+ SN_LOG(SN_LOG_EVENT,
+ logsctperror("SN_PARSE_ERROR", msg.sctp_hdr->v_tag, rtnval, direction));
+ return(PKT_ALIAS_ERROR);
+ case SN_PARSE_ERROR_PARTIALLOOKUP:
+ if (sysctl_error_on_ootb > SN_LOCALandPARTIAL_ERROR_ON_OOTB) {
+ SN_LOG(SN_LOG_EVENT,
+ logsctperror("SN_PARSE_ERROR", msg.sctp_hdr->v_tag, rtnval, direction));
+ return(PKT_ALIAS_ERROR);
+ }
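+ /* FALLTHROUGH */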
+ case SN_PARSE_ERROR_LOOKUP:
+ if (sysctl_error_on_ootb == SN_ERROR_ON_OOTB ||
+ (sysctl_error_on_ootb == SN_LOCALandPARTIAL_ERROR_ON_OOTB && direction == SN_TO_LOCAL) ||
+ (sysctl_error_on_ootb == SN_LOCAL_ERROR_ON_OOTB && direction == SN_TO_GLOBAL)) {
+ TxAbortErrorM(la, &msg, assoc, SN_REFLECT_ERROR, direction); /*NB assoc=NULL */
+ return(PKT_ALIAS_RESPOND);
+ }
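+ /* FALLTHROUGH */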
+ default:
+ SN_LOG(SN_LOG_EVENT,
+ logsctperror("SN_PARSE_ERROR", msg.sctp_hdr->v_tag, rtnval, direction));
+ return(PKT_ALIAS_ERROR);
+ }
+
+ SN_LOG(SN_LOG_DETAIL,
+ logsctpassoc(assoc, "*");
+ logsctpparse(direction, &msg);
+ );
+
+ /* Process the SCTP message */
+ rtnval = ProcessSctpMsg(la, direction, &msg, assoc);
+
+ SN_LOG(SN_LOG_DEBUG_MAX,
+ logsctpassoc(assoc, "-");
+ logSctpLocal(la);
+ logSctpGlobal(la);
+ );
+ SN_LOG(SN_LOG_DEBUG, logTimerQ(la));
+
+ switch(rtnval){
+ case SN_NAT_PKT:
+ switch(direction) {
+ case SN_TO_LOCAL:
+ DifferentialChecksum(&(msg.ip_hdr->ip_sum),
+ &(assoc->l_addr), &(msg.ip_hdr->ip_dst), 2);
+ msg.ip_hdr->ip_dst = assoc->l_addr; /* change dst address to local address*/
+ break;
+ case SN_TO_GLOBAL:
+ DifferentialChecksum(&(msg.ip_hdr->ip_sum),
+ &(assoc->a_addr), &(msg.ip_hdr->ip_src), 2);
+ msg.ip_hdr->ip_src = assoc->a_addr; /* change src to alias addr*/
+ break;
+ default:
+ rtnval = SN_DROP_PKT; /* shouldn't get here, but if it does drop packet */
+ SN_LOG(SN_LOG_LOW, logsctperror("ERROR: Invalid direction", msg.sctp_hdr->v_tag, rtnval, direction));
+ break;
+ }
+ break;
+ case SN_DROP_PKT:
+ SN_LOG(SN_LOG_DETAIL, logsctperror("SN_DROP_PKT", msg.sctp_hdr->v_tag, rtnval, direction));
+ break;
+ case SN_REPLY_ABORT:
+ case SN_REPLY_ERROR:
+ case SN_SEND_ABORT:
+ TxAbortErrorM(la, &msg, assoc, rtnval, direction);
+ break;
+ default:
+ // big error, remove association and go to idle and write log messages
+ SN_LOG(SN_LOG_LOW, logsctperror("SN_PROCESSING_ERROR", msg.sctp_hdr->v_tag, rtnval, direction));
+ assoc->state=SN_RM;/* Mark for removal*/
+ break;
+ }
+
+ /* Remove association if tagged for removal */
+ if (assoc->state == SN_RM) {
+ if (assoc->TableRegister) {
+ sctp_RmTimeOut(la, assoc);
+ RmSctpAssoc(la, assoc);
+ }
+ LIBALIAS_LOCK_ASSERT(la);
+ freeGlobalAddressList(assoc);
+ sn_free(assoc);
+ }
+ switch(rtnval) {
+ case SN_NAT_PKT:
+ return(PKT_ALIAS_OK);
+ case SN_SEND_ABORT:
+ return(PKT_ALIAS_OK);
+ case SN_REPLY_ABORT:
+ case SN_REPLY_ERROR:
+ case SN_REFLECT_ERROR:
+ return(PKT_ALIAS_RESPOND);
+ case SN_DROP_PKT:
+ default:
+ return(PKT_ALIAS_ERROR);
+ }
+}
+
+/**
+ * @brief Send an AbortM or ErrorM
+ *
+ * We construct the new SCTP packet to send in place of the existing packet we
+ * have been asked to NAT. This function can only be called if the original
+ * packet was successfully parsed as a valid SCTP packet.
+ *
+ * An AbortM (without cause) packet is the smallest SCTP packet available and as
+ * such there is always space in the existing packet buffer to fit the AbortM
+ * packet. An ErrorM packet is 4 bytes longer than the AbortM (the error cause
+ * is not optional). An ErrorM is sent in response to an AddIP when the Vtag/address
+ * combination, if added, will produce a conflict in the association look up
+ * tables. It may also be used for an unexpected packet - a packet with no
+ * matching association in the NAT table and we are requesting an AddIP so we
+ * can add it. The smallest valid SCTP packet while the association is in an
+ * up-state is a Heartbeat packet, which is big enough to be transformed to an
+ * ErrorM.
+ *
+ * We create a temporary character array to store the packet as we are constructing
+ * it. We then populate the array with appropriate values based on:
+ * - Packet type (AbortM | ErrorM)
+ * - Initial packet direction (SN_TO_LOCAL | SN_TO_GLOBAL)
+ * - NAT response (Send packet | Reply packet)
+ *
+ * Once complete, we copy the contents of the temporary packet over the original
+ * SCTP packet we were asked to NAT
+ *
+ * @param la Pointer to the relevant libalias instance
+ * @param sm Pointer to sctp message information
+ * @param assoc Pointer to current association details
+ * @param sndrply SN_SEND_ABORT | SN_REPLY_ABORT | SN_REPLY_ERROR | SN_REFLECT_ERROR
+ * @param direction SN_TO_LOCAL | SN_TO_GLOBAL
+ */
+static uint32_t
+local_sctp_finalize_crc32(uint32_t crc32c)
+{
+ /* This routine is duplicated from SCTP
+ * we need to do that since it MAY be that SCTP
+ * is NOT compiled into the kernel. The CRC32C routines
+ * however are always available in libkern.
+ */
+ uint32_t result;
+#if BYTE_ORDER == BIG_ENDIAN
+ uint8_t byte0, byte1, byte2, byte3;
+
+#endif
+ /* Complement the result */
+ result = ~crc32c;
+#if BYTE_ORDER == BIG_ENDIAN
+ /*
+ * For BIG-ENDIAN.. aka Motorola byte order the result is in
+ * little-endian form. So we must manually swap the bytes. Then we
+ * can call htonl() which does nothing...
+ */
+ byte0 = result & 0x000000ff;
+ byte1 = (result >> 8) & 0x000000ff;
+ byte2 = (result >> 16) & 0x000000ff;
+ byte3 = (result >> 24) & 0x000000ff;
+ crc32c = ((byte0 << 24) | (byte1 << 16) | (byte2 << 8) | byte3);
+#else
+ /*
+ * For INTEL platforms the result comes out in network order. No
+ * htonl is required or the swap above. So we optimize out both the
+ * htonl and the manual swap above.
+ */
+ crc32c = result;
+#endif
+ return (crc32c);
+}
+
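+/*
+ * Layout of the packet constructed below (sizes assume no IP options, i.e.
+ * ip_hl = 5 as set in this function; the error cause is omitted for short
+ * packets):
+ *
+ *	struct ip		20 bytes
+ *	struct sctphdr		12 bytes (ports, v_tag, checksum)
+ *	struct sctp_chunkhdr	 4 bytes (ABORT or ERROR, flags, length)
+ *	struct sctp_error_cause	 4 bytes (optional: code + length)
+ *				40 bytes total (36 without the cause)
+ */
+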
+static void
+TxAbortErrorM(struct libalias *la, struct sctp_nat_msg *sm, struct sctp_nat_assoc *assoc, int sndrply, int direction)
+{
+ int sctp_size = sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr) + sizeof(struct sctp_error_cause);
+ int ip_size = sizeof(struct ip) + sctp_size;
+ int include_error_cause = 1;
+ char tmp_ip[ip_size];
+
+ if (ntohs(sm->ip_hdr->ip_len) < ip_size) { /* short packet, cannot send error cause */
+ include_error_cause = 0;
+ ip_size = ip_size - sizeof(struct sctp_error_cause);
+ sctp_size = sctp_size - sizeof(struct sctp_error_cause);
+ }
+ /* Assign the header pointers within the packet */
+ struct ip* ip = (struct ip *) tmp_ip;
+ struct sctphdr* sctp_hdr = (struct sctphdr *) ((char *) ip + sizeof(*ip));
+ struct sctp_chunkhdr* chunk_hdr = (struct sctp_chunkhdr *) ((char *) sctp_hdr + sizeof(*sctp_hdr));
+ struct sctp_error_cause* error_cause = (struct sctp_error_cause *) ((char *) chunk_hdr + sizeof(*chunk_hdr));
+
+ /* construct ip header */
+ ip->ip_v = sm->ip_hdr->ip_v;
+ ip->ip_hl = 5; /* 5*32 bit words */
+ ip->ip_tos = 0;
+ ip->ip_len = htons(ip_size);
+ ip->ip_id = sm->ip_hdr->ip_id;
+ ip->ip_off = 0;
+ ip->ip_ttl = 255;
+ ip->ip_p = IPPROTO_SCTP;
+ /*
+ The definitions below should be removed when they make it into the SCTP stack
+ */
+#define SCTP_MIDDLEBOX_FLAG 0x02
+#define SCTP_NAT_TABLE_COLLISION 0x00b0
+#define SCTP_MISSING_NAT 0x00b1
+ chunk_hdr->chunk_type = (sndrply & SN_TX_ABORT) ? SCTP_ABORT_ASSOCIATION : SCTP_OPERATION_ERROR;
+ chunk_hdr->chunk_flags = SCTP_MIDDLEBOX_FLAG;
+ if (include_error_cause) {
+ error_cause->code = htons((sndrply & SN_REFLECT_ERROR) ? SCTP_MISSING_NAT : SCTP_NAT_TABLE_COLLISION);
+ error_cause->length = htons(sizeof(struct sctp_error_cause));
+ chunk_hdr->chunk_length = htons(sizeof(*chunk_hdr) + sizeof(struct sctp_error_cause));
+ } else {
+ chunk_hdr->chunk_length = htons(sizeof(*chunk_hdr));
+ }
+
+ /* set specific values */
+ switch(sndrply) {
+ case SN_REFLECT_ERROR:
+ chunk_hdr->chunk_flags |= SCTP_HAD_NO_TCB; /* set Tbit */
+ sctp_hdr->v_tag = sm->sctp_hdr->v_tag;
+ break;
+ case SN_REPLY_ERROR:
+ sctp_hdr->v_tag = (direction == SN_TO_LOCAL) ? assoc->g_vtag : assoc->l_vtag ;
+ break;
+ case SN_SEND_ABORT:
+ sctp_hdr->v_tag = sm->sctp_hdr->v_tag;
+ break;
+ case SN_REPLY_ABORT:
+ sctp_hdr->v_tag = sm->sctpchnk.Init->initiate_tag;
+ break;
+ }
+
+ /* Set send/reply values */
+ if (sndrply == SN_SEND_ABORT) { /*pass through NAT */
+ ip->ip_src = (direction == SN_TO_LOCAL) ? sm->ip_hdr->ip_src : assoc->a_addr;
+ ip->ip_dst = (direction == SN_TO_LOCAL) ? assoc->l_addr : sm->ip_hdr->ip_dst;
+ sctp_hdr->src_port = sm->sctp_hdr->src_port;
+ sctp_hdr->dest_port = sm->sctp_hdr->dest_port;
+ } else { /* reply and reflect */
+ ip->ip_src = sm->ip_hdr->ip_dst;
+ ip->ip_dst = sm->ip_hdr->ip_src;
+ sctp_hdr->src_port = sm->sctp_hdr->dest_port;
+ sctp_hdr->dest_port = sm->sctp_hdr->src_port;
+ }
+
+ /* Calculate IP header checksum */
+ ip->ip_sum = in_cksum_hdr(ip);
+
+ /* calculate SCTP header CRC32 */
+ sctp_hdr->checksum = 0;
+ sctp_hdr->checksum = local_sctp_finalize_crc32(calculate_crc32c(0xffffffff, (unsigned char *) sctp_hdr, sctp_size));
+
+ memcpy(sm->ip_hdr, ip, ip_size);
+
+ SN_LOG(SN_LOG_EVENT,SctpAliasLog("%s %s 0x%x (->%s:%u vtag=0x%x crc=0x%x)\n",
+ ((sndrply == SN_SEND_ABORT) ? "Sending" : "Replying"),
+ ((sndrply & SN_TX_ERROR) ? "ErrorM" : "AbortM"),
+ (include_error_cause ? ntohs(error_cause->code) : 0),
+ inet_ntoa(ip->ip_dst),ntohs(sctp_hdr->dest_port),
+ ntohl(sctp_hdr->v_tag), ntohl(sctp_hdr->checksum)));
+}
+
+/* ----------------------------------------------------------------------
+ * PACKET PARSER CODE
+ * ----------------------------------------------------------------------
+ */
+/** @addtogroup packet_parser
+ *
+ * These functions parse the SCTP packet and fill a sctp_nat_msg structure
+ * with the parsed contents.
+ */
+/** @ingroup packet_parser
+ * @brief Parses SCTP packets for the key SCTP chunk that will be processed
+ *
+ * This module parses SCTP packets for the key SCTP chunk that will be processed
+ * The module completes the sctp_nat_msg structure and either retrieves the
+ * relevant (existing) stored association from the Hash Tables or creates a new
+ * association entity with state SN_ID
+ *
+ * @param la Pointer to the relevant libalias instance
+ * @param direction SN_TO_LOCAL | SN_TO_GLOBAL
+ * @param pip
+ * @param sm Pointer to sctp message information
+ * @param passoc Pointer to the association this SCTP Message belongs to
+ *
+ * @return SN_PARSE_OK | SN_PARSE_ERROR_*
+ */
+static int
+sctp_PktParser(struct libalias *la, int direction, struct ip *pip,
+ struct sctp_nat_msg *sm, struct sctp_nat_assoc **passoc)
+//sctp_PktParser(int direction, struct mbuf *ipak, int ip_hdr_len,struct sctp_nat_msg *sm, struct sctp_nat_assoc *assoc)
+{
+ struct sctphdr *sctp_hdr;
+ struct sctp_chunkhdr *chunk_hdr;
+ struct sctp_paramhdr *param_hdr;
+ struct in_addr ipv4addr;
+ int bytes_left; /* bytes left in ip packet */
+ int chunk_length;
+ int chunk_count;
+ int partial_match = 0;
+ // mbuf *mp;
+ // int mlen;
+
+ // mlen = SCTP_HEADER_LEN(i_pak);
+ // mp = SCTP_HEADER_TO_CHAIN(i_pak); /* does nothing in bsd since header and chain not separate */
+
+ /*
+ * Note that if the VTag is zero, it must be an INIT.
+ * Also, we are only interested in the content of INIT and ADDIP chunks.
+ */
+
+ // no mbuf stuff from Paolo yet so ...
+ sm->ip_hdr = pip;
+ /* remove ip header length from the bytes_left */
+ bytes_left = ntohs(pip->ip_len) - (pip->ip_hl << 2);
+
+ /* Check SCTP header length and move to first chunk */
+ if (bytes_left < sizeof(struct sctphdr)) {
+ sm->sctp_hdr = NULL;
+ return(SN_PARSE_ERROR_IPSHL); /* packet not long enough*/
+ }
+
+ sm->sctp_hdr = sctp_hdr = (struct sctphdr *) ip_next(pip);
+ bytes_left -= sizeof(struct sctphdr);
+
+ /* Check for valid ports (zero-valued ports would find partially initialised associations) */
+ if (sctp_hdr->src_port == 0 || sctp_hdr->dest_port == 0)
+ return(SN_PARSE_ERROR_PORT);
+
+ /* Check length of first chunk */
+ if (bytes_left < SN_MIN_CHUNK_SIZE) /* malformed chunk - could cause endless loop*/
+ return(SN_PARSE_ERROR_CHHL); /* packet not long enough for this chunk */
+
+ /* First chunk */
+ chunk_hdr = SN_SCTP_FIRSTCHUNK(sctp_hdr);
+
+ chunk_length = SCTP_SIZE32(ntohs(chunk_hdr->chunk_length));
+ if ((chunk_length < SN_MIN_CHUNK_SIZE) || (chunk_length > bytes_left)) /* malformed chunk - could cause endless loop*/
+ return(SN_PARSE_ERROR_CHHL);
+
+ if ((chunk_hdr->chunk_flags & SCTP_HAD_NO_TCB) &&
+ ((chunk_hdr->chunk_type == SCTP_ABORT_ASSOCIATION) ||
+ (chunk_hdr->chunk_type == SCTP_SHUTDOWN_COMPLETE))) {
+ /* T-Bit set */
+ if (direction == SN_TO_LOCAL)
+ *passoc = FindSctpGlobalT(la, pip->ip_src, sctp_hdr->v_tag, sctp_hdr->dest_port, sctp_hdr->src_port);
+ else
+ *passoc = FindSctpLocalT(la, pip->ip_dst, sctp_hdr->v_tag, sctp_hdr->dest_port, sctp_hdr->src_port);
+ } else {
+ /* Proper v_tag settings */
+ if (direction == SN_TO_LOCAL)
+ *passoc = FindSctpGlobal(la, pip->ip_src, sctp_hdr->v_tag, sctp_hdr->src_port, sctp_hdr->dest_port, &partial_match);
+ else
+ *passoc = FindSctpLocal(la, pip->ip_src, pip->ip_dst, sctp_hdr->v_tag, sctp_hdr->src_port, sctp_hdr->dest_port);
+ }
+
+ chunk_count = 1;
+ /* Real packet parsing occurs below */
+ sm->msg = SN_SCTP_OTHER;/* Initialise to largest value*/
+ sm->chunk_length = 0; /* only care about length for key chunks */
+ while (IS_SCTP_CONTROL(chunk_hdr)) {
+ switch(chunk_hdr->chunk_type) {
+ case SCTP_INITIATION:
+ if (chunk_length < sizeof(struct sctp_init_chunk)) /* malformed chunk*/
+ return(SN_PARSE_ERROR_CHHL);
+ sm->msg = SN_SCTP_INIT;
+ sm->sctpchnk.Init = (struct sctp_init *) ((char *) chunk_hdr + sizeof(struct sctp_chunkhdr));
+ sm->chunk_length = chunk_length;
+ /* if no existing association, create a new one */
+ if (*passoc == NULL) {
+ if (sctp_hdr->v_tag == 0){ //Init requires vtag=0
+ *passoc = (struct sctp_nat_assoc *) sn_malloc(sizeof(struct sctp_nat_assoc));
+ if (*passoc == NULL) {/* out of resources */
+ return(SN_PARSE_ERROR_AS_MALLOC);
+ }
+ /* Initialise association - malloc initialises memory to zeros */
+ (*passoc)->state = SN_ID;
+ LIST_INIT(&((*passoc)->Gaddr)); /* always initialise to avoid memory problems */
+ (*passoc)->TableRegister = SN_NULL_TBL;
+ return(SN_PARSE_OK);
+ }
+ return(SN_PARSE_ERROR_VTAG);
+ }
+ return(SN_PARSE_ERROR_LOOKUP);
+ case SCTP_INITIATION_ACK:
+ if (chunk_length < sizeof(struct sctp_init_ack_chunk)) /* malformed chunk*/
+ return(SN_PARSE_ERROR_CHHL);
+ sm->msg = SN_SCTP_INITACK;
+ sm->sctpchnk.InitAck = (struct sctp_init_ack *) ((char *) chunk_hdr + sizeof(struct sctp_chunkhdr));
+ sm->chunk_length = chunk_length;
+ return ((*passoc == NULL)?(SN_PARSE_ERROR_LOOKUP):(SN_PARSE_OK));
+ case SCTP_ABORT_ASSOCIATION: /* access only minimum sized chunk */
+ sm->msg = SN_SCTP_ABORT;
+ sm->chunk_length = chunk_length;
+ return ((*passoc == NULL)?(SN_PARSE_ERROR_LOOKUP_ABORT):(SN_PARSE_OK));
+ case SCTP_SHUTDOWN_ACK:
+ if (chunk_length < sizeof(struct sctp_shutdown_ack_chunk)) /* malformed chunk*/
+ return(SN_PARSE_ERROR_CHHL);
+ if (sm->msg > SN_SCTP_SHUTACK) {
+ sm->msg = SN_SCTP_SHUTACK;
+ sm->chunk_length = chunk_length;
+ }
+ break;
+ case SCTP_SHUTDOWN_COMPLETE: /* minimum sized chunk */
+ if (sm->msg > SN_SCTP_SHUTCOMP) {
+ sm->msg = SN_SCTP_SHUTCOMP;
+ sm->chunk_length = chunk_length;
+ }
+ return ((*passoc == NULL)?(SN_PARSE_ERROR_LOOKUP):(SN_PARSE_OK));
+ case SCTP_ASCONF:
+ if (sm->msg > SN_SCTP_ASCONF) {
+ if (chunk_length < (sizeof(struct sctp_asconf_chunk) + sizeof(struct sctp_ipv4addr_param))) /* malformed chunk*/
+ return(SN_PARSE_ERROR_CHHL);
+ //leave parameter searching to later, if required
+ param_hdr = (struct sctp_paramhdr *) ((char *) chunk_hdr + sizeof(struct sctp_asconf_chunk)); /*compulsory IP parameter*/
+ if (ntohs(param_hdr->param_type) == SCTP_IPV4_ADDRESS) {
+ if ((*passoc == NULL) && (direction == SN_TO_LOCAL)) { /* AddIP with no association */
+ /* try look up with the ASCONF packet's alternative address */
+ ipv4addr.s_addr = ((struct sctp_ipv4addr_param *) param_hdr)->addr;
+ *passoc = FindSctpGlobal(la, ipv4addr, sctp_hdr->v_tag, sctp_hdr->src_port, sctp_hdr->dest_port, &partial_match);
+ }
+ param_hdr = (struct sctp_paramhdr *)
+ ((char *) param_hdr + sizeof(struct sctp_ipv4addr_param)); /*asconf's compulsory address parameter */
+ sm->chunk_length = chunk_length - sizeof(struct sctp_asconf_chunk) - sizeof(struct sctp_ipv4addr_param); /* rest of chunk */
+ } else {
+ if (chunk_length < (sizeof(struct sctp_asconf_chunk) + sizeof(struct sctp_ipv6addr_param))) /* malformed chunk*/
+ return(SN_PARSE_ERROR_CHHL);
+ param_hdr = (struct sctp_paramhdr *)
+ ((char *) param_hdr + sizeof(struct sctp_ipv6addr_param)); /*asconf's compulsory address parameter */
+ sm->chunk_length = chunk_length - sizeof(struct sctp_asconf_chunk) - sizeof(struct sctp_ipv6addr_param); /* rest of chunk */
+ }
+ sm->msg = SN_SCTP_ASCONF;
+ sm->sctpchnk.Asconf = param_hdr;
+
+ if (*passoc == NULL) { /* AddIP with no association */
+ *passoc = (struct sctp_nat_assoc *) sn_malloc(sizeof(struct sctp_nat_assoc));
+ if (*passoc == NULL) {/* out of resources */
+ return(SN_PARSE_ERROR_AS_MALLOC);
+ }
+ /* Initialise association - malloc initialises memory to zeros */
+ (*passoc)->state = SN_ID;
+ LIST_INIT(&((*passoc)->Gaddr)); /* always initialise to avoid memory problems */
+ (*passoc)->TableRegister = SN_NULL_TBL;
+ return(SN_PARSE_OK);
+ }
+ }
+ break;
+ case SCTP_ASCONF_ACK:
+ if (sm->msg > SN_SCTP_ASCONFACK) {
+ if (chunk_length < sizeof(struct sctp_asconf_ack_chunk)) /* malformed chunk*/
+ return(SN_PARSE_ERROR_CHHL);
+ //leave parameter searching to later, if required
+ param_hdr = (struct sctp_paramhdr *) ((char *) chunk_hdr
+ + sizeof(struct sctp_asconf_ack_chunk));
+ sm->msg = SN_SCTP_ASCONFACK;
+ sm->sctpchnk.Asconf = param_hdr;
+ sm->chunk_length = chunk_length - sizeof(struct sctp_asconf_ack_chunk);
+ }
+ break;
+ default:
+ break; /* do nothing*/
+ }
+
+ /* if no association is found exit - we need to find an Init or AddIP within sysctl_initialising_chunk_proc_limit */
+ if ((*passoc == NULL) && (chunk_count >= sysctl_initialising_chunk_proc_limit))
+ return(SN_PARSE_ERROR_LOOKUP);
+
+ /* finished with this chunk, on to the next chunk*/
+ bytes_left-= chunk_length;
+
+ /* Is this the end of the packet ? */
+ if (bytes_left == 0)
+ return (*passoc == NULL)?(SN_PARSE_ERROR_LOOKUP):(SN_PARSE_OK);
+
+ /* Are there enough bytes in packet to at least retrieve length of next chunk ? */
+ if (bytes_left < SN_MIN_CHUNK_SIZE)
+ return(SN_PARSE_ERROR_CHHL);
+
+ chunk_hdr = SN_SCTP_NEXTCHUNK(chunk_hdr);
+
+ /* Is the chunk long enough to not cause an endless loop, and are there enough bytes in the packet to read the chunk? */
+ chunk_length = SCTP_SIZE32(ntohs(chunk_hdr->chunk_length));
+ if ((chunk_length < SN_MIN_CHUNK_SIZE) || (chunk_length > bytes_left))
+ return(SN_PARSE_ERROR_CHHL);
+ if(++chunk_count > sysctl_chunk_proc_limit)
+ return(SN_PARSE_OK); /* limit for processing chunks, take what we get */
+ }
+
+ if (*passoc == NULL)
+ return (partial_match)?(SN_PARSE_ERROR_PARTIALLOOKUP):(SN_PARSE_ERROR_LOOKUP);
+ else
+ return(SN_PARSE_OK);
+}
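+
+/*
+ * Condensed restatement of the chunk walk above (illustrative only). Chunk
+ * lengths are padded to a 4-byte boundary before advancing - for example
+ * SCTP_SIZE32(5) == 8 - so a declared length below SN_MIN_CHUNK_SIZE or
+ * beyond bytes_left must abort the walk to avoid running off the packet:
+ */
+#if 0
+	while (bytes_left >= SN_MIN_CHUNK_SIZE) {
+		chunk_length = SCTP_SIZE32(ntohs(chunk_hdr->chunk_length));
+		if ((chunk_length < SN_MIN_CHUNK_SIZE) || (chunk_length > bytes_left))
+			return(SN_PARSE_ERROR_CHHL);
+		/* ... examine chunk_hdr->chunk_type ... */
+		bytes_left -= chunk_length;
+		chunk_hdr = SN_SCTP_NEXTCHUNK(chunk_hdr);
+	}
+#endif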
+
+/** @ingroup packet_parser
+ * @brief Extract Vtags from Asconf Chunk
+ *
+ * GetAsconfVtags scans an Asconf Chunk for the vtags parameter, and then
+ * extracts the vtags.
+ *
+ * GetAsconfVtags is not called from within sctp_PktParser. It is called only
+ * from within ID_process when an AddIP has been received.
+ *
+ * @param la Pointer to the relevant libalias instance
+ * @param sm Pointer to sctp message information
+ * @param l_vtag Pointer to the local vtag in the association this SCTP Message belongs to
+ * @param g_vtag Pointer to the local vtag in the association this SCTP Message belongs to
+ * @param direction SN_TO_LOCAL | SN_TO_GLOBAL
+ *
+ * @return 1 - success | 0 - fail
+ */
+static int
+GetAsconfVtags(struct libalias *la, struct sctp_nat_msg *sm, uint32_t *l_vtag, uint32_t *g_vtag, int direction)
+{
+ /* To be removed when information is in the sctp headers */
+#define SCTP_VTAG_PARAM 0xC007
+ struct sctp_vtag_param {
+ struct sctp_paramhdr ph;/* type=SCTP_VTAG_PARAM */
+ uint32_t local_vtag;
+ uint32_t remote_vtag;
+ } __attribute__((packed));
+
+ struct sctp_vtag_param *vtag_param;
+ struct sctp_paramhdr *param;
+ int bytes_left;
+ int param_size;
+ int param_count;
+
+ param_count = 1;
+ param = sm->sctpchnk.Asconf;
+ param_size = SCTP_SIZE32(ntohs(param->param_length));
+ bytes_left = sm->chunk_length;
+ /* step through Asconf parameters */
+ while((bytes_left >= param_size) && (bytes_left >= SN_VTAG_PARAM_SIZE)) {
+ if (ntohs(param->param_type) == SCTP_VTAG_PARAM) {
+ vtag_param = (struct sctp_vtag_param *) param;
+ switch(direction) {
+ /* The Internet draft is a little ambiguous as to the order of these vtags.
+ We think it is this way around. If we are wrong, the order will need
+ to be changed. */
+ case SN_TO_GLOBAL:
+ *g_vtag = vtag_param->local_vtag;
+ *l_vtag = vtag_param->remote_vtag;
+ break;
+ case SN_TO_LOCAL:
+ *g_vtag = vtag_param->remote_vtag;
+ *l_vtag = vtag_param->local_vtag;
+ break;
+ }
+ return(1); /* found */
+ }
+
+ bytes_left -= param_size;
+ if (bytes_left < SN_MIN_PARAM_SIZE) return(0);
+
+ param = SN_SCTP_NEXTPARAM(param);
+ param_size = SCTP_SIZE32(ntohs(param->param_length));
+ if (++param_count > sysctl_param_proc_limit) {
+ SN_LOG(SN_LOG_EVENT,
+ logsctperror("Parameter parse limit exceeded (GetAsconfVtags)",
+ sm->sctp_hdr->v_tag, sysctl_param_proc_limit, direction));
+ return(0); /* not found limit exceeded*/
+ }
+ }
+ return(0); /* not found */
+}
+
+/** @ingroup packet_parser
+ * @brief AddGlobalIPAddresses from Init,InitAck,or AddIP packets
+ *
+ * AddGlobalIPAddresses scans an SCTP chunk (in sm) for Global IP addresses, and
+ * adds them.
+ *
+ * @param sm Pointer to sctp message information
+ * @param assoc Pointer to the association this SCTP Message belongs to
+ * @param direction SN_TO_LOCAL | SN_TO_GLOBAL
+ *
+ */
+static void
+AddGlobalIPAddresses(struct sctp_nat_msg *sm, struct sctp_nat_assoc *assoc, int direction)
+{
+ struct sctp_ipv4addr_param *ipv4_param;
+ struct sctp_paramhdr *param = NULL;
+ struct sctp_GlobalAddress *G_Addr;
+ struct in_addr g_addr = {0};
+ int bytes_left = 0;
+ int param_size;
+ int param_count, addr_param_count = 0;
+
+ switch(direction) {
+ case SN_TO_GLOBAL: /* does not contain global addresses */
+ g_addr = sm->ip_hdr->ip_dst;
+ bytes_left = 0; /* force exit */
+ break;
+ case SN_TO_LOCAL:
+ g_addr = sm->ip_hdr->ip_src;
+ param_count = 1;
+ switch(sm->msg) {
+ case SN_SCTP_INIT:
+ bytes_left = sm->chunk_length - sizeof(struct sctp_init_chunk);
+ param = (struct sctp_paramhdr *)((char *)sm->sctpchnk.Init + sizeof(struct sctp_init));
+ break;
+ case SN_SCTP_INITACK:
+ bytes_left = sm->chunk_length - sizeof(struct sctp_init_ack_chunk);
+ param = (struct sctp_paramhdr *)((char *)sm->sctpchnk.InitAck + sizeof(struct sctp_init_ack));
+ break;
+ case SN_SCTP_ASCONF:
+ bytes_left = sm->chunk_length;
+ param = sm->sctpchnk.Asconf;
+ break;
+ }
+ }
+ if (bytes_left >= SN_MIN_PARAM_SIZE)
+ param_size = SCTP_SIZE32(ntohs(param->param_length));
+ else
+ param_size = bytes_left+1; /* force skip loop */
+
+ if ((assoc->state == SN_ID) && ((sm->msg == SN_SCTP_INIT) || (bytes_left < SN_MIN_PARAM_SIZE))) {/* add pkt address */
+ G_Addr = (struct sctp_GlobalAddress *) sn_malloc(sizeof(struct sctp_GlobalAddress));
+ if (G_Addr == NULL) {/* out of resources */
+ SN_LOG(SN_LOG_EVENT,
+ logsctperror("AddGlobalIPAddress: No resources for adding global address - revert to no tracking",
+ sm->sctp_hdr->v_tag, 0, direction));
+ assoc->num_Gaddr = 0; /* don't track any more for this assoc*/
+ sysctl_track_global_addresses=0;
+ return;
+ }
+ G_Addr->g_addr = g_addr;
+ if (!Add_Global_Address_to_List(assoc, G_Addr))
+ SN_LOG(SN_LOG_EVENT,
+ logsctperror("AddGlobalIPAddress: Address already in list",
+ sm->sctp_hdr->v_tag, assoc->num_Gaddr, direction));
+ }
+
+ /* step through parameters */
+ while((bytes_left >= param_size) && (bytes_left >= sizeof(struct sctp_ipv4addr_param))) {
+ if (assoc->num_Gaddr >= sysctl_track_global_addresses) {
+ SN_LOG(SN_LOG_EVENT,
+ logsctperror("AddGlobalIPAddress: Maximum Number of addresses reached",
+ sm->sctp_hdr->v_tag, sysctl_track_global_addresses, direction));
+ return;
+ }
+ switch(ntohs(param->param_type)) {
+ case SCTP_ADD_IP_ADDRESS:
+ /* skip to address parameter - leave param_size so bytes left will be calculated properly*/
+ param = (struct sctp_paramhdr *) &((struct sctp_asconf_addrv4_param *) param)->addrp;
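+ /* FALLTHROUGH */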
+ case SCTP_IPV4_ADDRESS:
+ ipv4_param = (struct sctp_ipv4addr_param *) param;
+ /* add addresses to association */
+ G_Addr = (struct sctp_GlobalAddress *) sn_malloc(sizeof(struct sctp_GlobalAddress));
+ if (G_Addr == NULL) {/* out of resources */
+ SN_LOG(SN_LOG_EVENT,
+ logsctperror("AddGlobalIPAddress: No resources for adding global address - revert to no tracking",
+ sm->sctp_hdr->v_tag, 0, direction));
+ assoc->num_Gaddr = 0; /* don't track any more for this assoc*/
+ sysctl_track_global_addresses=0;
+ return;
+ }
+ /* add address */
+ addr_param_count++;
+ if ((sm->msg == SN_SCTP_ASCONF) && (ipv4_param->addr == INADDR_ANY)) { /* use packet address */
+ G_Addr->g_addr = g_addr;
+ if (!Add_Global_Address_to_List(assoc, G_Addr))
+ SN_LOG(SN_LOG_EVENT,
+ logsctperror("AddGlobalIPAddress: Address already in list",
+ sm->sctp_hdr->v_tag, assoc->num_Gaddr, direction));
+ return; /*shouldn't be any other addresses if the zero address is given*/
+ } else {
+ G_Addr->g_addr.s_addr = ipv4_param->addr;
+ if (!Add_Global_Address_to_List(assoc, G_Addr))
+ SN_LOG(SN_LOG_EVENT,
+ logsctperror("AddGlobalIPAddress: Address already in list",
+ sm->sctp_hdr->v_tag, assoc->num_Gaddr, direction));
+ }
+ }
+
+ bytes_left -= param_size;
+ if (bytes_left < SN_MIN_PARAM_SIZE)
+ break;
+
+ param = SN_SCTP_NEXTPARAM(param);
+ param_size = SCTP_SIZE32(ntohs(param->param_length));
+ if (++param_count > sysctl_param_proc_limit) {
+ SN_LOG(SN_LOG_EVENT,
+ logsctperror("Parameter parse limit exceeded (AddGlobalIPAddress)",
+ sm->sctp_hdr->v_tag, sysctl_param_proc_limit, direction));
+ break; /* limit exceeded*/
+ }
+ }
+ if (addr_param_count == 0) {
+ SN_LOG(SN_LOG_DETAIL,
+ logsctperror("AddGlobalIPAddress: no address parameters to add",
+ sm->sctp_hdr->v_tag, assoc->num_Gaddr, direction));
+ }
+}
+
+/**
+ * @brief Add_Global_Address_to_List
+ *
+ * Adds a global IP address to an association's address list, if it is not
+ * already there. The first address added is usually the packet's address, and
+ * is most likely to be used, so it is added at the beginning. Subsequent
+ * addresses are added after this one.
+ *
+ * @param assoc Pointer to the association this SCTP Message belongs to
+ * @param G_addr Pointer to the global address to add
+ *
+ * @return 1 - success | 0 - fail
+ */
+static int Add_Global_Address_to_List(struct sctp_nat_assoc *assoc, struct sctp_GlobalAddress *G_addr)
+{
+ struct sctp_GlobalAddress *iter_G_Addr = NULL, *first_G_Addr = NULL;
+ first_G_Addr = LIST_FIRST(&(assoc->Gaddr));
+ if (first_G_Addr == NULL) {
+ LIST_INSERT_HEAD(&(assoc->Gaddr), G_addr, list_Gaddr); /* add new address to beginning of list*/
+ } else {
+ LIST_FOREACH(iter_G_Addr, &(assoc->Gaddr), list_Gaddr) {
+ if (G_addr->g_addr.s_addr == iter_G_Addr->g_addr.s_addr)
+ return(0); /* already exists, so don't add */
+ }
+ LIST_INSERT_AFTER(first_G_Addr, G_addr, list_Gaddr); /* add address after the first entry */
+ }
+ assoc->num_Gaddr++;
+ return(1); /* success */
+}
+
+/** @ingroup packet_parser
+ * @brief RmGlobalIPAddresses from DelIP packets
+ *
+ * RmGlobalIPAddresses scans an ASCONF chunk for DelIP parameters to remove the
+ * given Global IP addresses from the association. It will not delete the
+ * address if it is the only address in the list.
+ *
+ *
+ * @param sm Pointer to sctp message information
+ * @param assoc Pointer to the association this SCTP Message belongs to
+ * @param direction SN_TO_LOCAL | SN_TO_GLOBAL
+ *
+ */
+static void
+RmGlobalIPAddresses(struct sctp_nat_msg *sm, struct sctp_nat_assoc *assoc, int direction)
+{
+ struct sctp_asconf_addrv4_param *asconf_ipv4_param;
+ struct sctp_paramhdr *param;
+ struct sctp_GlobalAddress *G_Addr, *G_Addr_tmp;
+ struct in_addr g_addr;
+ int bytes_left;
+ int param_size;
+ int param_count;
+
+ if(direction == SN_TO_GLOBAL)
+ g_addr = sm->ip_hdr->ip_dst;
+ else
+ g_addr = sm->ip_hdr->ip_src;
+
+ bytes_left = sm->chunk_length;
+ param_count = 1;
+ param = sm->sctpchnk.Asconf;
+ if (bytes_left >= SN_MIN_PARAM_SIZE) {
+ param_size = SCTP_SIZE32(ntohs(param->param_length));
+ } else {
+ SN_LOG(SN_LOG_EVENT,
+ logsctperror("RmGlobalIPAddress: truncated packet - cannot remove IP addresses",
+ sm->sctp_hdr->v_tag, sysctl_track_global_addresses, direction));
+ return;
+ }
+
+ /* step through Asconf parameters */
+ while((bytes_left >= param_size) && (bytes_left >= sizeof(struct sctp_ipv4addr_param))) {
+ if (ntohs(param->param_type) == SCTP_DEL_IP_ADDRESS) {
+ asconf_ipv4_param = (struct sctp_asconf_addrv4_param *) param;
+ if (asconf_ipv4_param->addrp.addr == INADDR_ANY) { /* remove all bar pkt address */
+ LIST_FOREACH_SAFE(G_Addr, &(assoc->Gaddr), list_Gaddr, G_Addr_tmp) {
+ if(G_Addr->g_addr.s_addr != sm->ip_hdr->ip_src.s_addr) {
+ if (assoc->num_Gaddr > 1) { /* only delete if more than one */
+ LIST_REMOVE(G_Addr, list_Gaddr);
+ sn_free(G_Addr);
+ assoc->num_Gaddr--;
+ } else {
+ SN_LOG(SN_LOG_EVENT,
+ logsctperror("RmGlobalIPAddress: Request to remove last IP address (didn't)",
+ sm->sctp_hdr->v_tag, assoc->num_Gaddr, direction));
+ }
+ }
+ }
+ return; /*shouldn't be any other addresses if the zero address is given*/
+ } else {
+ LIST_FOREACH_SAFE(G_Addr, &(assoc->Gaddr), list_Gaddr, G_Addr_tmp) {
+ if(G_Addr->g_addr.s_addr == asconf_ipv4_param->addrp.addr) {
+ if (assoc->num_Gaddr > 1) { /* only delete if more than one */
+ LIST_REMOVE(G_Addr, list_Gaddr);
+ sn_free(G_Addr);
+ assoc->num_Gaddr--;
+ break; /* Since add only adds new addresses, there should be no double entries */
+ } else {
+ SN_LOG(SN_LOG_EVENT,
+ logsctperror("RmGlobalIPAddress: Request to remove last IP address (didn't)",
+ sm->sctp_hdr->v_tag, assoc->num_Gaddr, direction));
+ }
+ }
+ }
+ }
+ }
+ bytes_left -= param_size;
+ if (bytes_left == 0) return;
+ else if (bytes_left < SN_MIN_PARAM_SIZE) {
+ SN_LOG(SN_LOG_EVENT,
+ logsctperror("RmGlobalIPAddress: truncated packet - may not have removed all IP addresses",
+ sm->sctp_hdr->v_tag, sysctl_track_global_addresses, direction));
+ return;
+ }
+
+ param = SN_SCTP_NEXTPARAM(param);
+ param_size = SCTP_SIZE32(ntohs(param->param_length));
+ if (++param_count > sysctl_param_proc_limit) {
+ SN_LOG(SN_LOG_EVENT,
+ logsctperror("Parameter parse limit exceeded (RmGlobalIPAddress)",
+ sm->sctp_hdr->v_tag, sysctl_param_proc_limit, direction));
+ return; /* limit exceeded*/
+ }
+ }
+}
+
+/** @ingroup packet_parser
+ * @brief Check that ASCONF was successful
+ *
+ * Each ASCONF configuration parameter carries a correlation ID which should be
+ * matched with an ASCONFack. This is difficult for a NAT, since every
+ * association could potentially have a number of outstanding ASCONF
+ * configuration parameters, which should only be activated on receipt of the
+ * ACK.
+ *
+ * Currently we only look for an ACK when the NAT is setting up a new
+ * association (ie AddIP for a connection that the NAT does not know about
+ * because the original Init went through a public interface or another NAT)
+ * Since there is currently no connection on this path, there should be no other
+ * ASCONF configuration parameters outstanding, so we presume that if there is
+ * an ACK that it is responding to the AddIP and activate the new association.
+ *
+ * @param la Pointer to the relevant libalias instance
+ * @param sm Pointer to sctp message information
+ * @param direction SN_TO_LOCAL | SN_TO_GLOBAL
+ *
+ * @return 1 - success | 0 - fail
+ */
+static int
+IsASCONFack(struct libalias *la, struct sctp_nat_msg *sm, int direction)
+{
+ struct sctp_paramhdr *param;
+ int bytes_left;
+ int param_size;
+ int param_count;
+
+ param_count = 1;
+ param = sm->sctpchnk.Asconf;
+ param_size = SCTP_SIZE32(ntohs(param->param_length));
+ if (param_size == 8)
+ return(1); /*success - default acknowledgement of everything */
+
+ bytes_left = sm->chunk_length;
+ if (bytes_left < param_size)
+ return(0); /* not found */
+ /* step through Asconf parameters */
+ while(bytes_left >= SN_ASCONFACK_PARAM_SIZE) {
+ if (ntohs(param->param_type) == SCTP_SUCCESS_REPORT)
+ return(1); /* success - but can't match correlation IDs - should only be one */
+ /* check others just in case */
+ bytes_left -= param_size;
+ if (bytes_left >= SN_MIN_PARAM_SIZE) {
+ param = SN_SCTP_NEXTPARAM(param);
+ } else {
+ return(0);
+ }
+ param_size = SCTP_SIZE32(ntohs(param->param_length));
+ if (bytes_left < param_size) return(0);
+
+ if (++param_count > sysctl_param_proc_limit) {
+ SN_LOG(SN_LOG_EVENT,
+ logsctperror("Parameter parse limit exceeded (IsASCONFack)",
+ sm->sctp_hdr->v_tag, sysctl_param_proc_limit, direction));
+ return(0); /* not found limit exceeded*/
+ }
+ }
+ return(0); /* not success */
+}
+
+/** @ingroup packet_parser
+ * @brief Check to see if ASCONF contains an Add IP or Del IP parameter
+ *
+ * IsADDorDEL scans an ASCONF packet to see if it contains an AddIP or DelIP
+ * parameter
+ *
+ * @param la Pointer to the relevant libalias instance
+ * @param sm Pointer to sctp message information
+ * @param direction SN_TO_LOCAL | SN_TO_GLOBAL
+ *
+ * @return SCTP_ADD_IP_ADDRESS | SCTP_DEL_IP_ADDRESS | 0 - fail
+ */
+static int
+IsADDorDEL(struct libalias *la, struct sctp_nat_msg *sm, int direction)
+{
+ struct sctp_paramhdr *param;
+ int bytes_left;
+ int param_size;
+ int param_count;
+
+ param_count = 1;
+ param = sm->sctpchnk.Asconf;
+ param_size = SCTP_SIZE32(ntohs(param->param_length));
+
+ bytes_left = sm->chunk_length;
+ if (bytes_left < param_size)
+ return(0); /* not found */
+ /* step through Asconf parameters */
+ while(bytes_left >= SN_ASCONFACK_PARAM_SIZE) {
+ if (ntohs(param->param_type) == SCTP_ADD_IP_ADDRESS)
+ return(SCTP_ADD_IP_ADDRESS);
+ else if (ntohs(param->param_type) == SCTP_DEL_IP_ADDRESS)
+ return(SCTP_DEL_IP_ADDRESS);
+ /* check others just in case */
+ bytes_left -= param_size;
+ if (bytes_left >= SN_MIN_PARAM_SIZE) {
+ param = SN_SCTP_NEXTPARAM(param);
+ } else {
+ return(0); /*Neither found */
+ }
+ param_size = SCTP_SIZE32(ntohs(param->param_length));
+ if (bytes_left < param_size) return(0);
+
+ if (++param_count > sysctl_param_proc_limit) {
+ SN_LOG(SN_LOG_EVENT,
+ logsctperror("Parameter parse limit exceeded IsADDorDEL)",
+ sm->sctp_hdr->v_tag, sysctl_param_proc_limit, direction));
+ return(0); /* not found limit exceeded*/
+ }
+ }
+ return(0); /*Neither found */
+}
+
+/* ----------------------------------------------------------------------
+ * STATE MACHINE CODE
+ * ----------------------------------------------------------------------
+ */
+/** @addtogroup state_machine
+ *
+ * The SCTP NAT State Machine functions will:
+ * - Process an already parsed packet
+ * - Use the existing NAT Hash Tables
+ * - Determine the next state for the association
+ * - Update the NAT Hash Tables and Timer Queues
+ * - Return the appropriate action to take with the packet
+ */
+/** @ingroup state_machine
+ * @brief Process SCTP message
+ *
+ * This function is the base state machine. It calls the processing engine for
+ * each state.
+ *
+ * @param la Pointer to the relevant libalias instance
+ * @param direction SN_TO_LOCAL | SN_TO_GLOBAL
+ * @param sm Pointer to sctp message information
+ * @param assoc Pointer to the association this SCTP Message belongs to
+ *
+ * @return SN_DROP_PKT | SN_NAT_PKT | SN_REPLY_ABORT | SN_REPLY_ERROR | SN_PROCESSING_ERROR
+ */
+static int
+ProcessSctpMsg(struct libalias *la, int direction, struct sctp_nat_msg *sm, struct sctp_nat_assoc *assoc)
+{
+ int rtnval;
+
+ switch (assoc->state) {
+ case SN_ID: /* Idle */
+ rtnval = ID_process(la, direction, assoc, sm);
+ if (rtnval != SN_NAT_PKT) {
+ assoc->state = SN_RM;/* Mark for removal*/
+ }
+ return(rtnval);
+ case SN_INi: /* Initialising - Init */
+ return(INi_process(la, direction, assoc, sm));
+ case SN_INa: /* Initialising - AddIP */
+ return(INa_process(la, direction, assoc, sm));
+ case SN_UP: /* Association UP */
+ return(UP_process(la, direction, assoc, sm));
+ case SN_CL: /* Association Closing */
+ return(CL_process(la, direction, assoc, sm));
+ }
+ return(SN_PROCESSING_ERROR);
+}
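+
+/*
+ * Summary of the transitions driven by the per-state handlers below
+ * (ABORT moves any state to SN_RM):
+ *
+ *	SN_ID --INIT-->     SN_INi --INIT-ACK-->  SN_UP
+ *	SN_ID --AddIP-->    SN_INa --AddIP-ACK--> SN_UP
+ *	SN_UP --SHUT-ACK--> SN_CL  --SHUT-COMP--> SN_RM (or holddown in SN_CL)
+ */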
+
+/** @ingroup state_machine
+ * @brief Process SCTP message while in the Idle state
+ *
+ * This function looks for an Incoming INIT or AddIP message.
+ *
+ * All other SCTP messages are invalid when in SN_ID, and are dropped.
+ *
+ * @param la Pointer to the relevant libalias instance
+ * @param direction SN_TO_LOCAL | SN_TO_GLOBAL
+ * @param sm Pointer to sctp message information
+ * @param assoc Pointer to the association this SCTP Message belongs to
+ *
+ * @return SN_NAT_PKT | SN_DROP_PKT | SN_REPLY_ABORT | SN_REPLY_ERROR
+ */
+static int
+ID_process(struct libalias *la, int direction, struct sctp_nat_assoc *assoc, struct sctp_nat_msg *sm)
+{
+ switch(sm->msg) {
+ case SN_SCTP_ASCONF: /* a packet containing an ASCONF chunk with ADDIP */
+ if (!sysctl_accept_global_ootb_addip && (direction == SN_TO_LOCAL))
+ return(SN_DROP_PKT);
+ /* if this Asconf packet does not contain the Vtag parameters it is of no use in Idle state */
+ if (!GetAsconfVtags(la, sm, &(assoc->l_vtag), &(assoc->g_vtag), direction))
+ return(SN_DROP_PKT);
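+ /* FALLTHROUGH */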
+ case SN_SCTP_INIT: /* a packet containing an INIT chunk or an ASCONF AddIP */
+ if (sysctl_track_global_addresses)
+ AddGlobalIPAddresses(sm, assoc, direction);
+ switch(direction){
+ case SN_TO_GLOBAL:
+ assoc->l_addr = sm->ip_hdr->ip_src;
+ assoc->a_addr = FindAliasAddress(la, assoc->l_addr);
+ assoc->l_port = sm->sctp_hdr->src_port;
+ assoc->g_port = sm->sctp_hdr->dest_port;
+ if(sm->msg == SN_SCTP_INIT)
+ assoc->g_vtag = sm->sctpchnk.Init->initiate_tag;
+ if (AddSctpAssocGlobal(la, assoc)) /* DB clash *///**** need to add dst address
+ return((sm->msg == SN_SCTP_INIT) ? SN_REPLY_ABORT : SN_REPLY_ERROR);
+ if(sm->msg == SN_SCTP_ASCONF) {
+ if (AddSctpAssocLocal(la, assoc, sm->ip_hdr->ip_dst)) /* DB clash */
+ return(SN_REPLY_ERROR);
+ assoc->TableRegister |= SN_WAIT_TOLOCAL; /* wait for tolocal ack */
+ }
+ break;
+ case SN_TO_LOCAL:
+ assoc->l_addr = FindSctpRedirectAddress(la, sm);
+ assoc->a_addr = sm->ip_hdr->ip_dst;
+ assoc->l_port = sm->sctp_hdr->dest_port;
+ assoc->g_port = sm->sctp_hdr->src_port;
+ if(sm->msg == SN_SCTP_INIT)
+ assoc->l_vtag = sm->sctpchnk.Init->initiate_tag;
+ if (AddSctpAssocLocal(la, assoc, sm->ip_hdr->ip_src)) /* DB clash */
+ return((sm->msg == SN_SCTP_INIT) ? SN_REPLY_ABORT : SN_REPLY_ERROR);
+ if(sm->msg == SN_SCTP_ASCONF) {
+ if (AddSctpAssocGlobal(la, assoc)) /* DB clash */ //**** need to add src address
+ return(SN_REPLY_ERROR);
+ assoc->TableRegister |= SN_WAIT_TOGLOBAL; /* wait for toglobal ack */
+ }
+ break;
+ }
+ assoc->state = (sm->msg == SN_SCTP_INIT) ? SN_INi : SN_INa;
+ assoc->exp = SN_I_T(la);
+ sctp_AddTimeOut(la,assoc);
+ return(SN_NAT_PKT);
+ default: /* Any other type of SCTP message is not valid in Idle */
+ return(SN_DROP_PKT);
+ }
+ return(SN_DROP_PKT); /* shouldn't get here; log, drop and hope for the best */
+}
+
+/** @ingroup state_machine
+ * @brief Process SCTP message while waiting for an INIT-ACK message
+ *
+ * Only an INIT-ACK, resent INIT, or an ABORT SCTP packet are valid in this
+ * state, all other packets are dropped.
+ *
+ * @param la Pointer to the relevant libalias instance
+ * @param direction SN_TO_LOCAL | SN_TO_GLOBAL
+ * @param sm Pointer to sctp message information
+ * @param assoc Pointer to the association this SCTP Message belongs to
+ *
+ * @return SN_NAT_PKT | SN_DROP_PKT | SN_REPLY_ABORT
+ */
+static int
+INi_process(struct libalias *la, int direction, struct sctp_nat_assoc *assoc, struct sctp_nat_msg *sm)
+{
+ switch(sm->msg) {
+ case SN_SCTP_INIT: /* a packet containing a retransmitted INIT chunk */
+ sctp_ResetTimeOut(la, assoc, SN_I_T(la));
+ return(SN_NAT_PKT);
+ case SN_SCTP_INITACK: /* a packet containing an INIT-ACK chunk */
+ switch(direction){
+ case SN_TO_LOCAL:
+ if (assoc->num_Gaddr) /*If tracking global addresses for this association */
+ AddGlobalIPAddresses(sm, assoc, direction);
+ assoc->l_vtag = sm->sctpchnk.Init->initiate_tag;
+ if (AddSctpAssocLocal(la, assoc, sm->ip_hdr->ip_src)) { /* DB clash */
+ assoc->state = SN_RM;/* Mark for removal*/
+ return(SN_SEND_ABORT);
+ }
+ break;
+ case SN_TO_GLOBAL:
+ assoc->l_addr = sm->ip_hdr->ip_src; // Only if not set in Init!
+ assoc->g_vtag = sm->sctpchnk.Init->initiate_tag;
+ if (AddSctpAssocGlobal(la, assoc)) { /* DB clash */
+ assoc->state = SN_RM;/* Mark for removal*/
+ return(SN_SEND_ABORT);
+ }
+ break;
+ }
+ assoc->state = SN_UP;/* association established for NAT */
+ sctp_ResetTimeOut(la,assoc, SN_U_T(la));
+ return(SN_NAT_PKT);
+ case SN_SCTP_ABORT: /* a packet containing an ABORT chunk */
+ assoc->state = SN_RM;/* Mark for removal*/
+ return(SN_NAT_PKT);
+ default:
+ return(SN_DROP_PKT);
+ }
+ return(SN_DROP_PKT); /* shouldn't get here; log, drop and hope for the best */
+}
+
+/** @ingroup state_machine
+ * @brief Process SCTP message while waiting for an AddIp-ACK message
+ *
+ * Only an AddIP-ACK, resent AddIP, or an ABORT message are valid, all other
+ * SCTP packets are dropped
+ *
+ * @param la Pointer to the relevant libalias instance
+ * @param direction SN_TO_LOCAL | SN_TO_GLOBAL
+ * @param sm Pointer to sctp message information
+ * @param assoc Pointer to the association this SCTP Message belongs to
+ *
+ * @return SN_NAT_PKT | SN_DROP_PKT
+ */
+static int
+INa_process(struct libalias *la, int direction,struct sctp_nat_assoc *assoc, struct sctp_nat_msg *sm)
+{
+ switch(sm->msg) {
+ case SN_SCTP_ASCONF: /* a packet containing an ASCONF chunk*/
+ sctp_ResetTimeOut(la,assoc, SN_I_T(la));
+ return(SN_NAT_PKT);
+ case SN_SCTP_ASCONFACK: /* a packet containing an ASCONF chunk with a ADDIP-ACK */
+ switch(direction){
+ case SN_TO_LOCAL:
+ if (!(assoc->TableRegister & SN_WAIT_TOLOCAL)) /* wrong direction */
+ return(SN_DROP_PKT);
+ break;
+ case SN_TO_GLOBAL:
+ if (!(assoc->TableRegister & SN_WAIT_TOGLOBAL)) /* wrong direction */
+ return(SN_DROP_PKT);
+ }
+ if (IsASCONFack(la,sm,direction)) {
+ assoc->TableRegister &= SN_BOTH_TBL; /* remove wait flags */
+ assoc->state = SN_UP; /* association established for NAT */
+ sctp_ResetTimeOut(la,assoc, SN_U_T(la));
+ return(SN_NAT_PKT);
+ } else {
+ assoc->state = SN_RM;/* Mark for removal*/
+ return(SN_NAT_PKT);
+ }
+ case SN_SCTP_ABORT: /* a packet containing an ABORT chunk */
+ assoc->state = SN_RM;/* Mark for removal*/
+ return(SN_NAT_PKT);
+ default:
+ return(SN_DROP_PKT);
+ }
+ return(SN_DROP_PKT); /* shouldn't get here; log, drop and hope for the best */
+}
+
+/** @ingroup state_machine
+ * @brief Process SCTP messages while association is UP redirecting packets
+ *
+ * While in the SN_UP state, all packets for the particular association
+ * are passed. Only a SHUT-ACK or an ABORT will cause a change of state.
+ *
+ * @param la Pointer to the relevant libalias instance
+ * @param direction SN_TO_LOCAL | SN_TO_GLOBAL
+ * @param sm Pointer to sctp message information
+ * @param assoc Pointer to the association this SCTP Message belongs to
+ *
+ * @return SN_NAT_PKT | SN_DROP_PKT
+ */
+static int
+UP_process(struct libalias *la, int direction, struct sctp_nat_assoc *assoc, struct sctp_nat_msg *sm)
+{
+ switch(sm->msg) {
+ case SN_SCTP_SHUTACK: /* a packet containing a SHUTDOWN-ACK chunk */
+ assoc->state = SN_CL;
+ sctp_ResetTimeOut(la,assoc, SN_C_T(la));
+ return(SN_NAT_PKT);
+ case SN_SCTP_ABORT: /* a packet containing an ABORT chunk */
+ assoc->state = SN_RM;/* Mark for removal*/
+ return(SN_NAT_PKT);
+ case SN_SCTP_ASCONF: /* a packet containing an ASCONF chunk*/
+ if ((direction == SN_TO_LOCAL) && assoc->num_Gaddr) /*If tracking global addresses for this association & from global side */
+ switch(IsADDorDEL(la,sm,direction)) {
+ case SCTP_ADD_IP_ADDRESS:
+ AddGlobalIPAddresses(sm, assoc, direction);
+ break;
+ case SCTP_DEL_IP_ADDRESS:
+ RmGlobalIPAddresses(sm, assoc, direction);
+ break;
+ } /* fall through to default */
+ default:
+ sctp_ResetTimeOut(la,assoc, SN_U_T(la));
+ return(SN_NAT_PKT); /* forward packet */
+ }
+ return(SN_DROP_PKT); /* shouldn't get here; log, drop and hope for the best */
+}
+
+/** @ingroup state_machine
+ * @brief Process SCTP message while association is in the process of closing
+ *
+ * This function waits for a SHUT-COMP to close the association. Depending on
+ * the setting of sysctl_holddown_timer it may not remove the association
+ * immediately, but leave it up until SN_X_T(la). Only SHUT-COMP, SHUT-ACK, and
+ * ABORT packets are permitted in this state. All other packets are dropped.
+ *
+ * @param la Pointer to the relevant libalias instance
+ * @param direction SN_TO_LOCAL | SN_TO_GLOBAL
+ * @param sm Pointer to sctp message information
+ * @param assoc Pointer to the association this SCTP Message belongs to
+ *
+ * @return SN_NAT_PKT | SN_DROP_PKT
+ */
+static int
+CL_process(struct libalias *la, int direction,struct sctp_nat_assoc *assoc, struct sctp_nat_msg *sm)
+{
+ switch(sm->msg) {
+ case SN_SCTP_SHUTCOMP: /* a packet containing a SHUTDOWN-COMPLETE chunk */
+ assoc->state = SN_CL; /* Stay in Close state until timeout */
+ if (sysctl_holddown_timer > 0)
+ sctp_ResetTimeOut(la, assoc, SN_X_T(la));/* allow to stay open for Tbit packets*/
+ else
+ assoc->state = SN_RM;/* Mark for removal*/
+ return(SN_NAT_PKT);
+ case SN_SCTP_SHUTACK: /* a packet containing a SHUTDOWN-ACK chunk */
+ assoc->state = SN_CL; /* Stay in Close state until timeout */
+ sctp_ResetTimeOut(la, assoc, SN_C_T(la));
+ return(SN_NAT_PKT);
+ case SN_SCTP_ABORT: /* a packet containing an ABORT chunk */
+ assoc->state = SN_RM;/* Mark for removal*/
+ return(SN_NAT_PKT);
+ default:
+ return(SN_DROP_PKT);
+ }
+ return(SN_DROP_PKT); /* shouldn't get here; log, drop and hope for the best */
+}
+
+/* ----------------------------------------------------------------------
+ * HASH TABLE CODE
+ * ----------------------------------------------------------------------
+ */
+/** @addtogroup Hash
+ *
+ * The Hash functions facilitate searching the NAT Hash Tables for associations
+ * as well as adding/removing associations from the table(s).
+ */
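+/*
+ * Note: both tables are keyed on a (vtag, port) pair via SN_TABLE_HASH()
+ * (defined in alias_sctp.h), so each lookup below hashes into a bucket and
+ * then walks that bucket's list comparing vtags, ports and addresses.
+ */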
+/** @ingroup Hash
+ * @brief Find the SCTP association given the local address, port and vtag
+ *
+ * Searches the local look-up table for the association entry matching the
+ * provided local <address:ports:vtag> tuple
+ *
+ * @param la Pointer to the relevant libalias instance
+ * @param l_addr local address
+ * @param g_addr global address
+ * @param l_vtag local Vtag
+ * @param l_port local Port
+ * @param g_port global Port
+ *
+ * @return pointer to association or NULL
+ */
+static struct sctp_nat_assoc*
+FindSctpLocal(struct libalias *la, struct in_addr l_addr, struct in_addr g_addr, uint32_t l_vtag, uint16_t l_port, uint16_t g_port)
+{
+ u_int i;
+ struct sctp_nat_assoc *assoc = NULL;
+ struct sctp_GlobalAddress *G_Addr = NULL;
+
+ if (l_vtag != 0) { /* an init packet, vtag==0 */
+ i = SN_TABLE_HASH(l_vtag, l_port, la->sctpNatTableSize);
+ LIST_FOREACH(assoc, &la->sctpTableLocal[i], list_L) {
+ if ((assoc->l_vtag == l_vtag) && (assoc->l_port == l_port) && (assoc->g_port == g_port)\
+ && (assoc->l_addr.s_addr == l_addr.s_addr)) {
+ if (assoc->num_Gaddr) {
+ LIST_FOREACH(G_Addr, &(assoc->Gaddr), list_Gaddr) {
+ if(G_Addr->g_addr.s_addr == g_addr.s_addr)
+ return(assoc);
+ }
+ } else {
+ return(assoc);
+ }
+ }
+ }
+ }
+ return(NULL);
+}
+
+/** @ingroup Hash
+ * @brief Check for Global Clash
+ *
+ * Searches the global look-up table for the association entry matching the
+ * provided global <(addresses):ports:vtag> tuple
+ *
+ * @param la Pointer to the relevant libalias instance
+ * @param Cassoc association being checked for a clash
+ *
+ * @return pointer to association or NULL
+ */
+static struct sctp_nat_assoc*
+FindSctpGlobalClash(struct libalias *la, struct sctp_nat_assoc *Cassoc)
+{
+ u_int i;
+ struct sctp_nat_assoc *assoc = NULL;
+ struct sctp_GlobalAddress *G_Addr = NULL;
+ struct sctp_GlobalAddress *G_AddrC = NULL;
+
+ if (Cassoc->g_vtag != 0) { /* an init packet, vtag==0 */
+ i = SN_TABLE_HASH(Cassoc->g_vtag, Cassoc->g_port, la->sctpNatTableSize);
+ LIST_FOREACH(assoc, &la->sctpTableGlobal[i], list_G) {
+ if ((assoc->g_vtag == Cassoc->g_vtag) && (assoc->g_port == Cassoc->g_port) && (assoc->l_port == Cassoc->l_port)) {
+ if (assoc->num_Gaddr) {
+ LIST_FOREACH(G_AddrC, &(Cassoc->Gaddr), list_Gaddr) {
+ LIST_FOREACH(G_Addr, &(assoc->Gaddr), list_Gaddr) {
+ if(G_Addr->g_addr.s_addr == G_AddrC->g_addr.s_addr)
+ return(assoc);
+ }
+ }
+ } else {
+ return(assoc);
+ }
+ }
+ }
+ }
+ return(NULL);
+}
+
+/** @ingroup Hash
+ * @brief Find the SCTP association given the global port and vtag
+ *
+ * Searches the global look-up table for the association entry matching the
+ * provided global <address:ports:vtag> tuple
+ *
+ * If all but the global address match it sets partial_match to 1 to indicate a
+ * partial match. If the NAT is tracking global IP addresses for this
+ * association, the NAT may respond with an ERRORM to request the missing
+ * address to be added.
+ *
+ * @param la Pointer to the relevant libalias instance
+ * @param g_addr global address
+ * @param g_vtag global vtag
+ * @param g_port global port
+ * @param l_port local port
+ *
+ * @return pointer to association or NULL
+ */
+static struct sctp_nat_assoc*
+FindSctpGlobal(struct libalias *la, struct in_addr g_addr, uint32_t g_vtag, uint16_t g_port, uint16_t l_port, int *partial_match)
+{
+ u_int i;
+ struct sctp_nat_assoc *assoc = NULL;
+ struct sctp_GlobalAddress *G_Addr = NULL;
+
+ *partial_match = 0;
+ if (g_vtag != 0) { /* an init packet, vtag==0 */
+ i = SN_TABLE_HASH(g_vtag, g_port, la->sctpNatTableSize);
+ LIST_FOREACH(assoc, &la->sctpTableGlobal[i], list_G) {
+ if ((assoc->g_vtag == g_vtag) && (assoc->g_port == g_port) && (assoc->l_port == l_port)) {
+ *partial_match = 1;
+ if (assoc->num_Gaddr) {
+ LIST_FOREACH(G_Addr, &(assoc->Gaddr), list_Gaddr) {
+ if(G_Addr->g_addr.s_addr == g_addr.s_addr)
+ return(assoc);
+ }
+ } else {
+ return(assoc);
+ }
+ }
+ }
+ }
+ return(NULL);
+}
+
+/** @ingroup Hash
+ * @brief Find the SCTP association for a T-Flag message (given the global port and local vtag)
+ *
+ * Searches the local look-up table for a unique association entry matching the
+ * provided global port and local vtag information
+ *
+ * @param la Pointer to the relevant libalias instance
+ * @param g_addr global address
+ * @param l_vtag local Vtag
+ * @param g_port global Port
+ * @param l_port local Port
+ *
+ * @return pointer to association or NULL
+ */
+static struct sctp_nat_assoc*
+FindSctpLocalT(struct libalias *la, struct in_addr g_addr, uint32_t l_vtag, uint16_t g_port, uint16_t l_port)
+{
+ u_int i;
+ struct sctp_nat_assoc *assoc = NULL, *lastmatch = NULL;
+ struct sctp_GlobalAddress *G_Addr = NULL;
+ int cnt = 0;
+
+ if (l_vtag != 0) { /* INIT packets carry vtag == 0, so there is nothing to match */
+ i = SN_TABLE_HASH(l_vtag, g_port, la->sctpNatTableSize);
+ LIST_FOREACH(assoc, &la->sctpTableGlobal[i], list_G) {
+ if ((assoc->g_vtag == l_vtag) && (assoc->g_port == g_port) && (assoc->l_port == l_port)) {
+ if (assoc->num_Gaddr) {
+ LIST_FOREACH(G_Addr, &(assoc->Gaddr), list_Gaddr) {
+ if(G_Addr->g_addr.s_addr == g_addr.s_addr)
+ return(assoc); /* full match */
+ }
+ } else {
+ if (++cnt > 1) return(NULL);
+ lastmatch = assoc;
+ }
+ }
+ }
+ }
+ /* If there is more than one match we do not know which local address to send to */
+ return( cnt ? lastmatch : NULL );
+}
+
+/** @ingroup Hash
+ * @brief Find the SCTP association for a T-Flag message (given the local port and global vtag)
+ *
+ * Searches the global look-up table for a unique association entry matching the
+ * provided local port and global vtag information
+ *
+ * @param la Pointer to the relevant libalias instance
+ * @param g_addr global address
+ * @param g_vtag global vtag
+ * @param l_port local port
+ * @param g_port global port
+ *
+ * @return pointer to association or NULL
+ */
+static struct sctp_nat_assoc*
+FindSctpGlobalT(struct libalias *la, struct in_addr g_addr, uint32_t g_vtag, uint16_t l_port, uint16_t g_port)
+{
+ u_int i;
+ struct sctp_nat_assoc *assoc = NULL;
+ struct sctp_GlobalAddress *G_Addr = NULL;
+
+ if (g_vtag != 0) { /* INIT packets carry vtag == 0, so there is nothing to match */
+ i = SN_TABLE_HASH(g_vtag, l_port, la->sctpNatTableSize);
+ LIST_FOREACH(assoc, &la->sctpTableLocal[i], list_L) {
+ if ((assoc->l_vtag == g_vtag) && (assoc->l_port == l_port) && (assoc->g_port == g_port)) {
+ if (assoc->num_Gaddr) {
+ LIST_FOREACH(G_Addr, &(assoc->Gaddr), list_Gaddr) {
+ if(G_Addr->g_addr.s_addr == g_addr.s_addr)
+ return(assoc);
+ }
+ } else {
+ return(assoc);
+ }
+ }
+ }
+ }
+ return(NULL);
+}
+
+/** @ingroup Hash
+ * @brief Add the sctp association information to the local look up table
+ *
+ * Searches the local look-up table for an existing association with the same
+ * details. If a match exists and is ONLY in the local look-up table, then this
+ * is a repeated INIT packet; the stale association must be removed from the
+ * look-up table before the new association is added.
+ *
+ * The new association is added to the head of the list and state is updated
+ *
+ * @param la Pointer to the relevant libalias instance
+ * @param assoc pointer to sctp association
+ * @param g_addr global address
+ *
+ * @return SN_ADD_OK | SN_ADD_CLASH
+ */
+static int
+AddSctpAssocLocal(struct libalias *la, struct sctp_nat_assoc *assoc, struct in_addr g_addr)
+{
+ struct sctp_nat_assoc *found;
+
+ LIBALIAS_LOCK_ASSERT(la);
+ found = FindSctpLocal(la, assoc->l_addr, g_addr, assoc->l_vtag, assoc->l_port, assoc->g_port);
+ /*
+ * Note that if a different global address initiated this INIT,
+ * i.e. it was not resent as presumed:
+ * - if the local receiver sees it for the first time, it will establish
+ * an association with the new global host
+ * - if it receives an INIT from a different global address after sending a
+ * lost INIT-ACK, it will send an INIT-ACK to the new global host; the first
+ * association attempt will then be blocked if retried.
+ */
+ if (found != NULL) {
+ if ((found->TableRegister == SN_LOCAL_TBL) && (found->g_port == assoc->g_port)) { /* resent message */
+ RmSctpAssoc(la, found);
+ sctp_RmTimeOut(la, found);
+ freeGlobalAddressList(found);
+ sn_free(found);
+ } else
+ return(SN_ADD_CLASH);
+ }
+
+ LIST_INSERT_HEAD(&la->sctpTableLocal[SN_TABLE_HASH(assoc->l_vtag, assoc->l_port, la->sctpNatTableSize)],
+ assoc, list_L);
+ assoc->TableRegister |= SN_LOCAL_TBL;
+ la->sctpLinkCount++; //increment link count
+
+ if (assoc->TableRegister == SN_BOTH_TBL) {
+ /* libalias log -- controlled by libalias */
+ if (la->packetAliasMode & PKT_ALIAS_LOG)
+ SctpShowAliasStats(la);
+
+ SN_LOG(SN_LOG_INFO, logsctpassoc(assoc, "^"));
+ }
+
+ return(SN_ADD_OK);
+}
+
+/** @ingroup Hash
+ * @brief Add the sctp association information to the global look up table
+ *
+ * Searches the global look-up table for an existing association with the same
+ * details. If a match exists and is ONLY in the global look-up table, then this
+ * is a repeated INIT packet; the stale association must be removed from the
+ * look-up table before the new association is added.
+ *
+ * The new association is added to the head of the list and state is updated
+ *
+ * @param la Pointer to the relevant libalias instance
+ * @param assoc pointer to sctp association
+ *
+ * @return SN_ADD_OK | SN_ADD_CLASH
+ */
+static int
+AddSctpAssocGlobal(struct libalias *la, struct sctp_nat_assoc *assoc)
+{
+ struct sctp_nat_assoc *found;
+
+ LIBALIAS_LOCK_ASSERT(la);
+ found = FindSctpGlobalClash(la, assoc);
+ if (found != NULL) {
+ if ((found->TableRegister == SN_GLOBAL_TBL) &&
+     (found->l_addr.s_addr == assoc->l_addr.s_addr) && (found->l_port == assoc->l_port)) { /* resent message */
+ RmSctpAssoc(la, found);
+ sctp_RmTimeOut(la, found);
+ freeGlobalAddressList(found);
+ sn_free(found);
+ } else
+ return(SN_ADD_CLASH);
+ }
+
+ LIST_INSERT_HEAD(&la->sctpTableGlobal[SN_TABLE_HASH(assoc->g_vtag, assoc->g_port, la->sctpNatTableSize)],
+ assoc, list_G);
+ assoc->TableRegister |= SN_GLOBAL_TBL;
+ la->sctpLinkCount++; //increment link count
+
+ if (assoc->TableRegister == SN_BOTH_TBL) {
+ /* libalias log -- controlled by libalias */
+ if (la->packetAliasMode & PKT_ALIAS_LOG)
+ SctpShowAliasStats(la);
+
+ SN_LOG(SN_LOG_INFO, logsctpassoc(assoc, "^"));
+ }
+
+ return(SN_ADD_OK);
+}
+
+/** @ingroup Hash
+ * @brief Remove the sctp association information from the look up table
+ *
+ * For each of the two (local/global) look-up tables, remove the association
+ * from that table IF it has been registered in that table.
+ *
+ * NOTE: The calling code is responsible for freeing memory allocated to the
+ * association structure itself
+ *
+ * NOTE: The association is NOT removed from the timer queue
+ *
+ * @param la Pointer to the relevant libalias instance
+ * @param assoc pointer to sctp association
+ */
+static void
+RmSctpAssoc(struct libalias *la, struct sctp_nat_assoc *assoc)
+{
+ // struct sctp_nat_assoc *found;
+ if (assoc == NULL) {
+ /* very bad, log and die*/
+ SN_LOG(SN_LOG_LOW,
+ logsctperror("ERROR: alias_sctp:RmSctpAssoc(NULL)\n", 0, 0, SN_TO_NODIR));
+ return;
+ }
+ /* log if association is fully up and now closing */
+ if (assoc->TableRegister == SN_BOTH_TBL) {
+ SN_LOG(SN_LOG_INFO, logsctpassoc(assoc, "$"));
+ }
+ LIBALIAS_LOCK_ASSERT(la);
+ if (assoc->TableRegister & SN_LOCAL_TBL) {
+ assoc->TableRegister ^= SN_LOCAL_TBL;
+ la->sctpLinkCount--; //decrement link count
+ LIST_REMOVE(assoc, list_L);
+ }
+
+ if (assoc->TableRegister & SN_GLOBAL_TBL) {
+ assoc->TableRegister ^= SN_GLOBAL_TBL;
+ la->sctpLinkCount--; //decrement link count
+ LIST_REMOVE(assoc, list_G);
+ }
+ // sn_free(assoc); //Don't remove now, remove if needed later
+ /* libalias logging -- controlled by libalias log definition */
+ if (la->packetAliasMode & PKT_ALIAS_LOG)
+ SctpShowAliasStats(la);
+}
+
+/**
+ * @ingroup Hash
+ * @brief free the Global Address List memory
+ *
+ * freeGlobalAddressList deletes all global IP addresses in an associations
+ * global IP address list.
+ *
+ * @param assoc
+ */
+static void freeGlobalAddressList(struct sctp_nat_assoc *assoc)
+{
+ struct sctp_GlobalAddress *gaddr1=NULL,*gaddr2=NULL;
+ /*free global address list*/
+ gaddr1 = LIST_FIRST(&(assoc->Gaddr));
+ while (gaddr1 != NULL) {
+ gaddr2 = LIST_NEXT(gaddr1, list_Gaddr);
+ sn_free(gaddr1);
+ gaddr1 = gaddr2;
+ }
+}
+/* ----------------------------------------------------------------------
+ * TIMER QUEUE CODE
+ * ----------------------------------------------------------------------
+ */
+/** @addtogroup Timer
+ *
+ * The timer queue management functions are designed to operate efficiently with
+ * a minimum of interaction with the queues.
+ *
+ * Once a timeout is set in the queue it will not be altered in the queue unless
+ * it has to be changed to a shorter time (usually only for aborts and closing).
+ * When a queue slot fires, the association's real expiry time is checked; if
+ * it has not yet passed, the association is requeued (O(1)) at its later
+ * time. This is especially important for normal packets sent during an
+ * association. When a timer expires, it is either moved to its new expiration
+ * slot if necessary, or processed as a timeout. This means that while in the
+ * UP state, the timer queue is only altered every U_T (every few minutes) for
+ * a particular association.
+ */
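+/*
+ * Added commentary: a worked example of the circular indexing used by
+ * sctp_AddTimeOut() below (values are hypothetical). Suppose
+ * SN_TIMER_QUEUE_SIZE is 100, cur_loc is 90 and loc_time is 1000. An
+ * association with exp = 1030 maps to slot 90 + (1030 - 1000) = 120, which
+ * wraps to 120 - 100 = 20. By the time sctp_CheckTimers() scans slot 20,
+ * la->timeStamp is at least 1030, so the association is either expired or
+ * requeued at its (possibly later) real expiry time.
+ */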
+/** @ingroup Timer
+ * @brief Add an association timeout to the timer queue
+ *
+ * Determine the location in the queue to add the timeout and insert the
+ * association into the list at that queue position
+ *
+ * @param la
+ * @param assoc
+ */
+static void
+sctp_AddTimeOut(struct libalias *la, struct sctp_nat_assoc *assoc)
+{
+ int add_loc;
+ LIBALIAS_LOCK_ASSERT(la);
+ add_loc = assoc->exp - la->sctpNatTimer.loc_time + la->sctpNatTimer.cur_loc;
+ if (add_loc >= SN_TIMER_QUEUE_SIZE)
+ add_loc -= SN_TIMER_QUEUE_SIZE;
+ LIST_INSERT_HEAD(&la->sctpNatTimer.TimerQ[add_loc], assoc, timer_Q);
+ assoc->exp_loc = add_loc;
+}
+
+/** @ingroup Timer
+ * @brief Remove an association from timer queue
+ *
+ * This is an O(1) operation to remove the association pointer from its
+ * current position in the timer queue
+ *
+ * @param la Pointer to the relevant libalias instance
+ * @param assoc pointer to sctp association
+ */
+static void
+sctp_RmTimeOut(struct libalias *la, struct sctp_nat_assoc *assoc)
+{
+ LIBALIAS_LOCK_ASSERT(la);
+ LIST_REMOVE(assoc, timer_Q);/* Note this is O(1) */
+}
+
+
+/** @ingroup Timer
+ * @brief Reset timer in timer queue
+ *
+ * Reset the actual timeout for the specified association. If it is earlier than
+ * the existing timeout, then remove and re-install the association into the
+ * queue
+ *
+ * @param la Pointer to the relevant libalias instance
+ * @param assoc pointer to sctp association
+ * @param newexp New expiration time
+ */
+static void
+sctp_ResetTimeOut(struct libalias *la, struct sctp_nat_assoc *assoc, int newexp)
+{
+ if (newexp < assoc->exp) {
+ sctp_RmTimeOut(la, assoc);
+ assoc->exp = newexp;
+ sctp_AddTimeOut(la, assoc);
+ } else {
+ assoc->exp = newexp;
+ }
+}
+
+/** @ingroup Timer
+ * @brief Check timer Q against current time
+ *
+ * Loop through each entry in the timer queue since the last time we processed
+ * the timer queue until now (the current time). For each association in the
+ * event list, we remove it from that position in the timer queue and check if
+ * it has really expired. If so we:
+ * - Log the timer expiry
+ * - Remove the association from the NAT tables
+ * - Release the memory used by the association
+ *
+ * If the timer hasn't really expired we place the association into its new
+ * correct position in the timer queue.
+ *
+ * @param la Pointer to the relevant libalias instance
+ */
+void
+sctp_CheckTimers(struct libalias *la)
+{
+ struct sctp_nat_assoc *assoc;
+
+ LIBALIAS_LOCK_ASSERT(la);
+ while(la->timeStamp >= la->sctpNatTimer.loc_time) {
+ while (!LIST_EMPTY(&la->sctpNatTimer.TimerQ[la->sctpNatTimer.cur_loc])) {
+ assoc = LIST_FIRST(&la->sctpNatTimer.TimerQ[la->sctpNatTimer.cur_loc]);
+ //SLIST_REMOVE_HEAD(&la->sctpNatTimer.TimerQ[la->sctpNatTimer.cur_loc], timer_Q);
+ LIST_REMOVE(assoc, timer_Q);
+ if (la->timeStamp >= assoc->exp) { /* state expired */
+ SN_LOG(((assoc->state == SN_CL)?(SN_LOG_DEBUG):(SN_LOG_INFO)),
+ logsctperror("Timer Expired", assoc->g_vtag, assoc->state, SN_TO_NODIR));
+ RmSctpAssoc(la, assoc);
+ freeGlobalAddressList(assoc);
+ sn_free(assoc);
+ } else {/* state not expired, reschedule timer*/
+ sctp_AddTimeOut(la, assoc);
+ }
+ }
+ /* Goto next location in the timer queue*/
+ ++la->sctpNatTimer.loc_time;
+ if (++la->sctpNatTimer.cur_loc >= SN_TIMER_QUEUE_SIZE)
+ la->sctpNatTimer.cur_loc = 0;
+ }
+}
+
+/* ----------------------------------------------------------------------
+ * LOGGING CODE
+ * ----------------------------------------------------------------------
+ */
+/** @addtogroup Logging
+ *
+ * The logging functions provide logging of different items ranging from logging
+ * a simple message, through logging an association details to logging the
+ * current state of the NAT tables
+ */
+/** @ingroup Logging
+ * @brief Log sctp nat errors
+ *
+ * @param errormsg Error message to be logged
+ * @param vtag Current Vtag
+ * @param error Error number
+ * @param direction Direction of packet
+ */
+static void
+logsctperror(char* errormsg, uint32_t vtag, int error, int direction)
+{
+ char dir;
+ switch(direction) {
+ case SN_TO_LOCAL:
+ dir = 'L';
+ break;
+ case SN_TO_GLOBAL:
+ dir = 'G';
+ break;
+ default:
+ dir = '*';
+ break;
+ }
+ SctpAliasLog("->%c %s (vt=%u) %d\n", dir, errormsg, ntohl(vtag), error);
+}
+
+/** @ingroup Logging
+ * @brief Log what the parser parsed
+ *
+ * @param direction Direction of packet
+ * @param sm Pointer to sctp message information
+ */
+static void
+logsctpparse(int direction, struct sctp_nat_msg *sm)
+{
+ char *ploc, *pstate;
+ switch(direction) {
+ case SN_TO_LOCAL:
+ ploc = "TO_LOCAL -";
+ break;
+ case SN_TO_GLOBAL:
+ ploc = "TO_GLOBAL -";
+ break;
+ default:
+ ploc = "";
+ }
+ switch(sm->msg) {
+ case SN_SCTP_INIT:
+ pstate = "Init";
+ break;
+ case SN_SCTP_INITACK:
+ pstate = "InitAck";
+ break;
+ case SN_SCTP_ABORT:
+ pstate = "Abort";
+ break;
+ case SN_SCTP_SHUTACK:
+ pstate = "ShutAck";
+ break;
+ case SN_SCTP_SHUTCOMP:
+ pstate = "ShutComp";
+ break;
+ case SN_SCTP_ASCONF:
+ pstate = "Asconf";
+ break;
+ case SN_SCTP_ASCONFACK:
+ pstate = "AsconfAck";
+ break;
+ case SN_SCTP_OTHER:
+ pstate = "Other";
+ break;
+ default:
+ pstate = "***ERROR***";
+ break;
+ }
+ SctpAliasLog("Parsed: %s %s\n", ploc, pstate);
+}
+
+/** @ingroup Logging
+ * @brief Log an SCTP association's details
+ *
+ * @param assoc pointer to sctp association
+ * @param s Character that indicates the state of processing for this packet
+ */
+static void logsctpassoc(struct sctp_nat_assoc *assoc, char* s)
+{
+ struct sctp_GlobalAddress *G_Addr = NULL;
+ char *sp;
+ switch(assoc->state) {
+ case SN_ID:
+ sp = "ID ";
+ break;
+ case SN_INi:
+ sp = "INi ";
+ break;
+ case SN_INa:
+ sp = "INa ";
+ break;
+ case SN_UP:
+ sp = "UP ";
+ break;
+ case SN_CL:
+ sp = "CL ";
+ break;
+ case SN_RM:
+ sp = "RM ";
+ break;
+ default:
+ sp = "***ERROR***";
+ break;
+ }
+ SctpAliasLog("%sAssoc: %s exp=%u la=%s lv=%u lp=%u gv=%u gp=%u tbl=%d\n",
+ s, sp, assoc->exp, inet_ntoa(assoc->l_addr), ntohl(assoc->l_vtag),
+ ntohs(assoc->l_port), ntohl(assoc->g_vtag), ntohs(assoc->g_port),
+ assoc->TableRegister);
+ /* list global addresses */
+ LIST_FOREACH(G_Addr, &(assoc->Gaddr), list_Gaddr) {
+ SctpAliasLog("\t\tga=%s\n",inet_ntoa(G_Addr->g_addr));
+ }
+}
+
+/** @ingroup Logging
+ * @brief Output Global table to log
+ *
+ * @param la Pointer to the relevant libalias instance
+ */
+static void logSctpGlobal(struct libalias *la)
+{
+ u_int i;
+ struct sctp_nat_assoc *assoc = NULL;
+
+ SctpAliasLog("G->\n");
+ for (i=0; i < la->sctpNatTableSize; i++) {
+ LIST_FOREACH(assoc, &la->sctpTableGlobal[i], list_G) {
+ logsctpassoc(assoc, " ");
+ }
+ }
+}
+
+/** @ingroup Logging
+ * @brief Output Local table to log
+ *
+ * @param la Pointer to the relevant libalias instance
+ */
+static void logSctpLocal(struct libalias *la)
+{
+ u_int i;
+ struct sctp_nat_assoc *assoc = NULL;
+
+ SctpAliasLog("L->\n");
+ for (i=0; i < la->sctpNatTableSize; i++) {
+ LIST_FOREACH(assoc, &la->sctpTableLocal[i], list_L) {
+ logsctpassoc(assoc, " ");
+ }
+ }
+}
+
+/** @ingroup Logging
+ * @brief Output timer queue to log
+ *
+ * @param la Pointer to the relevant libalias instance
+ */
+static void logTimerQ(struct libalias *la)
+{
+ static char buf[50];
+ u_int i;
+ struct sctp_nat_assoc *assoc = NULL;
+
+ SctpAliasLog("t->\n");
+ for (i=0; i < SN_TIMER_QUEUE_SIZE; i++) {
+ LIST_FOREACH(assoc, &la->sctpNatTimer.TimerQ[i], timer_Q) {
+ snprintf(buf, 50, " l=%u ",i);
+ //SctpAliasLog(la->logDesc," l=%d ",i);
+ logsctpassoc(assoc, buf);
+ }
+ }
+}
+
+/** @ingroup Logging
+ * @brief Sctp NAT logging function
+ *
+ * This function is based on a similar function in alias_db.c
+ *
+ * @param str/stream logging descriptor
+ * @param format printf type string
+ */
+#ifdef _KERNEL
+static void
+SctpAliasLog(const char *format, ...)
+{
+ char buffer[LIBALIAS_BUF_SIZE];
+ va_list ap;
+ va_start(ap, format);
+ vsnprintf(buffer, LIBALIAS_BUF_SIZE, format, ap);
+ va_end(ap);
+ log(LOG_SECURITY | LOG_INFO,
+ "alias_sctp: %s", buffer);
+}
+#else
+static void
+SctpAliasLog(FILE *stream, const char *format, ...)
+{
+ va_list ap;
+
+ va_start(ap, format);
+ vfprintf(stream, format, ap);
+ va_end(ap);
+ fflush(stream);
+}
+#endif
diff --git a/rtems/freebsd/netinet/libalias/alias_sctp.h b/rtems/freebsd/netinet/libalias/alias_sctp.h
new file mode 100644
index 00000000..2b130f7d
--- /dev/null
+++ b/rtems/freebsd/netinet/libalias/alias_sctp.h
@@ -0,0 +1,201 @@
+/*-
+ * Copyright (c) 2008
+ * Swinburne University of Technology, Melbourne, Australia.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS "AS IS" AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * Alias_sctp forms part of the libalias kernel module to handle
+ * Network Address Translation (NAT) for the SCTP protocol.
+ *
+ * This software was developed by David A. Hayes
+ * with leadership and advice from Jason But
+ *
+ * The design is outlined in CAIA technical report number 080618A
+ * (D. Hayes and J. But, "Alias_sctp Version 0.1: SCTP NAT implementation in IPFW")
+ *
+ * Development is part of the CAIA SONATA project,
+ * proposed by Jason But and Grenville Armitage:
+ * http://caia.swin.edu.au/urp/sonata/
+ *
+ *
+ * This project has been made possible in part by a grant from
+ * the Cisco University Research Program Fund at Community
+ * Foundation Silicon Valley.
+ *
+ */
+
+/* $FreeBSD$ */
+
+#ifndef _ALIAS_SCTP_HH_
+#define _ALIAS_SCTP_HH_
+
+#include <rtems/freebsd/sys/param.h>
+#ifdef _KERNEL
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/module.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/proc.h>
+#include <rtems/freebsd/sys/uio.h>
+#include <rtems/freebsd/sys/socketvar.h>
+#include <rtems/freebsd/sys/syslog.h>
+#endif // #ifdef _KERNEL
+#include <rtems/freebsd/sys/types.h>
+
+#include <rtems/freebsd/sys/queue.h>
+#include <rtems/freebsd/sys/types.h>
+#include <rtems/freebsd/sys/time.h>
+
+#include <rtems/freebsd/netinet/in_systm.h>
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/ip.h>
+
+/**
+ * These are defined in sctp_os_bsd.h, but it can't be included due to its local file
+ * inclusion, so I'm defining them here.
+ *
+ */
+#include <rtems/freebsd/machine/cpufunc.h>
+#include <rtems/freebsd/machine/cpu.h>
+/* The packed define for 64 bit platforms */
+#ifndef SCTP_PACKED
+#define SCTP_PACKED __attribute__((packed))
+#endif //#ifndef SCTP_PACKED
+#ifndef SCTP_UNUSED
+#define SCTP_UNUSED __attribute__((unused))
+#endif //#ifndef SCTP_UNUSED
+
+
+#include <rtems/freebsd/netinet/sctp.h>
+//#include <rtems/freebsd/netinet/sctp_os_bsd.h> --might be needed later for mbuf stuff
+#include <rtems/freebsd/netinet/sctp_header.h>
+
+#ifndef _KERNEL
+#include <rtems/freebsd/stdlib.h>
+#include <rtems/freebsd/stdio.h>
+#include <rtems/freebsd/curses.h>
+#endif //#ifdef _KERNEL
+
+
+#define LINK_SCTP IPPROTO_SCTP
+
+
+#define SN_TO_LOCAL 0 /**< packet traveling from global to local */
+#define SN_TO_GLOBAL 1 /**< packet traveling from local to global */
+#define SN_TO_NODIR 99 /**< used where direction is not important */
+
+#define SN_NAT_PKT 0x0000 /**< Network Address Translate packet */
+#define SN_DROP_PKT 0x0001 /**< drop packet (don't forward it) */
+#define SN_PROCESSING_ERROR 0x0003 /**< Packet processing error */
+#define SN_REPLY_ABORT 0x0010 /**< Reply with ABORT to sender (don't forward it) */
+#define SN_SEND_ABORT 0x0020 /**< Send ABORT to destination */
+#define SN_TX_ABORT 0x0030 /**< mask for transmitting abort */
+#define SN_REFLECT_ERROR 0x0100 /**< Reply with ERROR to sender on OOTB packet Tbit set */
+#define SN_REPLY_ERROR 0x0200 /**< Reply with ERROR to sender on ASCONF clash */
+#define SN_TX_ERROR 0x0300 /**< mask for transmitting error */
+
+
+#define PKT_ALIAS_RESPOND 0x1000 /**< Signal to libalias that there is a response packet to send */
+/*
+ * Data structures
+ */
+
+/**
+ * @brief sctp association information
+ *
+ * Structure that contains information about a particular sctp association
+ * currently under Network Address Translation.
+ * Information is stored in network byte order (as is libalias)***
+ */
+struct sctp_nat_assoc {
+ uint32_t l_vtag; /**< local side verification tag */
+ uint16_t l_port; /**< local side port number */
+ uint32_t g_vtag; /**< global side verification tag */
+ uint16_t g_port; /**< global side port number */
+ struct in_addr l_addr; /**< local ip address */
+ struct in_addr a_addr; /**< alias ip address */
+ int state; /**< current state of NAT association */
+ int TableRegister; /**< stores which look up tables association is registered in */
+ int exp; /**< timer expiration in seconds from uptime */
+ int exp_loc; /**< current location in timer_Q */
+ int num_Gaddr; /**< number of global IP addresses in the list */
+ LIST_HEAD(sctpGlobalAddresshead,sctp_GlobalAddress) Gaddr; /**< List of global addresses */
+ LIST_ENTRY (sctp_nat_assoc) list_L; /**< Linked list of pointers for Local table*/
+ LIST_ENTRY (sctp_nat_assoc) list_G; /**< Linked list of pointers for Global table */
+ LIST_ENTRY (sctp_nat_assoc) timer_Q; /**< Linked list of pointers for timer Q */
+//Using libalias locking
+};
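+/*
+ * Added commentary: TableRegister is used as a bit mask. AddSctpAssocLocal()
+ * and AddSctpAssocGlobal() in alias_sctp.c OR in SN_LOCAL_TBL and
+ * SN_GLOBAL_TBL respectively; an association is fully registered once both
+ * bits are set (SN_BOTH_TBL), and RmSctpAssoc() clears each bit as the entry
+ * is unlinked from the corresponding table.
+ */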
+
+struct sctp_GlobalAddress {
+ struct in_addr g_addr;
+ LIST_ENTRY (sctp_GlobalAddress) list_Gaddr; /**< Linked list of pointers for Global table */
+};
+
+/**
+ * @brief SCTP chunk of interest
+ *
+ * The only chunks whose contents are of any interest are the INIT and ASCONF_AddIP
+ */
+union sctpChunkOfInt {
+ struct sctp_init *Init; /**< Pointer to Init Chunk */
+ struct sctp_init_ack *InitAck; /**< Pointer to Init Chunk */
+ struct sctp_paramhdr *Asconf; /**< Pointer to ASCONF chunk */
+};
+
+
+/**
+ * @brief SCTP message
+ *
+ * Structure containing the relevant information from the SCTP message
+ */
+struct sctp_nat_msg {
+ uint16_t msg; /**< one of the key messages defined above */
+#ifndef __rtems__
+#ifdef INET6
+ // struct ip6_hdr *ip_hdr; /**< pointer to ip packet header */ /*no inet6 support yet*/
+#else
+ struct ip *ip_hdr; /**< pointer to ip packet header */
+#endif //#ifdef INET6
+#else //__rtems__
+ struct ip *ip_hdr; /**< pointer to ip packet header */
+#endif //__rtems__
+ struct sctphdr *sctp_hdr; /**< pointer to sctp common header */
+ union sctpChunkOfInt sctpchnk; /**< union of pointers to the chunk of interest */
+ int chunk_length; /**< length of chunk of interest */
+};
+
+
+/**
+ * @brief sctp nat timer queue structure
+ *
+ */
+
+struct sctp_nat_timer {
+ int loc_time; /**< time in seconds for the current location in the queue */
+ int cur_loc; /**< index of the current location in the circular queue */
+ LIST_HEAD(sctpTimerQ,sctp_nat_assoc) *TimerQ; /**< List of associations at this position in the timer Q */
+};
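+/*
+ * Added commentary: TimerQ points to an array of SN_TIMER_QUEUE_SIZE list
+ * heads used as a circular calendar queue; cur_loc indexes the slot whose
+ * timeouts correspond to loc_time, and sctp_CheckTimers() in alias_sctp.c
+ * advances both fields as it drains expired slots.
+ */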
+
+
+
+#endif //#ifndef _ALIAS_SCTP_HH_
diff --git a/rtems/freebsd/netinet/libalias/alias_skinny.c b/rtems/freebsd/netinet/libalias/alias_skinny.c
new file mode 100644
index 00000000..0481e0ec
--- /dev/null
+++ b/rtems/freebsd/netinet/libalias/alias_skinny.c
@@ -0,0 +1,449 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * alias_skinny.c
+ *
+ * Copyright (c) 2002, 2003 MarcusCom, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Author: Joe Marcus Clarke <marcus@FreeBSD.org>
+ *
+ * $FreeBSD$
+ */
+
+#ifdef _KERNEL
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/module.h>
+#else
+#include <rtems/freebsd/errno.h>
+#include <rtems/freebsd/stdio.h>
+#include <rtems/freebsd/unistd.h>
+#endif
+
+#include <rtems/freebsd/netinet/in_systm.h>
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/ip.h>
+#include <rtems/freebsd/netinet/tcp.h>
+
+#ifdef _KERNEL
+#include <rtems/freebsd/netinet/libalias/alias_local.h>
+#include <rtems/freebsd/netinet/libalias/alias_mod.h>
+#else
+#include <rtems/freebsd/local/alias_local.h>
+#include <rtems/freebsd/local/alias_mod.h>
+#endif
+
+static void
+AliasHandleSkinny(struct libalias *, struct ip *, struct alias_link *);
+
+static int
+fingerprint(struct libalias *la, struct alias_data *ah)
+{
+
+ if (ah->dport == NULL || ah->sport == NULL || ah->lnk == NULL)
+ return (-1);
+ if (la->skinnyPort != 0 && (ntohs(*ah->sport) == la->skinnyPort ||
+ ntohs(*ah->dport) == la->skinnyPort))
+ return (0);
+ return (-1);
+}
+
+static int
+protohandler(struct libalias *la, struct ip *pip, struct alias_data *ah)
+{
+
+ AliasHandleSkinny(la, pip, ah->lnk);
+ return (0);
+}
+
+struct proto_handler handlers[] = {
+ {
+ .pri = 110,
+ .dir = IN|OUT,
+ .proto = TCP,
+ .fingerprint = &fingerprint,
+ .protohandler = &protohandler
+ },
+ { EOH }
+};
+
+static int
+mod_handler(module_t mod, int type, void *data)
+{
+ int error;
+
+ switch (type) {
+ case MOD_LOAD:
+ error = 0;
+ LibAliasAttachHandlers(handlers);
+ break;
+ case MOD_UNLOAD:
+ error = 0;
+ LibAliasDetachHandlers(handlers);
+ break;
+ default:
+ error = EINVAL;
+ }
+ return (error);
+}
+
+#ifdef _KERNEL
+static
+#endif
+moduledata_t alias_mod = {
+ "alias_skinny", mod_handler, NULL
+};
+
+#ifdef _KERNEL
+DECLARE_MODULE(alias_skinny, alias_mod, SI_SUB_DRIVERS, SI_ORDER_SECOND);
+MODULE_VERSION(alias_skinny, 1);
+MODULE_DEPEND(alias_skinny, libalias, 1, 1, 1);
+#endif
+
+/*
+ * alias_skinny.c handles the translation for the Cisco Skinny Station
+ * protocol. Skinny typically uses TCP port 2000 to set up calls between
+ * a Cisco Call Manager and a Cisco IP phone. When a phone comes on line,
+ * it first needs to register with the Call Manager. To do this it sends
+ * a registration message. This message contains the IP address of the
+ * IP phone. This message must then be translated to reflect our global
+ * IP address. Along with the registration message (and usually in the
+ * same packet), the phone sends an IP port message. This message indicates
+ * the TCP port over which it will communicate.
+ *
+ * When a call is placed from the phone, the Call Manager will send an
+ * Open Receive Channel message to the phone to let the caller know someone
+ * has answered. The phone then sends back an Open Receive Channel
+ * Acknowledgement. In this packet, the phone sends its IP address again,
+ * and the UDP port over which the voice traffic should flow. These values
+ * need translation. Right after the Open Receive Channel Acknowledgement,
+ * the Call Manager sends a Start Media Transmission message indicating the
+ * call is connected. This message contains the IP address and UDP port
+ * number of the remote (called) party. Once this message is translated, the
+ * call can commence. The called party sends the first UDP packet to the
+ * calling phone at the pre-arranged UDP port in the Open Receive Channel
+ * Acknowledgement.
+ *
+ * Skinny is a Cisco-proprietary protocol and is a trademark of Cisco Systems,
+ * Inc. All rights reserved.
+*/
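+
+/*
+ * Added summary of the translation points described above (a sketch, not part
+ * of the original source):
+ *
+ *   phone -> CM : Register (ipAddr rewritten to the alias address)
+ *   phone -> CM : IpPort (stationIpPort rewritten to the alias port)
+ *   CM -> phone : OpenReceiveChannel
+ *   phone -> CM : OpenReceiveChannelAck (ipAddr and port rewritten)
+ *   CM -> phone : StartMediaTransmission (remote party address and port)
+ */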
+
+/* #define LIBALIAS_DEBUG 1 */
+
+/* Message types that need translating */
+#define REG_MSG 0x00000001
+#define IP_PORT_MSG 0x00000002
+#define OPNRCVCH_ACK 0x00000022
+#define START_MEDIATX 0x0000008a
+
+struct skinny_header {
+ u_int32_t len;
+ u_int32_t reserved;
+ u_int32_t msgId;
+};
+
+struct RegisterMessage {
+ u_int32_t msgId;
+ char devName [16];
+ u_int32_t uid;
+ u_int32_t instance;
+ u_int32_t ipAddr;
+ u_char devType;
+ u_int32_t maxStreams;
+};
+
+struct IpPortMessage {
+ u_int32_t msgId;
+ u_int32_t stationIpPort; /* Note: Skinny uses 32-bit port
+ * numbers */
+};
+
+struct OpenReceiveChannelAck {
+ u_int32_t msgId;
+ u_int32_t status;
+ u_int32_t ipAddr;
+ u_int32_t port;
+ u_int32_t passThruPartyID;
+};
+
+struct StartMediaTransmission {
+ u_int32_t msgId;
+ u_int32_t conferenceID;
+ u_int32_t passThruPartyID;
+ u_int32_t remoteIpAddr;
+ u_int32_t remotePort;
+ u_int32_t MSPacket;
+ u_int32_t payloadCap;
+ u_int32_t precedence;
+ u_int32_t silenceSuppression;
+ u_short maxFramesPerPacket;
+ u_int32_t G723BitRate;
+};
+
+typedef enum {
+ ClientToServer = 0,
+ ServerToClient = 1
+} ConvDirection;
+
+
+static int
+alias_skinny_reg_msg(struct RegisterMessage *reg_msg, struct ip *pip,
+ struct tcphdr *tc, struct alias_link *lnk,
+ ConvDirection direction)
+{
+ (void)direction;
+
+ reg_msg->ipAddr = (u_int32_t) GetAliasAddress(lnk).s_addr;
+
+ tc->th_sum = 0;
+#ifdef _KERNEL
+ tc->th_x2 = 1;
+#else
+ tc->th_sum = TcpChecksum(pip);
+#endif
+
+ return (0);
+}
+
+static int
+alias_skinny_startmedia(struct StartMediaTransmission *start_media,
+ struct ip *pip, struct tcphdr *tc,
+ struct alias_link *lnk, u_int32_t localIpAddr,
+ ConvDirection direction)
+{
+ struct in_addr dst, src;
+
+ (void)pip;
+ (void)tc;
+ (void)lnk;
+ (void)direction;
+
+ dst.s_addr = start_media->remoteIpAddr;
+ src.s_addr = localIpAddr;
+
+ /*
+ * XXX I should probably handle in bound global translations as
+ * well.
+ */
+
+ return (0);
+}
+
+static int
+alias_skinny_port_msg(struct IpPortMessage *port_msg, struct ip *pip,
+ struct tcphdr *tc, struct alias_link *lnk,
+ ConvDirection direction)
+{
+ (void)direction;
+
+ port_msg->stationIpPort = (u_int32_t) ntohs(GetAliasPort(lnk));
+
+ tc->th_sum = 0;
+#ifdef _KERNEL
+ tc->th_x2 = 1;
+#else
+ tc->th_sum = TcpChecksum(pip);
+#endif
+ return (0);
+}
+
+static int
+alias_skinny_opnrcvch_ack(struct libalias *la, struct OpenReceiveChannelAck *opnrcvch_ack,
+ struct ip *pip, struct tcphdr *tc,
+ struct alias_link *lnk, u_int32_t * localIpAddr,
+ ConvDirection direction)
+{
+ struct in_addr null_addr;
+ struct alias_link *opnrcv_lnk;
+ u_int32_t localPort;
+
+ (void)lnk;
+ (void)direction;
+
+ *localIpAddr = (u_int32_t) opnrcvch_ack->ipAddr;
+ localPort = opnrcvch_ack->port;
+
+ null_addr.s_addr = INADDR_ANY;
+ opnrcv_lnk = FindUdpTcpOut(la, pip->ip_src, null_addr,
+ htons((u_short) opnrcvch_ack->port), 0,
+ IPPROTO_UDP, 1);
+ opnrcvch_ack->ipAddr = (u_int32_t) GetAliasAddress(opnrcv_lnk).s_addr;
+ opnrcvch_ack->port = (u_int32_t) ntohs(GetAliasPort(opnrcv_lnk));
+
+ tc->th_sum = 0;
+#ifdef _KERNEL
+ tc->th_x2 = 1;
+#else
+ tc->th_sum = TcpChecksum(pip);
+#endif
+ return (0);
+}
+
+static void
+AliasHandleSkinny(struct libalias *la, struct ip *pip, struct alias_link *lnk)
+{
+ size_t hlen, tlen, dlen;
+ struct tcphdr *tc;
+ u_int32_t msgId, t, len, lip;
+ struct skinny_header *sd;
+ size_t orig_len, skinny_hdr_len = sizeof(struct skinny_header);
+ ConvDirection direction;
+
+ lip = -1;
+ tc = (struct tcphdr *)ip_next(pip);
+ hlen = (pip->ip_hl + tc->th_off) << 2;
+ tlen = ntohs(pip->ip_len);
+ dlen = tlen - hlen;
+
+ sd = (struct skinny_header *)tcp_next(tc);
+
+ /*
+ * XXX This direction is reserved for future use. I still need to
+ * handle the scenario where the call manager is on the inside, and
+ * the calling phone is on the global outside.
+ */
+ if (ntohs(tc->th_dport) == la->skinnyPort) {
+ direction = ClientToServer;
+ } else if (ntohs(tc->th_sport) == la->skinnyPort) {
+ direction = ServerToClient;
+ } else {
+#ifdef LIBALIAS_DEBUG
+ fprintf(stderr,
+ "PacketAlias/Skinny: Invalid port number, not a Skinny packet\n");
+#endif
+ return;
+ }
+
+ orig_len = dlen;
+ /*
+ * Skinny packets can contain many messages. We need to loop
+ * through the packet using len to determine message boundaries.
+ * This comes into play big time with port messages being in the
+ * same packet as register messages. Also, open receive channel
+ * acks are usually buried in a packet some 400 bytes long.
+ */
+ while (dlen >= skinny_hdr_len) {
+ len = (sd->len);
+ msgId = (sd->msgId);
+ t = len;
+
+ if (t > orig_len || t > dlen) {
+#ifdef LIBALIAS_DEBUG
+ fprintf(stderr,
+ "PacketAlias/Skinny: Not a skinny packet, invalid length \n");
+#endif
+ return;
+ }
+ switch (msgId) {
+ case REG_MSG: {
+ struct RegisterMessage *reg_mesg;
+
+ if (len < (int)sizeof(struct RegisterMessage)) {
+#ifdef LIBALIAS_DEBUG
+ fprintf(stderr,
+ "PacketAlias/Skinny: Not a skinny packet, bad registration message\n");
+#endif
+ return;
+ }
+ reg_mesg = (struct RegisterMessage *)&sd->msgId;
+#ifdef LIBALIAS_DEBUG
+ fprintf(stderr,
+ "PacketAlias/Skinny: Received a register message");
+#endif
+ alias_skinny_reg_msg(reg_mesg, pip, tc, lnk, direction);
+ break;
+ }
+ case IP_PORT_MSG: {
+ struct IpPortMessage *port_mesg;
+
+ if (len < (int)sizeof(struct IpPortMessage)) {
+#ifdef LIBALIAS_DEBUG
+ fprintf(stderr,
+ "PacketAlias/Skinny: Not a skinny packet, port message\n");
+#endif
+ return;
+ }
+#ifdef LIBALIAS_DEBUG
+ fprintf(stderr,
+ "PacketAlias/Skinny: Received ipport message\n");
+#endif
+ port_mesg = (struct IpPortMessage *)&sd->msgId;
+ alias_skinny_port_msg(port_mesg, pip, tc, lnk, direction);
+ break;
+ }
+ case OPNRCVCH_ACK: {
+ struct OpenReceiveChannelAck *opnrcvchn_ack;
+
+ if (len < (int)sizeof(struct OpenReceiveChannelAck)) {
+#ifdef LIBALIAS_DEBUG
+ fprintf(stderr,
+ "PacketAlias/Skinny: Not a skinny packet, packet,OpnRcvChnAckMsg\n");
+#endif
+ return;
+ }
+#ifdef LIBALIAS_DEBUG
+ fprintf(stderr,
+ "PacketAlias/Skinny: Received open rcv channel msg\n");
+#endif
+ opnrcvchn_ack = (struct OpenReceiveChannelAck *)&sd->msgId;
+ alias_skinny_opnrcvch_ack(la, opnrcvchn_ack, pip, tc, lnk, &lip, direction);
+ break;
+ }
+ case START_MEDIATX: {
+ struct StartMediaTransmission *startmedia_tx;
+
+ if (len < (int)sizeof(struct StartMediaTransmission)) {
+#ifdef LIBALIAS_DEBUG
+ fprintf(stderr,
+ "PacketAlias/Skinny: Not a skinny packet,StartMediaTx Message\n");
+#endif
+ return;
+ }
+ if (lip == -1) {
+#ifdef LIBALIAS_DEBUG
+ fprintf(stderr,
+ "PacketAlias/Skinny: received a"
+ " packet,StartMediaTx Message before"
+ " packet,OpnRcvChnAckMsg\n"
+#endif
+ return;
+ }
+
+#ifdef LIBALIAS_DEBUG
+ fprintf(stderr,
+ "PacketAlias/Skinny: Received start media trans msg\n");
+#endif
+ startmedia_tx = (struct StartMediaTransmission *)&sd->msgId;
+ alias_skinny_startmedia(startmedia_tx, pip, tc, lnk, lip, direction);
+ break;
+ }
+ default:
+ break;
+ }
+ /*
+  * Advance to the next message in the packet: sd->len counts from msgId
+  * onward, so we also skip the preceding len and reserved fields
+  * (skinny_hdr_len - sizeof(msgId) bytes).
+  */
+ dlen -= len + (skinny_hdr_len - sizeof(msgId));
+ sd = (struct skinny_header *)(((char *)&sd->msgId) + len);
+ }
+}
diff --git a/rtems/freebsd/netinet/libalias/alias_smedia.c b/rtems/freebsd/netinet/libalias/alias_smedia.c
new file mode 100644
index 00000000..91ac394f
--- /dev/null
+++ b/rtems/freebsd/netinet/libalias/alias_smedia.c
@@ -0,0 +1,551 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*
+ * alias_smedia.c
+ *
+ * Copyright (c) 2000 Whistle Communications, Inc.
+ * All rights reserved.
+ *
+ * Subject to the following obligations and disclaimer of warranty, use and
+ * redistribution of this software, in source or object code forms, with or
+ * without modifications are expressly permitted by Whistle Communications;
+ * provided, however, that:
+ * 1. Any and all reproductions of the source or object code must include the
+ * copyright notice above and the following disclaimer of warranties; and
+ * 2. No rights are granted, in any manner or form, to use Whistle
+ * Communications, Inc. trademarks, including the mark "WHISTLE
+ * COMMUNICATIONS" on advertising, endorsements, or otherwise except as
+ * such appears in the above copyright notice or in the software.
+ *
+ * THIS SOFTWARE IS BEING PROVIDED BY WHISTLE COMMUNICATIONS "AS IS", AND
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, WHISTLE COMMUNICATIONS MAKES NO
+ * REPRESENTATIONS OR WARRANTIES, EXPRESS OR IMPLIED, REGARDING THIS SOFTWARE,
+ * INCLUDING WITHOUT LIMITATION, ANY AND ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT.
+ * WHISTLE COMMUNICATIONS DOES NOT WARRANT, GUARANTEE, OR MAKE ANY
+ * REPRESENTATIONS REGARDING THE USE OF, OR THE RESULTS OF THE USE OF THIS
+ * SOFTWARE IN TERMS OF ITS CORRECTNESS, ACCURACY, RELIABILITY OR OTHERWISE.
+ * IN NO EVENT SHALL WHISTLE COMMUNICATIONS BE LIABLE FOR ANY DAMAGES
+ * RESULTING FROM OR ARISING OUT OF ANY USE OF THIS SOFTWARE, INCLUDING
+ * WITHOUT LIMITATION, ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
+ * PUNITIVE, OR CONSEQUENTIAL DAMAGES, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES, LOSS OF USE, DATA OR PROFITS, HOWEVER CAUSED AND UNDER ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF WHISTLE COMMUNICATIONS IS ADVISED OF THE POSSIBILITY
+ * OF SUCH DAMAGE.
+ *
+ * Copyright (c) 2000 Junichi SATOH <junichi@astec.co.jp>
+ * <junichi@junichi.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Authors: Erik Salander <erik@whistle.com>
+ * Junichi SATOH <junichi@astec.co.jp>
+ * <junichi@junichi.org>
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ Alias_smedia.c is meant to contain the aliasing code for streaming media
+ protocols. It performs special processing for RTSP sessions under TCP.
+ Specifically, when a SETUP request is sent by a client, or a 200 reply
+ is sent by a server, it is intercepted and modified. The address is
+ changed to the gateway machine and an aliasing port is used.
+
+ More specifically, the "client_port" configuration parameter is
+ parsed for SETUP requests. The "server_port" configuration parameter is
+ parsed for 200 replies emanating from a server. This is intended to handle
+ the unicast case.
+
+ RTSP also allows a redirection of a stream to another client by using the
+ "destination" configuration parameter. The destination config parm would
+ indicate a different IP address. This function is NOT supported by the
+ RTSP translation code below.
+
+ RTSP multicast operates without any address translation intervention.
+
+ For this routine to work, the SETUP/200 must fit entirely
+ into a single TCP packet. This is typically the case, but exceptions
+ can easily be envisioned under the actual specifications.
+
+ Probably the most troubling aspect of the approach taken here is
+ that the new SETUP/200 will typically be a different length, and
+ this causes a certain amount of bookkeeping to keep track of the
+ changes of sequence and acknowledgment numbers, since the client
+ machine is totally unaware of the modification to the TCP stream.
+
+ Initial version: May, 2000 (eds)
+*/
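+
+/*
+ Added illustration (hypothetical addresses and ports): a client behind the
+ NAT might send
+
+ SETUP rtsp://example.com/stream RTSP/1.0
+ Transport: RTP/AVP;unicast;client_port=5000-5001
+
+ alias_rtsp_out() below rewrites the client_port range to an even-aligned
+ pair obtained from FindNewPortGroup(), e.g. client_port=8000-8001, and then
+ uses SetAckModified()/AddSeq() to account for any change in payload length.
+*/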
+
+#ifdef _KERNEL
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/module.h>
+#else
+#include <rtems/freebsd/errno.h>
+#include <rtems/freebsd/sys/types.h>
+#include <rtems/freebsd/stdio.h>
+#include <rtems/freebsd/string.h>
+#endif
+
+#include <rtems/freebsd/netinet/in_systm.h>
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/ip.h>
+#include <rtems/freebsd/netinet/tcp.h>
+
+#ifdef _KERNEL
+#include <rtems/freebsd/netinet/libalias/alias.h>
+#include <rtems/freebsd/netinet/libalias/alias_local.h>
+#include <rtems/freebsd/netinet/libalias/alias_mod.h>
+#else
+#include <rtems/freebsd/local/alias_local.h>
+#include <rtems/freebsd/local/alias_mod.h>
+#endif
+
+#define RTSP_CONTROL_PORT_NUMBER_1 554
+#define RTSP_CONTROL_PORT_NUMBER_2 7070
+#define TFTP_PORT_NUMBER 69
+
+static void
+AliasHandleRtspOut(struct libalias *, struct ip *, struct alias_link *,
+ int maxpacketsize);
+static int
+fingerprint(struct libalias *la, struct alias_data *ah)
+{
+
+ if (ah->dport != NULL && ah->aport != NULL && ah->sport != NULL &&
+ ntohs(*ah->dport) == TFTP_PORT_NUMBER)
+ return (0);
+ if (ah->dport == NULL || ah->sport == NULL || ah->lnk == NULL ||
+ ah->maxpktsize == 0)
+ return (-1);
+ if (ntohs(*ah->dport) == RTSP_CONTROL_PORT_NUMBER_1
+ || ntohs(*ah->sport) == RTSP_CONTROL_PORT_NUMBER_1
+ || ntohs(*ah->dport) == RTSP_CONTROL_PORT_NUMBER_2
+ || ntohs(*ah->sport) == RTSP_CONTROL_PORT_NUMBER_2)
+ return (0);
+ return (-1);
+}
+
+static int
+protohandler(struct libalias *la, struct ip *pip, struct alias_data *ah)
+{
+
+ if (ntohs(*ah->dport) == TFTP_PORT_NUMBER)
+ FindRtspOut(la, pip->ip_src, pip->ip_dst,
+ *ah->sport, *ah->aport, IPPROTO_UDP);
+ else AliasHandleRtspOut(la, pip, ah->lnk, ah->maxpktsize);
+ return (0);
+}
+
+struct proto_handler handlers[] = {
+ {
+ .pri = 100,
+ .dir = OUT,
+ .proto = TCP|UDP,
+ .fingerprint = &fingerprint,
+ .protohandler = &protohandler
+ },
+ { EOH }
+};
+
+static int
+mod_handler(module_t mod, int type, void *data)
+{
+ int error;
+
+ switch (type) {
+ case MOD_LOAD:
+ error = 0;
+ LibAliasAttachHandlers(handlers);
+ break;
+ case MOD_UNLOAD:
+ error = 0;
+ LibAliasDetachHandlers(handlers);
+ break;
+ default:
+ error = EINVAL;
+ }
+ return (error);
+}
+
+#ifdef _KERNEL
+static
+#endif
+moduledata_t alias_mod = {
+ "alias_smedia", mod_handler, NULL
+};
+
+#ifdef _KERNEL
+DECLARE_MODULE(alias_smedia, alias_mod, SI_SUB_DRIVERS, SI_ORDER_SECOND);
+MODULE_VERSION(alias_smedia, 1);
+MODULE_DEPEND(alias_smedia, libalias, 1, 1, 1);
+#endif
+
+#define RTSP_CONTROL_PORT_NUMBER_1 554
+#define RTSP_CONTROL_PORT_NUMBER_2 7070
+#define RTSP_PORT_GROUP 2
+
+#define ISDIGIT(a) (((a) >= '0') && ((a) <= '9'))
+
+static int
+search_string(char *data, int dlen, const char *search_str)
+{
+ int i, j, k;
+ int search_str_len;
+
+ search_str_len = strlen(search_str);
+ for (i = 0; i < dlen - search_str_len; i++) {
+ for (j = i, k = 0; j < dlen - search_str_len; j++, k++) {
+ if (data[j] != search_str[k] &&
+ data[j] != search_str[k] - ('a' - 'A')) {
+ break;
+ }
+ if (k == search_str_len - 1) {
+ return (j + 1);
+ }
+ }
+ }
+ return (-1);
+}
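+
+/*
+ * Added commentary: on success search_string() returns the offset of the byte
+ * immediately following the matched keyword, so callers can treat data + pos
+ * as the start of the keyword's value. The comparison against
+ * search_str[k] - ('a' - 'A') also accepts the upper-case form of each
+ * lower-case pattern character.
+ */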
+
+static int
+alias_rtsp_out(struct libalias *la, struct ip *pip,
+ struct alias_link *lnk,
+ char *data,
+ const char *port_str)
+{
+ int hlen, tlen, dlen;
+ struct tcphdr *tc;
+ int i, j, pos, state, port_dlen, new_dlen, delta;
+ u_short p[2], new_len;
+ u_short sport, eport, base_port;
+ u_short salias = 0, ealias = 0, base_alias = 0;
+ const char *transport_str = "transport:";
+ char newdata[2048], *port_data, *port_newdata, stemp[80];
+ int links_created = 0, pkt_updated = 0;
+ struct alias_link *rtsp_lnk = NULL;
+ struct in_addr null_addr;
+
+ /* Calculate data length of TCP packet */
+ tc = (struct tcphdr *)ip_next(pip);
+ hlen = (pip->ip_hl + tc->th_off) << 2;
+ tlen = ntohs(pip->ip_len);
+ dlen = tlen - hlen;
+
+ /* Find keyword, "Transport: " */
+ pos = search_string(data, dlen, transport_str);
+ if (pos < 0) {
+ return (-1);
+ }
+ port_data = data + pos;
+ port_dlen = dlen - pos;
+
+ memcpy(newdata, data, pos);
+ port_newdata = newdata + pos;
+
+ while (port_dlen > (int)strlen(port_str)) {
+ /* Find keyword, appropriate port string */
+ pos = search_string(port_data, port_dlen, port_str);
+ if (pos < 0) {
+ break;
+ }
+ memcpy(port_newdata, port_data, pos + 1);
+ port_newdata += (pos + 1);
+
+ p[0] = p[1] = 0;
+ sport = eport = 0;
+ state = 0;
+ for (i = pos; i < port_dlen; i++) {
+ switch (state) {
+ case 0:
+ if (port_data[i] == '=') {
+ state++;
+ }
+ break;
+ case 1:
+ if (ISDIGIT(port_data[i])) {
+ p[0] = p[0] * 10 + port_data[i] - '0';
+ } else {
+ if (port_data[i] == ';') {
+ state = 3;
+ }
+ if (port_data[i] == '-') {
+ state++;
+ }
+ }
+ break;
+ case 2:
+ if (ISDIGIT(port_data[i])) {
+ p[1] = p[1] * 10 + port_data[i] - '0';
+ } else {
+ state++;
+ }
+ break;
+ case 3:
+ base_port = p[0];
+ sport = htons(p[0]);
+ eport = htons(p[1]);
+
+ if (!links_created) {
+
+ links_created = 1;
+ /*
+ * Find an even numbered port
+ * number base that satisfies the
+ * contiguous number of ports we
+ * need
+ */
+ null_addr.s_addr = 0;
+ if (0 == (salias = FindNewPortGroup(la, null_addr,
+ FindAliasAddress(la, pip->ip_src),
+ sport, 0,
+ RTSP_PORT_GROUP,
+ IPPROTO_UDP, 1))) {
+#ifdef LIBALIAS_DEBUG
+ fprintf(stderr,
+ "PacketAlias/RTSP: Cannot find contiguous RTSP data ports\n");
+#endif
+ } else {
+
+ base_alias = ntohs(salias);
+ for (j = 0; j < RTSP_PORT_GROUP; j++) {
+ /*
+ * Establish link
+ * to port found in
+ * RTSP packet
+ */
+ rtsp_lnk = FindRtspOut(la, GetOriginalAddress(lnk), null_addr,
+ htons(base_port + j), htons(base_alias + j),
+ IPPROTO_UDP);
+ if (rtsp_lnk != NULL) {
+#ifndef NO_FW_PUNCH
+ /*
+ * Punch
+ * hole in
+ * firewall
+ */
+ PunchFWHole(rtsp_lnk);
+#endif
+ } else {
+#ifdef LIBALIAS_DEBUG
+ fprintf(stderr,
+ "PacketAlias/RTSP: Cannot allocate RTSP data ports\n");
+#endif
+ break;
+ }
+ }
+ }
+ ealias = htons(base_alias + (RTSP_PORT_GROUP - 1));
+ }
+ if (salias && rtsp_lnk) {
+
+ pkt_updated = 1;
+
+ /* Copy into IP packet */
+ sprintf(stemp, "%d", ntohs(salias));
+ memcpy(port_newdata, stemp, strlen(stemp));
+ port_newdata += strlen(stemp);
+
+ if (eport != 0) {
+ *port_newdata = '-';
+ port_newdata++;
+
+ /* Copy into IP packet */
+ sprintf(stemp, "%d", ntohs(ealias));
+ memcpy(port_newdata, stemp, strlen(stemp));
+ port_newdata += strlen(stemp);
+ }
+ *port_newdata = ';';
+ port_newdata++;
+ }
+ state++;
+ break;
+ }
+ if (state > 3) {
+ break;
+ }
+ }
+ port_data += i;
+ port_dlen -= i;
+ }
+
+ if (!pkt_updated)
+ return (-1);
+
+ memcpy(port_newdata, port_data, port_dlen);
+ port_newdata += port_dlen;
+ *port_newdata = '\0';
+
+ /* Create new packet */
+ new_dlen = port_newdata - newdata;
+ memcpy(data, newdata, new_dlen);
+
+ SetAckModified(lnk);
+ tc = (struct tcphdr *)ip_next(pip);
+ delta = GetDeltaSeqOut(tc->th_seq, lnk);
+ AddSeq(lnk, delta + new_dlen - dlen, pip->ip_hl, pip->ip_len,
+ tc->th_seq, tc->th_off);
+
+ new_len = htons(hlen + new_dlen);
+ DifferentialChecksum(&pip->ip_sum,
+ &new_len,
+ &pip->ip_len,
+ 1);
+ pip->ip_len = new_len;
+
+ tc->th_sum = 0;
+#ifdef _KERNEL
+ tc->th_x2 = 1;
+#else
+ tc->th_sum = TcpChecksum(pip);
+#endif
+ return (0);
+}
+
+/* Support the protocol used by early versions of RealPlayer */
+
+static int
+alias_pna_out(struct libalias *la, struct ip *pip,
+ struct alias_link *lnk,
+ char *data,
+ int dlen)
+{
+ struct alias_link *pna_links;
+ u_short msg_id, msg_len;
+ char *work;
+ u_short alias_port, port;
+ struct tcphdr *tc;
+
+ work = data;
+ work += 5;
+ while (work + 4 < data + dlen) {
+ memcpy(&msg_id, work, 2);
+ work += 2;
+ memcpy(&msg_len, work, 2);
+ work += 2;
+ if (ntohs(msg_id) == 0) {
+ /* end of options */
+ return (0);
+ }
+ if ((ntohs(msg_id) == 1) || (ntohs(msg_id) == 7)) {
+ memcpy(&port, work, 2);
+ pna_links = FindUdpTcpOut(la, pip->ip_src, GetDestAddress(lnk),
+ port, 0, IPPROTO_UDP, 1);
+ if (pna_links != NULL) {
+#ifndef NO_FW_PUNCH
+ /* Punch hole in firewall */
+ PunchFWHole(pna_links);
+#endif
+ tc = (struct tcphdr *)ip_next(pip);
+ alias_port = GetAliasPort(pna_links);
+ memcpy(work, &alias_port, 2);
+
+ /* Compute TCP checksum for revised packet */
+ tc->th_sum = 0;
+#ifdef _KERNEL
+ tc->th_x2 = 1;
+#else
+ tc->th_sum = TcpChecksum(pip);
+#endif
+ }
+ }
+ work += ntohs(msg_len);
+ }
+
+ return (0);
+}
+
+static void
+AliasHandleRtspOut(struct libalias *la, struct ip *pip, struct alias_link *lnk, int maxpacketsize)
+{
+ int hlen, tlen, dlen;
+ struct tcphdr *tc;
+ char *data;
+ const char *setup = "SETUP", *pna = "PNA", *str200 = "200";
+ const char *okstr = "OK", *client_port_str = "client_port";
+ const char *server_port_str = "server_port";
+ int i, parseOk;
+
+ (void)maxpacketsize;
+
+ tc = (struct tcphdr *)ip_next(pip);
+ hlen = (pip->ip_hl + tc->th_off) << 2;
+ tlen = ntohs(pip->ip_len);
+ dlen = tlen - hlen;
+
+ data = (char *)pip;
+ data += hlen;
+
+ /* When aliasing a client, check for the SETUP request */
+ if ((ntohs(tc->th_dport) == RTSP_CONTROL_PORT_NUMBER_1) ||
+ (ntohs(tc->th_dport) == RTSP_CONTROL_PORT_NUMBER_2)) {
+
+ if (dlen >= (int)strlen(setup)) {
+ if (memcmp(data, setup, strlen(setup)) == 0) {
+ alias_rtsp_out(la, pip, lnk, data, client_port_str);
+ return;
+ }
+ }
+ if (dlen >= (int)strlen(pna)) {
+ if (memcmp(data, pna, strlen(pna)) == 0) {
+ alias_pna_out(la, pip, lnk, data, dlen);
+ }
+ }
+ } else {
+
+ /*
+ * When aliasing a server, check for the 200 reply
+ * Accommodate a varying number of blanks between 200 & OK
+ */
+
+ if (dlen >= (int)strlen(str200)) {
+
+ for (parseOk = 0, i = 0;
+ i <= dlen - (int)strlen(str200);
+ i++) {
+ if (memcmp(&data[i], str200, strlen(str200)) == 0) {
+ parseOk = 1;
+ break;
+ }
+ }
+ if (parseOk) {
+
+ i += strlen(str200); /* skip string found */
+ while (data[i] == ' ') /* skip blank(s) */
+ i++;
+
+ if ((dlen - i) >= (int)strlen(okstr)) {
+
+ if (memcmp(&data[i], okstr, strlen(okstr)) == 0)
+ alias_rtsp_out(la, pip, lnk, data, server_port_str);
+
+ }
+ }
+ }
+ }
+}
diff --git a/rtems/freebsd/netinet/libalias/alias_util.c b/rtems/freebsd/netinet/libalias/alias_util.c
new file mode 100644
index 00000000..8915e65d
--- /dev/null
+++ b/rtems/freebsd/netinet/libalias/alias_util.c
@@ -0,0 +1,178 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 2001 Charles Mott <cm@linktel.net>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+
+/*
+ Alias_util.c contains general utilities used by other functions
+ in the packet aliasing module. At the moment, there are functions
+ for computing IP header and TCP packet checksums.
+
+ The checksum routines are based upon example code in a Unix networking
+ text written by Stevens (sorry, I can't remember the title -- but
+ at least this is a good author).
+
+ Initial Version: August, 1996 (cjm)
+
+ Version 1.7: January 9, 1997
+ Added differential checksum update function.
+*/
+
+#ifdef _KERNEL
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/proc.h>
+#else
+#include <rtems/freebsd/sys/types.h>
+#include <rtems/freebsd/stdio.h>
+#endif
+
+#include <rtems/freebsd/netinet/in_systm.h>
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/ip.h>
+#include <rtems/freebsd/netinet/tcp.h>
+
+#ifdef _KERNEL
+#include <rtems/freebsd/netinet/libalias/alias.h>
+#include <rtems/freebsd/netinet/libalias/alias_local.h>
+#else
+#include <rtems/freebsd/local/alias.h>
+#include <rtems/freebsd/local/alias_local.h>
+#endif
+
+/*
+ * Note: the checksum routines assume that the actual checksum word has
+ * been zeroed out. If the checksum word is filled with the proper value,
+ * then these routines will give a result of zero (useful for testing
+ * purposes);
+ */
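+/*
+ * Added usage sketch (userland, following the IpChecksum() pattern below):
+ *
+ * pip->ip_sum = 0;
+ * pip->ip_sum = LibAliasInternetChecksum(NULL, (u_short *)pip,
+ *     pip->ip_hl << 2);
+ *
+ * Recomputing over the now-correct header should then return zero:
+ *
+ * assert(LibAliasInternetChecksum(NULL, (u_short *)pip,
+ *     pip->ip_hl << 2) == 0);
+ */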
+u_short
+LibAliasInternetChecksum(struct libalias *la __unused, u_short * ptr,
+ int nbytes)
+{
+ int sum, oddbyte;
+
+ LIBALIAS_LOCK(la);
+ sum = 0;
+ while (nbytes > 1) {
+ sum += *ptr++;
+ nbytes -= 2;
+ }
+ if (nbytes == 1) {
+ oddbyte = 0;
+ ((u_char *) & oddbyte)[0] = *(u_char *) ptr;
+ ((u_char *) & oddbyte)[1] = 0;
+ sum += oddbyte;
+ }
+ sum = (sum >> 16) + (sum & 0xffff);
+ sum += (sum >> 16);
+ LIBALIAS_UNLOCK(la);
+ return (~sum);
+}
+
+#ifndef _KERNEL
+u_short
+IpChecksum(struct ip *pip)
+{
+ return (LibAliasInternetChecksum(NULL, (u_short *) pip,
+ (pip->ip_hl << 2)));
+
+}
+
+u_short
+TcpChecksum(struct ip *pip)
+{
+ u_short *ptr;
+ struct tcphdr *tc;
+ int nhdr, ntcp, nbytes;
+ int sum, oddbyte;
+
+ nhdr = pip->ip_hl << 2;
+ ntcp = ntohs(pip->ip_len) - nhdr;
+
+ tc = (struct tcphdr *)ip_next(pip);
+ ptr = (u_short *) tc;
+
+/* Add up TCP header and data */
+ nbytes = ntcp;
+ sum = 0;
+ while (nbytes > 1) {
+ sum += *ptr++;
+ nbytes -= 2;
+ }
+ if (nbytes == 1) {
+ oddbyte = 0;
+ ((u_char *) & oddbyte)[0] = *(u_char *) ptr;
+ ((u_char *) & oddbyte)[1] = 0;
+ sum += oddbyte;
+ }
+/* "Pseudo-header" data */
+ ptr = (void *)&pip->ip_dst;
+ sum += *ptr++;
+ sum += *ptr;
+ ptr = (void *)&pip->ip_src;
+ sum += *ptr++;
+ sum += *ptr;
+ sum += htons((u_short) ntcp);
+ sum += htons((u_short) pip->ip_p);
+
+/* Roll over carry bits */
+ sum = (sum >> 16) + (sum & 0xffff);
+ sum += (sum >> 16);
+
+/* Return checksum */
+ return ((u_short)~sum);
+}
+#endif /* not _KERNEL */
+
+void
+DifferentialChecksum(u_short * cksum, void *newp, void *oldp, int n)
+{
+ int i;
+ int accumulate;
+ u_short *new = newp;
+ u_short *old = oldp;
+
+ accumulate = *cksum;
+ for (i = 0; i < n; i++) {
+ accumulate -= *new++;
+ accumulate += *old++;
+ }
+
+ if (accumulate < 0) {
+ accumulate = -accumulate;
+ accumulate = (accumulate >> 16) + (accumulate & 0xffff);
+ accumulate += accumulate >> 16;
+ *cksum = (u_short)~accumulate;
+ } else {
+ accumulate = (accumulate >> 16) + (accumulate & 0xffff);
+ accumulate += accumulate >> 16;
+ *cksum = (u_short)accumulate;
+ }
+}
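+
+/*
+ * Usage sketch (illustrative; new_dst and tc are hypothetical locals):
+ * after rewriting a field in place, patch the existing checksum
+ * incrementally instead of recomputing it over the whole packet.  An
+ * IPv4 address spans two 16-bit checksum words, hence n = 2:
+ *
+ *     struct in_addr old_dst = pip->ip_dst;
+ *     pip->ip_dst = new_dst;
+ *     DifferentialChecksum(&tc->th_sum, &pip->ip_dst, &old_dst, 2);
+ */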
diff --git a/rtems/freebsd/netinet/pim.h b/rtems/freebsd/netinet/pim.h
new file mode 100644
index 00000000..50118074
--- /dev/null
+++ b/rtems/freebsd/netinet/pim.h
@@ -0,0 +1,119 @@
+/*-
+ * Copyright (c) 1996-2000
+ * University of Southern California/Information Sciences Institute.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the project nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _NETINET_PIM_HH_
+#define _NETINET_PIM_HH_
+
+/*
+ * Protocol Independent Multicast (PIM) definitions.
+ * RFC 2362, June 1998.
+ *
+ * Written by Ahmed Helmy, USC/SGI, July 1996.
+ * Modified by George Edmond Eddy (Rusty), ISI, February 1998.
+ * Modified by Pavlin Radoslavov, USC/ISI, May 1998, October 2000.
+ */
+
+#include <rtems/freebsd/sys/types.h>
+
+#ifndef _PIM_VT
+#ifndef BYTE_ORDER
+# error BYTE_ORDER is not defined!
+#endif
+#if (BYTE_ORDER != BIG_ENDIAN) && (BYTE_ORDER != LITTLE_ENDIAN)
+# error BYTE_ORDER must be defined to either BIG_ENDIAN or LITTLE_ENDIAN
+#endif
+#endif /* ! _PIM_VT */
+
+/*
+ * PIM packet header
+ */
+struct pim {
+#ifdef _PIM_VT
+ uint8_t pim_vt; /* PIM version and message type */
+#else /* ! _PIM_VT */
+#if BYTE_ORDER == BIG_ENDIAN
+ u_int pim_vers:4, /* PIM protocol version */
+ pim_type:4; /* PIM message type */
+#endif
+#if BYTE_ORDER == LITTLE_ENDIAN
+ u_int pim_type:4, /* PIM message type */
+ pim_vers:4; /* PIM protocol version */
+#endif
+#endif /* ! _PIM_VT */
+ uint8_t pim_reserved; /* Reserved */
+ uint16_t pim_cksum; /* IP-style checksum */
+};
+/* KAME-related name backward compatibility */
+#define pim_ver pim_vers
+#define pim_rsv pim_reserved
+
+#ifdef _PIM_VT
+#define PIM_MAKE_VT(v, t) (0xff & (((v) << 4) | (0x0f & (t))))
+#define PIM_VT_V(x) (((x) >> 4) & 0x0f)
+#define PIM_VT_T(x) ((x) & 0x0f)
+#endif /* _PIM_VT */
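+
+/*
+ * Example (illustrative): with _PIM_VT defined, the version/type byte of
+ * a PIMv2 Hello (constants defined below) is built as
+ *
+ *     uint8_t vt = PIM_MAKE_VT(PIM_VERSION, PIM_HELLO);
+ *
+ * which yields vt == 0x20, with PIM_VT_V(vt) == 2 and
+ * PIM_VT_T(vt) == PIM_HELLO.
+ */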
+
+#define PIM_VERSION 2
+#define PIM_MINLEN 8 /* PIM message min. length */
+#define PIM_REG_MINLEN (PIM_MINLEN+20) /* PIM Register hdr + inner IPv4 hdr */
+#define PIM6_REG_MINLEN (PIM_MINLEN+40) /* PIM Register hdr + inner IPv6 hdr */
+
+/*
+ * PIM message types
+ */
+#define PIM_HELLO 0x0 /* PIM-SM and PIM-DM */
+#define PIM_REGISTER 0x1 /* PIM-SM only */
+#define PIM_REGISTER_STOP 0x2 /* PIM-SM only */
+#define PIM_JOIN_PRUNE 0x3 /* PIM-SM and PIM-DM */
+#define PIM_BOOTSTRAP 0x4 /* PIM-SM only */
+#define PIM_ASSERT 0x5 /* PIM-SM and PIM-DM */
+#define PIM_GRAFT 0x6 /* PIM-DM only */
+#define PIM_GRAFT_ACK 0x7 /* PIM-DM only */
+#define PIM_CAND_RP_ADV 0x8 /* PIM-SM only */
+#define PIM_ALL_DF_ELECTION 0xa /* Bidir-PIM-SM only */
+
+/*
+ * PIM-Register message flags
+ */
+#define PIM_BORDER_REGISTER 0x80000000U /* The Border bit (host-order) */
+#define PIM_NULL_REGISTER 0x40000000U /* The Null-Register bit (host-order)*/
+
+/*
+ * All-PIM-Routers IPv4 and IPv6 multicast addresses
+ */
+#define INADDR_ALLPIM_ROUTERS_GROUP (uint32_t)0xe000000dU /* 224.0.0.13 */
+#define IN6ADDR_LINKLOCAL_ALLPIM_ROUTERS "ff02::d"
+#define IN6ADDR_LINKLOCAL_ALLPIM_ROUTERS_INIT \
+ {{{ 0xff, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0d }}}
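+
+/*
+ * Note (added commentary): INADDR_ALLPIM_ROUTERS_GROUP is kept in host
+ * byte order, so comparisons against on-the-wire addresses need htonl():
+ *
+ *     if (ip->ip_dst.s_addr == htonl(INADDR_ALLPIM_ROUTERS_GROUP))
+ *             ...
+ */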
+
+#endif /* _NETINET_PIM_HH_ */
diff --git a/rtems/freebsd/netinet/pim_var.h b/rtems/freebsd/netinet/pim_var.h
new file mode 100644
index 00000000..9d80bbb2
--- /dev/null
+++ b/rtems/freebsd/netinet/pim_var.h
@@ -0,0 +1,84 @@
+/*-
+ * Copyright (c) 1998-2000
+ * University of Southern California/Information Sciences Institute.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the project nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _NETINET_PIM_VAR_HH_
+#define _NETINET_PIM_VAR_HH_
+
+/*
+ * Protocol Independent Multicast (PIM),
+ * kernel variables and implementation-specific definitions.
+ *
+ * Written by George Edmond Eddy (Rusty), ISI, February 1998.
+ * Modified by Pavlin Radoslavov, USC/ISI, May 1998, Aug 1999, October 2000.
+ * Modified by Hitoshi Asaeda, WIDE, August 1998.
+ */
+
+/*
+ * PIM statistics kept in the kernel
+ */
+struct pimstat {
+ u_quad_t pims_rcv_total_msgs; /* total PIM messages received */
+ u_quad_t pims_rcv_total_bytes; /* total PIM bytes received */
+ u_quad_t pims_rcv_tooshort; /* rcvd with too few bytes */
+ u_quad_t pims_rcv_badsum; /* rcvd with bad checksum */
+ u_quad_t pims_rcv_badversion; /* rcvd bad PIM version */
+ u_quad_t pims_rcv_registers_msgs; /* rcvd regs. msgs (data only) */
+ u_quad_t pims_rcv_registers_bytes; /* rcvd regs. bytes (data only) */
+ u_quad_t pims_rcv_registers_wrongiif; /* rcvd regs. on wrong iif */
+ u_quad_t pims_rcv_badregisters; /* rcvd invalid registers */
+ u_quad_t pims_snd_registers_msgs; /* sent regs. msgs (data only) */
+ u_quad_t pims_snd_registers_bytes; /* sent regs. bytes (data only) */
+};
+
+#ifdef _KERNEL
+#define PIMSTAT_ADD(name, val) V_pimstat.name += (val)
+#define PIMSTAT_INC(name) PIMSTAT_ADD(name, 1)
+#endif
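+
+/*
+ * Usage sketch (illustrative; pimlen is a hypothetical length variable):
+ * the PIM input path would bump these counters as messages arrive, e.g.
+ *
+ *     PIMSTAT_INC(pims_rcv_total_msgs);
+ *     PIMSTAT_ADD(pims_rcv_total_bytes, pimlen);
+ */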
+
+/*
+ * Names for PIM sysctl objects
+ */
+#define PIMCTL_STATS 1 /* statistics (read-only) */
+#define PIMCTL_MAXID 2
+
+#define PIMCTL_NAMES { \
+ { 0, 0 }, \
+ { "stats", CTLTYPE_STRUCT }, \
+}
+
+#ifdef _KERNEL
+
+void pim_input(struct mbuf *, int);
+SYSCTL_DECL(_net_inet_pim);
+#endif
+
+#endif /* _NETINET_PIM_VAR_HH_ */
diff --git a/rtems/freebsd/netinet/raw_ip.c b/rtems/freebsd/netinet/raw_ip.c
new file mode 100644
index 00000000..fd7d27cf
--- /dev/null
+++ b/rtems/freebsd/netinet/raw_ip.c
@@ -0,0 +1,1116 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 1982, 1986, 1988, 1993
+ * The Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)raw_ip.c 8.7 (Berkeley) 5/15/95
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/freebsd/local/opt_inet6.h>
+#include <rtems/freebsd/local/opt_ipsec.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/jail.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/priv.h>
+#include <rtems/freebsd/sys/proc.h>
+#include <rtems/freebsd/sys/protosw.h>
+#include <rtems/freebsd/sys/rwlock.h>
+#include <rtems/freebsd/sys/signalvar.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/socketvar.h>
+#include <rtems/freebsd/sys/sx.h>
+#include <rtems/freebsd/sys/sysctl.h>
+#include <rtems/freebsd/sys/systm.h>
+
+#include <rtems/freebsd/vm/uma.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/route.h>
+#include <rtems/freebsd/net/vnet.h>
+
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/in_systm.h>
+#include <rtems/freebsd/netinet/in_pcb.h>
+#include <rtems/freebsd/netinet/in_var.h>
+#include <rtems/freebsd/netinet/ip.h>
+#include <rtems/freebsd/netinet/ip_var.h>
+#include <rtems/freebsd/netinet/ip_mroute.h>
+
+#ifdef IPSEC
+#include <rtems/freebsd/netipsec/ipsec.h>
+#endif /*IPSEC*/
+
+#include <rtems/freebsd/security/mac/mac_framework.h>
+
+VNET_DEFINE(struct inpcbhead, ripcb);
+VNET_DEFINE(struct inpcbinfo, ripcbinfo);
+
+#define V_ripcb VNET(ripcb)
+#define V_ripcbinfo VNET(ripcbinfo)
+
+/*
+ * Control and data hooks for ipfw, dummynet, divert and so on.
+ * The data hooks are not used here but it is convenient
+ * to keep them all in one place.
+ */
+VNET_DEFINE(ip_fw_chk_ptr_t, ip_fw_chk_ptr) = NULL;
+VNET_DEFINE(ip_fw_ctl_ptr_t, ip_fw_ctl_ptr) = NULL;
+
+int (*ip_dn_ctl_ptr)(struct sockopt *);
+int (*ip_dn_io_ptr)(struct mbuf **, int, struct ip_fw_args *);
+void (*ip_divert_ptr)(struct mbuf *, int);
+int (*ng_ipfw_input_p)(struct mbuf **, int,
+ struct ip_fw_args *, int);
+
+/*
+ * Hooks for multicast routing.  They all default to NULL, so leave them
+ * uninitialized and rely on the BSS being zeroed.
+ */
+
+/*
+ * The socket used to communicate with the multicast routing daemon.
+ */
+VNET_DEFINE(struct socket *, ip_mrouter);
+
+/*
+ * The various mrouter and rsvp functions.
+ */
+int (*ip_mrouter_set)(struct socket *, struct sockopt *);
+int (*ip_mrouter_get)(struct socket *, struct sockopt *);
+int (*ip_mrouter_done)(void);
+int (*ip_mforward)(struct ip *, struct ifnet *, struct mbuf *,
+ struct ip_moptions *);
+int (*mrt_ioctl)(u_long, caddr_t, int);
+int (*legal_vif_num)(int);
+u_long (*ip_mcast_src)(int);
+
+void (*rsvp_input_p)(struct mbuf *m, int off);
+int (*ip_rsvp_vif)(struct socket *, struct sockopt *);
+void (*ip_rsvp_force_done)(struct socket *);
+
+/*
+ * Hash functions
+ */
+
+#define INP_PCBHASH_RAW_SIZE 256
+#define INP_PCBHASH_RAW(proto, laddr, faddr, mask) \
+ (((proto) + (laddr) + (faddr)) % (mask) + 1)
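+
+/*
+ * Note (added commentary): the hash above folds the protocol and both
+ * addresses into buckets 1..mask, reserving bucket 0 for wildcard PCBs;
+ * see rip_inshash() below.
+ */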
+
+static void
+rip_inshash(struct inpcb *inp)
+{
+ struct inpcbinfo *pcbinfo = inp->inp_pcbinfo;
+ struct inpcbhead *pcbhash;
+ int hash;
+
+ INP_INFO_WLOCK_ASSERT(pcbinfo);
+ INP_WLOCK_ASSERT(inp);
+
+ if (inp->inp_ip_p != 0 &&
+ inp->inp_laddr.s_addr != INADDR_ANY &&
+ inp->inp_faddr.s_addr != INADDR_ANY) {
+ hash = INP_PCBHASH_RAW(inp->inp_ip_p, inp->inp_laddr.s_addr,
+ inp->inp_faddr.s_addr, pcbinfo->ipi_hashmask);
+ } else
+ hash = 0;
+ pcbhash = &pcbinfo->ipi_hashbase[hash];
+ LIST_INSERT_HEAD(pcbhash, inp, inp_hash);
+}
+
+static void
+rip_delhash(struct inpcb *inp)
+{
+
+ INP_INFO_WLOCK_ASSERT(inp->inp_pcbinfo);
+ INP_WLOCK_ASSERT(inp);
+
+ LIST_REMOVE(inp, inp_hash);
+}
+
+/*
+ * Raw interface to IP protocol.
+ */
+
+/*
+ * Initialize raw connection block q.
+ */
+static void
+rip_zone_change(void *tag)
+{
+
+ uma_zone_set_max(V_ripcbinfo.ipi_zone, maxsockets);
+}
+
+static int
+rip_inpcb_init(void *mem, int size, int flags)
+{
+ struct inpcb *inp = mem;
+
+ INP_LOCK_INIT(inp, "inp", "rawinp");
+ return (0);
+}
+
+void
+rip_init(void)
+{
+
+ INP_INFO_LOCK_INIT(&V_ripcbinfo, "rip");
+ LIST_INIT(&V_ripcb);
+#ifdef VIMAGE
+ V_ripcbinfo.ipi_vnet = curvnet;
+#endif
+ V_ripcbinfo.ipi_listhead = &V_ripcb;
+ V_ripcbinfo.ipi_hashbase =
+ hashinit(INP_PCBHASH_RAW_SIZE, M_PCB, &V_ripcbinfo.ipi_hashmask);
+ V_ripcbinfo.ipi_porthashbase =
+ hashinit(1, M_PCB, &V_ripcbinfo.ipi_porthashmask);
+ V_ripcbinfo.ipi_zone = uma_zcreate("ripcb", sizeof(struct inpcb),
+ NULL, NULL, rip_inpcb_init, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
+ uma_zone_set_max(V_ripcbinfo.ipi_zone, maxsockets);
+ EVENTHANDLER_REGISTER(maxsockets_change, rip_zone_change, NULL,
+ EVENTHANDLER_PRI_ANY);
+}
+
+#ifdef VIMAGE
+void
+rip_destroy(void)
+{
+
+ hashdestroy(V_ripcbinfo.ipi_hashbase, M_PCB,
+ V_ripcbinfo.ipi_hashmask);
+ hashdestroy(V_ripcbinfo.ipi_porthashbase, M_PCB,
+ V_ripcbinfo.ipi_porthashmask);
+}
+#endif
+
+static int
+rip_append(struct inpcb *last, struct ip *ip, struct mbuf *n,
+ struct sockaddr_in *ripsrc)
+{
+ int policyfail = 0;
+
+ INP_RLOCK_ASSERT(last);
+
+#ifdef IPSEC
+ /* check AH/ESP integrity. */
+ if (ipsec4_in_reject(n, last)) {
+ policyfail = 1;
+ }
+#endif /* IPSEC */
+#ifdef MAC
+ if (!policyfail && mac_inpcb_check_deliver(last, n) != 0)
+ policyfail = 1;
+#endif
+ /* Check the minimum TTL for socket. */
+ if (last->inp_ip_minttl && last->inp_ip_minttl > ip->ip_ttl)
+ policyfail = 1;
+ if (!policyfail) {
+ struct mbuf *opts = NULL;
+ struct socket *so;
+
+ so = last->inp_socket;
+ if ((last->inp_flags & INP_CONTROLOPTS) ||
+ (so->so_options & (SO_TIMESTAMP | SO_BINTIME)))
+ ip_savecontrol(last, &opts, ip, n);
+ SOCKBUF_LOCK(&so->so_rcv);
+ if (sbappendaddr_locked(&so->so_rcv,
+ (struct sockaddr *)ripsrc, n, opts) == 0) {
+ /* should notify about lost packet */
+ m_freem(n);
+ if (opts)
+ m_freem(opts);
+ SOCKBUF_UNLOCK(&so->so_rcv);
+ } else
+ sorwakeup_locked(so);
+ } else
+ m_freem(n);
+ return (policyfail);
+}
+
+/*
+ * Set up generic address and protocol structures for the raw_input routine,
+ * then
+ * pass them along with mbuf chain.
+ */
+void
+rip_input(struct mbuf *m, int off)
+{
+ struct ifnet *ifp;
+ struct ip *ip = mtod(m, struct ip *);
+ int proto = ip->ip_p;
+ struct inpcb *inp, *last;
+ struct sockaddr_in ripsrc;
+ int hash;
+
+ bzero(&ripsrc, sizeof(ripsrc));
+ ripsrc.sin_len = sizeof(ripsrc);
+ ripsrc.sin_family = AF_INET;
+ ripsrc.sin_addr = ip->ip_src;
+ last = NULL;
+
+ ifp = m->m_pkthdr.rcvif;
+
+ hash = INP_PCBHASH_RAW(proto, ip->ip_src.s_addr,
+ ip->ip_dst.s_addr, V_ripcbinfo.ipi_hashmask);
+ INP_INFO_RLOCK(&V_ripcbinfo);
+ LIST_FOREACH(inp, &V_ripcbinfo.ipi_hashbase[hash], inp_hash) {
+ if (inp->inp_ip_p != proto)
+ continue;
+#ifdef INET6
+ /* XXX inp locking */
+ if ((inp->inp_vflag & INP_IPV4) == 0)
+ continue;
+#endif
+ if (inp->inp_laddr.s_addr != ip->ip_dst.s_addr)
+ continue;
+ if (inp->inp_faddr.s_addr != ip->ip_src.s_addr)
+ continue;
+ if (jailed_without_vnet(inp->inp_cred)) {
+ /*
+ * XXX: If faddr was bound to multicast group,
+ * jailed raw socket will drop datagram.
+ */
+ if (prison_check_ip4(inp->inp_cred, &ip->ip_dst) != 0)
+ continue;
+ }
+ if (last != NULL) {
+ struct mbuf *n;
+
+ n = m_copy(m, 0, (int)M_COPYALL);
+ if (n != NULL)
+ (void) rip_append(last, ip, n, &ripsrc);
+ /* XXX count dropped packet */
+ INP_RUNLOCK(last);
+ }
+ INP_RLOCK(inp);
+ last = inp;
+ }
+ LIST_FOREACH(inp, &V_ripcbinfo.ipi_hashbase[0], inp_hash) {
+ if (inp->inp_ip_p && inp->inp_ip_p != proto)
+ continue;
+#ifdef INET6
+ /* XXX inp locking */
+ if ((inp->inp_vflag & INP_IPV4) == 0)
+ continue;
+#endif
+ if (!in_nullhost(inp->inp_laddr) &&
+ !in_hosteq(inp->inp_laddr, ip->ip_dst))
+ continue;
+ if (!in_nullhost(inp->inp_faddr) &&
+ !in_hosteq(inp->inp_faddr, ip->ip_src))
+ continue;
+ if (jailed_without_vnet(inp->inp_cred)) {
+ /*
+ * Allow raw socket in jail to receive multicast;
+ * assume process had PRIV_NETINET_RAW at attach,
+ * and fall through into normal filter path if so.
+ */
+ if (!IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) &&
+ prison_check_ip4(inp->inp_cred, &ip->ip_dst) != 0)
+ continue;
+ }
+ /*
+ * If this raw socket has multicast state, and we
+ * have received a multicast, check if this socket
+ * should receive it, as multicast filtering is now
+ * the responsibility of the transport layer.
+ */
+ if (inp->inp_moptions != NULL &&
+ IN_MULTICAST(ntohl(ip->ip_dst.s_addr))) {
+ /*
+ * If the incoming datagram is for IGMP, allow it
+ * through unconditionally to the raw socket.
+ *
+ * In the case of IGMPv2, we may not have explicitly
+ * joined the group, and may have set IFF_ALLMULTI
+ * on the interface. imo_multi_filter() may discard
+ * control traffic we actually need to see.
+ *
+ * Userland multicast routing daemons should continue
+ * to filter the control traffic appropriately.
+ */
+ int blocked;
+
+ blocked = MCAST_PASS;
+ if (proto != IPPROTO_IGMP) {
+ struct sockaddr_in group;
+
+ bzero(&group, sizeof(struct sockaddr_in));
+ group.sin_len = sizeof(struct sockaddr_in);
+ group.sin_family = AF_INET;
+ group.sin_addr = ip->ip_dst;
+
+ blocked = imo_multi_filter(inp->inp_moptions,
+ ifp,
+ (struct sockaddr *)&group,
+ (struct sockaddr *)&ripsrc);
+ }
+
+ if (blocked != MCAST_PASS) {
+ IPSTAT_INC(ips_notmember);
+ continue;
+ }
+ }
+ if (last != NULL) {
+ struct mbuf *n;
+
+ n = m_copy(m, 0, (int)M_COPYALL);
+ if (n != NULL)
+ (void) rip_append(last, ip, n, &ripsrc);
+ /* XXX count dropped packet */
+ INP_RUNLOCK(last);
+ }
+ INP_RLOCK(inp);
+ last = inp;
+ }
+ INP_INFO_RUNLOCK(&V_ripcbinfo);
+ if (last != NULL) {
+ if (rip_append(last, ip, m, &ripsrc) != 0)
+ IPSTAT_INC(ips_delivered);
+ INP_RUNLOCK(last);
+ } else {
+ m_freem(m);
+ IPSTAT_INC(ips_noproto);
+ IPSTAT_DEC(ips_delivered);
+ }
+}
+
+/*
+ * Generate IP header and pass packet to ip_output. Tack on options user may
+ * have setup with control call.
+ */
+int
+rip_output(struct mbuf *m, struct socket *so, u_long dst)
+{
+ struct ip *ip;
+ int error;
+ struct inpcb *inp = sotoinpcb(so);
+ int flags = ((so->so_options & SO_DONTROUTE) ? IP_ROUTETOIF : 0) |
+ IP_ALLOWBROADCAST;
+
+ /*
+ * If the user handed us a complete IP packet, use it. Otherwise,
+ * allocate an mbuf for a header and fill it in.
+ */
+ if ((inp->inp_flags & INP_HDRINCL) == 0) {
+ if (m->m_pkthdr.len + sizeof(struct ip) > IP_MAXPACKET) {
+ m_freem(m);
+ return (EMSGSIZE);
+ }
+ M_PREPEND(m, sizeof(struct ip), M_DONTWAIT);
+ if (m == NULL)
+ return (ENOBUFS);
+
+ INP_RLOCK(inp);
+ ip = mtod(m, struct ip *);
+ ip->ip_tos = inp->inp_ip_tos;
+ if (inp->inp_flags & INP_DONTFRAG)
+ ip->ip_off = IP_DF;
+ else
+ ip->ip_off = 0;
+ ip->ip_p = inp->inp_ip_p;
+ ip->ip_len = m->m_pkthdr.len;
+ ip->ip_src = inp->inp_laddr;
+ if (jailed(inp->inp_cred)) {
+ /*
+ * prison_local_ip4() would be good enough but would
+ * let a source of INADDR_ANY pass, which we do not
+ * want to see from jails. We do not go through the
+ * pain of in_pcbladdr() for raw sockets.
+ */
+ if (ip->ip_src.s_addr == INADDR_ANY)
+ error = prison_get_ip4(inp->inp_cred,
+ &ip->ip_src);
+ else
+ error = prison_local_ip4(inp->inp_cred,
+ &ip->ip_src);
+ if (error != 0) {
+ INP_RUNLOCK(inp);
+ m_freem(m);
+ return (error);
+ }
+ }
+ ip->ip_dst.s_addr = dst;
+ ip->ip_ttl = inp->inp_ip_ttl;
+ } else {
+ if (m->m_pkthdr.len > IP_MAXPACKET) {
+ m_freem(m);
+ return (EMSGSIZE);
+ }
+ INP_RLOCK(inp);
+ ip = mtod(m, struct ip *);
+ error = prison_check_ip4(inp->inp_cred, &ip->ip_src);
+ if (error != 0) {
+ INP_RUNLOCK(inp);
+ m_freem(m);
+ return (error);
+ }
+
+ /*
+ * Don't allow both user specified and setsockopt options,
+ * and don't allow packet length sizes that will crash.
+ */
+ if (((ip->ip_hl != (sizeof (*ip) >> 2)) && inp->inp_options)
+ || (ip->ip_len > m->m_pkthdr.len)
+ || (ip->ip_len < (ip->ip_hl << 2))) {
+ INP_RUNLOCK(inp);
+ m_freem(m);
+ return (EINVAL);
+ }
+ if (ip->ip_id == 0)
+ ip->ip_id = ip_newid();
+
+ /*
+ * XXX prevent ip_output from overwriting header fields.
+ */
+ flags |= IP_RAWOUTPUT;
+ IPSTAT_INC(ips_rawout);
+ }
+
+ if (inp->inp_flags & INP_ONESBCAST)
+ flags |= IP_SENDONES;
+
+#ifdef MAC
+ mac_inpcb_create_mbuf(inp, m);
+#endif
+
+ error = ip_output(m, inp->inp_options, NULL, flags,
+ inp->inp_moptions, inp);
+ INP_RUNLOCK(inp);
+ return (error);
+}
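+
+/*
+ * Userland sketch (standard sockets API, illustrative; pkt, pktlen and
+ * dst are hypothetical): a privileged process supplies its own IP header
+ * by enabling IP_HDRINCL and sending complete packets:
+ *
+ *     int s = socket(AF_INET, SOCK_RAW, IPPROTO_RAW);
+ *     int on = 1;
+ *     setsockopt(s, IPPROTO_IP, IP_HDRINCL, &on, sizeof(on));
+ *     sendto(s, pkt, pktlen, 0, (struct sockaddr *)&dst, sizeof(dst));
+ */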
+
+/*
+ * Raw IP socket option processing.
+ *
+ * IMPORTANT NOTE regarding access control: Traditionally, raw sockets could
+ * only be created by a privileged process, and as such, socket option
+ * operations to manage system properties on any raw socket were allowed to
+ * take place without explicit additional access control checks. However,
+ * raw sockets can now also be created in jail(), and therefore explicit
+ * checks are now required. Likewise, raw sockets can be used by a process
+ * after it gives up privilege, so some caution is required. For options
+ * passed down to the IP layer via ip_ctloutput(), checks are assumed to be
+ * performed in ip_ctloutput() and therefore no check occurs here.
+ * Unilaterally checking priv_check() here breaks normal IP socket option
+ * operations on raw sockets.
+ *
+ * When adding new socket options here, make sure to add access control
+ * checks here as necessary.
+ */
+int
+rip_ctloutput(struct socket *so, struct sockopt *sopt)
+{
+ struct inpcb *inp = sotoinpcb(so);
+ int error, optval;
+
+ if (sopt->sopt_level != IPPROTO_IP) {
+ if ((sopt->sopt_level == SOL_SOCKET) &&
+ (sopt->sopt_name == SO_SETFIB)) {
+ inp->inp_inc.inc_fibnum = so->so_fibnum;
+ return (0);
+ }
+ return (EINVAL);
+ }
+
+ error = 0;
+ switch (sopt->sopt_dir) {
+ case SOPT_GET:
+ switch (sopt->sopt_name) {
+ case IP_HDRINCL:
+ optval = inp->inp_flags & INP_HDRINCL;
+ error = sooptcopyout(sopt, &optval, sizeof optval);
+ break;
+
+ case IP_FW3: /* generic ipfw v.3 functions */
+ case IP_FW_ADD: /* ADD actually returns the body... */
+ case IP_FW_GET:
+ case IP_FW_TABLE_GETSIZE:
+ case IP_FW_TABLE_LIST:
+ case IP_FW_NAT_GET_CONFIG:
+ case IP_FW_NAT_GET_LOG:
+ if (V_ip_fw_ctl_ptr != NULL)
+ error = V_ip_fw_ctl_ptr(sopt);
+ else
+ error = ENOPROTOOPT;
+ break;
+
+ case IP_DUMMYNET3: /* generic dummynet v.3 functions */
+ case IP_DUMMYNET_GET:
+ if (ip_dn_ctl_ptr != NULL)
+ error = ip_dn_ctl_ptr(sopt);
+ else
+ error = ENOPROTOOPT;
+ break;
+
+ case MRT_INIT:
+ case MRT_DONE:
+ case MRT_ADD_VIF:
+ case MRT_DEL_VIF:
+ case MRT_ADD_MFC:
+ case MRT_DEL_MFC:
+ case MRT_VERSION:
+ case MRT_ASSERT:
+ case MRT_API_SUPPORT:
+ case MRT_API_CONFIG:
+ case MRT_ADD_BW_UPCALL:
+ case MRT_DEL_BW_UPCALL:
+ error = priv_check(curthread, PRIV_NETINET_MROUTE);
+ if (error != 0)
+ return (error);
+ error = ip_mrouter_get ? ip_mrouter_get(so, sopt) :
+ EOPNOTSUPP;
+ break;
+
+ default:
+ error = ip_ctloutput(so, sopt);
+ break;
+ }
+ break;
+
+ case SOPT_SET:
+ switch (sopt->sopt_name) {
+ case IP_HDRINCL:
+ error = sooptcopyin(sopt, &optval, sizeof optval,
+ sizeof optval);
+ if (error)
+ break;
+ if (optval)
+ inp->inp_flags |= INP_HDRINCL;
+ else
+ inp->inp_flags &= ~INP_HDRINCL;
+ break;
+
+ case IP_FW3: /* generic ipfw v.3 functions */
+ case IP_FW_ADD:
+ case IP_FW_DEL:
+ case IP_FW_FLUSH:
+ case IP_FW_ZERO:
+ case IP_FW_RESETLOG:
+ case IP_FW_TABLE_ADD:
+ case IP_FW_TABLE_DEL:
+ case IP_FW_TABLE_FLUSH:
+ case IP_FW_NAT_CFG:
+ case IP_FW_NAT_DEL:
+ if (V_ip_fw_ctl_ptr != NULL)
+ error = V_ip_fw_ctl_ptr(sopt);
+ else
+ error = ENOPROTOOPT;
+ break;
+
+ case IP_DUMMYNET3: /* generic dummynet v.3 functions */
+ case IP_DUMMYNET_CONFIGURE:
+ case IP_DUMMYNET_DEL:
+ case IP_DUMMYNET_FLUSH:
+ if (ip_dn_ctl_ptr != NULL)
+ error = ip_dn_ctl_ptr(sopt);
+ else
+ error = ENOPROTOOPT;
+ break;
+
+ case IP_RSVP_ON:
+ error = priv_check(curthread, PRIV_NETINET_MROUTE);
+ if (error != 0)
+ return (error);
+ error = ip_rsvp_init(so);
+ break;
+
+ case IP_RSVP_OFF:
+ error = priv_check(curthread, PRIV_NETINET_MROUTE);
+ if (error != 0)
+ return (error);
+ error = ip_rsvp_done();
+ break;
+
+ case IP_RSVP_VIF_ON:
+ case IP_RSVP_VIF_OFF:
+ error = priv_check(curthread, PRIV_NETINET_MROUTE);
+ if (error != 0)
+ return (error);
+ error = ip_rsvp_vif ?
+ ip_rsvp_vif(so, sopt) : EINVAL;
+ break;
+
+ case MRT_INIT:
+ case MRT_DONE:
+ case MRT_ADD_VIF:
+ case MRT_DEL_VIF:
+ case MRT_ADD_MFC:
+ case MRT_DEL_MFC:
+ case MRT_VERSION:
+ case MRT_ASSERT:
+ case MRT_API_SUPPORT:
+ case MRT_API_CONFIG:
+ case MRT_ADD_BW_UPCALL:
+ case MRT_DEL_BW_UPCALL:
+ error = priv_check(curthread, PRIV_NETINET_MROUTE);
+ if (error != 0)
+ return (error);
+ error = ip_mrouter_set ? ip_mrouter_set(so, sopt) :
+ EOPNOTSUPP;
+ break;
+
+ default:
+ error = ip_ctloutput(so, sopt);
+ break;
+ }
+ break;
+ }
+
+ return (error);
+}
+
+/*
+ * This function exists solely to receive the PRC_IFDOWN messages which are
+ * sent by if_down(). It looks for an ifaddr whose ifa_addr is sa, and calls
+ * in_ifadown() to remove all routes corresponding to that address. It also
+ * receives the PRC_IFUP messages from if_up() and reinstalls the interface
+ * routes.
+ */
+void
+rip_ctlinput(int cmd, struct sockaddr *sa, void *vip)
+{
+ struct in_ifaddr *ia;
+ struct ifnet *ifp;
+ int err;
+ int flags;
+
+ switch (cmd) {
+ case PRC_IFDOWN:
+ IN_IFADDR_RLOCK();
+ TAILQ_FOREACH(ia, &V_in_ifaddrhead, ia_link) {
+ if (ia->ia_ifa.ifa_addr == sa
+ && (ia->ia_flags & IFA_ROUTE)) {
+ ifa_ref(&ia->ia_ifa);
+ IN_IFADDR_RUNLOCK();
+ /*
+ * in_ifscrub kills the interface route.
+ */
+ in_ifscrub(ia->ia_ifp, ia);
+ /*
+ * in_ifadown gets rid of all the rest of the
+ * routes. This is not quite the right thing
+ * to do, but at least if we are running a
+ * routing process they will come back.
+ */
+ in_ifadown(&ia->ia_ifa, 0);
+ ifa_free(&ia->ia_ifa);
+ break;
+ }
+ }
+ if (ia == NULL) /* If ia matched, already unlocked. */
+ IN_IFADDR_RUNLOCK();
+ break;
+
+ case PRC_IFUP:
+ IN_IFADDR_RLOCK();
+ TAILQ_FOREACH(ia, &V_in_ifaddrhead, ia_link) {
+ if (ia->ia_ifa.ifa_addr == sa)
+ break;
+ }
+ if (ia == NULL || (ia->ia_flags & IFA_ROUTE)) {
+ IN_IFADDR_RUNLOCK();
+ return;
+ }
+ ifa_ref(&ia->ia_ifa);
+ IN_IFADDR_RUNLOCK();
+ flags = RTF_UP;
+ ifp = ia->ia_ifa.ifa_ifp;
+
+ if ((ifp->if_flags & IFF_LOOPBACK)
+ || (ifp->if_flags & IFF_POINTOPOINT))
+ flags |= RTF_HOST;
+
+ err = rtinit(&ia->ia_ifa, RTM_ADD, flags);
+ if (err == 0)
+ ia->ia_flags |= IFA_ROUTE;
+ err = ifa_add_loopback_route((struct ifaddr *)ia, sa);
+ ifa_free(&ia->ia_ifa);
+ break;
+ }
+}
+
+u_long rip_sendspace = 9216;
+u_long rip_recvspace = 9216;
+
+SYSCTL_ULONG(_net_inet_raw, OID_AUTO, maxdgram, CTLFLAG_RW,
+ &rip_sendspace, 0, "Maximum outgoing raw IP datagram size");
+SYSCTL_ULONG(_net_inet_raw, OID_AUTO, recvspace, CTLFLAG_RW,
+ &rip_recvspace, 0, "Maximum space for incoming raw IP datagrams");
+
+static int
+rip_attach(struct socket *so, int proto, struct thread *td)
+{
+ struct inpcb *inp;
+ int error;
+
+ inp = sotoinpcb(so);
+ KASSERT(inp == NULL, ("rip_attach: inp != NULL"));
+
+ error = priv_check(td, PRIV_NETINET_RAW);
+ if (error)
+ return (error);
+ if (proto >= IPPROTO_MAX || proto < 0)
+ return (EPROTONOSUPPORT);
+ error = soreserve(so, rip_sendspace, rip_recvspace);
+ if (error)
+ return (error);
+ INP_INFO_WLOCK(&V_ripcbinfo);
+ error = in_pcballoc(so, &V_ripcbinfo);
+ if (error) {
+ INP_INFO_WUNLOCK(&V_ripcbinfo);
+ return (error);
+ }
+ inp = (struct inpcb *)so->so_pcb;
+ inp->inp_vflag |= INP_IPV4;
+ inp->inp_ip_p = proto;
+ inp->inp_ip_ttl = V_ip_defttl;
+ rip_inshash(inp);
+ INP_INFO_WUNLOCK(&V_ripcbinfo);
+ INP_WUNLOCK(inp);
+ return (0);
+}
+
+static void
+rip_detach(struct socket *so)
+{
+ struct inpcb *inp;
+
+ inp = sotoinpcb(so);
+ KASSERT(inp != NULL, ("rip_detach: inp == NULL"));
+ KASSERT(inp->inp_faddr.s_addr == INADDR_ANY,
+ ("rip_detach: not closed"));
+
+ INP_INFO_WLOCK(&V_ripcbinfo);
+ INP_WLOCK(inp);
+ rip_delhash(inp);
+ if (so == V_ip_mrouter && ip_mrouter_done)
+ ip_mrouter_done();
+ if (ip_rsvp_force_done)
+ ip_rsvp_force_done(so);
+ if (so == V_ip_rsvpd)
+ ip_rsvp_done();
+ in_pcbdetach(inp);
+ in_pcbfree(inp);
+ INP_INFO_WUNLOCK(&V_ripcbinfo);
+}
+
+static void
+rip_dodisconnect(struct socket *so, struct inpcb *inp)
+{
+
+ INP_INFO_WLOCK_ASSERT(inp->inp_pcbinfo);
+ INP_WLOCK_ASSERT(inp);
+
+ rip_delhash(inp);
+ inp->inp_faddr.s_addr = INADDR_ANY;
+ rip_inshash(inp);
+ SOCK_LOCK(so);
+ so->so_state &= ~SS_ISCONNECTED;
+ SOCK_UNLOCK(so);
+}
+
+static void
+rip_abort(struct socket *so)
+{
+ struct inpcb *inp;
+
+ inp = sotoinpcb(so);
+ KASSERT(inp != NULL, ("rip_abort: inp == NULL"));
+
+ INP_INFO_WLOCK(&V_ripcbinfo);
+ INP_WLOCK(inp);
+ rip_dodisconnect(so, inp);
+ INP_WUNLOCK(inp);
+ INP_INFO_WUNLOCK(&V_ripcbinfo);
+}
+
+static void
+rip_close(struct socket *so)
+{
+ struct inpcb *inp;
+
+ inp = sotoinpcb(so);
+ KASSERT(inp != NULL, ("rip_close: inp == NULL"));
+
+ INP_INFO_WLOCK(&V_ripcbinfo);
+ INP_WLOCK(inp);
+ rip_dodisconnect(so, inp);
+ INP_WUNLOCK(inp);
+ INP_INFO_WUNLOCK(&V_ripcbinfo);
+}
+
+static int
+rip_disconnect(struct socket *so)
+{
+ struct inpcb *inp;
+
+ if ((so->so_state & SS_ISCONNECTED) == 0)
+ return (ENOTCONN);
+
+ inp = sotoinpcb(so);
+ KASSERT(inp != NULL, ("rip_disconnect: inp == NULL"));
+
+ INP_INFO_WLOCK(&V_ripcbinfo);
+ INP_WLOCK(inp);
+ rip_dodisconnect(so, inp);
+ INP_WUNLOCK(inp);
+ INP_INFO_WUNLOCK(&V_ripcbinfo);
+ return (0);
+}
+
+static int
+rip_bind(struct socket *so, struct sockaddr *nam, struct thread *td)
+{
+ struct sockaddr_in *addr = (struct sockaddr_in *)nam;
+ struct inpcb *inp;
+ int error;
+
+ if (nam->sa_len != sizeof(*addr))
+ return (EINVAL);
+
+ error = prison_check_ip4(td->td_ucred, &addr->sin_addr);
+ if (error != 0)
+ return (error);
+
+ inp = sotoinpcb(so);
+ KASSERT(inp != NULL, ("rip_bind: inp == NULL"));
+
+ if (TAILQ_EMPTY(&V_ifnet) ||
+ (addr->sin_family != AF_INET && addr->sin_family != AF_IMPLINK) ||
+ (addr->sin_addr.s_addr &&
+ (inp->inp_flags & INP_BINDANY) == 0 &&
+ ifa_ifwithaddr_check((struct sockaddr *)addr) == 0))
+ return (EADDRNOTAVAIL);
+
+ INP_INFO_WLOCK(&V_ripcbinfo);
+ INP_WLOCK(inp);
+ rip_delhash(inp);
+ inp->inp_laddr = addr->sin_addr;
+ rip_inshash(inp);
+ INP_WUNLOCK(inp);
+ INP_INFO_WUNLOCK(&V_ripcbinfo);
+ return (0);
+}
+
+static int
+rip_connect(struct socket *so, struct sockaddr *nam, struct thread *td)
+{
+ struct sockaddr_in *addr = (struct sockaddr_in *)nam;
+ struct inpcb *inp;
+
+ if (nam->sa_len != sizeof(*addr))
+ return (EINVAL);
+ if (TAILQ_EMPTY(&V_ifnet))
+ return (EADDRNOTAVAIL);
+ if (addr->sin_family != AF_INET && addr->sin_family != AF_IMPLINK)
+ return (EAFNOSUPPORT);
+
+ inp = sotoinpcb(so);
+ KASSERT(inp != NULL, ("rip_connect: inp == NULL"));
+
+ INP_INFO_WLOCK(&V_ripcbinfo);
+ INP_WLOCK(inp);
+ rip_delhash(inp);
+ inp->inp_faddr = addr->sin_addr;
+ rip_inshash(inp);
+ soisconnected(so);
+ INP_WUNLOCK(inp);
+ INP_INFO_WUNLOCK(&V_ripcbinfo);
+ return (0);
+}
+
+static int
+rip_shutdown(struct socket *so)
+{
+ struct inpcb *inp;
+
+ inp = sotoinpcb(so);
+ KASSERT(inp != NULL, ("rip_shutdown: inp == NULL"));
+
+ INP_WLOCK(inp);
+ socantsendmore(so);
+ INP_WUNLOCK(inp);
+ return (0);
+}
+
+static int
+rip_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *nam,
+ struct mbuf *control, struct thread *td)
+{
+ struct inpcb *inp;
+ u_long dst;
+
+ inp = sotoinpcb(so);
+ KASSERT(inp != NULL, ("rip_send: inp == NULL"));
+
+ /*
+ * Note: 'dst' reads below are unlocked.
+ */
+ if (so->so_state & SS_ISCONNECTED) {
+ if (nam) {
+ m_freem(m);
+ return (EISCONN);
+ }
+ dst = inp->inp_faddr.s_addr; /* Unlocked read. */
+ } else {
+ if (nam == NULL) {
+ m_freem(m);
+ return (ENOTCONN);
+ }
+ dst = ((struct sockaddr_in *)nam)->sin_addr.s_addr;
+ }
+ return (rip_output(m, so, dst));
+}
+
+static int
+rip_pcblist(SYSCTL_HANDLER_ARGS)
+{
+ int error, i, n;
+ struct inpcb *inp, **inp_list;
+ inp_gen_t gencnt;
+ struct xinpgen xig;
+
+ /*
+ * The process of preparing the TCB list is too time-consuming and
+ * resource-intensive to repeat twice on every request.
+ */
+ if (req->oldptr == 0) {
+ n = V_ripcbinfo.ipi_count;
+ n += imax(n / 8, 10);
+ req->oldidx = 2 * (sizeof xig) + n * sizeof(struct xinpcb);
+ return (0);
+ }
+
+ if (req->newptr != 0)
+ return (EPERM);
+
+ /*
+ * OK, now we're committed to doing something.
+ */
+ INP_INFO_RLOCK(&V_ripcbinfo);
+ gencnt = V_ripcbinfo.ipi_gencnt;
+ n = V_ripcbinfo.ipi_count;
+ INP_INFO_RUNLOCK(&V_ripcbinfo);
+
+ xig.xig_len = sizeof xig;
+ xig.xig_count = n;
+ xig.xig_gen = gencnt;
+ xig.xig_sogen = so_gencnt;
+ error = SYSCTL_OUT(req, &xig, sizeof xig);
+ if (error)
+ return (error);
+
+ inp_list = malloc(n * sizeof *inp_list, M_TEMP, M_WAITOK);
+ if (inp_list == 0)
+ return (ENOMEM);
+
+ INP_INFO_RLOCK(&V_ripcbinfo);
+ for (inp = LIST_FIRST(V_ripcbinfo.ipi_listhead), i = 0; inp && i < n;
+ inp = LIST_NEXT(inp, inp_list)) {
+ INP_WLOCK(inp);
+ if (inp->inp_gencnt <= gencnt &&
+ cr_canseeinpcb(req->td->td_ucred, inp) == 0) {
+ in_pcbref(inp);
+ inp_list[i++] = inp;
+ }
+ INP_WUNLOCK(inp);
+ }
+ INP_INFO_RUNLOCK(&V_ripcbinfo);
+ n = i;
+
+ error = 0;
+ for (i = 0; i < n; i++) {
+ inp = inp_list[i];
+ INP_RLOCK(inp);
+ if (inp->inp_gencnt <= gencnt) {
+ struct xinpcb xi;
+
+ bzero(&xi, sizeof(xi));
+ xi.xi_len = sizeof xi;
+ /* XXX should avoid extra copy */
+ bcopy(inp, &xi.xi_inp, sizeof *inp);
+ if (inp->inp_socket)
+ sotoxsocket(inp->inp_socket, &xi.xi_socket);
+ INP_RUNLOCK(inp);
+ error = SYSCTL_OUT(req, &xi, sizeof xi);
+ } else
+ INP_RUNLOCK(inp);
+ }
+ INP_INFO_WLOCK(&V_ripcbinfo);
+ for (i = 0; i < n; i++) {
+ inp = inp_list[i];
+ INP_WLOCK(inp);
+ if (!in_pcbrele(inp))
+ INP_WUNLOCK(inp);
+ }
+ INP_INFO_WUNLOCK(&V_ripcbinfo);
+
+ if (!error) {
+ /*
+ * Give the user an updated idea of our state. If the
+ * generation differs from what we told her before, she knows
+ * that something happened while we were processing this
+ * request, and it might be necessary to retry.
+ */
+ INP_INFO_RLOCK(&V_ripcbinfo);
+ xig.xig_gen = V_ripcbinfo.ipi_gencnt;
+ xig.xig_sogen = so_gencnt;
+ xig.xig_count = V_ripcbinfo.ipi_count;
+ INP_INFO_RUNLOCK(&V_ripcbinfo);
+ error = SYSCTL_OUT(req, &xig, sizeof xig);
+ }
+ free(inp_list, M_TEMP);
+ return (error);
+}
+
+SYSCTL_PROC(_net_inet_raw, OID_AUTO/*XXX*/, pcblist, CTLFLAG_RD, 0, 0,
+ rip_pcblist, "S,xinpcb", "List of active raw IP sockets");
+
+struct pr_usrreqs rip_usrreqs = {
+ .pru_abort = rip_abort,
+ .pru_attach = rip_attach,
+ .pru_bind = rip_bind,
+ .pru_connect = rip_connect,
+ .pru_control = in_control,
+ .pru_detach = rip_detach,
+ .pru_disconnect = rip_disconnect,
+ .pru_peeraddr = in_getpeeraddr,
+ .pru_send = rip_send,
+ .pru_shutdown = rip_shutdown,
+ .pru_sockaddr = in_getsockaddr,
+ .pru_sosetlabel = in_pcbsosetlabel,
+ .pru_close = rip_close,
+};
diff --git a/rtems/freebsd/netinet/sctp.h b/rtems/freebsd/netinet/sctp.h
new file mode 100644
index 00000000..da17aa3a
--- /dev/null
+++ b/rtems/freebsd/netinet/sctp.h
@@ -0,0 +1,549 @@
+/*-
+ * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+/* $KAME: sctp.h,v 1.18 2005/03/06 16:04:16 itojun Exp $ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#ifndef _NETINET_SCTP_HH_
+#define _NETINET_SCTP_HH_
+
+#include <rtems/freebsd/sys/types.h>
+
+
+#define SCTP_PACKED __attribute__((packed))
+
+/*
+ * SCTP protocol - RFC2960.
+ */
+struct sctphdr {
+ uint16_t src_port; /* source port */
+ uint16_t dest_port; /* destination port */
+ uint32_t v_tag; /* verification tag of packet */
+ uint32_t checksum; /* Adler-32 (RFC 2960); CRC32c since RFC 3309 */
+ /* chunks follow... */
+} SCTP_PACKED;
+
+/*
+ * SCTP Chunks
+ */
+struct sctp_chunkhdr {
+ uint8_t chunk_type; /* chunk type */
+ uint8_t chunk_flags; /* chunk flags */
+ uint16_t chunk_length; /* chunk length */
+ /* optional params follow */
+} SCTP_PACKED;
+
+/*
+ * SCTP chunk parameters
+ */
+struct sctp_paramhdr {
+ uint16_t param_type; /* parameter type */
+ uint16_t param_length; /* parameter length */
+} SCTP_PACKED;
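+
+/*
+ * Sketch (illustrative; sh and remaining are hypothetical locals): the
+ * chunks following the common header are walked by advancing past each
+ * chunk_length rounded up to a 4-byte boundary:
+ *
+ *     struct sctp_chunkhdr *ch = (struct sctp_chunkhdr *)(sh + 1);
+ *     while (remaining >= sizeof(struct sctp_chunkhdr)) {
+ *         uint16_t len = (ntohs(ch->chunk_length) + 3) & ~3;
+ *         ... dispatch on ch->chunk_type ...
+ *         remaining -= len;
+ *         ch = (struct sctp_chunkhdr *)((uint8_t *)ch + len);
+ *     }
+ */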
+
+/*
+ * user socket options: socket API defined
+ */
+/*
+ * read-write options
+ */
+#define SCTP_RTOINFO 0x00000001
+#define SCTP_ASSOCINFO 0x00000002
+#define SCTP_INITMSG 0x00000003
+#define SCTP_NODELAY 0x00000004
+#define SCTP_AUTOCLOSE 0x00000005
+#define SCTP_SET_PEER_PRIMARY_ADDR 0x00000006
+#define SCTP_PRIMARY_ADDR 0x00000007
+#define SCTP_ADAPTATION_LAYER 0x00000008
+/* same as above */
+#define SCTP_ADAPTION_LAYER 0x00000008
+#define SCTP_DISABLE_FRAGMENTS 0x00000009
+#define SCTP_PEER_ADDR_PARAMS 0x0000000a
+#define SCTP_DEFAULT_SEND_PARAM 0x0000000b
+/* ancillary data/notification interest options */
+#define SCTP_EVENTS 0x0000000c
+/* Without this applied we will give V4 and V6 addresses on a V6 socket */
+#define SCTP_I_WANT_MAPPED_V4_ADDR 0x0000000d
+#define SCTP_MAXSEG 0x0000000e
+#define SCTP_DELAYED_SACK 0x0000000f
+#define SCTP_FRAGMENT_INTERLEAVE 0x00000010
+#define SCTP_PARTIAL_DELIVERY_POINT 0x00000011
+/* authentication support */
+#define SCTP_AUTH_CHUNK 0x00000012
+#define SCTP_AUTH_KEY 0x00000013
+#define SCTP_HMAC_IDENT 0x00000014
+#define SCTP_AUTH_ACTIVE_KEY 0x00000015
+#define SCTP_AUTH_DELETE_KEY 0x00000016
+#define SCTP_USE_EXT_RCVINFO 0x00000017
+#define SCTP_AUTO_ASCONF 0x00000018 /* rw */
+#define SCTP_MAXBURST 0x00000019 /* rw */
+#define SCTP_MAX_BURST 0x00000019 /* rw */
+/* assoc level context */
+#define SCTP_CONTEXT 0x0000001a /* rw */
+/* explicit EOR signalling */
+#define SCTP_EXPLICIT_EOR 0x0000001b
+#define SCTP_REUSE_PORT 0x0000001c /* rw */
+#define SCTP_AUTH_DEACTIVATE_KEY 0x0000001d
+
+/*
+ * read-only options
+ */
+#define SCTP_STATUS 0x00000100
+#define SCTP_GET_PEER_ADDR_INFO 0x00000101
+/* authentication support */
+#define SCTP_PEER_AUTH_CHUNKS 0x00000102
+#define SCTP_LOCAL_AUTH_CHUNKS 0x00000103
+#define SCTP_GET_ASSOC_NUMBER 0x00000104 /* ro */
+#define SCTP_GET_ASSOC_ID_LIST 0x00000105 /* ro */
+#define SCTP_TIMEOUTS 0x00000106
+
+/*
+ * user socket options: BSD implementation specific
+ */
+/*
+ * Blocking I/O is enabled on any TCP-type socket by default.  For the UDP
+ * model, if this option is turned on, then the socket send buffer is
+ * shared amongst all associations.  The default for the UDP model is as
+ * if SS_NBIO were set: all associations have a separate send limit, but
+ * they will never block; instead you get EAGAIN back if you try to send
+ * too much.  If you want blocking semantics, set this option at the cost
+ * of sharing one socket send buffer amongst all associations.  Peeled-off
+ * sockets turn this option off and block.  Since both TCP and peeled-off
+ * sockets have only one association per socket, this is fine.  It
+ * probably does NOT make sense to set SS_NBIO on a TCP-model or
+ * peeled-off UDP-model socket, but we do allow you to do so; just use
+ * the normal syscall to toggle SS_NBIO the way you want.
+ *
+ * Blocking I/O is controlled by the SS_NBIO flag on the socket so_state
+ * field.
+ */
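+
+/*
+ * Sketch (standard sockets API, illustrative; sd is a hypothetical
+ * descriptor): SS_NBIO is toggled with the usual non-blocking I/O
+ * controls, e.g.
+ *
+ *     int on = 1;
+ *     ioctl(sd, FIONBIO, &on);
+ */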
+
+/* these should probably go into sockets API */
+#define SCTP_RESET_STREAMS 0x00001004 /* wo */
+
+
+/* here on down are more implementation specific */
+#define SCTP_SET_DEBUG_LEVEL 0x00001005
+#define SCTP_CLR_STAT_LOG 0x00001007
+/* CMT ON/OFF socket option */
+#define SCTP_CMT_ON_OFF 0x00001200
+#define SCTP_CMT_USE_DAC 0x00001201
+/* JRS - Pluggable Congestion Control Socket option */
+#define SCTP_PLUGGABLE_CC 0x00001202
+
+/* read only */
+#define SCTP_GET_SNDBUF_USE 0x00001101
+#define SCTP_GET_STAT_LOG 0x00001103
+#define SCTP_PCB_STATUS 0x00001104
+#define SCTP_GET_NONCE_VALUES 0x00001105
+
+
+/* Special hook for dynamically setting the primary for all assocs;
+ * this is a write-only option that requires root privilege.
+ */
+#define SCTP_SET_DYNAMIC_PRIMARY 0x00002001
+
+/* VRF (virtual router feature) and multi-VRF support
+ * options.  VRFs provide partitions within a router
+ * that present the views of multiple routers.  A
+ * standard host, without VRF support, is just
+ * a single VRF.  If VRFs are supported, then
+ * the transport must be VRF aware.  This means
+ * that every socket call coming in must be directed
+ * within the endpoint to one of the VRFs it belongs
+ * to.  The endpoint, before binding, may select
+ * the "default" VRF it is in by using a set socket
+ * option with SCTP_VRF_ID.  This will also
+ * get propagated to the default VRF.  Once the
+ * endpoint binds an address, it CANNOT add
+ * additional VRFs to become a multi-VRF endpoint.
+ *
+ * Before binding, additional VRFs can be added with
+ * the SCTP_ADD_VRF_ID call or deleted with
+ * SCTP_DEL_VRF_ID.
+ *
+ * Associations are ALWAYS contained inside a single
+ * VRF.  They cannot reside in two (or more) VRFs.  Incoming
+ * packets, assuming the router is VRF aware, can always
+ * tell us what VRF they arrived on.  A host not supporting
+ * any VRFs will find that packets always arrive on the
+ * single VRF that the host has.
+ */
+
+#define SCTP_VRF_ID 0x00003001
+#define SCTP_ADD_VRF_ID 0x00003002
+#define SCTP_GET_VRF_IDS 0x00003003
+#define SCTP_GET_ASOC_VRF 0x00003004
+#define SCTP_DEL_VRF_ID 0x00003005
+
+/*
+ * If you enable packet logging you can get
+ * a poor man's Ethereal output in binary
+ * form.  Note this is a compile-time option in
+ * the kernel, SCTP_PACKET_LOGGING, and
+ * without it in your kernel you
+ * will get an EOPNOTSUPP.
+ */
+#define SCTP_GET_PACKET_LOG 0x00004001
+
+/*
+ * hidden implementation specific options these are NOT user visible (should
+ * move out of sctp.h)
+ */
+/* sctp_bindx() flags as hidden socket options */
+#define SCTP_BINDX_ADD_ADDR 0x00008001
+#define SCTP_BINDX_REM_ADDR 0x00008002
+/* Hidden socket option that gets the addresses */
+#define SCTP_GET_PEER_ADDRESSES 0x00008003
+#define SCTP_GET_LOCAL_ADDRESSES 0x00008004
+/* return the total count in bytes needed to hold all local addresses bound */
+#define SCTP_GET_LOCAL_ADDR_SIZE 0x00008005
+/* Return the total count in bytes needed to hold the remote address */
+#define SCTP_GET_REMOTE_ADDR_SIZE 0x00008006
+/* hidden option for connectx */
+#define SCTP_CONNECT_X 0x00008007
+/* hidden option for connectx_delayed, part of sendx */
+#define SCTP_CONNECT_X_DELAYED 0x00008008
+#define SCTP_CONNECT_X_COMPLETE 0x00008009
+/* hidden socket option based sctp_peeloff */
+#define SCTP_PEELOFF 0x0000800a
+/* the real worker for sctp_getaddrlen() */
+#define SCTP_GET_ADDR_LEN 0x0000800b
+/* temporary workaround for Apple listen() issue, no args used */
+#define SCTP_LISTEN_FIX 0x0000800c
+/* Debug things that need to be purged */
+#define SCTP_SET_INITIAL_DBG_SEQ 0x00009f00
+
+/* JRS - Supported congestion control modules for pluggable
+ * congestion control
+ */
+/* Standard TCP Congestion Control */
+#define SCTP_CC_RFC2581 0x00000000
+/* High Speed TCP Congestion Control (Floyd) */
+#define SCTP_CC_HSTCP 0x00000001
+/* HTCP Congestion Control */
+#define SCTP_CC_HTCP 0x00000002
+
+
+/* fragment interleave constants
+ * setting must be one of these or
+ * EINVAL returned.
+ */
+#define SCTP_FRAG_LEVEL_0 0x00000000
+#define SCTP_FRAG_LEVEL_1 0x00000001
+#define SCTP_FRAG_LEVEL_2 0x00000002
+
+/*
+ * user state values
+ */
+#define SCTP_CLOSED 0x0000
+#define SCTP_BOUND 0x1000
+#define SCTP_LISTEN 0x2000
+#define SCTP_COOKIE_WAIT 0x0002
+#define SCTP_COOKIE_ECHOED 0x0004
+#define SCTP_ESTABLISHED 0x0008
+#define SCTP_SHUTDOWN_SENT 0x0010
+#define SCTP_SHUTDOWN_RECEIVED 0x0020
+#define SCTP_SHUTDOWN_ACK_SENT 0x0040
+#define SCTP_SHUTDOWN_PENDING 0x0080
+
+/*
+ * SCTP operational error codes (user visible)
+ */
+#define SCTP_CAUSE_NO_ERROR 0x0000
+#define SCTP_CAUSE_INVALID_STREAM 0x0001
+#define SCTP_CAUSE_MISSING_PARAM 0x0002
+#define SCTP_CAUSE_STALE_COOKIE 0x0003
+#define SCTP_CAUSE_OUT_OF_RESC 0x0004
+#define SCTP_CAUSE_UNRESOLVABLE_ADDR 0x0005
+#define SCTP_CAUSE_UNRECOG_CHUNK 0x0006
+#define SCTP_CAUSE_INVALID_PARAM 0x0007
+#define SCTP_CAUSE_UNRECOG_PARAM 0x0008
+#define SCTP_CAUSE_NO_USER_DATA 0x0009
+#define SCTP_CAUSE_COOKIE_IN_SHUTDOWN 0x000a
+#define SCTP_CAUSE_RESTART_W_NEWADDR 0x000b
+#define SCTP_CAUSE_USER_INITIATED_ABT 0x000c
+#define SCTP_CAUSE_PROTOCOL_VIOLATION 0x000d
+
+/* Error causes from RFC5061 */
+#define SCTP_CAUSE_DELETING_LAST_ADDR 0x00a0
+#define SCTP_CAUSE_RESOURCE_SHORTAGE 0x00a1
+#define SCTP_CAUSE_DELETING_SRC_ADDR 0x00a2
+#define SCTP_CAUSE_ILLEGAL_ASCONF_ACK 0x00a3
+#define SCTP_CAUSE_REQUEST_REFUSED 0x00a4
+
+/* Error causes from nat-draft */
+#define SCTP_CAUSE_NAT_COLLIDING_STATE 0x00b0
+#define SCTP_CAUSE_NAT_MISSING_STATE 0x00b1
+
+/* Error causes from RFC4895 */
+#define SCTP_CAUSE_UNSUPPORTED_HMACID 0x0105
+
+/*
+ * error cause parameters (user visible)
+ */
+struct sctp_error_cause {
+ uint16_t code;
+ uint16_t length;
+ /* optional cause-specific info may follow */
+} SCTP_PACKED;
+
+struct sctp_error_invalid_stream {
+ struct sctp_error_cause cause; /* code=SCTP_ERROR_INVALID_STREAM */
+ uint16_t stream_id; /* stream id of the DATA in error */
+ uint16_t reserved;
+} SCTP_PACKED;
+
+struct sctp_error_missing_param {
+ struct sctp_error_cause cause; /* code=SCTP_ERROR_MISSING_PARAM */
+ uint32_t num_missing_params; /* number of missing parameters */
+ /* uint16_t param_type's follow */
+} SCTP_PACKED;
+
+struct sctp_error_stale_cookie {
+ struct sctp_error_cause cause; /* code=SCTP_ERROR_STALE_COOKIE */
+ uint32_t stale_time; /* time in usec of staleness */
+} SCTP_PACKED;
+
+struct sctp_error_out_of_resource {
+ struct sctp_error_cause cause; /* code=SCTP_ERROR_OUT_OF_RESOURCES */
+} SCTP_PACKED;
+
+struct sctp_error_unresolv_addr {
+ struct sctp_error_cause cause; /* code=SCTP_ERROR_UNRESOLVABLE_ADDR */
+
+} SCTP_PACKED;
+
+struct sctp_error_unrecognized_chunk {
+ struct sctp_error_cause cause; /* code=SCTP_ERROR_UNRECOG_CHUNK */
+ struct sctp_chunkhdr ch;/* header from chunk in error */
+} SCTP_PACKED;
+
+/*
+ * Main SCTP chunk types we place these here so natd and f/w's in user land
+ * can find them.
+ */
+/************0x00 series ***********/
+#define SCTP_DATA 0x00
+#define SCTP_INITIATION 0x01
+#define SCTP_INITIATION_ACK 0x02
+#define SCTP_SELECTIVE_ACK 0x03
+#define SCTP_HEARTBEAT_REQUEST 0x04
+#define SCTP_HEARTBEAT_ACK 0x05
+#define SCTP_ABORT_ASSOCIATION 0x06
+#define SCTP_SHUTDOWN 0x07
+#define SCTP_SHUTDOWN_ACK 0x08
+#define SCTP_OPERATION_ERROR 0x09
+#define SCTP_COOKIE_ECHO 0x0a
+#define SCTP_COOKIE_ACK 0x0b
+#define SCTP_ECN_ECHO 0x0c
+#define SCTP_ECN_CWR 0x0d
+#define SCTP_SHUTDOWN_COMPLETE 0x0e
+/* RFC4895 */
+#define SCTP_AUTHENTICATION 0x0f
+/* EY nr_sack chunk id*/
+#define SCTP_NR_SELECTIVE_ACK 0x10
+/************0x40 series ***********/
+/************0x80 series ***********/
+/* RFC5061 */
+#define SCTP_ASCONF_ACK 0x80
+/* draft-ietf-stewart-pktdrpsctp */
+#define SCTP_PACKET_DROPPED 0x81
+/* draft-ietf-stewart-strreset-xxx */
+#define SCTP_STREAM_RESET 0x82
+
+/* RFC4820 */
+#define SCTP_PAD_CHUNK 0x84
+/************0xc0 series ***********/
+/* RFC3758 */
+#define SCTP_FORWARD_CUM_TSN 0xc0
+/* RFC5061 */
+#define SCTP_ASCONF 0xc1
+
+
+/* ABORT and SHUTDOWN COMPLETE FLAG */
+#define SCTP_HAD_NO_TCB 0x01
+
+/* Packet dropped flags */
+#define SCTP_FROM_MIDDLE_BOX SCTP_HAD_NO_TCB
+#define SCTP_BADCRC 0x02
+#define SCTP_PACKET_TRUNCATED 0x04
+
+#define SCTP_SAT_NETWORK_MIN 400 /* min ms for RTT to set satellite
+ * time */
+#define SCTP_SAT_NETWORK_BURST_INCR 2 /* how many times to multiply maxburst
+ * in sat */
+
+/* Data Chunk Specific Flags */
+#define SCTP_DATA_FRAG_MASK 0x03
+#define SCTP_DATA_MIDDLE_FRAG 0x00
+#define SCTP_DATA_LAST_FRAG 0x01
+#define SCTP_DATA_FIRST_FRAG 0x02
+#define SCTP_DATA_NOT_FRAG 0x03
+#define SCTP_DATA_UNORDERED 0x04
+#define SCTP_DATA_SACK_IMMEDIATELY 0x08
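+/*
+ * Note (added commentary): SCTP_DATA_NOT_FRAG is simply both fragment
+ * bits set (first | last), so an unfragmented, unordered DATA chunk
+ * carries chunk_flags == (SCTP_DATA_NOT_FRAG | SCTP_DATA_UNORDERED).
+ */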
+/* ECN Nonce: SACK Chunk Specific Flags */
+#define SCTP_SACK_NONCE_SUM 0x01
+
+/* CMT DAC algorithm SACK flag */
+#define SCTP_SACK_CMT_DAC 0x80
+
+/*
+ * PCB flags (in sctp_flags bitmask).
+ * Note the features and flags are meant
+ * for use by netstat.
+ */
+#define SCTP_PCB_FLAGS_UDPTYPE 0x00000001
+#define SCTP_PCB_FLAGS_TCPTYPE 0x00000002
+#define SCTP_PCB_FLAGS_BOUNDALL 0x00000004
+#define SCTP_PCB_FLAGS_ACCEPTING 0x00000008
+#define SCTP_PCB_FLAGS_UNBOUND 0x00000010
+#define SCTP_PCB_FLAGS_CLOSE_IP 0x00040000
+#define SCTP_PCB_FLAGS_WAS_CONNECTED 0x00080000
+#define SCTP_PCB_FLAGS_WAS_ABORTED 0x00100000
+/* TCP model support */
+
+#define SCTP_PCB_FLAGS_CONNECTED 0x00200000
+#define SCTP_PCB_FLAGS_IN_TCPPOOL 0x00400000
+#define SCTP_PCB_FLAGS_DONT_WAKE 0x00800000
+#define SCTP_PCB_FLAGS_WAKEOUTPUT 0x01000000
+#define SCTP_PCB_FLAGS_WAKEINPUT 0x02000000
+#define SCTP_PCB_FLAGS_BOUND_V6 0x04000000
+#define SCTP_PCB_FLAGS_BLOCKING_IO 0x08000000
+#define SCTP_PCB_FLAGS_SOCKET_GONE 0x10000000
+#define SCTP_PCB_FLAGS_SOCKET_ALLGONE 0x20000000
+#define SCTP_PCB_FLAGS_SOCKET_CANT_READ 0x40000000
+/* flags to copy to new PCB */
+#define SCTP_PCB_COPY_FLAGS (SCTP_PCB_FLAGS_BOUNDALL|\
+ SCTP_PCB_FLAGS_WAKEINPUT|\
+ SCTP_PCB_FLAGS_BOUND_V6)
+
+
+/*
+ * PCB Features (in sctp_features bitmask)
+ */
+#define SCTP_PCB_FLAGS_EXT_RCVINFO 0x00000002
+#define SCTP_PCB_FLAGS_DONOT_HEARTBEAT 0x00000004
+#define SCTP_PCB_FLAGS_FRAG_INTERLEAVE 0x00000008
+#define SCTP_PCB_FLAGS_INTERLEAVE_STRMS 0x00000010
+#define SCTP_PCB_FLAGS_DO_ASCONF 0x00000020
+#define SCTP_PCB_FLAGS_AUTO_ASCONF 0x00000040
+#define SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE 0x00000080
+/* socket options */
+#define SCTP_PCB_FLAGS_NODELAY 0x00000100
+#define SCTP_PCB_FLAGS_AUTOCLOSE 0x00000200
+#define SCTP_PCB_FLAGS_RECVDATAIOEVNT 0x00000400
+#define SCTP_PCB_FLAGS_RECVASSOCEVNT 0x00000800
+#define SCTP_PCB_FLAGS_RECVPADDREVNT 0x00001000
+#define SCTP_PCB_FLAGS_RECVPEERERR 0x00002000
+#define SCTP_PCB_FLAGS_RECVSENDFAILEVNT 0x00004000
+#define SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT 0x00008000
+#define SCTP_PCB_FLAGS_ADAPTATIONEVNT 0x00010000
+#define SCTP_PCB_FLAGS_PDAPIEVNT 0x00020000
+#define SCTP_PCB_FLAGS_AUTHEVNT 0x00040000
+#define SCTP_PCB_FLAGS_STREAM_RESETEVNT 0x00080000
+#define SCTP_PCB_FLAGS_NO_FRAGMENT 0x00100000
+#define SCTP_PCB_FLAGS_EXPLICIT_EOR 0x00400000
+#define SCTP_PCB_FLAGS_NEEDS_MAPPED_V4 0x00800000
+#define SCTP_PCB_FLAGS_MULTIPLE_ASCONFS 0x01000000
+#define SCTP_PCB_FLAGS_PORTREUSE 0x02000000
+#define SCTP_PCB_FLAGS_DRYEVNT 0x04000000
+/*-
+ * mobility_features parameters (by micchie).  Note
+ * these features are applied against the
+ * sctp_mobility_features flags, not the sctp_features
+ * flags.
+ */
+#define SCTP_MOBILITY_BASE 0x00000001
+#define SCTP_MOBILITY_FASTHANDOFF 0x00000002
+#define SCTP_MOBILITY_PRIM_DELETED 0x00000004
+
+
+#define SCTP_SMALLEST_PMTU 512 /* smallest pmtu allowed when disabling PMTU
+ * discovery */
+
+#include <rtems/freebsd/netinet/sctp_uio.h>
+
+/* This dictates the size of the packet
+ * collection buffer. This only applies
+ * if SCTP_PACKET_LOGGING is enabled in
+ * your config.
+ */
+#define SCTP_PACKET_LOG_SIZE 65536
+
+/* Maximum delays and such a user can set for options that
+ * take ms.
+ */
+#define SCTP_MAX_SACK_DELAY 500 /* per RFC4960 */
+#define SCTP_MAX_HB_INTERVAL 14400000 /* 4 hours in ms */
+#define SCTP_MAX_COOKIE_LIFE 3600000 /* 1 hour in ms */
+
+
+/* Types of logging/KTR tracing that can be enabled via the
+ * sysctl net.inet.sctp.sctp_logging. You must also enable
+ * SUBSYS tracing.
+ * Note that you must have the SCTP option in the kernel
+ * to enable these as well.
+ */
+#define SCTP_BLK_LOGGING_ENABLE 0x00000001
+#define SCTP_CWND_MONITOR_ENABLE 0x00000002
+#define SCTP_CWND_LOGGING_ENABLE 0x00000004
+#define SCTP_EARLYFR_LOGGING_ENABLE 0x00000010
+#define SCTP_FLIGHT_LOGGING_ENABLE 0x00000020
+#define SCTP_FR_LOGGING_ENABLE 0x00000040
+#define SCTP_LOCK_LOGGING_ENABLE 0x00000080
+#define SCTP_MAP_LOGGING_ENABLE 0x00000100
+#define SCTP_MBCNT_LOGGING_ENABLE 0x00000200
+#define SCTP_MBUF_LOGGING_ENABLE 0x00000400
+#define SCTP_NAGLE_LOGGING_ENABLE 0x00000800
+#define SCTP_RECV_RWND_LOGGING_ENABLE 0x00001000
+#define SCTP_RTTVAR_LOGGING_ENABLE 0x00002000
+#define SCTP_SACK_LOGGING_ENABLE 0x00004000
+#define SCTP_SACK_RWND_LOGGING_ENABLE 0x00008000
+#define SCTP_SB_LOGGING_ENABLE 0x00010000
+#define SCTP_STR_LOGGING_ENABLE 0x00020000
+#define SCTP_WAKE_LOGGING_ENABLE 0x00040000
+#define SCTP_LOG_MAXBURST_ENABLE 0x00080000
+#define SCTP_LOG_RWND_ENABLE 0x00100000
+#define SCTP_LOG_SACK_ARRIVALS_ENABLE 0x00200000
+#define SCTP_LTRACE_CHUNK_ENABLE 0x00400000
+#define SCTP_LTRACE_ERROR_ENABLE 0x00800000
+#define SCTP_LAST_PACKET_TRACING 0x01000000
+#define SCTP_THRESHOLD_LOGGING 0x02000000
+#define SCTP_LOG_AT_SEND_2_SCTP 0x04000000
+#define SCTP_LOG_AT_SEND_2_OUTQ 0x08000000
+#define SCTP_LOG_TRY_ADVANCE 0x10000000
+
+
+#undef SCTP_PACKED
+
+#endif /* !_NETINET_SCTP_HH_ */
diff --git a/rtems/freebsd/netinet/sctp_asconf.c b/rtems/freebsd/netinet/sctp_asconf.c
new file mode 100644
index 00000000..4bf8d3b5
--- /dev/null
+++ b/rtems/freebsd/netinet/sctp_asconf.c
@@ -0,0 +1,3397 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* $KAME: sctp_asconf.c,v 1.24 2005/03/06 16:04:16 itojun Exp $ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+#include <rtems/freebsd/netinet/sctp_os.h>
+#include <rtems/freebsd/netinet/sctp_var.h>
+#include <rtems/freebsd/netinet/sctp_sysctl.h>
+#include <rtems/freebsd/netinet/sctp_pcb.h>
+#include <rtems/freebsd/netinet/sctp_header.h>
+#include <rtems/freebsd/netinet/sctputil.h>
+#include <rtems/freebsd/netinet/sctp_output.h>
+#include <rtems/freebsd/netinet/sctp_asconf.h>
+#include <rtems/freebsd/netinet/sctp_timer.h>
+
+/*
+ * debug flags:
+ * SCTP_DEBUG_ASCONF1: protocol info, general info and errors
+ * SCTP_DEBUG_ASCONF2: detailed info
+ */
+#ifdef SCTP_DEBUG
+#endif /* SCTP_DEBUG */
+
+
+static void
+sctp_asconf_get_source_ip(struct mbuf *m, struct sockaddr *sa)
+{
+ struct ip *iph;
+ struct sockaddr_in *sin;
+
+#ifdef INET6
+ struct sockaddr_in6 *sin6;
+
+#endif
+
+ iph = mtod(m, struct ip *);
+ if (iph->ip_v == IPVERSION) {
+ /* IPv4 source */
+ sin = (struct sockaddr_in *)sa;
+ bzero(sin, sizeof(*sin));
+ sin->sin_family = AF_INET;
+ sin->sin_len = sizeof(struct sockaddr_in);
+ sin->sin_port = 0;
+ sin->sin_addr.s_addr = iph->ip_src.s_addr;
+ return;
+ }
+#ifdef INET6
+ else if (iph->ip_v == (IPV6_VERSION >> 4)) {
+ /* IPv6 source */
+ struct ip6_hdr *ip6;
+
+ sin6 = (struct sockaddr_in6 *)sa;
+ bzero(sin6, sizeof(*sin6));
+ sin6->sin6_family = AF_INET6;
+ sin6->sin6_len = sizeof(struct sockaddr_in6);
+ sin6->sin6_port = 0;
+ ip6 = mtod(m, struct ip6_hdr *);
+ sin6->sin6_addr = ip6->ip6_src;
+ return;
+ }
+#endif /* INET6 */
+ else
+ return;
+}
+
+/*
+ * draft-ietf-tsvwg-addip-sctp
+ *
+ * An ASCONF parameter queue exists per asoc which holds the pending address
+ * operations. Lists are updated upon receipt of ASCONF-ACK.
+ *
+ * A restricted_addrs list exists per assoc to hold local addresses that are
+ * not (yet) usable by the assoc as a source address. These addresses are
+ * either pending an ASCONF operation (and exist on the ASCONF parameter
+ * queue), or they are permanently restricted (the peer has returned an
+ * ERROR indication to an ASCONF(ADD), or the peer does not support ASCONF).
+ *
+ * Deleted addresses are always immediately removed from the lists as they will
+ * (shortly) no longer exist in the kernel. We send ASCONFs as a courtesy,
+ * only if allowed.
+ */
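+/*
+ * Lifecycle sketch (a summary of the functions below, added for
+ * clarity): a local address change calls sctp_asconf_queue_add(),
+ * which places an add/delete/set-primary parameter on the per-asoc
+ * asconf_queue; the parameter is marked "sent" when transmitted, and
+ * sctp_handle_asconf_ack() later matches the peer's reply by
+ * correlation id and retires it via sctp_asconf_process_param_ack().
+ */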
+
+/*
+ * ASCONF parameter processing.
+ * response_required: set if a reply is required (e.g. SUCCESS_REPORT).
+ * returns an mbuf containing an "error" response parameter, or NULL ("success") if ok.
+ * FIX: allocating this many mbufs on the fly is pretty inefficient...
+ */
+static struct mbuf *
+sctp_asconf_success_response(uint32_t id)
+{
+ struct mbuf *m_reply = NULL;
+ struct sctp_asconf_paramhdr *aph;
+
+ m_reply = sctp_get_mbuf_for_msg(sizeof(struct sctp_asconf_paramhdr),
+ 0, M_DONTWAIT, 1, MT_DATA);
+ if (m_reply == NULL) {
+ SCTPDBG(SCTP_DEBUG_ASCONF1,
+ "asconf_success_response: couldn't get mbuf!\n");
+ return NULL;
+ }
+ aph = mtod(m_reply, struct sctp_asconf_paramhdr *);
+ aph->correlation_id = id;
+ aph->ph.param_type = htons(SCTP_SUCCESS_REPORT);
+ aph->ph.param_length = sizeof(struct sctp_asconf_paramhdr);
+ SCTP_BUF_LEN(m_reply) = aph->ph.param_length;
+ aph->ph.param_length = htons(aph->ph.param_length);
+
+ return m_reply;
+}
+
+static struct mbuf *
+sctp_asconf_error_response(uint32_t id, uint16_t cause, uint8_t * error_tlv,
+ uint16_t tlv_length)
+{
+ struct mbuf *m_reply = NULL;
+ struct sctp_asconf_paramhdr *aph;
+ struct sctp_error_cause *error;
+ uint8_t *tlv;
+
+ m_reply = sctp_get_mbuf_for_msg((sizeof(struct sctp_asconf_paramhdr) +
+ tlv_length +
+ sizeof(struct sctp_error_cause)),
+ 0, M_DONTWAIT, 1, MT_DATA);
+ if (m_reply == NULL) {
+ SCTPDBG(SCTP_DEBUG_ASCONF1,
+ "asconf_error_response: couldn't get mbuf!\n");
+ return NULL;
+ }
+ aph = mtod(m_reply, struct sctp_asconf_paramhdr *);
+ error = (struct sctp_error_cause *)(aph + 1);
+
+ aph->correlation_id = id;
+ aph->ph.param_type = htons(SCTP_ERROR_CAUSE_IND);
+ error->code = htons(cause);
+ error->length = tlv_length + sizeof(struct sctp_error_cause);
+ aph->ph.param_length = error->length +
+ sizeof(struct sctp_asconf_paramhdr);
+
+ if (aph->ph.param_length > MLEN) {
+ SCTPDBG(SCTP_DEBUG_ASCONF1,
+ "asconf_error_response: tlv_length (%xh) too big\n",
+ tlv_length);
+ sctp_m_freem(m_reply); /* discard */
+ return NULL;
+ }
+ if (error_tlv != NULL) {
+ tlv = (uint8_t *) (error + 1);
+ memcpy(tlv, error_tlv, tlv_length);
+ }
+ SCTP_BUF_LEN(m_reply) = aph->ph.param_length;
+ error->length = htons(error->length);
+ aph->ph.param_length = htons(aph->ph.param_length);
+
+ return m_reply;
+}
+
+static struct mbuf *
+sctp_process_asconf_add_ip(struct mbuf *m, struct sctp_asconf_paramhdr *aph,
+ struct sctp_tcb *stcb, int response_required)
+{
+ struct mbuf *m_reply = NULL;
+ struct sockaddr_storage sa_source, sa_store;
+ struct sctp_ipv4addr_param *v4addr;
+ uint16_t param_type, param_length, aparam_length;
+ struct sockaddr *sa;
+ struct sockaddr_in *sin;
+ int zero_address = 0;
+
+#ifdef INET6
+ struct sockaddr_in6 *sin6;
+ struct sctp_ipv6addr_param *v6addr;
+
+#endif /* INET6 */
+
+ aparam_length = ntohs(aph->ph.param_length);
+ v4addr = (struct sctp_ipv4addr_param *)(aph + 1);
+#ifdef INET6
+ v6addr = (struct sctp_ipv6addr_param *)(aph + 1);
+#endif /* INET6 */
+ param_type = ntohs(v4addr->ph.param_type);
+ param_length = ntohs(v4addr->ph.param_length);
+
+ sa = (struct sockaddr *)&sa_store;
+ switch (param_type) {
+ case SCTP_IPV4_ADDRESS:
+ if (param_length != sizeof(struct sctp_ipv4addr_param)) {
+ /* invalid param size */
+ return NULL;
+ }
+ sin = (struct sockaddr_in *)&sa_store;
+ bzero(sin, sizeof(*sin));
+ sin->sin_family = AF_INET;
+ sin->sin_len = sizeof(struct sockaddr_in);
+ sin->sin_port = stcb->rport;
+ sin->sin_addr.s_addr = v4addr->addr;
+ if (sin->sin_addr.s_addr == INADDR_ANY)
+ zero_address = 1;
+ SCTPDBG(SCTP_DEBUG_ASCONF1, "process_asconf_add_ip: adding ");
+ SCTPDBG_ADDR(SCTP_DEBUG_ASCONF1, sa);
+ break;
+ case SCTP_IPV6_ADDRESS:
+#ifdef INET6
+ if (param_length != sizeof(struct sctp_ipv6addr_param)) {
+ /* invalid param size */
+ return NULL;
+ }
+ sin6 = (struct sockaddr_in6 *)&sa_store;
+ bzero(sin6, sizeof(*sin6));
+ sin6->sin6_family = AF_INET6;
+ sin6->sin6_len = sizeof(struct sockaddr_in6);
+ sin6->sin6_port = stcb->rport;
+ memcpy((caddr_t)&sin6->sin6_addr, v6addr->addr,
+ sizeof(struct in6_addr));
+ if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr))
+ zero_address = 1;
+ SCTPDBG(SCTP_DEBUG_ASCONF1, "process_asconf_add_ip: adding ");
+ SCTPDBG_ADDR(SCTP_DEBUG_ASCONF1, sa);
+#else
+ /* IPv6 not enabled! */
+ /* FIX ME: currently sends back an invalid param error */
+ m_reply = sctp_asconf_error_response(aph->correlation_id,
+ SCTP_CAUSE_INVALID_PARAM, (uint8_t *) aph, aparam_length);
+ SCTPDBG(SCTP_DEBUG_ASCONF1,
+ "process_asconf_add_ip: v6 disabled- skipping ");
+ SCTPDBG_ADDR(SCTP_DEBUG_ASCONF1, sa);
+ return m_reply;
+#endif
+ break;
+ default:
+ m_reply = sctp_asconf_error_response(aph->correlation_id,
+ SCTP_CAUSE_UNRESOLVABLE_ADDR, (uint8_t *) aph,
+ aparam_length);
+ return m_reply;
+ } /* end switch */
+
+ /* if 0.0.0.0/::0, add the source address instead */
+ if (zero_address && SCTP_BASE_SYSCTL(sctp_nat_friendly)) {
+ sa = (struct sockaddr *)&sa_source;
+ sctp_asconf_get_source_ip(m, sa);
+ SCTPDBG(SCTP_DEBUG_ASCONF1,
+ "process_asconf_add_ip: using source addr ");
+ SCTPDBG_ADDR(SCTP_DEBUG_ASCONF1, sa);
+ }
+ /* add the address */
+ if (sctp_add_remote_addr(stcb, sa, SCTP_DONOT_SETSCOPE,
+ SCTP_ADDR_DYNAMIC_ADDED) != 0) {
+ SCTPDBG(SCTP_DEBUG_ASCONF1,
+ "process_asconf_add_ip: error adding address\n");
+ m_reply = sctp_asconf_error_response(aph->correlation_id,
+ SCTP_CAUSE_RESOURCE_SHORTAGE, (uint8_t *) aph,
+ aparam_length);
+ } else {
+ /* notify upper layer */
+ sctp_ulp_notify(SCTP_NOTIFY_ASCONF_ADD_IP, stcb, 0, sa, SCTP_SO_NOT_LOCKED);
+ if (response_required) {
+ m_reply =
+ sctp_asconf_success_response(aph->correlation_id);
+ }
+ sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb,
+ NULL, SCTP_FROM_SCTP_ASCONF + SCTP_LOC_1);
+ sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep,
+ stcb, NULL);
+ }
+
+ return m_reply;
+}
+
+static int
+sctp_asconf_del_remote_addrs_except(struct sctp_tcb *stcb, struct sockaddr *src)
+{
+ struct sctp_nets *src_net, *net;
+
+ /* make sure the source address exists as a destination net */
+ src_net = sctp_findnet(stcb, src);
+ if (src_net == NULL) {
+ /* not found */
+ return -1;
+ }
+ /* delete all destination addresses except the source */
+ TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+ if (net != src_net) {
+ /* delete this address */
+ sctp_remove_net(stcb, net);
+ SCTPDBG(SCTP_DEBUG_ASCONF1,
+ "asconf_del_remote_addrs_except: deleting ");
+ SCTPDBG_ADDR(SCTP_DEBUG_ASCONF1,
+ (struct sockaddr *)&net->ro._l_addr);
+ /* notify upper layer */
+ sctp_ulp_notify(SCTP_NOTIFY_ASCONF_DELETE_IP, stcb, 0,
+ (struct sockaddr *)&net->ro._l_addr, SCTP_SO_NOT_LOCKED);
+ }
+ }
+ return 0;
+}
+
+static struct mbuf *
+sctp_process_asconf_delete_ip(struct mbuf *m, struct sctp_asconf_paramhdr *aph,
+ struct sctp_tcb *stcb, int response_required)
+{
+ struct mbuf *m_reply = NULL;
+ struct sockaddr_storage sa_source, sa_store;
+ struct sctp_ipv4addr_param *v4addr;
+ uint16_t param_type, param_length, aparam_length;
+ struct sockaddr *sa;
+ struct sockaddr_in *sin;
+ int zero_address = 0;
+ int result;
+
+#ifdef INET6
+ struct sockaddr_in6 *sin6;
+ struct sctp_ipv6addr_param *v6addr;
+
+#endif /* INET6 */
+
+ /* get the source IP address for src and 0.0.0.0/::0 delete checks */
+ sctp_asconf_get_source_ip(m, (struct sockaddr *)&sa_source);
+
+ aparam_length = ntohs(aph->ph.param_length);
+ v4addr = (struct sctp_ipv4addr_param *)(aph + 1);
+#ifdef INET6
+ v6addr = (struct sctp_ipv6addr_param *)(aph + 1);
+#endif /* INET6 */
+ param_type = ntohs(v4addr->ph.param_type);
+ param_length = ntohs(v4addr->ph.param_length);
+
+ sa = (struct sockaddr *)&sa_store;
+ switch (param_type) {
+ case SCTP_IPV4_ADDRESS:
+ if (param_length != sizeof(struct sctp_ipv4addr_param)) {
+ /* invalid param size */
+ return NULL;
+ }
+ sin = (struct sockaddr_in *)&sa_store;
+ bzero(sin, sizeof(*sin));
+ sin->sin_family = AF_INET;
+ sin->sin_len = sizeof(struct sockaddr_in);
+ sin->sin_port = stcb->rport;
+ sin->sin_addr.s_addr = v4addr->addr;
+ if (sin->sin_addr.s_addr == INADDR_ANY)
+ zero_address = 1;
+ SCTPDBG(SCTP_DEBUG_ASCONF1,
+ "process_asconf_delete_ip: deleting ");
+ SCTPDBG_ADDR(SCTP_DEBUG_ASCONF1, sa);
+ break;
+ case SCTP_IPV6_ADDRESS:
+ if (param_length != sizeof(struct sctp_ipv6addr_param)) {
+ /* invalid param size */
+ return NULL;
+ }
+#ifdef INET6
+ sin6 = (struct sockaddr_in6 *)&sa_store;
+ bzero(sin6, sizeof(*sin6));
+ sin6->sin6_family = AF_INET6;
+ sin6->sin6_len = sizeof(struct sockaddr_in6);
+ sin6->sin6_port = stcb->rport;
+ memcpy(&sin6->sin6_addr, v6addr->addr,
+ sizeof(struct in6_addr));
+ if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr))
+ zero_address = 1;
+ SCTPDBG(SCTP_DEBUG_ASCONF1,
+ "process_asconf_delete_ip: deleting ");
+ SCTPDBG_ADDR(SCTP_DEBUG_ASCONF1, sa);
+#else
+ /* IPv6 not enabled! No "action" needed; just ack it */
+ SCTPDBG(SCTP_DEBUG_ASCONF1,
+ "process_asconf_delete_ip: v6 disabled- ignoring: ");
+ SCTPDBG_ADDR(SCTP_DEBUG_ASCONF1, sa);
+ /* just respond with a "success" ASCONF-ACK */
+ return NULL;
+#endif
+ break;
+ default:
+ m_reply = sctp_asconf_error_response(aph->correlation_id,
+ SCTP_CAUSE_UNRESOLVABLE_ADDR, (uint8_t *) aph,
+ aparam_length);
+ return m_reply;
+ }
+
+ /* make sure the source address is not being deleted */
+ if (sctp_cmpaddr(sa, (struct sockaddr *)&sa_source)) {
+ /* trying to delete the source address! */
+ SCTPDBG(SCTP_DEBUG_ASCONF1, "process_asconf_delete_ip: tried to delete source addr\n");
+ m_reply = sctp_asconf_error_response(aph->correlation_id,
+ SCTP_CAUSE_DELETING_SRC_ADDR, (uint8_t *) aph,
+ aparam_length);
+ return m_reply;
+ }
+ /* if deleting 0.0.0.0/::0, delete all addresses except src addr */
+ if (zero_address && SCTP_BASE_SYSCTL(sctp_nat_friendly)) {
+ result = sctp_asconf_del_remote_addrs_except(stcb,
+ (struct sockaddr *)&sa_source);
+
+ if (result) {
+ /* src address did not exist? */
+ SCTPDBG(SCTP_DEBUG_ASCONF1, "process_asconf_delete_ip: src addr does not exist?\n");
+ /* what error to reply with?? */
+ m_reply =
+ sctp_asconf_error_response(aph->correlation_id,
+ SCTP_CAUSE_REQUEST_REFUSED, (uint8_t *) aph,
+ aparam_length);
+ } else if (response_required) {
+ m_reply =
+ sctp_asconf_success_response(aph->correlation_id);
+ }
+ return m_reply;
+ }
+ /* delete the address */
+ result = sctp_del_remote_addr(stcb, sa);
+ /*
+ * note if result == -2, the address doesn't exist in the asoc but
+ * since it's being deleted anyways, we just ack the delete -- but
+ * this probably means something has already gone awry
+ */
+ if (result == -1) {
+ /* only one address in the asoc */
+ SCTPDBG(SCTP_DEBUG_ASCONF1, "process_asconf_delete_ip: tried to delete last IP addr!\n");
+ m_reply = sctp_asconf_error_response(aph->correlation_id,
+ SCTP_CAUSE_DELETING_LAST_ADDR, (uint8_t *) aph,
+ aparam_length);
+ } else {
+ if (response_required) {
+ m_reply = sctp_asconf_success_response(aph->correlation_id);
+ }
+ /* notify upper layer */
+ sctp_ulp_notify(SCTP_NOTIFY_ASCONF_DELETE_IP, stcb, 0, sa, SCTP_SO_NOT_LOCKED);
+ }
+ return m_reply;
+}
+
+static struct mbuf *
+sctp_process_asconf_set_primary(struct mbuf *m,
+ struct sctp_asconf_paramhdr *aph,
+ struct sctp_tcb *stcb, int response_required)
+{
+ struct mbuf *m_reply = NULL;
+ struct sockaddr_storage sa_source, sa_store;
+ struct sctp_ipv4addr_param *v4addr;
+ uint16_t param_type, param_length, aparam_length;
+ struct sockaddr *sa;
+ struct sockaddr_in *sin;
+ int zero_address = 0;
+
+#ifdef INET6
+ struct sockaddr_in6 *sin6;
+ struct sctp_ipv6addr_param *v6addr;
+
+#endif /* INET6 */
+
+ aparam_length = ntohs(aph->ph.param_length);
+ v4addr = (struct sctp_ipv4addr_param *)(aph + 1);
+#ifdef INET6
+ v6addr = (struct sctp_ipv6addr_param *)(aph + 1);
+#endif /* INET6 */
+ param_type = ntohs(v4addr->ph.param_type);
+ param_length = ntohs(v4addr->ph.param_length);
+
+ sa = (struct sockaddr *)&sa_store;
+ switch (param_type) {
+ case SCTP_IPV4_ADDRESS:
+ if (param_length != sizeof(struct sctp_ipv4addr_param)) {
+ /* invalid param size */
+ return NULL;
+ }
+ sin = (struct sockaddr_in *)&sa_store;
+ bzero(sin, sizeof(*sin));
+ sin->sin_family = AF_INET;
+ sin->sin_len = sizeof(struct sockaddr_in);
+ sin->sin_addr.s_addr = v4addr->addr;
+ if (sin->sin_addr.s_addr == INADDR_ANY)
+ zero_address = 1;
+ SCTPDBG(SCTP_DEBUG_ASCONF1, "process_asconf_set_primary: ");
+ SCTPDBG_ADDR(SCTP_DEBUG_ASCONF1, sa);
+ break;
+ case SCTP_IPV6_ADDRESS:
+ if (param_length != sizeof(struct sctp_ipv6addr_param)) {
+ /* invalid param size */
+ return NULL;
+ }
+#ifdef INET6
+ sin6 = (struct sockaddr_in6 *)&sa_store;
+ bzero(sin6, sizeof(*sin6));
+ sin6->sin6_family = AF_INET6;
+ sin6->sin6_len = sizeof(struct sockaddr_in6);
+ memcpy((caddr_t)&sin6->sin6_addr, v6addr->addr,
+ sizeof(struct in6_addr));
+ if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr))
+ zero_address = 1;
+ SCTPDBG(SCTP_DEBUG_ASCONF1, "process_asconf_set_primary: ");
+ SCTPDBG_ADDR(SCTP_DEBUG_ASCONF1, sa);
+#else
+ /* IPv6 not enabled! No "action" needed; just ack it */
+ SCTPDBG(SCTP_DEBUG_ASCONF1,
+ "process_asconf_set_primary: v6 disabled- ignoring: ");
+ SCTPDBG_ADDR(SCTP_DEBUG_ASCONF1, sa);
+ /* just respond with a "success" ASCONF-ACK */
+ return NULL;
+#endif
+ break;
+ default:
+ m_reply = sctp_asconf_error_response(aph->correlation_id,
+ SCTP_CAUSE_UNRESOLVABLE_ADDR, (uint8_t *) aph,
+ aparam_length);
+ return m_reply;
+ }
+
+ /* if 0.0.0.0/::0, use the source address instead */
+ if (zero_address && SCTP_BASE_SYSCTL(sctp_nat_friendly)) {
+ sa = (struct sockaddr *)&sa_source;
+ sctp_asconf_get_source_ip(m, sa);
+ SCTPDBG(SCTP_DEBUG_ASCONF1,
+ "process_asconf_set_primary: using source addr ");
+ SCTPDBG_ADDR(SCTP_DEBUG_ASCONF1, sa);
+ }
+ /* set the primary address */
+ if (sctp_set_primary_addr(stcb, sa, NULL) == 0) {
+ SCTPDBG(SCTP_DEBUG_ASCONF1,
+ "process_asconf_set_primary: primary address set\n");
+ /* notify upper layer */
+ sctp_ulp_notify(SCTP_NOTIFY_ASCONF_SET_PRIMARY, stcb, 0, sa, SCTP_SO_NOT_LOCKED);
+
+ if (response_required) {
+ m_reply = sctp_asconf_success_response(aph->correlation_id);
+ }
+ /*
+ * Mobility adaptation. Ideally, when a SET PRIMARY arrives
+ * together with a DELETE IP ADDRESS of the previous primary
+ * destination, unacknowledged DATA is retransmitted
+ * immediately to the new primary destination for seamless
+ * handover. If the destination is UNCONFIRMED and marked
+ * REQ_PRIM, the retransmission occurs on reception of the
+ * HEARTBEAT-ACK. (See sctp_handle_heartbeat_ack in
+ * sctp_input.c.) Also, when the primary destination changes,
+ * it is better that all subsequent new DATA, as well as
+ * already queued DATA, is transmitted to the new primary
+ * destination. (by micchie)
+ */
+ if ((sctp_is_mobility_feature_on(stcb->sctp_ep,
+ SCTP_MOBILITY_BASE) ||
+ sctp_is_mobility_feature_on(stcb->sctp_ep,
+ SCTP_MOBILITY_FASTHANDOFF)) &&
+ sctp_is_mobility_feature_on(stcb->sctp_ep,
+ SCTP_MOBILITY_PRIM_DELETED) &&
+ (stcb->asoc.primary_destination->dest_state &
+ SCTP_ADDR_UNCONFIRMED) == 0) {
+
+ sctp_timer_stop(SCTP_TIMER_TYPE_PRIM_DELETED, stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_TIMER + SCTP_LOC_7);
+ if (sctp_is_mobility_feature_on(stcb->sctp_ep,
+ SCTP_MOBILITY_FASTHANDOFF)) {
+ sctp_assoc_immediate_retrans(stcb,
+ stcb->asoc.primary_destination);
+ }
+ if (sctp_is_mobility_feature_on(stcb->sctp_ep,
+ SCTP_MOBILITY_BASE)) {
+ sctp_move_chunks_from_net(stcb,
+ stcb->asoc.deleted_primary);
+ }
+ sctp_delete_prim_timer(stcb->sctp_ep, stcb,
+ stcb->asoc.deleted_primary);
+ }
+ } else {
+ /* couldn't set the requested primary address! */
+ SCTPDBG(SCTP_DEBUG_ASCONF1,
+ "process_asconf_set_primary: set primary failed!\n");
+ /* must have been an invalid address, so report */
+ m_reply = sctp_asconf_error_response(aph->correlation_id,
+ SCTP_CAUSE_UNRESOLVABLE_ADDR, (uint8_t *) aph,
+ aparam_length);
+ }
+
+ return m_reply;
+}
+
+/*
+ * handles an ASCONF chunk.
+ * if all parameters are processed ok, send a plain (empty) ASCONF-ACK
+ */
+void
+sctp_handle_asconf(struct mbuf *m, unsigned int offset,
+ struct sctp_asconf_chunk *cp, struct sctp_tcb *stcb,
+ int first)
+{
+ struct sctp_association *asoc;
+ uint32_t serial_num;
+ struct mbuf *n, *m_ack, *m_result, *m_tail;
+ struct sctp_asconf_ack_chunk *ack_cp;
+ struct sctp_asconf_paramhdr *aph, *ack_aph;
+ struct sctp_ipv6addr_param *p_addr;
+ unsigned int asconf_limit;
+ int error = 0; /* did an error occur? */
+
+ /* asconf param buffer */
+ uint8_t aparam_buf[SCTP_PARAM_BUFFER_SIZE];
+ struct sctp_asconf_ack *ack, *ack_next;
+
+ /* verify minimum length */
+ if (ntohs(cp->ch.chunk_length) < sizeof(struct sctp_asconf_chunk)) {
+ SCTPDBG(SCTP_DEBUG_ASCONF1,
+ "handle_asconf: chunk too small = %xh\n",
+ ntohs(cp->ch.chunk_length));
+ return;
+ }
+ asoc = &stcb->asoc;
+ serial_num = ntohl(cp->serial_number);
+
+ if (compare_with_wrap(asoc->asconf_seq_in, serial_num, MAX_SEQ) ||
+ serial_num == asoc->asconf_seq_in) {
+ /* got a duplicate ASCONF */
+ SCTPDBG(SCTP_DEBUG_ASCONF1,
+ "handle_asconf: got duplicate serial number = %xh\n",
+ serial_num);
+ return;
+ } else if (serial_num != (asoc->asconf_seq_in + 1)) {
+ SCTPDBG(SCTP_DEBUG_ASCONF1, "handle_asconf: incorrect serial number = %xh (expected next = %xh)\n",
+ serial_num, asoc->asconf_seq_in + 1);
+ return;
+ }
+ /* it's the expected "next" sequence number, so process it */
+ asoc->asconf_seq_in = serial_num; /* update sequence */
+ /* get length of all the param's in the ASCONF */
+ asconf_limit = offset + ntohs(cp->ch.chunk_length);
+ SCTPDBG(SCTP_DEBUG_ASCONF1,
+ "handle_asconf: asconf_limit=%u, sequence=%xh\n",
+ asconf_limit, serial_num);
+
+ if (first) {
+ /* delete old cache */
+ SCTPDBG(SCTP_DEBUG_ASCONF1, "handle_asconf: Now processing firstASCONF. Try to delte old cache\n");
+
+ ack = TAILQ_FIRST(&stcb->asoc.asconf_ack_sent);
+ while (ack != NULL) {
+ ack_next = TAILQ_NEXT(ack, next);
+ if (ack->serial_number == serial_num)
+ break;
+ SCTPDBG(SCTP_DEBUG_ASCONF1, "handle_asconf: delete old(%u) < first(%u)\n",
+ ack->serial_number, serial_num);
+ TAILQ_REMOVE(&stcb->asoc.asconf_ack_sent, ack, next);
+ if (ack->data != NULL) {
+ sctp_m_freem(ack->data);
+ }
+ SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_asconf_ack), ack);
+ ack = ack_next;
+ }
+ }
+ m_ack = sctp_get_mbuf_for_msg(sizeof(struct sctp_asconf_ack_chunk), 0,
+ M_DONTWAIT, 1, MT_DATA);
+ if (m_ack == NULL) {
+ SCTPDBG(SCTP_DEBUG_ASCONF1,
+ "handle_asconf: couldn't get mbuf!\n");
+ return;
+ }
+ m_tail = m_ack; /* current reply chain's tail */
+
+ /* fill in ASCONF-ACK header */
+ ack_cp = mtod(m_ack, struct sctp_asconf_ack_chunk *);
+ ack_cp->ch.chunk_type = SCTP_ASCONF_ACK;
+ ack_cp->ch.chunk_flags = 0;
+ ack_cp->serial_number = htonl(serial_num);
+ /* set initial lengths (eg. just an ASCONF-ACK), ntohx at the end! */
+ SCTP_BUF_LEN(m_ack) = sizeof(struct sctp_asconf_ack_chunk);
+ ack_cp->ch.chunk_length = sizeof(struct sctp_asconf_ack_chunk);
+
+ /* skip the lookup address parameter */
+ offset += sizeof(struct sctp_asconf_chunk);
+ p_addr = (struct sctp_ipv6addr_param *)sctp_m_getptr(m, offset, sizeof(struct sctp_paramhdr), (uint8_t *) & aparam_buf);
+ if (p_addr == NULL) {
+ SCTPDBG(SCTP_DEBUG_ASCONF1,
+ "handle_asconf: couldn't get lookup addr!\n");
+ /* respond with a missing/invalid mandatory parameter error */
+ return;
+ }
+ /* param_length is already validated in process_control... */
+ offset += ntohs(p_addr->ph.param_length); /* skip lookup addr */
+
+ /* get pointer to first asconf param in ASCONF-ACK */
+ ack_aph = (struct sctp_asconf_paramhdr *)(mtod(m_ack, caddr_t)+sizeof(struct sctp_asconf_ack_chunk));
+ if (ack_aph == NULL) {
+ SCTPDBG(SCTP_DEBUG_ASCONF1, "Gak in asconf2\n");
+ return;
+ }
+ /* get pointer to first asconf param in ASCONF */
+ aph = (struct sctp_asconf_paramhdr *)sctp_m_getptr(m, offset, sizeof(struct sctp_asconf_paramhdr), (uint8_t *) & aparam_buf);
+ if (aph == NULL) {
+ SCTPDBG(SCTP_DEBUG_ASCONF1, "Empty ASCONF received?\n");
+ goto send_reply;
+ }
+ /* process through all parameters */
+ while (aph != NULL) {
+ unsigned int param_length, param_type;
+
+ param_type = ntohs(aph->ph.param_type);
+ param_length = ntohs(aph->ph.param_length);
+ if (offset + param_length > asconf_limit) {
+ /* parameter goes beyond end of chunk! */
+ sctp_m_freem(m_ack);
+ return;
+ }
+ m_result = NULL;
+
+ if (param_length > sizeof(aparam_buf)) {
+ SCTPDBG(SCTP_DEBUG_ASCONF1, "handle_asconf: param length (%u) larger than buffer size!\n", param_length);
+ sctp_m_freem(m_ack);
+ return;
+ }
+ if (param_length <= sizeof(struct sctp_paramhdr)) {
+ SCTPDBG(SCTP_DEBUG_ASCONF1, "handle_asconf: param length (%u) too short\n", param_length);
+ sctp_m_freem(m_ack);
+ /* m_ack was freed; stop here to avoid a use-after-free */
+ return;
+ }
+ /* get the entire parameter */
+ aph = (struct sctp_asconf_paramhdr *)sctp_m_getptr(m, offset, param_length, aparam_buf);
+ if (aph == NULL) {
+ SCTPDBG(SCTP_DEBUG_ASCONF1, "handle_asconf: couldn't get entire param\n");
+ sctp_m_freem(m_ack);
+ return;
+ }
+ switch (param_type) {
+ case SCTP_ADD_IP_ADDRESS:
+ asoc->peer_supports_asconf = 1;
+ m_result = sctp_process_asconf_add_ip(m, aph, stcb,
+ error);
+ break;
+ case SCTP_DEL_IP_ADDRESS:
+ asoc->peer_supports_asconf = 1;
+ m_result = sctp_process_asconf_delete_ip(m, aph, stcb,
+ error);
+ break;
+ case SCTP_ERROR_CAUSE_IND:
+ /* not valid in an ASCONF chunk */
+ break;
+ case SCTP_SET_PRIM_ADDR:
+ asoc->peer_supports_asconf = 1;
+ m_result = sctp_process_asconf_set_primary(m, aph,
+ stcb, error);
+ break;
+ case SCTP_NAT_VTAGS:
+ SCTPDBG(SCTP_DEBUG_ASCONF1, "handle_asconf: sees a NAT VTAG state parameter\n");
+ break;
+ case SCTP_SUCCESS_REPORT:
+ /* not valid in an ASCONF chunk */
+ break;
+ case SCTP_ULP_ADAPTATION:
+ /* FIX */
+ break;
+ default:
+ if ((param_type & 0x8000) == 0) {
+ /* Been told to STOP at this param */
+ asconf_limit = offset;
+ /*
+ * FIX FIX - We need to call
+ * sctp_arethere_unrecognized_parameters()
+ * to get an operr and send it for any
+ * params with the 0x4000 bit set OR do it
+ * here ourselves... note we still must STOP
+ * if the 0x8000 bit is clear.
+ */
+ }
+ /* unknown/invalid param type */
+ break;
+ } /* switch */
+
+ /* add any (error) result to the reply mbuf chain */
+ if (m_result != NULL) {
+ SCTP_BUF_NEXT(m_tail) = m_result;
+ m_tail = m_result;
+ /* update lengths, make sure it's aligned too */
+ SCTP_BUF_LEN(m_result) = SCTP_SIZE32(SCTP_BUF_LEN(m_result));
+ ack_cp->ch.chunk_length += SCTP_BUF_LEN(m_result);
+ /* set flag to force success reports */
+ error = 1;
+ }
+ offset += SCTP_SIZE32(param_length);
+ /* update remaining ASCONF message length to process */
+ if (offset >= asconf_limit) {
+ /* no more data in the mbuf chain */
+ break;
+ }
+ /* get pointer to next asconf param */
+ aph = (struct sctp_asconf_paramhdr *)sctp_m_getptr(m, offset,
+ sizeof(struct sctp_asconf_paramhdr),
+ (uint8_t *) & aparam_buf);
+ if (aph == NULL) {
+ /* can't get an asconf paramhdr */
+ SCTPDBG(SCTP_DEBUG_ASCONF1, "handle_asconf: can't get asconf param hdr!\n");
+ /* FIX ME - add error here... */
+ }
+ }
+
+send_reply:
+ ack_cp->ch.chunk_length = htons(ack_cp->ch.chunk_length);
+ /* save the ASCONF-ACK reply */
+ ack = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_asconf_ack),
+ struct sctp_asconf_ack);
+ if (ack == NULL) {
+ sctp_m_freem(m_ack);
+ return;
+ }
+ ack->serial_number = serial_num;
+ ack->last_sent_to = NULL;
+ ack->data = m_ack;
+ ack->len = 0;
+ n = m_ack;
+ while (n) {
+ ack->len += SCTP_BUF_LEN(n);
+ n = SCTP_BUF_NEXT(n);
+ }
+ TAILQ_INSERT_TAIL(&stcb->asoc.asconf_ack_sent, ack, next);
+
+ /* see if last_control_chunk_from is set properly (use IP src addr) */
+ if (stcb->asoc.last_control_chunk_from == NULL) {
+ /*
+ * this could happen if the source address was just newly
+ * added
+ */
+ struct ip *iph;
+ struct sctphdr *sh;
+ struct sockaddr_storage from_store;
+ struct sockaddr *from = (struct sockaddr *)&from_store;
+
+ SCTPDBG(SCTP_DEBUG_ASCONF1, "handle_asconf: looking up net for IP source address\n");
+ /* pullup already done, IP options already stripped */
+ iph = mtod(m, struct ip *);
+ sh = (struct sctphdr *)((caddr_t)iph + sizeof(*iph));
+ switch (iph->ip_v) {
+ case IPVERSION:
+ {
+ struct sockaddr_in *from4;
+
+ from4 = (struct sockaddr_in *)&from_store;
+ bzero(from4, sizeof(*from4));
+ from4->sin_family = AF_INET;
+ from4->sin_len = sizeof(struct sockaddr_in);
+ from4->sin_addr.s_addr = iph->ip_src.s_addr;
+ from4->sin_port = sh->src_port;
+ break;
+ }
+#ifdef INET6
+ case IPV6_VERSION >> 4:
+ {
+ struct ip6_hdr *ip6;
+ struct sockaddr_in6 *from6;
+
+ ip6 = mtod(m, struct ip6_hdr *);
+ from6 = (struct sockaddr_in6 *)&from_store;
+ bzero(from6, sizeof(*from6));
+ from6->sin6_family = AF_INET6;
+ from6->sin6_len = sizeof(struct sockaddr_in6);
+ from6->sin6_addr = ip6->ip6_src;
+ from6->sin6_port = sh->src_port;
+ /*
+ * Get the scopes in properly to the sin6
+ * addr's
+ */
+ /* we probably don't need these operations */
+ (void)sa6_recoverscope(from6);
+ sa6_embedscope(from6,
+ MODULE_GLOBAL(ip6_use_defzone));
+
+ break;
+ }
+#endif
+ default:
+ /* unknown address type */
+ from = NULL;
+ }
+ if (from != NULL) {
+ SCTPDBG(SCTP_DEBUG_ASCONF1, "Looking for IP source: ");
+ SCTPDBG_ADDR(SCTP_DEBUG_ASCONF1, from);
+ /* look up the from address */
+ stcb->asoc.last_control_chunk_from = sctp_findnet(stcb, from);
+#ifdef SCTP_DEBUG
+ if (stcb->asoc.last_control_chunk_from == NULL)
+ SCTPDBG(SCTP_DEBUG_ASCONF1, "handle_asconf: IP source address not found?!\n");
+#endif
+ }
+ }
+}
+
+/*
+ * does the address match? returns 0 if not, 1 if so
+ */
+static uint32_t
+sctp_asconf_addr_match(struct sctp_asconf_addr *aa, struct sockaddr *sa)
+{
+#ifdef INET6
+ if (sa->sa_family == AF_INET6) {
+ /* IPv6 sa address */
+ /* XXX scopeid */
+ struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sa;
+
+ if ((aa->ap.addrp.ph.param_type == SCTP_IPV6_ADDRESS) &&
+ (memcmp(&aa->ap.addrp.addr, &sin6->sin6_addr,
+ sizeof(struct in6_addr)) == 0)) {
+ return (1);
+ }
+ } else
+#endif /* INET6 */
+ if (sa->sa_family == AF_INET) {
+ /* IPv4 sa address */
+ struct sockaddr_in *sin = (struct sockaddr_in *)sa;
+
+ if ((aa->ap.addrp.ph.param_type == SCTP_IPV4_ADDRESS) &&
+ (memcmp(&aa->ap.addrp.addr, &sin->sin_addr,
+ sizeof(struct in_addr)) == 0)) {
+ return (1);
+ }
+ }
+ return (0);
+}
+
+/*
+ * does the address match? returns 0 if not, 1 if so
+ */
+static uint32_t
+sctp_addr_match(
+ struct sctp_ipv6addr_param *v6addr,
+ struct sockaddr *sa)
+{
+ uint16_t param_type, param_length;
+ struct sctp_ipv4addr_param *v4addr = (struct sctp_ipv4addr_param *)v6addr;
+
+#ifdef INET6
+ if (sa->sa_family == AF_INET6) {
+ /* IPv6 sa address */
+ /* XXX scopeid */
+ struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sa;
+
+ param_type = ntohs(v6addr->ph.param_type);
+ param_length = ntohs(v6addr->ph.param_length);
+
+ if ((param_type == SCTP_IPV6_ADDRESS) &&
+ param_length == sizeof(struct sctp_ipv6addr_param) &&
+ (memcmp(&v6addr->addr, &sin6->sin6_addr,
+ sizeof(struct in6_addr)) == 0)) {
+ return (1);
+ }
+ }
+#endif
+ if (sa->sa_family == AF_INET) {
+ /* IPv4 sa address */
+ struct sockaddr_in *sin = (struct sockaddr_in *)sa;
+
+ param_type = ntohs(v4addr->ph.param_type);
+ param_length = ntohs(v4addr->ph.param_length);
+
+ if ((param_type == SCTP_IPV4_ADDRESS) &&
+ param_length == sizeof(struct sctp_ipv4addr_param) &&
+ (memcmp(&v4addr->addr, &sin->sin_addr,
+ sizeof(struct in_addr)) == 0)) {
+ return (1);
+ }
+ }
+ return (0);
+}
+
+/*
+ * Cleanup for non-responded/OP ERR'd ASCONF
+ */
+void
+sctp_asconf_cleanup(struct sctp_tcb *stcb, struct sctp_nets *net)
+{
+ /* mark peer as ASCONF incapable */
+ stcb->asoc.peer_supports_asconf = 0;
+ /*
+ * clear out any existing asconfs going out
+ */
+ sctp_timer_stop(SCTP_TIMER_TYPE_ASCONF, stcb->sctp_ep, stcb, net,
+ SCTP_FROM_SCTP_ASCONF + SCTP_LOC_2);
+ stcb->asoc.asconf_seq_out_acked = stcb->asoc.asconf_seq_out;
+ /* remove the old ASCONF on our outbound queue */
+ sctp_toss_old_asconf(stcb);
+}
+
+/*
+ * cleanup any cached source addresses that may be topologically
+ * incorrect after a new address has been added to this interface.
+ */
+static void
+sctp_asconf_nets_cleanup(struct sctp_tcb *stcb, struct sctp_ifn *ifn)
+{
+ struct sctp_nets *net;
+
+ /*
+ * Ideally, we want to only clear cached routes and source addresses
+ * that are topologically incorrect. But since there is no easy way
+ * to know whether the newly added address on the ifn would cause a
+ * routing change (i.e. a new egress interface would be chosen)
+ * without doing a new routing lookup and source address selection,
+ * we will (for now) just flush any cached route using a different
+ * ifn (and cached source addrs) and let output re-choose them
+ * during the next send on that net.
+ */
+ TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+ /*
+ * clear any cached route (and cached source address) if the
+ * route's interface is NOT the same as the address change.
+ * If it's the same interface, just clear the cached source
+ * address.
+ */
+ if (SCTP_ROUTE_HAS_VALID_IFN(&net->ro) &&
+ ((ifn == NULL) ||
+ (SCTP_GET_IF_INDEX_FROM_ROUTE(&net->ro) != ifn->ifn_index))) {
+ /* clear any cached route */
+ RTFREE(net->ro.ro_rt);
+ net->ro.ro_rt = NULL;
+ }
+ /* clear any cached source address */
+ if (net->src_addr_selected) {
+ sctp_free_ifa(net->ro._s_addr);
+ net->ro._s_addr = NULL;
+ net->src_addr_selected = 0;
+ }
+ }
+}
+
+
+void
+sctp_assoc_immediate_retrans(struct sctp_tcb *stcb, struct sctp_nets *dstnet)
+{
+ int error;
+
+ if (dstnet->dest_state & SCTP_ADDR_UNCONFIRMED) {
+ return;
+ }
+ if (stcb->asoc.deleted_primary == NULL) {
+ return;
+ }
+ if (!TAILQ_EMPTY(&stcb->asoc.sent_queue)) {
+ SCTPDBG(SCTP_DEBUG_ASCONF1, "assoc_immediate_retrans: Deleted primary is ");
+ SCTPDBG_ADDR(SCTP_DEBUG_ASCONF1, &stcb->asoc.deleted_primary->ro._l_addr.sa);
+ SCTPDBG(SCTP_DEBUG_ASCONF1, "Current Primary is ");
+ SCTPDBG_ADDR(SCTP_DEBUG_ASCONF1, &stcb->asoc.primary_destination->ro._l_addr.sa);
+ sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb,
+ stcb->asoc.deleted_primary,
+ SCTP_FROM_SCTP_TIMER + SCTP_LOC_8);
+ stcb->asoc.num_send_timers_up--;
+ if (stcb->asoc.num_send_timers_up < 0) {
+ stcb->asoc.num_send_timers_up = 0;
+ }
+ SCTP_TCB_LOCK_ASSERT(stcb);
+ error = sctp_t3rxt_timer(stcb->sctp_ep, stcb,
+ stcb->asoc.deleted_primary);
+ if (error) {
+ SCTP_INP_DECR_REF(stcb->sctp_ep);
+ return;
+ }
+ SCTP_TCB_LOCK_ASSERT(stcb);
+#ifdef SCTP_AUDITING_ENABLED
+ sctp_auditing(4, stcb->sctp_ep, stcb, stcb->asoc.deleted_primary);
+#endif
+ sctp_chunk_output(stcb->sctp_ep, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
+ if ((stcb->asoc.num_send_timers_up == 0) &&
+ (stcb->asoc.sent_queue_cnt > 0)) {
+ struct sctp_tmit_chunk *chk;
+
+ chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
+ sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
+ stcb, chk->whoTo);
+ }
+ }
+ return;
+}
+
+static int
+sctp_asconf_queue_mgmt(struct sctp_tcb *, struct sctp_ifa *, uint16_t);
+
+void
+sctp_net_immediate_retrans(struct sctp_tcb *stcb, struct sctp_nets *net)
+{
+ struct sctp_tmit_chunk *chk;
+
+ SCTPDBG(SCTP_DEBUG_ASCONF1, "net_immediate_retrans: RTO is %d\n", net->RTO);
+ sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net,
+ SCTP_FROM_SCTP_TIMER + SCTP_LOC_5);
+ stcb->asoc.cc_functions.sctp_set_initial_cc_param(stcb, net);
+ net->error_count = 0;
+ TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
+ if (chk->whoTo == net) {
+ if (chk->sent < SCTP_DATAGRAM_RESEND) {
+ chk->sent = SCTP_DATAGRAM_RESEND;
+ sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
+ sctp_flight_size_decrease(chk);
+ sctp_total_flight_decrease(stcb, chk);
+ net->marked_retrans++;
+ stcb->asoc.marked_retrans++;
+ }
+ }
+ }
+ if (net->marked_retrans) {
+ sctp_chunk_output(stcb->sctp_ep, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
+ }
+}
+
+static void
+sctp_path_check_and_react(struct sctp_tcb *stcb, struct sctp_ifa *newifa)
+{
+ struct sctp_nets *net;
+ int addrnum, changed;
+
+ /*
+ * If the number of valid local addresses is 1, that address is
+ * probably the newly added one. Otherwise several valid
+ * addresses exist in this association; the source address may
+ * not have changed, and addresses may also be configured on the
+ * same interface as "alias" addresses. (by micchie)
+ */
+ addrnum = sctp_local_addr_count(stcb);
+ SCTPDBG(SCTP_DEBUG_ASCONF1, "p_check_react(): %d local addresses\n",
+ addrnum);
+ if (addrnum == 1) {
+ TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+ /* clear any cached route and source address */
+ if (net->ro.ro_rt) {
+ RTFREE(net->ro.ro_rt);
+ net->ro.ro_rt = NULL;
+ }
+ if (net->src_addr_selected) {
+ sctp_free_ifa(net->ro._s_addr);
+ net->ro._s_addr = NULL;
+ net->src_addr_selected = 0;
+ }
+ /* Retransmit unacknowledged DATA chunks immediately */
+ if (sctp_is_mobility_feature_on(stcb->sctp_ep,
+ SCTP_MOBILITY_FASTHANDOFF)) {
+ sctp_net_immediate_retrans(stcb, net);
+ }
+ /* also, SET PRIMARY is maybe already sent */
+ }
+ return;
+ }
+ /* Multiple local addresses exist in the association. */
+ TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+ /* clear any cached route and source address */
+ if (net->ro.ro_rt) {
+ RTFREE(net->ro.ro_rt);
+ net->ro.ro_rt = NULL;
+ }
+ if (net->src_addr_selected) {
+ sctp_free_ifa(net->ro._s_addr);
+ net->ro._s_addr = NULL;
+ net->src_addr_selected = 0;
+ }
+ /*
+ * Check whether the nexthop corresponds to the new address.
+ * If the new address corresponds to the current nexthop, the
+ * path will be changed; otherwise the path is left unchanged.
+ */
+ SCTP_RTALLOC((sctp_route_t *) & net->ro,
+ stcb->sctp_ep->def_vrf_id);
+ if (net->ro.ro_rt == NULL)
+ continue;
+
+ changed = 0;
+ if (net->ro._l_addr.sa.sa_family == AF_INET) {
+ if (sctp_v4src_match_nexthop(newifa, (sctp_route_t *) & net->ro))
+ changed = 1;
+ }
+#ifdef INET6
+ if (net->ro._l_addr.sa.sa_family == AF_INET6) {
+ if (sctp_v6src_match_nexthop(
+ &newifa->address.sin6, (sctp_route_t *) & net->ro))
+ changed = 1;
+ }
+#endif
+ /*
+ * if the newly added address does not affect the routing
+ * (nexthop) information, we skip it.
+ */
+ if (changed == 0)
+ continue;
+ /* Retransmit unacknowledged DATA chunks immediately */
+ if (sctp_is_mobility_feature_on(stcb->sctp_ep,
+ SCTP_MOBILITY_FASTHANDOFF)) {
+ sctp_net_immediate_retrans(stcb, net);
+ }
+ /* Send SET PRIMARY for this new address */
+ if (net == stcb->asoc.primary_destination) {
+ (void)sctp_asconf_queue_mgmt(stcb, newifa,
+ SCTP_SET_PRIM_ADDR);
+ }
+ }
+}
+
+/*
+ * process an ADD/DELETE IP ack from peer.
+ * addr: corresponding sctp_ifa to the address being added/deleted.
+ * type: SCTP_ADD_IP_ADDRESS or SCTP_DEL_IP_ADDRESS.
+ * flag: 1=success, 0=failure.
+ */
+static void
+sctp_asconf_addr_mgmt_ack(struct sctp_tcb *stcb, struct sctp_ifa *addr,
+ uint16_t type, uint32_t flag)
+{
+ /*
+ * do the necessary asoc list work- if we get a failure indication,
+ * leave the address on the assoc's restricted list. If we get a
+ * success indication, remove the address from the restricted list.
+ */
+ /*
+ * Note: this will only occur for ADD_IP_ADDRESS, since
+ * DEL_IP_ADDRESS is never actually added to the list...
+ */
+ if (flag) {
+ /* success case, so remove from the restricted list */
+ sctp_del_local_addr_restricted(stcb, addr);
+
+ if (sctp_is_mobility_feature_on(stcb->sctp_ep,
+ SCTP_MOBILITY_BASE) ||
+ sctp_is_mobility_feature_on(stcb->sctp_ep,
+ SCTP_MOBILITY_FASTHANDOFF)) {
+ sctp_path_check_and_react(stcb, addr);
+ return;
+ }
+ /* clear any cached/topologically incorrect source addresses */
+ sctp_asconf_nets_cleanup(stcb, addr->ifn_p);
+ }
+ /* else, leave it on the list */
+}
+
+/*
+ * add an asconf add/delete/set primary IP address parameter to the queue.
+ * type = SCTP_ADD_IP_ADDRESS, SCTP_DEL_IP_ADDRESS, SCTP_SET_PRIM_ADDR.
+ * returns 0 if queued, -1 if not queued/removed.
+ * NOTE: if adding, but a delete for the same address is already scheduled
+ * (and not yet sent out), simply remove it from queue. Same for deleting
+ * an address already scheduled for add. If a duplicate operation is found,
+ * ignore the new one.
+ */
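+/*
+ * Worked example (added for clarity; the address is illustrative): if
+ * ADD(192.0.2.1) is queued but not yet sent and a DEL(192.0.2.1)
+ * request arrives, the pending ADD is simply removed from the queue
+ * and nothing is transmitted; a duplicate request of the same type
+ * against an unsent entry is ignored (returns -1).
+ */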
+static int
+sctp_asconf_queue_mgmt(struct sctp_tcb *stcb, struct sctp_ifa *ifa,
+ uint16_t type)
+{
+ struct sctp_asconf_addr *aa, *aa_next;
+ struct sockaddr *sa;
+
+ /* make sure the request isn't already in the queue */
+ for (aa = TAILQ_FIRST(&stcb->asoc.asconf_queue); aa != NULL;
+ aa = aa_next) {
+ aa_next = TAILQ_NEXT(aa, next);
+ /* address match? */
+ if (sctp_asconf_addr_match(aa, &ifa->address.sa) == 0)
+ continue;
+ /*
+ * Is the request already in the queue but not yet sent?
+ * Skip over requests already sent, to handle the following
+ * case: 1. an ADD arrives and is sent; 2. a DEL arrives (we
+ * can't remove the ADD request already sent); 3. another
+ * ADD arrives.
+ */
+ if (aa->ap.aph.ph.param_type == type && aa->sent == 0) {
+ return (-1);
+ }
+ /* is the negative request already in queue, and not sent */
+ if ((aa->sent == 0) && (type == SCTP_ADD_IP_ADDRESS) &&
+ (aa->ap.aph.ph.param_type == SCTP_DEL_IP_ADDRESS)) {
+ /* add requested, delete already queued */
+ TAILQ_REMOVE(&stcb->asoc.asconf_queue, aa, next);
+ /* remove the ifa from the restricted list */
+ sctp_del_local_addr_restricted(stcb, ifa);
+ /* free the asconf param */
+ SCTP_FREE(aa, SCTP_M_ASC_ADDR);
+ SCTPDBG(SCTP_DEBUG_ASCONF2, "asconf_queue_mgmt: add removes queued entry\n");
+ return (-1);
+ }
+ if ((aa->sent == 0) && (type == SCTP_DEL_IP_ADDRESS) &&
+ (aa->ap.aph.ph.param_type == SCTP_ADD_IP_ADDRESS)) {
+ /* delete requested, add already queued */
+ TAILQ_REMOVE(&stcb->asoc.asconf_queue, aa, next);
+ /* remove the aa->ifa from the restricted list */
+ sctp_del_local_addr_restricted(stcb, aa->ifa);
+ /* free the asconf param */
+ SCTP_FREE(aa, SCTP_M_ASC_ADDR);
+ SCTPDBG(SCTP_DEBUG_ASCONF2, "asconf_queue_mgmt: delete removes queued entry\n");
+ return (-1);
+ }
+ } /* for each aa */
+
+ /* adding new request to the queue */
+ SCTP_MALLOC(aa, struct sctp_asconf_addr *, sizeof(*aa),
+ SCTP_M_ASC_ADDR);
+ if (aa == NULL) {
+ /* didn't get memory */
+ SCTPDBG(SCTP_DEBUG_ASCONF1, "asconf_queue_mgmt: failed to get memory!\n");
+ return (-1);
+ }
+ aa->special_del = 0;
+ /* fill in asconf address parameter fields */
+ /* top level elements are "networked" during send */
+ aa->ap.aph.ph.param_type = type;
+ aa->ifa = ifa;
+ atomic_add_int(&ifa->refcount, 1);
+ /* correlation_id filled in during send routine later... */
+ if (ifa->address.sa.sa_family == AF_INET6) {
+ /* IPv6 address */
+ struct sockaddr_in6 *sin6;
+
+ sin6 = (struct sockaddr_in6 *)&ifa->address.sa;
+ sa = (struct sockaddr *)sin6;
+ aa->ap.addrp.ph.param_type = SCTP_IPV6_ADDRESS;
+ aa->ap.addrp.ph.param_length = (sizeof(struct sctp_ipv6addr_param));
+ aa->ap.aph.ph.param_length = sizeof(struct sctp_asconf_paramhdr) +
+ sizeof(struct sctp_ipv6addr_param);
+ memcpy(&aa->ap.addrp.addr, &sin6->sin6_addr,
+ sizeof(struct in6_addr));
+ } else if (ifa->address.sa.sa_family == AF_INET) {
+ /* IPv4 address */
+ struct sockaddr_in *sin;
+
+ sin = (struct sockaddr_in *)&ifa->address.sa;
+ sa = (struct sockaddr *)sin;
+ aa->ap.addrp.ph.param_type = SCTP_IPV4_ADDRESS;
+ aa->ap.addrp.ph.param_length = (sizeof(struct sctp_ipv4addr_param));
+ aa->ap.aph.ph.param_length = sizeof(struct sctp_asconf_paramhdr) +
+ sizeof(struct sctp_ipv4addr_param);
+ memcpy(&aa->ap.addrp.addr, &sin->sin_addr,
+ sizeof(struct in_addr));
+ } else {
+ /* invalid family! */
+ SCTP_FREE(aa, SCTP_M_ASC_ADDR);
+ sctp_free_ifa(ifa);
+ return (-1);
+ }
+ aa->sent = 0; /* clear sent flag */
+
+ TAILQ_INSERT_TAIL(&stcb->asoc.asconf_queue, aa, next);
+#ifdef SCTP_DEBUG
+ if (SCTP_BASE_SYSCTL(sctp_debug_on) && SCTP_DEBUG_ASCONF2) {
+ if (type == SCTP_ADD_IP_ADDRESS) {
+ SCTP_PRINTF("asconf_queue_mgmt: inserted asconf ADD_IP_ADDRESS: ");
+ SCTPDBG_ADDR(SCTP_DEBUG_ASCONF2, sa);
+ } else if (type == SCTP_DEL_IP_ADDRESS) {
+ SCTP_PRINTF("asconf_queue_mgmt: appended asconf DEL_IP_ADDRESS: ");
+ SCTPDBG_ADDR(SCTP_DEBUG_ASCONF2, sa);
+ } else {
+ SCTP_PRINTF("asconf_queue_mgmt: appended asconf SET_PRIM_ADDR: ");
+ SCTPDBG_ADDR(SCTP_DEBUG_ASCONF2, sa);
+ }
+ }
+#endif
+
+ return (0);
+}
+
+
+/*
+ * add an asconf operation for the given ifa and type.
+ * type = SCTP_ADD_IP_ADDRESS, SCTP_DEL_IP_ADDRESS, SCTP_SET_PRIM_ADDR.
+ * returns 0 if completed, -1 if not completed, 1 if immediate send is
+ * advisable.
+ */
+static int
+sctp_asconf_queue_add(struct sctp_tcb *stcb, struct sctp_ifa *ifa,
+ uint16_t type)
+{
+ uint32_t status;
+ int pending_delete_queued = 0;
+
+ /* see if peer supports ASCONF */
+ if (stcb->asoc.peer_supports_asconf == 0) {
+ return (-1);
+ }
+ /*
+ * if this is deleting the last address from the assoc, mark it as
+ * pending.
+ */
+ if ((type == SCTP_DEL_IP_ADDRESS) && !stcb->asoc.asconf_del_pending &&
+ (sctp_local_addr_count(stcb) < 2)) {
+ /* set the pending delete info only */
+ stcb->asoc.asconf_del_pending = 1;
+ stcb->asoc.asconf_addr_del_pending = ifa;
+ atomic_add_int(&ifa->refcount, 1);
+ SCTPDBG(SCTP_DEBUG_ASCONF2,
+ "asconf_queue_add: mark delete last address pending\n");
+ return (-1);
+ }
+ /* queue an asconf parameter */
+ status = sctp_asconf_queue_mgmt(stcb, ifa, type);
+
+ /*
+ * if this is an add, and there is a delete also pending (i.e. the
+ * last local address is being changed), queue the pending delete
+ * too.
+ */
+ if ((type == SCTP_ADD_IP_ADDRESS) && stcb->asoc.asconf_del_pending && (status == 0)) {
+ /* queue in the pending delete */
+ if (sctp_asconf_queue_mgmt(stcb,
+ stcb->asoc.asconf_addr_del_pending,
+ SCTP_DEL_IP_ADDRESS) == 0) {
+ SCTPDBG(SCTP_DEBUG_ASCONF2, "asconf_queue_add: queing pending delete\n");
+ pending_delete_queued = 1;
+ /* clear out the pending delete info */
+ stcb->asoc.asconf_del_pending = 0;
+ sctp_free_ifa(stcb->asoc.asconf_addr_del_pending);
+ stcb->asoc.asconf_addr_del_pending = NULL;
+ }
+ }
+ if (pending_delete_queued) {
+ struct sctp_nets *net;
+
+ /*
+ * since we know that the only/last address is now being
+ * changed in this case, reset the cwnd/rto on all nets to
+ * start as a new address and path. Also clear the error
+ * counts to give the assoc the best chance to complete the
+ * address change.
+ */
+ TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+ stcb->asoc.cc_functions.sctp_set_initial_cc_param(stcb,
+ net);
+ net->RTO = 0;
+ net->error_count = 0;
+ }
+ stcb->asoc.overall_error_count = 0;
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
+ sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
+ stcb->asoc.overall_error_count,
+ 0,
+ SCTP_FROM_SCTP_ASCONF,
+ __LINE__);
+ }
+ /* queue in an advisory set primary too */
+ (void)sctp_asconf_queue_mgmt(stcb, ifa, SCTP_SET_PRIM_ADDR);
+ /* let caller know we should send this out immediately */
+ status = 1;
+ }
+ return (status);
+}
+
+/*-
+ * add an asconf delete IP address parameter to the queue by sockaddr and
+ * possibly with no sctp_ifa available. This is only called by the routine
+ * that checks the addresses in an INIT-ACK against the current address list.
+ * returns 0 if completed, non-zero if not completed.
+ * NOTE: if an add is already scheduled (and not yet sent out), simply
+ * remove it from queue. If a duplicate operation is found, ignore the
+ * new one.
+ */
+static int
+sctp_asconf_queue_sa_delete(struct sctp_tcb *stcb, struct sockaddr *sa)
+{
+ struct sctp_ifa *ifa;
+ struct sctp_asconf_addr *aa, *aa_next;
+ uint32_t vrf_id;
+
+ if (stcb == NULL) {
+ return (-1);
+ }
+ /* see if peer supports ASCONF */
+ if (stcb->asoc.peer_supports_asconf == 0) {
+ return (-1);
+ }
+ /* make sure the request isn't already in the queue */
+ for (aa = TAILQ_FIRST(&stcb->asoc.asconf_queue); aa != NULL;
+ aa = aa_next) {
+ aa_next = TAILQ_NEXT(aa, next);
+ /* address match? */
+ if (sctp_asconf_addr_match(aa, sa) == 0)
+ continue;
+ /* is the request already in queue (sent or not) */
+ if (aa->ap.aph.ph.param_type == SCTP_DEL_IP_ADDRESS) {
+ return (-1);
+ }
+ /* is the negative request already in queue, and not sent */
+ if (aa->sent == 1)
+ continue;
+ if (aa->ap.aph.ph.param_type == SCTP_ADD_IP_ADDRESS) {
+ /* add already queued, so remove existing entry */
+ TAILQ_REMOVE(&stcb->asoc.asconf_queue, aa, next);
+ sctp_del_local_addr_restricted(stcb, aa->ifa);
+ /* free the entry */
+ SCTP_FREE(aa, SCTP_M_ASC_ADDR);
+ return (-1);
+ }
+ } /* for each aa */
+
+ /* find any existing ifa-- NOTE ifa CAN be allowed to be NULL */
+ if (stcb) {
+ vrf_id = stcb->asoc.vrf_id;
+ } else {
+ vrf_id = SCTP_DEFAULT_VRFID;
+ }
+ ifa = sctp_find_ifa_by_addr(sa, vrf_id, SCTP_ADDR_NOT_LOCKED);
+
+ /* adding new request to the queue */
+ SCTP_MALLOC(aa, struct sctp_asconf_addr *, sizeof(*aa),
+ SCTP_M_ASC_ADDR);
+ if (aa == NULL) {
+ /* didn't get memory */
+ SCTPDBG(SCTP_DEBUG_ASCONF1,
+ "sctp_asconf_queue_sa_delete: failed to get memory!\n");
+ return (-1);
+ }
+ aa->special_del = 0;
+ /* fill in asconf address parameter fields */
+ /* top level elements are "networked" during send */
+ aa->ap.aph.ph.param_type = SCTP_DEL_IP_ADDRESS;
+ aa->ifa = ifa;
+ if (ifa)
+ atomic_add_int(&ifa->refcount, 1);
+ /* correlation_id filled in during send routine later... */
+ if (sa->sa_family == AF_INET6) {
+ /* IPv6 address */
+ struct sockaddr_in6 *sin6;
+
+ sin6 = (struct sockaddr_in6 *)sa;
+ aa->ap.addrp.ph.param_type = SCTP_IPV6_ADDRESS;
+ aa->ap.addrp.ph.param_length = (sizeof(struct sctp_ipv6addr_param));
+ aa->ap.aph.ph.param_length = sizeof(struct sctp_asconf_paramhdr) + sizeof(struct sctp_ipv6addr_param);
+ memcpy(&aa->ap.addrp.addr, &sin6->sin6_addr,
+ sizeof(struct in6_addr));
+ } else if (sa->sa_family == AF_INET) {
+ /* IPv4 address */
+ struct sockaddr_in *sin = (struct sockaddr_in *)sa;
+
+ aa->ap.addrp.ph.param_type = SCTP_IPV4_ADDRESS;
+ aa->ap.addrp.ph.param_length = (sizeof(struct sctp_ipv4addr_param));
+ aa->ap.aph.ph.param_length = sizeof(struct sctp_asconf_paramhdr) + sizeof(struct sctp_ipv4addr_param);
+ memcpy(&aa->ap.addrp.addr, &sin->sin_addr,
+ sizeof(struct in_addr));
+ } else {
+ /* invalid family! */
+ SCTP_FREE(aa, SCTP_M_ASC_ADDR);
+ if (ifa)
+ sctp_free_ifa(ifa);
+ return (-1);
+ }
+ aa->sent = 0; /* clear sent flag */
+
+ /* delete goes to the back of the queue */
+ TAILQ_INSERT_TAIL(&stcb->asoc.asconf_queue, aa, next);
+
+ /* sa_ignore MEMLEAK {memory is put on the tailq} */
+ return (0);
+}
+
+/*
+ * find a specific asconf param on our "sent" queue
+ */
+static struct sctp_asconf_addr *
+sctp_asconf_find_param(struct sctp_tcb *stcb, uint32_t correlation_id)
+{
+ struct sctp_asconf_addr *aa;
+
+ TAILQ_FOREACH(aa, &stcb->asoc.asconf_queue, next) {
+ if (aa->ap.aph.correlation_id == correlation_id &&
+ aa->sent == 1) {
+ /* found it */
+ return (aa);
+ }
+ }
+ /* didn't find it */
+ return (NULL);
+}
+
+/*
+ * process an SCTP_ERROR_CAUSE_IND for a ASCONF-ACK parameter and do
+ * notifications based on the error response
+ */
+static void
+sctp_asconf_process_error(struct sctp_tcb *stcb,
+ struct sctp_asconf_paramhdr *aph)
+{
+ struct sctp_error_cause *eh;
+ struct sctp_paramhdr *ph;
+ uint16_t param_type;
+ uint16_t error_code;
+
+ eh = (struct sctp_error_cause *)(aph + 1);
+ ph = (struct sctp_paramhdr *)(eh + 1);
+ /* validate lengths */
+ if (htons(eh->length) + sizeof(struct sctp_error_cause) >
+ htons(aph->ph.param_length)) {
+ /* invalid error cause length */
+ SCTPDBG(SCTP_DEBUG_ASCONF1,
+ "asconf_process_error: cause element too long\n");
+ return;
+ }
+ if (htons(ph->param_length) + sizeof(struct sctp_paramhdr) >
+ htons(eh->length)) {
+ /* invalid included TLV length */
+ SCTPDBG(SCTP_DEBUG_ASCONF1,
+ "asconf_process_error: included TLV too long\n");
+ return;
+ }
+ /* which error code ? */
+ error_code = ntohs(eh->code);
+ param_type = ntohs(aph->ph.param_type);
+ /* FIX: this should go back up the REMOTE_ERROR ULP notify */
+ switch (error_code) {
+ case SCTP_CAUSE_RESOURCE_SHORTAGE:
+ /* we allow ourselves to "try again" for this error */
+ break;
+ default:
+ /* peer can't handle it... */
+ switch (param_type) {
+ case SCTP_ADD_IP_ADDRESS:
+ case SCTP_DEL_IP_ADDRESS:
+ stcb->asoc.peer_supports_asconf = 0;
+ break;
+ case SCTP_SET_PRIM_ADDR:
+ stcb->asoc.peer_supports_asconf = 0;
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+/*
+ * process an asconf queue param.
+ * aparam: parameter to process, will be removed from the queue.
+ * flag: 1=success case, 0=failure case
+ */
+static void
+sctp_asconf_process_param_ack(struct sctp_tcb *stcb,
+ struct sctp_asconf_addr *aparam, uint32_t flag)
+{
+ uint16_t param_type;
+
+ /* process this param */
+ param_type = aparam->ap.aph.ph.param_type;
+ switch (param_type) {
+ case SCTP_ADD_IP_ADDRESS:
+ SCTPDBG(SCTP_DEBUG_ASCONF1,
+ "process_param_ack: added IP address\n");
+ sctp_asconf_addr_mgmt_ack(stcb, aparam->ifa, param_type, flag);
+ break;
+ case SCTP_DEL_IP_ADDRESS:
+ SCTPDBG(SCTP_DEBUG_ASCONF1,
+ "process_param_ack: deleted IP address\n");
+ /* nothing really to do... lists already updated */
+ break;
+ case SCTP_SET_PRIM_ADDR:
+ SCTPDBG(SCTP_DEBUG_ASCONF1,
+ "process_param_ack: set primary IP address\n");
+ /* nothing to do... peer may start using this addr */
+ if (flag == 0)
+ stcb->asoc.peer_supports_asconf = 0;
+ break;
+ default:
+ /* should NEVER happen */
+ break;
+ }
+
+ /* remove the param and free it */
+ TAILQ_REMOVE(&stcb->asoc.asconf_queue, aparam, next);
+ if (aparam->ifa)
+ sctp_free_ifa(aparam->ifa);
+ SCTP_FREE(aparam, SCTP_M_ASC_ADDR);
+}
+
+/*
+ * cleanup from a bad asconf ack parameter
+ */
+static void
+sctp_asconf_ack_clear(struct sctp_tcb *stcb)
+{
+ /* assume peer doesn't really know how to do asconfs */
+ stcb->asoc.peer_supports_asconf = 0;
+ /* XXX we could free the pending queue here */
+}
+
+void
+sctp_handle_asconf_ack(struct mbuf *m, int offset,
+ struct sctp_asconf_ack_chunk *cp, struct sctp_tcb *stcb,
+ struct sctp_nets *net, int *abort_no_unlock)
+{
+ struct sctp_association *asoc;
+ uint32_t serial_num;
+ uint16_t ack_length;
+ struct sctp_asconf_paramhdr *aph;
+ struct sctp_asconf_addr *aa, *aa_next;
+ uint32_t last_error_id = 0; /* last error correlation id */
+ uint32_t id;
+ struct sctp_asconf_addr *ap;
+
+ /* asconf param buffer */
+ uint8_t aparam_buf[SCTP_PARAM_BUFFER_SIZE];
+
+ /* verify minimum length */
+ if (ntohs(cp->ch.chunk_length) < sizeof(struct sctp_asconf_ack_chunk)) {
+ SCTPDBG(SCTP_DEBUG_ASCONF1,
+ "handle_asconf_ack: chunk too small = %xh\n",
+ ntohs(cp->ch.chunk_length));
+ return;
+ }
+ asoc = &stcb->asoc;
+ serial_num = ntohl(cp->serial_number);
+
+ /*
+ * NOTE: we may want to handle this differently. Currently, we will
+ * abort when we get an ack for the expected serial number + 1 (e.g.
+ * we didn't send it), process an ack normally if it is the expected
+ * serial number, and re-send the previous ack for *ALL* other
+ * serial numbers
+ */
+
+ /*
+ * if the serial number is the next expected, but I didn't send it,
+ * abort the asoc, since someone probably just hijacked us...
+ */
+ if (serial_num == (asoc->asconf_seq_out + 1)) {
+ SCTPDBG(SCTP_DEBUG_ASCONF1, "handle_asconf_ack: got unexpected next serial number! Aborting asoc!\n");
+ sctp_abort_an_association(stcb->sctp_ep, stcb,
+ SCTP_CAUSE_ILLEGAL_ASCONF_ACK, NULL, SCTP_SO_NOT_LOCKED);
+ *abort_no_unlock = 1;
+ return;
+ }
+ if (serial_num != asoc->asconf_seq_out_acked + 1) {
+ /* got a duplicate/unexpected ASCONF-ACK */
+ SCTPDBG(SCTP_DEBUG_ASCONF1, "handle_asconf_ack: got duplicate/unexpected serial number = %xh (expected = %xh)\n",
+ serial_num, asoc->asconf_seq_out_acked + 1);
+ return;
+ }
+ if (serial_num == asoc->asconf_seq_out - 1) {
+ /* stop our timer */
+ sctp_timer_stop(SCTP_TIMER_TYPE_ASCONF, stcb->sctp_ep, stcb, net,
+ SCTP_FROM_SCTP_ASCONF + SCTP_LOC_3);
+ }
+ /* process the ASCONF-ACK contents */
+ ack_length = ntohs(cp->ch.chunk_length) -
+ sizeof(struct sctp_asconf_ack_chunk);
+ offset += sizeof(struct sctp_asconf_ack_chunk);
+ /* process through all parameters */
+ while (ack_length >= sizeof(struct sctp_asconf_paramhdr)) {
+ unsigned int param_length, param_type;
+
+ /* get pointer to next asconf parameter */
+ aph = (struct sctp_asconf_paramhdr *)sctp_m_getptr(m, offset,
+ sizeof(struct sctp_asconf_paramhdr), aparam_buf);
+ if (aph == NULL) {
+ /* can't get an asconf paramhdr */
+ sctp_asconf_ack_clear(stcb);
+ return;
+ }
+ param_type = ntohs(aph->ph.param_type);
+ param_length = ntohs(aph->ph.param_length);
+ if (param_length > ack_length) {
+ sctp_asconf_ack_clear(stcb);
+ return;
+ }
+ if (param_length < sizeof(struct sctp_paramhdr)) {
+ sctp_asconf_ack_clear(stcb);
+ return;
+ }
+ /* get the complete parameter... */
+ if (param_length > sizeof(aparam_buf)) {
+ SCTPDBG(SCTP_DEBUG_ASCONF1,
+ "param length (%u) larger than buffer size!\n", param_length);
+ sctp_asconf_ack_clear(stcb);
+ return;
+ }
+ aph = (struct sctp_asconf_paramhdr *)sctp_m_getptr(m, offset, param_length, aparam_buf);
+ if (aph == NULL) {
+ sctp_asconf_ack_clear(stcb);
+ return;
+ }
+ /* correlation_id is transparent to peer, no ntohl needed */
+ id = aph->correlation_id;
+
+ switch (param_type) {
+ case SCTP_ERROR_CAUSE_IND:
+ last_error_id = id;
+ /* find the corresponding asconf param in our queue */
+ ap = sctp_asconf_find_param(stcb, id);
+ if (ap == NULL) {
+ /* hmm... can't find this in our queue! */
+ break;
+ }
+ /* process the parameter, failed flag */
+ sctp_asconf_process_param_ack(stcb, ap, 0);
+ /* process the error response */
+ sctp_asconf_process_error(stcb, aph);
+ break;
+ case SCTP_SUCCESS_REPORT:
+ /* find the corresponding asconf param in our queue */
+ ap = sctp_asconf_find_param(stcb, id);
+ if (ap == NULL) {
+ /* hmm... can't find this in our queue! */
+ break;
+ }
+ /* process the parameter, success flag */
+ sctp_asconf_process_param_ack(stcb, ap, 1);
+ break;
+ default:
+ break;
+ } /* switch */
+
+ /* update remaining ASCONF-ACK message length to process */
+ ack_length -= SCTP_SIZE32(param_length);
+ if (ack_length <= 0) {
+ /* no more data in the mbuf chain */
+ break;
+ }
+ offset += SCTP_SIZE32(param_length);
+ } /* while */
+
+	/*
+	 * if there are any "sent" params still on the queue, these are
+	 * implicitly "success" or "failed" (if we got an error back), so
+	 * process them appropriately.
+	 *
+	 * we assume that the correlation_id's are monotonically increasing
+	 * beginning from 1 and that we don't have *that* many outstanding
+	 * at any given time
+	 */
+	if (last_error_id == 0)
+		last_error_id--;	/* unsigned wrap: sets it to the "max" value */
+ for (aa = TAILQ_FIRST(&stcb->asoc.asconf_queue); aa != NULL;
+ aa = aa_next) {
+ aa_next = TAILQ_NEXT(aa, next);
+ if (aa->sent == 1) {
+ /*
+ * implicitly successful or failed if correlation_id
+ * < last_error_id, then success else, failure
+ */
+ if (aa->ap.aph.correlation_id < last_error_id)
+ sctp_asconf_process_param_ack(stcb, aa, 1);
+ else
+ sctp_asconf_process_param_ack(stcb, aa, 0);
+ } else {
+ /*
+ * since we always process in order (FIFO queue) if
+ * we reach one that hasn't been sent, the rest
+ * should not have been sent either. so, we're
+ * done...
+ */
+ break;
+ }
+ }
+
+ /* update the next sequence number to use */
+ asoc->asconf_seq_out_acked++;
+ /* remove the old ASCONF on our outbound queue */
+ sctp_toss_old_asconf(stcb);
+ if (!TAILQ_EMPTY(&stcb->asoc.asconf_queue)) {
+#ifdef SCTP_TIMER_BASED_ASCONF
+ /* we have more params, so restart our timer */
+ sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, stcb->sctp_ep,
+ stcb, net);
+#else
+ /* we have more params, so send out more */
+ sctp_send_asconf(stcb, net, SCTP_ADDR_NOT_LOCKED);
+#endif
+ }
+}
+
+#ifdef INET6
+static uint32_t
+sctp_is_scopeid_in_nets(struct sctp_tcb *stcb, struct sockaddr *sa)
+{
+ struct sockaddr_in6 *sin6, *net6;
+ struct sctp_nets *net;
+
+ if (sa->sa_family != AF_INET6) {
+ /* wrong family */
+ return (0);
+ }
+ sin6 = (struct sockaddr_in6 *)sa;
+ if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr) == 0) {
+ /* not link local address */
+ return (0);
+ }
+ /* hunt through our destination nets list for this scope_id */
+ TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+ if (((struct sockaddr *)(&net->ro._l_addr))->sa_family !=
+ AF_INET6)
+ continue;
+ net6 = (struct sockaddr_in6 *)&net->ro._l_addr;
+ if (IN6_IS_ADDR_LINKLOCAL(&net6->sin6_addr) == 0)
+ continue;
+ if (sctp_is_same_scope(sin6, net6)) {
+ /* found one */
+ return (1);
+ }
+ }
+ /* didn't find one */
+ return (0);
+}
+
+#endif
+
+/*
+ * address management functions
+ */
+static void
+sctp_addr_mgmt_assoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
+ struct sctp_ifa *ifa, uint16_t type, int addr_locked)
+{
+ int status;
+
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) == 0 &&
+ sctp_is_feature_off(inp, SCTP_PCB_FLAGS_DO_ASCONF)) {
+ /* subset bound, no ASCONF allowed case, so ignore */
+ return;
+ }
+ /*
+	 * note: we know this is not the "subset bound, no ASCONF" case;
+	 * i.e. this is boundall or subset bound w/ASCONF allowed
+ */
+
+ /* first, make sure it's a good address family */
+ if (ifa->address.sa.sa_family != AF_INET6 &&
+ ifa->address.sa.sa_family != AF_INET) {
+ return;
+ }
+ /* make sure we're "allowed" to add this type of addr */
+ if (ifa->address.sa.sa_family == AF_INET6) {
+ /* invalid if we're not a v6 endpoint */
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0)
+ return;
+ /* is the v6 addr really valid ? */
+ if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
+ return;
+ }
+ }
+ /* put this address on the "pending/do not use yet" list */
+ sctp_add_local_addr_restricted(stcb, ifa);
+ /*
+	 * check address scope: if the address is out of scope, don't queue
+	 * anything... note: this would leave the address on both the inp
+	 * and asoc lists
+ */
+ switch (ifa->address.sa.sa_family) {
+#ifdef INET6
+ case AF_INET6:
+ {
+ struct sockaddr_in6 *sin6;
+
+ sin6 = (struct sockaddr_in6 *)&ifa->address.sin6;
+ if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
+				/* we skip unspecified addresses */
+ return;
+ }
+ if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
+ if (stcb->asoc.local_scope == 0) {
+ return;
+ }
+ /* is it the right link local scope? */
+ if (sctp_is_scopeid_in_nets(stcb, &ifa->address.sa) == 0) {
+ return;
+ }
+ }
+ if (stcb->asoc.site_scope == 0 &&
+ IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr)) {
+ return;
+ }
+ break;
+ }
+#endif
+ case AF_INET:
+ {
+ struct sockaddr_in *sin;
+ struct in6pcb *inp6;
+
+ inp6 = (struct in6pcb *)&inp->ip_inp.inp;
+ /* invalid if we are a v6 only endpoint */
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
+ SCTP_IPV6_V6ONLY(inp6))
+ return;
+
+ sin = (struct sockaddr_in *)&ifa->address.sa;
+ if (sin->sin_addr.s_addr == 0) {
+				/* we skip unspecified addresses */
+ return;
+ }
+ if (stcb->asoc.ipv4_local_scope == 0 &&
+ IN4_ISPRIVATE_ADDRESS(&sin->sin_addr)) {
+ return;
+ }
+ break;
+ }
+ default:
+ /* else, not AF_INET or AF_INET6, so skip */
+ return;
+ }
+
+ /* queue an asconf for this address add/delete */
+ if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_DO_ASCONF)) {
+ /* does the peer do asconf? */
+ if (stcb->asoc.peer_supports_asconf) {
+ /* queue an asconf for this addr */
+ status = sctp_asconf_queue_add(stcb, ifa, type);
+
+ /*
+ * if queued ok, and in the open state, send out the
+ * ASCONF. If in the non-open state, these will be
+ * sent when the state goes open.
+ */
+ if (status == 0 &&
+ SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) {
+#ifdef SCTP_TIMER_BASED_ASCONF
+ sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp,
+ stcb, stcb->asoc.primary_destination);
+#else
+ sctp_send_asconf(stcb, stcb->asoc.primary_destination,
+ addr_locked);
+#endif
+ }
+ }
+ }
+}
+
+
+int
+sctp_asconf_iterator_ep(struct sctp_inpcb *inp, void *ptr, uint32_t val)
+{
+ struct sctp_asconf_iterator *asc;
+ struct sctp_ifa *ifa;
+ struct sctp_laddr *l;
+ int cnt_invalid = 0;
+
+ asc = (struct sctp_asconf_iterator *)ptr;
+ LIST_FOREACH(l, &asc->list_of_work, sctp_nxt_addr) {
+ ifa = l->ifa;
+ if (ifa->address.sa.sa_family == AF_INET6) {
+ /* invalid if we're not a v6 endpoint */
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
+ cnt_invalid++;
+ if (asc->cnt == cnt_invalid)
+ return (1);
+ else
+ continue;
+ }
+ } else if (ifa->address.sa.sa_family == AF_INET) {
+ /* invalid if we are a v6 only endpoint */
+ struct in6pcb *inp6;
+
+ inp6 = (struct in6pcb *)&inp->ip_inp.inp;
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
+ SCTP_IPV6_V6ONLY(inp6)) {
+ cnt_invalid++;
+ if (asc->cnt == cnt_invalid)
+ return (1);
+ else
+ continue;
+ }
+ } else {
+ /* invalid address family */
+ cnt_invalid++;
+ if (asc->cnt == cnt_invalid)
+ return (1);
+ else
+ continue;
+ }
+ }
+ return (0);
+}
+
+static int
+sctp_asconf_iterator_ep_end(struct sctp_inpcb *inp, void *ptr, uint32_t val)
+{
+ struct sctp_ifa *ifa;
+ struct sctp_asconf_iterator *asc;
+ struct sctp_laddr *laddr, *nladdr, *l;
+
+ /* Only for specific case not bound all */
+ asc = (struct sctp_asconf_iterator *)ptr;
+ LIST_FOREACH(l, &asc->list_of_work, sctp_nxt_addr) {
+ ifa = l->ifa;
+ if (l->action == SCTP_ADD_IP_ADDRESS) {
+ LIST_FOREACH(laddr, &inp->sctp_addr_list,
+ sctp_nxt_addr) {
+ if (laddr->ifa == ifa) {
+ laddr->action = 0;
+ break;
+ }
+ }
+ } else if (l->action == SCTP_DEL_IP_ADDRESS) {
+ laddr = LIST_FIRST(&inp->sctp_addr_list);
+ while (laddr) {
+ nladdr = LIST_NEXT(laddr, sctp_nxt_addr);
+ /* remove only after all guys are done */
+ if (laddr->ifa == ifa) {
+ sctp_del_local_addr_ep(inp, ifa);
+ }
+ laddr = nladdr;
+ }
+ }
+ }
+ return (0);
+}
+
+void
+sctp_asconf_iterator_stcb(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
+ void *ptr, uint32_t val)
+{
+ struct sctp_asconf_iterator *asc;
+ struct sctp_ifa *ifa;
+ struct sctp_laddr *l;
+ int cnt_invalid = 0;
+ int type, status;
+ int num_queued = 0;
+
+ asc = (struct sctp_asconf_iterator *)ptr;
+ LIST_FOREACH(l, &asc->list_of_work, sctp_nxt_addr) {
+ ifa = l->ifa;
+ type = l->action;
+
+ /* address's vrf_id must be the vrf_id of the assoc */
+ if (ifa->vrf_id != stcb->asoc.vrf_id) {
+ continue;
+ }
+ /* Same checks again for assoc */
+ switch (ifa->address.sa.sa_family) {
+#ifdef INET6
+ case AF_INET6:
+ {
+ /* invalid if we're not a v6 endpoint */
+ struct sockaddr_in6 *sin6;
+
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
+ cnt_invalid++;
+ if (asc->cnt == cnt_invalid)
+ return;
+ else
+ continue;
+ }
+ sin6 = (struct sockaddr_in6 *)&ifa->address.sin6;
+ if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
+				/* we skip unspecified addresses */
+ continue;
+ }
+ if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
+ if (stcb->asoc.local_scope == 0) {
+ continue;
+ }
+ /* is it the right link local scope? */
+ if (sctp_is_scopeid_in_nets(stcb, &ifa->address.sa) == 0) {
+ continue;
+ }
+ }
+ break;
+ }
+#endif
+ case AF_INET:
+ {
+ /* invalid if we are a v6 only endpoint */
+ struct in6pcb *inp6;
+ struct sockaddr_in *sin;
+
+ inp6 = (struct in6pcb *)&inp->ip_inp.inp;
+				if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
+				    SCTP_IPV6_V6ONLY(inp6)) {
+					cnt_invalid++;
+					if (asc->cnt == cnt_invalid)
+						return;
+					else
+						continue;
+				}
+				sin = (struct sockaddr_in *)&ifa->address.sa;
+				if (sin->sin_addr.s_addr == 0) {
+					/* we skip unspecified addresses */
+					continue;
+				}
+				if (stcb->asoc.ipv4_local_scope == 0 &&
+				    IN4_ISPRIVATE_ADDRESS(&sin->sin_addr)) {
+					continue;
+				}
+ break;
+ }
+ default:
+ /* invalid address family */
+ cnt_invalid++;
+ if (asc->cnt == cnt_invalid)
+ return;
+ else
+ continue;
+ }
+
+ if (type == SCTP_ADD_IP_ADDRESS) {
+ /* prevent this address from being used as a source */
+ sctp_add_local_addr_restricted(stcb, ifa);
+ } else if (type == SCTP_DEL_IP_ADDRESS) {
+ struct sctp_nets *net;
+
+ TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+ sctp_rtentry_t *rt;
+
+ /* delete this address if cached */
+ if (net->ro._s_addr == ifa) {
+ sctp_free_ifa(net->ro._s_addr);
+ net->ro._s_addr = NULL;
+ net->src_addr_selected = 0;
+ rt = net->ro.ro_rt;
+ if (rt) {
+ RTFREE(rt);
+ net->ro.ro_rt = NULL;
+ }
+ /*
+ * Now we deleted our src address,
+ * should we not also now reset the
+				 * cwnd/rto to start as if it's a new
+ * address?
+ */
+ stcb->asoc.cc_functions.sctp_set_initial_cc_param(stcb, net);
+ net->RTO = 0;
+
+ }
+ }
+ } else if (type == SCTP_SET_PRIM_ADDR) {
+ if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) == 0) {
+ /* must validate the ifa is in the ep */
+ if (sctp_is_addr_in_ep(stcb->sctp_ep, ifa) == 0) {
+ continue;
+ }
+ } else {
+ /* Need to check scopes for this guy */
+ if (sctp_is_address_in_scope(ifa,
+ stcb->asoc.ipv4_addr_legal,
+ stcb->asoc.ipv6_addr_legal,
+ stcb->asoc.loopback_scope,
+ stcb->asoc.ipv4_local_scope,
+ stcb->asoc.local_scope,
+ stcb->asoc.site_scope, 0) == 0) {
+ continue;
+ }
+ }
+ }
+ /* queue an asconf for this address add/delete */
+ if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_DO_ASCONF) &&
+ stcb->asoc.peer_supports_asconf) {
+ /* queue an asconf for this addr */
+ status = sctp_asconf_queue_add(stcb, ifa, type);
+ /*
+ * if queued ok, and in the open state, update the
+ * count of queued params. If in the non-open
+ * state, these get sent when the assoc goes open.
+ */
+ if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) {
+ if (status >= 0) {
+ num_queued++;
+ }
+ }
+ }
+ }
+ /*
+ * If we have queued params in the open state, send out an ASCONF.
+ */
+ if (num_queued > 0) {
+ sctp_send_asconf(stcb, stcb->asoc.primary_destination,
+ SCTP_ADDR_NOT_LOCKED);
+ }
+}
+
+void
+sctp_asconf_iterator_end(void *ptr, uint32_t val)
+{
+ struct sctp_asconf_iterator *asc;
+ struct sctp_ifa *ifa;
+ struct sctp_laddr *l, *l_next;
+
+ asc = (struct sctp_asconf_iterator *)ptr;
+ l = LIST_FIRST(&asc->list_of_work);
+ while (l != NULL) {
+ l_next = LIST_NEXT(l, sctp_nxt_addr);
+ ifa = l->ifa;
+ if (l->action == SCTP_ADD_IP_ADDRESS) {
+ /* Clear the defer use flag */
+ ifa->localifa_flags &= ~SCTP_ADDR_DEFER_USE;
+ }
+ sctp_free_ifa(ifa);
+ SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_laddr), l);
+ SCTP_DECR_LADDR_COUNT();
+ l = l_next;
+ }
+ SCTP_FREE(asc, SCTP_M_ASC_IT);
+}
+
+/*
+ * sa is the sockaddr to ask the peer to set primary to.
+ * returns: 0 = completed, -1 = error
+ */
+int32_t
+sctp_set_primary_ip_address_sa(struct sctp_tcb *stcb, struct sockaddr *sa)
+{
+ uint32_t vrf_id;
+ struct sctp_ifa *ifa;
+
+ /* find the ifa for the desired set primary */
+ vrf_id = stcb->asoc.vrf_id;
+ ifa = sctp_find_ifa_by_addr(sa, vrf_id, SCTP_ADDR_NOT_LOCKED);
+ if (ifa == NULL) {
+ /* Invalid address */
+ return (-1);
+ }
+ /* queue an ASCONF:SET_PRIM_ADDR to be sent */
+ if (!sctp_asconf_queue_add(stcb, ifa, SCTP_SET_PRIM_ADDR)) {
+ /* set primary queuing succeeded */
+ SCTPDBG(SCTP_DEBUG_ASCONF1,
+ "set_primary_ip_address_sa: queued on tcb=%p, ",
+ stcb);
+ SCTPDBG_ADDR(SCTP_DEBUG_ASCONF1, sa);
+ if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) {
+#ifdef SCTP_TIMER_BASED_ASCONF
+ sctp_timer_start(SCTP_TIMER_TYPE_ASCONF,
+ stcb->sctp_ep, stcb,
+ stcb->asoc.primary_destination);
+#else
+ sctp_send_asconf(stcb, stcb->asoc.primary_destination,
+ SCTP_ADDR_NOT_LOCKED);
+#endif
+ }
+ } else {
+ SCTPDBG(SCTP_DEBUG_ASCONF1, "set_primary_ip_address_sa: failed to add to queue on tcb=%p, ",
+ stcb);
+ SCTPDBG_ADDR(SCTP_DEBUG_ASCONF1, sa);
+ return (-1);
+ }
+ return (0);
+}
+
+void
+sctp_set_primary_ip_address(struct sctp_ifa *ifa)
+{
+ struct sctp_inpcb *inp;
+
+ /* go through all our PCB's */
+ LIST_FOREACH(inp, &SCTP_BASE_INFO(listhead), sctp_list) {
+ struct sctp_tcb *stcb;
+
+ /* process for all associations for this endpoint */
+ LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
+ /* queue an ASCONF:SET_PRIM_ADDR to be sent */
+ if (!sctp_asconf_queue_add(stcb, ifa,
+ SCTP_SET_PRIM_ADDR)) {
+ /* set primary queuing succeeded */
+ SCTPDBG(SCTP_DEBUG_ASCONF1, "set_primary_ip_address: queued on stcb=%p, ",
+ stcb);
+ SCTPDBG_ADDR(SCTP_DEBUG_ASCONF1, &ifa->address.sa);
+ if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) {
+#ifdef SCTP_TIMER_BASED_ASCONF
+ sctp_timer_start(SCTP_TIMER_TYPE_ASCONF,
+ stcb->sctp_ep, stcb,
+ stcb->asoc.primary_destination);
+#else
+ sctp_send_asconf(stcb, stcb->asoc.primary_destination,
+ SCTP_ADDR_NOT_LOCKED);
+#endif
+ }
+ }
+ } /* for each stcb */
+ } /* for each inp */
+}
+
+int
+sctp_is_addr_pending(struct sctp_tcb *stcb, struct sctp_ifa *sctp_ifa)
+{
+ struct sctp_tmit_chunk *chk, *nchk;
+ unsigned int offset, asconf_limit;
+ struct sctp_asconf_chunk *acp;
+ struct sctp_asconf_paramhdr *aph;
+ uint8_t aparam_buf[SCTP_PARAM_BUFFER_SIZE];
+ struct sctp_ipv6addr_param *p_addr;
+ int add_cnt, del_cnt;
+ uint16_t last_param_type;
+
+ add_cnt = del_cnt = 0;
+ last_param_type = 0;
+ for (chk = TAILQ_FIRST(&stcb->asoc.asconf_send_queue); chk != NULL;
+ chk = nchk) {
+ /* get next chk */
+ nchk = TAILQ_NEXT(chk, sctp_next);
+
+ if (chk->data == NULL) {
+ SCTPDBG(SCTP_DEBUG_ASCONF1, "is_addr_pending: No mbuf data?\n");
+ continue;
+ }
+ offset = 0;
+ acp = mtod(chk->data, struct sctp_asconf_chunk *);
+ offset += sizeof(struct sctp_asconf_chunk);
+ asconf_limit = ntohs(acp->ch.chunk_length);
+ p_addr = (struct sctp_ipv6addr_param *)sctp_m_getptr(chk->data, offset, sizeof(struct sctp_paramhdr), aparam_buf);
+ if (p_addr == NULL) {
+ SCTPDBG(SCTP_DEBUG_ASCONF1, "is_addr_pending: couldn't get lookup addr!\n");
+ continue;
+ }
+ offset += ntohs(p_addr->ph.param_length);
+
+ aph = (struct sctp_asconf_paramhdr *)sctp_m_getptr(chk->data, offset, sizeof(struct sctp_asconf_paramhdr), aparam_buf);
+ if (aph == NULL) {
+ SCTPDBG(SCTP_DEBUG_ASCONF1, "is_addr_pending: Empty ASCONF will be sent?\n");
+ continue;
+ }
+ while (aph != NULL) {
+ unsigned int param_length, param_type;
+
+ param_type = ntohs(aph->ph.param_type);
+ param_length = ntohs(aph->ph.param_length);
+ if (offset + param_length > asconf_limit) {
+ /* parameter goes beyond end of chunk! */
+ break;
+ }
+ if (param_length > sizeof(aparam_buf)) {
+ SCTPDBG(SCTP_DEBUG_ASCONF1, "is_addr_pending: param length (%u) larger than buffer size!\n", param_length);
+ break;
+ }
+ if (param_length <= sizeof(struct sctp_paramhdr)) {
+ SCTPDBG(SCTP_DEBUG_ASCONF1, "is_addr_pending: param length(%u) too short\n", param_length);
+ break;
+ }
+ aph = (struct sctp_asconf_paramhdr *)sctp_m_getptr(chk->data, offset, param_length, aparam_buf);
+ if (aph == NULL) {
+ SCTPDBG(SCTP_DEBUG_ASCONF1, "is_addr_pending: couldn't get entire param\n");
+ break;
+ }
+ p_addr = (struct sctp_ipv6addr_param *)(aph + 1);
+ if (sctp_addr_match(p_addr, &sctp_ifa->address.sa) != 0) {
+ switch (param_type) {
+ case SCTP_ADD_IP_ADDRESS:
+ add_cnt++;
+ break;
+ case SCTP_DEL_IP_ADDRESS:
+ del_cnt++;
+ break;
+ default:
+ break;
+ }
+ last_param_type = param_type;
+ }
+ offset += SCTP_SIZE32(param_length);
+ if (offset >= asconf_limit) {
+ /* no more data in the mbuf chain */
+ break;
+ }
+ /* get pointer to next asconf param */
+ aph = (struct sctp_asconf_paramhdr *)sctp_m_getptr(chk->data, offset, sizeof(struct sctp_asconf_paramhdr), aparam_buf);
+ }
+ }
+
+ /*
+	 * the address is still pending if more ADDs than DELs are queued
+	 * for it, or if the counts are equal and the last matching param
+	 * was an ADD (i.e. sequences like ADD -> DEL -> ADD or DEL -> ADD)
+ */
+ if (add_cnt > del_cnt ||
+ (add_cnt == del_cnt && last_param_type == SCTP_ADD_IP_ADDRESS)) {
+ return 1;
+ }
+ return 0;
+}
+
+static struct sockaddr *
+sctp_find_valid_localaddr(struct sctp_tcb *stcb, int addr_locked)
+{
+ struct sctp_vrf *vrf = NULL;
+ struct sctp_ifn *sctp_ifn;
+ struct sctp_ifa *sctp_ifa;
+
+ if (addr_locked == SCTP_ADDR_NOT_LOCKED)
+ SCTP_IPI_ADDR_RLOCK();
+ vrf = sctp_find_vrf(stcb->asoc.vrf_id);
+ if (vrf == NULL) {
+ if (addr_locked == SCTP_ADDR_NOT_LOCKED)
+ SCTP_IPI_ADDR_RUNLOCK();
+ return (NULL);
+ }
+ LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
+ if (stcb->asoc.loopback_scope == 0 &&
+ SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
+ /* Skip if loopback_scope not set */
+ continue;
+ }
+ LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
+ if (sctp_ifa->address.sa.sa_family == AF_INET &&
+ stcb->asoc.ipv4_addr_legal) {
+ struct sockaddr_in *sin;
+
+ sin = (struct sockaddr_in *)&sctp_ifa->address.sa;
+ if (sin->sin_addr.s_addr == 0) {
+					/* skip unspecified addresses */
+ continue;
+ }
+ if (stcb->asoc.ipv4_local_scope == 0 &&
+ IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))
+ continue;
+
+ if (sctp_is_addr_restricted(stcb, sctp_ifa) &&
+ (!sctp_is_addr_pending(stcb, sctp_ifa)))
+ continue;
+ /* found a valid local v4 address to use */
+ if (addr_locked == SCTP_ADDR_NOT_LOCKED)
+ SCTP_IPI_ADDR_RUNLOCK();
+ return (&sctp_ifa->address.sa);
+ } else if (sctp_ifa->address.sa.sa_family == AF_INET6 &&
+ stcb->asoc.ipv6_addr_legal) {
+ struct sockaddr_in6 *sin6;
+
+ if (sctp_ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
+ continue;
+ }
+ sin6 = (struct sockaddr_in6 *)&sctp_ifa->address.sa;
+ if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
+					/* we skip unspecified addresses */
+ continue;
+ }
+ if (stcb->asoc.local_scope == 0 &&
+ IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr))
+ continue;
+ if (stcb->asoc.site_scope == 0 &&
+ IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))
+ continue;
+
+ if (sctp_is_addr_restricted(stcb, sctp_ifa) &&
+ (!sctp_is_addr_pending(stcb, sctp_ifa)))
+ continue;
+ /* found a valid local v6 address to use */
+ if (addr_locked == SCTP_ADDR_NOT_LOCKED)
+ SCTP_IPI_ADDR_RUNLOCK();
+ return (&sctp_ifa->address.sa);
+ }
+ }
+ }
+ /* no valid addresses found */
+ if (addr_locked == SCTP_ADDR_NOT_LOCKED)
+ SCTP_IPI_ADDR_RUNLOCK();
+ return (NULL);
+}
+
+static struct sockaddr *
+sctp_find_valid_localaddr_ep(struct sctp_tcb *stcb)
+{
+ struct sctp_laddr *laddr;
+
+ LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list, sctp_nxt_addr) {
+ if (laddr->ifa == NULL) {
+ continue;
+ }
+ /* is the address restricted ? */
+ if (sctp_is_addr_restricted(stcb, laddr->ifa) &&
+ (!sctp_is_addr_pending(stcb, laddr->ifa)))
+ continue;
+
+ /* found a valid local address to use */
+ return (&laddr->ifa->address.sa);
+ }
+ /* no valid addresses found */
+ return (NULL);
+}
+
+/*
+ * builds an ASCONF chunk from queued ASCONF params.
+ * returns NULL on error (no mbuf, no ASCONF params queued, etc).
+ */
+struct mbuf *
+sctp_compose_asconf(struct sctp_tcb *stcb, int *retlen, int addr_locked)
+{
+ struct mbuf *m_asconf, *m_asconf_chk;
+ struct sctp_asconf_addr *aa;
+ struct sctp_asconf_chunk *acp;
+ struct sctp_asconf_paramhdr *aph;
+ struct sctp_asconf_addr_param *aap;
+ uint32_t p_length;
+ uint32_t correlation_id = 1; /* 0 is reserved... */
+ caddr_t ptr, lookup_ptr;
+ uint8_t lookup_used = 0;
+
+ /* are there any asconf params to send? */
+ TAILQ_FOREACH(aa, &stcb->asoc.asconf_queue, next) {
+ if (aa->sent == 0)
+ break;
+ }
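+	/*
+	 * note: TAILQ_FOREACH() leaves aa == NULL when the loop runs to
+	 * completion, so aa != NULL here means an unsent param was found
+	 */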
+ if (aa == NULL)
+ return (NULL);
+
+ /*
+ * get a chunk header mbuf and a cluster for the asconf params since
+ * it's simpler to fill in the asconf chunk header lookup address on
+ * the fly
+ */
+ m_asconf_chk = sctp_get_mbuf_for_msg(sizeof(struct sctp_asconf_chunk), 0, M_DONTWAIT, 1, MT_DATA);
+ if (m_asconf_chk == NULL) {
+ /* no mbuf's */
+ SCTPDBG(SCTP_DEBUG_ASCONF1,
+ "compose_asconf: couldn't get chunk mbuf!\n");
+ return (NULL);
+ }
+ m_asconf = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
+ if (m_asconf == NULL) {
+ /* no mbuf's */
+ SCTPDBG(SCTP_DEBUG_ASCONF1,
+ "compose_asconf: couldn't get mbuf!\n");
+ sctp_m_freem(m_asconf_chk);
+ return (NULL);
+ }
+ SCTP_BUF_LEN(m_asconf_chk) = sizeof(struct sctp_asconf_chunk);
+ SCTP_BUF_LEN(m_asconf) = 0;
+ acp = mtod(m_asconf_chk, struct sctp_asconf_chunk *);
+ bzero(acp, sizeof(struct sctp_asconf_chunk));
+ /* save pointers to lookup address and asconf params */
+ lookup_ptr = (caddr_t)(acp + 1); /* after the header */
+ ptr = mtod(m_asconf, caddr_t); /* beginning of cluster */
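+	/*
+	 * resulting mbuf layout (illustrative):
+	 *   m_asconf_chk: [asconf chunk header][lookup address param]
+	 *   m_asconf:     [asconf param 1][asconf param 2]...
+	 * the two are chained together just before returning
+	 */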
+
+ /* fill in chunk header info */
+ acp->ch.chunk_type = SCTP_ASCONF;
+ acp->ch.chunk_flags = 0;
+ acp->serial_number = htonl(stcb->asoc.asconf_seq_out);
+ stcb->asoc.asconf_seq_out++;
+
+ /* add parameters... up to smallest MTU allowed */
+ TAILQ_FOREACH(aa, &stcb->asoc.asconf_queue, next) {
+ if (aa->sent)
+ continue;
+ /* get the parameter length */
+ p_length = SCTP_SIZE32(aa->ap.aph.ph.param_length);
+ /* will it fit in current chunk? */
+ if (SCTP_BUF_LEN(m_asconf) + p_length > stcb->asoc.smallest_mtu) {
+ /* won't fit, so we're done with this chunk */
+ break;
+ }
+ /* assign (and store) a correlation id */
+ aa->ap.aph.correlation_id = correlation_id++;
+
+ /*
+		 * fill in the lookup address if we're doing a delete; this
+		 * is a simple way for us to fill in the correlation address,
+		 * which should only be used by the peer if we're deleting
+		 * our source address and adding a new address (e.g. the
+		 * renumbering case)
+ */
+ if (lookup_used == 0 &&
+ (aa->special_del == 0) &&
+ aa->ap.aph.ph.param_type == SCTP_DEL_IP_ADDRESS) {
+ struct sctp_ipv6addr_param *lookup;
+ uint16_t p_size, addr_size;
+
+ lookup = (struct sctp_ipv6addr_param *)lookup_ptr;
+ lookup->ph.param_type =
+ htons(aa->ap.addrp.ph.param_type);
+ if (aa->ap.addrp.ph.param_type == SCTP_IPV6_ADDRESS) {
+ /* copy IPv6 address */
+ p_size = sizeof(struct sctp_ipv6addr_param);
+ addr_size = sizeof(struct in6_addr);
+ } else {
+ /* copy IPv4 address */
+ p_size = sizeof(struct sctp_ipv4addr_param);
+ addr_size = sizeof(struct in_addr);
+ }
+ lookup->ph.param_length = htons(SCTP_SIZE32(p_size));
+ memcpy(lookup->addr, &aa->ap.addrp.addr, addr_size);
+ SCTP_BUF_LEN(m_asconf_chk) += SCTP_SIZE32(p_size);
+ lookup_used = 1;
+ }
+ /* copy into current space */
+ memcpy(ptr, &aa->ap, p_length);
+
+ /* network elements and update lengths */
+ aph = (struct sctp_asconf_paramhdr *)ptr;
+ aap = (struct sctp_asconf_addr_param *)ptr;
+ /* correlation_id is transparent to peer, no htonl needed */
+ aph->ph.param_type = htons(aph->ph.param_type);
+ aph->ph.param_length = htons(aph->ph.param_length);
+ aap->addrp.ph.param_type = htons(aap->addrp.ph.param_type);
+ aap->addrp.ph.param_length = htons(aap->addrp.ph.param_length);
+
+ SCTP_BUF_LEN(m_asconf) += SCTP_SIZE32(p_length);
+ ptr += SCTP_SIZE32(p_length);
+
+ /*
+ * these params are removed off the pending list upon
+ * getting an ASCONF-ACK back from the peer, just set flag
+ */
+ aa->sent = 1;
+ }
+ /* check to see if the lookup addr has been populated yet */
+ if (lookup_used == 0) {
+ /* NOTE: if the address param is optional, can skip this... */
+ /* add any valid (existing) address... */
+ struct sctp_ipv6addr_param *lookup;
+ uint16_t p_size, addr_size;
+ struct sockaddr *found_addr;
+ caddr_t addr_ptr;
+
+ if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL)
+ found_addr = sctp_find_valid_localaddr(stcb,
+ addr_locked);
+ else
+ found_addr = sctp_find_valid_localaddr_ep(stcb);
+
+ lookup = (struct sctp_ipv6addr_param *)lookup_ptr;
+ if (found_addr != NULL) {
+ if (found_addr->sa_family == AF_INET6) {
+ /* copy IPv6 address */
+ lookup->ph.param_type =
+ htons(SCTP_IPV6_ADDRESS);
+ p_size = sizeof(struct sctp_ipv6addr_param);
+ addr_size = sizeof(struct in6_addr);
+ addr_ptr = (caddr_t)&((struct sockaddr_in6 *)
+ found_addr)->sin6_addr;
+ } else {
+ /* copy IPv4 address */
+ lookup->ph.param_type =
+ htons(SCTP_IPV4_ADDRESS);
+ p_size = sizeof(struct sctp_ipv4addr_param);
+ addr_size = sizeof(struct in_addr);
+ addr_ptr = (caddr_t)&((struct sockaddr_in *)
+ found_addr)->sin_addr;
+ }
+ lookup->ph.param_length = htons(SCTP_SIZE32(p_size));
+ memcpy(lookup->addr, addr_ptr, addr_size);
+ SCTP_BUF_LEN(m_asconf_chk) += SCTP_SIZE32(p_size);
+ lookup_used = 1;
+ } else {
+ /* uh oh... don't have any address?? */
+ SCTPDBG(SCTP_DEBUG_ASCONF1,
+ "compose_asconf: no lookup addr!\n");
+			/* for now, we send an IPv4 address of 0.0.0.0 */
+ lookup->ph.param_type = htons(SCTP_IPV4_ADDRESS);
+ lookup->ph.param_length = htons(SCTP_SIZE32(sizeof(struct sctp_ipv4addr_param)));
+ bzero(lookup->addr, sizeof(struct in_addr));
+ SCTP_BUF_LEN(m_asconf_chk) += SCTP_SIZE32(sizeof(struct sctp_ipv4addr_param));
+ lookup_used = 1;
+ }
+ }
+ /* chain it all together */
+ SCTP_BUF_NEXT(m_asconf_chk) = m_asconf;
+ *retlen = SCTP_BUF_LEN(m_asconf_chk) + SCTP_BUF_LEN(m_asconf);
+	acp->ch.chunk_length = htons(*retlen);
+
+ return (m_asconf_chk);
+}
+
+/*
+ * section to handle address changes before an association is up, e.g. changes
+ * during INIT/INIT-ACK/COOKIE-ECHO handshake
+ */
+
+/*
+ * processes the (local) addresses in the INIT-ACK chunk
+ */
+static void
+sctp_process_initack_addresses(struct sctp_tcb *stcb, struct mbuf *m,
+ unsigned int offset, unsigned int length)
+{
+ struct sctp_paramhdr tmp_param, *ph;
+ uint16_t plen, ptype;
+ struct sctp_ifa *sctp_ifa;
+ struct sctp_ipv6addr_param addr_store;
+ struct sockaddr_in6 sin6;
+ struct sockaddr_in sin;
+ struct sockaddr *sa;
+ uint32_t vrf_id;
+
+ SCTPDBG(SCTP_DEBUG_ASCONF2, "processing init-ack addresses\n");
+	if (stcb == NULL)	/* un-needed check; quiets static analysis */
+ return;
+
+ /* convert to upper bound */
+ length += offset;
+
+ if ((offset + sizeof(struct sctp_paramhdr)) > length) {
+ return;
+ }
+ /* init the addresses */
+ bzero(&sin6, sizeof(sin6));
+ sin6.sin6_family = AF_INET6;
+ sin6.sin6_len = sizeof(sin6);
+ sin6.sin6_port = stcb->rport;
+
+ bzero(&sin, sizeof(sin));
+ sin.sin_len = sizeof(sin);
+ sin.sin_family = AF_INET;
+ sin.sin_port = stcb->rport;
+
+ /* go through the addresses in the init-ack */
+ ph = (struct sctp_paramhdr *)sctp_m_getptr(m, offset,
+ sizeof(struct sctp_paramhdr), (uint8_t *) & tmp_param);
+ while (ph != NULL) {
+ ptype = ntohs(ph->param_type);
+ plen = ntohs(ph->param_length);
+ if (ptype == SCTP_IPV6_ADDRESS) {
+ struct sctp_ipv6addr_param *a6p;
+
+ /* get the entire IPv6 address param */
+ a6p = (struct sctp_ipv6addr_param *)
+ sctp_m_getptr(m, offset,
+ sizeof(struct sctp_ipv6addr_param),
+ (uint8_t *) & addr_store);
+ if (plen != sizeof(struct sctp_ipv6addr_param) ||
+ a6p == NULL) {
+ return;
+ }
+ memcpy(&sin6.sin6_addr, a6p->addr,
+ sizeof(struct in6_addr));
+ sa = (struct sockaddr *)&sin6;
+ } else if (ptype == SCTP_IPV4_ADDRESS) {
+ struct sctp_ipv4addr_param *a4p;
+
+ /* get the entire IPv4 address param */
+ a4p = (struct sctp_ipv4addr_param *)sctp_m_getptr(m, offset,
+ sizeof(struct sctp_ipv4addr_param),
+ (uint8_t *) & addr_store);
+ if (plen != sizeof(struct sctp_ipv4addr_param) ||
+ a4p == NULL) {
+ return;
+ }
+ sin.sin_addr.s_addr = a4p->addr;
+ sa = (struct sockaddr *)&sin;
+ } else {
+ goto next_addr;
+ }
+
+ /* see if this address really (still) exists */
+ if (stcb) {
+ vrf_id = stcb->asoc.vrf_id;
+ } else {
+ vrf_id = SCTP_DEFAULT_VRFID;
+ }
+ sctp_ifa = sctp_find_ifa_by_addr(sa, vrf_id,
+ SCTP_ADDR_NOT_LOCKED);
+ if (sctp_ifa == NULL) {
+ /* address doesn't exist anymore */
+ int status;
+
+ /* are ASCONFs allowed ? */
+ if ((sctp_is_feature_on(stcb->sctp_ep,
+ SCTP_PCB_FLAGS_DO_ASCONF)) &&
+ stcb->asoc.peer_supports_asconf) {
+ /* queue an ASCONF DEL_IP_ADDRESS */
+ status = sctp_asconf_queue_sa_delete(stcb, sa);
+ /*
+ * if queued ok, and in correct state, send
+ * out the ASCONF.
+ */
+ if (status == 0 &&
+ SCTP_GET_STATE(&stcb->asoc) ==
+ SCTP_STATE_OPEN) {
+#ifdef SCTP_TIMER_BASED_ASCONF
+ sctp_timer_start(SCTP_TIMER_TYPE_ASCONF,
+ stcb->sctp_ep, stcb,
+ stcb->asoc.primary_destination);
+#else
+ sctp_send_asconf(stcb, stcb->asoc.primary_destination,
+ SCTP_ADDR_NOT_LOCKED);
+#endif
+ }
+ }
+ }
+next_addr:
+ /*
+ * Sanity check: Make sure the length isn't 0, otherwise
+ * we'll be stuck in this loop for a long time...
+ */
+ if (SCTP_SIZE32(plen) == 0) {
+ SCTP_PRINTF("process_initack_addrs: bad len (%d) type=%xh\n",
+ plen, ptype);
+ return;
+ }
+ /* get next parameter */
+ offset += SCTP_SIZE32(plen);
+ if ((offset + sizeof(struct sctp_paramhdr)) > length)
+ return;
+ ph = (struct sctp_paramhdr *)sctp_m_getptr(m, offset,
+ sizeof(struct sctp_paramhdr), (uint8_t *) & tmp_param);
+ } /* while */
+}
+
+/* FIX ME: need to verify return result for v6 address type if v6 disabled */
+/*
+ * checks to see if a specific address is in the initack address list;
+ * returns 1 if found, 0 if not
+ */
+static uint32_t
+sctp_addr_in_initack(struct sctp_tcb *stcb, struct mbuf *m, uint32_t offset,
+ uint32_t length, struct sockaddr *sa)
+{
+ struct sctp_paramhdr tmp_param, *ph;
+ uint16_t plen, ptype;
+ struct sctp_ipv6addr_param addr_store;
+ struct sockaddr_in *sin;
+ struct sctp_ipv4addr_param *a4p;
+
+#ifdef INET6
+ struct sockaddr_in6 *sin6;
+ struct sctp_ipv6addr_param *a6p;
+ struct sockaddr_in6 sin6_tmp;
+
+#endif /* INET6 */
+
+ if (
+#ifdef INET6
+ (sa->sa_family != AF_INET6) &&
+#endif /* INET6 */
+ (sa->sa_family != AF_INET))
+ return (0);
+
+ SCTPDBG(SCTP_DEBUG_ASCONF2, "find_initack_addr: starting search for ");
+ SCTPDBG_ADDR(SCTP_DEBUG_ASCONF2, sa);
+ /* convert to upper bound */
+ length += offset;
+
+ if ((offset + sizeof(struct sctp_paramhdr)) > length) {
+ SCTPDBG(SCTP_DEBUG_ASCONF1,
+ "find_initack_addr: invalid offset?\n");
+ return (0);
+ }
+ /* go through the addresses in the init-ack */
+ ph = (struct sctp_paramhdr *)sctp_m_getptr(m, offset,
+ sizeof(struct sctp_paramhdr), (uint8_t *) & tmp_param);
+ while (ph != NULL) {
+ ptype = ntohs(ph->param_type);
+ plen = ntohs(ph->param_length);
+#ifdef INET6
+ if (ptype == SCTP_IPV6_ADDRESS && sa->sa_family == AF_INET6) {
+ /* get the entire IPv6 address param */
+ a6p = (struct sctp_ipv6addr_param *)
+ sctp_m_getptr(m, offset,
+ sizeof(struct sctp_ipv6addr_param),
+ (uint8_t *) & addr_store);
+ if (plen != sizeof(struct sctp_ipv6addr_param) ||
+ (ph == NULL) ||
+ (a6p == NULL)) {
+ return (0);
+ }
+ sin6 = (struct sockaddr_in6 *)sa;
+ if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) {
+ /* create a copy and clear scope */
+ memcpy(&sin6_tmp, sin6,
+ sizeof(struct sockaddr_in6));
+ sin6 = &sin6_tmp;
+ in6_clearscope(&sin6->sin6_addr);
+ }
+ if (memcmp(&sin6->sin6_addr, a6p->addr,
+ sizeof(struct in6_addr)) == 0) {
+ /* found it */
+ return (1);
+ }
+ } else
+#endif /* INET6 */
+
+ if (ptype == SCTP_IPV4_ADDRESS &&
+ sa->sa_family == AF_INET) {
+ /* get the entire IPv4 address param */
+ a4p = (struct sctp_ipv4addr_param *)sctp_m_getptr(m,
+ offset, sizeof(struct sctp_ipv4addr_param),
+ (uint8_t *) & addr_store);
+ if (plen != sizeof(struct sctp_ipv4addr_param) ||
+ (ph == NULL) ||
+ (a4p == NULL)) {
+ return (0);
+ }
+ sin = (struct sockaddr_in *)sa;
+ if (sin->sin_addr.s_addr == a4p->addr) {
+ /* found it */
+ return (1);
+ }
+ }
+ /* get next parameter */
+ offset += SCTP_SIZE32(plen);
+ if (offset + sizeof(struct sctp_paramhdr) > length)
+ return (0);
+ ph = (struct sctp_paramhdr *)
+ sctp_m_getptr(m, offset, sizeof(struct sctp_paramhdr),
+ (uint8_t *) & tmp_param);
+ } /* while */
+ /* not found! */
+ return (0);
+}
+
+/*
+ * makes sure that the current endpoint local addr list is consistent with
+ * the new association (e.g. subset bound, asconf allowed); adds addresses
+ * as necessary
+ */
+static void
+sctp_check_address_list_ep(struct sctp_tcb *stcb, struct mbuf *m, int offset,
+ int length, struct sockaddr *init_addr)
+{
+ struct sctp_laddr *laddr;
+
+ /* go through the endpoint list */
+ LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list, sctp_nxt_addr) {
+ /* be paranoid and validate the laddr */
+ if (laddr->ifa == NULL) {
+ SCTPDBG(SCTP_DEBUG_ASCONF1,
+ "check_addr_list_ep: laddr->ifa is NULL");
+ continue;
+ }
+ /* do i have it implicitly? */
+ if (sctp_cmpaddr(&laddr->ifa->address.sa, init_addr)) {
+ continue;
+ }
+ /* check to see if in the init-ack */
+ if (!sctp_addr_in_initack(stcb, m, offset, length,
+ &laddr->ifa->address.sa)) {
+ /* try to add it */
+ sctp_addr_mgmt_assoc(stcb->sctp_ep, stcb, laddr->ifa,
+ SCTP_ADD_IP_ADDRESS, SCTP_ADDR_NOT_LOCKED);
+ }
+ }
+}
+
+/*
+ * makes sure that the current kernel address list is consistent with the new
+ * association (with all addrs bound); adds addresses as necessary
+ */
+static void
+sctp_check_address_list_all(struct sctp_tcb *stcb, struct mbuf *m, int offset,
+ int length, struct sockaddr *init_addr,
+ uint16_t local_scope, uint16_t site_scope,
+ uint16_t ipv4_scope, uint16_t loopback_scope)
+{
+ struct sctp_vrf *vrf = NULL;
+ struct sctp_ifn *sctp_ifn;
+ struct sctp_ifa *sctp_ifa;
+ uint32_t vrf_id;
+
+ if (stcb) {
+ vrf_id = stcb->asoc.vrf_id;
+ } else {
+ return;
+ }
+ SCTP_IPI_ADDR_RLOCK();
+ vrf = sctp_find_vrf(vrf_id);
+ if (vrf == NULL) {
+ SCTP_IPI_ADDR_RUNLOCK();
+ return;
+ }
+ /* go through all our known interfaces */
+ LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
+ if (loopback_scope == 0 && SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
+ /* skip loopback interface */
+ continue;
+ }
+ /* go through each interface address */
+ LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
+ /* do i have it implicitly? */
+ if (sctp_cmpaddr(&sctp_ifa->address.sa, init_addr)) {
+ continue;
+ }
+ /* check to see if in the init-ack */
+ if (!sctp_addr_in_initack(stcb, m, offset, length,
+ &sctp_ifa->address.sa)) {
+ /* try to add it */
+ sctp_addr_mgmt_assoc(stcb->sctp_ep, stcb,
+ sctp_ifa, SCTP_ADD_IP_ADDRESS,
+ SCTP_ADDR_LOCKED);
+ }
+ } /* end foreach ifa */
+ } /* end foreach ifn */
+ SCTP_IPI_ADDR_RUNLOCK();
+}
+
+/*
+ * validates an init-ack chunk (from a cookie-echo) against current
+ * addresses: adds addresses from the init-ack into our local address list
+ * (if needed), queues asconf adds/deletes as needed, and makes the
+ * appropriate list changes for source address selection.
+ *
+ * m, offset: points to the start of the address list in an init-ack chunk
+ * length:    total length of the address params only
+ * init_addr: address where my INIT-ACK was sent from
+ */
+void
+sctp_check_address_list(struct sctp_tcb *stcb, struct mbuf *m, int offset,
+ int length, struct sockaddr *init_addr,
+ uint16_t local_scope, uint16_t site_scope,
+ uint16_t ipv4_scope, uint16_t loopback_scope)
+{
+ /* process the local addresses in the initack */
+ sctp_process_initack_addresses(stcb, m, offset, length);
+
+ if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
+ /* bound all case */
+ sctp_check_address_list_all(stcb, m, offset, length, init_addr,
+ local_scope, site_scope, ipv4_scope, loopback_scope);
+ } else {
+ /* subset bound case */
+ if (sctp_is_feature_on(stcb->sctp_ep,
+ SCTP_PCB_FLAGS_DO_ASCONF)) {
+ /* asconf's allowed */
+ sctp_check_address_list_ep(stcb, m, offset, length,
+ init_addr);
+ }
+ /* else, no asconfs allowed, so what we sent is what we get */
+ }
+}
+
+/*
+ * sctp_bindx() support: add or delete a bound address on an endpoint,
+ * kicking off an iterator to update existing associations as needed
+ */
+uint32_t
+sctp_addr_mgmt_ep_sa(struct sctp_inpcb *inp, struct sockaddr *sa,
+ uint32_t type, uint32_t vrf_id, struct sctp_ifa *sctp_ifap)
+{
+ struct sctp_ifa *ifa;
+
+ if (sa->sa_len == 0) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_ASCONF, EINVAL);
+ return (EINVAL);
+ }
+ if (sctp_ifap) {
+ ifa = sctp_ifap;
+ } else if (type == SCTP_ADD_IP_ADDRESS) {
+ /* For an add the address MUST be on the system */
+ ifa = sctp_find_ifa_by_addr(sa, vrf_id, SCTP_ADDR_NOT_LOCKED);
+ } else if (type == SCTP_DEL_IP_ADDRESS) {
+ /* For a delete we need to find it in the inp */
+ ifa = sctp_find_ifa_in_ep(inp, sa, SCTP_ADDR_NOT_LOCKED);
+ } else {
+ ifa = NULL;
+ }
+ if (ifa != NULL) {
+ if (type == SCTP_ADD_IP_ADDRESS) {
+ sctp_add_local_addr_ep(inp, ifa, type);
+ } else if (type == SCTP_DEL_IP_ADDRESS) {
+ struct sctp_laddr *laddr;
+
+ if (inp->laddr_count < 2) {
+ /* can't delete the last local address */
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_ASCONF, EINVAL);
+ return (EINVAL);
+ }
+ LIST_FOREACH(laddr, &inp->sctp_addr_list,
+ sctp_nxt_addr) {
+ if (ifa == laddr->ifa) {
+ /* Mark in the delete */
+ laddr->action = type;
+ }
+ }
+ }
+ if (!LIST_EMPTY(&inp->sctp_asoc_list)) {
+ /*
+ * There is no need to start the iterator if the inp
+ * has no associations.
+ */
+ struct sctp_asconf_iterator *asc;
+ struct sctp_laddr *wi;
+
+ SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
+ sizeof(struct sctp_asconf_iterator),
+ SCTP_M_ASC_IT);
+ if (asc == NULL) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_ASCONF, ENOMEM);
+ return (ENOMEM);
+ }
+ wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
+ if (wi == NULL) {
+ SCTP_FREE(asc, SCTP_M_ASC_IT);
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_ASCONF, ENOMEM);
+ return (ENOMEM);
+ }
+ LIST_INIT(&asc->list_of_work);
+ asc->cnt = 1;
+ SCTP_INCR_LADDR_COUNT();
+ wi->ifa = ifa;
+ wi->action = type;
+ atomic_add_int(&ifa->refcount, 1);
+ LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
+ (void)sctp_initiate_iterator(sctp_asconf_iterator_ep,
+ sctp_asconf_iterator_stcb,
+ sctp_asconf_iterator_ep_end,
+ SCTP_PCB_ANY_FLAGS,
+ SCTP_PCB_ANY_FEATURES,
+ SCTP_ASOC_ANY_STATE,
+ (void *)asc, 0,
+ sctp_asconf_iterator_end, inp, 0);
+ }
+ return (0);
+ } else {
+ /* invalid address! */
+ SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_ASCONF, EADDRNOTAVAIL);
+ return (EADDRNOTAVAIL);
+ }
+}
+
+void
+sctp_asconf_send_nat_state_update(struct sctp_tcb *stcb,
+ struct sctp_nets *net)
+{
+ struct sctp_asconf_addr *aa;
+ struct sctp_ifa *sctp_ifap;
+ struct sctp_asconf_tag_param *vtag;
+ struct sockaddr_in *to;
+
+#ifdef INET6
+ struct sockaddr_in6 *to6;
+
+#endif
+ if (net == NULL) {
+ SCTPDBG(SCTP_DEBUG_ASCONF1, "sctp_asconf_send_nat_state_update: Missing net\n");
+ return;
+ }
+ if (stcb == NULL) {
+ SCTPDBG(SCTP_DEBUG_ASCONF1, "sctp_asconf_send_nat_state_update: Missing stcb\n");
+ return;
+ }
+ /*
+	 * Need to have in the asconf:
+	 *  - vtag param (my_vtag/peer_vtag)
+	 *  - ADD(0.0.0.0)
+	 *  - DEL(0.0.0.0)
+	 *  - ADD(addr) for any global addresses
+ */
+ SCTP_MALLOC(aa, struct sctp_asconf_addr *, sizeof(*aa),
+ SCTP_M_ASC_ADDR);
+ if (aa == NULL) {
+ /* didn't get memory */
+ SCTPDBG(SCTP_DEBUG_ASCONF1,
+ "sctp_asconf_send_nat_state_update: failed to get memory!\n");
+ return;
+ }
+ aa->special_del = 0;
+ /* fill in asconf address parameter fields */
+ /* top level elements are "networked" during send */
+ aa->ifa = NULL;
+ aa->sent = 0; /* clear sent flag */
+ vtag = (struct sctp_asconf_tag_param *)&aa->ap.aph;
+ vtag->aph.ph.param_type = SCTP_NAT_VTAGS;
+ vtag->aph.ph.param_length = sizeof(struct sctp_asconf_tag_param);
+ vtag->local_vtag = htonl(stcb->asoc.my_vtag);
+ vtag->remote_vtag = htonl(stcb->asoc.peer_vtag);
+ TAILQ_INSERT_TAIL(&stcb->asoc.asconf_queue, aa, next);
+
+ SCTP_MALLOC(aa, struct sctp_asconf_addr *, sizeof(*aa),
+ SCTP_M_ASC_ADDR);
+ if (aa == NULL) {
+ /* didn't get memory */
+ SCTPDBG(SCTP_DEBUG_ASCONF1,
+ "sctp_asconf_send_nat_state_update: failed to get memory!\n");
+ return;
+ }
+ memset(aa, 0, sizeof(struct sctp_asconf_addr));
+ /* fill in asconf address parameter fields */
+ /* ADD(0.0.0.0) */
+ if (net->ro._l_addr.sa.sa_family == AF_INET) {
+ aa->ap.aph.ph.param_type = SCTP_ADD_IP_ADDRESS;
+ aa->ap.aph.ph.param_length = sizeof(struct sctp_asconf_addrv4_param);
+ aa->ap.addrp.ph.param_type = SCTP_IPV4_ADDRESS;
+ aa->ap.addrp.ph.param_length = sizeof(struct sctp_ipv4addr_param);
+ /* No need to add an address, we are using 0.0.0.0 */
+ TAILQ_INSERT_TAIL(&stcb->asoc.asconf_queue, aa, next);
+ }
+#ifdef INET6
+ else if (net->ro._l_addr.sa.sa_family == AF_INET6) {
+ aa->ap.aph.ph.param_type = SCTP_ADD_IP_ADDRESS;
+ aa->ap.aph.ph.param_length = sizeof(struct sctp_asconf_addr_param);
+ aa->ap.addrp.ph.param_type = SCTP_IPV6_ADDRESS;
+ aa->ap.addrp.ph.param_length = sizeof(struct sctp_ipv6addr_param);
+ /* No need to add an address, we are using 0.0.0.0 */
+ TAILQ_INSERT_TAIL(&stcb->asoc.asconf_queue, aa, next);
+ }
+#endif /* INET6 */
+ SCTP_MALLOC(aa, struct sctp_asconf_addr *, sizeof(*aa),
+ SCTP_M_ASC_ADDR);
+ if (aa == NULL) {
+ /* didn't get memory */
+ SCTPDBG(SCTP_DEBUG_ASCONF1,
+ "sctp_asconf_send_nat_state_update: failed to get memory!\n");
+ return;
+ }
+ memset(aa, 0, sizeof(struct sctp_asconf_addr));
+ /* fill in asconf address parameter fields */
+	/* DEL(0.0.0.0) */
+	if (net->ro._l_addr.sa.sa_family == AF_INET) {
+		aa->ap.aph.ph.param_type = SCTP_DEL_IP_ADDRESS;
+ aa->ap.aph.ph.param_length = sizeof(struct sctp_asconf_addrv4_param);
+ aa->ap.addrp.ph.param_type = SCTP_IPV4_ADDRESS;
+ aa->ap.addrp.ph.param_length = sizeof(struct sctp_ipv4addr_param);
+ /* No need to add an address, we are using 0.0.0.0 */
+ TAILQ_INSERT_TAIL(&stcb->asoc.asconf_queue, aa, next);
+ }
+#ifdef INET6
+ else if (net->ro._l_addr.sa.sa_family == AF_INET6) {
+ aa->ap.aph.ph.param_type = SCTP_DEL_IP_ADDRESS;
+ aa->ap.aph.ph.param_length = sizeof(struct sctp_asconf_addr_param);
+ aa->ap.addrp.ph.param_type = SCTP_IPV6_ADDRESS;
+ aa->ap.addrp.ph.param_length = sizeof(struct sctp_ipv6addr_param);
+ /* No need to add an address, we are using 0.0.0.0 */
+ TAILQ_INSERT_TAIL(&stcb->asoc.asconf_queue, aa, next);
+ }
+#endif /* INET6 */
+ /* Now we must hunt the addresses and add all global addresses */
+ if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
+ struct sctp_vrf *vrf = NULL;
+ struct sctp_ifn *sctp_ifnp;
+ uint32_t vrf_id;
+
+ vrf_id = stcb->sctp_ep->def_vrf_id;
+ vrf = sctp_find_vrf(vrf_id);
+ if (vrf == NULL) {
+ goto skip_rest;
+ }
+ SCTP_IPI_ADDR_RLOCK();
+ LIST_FOREACH(sctp_ifnp, &vrf->ifnlist, next_ifn) {
+ LIST_FOREACH(sctp_ifap, &sctp_ifnp->ifalist, next_ifa) {
+ if (sctp_ifap->address.sa.sa_family == AF_INET) {
+ to = &sctp_ifap->address.sin;
+
+ if (IN4_ISPRIVATE_ADDRESS(&to->sin_addr)) {
+ continue;
+ }
+ if (IN4_ISLOOPBACK_ADDRESS(&to->sin_addr)) {
+ continue;
+ }
+ }
+#ifdef INET6
+ else if (sctp_ifap->address.sa.sa_family == AF_INET6) {
+ to6 = &sctp_ifap->address.sin6;
+ if (IN6_IS_ADDR_LOOPBACK(&to6->sin6_addr)) {
+ continue;
+ }
+ if (IN6_IS_ADDR_LINKLOCAL(&to6->sin6_addr)) {
+ continue;
+ }
+ }
+#endif
+ sctp_asconf_queue_mgmt(stcb, sctp_ifap, SCTP_ADD_IP_ADDRESS);
+ }
+ }
+ SCTP_IPI_ADDR_RUNLOCK();
+ } else {
+ struct sctp_laddr *laddr;
+
+ LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list, sctp_nxt_addr) {
+ if (laddr->ifa == NULL) {
+ continue;
+ }
+ if (laddr->ifa->localifa_flags & SCTP_BEING_DELETED)
+ /*
+				 * Address being deleted by the system, don't
+ * list.
+ */
+ continue;
+ if (laddr->action == SCTP_DEL_IP_ADDRESS) {
+ /*
+				 * Address being deleted on this ep, don't
+ * list.
+ */
+ continue;
+ }
+ sctp_ifap = laddr->ifa;
+ if (sctp_ifap->address.sa.sa_family == AF_INET) {
+ to = &sctp_ifap->address.sin;
+
+ if (IN4_ISPRIVATE_ADDRESS(&to->sin_addr)) {
+ continue;
+ }
+ if (IN4_ISLOOPBACK_ADDRESS(&to->sin_addr)) {
+ continue;
+ }
+ }
+#ifdef INET6
+ else if (sctp_ifap->address.sa.sa_family == AF_INET6) {
+ to6 = &sctp_ifap->address.sin6;
+ if (IN6_IS_ADDR_LOOPBACK(&to6->sin6_addr)) {
+ continue;
+ }
+ if (IN6_IS_ADDR_LINKLOCAL(&to6->sin6_addr)) {
+ continue;
+ }
+ }
+#endif
+ sctp_asconf_queue_mgmt(stcb, sctp_ifap, SCTP_ADD_IP_ADDRESS);
+ }
+ }
+skip_rest:
+ /* Now we must send the asconf into the queue */
+ sctp_send_asconf(stcb, net, 0);
+}
diff --git a/rtems/freebsd/netinet/sctp_asconf.h b/rtems/freebsd/netinet/sctp_asconf.h
new file mode 100644
index 00000000..b6e3ed34
--- /dev/null
+++ b/rtems/freebsd/netinet/sctp_asconf.h
@@ -0,0 +1,96 @@
+/*-
+ * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* $KAME: sctp_asconf.h,v 1.8 2005/03/06 16:04:16 itojun Exp $ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#ifndef _NETINET_SCTP_ASCONF_HH_
+#define _NETINET_SCTP_ASCONF_HH_
+
+#if defined(_KERNEL) || defined(__Userspace__)
+
+/*
+ * function prototypes
+ */
+extern void sctp_asconf_cleanup(struct sctp_tcb *, struct sctp_nets *);
+
+extern struct mbuf *sctp_compose_asconf(struct sctp_tcb *, int *, int);
+
+extern void
+sctp_handle_asconf(struct mbuf *, unsigned int, struct sctp_asconf_chunk *,
+    struct sctp_tcb *, int);
+
+extern void
+sctp_handle_asconf_ack(struct mbuf *, int, struct sctp_asconf_ack_chunk *,
+ struct sctp_tcb *, struct sctp_nets *, int *);
+
+extern uint32_t
+sctp_addr_mgmt_ep_sa(struct sctp_inpcb *, struct sockaddr *,
+ uint32_t, uint32_t, struct sctp_ifa *);
+
+
+extern int
+sctp_asconf_iterator_ep(struct sctp_inpcb *inp, void *ptr,
+ uint32_t val);
+extern void
+sctp_asconf_iterator_stcb(struct sctp_inpcb *inp,
+ struct sctp_tcb *stcb,
+ void *ptr, uint32_t type);
+extern void sctp_asconf_iterator_end(void *ptr, uint32_t val);
+
+
+extern int32_t
+sctp_set_primary_ip_address_sa(struct sctp_tcb *,
+ struct sockaddr *);
+
+extern void
+ sctp_set_primary_ip_address(struct sctp_ifa *ifa);
+
+extern void
+sctp_check_address_list(struct sctp_tcb *, struct mbuf *, int, int,
+ struct sockaddr *, uint16_t, uint16_t, uint16_t, uint16_t);
+
+extern void
+ sctp_assoc_immediate_retrans(struct sctp_tcb *, struct sctp_nets *);
+extern void
+ sctp_net_immediate_retrans(struct sctp_tcb *, struct sctp_nets *);
+
+extern void
+sctp_asconf_send_nat_state_update(struct sctp_tcb *stcb,
+ struct sctp_nets *net);
+
+extern int
+ sctp_is_addr_pending(struct sctp_tcb *, struct sctp_ifa *);
+
+#endif /* _KERNEL || __Userspace__ */
+
+#endif /* !_NETINET_SCTP_ASCONF_HH_ */
diff --git a/rtems/freebsd/netinet/sctp_auth.c b/rtems/freebsd/netinet/sctp_auth.c
new file mode 100644
index 00000000..585cff1e
--- /dev/null
+++ b/rtems/freebsd/netinet/sctp_auth.c
@@ -0,0 +1,2128 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/freebsd/netinet/sctp_os.h>
+#include <rtems/freebsd/netinet/sctp.h>
+#include <rtems/freebsd/netinet/sctp_header.h>
+#include <rtems/freebsd/netinet/sctp_pcb.h>
+#include <rtems/freebsd/netinet/sctp_var.h>
+#include <rtems/freebsd/netinet/sctp_sysctl.h>
+#include <rtems/freebsd/netinet/sctputil.h>
+#include <rtems/freebsd/netinet/sctp_indata.h>
+#include <rtems/freebsd/netinet/sctp_output.h>
+#include <rtems/freebsd/netinet/sctp_auth.h>
+
+#ifdef SCTP_DEBUG
+#define SCTP_AUTH_DEBUG (SCTP_BASE_SYSCTL(sctp_debug_on) & SCTP_DEBUG_AUTH1)
+#define SCTP_AUTH_DEBUG2 (SCTP_BASE_SYSCTL(sctp_debug_on) & SCTP_DEBUG_AUTH2)
+#endif /* SCTP_DEBUG */
+
+
+void
+sctp_clear_chunklist(sctp_auth_chklist_t * chklist)
+{
+ bzero(chklist, sizeof(*chklist));
+	/* note: num_chunks is cleared by the bzero() above */
+}
+
+sctp_auth_chklist_t *
+sctp_alloc_chunklist(void)
+{
+ sctp_auth_chklist_t *chklist;
+
+ SCTP_MALLOC(chklist, sctp_auth_chklist_t *, sizeof(*chklist),
+ SCTP_M_AUTH_CL);
+ if (chklist == NULL) {
+ SCTPDBG(SCTP_DEBUG_AUTH1, "sctp_alloc_chunklist: failed to get memory!\n");
+ } else {
+ sctp_clear_chunklist(chklist);
+ }
+ return (chklist);
+}
+
+void
+sctp_free_chunklist(sctp_auth_chklist_t * list)
+{
+ if (list != NULL)
+ SCTP_FREE(list, SCTP_M_AUTH_CL);
+}
+
+sctp_auth_chklist_t *
+sctp_copy_chunklist(sctp_auth_chklist_t * list)
+{
+ sctp_auth_chklist_t *new_list;
+
+ if (list == NULL)
+ return (NULL);
+
+ /* get a new list */
+ new_list = sctp_alloc_chunklist();
+ if (new_list == NULL)
+ return (NULL);
+ /* copy it */
+ bcopy(list, new_list, sizeof(*new_list));
+
+ return (new_list);
+}
+
+
+/*
+ * add a chunk to the required chunks list
+ */
+int
+sctp_auth_add_chunk(uint8_t chunk, sctp_auth_chklist_t * list)
+{
+ if (list == NULL)
+ return (-1);
+
+ /* is chunk restricted? */
+ if ((chunk == SCTP_INITIATION) ||
+ (chunk == SCTP_INITIATION_ACK) ||
+ (chunk == SCTP_SHUTDOWN_COMPLETE) ||
+ (chunk == SCTP_AUTHENTICATION)) {
+ return (-1);
+ }
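+	/*
+	 * note: per RFC 4895, INIT, INIT-ACK, SHUTDOWN-COMPLETE and AUTH
+	 * itself must never be listed as requiring AUTH, hence the check
+	 * above
+	 */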
+ if (list->chunks[chunk] == 0) {
+ list->chunks[chunk] = 1;
+ list->num_chunks++;
+ SCTPDBG(SCTP_DEBUG_AUTH1,
+ "SCTP: added chunk %u (0x%02x) to Auth list\n",
+ chunk, chunk);
+ }
+ return (0);
+}
+
+/*
+ * delete a chunk from the required chunks list
+ */
+int
+sctp_auth_delete_chunk(uint8_t chunk, sctp_auth_chklist_t * list)
+{
+ if (list == NULL)
+ return (-1);
+
+ /* is chunk restricted? */
+ if ((chunk == SCTP_ASCONF) ||
+ (chunk == SCTP_ASCONF_ACK)) {
+ return (-1);
+ }
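+	/*
+	 * note: ASCONF and ASCONF-ACK must always be authenticated when
+	 * the ASCONF extension is used (RFC 5061), so they cannot be
+	 * removed from the list
+	 */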
+ if (list->chunks[chunk] == 1) {
+ list->chunks[chunk] = 0;
+ list->num_chunks--;
+ SCTPDBG(SCTP_DEBUG_AUTH1,
+ "SCTP: deleted chunk %u (0x%02x) from Auth list\n",
+ chunk, chunk);
+ }
+ return (0);
+}
+
+size_t
+sctp_auth_get_chklist_size(const sctp_auth_chklist_t * list)
+{
+ if (list == NULL)
+ return (0);
+ else
+ return (list->num_chunks);
+}
+
+/*
+ * set the default list of chunks requiring AUTH
+ */
+void
+sctp_auth_set_default_chunks(sctp_auth_chklist_t * list)
+{
+ (void)sctp_auth_add_chunk(SCTP_ASCONF, list);
+ (void)sctp_auth_add_chunk(SCTP_ASCONF_ACK, list);
+}
+
+/*
+ * return the current number and list of required chunks; the caller must
+ * guarantee that ptr has space for up to 256 bytes
+ */
+int
+sctp_serialize_auth_chunks(const sctp_auth_chklist_t * list, uint8_t * ptr)
+{
+ int i, count = 0;
+
+ if (list == NULL)
+ return (0);
+
+ for (i = 0; i < 256; i++) {
+ if (list->chunks[i] != 0) {
+ *ptr++ = i;
+ count++;
+ }
+ }
+ return (count);
+}
+
+int
+sctp_pack_auth_chunks(const sctp_auth_chklist_t * list, uint8_t * ptr)
+{
+ int i, size = 0;
+
+ if (list == NULL)
+ return (0);
+
+ if (list->num_chunks <= 32) {
+ /* just list them, one byte each */
+ for (i = 0; i < 256; i++) {
+ if (list->chunks[i] != 0) {
+ *ptr++ = i;
+ size++;
+ }
+ }
+ } else {
+ int index, offset;
+
+ /* pack into a 32 byte bitfield */
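+		/*
+		 * e.g. chunk type 0x0a (10) maps to byte 10 / 8 = 1,
+		 * bit 10 % 8 = 2
+		 */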
+ for (i = 0; i < 256; i++) {
+ if (list->chunks[i] != 0) {
+ index = i / 8;
+ offset = i % 8;
+ ptr[index] |= (1 << offset);
+ }
+ }
+ size = 32;
+ }
+ return (size);
+}
+
+int
+sctp_unpack_auth_chunks(const uint8_t * ptr, uint8_t num_chunks,
+ sctp_auth_chklist_t * list)
+{
+ int i;
+ int size;
+
+ if (list == NULL)
+ return (0);
+
+ if (num_chunks <= 32) {
+ /* just pull them, one byte each */
+ for (i = 0; i < num_chunks; i++) {
+ (void)sctp_auth_add_chunk(*ptr++, list);
+ }
+ size = num_chunks;
+ } else {
+ int index, offset;
+
+ /* unpack from a 32 byte bitfield */
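+		/*
+		 * e.g. a set bit at index 1, offset 2 yields chunk type
+		 * (1 * 8) + 2 = 10 (0x0a)
+		 */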
+ for (index = 0; index < 32; index++) {
+ for (offset = 0; offset < 8; offset++) {
+ if (ptr[index] & (1 << offset)) {
+ (void)sctp_auth_add_chunk((index * 8) + offset, list);
+ }
+ }
+ }
+ size = 32;
+ }
+ return (size);
+}
+
+
+/*
+ * allocate structure space for a key of length keylen
+ */
+sctp_key_t *
+sctp_alloc_key(uint32_t keylen)
+{
+ sctp_key_t *new_key;
+
+ SCTP_MALLOC(new_key, sctp_key_t *, sizeof(*new_key) + keylen,
+ SCTP_M_AUTH_KY);
+ if (new_key == NULL) {
+ /* out of memory */
+ return (NULL);
+ }
+ new_key->keylen = keylen;
+ return (new_key);
+}
+
+void
+sctp_free_key(sctp_key_t * key)
+{
+ if (key != NULL)
+ SCTP_FREE(key, SCTP_M_AUTH_KY);
+}
+
+void
+sctp_print_key(sctp_key_t * key, const char *str)
+{
+ uint32_t i;
+
+ if (key == NULL) {
+ printf("%s: [Null key]\n", str);
+ return;
+ }
+ printf("%s: len %u, ", str, key->keylen);
+ if (key->keylen) {
+ for (i = 0; i < key->keylen; i++)
+ printf("%02x", key->key[i]);
+ printf("\n");
+ } else {
+ printf("[Null key]\n");
+ }
+}
+
+void
+sctp_show_key(sctp_key_t * key, const char *str)
+{
+ uint32_t i;
+
+ if (key == NULL) {
+ printf("%s: [Null key]\n", str);
+ return;
+ }
+ printf("%s: len %u, ", str, key->keylen);
+ if (key->keylen) {
+ for (i = 0; i < key->keylen; i++)
+ printf("%02x", key->key[i]);
+ printf("\n");
+ } else {
+ printf("[Null key]\n");
+ }
+}
+
+static uint32_t
+sctp_get_keylen(sctp_key_t * key)
+{
+ if (key != NULL)
+ return (key->keylen);
+ else
+ return (0);
+}
+
+/*
+ * generate a new random key of length 'keylen'
+ */
+sctp_key_t *
+sctp_generate_random_key(uint32_t keylen)
+{
+ sctp_key_t *new_key;
+
+ /* validate keylen */
+ if (keylen > SCTP_AUTH_RANDOM_SIZE_MAX)
+ keylen = SCTP_AUTH_RANDOM_SIZE_MAX;
+
+ new_key = sctp_alloc_key(keylen);
+ if (new_key == NULL) {
+ /* out of memory */
+ return (NULL);
+ }
+ SCTP_READ_RANDOM(new_key->key, keylen);
+ new_key->keylen = keylen;
+ return (new_key);
+}
+
+sctp_key_t *
+sctp_set_key(uint8_t * key, uint32_t keylen)
+{
+ sctp_key_t *new_key;
+
+ new_key = sctp_alloc_key(keylen);
+ if (new_key == NULL) {
+ /* out of memory */
+ return (NULL);
+ }
+ bcopy(key, new_key->key, keylen);
+ return (new_key);
+}
+
+/*-
+ * given two keys of variable size, compute which key is "larger/smaller"
+ * returns: 1 if key1 > key2
+ * -1 if key1 < key2
+ * 0 if key1 = key2
+ */
+static int
+sctp_compare_key(sctp_key_t * key1, sctp_key_t * key2)
+{
+ uint32_t maxlen;
+ uint32_t i;
+ uint32_t key1len, key2len;
+ uint8_t *key_1, *key_2;
+ uint8_t temp[SCTP_AUTH_RANDOM_SIZE_MAX];
+
+ /* sanity/length check */
+ key1len = sctp_get_keylen(key1);
+ key2len = sctp_get_keylen(key2);
+ if ((key1len == 0) && (key2len == 0))
+ return (0);
+ else if (key1len == 0)
+ return (-1);
+ else if (key2len == 0)
+ return (1);
+
+ if (key1len != key2len) {
+ if (key1len >= key2len)
+ maxlen = key1len;
+ else
+ maxlen = key2len;
+ bzero(temp, maxlen);
+ if (key1len < maxlen) {
+ /* prepend zeroes to key1 */
+ bcopy(key1->key, temp + (maxlen - key1len), key1len);
+ key_1 = temp;
+ key_2 = key2->key;
+ } else {
+ /* prepend zeroes to key2 */
+ bcopy(key2->key, temp + (maxlen - key2len), key2len);
+ key_1 = key1->key;
+ key_2 = temp;
+ }
+ } else {
+ maxlen = key1len;
+ key_1 = key1->key;
+ key_2 = key2->key;
+ }
+
+ for (i = 0; i < maxlen; i++) {
+ if (*key_1 > *key_2)
+ return (1);
+ else if (*key_1 < *key_2)
+ return (-1);
+ key_1++;
+ key_2++;
+ }
+
+ /* keys are equal value, so check lengths */
+ if (key1len == key2len)
+ return (0);
+ else if (key1len < key2len)
+ return (-1);
+ else
+ return (1);
+}
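+
+/*-
+ * Example of the comparison above: {0x01} vs. {0x00, 0x02} pads the
+ * shorter key to {0x00, 0x01}, which compares byte-wise below
+ * {0x00, 0x02}, so -1 is returned. Keys that are equal after padding,
+ * e.g. {0x02} vs. {0x00, 0x02}, are ordered by length, shorter first.
+ */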
+
+/*
+ * generate the concatenated keying material based on the two keys and the
+ * shared key (if available). draft-ietf-tsvwg-auth (later published as
+ * RFC 4895) specifies the concatenation order
+ */
+sctp_key_t *
+sctp_compute_hashkey(sctp_key_t * key1, sctp_key_t * key2, sctp_key_t * shared)
+{
+ uint32_t keylen;
+ sctp_key_t *new_key;
+ uint8_t *key_ptr;
+
+ keylen = sctp_get_keylen(key1) + sctp_get_keylen(key2) +
+ sctp_get_keylen(shared);
+
+ if (keylen > 0) {
+ /* get space for the new key */
+ new_key = sctp_alloc_key(keylen);
+ if (new_key == NULL) {
+ /* out of memory */
+ return (NULL);
+ }
+ new_key->keylen = keylen;
+ key_ptr = new_key->key;
+ } else {
+ /* all keys empty/null?! */
+ return (NULL);
+ }
+
+ /* concatenate the keys */
+ if (sctp_compare_key(key1, key2) <= 0) {
+ /* key is shared + key1 + key2 */
+ if (sctp_get_keylen(shared)) {
+ bcopy(shared->key, key_ptr, shared->keylen);
+ key_ptr += shared->keylen;
+ }
+ if (sctp_get_keylen(key1)) {
+ bcopy(key1->key, key_ptr, key1->keylen);
+ key_ptr += key1->keylen;
+ }
+ if (sctp_get_keylen(key2)) {
+ bcopy(key2->key, key_ptr, key2->keylen);
+ key_ptr += key2->keylen;
+ }
+ } else {
+ /* key is shared + key2 + key1 */
+ if (sctp_get_keylen(shared)) {
+ bcopy(shared->key, key_ptr, shared->keylen);
+ key_ptr += shared->keylen;
+ }
+ if (sctp_get_keylen(key2)) {
+ bcopy(key2->key, key_ptr, key2->keylen);
+ key_ptr += key2->keylen;
+ }
+ if (sctp_get_keylen(key1)) {
+ bcopy(key1->key, key_ptr, key1->keylen);
+ key_ptr += key1->keylen;
+ }
+ }
+ return (new_key);
+}
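+
+/*-
+ * Note: ordering the two endpoint keys numerically before concatenation
+ * (via sctp_compare_key) means both peers derive the same association
+ * key, regardless of which side treats its own random as "key1".
+ */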
+
+
+sctp_sharedkey_t *
+sctp_alloc_sharedkey(void)
+{
+ sctp_sharedkey_t *new_key;
+
+ SCTP_MALLOC(new_key, sctp_sharedkey_t *, sizeof(*new_key),
+ SCTP_M_AUTH_KY);
+ if (new_key == NULL) {
+ /* out of memory */
+ return (NULL);
+ }
+ new_key->keyid = 0;
+ new_key->key = NULL;
+ new_key->refcount = 1;
+ new_key->deactivated = 0;
+ return (new_key);
+}
+
+void
+sctp_free_sharedkey(sctp_sharedkey_t * skey)
+{
+ if (skey == NULL)
+ return;
+
+ if (SCTP_DECREMENT_AND_CHECK_REFCOUNT(&skey->refcount)) {
+ if (skey->key != NULL)
+ sctp_free_key(skey->key);
+ SCTP_FREE(skey, SCTP_M_AUTH_KY);
+ }
+}
+
+sctp_sharedkey_t *
+sctp_find_sharedkey(struct sctp_keyhead *shared_keys, uint16_t key_id)
+{
+ sctp_sharedkey_t *skey;
+
+ LIST_FOREACH(skey, shared_keys, next) {
+ if (skey->keyid == key_id)
+ return (skey);
+ }
+ return (NULL);
+}
+
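+/*-
+ * insert a new shared key into the list, kept sorted by key id; an
+ * existing key with the same id is replaced, unless it is deactivated
+ * and still referenced. returns 0 on success, else EINVAL or EBUSY.
+ */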
+int
+sctp_insert_sharedkey(struct sctp_keyhead *shared_keys,
+ sctp_sharedkey_t * new_skey)
+{
+ sctp_sharedkey_t *skey;
+
+ if ((shared_keys == NULL) || (new_skey == NULL))
+ return (EINVAL);
+
+ /* insert into an empty list? */
+ if (LIST_EMPTY(shared_keys)) {
+ LIST_INSERT_HEAD(shared_keys, new_skey, next);
+ return (0);
+ }
+ /* insert into the existing list, ordered by key id */
+ LIST_FOREACH(skey, shared_keys, next) {
+ if (new_skey->keyid < skey->keyid) {
+ /* insert it before here */
+ LIST_INSERT_BEFORE(skey, new_skey, next);
+ return (0);
+ } else if (new_skey->keyid == skey->keyid) {
+ /* replace the existing key */
+ /* verify this key *can* be replaced */
+ if ((skey->deactivated) && (skey->refcount > 1)) {
+ SCTPDBG(SCTP_DEBUG_AUTH1,
+ "can't replace shared key id %u\n",
+ new_skey->keyid);
+ return (EBUSY);
+ }
+ SCTPDBG(SCTP_DEBUG_AUTH1,
+ "replacing shared key id %u\n",
+ new_skey->keyid);
+ LIST_INSERT_BEFORE(skey, new_skey, next);
+ LIST_REMOVE(skey, next);
+ sctp_free_sharedkey(skey);
+ return (0);
+ }
+ if (LIST_NEXT(skey, next) == NULL) {
+ /* belongs at the end of the list */
+ LIST_INSERT_AFTER(skey, new_skey, next);
+ return (0);
+ }
+ }
+ /* shouldn't reach here */
+ return (0);
+}
+
+void
+sctp_auth_key_acquire(struct sctp_tcb *stcb, uint16_t key_id)
+{
+ sctp_sharedkey_t *skey;
+
+ /* find the shared key */
+ skey = sctp_find_sharedkey(&stcb->asoc.shared_keys, key_id);
+
+ /* bump the ref count */
+ if (skey) {
+ atomic_add_int(&skey->refcount, 1);
+ SCTPDBG(SCTP_DEBUG_AUTH2,
+ "%s: stcb %p key %u refcount acquire to %d\n",
+ __FUNCTION__, stcb, key_id, skey->refcount);
+ }
+}
+
+void
+sctp_auth_key_release(struct sctp_tcb *stcb, uint16_t key_id)
+{
+ sctp_sharedkey_t *skey;
+
+ /* find the shared key */
+ skey = sctp_find_sharedkey(&stcb->asoc.shared_keys, key_id);
+
+ /* decrement the ref count */
+ if (skey) {
+ sctp_free_sharedkey(skey);
+ SCTPDBG(SCTP_DEBUG_AUTH2,
+ "%s: stcb %p key %u refcount release to %d\n",
+ __FUNCTION__, stcb, key_id, skey->refcount);
+
+ /* see if a notification should be generated */
+ if ((skey->refcount <= 1) && (skey->deactivated)) {
+ /* notify ULP that key is no longer used */
+ sctp_ulp_notify(SCTP_NOTIFY_AUTH_FREE_KEY, stcb,
+ key_id, 0, SCTP_SO_LOCKED);
+ SCTPDBG(SCTP_DEBUG_AUTH2,
+ "%s: stcb %p key %u no longer used, %d\n",
+ __FUNCTION__, stcb, key_id, skey->refcount);
+ }
+ }
+}
+
+static sctp_sharedkey_t *
+sctp_copy_sharedkey(const sctp_sharedkey_t * skey)
+{
+ sctp_sharedkey_t *new_skey;
+
+ if (skey == NULL)
+ return (NULL);
+ new_skey = sctp_alloc_sharedkey();
+ if (new_skey == NULL)
+ return (NULL);
+ if (skey->key != NULL)
+ new_skey->key = sctp_set_key(skey->key->key, skey->key->keylen);
+ else
+ new_skey->key = NULL;
+ new_skey->keyid = skey->keyid;
+ return (new_skey);
+}
+
+int
+sctp_copy_skeylist(const struct sctp_keyhead *src, struct sctp_keyhead *dest)
+{
+ sctp_sharedkey_t *skey, *new_skey;
+ int count = 0;
+
+ if ((src == NULL) || (dest == NULL))
+ return (0);
+ LIST_FOREACH(skey, src, next) {
+ new_skey = sctp_copy_sharedkey(skey);
+ if (new_skey != NULL) {
+ (void)sctp_insert_sharedkey(dest, new_skey);
+ count++;
+ }
+ }
+ return (count);
+}
+
+
+sctp_hmaclist_t *
+sctp_alloc_hmaclist(uint8_t num_hmacs)
+{
+ sctp_hmaclist_t *new_list;
+ int alloc_size;
+
+ alloc_size = sizeof(*new_list) + num_hmacs * sizeof(new_list->hmac[0]);
+ SCTP_MALLOC(new_list, sctp_hmaclist_t *, alloc_size,
+ SCTP_M_AUTH_HL);
+ if (new_list == NULL) {
+ /* out of memory */
+ return (NULL);
+ }
+ new_list->max_algo = num_hmacs;
+ new_list->num_algo = 0;
+ return (new_list);
+}
+
+void
+sctp_free_hmaclist(sctp_hmaclist_t * list)
+{
+ if (list != NULL) {
+ SCTP_FREE(list, SCTP_M_AUTH_HL);
+ list = NULL;
+ }
+}
+
+int
+sctp_auth_add_hmacid(sctp_hmaclist_t * list, uint16_t hmac_id)
+{
+ int i;
+
+ if (list == NULL)
+ return (-1);
+ if (list->num_algo == list->max_algo) {
+ SCTPDBG(SCTP_DEBUG_AUTH1,
+ "SCTP: HMAC id list full, ignoring add %u\n", hmac_id);
+ return (-1);
+ }
+ if ((hmac_id != SCTP_AUTH_HMAC_ID_SHA1) &&
+#ifdef HAVE_SHA224
+ (hmac_id != SCTP_AUTH_HMAC_ID_SHA224) &&
+#endif
+#ifdef HAVE_SHA2
+ (hmac_id != SCTP_AUTH_HMAC_ID_SHA256) &&
+ (hmac_id != SCTP_AUTH_HMAC_ID_SHA384) &&
+ (hmac_id != SCTP_AUTH_HMAC_ID_SHA512) &&
+#endif
+ 1) {
+ return (-1);
+ }
+ /* now, is it already in the list? */
+ for (i = 0; i < list->num_algo; i++) {
+ if (list->hmac[i] == hmac_id) {
+ /* already in list */
+ return (-1);
+ }
+ }
+ SCTPDBG(SCTP_DEBUG_AUTH1, "SCTP: add HMAC id %u to list\n", hmac_id);
+ list->hmac[list->num_algo++] = hmac_id;
+ return (0);
+}
+
+sctp_hmaclist_t *
+sctp_copy_hmaclist(sctp_hmaclist_t * list)
+{
+ sctp_hmaclist_t *new_list;
+ int i;
+
+ if (list == NULL)
+ return (NULL);
+ /* get a new list */
+ new_list = sctp_alloc_hmaclist(list->max_algo);
+ if (new_list == NULL)
+ return (NULL);
+ /* copy it */
+ new_list->max_algo = list->max_algo;
+ new_list->num_algo = list->num_algo;
+ for (i = 0; i < list->num_algo; i++)
+ new_list->hmac[i] = list->hmac[i];
+ return (new_list);
+}
+
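+/*-
+ * build the default supported HMAC list: SHA-1 plus SHA-256. note that
+ * the SHA-256 add is quietly rejected by sctp_auth_add_hmacid() when
+ * HAVE_SHA2 is not defined, leaving SHA-1 alone in the list.
+ */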
+sctp_hmaclist_t *
+sctp_default_supported_hmaclist(void)
+{
+ sctp_hmaclist_t *new_list;
+
+ new_list = sctp_alloc_hmaclist(2);
+ if (new_list == NULL)
+ return (NULL);
+ (void)sctp_auth_add_hmacid(new_list, SCTP_AUTH_HMAC_ID_SHA1);
+ (void)sctp_auth_add_hmacid(new_list, SCTP_AUTH_HMAC_ID_SHA256);
+ return (new_list);
+}
+
+/*-
+ * HMAC algos are listed in priority/preference order
+ * find the best HMAC id to use for the peer based on local support
+ */
+uint16_t
+sctp_negotiate_hmacid(sctp_hmaclist_t * peer, sctp_hmaclist_t * local)
+{
+ int i, j;
+
+ if ((local == NULL) || (peer == NULL))
+ return (SCTP_AUTH_HMAC_ID_RSVD);
+
+ for (i = 0; i < peer->num_algo; i++) {
+ for (j = 0; j < local->num_algo; j++) {
+ if (peer->hmac[i] == local->hmac[j]) {
+ /* found the "best" one */
+ SCTPDBG(SCTP_DEBUG_AUTH1,
+ "SCTP: negotiated peer HMAC id %u\n",
+ peer->hmac[i]);
+ return (peer->hmac[i]);
+ }
+ }
+ }
+ /* didn't find one! */
+ return (SCTP_AUTH_HMAC_ID_RSVD);
+}
+
+/*-
+ * serialize the HMAC algo list and return space used
+ * caller must guarantee ptr has appropriate space
+ */
+int
+sctp_serialize_hmaclist(sctp_hmaclist_t * list, uint8_t * ptr)
+{
+ int i;
+ uint16_t hmac_id;
+
+ if (list == NULL)
+ return (0);
+
+ for (i = 0; i < list->num_algo; i++) {
+ hmac_id = htons(list->hmac[i]);
+ bcopy(&hmac_id, ptr, sizeof(hmac_id));
+ ptr += sizeof(hmac_id);
+ }
+ return (list->num_algo * sizeof(hmac_id));
+}
+
+int
+sctp_verify_hmac_param(struct sctp_auth_hmac_algo *hmacs, uint32_t num_hmacs)
+{
+ uint32_t i;
+ uint16_t hmac_id;
+ uint32_t sha1_supported = 0;
+
+ for (i = 0; i < num_hmacs; i++) {
+ hmac_id = ntohs(hmacs->hmac_ids[i]);
+ if (hmac_id == SCTP_AUTH_HMAC_ID_SHA1)
+ sha1_supported = 1;
+ }
+ /* HMAC-SHA1, the mandatory algorithm, must be listed */
+ if (sha1_supported == 0)
+ return (-1);
+ else
+ return (0);
+}
+
+sctp_authinfo_t *
+sctp_alloc_authinfo(void)
+{
+ sctp_authinfo_t *new_authinfo;
+
+ SCTP_MALLOC(new_authinfo, sctp_authinfo_t *, sizeof(*new_authinfo),
+ SCTP_M_AUTH_IF);
+
+ if (new_authinfo == NULL) {
+ /* out of memory */
+ return (NULL);
+ }
+ bzero(new_authinfo, sizeof(*new_authinfo));
+ return (new_authinfo);
+}
+
+void
+sctp_free_authinfo(sctp_authinfo_t * authinfo)
+{
+ if (authinfo == NULL)
+ return;
+
+ if (authinfo->random != NULL)
+ sctp_free_key(authinfo->random);
+ if (authinfo->peer_random != NULL)
+ sctp_free_key(authinfo->peer_random);
+ if (authinfo->assoc_key != NULL)
+ sctp_free_key(authinfo->assoc_key);
+ if (authinfo->recv_key != NULL)
+ sctp_free_key(authinfo->recv_key);
+
+ /* We are NOT dynamically allocating authinfo's right now... */
+ /* SCTP_FREE(authinfo, SCTP_M_AUTH_??); */
+}
+
+
+uint32_t
+sctp_get_auth_chunk_len(uint16_t hmac_algo)
+{
+ int size;
+
+ size = sizeof(struct sctp_auth_chunk) + sctp_get_hmac_digest_len(hmac_algo);
+ return (SCTP_SIZE32(size));
+}
+
+uint32_t
+sctp_get_hmac_digest_len(uint16_t hmac_algo)
+{
+ switch (hmac_algo) {
+ case SCTP_AUTH_HMAC_ID_SHA1:
+ return (SCTP_AUTH_DIGEST_LEN_SHA1);
+#ifdef HAVE_SHA224
+ case SCTP_AUTH_HMAC_ID_SHA224:
+ return (SCTP_AUTH_DIGEST_LEN_SHA224);
+#endif
+#ifdef HAVE_SHA2
+ case SCTP_AUTH_HMAC_ID_SHA256:
+ return (SCTP_AUTH_DIGEST_LEN_SHA256);
+ case SCTP_AUTH_HMAC_ID_SHA384:
+ return (SCTP_AUTH_DIGEST_LEN_SHA384);
+ case SCTP_AUTH_HMAC_ID_SHA512:
+ return (SCTP_AUTH_DIGEST_LEN_SHA512);
+#endif
+ default:
+ /* unknown HMAC algorithm: can't do anything */
+ return (0);
+ } /* end switch */
+}
+
+static inline int
+sctp_get_hmac_block_len(uint16_t hmac_algo)
+{
+ switch (hmac_algo) {
+ case SCTP_AUTH_HMAC_ID_SHA1:
+#ifdef HAVE_SHA224
+ case SCTP_AUTH_HMAC_ID_SHA224:
+#endif
+ return (64);
+#ifdef HAVE_SHA2
+ case SCTP_AUTH_HMAC_ID_SHA256:
+ return (64);
+ case SCTP_AUTH_HMAC_ID_SHA384:
+ case SCTP_AUTH_HMAC_ID_SHA512:
+ return (128);
+#endif
+ case SCTP_AUTH_HMAC_ID_RSVD:
+ default:
+ /* unknown HMAC algorithm: can't do anything */
+ return (0);
+ } /* end switch */
+}
+
+static void
+sctp_hmac_init(uint16_t hmac_algo, sctp_hash_context_t * ctx)
+{
+ switch (hmac_algo) {
+ case SCTP_AUTH_HMAC_ID_SHA1:
+ SHA1_Init(&ctx->sha1);
+ break;
+#ifdef HAVE_SHA224
+ case SCTP_AUTH_HMAC_ID_SHA224:
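+ /* note: no SHA-224 context operations are wired up in this file */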
+ break;
+#endif
+#ifdef HAVE_SHA2
+ case SCTP_AUTH_HMAC_ID_SHA256:
+ SHA256_Init(&ctx->sha256);
+ break;
+ case SCTP_AUTH_HMAC_ID_SHA384:
+ SHA384_Init(&ctx->sha384);
+ break;
+ case SCTP_AUTH_HMAC_ID_SHA512:
+ SHA512_Init(&ctx->sha512);
+ break;
+#endif
+ case SCTP_AUTH_HMAC_ID_RSVD:
+ default:
+ /* unknown HMAC algorithm: can't do anything */
+ return;
+ } /* end switch */
+}
+
+static void
+sctp_hmac_update(uint16_t hmac_algo, sctp_hash_context_t * ctx,
+ uint8_t * text, uint32_t textlen)
+{
+ switch (hmac_algo) {
+ case SCTP_AUTH_HMAC_ID_SHA1:
+ SHA1_Update(&ctx->sha1, text, textlen);
+ break;
+#ifdef HAVE_SHA224
+ case SCTP_AUTH_HMAC_ID_SHA224:
+ break;
+#endif
+#ifdef HAVE_SHA2
+ case SCTP_AUTH_HMAC_ID_SHA256:
+ SHA256_Update(&ctx->sha256, text, textlen);
+ break;
+ case SCTP_AUTH_HMAC_ID_SHA384:
+ SHA384_Update(&ctx->sha384, text, textlen);
+ break;
+ case SCTP_AUTH_HMAC_ID_SHA512:
+ SHA512_Update(&ctx->sha512, text, textlen);
+ break;
+#endif
+ case SCTP_AUTH_HMAC_ID_RSVD:
+ default:
+ /* unknown HMAC algorithm: can't do anything */
+ return;
+ } /* end switch */
+}
+
+static void
+sctp_hmac_final(uint16_t hmac_algo, sctp_hash_context_t * ctx,
+ uint8_t * digest)
+{
+ switch (hmac_algo) {
+ case SCTP_AUTH_HMAC_ID_SHA1:
+ SHA1_Final(digest, &ctx->sha1);
+ break;
+#ifdef HAVE_SHA224
+ case SCTP_AUTH_HMAC_ID_SHA224:
+ break;
+#endif
+#ifdef HAVE_SHA2
+ case SCTP_AUTH_HMAC_ID_SHA256:
+ SHA256_Final(digest, &ctx->sha256);
+ break;
+ case SCTP_AUTH_HMAC_ID_SHA384:
+ /* SHA384 is truncated SHA512 */
+ SHA384_Final(digest, &ctx->sha384);
+ break;
+ case SCTP_AUTH_HMAC_ID_SHA512:
+ SHA512_Final(digest, &ctx->sha512);
+ break;
+#endif
+ case SCTP_AUTH_HMAC_ID_RSVD:
+ default:
+ /* unknown HMAC algorithm: can't do anything */
+ return;
+ } /* end switch */
+}
+
+/*-
+ * Keyed-Hashing for Message Authentication: FIPS 198 (RFC 2104)
+ *
+ * Compute the HMAC digest using the desired hash key, text, and HMAC
+ * algorithm. Resulting digest is placed in 'digest' and digest length
+ * is returned, if the HMAC was performed.
+ *
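+ * Per RFC 2104, the digest computed below is
+ *   H((K' XOR opad) || H((K' XOR ipad) || text))
+ * where K' is the key zero-padded to the hash block size (after first
+ * being hashed down, if longer than one block).
+ *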
+ * WARNING: it is up to the caller to supply sufficient space to hold the
+ * resultant digest.
+ */
+uint32_t
+sctp_hmac(uint16_t hmac_algo, uint8_t * key, uint32_t keylen,
+ uint8_t * text, uint32_t textlen, uint8_t * digest)
+{
+ uint32_t digestlen;
+ uint32_t blocklen;
+ sctp_hash_context_t ctx;
+ uint8_t ipad[128], opad[128]; /* keyed hash inner/outer pads */
+ uint8_t temp[SCTP_AUTH_DIGEST_LEN_MAX];
+ uint32_t i;
+
+ /* sanity check the material and length */
+ if ((key == NULL) || (keylen == 0) || (text == NULL) ||
+ (textlen == 0) || (digest == NULL)) {
+ /* can't do HMAC with empty key or text or digest store */
+ return (0);
+ }
+ /* validate the hmac algo and get the digest length */
+ digestlen = sctp_get_hmac_digest_len(hmac_algo);
+ if (digestlen == 0)
+ return (0);
+
+ /* hash the key if it is longer than the hash block size */
+ blocklen = sctp_get_hmac_block_len(hmac_algo);
+ if (keylen > blocklen) {
+ sctp_hmac_init(hmac_algo, &ctx);
+ sctp_hmac_update(hmac_algo, &ctx, key, keylen);
+ sctp_hmac_final(hmac_algo, &ctx, temp);
+ /* set the hashed key as the key */
+ keylen = digestlen;
+ key = temp;
+ }
+ /* initialize the inner/outer pads with the key and "append" zeroes */
+ bzero(ipad, blocklen);
+ bzero(opad, blocklen);
+ bcopy(key, ipad, keylen);
+ bcopy(key, opad, keylen);
+
+ /* XOR the key with ipad and opad values */
+ for (i = 0; i < blocklen; i++) {
+ ipad[i] ^= 0x36;
+ opad[i] ^= 0x5c;
+ }
+
+ /* perform inner hash */
+ sctp_hmac_init(hmac_algo, &ctx);
+ sctp_hmac_update(hmac_algo, &ctx, ipad, blocklen);
+ sctp_hmac_update(hmac_algo, &ctx, text, textlen);
+ sctp_hmac_final(hmac_algo, &ctx, temp);
+
+ /* perform outer hash */
+ sctp_hmac_init(hmac_algo, &ctx);
+ sctp_hmac_update(hmac_algo, &ctx, opad, blocklen);
+ sctp_hmac_update(hmac_algo, &ctx, temp, digestlen);
+ sctp_hmac_final(hmac_algo, &ctx, digest);
+
+ return (digestlen);
+}
+
+/* mbuf version: the last 'trailer' bytes of the final mbuf are excluded
+ * from the hashed text */
+uint32_t
+sctp_hmac_m(uint16_t hmac_algo, uint8_t * key, uint32_t keylen,
+ struct mbuf *m, uint32_t m_offset, uint8_t * digest, uint32_t trailer)
+{
+ uint32_t digestlen;
+ uint32_t blocklen;
+ sctp_hash_context_t ctx;
+ uint8_t ipad[128], opad[128]; /* keyed hash inner/outer pads */
+ uint8_t temp[SCTP_AUTH_DIGEST_LEN_MAX];
+ uint32_t i;
+ struct mbuf *m_tmp;
+
+ /* sanity check the material and length */
+ if ((key == NULL) || (keylen == 0) || (m == NULL) || (digest == NULL)) {
+ /* can't do HMAC with empty key or text or digest store */
+ return (0);
+ }
+ /* validate the hmac algo and get the digest length */
+ digestlen = sctp_get_hmac_digest_len(hmac_algo);
+ if (digestlen == 0)
+ return (0);
+
+ /* hash the key if it is longer than the hash block size */
+ blocklen = sctp_get_hmac_block_len(hmac_algo);
+ if (keylen > blocklen) {
+ sctp_hmac_init(hmac_algo, &ctx);
+ sctp_hmac_update(hmac_algo, &ctx, key, keylen);
+ sctp_hmac_final(hmac_algo, &ctx, temp);
+ /* set the hashed key as the key */
+ keylen = digestlen;
+ key = temp;
+ }
+ /* initialize the inner/outer pads with the key and "append" zeroes */
+ bzero(ipad, blocklen);
+ bzero(opad, blocklen);
+ bcopy(key, ipad, keylen);
+ bcopy(key, opad, keylen);
+
+ /* XOR the key with ipad and opad values */
+ for (i = 0; i < blocklen; i++) {
+ ipad[i] ^= 0x36;
+ opad[i] ^= 0x5c;
+ }
+
+ /* perform inner hash */
+ sctp_hmac_init(hmac_algo, &ctx);
+ sctp_hmac_update(hmac_algo, &ctx, ipad, blocklen);
+ /* find the correct starting mbuf and offset (get start of text) */
+ m_tmp = m;
+ while ((m_tmp != NULL) && (m_offset >= (uint32_t) SCTP_BUF_LEN(m_tmp))) {
+ m_offset -= SCTP_BUF_LEN(m_tmp);
+ m_tmp = SCTP_BUF_NEXT(m_tmp);
+ }
+ /* now use the rest of the mbuf chain for the text */
+ while (m_tmp != NULL) {
+ if ((SCTP_BUF_NEXT(m_tmp) == NULL) && trailer) {
+ sctp_hmac_update(hmac_algo, &ctx, mtod(m_tmp, uint8_t *) + m_offset,
+ SCTP_BUF_LEN(m_tmp) - (trailer + m_offset));
+ } else {
+ sctp_hmac_update(hmac_algo, &ctx, mtod(m_tmp, uint8_t *) + m_offset,
+ SCTP_BUF_LEN(m_tmp) - m_offset);
+ }
+
+ /* clear the offset since it's only for the first mbuf */
+ m_offset = 0;
+ m_tmp = SCTP_BUF_NEXT(m_tmp);
+ }
+ sctp_hmac_final(hmac_algo, &ctx, temp);
+
+ /* perform outer hash */
+ sctp_hmac_init(hmac_algo, &ctx);
+ sctp_hmac_update(hmac_algo, &ctx, opad, blocklen);
+ sctp_hmac_update(hmac_algo, &ctx, temp, digestlen);
+ sctp_hmac_final(hmac_algo, &ctx, digest);
+
+ return (digestlen);
+}
+
+/*-
+ * verify the HMAC digest using the desired hash key, text, and HMAC
+ * algorithm.
+ * Returns -1 on error, 0 on success.
+ */
+int
+sctp_verify_hmac(uint16_t hmac_algo, uint8_t * key, uint32_t keylen,
+ uint8_t * text, uint32_t textlen,
+ uint8_t * digest, uint32_t digestlen)
+{
+ uint32_t len;
+ uint8_t temp[SCTP_AUTH_DIGEST_LEN_MAX];
+
+ /* sanity check the material and length */
+ if ((key == NULL) || (keylen == 0) ||
+ (text == NULL) || (textlen == 0) || (digest == NULL)) {
+ /* can't do HMAC with empty key or text or digest */
+ return (-1);
+ }
+ len = sctp_get_hmac_digest_len(hmac_algo);
+ if ((len == 0) || (digestlen != len))
+ return (-1);
+
+ /* compute the expected hash */
+ if (sctp_hmac(hmac_algo, key, keylen, text, textlen, temp) != len)
+ return (-1);
+
+ if (memcmp(digest, temp, digestlen) != 0)
+ return (-1);
+ else
+ return (0);
+}
+
+
+/*
+ * computes the requested HMAC using a key struct (which may be modified if
+ * the keylen exceeds the HMAC block len).
+ */
+uint32_t
+sctp_compute_hmac(uint16_t hmac_algo, sctp_key_t * key, uint8_t * text,
+ uint32_t textlen, uint8_t * digest)
+{
+ uint32_t digestlen;
+ uint32_t blocklen;
+ sctp_hash_context_t ctx;
+ uint8_t temp[SCTP_AUTH_DIGEST_LEN_MAX];
+
+ /* sanity check */
+ if ((key == NULL) || (text == NULL) || (textlen == 0) ||
+ (digest == NULL)) {
+ /* can't do HMAC with empty key or text or digest store */
+ return (0);
+ }
+ /* validate the hmac algo and get the digest length */
+ digestlen = sctp_get_hmac_digest_len(hmac_algo);
+ if (digestlen == 0)
+ return (0);
+
+ /* hash the key if it is longer than the hash block size */
+ blocklen = sctp_get_hmac_block_len(hmac_algo);
+ if (key->keylen > blocklen) {
+ sctp_hmac_init(hmac_algo, &ctx);
+ sctp_hmac_update(hmac_algo, &ctx, key->key, key->keylen);
+ sctp_hmac_final(hmac_algo, &ctx, temp);
+ /* save the hashed key as the new key */
+ key->keylen = digestlen;
+ bcopy(temp, key->key, key->keylen);
+ }
+ return (sctp_hmac(hmac_algo, key->key, key->keylen, text, textlen,
+ digest));
+}
+
+/* mbuf version */
+uint32_t
+sctp_compute_hmac_m(uint16_t hmac_algo, sctp_key_t * key, struct mbuf *m,
+ uint32_t m_offset, uint8_t * digest)
+{
+ uint32_t digestlen;
+ uint32_t blocklen;
+ sctp_hash_context_t ctx;
+ uint8_t temp[SCTP_AUTH_DIGEST_LEN_MAX];
+
+ /* sanity check */
+ if ((key == NULL) || (m == NULL) || (digest == NULL)) {
+ /* can't do HMAC with empty key or text or digest store */
+ return (0);
+ }
+ /* validate the hmac algo and get the digest length */
+ digestlen = sctp_get_hmac_digest_len(hmac_algo);
+ if (digestlen == 0)
+ return (0);
+
+ /* hash the key if it is longer than the hash block size */
+ blocklen = sctp_get_hmac_block_len(hmac_algo);
+ if (key->keylen > blocklen) {
+ sctp_hmac_init(hmac_algo, &ctx);
+ sctp_hmac_update(hmac_algo, &ctx, key->key, key->keylen);
+ sctp_hmac_final(hmac_algo, &ctx, temp);
+ /* save the hashed key as the new key */
+ key->keylen = digestlen;
+ bcopy(temp, key->key, key->keylen);
+ }
+ return (sctp_hmac_m(hmac_algo, key->key, key->keylen, m, m_offset, digest, 0));
+}
+
+int
+sctp_auth_is_supported_hmac(sctp_hmaclist_t * list, uint16_t id)
+{
+ int i;
+
+ if ((list == NULL) || (id == SCTP_AUTH_HMAC_ID_RSVD))
+ return (0);
+
+ for (i = 0; i < list->num_algo; i++)
+ if (list->hmac[i] == id)
+ return (1);
+
+ /* not in the list */
+ return (0);
+}
+
+
+/*-
+ * clear any cached key(s) if they match the given key id on an association.
+ * the cached key(s) will be recomputed and re-cached at next use.
+ * ASSUMES TCB_LOCK is already held
+ */
+void
+sctp_clear_cachedkeys(struct sctp_tcb *stcb, uint16_t keyid)
+{
+ if (stcb == NULL)
+ return;
+
+ if (keyid == stcb->asoc.authinfo.assoc_keyid) {
+ sctp_free_key(stcb->asoc.authinfo.assoc_key);
+ stcb->asoc.authinfo.assoc_key = NULL;
+ }
+ if (keyid == stcb->asoc.authinfo.recv_keyid) {
+ sctp_free_key(stcb->asoc.authinfo.recv_key);
+ stcb->asoc.authinfo.recv_key = NULL;
+ }
+}
+
+/*-
+ * clear any cached key(s) if they match the given key id for all assocs on
+ * an endpoint.
+ * ASSUMES INP_WLOCK is already held
+ */
+void
+sctp_clear_cachedkeys_ep(struct sctp_inpcb *inp, uint16_t keyid)
+{
+ struct sctp_tcb *stcb;
+
+ if (inp == NULL)
+ return;
+
+ /* clear the cached keys on all assocs on this instance */
+ LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
+ SCTP_TCB_LOCK(stcb);
+ sctp_clear_cachedkeys(stcb, keyid);
+ SCTP_TCB_UNLOCK(stcb);
+ }
+}
+
+/*-
+ * delete a shared key from an association
+ * ASSUMES TCB_LOCK is already held
+ */
+int
+sctp_delete_sharedkey(struct sctp_tcb *stcb, uint16_t keyid)
+{
+ sctp_sharedkey_t *skey;
+
+ if (stcb == NULL)
+ return (-1);
+
+ /* is the keyid the assoc active sending key */
+ if (keyid == stcb->asoc.authinfo.active_keyid)
+ return (-1);
+
+ /* does the key exist? */
+ skey = sctp_find_sharedkey(&stcb->asoc.shared_keys, keyid);
+ if (skey == NULL)
+ return (-1);
+
+ /* are there other refcount holders on the key? */
+ if (skey->refcount > 1)
+ return (-1);
+
+ /* remove it */
+ LIST_REMOVE(skey, next);
+ sctp_free_sharedkey(skey); /* frees skey->key as well */
+
+ /* clear any cached keys */
+ sctp_clear_cachedkeys(stcb, keyid);
+ return (0);
+}
+
+/*-
+ * deletes a shared key from the endpoint
+ * ASSUMES INP_WLOCK is already held
+ */
+int
+sctp_delete_sharedkey_ep(struct sctp_inpcb *inp, uint16_t keyid)
+{
+ sctp_sharedkey_t *skey;
+
+ if (inp == NULL)
+ return (-1);
+
+ /* is the keyid the active sending key on the endpoint */
+ if (keyid == inp->sctp_ep.default_keyid)
+ return (-1);
+
+ /* does the key exist? */
+ skey = sctp_find_sharedkey(&inp->sctp_ep.shared_keys, keyid);
+ if (skey == NULL)
+ return (-1);
+
+ /* endpoint keys are not refcounted */
+
+ /* remove it */
+ LIST_REMOVE(skey, next);
+ sctp_free_sharedkey(skey); /* frees skey->key as well */
+
+ /* clear any cached keys */
+ sctp_clear_cachedkeys_ep(inp, keyid);
+ return (0);
+}
+
+/*-
+ * set the active key on an association
+ * ASSUMES TCB_LOCK is already held
+ */
+int
+sctp_auth_setactivekey(struct sctp_tcb *stcb, uint16_t keyid)
+{
+ sctp_sharedkey_t *skey = NULL;
+
+ /* find the key on the assoc */
+ skey = sctp_find_sharedkey(&stcb->asoc.shared_keys, keyid);
+ if (skey == NULL) {
+ /* that key doesn't exist */
+ return (-1);
+ }
+ if ((skey->deactivated) && (skey->refcount > 1)) {
+ /* can't reactivate a deactivated key with other refcounts */
+ return (-1);
+ }
+ /* set the (new) active key */
+ stcb->asoc.authinfo.active_keyid = keyid;
+ /* reset the deactivated flag */
+ skey->deactivated = 0;
+
+ return (0);
+}
+
+/*-
+ * set the active key on an endpoint
+ * ASSUMES INP_WLOCK is already held
+ */
+int
+sctp_auth_setactivekey_ep(struct sctp_inpcb *inp, uint16_t keyid)
+{
+ sctp_sharedkey_t *skey;
+
+ /* find the key */
+ skey = sctp_find_sharedkey(&inp->sctp_ep.shared_keys, keyid);
+ if (skey == NULL) {
+ /* that key doesn't exist */
+ return (-1);
+ }
+ inp->sctp_ep.default_keyid = keyid;
+ return (0);
+}
+
+/*-
+ * deactivates a shared key from the association
+ * ASSUMES TCB_LOCK is already held
+ */
+int
+sctp_deact_sharedkey(struct sctp_tcb *stcb, uint16_t keyid)
+{
+ sctp_sharedkey_t *skey;
+
+ if (stcb == NULL)
+ return (-1);
+
+ /* is the keyid the assoc active sending key */
+ if (keyid == stcb->asoc.authinfo.active_keyid)
+ return (-1);
+
+ /* does the key exist? */
+ skey = sctp_find_sharedkey(&stcb->asoc.shared_keys, keyid);
+ if (skey == NULL)
+ return (-1);
+
+ /* are there other refcount holders on the key? */
+ if (skey->refcount == 1) {
+ /* no other users, send a notification for this key */
+ sctp_ulp_notify(SCTP_NOTIFY_AUTH_FREE_KEY, stcb, keyid, 0,
+ SCTP_SO_LOCKED);
+ }
+ /* mark the key as deactivated */
+ skey->deactivated = 1;
+
+ return (0);
+}
+
+/*-
+ * deactivates a shared key from the endpoint
+ * ASSUMES INP_WLOCK is already held
+ */
+int
+sctp_deact_sharedkey_ep(struct sctp_inpcb *inp, uint16_t keyid)
+{
+ sctp_sharedkey_t *skey;
+
+ if (inp == NULL)
+ return (-1);
+
+ /* is the keyid the active sending key on the endpoint */
+ if (keyid == inp->sctp_ep.default_keyid)
+ return (-1);
+
+ /* does the key exist? */
+ skey = sctp_find_sharedkey(&inp->sctp_ep.shared_keys, keyid);
+ if (skey == NULL)
+ return (-1);
+
+ /* endpoint keys are not refcounted */
+
+ /* remove it */
+ LIST_REMOVE(skey, next);
+ sctp_free_sharedkey(skey); /* frees skey->key as well */
+
+ return (0);
+}
+
+/*
+ * get local authentication parameters from cookie (from INIT-ACK)
+ */
+void
+sctp_auth_get_cookie_params(struct sctp_tcb *stcb, struct mbuf *m,
+ uint32_t offset, uint32_t length)
+{
+ struct sctp_paramhdr *phdr, tmp_param;
+ uint16_t plen, ptype;
+ uint8_t random_store[SCTP_PARAM_BUFFER_SIZE];
+ struct sctp_auth_random *p_random = NULL;
+ uint16_t random_len = 0;
+ uint8_t hmacs_store[SCTP_PARAM_BUFFER_SIZE];
+ struct sctp_auth_hmac_algo *hmacs = NULL;
+ uint16_t hmacs_len = 0;
+ uint8_t chunks_store[SCTP_PARAM_BUFFER_SIZE];
+ struct sctp_auth_chunk_list *chunks = NULL;
+ uint16_t num_chunks = 0;
+ sctp_key_t *new_key;
+ uint32_t keylen;
+
+ /* convert to upper bound */
+ length += offset;
+
+ phdr = (struct sctp_paramhdr *)sctp_m_getptr(m, offset,
+ sizeof(struct sctp_paramhdr), (uint8_t *) & tmp_param);
+ while (phdr != NULL) {
+ ptype = ntohs(phdr->param_type);
+ plen = ntohs(phdr->param_length);
+
+ if ((plen == 0) || (offset + plen > length))
+ break;
+
+ if (ptype == SCTP_RANDOM) {
+ if (plen > sizeof(random_store))
+ break;
+ phdr = sctp_get_next_param(m, offset,
+ (struct sctp_paramhdr *)random_store, min(plen, sizeof(random_store)));
+ if (phdr == NULL)
+ return;
+ /* save the random and length for the key */
+ p_random = (struct sctp_auth_random *)phdr;
+ random_len = plen - sizeof(*p_random);
+ } else if (ptype == SCTP_HMAC_LIST) {
+ int num_hmacs;
+ int i;
+
+ if (plen > sizeof(hmacs_store))
+ break;
+ phdr = sctp_get_next_param(m, offset,
+ (struct sctp_paramhdr *)hmacs_store, min(plen, sizeof(hmacs_store)));
+ if (phdr == NULL)
+ return;
+ /* save the hmacs list and num for the key */
+ hmacs = (struct sctp_auth_hmac_algo *)phdr;
+ hmacs_len = plen - sizeof(*hmacs);
+ num_hmacs = hmacs_len / sizeof(hmacs->hmac_ids[0]);
+ if (stcb->asoc.local_hmacs != NULL)
+ sctp_free_hmaclist(stcb->asoc.local_hmacs);
+ stcb->asoc.local_hmacs = sctp_alloc_hmaclist(num_hmacs);
+ if (stcb->asoc.local_hmacs != NULL) {
+ for (i = 0; i < num_hmacs; i++) {
+ (void)sctp_auth_add_hmacid(stcb->asoc.local_hmacs,
+ ntohs(hmacs->hmac_ids[i]));
+ }
+ }
+ } else if (ptype == SCTP_CHUNK_LIST) {
+ int i;
+
+ if (plen > sizeof(chunks_store))
+ break;
+ phdr = sctp_get_next_param(m, offset,
+ (struct sctp_paramhdr *)chunks_store, min(plen, sizeof(chunks_store)));
+ if (phdr == NULL)
+ return;
+ chunks = (struct sctp_auth_chunk_list *)phdr;
+ num_chunks = plen - sizeof(*chunks);
+ /* save chunks list and num for the key */
+ if (stcb->asoc.local_auth_chunks != NULL)
+ sctp_clear_chunklist(stcb->asoc.local_auth_chunks);
+ else
+ stcb->asoc.local_auth_chunks = sctp_alloc_chunklist();
+ for (i = 0; i < num_chunks; i++) {
+ (void)sctp_auth_add_chunk(chunks->chunk_types[i],
+ stcb->asoc.local_auth_chunks);
+ }
+ }
+ /* get next parameter */
+ offset += SCTP_SIZE32(plen);
+ if (offset + sizeof(struct sctp_paramhdr) > length)
+ break;
+ phdr = (struct sctp_paramhdr *)sctp_m_getptr(m, offset, sizeof(struct sctp_paramhdr),
+ (uint8_t *) & tmp_param);
+ }
+ /* concatenate the full random key */
+ keylen = sizeof(*p_random) + random_len + sizeof(*hmacs) + hmacs_len;
+ if (chunks != NULL) {
+ keylen += sizeof(*chunks) + num_chunks;
+ }
+ new_key = sctp_alloc_key(keylen);
+ if (new_key != NULL) {
+ /* copy in the RANDOM */
+ if (p_random != NULL) {
+ keylen = sizeof(*p_random) + random_len;
+ bcopy(p_random, new_key->key, keylen);
+ }
+ /* append in the AUTH chunks */
+ if (chunks != NULL) {
+ bcopy(chunks, new_key->key + keylen,
+ sizeof(*chunks) + num_chunks);
+ keylen += sizeof(*chunks) + num_chunks;
+ }
+ /* append in the HMACs */
+ if (hmacs != NULL) {
+ bcopy(hmacs, new_key->key + keylen,
+ sizeof(*hmacs) + hmacs_len);
+ }
+ }
+ if (stcb->asoc.authinfo.random != NULL)
+ sctp_free_key(stcb->asoc.authinfo.random);
+ stcb->asoc.authinfo.random = new_key;
+ stcb->asoc.authinfo.random_len = random_len;
+ sctp_clear_cachedkeys(stcb, stcb->asoc.authinfo.assoc_keyid);
+ sctp_clear_cachedkeys(stcb, stcb->asoc.authinfo.recv_keyid);
+
+ /* negotiate what HMAC to use for the peer */
+ stcb->asoc.peer_hmac_id = sctp_negotiate_hmacid(stcb->asoc.peer_hmacs,
+ stcb->asoc.local_hmacs);
+
+ /* copy defaults from the endpoint */
+ /* FIX ME: put in cookie? */
+ stcb->asoc.authinfo.active_keyid = stcb->sctp_ep->sctp_ep.default_keyid;
+ /* copy the shared key list from the endpoint (a fresh copy per key) */
+ (void)sctp_copy_skeylist(&stcb->sctp_ep->sctp_ep.shared_keys,
+ &stcb->asoc.shared_keys);
+}
+
+/*
+ * compute and fill in the HMAC digest for a packet
+ */
+void
+sctp_fill_hmac_digest_m(struct mbuf *m, uint32_t auth_offset,
+ struct sctp_auth_chunk *auth, struct sctp_tcb *stcb, uint16_t keyid)
+{
+ uint32_t digestlen;
+ sctp_sharedkey_t *skey;
+ sctp_key_t *key;
+
+ if ((stcb == NULL) || (auth == NULL))
+ return;
+
+ /* zero the digest + chunk padding */
+ digestlen = sctp_get_hmac_digest_len(stcb->asoc.peer_hmac_id);
+ bzero(auth->hmac, SCTP_SIZE32(digestlen));
+
+ /* is the desired key cached? */
+ if ((keyid != stcb->asoc.authinfo.assoc_keyid) ||
+ (stcb->asoc.authinfo.assoc_key == NULL)) {
+ if (stcb->asoc.authinfo.assoc_key != NULL) {
+ /* free the old cached key */
+ sctp_free_key(stcb->asoc.authinfo.assoc_key);
+ }
+ skey = sctp_find_sharedkey(&stcb->asoc.shared_keys, keyid);
+ /* the only way skey is NULL is if null key id 0 is used */
+ if (skey != NULL)
+ key = skey->key;
+ else
+ key = NULL;
+ /* compute a new assoc key and cache it */
+ stcb->asoc.authinfo.assoc_key =
+ sctp_compute_hashkey(stcb->asoc.authinfo.random,
+ stcb->asoc.authinfo.peer_random, key);
+ stcb->asoc.authinfo.assoc_keyid = keyid;
+ SCTPDBG(SCTP_DEBUG_AUTH1, "caching key id %u\n",
+ stcb->asoc.authinfo.assoc_keyid);
+#ifdef SCTP_DEBUG
+ if (SCTP_AUTH_DEBUG)
+ sctp_print_key(stcb->asoc.authinfo.assoc_key,
+ "Assoc Key");
+#endif
+ }
+ /* set in the active key id */
+ auth->shared_key_id = htons(keyid);
+
+ /* compute and fill in the digest */
+ (void)sctp_compute_hmac_m(stcb->asoc.peer_hmac_id, stcb->asoc.authinfo.assoc_key,
+ m, auth_offset, auth->hmac);
+}
+
+
+static void
+sctp_bzero_m(struct mbuf *m, uint32_t m_offset, uint32_t size)
+{
+ struct mbuf *m_tmp;
+ uint8_t *data;
+
+ /* sanity check */
+ if (m == NULL)
+ return;
+
+ /* find the correct starting mbuf and offset (get start position) */
+ m_tmp = m;
+ while ((m_tmp != NULL) && (m_offset >= (uint32_t) SCTP_BUF_LEN(m_tmp))) {
+ m_offset -= SCTP_BUF_LEN(m_tmp);
+ m_tmp = SCTP_BUF_NEXT(m_tmp);
+ }
+ /* now use the rest of the mbuf chain */
+ while ((m_tmp != NULL) && (size > 0)) {
+ data = mtod(m_tmp, uint8_t *) + m_offset;
+ if (size > (uint32_t) SCTP_BUF_LEN(m_tmp)) {
+ bzero(data, SCTP_BUF_LEN(m_tmp));
+ size -= SCTP_BUF_LEN(m_tmp);
+ } else {
+ bzero(data, size);
+ size = 0;
+ }
+ /* clear the offset since it's only for the first mbuf */
+ m_offset = 0;
+ m_tmp = SCTP_BUF_NEXT(m_tmp);
+ }
+}
+
+/*-
+ * process the incoming Authentication chunk
+ * return codes:
+ * -1 on any authentication error
+ * 0 on authentication verification
+ */
+int
+sctp_handle_auth(struct sctp_tcb *stcb, struct sctp_auth_chunk *auth,
+ struct mbuf *m, uint32_t offset)
+{
+ uint16_t chunklen;
+ uint16_t shared_key_id;
+ uint16_t hmac_id;
+ sctp_sharedkey_t *skey;
+ uint32_t digestlen;
+ uint8_t digest[SCTP_AUTH_DIGEST_LEN_MAX];
+ uint8_t computed_digest[SCTP_AUTH_DIGEST_LEN_MAX];
+
+ /* auth is checked for NULL by caller */
+ chunklen = ntohs(auth->ch.chunk_length);
+ if (chunklen < sizeof(*auth)) {
+ SCTP_STAT_INCR(sctps_recvauthfailed);
+ return (-1);
+ }
+ SCTP_STAT_INCR(sctps_recvauth);
+
+ /* get the auth params */
+ shared_key_id = ntohs(auth->shared_key_id);
+ hmac_id = ntohs(auth->hmac_id);
+ SCTPDBG(SCTP_DEBUG_AUTH1,
+ "SCTP AUTH Chunk: shared key %u, HMAC id %u\n",
+ shared_key_id, hmac_id);
+
+ /* is the indicated HMAC supported? */
+ if (!sctp_auth_is_supported_hmac(stcb->asoc.local_hmacs, hmac_id)) {
+ struct mbuf *m_err;
+ struct sctp_auth_invalid_hmac *err;
+
+ SCTP_STAT_INCR(sctps_recvivalhmacid);
+ SCTPDBG(SCTP_DEBUG_AUTH1,
+ "SCTP Auth: unsupported HMAC id %u\n",
+ hmac_id);
+ /*
+ * report this in an Error Chunk: Unsupported HMAC
+ * Identifier
+ */
+ m_err = sctp_get_mbuf_for_msg(sizeof(*err), 0, M_DONTWAIT,
+ 1, MT_HEADER);
+ if (m_err != NULL) {
+ /* pre-reserve some space */
+ SCTP_BUF_RESV_UF(m_err, sizeof(struct sctp_chunkhdr));
+ /* fill in the error */
+ err = mtod(m_err, struct sctp_auth_invalid_hmac *);
+ bzero(err, sizeof(*err));
+ err->ph.param_type = htons(SCTP_CAUSE_UNSUPPORTED_HMACID);
+ err->ph.param_length = htons(sizeof(*err));
+ err->hmac_id = ntohs(hmac_id);
+ SCTP_BUF_LEN(m_err) = sizeof(*err);
+ /* queue it */
+ sctp_queue_op_err(stcb, m_err);
+ }
+ return (-1);
+ }
+ /* get the indicated shared key, if available */
+ if ((stcb->asoc.authinfo.recv_key == NULL) ||
+ (stcb->asoc.authinfo.recv_keyid != shared_key_id)) {
+ /* find the shared key on the assoc first */
+ skey = sctp_find_sharedkey(&stcb->asoc.shared_keys,
+ shared_key_id);
+ /* if the shared key isn't found, discard the chunk */
+ if (skey == NULL) {
+ SCTP_STAT_INCR(sctps_recvivalkeyid);
+ SCTPDBG(SCTP_DEBUG_AUTH1,
+ "SCTP Auth: unknown key id %u\n",
+ shared_key_id);
+ return (-1);
+ }
+ /* generate a notification if this is a new key id */
+ if (stcb->asoc.authinfo.recv_keyid != shared_key_id)
+ /*
+ * sctp_ulp_notify(SCTP_NOTIFY_AUTH_NEW_KEY, stcb,
+ * shared_key_id, (void
+ * *)stcb->asoc.authinfo.recv_keyid);
+ */
+ sctp_notify_authentication(stcb, SCTP_AUTH_NEWKEY,
+ shared_key_id, stcb->asoc.authinfo.recv_keyid,
+ SCTP_SO_NOT_LOCKED);
+ /* compute a new recv assoc key and cache it */
+ if (stcb->asoc.authinfo.recv_key != NULL)
+ sctp_free_key(stcb->asoc.authinfo.recv_key);
+ stcb->asoc.authinfo.recv_key =
+ sctp_compute_hashkey(stcb->asoc.authinfo.random,
+ stcb->asoc.authinfo.peer_random, skey->key);
+ stcb->asoc.authinfo.recv_keyid = shared_key_id;
+#ifdef SCTP_DEBUG
+ if (SCTP_AUTH_DEBUG)
+ sctp_print_key(stcb->asoc.authinfo.recv_key, "Recv Key");
+#endif
+ }
+ /* validate the digest length */
+ digestlen = sctp_get_hmac_digest_len(hmac_id);
+ if (chunklen < (sizeof(*auth) + digestlen)) {
+ /* invalid digest length */
+ SCTP_STAT_INCR(sctps_recvauthfailed);
+ SCTPDBG(SCTP_DEBUG_AUTH1,
+ "SCTP Auth: chunk too short for HMAC\n");
+ return (-1);
+ }
+ /* save a copy of the digest, zero the pseudo header, and validate */
+ bcopy(auth->hmac, digest, digestlen);
+ sctp_bzero_m(m, offset + sizeof(*auth), SCTP_SIZE32(digestlen));
+ (void)sctp_compute_hmac_m(hmac_id, stcb->asoc.authinfo.recv_key,
+ m, offset, computed_digest);
+
+ /* compare the computed digest with the one in the AUTH chunk */
+ if (memcmp(digest, computed_digest, digestlen) != 0) {
+ SCTP_STAT_INCR(sctps_recvauthfailed);
+ SCTPDBG(SCTP_DEBUG_AUTH1,
+ "SCTP Auth: HMAC digest check failed\n");
+ return (-1);
+ }
+ return (0);
+}
+
+/*
+ * generate an SCTP_AUTHENTICATION_EVENT notification and queue it to the
+ * socket read queue
+ */
+void
+sctp_notify_authentication(struct sctp_tcb *stcb, uint32_t indication,
+ uint16_t keyid, uint16_t alt_keyid, int so_locked
+#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
+ SCTP_UNUSED
+#endif
+)
+{
+ struct mbuf *m_notify;
+ struct sctp_authkey_event *auth;
+ struct sctp_queued_to_read *control;
+
+ if ((stcb == NULL) ||
+ (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
+ (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
+ (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)
+ ) {
+ /* If the socket is gone we are out of here */
+ return;
+ }
+ if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_AUTHEVNT))
+ /* event not enabled */
+ return;
+
+ m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_authkey_event),
+ 0, M_DONTWAIT, 1, MT_HEADER);
+ if (m_notify == NULL)
+ /* no space left */
+ return;
+
+ SCTP_BUF_LEN(m_notify) = 0;
+ auth = mtod(m_notify, struct sctp_authkey_event *);
+ auth->auth_type = SCTP_AUTHENTICATION_EVENT;
+ auth->auth_flags = 0;
+ auth->auth_length = sizeof(*auth);
+ auth->auth_keynumber = keyid;
+ auth->auth_altkeynumber = alt_keyid;
+ auth->auth_indication = indication;
+ auth->auth_assoc_id = sctp_get_associd(stcb);
+
+ SCTP_BUF_LEN(m_notify) = sizeof(*auth);
+ SCTP_BUF_NEXT(m_notify) = NULL;
+
+ /* append to socket */
+ control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
+ 0, 0, 0, 0, 0, 0, m_notify);
+ if (control == NULL) {
+ /* no memory */
+ sctp_m_freem(m_notify);
+ return;
+ }
+ control->spec_flags = M_NOTIFICATION;
+ control->length = SCTP_BUF_LEN(m_notify);
+ /* not that we need this */
+ control->tail_mbuf = m_notify;
+ sctp_add_to_readq(stcb->sctp_ep, stcb, control,
+ &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
+}
+
+
+/*-
+ * validates the AUTHentication related parameters in an INIT/INIT-ACK
+ * Note: currently only used for INIT as INIT-ACK is handled inline
+ * with sctp_load_addresses_from_init()
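+ *
+ * Returns 0 on success, -1 on an invalid parameter combination, and -2
+ * when the peer advertises ASCONF/ASCONF-ACK and AUTH but does not list
+ * both ASCONF and ASCONF-ACK as chunks requiring authentication.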
+ */
+int
+sctp_validate_init_auth_params(struct mbuf *m, int offset, int limit)
+{
+ struct sctp_paramhdr *phdr, parm_buf;
+ uint16_t ptype, plen;
+ int peer_supports_asconf = 0;
+ int peer_supports_auth = 0;
+ int got_random = 0, got_hmacs = 0, got_chklist = 0;
+ uint8_t saw_asconf = 0;
+ uint8_t saw_asconf_ack = 0;
+
+ /* go through each of the params. */
+ phdr = sctp_get_next_param(m, offset, &parm_buf, sizeof(parm_buf));
+ while (phdr) {
+ ptype = ntohs(phdr->param_type);
+ plen = ntohs(phdr->param_length);
+
+ if (offset + plen > limit) {
+ break;
+ }
+ if (plen < sizeof(struct sctp_paramhdr)) {
+ break;
+ }
+ if (ptype == SCTP_SUPPORTED_CHUNK_EXT) {
+ /* A supported extension chunk */
+ struct sctp_supported_chunk_types_param *pr_supported;
+ uint8_t local_store[SCTP_PARAM_BUFFER_SIZE];
+ int num_ent, i;
+
+ phdr = sctp_get_next_param(m, offset,
+ (struct sctp_paramhdr *)&local_store, min(plen, sizeof(local_store)));
+ if (phdr == NULL) {
+ return (-1);
+ }
+ pr_supported = (struct sctp_supported_chunk_types_param *)phdr;
+ num_ent = plen - sizeof(struct sctp_paramhdr);
+ for (i = 0; i < num_ent; i++) {
+ switch (pr_supported->chunk_types[i]) {
+ case SCTP_ASCONF:
+ case SCTP_ASCONF_ACK:
+ peer_supports_asconf = 1;
+ break;
+ case SCTP_AUTHENTICATION:
+ peer_supports_auth = 1;
+ break;
+ default:
+ /* one we don't care about */
+ break;
+ }
+ }
+ } else if (ptype == SCTP_RANDOM) {
+ got_random = 1;
+ /* enforce the random length */
+ if (plen != (sizeof(struct sctp_auth_random) +
+ SCTP_AUTH_RANDOM_SIZE_REQUIRED)) {
+ SCTPDBG(SCTP_DEBUG_AUTH1,
+ "SCTP: invalid RANDOM len\n");
+ return (-1);
+ }
+ } else if (ptype == SCTP_HMAC_LIST) {
+ uint8_t store[SCTP_PARAM_BUFFER_SIZE];
+ struct sctp_auth_hmac_algo *hmacs;
+ int num_hmacs;
+
+ if (plen > sizeof(store))
+ break;
+ phdr = sctp_get_next_param(m, offset,
+ (struct sctp_paramhdr *)store, min(plen, sizeof(store)));
+ if (phdr == NULL)
+ return (-1);
+ hmacs = (struct sctp_auth_hmac_algo *)phdr;
+ num_hmacs = (plen - sizeof(*hmacs)) /
+ sizeof(hmacs->hmac_ids[0]);
+ /* validate the hmac list */
+ if (sctp_verify_hmac_param(hmacs, num_hmacs)) {
+ SCTPDBG(SCTP_DEBUG_AUTH1,
+ "SCTP: invalid HMAC param\n");
+ return (-1);
+ }
+ got_hmacs = 1;
+ } else if (ptype == SCTP_CHUNK_LIST) {
+ int i, num_chunks;
+ uint8_t chunks_store[SCTP_SMALL_CHUNK_STORE];
+
+ /* did the peer send a non-empty chunk list? */
+ struct sctp_auth_chunk_list *chunks = NULL;
+
+ phdr = sctp_get_next_param(m, offset,
+ (struct sctp_paramhdr *)chunks_store,
+ min(plen, sizeof(chunks_store)));
+ if (phdr == NULL)
+ return (-1);
+
+ /*-
+ * Flip through the list and mark that the
+ * peer supports asconf/asconf_ack.
+ */
+ chunks = (struct sctp_auth_chunk_list *)phdr;
+ num_chunks = plen - sizeof(*chunks);
+ for (i = 0; i < num_chunks; i++) {
+ /* record asconf/asconf-ack if listed */
+ if (chunks->chunk_types[i] == SCTP_ASCONF)
+ saw_asconf = 1;
+ if (chunks->chunk_types[i] == SCTP_ASCONF_ACK)
+ saw_asconf_ack = 1;
+ }
+ if (num_chunks)
+ got_chklist = 1;
+ }
+ offset += SCTP_SIZE32(plen);
+ if (offset >= limit) {
+ break;
+ }
+ phdr = sctp_get_next_param(m, offset, &parm_buf,
+ sizeof(parm_buf));
+ }
+ /* validate authentication required parameters */
+ if (got_random && got_hmacs) {
+ peer_supports_auth = 1;
+ } else {
+ peer_supports_auth = 0;
+ }
+ if (!peer_supports_auth && got_chklist) {
+ SCTPDBG(SCTP_DEBUG_AUTH1,
+ "SCTP: peer sent chunk list w/o AUTH\n");
+ return (-1);
+ }
+ if (!SCTP_BASE_SYSCTL(sctp_asconf_auth_nochk) && peer_supports_asconf &&
+ !peer_supports_auth) {
+ SCTPDBG(SCTP_DEBUG_AUTH1,
+ "SCTP: peer supports ASCONF but not AUTH\n");
+ return (-1);
+ } else if ((peer_supports_asconf) && (peer_supports_auth) &&
+ ((saw_asconf == 0) || (saw_asconf_ack == 0))) {
+ return (-2);
+ }
+ return (0);
+}
+
+void
+sctp_initialize_auth_params(struct sctp_inpcb *inp, struct sctp_tcb *stcb)
+{
+ uint16_t chunks_len = 0;
+ uint16_t hmacs_len = 0;
+ uint16_t random_len = SCTP_AUTH_RANDOM_SIZE_DEFAULT;
+ sctp_key_t *new_key;
+ uint16_t keylen;
+
+ /* initialize hmac list from endpoint */
+ stcb->asoc.local_hmacs = sctp_copy_hmaclist(inp->sctp_ep.local_hmacs);
+ if (stcb->asoc.local_hmacs != NULL) {
+ hmacs_len = stcb->asoc.local_hmacs->num_algo *
+ sizeof(stcb->asoc.local_hmacs->hmac[0]);
+ }
+ /* initialize auth chunks list from endpoint */
+ stcb->asoc.local_auth_chunks =
+ sctp_copy_chunklist(inp->sctp_ep.local_auth_chunks);
+ if (stcb->asoc.local_auth_chunks != NULL) {
+ int i;
+
+ for (i = 0; i < 256; i++) {
+ if (stcb->asoc.local_auth_chunks->chunks[i])
+ chunks_len++;
+ }
+ }
+ /* copy defaults from the endpoint */
+ stcb->asoc.authinfo.active_keyid = inp->sctp_ep.default_keyid;
+
+ /* copy the shared key list from the endpoint (a fresh copy per key) */
+ (void)sctp_copy_skeylist(&inp->sctp_ep.shared_keys,
+ &stcb->asoc.shared_keys);
+
+ /* now set the concatenated key (random + chunks + hmacs) */
+ /* key includes parameter headers */
+ keylen = (3 * sizeof(struct sctp_paramhdr)) + random_len + chunks_len +
+ hmacs_len;
+ new_key = sctp_alloc_key(keylen);
+ if (new_key != NULL) {
+ struct sctp_paramhdr *ph;
+ int plen;
+
+ /* generate and copy in the RANDOM */
+ ph = (struct sctp_paramhdr *)new_key->key;
+ ph->param_type = htons(SCTP_RANDOM);
+ plen = sizeof(*ph) + random_len;
+ ph->param_length = htons(plen);
+ SCTP_READ_RANDOM(new_key->key + sizeof(*ph), random_len);
+ keylen = plen;
+
+ /* append in the AUTH chunks */
+ /* NOTE: currently we always have chunks to list */
+ ph = (struct sctp_paramhdr *)(new_key->key + keylen);
+ ph->param_type = htons(SCTP_CHUNK_LIST);
+ plen = sizeof(*ph) + chunks_len;
+ ph->param_length = htons(plen);
+ keylen += sizeof(*ph);
+ if (stcb->asoc.local_auth_chunks) {
+ int i;
+
+ for (i = 0; i < 256; i++) {
+ if (stcb->asoc.local_auth_chunks->chunks[i])
+ new_key->key[keylen++] = i;
+ }
+ }
+ /* append in the HMACs */
+ ph = (struct sctp_paramhdr *)(new_key->key + keylen);
+ ph->param_type = htons(SCTP_HMAC_LIST);
+ plen = sizeof(*ph) + hmacs_len;
+ ph->param_length = htons(plen);
+ keylen += sizeof(*ph);
+ (void)sctp_serialize_hmaclist(stcb->asoc.local_hmacs,
+ new_key->key + keylen);
+ }
+ if (stcb->asoc.authinfo.random != NULL)
+ sctp_free_key(stcb->asoc.authinfo.random);
+ stcb->asoc.authinfo.random = new_key;
+ stcb->asoc.authinfo.random_len = random_len;
+}
diff --git a/rtems/freebsd/netinet/sctp_auth.h b/rtems/freebsd/netinet/sctp_auth.h
new file mode 100644
index 00000000..e131680d
--- /dev/null
+++ b/rtems/freebsd/netinet/sctp_auth.h
@@ -0,0 +1,235 @@
+/*-
+ * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#ifndef __SCTP_AUTH_HH__
+#define __SCTP_AUTH_HH__
+
+
+/* digest lengths */
+#define SCTP_AUTH_DIGEST_LEN_SHA1 20
+#define SCTP_AUTH_DIGEST_LEN_SHA224 28
+#define SCTP_AUTH_DIGEST_LEN_SHA256 32
+#define SCTP_AUTH_DIGEST_LEN_SHA384 48
+#define SCTP_AUTH_DIGEST_LEN_SHA512 64
+#define SCTP_AUTH_DIGEST_LEN_MAX 64
+
+/* random sizes */
+#define SCTP_AUTH_RANDOM_SIZE_DEFAULT 32
+#define SCTP_AUTH_RANDOM_SIZE_REQUIRED 32
+#define SCTP_AUTH_RANDOM_SIZE_MAX 256
+
+/* union of all supported HMAC algorithm contexts */
+typedef union sctp_hash_context {
+ SHA1_CTX sha1;
+#ifdef HAVE_SHA2
+ SHA256_CTX sha256;
+ SHA384_CTX sha384;
+ SHA512_CTX sha512;
+#endif
+} sctp_hash_context_t;
+
+typedef struct sctp_key {
+ uint32_t keylen;
+ uint8_t key[];
+} sctp_key_t;
+
+typedef struct sctp_shared_key {
+ LIST_ENTRY(sctp_shared_key) next;
+ sctp_key_t *key; /* key text */
+ uint32_t refcount; /* reference count */
+ uint16_t keyid; /* shared key ID */
+ uint8_t deactivated; /* key is deactivated */
+} sctp_sharedkey_t;
+
+LIST_HEAD(sctp_keyhead, sctp_shared_key);
+
+/* authentication chunks list */
+typedef struct sctp_auth_chklist {
+ uint8_t chunks[256];
+ uint8_t num_chunks;
+} sctp_auth_chklist_t;
+
+/* hmac algos supported list */
+typedef struct sctp_hmaclist {
+ uint16_t max_algo; /* max algorithms allocated */
+ uint16_t num_algo; /* num algorithms used */
+ uint16_t hmac[];
+} sctp_hmaclist_t;
+
+/* authentication info */
+typedef struct sctp_authinfo {
+ sctp_key_t *random; /* local random key (concatenated) */
+ uint32_t random_len; /* local random number length for param */
+ sctp_key_t *peer_random;/* peer's random key (concatenated) */
+ sctp_key_t *assoc_key; /* cached concatenated send key */
+ sctp_key_t *recv_key; /* cached concatenated recv key */
+ uint16_t active_keyid; /* active send keyid */
+ uint16_t assoc_keyid; /* current send keyid (cached) */
+ uint16_t recv_keyid; /* last recv keyid (cached) */
+} sctp_authinfo_t;
+
+
+
+/*
+ * Macros
+ */
+#define sctp_auth_is_required_chunk(chunk, list) ((list == NULL) ? (0) : (list->chunks[chunk] != 0))
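+/* illustrative use: sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.local_auth_chunks) */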
+
+/*
+ * function prototypes
+ */
+
+/* socket option api functions */
+extern sctp_auth_chklist_t *sctp_alloc_chunklist(void);
+extern void sctp_free_chunklist(sctp_auth_chklist_t * chklist);
+extern void sctp_clear_chunklist(sctp_auth_chklist_t * chklist);
+extern sctp_auth_chklist_t *sctp_copy_chunklist(sctp_auth_chklist_t * chklist);
+extern int sctp_auth_add_chunk(uint8_t chunk, sctp_auth_chklist_t * list);
+extern int sctp_auth_delete_chunk(uint8_t chunk, sctp_auth_chklist_t * list);
+extern size_t sctp_auth_get_chklist_size(const sctp_auth_chklist_t * list);
+extern void sctp_auth_set_default_chunks(sctp_auth_chklist_t * list);
+extern int
+sctp_serialize_auth_chunks(const sctp_auth_chklist_t * list,
+ uint8_t * ptr);
+extern int
+sctp_pack_auth_chunks(const sctp_auth_chklist_t * list,
+ uint8_t * ptr);
+extern int
+sctp_unpack_auth_chunks(const uint8_t * ptr, uint8_t num_chunks,
+ sctp_auth_chklist_t * list);
+
+/* key handling */
+extern sctp_key_t *sctp_alloc_key(uint32_t keylen);
+extern void sctp_free_key(sctp_key_t * key);
+extern void sctp_print_key(sctp_key_t * key, const char *str);
+extern void sctp_show_key(sctp_key_t * key, const char *str);
+extern sctp_key_t *sctp_generate_random_key(uint32_t keylen);
+extern sctp_key_t *sctp_set_key(uint8_t * key, uint32_t keylen);
+extern sctp_key_t *
+sctp_compute_hashkey(sctp_key_t * key1, sctp_key_t * key2,
+ sctp_key_t * shared);
+
+/* shared key handling */
+extern sctp_sharedkey_t *sctp_alloc_sharedkey(void);
+extern void sctp_free_sharedkey(sctp_sharedkey_t * skey);
+extern sctp_sharedkey_t *
+sctp_find_sharedkey(struct sctp_keyhead *shared_keys,
+ uint16_t key_id);
+extern int
+sctp_insert_sharedkey(struct sctp_keyhead *shared_keys,
+ sctp_sharedkey_t * new_skey);
+extern int
+sctp_copy_skeylist(const struct sctp_keyhead *src,
+ struct sctp_keyhead *dest);
+
+/* ref counts on shared keys, by key id */
+extern void sctp_auth_key_acquire(struct sctp_tcb *stcb, uint16_t keyid);
+extern void sctp_auth_key_release(struct sctp_tcb *stcb, uint16_t keyid);
+
+
+/* hmac list handling */
+extern sctp_hmaclist_t *sctp_alloc_hmaclist(uint8_t num_hmacs);
+extern void sctp_free_hmaclist(sctp_hmaclist_t * list);
+extern int sctp_auth_add_hmacid(sctp_hmaclist_t * list, uint16_t hmac_id);
+extern sctp_hmaclist_t *sctp_copy_hmaclist(sctp_hmaclist_t * list);
+extern sctp_hmaclist_t *sctp_default_supported_hmaclist(void);
+extern uint16_t
+sctp_negotiate_hmacid(sctp_hmaclist_t * peer,
+ sctp_hmaclist_t * local);
+extern int sctp_serialize_hmaclist(sctp_hmaclist_t * list, uint8_t * ptr);
+extern int
+sctp_verify_hmac_param(struct sctp_auth_hmac_algo *hmacs,
+ uint32_t num_hmacs);
+
+extern sctp_authinfo_t *sctp_alloc_authinfo(void);
+extern void sctp_free_authinfo(sctp_authinfo_t * authinfo);
+
+/* keyed-HMAC functions */
+extern uint32_t sctp_get_auth_chunk_len(uint16_t hmac_algo);
+extern uint32_t sctp_get_hmac_digest_len(uint16_t hmac_algo);
+extern uint32_t
+sctp_hmac(uint16_t hmac_algo, uint8_t * key, uint32_t keylen,
+ uint8_t * text, uint32_t textlen, uint8_t * digest);
+extern int
+sctp_verify_hmac(uint16_t hmac_algo, uint8_t * key, uint32_t keylen,
+ uint8_t * text, uint32_t textlen, uint8_t * digest, uint32_t digestlen);
+extern uint32_t
+sctp_compute_hmac(uint16_t hmac_algo, sctp_key_t * key,
+ uint8_t * text, uint32_t textlen, uint8_t * digest);
+extern int sctp_auth_is_supported_hmac(sctp_hmaclist_t * list, uint16_t id);
+
+/* mbuf versions */
+extern uint32_t
+sctp_hmac_m(uint16_t hmac_algo, uint8_t * key, uint32_t keylen,
+ struct mbuf *m, uint32_t m_offset, uint8_t * digest, uint32_t trailer);
+extern uint32_t
+sctp_compute_hmac_m(uint16_t hmac_algo, sctp_key_t * key,
+ struct mbuf *m, uint32_t m_offset, uint8_t * digest);
+
+/*
+ * authentication routines
+ */
+extern void sctp_clear_cachedkeys(struct sctp_tcb *stcb, uint16_t keyid);
+extern void sctp_clear_cachedkeys_ep(struct sctp_inpcb *inp, uint16_t keyid);
+extern int sctp_delete_sharedkey(struct sctp_tcb *stcb, uint16_t keyid);
+extern int sctp_delete_sharedkey_ep(struct sctp_inpcb *inp, uint16_t keyid);
+extern int sctp_auth_setactivekey(struct sctp_tcb *stcb, uint16_t keyid);
+extern int sctp_auth_setactivekey_ep(struct sctp_inpcb *inp, uint16_t keyid);
+extern int sctp_deact_sharedkey(struct sctp_tcb *stcb, uint16_t keyid);
+extern int sctp_deact_sharedkey_ep(struct sctp_inpcb *inp, uint16_t keyid);
+
+extern void
+sctp_auth_get_cookie_params(struct sctp_tcb *stcb, struct mbuf *m,
+ uint32_t offset, uint32_t length);
+extern void
+sctp_fill_hmac_digest_m(struct mbuf *m, uint32_t auth_offset,
+ struct sctp_auth_chunk *auth, struct sctp_tcb *stcb, uint16_t key_id);
+extern struct mbuf *
+sctp_add_auth_chunk(struct mbuf *m, struct mbuf **m_end,
+ struct sctp_auth_chunk **auth_ret, uint32_t * offset,
+ struct sctp_tcb *stcb, uint8_t chunk);
+extern int
+sctp_handle_auth(struct sctp_tcb *stcb, struct sctp_auth_chunk *ch,
+ struct mbuf *m, uint32_t offset);
+extern void
+sctp_notify_authentication(struct sctp_tcb *stcb,
+ uint32_t indication, uint16_t keyid, uint16_t alt_keyid, int so_locked);
+extern int
+sctp_validate_init_auth_params(struct mbuf *m, int offset,
+ int limit);
+extern void
+sctp_initialize_auth_params(struct sctp_inpcb *inp,
+ struct sctp_tcb *stcb);
+
+/* test functions */
+#endif /* __SCTP_AUTH_HH__ */
diff --git a/rtems/freebsd/netinet/sctp_bsd_addr.c b/rtems/freebsd/netinet/sctp_bsd_addr.c
new file mode 100644
index 00000000..b83490c9
--- /dev/null
+++ b/rtems/freebsd/netinet/sctp_bsd_addr.c
@@ -0,0 +1,562 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* $KAME: sctp_output.c,v 1.46 2005/03/06 16:04:17 itojun Exp $ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/freebsd/netinet/sctp_os.h>
+#include <rtems/freebsd/netinet/sctp_var.h>
+#include <rtems/freebsd/netinet/sctp_pcb.h>
+#include <rtems/freebsd/netinet/sctp_header.h>
+#include <rtems/freebsd/netinet/sctputil.h>
+#include <rtems/freebsd/netinet/sctp_output.h>
+#include <rtems/freebsd/netinet/sctp_bsd_addr.h>
+#include <rtems/freebsd/netinet/sctp_uio.h>
+#include <rtems/freebsd/netinet/sctp_timer.h>
+#include <rtems/freebsd/netinet/sctp_asconf.h>
+#include <rtems/freebsd/netinet/sctp_sysctl.h>
+#include <rtems/freebsd/netinet/sctp_indata.h>
+#include <rtems/freebsd/sys/unistd.h>
+
+/* Declare all of our malloc named types */
+MALLOC_DEFINE(SCTP_M_MAP, "sctp_map", "sctp asoc map descriptor");
+MALLOC_DEFINE(SCTP_M_STRMI, "sctp_stri", "sctp stream in array");
+MALLOC_DEFINE(SCTP_M_STRMO, "sctp_stro", "sctp stream out array");
+MALLOC_DEFINE(SCTP_M_ASC_ADDR, "sctp_aadr", "sctp asconf address");
+MALLOC_DEFINE(SCTP_M_ASC_IT, "sctp_a_it", "sctp asconf iterator");
+MALLOC_DEFINE(SCTP_M_AUTH_CL, "sctp_atcl", "sctp auth chunklist");
+MALLOC_DEFINE(SCTP_M_AUTH_KY, "sctp_atky", "sctp auth key");
+MALLOC_DEFINE(SCTP_M_AUTH_HL, "sctp_athm", "sctp auth hmac list");
+MALLOC_DEFINE(SCTP_M_AUTH_IF, "sctp_athi", "sctp auth info");
+MALLOC_DEFINE(SCTP_M_STRESET, "sctp_stre", "sctp stream reset");
+MALLOC_DEFINE(SCTP_M_CMSG, "sctp_cmsg", "sctp CMSG buffer");
+MALLOC_DEFINE(SCTP_M_COPYAL, "sctp_cpal", "sctp copy all");
+MALLOC_DEFINE(SCTP_M_VRF, "sctp_vrf", "sctp vrf struct");
+MALLOC_DEFINE(SCTP_M_IFA, "sctp_ifa", "sctp ifa struct");
+MALLOC_DEFINE(SCTP_M_IFN, "sctp_ifn", "sctp ifn struct");
+MALLOC_DEFINE(SCTP_M_TIMW, "sctp_timw", "sctp time block");
+MALLOC_DEFINE(SCTP_M_MVRF, "sctp_mvrf", "sctp mvrf pcb list");
+MALLOC_DEFINE(SCTP_M_ITER, "sctp_iter", "sctp iterator control");
+MALLOC_DEFINE(SCTP_M_SOCKOPT, "sctp_socko", "sctp socket option");
+
+/* Global NON-VNET structure that controls the iterator */
+struct iterator_control sctp_it_ctl;
+static int __sctp_thread_based_iterator_started = 0;
+
+
+static void
+sctp_cleanup_itqueue(void)
+{
+ struct sctp_iterator *it;
+
+ while ((it = TAILQ_FIRST(&sctp_it_ctl.iteratorhead)) != NULL) {
+ if (it->function_atend != NULL) {
+ (*it->function_atend) (it->pointer, it->val);
+ }
+ TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr);
+ SCTP_FREE(it, SCTP_M_ITER);
+ }
+}
+
+
+void
+sctp_wakeup_iterator(void)
+{
+ wakeup(&sctp_it_ctl.iterator_running);
+}
+
+static void
+sctp_iterator_thread(void *v)
+{
+ SCTP_IPI_ITERATOR_WQ_LOCK();
+ while (1) {
+ msleep(&sctp_it_ctl.iterator_running,
+ &sctp_it_ctl.ipi_iterator_wq_mtx,
+ 0, "waiting_for_work", 0);
+ if (sctp_it_ctl.iterator_flags & SCTP_ITERATOR_MUST_EXIT) {
+ SCTP_IPI_ITERATOR_WQ_DESTROY();
+ SCTP_ITERATOR_LOCK_DESTROY();
+ sctp_cleanup_itqueue();
+ __sctp_thread_based_iterator_started = 0;
+ kthread_exit();
+ }
+ sctp_iterator_worker();
+ }
+}
+
+void
+sctp_startup_iterator(void)
+{
+ if (__sctp_thread_based_iterator_started) {
+ /* You only get one */
+ return;
+ }
+ /* init the iterator head */
+ __sctp_thread_based_iterator_started = 1;
+ sctp_it_ctl.iterator_running = 0;
+ sctp_it_ctl.iterator_flags = 0;
+ sctp_it_ctl.cur_it = NULL;
+ SCTP_ITERATOR_LOCK_INIT();
+ SCTP_IPI_ITERATOR_WQ_INIT();
+ TAILQ_INIT(&sctp_it_ctl.iteratorhead);
+
+ (void)kproc_create(sctp_iterator_thread,
+ (void *)NULL,
+ &sctp_it_ctl.thread_proc,
+ RFPROC,
+ SCTP_KTHREAD_PAGES,
+ SCTP_KTRHEAD_NAME);
+}
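+/*
+ * The guard above makes sctp_startup_iterator() idempotent: only the
+ * first call spawns the iterator kthread via kproc_create(); every
+ * later call returns immediately, so callers may invoke it
+ * unconditionally at initialization time.
+ */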
+
+#ifdef INET6
+
+void
+sctp_gather_internal_ifa_flags(struct sctp_ifa *ifa)
+{
+ struct in6_ifaddr *ifa6;
+
+ ifa6 = (struct in6_ifaddr *)ifa->ifa;
+ ifa->flags = ifa6->ia6_flags;
+ if (!MODULE_GLOBAL(ip6_use_deprecated)) {
+ if (ifa->flags &
+ IN6_IFF_DEPRECATED) {
+ ifa->localifa_flags |= SCTP_ADDR_IFA_UNUSEABLE;
+ } else {
+ ifa->localifa_flags &= ~SCTP_ADDR_IFA_UNUSEABLE;
+ }
+ } else {
+ ifa->localifa_flags &= ~SCTP_ADDR_IFA_UNUSEABLE;
+ }
+ if (ifa->flags &
+ (IN6_IFF_DETACHED |
+ IN6_IFF_ANYCAST |
+ IN6_IFF_NOTREADY)) {
+ ifa->localifa_flags |= SCTP_ADDR_IFA_UNUSEABLE;
+ } else {
+ ifa->localifa_flags &= ~SCTP_ADDR_IFA_UNUSEABLE;
+ }
+}
+
+#endif /* INET6 */
+
+
+static uint32_t
+sctp_is_desired_interface_type(struct ifaddr *ifa)
+{
+ int result;
+
+ /* check the interface type to see if it's one we care about */
+ switch (ifa->ifa_ifp->if_type) {
+ case IFT_ETHER:
+ case IFT_ISO88023:
+ case IFT_ISO88024:
+ case IFT_ISO88025:
+ case IFT_ISO88026:
+ case IFT_STARLAN:
+ case IFT_P10:
+ case IFT_P80:
+ case IFT_HY:
+ case IFT_FDDI:
+ case IFT_XETHER:
+ case IFT_ISDNBASIC:
+ case IFT_ISDNPRIMARY:
+ case IFT_PTPSERIAL:
+ case IFT_OTHER:
+ case IFT_PPP:
+ case IFT_LOOP:
+ case IFT_SLIP:
+ case IFT_GIF:
+ case IFT_L2VLAN:
+ case IFT_IP:
+ case IFT_IPOVERCDLC:
+ case IFT_IPOVERCLAW:
+ case IFT_VIRTUALIPADDRESS:
+ result = 1;
+ break;
+ default:
+ result = 0;
+ }
+
+ return (result);
+}
+
+
+
+
+static void
+sctp_init_ifns_for_vrf(int vrfid)
+{
+ /*
+ * Here we must apply ANY locks needed by the IFN we access and also
+ * make sure we lock any IFA that exists as we walk through the
+ * list of IFAs.
+ */
+ struct ifnet *ifn;
+ struct ifaddr *ifa;
+ struct in6_ifaddr *ifa6;
+ struct sctp_ifa *sctp_ifa;
+ uint32_t ifa_flags;
+
+ IFNET_RLOCK();
+ TAILQ_FOREACH(ifn, &MODULE_GLOBAL(ifnet), if_list) {
+ IF_ADDR_LOCK(ifn);
+ TAILQ_FOREACH(ifa, &ifn->if_addrlist, ifa_list) {
+ if (ifa->ifa_addr == NULL) {
+ continue;
+ }
+ if ((ifa->ifa_addr->sa_family != AF_INET) && (ifa->ifa_addr->sa_family != AF_INET6)) {
+ /* non inet/inet6 skip */
+ continue;
+ }
+ if (ifa->ifa_addr->sa_family == AF_INET6) {
+ if (IN6_IS_ADDR_UNSPECIFIED(&((struct sockaddr_in6 *)ifa->ifa_addr)->sin6_addr)) {
+ /* skip unspecified addresses */
+ continue;
+ }
+ } else {
+ if (((struct sockaddr_in *)ifa->ifa_addr)->sin_addr.s_addr == 0) {
+ continue;
+ }
+ }
+ if (sctp_is_desired_interface_type(ifa) == 0) {
+ /* non desired type */
+ continue;
+ }
+ if (ifa->ifa_addr->sa_family == AF_INET6) {
+ ifa6 = (struct in6_ifaddr *)ifa;
+ ifa_flags = ifa6->ia6_flags;
+ } else {
+ ifa_flags = 0;
+ }
+ sctp_ifa = sctp_add_addr_to_vrf(vrfid,
+ (void *)ifn,
+ ifn->if_index,
+ ifn->if_type,
+ ifn->if_xname,
+ (void *)ifa,
+ ifa->ifa_addr,
+ ifa_flags,
+ 0);
+ if (sctp_ifa) {
+ sctp_ifa->localifa_flags &= ~SCTP_ADDR_DEFER_USE;
+ }
+ }
+ IF_ADDR_UNLOCK(ifn);
+ }
+ IFNET_RUNLOCK();
+}
+
+void
+sctp_init_vrf_list(int vrfid)
+{
+ if (vrfid > SCTP_MAX_VRF_ID)
+ /* can't do that */
+ return;
+
+ /* Don't care about return here */
+ (void)sctp_allocate_vrf(vrfid);
+
+ /*
+ * Now we need to build all the ifn's for this vrf and their
+ * addresses.
+ */
+ sctp_init_ifns_for_vrf(vrfid);
+}
+
+void
+sctp_addr_change(struct ifaddr *ifa, int cmd)
+{
+ uint32_t ifa_flags = 0;
+
+ /*
+ * BSD only has one VRF. If this changes, we will need to hook in
+ * the right things here to get the id to pass to the address
+ * management routine.
+ */
+ if (SCTP_BASE_VAR(first_time) == 0) {
+ /* Special test to see if my ::1 will show up with this */
+ SCTP_BASE_VAR(first_time) = 1;
+ sctp_init_ifns_for_vrf(SCTP_DEFAULT_VRFID);
+ }
+ if ((cmd != RTM_ADD) && (cmd != RTM_DELETE)) {
+ /* don't know what to do with this */
+ return;
+ }
+ if (ifa->ifa_addr == NULL) {
+ return;
+ }
+ if ((ifa->ifa_addr->sa_family != AF_INET) && (ifa->ifa_addr->sa_family != AF_INET6)) {
+ /* non inet/inet6 skip */
+ return;
+ }
+ if (ifa->ifa_addr->sa_family == AF_INET6) {
+ ifa_flags = ((struct in6_ifaddr *)ifa)->ia6_flags;
+ if (IN6_IS_ADDR_UNSPECIFIED(&((struct sockaddr_in6 *)ifa->ifa_addr)->sin6_addr)) {
+ /* skip unspecified addresses */
+ return;
+ }
+ } else {
+ if (((struct sockaddr_in *)ifa->ifa_addr)->sin_addr.s_addr == 0) {
+ return;
+ }
+ }
+
+ if (sctp_is_desired_interface_type(ifa) == 0) {
+ /* non desired type */
+ return;
+ }
+ if (cmd == RTM_ADD) {
+ (void)sctp_add_addr_to_vrf(SCTP_DEFAULT_VRFID, (void *)ifa->ifa_ifp,
+ ifa->ifa_ifp->if_index, ifa->ifa_ifp->if_type,
+ ifa->ifa_ifp->if_xname,
+ (void *)ifa, ifa->ifa_addr, ifa_flags, 1);
+ } else {
+
+ sctp_del_addr_from_vrf(SCTP_DEFAULT_VRFID, ifa->ifa_addr,
+ ifa->ifa_ifp->if_index,
+ ifa->ifa_ifp->if_xname
+ );
+ /*
+ * We don't bump refcount here so when it completes the
+ * final delete will happen.
+ */
+ }
+}
+
+void
+sctp_add_or_del_interfaces(int (*pred) (struct ifnet *), int add)
+{
+ struct ifnet *ifn;
+ struct ifaddr *ifa;
+
+ IFNET_RLOCK();
+ TAILQ_FOREACH(ifn, &MODULE_GLOBAL(ifnet), if_list) {
+ if (!(*pred) (ifn)) {
+ continue;
+ }
+ TAILQ_FOREACH(ifa, &ifn->if_addrlist, ifa_list) {
+ sctp_addr_change(ifa, add ? RTM_ADD : RTM_DELETE);
+ }
+ }
+ IFNET_RUNLOCK();
+}
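+/*
+ * A minimal usage sketch (hypothetical, kept compiled out): a predicate
+ * that selects only Ethernet interfaces, suitable for handing to
+ * sctp_add_or_del_interfaces() to add every address they carry.
+ */
+#if 0
+static int
+example_pred_is_ether(struct ifnet *ifn)
+{
+ /* match only Ethernet-type interfaces (hypothetical helper) */
+ return (ifn->if_type == IFT_ETHER);
+}
+
+/* add all addresses on Ethernet interfaces: */
+/* sctp_add_or_del_interfaces(example_pred_is_ether, 1); */
+#endif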
+
+struct mbuf *
+sctp_get_mbuf_for_msg(unsigned int space_needed, int want_header,
+ int how, int allonebuf, int type)
+{
+ struct mbuf *m = NULL;
+
+ m = m_getm2(NULL, space_needed, how, type, want_header ? M_PKTHDR : 0);
+ if (m == NULL) {
+ /* bad, no memory */
+ return (m);
+ }
+ if (allonebuf) {
+ int siz;
+
+ if (SCTP_BUF_IS_EXTENDED(m)) {
+ siz = SCTP_BUF_EXTEND_SIZE(m);
+ } else {
+ if (want_header)
+ siz = MHLEN;
+ else
+ siz = MLEN;
+ }
+ if (siz < space_needed) {
+ m_freem(m);
+ return (NULL);
+ }
+ }
+ if (SCTP_BUF_NEXT(m)) {
+ sctp_m_freem(SCTP_BUF_NEXT(m));
+ SCTP_BUF_NEXT(m) = NULL;
+ }
+#ifdef SCTP_MBUF_LOGGING
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
+ if (SCTP_BUF_IS_EXTENDED(m)) {
+ sctp_log_mb(m, SCTP_MBUF_IALLOC);
+ }
+ }
+#endif
+ return (m);
+}
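+/*
+ * Usage sketch (hypothetical, kept compiled out): request a single
+ * 128-byte mbuf with a packet header and without sleeping; allonebuf
+ * asks that the whole request fit in one buffer or fail.
+ */
+#if 0
+static void
+example_msg_mbuf(void)
+{
+ struct mbuf *m;
+
+ m = sctp_get_mbuf_for_msg(128, 1, M_DONTWAIT, 1, MT_DATA);
+ if (m != NULL)
+ sctp_m_freem(m);
+}
+#endif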
+
+
+#ifdef SCTP_PACKET_LOGGING
+void
+sctp_packet_log(struct mbuf *m, int length)
+{
+ int *lenat, thisone;
+ void *copyto;
+ uint32_t *tick_tock;
+ int total_len;
+ int grabbed_lock = 0;
+ int value, newval, thisend, thisbegin;
+
+ /*
+ * Buffer layout:
+ * - size of this entry (total_len)
+ * - previous end (value)
+ * - ticks of log (ticks)
+ * - ip packet, as logged
+ * - where this entry started (thisbegin) <-- end points here
+ */
+ total_len = SCTP_SIZE32((length + (4 * sizeof(int))));
+ /* Log a packet to the buffer. */
+ if (total_len > SCTP_PACKET_LOG_SIZE) {
+ /* Can't log this packet; the buffer is not big enough */
+ return;
+ }
+ if (length < (int)(SCTP_MIN_V4_OVERHEAD + sizeof(struct sctp_cookie_ack_chunk))) {
+ return;
+ }
+ atomic_add_int(&SCTP_BASE_VAR(packet_log_writers), 1);
+try_again:
+ if (SCTP_BASE_VAR(packet_log_writers) > SCTP_PKTLOG_WRITERS_NEED_LOCK) {
+ SCTP_IP_PKTLOG_LOCK();
+ grabbed_lock = 1;
+again_locked:
+ value = SCTP_BASE_VAR(packet_log_end);
+ newval = SCTP_BASE_VAR(packet_log_end) + total_len;
+ if (newval >= SCTP_PACKET_LOG_SIZE) {
+ /* we wrapped */
+ thisbegin = 0;
+ thisend = total_len;
+ } else {
+ thisbegin = SCTP_BASE_VAR(packet_log_end);
+ thisend = newval;
+ }
+ if (!(atomic_cmpset_int(&SCTP_BASE_VAR(packet_log_end), value, thisend))) {
+ goto again_locked;
+ }
+ } else {
+ value = SCTP_BASE_VAR(packet_log_end);
+ newval = SCTP_BASE_VAR(packet_log_end) + total_len;
+ if (newval >= SCTP_PACKET_LOG_SIZE) {
+ /* we wrapped */
+ thisbegin = 0;
+ thisend = total_len;
+ } else {
+ thisbegin = SCTP_BASE_VAR(packet_log_end);
+ thisend = newval;
+ }
+ if (!(atomic_cmpset_int(&SCTP_BASE_VAR(packet_log_end), value, thisend))) {
+ goto try_again;
+ }
+ }
+ /* Sanity check */
+ if (thisend >= SCTP_PACKET_LOG_SIZE) {
+ printf("Insanity stops a log thisbegin:%d thisend:%d writers:%d lock:%d end:%d\n",
+ thisbegin,
+ thisend,
+ SCTP_BASE_VAR(packet_log_writers),
+ grabbed_lock,
+ SCTP_BASE_VAR(packet_log_end));
+ SCTP_BASE_VAR(packet_log_end) = 0;
+ goto no_log;
+
+ }
+ lenat = (int *)&SCTP_BASE_VAR(packet_log_buffer)[thisbegin];
+ *lenat = total_len;
+ lenat++;
+ *lenat = value;
+ lenat++;
+ tick_tock = (uint32_t *) lenat;
+ lenat++;
+ *tick_tock = sctp_get_tick_count();
+ copyto = (void *)lenat;
+ thisone = thisend - sizeof(int);
+ lenat = (int *)&SCTP_BASE_VAR(packet_log_buffer)[thisone];
+ *lenat = thisbegin;
+ if (grabbed_lock) {
+ SCTP_IP_PKTLOG_UNLOCK();
+ grabbed_lock = 0;
+ }
+ m_copydata(m, 0, length, (caddr_t)copyto);
+no_log:
+ if (grabbed_lock) {
+ SCTP_IP_PKTLOG_UNLOCK();
+ }
+ atomic_subtract_int(&SCTP_BASE_VAR(packet_log_writers), 1);
+}
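+/*
+ * Worked example (sizes assumed for illustration): logging a 100-byte
+ * packet gives total_len = SCTP_SIZE32(100 + 4 * sizeof(int)) = 116
+ * bytes, laid out as [total_len][previous end][tick count][packet]
+ * [thisbegin], i.e. 16 bytes of metadata wrapped around each packet
+ * in the ring.
+ */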
+
+
+int
+sctp_copy_out_packet_log(uint8_t * target, int length)
+{
+ /*
+ * We wind through the packet log starting at start copying up to
+ * length bytes out. We return the number of bytes copied.
+ */
+ int tocopy, this_copy;
+ int *lenat;
+ int did_delay = 0;
+
+ tocopy = length;
+ if (length < (int)(2 * sizeof(int))) {
+ /* not enough room */
+ return (0);
+ }
+ if (SCTP_PKTLOG_WRITERS_NEED_LOCK) {
+ atomic_add_int(&SCTP_BASE_VAR(packet_log_writers), SCTP_PKTLOG_WRITERS_NEED_LOCK);
+again:
+ if ((did_delay == 0) && (SCTP_BASE_VAR(packet_log_writers) != SCTP_PKTLOG_WRITERS_NEED_LOCK)) {
+ /*
+ * we delay here for just a moment hoping the
+ * writer(s) that were present when we entered will
+ * have left and we only have locking ones that will
+ * contend with us for the lock. This does not
+ * assure 100% access, but it's good enough for a
+ * logging facility like this.
+ */
+ did_delay = 1;
+ DELAY(10);
+ goto again;
+ }
+ }
+ SCTP_IP_PKTLOG_LOCK();
+ lenat = (int *)target;
+ *lenat = SCTP_BASE_VAR(packet_log_end);
+ lenat++;
+ this_copy = min((length - sizeof(int)), SCTP_PACKET_LOG_SIZE);
+ memcpy((void *)lenat, (void *)SCTP_BASE_VAR(packet_log_buffer), this_copy);
+ if (SCTP_PKTLOG_WRITERS_NEED_LOCK) {
+ atomic_subtract_int(&SCTP_BASE_VAR(packet_log_writers),
+ SCTP_PKTLOG_WRITERS_NEED_LOCK);
+ }
+ SCTP_IP_PKTLOG_UNLOCK();
+ return (this_copy + sizeof(int));
+}
+
+#endif
diff --git a/rtems/freebsd/netinet/sctp_bsd_addr.h b/rtems/freebsd/netinet/sctp_bsd_addr.h
new file mode 100644
index 00000000..f2f3d03d
--- /dev/null
+++ b/rtems/freebsd/netinet/sctp_bsd_addr.h
@@ -0,0 +1,63 @@
+/*-
+ * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#ifndef __sctp_bsd_addr_h__
+#define __sctp_bsd_addr_h__
+#include <rtems/freebsd/netinet/sctp_pcb.h>
+
+#if defined(_KERNEL) || defined(__Userspace__)
+
+extern struct iterator_control sctp_it_ctl;
+void sctp_wakeup_iterator(void);
+
+void sctp_startup_iterator(void);
+
+
+#ifdef INET6
+void sctp_gather_internal_ifa_flags(struct sctp_ifa *ifa);
+
+#endif
+
+#ifdef SCTP_PACKET_LOGGING
+
+void sctp_packet_log(struct mbuf *m, int length);
+int sctp_copy_out_packet_log(uint8_t * target, int length);
+
+#endif
+
+void sctp_addr_change(struct ifaddr *ifa, int cmd);
+
+void sctp_add_or_del_interfaces(int (*pred) (struct ifnet *), int add);
+
+#endif
+#endif
diff --git a/rtems/freebsd/netinet/sctp_cc_functions.c b/rtems/freebsd/netinet/sctp_cc_functions.c
new file mode 100644
index 00000000..a3345a9d
--- /dev/null
+++ b/rtems/freebsd/netinet/sctp_cc_functions.c
@@ -0,0 +1,1565 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rtems/freebsd/netinet/sctp_os.h>
+#include <rtems/freebsd/netinet/sctp_var.h>
+#include <rtems/freebsd/netinet/sctp_sysctl.h>
+#include <rtems/freebsd/netinet/sctp_pcb.h>
+#include <rtems/freebsd/netinet/sctp_header.h>
+#include <rtems/freebsd/netinet/sctputil.h>
+#include <rtems/freebsd/netinet/sctp_output.h>
+#include <rtems/freebsd/netinet/sctp_input.h>
+#include <rtems/freebsd/netinet/sctp_indata.h>
+#include <rtems/freebsd/netinet/sctp_uio.h>
+#include <rtems/freebsd/netinet/sctp_timer.h>
+#include <rtems/freebsd/netinet/sctp_auth.h>
+#include <rtems/freebsd/netinet/sctp_asconf.h>
+#include <rtems/freebsd/netinet/sctp_cc_functions.h>
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+void
+sctp_set_initial_cc_param(struct sctp_tcb *stcb, struct sctp_nets *net)
+{
+ struct sctp_association *assoc;
+ uint32_t cwnd_in_mtu;
+
+ assoc = &stcb->asoc;
+ /*
+ * We take the minimum of the burst limit and the initial congestion
+ * window. The initial congestion window is at least two times the
+ * MTU.
+ */
+ cwnd_in_mtu = SCTP_BASE_SYSCTL(sctp_initial_cwnd);
+ if ((assoc->max_burst > 0) && (cwnd_in_mtu > assoc->max_burst))
+ cwnd_in_mtu = assoc->max_burst;
+ net->cwnd = (net->mtu - sizeof(struct sctphdr)) * cwnd_in_mtu;
+ net->ssthresh = assoc->peers_rwnd;
+
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) &
+ (SCTP_CWND_MONITOR_ENABLE | SCTP_CWND_LOGGING_ENABLE)) {
+ sctp_log_cwnd(stcb, net, 0, SCTP_CWND_INITIALIZATION);
+ }
+}
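+/*
+ * Worked example (values assumed for illustration): with an MTU of
+ * 1500, sizeof(struct sctphdr) == 12 and cwnd_in_mtu == 3, the initial
+ * cwnd is (1500 - 12) * 3 = 4464 bytes, while ssthresh starts at the
+ * peer's advertised receive window.
+ */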
+
+void
+sctp_cwnd_update_after_fr(struct sctp_tcb *stcb,
+ struct sctp_association *asoc)
+{
+ struct sctp_nets *net;
+
+ /*-
+ * CMT fast recovery code. Need to debug. ((sctp_cmt_on_off == 1) &&
+ * (net->fast_retran_loss_recovery == 0)))
+ */
+ TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+ if ((asoc->fast_retran_loss_recovery == 0) ||
+ (asoc->sctp_cmt_on_off == 1)) {
+ /* out of an RFC2582 fast recovery window? */
+ if (net->net_ack > 0) {
+ /*
+ * per section 7.2.3, were there any
+ * destinations that had a fast retransmit
+ * to them? If so, what we need to do is
+ * adjust ssthresh and cwnd.
+ */
+ struct sctp_tmit_chunk *lchk;
+ int old_cwnd = net->cwnd;
+
+ net->ssthresh = net->cwnd / 2;
+ if (net->ssthresh < (net->mtu * 2)) {
+ net->ssthresh = 2 * net->mtu;
+ }
+ net->cwnd = net->ssthresh;
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
+ sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd),
+ SCTP_CWND_LOG_FROM_FR);
+ }
+ lchk = TAILQ_FIRST(&asoc->send_queue);
+
+ net->partial_bytes_acked = 0;
+ /* Turn on fast recovery window */
+ asoc->fast_retran_loss_recovery = 1;
+ if (lchk == NULL) {
+ /* Mark end of the window */
+ asoc->fast_recovery_tsn = asoc->sending_seq - 1;
+ } else {
+ asoc->fast_recovery_tsn = lchk->rec.data.TSN_seq - 1;
+ }
+
+ /*
+ * CMT fast recovery -- per destination
+ * recovery variable.
+ */
+ net->fast_retran_loss_recovery = 1;
+
+ if (lchk == NULL) {
+ /* Mark end of the window */
+ net->fast_recovery_tsn = asoc->sending_seq - 1;
+ } else {
+ net->fast_recovery_tsn = lchk->rec.data.TSN_seq - 1;
+ }
+
+ /*
+ * Disable Nonce Sum Checking and store the
+ * resync tsn
+ */
+ asoc->nonce_sum_check = 0;
+ asoc->nonce_resync_tsn = asoc->fast_recovery_tsn + 1;
+
+ sctp_timer_stop(SCTP_TIMER_TYPE_SEND,
+ stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_32);
+ sctp_timer_start(SCTP_TIMER_TYPE_SEND,
+ stcb->sctp_ep, stcb, net);
+ }
+ } else if (net->net_ack > 0) {
+ /*
+ * Mark a peg that we WOULD have done a cwnd
+ * reduction but RFC2582 prevented this action.
+ */
+ SCTP_STAT_INCR(sctps_fastretransinrtt);
+ }
+ }
+}
+
+void
+sctp_cwnd_update_after_sack(struct sctp_tcb *stcb,
+ struct sctp_association *asoc,
+ int accum_moved, int reneged_all, int will_exit)
+{
+ struct sctp_nets *net;
+
+ /******************************/
+ /* update cwnd and Early FR */
+ /******************************/
+ TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+
+#ifdef JANA_CMT_FAST_RECOVERY
+ /*
+ * CMT fast recovery code. Need to debug.
+ */
+ if (net->fast_retran_loss_recovery && net->new_pseudo_cumack) {
+ if (compare_with_wrap(asoc->last_acked_seq,
+ net->fast_recovery_tsn, MAX_TSN) ||
+ (asoc->last_acked_seq == net->fast_recovery_tsn) ||
+ compare_with_wrap(net->pseudo_cumack, net->fast_recovery_tsn, MAX_TSN) ||
+ (net->pseudo_cumack == net->fast_recovery_tsn)) {
+ net->will_exit_fast_recovery = 1;
+ }
+ }
+#endif
+ if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
+ /*
+ * So, first of all, do we need to have an Early FR
+ * timer running?
+ */
+ if ((!TAILQ_EMPTY(&asoc->sent_queue) &&
+ (net->ref_count > 1) &&
+ (net->flight_size < net->cwnd)) ||
+ (reneged_all)) {
+ /*
+ * yes, so in this case stop it if it's
+ * running, and then restart it. Reneging
+ * all is a special case where we want to
+ * run the Early FR timer and then force the
+ * last few unacked to be sent, causing us
+ * to elicit a SACK with gaps to force out
+ * the others.
+ */
+ if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
+ SCTP_STAT_INCR(sctps_earlyfrstpidsck2);
+ sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
+ SCTP_FROM_SCTP_INDATA + SCTP_LOC_20);
+ }
+ SCTP_STAT_INCR(sctps_earlyfrstrid);
+ sctp_timer_start(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net);
+ } else {
+ /* No, stop it if it's running */
+ if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
+ SCTP_STAT_INCR(sctps_earlyfrstpidsck3);
+ sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
+ SCTP_FROM_SCTP_INDATA + SCTP_LOC_21);
+ }
+ }
+ }
+ /* if nothing was acked on this destination skip it */
+ if (net->net_ack == 0) {
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
+ sctp_log_cwnd(stcb, net, 0, SCTP_CWND_LOG_FROM_SACK);
+ }
+ continue;
+ }
+ if (net->net_ack2 > 0) {
+ /*
+ * Karn's rule applies to clearing error count, this
+ * is optional.
+ */
+ net->error_count = 0;
+ if ((net->dest_state & SCTP_ADDR_NOT_REACHABLE) ==
+ SCTP_ADDR_NOT_REACHABLE) {
+ /* addr came good */
+ net->dest_state &= ~SCTP_ADDR_NOT_REACHABLE;
+ net->dest_state |= SCTP_ADDR_REACHABLE;
+ sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
+ SCTP_RECEIVED_SACK, (void *)net, SCTP_SO_NOT_LOCKED);
+ /* now was it the primary? if so restore */
+ if (net->dest_state & SCTP_ADDR_WAS_PRIMARY) {
+ (void)sctp_set_primary_addr(stcb, (struct sockaddr *)NULL, net);
+ }
+ }
+ /*
+ * JRS 5/14/07 - If CMT PF is on and the destination
+ * is in PF state, set the destination to active
+ * state and set the cwnd to one or two MTU's based
+ * on whether PF1 or PF2 is being used.
+ *
+ * Should we stop any running T3 timer here?
+ */
+ if ((asoc->sctp_cmt_on_off == 1) &&
+ (asoc->sctp_cmt_pf > 0) &&
+ ((net->dest_state & SCTP_ADDR_PF) == SCTP_ADDR_PF)) {
+ net->dest_state &= ~SCTP_ADDR_PF;
+ net->cwnd = net->mtu * asoc->sctp_cmt_pf;
+ SCTPDBG(SCTP_DEBUG_INDATA1, "Destination %p moved from PF to reachable with cwnd %d.\n",
+ net, net->cwnd);
+ /*
+ * Since the cwnd value is explicitly set,
+ * skip the code that updates the cwnd
+ * value.
+ */
+ goto skip_cwnd_update;
+ }
+ }
+#ifdef JANA_CMT_FAST_RECOVERY
+ /*
+ * CMT fast recovery code
+ */
+ /*
+ * if (sctp_cmt_on_off == 1 &&
+ * net->fast_retran_loss_recovery &&
+ * net->will_exit_fast_recovery == 0) { @@@ Do something }
+ * else if (sctp_cmt_on_off == 0 &&
+ * asoc->fast_retran_loss_recovery && will_exit == 0) {
+ */
+#endif
+
+ if (asoc->fast_retran_loss_recovery &&
+ (will_exit == 0) &&
+ (asoc->sctp_cmt_on_off == 0)) {
+ /*
+ * If we are in loss recovery we skip any cwnd
+ * update
+ */
+ goto skip_cwnd_update;
+ }
+ /*
+ * CMT: CUC algorithm. Update cwnd if pseudo-cumack has
+ * moved.
+ */
+ if (accum_moved ||
+ ((asoc->sctp_cmt_on_off == 1) && net->new_pseudo_cumack)) {
+ /* If the cumulative ack moved we can proceed */
+ if (net->cwnd <= net->ssthresh) {
+ /* We are in slow start */
+ if (net->flight_size + net->net_ack >= net->cwnd) {
+ if (net->net_ack > (net->mtu * SCTP_BASE_SYSCTL(sctp_L2_abc_variable))) {
+ net->cwnd += (net->mtu * SCTP_BASE_SYSCTL(sctp_L2_abc_variable));
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
+ sctp_log_cwnd(stcb, net, net->mtu,
+ SCTP_CWND_LOG_FROM_SS);
+ }
+ } else {
+ net->cwnd += net->net_ack;
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
+ sctp_log_cwnd(stcb, net, net->net_ack,
+ SCTP_CWND_LOG_FROM_SS);
+ }
+ }
+ } else {
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
+ sctp_log_cwnd(stcb, net, net->net_ack,
+ SCTP_CWND_LOG_NOADV_SS);
+ }
+ }
+ } else {
+ /* We are in congestion avoidance */
+ /*
+ * Add to pba
+ */
+ net->partial_bytes_acked += net->net_ack;
+
+ if ((net->flight_size + net->net_ack >= net->cwnd) &&
+ (net->partial_bytes_acked >= net->cwnd)) {
+ net->partial_bytes_acked -= net->cwnd;
+ net->cwnd += net->mtu;
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
+ sctp_log_cwnd(stcb, net, net->mtu,
+ SCTP_CWND_LOG_FROM_CA);
+ }
+ } else {
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
+ sctp_log_cwnd(stcb, net, net->net_ack,
+ SCTP_CWND_LOG_NOADV_CA);
+ }
+ }
+ }
+ } else {
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
+ sctp_log_cwnd(stcb, net, net->mtu,
+ SCTP_CWND_LOG_NO_CUMACK);
+ }
+ }
+skip_cwnd_update:
+ /*
+ * NOW, according to Karn's rule, do we need to restore the
+ * RTO timer? Check our net_ack2. If it is not set then we
+ * have an ambiguity, i.e. all data ack'd was sent to more
+ * than one place.
+ */
+ if (net->net_ack2) {
+ /* restore any doubled timers */
+ net->RTO = ((net->lastsa >> 2) + net->lastsv) >> 1;
+ if (net->RTO < stcb->asoc.minrto) {
+ net->RTO = stcb->asoc.minrto;
+ }
+ if (net->RTO > stcb->asoc.maxrto) {
+ net->RTO = stcb->asoc.maxrto;
+ }
+ }
+ }
+}
+
+void
+sctp_cwnd_update_after_timeout(struct sctp_tcb *stcb, struct sctp_nets *net)
+{
+ int old_cwnd = net->cwnd;
+
+ net->ssthresh = max(net->cwnd / 2, 4 * net->mtu);
+ net->cwnd = net->mtu;
+ net->partial_bytes_acked = 0;
+
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
+ sctp_log_cwnd(stcb, net, net->cwnd - old_cwnd, SCTP_CWND_LOG_FROM_RTX);
+ }
+}
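+/*
+ * Worked example (values assumed for illustration): with cwnd == 12000
+ * and mtu == 1500, a timeout leaves ssthresh = max(12000 / 2,
+ * 4 * 1500) = 6000 and collapses cwnd to a single MTU (1500), matching
+ * the RFC 4960 response to a T3-rtx expiry.
+ */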
+
+void
+sctp_cwnd_update_after_ecn_echo(struct sctp_tcb *stcb, struct sctp_nets *net)
+{
+ int old_cwnd = net->cwnd;
+
+ SCTP_STAT_INCR(sctps_ecnereducedcwnd);
+ net->ssthresh = net->cwnd / 2;
+ if (net->ssthresh < net->mtu) {
+ net->ssthresh = net->mtu;
+ /* here back off the timer as well, to slow us down */
+ net->RTO <<= 1;
+ }
+ net->cwnd = net->ssthresh;
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
+ sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_SAT);
+ }
+}
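+/*
+ * Worked example (values assumed for illustration): with cwnd == 2000
+ * and mtu == 1500, an ECN echo computes ssthresh = 1000; since that is
+ * below one MTU, ssthresh is raised to 1500 and the RTO is doubled as
+ * an extra brake, after which cwnd drops to ssthresh.
+ */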
+
+void
+sctp_cwnd_update_after_packet_dropped(struct sctp_tcb *stcb,
+ struct sctp_nets *net, struct sctp_pktdrop_chunk *cp,
+ uint32_t * bottle_bw, uint32_t * on_queue)
+{
+ uint32_t bw_avail;
+ int rtt, incr;
+ int old_cwnd = net->cwnd;
+
+ /* need real RTT for this calc */
+ rtt = ((net->lastsa >> 2) + net->lastsv) >> 1;
+ /* get bottle neck bw */
+ *bottle_bw = ntohl(cp->bottle_bw);
+ /* and whats on queue */
+ *on_queue = ntohl(cp->current_onq);
+ /*
+ * adjust the on-queue figure if our flight size is larger; it could
+ * be that the router has not yet gotten data "in-flight" to it
+ */
+ if (*on_queue < net->flight_size)
+ *on_queue = net->flight_size;
+ /* calculate the available space */
+ bw_avail = (*bottle_bw * rtt) / 1000;
+ if (bw_avail > *bottle_bw) {
+ /*
+ * Cap the growth to no more than the bottle neck. This can
+ * happen as RTT slides up due to queues. It also means if
+ * you have more than a 1 second RTT with an empty queue you
+ * will be limited to the bottle_bw per second no matter if
+ * other points have 1/2 the RTT and you could get more
+ * out...
+ */
+ bw_avail = *bottle_bw;
+ }
+ if (*on_queue > bw_avail) {
+ /*
+ * No room for anything else; don't allow anything else to be
+ * "added to the fire".
+ */
+ int seg_inflight, seg_onqueue, my_portion;
+
+ net->partial_bytes_acked = 0;
+
+ /* how much are we over queue size? */
+ incr = *on_queue - bw_avail;
+ if (stcb->asoc.seen_a_sack_this_pkt) {
+ /*
+ * undo any cwnd adjustment that the sack might have
+ * made
+ */
+ net->cwnd = net->prev_cwnd;
+ }
+ /* Now how much of that is mine? */
+ seg_inflight = net->flight_size / net->mtu;
+ seg_onqueue = *on_queue / net->mtu;
+ my_portion = (incr * seg_inflight) / seg_onqueue;
+
+ /* Have I already made an adjustment? */
+ if (net->cwnd > net->flight_size) {
+ /*
+ * for this flight I made an adjustment, so we need to
+ * decrease the portion by a share of our previous
+ * adjustment.
+ */
+ int diff_adj;
+
+ diff_adj = net->cwnd - net->flight_size;
+ if (diff_adj > my_portion)
+ my_portion = 0;
+ else
+ my_portion -= diff_adj;
+ }
+ /*
+ * back down to the previous cwnd (assume we have had a sack
+ * before this packet), minus whatever portion of the
+ * overage is my fault.
+ */
+ net->cwnd -= my_portion;
+
+ /* we will NOT back down more than 1 MTU */
+ if (net->cwnd <= net->mtu) {
+ net->cwnd = net->mtu;
+ }
+ /* force into CA */
+ net->ssthresh = net->cwnd - 1;
+ } else {
+ /*
+ * Take 1/4 of the space left or the max burst,
+ * whichever is less.
+ */
+ incr = min((bw_avail - *on_queue) >> 2,
+ stcb->asoc.max_burst * net->mtu);
+ net->cwnd += incr;
+ }
+ if (net->cwnd > bw_avail) {
+ /* We can't exceed the pipe size */
+ net->cwnd = bw_avail;
+ }
+ if (net->cwnd < net->mtu) {
+ /* We always have 1 MTU */
+ net->cwnd = net->mtu;
+ }
+ if (net->cwnd - old_cwnd != 0) {
+ /* log only changes */
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
+ sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd),
+ SCTP_CWND_LOG_FROM_SAT);
+ }
+ }
+}
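+/*
+ * Worked example (values assumed for illustration): with *bottle_bw ==
+ * 125000 bytes/sec and rtt == 40, bw_avail = (125000 * 40) / 1000 =
+ * 5000 bytes. If *on_queue is 8000 the path is over-subscribed by 3000
+ * bytes and cwnd is walked back by this sender's share of the overage;
+ * otherwise cwnd may grow by up to a quarter of the free space, capped
+ * by max burst.
+ */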
+
+void
+sctp_cwnd_update_after_output(struct sctp_tcb *stcb,
+ struct sctp_nets *net, int burst_limit)
+{
+ int old_cwnd = net->cwnd;
+
+ if (net->ssthresh < net->cwnd)
+ net->ssthresh = net->cwnd;
+ net->cwnd = (net->flight_size + (burst_limit * net->mtu));
+
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
+ sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_BRST);
+ }
+}
+
+void
+sctp_cwnd_update_after_fr_timer(struct sctp_inpcb *inp,
+ struct sctp_tcb *stcb, struct sctp_nets *net)
+{
+ int old_cwnd = net->cwnd;
+
+ sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_EARLY_FR_TMR, SCTP_SO_NOT_LOCKED);
+ /*
+ * make a small adjustment to cwnd and force to CA.
+ */
+ if (net->cwnd > net->mtu)
+ /* drop down one MTU after sending */
+ net->cwnd -= net->mtu;
+ if (net->cwnd < net->ssthresh)
+ /* still in SS move to CA */
+ net->ssthresh = net->cwnd - 1;
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
+ sctp_log_cwnd(stcb, net, (old_cwnd - net->cwnd), SCTP_CWND_LOG_FROM_FR);
+ }
+}
+
+struct sctp_hs_raise_drop {
+ int32_t cwnd;
+ int32_t increase;
+ int32_t drop_percent;
+};
+
+#define SCTP_HS_TABLE_SIZE 73
+
+struct sctp_hs_raise_drop sctp_cwnd_adjust[SCTP_HS_TABLE_SIZE] = {
+ {38, 1, 50}, /* 0 */
+ {118, 2, 44}, /* 1 */
+ {221, 3, 41}, /* 2 */
+ {347, 4, 38}, /* 3 */
+ {495, 5, 37}, /* 4 */
+ {663, 6, 35}, /* 5 */
+ {851, 7, 34}, /* 6 */
+ {1058, 8, 33}, /* 7 */
+ {1284, 9, 32}, /* 8 */
+ {1529, 10, 31}, /* 9 */
+ {1793, 11, 30}, /* 10 */
+ {2076, 12, 29}, /* 11 */
+ {2378, 13, 28}, /* 12 */
+ {2699, 14, 28}, /* 13 */
+ {3039, 15, 27}, /* 14 */
+ {3399, 16, 27}, /* 15 */
+ {3778, 17, 26}, /* 16 */
+ {4177, 18, 26}, /* 17 */
+ {4596, 19, 25}, /* 18 */
+ {5036, 20, 25}, /* 19 */
+ {5497, 21, 24}, /* 20 */
+ {5979, 22, 24}, /* 21 */
+ {6483, 23, 23}, /* 22 */
+ {7009, 24, 23}, /* 23 */
+ {7558, 25, 22}, /* 24 */
+ {8130, 26, 22}, /* 25 */
+ {8726, 27, 22}, /* 26 */
+ {9346, 28, 21}, /* 27 */
+ {9991, 29, 21}, /* 28 */
+ {10661, 30, 21}, /* 29 */
+ {11358, 31, 20}, /* 30 */
+ {12082, 32, 20}, /* 31 */
+ {12834, 33, 20}, /* 32 */
+ {13614, 34, 19}, /* 33 */
+ {14424, 35, 19}, /* 34 */
+ {15265, 36, 19}, /* 35 */
+ {16137, 37, 19}, /* 36 */
+ {17042, 38, 18}, /* 37 */
+ {17981, 39, 18}, /* 38 */
+ {18955, 40, 18}, /* 39 */
+ {19965, 41, 17}, /* 40 */
+ {21013, 42, 17}, /* 41 */
+ {22101, 43, 17}, /* 42 */
+ {23230, 44, 17}, /* 43 */
+ {24402, 45, 16}, /* 44 */
+ {25618, 46, 16}, /* 45 */
+ {26881, 47, 16}, /* 46 */
+ {28193, 48, 16}, /* 47 */
+ {29557, 49, 15}, /* 48 */
+ {30975, 50, 15}, /* 49 */
+ {32450, 51, 15}, /* 50 */
+ {33986, 52, 15}, /* 51 */
+ {35586, 53, 14}, /* 52 */
+ {37253, 54, 14}, /* 53 */
+ {38992, 55, 14}, /* 54 */
+ {40808, 56, 14}, /* 55 */
+ {42707, 57, 13}, /* 56 */
+ {44694, 58, 13}, /* 57 */
+ {46776, 59, 13}, /* 58 */
+ {48961, 60, 13}, /* 59 */
+ {51258, 61, 13}, /* 60 */
+ {53677, 62, 12}, /* 61 */
+ {56230, 63, 12}, /* 62 */
+ {58932, 64, 12}, /* 63 */
+ {61799, 65, 12}, /* 64 */
+ {64851, 66, 11}, /* 65 */
+ {68113, 67, 11}, /* 66 */
+ {71617, 68, 11}, /* 67 */
+ {75401, 69, 10}, /* 68 */
+ {79517, 70, 10}, /* 69 */
+ {84035, 71, 10}, /* 70 */
+ {89053, 72, 10}, /* 71 */
+ {94717, 73, 9} /* 72 */
+};
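+/*
+ * Example lookup (values assumed for illustration): with cwnd around
+ * 200 KB, cur_val = cwnd >> 10 = 200 first falls below the
+ * {221, 3, 41} row, so each adjustment grows cwnd by 3 KB (3 << 10)
+ * and a loss event backs it off by 41%, mirroring the HighSpeed TCP
+ * response table.
+ */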
+
+static void
+sctp_hs_cwnd_increase(struct sctp_tcb *stcb, struct sctp_nets *net)
+{
+ int cur_val, i, indx, incr;
+
+ cur_val = net->cwnd >> 10;
+ indx = SCTP_HS_TABLE_SIZE - 1;
+#ifdef SCTP_DEBUG
+ printf("HS CC CAlled.\n");
+#endif
+ if (cur_val < sctp_cwnd_adjust[0].cwnd) {
+ /* normal mode */
+ if (net->net_ack > net->mtu) {
+ net->cwnd += net->mtu;
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
+ sctp_log_cwnd(stcb, net, net->mtu, SCTP_CWND_LOG_FROM_SS);
+ }
+ } else {
+ net->cwnd += net->net_ack;
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
+ sctp_log_cwnd(stcb, net, net->net_ack, SCTP_CWND_LOG_FROM_SS);
+ }
+ }
+ } else {
+ for (i = net->last_hs_used; i < SCTP_HS_TABLE_SIZE; i++) {
+ if (cur_val < sctp_cwnd_adjust[i].cwnd) {
+ indx = i;
+ break;
+ }
+ }
+ net->last_hs_used = indx;
+ incr = ((sctp_cwnd_adjust[indx].increase) << 10);
+ net->cwnd += incr;
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
+ sctp_log_cwnd(stcb, net, incr, SCTP_CWND_LOG_FROM_SS);
+ }
+ }
+}
+
+static void
+sctp_hs_cwnd_decrease(struct sctp_tcb *stcb, struct sctp_nets *net)
+{
+ int cur_val, i, indx;
+ int old_cwnd = net->cwnd;
+
+ cur_val = net->cwnd >> 10;
+ if (cur_val < sctp_cwnd_adjust[0].cwnd) {
+ /* normal mode */
+ net->ssthresh = net->cwnd / 2;
+ if (net->ssthresh < (net->mtu * 2)) {
+ net->ssthresh = 2 * net->mtu;
+ }
+ net->cwnd = net->ssthresh;
+ } else {
+ /* drop by the proper amount */
+ net->ssthresh = net->cwnd - (int)((net->cwnd / 100) *
+ sctp_cwnd_adjust[net->last_hs_used].drop_percent);
+ net->cwnd = net->ssthresh;
+ /* now where are we */
+ indx = net->last_hs_used;
+ cur_val = net->cwnd >> 10;
+ /* reset where we are in the table */
+ if (cur_val < sctp_cwnd_adjust[0].cwnd) {
+ /* fell out of HS */
+ net->last_hs_used = 0;
+ } else {
+ for (i = indx; i >= 1; i--) {
+ if (cur_val > sctp_cwnd_adjust[i - 1].cwnd) {
+ break;
+ }
+ }
+ net->last_hs_used = indx;
+ }
+ }
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
+ sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_FR);
+ }
+}
+
+void
+sctp_hs_cwnd_update_after_fr(struct sctp_tcb *stcb,
+ struct sctp_association *asoc)
+{
+ struct sctp_nets *net;
+
+ /*
+ * CMT fast recovery code. Need to debug. ((sctp_cmt_on_off == 1) &&
+ * (net->fast_retran_loss_recovery == 0)))
+ */
+ TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+ if ((asoc->fast_retran_loss_recovery == 0) ||
+ (asoc->sctp_cmt_on_off == 1)) {
+ /* out of an RFC2582 fast recovery window? */
+ if (net->net_ack > 0) {
+ /*
+ * per section 7.2.3, were there any
+ * destinations that had a fast retransmit
+ * to them? If so, what we need to do is
+ * adjust ssthresh and cwnd.
+ */
+ struct sctp_tmit_chunk *lchk;
+
+ sctp_hs_cwnd_decrease(stcb, net);
+
+ lchk = TAILQ_FIRST(&asoc->send_queue);
+
+ net->partial_bytes_acked = 0;
+ /* Turn on fast recovery window */
+ asoc->fast_retran_loss_recovery = 1;
+ if (lchk == NULL) {
+ /* Mark end of the window */
+ asoc->fast_recovery_tsn = asoc->sending_seq - 1;
+ } else {
+ asoc->fast_recovery_tsn = lchk->rec.data.TSN_seq - 1;
+ }
+
+ /*
+ * CMT fast recovery -- per destination
+ * recovery variable.
+ */
+ net->fast_retran_loss_recovery = 1;
+
+ if (lchk == NULL) {
+ /* Mark end of the window */
+ net->fast_recovery_tsn = asoc->sending_seq - 1;
+ } else {
+ net->fast_recovery_tsn = lchk->rec.data.TSN_seq - 1;
+ }
+
+ /*
+ * Disable Nonce Sum Checking and store the
+ * resync tsn
+ */
+ asoc->nonce_sum_check = 0;
+ asoc->nonce_resync_tsn = asoc->fast_recovery_tsn + 1;
+
+ sctp_timer_stop(SCTP_TIMER_TYPE_SEND,
+ stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_32);
+ sctp_timer_start(SCTP_TIMER_TYPE_SEND,
+ stcb->sctp_ep, stcb, net);
+ }
+ } else if (net->net_ack > 0) {
+ /*
+ * Mark a peg that we WOULD have done a cwnd
+ * reduction but RFC2582 prevented this action.
+ */
+ SCTP_STAT_INCR(sctps_fastretransinrtt);
+ }
+ }
+}
+
+void
+sctp_hs_cwnd_update_after_sack(struct sctp_tcb *stcb,
+ struct sctp_association *asoc,
+ int accum_moved, int reneged_all, int will_exit)
+{
+ struct sctp_nets *net;
+
+ /******************************/
+ /* update cwnd and Early FR */
+ /******************************/
+ TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+
+#ifdef JANA_CMT_FAST_RECOVERY
+ /*
+ * CMT fast recovery code. Need to debug.
+ */
+ if (net->fast_retran_loss_recovery && net->new_pseudo_cumack) {
+ if (compare_with_wrap(asoc->last_acked_seq,
+ net->fast_recovery_tsn, MAX_TSN) ||
+ (asoc->last_acked_seq == net->fast_recovery_tsn) ||
+ compare_with_wrap(net->pseudo_cumack, net->fast_recovery_tsn, MAX_TSN) ||
+ (net->pseudo_cumack == net->fast_recovery_tsn)) {
+ net->will_exit_fast_recovery = 1;
+ }
+ }
+#endif
+ if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
+ /*
+ * So, first of all, do we need to have an Early FR
+ * timer running?
+ */
+ if ((!TAILQ_EMPTY(&asoc->sent_queue) &&
+ (net->ref_count > 1) &&
+ (net->flight_size < net->cwnd)) ||
+ (reneged_all)) {
+ /*
+ * yes, so in this case stop it if it's
+ * running, and then restart it. Reneging
+ * all is a special case where we want to
+ * run the Early FR timer and then force the
+ * last few unacked to be sent, causing us
+ * to elicit a SACK with gaps to force out
+ * the others.
+ */
+ if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
+ SCTP_STAT_INCR(sctps_earlyfrstpidsck2);
+ sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
+ SCTP_FROM_SCTP_INDATA + SCTP_LOC_20);
+ }
+ SCTP_STAT_INCR(sctps_earlyfrstrid);
+ sctp_timer_start(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net);
+ } else {
+ /* No, stop it if it's running */
+ if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
+ SCTP_STAT_INCR(sctps_earlyfrstpidsck3);
+ sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
+ SCTP_FROM_SCTP_INDATA + SCTP_LOC_21);
+ }
+ }
+ }
+ /* if nothing was acked on this destination skip it */
+ if (net->net_ack == 0) {
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
+ sctp_log_cwnd(stcb, net, 0, SCTP_CWND_LOG_FROM_SACK);
+ }
+ continue;
+ }
+ if (net->net_ack2 > 0) {
+ /*
+ * Karn's rule applies to clearing error count, this
+ * is optional.
+ */
+ net->error_count = 0;
+ if ((net->dest_state & SCTP_ADDR_NOT_REACHABLE) ==
+ SCTP_ADDR_NOT_REACHABLE) {
+ /* addr came good */
+ net->dest_state &= ~SCTP_ADDR_NOT_REACHABLE;
+ net->dest_state |= SCTP_ADDR_REACHABLE;
+ sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
+ SCTP_RECEIVED_SACK, (void *)net, SCTP_SO_NOT_LOCKED);
+ /* now was it the primary? if so restore */
+ if (net->dest_state & SCTP_ADDR_WAS_PRIMARY) {
+ (void)sctp_set_primary_addr(stcb, (struct sockaddr *)NULL, net);
+ }
+ }
+ /*
+ * JRS 5/14/07 - If CMT PF is on and the destination
+ * is in PF state, set the destination to active
+ * state and set the cwnd to one or two MTU's based
+ * on whether PF1 or PF2 is being used.
+ *
+ * Should we stop any running T3 timer here?
+ */
+ if ((asoc->sctp_cmt_on_off == 1) &&
+ (asoc->sctp_cmt_pf > 0) &&
+ ((net->dest_state & SCTP_ADDR_PF) == SCTP_ADDR_PF)) {
+ net->dest_state &= ~SCTP_ADDR_PF;
+ net->cwnd = net->mtu * asoc->sctp_cmt_pf;
+ SCTPDBG(SCTP_DEBUG_INDATA1, "Destination %p moved from PF to reachable with cwnd %d.\n",
+ net, net->cwnd);
+ /*
+ * Since the cwnd value is explicitly set,
+ * skip the code that updates the cwnd
+ * value.
+ */
+ goto skip_cwnd_update;
+ }
+ }
+#ifdef JANA_CMT_FAST_RECOVERY
+ /*
+ * CMT fast recovery code
+ */
+ /*
+ * if (sctp_cmt_on_off == 1 &&
+ * net->fast_retran_loss_recovery &&
+ * net->will_exit_fast_recovery == 0) { @@@ Do something }
+ * else if (sctp_cmt_on_off == 0 &&
+ * asoc->fast_retran_loss_recovery && will_exit == 0) {
+ */
+#endif
+
+ if (asoc->fast_retran_loss_recovery &&
+ (will_exit == 0) &&
+ (asoc->sctp_cmt_on_off == 0)) {
+ /*
+ * If we are in loss recovery we skip any cwnd
+ * update
+ */
+ goto skip_cwnd_update;
+ }
+ /*
+ * CMT: CUC algorithm. Update cwnd if pseudo-cumack has
+ * moved.
+ */
+ if (accum_moved ||
+ ((asoc->sctp_cmt_on_off == 1) && net->new_pseudo_cumack)) {
+ /* If the cumulative ack moved we can proceed */
+ if (net->cwnd <= net->ssthresh) {
+ /* We are in slow start */
+ if (net->flight_size + net->net_ack >= net->cwnd) {
+
+ sctp_hs_cwnd_increase(stcb, net);
+
+ } else {
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
+ sctp_log_cwnd(stcb, net, net->net_ack,
+ SCTP_CWND_LOG_NOADV_SS);
+ }
+ }
+ } else {
+ /* We are in congestion avoidance */
+ net->partial_bytes_acked += net->net_ack;
+ if ((net->flight_size + net->net_ack >= net->cwnd) &&
+ (net->partial_bytes_acked >= net->cwnd)) {
+ net->partial_bytes_acked -= net->cwnd;
+ net->cwnd += net->mtu;
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
+ sctp_log_cwnd(stcb, net, net->mtu,
+ SCTP_CWND_LOG_FROM_CA);
+ }
+ } else {
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
+ sctp_log_cwnd(stcb, net, net->net_ack,
+ SCTP_CWND_LOG_NOADV_CA);
+ }
+ }
+ }
+ } else {
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
+ sctp_log_cwnd(stcb, net, net->mtu,
+ SCTP_CWND_LOG_NO_CUMACK);
+ }
+ }
+skip_cwnd_update:
+ /*
+ * NOW, according to Karn's rule, do we need to restore the
+ * RTO timer? Check our net_ack2. If it is not set then we
+ * have an ambiguity, i.e. all data ack'd was sent to more
+ * than one place.
+ */
+ if (net->net_ack2) {
+ /* restore any doubled timers */
+ net->RTO = ((net->lastsa >> 2) + net->lastsv) >> 1;
+ if (net->RTO < stcb->asoc.minrto) {
+ net->RTO = stcb->asoc.minrto;
+ }
+ if (net->RTO > stcb->asoc.maxrto) {
+ net->RTO = stcb->asoc.maxrto;
+ }
+ }
+ }
+}
+
+
+/*
+ * H-TCP congestion control. The algorithm is detailed in:
+ * R.N.Shorten, D.J.Leith:
+ * "H-TCP: TCP for high-speed and long-distance networks"
+ * Proc. PFLDnet, Argonne, 2004.
+ * http://www.hamilton.ie/net/htcp3.pdf
+ */
+
+
+static int use_rtt_scaling = 1;
+static int use_bandwidth_switch = 1;
+
+static inline int
+between(uint32_t seq1, uint32_t seq2, uint32_t seq3)
+{
+ return seq3 - seq2 >= seq1 - seq2;
+}
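+/*
+ * between() is a wraparound-safe range test over the 32-bit sequence
+ * space: it answers whether seq1 lies in [seq2, seq3]. For example
+ * (values assumed for illustration), between(5, 0xFFFFFFF0, 20)
+ * compares 20 - 0xFFFFFFF0 == 36 against 5 - 0xFFFFFFF0 == 21 and
+ * returns true even though the window straddles the 2^32 wrap.
+ */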
+
+static inline uint32_t
+htcp_cong_time(struct htcp *ca)
+{
+ return sctp_get_tick_count() - ca->last_cong;
+}
+
+static inline uint32_t
+htcp_ccount(struct htcp *ca)
+{
+ return htcp_cong_time(ca) / ca->minRTT;
+}
+
+static inline void
+htcp_reset(struct htcp *ca)
+{
+ ca->undo_last_cong = ca->last_cong;
+ ca->undo_maxRTT = ca->maxRTT;
+ ca->undo_old_maxB = ca->old_maxB;
+ ca->last_cong = sctp_get_tick_count();
+}
+
+#ifdef SCTP_NOT_USED
+
+static uint32_t
+htcp_cwnd_undo(struct sctp_tcb *stcb, struct sctp_nets *net)
+{
+ net->htcp_ca.last_cong = net->htcp_ca.undo_last_cong;
+ net->htcp_ca.maxRTT = net->htcp_ca.undo_maxRTT;
+ net->htcp_ca.old_maxB = net->htcp_ca.undo_old_maxB;
+ return max(net->cwnd, ((net->ssthresh / net->mtu << 7) / net->htcp_ca.beta) * net->mtu);
+}
+
+#endif
+
+static inline void
+measure_rtt(struct sctp_tcb *stcb, struct sctp_nets *net)
+{
+ uint32_t srtt = net->lastsa >> 3;
+
+ /* keep track of minimum RTT seen so far, minRTT is zero at first */
+ if (net->htcp_ca.minRTT > srtt || !net->htcp_ca.minRTT)
+ net->htcp_ca.minRTT = srtt;
+
+ /* max RTT */
+ if (net->fast_retran_ip == 0 && net->ssthresh < 0xFFFF && htcp_ccount(&net->htcp_ca) > 3) {
+ if (net->htcp_ca.maxRTT < net->htcp_ca.minRTT)
+ net->htcp_ca.maxRTT = net->htcp_ca.minRTT;
+ if (net->htcp_ca.maxRTT < srtt && srtt <= net->htcp_ca.maxRTT + MSEC_TO_TICKS(20))
+ net->htcp_ca.maxRTT = srtt;
+ }
+}
+
+static void
+measure_achieved_throughput(struct sctp_tcb *stcb, struct sctp_nets *net)
+{
+ uint32_t now = sctp_get_tick_count();
+
+ if (net->fast_retran_ip == 0)
+ net->htcp_ca.bytes_acked = net->net_ack;
+
+ if (!use_bandwidth_switch)
+ return;
+
+ /* achieved throughput calculations */
+ /* JRS - not 100% sure of this statement */
+ if (net->fast_retran_ip == 1) {
+ net->htcp_ca.bytecount = 0;
+ net->htcp_ca.lasttime = now;
+ return;
+ }
+ net->htcp_ca.bytecount += net->net_ack;
+
+ if (net->htcp_ca.bytecount >= net->cwnd - ((net->htcp_ca.alpha >> 7 ? : 1) * net->mtu)
+ && now - net->htcp_ca.lasttime >= net->htcp_ca.minRTT
+ && net->htcp_ca.minRTT > 0) {
+ uint32_t cur_Bi = net->htcp_ca.bytecount / net->mtu * hz / (now - net->htcp_ca.lasttime);
+
+ if (htcp_ccount(&net->htcp_ca) <= 3) {
+ /* just after backoff */
+ net->htcp_ca.minB = net->htcp_ca.maxB = net->htcp_ca.Bi = cur_Bi;
+ } else {
+ net->htcp_ca.Bi = (3 * net->htcp_ca.Bi + cur_Bi) / 4;
+ if (net->htcp_ca.Bi > net->htcp_ca.maxB)
+ net->htcp_ca.maxB = net->htcp_ca.Bi;
+ if (net->htcp_ca.minB > net->htcp_ca.maxB)
+ net->htcp_ca.minB = net->htcp_ca.maxB;
+ }
+ net->htcp_ca.bytecount = 0;
+ net->htcp_ca.lasttime = now;
+ }
+}
+
+static inline void
+htcp_beta_update(struct htcp *ca, uint32_t minRTT, uint32_t maxRTT)
+{
+ if (use_bandwidth_switch) {
+ uint32_t maxB = ca->maxB;
+ uint32_t old_maxB = ca->old_maxB;
+
+ ca->old_maxB = ca->maxB;
+
+ if (!between(5 * maxB, 4 * old_maxB, 6 * old_maxB)) {
+ ca->beta = BETA_MIN;
+ ca->modeswitch = 0;
+ return;
+ }
+ }
+ if (ca->modeswitch && minRTT > (uint32_t) MSEC_TO_TICKS(10) && maxRTT) {
+ ca->beta = (minRTT << 7) / maxRTT;
+ if (ca->beta < BETA_MIN)
+ ca->beta = BETA_MIN;
+ else if (ca->beta > BETA_MAX)
+ ca->beta = BETA_MAX;
+ } else {
+ ca->beta = BETA_MIN;
+ ca->modeswitch = 1;
+ }
+}
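+/*
+ * Worked example (values assumed for illustration): with minRTT == 20
+ * ticks and maxRTT == 32 ticks, beta = (20 << 7) / 32 = 80, i.e.
+ * 80 / 128 = 0.625 in the << 7 fixed-point encoding, before clamping
+ * into [BETA_MIN, BETA_MAX].
+ */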
+
+static inline void
+htcp_alpha_update(struct htcp *ca)
+{
+ uint32_t minRTT = ca->minRTT;
+ uint32_t factor = 1;
+ uint32_t diff = htcp_cong_time(ca);
+
+ if (diff > (uint32_t) hz) {
+ diff -= hz;
+ factor = 1 + (10 * diff + ((diff / 2) * (diff / 2) / hz)) / hz;
+ }
+ if (use_rtt_scaling && minRTT) {
+ uint32_t scale = (hz << 3) / (10 * minRTT);
+
+ scale = min(max(scale, 1U << 2), 10U << 3); /* clamping ratio to
+ * interval [0.5,10]<<3 */
+ factor = (factor << 3) / scale;
+ if (!factor)
+ factor = 1;
+ }
+ ca->alpha = 2 * factor * ((1 << 7) - ca->beta);
+ if (!ca->alpha)
+ ca->alpha = ALPHA_BASE;
+}
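+/*
+ * Worked example (values assumed for illustration): with factor == 1
+ * and beta == 80, alpha = 2 * 1 * ((1 << 7) - 80) = 96, i.e.
+ * 96 / 128 = 0.75 in the same fixed-point encoding; a zero result is
+ * bumped to ALPHA_BASE.
+ */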
+
+/* After we have the rtt data to calculate beta, we'd still prefer to wait one
+ * rtt before we adjust our beta, to ensure we are working from
+ * consistent data.
+ *
+ * This function should be called when we hit a congestion event since only at
+ * that point do we really have a real sense of maxRTT (the queues en route
+ * were getting just too full now).
+ */
+static void
+htcp_param_update(struct sctp_tcb *stcb, struct sctp_nets *net)
+{
+ uint32_t minRTT = net->htcp_ca.minRTT;
+ uint32_t maxRTT = net->htcp_ca.maxRTT;
+
+ htcp_beta_update(&net->htcp_ca, minRTT, maxRTT);
+ htcp_alpha_update(&net->htcp_ca);
+
+ /*
+ * add slowly fading memory for maxRTT to accommodate routing
+ * changes etc
+ */
+ if (minRTT > 0 && maxRTT > minRTT)
+ net->htcp_ca.maxRTT = minRTT + ((maxRTT - minRTT) * 95) / 100;
+}
+
+static uint32_t
+htcp_recalc_ssthresh(struct sctp_tcb *stcb, struct sctp_nets *net)
+{
+ htcp_param_update(stcb, net);
+ return max(((net->cwnd / net->mtu * net->htcp_ca.beta) >> 7) * net->mtu, 2U * net->mtu);
+}
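+/*
+ * Worked example (values assumed for illustration): with cwnd == 15000,
+ * mtu == 1500 and beta == 80, ssthresh = ((15000 / 1500 * 80) >> 7) *
+ * 1500 = (800 >> 7) * 1500 = 6 * 1500 = 9000 bytes, never less than
+ * two MTUs.
+ */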
+
+static void
+htcp_cong_avoid(struct sctp_tcb *stcb, struct sctp_nets *net)
+{
+ /*-
+ * How to handle these functions?
+ * if (!tcp_is_cwnd_limited(sk, in_flight)) RRS - good question.
+ * return;
+ */
+ if (net->cwnd <= net->ssthresh) {
+ /* We are in slow start */
+ if (net->flight_size + net->net_ack >= net->cwnd) {
+ if (net->net_ack > (net->mtu * SCTP_BASE_SYSCTL(sctp_L2_abc_variable))) {
+ net->cwnd += (net->mtu * SCTP_BASE_SYSCTL(sctp_L2_abc_variable));
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
+ sctp_log_cwnd(stcb, net, net->mtu,
+ SCTP_CWND_LOG_FROM_SS);
+ }
+ } else {
+ net->cwnd += net->net_ack;
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
+ sctp_log_cwnd(stcb, net, net->net_ack,
+ SCTP_CWND_LOG_FROM_SS);
+ }
+ }
+ } else {
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
+ sctp_log_cwnd(stcb, net, net->net_ack,
+ SCTP_CWND_LOG_NOADV_SS);
+ }
+ }
+ } else {
+ measure_rtt(stcb, net);
+
+ /*
+ * In the dangerous area, increase slowly. In theory this is
+ * net->cwnd += alpha / net->cwnd
+ */
+ /* What is snd_cwnd_cnt?? */
+ if (((net->partial_bytes_acked / net->mtu * net->htcp_ca.alpha) >> 7) * net->mtu >= net->cwnd) {
+ /*-
+ * Does SCTP have a cwnd clamp?
+ * if (net->snd_cwnd < net->snd_cwnd_clamp) - Nope (RRS).
+ */
+ net->cwnd += net->mtu;
+ net->partial_bytes_acked = 0;
+ htcp_alpha_update(&net->htcp_ca);
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
+ sctp_log_cwnd(stcb, net, net->mtu,
+ SCTP_CWND_LOG_FROM_CA);
+ }
+ } else {
+ net->partial_bytes_acked += net->net_ack;
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
+ sctp_log_cwnd(stcb, net, net->net_ack,
+ SCTP_CWND_LOG_NOADV_CA);
+ }
+ }
+
+ net->htcp_ca.bytes_acked = net->mtu;
+ }
+}
+
+#ifdef SCTP_NOT_USED
+/* Lower bound on congestion window. */
+static uint32_t
+htcp_min_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net)
+{
+ return net->ssthresh;
+}
+
+#endif
+
+static void
+htcp_init(struct sctp_tcb *stcb, struct sctp_nets *net)
+{
+ memset(&net->htcp_ca, 0, sizeof(struct htcp));
+ net->htcp_ca.alpha = ALPHA_BASE;
+ net->htcp_ca.beta = BETA_MIN;
+ net->htcp_ca.bytes_acked = net->mtu;
+ net->htcp_ca.last_cong = sctp_get_tick_count();
+}
+
+void
+sctp_htcp_set_initial_cc_param(struct sctp_tcb *stcb, struct sctp_nets *net)
+{
+ /*
+ * We take the max of the burst limit times a MTU or the
+ * INITIAL_CWND. We then limit this to 4 MTU's of sending.
+ */
+ net->cwnd = min((net->mtu * 4), max((2 * net->mtu), SCTP_INITIAL_CWND));
+ net->ssthresh = stcb->asoc.peers_rwnd;
+ htcp_init(stcb, net);
+
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_CWND_MONITOR_ENABLE | SCTP_CWND_LOGGING_ENABLE)) {
+ sctp_log_cwnd(stcb, net, 0, SCTP_CWND_INITIALIZATION);
+ }
+}
+
+void
+sctp_htcp_cwnd_update_after_sack(struct sctp_tcb *stcb,
+ struct sctp_association *asoc,
+ int accum_moved, int reneged_all, int will_exit)
+{
+ struct sctp_nets *net;
+
+ /******************************/
+ /* update cwnd and Early FR */
+ /******************************/
+ TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+
+#ifdef JANA_CMT_FAST_RECOVERY
+ /*
+ * CMT fast recovery code. Need to debug.
+ */
+ if (net->fast_retran_loss_recovery && net->new_pseudo_cumack) {
+ if (compare_with_wrap(asoc->last_acked_seq,
+ net->fast_recovery_tsn, MAX_TSN) ||
+ (asoc->last_acked_seq == net->fast_recovery_tsn) ||
+ compare_with_wrap(net->pseudo_cumack, net->fast_recovery_tsn, MAX_TSN) ||
+ (net->pseudo_cumack == net->fast_recovery_tsn)) {
+ net->will_exit_fast_recovery = 1;
+ }
+ }
+#endif
+ if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
+ /*
+ * So, first of all, do we need to have an Early FR
+ * timer running?
+ */
+ if ((!TAILQ_EMPTY(&asoc->sent_queue) &&
+ (net->ref_count > 1) &&
+ (net->flight_size < net->cwnd)) ||
+ (reneged_all)) {
+ /*
+ * yes, so in this case stop it if it's
+ * running, and then restart it. Reneging
+ * all is a special case where we want to
+ * run the Early FR timer and then force the
+ * last few unacked to be sent, causing us
+ * to elicit a SACK with gaps to force out
+ * the others.
+ */
+ if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
+ SCTP_STAT_INCR(sctps_earlyfrstpidsck2);
+ sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
+ SCTP_FROM_SCTP_INDATA + SCTP_LOC_20);
+ }
+ SCTP_STAT_INCR(sctps_earlyfrstrid);
+ sctp_timer_start(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net);
+ } else {
+ /* No, stop it if it's running */
+ if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
+ SCTP_STAT_INCR(sctps_earlyfrstpidsck3);
+ sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
+ SCTP_FROM_SCTP_INDATA + SCTP_LOC_21);
+ }
+ }
+ }
+ /* if nothing was acked on this destination skip it */
+ if (net->net_ack == 0) {
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
+ sctp_log_cwnd(stcb, net, 0, SCTP_CWND_LOG_FROM_SACK);
+ }
+ continue;
+ }
+ if (net->net_ack2 > 0) {
+ /*
+ * Karn's rule applies to clearing error count, this
+ * is optional.
+ */
+ net->error_count = 0;
+ if ((net->dest_state & SCTP_ADDR_NOT_REACHABLE) ==
+ SCTP_ADDR_NOT_REACHABLE) {
+ /* addr came good */
+ net->dest_state &= ~SCTP_ADDR_NOT_REACHABLE;
+ net->dest_state |= SCTP_ADDR_REACHABLE;
+ sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
+ SCTP_RECEIVED_SACK, (void *)net, SCTP_SO_NOT_LOCKED);
+ /* now was it the primary? if so restore */
+ if (net->dest_state & SCTP_ADDR_WAS_PRIMARY) {
+ (void)sctp_set_primary_addr(stcb, (struct sockaddr *)NULL, net);
+ }
+ }
+ /*
+ * JRS 5/14/07 - If CMT PF is on and the destination
+ * is in PF state, set the destination to active
+ * state and set the cwnd to one or two MTU's based
+ * on whether PF1 or PF2 is being used.
+ *
+ * Should we stop any running T3 timer here?
+ */
+ if ((asoc->sctp_cmt_on_off == 1) &&
+ (asoc->sctp_cmt_pf > 0) &&
+ ((net->dest_state & SCTP_ADDR_PF) == SCTP_ADDR_PF)) {
+ net->dest_state &= ~SCTP_ADDR_PF;
+ net->cwnd = net->mtu * asoc->sctp_cmt_pf;
+ SCTPDBG(SCTP_DEBUG_INDATA1, "Destination %p moved from PF to reachable with cwnd %d.\n",
+ net, net->cwnd);
+ /*
+ * Since the cwnd value is explicitly set,
+ * skip the code that updates the cwnd
+ * value.
+ */
+ goto skip_cwnd_update;
+ }
+ }
+#ifdef JANA_CMT_FAST_RECOVERY
+ /*
+ * CMT fast recovery code
+ */
+ /*
+ * if (sctp_cmt_on_off == 1 &&
+ * net->fast_retran_loss_recovery &&
+ * net->will_exit_fast_recovery == 0) { @@@ Do something }
+ * else if (sctp_cmt_on_off == 0 &&
+ * asoc->fast_retran_loss_recovery && will_exit == 0) {
+ */
+#endif
+
+ if (asoc->fast_retran_loss_recovery &&
+ will_exit == 0 &&
+ (asoc->sctp_cmt_on_off == 0)) {
+ /*
+ * If we are in loss recovery we skip any cwnd
+ * update
+ */
+ goto skip_cwnd_update;
+ }
+ /*
+ * CMT: CUC algorithm. Update cwnd if pseudo-cumack has
+ * moved.
+ */
+ if (accum_moved ||
+ ((asoc->sctp_cmt_on_off == 1) && net->new_pseudo_cumack)) {
+ htcp_cong_avoid(stcb, net);
+ measure_achieved_throughput(stcb, net);
+ } else {
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
+ sctp_log_cwnd(stcb, net, net->mtu,
+ SCTP_CWND_LOG_NO_CUMACK);
+ }
+ }
+skip_cwnd_update:
+		/*
+		 * Now, according to Karn's rule, do we need to restore the
+		 * RTO timer? Check our net_ack2. If it is not set then we
+		 * have an ambiguity, i.e. all data ack'd was sent to more
+		 * than one place.
+		 */
+ if (net->net_ack2) {
+ /* restore any doubled timers */
+ net->RTO = ((net->lastsa >> 2) + net->lastsv) >> 1;
+ if (net->RTO < stcb->asoc.minrto) {
+ net->RTO = stcb->asoc.minrto;
+ }
+ if (net->RTO > stcb->asoc.maxrto) {
+ net->RTO = stcb->asoc.maxrto;
+ }
+ }
+ }
+}
+
+void
+sctp_htcp_cwnd_update_after_fr(struct sctp_tcb *stcb,
+ struct sctp_association *asoc)
+{
+ struct sctp_nets *net;
+
+ /*
+ * CMT fast recovery code. Need to debug. ((sctp_cmt_on_off == 1) &&
+ * (net->fast_retran_loss_recovery == 0)))
+ */
+ TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+ if ((asoc->fast_retran_loss_recovery == 0) ||
+ (asoc->sctp_cmt_on_off == 1)) {
+ /* out of a RFC2582 Fast recovery window? */
+ if (net->net_ack > 0) {
+				/*
+				 * Per section 7.2.3, were there any
+				 * destinations that had a fast retransmit
+				 * sent to them? If so, we need to adjust
+				 * ssthresh and cwnd.
+				 */
+ struct sctp_tmit_chunk *lchk;
+ int old_cwnd = net->cwnd;
+
+ /* JRS - reset as if state were changed */
+ htcp_reset(&net->htcp_ca);
+ net->ssthresh = htcp_recalc_ssthresh(stcb, net);
+ net->cwnd = net->ssthresh;
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
+ sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd),
+ SCTP_CWND_LOG_FROM_FR);
+ }
+ lchk = TAILQ_FIRST(&asoc->send_queue);
+
+ net->partial_bytes_acked = 0;
+ /* Turn on fast recovery window */
+ asoc->fast_retran_loss_recovery = 1;
+ if (lchk == NULL) {
+ /* Mark end of the window */
+ asoc->fast_recovery_tsn = asoc->sending_seq - 1;
+ } else {
+ asoc->fast_recovery_tsn = lchk->rec.data.TSN_seq - 1;
+ }
+
+ /*
+ * CMT fast recovery -- per destination
+ * recovery variable.
+ */
+ net->fast_retran_loss_recovery = 1;
+
+ if (lchk == NULL) {
+ /* Mark end of the window */
+ net->fast_recovery_tsn = asoc->sending_seq - 1;
+ } else {
+ net->fast_recovery_tsn = lchk->rec.data.TSN_seq - 1;
+ }
+
+ /*
+ * Disable Nonce Sum Checking and store the
+ * resync tsn
+ */
+ asoc->nonce_sum_check = 0;
+ asoc->nonce_resync_tsn = asoc->fast_recovery_tsn + 1;
+
+ sctp_timer_stop(SCTP_TIMER_TYPE_SEND,
+ stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_32);
+ sctp_timer_start(SCTP_TIMER_TYPE_SEND,
+ stcb->sctp_ep, stcb, net);
+ }
+ } else if (net->net_ack > 0) {
+ /*
+ * Mark a peg that we WOULD have done a cwnd
+ * reduction but RFC2582 prevented this action.
+ */
+ SCTP_STAT_INCR(sctps_fastretransinrtt);
+ }
+ }
+}
+
+void
+sctp_htcp_cwnd_update_after_timeout(struct sctp_tcb *stcb,
+ struct sctp_nets *net)
+{
+ int old_cwnd = net->cwnd;
+
+ /* JRS - reset as if the state were being changed to timeout */
+ htcp_reset(&net->htcp_ca);
+ net->ssthresh = htcp_recalc_ssthresh(stcb, net);
+ net->cwnd = net->mtu;
+ net->partial_bytes_acked = 0;
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
+ sctp_log_cwnd(stcb, net, net->cwnd - old_cwnd, SCTP_CWND_LOG_FROM_RTX);
+ }
+}
+
+void
+sctp_htcp_cwnd_update_after_fr_timer(struct sctp_inpcb *inp,
+ struct sctp_tcb *stcb, struct sctp_nets *net)
+{
+ int old_cwnd;
+
+ old_cwnd = net->cwnd;
+
+ sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_EARLY_FR_TMR, SCTP_SO_NOT_LOCKED);
+ net->htcp_ca.last_cong = sctp_get_tick_count();
+ /*
+ * make a small adjustment to cwnd and force to CA.
+ */
+ if (net->cwnd > net->mtu)
+ /* drop down one MTU after sending */
+ net->cwnd -= net->mtu;
+ if (net->cwnd < net->ssthresh)
+		/* still in SS, move to CA */
+ net->ssthresh = net->cwnd - 1;
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
+ sctp_log_cwnd(stcb, net, (old_cwnd - net->cwnd), SCTP_CWND_LOG_FROM_FR);
+ }
+}
+
+void
+sctp_htcp_cwnd_update_after_ecn_echo(struct sctp_tcb *stcb,
+ struct sctp_nets *net)
+{
+ int old_cwnd;
+
+ old_cwnd = net->cwnd;
+
+ /* JRS - reset hctp as if state changed */
+ htcp_reset(&net->htcp_ca);
+ SCTP_STAT_INCR(sctps_ecnereducedcwnd);
+ net->ssthresh = htcp_recalc_ssthresh(stcb, net);
+ if (net->ssthresh < net->mtu) {
+ net->ssthresh = net->mtu;
+ /* here back off the timer as well, to slow us down */
+ net->RTO <<= 1;
+ }
+ net->cwnd = net->ssthresh;
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
+ sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_SAT);
+ }
+}
diff --git a/rtems/freebsd/netinet/sctp_cc_functions.h b/rtems/freebsd/netinet/sctp_cc_functions.h
new file mode 100644
index 00000000..6cac637b
--- /dev/null
+++ b/rtems/freebsd/netinet/sctp_cc_functions.h
@@ -0,0 +1,116 @@
+/*-
+ * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#ifndef __sctp_cc_functions_h__
+#define __sctp_cc_functions_h__
+
+#if defined(_KERNEL) || defined(__Userspace__)
+
+void
+sctp_set_initial_cc_param(struct sctp_tcb *stcb,
+ struct sctp_nets *net);
+
+void
+sctp_cwnd_update_after_fr(struct sctp_tcb *stcb,
+ struct sctp_association *asoc);
+
+void
+sctp_cwnd_update_after_sack(struct sctp_tcb *stcb,
+ struct sctp_association *asoc,
+ int accum_moved, int reneged_all, int will_exit);
+
+void
+sctp_cwnd_update_after_timeout(struct sctp_tcb *stcb,
+ struct sctp_nets *net);
+
+void
+sctp_hs_cwnd_update_after_fr(struct sctp_tcb *stcb,
+ struct sctp_association *asoc);
+
+void
+sctp_hs_cwnd_update_after_sack(struct sctp_tcb *stcb,
+ struct sctp_association *asoc,
+ int accum_moved, int reneged_all, int will_exit);
+
+void
+sctp_cwnd_update_after_ecn_echo(struct sctp_tcb *stcb,
+ struct sctp_nets *net);
+
+void
+sctp_cwnd_update_after_packet_dropped(struct sctp_tcb *stcb,
+ struct sctp_nets *net, struct sctp_pktdrop_chunk *cp,
+ uint32_t * bottle_bw, uint32_t * on_queue);
+
+void
+sctp_cwnd_update_after_output(struct sctp_tcb *stcb,
+ struct sctp_nets *net, int burst_limit);
+
+void
+sctp_cwnd_update_after_fr_timer(struct sctp_inpcb *inp,
+ struct sctp_tcb *stcb, struct sctp_nets *net);
+
+/*
+ * The HTCP algorithms are taken directly from
+ * R. N. Shorten and D. J. Leith and are the outcome of
+ * a Cisco-URP grant to enhance HTCP for satellite
+ * communications. We use the BSD License
+ * granted for their source and have modified their
+ * algorithms to fit within the SCTP BSD framework.
+ */
+
+void
+sctp_htcp_set_initial_cc_param(struct sctp_tcb *stcb,
+ struct sctp_nets *net);
+
+void
+sctp_htcp_cwnd_update_after_fr(struct sctp_tcb *stcb,
+ struct sctp_association *asoc);
+
+void
+sctp_htcp_cwnd_update_after_sack(struct sctp_tcb *stcb,
+ struct sctp_association *asoc,
+ int accum_moved, int reneged_all, int will_exit);
+
+void
+sctp_htcp_cwnd_update_after_timeout(struct sctp_tcb *stcb,
+ struct sctp_nets *net);
+
+void
+sctp_htcp_cwnd_update_after_ecn_echo(struct sctp_tcb *stcb,
+ struct sctp_nets *net);
+
+void
+sctp_htcp_cwnd_update_after_fr_timer(struct sctp_inpcb *inp,
+ struct sctp_tcb *stcb, struct sctp_nets *net);
+
+#endif
+#endif
diff --git a/rtems/freebsd/netinet/sctp_constants.h b/rtems/freebsd/netinet/sctp_constants.h
new file mode 100644
index 00000000..44f2ab55
--- /dev/null
+++ b/rtems/freebsd/netinet/sctp_constants.h
@@ -0,0 +1,1051 @@
+/*-
+ * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* $KAME: sctp_constants.h,v 1.17 2005/03/06 16:04:17 itojun Exp $ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#ifndef __sctp_constants_h__
+#define __sctp_constants_h__
+
+/* IANA assigned port number for SCTP over UDP encapsulation */
+/* On FreeBSD we cannot bind the port at
+ * startup; if we tried, we would not really
+ * be bound. The user must set it via the
+ * sysctl, or we would need to build a special
+ * timer that waits a second or so after the
+ * system comes up.
+ */
+#define SCTP_OVER_UDP_TUNNELING_PORT 0
+/* Number of packets to get before sack sent by default */
+#define SCTP_DEFAULT_SACK_FREQ 2
+
+/* Address limit - This variable is calculated
+ * based on a 65535 byte max IP packet. We take out 100 bytes
+ * for the cookie, 40 bytes for a v6 header and 32
+ * bytes for the init structure. A second init structure
+ * for the init-ack and then finally a third one for the
+ * embedded init. This yields 100 + 40 + (3 * 32) = 236 bytes.
+ * This leaves 65299 bytes for addresses. We throw out the 299 bytes.
+ * Now whatever we send in the INIT() we need to allow to get back in the
+ * INIT-ACK plus all the values from INIT and INIT-ACK
+ * listed in the cookie. Plus we need some overhead for
+ * maybe copied parameters in the COOKIE. If we
+ * allow 1080 addresses, and each side has 1080 V6 addresses
+ * that will be 21600 bytes. In the INIT-ACK we will
+ * see the INIT-ACK 21600 + 43200 in the cookie. This leaves
+ * about 500 bytes slack for misc things in the cookie.
+ */
+#define SCTP_ADDRESS_LIMIT 1080
+
+/* We need at least 2k of space for us; for inits
+ * larger than that, let's abort.
+ */
+#define SCTP_LARGEST_INIT_ACCEPTED (65535 - 2048)
+
+/* Number of addresses where we just skip the counting */
+#define SCTP_COUNT_LIMIT 40
+
+#define SCTP_ZERO_COPY_TICK_DELAY (((100 * hz) + 999) / 1000)
+#define SCTP_ZERO_COPY_SENDQ_TICK_DELAY (((100 * hz) + 999) / 1000)
+
+/* Number of ticks to delay before running
+ * iterator on an address change.
+ */
+#define SCTP_ADDRESS_TICK_DELAY 2
+
+#define SCTP_VERSION_STRING "KAME-BSD 1.1"
+/* #define SCTP_AUDITING_ENABLED 1 used for debug/auditing */
+#define SCTP_AUDIT_SIZE 256
+
+
+#define SCTP_KTRHEAD_NAME "sctp_iterator"
+#define SCTP_KTHREAD_PAGES 0
+
+
+/* If you support Multi-VRF, how big to
+ * make the initial array of VRFs.
+ */
+#define SCTP_DEFAULT_VRF_SIZE 4
+
+/* constants for rto calc */
+#define sctp_align_safe_nocopy 0
+#define sctp_align_unsafe_makecopy 1
+
+/* JRS - Values defined for the HTCP algorithm */
+#define ALPHA_BASE (1<<7) /* 1.0 with shift << 7 */
+#define BETA_MIN (1<<6) /* 0.5 with shift << 7 */
+#define BETA_MAX 102 /* 0.8 with shift << 7 */
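+/*
+ * These constants are fixed-point values using the 7-bit shift noted
+ * above, so a stored value v represents v / 128: ALPHA_BASE = 128 is 1.0,
+ * BETA_MIN = 64 is 0.5, and BETA_MAX = 102 is 102 / 128, roughly 0.8.
+ */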
+
+/* Places that CWND log can happen from */
+#define SCTP_CWND_LOG_FROM_FR 1
+#define SCTP_CWND_LOG_FROM_RTX 2
+#define SCTP_CWND_LOG_FROM_BRST 3
+#define SCTP_CWND_LOG_FROM_SS 4
+#define SCTP_CWND_LOG_FROM_CA 5
+#define SCTP_CWND_LOG_FROM_SAT 6
+#define SCTP_BLOCK_LOG_INTO_BLK 7
+#define SCTP_BLOCK_LOG_OUTOF_BLK 8
+#define SCTP_BLOCK_LOG_CHECK 9
+#define SCTP_STR_LOG_FROM_INTO_STRD 10
+#define SCTP_STR_LOG_FROM_IMMED_DEL 11
+#define SCTP_STR_LOG_FROM_INSERT_HD 12
+#define SCTP_STR_LOG_FROM_INSERT_MD 13
+#define SCTP_STR_LOG_FROM_INSERT_TL 14
+#define SCTP_STR_LOG_FROM_MARK_TSN 15
+#define SCTP_STR_LOG_FROM_EXPRS_DEL 16
+#define SCTP_FR_LOG_BIGGEST_TSNS 17
+#define SCTP_FR_LOG_STRIKE_TEST 18
+#define SCTP_FR_LOG_STRIKE_CHUNK 19
+#define SCTP_FR_T3_TIMEOUT 20
+#define SCTP_MAP_PREPARE_SLIDE 21
+#define SCTP_MAP_SLIDE_FROM 22
+#define SCTP_MAP_SLIDE_RESULT 23
+#define SCTP_MAP_SLIDE_CLEARED 24
+#define SCTP_MAP_SLIDE_NONE 25
+#define SCTP_FR_T3_MARK_TIME 26
+#define SCTP_FR_T3_MARKED 27
+#define SCTP_FR_T3_STOPPED 28
+#define SCTP_FR_MARKED 30
+#define SCTP_CWND_LOG_NOADV_SS 31
+#define SCTP_CWND_LOG_NOADV_CA 32
+#define SCTP_MAX_BURST_APPLIED 33
+#define SCTP_MAX_IFP_APPLIED 34
+#define SCTP_MAX_BURST_ERROR_STOP 35
+#define SCTP_INCREASE_PEER_RWND 36
+#define SCTP_DECREASE_PEER_RWND 37
+#define SCTP_SET_PEER_RWND_VIA_SACK 38
+#define SCTP_LOG_MBCNT_INCREASE 39
+#define SCTP_LOG_MBCNT_DECREASE 40
+#define SCTP_LOG_MBCNT_CHKSET 41
+#define SCTP_LOG_NEW_SACK 42
+#define SCTP_LOG_TSN_ACKED 43
+#define SCTP_LOG_TSN_REVOKED 44
+#define SCTP_LOG_LOCK_TCB 45
+#define SCTP_LOG_LOCK_INP 46
+#define SCTP_LOG_LOCK_SOCK 47
+#define SCTP_LOG_LOCK_SOCKBUF_R 48
+#define SCTP_LOG_LOCK_SOCKBUF_S 49
+#define SCTP_LOG_LOCK_CREATE 50
+#define SCTP_LOG_INITIAL_RTT 51
+#define SCTP_LOG_RTTVAR 52
+#define SCTP_LOG_SBALLOC 53
+#define SCTP_LOG_SBFREE 54
+#define SCTP_LOG_SBRESULT 55
+#define SCTP_FR_DUPED 56
+#define SCTP_FR_MARKED_EARLY 57
+#define SCTP_FR_CWND_REPORT 58
+#define SCTP_FR_CWND_REPORT_START 59
+#define SCTP_FR_CWND_REPORT_STOP 60
+#define SCTP_CWND_LOG_FROM_SEND 61
+#define SCTP_CWND_INITIALIZATION 62
+#define SCTP_CWND_LOG_FROM_T3 63
+#define SCTP_CWND_LOG_FROM_SACK 64
+#define SCTP_CWND_LOG_NO_CUMACK 65
+#define SCTP_CWND_LOG_FROM_RESEND 66
+#define SCTP_FR_LOG_CHECK_STRIKE 67
+#define SCTP_SEND_NOW_COMPLETES 68
+#define SCTP_CWND_LOG_FILL_OUTQ_CALLED 69
+#define SCTP_CWND_LOG_FILL_OUTQ_FILLS 70
+#define SCTP_LOG_FREE_SENT 71
+#define SCTP_NAGLE_APPLIED 72
+#define SCTP_NAGLE_SKIPPED 73
+#define SCTP_WAKESND_FROM_SACK 74
+#define SCTP_WAKESND_FROM_FWDTSN 75
+#define SCTP_NOWAKE_FROM_SACK 76
+#define SCTP_CWNDLOG_PRESEND 77
+#define SCTP_CWNDLOG_ENDSEND 78
+#define SCTP_AT_END_OF_SACK 79
+#define SCTP_REASON_FOR_SC 80
+#define SCTP_BLOCK_LOG_INTO_BLKA 81
+#define SCTP_ENTER_USER_RECV 82
+#define SCTP_USER_RECV_SACKS 83
+#define SCTP_SORECV_BLOCKSA 84
+#define SCTP_SORECV_BLOCKSB 85
+#define SCTP_SORECV_DONE 86
+#define SCTP_SACK_RWND_UPDATE 87
+#define SCTP_SORECV_ENTER 88
+#define SCTP_SORECV_ENTERPL 89
+#define SCTP_MBUF_INPUT 90
+#define SCTP_MBUF_IALLOC 91
+#define SCTP_MBUF_IFREE 92
+#define SCTP_MBUF_ICOPY 93
+#define SCTP_MBUF_SPLIT 94
+#define SCTP_SORCV_FREECTL 95
+#define SCTP_SORCV_DOESCPY 96
+#define SCTP_SORCV_DOESLCK 97
+#define SCTP_SORCV_DOESADJ 98
+#define SCTP_SORCV_BOTWHILE 99
+#define SCTP_SORCV_PASSBF 100
+#define SCTP_SORCV_ADJD 101
+#define SCTP_UNKNOWN_MAX 102
+#define SCTP_RANDY_STUFF 103
+#define SCTP_RANDY_STUFF1 104
+#define SCTP_STRMOUT_LOG_ASSIGN 105
+#define SCTP_STRMOUT_LOG_SEND 106
+#define SCTP_FLIGHT_LOG_DOWN_CA 107
+#define SCTP_FLIGHT_LOG_UP 108
+#define SCTP_FLIGHT_LOG_DOWN_GAP 109
+#define SCTP_FLIGHT_LOG_DOWN_RSND 110
+#define SCTP_FLIGHT_LOG_UP_RSND 111
+#define SCTP_FLIGHT_LOG_DOWN_RSND_TO 112
+#define SCTP_FLIGHT_LOG_DOWN_WP 113
+#define SCTP_FLIGHT_LOG_UP_REVOKE 114
+#define SCTP_FLIGHT_LOG_DOWN_PDRP 115
+#define SCTP_FLIGHT_LOG_DOWN_PMTU 116
+#define SCTP_SACK_LOG_NORMAL 117
+#define SCTP_SACK_LOG_EXPRESS 118
+#define SCTP_MAP_TSN_ENTERS 119
+#define SCTP_THRESHOLD_CLEAR 120
+#define SCTP_THRESHOLD_INCR 121
+#define SCTP_FLIGHT_LOG_DWN_WP_FWD 122
+#define SCTP_FWD_TSN_CHECK 123
+#define SCTP_LOG_MAX_TYPES 124
+/*
+ * To turn on various logging, you must first enable 'options KTR' and
+ * you might want to bump the entries: 'options KTR_ENTRIES=80000'.
+ * To get something to log you define one of the logging defines.
+ * (see LINT).
+ *
+ * This gets the compile in place, but you still need to turn the
+ * logging flag on too in the sysctl (see in sctp.h).
+ */
+
+#define SCTP_LOG_EVENT_UNKNOWN 0
+#define SCTP_LOG_EVENT_CWND 1
+#define SCTP_LOG_EVENT_BLOCK 2
+#define SCTP_LOG_EVENT_STRM 3
+#define SCTP_LOG_EVENT_FR 4
+#define SCTP_LOG_EVENT_MAP 5
+#define SCTP_LOG_EVENT_MAXBURST 6
+#define SCTP_LOG_EVENT_RWND 7
+#define SCTP_LOG_EVENT_MBCNT 8
+#define SCTP_LOG_EVENT_SACK 9
+#define SCTP_LOG_LOCK_EVENT 10
+#define SCTP_LOG_EVENT_RTT 11
+#define SCTP_LOG_EVENT_SB 12
+#define SCTP_LOG_EVENT_NAGLE 13
+#define SCTP_LOG_EVENT_WAKE 14
+#define SCTP_LOG_MISC_EVENT 15
+#define SCTP_LOG_EVENT_CLOSE 16
+#define SCTP_LOG_EVENT_MBUF 17
+#define SCTP_LOG_CHUNK_PROC 18
+#define SCTP_LOG_ERROR_RET 19
+
+#define SCTP_LOG_MAX_EVENT 20
+
+#define SCTP_LOCK_UNKNOWN 2
+
+
+/* number of associations by default for zone allocation */
+#define SCTP_MAX_NUM_OF_ASOC 40000
+/* how many addresses per assoc remote and local */
+#define SCTP_SCALE_FOR_ADDR 2
+
+/* default AUTO_ASCONF mode enable(1)/disable(0) value (sysctl) */
+#define SCTP_DEFAULT_AUTO_ASCONF 1
+
+/* default MULTIPLE_ASCONF mode enable(1)/disable(0) value (sysctl) */
+#define SCTP_DEFAULT_MULTIPLE_ASCONFS 0
+
+/* default MOBILITY_BASE mode enable(1)/disable(0) value (sysctl) */
+#define SCTP_DEFAULT_MOBILITY_BASE 0
+
+/* default MOBILITY_FASTHANDOFF mode enable(1)/disable(0) value (sysctl) */
+#define SCTP_DEFAULT_MOBILITY_FASTHANDOFF 0
+
+/*
+ * Threshold for rwnd updates: we have to read (sb_hiwat >>
+ * SCTP_RWND_HIWAT_SHIFT) before we will look to see if we need to send a
+ * window update sack. When we look, we compare the last rwnd we sent vs the
+ * current rwnd. It too must be greater than this value. Using 3 divides the
+ * hiwat by 8, so for a 200k rwnd we need to read 24k. For a 64k rwnd we need
+ * to read 8k. This seems about right.. I hope :-D.. we do set a
+ * minimum of one MTU on it, so if the rwnd is very small we will insist
+ * on a full MTU of 1500 bytes.
+ */
+#define SCTP_RWND_HIWAT_SHIFT 3
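+/*
+ * Example: with SCTP_RWND_HIWAT_SHIFT = 3 and sb_hiwat = 65536, the
+ * application must consume 65536 >> 3 = 8192 bytes before we even
+ * consider sending a window update sack.
+ */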
+
+/* How much of the rwnd a message must be
+ * taking up to start partial delivery.
+ * We calculate this by shifting the hi_water (recv_win)
+ * by the following amount: set to 1, partial delivery
+ * starts when a message holds 1/2 the rwnd; set to 2,
+ * when a message holds 1/4 the rwnd, etc.
+ */
+
+#define SCTP_PARTIAL_DELIVERY_SHIFT 1
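+/*
+ * Example: with SCTP_PARTIAL_DELIVERY_SHIFT = 1 and a 64k rwnd, partial
+ * delivery starts once a single message occupies 64k >> 1 = 32k, i.e.
+ * half the receive window.
+ */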
+
+/*
+ * default HMAC for cookies, etc... use one of the AUTH HMAC id's
+ * SCTP_HMAC is the HMAC_ID to use
+ * SCTP_SIGNATURE_SIZE is the digest length
+ */
+#define SCTP_HMAC SCTP_AUTH_HMAC_ID_SHA1
+#define SCTP_SIGNATURE_SIZE SCTP_AUTH_DIGEST_LEN_SHA1
+#define SCTP_SIGNATURE_ALOC_SIZE SCTP_SIGNATURE_SIZE
+
+/*
+ * the SCTP protocol signature this includes the version number encoded in
+ * the last 4 bits of the signature.
+ */
+#define PROTO_SIGNATURE_A 0x30000000
+#define SCTP_VERSION_NUMBER 0x3
+
+#define MAX_TSN 0xffffffff
+#define MAX_SEQ 0xffff
+
+/* how many executions every N ticks */
+#define SCTP_ITERATOR_MAX_AT_ONCE 20
+
+/* number of clock ticks between iterator executions */
+#define SCTP_ITERATOR_TICKS 1
+
+/*
+ * option: If you comment out the following you will receive the old behavior
+ * of obeying cwnd for the fast retransmit algorithm. With this defined a FR
+ * happens right away without waiting for the flightsize to drop below the
+ * cwnd value (which is reduced by the FR to 1/2 the inflight packets).
+ */
+#define SCTP_IGNORE_CWND_ON_FR 1
+
+/*
+ * Adds implementor's guide behavior: only use the newest highest update in
+ * SACK gap acks to figure out if you need to strike a chunk for FR.
+ */
+#define SCTP_NO_FR_UNLESS_SEGMENT_SMALLER 1
+
+/* default max I can burst out after a fast retransmit */
+#define SCTP_DEF_MAX_BURST 4
+/* IP hdr (20/40) + 12+2+2 (enet) + sctp common 12 */
+#define SCTP_FIRST_MBUF_RESV 68
+/* Packet transmit states in the sent field */
+#define SCTP_DATAGRAM_UNSENT 0
+#define SCTP_DATAGRAM_SENT 1
+#define SCTP_DATAGRAM_RESEND1 2 /* not used (in code, but may
+ * hit this value) */
+#define SCTP_DATAGRAM_RESEND2 3 /* not used (in code, but may
+ * hit this value) */
+#define SCTP_DATAGRAM_RESEND 4
+#define SCTP_DATAGRAM_ACKED 10010
+#define SCTP_DATAGRAM_MARKED 20010
+#define SCTP_FORWARD_TSN_SKIP 30010
+
+/* chunk output send from locations */
+#define SCTP_OUTPUT_FROM_USR_SEND 0
+#define SCTP_OUTPUT_FROM_T3 1
+#define SCTP_OUTPUT_FROM_INPUT_ERROR 2
+#define SCTP_OUTPUT_FROM_CONTROL_PROC 3
+#define SCTP_OUTPUT_FROM_SACK_TMR 4
+#define SCTP_OUTPUT_FROM_SHUT_TMR 5
+#define SCTP_OUTPUT_FROM_HB_TMR 6
+#define SCTP_OUTPUT_FROM_SHUT_ACK_TMR 7
+#define SCTP_OUTPUT_FROM_ASCONF_TMR 8
+#define SCTP_OUTPUT_FROM_STRRST_TMR 9
+#define SCTP_OUTPUT_FROM_AUTOCLOSE_TMR 10
+#define SCTP_OUTPUT_FROM_EARLY_FR_TMR 11
+#define SCTP_OUTPUT_FROM_STRRST_REQ 12
+#define SCTP_OUTPUT_FROM_USR_RCVD 13
+#define SCTP_OUTPUT_FROM_COOKIE_ACK 14
+#define SCTP_OUTPUT_FROM_DRAIN 15
+#define SCTP_OUTPUT_FROM_CLOSING 16
+/* SCTP chunk types are moved sctp.h for application (NAT, FW) use */
+
+/* align to 32-bit sizes */
+#define SCTP_SIZE32(x) ((((x)+3) >> 2) << 2)
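+/*
+ * Example: SCTP_SIZE32(5) = (((5 + 3) >> 2) << 2) = 8, i.e. the length
+ * is padded up to the next multiple of 4; lengths that are already
+ * multiples of 4 pass through unchanged.
+ */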
+
+#define IS_SCTP_CONTROL(a) ((a)->chunk_type != SCTP_DATA)
+#define IS_SCTP_DATA(a) ((a)->chunk_type == SCTP_DATA)
+
+
+/* SCTP parameter types */
+/*************0x0000 series*************/
+#define SCTP_HEARTBEAT_INFO 0x0001
+#define SCTP_IPV4_ADDRESS 0x0005
+#define SCTP_IPV6_ADDRESS 0x0006
+#define SCTP_STATE_COOKIE 0x0007
+#define SCTP_UNRECOG_PARAM 0x0008
+#define SCTP_COOKIE_PRESERVE 0x0009
+#define SCTP_HOSTNAME_ADDRESS 0x000b
+#define SCTP_SUPPORTED_ADDRTYPE 0x000c
+
+/* draft-ietf-stewart-tsvwg-strreset-xxx */
+#define SCTP_STR_RESET_OUT_REQUEST 0x000d
+#define SCTP_STR_RESET_IN_REQUEST 0x000e
+#define SCTP_STR_RESET_TSN_REQUEST 0x000f
+#define SCTP_STR_RESET_RESPONSE 0x0010
+#define SCTP_STR_RESET_ADD_STREAMS 0x0011
+
+#define SCTP_MAX_RESET_PARAMS 2
+#define SCTP_STREAM_RESET_TSN_DELTA 0x1000
+
+/*************0x4000 series*************/
+
+/*************0x8000 series*************/
+#define SCTP_ECN_CAPABLE 0x8000
+/* ECN Nonce: draft-ladha-sctp-ecn-nonce */
+#define SCTP_ECN_NONCE_SUPPORTED 0x8001
+/* draft-ietf-tsvwg-auth-xxx */
+#define SCTP_RANDOM 0x8002
+#define SCTP_CHUNK_LIST 0x8003
+#define SCTP_HMAC_LIST 0x8004
+/*
+ * draft-ietf-tsvwg-addip-sctp-xx param=0x8008 len=0xNNNN Byte | Byte | Byte
+ * | Byte Byte | Byte ...
+ *
+ * Where each byte is a chunk type extension supported. For example, to support
+ * all chunks one would have (in hex):
+ *
+ * 80 01 00 09 C0 C1 80 81 82 00 00 00
+ *
+ * Has the parameter. C0 = PR-SCTP (RFC3758) C1, 80 = ASCONF (addip draft) 81
+ * = Packet Drop 82 = Stream Reset 83 = Authentication
+ */
+#define SCTP_SUPPORTED_CHUNK_EXT 0x8008
+
+/*************0xC000 series*************/
+#define SCTP_PRSCTP_SUPPORTED 0xc000
+/* draft-ietf-tsvwg-addip-sctp */
+#define SCTP_ADD_IP_ADDRESS 0xc001
+#define SCTP_DEL_IP_ADDRESS 0xc002
+#define SCTP_ERROR_CAUSE_IND 0xc003
+#define SCTP_SET_PRIM_ADDR 0xc004
+#define SCTP_SUCCESS_REPORT 0xc005
+#define SCTP_ULP_ADAPTATION 0xc006
+/* behave-nat-draft */
+#define SCTP_HAS_NAT_SUPPORT 0xc007
+#define SCTP_NAT_VTAGS 0xc008
+
+/* Notification error codes */
+#define SCTP_NOTIFY_DATAGRAM_UNSENT 0x0001
+#define SCTP_NOTIFY_DATAGRAM_SENT 0x0002
+#define SCTP_FAILED_THRESHOLD 0x0004
+#define SCTP_HEARTBEAT_SUCCESS 0x0008
+#define SCTP_RESPONSE_TO_USER_REQ 0x0010
+#define SCTP_INTERNAL_ERROR 0x0020
+#define SCTP_SHUTDOWN_GUARD_EXPIRES 0x0040
+#define SCTP_RECEIVED_SACK 0x0080
+#define SCTP_PEER_FAULTY 0x0100
+#define SCTP_ICMP_REFUSED 0x0200
+
+/* bits for TOS field */
+#define SCTP_ECT0_BIT 0x02
+#define SCTP_ECT1_BIT 0x01
+#define SCTP_CE_BITS 0x03
+
+/* below turns off above */
+#define SCTP_FLEXIBLE_ADDRESS 0x20
+#define SCTP_NO_HEARTBEAT 0x40
+
+/* mask to get sticky */
+#define SCTP_STICKY_OPTIONS_MASK 0x0c
+
+
+/*
+ * SCTP states for internal state machine XXX (should match "user" values)
+ */
+#define SCTP_STATE_EMPTY 0x0000
+#define SCTP_STATE_INUSE 0x0001
+#define SCTP_STATE_COOKIE_WAIT 0x0002
+#define SCTP_STATE_COOKIE_ECHOED 0x0004
+#define SCTP_STATE_OPEN 0x0008
+#define SCTP_STATE_SHUTDOWN_SENT 0x0010
+#define SCTP_STATE_SHUTDOWN_RECEIVED 0x0020
+#define SCTP_STATE_SHUTDOWN_ACK_SENT 0x0040
+#define SCTP_STATE_SHUTDOWN_PENDING 0x0080
+#define SCTP_STATE_CLOSED_SOCKET 0x0100
+#define SCTP_STATE_ABOUT_TO_BE_FREED 0x0200
+#define SCTP_STATE_PARTIAL_MSG_LEFT 0x0400
+#define SCTP_STATE_WAS_ABORTED 0x0800
+#define SCTP_STATE_IN_ACCEPT_QUEUE 0x1000
+#define SCTP_STATE_MASK 0x007f
+
+#define SCTP_GET_STATE(asoc) ((asoc)->state & SCTP_STATE_MASK)
+#define SCTP_SET_STATE(asoc, newstate) ((asoc)->state = ((asoc)->state & ~SCTP_STATE_MASK) | newstate)
+#define SCTP_CLEAR_SUBSTATE(asoc, substate) ((asoc)->state &= ~substate)
+#define SCTP_ADD_SUBSTATE(asoc, substate) ((asoc)->state |= substate)
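+/*
+ * Example: SCTP_SET_STATE(asoc, SCTP_STATE_OPEN) only replaces the bits
+ * covered by SCTP_STATE_MASK (0x007f); substate flags above the mask,
+ * such as SCTP_STATE_CLOSED_SOCKET (0x0100), survive and must be cleared
+ * explicitly with SCTP_CLEAR_SUBSTATE().
+ */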
+
+/* SCTP reachability state for each address */
+#define SCTP_ADDR_REACHABLE 0x001
+#define SCTP_ADDR_NOT_REACHABLE 0x002
+#define SCTP_ADDR_NOHB 0x004
+#define SCTP_ADDR_BEING_DELETED 0x008
+#define SCTP_ADDR_NOT_IN_ASSOC 0x010
+#define SCTP_ADDR_WAS_PRIMARY 0x020
+#define SCTP_ADDR_SWITCH_PRIMARY 0x040
+#define SCTP_ADDR_OUT_OF_SCOPE 0x080
+#define SCTP_ADDR_DOUBLE_SWITCH 0x100
+#define SCTP_ADDR_UNCONFIRMED 0x200
+#define SCTP_ADDR_REQ_PRIMARY 0x400
+/* JRS 5/13/07 - Added potentially failed state for CMT PF */
+#define SCTP_ADDR_PF 0x800
+#define SCTP_REACHABLE_MASK 0x203
+
+/* bound address types (e.g. valid address types to allow) */
+#define SCTP_BOUND_V6 0x01
+#define SCTP_BOUND_V4 0x02
+
+/*
+ * what is the default number of mbufs in a chain I allow before switching to
+ * a cluster
+ */
+#define SCTP_DEFAULT_MBUFS_IN_CHAIN 5
+
+/* How long a cookie lives in milliseconds */
+#define SCTP_DEFAULT_COOKIE_LIFE 60000
+
+/* resource limit of streams */
+#define MAX_SCTP_STREAMS 2048
+
+/* Maximum the mapping array will grow to (TSN mapping array) */
+#define SCTP_MAPPING_ARRAY 512
+
+/* size of the initial malloc on the mapping array */
+#define SCTP_INITIAL_MAPPING_ARRAY 16
+/* how much we grow the mapping array each call */
+#define SCTP_MAPPING_ARRAY_INCR 32
+
+/*
+ * Here we define the timer types used by the implementation as arguments in
+ * the set/get timer type calls.
+ */
+#define SCTP_TIMER_INIT 0
+#define SCTP_TIMER_RECV 1
+#define SCTP_TIMER_SEND 2
+#define SCTP_TIMER_HEARTBEAT 3
+#define SCTP_TIMER_PMTU 4
+#define SCTP_TIMER_MAXSHUTDOWN 5
+#define SCTP_TIMER_SIGNATURE 6
+/*
+ * number of timer types in the base SCTP structure, used in the set/get
+ * calls; each has a base default.
+ */
+#define SCTP_NUM_TMRS 7
+
+/* timer types */
+#define SCTP_TIMER_TYPE_NONE 0
+#define SCTP_TIMER_TYPE_SEND 1
+#define SCTP_TIMER_TYPE_INIT 2
+#define SCTP_TIMER_TYPE_RECV 3
+#define SCTP_TIMER_TYPE_SHUTDOWN 4
+#define SCTP_TIMER_TYPE_HEARTBEAT 5
+#define SCTP_TIMER_TYPE_COOKIE 6
+#define SCTP_TIMER_TYPE_NEWCOOKIE 7
+#define SCTP_TIMER_TYPE_PATHMTURAISE 8
+#define SCTP_TIMER_TYPE_SHUTDOWNACK 9
+#define SCTP_TIMER_TYPE_ASCONF 10
+#define SCTP_TIMER_TYPE_SHUTDOWNGUARD 11
+#define SCTP_TIMER_TYPE_AUTOCLOSE 12
+#define SCTP_TIMER_TYPE_EVENTWAKE 13
+#define SCTP_TIMER_TYPE_STRRESET 14
+#define SCTP_TIMER_TYPE_INPKILL 15
+#define SCTP_TIMER_TYPE_EARLYFR 17
+#define SCTP_TIMER_TYPE_ASOCKILL 18
+#define SCTP_TIMER_TYPE_ADDR_WQ 19
+#define SCTP_TIMER_TYPE_ZERO_COPY 20
+#define SCTP_TIMER_TYPE_ZCOPY_SENDQ 21
+#define SCTP_TIMER_TYPE_PRIM_DELETED 22
+/* add new timers here - and increment LAST */
+#define SCTP_TIMER_TYPE_LAST 23
+
+#define SCTP_IS_TIMER_TYPE_VALID(t) (((t) > SCTP_TIMER_TYPE_NONE) && \
+ ((t) < SCTP_TIMER_TYPE_LAST))
+
+
+
+/* max number of TSN's dup'd that I will hold */
+#define SCTP_MAX_DUP_TSNS 20
+
+/*
+ * Here we define the types used when setting the retry amounts.
+ */
+/* How many drop re-attempts we make on INIT/COOKIE-ECHO */
+#define SCTP_RETRY_DROPPED_THRESH 4
+
+/*
+ * Maximum number of chunks a single association can have on it. Note that
+ * this is a squishy number, since the count can run over this if the user
+ * sends a large message down: the fragmented chunks don't count until
+ * AFTER the message is on queue, so it would be the next send that blocks
+ * things. This number gets tuned up at boot in sctp_init() using the
+ * number of clusters as a base. This way high bandwidth environments will
+ * not get impacted by low bandwidth senders sending a bunch of 1 byte chunks.
+ */
+#define SCTP_ASOC_MAX_CHUNKS_ON_QUEUE 512
+
+
+/* The conversion from time to ticks and vice versa is done by rounding
+ * upwards. This way we can test in the code that the time is positive and
+ * know that this corresponds to a positive number of ticks.
+ */
+#define MSEC_TO_TICKS(x) ((hz == 1000) ? x : ((((x) * hz) + 999) / 1000))
+#define TICKS_TO_MSEC(x) ((hz == 1000) ? x : ((((x) * 1000) + (hz - 1)) / hz))
+
+#define SEC_TO_TICKS(x) ((x) * hz)
+#define TICKS_TO_SEC(x) (((x) + (hz - 1)) / hz)
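+/*
+ * Example: with hz = 100, MSEC_TO_TICKS(250) = ((250 * 100) + 999) / 1000
+ * = 25 ticks, and TICKS_TO_MSEC(25) = ((25 * 1000) + 99) / 100 = 250 ms,
+ * so the round trip is lossless here and never rounds down to zero.
+ */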
+
+/*
+ * Basically the minimum amount of time before I do an early FR. Making this
+ * value too low will cause duplicate retransmissions.
+ */
+#define SCTP_MINFR_MSEC_TIMER 250
+/* The floor this value is allowed to fall to when starting a timer. */
+#define SCTP_MINFR_MSEC_FLOOR 20
+
+/* init timer def = 1 sec */
+#define SCTP_INIT_SEC 1
+
+/* send timer def = 1 seconds */
+#define SCTP_SEND_SEC 1
+
+/* recv timer def = 200ms */
+#define SCTP_RECV_MSEC 200
+
+/* 30 seconds + RTO (in ms) */
+#define SCTP_HB_DEFAULT_MSEC 30000
+
+/* Max time I will wait for Shutdown to complete */
+#define SCTP_DEF_MAX_SHUTDOWN_SEC 180
+
+
+/*
+ * This is how long a secret lives (NOT how long a cookie lives): how many
+ * ticks the current secret will live.
+ */
+#define SCTP_DEFAULT_SECRET_LIFE_SEC 3600
+
+#define SCTP_RTO_UPPER_BOUND (60000) /* 60 sec in ms */
+#define SCTP_RTO_UPPER_BOUND_SEC 60 /* for the init timer */
+#define SCTP_RTO_LOWER_BOUND (1000) /* 1 sec in ms */
+#define SCTP_RTO_INITIAL (3000) /* 3 sec in ms */
+
+
+#define SCTP_INP_KILL_TIMEOUT 20 /* number of ms to retry kill of inpcb */
+#define SCTP_ASOC_KILL_TIMEOUT 10 /* number of ms to retry kill of asoc */
+
+#define SCTP_DEF_MAX_INIT 8
+#define SCTP_DEF_MAX_SEND 10
+#define SCTP_DEF_MAX_PATH_RTX 5
+
+#define SCTP_DEF_PMTU_RAISE_SEC 600 /* 10 min between raise attempts */
+
+
+/* How many streams I request initially by default */
+#define SCTP_OSTREAM_INITIAL 10
+
+/*
+ * How many smallest_mtu's need to increase before a window update sack is
+ * sent (should be a power of 2).
+ */
+/* Send window update (incr * this > hiwat). Should be a power of 2 */
+#define SCTP_MINIMAL_RWND (4096) /* minimal rwnd */
+
+#define SCTP_ADDRMAX 24
+
+/* SCTP DEBUG Switch parameters */
+#define SCTP_DEBUG_TIMER1 0x00000001
+#define SCTP_DEBUG_TIMER2 0x00000002 /* unused */
+#define SCTP_DEBUG_TIMER3 0x00000004 /* unused */
+#define SCTP_DEBUG_TIMER4 0x00000008
+#define SCTP_DEBUG_OUTPUT1 0x00000010
+#define SCTP_DEBUG_OUTPUT2 0x00000020
+#define SCTP_DEBUG_OUTPUT3 0x00000040
+#define SCTP_DEBUG_OUTPUT4 0x00000080
+#define SCTP_DEBUG_UTIL1 0x00000100
+#define SCTP_DEBUG_UTIL2 0x00000200 /* unused */
+#define SCTP_DEBUG_AUTH1 0x00000400
+#define SCTP_DEBUG_AUTH2 0x00000800 /* unused */
+#define SCTP_DEBUG_INPUT1 0x00001000
+#define SCTP_DEBUG_INPUT2 0x00002000
+#define SCTP_DEBUG_INPUT3 0x00004000
+#define SCTP_DEBUG_INPUT4 0x00008000 /* unused */
+#define SCTP_DEBUG_ASCONF1 0x00010000
+#define SCTP_DEBUG_ASCONF2 0x00020000
+#define SCTP_DEBUG_OUTPUT5 0x00040000 /* unused */
+#define SCTP_DEBUG_XXX 0x00080000 /* unused */
+#define SCTP_DEBUG_PCB1 0x00100000
+#define SCTP_DEBUG_PCB2 0x00200000 /* unused */
+#define SCTP_DEBUG_PCB3 0x00400000
+#define SCTP_DEBUG_PCB4 0x00800000
+#define SCTP_DEBUG_INDATA1 0x01000000
+#define SCTP_DEBUG_INDATA2 0x02000000 /* unused */
+#define SCTP_DEBUG_INDATA3 0x04000000 /* unused */
+#define SCTP_DEBUG_CRCOFFLOAD 0x08000000 /* unused */
+#define SCTP_DEBUG_USRREQ1 0x10000000 /* unused */
+#define SCTP_DEBUG_USRREQ2 0x20000000 /* unused */
+#define SCTP_DEBUG_PEEL1 0x40000000
+#define SCTP_DEBUG_XXXXX 0x80000000 /* unused */
+#define SCTP_DEBUG_ALL 0x7ff3ffff
+#define SCTP_DEBUG_NOISY 0x00040000
+
+/* What sender needs to see to avoid SWS or we consider the peer's rwnd 0 */
+#define SCTP_SWS_SENDER_DEF 1420
+
+/*
+ * SWS is scaled to the sb_hiwat of the socket. A value of 2 is hiwat/4, 1
+ * would be hiwat/2 etc.
+ */
+/* What receiver needs to see in sockbuf or we tell the peer it is 1 */
+#define SCTP_SWS_RECEIVER_DEF 3000
+
+#define SCTP_INITIAL_CWND 4380
+
+#define SCTP_DEFAULT_MTU 1500 /* emergency default MTU */
+/* amount peer is obligated to have in rwnd or I will abort */
+#define SCTP_MIN_RWND 1500
+
+#define SCTP_DEFAULT_MAXSEGMENT 65535
+
+#define SCTP_CHUNK_BUFFER_SIZE 512
+#define SCTP_PARAM_BUFFER_SIZE 512
+
+/* small chunk store for looking at chunk_list in auth */
+#define SCTP_SMALL_CHUNK_STORE 260
+
+#define SCTP_DEFAULT_MINSEGMENT 512 /* MTU size ... if no mtu disc */
+#define SCTP_HOW_MANY_SECRETS 2 /* how many secrets I keep */
+
+#define SCTP_NUMBER_OF_SECRETS 8 /* or 8 * 4 = 32 octets */
+#define SCTP_SECRET_SIZE 32	/* number of octets in 256 bits */
+
+
+/*
+ * SCTP upper layer notifications
+ */
+#define SCTP_NOTIFY_ASSOC_UP 1
+#define SCTP_NOTIFY_ASSOC_DOWN 2
+#define SCTP_NOTIFY_INTERFACE_DOWN 3
+#define SCTP_NOTIFY_INTERFACE_UP 4
+#define SCTP_NOTIFY_DG_FAIL 5
+#define SCTP_NOTIFY_STRDATA_ERR 6
+#define SCTP_NOTIFY_ASSOC_ABORTED 7
+#define SCTP_NOTIFY_PEER_OPENED_STREAM 8
+#define SCTP_NOTIFY_STREAM_OPENED_OK 9
+#define SCTP_NOTIFY_ASSOC_RESTART 10
+#define SCTP_NOTIFY_HB_RESP 11
+#define SCTP_NOTIFY_ASCONF_SUCCESS 12
+#define SCTP_NOTIFY_ASCONF_FAILED 13
+#define SCTP_NOTIFY_PEER_SHUTDOWN 14
+#define SCTP_NOTIFY_ASCONF_ADD_IP 15
+#define SCTP_NOTIFY_ASCONF_DELETE_IP 16
+#define SCTP_NOTIFY_ASCONF_SET_PRIMARY 17
+#define SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION 18
+#define SCTP_NOTIFY_INTERFACE_CONFIRMED 20
+#define SCTP_NOTIFY_STR_RESET_RECV 21
+#define SCTP_NOTIFY_STR_RESET_SEND 22
+#define SCTP_NOTIFY_STR_RESET_FAILED_OUT 23
+#define SCTP_NOTIFY_STR_RESET_FAILED_IN 24
+#define SCTP_NOTIFY_AUTH_NEW_KEY 25
+#define SCTP_NOTIFY_AUTH_FREE_KEY 26
+#define SCTP_NOTIFY_SPECIAL_SP_FAIL 27
+#define SCTP_NOTIFY_NO_PEER_AUTH 28
+#define SCTP_NOTIFY_SENDER_DRY 29
+#define SCTP_NOTIFY_STR_RESET_ADD_OK 30
+#define SCTP_NOTIFY_STR_RESET_ADD_FAIL 31
+#define SCTP_NOTIFY_STR_RESET_INSTREAM_ADD_OK 32
+#define SCTP_NOTIFY_MAX 32
+
+
+/* This is the value for messages that are NOT completely
+ * copied down where we will start to split the message.
+ * So, with our default, we split only if the piece we
+ * want to take will fill up a full MTU (assuming
+ * a 1500 byte MTU).
+ */
+#define SCTP_DEFAULT_SPLIT_POINT_MIN 2904
+
+/* ABORT CODES and other tell-tale location
+ * codes are generated by adding the below
+ * to the instance id.
+ */
+
+/* File defines */
+#define SCTP_FROM_SCTP_INPUT 0x10000000
+#define SCTP_FROM_SCTP_PCB 0x20000000
+#define SCTP_FROM_SCTP_INDATA 0x30000000
+#define SCTP_FROM_SCTP_TIMER 0x40000000
+#define SCTP_FROM_SCTP_USRREQ 0x50000000
+#define SCTP_FROM_SCTPUTIL 0x60000000
+#define SCTP_FROM_SCTP6_USRREQ 0x70000000
+#define SCTP_FROM_SCTP_ASCONF 0x80000000
+#define SCTP_FROM_SCTP_OUTPUT 0x90000000
+#define SCTP_FROM_SCTP_PEELOFF 0xa0000000
+#define SCTP_FROM_SCTP_PANDA 0xb0000000
+#define SCTP_FROM_SCTP_SYSCTL 0xc0000000
+
+/* Location ID's */
+#define SCTP_LOC_1 0x00000001
+#define SCTP_LOC_2 0x00000002
+#define SCTP_LOC_3 0x00000003
+#define SCTP_LOC_4 0x00000004
+#define SCTP_LOC_5 0x00000005
+#define SCTP_LOC_6 0x00000006
+#define SCTP_LOC_7 0x00000007
+#define SCTP_LOC_8 0x00000008
+#define SCTP_LOC_9 0x00000009
+#define SCTP_LOC_10 0x0000000a
+#define SCTP_LOC_11 0x0000000b
+#define SCTP_LOC_12 0x0000000c
+#define SCTP_LOC_13 0x0000000d
+#define SCTP_LOC_14 0x0000000e
+#define SCTP_LOC_15 0x0000000f
+#define SCTP_LOC_16 0x00000010
+#define SCTP_LOC_17 0x00000011
+#define SCTP_LOC_18 0x00000012
+#define SCTP_LOC_19 0x00000013
+#define SCTP_LOC_20 0x00000014
+#define SCTP_LOC_21 0x00000015
+#define SCTP_LOC_22 0x00000016
+#define SCTP_LOC_23 0x00000017
+#define SCTP_LOC_24 0x00000018
+#define SCTP_LOC_25 0x00000019
+#define SCTP_LOC_26 0x0000001a
+#define SCTP_LOC_27 0x0000001b
+#define SCTP_LOC_28 0x0000001c
+#define SCTP_LOC_29 0x0000001d
+#define SCTP_LOC_30 0x0000001e
+#define SCTP_LOC_31 0x0000001f
+#define SCTP_LOC_32 0x00000020
+#define SCTP_LOC_33 0x00000021
+
+
+/* Free assoc codes */
+#define SCTP_NORMAL_PROC 0
+#define SCTP_PCBFREE_NOFORCE 1
+#define SCTP_PCBFREE_FORCE 2
+
+/* From codes for adding addresses */
+#define SCTP_ADDR_IS_CONFIRMED 8
+#define SCTP_ADDR_DYNAMIC_ADDED 6
+#define SCTP_IN_COOKIE_PROC 100
+#define SCTP_ALLOC_ASOC 1
+#define SCTP_LOAD_ADDR_2 2
+#define SCTP_LOAD_ADDR_3 3
+#define SCTP_LOAD_ADDR_4 4
+#define SCTP_LOAD_ADDR_5 5
+
+#define SCTP_DONOT_SETSCOPE 0
+#define SCTP_DO_SETSCOPE 1
+
+
+/* This value determines the default for when
+ * we try to add more to the send queue, if
+ * there is room. This prevents us from cycling
+ * into the copy_resume routine too often if
+ * we have not got enough space to add a decent
+ * sized message. Note that if we have enough
+ * space to complete the message copy we will always
+ * add to the message, no matter what the size. It is
+ * only when we reach the point that some is left
+ * to add and there is only room for part of it
+ * that we use this threshold. It is also a sysctl.
+ */
+#define SCTP_DEFAULT_ADD_MORE 1452
+
+#ifndef SCTP_PCBHASHSIZE
+/* default number of association hash buckets in each endpoint */
+#define SCTP_PCBHASHSIZE 256
+#endif
+#ifndef SCTP_TCBHASHSIZE
+#define SCTP_TCBHASHSIZE 1024
+#endif
+
+#ifndef SCTP_CHUNKQUEUE_SCALE
+#define SCTP_CHUNKQUEUE_SCALE 10
+#endif
+
+/* clock variance is 1 ms */
+#define SCTP_CLOCK_GRANULARITY 1
+#define IP_HDR_SIZE 40		/* we use the size of an IPv6 header here;
+				 * this overestimates slightly for IPv4 but
+				 * it simplifies the IPv6 addition */
+
+/* Argument magic number for sctp_inpcb_free() */
+
+/* third argument */
+#define SCTP_CALLED_DIRECTLY_NOCMPSET 0
+#define SCTP_CALLED_AFTER_CMPSET_OFCLOSE 1
+#define SCTP_CALLED_FROM_INPKILL_TIMER 2
+/* second argument */
+#define SCTP_FREE_SHOULD_USE_ABORT 1
+#define SCTP_FREE_SHOULD_USE_GRACEFUL_CLOSE 0
+
+#ifndef IPPROTO_SCTP
+#define IPPROTO_SCTP 132 /* the Official IANA number :-) */
+#endif /* !IPPROTO_SCTP */
+
+#define SCTP_MAX_DATA_BUNDLING 256
+
+/* modular comparison */
+/* True if a > b (mod = M) */
+#define compare_with_wrap(a, b, M) (((a > b) && ((a - b) < ((M >> 1) + 1))) || \
+ ((b > a) && ((b - a) > ((M >> 1) + 1))))
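+/*
+ * Example: compare_with_wrap(2, 0xfffffffe, MAX_TSN) is true because the
+ * TSN space has wrapped (2 is "after" 0xfffffffe modulo 2^32), while
+ * compare_with_wrap(0xfffffffe, 2, MAX_TSN) is false.
+ */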
+
+
+/* Mapping array manipulation routines */
+#define SCTP_IS_TSN_PRESENT(arry, gap) ((arry[(gap >> 3)] >> (gap & 0x07)) & 0x01)
+#define SCTP_SET_TSN_PRESENT(arry, gap) (arry[(gap >> 3)] |= (0x01 << ((gap & 0x07))))
+#define SCTP_UNSET_TSN_PRESENT(arry, gap) (arry[(gap >> 3)] &= ((~(0x01 << ((gap & 0x07)))) & 0xff))
+#define SCTP_CALC_TSN_TO_GAP(gap, tsn, mapping_tsn) do { \
+ if (tsn >= mapping_tsn) { \
+ gap = tsn - mapping_tsn; \
+ } else { \
+ gap = (MAX_TSN - mapping_tsn) + tsn + 1; \
+ } \
+ } while(0)
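+/*
+ * Example: with mapping_tsn = 0xfffffffe, an arriving tsn = 2 takes the
+ * wrap branch, so gap = (MAX_TSN - 0xfffffffe) + 2 + 1 = 4, matching the
+ * sequence 0xfffffffe (gap 0), 0xffffffff (1), 0 (2), 1 (3), 2 (4).
+ */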
+
+
+#define SCTP_RETRAN_DONE -1
+#define SCTP_RETRAN_EXIT -2
+
+/*
+ * This value defines the number of vtag block time wait entries per list
+ * element. Each entry takes two 4-byte ints (plus, of course, the overhead
+ * of the next pointer). Using 15 as an example yields ((8 * 15) + 8)
+ * or 128 bytes of overhead for each timewait block that gets
+ * initialized. Increasing it to 31 would yield 256 bytes per block.
+ */
+#define SCTP_NUMBER_IN_VTAG_BLOCK 15
+/*
+ * If we use the STACK option, we have an array of head pointers of this
+ * size. The tag is taken mod the array size to find the bucket, and then
+ * all entries in that bucket must be searched to see if the tag is in
+ * timed wait. If so we reject it.
+ */
+#define SCTP_STACK_VTAG_HASH_SIZE 32
+
+/*
+ * Number of seconds of time wait for a vtag.
+ */
+#define SCTP_TIME_WAIT 60
+
+#define SCTP_SEND_BUFFER_SPLITTING 0x00000001
+#define SCTP_RECV_BUFFER_SPLITTING 0x00000002
+
+/* The system retains a cache of free chunks
+ * to cut down on calls to the memory allocation system. There
+ * is a per-association limit of free items and an overall
+ * system limit. If either one gets hit then the resource
+ * stops being cached.
+ */
+
+#define SCTP_DEF_ASOC_RESC_LIMIT 10
+#define SCTP_DEF_SYSTEM_RESC_LIMIT 1000
+
+/*-
+ * defines for socket lock states.
+ * Used by __APPLE__ and SCTP_SO_LOCK_TESTING
+ */
+#define SCTP_SO_LOCKED 1
+#define SCTP_SO_NOT_LOCKED 0
+
+
+#define SCTP_HOLDS_LOCK 1
+#define SCTP_NOT_LOCKED 0
+
+/*-
+ * For address locks, do we hold the lock?
+ */
+#define SCTP_ADDR_LOCKED 1
+#define SCTP_ADDR_NOT_LOCKED 0
+
+#define IN4_ISPRIVATE_ADDRESS(a) \
+ ((((uint8_t *)&(a)->s_addr)[0] == 10) || \
+ ((((uint8_t *)&(a)->s_addr)[0] == 172) && \
+ (((uint8_t *)&(a)->s_addr)[1] >= 16) && \
+	 (((uint8_t *)&(a)->s_addr)[1] <= 31)) || \
+ ((((uint8_t *)&(a)->s_addr)[0] == 192) && \
+ (((uint8_t *)&(a)->s_addr)[1] == 168)))
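+/*
+ * Example: 10.1.2.3 and 192.168.0.1 match, 8.8.8.8 does not; the 172
+ * clause covers the RFC 1918 block 172.16.0.0 through 172.31.255.255.
+ */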
+
+#define IN4_ISLOOPBACK_ADDRESS(a) \
+ ((((uint8_t *)&(a)->s_addr)[0] == 127) && \
+ (((uint8_t *)&(a)->s_addr)[1] == 0) && \
+ (((uint8_t *)&(a)->s_addr)[2] == 0) && \
+ (((uint8_t *)&(a)->s_addr)[3] == 1))
+
+
+#if defined(_KERNEL)
+
+#define SCTP_GETTIME_TIMEVAL(x) (getmicrouptime(x))
+#define SCTP_GETPTIME_TIMEVAL(x) (microuptime(x))
+#endif
+/*#if defined(__FreeBSD__) || defined(__APPLE__)*/
+/*#define SCTP_GETTIME_TIMEVAL(x) { \*/
+/* (x)->tv_sec = ticks / 1000; \*/
+/* (x)->tv_usec = (ticks % 1000) * 1000; \*/
+/*}*/
+
+/*#else*/
+/*#define SCTP_GETTIME_TIMEVAL(x) (microtime(x))*/
+/*#endif __FreeBSD__ */
+
+#if defined(_KERNEL) || defined(__Userspace__)
+#define sctp_sowwakeup(inp, so) \
+do { \
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE) { \
+ inp->sctp_flags |= SCTP_PCB_FLAGS_WAKEOUTPUT; \
+ } else { \
+ sowwakeup(so); \
+ } \
+} while (0)
+
+#define sctp_sowwakeup_locked(inp, so) \
+do { \
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE) { \
+ SOCKBUF_UNLOCK(&((so)->so_snd)); \
+ inp->sctp_flags |= SCTP_PCB_FLAGS_WAKEOUTPUT; \
+ } else { \
+ sowwakeup_locked(so); \
+ } \
+} while (0)
+
+#define sctp_sorwakeup(inp, so) \
+do { \
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE) { \
+ inp->sctp_flags |= SCTP_PCB_FLAGS_WAKEINPUT; \
+ } else { \
+ sorwakeup(so); \
+ } \
+} while (0)
+
+#define sctp_sorwakeup_locked(inp, so) \
+do { \
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE) { \
+ inp->sctp_flags |= SCTP_PCB_FLAGS_WAKEINPUT; \
+ SOCKBUF_UNLOCK(&((so)->so_rcv)); \
+ } else { \
+ sorwakeup_locked(so); \
+ } \
+} while (0)
+
+#endif /* _KERNEL || __Userspace__ */
+#endif
diff --git a/rtems/freebsd/netinet/sctp_crc32.c b/rtems/freebsd/netinet/sctp_crc32.c
new file mode 100644
index 00000000..a69d755f
--- /dev/null
+++ b/rtems/freebsd/netinet/sctp_crc32.c
@@ -0,0 +1,148 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* $KAME: sctp_crc32.c,v 1.12 2005/03/06 16:04:17 itojun Exp $ */
+
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/freebsd/netinet/sctp_os.h>
+#include <rtems/freebsd/netinet/sctp.h>
+#include <rtems/freebsd/netinet/sctp_crc32.h>
+#include <rtems/freebsd/netinet/sctp_pcb.h>
+
+
+#if !defined(SCTP_WITH_NO_CSUM)
+
+static uint32_t
+sctp_finalize_crc32c(uint32_t crc32c)
+{
+ uint32_t result;
+
+#if BYTE_ORDER == BIG_ENDIAN
+ uint8_t byte0, byte1, byte2, byte3;
+
+#endif
+ /* Complement the result */
+ result = ~crc32c;
+#if BYTE_ORDER == BIG_ENDIAN
+	/*
+	 * For BIG-ENDIAN (aka Motorola byte order) platforms the result
+	 * is in little-endian form, so we must manually swap the bytes;
+	 * after that an htonl() would do nothing, so it is omitted.
+	 */
+ byte0 = result & 0x000000ff;
+ byte1 = (result >> 8) & 0x000000ff;
+ byte2 = (result >> 16) & 0x000000ff;
+ byte3 = (result >> 24) & 0x000000ff;
+ crc32c = ((byte0 << 24) | (byte1 << 16) | (byte2 << 8) | byte3);
+#else
+ /*
+ * For INTEL platforms the result comes out in network order. No
+ * htonl is required or the swap above. So we optimize out both the
+ * htonl and the manual swap above.
+ */
+ crc32c = result;
+#endif
+ return (crc32c);
+}
+
+uint32_t
+sctp_calculate_cksum(struct mbuf *m, uint32_t offset)
+{
+ /*
+	 * Given an mbuf chain with a packet header offset by 'offset'
+	 * pointing at an sctphdr (with csum set to 0), go through the
+	 * chain of SCTP_BUF_NEXT()s and calculate the SCTP checksum.
+	 * Note: if offset is greater than the total mbuf length, the
+	 * checksum of an empty byte range is returned (i.e. no real
+	 * error code is signalled).
+ uint32_t base = 0xffffffff;
+ struct mbuf *at;
+
+ at = m;
+ /* find the correct mbuf and offset into mbuf */
+ while ((at != NULL) && (offset > (uint32_t) SCTP_BUF_LEN(at))) {
+ offset -= SCTP_BUF_LEN(at); /* update remaining offset
+ * left */
+ at = SCTP_BUF_NEXT(at);
+ }
+ while (at != NULL) {
+ if ((SCTP_BUF_LEN(at) - offset) > 0) {
+ base = calculate_crc32c(base,
+ (unsigned char *)(SCTP_BUF_AT(at, offset)),
+ (unsigned int)(SCTP_BUF_LEN(at) - offset));
+ }
+ if (offset) {
+ /* we only offset once into the first mbuf */
+ if (offset < (uint32_t) SCTP_BUF_LEN(at))
+ offset = 0;
+ else
+ offset -= SCTP_BUF_LEN(at);
+ }
+ at = SCTP_BUF_NEXT(at);
+ }
+ base = sctp_finalize_crc32c(base);
+ return (base);
+}
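+
+/*
+ * Typical use (an illustrative sketch, not a call site in this file):
+ * the caller zeroes the checksum field of the SCTP common header and
+ * then stores the computed value back:
+ *
+ *	struct sctphdr *sh = ...;	pointer into the mbuf at 'offset'
+ *	sh->checksum = 0;
+ *	sh->checksum = sctp_calculate_cksum(m, offset);
+ */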
+
+#endif /* !defined(SCTP_WITH_NO_CSUM) */
+
+
+void
+sctp_delayed_cksum(struct mbuf *m, uint32_t offset)
+{
+#if defined(SCTP_WITH_NO_CSUM)
+ panic("sctp_delayed_cksum() called when using no SCTP CRC.");
+#else
+ uint32_t checksum;
+
+ checksum = sctp_calculate_cksum(m, offset);
+ SCTP_STAT_DECR(sctps_sendhwcrc);
+ SCTP_STAT_INCR(sctps_sendswcrc);
+ offset += offsetof(struct sctphdr, checksum);
+
+ if (offset + sizeof(uint32_t) > (uint32_t) (m->m_len)) {
+ printf("sctp_delayed_cksum(): m->len: %d, off: %d.\n",
+ (uint32_t) m->m_len, offset);
+ /*
+ * XXX this shouldn't happen, but if it does, the correct
+ * behavior may be to insert the checksum in the appropriate
+ * next mbuf in the chain.
+ */
+ return;
+ }
+ *(uint32_t *) (m->m_data + offset) = checksum;
+#endif
+}
diff --git a/rtems/freebsd/netinet/sctp_crc32.h b/rtems/freebsd/netinet/sctp_crc32.h
new file mode 100644
index 00000000..31727d86
--- /dev/null
+++ b/rtems/freebsd/netinet/sctp_crc32.h
@@ -0,0 +1,47 @@
+/*-
+ * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* $KAME: sctp_crc32.h,v 1.5 2004/08/17 04:06:16 itojun Exp $ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#ifndef __crc32c_h__
+#define __crc32c_h__
+
+#if defined(_KERNEL)
+#if !defined(SCTP_WITH_NO_CSUM)
+uint32_t sctp_calculate_cksum(struct mbuf *, uint32_t);
+
+#endif
+void sctp_delayed_cksum(struct mbuf *, uint32_t offset);
+
+#endif /* _KERNEL */
+#endif /* __crc32c_h__ */
diff --git a/rtems/freebsd/netinet/sctp_header.h b/rtems/freebsd/netinet/sctp_header.h
new file mode 100644
index 00000000..279deb4b
--- /dev/null
+++ b/rtems/freebsd/netinet/sctp_header.h
@@ -0,0 +1,624 @@
+/*-
+ * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* $KAME: sctp_header.h,v 1.14 2005/03/06 16:04:17 itojun Exp $ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#ifndef __sctp_header_h__
+#define __sctp_header_h__
+
+#include <rtems/freebsd/sys/time.h>
+#include <rtems/freebsd/netinet/sctp.h>
+#include <rtems/freebsd/netinet/sctp_constants.h>
+
+#define SCTP_PACKED __attribute__((packed))
+
+/*
+ * Parameter structures
+ */
+struct sctp_ipv4addr_param {
+ struct sctp_paramhdr ph;/* type=SCTP_IPV4_PARAM_TYPE, len=8 */
+ uint32_t addr; /* IPV4 address */
+} SCTP_PACKED;
+
+#define SCTP_V6_ADDR_BYTES 16
+
+
+struct sctp_ipv6addr_param {
+ struct sctp_paramhdr ph;/* type=SCTP_IPV6_PARAM_TYPE, len=20 */
+ uint8_t addr[SCTP_V6_ADDR_BYTES]; /* IPV6 address */
+} SCTP_PACKED;
+
+/* Cookie Preservative */
+struct sctp_cookie_perserve_param {
+ struct sctp_paramhdr ph;/* type=SCTP_COOKIE_PRESERVE, len=8 */
+ uint32_t time; /* time in ms to extend cookie */
+} SCTP_PACKED;
+
+#define SCTP_ARRAY_MIN_LEN 1
+/* Host Name Address */
+struct sctp_host_name_param {
+ struct sctp_paramhdr ph;/* type=SCTP_HOSTNAME_ADDRESS */
+ char name[SCTP_ARRAY_MIN_LEN]; /* host name */
+} SCTP_PACKED;
+
+/*
+ * This is the maximum padded size of a supported-address-types parameter:
+ * paramhdr (4 bytes) + 3 address types (6 bytes) + 2-byte pad = 12.
+ */
+#define SCTP_MAX_ADDR_PARAMS_SIZE 12
+/* supported address type */
+struct sctp_supported_addr_param {
+ struct sctp_paramhdr ph;/* type=SCTP_SUPPORTED_ADDRTYPE */
+ uint16_t addr_type[SCTP_ARRAY_MIN_LEN]; /* array of supported address
+ * types */
+} SCTP_PACKED;
+
+/* ECN parameter */
+struct sctp_ecn_supported_param {
+ struct sctp_paramhdr ph;/* type=SCTP_ECN_CAPABLE */
+} SCTP_PACKED;
+
+
+/* heartbeat info parameter */
+struct sctp_heartbeat_info_param {
+ struct sctp_paramhdr ph;
+ uint32_t time_value_1;
+ uint32_t time_value_2;
+ uint32_t random_value1;
+ uint32_t random_value2;
+ uint16_t user_req;
+ uint8_t addr_family;
+ uint8_t addr_len;
+ char address[SCTP_ADDRMAX];
+} SCTP_PACKED;
+
+
+/* draft-ietf-tsvwg-prsctp */
+/* PR-SCTP supported parameter */
+struct sctp_prsctp_supported_param {
+ struct sctp_paramhdr ph;
+} SCTP_PACKED;
+
+
+/* draft-ietf-tsvwg-addip-sctp */
+struct sctp_asconf_paramhdr { /* an ASCONF "parameter" */
+ struct sctp_paramhdr ph;/* a SCTP parameter header */
+ uint32_t correlation_id;/* correlation id for this param */
+} SCTP_PACKED;
+
+struct sctp_asconf_addr_param { /* an ASCONF address parameter */
+ struct sctp_asconf_paramhdr aph; /* asconf "parameter" */
+ struct sctp_ipv6addr_param addrp; /* max storage size */
+} SCTP_PACKED;
+
+
+struct sctp_asconf_tag_param { /* an ASCONF NAT-Vtag parameter */
+ struct sctp_asconf_paramhdr aph; /* asconf "parameter" */
+ uint32_t local_vtag;
+ uint32_t remote_vtag;
+} SCTP_PACKED;
+
+
+struct sctp_asconf_addrv4_param { /* an ASCONF address (v4) parameter */
+ struct sctp_asconf_paramhdr aph; /* asconf "parameter" */
+ struct sctp_ipv4addr_param addrp; /* max storage size */
+} SCTP_PACKED;
+
+#define SCTP_MAX_SUPPORTED_EXT 256
+
+struct sctp_supported_chunk_types_param {
+ struct sctp_paramhdr ph;/* type = 0x8008 len = x */
+ uint8_t chunk_types[];
+} SCTP_PACKED;
+
+
+/* ECN Nonce: draft-ladha-sctp-ecn-nonce */
+struct sctp_ecn_nonce_supported_param {
+ struct sctp_paramhdr ph;/* type = 0x8001 len = 4 */
+} SCTP_PACKED;
+
+
+/*
+ * Structures for DATA chunks
+ */
+struct sctp_data {
+ uint32_t tsn;
+ uint16_t stream_id;
+ uint16_t stream_sequence;
+ uint32_t protocol_id;
+ /* user data follows */
+} SCTP_PACKED;
+
+struct sctp_data_chunk {
+ struct sctp_chunkhdr ch;
+ struct sctp_data dp;
+} SCTP_PACKED;
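+
+/*
+ * On the wire a DATA chunk is the 4-byte chunk header followed by this
+ * 12-byte fixed part and then the user data, padded to a multiple of 4.
+ * For example (illustrative): a one-byte user message costs
+ * 4 + 12 + 1 = 17 bytes, padded out to 20 on the wire.
+ */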
+
+/*
+ * Structures for the control chunks
+ */
+
+/* Initiate (INIT)/Initiate Ack (INIT ACK) */
+struct sctp_init {
+ uint32_t initiate_tag; /* initiate tag */
+ uint32_t a_rwnd; /* a_rwnd */
+ uint16_t num_outbound_streams; /* OS */
+ uint16_t num_inbound_streams; /* MIS */
+ uint32_t initial_tsn; /* I-TSN */
+ /* optional param's follow */
+} SCTP_PACKED;
+
+#define SCTP_IDENTIFICATION_SIZE 16
+#define SCTP_ADDRESS_SIZE 4
+#define SCTP_RESERVE_SPACE 6
+/* state cookie header */
+struct sctp_state_cookie { /* this is our definition... */
+ uint8_t identification[SCTP_IDENTIFICATION_SIZE]; /* id of who we are */
+ struct timeval time_entered; /* the time I built cookie */
+ uint32_t cookie_life; /* life I will award this cookie */
+ uint32_t tie_tag_my_vtag; /* my tag in old association */
+
+ uint32_t tie_tag_peer_vtag; /* peers tag in old association */
+ uint32_t peers_vtag; /* peers tag in INIT (for quick ref) */
+
+ uint32_t my_vtag; /* my tag in INIT-ACK (for quick ref) */
+ uint32_t address[SCTP_ADDRESS_SIZE]; /* 4 ints/128 bits */
+ uint32_t addr_type; /* address type */
+ uint32_t laddress[SCTP_ADDRESS_SIZE]; /* my local from address */
+ uint32_t laddr_type; /* my local from address type */
+ uint32_t scope_id; /* v6 scope id for link-locals */
+
+ uint16_t peerport; /* port address of the peer in the INIT */
+ uint16_t myport; /* my port address used in the INIT */
+ uint8_t ipv4_addr_legal;/* Are V4 addr legal? */
+ uint8_t ipv6_addr_legal;/* Are V6 addr legal? */
+ uint8_t local_scope; /* IPv6 local scope flag */
+ uint8_t site_scope; /* IPv6 site scope flag */
+
+ uint8_t ipv4_scope; /* IPv4 private addr scope */
+ uint8_t loopback_scope; /* loopback scope information */
+ uint8_t reserved[SCTP_RESERVE_SPACE]; /* Align to 64 bits */
+	/*
+	 * Tacked on at the end are the INIT chunk and the INIT-ACK chunk
+	 * (minus the cookie).
+	 */
+} SCTP_PACKED;
+
+
+/* Used for NAT state error cause */
+struct sctp_missing_nat_state {
+ uint16_t cause;
+ uint16_t length;
+ uint8_t data[];
+} SCTP_PACKED;
+
+
+struct sctp_inv_mandatory_param {
+ uint16_t cause;
+ uint16_t length;
+ uint32_t num_param;
+ uint16_t param;
+	/*
+	 * We include this so we can zero it, since only a missing cookie
+	 * will cause this error.
+	 */
+ uint16_t resv;
+} SCTP_PACKED;
+
+struct sctp_unresolv_addr {
+ uint16_t cause;
+ uint16_t length;
+ uint16_t addr_type;
+ uint16_t reserved; /* Only one invalid addr type */
+} SCTP_PACKED;
+
+/* state cookie parameter */
+struct sctp_state_cookie_param {
+ struct sctp_paramhdr ph;
+ struct sctp_state_cookie cookie;
+} SCTP_PACKED;
+
+struct sctp_init_chunk {
+ struct sctp_chunkhdr ch;
+ struct sctp_init init;
+} SCTP_PACKED;
+
+struct sctp_init_msg {
+ struct sctphdr sh;
+ struct sctp_init_chunk msg;
+} SCTP_PACKED;
+
+/* ... used for both INIT and INIT ACK */
+#define sctp_init_ack sctp_init
+#define sctp_init_ack_chunk sctp_init_chunk
+#define sctp_init_ack_msg sctp_init_msg
+
+
+/* Selective Ack (SACK) */
+struct sctp_gap_ack_block {
+ uint16_t start; /* Gap Ack block start */
+ uint16_t end; /* Gap Ack block end */
+} SCTP_PACKED;
+
+struct sctp_sack {
+ uint32_t cum_tsn_ack; /* cumulative TSN Ack */
+ uint32_t a_rwnd; /* updated a_rwnd of sender */
+ uint16_t num_gap_ack_blks; /* number of Gap Ack blocks */
+ uint16_t num_dup_tsns; /* number of duplicate TSNs */
+ /* struct sctp_gap_ack_block's follow */
+ /* uint32_t duplicate_tsn's follow */
+} SCTP_PACKED;
+
+struct sctp_sack_chunk {
+ struct sctp_chunkhdr ch;
+ struct sctp_sack sack;
+} SCTP_PACKED;
+
+struct sctp_nr_sack {
+ uint32_t cum_tsn_ack; /* cumulative TSN Ack */
+ uint32_t a_rwnd; /* updated a_rwnd of sender */
+ uint16_t num_gap_ack_blks; /* number of Gap Ack blocks */
+ uint16_t num_nr_gap_ack_blks; /* number of NR Gap Ack blocks */
+ uint16_t num_dup_tsns; /* number of duplicate TSNs */
+ uint16_t reserved; /* not currently used */
+ /* struct sctp_gap_ack_block's follow */
+ /* uint32_t duplicate_tsn's follow */
+} SCTP_PACKED;
+
+struct sctp_nr_sack_chunk {
+ struct sctp_chunkhdr ch;
+ struct sctp_nr_sack nr_sack;
+} SCTP_PACKED;
+
+
+/* Heartbeat Request (HEARTBEAT) */
+struct sctp_heartbeat {
+ struct sctp_heartbeat_info_param hb_info;
+} SCTP_PACKED;
+
+struct sctp_heartbeat_chunk {
+ struct sctp_chunkhdr ch;
+ struct sctp_heartbeat heartbeat;
+} SCTP_PACKED;
+
+/* ... used for Heartbeat Ack (HEARTBEAT ACK) */
+#define sctp_heartbeat_ack sctp_heartbeat
+#define sctp_heartbeat_ack_chunk sctp_heartbeat_chunk
+
+
+/* Abort Association (ABORT) */
+struct sctp_abort_chunk {
+ struct sctp_chunkhdr ch;
+ /* optional error cause may follow */
+} SCTP_PACKED;
+
+struct sctp_abort_msg {
+ struct sctphdr sh;
+ struct sctp_abort_chunk msg;
+} SCTP_PACKED;
+
+
+/* Shutdown Association (SHUTDOWN) */
+struct sctp_shutdown_chunk {
+ struct sctp_chunkhdr ch;
+ uint32_t cumulative_tsn_ack;
+} SCTP_PACKED;
+
+
+/* Shutdown Acknowledgment (SHUTDOWN ACK) */
+struct sctp_shutdown_ack_chunk {
+ struct sctp_chunkhdr ch;
+} SCTP_PACKED;
+
+
+/* Operation Error (ERROR) */
+struct sctp_error_chunk {
+ struct sctp_chunkhdr ch;
+ /* optional error causes follow */
+} SCTP_PACKED;
+
+
+/* Cookie Echo (COOKIE ECHO) */
+struct sctp_cookie_echo_chunk {
+ struct sctp_chunkhdr ch;
+ struct sctp_state_cookie cookie;
+} SCTP_PACKED;
+
+/* Cookie Acknowledgment (COOKIE ACK) */
+struct sctp_cookie_ack_chunk {
+ struct sctp_chunkhdr ch;
+} SCTP_PACKED;
+
+/* Explicit Congestion Notification Echo (ECNE) */
+struct sctp_ecne_chunk {
+ struct sctp_chunkhdr ch;
+ uint32_t tsn;
+} SCTP_PACKED;
+
+/* Congestion Window Reduced (CWR) */
+struct sctp_cwr_chunk {
+ struct sctp_chunkhdr ch;
+ uint32_t tsn;
+} SCTP_PACKED;
+
+/* Shutdown Complete (SHUTDOWN COMPLETE) */
+struct sctp_shutdown_complete_chunk {
+ struct sctp_chunkhdr ch;
+} SCTP_PACKED;
+
+/* Oper error holding a stale cookie */
+struct sctp_stale_cookie_msg {
+ struct sctp_paramhdr ph;/* really an error cause */
+ uint32_t time_usec;
+} SCTP_PACKED;
+
+struct sctp_adaptation_layer_indication {
+ struct sctp_paramhdr ph;
+ uint32_t indication;
+} SCTP_PACKED;
+
+struct sctp_cookie_while_shutting_down {
+ struct sctphdr sh;
+ struct sctp_chunkhdr ch;
+ struct sctp_paramhdr ph;/* really an error cause */
+} SCTP_PACKED;
+
+struct sctp_shutdown_complete_msg {
+ struct sctphdr sh;
+ struct sctp_shutdown_complete_chunk shut_cmp;
+} SCTP_PACKED;
+
+/*
+ * draft-ietf-tsvwg-addip-sctp
+ */
+/* Address/Stream Configuration Change (ASCONF) */
+struct sctp_asconf_chunk {
+ struct sctp_chunkhdr ch;
+ uint32_t serial_number;
+ /* lookup address parameter (mandatory) */
+ /* asconf parameters follow */
+} SCTP_PACKED;
+
+/* Address/Stream Configuration Acknowledge (ASCONF ACK) */
+struct sctp_asconf_ack_chunk {
+ struct sctp_chunkhdr ch;
+ uint32_t serial_number;
+ /* asconf parameters follow */
+} SCTP_PACKED;
+
+/* draft-ietf-tsvwg-prsctp */
+/* Forward Cumulative TSN (FORWARD TSN) */
+struct sctp_forward_tsn_chunk {
+ struct sctp_chunkhdr ch;
+ uint32_t new_cumulative_tsn;
+ /* stream/sequence pairs (sctp_strseq) follow */
+} SCTP_PACKED;
+
+struct sctp_strseq {
+ uint16_t stream;
+ uint16_t sequence;
+} SCTP_PACKED;
+
+struct sctp_forward_tsn_msg {
+ struct sctphdr sh;
+ struct sctp_forward_tsn_chunk msg;
+} SCTP_PACKED;
+
+/* should be a multiple of 4, minus 1 (i.e. 3, 7, 11, etc.) */
+
+#define SCTP_NUM_DB_TO_VERIFY 31
+
+struct sctp_chunk_desc {
+ uint8_t chunk_type;
+ uint8_t data_bytes[SCTP_NUM_DB_TO_VERIFY];
+ uint32_t tsn_ifany;
+} SCTP_PACKED;
+
+
+struct sctp_pktdrop_chunk {
+ struct sctp_chunkhdr ch;
+ uint32_t bottle_bw;
+ uint32_t current_onq;
+ uint16_t trunc_len;
+ uint16_t reserved;
+ uint8_t data[];
+} SCTP_PACKED;
+
+/**********STREAM RESET STUFF ******************/
+
+struct sctp_stream_reset_out_request {
+ struct sctp_paramhdr ph;
+ uint32_t request_seq; /* monotonically increasing seq no */
+ uint32_t response_seq; /* if a response, the resp seq no */
+ uint32_t send_reset_at_tsn; /* last TSN I assigned outbound */
+ uint16_t list_of_streams[]; /* if not all list of streams */
+} SCTP_PACKED;
+
+struct sctp_stream_reset_in_request {
+ struct sctp_paramhdr ph;
+ uint32_t request_seq;
+ uint16_t list_of_streams[]; /* if not all list of streams */
+} SCTP_PACKED;
+
+
+struct sctp_stream_reset_tsn_request {
+ struct sctp_paramhdr ph;
+ uint32_t request_seq;
+} SCTP_PACKED;
+
+struct sctp_stream_reset_response {
+ struct sctp_paramhdr ph;
+ uint32_t response_seq; /* if a response, the resp seq no */
+ uint32_t result;
+} SCTP_PACKED;
+
+struct sctp_stream_reset_response_tsn {
+ struct sctp_paramhdr ph;
+ uint32_t response_seq; /* if a response, the resp seq no */
+ uint32_t result;
+ uint32_t senders_next_tsn;
+ uint32_t receivers_next_tsn;
+} SCTP_PACKED;
+
+struct sctp_stream_reset_add_strm {
+ struct sctp_paramhdr ph;
+ uint32_t request_seq;
+ uint16_t number_of_streams;
+ uint16_t reserved;
+} SCTP_PACKED;
+
+#define SCTP_STREAM_RESET_NOTHING 0x00000000 /* Nothing for me to do */
+#define SCTP_STREAM_RESET_PERFORMED 0x00000001 /* Did it */
+#define SCTP_STREAM_RESET_DENIED 0x00000002 /* refused to do it */
+#define SCTP_STREAM_RESET_ERROR_STR 0x00000003 /* bad Stream no */
+#define SCTP_STREAM_RESET_TRY_LATER 0x00000004 /* collision, try again */
+#define SCTP_STREAM_RESET_BAD_SEQNO 0x00000005 /* bad str-reset seq no */
+
+/*
+ * Convenience structures; note that if you are making a request for
+ * specific streams, the request will need to be an overlay structure.
+ */
+
+struct sctp_stream_reset_out_req {
+ struct sctp_chunkhdr ch;
+ struct sctp_stream_reset_out_request sr_req;
+} SCTP_PACKED;
+
+struct sctp_stream_reset_in_req {
+ struct sctp_chunkhdr ch;
+ struct sctp_stream_reset_in_request sr_req;
+} SCTP_PACKED;
+
+struct sctp_stream_reset_tsn_req {
+ struct sctp_chunkhdr ch;
+ struct sctp_stream_reset_tsn_request sr_req;
+} SCTP_PACKED;
+
+struct sctp_stream_reset_resp {
+ struct sctp_chunkhdr ch;
+ struct sctp_stream_reset_response sr_resp;
+} SCTP_PACKED;
+
+/* response only valid with a TSN request */
+struct sctp_stream_reset_resp_tsn {
+ struct sctp_chunkhdr ch;
+ struct sctp_stream_reset_response_tsn sr_resp;
+} SCTP_PACKED;
+
+/****************************************************/
+
+/*
+ * Authenticated chunks support draft-ietf-tsvwg-sctp-auth
+ */
+
+/* Should we make the max be 32? */
+#define SCTP_RANDOM_MAX_SIZE 256
+struct sctp_auth_random {
+ struct sctp_paramhdr ph;/* type = 0x8002 */
+ uint8_t random_data[];
+} SCTP_PACKED;
+
+struct sctp_auth_chunk_list {
+ struct sctp_paramhdr ph;/* type = 0x8003 */
+ uint8_t chunk_types[];
+} SCTP_PACKED;
+
+struct sctp_auth_hmac_algo {
+ struct sctp_paramhdr ph;/* type = 0x8004 */
+ uint16_t hmac_ids[];
+} SCTP_PACKED;
+
+struct sctp_auth_chunk {
+ struct sctp_chunkhdr ch;
+ uint16_t shared_key_id;
+ uint16_t hmac_id;
+ uint8_t hmac[];
+} SCTP_PACKED;
+
+struct sctp_auth_invalid_hmac {
+ struct sctp_paramhdr ph;
+ uint16_t hmac_id;
+ uint16_t padding;
+} SCTP_PACKED;
+
+/*
+ * We pre-reserve enough room for an ECNE or CWR AND a SACK with no missing
+ * pieces. If the ECNE is missing we could have a couple of gap blocks
+ * instead. This way we optimize so we can MOST likely bundle a SACK/ECN
+ * with the smallest-size data chunk I will split into. We could increase
+ * throughput slightly by taking these two out, but the 24-sack/8-CWR,
+ * i.e. 32 bytes, I pre-reserve is worth it for now, I feel.
+ */
+#ifndef SCTP_MAX_OVERHEAD
+#ifdef INET6
+#define SCTP_MAX_OVERHEAD (sizeof(struct sctp_data_chunk) + \
+ sizeof(struct sctphdr) + \
+ sizeof(struct sctp_ecne_chunk) + \
+ sizeof(struct sctp_sack_chunk) + \
+ sizeof(struct ip6_hdr))
+
+#define SCTP_MED_OVERHEAD (sizeof(struct sctp_data_chunk) + \
+ sizeof(struct sctphdr) + \
+ sizeof(struct ip6_hdr))
+
+
+#define SCTP_MIN_OVERHEAD (sizeof(struct ip6_hdr) + \
+ sizeof(struct sctphdr))
+
+#else
+#define SCTP_MAX_OVERHEAD (sizeof(struct sctp_data_chunk) + \
+ sizeof(struct sctphdr) + \
+ sizeof(struct sctp_ecne_chunk) + \
+ sizeof(struct sctp_sack_chunk) + \
+ sizeof(struct ip))
+
+#define SCTP_MED_OVERHEAD (sizeof(struct sctp_data_chunk) + \
+ sizeof(struct sctphdr) + \
+ sizeof(struct ip))
+
+
+#define SCTP_MIN_OVERHEAD (sizeof(struct ip) + \
+ sizeof(struct sctphdr))
+
+#endif /* INET6 */
+#endif /* !SCTP_MAX_OVERHEAD */
+
+#define SCTP_MED_V4_OVERHEAD (sizeof(struct sctp_data_chunk) + \
+ sizeof(struct sctphdr) + \
+ sizeof(struct ip))
+
+#define SCTP_MIN_V4_OVERHEAD (sizeof(struct ip) + \
+ sizeof(struct sctphdr))
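+
+/*
+ * For reference (illustrative, assuming the usual header sizes: 20-byte
+ * IPv4 header, 40-byte IPv6 header, 12-byte SCTP common header, 4-byte
+ * chunk header, no IP options), the macros above work out to:
+ *
+ *	IPv4:  MIN 32, MED 48, MAX 72 bytes of overhead
+ *	IPv6:  MIN 52, MED 68, MAX 92 bytes of overhead
+ */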
+
+#undef SCTP_PACKED
+#endif /* !__sctp_header_h__ */
diff --git a/rtems/freebsd/netinet/sctp_indata.c b/rtems/freebsd/netinet/sctp_indata.c
new file mode 100644
index 00000000..3e22efb9
--- /dev/null
+++ b/rtems/freebsd/netinet/sctp_indata.c
@@ -0,0 +1,5800 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* $KAME: sctp_indata.c,v 1.36 2005/03/06 16:04:17 itojun Exp $ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/freebsd/netinet/sctp_os.h>
+#include <rtems/freebsd/netinet/sctp_var.h>
+#include <rtems/freebsd/netinet/sctp_sysctl.h>
+#include <rtems/freebsd/netinet/sctp_pcb.h>
+#include <rtems/freebsd/netinet/sctp_header.h>
+#include <rtems/freebsd/netinet/sctputil.h>
+#include <rtems/freebsd/netinet/sctp_output.h>
+#include <rtems/freebsd/netinet/sctp_input.h>
+#include <rtems/freebsd/netinet/sctp_indata.h>
+#include <rtems/freebsd/netinet/sctp_uio.h>
+#include <rtems/freebsd/netinet/sctp_timer.h>
+
+
+/*
+ * NOTES: On the outbound side of things I need to check the SACK timer to
+ * see if I should generate a SACK into the chunk queue (if I have data to
+ * send, that is, and will be sending it), for bundling.
+ *
+ * The callback in sctp_usrreq.c will get called when the socket is read from.
+ * This will cause sctp_service_queues() to get called on the top entry in
+ * the list.
+ */
+
+void
+sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
+{
+ asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
+}
+
+/* Calculate what the rwnd would be */
+uint32_t
+sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
+{
+ uint32_t calc = 0;
+
+	/*
+	 * This is really set wrong with respect to a 1-to-many socket,
+	 * since sb_cc is the count that every association has put up.
+	 * When we re-write sctp_soreceive we will fix this so that ONLY
+	 * this association's data is taken into account.
+	 */
+ if (stcb->sctp_socket == NULL)
+ return (calc);
+
+ if (stcb->asoc.sb_cc == 0 &&
+ asoc->size_on_reasm_queue == 0 &&
+ asoc->size_on_all_streams == 0) {
+ /* Full rwnd granted */
+ calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
+ return (calc);
+ }
+ /* get actual space */
+ calc = (uint32_t) sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);
+
+	/*
+	 * Take out what has NOT yet been put on the socket queue but that
+	 * we still hold for putting up.
+	 */
+ calc = sctp_sbspace_sub(calc, (uint32_t) (asoc->size_on_reasm_queue +
+ asoc->cnt_on_reasm_queue * MSIZE));
+ calc = sctp_sbspace_sub(calc, (uint32_t) (asoc->size_on_all_streams +
+ asoc->cnt_on_all_streams * MSIZE));
+
+ if (calc == 0) {
+ /* out of space */
+ return (calc);
+ }
+ /* what is the overhead of all these rwnd's */
+ calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
+	/*
+	 * If the window gets too small due to control overhead, reduce it
+	 * to 1, even if it is 0: SWS avoidance engaged.
+	 */
+ if (calc < stcb->asoc.my_rwnd_control_len) {
+ calc = 1;
+ }
+ return (calc);
+}
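+
+/*
+ * Worked example (illustrative, assuming a 64 KB receive buffer and an
+ * MSIZE of 256): with an empty socket buffer and 4 KB held on the
+ * reassembly queue in 8 chunks, sctp_sbspace() yields about 65536, from
+ * which 4096 + 8 * 256 = 6144 is subtracted for the queued data and its
+ * per-mbuf bookkeeping, so roughly 58 KB is advertised (less
+ * my_rwnd_control_len for control overhead).
+ */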
+
+
+
+/*
+ * Build out our readq entry based on the incoming packet.
+ */
+struct sctp_queued_to_read *
+sctp_build_readq_entry(struct sctp_tcb *stcb,
+ struct sctp_nets *net,
+ uint32_t tsn, uint32_t ppid,
+ uint32_t context, uint16_t stream_no,
+ uint16_t stream_seq, uint8_t flags,
+ struct mbuf *dm)
+{
+ struct sctp_queued_to_read *read_queue_e = NULL;
+
+ sctp_alloc_a_readq(stcb, read_queue_e);
+ if (read_queue_e == NULL) {
+ goto failed_build;
+ }
+ read_queue_e->sinfo_stream = stream_no;
+ read_queue_e->sinfo_ssn = stream_seq;
+ read_queue_e->sinfo_flags = (flags << 8);
+ read_queue_e->sinfo_ppid = ppid;
+ read_queue_e->sinfo_context = stcb->asoc.context;
+ read_queue_e->sinfo_timetolive = 0;
+ read_queue_e->sinfo_tsn = tsn;
+ read_queue_e->sinfo_cumtsn = tsn;
+ read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
+ read_queue_e->whoFrom = net;
+ read_queue_e->length = 0;
+ atomic_add_int(&net->ref_count, 1);
+ read_queue_e->data = dm;
+ read_queue_e->spec_flags = 0;
+ read_queue_e->tail_mbuf = NULL;
+ read_queue_e->aux_data = NULL;
+ read_queue_e->stcb = stcb;
+ read_queue_e->port_from = stcb->rport;
+ read_queue_e->do_not_ref_stcb = 0;
+ read_queue_e->end_added = 0;
+ read_queue_e->some_taken = 0;
+ read_queue_e->pdapi_aborted = 0;
+failed_build:
+ return (read_queue_e);
+}
+
+
+/*
+ * Build out our readq entry based on the queued tmit chunk.
+ */
+static struct sctp_queued_to_read *
+sctp_build_readq_entry_chk(struct sctp_tcb *stcb,
+ struct sctp_tmit_chunk *chk)
+{
+ struct sctp_queued_to_read *read_queue_e = NULL;
+
+ sctp_alloc_a_readq(stcb, read_queue_e);
+ if (read_queue_e == NULL) {
+ goto failed_build;
+ }
+ read_queue_e->sinfo_stream = chk->rec.data.stream_number;
+ read_queue_e->sinfo_ssn = chk->rec.data.stream_seq;
+ read_queue_e->sinfo_flags = (chk->rec.data.rcv_flags << 8);
+ read_queue_e->sinfo_ppid = chk->rec.data.payloadtype;
+ read_queue_e->sinfo_context = stcb->asoc.context;
+ read_queue_e->sinfo_timetolive = 0;
+ read_queue_e->sinfo_tsn = chk->rec.data.TSN_seq;
+ read_queue_e->sinfo_cumtsn = chk->rec.data.TSN_seq;
+ read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
+ read_queue_e->whoFrom = chk->whoTo;
+ read_queue_e->aux_data = NULL;
+ read_queue_e->length = 0;
+ atomic_add_int(&chk->whoTo->ref_count, 1);
+ read_queue_e->data = chk->data;
+ read_queue_e->tail_mbuf = NULL;
+ read_queue_e->stcb = stcb;
+ read_queue_e->port_from = stcb->rport;
+ read_queue_e->spec_flags = 0;
+ read_queue_e->do_not_ref_stcb = 0;
+ read_queue_e->end_added = 0;
+ read_queue_e->some_taken = 0;
+ read_queue_e->pdapi_aborted = 0;
+failed_build:
+ return (read_queue_e);
+}
+
+
+struct mbuf *
+sctp_build_ctl_nchunk(struct sctp_inpcb *inp,
+ struct sctp_sndrcvinfo *sinfo)
+{
+ struct sctp_sndrcvinfo *outinfo;
+ struct cmsghdr *cmh;
+ struct mbuf *ret;
+ int len;
+ int use_extended = 0;
+
+ if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
+ /* user does not want the sndrcv ctl */
+ return (NULL);
+ }
+ if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
+ use_extended = 1;
+ len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
+ } else {
+ len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
+ }
+
+
+ ret = sctp_get_mbuf_for_msg(len,
+ 0, M_DONTWAIT, 1, MT_DATA);
+
+ if (ret == NULL) {
+ /* No space */
+ return (ret);
+ }
+ /* We need a CMSG header followed by the struct */
+ cmh = mtod(ret, struct cmsghdr *);
+ outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
+ cmh->cmsg_level = IPPROTO_SCTP;
+ if (use_extended) {
+ cmh->cmsg_type = SCTP_EXTRCV;
+ cmh->cmsg_len = len;
+ memcpy(outinfo, sinfo, len);
+ } else {
+ cmh->cmsg_type = SCTP_SNDRCV;
+ cmh->cmsg_len = len;
+ *outinfo = *sinfo;
+ }
+ SCTP_BUF_LEN(ret) = cmh->cmsg_len;
+ return (ret);
+}
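+
+/*
+ * A receiver walks the resulting control data with the standard cmsg
+ * macros; a user-space sketch (illustrative, assuming a struct msghdr
+ * named msg filled in by recvmsg()):
+ *
+ *	struct cmsghdr *c;
+ *	struct sctp_sndrcvinfo *info = NULL;
+ *
+ *	for (c = CMSG_FIRSTHDR(&msg); c != NULL; c = CMSG_NXTHDR(&msg, c)) {
+ *		if (c->cmsg_level == IPPROTO_SCTP &&
+ *		    c->cmsg_type == SCTP_SNDRCV)
+ *			info = (struct sctp_sndrcvinfo *)CMSG_DATA(c);
+ *	}
+ */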
+
+
+char *
+sctp_build_ctl_cchunk(struct sctp_inpcb *inp,
+ int *control_len,
+ struct sctp_sndrcvinfo *sinfo)
+{
+ struct sctp_sndrcvinfo *outinfo;
+ struct cmsghdr *cmh;
+ char *buf;
+ int len;
+ int use_extended = 0;
+
+ if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
+ /* user does not want the sndrcv ctl */
+ return (NULL);
+ }
+ if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
+ use_extended = 1;
+ len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
+ } else {
+ len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
+ }
+ SCTP_MALLOC(buf, char *, len, SCTP_M_CMSG);
+ if (buf == NULL) {
+ /* No space */
+ return (buf);
+ }
+ /* We need a CMSG header followed by the struct */
+ cmh = (struct cmsghdr *)buf;
+ outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
+ cmh->cmsg_level = IPPROTO_SCTP;
+ if (use_extended) {
+ cmh->cmsg_type = SCTP_EXTRCV;
+ cmh->cmsg_len = len;
+ memcpy(outinfo, sinfo, len);
+ } else {
+ cmh->cmsg_type = SCTP_SNDRCV;
+ cmh->cmsg_len = len;
+ *outinfo = *sinfo;
+ }
+ *control_len = len;
+ return (buf);
+}
+
+static void
+sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn)
+{
+ uint32_t gap, i, cumackp1;
+ int fnd = 0;
+
+ if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
+ return;
+ }
+ cumackp1 = asoc->cumulative_tsn + 1;
+ if (compare_with_wrap(cumackp1, tsn, MAX_TSN)) {
+ /*
+ * this tsn is behind the cum ack and thus we don't need to
+ * worry about it being moved from one to the other.
+ */
+ return;
+ }
+ SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
+ if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
+ printf("gap:%x tsn:%x\n", gap, tsn);
+ sctp_print_mapping_array(asoc);
+#ifdef INVARIANTS
+ panic("Things are really messed up now!!");
+#endif
+ }
+ SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
+ SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
+ if (compare_with_wrap(tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN)) {
+ asoc->highest_tsn_inside_nr_map = tsn;
+ }
+ if (tsn == asoc->highest_tsn_inside_map) {
+ /* We must back down to see what the new highest is */
+ for (i = tsn - 1; (compare_with_wrap(i, asoc->mapping_array_base_tsn, MAX_TSN) ||
+ (i == asoc->mapping_array_base_tsn)); i--) {
+ SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
+ if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
+ asoc->highest_tsn_inside_map = i;
+ fnd = 1;
+ break;
+ }
+ }
+ if (!fnd) {
+ asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
+ }
+ }
+}
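+
+/*
+ * The mapping arrays are plain bit vectors indexed by the gap between a
+ * TSN and mapping_array_base_tsn; moving a bit from mapping_array to
+ * nr_mapping_array above is what makes the TSN non-revokable. As a
+ * sketch, SCTP_SET_TSN_PRESENT expands to roughly:
+ *
+ *	array[gap >> 3] |= (0x01 << (gap & 0x07));
+ */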
+
+
+/*
+ * We are currently delivering from the reassembly queue. We must continue
+ * to deliver until we either: 1) run out of space, 2) run out of sequential
+ * TSNs, or 3) hit the SCTP_DATA_LAST_FRAG flag.
+ */
+static void
+sctp_service_reassembly(struct sctp_tcb *stcb, struct sctp_association *asoc)
+{
+ struct sctp_tmit_chunk *chk;
+ uint16_t nxt_todel;
+ uint16_t stream_no;
+ int end = 0;
+ int cntDel;
+
+ struct sctp_queued_to_read *control, *ctl, *ctlat;
+
+ if (stcb == NULL)
+ return;
+
+ cntDel = stream_no = 0;
+ if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
+ (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) ||
+ (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
+ /* socket above is long gone or going.. */
+abandon:
+ asoc->fragmented_delivery_inprogress = 0;
+ chk = TAILQ_FIRST(&asoc->reasmqueue);
+ while (chk) {
+ TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
+ asoc->size_on_reasm_queue -= chk->send_size;
+ sctp_ucount_decr(asoc->cnt_on_reasm_queue);
+			/*
+			 * Lose the data pointer, since it's in the socket
+			 * buffer.
+			 */
+ if (chk->data) {
+ sctp_m_freem(chk->data);
+ chk->data = NULL;
+ }
+ /* Now free the address and data */
+ sctp_free_a_chunk(stcb, chk);
+ /* sa_ignore FREED_MEMORY */
+ chk = TAILQ_FIRST(&asoc->reasmqueue);
+ }
+ return;
+ }
+ SCTP_TCB_LOCK_ASSERT(stcb);
+ do {
+ chk = TAILQ_FIRST(&asoc->reasmqueue);
+ if (chk == NULL) {
+ return;
+ }
+ if (chk->rec.data.TSN_seq != (asoc->tsn_last_delivered + 1)) {
+ /* Can't deliver more :< */
+ return;
+ }
+ stream_no = chk->rec.data.stream_number;
+ nxt_todel = asoc->strmin[stream_no].last_sequence_delivered + 1;
+ if (nxt_todel != chk->rec.data.stream_seq &&
+ (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
+ /*
+ * Not the next sequence to deliver in its stream OR
+ * unordered
+ */
+ return;
+ }
+ if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
+
+ control = sctp_build_readq_entry_chk(stcb, chk);
+ if (control == NULL) {
+ /* out of memory? */
+ return;
+ }
+ /* save it off for our future deliveries */
+ stcb->asoc.control_pdapi = control;
+ if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
+ end = 1;
+ else
+ end = 0;
+ sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
+ sctp_add_to_readq(stcb->sctp_ep,
+ stcb, control, &stcb->sctp_socket->so_rcv, end,
+ SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
+ cntDel++;
+ } else {
+ if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
+ end = 1;
+ else
+ end = 0;
+ sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
+ if (sctp_append_to_readq(stcb->sctp_ep, stcb,
+ stcb->asoc.control_pdapi,
+ chk->data, end, chk->rec.data.TSN_seq,
+ &stcb->sctp_socket->so_rcv)) {
+				/*
+				 * something is very wrong, either
+				 * control_pdapi is NULL, or the tail_mbuf
+				 * is corrupt, or there is an EOM already on
+				 * the mbuf chain.
+				 */
+ if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
+ goto abandon;
+ } else {
+#ifdef INVARIANTS
+ if ((stcb->asoc.control_pdapi == NULL) || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) {
+ panic("This should not happen control_pdapi NULL?");
+ }
+					/* if we did not panic, it was an EOM */
+ panic("Bad chunking ??");
+#else
+ if ((stcb->asoc.control_pdapi == NULL) || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) {
+ SCTP_PRINTF("This should not happen control_pdapi NULL?\n");
+ }
+ SCTP_PRINTF("Bad chunking ??\n");
+ SCTP_PRINTF("Dumping re-assembly queue this will probably hose the association\n");
+
+#endif
+ goto abandon;
+ }
+ }
+ cntDel++;
+ }
+ /* pull it we did it */
+ TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
+ if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
+ asoc->fragmented_delivery_inprogress = 0;
+ if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
+ asoc->strmin[stream_no].last_sequence_delivered++;
+ }
+ if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
+ SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
+ }
+ } else if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
+ /*
+ * turn the flag back on since we just delivered
+ * yet another one.
+ */
+ asoc->fragmented_delivery_inprogress = 1;
+ }
+ asoc->tsn_of_pdapi_last_delivered = chk->rec.data.TSN_seq;
+ asoc->last_flags_delivered = chk->rec.data.rcv_flags;
+ asoc->last_strm_seq_delivered = chk->rec.data.stream_seq;
+ asoc->last_strm_no_delivered = chk->rec.data.stream_number;
+
+ asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
+ asoc->size_on_reasm_queue -= chk->send_size;
+ sctp_ucount_decr(asoc->cnt_on_reasm_queue);
+ /* free up the chk */
+ chk->data = NULL;
+ sctp_free_a_chunk(stcb, chk);
+
+ if (asoc->fragmented_delivery_inprogress == 0) {
+ /*
+			 * Now let's see if we can deliver the next one on
+ * the stream
+ */
+ struct sctp_stream_in *strm;
+
+ strm = &asoc->strmin[stream_no];
+ nxt_todel = strm->last_sequence_delivered + 1;
+ ctl = TAILQ_FIRST(&strm->inqueue);
+ if (ctl && (nxt_todel == ctl->sinfo_ssn)) {
+ while (ctl != NULL) {
+ /* Deliver more if we can. */
+ if (nxt_todel == ctl->sinfo_ssn) {
+ ctlat = TAILQ_NEXT(ctl, next);
+ TAILQ_REMOVE(&strm->inqueue, ctl, next);
+ asoc->size_on_all_streams -= ctl->length;
+ sctp_ucount_decr(asoc->cnt_on_all_streams);
+ strm->last_sequence_delivered++;
+ sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
+ sctp_add_to_readq(stcb->sctp_ep, stcb,
+ ctl,
+ &stcb->sctp_socket->so_rcv, 1,
+ SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
+ ctl = ctlat;
+ } else {
+ break;
+ }
+ nxt_todel = strm->last_sequence_delivered + 1;
+ }
+ }
+ break;
+ }
+ /* sa_ignore FREED_MEMORY */
+ chk = TAILQ_FIRST(&asoc->reasmqueue);
+ } while (chk);
+}
+
+/*
+ * Queue the chunk either right into the socket buffer if it is the next one
+ * to go, OR put it in the correct place in the delivery queue. If we do
+ * append to the so_buf, keep doing so until we are out of order. One big
+ * question still remains: what to do when the socket buffer is FULL??
+ */
+static void
+sctp_queue_data_to_stream(struct sctp_tcb *stcb, struct sctp_association *asoc,
+ struct sctp_queued_to_read *control, int *abort_flag)
+{
+	/*
+	 * FIX-ME maybe? What happens when the ssn wraps? If we are getting
+	 * all the data in one stream this could happen quite rapidly. One
+	 * could use the TSN to keep track of things, but this scheme breaks
+	 * down in the other type of stream usage that could occur. Send a
+	 * single msg to stream 0, send 4 billion messages to stream 1, now
+	 * send a message to stream 0. You have a situation where the TSN
+	 * has wrapped but not in the stream. Is this worth worrying about,
+	 * or should we just change our queue sort at the bottom to be by
+	 * TSN?
+	 *
+	 * Could it also be legal for a peer to send ssn 1 with TSN 2 and ssn 2
+	 * with TSN 1? If the peer is doing some sort of funky TSN/SSN
+	 * assignment this could happen... and I don't see how this would be
+	 * a violation. So for now I am undecided and will leave the sort by
+	 * SSN alone. Maybe a hybrid approach is the answer.
+	 */
+ struct sctp_stream_in *strm;
+ struct sctp_queued_to_read *at;
+ int queue_needed;
+ uint16_t nxt_todel;
+ struct mbuf *oper;
+
+ queue_needed = 1;
+ asoc->size_on_all_streams += control->length;
+ sctp_ucount_incr(asoc->cnt_on_all_streams);
+ strm = &asoc->strmin[control->sinfo_stream];
+ nxt_todel = strm->last_sequence_delivered + 1;
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
+ sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
+ }
+ SCTPDBG(SCTP_DEBUG_INDATA1,
+ "queue to stream called for ssn:%u lastdel:%u nxt:%u\n",
+ (uint32_t) control->sinfo_stream,
+ (uint32_t) strm->last_sequence_delivered,
+ (uint32_t) nxt_todel);
+ if (compare_with_wrap(strm->last_sequence_delivered,
+ control->sinfo_ssn, MAX_SEQ) ||
+ (strm->last_sequence_delivered == control->sinfo_ssn)) {
+ /* The incoming sseq is behind where we last delivered? */
+ SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ:%d delivered:%d from peer, Abort association\n",
+ control->sinfo_ssn, strm->last_sequence_delivered);
+protocol_error:
+ /*
+ * throw it in the stream so it gets cleaned up in
+ * association destruction
+ */
+ TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
+ oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
+ 0, M_DONTWAIT, 1, MT_DATA);
+ if (oper) {
+ struct sctp_paramhdr *ph;
+ uint32_t *ippp;
+
+ SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
+ (sizeof(uint32_t) * 3);
+ ph = mtod(oper, struct sctp_paramhdr *);
+ ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
+ ph->param_length = htons(SCTP_BUF_LEN(oper));
+ ippp = (uint32_t *) (ph + 1);
+ *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_1);
+ ippp++;
+ *ippp = control->sinfo_tsn;
+ ippp++;
+ *ippp = ((control->sinfo_stream << 16) | control->sinfo_ssn);
+ }
+ stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
+ sctp_abort_an_association(stcb->sctp_ep, stcb,
+ SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
+
+ *abort_flag = 1;
+ return;
+
+ }
+ if (nxt_todel == control->sinfo_ssn) {
+ /* can be delivered right away? */
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
+ sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
+ }
+		/* EY it won't be queued if it could be delivered directly */
+ queue_needed = 0;
+ asoc->size_on_all_streams -= control->length;
+ sctp_ucount_decr(asoc->cnt_on_all_streams);
+ strm->last_sequence_delivered++;
+
+ sctp_mark_non_revokable(asoc, control->sinfo_tsn);
+ sctp_add_to_readq(stcb->sctp_ep, stcb,
+ control,
+ &stcb->sctp_socket->so_rcv, 1,
+ SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
+ control = TAILQ_FIRST(&strm->inqueue);
+ while (control != NULL) {
+ /* all delivered */
+ nxt_todel = strm->last_sequence_delivered + 1;
+ if (nxt_todel == control->sinfo_ssn) {
+ at = TAILQ_NEXT(control, next);
+ TAILQ_REMOVE(&strm->inqueue, control, next);
+ asoc->size_on_all_streams -= control->length;
+ sctp_ucount_decr(asoc->cnt_on_all_streams);
+ strm->last_sequence_delivered++;
+ /*
+ * We ignore the return of deliver_data here
+ * since we always can hold the chunk on the
+ * d-queue. And we have a finite number that
+ * can be delivered from the strq.
+ */
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
+ sctp_log_strm_del(control, NULL,
+ SCTP_STR_LOG_FROM_IMMED_DEL);
+ }
+ sctp_mark_non_revokable(asoc, control->sinfo_tsn);
+ sctp_add_to_readq(stcb->sctp_ep, stcb,
+ control,
+ &stcb->sctp_socket->so_rcv, 1,
+ SCTP_READ_LOCK_NOT_HELD,
+ SCTP_SO_NOT_LOCKED);
+ control = at;
+ continue;
+ }
+ break;
+ }
+ }
+ if (queue_needed) {
+ /*
+ * Ok, we did not deliver this guy, find the correct place
+ * to put it on the queue.
+ */
+ if ((compare_with_wrap(asoc->cumulative_tsn,
+ control->sinfo_tsn, MAX_TSN)) ||
+ (control->sinfo_tsn == asoc->cumulative_tsn)) {
+ goto protocol_error;
+ }
+ if (TAILQ_EMPTY(&strm->inqueue)) {
+ /* Empty queue */
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
+ sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INSERT_HD);
+ }
+ TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
+ } else {
+ TAILQ_FOREACH(at, &strm->inqueue, next) {
+ if (compare_with_wrap(at->sinfo_ssn,
+ control->sinfo_ssn, MAX_SEQ)) {
+ /*
+ * one in queue is bigger than the
+ * new one, insert before this one
+ */
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
+ sctp_log_strm_del(control, at,
+ SCTP_STR_LOG_FROM_INSERT_MD);
+ }
+ TAILQ_INSERT_BEFORE(at, control, next);
+ break;
+ } else if (at->sinfo_ssn == control->sinfo_ssn) {
+ /*
+ * Gak, He sent me a duplicate str
+ * seq number
+ */
+ /*
+ * foo bar, I guess I will just free
+ * this new guy, should we abort
+ * too? FIX ME MAYBE? Or it COULD be
+ * that the SSN's have wrapped.
+ * Maybe I should compare to TSN
+ * somehow... sigh for now just blow
+ * away the chunk!
+ */
+
+ if (control->data)
+ sctp_m_freem(control->data);
+ control->data = NULL;
+ asoc->size_on_all_streams -= control->length;
+ sctp_ucount_decr(asoc->cnt_on_all_streams);
+ if (control->whoFrom) {
+ sctp_free_remote_addr(control->whoFrom);
+ control->whoFrom = NULL;
+ }
+ sctp_free_a_readq(stcb, control);
+ return;
+ } else {
+ if (TAILQ_NEXT(at, next) == NULL) {
+ /*
+ * We are at the end, insert
+ * it after this one
+ */
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
+ sctp_log_strm_del(control, at,
+ SCTP_STR_LOG_FROM_INSERT_TL);
+ }
+ TAILQ_INSERT_AFTER(&strm->inqueue,
+ at, control, next);
+ break;
+ }
+ }
+ }
+ }
+ }
+}
+
+/*
+ * Returns two things: the total size of the deliverable parts of the first
+ * fragmented message on the reassembly queue, and a 1 if all of the message
+ * is ready or a 0 if the message is still incomplete.
+ */
+static int
+sctp_is_all_msg_on_reasm(struct sctp_association *asoc, uint32_t * t_size)
+{
+ struct sctp_tmit_chunk *chk;
+ uint32_t tsn;
+
+ *t_size = 0;
+ chk = TAILQ_FIRST(&asoc->reasmqueue);
+ if (chk == NULL) {
+ /* nothing on the queue */
+ return (0);
+ }
+ if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
+ /* Not a first on the queue */
+ return (0);
+ }
+ tsn = chk->rec.data.TSN_seq;
+ while (chk) {
+ if (tsn != chk->rec.data.TSN_seq) {
+ return (0);
+ }
+ *t_size += chk->send_size;
+ if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
+ return (1);
+ }
+ tsn++;
+ chk = TAILQ_NEXT(chk, sctp_next);
+ }
+ return (0);
+}
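+
+/*
+ * Example (illustrative): a message fragmented across TSNs 10 (FIRST),
+ * 11 (MIDDLE) and 12 (LAST) with all three queued returns 1 with *t_size
+ * set to the summed send_size; if TSN 11 is missing the walk stops at
+ * the gap and returns 0.
+ */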
+
+static void
+sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc)
+{
+ struct sctp_tmit_chunk *chk;
+ uint16_t nxt_todel;
+ uint32_t tsize, pd_point;
+
+doit_again:
+ chk = TAILQ_FIRST(&asoc->reasmqueue);
+ if (chk == NULL) {
+ /* Huh? */
+ asoc->size_on_reasm_queue = 0;
+ asoc->cnt_on_reasm_queue = 0;
+ return;
+ }
+ if (asoc->fragmented_delivery_inprogress == 0) {
+ nxt_todel =
+ asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
+ if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
+ (nxt_todel == chk->rec.data.stream_seq ||
+ (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
+			/*
+			 * Yep, the first one is here and it's ok to deliver,
+			 * but should we?
+			 */
+ if (stcb->sctp_socket) {
+ pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket),
+ stcb->sctp_ep->partial_delivery_point);
+ } else {
+ pd_point = stcb->sctp_ep->partial_delivery_point;
+ }
+ if (sctp_is_all_msg_on_reasm(asoc, &tsize) || (tsize >= pd_point)) {
+
+				/*
+				 * Yes, we set up to start reception by
+				 * backing down the TSN just in case we
+				 * can't deliver.
+				 */
+ asoc->fragmented_delivery_inprogress = 1;
+ asoc->tsn_last_delivered =
+ chk->rec.data.TSN_seq - 1;
+ asoc->str_of_pdapi =
+ chk->rec.data.stream_number;
+ asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
+ asoc->pdapi_ppid = chk->rec.data.payloadtype;
+ asoc->fragment_flags = chk->rec.data.rcv_flags;
+ sctp_service_reassembly(stcb, asoc);
+ }
+ }
+ } else {
+		/*
+		 * Service re-assembly will deliver stream data queued at
+		 * the end of fragmented delivery... but it won't know to go
+		 * back and call itself again... we do that here with the
+		 * goto doit_again.
+		 */
+ sctp_service_reassembly(stcb, asoc);
+ if (asoc->fragmented_delivery_inprogress == 0) {
+ /*
+ * finished our Fragmented delivery, could be more
+ * waiting?
+ */
+ goto doit_again;
+ }
+ }
+}
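+
+/*
+ * Example (illustrative): with a 64 KB receive buffer and a partial
+ * delivery point of 4 KB, pd_point = min(65536, 4096) = 4096, so
+ * fragmented delivery starts once either the whole message is queued or
+ * at least 4 KB of its leading fragments are.
+ */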
+
+/*
+ * Dump onto the re-assembly queue, in its proper place. After dumping on the
+ * queue, see if anything can be delivered. If so, pull it off (or as much as
+ * we can). If we run out of space then we must dump what we can and set the
+ * appropriate flag to say we queued what we could.
+ */
+static void
+sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
+ struct sctp_tmit_chunk *chk, int *abort_flag)
+{
+ struct mbuf *oper;
+ uint32_t cum_ackp1, last_tsn, prev_tsn, post_tsn;
+ u_char last_flags;
+ struct sctp_tmit_chunk *at, *prev, *next;
+
+ prev = next = NULL;
+ cum_ackp1 = asoc->tsn_last_delivered + 1;
+ if (TAILQ_EMPTY(&asoc->reasmqueue)) {
+ /* This is the first one on the queue */
+ TAILQ_INSERT_HEAD(&asoc->reasmqueue, chk, sctp_next);
+ /*
+ * we do not check for delivery of anything when only one
+ * fragment is here
+ */
+ asoc->size_on_reasm_queue = chk->send_size;
+ sctp_ucount_incr(asoc->cnt_on_reasm_queue);
+ if (chk->rec.data.TSN_seq == cum_ackp1) {
+ if (asoc->fragmented_delivery_inprogress == 0 &&
+ (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) !=
+ SCTP_DATA_FIRST_FRAG) {
+ /*
+ * An empty queue, no delivery inprogress,
+ * we hit the next one and it does NOT have
+ * a FIRST fragment mark.
+ */
+ SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, its not first, no fragmented delivery in progress\n");
+ oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
+ 0, M_DONTWAIT, 1, MT_DATA);
+
+ if (oper) {
+ struct sctp_paramhdr *ph;
+ uint32_t *ippp;
+
+ SCTP_BUF_LEN(oper) =
+ sizeof(struct sctp_paramhdr) +
+ (sizeof(uint32_t) * 3);
+ ph = mtod(oper, struct sctp_paramhdr *);
+ ph->param_type =
+ htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
+ ph->param_length = htons(SCTP_BUF_LEN(oper));
+ ippp = (uint32_t *) (ph + 1);
+ *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_2);
+ ippp++;
+ *ippp = chk->rec.data.TSN_seq;
+ ippp++;
+ *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
+
+ }
+ stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
+ sctp_abort_an_association(stcb->sctp_ep, stcb,
+ SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
+ *abort_flag = 1;
+ } else if (asoc->fragmented_delivery_inprogress &&
+ (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
+ /*
+ * We are doing a partial delivery and the
+ * NEXT chunk MUST be either the LAST or
+ * MIDDLE fragment NOT a FIRST
+ */
+ SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS a first and fragmented delivery in progress\n");
+ oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
+ 0, M_DONTWAIT, 1, MT_DATA);
+ if (oper) {
+ struct sctp_paramhdr *ph;
+ uint32_t *ippp;
+
+ SCTP_BUF_LEN(oper) =
+ sizeof(struct sctp_paramhdr) +
+ (3 * sizeof(uint32_t));
+ ph = mtod(oper, struct sctp_paramhdr *);
+ ph->param_type =
+ htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
+ ph->param_length = htons(SCTP_BUF_LEN(oper));
+ ippp = (uint32_t *) (ph + 1);
+ *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_3);
+ ippp++;
+ *ippp = chk->rec.data.TSN_seq;
+ ippp++;
+ *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
+ }
+ stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
+ sctp_abort_an_association(stcb->sctp_ep, stcb,
+ SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
+ *abort_flag = 1;
+ } else if (asoc->fragmented_delivery_inprogress) {
+ /*
+ * Here we are ok with a MIDDLE or LAST
+ * piece
+ */
+ if (chk->rec.data.stream_number !=
+ asoc->str_of_pdapi) {
+ /* Got to be the right STR No */
+ SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream number %d vs %d\n",
+ chk->rec.data.stream_number,
+ asoc->str_of_pdapi);
+ oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
+ 0, M_DONTWAIT, 1, MT_DATA);
+ if (oper) {
+ struct sctp_paramhdr *ph;
+ uint32_t *ippp;
+
+ SCTP_BUF_LEN(oper) =
+ sizeof(struct sctp_paramhdr) +
+ (sizeof(uint32_t) * 3);
+ ph = mtod(oper,
+ struct sctp_paramhdr *);
+ ph->param_type =
+ htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
+ ph->param_length =
+ htons(SCTP_BUF_LEN(oper));
+ ippp = (uint32_t *) (ph + 1);
+ *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_4);
+ ippp++;
+ *ippp = chk->rec.data.TSN_seq;
+ ippp++;
+ *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
+ }
+ stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_4;
+ sctp_abort_an_association(stcb->sctp_ep,
+ stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
+ *abort_flag = 1;
+ } else if ((asoc->fragment_flags & SCTP_DATA_UNORDERED) !=
+ SCTP_DATA_UNORDERED &&
+ chk->rec.data.stream_seq != asoc->ssn_of_pdapi) {
+ /* Got to be the right STR Seq */
+ SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream seq %d vs %d\n",
+ chk->rec.data.stream_seq,
+ asoc->ssn_of_pdapi);
+ oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
+ 0, M_DONTWAIT, 1, MT_DATA);
+ if (oper) {
+ struct sctp_paramhdr *ph;
+ uint32_t *ippp;
+
+ SCTP_BUF_LEN(oper) =
+ sizeof(struct sctp_paramhdr) +
+ (3 * sizeof(uint32_t));
+ ph = mtod(oper,
+ struct sctp_paramhdr *);
+ ph->param_type =
+ htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
+ ph->param_length =
+ htons(SCTP_BUF_LEN(oper));
+ ippp = (uint32_t *) (ph + 1);
+ *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_5);
+ ippp++;
+ *ippp = chk->rec.data.TSN_seq;
+ ippp++;
+ *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
+
+ }
+ stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_5;
+ sctp_abort_an_association(stcb->sctp_ep,
+ stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
+ *abort_flag = 1;
+ }
+ }
+ }
+ return;
+ }
+ /* Find its place */
+ TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
+ if (compare_with_wrap(at->rec.data.TSN_seq,
+ chk->rec.data.TSN_seq, MAX_TSN)) {
+ /*
+ * one in queue is bigger than the new one, insert
+ * before this one
+ */
+ /* A check */
+ asoc->size_on_reasm_queue += chk->send_size;
+ sctp_ucount_incr(asoc->cnt_on_reasm_queue);
+ next = at;
+ TAILQ_INSERT_BEFORE(at, chk, sctp_next);
+ break;
+ } else if (at->rec.data.TSN_seq == chk->rec.data.TSN_seq) {
+ /* Gak, He sent me a duplicate str seq number */
+ /*
+ * foo bar, I guess I will just free this new guy,
+ * should we abort too? FIX ME MAYBE? Or it COULD be
+ * that the SSN's have wrapped. Maybe I should
+ * compare to TSN somehow... sigh for now just blow
+ * away the chunk!
+ */
+ if (chk->data) {
+ sctp_m_freem(chk->data);
+ chk->data = NULL;
+ }
+ sctp_free_a_chunk(stcb, chk);
+ return;
+ } else {
+ last_flags = at->rec.data.rcv_flags;
+ last_tsn = at->rec.data.TSN_seq;
+ prev = at;
+ if (TAILQ_NEXT(at, sctp_next) == NULL) {
+ /*
+ * We are at the end, insert it after this
+ * one
+ */
+ /* check it first */
+ asoc->size_on_reasm_queue += chk->send_size;
+ sctp_ucount_incr(asoc->cnt_on_reasm_queue);
+ TAILQ_INSERT_AFTER(&asoc->reasmqueue, at, chk, sctp_next);
+ break;
+ }
+ }
+ }
+ /* Now the audits */
+ if (prev) {
+ prev_tsn = chk->rec.data.TSN_seq - 1;
+ if (prev_tsn == prev->rec.data.TSN_seq) {
+ /*
+ * Ok the one I am dropping onto the end is the
+			 * NEXT. A bit of validation here.
+ */
+ if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
+ SCTP_DATA_FIRST_FRAG ||
+ (prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
+ SCTP_DATA_MIDDLE_FRAG) {
+ /*
+ * Insert chk MUST be a MIDDLE or LAST
+ * fragment
+ */
+ if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
+ SCTP_DATA_FIRST_FRAG) {
+					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - It can be a middle or last but not a first\n");
+ SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it's a FIRST!\n");
+ oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
+ 0, M_DONTWAIT, 1, MT_DATA);
+ if (oper) {
+ struct sctp_paramhdr *ph;
+ uint32_t *ippp;
+
+ SCTP_BUF_LEN(oper) =
+ sizeof(struct sctp_paramhdr) +
+ (3 * sizeof(uint32_t));
+ ph = mtod(oper,
+ struct sctp_paramhdr *);
+ ph->param_type =
+ htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
+ ph->param_length =
+ htons(SCTP_BUF_LEN(oper));
+ ippp = (uint32_t *) (ph + 1);
+ *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_6);
+ ippp++;
+ *ippp = chk->rec.data.TSN_seq;
+ ippp++;
+ *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
+
+ }
+ stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_6;
+ sctp_abort_an_association(stcb->sctp_ep,
+ stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
+ *abort_flag = 1;
+ return;
+ }
+ if (chk->rec.data.stream_number !=
+ prev->rec.data.stream_number) {
+ /*
+ * Huh, need the correct STR here,
+ * they must be the same.
+ */
+ SCTP_PRINTF("Prev check - Gak, Evil plot, ssn:%d not the same as at:%d\n",
+ chk->rec.data.stream_number,
+ prev->rec.data.stream_number);
+ oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
+ 0, M_DONTWAIT, 1, MT_DATA);
+ if (oper) {
+ struct sctp_paramhdr *ph;
+ uint32_t *ippp;
+
+ SCTP_BUF_LEN(oper) =
+ sizeof(struct sctp_paramhdr) +
+ (3 * sizeof(uint32_t));
+ ph = mtod(oper,
+ struct sctp_paramhdr *);
+ ph->param_type =
+ htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
+ ph->param_length =
+ htons(SCTP_BUF_LEN(oper));
+ ippp = (uint32_t *) (ph + 1);
+ *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_7);
+ ippp++;
+ *ippp = chk->rec.data.TSN_seq;
+ ippp++;
+ *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
+ }
+ stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_7;
+ sctp_abort_an_association(stcb->sctp_ep,
+ stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
+
+ *abort_flag = 1;
+ return;
+ }
+ if ((prev->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
+ chk->rec.data.stream_seq !=
+ prev->rec.data.stream_seq) {
+ /*
+ * Huh, need the correct STR here,
+ * they must be the same.
+ */
+ SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, Evil plot, sseq:%d not the same as at:%d\n",
+ chk->rec.data.stream_seq,
+ prev->rec.data.stream_seq);
+ oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
+ 0, M_DONTWAIT, 1, MT_DATA);
+ if (oper) {
+ struct sctp_paramhdr *ph;
+ uint32_t *ippp;
+
+ SCTP_BUF_LEN(oper) =
+ sizeof(struct sctp_paramhdr) +
+ (3 * sizeof(uint32_t));
+ ph = mtod(oper,
+ struct sctp_paramhdr *);
+ ph->param_type =
+ htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
+ ph->param_length =
+ htons(SCTP_BUF_LEN(oper));
+ ippp = (uint32_t *) (ph + 1);
+ *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_8);
+ ippp++;
+ *ippp = chk->rec.data.TSN_seq;
+ ippp++;
+ *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
+ }
+ stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_8;
+ sctp_abort_an_association(stcb->sctp_ep,
+ stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
+
+ *abort_flag = 1;
+ return;
+ }
+ } else if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
+ SCTP_DATA_LAST_FRAG) {
+ /* Insert chk MUST be a FIRST */
+ if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
+ SCTP_DATA_FIRST_FRAG) {
+ SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, evil plot, its not FIRST and it must be!\n");
+ oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
+ 0, M_DONTWAIT, 1, MT_DATA);
+ if (oper) {
+ struct sctp_paramhdr *ph;
+ uint32_t *ippp;
+
+ SCTP_BUF_LEN(oper) =
+ sizeof(struct sctp_paramhdr) +
+ (3 * sizeof(uint32_t));
+ ph = mtod(oper,
+ struct sctp_paramhdr *);
+ ph->param_type =
+ htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
+ ph->param_length =
+ htons(SCTP_BUF_LEN(oper));
+ ippp = (uint32_t *) (ph + 1);
+ *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_9);
+ ippp++;
+ *ippp = chk->rec.data.TSN_seq;
+ ippp++;
+ *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
+
+ }
+ stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_9;
+ sctp_abort_an_association(stcb->sctp_ep,
+ stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
+
+ *abort_flag = 1;
+ return;
+ }
+ }
+ }
+ }
+ if (next) {
+ post_tsn = chk->rec.data.TSN_seq + 1;
+ if (post_tsn == next->rec.data.TSN_seq) {
+ /*
+ * Ok the one I am inserting ahead of is my NEXT
+			 * one. A bit of validation here.
+ */
+ if (next->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
+ /* Insert chk MUST be a last fragment */
+ if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK)
+ != SCTP_DATA_LAST_FRAG) {
+ SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is FIRST, we must be LAST\n");
+ SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, its not a last!\n");
+ oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
+ 0, M_DONTWAIT, 1, MT_DATA);
+ if (oper) {
+ struct sctp_paramhdr *ph;
+ uint32_t *ippp;
+
+ SCTP_BUF_LEN(oper) =
+ sizeof(struct sctp_paramhdr) +
+ (3 * sizeof(uint32_t));
+ ph = mtod(oper,
+ struct sctp_paramhdr *);
+ ph->param_type =
+ htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
+ ph->param_length =
+ htons(SCTP_BUF_LEN(oper));
+ ippp = (uint32_t *) (ph + 1);
+ *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_10);
+ ippp++;
+ *ippp = chk->rec.data.TSN_seq;
+ ippp++;
+ *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
+ }
+ stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_10;
+ sctp_abort_an_association(stcb->sctp_ep,
+ stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
+
+ *abort_flag = 1;
+ return;
+ }
+ } else if ((next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
+ SCTP_DATA_MIDDLE_FRAG ||
+ (next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
+ SCTP_DATA_LAST_FRAG) {
+ /*
+ * Insert chk CAN be MIDDLE or FIRST NOT
+ * LAST
+ */
+ if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
+ SCTP_DATA_LAST_FRAG) {
+ SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is a MIDDLE/LAST\n");
+ SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, new prev chunk is a LAST\n");
+ oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
+ 0, M_DONTWAIT, 1, MT_DATA);
+ if (oper) {
+ struct sctp_paramhdr *ph;
+ uint32_t *ippp;
+
+ SCTP_BUF_LEN(oper) =
+ sizeof(struct sctp_paramhdr) +
+ (3 * sizeof(uint32_t));
+ ph = mtod(oper,
+ struct sctp_paramhdr *);
+ ph->param_type =
+ htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
+ ph->param_length =
+ htons(SCTP_BUF_LEN(oper));
+ ippp = (uint32_t *) (ph + 1);
+ *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_11);
+ ippp++;
+ *ippp = chk->rec.data.TSN_seq;
+ ippp++;
+ *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
+
+ }
+ stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_11;
+ sctp_abort_an_association(stcb->sctp_ep,
+ stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
+
+ *abort_flag = 1;
+ return;
+ }
+ if (chk->rec.data.stream_number !=
+ next->rec.data.stream_number) {
+ /*
+ * Huh, need the correct STR here,
+ * they must be the same.
+ */
+ SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, ssn:%d not the same as at:%d\n",
+ chk->rec.data.stream_number,
+ next->rec.data.stream_number);
+ oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
+ 0, M_DONTWAIT, 1, MT_DATA);
+ if (oper) {
+ struct sctp_paramhdr *ph;
+ uint32_t *ippp;
+
+ SCTP_BUF_LEN(oper) =
+ sizeof(struct sctp_paramhdr) +
+ (3 * sizeof(uint32_t));
+ ph = mtod(oper,
+ struct sctp_paramhdr *);
+ ph->param_type =
+ htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
+ ph->param_length =
+ htons(SCTP_BUF_LEN(oper));
+ ippp = (uint32_t *) (ph + 1);
+ *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_12);
+ ippp++;
+ *ippp = chk->rec.data.TSN_seq;
+ ippp++;
+ *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
+
+ }
+ stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_12;
+ sctp_abort_an_association(stcb->sctp_ep,
+ stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
+
+ *abort_flag = 1;
+ return;
+ }
+ if ((next->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
+ chk->rec.data.stream_seq !=
+ next->rec.data.stream_seq) {
+ /*
+ * Huh, need the correct STR here,
+ * they must be the same.
+ */
+ SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, sseq:%d not the same as at:%d\n",
+ chk->rec.data.stream_seq,
+ next->rec.data.stream_seq);
+ oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
+ 0, M_DONTWAIT, 1, MT_DATA);
+ if (oper) {
+ struct sctp_paramhdr *ph;
+ uint32_t *ippp;
+
+ SCTP_BUF_LEN(oper) =
+ sizeof(struct sctp_paramhdr) +
+ (3 * sizeof(uint32_t));
+ ph = mtod(oper,
+ struct sctp_paramhdr *);
+ ph->param_type =
+ htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
+ ph->param_length =
+ htons(SCTP_BUF_LEN(oper));
+ ippp = (uint32_t *) (ph + 1);
+ *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_13);
+ ippp++;
+ *ippp = chk->rec.data.TSN_seq;
+ ippp++;
+ *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
+ }
+ stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_13;
+ sctp_abort_an_association(stcb->sctp_ep,
+ stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
+
+ *abort_flag = 1;
+ return;
+ }
+ }
+ }
+ }
+ /* Do we need to do some delivery? check */
+ sctp_deliver_reasm_check(stcb, asoc);
+}
+
+/*
+ * This is an unfortunate routine. It checks to make sure an evil guy is not
+ * stuffing us full of bad packet fragments. A broken peer could also do this
+ * but this is doubtful. It is too bad I must worry about evil crackers sigh
+ * :< more cycles.
+ */
+static int
+sctp_does_tsn_belong_to_reasm(struct sctp_association *asoc,
+ uint32_t TSN_seq)
+{
+ struct sctp_tmit_chunk *at;
+ uint32_t tsn_est;
+
+ TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
+ if (compare_with_wrap(TSN_seq,
+ at->rec.data.TSN_seq, MAX_TSN)) {
+ /* is it one bigger? */
+ tsn_est = at->rec.data.TSN_seq + 1;
+ if (tsn_est == TSN_seq) {
+ /* yep. It better be a last then */
+ if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
+ SCTP_DATA_LAST_FRAG) {
+ /*
+ * Ok this guy belongs next to a guy
+ * that is NOT last, it should be a
+ * middle/last, not a complete
+ * chunk.
+ */
+ return (1);
+ } else {
+				/*
+				 * This guy is ok since it's a LAST and
+				 * the new chunk is a fully self-contained
+				 * one.
+				 */
+ return (0);
+ }
+ }
+ } else if (TSN_seq == at->rec.data.TSN_seq) {
+ /* Software error since I have a dup? */
+ return (1);
+ } else {
+			/*
+			 * Ok, 'at' is larger than the new chunk, but does
+			 * it need to be right before it?
+			 */
+ tsn_est = TSN_seq + 1;
+ if (tsn_est == at->rec.data.TSN_seq) {
+ /* Yep, It better be a first */
+ if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
+ SCTP_DATA_FIRST_FRAG) {
+ return (1);
+ } else {
+ return (0);
+ }
+ }
+ }
+ }
+ return (0);
+}
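+
+/*
+ * A worked example of the adjacency rules above (illustrative numbers, not
+ * from the original sources): suppose the reassembly queue holds TSN 100
+ * flagged MIDDLE. An arriving complete chunk with TSN 101 would sit directly
+ * after a non-LAST fragment, so it cannot stand alone and the routine
+ * returns 1 (it belongs to the reassembly). Had TSN 100 been flagged LAST,
+ * the message boundary is closed, TSN 101 may stand alone, and the routine
+ * returns 0.
+ */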
+
+
+static int
+sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
+ struct mbuf **m, int offset, struct sctp_data_chunk *ch, int chk_length,
+ struct sctp_nets *net, uint32_t * high_tsn, int *abort_flag,
+ int *break_flag, int last_chunk)
+{
+ /* Process a data chunk */
+ struct sctp_tmit_chunk *chk;
+ uint32_t tsn, gap;
+ struct mbuf *dmbuf;
+ int indx, the_len;
+ int need_reasm_check = 0;
+ uint16_t strmno, strmseq;
+ struct mbuf *oper;
+ struct sctp_queued_to_read *control;
+ int ordered;
+ uint32_t protocol_id;
+ uint8_t chunk_flags;
+ struct sctp_stream_reset_list *liste;
+
+ chk = NULL;
+ tsn = ntohl(ch->dp.tsn);
+ chunk_flags = ch->ch.chunk_flags;
+ if ((chunk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
+ asoc->send_sack = 1;
+ }
+ protocol_id = ch->dp.protocol_id;
+ ordered = ((chunk_flags & SCTP_DATA_UNORDERED) == 0);
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
+ sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
+ }
+ if (stcb == NULL) {
+ return (0);
+ }
+ SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, ch->ch.chunk_type, tsn);
+ if (compare_with_wrap(asoc->cumulative_tsn, tsn, MAX_TSN) ||
+ asoc->cumulative_tsn == tsn) {
+ /* It is a duplicate */
+ SCTP_STAT_INCR(sctps_recvdupdata);
+ if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
+ /* Record a dup for the next outbound sack */
+ asoc->dup_tsns[asoc->numduptsns] = tsn;
+ asoc->numduptsns++;
+ }
+ asoc->send_sack = 1;
+ return (0);
+ }
+ /* Calculate the number of TSN's between the base and this TSN */
+ SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
+ if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
+ /* Can't hold the bit in the mapping at max array, toss it */
+ return (0);
+ }
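+	/*
+	 * Note on the arithmetic above: the mapping array is sized in bytes
+	 * and each byte tracks 8 TSNs, hence the << 3. A gap of
+	 * SCTP_MAPPING_ARRAY * 8 TSNs or more past the base cannot be
+	 * represented even at the maximum array size, so such a chunk is
+	 * silently dropped.
+	 */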
+ if (gap >= (uint32_t) (asoc->mapping_array_size << 3)) {
+ SCTP_TCB_LOCK_ASSERT(stcb);
+ if (sctp_expand_mapping_array(asoc, gap)) {
+ /* Can't expand, drop it */
+ return (0);
+ }
+ }
+ if (compare_with_wrap(tsn, *high_tsn, MAX_TSN)) {
+ *high_tsn = tsn;
+ }
+ /* See if we have received this one already */
+ if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) ||
+ SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) {
+ SCTP_STAT_INCR(sctps_recvdupdata);
+ if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
+ /* Record a dup for the next outbound sack */
+ asoc->dup_tsns[asoc->numduptsns] = tsn;
+ asoc->numduptsns++;
+ }
+ asoc->send_sack = 1;
+ return (0);
+ }
+	/*
+	 * Check the GONE flag; duplicates would already have caused a sack
+	 * to be sent up above.
+	 */
+ if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
+ (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
+ (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))
+ ) {
+ /*
+ * wait a minute, this guy is gone, there is no longer a
+ * receiver. Send peer an ABORT!
+ */
+ struct mbuf *op_err;
+
+ op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);
+ sctp_abort_an_association(stcb->sctp_ep, stcb, 0, op_err, SCTP_SO_NOT_LOCKED);
+ *abort_flag = 1;
+ return (0);
+ }
+ /*
+ * Now before going further we see if there is room. If NOT then we
+ * MAY let one through only IF this TSN is the one we are waiting
+ * for on a partial delivery API.
+ */
+
+ /* now do the tests */
+ if (((asoc->cnt_on_all_streams +
+ asoc->cnt_on_reasm_queue +
+ asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
+ (((int)asoc->my_rwnd) <= 0)) {
+ /*
+ * When we have NO room in the rwnd we check to make sure
+ * the reader is doing its job...
+ */
+ if (stcb->sctp_socket->so_rcv.sb_cc) {
+ /* some to read, wake-up */
+#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+ struct socket *so;
+
+ so = SCTP_INP_SO(stcb->sctp_ep);
+ atomic_add_int(&stcb->asoc.refcnt, 1);
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_SOCKET_LOCK(so, 1);
+ SCTP_TCB_LOCK(stcb);
+ atomic_subtract_int(&stcb->asoc.refcnt, 1);
+ if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
+ /* assoc was freed while we were unlocked */
+ SCTP_SOCKET_UNLOCK(so, 1);
+ return (0);
+ }
+#endif
+ sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
+#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+ SCTP_SOCKET_UNLOCK(so, 1);
+#endif
+ }
+ /* now is it in the mapping array of what we have accepted? */
+ if (compare_with_wrap(tsn, asoc->highest_tsn_inside_map, MAX_TSN) &&
+ compare_with_wrap(tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN)) {
+ /* Nope not in the valid range dump it */
+ sctp_set_rwnd(stcb, asoc);
+ if ((asoc->cnt_on_all_streams +
+ asoc->cnt_on_reasm_queue +
+ asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
+ SCTP_STAT_INCR(sctps_datadropchklmt);
+ } else {
+ SCTP_STAT_INCR(sctps_datadroprwnd);
+ }
+ indx = *break_flag;
+ *break_flag = 1;
+ return (0);
+ }
+ }
+ strmno = ntohs(ch->dp.stream_id);
+ if (strmno >= asoc->streamincnt) {
+ struct sctp_paramhdr *phdr;
+ struct mbuf *mb;
+
+ mb = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) * 2),
+ 0, M_DONTWAIT, 1, MT_DATA);
+ if (mb != NULL) {
+ /* add some space up front so prepend will work well */
+ SCTP_BUF_RESV_UF(mb, sizeof(struct sctp_chunkhdr));
+ phdr = mtod(mb, struct sctp_paramhdr *);
+			/*
+			 * Error causes are just params and this one has
+			 * two back-to-back phdrs: one with the error type
+			 * and size, the other with the stream id and a
+			 * reserved field.
+			 */
+ SCTP_BUF_LEN(mb) = (sizeof(struct sctp_paramhdr) * 2);
+ phdr->param_type = htons(SCTP_CAUSE_INVALID_STREAM);
+ phdr->param_length =
+ htons(sizeof(struct sctp_paramhdr) * 2);
+ phdr++;
+ /* We insert the stream in the type field */
+ phdr->param_type = ch->dp.stream_id;
+ /* And set the length to 0 for the rsvd field */
+ phdr->param_length = 0;
+ sctp_queue_op_err(stcb, mb);
+ }
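+		/*
+		 * Layout of the cause built above (8 bytes total): the
+		 * first paramhdr carries SCTP_CAUSE_INVALID_STREAM and the
+		 * overall length; the second re-uses param_type for the
+		 * offending stream id (already in network byte order) and
+		 * param_length as the zeroed reserved field.
+		 */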
+ SCTP_STAT_INCR(sctps_badsid);
+ SCTP_TCB_LOCK_ASSERT(stcb);
+ SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
+ if (compare_with_wrap(tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN)) {
+ asoc->highest_tsn_inside_nr_map = tsn;
+ }
+ if (tsn == (asoc->cumulative_tsn + 1)) {
+ /* Update cum-ack */
+ asoc->cumulative_tsn = tsn;
+ }
+ return (0);
+ }
+	/*
+	 * Before we continue let's validate that we are not being fooled by
+	 * an evil attacker. We can only have 4k chunks based on our TSN
+	 * spread allowed by the mapping array (512 * 8 bits), so there is no
+	 * way our stream sequence numbers could have wrapped. We of course
+	 * only validate the FIRST fragment, so the bit must be set.
+	 */
+ strmseq = ntohs(ch->dp.stream_sequence);
+#ifdef SCTP_ASOCLOG_OF_TSNS
+ SCTP_TCB_LOCK_ASSERT(stcb);
+ if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
+ asoc->tsn_in_at = 0;
+ asoc->tsn_in_wrapped = 1;
+ }
+ asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
+ asoc->in_tsnlog[asoc->tsn_in_at].strm = strmno;
+ asoc->in_tsnlog[asoc->tsn_in_at].seq = strmseq;
+ asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
+ asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags;
+ asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
+ asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
+ asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
+ asoc->tsn_in_at++;
+#endif
+ if ((chunk_flags & SCTP_DATA_FIRST_FRAG) &&
+ (TAILQ_EMPTY(&asoc->resetHead)) &&
+ (chunk_flags & SCTP_DATA_UNORDERED) == 0 &&
+ (compare_with_wrap(asoc->strmin[strmno].last_sequence_delivered,
+ strmseq, MAX_SEQ) ||
+ asoc->strmin[strmno].last_sequence_delivered == strmseq)) {
+ /* The incoming sseq is behind where we last delivered? */
+ SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ:%d delivered:%d from peer, Abort!\n",
+ strmseq, asoc->strmin[strmno].last_sequence_delivered);
+ oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
+ 0, M_DONTWAIT, 1, MT_DATA);
+ if (oper) {
+ struct sctp_paramhdr *ph;
+ uint32_t *ippp;
+
+ SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
+ (3 * sizeof(uint32_t));
+ ph = mtod(oper, struct sctp_paramhdr *);
+ ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
+ ph->param_length = htons(SCTP_BUF_LEN(oper));
+ ippp = (uint32_t *) (ph + 1);
+ *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_14);
+ ippp++;
+ *ippp = tsn;
+ ippp++;
+ *ippp = ((strmno << 16) | strmseq);
+
+ }
+ stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_14;
+ sctp_abort_an_association(stcb->sctp_ep, stcb,
+ SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
+ *abort_flag = 1;
+ return (0);
+ }
+	/************************************
+	 * From here down we may find ch-> invalid
+	 * so it's a good idea NOT to use it.
+	 ************************************/
+
+ the_len = (chk_length - sizeof(struct sctp_data_chunk));
+ if (last_chunk == 0) {
+ dmbuf = SCTP_M_COPYM(*m,
+ (offset + sizeof(struct sctp_data_chunk)),
+ the_len, M_DONTWAIT);
+#ifdef SCTP_MBUF_LOGGING
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
+ struct mbuf *mat;
+
+ mat = dmbuf;
+ while (mat) {
+ if (SCTP_BUF_IS_EXTENDED(mat)) {
+ sctp_log_mb(mat, SCTP_MBUF_ICOPY);
+ }
+ mat = SCTP_BUF_NEXT(mat);
+ }
+ }
+#endif
+ } else {
+ /* We can steal the last chunk */
+ int l_len;
+
+ dmbuf = *m;
+ /* lop off the top part */
+ m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
+ if (SCTP_BUF_NEXT(dmbuf) == NULL) {
+ l_len = SCTP_BUF_LEN(dmbuf);
+ } else {
+			/*
+			 * need to count up the size; hopefully we do not
+			 * hit this too often :-0
+			 */
+ struct mbuf *lat;
+
+ l_len = 0;
+ lat = dmbuf;
+ while (lat) {
+ l_len += SCTP_BUF_LEN(lat);
+ lat = SCTP_BUF_NEXT(lat);
+ }
+ }
+ if (l_len > the_len) {
+			/* Trim the excess (padding) bytes off the end too */
+ m_adj(dmbuf, -(l_len - the_len));
+ }
+ }
+ if (dmbuf == NULL) {
+ SCTP_STAT_INCR(sctps_nomem);
+ return (0);
+ }
+ if ((chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
+ asoc->fragmented_delivery_inprogress == 0 &&
+ TAILQ_EMPTY(&asoc->resetHead) &&
+ ((ordered == 0) ||
+ ((uint16_t) (asoc->strmin[strmno].last_sequence_delivered + 1) == strmseq &&
+ TAILQ_EMPTY(&asoc->strmin[strmno].inqueue)))) {
+ /* Candidate for express delivery */
+		/*
+		 * It's not fragmented, no PD-API is up, nothing is in the
+		 * delivery queue, it's un-ordered OR ordered and the next to
+		 * deliver AND nothing else is stuck on the stream queue,
+		 * and there is room for it in the socket buffer. Let's just
+		 * stuff it up the buffer....
+		 */
+
+ /* It would be nice to avoid this copy if we could :< */
+ sctp_alloc_a_readq(stcb, control);
+ sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
+ protocol_id,
+ stcb->asoc.context,
+ strmno, strmseq,
+ chunk_flags,
+ dmbuf);
+ if (control == NULL) {
+ goto failed_express_del;
+ }
+ SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
+ if (compare_with_wrap(tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN)) {
+ asoc->highest_tsn_inside_nr_map = tsn;
+ }
+ sctp_add_to_readq(stcb->sctp_ep, stcb,
+ control, &stcb->sctp_socket->so_rcv,
+ 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
+
+ if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
+ /* for ordered, bump what we delivered */
+ asoc->strmin[strmno].last_sequence_delivered++;
+ }
+ SCTP_STAT_INCR(sctps_recvexpress);
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
+ sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno,
+ SCTP_STR_LOG_FROM_EXPRS_DEL);
+ }
+ control = NULL;
+
+ goto finish_express_del;
+ }
+failed_express_del:
+ /* If we reach here this is a new chunk */
+ chk = NULL;
+ control = NULL;
+ /* Express for fragmented delivery? */
+ if ((asoc->fragmented_delivery_inprogress) &&
+ (stcb->asoc.control_pdapi) &&
+ (asoc->str_of_pdapi == strmno) &&
+ (asoc->ssn_of_pdapi == strmseq)
+ ) {
+ control = stcb->asoc.control_pdapi;
+ if ((chunk_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
+ /* Can't be another first? */
+ goto failed_pdapi_express_del;
+ }
+ if (tsn == (control->sinfo_tsn + 1)) {
+ /* Yep, we can add it on */
+ int end = 0;
+ uint32_t cumack;
+
+ if (chunk_flags & SCTP_DATA_LAST_FRAG) {
+ end = 1;
+ }
+ cumack = asoc->cumulative_tsn;
+ if ((cumack + 1) == tsn)
+ cumack = tsn;
+
+ if (sctp_append_to_readq(stcb->sctp_ep, stcb, control, dmbuf, end,
+ tsn,
+ &stcb->sctp_socket->so_rcv)) {
+ SCTP_PRINTF("Append fails end:%d\n", end);
+ goto failed_pdapi_express_del;
+ }
+ SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
+ if (compare_with_wrap(tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN)) {
+ asoc->highest_tsn_inside_nr_map = tsn;
+ }
+ SCTP_STAT_INCR(sctps_recvexpressm);
+ control->sinfo_tsn = tsn;
+ asoc->tsn_last_delivered = tsn;
+ asoc->fragment_flags = chunk_flags;
+ asoc->tsn_of_pdapi_last_delivered = tsn;
+ asoc->last_flags_delivered = chunk_flags;
+ asoc->last_strm_seq_delivered = strmseq;
+ asoc->last_strm_no_delivered = strmno;
+ if (end) {
+ /* clean up the flags and such */
+ asoc->fragmented_delivery_inprogress = 0;
+ if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
+ asoc->strmin[strmno].last_sequence_delivered++;
+ }
+ stcb->asoc.control_pdapi = NULL;
+ if (TAILQ_EMPTY(&asoc->reasmqueue) == 0) {
+ /*
+ * There could be another message
+ * ready
+ */
+ need_reasm_check = 1;
+ }
+ }
+ control = NULL;
+ goto finish_express_del;
+ }
+ }
+failed_pdapi_express_del:
+ control = NULL;
+ if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
+ SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
+ if (compare_with_wrap(tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN)) {
+ asoc->highest_tsn_inside_nr_map = tsn;
+ }
+ } else {
+ SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
+ if (compare_with_wrap(tsn, asoc->highest_tsn_inside_map, MAX_TSN)) {
+ asoc->highest_tsn_inside_map = tsn;
+ }
+ }
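+	/*
+	 * The two maps appear to differ in renegability: TSNs set in
+	 * nr_mapping_array are treated as non-renegable (reportable in
+	 * NR-SACKs), while running with sctp_do_drain enabled keeps TSNs in
+	 * the plain mapping_array so they can still be reneged on if the
+	 * stack must drain memory under pressure.
+	 */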
+ if ((chunk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
+ sctp_alloc_a_chunk(stcb, chk);
+ if (chk == NULL) {
+ /* No memory so we drop the chunk */
+ SCTP_STAT_INCR(sctps_nomem);
+ if (last_chunk == 0) {
+ /* we copied it, free the copy */
+ sctp_m_freem(dmbuf);
+ }
+ return (0);
+ }
+ chk->rec.data.TSN_seq = tsn;
+ chk->no_fr_allowed = 0;
+ chk->rec.data.stream_seq = strmseq;
+ chk->rec.data.stream_number = strmno;
+ chk->rec.data.payloadtype = protocol_id;
+ chk->rec.data.context = stcb->asoc.context;
+ chk->rec.data.doing_fast_retransmit = 0;
+ chk->rec.data.rcv_flags = chunk_flags;
+ chk->asoc = asoc;
+ chk->send_size = the_len;
+ chk->whoTo = net;
+ atomic_add_int(&net->ref_count, 1);
+ chk->data = dmbuf;
+ } else {
+ sctp_alloc_a_readq(stcb, control);
+ sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
+ protocol_id,
+ stcb->asoc.context,
+ strmno, strmseq,
+ chunk_flags,
+ dmbuf);
+ if (control == NULL) {
+ /* No memory so we drop the chunk */
+ SCTP_STAT_INCR(sctps_nomem);
+ if (last_chunk == 0) {
+ /* we copied it, free the copy */
+ sctp_m_freem(dmbuf);
+ }
+ return (0);
+ }
+ control->length = the_len;
+ }
+
+ /* Mark it as received */
+ /* Now queue it where it belongs */
+ if (control != NULL) {
+ /* First a sanity check */
+ if (asoc->fragmented_delivery_inprogress) {
+			/*
+			 * Ok, we have a fragmented delivery in progress. If
+			 * this chunk is the next to deliver OR belongs, in
+			 * our view, on the reassembly queue, the peer is
+			 * evil or broken.
+			 */
+ uint32_t estimate_tsn;
+
+ estimate_tsn = asoc->tsn_last_delivered + 1;
+ if (TAILQ_EMPTY(&asoc->reasmqueue) &&
+ (estimate_tsn == control->sinfo_tsn)) {
+				/* Evil/Broken peer */
+ sctp_m_freem(control->data);
+ control->data = NULL;
+ if (control->whoFrom) {
+ sctp_free_remote_addr(control->whoFrom);
+ control->whoFrom = NULL;
+ }
+ sctp_free_a_readq(stcb, control);
+ oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
+ 0, M_DONTWAIT, 1, MT_DATA);
+ if (oper) {
+ struct sctp_paramhdr *ph;
+ uint32_t *ippp;
+
+ SCTP_BUF_LEN(oper) =
+ sizeof(struct sctp_paramhdr) +
+ (3 * sizeof(uint32_t));
+ ph = mtod(oper, struct sctp_paramhdr *);
+ ph->param_type =
+ htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
+ ph->param_length = htons(SCTP_BUF_LEN(oper));
+ ippp = (uint32_t *) (ph + 1);
+ *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_15);
+ ippp++;
+ *ippp = tsn;
+ ippp++;
+ *ippp = ((strmno << 16) | strmseq);
+ }
+ stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_15;
+ sctp_abort_an_association(stcb->sctp_ep, stcb,
+ SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
+
+ *abort_flag = 1;
+ return (0);
+ } else {
+ if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
+ sctp_m_freem(control->data);
+ control->data = NULL;
+ if (control->whoFrom) {
+ sctp_free_remote_addr(control->whoFrom);
+ control->whoFrom = NULL;
+ }
+ sctp_free_a_readq(stcb, control);
+
+ oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
+ 0, M_DONTWAIT, 1, MT_DATA);
+ if (oper) {
+ struct sctp_paramhdr *ph;
+ uint32_t *ippp;
+
+ SCTP_BUF_LEN(oper) =
+ sizeof(struct sctp_paramhdr) +
+ (3 * sizeof(uint32_t));
+ ph = mtod(oper,
+ struct sctp_paramhdr *);
+ ph->param_type =
+ htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
+ ph->param_length =
+ htons(SCTP_BUF_LEN(oper));
+ ippp = (uint32_t *) (ph + 1);
+ *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_16);
+ ippp++;
+ *ippp = tsn;
+ ippp++;
+ *ippp = ((strmno << 16) | strmseq);
+ }
+ stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
+ sctp_abort_an_association(stcb->sctp_ep,
+ stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
+
+ *abort_flag = 1;
+ return (0);
+ }
+ }
+ } else {
+ /* No PDAPI running */
+ if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
+				/*
+				 * The reassembly queue is NOT empty:
+				 * validate that this tsn does not need to
+				 * be on the reassembly queue. If it does
+				 * then our peer is broken or evil.
+				 */
+ if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
+ sctp_m_freem(control->data);
+ control->data = NULL;
+ if (control->whoFrom) {
+ sctp_free_remote_addr(control->whoFrom);
+ control->whoFrom = NULL;
+ }
+ sctp_free_a_readq(stcb, control);
+ oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
+ 0, M_DONTWAIT, 1, MT_DATA);
+ if (oper) {
+ struct sctp_paramhdr *ph;
+ uint32_t *ippp;
+
+ SCTP_BUF_LEN(oper) =
+ sizeof(struct sctp_paramhdr) +
+ (3 * sizeof(uint32_t));
+ ph = mtod(oper,
+ struct sctp_paramhdr *);
+ ph->param_type =
+ htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
+ ph->param_length =
+ htons(SCTP_BUF_LEN(oper));
+ ippp = (uint32_t *) (ph + 1);
+ *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_17);
+ ippp++;
+ *ippp = tsn;
+ ippp++;
+ *ippp = ((strmno << 16) | strmseq);
+ }
+ stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_17;
+ sctp_abort_an_association(stcb->sctp_ep,
+ stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
+
+ *abort_flag = 1;
+ return (0);
+ }
+ }
+ }
+ /* ok, if we reach here we have passed the sanity checks */
+ if (chunk_flags & SCTP_DATA_UNORDERED) {
+ /* queue directly into socket buffer */
+ sctp_mark_non_revokable(asoc, control->sinfo_tsn);
+ sctp_add_to_readq(stcb->sctp_ep, stcb,
+ control,
+ &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
+ } else {
+			/*
+			 * Special check for when streams are resetting. We
+			 * could be smarter about this and check the actual
+			 * stream to see if it is not being reset... that
+			 * way we would not create a HOLB between streams
+			 * being reset and those not being reset.
+			 *
+			 * We take complete messages that have a stream reset
+			 * intervening (aka the TSN is after where our
+			 * cum-ack needs to be) off and put them on a
+			 * pending_reply_queue. The reassembly ones we do
+			 * not have to worry about since they are all sorted
+			 * and processed by TSN order. It is only the
+			 * singletons I must worry about.
+			 */
+ if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
+ ((compare_with_wrap(tsn, liste->tsn, MAX_TSN)))
+ ) {
+				/*
+				 * yep it's past where we need to reset...
+				 * go ahead and queue it.
+				 */
+ if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
+ /* first one on */
+ TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
+ } else {
+ struct sctp_queued_to_read *ctlOn;
+ unsigned char inserted = 0;
+
+ ctlOn = TAILQ_FIRST(&asoc->pending_reply_queue);
+ while (ctlOn) {
+ if (compare_with_wrap(control->sinfo_tsn,
+ ctlOn->sinfo_tsn, MAX_TSN)) {
+ ctlOn = TAILQ_NEXT(ctlOn, next);
+ } else {
+ /* found it */
+ TAILQ_INSERT_BEFORE(ctlOn, control, next);
+ inserted = 1;
+ break;
+ }
+ }
+ if (inserted == 0) {
+						/*
+						 * Nothing on the queue
+						 * sorts after it, so put
+						 * it at the tail.
+						 */
+ TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
+ }
+ }
+ } else {
+ sctp_queue_data_to_stream(stcb, asoc, control, abort_flag);
+ if (*abort_flag) {
+ return (0);
+ }
+ }
+ }
+ } else {
+ /* Into the re-assembly queue */
+ sctp_queue_data_for_reasm(stcb, asoc, chk, abort_flag);
+ if (*abort_flag) {
+ /*
+ * the assoc is now gone and chk was put onto the
+ * reasm queue, which has all been freed.
+ */
+ *m = NULL;
+ return (0);
+ }
+ }
+finish_express_del:
+ if (tsn == (asoc->cumulative_tsn + 1)) {
+ /* Update cum-ack */
+ asoc->cumulative_tsn = tsn;
+ }
+ if (last_chunk) {
+ *m = NULL;
+ }
+ if (ordered) {
+ SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);
+ } else {
+ SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);
+ }
+ SCTP_STAT_INCR(sctps_recvdata);
+ /* Set it present please */
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
+ sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno, SCTP_STR_LOG_FROM_MARK_TSN);
+ }
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
+ sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
+ asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
+ }
+ /* check the special flag for stream resets */
+ if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
+ ((compare_with_wrap(asoc->cumulative_tsn, liste->tsn, MAX_TSN)) ||
+ (asoc->cumulative_tsn == liste->tsn))
+ ) {
+		/*
+		 * We have finished working through the backlogged TSNs; now
+		 * it is time to reset streams: 1) call the reset function,
+		 * 2) free the pending_reply space, 3) distribute any chunks
+		 * in the pending_reply_queue.
+		 */
+ struct sctp_queued_to_read *ctl;
+
+ sctp_reset_in_stream(stcb, liste->number_entries, liste->req.list_of_streams);
+ TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
+ SCTP_FREE(liste, SCTP_M_STRESET);
+ /* sa_ignore FREED_MEMORY */
+ liste = TAILQ_FIRST(&asoc->resetHead);
+ ctl = TAILQ_FIRST(&asoc->pending_reply_queue);
+ if (ctl && (liste == NULL)) {
+ /* All can be removed */
+ while (ctl) {
+ TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
+ sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag);
+ if (*abort_flag) {
+ return (0);
+ }
+ ctl = TAILQ_FIRST(&asoc->pending_reply_queue);
+ }
+ } else if (ctl) {
+			/* more stream resets are still pending */
+ while (!compare_with_wrap(ctl->sinfo_tsn, liste->tsn, MAX_TSN)) {
+				/*
+				 * If ctl->sinfo_tsn <= liste->tsn we can
+				 * process it, i.e. the NOT of
+				 * ctl->sinfo_tsn > liste->tsn.
+				 */
+ TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
+ sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag);
+ if (*abort_flag) {
+ return (0);
+ }
+ ctl = TAILQ_FIRST(&asoc->pending_reply_queue);
+ }
+ }
+		/*
+		 * Now service re-assembly to pick up anything that has been
+		 * held on the reassembly queue.
+		 */
+ sctp_deliver_reasm_check(stcb, asoc);
+ need_reasm_check = 0;
+ }
+ if (need_reasm_check) {
+ /* Another one waits ? */
+ sctp_deliver_reasm_check(stcb, asoc);
+ }
+ return (1);
+}
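+
+/*
+ * In summary (a reading of the routine above): sctp_process_a_data_chunk()
+ * routes an arriving DATA chunk down one of four paths: (1) drop it as a
+ * duplicate or for lack of rwnd/queue space, (2) express-deliver a complete
+ * in-order chunk straight to the socket buffer, (3) append it to an
+ * in-progress partial delivery, or (4) queue it on the stream queue
+ * (complete ordered messages) or the reassembly queue (fragments). It
+ * returns 1 only when the chunk was accepted.
+ */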
+
+int8_t sctp_map_lookup_tab[256] = {
+ 0, 1, 0, 2, 0, 1, 0, 3,
+ 0, 1, 0, 2, 0, 1, 0, 4,
+ 0, 1, 0, 2, 0, 1, 0, 3,
+ 0, 1, 0, 2, 0, 1, 0, 5,
+ 0, 1, 0, 2, 0, 1, 0, 3,
+ 0, 1, 0, 2, 0, 1, 0, 4,
+ 0, 1, 0, 2, 0, 1, 0, 3,
+ 0, 1, 0, 2, 0, 1, 0, 6,
+ 0, 1, 0, 2, 0, 1, 0, 3,
+ 0, 1, 0, 2, 0, 1, 0, 4,
+ 0, 1, 0, 2, 0, 1, 0, 3,
+ 0, 1, 0, 2, 0, 1, 0, 5,
+ 0, 1, 0, 2, 0, 1, 0, 3,
+ 0, 1, 0, 2, 0, 1, 0, 4,
+ 0, 1, 0, 2, 0, 1, 0, 3,
+ 0, 1, 0, 2, 0, 1, 0, 7,
+ 0, 1, 0, 2, 0, 1, 0, 3,
+ 0, 1, 0, 2, 0, 1, 0, 4,
+ 0, 1, 0, 2, 0, 1, 0, 3,
+ 0, 1, 0, 2, 0, 1, 0, 5,
+ 0, 1, 0, 2, 0, 1, 0, 3,
+ 0, 1, 0, 2, 0, 1, 0, 4,
+ 0, 1, 0, 2, 0, 1, 0, 3,
+ 0, 1, 0, 2, 0, 1, 0, 6,
+ 0, 1, 0, 2, 0, 1, 0, 3,
+ 0, 1, 0, 2, 0, 1, 0, 4,
+ 0, 1, 0, 2, 0, 1, 0, 3,
+ 0, 1, 0, 2, 0, 1, 0, 5,
+ 0, 1, 0, 2, 0, 1, 0, 3,
+ 0, 1, 0, 2, 0, 1, 0, 4,
+ 0, 1, 0, 2, 0, 1, 0, 3,
+ 0, 1, 0, 2, 0, 1, 0, 8
+};
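+
+/*
+ * sctp_map_lookup_tab[v] encodes the number of consecutive 1-bits in v
+ * starting from bit 0; sctp_slide_mapping_arrays() uses it to find the
+ * first missing TSN inside a partially filled byte. A minimal sketch of an
+ * equivalent computation (hypothetical helper, not built into the stack):
+ */
+#if 0
+static int8_t
+sctp_count_trailing_ones(uint8_t v)
+{
+	int8_t n = 0;
+
+	/* Count consecutive 1-bits starting at the LSB. */
+	while (v & 1) {
+		n++;
+		v >>= 1;
+	}
+	return (n);	/* matches sctp_map_lookup_tab[] for all 256 inputs */
+}
+#endif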
+
+
+void
+sctp_slide_mapping_arrays(struct sctp_tcb *stcb)
+{
+ /*
+ * Now we also need to check the mapping array in a couple of ways.
+ * 1) Did we move the cum-ack point?
+ *
+	 * When you first glance at this you might think that all entries that
+	 * make up the position of the cum-ack would be in the nr-mapping
+	 * array only... i.e. things up to the cum-ack are always
+	 * deliverable. That's true with one exception: when it's a fragmented
+	 * message we may not deliver the data until some threshold (or all
+	 * of it) is in place. So we must OR the nr_mapping_array and
+	 * mapping_array to get a true picture of the cum-ack.
+ */
+ struct sctp_association *asoc;
+ int at;
+ uint8_t val;
+ int slide_from, slide_end, lgap, distance;
+ uint32_t old_cumack, old_base, old_highest, highest_tsn;
+
+ asoc = &stcb->asoc;
+ at = 0;
+
+ old_cumack = asoc->cumulative_tsn;
+ old_base = asoc->mapping_array_base_tsn;
+ old_highest = asoc->highest_tsn_inside_map;
+ /*
+ * We could probably improve this a small bit by calculating the
+ * offset of the current cum-ack as the starting point.
+ */
+ at = 0;
+ for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) {
+ val = asoc->nr_mapping_array[slide_from] | asoc->mapping_array[slide_from];
+ if (val == 0xff) {
+ at += 8;
+ } else {
+ /* there is a 0 bit */
+ at += sctp_map_lookup_tab[val];
+ break;
+ }
+ }
+ asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - 1);
+
+ if (compare_with_wrap(asoc->cumulative_tsn, asoc->highest_tsn_inside_map, MAX_TSN) &&
+ compare_with_wrap(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN)) {
+#ifdef INVARIANTS
+ panic("huh, cumack 0x%x greater than high-tsn 0x%x in map",
+ asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
+#else
+ SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n",
+ asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
+ sctp_print_mapping_array(asoc);
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
+ sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
+ }
+ asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
+ asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
+#endif
+ }
+ if (compare_with_wrap(asoc->highest_tsn_inside_nr_map,
+ asoc->highest_tsn_inside_map,
+ MAX_TSN)) {
+ highest_tsn = asoc->highest_tsn_inside_nr_map;
+ } else {
+ highest_tsn = asoc->highest_tsn_inside_map;
+ }
+ if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) {
+ /* The complete array was completed by a single FR */
+ /* highest becomes the cum-ack */
+ int clr;
+
+#ifdef INVARIANTS
+ unsigned int i;
+
+#endif
+
+ /* clear the array */
+ clr = ((at + 7) >> 3);
+ if (clr > asoc->mapping_array_size) {
+ clr = asoc->mapping_array_size;
+ }
+ memset(asoc->mapping_array, 0, clr);
+ memset(asoc->nr_mapping_array, 0, clr);
+#ifdef INVARIANTS
+ for (i = 0; i < asoc->mapping_array_size; i++) {
+ if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) {
+ printf("Error Mapping array's not clean at clear\n");
+ sctp_print_mapping_array(asoc);
+ }
+ }
+#endif
+ asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
+ asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
+ } else if (at >= 8) {
+ /* we can slide the mapping array down */
+ /* slide_from holds where we hit the first NON 0xff byte */
+
+ /*
+ * now calculate the ceiling of the move using our highest
+ * TSN value
+ */
+ SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn);
+ slide_end = (lgap >> 3);
+ if (slide_end < slide_from) {
+ sctp_print_mapping_array(asoc);
+#ifdef INVARIANTS
+ panic("impossible slide");
+#else
+ printf("impossible slide lgap:%x slide_end:%x slide_from:%x? at:%d\n",
+ lgap, slide_end, slide_from, at);
+ return;
+#endif
+ }
+ if (slide_end > asoc->mapping_array_size) {
+#ifdef INVARIANTS
+ panic("would overrun buffer");
+#else
+ printf("Gak, would have overrun map end:%d slide_end:%d\n",
+ asoc->mapping_array_size, slide_end);
+ slide_end = asoc->mapping_array_size;
+#endif
+ }
+ distance = (slide_end - slide_from) + 1;
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
+ sctp_log_map(old_base, old_cumack, old_highest,
+ SCTP_MAP_PREPARE_SLIDE);
+ sctp_log_map((uint32_t) slide_from, (uint32_t) slide_end,
+ (uint32_t) lgap, SCTP_MAP_SLIDE_FROM);
+ }
+ if (distance + slide_from > asoc->mapping_array_size ||
+ distance < 0) {
+ /*
+ * Here we do NOT slide forward the array so that
+ * hopefully when more data comes in to fill it up
+ * we will be able to slide it forward. Really I
+ * don't think this should happen :-0
+ */
+
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
+ sctp_log_map((uint32_t) distance, (uint32_t) slide_from,
+ (uint32_t) asoc->mapping_array_size,
+ SCTP_MAP_SLIDE_NONE);
+ }
+ } else {
+ int ii;
+
+ for (ii = 0; ii < distance; ii++) {
+ asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii];
+ asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii];
+
+ }
+ for (ii = distance; ii < asoc->mapping_array_size; ii++) {
+ asoc->mapping_array[ii] = 0;
+ asoc->nr_mapping_array[ii] = 0;
+ }
+ if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) {
+ asoc->highest_tsn_inside_map += (slide_from << 3);
+ }
+ if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) {
+ asoc->highest_tsn_inside_nr_map += (slide_from << 3);
+ }
+ asoc->mapping_array_base_tsn += (slide_from << 3);
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
+ sctp_log_map(asoc->mapping_array_base_tsn,
+ asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
+ SCTP_MAP_SLIDE_RESULT);
+ }
+ }
+ }
+}
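+
+/*
+ * A worked example of the slide (illustrative numbers only): with
+ * mapping_array_base_tsn = 1000 and the first two OR'd bytes equal to 0xff,
+ * at = 16 and the cum-ack advances to 1015. If the highest TSN seen is
+ * 1100, then lgap = 100, slide_end = 12 and slide_from = 2, so bytes 2
+ * through 12 are copied down to index 0 and the base becomes
+ * 1000 + (2 << 3) = 1016.
+ */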
+
+
+void
+sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap, int *abort_flag)
+{
+ struct sctp_association *asoc;
+ uint32_t highest_tsn;
+
+ asoc = &stcb->asoc;
+ if (compare_with_wrap(asoc->highest_tsn_inside_nr_map,
+ asoc->highest_tsn_inside_map,
+ MAX_TSN)) {
+ highest_tsn = asoc->highest_tsn_inside_nr_map;
+ } else {
+ highest_tsn = asoc->highest_tsn_inside_map;
+ }
+
+ /*
+ * Now we need to see if we need to queue a sack or just start the
+ * timer (if allowed).
+ */
+ if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
+		/*
+		 * Ok, special case: in the SHUTDOWN-SENT state we make
+		 * sure the SACK timer is off and instead send a SHUTDOWN
+		 * and a SACK.
+		 */
+ if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
+ sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
+ stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_INDATA + SCTP_LOC_18);
+ }
+ sctp_send_shutdown(stcb, stcb->asoc.primary_destination);
+ sctp_send_sack(stcb);
+ } else {
+ int is_a_gap;
+
+ /* is there a gap now ? */
+ is_a_gap = compare_with_wrap(highest_tsn, stcb->asoc.cumulative_tsn, MAX_TSN);
+
+ /*
+ * CMT DAC algorithm: increase number of packets received
+ * since last ack
+ */
+ stcb->asoc.cmt_dac_pkts_rcvd++;
+
+ if ((stcb->asoc.send_sack == 1) || /* We need to send a
+ * SACK */
+ ((was_a_gap) && (is_a_gap == 0)) || /* was a gap, but no
+ * longer is one */
+ (stcb->asoc.numduptsns) || /* we have dup's */
+ (is_a_gap) || /* is still a gap */
+ (stcb->asoc.delayed_ack == 0) || /* Delayed sack disabled */
+ (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq) /* hit limit of pkts */
+ ) {
+
+ if ((stcb->asoc.sctp_cmt_on_off == 1) &&
+ (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
+ (stcb->asoc.send_sack == 0) &&
+ (stcb->asoc.numduptsns == 0) &&
+ (stcb->asoc.delayed_ack) &&
+ (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {
+
+				/*
+				 * CMT DAC algorithm: With CMT, delay acks
+				 * even in the face of reordering.
+				 * Therefore, acks that do not have to be
+				 * sent for the above reasons will be
+				 * delayed. That is, acks that would have
+				 * been sent due to gap reports will be
+				 * delayed with DAC. Start the delayed ack
+				 * timer.
+				 */
+ sctp_timer_start(SCTP_TIMER_TYPE_RECV,
+ stcb->sctp_ep, stcb, NULL);
+ } else {
+ /*
+ * Ok we must build a SACK since the timer
+ * is pending, we got our first packet OR
+ * there are gaps or duplicates.
+ */
+ (void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
+ sctp_send_sack(stcb);
+ }
+ } else {
+ if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
+ sctp_timer_start(SCTP_TIMER_TYPE_RECV,
+ stcb->sctp_ep, stcb, NULL);
+ }
+ }
+ }
+}
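+
+/*
+ * Restating the decision above compactly: a SACK goes out immediately when
+ * one is already owed (send_sack), a gap just opened or closed, there are
+ * duplicate TSNs to report, delayed acks are disabled, or the packet count
+ * reached sack_freq; otherwise the delayed-ack timer is started. The one
+ * exception is CMT with DAC enabled, which may delay even a gap-driven ack.
+ */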
+
+void
+sctp_service_queues(struct sctp_tcb *stcb, struct sctp_association *asoc)
+{
+ struct sctp_tmit_chunk *chk;
+ uint32_t tsize, pd_point;
+ uint16_t nxt_todel;
+
+ if (asoc->fragmented_delivery_inprogress) {
+ sctp_service_reassembly(stcb, asoc);
+ }
+ /* Can we proceed further, i.e. the PD-API is complete */
+ if (asoc->fragmented_delivery_inprogress) {
+ /* no */
+ return;
+ }
+	/*
+	 * Now, is there some other chunk I can deliver from the reassembly
+	 * queue?
+	 */
+doit_again:
+ chk = TAILQ_FIRST(&asoc->reasmqueue);
+ if (chk == NULL) {
+ asoc->size_on_reasm_queue = 0;
+ asoc->cnt_on_reasm_queue = 0;
+ return;
+ }
+ nxt_todel = asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
+ if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
+ ((nxt_todel == chk->rec.data.stream_seq) ||
+ (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
+		/*
+		 * Yep, the first one is here. We set up to start reception
+		 * by backing down the TSN just in case we can't deliver.
+		 */
+
+		/*
+		 * Before we start, though, either all of the message should
+		 * be here or enough of it to reach the partial delivery
+		 * point (the lesser of the socket buffer limit and the
+		 * endpoint's partial_delivery_point).
+		 */
+ if (stcb->sctp_socket) {
+ pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket),
+ stcb->sctp_ep->partial_delivery_point);
+ } else {
+ pd_point = stcb->sctp_ep->partial_delivery_point;
+ }
+ if (sctp_is_all_msg_on_reasm(asoc, &tsize) || (tsize >= pd_point)) {
+ asoc->fragmented_delivery_inprogress = 1;
+ asoc->tsn_last_delivered = chk->rec.data.TSN_seq - 1;
+ asoc->str_of_pdapi = chk->rec.data.stream_number;
+ asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
+ asoc->pdapi_ppid = chk->rec.data.payloadtype;
+ asoc->fragment_flags = chk->rec.data.rcv_flags;
+ sctp_service_reassembly(stcb, asoc);
+ if (asoc->fragmented_delivery_inprogress == 0) {
+ goto doit_again;
+ }
+ }
+ }
+}
+
+int
+sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
+ struct sctphdr *sh, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
+ struct sctp_nets *net, uint32_t * high_tsn)
+{
+ struct sctp_data_chunk *ch, chunk_buf;
+ struct sctp_association *asoc;
+ int num_chunks = 0; /* number of control chunks processed */
+ int stop_proc = 0;
+ int chk_length, break_flag, last_chunk;
+ int abort_flag = 0, was_a_gap;
+ struct mbuf *m;
+ uint32_t highest_tsn;
+
+ /* set the rwnd */
+ sctp_set_rwnd(stcb, &stcb->asoc);
+
+ m = *mm;
+ SCTP_TCB_LOCK_ASSERT(stcb);
+ asoc = &stcb->asoc;
+ if (compare_with_wrap(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map, MAX_TSN)) {
+ highest_tsn = asoc->highest_tsn_inside_nr_map;
+ } else {
+ highest_tsn = asoc->highest_tsn_inside_map;
+ }
+ was_a_gap = compare_with_wrap(highest_tsn, stcb->asoc.cumulative_tsn, MAX_TSN);
+ /*
+ * setup where we got the last DATA packet from for any SACK that
+ * may need to go out. Don't bump the net. This is done ONLY when a
+ * chunk is assigned.
+ */
+ asoc->last_data_chunk_from = net;
+
+ /*-
+ * Now before we proceed we must figure out if this is a wasted
+ * cluster... i.e. it is a small packet sent in and yet the driver
+ * underneath allocated a full cluster for it. If so we must copy it
+ * to a smaller mbuf and free up the cluster mbuf. This will help
+ * with cluster starvation. Note for __Panda__ we don't do this
+ * since it has clusters all the way down to 64 bytes.
+ */
+ if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
+ /* we only handle mbufs that are singletons.. not chains */
+ m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_DONTWAIT, 1, MT_DATA);
+ if (m) {
+			/* ok, let's see if we can copy the data up */
+ caddr_t *from, *to;
+
+ /* get the pointers and copy */
+ to = mtod(m, caddr_t *);
+ from = mtod((*mm), caddr_t *);
+ memcpy(to, from, SCTP_BUF_LEN((*mm)));
+ /* copy the length and free up the old */
+ SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
+ sctp_m_freem(*mm);
+			/* success, copy back */
+ *mm = m;
+ } else {
+ /* We are in trouble in the mbuf world .. yikes */
+ m = *mm;
+ }
+ }
+ /* get pointer to the first chunk header */
+ ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
+ sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
+ if (ch == NULL) {
+ return (1);
+ }
+ /*
+ * process all DATA chunks...
+ */
+ *high_tsn = asoc->cumulative_tsn;
+ break_flag = 0;
+ asoc->data_pkts_seen++;
+ while (stop_proc == 0) {
+ /* validate chunk length */
+ chk_length = ntohs(ch->ch.chunk_length);
+ if (length - *offset < chk_length) {
+			/* all done, mutilated chunk */
+ stop_proc = 1;
+ break;
+ }
+ if (ch->ch.chunk_type == SCTP_DATA) {
+ if ((size_t)chk_length < sizeof(struct sctp_data_chunk) + 1) {
+				/*
+				 * Need to send an abort since we had an
+				 * invalid data chunk.
+				 */
+ struct mbuf *op_err;
+
+ op_err = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 2 * sizeof(uint32_t)),
+ 0, M_DONTWAIT, 1, MT_DATA);
+
+ if (op_err) {
+ struct sctp_paramhdr *ph;
+ uint32_t *ippp;
+
+ SCTP_BUF_LEN(op_err) = sizeof(struct sctp_paramhdr) +
+ (2 * sizeof(uint32_t));
+ ph = mtod(op_err, struct sctp_paramhdr *);
+ ph->param_type =
+ htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
+ ph->param_length = htons(SCTP_BUF_LEN(op_err));
+ ippp = (uint32_t *) (ph + 1);
+ *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_19);
+ ippp++;
+ *ippp = asoc->cumulative_tsn;
+
+ }
+ stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_19;
+ sctp_abort_association(inp, stcb, m, iphlen, sh,
+ op_err, 0, net->port);
+ return (2);
+ }
+#ifdef SCTP_AUDITING_ENABLED
+ sctp_audit_log(0xB1, 0);
+#endif
+ if (SCTP_SIZE32(chk_length) == (length - *offset)) {
+ last_chunk = 1;
+ } else {
+ last_chunk = 0;
+ }
+ if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset, ch,
+ chk_length, net, high_tsn, &abort_flag, &break_flag,
+ last_chunk)) {
+ num_chunks++;
+ }
+ if (abort_flag)
+ return (2);
+
+ if (break_flag) {
+				/*
+				 * Set because we ran out of rwnd space and
+				 * have no drop report space left.
+				 */
+ stop_proc = 1;
+ break;
+ }
+ } else {
+ /* not a data chunk in the data region */
+ switch (ch->ch.chunk_type) {
+ case SCTP_INITIATION:
+ case SCTP_INITIATION_ACK:
+ case SCTP_SELECTIVE_ACK:
+ case SCTP_NR_SELECTIVE_ACK: /* EY */
+ case SCTP_HEARTBEAT_REQUEST:
+ case SCTP_HEARTBEAT_ACK:
+ case SCTP_ABORT_ASSOCIATION:
+ case SCTP_SHUTDOWN:
+ case SCTP_SHUTDOWN_ACK:
+ case SCTP_OPERATION_ERROR:
+ case SCTP_COOKIE_ECHO:
+ case SCTP_COOKIE_ACK:
+ case SCTP_ECN_ECHO:
+ case SCTP_ECN_CWR:
+ case SCTP_SHUTDOWN_COMPLETE:
+ case SCTP_AUTHENTICATION:
+ case SCTP_ASCONF_ACK:
+ case SCTP_PACKET_DROPPED:
+ case SCTP_STREAM_RESET:
+ case SCTP_FORWARD_CUM_TSN:
+ case SCTP_ASCONF:
+ /*
+ * Now, what do we do with KNOWN chunks that
+ * are NOT in the right place?
+ *
+ * For now, I do nothing but ignore them. We
+ * may later want to add sysctl stuff to
+ * switch out and do either an ABORT() or
+ * possibly process them.
+ */
+ if (SCTP_BASE_SYSCTL(sctp_strict_data_order)) {
+ struct mbuf *op_err;
+
+ op_err = sctp_generate_invmanparam(SCTP_CAUSE_PROTOCOL_VIOLATION);
+ sctp_abort_association(inp, stcb, m, iphlen, sh, op_err, 0, net->port);
+ return (2);
+ }
+ break;
+ default:
+ /* unknown chunk type, use bit rules */
+ if (ch->ch.chunk_type & 0x40) {
+				/* Add an error report to the queue */
+ struct mbuf *merr;
+ struct sctp_paramhdr *phd;
+
+ merr = sctp_get_mbuf_for_msg(sizeof(*phd), 0, M_DONTWAIT, 1, MT_DATA);
+ if (merr) {
+ phd = mtod(merr, struct sctp_paramhdr *);
+					/*
+					 * We cheat and use param
+					 * type since we did not
+					 * bother to define an error
+					 * cause struct. They are
+					 * the same basic format
+					 * with different names.
+					 */
+ phd->param_type =
+ htons(SCTP_CAUSE_UNRECOG_CHUNK);
+ phd->param_length =
+ htons(chk_length + sizeof(*phd));
+ SCTP_BUF_LEN(merr) = sizeof(*phd);
+ SCTP_BUF_NEXT(merr) = SCTP_M_COPYM(m, *offset,
+ SCTP_SIZE32(chk_length),
+ M_DONTWAIT);
+ if (SCTP_BUF_NEXT(merr)) {
+ sctp_queue_op_err(stcb, merr);
+ } else {
+ sctp_m_freem(merr);
+ }
+ }
+ }
+ if ((ch->ch.chunk_type & 0x80) == 0) {
+ /* discard the rest of this packet */
+ stop_proc = 1;
+ } /* else skip this bad chunk and
+ * continue... */
+ break;
+			}	/* end switch on chunk type */
+ }
+ *offset += SCTP_SIZE32(chk_length);
+ if ((*offset >= length) || stop_proc) {
+ /* no more data left in the mbuf chain */
+ stop_proc = 1;
+ continue;
+ }
+ ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
+ sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
+ if (ch == NULL) {
+ *offset = length;
+ stop_proc = 1;
+ break;
+
+ }
+ } /* while */
+ if (break_flag) {
+ /*
+ * we need to report rwnd overrun drops.
+ */
+ sctp_send_packet_dropped(stcb, net, *mm, iphlen, 0);
+ }
+ if (num_chunks) {
+		/*
+		 * Did we get data? If so, update the time for auto-close
+		 * and give the peer credit for being alive.
+		 */
+ SCTP_STAT_INCR(sctps_recvpktwithdata);
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
+ sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
+ stcb->asoc.overall_error_count,
+ 0,
+ SCTP_FROM_SCTP_INDATA,
+ __LINE__);
+ }
+ stcb->asoc.overall_error_count = 0;
+ (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
+ }
+ /* now service all of the reassm queue if needed */
+ if (!(TAILQ_EMPTY(&asoc->reasmqueue)))
+ sctp_service_queues(stcb, asoc);
+
+ if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
+ /* Assure that we ack right away */
+ stcb->asoc.send_sack = 1;
+ }
+ /* Start a sack timer or QUEUE a SACK for sending */
+ sctp_sack_check(stcb, was_a_gap, &abort_flag);
+ if (abort_flag)
+ return (2);
+
+ return (0);
+}
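+
+/*
+ * Return values of sctp_process_data(), as read from the code above: 0 on
+ * normal completion, 1 when the first chunk header cannot be pulled from
+ * the mbuf chain, and 2 when the association was aborted during processing.
+ */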
+
+static int
+sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn,
+ uint16_t frag_strt, uint16_t frag_end, int nr_sacking,
+ int *num_frs,
+ uint32_t * biggest_newly_acked_tsn,
+ uint32_t * this_sack_lowest_newack,
+ int *ecn_seg_sums)
+{
+ struct sctp_tmit_chunk *tp1;
+ unsigned int theTSN;
+ int j, wake_him = 0, circled = 0;
+
+ /* Recover the tp1 we last saw */
+ tp1 = *p_tp1;
+ if (tp1 == NULL) {
+ tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
+ }
+ for (j = frag_strt; j <= frag_end; j++) {
+ theTSN = j + last_tsn;
+ while (tp1) {
+ if (tp1->rec.data.doing_fast_retransmit)
+ (*num_frs) += 1;
+
+ /*-
+ * CMT: CUCv2 algorithm. For each TSN being
+ * processed from the sent queue, track the
+ * next expected pseudo-cumack, or
+ * rtx_pseudo_cumack, if required. Separate
+ * cumack trackers for first transmissions,
+ * and retransmissions.
+ */
+ if ((tp1->whoTo->find_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
+ (tp1->snd_count == 1)) {
+ tp1->whoTo->pseudo_cumack = tp1->rec.data.TSN_seq;
+ tp1->whoTo->find_pseudo_cumack = 0;
+ }
+ if ((tp1->whoTo->find_rtx_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
+ (tp1->snd_count > 1)) {
+ tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.TSN_seq;
+ tp1->whoTo->find_rtx_pseudo_cumack = 0;
+ }
+ if (tp1->rec.data.TSN_seq == theTSN) {
+ if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
+ /*-
+ * must be held until
+ * cum-ack passes
+ */
+ /*-
+ * ECN Nonce: Add the nonce
+ * value to the sender's
+ * nonce sum
+ */
+ if (tp1->sent < SCTP_DATAGRAM_RESEND) {
+ /*-
+ * If it is less than RESEND, it is
+ * now no-longer in flight.
+ * Higher values may already be set
+ * via previous Gap Ack Blocks...
+ * i.e. ACKED or RESEND.
+ */
+ if (compare_with_wrap(tp1->rec.data.TSN_seq,
+ *biggest_newly_acked_tsn, MAX_TSN)) {
+ *biggest_newly_acked_tsn = tp1->rec.data.TSN_seq;
+ }
+ /*-
+ * CMT: SFR algo (and HTNA) - set
+ * saw_newack to 1 for dest being
+ * newly acked. update
+ * this_sack_highest_newack if
+ * appropriate.
+ */
+ if (tp1->rec.data.chunk_was_revoked == 0)
+ tp1->whoTo->saw_newack = 1;
+
+ if (compare_with_wrap(tp1->rec.data.TSN_seq,
+ tp1->whoTo->this_sack_highest_newack,
+ MAX_TSN)) {
+ tp1->whoTo->this_sack_highest_newack =
+ tp1->rec.data.TSN_seq;
+ }
+ /*-
+ * CMT DAC algo: also update
+ * this_sack_lowest_newack
+ */
+ if (*this_sack_lowest_newack == 0) {
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
+ sctp_log_sack(*this_sack_lowest_newack,
+ last_tsn,
+ tp1->rec.data.TSN_seq,
+ 0,
+ 0,
+ SCTP_LOG_TSN_ACKED);
+ }
+ *this_sack_lowest_newack = tp1->rec.data.TSN_seq;
+ }
+ /*-
+ * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp
+ * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set
+ * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be
+ * updated. Also trigger search for the next expected (rtx-)pseudo-cumack.
+ * Separate pseudo_cumack trackers for first transmissions and
+ * retransmissions.
+ */
+ if (tp1->rec.data.TSN_seq == tp1->whoTo->pseudo_cumack) {
+ if (tp1->rec.data.chunk_was_revoked == 0) {
+ tp1->whoTo->new_pseudo_cumack = 1;
+ }
+ tp1->whoTo->find_pseudo_cumack = 1;
+ }
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
+ sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
+ }
+ if (tp1->rec.data.TSN_seq == tp1->whoTo->rtx_pseudo_cumack) {
+ if (tp1->rec.data.chunk_was_revoked == 0) {
+ tp1->whoTo->new_pseudo_cumack = 1;
+ }
+ tp1->whoTo->find_rtx_pseudo_cumack = 1;
+ }
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
+ sctp_log_sack(*biggest_newly_acked_tsn,
+ last_tsn,
+ tp1->rec.data.TSN_seq,
+ frag_strt,
+ frag_end,
+ SCTP_LOG_TSN_ACKED);
+ }
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
+ sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
+ tp1->whoTo->flight_size,
+ tp1->book_size,
+ (uintptr_t) tp1->whoTo,
+ tp1->rec.data.TSN_seq);
+ }
+ sctp_flight_size_decrease(tp1);
+ sctp_total_flight_decrease(stcb, tp1);
+
+ tp1->whoTo->net_ack += tp1->send_size;
+ if (tp1->snd_count < 2) {
+						/*-
+						 * True non-retransmitted chunk
+						 */
+ tp1->whoTo->net_ack2 += tp1->send_size;
+
+ /*-
+ * update RTO too ?
+ */
+ if (tp1->do_rtt) {
+ tp1->whoTo->RTO =
+ sctp_calculate_rto(stcb,
+ &stcb->asoc,
+ tp1->whoTo,
+ &tp1->sent_rcv_time,
+ sctp_align_safe_nocopy);
+ tp1->do_rtt = 0;
+ }
+ }
+ }
+ if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
+ (*ecn_seg_sums) += tp1->rec.data.ect_nonce;
+ (*ecn_seg_sums) &= SCTP_SACK_NONCE_SUM;
+ if (compare_with_wrap(tp1->rec.data.TSN_seq,
+ stcb->asoc.this_sack_highest_gap,
+ MAX_TSN)) {
+ stcb->asoc.this_sack_highest_gap =
+ tp1->rec.data.TSN_seq;
+ }
+ if (tp1->sent == SCTP_DATAGRAM_RESEND) {
+ sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
+#ifdef SCTP_AUDITING_ENABLED
+ sctp_audit_log(0xB2,
+ (stcb->asoc.sent_queue_retran_cnt & 0x000000ff));
+#endif
+ }
+ }
+ /*-
+ * All chunks NOT UNSENT fall through here and are marked
+ * (leave PR-SCTP ones that are to skip alone though)
+ */
+ if (tp1->sent != SCTP_FORWARD_TSN_SKIP)
+ tp1->sent = SCTP_DATAGRAM_MARKED;
+
+ if (tp1->rec.data.chunk_was_revoked) {
+ /* deflate the cwnd */
+ tp1->whoTo->cwnd -= tp1->book_size;
+ tp1->rec.data.chunk_was_revoked = 0;
+ }
+ /* NR Sack code here */
+ if (nr_sacking) {
+ if (tp1->data) {
+ /*
+ * sa_ignore
+ * NO_NULL_CHK
+ */
+ sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
+ sctp_m_freem(tp1->data);
+ tp1->data = NULL;
+ }
+ wake_him++;
+ }
+ }
+ break;
+ } /* if (tp1->TSN_seq == theTSN) */
+ if (compare_with_wrap(tp1->rec.data.TSN_seq, theTSN,
+ MAX_TSN))
+ break;
+
+ tp1 = TAILQ_NEXT(tp1, sctp_next);
+ if ((tp1 == NULL) && (circled == 0)) {
+ circled++;
+ tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
+ }
+ } /* end while (tp1) */
+ if (tp1 == NULL) {
+ circled = 0;
+ tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
+ }
+ /* In case the fragments were not in order we must reset */
+ } /* end for (j = fragStart */
+ *p_tp1 = tp1;
+ return (wake_him); /* Return value only used for nr-sack */
+}
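+
+/*
+ * Gap ack block offsets are relative to the cumulative TSN carried in the
+ * SACK. For example (illustrative numbers): with last_tsn = 1000 and a
+ * block with frag_strt = 2 and frag_end = 4, the loop above walks theTSN =
+ * 1002, 1003 and 1004, marking each matching chunk on the sent queue.
+ */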
+
+
+static int
+sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
+ uint32_t last_tsn, uint32_t * biggest_tsn_acked,
+ uint32_t * biggest_newly_acked_tsn, uint32_t * this_sack_lowest_newack,
+ int num_seg, int num_nr_seg, int *ecn_seg_sums)
+{
+ struct sctp_gap_ack_block *frag, block;
+ struct sctp_tmit_chunk *tp1;
+ int i;
+ int num_frs = 0;
+ int chunk_freed;
+ int non_revocable;
+ uint16_t frag_strt, frag_end, prev_frag_end;
+
+ tp1 = TAILQ_FIRST(&asoc->sent_queue);
+ prev_frag_end = 0;
+ chunk_freed = 0;
+
+ for (i = 0; i < (num_seg + num_nr_seg); i++) {
+ if (i == num_seg) {
+ prev_frag_end = 0;
+ tp1 = TAILQ_FIRST(&asoc->sent_queue);
+ }
+ frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
+ sizeof(struct sctp_gap_ack_block), (uint8_t *) & block);
+ *offset += sizeof(block);
+ if (frag == NULL) {
+ return (chunk_freed);
+ }
+ frag_strt = ntohs(frag->start);
+ frag_end = ntohs(frag->end);
+
+ if (frag_strt > frag_end) {
+ /* This gap report is malformed, skip it. */
+ continue;
+ }
+ if (frag_strt <= prev_frag_end) {
+ /* This gap report is not in order, so restart. */
+ tp1 = TAILQ_FIRST(&asoc->sent_queue);
+ }
+ if (compare_with_wrap((last_tsn + frag_end), *biggest_tsn_acked, MAX_TSN)) {
+ *biggest_tsn_acked = last_tsn + frag_end;
+ }
+ if (i < num_seg) {
+ non_revocable = 0;
+ } else {
+ non_revocable = 1;
+ }
+ if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end,
+ non_revocable, &num_frs, biggest_newly_acked_tsn,
+ this_sack_lowest_newack, ecn_seg_sums)) {
+ chunk_freed = 1;
+ }
+ prev_frag_end = frag_end;
+ }
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
+ if (num_frs)
+ sctp_log_fr(*biggest_tsn_acked,
+ *biggest_newly_acked_tsn,
+ last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
+ }
+ return (chunk_freed);
+}
+
+static void
+sctp_check_for_revoked(struct sctp_tcb *stcb,
+ struct sctp_association *asoc, uint32_t cumack,
+ uint32_t biggest_tsn_acked)
+{
+ struct sctp_tmit_chunk *tp1;
+ int tot_revoked = 0;
+
+ tp1 = TAILQ_FIRST(&asoc->sent_queue);
+ while (tp1) {
+ if (compare_with_wrap(tp1->rec.data.TSN_seq, cumack,
+ MAX_TSN)) {
+			/*
+			 * ok this guy is either ACKED or MARKED. If it is
+			 * ACKED it has been previously acked but not this
+			 * time, i.e. revoked. If it is MARKED it was ACK'ed
+			 * again.
+			 */
+ if (compare_with_wrap(tp1->rec.data.TSN_seq, biggest_tsn_acked,
+ MAX_TSN))
+ break;
+
+
+ if (tp1->sent == SCTP_DATAGRAM_ACKED) {
+ /* it has been revoked */
+ tp1->sent = SCTP_DATAGRAM_SENT;
+ tp1->rec.data.chunk_was_revoked = 1;
+ /*
+ * We must add this stuff back in to assure
+ * timers and such get started.
+ */
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
+ sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
+ tp1->whoTo->flight_size,
+ tp1->book_size,
+ (uintptr_t) tp1->whoTo,
+ tp1->rec.data.TSN_seq);
+ }
+ sctp_flight_size_increase(tp1);
+ sctp_total_flight_increase(stcb, tp1);
+ /*
+ * We inflate the cwnd to compensate for our
+ * artificial inflation of the flight_size.
+ */
+ tp1->whoTo->cwnd += tp1->book_size;
+ tot_revoked++;
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
+ sctp_log_sack(asoc->last_acked_seq,
+ cumack,
+ tp1->rec.data.TSN_seq,
+ 0,
+ 0,
+ SCTP_LOG_TSN_REVOKED);
+ }
+ } else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
+ /* it has been re-acked in this SACK */
+ tp1->sent = SCTP_DATAGRAM_ACKED;
+ }
+ }
+ if (tp1->sent == SCTP_DATAGRAM_UNSENT)
+ break;
+ tp1 = TAILQ_NEXT(tp1, sctp_next);
+ }
+ if (tot_revoked > 0) {
+ /*
+ * Setup the ecn nonce re-sync point. We do this since once
+ * data is revoked we begin to retransmit things, which do
+ * NOT have the ECN bits set. This means we are now out of
+ * sync and must wait until we get back in sync with the
+ * peer to check ECN bits.
+ */
+ tp1 = TAILQ_FIRST(&asoc->send_queue);
+ if (tp1 == NULL) {
+ asoc->nonce_resync_tsn = asoc->sending_seq;
+ } else {
+ asoc->nonce_resync_tsn = tp1->rec.data.TSN_seq;
+ }
+ asoc->nonce_wait_for_ecne = 0;
+ asoc->nonce_sum_check = 0;
+ }
+}
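+
+/*
+ * Put differently: any chunk above the cum-ack that an earlier SACK marked
+ * ACKED but this SACK did not re-ack has been revoked by the peer, so it is
+ * put back in flight (SENT) and the cwnd is inflated to match; chunks this
+ * SACK did re-ack are promoted from MARKED back to ACKED.
+ */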
+
+
+static void
+sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
+ uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved)
+{
+ struct sctp_tmit_chunk *tp1;
+ int strike_flag = 0;
+ struct timeval now;
+ int tot_retrans = 0;
+ uint32_t sending_seq;
+ struct sctp_nets *net;
+ int num_dests_sacked = 0;
+
+ /*
+ * select the sending_seq, this is either the next thing ready to be
+ * sent but not transmitted, OR, the next seq we assign.
+ */
+ tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
+ if (tp1 == NULL) {
+ sending_seq = asoc->sending_seq;
+ } else {
+ sending_seq = tp1->rec.data.TSN_seq;
+ }
+
+ /* CMT DAC algo: finding out if SACK is a mixed SACK */
+ if ((asoc->sctp_cmt_on_off == 1) &&
+ SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
+ TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+ if (net->saw_newack)
+ num_dests_sacked++;
+ }
+ }
+ if (stcb->asoc.peer_supports_prsctp) {
+ (void)SCTP_GETTIME_TIMEVAL(&now);
+ }
+ tp1 = TAILQ_FIRST(&asoc->sent_queue);
+ while (tp1) {
+ strike_flag = 0;
+ if (tp1->no_fr_allowed) {
+ /* this one had a timeout or something */
+ tp1 = TAILQ_NEXT(tp1, sctp_next);
+ continue;
+ }
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
+ if (tp1->sent < SCTP_DATAGRAM_RESEND)
+ sctp_log_fr(biggest_tsn_newly_acked,
+ tp1->rec.data.TSN_seq,
+ tp1->sent,
+ SCTP_FR_LOG_CHECK_STRIKE);
+ }
+ if (compare_with_wrap(tp1->rec.data.TSN_seq, biggest_tsn_acked,
+ MAX_TSN) ||
+ tp1->sent == SCTP_DATAGRAM_UNSENT) {
+ /* done */
+ break;
+ }
+ if (stcb->asoc.peer_supports_prsctp) {
+ if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
+ /* Is it expired? */
+ if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
+ /* Yes so drop it */
+ if (tp1->data != NULL) {
+ (void)sctp_release_pr_sctp_chunk(stcb, tp1,
+ (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
+ SCTP_SO_NOT_LOCKED);
+ }
+ tp1 = TAILQ_NEXT(tp1, sctp_next);
+ continue;
+ }
+ }
+ }
+ if (compare_with_wrap(tp1->rec.data.TSN_seq,
+ asoc->this_sack_highest_gap, MAX_TSN)) {
+ /* we are beyond the tsn in the sack */
+ break;
+ }
+ if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
+ /* either a RESEND, ACKED, or MARKED */
+ /* skip */
+ if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
+			/* Continue striking FWD-TSN chunks */
+ tp1->rec.data.fwd_tsn_cnt++;
+ }
+ tp1 = TAILQ_NEXT(tp1, sctp_next);
+ continue;
+ }
+ /*
+ * CMT : SFR algo (covers part of DAC and HTNA as well)
+ */
+ if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
+ /*
+ * No new acks were received for data sent to this
+ * dest. Therefore, according to the SFR algo for
+ * CMT, no data sent to this dest can be marked for
+ * FR using this SACK.
+ */
+ tp1 = TAILQ_NEXT(tp1, sctp_next);
+ continue;
+ } else if (tp1->whoTo && compare_with_wrap(tp1->rec.data.TSN_seq,
+ tp1->whoTo->this_sack_highest_newack, MAX_TSN)) {
+ /*
+ * CMT: New acks were received for data sent to
+ * this dest. But no new acks were seen for data
+ * sent after tp1. Therefore, according to the SFR
+ * algo for CMT, tp1 cannot be marked for FR using
+ * this SACK. This step covers part of the DAC algo
+ * and the HTNA algo as well.
+ */
+ tp1 = TAILQ_NEXT(tp1, sctp_next);
+ continue;
+ }
+ /*
+ * Here we check to see if we have already done a FR
+ * and if so we see if the biggest TSN we saw in the sack is
+ * smaller than the recovery point. If so we don't strike
+ * the tsn... otherwise we CAN strike the TSN.
+ */
+ /*
+ * @@@ JRI: Check for CMT if (accum_moved &&
+ * asoc->fast_retran_loss_recovery && (sctp_cmt_on_off ==
+ * 0)) {
+ */
+ if (accum_moved && asoc->fast_retran_loss_recovery) {
+ /*
+ * Strike the TSN if in fast-recovery and cum-ack
+ * moved.
+ */
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
+ sctp_log_fr(biggest_tsn_newly_acked,
+ tp1->rec.data.TSN_seq,
+ tp1->sent,
+ SCTP_FR_LOG_STRIKE_CHUNK);
+ }
+ if (tp1->sent < SCTP_DATAGRAM_RESEND) {
+ tp1->sent++;
+ }
+ if ((asoc->sctp_cmt_on_off == 1) &&
+ SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
+ /*
+ * CMT DAC algorithm: If the SACK flag is set to
+ * 0, then the lowest_newack test will not pass
+ * because it would have been set to the
+ * cumack earlier. If tp1 is not already marked
+ * to be rtx'd, if this is not a mixed sack and
+ * if tp1 is not between two sacked TSNs, then
+ * mark it by one more. NOTE that we are marking
+ * by one additional time since the SACK DAC flag
+ * indicates that two packets have been
+ * received after this missing TSN.
+ */
+ if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
+ compare_with_wrap(this_sack_lowest_newack, tp1->rec.data.TSN_seq, MAX_TSN)) {
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
+ sctp_log_fr(16 + num_dests_sacked,
+ tp1->rec.data.TSN_seq,
+ tp1->sent,
+ SCTP_FR_LOG_STRIKE_CHUNK);
+ }
+ tp1->sent++;
+ }
+ }
+ } else if ((tp1->rec.data.doing_fast_retransmit) &&
+ (asoc->sctp_cmt_on_off == 0)) {
+ /*
+ * For those that have done a FR we must take
+ * special consideration if we strike. I.e the
+ * biggest_newly_acked must be higher than the
+ * sending_seq at the time we did the FR.
+ */
+ if (
+#ifdef SCTP_FR_TO_ALTERNATE
+ /*
+ * If FR's go to new networks, then we must only do
+ * this for singly homed asoc's. However if the FR's
+ * go to the same network (Armando's work) then it's
+ * ok to FR multiple times.
+ */
+ (asoc->numnets < 2)
+#else
+ (1)
+#endif
+ ) {
+
+ if ((compare_with_wrap(biggest_tsn_newly_acked,
+ tp1->rec.data.fast_retran_tsn, MAX_TSN)) ||
+ (biggest_tsn_newly_acked ==
+ tp1->rec.data.fast_retran_tsn)) {
+ /*
+ * Strike the TSN, since this ack is
+ * beyond where things were when we
+ * did a FR.
+ */
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
+ sctp_log_fr(biggest_tsn_newly_acked,
+ tp1->rec.data.TSN_seq,
+ tp1->sent,
+ SCTP_FR_LOG_STRIKE_CHUNK);
+ }
+ if (tp1->sent < SCTP_DATAGRAM_RESEND) {
+ tp1->sent++;
+ }
+ strike_flag = 1;
+ if ((asoc->sctp_cmt_on_off == 1) &&
+ SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
+ /*
+ * CMT DAC algorithm: If
+ * the SACK flag is set to
+ * 0, then the lowest_newack
+ * test will not pass
+ * because it would have
+ * been set to the cumack
+ * earlier. If tp1 is not
+ * already marked to be
+ * rtx'd, if this is not a
+ * mixed sack and if tp1 is
+ * not between two sacked
+ * TSNs, then mark it by one
+ * more. NOTE that we are
+ * marking by one additional
+ * time since the SACK DAC
+ * flag indicates that two
+ * packets have been
+ * received after this
+ * missing TSN.
+ */
+ if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
+ (num_dests_sacked == 1) &&
+ compare_with_wrap(this_sack_lowest_newack,
+ tp1->rec.data.TSN_seq, MAX_TSN)) {
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
+ sctp_log_fr(32 + num_dests_sacked,
+ tp1->rec.data.TSN_seq,
+ tp1->sent,
+ SCTP_FR_LOG_STRIKE_CHUNK);
+ }
+ if (tp1->sent < SCTP_DATAGRAM_RESEND) {
+ tp1->sent++;
+ }
+ }
+ }
+ }
+ }
+ /*
+ * JRI: TODO: remove code for HTNA algo. CMT's SFR
+ * algo covers HTNA.
+ */
+ } else if (compare_with_wrap(tp1->rec.data.TSN_seq,
+ biggest_tsn_newly_acked, MAX_TSN)) {
+ /*
+ * We don't strike these: This is the HTNA
+ * algorithm, i.e. we don't strike if our TSN is
+ * larger than the Highest TSN Newly Acked.
+ */
+ ;
+ } else {
+ /* Strike the TSN */
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
+ sctp_log_fr(biggest_tsn_newly_acked,
+ tp1->rec.data.TSN_seq,
+ tp1->sent,
+ SCTP_FR_LOG_STRIKE_CHUNK);
+ }
+ if (tp1->sent < SCTP_DATAGRAM_RESEND) {
+ tp1->sent++;
+ }
+ if ((asoc->sctp_cmt_on_off == 1) &&
+ SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
+ /*
+ * CMT DAC algorithm: If the SACK flag is set to
+ * 0, then the lowest_newack test will not pass
+ * because it would have been set to the
+ * cumack earlier. If tp1 is not already marked
+ * to be rtx'd, if this is not a mixed sack and
+ * if tp1 is not between two sacked TSNs, then
+ * mark it by one more. NOTE that we are marking
+ * by one additional time since the SACK DAC flag
+ * indicates that two packets have been
+ * received after this missing TSN.
+ */
+ if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
+ compare_with_wrap(this_sack_lowest_newack, tp1->rec.data.TSN_seq, MAX_TSN)) {
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
+ sctp_log_fr(48 + num_dests_sacked,
+ tp1->rec.data.TSN_seq,
+ tp1->sent,
+ SCTP_FR_LOG_STRIKE_CHUNK);
+ }
+ tp1->sent++;
+ }
+ }
+ }
+ if (tp1->sent == SCTP_DATAGRAM_RESEND) {
+ struct sctp_nets *alt;
+
+ /* fix counts and things */
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
+ sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND,
+ (tp1->whoTo ? (tp1->whoTo->flight_size) : 0),
+ tp1->book_size,
+ (uintptr_t) tp1->whoTo,
+ tp1->rec.data.TSN_seq);
+ }
+ if (tp1->whoTo) {
+ tp1->whoTo->net_ack++;
+ sctp_flight_size_decrease(tp1);
+ }
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
+ sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
+ asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
+ }
+ /* add back to the rwnd */
+ asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
+
+ /* remove from the total flight */
+ sctp_total_flight_decrease(stcb, tp1);
+
+ if ((stcb->asoc.peer_supports_prsctp) &&
+ (PR_SCTP_RTX_ENABLED(tp1->flags))) {
+ /*
+ * Has it been retransmitted tv_sec times? -
+ * we store the retran count there.
+ */
+ if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
+ /* Yes, so drop it */
+ if (tp1->data != NULL) {
+ (void)sctp_release_pr_sctp_chunk(stcb, tp1,
+ (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
+ SCTP_SO_NOT_LOCKED);
+ }
+ /* Make sure to flag we had a FR */
+ tp1->whoTo->net_ack++;
+ tp1 = TAILQ_NEXT(tp1, sctp_next);
+ continue;
+ }
+ }
+ /* printf("OK, we are now ready to FR this guy\n"); */
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
+ sctp_log_fr(tp1->rec.data.TSN_seq, tp1->snd_count,
+ 0, SCTP_FR_MARKED);
+ }
+ if (strike_flag) {
+ /* This is a subsequent FR */
+ SCTP_STAT_INCR(sctps_sendmultfastretrans);
+ }
+ sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
+ if (asoc->sctp_cmt_on_off == 1) {
+ /*
+ * CMT: Using RTX_SSTHRESH policy for CMT.
+ * If CMT is being used, then pick dest with
+ * largest ssthresh for any retransmission.
+ */
+ tp1->no_fr_allowed = 1;
+ alt = tp1->whoTo;
+ /* sa_ignore NO_NULL_CHK */
+ if (asoc->sctp_cmt_pf > 0) {
+ /*
+ * JRS 5/18/07 - If CMT PF is on,
+ * use the PF version of
+ * find_alt_net()
+ */
+ alt = sctp_find_alternate_net(stcb, alt, 2);
+ } else {
+ /*
+ * JRS 5/18/07 - If only CMT is on,
+ * use the CMT version of
+ * find_alt_net()
+ */
+ /* sa_ignore NO_NULL_CHK */
+ alt = sctp_find_alternate_net(stcb, alt, 1);
+ }
+ if (alt == NULL) {
+ alt = tp1->whoTo;
+ }
+ /*
+ * CUCv2: If a different dest is picked for
+ * the retransmission, then new
+ * (rtx-)pseudo_cumack needs to be tracked
+ * for orig dest. Let CUCv2 track new (rtx-)
+ * pseudo-cumack always.
+ */
+ if (tp1->whoTo) {
+ tp1->whoTo->find_pseudo_cumack = 1;
+ tp1->whoTo->find_rtx_pseudo_cumack = 1;
+ }
+ } else {/* CMT is OFF */
+
+#ifdef SCTP_FR_TO_ALTERNATE
+ /* Can we find an alternate? */
+ alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
+#else
+ /*
+ * default behavior is to NOT retransmit
+ * FR's to an alternate. Armando Caro's
+ * paper details why.
+ */
+ alt = tp1->whoTo;
+#endif
+ }
+
+ tp1->rec.data.doing_fast_retransmit = 1;
+ tot_retrans++;
+ /* mark the sending seq for possible subsequent FR's */
+ /*
+ * printf("Marking TSN for FR new value %x\n",
+ * (uint32_t)tpi->rec.data.TSN_seq);
+ */
+ if (TAILQ_EMPTY(&asoc->send_queue)) {
+ /*
+ * If the send queue is empty then this is
+ * the next sequence number that will be
+ * assigned, so we subtract one from it to
+ * get the one we last sent.
+ */
+ tp1->rec.data.fast_retran_tsn = sending_seq;
+ } else {
+ /*
+ * If there are chunks on the send queue
+ * (unsent data that has made it from the
+ * stream queues but not out the door), we
+ * take the first one (which will have the
+ * lowest TSN) and subtract one to get the
+ * one we last sent.
+ */
+ struct sctp_tmit_chunk *ttt;
+
+ ttt = TAILQ_FIRST(&asoc->send_queue);
+ tp1->rec.data.fast_retran_tsn =
+ ttt->rec.data.TSN_seq;
+ }
+
+ if (tp1->do_rtt) {
+ /*
+ * this guy had an RTO calculation pending on
+ * it, cancel it
+ */
+ tp1->do_rtt = 0;
+ }
+ if (alt != tp1->whoTo) {
+ /* yes, there is an alternate. */
+ sctp_free_remote_addr(tp1->whoTo);
+ /* sa_ignore FREED_MEMORY */
+ tp1->whoTo = alt;
+ atomic_add_int(&alt->ref_count, 1);
+ }
+ }
+ tp1 = TAILQ_NEXT(tp1, sctp_next);
+ } /* while (tp1) */
+
+ if (tot_retrans > 0) {
+ /*
+ * Set up the ECN nonce re-sync point. We do this since once
+ * we go to FR on something we introduce a Karn's rule scenario
+ * and won't know the totals for the ECN bits.
+ */
+ asoc->nonce_resync_tsn = sending_seq;
+ asoc->nonce_wait_for_ecne = 0;
+ asoc->nonce_sum_check = 0;
+ }
+}
+
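+/*
+ * Scan the head of the sent queue for PR-SCTP chunks that can now be
+ * skipped, advance asoc->advanced_peer_ack_point over them, and return
+ * the last chunk a FORWARD-TSN should cover (NULL if none).
+ */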
+struct sctp_tmit_chunk *
+sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
+ struct sctp_association *asoc)
+{
+ struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
+ struct timeval now;
+ int now_filled = 0;
+
+ if (asoc->peer_supports_prsctp == 0) {
+ return (NULL);
+ }
+ tp1 = TAILQ_FIRST(&asoc->sent_queue);
+ while (tp1) {
+ if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
+ tp1->sent != SCTP_DATAGRAM_RESEND) {
+ /* no chance to advance, out of here */
+ break;
+ }
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
+ if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
+ sctp_misc_ints(SCTP_FWD_TSN_CHECK,
+ asoc->advanced_peer_ack_point,
+ tp1->rec.data.TSN_seq, 0, 0);
+ }
+ }
+ if (!PR_SCTP_ENABLED(tp1->flags)) {
+ /*
+ * We can't fwd-tsn past any chunk that is reliable,
+ * i.e. one that will be retransmitted until the asoc
+ * fails.
+ */
+ break;
+ }
+ if (!now_filled) {
+ (void)SCTP_GETTIME_TIMEVAL(&now);
+ now_filled = 1;
+ }
+ tp2 = TAILQ_NEXT(tp1, sctp_next);
+ /*
+ * now we have a chunk which is marked for another
+ * retransmission to a PR-stream but has maybe run out of
+ * its chances already OR has been marked to skip now.
+ * Can we skip it if it's a resend?
+ */
+ if (tp1->sent == SCTP_DATAGRAM_RESEND &&
+ (PR_SCTP_TTL_ENABLED(tp1->flags))) {
+ /*
+ * Now is this one marked for resend and its time is
+ * now up?
+ */
+ if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
+ /* Yes so drop it */
+ if (tp1->data) {
+ (void)sctp_release_pr_sctp_chunk(stcb, tp1,
+ (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
+ SCTP_SO_NOT_LOCKED);
+ }
+ } else {
+ /*
+ * No, we are done when we hit one marked for
+ * resend whose time has not expired.
+ */
+ break;
+ }
+ }
+ /*
+ * Ok, now if this chunk is marked to be dropped, we can
+ * clean it up, advance our peer ack point, and check
+ * the next chunk.
+ */
+ if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
+ /* advance PeerAckPoint goes forward */
+ if (compare_with_wrap(tp1->rec.data.TSN_seq,
+ asoc->advanced_peer_ack_point,
+ MAX_TSN)) {
+
+ asoc->advanced_peer_ack_point = tp1->rec.data.TSN_seq;
+ a_adv = tp1;
+ } else if (tp1->rec.data.TSN_seq == asoc->advanced_peer_ack_point) {
+ /* No update but we do save the chk */
+ a_adv = tp1;
+ }
+ } else {
+ /*
+ * If it is still in RESEND we can advance no
+ * further
+ */
+ break;
+ }
+ /*
+ * If we hit here we just dumped tp1, move to next tsn on
+ * sent queue.
+ */
+ tp1 = tp2;
+ }
+ return (a_adv);
+}
+
+static int
+sctp_fs_audit(struct sctp_association *asoc)
+{
+ struct sctp_tmit_chunk *chk;
+ int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0;
+ int entry_flight, entry_cnt, ret;
+
+ entry_flight = asoc->total_flight;
+ entry_cnt = asoc->total_flight_count;
+ ret = 0;
+
+ if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt)
+ return (0);
+
+ TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
+ if (chk->sent < SCTP_DATAGRAM_RESEND) {
+ printf("Chk TSN:%u size:%d inflight cnt:%d\n",
+ chk->rec.data.TSN_seq,
+ chk->send_size,
+ chk->snd_count
+ );
+ inflight++;
+ } else if (chk->sent == SCTP_DATAGRAM_RESEND) {
+ resend++;
+ } else if (chk->sent < SCTP_DATAGRAM_ACKED) {
+ inbetween++;
+ } else if (chk->sent > SCTP_DATAGRAM_ACKED) {
+ above++;
+ } else {
+ acked++;
+ }
+ }
+
+ if ((inflight > 0) || (inbetween > 0)) {
+#ifdef INVARIANTS
+ panic("Flight size-express incorrect? \n");
+#else
+ printf("asoc->total_flight:%d cnt:%d\n",
+ entry_flight, entry_cnt);
+
+ SCTP_PRINTF("Flight size-express incorrect F:%d I:%d R:%d Ab:%d ACK:%d\n",
+ inflight, inbetween, resend, above, acked);
+ ret = 1;
+#endif
+ }
+ return (ret);
+}
+
+
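+/*
+ * A chunk that only went out as a window probe is pulled back out of
+ * the flight-size accounting and marked for resend, so it behaves as
+ * if it were still waiting to be sent.
+ */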
+static void
+sctp_window_probe_recovery(struct sctp_tcb *stcb,
+ struct sctp_association *asoc,
+ struct sctp_nets *net,
+ struct sctp_tmit_chunk *tp1)
+{
+ tp1->window_probe = 0;
+ if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) {
+ /* TSN's skipped we do NOT move back. */
+ sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD,
+ tp1->whoTo->flight_size,
+ tp1->book_size,
+ (uintptr_t) tp1->whoTo,
+ tp1->rec.data.TSN_seq);
+ return;
+ }
+ /* First setup this by shrinking flight */
+ sctp_flight_size_decrease(tp1);
+ sctp_total_flight_decrease(stcb, tp1);
+ /* Now mark for resend */
+ tp1->sent = SCTP_DATAGRAM_RESEND;
+ sctp_ucount_incr(asoc->sent_queue_retran_cnt);
+
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
+ sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP,
+ tp1->whoTo->flight_size,
+ tp1->book_size,
+ (uintptr_t) tp1->whoTo,
+ tp1->rec.data.TSN_seq);
+ }
+}
+
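+/*
+ * Express (fast-path) SACK handling: taken when a SACK (or a SHUTDOWN,
+ * see sctp_update_acked() below) carries only a cumulative ack and no
+ * gap-ack blocks, so the sent queue can simply be trimmed up to the
+ * cumack.
+ */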
+void
+sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
+ uint32_t rwnd, int nonce_sum_flag, int *abort_now)
+{
+ struct sctp_nets *net;
+ struct sctp_association *asoc;
+ struct sctp_tmit_chunk *tp1, *tp2;
+ uint32_t old_rwnd;
+ int win_probe_recovery = 0;
+ int win_probe_recovered = 0;
+ int j, done_once = 0;
+
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
+ sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
+ rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
+ }
+ SCTP_TCB_LOCK_ASSERT(stcb);
+#ifdef SCTP_ASOCLOG_OF_TSNS
+ stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
+ stcb->asoc.cumack_log_at++;
+ if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
+ stcb->asoc.cumack_log_at = 0;
+ }
+#endif
+ asoc = &stcb->asoc;
+ old_rwnd = asoc->peers_rwnd;
+ if (compare_with_wrap(asoc->last_acked_seq, cumack, MAX_TSN)) {
+ /* old ack */
+ return;
+ } else if (asoc->last_acked_seq == cumack) {
+ /* Window update sack */
+ asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
+ (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
+ if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
+ /* SWS sender side engages */
+ asoc->peers_rwnd = 0;
+ }
+ if (asoc->peers_rwnd > old_rwnd) {
+ goto again;
+ }
+ return;
+ }
+ /* First setup for CC stuff */
+ TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+ net->prev_cwnd = net->cwnd;
+ net->net_ack = 0;
+ net->net_ack2 = 0;
+
+ /*
+ * CMT: Reset CUC and Fast recovery algo variables before
+ * SACK processing
+ */
+ net->new_pseudo_cumack = 0;
+ net->will_exit_fast_recovery = 0;
+ }
+ if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
+ uint32_t send_s;
+
+ if (!TAILQ_EMPTY(&asoc->sent_queue)) {
+ tp1 = TAILQ_LAST(&asoc->sent_queue,
+ sctpchunk_listhead);
+ send_s = tp1->rec.data.TSN_seq + 1;
+ } else {
+ send_s = asoc->sending_seq;
+ }
+ if ((cumack == send_s) ||
+ compare_with_wrap(cumack, send_s, MAX_TSN)) {
+#ifndef INVARIANTS
+ struct mbuf *oper;
+
+#endif
+#ifdef INVARIANTS
+ panic("Impossible sack 1");
+#else
+
+ *abort_now = 1;
+ /* XXX */
+ oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
+ 0, M_DONTWAIT, 1, MT_DATA);
+ if (oper) {
+ struct sctp_paramhdr *ph;
+ uint32_t *ippp;
+
+ SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
+ sizeof(uint32_t);
+ ph = mtod(oper, struct sctp_paramhdr *);
+ ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
+ ph->param_length = htons(SCTP_BUF_LEN(oper));
+ ippp = (uint32_t *) (ph + 1);
+ *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
+ }
+ stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
+ sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
+ return;
+#endif
+ }
+ }
+ asoc->this_sack_highest_gap = cumack;
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
+ sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
+ stcb->asoc.overall_error_count,
+ 0,
+ SCTP_FROM_SCTP_INDATA,
+ __LINE__);
+ }
+ stcb->asoc.overall_error_count = 0;
+ if (compare_with_wrap(cumack, asoc->last_acked_seq, MAX_TSN)) {
+ /* process the new consecutive TSN first */
+ tp1 = TAILQ_FIRST(&asoc->sent_queue);
+ while (tp1) {
+ tp2 = TAILQ_NEXT(tp1, sctp_next);
+ if (compare_with_wrap(cumack, tp1->rec.data.TSN_seq,
+ MAX_TSN) ||
+ cumack == tp1->rec.data.TSN_seq) {
+ if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
+ printf("Warning, an unsent is now acked?\n");
+ }
+ /*
+ * ECN Nonce: Add the nonce to the sender's
+ * nonce sum
+ */
+ asoc->nonce_sum_expect_base += tp1->rec.data.ect_nonce;
+ if (tp1->sent < SCTP_DATAGRAM_ACKED) {
+ /*
+ * If it is less than ACKED, it is
+ * now no-longer in flight. Higher
+ * values may occur during marking
+ */
+ if (tp1->sent < SCTP_DATAGRAM_RESEND) {
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
+ sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
+ tp1->whoTo->flight_size,
+ tp1->book_size,
+ (uintptr_t) tp1->whoTo,
+ tp1->rec.data.TSN_seq);
+ }
+ sctp_flight_size_decrease(tp1);
+ /* sa_ignore NO_NULL_CHK */
+ sctp_total_flight_decrease(stcb, tp1);
+ }
+ tp1->whoTo->net_ack += tp1->send_size;
+ if (tp1->snd_count < 2) {
+ /*
+ * True non-retransmitted
+ * chunk
+ */
+ tp1->whoTo->net_ack2 +=
+ tp1->send_size;
+
+ /* update RTO too? */
+ if (tp1->do_rtt) {
+ tp1->whoTo->RTO =
+ /*
+ * sa_ignore
+ * NO_NULL_CHK
+ */
+ sctp_calculate_rto(stcb,
+ asoc, tp1->whoTo,
+ &tp1->sent_rcv_time,
+ sctp_align_safe_nocopy);
+ tp1->do_rtt = 0;
+ }
+ }
+ /*
+ * CMT: CUCv2 algorithm. From the
+ * cumack'd TSNs, for each TSN being
+ * acked for the first time, set the
+ * following variables for the
+ * corresp destination.
+ * new_pseudo_cumack will trigger a
+ * cwnd update.
+ * find_(rtx_)pseudo_cumack will
+ * trigger search for the next
+ * expected (rtx-)pseudo-cumack.
+ */
+ tp1->whoTo->new_pseudo_cumack = 1;
+ tp1->whoTo->find_pseudo_cumack = 1;
+ tp1->whoTo->find_rtx_pseudo_cumack = 1;
+
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
+ /* sa_ignore NO_NULL_CHK */
+ sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
+ }
+ }
+ if (tp1->sent == SCTP_DATAGRAM_RESEND) {
+ sctp_ucount_decr(asoc->sent_queue_retran_cnt);
+ }
+ if (tp1->rec.data.chunk_was_revoked) {
+ /* deflate the cwnd */
+ tp1->whoTo->cwnd -= tp1->book_size;
+ tp1->rec.data.chunk_was_revoked = 0;
+ }
+ tp1->sent = SCTP_DATAGRAM_ACKED;
+ TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
+ if (tp1->data) {
+ /* sa_ignore NO_NULL_CHK */
+ sctp_free_bufspace(stcb, asoc, tp1, 1);
+ sctp_m_freem(tp1->data);
+ }
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
+ sctp_log_sack(asoc->last_acked_seq,
+ cumack,
+ tp1->rec.data.TSN_seq,
+ 0,
+ 0,
+ SCTP_LOG_FREE_SENT);
+ }
+ tp1->data = NULL;
+ asoc->sent_queue_cnt--;
+ sctp_free_a_chunk(stcb, tp1);
+ tp1 = tp2;
+ } else {
+ break;
+ }
+ }
+
+ }
+ /* sa_ignore NO_NULL_CHK */
+ if (stcb->sctp_socket) {
+#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+ struct socket *so;
+
+#endif
+ SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
+ /* sa_ignore NO_NULL_CHK */
+ sctp_wakeup_log(stcb, cumack, 1, SCTP_WAKESND_FROM_SACK);
+ }
+#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+ so = SCTP_INP_SO(stcb->sctp_ep);
+ atomic_add_int(&stcb->asoc.refcnt, 1);
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_SOCKET_LOCK(so, 1);
+ SCTP_TCB_LOCK(stcb);
+ atomic_subtract_int(&stcb->asoc.refcnt, 1);
+ if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
+ /* assoc was freed while we were unlocked */
+ SCTP_SOCKET_UNLOCK(so, 1);
+ return;
+ }
+#endif
+ sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
+#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+ SCTP_SOCKET_UNLOCK(so, 1);
+#endif
+ } else {
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
+ sctp_wakeup_log(stcb, cumack, 1, SCTP_NOWAKE_FROM_SACK);
+ }
+ }
+
+ /* JRS - Use the congestion control given in the CC module */
+ if (asoc->last_acked_seq != cumack)
+ asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
+
+ asoc->last_acked_seq = cumack;
+
+ if (TAILQ_EMPTY(&asoc->sent_queue)) {
+ /* nothing left in-flight */
+ TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+ net->flight_size = 0;
+ net->partial_bytes_acked = 0;
+ }
+ asoc->total_flight = 0;
+ asoc->total_flight_count = 0;
+ }
+ /* ECN Nonce updates */
+ if (asoc->ecn_nonce_allowed) {
+ if (asoc->nonce_sum_check) {
+ if (nonce_sum_flag != ((asoc->nonce_sum_expect_base) & SCTP_SACK_NONCE_SUM)) {
+ if (asoc->nonce_wait_for_ecne == 0) {
+ struct sctp_tmit_chunk *lchk;
+
+ lchk = TAILQ_FIRST(&asoc->send_queue);
+ asoc->nonce_wait_for_ecne = 1;
+ if (lchk) {
+ asoc->nonce_wait_tsn = lchk->rec.data.TSN_seq;
+ } else {
+ asoc->nonce_wait_tsn = asoc->sending_seq;
+ }
+ } else {
+ if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_wait_tsn, MAX_TSN) ||
+ (asoc->last_acked_seq == asoc->nonce_wait_tsn)) {
+ /*
+ * Misbehaving peer. We need
+ * to react to this guy
+ */
+ asoc->ecn_allowed = 0;
+ asoc->ecn_nonce_allowed = 0;
+ }
+ }
+ }
+ } else {
+ /* See if Resynchronization Possible */
+ if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_resync_tsn, MAX_TSN)) {
+ asoc->nonce_sum_check = 1;
+ /*
+ * Now we must calculate what the base is.
+ * We do this based on two things: we know
+ * the totals for all the segments
+ * gap-acked in the SACK (none here). We
+ * also know the SACK's nonce sum, it's in
+ * nonce_sum_flag. So we can build a truth
+ * table to back-calculate the new value of
+ * asoc->nonce_sum_expect_base:
+ *
+ * SACK-flag-Value   Seg-Sums   Base
+ *        0             0         0
+ *        1             0         1
+ *        0             1         1
+ *        1             1         0
+ */
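+ /*
+ * The table is simply XOR; with no gap-acked segments the
+ * seg-sums term is 0, hence the (0 ^ nonce_sum_flag) below.
+ */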
+ asoc->nonce_sum_expect_base = (0 ^ nonce_sum_flag) & SCTP_SACK_NONCE_SUM;
+ }
+ }
+ }
+ /* RWND update */
+ asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
+ (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
+ if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
+ /* SWS sender side engages */
+ asoc->peers_rwnd = 0;
+ }
+ if (asoc->peers_rwnd > old_rwnd) {
+ win_probe_recovery = 1;
+ }
+ /* Now assure a timer where data is queued at */
+again:
+ j = 0;
+ TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+ int to_ticks;
+
+ if (win_probe_recovery && (net->window_probe)) {
+ win_probe_recovered = 1;
+ /*
+ * Find the first chunk that was used with the window
+ * probe and clear its window-probe mark
+ */
+ /* sa_ignore FREED_MEMORY */
+ TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
+ if (tp1->window_probe) {
+ /* move back to data send queue */
+ sctp_window_probe_recovery(stcb, asoc, net, tp1);
+ break;
+ }
+ }
+ }
+ if (net->RTO == 0) {
+ to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
+ } else {
+ to_ticks = MSEC_TO_TICKS(net->RTO);
+ }
+ if (net->flight_size) {
+ j++;
+ (void)SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
+ sctp_timeout_handler, &net->rxt_timer);
+ if (net->window_probe) {
+ net->window_probe = 0;
+ }
+ } else {
+ if (net->window_probe) {
+ /*
+ * In window probes we must assure a timer
+ * is still running there
+ */
+ net->window_probe = 0;
+ if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
+ SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
+ sctp_timeout_handler, &net->rxt_timer);
+ }
+ } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
+ sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
+ stcb, net,
+ SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
+ }
+ if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
+ if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
+ SCTP_STAT_INCR(sctps_earlyfrstpidsck4);
+ sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
+ SCTP_FROM_SCTP_INDATA + SCTP_LOC_23);
+ }
+ }
+ }
+ }
+ if ((j == 0) &&
+ (!TAILQ_EMPTY(&asoc->sent_queue)) &&
+ (asoc->sent_queue_retran_cnt == 0) &&
+ (win_probe_recovered == 0) &&
+ (done_once == 0)) {
+ /*
+ * huh, this should not happen unless all packets are
+ * PR-SCTP and marked to skip of course.
+ */
+ if (sctp_fs_audit(asoc)) {
+ TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+ net->flight_size = 0;
+ }
+ asoc->total_flight = 0;
+ asoc->total_flight_count = 0;
+ asoc->sent_queue_retran_cnt = 0;
+ TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
+ if (tp1->sent < SCTP_DATAGRAM_RESEND) {
+ sctp_flight_size_increase(tp1);
+ sctp_total_flight_increase(stcb, tp1);
+ } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
+ sctp_ucount_incr(asoc->sent_queue_retran_cnt);
+ }
+ }
+ }
+ done_once = 1;
+ goto again;
+ }
+ /**********************************/
+ /* Now what about shutdown issues */
+ /**********************************/
+ if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
+ /* nothing left on sendqueue.. consider done */
+ /* clean up */
+ if ((asoc->stream_queue_cnt == 1) &&
+ ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
+ (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
+ (asoc->locked_on_sending)
+ ) {
+ struct sctp_stream_queue_pending *sp;
+
+ /*
+ * We may be in a state where we got all the data across
+ * but cannot write more due to a shutdown... we abort
+ * since the user did not indicate EOR in this case.
+ * The sp will be cleaned during free of the asoc.
+ */
+ sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
+ sctp_streamhead);
+ if ((sp) && (sp->length == 0)) {
+ /* Let cleanup code purge it */
+ if (sp->msg_is_complete) {
+ asoc->stream_queue_cnt--;
+ } else {
+ asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
+ asoc->locked_on_sending = NULL;
+ asoc->stream_queue_cnt--;
+ }
+ }
+ }
+ if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
+ (asoc->stream_queue_cnt == 0)) {
+ if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
+ /* Need to abort here */
+ struct mbuf *oper;
+
+ abort_out_now:
+ *abort_now = 1;
+ /* XXX */
+ oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
+ 0, M_DONTWAIT, 1, MT_DATA);
+ if (oper) {
+ struct sctp_paramhdr *ph;
+ uint32_t *ippp;
+
+ SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
+ sizeof(uint32_t);
+ ph = mtod(oper, struct sctp_paramhdr *);
+ ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
+ ph->param_length = htons(SCTP_BUF_LEN(oper));
+ ippp = (uint32_t *) (ph + 1);
+ *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_24);
+ }
+ stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
+ sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_RESPONSE_TO_USER_REQ, oper, SCTP_SO_NOT_LOCKED);
+ } else {
+ if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
+ (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
+ SCTP_STAT_DECR_GAUGE32(sctps_currestab);
+ }
+ SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
+ SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
+ sctp_stop_timers_for_shutdown(stcb);
+ sctp_send_shutdown(stcb,
+ stcb->asoc.primary_destination);
+ sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
+ stcb->sctp_ep, stcb, asoc->primary_destination);
+ sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
+ stcb->sctp_ep, stcb, asoc->primary_destination);
+ }
+ } else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
+ (asoc->stream_queue_cnt == 0)) {
+ if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
+ goto abort_out_now;
+ }
+ SCTP_STAT_DECR_GAUGE32(sctps_currestab);
+ SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
+ SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
+ sctp_send_shutdown_ack(stcb,
+ stcb->asoc.primary_destination);
+ sctp_stop_timers_for_shutdown(stcb);
+ sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
+ stcb->sctp_ep, stcb, asoc->primary_destination);
+ }
+ }
+ /*********************************************/
+ /* Here we perform PR-SCTP procedures */
+ /* (section 4.2) */
+ /*********************************************/
+ /* C1. update advancedPeerAckPoint */
+ if (compare_with_wrap(cumack, asoc->advanced_peer_ack_point, MAX_TSN)) {
+ asoc->advanced_peer_ack_point = cumack;
+ }
+ /* PR-Sctp issues need to be addressed too */
+ if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) {
+ struct sctp_tmit_chunk *lchk;
+ uint32_t old_adv_peer_ack_point;
+
+ old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
+ lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
+ /* C3. See if we need to send a Fwd-TSN */
+ if (compare_with_wrap(asoc->advanced_peer_ack_point, cumack,
+ MAX_TSN)) {
+ /*
+ * ISSUE with ECN, see FWD-TSN processing for notes
+ * on issues that will occur when the ECN NONCE
+ * stuff is put into SCTP for cross checking.
+ */
+ if (compare_with_wrap(asoc->advanced_peer_ack_point, old_adv_peer_ack_point,
+ MAX_TSN)) {
+ send_forward_tsn(stcb, asoc);
+ /*
+ * ECN Nonce: Disable Nonce Sum check when
+ * FWD TSN is sent and store resync tsn
+ */
+ asoc->nonce_sum_check = 0;
+ asoc->nonce_resync_tsn = asoc->advanced_peer_ack_point;
+ } else if (lchk) {
+ /* try to FR fwd-tsn's that get lost too */
+ if (lchk->rec.data.fwd_tsn_cnt >= 3) {
+ send_forward_tsn(stcb, asoc);
+ }
+ }
+ }
+ if (lchk) {
+ /* Assure a timer is up */
+ sctp_timer_start(SCTP_TIMER_TYPE_SEND,
+ stcb->sctp_ep, stcb, lchk->whoTo);
+ }
+ }
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
+ sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
+ rwnd,
+ stcb->asoc.peers_rwnd,
+ stcb->asoc.total_flight,
+ stcb->asoc.total_output_queue_size);
+ }
+}
+
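+/*
+ * Full (slow-path) SACK handling: processes the cumulative ack, the
+ * gap-ack and NR gap-ack blocks, duplicate TSN reports, revoking,
+ * fast-retransmit strikes and the PR-SCTP FORWARD-TSN procedures.
+ */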
+void
+sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
+ struct sctp_tcb *stcb, struct sctp_nets *net_from,
+ uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup,
+ int *abort_now, uint8_t flags,
+ uint32_t cum_ack, uint32_t rwnd)
+{
+ struct sctp_association *asoc;
+ struct sctp_tmit_chunk *tp1, *tp2;
+ uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack;
+ uint32_t sav_cum_ack;
+ uint16_t wake_him = 0;
+ uint32_t send_s = 0;
+ long j;
+ int accum_moved = 0;
+ int will_exit_fast_recovery = 0;
+ uint32_t a_rwnd, old_rwnd;
+ int win_probe_recovery = 0;
+ int win_probe_recovered = 0;
+ struct sctp_nets *net = NULL;
+ int nonce_sum_flag, ecn_seg_sums = 0;
+ int done_once;
+ uint8_t reneged_all = 0;
+ uint8_t cmt_dac_flag;
+
+ /*
+ * we take any chance we can to service our queues since we cannot
+ * get awoken when the socket is read from :<
+ */
+ /*
+ * Now perform the actual SACK handling:
+ * 1) Verify that it is not an old sack; if so, discard.
+ * 2) If there is nothing left in the send queue (cum-ack is equal
+ *    to last acked) then you have a duplicate too; update any rwnd
+ *    change and verify no timers are running, then return.
+ * 3) Process any new consecutive data, i.e. cum-ack moved; process
+ *    these first and note that it moved.
+ * 4) Process any sack blocks.
+ * 5) Drop any acked chunks from the queue.
+ * 6) Check for any revoked blocks and mark them.
+ * 7) Update the cwnd.
+ * 8) If nothing is left, sync up flightsizes and things, stop all
+ *    timers and also check for shutdown_pending state. If so then
+ *    go ahead and send off the shutdown. If in shutdown recv, send
+ *    off the shutdown-ack and start that timer, then return.
+ * 9) Strike any non-acked things and do FR procedure if needed,
+ *    being sure to set the FR flag.
+ * 10) Do pr-sctp procedures.
+ * 11) Apply any FR penalties.
+ * 12) Assure we will SACK if in shutdown_recv state.
+ */
+ SCTP_TCB_LOCK_ASSERT(stcb);
+ /* CMT DAC algo */
+ this_sack_lowest_newack = 0;
+ j = 0;
+ SCTP_STAT_INCR(sctps_slowpath_sack);
+ last_tsn = cum_ack;
+ nonce_sum_flag = flags & SCTP_SACK_NONCE_SUM;
+ cmt_dac_flag = flags & SCTP_SACK_CMT_DAC;
+#ifdef SCTP_ASOCLOG_OF_TSNS
+ stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
+ stcb->asoc.cumack_log_at++;
+ if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
+ stcb->asoc.cumack_log_at = 0;
+ }
+#endif
+ a_rwnd = rwnd;
+
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
+ sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
+ rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
+ }
+ old_rwnd = stcb->asoc.peers_rwnd;
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
+ sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
+ stcb->asoc.overall_error_count,
+ 0,
+ SCTP_FROM_SCTP_INDATA,
+ __LINE__);
+ }
+ stcb->asoc.overall_error_count = 0;
+ asoc = &stcb->asoc;
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
+ sctp_log_sack(asoc->last_acked_seq,
+ cum_ack,
+ 0,
+ num_seg,
+ num_dup,
+ SCTP_LOG_NEW_SACK);
+ }
+ if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_FR_LOGGING_ENABLE | SCTP_EARLYFR_LOGGING_ENABLE))) {
+ uint16_t i;
+ uint32_t *dupdata, dblock;
+
+ for (i = 0; i < num_dup; i++) {
+ dupdata = (uint32_t *) sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t),
+ sizeof(uint32_t), (uint8_t *) & dblock);
+ if (dupdata == NULL) {
+ break;
+ }
+ sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
+ }
+ }
+ if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
+ /* reality check */
+ if (!TAILQ_EMPTY(&asoc->sent_queue)) {
+ tp1 = TAILQ_LAST(&asoc->sent_queue,
+ sctpchunk_listhead);
+ send_s = tp1->rec.data.TSN_seq + 1;
+ } else {
+ tp1 = NULL;
+ send_s = asoc->sending_seq;
+ }
+ if (cum_ack == send_s ||
+ compare_with_wrap(cum_ack, send_s, MAX_TSN)) {
+ struct mbuf *oper;
+
+ /*
+ * no way, we have not even sent this TSN out yet.
+ * Peer is hopelessly messed up with us.
+ */
+ printf("NEW cum_ack:%x send_s:%x is smaller or equal\n",
+ cum_ack, send_s);
+ if (tp1) {
+ printf("Got send_s from tsn:%x + 1 of tp1:%p\n",
+ tp1->rec.data.TSN_seq, tp1);
+ }
+ hopeless_peer:
+ *abort_now = 1;
+ /* XXX */
+ oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
+ 0, M_DONTWAIT, 1, MT_DATA);
+ if (oper) {
+ struct sctp_paramhdr *ph;
+ uint32_t *ippp;
+
+ SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
+ sizeof(uint32_t);
+ ph = mtod(oper, struct sctp_paramhdr *);
+ ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
+ ph->param_length = htons(SCTP_BUF_LEN(oper));
+ ippp = (uint32_t *) (ph + 1);
+ *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
+ }
+ stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
+ sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
+ return;
+ }
+ }
+ /**********************/
+ /* 1) check the range */
+ /**********************/
+ if (compare_with_wrap(asoc->last_acked_seq, last_tsn, MAX_TSN)) {
+ /* acking something behind */
+ return;
+ }
+ sav_cum_ack = asoc->last_acked_seq;
+
+ /* update the Rwnd of the peer */
+ if (TAILQ_EMPTY(&asoc->sent_queue) &&
+ TAILQ_EMPTY(&asoc->send_queue) &&
+ (asoc->stream_queue_cnt == 0)) {
+ /* nothing left on send/sent and strmq */
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
+ sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
+ asoc->peers_rwnd, 0, 0, a_rwnd);
+ }
+ asoc->peers_rwnd = a_rwnd;
+ if (asoc->sent_queue_retran_cnt) {
+ asoc->sent_queue_retran_cnt = 0;
+ }
+ if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
+ /* SWS sender side engages */
+ asoc->peers_rwnd = 0;
+ }
+ /* stop any timers */
+ TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+ sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
+ stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
+ if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
+ if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
+ SCTP_STAT_INCR(sctps_earlyfrstpidsck1);
+ sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
+ SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
+ }
+ }
+ net->partial_bytes_acked = 0;
+ net->flight_size = 0;
+ }
+ asoc->total_flight = 0;
+ asoc->total_flight_count = 0;
+ return;
+ }
+ /*
+ * We init net_ack and net_ack2 to 0. These are used to track 2
+ * things. The total byte count acked is tracked in net_ack AND
+ * net_ack2 is used to track the total bytes acked that are
+ * unambiguous and were never retransmitted. We track these on a
+ * per destination address basis.
+ */
+ TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+ net->prev_cwnd = net->cwnd;
+ net->net_ack = 0;
+ net->net_ack2 = 0;
+
+ /*
+ * CMT: Reset CUC and Fast recovery algo variables before
+ * SACK processing
+ */
+ net->new_pseudo_cumack = 0;
+ net->will_exit_fast_recovery = 0;
+ }
+ /* process the new consecutive TSN first */
+ tp1 = TAILQ_FIRST(&asoc->sent_queue);
+ while (tp1) {
+ if (compare_with_wrap(last_tsn, tp1->rec.data.TSN_seq,
+ MAX_TSN) ||
+ last_tsn == tp1->rec.data.TSN_seq) {
+ if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
+ /*
+ * ECN Nonce: Add the nonce to the sender's
+ * nonce sum
+ */
+ asoc->nonce_sum_expect_base += tp1->rec.data.ect_nonce;
+ accum_moved = 1;
+ if (tp1->sent < SCTP_DATAGRAM_ACKED) {
+ /*
+ * If it is less than ACKED, it is
+ * now no-longer in flight. Higher
+ * values may occur during marking
+ */
+ if ((tp1->whoTo->dest_state &
+ SCTP_ADDR_UNCONFIRMED) &&
+ (tp1->snd_count < 2)) {
+ /*
+ * If there was no retran
+ * and the address is
+ * un-confirmed and we sent
+ * there and are now
+ * sacked... it's confirmed,
+ * so mark it.
+ */
+ tp1->whoTo->dest_state &=
+ ~SCTP_ADDR_UNCONFIRMED;
+ }
+ if (tp1->sent < SCTP_DATAGRAM_RESEND) {
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
+ sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
+ tp1->whoTo->flight_size,
+ tp1->book_size,
+ (uintptr_t) tp1->whoTo,
+ tp1->rec.data.TSN_seq);
+ }
+ sctp_flight_size_decrease(tp1);
+ sctp_total_flight_decrease(stcb, tp1);
+ }
+ tp1->whoTo->net_ack += tp1->send_size;
+
+ /* CMT SFR and DAC algos */
+ this_sack_lowest_newack = tp1->rec.data.TSN_seq;
+ tp1->whoTo->saw_newack = 1;
+
+ if (tp1->snd_count < 2) {
+ /*
+ * True non-retransmitted
+ * chunk
+ */
+ tp1->whoTo->net_ack2 +=
+ tp1->send_size;
+
+ /* update RTO too? */
+ if (tp1->do_rtt) {
+ tp1->whoTo->RTO =
+ sctp_calculate_rto(stcb,
+ asoc, tp1->whoTo,
+ &tp1->sent_rcv_time,
+ sctp_align_safe_nocopy);
+ tp1->do_rtt = 0;
+ }
+ }
+ /*
+ * CMT: CUCv2 algorithm. From the
+ * cumack'd TSNs, for each TSN being
+ * acked for the first time, set the
+ * following variables for the
+ * corresp destination.
+ * new_pseudo_cumack will trigger a
+ * cwnd update.
+ * find_(rtx_)pseudo_cumack will
+ * trigger search for the next
+ * expected (rtx-)pseudo-cumack.
+ */
+ tp1->whoTo->new_pseudo_cumack = 1;
+ tp1->whoTo->find_pseudo_cumack = 1;
+ tp1->whoTo->find_rtx_pseudo_cumack = 1;
+
+
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
+ sctp_log_sack(asoc->last_acked_seq,
+ cum_ack,
+ tp1->rec.data.TSN_seq,
+ 0,
+ 0,
+ SCTP_LOG_TSN_ACKED);
+ }
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
+ sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
+ }
+ }
+ if (tp1->sent == SCTP_DATAGRAM_RESEND) {
+ sctp_ucount_decr(asoc->sent_queue_retran_cnt);
+#ifdef SCTP_AUDITING_ENABLED
+ sctp_audit_log(0xB3,
+ (asoc->sent_queue_retran_cnt & 0x000000ff));
+#endif
+ }
+ if (tp1->rec.data.chunk_was_revoked) {
+ /* deflate the cwnd */
+ tp1->whoTo->cwnd -= tp1->book_size;
+ tp1->rec.data.chunk_was_revoked = 0;
+ }
+ tp1->sent = SCTP_DATAGRAM_ACKED;
+ }
+ } else {
+ break;
+ }
+ tp1 = TAILQ_NEXT(tp1, sctp_next);
+ }
+ biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
+ /* always set this up to cum-ack */
+ asoc->this_sack_highest_gap = last_tsn;
+
+ if ((num_seg > 0) || (num_nr_seg > 0)) {
+
+ /*
+ * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
+ * to be greater than the cumack. Also reset saw_newack to 0
+ * for all dests.
+ */
+ TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+ net->saw_newack = 0;
+ net->this_sack_highest_newack = last_tsn;
+ }
+
+ /*
+ * this_sack_highest_gap will increase while handling NEW
+ * segments; this_sack_highest_newack will increase while
+ * handling NEWLY ACKED chunks. this_sack_lowest_newack is
+ * used for the CMT DAC algo. saw_newack will also change.
+ */
+ if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked,
+ &biggest_tsn_newly_acked, &this_sack_lowest_newack,
+ num_seg, num_nr_seg, &ecn_seg_sums)) {
+ wake_him++;
+ }
+ if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
+ /*
+ * validate the biggest_tsn_acked in the gap acks if
+ * strict adherence is wanted.
+ */
+ if ((biggest_tsn_acked == send_s) ||
+ (compare_with_wrap(biggest_tsn_acked, send_s, MAX_TSN))) {
+ /*
+ * peer is either confused or we are under
+ * attack. We must abort.
+ */
+ printf("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n",
+ biggest_tsn_acked,
+ send_s);
+
+ goto hopeless_peer;
+ }
+ }
+ }
+ /*******************************************/
+ /* cancel ALL T3-send timer if accum moved */
+ /*******************************************/
+ if (asoc->sctp_cmt_on_off == 1) {
+ TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+ if (net->new_pseudo_cumack)
+ sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
+ stcb, net,
+ SCTP_FROM_SCTP_INDATA + SCTP_LOC_27);
+
+ }
+ } else {
+ if (accum_moved) {
+ TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+ sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
+ stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28);
+ }
+ }
+ }
+ /********************************************/
+ /* drop the acked chunks from the sentqueue */
+ /********************************************/
+ asoc->last_acked_seq = cum_ack;
+
+ tp1 = TAILQ_FIRST(&asoc->sent_queue);
+ if (tp1 == NULL)
+ goto done_with_it;
+ do {
+ if (compare_with_wrap(tp1->rec.data.TSN_seq, cum_ack,
+ MAX_TSN)) {
+ break;
+ }
+ if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
+ /* no more sent on list */
+ printf("Warning, tp1->sent == %d and its now acked?\n",
+ tp1->sent);
+ }
+ tp2 = TAILQ_NEXT(tp1, sctp_next);
+ TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
+ if (tp1->pr_sctp_on) {
+ if (asoc->pr_sctp_cnt != 0)
+ asoc->pr_sctp_cnt--;
+ }
+ if (TAILQ_EMPTY(&asoc->sent_queue) &&
+ (asoc->total_flight > 0)) {
+#ifdef INVARIANTS
+ panic("Warning flight size is postive and should be 0");
+#else
+ SCTP_PRINTF("Warning flight size incorrect should be 0 is %d\n",
+ asoc->total_flight);
+#endif
+ asoc->total_flight = 0;
+ }
+ if (tp1->data) {
+ /* sa_ignore NO_NULL_CHK */
+ sctp_free_bufspace(stcb, asoc, tp1, 1);
+ sctp_m_freem(tp1->data);
+ if (asoc->peer_supports_prsctp && PR_SCTP_BUF_ENABLED(tp1->flags)) {
+ asoc->sent_queue_cnt_removeable--;
+ }
+ }
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
+ sctp_log_sack(asoc->last_acked_seq,
+ cum_ack,
+ tp1->rec.data.TSN_seq,
+ 0,
+ 0,
+ SCTP_LOG_FREE_SENT);
+ }
+ tp1->data = NULL;
+ asoc->sent_queue_cnt--;
+ sctp_free_a_chunk(stcb, tp1);
+ wake_him++;
+ tp1 = tp2;
+ } while (tp1 != NULL);
+
+done_with_it:
+ /* sa_ignore NO_NULL_CHK */
+ if ((wake_him) && (stcb->sctp_socket)) {
+#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+ struct socket *so;
+
+#endif
+ SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
+ sctp_wakeup_log(stcb, cum_ack, wake_him, SCTP_WAKESND_FROM_SACK);
+ }
+#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+ so = SCTP_INP_SO(stcb->sctp_ep);
+ atomic_add_int(&stcb->asoc.refcnt, 1);
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_SOCKET_LOCK(so, 1);
+ SCTP_TCB_LOCK(stcb);
+ atomic_subtract_int(&stcb->asoc.refcnt, 1);
+ if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
+ /* assoc was freed while we were unlocked */
+ SCTP_SOCKET_UNLOCK(so, 1);
+ return;
+ }
+#endif
+ sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
+#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+ SCTP_SOCKET_UNLOCK(so, 1);
+#endif
+ } else {
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
+ sctp_wakeup_log(stcb, cum_ack, wake_him, SCTP_NOWAKE_FROM_SACK);
+ }
+ }
+
+ if (asoc->fast_retran_loss_recovery && accum_moved) {
+ if (compare_with_wrap(asoc->last_acked_seq,
+ asoc->fast_recovery_tsn, MAX_TSN) ||
+ asoc->last_acked_seq == asoc->fast_recovery_tsn) {
+ /* Setup so we will exit RFC2582 fast recovery */
+ will_exit_fast_recovery = 1;
+ }
+ }
+ /*
+ * Check for revoked fragments:
+ *
+ * If the previous sack had no frags, then we cannot have any
+ * revoked ones now. If the previous sack had frags, then:
+ * - if we now have frags (num_seg > 0), call
+ *   sctp_check_for_revoked() to tell if the peer revoked some
+ *   of them;
+ * - else the peer revoked all ACKED fragments, since we had
+ *   some before and now we have NONE.
+ */
+
+ if (num_seg) {
+ sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
+ asoc->saw_sack_with_frags = 1;
+ } else if (asoc->saw_sack_with_frags) {
+ int cnt_revoked = 0;
+
+ tp1 = TAILQ_FIRST(&asoc->sent_queue);
+ if (tp1 != NULL) {
+ /* Peer revoked all dg's marked or acked */
+ TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
+ if (tp1->sent == SCTP_DATAGRAM_ACKED) {
+ tp1->sent = SCTP_DATAGRAM_SENT;
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
+ sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
+ tp1->whoTo->flight_size,
+ tp1->book_size,
+ (uintptr_t) tp1->whoTo,
+ tp1->rec.data.TSN_seq);
+ }
+ sctp_flight_size_increase(tp1);
+ sctp_total_flight_increase(stcb, tp1);
+ tp1->rec.data.chunk_was_revoked = 1;
+ /*
+ * To ensure that this increase in
+ * flightsize, which is artificial,
+ * does not throttle the sender, we
+ * also increase the cwnd
+ * artificially.
+ */
+ tp1->whoTo->cwnd += tp1->book_size;
+ cnt_revoked++;
+ }
+ }
+ if (cnt_revoked) {
+ reneged_all = 1;
+ }
+ }
+ asoc->saw_sack_with_frags = 0;
+ }
+ if (num_nr_seg > 0)
+ asoc->saw_sack_with_nr_frags = 1;
+ else
+ asoc->saw_sack_with_nr_frags = 0;
+
+ /* JRS - Use the congestion control given in the CC module */
+ asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
+
+ if (TAILQ_EMPTY(&asoc->sent_queue)) {
+ /* nothing left in-flight */
+ TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+ /* stop all timers */
+ if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
+ if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
+ SCTP_STAT_INCR(sctps_earlyfrstpidsck4);
+ sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
+ SCTP_FROM_SCTP_INDATA + SCTP_LOC_29);
+ }
+ }
+ sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
+ stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
+ net->flight_size = 0;
+ net->partial_bytes_acked = 0;
+ }
+ asoc->total_flight = 0;
+ asoc->total_flight_count = 0;
+ }
+ /**********************************/
+ /* Now what about shutdown issues */
+ /**********************************/
+ if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
+ /* nothing left on sendqueue.. consider done */
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
+ sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
+ asoc->peers_rwnd, 0, 0, a_rwnd);
+ }
+ asoc->peers_rwnd = a_rwnd;
+ if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
+ /* SWS sender side engages */
+ asoc->peers_rwnd = 0;
+ }
+ /* clean up */
+ if ((asoc->stream_queue_cnt == 1) &&
+ ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
+ (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
+ (asoc->locked_on_sending)
+ ) {
+ struct sctp_stream_queue_pending *sp;
+
+ /*
+ * We may be in a state where we got all the data across
+ * but cannot write more due to a shutdown... we abort
+ * since the user did not indicate EOR in this case.
+ */
+ sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
+ sctp_streamhead);
+ if ((sp) && (sp->length == 0)) {
+ asoc->locked_on_sending = NULL;
+ if (sp->msg_is_complete) {
+ asoc->stream_queue_cnt--;
+ } else {
+ asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
+ asoc->stream_queue_cnt--;
+ }
+ }
+ }
+ if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
+ (asoc->stream_queue_cnt == 0)) {
+ if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
+ /* Need to abort here */
+ struct mbuf *oper;
+
+ abort_out_now:
+ *abort_now = 1;
+ /* XXX */
+ oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
+ 0, M_DONTWAIT, 1, MT_DATA);
+ if (oper) {
+ struct sctp_paramhdr *ph;
+ uint32_t *ippp;
+
+ SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
+ sizeof(uint32_t);
+ ph = mtod(oper, struct sctp_paramhdr *);
+ ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
+ ph->param_length = htons(SCTP_BUF_LEN(oper));
+ ippp = (uint32_t *) (ph + 1);
+ *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_31);
+ }
+ stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_31;
+ sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_RESPONSE_TO_USER_REQ, oper, SCTP_SO_NOT_LOCKED);
+ return;
+ } else {
+ if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
+ (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
+ SCTP_STAT_DECR_GAUGE32(sctps_currestab);
+ }
+ SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
+ SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
+ sctp_stop_timers_for_shutdown(stcb);
+ sctp_send_shutdown(stcb,
+ stcb->asoc.primary_destination);
+ sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
+ stcb->sctp_ep, stcb, asoc->primary_destination);
+ sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
+ stcb->sctp_ep, stcb, asoc->primary_destination);
+ }
+ return;
+ } else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
+ (asoc->stream_queue_cnt == 0)) {
+ if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
+ goto abort_out_now;
+ }
+ SCTP_STAT_DECR_GAUGE32(sctps_currestab);
+ SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
+ SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
+ sctp_send_shutdown_ack(stcb,
+ stcb->asoc.primary_destination);
+ sctp_stop_timers_for_shutdown(stcb);
+ sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
+ stcb->sctp_ep, stcb, asoc->primary_destination);
+ return;
+ }
+ }
+ /*
+ * Now here we are going to recycle net_ack for a different use...
+ * HEADS UP.
+ */
+ TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+ net->net_ack = 0;
+ }
+
+ /*
+ * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking
+ * to be done. Setting this_sack_lowest_newack to the cum_ack will
+ * automatically ensure that.
+ */
+ if ((asoc->sctp_cmt_on_off == 1) &&
+ SCTP_BASE_SYSCTL(sctp_cmt_use_dac) &&
+ (cmt_dac_flag == 0)) {
+ this_sack_lowest_newack = cum_ack;
+ }
+ if ((num_seg > 0) || (num_nr_seg > 0)) {
+ sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
+ biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
+ }
+ /* JRS - Use the congestion control given in the CC module */
+ asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
+
+ /******************************************************************
+ * Here we do the stuff with ECN Nonce checking.
+ * We basically check to see if the nonce sum flag was incorrect
+ * or if resynchronization needs to be done. Also if we catch a
+ * misbehaving receiver we give him the kick.
+ ******************************************************************/
+
+ if (asoc->ecn_nonce_allowed) {
+ if (asoc->nonce_sum_check) {
+ if (nonce_sum_flag != ((asoc->nonce_sum_expect_base + ecn_seg_sums) & SCTP_SACK_NONCE_SUM)) {
+ if (asoc->nonce_wait_for_ecne == 0) {
+ struct sctp_tmit_chunk *lchk;
+
+ lchk = TAILQ_FIRST(&asoc->send_queue);
+ asoc->nonce_wait_for_ecne = 1;
+ if (lchk) {
+ asoc->nonce_wait_tsn = lchk->rec.data.TSN_seq;
+ } else {
+ asoc->nonce_wait_tsn = asoc->sending_seq;
+ }
+ } else {
+ if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_wait_tsn, MAX_TSN) ||
+ (asoc->last_acked_seq == asoc->nonce_wait_tsn)) {
+ /*
+ * Misbehaving peer. We need
+ * to react to this guy
+ */
+ asoc->ecn_allowed = 0;
+ asoc->ecn_nonce_allowed = 0;
+ }
+ }
+ }
+ } else {
+ /* See if Resynchronization Possible */
+ if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_resync_tsn, MAX_TSN)) {
+ asoc->nonce_sum_check = 1;
+ /*
+ * Now we must calculate what the base is.
+ * We do this based on two things: we know
+ * the totals for all the segments
+ * gap-acked in the SACK, stored in
+ * ecn_seg_sums. We also know the SACK's
+ * nonce sum, it's in nonce_sum_flag. So we
+ * can build a truth table to back-calculate
+ * the new value of
+ * asoc->nonce_sum_expect_base:
+ *
+ * SACK-flag-Value   Seg-Sums   Base
+ *        0             0         0
+ *        1             0         1
+ *        0             1         1
+ *        1             1         0
+ */
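+ /*
+ * Again the table is simply XOR of the gap-acked segment
+ * sums with the SACK's nonce-sum flag, masked to one bit.
+ */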
+ asoc->nonce_sum_expect_base = (ecn_seg_sums ^ nonce_sum_flag) & SCTP_SACK_NONCE_SUM;
+ }
+ }
+ }
+ /* Now are we exiting loss recovery ? */
+ if (will_exit_fast_recovery) {
+ /* Ok, we must exit fast recovery */
+ asoc->fast_retran_loss_recovery = 0;
+ }
+ if ((asoc->sat_t3_loss_recovery) &&
+ ((compare_with_wrap(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn,
+ MAX_TSN) ||
+ (asoc->last_acked_seq == asoc->sat_t3_recovery_tsn)))) {
+ /* end satellite t3 loss recovery */
+ asoc->sat_t3_loss_recovery = 0;
+ }
+ /*
+ * CMT Fast recovery
+ */
+ TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+ if (net->will_exit_fast_recovery) {
+ /* Ok, we must exit fast recovery */
+ net->fast_retran_loss_recovery = 0;
+ }
+ }
+
+ /* Adjust and set the new rwnd value */
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
+ sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
+ asoc->peers_rwnd, asoc->total_flight, (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
+ }
+ asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
+ (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
+ if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
+ /* SWS sender side engages */
+ asoc->peers_rwnd = 0;
+ }
+ if (asoc->peers_rwnd > old_rwnd) {
+ win_probe_recovery = 1;
+ }
+ /*
+ * Now we must set things up so we have a timer up for anyone
+ * with outstanding data.
+ */
+ done_once = 0;
+again:
+ j = 0;
+ TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+ if (win_probe_recovery && (net->window_probe)) {
+ win_probe_recovered = 1;
+ /*-
+ * Find the first chunk that was used with the
+ * window probe and clear the event. Put it back
+ * into the send queue as if it had not been sent.
+ */
+ TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
+ if (tp1->window_probe) {
+ sctp_window_probe_recovery(stcb, asoc, net, tp1);
+ break;
+ }
+ }
+ }
+ if (net->flight_size) {
+ j++;
+ if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
+ sctp_timer_start(SCTP_TIMER_TYPE_SEND,
+ stcb->sctp_ep, stcb, net);
+ }
+ if (net->window_probe) {
+ net->window_probe = 0;
+ }
+ } else {
+ if (net->window_probe) {
+ /*
+ * In window probes we must assure a timer
+ * is still running there
+ */
+ if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
+ sctp_timer_start(SCTP_TIMER_TYPE_SEND,
+ stcb->sctp_ep, stcb, net);
+
+ }
+ } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
+ sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
+ stcb, net,
+ SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
+ }
+ if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
+ if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
+ SCTP_STAT_INCR(sctps_earlyfrstpidsck4);
+ sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
+ SCTP_FROM_SCTP_INDATA + SCTP_LOC_23);
+ }
+ }
+ }
+ }
+ if ((j == 0) &&
+ (!TAILQ_EMPTY(&asoc->sent_queue)) &&
+ (asoc->sent_queue_retran_cnt == 0) &&
+ (win_probe_recovered == 0) &&
+ (done_once == 0)) {
+ /*
+ * huh, this should not happen unless all packets are
+ * PR-SCTP and marked to skip of course.
+ */
+ if (sctp_fs_audit(asoc)) {
+ TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+ net->flight_size = 0;
+ }
+ asoc->total_flight = 0;
+ asoc->total_flight_count = 0;
+ asoc->sent_queue_retran_cnt = 0;
+ TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
+ if (tp1->sent < SCTP_DATAGRAM_RESEND) {
+ sctp_flight_size_increase(tp1);
+ sctp_total_flight_increase(stcb, tp1);
+ } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
+ sctp_ucount_incr(asoc->sent_queue_retran_cnt);
+ }
+ }
+ }
+ done_once = 1;
+ goto again;
+ }
+ /*********************************************/
+ /* Here we perform PR-SCTP procedures */
+ /* (section 4.2) */
+ /*********************************************/
+ /* C1. update advancedPeerAckPoint */
+ if (compare_with_wrap(cum_ack, asoc->advanced_peer_ack_point, MAX_TSN)) {
+ asoc->advanced_peer_ack_point = cum_ack;
+ }
+ /* C2. try to further move advancedPeerAckPoint ahead */
+ if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) {
+ struct sctp_tmit_chunk *lchk;
+ uint32_t old_adv_peer_ack_point;
+
+ old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
+ lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
+ /* C3. See if we need to send a Fwd-TSN */
+ if (compare_with_wrap(asoc->advanced_peer_ack_point, cum_ack,
+ MAX_TSN)) {
+ /*
+ * ISSUE with ECN, see FWD-TSN processing for notes
+ * on issues that will occur when the ECN NONCE
+ * stuff is put into SCTP for cross checking.
+ */
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
+ sctp_misc_ints(SCTP_FWD_TSN_CHECK,
+ 0xee, cum_ack, asoc->advanced_peer_ack_point,
+ old_adv_peer_ack_point);
+ }
+ if (compare_with_wrap(asoc->advanced_peer_ack_point, old_adv_peer_ack_point,
+ MAX_TSN)) {
+
+ send_forward_tsn(stcb, asoc);
+ /*
+ * ECN Nonce: Disable Nonce Sum check when
+ * FWD TSN is sent and store resync tsn
+ */
+ asoc->nonce_sum_check = 0;
+ asoc->nonce_resync_tsn = asoc->advanced_peer_ack_point;
+ } else if (lchk) {
+ /* try to FR fwd-tsn's that get lost too */
+ if (lchk->rec.data.fwd_tsn_cnt >= 3) {
+ send_forward_tsn(stcb, asoc);
+ }
+ }
+ }
+ if (lchk) {
+ /* Assure a timer is up */
+ sctp_timer_start(SCTP_TIMER_TYPE_SEND,
+ stcb->sctp_ep, stcb, lchk->whoTo);
+ }
+ }
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
+ sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
+ a_rwnd,
+ stcb->asoc.peers_rwnd,
+ stcb->asoc.total_flight,
+ stcb->asoc.total_output_queue_size);
+ }
+}
+
+void
+sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp,
+ struct sctp_nets *netp, int *abort_flag)
+{
+ /* Copy cum-ack */
+ uint32_t cum_ack, a_rwnd;
+
+ cum_ack = ntohl(cp->cumulative_tsn_ack);
+ /* Arrange so a_rwnd does NOT change */
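+	/*
+	 * The express handler below recomputes peers_rwnd as roughly
+	 * a_rwnd minus the data in flight, so passing in
+	 * peers_rwnd + total_flight leaves the rwnd unchanged.
+	 */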
+ a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;
+
+ /* Now call the express sack handling */
+ sctp_express_handle_sack(stcb, cum_ack, a_rwnd, 0, abort_flag);
+}
+
+static void
+sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
+ struct sctp_stream_in *strmin)
+{
+ struct sctp_queued_to_read *ctl, *nctl;
+ struct sctp_association *asoc;
+ uint16_t tt;
+
+ asoc = &stcb->asoc;
+ tt = strmin->last_sequence_delivered;
+ /*
+	 * First deliver anything prior to and including the stream
+	 * sequence number that came in
+ */
+ ctl = TAILQ_FIRST(&strmin->inqueue);
+ while (ctl) {
+ nctl = TAILQ_NEXT(ctl, next);
+ if (compare_with_wrap(tt, ctl->sinfo_ssn, MAX_SEQ) ||
+ (tt == ctl->sinfo_ssn)) {
+ /* this is deliverable now */
+ TAILQ_REMOVE(&strmin->inqueue, ctl, next);
+ /* subtract pending on streams */
+ asoc->size_on_all_streams -= ctl->length;
+ sctp_ucount_decr(asoc->cnt_on_all_streams);
+ /* deliver it to at least the delivery-q */
+ if (stcb->sctp_socket) {
+ sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
+ sctp_add_to_readq(stcb->sctp_ep, stcb,
+ ctl,
+ &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
+ }
+ } else {
+ /* no more delivery now. */
+ break;
+ }
+ ctl = nctl;
+ }
+ /*
+	 * now we must deliver things in the queue the normal way, if
+	 * any are now ready.
+ */
+ tt = strmin->last_sequence_delivered + 1;
+ ctl = TAILQ_FIRST(&strmin->inqueue);
+ while (ctl) {
+ nctl = TAILQ_NEXT(ctl, next);
+ if (tt == ctl->sinfo_ssn) {
+ /* this is deliverable now */
+ TAILQ_REMOVE(&strmin->inqueue, ctl, next);
+ /* subtract pending on streams */
+ asoc->size_on_all_streams -= ctl->length;
+ sctp_ucount_decr(asoc->cnt_on_all_streams);
+ /* deliver it to at least the delivery-q */
+ strmin->last_sequence_delivered = ctl->sinfo_ssn;
+ if (stcb->sctp_socket) {
+ sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
+ sctp_add_to_readq(stcb->sctp_ep, stcb,
+ ctl,
+ &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
+
+ }
+ tt = strmin->last_sequence_delivered + 1;
+ } else {
+ break;
+ }
+ ctl = nctl;
+ }
+}
+
+static void
+sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
+ struct sctp_association *asoc,
+ uint16_t stream, uint16_t seq)
+{
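+	/*
+	 * Flush from the reassembly queue any fragments belonging to the
+	 * given stream/sequence pair that a FWD-TSN tells us to skip.
+	 */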
+ struct sctp_tmit_chunk *chk, *at;
+
+ if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
+ /* For each one on here see if we need to toss it */
+ /*
+		 * For now, large messages held on the reasmqueue that are
+		 * complete will be tossed too. We could in theory do more
+		 * work to spin through and stop after dumping one msg, aka
+		 * seeing the start of a new msg at the head, and call the
+		 * delivery function... to see if it can be delivered... But
+		 * for now we just dump everything on the queue.
+ */
+ chk = TAILQ_FIRST(&asoc->reasmqueue);
+ while (chk) {
+ at = TAILQ_NEXT(chk, sctp_next);
+ /*
+ * Do not toss it if on a different stream or marked
+ * for unordered delivery in which case the stream
+ * sequence number has no meaning.
+ */
+ if ((chk->rec.data.stream_number != stream) ||
+ ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == SCTP_DATA_UNORDERED)) {
+ chk = at;
+ continue;
+ }
+ if (chk->rec.data.stream_seq == seq) {
+ /* It needs to be tossed */
+ TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
+ if (compare_with_wrap(chk->rec.data.TSN_seq,
+ asoc->tsn_last_delivered, MAX_TSN)) {
+ asoc->tsn_last_delivered =
+ chk->rec.data.TSN_seq;
+ asoc->str_of_pdapi =
+ chk->rec.data.stream_number;
+ asoc->ssn_of_pdapi =
+ chk->rec.data.stream_seq;
+ asoc->fragment_flags =
+ chk->rec.data.rcv_flags;
+ }
+ asoc->size_on_reasm_queue -= chk->send_size;
+ sctp_ucount_decr(asoc->cnt_on_reasm_queue);
+
+ /* Clear up any stream problem */
+ if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) !=
+ SCTP_DATA_UNORDERED &&
+ (compare_with_wrap(chk->rec.data.stream_seq,
+ asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered,
+ MAX_SEQ))) {
+ /*
+				 * We must dump forward this stream's
+				 * sequence number if the chunk being
+				 * skipped is not unordered. There is a
+				 * chance that if the peer does not
+				 * include the last fragment in its
+				 * FWD-TSN we WILL have a problem here
+				 * since you would have a partial chunk
+				 * in queue that may not be deliverable.
+				 * Also if a partial delivery API has
+				 * started, the user may get a partial
+				 * chunk followed by a new chunk on the
+				 * next read... really ugly, but I see
+				 * no way around it! Maybe a notify??
+ */
+ asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered =
+ chk->rec.data.stream_seq;
+ }
+ if (chk->data) {
+ sctp_m_freem(chk->data);
+ chk->data = NULL;
+ }
+ sctp_free_a_chunk(stcb, chk);
+ } else if (compare_with_wrap(chk->rec.data.stream_seq, seq, MAX_SEQ)) {
+ /*
+ * If the stream_seq is > than the purging
+ * one, we are done
+ */
+ break;
+ }
+ chk = at;
+ }
+ }
+}
+
+
+void
+sctp_handle_forward_tsn(struct sctp_tcb *stcb,
+ struct sctp_forward_tsn_chunk *fwd,
+ int *abort_flag, struct mbuf *m, int offset)
+{
+ /*
+ * ISSUES that MUST be fixed for ECN! When we are the sender of the
+ * forward TSN, when the SACK comes back that acknowledges the
+ * FWD-TSN we must reset the NONCE sum to match correctly. This will
+	 * get quite tricky since we may have sent more data intervening
+ * and must carefully account for what the SACK says on the nonce
+ * and any gaps that are reported. This work will NOT be done here,
+ * but I note it here since it is really related to PR-SCTP and
+	 * FWD-TSNs.
+ */
+
+ /* The pr-sctp fwd tsn */
+ /*
+	 * here we will perform all the data receiver side steps for
+	 * processing FwdTSN, as required by the pr-sctp draft:
+	 *
+	 * Assume we get FwdTSN(x):
+	 *
+	 * 1) update local cumTSN to x
+	 * 2) try to further advance cumTSN to x + others we have
+	 * 3) examine and update re-ordering queue on pr-in-streams
+	 * 4) clean up re-assembly queue
+	 * 5) send a sack to report where we are
+ */
+ struct sctp_association *asoc;
+ uint32_t new_cum_tsn, gap;
+ unsigned int i, fwd_sz, cumack_set_flag, m_size;
+ uint32_t str_seq;
+ struct sctp_stream_in *strm;
+ struct sctp_tmit_chunk *chk, *at;
+ struct sctp_queued_to_read *ctl, *sv;
+
+ cumack_set_flag = 0;
+ asoc = &stcb->asoc;
+ if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
+ SCTPDBG(SCTP_DEBUG_INDATA1,
+ "Bad size too small/big fwd-tsn\n");
+ return;
+ }
+ m_size = (stcb->asoc.mapping_array_size << 3);
+ /*************************************************************/
+ /* 1. Here we update local cumTSN and shift the bitmap array */
+ /*************************************************************/
+ new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
+
+ if (compare_with_wrap(asoc->cumulative_tsn, new_cum_tsn, MAX_TSN) ||
+ asoc->cumulative_tsn == new_cum_tsn) {
+ /* Already got there ... */
+ return;
+ }
+ /*
+ * now we know the new TSN is more advanced, let's find the actual
+ * gap
+ */
+ SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
+ asoc->cumulative_tsn = new_cum_tsn;
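+	/*
+	 * A gap beyond the mapping array means the cum-ack jumped past
+	 * everything we track; sanity-check it against the rwnd and, if
+	 * plausible, reset the mapping arrays wholesale below.
+	 */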
+ if (gap >= m_size) {
+ if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
+ struct mbuf *oper;
+
+ /*
+ * out of range (of single byte chunks in the rwnd I
+ * give out). This must be an attacker.
+ */
+ *abort_flag = 1;
+ oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
+ 0, M_DONTWAIT, 1, MT_DATA);
+ if (oper) {
+ struct sctp_paramhdr *ph;
+ uint32_t *ippp;
+
+ SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
+ (sizeof(uint32_t) * 3);
+ ph = mtod(oper, struct sctp_paramhdr *);
+ ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
+ ph->param_length = htons(SCTP_BUF_LEN(oper));
+ ippp = (uint32_t *) (ph + 1);
+ *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_33);
+ ippp++;
+ *ippp = asoc->highest_tsn_inside_map;
+ ippp++;
+ *ippp = new_cum_tsn;
+ }
+ stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_33;
+ sctp_abort_an_association(stcb->sctp_ep, stcb,
+ SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
+ return;
+ }
+ SCTP_STAT_INCR(sctps_fwdtsn_map_over);
+
+ memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
+ asoc->mapping_array_base_tsn = new_cum_tsn + 1;
+ asoc->highest_tsn_inside_map = new_cum_tsn;
+
+ memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
+ asoc->highest_tsn_inside_nr_map = new_cum_tsn;
+
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
+ sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
+ }
+ asoc->last_echo_tsn = asoc->highest_tsn_inside_map;
+ } else {
+ SCTP_TCB_LOCK_ASSERT(stcb);
+ for (i = 0; i <= gap; i++) {
+ if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) &&
+ !SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, i)) {
+ SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
+ if (compare_with_wrap(asoc->mapping_array_base_tsn + i, asoc->highest_tsn_inside_nr_map, MAX_TSN)) {
+ asoc->highest_tsn_inside_nr_map = asoc->mapping_array_base_tsn + i;
+ }
+ }
+ }
+ }
+ /*************************************************************/
+ /* 2. Clear up re-assembly queue */
+ /*************************************************************/
+ /*
+ * First service it if pd-api is up, just in case we can progress it
+ * forward
+ */
+ if (asoc->fragmented_delivery_inprogress) {
+ sctp_service_reassembly(stcb, asoc);
+ }
+ if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
+ /* For each one on here see if we need to toss it */
+ /*
+		 * For now, large messages held on the reasmqueue that are
+		 * complete will be tossed too. We could in theory do more
+		 * work to spin through and stop after dumping one msg, aka
+		 * seeing the start of a new msg at the head, and call the
+		 * delivery function... to see if it can be delivered... But
+		 * for now we just dump everything on the queue.
+ */
+ chk = TAILQ_FIRST(&asoc->reasmqueue);
+ while (chk) {
+ at = TAILQ_NEXT(chk, sctp_next);
+ if ((compare_with_wrap(new_cum_tsn,
+ chk->rec.data.TSN_seq, MAX_TSN)) ||
+ (new_cum_tsn == chk->rec.data.TSN_seq)) {
+ /* It needs to be tossed */
+ TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
+ if (compare_with_wrap(chk->rec.data.TSN_seq,
+ asoc->tsn_last_delivered, MAX_TSN)) {
+ asoc->tsn_last_delivered =
+ chk->rec.data.TSN_seq;
+ asoc->str_of_pdapi =
+ chk->rec.data.stream_number;
+ asoc->ssn_of_pdapi =
+ chk->rec.data.stream_seq;
+ asoc->fragment_flags =
+ chk->rec.data.rcv_flags;
+ }
+ asoc->size_on_reasm_queue -= chk->send_size;
+ sctp_ucount_decr(asoc->cnt_on_reasm_queue);
+
+ /* Clear up any stream problem */
+ if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) !=
+ SCTP_DATA_UNORDERED &&
+ (compare_with_wrap(chk->rec.data.stream_seq,
+ asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered,
+ MAX_SEQ))) {
+ /*
+				 * We must dump forward this stream's
+				 * sequence number if the chunk being
+				 * skipped is not unordered. There is a
+				 * chance that if the peer does not
+				 * include the last fragment in its
+				 * FWD-TSN we WILL have a problem here
+				 * since you would have a partial chunk
+				 * in queue that may not be deliverable.
+				 * Also if a partial delivery API has
+				 * started, the user may get a partial
+				 * chunk followed by a new chunk on the
+				 * next read... really ugly, but I see
+				 * no way around it! Maybe a notify??
+ */
+ asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered =
+ chk->rec.data.stream_seq;
+ }
+ if (chk->data) {
+ sctp_m_freem(chk->data);
+ chk->data = NULL;
+ }
+ sctp_free_a_chunk(stcb, chk);
+ } else {
+ /*
+ * Ok we have gone beyond the end of the
+ * fwd-tsn's mark.
+ */
+ break;
+ }
+ chk = at;
+ }
+ }
+ /*******************************************************/
+ /* 3. Update the PR-stream re-ordering queues and fix */
+ /* delivery issues as needed. */
+ /*******************************************************/
+ fwd_sz -= sizeof(*fwd);
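+	/*
+	 * What remains of the chunk is an array of sctp_strseq entries,
+	 * one (stream, sequence) pair per stream skipped by the FWD-TSN.
+	 */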
+ if (m && fwd_sz) {
+ /* New method. */
+ unsigned int num_str;
+ struct sctp_strseq *stseq, strseqbuf;
+
+ offset += sizeof(*fwd);
+
+ SCTP_INP_READ_LOCK(stcb->sctp_ep);
+ num_str = fwd_sz / sizeof(struct sctp_strseq);
+ for (i = 0; i < num_str; i++) {
+ uint16_t st;
+
+ stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
+ sizeof(struct sctp_strseq),
+ (uint8_t *) & strseqbuf);
+ offset += sizeof(struct sctp_strseq);
+ if (stseq == NULL) {
+ break;
+ }
+ /* Convert */
+ st = ntohs(stseq->stream);
+ stseq->stream = st;
+ st = ntohs(stseq->sequence);
+ stseq->sequence = st;
+
+ /* now process */
+
+ /*
+ * Ok we now look for the stream/seq on the read
+			 * queue where it's not all delivered. If we find it
+ * we transmute the read entry into a PDI_ABORTED.
+ */
+ if (stseq->stream >= asoc->streamincnt) {
+ /* screwed up streams, stop! */
+ break;
+ }
+ if ((asoc->str_of_pdapi == stseq->stream) &&
+ (asoc->ssn_of_pdapi == stseq->sequence)) {
+ /*
+ * If this is the one we were partially
+ * delivering now then we no longer are.
+ * Note this will change with the reassembly
+ * re-write.
+ */
+ asoc->fragmented_delivery_inprogress = 0;
+ }
+ sctp_flush_reassm_for_str_seq(stcb, asoc, stseq->stream, stseq->sequence);
+ TAILQ_FOREACH(ctl, &stcb->sctp_ep->read_queue, next) {
+ if ((ctl->sinfo_stream == stseq->stream) &&
+ (ctl->sinfo_ssn == stseq->sequence)) {
+ str_seq = (stseq->stream << 16) | stseq->sequence;
+ ctl->end_added = 1;
+ ctl->pdapi_aborted = 1;
+ sv = stcb->asoc.control_pdapi;
+ stcb->asoc.control_pdapi = ctl;
+ sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
+ stcb,
+ SCTP_PARTIAL_DELIVERY_ABORTED,
+ (void *)&str_seq,
+ SCTP_SO_NOT_LOCKED);
+ stcb->asoc.control_pdapi = sv;
+ break;
+ } else if ((ctl->sinfo_stream == stseq->stream) &&
+ (compare_with_wrap(ctl->sinfo_ssn, stseq->sequence, MAX_SEQ))) {
+ /* We are past our victim SSN */
+ break;
+ }
+ }
+ strm = &asoc->strmin[stseq->stream];
+ if (compare_with_wrap(stseq->sequence,
+ strm->last_sequence_delivered, MAX_SEQ)) {
+ /* Update the sequence number */
+ strm->last_sequence_delivered =
+ stseq->sequence;
+ }
+ /* now kick the stream the new way */
+ /* sa_ignore NO_NULL_CHK */
+ sctp_kick_prsctp_reorder_queue(stcb, strm);
+ }
+ SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
+ }
+ /*
+	 * Now slide the mapping arrays forward.
+ */
+ sctp_slide_mapping_arrays(stcb);
+
+ if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
+ /* now lets kick out and check for more fragmented delivery */
+ /* sa_ignore NO_NULL_CHK */
+ sctp_deliver_reasm_check(stcb, &stcb->asoc);
+ }
+}
diff --git a/rtems/freebsd/netinet/sctp_indata.h b/rtems/freebsd/netinet/sctp_indata.h
new file mode 100644
index 00000000..9f6890ed
--- /dev/null
+++ b/rtems/freebsd/netinet/sctp_indata.h
@@ -0,0 +1,129 @@
+/*-
+ * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* $KAME: sctp_indata.h,v 1.9 2005/03/06 16:04:17 itojun Exp $ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#ifndef __sctp_indata_h__
+#define __sctp_indata_h__
+
+#if defined(_KERNEL) || defined(__Userspace__)
+
+struct sctp_queued_to_read *
+sctp_build_readq_entry(struct sctp_tcb *stcb,
+ struct sctp_nets *net,
+ uint32_t tsn, uint32_t ppid,
+ uint32_t context, uint16_t stream_no,
+ uint16_t stream_seq, uint8_t flags,
+ struct mbuf *dm);
+
+
+#define sctp_build_readq_entry_mac(_ctl, in_it, a, net, tsn, ppid, context, stream_no, stream_seq, flags, dm) do { \
+ if (_ctl) { \
+ atomic_add_int(&((net)->ref_count), 1); \
+ (_ctl)->sinfo_stream = stream_no; \
+ (_ctl)->sinfo_ssn = stream_seq; \
+ (_ctl)->sinfo_flags = (flags << 8); \
+ (_ctl)->sinfo_ppid = ppid; \
+ (_ctl)->sinfo_context = a; \
+ (_ctl)->sinfo_timetolive = 0; \
+ (_ctl)->sinfo_tsn = tsn; \
+ (_ctl)->sinfo_cumtsn = tsn; \
+ (_ctl)->sinfo_assoc_id = sctp_get_associd((in_it)); \
+ (_ctl)->length = 0; \
+ (_ctl)->held_length = 0; \
+ (_ctl)->whoFrom = net; \
+ (_ctl)->data = dm; \
+ (_ctl)->tail_mbuf = NULL; \
+ (_ctl)->aux_data = NULL; \
+ (_ctl)->stcb = (in_it); \
+ (_ctl)->port_from = (in_it)->rport; \
+ (_ctl)->spec_flags = 0; \
+ (_ctl)->do_not_ref_stcb = 0; \
+ (_ctl)->end_added = 0; \
+ (_ctl)->pdapi_aborted = 0; \
+ (_ctl)->some_taken = 0; \
+ } \
+} while (0)
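+/*
+ * Note: the macro above takes a reference on the net it stores in whoFrom;
+ * whoever frees the entry later must drop that reference.
+ */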
+
+
+
+struct mbuf *
+sctp_build_ctl_nchunk(struct sctp_inpcb *inp,
+ struct sctp_sndrcvinfo *sinfo);
+
+char *
+sctp_build_ctl_cchunk(struct sctp_inpcb *inp,
+ int *control_len,
+ struct sctp_sndrcvinfo *sinfo);
+
+void sctp_set_rwnd(struct sctp_tcb *, struct sctp_association *);
+
+uint32_t
+sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc);
+
+void
+sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
+ uint32_t rwnd, int nonce_sum_flag, int *abort_now);
+
+void
+sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
+ struct sctp_tcb *stcb, struct sctp_nets *net_from,
+ uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup,
+ int *abort_now, uint8_t flags,
+ uint32_t cum_ack, uint32_t rwnd);
+
+/* draft-ietf-tsvwg-usctp */
+void
+sctp_handle_forward_tsn(struct sctp_tcb *,
+ struct sctp_forward_tsn_chunk *, int *, struct mbuf *, int);
+
+struct sctp_tmit_chunk *
+ sctp_try_advance_peer_ack_point(struct sctp_tcb *, struct sctp_association *);
+
+void sctp_service_queues(struct sctp_tcb *, struct sctp_association *);
+
+void
+sctp_update_acked(struct sctp_tcb *, struct sctp_shutdown_chunk *,
+ struct sctp_nets *, int *);
+
+int
+sctp_process_data(struct mbuf **, int, int *, int, struct sctphdr *,
+ struct sctp_inpcb *, struct sctp_tcb *,
+ struct sctp_nets *, uint32_t *);
+
+void sctp_slide_mapping_arrays(struct sctp_tcb *stcb);
+
+void sctp_sack_check(struct sctp_tcb *, int, int *);
+
+#endif
+#endif
diff --git a/rtems/freebsd/netinet/sctp_input.c b/rtems/freebsd/netinet/sctp_input.c
new file mode 100644
index 00000000..0f6beea9
--- /dev/null
+++ b/rtems/freebsd/netinet/sctp_input.c
@@ -0,0 +1,5965 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* $KAME: sctp_input.c,v 1.27 2005/03/06 16:04:17 itojun Exp $ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/freebsd/netinet/sctp_os.h>
+#include <rtems/freebsd/netinet/sctp_var.h>
+#include <rtems/freebsd/netinet/sctp_sysctl.h>
+#include <rtems/freebsd/netinet/sctp_pcb.h>
+#include <rtems/freebsd/netinet/sctp_header.h>
+#include <rtems/freebsd/netinet/sctputil.h>
+#include <rtems/freebsd/netinet/sctp_output.h>
+#include <rtems/freebsd/netinet/sctp_input.h>
+#include <rtems/freebsd/netinet/sctp_auth.h>
+#include <rtems/freebsd/netinet/sctp_indata.h>
+#include <rtems/freebsd/netinet/sctp_asconf.h>
+#include <rtems/freebsd/netinet/sctp_bsd_addr.h>
+#include <rtems/freebsd/netinet/sctp_timer.h>
+#include <rtems/freebsd/netinet/sctp_crc32.h>
+#include <rtems/freebsd/netinet/udp.h>
+
+
+
+static void
+sctp_stop_all_cookie_timers(struct sctp_tcb *stcb)
+{
+ struct sctp_nets *net;
+
+ /*
+	 * This now stops not only all cookie timers but also any INIT
+	 * timers. This will make sure that the timers are stopped in
+	 * all collision cases.
+ */
+ SCTP_TCB_LOCK_ASSERT(stcb);
+ TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+ if (net->rxt_timer.type == SCTP_TIMER_TYPE_COOKIE) {
+ sctp_timer_stop(SCTP_TIMER_TYPE_COOKIE,
+ stcb->sctp_ep,
+ stcb,
+ net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_1);
+ } else if (net->rxt_timer.type == SCTP_TIMER_TYPE_INIT) {
+ sctp_timer_stop(SCTP_TIMER_TYPE_INIT,
+ stcb->sctp_ep,
+ stcb,
+ net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_2);
+ }
+ }
+}
+
+/* INIT handler */
+static void
+sctp_handle_init(struct mbuf *m, int iphlen, int offset, struct sctphdr *sh,
+ struct sctp_init_chunk *cp, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
+ struct sctp_nets *net, int *abort_no_unlock, uint32_t vrf_id, uint16_t port)
+{
+ struct sctp_init *init;
+ struct mbuf *op_err;
+ uint32_t init_limit;
+
+ SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_init: handling INIT tcb:%p\n",
+ stcb);
+ if (stcb == NULL) {
+ SCTP_INP_RLOCK(inp);
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
+ goto outnow;
+ }
+ }
+ op_err = NULL;
+ init = &cp->init;
+ /* First are we accepting? */
+ if ((inp->sctp_socket->so_qlimit == 0) && (stcb == NULL)) {
+ SCTPDBG(SCTP_DEBUG_INPUT2,
+ "sctp_handle_init: Abort, so_qlimit:%d\n",
+ inp->sctp_socket->so_qlimit);
+ /*
+		 * FIX ME ?? What about the TCP model when we have a
+		 * match/restart case? Actually no fix is needed. The lookup
+		 * will always find the existing assoc so stcb would not be
+		 * NULL. It may be questionable to do this since we COULD
+		 * just send back the INIT-ACK and hope that the app did
+		 * accept()'s by the time the COOKIE was sent. But there is
+		 * a price to pay for COOKIE generation and I don't want to
+		 * pay it on the chance that the app will actually do some
+		 * accepts(). The App just loses and should NOT be in this
+ * state :-)
+ */
+ sctp_abort_association(inp, stcb, m, iphlen, sh, op_err,
+ vrf_id, port);
+ if (stcb)
+ *abort_no_unlock = 1;
+ goto outnow;
+ }
+ if (ntohs(cp->ch.chunk_length) < sizeof(struct sctp_init_chunk)) {
+ /* Invalid length */
+ op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
+ sctp_abort_association(inp, stcb, m, iphlen, sh, op_err,
+ vrf_id, port);
+ if (stcb)
+ *abort_no_unlock = 1;
+ goto outnow;
+ }
+ /* validate parameters */
+ if (init->initiate_tag == 0) {
+ /* protocol error... send abort */
+ op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
+ sctp_abort_association(inp, stcb, m, iphlen, sh, op_err,
+ vrf_id, port);
+ if (stcb)
+ *abort_no_unlock = 1;
+ goto outnow;
+ }
+ if (ntohl(init->a_rwnd) < SCTP_MIN_RWND) {
+ /* invalid parameter... send abort */
+ op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
+ sctp_abort_association(inp, stcb, m, iphlen, sh, op_err,
+ vrf_id, port);
+ if (stcb)
+ *abort_no_unlock = 1;
+ goto outnow;
+ }
+ if (init->num_inbound_streams == 0) {
+ /* protocol error... send abort */
+ op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
+ sctp_abort_association(inp, stcb, m, iphlen, sh, op_err,
+ vrf_id, port);
+ if (stcb)
+ *abort_no_unlock = 1;
+ goto outnow;
+ }
+ if (init->num_outbound_streams == 0) {
+ /* protocol error... send abort */
+ op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
+ sctp_abort_association(inp, stcb, m, iphlen, sh, op_err,
+ vrf_id, port);
+ if (stcb)
+ *abort_no_unlock = 1;
+ goto outnow;
+ }
+ init_limit = offset + ntohs(cp->ch.chunk_length);
+ if (sctp_validate_init_auth_params(m, offset + sizeof(*cp),
+ init_limit)) {
+ /* auth parameter(s) error... send abort */
+ sctp_abort_association(inp, stcb, m, iphlen, sh, NULL, vrf_id, port);
+ if (stcb)
+ *abort_no_unlock = 1;
+ goto outnow;
+ }
+ /* send an INIT-ACK w/cookie */
+ SCTPDBG(SCTP_DEBUG_INPUT3, "sctp_handle_init: sending INIT-ACK\n");
+ sctp_send_initiate_ack(inp, stcb, m, iphlen, offset, sh, cp, vrf_id, port,
+ ((stcb == NULL) ? SCTP_HOLDS_LOCK : SCTP_NOT_LOCKED));
+outnow:
+ if (stcb == NULL) {
+ SCTP_INP_RUNLOCK(inp);
+ }
+}
+
+/*
+ * process peer "INIT/INIT-ACK" chunk; returns value < 0 on error
+ */
+
+int
+sctp_is_there_unsent_data(struct sctp_tcb *stcb)
+{
+ int unsent_data = 0;
+ struct sctp_stream_queue_pending *sp;
+ struct sctp_stream_out *strq;
+ struct sctp_association *asoc;
+
+ /*
+	 * This function returns the number of streams that have true unsent
+	 * data on them. Note that as it looks through, it will clean up any
+	 * places that have old data that has been sent but left at the top
+	 * of the stream queue.
+ */
+ asoc = &stcb->asoc;
+ SCTP_TCB_SEND_LOCK(stcb);
+ if (!TAILQ_EMPTY(&asoc->out_wheel)) {
+ /* Check to see if some data queued */
+ TAILQ_FOREACH(strq, &asoc->out_wheel, next_spoke) {
+ is_there_another:
+ /* sa_ignore FREED_MEMORY */
+ sp = TAILQ_FIRST(&strq->outqueue);
+ if (sp == NULL) {
+ continue;
+ }
+ if ((sp->msg_is_complete) &&
+ (sp->length == 0) &&
+ (sp->sender_all_done)) {
+ /*
+				 * We are doing deferred cleanup. Last time
+ * through when we took all the data the
+ * sender_all_done was not set.
+ */
+ if (sp->put_last_out == 0) {
+ SCTP_PRINTF("Gak, put out entire msg with NO end!-1\n");
+ SCTP_PRINTF("sender_done:%d len:%d msg_comp:%d put_last_out:%d\n",
+ sp->sender_all_done,
+ sp->length,
+ sp->msg_is_complete,
+ sp->put_last_out);
+ }
+ atomic_subtract_int(&stcb->asoc.stream_queue_cnt, 1);
+ TAILQ_REMOVE(&strq->outqueue, sp, next);
+ if (sp->net) {
+ sctp_free_remote_addr(sp->net);
+ sp->net = NULL;
+ }
+ if (sp->data) {
+ sctp_m_freem(sp->data);
+ sp->data = NULL;
+ }
+ sctp_free_a_strmoq(stcb, sp);
+ goto is_there_another;
+ } else {
+ unsent_data++;
+ continue;
+ }
+ }
+ }
+ SCTP_TCB_SEND_UNLOCK(stcb);
+ return (unsent_data);
+}
+
+static int
+sctp_process_init(struct sctp_init_chunk *cp, struct sctp_tcb *stcb,
+ struct sctp_nets *net)
+{
+ struct sctp_init *init;
+ struct sctp_association *asoc;
+ struct sctp_nets *lnet;
+ unsigned int i;
+
+ init = &cp->init;
+ asoc = &stcb->asoc;
+ /* save off parameters */
+ asoc->peer_vtag = ntohl(init->initiate_tag);
+ asoc->peers_rwnd = ntohl(init->a_rwnd);
+ if (!TAILQ_EMPTY(&asoc->nets)) {
+ /* update any ssthresh's that may have a default */
+ TAILQ_FOREACH(lnet, &asoc->nets, sctp_next) {
+ lnet->ssthresh = asoc->peers_rwnd;
+
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_CWND_MONITOR_ENABLE | SCTP_CWND_LOGGING_ENABLE)) {
+ sctp_log_cwnd(stcb, lnet, 0, SCTP_CWND_INITIALIZATION);
+ }
+ }
+ }
+ SCTP_TCB_SEND_LOCK(stcb);
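+	/*
+	 * If the peer granted fewer inbound streams than we pre-opened,
+	 * abandon any data queued on the now-invalid upper streams and
+	 * notify the ULP of the unsent datagrams.
+	 */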
+ if (asoc->pre_open_streams > ntohs(init->num_inbound_streams)) {
+ unsigned int newcnt;
+ struct sctp_stream_out *outs;
+ struct sctp_stream_queue_pending *sp;
+ struct sctp_tmit_chunk *chk, *chk_next;
+
+ /* abandon the upper streams */
+ newcnt = ntohs(init->num_inbound_streams);
+ if (!TAILQ_EMPTY(&asoc->send_queue)) {
+ chk = TAILQ_FIRST(&asoc->send_queue);
+ while (chk) {
+ chk_next = TAILQ_NEXT(chk, sctp_next);
+ if (chk->rec.data.stream_number >= newcnt) {
+ TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
+ asoc->send_queue_cnt--;
+ if (chk->data != NULL) {
+ sctp_free_bufspace(stcb, asoc, chk, 1);
+ sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb,
+ SCTP_NOTIFY_DATAGRAM_UNSENT, chk, SCTP_SO_NOT_LOCKED);
+ if (chk->data) {
+ sctp_m_freem(chk->data);
+ chk->data = NULL;
+ }
+ }
+ sctp_free_a_chunk(stcb, chk);
+ /* sa_ignore FREED_MEMORY */
+ }
+ chk = chk_next;
+ }
+ }
+ if (asoc->strmout) {
+ for (i = newcnt; i < asoc->pre_open_streams; i++) {
+ outs = &asoc->strmout[i];
+ sp = TAILQ_FIRST(&outs->outqueue);
+ while (sp) {
+ TAILQ_REMOVE(&outs->outqueue, sp, next);
+ asoc->stream_queue_cnt--;
+ sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL,
+ stcb, SCTP_NOTIFY_DATAGRAM_UNSENT,
+ sp, SCTP_SO_NOT_LOCKED);
+ if (sp->data) {
+ sctp_m_freem(sp->data);
+ sp->data = NULL;
+ }
+ if (sp->net) {
+ sctp_free_remote_addr(sp->net);
+ sp->net = NULL;
+ }
+ /* Free the chunk */
+ sctp_free_a_strmoq(stcb, sp);
+ /* sa_ignore FREED_MEMORY */
+ sp = TAILQ_FIRST(&outs->outqueue);
+ }
+ }
+ }
+ /* cut back the count */
+ asoc->pre_open_streams = newcnt;
+ }
+ SCTP_TCB_SEND_UNLOCK(stcb);
+ asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams;
+ /* init tsn's */
+ asoc->highest_tsn_inside_map = asoc->asconf_seq_in = ntohl(init->initial_tsn) - 1;
+ /* EY - nr_sack: initialize highest tsn in nr_mapping_array */
+ asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map;
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
+ sctp_log_map(0, 5, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
+ }
+ /* This is the next one we expect */
+ asoc->str_reset_seq_in = asoc->asconf_seq_in + 1;
+
+ asoc->mapping_array_base_tsn = ntohl(init->initial_tsn);
+ asoc->tsn_last_delivered = asoc->cumulative_tsn = asoc->asconf_seq_in;
+ asoc->last_echo_tsn = asoc->asconf_seq_in;
+ asoc->advanced_peer_ack_point = asoc->last_acked_seq;
+ /* open the requested streams */
+
+ if (asoc->strmin != NULL) {
+ /* Free the old ones */
+ struct sctp_queued_to_read *ctl;
+
+ for (i = 0; i < asoc->streamincnt; i++) {
+ ctl = TAILQ_FIRST(&asoc->strmin[i].inqueue);
+ while (ctl) {
+ TAILQ_REMOVE(&asoc->strmin[i].inqueue, ctl, next);
+ sctp_free_remote_addr(ctl->whoFrom);
+ ctl->whoFrom = NULL;
+ sctp_m_freem(ctl->data);
+ ctl->data = NULL;
+ sctp_free_a_readq(stcb, ctl);
+ ctl = TAILQ_FIRST(&asoc->strmin[i].inqueue);
+ }
+ }
+ SCTP_FREE(asoc->strmin, SCTP_M_STRMI);
+ }
+ asoc->streamincnt = ntohs(init->num_outbound_streams);
+ if (asoc->streamincnt > MAX_SCTP_STREAMS) {
+ asoc->streamincnt = MAX_SCTP_STREAMS;
+ }
+ SCTP_MALLOC(asoc->strmin, struct sctp_stream_in *, asoc->streamincnt *
+ sizeof(struct sctp_stream_in), SCTP_M_STRMI);
+ if (asoc->strmin == NULL) {
+ /* we didn't get memory for the streams! */
+ SCTPDBG(SCTP_DEBUG_INPUT2, "process_init: couldn't get memory for the streams!\n");
+ return (-1);
+ }
+ for (i = 0; i < asoc->streamincnt; i++) {
+ asoc->strmin[i].stream_no = i;
+ asoc->strmin[i].last_sequence_delivered = 0xffff;
+ /*
+ * U-stream ranges will be set when the cookie is unpacked.
+		 * Or, for the INIT sender, they are unset (if pr-sctp is not
+		 * supported) when the INIT-ACK arrives.
+ */
+ TAILQ_INIT(&asoc->strmin[i].inqueue);
+ asoc->strmin[i].delivery_started = 0;
+ }
+ /*
+ * load_address_from_init will put the addresses into the
+ * association when the COOKIE is processed or the INIT-ACK is
+	 * processed. Both types of COOKIEs, existing and new, call this
+ * routine. It will remove addresses that are no longer in the
+ * association (for the restarting case where addresses are
+ * removed). Up front when the INIT arrives we will discard it if it
+ * is a restart and new addresses have been added.
+ */
+ /* sa_ignore MEMLEAK */
+ return (0);
+}
+
+/*
+ * INIT-ACK message processing/consumption returns value < 0 on error
+ */
+static int
+sctp_process_init_ack(struct mbuf *m, int iphlen, int offset,
+ struct sctphdr *sh, struct sctp_init_ack_chunk *cp, struct sctp_tcb *stcb,
+ struct sctp_nets *net, int *abort_no_unlock, uint32_t vrf_id)
+{
+ struct sctp_association *asoc;
+ struct mbuf *op_err;
+ int retval, abort_flag;
+ uint32_t initack_limit;
+ int nat_friendly = 0;
+
+ /* First verify that we have no illegal param's */
+ abort_flag = 0;
+ op_err = NULL;
+
+ op_err = sctp_arethere_unrecognized_parameters(m,
+ (offset + sizeof(struct sctp_init_chunk)),
+ &abort_flag, (struct sctp_chunkhdr *)cp, &nat_friendly);
+ if (abort_flag) {
+ /* Send an abort and notify peer */
+ sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_CAUSE_PROTOCOL_VIOLATION, op_err, SCTP_SO_NOT_LOCKED);
+ *abort_no_unlock = 1;
+ return (-1);
+ }
+ asoc = &stcb->asoc;
+ asoc->peer_supports_nat = (uint8_t) nat_friendly;
+ /* process the peer's parameters in the INIT-ACK */
+ retval = sctp_process_init((struct sctp_init_chunk *)cp, stcb, net);
+ if (retval < 0) {
+ return (retval);
+ }
+ initack_limit = offset + ntohs(cp->ch.chunk_length);
+ /* load all addresses */
+ if ((retval = sctp_load_addresses_from_init(stcb, m, iphlen,
+ (offset + sizeof(struct sctp_init_chunk)), initack_limit, sh,
+ NULL))) {
+ /* Huh, we should abort */
+ SCTPDBG(SCTP_DEBUG_INPUT1,
+ "Load addresses from INIT causes an abort %d\n",
+ retval);
+ sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
+ NULL, 0, net->port);
+ *abort_no_unlock = 1;
+ return (-1);
+ }
+ /* if the peer doesn't support asconf, flush the asconf queue */
+ if (asoc->peer_supports_asconf == 0) {
+ struct sctp_asconf_addr *aparam;
+
+ while (!TAILQ_EMPTY(&asoc->asconf_queue)) {
+ /* sa_ignore FREED_MEMORY */
+ aparam = TAILQ_FIRST(&asoc->asconf_queue);
+ TAILQ_REMOVE(&asoc->asconf_queue, aparam, next);
+ SCTP_FREE(aparam, SCTP_M_ASC_ADDR);
+ }
+ }
+ stcb->asoc.peer_hmac_id = sctp_negotiate_hmacid(stcb->asoc.peer_hmacs,
+ stcb->asoc.local_hmacs);
+ if (op_err) {
+ sctp_queue_op_err(stcb, op_err);
+ /* queuing will steal away the mbuf chain to the out queue */
+ op_err = NULL;
+ }
+ /* extract the cookie and queue it to "echo" it back... */
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
+ sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
+ stcb->asoc.overall_error_count,
+ 0,
+ SCTP_FROM_SCTP_INPUT,
+ __LINE__);
+ }
+ stcb->asoc.overall_error_count = 0;
+ net->error_count = 0;
+
+ /*
+	 * Cancel the INIT timer. We do this first, before queueing the
+	 * cookie. We always cancel at the primary to assure that we are
+ * canceling the timer started by the INIT which always goes to the
+ * primary.
+ */
+ sctp_timer_stop(SCTP_TIMER_TYPE_INIT, stcb->sctp_ep, stcb,
+ asoc->primary_destination, SCTP_FROM_SCTP_INPUT + SCTP_LOC_4);
+
+ /* calculate the RTO */
+ net->RTO = sctp_calculate_rto(stcb, asoc, net, &asoc->time_entered, sctp_align_safe_nocopy);
+
+ retval = sctp_send_cookie_echo(m, offset, stcb, net);
+ if (retval < 0) {
+ /*
+		 * No cookie, we probably should send an op error. But in
+		 * any case if there is no cookie in the INIT-ACK, we can
+		 * abandon the peer; it's broken.
+ */
+ if (retval == -3) {
+ /* We abort with an error of missing mandatory param */
+ op_err =
+ sctp_generate_invmanparam(SCTP_CAUSE_MISSING_PARAM);
+ if (op_err) {
+ /*
+ * Expand beyond to include the mandatory
+ * param cookie
+ */
+ struct sctp_inv_mandatory_param *mp;
+
+ SCTP_BUF_LEN(op_err) =
+ sizeof(struct sctp_inv_mandatory_param);
+ mp = mtod(op_err,
+ struct sctp_inv_mandatory_param *);
+ /* Subtract the reserved param */
+ mp->length =
+ htons(sizeof(struct sctp_inv_mandatory_param) - 2);
+ mp->num_param = htonl(1);
+ mp->param = htons(SCTP_STATE_COOKIE);
+ mp->resv = 0;
+ }
+ sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen,
+ sh, op_err, 0, net->port);
+ *abort_no_unlock = 1;
+ }
+ return (retval);
+ }
+ return (0);
+}
+
+static void
+sctp_handle_heartbeat_ack(struct sctp_heartbeat_chunk *cp,
+ struct sctp_tcb *stcb, struct sctp_nets *net)
+{
+ struct sockaddr_storage store;
+ struct sockaddr_in *sin;
+ struct sockaddr_in6 *sin6;
+ struct sctp_nets *r_net, *f_net;
+ struct timeval tv;
+ int req_prim = 0;
+
+ if (ntohs(cp->ch.chunk_length) != sizeof(struct sctp_heartbeat_chunk)) {
+ /* Invalid length */
+ return;
+ }
+ sin = (struct sockaddr_in *)&store;
+ sin6 = (struct sockaddr_in6 *)&store;
+
+ memset(&store, 0, sizeof(store));
+ if (cp->heartbeat.hb_info.addr_family == AF_INET &&
+ cp->heartbeat.hb_info.addr_len == sizeof(struct sockaddr_in)) {
+ sin->sin_family = cp->heartbeat.hb_info.addr_family;
+ sin->sin_len = cp->heartbeat.hb_info.addr_len;
+ sin->sin_port = stcb->rport;
+ memcpy(&sin->sin_addr, cp->heartbeat.hb_info.address,
+ sizeof(sin->sin_addr));
+ } else if (cp->heartbeat.hb_info.addr_family == AF_INET6 &&
+ cp->heartbeat.hb_info.addr_len == sizeof(struct sockaddr_in6)) {
+ sin6->sin6_family = cp->heartbeat.hb_info.addr_family;
+ sin6->sin6_len = cp->heartbeat.hb_info.addr_len;
+ sin6->sin6_port = stcb->rport;
+ memcpy(&sin6->sin6_addr, cp->heartbeat.hb_info.address,
+ sizeof(sin6->sin6_addr));
+ } else {
+ return;
+ }
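+	/* Find the net that matches the address echoed back in the heartbeat */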
+ r_net = sctp_findnet(stcb, (struct sockaddr *)sin);
+ if (r_net == NULL) {
+ SCTPDBG(SCTP_DEBUG_INPUT1, "Huh? I can't find the address I sent it to, discard\n");
+ return;
+ }
+ if ((r_net && (r_net->dest_state & SCTP_ADDR_UNCONFIRMED)) &&
+ (r_net->heartbeat_random1 == cp->heartbeat.hb_info.random_value1) &&
+ (r_net->heartbeat_random2 == cp->heartbeat.hb_info.random_value2)) {
+ /*
+		 * If it's a HB and its random value is correct, we can
+		 * confirm the destination.
+ */
+ r_net->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
+ if (r_net->dest_state & SCTP_ADDR_REQ_PRIMARY) {
+ stcb->asoc.primary_destination = r_net;
+ r_net->dest_state &= ~SCTP_ADDR_WAS_PRIMARY;
+ r_net->dest_state &= ~SCTP_ADDR_REQ_PRIMARY;
+ f_net = TAILQ_FIRST(&stcb->asoc.nets);
+ if (f_net != r_net) {
+ /*
+				 * The first one on the list is NOT the
+				 * primary. sctp_cmpaddr() is much more
+				 * efficient if the primary is the first
+				 * on the list, so make it so.
+ */
+ TAILQ_REMOVE(&stcb->asoc.nets, r_net, sctp_next);
+ TAILQ_INSERT_HEAD(&stcb->asoc.nets, r_net, sctp_next);
+ }
+ req_prim = 1;
+ }
+ sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
+ stcb, 0, (void *)r_net, SCTP_SO_NOT_LOCKED);
+ }
+ r_net->error_count = 0;
+ r_net->hb_responded = 1;
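+	/*
+	 * The heartbeat echoes back the timestamp we sent in it; rebuild
+	 * that here for the RTO calculation further below.
+	 */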
+ tv.tv_sec = cp->heartbeat.hb_info.time_value_1;
+ tv.tv_usec = cp->heartbeat.hb_info.time_value_2;
+ if (r_net->dest_state & SCTP_ADDR_NOT_REACHABLE) {
+ r_net->dest_state &= ~SCTP_ADDR_NOT_REACHABLE;
+ r_net->dest_state |= SCTP_ADDR_REACHABLE;
+ sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
+ SCTP_HEARTBEAT_SUCCESS, (void *)r_net, SCTP_SO_NOT_LOCKED);
+ /* now was it the primary? if so restore */
+ if (r_net->dest_state & SCTP_ADDR_WAS_PRIMARY) {
+ (void)sctp_set_primary_addr(stcb, (struct sockaddr *)NULL, r_net);
+ }
+ }
+ /*
+ * JRS 5/14/07 - If CMT PF is on and the destination is in PF state,
+ * set the destination to active state and set the cwnd to one or
+ * two MTU's based on whether PF1 or PF2 is being used. If a T3
+	 * timer is running for the destination, stop the timer because a
+ * PF-heartbeat was received.
+ */
+ if ((stcb->asoc.sctp_cmt_on_off == 1) &&
+ (stcb->asoc.sctp_cmt_pf > 0) &&
+ ((net->dest_state & SCTP_ADDR_PF) == SCTP_ADDR_PF)) {
+ if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
+ sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
+ stcb, net,
+ SCTP_FROM_SCTP_INPUT + SCTP_LOC_5);
+ }
+ net->dest_state &= ~SCTP_ADDR_PF;
+ net->cwnd = net->mtu * stcb->asoc.sctp_cmt_pf;
+ SCTPDBG(SCTP_DEBUG_INPUT1, "Destination %p moved from PF to reachable with cwnd %d.\n",
+ net, net->cwnd);
+ }
+ /* Now lets do a RTO with this */
+ r_net->RTO = sctp_calculate_rto(stcb, &stcb->asoc, r_net, &tv, sctp_align_safe_nocopy);
+ /* Mobility adaptation */
+ if (req_prim) {
+ if ((sctp_is_mobility_feature_on(stcb->sctp_ep,
+ SCTP_MOBILITY_BASE) ||
+ sctp_is_mobility_feature_on(stcb->sctp_ep,
+ SCTP_MOBILITY_FASTHANDOFF)) &&
+ sctp_is_mobility_feature_on(stcb->sctp_ep,
+ SCTP_MOBILITY_PRIM_DELETED)) {
+
+ sctp_timer_stop(SCTP_TIMER_TYPE_PRIM_DELETED, stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_TIMER + SCTP_LOC_7);
+ if (sctp_is_mobility_feature_on(stcb->sctp_ep,
+ SCTP_MOBILITY_FASTHANDOFF)) {
+ sctp_assoc_immediate_retrans(stcb,
+ stcb->asoc.primary_destination);
+ }
+ if (sctp_is_mobility_feature_on(stcb->sctp_ep,
+ SCTP_MOBILITY_BASE)) {
+ sctp_move_chunks_from_net(stcb,
+ stcb->asoc.deleted_primary);
+ }
+ sctp_delete_prim_timer(stcb->sctp_ep, stcb,
+ stcb->asoc.deleted_primary);
+ }
+ }
+}
+
+static int
+sctp_handle_nat_colliding_state(struct sctp_tcb *stcb)
+{
+ /*
+	 * Return 0 means we want you to proceed with the abort; non-zero
+	 * means no abort processing.
+ */
+ struct sctpasochead *head;
+
+ if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) {
+ /* generate a new vtag and send init */
+ LIST_REMOVE(stcb, sctp_asocs);
+ stcb->asoc.my_vtag = sctp_select_a_tag(stcb->sctp_ep, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
+ head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(stcb->asoc.my_vtag, SCTP_BASE_INFO(hashasocmark))];
+ /*
+ * put it in the bucket in the vtag hash of assoc's for the
+ * system
+ */
+ LIST_INSERT_HEAD(head, stcb, sctp_asocs);
+ sctp_send_initiate(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
+ return (1);
+ }
+ if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED) {
+ /*
+		 * Treat like a case where the cookie expired, i.e.:
+		 * - dump the current cookie
+		 * - generate a new vtag
+		 * - resend init
+ */
+ /* generate a new vtag and send init */
+ LIST_REMOVE(stcb, sctp_asocs);
+ stcb->asoc.state &= ~SCTP_STATE_COOKIE_ECHOED;
+ stcb->asoc.state |= SCTP_STATE_COOKIE_WAIT;
+ sctp_stop_all_cookie_timers(stcb);
+ sctp_toss_old_cookies(stcb, &stcb->asoc);
+ stcb->asoc.my_vtag = sctp_select_a_tag(stcb->sctp_ep, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
+ head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(stcb->asoc.my_vtag, SCTP_BASE_INFO(hashasocmark))];
+ /*
+ * put it in the bucket in the vtag hash of assoc's for the
+ * system
+ */
+ LIST_INSERT_HEAD(head, stcb, sctp_asocs);
+ sctp_send_initiate(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
+ return (1);
+ }
+ return (0);
+}
+
+static int
+sctp_handle_nat_missing_state(struct sctp_tcb *stcb,
+ struct sctp_nets *net)
+{
+ /*
+	 * Return 0 means we want you to proceed with the abort; non-zero
+	 * means no abort processing.
+ */
+ if (stcb->asoc.peer_supports_auth == 0) {
+ SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_nat_missing_state: Peer does not support AUTH, cannot send an asconf\n");
+ return (0);
+ }
+ sctp_asconf_send_nat_state_update(stcb, net);
+ return (1);
+}
+
+
+static void
+sctp_handle_abort(struct sctp_abort_chunk *cp,
+ struct sctp_tcb *stcb, struct sctp_nets *net)
+{
+#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+ struct socket *so;
+
+#endif
+ uint16_t len;
+
+ SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_abort: handling ABORT\n");
+ if (stcb == NULL)
+ return;
+
+ len = ntohs(cp->ch.chunk_length);
+ if (len > sizeof(struct sctp_chunkhdr)) {
+ /*
+ * Need to check the cause codes for our two magic nat
+		 * aborts, which don't necessarily kill the assoc.
+ */
+ struct sctp_abort_chunk *cpnext;
+ struct sctp_missing_nat_state *natc;
+ uint16_t cause;
+
+ cpnext = cp;
+ cpnext++;
+ natc = (struct sctp_missing_nat_state *)cpnext;
+ cause = ntohs(natc->cause);
+ if (cause == SCTP_CAUSE_NAT_COLLIDING_STATE) {
+ SCTPDBG(SCTP_DEBUG_INPUT2, "Received Colliding state abort flags:%x\n",
+ cp->ch.chunk_flags);
+ if (sctp_handle_nat_colliding_state(stcb)) {
+ return;
+ }
+ } else if (cause == SCTP_CAUSE_NAT_MISSING_STATE) {
+ SCTPDBG(SCTP_DEBUG_INPUT2, "Received missing state abort flags:%x\n",
+ cp->ch.chunk_flags);
+ if (sctp_handle_nat_missing_state(stcb, net)) {
+ return;
+ }
+ }
+ }
+ /* stop any receive timers */
+ sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_6);
+ /* notify user of the abort and clean up... */
+ sctp_abort_notification(stcb, 0, SCTP_SO_NOT_LOCKED);
+ /* free the tcb */
+#if defined(SCTP_PANIC_ON_ABORT)
+ printf("stcb:%p state:%d rport:%d net:%p\n",
+ stcb, stcb->asoc.state, stcb->rport, net);
+ if (!(stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
+ panic("Received an ABORT");
+ } else {
+ printf("No panic its in state %x closed\n", stcb->asoc.state);
+ }
+#endif
+ SCTP_STAT_INCR_COUNTER32(sctps_aborted);
+ if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
+ (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
+ SCTP_STAT_DECR_GAUGE32(sctps_currestab);
+ }
+#ifdef SCTP_ASOCLOG_OF_TSNS
+ sctp_print_out_track_log(stcb);
+#endif
+#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+ so = SCTP_INP_SO(stcb->sctp_ep);
+ atomic_add_int(&stcb->asoc.refcnt, 1);
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_SOCKET_LOCK(so, 1);
+ SCTP_TCB_LOCK(stcb);
+ atomic_subtract_int(&stcb->asoc.refcnt, 1);
+#endif
+ stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
+ (void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC,
+ SCTP_FROM_SCTP_INPUT + SCTP_LOC_6);
+#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+ SCTP_SOCKET_UNLOCK(so, 1);
+#endif
+ SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_abort: finished\n");
+}
+
+static void
+sctp_handle_shutdown(struct sctp_shutdown_chunk *cp,
+ struct sctp_tcb *stcb, struct sctp_nets *net, int *abort_flag)
+{
+ struct sctp_association *asoc;
+ int some_on_streamwheel;
+
+#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+ struct socket *so;
+
+#endif
+
+ SCTPDBG(SCTP_DEBUG_INPUT2,
+ "sctp_handle_shutdown: handling SHUTDOWN\n");
+ if (stcb == NULL)
+ return;
+ asoc = &stcb->asoc;
+ if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) ||
+ (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)) {
+ return;
+ }
+ if (ntohs(cp->ch.chunk_length) != sizeof(struct sctp_shutdown_chunk)) {
+ /* Shutdown NOT the expected size */
+ return;
+ } else {
+ sctp_update_acked(stcb, cp, net, abort_flag);
+ if (*abort_flag) {
+ return;
+ }
+ }
+ if (asoc->control_pdapi) {
+ /*
+		 * With a normal shutdown we assume the end of the last record.
+ */
+ SCTP_INP_READ_LOCK(stcb->sctp_ep);
+ asoc->control_pdapi->end_added = 1;
+ asoc->control_pdapi->pdapi_aborted = 1;
+ asoc->control_pdapi = NULL;
+ SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
+#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+ so = SCTP_INP_SO(stcb->sctp_ep);
+ atomic_add_int(&stcb->asoc.refcnt, 1);
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_SOCKET_LOCK(so, 1);
+ SCTP_TCB_LOCK(stcb);
+ atomic_subtract_int(&stcb->asoc.refcnt, 1);
+ if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
+ /* assoc was freed while we were unlocked */
+ SCTP_SOCKET_UNLOCK(so, 1);
+ return;
+ }
+#endif
+ sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
+#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+ SCTP_SOCKET_UNLOCK(so, 1);
+#endif
+ }
+ /* goto SHUTDOWN_RECEIVED state to block new requests */
+ if (stcb->sctp_socket) {
+ if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
+ (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT) &&
+ (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT)) {
+ SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_RECEIVED);
+ SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
+ /*
+ * notify upper layer that peer has initiated a
+ * shutdown
+ */
+ sctp_ulp_notify(SCTP_NOTIFY_PEER_SHUTDOWN, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
+
+ /* reset time */
+ (void)SCTP_GETTIME_TIMEVAL(&asoc->time_entered);
+ }
+ }
+ if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
+ /*
+ * stop the shutdown timer, since we WILL move to
+ * SHUTDOWN-ACK-SENT.
+ */
+ sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_8);
+ }
+ /* Now is there unsent data on a stream somewhere? */
+ some_on_streamwheel = sctp_is_there_unsent_data(stcb);
+
+ if (!TAILQ_EMPTY(&asoc->send_queue) ||
+ !TAILQ_EMPTY(&asoc->sent_queue) ||
+ some_on_streamwheel) {
+ /* By returning we will push more data out */
+ return;
+ } else {
+ /* no outstanding data to send, so move on... */
+ /* send SHUTDOWN-ACK */
+ sctp_send_shutdown_ack(stcb, stcb->asoc.primary_destination);
+ /* move to SHUTDOWN-ACK-SENT state */
+ if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
+ (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
+ SCTP_STAT_DECR_GAUGE32(sctps_currestab);
+ }
+ SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
+ SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
+ sctp_stop_timers_for_shutdown(stcb);
+ sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK, stcb->sctp_ep,
+ stcb, net);
+ }
+}
+
+static void
+sctp_handle_shutdown_ack(struct sctp_shutdown_ack_chunk *cp,
+ struct sctp_tcb *stcb,
+ struct sctp_nets *net)
+{
+ struct sctp_association *asoc;
+
+#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+ struct socket *so;
+
+ so = SCTP_INP_SO(stcb->sctp_ep);
+#endif
+ SCTPDBG(SCTP_DEBUG_INPUT2,
+ "sctp_handle_shutdown_ack: handling SHUTDOWN ACK\n");
+ if (stcb == NULL)
+ return;
+
+ asoc = &stcb->asoc;
+ /* process according to association state */
+ if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) ||
+ (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)) {
+ /* unexpected SHUTDOWN-ACK... do OOTB handling... */
+ sctp_send_shutdown_complete(stcb, net, 1);
+ SCTP_TCB_UNLOCK(stcb);
+ return;
+ }
+ if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
+ (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
+ /* unexpected SHUTDOWN-ACK... so ignore... */
+ SCTP_TCB_UNLOCK(stcb);
+ return;
+ }
+ if (asoc->control_pdapi) {
+ /*
+		 * With a normal shutdown we assume the end of the last record.
+ */
+ SCTP_INP_READ_LOCK(stcb->sctp_ep);
+ asoc->control_pdapi->end_added = 1;
+ asoc->control_pdapi->pdapi_aborted = 1;
+ asoc->control_pdapi = NULL;
+ SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
+#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+ atomic_add_int(&stcb->asoc.refcnt, 1);
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_SOCKET_LOCK(so, 1);
+ SCTP_TCB_LOCK(stcb);
+ atomic_subtract_int(&stcb->asoc.refcnt, 1);
+ if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
+ /* assoc was freed while we were unlocked */
+ SCTP_SOCKET_UNLOCK(so, 1);
+ return;
+ }
+#endif
+ sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
+#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+ SCTP_SOCKET_UNLOCK(so, 1);
+#endif
+ }
+ /* are the queues empty? */
+ if (!TAILQ_EMPTY(&asoc->send_queue) ||
+ !TAILQ_EMPTY(&asoc->sent_queue) ||
+ !TAILQ_EMPTY(&asoc->out_wheel)) {
+ sctp_report_all_outbound(stcb, 0, SCTP_SO_NOT_LOCKED);
+ }
+ /* stop the timer */
+ sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_9);
+ /* send SHUTDOWN-COMPLETE */
+ sctp_send_shutdown_complete(stcb, net, 0);
+ /* notify upper layer protocol */
+ if (stcb->sctp_socket) {
+ sctp_ulp_notify(SCTP_NOTIFY_ASSOC_DOWN, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
+ if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+ (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
+ /* Set the connected flag to disconnected */
+ stcb->sctp_ep->sctp_socket->so_snd.sb_cc = 0;
+ }
+ }
+ SCTP_STAT_INCR_COUNTER32(sctps_shutdown);
+ /* free the TCB but first save off the ep */
+#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+ atomic_add_int(&stcb->asoc.refcnt, 1);
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_SOCKET_LOCK(so, 1);
+ SCTP_TCB_LOCK(stcb);
+ atomic_subtract_int(&stcb->asoc.refcnt, 1);
+#endif
+ (void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC,
+ SCTP_FROM_SCTP_INPUT + SCTP_LOC_10);
+#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+ SCTP_SOCKET_UNLOCK(so, 1);
+#endif
+}
+
+/*
+ * Skip past the param header and then we will find the chunk that caused the
+ * problem. There are two possibilities: ASCONF or FWD-TSN. Anything other
+ * than that and our peer must be broken.
+ */
+static void
+sctp_process_unrecog_chunk(struct sctp_tcb *stcb, struct sctp_paramhdr *phdr,
+ struct sctp_nets *net)
+{
+ struct sctp_chunkhdr *chk;
+
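+	/* The offending chunk header immediately follows the cause header */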
+ chk = (struct sctp_chunkhdr *)((caddr_t)phdr + sizeof(*phdr));
+ switch (chk->chunk_type) {
+ case SCTP_ASCONF_ACK:
+ case SCTP_ASCONF:
+ sctp_asconf_cleanup(stcb, net);
+ break;
+ case SCTP_FORWARD_CUM_TSN:
+ stcb->asoc.peer_supports_prsctp = 0;
+ break;
+ default:
+ SCTPDBG(SCTP_DEBUG_INPUT2,
+ "Peer does not support chunk type %d(%x)??\n",
+ chk->chunk_type, (uint32_t) chk->chunk_type);
+ break;
+ }
+}
+
+/*
+ * Skip past the param header and then we will find the param that caused the
+ * problem. There are a number of params in an ASCONF or the prsctp param;
+ * these will turn off specific features.
+ */
+static void
+sctp_process_unrecog_param(struct sctp_tcb *stcb, struct sctp_paramhdr *phdr)
+{
+ struct sctp_paramhdr *pbad;
+
+ pbad = phdr + 1;
+ switch (ntohs(pbad->param_type)) {
+ /* pr-sctp draft */
+ case SCTP_PRSCTP_SUPPORTED:
+ stcb->asoc.peer_supports_prsctp = 0;
+ break;
+ case SCTP_SUPPORTED_CHUNK_EXT:
+ break;
+ /* draft-ietf-tsvwg-addip-sctp */
+ case SCTP_HAS_NAT_SUPPORT:
+ stcb->asoc.peer_supports_nat = 0;
+ break;
+ case SCTP_ECN_NONCE_SUPPORTED:
+ stcb->asoc.peer_supports_ecn_nonce = 0;
+ stcb->asoc.ecn_nonce_allowed = 0;
+ stcb->asoc.ecn_allowed = 0;
+ break;
+ case SCTP_ADD_IP_ADDRESS:
+ case SCTP_DEL_IP_ADDRESS:
+ case SCTP_SET_PRIM_ADDR:
+ stcb->asoc.peer_supports_asconf = 0;
+ break;
+ case SCTP_SUCCESS_REPORT:
+ case SCTP_ERROR_CAUSE_IND:
+ SCTPDBG(SCTP_DEBUG_INPUT2, "Huh, the peer does not support success? or error cause?\n");
+ SCTPDBG(SCTP_DEBUG_INPUT2,
+ "Turning off ASCONF to this strange peer\n");
+ stcb->asoc.peer_supports_asconf = 0;
+ break;
+ default:
+ SCTPDBG(SCTP_DEBUG_INPUT2,
+ "Peer does not support param type %d(%x)??\n",
+ pbad->param_type, (uint32_t) pbad->param_type);
+ break;
+ }
+}
+
+static int
+sctp_handle_error(struct sctp_chunkhdr *ch,
+ struct sctp_tcb *stcb, struct sctp_nets *net)
+{
+ int chklen;
+ struct sctp_paramhdr *phdr;
+ uint16_t error_type;
+ uint16_t error_len;
+ struct sctp_association *asoc;
+ int adjust;
+
+#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+ struct socket *so;
+
+#endif
+
+ /* parse through all of the errors and process */
+ asoc = &stcb->asoc;
+ phdr = (struct sctp_paramhdr *)((caddr_t)ch +
+ sizeof(struct sctp_chunkhdr));
+ chklen = ntohs(ch->chunk_length) - sizeof(struct sctp_chunkhdr);
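+ /* chunk_length includes the chunk header; what remains is the causes */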
+ while ((size_t)chklen >= sizeof(struct sctp_paramhdr)) {
+ /* Process an Error Cause */
+ error_type = ntohs(phdr->param_type);
+ error_len = ntohs(phdr->param_length);
+ if ((error_len > chklen) || (error_len == 0)) {
+ /* invalid param length for this param */
+ SCTPDBG(SCTP_DEBUG_INPUT1, "Bogus length in error param- chunk left:%d errorlen:%d\n",
+ chklen, error_len);
+ return (0);
+ }
+ switch (error_type) {
+ case SCTP_CAUSE_INVALID_STREAM:
+ case SCTP_CAUSE_MISSING_PARAM:
+ case SCTP_CAUSE_INVALID_PARAM:
+ case SCTP_CAUSE_NO_USER_DATA:
+ SCTPDBG(SCTP_DEBUG_INPUT1, "Software error we got a %d back? We have a bug :/ (or do they?)\n",
+ error_type);
+ break;
+ case SCTP_CAUSE_NAT_COLLIDING_STATE:
+ SCTPDBG(SCTP_DEBUG_INPUT2, "Received Colliding state abort flags:%x\n",
+ ch->chunk_flags);
+ if (sctp_handle_nat_colliding_state(stcb)) {
+ return (0);
+ }
+ break;
+ case SCTP_CAUSE_NAT_MISSING_STATE:
+ SCTPDBG(SCTP_DEBUG_INPUT2, "Received missing state abort flags:%x\n",
+ ch->chunk_flags);
+ if (sctp_handle_nat_missing_state(stcb, net)) {
+ return (0);
+ }
+ break;
+ case SCTP_CAUSE_STALE_COOKIE:
+ /*
+ * We only act if we have echoed a cookie and are
+ * waiting.
+ */
+ if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED) {
+ int *p;
+
+ p = (int *)((caddr_t)phdr + sizeof(*phdr));
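+ /*
+ * The peer reports how stale our cookie was (in usec); we save
+ * the value doubled, presumably to request a longer cookie
+ * lifetime (a Cookie Preservative) with the retried INIT.
+ */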
+ /* Save the time doubled */
+ asoc->cookie_preserve_req = ntohl(*p) << 1;
+ asoc->stale_cookie_count++;
+ if (asoc->stale_cookie_count >
+ asoc->max_init_times) {
+ sctp_abort_notification(stcb, 0, SCTP_SO_NOT_LOCKED);
+ /* now free the asoc */
+#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+ so = SCTP_INP_SO(stcb->sctp_ep);
+ atomic_add_int(&stcb->asoc.refcnt, 1);
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_SOCKET_LOCK(so, 1);
+ SCTP_TCB_LOCK(stcb);
+ atomic_subtract_int(&stcb->asoc.refcnt, 1);
+#endif
+ (void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC,
+ SCTP_FROM_SCTP_INPUT + SCTP_LOC_11);
+#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+ SCTP_SOCKET_UNLOCK(so, 1);
+#endif
+ return (-1);
+ }
+ /* blast back to INIT state */
+ sctp_toss_old_cookies(stcb, &stcb->asoc);
+ asoc->state &= ~SCTP_STATE_COOKIE_ECHOED;
+ asoc->state |= SCTP_STATE_COOKIE_WAIT;
+ sctp_stop_all_cookie_timers(stcb);
+ sctp_send_initiate(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
+ }
+ break;
+ case SCTP_CAUSE_UNRESOLVABLE_ADDR:
+ /*
+ * Nothing we can do here, we don't do hostname
+ * addresses so if the peer does not like my IPv6
+ * (or IPv4 for that matter) it does not matter. If
+ * they don't support that type of address, they can
+ * NOT possibly get that packet type... i.e. with no
+ * IPv6 you can't receive an IPv6 packet, so we can
+ * safely ignore this one. If we ever added support
+ * for HOSTNAME Addresses, then we would need to do
+ * something here.
+ */
+ break;
+ case SCTP_CAUSE_UNRECOG_CHUNK:
+ sctp_process_unrecog_chunk(stcb, phdr, net);
+ break;
+ case SCTP_CAUSE_UNRECOG_PARAM:
+ sctp_process_unrecog_param(stcb, phdr);
+ break;
+ case SCTP_CAUSE_COOKIE_IN_SHUTDOWN:
+ /*
+ * We ignore this since the timer will drive out a
+ * new cookie anyway and their timer will drive us
+ * to send a SHUTDOWN_COMPLETE. We can't send one
+ * here since we don't have their tag.
+ */
+ break;
+ case SCTP_CAUSE_DELETING_LAST_ADDR:
+ case SCTP_CAUSE_RESOURCE_SHORTAGE:
+ case SCTP_CAUSE_DELETING_SRC_ADDR:
+ /*
+ * We should NOT get these here, but in an
+ * ASCONF-ACK.
+ */
+ SCTPDBG(SCTP_DEBUG_INPUT2, "Peer sends ASCONF errors in a Operational Error?<%d>?\n",
+ error_type);
+ break;
+ case SCTP_CAUSE_OUT_OF_RESC:
+ /*
+ * And what, pray tell do we do with the fact that
+ * the peer is out of resources? Not really sure we
+ * could do anything but abort. I suspect this
+ * should have come WITH an abort instead of in an
+ * OP-ERROR.
+ */
+ break;
+ default:
+ SCTPDBG(SCTP_DEBUG_INPUT1, "sctp_handle_error: unknown error type = 0x%xh\n",
+ error_type);
+ break;
+ }
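+ /*
+ * Error causes are padded to 4-byte boundaries, so step to the
+ * next cause using the 32-bit rounded length.
+ */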
+ adjust = SCTP_SIZE32(error_len);
+ chklen -= adjust;
+ phdr = (struct sctp_paramhdr *)((caddr_t)phdr + adjust);
+ }
+ return (0);
+}
+
+static int
+sctp_handle_init_ack(struct mbuf *m, int iphlen, int offset,
+ struct sctphdr *sh, struct sctp_init_ack_chunk *cp, struct sctp_tcb *stcb,
+ struct sctp_nets *net, int *abort_no_unlock, uint32_t vrf_id)
+{
+ struct sctp_init_ack *init_ack;
+ struct mbuf *op_err;
+
+ SCTPDBG(SCTP_DEBUG_INPUT2,
+ "sctp_handle_init_ack: handling INIT-ACK\n");
+
+ if (stcb == NULL) {
+ SCTPDBG(SCTP_DEBUG_INPUT2,
+ "sctp_handle_init_ack: TCB is null\n");
+ return (-1);
+ }
+ if (ntohs(cp->ch.chunk_length) < sizeof(struct sctp_init_ack_chunk)) {
+ /* Invalid length */
+ op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
+ sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
+ op_err, 0, net->port);
+ *abort_no_unlock = 1;
+ return (-1);
+ }
+ init_ack = &cp->init;
+ /* validate parameters */
+ if (init_ack->initiate_tag == 0) {
+ /* protocol error... send an abort */
+ op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
+ sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
+ op_err, 0, net->port);
+ *abort_no_unlock = 1;
+ return (-1);
+ }
+ if (ntohl(init_ack->a_rwnd) < SCTP_MIN_RWND) {
+ /* protocol error... send an abort */
+ op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
+ sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
+ op_err, 0, net->port);
+ *abort_no_unlock = 1;
+ return (-1);
+ }
+ if (init_ack->num_inbound_streams == 0) {
+ /* protocol error... send an abort */
+ op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
+ sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
+ op_err, 0, net->port);
+ *abort_no_unlock = 1;
+ return (-1);
+ }
+ if (init_ack->num_outbound_streams == 0) {
+ /* protocol error... send an abort */
+ op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
+ sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
+ op_err, 0, net->port);
+ *abort_no_unlock = 1;
+ return (-1);
+ }
+ /* process according to association state... */
+ switch (stcb->asoc.state & SCTP_STATE_MASK) {
+ case SCTP_STATE_COOKIE_WAIT:
+ /* this is the expected state for this chunk */
+ /* process the INIT-ACK parameters */
+ if (stcb->asoc.primary_destination->dest_state &
+ SCTP_ADDR_UNCONFIRMED) {
+ /*
+ * The primary is where we sent the INIT, we can
+ * always consider it confirmed when the INIT-ACK is
+ * returned. Do this before we load addresses
+ * though.
+ */
+ stcb->asoc.primary_destination->dest_state &=
+ ~SCTP_ADDR_UNCONFIRMED;
+ sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
+ stcb, 0, (void *)stcb->asoc.primary_destination, SCTP_SO_NOT_LOCKED);
+ }
+ if (sctp_process_init_ack(m, iphlen, offset, sh, cp, stcb,
+ net, abort_no_unlock, vrf_id) < 0) {
+ /* error in parsing parameters */
+ return (-1);
+ }
+ /* update our state */
+ SCTPDBG(SCTP_DEBUG_INPUT2, "moving to COOKIE-ECHOED state\n");
+ SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_COOKIE_ECHOED);
+
+ /* reset the RTO calc */
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
+ sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
+ stcb->asoc.overall_error_count,
+ 0,
+ SCTP_FROM_SCTP_INPUT,
+ __LINE__);
+ }
+ stcb->asoc.overall_error_count = 0;
+ (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);
+ /*
+ * collapse the init timer back in case of an exponential
+ * backoff
+ */
+ sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, stcb->sctp_ep,
+ stcb, net);
+ /*
+ * the send at the end of the inbound data processing will
+ * cause the cookie to be sent
+ */
+ break;
+ case SCTP_STATE_SHUTDOWN_SENT:
+ /* incorrect state... discard */
+ break;
+ case SCTP_STATE_COOKIE_ECHOED:
+ /* incorrect state... discard */
+ break;
+ case SCTP_STATE_OPEN:
+ /* incorrect state... discard */
+ break;
+ case SCTP_STATE_EMPTY:
+ case SCTP_STATE_INUSE:
+ default:
+ /* incorrect state... discard */
+ return (-1);
+ break;
+ }
+ SCTPDBG(SCTP_DEBUG_INPUT1, "Leaving handle-init-ack end\n");
+ return (0);
+}
+
+static struct sctp_tcb *
+sctp_process_cookie_new(struct mbuf *m, int iphlen, int offset,
+ struct sctphdr *sh, struct sctp_state_cookie *cookie, int cookie_len,
+ struct sctp_inpcb *inp, struct sctp_nets **netp,
+ struct sockaddr *init_src, int *notification,
+ int auth_skipped, uint32_t auth_offset, uint32_t auth_len,
+ uint32_t vrf_id, uint16_t port);
+
+
+/*
+ * handle a state cookie for an existing association
+ * m: input packet mbuf chain -- assumes a pullup on IP/SCTP/COOKIE-ECHO chunk
+ *    (note: this is a "split" mbuf and the cookie signature does not exist)
+ * offset: offset into mbuf to the cookie-echo chunk
+ */
+static struct sctp_tcb *
+sctp_process_cookie_existing(struct mbuf *m, int iphlen, int offset,
+ struct sctphdr *sh, struct sctp_state_cookie *cookie, int cookie_len,
+ struct sctp_inpcb *inp, struct sctp_tcb *stcb, struct sctp_nets **netp,
+ struct sockaddr *init_src, int *notification, sctp_assoc_t * sac_assoc_id,
+ uint32_t vrf_id, int auth_skipped, uint32_t auth_offset, uint32_t auth_len, uint16_t port)
+{
+ struct sctp_association *asoc;
+ struct sctp_init_chunk *init_cp, init_buf;
+ struct sctp_init_ack_chunk *initack_cp, initack_buf;
+ struct sctp_nets *net;
+ struct mbuf *op_err;
+ struct sctp_paramhdr *ph;
+ int chk_length;
+ int init_offset, initack_offset, i;
+ int retval;
+ int spec_flag = 0;
+ uint32_t how_indx;
+
+ net = *netp;
+ /* I know that the TCB is non-NULL from the caller */
+ asoc = &stcb->asoc;
+ for (how_indx = 0; how_indx < sizeof(asoc->cookie_how); how_indx++) {
+ if (asoc->cookie_how[how_indx] == 0)
+ break;
+ }
+ if (how_indx < sizeof(asoc->cookie_how)) {
+ asoc->cookie_how[how_indx] = 1;
+ }
+ if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) {
+ /* SHUTDOWN came in after sending INIT-ACK */
+ sctp_send_shutdown_ack(stcb, stcb->asoc.primary_destination);
+ op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr),
+ 0, M_DONTWAIT, 1, MT_DATA);
+ if (op_err == NULL) {
+ /* FOOBAR */
+ return (NULL);
+ }
+ /* Set the len */
+ SCTP_BUF_LEN(op_err) = sizeof(struct sctp_paramhdr);
+ ph = mtod(op_err, struct sctp_paramhdr *);
+ ph->param_type = htons(SCTP_CAUSE_COOKIE_IN_SHUTDOWN);
+ ph->param_length = htons(sizeof(struct sctp_paramhdr));
+ sctp_send_operr_to(m, iphlen, op_err, cookie->peers_vtag,
+ vrf_id, net->port);
+ if (how_indx < sizeof(asoc->cookie_how))
+ asoc->cookie_how[how_indx] = 2;
+ return (NULL);
+ }
+ /*
+ * find and validate the INIT chunk in the cookie (peer's info) the
+ * INIT should start after the cookie-echo header struct (chunk
+ * header, state cookie header struct)
+ */
+ init_offset = offset += sizeof(struct sctp_cookie_echo_chunk);
+
+ init_cp = (struct sctp_init_chunk *)
+ sctp_m_getptr(m, init_offset, sizeof(struct sctp_init_chunk),
+ (uint8_t *) & init_buf);
+ if (init_cp == NULL) {
+ /* could not pull a INIT chunk in cookie */
+ return (NULL);
+ }
+ chk_length = ntohs(init_cp->ch.chunk_length);
+ if (init_cp->ch.chunk_type != SCTP_INITIATION) {
+ return (NULL);
+ }
+ /*
+ * find and validate the INIT-ACK chunk in the cookie (my info) the
+ * INIT-ACK follows the INIT chunk
+ */
+ initack_offset = init_offset + SCTP_SIZE32(chk_length);
+ initack_cp = (struct sctp_init_ack_chunk *)
+ sctp_m_getptr(m, initack_offset, sizeof(struct sctp_init_ack_chunk),
+ (uint8_t *) & initack_buf);
+ if (initack_cp == NULL) {
+ /* could not pull INIT-ACK chunk in cookie */
+ return (NULL);
+ }
+ chk_length = ntohs(initack_cp->ch.chunk_length);
+ if (initack_cp->ch.chunk_type != SCTP_INITIATION_ACK) {
+ return (NULL);
+ }
+ if ((ntohl(initack_cp->init.initiate_tag) == asoc->my_vtag) &&
+ (ntohl(init_cp->init.initiate_tag) == asoc->peer_vtag)) {
+ /*
+ * case D in Section 5.2.4 Table 2: MMAA; process accordingly
+ * to get into the OPEN state
+ */
+ if (ntohl(initack_cp->init.initial_tsn) != asoc->init_seq_number) {
+ /*-
+ * Oops, this means that we somehow generated two vtags
+ * the same. I.e. we did:
+ * Us Peer
+ * <---INIT(tag=a)------
+ * ----INIT-ACK(tag=t)-->
+ * ----INIT(tag=t)------> *1
+ * <---INIT-ACK(tag=a)---
+ * <----CE(tag=t)------------- *2
+ *
+ * At point *1 we should be generating a different
+ * tag t'. Which means we would throw away the CE and send
+ * ours instead. Basically this is case C (throw away side).
+ */
+ if (how_indx < sizeof(asoc->cookie_how))
+ asoc->cookie_how[how_indx] = 17;
+ return (NULL);
+
+ }
+ switch (SCTP_GET_STATE(asoc)) {
+ case SCTP_STATE_COOKIE_WAIT:
+ case SCTP_STATE_COOKIE_ECHOED:
+ /*
+ * INIT was sent but got a COOKIE_ECHO with the
+ * correct tags... just accept it...but we must
+ * process the init so that we can make sure we have
+ * the right seq no's.
+ */
+ /* First we must process the INIT !! */
+ retval = sctp_process_init(init_cp, stcb, net);
+ if (retval < 0) {
+ if (how_indx < sizeof(asoc->cookie_how))
+ asoc->cookie_how[how_indx] = 3;
+ return (NULL);
+ }
+ /* we have already processed the INIT so no problem */
+ sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb,
+ net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_12);
+ sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_13);
+ /* update current state */
+ if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)
+ SCTP_STAT_INCR_COUNTER32(sctps_activeestab);
+ else
+ SCTP_STAT_INCR_COUNTER32(sctps_collisionestab);
+
+ SCTP_SET_STATE(asoc, SCTP_STATE_OPEN);
+ if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
+ sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
+ stcb->sctp_ep, stcb, asoc->primary_destination);
+ }
+ SCTP_STAT_INCR_GAUGE32(sctps_currestab);
+ sctp_stop_all_cookie_timers(stcb);
+ if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+ (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
+ (inp->sctp_socket->so_qlimit == 0)) {
+#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+ struct socket *so;
+
+#endif
+ /*
+ * Here is where collision would go if we
+ * did a connect() and instead got a
+ * init/init-ack/cookie done before the
+ * init-ack came back..
+ */
+ stcb->sctp_ep->sctp_flags |=
+ SCTP_PCB_FLAGS_CONNECTED;
+#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+ so = SCTP_INP_SO(stcb->sctp_ep);
+ atomic_add_int(&stcb->asoc.refcnt, 1);
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_SOCKET_LOCK(so, 1);
+ SCTP_TCB_LOCK(stcb);
+ atomic_add_int(&stcb->asoc.refcnt, -1);
+ if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
+ SCTP_SOCKET_UNLOCK(so, 1);
+ return (NULL);
+ }
+#endif
+ soisconnected(stcb->sctp_socket);
+#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+ SCTP_SOCKET_UNLOCK(so, 1);
+#endif
+ }
+ /* notify upper layer */
+ *notification = SCTP_NOTIFY_ASSOC_UP;
+ /*
+ * since we did not send a HB make sure we don't
+ * double things
+ */
+ net->hb_responded = 1;
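+ /*
+ * The cookie echoes the timestamp we stamped into it, so the
+ * elapsed interval doubles as an RTT sample.
+ */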
+ net->RTO = sctp_calculate_rto(stcb, asoc, net,
+ &cookie->time_entered, sctp_align_unsafe_makecopy);
+
+ if (stcb->asoc.sctp_autoclose_ticks &&
+ (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE))) {
+ sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE,
+ inp, stcb, NULL);
+ }
+ break;
+ default:
+ /*
+ * we're in the OPEN state (or beyond), so peer must
+ * have simply lost the COOKIE-ACK
+ */
+ break;
+ } /* end switch */
+ sctp_stop_all_cookie_timers(stcb);
+ /*
+ * We ignore the return code here.. not sure if we should
+ * somehow abort.. but we do have an existing asoc. This
+ * really should not fail.
+ */
+ if (sctp_load_addresses_from_init(stcb, m, iphlen,
+ init_offset + sizeof(struct sctp_init_chunk),
+ initack_offset, sh, init_src)) {
+ if (how_indx < sizeof(asoc->cookie_how))
+ asoc->cookie_how[how_indx] = 4;
+ return (NULL);
+ }
+ /* respond with a COOKIE-ACK */
+ sctp_toss_old_cookies(stcb, asoc);
+ sctp_send_cookie_ack(stcb);
+ if (how_indx < sizeof(asoc->cookie_how))
+ asoc->cookie_how[how_indx] = 5;
+ return (stcb);
+ }
+ if (ntohl(initack_cp->init.initiate_tag) != asoc->my_vtag &&
+ ntohl(init_cp->init.initiate_tag) == asoc->peer_vtag &&
+ cookie->tie_tag_my_vtag == 0 &&
+ cookie->tie_tag_peer_vtag == 0) {
+ /*
+ * case C in Section 5.2.4 Table 2: XMOO silently discard
+ */
+ if (how_indx < sizeof(asoc->cookie_how))
+ asoc->cookie_how[how_indx] = 6;
+ return (NULL);
+ }
+ /*
+ * If the peer supports NAT and the tags below collide while we are
+ * established, send back an ABORT (colliding state).
+ */
+ if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) &&
+ (asoc->peer_supports_nat) &&
+ ((ntohl(initack_cp->init.initiate_tag) == asoc->my_vtag) &&
+ ((ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) ||
+ (asoc->peer_vtag == 0)))) {
+ /*
+ * Special case - the peers support NAT. We may have two INITs
+ * that we gave out the same tag on since one was not
+ * established.. i.e. we get an INIT from host-1 behind the NAT
+ * and we respond tag-a, we get an INIT from host-2 behind
+ * the NAT and we give out tag-a again. Then we bring up host-1
+ * (or 2's) assoc, then comes the cookie from host-2 (or 1).
+ * Now we have colliding state. We must send an abort here
+ * with colliding state indication.
+ */
+ op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr),
+ 0, M_DONTWAIT, 1, MT_DATA);
+ if (op_err == NULL) {
+ /* FOOBAR */
+ return (NULL);
+ }
+ /* pre-reserve some space */
+#ifdef INET6
+ SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
+#else
+ SCTP_BUF_RESV_UF(op_err, sizeof(struct ip));
+#endif
+ SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
+ SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
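+ /*
+ * The reserved headroom lets the ABORT's IP/SCTP/chunk headers
+ * be prepended in place without another allocation.
+ */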
+ /* Set the len */
+ SCTP_BUF_LEN(op_err) = sizeof(struct sctp_paramhdr);
+ ph = mtod(op_err, struct sctp_paramhdr *);
+ ph->param_type = htons(SCTP_CAUSE_NAT_COLLIDING_STATE);
+ ph->param_length = htons(sizeof(struct sctp_paramhdr));
+ sctp_send_abort(m, iphlen, sh, 0, op_err, vrf_id, port);
+ return (NULL);
+ }
+ if ((ntohl(initack_cp->init.initiate_tag) == asoc->my_vtag) &&
+ ((ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) ||
+ (asoc->peer_vtag == 0))) {
+ /*
+ * case B in Section 5.2.4 Table 2: MXAA or MOAA; my info
+ * should be ok, re-accept peer info
+ */
+ if (ntohl(initack_cp->init.initial_tsn) != asoc->init_seq_number) {
+ /*
+ * Extension of case C. If we hit this, then the
+ * random number generator returned the same vtag
+ * when we first sent our INIT-ACK and when we later
+ * sent our INIT. The side with the seq numbers that
+ * are different will be the one that normally
+ * would have hit case C. This in effect "extends"
+ * our vtags in this collision case to be 64 bits.
+ * The same collision could occur aka you get both
+ * vtag and seq number the same twice in a row.. but
+ * is much less likely. If it did happen then we
+ * would proceed through and bring up the assoc.. we
+ * may end up with the wrong stream setup however..
+ * which would be bad.. but there is no way to
+ * tell.. until we send on a stream that does not
+ * exist :-)
+ */
+ if (how_indx < sizeof(asoc->cookie_how))
+ asoc->cookie_how[how_indx] = 7;
+
+ return (NULL);
+ }
+ if (how_indx < sizeof(asoc->cookie_how))
+ asoc->cookie_how[how_indx] = 8;
+ sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_14);
+ sctp_stop_all_cookie_timers(stcb);
+ /*
+ * since we did not send a HB make sure we don't double
+ * things
+ */
+ net->hb_responded = 1;
+ if (stcb->asoc.sctp_autoclose_ticks &&
+ sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) {
+ sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb,
+ NULL);
+ }
+ asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd);
+ asoc->pre_open_streams = ntohs(initack_cp->init.num_outbound_streams);
+
+ /* Note last_cwr_tsn? where is this used? */
+ asoc->last_cwr_tsn = asoc->init_seq_number - 1;
+ if (ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) {
+ /*
+ * Ok the peer probably discarded our data (if we
+ * echoed a cookie+data). So anything on the
+ * sent_queue should be marked for retransmit, we
+ * may not get something to kick us so it COULD
+ * still take a timeout to move these.. but it can't
+ * hurt to mark them.
+ */
+ struct sctp_tmit_chunk *chk;
+
+ TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
+ if (chk->sent < SCTP_DATAGRAM_RESEND) {
+ chk->sent = SCTP_DATAGRAM_RESEND;
+ sctp_flight_size_decrease(chk);
+ sctp_total_flight_decrease(stcb, chk);
+ sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
+ spec_flag++;
+ }
+ }
+
+ }
+ /* process the INIT info (peer's info) */
+ retval = sctp_process_init(init_cp, stcb, net);
+ if (retval < 0) {
+ if (how_indx < sizeof(asoc->cookie_how))
+ asoc->cookie_how[how_indx] = 9;
+ return (NULL);
+ }
+ if (sctp_load_addresses_from_init(stcb, m, iphlen,
+ init_offset + sizeof(struct sctp_init_chunk),
+ initack_offset, sh, init_src)) {
+ if (how_indx < sizeof(asoc->cookie_how))
+ asoc->cookie_how[how_indx] = 10;
+ return (NULL);
+ }
+ if ((asoc->state & SCTP_STATE_COOKIE_WAIT) ||
+ (asoc->state & SCTP_STATE_COOKIE_ECHOED)) {
+ *notification = SCTP_NOTIFY_ASSOC_UP;
+
+ if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+ (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
+ (inp->sctp_socket->so_qlimit == 0)) {
+#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+ struct socket *so;
+
+#endif
+ stcb->sctp_ep->sctp_flags |=
+ SCTP_PCB_FLAGS_CONNECTED;
+#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+ so = SCTP_INP_SO(stcb->sctp_ep);
+ atomic_add_int(&stcb->asoc.refcnt, 1);
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_SOCKET_LOCK(so, 1);
+ SCTP_TCB_LOCK(stcb);
+ atomic_add_int(&stcb->asoc.refcnt, -1);
+ if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
+ SCTP_SOCKET_UNLOCK(so, 1);
+ return (NULL);
+ }
+#endif
+ soisconnected(stcb->sctp_socket);
+#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+ SCTP_SOCKET_UNLOCK(so, 1);
+#endif
+ }
+ if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)
+ SCTP_STAT_INCR_COUNTER32(sctps_activeestab);
+ else
+ SCTP_STAT_INCR_COUNTER32(sctps_collisionestab);
+ SCTP_STAT_INCR_GAUGE32(sctps_currestab);
+ } else if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) {
+ SCTP_STAT_INCR_COUNTER32(sctps_restartestab);
+ } else {
+ SCTP_STAT_INCR_COUNTER32(sctps_collisionestab);
+ }
+ SCTP_SET_STATE(asoc, SCTP_STATE_OPEN);
+ if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
+ sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
+ stcb->sctp_ep, stcb, asoc->primary_destination);
+ }
+ sctp_stop_all_cookie_timers(stcb);
+ sctp_toss_old_cookies(stcb, asoc);
+ sctp_send_cookie_ack(stcb);
+ if (spec_flag) {
+ /*
+ * only if we have retrans set do we do this. What
+ * this call does is get only the COOKIE-ACK out and
+ * then when we return the normal call to
+ * sctp_chunk_output will get the retrans out behind
+ * this.
+ */
+ sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_COOKIE_ACK, SCTP_SO_NOT_LOCKED);
+ }
+ if (how_indx < sizeof(asoc->cookie_how))
+ asoc->cookie_how[how_indx] = 11;
+
+ return (stcb);
+ }
+ if ((ntohl(initack_cp->init.initiate_tag) != asoc->my_vtag &&
+ ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) &&
+ cookie->tie_tag_my_vtag == asoc->my_vtag_nonce &&
+ cookie->tie_tag_peer_vtag == asoc->peer_vtag_nonce &&
+ cookie->tie_tag_peer_vtag != 0) {
+ struct sctpasochead *head;
+
+ if (asoc->peer_supports_nat) {
+ /*
+ * This is a gross, gross hack. Just call the
+ * cookie_new code since we are allowing a duplicate
+ * association. I hope this works...
+ */
+ return (sctp_process_cookie_new(m, iphlen, offset, sh, cookie, cookie_len,
+ inp, netp, init_src, notification,
+ auth_skipped, auth_offset, auth_len,
+ vrf_id, port));
+ }
+ /*
+ * case A in Section 5.2.4 Table 2: XXMM (peer restarted)
+ */
+ /* temp code */
+ if (how_indx < sizeof(asoc->cookie_how))
+ asoc->cookie_how[how_indx] = 12;
+ sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_15);
+ sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_16);
+
+ *sac_assoc_id = sctp_get_associd(stcb);
+ /* notify upper layer */
+ *notification = SCTP_NOTIFY_ASSOC_RESTART;
+ atomic_add_int(&stcb->asoc.refcnt, 1);
+ if ((SCTP_GET_STATE(asoc) != SCTP_STATE_OPEN) &&
+ (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
+ (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT)) {
+ SCTP_STAT_INCR_GAUGE32(sctps_currestab);
+ }
+ if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) {
+ SCTP_STAT_INCR_GAUGE32(sctps_restartestab);
+ } else if (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) {
+ SCTP_STAT_INCR_GAUGE32(sctps_collisionestab);
+ }
+ if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
+ SCTP_SET_STATE(asoc, SCTP_STATE_OPEN);
+ sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
+ stcb->sctp_ep, stcb, asoc->primary_destination);
+
+ } else if (!(asoc->state & SCTP_STATE_SHUTDOWN_SENT)) {
+ /* move to OPEN state, if not in SHUTDOWN_SENT */
+ SCTP_SET_STATE(asoc, SCTP_STATE_OPEN);
+ }
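+ /*
+ * Peer restart: all outbound sequencing state is re-derived
+ * from the initial TSN in the new INIT-ACK.
+ */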
+ asoc->pre_open_streams =
+ ntohs(initack_cp->init.num_outbound_streams);
+ asoc->init_seq_number = ntohl(initack_cp->init.initial_tsn);
+ asoc->sending_seq = asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number;
+ asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
+
+ asoc->last_cwr_tsn = asoc->init_seq_number - 1;
+ asoc->asconf_seq_in = asoc->last_acked_seq = asoc->init_seq_number - 1;
+
+ asoc->str_reset_seq_in = asoc->init_seq_number;
+
+ asoc->advanced_peer_ack_point = asoc->last_acked_seq;
+ if (asoc->mapping_array) {
+ memset(asoc->mapping_array, 0,
+ asoc->mapping_array_size);
+ }
+ if (asoc->nr_mapping_array) {
+ memset(asoc->nr_mapping_array, 0,
+ asoc->mapping_array_size);
+ }
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_INP_INFO_WLOCK();
+ SCTP_INP_WLOCK(stcb->sctp_ep);
+ SCTP_TCB_LOCK(stcb);
+ atomic_add_int(&stcb->asoc.refcnt, -1);
+ /* send up all the data */
+ SCTP_TCB_SEND_LOCK(stcb);
+
+ sctp_report_all_outbound(stcb, 1, SCTP_SO_NOT_LOCKED);
+ for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
+ stcb->asoc.strmout[i].stream_no = i;
+ stcb->asoc.strmout[i].next_sequence_sent = 0;
+ stcb->asoc.strmout[i].last_msg_incomplete = 0;
+ }
+ /* process the INIT-ACK info (my info) */
+ asoc->my_vtag = ntohl(initack_cp->init.initiate_tag);
+ asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd);
+
+ /* pull from vtag hash */
+ LIST_REMOVE(stcb, sctp_asocs);
+ /* re-insert to new vtag position */
+ head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(stcb->asoc.my_vtag,
+ SCTP_BASE_INFO(hashasocmark))];
+ /*
+ * put it in the bucket in the vtag hash of assoc's for the
+ * system
+ */
+ LIST_INSERT_HEAD(head, stcb, sctp_asocs);
+
+ /* process the INIT info (peer's info) */
+ SCTP_TCB_SEND_UNLOCK(stcb);
+ SCTP_INP_WUNLOCK(stcb->sctp_ep);
+ SCTP_INP_INFO_WUNLOCK();
+
+ retval = sctp_process_init(init_cp, stcb, net);
+ if (retval < 0) {
+ if (how_indx < sizeof(asoc->cookie_how))
+ asoc->cookie_how[how_indx] = 13;
+
+ return (NULL);
+ }
+ /*
+ * since we did not send a HB make sure we don't double
+ * things
+ */
+ net->hb_responded = 1;
+
+ if (sctp_load_addresses_from_init(stcb, m, iphlen,
+ init_offset + sizeof(struct sctp_init_chunk),
+ initack_offset, sh, init_src)) {
+ if (how_indx < sizeof(asoc->cookie_how))
+ asoc->cookie_how[how_indx] = 14;
+
+ return (NULL);
+ }
+ /* respond with a COOKIE-ACK */
+ sctp_stop_all_cookie_timers(stcb);
+ sctp_toss_old_cookies(stcb, asoc);
+ sctp_send_cookie_ack(stcb);
+ if (how_indx < sizeof(asoc->cookie_how))
+ asoc->cookie_how[how_indx] = 15;
+
+ return (stcb);
+ }
+ if (how_indx < sizeof(asoc->cookie_how))
+ asoc->cookie_how[how_indx] = 16;
+ /* all other cases... */
+ return (NULL);
+}
+
+
+/*
+ * handle a state cookie for a new association
+ * m: input packet mbuf chain -- assumes a pullup on IP/SCTP/COOKIE-ECHO chunk
+ *    (note: this is a "split" mbuf and the cookie signature does not exist)
+ * offset: offset into mbuf to the cookie-echo chunk
+ * length: length of the cookie chunk
+ * to: where the init was from
+ * returns a new TCB
+ */
+struct sctp_tcb *
+sctp_process_cookie_new(struct mbuf *m, int iphlen, int offset,
+ struct sctphdr *sh, struct sctp_state_cookie *cookie, int cookie_len,
+ struct sctp_inpcb *inp, struct sctp_nets **netp,
+ struct sockaddr *init_src, int *notification,
+ int auth_skipped, uint32_t auth_offset, uint32_t auth_len,
+ uint32_t vrf_id, uint16_t port)
+{
+ struct sctp_tcb *stcb;
+ struct sctp_init_chunk *init_cp, init_buf;
+ struct sctp_init_ack_chunk *initack_cp, initack_buf;
+ struct sockaddr_storage sa_store;
+ struct sockaddr *initack_src = (struct sockaddr *)&sa_store;
+ struct sockaddr_in *sin;
+ struct sockaddr_in6 *sin6;
+ struct sctp_association *asoc;
+ int chk_length;
+ int init_offset, initack_offset, initack_limit;
+ int retval;
+ int error = 0;
+ uint32_t old_tag;
+ uint8_t auth_chunk_buf[SCTP_PARAM_BUFFER_SIZE];
+
+#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+ struct socket *so;
+
+ so = SCTP_INP_SO(inp);
+#endif
+
+ /*
+ * find and validate the INIT chunk in the cookie (peer's info) the
+ * INIT should start after the cookie-echo header struct (chunk
+ * header, state cookie header struct)
+ */
+ init_offset = offset + sizeof(struct sctp_cookie_echo_chunk);
+ init_cp = (struct sctp_init_chunk *)
+ sctp_m_getptr(m, init_offset, sizeof(struct sctp_init_chunk),
+ (uint8_t *) & init_buf);
+ if (init_cp == NULL) {
+ /* could not pull a INIT chunk in cookie */
+ SCTPDBG(SCTP_DEBUG_INPUT1,
+ "process_cookie_new: could not pull INIT chunk hdr\n");
+ return (NULL);
+ }
+ chk_length = ntohs(init_cp->ch.chunk_length);
+ if (init_cp->ch.chunk_type != SCTP_INITIATION) {
+ SCTPDBG(SCTP_DEBUG_INPUT1, "HUH? process_cookie_new: could not find INIT chunk!\n");
+ return (NULL);
+ }
+ initack_offset = init_offset + SCTP_SIZE32(chk_length);
+ /*
+ * find and validate the INIT-ACK chunk in the cookie (my info) the
+ * INIT-ACK follows the INIT chunk
+ */
+ initack_cp = (struct sctp_init_ack_chunk *)
+ sctp_m_getptr(m, initack_offset, sizeof(struct sctp_init_ack_chunk),
+ (uint8_t *) & initack_buf);
+ if (initack_cp == NULL) {
+ /* could not pull INIT-ACK chunk in cookie */
+ SCTPDBG(SCTP_DEBUG_INPUT1, "process_cookie_new: could not pull INIT-ACK chunk hdr\n");
+ return (NULL);
+ }
+ chk_length = ntohs(initack_cp->ch.chunk_length);
+ if (initack_cp->ch.chunk_type != SCTP_INITIATION_ACK) {
+ return (NULL);
+ }
+ /*
+ * NOTE: We can't use the INIT_ACK's chk_length to determine the
+ * "initack_limit" value. This is because the chk_length field
+ * includes the length of the cookie, but the cookie is omitted when
+ * the INIT and INIT_ACK are tacked onto the cookie...
+ */
+ initack_limit = offset + cookie_len;
+
+ /*
+ * now that we know the INIT/INIT-ACK are in place, create a new TCB
+ * and populate it
+ */
+
+ /*
+ * Here we do a trick, we set in NULL for the proc/thread argument.
+ * We do this since in effect we only use the p argument when the
+ * socket is unbound and we must do an implicit bind. Since we are
+ * getting a cookie, we cannot be unbound.
+ */
+ stcb = sctp_aloc_assoc(inp, init_src, &error,
+ ntohl(initack_cp->init.initiate_tag), vrf_id,
+ (struct thread *)NULL);
+ if (stcb == NULL) {
+ struct mbuf *op_err;
+
+ /* memory problem? */
+ SCTPDBG(SCTP_DEBUG_INPUT1,
+ "process_cookie_new: no room for another TCB!\n");
+ op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);
+
+ sctp_abort_association(inp, (struct sctp_tcb *)NULL, m, iphlen,
+ sh, op_err, vrf_id, port);
+ return (NULL);
+ }
+ /* get the correct sctp_nets */
+ if (netp)
+ *netp = sctp_findnet(stcb, init_src);
+
+ asoc = &stcb->asoc;
+ /* get scope variables out of cookie */
+ asoc->ipv4_local_scope = cookie->ipv4_scope;
+ asoc->site_scope = cookie->site_scope;
+ asoc->local_scope = cookie->local_scope;
+ asoc->loopback_scope = cookie->loopback_scope;
+
+ if ((asoc->ipv4_addr_legal != cookie->ipv4_addr_legal) ||
+ (asoc->ipv6_addr_legal != cookie->ipv6_addr_legal)) {
+ struct mbuf *op_err;
+
+ /*
+ * Houston we have a problem. The EP changed while the
+ * cookie was in flight. Only recourse is to abort the
+ * association.
+ */
+ atomic_add_int(&stcb->asoc.refcnt, 1);
+ op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);
+ sctp_abort_association(inp, (struct sctp_tcb *)NULL, m, iphlen,
+ sh, op_err, vrf_id, port);
+#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_SOCKET_LOCK(so, 1);
+ SCTP_TCB_LOCK(stcb);
+#endif
+ (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
+ SCTP_FROM_SCTP_INPUT + SCTP_LOC_16);
+#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+ SCTP_SOCKET_UNLOCK(so, 1);
+#endif
+ atomic_subtract_int(&stcb->asoc.refcnt, 1);
+ return (NULL);
+ }
+ /* process the INIT-ACK info (my info) */
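+ /*
+ * All local sequencing state derives from the initial TSN we
+ * chose when the INIT-ACK was built; the cookie carries it back.
+ */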
+ old_tag = asoc->my_vtag;
+ asoc->my_vtag = ntohl(initack_cp->init.initiate_tag);
+ asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd);
+ asoc->pre_open_streams = ntohs(initack_cp->init.num_outbound_streams);
+ asoc->init_seq_number = ntohl(initack_cp->init.initial_tsn);
+ asoc->sending_seq = asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number;
+ asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
+ asoc->last_cwr_tsn = asoc->init_seq_number - 1;
+ asoc->asconf_seq_in = asoc->last_acked_seq = asoc->init_seq_number - 1;
+ asoc->str_reset_seq_in = asoc->init_seq_number;
+
+ asoc->advanced_peer_ack_point = asoc->last_acked_seq;
+
+ /* process the INIT info (peer's info) */
+ if (netp)
+ retval = sctp_process_init(init_cp, stcb, *netp);
+ else
+ retval = 0;
+ if (retval < 0) {
+ atomic_add_int(&stcb->asoc.refcnt, 1);
+#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_SOCKET_LOCK(so, 1);
+ SCTP_TCB_LOCK(stcb);
+#endif
+ (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_16);
+#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+ SCTP_SOCKET_UNLOCK(so, 1);
+#endif
+ atomic_subtract_int(&stcb->asoc.refcnt, 1);
+ return (NULL);
+ }
+ /* load all addresses */
+ if (sctp_load_addresses_from_init(stcb, m, iphlen,
+ init_offset + sizeof(struct sctp_init_chunk), initack_offset, sh,
+ init_src)) {
+ atomic_add_int(&stcb->asoc.refcnt, 1);
+#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_SOCKET_LOCK(so, 1);
+ SCTP_TCB_LOCK(stcb);
+#endif
+ (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_17);
+#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+ SCTP_SOCKET_UNLOCK(so, 1);
+#endif
+ atomic_subtract_int(&stcb->asoc.refcnt, 1);
+ return (NULL);
+ }
+ /*
+ * verify any preceding AUTH chunk that was skipped
+ */
+ /* pull the local authentication parameters from the cookie/init-ack */
+ sctp_auth_get_cookie_params(stcb, m,
+ initack_offset + sizeof(struct sctp_init_ack_chunk),
+ initack_limit - (initack_offset + sizeof(struct sctp_init_ack_chunk)));
+ if (auth_skipped) {
+ struct sctp_auth_chunk *auth;
+
+ auth = (struct sctp_auth_chunk *)
+ sctp_m_getptr(m, auth_offset, auth_len, auth_chunk_buf);
+ if ((auth == NULL) || sctp_handle_auth(stcb, auth, m, auth_offset)) {
+ /* auth HMAC failed, dump the assoc and packet */
+ SCTPDBG(SCTP_DEBUG_AUTH1,
+ "COOKIE-ECHO: AUTH failed\n");
+ atomic_add_int(&stcb->asoc.refcnt, 1);
+#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_SOCKET_LOCK(so, 1);
+ SCTP_TCB_LOCK(stcb);
+#endif
+ (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_18);
+#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+ SCTP_SOCKET_UNLOCK(so, 1);
+#endif
+ atomic_subtract_int(&stcb->asoc.refcnt, 1);
+ return (NULL);
+ } else {
+ /* remaining chunks checked... good to go */
+ stcb->asoc.authenticated = 1;
+ }
+ }
+ /* update current state */
+ SCTPDBG(SCTP_DEBUG_INPUT2, "moving to OPEN state\n");
+ SCTP_SET_STATE(asoc, SCTP_STATE_OPEN);
+ if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
+ sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
+ stcb->sctp_ep, stcb, asoc->primary_destination);
+ }
+ sctp_stop_all_cookie_timers(stcb);
+ SCTP_STAT_INCR_COUNTER32(sctps_passiveestab);
+ SCTP_STAT_INCR_GAUGE32(sctps_currestab);
+
+ /*
+ * if we're doing ASCONFs, check to see if we have any new local
+ * addresses that need to get added to the peer (eg. addresses
+ * changed while cookie echo in flight). This needs to be done
+ * after we go to the OPEN state to do the correct asconf
+ * processing. Else, make sure we have the correct addresses in our
+ * lists
+ */
+
+ /* warning, we re-use sin, sin6, sa_store here! */
+ /* pull in local_address (our "from" address) */
+ if (cookie->laddr_type == SCTP_IPV4_ADDRESS) {
+ /* source addr is IPv4 */
+ sin = (struct sockaddr_in *)initack_src;
+ memset(sin, 0, sizeof(*sin));
+ sin->sin_family = AF_INET;
+ sin->sin_len = sizeof(struct sockaddr_in);
+ sin->sin_addr.s_addr = cookie->laddress[0];
+ } else if (cookie->laddr_type == SCTP_IPV6_ADDRESS) {
+ /* source addr is IPv6 */
+ sin6 = (struct sockaddr_in6 *)initack_src;
+ memset(sin6, 0, sizeof(*sin6));
+ sin6->sin6_family = AF_INET6;
+ sin6->sin6_len = sizeof(struct sockaddr_in6);
+ sin6->sin6_scope_id = cookie->scope_id;
+ memcpy(&sin6->sin6_addr, cookie->laddress,
+ sizeof(sin6->sin6_addr));
+ } else {
+ atomic_add_int(&stcb->asoc.refcnt, 1);
+#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_SOCKET_LOCK(so, 1);
+ SCTP_TCB_LOCK(stcb);
+#endif
+ (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_19);
+#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+ SCTP_SOCKET_UNLOCK(so, 1);
+#endif
+ atomic_subtract_int(&stcb->asoc.refcnt, 1);
+ return (NULL);
+ }
+
+ /* set up to notify upper layer */
+ *notification = SCTP_NOTIFY_ASSOC_UP;
+ if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+ (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
+ (inp->sctp_socket->so_qlimit == 0)) {
+ /*
+ * This is an endpoint that called connect(); how it got a
+ * cookie that is NEW is a bit of a mystery. It must be that
+ * the INIT was sent, but before it got there.. a complete
+ * INIT/INIT-ACK/COOKIE arrived. But of course then it
+ * should have gone to the other code.. not here.. oh well..
+ * a bit of protection is worth having..
+ */
+ stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
+#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+ atomic_add_int(&stcb->asoc.refcnt, 1);
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_SOCKET_LOCK(so, 1);
+ SCTP_TCB_LOCK(stcb);
+ atomic_subtract_int(&stcb->asoc.refcnt, 1);
+ if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
+ SCTP_SOCKET_UNLOCK(so, 1);
+ return (NULL);
+ }
+#endif
+ soisconnected(stcb->sctp_socket);
+#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+ SCTP_SOCKET_UNLOCK(so, 1);
+#endif
+ } else if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
+ (inp->sctp_socket->so_qlimit)) {
+ /*
+ * We don't want to do anything with this one, since it is
+ * the listening guy. The timer will get started for
+ * accepted connections in the caller.
+ */
+ ;
+ }
+ /* since we did not send a HB make sure we don't double things */
+ if ((netp) && (*netp))
+ (*netp)->hb_responded = 1;
+
+ if (stcb->asoc.sctp_autoclose_ticks &&
+ sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) {
+ sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL);
+ }
+ /* calculate the RTT */
+ (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);
+ if ((netp) && (*netp)) {
+ (*netp)->RTO = sctp_calculate_rto(stcb, asoc, *netp,
+ &cookie->time_entered, sctp_align_unsafe_makecopy);
+ }
+ /* respond with a COOKIE-ACK */
+ sctp_send_cookie_ack(stcb);
+
+ /*
+ * check the address lists for any ASCONFs that need to be sent
+ * AFTER the cookie-ack is sent
+ */
+ sctp_check_address_list(stcb, m,
+ initack_offset + sizeof(struct sctp_init_ack_chunk),
+ initack_limit - (initack_offset + sizeof(struct sctp_init_ack_chunk)),
+ initack_src, cookie->local_scope, cookie->site_scope,
+ cookie->ipv4_scope, cookie->loopback_scope);
+
+
+ return (stcb);
+}
+
+/*
+ * CODE LIKE THIS NEEDS TO RUN IF the peer supports the NAT extension, i.e.
+ * we NEED to make sure we are not already using the vtag. If so we
+ * need to send back an ABORT-TRY-AGAIN-WITH-NEW-TAG. No middle box bit!
+ head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(tag,
+ SCTP_BASE_INFO(hashasocmark))];
+ LIST_FOREACH(stcb, head, sctp_asocs) {
+ if ((stcb->asoc.my_vtag == tag) && (stcb->rport == rport) && (inp == stcb->sctp_ep)) {
+ -- SEND ABORT - TRY AGAIN --
+ }
+ }
+*/
+
+/*
+ * handles a COOKIE-ECHO message
+ * stcb: modified to either a new or left as existing (non-NULL) TCB
+ */
+static struct mbuf *
+sctp_handle_cookie_echo(struct mbuf *m, int iphlen, int offset,
+ struct sctphdr *sh, struct sctp_cookie_echo_chunk *cp,
+ struct sctp_inpcb **inp_p, struct sctp_tcb **stcb, struct sctp_nets **netp,
+ int auth_skipped, uint32_t auth_offset, uint32_t auth_len,
+ struct sctp_tcb **locked_tcb, uint32_t vrf_id, uint16_t port)
+{
+ struct sctp_state_cookie *cookie;
+ struct sockaddr_in6 sin6;
+ struct sockaddr_in sin;
+ struct sctp_tcb *l_stcb = *stcb;
+ struct sctp_inpcb *l_inp;
+ struct sockaddr *to;
+ sctp_assoc_t sac_restart_id;
+ struct sctp_pcb *ep;
+ struct mbuf *m_sig;
+ uint8_t calc_sig[SCTP_SIGNATURE_SIZE], tmp_sig[SCTP_SIGNATURE_SIZE];
+ uint8_t *sig;
+ uint8_t cookie_ok = 0;
+ unsigned int size_of_pkt, sig_offset, cookie_offset;
+ unsigned int cookie_len;
+ struct timeval now;
+ struct timeval time_expires;
+ struct sockaddr_storage dest_store;
+ struct sockaddr *localep_sa = (struct sockaddr *)&dest_store;
+ struct ip *iph;
+ int notification = 0;
+ struct sctp_nets *netl;
+ int had_a_existing_tcb = 0;
+
+ SCTPDBG(SCTP_DEBUG_INPUT2,
+ "sctp_handle_cookie: handling COOKIE-ECHO\n");
+
+ if (inp_p == NULL) {
+ return (NULL);
+ }
+ /* First get the destination address setup too. */
+ iph = mtod(m, struct ip *);
+ switch (iph->ip_v) {
+ case IPVERSION:
+ {
+ /* its IPv4 */
+ struct sockaddr_in *lsin;
+
+ lsin = (struct sockaddr_in *)(localep_sa);
+ memset(lsin, 0, sizeof(*lsin));
+ lsin->sin_family = AF_INET;
+ lsin->sin_len = sizeof(*lsin);
+ lsin->sin_port = sh->dest_port;
+ lsin->sin_addr.s_addr = iph->ip_dst.s_addr;
+ size_of_pkt = SCTP_GET_IPV4_LENGTH(iph);
+ break;
+ }
+#ifdef INET6
+ case IPV6_VERSION >> 4:
+ {
+ /* its IPv6 */
+ struct ip6_hdr *ip6;
+ struct sockaddr_in6 *lsin6;
+
+ lsin6 = (struct sockaddr_in6 *)(localep_sa);
+ memset(lsin6, 0, sizeof(*lsin6));
+ lsin6->sin6_family = AF_INET6;
+ lsin6->sin6_len = sizeof(struct sockaddr_in6);
+ ip6 = mtod(m, struct ip6_hdr *);
+ lsin6->sin6_port = sh->dest_port;
+ lsin6->sin6_addr = ip6->ip6_dst;
+ size_of_pkt = SCTP_GET_IPV6_LENGTH(ip6) + iphlen;
+ break;
+ }
+#endif
+ default:
+ return (NULL);
+ }
+
+ cookie = &cp->cookie;
+ cookie_offset = offset + sizeof(struct sctp_chunkhdr);
+ cookie_len = ntohs(cp->ch.chunk_length);
+
+ if ((cookie->peerport != sh->src_port) ||
+ (cookie->myport != sh->dest_port) ||
+ (cookie->my_vtag != sh->v_tag)) {
+ /*
+ * invalid ports or bad tag. Note that we always leave the
+ * v_tag in the header in network order and when we stored
+ * it in the my_vtag slot we also left it in network order.
+ * This maintains the match even though it may be in the
+ * opposite byte order of the machine :->
+ */
+ return (NULL);
+ }
+ if (cookie_len > size_of_pkt ||
+ cookie_len < sizeof(struct sctp_cookie_echo_chunk) +
+ sizeof(struct sctp_init_chunk) +
+ sizeof(struct sctp_init_ack_chunk) + SCTP_SIGNATURE_SIZE) {
+ /* cookie too long! or too small */
+ return (NULL);
+ }
+ /*
+ * split off the signature into its own mbuf (since it should not be
+ * calculated in the sctp_hmac_m() call).
+ */
+ sig_offset = offset + cookie_len - SCTP_SIGNATURE_SIZE;
+ if (sig_offset > size_of_pkt) {
+ /* packet not correct size! */
+ /* XXX this may already be accounted for earlier... */
+ return (NULL);
+ }
+ m_sig = m_split(m, sig_offset, M_DONTWAIT);
+ if (m_sig == NULL) {
+ /* out of memory or ?? */
+ return (NULL);
+ }
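+ /*
+ * m now ends right before the signature and m_sig holds the
+ * trailing SCTP_SIGNATURE_SIZE bytes, so the HMAC below covers
+ * only the cookie proper.
+ */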
+#ifdef SCTP_MBUF_LOGGING
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
+ struct mbuf *mat;
+
+ mat = m_sig;
+ while (mat) {
+ if (SCTP_BUF_IS_EXTENDED(mat)) {
+ sctp_log_mb(mat, SCTP_MBUF_SPLIT);
+ }
+ mat = SCTP_BUF_NEXT(mat);
+ }
+ }
+#endif
+
+ /*
+ * compute the signature/digest for the cookie
+ */
+ ep = &(*inp_p)->sctp_ep;
+ l_inp = *inp_p;
+ if (l_stcb) {
+ SCTP_TCB_UNLOCK(l_stcb);
+ }
+ SCTP_INP_RLOCK(l_inp);
+ if (l_stcb) {
+ SCTP_TCB_LOCK(l_stcb);
+ }
+ /* which cookie is it? */
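+ /*
+ * The endpoint rotates its secret periodically; a cookie minted
+ * before the latest change must be checked with the previous
+ * secret rather than the current one.
+ */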
+ if ((cookie->time_entered.tv_sec < (long)ep->time_of_secret_change) &&
+ (ep->current_secret_number != ep->last_secret_number)) {
+ /* it's the old cookie */
+ (void)sctp_hmac_m(SCTP_HMAC,
+ (uint8_t *) ep->secret_key[(int)ep->last_secret_number],
+ SCTP_SECRET_SIZE, m, cookie_offset, calc_sig, 0);
+ } else {
+ /* it's the current cookie */
+ (void)sctp_hmac_m(SCTP_HMAC,
+ (uint8_t *) ep->secret_key[(int)ep->current_secret_number],
+ SCTP_SECRET_SIZE, m, cookie_offset, calc_sig, 0);
+ }
+ /* get the signature */
+ SCTP_INP_RUNLOCK(l_inp);
+ sig = (uint8_t *) sctp_m_getptr(m_sig, 0, SCTP_SIGNATURE_SIZE, (uint8_t *) & tmp_sig);
+ if (sig == NULL) {
+ /* couldn't find signature */
+ sctp_m_freem(m_sig);
+ return (NULL);
+ }
+ /* compare the received digest with the computed digest */
+ if (memcmp(calc_sig, sig, SCTP_SIGNATURE_SIZE) != 0) {
+ /* try the old cookie? */
+ if ((cookie->time_entered.tv_sec == (long)ep->time_of_secret_change) &&
+ (ep->current_secret_number != ep->last_secret_number)) {
+ /* compute digest with old */
+ (void)sctp_hmac_m(SCTP_HMAC,
+ (uint8_t *) ep->secret_key[(int)ep->last_secret_number],
+ SCTP_SECRET_SIZE, m, cookie_offset, calc_sig, 0);
+ /* compare */
+ if (memcmp(calc_sig, sig, SCTP_SIGNATURE_SIZE) == 0)
+ cookie_ok = 1;
+ }
+ } else {
+ cookie_ok = 1;
+ }
+
+ /*
+ * Now before we continue we must reconstruct our mbuf so that
+ * normal processing of any other chunks will work.
+ */
+ {
+ struct mbuf *m_at;
+
+ m_at = m;
+ while (SCTP_BUF_NEXT(m_at) != NULL) {
+ m_at = SCTP_BUF_NEXT(m_at);
+ }
+ SCTP_BUF_NEXT(m_at) = m_sig;
+ }
+
+ if (cookie_ok == 0) {
+ SCTPDBG(SCTP_DEBUG_INPUT2, "handle_cookie_echo: cookie signature validation failed!\n");
+ SCTPDBG(SCTP_DEBUG_INPUT2,
+ "offset = %u, cookie_offset = %u, sig_offset = %u\n",
+ (uint32_t) offset, cookie_offset, sig_offset);
+ return (NULL);
+ }
+ /*
+ * check the cookie timestamps to be sure it's not stale
+ */
+ (void)SCTP_GETTIME_TIMEVAL(&now);
+ /* Expire time is in Ticks, so we convert to seconds */
+ time_expires.tv_sec = cookie->time_entered.tv_sec + TICKS_TO_SEC(cookie->cookie_life);
+ time_expires.tv_usec = cookie->time_entered.tv_usec;
+ /*
+ * TODO sctp_constants.h needs alternative time macros when _KERNEL
+ * is undefined.
+ */
+ if (timevalcmp(&now, &time_expires, >)) {
+ /* cookie is stale! */
+ struct mbuf *op_err;
+ struct sctp_stale_cookie_msg *scm;
+ uint32_t tim;
+
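+ /*
+ * RFC 4960 (3.3.10.3): the Stale Cookie error cause carries a
+ * Measure of Staleness in microseconds.
+ */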
+ op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_stale_cookie_msg),
+ 0, M_DONTWAIT, 1, MT_DATA);
+ if (op_err == NULL) {
+ /* FOOBAR */
+ return (NULL);
+ }
+ /* Set the len */
+ SCTP_BUF_LEN(op_err) = sizeof(struct sctp_stale_cookie_msg);
+ scm = mtod(op_err, struct sctp_stale_cookie_msg *);
+ scm->ph.param_type = htons(SCTP_CAUSE_STALE_COOKIE);
+ scm->ph.param_length = htons((sizeof(struct sctp_paramhdr) +
+ (sizeof(uint32_t))));
+ /* seconds to usec */
+ tim = (now.tv_sec - time_expires.tv_sec) * 1000000;
+ /* add in usec */
+ if (tim == 0)
+ tim = now.tv_usec - cookie->time_entered.tv_usec;
+ scm->time_usec = htonl(tim);
+ sctp_send_operr_to(m, iphlen, op_err, cookie->peers_vtag,
+ vrf_id, port);
+ return (NULL);
+ }
+ /*
+ * Now we must see with the lookup address if we have an existing
+ * asoc. This will only happen if we were in the COOKIE-WAIT state
+ * and an INIT collided with us and somewhere the peer sent the
+ * cookie on another address besides the single address our assoc
+ * had for him. In this case we will have one of the tie-tags set at
+ * least AND the address field in the cookie can be used to look it
+ * up.
+ */
+ to = NULL;
+ if (cookie->addr_type == SCTP_IPV6_ADDRESS) {
+ memset(&sin6, 0, sizeof(sin6));
+ sin6.sin6_family = AF_INET6;
+ sin6.sin6_len = sizeof(sin6);
+ sin6.sin6_port = sh->src_port;
+ sin6.sin6_scope_id = cookie->scope_id;
+ memcpy(&sin6.sin6_addr.s6_addr, cookie->address,
+ sizeof(sin6.sin6_addr.s6_addr));
+ to = (struct sockaddr *)&sin6;
+ } else if (cookie->addr_type == SCTP_IPV4_ADDRESS) {
+ memset(&sin, 0, sizeof(sin));
+ sin.sin_family = AF_INET;
+ sin.sin_len = sizeof(sin);
+ sin.sin_port = sh->src_port;
+ sin.sin_addr.s_addr = cookie->address[0];
+ to = (struct sockaddr *)&sin;
+ } else {
+ /* This should not happen */
+ return (NULL);
+ }
+ if ((*stcb == NULL) && to) {
+ /* Yep, lets check */
+ *stcb = sctp_findassociation_ep_addr(inp_p, to, netp, localep_sa, NULL);
+ if (*stcb == NULL) {
+ /*
+ * We should have only got back the same inp. If we
+ * got back a different ep we have a problem. The
+ * original findep got back l_inp; getting a different
+ * one now would be a bug.
+ */
+ if (l_inp != *inp_p) {
+ SCTP_PRINTF("Bad problem find_ep got a diff inp then special_locate?\n");
+ }
+ } else {
+ if (*locked_tcb == NULL) {
+ /*
+ * In this case we found the assoc only
+ * after we locked the create lock. This
+ * means we are in a colliding case and we
+ * must make sure that we unlock the tcb if
+ * its one of the cases where we throw away
+ * the incoming packets.
+ */
+ *locked_tcb = *stcb;
+
+ /*
+ * We must also increment the inp ref count
+ * since the ref_count flag was set when we
+ * did not find the TCB; now we found it,
+ * which reduces the refcount.. we must
+ * raise it back out to balance it all :-)
+ */
+ SCTP_INP_INCR_REF((*stcb)->sctp_ep);
+ if ((*stcb)->sctp_ep != l_inp) {
+ SCTP_PRINTF("Huh? ep:%p diff then l_inp:%p?\n",
+ (*stcb)->sctp_ep, l_inp);
+ }
+ }
+ }
+ }
+ if (to == NULL) {
+ return (NULL);
+ }
+ cookie_len -= SCTP_SIGNATURE_SIZE;
+ if (*stcb == NULL) {
+ /* this is the "normal" case... get a new TCB */
+ *stcb = sctp_process_cookie_new(m, iphlen, offset, sh, cookie,
+ cookie_len, *inp_p, netp, to, &notification,
+ auth_skipped, auth_offset, auth_len, vrf_id, port);
+ } else {
+ /* this is abnormal... cookie-echo on existing TCB */
+ had_a_existing_tcb = 1;
+ *stcb = sctp_process_cookie_existing(m, iphlen, offset, sh,
+ cookie, cookie_len, *inp_p, *stcb, netp, to,
+ &notification, &sac_restart_id, vrf_id, auth_skipped, auth_offset, auth_len, port);
+ }
+
+ if (*stcb == NULL) {
+ /* still no TCB... must be bad cookie-echo */
+ return (NULL);
+ }
+ /*
+ * Ok, we built an association so confirm the address we sent the
+ * INIT-ACK to.
+ */
+ netl = sctp_findnet(*stcb, to);
+ /*
+ * This code should in theory NOT run, but guard against it anyway.
+ */
+ if (netl == NULL) {
+ /* TSNH! Huh, why do I need to add this address here? */
+ int ret;
+
+ ret = sctp_add_remote_addr(*stcb, to, SCTP_DONOT_SETSCOPE,
+ SCTP_IN_COOKIE_PROC);
+ netl = sctp_findnet(*stcb, to);
+ }
+ if (netl) {
+ if (netl->dest_state & SCTP_ADDR_UNCONFIRMED) {
+ netl->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
+ (void)sctp_set_primary_addr((*stcb), (struct sockaddr *)NULL,
+ netl);
+ sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
+ (*stcb), 0, (void *)netl, SCTP_SO_NOT_LOCKED);
+ }
+ }
+ if (*stcb) {
+ sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, *inp_p,
+ *stcb, NULL);
+ }
+ if ((*inp_p)->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) {
+ if (!had_a_existing_tcb ||
+ (((*inp_p)->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)) {
+ /*
+ * If we have a NEW cookie or the connect never
+ * reached the connected state during collision, we
+ * must do the TCP accept thing.
+ */
+ struct socket *so, *oso;
+ struct sctp_inpcb *inp;
+
+ if (notification == SCTP_NOTIFY_ASSOC_RESTART) {
+ /*
+ * For a restart we will keep the same
+ * socket, no need to do anything. I THINK!!
+ */
+ sctp_ulp_notify(notification, *stcb, 0, (void *)&sac_restart_id, SCTP_SO_NOT_LOCKED);
+ return (m);
+ }
+ oso = (*inp_p)->sctp_socket;
+ atomic_add_int(&(*stcb)->asoc.refcnt, 1);
+ SCTP_TCB_UNLOCK((*stcb));
+ so = sonewconn(oso, 0);
+ SCTP_TCB_LOCK((*stcb));
+ atomic_subtract_int(&(*stcb)->asoc.refcnt, 1);
+
+ if (so == NULL) {
+ struct mbuf *op_err;
+
+#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+ struct socket *pcb_so;
+
+#endif
+ /* Too many sockets */
+ SCTPDBG(SCTP_DEBUG_INPUT1, "process_cookie_new: no room for another socket!\n");
+ op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);
+ sctp_abort_association(*inp_p, NULL, m, iphlen,
+ sh, op_err, vrf_id, port);
+#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+ pcb_so = SCTP_INP_SO(*inp_p);
+ atomic_add_int(&(*stcb)->asoc.refcnt, 1);
+ SCTP_TCB_UNLOCK((*stcb));
+ SCTP_SOCKET_LOCK(pcb_so, 1);
+ SCTP_TCB_LOCK((*stcb));
+ atomic_subtract_int(&(*stcb)->asoc.refcnt, 1);
+#endif
+ (void)sctp_free_assoc(*inp_p, *stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_20);
+#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+ SCTP_SOCKET_UNLOCK(pcb_so, 1);
+#endif
+ return (NULL);
+ }
+ inp = (struct sctp_inpcb *)so->so_pcb;
+ SCTP_INP_INCR_REF(inp);
+ /*
+ * We add the unbound flag here so that if we get an
+ * soabort() before we get the move_pcb done, we
+ * will properly cleanup.
+ */
+ inp->sctp_flags = (SCTP_PCB_FLAGS_TCPTYPE |
+ SCTP_PCB_FLAGS_CONNECTED |
+ SCTP_PCB_FLAGS_IN_TCPPOOL |
+ SCTP_PCB_FLAGS_UNBOUND |
+ (SCTP_PCB_COPY_FLAGS & (*inp_p)->sctp_flags) |
+ SCTP_PCB_FLAGS_DONT_WAKE);
+ inp->sctp_features = (*inp_p)->sctp_features;
+ inp->sctp_mobility_features = (*inp_p)->sctp_mobility_features;
+ inp->sctp_socket = so;
+ inp->sctp_frag_point = (*inp_p)->sctp_frag_point;
+ inp->sctp_cmt_on_off = (*inp_p)->sctp_cmt_on_off;
+ inp->partial_delivery_point = (*inp_p)->partial_delivery_point;
+ inp->sctp_context = (*inp_p)->sctp_context;
+ inp->inp_starting_point_for_iterator = NULL;
+ /*
+ * copy in the authentication parameters from the
+ * original endpoint
+ */
+ if (inp->sctp_ep.local_hmacs)
+ sctp_free_hmaclist(inp->sctp_ep.local_hmacs);
+ inp->sctp_ep.local_hmacs =
+ sctp_copy_hmaclist((*inp_p)->sctp_ep.local_hmacs);
+ if (inp->sctp_ep.local_auth_chunks)
+ sctp_free_chunklist(inp->sctp_ep.local_auth_chunks);
+ inp->sctp_ep.local_auth_chunks =
+ sctp_copy_chunklist((*inp_p)->sctp_ep.local_auth_chunks);
+
+ /*
+ * Now we must move it from one hash table to
+ * another and get the tcb in the right place.
+ */
+
+ /*
+ * This is where the one-2-one socket is put into
+ * the accept state waiting for the accept!
+ */
+ if (*stcb) {
+ (*stcb)->asoc.state |= SCTP_STATE_IN_ACCEPT_QUEUE;
+ }
+ sctp_move_pcb_and_assoc(*inp_p, inp, *stcb);
+
+ atomic_add_int(&(*stcb)->asoc.refcnt, 1);
+ SCTP_TCB_UNLOCK((*stcb));
+
+ sctp_pull_off_control_to_new_inp((*inp_p), inp, *stcb,
+ 0);
+ SCTP_TCB_LOCK((*stcb));
+ atomic_subtract_int(&(*stcb)->asoc.refcnt, 1);
+
+
+ /*
+ * now we must check to see if we were aborted while
+ * the move was going on and the lock/unlock
+ * happened.
+ */
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
+ /*
+ * yep it was, we leave the assoc attached
+ * to the socket since the sctp_inpcb_free()
+ * call will send an abort for us.
+ */
+ SCTP_INP_DECR_REF(inp);
+ return (NULL);
+ }
+ SCTP_INP_DECR_REF(inp);
+ /* Switch over to the new guy */
+ *inp_p = inp;
+ sctp_ulp_notify(notification, *stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
+
+ /*
+ * Pull it from the incomplete queue and wake the
+ * guy
+ */
+#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+ atomic_add_int(&(*stcb)->asoc.refcnt, 1);
+ SCTP_TCB_UNLOCK((*stcb));
+ SCTP_SOCKET_LOCK(so, 1);
+#endif
+ soisconnected(so);
+#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+ SCTP_TCB_LOCK((*stcb));
+ atomic_subtract_int(&(*stcb)->asoc.refcnt, 1);
+ SCTP_SOCKET_UNLOCK(so, 1);
+#endif
+ return (m);
+ }
+ }
+ if ((notification) && ((*inp_p)->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE)) {
+ sctp_ulp_notify(notification, *stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
+ }
+ return (m);
+}
+
+static void
+sctp_handle_cookie_ack(struct sctp_cookie_ack_chunk *cp,
+ struct sctp_tcb *stcb, struct sctp_nets *net)
+{
+ /* cp must not be used, others call this without a c-ack :-) */
+ struct sctp_association *asoc;
+
+ SCTPDBG(SCTP_DEBUG_INPUT2,
+ "sctp_handle_cookie_ack: handling COOKIE-ACK\n");
+ if (stcb == NULL)
+ return;
+
+ asoc = &stcb->asoc;
+
+ sctp_stop_all_cookie_timers(stcb);
+ /* process according to association state */
+ if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED) {
+ /* state change only needed when I am in right state */
+ SCTPDBG(SCTP_DEBUG_INPUT2, "moving to OPEN state\n");
+ SCTP_SET_STATE(asoc, SCTP_STATE_OPEN);
+ if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
+ sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
+ stcb->sctp_ep, stcb, asoc->primary_destination);
+
+ }
+ /* update RTO */
+ SCTP_STAT_INCR_COUNTER32(sctps_activeestab);
+ SCTP_STAT_INCR_GAUGE32(sctps_currestab);
+ if (asoc->overall_error_count == 0) {
+ net->RTO = sctp_calculate_rto(stcb, asoc, net,
+ &asoc->time_entered, sctp_align_safe_nocopy);
+ }
+ (void)SCTP_GETTIME_TIMEVAL(&asoc->time_entered);
+ sctp_ulp_notify(SCTP_NOTIFY_ASSOC_UP, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
+ if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+ (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
+#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+ struct socket *so;
+
+#endif
+ stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
+#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+ so = SCTP_INP_SO(stcb->sctp_ep);
+ atomic_add_int(&stcb->asoc.refcnt, 1);
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_SOCKET_LOCK(so, 1);
+ SCTP_TCB_LOCK(stcb);
+ atomic_subtract_int(&stcb->asoc.refcnt, 1);
+ if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
+ SCTP_SOCKET_UNLOCK(so, 1);
+ return;
+ }
+#endif
+ soisconnected(stcb->sctp_socket);
+#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+ SCTP_SOCKET_UNLOCK(so, 1);
+#endif
+ }
+ sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep,
+ stcb, net);
+ /*
+ * since we did not send a HB, make sure we don't
+ * double things
+ */
+ net->hb_responded = 1;
+
+ if (stcb->asoc.sctp_autoclose_ticks &&
+ sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_AUTOCLOSE)) {
+ sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE,
+ stcb->sctp_ep, stcb, NULL);
+ }
+ /*
+ * send ASCONF if parameters are pending and ASCONFs are
+ * allowed (eg. addresses changed when init/cookie echo were
+ * in flight)
+ */
+ if ((sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_DO_ASCONF)) &&
+ (stcb->asoc.peer_supports_asconf) &&
+ (!TAILQ_EMPTY(&stcb->asoc.asconf_queue))) {
+#ifdef SCTP_TIMER_BASED_ASCONF
+ sctp_timer_start(SCTP_TIMER_TYPE_ASCONF,
+ stcb->sctp_ep, stcb,
+ stcb->asoc.primary_destination);
+#else
+ sctp_send_asconf(stcb, stcb->asoc.primary_destination,
+ SCTP_ADDR_NOT_LOCKED);
+#endif
+ }
+ }
+ /* Toss the cookie if I can */
+ sctp_toss_old_cookies(stcb, asoc);
+ if (!TAILQ_EMPTY(&asoc->sent_queue)) {
+ /* Restart the timer if we have pending data */
+ struct sctp_tmit_chunk *chk;
+
+ chk = TAILQ_FIRST(&asoc->sent_queue);
+ if (chk) {
+ sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
+ stcb, chk->whoTo);
+ }
+ }
+}
+
+static void
+sctp_handle_ecn_echo(struct sctp_ecne_chunk *cp,
+ struct sctp_tcb *stcb)
+{
+ struct sctp_nets *net;
+ struct sctp_tmit_chunk *lchk;
+ uint32_t tsn;
+
+ if (ntohs(cp->ch.chunk_length) != sizeof(struct sctp_ecne_chunk)) {
+ return;
+ }
+ SCTP_STAT_INCR(sctps_recvecne);
+ tsn = ntohl(cp->tsn);
+ /* ECN Nonce stuff: need a resync and disable the nonce sum check */
+ /* Also we make sure we disable the nonce_wait */
+ lchk = TAILQ_FIRST(&stcb->asoc.send_queue);
+ if (lchk == NULL) {
+ stcb->asoc.nonce_resync_tsn = stcb->asoc.sending_seq;
+ } else {
+ stcb->asoc.nonce_resync_tsn = lchk->rec.data.TSN_seq;
+ }
+ stcb->asoc.nonce_wait_for_ecne = 0;
+ stcb->asoc.nonce_sum_check = 0;
+
+ /* Find where it was sent, if possible */
+ net = NULL;
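+	/*
+	 * The sent queue is ordered by TSN, so once compare_with_wrap()
+	 * shows we have passed the reported TSN we can stop looking.
+	 */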
+ lchk = TAILQ_FIRST(&stcb->asoc.sent_queue);
+ while (lchk) {
+ if (lchk->rec.data.TSN_seq == tsn) {
+ net = lchk->whoTo;
+ break;
+ }
+ if (compare_with_wrap(lchk->rec.data.TSN_seq, tsn, MAX_SEQ))
+ break;
+ lchk = TAILQ_NEXT(lchk, sctp_next);
+ }
+ if (net == NULL)
+ /* default is we use the primary */
+ net = stcb->asoc.primary_destination;
+
+ if (compare_with_wrap(tsn, stcb->asoc.last_cwr_tsn, MAX_TSN)) {
+ /*
+ * JRS - Use the congestion control given in the pluggable
+ * CC module
+ */
+ stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo(stcb, net);
+ /*
+ * we reduce once every RTT. So we will only lower cwnd at
+ * the next sending seq, i.e. the resync_tsn.
+ */
+ stcb->asoc.last_cwr_tsn = stcb->asoc.nonce_resync_tsn;
+ }
+ /*
+ * We always send a CWR this way: if our previous one was lost, our
+ * peer will get an update, and if it is not yet time to reduce again,
+ * the peer still gets the CWR.
+ */
+ sctp_send_cwr(stcb, net, tsn);
+}
+
+static void
+sctp_handle_ecn_cwr(struct sctp_cwr_chunk *cp, struct sctp_tcb *stcb)
+{
+ /*
+ * Here we get a CWR from the peer. We must look in the outqueue and
+ * make sure that we have a covered ECNE in the control chunk part.
+ * If so, remove it.
+ */
+ struct sctp_tmit_chunk *chk;
+ struct sctp_ecne_chunk *ecne;
+
+ TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) {
+ if (chk->rec.chunk_id.id != SCTP_ECN_ECHO) {
+ continue;
+ }
+ /*
+ * Look for it and remove it if it is the right TSN. Since there
+ * is only ONE ECNE on the control queue at any one time we
+ * don't need to worry about more than one!
+ */
+ ecne = mtod(chk->data, struct sctp_ecne_chunk *);
+ if (compare_with_wrap(ntohl(cp->tsn), ntohl(ecne->tsn),
+ MAX_TSN) || (cp->tsn == ecne->tsn)) {
+ /* this covers this ECNE, we can remove it */
+ stcb->asoc.ecn_echo_cnt_onq--;
+ TAILQ_REMOVE(&stcb->asoc.control_send_queue, chk,
+ sctp_next);
+ if (chk->data) {
+ sctp_m_freem(chk->data);
+ chk->data = NULL;
+ }
+ stcb->asoc.ctrl_queue_cnt--;
+ sctp_free_a_chunk(stcb, chk);
+ break;
+ }
+ }
+}
+
+static void
+sctp_handle_shutdown_complete(struct sctp_shutdown_complete_chunk *cp,
+ struct sctp_tcb *stcb, struct sctp_nets *net)
+{
+ struct sctp_association *asoc;
+
+#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+ struct socket *so;
+
+#endif
+
+ SCTPDBG(SCTP_DEBUG_INPUT2,
+ "sctp_handle_shutdown_complete: handling SHUTDOWN-COMPLETE\n");
+ if (stcb == NULL)
+ return;
+
+ asoc = &stcb->asoc;
+ /* process according to association state */
+ if (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT) {
+ /* unexpected SHUTDOWN-COMPLETE... so ignore... */
+ SCTPDBG(SCTP_DEBUG_INPUT2,
+ "sctp_handle_shutdown_complete: not in SCTP_STATE_SHUTDOWN_ACK_SENT --- ignore\n");
+ SCTP_TCB_UNLOCK(stcb);
+ return;
+ }
+ /* notify upper layer protocol */
+ if (stcb->sctp_socket) {
+ sctp_ulp_notify(SCTP_NOTIFY_ASSOC_DOWN, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
+ /* are the queues empty? they should be */
+ if (!TAILQ_EMPTY(&asoc->send_queue) ||
+ !TAILQ_EMPTY(&asoc->sent_queue) ||
+ !TAILQ_EMPTY(&asoc->out_wheel)) {
+ sctp_report_all_outbound(stcb, 0, SCTP_SO_NOT_LOCKED);
+ }
+ }
+ /* stop the timer */
+ sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWNACK, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_22);
+ SCTP_STAT_INCR_COUNTER32(sctps_shutdown);
+ /* free the TCB */
+ SCTPDBG(SCTP_DEBUG_INPUT2,
+ "sctp_handle_shutdown_complete: calls free-asoc\n");
+#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+ so = SCTP_INP_SO(stcb->sctp_ep);
+ atomic_add_int(&stcb->asoc.refcnt, 1);
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_SOCKET_LOCK(so, 1);
+ SCTP_TCB_LOCK(stcb);
+ atomic_subtract_int(&stcb->asoc.refcnt, 1);
+#endif
+ (void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_23);
+#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+ SCTP_SOCKET_UNLOCK(so, 1);
+#endif
+ return;
+}
+
+static int
+process_chunk_drop(struct sctp_tcb *stcb, struct sctp_chunk_desc *desc,
+ struct sctp_nets *net, uint8_t flg)
+{
+ switch (desc->chunk_type) {
+ case SCTP_DATA:
+		/* find the tsn to resend (possibly) */
+ {
+ uint32_t tsn;
+ struct sctp_tmit_chunk *tp1;
+
+ tsn = ntohl(desc->tsn_ifany);
+ tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
+ while (tp1) {
+ if (tp1->rec.data.TSN_seq == tsn) {
+ /* found it */
+ break;
+ }
+ if (compare_with_wrap(tp1->rec.data.TSN_seq, tsn,
+ MAX_TSN)) {
+ /* not found */
+ tp1 = NULL;
+ break;
+ }
+ tp1 = TAILQ_NEXT(tp1, sctp_next);
+ }
+ if (tp1 == NULL) {
+ /*
+				 * Do it the other way, i.e. without paying
+ * attention to queue seq order.
+ */
+ SCTP_STAT_INCR(sctps_pdrpdnfnd);
+ tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
+ while (tp1) {
+ if (tp1->rec.data.TSN_seq == tsn) {
+ /* found it */
+ break;
+ }
+ tp1 = TAILQ_NEXT(tp1, sctp_next);
+ }
+ }
+ if (tp1 == NULL) {
+ SCTP_STAT_INCR(sctps_pdrptsnnf);
+ }
+ if ((tp1) && (tp1->sent < SCTP_DATAGRAM_ACKED)) {
+ uint8_t *ddp;
+
+ if (((flg & SCTP_BADCRC) == 0) &&
+ ((flg & SCTP_FROM_MIDDLE_BOX) == 0)) {
+ return (0);
+ }
+ if ((stcb->asoc.peers_rwnd == 0) &&
+ ((flg & SCTP_FROM_MIDDLE_BOX) == 0)) {
+ SCTP_STAT_INCR(sctps_pdrpdiwnp);
+ return (0);
+ }
+ if (stcb->asoc.peers_rwnd == 0 &&
+ (flg & SCTP_FROM_MIDDLE_BOX)) {
+ SCTP_STAT_INCR(sctps_pdrpdizrw);
+ return (0);
+ }
+ ddp = (uint8_t *) (mtod(tp1->data, caddr_t)+
+ sizeof(struct sctp_data_chunk));
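+				/*
+				 * Compare the reflected data bytes with what
+				 * we actually sent; on a mismatch the report
+				 * does not describe our chunk and cannot be
+				 * trusted.
+				 */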
+ {
+ unsigned int iii;
+
+ for (iii = 0; iii < sizeof(desc->data_bytes);
+ iii++) {
+ if (ddp[iii] != desc->data_bytes[iii]) {
+ SCTP_STAT_INCR(sctps_pdrpbadd);
+ return (-1);
+ }
+ }
+ }
+ /*
+ * We zero out the nonce so a resync is not
+ * needed
+ */
+ tp1->rec.data.ect_nonce = 0;
+
+ if (tp1->do_rtt) {
+ /*
+ * this guy had an RTO calculation
+ * pending on it; cancel it
+ */
+ tp1->do_rtt = 0;
+ }
+ SCTP_STAT_INCR(sctps_pdrpmark);
+ if (tp1->sent != SCTP_DATAGRAM_RESEND)
+ sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
+ /*
+ * mark it as if we were doing a FR, since
+ * we will be getting gap ack reports behind
+ * the info from the router.
+ */
+ tp1->rec.data.doing_fast_retransmit = 1;
+ /*
+ * mark the tsn with what sequences can
+ * cause a new FR.
+ */
+ if (TAILQ_EMPTY(&stcb->asoc.send_queue)) {
+ tp1->rec.data.fast_retran_tsn = stcb->asoc.sending_seq;
+ } else {
+ tp1->rec.data.fast_retran_tsn = (TAILQ_FIRST(&stcb->asoc.send_queue))->rec.data.TSN_seq;
+ }
+
+ /* restart the timer */
+ sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
+ stcb, tp1->whoTo, SCTP_FROM_SCTP_INPUT + SCTP_LOC_24);
+ sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
+ stcb, tp1->whoTo);
+
+ /* fix counts and things */
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
+ sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_PDRP,
+ tp1->whoTo->flight_size,
+ tp1->book_size,
+ (uintptr_t) stcb,
+ tp1->rec.data.TSN_seq);
+ }
+ if (tp1->sent < SCTP_DATAGRAM_RESEND) {
+ sctp_flight_size_decrease(tp1);
+ sctp_total_flight_decrease(stcb, tp1);
+ }
+ tp1->sent = SCTP_DATAGRAM_RESEND;
+			}
+			{
+ /* audit code */
+ unsigned int audit;
+
+ audit = 0;
+ TAILQ_FOREACH(tp1, &stcb->asoc.sent_queue, sctp_next) {
+ if (tp1->sent == SCTP_DATAGRAM_RESEND)
+ audit++;
+ }
+ TAILQ_FOREACH(tp1, &stcb->asoc.control_send_queue,
+ sctp_next) {
+ if (tp1->sent == SCTP_DATAGRAM_RESEND)
+ audit++;
+ }
+ if (audit != stcb->asoc.sent_queue_retran_cnt) {
+ SCTP_PRINTF("**Local Audit finds cnt:%d asoc cnt:%d\n",
+ audit, stcb->asoc.sent_queue_retran_cnt);
+#ifndef SCTP_AUDITING_ENABLED
+ stcb->asoc.sent_queue_retran_cnt = audit;
+#endif
+ }
+ }
+ }
+ break;
+ case SCTP_ASCONF:
+ {
+ struct sctp_tmit_chunk *asconf;
+
+ TAILQ_FOREACH(asconf, &stcb->asoc.control_send_queue,
+ sctp_next) {
+ if (asconf->rec.chunk_id.id == SCTP_ASCONF) {
+ break;
+ }
+ }
+ if (asconf) {
+ if (asconf->sent != SCTP_DATAGRAM_RESEND)
+ sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
+ asconf->sent = SCTP_DATAGRAM_RESEND;
+ asconf->snd_count--;
+ }
+ }
+ break;
+ case SCTP_INITIATION:
+ /* resend the INIT */
+ stcb->asoc.dropped_special_cnt++;
+ if (stcb->asoc.dropped_special_cnt < SCTP_RETRY_DROPPED_THRESH) {
+ /*
+ * If we can get it in within a few attempts we do
+ * this; otherwise we let the timer fire.
+ */
+ sctp_timer_stop(SCTP_TIMER_TYPE_INIT, stcb->sctp_ep,
+ stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_25);
+ sctp_send_initiate(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
+ }
+ break;
+ case SCTP_SELECTIVE_ACK:
+ case SCTP_NR_SELECTIVE_ACK:
+ /* resend the sack */
+ sctp_send_sack(stcb);
+ break;
+ case SCTP_HEARTBEAT_REQUEST:
+ /* resend a demand HB */
+ if ((stcb->asoc.overall_error_count + 3) < stcb->asoc.max_send_times) {
+ /*
+			 * Only retransmit if we KNOW we won't destroy the
+ * tcb
+ */
+ (void)sctp_send_hb(stcb, 1, net);
+ }
+ break;
+ case SCTP_SHUTDOWN:
+ sctp_send_shutdown(stcb, net);
+ break;
+ case SCTP_SHUTDOWN_ACK:
+ sctp_send_shutdown_ack(stcb, net);
+ break;
+ case SCTP_COOKIE_ECHO:
+ {
+ struct sctp_tmit_chunk *cookie;
+
+ cookie = NULL;
+ TAILQ_FOREACH(cookie, &stcb->asoc.control_send_queue,
+ sctp_next) {
+ if (cookie->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
+ break;
+ }
+ }
+ if (cookie) {
+ if (cookie->sent != SCTP_DATAGRAM_RESEND)
+ sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
+ cookie->sent = SCTP_DATAGRAM_RESEND;
+ sctp_stop_all_cookie_timers(stcb);
+ }
+ }
+ break;
+ case SCTP_COOKIE_ACK:
+ sctp_send_cookie_ack(stcb);
+ break;
+ case SCTP_ASCONF_ACK:
+ /* resend last asconf ack */
+ sctp_send_asconf_ack(stcb);
+ break;
+ case SCTP_FORWARD_CUM_TSN:
+ send_forward_tsn(stcb, &stcb->asoc);
+ break;
+ /* can't do anything with these */
+ case SCTP_PACKET_DROPPED:
+ case SCTP_INITIATION_ACK: /* this should not happen */
+ case SCTP_HEARTBEAT_ACK:
+ case SCTP_ABORT_ASSOCIATION:
+ case SCTP_OPERATION_ERROR:
+ case SCTP_SHUTDOWN_COMPLETE:
+ case SCTP_ECN_ECHO:
+ case SCTP_ECN_CWR:
+ default:
+ break;
+ }
+ return (0);
+}
+
+void
+sctp_reset_in_stream(struct sctp_tcb *stcb, int number_entries, uint16_t * list)
+{
+ int i;
+ uint16_t temp;
+
+ /*
+ * We set things to 0xffff since this is the last delivered sequence
+ * and we will be sending in 0 after the reset.
+ */
+
+ if (number_entries) {
+ for (i = 0; i < number_entries; i++) {
+ temp = ntohs(list[i]);
+ if (temp >= stcb->asoc.streamincnt) {
+ continue;
+ }
+ stcb->asoc.strmin[temp].last_sequence_delivered = 0xffff;
+ }
+ } else {
+ list = NULL;
+ for (i = 0; i < stcb->asoc.streamincnt; i++) {
+ stcb->asoc.strmin[i].last_sequence_delivered = 0xffff;
+ }
+ }
+ sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_RECV, stcb, number_entries, (void *)list, SCTP_SO_NOT_LOCKED);
+}
+
+static void
+sctp_reset_out_streams(struct sctp_tcb *stcb, int number_entries, uint16_t * list)
+{
+ int i;
+
+ if (number_entries == 0) {
+ for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
+ stcb->asoc.strmout[i].next_sequence_sent = 0;
+ }
+ } else if (number_entries) {
+ for (i = 0; i < number_entries; i++) {
+ uint16_t temp;
+
+ temp = ntohs(list[i]);
+ if (temp >= stcb->asoc.streamoutcnt) {
+ /* no such stream */
+ continue;
+ }
+ stcb->asoc.strmout[temp].next_sequence_sent = 0;
+ }
+ }
+ sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_SEND, stcb, number_entries, (void *)list, SCTP_SO_NOT_LOCKED);
+}
+
+
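+/*
+ * Locate the outstanding stream-reset out-request whose sequence number
+ * matches seq. At most two requests can be bundled in one chunk, so the
+ * first request and, if present, the second are checked. Returns NULL
+ * when no matching request is queued.
+ */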
+struct sctp_stream_reset_out_request *
+sctp_find_stream_reset(struct sctp_tcb *stcb, uint32_t seq, struct sctp_tmit_chunk **bchk)
+{
+ struct sctp_association *asoc;
+ struct sctp_stream_reset_out_req *req;
+ struct sctp_stream_reset_out_request *r;
+ struct sctp_tmit_chunk *chk;
+ int len, clen;
+
+ asoc = &stcb->asoc;
+ if (TAILQ_EMPTY(&stcb->asoc.control_send_queue)) {
+ asoc->stream_reset_outstanding = 0;
+ return (NULL);
+ }
+ if (stcb->asoc.str_reset == NULL) {
+ asoc->stream_reset_outstanding = 0;
+ return (NULL);
+ }
+ chk = stcb->asoc.str_reset;
+ if (chk->data == NULL) {
+ return (NULL);
+ }
+ if (bchk) {
+		/* the caller wants a copy of the chk pointer */
+ *bchk = chk;
+ }
+ clen = chk->send_size;
+ req = mtod(chk->data, struct sctp_stream_reset_out_req *);
+ r = &req->sr_req;
+ if (ntohl(r->request_seq) == seq) {
+ /* found it */
+ return (r);
+ }
+ len = SCTP_SIZE32(ntohs(r->ph.param_length));
+ if (clen > (len + (int)sizeof(struct sctp_chunkhdr))) {
+ /* move to the next one, there can only be a max of two */
+ r = (struct sctp_stream_reset_out_request *)((caddr_t)r + len);
+ if (ntohl(r->request_seq) == seq) {
+ return (r);
+ }
+ }
+ /* that seq is not here */
+ return (NULL);
+}
+
+static void
+sctp_clean_up_stream_reset(struct sctp_tcb *stcb)
+{
+ struct sctp_association *asoc;
+ struct sctp_tmit_chunk *chk = stcb->asoc.str_reset;
+
+ if (stcb->asoc.str_reset == NULL) {
+ return;
+ }
+ asoc = &stcb->asoc;
+
+ sctp_timer_stop(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, chk->whoTo, SCTP_FROM_SCTP_INPUT + SCTP_LOC_26);
+ TAILQ_REMOVE(&asoc->control_send_queue,
+ chk,
+ sctp_next);
+ if (chk->data) {
+ sctp_m_freem(chk->data);
+ chk->data = NULL;
+ }
+ asoc->ctrl_queue_cnt--;
+ sctp_free_a_chunk(stcb, chk);
+ /* sa_ignore NO_NULL_CHK */
+ stcb->asoc.str_reset = NULL;
+}
+
+
+static int
+sctp_handle_stream_reset_response(struct sctp_tcb *stcb,
+ uint32_t seq, uint32_t action,
+ struct sctp_stream_reset_response *respin)
+{
+ uint16_t type;
+ int lparm_len;
+ struct sctp_association *asoc = &stcb->asoc;
+ struct sctp_tmit_chunk *chk;
+ struct sctp_stream_reset_out_request *srparam;
+ int number_entries;
+
+ if (asoc->stream_reset_outstanding == 0) {
+ /* duplicate */
+ return (0);
+ }
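+	/*
+	 * Only the response for the sequence number we currently have
+	 * outstanding is acted on; anything else is stale and ignored.
+	 */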
+ if (seq == stcb->asoc.str_reset_seq_out) {
+ srparam = sctp_find_stream_reset(stcb, seq, &chk);
+ if (srparam) {
+ stcb->asoc.str_reset_seq_out++;
+ type = ntohs(srparam->ph.param_type);
+ lparm_len = ntohs(srparam->ph.param_length);
+ if (type == SCTP_STR_RESET_OUT_REQUEST) {
+ number_entries = (lparm_len - sizeof(struct sctp_stream_reset_out_request)) / sizeof(uint16_t);
+ asoc->stream_reset_out_is_outstanding = 0;
+ if (asoc->stream_reset_outstanding)
+ asoc->stream_reset_outstanding--;
+ if (action == SCTP_STREAM_RESET_PERFORMED) {
+ /* do it */
+ sctp_reset_out_streams(stcb, number_entries, srparam->list_of_streams);
+ } else {
+ sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_FAILED_OUT, stcb, number_entries, srparam->list_of_streams, SCTP_SO_NOT_LOCKED);
+ }
+ } else if (type == SCTP_STR_RESET_IN_REQUEST) {
+ /* Answered my request */
+ number_entries = (lparm_len - sizeof(struct sctp_stream_reset_in_request)) / sizeof(uint16_t);
+ if (asoc->stream_reset_outstanding)
+ asoc->stream_reset_outstanding--;
+ if (action != SCTP_STREAM_RESET_PERFORMED) {
+ sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_FAILED_IN, stcb, number_entries, srparam->list_of_streams, SCTP_SO_NOT_LOCKED);
+ }
+ } else if (type == SCTP_STR_RESET_ADD_STREAMS) {
+ /* Ok we now may have more streams */
+ if (asoc->stream_reset_outstanding)
+ asoc->stream_reset_outstanding--;
+ if (action == SCTP_STREAM_RESET_PERFORMED) {
+ /* Put the new streams into effect */
+ stcb->asoc.streamoutcnt = stcb->asoc.strm_realoutsize;
+ sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_ADD_OK, stcb,
+ (uint32_t) stcb->asoc.streamoutcnt, NULL, SCTP_SO_NOT_LOCKED);
+ } else {
+ sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_ADD_FAIL, stcb,
+ (uint32_t) stcb->asoc.streamoutcnt, NULL, SCTP_SO_NOT_LOCKED);
+ }
+ } else if (type == SCTP_STR_RESET_TSN_REQUEST) {
+ /**
+ * a) Adopt the new in-tsn.
+ * b) Reset the map.
+ * c) Adopt the new out-tsn.
+ */
+ struct sctp_stream_reset_response_tsn *resp;
+ struct sctp_forward_tsn_chunk fwdtsn;
+ int abort_flag = 0;
+
+ if (respin == NULL) {
+ /* huh ? */
+ return (0);
+ }
+ if (action == SCTP_STREAM_RESET_PERFORMED) {
+ resp = (struct sctp_stream_reset_response_tsn *)respin;
+ asoc->stream_reset_outstanding--;
+ fwdtsn.ch.chunk_length = htons(sizeof(struct sctp_forward_tsn_chunk));
+ fwdtsn.ch.chunk_type = SCTP_FORWARD_CUM_TSN;
+ fwdtsn.new_cumulative_tsn = htonl(ntohl(resp->senders_next_tsn) - 1);
+ sctp_handle_forward_tsn(stcb, &fwdtsn, &abort_flag, NULL, 0);
+ if (abort_flag) {
+ return (1);
+ }
+ stcb->asoc.highest_tsn_inside_map = (ntohl(resp->senders_next_tsn) - 1);
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
+ sctp_log_map(0, 7, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
+ }
+ stcb->asoc.tsn_last_delivered = stcb->asoc.cumulative_tsn = stcb->asoc.highest_tsn_inside_map;
+ stcb->asoc.mapping_array_base_tsn = ntohl(resp->senders_next_tsn);
+ memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
+
+ stcb->asoc.highest_tsn_inside_nr_map = stcb->asoc.highest_tsn_inside_map;
+ memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
+
+ stcb->asoc.sending_seq = ntohl(resp->receivers_next_tsn);
+ stcb->asoc.last_acked_seq = stcb->asoc.cumulative_tsn;
+
+ sctp_reset_out_streams(stcb, 0, (uint16_t *) NULL);
+ sctp_reset_in_stream(stcb, 0, (uint16_t *) NULL);
+
+ }
+ }
+ /* get rid of the request and get the request flags */
+ if (asoc->stream_reset_outstanding == 0) {
+ sctp_clean_up_stream_reset(stcb);
+ }
+ }
+ }
+ return (0);
+}
+
+static void
+sctp_handle_str_reset_request_in(struct sctp_tcb *stcb,
+ struct sctp_tmit_chunk *chk,
+ struct sctp_stream_reset_in_request *req, int trunc)
+{
+ uint32_t seq;
+ int len, i;
+ int number_entries;
+ uint16_t temp;
+
+ /*
+ * peer wants me to send a str-reset to him for my outgoing seq's if
+ * seq_in is right.
+ */
+ struct sctp_association *asoc = &stcb->asoc;
+
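+	/*
+	 * str_reset_seq_in is the next request sequence we expect; the two
+	 * previous results are cached in last_reset_action[] so that a
+	 * retransmitted request can be answered with the action already
+	 * taken.
+	 */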
+ seq = ntohl(req->request_seq);
+ if (asoc->str_reset_seq_in == seq) {
+ if (trunc) {
+ /* Can't do it, since they exceeded our buffer size */
+ asoc->last_reset_action[1] = asoc->last_reset_action[0];
+ asoc->last_reset_action[0] = SCTP_STREAM_RESET_DENIED;
+ sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
+ } else if (stcb->asoc.stream_reset_out_is_outstanding == 0) {
+ len = ntohs(req->ph.param_length);
+ number_entries = ((len - sizeof(struct sctp_stream_reset_in_request)) / sizeof(uint16_t));
+ for (i = 0; i < number_entries; i++) {
+ temp = ntohs(req->list_of_streams[i]);
+ req->list_of_streams[i] = temp;
+ }
+ /* move the reset action back one */
+ asoc->last_reset_action[1] = asoc->last_reset_action[0];
+ asoc->last_reset_action[0] = SCTP_STREAM_RESET_PERFORMED;
+ sctp_add_stream_reset_out(chk, number_entries, req->list_of_streams,
+ asoc->str_reset_seq_out,
+ seq, (asoc->sending_seq - 1));
+ asoc->stream_reset_out_is_outstanding = 1;
+ asoc->str_reset = chk;
+ sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, chk->whoTo);
+ stcb->asoc.stream_reset_outstanding++;
+ } else {
+ /* Can't do it, since we have sent one out */
+ asoc->last_reset_action[1] = asoc->last_reset_action[0];
+ asoc->last_reset_action[0] = SCTP_STREAM_RESET_TRY_LATER;
+ sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
+ }
+ asoc->str_reset_seq_in++;
+ } else if (asoc->str_reset_seq_in - 1 == seq) {
+ sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
+ } else if (asoc->str_reset_seq_in - 2 == seq) {
+ sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]);
+ } else {
+ sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_BAD_SEQNO);
+ }
+}
+
+static int
+sctp_handle_str_reset_request_tsn(struct sctp_tcb *stcb,
+ struct sctp_tmit_chunk *chk,
+ struct sctp_stream_reset_tsn_request *req)
+{
+ /* reset all in and out and update the tsn */
+ /*
+ * A) reset my str-seq's on in and out. B) Select a receive next,
+ * and set cum-ack to it. Also process this selected number as a
+ * fwd-tsn as well. C) set in the response my next sending seq.
+ */
+ struct sctp_forward_tsn_chunk fwdtsn;
+ struct sctp_association *asoc = &stcb->asoc;
+ int abort_flag = 0;
+ uint32_t seq;
+
+ seq = ntohl(req->request_seq);
+ if (asoc->str_reset_seq_in == seq) {
+ fwdtsn.ch.chunk_length = htons(sizeof(struct sctp_forward_tsn_chunk));
+ fwdtsn.ch.chunk_type = SCTP_FORWARD_CUM_TSN;
+ fwdtsn.ch.chunk_flags = 0;
+ fwdtsn.new_cumulative_tsn = htonl(stcb->asoc.highest_tsn_inside_map + 1);
+ sctp_handle_forward_tsn(stcb, &fwdtsn, &abort_flag, NULL, 0);
+ if (abort_flag) {
+ return (1);
+ }
+ stcb->asoc.highest_tsn_inside_map += SCTP_STREAM_RESET_TSN_DELTA;
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
+ sctp_log_map(0, 10, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
+ }
+ stcb->asoc.tsn_last_delivered = stcb->asoc.cumulative_tsn = stcb->asoc.highest_tsn_inside_map;
+ stcb->asoc.mapping_array_base_tsn = stcb->asoc.highest_tsn_inside_map + 1;
+ memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
+ stcb->asoc.highest_tsn_inside_nr_map = stcb->asoc.highest_tsn_inside_map;
+ memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
+ atomic_add_int(&stcb->asoc.sending_seq, 1);
+ /* save off historical data for retrans */
+ stcb->asoc.last_sending_seq[1] = stcb->asoc.last_sending_seq[0];
+ stcb->asoc.last_sending_seq[0] = stcb->asoc.sending_seq;
+ stcb->asoc.last_base_tsnsent[1] = stcb->asoc.last_base_tsnsent[0];
+ stcb->asoc.last_base_tsnsent[0] = stcb->asoc.mapping_array_base_tsn;
+
+ sctp_add_stream_reset_result_tsn(chk,
+ ntohl(req->request_seq),
+ SCTP_STREAM_RESET_PERFORMED,
+ stcb->asoc.sending_seq,
+ stcb->asoc.mapping_array_base_tsn);
+ sctp_reset_out_streams(stcb, 0, (uint16_t *) NULL);
+ sctp_reset_in_stream(stcb, 0, (uint16_t *) NULL);
+ stcb->asoc.last_reset_action[1] = stcb->asoc.last_reset_action[0];
+ stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_PERFORMED;
+
+ asoc->str_reset_seq_in++;
+ } else if (asoc->str_reset_seq_in - 1 == seq) {
+ sctp_add_stream_reset_result_tsn(chk, seq, asoc->last_reset_action[0],
+ stcb->asoc.last_sending_seq[0],
+ stcb->asoc.last_base_tsnsent[0]
+ );
+ } else if (asoc->str_reset_seq_in - 2 == seq) {
+ sctp_add_stream_reset_result_tsn(chk, seq, asoc->last_reset_action[1],
+ stcb->asoc.last_sending_seq[1],
+ stcb->asoc.last_base_tsnsent[1]
+ );
+ } else {
+ sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_BAD_SEQNO);
+ }
+ return (0);
+}
+
+static void
+sctp_handle_str_reset_request_out(struct sctp_tcb *stcb,
+ struct sctp_tmit_chunk *chk,
+ struct sctp_stream_reset_out_request *req, int trunc)
+{
+ uint32_t seq, tsn;
+ int number_entries, len;
+ struct sctp_association *asoc = &stcb->asoc;
+
+ seq = ntohl(req->request_seq);
+
+	/* now, if it's not a duplicate, we process it */
+ if (asoc->str_reset_seq_in == seq) {
+ len = ntohs(req->ph.param_length);
+ number_entries = ((len - sizeof(struct sctp_stream_reset_out_request)) / sizeof(uint16_t));
+ /*
+ * the sender is resetting; handle the list issue. We must:
+ * a) verify that we can do the reset; if so, no problem;
+ * b) if we can't do the reset, copy the request;
+ * c) queue it, set up the data-in processor to trigger it
+ * when needed, and dequeue all the queued data.
+ */
+ tsn = ntohl(req->send_reset_at_tsn);
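+		/*
+		 * If the peer's reset point is at or before our cumulative
+		 * TSN we can reset immediately; otherwise the request is
+		 * queued on resetHead until the missing TSNs arrive.
+		 */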
+
+ /* move the reset action back one */
+ asoc->last_reset_action[1] = asoc->last_reset_action[0];
+ if (trunc) {
+ sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_DENIED);
+ asoc->last_reset_action[0] = SCTP_STREAM_RESET_DENIED;
+ } else if ((tsn == asoc->cumulative_tsn) ||
+ (compare_with_wrap(asoc->cumulative_tsn, tsn, MAX_TSN))) {
+ /* we can do it now */
+ sctp_reset_in_stream(stcb, number_entries, req->list_of_streams);
+ sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_PERFORMED);
+ asoc->last_reset_action[0] = SCTP_STREAM_RESET_PERFORMED;
+ } else {
+ /*
+ * we must queue it up and thus wait for the TSN's
+ * to arrive that are at or before tsn
+ */
+ struct sctp_stream_reset_list *liste;
+ int siz;
+
+ siz = sizeof(struct sctp_stream_reset_list) + (number_entries * sizeof(uint16_t));
+ SCTP_MALLOC(liste, struct sctp_stream_reset_list *,
+ siz, SCTP_M_STRESET);
+ if (liste == NULL) {
+ /* gak out of memory */
+ sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_DENIED);
+ asoc->last_reset_action[0] = SCTP_STREAM_RESET_DENIED;
+ return;
+ }
+ liste->tsn = tsn;
+ liste->number_entries = number_entries;
+ memcpy(&liste->req, req,
+ (sizeof(struct sctp_stream_reset_out_request) + (number_entries * sizeof(uint16_t))));
+ TAILQ_INSERT_TAIL(&asoc->resetHead, liste, next_resp);
+ sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_PERFORMED);
+ asoc->last_reset_action[0] = SCTP_STREAM_RESET_PERFORMED;
+ }
+ asoc->str_reset_seq_in++;
+ } else if ((asoc->str_reset_seq_in - 1) == seq) {
+ /*
+ * one seq back, just echo back last action since my
+ * response was lost.
+ */
+ sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
+ } else if ((asoc->str_reset_seq_in - 2) == seq) {
+ /*
+ * two seq back, just echo back last action since my
+ * response was lost.
+ */
+ sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]);
+ } else {
+ sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_BAD_SEQNO);
+ }
+}
+
+static void
+sctp_handle_str_reset_add_strm(struct sctp_tcb *stcb, struct sctp_tmit_chunk *chk,
+ struct sctp_stream_reset_add_strm *str_add)
+{
+ /*
+ * Peer is requesting to add more streams. If it's within our
+ * max-streams we will allow it.
+ */
+ uint16_t num_stream, i;
+ uint32_t seq;
+ struct sctp_association *asoc = &stcb->asoc;
+ struct sctp_queued_to_read *ctl;
+
+ /* Get the number. */
+ seq = ntohl(str_add->request_seq);
+ num_stream = ntohs(str_add->number_of_streams);
+ /* Now what would be the new total? */
+ if (asoc->str_reset_seq_in == seq) {
+ num_stream += stcb->asoc.streamincnt;
+ if (num_stream > stcb->asoc.max_inbound_streams) {
+			/* We must reject it, they ask for too many */
+ denied:
+ sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_DENIED);
+ stcb->asoc.last_reset_action[1] = stcb->asoc.last_reset_action[0];
+ stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_DENIED;
+ } else {
+ /* Ok, we can do that :-) */
+ struct sctp_stream_in *oldstrm;
+
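+			/*
+			 * Grow strmin by allocating a new array, moving any
+			 * queued data across, and freeing the old array; if
+			 * the allocation fails we keep the old array and
+			 * deny the request.
+			 */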
+ /* save off the old */
+ oldstrm = stcb->asoc.strmin;
+ SCTP_MALLOC(stcb->asoc.strmin, struct sctp_stream_in *,
+ (num_stream * sizeof(struct sctp_stream_in)),
+ SCTP_M_STRMI);
+ if (stcb->asoc.strmin == NULL) {
+ stcb->asoc.strmin = oldstrm;
+ goto denied;
+ }
+ /* copy off the old data */
+ for (i = 0; i < stcb->asoc.streamincnt; i++) {
+ TAILQ_INIT(&stcb->asoc.strmin[i].inqueue);
+ stcb->asoc.strmin[i].stream_no = i;
+ stcb->asoc.strmin[i].last_sequence_delivered = oldstrm[i].last_sequence_delivered;
+ stcb->asoc.strmin[i].delivery_started = oldstrm[i].delivery_started;
+ /* now anything on those queues? */
+ while (TAILQ_EMPTY(&oldstrm[i].inqueue) == 0) {
+ ctl = TAILQ_FIRST(&oldstrm[i].inqueue);
+ TAILQ_REMOVE(&oldstrm[i].inqueue, ctl, next);
+ TAILQ_INSERT_TAIL(&stcb->asoc.strmin[i].inqueue, ctl, next);
+ }
+ }
+ /* Init the new streams */
+ for (i = stcb->asoc.streamincnt; i < num_stream; i++) {
+ TAILQ_INIT(&stcb->asoc.strmin[i].inqueue);
+ stcb->asoc.strmin[i].stream_no = i;
+ stcb->asoc.strmin[i].last_sequence_delivered = 0xffff;
+ stcb->asoc.strmin[i].delivery_started = 0;
+ }
+ SCTP_FREE(oldstrm, SCTP_M_STRMI);
+ /* update the size */
+ stcb->asoc.streamincnt = num_stream;
+ /* Send the ack */
+ sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_PERFORMED);
+ stcb->asoc.last_reset_action[1] = stcb->asoc.last_reset_action[0];
+ stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_PERFORMED;
+ sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_INSTREAM_ADD_OK, stcb,
+ (uint32_t) stcb->asoc.streamincnt, NULL, SCTP_SO_NOT_LOCKED);
+ }
+ } else if ((asoc->str_reset_seq_in - 1) == seq) {
+ /*
+ * one seq back, just echo back last action since my
+ * response was lost.
+ */
+ sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
+ } else if ((asoc->str_reset_seq_in - 2) == seq) {
+ /*
+ * two seq back, just echo back last action since my
+ * response was lost.
+ */
+ sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]);
+ } else {
+ sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_BAD_SEQNO);
+
+ }
+}
+
+#ifdef __GNUC__
+__attribute__((noinline))
+#endif
+ static int
+ sctp_handle_stream_reset(struct sctp_tcb *stcb, struct mbuf *m, int offset,
+ struct sctp_stream_reset_out_req *sr_req)
+{
+ int chk_length, param_len, ptype;
+ struct sctp_paramhdr pstore;
+ uint8_t cstore[SCTP_CHUNK_BUFFER_SIZE];
+
+ uint32_t seq;
+ int num_req = 0;
+ int trunc = 0;
+ struct sctp_tmit_chunk *chk;
+ struct sctp_chunkhdr *ch;
+ struct sctp_paramhdr *ph;
+ int ret_code = 0;
+ int num_param = 0;
+
+ /* now it may be a reset or a reset-response */
+ chk_length = ntohs(sr_req->ch.chunk_length);
+
+ /* setup for adding the response */
+ sctp_alloc_a_chunk(stcb, chk);
+ if (chk == NULL) {
+ return (ret_code);
+ }
+ chk->rec.chunk_id.id = SCTP_STREAM_RESET;
+ chk->rec.chunk_id.can_take_data = 0;
+ chk->asoc = &stcb->asoc;
+ chk->no_fr_allowed = 0;
+ chk->book_size = chk->send_size = sizeof(struct sctp_chunkhdr);
+ chk->book_size_scale = 0;
+ chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
+ if (chk->data == NULL) {
+strres_nochunk:
+ if (chk->data) {
+ sctp_m_freem(chk->data);
+ chk->data = NULL;
+ }
+ sctp_free_a_chunk(stcb, chk);
+ return (ret_code);
+ }
+ SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
+
+ /* setup chunk parameters */
+ chk->sent = SCTP_DATAGRAM_UNSENT;
+ chk->snd_count = 0;
+ chk->whoTo = stcb->asoc.primary_destination;
+ atomic_add_int(&chk->whoTo->ref_count, 1);
+
+ ch = mtod(chk->data, struct sctp_chunkhdr *);
+ ch->chunk_type = SCTP_STREAM_RESET;
+ ch->chunk_flags = 0;
+ ch->chunk_length = htons(chk->send_size);
+ SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size);
+ offset += sizeof(struct sctp_chunkhdr);
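+	/*
+	 * Walk every parameter in the request, copying each into the
+	 * local cstore buffer; a parameter larger than the buffer is
+	 * processed in truncated form (trunc = 1).
+	 */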
+ while ((size_t)chk_length >= sizeof(struct sctp_stream_reset_tsn_request)) {
+ ph = (struct sctp_paramhdr *)sctp_m_getptr(m, offset, sizeof(pstore), (uint8_t *) & pstore);
+ if (ph == NULL)
+ break;
+ param_len = ntohs(ph->param_length);
+ if (param_len < (int)sizeof(struct sctp_stream_reset_tsn_request)) {
+ /* bad param */
+ break;
+ }
+ ph = (struct sctp_paramhdr *)sctp_m_getptr(m, offset, min(param_len, (int)sizeof(cstore)),
+ (uint8_t *) & cstore);
+ ptype = ntohs(ph->param_type);
+ num_param++;
+ if (param_len > (int)sizeof(cstore)) {
+ trunc = 1;
+ } else {
+ trunc = 0;
+ }
+
+ if (num_param > SCTP_MAX_RESET_PARAMS) {
+			/* hit the max number of parameters already, sorry */
+ break;
+ }
+ if (ptype == SCTP_STR_RESET_OUT_REQUEST) {
+ struct sctp_stream_reset_out_request *req_out;
+
+ req_out = (struct sctp_stream_reset_out_request *)ph;
+ num_req++;
+ if (stcb->asoc.stream_reset_outstanding) {
+ seq = ntohl(req_out->response_seq);
+ if (seq == stcb->asoc.str_reset_seq_out) {
+ /* implicit ack */
+ (void)sctp_handle_stream_reset_response(stcb, seq, SCTP_STREAM_RESET_PERFORMED, NULL);
+ }
+ }
+ sctp_handle_str_reset_request_out(stcb, chk, req_out, trunc);
+ } else if (ptype == SCTP_STR_RESET_ADD_STREAMS) {
+ struct sctp_stream_reset_add_strm *str_add;
+
+ str_add = (struct sctp_stream_reset_add_strm *)ph;
+ num_req++;
+ sctp_handle_str_reset_add_strm(stcb, chk, str_add);
+ } else if (ptype == SCTP_STR_RESET_IN_REQUEST) {
+ struct sctp_stream_reset_in_request *req_in;
+
+ num_req++;
+
+ req_in = (struct sctp_stream_reset_in_request *)ph;
+
+ sctp_handle_str_reset_request_in(stcb, chk, req_in, trunc);
+ } else if (ptype == SCTP_STR_RESET_TSN_REQUEST) {
+ struct sctp_stream_reset_tsn_request *req_tsn;
+
+ num_req++;
+ req_tsn = (struct sctp_stream_reset_tsn_request *)ph;
+
+ if (sctp_handle_str_reset_request_tsn(stcb, chk, req_tsn)) {
+ ret_code = 1;
+ goto strres_nochunk;
+ }
+ /* no more */
+ break;
+ } else if (ptype == SCTP_STR_RESET_RESPONSE) {
+ struct sctp_stream_reset_response *resp;
+ uint32_t result;
+
+ resp = (struct sctp_stream_reset_response *)ph;
+ seq = ntohl(resp->response_seq);
+ result = ntohl(resp->result);
+ if (sctp_handle_stream_reset_response(stcb, seq, result, resp)) {
+ ret_code = 1;
+ goto strres_nochunk;
+ }
+ } else {
+ break;
+ }
+ offset += SCTP_SIZE32(param_len);
+ chk_length -= SCTP_SIZE32(param_len);
+ }
+ if (num_req == 0) {
+		/* we have no response, free the stuff */
+ goto strres_nochunk;
+ }
+ /* ok we have a chunk to link in */
+ TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue,
+ chk,
+ sctp_next);
+ stcb->asoc.ctrl_queue_cnt++;
+ return (ret_code);
+}
+
+/*
+ * Handle a router's or endpoint's report of a packet loss. There are two
+ * ways to handle this: either we get the whole packet and must dissect it
+ * ourselves (possibly with truncation and/or corruption), or it is a
+ * summary from a middle box that did the dissecting for us.
+ */
+static void
+sctp_handle_packet_dropped(struct sctp_pktdrop_chunk *cp,
+ struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t limit)
+{
+ uint32_t bottle_bw, on_queue;
+ uint16_t trunc_len;
+ unsigned int chlen;
+ unsigned int at;
+ struct sctp_chunk_desc desc;
+ struct sctp_chunkhdr *ch;
+
+ chlen = ntohs(cp->ch.chunk_length);
+ chlen -= sizeof(struct sctp_pktdrop_chunk);
+ /* XXX possible chlen underflow */
+ if (chlen == 0) {
+ ch = NULL;
+ if (cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX)
+ SCTP_STAT_INCR(sctps_pdrpbwrpt);
+ } else {
+ ch = (struct sctp_chunkhdr *)(cp->data + sizeof(struct sctphdr));
+ chlen -= sizeof(struct sctphdr);
+ /* XXX possible chlen underflow */
+ memset(&desc, 0, sizeof(desc));
+ }
+ trunc_len = (uint16_t) ntohs(cp->trunc_len);
+ if (trunc_len > limit) {
+ trunc_len = limit;
+ }
+ /* now the chunks themselves */
+ while ((ch != NULL) && (chlen >= sizeof(struct sctp_chunkhdr))) {
+ desc.chunk_type = ch->chunk_type;
+ /* get amount we need to move */
+ at = ntohs(ch->chunk_length);
+ if (at < sizeof(struct sctp_chunkhdr)) {
+ /* corrupt chunk, maybe at the end? */
+ SCTP_STAT_INCR(sctps_pdrpcrupt);
+ break;
+ }
+ if (trunc_len == 0) {
+ /* we are supposed to have all of it */
+ if (at > chlen) {
+ /* corrupt skip it */
+ SCTP_STAT_INCR(sctps_pdrpcrupt);
+ break;
+ }
+ } else {
+ /* is there enough of it left ? */
+ if (desc.chunk_type == SCTP_DATA) {
+ if (chlen < (sizeof(struct sctp_data_chunk) +
+ sizeof(desc.data_bytes))) {
+ break;
+ }
+ } else {
+ if (chlen < sizeof(struct sctp_chunkhdr)) {
+ break;
+ }
+ }
+ }
+ if (desc.chunk_type == SCTP_DATA) {
+ /* can we get out the tsn? */
+ if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX))
+ SCTP_STAT_INCR(sctps_pdrpmbda);
+
+ if (chlen >= (sizeof(struct sctp_data_chunk) + sizeof(uint32_t))) {
+ /* yep */
+ struct sctp_data_chunk *dcp;
+ uint8_t *ddp;
+ unsigned int iii;
+
+ dcp = (struct sctp_data_chunk *)ch;
+ ddp = (uint8_t *) (dcp + 1);
+ for (iii = 0; iii < sizeof(desc.data_bytes); iii++) {
+ desc.data_bytes[iii] = ddp[iii];
+ }
+ desc.tsn_ifany = dcp->dp.tsn;
+ } else {
+ /* nope we are done. */
+ SCTP_STAT_INCR(sctps_pdrpnedat);
+ break;
+ }
+ } else {
+ if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX))
+ SCTP_STAT_INCR(sctps_pdrpmbct);
+ }
+
+ if (process_chunk_drop(stcb, &desc, net, cp->ch.chunk_flags)) {
+ SCTP_STAT_INCR(sctps_pdrppdbrk);
+ break;
+ }
+ if (SCTP_SIZE32(at) > chlen) {
+ break;
+ }
+ chlen -= SCTP_SIZE32(at);
+ if (chlen < sizeof(struct sctp_chunkhdr)) {
+ /* done, none left */
+ break;
+ }
+ ch = (struct sctp_chunkhdr *)((caddr_t)ch + SCTP_SIZE32(at));
+ }
+ /* Now update any rwnd --- possibly */
+ if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX) == 0) {
+ /* From a peer, we get a rwnd report */
+ uint32_t a_rwnd;
+
+ SCTP_STAT_INCR(sctps_pdrpfehos);
+
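+		/*
+		 * The peer reports its bottleneck bandwidth and how much it
+		 * has on queue; the difference serves as an implicit rwnd,
+		 * reduced by what we still have in flight.
+		 */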
+ bottle_bw = ntohl(cp->bottle_bw);
+ on_queue = ntohl(cp->current_onq);
+ if (bottle_bw && on_queue) {
+ /* a rwnd report is in here */
+ if (bottle_bw > on_queue)
+ a_rwnd = bottle_bw - on_queue;
+ else
+ a_rwnd = 0;
+
+ if (a_rwnd == 0)
+ stcb->asoc.peers_rwnd = 0;
+ else {
+ if (a_rwnd > stcb->asoc.total_flight) {
+ stcb->asoc.peers_rwnd =
+ a_rwnd - stcb->asoc.total_flight;
+ } else {
+ stcb->asoc.peers_rwnd = 0;
+ }
+ if (stcb->asoc.peers_rwnd <
+ stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
+ /* SWS sender side engages */
+ stcb->asoc.peers_rwnd = 0;
+ }
+ }
+ }
+ } else {
+ SCTP_STAT_INCR(sctps_pdrpfmbox);
+ }
+
+ /* now middle boxes in sat networks get a cwnd bump */
+ if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX) &&
+ (stcb->asoc.sat_t3_loss_recovery == 0) &&
+ (stcb->asoc.sat_network)) {
+ /*
+		 * This is debatable, but for sat networks it makes sense.
+		 * Note that if a T3 timer has gone off, we will prohibit
+		 * any changes to cwnd until we exit t3 loss recovery.
+ */
+ stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped(stcb,
+ net, cp, &bottle_bw, &on_queue);
+ }
+}
+
+/*
+ * Handles all control chunks in a packet.
+ * inputs:
+ * - m: mbuf chain, assumed to still contain the IP/SCTP header
+ * - stcb: the tcb found for this packet
+ * - offset: offset into the mbuf chain to the first chunkhdr
+ * - length: length of the complete packet
+ * outputs:
+ * - length: modified to the remaining length after control processing
+ * - netp: modified to the new sctp_nets after cookie-echo processing
+ * - return NULL to discard the packet (i.e. no asoc, bad packet, ...),
+ *   otherwise return the tcb for this packet
+ */
+#ifdef __GNUC__
+__attribute__((noinline))
+#endif
+ static struct sctp_tcb *
+ sctp_process_control(struct mbuf *m, int iphlen, int *offset, int length,
+ struct sctphdr *sh, struct sctp_chunkhdr *ch, struct sctp_inpcb *inp,
+ struct sctp_tcb *stcb, struct sctp_nets **netp, int *fwd_tsn_seen,
+ uint32_t vrf_id, uint16_t port)
+{
+ struct sctp_association *asoc;
+ uint32_t vtag_in;
+ int num_chunks = 0; /* number of control chunks processed */
+ uint32_t chk_length;
+ int ret;
+ int abort_no_unlock = 0;
+
+ /*
+	 * How big should this be, and should it be alloc'd? Let's try the
+	 * d-mtu-ceiling for now (2k); that should hopefully work ...
+	 * until we get into jumbograms and such.
+ */
+ uint8_t chunk_buf[SCTP_CHUNK_BUFFER_SIZE];
+ struct sctp_tcb *locked_tcb = stcb;
+ int got_auth = 0;
+ uint32_t auth_offset = 0, auth_len = 0;
+ int auth_skipped = 0;
+ int asconf_cnt = 0;
+
+#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+ struct socket *so;
+
+#endif
+
+ SCTPDBG(SCTP_DEBUG_INPUT1, "sctp_process_control: iphlen=%u, offset=%u, length=%u stcb:%p\n",
+ iphlen, *offset, length, stcb);
+
+ /* validate chunk header length... */
+ if (ntohs(ch->chunk_length) < sizeof(*ch)) {
+ SCTPDBG(SCTP_DEBUG_INPUT1, "Invalid header length %d\n",
+ ntohs(ch->chunk_length));
+ if (locked_tcb) {
+ SCTP_TCB_UNLOCK(locked_tcb);
+ }
+ return (NULL);
+ }
+ /*
+ * validate the verification tag
+ */
+ vtag_in = ntohl(sh->v_tag);
+
+ if (locked_tcb) {
+ SCTP_TCB_LOCK_ASSERT(locked_tcb);
+ }
+ if (ch->chunk_type == SCTP_INITIATION) {
+ SCTPDBG(SCTP_DEBUG_INPUT1, "Its an INIT of len:%d vtag:%x\n",
+ ntohs(ch->chunk_length), vtag_in);
+ if (vtag_in != 0) {
+ /* protocol error- silently discard... */
+ SCTP_STAT_INCR(sctps_badvtag);
+ if (locked_tcb) {
+ SCTP_TCB_UNLOCK(locked_tcb);
+ }
+ return (NULL);
+ }
+ } else if (ch->chunk_type != SCTP_COOKIE_ECHO) {
+ /*
+ * If there is no stcb, skip the AUTH chunk and process
+		 * later, after a stcb is found (to validate that the
+		 * lookup was valid).
+ */
+ if ((ch->chunk_type == SCTP_AUTHENTICATION) &&
+ (stcb == NULL) &&
+ !SCTP_BASE_SYSCTL(sctp_auth_disable)) {
+ /* save this chunk for later processing */
+ auth_skipped = 1;
+ auth_offset = *offset;
+ auth_len = ntohs(ch->chunk_length);
+
+ /* (temporarily) move past this chunk */
+ *offset += SCTP_SIZE32(auth_len);
+ if (*offset >= length) {
+ /* no more data left in the mbuf chain */
+ *offset = length;
+ if (locked_tcb) {
+ SCTP_TCB_UNLOCK(locked_tcb);
+ }
+ return (NULL);
+ }
+ ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
+ sizeof(struct sctp_chunkhdr), chunk_buf);
+ }
+ if (ch == NULL) {
+ /* Help */
+ *offset = length;
+ if (locked_tcb) {
+ SCTP_TCB_UNLOCK(locked_tcb);
+ }
+ return (NULL);
+ }
+ if (ch->chunk_type == SCTP_COOKIE_ECHO) {
+ goto process_control_chunks;
+ }
+ /*
+		 * first check if it's an ASCONF with an unknown src addr;
+		 * we need to look inside to find the association
+ */
+ if (ch->chunk_type == SCTP_ASCONF && stcb == NULL) {
+ struct sctp_chunkhdr *asconf_ch = ch;
+ uint32_t asconf_offset = 0, asconf_len = 0;
+
+ /* inp's refcount may be reduced */
+ SCTP_INP_INCR_REF(inp);
+
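+			/*
+			 * Walk the bundled ASCONF chunks until one of them
+			 * yields an association via the lookup address it
+			 * carries.
+			 */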
+ asconf_offset = *offset;
+ do {
+ asconf_len = ntohs(asconf_ch->chunk_length);
+ if (asconf_len < sizeof(struct sctp_asconf_paramhdr))
+ break;
+ stcb = sctp_findassociation_ep_asconf(m, iphlen,
+ *offset, sh, &inp, netp, vrf_id);
+ if (stcb != NULL)
+ break;
+ asconf_offset += SCTP_SIZE32(asconf_len);
+ asconf_ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, asconf_offset,
+ sizeof(struct sctp_chunkhdr), chunk_buf);
+ } while (asconf_ch != NULL && asconf_ch->chunk_type == SCTP_ASCONF);
+ if (stcb == NULL) {
+ /*
+ * reduce inp's refcount if not reduced in
+ * sctp_findassociation_ep_asconf().
+ */
+ SCTP_INP_DECR_REF(inp);
+ } else {
+ locked_tcb = stcb;
+ }
+
+ /* now go back and verify any auth chunk to be sure */
+ if (auth_skipped && (stcb != NULL)) {
+ struct sctp_auth_chunk *auth;
+
+ auth = (struct sctp_auth_chunk *)
+ sctp_m_getptr(m, auth_offset,
+ auth_len, chunk_buf);
+ got_auth = 1;
+ auth_skipped = 0;
+ if ((auth == NULL) || sctp_handle_auth(stcb, auth, m,
+ auth_offset)) {
+ /* auth HMAC failed so dump it */
+ *offset = length;
+ if (locked_tcb) {
+ SCTP_TCB_UNLOCK(locked_tcb);
+ }
+ return (NULL);
+ } else {
+ /* remaining chunks are HMAC checked */
+ stcb->asoc.authenticated = 1;
+ }
+ }
+ }
+ if (stcb == NULL) {
+ /* no association, so it's out of the blue... */
+ sctp_handle_ootb(m, iphlen, *offset, sh, inp, NULL,
+ vrf_id, port);
+ *offset = length;
+ if (locked_tcb) {
+ SCTP_TCB_UNLOCK(locked_tcb);
+ }
+ return (NULL);
+ }
+ asoc = &stcb->asoc;
+ /* ABORT and SHUTDOWN can use either v_tag... */
+ if ((ch->chunk_type == SCTP_ABORT_ASSOCIATION) ||
+ (ch->chunk_type == SCTP_SHUTDOWN_COMPLETE) ||
+ (ch->chunk_type == SCTP_PACKET_DROPPED)) {
+ if ((vtag_in == asoc->my_vtag) ||
+ ((ch->chunk_flags & SCTP_HAD_NO_TCB) &&
+ (vtag_in == asoc->peer_vtag))) {
+ /* this is valid */
+ } else {
+ /* drop this packet... */
+ SCTP_STAT_INCR(sctps_badvtag);
+ if (locked_tcb) {
+ SCTP_TCB_UNLOCK(locked_tcb);
+ }
+ return (NULL);
+ }
+ } else if (ch->chunk_type == SCTP_SHUTDOWN_ACK) {
+ if (vtag_in != asoc->my_vtag) {
+ /*
+ * this could be a stale SHUTDOWN-ACK or the
+ * peer never got the SHUTDOWN-COMPLETE and
+ * is still hung; we have started a new asoc
+ * but it won't complete until the shutdown
+ * is completed
+ */
+ if (locked_tcb) {
+ SCTP_TCB_UNLOCK(locked_tcb);
+ }
+ sctp_handle_ootb(m, iphlen, *offset, sh, inp,
+ NULL, vrf_id, port);
+ return (NULL);
+ }
+ } else {
+ /* for all other chunks, vtag must match */
+ if (vtag_in != asoc->my_vtag) {
+ /* invalid vtag... */
+ SCTPDBG(SCTP_DEBUG_INPUT3,
+ "invalid vtag: %xh, expect %xh\n",
+ vtag_in, asoc->my_vtag);
+ SCTP_STAT_INCR(sctps_badvtag);
+ if (locked_tcb) {
+ SCTP_TCB_UNLOCK(locked_tcb);
+ }
+ *offset = length;
+ return (NULL);
+ }
+ }
+ } /* end if !SCTP_COOKIE_ECHO */
+ /*
+ * process all control chunks...
+ */
+ if (((ch->chunk_type == SCTP_SELECTIVE_ACK) ||
+ /* EY */
+ (ch->chunk_type == SCTP_NR_SELECTIVE_ACK) ||
+ (ch->chunk_type == SCTP_HEARTBEAT_REQUEST)) &&
+ (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED)) {
+ /* implied cookie-ack.. we must have lost the ack */
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
+ sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
+ stcb->asoc.overall_error_count,
+ 0,
+ SCTP_FROM_SCTP_INPUT,
+ __LINE__);
+ }
+ stcb->asoc.overall_error_count = 0;
+ sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch, stcb,
+ *netp);
+ }
+process_control_chunks:
+ while (IS_SCTP_CONTROL(ch)) {
+ /* validate chunk length */
+ chk_length = ntohs(ch->chunk_length);
+ SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_process_control: processing a chunk type=%u, len=%u\n",
+ ch->chunk_type, chk_length);
+ SCTP_LTRACE_CHK(inp, stcb, ch->chunk_type, chk_length);
+ if (chk_length < sizeof(*ch) ||
+ (*offset + (int)chk_length) > length) {
+ *offset = length;
+ if (locked_tcb) {
+ SCTP_TCB_UNLOCK(locked_tcb);
+ }
+ return (NULL);
+ }
+ SCTP_STAT_INCR_COUNTER64(sctps_incontrolchunks);
+ /*
+		 * INIT-ACK only gets the init-ack "header" portion,
+		 * because we don't have to process the peer's COOKIE. All
+		 * others get a complete chunk.
+ */
+ if ((ch->chunk_type == SCTP_INITIATION_ACK) ||
+ (ch->chunk_type == SCTP_INITIATION)) {
+ /* get an init-ack chunk */
+ ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
+ sizeof(struct sctp_init_ack_chunk), chunk_buf);
+ if (ch == NULL) {
+ *offset = length;
+ if (locked_tcb) {
+ SCTP_TCB_UNLOCK(locked_tcb);
+ }
+ return (NULL);
+ }
+ } else {
+ /* For cookies and all other chunks. */
+ if (chk_length > sizeof(chunk_buf)) {
+ /*
+				 * use just the size of the chunk buffer so
+				 * the front part of our chunks fits in
+				 * contiguous space up to the chunk buffer
+				 * size (508 bytes). Chunks that need more
+				 * than that must use the sctp_m_getptr()
+				 * function or other means (e.g. know how
+				 * to parse mbuf chains). Cookies do this
+				 * already.
+ */
+ ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
+ (sizeof(chunk_buf) - 4),
+ chunk_buf);
+ if (ch == NULL) {
+ *offset = length;
+ if (locked_tcb) {
+ SCTP_TCB_UNLOCK(locked_tcb);
+ }
+ return (NULL);
+ }
+ } else {
+ /* We can fit it all */
+ ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
+ chk_length, chunk_buf);
+ if (ch == NULL) {
+ SCTP_PRINTF("sctp_process_control: Can't get the all data....\n");
+ *offset = length;
+ if (locked_tcb) {
+ SCTP_TCB_UNLOCK(locked_tcb);
+ }
+ return (NULL);
+ }
+ }
+ }
+ num_chunks++;
+ /* Save off the last place we got a control from */
+ if (stcb != NULL) {
+ if (((netp != NULL) && (*netp != NULL)) || (ch->chunk_type == SCTP_ASCONF)) {
+ /*
+ * allow last_control to be NULL if
+ * ASCONF... ASCONF processing will find the
+ * right net later
+ */
+ if ((netp != NULL) && (*netp != NULL))
+ stcb->asoc.last_control_chunk_from = *netp;
+ }
+ }
+#ifdef SCTP_AUDITING_ENABLED
+ sctp_audit_log(0xB0, ch->chunk_type);
+#endif
+
+ /* check to see if this chunk required auth, but isn't */
+ if ((stcb != NULL) &&
+ !SCTP_BASE_SYSCTL(sctp_auth_disable) &&
+ sctp_auth_is_required_chunk(ch->chunk_type, stcb->asoc.local_auth_chunks) &&
+ !stcb->asoc.authenticated) {
+ /* "silently" ignore */
+ SCTP_STAT_INCR(sctps_recvauthmissing);
+ goto next_chunk;
+ }
+ switch (ch->chunk_type) {
+ case SCTP_INITIATION:
+ /* must be first and only chunk */
+ SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_INIT\n");
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
+ /* We are not interested anymore? */
+ if ((stcb) && (stcb->asoc.total_output_queue_size)) {
+ /*
+ * collision case where we are
+ * sending to them too
+ */
+ ;
+ } else {
+ if (locked_tcb) {
+ SCTP_TCB_UNLOCK(locked_tcb);
+ }
+ *offset = length;
+ return (NULL);
+ }
+ }
+ if ((chk_length > SCTP_LARGEST_INIT_ACCEPTED) ||
+ (num_chunks > 1) ||
+ (SCTP_BASE_SYSCTL(sctp_strict_init) && (length - *offset > (int)SCTP_SIZE32(chk_length)))) {
+ *offset = length;
+ if (locked_tcb) {
+ SCTP_TCB_UNLOCK(locked_tcb);
+ }
+ return (NULL);
+ }
+ if ((stcb != NULL) &&
+ (SCTP_GET_STATE(&stcb->asoc) ==
+ SCTP_STATE_SHUTDOWN_ACK_SENT)) {
+ sctp_send_shutdown_ack(stcb,
+ stcb->asoc.primary_destination);
+ *offset = length;
+ sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC, SCTP_SO_NOT_LOCKED);
+ if (locked_tcb) {
+ SCTP_TCB_UNLOCK(locked_tcb);
+ }
+ return (NULL);
+ }
+ if (netp) {
+ sctp_handle_init(m, iphlen, *offset, sh,
+ (struct sctp_init_chunk *)ch, inp,
+ stcb, *netp, &abort_no_unlock, vrf_id, port);
+ }
+ if (abort_no_unlock)
+ return (NULL);
+
+ *offset = length;
+ if (locked_tcb) {
+ SCTP_TCB_UNLOCK(locked_tcb);
+ }
+ return (NULL);
+ break;
+ case SCTP_PAD_CHUNK:
+ break;
+ case SCTP_INITIATION_ACK:
+ /* must be first and only chunk */
+ SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_INIT-ACK\n");
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
+ /* We are not interested anymore */
+ if ((stcb) && (stcb->asoc.total_output_queue_size)) {
+ ;
+ } else {
+ if (locked_tcb != stcb) {
+ /* Very unlikely */
+ SCTP_TCB_UNLOCK(locked_tcb);
+ }
+ *offset = length;
+ if (stcb) {
+#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+ so = SCTP_INP_SO(inp);
+ atomic_add_int(&stcb->asoc.refcnt, 1);
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_SOCKET_LOCK(so, 1);
+ SCTP_TCB_LOCK(stcb);
+ atomic_subtract_int(&stcb->asoc.refcnt, 1);
+#endif
+ (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_27);
+#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+ SCTP_SOCKET_UNLOCK(so, 1);
+#endif
+ }
+ return (NULL);
+ }
+ }
+ if ((num_chunks > 1) ||
+ (SCTP_BASE_SYSCTL(sctp_strict_init) && (length - *offset > (int)SCTP_SIZE32(chk_length)))) {
+ *offset = length;
+ if (locked_tcb) {
+ SCTP_TCB_UNLOCK(locked_tcb);
+ }
+ return (NULL);
+ }
+ if ((netp) && (*netp)) {
+ ret = sctp_handle_init_ack(m, iphlen, *offset, sh,
+ (struct sctp_init_ack_chunk *)ch, stcb, *netp, &abort_no_unlock, vrf_id);
+ } else {
+ ret = -1;
+ }
+ /*
+ * Special case, I must call the output routine to
+ * get the cookie echoed
+ */
+ if (abort_no_unlock)
+ return (NULL);
+
+ if ((stcb) && ret == 0)
+ sctp_chunk_output(stcb->sctp_ep, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC, SCTP_SO_NOT_LOCKED);
+ *offset = length;
+ if (locked_tcb) {
+ SCTP_TCB_UNLOCK(locked_tcb);
+ }
+ return (NULL);
+ break;
+ case SCTP_SELECTIVE_ACK:
+ {
+ struct sctp_sack_chunk *sack;
+ int abort_now = 0;
+ uint32_t a_rwnd, cum_ack;
+ uint16_t num_seg, num_dup;
+ uint8_t flags;
+ int offset_seg, offset_dup;
+ int nonce_sum_flag;
+
+ SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SACK\n");
+ SCTP_STAT_INCR(sctps_recvsacks);
+ if (stcb == NULL) {
+ SCTPDBG(SCTP_DEBUG_INDATA1, "No stcb when processing SACK chunk\n");
+ break;
+ }
+ if (chk_length < sizeof(struct sctp_sack_chunk)) {
+ SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size on SACK chunk, too small\n");
+ break;
+ }
+ if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) {
+ /*-
+ * If we have sent a shutdown-ack, we will pay no
+ * attention to a sack sent in to us since
+ * we don't care anymore.
+ */
+ break;
+ }
+ sack = (struct sctp_sack_chunk *)ch;
+ flags = ch->chunk_flags;
+ nonce_sum_flag = flags & SCTP_SACK_NONCE_SUM;
+ cum_ack = ntohl(sack->sack.cum_tsn_ack);
+ num_seg = ntohs(sack->sack.num_gap_ack_blks);
+ num_dup = ntohs(sack->sack.num_dup_tsns);
+ a_rwnd = (uint32_t) ntohl(sack->sack.a_rwnd);
+ if (sizeof(struct sctp_sack_chunk) +
+ num_seg * sizeof(struct sctp_gap_ack_block) +
+ num_dup * sizeof(uint32_t) != chk_length) {
+ SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size of SACK chunk\n");
+ break;
+ }
+ offset_seg = *offset + sizeof(struct sctp_sack_chunk);
+ offset_dup = offset_seg + num_seg * sizeof(struct sctp_gap_ack_block);
+ SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SACK process cum_ack:%x num_seg:%d a_rwnd:%d\n",
+ cum_ack, num_seg, a_rwnd);
+ stcb->asoc.seen_a_sack_this_pkt = 1;
+ if ((stcb->asoc.pr_sctp_cnt == 0) &&
+ (num_seg == 0) &&
+ ((compare_with_wrap(cum_ack, stcb->asoc.last_acked_seq, MAX_TSN)) ||
+ (cum_ack == stcb->asoc.last_acked_seq)) &&
+ (stcb->asoc.saw_sack_with_frags == 0) &&
+ (stcb->asoc.saw_sack_with_nr_frags == 0) &&
+ (!TAILQ_EMPTY(&stcb->asoc.sent_queue))
+ ) {
+ /*
+					 * We have a SIMPLE sack having no
+					 * prior segments and data on the sent
+					 * queue to be acked. Use the
+ * faster path sack processing. We
+ * also allow window update sacks
+ * with no missing segments to go
+ * this way too.
+ */
+ sctp_express_handle_sack(stcb, cum_ack, a_rwnd, nonce_sum_flag,
+ &abort_now);
+ } else {
+ if (netp && *netp)
+ sctp_handle_sack(m, offset_seg, offset_dup,
+ stcb, *netp,
+ num_seg, 0, num_dup, &abort_now, flags,
+ cum_ack, a_rwnd);
+ }
+ if (abort_now) {
+ /* ABORT signal from sack processing */
+ *offset = length;
+ return (NULL);
+ }
+ if (TAILQ_EMPTY(&stcb->asoc.send_queue) &&
+ TAILQ_EMPTY(&stcb->asoc.sent_queue) &&
+ (stcb->asoc.stream_queue_cnt == 0)) {
+ sctp_ulp_notify(SCTP_NOTIFY_SENDER_DRY, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
+ }
+ }
+ break;
+ /*
+ * EY - nr_sack: If the received chunk is an
+ * nr_sack chunk
+ */
+ case SCTP_NR_SELECTIVE_ACK:
+ {
+ struct sctp_nr_sack_chunk *nr_sack;
+ int abort_now = 0;
+ uint32_t a_rwnd, cum_ack;
+ uint16_t num_seg, num_nr_seg, num_dup;
+ uint8_t flags;
+ int offset_seg, offset_dup;
+ int nonce_sum_flag;
+
+ SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_NR_SACK\n");
+ SCTP_STAT_INCR(sctps_recvsacks);
+ if (stcb == NULL) {
+ SCTPDBG(SCTP_DEBUG_INDATA1, "No stcb when processing NR-SACK chunk\n");
+ break;
+ }
+ if ((stcb->asoc.sctp_nr_sack_on_off == 0) ||
+ (stcb->asoc.peer_supports_nr_sack == 0)) {
+ goto unknown_chunk;
+ }
+ if (chk_length < sizeof(struct sctp_nr_sack_chunk)) {
+ SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size on NR-SACK chunk, too small\n");
+ break;
+ }
+ if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) {
+ /*-
+ * If we have sent a shutdown-ack, we will pay no
+ * attention to a sack sent in to us since
+ * we don't care anymore.
+ */
+ break;
+ }
+ nr_sack = (struct sctp_nr_sack_chunk *)ch;
+ flags = ch->chunk_flags;
+ nonce_sum_flag = flags & SCTP_SACK_NONCE_SUM;
+
+ cum_ack = ntohl(nr_sack->nr_sack.cum_tsn_ack);
+ num_seg = ntohs(nr_sack->nr_sack.num_gap_ack_blks);
+ num_nr_seg = ntohs(nr_sack->nr_sack.num_nr_gap_ack_blks);
+ num_dup = ntohs(nr_sack->nr_sack.num_dup_tsns);
+ a_rwnd = (uint32_t) ntohl(nr_sack->nr_sack.a_rwnd);
+ if (sizeof(struct sctp_nr_sack_chunk) +
+ (num_seg + num_nr_seg) * sizeof(struct sctp_gap_ack_block) +
+ num_dup * sizeof(uint32_t) != chk_length) {
+ SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size of NR_SACK chunk\n");
+ break;
+ }
+ offset_seg = *offset + sizeof(struct sctp_nr_sack_chunk);
+ offset_dup = offset_seg + num_seg * sizeof(struct sctp_gap_ack_block);
+ SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_NR_SACK process cum_ack:%x num_seg:%d a_rwnd:%d\n",
+ cum_ack, num_seg, a_rwnd);
+ stcb->asoc.seen_a_sack_this_pkt = 1;
+ if ((stcb->asoc.pr_sctp_cnt == 0) &&
+ (num_seg == 0) && (num_nr_seg == 0) &&
+ ((compare_with_wrap(cum_ack, stcb->asoc.last_acked_seq, MAX_TSN)) ||
+ (cum_ack == stcb->asoc.last_acked_seq)) &&
+ (stcb->asoc.saw_sack_with_frags == 0) &&
+ (stcb->asoc.saw_sack_with_nr_frags == 0) &&
+ (!TAILQ_EMPTY(&stcb->asoc.sent_queue))) {
+ /*
+ * We have a SIMPLE sack with no
+ * prior segments and data on the
+ * sent queue to be acked. Use the faster
+ * path sack processing. We also
+ * allow window update sacks with no
+ * missing segments to go this way
+ * too.
+ */
+ sctp_express_handle_sack(stcb, cum_ack, a_rwnd, nonce_sum_flag,
+ &abort_now);
+ } else {
+ if (netp && *netp)
+ sctp_handle_sack(m, offset_seg, offset_dup,
+ stcb, *netp,
+ num_seg, num_nr_seg, num_dup, &abort_now, flags,
+ cum_ack, a_rwnd);
+ }
+ if (abort_now) {
+ /* ABORT signal from sack processing */
+ *offset = length;
+ return (NULL);
+ }
+ if (TAILQ_EMPTY(&stcb->asoc.send_queue) &&
+ TAILQ_EMPTY(&stcb->asoc.sent_queue) &&
+ (stcb->asoc.stream_queue_cnt == 0)) {
+ sctp_ulp_notify(SCTP_NOTIFY_SENDER_DRY, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
+ }
+ }
+ break;
+
+ case SCTP_HEARTBEAT_REQUEST:
+ SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_HEARTBEAT\n");
+ if ((stcb) && netp && *netp) {
+ SCTP_STAT_INCR(sctps_recvheartbeat);
+ sctp_send_heartbeat_ack(stcb, m, *offset,
+ chk_length, *netp);
+
+ /* He's alive so give him credit */
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
+ sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
+ stcb->asoc.overall_error_count,
+ 0,
+ SCTP_FROM_SCTP_INPUT,
+ __LINE__);
+ }
+ stcb->asoc.overall_error_count = 0;
+ }
+ break;
+ case SCTP_HEARTBEAT_ACK:
+ SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_HEARTBEAT-ACK\n");
+ if ((stcb == NULL) || (chk_length != sizeof(struct sctp_heartbeat_chunk))) {
+ /* It's not ours */
+ *offset = length;
+ if (locked_tcb) {
+ SCTP_TCB_UNLOCK(locked_tcb);
+ }
+ return (NULL);
+ }
+ /* He's alive so give him credit */
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
+ sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
+ stcb->asoc.overall_error_count,
+ 0,
+ SCTP_FROM_SCTP_INPUT,
+ __LINE__);
+ }
+ stcb->asoc.overall_error_count = 0;
+ SCTP_STAT_INCR(sctps_recvheartbeatack);
+ if (netp && *netp)
+ sctp_handle_heartbeat_ack((struct sctp_heartbeat_chunk *)ch,
+ stcb, *netp);
+ break;
+ case SCTP_ABORT_ASSOCIATION:
+ SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ABORT, stcb %p\n",
+ stcb);
+ if ((stcb) && netp && *netp)
+ sctp_handle_abort((struct sctp_abort_chunk *)ch,
+ stcb, *netp);
+ *offset = length;
+ return (NULL);
+ break;
+ case SCTP_SHUTDOWN:
+ SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SHUTDOWN, stcb %p\n",
+ stcb);
+ if ((stcb == NULL) || (chk_length != sizeof(struct sctp_shutdown_chunk))) {
+ *offset = length;
+ if (locked_tcb) {
+ SCTP_TCB_UNLOCK(locked_tcb);
+ }
+ return (NULL);
+ }
+ if (netp && *netp) {
+ int abort_flag = 0;
+
+ sctp_handle_shutdown((struct sctp_shutdown_chunk *)ch,
+ stcb, *netp, &abort_flag);
+ if (abort_flag) {
+ *offset = length;
+ return (NULL);
+ }
+ }
+ break;
+ case SCTP_SHUTDOWN_ACK:
+ SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SHUTDOWN-ACK, stcb %p\n", stcb);
+ if ((stcb) && (netp) && (*netp))
+ sctp_handle_shutdown_ack((struct sctp_shutdown_ack_chunk *)ch, stcb, *netp);
+ *offset = length;
+ return (NULL);
+ break;
+
+ case SCTP_OPERATION_ERROR:
+ SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_OP-ERR\n");
+ if ((stcb) && netp && *netp && sctp_handle_error(ch, stcb, *netp) < 0) {
+
+ *offset = length;
+ return (NULL);
+ }
+ break;
+ case SCTP_COOKIE_ECHO:
+ SCTPDBG(SCTP_DEBUG_INPUT3,
+ "SCTP_COOKIE-ECHO, stcb %p\n", stcb);
+ if ((stcb) && (stcb->asoc.total_output_queue_size)) {
+ ;
+ } else {
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
+ /* We are not interested anymore */
+ abend:
+ if (stcb) {
+ SCTP_TCB_UNLOCK(stcb);
+ }
+ *offset = length;
+ return (NULL);
+ }
+ }
+ /*
+ * First, are we accepting? We check this again here
+ * since it is possible that a previous endpoint that
+ * WAS listening responded to an INIT-ACK and then
+ * closed. We opened and bound, and are now no
+ * longer listening.
+ */
+
+ if ((stcb == NULL) && (inp->sctp_socket->so_qlen >= inp->sctp_socket->so_qlimit)) {
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
+ (SCTP_BASE_SYSCTL(sctp_abort_if_one_2_one_hits_limit))) {
+ struct mbuf *oper;
+ struct sctp_paramhdr *phdr;
+
+ oper = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr),
+ 0, M_DONTWAIT, 1, MT_DATA);
+ if (oper) {
+ SCTP_BUF_LEN(oper) =
+ sizeof(struct sctp_paramhdr);
+ phdr = mtod(oper,
+ struct sctp_paramhdr *);
+ phdr->param_type =
+ htons(SCTP_CAUSE_OUT_OF_RESC);
+ phdr->param_length =
+ htons(sizeof(struct sctp_paramhdr));
+ }
+ sctp_abort_association(inp, stcb, m,
+ iphlen, sh, oper, vrf_id, port);
+ }
+ *offset = length;
+ return (NULL);
+ } else {
+ struct mbuf *ret_buf;
+ struct sctp_inpcb *linp;
+
+ if (stcb) {
+ linp = NULL;
+ } else {
+ linp = inp;
+ }
+
+ if (linp) {
+ SCTP_ASOC_CREATE_LOCK(linp);
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
+ (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
+ SCTP_ASOC_CREATE_UNLOCK(linp);
+ goto abend;
+ }
+ }
+ if (netp) {
+ ret_buf =
+ sctp_handle_cookie_echo(m, iphlen,
+ *offset, sh,
+ (struct sctp_cookie_echo_chunk *)ch,
+ &inp, &stcb, netp,
+ auth_skipped,
+ auth_offset,
+ auth_len,
+ &locked_tcb,
+ vrf_id,
+ port);
+ } else {
+ ret_buf = NULL;
+ }
+ if (linp) {
+ SCTP_ASOC_CREATE_UNLOCK(linp);
+ }
+ if (ret_buf == NULL) {
+ if (locked_tcb) {
+ SCTP_TCB_UNLOCK(locked_tcb);
+ }
+ SCTPDBG(SCTP_DEBUG_INPUT3,
+ "GAK, null buffer\n");
+ auth_skipped = 0;
+ *offset = length;
+ return (NULL);
+ }
+ /* if AUTH skipped, see if it verified... */
+ if (auth_skipped) {
+ got_auth = 1;
+ auth_skipped = 0;
+ }
+ if (!TAILQ_EMPTY(&stcb->asoc.sent_queue)) {
+ /*
+ * Restart the timer if we have
+ * pending data
+ */
+ struct sctp_tmit_chunk *chk;
+
+ chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
+ if (chk) {
+ sctp_timer_start(SCTP_TIMER_TYPE_SEND,
+ stcb->sctp_ep, stcb,
+ chk->whoTo);
+ }
+ }
+ }
+ break;
+ case SCTP_COOKIE_ACK:
+ SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_COOKIE-ACK, stcb %p\n", stcb);
+ if ((stcb == NULL) || chk_length != sizeof(struct sctp_cookie_ack_chunk)) {
+ if (locked_tcb) {
+ SCTP_TCB_UNLOCK(locked_tcb);
+ }
+ return (NULL);
+ }
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
+ /* We are not interested anymore */
+ if ((stcb) && (stcb->asoc.total_output_queue_size)) {
+ ;
+ } else if (stcb) {
+#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+ so = SCTP_INP_SO(inp);
+ atomic_add_int(&stcb->asoc.refcnt, 1);
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_SOCKET_LOCK(so, 1);
+ SCTP_TCB_LOCK(stcb);
+ atomic_subtract_int(&stcb->asoc.refcnt, 1);
+#endif
+ (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_27);
+#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+ SCTP_SOCKET_UNLOCK(so, 1);
+#endif
+ *offset = length;
+ return (NULL);
+ }
+ }
+ /* He's alive so give him credit */
+ if ((stcb) && netp && *netp) {
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
+ sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
+ stcb->asoc.overall_error_count,
+ 0,
+ SCTP_FROM_SCTP_INPUT,
+ __LINE__);
+ }
+ stcb->asoc.overall_error_count = 0;
+ sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch, stcb, *netp);
+ }
+ break;
+ case SCTP_ECN_ECHO:
+ SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ECN-ECHO\n");
+ /* He's alive so give him credit */
+ if ((stcb == NULL) || (chk_length != sizeof(struct sctp_ecne_chunk))) {
+ /* It's not ours */
+ if (locked_tcb) {
+ SCTP_TCB_UNLOCK(locked_tcb);
+ }
+ *offset = length;
+ return (NULL);
+ }
+ if (stcb) {
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
+ sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
+ stcb->asoc.overall_error_count,
+ 0,
+ SCTP_FROM_SCTP_INPUT,
+ __LINE__);
+ }
+ stcb->asoc.overall_error_count = 0;
+ sctp_handle_ecn_echo((struct sctp_ecne_chunk *)ch,
+ stcb);
+ }
+ break;
+ case SCTP_ECN_CWR:
+ SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ECN-CWR\n");
+ /* He's alive so give him credit */
+ if ((stcb == NULL) || (chk_length != sizeof(struct sctp_cwr_chunk))) {
+ /* It's not ours */
+ if (locked_tcb) {
+ SCTP_TCB_UNLOCK(locked_tcb);
+ }
+ *offset = length;
+ return (NULL);
+ }
+ if (stcb) {
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
+ sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
+ stcb->asoc.overall_error_count,
+ 0,
+ SCTP_FROM_SCTP_INPUT,
+ __LINE__);
+ }
+ stcb->asoc.overall_error_count = 0;
+ sctp_handle_ecn_cwr((struct sctp_cwr_chunk *)ch, stcb);
+ }
+ break;
+ case SCTP_SHUTDOWN_COMPLETE:
+ SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SHUTDOWN-COMPLETE, stcb %p\n", stcb);
+ /* must be first and only chunk */
+ if ((num_chunks > 1) ||
+ (length - *offset > (int)SCTP_SIZE32(chk_length))) {
+ *offset = length;
+ if (locked_tcb) {
+ SCTP_TCB_UNLOCK(locked_tcb);
+ }
+ return (NULL);
+ }
+ if ((stcb) && netp && *netp) {
+ sctp_handle_shutdown_complete((struct sctp_shutdown_complete_chunk *)ch,
+ stcb, *netp);
+ }
+ *offset = length;
+ return (NULL);
+ break;
+ case SCTP_ASCONF:
+ SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ASCONF\n");
+ /* He's alive so give him credit */
+ if (stcb) {
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
+ sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
+ stcb->asoc.overall_error_count,
+ 0,
+ SCTP_FROM_SCTP_INPUT,
+ __LINE__);
+ }
+ stcb->asoc.overall_error_count = 0;
+ sctp_handle_asconf(m, *offset,
+ (struct sctp_asconf_chunk *)ch, stcb, asconf_cnt == 0);
+ asconf_cnt++;
+ }
+ break;
+ case SCTP_ASCONF_ACK:
+ SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ASCONF-ACK\n");
+ if (chk_length < sizeof(struct sctp_asconf_ack_chunk)) {
+ /* It's not ours */
+ if (locked_tcb) {
+ SCTP_TCB_UNLOCK(locked_tcb);
+ }
+ *offset = length;
+ return (NULL);
+ }
+ if ((stcb) && netp && *netp) {
+ /* He's alive so give him credit */
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
+ sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
+ stcb->asoc.overall_error_count,
+ 0,
+ SCTP_FROM_SCTP_INPUT,
+ __LINE__);
+ }
+ stcb->asoc.overall_error_count = 0;
+ sctp_handle_asconf_ack(m, *offset,
+ (struct sctp_asconf_ack_chunk *)ch, stcb, *netp, &abort_no_unlock);
+ if (abort_no_unlock)
+ return (NULL);
+ }
+ break;
+ case SCTP_FORWARD_CUM_TSN:
+ SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_FWD-TSN\n");
+ if (chk_length < sizeof(struct sctp_forward_tsn_chunk)) {
+ /* It's not ours */
+ if (locked_tcb) {
+ SCTP_TCB_UNLOCK(locked_tcb);
+ }
+ *offset = length;
+ return (NULL);
+ }
+ /* He's alive so give him credit */
+ if (stcb) {
+ int abort_flag = 0;
+
+ stcb->asoc.overall_error_count = 0;
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
+ sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
+ stcb->asoc.overall_error_count,
+ 0,
+ SCTP_FROM_SCTP_INPUT,
+ __LINE__);
+ }
+ *fwd_tsn_seen = 1;
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
+ /* We are not interested anymore */
+#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+ so = SCTP_INP_SO(inp);
+ atomic_add_int(&stcb->asoc.refcnt, 1);
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_SOCKET_LOCK(so, 1);
+ SCTP_TCB_LOCK(stcb);
+ atomic_subtract_int(&stcb->asoc.refcnt, 1);
+#endif
+ (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_29);
+#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+ SCTP_SOCKET_UNLOCK(so, 1);
+#endif
+ *offset = length;
+ return (NULL);
+ }
+ sctp_handle_forward_tsn(stcb,
+ (struct sctp_forward_tsn_chunk *)ch, &abort_flag, m, *offset);
+ if (abort_flag) {
+ *offset = length;
+ return (NULL);
+ } else {
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
+ sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
+ stcb->asoc.overall_error_count,
+ 0,
+ SCTP_FROM_SCTP_INPUT,
+ __LINE__);
+ }
+ stcb->asoc.overall_error_count = 0;
+ }
+
+ }
+ break;
+ case SCTP_STREAM_RESET:
+ SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_STREAM_RESET\n");
+ if (((stcb == NULL) || (ch == NULL) || (chk_length < sizeof(struct sctp_stream_reset_tsn_req)))) {
+ /* It's not ours */
+ if (locked_tcb) {
+ SCTP_TCB_UNLOCK(locked_tcb);
+ }
+ *offset = length;
+ return (NULL);
+ }
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
+ /* We are not interested anymore */
+#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+ so = SCTP_INP_SO(inp);
+ atomic_add_int(&stcb->asoc.refcnt, 1);
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_SOCKET_LOCK(so, 1);
+ SCTP_TCB_LOCK(stcb);
+ atomic_subtract_int(&stcb->asoc.refcnt, 1);
+#endif
+ (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_30);
+#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+ SCTP_SOCKET_UNLOCK(so, 1);
+#endif
+ *offset = length;
+ return (NULL);
+ }
+ if (stcb->asoc.peer_supports_strreset == 0) {
+ /*
+ * Hmm, the peer should have announced this,
+ * but we will turn it on since it is
+ * sending us a stream reset.
+ */
+ stcb->asoc.peer_supports_strreset = 1;
+ }
+ if (sctp_handle_stream_reset(stcb, m, *offset, (struct sctp_stream_reset_out_req *)ch)) {
+ /* stop processing */
+ *offset = length;
+ return (NULL);
+ }
+ break;
+ case SCTP_PACKET_DROPPED:
+ SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_PACKET_DROPPED\n");
+ /* re-get it all please */
+ if (chk_length < sizeof(struct sctp_pktdrop_chunk)) {
+ /* It's not ours */
+ if (locked_tcb) {
+ SCTP_TCB_UNLOCK(locked_tcb);
+ }
+ *offset = length;
+ return (NULL);
+ }
+ if (ch && (stcb) && netp && (*netp)) {
+ sctp_handle_packet_dropped((struct sctp_pktdrop_chunk *)ch,
+ stcb, *netp,
+ min(chk_length, (sizeof(chunk_buf) - 4)));
+
+ }
+ break;
+
+ case SCTP_AUTHENTICATION:
+ SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_AUTHENTICATION\n");
+ if (SCTP_BASE_SYSCTL(sctp_auth_disable))
+ goto unknown_chunk;
+
+ if (stcb == NULL) {
+ /* save the first AUTH for later processing */
+ if (auth_skipped == 0) {
+ auth_offset = *offset;
+ auth_len = chk_length;
+ auth_skipped = 1;
+ }
+ /* skip this chunk (temporarily) */
+ goto next_chunk;
+ }
+ if ((chk_length < (sizeof(struct sctp_auth_chunk))) ||
+ (chk_length > (sizeof(struct sctp_auth_chunk) +
+ SCTP_AUTH_DIGEST_LEN_MAX))) {
+ /* It's not ours */
+ if (locked_tcb) {
+ SCTP_TCB_UNLOCK(locked_tcb);
+ }
+ *offset = length;
+ return (NULL);
+ }
+ if (got_auth == 1) {
+ /* skip this chunk... it's already auth'd */
+ goto next_chunk;
+ }
+ got_auth = 1;
+ if ((ch == NULL) || sctp_handle_auth(stcb, (struct sctp_auth_chunk *)ch,
+ m, *offset)) {
+ /* auth HMAC failed so dump the packet */
+ *offset = length;
+ return (stcb);
+ } else {
+ /* remaining chunks are HMAC checked */
+ stcb->asoc.authenticated = 1;
+ }
+ break;
+
+ default:
+ unknown_chunk:
+ /* it's an unknown chunk! */
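+ /*
+ * Per RFC 2960, the two high-order bits of an
+ * unrecognized chunk type select the action:
+ * 0x40 set means report the chunk in an
+ * operational error, 0x80 set means skip it
+ * and keep processing; with 0x80 clear the
+ * rest of the packet is discarded. The code
+ * below implements exactly that.
+ */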
+ if ((ch->chunk_type & 0x40) && (stcb != NULL)) {
+ struct mbuf *mm;
+ struct sctp_paramhdr *phd;
+
+ mm = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr),
+ 0, M_DONTWAIT, 1, MT_DATA);
+ if (mm) {
+ phd = mtod(mm, struct sctp_paramhdr *);
+ /*
+ * We cheat and use param type since
+ * we did not bother to define an
+ * error cause struct. They are the
+ * same basic format with different
+ * names.
+ */
+ phd->param_type = htons(SCTP_CAUSE_UNRECOG_CHUNK);
+ phd->param_length = htons(chk_length + sizeof(*phd));
+ SCTP_BUF_LEN(mm) = sizeof(*phd);
+ SCTP_BUF_NEXT(mm) = SCTP_M_COPYM(m, *offset, SCTP_SIZE32(chk_length),
+ M_DONTWAIT);
+ if (SCTP_BUF_NEXT(mm)) {
+#ifdef SCTP_MBUF_LOGGING
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
+ struct mbuf *mat;
+
+ mat = SCTP_BUF_NEXT(mm);
+ while (mat) {
+ if (SCTP_BUF_IS_EXTENDED(mat)) {
+ sctp_log_mb(mat, SCTP_MBUF_ICOPY);
+ }
+ mat = SCTP_BUF_NEXT(mat);
+ }
+ }
+#endif
+ sctp_queue_op_err(stcb, mm);
+ } else {
+ sctp_m_freem(mm);
+ }
+ }
+ }
+ if ((ch->chunk_type & 0x80) == 0) {
+ /* discard this packet */
+ *offset = length;
+ return (stcb);
+ } /* else skip this bad chunk and continue... */
+ break;
+ } /* switch (ch->chunk_type) */
+
+
+next_chunk:
+ /* get the next chunk */
+ *offset += SCTP_SIZE32(chk_length);
+ if (*offset >= length) {
+ /* no more data left in the mbuf chain */
+ break;
+ }
+ ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
+ sizeof(struct sctp_chunkhdr), chunk_buf);
+ if (ch == NULL) {
+ if (locked_tcb) {
+ SCTP_TCB_UNLOCK(locked_tcb);
+ }
+ *offset = length;
+ return (NULL);
+ }
+ } /* while */
+
+ if (asconf_cnt > 0 && stcb != NULL) {
+ sctp_send_asconf_ack(stcb);
+ }
+ return (stcb);
+}
+
+
+/*
+ * Process the ECN bits. Something is set, so we must look to see whether it
+ * is ECN(0), ECN(1), or CE.
+ */
+static void
+sctp_process_ecn_marked_a(struct sctp_tcb *stcb, struct sctp_nets *net,
+ uint8_t ecn_bits)
+{
+ if ((ecn_bits & SCTP_CE_BITS) == SCTP_CE_BITS) {
+ ;
+ } else if ((ecn_bits & SCTP_ECT1_BIT) == SCTP_ECT1_BIT) {
+ /*
+ * We only add to the nonce sum for ECT1; ECT0 does not
+ * change the NS bit (we have yet to find a way to
+ * send it).
+ */
+
+ /* ECN Nonce stuff */
+ stcb->asoc.receiver_nonce_sum++;
+ stcb->asoc.receiver_nonce_sum &= SCTP_SACK_NONCE_SUM;
+
+ /*
+ * Drag up the last_echo point if cumack is larger since we
+ * don't want the point falling way behind by more than
+ * 2^31 and then having it be incorrect.
+ */
+ if (compare_with_wrap(stcb->asoc.cumulative_tsn,
+ stcb->asoc.last_echo_tsn, MAX_TSN)) {
+ stcb->asoc.last_echo_tsn = stcb->asoc.cumulative_tsn;
+ }
+ } else if ((ecn_bits & SCTP_ECT0_BIT) == SCTP_ECT0_BIT) {
+ /*
+ * Drag up the last_echo point if cumack is larger since we
+ * don't want the point falling way behind by more than
+ * 2^31 and then having it be incorrect.
+ */
+ if (compare_with_wrap(stcb->asoc.cumulative_tsn,
+ stcb->asoc.last_echo_tsn, MAX_TSN)) {
+ stcb->asoc.last_echo_tsn = stcb->asoc.cumulative_tsn;
+ }
+ }
+}
+
+static void
+sctp_process_ecn_marked_b(struct sctp_tcb *stcb, struct sctp_nets *net,
+ uint32_t high_tsn, uint8_t ecn_bits)
+{
+ if ((ecn_bits & SCTP_CE_BITS) == SCTP_CE_BITS) {
+ /*
+ * we possibly must notify the sender that a congestion
+ * window reduction is in order. We do this by adding an ECNE
+ * chunk to the output chunk queue. The incoming CWR will
+ * remove this chunk.
+ */
+ if (compare_with_wrap(high_tsn, stcb->asoc.last_echo_tsn,
+ MAX_TSN)) {
+ /* Yep, we need to add a ECNE */
+ sctp_send_ecn_echo(stcb, net, high_tsn);
+ stcb->asoc.last_echo_tsn = high_tsn;
+ }
+ }
+}
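+
+/*
+ * Illustrative sketch (not compiled in): how the two ECN helpers above are
+ * driven from the IP header's TOS/ECN bits, mirroring the calls made from
+ * sctp_common_input_processing() below. The wrapper function is hypothetical.
+ */
+#if 0
+static void
+example_ecn_processing(struct sctp_tcb *stcb, struct sctp_nets *net,
+    struct ip *ip)
+{
+ uint32_t high_tsn = 0; /* filled in by data-chunk processing */
+ uint8_t ecn_bits = ip->ip_tos;
+
+ if (stcb->asoc.ecn_allowed &&
+     (ecn_bits & (SCTP_ECT0_BIT | SCTP_ECT1_BIT))) {
+ /* part 1: nonce-sum and last_echo bookkeeping */
+ sctp_process_ecn_marked_a(stcb, net, ecn_bits);
+ /* ... sctp_process_data() runs here and yields high_tsn ... */
+ /* part 2: queue an ECNE if CE was seen */
+ sctp_process_ecn_marked_b(stcb, net, high_tsn, ecn_bits);
+ }
+}
+#endif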
+
+#ifdef INVARIANTS
+#ifdef __GNUC__
+__attribute__((noinline))
+#endif
+ void
+ sctp_validate_no_locks(struct sctp_inpcb *inp)
+{
+ struct sctp_tcb *lstcb;
+
+ LIST_FOREACH(lstcb, &inp->sctp_asoc_list, sctp_tcblist) {
+ if (mtx_owned(&lstcb->tcb_mtx)) {
+ panic("Own lock on stcb at return from input");
+ }
+ }
+ if (mtx_owned(&inp->inp_create_mtx)) {
+ panic("Own create lock on inp");
+ }
+ if (mtx_owned(&inp->inp_mtx)) {
+ panic("Own inp lock on inp");
+ }
+}
+
+#endif
+
+/*
+ * common input chunk processing (v4 and v6)
+ */
+void
+sctp_common_input_processing(struct mbuf **mm, int iphlen, int offset,
+ int length, struct sctphdr *sh, struct sctp_chunkhdr *ch,
+ struct sctp_inpcb *inp, struct sctp_tcb *stcb, struct sctp_nets *net,
+ uint8_t ecn_bits, uint32_t vrf_id, uint16_t port)
+{
+ /*
+ * Control chunk processing
+ */
+ uint32_t high_tsn;
+ int fwd_tsn_seen = 0, data_processed = 0;
+ struct mbuf *m = *mm;
+ int abort_flag = 0;
+ int un_sent;
+
+ SCTP_STAT_INCR(sctps_recvdatagrams);
+#ifdef SCTP_AUDITING_ENABLED
+ sctp_audit_log(0xE0, 1);
+ sctp_auditing(0, inp, stcb, net);
+#endif
+
+ SCTPDBG(SCTP_DEBUG_INPUT1, "Ok, Common input processing called, m:%p iphlen:%d offset:%d length:%d stcb:%p\n",
+ m, iphlen, offset, length, stcb);
+ if (stcb) {
+ /* always clear this before beginning a packet */
+ stcb->asoc.authenticated = 0;
+ stcb->asoc.seen_a_sack_this_pkt = 0;
+ SCTPDBG(SCTP_DEBUG_INPUT1, "stcb:%p state:%x\n",
+ stcb, stcb->asoc.state);
+
+ if ((stcb->asoc.state & SCTP_STATE_WAS_ABORTED) ||
+ (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
+ /*-
+ * If we hit here, we had a ref count
+ * up when the assoc was aborted and the
+ * timer is clearing out the assoc; we should
+ * NOT respond to any packet. It's OOTB.
+ */
+ SCTP_TCB_UNLOCK(stcb);
+ sctp_handle_ootb(m, iphlen, offset, sh, inp, NULL,
+ vrf_id, port);
+ goto out_now;
+ }
+ }
+ if (IS_SCTP_CONTROL(ch)) {
+ /* process the control portion of the SCTP packet */
+ /* sa_ignore NO_NULL_CHK */
+ stcb = sctp_process_control(m, iphlen, &offset, length, sh, ch,
+ inp, stcb, &net, &fwd_tsn_seen, vrf_id, port);
+ if (stcb) {
+ /*
+ * This covers us if the cookie-echo was there and
+ * it changes our INP.
+ */
+ inp = stcb->sctp_ep;
+ if ((net) && (port)) {
+ if (net->port == 0) {
+ sctp_pathmtu_adjustment(inp, stcb, net, net->mtu - sizeof(struct udphdr));
+ }
+ net->port = port;
+ }
+ }
+ } else {
+ /*
+ * no control chunks, so pre-process DATA chunks (these
+ * checks are taken care of by control processing)
+ */
+
+ /*
+ * if DATA only packet, and auth is required, then punt...
+ * can't have authenticated without any AUTH (control)
+ * chunks
+ */
+ if ((stcb != NULL) &&
+ !SCTP_BASE_SYSCTL(sctp_auth_disable) &&
+ sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.local_auth_chunks)) {
+ /* "silently" ignore */
+ SCTP_STAT_INCR(sctps_recvauthmissing);
+ SCTP_TCB_UNLOCK(stcb);
+ goto out_now;
+ }
+ if (stcb == NULL) {
+ /* out of the blue DATA chunk */
+ sctp_handle_ootb(m, iphlen, offset, sh, inp, NULL,
+ vrf_id, port);
+ goto out_now;
+ }
+ if (stcb->asoc.my_vtag != ntohl(sh->v_tag)) {
+ /* v_tag mismatch! */
+ SCTP_STAT_INCR(sctps_badvtag);
+ SCTP_TCB_UNLOCK(stcb);
+ goto out_now;
+ }
+ }
+
+ if (stcb == NULL) {
+ /*
+ * no valid TCB for this packet, or we found it's a bad
+ * packet while processing control, or we're done with this
+ * packet (done or skip rest of data), so we drop it...
+ */
+ goto out_now;
+ }
+ /*
+ * DATA chunk processing
+ */
+ /* plow through the data chunks while length > offset */
+
+ /*
+ * Rest should be DATA only. Check authentication state if AUTH for
+ * DATA is required.
+ */
+ if ((length > offset) &&
+ (stcb != NULL) &&
+ !SCTP_BASE_SYSCTL(sctp_auth_disable) &&
+ sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.local_auth_chunks) &&
+ !stcb->asoc.authenticated) {
+ /* "silently" ignore */
+ SCTP_STAT_INCR(sctps_recvauthmissing);
+ SCTPDBG(SCTP_DEBUG_AUTH1,
+ "Data chunk requires AUTH, skipped\n");
+ goto trigger_send;
+ }
+ if (length > offset) {
+ int retval;
+
+ /*
+ * First check to make sure our state is correct. We would
+ * not get here unless we really did have a tag, so we don't
+ * abort if this happens, just dump the chunk silently.
+ */
+ switch (SCTP_GET_STATE(&stcb->asoc)) {
+ case SCTP_STATE_COOKIE_ECHOED:
+ /*
+ * We consider that data with valid tags in this state
+ * shows us the cookie-ack was lost. Imply it was
+ * there.
+ */
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
+ sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
+ stcb->asoc.overall_error_count,
+ 0,
+ SCTP_FROM_SCTP_INPUT,
+ __LINE__);
+ }
+ stcb->asoc.overall_error_count = 0;
+ sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch, stcb, net);
+ break;
+ case SCTP_STATE_COOKIE_WAIT:
+ /*
+ * We consider OOTB any data sent during asoc setup.
+ */
+ sctp_handle_ootb(m, iphlen, offset, sh, inp, NULL,
+ vrf_id, port);
+ SCTP_TCB_UNLOCK(stcb);
+ goto out_now;
+ /* sa_ignore NOTREACHED */
+ break;
+ case SCTP_STATE_EMPTY: /* should not happen */
+ case SCTP_STATE_INUSE: /* should not happen */
+ case SCTP_STATE_SHUTDOWN_RECEIVED: /* This is a peer error */
+ case SCTP_STATE_SHUTDOWN_ACK_SENT:
+ default:
+ SCTP_TCB_UNLOCK(stcb);
+ goto out_now;
+ /* sa_ignore NOTREACHED */
+ break;
+ case SCTP_STATE_OPEN:
+ case SCTP_STATE_SHUTDOWN_SENT:
+ break;
+ }
+ /* take care of ECN, part 1. */
+ if (stcb->asoc.ecn_allowed &&
+ (ecn_bits & (SCTP_ECT0_BIT | SCTP_ECT1_BIT))) {
+ sctp_process_ecn_marked_a(stcb, net, ecn_bits);
+ }
+ /* plow through the data chunks while length > offset */
+ retval = sctp_process_data(mm, iphlen, &offset, length, sh,
+ inp, stcb, net, &high_tsn);
+ if (retval == 2) {
+ /*
+ * The association aborted, NO UNLOCK needed since
+ * the association is destroyed.
+ */
+ goto out_now;
+ }
+ data_processed = 1;
+ if (retval == 0) {
+ /* take care of ecn part 2. */
+ if (stcb->asoc.ecn_allowed &&
+ (ecn_bits & (SCTP_ECT0_BIT | SCTP_ECT1_BIT))) {
+ sctp_process_ecn_marked_b(stcb, net, high_tsn,
+ ecn_bits);
+ }
+ }
+ /*
+ * Anything important needs to have been m_copy'ed in
+ * process_data
+ */
+ }
+ if ((data_processed == 0) && (fwd_tsn_seen)) {
+ int was_a_gap;
+ uint32_t highest_tsn;
+
+ if (compare_with_wrap(stcb->asoc.highest_tsn_inside_nr_map, stcb->asoc.highest_tsn_inside_map, MAX_TSN)) {
+ highest_tsn = stcb->asoc.highest_tsn_inside_nr_map;
+ } else {
+ highest_tsn = stcb->asoc.highest_tsn_inside_map;
+ }
+ was_a_gap = compare_with_wrap(highest_tsn, stcb->asoc.cumulative_tsn, MAX_TSN);
+ stcb->asoc.send_sack = 1;
+ sctp_sack_check(stcb, was_a_gap, &abort_flag);
+ if (abort_flag) {
+ /* Again, we aborted so NO UNLOCK needed */
+ goto out_now;
+ }
+ } else if (fwd_tsn_seen) {
+ stcb->asoc.send_sack = 1;
+ }
+ /* trigger send of any chunks in queue... */
+trigger_send:
+#ifdef SCTP_AUDITING_ENABLED
+ sctp_audit_log(0xE0, 2);
+ sctp_auditing(1, inp, stcb, net);
+#endif
+ SCTPDBG(SCTP_DEBUG_INPUT1,
+ "Check for chunk output prw:%d tqe:%d tf=%d\n",
+ stcb->asoc.peers_rwnd,
+ TAILQ_EMPTY(&stcb->asoc.control_send_queue),
+ stcb->asoc.total_flight);
+ un_sent = (stcb->asoc.total_output_queue_size - stcb->asoc.total_flight);
+
+ if (!TAILQ_EMPTY(&stcb->asoc.control_send_queue) ||
+ ((un_sent) &&
+ (stcb->asoc.peers_rwnd > 0 ||
+ (stcb->asoc.peers_rwnd <= 0 && stcb->asoc.total_flight == 0)))) {
+ SCTPDBG(SCTP_DEBUG_INPUT3, "Calling chunk OUTPUT\n");
+ sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC, SCTP_SO_NOT_LOCKED);
+ SCTPDBG(SCTP_DEBUG_INPUT3, "chunk OUTPUT returns\n");
+ }
+#ifdef SCTP_AUDITING_ENABLED
+ sctp_audit_log(0xE0, 3);
+ sctp_auditing(2, inp, stcb, net);
+#endif
+ SCTP_TCB_UNLOCK(stcb);
+out_now:
+#ifdef INVARIANTS
+ sctp_validate_no_locks(inp);
+#endif
+ return;
+}
+
+#if 0
+static void
+sctp_print_mbuf_chain(struct mbuf *m)
+{
+ for (; m; m = SCTP_BUF_NEXT(m)) {
+ printf("%p: m_len = %ld\n", m, SCTP_BUF_LEN(m));
+ if (SCTP_BUF_IS_EXTENDED(m))
+ printf("%p: extend_size = %d\n", m, SCTP_BUF_EXTEND_SIZE(m));
+ }
+}
+
+#endif
+
+void
+sctp_input_with_port(struct mbuf *i_pak, int off, uint16_t port)
+{
+#ifdef SCTP_MBUF_LOGGING
+ struct mbuf *mat;
+
+#endif
+ struct mbuf *m;
+ int iphlen;
+ uint32_t vrf_id = 0;
+ uint8_t ecn_bits;
+ struct ip *ip;
+ struct sctphdr *sh;
+ struct sctp_inpcb *inp = NULL;
+ struct sctp_nets *net;
+ struct sctp_tcb *stcb = NULL;
+ struct sctp_chunkhdr *ch;
+ int refcount_up = 0;
+ int length, mlen, offset;
+
+#if !defined(SCTP_WITH_NO_CSUM)
+ uint32_t check, calc_check;
+
+#endif
+
+ if (SCTP_GET_PKT_VRFID(i_pak, vrf_id)) {
+ SCTP_RELEASE_PKT(i_pak);
+ return;
+ }
+ mlen = SCTP_HEADER_LEN(i_pak);
+ iphlen = off;
+ m = SCTP_HEADER_TO_CHAIN(i_pak);
+
+ net = NULL;
+ SCTP_STAT_INCR(sctps_recvpackets);
+ SCTP_STAT_INCR_COUNTER64(sctps_inpackets);
+
+
+#ifdef SCTP_MBUF_LOGGING
+ /* Log in any input mbufs */
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
+ mat = m;
+ while (mat) {
+ if (SCTP_BUF_IS_EXTENDED(mat)) {
+ sctp_log_mb(mat, SCTP_MBUF_INPUT);
+ }
+ mat = SCTP_BUF_NEXT(mat);
+ }
+ }
+#endif
+#ifdef SCTP_PACKET_LOGGING
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
+ sctp_packet_log(m, mlen);
+#endif
+ /*
+ * Must take out the iphlen, since mlen expects this (only affects the
+ * loopback case)
+ */
+ mlen -= iphlen;
+
+ /*
+ * Get IP, SCTP, and first chunk header together in first mbuf.
+ */
+ ip = mtod(m, struct ip *);
+ offset = iphlen + sizeof(*sh) + sizeof(*ch);
+ if (SCTP_BUF_LEN(m) < offset) {
+ if ((m = m_pullup(m, offset)) == 0) {
+ SCTP_STAT_INCR(sctps_hdrops);
+ return;
+ }
+ ip = mtod(m, struct ip *);
+ }
+ /* validate mbuf chain length with IP payload length */
+ if (mlen < (SCTP_GET_IPV4_LENGTH(ip) - iphlen)) {
+ SCTP_STAT_INCR(sctps_hdrops);
+ goto bad;
+ }
+ sh = (struct sctphdr *)((caddr_t)ip + iphlen);
+ ch = (struct sctp_chunkhdr *)((caddr_t)sh + sizeof(*sh));
+ SCTPDBG(SCTP_DEBUG_INPUT1,
+ "sctp_input() length:%d iphlen:%d\n", mlen, iphlen);
+
+ /* SCTP does not allow broadcasts or multicasts */
+ if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr))) {
+ goto bad;
+ }
+ if (SCTP_IS_IT_BROADCAST(ip->ip_dst, m)) {
+ /*
+ * We only look at broadcast if it's a front state; all
+ * others we will not have a tcb for anyway.
+ */
+ goto bad;
+ }
+ /* validate SCTP checksum */
+ SCTPDBG(SCTP_DEBUG_CRCOFFLOAD,
+ "sctp_input(): Packet of length %d received on %s with csum_flags 0x%x.\n",
+ m->m_pkthdr.len,
+ if_name(m->m_pkthdr.rcvif),
+ m->m_pkthdr.csum_flags);
+#if defined(SCTP_WITH_NO_CSUM)
+ SCTP_STAT_INCR(sctps_recvnocrc);
+#else
+ if (m->m_pkthdr.csum_flags & CSUM_SCTP_VALID) {
+ SCTP_STAT_INCR(sctps_recvhwcrc);
+ goto sctp_skip_csum_4;
+ }
+ check = sh->checksum; /* save incoming checksum */
+ sh->checksum = 0; /* prepare for calc */
+ calc_check = sctp_calculate_cksum(m, iphlen);
+ sh->checksum = check;
+ SCTP_STAT_INCR(sctps_recvswcrc);
+ if (calc_check != check) {
+ SCTPDBG(SCTP_DEBUG_INPUT1, "Bad CSUM on SCTP packet calc_check:%x check:%x m:%p mlen:%d iphlen:%d\n",
+ calc_check, check, m, mlen, iphlen);
+
+ stcb = sctp_findassociation_addr(m, iphlen,
+ offset - sizeof(*ch),
+ sh, ch, &inp, &net,
+ vrf_id);
+ if ((net) && (port)) {
+ if (net->port == 0) {
+ sctp_pathmtu_adjustment(inp, stcb, net, net->mtu - sizeof(struct udphdr));
+ }
+ net->port = port;
+ }
+ if ((inp) && (stcb)) {
+ sctp_send_packet_dropped(stcb, net, m, iphlen, 1);
+ sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_INPUT_ERROR, SCTP_SO_NOT_LOCKED);
+ } else if ((inp != NULL) && (stcb == NULL)) {
+ refcount_up = 1;
+ }
+ SCTP_STAT_INCR(sctps_badsum);
+ SCTP_STAT_INCR_COUNTER32(sctps_checksumerrors);
+ goto bad;
+ }
+sctp_skip_csum_4:
+#endif
+ /* destination port of 0 is illegal, based on RFC2960. */
+ if (sh->dest_port == 0) {
+ SCTP_STAT_INCR(sctps_hdrops);
+ goto bad;
+ }
+ /*
+ * Locate the pcb and tcb for the datagram; sctp_findassociation_addr()
+ * wants the IP/SCTP/first chunk header...
+ */
+ stcb = sctp_findassociation_addr(m, iphlen, offset - sizeof(*ch),
+ sh, ch, &inp, &net, vrf_id);
+ if ((net) && (port)) {
+ if (net->port == 0) {
+ sctp_pathmtu_adjustment(inp, stcb, net, net->mtu - sizeof(struct udphdr));
+ }
+ net->port = port;
+ }
+ /* inp's ref-count increased && stcb locked */
+ if (inp == NULL) {
+ struct sctp_init_chunk *init_chk, chunk_buf;
+
+ SCTP_STAT_INCR(sctps_noport);
+#ifdef ICMP_BANDLIM
+ /*
+ * we use the bandwidth limiting to protect against sending
+ * too many ABORTS all at once. In this case these count the
+ * same as an ICMP message.
+ */
+ if (badport_bandlim(0) < 0)
+ goto bad;
+#endif /* ICMP_BANDLIM */
+ SCTPDBG(SCTP_DEBUG_INPUT1,
+ "Sending a ABORT from packet entry!\n");
+ if (ch->chunk_type == SCTP_INITIATION) {
+ /*
+ * we do a trick here to get the INIT tag, dig in
+ * and get the tag from the INIT and put it in the
+ * common header.
+ */
+ init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
+ iphlen + sizeof(*sh), sizeof(*init_chk),
+ (uint8_t *) & chunk_buf);
+ if (init_chk != NULL)
+ sh->v_tag = init_chk->init.initiate_tag;
+ }
+ if (ch->chunk_type == SCTP_SHUTDOWN_ACK) {
+ sctp_send_shutdown_complete2(m, iphlen, sh, vrf_id, port);
+ goto bad;
+ }
+ if (ch->chunk_type == SCTP_SHUTDOWN_COMPLETE) {
+ goto bad;
+ }
+ if (ch->chunk_type != SCTP_ABORT_ASSOCIATION)
+ sctp_send_abort(m, iphlen, sh, 0, NULL, vrf_id, port);
+ goto bad;
+ } else if (stcb == NULL) {
+ refcount_up = 1;
+ }
+#ifdef IPSEC
+ /*
+ * I very much doubt any of the IPSEC stuff will work but I have no
+ * idea, so I will leave it in place.
+ */
+ if (inp && ipsec4_in_reject(m, &inp->ip_inp.inp)) {
+ MODULE_GLOBAL(ipsec4stat).in_polvio++;
+ SCTP_STAT_INCR(sctps_hdrops);
+ goto bad;
+ }
+#endif /* IPSEC */
+
+ /*
+ * common chunk processing
+ */
+ length = ip->ip_len + iphlen;
+ offset -= sizeof(struct sctp_chunkhdr);
+
+ ecn_bits = ip->ip_tos;
+
+ /* sa_ignore NO_NULL_CHK */
+ sctp_common_input_processing(&m, iphlen, offset, length, sh, ch,
+ inp, stcb, net, ecn_bits, vrf_id, port);
+ /* inp's ref-count reduced && stcb unlocked */
+ if (m) {
+ sctp_m_freem(m);
+ }
+ if ((inp) && (refcount_up)) {
+ /* reduce ref-count */
+ SCTP_INP_DECR_REF(inp);
+ }
+ return;
+bad:
+ if (stcb) {
+ SCTP_TCB_UNLOCK(stcb);
+ }
+ if ((inp) && (refcount_up)) {
+ /* reduce ref-count */
+ SCTP_INP_DECR_REF(inp);
+ }
+ if (m) {
+ sctp_m_freem(m);
+ }
+ return;
+}
+
+void
+sctp_input(struct mbuf *i_pak, int off)
+{
+ sctp_input_with_port(i_pak, off, 0);
+}
diff --git a/rtems/freebsd/netinet/sctp_input.h b/rtems/freebsd/netinet/sctp_input.h
new file mode 100644
index 00000000..2fe782c7
--- /dev/null
+++ b/rtems/freebsd/netinet/sctp_input.h
@@ -0,0 +1,57 @@
+/*-
+ * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* $KAME: sctp_input.h,v 1.6 2005/03/06 16:04:17 itojun Exp $ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#ifndef __sctp_input_h__
+#define __sctp_input_h__
+
+#if defined(_KERNEL) || defined(__Userspace__)
+void
+sctp_common_input_processing(struct mbuf **, int, int, int,
+ struct sctphdr *, struct sctp_chunkhdr *, struct sctp_inpcb *,
+ struct sctp_tcb *, struct sctp_nets *, uint8_t, uint32_t, uint16_t);
+
+struct sctp_stream_reset_out_request *
+sctp_find_stream_reset(struct sctp_tcb *stcb, uint32_t seq,
+ struct sctp_tmit_chunk **bchk);
+
+void
+sctp_reset_in_stream(struct sctp_tcb *stcb, int number_entries,
+ uint16_t * list);
+
+
+int sctp_is_there_unsent_data(struct sctp_tcb *stcb);
+
+#endif
+#endif
diff --git a/rtems/freebsd/netinet/sctp_lock_bsd.h b/rtems/freebsd/netinet/sctp_lock_bsd.h
new file mode 100644
index 00000000..1cdca585
--- /dev/null
+++ b/rtems/freebsd/netinet/sctp_lock_bsd.h
@@ -0,0 +1,430 @@
+#ifndef __sctp_lock_bsd_h__
+#define __sctp_lock_bsd_h__
+/*-
+ * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * General locking concepts: The goal of our locking is of course to provide
+ * consistency and yet minimize overhead. We will attempt to use
+ * non-recursive locks, which are supposed to be quite inexpensive. Now in
+ * order to do this the goal is that most functions are not aware of locking.
+ * Once we have a TCB we lock it and unlock when we are through. This means
+ * that the TCB lock is kind-of a "global" lock when working on an
+ * association. Caution must be used when asserting a TCB_LOCK since if we
+ * recurse we deadlock.
+ *
+ * Most other locks (INP and INFO) attempt to localize the locking, i.e. we
+ * try to contain the lock and unlock within the function that needs to lock
+ * it. This sometimes means we do extra locks and unlocks and lose a bit of
+ * efficiency, but if the performance statements about non-recursive locks
+ * are true this should not be a problem. One issue that arises with this
+ * lock-only-when-needed approach is that if an implicit association setup is
+ * done we have a problem. If at the time I look up an association I get NULL
+ * in the tcb return, by the time I call to create the association some other
+ * processor could have created it. This is what the CREATE lock on the
+ * endpoint is for. Places where we will be implicitly creating the
+ * association OR just creating an association (the connect call) will assert
+ * the CREATE_INP lock. This will assure us that during all the lookup of INP
+ * and INFO, if another creator is also locking/looking up, we can gate the
+ * two to synchronize. So the CREATE_INP lock is also another one we must use
+ * extreme caution in locking, to make sure we don't hit a re-entrancy issue.
+ *
+ * For non FreeBSD 5.x ports we provide a bunch of EMPTY lock macros so we
+ * can blatantly put locks everywhere; they reduce to nothing on
+ * NetBSD/OpenBSD and FreeBSD 4.x.
+ */
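+
+/*
+ * Hedged usage sketch (not compiled in): the per-association pattern the
+ * comment above describes. The surrounding function is hypothetical; the
+ * lock macros are the real ones defined later in this header.
+ */
+#if 0
+static void
+example_tcb_lock_pattern(struct sctp_tcb *stcb)
+{
+ SCTP_TCB_LOCK(stcb); /* the "global" lock for this association */
+ /*
+ * ... work on stcb->asoc; never re-assert SCTP_TCB_LOCK here,
+ * the mutex is non-recursive and recursion deadlocks ...
+ */
+ SCTP_TCB_UNLOCK(stcb);
+}
+#endif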
+
+/*
+ * When working with the global SCTP lists we lock and unlock the INP_INFO
+ * lock. So when we go to lookup an association we will want to do a
+ * SCTP_INP_INFO_RLOCK() and then when we want to add a new association to
+ * the SCTP_BASE_INFO() list's we will do a SCTP_INP_INFO_WLOCK().
+ */
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+
+extern struct sctp_foo_stuff sctp_logoff[];
+extern int sctp_logoff_stuff;
+
+#define SCTP_IPI_COUNT_INIT()
+
+#define SCTP_STATLOG_INIT_LOCK()
+#define SCTP_STATLOG_LOCK()
+#define SCTP_STATLOG_UNLOCK()
+#define SCTP_STATLOG_DESTROY()
+
+#define SCTP_INP_INFO_LOCK_DESTROY() do { \
+ if(rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx))) { \
+ rw_wunlock(&SCTP_BASE_INFO(ipi_ep_mtx)); \
+ } \
+ rw_destroy(&SCTP_BASE_INFO(ipi_ep_mtx)); \
+ } while (0)
+
+#define SCTP_INP_INFO_LOCK_INIT() \
+ rw_init(&SCTP_BASE_INFO(ipi_ep_mtx), "sctp-info");
+
+
+#define SCTP_INP_INFO_RLOCK() do { \
+ rw_rlock(&SCTP_BASE_INFO(ipi_ep_mtx)); \
+} while (0)
+
+
+#define SCTP_INP_INFO_WLOCK() do { \
+ rw_wlock(&SCTP_BASE_INFO(ipi_ep_mtx)); \
+} while (0)
+
+
+#define SCTP_INP_INFO_RUNLOCK() rw_runlock(&SCTP_BASE_INFO(ipi_ep_mtx))
+#define SCTP_INP_INFO_WUNLOCK() rw_wunlock(&SCTP_BASE_INFO(ipi_ep_mtx))
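+
+/*
+ * Hedged usage sketch (not compiled in): lookups take the INFO lock shared,
+ * additions take it exclusive, as described above. The function body is
+ * hypothetical.
+ */
+#if 0
+static void
+example_info_lock_usage(void)
+{
+ SCTP_INP_INFO_RLOCK();
+ /* ... search the SCTP_BASE_INFO() lists for an existing asoc ... */
+ SCTP_INP_INFO_RUNLOCK();
+
+ SCTP_INP_INFO_WLOCK();
+ /* ... link a new endpoint/association into the global lists ... */
+ SCTP_INP_INFO_WUNLOCK();
+}
+#endif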
+
+
+#define SCTP_IPI_ADDR_INIT() \
+ rw_init(&SCTP_BASE_INFO(ipi_addr_mtx), "sctp-addr")
+#define SCTP_IPI_ADDR_DESTROY() do { \
+ if(rw_wowned(&SCTP_BASE_INFO(ipi_addr_mtx))) { \
+ rw_wunlock(&SCTP_BASE_INFO(ipi_addr_mtx)); \
+ } \
+ rw_destroy(&SCTP_BASE_INFO(ipi_addr_mtx)); \
+ } while (0)
+#define SCTP_IPI_ADDR_RLOCK() do { \
+ rw_rlock(&SCTP_BASE_INFO(ipi_addr_mtx)); \
+} while (0)
+#define SCTP_IPI_ADDR_WLOCK() do { \
+ rw_wlock(&SCTP_BASE_INFO(ipi_addr_mtx)); \
+} while (0)
+
+#define SCTP_IPI_ADDR_RUNLOCK() rw_runlock(&SCTP_BASE_INFO(ipi_addr_mtx))
+#define SCTP_IPI_ADDR_WUNLOCK() rw_wunlock(&SCTP_BASE_INFO(ipi_addr_mtx))
+
+
+#define SCTP_IPI_ITERATOR_WQ_INIT() \
+ mtx_init(&sctp_it_ctl.ipi_iterator_wq_mtx, "sctp-it-wq", "sctp_it_wq", MTX_DEF)
+
+#define SCTP_IPI_ITERATOR_WQ_DESTROY() \
+ mtx_destroy(&sctp_it_ctl.ipi_iterator_wq_mtx)
+
+#define SCTP_IPI_ITERATOR_WQ_LOCK() do { \
+ mtx_lock(&sctp_it_ctl.ipi_iterator_wq_mtx); \
+} while (0)
+
+#define SCTP_IPI_ITERATOR_WQ_UNLOCK() mtx_unlock(&sctp_it_ctl.ipi_iterator_wq_mtx)
+
+
+#define SCTP_IP_PKTLOG_INIT() \
+ mtx_init(&SCTP_BASE_INFO(ipi_pktlog_mtx), "sctp-pktlog", "packetlog", MTX_DEF)
+
+
+#define SCTP_IP_PKTLOG_LOCK() do { \
+ mtx_lock(&SCTP_BASE_INFO(ipi_pktlog_mtx)); \
+} while (0)
+
+#define SCTP_IP_PKTLOG_UNLOCK() mtx_unlock(&SCTP_BASE_INFO(ipi_pktlog_mtx))
+
+#define SCTP_IP_PKTLOG_DESTROY() \
+ mtx_destroy(&SCTP_BASE_INFO(ipi_pktlog_mtx))
+
+
+
+
+
+/*
+ * The INP locks we will use for locking an SCTP endpoint, so for example if
+ * we want to change something at the endpoint level for example random_store
+ * or cookie secrets we lock the INP level.
+ */
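+
+/*
+ * Hedged usage sketch (not compiled in): endpoint-level state such as the
+ * cookie secrets mentioned above is changed under the INP write lock. The
+ * function is hypothetical.
+ */
+#if 0
+static void
+example_inp_lock_usage(struct sctp_inpcb *inp)
+{
+ SCTP_INP_WLOCK(inp);
+ /* ... update cookie secrets / random_store on the endpoint ... */
+ SCTP_INP_WUNLOCK(inp);
+}
+#endif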
+
+#define SCTP_INP_READ_INIT(_inp) \
+ mtx_init(&(_inp)->inp_rdata_mtx, "sctp-read", "inpr", MTX_DEF | MTX_DUPOK)
+
+#define SCTP_INP_READ_DESTROY(_inp) \
+ mtx_destroy(&(_inp)->inp_rdata_mtx)
+
+#define SCTP_INP_READ_LOCK(_inp) do { \
+ mtx_lock(&(_inp)->inp_rdata_mtx); \
+} while (0)
+
+
+#define SCTP_INP_READ_UNLOCK(_inp) mtx_unlock(&(_inp)->inp_rdata_mtx)
+
+
+#define SCTP_INP_LOCK_INIT(_inp) \
+ mtx_init(&(_inp)->inp_mtx, "sctp-inp", "inp", MTX_DEF | MTX_DUPOK)
+#define SCTP_ASOC_CREATE_LOCK_INIT(_inp) \
+ mtx_init(&(_inp)->inp_create_mtx, "sctp-create", "inp_create", \
+ MTX_DEF | MTX_DUPOK)
+
+#define SCTP_INP_LOCK_DESTROY(_inp) \
+ mtx_destroy(&(_inp)->inp_mtx)
+
+#define SCTP_INP_LOCK_CONTENDED(_inp) ((_inp)->inp_mtx.mtx_lock & MTX_CONTESTED)
+
+#define SCTP_INP_READ_CONTENDED(_inp) ((_inp)->inp_rdata_mtx.mtx_lock & MTX_CONTESTED)
+
+#define SCTP_ASOC_CREATE_LOCK_CONTENDED(_inp) ((_inp)->inp_create_mtx.mtx_lock & MTX_CONTESTED)
+
+
+#define SCTP_ASOC_CREATE_LOCK_DESTROY(_inp) \
+ mtx_destroy(&(_inp)->inp_create_mtx)
+
+
+#ifdef SCTP_LOCK_LOGGING
+#define SCTP_INP_RLOCK(_inp) do { \
+ if(SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE) sctp_log_lock(_inp, (struct sctp_tcb *)NULL, SCTP_LOG_LOCK_INP);\
+ mtx_lock(&(_inp)->inp_mtx); \
+} while (0)
+
+#define SCTP_INP_WLOCK(_inp) do { \
+ if(SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE) sctp_log_lock(_inp, (struct sctp_tcb *)NULL, SCTP_LOG_LOCK_INP);\
+ mtx_lock(&(_inp)->inp_mtx); \
+} while (0)
+
+#else
+
+#define SCTP_INP_RLOCK(_inp) do { \
+ mtx_lock(&(_inp)->inp_mtx); \
+} while (0)
+
+#define SCTP_INP_WLOCK(_inp) do { \
+ mtx_lock(&(_inp)->inp_mtx); \
+} while (0)
+
+#endif
+
+
+#define SCTP_TCB_SEND_LOCK_INIT(_tcb) \
+ mtx_init(&(_tcb)->tcb_send_mtx, "sctp-send-tcb", "tcbs", MTX_DEF | MTX_DUPOK)
+
+#define SCTP_TCB_SEND_LOCK_DESTROY(_tcb) mtx_destroy(&(_tcb)->tcb_send_mtx)
+
+#define SCTP_TCB_SEND_LOCK(_tcb) do { \
+ mtx_lock(&(_tcb)->tcb_send_mtx); \
+} while (0)
+
+#define SCTP_TCB_SEND_UNLOCK(_tcb) mtx_unlock(&(_tcb)->tcb_send_mtx)
+
+#define SCTP_INP_INCR_REF(_inp) atomic_add_int(&((_inp)->refcount), 1)
+#define SCTP_INP_DECR_REF(_inp) atomic_add_int(&((_inp)->refcount), -1)
+
+
+#ifdef SCTP_LOCK_LOGGING
+#define SCTP_ASOC_CREATE_LOCK(_inp) \
+ do { \
+ if(SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE) sctp_log_lock(_inp, (struct sctp_tcb *)NULL, SCTP_LOG_LOCK_CREATE); \
+ mtx_lock(&(_inp)->inp_create_mtx); \
+ } while (0)
+#else
+
+#define SCTP_ASOC_CREATE_LOCK(_inp) \
+ do { \
+ mtx_lock(&(_inp)->inp_create_mtx); \
+ } while (0)
+#endif
+
+#define SCTP_INP_RUNLOCK(_inp) mtx_unlock(&(_inp)->inp_mtx)
+#define SCTP_INP_WUNLOCK(_inp) mtx_unlock(&(_inp)->inp_mtx)
+#define SCTP_ASOC_CREATE_UNLOCK(_inp) mtx_unlock(&(_inp)->inp_create_mtx)
+
+/*
+ * For the majority of things (once we have found the association) we will
+ * lock the actual association mutex. This will protect all the association
+ * level queues and streams and such. We will need to lock the socket layer
+ * when we stuff data up into the receiving sb_mb. I.e. we will need to do an
+ * extra SOCKBUF_LOCK(&so->so_rcv) even though the association is locked.
+ */
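+
+/*
+ * Hedged usage sketch (not compiled in): delivering data into the receive
+ * socket buffer takes the extra SOCKBUF_LOCK even while the TCB is held,
+ * exactly as the comment above notes. The function is hypothetical.
+ */
+#if 0
+static void
+example_deliver_to_socket(struct sctp_tcb *stcb, struct socket *so)
+{
+ SCTP_TCB_LOCK(stcb);
+ SOCKBUF_LOCK(&so->so_rcv);
+ /* ... append mbufs to so->so_rcv.sb_mb ... */
+ SOCKBUF_UNLOCK(&so->so_rcv);
+ SCTP_TCB_UNLOCK(stcb);
+}
+#endif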
+
+#define SCTP_TCB_LOCK_INIT(_tcb) \
+ mtx_init(&(_tcb)->tcb_mtx, "sctp-tcb", "tcb", MTX_DEF | MTX_DUPOK)
+
+#define SCTP_TCB_LOCK_DESTROY(_tcb) mtx_destroy(&(_tcb)->tcb_mtx)
+
+#ifdef SCTP_LOCK_LOGGING
+#define SCTP_TCB_LOCK(_tcb) do { \
+ if(SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE) sctp_log_lock(_tcb->sctp_ep, _tcb, SCTP_LOG_LOCK_TCB); \
+ mtx_lock(&(_tcb)->tcb_mtx); \
+} while (0)
+
+#else
+#define SCTP_TCB_LOCK(_tcb) do { \
+ mtx_lock(&(_tcb)->tcb_mtx); \
+} while (0)
+
+#endif
+
+
+#define SCTP_TCB_TRYLOCK(_tcb) mtx_trylock(&(_tcb)->tcb_mtx)
+
+#define SCTP_TCB_UNLOCK(_tcb) mtx_unlock(&(_tcb)->tcb_mtx)
+
+#define SCTP_TCB_UNLOCK_IFOWNED(_tcb) do { \
+ if (mtx_owned(&(_tcb)->tcb_mtx)) \
+ mtx_unlock(&(_tcb)->tcb_mtx); \
+ } while (0)
+
+
+
+#ifdef INVARIANTS
+#define SCTP_TCB_LOCK_ASSERT(_tcb) do { \
+ if (mtx_owned(&(_tcb)->tcb_mtx) == 0) \
+ panic("Don't own TCB lock"); \
+ } while (0)
+#else
+#define SCTP_TCB_LOCK_ASSERT(_tcb)
+#endif
+
+#define SCTP_ITERATOR_LOCK_INIT() \
+ mtx_init(&sctp_it_ctl.it_mtx, "sctp-it", "iterator", MTX_DEF)
+
+#ifdef INVARIANTS
+#define SCTP_ITERATOR_LOCK() \
+ do { \
+ if (mtx_owned(&sctp_it_ctl.it_mtx)) \
+ panic("Iterator Lock"); \
+ mtx_lock(&sctp_it_ctl.it_mtx); \
+ } while (0)
+#else
+#define SCTP_ITERATOR_LOCK() \
+ do { \
+ mtx_lock(&sctp_it_ctl.it_mtx); \
+ } while (0)
+
+#endif
+
+#define SCTP_ITERATOR_UNLOCK() mtx_unlock(&sctp_it_ctl.it_mtx)
+#define SCTP_ITERATOR_LOCK_DESTROY() mtx_destroy(&sctp_it_ctl.it_mtx)
+
+
+#define SCTP_WQ_ADDR_INIT() do { \
+ mtx_init(&SCTP_BASE_INFO(wq_addr_mtx), "sctp-addr-wq","sctp_addr_wq",MTX_DEF); \
+ } while (0)
+
+#define SCTP_WQ_ADDR_DESTROY() do { \
+ if(mtx_owned(&SCTP_BASE_INFO(wq_addr_mtx))) { \
+ mtx_unlock(&SCTP_BASE_INFO(wq_addr_mtx)); \
+ } \
+ mtx_destroy(&SCTP_BASE_INFO(wq_addr_mtx)); \
+ } while (0)
+
+#define SCTP_WQ_ADDR_LOCK() do { \
+ mtx_lock(&SCTP_BASE_INFO(wq_addr_mtx)); \
+} while (0)
+#define SCTP_WQ_ADDR_UNLOCK() do { \
+ mtx_unlock(&SCTP_BASE_INFO(wq_addr_mtx)); \
+} while (0)
+
+
+
+#define SCTP_INCR_EP_COUNT() \
+ do { \
+ atomic_add_int(&SCTP_BASE_INFO(ipi_count_ep), 1); \
+ } while (0)
+
+#define SCTP_DECR_EP_COUNT() \
+ do { \
+ atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_ep), 1); \
+ } while (0)
+
+#define SCTP_INCR_ASOC_COUNT() \
+ do { \
+ atomic_add_int(&SCTP_BASE_INFO(ipi_count_asoc), 1); \
+ } while (0)
+
+#define SCTP_DECR_ASOC_COUNT() \
+ do { \
+ atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_asoc), 1); \
+ } while (0)
+
+#define SCTP_INCR_LADDR_COUNT() \
+ do { \
+ atomic_add_int(&SCTP_BASE_INFO(ipi_count_laddr), 1); \
+ } while (0)
+
+#define SCTP_DECR_LADDR_COUNT() \
+ do { \
+ atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_laddr), 1); \
+ } while (0)
+
+#define SCTP_INCR_RADDR_COUNT() \
+ do { \
+ atomic_add_int(&SCTP_BASE_INFO(ipi_count_raddr), 1); \
+ } while (0)
+
+#define SCTP_DECR_RADDR_COUNT() \
+ do { \
+ atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_raddr),1); \
+ } while (0)
+
+#define SCTP_INCR_CHK_COUNT() \
+ do { \
+ atomic_add_int(&SCTP_BASE_INFO(ipi_count_chunk), 1); \
+ } while (0)
+#ifdef INVARIANTS
+#define SCTP_DECR_CHK_COUNT() \
+ do { \
+ if(SCTP_BASE_INFO(ipi_count_chunk) == 0) \
+ panic("chunk count to 0?"); \
+ atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_chunk), 1); \
+ } while (0)
+#else
+#define SCTP_DECR_CHK_COUNT() \
+ do { \
+ if(SCTP_BASE_INFO(ipi_count_chunk) != 0) \
+ atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_chunk), 1); \
+ } while (0)
+#endif
+#define SCTP_INCR_READQ_COUNT() \
+ do { \
+ atomic_add_int(&SCTP_BASE_INFO(ipi_count_readq),1); \
+ } while (0)
+
+#define SCTP_DECR_READQ_COUNT() \
+ do { \
+ atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_readq), 1); \
+ } while (0)
+
+#define SCTP_INCR_STRMOQ_COUNT() \
+ do { \
+ atomic_add_int(&SCTP_BASE_INFO(ipi_count_strmoq), 1); \
+ } while (0)
+
+#define SCTP_DECR_STRMOQ_COUNT() \
+ do { \
+ atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_strmoq), 1); \
+ } while (0)
+
+
+#if defined(SCTP_SO_LOCK_TESTING)
+#define SCTP_INP_SO(sctpinp) (sctpinp)->ip_inp.inp.inp_socket
+#define SCTP_SOCKET_LOCK(so, refcnt)
+#define SCTP_SOCKET_UNLOCK(so, refcnt)
+#endif
+
+#endif
diff --git a/rtems/freebsd/netinet/sctp_os.h b/rtems/freebsd/netinet/sctp_os.h
new file mode 100644
index 00000000..67bdb1af
--- /dev/null
+++ b/rtems/freebsd/netinet/sctp_os.h
@@ -0,0 +1,72 @@
+/*-
+ * Copyright (c) 2006-2007, by Cisco Systems, Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+#ifndef __sctp_os_h__
+#define __sctp_os_h__
+
+/*
+ * General kernel memory allocation:
+ * SCTP_MALLOC(element, type, size, name)
+ * SCTP_FREE(element)
+ * Kernel memory allocation for "soname" - memory must be zeroed.
+ * SCTP_MALLOC_SONAME(name, type, size)
+ * SCTP_FREE_SONAME(name)
+ */
+
+/*
+ * Zone(pool) allocation routines: MUST be defined for each OS.
+ * zone = zone/pool pointer.
+ * name = string name of the zone/pool.
+ * size = size of each zone/pool element.
+ * number = number of elements in zone/pool.
+ * type = structure type to allocate
+ *
+ * sctp_zone_t
+ * SCTP_ZONE_INIT(zone, name, size, number)
+ * SCTP_ZONE_GET(zone, type)
+ * SCTP_ZONE_FREE(zone, element)
+ * SCTP_ZONE_DESTROY(zone)
+ */
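+
+/*
+ * Hedged usage sketch (not compiled in): a zone set up and used through the
+ * macros documented above. The zone variable and element type are
+ * hypothetical.
+ */
+#if 0
+static sctp_zone_t example_zone;
+
+static void
+example_zone_usage(void)
+{
+ struct example_elem *e;
+
+ SCTP_ZONE_INIT(example_zone, "example", sizeof(struct example_elem), 128);
+ e = SCTP_ZONE_GET(example_zone, struct example_elem);
+ if (e != NULL)
+ SCTP_ZONE_FREE(example_zone, e);
+ SCTP_ZONE_DESTROY(example_zone);
+}
+#endif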
+
+#include <rtems/freebsd/netinet/sctp_os_bsd.h>
+
+
+
+
+
+/* All OSes must implement this address gatherer. If
+ * no VRFs exist, then VRF 0 is the only one and all
+ * addresses and ifn's live here.
+ */
+#define SCTP_DEFAULT_VRF 0
+void sctp_init_vrf_list(int vrfid);
+
+#endif
diff --git a/rtems/freebsd/netinet/sctp_os_bsd.h b/rtems/freebsd/netinet/sctp_os_bsd.h
new file mode 100644
index 00000000..8fea7379
--- /dev/null
+++ b/rtems/freebsd/netinet/sctp_os_bsd.h
@@ -0,0 +1,503 @@
+/*-
+ * Copyright (c) 2006-2007, by Cisco Systems, Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+#ifndef __sctp_os_bsd_h__
+#define __sctp_os_bsd_h__
+/*
+ * includes
+ */
+#include <rtems/freebsd/local/opt_ipsec.h>
+#include <rtems/freebsd/local/opt_compat.h>
+#include <rtems/freebsd/local/opt_inet6.h>
+#include <rtems/freebsd/local/opt_inet.h>
+#include <rtems/freebsd/local/opt_sctp.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/ktr.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/sysctl.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/protosw.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/socketvar.h>
+#include <rtems/freebsd/sys/jail.h>
+#include <rtems/freebsd/sys/sysctl.h>
+#include <rtems/freebsd/sys/resourcevar.h>
+#include <rtems/freebsd/sys/uio.h>
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/rwlock.h>
+#include <rtems/freebsd/sys/kthread.h>
+#include <rtems/freebsd/sys/priv.h>
+#include <rtems/freebsd/sys/random.h>
+#include <rtems/freebsd/sys/limits.h>
+#include <rtems/freebsd/sys/queue.h>
+#include <rtems/freebsd/machine/cpu.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/if_types.h>
+#include <rtems/freebsd/net/if_var.h>
+#include <rtems/freebsd/net/route.h>
+#include <rtems/freebsd/net/vnet.h>
+
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/in_systm.h>
+#include <rtems/freebsd/netinet/ip.h>
+#include <rtems/freebsd/netinet/in_pcb.h>
+#include <rtems/freebsd/netinet/in_var.h>
+#include <rtems/freebsd/netinet/ip_var.h>
+#include <rtems/freebsd/netinet/ip_icmp.h>
+#include <rtems/freebsd/netinet/icmp_var.h>
+
+#ifdef IPSEC
+#include <rtems/freebsd/netipsec/ipsec.h>
+#include <rtems/freebsd/netipsec/key.h>
+#endif /* IPSEC */
+
+#ifdef INET6
+#include <rtems/freebsd/sys/domain.h>
+#ifdef IPSEC
+#include <rtems/freebsd/netipsec/ipsec6.h>
+#endif
+#include <rtems/freebsd/netinet/ip6.h>
+#include <rtems/freebsd/netinet6/ip6_var.h>
+#include <rtems/freebsd/netinet6/in6_pcb.h>
+#include <rtems/freebsd/netinet/icmp6.h>
+#include <rtems/freebsd/netinet6/ip6protosw.h>
+#include <rtems/freebsd/netinet6/nd6.h>
+#include <rtems/freebsd/netinet6/scope6_var.h>
+#endif /* INET6 */
+
+
+#include <rtems/freebsd/netinet/ip_options.h>
+
+#ifndef in6pcb
+#define in6pcb inpcb
+#endif
+/* Declare all the malloc names for all the various mallocs */
+MALLOC_DECLARE(SCTP_M_MAP);
+MALLOC_DECLARE(SCTP_M_STRMI);
+MALLOC_DECLARE(SCTP_M_STRMO);
+MALLOC_DECLARE(SCTP_M_ASC_ADDR);
+MALLOC_DECLARE(SCTP_M_ASC_IT);
+MALLOC_DECLARE(SCTP_M_AUTH_CL);
+MALLOC_DECLARE(SCTP_M_AUTH_KY);
+MALLOC_DECLARE(SCTP_M_AUTH_HL);
+MALLOC_DECLARE(SCTP_M_AUTH_IF);
+MALLOC_DECLARE(SCTP_M_STRESET);
+MALLOC_DECLARE(SCTP_M_CMSG);
+MALLOC_DECLARE(SCTP_M_COPYAL);
+MALLOC_DECLARE(SCTP_M_VRF);
+MALLOC_DECLARE(SCTP_M_IFA);
+MALLOC_DECLARE(SCTP_M_IFN);
+MALLOC_DECLARE(SCTP_M_TIMW);
+MALLOC_DECLARE(SCTP_M_MVRF);
+MALLOC_DECLARE(SCTP_M_ITER);
+MALLOC_DECLARE(SCTP_M_SOCKOPT);
+
+#if defined(SCTP_LOCAL_TRACE_BUF)
+
+#define SCTP_GET_CYCLECOUNT get_cyclecount()
+#define SCTP_CTR6 sctp_log_trace
+
+#else
+#define SCTP_CTR6 CTR6
+#endif
+
+/*
+ * Macros to expand out globals defined by various modules
+ * to either a real global or a virtualized instance of one,
+ * depending on whether VIMAGE is defined.
+ */
+/* then define the macro(s) that hook into the vimage macros */
+#define MODULE_GLOBAL(__SYMBOL) V_##__SYMBOL
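+/*
+ * For example (illustrative only): a module that declares a global
+ * "sctp_foo" would be accessed as MODULE_GLOBAL(sctp_foo), which
+ * expands to V_sctp_foo; the VNET machinery then resolves that to
+ * either the plain global or the per-vnet instance.
+ */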
+
+#define V_system_base_info VNET(system_base_info)
+#define SCTP_BASE_INFO(__m) V_system_base_info.sctppcbinfo.__m
+#define SCTP_BASE_STATS V_system_base_info.sctpstat
+#define SCTP_BASE_STATS_SYSCTL VNET_NAME(system_base_info.sctpstat)
+#define SCTP_BASE_STAT(__m) V_system_base_info.sctpstat.__m
+#define SCTP_BASE_SYSCTL(__m) VNET_NAME(system_base_info.sctpsysctl.__m)
+#define SCTP_BASE_VAR(__m) V_system_base_info.__m
+
+#define USER_ADDR_NULL (NULL) /* FIX ME: temp */
+
+#if defined(SCTP_DEBUG)
+#define SCTPDBG(level, params...) \
+	do { \
+		if (SCTP_BASE_SYSCTL(sctp_debug_on) & level) { \
+			printf(params); \
+		} \
+	} while (0)
+#define SCTPDBG_ADDR(level, addr) \
+	do { \
+		if (SCTP_BASE_SYSCTL(sctp_debug_on) & level) { \
+			sctp_print_address(addr); \
+		} \
+	} while (0)
+#define SCTPDBG_PKT(level, iph, sh) \
+	do { \
+		if (SCTP_BASE_SYSCTL(sctp_debug_on) & level) { \
+			sctp_print_address_pkt(iph, sh); \
+		} \
+	} while (0)
+#else
+#define SCTPDBG(level, params...)
+#define SCTPDBG_ADDR(level, addr)
+#define SCTPDBG_PKT(level, iph, sh)
+#endif
+#define SCTP_PRINTF(params...) printf(params)
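+/*
+ * Usage sketch (illustrative): SCTPDBG(SCTP_DEBUG_OUTPUT1, "len %d\n", len)
+ * prints only when the corresponding bit is set in the sctp_debug_on
+ * sysctl; with SCTP_DEBUG undefined the macros above expand to nothing,
+ * so debug calls cost nothing in production builds.
+ */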
+
+#ifdef SCTP_LTRACE_CHUNKS
+#define SCTP_LTRACE_CHK(a, b, c, d) do { if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LTRACE_CHUNK_ENABLE) SCTP_CTR6(KTR_SUBSYS, "SCTP:%d[%d]:%x-%x-%x-%x", SCTP_LOG_CHUNK_PROC, 0, a, b, c, d); } while (0)
+#else
+#define SCTP_LTRACE_CHK(a, b, c, d)
+#endif
+
+#ifdef SCTP_LTRACE_ERRORS
+#define SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, file, err) do { \
+	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LTRACE_ERROR_ENABLE) \
+		printf("mbuf:%p inp:%p stcb:%p net:%p file:%x line:%d error:%d\n", \
+		    m, inp, stcb, net, file, __LINE__, err); \
+} while (0)
+#define SCTP_LTRACE_ERR_RET(inp, stcb, net, file, err) do { \
+	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LTRACE_ERROR_ENABLE) \
+		printf("inp:%p stcb:%p net:%p file:%x line:%d error:%d\n", \
+		    inp, stcb, net, file, __LINE__, err); \
+} while (0)
+#else
+#define SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, file, err)
+#define SCTP_LTRACE_ERR_RET(inp, stcb, net, file, err)
+#endif
+
+
+/*
+ * Local address and interface list handling
+ */
+#define SCTP_MAX_VRF_ID 0
+#define SCTP_SIZE_OF_VRF_HASH 3
+#define SCTP_IFNAMSIZ IFNAMSIZ
+#define SCTP_DEFAULT_VRFID 0
+#define SCTP_VRF_ADDR_HASH_SIZE 16
+#define SCTP_VRF_IFN_HASH_SIZE 3
+#define SCTP_INIT_VRF_TABLEID(vrf)
+
+#define SCTP_IFN_IS_IFT_LOOP(ifn) ((ifn)->ifn_type == IFT_LOOP)
+#define SCTP_ROUTE_IS_REAL_LOOP(ro) ((ro)->ro_rt && (ro)->ro_rt->rt_ifa && (ro)->ro_rt->rt_ifa->ifa_ifp && (ro)->ro_rt->rt_ifa->ifa_ifp->if_type == IFT_LOOP)
+
+/*
+ * Access to IFN's to help with src-addr-selection
+ */
+/* This could return void if the index works, but for BSD we provide both. */
+#define SCTP_GET_IFN_VOID_FROM_ROUTE(ro) (void *)ro->ro_rt->rt_ifp
+#define SCTP_GET_IF_INDEX_FROM_ROUTE(ro) (ro)->ro_rt->rt_ifp->if_index
+#define SCTP_ROUTE_HAS_VALID_IFN(ro) ((ro)->ro_rt && (ro)->ro_rt->rt_ifp)
+
+/*
+ * general memory allocation
+ */
+#define SCTP_MALLOC(var, type, size, name) \
+ do { \
+ var = (type)malloc(size, name, M_NOWAIT); \
+ } while (0)
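+/*
+ * Because M_NOWAIT is used, SCTP_MALLOC() can fail; callers must check
+ * for NULL. A sketch (hypothetical variable names):
+ *	SCTP_MALLOC(twb, struct sctp_tagblock *, sizeof(*twb), SCTP_M_TIMW);
+ *	if (twb == NULL)
+ *		return (ENOMEM);
+ */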
+
+#define SCTP_FREE(var, type) free(var, type)
+
+#define SCTP_MALLOC_SONAME(var, type, size) \
+ do { \
+ var = (type)malloc(size, M_SONAME, M_WAITOK | M_ZERO); \
+ } while (0)
+
+#define SCTP_FREE_SONAME(var) free(var, M_SONAME)
+
+#define SCTP_PROCESS_STRUCT struct proc *
+
+/*
+ * zone allocation functions
+ */
+#include <rtems/freebsd/vm/uma.h>
+
+/* SCTP_ZONE_INIT: initialize the zone */
+typedef struct uma_zone *sctp_zone_t;
+
+#define SCTP_ZONE_INIT(zone, name, size, number) do { \
+	zone = uma_zcreate(name, size, NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, \
+	    0); \
+	uma_zone_set_max(zone, number); \
+} while (0)
+
+#define SCTP_ZONE_DESTROY(zone) uma_zdestroy(zone)
+
+/* SCTP_ZONE_GET: allocate an element from the zone */
+#define SCTP_ZONE_GET(zone, type) \
+	(type *)uma_zalloc(zone, M_NOWAIT)
+
+/* SCTP_ZONE_FREE: free an element back to the zone */
+#define SCTP_ZONE_FREE(zone, element) \
+	uma_zfree(zone, element)
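+/*
+ * A minimal life-cycle sketch of the zone macros (names illustrative):
+ *	SCTP_ZONE_INIT(zone, "sctpasoc", sizeof(struct sctp_tcb), max);
+ *	stcb = SCTP_ZONE_GET(zone, struct sctp_tcb);  (may be NULL, M_NOWAIT)
+ *	SCTP_ZONE_FREE(zone, stcb);
+ *	SCTP_ZONE_DESTROY(zone);
+ * uma_zone_set_max() caps how many elements the zone will hand out.
+ */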
+
+#define SCTP_HASH_INIT(size, hashmark) hashinit_flags(size, M_PCB, hashmark, HASH_NOWAIT)
+#define SCTP_HASH_FREE(table, hashmark) hashdestroy(table, M_PCB, hashmark)
+
+#define SCTP_M_COPYM m_copym
+
+/*
+ * timers
+ */
+#include <rtems/freebsd/sys/callout.h>
+typedef struct callout sctp_os_timer_t;
+
+
+#define SCTP_OS_TIMER_INIT(tmr) callout_init(tmr, 1)
+#define SCTP_OS_TIMER_START callout_reset
+#define SCTP_OS_TIMER_STOP callout_stop
+#define SCTP_OS_TIMER_STOP_DRAIN callout_drain
+#define SCTP_OS_TIMER_PENDING callout_pending
+#define SCTP_OS_TIMER_ACTIVE callout_active
+#define SCTP_OS_TIMER_DEACTIVATE callout_deactivate
+
+#define sctp_get_tick_count() (ticks)
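+/*
+ * Typical timer life cycle (sketch): SCTP_OS_TIMER_INIT() once, then
+ * SCTP_OS_TIMER_START(tmr, ticks, handler, arg) to (re)arm, and
+ * SCTP_OS_TIMER_STOP() on teardown. callout_init(tmr, 1) marks the
+ * callout MPSAFE, so the handler runs without the Giant lock.
+ */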
+
+#define SCTP_UNUSED __attribute__((unused))
+
+/*
+ * Functions
+ */
+/* Mbuf manipulation and access macros */
+#define SCTP_BUF_LEN(m) (m->m_len)
+#define SCTP_BUF_NEXT(m) (m->m_next)
+#define SCTP_BUF_NEXT_PKT(m) (m->m_nextpkt)
+#define SCTP_BUF_RESV_UF(m, size) m->m_data += size
+#define SCTP_BUF_AT(m, size) m->m_data + size
+#define SCTP_BUF_IS_EXTENDED(m) (m->m_flags & M_EXT)
+#define SCTP_BUF_EXTEND_SIZE(m) (m->m_ext.ext_size)
+#define SCTP_BUF_TYPE(m) (m->m_type)
+#define SCTP_BUF_RECVIF(m) (m->m_pkthdr.rcvif)
+#define SCTP_BUF_PREPEND M_PREPEND
+
+#define SCTP_ALIGN_TO_END(m, len) do { \
+	if (m->m_flags & M_PKTHDR) { \
+		MH_ALIGN(m, len); \
+	} else if ((m->m_flags & M_EXT) == 0) { \
+		M_ALIGN(m, len); \
+	} \
+} while (0)
+
+/* With the default packet log size of 65k, up to
+ * 4 threads can each be writing a 16k packet
+ * (4 * 16k = 64k) before we would hit a problem,
+ * so writers beyond this count need the lock.
+ */
+#define SCTP_PKTLOG_WRITERS_NEED_LOCK 3
+
+/*************************/
+/* MTU */
+/*************************/
+#define SCTP_GATHER_MTU_FROM_IFN_INFO(ifn, ifn_index, af) ((struct ifnet *)ifn)->if_mtu
+#define SCTP_GATHER_MTU_FROM_ROUTE(sctp_ifa, sa, rt) ((rt != NULL) ? rt->rt_rmx.rmx_mtu : 0)
+#define SCTP_GATHER_MTU_FROM_INTFC(sctp_ifn) ((sctp_ifn->ifn_p != NULL) ? ((struct ifnet *)(sctp_ifn->ifn_p))->if_mtu : 0)
+#define SCTP_SET_MTU_OF_ROUTE(sa, rt, mtu) do { \
+ if (rt != NULL) \
+ rt->rt_rmx.rmx_mtu = mtu; \
+ } while(0)
+
+/* (de-)register interface event notifications */
+#define SCTP_REGISTER_INTERFACE(ifhandle, af)
+#define SCTP_DEREGISTER_INTERFACE(ifhandle, af)
+
+
+/*************************/
+/* These are for logging */
+/*************************/
+/* return the base ext data pointer */
+#define SCTP_BUF_EXTEND_BASE(m) (m->m_ext.ext_buf)
+/* return the refcnt of the data pointer */
+#define SCTP_BUF_EXTEND_REFCNT(m) (*m->m_ext.ref_cnt)
+/* return any buffer-related flags; this is
+ * used beyond logging on Apple only.
+ */
+#define SCTP_BUF_GET_FLAGS(m) (m->m_flags)
+
+/* For BSD this just accesses the M_PKTHDR length,
+ * so it operates on an mbuf with the header flag set.
+ * Other OSes may have separate packet header and mbuf
+ * chain pointers... thus the macro.
+ */
+#define SCTP_HEADER_TO_CHAIN(m) (m)
+#define SCTP_DETACH_HEADER_FROM_CHAIN(m)
+#define SCTP_HEADER_LEN(m) (m->m_pkthdr.len)
+#define SCTP_GET_HEADER_FOR_OUTPUT(o_pak) 0
+#define SCTP_RELEASE_HEADER(m)
+#define SCTP_RELEASE_PKT(m) sctp_m_freem(m)
+#define SCTP_ENABLE_UDP_CSUM(m) do { \
+ m->m_pkthdr.csum_flags = CSUM_UDP; \
+ m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); \
+ } while (0)
+
+#define SCTP_GET_PKT_VRFID(m, vrf_id) ((vrf_id = SCTP_DEFAULT_VRFID) != SCTP_DEFAULT_VRFID)
+
+
+
+/* Attach the chain of data into the sendable packet. */
+#define SCTP_ATTACH_CHAIN(pak, m, packet_length) do { \
+ pak = m; \
+ pak->m_pkthdr.len = packet_length; \
+ } while(0)
+
+/* Other m_pkthdr type things */
+#define SCTP_IS_IT_BROADCAST(dst, m) ((m->m_flags & M_PKTHDR) ? in_broadcast(dst, m->m_pkthdr.rcvif) : 0)
+#define SCTP_IS_IT_LOOPBACK(m) ((m->m_flags & M_PKTHDR) && ((m->m_pkthdr.rcvif == NULL) || (m->m_pkthdr.rcvif->if_type == IFT_LOOP)))
+
+
+/* This converts any input packet header
+ * into the chain of data holders; for BSD
+ * it is a no-op.
+ */
+
+/* Macros for getting the length from the V4/V6 header */
+#define SCTP_GET_IPV4_LENGTH(iph) (iph->ip_len)
+#define SCTP_GET_IPV6_LENGTH(ip6) (ntohs(ip6->ip6_plen))
+
+/* get the v6 hop limit */
+#define SCTP_GET_HLIM(inp, ro) in6_selecthlim((struct in6pcb *)&inp->ip_inp.inp, (ro ? (ro->ro_rt ? (ro->ro_rt->rt_ifp) : (NULL)) : (NULL)))
+
+/* is the endpoint v6only? */
+#define SCTP_IPV6_V6ONLY(inp) (((struct inpcb *)inp)->inp_flags & IN6P_IPV6_V6ONLY)
+/* is the socket non-blocking? */
+#define SCTP_SO_IS_NBIO(so) ((so)->so_state & SS_NBIO)
+#define SCTP_SET_SO_NBIO(so) ((so)->so_state |= SS_NBIO)
+#define SCTP_CLEAR_SO_NBIO(so) ((so)->so_state &= ~SS_NBIO)
+/* get the socket type */
+#define SCTP_SO_TYPE(so) ((so)->so_type)
+/* reserve sb space for a socket */
+#define SCTP_SORESERVE(so, send, recv) soreserve(so, send, recv)
+/* wakeup a socket */
+#define SCTP_SOWAKEUP(so) wakeup(&(so)->so_timeo)
+/* clear the socket buffer state */
+#define SCTP_SB_CLEAR(sb) do { \
+	(sb).sb_cc = 0; \
+	(sb).sb_mb = NULL; \
+	(sb).sb_mbcnt = 0; \
+} while (0)
+
+#define SCTP_SB_LIMIT_RCV(so) so->so_rcv.sb_hiwat
+#define SCTP_SB_LIMIT_SND(so) so->so_snd.sb_hiwat
+
+/*
+ * routes, output, etc.
+ */
+typedef struct route sctp_route_t;
+typedef struct rtentry sctp_rtentry_t;
+
+#define SCTP_RTALLOC(ro, vrf_id) rtalloc_ign((struct route *)ro, 0UL)
+
+/* Future zero copy wakeup/send function */
+#define SCTP_ZERO_COPY_EVENT(inp, so)
+/* This re-pulses ourselves for the send buffer */
+#define SCTP_ZERO_COPY_SENDQ_EVENT(inp, so)
+
+/*
+ * IP output routines
+ */
+#define SCTP_IP_OUTPUT(result, o_pak, ro, stcb, vrf_id) do { \
+	int o_flgs = IP_RAWOUTPUT; \
+	struct sctp_tcb *local_stcb = stcb; \
+	if (local_stcb && \
+	    local_stcb->sctp_ep && \
+	    local_stcb->sctp_ep->sctp_socket) \
+		o_flgs |= local_stcb->sctp_ep->sctp_socket->so_options & SO_DONTROUTE; \
+	result = ip_output(o_pak, NULL, ro, o_flgs, 0, NULL); \
+} while (0)
+
+#define SCTP_IP6_OUTPUT(result, o_pak, ro, ifp, stcb, vrf_id) do { \
+	struct sctp_tcb *local_stcb = stcb; \
+	if (local_stcb && local_stcb->sctp_ep) \
+		result = ip6_output(o_pak, \
+		    ((struct in6pcb *)(local_stcb->sctp_ep))->in6p_outputopts, \
+		    (ro), 0, 0, ifp, NULL); \
+	else \
+		result = ip6_output(o_pak, NULL, (ro), 0, 0, ifp, NULL); \
+} while (0)
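+/*
+ * Note on the output macros above: SCTP_IP_OUTPUT() sends a fully
+ * formed raw IP packet (IP_RAWOUTPUT) and copies only SO_DONTROUTE
+ * from the socket options, while SCTP_IP6_OUTPUT() passes any per-pcb
+ * IPv6 output options along. A sketch of a call site (names
+ * illustrative):
+ *	SCTP_IP_OUTPUT(ret, o_pak, &ro, stcb, vrf_id);
+ */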
+
+struct mbuf *
+sctp_get_mbuf_for_msg(unsigned int space_needed,
+ int want_header, int how, int allonebuf, int type);
+
+
+/*
+ * SCTP AUTH
+ */
+#define HAVE_SHA2
+
+#define SCTP_READ_RANDOM(buf, len) read_random(buf, len)
+
+#ifdef USE_SCTP_SHA1
+#include <rtems/freebsd/netinet/sctp_sha1.h>
+#else
+#include <rtems/freebsd/crypto/sha1.h>
+/* map standard crypto API names */
+#define SHA1_Init SHA1Init
+#define SHA1_Update SHA1Update
+#define SHA1_Final(x,y) SHA1Final((caddr_t)x, y)
+#endif
+
+#if defined(HAVE_SHA2)
+#include <rtems/freebsd/crypto/sha2/sha2.h>
+#endif
+
+#endif
+
+#define SCTP_DECREMENT_AND_CHECK_REFCOUNT(addr) (atomic_fetchadd_int(addr, -1) == 1)
+#if defined(INVARIANTS)
+#define SCTP_SAVE_ATOMIC_DECREMENT(addr, val) \
+{ \
+ int32_t oldval; \
+ oldval = atomic_fetchadd_int(addr, -val); \
+ if (oldval < val) { \
+ panic("Counter goes negative"); \
+ } \
+}
+#else
+#define SCTP_SAVE_ATOMIC_DECREMENT(addr, val) \
+{ \
+ int32_t oldval; \
+ oldval = atomic_fetchadd_int(addr, -val); \
+ if (oldval < val) { \
+ *addr = 0; \
+ } \
+}
+#endif
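+/*
+ * atomic_fetchadd_int() returns the value the counter held *before*
+ * the add, so SCTP_DECREMENT_AND_CHECK_REFCOUNT() is true exactly
+ * when the caller dropped the last reference (1 -> 0); e.g.
+ * (illustrative caller)
+ *	if (SCTP_DECREMENT_AND_CHECK_REFCOUNT(&ifa->refcount))
+ *		sctp_free_ifa(ifa);
+ */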
diff --git a/rtems/freebsd/netinet/sctp_output.c b/rtems/freebsd/netinet/sctp_output.c
new file mode 100644
index 00000000..b0960f04
--- /dev/null
+++ b/rtems/freebsd/netinet/sctp_output.c
@@ -0,0 +1,13537 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* $KAME: sctp_output.c,v 1.46 2005/03/06 16:04:17 itojun Exp $ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/freebsd/netinet/sctp_os.h>
+#include <rtems/freebsd/sys/proc.h>
+#include <rtems/freebsd/netinet/sctp_var.h>
+#include <rtems/freebsd/netinet/sctp_sysctl.h>
+#include <rtems/freebsd/netinet/sctp_header.h>
+#include <rtems/freebsd/netinet/sctp_pcb.h>
+#include <rtems/freebsd/netinet/sctputil.h>
+#include <rtems/freebsd/netinet/sctp_output.h>
+#include <rtems/freebsd/netinet/sctp_uio.h>
+#include <rtems/freebsd/netinet/sctputil.h>
+#include <rtems/freebsd/netinet/sctp_auth.h>
+#include <rtems/freebsd/netinet/sctp_timer.h>
+#include <rtems/freebsd/netinet/sctp_asconf.h>
+#include <rtems/freebsd/netinet/sctp_indata.h>
+#include <rtems/freebsd/netinet/sctp_bsd_addr.h>
+#include <rtems/freebsd/netinet/sctp_input.h>
+#include <rtems/freebsd/netinet/sctp_crc32.h>
+#include <rtems/freebsd/netinet/udp.h>
+#include <rtems/freebsd/machine/in_cksum.h>
+
+
+
+#define SCTP_MAX_GAPS_INARRAY 4
+struct sack_track {
+	uint8_t right_edge;	/* mergeable on the right edge */
+	uint8_t left_edge;	/* mergeable on the left edge */
+ uint8_t num_entries;
+ uint8_t spare;
+ struct sctp_gap_ack_block gaps[SCTP_MAX_GAPS_INARRAY];
+};
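+
+/*
+ * sack_array below maps every 8-bit slice of the TSN mapping array to
+ * the gap-ack blocks it encodes (encoding inferred from the entries):
+ * bit i set means TSN offset i was received, each {start, end} pair
+ * describes one run of consecutive set bits, num_entries counts the
+ * runs, and right_edge/left_edge flag runs touching bit 0/bit 7 so
+ * that runs can be merged across adjacent bytes. E.g. index 0x05
+ * (bits 0 and 2) yields the two blocks {0, 0} and {2, 2}.
+ */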
+
+struct sack_track sack_array[256] = {
+ {0, 0, 0, 0, /* 0x00 */
+ {{0, 0},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 1, 0, /* 0x01 */
+ {{0, 0},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 1, 0, /* 0x02 */
+ {{1, 1},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 1, 0, /* 0x03 */
+ {{0, 1},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 1, 0, /* 0x04 */
+ {{2, 2},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x05 */
+ {{0, 0},
+ {2, 2},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 1, 0, /* 0x06 */
+ {{1, 2},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 1, 0, /* 0x07 */
+ {{0, 2},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 1, 0, /* 0x08 */
+ {{3, 3},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x09 */
+ {{0, 0},
+ {3, 3},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x0a */
+ {{1, 1},
+ {3, 3},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x0b */
+ {{0, 1},
+ {3, 3},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 1, 0, /* 0x0c */
+ {{2, 3},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x0d */
+ {{0, 0},
+ {2, 3},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 1, 0, /* 0x0e */
+ {{1, 3},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 1, 0, /* 0x0f */
+ {{0, 3},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 1, 0, /* 0x10 */
+ {{4, 4},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x11 */
+ {{0, 0},
+ {4, 4},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x12 */
+ {{1, 1},
+ {4, 4},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x13 */
+ {{0, 1},
+ {4, 4},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x14 */
+ {{2, 2},
+ {4, 4},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 3, 0, /* 0x15 */
+ {{0, 0},
+ {2, 2},
+ {4, 4},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x16 */
+ {{1, 2},
+ {4, 4},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x17 */
+ {{0, 2},
+ {4, 4},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 1, 0, /* 0x18 */
+ {{3, 4},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x19 */
+ {{0, 0},
+ {3, 4},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x1a */
+ {{1, 1},
+ {3, 4},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x1b */
+ {{0, 1},
+ {3, 4},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 1, 0, /* 0x1c */
+ {{2, 4},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x1d */
+ {{0, 0},
+ {2, 4},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 1, 0, /* 0x1e */
+ {{1, 4},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 1, 0, /* 0x1f */
+ {{0, 4},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 1, 0, /* 0x20 */
+ {{5, 5},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x21 */
+ {{0, 0},
+ {5, 5},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x22 */
+ {{1, 1},
+ {5, 5},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x23 */
+ {{0, 1},
+ {5, 5},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x24 */
+ {{2, 2},
+ {5, 5},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 3, 0, /* 0x25 */
+ {{0, 0},
+ {2, 2},
+ {5, 5},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x26 */
+ {{1, 2},
+ {5, 5},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x27 */
+ {{0, 2},
+ {5, 5},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x28 */
+ {{3, 3},
+ {5, 5},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 3, 0, /* 0x29 */
+ {{0, 0},
+ {3, 3},
+ {5, 5},
+ {0, 0}
+ }
+ },
+ {0, 0, 3, 0, /* 0x2a */
+ {{1, 1},
+ {3, 3},
+ {5, 5},
+ {0, 0}
+ }
+ },
+ {1, 0, 3, 0, /* 0x2b */
+ {{0, 1},
+ {3, 3},
+ {5, 5},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x2c */
+ {{2, 3},
+ {5, 5},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 3, 0, /* 0x2d */
+ {{0, 0},
+ {2, 3},
+ {5, 5},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x2e */
+ {{1, 3},
+ {5, 5},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x2f */
+ {{0, 3},
+ {5, 5},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 1, 0, /* 0x30 */
+ {{4, 5},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x31 */
+ {{0, 0},
+ {4, 5},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x32 */
+ {{1, 1},
+ {4, 5},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x33 */
+ {{0, 1},
+ {4, 5},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x34 */
+ {{2, 2},
+ {4, 5},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 3, 0, /* 0x35 */
+ {{0, 0},
+ {2, 2},
+ {4, 5},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x36 */
+ {{1, 2},
+ {4, 5},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x37 */
+ {{0, 2},
+ {4, 5},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 1, 0, /* 0x38 */
+ {{3, 5},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x39 */
+ {{0, 0},
+ {3, 5},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x3a */
+ {{1, 1},
+ {3, 5},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x3b */
+ {{0, 1},
+ {3, 5},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 1, 0, /* 0x3c */
+ {{2, 5},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x3d */
+ {{0, 0},
+ {2, 5},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 1, 0, /* 0x3e */
+ {{1, 5},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 1, 0, /* 0x3f */
+ {{0, 5},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 1, 0, /* 0x40 */
+ {{6, 6},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x41 */
+ {{0, 0},
+ {6, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x42 */
+ {{1, 1},
+ {6, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x43 */
+ {{0, 1},
+ {6, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x44 */
+ {{2, 2},
+ {6, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 3, 0, /* 0x45 */
+ {{0, 0},
+ {2, 2},
+ {6, 6},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x46 */
+ {{1, 2},
+ {6, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x47 */
+ {{0, 2},
+ {6, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x48 */
+ {{3, 3},
+ {6, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 3, 0, /* 0x49 */
+ {{0, 0},
+ {3, 3},
+ {6, 6},
+ {0, 0}
+ }
+ },
+ {0, 0, 3, 0, /* 0x4a */
+ {{1, 1},
+ {3, 3},
+ {6, 6},
+ {0, 0}
+ }
+ },
+ {1, 0, 3, 0, /* 0x4b */
+ {{0, 1},
+ {3, 3},
+ {6, 6},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x4c */
+ {{2, 3},
+ {6, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 3, 0, /* 0x4d */
+ {{0, 0},
+ {2, 3},
+ {6, 6},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x4e */
+ {{1, 3},
+ {6, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x4f */
+ {{0, 3},
+ {6, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x50 */
+ {{4, 4},
+ {6, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 3, 0, /* 0x51 */
+ {{0, 0},
+ {4, 4},
+ {6, 6},
+ {0, 0}
+ }
+ },
+ {0, 0, 3, 0, /* 0x52 */
+ {{1, 1},
+ {4, 4},
+ {6, 6},
+ {0, 0}
+ }
+ },
+ {1, 0, 3, 0, /* 0x53 */
+ {{0, 1},
+ {4, 4},
+ {6, 6},
+ {0, 0}
+ }
+ },
+ {0, 0, 3, 0, /* 0x54 */
+ {{2, 2},
+ {4, 4},
+ {6, 6},
+ {0, 0}
+ }
+ },
+ {1, 0, 4, 0, /* 0x55 */
+ {{0, 0},
+ {2, 2},
+ {4, 4},
+ {6, 6}
+ }
+ },
+ {0, 0, 3, 0, /* 0x56 */
+ {{1, 2},
+ {4, 4},
+ {6, 6},
+ {0, 0}
+ }
+ },
+ {1, 0, 3, 0, /* 0x57 */
+ {{0, 2},
+ {4, 4},
+ {6, 6},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x58 */
+ {{3, 4},
+ {6, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 3, 0, /* 0x59 */
+ {{0, 0},
+ {3, 4},
+ {6, 6},
+ {0, 0}
+ }
+ },
+ {0, 0, 3, 0, /* 0x5a */
+ {{1, 1},
+ {3, 4},
+ {6, 6},
+ {0, 0}
+ }
+ },
+ {1, 0, 3, 0, /* 0x5b */
+ {{0, 1},
+ {3, 4},
+ {6, 6},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x5c */
+ {{2, 4},
+ {6, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 3, 0, /* 0x5d */
+ {{0, 0},
+ {2, 4},
+ {6, 6},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x5e */
+ {{1, 4},
+ {6, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x5f */
+ {{0, 4},
+ {6, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 1, 0, /* 0x60 */
+ {{5, 6},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x61 */
+ {{0, 0},
+ {5, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x62 */
+ {{1, 1},
+ {5, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x63 */
+ {{0, 1},
+ {5, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x64 */
+ {{2, 2},
+ {5, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 3, 0, /* 0x65 */
+ {{0, 0},
+ {2, 2},
+ {5, 6},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x66 */
+ {{1, 2},
+ {5, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x67 */
+ {{0, 2},
+ {5, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x68 */
+ {{3, 3},
+ {5, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 3, 0, /* 0x69 */
+ {{0, 0},
+ {3, 3},
+ {5, 6},
+ {0, 0}
+ }
+ },
+ {0, 0, 3, 0, /* 0x6a */
+ {{1, 1},
+ {3, 3},
+ {5, 6},
+ {0, 0}
+ }
+ },
+ {1, 0, 3, 0, /* 0x6b */
+ {{0, 1},
+ {3, 3},
+ {5, 6},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x6c */
+ {{2, 3},
+ {5, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 3, 0, /* 0x6d */
+ {{0, 0},
+ {2, 3},
+ {5, 6},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x6e */
+ {{1, 3},
+ {5, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x6f */
+ {{0, 3},
+ {5, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 1, 0, /* 0x70 */
+ {{4, 6},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x71 */
+ {{0, 0},
+ {4, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x72 */
+ {{1, 1},
+ {4, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x73 */
+ {{0, 1},
+ {4, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x74 */
+ {{2, 2},
+ {4, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 3, 0, /* 0x75 */
+ {{0, 0},
+ {2, 2},
+ {4, 6},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x76 */
+ {{1, 2},
+ {4, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x77 */
+ {{0, 2},
+ {4, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 1, 0, /* 0x78 */
+ {{3, 6},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x79 */
+ {{0, 0},
+ {3, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 2, 0, /* 0x7a */
+ {{1, 1},
+ {3, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x7b */
+ {{0, 1},
+ {3, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 1, 0, /* 0x7c */
+ {{2, 6},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 2, 0, /* 0x7d */
+ {{0, 0},
+ {2, 6},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 0, 1, 0, /* 0x7e */
+ {{1, 6},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 0, 1, 0, /* 0x7f */
+ {{0, 6},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 1, 1, 0, /* 0x80 */
+ {{7, 7},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 2, 0, /* 0x81 */
+ {{0, 0},
+ {7, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0x82 */
+ {{1, 1},
+ {7, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 2, 0, /* 0x83 */
+ {{0, 1},
+ {7, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0x84 */
+ {{2, 2},
+ {7, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0x85 */
+ {{0, 0},
+ {2, 2},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0x86 */
+ {{1, 2},
+ {7, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 2, 0, /* 0x87 */
+ {{0, 2},
+ {7, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0x88 */
+ {{3, 3},
+ {7, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0x89 */
+ {{0, 0},
+ {3, 3},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 3, 0, /* 0x8a */
+ {{1, 1},
+ {3, 3},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0x8b */
+ {{0, 1},
+ {3, 3},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0x8c */
+ {{2, 3},
+ {7, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0x8d */
+ {{0, 0},
+ {2, 3},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0x8e */
+ {{1, 3},
+ {7, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 2, 0, /* 0x8f */
+ {{0, 3},
+ {7, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0x90 */
+ {{4, 4},
+ {7, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0x91 */
+ {{0, 0},
+ {4, 4},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 3, 0, /* 0x92 */
+ {{1, 1},
+ {4, 4},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0x93 */
+ {{0, 1},
+ {4, 4},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 3, 0, /* 0x94 */
+ {{2, 2},
+ {4, 4},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {1, 1, 4, 0, /* 0x95 */
+ {{0, 0},
+ {2, 2},
+ {4, 4},
+ {7, 7}
+ }
+ },
+ {0, 1, 3, 0, /* 0x96 */
+ {{1, 2},
+ {4, 4},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0x97 */
+ {{0, 2},
+ {4, 4},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0x98 */
+ {{3, 4},
+ {7, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0x99 */
+ {{0, 0},
+ {3, 4},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 3, 0, /* 0x9a */
+ {{1, 1},
+ {3, 4},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0x9b */
+ {{0, 1},
+ {3, 4},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0x9c */
+ {{2, 4},
+ {7, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0x9d */
+ {{0, 0},
+ {2, 4},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0x9e */
+ {{1, 4},
+ {7, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 2, 0, /* 0x9f */
+ {{0, 4},
+ {7, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0xa0 */
+ {{5, 5},
+ {7, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0xa1 */
+ {{0, 0},
+ {5, 5},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 3, 0, /* 0xa2 */
+ {{1, 1},
+ {5, 5},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0xa3 */
+ {{0, 1},
+ {5, 5},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 3, 0, /* 0xa4 */
+ {{2, 2},
+ {5, 5},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {1, 1, 4, 0, /* 0xa5 */
+ {{0, 0},
+ {2, 2},
+ {5, 5},
+ {7, 7}
+ }
+ },
+ {0, 1, 3, 0, /* 0xa6 */
+ {{1, 2},
+ {5, 5},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0xa7 */
+ {{0, 2},
+ {5, 5},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 3, 0, /* 0xa8 */
+ {{3, 3},
+ {5, 5},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {1, 1, 4, 0, /* 0xa9 */
+ {{0, 0},
+ {3, 3},
+ {5, 5},
+ {7, 7}
+ }
+ },
+ {0, 1, 4, 0, /* 0xaa */
+ {{1, 1},
+ {3, 3},
+ {5, 5},
+ {7, 7}
+ }
+ },
+ {1, 1, 4, 0, /* 0xab */
+ {{0, 1},
+ {3, 3},
+ {5, 5},
+ {7, 7}
+ }
+ },
+ {0, 1, 3, 0, /* 0xac */
+ {{2, 3},
+ {5, 5},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {1, 1, 4, 0, /* 0xad */
+ {{0, 0},
+ {2, 3},
+ {5, 5},
+ {7, 7}
+ }
+ },
+ {0, 1, 3, 0, /* 0xae */
+ {{1, 3},
+ {5, 5},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0xaf */
+ {{0, 3},
+ {5, 5},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0xb0 */
+ {{4, 5},
+ {7, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0xb1 */
+ {{0, 0},
+ {4, 5},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 3, 0, /* 0xb2 */
+ {{1, 1},
+ {4, 5},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0xb3 */
+ {{0, 1},
+ {4, 5},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 3, 0, /* 0xb4 */
+ {{2, 2},
+ {4, 5},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {1, 1, 4, 0, /* 0xb5 */
+ {{0, 0},
+ {2, 2},
+ {4, 5},
+ {7, 7}
+ }
+ },
+ {0, 1, 3, 0, /* 0xb6 */
+ {{1, 2},
+ {4, 5},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0xb7 */
+ {{0, 2},
+ {4, 5},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0xb8 */
+ {{3, 5},
+ {7, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0xb9 */
+ {{0, 0},
+ {3, 5},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 3, 0, /* 0xba */
+ {{1, 1},
+ {3, 5},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0xbb */
+ {{0, 1},
+ {3, 5},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0xbc */
+ {{2, 5},
+ {7, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0xbd */
+ {{0, 0},
+ {2, 5},
+ {7, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0xbe */
+ {{1, 5},
+ {7, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 2, 0, /* 0xbf */
+ {{0, 5},
+ {7, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 1, 1, 0, /* 0xc0 */
+ {{6, 7},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 2, 0, /* 0xc1 */
+ {{0, 0},
+ {6, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0xc2 */
+ {{1, 1},
+ {6, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 2, 0, /* 0xc3 */
+ {{0, 1},
+ {6, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0xc4 */
+ {{2, 2},
+ {6, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0xc5 */
+ {{0, 0},
+ {2, 2},
+ {6, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0xc6 */
+ {{1, 2},
+ {6, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 2, 0, /* 0xc7 */
+ {{0, 2},
+ {6, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0xc8 */
+ {{3, 3},
+ {6, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0xc9 */
+ {{0, 0},
+ {3, 3},
+ {6, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 3, 0, /* 0xca */
+ {{1, 1},
+ {3, 3},
+ {6, 7},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0xcb */
+ {{0, 1},
+ {3, 3},
+ {6, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0xcc */
+ {{2, 3},
+ {6, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0xcd */
+ {{0, 0},
+ {2, 3},
+ {6, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0xce */
+ {{1, 3},
+ {6, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 2, 0, /* 0xcf */
+ {{0, 3},
+ {6, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0xd0 */
+ {{4, 4},
+ {6, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0xd1 */
+ {{0, 0},
+ {4, 4},
+ {6, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 3, 0, /* 0xd2 */
+ {{1, 1},
+ {4, 4},
+ {6, 7},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0xd3 */
+ {{0, 1},
+ {4, 4},
+ {6, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 3, 0, /* 0xd4 */
+ {{2, 2},
+ {4, 4},
+ {6, 7},
+ {0, 0}
+ }
+ },
+ {1, 1, 4, 0, /* 0xd5 */
+ {{0, 0},
+ {2, 2},
+ {4, 4},
+ {6, 7}
+ }
+ },
+ {0, 1, 3, 0, /* 0xd6 */
+ {{1, 2},
+ {4, 4},
+ {6, 7},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0xd7 */
+ {{0, 2},
+ {4, 4},
+ {6, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0xd8 */
+ {{3, 4},
+ {6, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0xd9 */
+ {{0, 0},
+ {3, 4},
+ {6, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 3, 0, /* 0xda */
+ {{1, 1},
+ {3, 4},
+ {6, 7},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0xdb */
+ {{0, 1},
+ {3, 4},
+ {6, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0xdc */
+ {{2, 4},
+ {6, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0xdd */
+ {{0, 0},
+ {2, 4},
+ {6, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0xde */
+ {{1, 4},
+ {6, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 2, 0, /* 0xdf */
+ {{0, 4},
+ {6, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 1, 1, 0, /* 0xe0 */
+ {{5, 7},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 2, 0, /* 0xe1 */
+ {{0, 0},
+ {5, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0xe2 */
+ {{1, 1},
+ {5, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 2, 0, /* 0xe3 */
+ {{0, 1},
+ {5, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0xe4 */
+ {{2, 2},
+ {5, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0xe5 */
+ {{0, 0},
+ {2, 2},
+ {5, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0xe6 */
+ {{1, 2},
+ {5, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 2, 0, /* 0xe7 */
+ {{0, 2},
+ {5, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0xe8 */
+ {{3, 3},
+ {5, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0xe9 */
+ {{0, 0},
+ {3, 3},
+ {5, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 3, 0, /* 0xea */
+ {{1, 1},
+ {3, 3},
+ {5, 7},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0xeb */
+ {{0, 1},
+ {3, 3},
+ {5, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0xec */
+ {{2, 3},
+ {5, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0xed */
+ {{0, 0},
+ {2, 3},
+ {5, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0xee */
+ {{1, 3},
+ {5, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 2, 0, /* 0xef */
+ {{0, 3},
+ {5, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 1, 1, 0, /* 0xf0 */
+ {{4, 7},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 2, 0, /* 0xf1 */
+ {{0, 0},
+ {4, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0xf2 */
+ {{1, 1},
+ {4, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 2, 0, /* 0xf3 */
+ {{0, 1},
+ {4, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0xf4 */
+ {{2, 2},
+ {4, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 3, 0, /* 0xf5 */
+ {{0, 0},
+ {2, 2},
+ {4, 7},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0xf6 */
+ {{1, 2},
+ {4, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 2, 0, /* 0xf7 */
+ {{0, 2},
+ {4, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 1, 1, 0, /* 0xf8 */
+ {{3, 7},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 2, 0, /* 0xf9 */
+ {{0, 0},
+ {3, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 1, 2, 0, /* 0xfa */
+ {{1, 1},
+ {3, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 2, 0, /* 0xfb */
+ {{0, 1},
+ {3, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 1, 1, 0, /* 0xfc */
+ {{2, 7},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 2, 0, /* 0xfd */
+ {{0, 0},
+ {2, 7},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {0, 1, 1, 0, /* 0xfe */
+ {{1, 7},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ },
+ {1, 1, 1, 0, /* 0xff */
+ {{0, 7},
+ {0, 0},
+ {0, 0},
+ {0, 0}
+ }
+ }
+};
+
+
+int
+sctp_is_address_in_scope(struct sctp_ifa *ifa,
+ int ipv4_addr_legal,
+ int ipv6_addr_legal,
+ int loopback_scope,
+ int ipv4_local_scope,
+ int local_scope,
+ int site_scope,
+ int do_update)
+{
+ if ((loopback_scope == 0) &&
+ (ifa->ifn_p) && SCTP_IFN_IS_IFT_LOOP(ifa->ifn_p)) {
+		/*
+		 * skip loopback if not in scope
+		 */
+ return (0);
+ }
+ switch (ifa->address.sa.sa_family) {
+ case AF_INET:
+ if (ipv4_addr_legal) {
+ struct sockaddr_in *sin;
+
+ sin = (struct sockaddr_in *)&ifa->address.sin;
+ if (sin->sin_addr.s_addr == 0) {
+				/* not in scope, unspecified */
+ return (0);
+ }
+ if ((ipv4_local_scope == 0) &&
+ (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
+ /* private address not in scope */
+ return (0);
+ }
+ } else {
+ return (0);
+ }
+ break;
+#ifdef INET6
+ case AF_INET6:
+ if (ipv6_addr_legal) {
+ struct sockaddr_in6 *sin6;
+
+ /*
+ * Must update the flags, bummer, which means any
+ * IFA locks must now be applied HERE <->
+ */
+ if (do_update) {
+ sctp_gather_internal_ifa_flags(ifa);
+ }
+ if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
+ return (0);
+ }
+ /* ok to use deprecated addresses? */
+ sin6 = (struct sockaddr_in6 *)&ifa->address.sin6;
+ if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
+				/* skip unspecified addresses */
+ return (0);
+ }
+ if ( /* (local_scope == 0) && */
+ (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr))) {
+ return (0);
+ }
+ if ((site_scope == 0) &&
+ (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
+ return (0);
+ }
+ } else {
+ return (0);
+ }
+ break;
+#endif
+ default:
+ return (0);
+ }
+ return (1);
+}
+
+static struct mbuf *
+sctp_add_addr_to_mbuf(struct mbuf *m, struct sctp_ifa *ifa)
+{
+ struct sctp_paramhdr *parmh;
+ struct mbuf *mret;
+ int len;
+
+ if (ifa->address.sa.sa_family == AF_INET) {
+ len = sizeof(struct sctp_ipv4addr_param);
+ } else if (ifa->address.sa.sa_family == AF_INET6) {
+ len = sizeof(struct sctp_ipv6addr_param);
+ } else {
+ /* unknown type */
+ return (m);
+ }
+ if (M_TRAILINGSPACE(m) >= len) {
+		/* easy case: we just drop it on the end */
+ parmh = (struct sctp_paramhdr *)(SCTP_BUF_AT(m, SCTP_BUF_LEN(m)));
+ mret = m;
+ } else {
+ /* Need more space */
+ mret = m;
+ while (SCTP_BUF_NEXT(mret) != NULL) {
+ mret = SCTP_BUF_NEXT(mret);
+ }
+ SCTP_BUF_NEXT(mret) = sctp_get_mbuf_for_msg(len, 0, M_DONTWAIT, 1, MT_DATA);
+ if (SCTP_BUF_NEXT(mret) == NULL) {
+ /* We are hosed, can't add more addresses */
+ return (m);
+ }
+ mret = SCTP_BUF_NEXT(mret);
+ parmh = mtod(mret, struct sctp_paramhdr *);
+ }
+ /* now add the parameter */
+ switch (ifa->address.sa.sa_family) {
+ case AF_INET:
+ {
+ struct sctp_ipv4addr_param *ipv4p;
+ struct sockaddr_in *sin;
+
+ sin = (struct sockaddr_in *)&ifa->address.sin;
+ ipv4p = (struct sctp_ipv4addr_param *)parmh;
+ parmh->param_type = htons(SCTP_IPV4_ADDRESS);
+ parmh->param_length = htons(len);
+ ipv4p->addr = sin->sin_addr.s_addr;
+ SCTP_BUF_LEN(mret) += len;
+ break;
+ }
+#ifdef INET6
+ case AF_INET6:
+ {
+ struct sctp_ipv6addr_param *ipv6p;
+ struct sockaddr_in6 *sin6;
+
+ sin6 = (struct sockaddr_in6 *)&ifa->address.sin6;
+ ipv6p = (struct sctp_ipv6addr_param *)parmh;
+ parmh->param_type = htons(SCTP_IPV6_ADDRESS);
+ parmh->param_length = htons(len);
+ memcpy(ipv6p->addr, &sin6->sin6_addr,
+ sizeof(ipv6p->addr));
+ /* clear embedded scope in the address */
+ in6_clearscope((struct in6_addr *)ipv6p->addr);
+ SCTP_BUF_LEN(mret) += len;
+ break;
+ }
+#endif
+ default:
+ return (m);
+ }
+ return (mret);
+}
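+
+/*
+ * The parameters appended above follow the RFC 4960 address parameter
+ * layout: a 4-byte sctp_paramhdr (type SCTP_IPV4_ADDRESS or
+ * SCTP_IPV6_ADDRESS plus length) followed by the raw 4- or 16-byte
+ * address, for totals of 8 and 20 bytes respectively.
+ */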
+
+
+struct mbuf *
+sctp_add_addresses_to_i_ia(struct sctp_inpcb *inp, struct sctp_scoping *scope,
+ struct mbuf *m_at, int cnt_inits_to)
+{
+ struct sctp_vrf *vrf = NULL;
+ int cnt, limit_out = 0, total_count;
+ uint32_t vrf_id;
+
+ vrf_id = inp->def_vrf_id;
+ SCTP_IPI_ADDR_RLOCK();
+ vrf = sctp_find_vrf(vrf_id);
+ if (vrf == NULL) {
+ SCTP_IPI_ADDR_RUNLOCK();
+ return (m_at);
+ }
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
+ struct sctp_ifa *sctp_ifap;
+ struct sctp_ifn *sctp_ifnp;
+
+ cnt = cnt_inits_to;
+ if (vrf->total_ifa_count > SCTP_COUNT_LIMIT) {
+ limit_out = 1;
+ cnt = SCTP_ADDRESS_LIMIT;
+ goto skip_count;
+ }
+ LIST_FOREACH(sctp_ifnp, &vrf->ifnlist, next_ifn) {
+ if ((scope->loopback_scope == 0) &&
+ SCTP_IFN_IS_IFT_LOOP(sctp_ifnp)) {
+ /*
+ * Skip loopback devices if loopback_scope
+ * not set
+ */
+ continue;
+ }
+ LIST_FOREACH(sctp_ifap, &sctp_ifnp->ifalist, next_ifa) {
+ if (sctp_is_address_in_scope(sctp_ifap,
+ scope->ipv4_addr_legal,
+ scope->ipv6_addr_legal,
+ scope->loopback_scope,
+ scope->ipv4_local_scope,
+ scope->local_scope,
+ scope->site_scope, 1) == 0) {
+ continue;
+ }
+ cnt++;
+ if (cnt > SCTP_ADDRESS_LIMIT) {
+ break;
+ }
+ }
+ if (cnt > SCTP_ADDRESS_LIMIT) {
+ break;
+ }
+ }
+skip_count:
+ if (cnt > 1) {
+ total_count = 0;
+ LIST_FOREACH(sctp_ifnp, &vrf->ifnlist, next_ifn) {
+ cnt = 0;
+ if ((scope->loopback_scope == 0) &&
+ SCTP_IFN_IS_IFT_LOOP(sctp_ifnp)) {
+ /*
+ * Skip loopback devices if
+ * loopback_scope not set
+ */
+ continue;
+ }
+ LIST_FOREACH(sctp_ifap, &sctp_ifnp->ifalist, next_ifa) {
+ if (sctp_is_address_in_scope(sctp_ifap,
+ scope->ipv4_addr_legal,
+ scope->ipv6_addr_legal,
+ scope->loopback_scope,
+ scope->ipv4_local_scope,
+ scope->local_scope,
+ scope->site_scope, 0) == 0) {
+ continue;
+ }
+ m_at = sctp_add_addr_to_mbuf(m_at, sctp_ifap);
+ if (limit_out) {
+ cnt++;
+ total_count++;
+ if (cnt >= 2) {
+					/*
+					 * two from each
+					 * interface
+					 */
+ break;
+ }
+ if (total_count > SCTP_ADDRESS_LIMIT) {
+ /* No more addresses */
+ break;
+ }
+ }
+ }
+ }
+ }
+ } else {
+ struct sctp_laddr *laddr;
+
+ cnt = cnt_inits_to;
+		/* First, how many? */
+ LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
+ if (laddr->ifa == NULL) {
+ continue;
+ }
+ if (laddr->ifa->localifa_flags & SCTP_BEING_DELETED)
+				/*
+				 * Address being deleted by the system;
+				 * don't list it.
+				 */
+ continue;
+ if (laddr->action == SCTP_DEL_IP_ADDRESS) {
+				/*
+				 * Address being deleted on this ep;
+				 * don't list it.
+				 */
+ continue;
+ }
+ if (sctp_is_address_in_scope(laddr->ifa,
+ scope->ipv4_addr_legal,
+ scope->ipv6_addr_legal,
+ scope->loopback_scope,
+ scope->ipv4_local_scope,
+ scope->local_scope,
+ scope->site_scope, 1) == 0) {
+ continue;
+ }
+ cnt++;
+ }
+ if (cnt > SCTP_ADDRESS_LIMIT) {
+ limit_out = 1;
+ }
+ /*
+ * To get through a NAT we only list addresses if we have
+ * more than one. That way if you just bind a single address
+ * we let the source of the init dictate our address.
+ */
+ if (cnt > 1) {
+ LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
+ cnt = 0;
+ if (laddr->ifa == NULL) {
+ continue;
+ }
+ if (laddr->ifa->localifa_flags & SCTP_BEING_DELETED)
+ continue;
+
+ if (sctp_is_address_in_scope(laddr->ifa,
+ scope->ipv4_addr_legal,
+ scope->ipv6_addr_legal,
+ scope->loopback_scope,
+ scope->ipv4_local_scope,
+ scope->local_scope,
+ scope->site_scope, 0) == 0) {
+ continue;
+ }
+ m_at = sctp_add_addr_to_mbuf(m_at, laddr->ifa);
+ cnt++;
+ if (cnt >= SCTP_ADDRESS_LIMIT) {
+ break;
+ }
+ }
+ }
+ }
+ SCTP_IPI_ADDR_RUNLOCK();
+ return (m_at);
+}
+
+static struct sctp_ifa *
+sctp_is_ifa_addr_preferred(struct sctp_ifa *ifa,
+ uint8_t dest_is_loop,
+ uint8_t dest_is_priv,
+ sa_family_t fam)
+{
+ uint8_t dest_is_global = 0;
+
+ /* dest_is_priv is true if destination is a private address */
+	/* dest_is_loop is true if destination is a loopback address */
+
+ /**
+	 * Here we determine if it's a preferred address. A preferred address
+	 * means it is the same scope or higher scope than the destination.
+ * L = loopback, P = private, G = global
+ * -----------------------------------------
+ * src | dest | result
+ * ----------------------------------------
+ * L | L | yes
+ * -----------------------------------------
+ * P | L | yes-v4 no-v6
+ * -----------------------------------------
+ * G | L | yes-v4 no-v6
+ * -----------------------------------------
+ * L | P | no
+ * -----------------------------------------
+ * P | P | yes
+ * -----------------------------------------
+ * G | P | no
+ * -----------------------------------------
+ * L | G | no
+ * -----------------------------------------
+ * P | G | no
+ * -----------------------------------------
+ * G | G | yes
+ * -----------------------------------------
+ */
+
+ if (ifa->address.sa.sa_family != fam) {
+ /* forget mis-matched family */
+ return (NULL);
+ }
+ if ((dest_is_priv == 0) && (dest_is_loop == 0)) {
+ dest_is_global = 1;
+ }
+ SCTPDBG(SCTP_DEBUG_OUTPUT2, "Is destination preferred:");
+ SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &ifa->address.sa);
+ /* Ok the address may be ok */
+ if (fam == AF_INET6) {
+ /* ok to use deprecated addresses? no lets not! */
+ if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
+ SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:1\n");
+ return (NULL);
+ }
+ if (ifa->src_is_priv && !ifa->src_is_loop) {
+ if (dest_is_loop) {
+ SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:2\n");
+ return (NULL);
+ }
+ }
+ if (ifa->src_is_glob) {
+ if (dest_is_loop) {
+ SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:3\n");
+ return (NULL);
+ }
+ }
+ }
+ /*
+	 * Now that we know what is what, implement our table. This could in
+ * theory be done slicker (it used to be), but this is
+ * straightforward and easier to validate :-)
+ */
+ SCTPDBG(SCTP_DEBUG_OUTPUT3, "src_loop:%d src_priv:%d src_glob:%d\n",
+ ifa->src_is_loop, ifa->src_is_priv, ifa->src_is_glob);
+ SCTPDBG(SCTP_DEBUG_OUTPUT3, "dest_loop:%d dest_priv:%d dest_glob:%d\n",
+ dest_is_loop, dest_is_priv, dest_is_global);
+
+ if ((ifa->src_is_loop) && (dest_is_priv)) {
+ SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:4\n");
+ return (NULL);
+ }
+ if ((ifa->src_is_glob) && (dest_is_priv)) {
+ SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:5\n");
+ return (NULL);
+ }
+ if ((ifa->src_is_loop) && (dest_is_global)) {
+ SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:6\n");
+ return (NULL);
+ }
+ if ((ifa->src_is_priv) && (dest_is_global)) {
+ SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:7\n");
+ return (NULL);
+ }
+ SCTPDBG(SCTP_DEBUG_OUTPUT3, "YES\n");
+	/* it's a preferred address */
+ return (ifa);
+}
+
+static struct sctp_ifa *
+sctp_is_ifa_addr_acceptable(struct sctp_ifa *ifa,
+ uint8_t dest_is_loop,
+ uint8_t dest_is_priv,
+ sa_family_t fam)
+{
+ uint8_t dest_is_global = 0;
+
+	/*
+	 * Here we determine if it's an acceptable address. An acceptable
+	 * address means it is the same scope or higher scope but we can
+	 * allow for NAT, which means it's ok to have a global dest and a
+	 * private src.
+	 *
+	 * L = loopback, P = private, G = global
+	 * -----------------------------------------
+	 *  src | dest | result
+	 * -----------------------------------------
+	 *   L  |  L   | yes
+	 * -----------------------------------------
+	 *   P  |  L   | yes-v4 no-v6
+	 * -----------------------------------------
+	 *   G  |  L   | yes
+	 * -----------------------------------------
+	 *   L  |  P   | no
+	 * -----------------------------------------
+	 *   P  |  P   | yes
+	 * -----------------------------------------
+	 *   G  |  P   | yes - May not work
+	 * -----------------------------------------
+	 *   L  |  G   | no
+	 * -----------------------------------------
+	 *   P  |  G   | yes - May not work
+	 * -----------------------------------------
+	 *   G  |  G   | yes
+	 * -----------------------------------------
+	 */
+
+ if (ifa->address.sa.sa_family != fam) {
+		/* forget non-matching family */
+ return (NULL);
+ }
+ /* Ok the address may be ok */
+ if ((dest_is_loop == 0) && (dest_is_priv == 0)) {
+ dest_is_global = 1;
+ }
+ if (fam == AF_INET6) {
+ /* ok to use deprecated addresses? */
+ if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
+ return (NULL);
+ }
+ if (ifa->src_is_priv) {
+ /* Special case, linklocal to loop */
+ if (dest_is_loop)
+ return (NULL);
+ }
+ }
+ /*
+ * Now that we know what is what, implement our table. This could in
+ * theory be done slicker (it used to be), but this is
+ * straightforward and easier to validate :-)
+ */
+ if ((ifa->src_is_loop == 1) && (dest_is_priv)) {
+ return (NULL);
+ }
+ if ((ifa->src_is_loop == 1) && (dest_is_global)) {
+ return (NULL);
+ }
+	/* it's an acceptable address */
+ return (ifa);
+}
+
+int
+sctp_is_addr_restricted(struct sctp_tcb *stcb, struct sctp_ifa *ifa)
+{
+ struct sctp_laddr *laddr;
+
+ if (stcb == NULL) {
+ /* There are no restrictions, no TCB :-) */
+ return (0);
+ }
+ LIST_FOREACH(laddr, &stcb->asoc.sctp_restricted_addrs, sctp_nxt_addr) {
+ if (laddr->ifa == NULL) {
+ SCTPDBG(SCTP_DEBUG_OUTPUT1, "%s: NULL ifa\n",
+ __FUNCTION__);
+ continue;
+ }
+ if (laddr->ifa == ifa) {
+ /* Yes it is on the list */
+ return (1);
+ }
+ }
+ return (0);
+}
+
+
+int
+sctp_is_addr_in_ep(struct sctp_inpcb *inp, struct sctp_ifa *ifa)
+{
+ struct sctp_laddr *laddr;
+
+ if (ifa == NULL)
+ return (0);
+ LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
+ if (laddr->ifa == NULL) {
+ SCTPDBG(SCTP_DEBUG_OUTPUT1, "%s: NULL ifa\n",
+ __FUNCTION__);
+ continue;
+ }
+ if ((laddr->ifa == ifa) && laddr->action == 0)
+ /* same pointer */
+ return (1);
+ }
+ return (0);
+}
+
+
+
+static struct sctp_ifa *
+sctp_choose_boundspecific_inp(struct sctp_inpcb *inp,
+ sctp_route_t * ro,
+ uint32_t vrf_id,
+ int non_asoc_addr_ok,
+ uint8_t dest_is_priv,
+ uint8_t dest_is_loop,
+ sa_family_t fam)
+{
+ struct sctp_laddr *laddr, *starting_point;
+ void *ifn;
+ int resettotop = 0;
+ struct sctp_ifn *sctp_ifn;
+ struct sctp_ifa *sctp_ifa, *sifa;
+ struct sctp_vrf *vrf;
+ uint32_t ifn_index;
+
+ vrf = sctp_find_vrf(vrf_id);
+ if (vrf == NULL)
+ return (NULL);
+
+ ifn = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
+ ifn_index = SCTP_GET_IF_INDEX_FROM_ROUTE(ro);
+ sctp_ifn = sctp_find_ifn(ifn, ifn_index);
+ /*
+	 * First question: is the ifn we will emit on in our list? If so, we
+	 * want such an address. Note that we first look for a preferred
+ * address.
+ */
+ if (sctp_ifn) {
+ /* is a preferred one on the interface we route out? */
+ LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
+ if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
+ (non_asoc_addr_ok == 0))
+ continue;
+ sifa = sctp_is_ifa_addr_preferred(sctp_ifa,
+ dest_is_loop,
+ dest_is_priv, fam);
+ if (sifa == NULL)
+ continue;
+ if (sctp_is_addr_in_ep(inp, sifa)) {
+ atomic_add_int(&sifa->refcount, 1);
+ return (sifa);
+ }
+ }
+ }
+ /*
+	 * ok, now we need to find one on the list of addresses. We can't
+	 * get one on the emitting interface, so let's first look for a
+	 * preferred one; failing that, an acceptable one; otherwise... we
+ * return NULL.
+ */
+ starting_point = inp->next_addr_touse;
+once_again:
+ if (inp->next_addr_touse == NULL) {
+ inp->next_addr_touse = LIST_FIRST(&inp->sctp_addr_list);
+ resettotop = 1;
+ }
+ for (laddr = inp->next_addr_touse; laddr;
+ laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
+ if (laddr->ifa == NULL) {
+ /* address has been removed */
+ continue;
+ }
+ if (laddr->action == SCTP_DEL_IP_ADDRESS) {
+ /* address is being deleted */
+ continue;
+ }
+ sifa = sctp_is_ifa_addr_preferred(laddr->ifa, dest_is_loop,
+ dest_is_priv, fam);
+ if (sifa == NULL)
+ continue;
+ atomic_add_int(&sifa->refcount, 1);
+ return (sifa);
+ }
+ if (resettotop == 0) {
+ inp->next_addr_touse = NULL;
+ goto once_again;
+ }
+ inp->next_addr_touse = starting_point;
+ resettotop = 0;
+once_again_too:
+ if (inp->next_addr_touse == NULL) {
+ inp->next_addr_touse = LIST_FIRST(&inp->sctp_addr_list);
+ resettotop = 1;
+ }
+ /* ok, what about an acceptable address in the inp */
+ for (laddr = inp->next_addr_touse; laddr;
+ laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
+ if (laddr->ifa == NULL) {
+ /* address has been removed */
+ continue;
+ }
+ if (laddr->action == SCTP_DEL_IP_ADDRESS) {
+ /* address is being deleted */
+ continue;
+ }
+ sifa = sctp_is_ifa_addr_acceptable(laddr->ifa, dest_is_loop,
+ dest_is_priv, fam);
+ if (sifa == NULL)
+ continue;
+ atomic_add_int(&sifa->refcount, 1);
+ return (sifa);
+ }
+ if (resettotop == 0) {
+ inp->next_addr_touse = NULL;
+ goto once_again_too;
+ }
+ /*
+	 * no bound address can be a source for the destination;
+	 * we are in trouble
+ */
+ return (NULL);
+}
+
+
+
+static struct sctp_ifa *
+sctp_choose_boundspecific_stcb(struct sctp_inpcb *inp,
+ struct sctp_tcb *stcb,
+ struct sctp_nets *net,
+ sctp_route_t * ro,
+ uint32_t vrf_id,
+ uint8_t dest_is_priv,
+ uint8_t dest_is_loop,
+ int non_asoc_addr_ok,
+ sa_family_t fam)
+{
+ struct sctp_laddr *laddr, *starting_point;
+ void *ifn;
+ struct sctp_ifn *sctp_ifn;
+ struct sctp_ifa *sctp_ifa, *sifa;
+ uint8_t start_at_beginning = 0;
+ struct sctp_vrf *vrf;
+ uint32_t ifn_index;
+
+ vrf = sctp_find_vrf(vrf_id);
+ if (vrf == NULL)
+ return (NULL);
+
+ ifn = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
+ ifn_index = SCTP_GET_IF_INDEX_FROM_ROUTE(ro);
+ sctp_ifn = sctp_find_ifn(ifn, ifn_index);
+
+ /*
+ * first question, is the ifn we will emit on in our list? If so,
+ * we want that one. First we look for a preferred. Second, we go
+ * for an acceptable.
+ */
+ if (sctp_ifn) {
+ /* first try for a preferred address on the ep */
+ LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
+ if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) && (non_asoc_addr_ok == 0))
+ continue;
+ if (sctp_is_addr_in_ep(inp, sctp_ifa)) {
+ sifa = sctp_is_ifa_addr_preferred(sctp_ifa, dest_is_loop, dest_is_priv, fam);
+ if (sifa == NULL)
+ continue;
+ if (((non_asoc_addr_ok == 0) &&
+ (sctp_is_addr_restricted(stcb, sifa))) ||
+ (non_asoc_addr_ok &&
+ (sctp_is_addr_restricted(stcb, sifa)) &&
+ (!sctp_is_addr_pending(stcb, sifa)))) {
+ /* on the no-no list */
+ continue;
+ }
+ atomic_add_int(&sifa->refcount, 1);
+ return (sifa);
+ }
+ }
+ /* next try for an acceptable address on the ep */
+ LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
+ if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) && (non_asoc_addr_ok == 0))
+ continue;
+ if (sctp_is_addr_in_ep(inp, sctp_ifa)) {
+ sifa = sctp_is_ifa_addr_acceptable(sctp_ifa, dest_is_loop, dest_is_priv, fam);
+ if (sifa == NULL)
+ continue;
+ if (((non_asoc_addr_ok == 0) &&
+ (sctp_is_addr_restricted(stcb, sifa))) ||
+ (non_asoc_addr_ok &&
+ (sctp_is_addr_restricted(stcb, sifa)) &&
+ (!sctp_is_addr_pending(stcb, sifa)))) {
+ /* on the no-no list */
+ continue;
+ }
+ atomic_add_int(&sifa->refcount, 1);
+ return (sifa);
+ }
+ }
+
+ }
+ /*
+	 * if we can't find one like that then we must look at all bound
+	 * addresses, picking first a preferred one, then an acceptable one.
+ */
+ starting_point = stcb->asoc.last_used_address;
+sctp_from_the_top:
+ if (stcb->asoc.last_used_address == NULL) {
+ start_at_beginning = 1;
+ stcb->asoc.last_used_address = LIST_FIRST(&inp->sctp_addr_list);
+ }
+ /* search beginning with the last used address */
+ for (laddr = stcb->asoc.last_used_address; laddr;
+ laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
+ if (laddr->ifa == NULL) {
+ /* address has been removed */
+ continue;
+ }
+ if (laddr->action == SCTP_DEL_IP_ADDRESS) {
+ /* address is being deleted */
+ continue;
+ }
+ sifa = sctp_is_ifa_addr_preferred(laddr->ifa, dest_is_loop, dest_is_priv, fam);
+ if (sifa == NULL)
+ continue;
+ if (((non_asoc_addr_ok == 0) &&
+ (sctp_is_addr_restricted(stcb, sifa))) ||
+ (non_asoc_addr_ok &&
+ (sctp_is_addr_restricted(stcb, sifa)) &&
+ (!sctp_is_addr_pending(stcb, sifa)))) {
+ /* on the no-no list */
+ continue;
+ }
+ stcb->asoc.last_used_address = laddr;
+ atomic_add_int(&sifa->refcount, 1);
+ return (sifa);
+ }
+ if (start_at_beginning == 0) {
+ stcb->asoc.last_used_address = NULL;
+ goto sctp_from_the_top;
+ }
+ /* now try for any higher scope than the destination */
+ stcb->asoc.last_used_address = starting_point;
+ start_at_beginning = 0;
+sctp_from_the_top2:
+ if (stcb->asoc.last_used_address == NULL) {
+ start_at_beginning = 1;
+ stcb->asoc.last_used_address = LIST_FIRST(&inp->sctp_addr_list);
+ }
+ /* search beginning with the last used address */
+ for (laddr = stcb->asoc.last_used_address; laddr;
+ laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
+ if (laddr->ifa == NULL) {
+ /* address has been removed */
+ continue;
+ }
+ if (laddr->action == SCTP_DEL_IP_ADDRESS) {
+ /* address is being deleted */
+ continue;
+ }
+ sifa = sctp_is_ifa_addr_acceptable(laddr->ifa, dest_is_loop,
+ dest_is_priv, fam);
+ if (sifa == NULL)
+ continue;
+ if (((non_asoc_addr_ok == 0) &&
+ (sctp_is_addr_restricted(stcb, sifa))) ||
+ (non_asoc_addr_ok &&
+ (sctp_is_addr_restricted(stcb, sifa)) &&
+ (!sctp_is_addr_pending(stcb, sifa)))) {
+ /* on the no-no list */
+ continue;
+ }
+ stcb->asoc.last_used_address = laddr;
+ atomic_add_int(&sifa->refcount, 1);
+ return (sifa);
+ }
+ if (start_at_beginning == 0) {
+ stcb->asoc.last_used_address = NULL;
+ goto sctp_from_the_top2;
+ }
+ return (NULL);
+}
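+
+/*
+ * Both bound-specific selectors above share the same two-pass
+ * rotation: resume scanning at the last used address so source
+ * selection rotates across the bound set, wrap to the list head
+ * once, and only on the second full pass fall back from "preferred"
+ * to merely "acceptable" addresses.
+ */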
+
+static struct sctp_ifa *
+sctp_select_nth_preferred_addr_from_ifn_boundall(struct sctp_ifn *ifn,
+ struct sctp_tcb *stcb,
+ int non_asoc_addr_ok,
+ uint8_t dest_is_loop,
+ uint8_t dest_is_priv,
+ int addr_wanted,
+ sa_family_t fam,
+ sctp_route_t * ro
+)
+{
+ struct sctp_ifa *ifa, *sifa;
+ int num_eligible_addr = 0;
+
+#ifdef INET6
+ struct sockaddr_in6 sin6, lsa6;
+
+ if (fam == AF_INET6) {
+ memcpy(&sin6, &ro->ro_dst, sizeof(struct sockaddr_in6));
+ (void)sa6_recoverscope(&sin6);
+ }
+#endif /* INET6 */
+ LIST_FOREACH(ifa, &ifn->ifalist, next_ifa) {
+ if ((ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
+ (non_asoc_addr_ok == 0))
+ continue;
+ sifa = sctp_is_ifa_addr_preferred(ifa, dest_is_loop,
+ dest_is_priv, fam);
+ if (sifa == NULL)
+ continue;
+#ifdef INET6
+ if (fam == AF_INET6 &&
+ dest_is_loop &&
+ sifa->src_is_loop && sifa->src_is_priv) {
+ /*
+			 * don't allow fe80::1 to be a src on loop ::1; we
+			 * don't list it to the peer, so we would get an
+ * abort.
+ */
+ continue;
+ }
+ if (fam == AF_INET6 &&
+ IN6_IS_ADDR_LINKLOCAL(&sifa->address.sin6.sin6_addr) &&
+ IN6_IS_ADDR_LINKLOCAL(&sin6.sin6_addr)) {
+ /*
+ * link-local <-> link-local must belong to the same
+ * scope.
+ */
+ memcpy(&lsa6, &sifa->address.sin6, sizeof(struct sockaddr_in6));
+ (void)sa6_recoverscope(&lsa6);
+ if (sin6.sin6_scope_id != lsa6.sin6_scope_id) {
+ continue;
+ }
+ }
+#endif /* INET6 */
+
+ /*
+		 * Check if the IPv6 address matches the next-hop. In the
+		 * mobile case, the old IPv6 address may not be deleted from
+		 * the interface, so the interface has both the previous and
+		 * the new addresses; we should use the one corresponding to
+		 * the next-hop. (by micchie)
+ */
+#ifdef INET6
+ if (stcb && fam == AF_INET6 &&
+ sctp_is_mobility_feature_on(stcb->sctp_ep, SCTP_MOBILITY_BASE)) {
+ if (sctp_v6src_match_nexthop(&sifa->address.sin6, ro)
+ == 0) {
+ continue;
+ }
+ }
+#endif
+ /* Avoid topologically incorrect IPv4 address */
+ if (stcb && fam == AF_INET &&
+ sctp_is_mobility_feature_on(stcb->sctp_ep, SCTP_MOBILITY_BASE)) {
+ if (sctp_v4src_match_nexthop(sifa, ro) == 0) {
+ continue;
+ }
+ }
+ if (stcb) {
+ if (sctp_is_address_in_scope(ifa,
+ stcb->asoc.ipv4_addr_legal,
+ stcb->asoc.ipv6_addr_legal,
+ stcb->asoc.loopback_scope,
+ stcb->asoc.ipv4_local_scope,
+ stcb->asoc.local_scope,
+ stcb->asoc.site_scope, 0) == 0) {
+ continue;
+ }
+ if (((non_asoc_addr_ok == 0) &&
+ (sctp_is_addr_restricted(stcb, sifa))) ||
+ (non_asoc_addr_ok &&
+ (sctp_is_addr_restricted(stcb, sifa)) &&
+ (!sctp_is_addr_pending(stcb, sifa)))) {
+ /*
+ * It is restricted for some reason..
+ * probably not yet added.
+ */
+ continue;
+ }
+ }
+ if (num_eligible_addr >= addr_wanted) {
+ return (sifa);
+ }
+ num_eligible_addr++;
+ }
+ return (NULL);
+}
+
+
+static int
+sctp_count_num_preferred_boundall(struct sctp_ifn *ifn,
+ struct sctp_tcb *stcb,
+ int non_asoc_addr_ok,
+ uint8_t dest_is_loop,
+ uint8_t dest_is_priv,
+ sa_family_t fam)
+{
+ struct sctp_ifa *ifa, *sifa;
+ int num_eligible_addr = 0;
+
+ LIST_FOREACH(ifa, &ifn->ifalist, next_ifa) {
+ if ((ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
+ (non_asoc_addr_ok == 0)) {
+ continue;
+ }
+ sifa = sctp_is_ifa_addr_preferred(ifa, dest_is_loop,
+ dest_is_priv, fam);
+ if (sifa == NULL) {
+ continue;
+ }
+ if (stcb) {
+ if (sctp_is_address_in_scope(ifa,
+ stcb->asoc.ipv4_addr_legal,
+ stcb->asoc.ipv6_addr_legal,
+ stcb->asoc.loopback_scope,
+ stcb->asoc.ipv4_local_scope,
+ stcb->asoc.local_scope,
+ stcb->asoc.site_scope, 0) == 0) {
+ continue;
+ }
+ if (((non_asoc_addr_ok == 0) &&
+ (sctp_is_addr_restricted(stcb, sifa))) ||
+ (non_asoc_addr_ok &&
+ (sctp_is_addr_restricted(stcb, sifa)) &&
+ (!sctp_is_addr_pending(stcb, sifa)))) {
+ /*
+ * It is restricted for some reason..
+ * probably not yet added.
+ */
+ continue;
+ }
+ }
+ num_eligible_addr++;
+ }
+ return (num_eligible_addr);
+}
+
+static struct sctp_ifa *
+sctp_choose_boundall(struct sctp_inpcb *inp,
+ struct sctp_tcb *stcb,
+ struct sctp_nets *net,
+ sctp_route_t * ro,
+ uint32_t vrf_id,
+ uint8_t dest_is_priv,
+ uint8_t dest_is_loop,
+ int non_asoc_addr_ok,
+ sa_family_t fam)
+{
+ int cur_addr_num = 0, num_preferred = 0;
+ void *ifn;
+ struct sctp_ifn *sctp_ifn, *looked_at = NULL, *emit_ifn;
+ struct sctp_ifa *sctp_ifa, *sifa;
+ uint32_t ifn_index;
+ struct sctp_vrf *vrf;
+
+	/*-
+	 * For bound-all we can use any address in the association.
+	 * If non_asoc_addr_ok is set we can use any address (at least in
+	 * theory), so we look for preferred addresses first. If we find
+	 * one, we use it. Otherwise we try to get an address on the emit
+	 * interface, which we should be able to do (unless
+	 * non_asoc_addr_ok is false and we are routed out that way). In
+	 * the cases where we can't use the address of the interface we
+	 * go through all the ifn's looking for an address we can use and
+	 * fill that in. Punting means we send back address 0, which will
+	 * probably cause problems: IP will then fill in the address of
+	 * the route's ifn, which we probably already rejected, i.e. here
+	 * comes an abort :-<.
+	 */
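+	/*-
+	 * Summary of the search below (matching the labels in this
+	 * function): the first attempt tries a preferred address on the
+	 * emit (routed) interface; plan_b tries a preferred address on
+	 * any other interface; plan_c tries an acceptable address on the
+	 * emit interface; plan_d tries an acceptable address on any
+	 * interface.
+	 */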
+ vrf = sctp_find_vrf(vrf_id);
+ if (vrf == NULL)
+ return (NULL);
+
+ ifn = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
+ ifn_index = SCTP_GET_IF_INDEX_FROM_ROUTE(ro);
+ emit_ifn = looked_at = sctp_ifn = sctp_find_ifn(ifn, ifn_index);
+ if (sctp_ifn == NULL) {
+ /* ?? We don't have this guy ?? */
+ SCTPDBG(SCTP_DEBUG_OUTPUT2, "No ifn emit interface?\n");
+ goto bound_all_plan_b;
+ }
+ SCTPDBG(SCTP_DEBUG_OUTPUT2, "ifn_index:%d name:%s is emit interface\n",
+ ifn_index, sctp_ifn->ifn_name);
+
+ if (net) {
+ cur_addr_num = net->indx_of_eligible_next_to_use;
+ }
+ num_preferred = sctp_count_num_preferred_boundall(sctp_ifn,
+ stcb,
+ non_asoc_addr_ok,
+ dest_is_loop,
+ dest_is_priv, fam);
+ SCTPDBG(SCTP_DEBUG_OUTPUT2, "Found %d preferred source addresses for intf:%s\n",
+ num_preferred, sctp_ifn->ifn_name);
+ if (num_preferred == 0) {
+ /*
+ * no eligible addresses, we must use some other interface
+ * address if we can find one.
+ */
+ goto bound_all_plan_b;
+ }
+	/*
+	 * Ok, num_preferred is set with how many we can use; this may
+	 * vary from call to call due to addresses being deprecated,
+	 * etc.
+	 */
+ if (cur_addr_num >= num_preferred) {
+ cur_addr_num = 0;
+ }
+ /*
+ * select the nth address from the list (where cur_addr_num is the
+ * nth) and 0 is the first one, 1 is the second one etc...
+ */
+ SCTPDBG(SCTP_DEBUG_OUTPUT2, "cur_addr_num:%d\n", cur_addr_num);
+
+ sctp_ifa = sctp_select_nth_preferred_addr_from_ifn_boundall(sctp_ifn, stcb, non_asoc_addr_ok, dest_is_loop,
+ dest_is_priv, cur_addr_num, fam, ro);
+
+	/* If sctp_ifa is NULL, something changed; fall through to plan B. */
+ if (sctp_ifa) {
+ atomic_add_int(&sctp_ifa->refcount, 1);
+ if (net) {
+ /* save off where the next one we will want */
+ net->indx_of_eligible_next_to_use = cur_addr_num + 1;
+ }
+ return (sctp_ifa);
+ }
+	/*
+	 * plan_b: Look at all interfaces and find a preferred address.
+	 * If none is preferred, fall through to plan_c.
+	 */
+bound_all_plan_b:
+ SCTPDBG(SCTP_DEBUG_OUTPUT2, "Trying Plan B\n");
+ LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
+ SCTPDBG(SCTP_DEBUG_OUTPUT2, "Examine interface %s\n",
+ sctp_ifn->ifn_name);
+ if (dest_is_loop == 0 && SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
+ /* wrong base scope */
+ SCTPDBG(SCTP_DEBUG_OUTPUT2, "skip\n");
+ continue;
+ }
+ if ((sctp_ifn == looked_at) && looked_at) {
+ /* already looked at this guy */
+ SCTPDBG(SCTP_DEBUG_OUTPUT2, "already seen\n");
+ continue;
+ }
+ num_preferred = sctp_count_num_preferred_boundall(sctp_ifn, stcb, non_asoc_addr_ok,
+ dest_is_loop, dest_is_priv, fam);
+ SCTPDBG(SCTP_DEBUG_OUTPUT2,
+ "Found ifn:%p %d preferred source addresses\n",
+ ifn, num_preferred);
+ if (num_preferred == 0) {
+ /* None on this interface. */
+ SCTPDBG(SCTP_DEBUG_OUTPUT2, "No prefered -- skipping to next\n");
+ continue;
+ }
+ SCTPDBG(SCTP_DEBUG_OUTPUT2,
+ "num preferred:%d on interface:%p cur_addr_num:%d\n",
+ num_preferred, sctp_ifn, cur_addr_num);
+
+		/*
+		 * Ok, num_preferred is set with how many we can use; this
+		 * may vary from call to call due to addresses being
+		 * deprecated, etc.
+		 */
+ if (cur_addr_num >= num_preferred) {
+ cur_addr_num = 0;
+ }
+ sifa = sctp_select_nth_preferred_addr_from_ifn_boundall(sctp_ifn, stcb, non_asoc_addr_ok, dest_is_loop,
+ dest_is_priv, cur_addr_num, fam, ro);
+ if (sifa == NULL)
+ continue;
+ if (net) {
+ net->indx_of_eligible_next_to_use = cur_addr_num + 1;
+ SCTPDBG(SCTP_DEBUG_OUTPUT2, "we selected %d\n",
+ cur_addr_num);
+ SCTPDBG(SCTP_DEBUG_OUTPUT2, "Source:");
+ SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &sifa->address.sa);
+ SCTPDBG(SCTP_DEBUG_OUTPUT2, "Dest:");
+ SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &net->ro._l_addr.sa);
+ }
+ atomic_add_int(&sifa->refcount, 1);
+ return (sifa);
+
+ }
+
+ /* plan_c: do we have an acceptable address on the emit interface */
+ SCTPDBG(SCTP_DEBUG_OUTPUT2, "Trying Plan C: find acceptable on interface\n");
+ if (emit_ifn == NULL) {
+ goto plan_d;
+ }
+ LIST_FOREACH(sctp_ifa, &emit_ifn->ifalist, next_ifa) {
+ if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
+ (non_asoc_addr_ok == 0))
+ continue;
+ sifa = sctp_is_ifa_addr_acceptable(sctp_ifa, dest_is_loop,
+ dest_is_priv, fam);
+ if (sifa == NULL)
+ continue;
+ if (stcb) {
+ if (sctp_is_address_in_scope(sifa,
+ stcb->asoc.ipv4_addr_legal,
+ stcb->asoc.ipv6_addr_legal,
+ stcb->asoc.loopback_scope,
+ stcb->asoc.ipv4_local_scope,
+ stcb->asoc.local_scope,
+ stcb->asoc.site_scope, 0) == 0) {
+ continue;
+ }
+ if (((non_asoc_addr_ok == 0) &&
+ (sctp_is_addr_restricted(stcb, sifa))) ||
+ (non_asoc_addr_ok &&
+ (sctp_is_addr_restricted(stcb, sifa)) &&
+ (!sctp_is_addr_pending(stcb, sifa)))) {
+ /*
+ * It is restricted for some reason..
+ * probably not yet added.
+ */
+ continue;
+ }
+ }
+ atomic_add_int(&sifa->refcount, 1);
+ return (sifa);
+ }
+plan_d:
+	/*
+	 * plan_d: We are in trouble. No preferred address on the emit
+	 * interface, no preferred address on any interface, and not even
+	 * an acceptable address on the emit interface. Go out and see if
+	 * we can find an acceptable address somewhere amongst all
+	 * interfaces.
+	 */
+ SCTPDBG(SCTP_DEBUG_OUTPUT2, "Trying Plan D\n");
+ LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
+ if (dest_is_loop == 0 && SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
+ /* wrong base scope */
+ continue;
+ }
+ if ((sctp_ifn == looked_at) && looked_at)
+ /* already looked at this guy */
+ continue;
+
+ LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
+ if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
+ (non_asoc_addr_ok == 0))
+ continue;
+ sifa = sctp_is_ifa_addr_acceptable(sctp_ifa,
+ dest_is_loop,
+ dest_is_priv, fam);
+ if (sifa == NULL)
+ continue;
+ if (stcb) {
+ if (sctp_is_address_in_scope(sifa,
+ stcb->asoc.ipv4_addr_legal,
+ stcb->asoc.ipv6_addr_legal,
+ stcb->asoc.loopback_scope,
+ stcb->asoc.ipv4_local_scope,
+ stcb->asoc.local_scope,
+ stcb->asoc.site_scope, 0) == 0) {
+ continue;
+ }
+ if (((non_asoc_addr_ok == 0) &&
+ (sctp_is_addr_restricted(stcb, sifa))) ||
+ (non_asoc_addr_ok &&
+ (sctp_is_addr_restricted(stcb, sifa)) &&
+ (!sctp_is_addr_pending(stcb, sifa)))) {
+ /*
+ * It is restricted for some
+ * reason.. probably not yet added.
+ */
+ continue;
+ }
+ }
+ atomic_add_int(&sifa->refcount, 1);
+ return (sifa);
+ }
+ }
+	/*
+	 * Ok, we can find NO address to source from: every candidate is
+	 * on our restricted list and either non_asoc_addr_ok is not set
+	 * or the address is not pending. We can't source this packet :-(
+	 */
+ return (NULL);
+}
+
+
+
+/* tcb may be NULL */
+struct sctp_ifa *
+sctp_source_address_selection(struct sctp_inpcb *inp,
+ struct sctp_tcb *stcb,
+ sctp_route_t * ro,
+ struct sctp_nets *net,
+ int non_asoc_addr_ok, uint32_t vrf_id)
+{
+ struct sockaddr_in *to = (struct sockaddr_in *)&ro->ro_dst;
+
+#ifdef INET6
+ struct sockaddr_in6 *to6 = (struct sockaddr_in6 *)&ro->ro_dst;
+
+#endif
+ struct sctp_ifa *answer;
+ uint8_t dest_is_priv, dest_is_loop;
+ sa_family_t fam;
+
+	/*-
+	 * Rules:
+	 * - Find the route if needed; cache it if I can.
+	 * - Look at the interface address in the route: is it in the
+	 *   bound list? If so we have the best source.
+	 * - If not, we must rotate amongst the addresses.
+	 *
+	 * Caveats and issues
+	 *
+	 * Do we need to pay attention to scope? We can be sourcing from
+	 * or sending to either a private or a global address. Drawing it
+	 * out:
+	 *
+	 * For V4
+	 * ------------------------------------------
+	 *     source    *    dest    *  result
+	 * -----------------------------------------
+	 * <a> Private   *   Global   *  NAT
+	 * -----------------------------------------
+	 * <b> Private   *   Private  *  No problem
+	 * -----------------------------------------
+	 * <c> Global    *   Private  *  Huh, how will this work?
+	 * -----------------------------------------
+	 * <d> Global    *   Global   *  No problem
+	 * ------------------------------------------
+	 *
+	 * For V6
+	 * ------------------------------------------
+	 *     source    *    dest      *  result
+	 * -----------------------------------------
+	 * <a> Linklocal *   Global    *
+	 * -----------------------------------------
+	 * <b> Linklocal *   Linklocal *  No problem
+	 * -----------------------------------------
+	 * <c> Global    *   Linklocal *  Huh, how will this work?
+	 * -----------------------------------------
+	 * <d> Global    *   Global    *  No problem
+	 * ------------------------------------------
+	 *
+	 * Add to that what happens when multiple addresses are assigned
+	 * to an interface: the ifa's on an ifn form a linked list, so
+	 * one interface can have more than one IP address. What if we
+	 * have both a private and a global address? Do we then use the
+	 * context of the destination to sort out which one is best? And
+	 * what about NATs? Sending P->G may get you a NAT translation,
+	 * or should you prefer the G that is on the interface?
+	 *
+	 * Decisions:
+	 *
+	 * - Count the number of addresses on the interface.
+	 * - If it is one, no problem except case <c>; for <a> we will
+	 *   assume a NAT out there.
+	 * - If there is more than one, we need to worry about scope P or
+	 *   G. We should prefer G -> G and P -> P if possible; then fall
+	 *   back to mixed types, with G -> P as a last-ditch one.
+	 * - The above all works for bound-all; for bound-specific we use
+	 *   the same concept but only consider the bound addresses. If
+	 *   the bound set is NOT assigned to the interface then we must
+	 *   rotate amongst the bound addresses.
+	 */
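+	/*-
+	 * Illustrative example of the V4 table above: sourcing from a
+	 * private address toward a global destination (case <a>) relies
+	 * on a NAT along the path, while private to private (case <b>)
+	 * needs no translation at all.
+	 */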
+ if (ro->ro_rt == NULL) {
+ /*
+ * Need a route to cache.
+ */
+ SCTP_RTALLOC(ro, vrf_id);
+ }
+ if (ro->ro_rt == NULL) {
+ return (NULL);
+ }
+ fam = to->sin_family;
+ dest_is_priv = dest_is_loop = 0;
+ /* Setup our scopes for the destination */
+ switch (fam) {
+ case AF_INET:
+ /* Scope based on outbound address */
+ if (IN4_ISLOOPBACK_ADDRESS(&to->sin_addr)) {
+ dest_is_loop = 1;
+ if (net != NULL) {
+ /* mark it as local */
+ net->addr_is_local = 1;
+ }
+ } else if ((IN4_ISPRIVATE_ADDRESS(&to->sin_addr))) {
+ dest_is_priv = 1;
+ }
+ break;
+#ifdef INET6
+ case AF_INET6:
+ /* Scope based on outbound address */
+ if (IN6_IS_ADDR_LOOPBACK(&to6->sin6_addr) ||
+ SCTP_ROUTE_IS_REAL_LOOP(ro)) {
+ /*
+ * If the address is a loopback address, which
+ * consists of "::1" OR "fe80::1%lo0", we are
+ * loopback scope. But we don't use dest_is_priv
+ * (link local addresses).
+ */
+ dest_is_loop = 1;
+ if (net != NULL) {
+ /* mark it as local */
+ net->addr_is_local = 1;
+ }
+ } else if (IN6_IS_ADDR_LINKLOCAL(&to6->sin6_addr)) {
+ dest_is_priv = 1;
+ }
+ break;
+#endif
+ }
+ SCTPDBG(SCTP_DEBUG_OUTPUT2, "Select source addr for:");
+ SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)to);
+ SCTP_IPI_ADDR_RLOCK();
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
+ /*
+ * Bound all case
+ */
+ answer = sctp_choose_boundall(inp, stcb, net, ro, vrf_id,
+ dest_is_priv, dest_is_loop,
+ non_asoc_addr_ok, fam);
+ SCTP_IPI_ADDR_RUNLOCK();
+ return (answer);
+ }
+ /*
+ * Subset bound case
+ */
+ if (stcb) {
+ answer = sctp_choose_boundspecific_stcb(inp, stcb, net, ro,
+ vrf_id, dest_is_priv,
+ dest_is_loop,
+ non_asoc_addr_ok, fam);
+ } else {
+ answer = sctp_choose_boundspecific_inp(inp, ro, vrf_id,
+ non_asoc_addr_ok,
+ dest_is_priv,
+ dest_is_loop, fam);
+ }
+ SCTP_IPI_ADDR_RUNLOCK();
+ return (answer);
+}
+
+static int
+sctp_find_cmsg(int c_type, void *data, struct mbuf *control, int cpsize)
+{
+ struct cmsghdr cmh;
+ int tlen, at;
+
+ tlen = SCTP_BUF_LEN(control);
+ at = 0;
+ /*
+ * Independent of how many mbufs, find the c_type inside the control
+ * structure and copy out the data.
+ */
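+	/*-
+	 * The control mbuf carries a sequence of aligned cmsgs:
+	 * [cmsghdr|data|pad][cmsghdr|data|pad]... so each iteration
+	 * advances 'at' by CMSG_ALIGN()ed lengths until the wanted
+	 * cmsg_type at level IPPROTO_SCTP is found.
+	 */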
+ while (at < tlen) {
+ if ((tlen - at) < (int)CMSG_ALIGN(sizeof(cmh))) {
+ /* not enough room for one more we are done. */
+ return (0);
+ }
+ m_copydata(control, at, sizeof(cmh), (caddr_t)&cmh);
+ if (((int)cmh.cmsg_len + at) > tlen) {
+ /*
+			 * this is really messed up since there is not enough
+ * data here to cover the cmsg header. We are done.
+ */
+ return (0);
+ }
+ if ((cmh.cmsg_level == IPPROTO_SCTP) &&
+ (c_type == cmh.cmsg_type)) {
+ /* found the one we want, copy it out */
+ at += CMSG_ALIGN(sizeof(struct cmsghdr));
+ if ((int)(cmh.cmsg_len - CMSG_ALIGN(sizeof(struct cmsghdr))) < cpsize) {
+ /*
+ * space of cmsg_len after header not big
+ * enough
+ */
+ return (0);
+ }
+ m_copydata(control, at, cpsize, data);
+ return (1);
+ } else {
+ at += CMSG_ALIGN(cmh.cmsg_len);
+ if (cmh.cmsg_len == 0) {
+ break;
+ }
+ }
+ }
+ /* not found */
+ return (0);
+}
+
+static struct mbuf *
+sctp_add_cookie(struct sctp_inpcb *inp, struct mbuf *init, int init_offset,
+ struct mbuf *initack, int initack_offset, struct sctp_state_cookie *stc_in, uint8_t ** signature)
+{
+ struct mbuf *copy_init, *copy_initack, *m_at, *sig, *mret;
+ struct sctp_state_cookie *stc;
+ struct sctp_paramhdr *ph;
+ uint8_t *foo;
+ int sig_offset;
+ uint16_t cookie_sz;
+
+ mret = NULL;
+ mret = sctp_get_mbuf_for_msg((sizeof(struct sctp_state_cookie) +
+ sizeof(struct sctp_paramhdr)), 0,
+ M_DONTWAIT, 1, MT_DATA);
+ if (mret == NULL) {
+ return (NULL);
+ }
+ copy_init = SCTP_M_COPYM(init, init_offset, M_COPYALL, M_DONTWAIT);
+ if (copy_init == NULL) {
+ sctp_m_freem(mret);
+ return (NULL);
+ }
+#ifdef SCTP_MBUF_LOGGING
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
+ struct mbuf *mat;
+
+ mat = copy_init;
+ while (mat) {
+ if (SCTP_BUF_IS_EXTENDED(mat)) {
+ sctp_log_mb(mat, SCTP_MBUF_ICOPY);
+ }
+ mat = SCTP_BUF_NEXT(mat);
+ }
+ }
+#endif
+ copy_initack = SCTP_M_COPYM(initack, initack_offset, M_COPYALL,
+ M_DONTWAIT);
+ if (copy_initack == NULL) {
+ sctp_m_freem(mret);
+ sctp_m_freem(copy_init);
+ return (NULL);
+ }
+#ifdef SCTP_MBUF_LOGGING
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
+ struct mbuf *mat;
+
+ mat = copy_initack;
+ while (mat) {
+ if (SCTP_BUF_IS_EXTENDED(mat)) {
+ sctp_log_mb(mat, SCTP_MBUF_ICOPY);
+ }
+ mat = SCTP_BUF_NEXT(mat);
+ }
+ }
+#endif
+	/* easy part: we just drop it on the end */
+ ph = mtod(mret, struct sctp_paramhdr *);
+ SCTP_BUF_LEN(mret) = sizeof(struct sctp_state_cookie) +
+ sizeof(struct sctp_paramhdr);
+ stc = (struct sctp_state_cookie *)((caddr_t)ph +
+ sizeof(struct sctp_paramhdr));
+ ph->param_type = htons(SCTP_STATE_COOKIE);
+ ph->param_length = 0; /* fill in at the end */
+ /* Fill in the stc cookie data */
+ memcpy(stc, stc_in, sizeof(struct sctp_state_cookie));
+
+ /* tack the INIT and then the INIT-ACK onto the chain */
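+	/*-
+	 * Resulting cookie layout in the mbuf chain:
+	 * [paramhdr | state cookie][copy of INIT][copy of INIT-ACK][signature]
+	 * cookie_sz accumulates the length of every piece so the param
+	 * header length can be filled in at the end.
+	 */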
+ cookie_sz = 0;
+ m_at = mret;
+ for (m_at = mret; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
+ cookie_sz += SCTP_BUF_LEN(m_at);
+ if (SCTP_BUF_NEXT(m_at) == NULL) {
+ SCTP_BUF_NEXT(m_at) = copy_init;
+ break;
+ }
+ }
+
+ for (m_at = copy_init; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
+ cookie_sz += SCTP_BUF_LEN(m_at);
+ if (SCTP_BUF_NEXT(m_at) == NULL) {
+ SCTP_BUF_NEXT(m_at) = copy_initack;
+ break;
+ }
+ }
+
+ for (m_at = copy_initack; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
+ cookie_sz += SCTP_BUF_LEN(m_at);
+ if (SCTP_BUF_NEXT(m_at) == NULL) {
+ break;
+ }
+ }
+ sig = sctp_get_mbuf_for_msg(SCTP_SECRET_SIZE, 0, M_DONTWAIT, 1, MT_DATA);
+ if (sig == NULL) {
+ /* no space, so free the entire chain */
+ sctp_m_freem(mret);
+ return (NULL);
+ }
+ SCTP_BUF_LEN(sig) = 0;
+ SCTP_BUF_NEXT(m_at) = sig;
+ sig_offset = 0;
+ foo = (uint8_t *) (mtod(sig, caddr_t)+sig_offset);
+ memset(foo, 0, SCTP_SIGNATURE_SIZE);
+ *signature = foo;
+ SCTP_BUF_LEN(sig) += SCTP_SIGNATURE_SIZE;
+ cookie_sz += SCTP_SIGNATURE_SIZE;
+ ph->param_length = htons(cookie_sz);
+ return (mret);
+}
+
+
+static uint8_t
+sctp_get_ect(struct sctp_tcb *stcb,
+ struct sctp_tmit_chunk *chk)
+{
+ uint8_t this_random;
+
+ /* Huh? */
+ if (SCTP_BASE_SYSCTL(sctp_ecn_enable) == 0)
+ return (0);
+
+ if (SCTP_BASE_SYSCTL(sctp_ecn_nonce) == 0)
+ /* no nonce, always return ECT0 */
+ return (SCTP_ECT0_BIT);
+
+ if (stcb->asoc.peer_supports_ecn_nonce == 0) {
+		/* Peer does NOT support it, so we send ECT0 only */
+ return (SCTP_ECT0_BIT);
+ }
+ if (chk == NULL)
+ return (SCTP_ECT0_BIT);
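+	/*-
+	 * ECN nonce: consume one random bit per chunk. A 32-bit random
+	 * value is cached in hb_random_values[0..3]; hb_random_idx
+	 * selects the byte and hb_ect_randombit the bit within it. Once
+	 * all 32 bits are used, a fresh random value is drawn below.
+	 */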
+
+ if ((stcb->asoc.hb_random_idx > 3) ||
+ ((stcb->asoc.hb_random_idx == 3) &&
+ (stcb->asoc.hb_ect_randombit > 7))) {
+ uint32_t rndval;
+
+warp_drive_sa:
+ rndval = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep);
+ memcpy(stcb->asoc.hb_random_values, &rndval,
+ sizeof(stcb->asoc.hb_random_values));
+ this_random = stcb->asoc.hb_random_values[0];
+ stcb->asoc.hb_random_idx = 0;
+ stcb->asoc.hb_ect_randombit = 0;
+ } else {
+ if (stcb->asoc.hb_ect_randombit > 7) {
+ stcb->asoc.hb_ect_randombit = 0;
+ stcb->asoc.hb_random_idx++;
+ if (stcb->asoc.hb_random_idx > 3) {
+ goto warp_drive_sa;
+ }
+ }
+ this_random = stcb->asoc.hb_random_values[stcb->asoc.hb_random_idx];
+ }
+ if ((this_random >> stcb->asoc.hb_ect_randombit) & 0x01) {
+ if (chk != NULL)
+ /* ECN Nonce stuff */
+ chk->rec.data.ect_nonce = SCTP_ECT1_BIT;
+ stcb->asoc.hb_ect_randombit++;
+ return (SCTP_ECT1_BIT);
+ } else {
+ stcb->asoc.hb_ect_randombit++;
+ return (SCTP_ECT0_BIT);
+ }
+}
+
+static int
+sctp_lowlevel_chunk_output(struct sctp_inpcb *inp,
+ struct sctp_tcb *stcb, /* may be NULL */
+ struct sctp_nets *net,
+ struct sockaddr *to,
+ struct mbuf *m,
+ uint32_t auth_offset,
+ struct sctp_auth_chunk *auth,
+ uint16_t auth_keyid,
+ int nofragment_flag,
+ int ecn_ok,
+ struct sctp_tmit_chunk *chk,
+ int out_of_asoc_ok,
+ uint16_t src_port,
+ uint16_t dest_port,
+ uint32_t v_tag,
+ uint16_t port,
+ int so_locked,
+#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
+ SCTP_UNUSED
+#endif
+ union sctp_sockstore *over_addr
+)
+/* nofragment_flag to tell if IP_DF should be set (IPv4 only) */
+{
+	/*
+	 * Given a mbuf chain (via SCTP_BUF_NEXT()) that holds a packet
+	 * header WITH an SCTPHDR but no IP header, endpoint inp and sa
+	 * structure:
+	 * - fill in the HMAC digest of any AUTH chunk in the packet.
+	 * - calculate and fill in the SCTP checksum.
+	 * - prepend an IP address header.
+	 * - if boundall use INADDR_ANY.
+	 * - if boundspecific do source address selection.
+	 * - set the fragmentation option for IPv4.
+	 * - on return from IP output, check/adjust the MTU size of the
+	 *   output interface and the smallest_mtu size as well.
+	 */
+ /* Will need ifdefs around this */
+ struct mbuf *o_pak;
+ struct mbuf *newm;
+ struct sctphdr *sctphdr;
+ int packet_length;
+ int ret;
+ uint32_t vrf_id;
+ sctp_route_t *ro = NULL;
+ struct udphdr *udp = NULL;
+
+#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+ struct socket *so = NULL;
+
+#endif
+
+ if ((net) && (net->dest_state & SCTP_ADDR_OUT_OF_SCOPE)) {
+ SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EFAULT);
+ sctp_m_freem(m);
+ return (EFAULT);
+ }
+ if (stcb) {
+ vrf_id = stcb->asoc.vrf_id;
+ } else {
+ vrf_id = inp->def_vrf_id;
+ }
+
+ /* fill in the HMAC digest for any AUTH chunk in the packet */
+ if ((auth != NULL) && (stcb != NULL)) {
+ sctp_fill_hmac_digest_m(m, auth_offset, auth, stcb, auth_keyid);
+ }
+ if (to->sa_family == AF_INET) {
+ struct ip *ip = NULL;
+ sctp_route_t iproute;
+ uint8_t tos_value;
+ int len;
+
+ len = sizeof(struct ip) + sizeof(struct sctphdr);
+ if (port) {
+ len += sizeof(struct udphdr);
+ }
+ newm = sctp_get_mbuf_for_msg(len, 1, M_DONTWAIT, 1, MT_DATA);
+ if (newm == NULL) {
+ sctp_m_freem(m);
+ SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
+ return (ENOMEM);
+ }
+ SCTP_ALIGN_TO_END(newm, len);
+ SCTP_BUF_LEN(newm) = len;
+ SCTP_BUF_NEXT(newm) = m;
+ m = newm;
+ packet_length = sctp_calculate_len(m);
+ ip = mtod(m, struct ip *);
+ ip->ip_v = IPVERSION;
+ ip->ip_hl = (sizeof(struct ip) >> 2);
+ if (net) {
+ tos_value = net->tos_flowlabel & 0x000000ff;
+ } else {
+ tos_value = inp->ip_inp.inp.inp_ip_tos;
+ }
+ if ((nofragment_flag) && (port == 0)) {
+ ip->ip_off = IP_DF;
+ } else
+ ip->ip_off = 0;
+
+ /* FreeBSD has a function for ip_id's */
+ ip->ip_id = ip_newid();
+
+ ip->ip_ttl = inp->ip_inp.inp.inp_ip_ttl;
+ ip->ip_len = packet_length;
+ if (stcb) {
+ if ((stcb->asoc.ecn_allowed) && ecn_ok) {
+ /* Enable ECN */
+ ip->ip_tos = ((u_char)(tos_value & 0xfc) | sctp_get_ect(stcb, chk));
+ } else {
+ /* No ECN */
+ ip->ip_tos = (u_char)(tos_value & 0xfc);
+ }
+ } else {
+ /* no association at all */
+ ip->ip_tos = (tos_value & 0xfc);
+ }
+ if (port) {
+ ip->ip_p = IPPROTO_UDP;
+ } else {
+ ip->ip_p = IPPROTO_SCTP;
+ }
+ ip->ip_sum = 0;
+ if (net == NULL) {
+ ro = &iproute;
+ memset(&iproute, 0, sizeof(iproute));
+ memcpy(&ro->ro_dst, to, to->sa_len);
+ } else {
+ ro = (sctp_route_t *) & net->ro;
+ }
+ /* Now the address selection part */
+ ip->ip_dst.s_addr = ((struct sockaddr_in *)to)->sin_addr.s_addr;
+
+ /* call the routine to select the src address */
+ if (net && out_of_asoc_ok == 0) {
+ if (net->ro._s_addr && (net->ro._s_addr->localifa_flags & (SCTP_BEING_DELETED | SCTP_ADDR_IFA_UNUSEABLE))) {
+ sctp_free_ifa(net->ro._s_addr);
+ net->ro._s_addr = NULL;
+ net->src_addr_selected = 0;
+ if (ro->ro_rt) {
+ RTFREE(ro->ro_rt);
+ ro->ro_rt = NULL;
+ }
+ }
+ if (net->src_addr_selected == 0) {
+ /* Cache the source address */
+ net->ro._s_addr = sctp_source_address_selection(inp, stcb,
+ ro, net, 0,
+ vrf_id);
+ net->src_addr_selected = 1;
+ }
+ if (net->ro._s_addr == NULL) {
+ /* No route to host */
+ net->src_addr_selected = 0;
+ goto no_route;
+ }
+ ip->ip_src = net->ro._s_addr->address.sin.sin_addr;
+ } else {
+ if (over_addr == NULL) {
+ struct sctp_ifa *_lsrc;
+
+ _lsrc = sctp_source_address_selection(inp, stcb, ro,
+ net,
+ out_of_asoc_ok,
+ vrf_id);
+ if (_lsrc == NULL) {
+ goto no_route;
+ }
+ ip->ip_src = _lsrc->address.sin.sin_addr;
+ sctp_free_ifa(_lsrc);
+ } else {
+ ip->ip_src = over_addr->sin.sin_addr;
+ SCTP_RTALLOC(ro, vrf_id);
+ }
+ }
+ if (port) {
+ udp = (struct udphdr *)((caddr_t)ip + sizeof(struct ip));
+ udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
+ udp->uh_dport = port;
+ udp->uh_ulen = htons(packet_length - sizeof(struct ip));
+ udp->uh_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP));
+ sctphdr = (struct sctphdr *)((caddr_t)udp + sizeof(struct udphdr));
+ } else {
+ sctphdr = (struct sctphdr *)((caddr_t)ip + sizeof(struct ip));
+ }
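+		/*-
+		 * At this point the outgoing packet is laid out as
+		 * [IP][UDP][SCTP common header] when UDP encapsulation is
+		 * in use (port != 0), and [IP][SCTP common header]
+		 * otherwise.
+		 */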
+
+ sctphdr->src_port = src_port;
+ sctphdr->dest_port = dest_port;
+ sctphdr->v_tag = v_tag;
+ sctphdr->checksum = 0;
+
+ /*
+ * If source address selection fails and we find no route
+ * then the ip_output should fail as well with a
+ * NO_ROUTE_TO_HOST type error. We probably should catch
+ * that somewhere and abort the association right away
+ * (assuming this is an INIT being sent).
+ */
+ if ((ro->ro_rt == NULL)) {
+ /*
+ * src addr selection failed to find a route (or
+ * valid source addr), so we can't get there from
+ * here (yet)!
+ */
+ no_route:
+ SCTPDBG(SCTP_DEBUG_OUTPUT1,
+ "%s: dropped packet - no valid source addr\n",
+ __FUNCTION__);
+ if (net) {
+ SCTPDBG(SCTP_DEBUG_OUTPUT1,
+ "Destination was ");
+ SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT1,
+ &net->ro._l_addr.sa);
+ if (net->dest_state & SCTP_ADDR_CONFIRMED) {
+ if ((net->dest_state & SCTP_ADDR_REACHABLE) && stcb) {
+ SCTPDBG(SCTP_DEBUG_OUTPUT1, "no route takes interface %p down\n", net);
+ sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN,
+ stcb,
+ SCTP_FAILED_THRESHOLD,
+ (void *)net,
+ so_locked);
+ net->dest_state &= ~SCTP_ADDR_REACHABLE;
+ net->dest_state |= SCTP_ADDR_NOT_REACHABLE;
+ /*
+ * JRS 5/14/07 - If a
+ * destination is
+ * unreachable, the PF bit
+ * is turned off. This
+ * allows an unambiguous use
+ * of the PF bit for
+ * destinations that are
+ * reachable but potentially
+ * failed. If the
+ * destination is set to the
+ * unreachable state, also
+ * set the destination to
+ * the PF state.
+ */
+ /*
+ * Add debug message here if
+ * destination is not in PF
+ * state.
+ */
+ /*
+ * Stop any running T3
+ * timers here?
+ */
+ if ((stcb->asoc.sctp_cmt_on_off == 1) &&
+ (stcb->asoc.sctp_cmt_pf > 0)) {
+ net->dest_state &= ~SCTP_ADDR_PF;
+ SCTPDBG(SCTP_DEBUG_OUTPUT1, "Destination %p moved from PF to unreachable.\n",
+ net);
+ }
+ }
+ }
+ if (stcb) {
+ if (net == stcb->asoc.primary_destination) {
+ /* need a new primary */
+ struct sctp_nets *alt;
+
+ alt = sctp_find_alternate_net(stcb, net, 0);
+ if (alt != net) {
+ if (sctp_set_primary_addr(stcb,
+ (struct sockaddr *)NULL,
+ alt) == 0) {
+ net->dest_state |= SCTP_ADDR_WAS_PRIMARY;
+ if (net->ro._s_addr) {
+ sctp_free_ifa(net->ro._s_addr);
+ net->ro._s_addr = NULL;
+ }
+ net->src_addr_selected = 0;
+ }
+ }
+ }
+ }
+ }
+ SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
+ sctp_m_freem(m);
+ return (EHOSTUNREACH);
+ }
+ if (ro != &iproute) {
+ memcpy(&iproute, ro, sizeof(*ro));
+ }
+ SCTPDBG(SCTP_DEBUG_OUTPUT3, "Calling ipv4 output routine from low level src addr:%x\n",
+ (uint32_t) (ntohl(ip->ip_src.s_addr)));
+ SCTPDBG(SCTP_DEBUG_OUTPUT3, "Destination is %x\n",
+ (uint32_t) (ntohl(ip->ip_dst.s_addr)));
+ SCTPDBG(SCTP_DEBUG_OUTPUT3, "RTP route is %p through\n",
+ ro->ro_rt);
+
+ if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
+ /* failed to prepend data, give up */
+ SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
+ sctp_m_freem(m);
+ return (ENOMEM);
+ }
+#ifdef SCTP_PACKET_LOGGING
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
+ sctp_packet_log(m, packet_length);
+#endif
+ SCTP_ATTACH_CHAIN(o_pak, m, packet_length);
+ if (port) {
+#if defined(SCTP_WITH_NO_CSUM)
+ SCTP_STAT_INCR(sctps_sendnocrc);
+#else
+ if (!(SCTP_BASE_SYSCTL(sctp_no_csum_on_loopback) &&
+ (stcb) &&
+ (stcb->asoc.loopback_scope))) {
+ sctphdr->checksum = sctp_calculate_cksum(m, sizeof(struct ip) + sizeof(struct udphdr));
+ SCTP_STAT_INCR(sctps_sendswcrc);
+ } else {
+ SCTP_STAT_INCR(sctps_sendnocrc);
+ }
+#endif
+ SCTP_ENABLE_UDP_CSUM(o_pak);
+ } else {
+#if defined(SCTP_WITH_NO_CSUM)
+ SCTP_STAT_INCR(sctps_sendnocrc);
+#else
+ m->m_pkthdr.csum_flags = CSUM_SCTP;
+ m->m_pkthdr.csum_data = 0;
+ SCTP_STAT_INCR(sctps_sendhwcrc);
+#endif
+ }
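+		/*-
+		 * For plain SCTP the CRC32c is left to the stack/driver via
+		 * the CSUM_SCTP flag; for UDP-encapsulated packets it was
+		 * computed in software above (unless the
+		 * no-csum-on-loopback shortcut applied).
+		 */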
+ /* send it out. table id is taken from stcb */
+#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+ if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
+ so = SCTP_INP_SO(inp);
+ SCTP_SOCKET_UNLOCK(so, 0);
+ }
+#endif
+ SCTP_IP_OUTPUT(ret, o_pak, ro, stcb, vrf_id);
+#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+ if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
+ atomic_add_int(&stcb->asoc.refcnt, 1);
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_SOCKET_LOCK(so, 0);
+ SCTP_TCB_LOCK(stcb);
+ atomic_subtract_int(&stcb->asoc.refcnt, 1);
+ }
+#endif
+ SCTP_STAT_INCR(sctps_sendpackets);
+ SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
+ if (ret)
+ SCTP_STAT_INCR(sctps_senderrors);
+
+ SCTPDBG(SCTP_DEBUG_OUTPUT3, "IP output returns %d\n", ret);
+ if (net == NULL) {
+		/* free temporary routes */
+ if (ro->ro_rt) {
+ RTFREE(ro->ro_rt);
+ ro->ro_rt = NULL;
+ }
+ } else {
+ /* PMTU check versus smallest asoc MTU goes here */
+ if ((ro->ro_rt != NULL) &&
+ (net->ro._s_addr)) {
+ uint32_t mtu;
+
+ mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._l_addr.sa, ro->ro_rt);
+ if (net->port) {
+ mtu -= sizeof(struct udphdr);
+ }
+ if (mtu && (stcb->asoc.smallest_mtu > mtu)) {
+ sctp_mtu_size_reset(inp, &stcb->asoc, mtu);
+ net->mtu = mtu;
+ }
+ } else if (ro->ro_rt == NULL) {
+ /* route was freed */
+ if (net->ro._s_addr &&
+ net->src_addr_selected) {
+ sctp_free_ifa(net->ro._s_addr);
+ net->ro._s_addr = NULL;
+ }
+ net->src_addr_selected = 0;
+ }
+ }
+ return (ret);
+ }
+#ifdef INET6
+ else if (to->sa_family == AF_INET6) {
+ uint32_t flowlabel;
+ struct ip6_hdr *ip6h;
+ struct route_in6 ip6route;
+ struct ifnet *ifp;
+ u_char flowTop;
+ uint16_t flowBottom;
+ u_char tosBottom, tosTop;
+ struct sockaddr_in6 *sin6, tmp, *lsa6, lsa6_tmp;
+ int prev_scope = 0;
+ struct sockaddr_in6 lsa6_storage;
+ int error;
+ u_short prev_port = 0;
+ int len;
+
+ if (net != NULL) {
+ flowlabel = net->tos_flowlabel;
+ } else {
+ flowlabel = ((struct in6pcb *)inp)->in6p_flowinfo;
+ }
+
+ len = sizeof(struct ip6_hdr) + sizeof(struct sctphdr);
+ if (port) {
+ len += sizeof(struct udphdr);
+ }
+ newm = sctp_get_mbuf_for_msg(len, 1, M_DONTWAIT, 1, MT_DATA);
+ if (newm == NULL) {
+ sctp_m_freem(m);
+ SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
+ return (ENOMEM);
+ }
+ SCTP_ALIGN_TO_END(newm, len);
+ SCTP_BUF_LEN(newm) = len;
+ SCTP_BUF_NEXT(newm) = m;
+ m = newm;
+ packet_length = sctp_calculate_len(m);
+
+ ip6h = mtod(m, struct ip6_hdr *);
+ /*
+ * We assume here that inp_flow is in host byte order within
+ * the TCB!
+ */
+ flowBottom = flowlabel & 0x0000ffff;
+ flowTop = ((flowlabel & 0x000f0000) >> 16);
+ tosTop = (((flowlabel & 0xf0) >> 4) | IPV6_VERSION);
+ /* protect *sin6 from overwrite */
+ sin6 = (struct sockaddr_in6 *)to;
+ tmp = *sin6;
+ sin6 = &tmp;
+
+ /* KAME hack: embed scopeid */
+ if (sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone)) != 0) {
+ SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
+ return (EINVAL);
+ }
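+		/*-
+		 * Note: KAME stores the scope zone id inside the address
+		 * bits of link-local addresses; sa6_recoverscope() undoes
+		 * the embedding before such an address is exposed again.
+		 */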
+ if (net == NULL) {
+ memset(&ip6route, 0, sizeof(ip6route));
+ ro = (sctp_route_t *) & ip6route;
+ memcpy(&ro->ro_dst, sin6, sin6->sin6_len);
+ } else {
+ ro = (sctp_route_t *) & net->ro;
+ }
+ if (stcb != NULL) {
+ if ((stcb->asoc.ecn_allowed) && ecn_ok) {
+ /* Enable ECN */
+ tosBottom = (((((struct in6pcb *)inp)->in6p_flowinfo & 0x0c) | sctp_get_ect(stcb, chk)) << 4);
+ } else {
+ /* No ECN */
+ tosBottom = ((((struct in6pcb *)inp)->in6p_flowinfo & 0x0c) << 4);
+ }
+ } else {
+			/* we may have no asoc if it is an O-O-T-B packet */
+ tosBottom = ((((struct in6pcb *)inp)->in6p_flowinfo & 0x0c) << 4);
+ }
+ ip6h->ip6_flow = htonl(((tosTop << 24) | ((tosBottom | flowTop) << 16) | flowBottom));
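+		/*-
+		 * ip6_flow layout: 4-bit version, 8-bit traffic class,
+		 * 20-bit flow label; the tosTop/tosBottom/flowTop/flowBottom
+		 * pieces computed above reassemble those fields from
+		 * inp_flow.
+		 */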
+ if (port) {
+ ip6h->ip6_nxt = IPPROTO_UDP;
+ } else {
+ ip6h->ip6_nxt = IPPROTO_SCTP;
+ }
+ ip6h->ip6_plen = (packet_length - sizeof(struct ip6_hdr));
+ ip6h->ip6_dst = sin6->sin6_addr;
+
+		/*
+		 * Add SRC address selection here: we can only reuse the
+		 * KAME source-address selection to a limited degree, since
+		 * we can try its selection but the result may not be bound.
+		 */
+ bzero(&lsa6_tmp, sizeof(lsa6_tmp));
+ lsa6_tmp.sin6_family = AF_INET6;
+ lsa6_tmp.sin6_len = sizeof(lsa6_tmp);
+ lsa6 = &lsa6_tmp;
+ if (net && out_of_asoc_ok == 0) {
+ if (net->ro._s_addr && (net->ro._s_addr->localifa_flags & (SCTP_BEING_DELETED | SCTP_ADDR_IFA_UNUSEABLE))) {
+ sctp_free_ifa(net->ro._s_addr);
+ net->ro._s_addr = NULL;
+ net->src_addr_selected = 0;
+ if (ro->ro_rt) {
+ RTFREE(ro->ro_rt);
+ ro->ro_rt = NULL;
+ }
+ }
+ if (net->src_addr_selected == 0) {
+ sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
+ /* KAME hack: embed scopeid */
+ if (sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone)) != 0) {
+ SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
+ return (EINVAL);
+ }
+ /* Cache the source address */
+ net->ro._s_addr = sctp_source_address_selection(inp,
+ stcb,
+ ro,
+ net,
+ 0,
+ vrf_id);
+ (void)sa6_recoverscope(sin6);
+ net->src_addr_selected = 1;
+ }
+ if (net->ro._s_addr == NULL) {
+ SCTPDBG(SCTP_DEBUG_OUTPUT3, "V6:No route to host\n");
+ net->src_addr_selected = 0;
+ goto no_route;
+ }
+ lsa6->sin6_addr = net->ro._s_addr->address.sin6.sin6_addr;
+ } else {
+ sin6 = (struct sockaddr_in6 *)&ro->ro_dst;
+ /* KAME hack: embed scopeid */
+ if (sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone)) != 0) {
+ SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
+ return (EINVAL);
+ }
+ if (over_addr == NULL) {
+ struct sctp_ifa *_lsrc;
+
+ _lsrc = sctp_source_address_selection(inp, stcb, ro,
+ net,
+ out_of_asoc_ok,
+ vrf_id);
+ if (_lsrc == NULL) {
+ goto no_route;
+ }
+ lsa6->sin6_addr = _lsrc->address.sin6.sin6_addr;
+ sctp_free_ifa(_lsrc);
+ } else {
+ lsa6->sin6_addr = over_addr->sin6.sin6_addr;
+ SCTP_RTALLOC(ro, vrf_id);
+ }
+ (void)sa6_recoverscope(sin6);
+ }
+ lsa6->sin6_port = inp->sctp_lport;
+
+ if (ro->ro_rt == NULL) {
+ /*
+ * src addr selection failed to find a route (or
+ * valid source addr), so we can't get there from
+ * here!
+ */
+ goto no_route;
+ }
+ /*
+ * XXX: sa6 may not have a valid sin6_scope_id in the
+ * non-SCOPEDROUTING case.
+ */
+ bzero(&lsa6_storage, sizeof(lsa6_storage));
+ lsa6_storage.sin6_family = AF_INET6;
+ lsa6_storage.sin6_len = sizeof(lsa6_storage);
+ lsa6_storage.sin6_addr = lsa6->sin6_addr;
+ if ((error = sa6_recoverscope(&lsa6_storage)) != 0) {
+ SCTPDBG(SCTP_DEBUG_OUTPUT3, "recover scope fails error %d\n", error);
+ sctp_m_freem(m);
+ return (error);
+ }
+ /* XXX */
+ lsa6_storage.sin6_addr = lsa6->sin6_addr;
+ lsa6_storage.sin6_port = inp->sctp_lport;
+ lsa6 = &lsa6_storage;
+ ip6h->ip6_src = lsa6->sin6_addr;
+
+ if (port) {
+ udp = (struct udphdr *)((caddr_t)ip6h + sizeof(struct ip6_hdr));
+ udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
+ udp->uh_dport = port;
+ udp->uh_ulen = htons(packet_length - sizeof(struct ip6_hdr));
+ udp->uh_sum = 0;
+ sctphdr = (struct sctphdr *)((caddr_t)udp + sizeof(struct udphdr));
+ } else {
+ sctphdr = (struct sctphdr *)((caddr_t)ip6h + sizeof(struct ip6_hdr));
+ }
+
+ sctphdr->src_port = src_port;
+ sctphdr->dest_port = dest_port;
+ sctphdr->v_tag = v_tag;
+ sctphdr->checksum = 0;
+
+ /*
+ * We set the hop limit now since there is a good chance
+ * that our ro pointer is now filled
+ */
+ ip6h->ip6_hlim = SCTP_GET_HLIM(inp, ro);
+ ifp = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
+
+#ifdef SCTP_DEBUG
+ /* Copy to be sure something bad is not happening */
+ sin6->sin6_addr = ip6h->ip6_dst;
+ lsa6->sin6_addr = ip6h->ip6_src;
+#endif
+
+ SCTPDBG(SCTP_DEBUG_OUTPUT3, "Calling ipv6 output routine from low level\n");
+ SCTPDBG(SCTP_DEBUG_OUTPUT3, "src: ");
+ SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT3, (struct sockaddr *)lsa6);
+ SCTPDBG(SCTP_DEBUG_OUTPUT3, "dst: ");
+ SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT3, (struct sockaddr *)sin6);
+ if (net) {
+ sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
+ /* preserve the port and scope for link local send */
+ prev_scope = sin6->sin6_scope_id;
+ prev_port = sin6->sin6_port;
+ }
+ if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
+ /* failed to prepend data, give up */
+ sctp_m_freem(m);
+ SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
+ return (ENOMEM);
+ }
+#ifdef SCTP_PACKET_LOGGING
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
+ sctp_packet_log(m, packet_length);
+#endif
+ SCTP_ATTACH_CHAIN(o_pak, m, packet_length);
+ if (port) {
+#if defined(SCTP_WITH_NO_CSUM)
+ SCTP_STAT_INCR(sctps_sendnocrc);
+#else
+ if (!(SCTP_BASE_SYSCTL(sctp_no_csum_on_loopback) &&
+ (stcb) &&
+ (stcb->asoc.loopback_scope))) {
+ sctphdr->checksum = sctp_calculate_cksum(m, sizeof(struct ip6_hdr) + sizeof(struct udphdr));
+ SCTP_STAT_INCR(sctps_sendswcrc);
+ } else {
+ SCTP_STAT_INCR(sctps_sendnocrc);
+ }
+#endif
+ if ((udp->uh_sum = in6_cksum(o_pak, IPPROTO_UDP, sizeof(struct ip6_hdr), packet_length - sizeof(struct ip6_hdr))) == 0) {
+ udp->uh_sum = 0xffff;
+ }
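+			/*-
+			 * A computed UDP checksum of 0 must be transmitted
+			 * as 0xffff: 0 on the wire means "no checksum",
+			 * which is not allowed for UDP over IPv6.
+			 */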
+ } else {
+#if defined(SCTP_WITH_NO_CSUM)
+ SCTP_STAT_INCR(sctps_sendnocrc);
+#else
+ m->m_pkthdr.csum_flags = CSUM_SCTP;
+ m->m_pkthdr.csum_data = 0;
+ SCTP_STAT_INCR(sctps_sendhwcrc);
+#endif
+ }
+ /* send it out. table id is taken from stcb */
+#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+ if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
+ so = SCTP_INP_SO(inp);
+ SCTP_SOCKET_UNLOCK(so, 0);
+ }
+#endif
+ SCTP_IP6_OUTPUT(ret, o_pak, (struct route_in6 *)ro, &ifp, stcb, vrf_id);
+#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+ if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
+ atomic_add_int(&stcb->asoc.refcnt, 1);
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_SOCKET_LOCK(so, 0);
+ SCTP_TCB_LOCK(stcb);
+ atomic_subtract_int(&stcb->asoc.refcnt, 1);
+ }
+#endif
+ if (net) {
+ /* for link local this must be done */
+ sin6->sin6_scope_id = prev_scope;
+ sin6->sin6_port = prev_port;
+ }
+ SCTPDBG(SCTP_DEBUG_OUTPUT3, "return from send is %d\n", ret);
+ SCTP_STAT_INCR(sctps_sendpackets);
+ SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
+ if (ret) {
+ SCTP_STAT_INCR(sctps_senderrors);
+ }
+ if (net == NULL) {
+ /* Now if we had a temp route free it */
+ if (ro->ro_rt) {
+ RTFREE(ro->ro_rt);
+ }
+ } else {
+ /* PMTU check versus smallest asoc MTU goes here */
+ if (ro->ro_rt == NULL) {
+ /* Route was freed */
+ if (net->ro._s_addr &&
+ net->src_addr_selected) {
+ sctp_free_ifa(net->ro._s_addr);
+ net->ro._s_addr = NULL;
+ }
+ net->src_addr_selected = 0;
+ }
+ if ((ro->ro_rt != NULL) &&
+ (net->ro._s_addr)) {
+ uint32_t mtu;
+
+ mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._l_addr.sa, ro->ro_rt);
+ if (mtu &&
+ (stcb->asoc.smallest_mtu > mtu)) {
+ sctp_mtu_size_reset(inp, &stcb->asoc, mtu);
+ net->mtu = mtu;
+ if (net->port) {
+ net->mtu -= sizeof(struct udphdr);
+ }
+ }
+ } else if (ifp) {
+ if (ND_IFINFO(ifp)->linkmtu &&
+ (stcb->asoc.smallest_mtu > ND_IFINFO(ifp)->linkmtu)) {
+ sctp_mtu_size_reset(inp,
+ &stcb->asoc,
+ ND_IFINFO(ifp)->linkmtu);
+ }
+ }
+ }
+ return (ret);
+ }
+#endif
+ else {
+ SCTPDBG(SCTP_DEBUG_OUTPUT1, "Unknown protocol (TSNH) type %d\n",
+ ((struct sockaddr *)to)->sa_family);
+ sctp_m_freem(m);
+ SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EFAULT);
+ return (EFAULT);
+ }
+}
+
+
+void
+sctp_send_initiate(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int so_locked
+#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
+ SCTP_UNUSED
+#endif
+)
+{
+ struct mbuf *m, *m_at, *mp_last;
+ struct sctp_nets *net;
+ struct sctp_init_chunk *init;
+ struct sctp_supported_addr_param *sup_addr;
+ struct sctp_adaptation_layer_indication *ali;
+ struct sctp_ecn_supported_param *ecn;
+ struct sctp_prsctp_supported_param *prsctp;
+ struct sctp_ecn_nonce_supported_param *ecn_nonce;
+ struct sctp_supported_chunk_types_param *pr_supported;
+ int cnt_inits_to = 0;
+ int padval, ret;
+ int num_ext;
+ int p_len;
+
+	/* INITs always go to the primary (and usually the ONLY) address */
+ mp_last = NULL;
+ net = stcb->asoc.primary_destination;
+ if (net == NULL) {
+ net = TAILQ_FIRST(&stcb->asoc.nets);
+ if (net == NULL) {
+ /* TSNH */
+ return;
+ }
+ /* we confirm any address we send an INIT to */
+ net->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
+ (void)sctp_set_primary_addr(stcb, NULL, net);
+ } else {
+ /* we confirm any address we send an INIT to */
+ net->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
+ }
+ SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT\n");
+#ifdef INET6
+ if (((struct sockaddr *)&(net->ro._l_addr))->sa_family == AF_INET6) {
+		/*
+		 * special hook: if we are sending to a link-local address
+		 * it will not show up in our private address count.
+		 */
+ struct sockaddr_in6 *sin6l;
+
+ sin6l = &net->ro._l_addr.sin6;
+ if (IN6_IS_ADDR_LINKLOCAL(&sin6l->sin6_addr))
+ cnt_inits_to = 1;
+ }
+#endif
+ if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
+ /* This case should not happen */
+ SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT - failed timer?\n");
+ return;
+ }
+ /* start the INIT timer */
+ sctp_timer_start(SCTP_TIMER_TYPE_INIT, inp, stcb, net);
+
+ m = sctp_get_mbuf_for_msg(MCLBYTES, 1, M_DONTWAIT, 1, MT_DATA);
+ if (m == NULL) {
+ /* No memory, INIT timer will re-attempt. */
+ SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT - mbuf?\n");
+ return;
+ }
+ SCTP_BUF_LEN(m) = sizeof(struct sctp_init_chunk);
+ /*
+ * assume peer supports asconf in order to be able to queue local
+ * address changes while an INIT is in flight and before the assoc
+ * is established.
+ */
+ stcb->asoc.peer_supports_asconf = 1;
+ /* Now lets put the SCTP header in place */
+ init = mtod(m, struct sctp_init_chunk *);
+ /* now the chunk header */
+ init->ch.chunk_type = SCTP_INITIATION;
+ init->ch.chunk_flags = 0;
+ /* fill in later from mbuf we build */
+ init->ch.chunk_length = 0;
+ /* place in my tag */
+ init->init.initiate_tag = htonl(stcb->asoc.my_vtag);
+ /* set up some of the credits. */
+ init->init.a_rwnd = htonl(max(inp->sctp_socket ? SCTP_SB_LIMIT_RCV(inp->sctp_socket) : 0,
+ SCTP_MINIMAL_RWND));
+
+ init->init.num_outbound_streams = htons(stcb->asoc.pre_open_streams);
+ init->init.num_inbound_streams = htons(stcb->asoc.max_inbound_streams);
+ init->init.initial_tsn = htonl(stcb->asoc.init_seq_number);
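+	/*-
+	 * The optional parameters are now appended in order: supported
+	 * address types, adaptation layer indication, optional NAT
+	 * friendly and cookie preserve parameters, ECN capable, PR-SCTP,
+	 * supported chunk extensions, ECN nonce, the AUTH parameters
+	 * (RANDOM, HMAC-ALGO, CHUNKS) and finally our addresses.
+	 */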
+ /* now the address restriction */
+ sup_addr = (struct sctp_supported_addr_param *)((caddr_t)init +
+ sizeof(*init));
+ sup_addr->ph.param_type = htons(SCTP_SUPPORTED_ADDRTYPE);
+#ifdef INET6
+ /* we support 2 types: IPv6/IPv4 */
+ sup_addr->ph.param_length = htons(sizeof(*sup_addr) + sizeof(uint16_t));
+ sup_addr->addr_type[0] = htons(SCTP_IPV4_ADDRESS);
+ sup_addr->addr_type[1] = htons(SCTP_IPV6_ADDRESS);
+#else
+ /* we support 1 type: IPv4 */
+ sup_addr->ph.param_length = htons(sizeof(*sup_addr) + sizeof(uint8_t));
+ sup_addr->addr_type[0] = htons(SCTP_IPV4_ADDRESS);
+ sup_addr->addr_type[1] = htons(0); /* this is the padding */
+#endif
+ SCTP_BUF_LEN(m) += sizeof(*sup_addr) + sizeof(uint16_t);
+ /* adaptation layer indication parameter */
+ ali = (struct sctp_adaptation_layer_indication *)((caddr_t)sup_addr + sizeof(*sup_addr) + sizeof(uint16_t));
+ ali->ph.param_type = htons(SCTP_ULP_ADAPTATION);
+ ali->ph.param_length = htons(sizeof(*ali));
+ ali->indication = ntohl(inp->sctp_ep.adaptation_layer_indicator);
+ SCTP_BUF_LEN(m) += sizeof(*ali);
+ ecn = (struct sctp_ecn_supported_param *)((caddr_t)ali + sizeof(*ali));
+
+ if (SCTP_BASE_SYSCTL(sctp_inits_include_nat_friendly)) {
+ /* Add NAT friendly parameter */
+ struct sctp_paramhdr *ph;
+
+ ph = (struct sctp_paramhdr *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m));
+ ph->param_type = htons(SCTP_HAS_NAT_SUPPORT);
+ ph->param_length = htons(sizeof(struct sctp_paramhdr));
+ SCTP_BUF_LEN(m) += sizeof(struct sctp_paramhdr);
+ ecn = (struct sctp_ecn_supported_param *)((caddr_t)ph + sizeof(*ph));
+ }
+ /* now any cookie time extensions */
+ if (stcb->asoc.cookie_preserve_req) {
+ struct sctp_cookie_perserve_param *cookie_preserve;
+
+ cookie_preserve = (struct sctp_cookie_perserve_param *)(ecn);
+ cookie_preserve->ph.param_type = htons(SCTP_COOKIE_PRESERVE);
+ cookie_preserve->ph.param_length = htons(
+ sizeof(*cookie_preserve));
+ cookie_preserve->time = htonl(stcb->asoc.cookie_preserve_req);
+ SCTP_BUF_LEN(m) += sizeof(*cookie_preserve);
+ ecn = (struct sctp_ecn_supported_param *)(
+ (caddr_t)cookie_preserve + sizeof(*cookie_preserve));
+ stcb->asoc.cookie_preserve_req = 0;
+ }
+ /* ECN parameter */
+ if (SCTP_BASE_SYSCTL(sctp_ecn_enable) == 1) {
+ ecn->ph.param_type = htons(SCTP_ECN_CAPABLE);
+ ecn->ph.param_length = htons(sizeof(*ecn));
+ SCTP_BUF_LEN(m) += sizeof(*ecn);
+ prsctp = (struct sctp_prsctp_supported_param *)((caddr_t)ecn +
+ sizeof(*ecn));
+ } else {
+ prsctp = (struct sctp_prsctp_supported_param *)((caddr_t)ecn);
+ }
+ /* And now tell the peer we do pr-sctp */
+ prsctp->ph.param_type = htons(SCTP_PRSCTP_SUPPORTED);
+ prsctp->ph.param_length = htons(sizeof(*prsctp));
+ SCTP_BUF_LEN(m) += sizeof(*prsctp);
+
+ /* And now tell the peer we do all the extensions */
+ pr_supported = (struct sctp_supported_chunk_types_param *)
+ ((caddr_t)prsctp + sizeof(*prsctp));
+ pr_supported->ph.param_type = htons(SCTP_SUPPORTED_CHUNK_EXT);
+ num_ext = 0;
+ pr_supported->chunk_types[num_ext++] = SCTP_ASCONF;
+ pr_supported->chunk_types[num_ext++] = SCTP_ASCONF_ACK;
+ pr_supported->chunk_types[num_ext++] = SCTP_FORWARD_CUM_TSN;
+ pr_supported->chunk_types[num_ext++] = SCTP_PACKET_DROPPED;
+ pr_supported->chunk_types[num_ext++] = SCTP_STREAM_RESET;
+ if (!SCTP_BASE_SYSCTL(sctp_auth_disable)) {
+ pr_supported->chunk_types[num_ext++] = SCTP_AUTHENTICATION;
+ }
+ if (stcb->asoc.sctp_nr_sack_on_off == 1) {
+ pr_supported->chunk_types[num_ext++] = SCTP_NR_SELECTIVE_ACK;
+ }
+ p_len = sizeof(*pr_supported) + num_ext;
+ pr_supported->ph.param_length = htons(p_len);
+ bzero((caddr_t)pr_supported + p_len, SCTP_SIZE32(p_len) - p_len);
+ SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
+
+
+ /* ECN nonce: And now tell the peer we support ECN nonce */
+ if (SCTP_BASE_SYSCTL(sctp_ecn_nonce)) {
+ ecn_nonce = (struct sctp_ecn_nonce_supported_param *)
+ ((caddr_t)pr_supported + SCTP_SIZE32(p_len));
+ ecn_nonce->ph.param_type = htons(SCTP_ECN_NONCE_SUPPORTED);
+ ecn_nonce->ph.param_length = htons(sizeof(*ecn_nonce));
+ SCTP_BUF_LEN(m) += sizeof(*ecn_nonce);
+ }
+ /* add authentication parameters */
+ if (!SCTP_BASE_SYSCTL(sctp_auth_disable)) {
+ struct sctp_auth_random *randp;
+ struct sctp_auth_hmac_algo *hmacs;
+ struct sctp_auth_chunk_list *chunks;
+
+ /* attach RANDOM parameter, if available */
+ if (stcb->asoc.authinfo.random != NULL) {
+ randp = (struct sctp_auth_random *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m));
+ p_len = sizeof(*randp) + stcb->asoc.authinfo.random_len;
+ /* random key already contains the header */
+ bcopy(stcb->asoc.authinfo.random->key, randp, p_len);
+ /* zero out any padding required */
+ bzero((caddr_t)randp + p_len, SCTP_SIZE32(p_len) - p_len);
+ SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
+ }
+ /* add HMAC_ALGO parameter */
+ hmacs = (struct sctp_auth_hmac_algo *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m));
+ p_len = sctp_serialize_hmaclist(stcb->asoc.local_hmacs,
+ (uint8_t *) hmacs->hmac_ids);
+ if (p_len > 0) {
+ p_len += sizeof(*hmacs);
+ hmacs->ph.param_type = htons(SCTP_HMAC_LIST);
+ hmacs->ph.param_length = htons(p_len);
+ /* zero out any padding required */
+ bzero((caddr_t)hmacs + p_len, SCTP_SIZE32(p_len) - p_len);
+ SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
+ }
+ /* add CHUNKS parameter */
+ chunks = (struct sctp_auth_chunk_list *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m));
+ p_len = sctp_serialize_auth_chunks(stcb->asoc.local_auth_chunks,
+ chunks->chunk_types);
+ if (p_len > 0) {
+ p_len += sizeof(*chunks);
+ chunks->ph.param_type = htons(SCTP_CHUNK_LIST);
+ chunks->ph.param_length = htons(p_len);
+ /* zero out any padding required */
+ bzero((caddr_t)chunks + p_len, SCTP_SIZE32(p_len) - p_len);
+ SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
+ }
+ }
+ m_at = m;
+ /* now the addresses */
+ {
+ struct sctp_scoping scp;
+
+		/*
+		 * To optimize this we could put the scoping stuff into a
+		 * structure and remove the individual uint8's from the
+		 * assoc structure. Then we could just pass in the address
+		 * within the stcb, but for now this is a quick hack to get
+		 * the address handling teased apart.
+		 */
+ scp.ipv4_addr_legal = stcb->asoc.ipv4_addr_legal;
+ scp.ipv6_addr_legal = stcb->asoc.ipv6_addr_legal;
+ scp.loopback_scope = stcb->asoc.loopback_scope;
+ scp.ipv4_local_scope = stcb->asoc.ipv4_local_scope;
+ scp.local_scope = stcb->asoc.local_scope;
+ scp.site_scope = stcb->asoc.site_scope;
+
+ m_at = sctp_add_addresses_to_i_ia(inp, &scp, m_at, cnt_inits_to);
+ }
+
+	/* calculate the size and update the packet header and chunk header */
+ p_len = 0;
+ for (m_at = m; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
+ if (SCTP_BUF_NEXT(m_at) == NULL)
+ mp_last = m_at;
+ p_len += SCTP_BUF_LEN(m_at);
+ }
+ init->ch.chunk_length = htons(p_len);
+	/*
+	 * We pass 0 here so IP_DF is NOT set if it is IPv4; we ignore
+	 * the return value since the timer will drive a retransmission.
+	 */
+
+ /* I don't expect this to execute but we will be safe here */
+ padval = p_len % 4;
+ if ((padval) && (mp_last)) {
+		/*
+		 * The compiler worries that mp_last may not be set even
+		 * though I think that is impossible :-> however we check
+		 * mp_last here just in case.
+		 */
+ ret = sctp_add_pad_tombuf(mp_last, (4 - padval));
+ if (ret) {
+ /* Houston we have a problem, no space */
+ sctp_m_freem(m);
+ return;
+ }
+ p_len += padval;
+ }
+ SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT - calls lowlevel_output\n");
+ ret = sctp_lowlevel_chunk_output(inp, stcb, net,
+ (struct sockaddr *)&net->ro._l_addr,
+ m, 0, NULL, 0, 0, 0, NULL, 0,
+ inp->sctp_lport, stcb->rport, htonl(0),
+ net->port, so_locked, NULL);
+ SCTPDBG(SCTP_DEBUG_OUTPUT4, "lowlevel_output - %d\n", ret);
+ SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
+ (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
+}
+
+struct mbuf *
+sctp_arethere_unrecognized_parameters(struct mbuf *in_initpkt,
+ int param_offset, int *abort_processing, struct sctp_chunkhdr *cp, int *nat_friendly)
+{
+	/*
+	 * Given a mbuf containing an INIT or INIT-ACK, with param_offset
+	 * equal to the beginning of the params (i.e. iphlen +
+	 * sizeof(struct sctp_init_msg)), parse through the parameters to
+	 * the end of the mbuf, verifying that all parameters are known.
+	 *
+	 * For unknown parameters build and return a mbuf with
+	 * UNRECOGNIZED_PARAMETER errors. If the flags indicate to stop
+	 * processing this chunk, stop and set *abort_processing to 1.
+	 *
+	 * By having param_offset pre-set to where parameters begin, it
+	 * is hoped that this routine may be reused in the future by new
+	 * features.
+	 */
+ struct sctp_paramhdr *phdr, params;
+
+ struct mbuf *mat, *op_err;
+ char tempbuf[SCTP_PARAM_BUFFER_SIZE];
+ int at, limit, pad_needed;
+ uint16_t ptype, plen, padded_size;
+ int err_at;
+
+ *abort_processing = 0;
+ mat = in_initpkt;
+ err_at = 0;
+ limit = ntohs(cp->chunk_length) - sizeof(struct sctp_init_chunk);
+ at = param_offset;
+ op_err = NULL;
+ SCTPDBG(SCTP_DEBUG_OUTPUT1, "Check for unrecognized param's\n");
+ phdr = sctp_get_next_param(mat, at, &params, sizeof(params));
+ while ((phdr != NULL) && ((size_t)limit >= sizeof(struct sctp_paramhdr))) {
+ ptype = ntohs(phdr->param_type);
+ plen = ntohs(phdr->param_length);
+ if ((plen > limit) || (plen < sizeof(struct sctp_paramhdr))) {
+			/* malformed parameter */
+ SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error %d\n", plen);
+ goto invalid_size;
+ }
+ limit -= SCTP_SIZE32(plen);
+		/*-
+		 * All parameters for all chunks that we know/understand are
+		 * listed here. We process them in other places and take the
+		 * appropriate stop action per the upper bits. However this
+		 * is the generic routine processors can call to get back an
+		 * operr to either incorporate (init-ack) or send.
+		 */
+ padded_size = SCTP_SIZE32(plen);
+ switch (ptype) {
+ /* Param's with variable size */
+ case SCTP_HEARTBEAT_INFO:
+ case SCTP_STATE_COOKIE:
+ case SCTP_UNRECOG_PARAM:
+ case SCTP_ERROR_CAUSE_IND:
+ /* ok skip fwd */
+ at += padded_size;
+ break;
+ /* Param's with variable size within a range */
+ case SCTP_CHUNK_LIST:
+ case SCTP_SUPPORTED_CHUNK_EXT:
+ if (padded_size > (sizeof(struct sctp_supported_chunk_types_param) + (sizeof(uint8_t) * SCTP_MAX_SUPPORTED_EXT))) {
+ SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error chklist %d\n", plen);
+ goto invalid_size;
+ }
+ at += padded_size;
+ break;
+ case SCTP_SUPPORTED_ADDRTYPE:
+ if (padded_size > SCTP_MAX_ADDR_PARAMS_SIZE) {
+ SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error supaddrtype %d\n", plen);
+ goto invalid_size;
+ }
+ at += padded_size;
+ break;
+ case SCTP_RANDOM:
+ if (padded_size > (sizeof(struct sctp_auth_random) + SCTP_RANDOM_MAX_SIZE)) {
+ SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error random %d\n", plen);
+ goto invalid_size;
+ }
+ at += padded_size;
+ break;
+ case SCTP_SET_PRIM_ADDR:
+ case SCTP_DEL_IP_ADDRESS:
+ case SCTP_ADD_IP_ADDRESS:
+ if ((padded_size != sizeof(struct sctp_asconf_addrv4_param)) &&
+ (padded_size != sizeof(struct sctp_asconf_addr_param))) {
+ SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error setprim %d\n", plen);
+ goto invalid_size;
+ }
+ at += padded_size;
+ break;
+ /* Param's with a fixed size */
+ case SCTP_IPV4_ADDRESS:
+ if (padded_size != sizeof(struct sctp_ipv4addr_param)) {
+ SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error ipv4 addr %d\n", plen);
+ goto invalid_size;
+ }
+ at += padded_size;
+ break;
+ case SCTP_IPV6_ADDRESS:
+ if (padded_size != sizeof(struct sctp_ipv6addr_param)) {
+ SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error ipv6 addr %d\n", plen);
+ goto invalid_size;
+ }
+ at += padded_size;
+ break;
+ case SCTP_COOKIE_PRESERVE:
+ if (padded_size != sizeof(struct sctp_cookie_perserve_param)) {
+ SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error cookie-preserve %d\n", plen);
+ goto invalid_size;
+ }
+ at += padded_size;
+ break;
+ case SCTP_HAS_NAT_SUPPORT:
+ *nat_friendly = 1;
+ /* fall through */
+ case SCTP_ECN_NONCE_SUPPORTED:
+ case SCTP_PRSCTP_SUPPORTED:
+
+ if (padded_size != sizeof(struct sctp_paramhdr)) {
+ SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error ecnnonce/prsctp/nat support %d\n", plen);
+ goto invalid_size;
+ }
+ at += padded_size;
+ break;
+ case SCTP_ECN_CAPABLE:
+ if (padded_size != sizeof(struct sctp_ecn_supported_param)) {
+ SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error ecn %d\n", plen);
+ goto invalid_size;
+ }
+ at += padded_size;
+ break;
+ case SCTP_ULP_ADAPTATION:
+ if (padded_size != sizeof(struct sctp_adaptation_layer_indication)) {
+ SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error adapatation %d\n", plen);
+ goto invalid_size;
+ }
+ at += padded_size;
+ break;
+ case SCTP_SUCCESS_REPORT:
+ if (padded_size != sizeof(struct sctp_asconf_paramhdr)) {
+ SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error success %d\n", plen);
+ goto invalid_size;
+ }
+ at += padded_size;
+ break;
+ case SCTP_HOSTNAME_ADDRESS:
+ {
+ /* We can NOT handle HOST NAME addresses!! */
+ int l_len;
+
+ SCTPDBG(SCTP_DEBUG_OUTPUT1, "Can't handle hostname addresses.. abort processing\n");
+ *abort_processing = 1;
+ if (op_err == NULL) {
+				/* Ok, need to try to get an mbuf */
+#ifdef INET6
+ l_len = sizeof(struct ip6_hdr) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
+#else
+ l_len = sizeof(struct ip) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
+#endif
+ l_len += plen;
+ l_len += sizeof(struct sctp_paramhdr);
+ op_err = sctp_get_mbuf_for_msg(l_len, 0, M_DONTWAIT, 1, MT_DATA);
+ if (op_err) {
+ SCTP_BUF_LEN(op_err) = 0;
+ /*
+ * pre-reserve space for ip
+ * and sctp header and
+ * chunk hdr
+ */
+#ifdef INET6
+ SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
+#else
+ SCTP_BUF_RESV_UF(op_err, sizeof(struct ip));
+#endif
+ SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
+ SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
+ }
+ }
+ if (op_err) {
+ /* If we have space */
+ struct sctp_paramhdr s;
+
+ if (err_at % 4) {
+ uint32_t cpthis = 0;
+
+ pad_needed = 4 - (err_at % 4);
+ m_copyback(op_err, err_at, pad_needed, (caddr_t)&cpthis);
+ err_at += pad_needed;
+ }
+ s.param_type = htons(SCTP_CAUSE_UNRESOLVABLE_ADDR);
+ s.param_length = htons(sizeof(s) + plen);
+ m_copyback(op_err, err_at, sizeof(s), (caddr_t)&s);
+ err_at += sizeof(s);
+ phdr = sctp_get_next_param(mat, at, (struct sctp_paramhdr *)tempbuf, min(sizeof(tempbuf), plen));
+ if (phdr == NULL) {
+ sctp_m_freem(op_err);
+ /*
+					 * We are out of memory; nothing more
+					 * can be done with this packet, so
+					 * give up (the system is in trouble
+					 * though).
+ */
+ return (NULL);
+ }
+ m_copyback(op_err, err_at, plen, (caddr_t)phdr);
+ err_at += plen;
+ }
+ return (op_err);
+ break;
+ }
+ default:
+ /*
+			 * We do not recognize the parameter; figure out
+			 * what to do with it.
+ */
+ SCTPDBG(SCTP_DEBUG_OUTPUT1, "Hit default param %x\n", ptype);
+ if ((ptype & 0x4000) == 0x4000) {
+				/* The report bit is set; send back an error cause. */
+ SCTPDBG(SCTP_DEBUG_OUTPUT1, "report op err\n");
+ if (op_err == NULL) {
+ int l_len;
+
+ /* Ok need to try to get an mbuf */
+#ifdef INET6
+ l_len = sizeof(struct ip6_hdr) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
+#else
+ l_len = sizeof(struct ip) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
+#endif
+ l_len += plen;
+ l_len += sizeof(struct sctp_paramhdr);
+ op_err = sctp_get_mbuf_for_msg(l_len, 0, M_DONTWAIT, 1, MT_DATA);
+ if (op_err) {
+ SCTP_BUF_LEN(op_err) = 0;
+#ifdef INET6
+ SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
+#else
+ SCTP_BUF_RESV_UF(op_err, sizeof(struct ip));
+#endif
+ SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
+ SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
+ }
+ }
+ if (op_err) {
+ /* If we have space */
+ struct sctp_paramhdr s;
+
+ if (err_at % 4) {
+ uint32_t cpthis = 0;
+
+ pad_needed = 4 - (err_at % 4);
+ m_copyback(op_err, err_at, pad_needed, (caddr_t)&cpthis);
+ err_at += pad_needed;
+ }
+ s.param_type = htons(SCTP_UNRECOG_PARAM);
+ s.param_length = htons(sizeof(s) + plen);
+ m_copyback(op_err, err_at, sizeof(s), (caddr_t)&s);
+ err_at += sizeof(s);
+ if (plen > sizeof(tempbuf)) {
+ plen = sizeof(tempbuf);
+ }
+ phdr = sctp_get_next_param(mat, at, (struct sctp_paramhdr *)tempbuf, min(sizeof(tempbuf), plen));
+ if (phdr == NULL) {
+ sctp_m_freem(op_err);
+ /*
+					 * We are out of memory; drop the
+					 * error report and carry on with the
+					 * remaining parameters (the system
+					 * is in trouble though).
+ */
+ op_err = NULL;
+ goto more_processing;
+ }
+ m_copyback(op_err, err_at, plen, (caddr_t)phdr);
+ err_at += plen;
+ }
+ }
+ more_processing:
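+			/*
+			 * The top two bits of an unknown parameter type encode
+			 * the required action: 0x4000 set means report it in an
+			 * operational error (handled above); 0x8000 set means
+			 * skip it and keep processing, while 0x8000 clear means
+			 * stop processing the remaining parameters.
+			 */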
+ if ((ptype & 0x8000) == 0x0000) {
+ SCTPDBG(SCTP_DEBUG_OUTPUT1, "stop proc\n");
+ return (op_err);
+ } else {
+ /* skip this chunk and continue processing */
+ SCTPDBG(SCTP_DEBUG_OUTPUT1, "move on\n");
+ at += SCTP_SIZE32(plen);
+ }
+ break;
+
+ }
+ phdr = sctp_get_next_param(mat, at, &params, sizeof(params));
+ }
+ return (op_err);
+invalid_size:
+ SCTPDBG(SCTP_DEBUG_OUTPUT1, "abort flag set\n");
+ *abort_processing = 1;
+ if ((op_err == NULL) && phdr) {
+ int l_len;
+
+#ifdef INET6
+ l_len = sizeof(struct ip6_hdr) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
+#else
+ l_len = sizeof(struct ip) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
+#endif
+ l_len += (2 * sizeof(struct sctp_paramhdr));
+ op_err = sctp_get_mbuf_for_msg(l_len, 0, M_DONTWAIT, 1, MT_DATA);
+ if (op_err) {
+ SCTP_BUF_LEN(op_err) = 0;
+#ifdef INET6
+ SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
+#else
+ SCTP_BUF_RESV_UF(op_err, sizeof(struct ip));
+#endif
+ SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
+ SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
+ }
+ }
+ if ((op_err) && phdr) {
+ struct sctp_paramhdr s;
+
+ if (err_at % 4) {
+ uint32_t cpthis = 0;
+
+ pad_needed = 4 - (err_at % 4);
+ m_copyback(op_err, err_at, pad_needed, (caddr_t)&cpthis);
+ err_at += pad_needed;
+ }
+ s.param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
+ s.param_length = htons(sizeof(s) + sizeof(struct sctp_paramhdr));
+ m_copyback(op_err, err_at, sizeof(s), (caddr_t)&s);
+ err_at += sizeof(s);
+ /* Only copy back the p-hdr that caused the issue */
+ m_copyback(op_err, err_at, sizeof(struct sctp_paramhdr), (caddr_t)phdr);
+ }
+ return (op_err);
+}
+
+static int
+sctp_are_there_new_addresses(struct sctp_association *asoc,
+ struct mbuf *in_initpkt, int iphlen, int offset)
+{
+ /*
+	 * Given an INIT packet, look through it to verify that there are NO
+	 * new addresses. As we go through the parameters, add reports of any
+	 * un-understood parameters that require an error. Also we must
+	 * return (1) to drop the packet if we see an un-understood parameter
+	 * that tells us to drop the chunk.
+ */
+ struct sockaddr_in sin4, *sa4;
+
+#ifdef INET6
+ struct sockaddr_in6 sin6, *sa6;
+
+#endif
+ struct sockaddr *sa_touse;
+ struct sockaddr *sa;
+ struct sctp_paramhdr *phdr, params;
+ struct ip *iph;
+
+#ifdef INET6
+ struct ip6_hdr *ip6h;
+
+#endif
+ struct mbuf *mat;
+ uint16_t ptype, plen;
+ int err_at;
+ uint8_t fnd;
+ struct sctp_nets *net;
+
+ memset(&sin4, 0, sizeof(sin4));
+#ifdef INET6
+ memset(&sin6, 0, sizeof(sin6));
+#endif
+ sin4.sin_family = AF_INET;
+ sin4.sin_len = sizeof(sin4);
+#ifdef INET6
+ sin6.sin6_family = AF_INET6;
+ sin6.sin6_len = sizeof(sin6);
+#endif
+ sa_touse = NULL;
+ /* First what about the src address of the pkt ? */
+ iph = mtod(in_initpkt, struct ip *);
+ switch (iph->ip_v) {
+ case IPVERSION:
+ /* source addr is IPv4 */
+ sin4.sin_addr = iph->ip_src;
+ sa_touse = (struct sockaddr *)&sin4;
+ break;
+#ifdef INET6
+ case IPV6_VERSION >> 4:
+ /* source addr is IPv6 */
+ ip6h = mtod(in_initpkt, struct ip6_hdr *);
+ sin6.sin6_addr = ip6h->ip6_src;
+ sa_touse = (struct sockaddr *)&sin6;
+ break;
+#endif
+ default:
+ return (1);
+ }
+
+ fnd = 0;
+ TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+ sa = (struct sockaddr *)&net->ro._l_addr;
+ if (sa->sa_family == sa_touse->sa_family) {
+ if (sa->sa_family == AF_INET) {
+ sa4 = (struct sockaddr_in *)sa;
+ if (sa4->sin_addr.s_addr ==
+ sin4.sin_addr.s_addr) {
+ fnd = 1;
+ break;
+ }
+ }
+#ifdef INET6
+ if (sa->sa_family == AF_INET6) {
+ sa6 = (struct sockaddr_in6 *)sa;
+ if (SCTP6_ARE_ADDR_EQUAL(sa6,
+ &sin6)) {
+ fnd = 1;
+ break;
+ }
+ }
+#endif
+ }
+ }
+ if (fnd == 0) {
+		/* New address added! No need to look further. */
+ return (1);
+ }
+	/* Ok so far; let's munge through the rest of the packet */
+ mat = in_initpkt;
+ err_at = 0;
+ sa_touse = NULL;
+ offset += sizeof(struct sctp_init_chunk);
+ phdr = sctp_get_next_param(mat, offset, &params, sizeof(params));
+ while (phdr) {
+ ptype = ntohs(phdr->param_type);
+ plen = ntohs(phdr->param_length);
+ if (ptype == SCTP_IPV4_ADDRESS) {
+ struct sctp_ipv4addr_param *p4, p4_buf;
+
+ phdr = sctp_get_next_param(mat, offset,
+ (struct sctp_paramhdr *)&p4_buf, sizeof(p4_buf));
+ if (plen != sizeof(struct sctp_ipv4addr_param) ||
+ phdr == NULL) {
+ return (1);
+ }
+ p4 = (struct sctp_ipv4addr_param *)phdr;
+ sin4.sin_addr.s_addr = p4->addr;
+ sa_touse = (struct sockaddr *)&sin4;
+ } else if (ptype == SCTP_IPV6_ADDRESS) {
+ struct sctp_ipv6addr_param *p6, p6_buf;
+
+ phdr = sctp_get_next_param(mat, offset,
+ (struct sctp_paramhdr *)&p6_buf, sizeof(p6_buf));
+ if (plen != sizeof(struct sctp_ipv6addr_param) ||
+ phdr == NULL) {
+ return (1);
+ }
+			p6 = (struct sctp_ipv6addr_param *)phdr;
+#ifdef INET6
+			memcpy((caddr_t)&sin6.sin6_addr, p6->addr,
+			    sizeof(p6->addr));
+			sa_touse = (struct sockaddr *)&sin6;
+#else
+			sa_touse = NULL;
+#endif
+ }
+ if (sa_touse) {
+ /* ok, sa_touse points to one to check */
+ fnd = 0;
+ TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+ sa = (struct sockaddr *)&net->ro._l_addr;
+ if (sa->sa_family != sa_touse->sa_family) {
+ continue;
+ }
+ if (sa->sa_family == AF_INET) {
+ sa4 = (struct sockaddr_in *)sa;
+ if (sa4->sin_addr.s_addr ==
+ sin4.sin_addr.s_addr) {
+ fnd = 1;
+ break;
+ }
+ }
+#ifdef INET6
+ if (sa->sa_family == AF_INET6) {
+ sa6 = (struct sockaddr_in6 *)sa;
+ if (SCTP6_ARE_ADDR_EQUAL(
+ sa6, &sin6)) {
+ fnd = 1;
+ break;
+ }
+ }
+#endif
+ }
+ if (!fnd) {
+ /* New addr added! no need to look further */
+ return (1);
+ }
+ }
+ offset += SCTP_SIZE32(plen);
+ phdr = sctp_get_next_param(mat, offset, &params, sizeof(params));
+ }
+ return (0);
+}
+
+/*
+ * Given an MBUF chain containing an INIT that was sent to us, build an
+ * INIT-ACK with a COOKIE and send it back. We assume the in_initpkt has been
+ * pulled up to include the IPv6/IPv4 header, the SCTP header, and the
+ * initial part of the INIT message (i.e. the struct sctp_init_msg).
+ */
+void
+sctp_send_initiate_ack(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
+ struct mbuf *init_pkt, int iphlen, int offset, struct sctphdr *sh,
+ struct sctp_init_chunk *init_chk, uint32_t vrf_id, uint16_t port, int hold_inp_lock)
+{
+ struct sctp_association *asoc;
+ struct mbuf *m, *m_at, *m_tmp, *m_cookie, *op_err, *mp_last;
+ struct sctp_init_ack_chunk *initack;
+ struct sctp_adaptation_layer_indication *ali;
+ struct sctp_ecn_supported_param *ecn;
+ struct sctp_prsctp_supported_param *prsctp;
+ struct sctp_ecn_nonce_supported_param *ecn_nonce;
+ struct sctp_supported_chunk_types_param *pr_supported;
+ union sctp_sockstore store, store1, *over_addr;
+ struct sockaddr_in *sin, *to_sin;
+
+#ifdef INET6
+ struct sockaddr_in6 *sin6, *to_sin6;
+
+#endif
+ struct ip *iph;
+
+#ifdef INET6
+ struct ip6_hdr *ip6;
+
+#endif
+ struct sockaddr *to;
+ struct sctp_state_cookie stc;
+ struct sctp_nets *net = NULL;
+ uint8_t *signature = NULL;
+ int cnt_inits_to = 0;
+ uint16_t his_limit, i_want;
+ int abort_flag, padval;
+ int num_ext;
+ int p_len;
+ int nat_friendly = 0;
+ struct socket *so;
+
+ if (stcb)
+ asoc = &stcb->asoc;
+ else
+ asoc = NULL;
+ mp_last = NULL;
+ if ((asoc != NULL) &&
+ (SCTP_GET_STATE(asoc) != SCTP_STATE_COOKIE_WAIT) &&
+ (sctp_are_there_new_addresses(asoc, init_pkt, iphlen, offset))) {
+ /* new addresses, out of here in non-cookie-wait states */
+ /*
+		 * Send an ABORT. We don't add the new-address error cause,
+		 * though we do set the T bit and copy in the 0 tag; this
+		 * looks no different than if no listener were present.
+ */
+ sctp_send_abort(init_pkt, iphlen, sh, 0, NULL, vrf_id, port);
+ return;
+ }
+ abort_flag = 0;
+ op_err = sctp_arethere_unrecognized_parameters(init_pkt,
+ (offset + sizeof(struct sctp_init_chunk)),
+ &abort_flag, (struct sctp_chunkhdr *)init_chk, &nat_friendly);
+ if (abort_flag) {
+do_a_abort:
+ sctp_send_abort(init_pkt, iphlen, sh,
+ init_chk->init.initiate_tag, op_err, vrf_id, port);
+ return;
+ }
+ m = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
+ if (m == NULL) {
+ /* No memory, INIT timer will re-attempt. */
+ if (op_err)
+ sctp_m_freem(op_err);
+ return;
+ }
+ SCTP_BUF_LEN(m) = sizeof(struct sctp_init_chunk);
+
+	/* the time I built the cookie */
+ (void)SCTP_GETTIME_TIMEVAL(&stc.time_entered);
+
+ /* populate any tie tags */
+ if (asoc != NULL) {
+ /* unlock before tag selections */
+ stc.tie_tag_my_vtag = asoc->my_vtag_nonce;
+ stc.tie_tag_peer_vtag = asoc->peer_vtag_nonce;
+ stc.cookie_life = asoc->cookie_life;
+ net = asoc->primary_destination;
+ } else {
+ stc.tie_tag_my_vtag = 0;
+ stc.tie_tag_peer_vtag = 0;
+ /* life I will award this cookie */
+ stc.cookie_life = inp->sctp_ep.def_cookie_life;
+ }
+
+ /* copy in the ports for later check */
+ stc.myport = sh->dest_port;
+ stc.peerport = sh->src_port;
+
+ /*
+	 * If we wanted to honor cookie life extensions, we would add to
+	 * stc.cookie_life. For now we should NOT honor any extension.
+ */
+ stc.site_scope = stc.local_scope = stc.loopback_scope = 0;
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
+ struct inpcb *in_inp;
+
+		/* It's a V6 socket */
+ in_inp = (struct inpcb *)inp;
+ stc.ipv6_addr_legal = 1;
+ /* Now look at the binding flag to see if V4 will be legal */
+ if (SCTP_IPV6_V6ONLY(in_inp) == 0) {
+ stc.ipv4_addr_legal = 1;
+ } else {
+ /* V4 addresses are NOT legal on the association */
+ stc.ipv4_addr_legal = 0;
+ }
+ } else {
+		/* It's a V4 socket, no V6 */
+ stc.ipv4_addr_legal = 1;
+ stc.ipv6_addr_legal = 0;
+ }
+
+#ifdef SCTP_DONT_DO_PRIVADDR_SCOPE
+ stc.ipv4_scope = 1;
+#else
+ stc.ipv4_scope = 0;
+#endif
+ /* now for scope setup */
+ memset((caddr_t)&store, 0, sizeof(store));
+ memset((caddr_t)&store1, 0, sizeof(store1));
+ sin = &store.sin;
+ to_sin = &store1.sin;
+#ifdef INET6
+ sin6 = &store.sin6;
+ to_sin6 = &store1.sin6;
+#endif
+ iph = mtod(init_pkt, struct ip *);
+ /* establish the to_addr's */
+ switch (iph->ip_v) {
+ case IPVERSION:
+ to_sin->sin_port = sh->dest_port;
+ to_sin->sin_family = AF_INET;
+ to_sin->sin_len = sizeof(struct sockaddr_in);
+ to_sin->sin_addr = iph->ip_dst;
+ break;
+#ifdef INET6
+ case IPV6_VERSION >> 4:
+ ip6 = mtod(init_pkt, struct ip6_hdr *);
+ to_sin6->sin6_addr = ip6->ip6_dst;
+ to_sin6->sin6_scope_id = 0;
+ to_sin6->sin6_port = sh->dest_port;
+ to_sin6->sin6_family = AF_INET6;
+ to_sin6->sin6_len = sizeof(struct sockaddr_in6);
+ break;
+#endif
+ default:
+ goto do_a_abort;
+ break;
+	}
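+	/*
+	 * store1/to_sin(6) now hold the packet's destination, i.e. our
+	 * local address. When no existing net is known, store/sin(6) is
+	 * filled in below from the packet's source and used as the reply
+	 * destination.
+	 */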
+
+ if (net == NULL) {
+ to = (struct sockaddr *)&store;
+ switch (iph->ip_v) {
+ case IPVERSION:
+ {
+ sin->sin_family = AF_INET;
+ sin->sin_len = sizeof(struct sockaddr_in);
+ sin->sin_port = sh->src_port;
+ sin->sin_addr = iph->ip_src;
+ /* lookup address */
+ stc.address[0] = sin->sin_addr.s_addr;
+ stc.address[1] = 0;
+ stc.address[2] = 0;
+ stc.address[3] = 0;
+ stc.addr_type = SCTP_IPV4_ADDRESS;
+ /* local from address */
+ stc.laddress[0] = to_sin->sin_addr.s_addr;
+ stc.laddress[1] = 0;
+ stc.laddress[2] = 0;
+ stc.laddress[3] = 0;
+ stc.laddr_type = SCTP_IPV4_ADDRESS;
+ /* scope_id is only for v6 */
+ stc.scope_id = 0;
+#ifndef SCTP_DONT_DO_PRIVADDR_SCOPE
+ if (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr)) {
+ stc.ipv4_scope = 1;
+ }
+#else
+ stc.ipv4_scope = 1;
+#endif /* SCTP_DONT_DO_PRIVADDR_SCOPE */
+ /* Must use the address in this case */
+ if (sctp_is_address_on_local_host((struct sockaddr *)sin, vrf_id)) {
+ stc.loopback_scope = 1;
+ stc.ipv4_scope = 1;
+ stc.site_scope = 1;
+ stc.local_scope = 0;
+ }
+ break;
+ }
+#ifdef INET6
+ case IPV6_VERSION >> 4:
+ {
+ ip6 = mtod(init_pkt, struct ip6_hdr *);
+ sin6->sin6_family = AF_INET6;
+ sin6->sin6_len = sizeof(struct sockaddr_in6);
+ sin6->sin6_port = sh->src_port;
+ sin6->sin6_addr = ip6->ip6_src;
+ /* lookup address */
+ memcpy(&stc.address, &sin6->sin6_addr,
+ sizeof(struct in6_addr));
+ sin6->sin6_scope_id = 0;
+ stc.addr_type = SCTP_IPV6_ADDRESS;
+ stc.scope_id = 0;
+ if (sctp_is_address_on_local_host((struct sockaddr *)sin6, vrf_id)) {
+ /*
+ * FIX ME: does this have scope from
+ * rcvif?
+ */
+ (void)sa6_recoverscope(sin6);
+ stc.scope_id = sin6->sin6_scope_id;
+ sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone));
+ stc.loopback_scope = 1;
+ stc.local_scope = 0;
+ stc.site_scope = 1;
+ stc.ipv4_scope = 1;
+ } else if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
+ /*
+ * If the new destination is a
+ * LINK_LOCAL we must have common
+ * both site and local scope. Don't
+ * set local scope though since we
+ * must depend on the source to be
+ * added implicitly. We cannot
+ * assure just because we share one
+ * link that all links are common.
+ */
+ stc.local_scope = 0;
+ stc.site_scope = 1;
+ stc.ipv4_scope = 1;
+ /*
+ * we start counting for the private
+ * address stuff at 1. since the
+ * link local we source from won't
+ * show up in our scoped count.
+ */
+ cnt_inits_to = 1;
+ /*
+ * pull out the scope_id from
+ * incoming pkt
+ */
+ /*
+ * FIX ME: does this have scope from
+ * rcvif?
+ */
+ (void)sa6_recoverscope(sin6);
+ stc.scope_id = sin6->sin6_scope_id;
+ sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone));
+ } else if (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr)) {
+ /*
+ * If the new destination is
+ * SITE_LOCAL then we must have site
+ * scope in common.
+ */
+ stc.site_scope = 1;
+ }
+ memcpy(&stc.laddress, &to_sin6->sin6_addr, sizeof(struct in6_addr));
+ stc.laddr_type = SCTP_IPV6_ADDRESS;
+ break;
+ }
+#endif
+ default:
+ /* TSNH */
+ goto do_a_abort;
+ break;
+ }
+ } else {
+ /* set the scope per the existing tcb */
+
+#ifdef INET6
+ struct sctp_nets *lnet;
+
+#endif
+
+ stc.loopback_scope = asoc->loopback_scope;
+ stc.ipv4_scope = asoc->ipv4_local_scope;
+ stc.site_scope = asoc->site_scope;
+ stc.local_scope = asoc->local_scope;
+#ifdef INET6
+ /* Why do we not consider IPv4 LL addresses? */
+ TAILQ_FOREACH(lnet, &asoc->nets, sctp_next) {
+ if (lnet->ro._l_addr.sin6.sin6_family == AF_INET6) {
+ if (IN6_IS_ADDR_LINKLOCAL(&lnet->ro._l_addr.sin6.sin6_addr)) {
+ /*
+ * if we have a LL address, start
+ * counting at 1.
+ */
+ cnt_inits_to = 1;
+ }
+ }
+ }
+#endif
+ /* use the net pointer */
+ to = (struct sockaddr *)&net->ro._l_addr;
+ switch (to->sa_family) {
+ case AF_INET:
+ sin = (struct sockaddr_in *)to;
+ stc.address[0] = sin->sin_addr.s_addr;
+ stc.address[1] = 0;
+ stc.address[2] = 0;
+ stc.address[3] = 0;
+ stc.addr_type = SCTP_IPV4_ADDRESS;
+ if (net->src_addr_selected == 0) {
+ /*
+				 * strange case here, the INIT should have
+				 * done the selection.
+ */
+ net->ro._s_addr = sctp_source_address_selection(inp,
+ stcb, (sctp_route_t *) & net->ro,
+ net, 0, vrf_id);
+ if (net->ro._s_addr == NULL)
+ return;
+
+ net->src_addr_selected = 1;
+
+ }
+ stc.laddress[0] = net->ro._s_addr->address.sin.sin_addr.s_addr;
+ stc.laddress[1] = 0;
+ stc.laddress[2] = 0;
+ stc.laddress[3] = 0;
+ stc.laddr_type = SCTP_IPV4_ADDRESS;
+ break;
+#ifdef INET6
+ case AF_INET6:
+ sin6 = (struct sockaddr_in6 *)to;
+ memcpy(&stc.address, &sin6->sin6_addr,
+ sizeof(struct in6_addr));
+ stc.addr_type = SCTP_IPV6_ADDRESS;
+ if (net->src_addr_selected == 0) {
+ /*
+				 * strange case here, the INIT should have
+				 * done the selection.
+ */
+ net->ro._s_addr = sctp_source_address_selection(inp,
+ stcb, (sctp_route_t *) & net->ro,
+ net, 0, vrf_id);
+ if (net->ro._s_addr == NULL)
+ return;
+
+ net->src_addr_selected = 1;
+ }
+ memcpy(&stc.laddress, &net->ro._s_addr->address.sin6.sin6_addr,
+ sizeof(struct in6_addr));
+ stc.laddr_type = SCTP_IPV6_ADDRESS;
+ break;
+#endif
+ }
+ }
+ /* Now lets put the SCTP header in place */
+ initack = mtod(m, struct sctp_init_ack_chunk *);
+ /* Save it off for quick ref */
+ stc.peers_vtag = init_chk->init.initiate_tag;
+ /* who are we */
+ memcpy(stc.identification, SCTP_VERSION_STRING,
+ min(strlen(SCTP_VERSION_STRING), sizeof(stc.identification)));
+ /* now the chunk header */
+ initack->ch.chunk_type = SCTP_INITIATION_ACK;
+ initack->ch.chunk_flags = 0;
+ /* fill in later from mbuf we build */
+ initack->ch.chunk_length = 0;
+ /* place in my tag */
+ if ((asoc != NULL) &&
+ ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) ||
+ (SCTP_GET_STATE(asoc) == SCTP_STATE_INUSE) ||
+ (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED))) {
+ /* re-use the v-tags and init-seq here */
+ initack->init.initiate_tag = htonl(asoc->my_vtag);
+ initack->init.initial_tsn = htonl(asoc->init_seq_number);
+ } else {
+ uint32_t vtag, itsn;
+
+ if (hold_inp_lock) {
+ SCTP_INP_INCR_REF(inp);
+ SCTP_INP_RUNLOCK(inp);
+ }
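+		/*
+		 * The INP read lock is dropped (with a reference held so
+		 * the endpoint cannot vanish) while we pick a tag and an
+		 * initial TSN, and re-taken afterwards.
+		 */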
+ if (asoc) {
+ atomic_add_int(&asoc->refcnt, 1);
+ SCTP_TCB_UNLOCK(stcb);
+ new_tag:
+ vtag = sctp_select_a_tag(inp, inp->sctp_lport, sh->src_port, 1);
+ if ((asoc->peer_supports_nat) && (vtag == asoc->my_vtag)) {
+ /*
+				 * Got a duplicate vtag from some guy behind
+				 * a NAT; make sure we don't use it.
+ */
+ goto new_tag;
+ }
+ initack->init.initiate_tag = htonl(vtag);
+ /* get a TSN to use too */
+ itsn = sctp_select_initial_TSN(&inp->sctp_ep);
+ initack->init.initial_tsn = htonl(itsn);
+ SCTP_TCB_LOCK(stcb);
+ atomic_add_int(&asoc->refcnt, -1);
+ } else {
+ vtag = sctp_select_a_tag(inp, inp->sctp_lport, sh->src_port, 1);
+ initack->init.initiate_tag = htonl(vtag);
+ /* get a TSN to use too */
+ initack->init.initial_tsn = htonl(sctp_select_initial_TSN(&inp->sctp_ep));
+ }
+ if (hold_inp_lock) {
+ SCTP_INP_RLOCK(inp);
+ SCTP_INP_DECR_REF(inp);
+ }
+ }
+	/* save away my tag too */
+ stc.my_vtag = initack->init.initiate_tag;
+
+ /* set up some of the credits. */
+ so = inp->sctp_socket;
+ if (so == NULL) {
+ /* memory problem */
+ sctp_m_freem(m);
+ return;
+ } else {
+ initack->init.a_rwnd = htonl(max(SCTP_SB_LIMIT_RCV(so), SCTP_MINIMAL_RWND));
+ }
+ /* set what I want */
+ his_limit = ntohs(init_chk->init.num_inbound_streams);
+ /* choose what I want */
+ if (asoc != NULL) {
+ if (asoc->streamoutcnt > inp->sctp_ep.pre_open_stream_count) {
+ i_want = asoc->streamoutcnt;
+ } else {
+ i_want = inp->sctp_ep.pre_open_stream_count;
+ }
+ } else {
+ i_want = inp->sctp_ep.pre_open_stream_count;
+ }
+ if (his_limit < i_want) {
+ /* I Want more :< */
+ initack->init.num_outbound_streams = init_chk->init.num_inbound_streams;
+ } else {
+ /* I can have what I want :> */
+ initack->init.num_outbound_streams = htons(i_want);
+ }
+	/* tell him his limit. */
+ initack->init.num_inbound_streams =
+ htons(inp->sctp_ep.max_open_streams_intome);
+
+ /* adaptation layer indication parameter */
+ ali = (struct sctp_adaptation_layer_indication *)((caddr_t)initack + sizeof(*initack));
+ ali->ph.param_type = htons(SCTP_ULP_ADAPTATION);
+ ali->ph.param_length = htons(sizeof(*ali));
+ ali->indication = ntohl(inp->sctp_ep.adaptation_layer_indicator);
+ SCTP_BUF_LEN(m) += sizeof(*ali);
+ ecn = (struct sctp_ecn_supported_param *)((caddr_t)ali + sizeof(*ali));
+
+ /* ECN parameter */
+ if (SCTP_BASE_SYSCTL(sctp_ecn_enable) == 1) {
+ ecn->ph.param_type = htons(SCTP_ECN_CAPABLE);
+ ecn->ph.param_length = htons(sizeof(*ecn));
+ SCTP_BUF_LEN(m) += sizeof(*ecn);
+
+ prsctp = (struct sctp_prsctp_supported_param *)((caddr_t)ecn +
+ sizeof(*ecn));
+ } else {
+ prsctp = (struct sctp_prsctp_supported_param *)((caddr_t)ecn);
+ }
+ /* And now tell the peer we do pr-sctp */
+ prsctp->ph.param_type = htons(SCTP_PRSCTP_SUPPORTED);
+ prsctp->ph.param_length = htons(sizeof(*prsctp));
+ SCTP_BUF_LEN(m) += sizeof(*prsctp);
+ if (nat_friendly) {
+ /* Add NAT friendly parameter */
+ struct sctp_paramhdr *ph;
+
+ ph = (struct sctp_paramhdr *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m));
+ ph->param_type = htons(SCTP_HAS_NAT_SUPPORT);
+ ph->param_length = htons(sizeof(struct sctp_paramhdr));
+ SCTP_BUF_LEN(m) += sizeof(struct sctp_paramhdr);
+ }
+ /* And now tell the peer we do all the extensions */
+ pr_supported = (struct sctp_supported_chunk_types_param *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m));
+ pr_supported->ph.param_type = htons(SCTP_SUPPORTED_CHUNK_EXT);
+ num_ext = 0;
+ pr_supported->chunk_types[num_ext++] = SCTP_ASCONF;
+ pr_supported->chunk_types[num_ext++] = SCTP_ASCONF_ACK;
+ pr_supported->chunk_types[num_ext++] = SCTP_FORWARD_CUM_TSN;
+ pr_supported->chunk_types[num_ext++] = SCTP_PACKET_DROPPED;
+ pr_supported->chunk_types[num_ext++] = SCTP_STREAM_RESET;
+ if (!SCTP_BASE_SYSCTL(sctp_auth_disable))
+ pr_supported->chunk_types[num_ext++] = SCTP_AUTHENTICATION;
+ if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off))
+ pr_supported->chunk_types[num_ext++] = SCTP_NR_SELECTIVE_ACK;
+ p_len = sizeof(*pr_supported) + num_ext;
+ pr_supported->ph.param_length = htons(p_len);
+ bzero((caddr_t)pr_supported + p_len, SCTP_SIZE32(p_len) - p_len);
+ SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
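+	/*
+	 * Note that the parameter header carries the unpadded length,
+	 * while the mbuf length advances by the 32-bit rounded size so
+	 * the next parameter starts aligned; the pad bytes were zeroed
+	 * above.
+	 */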
+
+ /* ECN nonce: And now tell the peer we support ECN nonce */
+ if (SCTP_BASE_SYSCTL(sctp_ecn_nonce)) {
+ ecn_nonce = (struct sctp_ecn_nonce_supported_param *)
+ ((caddr_t)pr_supported + SCTP_SIZE32(p_len));
+ ecn_nonce->ph.param_type = htons(SCTP_ECN_NONCE_SUPPORTED);
+ ecn_nonce->ph.param_length = htons(sizeof(*ecn_nonce));
+ SCTP_BUF_LEN(m) += sizeof(*ecn_nonce);
+ }
+ /* add authentication parameters */
+ if (!SCTP_BASE_SYSCTL(sctp_auth_disable)) {
+ struct sctp_auth_random *randp;
+ struct sctp_auth_hmac_algo *hmacs;
+ struct sctp_auth_chunk_list *chunks;
+ uint16_t random_len;
+
+ /* generate and add RANDOM parameter */
+ random_len = SCTP_AUTH_RANDOM_SIZE_DEFAULT;
+ randp = (struct sctp_auth_random *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m));
+ randp->ph.param_type = htons(SCTP_RANDOM);
+ p_len = sizeof(*randp) + random_len;
+ randp->ph.param_length = htons(p_len);
+ SCTP_READ_RANDOM(randp->random_data, random_len);
+ /* zero out any padding required */
+ bzero((caddr_t)randp + p_len, SCTP_SIZE32(p_len) - p_len);
+ SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
+
+ /* add HMAC_ALGO parameter */
+ hmacs = (struct sctp_auth_hmac_algo *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m));
+ p_len = sctp_serialize_hmaclist(inp->sctp_ep.local_hmacs,
+ (uint8_t *) hmacs->hmac_ids);
+ if (p_len > 0) {
+ p_len += sizeof(*hmacs);
+ hmacs->ph.param_type = htons(SCTP_HMAC_LIST);
+ hmacs->ph.param_length = htons(p_len);
+ /* zero out any padding required */
+ bzero((caddr_t)hmacs + p_len, SCTP_SIZE32(p_len) - p_len);
+ SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
+ }
+ /* add CHUNKS parameter */
+ chunks = (struct sctp_auth_chunk_list *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m));
+ p_len = sctp_serialize_auth_chunks(inp->sctp_ep.local_auth_chunks,
+ chunks->chunk_types);
+ if (p_len > 0) {
+ p_len += sizeof(*chunks);
+ chunks->ph.param_type = htons(SCTP_CHUNK_LIST);
+ chunks->ph.param_length = htons(p_len);
+ /* zero out any padding required */
+ bzero((caddr_t)chunks + p_len, SCTP_SIZE32(p_len) - p_len);
+ SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
+ }
+ }
+ m_at = m;
+ /* now the addresses */
+ {
+ struct sctp_scoping scp;
+
+ /*
+ * To optimize this we could put the scoping stuff into a
+ * structure and remove the individual uint8's from the stc
+ * structure. Then we could just sifa in the address within
+ * the stc.. but for now this is a quick hack to get the
+ * address stuff teased apart.
+ */
+ scp.ipv4_addr_legal = stc.ipv4_addr_legal;
+ scp.ipv6_addr_legal = stc.ipv6_addr_legal;
+ scp.loopback_scope = stc.loopback_scope;
+ scp.ipv4_local_scope = stc.ipv4_scope;
+ scp.local_scope = stc.local_scope;
+ scp.site_scope = stc.site_scope;
+ m_at = sctp_add_addresses_to_i_ia(inp, &scp, m_at, cnt_inits_to);
+ }
+
+ /* tack on the operational error if present */
+ if (op_err) {
+ struct mbuf *ol;
+ int llen;
+
+ llen = 0;
+ ol = op_err;
+ while (ol) {
+ llen += SCTP_BUF_LEN(ol);
+ ol = SCTP_BUF_NEXT(ol);
+ }
+ if (llen % 4) {
+ /* must add a pad to the param */
+ uint32_t cpthis = 0;
+ int padlen;
+
+ padlen = 4 - (llen % 4);
+ m_copyback(op_err, llen, padlen, (caddr_t)&cpthis);
+ }
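+		/*
+		 * Walk to the tail of the INIT-ACK chain and hang the
+		 * error causes off it, then advance m_at to the new tail
+		 * so later appends land after them.
+		 */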
+ while (SCTP_BUF_NEXT(m_at) != NULL) {
+ m_at = SCTP_BUF_NEXT(m_at);
+ }
+ SCTP_BUF_NEXT(m_at) = op_err;
+ while (SCTP_BUF_NEXT(m_at) != NULL) {
+ m_at = SCTP_BUF_NEXT(m_at);
+ }
+ }
+	/* pre-calculate the size and update pkt header and chunk header */
+ p_len = 0;
+ for (m_tmp = m; m_tmp; m_tmp = SCTP_BUF_NEXT(m_tmp)) {
+ p_len += SCTP_BUF_LEN(m_tmp);
+ if (SCTP_BUF_NEXT(m_tmp) == NULL) {
+ /* m_tmp should now point to last one */
+ break;
+ }
+ }
+
+ /* Now we must build a cookie */
+ m_cookie = sctp_add_cookie(inp, init_pkt, offset, m, 0, &stc, &signature);
+ if (m_cookie == NULL) {
+ /* memory problem */
+ sctp_m_freem(m);
+ return;
+ }
+ /* Now append the cookie to the end and update the space/size */
+ SCTP_BUF_NEXT(m_tmp) = m_cookie;
+
+ for (m_tmp = m_cookie; m_tmp; m_tmp = SCTP_BUF_NEXT(m_tmp)) {
+ p_len += SCTP_BUF_LEN(m_tmp);
+ if (SCTP_BUF_NEXT(m_tmp) == NULL) {
+ /* m_tmp should now point to last one */
+ mp_last = m_tmp;
+ break;
+ }
+ }
+ /*
+ * Place in the size, but we don't include the last pad (if any) in
+ * the INIT-ACK.
+ */
+ initack->ch.chunk_length = htons(p_len);
+
+ /*
+ * Time to sign the cookie, we don't sign over the cookie signature
+ * though thus we set trailer.
+ */
+ (void)sctp_hmac_m(SCTP_HMAC,
+ (uint8_t *) inp->sctp_ep.secret_key[(int)(inp->sctp_ep.current_secret_number)],
+ SCTP_SECRET_SIZE, m_cookie, sizeof(struct sctp_paramhdr),
+ (uint8_t *) signature, SCTP_SIGNATURE_SIZE);
+ /*
+	 * We sifa 0 here to NOT set IP_DF if it's IPv4; we ignore the return
+	 * here since the timer will drive a retransmission.
+ */
+ padval = p_len % 4;
+ if ((padval) && (mp_last)) {
+ /* see my previous comments on mp_last */
+ int ret;
+
+ ret = sctp_add_pad_tombuf(mp_last, (4 - padval));
+ if (ret) {
+ /* Houston we have a problem, no space */
+ sctp_m_freem(m);
+ return;
+ }
+ p_len += padval;
+ }
+ if (stc.loopback_scope) {
+ over_addr = &store1;
+ } else {
+ over_addr = NULL;
+ }
+
+ (void)sctp_lowlevel_chunk_output(inp, NULL, NULL, to, m, 0, NULL, 0, 0,
+ 0, NULL, 0,
+ inp->sctp_lport, sh->src_port, init_chk->init.initiate_tag,
+ port, SCTP_SO_NOT_LOCKED, over_addr);
+ SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
+}
+
+
+void
+sctp_insert_on_wheel(struct sctp_tcb *stcb,
+ struct sctp_association *asoc,
+ struct sctp_stream_out *strq, int holds_lock)
+{
+ if (holds_lock == 0) {
+ SCTP_TCB_SEND_LOCK(stcb);
+ }
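+	/*
+	 * A stream is on the wheel only if its TAILQ entry is linked;
+	 * both pointers NULL means it is not queued yet.
+	 */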
+ if ((strq->next_spoke.tqe_next == NULL) &&
+ (strq->next_spoke.tqe_prev == NULL)) {
+ TAILQ_INSERT_TAIL(&asoc->out_wheel, strq, next_spoke);
+ }
+ if (holds_lock == 0) {
+ SCTP_TCB_SEND_UNLOCK(stcb);
+ }
+}
+
+void
+sctp_remove_from_wheel(struct sctp_tcb *stcb,
+ struct sctp_association *asoc,
+ struct sctp_stream_out *strq,
+ int holds_lock)
+{
+	/* take it off and then set it up so we know it is not on the wheel */
+ if (holds_lock == 0) {
+ SCTP_TCB_SEND_LOCK(stcb);
+ }
+ if (TAILQ_EMPTY(&strq->outqueue)) {
+ if (asoc->last_out_stream == strq) {
+ asoc->last_out_stream = TAILQ_PREV(asoc->last_out_stream, sctpwheel_listhead, next_spoke);
+ if (asoc->last_out_stream == NULL) {
+ asoc->last_out_stream = TAILQ_LAST(&asoc->out_wheel, sctpwheel_listhead);
+ }
+ if (asoc->last_out_stream == strq) {
+ asoc->last_out_stream = NULL;
+ }
+ }
+ TAILQ_REMOVE(&asoc->out_wheel, strq, next_spoke);
+ strq->next_spoke.tqe_next = NULL;
+ strq->next_spoke.tqe_prev = NULL;
+ }
+ if (holds_lock == 0) {
+ SCTP_TCB_SEND_UNLOCK(stcb);
+ }
+}
+
+static void
+sctp_prune_prsctp(struct sctp_tcb *stcb,
+ struct sctp_association *asoc,
+ struct sctp_sndrcvinfo *srcv,
+ int dataout)
+{
+ int freed_spc = 0;
+ struct sctp_tmit_chunk *chk, *nchk;
+
+ SCTP_TCB_LOCK_ASSERT(stcb);
+ if ((asoc->peer_supports_prsctp) &&
+ (asoc->sent_queue_cnt_removeable > 0)) {
+ TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
+ /*
+ * Look for chunks marked with the PR_SCTP flag AND
+			 * the buffer space flag. If the one being sent is of
+			 * equal or greater priority, then purge the old one
+			 * and free some space.
+ */
+ if (PR_SCTP_BUF_ENABLED(chk->flags)) {
+ /*
+ * This one is PR-SCTP AND buffer space
+ * limited type
+ */
+ if (chk->rec.data.timetodrop.tv_sec >= (long)srcv->sinfo_timetolive) {
+ /*
+					 * Lower numbers equate to higher
+					 * priority, so if the one we are
+ * looking at has a larger or equal
+ * priority we want to drop the data
+ * and NOT retransmit it.
+ */
+ if (chk->data) {
+ /*
+ * We release the book_size
+ * if the mbuf is here
+ */
+ int ret_spc;
+ int cause;
+
+ if (chk->sent > SCTP_DATAGRAM_UNSENT)
+ cause = SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT;
+ else
+ cause = SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_UNSENT;
+ ret_spc = sctp_release_pr_sctp_chunk(stcb, chk,
+ cause,
+ SCTP_SO_LOCKED);
+ freed_spc += ret_spc;
+ if (freed_spc >= dataout) {
+ return;
+ }
+ } /* if chunk was present */
+			}	/* if of sufficient priority */
+ } /* if chunk has enabled */
+ } /* tailqforeach */
+
+ chk = TAILQ_FIRST(&asoc->send_queue);
+ while (chk) {
+ nchk = TAILQ_NEXT(chk, sctp_next);
+ /* Here we must move to the sent queue and mark */
+ if (PR_SCTP_BUF_ENABLED(chk->flags)) {
+ if (chk->rec.data.timetodrop.tv_sec >= (long)srcv->sinfo_timetolive) {
+ if (chk->data) {
+ /*
+ * We release the book_size
+ * if the mbuf is here
+ */
+ int ret_spc;
+
+ ret_spc = sctp_release_pr_sctp_chunk(stcb, chk,
+ SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_UNSENT,
+ SCTP_SO_LOCKED);
+
+ freed_spc += ret_spc;
+ if (freed_spc >= dataout) {
+ return;
+ }
+ } /* end if chk->data */
+ } /* end if right class */
+ } /* end if chk pr-sctp */
+ chk = nchk;
+ } /* end while (chk) */
+ } /* if enabled in asoc */
+}
+
+int
+sctp_get_frag_point(struct sctp_tcb *stcb,
+ struct sctp_association *asoc)
+{
+ int siz, ovh;
+
+ /*
+ * For endpoints that have both v6 and v4 addresses we must reserve
+	 * room for the ipv6 header; for those that are only dealing with V4
+ * we use a larger frag point.
+ */
+ if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
+ ovh = SCTP_MED_OVERHEAD;
+ } else {
+ ovh = SCTP_MED_V4_OVERHEAD;
+ }
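+	/*
+	 * The frag point is the largest DATA payload that fits in one
+	 * packet: the smallest path MTU (or the configured frag point)
+	 * minus the per-packet overhead chosen above.
+	 */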
+
+ if (stcb->asoc.sctp_frag_point > asoc->smallest_mtu)
+ siz = asoc->smallest_mtu - ovh;
+ else
+ siz = (stcb->asoc.sctp_frag_point - ovh);
+ /*
+ * if (siz > (MCLBYTES-sizeof(struct sctp_data_chunk))) {
+ */
+ /* A data chunk MUST fit in a cluster */
+ /* siz = (MCLBYTES - sizeof(struct sctp_data_chunk)); */
+ /* } */
+
+ /* adjust for an AUTH chunk if DATA requires auth */
+ if (sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks))
+ siz -= sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
+
+ if (siz % 4) {
+ /* make it an even word boundary please */
+ siz -= (siz % 4);
+ }
+ return (siz);
+}
+
+static void
+sctp_set_prsctp_policy(struct sctp_stream_queue_pending *sp)
+{
+ sp->pr_sctp_on = 0;
+ /*
+ * We assume that the user wants PR_SCTP_TTL if the user provides a
+ * positive lifetime but does not specify any PR_SCTP policy. This
+ * is a BAD assumption and causes problems at least with the
+	 * U-Vancouver MPI folks. I will change this to be no policy means
+ * NO PR-SCTP.
+ */
+ if (PR_SCTP_ENABLED(sp->sinfo_flags)) {
+ sp->act_flags |= PR_SCTP_POLICY(sp->sinfo_flags);
+ sp->pr_sctp_on = 1;
+ } else {
+ return;
+ }
+ switch (PR_SCTP_POLICY(sp->sinfo_flags)) {
+ case CHUNK_FLAGS_PR_SCTP_BUF:
+ /*
+ * Time to live is a priority stored in tv_sec when doing
+ * the buffer drop thing.
+ */
+ sp->ts.tv_sec = sp->timetolive;
+ sp->ts.tv_usec = 0;
+ break;
+ case CHUNK_FLAGS_PR_SCTP_TTL:
+ {
+ struct timeval tv;
+
+ (void)SCTP_GETTIME_TIMEVAL(&sp->ts);
+ tv.tv_sec = sp->timetolive / 1000;
+ tv.tv_usec = (sp->timetolive * 1000) % 1000000;
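+			/*
+			 * timetolive is in milliseconds; convert it to a
+			 * timeval and add it to the enqueue time taken
+			 * above to get the absolute drop time.
+			 */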
+ /*
+ * TODO sctp_constants.h needs alternative time
+ * macros when _KERNEL is undefined.
+ */
+ timevaladd(&sp->ts, &tv);
+ }
+ break;
+ case CHUNK_FLAGS_PR_SCTP_RTX:
+ /*
+		 * Time to live is the number of retransmissions stored in
+ * tv_sec.
+ */
+ sp->ts.tv_sec = sp->timetolive;
+ sp->ts.tv_usec = 0;
+ break;
+ default:
+ SCTPDBG(SCTP_DEBUG_USRREQ1,
+ "Unknown PR_SCTP policy %u.\n",
+ PR_SCTP_POLICY(sp->sinfo_flags));
+ break;
+ }
+}
+
+static int
+sctp_msg_append(struct sctp_tcb *stcb,
+ struct sctp_nets *net,
+ struct mbuf *m,
+ struct sctp_sndrcvinfo *srcv, int hold_stcb_lock)
+{
+ int error = 0, holds_lock;
+ struct mbuf *at;
+ struct sctp_stream_queue_pending *sp = NULL;
+ struct sctp_stream_out *strm;
+
+ /*
+ * Given an mbuf chain, put it into the association send queue and
+ * place it on the wheel
+ */
+ holds_lock = hold_stcb_lock;
+ if (srcv->sinfo_stream >= stcb->asoc.streamoutcnt) {
+ /* Invalid stream number */
+ SCTP_LTRACE_ERR_RET_PKT(m, NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
+ error = EINVAL;
+ goto out_now;
+ }
+ if ((stcb->asoc.stream_locked) &&
+ (stcb->asoc.stream_locked_on != srcv->sinfo_stream)) {
+ SCTP_LTRACE_ERR_RET_PKT(m, NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
+ error = EINVAL;
+ goto out_now;
+ }
+ strm = &stcb->asoc.strmout[srcv->sinfo_stream];
+ /* Now can we send this? */
+ if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_SENT) ||
+ (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
+ (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
+ (stcb->asoc.state & SCTP_STATE_SHUTDOWN_PENDING)) {
+ /* got data while shutting down */
+ SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
+ error = ECONNRESET;
+ goto out_now;
+ }
+ sctp_alloc_a_strmoq(stcb, sp);
+ if (sp == NULL) {
+ SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
+ error = ENOMEM;
+ goto out_now;
+ }
+ sp->sinfo_flags = srcv->sinfo_flags;
+ sp->timetolive = srcv->sinfo_timetolive;
+ sp->ppid = srcv->sinfo_ppid;
+ sp->context = srcv->sinfo_context;
+ sp->strseq = 0;
+ if (sp->sinfo_flags & SCTP_ADDR_OVER) {
+ sp->net = net;
+ atomic_add_int(&sp->net->ref_count, 1);
+ } else {
+ sp->net = NULL;
+ }
+ (void)SCTP_GETTIME_TIMEVAL(&sp->ts);
+ sp->stream = srcv->sinfo_stream;
+ sp->msg_is_complete = 1;
+ sp->sender_all_done = 1;
+ sp->some_taken = 0;
+ sp->data = m;
+ sp->tail_mbuf = NULL;
+ sp->length = 0;
+ at = m;
+ sctp_set_prsctp_policy(sp);
+ /*
+ * We could in theory (for sendall) sifa the length in, but we would
+	 * still have to hunt through the chain since we need to set up the
+ * tail_mbuf
+ */
+ while (at) {
+ if (SCTP_BUF_NEXT(at) == NULL)
+ sp->tail_mbuf = at;
+ sp->length += SCTP_BUF_LEN(at);
+ at = SCTP_BUF_NEXT(at);
+ }
+ SCTP_TCB_SEND_LOCK(stcb);
+ sctp_snd_sb_alloc(stcb, sp->length);
+ atomic_add_int(&stcb->asoc.stream_queue_cnt, 1);
+ TAILQ_INSERT_TAIL(&strm->outqueue, sp, next);
+ if ((srcv->sinfo_flags & SCTP_UNORDERED) == 0) {
+ sp->strseq = strm->next_sequence_sent;
+ strm->next_sequence_sent++;
+ }
+ if ((strm->next_spoke.tqe_next == NULL) &&
+ (strm->next_spoke.tqe_prev == NULL)) {
+ /* Not on wheel, insert */
+ sctp_insert_on_wheel(stcb, &stcb->asoc, strm, 1);
+ }
+ m = NULL;
+ SCTP_TCB_SEND_UNLOCK(stcb);
+out_now:
+ if (m) {
+ sctp_m_freem(m);
+ }
+ return (error);
+}
+
+
+static struct mbuf *
+sctp_copy_mbufchain(struct mbuf *clonechain,
+ struct mbuf *outchain,
+ struct mbuf **endofchain,
+ int can_take_mbuf,
+ int sizeofcpy,
+ uint8_t copy_by_ref)
+{
+ struct mbuf *m;
+ struct mbuf *appendchain;
+ caddr_t cp;
+ int len;
+
+ if (endofchain == NULL) {
+ /* error */
+error_out:
+ if (outchain)
+ sctp_m_freem(outchain);
+ return (NULL);
+ }
+ if (can_take_mbuf) {
+ appendchain = clonechain;
+ } else {
+ if (!copy_by_ref &&
+ (sizeofcpy <= (int)((((SCTP_BASE_SYSCTL(sctp_mbuf_threshold_count) - 1) * MLEN) + MHLEN)))
+ ) {
+			/* It's not in a cluster */
+ if (*endofchain == NULL) {
+ /* lets get a mbuf cluster */
+ if (outchain == NULL) {
+ /* This is the general case */
+ new_mbuf:
+ outchain = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_HEADER);
+ if (outchain == NULL) {
+ goto error_out;
+ }
+ SCTP_BUF_LEN(outchain) = 0;
+ *endofchain = outchain;
+ /* get the prepend space */
+ SCTP_BUF_RESV_UF(outchain, (SCTP_FIRST_MBUF_RESV + 4));
+ } else {
+ /*
+ * We really should not get a NULL
+ * in endofchain
+ */
+ /* find end */
+ m = outchain;
+ while (m) {
+ if (SCTP_BUF_NEXT(m) == NULL) {
+ *endofchain = m;
+ break;
+ }
+ m = SCTP_BUF_NEXT(m);
+ }
+ /* sanity */
+ if (*endofchain == NULL) {
+ /*
+ * huh, TSNH XXX maybe we
+ * should panic
+ */
+ sctp_m_freem(outchain);
+ goto new_mbuf;
+ }
+ }
+ /* get the new end of length */
+ len = M_TRAILINGSPACE(*endofchain);
+ } else {
+ /* how much is left at the end? */
+ len = M_TRAILINGSPACE(*endofchain);
+ }
+ /* Find the end of the data, for appending */
+ cp = (mtod((*endofchain), caddr_t)+SCTP_BUF_LEN((*endofchain)));
+
+ /* Now lets copy it out */
+ if (len >= sizeofcpy) {
+ /* It all fits, copy it in */
+ m_copydata(clonechain, 0, sizeofcpy, cp);
+ SCTP_BUF_LEN((*endofchain)) += sizeofcpy;
+ } else {
+ /* fill up the end of the chain */
+ if (len > 0) {
+ m_copydata(clonechain, 0, len, cp);
+ SCTP_BUF_LEN((*endofchain)) += len;
+ /* now we need another one */
+ sizeofcpy -= len;
+ }
+ m = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_HEADER);
+ if (m == NULL) {
+ /* We failed */
+ goto error_out;
+ }
+ SCTP_BUF_NEXT((*endofchain)) = m;
+ *endofchain = m;
+ cp = mtod((*endofchain), caddr_t);
+ m_copydata(clonechain, len, sizeofcpy, cp);
+ SCTP_BUF_LEN((*endofchain)) += sizeofcpy;
+ }
+ return (outchain);
+ } else {
+			/* copy the old-fashioned way */
+ appendchain = SCTP_M_COPYM(clonechain, 0, M_COPYALL, M_DONTWAIT);
+#ifdef SCTP_MBUF_LOGGING
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
+ struct mbuf *mat;
+
+ mat = appendchain;
+ while (mat) {
+ if (SCTP_BUF_IS_EXTENDED(mat)) {
+ sctp_log_mb(mat, SCTP_MBUF_ICOPY);
+ }
+ mat = SCTP_BUF_NEXT(mat);
+ }
+ }
+#endif
+ }
+ }
+ if (appendchain == NULL) {
+ /* error */
+ if (outchain)
+ sctp_m_freem(outchain);
+ return (NULL);
+ }
+ if (outchain) {
+ /* tack on to the end */
+ if (*endofchain != NULL) {
+ SCTP_BUF_NEXT(((*endofchain))) = appendchain;
+ } else {
+ m = outchain;
+ while (m) {
+ if (SCTP_BUF_NEXT(m) == NULL) {
+ SCTP_BUF_NEXT(m) = appendchain;
+ break;
+ }
+ m = SCTP_BUF_NEXT(m);
+ }
+ }
+ /*
+		 * save off the end and update the end-chain position
+ */
+ m = appendchain;
+ while (m) {
+ if (SCTP_BUF_NEXT(m) == NULL) {
+ *endofchain = m;
+ break;
+ }
+ m = SCTP_BUF_NEXT(m);
+ }
+ return (outchain);
+ } else {
+		/* save off the end and update the end-chain position */
+ m = appendchain;
+ while (m) {
+ if (SCTP_BUF_NEXT(m) == NULL) {
+ *endofchain = m;
+ break;
+ }
+ m = SCTP_BUF_NEXT(m);
+ }
+ return (appendchain);
+ }
+}
+
+int
+sctp_med_chunk_output(struct sctp_inpcb *inp,
+ struct sctp_tcb *stcb,
+ struct sctp_association *asoc,
+ int *num_out,
+ int *reason_code,
+ int control_only, int from_where,
+ struct timeval *now, int *now_filled, int frag_point, int so_locked
+#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
+ SCTP_UNUSED
+#endif
+);
+
+static void
+sctp_sendall_iterator(struct sctp_inpcb *inp, struct sctp_tcb *stcb, void *ptr,
+ uint32_t val)
+{
+ struct sctp_copy_all *ca;
+ struct mbuf *m;
+ int ret = 0;
+ int added_control = 0;
+ int un_sent, do_chunk_output = 1;
+ struct sctp_association *asoc;
+
+ ca = (struct sctp_copy_all *)ptr;
+ if (ca->m == NULL) {
+ return;
+ }
+ if (ca->inp != inp) {
+ /* TSNH */
+ return;
+ }
+ if ((ca->m) && ca->sndlen) {
+ m = SCTP_M_COPYM(ca->m, 0, M_COPYALL, M_DONTWAIT);
+ if (m == NULL) {
+ /* can't copy so we are done */
+ ca->cnt_failed++;
+ return;
+ }
+#ifdef SCTP_MBUF_LOGGING
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
+ struct mbuf *mat;
+
+ mat = m;
+ while (mat) {
+ if (SCTP_BUF_IS_EXTENDED(mat)) {
+ sctp_log_mb(mat, SCTP_MBUF_ICOPY);
+ }
+ mat = SCTP_BUF_NEXT(mat);
+ }
+ }
+#endif
+ } else {
+ m = NULL;
+ }
+ SCTP_TCB_LOCK_ASSERT(stcb);
+ if (ca->sndrcv.sinfo_flags & SCTP_ABORT) {
+ /* Abort this assoc with m as the user defined reason */
+ if (m) {
+ struct sctp_paramhdr *ph;
+
+ SCTP_BUF_PREPEND(m, sizeof(struct sctp_paramhdr), M_DONTWAIT);
+ if (m) {
+ ph = mtod(m, struct sctp_paramhdr *);
+ ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
+ ph->param_length = htons(ca->sndlen);
+ }
+ /*
+ * We add one here to keep the assoc from
+			 * disappearing on us.
+ */
+ atomic_add_int(&stcb->asoc.refcnt, 1);
+ sctp_abort_an_association(inp, stcb,
+ SCTP_RESPONSE_TO_USER_REQ,
+ m, SCTP_SO_NOT_LOCKED);
+ /*
+			 * sctp_abort_an_association calls sctp_free_asoc(),
+			 * but free_asoc will NOT free the association, since
+			 * we incremented the refcnt. We do this to keep it
+			 * from being freed, because free_asoc could end up
+			 * calling inpcb_free, which would take a recursive
+			 * lock on the iterator lock. As a consequence the
+			 * stcb comes back to us unlocked: free_asoc returns
+			 * with either no TCB or the TCB unlocked, so we must
+			 * relock here in order to unlock later in the
+			 * iterator timer :-0
+ */
+ SCTP_TCB_LOCK(stcb);
+ atomic_add_int(&stcb->asoc.refcnt, -1);
+ goto no_chunk_output;
+ }
+ } else {
+ if (m) {
+ ret = sctp_msg_append(stcb, stcb->asoc.primary_destination, m,
+ &ca->sndrcv, 1);
+ }
+ asoc = &stcb->asoc;
+ if (ca->sndrcv.sinfo_flags & SCTP_EOF) {
+ /* shutdown this assoc */
+ int cnt;
+
+ cnt = sctp_is_there_unsent_data(stcb);
+
+ if (TAILQ_EMPTY(&asoc->send_queue) &&
+ TAILQ_EMPTY(&asoc->sent_queue) &&
+ (cnt == 0)) {
+ if (asoc->locked_on_sending) {
+ goto abort_anyway;
+ }
+ /*
+ * there is nothing queued to send, so I'm
+ * done...
+ */
+ if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
+ (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
+ (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
+ /*
+ * only send SHUTDOWN the first time
+ * through
+ */
+ sctp_send_shutdown(stcb, stcb->asoc.primary_destination);
+ if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) {
+ SCTP_STAT_DECR_GAUGE32(sctps_currestab);
+ }
+ SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
+ SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
+ sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb,
+ asoc->primary_destination);
+ sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
+ asoc->primary_destination);
+ added_control = 1;
+ do_chunk_output = 0;
+ }
+ } else {
+ /*
+ * we still got (or just got) data to send,
+ * so set SHUTDOWN_PENDING
+ */
+ /*
+ * XXX sockets draft says that SCTP_EOF
+ * should be sent with no data. currently,
+ * we will allow user data to be sent first
+ * and move to SHUTDOWN-PENDING
+ */
+ if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
+ (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
+ (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
+ if (asoc->locked_on_sending) {
+ /*
+ * Locked to send out the
+ * data
+ */
+ struct sctp_stream_queue_pending *sp;
+
+ sp = TAILQ_LAST(&asoc->locked_on_sending->outqueue, sctp_streamhead);
+ if (sp) {
+ if ((sp->length == 0) && (sp->msg_is_complete == 0))
+ asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
+ }
+ }
+ asoc->state |= SCTP_STATE_SHUTDOWN_PENDING;
+ if (TAILQ_EMPTY(&asoc->send_queue) &&
+ TAILQ_EMPTY(&asoc->sent_queue) &&
+ (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
+ abort_anyway:
+ atomic_add_int(&stcb->asoc.refcnt, 1);
+ sctp_abort_an_association(stcb->sctp_ep, stcb,
+ SCTP_RESPONSE_TO_USER_REQ,
+ NULL, SCTP_SO_NOT_LOCKED);
+ atomic_add_int(&stcb->asoc.refcnt, -1);
+ goto no_chunk_output;
+ }
+ sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
+ asoc->primary_destination);
+ }
+ }
+
+ }
+ }
+ un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
+ (stcb->asoc.stream_queue_cnt * sizeof(struct sctp_data_chunk)));
+
+ if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) &&
+ (stcb->asoc.total_flight > 0) &&
+ (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD))
+ ) {
+ do_chunk_output = 0;
+ }
+ if (do_chunk_output)
+ sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_NOT_LOCKED);
+ else if (added_control) {
+ int num_out = 0, reason = 0, now_filled = 0;
+ struct timeval now;
+ int frag_point;
+
+ frag_point = sctp_get_frag_point(stcb, &stcb->asoc);
+ (void)sctp_med_chunk_output(inp, stcb, &stcb->asoc, &num_out,
+ &reason, 1, 1, &now, &now_filled, frag_point, SCTP_SO_NOT_LOCKED);
+ }
+no_chunk_output:
+ if (ret) {
+ ca->cnt_failed++;
+ } else {
+ ca->cnt_sent++;
+ }
+}
+
+static void
+sctp_sendall_completes(void *ptr, uint32_t val)
+{
+ struct sctp_copy_all *ca;
+
+ ca = (struct sctp_copy_all *)ptr;
+ /*
+ * Do a notify here? Kacheong suggests that the notify be done at
+	 * the send time, so you would push up a notification if any send
+	 * failed. Don't know if this is feasible, since the only failures we
+	 * have are "memory" related, and if you cannot get an mbuf to send
+ * the data you surely can't get an mbuf to send up to notify the
+ * user you can't send the data :->
+ */
+
+ /* now free everything */
+ sctp_m_freem(ca->m);
+ SCTP_FREE(ca, SCTP_M_COPYAL);
+}
+
+
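+/*
+ * MC_ALIGN reserves the unused leading space of a cluster so that len
+ * bytes of data land at its end, keeping the reservation rounded down
+ * to a long-word boundary.
+ */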
+#define MC_ALIGN(m, len) do { \
+	SCTP_BUF_RESV_UF(m, ((MCLBYTES - (len)) & ~(sizeof(long) - 1))); \
+} while (0)
+
+
+
+static struct mbuf *
+sctp_copy_out_all(struct uio *uio, int len)
+{
+ struct mbuf *ret, *at;
+ int left, willcpy, cancpy, error;
+
+ ret = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_WAIT, 1, MT_DATA);
+ if (ret == NULL) {
+ /* TSNH */
+ return (NULL);
+ }
+ left = len;
+ SCTP_BUF_LEN(ret) = 0;
+ /* save space for the data chunk header */
+ cancpy = M_TRAILINGSPACE(ret);
+ willcpy = min(cancpy, left);
+ at = ret;
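+	/*
+	 * Walk the uio, filling the trailing space of each mbuf and
+	 * chaining on a fresh one whenever data remains.
+	 */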
+ while (left > 0) {
+ /* Align data to the end */
+ error = uiomove(mtod(at, caddr_t), willcpy, uio);
+ if (error) {
+ err_out_now:
+ sctp_m_freem(at);
+ return (NULL);
+ }
+ SCTP_BUF_LEN(at) = willcpy;
+ SCTP_BUF_NEXT_PKT(at) = SCTP_BUF_NEXT(at) = 0;
+ left -= willcpy;
+ if (left > 0) {
+ SCTP_BUF_NEXT(at) = sctp_get_mbuf_for_msg(left, 0, M_WAIT, 1, MT_DATA);
+ if (SCTP_BUF_NEXT(at) == NULL) {
+ goto err_out_now;
+ }
+ at = SCTP_BUF_NEXT(at);
+ SCTP_BUF_LEN(at) = 0;
+ cancpy = M_TRAILINGSPACE(at);
+ willcpy = min(cancpy, left);
+ }
+ }
+ return (ret);
+}
+
+static int
+sctp_sendall(struct sctp_inpcb *inp, struct uio *uio, struct mbuf *m,
+ struct sctp_sndrcvinfo *srcv)
+{
+ int ret;
+ struct sctp_copy_all *ca;
+
+ SCTP_MALLOC(ca, struct sctp_copy_all *, sizeof(struct sctp_copy_all),
+ SCTP_M_COPYAL);
+ if (ca == NULL) {
+ sctp_m_freem(m);
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
+ return (ENOMEM);
+ }
+ memset(ca, 0, sizeof(struct sctp_copy_all));
+
+ ca->inp = inp;
+ memcpy(&ca->sndrcv, srcv, sizeof(struct sctp_nonpad_sndrcvinfo));
+ /*
+ * take off the sendall flag, it would be bad if we failed to do
+ * this :-0
+ */
+ ca->sndrcv.sinfo_flags &= ~SCTP_SENDALL;
+ /* get length and mbuf chain */
+ if (uio) {
+ ca->sndlen = uio->uio_resid;
+ ca->m = sctp_copy_out_all(uio, ca->sndlen);
+ if (ca->m == NULL) {
+ SCTP_FREE(ca, SCTP_M_COPYAL);
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
+ return (ENOMEM);
+ }
+ } else {
+ /* Gather the length of the send */
+ struct mbuf *mat;
+
+ mat = m;
+ ca->sndlen = 0;
+ while (m) {
+ ca->sndlen += SCTP_BUF_LEN(m);
+ m = SCTP_BUF_NEXT(m);
+ }
+ ca->m = mat;
+ }
+ ret = sctp_initiate_iterator(NULL, sctp_sendall_iterator, NULL,
+ SCTP_PCB_ANY_FLAGS, SCTP_PCB_ANY_FEATURES,
+ SCTP_ASOC_ANY_STATE,
+ (void *)ca, 0,
+ sctp_sendall_completes, inp, 1);
+ if (ret) {
+ SCTP_PRINTF("Failed to initiate iterator for sendall\n");
+ SCTP_FREE(ca, SCTP_M_COPYAL);
+ SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EFAULT);
+ return (EFAULT);
+ }
+ return (0);
+}
+
+
+void
+sctp_toss_old_cookies(struct sctp_tcb *stcb, struct sctp_association *asoc)
+{
+ struct sctp_tmit_chunk *chk, *nchk;
+
+ chk = TAILQ_FIRST(&asoc->control_send_queue);
+ while (chk) {
+ nchk = TAILQ_NEXT(chk, sctp_next);
+ if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
+ TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
+ if (chk->data) {
+ sctp_m_freem(chk->data);
+ chk->data = NULL;
+ }
+ asoc->ctrl_queue_cnt--;
+ sctp_free_a_chunk(stcb, chk);
+ }
+ chk = nchk;
+ }
+}
+
+void
+sctp_toss_old_asconf(struct sctp_tcb *stcb)
+{
+ struct sctp_association *asoc;
+ struct sctp_tmit_chunk *chk, *chk_tmp;
+ struct sctp_asconf_chunk *acp;
+
+ asoc = &stcb->asoc;
+ for (chk = TAILQ_FIRST(&asoc->asconf_send_queue); chk != NULL;
+ chk = chk_tmp) {
+ /* get next chk */
+ chk_tmp = TAILQ_NEXT(chk, sctp_next);
+ /* find SCTP_ASCONF chunk in queue */
+ if (chk->rec.chunk_id.id == SCTP_ASCONF) {
+ if (chk->data) {
+ acp = mtod(chk->data, struct sctp_asconf_chunk *);
+ if (compare_with_wrap(ntohl(acp->serial_number), stcb->asoc.asconf_seq_out_acked, MAX_SEQ)) {
+ /* Not Acked yet */
+ break;
+ }
+ }
+ TAILQ_REMOVE(&asoc->asconf_send_queue, chk, sctp_next);
+ if (chk->data) {
+ sctp_m_freem(chk->data);
+ chk->data = NULL;
+ }
+ asoc->ctrl_queue_cnt--;
+ sctp_free_a_chunk(stcb, chk);
+ }
+ }
+}
+
+
+static void
+sctp_clean_up_datalist(struct sctp_tcb *stcb,
+ struct sctp_association *asoc,
+ struct sctp_tmit_chunk **data_list,
+ int bundle_at,
+ struct sctp_nets *net)
+{
+ int i;
+ struct sctp_tmit_chunk *tp1;
+
+ for (i = 0; i < bundle_at; i++) {
+ /* off of the send queue */
+ TAILQ_REMOVE(&asoc->send_queue, data_list[i], sctp_next);
+ asoc->send_queue_cnt--;
+ if (i > 0) {
+ /*
+			 * For any chunk other than chunk 0 we zap the RTT
+			 * timing; chunk 0 gets zapped or set based on
+			 * whether an RTO measurement is needed.
+ */
+ data_list[i]->do_rtt = 0;
+ }
+ /* record time */
+ data_list[i]->sent_rcv_time = net->last_sent_time;
+ data_list[i]->rec.data.fast_retran_tsn = data_list[i]->rec.data.TSN_seq;
+ if (data_list[i]->whoTo == NULL) {
+ data_list[i]->whoTo = net;
+ atomic_add_int(&net->ref_count, 1);
+ }
+ /* on to the sent queue */
+ tp1 = TAILQ_LAST(&asoc->sent_queue, sctpchunk_listhead);
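+		/*
+		 * Keep the sent queue in TSN order: normally the new chunk
+		 * goes at the tail, but if the last entry carries a higher
+		 * TSN we walk backwards to find the right slot.
+		 */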
+ if ((tp1) && (compare_with_wrap(tp1->rec.data.TSN_seq,
+ data_list[i]->rec.data.TSN_seq, MAX_TSN))) {
+ struct sctp_tmit_chunk *tpp;
+
+ /* need to move back */
+ back_up_more:
+ tpp = TAILQ_PREV(tp1, sctpchunk_listhead, sctp_next);
+ if (tpp == NULL) {
+ TAILQ_INSERT_BEFORE(tp1, data_list[i], sctp_next);
+ goto all_done;
+ }
+ tp1 = tpp;
+ if (compare_with_wrap(tp1->rec.data.TSN_seq,
+ data_list[i]->rec.data.TSN_seq, MAX_TSN)) {
+ goto back_up_more;
+ }
+ TAILQ_INSERT_AFTER(&asoc->sent_queue, tp1, data_list[i], sctp_next);
+ } else {
+ TAILQ_INSERT_TAIL(&asoc->sent_queue,
+ data_list[i],
+ sctp_next);
+ }
+all_done:
+ /* This does not lower until the cum-ack passes it */
+ asoc->sent_queue_cnt++;
+ if ((asoc->peers_rwnd <= 0) &&
+ (asoc->total_flight == 0) &&
+ (bundle_at == 1)) {
+ /* Mark the chunk as being a window probe */
+ SCTP_STAT_INCR(sctps_windowprobed);
+ }
+#ifdef SCTP_AUDITING_ENABLED
+ sctp_audit_log(0xC2, 3);
+#endif
+ data_list[i]->sent = SCTP_DATAGRAM_SENT;
+ data_list[i]->snd_count = 1;
+ data_list[i]->rec.data.chunk_was_revoked = 0;
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
+ sctp_misc_ints(SCTP_FLIGHT_LOG_UP,
+ data_list[i]->whoTo->flight_size,
+ data_list[i]->book_size,
+ (uintptr_t) data_list[i]->whoTo,
+ data_list[i]->rec.data.TSN_seq);
+ }
+ sctp_flight_size_increase(data_list[i]);
+ sctp_total_flight_increase(stcb, data_list[i]);
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
+ sctp_log_rwnd(SCTP_DECREASE_PEER_RWND,
+ asoc->peers_rwnd, data_list[i]->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
+ }
+ asoc->peers_rwnd = sctp_sbspace_sub(asoc->peers_rwnd,
+ (uint32_t) (data_list[i]->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)));
+ if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
+ /* SWS sender side engages */
+ asoc->peers_rwnd = 0;
+ }
+ }
+}
+
+static void
+sctp_clean_up_ctl(struct sctp_tcb *stcb, struct sctp_association *asoc)
+{
+ struct sctp_tmit_chunk *chk, *nchk;
+
+ for (chk = TAILQ_FIRST(&asoc->control_send_queue);
+ chk; chk = nchk) {
+ nchk = TAILQ_NEXT(chk, sctp_next);
+ if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
+ (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK) || /* EY */
+ (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) ||
+ (chk->rec.chunk_id.id == SCTP_HEARTBEAT_ACK) ||
+ (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) ||
+ (chk->rec.chunk_id.id == SCTP_SHUTDOWN) ||
+ (chk->rec.chunk_id.id == SCTP_SHUTDOWN_ACK) ||
+ (chk->rec.chunk_id.id == SCTP_OPERATION_ERROR) ||
+ (chk->rec.chunk_id.id == SCTP_PACKET_DROPPED) ||
+ (chk->rec.chunk_id.id == SCTP_COOKIE_ACK) ||
+ (chk->rec.chunk_id.id == SCTP_ECN_CWR) ||
+ (chk->rec.chunk_id.id == SCTP_ASCONF_ACK)) {
+ /* Stray chunks must be cleaned up */
+ clean_up_anyway:
+ TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
+ if (chk->data) {
+ sctp_m_freem(chk->data);
+ chk->data = NULL;
+ }
+ asoc->ctrl_queue_cnt--;
+ if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN)
+ asoc->fwd_tsn_cnt--;
+ sctp_free_a_chunk(stcb, chk);
+ } else if (chk->rec.chunk_id.id == SCTP_STREAM_RESET) {
+ /* special handling, we must look into the param */
+ if (chk != asoc->str_reset) {
+ goto clean_up_anyway;
+ }
+ }
+ }
+}
+
+
+static int
+sctp_can_we_split_this(struct sctp_tcb *stcb,
+ uint32_t length,
+ uint32_t goal_mtu, uint32_t frag_point, int eeor_on)
+{
+ /*
+	 * Make a decision on whether I should split a msg into multiple parts.
+ * This is only asked of incomplete messages.
+ */
+ if (eeor_on) {
+ /*
+		 * If we are doing EEOR we need to always send it if it's the
+ * entire thing, since it might be all the guy is putting in
+ * the hopper.
+ */
+ if (goal_mtu >= length) {
+ /*-
+ * If we have data outstanding,
+ * we get another chance when the sack
+ * arrives to transmit - wait for more data
+ */
+ if (stcb->asoc.total_flight == 0) {
+ /*
+			 * Nothing is in flight, so we can take
+			 * the whole message now.
+ */
+ return (length);
+ }
+ return (0);
+
+ } else {
+ /* You can fill the rest */
+ return (goal_mtu);
+ }
+ }
+ /*-
+ * For those strange folk that make the send buffer
+ * smaller than our fragmentation point, we can't
+ * get a full msg in so we have to allow splitting.
+ */
+ if (SCTP_SB_LIMIT_SND(stcb->sctp_socket) < frag_point) {
+ return (length);
+ }
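+	/*
+	 * Don't split if the whole message fits, or if splitting would
+	 * leave a residual smaller than sctp_min_residual; such a tiny
+	 * leftover is not worth the extra chunk header.
+	 */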
+ if ((length <= goal_mtu) ||
+ ((length - goal_mtu) < SCTP_BASE_SYSCTL(sctp_min_residual))) {
+		/* Sub-optimal residual; don't split in non-eeor mode. */
+ return (0);
+ }
+ /*
+ * If we reach here length is larger than the goal_mtu. Do we wish
+	 * to split it for the sake of putting packets together?
+ */
+ if (goal_mtu >= min(SCTP_BASE_SYSCTL(sctp_min_split_point), frag_point)) {
+		/* It's ok to split it */
+ return (min(goal_mtu, frag_point));
+ }
+ /* Nope, can't split */
+ return (0);
+
+}
+
+static uint32_t
+sctp_move_to_outqueue(struct sctp_tcb *stcb,
+ struct sctp_stream_out *strq,
+ uint32_t goal_mtu,
+ uint32_t frag_point,
+ int *locked,
+ int *giveup,
+ int eeor_mode,
+ int *bail)
+{
+ /* Move from the stream to the send_queue keeping track of the total */
+ struct sctp_association *asoc;
+ struct sctp_stream_queue_pending *sp;
+ struct sctp_tmit_chunk *chk;
+ struct sctp_data_chunk *dchkh;
+ uint32_t to_move, length;
+ uint8_t rcv_flags = 0;
+ uint8_t some_taken;
+ uint8_t send_lock_up = 0;
+
+ SCTP_TCB_LOCK_ASSERT(stcb);
+ asoc = &stcb->asoc;
+one_more_time:
+ /* sa_ignore FREED_MEMORY */
+ sp = TAILQ_FIRST(&strq->outqueue);
+ if (sp == NULL) {
+ *locked = 0;
+ if (send_lock_up == 0) {
+ SCTP_TCB_SEND_LOCK(stcb);
+ send_lock_up = 1;
+ }
+ sp = TAILQ_FIRST(&strq->outqueue);
+ if (sp) {
+ goto one_more_time;
+ }
+ if (strq->last_msg_incomplete) {
+ SCTP_PRINTF("Huh? Stream:%d lm_in_c=%d but queue is NULL\n",
+ strq->stream_no,
+ strq->last_msg_incomplete);
+ strq->last_msg_incomplete = 0;
+ }
+ to_move = 0;
+ if (send_lock_up) {
+ SCTP_TCB_SEND_UNLOCK(stcb);
+ send_lock_up = 0;
+ }
+ goto out_of;
+ }
+ if ((sp->msg_is_complete) && (sp->length == 0)) {
+ if (sp->sender_all_done) {
+ /*
+ * We are doing deferred cleanup. Last time through,
+ * when we took all the data, sender_all_done was
+ * not set.
+ */
+ if ((sp->put_last_out == 0) && (sp->discard_rest == 0)) {
+ SCTP_PRINTF("Gak, put out entire msg with NO end!-1\n");
+ SCTP_PRINTF("sender_done:%d len:%d msg_comp:%d put_last_out:%d send_lock:%d\n",
+ sp->sender_all_done,
+ sp->length,
+ sp->msg_is_complete,
+ sp->put_last_out,
+ send_lock_up);
+ }
+ if ((TAILQ_NEXT(sp, next) == NULL) && (send_lock_up == 0)) {
+ SCTP_TCB_SEND_LOCK(stcb);
+ send_lock_up = 1;
+ }
+ atomic_subtract_int(&asoc->stream_queue_cnt, 1);
+ TAILQ_REMOVE(&strq->outqueue, sp, next);
+ if (sp->net) {
+ sctp_free_remote_addr(sp->net);
+ sp->net = NULL;
+ }
+ if (sp->data) {
+ sctp_m_freem(sp->data);
+ sp->data = NULL;
+ }
+ sctp_free_a_strmoq(stcb, sp);
+ /* we can't be locked to it */
+ *locked = 0;
+ stcb->asoc.locked_on_sending = NULL;
+ if (send_lock_up) {
+ SCTP_TCB_SEND_UNLOCK(stcb);
+ send_lock_up = 0;
+ }
+ /* back to get the next msg */
+ goto one_more_time;
+ } else {
+ /*
+ * sender just finished this but still holds a
+ * reference
+ */
+ *locked = 1;
+ *giveup = 1;
+ to_move = 0;
+ goto out_of;
+ }
+ } else {
+ /* is there any data to get? */
+ if (sp->length == 0) {
+ /* no */
+ *locked = 1;
+ *giveup = 1;
+ to_move = 0;
+ goto out_of;
+ } else if (sp->discard_rest) {
+ if (send_lock_up == 0) {
+ SCTP_TCB_SEND_LOCK(stcb);
+ send_lock_up = 1;
+ }
+ /* Whack down the size */
+ atomic_subtract_int(&stcb->asoc.total_output_queue_size, sp->length);
+ if ((stcb->sctp_socket != NULL) &&
+ ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+ (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
+ atomic_subtract_int(&stcb->sctp_socket->so_snd.sb_cc, sp->length);
+ }
+ if (sp->data) {
+ sctp_m_freem(sp->data);
+ sp->data = NULL;
+ sp->tail_mbuf = NULL;
+ }
+ sp->length = 0;
+ sp->some_taken = 1;
+ *locked = 1;
+ *giveup = 1;
+ to_move = 0;
+ goto out_of;
+ }
+ }
+ some_taken = sp->some_taken;
+ if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
+ sp->msg_is_complete = 1;
+ }
+re_look:
+ length = sp->length;
+ if (sp->msg_is_complete) {
+ /* The message is complete */
+ to_move = min(length, frag_point);
+ if (to_move == length) {
+ /* All of it fits in the MTU */
+ if (sp->some_taken) {
+ rcv_flags |= SCTP_DATA_LAST_FRAG;
+ sp->put_last_out = 1;
+ } else {
+ rcv_flags |= SCTP_DATA_NOT_FRAG;
+ sp->put_last_out = 1;
+ }
+ } else {
+ /* Not all of it fits, we fragment */
+ if (sp->some_taken == 0) {
+ rcv_flags |= SCTP_DATA_FIRST_FRAG;
+ }
+ sp->some_taken = 1;
+ }
+ } else {
+ to_move = sctp_can_we_split_this(stcb, length, goal_mtu, frag_point, eeor_mode);
+ if (to_move) {
+ /*-
+ * We use a snapshot of length in case it
+ * is expanding during the compare.
+ */
+ uint32_t llen;
+
+ llen = length;
+ if (to_move >= llen) {
+ to_move = llen;
+ if (send_lock_up == 0) {
+ /*-
+ * We are taking all of an incomplete msg
+ * thus we need a send lock.
+ */
+ SCTP_TCB_SEND_LOCK(stcb);
+ send_lock_up = 1;
+ if (sp->msg_is_complete) {
+ /*
+ * the sender finished the
+ * msg
+ */
+ goto re_look;
+ }
+ }
+ }
+ if (sp->some_taken == 0) {
+ rcv_flags |= SCTP_DATA_FIRST_FRAG;
+ sp->some_taken = 1;
+ }
+ } else {
+ /* Nothing to take. */
+ if (sp->some_taken) {
+ *locked = 1;
+ }
+ *giveup = 1;
+ to_move = 0;
+ goto out_of;
+ }
+ }
+
+ /* If we reach here, we can copy out a chunk */
+ sctp_alloc_a_chunk(stcb, chk);
+ if (chk == NULL) {
+ /* No chunk memory */
+ *giveup = 1;
+ to_move = 0;
+ goto out_of;
+ }
+ /*
+ * Setup for unordered if needed by looking at the user sent info
+ * flags.
+ */
+ if (sp->sinfo_flags & SCTP_UNORDERED) {
+ rcv_flags |= SCTP_DATA_UNORDERED;
+ }
+ if ((SCTP_BASE_SYSCTL(sctp_enable_sack_immediately) && ((sp->sinfo_flags & SCTP_EOF) == SCTP_EOF)) ||
+ ((sp->sinfo_flags & SCTP_SACK_IMMEDIATELY) == SCTP_SACK_IMMEDIATELY)) {
+ rcv_flags |= SCTP_DATA_SACK_IMMEDIATELY;
+ }
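+ /*
+ * Editor's note: the I-bit (SACK-IMMEDIATELY) is thus set either on
+ * the sender's explicit request or, when the sysctl permits, on the
+ * final (SCTP_EOF) send so the peer acks the closing data promptly.
+ */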
+ /* clear out the chunk before setting up */
+ memset(chk, 0, sizeof(*chk));
+ chk->rec.data.rcv_flags = rcv_flags;
+
+ if (to_move >= length) {
+ /* we think we can steal the whole thing */
+ if ((sp->sender_all_done == 0) && (send_lock_up == 0)) {
+ SCTP_TCB_SEND_LOCK(stcb);
+ send_lock_up = 1;
+ }
+ if (to_move < sp->length) {
+ /* bail, it changed */
+ goto dont_do_it;
+ }
+ chk->data = sp->data;
+ chk->last_mbuf = sp->tail_mbuf;
+ /* register the stealing */
+ sp->data = sp->tail_mbuf = NULL;
+ } else {
+ struct mbuf *m;
+
+dont_do_it:
+ chk->data = SCTP_M_COPYM(sp->data, 0, to_move, M_DONTWAIT);
+ chk->last_mbuf = NULL;
+ if (chk->data == NULL) {
+ sp->some_taken = some_taken;
+ sctp_free_a_chunk(stcb, chk);
+ *bail = 1;
+ to_move = 0;
+ goto out_of;
+ }
+#ifdef SCTP_MBUF_LOGGING
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
+ struct mbuf *mat;
+
+ mat = chk->data;
+ while (mat) {
+ if (SCTP_BUF_IS_EXTENDED(mat)) {
+ sctp_log_mb(mat, SCTP_MBUF_ICOPY);
+ }
+ mat = SCTP_BUF_NEXT(mat);
+ }
+ }
+#endif
+ /* Pull off the data */
+ m_adj(sp->data, to_move);
+ /* Now lets work our way down and compact it */
+ m = sp->data;
+ while (m && (SCTP_BUF_LEN(m) == 0)) {
+ sp->data = SCTP_BUF_NEXT(m);
+ SCTP_BUF_NEXT(m) = NULL;
+ if (sp->tail_mbuf == m) {
+ /*-
+ * Freeing tail? TSNH since
+ * we supposedly were taking less
+ * than the sp->length.
+ */
+#ifdef INVARIANTS
+ panic("Huh, freing tail? - TSNH");
+#else
+ SCTP_PRINTF("Huh, freeing tail? - TSNH\n");
+ sp->tail_mbuf = sp->data = NULL;
+ sp->length = 0;
+#endif
+
+ }
+ sctp_m_free(m);
+ m = sp->data;
+ }
+ }
+ if (SCTP_BUF_IS_EXTENDED(chk->data)) {
+ chk->copy_by_ref = 1;
+ } else {
+ chk->copy_by_ref = 0;
+ }
+ /*
+ * Get last_mbuf and a count of mbuf usage. This is ugly but
+ * hopefully it's only one mbuf.
+ */
+ if (chk->last_mbuf == NULL) {
+ chk->last_mbuf = chk->data;
+ while (SCTP_BUF_NEXT(chk->last_mbuf) != NULL) {
+ chk->last_mbuf = SCTP_BUF_NEXT(chk->last_mbuf);
+ }
+ }
+ if (to_move > length) {
+ /*- This should not happen either,
+ * since we always lower to_move to the size
+ * of sp->length if it's larger.
+ */
+#ifdef INVARIANTS
+ panic("Huh, how can to_move be larger?");
+#else
+ SCTP_PRINTF("Huh, how can to_move be larger?\n");
+ sp->length = 0;
+#endif
+ } else {
+ atomic_subtract_int(&sp->length, to_move);
+ }
+ if (M_LEADINGSPACE(chk->data) < (int)sizeof(struct sctp_data_chunk)) {
+ /* Not enough room for a chunk header, get some */
+ struct mbuf *m;
+
+ m = sctp_get_mbuf_for_msg(1, 0, M_DONTWAIT, 0, MT_DATA);
+ if (m == NULL) {
+ /*
+ * we're in trouble here. _PREPEND below will free
+ * all the data if there is no leading space, so we
+ * must put the data back and restore sp's state.
+ */
+ if (send_lock_up == 0) {
+ SCTP_TCB_SEND_LOCK(stcb);
+ send_lock_up = 1;
+ }
+ if (chk->data == NULL) {
+ /* unsteal the data */
+ sp->data = chk->data;
+ sp->tail_mbuf = chk->last_mbuf;
+ } else {
+ struct mbuf *m_tmp;
+
+ /* reassemble the data */
+ m_tmp = sp->data;
+ sp->data = chk->data;
+ SCTP_BUF_NEXT(chk->last_mbuf) = m_tmp;
+ }
+ sp->some_taken = some_taken;
+ atomic_add_int(&sp->length, to_move);
+ chk->data = NULL;
+ *bail = 1;
+ sctp_free_a_chunk(stcb, chk);
+ to_move = 0;
+ goto out_of;
+ } else {
+ SCTP_BUF_LEN(m) = 0;
+ SCTP_BUF_NEXT(m) = chk->data;
+ chk->data = m;
+ M_ALIGN(chk->data, 4);
+ }
+ }
+ SCTP_BUF_PREPEND(chk->data, sizeof(struct sctp_data_chunk), M_DONTWAIT);
+ if (chk->data == NULL) {
+ /* HELP, TSNH since we ensured there was room above */
+#ifdef INVARIANTS
+ panic("prepend failes HELP?");
+#else
+ SCTP_PRINTF("prepend fails HELP?\n");
+ sctp_free_a_chunk(stcb, chk);
+#endif
+ *bail = 1;
+ to_move = 0;
+ goto out_of;
+ }
+ sctp_snd_sb_alloc(stcb, sizeof(struct sctp_data_chunk));
+ chk->book_size = chk->send_size = (to_move + sizeof(struct sctp_data_chunk));
+ chk->book_size_scale = 0;
+ chk->sent = SCTP_DATAGRAM_UNSENT;
+
+ chk->flags = 0;
+ chk->asoc = &stcb->asoc;
+ chk->pad_inplace = 0;
+ chk->no_fr_allowed = 0;
+ chk->rec.data.stream_seq = sp->strseq;
+ chk->rec.data.stream_number = sp->stream;
+ chk->rec.data.payloadtype = sp->ppid;
+ chk->rec.data.context = sp->context;
+ chk->rec.data.doing_fast_retransmit = 0;
+ chk->rec.data.ect_nonce = 0; /* ECN Nonce */
+
+ chk->rec.data.timetodrop = sp->ts;
+ chk->flags = sp->act_flags;
+
+ if (sp->net) {
+ chk->whoTo = sp->net;
+ atomic_add_int(&chk->whoTo->ref_count, 1);
+ } else
+ chk->whoTo = NULL;
+
+ if (sp->holds_key_ref) {
+ chk->auth_keyid = sp->auth_keyid;
+ sctp_auth_key_acquire(stcb, chk->auth_keyid);
+ chk->holds_key_ref = 1;
+ }
+ chk->rec.data.TSN_seq = atomic_fetchadd_int(&asoc->sending_seq, 1);
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_AT_SEND_2_OUTQ) {
+ sctp_misc_ints(SCTP_STRMOUT_LOG_SEND,
+ (uintptr_t) stcb, sp->length,
+ (uint32_t) ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq),
+ chk->rec.data.TSN_seq);
+ }
+ dchkh = mtod(chk->data, struct sctp_data_chunk *);
+ /*
+ * Put the rest of the fields in place now. The size was set
+ * earlier, in the previous block, prior to padding.
+ */
+
+#ifdef SCTP_ASOCLOG_OF_TSNS
+ SCTP_TCB_LOCK_ASSERT(stcb);
+ if (asoc->tsn_out_at >= SCTP_TSN_LOG_SIZE) {
+ asoc->tsn_out_at = 0;
+ asoc->tsn_out_wrapped = 1;
+ }
+ asoc->out_tsnlog[asoc->tsn_out_at].tsn = chk->rec.data.TSN_seq;
+ asoc->out_tsnlog[asoc->tsn_out_at].strm = chk->rec.data.stream_number;
+ asoc->out_tsnlog[asoc->tsn_out_at].seq = chk->rec.data.stream_seq;
+ asoc->out_tsnlog[asoc->tsn_out_at].sz = chk->send_size;
+ asoc->out_tsnlog[asoc->tsn_out_at].flgs = chk->rec.data.rcv_flags;
+ asoc->out_tsnlog[asoc->tsn_out_at].stcb = (void *)stcb;
+ asoc->out_tsnlog[asoc->tsn_out_at].in_pos = asoc->tsn_out_at;
+ asoc->out_tsnlog[asoc->tsn_out_at].in_out = 2;
+ asoc->tsn_out_at++;
+#endif
+
+ dchkh->ch.chunk_type = SCTP_DATA;
+ dchkh->ch.chunk_flags = chk->rec.data.rcv_flags;
+ dchkh->dp.tsn = htonl(chk->rec.data.TSN_seq);
+ dchkh->dp.stream_id = htons(strq->stream_no);
+ dchkh->dp.stream_sequence = htons(chk->rec.data.stream_seq);
+ dchkh->dp.protocol_id = chk->rec.data.payloadtype;
+ dchkh->ch.chunk_length = htons(chk->send_size);
+ /* Now advance the chk->send_size by the actual pad needed. */
+ if (chk->send_size < SCTP_SIZE32(chk->book_size)) {
+ /* need a pad */
+ struct mbuf *lm;
+ int pads;
+
+ pads = SCTP_SIZE32(chk->book_size) - chk->send_size;
+ if (sctp_pad_lastmbuf(chk->data, pads, chk->last_mbuf) == 0) {
+ chk->pad_inplace = 1;
+ }
+ if ((lm = SCTP_BUF_NEXT(chk->last_mbuf)) != NULL) {
+ /* pad added an mbuf */
+ chk->last_mbuf = lm;
+ }
+ chk->send_size += pads;
+ }
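+ /*
+ * Example (editor's sketch): a book_size of 30 rounds up to
+ * SCTP_SIZE32(30) == 32, so pads == 2 and send_size grows to 32.
+ */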
+ /* We only re-set the policy if it is on */
+ if (sp->pr_sctp_on) {
+ sctp_set_prsctp_policy(sp);
+ asoc->pr_sctp_cnt++;
+ chk->pr_sctp_on = 1;
+ } else {
+ chk->pr_sctp_on = 0;
+ }
+ if (sp->msg_is_complete && (sp->length == 0) && (sp->sender_all_done)) {
+ /* All done pull and kill the message */
+ atomic_subtract_int(&asoc->stream_queue_cnt, 1);
+ if (sp->put_last_out == 0) {
+ SCTP_PRINTF("Gak, put out entire msg with NO end!-2\n");
+ SCTP_PRINTF("sender_done:%d len:%d msg_comp:%d put_last_out:%d send_lock:%d\n",
+ sp->sender_all_done,
+ sp->length,
+ sp->msg_is_complete,
+ sp->put_last_out,
+ send_lock_up);
+ }
+ if ((send_lock_up == 0) && (TAILQ_NEXT(sp, next) == NULL)) {
+ SCTP_TCB_SEND_LOCK(stcb);
+ send_lock_up = 1;
+ }
+ TAILQ_REMOVE(&strq->outqueue, sp, next);
+ if (sp->net) {
+ sctp_free_remote_addr(sp->net);
+ sp->net = NULL;
+ }
+ if (sp->data) {
+ sctp_m_freem(sp->data);
+ sp->data = NULL;
+ }
+ sctp_free_a_strmoq(stcb, sp);
+
+ /* we can't be locked to it */
+ *locked = 0;
+ stcb->asoc.locked_on_sending = NULL;
+ } else {
+ /* more to go, we are locked */
+ *locked = 1;
+ }
+ asoc->chunks_on_out_queue++;
+ TAILQ_INSERT_TAIL(&asoc->send_queue, chk, sctp_next);
+ asoc->send_queue_cnt++;
+out_of:
+ if (send_lock_up) {
+ SCTP_TCB_SEND_UNLOCK(stcb);
+ send_lock_up = 0;
+ }
+ return (to_move);
+}
+
+
+static struct sctp_stream_out *
+sctp_select_a_stream(struct sctp_tcb *stcb, struct sctp_association *asoc)
+{
+ struct sctp_stream_out *strq;
+
+ /* Find the next stream to use */
+ if (asoc->last_out_stream == NULL) {
+ strq = TAILQ_FIRST(&asoc->out_wheel);
+ } else {
+ strq = TAILQ_NEXT(asoc->last_out_stream, next_spoke);
+ if (strq == NULL) {
+ strq = TAILQ_FIRST(&asoc->out_wheel);
+ }
+ }
+ return (strq);
+}
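+
+/*-
+ * Editor's note: sctp_select_a_stream() is a plain round robin over the
+ * out_wheel. For example, with streams {3, 7, 9} on the wheel and
+ * last_out_stream == 7, the next pick is 9, and after 9 it wraps to 3.
+ */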
+
+
+static void
+sctp_fill_outqueue(struct sctp_tcb *stcb,
+ struct sctp_nets *net, int frag_point, int eeor_mode, int *quit_now)
+{
+ struct sctp_association *asoc;
+ struct sctp_stream_out *strq, *strqn;
+ int goal_mtu, moved_how_much, total_moved = 0, bail = 0;
+ int locked, giveup;
+ struct sctp_stream_queue_pending *sp;
+
+ SCTP_TCB_LOCK_ASSERT(stcb);
+ asoc = &stcb->asoc;
+#ifdef INET6
+ if (net->ro._l_addr.sin6.sin6_family == AF_INET6) {
+ goal_mtu = net->mtu - SCTP_MIN_OVERHEAD;
+ } else {
+ /* ?? not sure what else to do */
+ goal_mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
+ }
+#else
+ goal_mtu = net->mtu - SCTP_MIN_OVERHEAD;
+#endif
+ /* Need an allowance for the data chunk header too */
+ goal_mtu -= sizeof(struct sctp_data_chunk);
+
+ /* must be a multiple of 4 (even word boundary) */
+ goal_mtu &= 0xfffffffc;
+ if (asoc->locked_on_sending) {
+ /* We are stuck on one stream until the message completes. */
+ strq = asoc->locked_on_sending;
+ locked = 1;
+ } else {
+ strq = sctp_select_a_stream(stcb, asoc);
+ locked = 0;
+ }
+ strqn = strq;
+ while ((goal_mtu > 0) && strq) {
+ sp = TAILQ_FIRST(&strq->outqueue);
+ if (sp == NULL) {
+ break;
+ }
+ /*-
+ * Honor the user's choice of destination if given. If not
+ * given, pull only to the primary path when CMT is not in
+ * use.
+ */
+ if (((sp->net != NULL) &&
+ (sp->net != net)) ||
+ ((sp->net == NULL) &&
+ (asoc->sctp_cmt_on_off == 0) &&
+ (asoc->primary_destination != net))) {
+ /* Do not pull to this network */
+ if (locked) {
+ break;
+ } else {
+ strq = sctp_select_a_stream(stcb, asoc);
+ if (strq == NULL)
+ /* none left */
+ break;
+ if (strqn == strq) {
+ /* I have circled */
+ break;
+ }
+ continue;
+ }
+ }
+ giveup = 0;
+ bail = 0;
+ moved_how_much = sctp_move_to_outqueue(stcb, strq, goal_mtu, frag_point, &locked,
+ &giveup, eeor_mode, &bail);
+ if (moved_how_much)
+ asoc->last_out_stream = strq;
+
+ if (locked) {
+ asoc->locked_on_sending = strq;
+ if ((moved_how_much == 0) || (giveup) || bail)
+ /* no more to move for now */
+ break;
+ } else {
+ asoc->locked_on_sending = NULL;
+ if (TAILQ_EMPTY(&strq->outqueue)) {
+ if (strq == strqn) {
+ /* Must move start to next one */
+ strqn = TAILQ_NEXT(strq, next_spoke);
+ if (strqn == NULL) {
+ strqn = TAILQ_FIRST(&asoc->out_wheel);
+ if (strqn == NULL) {
+ break;
+ }
+ }
+ }
+ sctp_remove_from_wheel(stcb, asoc, strq, 0);
+ }
+ if ((giveup) || bail) {
+ break;
+ }
+ strq = sctp_select_a_stream(stcb, asoc);
+ if (strq == NULL) {
+ break;
+ }
+ }
+ total_moved += moved_how_much;
+ goal_mtu -= (moved_how_much + sizeof(struct sctp_data_chunk));
+ goal_mtu &= 0xfffffffc;
+ }
+ if (bail)
+ *quit_now = 1;
+
+ if (total_moved == 0) {
+ if ((stcb->asoc.sctp_cmt_on_off == 0) &&
+ (net == stcb->asoc.primary_destination)) {
+ /* ran dry for the primary network */
+ SCTP_STAT_INCR(sctps_primary_randry);
+ } else if (stcb->asoc.sctp_cmt_on_off == 1) {
+ /* ran dry with CMT on */
+ SCTP_STAT_INCR(sctps_cmt_randry);
+ }
+ }
+}
+
+void
+sctp_fix_ecn_echo(struct sctp_association *asoc)
+{
+ struct sctp_tmit_chunk *chk;
+
+ TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
+ if (chk->rec.chunk_id.id == SCTP_ECN_ECHO) {
+ chk->sent = SCTP_DATAGRAM_UNSENT;
+ }
+ }
+}
+
+void
+sctp_move_chunks_from_net(struct sctp_tcb *stcb, struct sctp_nets *net)
+{
+ struct sctp_association *asoc;
+ struct sctp_stream_out *outs;
+ struct sctp_tmit_chunk *chk;
+ struct sctp_stream_queue_pending *sp;
+
+ if (net == NULL) {
+ return;
+ }
+ asoc = &stcb->asoc;
+ TAILQ_FOREACH(outs, &asoc->out_wheel, next_spoke) {
+ TAILQ_FOREACH(sp, &outs->outqueue, next) {
+ if (sp->net == net) {
+ sctp_free_remote_addr(sp->net);
+ sp->net = NULL;
+ }
+ }
+ }
+ TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
+ if (chk->whoTo == net) {
+ sctp_free_remote_addr(chk->whoTo);
+ chk->whoTo = NULL;
+ }
+ }
+}
+
+int
+sctp_med_chunk_output(struct sctp_inpcb *inp,
+ struct sctp_tcb *stcb,
+ struct sctp_association *asoc,
+ int *num_out,
+ int *reason_code,
+ int control_only, int from_where,
+ struct timeval *now, int *now_filled, int frag_point, int so_locked
+#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
+ SCTP_UNUSED
+#endif
+)
+{
+ /*
+ * Ok, this is the generic chunk service queue. We must do the
+ * following: - Service the stream queue that is next, moving any
+ * message (note we must get a complete message, i.e. FIRST/MIDDLE
+ * and LAST, to the out queue in one pass) and assigning TSNs. -
+ * Check to see if the cwnd/rwnd allows any output; if so, go ahead
+ * and formulate and send the low level chunks, making sure to
+ * combine any control in the control chunk queue also.
+ */
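+ /*-
+ * Editor's outline (no functional change intended): (1) prime the
+ * pump - sctp_fill_outqueue() per eligible net moves stream data to
+ * the send_queue and assigns TSNs; (2) for each net, bundle ASCONF,
+ * then control, then data chunks within MTU/rwnd limits; (3) hand
+ * each assembled chain to sctp_lowlevel_chunk_output().
+ */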
+ struct sctp_nets *net, *start_at, *old_start_at = NULL;
+ struct mbuf *outchain, *endoutchain;
+ struct sctp_tmit_chunk *chk, *nchk;
+
+ /* temp arrays for unlinking */
+ struct sctp_tmit_chunk *data_list[SCTP_MAX_DATA_BUNDLING];
+ int no_fragmentflg, error;
+ unsigned int max_rwnd_per_dest, max_send_per_dest;
+ int one_chunk, hbflag, skip_data_for_this_net;
+ int asconf, cookie, no_out_cnt;
+ int bundle_at, ctl_cnt, no_data_chunks, eeor_mode;
+ unsigned int mtu, r_mtu, omtu, mx_mtu, to_out;
+ int tsns_sent = 0;
+ uint32_t auth_offset = 0;
+ struct sctp_auth_chunk *auth = NULL;
+ uint16_t auth_keyid;
+ int override_ok = 1;
+ int data_auth_reqd = 0;
+
+ /*
+ * JRS 5/14/07 - Add flag for whether a heartbeat is sent to the
+ * destination.
+ */
+ int pf_hbflag = 0;
+ int quit_now = 0;
+
+ *num_out = 0;
+ auth_keyid = stcb->asoc.authinfo.active_keyid;
+
+ if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
+ (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED) ||
+ (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR))) {
+ eeor_mode = 1;
+ } else {
+ eeor_mode = 0;
+ }
+ ctl_cnt = no_out_cnt = asconf = cookie = 0;
+ /*
+ * First lets prime the pump. For each destination, if there is room
+ * in the flight size, attempt to pull an MTU's worth out of the
+ * stream queues into the general send_queue
+ */
+#ifdef SCTP_AUDITING_ENABLED
+ sctp_audit_log(0xC2, 2);
+#endif
+ SCTP_TCB_LOCK_ASSERT(stcb);
+ hbflag = 0;
+ if ((control_only) || (asoc->stream_reset_outstanding))
+ no_data_chunks = 1;
+ else
+ no_data_chunks = 0;
+
+ /* Nothing possible to send? */
+ if (TAILQ_EMPTY(&asoc->control_send_queue) &&
+ TAILQ_EMPTY(&asoc->asconf_send_queue) &&
+ TAILQ_EMPTY(&asoc->send_queue) &&
+ TAILQ_EMPTY(&asoc->out_wheel)) {
+ *reason_code = 9;
+ return (0);
+ }
+ if (asoc->peers_rwnd == 0) {
+ /* No room in peers rwnd */
+ *reason_code = 1;
+ if (asoc->total_flight > 0) {
+ /* we are allowed one chunk in flight */
+ no_data_chunks = 1;
+ }
+ }
+ max_rwnd_per_dest = ((asoc->peers_rwnd + asoc->total_flight) / asoc->numnets);
+ if (stcb->sctp_socket)
+ max_send_per_dest = SCTP_SB_LIMIT_SND(stcb->sctp_socket) / asoc->numnets;
+ else
+ max_send_per_dest = 0;
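+ /*
+ * Example (editor's sketch): peers_rwnd = 60000, total_flight = 4000
+ * and numnets = 2 give max_rwnd_per_dest = 32000; a send buffer
+ * limit of 64000 gives max_send_per_dest = 32000.
+ */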
+ if ((no_data_chunks == 0) && (!TAILQ_EMPTY(&asoc->out_wheel))) {
+ TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+ /*
+ * This for loop visits each net; if it has space in
+ * its cwnd and (when CMT is off) is the destination
+ * data is being sent to, then sctp_fill_outqueue is
+ * called for the net. This gets data onto the send
+ * queue for that network.
+ *
+ * In sctp_fill_outqueue TSNs are assigned and data
+ * is copied out of the stream buffers, mostly by
+ * reference (we hope).
+ */
+ net->window_probe = 0;
+ if ((net->dest_state & SCTP_ADDR_NOT_REACHABLE) ||
+ (net->dest_state & SCTP_ADDR_UNCONFIRMED)) {
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
+ sctp_log_cwnd(stcb, net, 1,
+ SCTP_CWND_LOG_FILL_OUTQ_CALLED);
+ }
+ continue;
+ }
+ if ((asoc->sctp_cmt_on_off == 0) &&
+ (asoc->primary_destination != net) &&
+ (net->ref_count < 2)) {
+ /* nothing can be in queue for this guy */
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
+ sctp_log_cwnd(stcb, net, 2,
+ SCTP_CWND_LOG_FILL_OUTQ_CALLED);
+ }
+ continue;
+ }
+ if (net->flight_size >= net->cwnd) {
+ /* skip this network, no room - can't fill */
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
+ sctp_log_cwnd(stcb, net, 3,
+ SCTP_CWND_LOG_FILL_OUTQ_CALLED);
+ }
+ continue;
+ }
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
+ sctp_log_cwnd(stcb, net, 4, SCTP_CWND_LOG_FILL_OUTQ_CALLED);
+ }
+ sctp_fill_outqueue(stcb, net, frag_point, eeor_mode, &quit_now);
+ if (quit_now) {
+ /* memory alloc failure */
+ no_data_chunks = 1;
+ break;
+ }
+ }
+ }
+ /* now service each destination and send out what we can for it */
+ /* Nothing to send? */
+ if (TAILQ_EMPTY(&asoc->control_send_queue) &&
+ TAILQ_EMPTY(&asoc->asconf_send_queue) &&
+ TAILQ_EMPTY(&asoc->send_queue)) {
+ *reason_code = 8;
+ return (0);
+ }
+ if (asoc->sctp_cmt_on_off == 1) {
+ /* get the last start point */
+ start_at = asoc->last_net_cmt_send_started;
+ if (start_at == NULL) {
+ /* NULL, so start at the beginning */
+ start_at = TAILQ_FIRST(&asoc->nets);
+ } else {
+ start_at = TAILQ_NEXT(asoc->last_net_cmt_send_started, sctp_next);
+ if (start_at == NULL) {
+ start_at = TAILQ_FIRST(&asoc->nets);
+ }
+ }
+ asoc->last_net_cmt_send_started = start_at;
+ } else {
+ start_at = TAILQ_FIRST(&asoc->nets);
+ }
+ old_start_at = NULL;
+again_one_more_time:
+ for (net = start_at; net != NULL; net = TAILQ_NEXT(net, sctp_next)) {
+ /* how much can we send? */
+ /* SCTPDBG("Examine for sending net:%x\n", (uint32_t)net); */
+ if (old_start_at && (old_start_at == net)) {
+ /* through the list completely. */
+ break;
+ }
+ tsns_sent = 0xa;
+ if ((asoc->sctp_cmt_on_off == 0) &&
+ (asoc->primary_destination != net) &&
+ (net->ref_count < 2)) {
+ /*
+ * Ref-count of 1 so we cannot have data or control
+ * queued to this address. Skip it (non-CMT).
+ */
+ continue;
+ }
+ if (TAILQ_EMPTY(&asoc->control_send_queue) &&
+ TAILQ_EMPTY(&asoc->asconf_send_queue) &&
+ (net->flight_size >= net->cwnd)) {
+ /*
+ * Nothing on control or asconf and flight is full,
+ * we can skip even in the CMT case.
+ */
+ continue;
+ }
+ ctl_cnt = bundle_at = 0;
+ endoutchain = outchain = NULL;
+ no_fragmentflg = 1;
+ one_chunk = 0;
+ if (net->dest_state & SCTP_ADDR_UNCONFIRMED) {
+ skip_data_for_this_net = 1;
+ } else {
+ skip_data_for_this_net = 0;
+ }
+ if ((net->ro.ro_rt) && (net->ro.ro_rt->rt_ifp)) {
+ /*
+ * if we have a route and an ifp, check to see if we
+ * have room to send to this destination
+ */
+ struct ifnet *ifp;
+
+ ifp = net->ro.ro_rt->rt_ifp;
+ if ((ifp->if_snd.ifq_len + 2) >= ifp->if_snd.ifq_maxlen) {
+ SCTP_STAT_INCR(sctps_ifnomemqueued);
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) {
+ sctp_log_maxburst(stcb, net, ifp->if_snd.ifq_len, ifp->if_snd.ifq_maxlen, SCTP_MAX_IFP_APPLIED);
+ }
+ continue;
+ }
+ }
+ switch (((struct sockaddr *)&net->ro._l_addr)->sa_family) {
+ case AF_INET:
+ mtu = net->mtu - (sizeof(struct ip) + sizeof(struct sctphdr));
+ break;
+#ifdef INET6
+ case AF_INET6:
+ mtu = net->mtu - (sizeof(struct ip6_hdr) + sizeof(struct sctphdr));
+ break;
+#endif
+ default:
+ /* TSNH */
+ mtu = net->mtu;
+ break;
+ }
+ mx_mtu = mtu;
+ to_out = 0;
+ if (mtu > asoc->peers_rwnd) {
+ if (asoc->total_flight > 0) {
+ /* We have a packet in flight somewhere */
+ r_mtu = asoc->peers_rwnd;
+ } else {
+ /* We are always allowed to send one MTU out */
+ one_chunk = 1;
+ r_mtu = mtu;
+ }
+ } else {
+ r_mtu = mtu;
+ }
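+ /*
+ * Example (editor's sketch): with mtu = 1452 and peers_rwnd = 100,
+ * r_mtu becomes 100 while data is in flight; with nothing in flight
+ * we still probe the window with one full MTU (one_chunk = 1).
+ */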
+ /************************/
+ /* ASCONF transmission */
+ /************************/
+ /* Now first lets go through the asconf queue */
+ for (chk = TAILQ_FIRST(&asoc->asconf_send_queue);
+ chk; chk = nchk) {
+ nchk = TAILQ_NEXT(chk, sctp_next);
+ if (chk->rec.chunk_id.id != SCTP_ASCONF) {
+ continue;
+ }
+ if (chk->whoTo != net) {
+ /*
+ * No, not sent to the network we are
+ * looking at
+ */
+ break;
+ }
+ if (chk->data == NULL) {
+ break;
+ }
+ if (chk->sent != SCTP_DATAGRAM_UNSENT &&
+ chk->sent != SCTP_DATAGRAM_RESEND) {
+ break;
+ }
+ /*
+ * if no AUTH is yet included and this chunk
+ * requires it, make sure to account for it. We
+ * don't apply the size until the AUTH chunk is
+ * actually added below in case there is no room for
+ * this chunk. NOTE: we overload the use of "omtu"
+ * here
+ */
+ if ((auth == NULL) &&
+ sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
+ stcb->asoc.peer_auth_chunks)) {
+ omtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
+ } else
+ omtu = 0;
+ /* Here we do NOT factor the r_mtu */
+ if ((chk->send_size < (int)(mtu - omtu)) ||
+ (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) {
+ /*
+ * We probably should glom the mbuf chain
+ * from the chk->data for control but the
+ * problem is it becomes yet one more level
+ * of tracking to do if for some reason
+ * output fails. Then I have got to
+ * reconstruct the merged control chain.. el
+ * yucko.. for now we take the easy way and
+ * do the copy
+ */
+ /*
+ * Add an AUTH chunk, if chunk requires it
+ * save the offset into the chain for AUTH
+ */
+ if ((auth == NULL) &&
+ (sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
+ stcb->asoc.peer_auth_chunks))) {
+ outchain = sctp_add_auth_chunk(outchain,
+ &endoutchain,
+ &auth,
+ &auth_offset,
+ stcb,
+ chk->rec.chunk_id.id);
+ SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
+ }
+ outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain,
+ (int)chk->rec.chunk_id.can_take_data,
+ chk->send_size, chk->copy_by_ref);
+ if (outchain == NULL) {
+ *reason_code = 8;
+ SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
+ return (ENOMEM);
+ }
+ SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
+ /* update our MTU size */
+ if (mtu > (chk->send_size + omtu))
+ mtu -= (chk->send_size + omtu);
+ else
+ mtu = 0;
+ to_out += (chk->send_size + omtu);
+ /* Do clear IP_DF ? */
+ if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
+ no_fragmentflg = 0;
+ }
+ if (chk->rec.chunk_id.can_take_data)
+ chk->data = NULL;
+ /*
+ * set hb flag since we can use these for
+ * RTO
+ */
+ hbflag = 1;
+ asconf = 1;
+ /*
+ * should sysctl this: don't bundle data
+ * with ASCONF since it requires AUTH
+ */
+ no_data_chunks = 1;
+ chk->sent = SCTP_DATAGRAM_SENT;
+ chk->snd_count++;
+ if (mtu == 0) {
+ /*
+ * Ok, we are out of room but we can
+ * output without affecting the
+ * flight size since this little guy
+ * is a control-only packet.
+ */
+ sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, net);
+ /*
+ * do NOT clear the asconf flag as
+ * it is used to do appropriate
+ * source address selection.
+ */
+ if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
+ (struct sockaddr *)&net->ro._l_addr,
+ outchain, auth_offset, auth,
+ stcb->asoc.authinfo.active_keyid,
+ no_fragmentflg, 0, NULL, asconf,
+ inp->sctp_lport, stcb->rport,
+ htonl(stcb->asoc.peer_vtag),
+ net->port, so_locked, NULL))) {
+ if (error == ENOBUFS) {
+ asoc->ifp_had_enobuf = 1;
+ SCTP_STAT_INCR(sctps_lowlevelerr);
+ }
+ if (from_where == 0) {
+ SCTP_STAT_INCR(sctps_lowlevelerrusr);
+ }
+ if (*now_filled == 0) {
+ (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
+ *now_filled = 1;
+ *now = net->last_sent_time;
+ } else {
+ net->last_sent_time = *now;
+ }
+ hbflag = 0;
+ /* error, could not output */
+ if (error == EHOSTUNREACH) {
+ /*
+ * Destination went
+ * unreachable
+ * during this send
+ */
+ sctp_move_chunks_from_net(stcb, net);
+ }
+ *reason_code = 7;
+ continue;
+ } else
+ asoc->ifp_had_enobuf = 0;
+ if (*now_filled == 0) {
+ (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
+ *now_filled = 1;
+ *now = net->last_sent_time;
+ } else {
+ net->last_sent_time = *now;
+ }
+ hbflag = 0;
+ /*
+ * Increase the number we sent; if a
+ * cookie is sent we don't report
+ * any as sent out.
+ */
+ outchain = endoutchain = NULL;
+ auth = NULL;
+ auth_offset = 0;
+ if (!no_out_cnt)
+ *num_out += ctl_cnt;
+ /* recalc a clean slate and setup */
+ if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
+ mtu = (net->mtu - SCTP_MIN_OVERHEAD);
+ } else {
+ mtu = (net->mtu - SCTP_MIN_V4_OVERHEAD);
+ }
+ to_out = 0;
+ no_fragmentflg = 1;
+ }
+ }
+ }
+ /************************/
+ /* Control transmission */
+ /************************/
+ /* Now first lets go through the control queue */
+ for (chk = TAILQ_FIRST(&asoc->control_send_queue);
+ chk; chk = nchk) {
+ nchk = TAILQ_NEXT(chk, sctp_next);
+ if (chk->whoTo != net) {
+ /*
+ * No, not sent to the network we are
+ * looking at
+ */
+ continue;
+ }
+ if (chk->data == NULL) {
+ continue;
+ }
+ if (chk->sent != SCTP_DATAGRAM_UNSENT) {
+ /*
+ * It must be unsent. Cookies and ASCONFs
+ * hang around, but their timers will force
+ * retransmission when marked for resend.
+ */
+ continue;
+ }
+ /*
+ * if no AUTH is yet included and this chunk
+ * requires it, make sure to account for it. We
+ * don't apply the size until the AUTH chunk is
+ * actually added below in case there is no room for
+ * this chunk. NOTE: we overload the use of "omtu"
+ * here
+ */
+ if ((auth == NULL) &&
+ sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
+ stcb->asoc.peer_auth_chunks)) {
+ omtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
+ } else
+ omtu = 0;
+ /* Here we do NOT factor the r_mtu */
+ if ((chk->send_size <= (int)(mtu - omtu)) ||
+ (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) {
+ /*
+ * We probably should glom the mbuf chain
+ * from the chk->data for control but the
+ * problem is it becomes yet one more level
+ * of tracking to do if for some reason
+ * output fails. Then I have got to
+ * reconstruct the merged control chain.. el
+ * yucko.. for now we take the easy way and
+ * do the copy
+ */
+ /*
+ * Add an AUTH chunk, if chunk requires it
+ * save the offset into the chain for AUTH
+ */
+ if ((auth == NULL) &&
+ (sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
+ stcb->asoc.peer_auth_chunks))) {
+ outchain = sctp_add_auth_chunk(outchain,
+ &endoutchain,
+ &auth,
+ &auth_offset,
+ stcb,
+ chk->rec.chunk_id.id);
+ SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
+ }
+ outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain,
+ (int)chk->rec.chunk_id.can_take_data,
+ chk->send_size, chk->copy_by_ref);
+ if (outchain == NULL) {
+ *reason_code = 8;
+ SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
+ return (ENOMEM);
+ }
+ SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
+ /* update our MTU size */
+ if (mtu > (chk->send_size + omtu))
+ mtu -= (chk->send_size + omtu);
+ else
+ mtu = 0;
+ to_out += (chk->send_size + omtu);
+ /* Do clear IP_DF ? */
+ if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
+ no_fragmentflg = 0;
+ }
+ if (chk->rec.chunk_id.can_take_data)
+ chk->data = NULL;
+ /* Mark things to be removed, if needed */
+ if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
+ (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK) || /* EY */
+ (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) ||
+ (chk->rec.chunk_id.id == SCTP_HEARTBEAT_ACK) ||
+ (chk->rec.chunk_id.id == SCTP_SHUTDOWN) ||
+ (chk->rec.chunk_id.id == SCTP_SHUTDOWN_ACK) ||
+ (chk->rec.chunk_id.id == SCTP_OPERATION_ERROR) ||
+ (chk->rec.chunk_id.id == SCTP_COOKIE_ACK) ||
+ (chk->rec.chunk_id.id == SCTP_ECN_CWR) ||
+ (chk->rec.chunk_id.id == SCTP_PACKET_DROPPED) ||
+ (chk->rec.chunk_id.id == SCTP_ASCONF_ACK)) {
+
+ if (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) {
+ hbflag = 1;
+ /*
+ * JRS 5/14/07 - Set the
+ * flag to say a heartbeat
+ * is being sent.
+ */
+ pf_hbflag = 1;
+ }
+ /* remove these chunks at the end */
+ if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
+ (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK)) {
+ /* turn off the timer */
+ if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
+ sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
+ inp, stcb, net, SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_1);
+ }
+ }
+ ctl_cnt++;
+ } else {
+ /*
+ * Other chunks, since they have
+ * timers running (i.e. COOKIE) we
+ * just "trust" that it gets sent or
+ * retransmitted.
+ */
+ ctl_cnt++;
+ if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
+ cookie = 1;
+ no_out_cnt = 1;
+ }
+ chk->sent = SCTP_DATAGRAM_SENT;
+ chk->snd_count++;
+ }
+ if (mtu == 0) {
+ /*
+ * Ok, we are out of room but we can
+ * output without affecting the
+ * flight size since this little guy
+ * is a control-only packet.
+ */
+ if (asconf) {
+ sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, net);
+ /*
+ * do NOT clear the asconf
+ * flag as it is used to do
+ * appropriate source
+ * address selection.
+ */
+ }
+ if (cookie) {
+ sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net);
+ cookie = 0;
+ }
+ if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
+ (struct sockaddr *)&net->ro._l_addr,
+ outchain,
+ auth_offset, auth,
+ stcb->asoc.authinfo.active_keyid,
+ no_fragmentflg, 0, NULL, asconf,
+ inp->sctp_lport, stcb->rport,
+ htonl(stcb->asoc.peer_vtag),
+ net->port, so_locked, NULL))) {
+ if (error == ENOBUFS) {
+ asoc->ifp_had_enobuf = 1;
+ SCTP_STAT_INCR(sctps_lowlevelerr);
+ }
+ if (from_where == 0) {
+ SCTP_STAT_INCR(sctps_lowlevelerrusr);
+ }
+ /* error, could not output */
+ if (hbflag) {
+ if (*now_filled == 0) {
+ (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
+ *now_filled = 1;
+ *now = net->last_sent_time;
+ } else {
+ net->last_sent_time = *now;
+ }
+ hbflag = 0;
+ }
+ if (error == EHOSTUNREACH) {
+ /*
+ * Destination went
+ * unreachable
+ * during this send
+ */
+ sctp_move_chunks_from_net(stcb, net);
+ }
+ *reason_code = 7;
+ continue;
+ } else
+ asoc->ifp_had_enobuf = 0;
+ /* Only HB or ASCONF advances time */
+ if (hbflag) {
+ if (*now_filled == 0) {
+ (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
+ *now_filled = 1;
+ *now = net->last_sent_time;
+ } else {
+ net->last_sent_time = *now;
+ }
+ hbflag = 0;
+ }
+ /*
+ * Increase the number we sent; if a
+ * cookie is sent we don't report
+ * any as sent out.
+ */
+ outchain = endoutchain = NULL;
+ auth = NULL;
+ auth_offset = 0;
+ if (!no_out_cnt)
+ *num_out += ctl_cnt;
+ /* recalc a clean slate and setup */
+ if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
+ mtu = (net->mtu - SCTP_MIN_OVERHEAD);
+ } else {
+ mtu = (net->mtu - SCTP_MIN_V4_OVERHEAD);
+ }
+ to_out = 0;
+ no_fragmentflg = 1;
+ }
+ }
+ }
+ /* JRI: if dest is in PF state, do not send data to it */
+ if ((asoc->sctp_cmt_on_off == 1) &&
+ (asoc->sctp_cmt_pf > 0) &&
+ (net->dest_state & SCTP_ADDR_PF)) {
+ goto no_data_fill;
+ }
+ if (net->flight_size >= net->cwnd) {
+ goto no_data_fill;
+ }
+ if ((asoc->sctp_cmt_on_off == 1) &&
+ (SCTP_BASE_SYSCTL(sctp_buffer_splitting) & SCTP_RECV_BUFFER_SPLITTING) &&
+ (net->flight_size > max_rwnd_per_dest)) {
+ goto no_data_fill;
+ }
+ /*
+ * We need specific accounting for the usage of the send
+ * buffer. We also need to check the number of messages per
+ * net. For now, this is better than nothing and it is
+ * disabled by default...
+ */
+ if ((asoc->sctp_cmt_on_off == 1) &&
+ (SCTP_BASE_SYSCTL(sctp_buffer_splitting) & SCTP_SEND_BUFFER_SPLITTING) &&
+ (max_send_per_dest > 0) &&
+ (net->flight_size > max_send_per_dest)) {
+ goto no_data_fill;
+ }
+ /*********************/
+ /* Data transmission */
+ /*********************/
+ /*
+ * if AUTH for DATA is required and no AUTH has been added
+ * yet, account for this in the mtu now... if no data can be
+ * bundled, this adjustment won't matter anyway since the
+ * packet will be going out...
+ */
+ data_auth_reqd = sctp_auth_is_required_chunk(SCTP_DATA,
+ stcb->asoc.peer_auth_chunks);
+ if (data_auth_reqd && (auth == NULL)) {
+ mtu -= sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
+ }
+ /* now lets add any data within the MTU constraints */
+ switch (((struct sockaddr *)&net->ro._l_addr)->sa_family) {
+ case AF_INET:
+ if (net->mtu > (sizeof(struct ip) + sizeof(struct sctphdr)))
+ omtu = net->mtu - (sizeof(struct ip) + sizeof(struct sctphdr));
+ else
+ omtu = 0;
+ break;
+#ifdef INET6
+ case AF_INET6:
+ if (net->mtu > (sizeof(struct ip6_hdr) + sizeof(struct sctphdr)))
+ omtu = net->mtu - (sizeof(struct ip6_hdr) + sizeof(struct sctphdr));
+ else
+ omtu = 0;
+ break;
+#endif
+ default:
+ /* TSNH */
+ omtu = 0;
+ break;
+ }
+ if ((((asoc->state & SCTP_STATE_OPEN) == SCTP_STATE_OPEN) &&
+ (skip_data_for_this_net == 0)) ||
+ (cookie)) {
+ for (chk = TAILQ_FIRST(&asoc->send_queue); chk; chk = nchk) {
+ if (no_data_chunks) {
+ /* let only control go out */
+ *reason_code = 1;
+ break;
+ }
+ if (net->flight_size >= net->cwnd) {
+ /* skip this net, no room for data */
+ *reason_code = 2;
+ break;
+ }
+ nchk = TAILQ_NEXT(chk, sctp_next);
+ if ((chk->whoTo != NULL) &&
+ (chk->whoTo != net)) {
+ /* Don't send the chunk on this net */
+ continue;
+ }
+ if ((chk->send_size > omtu) && ((chk->flags & CHUNK_FLAGS_FRAGMENT_OK) == 0)) {
+ /*-
+ * strange, we have a chunk that is
+ * too big for its destination and
+ * yet no fragment-ok flag.
+ * Something went wrong when the
+ * PMTU changed... we did not mark
+ * this chunk for some reason. Fix
+ * it here by letting IP fragment
+ * it for now and printing a
+ * warning. This really should not
+ * happen...
+ */
+ SCTP_PRINTF("Warning chunk of %d bytes > mtu:%d and yet PMTU disc missed\n",
+ chk->send_size, mtu);
+ chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
+ }
+ if (SCTP_BASE_SYSCTL(sctp_enable_sack_immediately) &&
+ ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) == SCTP_STATE_SHUTDOWN_PENDING)) {
+ struct sctp_data_chunk *dchkh;
+
+ dchkh = mtod(chk->data, struct sctp_data_chunk *);
+ dchkh->ch.chunk_flags |= SCTP_DATA_SACK_IMMEDIATELY;
+ }
+ if (((chk->send_size <= mtu) && (chk->send_size <= r_mtu)) ||
+ ((chk->flags & CHUNK_FLAGS_FRAGMENT_OK) && (chk->send_size <= asoc->peers_rwnd))) {
+ /* ok we will add this one */
+
+ /*
+ * Add an AUTH chunk, if chunk
+ * requires it, save the offset into
+ * the chain for AUTH
+ */
+ if (data_auth_reqd) {
+ if (auth == NULL) {
+ outchain = sctp_add_auth_chunk(outchain,
+ &endoutchain,
+ &auth,
+ &auth_offset,
+ stcb,
+ SCTP_DATA);
+ auth_keyid = chk->auth_keyid;
+ override_ok = 0;
+ SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
+ } else if (override_ok) {
+ /*
+ * use this data's
+ * keyid
+ */
+ auth_keyid = chk->auth_keyid;
+ override_ok = 0;
+ } else if (auth_keyid != chk->auth_keyid) {
+ /*
+ * different keyid,
+ * so done bundling
+ */
+ break;
+ }
+ }
+ outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain, 0,
+ chk->send_size, chk->copy_by_ref);
+ if (outchain == NULL) {
+ SCTPDBG(SCTP_DEBUG_OUTPUT3, "No memory?\n");
+ if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
+ sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
+ }
+ *reason_code = 3;
+ SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
+ return (ENOMEM);
+ }
+ /* update our MTU size */
+ /* Do clear IP_DF ? */
+ if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
+ no_fragmentflg = 0;
+ }
+ /* unsigned subtraction of mtu */
+ if (mtu > chk->send_size)
+ mtu -= chk->send_size;
+ else
+ mtu = 0;
+ /* unsigned subtraction of r_mtu */
+ if (r_mtu > chk->send_size)
+ r_mtu -= chk->send_size;
+ else
+ r_mtu = 0;
+
+ to_out += chk->send_size;
+ if ((to_out > mx_mtu) && no_fragmentflg) {
+#ifdef INVARIANTS
+ panic("Exceeding mtu of %d out size is %d", mx_mtu, to_out);
+#else
+ SCTP_PRINTF("Exceeding mtu of %d out size is %d\n",
+ mx_mtu, to_out);
+#endif
+ }
+ chk->window_probe = 0;
+ data_list[bundle_at++] = chk;
+ if (bundle_at >= SCTP_MAX_DATA_BUNDLING) {
+ mtu = 0;
+ break;
+ }
+ if (chk->sent == SCTP_DATAGRAM_UNSENT) {
+ if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
+ SCTP_STAT_INCR_COUNTER64(sctps_outorderchunks);
+ } else {
+ SCTP_STAT_INCR_COUNTER64(sctps_outunorderchunks);
+ }
+ if (((chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) == SCTP_DATA_LAST_FRAG) &&
+ ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0))
+ /*
+ * Count the number
+ * of user messages
+ * that were
+ * fragmented; we do
+ * this by counting
+ * only when we see
+ * a LAST fragment.
+ */
+ SCTP_STAT_INCR_COUNTER64(sctps_fragusrmsgs);
+ }
+ if ((mtu == 0) || (r_mtu == 0) || (one_chunk)) {
+ if ((one_chunk) && (stcb->asoc.total_flight == 0)) {
+ data_list[0]->window_probe = 1;
+ net->window_probe = 1;
+ }
+ break;
+ }
+ } else {
+ /*
+ * Must be sent in order of the
+ * TSN's (on a network)
+ */
+ break;
+ }
+ } /* for (chunk gather loop for this net) */
+ } /* if asoc.state OPEN */
+no_data_fill:
+ /* Is there something to send for this destination? */
+ if (outchain) {
+ /* We may need to start a control timer or two */
+ if (asconf) {
+ sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp,
+ stcb, net);
+ /*
+ * do NOT clear the asconf flag as it is
+ * used to do appropriate source address
+ * selection.
+ */
+ }
+ if (cookie) {
+ sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net);
+ cookie = 0;
+ }
+ /* must start a send timer if data is being sent */
+ if (bundle_at && (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer))) {
+ /*
+ * no timer running on this destination;
+ * restart it.
+ */
+ sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
+ } else if ((asoc->sctp_cmt_on_off == 1) &&
+ (asoc->sctp_cmt_pf > 0) &&
+ pf_hbflag &&
+ ((net->dest_state & SCTP_ADDR_PF) == SCTP_ADDR_PF) &&
+ (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer))) {
+ /*
+ * JRS 5/14/07 - If a HB has been sent to a
+ * PF destination and no T3 timer is
+ * currently running, start the T3 timer to
+ * track the HBs that were sent.
+ */
+ sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
+ }
+ /* Now send it, if there is anything to send :> */
+ if ((error = sctp_lowlevel_chunk_output(inp,
+ stcb,
+ net,
+ (struct sockaddr *)&net->ro._l_addr,
+ outchain,
+ auth_offset,
+ auth,
+ auth_keyid,
+ no_fragmentflg,
+ bundle_at,
+ data_list[0],
+ asconf,
+ inp->sctp_lport, stcb->rport,
+ htonl(stcb->asoc.peer_vtag),
+ net->port, so_locked, NULL))) {
+ /* error, we could not output */
+ if (error == ENOBUFS) {
+ SCTP_STAT_INCR(sctps_lowlevelerr);
+ asoc->ifp_had_enobuf = 1;
+ }
+ if (from_where == 0) {
+ SCTP_STAT_INCR(sctps_lowlevelerrusr);
+ }
+ SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error);
+ if (hbflag) {
+ if (*now_filled == 0) {
+ (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
+ *now_filled = 1;
+ *now = net->last_sent_time;
+ } else {
+ net->last_sent_time = *now;
+ }
+ hbflag = 0;
+ }
+ if (error == EHOSTUNREACH) {
+ /*
+ * Destination went unreachable
+ * during this send
+ */
+ sctp_move_chunks_from_net(stcb, net);
+ }
+ *reason_code = 6;
+ /*-
+ * Reset these out of paranoia. As far as we
+ * can tell the continue takes us back to the
+ * top of the for loop, but just to make sure
+ * reset them again here.
+ */
+ ctl_cnt = bundle_at = 0;
+ continue; /* This takes us back to the
+ * for() for the nets. */
+ } else {
+ asoc->ifp_had_enobuf = 0;
+ }
+ outchain = endoutchain = NULL;
+ auth = NULL;
+ auth_offset = 0;
+ if (bundle_at || hbflag) {
+ /* For data/asconf and hb set time */
+ if (*now_filled == 0) {
+ (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
+ *now_filled = 1;
+ *now = net->last_sent_time;
+ } else {
+ net->last_sent_time = *now;
+ }
+ }
+ if (!no_out_cnt) {
+ *num_out += (ctl_cnt + bundle_at);
+ }
+ if (bundle_at) {
+ /* setup for a RTO measurement */
+ tsns_sent = data_list[0]->rec.data.TSN_seq;
+ /* fill time if not already filled */
+ if (*now_filled == 0) {
+ (void)SCTP_GETTIME_TIMEVAL(&asoc->time_last_sent);
+ *now_filled = 1;
+ *now = asoc->time_last_sent;
+ } else {
+ asoc->time_last_sent = *now;
+ }
+ data_list[0]->do_rtt = 1;
+ SCTP_STAT_INCR_BY(sctps_senddata, bundle_at);
+ sctp_clean_up_datalist(stcb, asoc, data_list, bundle_at, net);
+ if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
+ if (net->flight_size < net->cwnd) {
+ /* start or restart it */
+ if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
+ sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, inp, stcb, net,
+ SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_2);
+ }
+ SCTP_STAT_INCR(sctps_earlyfrstrout);
+ sctp_timer_start(SCTP_TIMER_TYPE_EARLYFR, inp, stcb, net);
+ } else {
+ /* stop it if its running */
+ if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
+ SCTP_STAT_INCR(sctps_earlyfrstpout);
+ sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, inp, stcb, net,
+ SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_3);
+ }
+ }
+ }
+ }
+ if (one_chunk) {
+ break;
+ }
+ }
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
+ sctp_log_cwnd(stcb, net, tsns_sent, SCTP_CWND_LOG_FROM_SEND);
+ }
+ }
+ if (old_start_at == NULL) {
+ old_start_at = start_at;
+ start_at = TAILQ_FIRST(&asoc->nets);
+ if (old_start_at)
+ goto again_one_more_time;
+ }
+ /*
+ * At the end there should be no NON timed chunks hanging on this
+ * queue.
+ */
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
+ sctp_log_cwnd(stcb, net, *num_out, SCTP_CWND_LOG_FROM_SEND);
+ }
+ if ((*num_out == 0) && (*reason_code == 0)) {
+ *reason_code = 4;
+ } else {
+ *reason_code = 5;
+ }
+ sctp_clean_up_ctl(stcb, asoc);
+ return (0);
+}
+
+void
+sctp_queue_op_err(struct sctp_tcb *stcb, struct mbuf *op_err)
+{
+ /*-
+ * Prepend an OPERATION_ERROR chunk header and put it on the end of
+ * the control chunk queue.
+ */
+ struct sctp_chunkhdr *hdr;
+ struct sctp_tmit_chunk *chk;
+ struct mbuf *mat;
+
+ SCTP_TCB_LOCK_ASSERT(stcb);
+ sctp_alloc_a_chunk(stcb, chk);
+ if (chk == NULL) {
+ /* no memory */
+ sctp_m_freem(op_err);
+ return;
+ }
+ chk->copy_by_ref = 0;
+ SCTP_BUF_PREPEND(op_err, sizeof(struct sctp_chunkhdr), M_DONTWAIT);
+ if (op_err == NULL) {
+ sctp_free_a_chunk(stcb, chk);
+ return;
+ }
+ chk->send_size = 0;
+ mat = op_err;
+ while (mat != NULL) {
+ chk->send_size += SCTP_BUF_LEN(mat);
+ mat = SCTP_BUF_NEXT(mat);
+ }
+ chk->rec.chunk_id.id = SCTP_OPERATION_ERROR;
+ chk->rec.chunk_id.can_take_data = 1;
+ chk->sent = SCTP_DATAGRAM_UNSENT;
+ chk->snd_count = 0;
+ chk->flags = 0;
+ chk->asoc = &stcb->asoc;
+ chk->data = op_err;
+ chk->whoTo = chk->asoc->primary_destination;
+ atomic_add_int(&chk->whoTo->ref_count, 1);
+ hdr = mtod(op_err, struct sctp_chunkhdr *);
+ hdr->chunk_type = SCTP_OPERATION_ERROR;
+ hdr->chunk_flags = 0;
+ hdr->chunk_length = htons(chk->send_size);
+ TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue,
+ chk,
+ sctp_next);
+ chk->asoc->ctrl_queue_cnt++;
+}
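+
+/*-
+ * Usage sketch (editor's illustration; the error-cause fill is
+ * hypothetical): build an mbuf holding an error cause, then hand it
+ * off - the chunk header is prepended here and the mbuf is always
+ * consumed, even on failure:
+ *   op_err = sctp_get_mbuf_for_msg(len, 0, M_DONTWAIT, 1, MT_DATA);
+ *   ... fill in the error cause, set SCTP_BUF_LEN(op_err) = len ...
+ *   sctp_queue_op_err(stcb, op_err);
+ */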
+
+int
+sctp_send_cookie_echo(struct mbuf *m,
+ int offset,
+ struct sctp_tcb *stcb,
+ struct sctp_nets *net)
+{
+ /*-
+ * pull out the cookie and put it at the front of the control chunk
+ * queue.
+ */
+ int at;
+ struct mbuf *cookie;
+ struct sctp_paramhdr parm, *phdr;
+ struct sctp_chunkhdr *hdr;
+ struct sctp_tmit_chunk *chk;
+ uint16_t ptype, plen;
+
+ /* First find the cookie in the param area */
+ cookie = NULL;
+ at = offset + sizeof(struct sctp_init_chunk);
+
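+ /*
+ * Editor's note: parameters are TLVs padded to 4-byte boundaries, so
+ * the walk below advances by SCTP_SIZE32(plen); e.g. a 13-byte
+ * parameter occupies 16 bytes on the wire.
+ */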
+ SCTP_TCB_LOCK_ASSERT(stcb);
+ do {
+ phdr = sctp_get_next_param(m, at, &parm, sizeof(parm));
+ if (phdr == NULL) {
+ return (-3);
+ }
+ ptype = ntohs(phdr->param_type);
+ plen = ntohs(phdr->param_length);
+ if (ptype == SCTP_STATE_COOKIE) {
+ int pad;
+
+ /* found the cookie */
+ if ((pad = (plen % 4))) {
+ plen += 4 - pad;
+ }
+ cookie = SCTP_M_COPYM(m, at, plen, M_DONTWAIT);
+ if (cookie == NULL) {
+ /* No memory */
+ return (-2);
+ }
+#ifdef SCTP_MBUF_LOGGING
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
+ struct mbuf *mat;
+
+ mat = cookie;
+ while (mat) {
+ if (SCTP_BUF_IS_EXTENDED(mat)) {
+ sctp_log_mb(mat, SCTP_MBUF_ICOPY);
+ }
+ mat = SCTP_BUF_NEXT(mat);
+ }
+ }
+#endif
+ break;
+ }
+ at += SCTP_SIZE32(plen);
+ } while (phdr);
+ if (cookie == NULL) {
+ /* Did not find the cookie */
+ return (-3);
+ }
+ /* ok, we got the cookie; let's change it into a COOKIE-ECHO chunk */
+
+ /* first the change from param to cookie */
+ hdr = mtod(cookie, struct sctp_chunkhdr *);
+ hdr->chunk_type = SCTP_COOKIE_ECHO;
+ hdr->chunk_flags = 0;
+ /* get the chunk stuff now and place it in the FRONT of the queue */
+ sctp_alloc_a_chunk(stcb, chk);
+ if (chk == NULL) {
+ /* no memory */
+ sctp_m_freem(cookie);
+ return (-5);
+ }
+ chk->copy_by_ref = 0;
+ chk->send_size = plen;
+ chk->rec.chunk_id.id = SCTP_COOKIE_ECHO;
+ chk->rec.chunk_id.can_take_data = 0;
+ chk->sent = SCTP_DATAGRAM_UNSENT;
+ chk->snd_count = 0;
+ chk->flags = CHUNK_FLAGS_FRAGMENT_OK;
+ chk->asoc = &stcb->asoc;
+ chk->data = cookie;
+ chk->whoTo = chk->asoc->primary_destination;
+ atomic_add_int(&chk->whoTo->ref_count, 1);
+ TAILQ_INSERT_HEAD(&chk->asoc->control_send_queue, chk, sctp_next);
+ chk->asoc->ctrl_queue_cnt++;
+ return (0);
+}
+
+void
+sctp_send_heartbeat_ack(struct sctp_tcb *stcb,
+ struct mbuf *m,
+ int offset,
+ int chk_length,
+ struct sctp_nets *net)
+{
+ /*
+ * Take a HB request, turn it into a HB ack, and queue it for sending.
+ */
+ struct mbuf *outchain;
+ struct sctp_chunkhdr *chdr;
+ struct sctp_tmit_chunk *chk;
+
+
+ if (net == NULL)
+ /* must have a net pointer */
+ return;
+
+ outchain = SCTP_M_COPYM(m, offset, chk_length, M_DONTWAIT);
+ if (outchain == NULL) {
+ /* gak out of memory */
+ return;
+ }
+#ifdef SCTP_MBUF_LOGGING
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
+ struct mbuf *mat;
+
+ mat = outchain;
+ while (mat) {
+ if (SCTP_BUF_IS_EXTENDED(mat)) {
+ sctp_log_mb(mat, SCTP_MBUF_ICOPY);
+ }
+ mat = SCTP_BUF_NEXT(mat);
+ }
+ }
+#endif
+ chdr = mtod(outchain, struct sctp_chunkhdr *);
+ chdr->chunk_type = SCTP_HEARTBEAT_ACK;
+ chdr->chunk_flags = 0;
+ if (chk_length % 4) {
+ /* need pad */
+ uint32_t cpthis = 0;
+ int padlen;
+
+ padlen = 4 - (chk_length % 4);
+ m_copyback(outchain, chk_length, padlen, (caddr_t)&cpthis);
+ }
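+ /*
+ * Example (editor's sketch): chk_length = 46 gives padlen = 2; the
+ * two zero bytes are copied in from cpthis by m_copyback().
+ */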
+ sctp_alloc_a_chunk(stcb, chk);
+ if (chk == NULL) {
+ /* no memory */
+ sctp_m_freem(outchain);
+ return;
+ }
+ chk->copy_by_ref = 0;
+ chk->send_size = chk_length;
+ chk->rec.chunk_id.id = SCTP_HEARTBEAT_ACK;
+ chk->rec.chunk_id.can_take_data = 1;
+ chk->sent = SCTP_DATAGRAM_UNSENT;
+ chk->snd_count = 0;
+ chk->flags = 0;
+ chk->asoc = &stcb->asoc;
+ chk->data = outchain;
+ chk->whoTo = net;
+ atomic_add_int(&chk->whoTo->ref_count, 1);
+ TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
+ chk->asoc->ctrl_queue_cnt++;
+}
+
+void
+sctp_send_cookie_ack(struct sctp_tcb *stcb)
+{
+ /* formulate and queue a cookie-ack back to sender */
+ struct mbuf *cookie_ack;
+ struct sctp_chunkhdr *hdr;
+ struct sctp_tmit_chunk *chk;
+
+ cookie_ack = NULL;
+ SCTP_TCB_LOCK_ASSERT(stcb);
+
+ cookie_ack = sctp_get_mbuf_for_msg(sizeof(struct sctp_chunkhdr), 0, M_DONTWAIT, 1, MT_HEADER);
+ if (cookie_ack == NULL) {
+ /* no mbuf's */
+ return;
+ }
+ SCTP_BUF_RESV_UF(cookie_ack, SCTP_MIN_OVERHEAD);
+ sctp_alloc_a_chunk(stcb, chk);
+ if (chk == NULL) {
+ /* no memory */
+ sctp_m_freem(cookie_ack);
+ return;
+ }
+ chk->copy_by_ref = 0;
+ chk->send_size = sizeof(struct sctp_chunkhdr);
+ chk->rec.chunk_id.id = SCTP_COOKIE_ACK;
+ chk->rec.chunk_id.can_take_data = 1;
+ chk->sent = SCTP_DATAGRAM_UNSENT;
+ chk->snd_count = 0;
+ chk->flags = 0;
+ chk->asoc = &stcb->asoc;
+ chk->data = cookie_ack;
+ if (chk->asoc->last_control_chunk_from != NULL) {
+ chk->whoTo = chk->asoc->last_control_chunk_from;
+ } else {
+ chk->whoTo = chk->asoc->primary_destination;
+ }
+ atomic_add_int(&chk->whoTo->ref_count, 1);
+ hdr = mtod(cookie_ack, struct sctp_chunkhdr *);
+ hdr->chunk_type = SCTP_COOKIE_ACK;
+ hdr->chunk_flags = 0;
+ hdr->chunk_length = htons(chk->send_size);
+ SCTP_BUF_LEN(cookie_ack) = chk->send_size;
+ TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
+ chk->asoc->ctrl_queue_cnt++;
+ return;
+}
+
+
+void
+sctp_send_shutdown_ack(struct sctp_tcb *stcb, struct sctp_nets *net)
+{
+ /* formulate and queue a SHUTDOWN-ACK back to the sender */
+ struct mbuf *m_shutdown_ack;
+ struct sctp_shutdown_ack_chunk *ack_cp;
+ struct sctp_tmit_chunk *chk;
+
+ m_shutdown_ack = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_ack_chunk), 0, M_DONTWAIT, 1, MT_HEADER);
+ if (m_shutdown_ack == NULL) {
+ /* no mbuf's */
+ return;
+ }
+ SCTP_BUF_RESV_UF(m_shutdown_ack, SCTP_MIN_OVERHEAD);
+ sctp_alloc_a_chunk(stcb, chk);
+ if (chk == NULL) {
+ /* no memory */
+ sctp_m_freem(m_shutdown_ack);
+ return;
+ }
+ chk->copy_by_ref = 0;
+ chk->send_size = sizeof(struct sctp_chunkhdr);
+ chk->rec.chunk_id.id = SCTP_SHUTDOWN_ACK;
+ chk->rec.chunk_id.can_take_data = 1;
+ chk->sent = SCTP_DATAGRAM_UNSENT;
+ chk->snd_count = 0;
+ chk->flags = 0;
+ chk->asoc = &stcb->asoc;
+ chk->data = m_shutdown_ack;
+ chk->whoTo = net;
+ atomic_add_int(&net->ref_count, 1);
+
+ ack_cp = mtod(m_shutdown_ack, struct sctp_shutdown_ack_chunk *);
+ ack_cp->ch.chunk_type = SCTP_SHUTDOWN_ACK;
+ ack_cp->ch.chunk_flags = 0;
+ ack_cp->ch.chunk_length = htons(chk->send_size);
+ SCTP_BUF_LEN(m_shutdown_ack) = chk->send_size;
+ TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
+ chk->asoc->ctrl_queue_cnt++;
+ return;
+}
+
+void
+sctp_send_shutdown(struct sctp_tcb *stcb, struct sctp_nets *net)
+{
+ /* formulate and queue a SHUTDOWN to the sender */
+ struct mbuf *m_shutdown;
+ struct sctp_shutdown_chunk *shutdown_cp;
+ struct sctp_tmit_chunk *chk;
+
+ m_shutdown = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_chunk), 0, M_DONTWAIT, 1, MT_HEADER);
+ if (m_shutdown == NULL) {
+ /* no mbuf's */
+ return;
+ }
+ SCTP_BUF_RESV_UF(m_shutdown, SCTP_MIN_OVERHEAD);
+ sctp_alloc_a_chunk(stcb, chk);
+ if (chk == NULL) {
+ /* no memory */
+ sctp_m_freem(m_shutdown);
+ return;
+ }
+ chk->copy_by_ref = 0;
+ chk->send_size = sizeof(struct sctp_shutdown_chunk);
+ chk->rec.chunk_id.id = SCTP_SHUTDOWN;
+ chk->rec.chunk_id.can_take_data = 1;
+ chk->sent = SCTP_DATAGRAM_UNSENT;
+ chk->snd_count = 0;
+ chk->flags = 0;
+ chk->asoc = &stcb->asoc;
+ chk->data = m_shutdown;
+ chk->whoTo = net;
+ atomic_add_int(&net->ref_count, 1);
+
+ shutdown_cp = mtod(m_shutdown, struct sctp_shutdown_chunk *);
+ shutdown_cp->ch.chunk_type = SCTP_SHUTDOWN;
+ shutdown_cp->ch.chunk_flags = 0;
+ shutdown_cp->ch.chunk_length = htons(chk->send_size);
+ shutdown_cp->cumulative_tsn_ack = htonl(stcb->asoc.cumulative_tsn);
+ SCTP_BUF_LEN(m_shutdown) = chk->send_size;
+ TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
+ chk->asoc->ctrl_queue_cnt++;
+ return;
+}
+
+void
+sctp_send_asconf(struct sctp_tcb *stcb, struct sctp_nets *net, int addr_locked)
+{
+ /*
+ * formulate and queue an ASCONF to the peer. ASCONF parameters
+ * should be queued on the assoc queue.
+ */
+ struct sctp_tmit_chunk *chk;
+ struct mbuf *m_asconf;
+ int len;
+
+ SCTP_TCB_LOCK_ASSERT(stcb);
+
+ if ((!TAILQ_EMPTY(&stcb->asoc.asconf_send_queue)) &&
+ (!sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_MULTIPLE_ASCONFS))) {
+ /* can't send a new one if there is one in flight already */
+ return;
+ }
+ /* compose an ASCONF chunk, maximum length is PMTU */
+ m_asconf = sctp_compose_asconf(stcb, &len, addr_locked);
+ if (m_asconf == NULL) {
+ return;
+ }
+ sctp_alloc_a_chunk(stcb, chk);
+ if (chk == NULL) {
+ /* no memory */
+ sctp_m_freem(m_asconf);
+ return;
+ }
+ chk->copy_by_ref = 0;
+ chk->data = m_asconf;
+ chk->send_size = len;
+ chk->rec.chunk_id.id = SCTP_ASCONF;
+ chk->rec.chunk_id.can_take_data = 0;
+ chk->sent = SCTP_DATAGRAM_UNSENT;
+ chk->snd_count = 0;
+ chk->flags = CHUNK_FLAGS_FRAGMENT_OK;
+ chk->asoc = &stcb->asoc;
+ chk->whoTo = net;
+ atomic_add_int(&chk->whoTo->ref_count, 1);
+ TAILQ_INSERT_TAIL(&chk->asoc->asconf_send_queue, chk, sctp_next);
+ chk->asoc->ctrl_queue_cnt++;
+ return;
+}
+
+void
+sctp_send_asconf_ack(struct sctp_tcb *stcb)
+{
+ /*
+ * formulate and queue an ASCONF-ACK back to the sender. The
+ * ASCONF-ACK must be stored in the tcb.
+ */
+ struct sctp_tmit_chunk *chk;
+ struct sctp_asconf_ack *ack, *latest_ack;
+ struct mbuf *m_ack, *m;
+ struct sctp_nets *net = NULL;
+
+ SCTP_TCB_LOCK_ASSERT(stcb);
+ /* Get the latest ASCONF-ACK */
+ latest_ack = TAILQ_LAST(&stcb->asoc.asconf_ack_sent, sctp_asconf_ackhead);
+ if (latest_ack == NULL) {
+ return;
+ }
+ if (latest_ack->last_sent_to != NULL &&
+ latest_ack->last_sent_to == stcb->asoc.last_control_chunk_from) {
+ /* we're doing a retransmission */
+ net = sctp_find_alternate_net(stcb, stcb->asoc.last_control_chunk_from, 0);
+ if (net == NULL) {
+ /* no alternate */
+ if (stcb->asoc.last_control_chunk_from == NULL)
+ net = stcb->asoc.primary_destination;
+ else
+ net = stcb->asoc.last_control_chunk_from;
+ }
+ } else {
+ /* normal case */
+ if (stcb->asoc.last_control_chunk_from == NULL)
+ net = stcb->asoc.primary_destination;
+ else
+ net = stcb->asoc.last_control_chunk_from;
+ }
+ latest_ack->last_sent_to = net;
+
+ TAILQ_FOREACH(ack, &stcb->asoc.asconf_ack_sent, next) {
+ if (ack->data == NULL) {
+ continue;
+ }
+ /* copy the asconf_ack */
+ m_ack = SCTP_M_COPYM(ack->data, 0, M_COPYALL, M_DONTWAIT);
+ if (m_ack == NULL) {
+ /* couldn't copy it */
+ return;
+ }
+#ifdef SCTP_MBUF_LOGGING
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
+ struct mbuf *mat;
+
+ mat = m_ack;
+ while (mat) {
+ if (SCTP_BUF_IS_EXTENDED(mat)) {
+ sctp_log_mb(mat, SCTP_MBUF_ICOPY);
+ }
+ mat = SCTP_BUF_NEXT(mat);
+ }
+ }
+#endif
+
+ sctp_alloc_a_chunk(stcb, chk);
+ if (chk == NULL) {
+ /* no memory */
+ if (m_ack)
+ sctp_m_freem(m_ack);
+ return;
+ }
+ chk->copy_by_ref = 0;
+
+ chk->whoTo = net;
+ chk->data = m_ack;
+ chk->send_size = ack->len;
+ chk->rec.chunk_id.id = SCTP_ASCONF_ACK;
+ chk->rec.chunk_id.can_take_data = 1;
+ chk->sent = SCTP_DATAGRAM_UNSENT;
+ chk->snd_count = 0;
+ chk->flags |= CHUNK_FLAGS_FRAGMENT_OK; /* XXX */
+ chk->asoc = &stcb->asoc;
+ atomic_add_int(&chk->whoTo->ref_count, 1);
+
+ TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
+ chk->asoc->ctrl_queue_cnt++;
+ }
+ return;
+}
+
+
+static int
+sctp_chunk_retransmission(struct sctp_inpcb *inp,
+ struct sctp_tcb *stcb,
+ struct sctp_association *asoc,
+ int *cnt_out, struct timeval *now, int *now_filled, int *fr_done, int so_locked
+#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
+ SCTP_UNUSED
+#endif
+)
+{
+ /*-
+ * send out one MTU of retransmission. If fast_retransmit is
+ * happening we ignore the cwnd. Otherwise we obey the cwnd and
+ * rwnd. For a Cookie or Asconf in the control chunk queue we
+ * retransmit them by themselves.
+ *
+ * For data chunks we will pick out the lowest TSN's in the sent_queue
+ * marked for resend and bundle them all together (up to a MTU of
+ * destination). The address to send to should have been
+ * selected/changed where the retransmission was marked (i.e. in FR
+ * or t3-timeout routines).
+ */
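+ /*
+ * Worked example (hypothetical numbers): with net->mtu = 1500 and
+ * a 20-byte IPv4 header plus 12-byte SCTP common header, roughly
+ * 1468 bytes are available per pass, so the lowest marked TSNs are
+ * bundled until the next chunk (plus any pending AUTH overhead) no
+ * longer fits or SCTP_MAX_DATA_BUNDLING entries are collected.
+ */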
+ struct sctp_tmit_chunk *data_list[SCTP_MAX_DATA_BUNDLING];
+ struct sctp_tmit_chunk *chk, *fwd;
+ struct mbuf *m, *endofchain;
+ struct sctp_nets *net = NULL;
+ uint32_t tsns_sent = 0;
+ int no_fragmentflg, bundle_at, cnt_thru;
+ unsigned int mtu;
+ int error, i, one_chunk, fwd_tsn, ctl_cnt, tmr_started;
+ struct sctp_auth_chunk *auth = NULL;
+ uint32_t auth_offset = 0;
+ uint16_t auth_keyid;
+ int override_ok = 1;
+ int data_auth_reqd = 0;
+ uint32_t dmtu = 0;
+
+ SCTP_TCB_LOCK_ASSERT(stcb);
+ tmr_started = ctl_cnt = bundle_at = error = 0;
+ no_fragmentflg = 1;
+ fwd_tsn = 0;
+ *cnt_out = 0;
+ fwd = NULL;
+ endofchain = m = NULL;
+ auth_keyid = stcb->asoc.authinfo.active_keyid;
+#ifdef SCTP_AUDITING_ENABLED
+ sctp_audit_log(0xC3, 1);
+#endif
+ if ((TAILQ_EMPTY(&asoc->sent_queue)) &&
+ (TAILQ_EMPTY(&asoc->control_send_queue))) {
+ SCTPDBG(SCTP_DEBUG_OUTPUT1, "SCTP hits empty queue with cnt set to %d?\n",
+ asoc->sent_queue_retran_cnt);
+ asoc->sent_queue_cnt = 0;
+ asoc->sent_queue_cnt_removeable = 0;
+ /* send back 0/0 so we enter normal transmission */
+ *cnt_out = 0;
+ return (0);
+ }
+ TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
+ if ((chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) ||
+ (chk->rec.chunk_id.id == SCTP_STREAM_RESET) ||
+ (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN)) {
+ if (chk->sent != SCTP_DATAGRAM_RESEND) {
+ continue;
+ }
+ if (chk->rec.chunk_id.id == SCTP_STREAM_RESET) {
+ if (chk != asoc->str_reset) {
+ /*
+ * not eligible for retransmission
+ * if it's not ours
+ */
+ continue;
+ }
+ }
+ ctl_cnt++;
+ if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) {
+ fwd_tsn = 1;
+ fwd = chk;
+ }
+ /*
+ * Add an AUTH chunk if this chunk requires it, and
+ * save the offset into the chain for the AUTH chunk.
+ */
+ if ((auth == NULL) &&
+ (sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
+ stcb->asoc.peer_auth_chunks))) {
+ m = sctp_add_auth_chunk(m, &endofchain,
+ &auth, &auth_offset,
+ stcb,
+ chk->rec.chunk_id.id);
+ SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
+ }
+ m = sctp_copy_mbufchain(chk->data, m, &endofchain, 0, chk->send_size, chk->copy_by_ref);
+ break;
+ }
+ }
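+ /*
+ * Note that the loop above breaks after copying the first eligible
+ * control chunk, so a COOKIE-ECHO, STREAM-RESET or FWD-TSN is
+ * retransmitted by itself, as the strategy comment states.
+ */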
+ one_chunk = 0;
+ cnt_thru = 0;
+ /* do we have control chunks to retransmit? */
+ if (m != NULL) {
+ /* Start a timer whether we succeed or fail */
+ if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
+ sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, chk->whoTo);
+ } else if (chk->rec.chunk_id.id == SCTP_ASCONF)
+ sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, chk->whoTo);
+ chk->snd_count++; /* update our count */
+ if ((error = sctp_lowlevel_chunk_output(inp, stcb, chk->whoTo,
+ (struct sockaddr *)&chk->whoTo->ro._l_addr, m,
+ auth_offset, auth, stcb->asoc.authinfo.active_keyid,
+ no_fragmentflg, 0, NULL, 0,
+ inp->sctp_lport, stcb->rport, htonl(stcb->asoc.peer_vtag),
+ chk->whoTo->port, so_locked, NULL))) {
+ SCTP_STAT_INCR(sctps_lowlevelerr);
+ return (error);
+ }
+ m = endofchain = NULL;
+ auth = NULL;
+ auth_offset = 0;
+ /*
+ * We don't want to mark net->last_sent_time here, since we
+ * use it for HBs and a retransmission cannot measure RTT.
+ */
+ /* (void)SCTP_GETTIME_TIMEVAL(&chk->whoTo->last_sent_time); */
+ *cnt_out += 1;
+ chk->sent = SCTP_DATAGRAM_SENT;
+ sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
+ if (fwd_tsn == 0) {
+ return (0);
+ } else {
+ /* Clean up the fwd-tsn list */
+ sctp_clean_up_ctl(stcb, asoc);
+ return (0);
+ }
+ }
+ /*
+ * Ok, it is just data retransmission we need to do or that and a
+ * fwd-tsn with it all.
+ */
+ if (TAILQ_EMPTY(&asoc->sent_queue)) {
+ return (SCTP_RETRAN_DONE);
+ }
+ if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED) ||
+ (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT)) {
+ /* not yet open, resend the cookie and that is it */
+ return (1);
+ }
+#ifdef SCTP_AUDITING_ENABLED
+ sctp_auditing(20, inp, stcb, NULL);
+#endif
+ data_auth_reqd = sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks);
+ TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
+ if (chk->sent != SCTP_DATAGRAM_RESEND) {
+ /* No, not sent to this net or not ready for rtx */
+ continue;
+ }
+ if (chk->data == NULL) {
+ printf("TSN:%x chk->snd_count:%d chk->sent:%d can't retran - no data\n",
+ chk->rec.data.TSN_seq, chk->snd_count, chk->sent);
+ continue;
+ }
+ if ((SCTP_BASE_SYSCTL(sctp_max_retran_chunk)) &&
+ (chk->snd_count >= SCTP_BASE_SYSCTL(sctp_max_retran_chunk))) {
+ /* Gak, we have exceeded max unlucky retran, abort! */
+ SCTP_PRINTF("Gak, chk->snd_count:%d >= max:%d - send abort\n",
+ chk->snd_count,
+ SCTP_BASE_SYSCTL(sctp_max_retran_chunk));
+ atomic_add_int(&stcb->asoc.refcnt, 1);
+ sctp_abort_an_association(stcb->sctp_ep, stcb, 0, NULL, so_locked);
+ SCTP_TCB_LOCK(stcb);
+ atomic_subtract_int(&stcb->asoc.refcnt, 1);
+ return (SCTP_RETRAN_EXIT);
+ }
+ /* pick up the net */
+ net = chk->whoTo;
+ if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
+ mtu = (net->mtu - SCTP_MIN_OVERHEAD);
+ } else {
+ mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
+ }
+
+ if ((asoc->peers_rwnd < mtu) && (asoc->total_flight > 0)) {
+ /* No room in peers rwnd */
+ uint32_t tsn;
+
+ tsn = asoc->last_acked_seq + 1;
+ if (tsn == chk->rec.data.TSN_seq) {
+ /*
+ * we make a special exception for this
+ * case. The peer has no rwnd but is missing
+ * the lowest chunk, which is probably what
+ * is holding up the rwnd.
+ */
+ goto one_chunk_around;
+ }
+ return (1);
+ }
+one_chunk_around:
+ if (asoc->peers_rwnd < mtu) {
+ one_chunk = 1;
+ if ((asoc->peers_rwnd == 0) &&
+ (asoc->total_flight == 0)) {
+ chk->window_probe = 1;
+ chk->whoTo->window_probe = 1;
+ }
+ }
+#ifdef SCTP_AUDITING_ENABLED
+ sctp_audit_log(0xC3, 2);
+#endif
+ bundle_at = 0;
+ m = NULL;
+ net->fast_retran_ip = 0;
+ if (chk->rec.data.doing_fast_retransmit == 0) {
+ /*
+ * if no FR is in progress, skip destinations that
+ * have flight_size >= cwnd.
+ */
+ if (net->flight_size >= net->cwnd) {
+ continue;
+ }
+ } else {
+ /*
+ * Mark the destination net to have FR recovery
+ * limits put on it.
+ */
+ *fr_done = 1;
+ net->fast_retran_ip = 1;
+ }
+
+ /*
+ * if no AUTH is yet included and this chunk requires it,
+ * make sure to account for it. We don't apply the size
+ * until the AUTH chunk is actually added below in case
+ * there is no room for this chunk.
+ */
+ if (data_auth_reqd && (auth == NULL)) {
+ dmtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
+ } else
+ dmtu = 0;
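+ /*
+ * Illustrative sketch (hypothetical sizes): with a pending AUTH
+ * chunk of, say, 24 bytes, dmtu = 24 and a data chunk qualifies
+ * below only if send_size fits in (mtu - dmtu); the AUTH size is
+ * charged against mtu only once the AUTH chunk is actually added.
+ */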
+
+ if ((chk->send_size <= (mtu - dmtu)) ||
+ (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) {
+ /* ok we will add this one */
+ if (data_auth_reqd) {
+ if (auth == NULL) {
+ m = sctp_add_auth_chunk(m,
+ &endofchain,
+ &auth,
+ &auth_offset,
+ stcb,
+ SCTP_DATA);
+ auth_keyid = chk->auth_keyid;
+ override_ok = 0;
+ SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
+ } else if (override_ok) {
+ auth_keyid = chk->auth_keyid;
+ override_ok = 0;
+ } else if (chk->auth_keyid != auth_keyid) {
+ /* different keyid, so done bundling */
+ break;
+ }
+ }
+ m = sctp_copy_mbufchain(chk->data, m, &endofchain, 0, chk->send_size, chk->copy_by_ref);
+ if (m == NULL) {
+ SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
+ return (ENOMEM);
+ }
+ /* Do clear IP_DF ? */
+ if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
+ no_fragmentflg = 0;
+ }
+ /* update our MTU size */
+ if (mtu > (chk->send_size + dmtu))
+ mtu -= (chk->send_size + dmtu);
+ else
+ mtu = 0;
+ data_list[bundle_at++] = chk;
+ if (one_chunk && (asoc->total_flight <= 0)) {
+ SCTP_STAT_INCR(sctps_windowprobed);
+ }
+ }
+ if (one_chunk == 0) {
+ /*
+ * now, are there any more chunks forward of chk
+ * to pick up?
+ */
+ fwd = TAILQ_NEXT(chk, sctp_next);
+ while (fwd) {
+ if (fwd->sent != SCTP_DATAGRAM_RESEND) {
+ /* Nope, not for retran */
+ fwd = TAILQ_NEXT(fwd, sctp_next);
+ continue;
+ }
+ if (fwd->whoTo != net) {
+ /* Nope, not the net in question */
+ fwd = TAILQ_NEXT(fwd, sctp_next);
+ continue;
+ }
+ if (data_auth_reqd && (auth == NULL)) {
+ dmtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
+ } else
+ dmtu = 0;
+ if (fwd->send_size <= (mtu - dmtu)) {
+ if (data_auth_reqd) {
+ if (auth == NULL) {
+ m = sctp_add_auth_chunk(m,
+ &endofchain,
+ &auth,
+ &auth_offset,
+ stcb,
+ SCTP_DATA);
+ auth_keyid = fwd->auth_keyid;
+ override_ok = 0;
+ SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
+ } else if (override_ok) {
+ auth_keyid = fwd->auth_keyid;
+ override_ok = 0;
+ } else if (fwd->auth_keyid != auth_keyid) {
+ /*
+ * different keyid,
+ * so done bundling
+ */
+ break;
+ }
+ }
+ m = sctp_copy_mbufchain(fwd->data, m, &endofchain, 0, fwd->send_size, fwd->copy_by_ref);
+ if (m == NULL) {
+ SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
+ return (ENOMEM);
+ }
+ /* Do clear IP_DF ? */
+ if (fwd->flags & CHUNK_FLAGS_FRAGMENT_OK) {
+ no_fragmentflg = 0;
+ }
+ /* update our MTU size */
+ if (mtu > (fwd->send_size + dmtu))
+ mtu -= (fwd->send_size + dmtu);
+ else
+ mtu = 0;
+ data_list[bundle_at++] = fwd;
+ if (bundle_at >= SCTP_MAX_DATA_BUNDLING) {
+ break;
+ }
+ fwd = TAILQ_NEXT(fwd, sctp_next);
+ } else {
+ /* can't fit so we are done */
+ break;
+ }
+ }
+ }
+ /* Is there something to send for this destination? */
+ if (m) {
+ /*
+ * Whether we fail or succeed, we should start a
+ * timer. A failure is like a lost IP packet :-)
+ */
+ if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
+ /*
+ * no timer running on this destination,
+ * so restart it.
+ */
+ sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
+ tmr_started = 1;
+ }
+ /* Now lets send it, if there is anything to send :> */
+ if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
+ (struct sockaddr *)&net->ro._l_addr, m,
+ auth_offset, auth, auth_keyid,
+ no_fragmentflg, 0, NULL, 0,
+ inp->sctp_lport, stcb->rport, htonl(stcb->asoc.peer_vtag),
+ net->port, so_locked, NULL))) {
+ /* error, we could not output */
+ SCTP_STAT_INCR(sctps_lowlevelerr);
+ return (error);
+ }
+ m = endofchain = NULL;
+ auth = NULL;
+ auth_offset = 0;
+ /* For HB's */
+ /*
+ * We don't want to mark net->last_sent_time here,
+ * since we use it for HBs and a retransmission
+ * cannot measure RTT.
+ */
+ /* (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time); */
+
+ /* For auto-close */
+ cnt_thru++;
+ if (*now_filled == 0) {
+ (void)SCTP_GETTIME_TIMEVAL(&asoc->time_last_sent);
+ *now = asoc->time_last_sent;
+ *now_filled = 1;
+ } else {
+ asoc->time_last_sent = *now;
+ }
+ *cnt_out += bundle_at;
+#ifdef SCTP_AUDITING_ENABLED
+ sctp_audit_log(0xC4, bundle_at);
+#endif
+ if (bundle_at) {
+ tsns_sent = data_list[0]->rec.data.TSN_seq;
+ }
+ for (i = 0; i < bundle_at; i++) {
+ SCTP_STAT_INCR(sctps_sendretransdata);
+ data_list[i]->sent = SCTP_DATAGRAM_SENT;
+ /*
+ * When we retransmit revoked data, we
+ * clear the revoked flag, since this flag
+ * dictates whether it was subtracted from
+ * the flight size.
+ */
+ if (data_list[i]->rec.data.chunk_was_revoked) {
+ /* Deflate the cwnd */
+ data_list[i]->whoTo->cwnd -= data_list[i]->book_size;
+ data_list[i]->rec.data.chunk_was_revoked = 0;
+ }
+ data_list[i]->snd_count++;
+ sctp_ucount_decr(asoc->sent_queue_retran_cnt);
+ /* record the time */
+ data_list[i]->sent_rcv_time = asoc->time_last_sent;
+ if (data_list[i]->book_size_scale) {
+ /*
+ * need to double the book size on
+ * this one
+ */
+ data_list[i]->book_size_scale = 0;
+ /*
+ * Since we double the booksize, we
+ * must also double the output queue
+ * size, since it gets shrunk by this
+ * amount when we free.
+ */
+ atomic_add_int(&((asoc)->total_output_queue_size), data_list[i]->book_size);
+ data_list[i]->book_size *= 2;
+ } else {
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
+ sctp_log_rwnd(SCTP_DECREASE_PEER_RWND,
+ asoc->peers_rwnd, data_list[i]->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
+ }
+ asoc->peers_rwnd = sctp_sbspace_sub(asoc->peers_rwnd,
+ (uint32_t) (data_list[i]->send_size +
+ SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)));
+ }
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
+ sctp_misc_ints(SCTP_FLIGHT_LOG_UP_RSND,
+ data_list[i]->whoTo->flight_size,
+ data_list[i]->book_size,
+ (uintptr_t) data_list[i]->whoTo,
+ data_list[i]->rec.data.TSN_seq);
+ }
+ sctp_flight_size_increase(data_list[i]);
+ sctp_total_flight_increase(stcb, data_list[i]);
+ if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
+ /* SWS sender side engages */
+ asoc->peers_rwnd = 0;
+ }
+ if ((i == 0) &&
+ (data_list[i]->rec.data.doing_fast_retransmit)) {
+ SCTP_STAT_INCR(sctps_sendfastretrans);
+ if ((data_list[i] == TAILQ_FIRST(&asoc->sent_queue)) &&
+ (tmr_started == 0)) {
+ /*-
+ * ok, we just fast-retransmitted
+ * the lowest TSN, i.e. the
+ * first on the list. In
+ * this case we want to give
+ * some more time to get a
+ * SACK back without the
+ * T3 timer expiring.
+ */
+ sctp_timer_stop(SCTP_TIMER_TYPE_SEND, inp, stcb, net,
+ SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_4);
+ sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
+ }
+ }
+ }
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
+ sctp_log_cwnd(stcb, net, tsns_sent, SCTP_CWND_LOG_FROM_RESEND);
+ }
+#ifdef SCTP_AUDITING_ENABLED
+ sctp_auditing(21, inp, stcb, NULL);
+#endif
+ } else {
+ /* None will fit */
+ return (1);
+ }
+ if (asoc->sent_queue_retran_cnt <= 0) {
+ /* all done we have no more to retran */
+ asoc->sent_queue_retran_cnt = 0;
+ break;
+ }
+ if (one_chunk) {
+ /* No more room in rwnd */
+ return (1);
+ }
+ /* stop the for loop here. we sent out a packet */
+ break;
+ }
+ return (0);
+}
+
+
+static int
+sctp_timer_validation(struct sctp_inpcb *inp,
+ struct sctp_tcb *stcb,
+ struct sctp_association *asoc,
+ int ret)
+{
+ struct sctp_nets *net;
+
+ /* Validate that a timer is running somewhere */
+ TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+ if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
+ /* Here is a timer */
+ return (ret);
+ }
+ }
+ SCTP_TCB_LOCK_ASSERT(stcb);
+ /* Gak, we did not have a timer somewhere */
+ SCTPDBG(SCTP_DEBUG_OUTPUT3, "Deadlock avoided starting timer on a dest at retran\n");
+ sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, asoc->primary_destination);
+ return (ret);
+}
+
+void
+sctp_chunk_output(struct sctp_inpcb *inp,
+ struct sctp_tcb *stcb,
+ int from_where,
+ int so_locked
+#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
+ SCTP_UNUSED
+#endif
+)
+{
+ /*-
+ * Ok this is the generic chunk service queue. we must do the
+ * following:
+ * - See if there are retransmits pending, if so we must
+ * do these first.
+ * - Service the stream queue that is next, moving any
+ * message (note I must get a complete message i.e.
+ * FIRST/MIDDLE and LAST to the out queue in one pass) and assigning
+ * TSN's
+ * - Check to see if the cwnd/rwnd allows any output, if so we
+ * go ahead and formulate and send the low level chunks, making
+ * sure to combine any control in the control chunk queue also.
+ */
+ struct sctp_association *asoc;
+ struct sctp_nets *net;
+ int error = 0, num_out = 0, tot_out = 0, ret = 0, reason_code = 0,
+ burst_cnt = 0, burst_limit = 0;
+ struct timeval now;
+ int now_filled = 0;
+ int nagle_on = 0;
+ int frag_point = sctp_get_frag_point(stcb, &stcb->asoc);
+ int un_sent = 0;
+ int fr_done, tot_frs = 0;
+
+ asoc = &stcb->asoc;
+ if (from_where == SCTP_OUTPUT_FROM_USR_SEND) {
+ if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NODELAY)) {
+ nagle_on = 0;
+ } else {
+ nagle_on = 1;
+ }
+ }
+ SCTP_TCB_LOCK_ASSERT(stcb);
+
+ un_sent = (stcb->asoc.total_output_queue_size - stcb->asoc.total_flight);
+
+ if ((un_sent <= 0) &&
+ (TAILQ_EMPTY(&asoc->control_send_queue)) &&
+ (TAILQ_EMPTY(&asoc->asconf_send_queue)) &&
+ (asoc->sent_queue_retran_cnt == 0)) {
+ /* Nothing to do unless there is something left to be sent */
+ return;
+ }
+ /*
+ * If we have something to send, data or control, AND a SACK
+ * timer is running, piggy-back the SACK.
+ */
+ if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
+ sctp_send_sack(stcb);
+ (void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
+ }
+ while (asoc->sent_queue_retran_cnt) {
+ /*-
+ * Ok, it is retransmission time only, we send out only ONE
+ * packet with a single call off to the retran code.
+ */
+ if (from_where == SCTP_OUTPUT_FROM_COOKIE_ACK) {
+ /*-
+ * Special hook for handling cookies discarded
+ * by the peer that carried data. Send only the
+ * COOKIE-ACK; the next call will get the retrans.
+ */
+ (void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1,
+ from_where,
+ &now, &now_filled, frag_point, so_locked);
+ return;
+ } else if (from_where != SCTP_OUTPUT_FROM_HB_TMR) {
+ /* if its not from a HB then do it */
+ fr_done = 0;
+ ret = sctp_chunk_retransmission(inp, stcb, asoc, &num_out, &now, &now_filled, &fr_done, so_locked);
+ if (fr_done) {
+ tot_frs++;
+ }
+ } else {
+ /*
+ * it's from any other place, we don't allow retran
+ * output (only control)
+ */
+ ret = 1;
+ }
+ if (ret > 0) {
+ /* Can't send anymore */
+ /*-
+ * now let's push out control by calling med-level
+ * output once. This assures that we WILL send HBs
+ * if they are queued too.
+ */
+ (void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1,
+ from_where,
+ &now, &now_filled, frag_point, so_locked);
+#ifdef SCTP_AUDITING_ENABLED
+ sctp_auditing(8, inp, stcb, NULL);
+#endif
+ (void)sctp_timer_validation(inp, stcb, asoc, ret);
+ return;
+ }
+ if (ret < 0) {
+ /*-
+ * The count was off; no retransmission is actually
+ * pending, so fall through to normal transmission.
+ */
+#ifdef SCTP_AUDITING_ENABLED
+ sctp_auditing(9, inp, stcb, NULL);
+#endif
+ if (ret == SCTP_RETRAN_EXIT) {
+ return;
+ }
+ break;
+ }
+ if (from_where == SCTP_OUTPUT_FROM_T3) {
+ /* Only one transmission allowed out of a timeout */
+#ifdef SCTP_AUDITING_ENABLED
+ sctp_auditing(10, inp, stcb, NULL);
+#endif
+ /* Push out any control */
+ (void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1, from_where,
+ &now, &now_filled, frag_point, so_locked);
+ return;
+ }
+ if (tot_frs > asoc->max_burst) {
+ /* Hit FR burst limit */
+ return;
+ }
+ if ((num_out == 0) && (ret == 0)) {
+ /* No more retrans to send */
+ break;
+ }
+ }
+#ifdef SCTP_AUDITING_ENABLED
+ sctp_auditing(12, inp, stcb, NULL);
+#endif
+ /* Check for bad destinations, if they exist move chunks around. */
+ burst_limit = asoc->max_burst;
+ TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+ if ((net->dest_state & SCTP_ADDR_NOT_REACHABLE) ==
+ SCTP_ADDR_NOT_REACHABLE) {
+ /*-
+ * If possible move things off of this address. We
+ * may still send below due to the dormant state, but
+ * we try to find an alternate address to send to,
+ * and if we have one we move all queued data on the
+ * out wheel to this alternate address.
+ */
+ if (net->ref_count > 1)
+ sctp_move_chunks_from_net(stcb, net);
+ } else if ((asoc->sctp_cmt_on_off == 1) &&
+ (asoc->sctp_cmt_pf > 0) &&
+ ((net->dest_state & SCTP_ADDR_PF) == SCTP_ADDR_PF)) {
+ /*
+ * JRS 5/14/07 - If CMT PF is on and the current
+ * destination is in PF state, move all queued data
+ * to an alternate destination.
+ */
+ if (net->ref_count > 1)
+ sctp_move_chunks_from_net(stcb, net);
+ } else {
+ /*-
+ * if ((asoc->sat_network) || (net->addr_is_local))
+ * { burst_limit = asoc->max_burst *
+ * SCTP_SAT_NETWORK_BURST_INCR; }
+ */
+ if (SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst)) {
+ if ((net->flight_size + (burst_limit * net->mtu)) < net->cwnd) {
+ /*
+ * JRS - Use the congestion control
+ * given in the congestion control
+ * module
+ */
+ asoc->cc_functions.sctp_cwnd_update_after_output(stcb, net, burst_limit);
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) {
+ sctp_log_maxburst(stcb, net, 0, burst_limit, SCTP_MAX_BURST_APPLIED);
+ }
+ SCTP_STAT_INCR(sctps_maxburstqueued);
+ }
+ net->fast_retran_ip = 0;
+ } else {
+ if (net->flight_size == 0) {
+ /* Should be decaying the cwnd here */
+ ;
+ }
+ }
+ }
+ }
+ burst_cnt = 0;
+ do {
+ error = sctp_med_chunk_output(inp, stcb, asoc, &num_out,
+ &reason_code, 0, from_where,
+ &now, &now_filled, frag_point, so_locked);
+ if (error) {
+ SCTPDBG(SCTP_DEBUG_OUTPUT1, "Error %d was returned from med-c-op\n", error);
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) {
+ sctp_log_maxburst(stcb, asoc->primary_destination, error, burst_cnt, SCTP_MAX_BURST_ERROR_STOP);
+ }
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
+ sctp_log_cwnd(stcb, NULL, error, SCTP_SEND_NOW_COMPLETES);
+ sctp_log_cwnd(stcb, NULL, 0xdeadbeef, SCTP_SEND_NOW_COMPLETES);
+ }
+ break;
+ }
+ SCTPDBG(SCTP_DEBUG_OUTPUT3, "m-c-o put out %d\n", num_out);
+
+ tot_out += num_out;
+ burst_cnt++;
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
+ sctp_log_cwnd(stcb, NULL, num_out, SCTP_SEND_NOW_COMPLETES);
+ if (num_out == 0) {
+ sctp_log_cwnd(stcb, NULL, reason_code, SCTP_SEND_NOW_COMPLETES);
+ }
+ }
+ if (nagle_on) {
+ /*-
+ * When nagle is on, we look at how much is un_sent, then
+ * if it's smaller than an MTU and we have data in
+ * flight we stop.
+ */
+ un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
+ (stcb->asoc.stream_queue_cnt * sizeof(struct sctp_data_chunk)));
+ if ((un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD)) &&
+ (stcb->asoc.total_flight > 0)) {
+ break;
+ }
+ }
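+ /*
+ * Nagle sketch (hypothetical): with smallest_mtu = 1500, if only a
+ * few hundred bytes remain unsent while data is still in flight,
+ * we stop here and wait for a SACK instead of emitting a runt.
+ */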
+ if (TAILQ_EMPTY(&asoc->control_send_queue) &&
+ TAILQ_EMPTY(&asoc->send_queue) &&
+ TAILQ_EMPTY(&asoc->out_wheel)) {
+ /* Nothing left to send */
+ break;
+ }
+ if ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) <= 0) {
+ /* Nothing left to send */
+ break;
+ }
+ } while (num_out && (SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst) ||
+ (burst_cnt < burst_limit)));
+
+ if (SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst) == 0) {
+ if (burst_cnt >= burst_limit) {
+ SCTP_STAT_INCR(sctps_maxburstqueued);
+ asoc->burst_limit_applied = 1;
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) {
+ sctp_log_maxburst(stcb, asoc->primary_destination, 0, burst_cnt, SCTP_MAX_BURST_APPLIED);
+ }
+ } else {
+ asoc->burst_limit_applied = 0;
+ }
+ }
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
+ sctp_log_cwnd(stcb, NULL, tot_out, SCTP_SEND_NOW_COMPLETES);
+ }
+ SCTPDBG(SCTP_DEBUG_OUTPUT1, "Ok, we have put out %d chunks\n",
+ tot_out);
+
+ /*-
+ * Now we need to clean up the control chunk chain if an ECNE is on
+ * it. It must be marked as UNSENT again so the next call will
+ * continue to send it until we get a CWR to remove it.
+ */
+ if (stcb->asoc.ecn_echo_cnt_onq)
+ sctp_fix_ecn_echo(asoc);
+ return;
+}
+
+
+int
+sctp_output(struct sctp_inpcb *inp, struct mbuf *m, struct sockaddr *addr,
+    struct mbuf *control, struct thread *p, int flags)
+{
+ if (inp == NULL) {
+ SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
+ return (EINVAL);
+ }
+ if (inp->sctp_socket == NULL) {
+ SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
+ return (EINVAL);
+ }
+ return (sctp_sosend(inp->sctp_socket,
+ addr,
+ (struct uio *)NULL,
+ m,
+ control,
+ flags, p
+ ));
+}
+
+void
+send_forward_tsn(struct sctp_tcb *stcb,
+ struct sctp_association *asoc)
+{
+ struct sctp_tmit_chunk *chk;
+ struct sctp_forward_tsn_chunk *fwdtsn;
+ uint32_t advance_peer_ack_point;
+
+ SCTP_TCB_LOCK_ASSERT(stcb);
+ TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
+ if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) {
+ /* mark it as unsent */
+ chk->sent = SCTP_DATAGRAM_UNSENT;
+ chk->snd_count = 0;
+ /* Do we correct its output location? */
+ if (chk->whoTo != asoc->primary_destination) {
+ sctp_free_remote_addr(chk->whoTo);
+ chk->whoTo = asoc->primary_destination;
+ atomic_add_int(&chk->whoTo->ref_count, 1);
+ }
+ goto sctp_fill_in_rest;
+ }
+ }
+ /* Ok if we reach here we must build one */
+ sctp_alloc_a_chunk(stcb, chk);
+ if (chk == NULL) {
+ return;
+ }
+ asoc->fwd_tsn_cnt++;
+ chk->copy_by_ref = 0;
+ chk->rec.chunk_id.id = SCTP_FORWARD_CUM_TSN;
+ chk->rec.chunk_id.can_take_data = 0;
+ chk->asoc = asoc;
+ chk->whoTo = NULL;
+ chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
+ if (chk->data == NULL) {
+ sctp_free_a_chunk(stcb, chk);
+ return;
+ }
+ SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
+ chk->sent = SCTP_DATAGRAM_UNSENT;
+ chk->snd_count = 0;
+ chk->whoTo = asoc->primary_destination;
+ atomic_add_int(&chk->whoTo->ref_count, 1);
+ TAILQ_INSERT_TAIL(&asoc->control_send_queue, chk, sctp_next);
+ asoc->ctrl_queue_cnt++;
+sctp_fill_in_rest:
+ /*-
+ * Here we go through and fill out the part that deals with
+ * stream/seq of the ones we skip.
+ */
+ SCTP_BUF_LEN(chk->data) = 0;
+ {
+ struct sctp_tmit_chunk *at, *tp1, *last;
+ struct sctp_strseq *strseq;
+ unsigned int cnt_of_space, i, ovh;
+ unsigned int space_needed;
+ unsigned int cnt_of_skipped = 0;
+
+ TAILQ_FOREACH(at, &asoc->sent_queue, sctp_next) {
+ if (at->sent != SCTP_FORWARD_TSN_SKIP) {
+ /* no more to look at */
+ break;
+ }
+ if (at->rec.data.rcv_flags & SCTP_DATA_UNORDERED) {
+ /* We don't report these */
+ continue;
+ }
+ cnt_of_skipped++;
+ }
+ space_needed = (sizeof(struct sctp_forward_tsn_chunk) +
+ (cnt_of_skipped * sizeof(struct sctp_strseq)));
+
+ cnt_of_space = M_TRAILINGSPACE(chk->data);
+
+ if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
+ ovh = SCTP_MIN_OVERHEAD;
+ } else {
+ ovh = SCTP_MIN_V4_OVERHEAD;
+ }
+ if (cnt_of_space > (asoc->smallest_mtu - ovh)) {
+ /* trim to a mtu size */
+ cnt_of_space = asoc->smallest_mtu - ovh;
+ }
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
+ sctp_misc_ints(SCTP_FWD_TSN_CHECK,
+ 0xff, 0, cnt_of_skipped,
+ asoc->advanced_peer_ack_point);
+ }
+ advance_peer_ack_point = asoc->advanced_peer_ack_point;
+ if (cnt_of_space < space_needed) {
+ /*-
+ * ok we must trim down the chunk by lowering the
+ * advance peer ack point.
+ */
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
+ sctp_misc_ints(SCTP_FWD_TSN_CHECK,
+ 0xff, 0xff, cnt_of_space,
+ space_needed);
+ }
+ cnt_of_skipped = cnt_of_space - sizeof(struct sctp_forward_tsn_chunk);
+ cnt_of_skipped /= sizeof(struct sctp_strseq);
+ /*-
+ * Go through and find the TSN that will be the one
+ * we report.
+ */
+ at = TAILQ_FIRST(&asoc->sent_queue);
+ for (i = 0; i < cnt_of_skipped; i++) {
+ tp1 = TAILQ_NEXT(at, sctp_next);
+ if (tp1 == NULL) {
+ break;
+ }
+ at = tp1;
+ }
+ if (at && SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
+ sctp_misc_ints(SCTP_FWD_TSN_CHECK,
+ 0xff, cnt_of_skipped, at->rec.data.TSN_seq,
+ asoc->advanced_peer_ack_point);
+ }
+ last = at;
+ /*-
+ * last now points to the last one we can report;
+ * update the peer ack point
+ */
+ if (last)
+ advance_peer_ack_point = last->rec.data.TSN_seq;
+ space_needed = sizeof(struct sctp_forward_tsn_chunk) +
+ cnt_of_skipped * sizeof(struct sctp_strseq);
+ }
+ chk->send_size = space_needed;
+ /* Setup the chunk */
+ fwdtsn = mtod(chk->data, struct sctp_forward_tsn_chunk *);
+ fwdtsn->ch.chunk_length = htons(chk->send_size);
+ fwdtsn->ch.chunk_flags = 0;
+ fwdtsn->ch.chunk_type = SCTP_FORWARD_CUM_TSN;
+ fwdtsn->new_cumulative_tsn = htonl(advance_peer_ack_point);
+ SCTP_BUF_LEN(chk->data) = chk->send_size;
+ fwdtsn++;
+ /*-
+ * Move pointer to after the fwdtsn and transfer to the
+ * strseq pointer.
+ */
+ strseq = (struct sctp_strseq *)fwdtsn;
+ /*-
+ * Now populate the strseq list. This is done blindly
+ * without pulling out duplicate stream info. This is
+ * inefficient but won't harm the process since the peer will
+ * look at these in sequence and will thus release anything.
+ * It could mean we exceed the PMTU and chop off some that
+ * we could have included, but this is unlikely (aka 1432/4
+ * would mean 300+ stream seq's would have to be reported in
+ * one FWD-TSN). With a bit of work we can later FIX this to
+ * optimize and pull out duplicates, but it does add more
+ * overhead. So for now... not!
+ */
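+ /*
+ * Sizing sketch: each sctp_strseq entry is two 16-bit fields, so
+ * the 1432/4 figure above corresponds to roughly 350 stream/seq
+ * pairs fitting in a single FWD-TSN before the PMTU is reached.
+ */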
+ at = TAILQ_FIRST(&asoc->sent_queue);
+ for (i = 0; i < cnt_of_skipped; i++) {
+ tp1 = TAILQ_NEXT(at, sctp_next);
+ if (tp1 == NULL)
+ break;
+ if (at->rec.data.rcv_flags & SCTP_DATA_UNORDERED) {
+ /* We don't report these */
+ i--;
+ at = tp1;
+ continue;
+ }
+ if (at->rec.data.TSN_seq == advance_peer_ack_point) {
+ at->rec.data.fwd_tsn_cnt = 0;
+ }
+ strseq->stream = ntohs(at->rec.data.stream_number);
+ strseq->sequence = ntohs(at->rec.data.stream_seq);
+ strseq++;
+ at = tp1;
+ }
+ }
+ return;
+}
+
+void
+sctp_send_sack(struct sctp_tcb *stcb)
+{
+ /*-
+ * Queue up a SACK or NR-SACK in the control queue.
+ * We must first check to see if a SACK or NR-SACK is
+ * somehow on the control queue.
+ * If so, we will take and remove the old one.
+ */
+ struct sctp_association *asoc;
+ struct sctp_tmit_chunk *chk, *a_chk;
+ struct sctp_sack_chunk *sack;
+ struct sctp_nr_sack_chunk *nr_sack;
+ struct sctp_gap_ack_block *gap_descriptor;
+ struct sack_track *selector;
+ int mergeable = 0;
+ int offset;
+ caddr_t limit;
+ uint32_t *dup;
+ int limit_reached = 0;
+ unsigned int i, siz, j;
+ unsigned int num_gap_blocks = 0, num_nr_gap_blocks = 0, space;
+ int num_dups = 0;
+ int space_req;
+ uint32_t highest_tsn;
+ uint8_t flags;
+ uint8_t type;
+ uint8_t tsn_map;
+
+ if ((stcb->asoc.sctp_nr_sack_on_off == 1) &&
+ (stcb->asoc.peer_supports_nr_sack == 1)) {
+ type = SCTP_NR_SELECTIVE_ACK;
+ } else {
+ type = SCTP_SELECTIVE_ACK;
+ }
+ a_chk = NULL;
+ asoc = &stcb->asoc;
+ SCTP_TCB_LOCK_ASSERT(stcb);
+ if (asoc->last_data_chunk_from == NULL) {
+ /* Hmm we never received anything */
+ return;
+ }
+ sctp_slide_mapping_arrays(stcb);
+ sctp_set_rwnd(stcb, asoc);
+ TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
+ if (chk->rec.chunk_id.id == type) {
+ /* Hmm, found a sack already on queue, remove it */
+ TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
+ asoc->ctrl_queue_cnt--;
+ a_chk = chk;
+ if (a_chk->data) {
+ sctp_m_freem(a_chk->data);
+ a_chk->data = NULL;
+ }
+ sctp_free_remote_addr(a_chk->whoTo);
+ a_chk->whoTo = NULL;
+ break;
+ }
+ }
+ if (a_chk == NULL) {
+ sctp_alloc_a_chunk(stcb, a_chk);
+ if (a_chk == NULL) {
+ /* No memory so we drop the idea, and set a timer */
+ if (stcb->asoc.delayed_ack) {
+ sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
+ stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_5);
+ sctp_timer_start(SCTP_TIMER_TYPE_RECV,
+ stcb->sctp_ep, stcb, NULL);
+ } else {
+ stcb->asoc.send_sack = 1;
+ }
+ return;
+ }
+ a_chk->copy_by_ref = 0;
+ a_chk->rec.chunk_id.id = type;
+ a_chk->rec.chunk_id.can_take_data = 1;
+ }
+ /* Clear our pkt counts */
+ asoc->data_pkts_seen = 0;
+
+ a_chk->asoc = asoc;
+ a_chk->snd_count = 0;
+ a_chk->send_size = 0; /* fill in later */
+ a_chk->sent = SCTP_DATAGRAM_UNSENT;
+ a_chk->whoTo = NULL;
+
+ if ((asoc->numduptsns) ||
+ (asoc->last_data_chunk_from->dest_state & SCTP_ADDR_NOT_REACHABLE)) {
+ /*-
+ * Ok, we have some duplicates or the destination for the
+ * sack is unreachable, let's see if we can select an
+ * alternate to asoc->last_data_chunk_from
+ */
+ if ((!(asoc->last_data_chunk_from->dest_state & SCTP_ADDR_NOT_REACHABLE)) &&
+ (asoc->used_alt_onsack > asoc->numnets)) {
+ /* We used an alternate last time, don't use one this time */
+ a_chk->whoTo = NULL;
+ } else {
+ asoc->used_alt_onsack++;
+ a_chk->whoTo = sctp_find_alternate_net(stcb, asoc->last_data_chunk_from, 0);
+ }
+ if (a_chk->whoTo == NULL) {
+ /* Nope, no alternate */
+ a_chk->whoTo = asoc->last_data_chunk_from;
+ asoc->used_alt_onsack = 0;
+ }
+ } else {
+ /*
+ * No duplicates so we use the last place we received data
+ * from.
+ */
+ asoc->used_alt_onsack = 0;
+ a_chk->whoTo = asoc->last_data_chunk_from;
+ }
+ if (a_chk->whoTo) {
+ atomic_add_int(&a_chk->whoTo->ref_count, 1);
+ }
+ if (compare_with_wrap(asoc->highest_tsn_inside_map, asoc->highest_tsn_inside_nr_map, MAX_TSN)) {
+ highest_tsn = asoc->highest_tsn_inside_map;
+ } else {
+ highest_tsn = asoc->highest_tsn_inside_nr_map;
+ }
+ if (highest_tsn == asoc->cumulative_tsn) {
+ /* no gaps */
+ if (type == SCTP_SELECTIVE_ACK) {
+ space_req = sizeof(struct sctp_sack_chunk);
+ } else {
+ space_req = sizeof(struct sctp_nr_sack_chunk);
+ }
+ } else {
+ /* gaps get a cluster */
+ space_req = MCLBYTES;
+ }
+ /* Ok, now let's formulate an mbuf with our sack */
+ a_chk->data = sctp_get_mbuf_for_msg(space_req, 0, M_DONTWAIT, 1, MT_DATA);
+ if ((a_chk->data == NULL) ||
+ (a_chk->whoTo == NULL)) {
+ /* rats, no mbuf memory */
+ if (a_chk->data) {
+ /* was a problem with the destination */
+ sctp_m_freem(a_chk->data);
+ a_chk->data = NULL;
+ }
+ sctp_free_a_chunk(stcb, a_chk);
+ /* sa_ignore NO_NULL_CHK */
+ if (stcb->asoc.delayed_ack) {
+ sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
+ stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_6);
+ sctp_timer_start(SCTP_TIMER_TYPE_RECV,
+ stcb->sctp_ep, stcb, NULL);
+ } else {
+ stcb->asoc.send_sack = 1;
+ }
+ return;
+ }
+ /* ok, lets go through and fill it in */
+ SCTP_BUF_RESV_UF(a_chk->data, SCTP_MIN_OVERHEAD);
+ space = M_TRAILINGSPACE(a_chk->data);
+ if (space > (a_chk->whoTo->mtu - SCTP_MIN_OVERHEAD)) {
+ space = (a_chk->whoTo->mtu - SCTP_MIN_OVERHEAD);
+ }
+ limit = mtod(a_chk->data, caddr_t);
+ limit += space;
+
+ /* 0x01 is used by nonce for ecn */
+ if ((SCTP_BASE_SYSCTL(sctp_ecn_enable)) &&
+ (SCTP_BASE_SYSCTL(sctp_ecn_nonce)) &&
+ (asoc->peer_supports_ecn_nonce))
+ flags = (asoc->receiver_nonce_sum & SCTP_SACK_NONCE_SUM);
+ else
+ flags = 0;
+
+ if ((asoc->sctp_cmt_on_off == 1) &&
+ SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
+ /*-
+ * CMT DAC algorithm: If 2 (i.e., binary 10) packets have been
+ * received, then set high bit to 1, else 0. Reset
+ * pkts_rcvd.
+ */
+ flags |= (asoc->cmt_dac_pkts_rcvd << 6);
+ asoc->cmt_dac_pkts_rcvd = 0;
+ }
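+ /*
+ * Bit sketch: cmt_dac_pkts_rcvd is shifted left by 6, so a count
+ * of 2 (binary 10) sets the high bit of the 8-bit chunk flags,
+ * which is the signal the DAC algorithm hands to the sender.
+ */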
+#ifdef SCTP_ASOCLOG_OF_TSNS
+ stcb->asoc.cumack_logsnt[stcb->asoc.cumack_log_atsnt] = asoc->cumulative_tsn;
+ stcb->asoc.cumack_log_atsnt++;
+ if (stcb->asoc.cumack_log_atsnt >= SCTP_TSN_LOG_SIZE) {
+ stcb->asoc.cumack_log_atsnt = 0;
+ }
+#endif
+ /* reset the reader's interpretation */
+ stcb->freed_by_sorcv_sincelast = 0;
+
+ if (type == SCTP_SELECTIVE_ACK) {
+ sack = mtod(a_chk->data, struct sctp_sack_chunk *);
+ nr_sack = NULL;
+ gap_descriptor = (struct sctp_gap_ack_block *)((caddr_t)sack + sizeof(struct sctp_sack_chunk));
+ if (highest_tsn > asoc->mapping_array_base_tsn) {
+ siz = (((highest_tsn - asoc->mapping_array_base_tsn) + 1) + 7) / 8;
+ } else {
+ siz = (((MAX_TSN - highest_tsn) + 1) + highest_tsn + 7) / 8;
+ }
+ } else {
+ sack = NULL;
+ nr_sack = mtod(a_chk->data, struct sctp_nr_sack_chunk *);
+ gap_descriptor = (struct sctp_gap_ack_block *)((caddr_t)nr_sack + sizeof(struct sctp_nr_sack_chunk));
+ if (asoc->highest_tsn_inside_map > asoc->mapping_array_base_tsn) {
+ siz = (((asoc->highest_tsn_inside_map - asoc->mapping_array_base_tsn) + 1) + 7) / 8;
+ } else {
+ siz = (((MAX_TSN - asoc->mapping_array_base_tsn) + 1) + asoc->highest_tsn_inside_map + 7) / 8;
+ }
+ }
+
+ if (compare_with_wrap(asoc->mapping_array_base_tsn, asoc->cumulative_tsn, MAX_TSN)) {
+ offset = 1;
+ } else {
+ offset = asoc->mapping_array_base_tsn - asoc->cumulative_tsn;
+ }
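+ /*
+ * Offset sketch (hypothetical TSNs): with cumulative_tsn = 100 and
+ * mapping_array_base_tsn = 101, offset is 1; a received TSN of 103
+ * sits at bit 2 of mapping_array[0] and is reported as gap block
+ * value 3, i.e. relative to the cumulative TSN as required.
+ */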
+ if (((type == SCTP_SELECTIVE_ACK) &&
+ compare_with_wrap(highest_tsn, asoc->cumulative_tsn, MAX_TSN)) ||
+ ((type == SCTP_NR_SELECTIVE_ACK) &&
+ compare_with_wrap(asoc->highest_tsn_inside_map, asoc->cumulative_tsn, MAX_TSN))) {
+ /* we have a gap .. maybe */
+ for (i = 0; i < siz; i++) {
+ tsn_map = asoc->mapping_array[i];
+ if (type == SCTP_SELECTIVE_ACK) {
+ tsn_map |= asoc->nr_mapping_array[i];
+ }
+ if (i == 0) {
+ /*
+ * Clear all bits corresponding to TSNs
+ * smaller or equal to the cumulative TSN.
+ */
+ tsn_map &= (~0 << (1 - offset));
+ }
+ selector = &sack_array[tsn_map];
+ if (mergeable && selector->right_edge) {
+ /*
+ * Backup, left and right edges were ok to
+ * merge.
+ */
+ num_gap_blocks--;
+ gap_descriptor--;
+ }
+ if (selector->num_entries == 0)
+ mergeable = 0;
+ else {
+ for (j = 0; j < selector->num_entries; j++) {
+ if (mergeable && selector->right_edge) {
+ /*
+ * do a merge by NOT setting
+ * the left side
+ */
+ mergeable = 0;
+ } else {
+ /*
+ * no merge, set the left
+ * side
+ */
+ mergeable = 0;
+ gap_descriptor->start = htons((selector->gaps[j].start + offset));
+ }
+ gap_descriptor->end = htons((selector->gaps[j].end + offset));
+ num_gap_blocks++;
+ gap_descriptor++;
+ if (((caddr_t)gap_descriptor + sizeof(struct sctp_gap_ack_block)) > limit) {
+ /* no more room */
+ limit_reached = 1;
+ break;
+ }
+ }
+ if (selector->left_edge) {
+ mergeable = 1;
+ }
+ }
+ if (limit_reached) {
+ /* Reached the limit, stop */
+ break;
+ }
+ offset += 8;
+ }
+ }
+ if ((type == SCTP_NR_SELECTIVE_ACK) &&
+ (limit_reached == 0)) {
+ mergeable = 0;
+
+ if (asoc->highest_tsn_inside_nr_map > asoc->mapping_array_base_tsn) {
+ siz = (((asoc->highest_tsn_inside_nr_map - asoc->mapping_array_base_tsn) + 1) + 7) / 8;
+ } else {
+ siz = (((MAX_TSN - asoc->mapping_array_base_tsn) + 1) + asoc->highest_tsn_inside_nr_map + 7) / 8;
+ }
+
+ if (compare_with_wrap(asoc->mapping_array_base_tsn, asoc->cumulative_tsn, MAX_TSN)) {
+ offset = 1;
+ } else {
+ offset = asoc->mapping_array_base_tsn - asoc->cumulative_tsn;
+ }
+ if (compare_with_wrap(asoc->highest_tsn_inside_nr_map, asoc->cumulative_tsn, MAX_TSN)) {
+ /* we have a gap .. maybe */
+ for (i = 0; i < siz; i++) {
+ tsn_map = asoc->nr_mapping_array[i];
+ if (i == 0) {
+ /*
+ * Clear all bits corresponding to
+ * TSNs smaller or equal to the
+ * cumulative TSN.
+ */
+ tsn_map &= (~0 << (1 - offset));
+ }
+ selector = &sack_array[tsn_map];
+ if (mergeable && selector->right_edge) {
+ /*
+ * Backup, left and right edges were
+ * ok to merge.
+ */
+ num_nr_gap_blocks--;
+ gap_descriptor--;
+ }
+ if (selector->num_entries == 0)
+ mergeable = 0;
+ else {
+ for (j = 0; j < selector->num_entries; j++) {
+ if (mergeable && selector->right_edge) {
+ /*
+ * do a merge by NOT
+ * setting the left
+ * side
+ */
+ mergeable = 0;
+ } else {
+ /*
+ * no merge, set the
+ * left side
+ */
+ mergeable = 0;
+ gap_descriptor->start = htons((selector->gaps[j].start + offset));
+ }
+ gap_descriptor->end = htons((selector->gaps[j].end + offset));
+ num_nr_gap_blocks++;
+ gap_descriptor++;
+ if (((caddr_t)gap_descriptor + sizeof(struct sctp_gap_ack_block)) > limit) {
+ /* no more room */
+ limit_reached = 1;
+ break;
+ }
+ }
+ if (selector->left_edge) {
+ mergeable = 1;
+ }
+ }
+ if (limit_reached) {
+ /* Reached the limit, stop */
+ break;
+ }
+ offset += 8;
+ }
+ }
+ }
+ /* now we must add any dups we are going to report. */
+ if ((limit_reached == 0) && (asoc->numduptsns)) {
+ dup = (uint32_t *) gap_descriptor;
+ for (i = 0; i < asoc->numduptsns; i++) {
+ *dup = htonl(asoc->dup_tsns[i]);
+ dup++;
+ num_dups++;
+ if (((caddr_t)dup + sizeof(uint32_t)) > limit) {
+ /* no more room */
+ break;
+ }
+ }
+ asoc->numduptsns = 0;
+ }
+ /*
+ * now that the chunk is prepared queue it to the control chunk
+ * queue.
+ */
+ if (type == SCTP_SELECTIVE_ACK) {
+ a_chk->send_size = sizeof(struct sctp_sack_chunk) +
+ (num_gap_blocks + num_nr_gap_blocks) * sizeof(struct sctp_gap_ack_block) +
+ num_dups * sizeof(int32_t);
+ SCTP_BUF_LEN(a_chk->data) = a_chk->send_size;
+ sack->sack.cum_tsn_ack = htonl(asoc->cumulative_tsn);
+ sack->sack.a_rwnd = htonl(asoc->my_rwnd);
+ sack->sack.num_gap_ack_blks = htons(num_gap_blocks);
+ sack->sack.num_dup_tsns = htons(num_dups);
+ sack->ch.chunk_type = type;
+ sack->ch.chunk_flags = flags;
+ sack->ch.chunk_length = htons(a_chk->send_size);
+ } else {
+ a_chk->send_size = sizeof(struct sctp_nr_sack_chunk) +
+ (num_gap_blocks + num_nr_gap_blocks) * sizeof(struct sctp_gap_ack_block) +
+ num_dups * sizeof(int32_t);
+ SCTP_BUF_LEN(a_chk->data) = a_chk->send_size;
+ nr_sack->nr_sack.cum_tsn_ack = htonl(asoc->cumulative_tsn);
+ nr_sack->nr_sack.a_rwnd = htonl(asoc->my_rwnd);
+ nr_sack->nr_sack.num_gap_ack_blks = htons(num_gap_blocks);
+ nr_sack->nr_sack.num_nr_gap_ack_blks = htons(num_nr_gap_blocks);
+ nr_sack->nr_sack.num_dup_tsns = htons(num_dups);
+ nr_sack->nr_sack.reserved = 0;
+ nr_sack->ch.chunk_type = type;
+ nr_sack->ch.chunk_flags = flags;
+ nr_sack->ch.chunk_length = htons(a_chk->send_size);
+ }
+ TAILQ_INSERT_TAIL(&asoc->control_send_queue, a_chk, sctp_next);
+ asoc->my_last_reported_rwnd = asoc->my_rwnd;
+ asoc->ctrl_queue_cnt++;
+ asoc->send_sack = 0;
+ SCTP_STAT_INCR(sctps_sendsacks);
+ return;
+}
+
+void
+sctp_send_abort_tcb(struct sctp_tcb *stcb, struct mbuf *operr, int so_locked
+#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
+ SCTP_UNUSED
+#endif
+)
+{
+ struct mbuf *m_abort;
+ struct mbuf *m_out = NULL, *m_end = NULL;
+ struct sctp_abort_chunk *abort = NULL;
+ int sz;
+ uint32_t auth_offset = 0;
+ struct sctp_auth_chunk *auth = NULL;
+
+ /*-
+ * Add an AUTH chunk if this chunk requires it, and save the offset
+ * into the chain for the AUTH chunk.
+ */
+ if (sctp_auth_is_required_chunk(SCTP_ABORT_ASSOCIATION,
+ stcb->asoc.peer_auth_chunks)) {
+ m_out = sctp_add_auth_chunk(m_out, &m_end, &auth, &auth_offset,
+ stcb, SCTP_ABORT_ASSOCIATION);
+ SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
+ }
+ SCTP_TCB_LOCK_ASSERT(stcb);
+ m_abort = sctp_get_mbuf_for_msg(sizeof(struct sctp_abort_chunk), 0, M_DONTWAIT, 1, MT_HEADER);
+ if (m_abort == NULL) {
+ /* no mbuf's */
+ if (m_out)
+ sctp_m_freem(m_out);
+ return;
+ }
+ /* link in any error */
+ SCTP_BUF_NEXT(m_abort) = operr;
+ sz = 0;
+ if (operr) {
+ struct mbuf *n;
+
+ n = operr;
+ while (n) {
+ sz += SCTP_BUF_LEN(n);
+ n = SCTP_BUF_NEXT(n);
+ }
+ }
+ SCTP_BUF_LEN(m_abort) = sizeof(*abort);
+ if (m_out == NULL) {
+ /* NO Auth chunk prepended, so reserve space in front */
+ SCTP_BUF_RESV_UF(m_abort, SCTP_MIN_OVERHEAD);
+ m_out = m_abort;
+ } else {
+ /* Put AUTH chunk at the front of the chain */
+ SCTP_BUF_NEXT(m_end) = m_abort;
+ }
+
+ /* fill in the ABORT chunk */
+ abort = mtod(m_abort, struct sctp_abort_chunk *);
+ abort->ch.chunk_type = SCTP_ABORT_ASSOCIATION;
+ abort->ch.chunk_flags = 0;
+ abort->ch.chunk_length = htons(sizeof(*abort) + sz);
+
+ (void)sctp_lowlevel_chunk_output(stcb->sctp_ep, stcb,
+ stcb->asoc.primary_destination,
+ (struct sockaddr *)&stcb->asoc.primary_destination->ro._l_addr,
+ m_out, auth_offset, auth, stcb->asoc.authinfo.active_keyid, 1, 0, NULL, 0,
+ stcb->sctp_ep->sctp_lport, stcb->rport, htonl(stcb->asoc.peer_vtag),
+ stcb->asoc.primary_destination->port, so_locked, NULL);
+ SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
+}
+
+void
+sctp_send_shutdown_complete(struct sctp_tcb *stcb,
+ struct sctp_nets *net,
+ int reflect_vtag)
+{
+ /* formulate and SEND a SHUTDOWN-COMPLETE */
+ struct mbuf *m_shutdown_comp;
+ struct sctp_shutdown_complete_chunk *shutdown_complete;
+ uint32_t vtag;
+ uint8_t flags;
+
+ m_shutdown_comp = sctp_get_mbuf_for_msg(sizeof(struct sctp_chunkhdr), 0, M_DONTWAIT, 1, MT_HEADER);
+ if (m_shutdown_comp == NULL) {
+ /* no mbuf's */
+ return;
+ }
+ if (reflect_vtag) {
+ flags = SCTP_HAD_NO_TCB;
+ vtag = stcb->asoc.my_vtag;
+ } else {
+ flags = 0;
+ vtag = stcb->asoc.peer_vtag;
+ }
+ shutdown_complete = mtod(m_shutdown_comp, struct sctp_shutdown_complete_chunk *);
+ shutdown_complete->ch.chunk_type = SCTP_SHUTDOWN_COMPLETE;
+ shutdown_complete->ch.chunk_flags = flags;
+ shutdown_complete->ch.chunk_length = htons(sizeof(struct sctp_shutdown_complete_chunk));
+ SCTP_BUF_LEN(m_shutdown_comp) = sizeof(struct sctp_shutdown_complete_chunk);
+ (void)sctp_lowlevel_chunk_output(stcb->sctp_ep, stcb, net,
+ (struct sockaddr *)&net->ro._l_addr,
+ m_shutdown_comp, 0, NULL, 0, 1, 0, NULL, 0,
+ stcb->sctp_ep->sctp_lport, stcb->rport,
+ htonl(vtag),
+ net->port, SCTP_SO_NOT_LOCKED, NULL);
+ SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
+ return;
+}
+
+void
+sctp_send_shutdown_complete2(struct mbuf *m, int iphlen, struct sctphdr *sh,
+ uint32_t vrf_id, uint16_t port)
+{
+ /* formulate and SEND a SHUTDOWN-COMPLETE */
+ struct mbuf *o_pak;
+ struct mbuf *mout;
+ struct ip *iph, *iph_out;
+ struct udphdr *udp = NULL;
+
+#ifdef INET6
+ struct ip6_hdr *ip6, *ip6_out;
+
+#endif
+ int offset_out, len, mlen;
+ struct sctp_shutdown_complete_msg *comp_cp;
+
+ iph = mtod(m, struct ip *);
+ switch (iph->ip_v) {
+ case IPVERSION:
+ len = (sizeof(struct ip) + sizeof(struct sctp_shutdown_complete_msg));
+ break;
+#ifdef INET6
+ case IPV6_VERSION >> 4:
+ len = (sizeof(struct ip6_hdr) + sizeof(struct sctp_shutdown_complete_msg));
+ break;
+#endif
+ default:
+ return;
+ }
+ if (port) {
+ len += sizeof(struct udphdr);
+ }
+ mout = sctp_get_mbuf_for_msg(len + max_linkhdr, 1, M_DONTWAIT, 1, MT_DATA);
+ if (mout == NULL) {
+ return;
+ }
+ SCTP_BUF_RESV_UF(mout, max_linkhdr);
+ SCTP_BUF_LEN(mout) = len;
+ SCTP_BUF_NEXT(mout) = NULL;
+ iph_out = NULL;
+#ifdef INET6
+ ip6_out = NULL;
+#endif
+ offset_out = 0;
+
+ switch (iph->ip_v) {
+ case IPVERSION:
+ iph_out = mtod(mout, struct ip *);
+
+ /* Fill in the IP header for the SHUTDOWN-COMPLETE */
+ iph_out->ip_v = IPVERSION;
+ iph_out->ip_hl = (sizeof(struct ip) / 4);
+ iph_out->ip_tos = (u_char)0;
+ iph_out->ip_id = 0;
+ iph_out->ip_off = 0;
+ iph_out->ip_ttl = MAXTTL;
+ if (port) {
+ iph_out->ip_p = IPPROTO_UDP;
+ } else {
+ iph_out->ip_p = IPPROTO_SCTP;
+ }
+ iph_out->ip_src.s_addr = iph->ip_dst.s_addr;
+ iph_out->ip_dst.s_addr = iph->ip_src.s_addr;
+
+ /* let IP layer calculate this */
+ iph_out->ip_sum = 0;
+ offset_out += sizeof(*iph_out);
+ comp_cp = (struct sctp_shutdown_complete_msg *)(
+ (caddr_t)iph_out + offset_out);
+ break;
+#ifdef INET6
+ case IPV6_VERSION >> 4:
+ ip6 = (struct ip6_hdr *)iph;
+ ip6_out = mtod(mout, struct ip6_hdr *);
+
+ /* Fill in the IPv6 header for the SHUTDOWN-COMPLETE */
+ ip6_out->ip6_flow = ip6->ip6_flow;
+ ip6_out->ip6_hlim = MODULE_GLOBAL(ip6_defhlim);
+ if (port) {
+ ip6_out->ip6_nxt = IPPROTO_UDP;
+ } else {
+ ip6_out->ip6_nxt = IPPROTO_SCTP;
+ }
+ ip6_out->ip6_src = ip6->ip6_dst;
+ ip6_out->ip6_dst = ip6->ip6_src;
+ /*
+ * ?? The old code had both the iph len + payload, I think
+ * this is wrong and would never have worked
+ */
+ ip6_out->ip6_plen = sizeof(struct sctp_shutdown_complete_msg);
+ offset_out += sizeof(*ip6_out);
+ comp_cp = (struct sctp_shutdown_complete_msg *)(
+ (caddr_t)ip6_out + offset_out);
+ break;
+#endif /* INET6 */
+ default:
+ /* Currently not supported. */
+ sctp_m_freem(mout);
+ return;
+ }
+ if (port) {
+ udp = (struct udphdr *)comp_cp;
+ udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
+ udp->uh_dport = port;
+ udp->uh_ulen = htons(sizeof(struct sctp_shutdown_complete_msg) + sizeof(struct udphdr));
+ if (iph_out)
+ udp->uh_sum = in_pseudo(iph_out->ip_src.s_addr, iph_out->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP));
+ offset_out += sizeof(struct udphdr);
+ comp_cp = (struct sctp_shutdown_complete_msg *)((caddr_t)comp_cp + sizeof(struct udphdr));
+ }
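+ /*
+ * Encapsulation sketch: when a UDP port is given, the message is
+ * wrapped in a UDP header sourced from sctp_udp_tunneling_port and
+ * comp_cp is advanced past that header, so the SHUTDOWN-COMPLETE
+ * lands immediately after the UDP encapsulation.
+ */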
+ if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
+ /* no mbuf's */
+ sctp_m_freem(mout);
+ return;
+ }
+ /* Now copy in and fill in the SHUTDOWN-COMPLETE tags etc. */
+ comp_cp->sh.src_port = sh->dest_port;
+ comp_cp->sh.dest_port = sh->src_port;
+ comp_cp->sh.checksum = 0;
+ comp_cp->sh.v_tag = sh->v_tag;
+ comp_cp->shut_cmp.ch.chunk_flags = SCTP_HAD_NO_TCB;
+ comp_cp->shut_cmp.ch.chunk_type = SCTP_SHUTDOWN_COMPLETE;
+ comp_cp->shut_cmp.ch.chunk_length = htons(sizeof(struct sctp_shutdown_complete_chunk));
+
+ if (iph_out != NULL) {
+ sctp_route_t ro;
+ int ret;
+
+ mlen = SCTP_BUF_LEN(mout);
+ bzero(&ro, sizeof ro);
+ /* set IPv4 length */
+ iph_out->ip_len = mlen;
+#ifdef SCTP_PACKET_LOGGING
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
+ sctp_packet_log(mout, mlen);
+#endif
+ if (port) {
+#if defined(SCTP_WITH_NO_CSUM)
+ SCTP_STAT_INCR(sctps_sendnocrc);
+#else
+ comp_cp->sh.checksum = sctp_calculate_cksum(mout, offset_out);
+ SCTP_STAT_INCR(sctps_sendswcrc);
+#endif
+ SCTP_ENABLE_UDP_CSUM(mout);
+ } else {
+#if defined(SCTP_WITH_NO_CSUM)
+ SCTP_STAT_INCR(sctps_sendnocrc);
+#else
+ mout->m_pkthdr.csum_flags = CSUM_SCTP;
+ mout->m_pkthdr.csum_data = 0;
+ SCTP_STAT_INCR(sctps_sendhwcrc);
+#endif
+ }
+ SCTP_ATTACH_CHAIN(o_pak, mout, mlen);
+ /* out it goes */
+ SCTP_IP_OUTPUT(ret, o_pak, &ro, NULL, vrf_id);
+
+ /* Free the route if we got one back */
+ if (ro.ro_rt)
+ RTFREE(ro.ro_rt);
+ }
+#ifdef INET6
+ if (ip6_out != NULL) {
+ struct route_in6 ro;
+ int ret;
+ struct ifnet *ifp = NULL;
+
+ bzero(&ro, sizeof(ro));
+ mlen = SCTP_BUF_LEN(mout);
+#ifdef SCTP_PACKET_LOGGING
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
+ sctp_packet_log(mout, mlen);
+#endif
+ SCTP_ATTACH_CHAIN(o_pak, mout, mlen);
+ if (port) {
+#if defined(SCTP_WITH_NO_CSUM)
+ SCTP_STAT_INCR(sctps_sendnocrc);
+#else
+ comp_cp->sh.checksum = sctp_calculate_cksum(mout, sizeof(struct ip6_hdr) + sizeof(struct udphdr));
+ SCTP_STAT_INCR(sctps_sendswcrc);
+#endif
+ if ((udp->uh_sum = in6_cksum(o_pak, IPPROTO_UDP, sizeof(struct ip6_hdr), mlen - sizeof(struct ip6_hdr))) == 0) {
+ udp->uh_sum = 0xffff;
+ }
+ } else {
+#if defined(SCTP_WITH_NO_CSUM)
+ SCTP_STAT_INCR(sctps_sendnocrc);
+#else
+ mout->m_pkthdr.csum_flags = CSUM_SCTP;
+ mout->m_pkthdr.csum_data = 0;
+ SCTP_STAT_INCR(sctps_sendhwcrc);
+#endif
+ }
+ SCTP_IP6_OUTPUT(ret, o_pak, &ro, &ifp, NULL, vrf_id);
+
+ /* Free the route if we got one back */
+ if (ro.ro_rt)
+ RTFREE(ro.ro_rt);
+ }
+#endif
+ SCTP_STAT_INCR(sctps_sendpackets);
+ SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
+ SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
+ return;
+}
+
+static struct sctp_nets *
+sctp_select_hb_destination(struct sctp_tcb *stcb, struct timeval *now)
+{
+ struct sctp_nets *net, *hnet;
+ int ms_goneby, highest_ms, state_overide = 0;
+
+ (void)SCTP_GETTIME_TIMEVAL(now);
+ highest_ms = 0;
+ hnet = NULL;
+ SCTP_TCB_LOCK_ASSERT(stcb);
+ TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+ if (((net->dest_state & SCTP_ADDR_NOHB) &&
+ ((net->dest_state & SCTP_ADDR_UNCONFIRMED) == 0)) ||
+ (net->dest_state & SCTP_ADDR_OUT_OF_SCOPE)) {
+ /*
+ * Skip this net from consideration if HB is off AND
+ * it's confirmed
+ */
+ continue;
+ }
+ if (sctp_destination_is_reachable(stcb, (struct sockaddr *)&net->ro._l_addr) == 0) {
+ /* skip this dest net from consideration */
+ continue;
+ }
+ if (net->last_sent_time.tv_sec) {
+ /* We have sent to it, so subtract */
+ ms_goneby = (now->tv_sec - net->last_sent_time.tv_sec) * 1000;
+ } else
+ /* Never been sent to */
+ ms_goneby = 0x7fffffff;
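+ /*
+ * Example: a net last sent to 3 seconds ago yields ms_goneby =
+ * 3000, while a never-used net gets 0x7fffffff, so new or idle
+ * destinations win the longest-idle comparison below.
+ */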
+ /*-
+ * When the address state is unconfirmed but still
+ * considered reachable, we HB at a higher rate. Once it
+ * goes confirmed OR reaches the "unreachable" state, then
+ * we cut it back to HB at a more normal pace.
+ */
+ if ((net->dest_state & (SCTP_ADDR_UNCONFIRMED | SCTP_ADDR_NOT_REACHABLE)) == SCTP_ADDR_UNCONFIRMED) {
+ state_overide = 1;
+ } else {
+ state_overide = 0;
+ }
+
+ if ((((unsigned int)ms_goneby >= net->RTO) || (state_overide)) &&
+ (ms_goneby > highest_ms)) {
+ highest_ms = ms_goneby;
+ hnet = net;
+ }
+ }
+ if (hnet &&
+ ((hnet->dest_state & (SCTP_ADDR_UNCONFIRMED | SCTP_ADDR_NOT_REACHABLE)) == SCTP_ADDR_UNCONFIRMED)) {
+ state_overide = 1;
+ } else {
+ state_overide = 0;
+ }
+
+ if (hnet && highest_ms && (((unsigned int)highest_ms >= hnet->RTO) || state_overide)) {
+ /*-
+ * Found the one with the longest delay bounds OR it is
+ * unconfirmed and still not marked unreachable.
+ */
+ SCTPDBG(SCTP_DEBUG_OUTPUT4, "net:%p is the hb winner -", hnet);
+#ifdef SCTP_DEBUG
+ if (hnet) {
+ SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT4,
+ (struct sockaddr *)&hnet->ro._l_addr);
+ } else {
+ SCTPDBG(SCTP_DEBUG_OUTPUT4, " none\n");
+ }
+#endif
+ /* update the timer now */
+ hnet->last_sent_time = *now;
+ return (hnet);
+ }
+ /* Nothing to HB */
+ return (NULL);
+}
+
+int
+sctp_send_hb(struct sctp_tcb *stcb, int user_req, struct sctp_nets *u_net)
+{
+ struct sctp_tmit_chunk *chk;
+ struct sctp_nets *net;
+ struct sctp_heartbeat_chunk *hb;
+ struct timeval now;
+ struct sockaddr_in *sin;
+ struct sockaddr_in6 *sin6;
+
+ SCTP_TCB_LOCK_ASSERT(stcb);
+ if (user_req == 0) {
+ net = sctp_select_hb_destination(stcb, &now);
+ if (net == NULL) {
+ /*-
+ * All are busy, none to send to; just start the
+ * timer again.
+ */
+ if (stcb->asoc.state == 0) {
+ return (0);
+ }
+ sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT,
+ stcb->sctp_ep,
+ stcb,
+ net);
+ return (0);
+ }
+ } else {
+ net = u_net;
+ if (net == NULL) {
+ return (0);
+ }
+ (void)SCTP_GETTIME_TIMEVAL(&now);
+ }
+ sin = (struct sockaddr_in *)&net->ro._l_addr;
+ if (sin->sin_family != AF_INET) {
+ if (sin->sin_family != AF_INET6) {
+ /* huh */
+ return (0);
+ }
+ }
+ sctp_alloc_a_chunk(stcb, chk);
+ if (chk == NULL) {
+ SCTPDBG(SCTP_DEBUG_OUTPUT4, "Gak, can't get a chunk for hb\n");
+ return (0);
+ }
+ chk->copy_by_ref = 0;
+ chk->rec.chunk_id.id = SCTP_HEARTBEAT_REQUEST;
+ chk->rec.chunk_id.can_take_data = 1;
+ chk->asoc = &stcb->asoc;
+ chk->send_size = sizeof(struct sctp_heartbeat_chunk);
+
+ chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_DONTWAIT, 1, MT_HEADER);
+ if (chk->data == NULL) {
+ sctp_free_a_chunk(stcb, chk);
+ return (0);
+ }
+ SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
+ SCTP_BUF_LEN(chk->data) = chk->send_size;
+ chk->sent = SCTP_DATAGRAM_UNSENT;
+ chk->snd_count = 0;
+ chk->whoTo = net;
+ atomic_add_int(&chk->whoTo->ref_count, 1);
+ /* Now we have a mbuf that we can fill in with the details */
+ hb = mtod(chk->data, struct sctp_heartbeat_chunk *);
+ memset(hb, 0, sizeof(struct sctp_heartbeat_chunk));
+ /* fill out chunk header */
+ hb->ch.chunk_type = SCTP_HEARTBEAT_REQUEST;
+ hb->ch.chunk_flags = 0;
+ hb->ch.chunk_length = htons(chk->send_size);
+ /* Fill out hb parameter */
+ hb->heartbeat.hb_info.ph.param_type = htons(SCTP_HEARTBEAT_INFO);
+ hb->heartbeat.hb_info.ph.param_length = htons(sizeof(struct sctp_heartbeat_info_param));
+ hb->heartbeat.hb_info.time_value_1 = now.tv_sec;
+ hb->heartbeat.hb_info.time_value_2 = now.tv_usec;
+ /* Record whether our user requested this one */
+ hb->heartbeat.hb_info.user_req = user_req;
+ hb->heartbeat.hb_info.addr_family = sin->sin_family;
+ hb->heartbeat.hb_info.addr_len = sin->sin_len;
+ if (net->dest_state & SCTP_ADDR_UNCONFIRMED) {
+ /*
+ * we only take from the entropy pool if the address is not
+ * confirmed.
+ */
+ net->heartbeat_random1 = hb->heartbeat.hb_info.random_value1 = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep);
+ net->heartbeat_random2 = hb->heartbeat.hb_info.random_value2 = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep);
+ } else {
+ net->heartbeat_random1 = hb->heartbeat.hb_info.random_value1 = 0;
+ net->heartbeat_random2 = hb->heartbeat.hb_info.random_value2 = 0;
+ }
+ if (sin->sin_family == AF_INET) {
+ memcpy(hb->heartbeat.hb_info.address, &sin->sin_addr, sizeof(sin->sin_addr));
+ } else if (sin->sin_family == AF_INET6) {
+ /* We leave the scope the way it is in our lookup table. */
+ sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
+ memcpy(hb->heartbeat.hb_info.address, &sin6->sin6_addr, sizeof(sin6->sin6_addr));
+ } else {
+ /* huh compiler bug */
+ return (0);
+ }
+
+ /*
+ * JRS 5/14/07 - In CMT PF, the T3 timer is used to track
+ * PF-heartbeats. Because of this, threshold management is done by
+ * the t3 timer handler, and does not need to be done upon the send
+ * of a PF-heartbeat. If CMT PF is on and the destination to which a
+ * heartbeat is being sent is in PF state, do NOT do threshold
+ * management.
+ */
+ if ((stcb->asoc.sctp_cmt_pf == 0) ||
+ ((net->dest_state & SCTP_ADDR_PF) != SCTP_ADDR_PF)) {
+ /* ok we have a destination that needs a beat */
+ /* let's do the threshold management Qiaobing style */
+ if (sctp_threshold_management(stcb->sctp_ep, stcb, net,
+ stcb->asoc.max_send_times)) {
+ /*-
+ * We have lost the association. In a way this is quite bad,
+ * since we are charged one attempt even though we did not
+ * actually send yet. This is the downside to Q's style as
+ * defined in the RFC, as opposed to my alternate style.
+ */
+ if (chk->data != NULL) {
+ sctp_m_freem(chk->data);
+ chk->data = NULL;
+ }
+ /*
+ * Here we do NOT use the macro since the
+ * association is now gone.
+ */
+ if (chk->whoTo) {
+ sctp_free_remote_addr(chk->whoTo);
+ chk->whoTo = NULL;
+ }
+ sctp_free_a_chunk((struct sctp_tcb *)NULL, chk);
+ return (-1);
+ }
+ }
+ net->hb_responded = 0;
+ TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
+ stcb->asoc.ctrl_queue_cnt++;
+ SCTP_STAT_INCR(sctps_sendheartbeat);
+ /*-
+ * Call the medium-level routine directly to put out the chunk.
+ * It will always tumble out control chunks (such as this HB),
+ * but it may tumble out data too.
+ */
+ return (1);
+}
+
+void
+sctp_send_ecn_echo(struct sctp_tcb *stcb, struct sctp_nets *net,
+ uint32_t high_tsn)
+{
+ struct sctp_association *asoc;
+ struct sctp_ecne_chunk *ecne;
+ struct sctp_tmit_chunk *chk;
+
+ asoc = &stcb->asoc;
+ SCTP_TCB_LOCK_ASSERT(stcb);
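+ /*
+ * Only one ECN-ECHO needs to be queued at a time; if one is
+ * already on the control queue just advance its TSN in place.
+ */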
+ TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
+ if (chk->rec.chunk_id.id == SCTP_ECN_ECHO) {
+ /* found a previous ECN_ECHO; update it if needed */
+ ecne = mtod(chk->data, struct sctp_ecne_chunk *);
+ ecne->tsn = htonl(high_tsn);
+ return;
+ }
+ }
+ /* nope could not find one to update so we must build one */
+ sctp_alloc_a_chunk(stcb, chk);
+ if (chk == NULL) {
+ return;
+ }
+ chk->copy_by_ref = 0;
+ SCTP_STAT_INCR(sctps_sendecne);
+ chk->rec.chunk_id.id = SCTP_ECN_ECHO;
+ chk->rec.chunk_id.can_take_data = 0;
+ chk->asoc = &stcb->asoc;
+ chk->send_size = sizeof(struct sctp_ecne_chunk);
+ chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_DONTWAIT, 1, MT_HEADER);
+ if (chk->data == NULL) {
+ sctp_free_a_chunk(stcb, chk);
+ return;
+ }
+ SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
+ SCTP_BUF_LEN(chk->data) = chk->send_size;
+ chk->sent = SCTP_DATAGRAM_UNSENT;
+ chk->snd_count = 0;
+ chk->whoTo = net;
+ atomic_add_int(&chk->whoTo->ref_count, 1);
+ stcb->asoc.ecn_echo_cnt_onq++;
+ ecne = mtod(chk->data, struct sctp_ecne_chunk *);
+ ecne->ch.chunk_type = SCTP_ECN_ECHO;
+ ecne->ch.chunk_flags = 0;
+ ecne->ch.chunk_length = htons(sizeof(struct sctp_ecne_chunk));
+ ecne->tsn = htonl(high_tsn);
+ TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
+ asoc->ctrl_queue_cnt++;
+}
+
+void
+sctp_send_packet_dropped(struct sctp_tcb *stcb, struct sctp_nets *net,
+ struct mbuf *m, int iphlen, int bad_crc)
+{
+ struct sctp_association *asoc;
+ struct sctp_pktdrop_chunk *drp;
+ struct sctp_tmit_chunk *chk;
+ uint8_t *datap;
+ int len;
+ int was_trunc = 0;
+ struct ip *iph;
+
+#ifdef INET6
+ struct ip6_hdr *ip6h;
+
+#endif
+ int fullsz = 0, extra = 0;
+ long spc;
+ int offset;
+ struct sctp_chunkhdr *ch, chunk_buf;
+ unsigned int chk_length;
+
+ if (!stcb) {
+ return;
+ }
+ asoc = &stcb->asoc;
+ SCTP_TCB_LOCK_ASSERT(stcb);
+ if (asoc->peer_supports_pktdrop == 0) {
+ /*-
+ * peer must declare support before I send one.
+ */
+ return;
+ }
+ if (stcb->sctp_socket == NULL) {
+ return;
+ }
+ sctp_alloc_a_chunk(stcb, chk);
+ if (chk == NULL) {
+ return;
+ }
+ chk->copy_by_ref = 0;
+ iph = mtod(m, struct ip *);
+ if (iph == NULL) {
+ sctp_free_a_chunk(stcb, chk);
+ return;
+ }
+ switch (iph->ip_v) {
+ case IPVERSION:
+ /* IPv4 */
+ len = chk->send_size = iph->ip_len;
+ break;
+#ifdef INET6
+ case IPV6_VERSION >> 4:
+ /* IPv6 */
+ ip6h = mtod(m, struct ip6_hdr *);
+ len = chk->send_size = ntohs(ip6h->ip6_plen);
+ break;
+#endif
+ default:
+ return;
+ }
+ /* Validate that we do not have an ABORT in here. */
+ offset = iphlen + sizeof(struct sctphdr);
+ ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
+ sizeof(*ch), (uint8_t *)&chunk_buf);
+ while (ch != NULL) {
+ chk_length = ntohs(ch->chunk_length);
+ if (chk_length < sizeof(*ch)) {
+ /* break to abort land */
+ break;
+ }
+ switch (ch->chunk_type) {
+ case SCTP_PACKET_DROPPED:
+ case SCTP_ABORT_ASSOCIATION:
+ case SCTP_INITIATION_ACK:
+ /*-
+ * We don't respond with a PKT-DROP to an ABORT
+ * or a PKT-DROP. We also do not respond to an
+ * INIT-ACK, because we can't know if the initiation
+ * tag is correct or not.
+ */
+ sctp_free_a_chunk(stcb, chk);
+ return;
+ default:
+ break;
+ }
+ offset += SCTP_SIZE32(chk_length);
+ ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
+ sizeof(*ch), (uint8_t *)&chunk_buf);
+ }
+
+ if ((len + SCTP_MAX_OVERHEAD + sizeof(struct sctp_pktdrop_chunk)) >
+ min(stcb->asoc.smallest_mtu, MCLBYTES)) {
+ /*
+ * only send 1 mtu worth, trim off the excess on the end.
+ */
+ fullsz = len - extra;
+ len = min(stcb->asoc.smallest_mtu, MCLBYTES) - SCTP_MAX_OVERHEAD;
+ was_trunc = 1;
+ }
+ chk->asoc = &stcb->asoc;
+ chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
+ if (chk->data == NULL) {
+jump_out:
+ sctp_free_a_chunk(stcb, chk);
+ return;
+ }
+ SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
+ drp = mtod(chk->data, struct sctp_pktdrop_chunk *);
+ if (drp == NULL) {
+ sctp_m_freem(chk->data);
+ chk->data = NULL;
+ goto jump_out;
+ }
+ chk->book_size = SCTP_SIZE32((chk->send_size + sizeof(struct sctp_pktdrop_chunk) +
+ sizeof(struct sctphdr) + SCTP_MED_OVERHEAD));
+ chk->book_size_scale = 0;
+ if (was_trunc) {
+ drp->ch.chunk_flags = SCTP_PACKET_TRUNCATED;
+ drp->trunc_len = htons(fullsz);
+ /*
+ * len is already adjusted to size minus overhead above;
+ * take the pkt_drop chunk itself out of it.
+ */
+ chk->send_size = len - sizeof(struct sctp_pktdrop_chunk);
+ len = chk->send_size;
+ } else {
+ /* no truncation needed */
+ drp->ch.chunk_flags = 0;
+ drp->trunc_len = htons(0);
+ }
+ if (bad_crc) {
+ drp->ch.chunk_flags |= SCTP_BADCRC;
+ }
+ chk->send_size += sizeof(struct sctp_pktdrop_chunk);
+ SCTP_BUF_LEN(chk->data) = chk->send_size;
+ chk->sent = SCTP_DATAGRAM_UNSENT;
+ chk->snd_count = 0;
+ if (net) {
+ /* we should hit here */
+ chk->whoTo = net;
+ } else {
+ chk->whoTo = asoc->primary_destination;
+ }
+ atomic_add_int(&chk->whoTo->ref_count, 1);
+ chk->rec.chunk_id.id = SCTP_PACKET_DROPPED;
+ chk->rec.chunk_id.can_take_data = 1;
+ drp->ch.chunk_type = SCTP_PACKET_DROPPED;
+ drp->ch.chunk_length = htons(chk->send_size);
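+ /*
+ * bottle_bw advertises our receive socket-buffer limit;
+ * current_onq below reports how much of it is in use.
+ */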
+ spc = SCTP_SB_LIMIT_RCV(stcb->sctp_socket);
+ if (spc < 0) {
+ spc = 0;
+ }
+ drp->bottle_bw = htonl(spc);
+ if (asoc->my_rwnd) {
+ drp->current_onq = htonl(asoc->size_on_reasm_queue +
+ asoc->size_on_all_streams +
+ asoc->my_rwnd_control_len +
+ stcb->sctp_socket->so_rcv.sb_cc);
+ } else {
+ /*-
+ * If my rwnd is 0, possibly from mbuf depletion as well as
+ * space used, tell the peer there is NO space, i.e.
+ * onq == bottle_bw.
+ */
+ drp->current_onq = htonl(spc);
+ }
+ drp->reserved = 0;
+ datap = drp->data;
+ m_copydata(m, iphlen, len, (caddr_t)datap);
+ TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
+ asoc->ctrl_queue_cnt++;
+}
+
+void
+sctp_send_cwr(struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t high_tsn)
+{
+ struct sctp_association *asoc;
+ struct sctp_cwr_chunk *cwr;
+ struct sctp_tmit_chunk *chk;
+
+ asoc = &stcb->asoc;
+ SCTP_TCB_LOCK_ASSERT(stcb);
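+ /*
+ * As with ECN-ECHO, keep at most one CWR queued: search for an
+ * existing one and only move its TSN forward.
+ */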
+ TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
+ if (chk->rec.chunk_id.id == SCTP_ECN_CWR) {
+ /* found a previous ECN_CWR; update it if needed */
+ cwr = mtod(chk->data, struct sctp_cwr_chunk *);
+ if (compare_with_wrap(high_tsn, ntohl(cwr->tsn),
+ MAX_TSN)) {
+ cwr->tsn = htonl(high_tsn);
+ }
+ return;
+ }
+ }
+ /* nope could not find one to update so we must build one */
+ sctp_alloc_a_chunk(stcb, chk);
+ if (chk == NULL) {
+ return;
+ }
+ chk->copy_by_ref = 0;
+ chk->rec.chunk_id.id = SCTP_ECN_CWR;
+ chk->rec.chunk_id.can_take_data = 1;
+ chk->asoc = &stcb->asoc;
+ chk->send_size = sizeof(struct sctp_cwr_chunk);
+ chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_DONTWAIT, 1, MT_HEADER);
+ if (chk->data == NULL) {
+ sctp_free_a_chunk(stcb, chk);
+ return;
+ }
+ SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
+ SCTP_BUF_LEN(chk->data) = chk->send_size;
+ chk->sent = SCTP_DATAGRAM_UNSENT;
+ chk->snd_count = 0;
+ chk->whoTo = net;
+ atomic_add_int(&chk->whoTo->ref_count, 1);
+ cwr = mtod(chk->data, struct sctp_cwr_chunk *);
+ cwr->ch.chunk_type = SCTP_ECN_CWR;
+ cwr->ch.chunk_flags = 0;
+ cwr->ch.chunk_length = htons(sizeof(struct sctp_cwr_chunk));
+ cwr->tsn = htonl(high_tsn);
+ TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
+ asoc->ctrl_queue_cnt++;
+}
+
+void
+sctp_add_stream_reset_out(struct sctp_tmit_chunk *chk,
+ int number_entries, uint16_t * list,
+ uint32_t seq, uint32_t resp_seq, uint32_t last_sent)
+{
+ int len, old_len, i;
+ struct sctp_stream_reset_out_request *req_out;
+ struct sctp_chunkhdr *ch;
+
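+ /*
+ * Append an outgoing stream-reset request parameter at the
+ * current end of the chunk; ch->chunk_length tells us where
+ * that is.
+ */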
+ ch = mtod(chk->data, struct sctp_chunkhdr *);
+
+
+ old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
+
+ /* get to new offset for the param. */
+ req_out = (struct sctp_stream_reset_out_request *)((caddr_t)ch + len);
+ /* now how long will this param be? */
+ len = (sizeof(struct sctp_stream_reset_out_request) + (sizeof(uint16_t) * number_entries));
+ req_out->ph.param_type = htons(SCTP_STR_RESET_OUT_REQUEST);
+ req_out->ph.param_length = htons(len);
+ req_out->request_seq = htonl(seq);
+ req_out->response_seq = htonl(resp_seq);
+ req_out->send_reset_at_tsn = htonl(last_sent);
+ if (number_entries) {
+ for (i = 0; i < number_entries; i++) {
+ req_out->list_of_streams[i] = htons(list[i]);
+ }
+ }
+ if (SCTP_SIZE32(len) > len) {
+ /*-
+ * Need to worry about the pad we may end up adding to the
+ * end. This is easy since the struct is either aligned to 4
+ * bytes or 2 bytes off.
+ */
+ req_out->list_of_streams[number_entries] = 0;
+ }
+ /* now fix the chunk length */
+ ch->chunk_length = htons(len + old_len);
+ chk->book_size = len + old_len;
+ chk->book_size_scale = 0;
+ chk->send_size = SCTP_SIZE32(chk->book_size);
+ SCTP_BUF_LEN(chk->data) = chk->send_size;
+ return;
+}
+
+
+void
+sctp_add_stream_reset_in(struct sctp_tmit_chunk *chk,
+ int number_entries, uint16_t * list,
+ uint32_t seq)
+{
+ int len, old_len, i;
+ struct sctp_stream_reset_in_request *req_in;
+ struct sctp_chunkhdr *ch;
+
+ ch = mtod(chk->data, struct sctp_chunkhdr *);
+
+
+ old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
+
+ /* get to new offset for the param. */
+ req_in = (struct sctp_stream_reset_in_request *)((caddr_t)ch + len);
+ /* now how long will this param be? */
+ len = (sizeof(struct sctp_stream_reset_in_request) + (sizeof(uint16_t) * number_entries));
+ req_in->ph.param_type = htons(SCTP_STR_RESET_IN_REQUEST);
+ req_in->ph.param_length = htons(len);
+ req_in->request_seq = htonl(seq);
+ if (number_entries) {
+ for (i = 0; i < number_entries; i++) {
+ req_in->list_of_streams[i] = htons(list[i]);
+ }
+ }
+ if (SCTP_SIZE32(len) > len) {
+ /*-
+ * Need to worry about the pad we may end up adding to the
+ * end. This is easy since the struct is either aligned to 4
+ * bytes or 2 bytes off.
+ */
+ req_in->list_of_streams[number_entries] = 0;
+ }
+ /* now fix the chunk length */
+ ch->chunk_length = htons(len + old_len);
+ chk->book_size = len + old_len;
+ chk->book_size_scale = 0;
+ chk->send_size = SCTP_SIZE32(chk->book_size);
+ SCTP_BUF_LEN(chk->data) = chk->send_size;
+ return;
+}
+
+
+void
+sctp_add_stream_reset_tsn(struct sctp_tmit_chunk *chk,
+ uint32_t seq)
+{
+ int len, old_len;
+ struct sctp_stream_reset_tsn_request *req_tsn;
+ struct sctp_chunkhdr *ch;
+
+ ch = mtod(chk->data, struct sctp_chunkhdr *);
+
+
+ old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
+
+ /* get to new offset for the param. */
+ req_tsn = (struct sctp_stream_reset_tsn_request *)((caddr_t)ch + len);
+ /* now how long will this param be? */
+ len = sizeof(struct sctp_stream_reset_tsn_request);
+ req_tsn->ph.param_type = htons(SCTP_STR_RESET_TSN_REQUEST);
+ req_tsn->ph.param_length = htons(len);
+ req_tsn->request_seq = htonl(seq);
+
+ /* now fix the chunk length */
+ ch->chunk_length = htons(len + old_len);
+ chk->send_size = len + old_len;
+ chk->book_size = SCTP_SIZE32(chk->send_size);
+ chk->book_size_scale = 0;
+ SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size);
+ return;
+}
+
+void
+sctp_add_stream_reset_result(struct sctp_tmit_chunk *chk,
+ uint32_t resp_seq, uint32_t result)
+{
+ int len, old_len;
+ struct sctp_stream_reset_response *resp;
+ struct sctp_chunkhdr *ch;
+
+ ch = mtod(chk->data, struct sctp_chunkhdr *);
+
+
+ old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
+
+ /* get to new offset for the param. */
+ resp = (struct sctp_stream_reset_response *)((caddr_t)ch + len);
+ /* now how long will this param be? */
+ len = sizeof(struct sctp_stream_reset_response);
+ resp->ph.param_type = htons(SCTP_STR_RESET_RESPONSE);
+ resp->ph.param_length = htons(len);
+ resp->response_seq = htonl(resp_seq);
+ resp->result = htonl(result);
+
+ /* now fix the chunk length */
+ ch->chunk_length = htons(len + old_len);
+ chk->book_size = len + old_len;
+ chk->book_size_scale = 0;
+ chk->send_size = SCTP_SIZE32(chk->book_size);
+ SCTP_BUF_LEN(chk->data) = chk->send_size;
+ return;
+
+}
+
+
+void
+sctp_add_stream_reset_result_tsn(struct sctp_tmit_chunk *chk,
+ uint32_t resp_seq, uint32_t result,
+ uint32_t send_una, uint32_t recv_next)
+{
+ int len, old_len;
+ struct sctp_stream_reset_response_tsn *resp;
+ struct sctp_chunkhdr *ch;
+
+ ch = mtod(chk->data, struct sctp_chunkhdr *);
+
+
+ old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
+
+ /* get to new offset for the param. */
+ resp = (struct sctp_stream_reset_response_tsn *)((caddr_t)ch + len);
+ /* now how long will this param be? */
+ len = sizeof(struct sctp_stream_reset_response_tsn);
+ resp->ph.param_type = htons(SCTP_STR_RESET_RESPONSE);
+ resp->ph.param_length = htons(len);
+ resp->response_seq = htonl(resp_seq);
+ resp->result = htonl(result);
+ resp->senders_next_tsn = htonl(send_una);
+ resp->receivers_next_tsn = htonl(recv_next);
+
+ /* now fix the chunk length */
+ ch->chunk_length = htons(len + old_len);
+ chk->book_size = len + old_len;
+ chk->send_size = SCTP_SIZE32(chk->book_size);
+ chk->book_size_scale = 0;
+ SCTP_BUF_LEN(chk->data) = chk->send_size;
+ return;
+}
+
+static void
+sctp_add_a_stream(struct sctp_tmit_chunk *chk,
+ uint32_t seq,
+ uint16_t adding)
+{
+ int len, old_len;
+ struct sctp_chunkhdr *ch;
+ struct sctp_stream_reset_add_strm *addstr;
+
+ ch = mtod(chk->data, struct sctp_chunkhdr *);
+ old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
+
+ /* get to new offset for the param. */
+ addstr = (struct sctp_stream_reset_add_strm *)((caddr_t)ch + len);
+ /* now how long will this param be? */
+ len = sizeof(struct sctp_stream_reset_add_strm);
+
+ /* Fill it out. */
+ addstr->ph.param_type = htons(SCTP_STR_RESET_ADD_STREAMS);
+ addstr->ph.param_length = htons(len);
+ addstr->request_seq = htonl(seq);
+ addstr->number_of_streams = htons(adding);
+ addstr->reserved = 0;
+
+ /* now fix the chunk length */
+ ch->chunk_length = htons(len + old_len);
+ chk->send_size = len + old_len;
+ chk->book_size = SCTP_SIZE32(chk->send_size);
+ chk->book_size_scale = 0;
+ SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size);
+ return;
+}
+
+int
+sctp_send_str_reset_req(struct sctp_tcb *stcb,
+ int number_entries, uint16_t * list,
+ uint8_t send_out_req,
+ uint32_t resp_seq,
+ uint8_t send_in_req,
+ uint8_t send_tsn_req,
+ uint8_t add_stream,
+ uint16_t adding
+)
+{
+
+ struct sctp_association *asoc;
+ struct sctp_tmit_chunk *chk;
+ struct sctp_chunkhdr *ch;
+ uint32_t seq;
+
+ asoc = &stcb->asoc;
+ if (asoc->stream_reset_outstanding) {
+ /*-
+ * Already one pending, must get ACK back to clear the flag.
+ */
+ SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EBUSY);
+ return (EBUSY);
+ }
+ if ((send_out_req == 0) && (send_in_req == 0) && (send_tsn_req == 0) &&
+ (add_stream == 0)) {
+ /* nothing to do */
+ SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
+ return (EINVAL);
+ }
+ if (send_tsn_req && (send_out_req || send_in_req)) {
+ /* error, can't do that */
+ SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
+ return (EINVAL);
+ }
+ sctp_alloc_a_chunk(stcb, chk);
+ if (chk == NULL) {
+ SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
+ return (ENOMEM);
+ }
+ chk->copy_by_ref = 0;
+ chk->rec.chunk_id.id = SCTP_STREAM_RESET;
+ chk->rec.chunk_id.can_take_data = 0;
+ chk->asoc = &stcb->asoc;
+ chk->book_size = sizeof(struct sctp_chunkhdr);
+ chk->send_size = SCTP_SIZE32(chk->book_size);
+ chk->book_size_scale = 0;
+
+ chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
+ if (chk->data == NULL) {
+ sctp_free_a_chunk(stcb, chk);
+ SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
+ return (ENOMEM);
+ }
+ SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
+
+ /* setup chunk parameters */
+ chk->sent = SCTP_DATAGRAM_UNSENT;
+ chk->snd_count = 0;
+ chk->whoTo = asoc->primary_destination;
+ atomic_add_int(&chk->whoTo->ref_count, 1);
+
+ ch = mtod(chk->data, struct sctp_chunkhdr *);
+ ch->chunk_type = SCTP_STREAM_RESET;
+ ch->chunk_flags = 0;
+ ch->chunk_length = htons(chk->book_size);
+ SCTP_BUF_LEN(chk->data) = chk->send_size;
+
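+ /*
+ * The chunk starts out as a bare chunk header; each sctp_add_*
+ * helper called below appends its parameter and fixes up the
+ * chunk_length/send_size.
+ */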
+ seq = stcb->asoc.str_reset_seq_out;
+ if (send_out_req) {
+ sctp_add_stream_reset_out(chk, number_entries, list,
+ seq, resp_seq, (stcb->asoc.sending_seq - 1));
+ asoc->stream_reset_out_is_outstanding = 1;
+ seq++;
+ asoc->stream_reset_outstanding++;
+ }
+ if (add_stream) {
+ sctp_add_a_stream(chk, seq, adding);
+ seq++;
+ asoc->stream_reset_outstanding++;
+ }
+ if (send_in_req) {
+ sctp_add_stream_reset_in(chk, number_entries, list, seq);
+ asoc->stream_reset_outstanding++;
+ }
+ if (send_tsn_req) {
+ sctp_add_stream_reset_tsn(chk, seq);
+ asoc->stream_reset_outstanding++;
+ }
+ asoc->str_reset = chk;
+
+ /* insert the chunk for sending */
+ TAILQ_INSERT_TAIL(&asoc->control_send_queue,
+ chk,
+ sctp_next);
+ asoc->ctrl_queue_cnt++;
+ sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, chk->whoTo);
+ return (0);
+}
+
+void
+sctp_send_abort(struct mbuf *m, int iphlen, struct sctphdr *sh, uint32_t vtag,
+ struct mbuf *err_cause, uint32_t vrf_id, uint16_t port)
+{
+ /*-
+ * Formulate the abort message, and send it back down.
+ */
+ struct mbuf *o_pak;
+ struct mbuf *mout;
+ struct sctp_abort_msg *abm;
+ struct ip *iph, *iph_out;
+ struct udphdr *udp;
+
+#ifdef INET6
+ struct ip6_hdr *ip6, *ip6_out;
+
+#endif
+ int iphlen_out, len;
+
+ /* don't respond to ABORT with ABORT */
+ if (sctp_is_there_an_abort_here(m, iphlen, &vtag)) {
+ if (err_cause)
+ sctp_m_freem(err_cause);
+ return;
+ }
+ iph = mtod(m, struct ip *);
+ switch (iph->ip_v) {
+ case IPVERSION:
+ len = (sizeof(struct ip) + sizeof(struct sctp_abort_msg));
+ break;
+#ifdef INET6
+ case IPV6_VERSION >> 4:
+ len = (sizeof(struct ip6_hdr) + sizeof(struct sctp_abort_msg));
+ break;
+#endif
+ default:
+ if (err_cause) {
+ sctp_m_freem(err_cause);
+ }
+ return;
+ }
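+ /*
+ * A non-zero port means the peer is using UDP encapsulation;
+ * leave room for the UDP header we will prepend.
+ */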
+ if (port) {
+ len += sizeof(struct udphdr);
+ }
+ mout = sctp_get_mbuf_for_msg(len + max_linkhdr, 1, M_DONTWAIT, 1, MT_DATA);
+ if (mout == NULL) {
+ if (err_cause) {
+ sctp_m_freem(err_cause);
+ }
+ return;
+ }
+ SCTP_BUF_RESV_UF(mout, max_linkhdr);
+ SCTP_BUF_LEN(mout) = len;
+ SCTP_BUF_NEXT(mout) = err_cause;
+ iph_out = NULL;
+#ifdef INET6
+ ip6_out = NULL;
+#endif
+ switch (iph->ip_v) {
+ case IPVERSION:
+ iph_out = mtod(mout, struct ip *);
+
+ /* Fill in the IP header for the ABORT */
+ iph_out->ip_v = IPVERSION;
+ iph_out->ip_hl = (sizeof(struct ip) / 4);
+ iph_out->ip_tos = (u_char)0;
+ iph_out->ip_id = 0;
+ iph_out->ip_off = 0;
+ iph_out->ip_ttl = MAXTTL;
+ if (port) {
+ iph_out->ip_p = IPPROTO_UDP;
+ } else {
+ iph_out->ip_p = IPPROTO_SCTP;
+ }
+ iph_out->ip_src.s_addr = iph->ip_dst.s_addr;
+ iph_out->ip_dst.s_addr = iph->ip_src.s_addr;
+ /* let IP layer calculate this */
+ iph_out->ip_sum = 0;
+
+ iphlen_out = sizeof(*iph_out);
+ abm = (struct sctp_abort_msg *)((caddr_t)iph_out + iphlen_out);
+ break;
+#ifdef INET6
+ case IPV6_VERSION >> 4:
+ ip6 = (struct ip6_hdr *)iph;
+ ip6_out = mtod(mout, struct ip6_hdr *);
+
+ /* Fill in the IP6 header for the ABORT */
+ ip6_out->ip6_flow = ip6->ip6_flow;
+ ip6_out->ip6_hlim = MODULE_GLOBAL(ip6_defhlim);
+ if (port) {
+ ip6_out->ip6_nxt = IPPROTO_UDP;
+ } else {
+ ip6_out->ip6_nxt = IPPROTO_SCTP;
+ }
+ ip6_out->ip6_src = ip6->ip6_dst;
+ ip6_out->ip6_dst = ip6->ip6_src;
+
+ iphlen_out = sizeof(*ip6_out);
+ abm = (struct sctp_abort_msg *)((caddr_t)ip6_out + iphlen_out);
+ break;
+#endif /* INET6 */
+ default:
+ /* Currently not supported */
+ sctp_m_freem(mout);
+ return;
+ }
+
+ udp = (struct udphdr *)abm;
+ if (port) {
+ udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
+ udp->uh_dport = port;
+ /* set udp->uh_ulen later */
+ udp->uh_sum = 0;
+ iphlen_out += sizeof(struct udphdr);
+ abm = (struct sctp_abort_msg *)((caddr_t)abm + sizeof(struct udphdr));
+ }
+ abm->sh.src_port = sh->dest_port;
+ abm->sh.dest_port = sh->src_port;
+ abm->sh.checksum = 0;
+ if (vtag == 0) {
+ abm->sh.v_tag = sh->v_tag;
+ abm->msg.ch.chunk_flags = SCTP_HAD_NO_TCB;
+ } else {
+ abm->sh.v_tag = htonl(vtag);
+ abm->msg.ch.chunk_flags = 0;
+ }
+ abm->msg.ch.chunk_type = SCTP_ABORT_ASSOCIATION;
+
+ if (err_cause) {
+ struct mbuf *m_tmp = err_cause;
+ int err_len = 0;
+
+ /* get length of the err_cause chain */
+ while (m_tmp != NULL) {
+ err_len += SCTP_BUF_LEN(m_tmp);
+ m_tmp = SCTP_BUF_NEXT(m_tmp);
+ }
+ len = SCTP_BUF_LEN(mout) + err_len;
+ if (err_len % 4) {
+ /* need pad at end of chunk */
+ uint32_t cpthis = 0;
+ int padlen;
+
+ padlen = 4 - (len % 4);
+ m_copyback(mout, len, padlen, (caddr_t)&cpthis);
+ len += padlen;
+ }
+ abm->msg.ch.chunk_length = htons(sizeof(abm->msg.ch) + err_len);
+ } else {
+ len = SCTP_BUF_LEN(mout);
+ abm->msg.ch.chunk_length = htons(sizeof(abm->msg.ch));
+ }
+
+ if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
+ /* no mbufs */
+ sctp_m_freem(mout);
+ return;
+ }
+ if (iph_out != NULL) {
+ sctp_route_t ro;
+ int ret;
+
+ /* zap the stack pointer to the route */
+ bzero(&ro, sizeof ro);
+ if (port) {
+ udp->uh_ulen = htons(len - sizeof(struct ip));
+ udp->uh_sum = in_pseudo(iph_out->ip_src.s_addr, iph_out->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP));
+ }
+ SCTPDBG(SCTP_DEBUG_OUTPUT2, "sctp_send_abort calling ip_output:\n");
+ SCTPDBG_PKT(SCTP_DEBUG_OUTPUT2, iph_out, &abm->sh);
+ /* set IPv4 length */
+ iph_out->ip_len = len;
+ /* out it goes */
+#ifdef SCTP_PACKET_LOGGING
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
+ sctp_packet_log(mout, len);
+#endif
+ SCTP_ATTACH_CHAIN(o_pak, mout, len);
+ if (port) {
+#if defined(SCTP_WITH_NO_CSUM)
+ SCTP_STAT_INCR(sctps_sendnocrc);
+#else
+ abm->sh.checksum = sctp_calculate_cksum(mout, iphlen_out);
+ SCTP_STAT_INCR(sctps_sendswcrc);
+#endif
+ SCTP_ENABLE_UDP_CSUM(o_pak);
+ } else {
+#if defined(SCTP_WITH_NO_CSUM)
+ SCTP_STAT_INCR(sctps_sendnocrc);
+#else
+ mout->m_pkthdr.csum_flags = CSUM_SCTP;
+ mout->m_pkthdr.csum_data = 0;
+ SCTP_STAT_INCR(sctps_sendhwcrc);
+#endif
+ }
+ SCTP_IP_OUTPUT(ret, o_pak, &ro, NULL, vrf_id);
+
+ /* Free the route if we got one back */
+ if (ro.ro_rt)
+ RTFREE(ro.ro_rt);
+ }
+#ifdef INET6
+ if (ip6_out != NULL) {
+ struct route_in6 ro;
+ int ret;
+ struct ifnet *ifp = NULL;
+
+ /* zap the stack pointer to the route */
+ bzero(&ro, sizeof(ro));
+ if (port) {
+ udp->uh_ulen = htons(len - sizeof(struct ip6_hdr));
+ }
+ SCTPDBG(SCTP_DEBUG_OUTPUT2, "sctp_send_abort calling ip6_output:\n");
+ SCTPDBG_PKT(SCTP_DEBUG_OUTPUT2, (struct ip *)ip6_out, &abm->sh);
+ ip6_out->ip6_plen = len - sizeof(*ip6_out);
+#ifdef SCTP_PACKET_LOGGING
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
+ sctp_packet_log(mout, len);
+#endif
+ SCTP_ATTACH_CHAIN(o_pak, mout, len);
+ if (port) {
+#if defined(SCTP_WITH_NO_CSUM)
+ SCTP_STAT_INCR(sctps_sendnocrc);
+#else
+ abm->sh.checksum = sctp_calculate_cksum(mout, sizeof(struct ip6_hdr) + sizeof(struct udphdr));
+ SCTP_STAT_INCR(sctps_sendswcrc);
+#endif
+ if ((udp->uh_sum = in6_cksum(o_pak, IPPROTO_UDP, sizeof(struct ip6_hdr), len - sizeof(struct ip6_hdr))) == 0) {
+ udp->uh_sum = 0xffff;
+ }
+ } else {
+#if defined(SCTP_WITH_NO_CSUM)
+ SCTP_STAT_INCR(sctps_sendnocrc);
+#else
+ mout->m_pkthdr.csum_flags = CSUM_SCTP;
+ mout->m_pkthdr.csum_data = 0;
+ SCTP_STAT_INCR(sctps_sendhwcrc);
+#endif
+ }
+ SCTP_IP6_OUTPUT(ret, o_pak, &ro, &ifp, NULL, vrf_id);
+
+ /* Free the route if we got one back */
+ if (ro.ro_rt)
+ RTFREE(ro.ro_rt);
+ }
+#endif
+ SCTP_STAT_INCR(sctps_sendpackets);
+ SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
+ SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
+}
+
+void
+sctp_send_operr_to(struct mbuf *m, int iphlen, struct mbuf *scm, uint32_t vtag,
+ uint32_t vrf_id, uint16_t port)
+{
+ struct mbuf *o_pak;
+ struct sctphdr *sh, *sh_out;
+ struct sctp_chunkhdr *ch;
+ struct ip *iph, *iph_out;
+ struct udphdr *udp = NULL;
+ struct mbuf *mout;
+
+#ifdef INET6
+ struct ip6_hdr *ip6, *ip6_out;
+
+#endif
+ int iphlen_out, len;
+
+ iph = mtod(m, struct ip *);
+ sh = (struct sctphdr *)((caddr_t)iph + iphlen);
+ switch (iph->ip_v) {
+ case IPVERSION:
+ len = (sizeof(struct ip) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr));
+ break;
+#ifdef INET6
+ case IPV6_VERSION >> 4:
+ len = (sizeof(struct ip6_hdr) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr));
+ break;
+#endif
+ default:
+ if (scm) {
+ sctp_m_freem(scm);
+ }
+ return;
+ }
+ if (port) {
+ len += sizeof(struct udphdr);
+ }
+ mout = sctp_get_mbuf_for_msg(len + max_linkhdr, 1, M_DONTWAIT, 1, MT_DATA);
+ if (mout == NULL) {
+ if (scm) {
+ sctp_m_freem(scm);
+ }
+ return;
+ }
+ SCTP_BUF_RESV_UF(mout, max_linkhdr);
+ SCTP_BUF_LEN(mout) = len;
+ SCTP_BUF_NEXT(mout) = scm;
+ iph_out = NULL;
+#ifdef INET6
+ ip6_out = NULL;
+#endif
+ switch (iph->ip_v) {
+ case IPVERSION:
+ iph_out = mtod(mout, struct ip *);
+
+ /* Fill in the IP header for the OPERATION-ERROR */
+ iph_out->ip_v = IPVERSION;
+ iph_out->ip_hl = (sizeof(struct ip) / 4);
+ iph_out->ip_tos = (u_char)0;
+ iph_out->ip_id = 0;
+ iph_out->ip_off = 0;
+ iph_out->ip_ttl = MAXTTL;
+ if (port) {
+ iph_out->ip_p = IPPROTO_UDP;
+ } else {
+ iph_out->ip_p = IPPROTO_SCTP;
+ }
+ iph_out->ip_src.s_addr = iph->ip_dst.s_addr;
+ iph_out->ip_dst.s_addr = iph->ip_src.s_addr;
+ /* let IP layer calculate this */
+ iph_out->ip_sum = 0;
+
+ iphlen_out = sizeof(struct ip);
+ sh_out = (struct sctphdr *)((caddr_t)iph_out + iphlen_out);
+ break;
+#ifdef INET6
+ case IPV6_VERSION >> 4:
+ ip6 = (struct ip6_hdr *)iph;
+ ip6_out = mtod(mout, struct ip6_hdr *);
+
+ /* Fill in the IP6 header for the OPERATION-ERROR */
+ ip6_out->ip6_flow = ip6->ip6_flow;
+ ip6_out->ip6_hlim = MODULE_GLOBAL(ip6_defhlim);
+ if (port) {
+ ip6_out->ip6_nxt = IPPROTO_UDP;
+ } else {
+ ip6_out->ip6_nxt = IPPROTO_SCTP;
+ }
+ ip6_out->ip6_src = ip6->ip6_dst;
+ ip6_out->ip6_dst = ip6->ip6_src;
+
+ iphlen_out = sizeof(struct ip6_hdr);
+ sh_out = (struct sctphdr *)((caddr_t)ip6_out + iphlen_out);
+ break;
+#endif /* INET6 */
+ default:
+ /* Currently not supported */
+ sctp_m_freem(mout);
+ return;
+ }
+
+ udp = (struct udphdr *)sh_out;
+ if (port) {
+ udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
+ udp->uh_dport = port;
+ /* set udp->uh_ulen later */
+ udp->uh_sum = 0;
+ iphlen_out += sizeof(struct udphdr);
+ sh_out = (struct sctphdr *)((caddr_t)udp + sizeof(struct udphdr));
+ }
+ sh_out->src_port = sh->dest_port;
+ sh_out->dest_port = sh->src_port;
+ sh_out->v_tag = vtag;
+ sh_out->checksum = 0;
+
+ ch = (struct sctp_chunkhdr *)((caddr_t)sh_out + sizeof(struct sctphdr));
+ ch->chunk_type = SCTP_OPERATION_ERROR;
+ ch->chunk_flags = 0;
+
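+ /*
+ * scm, if given, holds the pre-built error cause(s); it was
+ * chained after the header above, so account for its length
+ * (plus any pad) in the chunk length.
+ */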
+ if (scm) {
+ struct mbuf *m_tmp = scm;
+ int cause_len = 0;
+
+ /* get length of the err_cause chain */
+ while (m_tmp != NULL) {
+ cause_len += SCTP_BUF_LEN(m_tmp);
+ m_tmp = SCTP_BUF_NEXT(m_tmp);
+ }
+ len = SCTP_BUF_LEN(mout) + cause_len;
+ if (cause_len % 4) {
+ /* need pad at end of chunk */
+ uint32_t cpthis = 0;
+ int padlen;
+
+ padlen = 4 - (len % 4);
+ m_copyback(mout, len, padlen, (caddr_t)&cpthis);
+ len += padlen;
+ }
+ ch->chunk_length = htons(sizeof(struct sctp_chunkhdr) + cause_len);
+ } else {
+ len = SCTP_BUF_LEN(mout);
+ ch->chunk_length = htons(sizeof(struct sctp_chunkhdr));
+ }
+
+ if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
+ /* no mbufs */
+ sctp_m_freem(mout);
+ return;
+ }
+ if (iph_out != NULL) {
+ sctp_route_t ro;
+ int ret;
+
+ /* zap the stack pointer to the route */
+ bzero(&ro, sizeof ro);
+ if (port) {
+ udp->uh_ulen = htons(len - sizeof(struct ip));
+ udp->uh_sum = in_pseudo(iph_out->ip_src.s_addr, iph_out->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP));
+ }
+ /* set IPv4 length */
+ iph_out->ip_len = len;
+ /* out it goes */
+#ifdef SCTP_PACKET_LOGGING
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
+ sctp_packet_log(mout, len);
+#endif
+ SCTP_ATTACH_CHAIN(o_pak, mout, len);
+ if (port) {
+#if defined(SCTP_WITH_NO_CSUM)
+ SCTP_STAT_INCR(sctps_sendnocrc);
+#else
+ sh_out->checksum = sctp_calculate_cksum(mout, iphlen_out);
+ SCTP_STAT_INCR(sctps_sendswcrc);
+#endif
+ SCTP_ENABLE_UDP_CSUM(o_pak);
+ } else {
+#if defined(SCTP_WITH_NO_CSUM)
+ SCTP_STAT_INCR(sctps_sendnocrc);
+#else
+ mout->m_pkthdr.csum_flags = CSUM_SCTP;
+ mout->m_pkthdr.csum_data = 0;
+ SCTP_STAT_INCR(sctps_sendhwcrc);
+#endif
+ }
+ SCTP_IP_OUTPUT(ret, o_pak, &ro, NULL, vrf_id);
+
+ /* Free the route if we got one back */
+ if (ro.ro_rt)
+ RTFREE(ro.ro_rt);
+ }
+#ifdef INET6
+ if (ip6_out != NULL) {
+ struct route_in6 ro;
+ int ret;
+ struct ifnet *ifp = NULL;
+
+ /* zap the stack pointer to the route */
+ bzero(&ro, sizeof(ro));
+ if (port) {
+ udp->uh_ulen = htons(len - sizeof(struct ip6_hdr));
+ }
+ ip6_out->ip6_plen = len - sizeof(*ip6_out);
+#ifdef SCTP_PACKET_LOGGING
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
+ sctp_packet_log(mout, len);
+#endif
+ SCTP_ATTACH_CHAIN(o_pak, mout, len);
+ if (port) {
+#if defined(SCTP_WITH_NO_CSUM)
+ SCTP_STAT_INCR(sctps_sendnocrc);
+#else
+ sh_out->checksum = sctp_calculate_cksum(mout, sizeof(struct ip6_hdr) + sizeof(struct udphdr));
+ SCTP_STAT_INCR(sctps_sendswcrc);
+#endif
+ if ((udp->uh_sum = in6_cksum(o_pak, IPPROTO_UDP, sizeof(struct ip6_hdr), len - sizeof(struct ip6_hdr))) == 0) {
+ udp->uh_sum = 0xffff;
+ }
+ } else {
+#if defined(SCTP_WITH_NO_CSUM)
+ SCTP_STAT_INCR(sctps_sendnocrc);
+#else
+ mout->m_pkthdr.csum_flags = CSUM_SCTP;
+ mout->m_pkthdr.csum_data = 0;
+ SCTP_STAT_INCR(sctps_sendhwcrc);
+#endif
+ }
+ SCTP_IP6_OUTPUT(ret, o_pak, &ro, &ifp, NULL, vrf_id);
+
+ /* Free the route if we got one back */
+ if (ro.ro_rt)
+ RTFREE(ro.ro_rt);
+ }
+#endif
+ SCTP_STAT_INCR(sctps_sendpackets);
+ SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
+ SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
+}
+
+static struct mbuf *
+sctp_copy_resume(struct sctp_stream_queue_pending *sp,
+ struct uio *uio,
+ struct sctp_sndrcvinfo *srcv,
+ int max_send_len,
+ int user_marks_eor,
+ int *error,
+ uint32_t * sndout,
+ struct mbuf **new_tail)
+{
+ struct mbuf *m;
+
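+ /*
+ * m_uiotombuf() copies the user data straight into a fresh mbuf
+ * chain, marking M_EOR when explicit EOR mode asks for it.
+ */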
+ m = m_uiotombuf(uio, M_WAITOK, max_send_len, 0,
+ (M_PKTHDR | (user_marks_eor ? M_EOR : 0)));
+ if (m == NULL) {
+ SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
+ *error = ENOMEM;
+ } else {
+ *sndout = m_length(m, NULL);
+ *new_tail = m_last(m);
+ }
+ return (m);
+}
+
+static int
+sctp_copy_one(struct sctp_stream_queue_pending *sp,
+ struct uio *uio,
+ int resv_upfront)
+{
+ int left;
+
+ left = sp->length;
+ sp->data = m_uiotombuf(uio, M_WAITOK, sp->length,
+ resv_upfront, 0);
+ if (sp->data == NULL) {
+ SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
+ return (ENOMEM);
+ }
+ sp->tail_mbuf = m_last(sp->data);
+ return (0);
+}
+
+
+
+static struct sctp_stream_queue_pending *
+sctp_copy_it_in(struct sctp_tcb *stcb,
+ struct sctp_association *asoc,
+ struct sctp_sndrcvinfo *srcv,
+ struct uio *uio,
+ struct sctp_nets *net,
+ int max_send_len,
+ int user_marks_eor,
+ int *error,
+ int non_blocking)
+{
+ /*-
+ * This routine must be very careful in its work. Protocol
+ * processing is up and running so care must be taken to spl...()
+ * when you need to do something that may affect the stcb/asoc. The
+ * sb is locked however. When data is copied the protocol processing
+ * should be enabled since this is a slower operation...
+ */
+ struct sctp_stream_queue_pending *sp = NULL;
+ int resv_in_first;
+
+ *error = 0;
+ /* Now can we send this? */
+ if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) ||
+ (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
+ (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
+ (asoc->state & SCTP_STATE_SHUTDOWN_PENDING)) {
+ /* got data while shutting down */
+ SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
+ *error = ECONNRESET;
+ goto out_now;
+ }
+ sctp_alloc_a_strmoq(stcb, sp);
+ if (sp == NULL) {
+ SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
+ *error = ENOMEM;
+ goto out_now;
+ }
+ sp->act_flags = 0;
+ sp->sender_all_done = 0;
+ sp->sinfo_flags = srcv->sinfo_flags;
+ sp->timetolive = srcv->sinfo_timetolive;
+ sp->ppid = srcv->sinfo_ppid;
+ sp->context = srcv->sinfo_context;
+ sp->strseq = 0;
+ (void)SCTP_GETTIME_TIMEVAL(&sp->ts);
+
+ sp->stream = srcv->sinfo_stream;
+ sp->length = min(uio->uio_resid, max_send_len);
+ if ((sp->length == (uint32_t) uio->uio_resid) &&
+ ((user_marks_eor == 0) ||
+ (srcv->sinfo_flags & SCTP_EOF) ||
+ (user_marks_eor && (srcv->sinfo_flags & SCTP_EOR)))) {
+ sp->msg_is_complete = 1;
+ } else {
+ sp->msg_is_complete = 0;
+ }
+ sp->sender_all_done = 0;
+ sp->some_taken = 0;
+ sp->put_last_out = 0;
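+ /*
+ * Reserve room in the first mbuf for the DATA chunk header so it
+ * can be laid down later without an extra allocation.
+ */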
+ resv_in_first = sizeof(struct sctp_data_chunk);
+ sp->data = sp->tail_mbuf = NULL;
+ if (sp->length == 0) {
+ *error = 0;
+ goto skip_copy;
+ }
+ sp->auth_keyid = stcb->asoc.authinfo.active_keyid;
+ if (sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks)) {
+ sctp_auth_key_acquire(stcb, stcb->asoc.authinfo.active_keyid);
+ sp->holds_key_ref = 1;
+ }
+ *error = sctp_copy_one(sp, uio, resv_in_first);
+skip_copy:
+ if (*error) {
+ sctp_free_a_strmoq(stcb, sp);
+ sp = NULL;
+ } else {
+ if (sp->sinfo_flags & SCTP_ADDR_OVER) {
+ sp->net = net;
+ atomic_add_int(&sp->net->ref_count, 1);
+ } else {
+ sp->net = NULL;
+ }
+ sctp_set_prsctp_policy(sp);
+ }
+out_now:
+ return (sp);
+}
+
+
+int
+sctp_sosend(struct socket *so,
+ struct sockaddr *addr,
+ struct uio *uio,
+ struct mbuf *top,
+ struct mbuf *control,
+ int flags,
+ struct thread *p
+)
+{
+ int error, use_rcvinfo = 0;
+ struct sctp_sndrcvinfo srcv;
+ struct sockaddr *addr_to_use;
+
+#if defined(INET) && defined(INET6)
+ struct sockaddr_in sin;
+
+#endif
+
+ if (control) {
+ /* process cmsg snd/rcv info (maybe an assoc-id) */
+ if (sctp_find_cmsg(SCTP_SNDRCV, (void *)&srcv, control,
+ sizeof(srcv))) {
+ /* got one */
+ use_rcvinfo = 1;
+ }
+ }
+ addr_to_use = addr;
+#if defined(INET) && defined(INET6)
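+ /*
+ * An IPv4-mapped IPv6 address is converted back to plain
+ * AF_INET before we hand it down.
+ */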
+ if ((addr) && (addr->sa_family == AF_INET6)) {
+ struct sockaddr_in6 *sin6;
+
+ sin6 = (struct sockaddr_in6 *)addr;
+ if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
+ in6_sin6_2_sin(&sin, sin6);
+ addr_to_use = (struct sockaddr *)&sin;
+ }
+ }
+#endif
+ error = sctp_lower_sosend(so, addr_to_use, uio, top,
+ control,
+ flags,
+ use_rcvinfo ? &srcv : NULL
+ ,p
+ );
+ return (error);
+}
+
+
+int
+sctp_lower_sosend(struct socket *so,
+ struct sockaddr *addr,
+ struct uio *uio,
+ struct mbuf *i_pak,
+ struct mbuf *control,
+ int flags,
+ struct sctp_sndrcvinfo *srcv
+ ,
+ struct thread *p
+)
+{
+ unsigned int sndlen = 0, max_len;
+ int error, len;
+ struct mbuf *top = NULL;
+ int queue_only = 0, queue_only_for_init = 0;
+ int free_cnt_applied = 0;
+ int un_sent;
+ int now_filled = 0;
+ unsigned int inqueue_bytes = 0;
+ struct sctp_block_entry be;
+ struct sctp_inpcb *inp;
+ struct sctp_tcb *stcb = NULL;
+ struct timeval now;
+ struct sctp_nets *net;
+ struct sctp_association *asoc;
+ struct sctp_inpcb *t_inp;
+ int user_marks_eor;
+ int create_lock_applied = 0;
+ int nagle_applies = 0;
+ int some_on_control = 0;
+ int got_all_of_the_send = 0;
+ int hold_tcblock = 0;
+ int non_blocking = 0;
+ uint32_t local_add_more, local_soresv = 0;
+ uint16_t port;
+ uint16_t sinfo_flags;
+ sctp_assoc_t sinfo_assoc_id;
+
+ error = 0;
+ net = NULL;
+ stcb = NULL;
+ asoc = NULL;
+
+ t_inp = inp = (struct sctp_inpcb *)so->so_pcb;
+ if (inp == NULL) {
+ SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
+ error = EINVAL;
+ if (i_pak) {
+ SCTP_RELEASE_PKT(i_pak);
+ }
+ return (error);
+ }
+ if ((uio == NULL) && (i_pak == NULL)) {
+ SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
+ return (EINVAL);
+ }
+ user_marks_eor = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
+ atomic_add_int(&inp->total_sends, 1);
+ if (uio) {
+ if (uio->uio_resid < 0) {
+ SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
+ return (EINVAL);
+ }
+ sndlen = uio->uio_resid;
+ } else {
+ top = SCTP_HEADER_TO_CHAIN(i_pak);
+ sndlen = SCTP_HEADER_LEN(i_pak);
+ }
+ SCTPDBG(SCTP_DEBUG_OUTPUT1, "Send called addr:%p send length %d\n",
+ addr,
+ sndlen);
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
+ (inp->sctp_socket->so_qlimit)) {
+ /* The listener can NOT send */
+ SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOTCONN);
+ error = ENOTCONN;
+ goto out_unlocked;
+ }
+ /*-
+ * Pre-screen the address: if one is given, its length field
+ * (sin_len/sin6_len) must be set correctly!
+ */
+ if (addr) {
+ union sctp_sockstore *raddr = (union sctp_sockstore *)addr;
+
+ switch (raddr->sa.sa_family) {
+#if defined(INET)
+ case AF_INET:
+ if (raddr->sin.sin_len != sizeof(struct sockaddr_in)) {
+ SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
+ error = EINVAL;
+ goto out_unlocked;
+ }
+ port = raddr->sin.sin_port;
+ break;
+#endif
+#if defined(INET6)
+ case AF_INET6:
+ if (raddr->sin6.sin6_len != sizeof(struct sockaddr_in6)) {
+ SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
+ error = EINVAL;
+ goto out_unlocked;
+ }
+ port = raddr->sin6.sin6_port;
+ break;
+#endif
+ default:
+ SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EAFNOSUPPORT);
+ error = EAFNOSUPPORT;
+ goto out_unlocked;
+ }
+ } else
+ port = 0;
+
+ if (srcv) {
+ sinfo_flags = srcv->sinfo_flags;
+ sinfo_assoc_id = srcv->sinfo_assoc_id;
+ if (INVALID_SINFO_FLAG(sinfo_flags) ||
+ PR_SCTP_INVALID_POLICY(sinfo_flags)) {
+ SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
+ error = EINVAL;
+ goto out_unlocked;
+ }
+ if (srcv->sinfo_flags)
+ SCTP_STAT_INCR(sctps_sends_with_flags);
+ } else {
+ sinfo_flags = inp->def_send.sinfo_flags;
+ sinfo_assoc_id = inp->def_send.sinfo_assoc_id;
+ }
+ if (sinfo_flags & SCTP_SENDALL) {
+ /* it's a sendall */
+ error = sctp_sendall(inp, uio, top, srcv);
+ top = NULL;
+ goto out_unlocked;
+ }
+ if ((sinfo_flags & SCTP_ADDR_OVER) && (addr == NULL)) {
+ SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
+ error = EINVAL;
+ goto out_unlocked;
+ }
+ /* now we must find the assoc */
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) ||
+ (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
+ SCTP_INP_RLOCK(inp);
+ stcb = LIST_FIRST(&inp->sctp_asoc_list);
+ if (stcb == NULL) {
+ SCTP_INP_RUNLOCK(inp);
+ SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOTCONN);
+ error = ENOTCONN;
+ goto out_unlocked;
+ }
+ SCTP_TCB_LOCK(stcb);
+ hold_tcblock = 1;
+ SCTP_INP_RUNLOCK(inp);
+ } else if (sinfo_assoc_id) {
+ stcb = sctp_findassociation_ep_asocid(inp, sinfo_assoc_id, 0);
+ } else if (addr) {
+ /*-
+ * Since we did not use findep we must
+ * increment it, and if we don't find a tcb
+ * decrement it.
+ */
+ SCTP_INP_WLOCK(inp);
+ SCTP_INP_INCR_REF(inp);
+ SCTP_INP_WUNLOCK(inp);
+ stcb = sctp_findassociation_ep_addr(&t_inp, addr, &net, NULL, NULL);
+ if (stcb == NULL) {
+ SCTP_INP_WLOCK(inp);
+ SCTP_INP_DECR_REF(inp);
+ SCTP_INP_WUNLOCK(inp);
+ } else {
+ hold_tcblock = 1;
+ }
+ }
+ if ((stcb == NULL) && (addr)) {
+ /* Possible implicit send? */
+ SCTP_ASOC_CREATE_LOCK(inp);
+ create_lock_applied = 1;
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
+ (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
+ /* Should I really unlock ? */
+ SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
+ error = EINVAL;
+ goto out_unlocked;
+
+ }
+ if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) &&
+ (addr->sa_family == AF_INET6)) {
+ SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
+ error = EINVAL;
+ goto out_unlocked;
+ }
+ SCTP_INP_WLOCK(inp);
+ SCTP_INP_INCR_REF(inp);
+ SCTP_INP_WUNLOCK(inp);
+ /* With the lock applied look again */
+ stcb = sctp_findassociation_ep_addr(&t_inp, addr, &net, NULL, NULL);
+ if (stcb == NULL) {
+ SCTP_INP_WLOCK(inp);
+ SCTP_INP_DECR_REF(inp);
+ SCTP_INP_WUNLOCK(inp);
+ } else {
+ hold_tcblock = 1;
+ }
+ if (t_inp != inp) {
+ SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOTCONN);
+ error = ENOTCONN;
+ goto out_unlocked;
+ }
+ }
+ if (stcb == NULL) {
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+ (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
+ SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOTCONN);
+ error = ENOTCONN;
+ goto out_unlocked;
+ }
+ if (addr == NULL) {
+ SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOENT);
+ error = ENOENT;
+ goto out_unlocked;
+ } else {
+ /*
+ * UDP style, we must go ahead and start the INIT
+ * process
+ */
+ uint32_t vrf_id;
+
+ if ((sinfo_flags & SCTP_ABORT) ||
+ ((sinfo_flags & SCTP_EOF) && (sndlen == 0))) {
+ /*-
+ * User asks to abort a non-existent assoc,
+ * or EOF a non-existent assoc with no data.
+ */
+ SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOENT);
+ error = ENOENT;
+ goto out_unlocked;
+ }
+ /* get an asoc/stcb struct */
+ vrf_id = inp->def_vrf_id;
+#ifdef INVARIANTS
+ if (create_lock_applied == 0) {
+ panic("Error, should hold create lock and I don't?");
+ }
+#endif
+ stcb = sctp_aloc_assoc(inp, addr, &error, 0, vrf_id,
+ p
+ );
+ if (stcb == NULL) {
+ /* Error is setup for us in the call */
+ goto out_unlocked;
+ }
+ if (create_lock_applied) {
+ SCTP_ASOC_CREATE_UNLOCK(inp);
+ create_lock_applied = 0;
+ } else {
+ SCTP_PRINTF("Huh-3? create lock should have been on??\n");
+ }
+ /*
+ * Turn on queue only flag to prevent data from
+ * being sent
+ */
+ queue_only = 1;
+ asoc = &stcb->asoc;
+ SCTP_SET_STATE(asoc, SCTP_STATE_COOKIE_WAIT);
+ (void)SCTP_GETTIME_TIMEVAL(&asoc->time_entered);
+
+ /* initialize authentication params for the assoc */
+ sctp_initialize_auth_params(inp, stcb);
+
+ if (control) {
+ /*
+ * see if a init structure exists in cmsg
+ * headers
+ */
+ struct sctp_initmsg initm;
+ int i;
+
+ if (sctp_find_cmsg(SCTP_INIT, (void *)&initm, control,
+ sizeof(initm))) {
+ /*
+ * we have an INIT override of the
+ * default
+ */
+ if (initm.sinit_max_attempts)
+ asoc->max_init_times = initm.sinit_max_attempts;
+ if (initm.sinit_num_ostreams)
+ asoc->pre_open_streams = initm.sinit_num_ostreams;
+ if (initm.sinit_max_instreams)
+ asoc->max_inbound_streams = initm.sinit_max_instreams;
+ if (initm.sinit_max_init_timeo)
+ asoc->initial_init_rto_max = initm.sinit_max_init_timeo;
+ if (asoc->streamoutcnt < asoc->pre_open_streams) {
+ struct sctp_stream_out *tmp_str;
+ int had_lock = 0;
+
+ /* Default is NOT correct */
+ SCTPDBG(SCTP_DEBUG_OUTPUT1, "Ok, defout:%d pre_open:%d\n",
+ asoc->streamoutcnt, asoc->pre_open_streams);
+ /*
+ * If this allocation fails we fall back
+ * to the current stream count below.
+ */
+
+ if (hold_tcblock) {
+ had_lock = 1;
+ SCTP_TCB_UNLOCK(stcb);
+ }
+ SCTP_MALLOC(tmp_str,
+ struct sctp_stream_out *,
+ (asoc->pre_open_streams *
+ sizeof(struct sctp_stream_out)),
+ SCTP_M_STRMO);
+ if (had_lock) {
+ SCTP_TCB_LOCK(stcb);
+ }
+ if (tmp_str != NULL) {
+ SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
+ asoc->strmout = tmp_str;
+ asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams;
+ } else {
+ asoc->pre_open_streams = asoc->streamoutcnt;
+ }
+ for (i = 0; i < asoc->streamoutcnt; i++) {
+ /*-
+ * The inbound side must be set to
+ * 0xffff. Also NOTE: when we get
+ * the INIT-ACK back (for the INIT
+ * sender) we MUST reduce the count
+ * (streamoutcnt), but first check
+ * whether we sent on any of the
+ * upper streams that were dropped
+ * (if some were). Those that were
+ * dropped must be reported to the
+ * upper layer as failed to send.
+ */
+ asoc->strmout[i].next_sequence_sent = 0x0;
+ TAILQ_INIT(&asoc->strmout[i].outqueue);
+ asoc->strmout[i].stream_no = i;
+ asoc->strmout[i].last_msg_incomplete = 0;
+ asoc->strmout[i].next_spoke.tqe_next = 0;
+ asoc->strmout[i].next_spoke.tqe_prev = 0;
+ }
+ }
+ }
+ }
+ hold_tcblock = 1;
+ /* out with the INIT */
+ queue_only_for_init = 1;
+ /*-
+ * we may want to dig in after this call and adjust the MTU
+ * value. It defaulted to 1500 (constant) but the ro
+ * structure may now have an update and thus we may need to
+ * change it BEFORE we append the message.
+ */
+ }
+ } else
+ asoc = &stcb->asoc;
+ if (srcv == NULL)
+ srcv = (struct sctp_sndrcvinfo *)&asoc->def_send;
+ if (srcv->sinfo_flags & SCTP_ADDR_OVER) {
+ if (addr)
+ net = sctp_findnet(stcb, addr);
+ else
+ net = NULL;
+ if ((net == NULL) ||
+ ((port != 0) && (port != stcb->rport))) {
+ SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
+ error = EINVAL;
+ goto out_unlocked;
+ }
+ } else {
+ net = stcb->asoc.primary_destination;
+ }
+ atomic_add_int(&stcb->total_sends, 1);
+ /* Keep the stcb from being freed under our feet */
+ atomic_add_int(&asoc->refcnt, 1);
+ free_cnt_applied = 1;
+
+ if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NO_FRAGMENT)) {
+ if (sndlen > asoc->smallest_mtu) {
+ SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EMSGSIZE);
+ error = EMSGSIZE;
+ goto out_unlocked;
+ }
+ }
+ if ((SCTP_SO_IS_NBIO(so)
+ || (flags & MSG_NBIO)
+ )) {
+ non_blocking = 1;
+ }
+ /* would we block? */
+ if (non_blocking) {
+ if (hold_tcblock == 0) {
+ SCTP_TCB_LOCK(stcb);
+ hold_tcblock = 1;
+ }
+ inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
+ if ((SCTP_SB_LIMIT_SND(so) < (sndlen + inqueue_bytes + stcb->asoc.sb_send_resv)) ||
+ (stcb->asoc.chunks_on_out_queue >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) {
+ SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EWOULDBLOCK);
+ if (sndlen > SCTP_SB_LIMIT_SND(so))
+ error = EMSGSIZE;
+ else
+ error = EWOULDBLOCK;
+ goto out_unlocked;
+ }
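+ /*
+ * Reserve our bytes in the send buffer now so concurrent
+ * senders can't oversubscribe it while we copy.
+ */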
+ stcb->asoc.sb_send_resv += sndlen;
+ SCTP_TCB_UNLOCK(stcb);
+ hold_tcblock = 0;
+ } else {
+ atomic_add_int(&stcb->asoc.sb_send_resv, sndlen);
+ }
+ local_soresv = sndlen;
+ if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
+ SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
+ error = ECONNRESET;
+ goto out_unlocked;
+ }
+ if (create_lock_applied) {
+ SCTP_ASOC_CREATE_UNLOCK(inp);
+ create_lock_applied = 0;
+ }
+ if (asoc->stream_reset_outstanding) {
+ /*
+ * Can't queue any data while stream reset is underway.
+ */
+ SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EAGAIN);
+ error = EAGAIN;
+ goto out_unlocked;
+ }
+ if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) ||
+ (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)) {
+ queue_only = 1;
+ }
+ /* we are now done with all control */
+ if (control) {
+ sctp_m_freem(control);
+ control = NULL;
+ }
+ if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) ||
+ (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
+ (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
+ (asoc->state & SCTP_STATE_SHUTDOWN_PENDING)) {
+ if (srcv->sinfo_flags & SCTP_ABORT) {
+ ;
+ } else {
+ SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
+ error = ECONNRESET;
+ goto out_unlocked;
+ }
+ }
+ /* Ok, we will attempt a msgsnd :> */
+ if (p) {
+ p->td_ru.ru_msgsnd++;
+ }
+ /* Are we aborting? */
+ if (srcv->sinfo_flags & SCTP_ABORT) {
+ struct mbuf *mm;
+ int tot_demand, tot_out = 0, max_out;
+
+ SCTP_STAT_INCR(sctps_sends_with_abort);
+ if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) ||
+ (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)) {
+ /* It has to be up before we abort */
+ /* how big is the user initiated abort? */
+ SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
+ error = EINVAL;
+ goto out;
+ }
+ if (hold_tcblock) {
+ SCTP_TCB_UNLOCK(stcb);
+ hold_tcblock = 0;
+ }
+ if (top) {
+ struct mbuf *cntm = NULL;
+
+ mm = sctp_get_mbuf_for_msg(1, 0, M_WAIT, 1, MT_DATA);
+ if (sndlen != 0) {
+ cntm = top;
+ while (cntm) {
+ tot_out += SCTP_BUF_LEN(cntm);
+ cntm = SCTP_BUF_NEXT(cntm);
+ }
+ }
+ tot_demand = (tot_out + sizeof(struct sctp_paramhdr));
+ } else {
+ /* Must fit in a MTU */
+ tot_out = sndlen;
+ tot_demand = (tot_out + sizeof(struct sctp_paramhdr));
+ if (tot_demand > SCTP_DEFAULT_ADD_MORE) {
+ /* Too big */
+ SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EMSGSIZE);
+ error = EMSGSIZE;
+ goto out;
+ }
+ mm = sctp_get_mbuf_for_msg(tot_demand, 0, M_WAIT, 1, MT_DATA);
+ }
+ if (mm == NULL) {
+ SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
+ error = ENOMEM;
+ goto out;
+ }
+ max_out = asoc->smallest_mtu - sizeof(struct sctp_paramhdr);
+ max_out -= sizeof(struct sctp_abort_msg);
+ if (tot_out > max_out) {
+ tot_out = max_out;
+ }
+ if (mm) {
+ struct sctp_paramhdr *ph;
+
+ /* now move forward the data pointer */
+ ph = mtod(mm, struct sctp_paramhdr *);
+ ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
+ ph->param_length = htons((sizeof(struct sctp_paramhdr) + tot_out));
+ ph++;
+ SCTP_BUF_LEN(mm) = tot_out + sizeof(struct sctp_paramhdr);
+ if (top == NULL) {
+ error = uiomove((caddr_t)ph, (int)tot_out, uio);
+ if (error) {
+ /*-
+ * Even if we can't get the user's data we
+ * still abort; we just don't get to send
+ * the user's note. :-0
+ */
+ sctp_m_freem(mm);
+ mm = NULL;
+ }
+ } else {
+ if (sndlen != 0) {
+ SCTP_BUF_NEXT(mm) = top;
+ }
+ }
+ }
+ if (hold_tcblock == 0) {
+ SCTP_TCB_LOCK(stcb);
+ hold_tcblock = 1;
+ }
+ atomic_add_int(&stcb->asoc.refcnt, -1);
+ free_cnt_applied = 0;
+ /* release this lock, otherwise we hang on ourselves */
+ sctp_abort_an_association(stcb->sctp_ep, stcb,
+ SCTP_RESPONSE_TO_USER_REQ,
+ mm, SCTP_SO_LOCKED);
+ /* now relock the stcb so everything is sane */
+ hold_tcblock = 0;
+ stcb = NULL;
+ /*
+ * In this case top is already chained to mm, so avoid a
+ * double free: we free top below if top != NULL, and the
+ * driver would free it after sending the packet out.
+ */
+ if (sndlen != 0) {
+ top = NULL;
+ }
+ goto out_unlocked;
+ }
+ /* Calculate the maximum we can send */
+ inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
+ if (SCTP_SB_LIMIT_SND(so) > inqueue_bytes) {
+ if (non_blocking) {
+ /* we already checked for non-blocking above. */
+ max_len = sndlen;
+ } else {
+ max_len = SCTP_SB_LIMIT_SND(so) - inqueue_bytes;
+ }
+ } else {
+ max_len = 0;
+ }
+ if (hold_tcblock) {
+ SCTP_TCB_UNLOCK(stcb);
+ hold_tcblock = 0;
+ }
+ /* Is the stream no. valid? */
+ if (srcv->sinfo_stream >= asoc->streamoutcnt) {
+ /* Invalid stream number */
+ SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
+ error = EINVAL;
+ goto out_unlocked;
+ }
+ if (asoc->strmout == NULL) {
+ /* huh? software error */
+ SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EFAULT);
+ error = EFAULT;
+ goto out_unlocked;
+ }
+ /* Unless E_EOR mode is on, we must make a send FIT in one call. */
+ if ((user_marks_eor == 0) &&
+ (sndlen > SCTP_SB_LIMIT_SND(stcb->sctp_socket))) {
+ /* It will NEVER fit */
+ SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EMSGSIZE);
+ error = EMSGSIZE;
+ goto out_unlocked;
+ }
+ if ((uio == NULL) && user_marks_eor) {
+ /*-
+ * We do not support eeor mode for
+ * sending with mbuf chains (like sendfile).
+ */
+ SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
+ error = EINVAL;
+ goto out_unlocked;
+ }
+ if (user_marks_eor) {
+ local_add_more = min(SCTP_SB_LIMIT_SND(so), SCTP_BASE_SYSCTL(sctp_add_more_threshold));
+ } else {
+ /*-
+ * For non-eeor the whole message must fit in
+ * the socket send buffer.
+ */
+ local_add_more = sndlen;
+ }
+ len = 0;
+ if (non_blocking) {
+ goto skip_preblock;
+ }
+ if (((max_len <= local_add_more) &&
+ (SCTP_SB_LIMIT_SND(so) >= local_add_more)) ||
+ (max_len == 0) ||
+ ((stcb->asoc.chunks_on_out_queue + stcb->asoc.stream_queue_cnt) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) {
+ /* No room right now ! */
+ SOCKBUF_LOCK(&so->so_snd);
+ inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
+ while ((SCTP_SB_LIMIT_SND(so) < (inqueue_bytes + local_add_more)) ||
+ ((stcb->asoc.stream_queue_cnt + stcb->asoc.chunks_on_out_queue) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) {
+ SCTPDBG(SCTP_DEBUG_OUTPUT1, "pre_block limit:%u <(inq:%d + %d) || (%d+%d > %d)\n",
+ (unsigned int)SCTP_SB_LIMIT_SND(so),
+ inqueue_bytes,
+ local_add_more,
+ stcb->asoc.stream_queue_cnt,
+ stcb->asoc.chunks_on_out_queue,
+ SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue));
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
+ sctp_log_block(SCTP_BLOCK_LOG_INTO_BLKA, so, asoc, sndlen);
+ }
+ be.error = 0;
+ stcb->block_entry = &be;
+ error = sbwait(&so->so_snd);
+ stcb->block_entry = NULL;
+ if (error || so->so_error || be.error) {
+ if (error == 0) {
+ if (so->so_error)
+ error = so->so_error;
+ if (be.error) {
+ error = be.error;
+ }
+ }
+ SOCKBUF_UNLOCK(&so->so_snd);
+ goto out_unlocked;
+ }
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
+ sctp_log_block(SCTP_BLOCK_LOG_OUTOF_BLK,
+ so, asoc, stcb->asoc.total_output_queue_size);
+ }
+ if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
+ goto out_unlocked;
+ }
+ inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
+ }
+ if (SCTP_SB_LIMIT_SND(so) > inqueue_bytes) {
+ max_len = SCTP_SB_LIMIT_SND(so) - inqueue_bytes;
+ } else {
+ max_len = 0;
+ }
+ SOCKBUF_UNLOCK(&so->so_snd);
+ }
+skip_preblock:
+ if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
+ goto out_unlocked;
+ }
+ /*
+ * sndlen covers the mbuf case; uio_resid covers the non-mbuf
+ * case. NOTE: uio will be NULL when top (an mbuf chain) is
+ * passed.
+ */
+ if (sndlen == 0) {
+ if (srcv->sinfo_flags & SCTP_EOF) {
+ got_all_of_the_send = 1;
+ goto dataless_eof;
+ } else {
+ SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
+ error = EINVAL;
+ goto out;
+ }
+ }
+ if (top == NULL) {
+ struct sctp_stream_queue_pending *sp;
+ struct sctp_stream_out *strm;
+ uint32_t sndout;
+
+ SCTP_TCB_SEND_LOCK(stcb);
+ if ((asoc->stream_locked) &&
+ (asoc->stream_locked_on != srcv->sinfo_stream)) {
+ SCTP_TCB_SEND_UNLOCK(stcb);
+ SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
+ error = EINVAL;
+ goto out;
+ }
+ SCTP_TCB_SEND_UNLOCK(stcb);
+
+ strm = &stcb->asoc.strmout[srcv->sinfo_stream];
+ if (strm->last_msg_incomplete == 0) {
+ do_a_copy_in:
+ sp = sctp_copy_it_in(stcb, asoc, srcv, uio, net, max_len, user_marks_eor, &error, non_blocking);
+ if ((sp == NULL) || (error)) {
+ goto out;
+ }
+ SCTP_TCB_SEND_LOCK(stcb);
+ if (sp->msg_is_complete) {
+ strm->last_msg_incomplete = 0;
+ asoc->stream_locked = 0;
+ } else {
+ /*
+ * Just got locked to this guy in case of an
+ * interrupt.
+ */
+ strm->last_msg_incomplete = 1;
+ asoc->stream_locked = 1;
+ asoc->stream_locked_on = srcv->sinfo_stream;
+ sp->sender_all_done = 0;
+ }
+ sctp_snd_sb_alloc(stcb, sp->length);
+ atomic_add_int(&asoc->stream_queue_cnt, 1);
+ if ((srcv->sinfo_flags & SCTP_UNORDERED) == 0) {
+ sp->strseq = strm->next_sequence_sent;
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_AT_SEND_2_SCTP) {
+ sctp_misc_ints(SCTP_STRMOUT_LOG_ASSIGN,
+ (uintptr_t) stcb, sp->length,
+ (uint32_t) ((srcv->sinfo_stream << 16) | sp->strseq), 0);
+ }
+ strm->next_sequence_sent++;
+ } else {
+ SCTP_STAT_INCR(sctps_sends_with_unord);
+ }
+ TAILQ_INSERT_TAIL(&strm->outqueue, sp, next);
+ if ((strm->next_spoke.tqe_next == NULL) &&
+ (strm->next_spoke.tqe_prev == NULL)) {
+ /* Not on wheel, insert */
+ sctp_insert_on_wheel(stcb, asoc, strm, 1);
+ }
+ SCTP_TCB_SEND_UNLOCK(stcb);
+ } else {
+ SCTP_TCB_SEND_LOCK(stcb);
+ sp = TAILQ_LAST(&strm->outqueue, sctp_streamhead);
+ SCTP_TCB_SEND_UNLOCK(stcb);
+ if (sp == NULL) {
+ /* ???? Huh ??? last msg is gone */
+#ifdef INVARIANTS
+ panic("Warning: Last msg marked incomplete, yet nothing left?");
+#else
+ SCTP_PRINTF("Warning: Last msg marked incomplete, yet nothing left?\n");
+ strm->last_msg_incomplete = 0;
+#endif
+ goto do_a_copy_in;
+
+ }
+ }
+ while (uio->uio_resid > 0) {
+ /* How much room do we have? */
+ struct mbuf *new_tail, *mm;
+
+ if (SCTP_SB_LIMIT_SND(so) > stcb->asoc.total_output_queue_size)
+ max_len = SCTP_SB_LIMIT_SND(so) - stcb->asoc.total_output_queue_size;
+ else
+ max_len = 0;
+
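+			/*-
+			 * Copy more user data in when there is ample room
+			 * (more than the add-more threshold), when the send
+			 * buffer itself is smaller than the threshold, or
+			 * when what is left of the message fits in the
+			 * available room.
+			 */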
+ if ((max_len > SCTP_BASE_SYSCTL(sctp_add_more_threshold)) ||
+ (max_len && (SCTP_SB_LIMIT_SND(so) < SCTP_BASE_SYSCTL(sctp_add_more_threshold))) ||
+ (uio->uio_resid && (uio->uio_resid <= (int)max_len))) {
+ sndout = 0;
+ new_tail = NULL;
+ if (hold_tcblock) {
+ SCTP_TCB_UNLOCK(stcb);
+ hold_tcblock = 0;
+ }
+ mm = sctp_copy_resume(sp, uio, srcv, max_len, user_marks_eor, &error, &sndout, &new_tail);
+ if ((mm == NULL) || error) {
+ if (mm) {
+ sctp_m_freem(mm);
+ }
+ goto out;
+ }
+ /* Update the mbuf and count */
+ SCTP_TCB_SEND_LOCK(stcb);
+ if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
+ /*
+ * we need to get out. Peer probably
+ * aborted.
+ */
+ sctp_m_freem(mm);
+ if (stcb->asoc.state & SCTP_PCB_FLAGS_WAS_ABORTED) {
+ SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
+ error = ECONNRESET;
+ }
+ SCTP_TCB_SEND_UNLOCK(stcb);
+ goto out;
+ }
+ if (sp->tail_mbuf) {
+ /* tack it to the end */
+ SCTP_BUF_NEXT(sp->tail_mbuf) = mm;
+ sp->tail_mbuf = new_tail;
+ } else {
+ /* A stolen mbuf */
+ sp->data = mm;
+ sp->tail_mbuf = new_tail;
+ }
+ sctp_snd_sb_alloc(stcb, sndout);
+ atomic_add_int(&sp->length, sndout);
+ len += sndout;
+
+ /* Did we reach EOR? */
+ if ((uio->uio_resid == 0) &&
+ ((user_marks_eor == 0) ||
+ (srcv->sinfo_flags & SCTP_EOF) ||
+ (user_marks_eor && (srcv->sinfo_flags & SCTP_EOR)))) {
+ sp->msg_is_complete = 1;
+ } else {
+ sp->msg_is_complete = 0;
+ }
+ SCTP_TCB_SEND_UNLOCK(stcb);
+ }
+ if (uio->uio_resid == 0) {
+ /* got it all? */
+ continue;
+ }
+ /* PR-SCTP? */
+ if ((asoc->peer_supports_prsctp) && (asoc->sent_queue_cnt_removeable > 0)) {
+				/*
+				 * This is ugly but we must ensure locking
+				 * order
+				 */
+ if (hold_tcblock == 0) {
+ SCTP_TCB_LOCK(stcb);
+ hold_tcblock = 1;
+ }
+ sctp_prune_prsctp(stcb, asoc, srcv, sndlen);
+ inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
+ if (SCTP_SB_LIMIT_SND(so) > stcb->asoc.total_output_queue_size)
+ max_len = SCTP_SB_LIMIT_SND(so) - inqueue_bytes;
+ else
+ max_len = 0;
+ if (max_len > 0) {
+ continue;
+ }
+ SCTP_TCB_UNLOCK(stcb);
+ hold_tcblock = 0;
+ }
+ /* wait for space now */
+ if (non_blocking) {
+			/* Non-blocking I/O; bail out */
+ goto skip_out_eof;
+ }
+ /* What about the INIT, send it maybe */
+ if (queue_only_for_init) {
+ if (hold_tcblock == 0) {
+ SCTP_TCB_LOCK(stcb);
+ hold_tcblock = 1;
+ }
+ if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) {
+ /* a collision took us forward? */
+ queue_only = 0;
+ } else {
+ sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED);
+ SCTP_SET_STATE(asoc, SCTP_STATE_COOKIE_WAIT);
+ queue_only = 1;
+ }
+ }
+ if ((net->flight_size > net->cwnd) &&
+ (asoc->sctp_cmt_on_off == 0)) {
+ SCTP_STAT_INCR(sctps_send_cwnd_avoid);
+ queue_only = 1;
+ } else if (asoc->ifp_had_enobuf) {
+ SCTP_STAT_INCR(sctps_ifnomemqueued);
+ if (net->flight_size > (2 * net->mtu)) {
+ queue_only = 1;
+ }
+ asoc->ifp_had_enobuf = 0;
+ }
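+			/*
+			 * Estimate the queued-but-unsent bytes: data beyond
+			 * what is already in flight, plus a data chunk
+			 * header's worth of overhead per queued message.
+			 */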
+ un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
+ (stcb->asoc.stream_queue_cnt * sizeof(struct sctp_data_chunk)));
+ if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) &&
+ (stcb->asoc.total_flight > 0) &&
+ (stcb->asoc.stream_queue_cnt < SCTP_MAX_DATA_BUNDLING) &&
+ (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD))) {
+
+			/*-
+			 * Ok, Nagle is set on and we have data outstanding.
+			 * Don't send anything and let SACKs drive out the
+			 * data unless we have a "full" segment to send.
+			 */
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
+ sctp_log_nagle_event(stcb, SCTP_NAGLE_APPLIED);
+ }
+ SCTP_STAT_INCR(sctps_naglequeued);
+ nagle_applies = 1;
+ } else {
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
+ if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY))
+ sctp_log_nagle_event(stcb, SCTP_NAGLE_SKIPPED);
+ }
+ SCTP_STAT_INCR(sctps_naglesent);
+ nagle_applies = 0;
+ }
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
+
+ sctp_misc_ints(SCTP_CWNDLOG_PRESEND, queue_only_for_init, queue_only,
+ nagle_applies, un_sent);
+ sctp_misc_ints(SCTP_CWNDLOG_PRESEND, stcb->asoc.total_output_queue_size,
+ stcb->asoc.total_flight,
+ stcb->asoc.chunks_on_out_queue, stcb->asoc.total_flight_count);
+ }
+ if (queue_only_for_init)
+ queue_only_for_init = 0;
+ if ((queue_only == 0) && (nagle_applies == 0)) {
+ /*-
+ * need to start chunk output
+ * before blocking.. note that if
+ * a lock is already applied, then
+ * the input via the net is happening
+ * and I don't need to start output :-D
+ */
+ if (hold_tcblock == 0) {
+ if (SCTP_TCB_TRYLOCK(stcb)) {
+ hold_tcblock = 1;
+ sctp_chunk_output(inp,
+ stcb,
+ SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
+ }
+ } else {
+ sctp_chunk_output(inp,
+ stcb,
+ SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
+ }
+ if (hold_tcblock == 1) {
+ SCTP_TCB_UNLOCK(stcb);
+ hold_tcblock = 0;
+ }
+ }
+ SOCKBUF_LOCK(&so->so_snd);
+		/*-
+		 * This is a bit strange, but I think it will work.
+		 * total_output_queue_size is protected by the TCB_LOCK,
+		 * which we just released. There is a race between releasing
+		 * it above and getting the socket lock, where SACKs can come
+		 * in before we have put SB_WAIT on the so_snd buffer to get
+		 * the wakeup. After the socket lock is applied, sack
+		 * processing will also need to lock so_snd to do the actual
+		 * sowwakeup(). So once we hold the socket buffer lock, if we
+		 * recheck the size we KNOW we will get to sleep safely with
+		 * the wakeup flag in place.
+		 */
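+		/*
+		 * Block only if the send buffer cannot absorb at least the
+		 * add-more threshold (or the whole buffer size, if that is
+		 * smaller) on top of what is already queued.
+		 */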
+ if (SCTP_SB_LIMIT_SND(so) <= (stcb->asoc.total_output_queue_size +
+ min(SCTP_BASE_SYSCTL(sctp_add_more_threshold), SCTP_SB_LIMIT_SND(so)))) {
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
+ sctp_log_block(SCTP_BLOCK_LOG_INTO_BLK,
+ so, asoc, uio->uio_resid);
+ }
+ be.error = 0;
+ stcb->block_entry = &be;
+ error = sbwait(&so->so_snd);
+ stcb->block_entry = NULL;
+
+ if (error || so->so_error || be.error) {
+ if (error == 0) {
+ if (so->so_error)
+ error = so->so_error;
+ if (be.error) {
+ error = be.error;
+ }
+ }
+ SOCKBUF_UNLOCK(&so->so_snd);
+ goto out_unlocked;
+ }
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
+ sctp_log_block(SCTP_BLOCK_LOG_OUTOF_BLK,
+ so, asoc, stcb->asoc.total_output_queue_size);
+ }
+ }
+ SOCKBUF_UNLOCK(&so->so_snd);
+ if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
+ goto out_unlocked;
+ }
+ }
+ SCTP_TCB_SEND_LOCK(stcb);
+ if (sp) {
+ if (sp->msg_is_complete == 0) {
+ strm->last_msg_incomplete = 1;
+ asoc->stream_locked = 1;
+ asoc->stream_locked_on = srcv->sinfo_stream;
+ } else {
+ sp->sender_all_done = 1;
+ strm->last_msg_incomplete = 0;
+ asoc->stream_locked = 0;
+ }
+ } else {
+ SCTP_PRINTF("Huh no sp TSNH?\n");
+ strm->last_msg_incomplete = 0;
+ asoc->stream_locked = 0;
+ }
+ SCTP_TCB_SEND_UNLOCK(stcb);
+ if (uio->uio_resid == 0) {
+ got_all_of_the_send = 1;
+ }
+ } else {
+ /* We send in a 0, since we do NOT have any locks */
+ error = sctp_msg_append(stcb, net, top, srcv, 0);
+ top = NULL;
+ if (srcv->sinfo_flags & SCTP_EOF) {
+			/*
+			 * This should only happen for Panda in the mbuf
+			 * send case, which does NOT yet support EEOR mode.
+			 * Thus, we can just set this flag to do the proper
+			 * EOF handling.
+			 */
+ got_all_of_the_send = 1;
+ }
+ }
+ if (error) {
+ goto out;
+ }
+dataless_eof:
+	/* EOF: does the user want a graceful shutdown? */
+ if ((srcv->sinfo_flags & SCTP_EOF) &&
+ (got_all_of_the_send == 1) &&
+ (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE)) {
+ int cnt;
+
+ SCTP_STAT_INCR(sctps_sends_with_eof);
+ error = 0;
+ if (hold_tcblock == 0) {
+ SCTP_TCB_LOCK(stcb);
+ hold_tcblock = 1;
+ }
+ cnt = sctp_is_there_unsent_data(stcb);
+ if (TAILQ_EMPTY(&asoc->send_queue) &&
+ TAILQ_EMPTY(&asoc->sent_queue) &&
+ (cnt == 0)) {
+ if (asoc->locked_on_sending) {
+ goto abort_anyway;
+ }
+ /* there is nothing queued to send, so I'm done... */
+ if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
+ (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
+ (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
+ /* only send SHUTDOWN the first time through */
+ sctp_send_shutdown(stcb, stcb->asoc.primary_destination);
+ if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) {
+ SCTP_STAT_DECR_GAUGE32(sctps_currestab);
+ }
+ SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
+ SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
+ sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb,
+ asoc->primary_destination);
+ sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
+ asoc->primary_destination);
+ }
+ } else {
+ /*-
+ * we still got (or just got) data to send, so set
+ * SHUTDOWN_PENDING
+ */
+ /*-
+ * XXX sockets draft says that SCTP_EOF should be
+ * sent with no data. currently, we will allow user
+ * data to be sent first and move to
+ * SHUTDOWN-PENDING
+ */
+ if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
+ (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
+ (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
+ if (hold_tcblock == 0) {
+ SCTP_TCB_LOCK(stcb);
+ hold_tcblock = 1;
+ }
+ if (asoc->locked_on_sending) {
+ /* Locked to send out the data */
+ struct sctp_stream_queue_pending *sp;
+
+ sp = TAILQ_LAST(&asoc->locked_on_sending->outqueue, sctp_streamhead);
+ if (sp) {
+ if ((sp->length == 0) && (sp->msg_is_complete == 0))
+ asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
+ }
+ }
+ asoc->state |= SCTP_STATE_SHUTDOWN_PENDING;
+ if (TAILQ_EMPTY(&asoc->send_queue) &&
+ TAILQ_EMPTY(&asoc->sent_queue) &&
+ (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
+ abort_anyway:
+ if (free_cnt_applied) {
+ atomic_add_int(&stcb->asoc.refcnt, -1);
+ free_cnt_applied = 0;
+ }
+ sctp_abort_an_association(stcb->sctp_ep, stcb,
+ SCTP_RESPONSE_TO_USER_REQ,
+ NULL, SCTP_SO_LOCKED);
+ /*
+ * now relock the stcb so everything
+ * is sane
+ */
+ hold_tcblock = 0;
+ stcb = NULL;
+ goto out;
+ }
+ sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
+ asoc->primary_destination);
+ sctp_feature_off(inp, SCTP_PCB_FLAGS_NODELAY);
+ }
+ }
+ }
+skip_out_eof:
+ if (!TAILQ_EMPTY(&stcb->asoc.control_send_queue)) {
+ some_on_control = 1;
+ }
+ if (queue_only_for_init) {
+ if (hold_tcblock == 0) {
+ SCTP_TCB_LOCK(stcb);
+ hold_tcblock = 1;
+ }
+ if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) {
+ /* a collision took us forward? */
+ queue_only = 0;
+ } else {
+ sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED);
+ SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_COOKIE_WAIT);
+ queue_only = 1;
+ }
+ }
+ if ((net->flight_size > net->cwnd) &&
+ (stcb->asoc.sctp_cmt_on_off == 0)) {
+ SCTP_STAT_INCR(sctps_send_cwnd_avoid);
+ queue_only = 1;
+ } else if (asoc->ifp_had_enobuf) {
+ SCTP_STAT_INCR(sctps_ifnomemqueued);
+ if (net->flight_size > (2 * net->mtu)) {
+ queue_only = 1;
+ }
+ asoc->ifp_had_enobuf = 0;
+ }
+ un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
+ (stcb->asoc.stream_queue_cnt * sizeof(struct sctp_data_chunk)));
+ if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) &&
+ (stcb->asoc.total_flight > 0) &&
+ (stcb->asoc.stream_queue_cnt < SCTP_MAX_DATA_BUNDLING) &&
+ (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD))) {
+		/*-
+		 * Ok, Nagle is set on and we have data outstanding.
+		 * Don't send anything and let SACKs drive out the
+		 * data unless we have a "full" segment to send.
+		 */
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
+ sctp_log_nagle_event(stcb, SCTP_NAGLE_APPLIED);
+ }
+ SCTP_STAT_INCR(sctps_naglequeued);
+ nagle_applies = 1;
+ } else {
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
+ if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY))
+ sctp_log_nagle_event(stcb, SCTP_NAGLE_SKIPPED);
+ }
+ SCTP_STAT_INCR(sctps_naglesent);
+ nagle_applies = 0;
+ }
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
+ sctp_misc_ints(SCTP_CWNDLOG_PRESEND, queue_only_for_init, queue_only,
+ nagle_applies, un_sent);
+ sctp_misc_ints(SCTP_CWNDLOG_PRESEND, stcb->asoc.total_output_queue_size,
+ stcb->asoc.total_flight,
+ stcb->asoc.chunks_on_out_queue, stcb->asoc.total_flight_count);
+ }
+ if (queue_only_for_init)
+ queue_only_for_init = 0;
+ if ((queue_only == 0) && (nagle_applies == 0) && (stcb->asoc.peers_rwnd && un_sent)) {
+ /* we can attempt to send too. */
+ if (hold_tcblock == 0) {
+			/*
+			 * If there is activity receiving SACKs, there is
+			 * no need to send.
+			 */
+ if (SCTP_TCB_TRYLOCK(stcb)) {
+ sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
+ hold_tcblock = 1;
+ }
+ } else {
+ sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
+ }
+ } else if ((queue_only == 0) &&
+ (stcb->asoc.peers_rwnd == 0) &&
+ (stcb->asoc.total_flight == 0)) {
+ /* We get to have a probe outstanding */
+ if (hold_tcblock == 0) {
+ hold_tcblock = 1;
+ SCTP_TCB_LOCK(stcb);
+ }
+ sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
+ } else if (some_on_control) {
+ int num_out, reason, frag_point;
+
+ /* Here we do control only */
+ if (hold_tcblock == 0) {
+ hold_tcblock = 1;
+ SCTP_TCB_LOCK(stcb);
+ }
+ frag_point = sctp_get_frag_point(stcb, &stcb->asoc);
+ (void)sctp_med_chunk_output(inp, stcb, &stcb->asoc, &num_out,
+ &reason, 1, 1, &now, &now_filled, frag_point, SCTP_SO_LOCKED);
+ }
+ SCTPDBG(SCTP_DEBUG_OUTPUT1, "USR Send complete qo:%d prw:%d unsent:%d tf:%d cooq:%d toqs:%d err:%d\n",
+ queue_only, stcb->asoc.peers_rwnd, un_sent,
+ stcb->asoc.total_flight, stcb->asoc.chunks_on_out_queue,
+ stcb->asoc.total_output_queue_size, error);
+
+out:
+out_unlocked:
+
+ if (local_soresv && stcb) {
+ atomic_subtract_int(&stcb->asoc.sb_send_resv, sndlen);
+ local_soresv = 0;
+ }
+ if (create_lock_applied) {
+ SCTP_ASOC_CREATE_UNLOCK(inp);
+ create_lock_applied = 0;
+ }
+ if ((stcb) && hold_tcblock) {
+ SCTP_TCB_UNLOCK(stcb);
+ }
+ if (stcb && free_cnt_applied) {
+ atomic_add_int(&stcb->asoc.refcnt, -1);
+ }
+#ifdef INVARIANTS
+ if (stcb) {
+ if (mtx_owned(&stcb->tcb_mtx)) {
+ panic("Leaving with tcb mtx owned?");
+ }
+ if (mtx_owned(&stcb->tcb_send_mtx)) {
+ panic("Leaving with tcb send mtx owned?");
+ }
+ }
+#endif
+#ifdef INVARIANTS
+ if (inp) {
+ sctp_validate_no_locks(inp);
+ } else {
+		printf("Warning - inp is NULL so can't validate locks\n");
+ }
+#endif
+ if (top) {
+ sctp_m_freem(top);
+ }
+ if (control) {
+ sctp_m_freem(control);
+ }
+ return (error);
+}
+
+
+/*
+ * generate an AUTHentication chunk, if required
+ */
+struct mbuf *
+sctp_add_auth_chunk(struct mbuf *m, struct mbuf **m_end,
+ struct sctp_auth_chunk **auth_ret, uint32_t * offset,
+ struct sctp_tcb *stcb, uint8_t chunk)
+{
+ struct mbuf *m_auth;
+ struct sctp_auth_chunk *auth;
+ int chunk_len;
+
+ if ((m_end == NULL) || (auth_ret == NULL) || (offset == NULL) ||
+ (stcb == NULL))
+ return (m);
+
+ /* sysctl disabled auth? */
+ if (SCTP_BASE_SYSCTL(sctp_auth_disable))
+ return (m);
+
+ /* peer doesn't do auth... */
+ if (!stcb->asoc.peer_supports_auth) {
+ return (m);
+ }
+ /* does the requested chunk require auth? */
+ if (!sctp_auth_is_required_chunk(chunk, stcb->asoc.peer_auth_chunks)) {
+ return (m);
+ }
+ m_auth = sctp_get_mbuf_for_msg(sizeof(*auth), 0, M_DONTWAIT, 1, MT_HEADER);
+ if (m_auth == NULL) {
+		/* no mbufs */
+ return (m);
+ }
+ /* reserve some space if this will be the first mbuf */
+ if (m == NULL)
+ SCTP_BUF_RESV_UF(m_auth, SCTP_MIN_OVERHEAD);
+ /* fill in the AUTH chunk details */
+ auth = mtod(m_auth, struct sctp_auth_chunk *);
+ bzero(auth, sizeof(*auth));
+ auth->ch.chunk_type = SCTP_AUTHENTICATION;
+ auth->ch.chunk_flags = 0;
+ chunk_len = sizeof(*auth) +
+ sctp_get_hmac_digest_len(stcb->asoc.peer_hmac_id);
+ auth->ch.chunk_length = htons(chunk_len);
+ auth->hmac_id = htons(stcb->asoc.peer_hmac_id);
+ /* key id and hmac digest will be computed and filled in upon send */
+
+ /* save the offset where the auth was inserted into the chain */
+ if (m != NULL) {
+ struct mbuf *cn;
+
+ *offset = 0;
+ cn = m;
+ while (cn) {
+ *offset += SCTP_BUF_LEN(cn);
+ cn = SCTP_BUF_NEXT(cn);
+ }
+ } else
+ *offset = 0;
+
+ /* update length and return pointer to the auth chunk */
+ SCTP_BUF_LEN(m_auth) = chunk_len;
+ m = sctp_copy_mbufchain(m_auth, m, m_end, 1, chunk_len, 0);
+ if (auth_ret != NULL)
+ *auth_ret = auth;
+
+ return (m);
+}
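+
+/*
+ * Illustrative use only (the local variable names here are hypothetical):
+ * a caller bundling chunks into a packet might do
+ *
+ *	m = sctp_add_auth_chunk(m, &m_last, &auth, &auth_offset,
+ *	    stcb, SCTP_DATA);
+ *
+ * The key id and HMAC digest are filled in later, at send time, once the
+ * whole packet has been built.
+ */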
+
+#ifdef INET6
+int
+sctp_v6src_match_nexthop(struct sockaddr_in6 *src6, sctp_route_t * ro)
+{
+ struct nd_prefix *pfx = NULL;
+ struct nd_pfxrouter *pfxrtr = NULL;
+ struct sockaddr_in6 gw6;
+
+ if (ro == NULL || ro->ro_rt == NULL || src6->sin6_family != AF_INET6)
+ return (0);
+
+ /* get prefix entry of address */
+ LIST_FOREACH(pfx, &MODULE_GLOBAL(nd_prefix), ndpr_entry) {
+ if (pfx->ndpr_stateflags & NDPRF_DETACHED)
+ continue;
+ if (IN6_ARE_MASKED_ADDR_EQUAL(&pfx->ndpr_prefix.sin6_addr,
+ &src6->sin6_addr, &pfx->ndpr_mask))
+ break;
+ }
+ /* no prefix entry in the prefix list */
+ if (pfx == NULL) {
+ SCTPDBG(SCTP_DEBUG_OUTPUT2, "No prefix entry for ");
+ SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)src6);
+ return (0);
+ }
+ SCTPDBG(SCTP_DEBUG_OUTPUT2, "v6src_match_nexthop(), Prefix entry is ");
+ SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)src6);
+
+ /* search installed gateway from prefix entry */
+ for (pfxrtr = pfx->ndpr_advrtrs.lh_first; pfxrtr; pfxrtr =
+ pfxrtr->pfr_next) {
+ memset(&gw6, 0, sizeof(struct sockaddr_in6));
+ gw6.sin6_family = AF_INET6;
+ gw6.sin6_len = sizeof(struct sockaddr_in6);
+ memcpy(&gw6.sin6_addr, &pfxrtr->router->rtaddr,
+ sizeof(struct in6_addr));
+ SCTPDBG(SCTP_DEBUG_OUTPUT2, "prefix router is ");
+ SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)&gw6);
+ SCTPDBG(SCTP_DEBUG_OUTPUT2, "installed router is ");
+ SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, ro->ro_rt->rt_gateway);
+ if (sctp_cmpaddr((struct sockaddr *)&gw6,
+ ro->ro_rt->rt_gateway)) {
+ SCTPDBG(SCTP_DEBUG_OUTPUT2, "pfxrouter is installed\n");
+ return (1);
+ }
+ }
+ SCTPDBG(SCTP_DEBUG_OUTPUT2, "pfxrouter is not installed\n");
+ return (0);
+}
+
+#endif
+
+int
+sctp_v4src_match_nexthop(struct sctp_ifa *sifa, sctp_route_t * ro)
+{
+ struct sockaddr_in *sin, *mask;
+ struct ifaddr *ifa;
+ struct in_addr srcnetaddr, gwnetaddr;
+
+ if (ro == NULL || ro->ro_rt == NULL ||
+ sifa->address.sa.sa_family != AF_INET) {
+ return (0);
+ }
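+	/*
+	 * Compare the source address and the route's gateway under the
+	 * source interface's netmask: if both fall within the same
+	 * subnet, the gateway is reachable from this source address.
+	 */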
+ ifa = (struct ifaddr *)sifa->ifa;
+ mask = (struct sockaddr_in *)(ifa->ifa_netmask);
+ sin = (struct sockaddr_in *)&sifa->address.sin;
+ srcnetaddr.s_addr = (sin->sin_addr.s_addr & mask->sin_addr.s_addr);
+ SCTPDBG(SCTP_DEBUG_OUTPUT1, "match_nexthop4: src address is ");
+ SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &sifa->address.sa);
+ SCTPDBG(SCTP_DEBUG_OUTPUT1, "network address is %x\n", srcnetaddr.s_addr);
+
+ sin = (struct sockaddr_in *)ro->ro_rt->rt_gateway;
+ gwnetaddr.s_addr = (sin->sin_addr.s_addr & mask->sin_addr.s_addr);
+ SCTPDBG(SCTP_DEBUG_OUTPUT1, "match_nexthop4: nexthop is ");
+ SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, ro->ro_rt->rt_gateway);
+ SCTPDBG(SCTP_DEBUG_OUTPUT1, "network address is %x\n", gwnetaddr.s_addr);
+ if (srcnetaddr.s_addr == gwnetaddr.s_addr) {
+ return (1);
+ }
+ return (0);
+}
diff --git a/rtems/freebsd/netinet/sctp_output.h b/rtems/freebsd/netinet/sctp_output.h
new file mode 100644
index 00000000..38b19cc2
--- /dev/null
+++ b/rtems/freebsd/netinet/sctp_output.h
@@ -0,0 +1,229 @@
+/*-
+ * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* $KAME: sctp_output.h,v 1.14 2005/03/06 16:04:18 itojun Exp $ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#ifndef __sctp_output_h__
+#define __sctp_output_h__
+
+#include <rtems/freebsd/netinet/sctp_header.h>
+
+#if defined(_KERNEL) || defined(__Userspace__)
+
+
+struct mbuf *
+sctp_add_addresses_to_i_ia(struct sctp_inpcb *inp,
+ struct sctp_scoping *scope,
+ struct mbuf *m_at,
+ int cnt_inits_to);
+
+
+int sctp_is_addr_restricted(struct sctp_tcb *, struct sctp_ifa *);
+
+
+int
+sctp_is_address_in_scope(struct sctp_ifa *ifa,
+ int ipv4_addr_legal,
+ int ipv6_addr_legal,
+ int loopback_scope,
+ int ipv4_local_scope,
+ int local_scope,
+ int site_scope,
+ int do_update);
+int
+ sctp_is_addr_in_ep(struct sctp_inpcb *inp, struct sctp_ifa *ifa);
+
+struct sctp_ifa *
+sctp_source_address_selection(struct sctp_inpcb *inp,
+ struct sctp_tcb *stcb,
+ sctp_route_t * ro, struct sctp_nets *net,
+ int non_asoc_addr_ok, uint32_t vrf_id);
+
+int
+ sctp_v6src_match_nexthop(struct sockaddr_in6 *src6, sctp_route_t * ro);
+int
+ sctp_v4src_match_nexthop(struct sctp_ifa *sifa, sctp_route_t * ro);
+
+void
+sctp_send_initiate(struct sctp_inpcb *, struct sctp_tcb *, int
+#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
+ SCTP_UNUSED
+#endif
+);
+
+void
+sctp_send_initiate_ack(struct sctp_inpcb *, struct sctp_tcb *,
+ struct mbuf *, int, int, struct sctphdr *, struct sctp_init_chunk *,
+ uint32_t, uint16_t, int);
+
+struct mbuf *
+sctp_arethere_unrecognized_parameters(struct mbuf *, int, int *,
+ struct sctp_chunkhdr *, int *);
+void sctp_queue_op_err(struct sctp_tcb *, struct mbuf *);
+
+int
+sctp_send_cookie_echo(struct mbuf *, int, struct sctp_tcb *,
+ struct sctp_nets *);
+
+void sctp_send_cookie_ack(struct sctp_tcb *);
+
+void
+sctp_send_heartbeat_ack(struct sctp_tcb *, struct mbuf *, int, int,
+ struct sctp_nets *);
+
+void
+sctp_remove_from_wheel(struct sctp_tcb *stcb,
+ struct sctp_association *asoc,
+ struct sctp_stream_out *strq, int holds_lock);
+
+
+void sctp_send_shutdown(struct sctp_tcb *, struct sctp_nets *);
+
+void sctp_send_shutdown_ack(struct sctp_tcb *, struct sctp_nets *);
+
+void sctp_send_shutdown_complete(struct sctp_tcb *, struct sctp_nets *, int);
+
+void
+sctp_send_shutdown_complete2(struct mbuf *, int, struct sctphdr *,
+ uint32_t, uint16_t);
+
+void sctp_send_asconf(struct sctp_tcb *, struct sctp_nets *, int addr_locked);
+
+void sctp_send_asconf_ack(struct sctp_tcb *);
+
+int sctp_get_frag_point(struct sctp_tcb *, struct sctp_association *);
+
+void sctp_toss_old_cookies(struct sctp_tcb *, struct sctp_association *);
+
+void sctp_toss_old_asconf(struct sctp_tcb *);
+
+void sctp_fix_ecn_echo(struct sctp_association *);
+
+void sctp_move_chunks_from_net(struct sctp_tcb *stcb, struct sctp_nets *net);
+
+int
+sctp_output(struct sctp_inpcb *, struct mbuf *, struct sockaddr *,
+ struct mbuf *, struct thread *, int);
+
+void
+sctp_insert_on_wheel(struct sctp_tcb *stcb,
+ struct sctp_association *asoc,
+ struct sctp_stream_out *strq, int holdslock);
+
+void
+sctp_chunk_output(struct sctp_inpcb *, struct sctp_tcb *, int, int
+#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
+ SCTP_UNUSED
+#endif
+);
+void
+sctp_send_abort_tcb(struct sctp_tcb *, struct mbuf *, int
+#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
+ SCTP_UNUSED
+#endif
+);
+
+void send_forward_tsn(struct sctp_tcb *, struct sctp_association *);
+
+void sctp_send_sack(struct sctp_tcb *);
+
+int sctp_send_hb(struct sctp_tcb *, int, struct sctp_nets *);
+
+void sctp_send_ecn_echo(struct sctp_tcb *, struct sctp_nets *, uint32_t);
+
+
+void
+sctp_send_packet_dropped(struct sctp_tcb *, struct sctp_nets *, struct mbuf *,
+ int, int);
+
+
+
+void sctp_send_cwr(struct sctp_tcb *, struct sctp_nets *, uint32_t);
+
+
+void
+sctp_add_stream_reset_out(struct sctp_tmit_chunk *chk,
+ int number_entries, uint16_t * list,
+ uint32_t seq, uint32_t resp_seq, uint32_t last_sent);
+
+void
+sctp_add_stream_reset_in(struct sctp_tmit_chunk *chk,
+ int number_entries, uint16_t * list,
+ uint32_t seq);
+
+void
+sctp_add_stream_reset_tsn(struct sctp_tmit_chunk *chk,
+ uint32_t seq);
+
+void
+sctp_add_stream_reset_result(struct sctp_tmit_chunk *chk,
+ uint32_t resp_seq, uint32_t result);
+
+void
+sctp_add_stream_reset_result_tsn(struct sctp_tmit_chunk *chk,
+ uint32_t resp_seq, uint32_t result,
+ uint32_t send_una, uint32_t recv_next);
+
+int
+sctp_send_str_reset_req(struct sctp_tcb *stcb,
+ int number_entries,
+ uint16_t * list,
+ uint8_t send_out_req,
+ uint32_t resp_seq,
+ uint8_t send_in_req,
+ uint8_t send_tsn_req,
+ uint8_t add_str,
+ uint16_t adding);
+
+
+void
+sctp_send_abort(struct mbuf *, int, struct sctphdr *, uint32_t,
+ struct mbuf *, uint32_t, uint16_t);
+
+void sctp_send_operr_to(struct mbuf *, int, struct mbuf *, uint32_t, uint32_t, uint16_t);
+
+#endif /* _KERNEL || __Userspace__ */
+
+#if defined(_KERNEL) || defined (__Userspace__)
+int
+sctp_sosend(struct socket *so,
+ struct sockaddr *addr,
+ struct uio *uio,
+ struct mbuf *top,
+ struct mbuf *control,
+ int flags,
+ struct thread *p
+);
+
+#endif
+#endif
diff --git a/rtems/freebsd/netinet/sctp_pcb.c b/rtems/freebsd/netinet/sctp_pcb.c
new file mode 100644
index 00000000..de4925b1
--- /dev/null
+++ b/rtems/freebsd/netinet/sctp_pcb.c
@@ -0,0 +1,6810 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* $KAME: sctp_pcb.c,v 1.38 2005/03/06 16:04:18 itojun Exp $ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/freebsd/netinet/sctp_os.h>
+#include <rtems/freebsd/sys/proc.h>
+#include <rtems/freebsd/netinet/sctp_var.h>
+#include <rtems/freebsd/netinet/sctp_sysctl.h>
+#include <rtems/freebsd/netinet/sctp_pcb.h>
+#include <rtems/freebsd/netinet/sctputil.h>
+#include <rtems/freebsd/netinet/sctp.h>
+#include <rtems/freebsd/netinet/sctp_header.h>
+#include <rtems/freebsd/netinet/sctp_asconf.h>
+#include <rtems/freebsd/netinet/sctp_output.h>
+#include <rtems/freebsd/netinet/sctp_timer.h>
+#include <rtems/freebsd/netinet/sctp_bsd_addr.h>
+#include <rtems/freebsd/netinet/udp.h>
+
+
+VNET_DEFINE(struct sctp_base_info, system_base_info);
+
+/* FIX: we don't handle multiple link local scopes */
+/* "scopeless" replacement IN6_ARE_ADDR_EQUAL */
+#ifdef INET6
+int
+SCTP6_ARE_ADDR_EQUAL(struct sockaddr_in6 *a, struct sockaddr_in6 *b)
+{
+ struct sockaddr_in6 tmp_a, tmp_b;
+
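+	/*
+	 * Embed the scope id into each link-local address before
+	 * comparing, so that equal addresses on different scopes
+	 * do not falsely match.
+	 */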
+ memcpy(&tmp_a, a, sizeof(struct sockaddr_in6));
+ if (sa6_embedscope(&tmp_a, MODULE_GLOBAL(ip6_use_defzone)) != 0) {
+ return 0;
+ }
+ memcpy(&tmp_b, b, sizeof(struct sockaddr_in6));
+ if (sa6_embedscope(&tmp_b, MODULE_GLOBAL(ip6_use_defzone)) != 0) {
+ return 0;
+ }
+ return (IN6_ARE_ADDR_EQUAL(&tmp_a.sin6_addr, &tmp_b.sin6_addr));
+}
+
+#endif
+
+void
+sctp_fill_pcbinfo(struct sctp_pcbinfo *spcb)
+{
+ /*
+ * We really don't need to lock this, but I will just because it
+ * does not hurt.
+ */
+ SCTP_INP_INFO_RLOCK();
+ spcb->ep_count = SCTP_BASE_INFO(ipi_count_ep);
+ spcb->asoc_count = SCTP_BASE_INFO(ipi_count_asoc);
+ spcb->laddr_count = SCTP_BASE_INFO(ipi_count_laddr);
+ spcb->raddr_count = SCTP_BASE_INFO(ipi_count_raddr);
+ spcb->chk_count = SCTP_BASE_INFO(ipi_count_chunk);
+ spcb->readq_count = SCTP_BASE_INFO(ipi_count_readq);
+ spcb->stream_oque = SCTP_BASE_INFO(ipi_count_strmoq);
+ spcb->free_chunks = SCTP_BASE_INFO(ipi_free_chunks);
+
+ SCTP_INP_INFO_RUNLOCK();
+}
+
+/*
+ * Addresses are added to VRFs (virtual routers). For BSD we
+ * have only the default VRF 0. We maintain a hash list of
+ * VRFs. Each VRF has its own list of sctp_ifn's, and each of
+ * these has a list of addresses. When we add a new address
+ * to a VRF we look up the ifn/ifn_index; if the ifn does
+ * not exist we create it and add it to the list of IFNs
+ * within the VRF. Once we have the sctp_ifn, we add the
+ * address to its list. So it looks something like:
+ *
+ * hash-vrf-table
+ *   vrf-> ifn-> ifn -> ifn
+ *   vrf    |
+ *    ...   +--ifa-> ifa -> ifa
+ *   vrf
+ *
+ * We keep these separate lists since the SCTP subsystem will
+ * point to them from its source address selection nets structure.
+ * When an address is deleted it does not happen right away on
+ * the SCTP side; it gets scheduled. When a delete happens we
+ * immediately remove the address from the master list and
+ * decrement the refcount. As our addip iterator works through
+ * and frees the src address selection entries pointing to the
+ * sctp_ifa, the refcount eventually reaches 0 and we delete it.
+ * Note that any locking on system-level ifn/ifa is assumed to
+ * be done by the caller of these functions; these routines only
+ * lock the SCTP structures as they add or delete things.
+ *
+ * Other notes on VRF concepts.
+ *  - An endpoint can be in multiple VRFs.
+ *  - An association lives within one and only one VRF.
+ *  - For any incoming packet we can deduce the VRF by
+ *    looking at the inbound mbuf/pak (for BSD it's VRF=0 :D).
+ *  - Any downward send call or connect call must supply the
+ *    VRF via ancillary data or via some sort of set-default-VRF
+ *    socket option call (again a no-brainer for BSD since the
+ *    VRF is always 0).
+ *  - An endpoint may add multiple VRFs to it.
+ *  - Listening sockets can accept associations in any of the
+ *    VRFs they are in, but the assoc will end up in only one
+ *    VRF (gotten from the packet or connect/send).
+ *
+ */
+
+struct sctp_vrf *
+sctp_allocate_vrf(int vrf_id)
+{
+ struct sctp_vrf *vrf = NULL;
+ struct sctp_vrflist *bucket;
+
+ /* First allocate the VRF structure */
+ vrf = sctp_find_vrf(vrf_id);
+ if (vrf) {
+ /* Already allocated */
+ return (vrf);
+ }
+ SCTP_MALLOC(vrf, struct sctp_vrf *, sizeof(struct sctp_vrf),
+ SCTP_M_VRF);
+ if (vrf == NULL) {
+ /* No memory */
+#ifdef INVARIANTS
+ panic("No memory for VRF:%d", vrf_id);
+#endif
+ return (NULL);
+ }
+ /* setup the VRF */
+ memset(vrf, 0, sizeof(struct sctp_vrf));
+ vrf->vrf_id = vrf_id;
+ LIST_INIT(&vrf->ifnlist);
+ vrf->total_ifa_count = 0;
+ vrf->refcount = 0;
+ /* now also setup table ids */
+ SCTP_INIT_VRF_TABLEID(vrf);
+ /* Init the HASH of addresses */
+ vrf->vrf_addr_hash = SCTP_HASH_INIT(SCTP_VRF_ADDR_HASH_SIZE,
+ &vrf->vrf_addr_hashmark);
+ if (vrf->vrf_addr_hash == NULL) {
+ /* No memory */
+#ifdef INVARIANTS
+ panic("No memory for VRF:%d", vrf_id);
+#endif
+ SCTP_FREE(vrf, SCTP_M_VRF);
+ return (NULL);
+ }
+ /* Add it to the hash table */
+ bucket = &SCTP_BASE_INFO(sctp_vrfhash)[(vrf_id & SCTP_BASE_INFO(hashvrfmark))];
+ LIST_INSERT_HEAD(bucket, vrf, next_vrf);
+ atomic_add_int(&SCTP_BASE_INFO(ipi_count_vrfs), 1);
+ return (vrf);
+}
+
+
+struct sctp_ifn *
+sctp_find_ifn(void *ifn, uint32_t ifn_index)
+{
+ struct sctp_ifn *sctp_ifnp;
+ struct sctp_ifnlist *hash_ifn_head;
+
+	/*
+	 * We assume the address lock is held; if that's wrong,
+	 * problems could occur :-)
+	 */
+ hash_ifn_head = &SCTP_BASE_INFO(vrf_ifn_hash)[(ifn_index & SCTP_BASE_INFO(vrf_ifn_hashmark))];
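+	/* An ifn matches on either its ifn_index or its system ifn pointer. */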
+ LIST_FOREACH(sctp_ifnp, hash_ifn_head, next_bucket) {
+ if (sctp_ifnp->ifn_index == ifn_index) {
+ return (sctp_ifnp);
+ }
+ if (sctp_ifnp->ifn_p && ifn && (sctp_ifnp->ifn_p == ifn)) {
+ return (sctp_ifnp);
+ }
+ }
+ return (NULL);
+}
+
+
+
+struct sctp_vrf *
+sctp_find_vrf(uint32_t vrf_id)
+{
+ struct sctp_vrflist *bucket;
+ struct sctp_vrf *liste;
+
+ bucket = &SCTP_BASE_INFO(sctp_vrfhash)[(vrf_id & SCTP_BASE_INFO(hashvrfmark))];
+ LIST_FOREACH(liste, bucket, next_vrf) {
+ if (vrf_id == liste->vrf_id) {
+ return (liste);
+ }
+ }
+ return (NULL);
+}
+
+void
+sctp_free_vrf(struct sctp_vrf *vrf)
+{
+ if (SCTP_DECREMENT_AND_CHECK_REFCOUNT(&vrf->refcount)) {
+ if (vrf->vrf_addr_hash) {
+ SCTP_HASH_FREE(vrf->vrf_addr_hash, vrf->vrf_addr_hashmark);
+ vrf->vrf_addr_hash = NULL;
+ }
+ /* We zero'd the count */
+ LIST_REMOVE(vrf, next_vrf);
+ SCTP_FREE(vrf, SCTP_M_VRF);
+ atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_vrfs), 1);
+ }
+}
+
+void
+sctp_free_ifn(struct sctp_ifn *sctp_ifnp)
+{
+ if (SCTP_DECREMENT_AND_CHECK_REFCOUNT(&sctp_ifnp->refcount)) {
+ /* We zero'd the count */
+ if (sctp_ifnp->vrf) {
+ sctp_free_vrf(sctp_ifnp->vrf);
+ }
+ SCTP_FREE(sctp_ifnp, SCTP_M_IFN);
+ atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_ifns), 1);
+ }
+}
+
+void
+sctp_update_ifn_mtu(uint32_t ifn_index, uint32_t mtu)
+{
+ struct sctp_ifn *sctp_ifnp;
+
+ sctp_ifnp = sctp_find_ifn((void *)NULL, ifn_index);
+ if (sctp_ifnp != NULL) {
+ sctp_ifnp->ifn_mtu = mtu;
+ }
+}
+
+
+void
+sctp_free_ifa(struct sctp_ifa *sctp_ifap)
+{
+ if (SCTP_DECREMENT_AND_CHECK_REFCOUNT(&sctp_ifap->refcount)) {
+ /* We zero'd the count */
+ if (sctp_ifap->ifn_p) {
+ sctp_free_ifn(sctp_ifap->ifn_p);
+ }
+ SCTP_FREE(sctp_ifap, SCTP_M_IFA);
+ atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_ifas), 1);
+ }
+}
+
+static void
+sctp_delete_ifn(struct sctp_ifn *sctp_ifnp, int hold_addr_lock)
+{
+ struct sctp_ifn *found;
+
+ found = sctp_find_ifn(sctp_ifnp->ifn_p, sctp_ifnp->ifn_index);
+ if (found == NULL) {
+ /* Not in the list.. sorry */
+ return;
+ }
+ if (hold_addr_lock == 0)
+ SCTP_IPI_ADDR_WLOCK();
+ LIST_REMOVE(sctp_ifnp, next_bucket);
+ LIST_REMOVE(sctp_ifnp, next_ifn);
+ SCTP_DEREGISTER_INTERFACE(sctp_ifnp->ifn_index,
+ sctp_ifnp->registered_af);
+ if (hold_addr_lock == 0)
+ SCTP_IPI_ADDR_WUNLOCK();
+ /* Take away the reference, and possibly free it */
+ sctp_free_ifn(sctp_ifnp);
+}
+
+void
+sctp_mark_ifa_addr_down(uint32_t vrf_id, struct sockaddr *addr,
+ const char *if_name, uint32_t ifn_index)
+{
+ struct sctp_vrf *vrf;
+ struct sctp_ifa *sctp_ifap = NULL;
+
+ SCTP_IPI_ADDR_RLOCK();
+ vrf = sctp_find_vrf(vrf_id);
+ if (vrf == NULL) {
+ SCTPDBG(SCTP_DEBUG_PCB4, "Can't find vrf_id 0x%x\n", vrf_id);
+ goto out;
+
+ }
+ sctp_ifap = sctp_find_ifa_by_addr(addr, vrf->vrf_id, SCTP_ADDR_LOCKED);
+ if (sctp_ifap == NULL) {
+ SCTPDBG(SCTP_DEBUG_PCB4, "Can't find sctp_ifap for address\n");
+ goto out;
+ }
+ if (sctp_ifap->ifn_p == NULL) {
+		SCTPDBG(SCTP_DEBUG_PCB4, "IFA has no IFN - can't mark unusable\n");
+ goto out;
+ }
+ if (if_name) {
+ int len1, len2;
+
+ len1 = strlen(if_name);
+ len2 = strlen(sctp_ifap->ifn_p->ifn_name);
+ if (len1 != len2) {
+ SCTPDBG(SCTP_DEBUG_PCB4, "IFN of ifa names different length %d vs %d - ignored\n",
+ len1, len2);
+ goto out;
+ }
+ if (strncmp(if_name, sctp_ifap->ifn_p->ifn_name, len1) != 0) {
+ SCTPDBG(SCTP_DEBUG_PCB4, "IFN %s of IFA not the same as %s\n",
+ sctp_ifap->ifn_p->ifn_name,
+ if_name);
+ goto out;
+ }
+ } else {
+ if (sctp_ifap->ifn_p->ifn_index != ifn_index) {
+ SCTPDBG(SCTP_DEBUG_PCB4, "IFA owned by ifn_index:%d down command for ifn_index:%d - ignored\n",
+ sctp_ifap->ifn_p->ifn_index, ifn_index);
+ goto out;
+ }
+ }
+
+ sctp_ifap->localifa_flags &= (~SCTP_ADDR_VALID);
+ sctp_ifap->localifa_flags |= SCTP_ADDR_IFA_UNUSEABLE;
+out:
+ SCTP_IPI_ADDR_RUNLOCK();
+}
+
+void
+sctp_mark_ifa_addr_up(uint32_t vrf_id, struct sockaddr *addr,
+ const char *if_name, uint32_t ifn_index)
+{
+ struct sctp_vrf *vrf;
+ struct sctp_ifa *sctp_ifap = NULL;
+
+ SCTP_IPI_ADDR_RLOCK();
+ vrf = sctp_find_vrf(vrf_id);
+ if (vrf == NULL) {
+ SCTPDBG(SCTP_DEBUG_PCB4, "Can't find vrf_id 0x%x\n", vrf_id);
+ goto out;
+
+ }
+ sctp_ifap = sctp_find_ifa_by_addr(addr, vrf->vrf_id, SCTP_ADDR_LOCKED);
+ if (sctp_ifap == NULL) {
+ SCTPDBG(SCTP_DEBUG_PCB4, "Can't find sctp_ifap for address\n");
+ goto out;
+ }
+ if (sctp_ifap->ifn_p == NULL) {
+		SCTPDBG(SCTP_DEBUG_PCB4, "IFA has no IFN - can't mark usable\n");
+ goto out;
+ }
+ if (if_name) {
+ int len1, len2;
+
+ len1 = strlen(if_name);
+ len2 = strlen(sctp_ifap->ifn_p->ifn_name);
+ if (len1 != len2) {
+ SCTPDBG(SCTP_DEBUG_PCB4, "IFN of ifa names different length %d vs %d - ignored\n",
+ len1, len2);
+ goto out;
+ }
+ if (strncmp(if_name, sctp_ifap->ifn_p->ifn_name, len1) != 0) {
+ SCTPDBG(SCTP_DEBUG_PCB4, "IFN %s of IFA not the same as %s\n",
+ sctp_ifap->ifn_p->ifn_name,
+ if_name);
+ goto out;
+ }
+ } else {
+ if (sctp_ifap->ifn_p->ifn_index != ifn_index) {
+			SCTPDBG(SCTP_DEBUG_PCB4, "IFA owned by ifn_index:%d up command for ifn_index:%d - ignored\n",
+ sctp_ifap->ifn_p->ifn_index, ifn_index);
+ goto out;
+ }
+ }
+
+ sctp_ifap->localifa_flags &= (~SCTP_ADDR_IFA_UNUSEABLE);
+ sctp_ifap->localifa_flags |= SCTP_ADDR_VALID;
+out:
+ SCTP_IPI_ADDR_RUNLOCK();
+}
+
+/*-
+ * Add an ifa to an ifn.
+ * Register the interface as necessary.
+ * NOTE: ADDR write lock MUST be held.
+ */
+static void
+sctp_add_ifa_to_ifn(struct sctp_ifn *sctp_ifnp, struct sctp_ifa *sctp_ifap)
+{
+ int ifa_af;
+
+ LIST_INSERT_HEAD(&sctp_ifnp->ifalist, sctp_ifap, next_ifa);
+ sctp_ifap->ifn_p = sctp_ifnp;
+ atomic_add_int(&sctp_ifap->ifn_p->refcount, 1);
+ /* update address counts */
+ sctp_ifnp->ifa_count++;
+ ifa_af = sctp_ifap->address.sa.sa_family;
+ if (ifa_af == AF_INET)
+ sctp_ifnp->num_v4++;
+ else
+ sctp_ifnp->num_v6++;
+ if (sctp_ifnp->ifa_count == 1) {
+ /* register the new interface */
+ SCTP_REGISTER_INTERFACE(sctp_ifnp->ifn_index, ifa_af);
+ sctp_ifnp->registered_af = ifa_af;
+ }
+}
+
+/*-
+ * Remove an ifa from its ifn.
+ * If no more addresses exist, remove the ifn too. Otherwise, re-register
+ * the interface based on the remaining address families left.
+ * NOTE: ADDR write lock MUST be held.
+ */
+static void
+sctp_remove_ifa_from_ifn(struct sctp_ifa *sctp_ifap)
+{
+ uint32_t ifn_index;
+
+ LIST_REMOVE(sctp_ifap, next_ifa);
+ if (sctp_ifap->ifn_p) {
+ /* update address counts */
+ sctp_ifap->ifn_p->ifa_count--;
+ if (sctp_ifap->address.sa.sa_family == AF_INET6)
+ sctp_ifap->ifn_p->num_v6--;
+ else if (sctp_ifap->address.sa.sa_family == AF_INET)
+ sctp_ifap->ifn_p->num_v4--;
+
+ ifn_index = sctp_ifap->ifn_p->ifn_index;
+ if (LIST_EMPTY(&sctp_ifap->ifn_p->ifalist)) {
+ /* remove the ifn, possibly freeing it */
+ sctp_delete_ifn(sctp_ifap->ifn_p, SCTP_ADDR_LOCKED);
+ } else {
+ /* re-register address family type, if needed */
+ if ((sctp_ifap->ifn_p->num_v6 == 0) &&
+ (sctp_ifap->ifn_p->registered_af == AF_INET6)) {
+ SCTP_DEREGISTER_INTERFACE(ifn_index, AF_INET6);
+ SCTP_REGISTER_INTERFACE(ifn_index, AF_INET);
+ sctp_ifap->ifn_p->registered_af = AF_INET;
+ } else if ((sctp_ifap->ifn_p->num_v4 == 0) &&
+ (sctp_ifap->ifn_p->registered_af == AF_INET)) {
+ SCTP_DEREGISTER_INTERFACE(ifn_index, AF_INET);
+ SCTP_REGISTER_INTERFACE(ifn_index, AF_INET6);
+ sctp_ifap->ifn_p->registered_af = AF_INET6;
+ }
+ /* free the ifn refcount */
+ sctp_free_ifn(sctp_ifap->ifn_p);
+ }
+ sctp_ifap->ifn_p = NULL;
+ }
+}
+
+struct sctp_ifa *
+sctp_add_addr_to_vrf(uint32_t vrf_id, void *ifn, uint32_t ifn_index,
+ uint32_t ifn_type, const char *if_name, void *ifa,
+ struct sockaddr *addr, uint32_t ifa_flags,
+ int dynamic_add)
+{
+ struct sctp_vrf *vrf;
+ struct sctp_ifn *sctp_ifnp = NULL;
+ struct sctp_ifa *sctp_ifap = NULL;
+ struct sctp_ifalist *hash_addr_head;
+ struct sctp_ifnlist *hash_ifn_head;
+ uint32_t hash_of_addr;
+ int new_ifn_af = 0;
+
+#ifdef SCTP_DEBUG
+ SCTPDBG(SCTP_DEBUG_PCB4, "vrf_id 0x%x: adding address: ", vrf_id);
+ SCTPDBG_ADDR(SCTP_DEBUG_PCB4, addr);
+#endif
+ SCTP_IPI_ADDR_WLOCK();
+ sctp_ifnp = sctp_find_ifn(ifn, ifn_index);
+ if (sctp_ifnp) {
+ vrf = sctp_ifnp->vrf;
+ } else {
+ vrf = sctp_find_vrf(vrf_id);
+ if (vrf == NULL) {
+ vrf = sctp_allocate_vrf(vrf_id);
+ if (vrf == NULL) {
+ SCTP_IPI_ADDR_WUNLOCK();
+ return (NULL);
+ }
+ }
+ }
+ if (sctp_ifnp == NULL) {
+		/*
+		 * Build one and add it; we can't hold the lock across
+		 * the malloc though.
+		 */
+ SCTP_IPI_ADDR_WUNLOCK();
+ SCTP_MALLOC(sctp_ifnp, struct sctp_ifn *,
+ sizeof(struct sctp_ifn), SCTP_M_IFN);
+ if (sctp_ifnp == NULL) {
+#ifdef INVARIANTS
+ panic("No memory for IFN");
+#endif
+ return (NULL);
+ }
+ memset(sctp_ifnp, 0, sizeof(struct sctp_ifn));
+ sctp_ifnp->ifn_index = ifn_index;
+ sctp_ifnp->ifn_p = ifn;
+ sctp_ifnp->ifn_type = ifn_type;
+ sctp_ifnp->refcount = 0;
+ sctp_ifnp->vrf = vrf;
+ atomic_add_int(&vrf->refcount, 1);
+ sctp_ifnp->ifn_mtu = SCTP_GATHER_MTU_FROM_IFN_INFO(ifn, ifn_index, addr->sa_family);
+ if (if_name != NULL) {
+ memcpy(sctp_ifnp->ifn_name, if_name, SCTP_IFNAMSIZ);
+ } else {
+ memcpy(sctp_ifnp->ifn_name, "unknown", min(7, SCTP_IFNAMSIZ));
+ }
+ hash_ifn_head = &SCTP_BASE_INFO(vrf_ifn_hash)[(ifn_index & SCTP_BASE_INFO(vrf_ifn_hashmark))];
+ LIST_INIT(&sctp_ifnp->ifalist);
+ SCTP_IPI_ADDR_WLOCK();
+ LIST_INSERT_HEAD(hash_ifn_head, sctp_ifnp, next_bucket);
+ LIST_INSERT_HEAD(&vrf->ifnlist, sctp_ifnp, next_ifn);
+ atomic_add_int(&SCTP_BASE_INFO(ipi_count_ifns), 1);
+ new_ifn_af = 1;
+ }
+ sctp_ifap = sctp_find_ifa_by_addr(addr, vrf->vrf_id, SCTP_ADDR_LOCKED);
+ if (sctp_ifap) {
+ /* Hmm, it already exists? */
+ if ((sctp_ifap->ifn_p) &&
+ (sctp_ifap->ifn_p->ifn_index == ifn_index)) {
+ SCTPDBG(SCTP_DEBUG_PCB4, "Using existing ifn %s (0x%x) for ifa %p\n",
+ sctp_ifap->ifn_p->ifn_name, ifn_index,
+ sctp_ifap);
+ if (new_ifn_af) {
+ /* Remove the created one that we don't want */
+ sctp_delete_ifn(sctp_ifnp, SCTP_ADDR_LOCKED);
+ }
+ if (sctp_ifap->localifa_flags & SCTP_BEING_DELETED) {
+ /* easy to solve, just switch back to active */
+ SCTPDBG(SCTP_DEBUG_PCB4, "Clearing deleted ifa flag\n");
+ sctp_ifap->localifa_flags = SCTP_ADDR_VALID;
+ sctp_ifap->ifn_p = sctp_ifnp;
+ atomic_add_int(&sctp_ifap->ifn_p->refcount, 1);
+ }
+ exit_stage_left:
+ SCTP_IPI_ADDR_WUNLOCK();
+ return (sctp_ifap);
+ } else {
+ if (sctp_ifap->ifn_p) {
+ /*
+ * The last IFN gets the address, remove the
+ * old one
+ */
+ SCTPDBG(SCTP_DEBUG_PCB4, "Moving ifa %p from %s (0x%x) to %s (0x%x)\n",
+ sctp_ifap, sctp_ifap->ifn_p->ifn_name,
+ sctp_ifap->ifn_p->ifn_index, if_name,
+ ifn_index);
+ /* remove the address from the old ifn */
+ sctp_remove_ifa_from_ifn(sctp_ifap);
+ /* move the address over to the new ifn */
+ sctp_add_ifa_to_ifn(sctp_ifnp, sctp_ifap);
+ goto exit_stage_left;
+ } else {
+				/* repair ifn_p which was NULL */
+ sctp_ifap->localifa_flags = SCTP_ADDR_VALID;
+ SCTPDBG(SCTP_DEBUG_PCB4, "Repairing ifn %p for ifa %p\n",
+ sctp_ifnp, sctp_ifap);
+ sctp_add_ifa_to_ifn(sctp_ifnp, sctp_ifap);
+ }
+ goto exit_stage_left;
+ }
+ }
+ SCTP_IPI_ADDR_WUNLOCK();
+ SCTP_MALLOC(sctp_ifap, struct sctp_ifa *, sizeof(struct sctp_ifa), SCTP_M_IFA);
+ if (sctp_ifap == NULL) {
+#ifdef INVARIANTS
+ panic("No memory for IFA");
+#endif
+ return (NULL);
+ }
+ memset(sctp_ifap, 0, sizeof(struct sctp_ifa));
+ sctp_ifap->ifn_p = sctp_ifnp;
+ atomic_add_int(&sctp_ifnp->refcount, 1);
+ sctp_ifap->vrf_id = vrf_id;
+ sctp_ifap->ifa = ifa;
+ memcpy(&sctp_ifap->address, addr, addr->sa_len);
+ sctp_ifap->localifa_flags = SCTP_ADDR_VALID | SCTP_ADDR_DEFER_USE;
+ sctp_ifap->flags = ifa_flags;
+ /* Set scope */
+ switch (sctp_ifap->address.sa.sa_family) {
+ case AF_INET:
+ {
+ struct sockaddr_in *sin;
+
+ sin = (struct sockaddr_in *)&sctp_ifap->address.sin;
+ if (SCTP_IFN_IS_IFT_LOOP(sctp_ifap->ifn_p) ||
+ (IN4_ISLOOPBACK_ADDRESS(&sin->sin_addr))) {
+ sctp_ifap->src_is_loop = 1;
+ }
+ if ((IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
+ sctp_ifap->src_is_priv = 1;
+ }
+ sctp_ifnp->num_v4++;
+ if (new_ifn_af)
+ new_ifn_af = AF_INET;
+ break;
+ }
+#ifdef INET6
+ case AF_INET6:
+ {
+ /* ok to use deprecated addresses? */
+ struct sockaddr_in6 *sin6;
+
+ sin6 = (struct sockaddr_in6 *)&sctp_ifap->address.sin6;
+ if (SCTP_IFN_IS_IFT_LOOP(sctp_ifap->ifn_p) ||
+ (IN6_IS_ADDR_LOOPBACK(&sin6->sin6_addr))) {
+ sctp_ifap->src_is_loop = 1;
+ }
+ if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
+ sctp_ifap->src_is_priv = 1;
+ }
+ sctp_ifnp->num_v6++;
+ if (new_ifn_af)
+ new_ifn_af = AF_INET6;
+ break;
+ }
+#endif
+ default:
+ new_ifn_af = 0;
+ break;
+ }
+ hash_of_addr = sctp_get_ifa_hash_val(&sctp_ifap->address.sa);
+
+ if ((sctp_ifap->src_is_priv == 0) &&
+ (sctp_ifap->src_is_loop == 0)) {
+ sctp_ifap->src_is_glob = 1;
+ }
+ SCTP_IPI_ADDR_WLOCK();
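+	/* Insert into the VRF's address hash, keyed on the address itself. */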
+ hash_addr_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)];
+ LIST_INSERT_HEAD(hash_addr_head, sctp_ifap, next_bucket);
+ sctp_ifap->refcount = 1;
+ LIST_INSERT_HEAD(&sctp_ifnp->ifalist, sctp_ifap, next_ifa);
+ sctp_ifnp->ifa_count++;
+ vrf->total_ifa_count++;
+ atomic_add_int(&SCTP_BASE_INFO(ipi_count_ifas), 1);
+ if (new_ifn_af) {
+ SCTP_REGISTER_INTERFACE(ifn_index, new_ifn_af);
+ sctp_ifnp->registered_af = new_ifn_af;
+ }
+ SCTP_IPI_ADDR_WUNLOCK();
+ if (dynamic_add) {
+ /*
+ * Bump up the refcount so that when the timer completes it
+ * will drop back down.
+ */
+ struct sctp_laddr *wi;
+
+ atomic_add_int(&sctp_ifap->refcount, 1);
+ wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
+ if (wi == NULL) {
+			/*
+			 * Gak, what can we do? We have lost an address
+			 * change. Can you say HOSED?
+			 */
+ SCTPDBG(SCTP_DEBUG_PCB4, "Lost an address change?\n");
+			/* Oops, must decrement the count */
+ sctp_del_addr_from_vrf(vrf_id, addr, ifn_index,
+ if_name);
+ return (NULL);
+ }
+ SCTP_INCR_LADDR_COUNT();
+ bzero(wi, sizeof(*wi));
+ (void)SCTP_GETTIME_TIMEVAL(&wi->start_time);
+ wi->ifa = sctp_ifap;
+ wi->action = SCTP_ADD_IP_ADDRESS;
+
+ SCTP_WQ_ADDR_LOCK();
+ LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
+ SCTP_WQ_ADDR_UNLOCK();
+
+ sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
+ (struct sctp_inpcb *)NULL,
+ (struct sctp_tcb *)NULL,
+ (struct sctp_nets *)NULL);
+ } else {
+ /* it's ready for use */
+ sctp_ifap->localifa_flags &= ~SCTP_ADDR_DEFER_USE;
+ }
+ return (sctp_ifap);
+}
+
+void
+sctp_del_addr_from_vrf(uint32_t vrf_id, struct sockaddr *addr,
+ uint32_t ifn_index, const char *if_name)
+{
+ struct sctp_vrf *vrf;
+ struct sctp_ifa *sctp_ifap = NULL;
+
+ SCTP_IPI_ADDR_WLOCK();
+ vrf = sctp_find_vrf(vrf_id);
+ if (vrf == NULL) {
+ SCTPDBG(SCTP_DEBUG_PCB4, "Can't find vrf_id 0x%x\n", vrf_id);
+ goto out_now;
+ }
+#ifdef SCTP_DEBUG
+ SCTPDBG(SCTP_DEBUG_PCB4, "vrf_id 0x%x: deleting address:", vrf_id);
+ SCTPDBG_ADDR(SCTP_DEBUG_PCB4, addr);
+#endif
+ sctp_ifap = sctp_find_ifa_by_addr(addr, vrf->vrf_id, SCTP_ADDR_LOCKED);
+ if (sctp_ifap) {
+ /* Validate the delete */
+ if (sctp_ifap->ifn_p) {
+ int valid = 0;
+
+			/*-
+			 * The name has priority over the ifn_index
+			 * if it's given. We do this especially for
+			 * panda, which might recycle indexes fast.
+			 */
+ if (if_name) {
+ int len1, len2;
+
+ len1 = min(SCTP_IFNAMSIZ, strlen(if_name));
+ len2 = min(SCTP_IFNAMSIZ, strlen(sctp_ifap->ifn_p->ifn_name));
+ if (len1 && len2 && (len1 == len2)) {
+ /* we can compare them */
+ if (strncmp(if_name, sctp_ifap->ifn_p->ifn_name, len1) == 0) {
+						/*
+						 * They match; it's a
+						 * correct delete
+						 */
+ valid = 1;
+ }
+ }
+ }
+ if (!valid) {
+ /* last ditch check ifn_index */
+ if (ifn_index == sctp_ifap->ifn_p->ifn_index) {
+ valid = 1;
+ }
+ }
+ if (!valid) {
+ SCTPDBG(SCTP_DEBUG_PCB4, "ifn:%d ifname:%s does not match addresses\n",
+ ifn_index, ((if_name == NULL) ? "NULL" : if_name));
+ SCTPDBG(SCTP_DEBUG_PCB4, "ifn:%d ifname:%s - ignoring delete\n",
+ sctp_ifap->ifn_p->ifn_index, sctp_ifap->ifn_p->ifn_name);
+ SCTP_IPI_ADDR_WUNLOCK();
+ return;
+ }
+ }
+ SCTPDBG(SCTP_DEBUG_PCB4, "Deleting ifa %p\n", sctp_ifap);
+ sctp_ifap->localifa_flags &= SCTP_ADDR_VALID;
+ sctp_ifap->localifa_flags |= SCTP_BEING_DELETED;
+ vrf->total_ifa_count--;
+ LIST_REMOVE(sctp_ifap, next_bucket);
+ sctp_remove_ifa_from_ifn(sctp_ifap);
+ }
+#ifdef SCTP_DEBUG
+ else {
+ SCTPDBG(SCTP_DEBUG_PCB4, "Del Addr-ifn:%d Could not find address:",
+ ifn_index);
+ SCTPDBG_ADDR(SCTP_DEBUG_PCB1, addr);
+ }
+#endif
+
+out_now:
+ SCTP_IPI_ADDR_WUNLOCK();
+ if (sctp_ifap) {
+ struct sctp_laddr *wi;
+
+ wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
+ if (wi == NULL) {
+			/*
+			 * Gak, what can we do? We have lost an address
+			 * change. Can you say HOSED?
+			 */
+ SCTPDBG(SCTP_DEBUG_PCB4, "Lost an address change?\n");
+
+ /* Oops, must decrement the count */
+ sctp_free_ifa(sctp_ifap);
+ return;
+ }
+ SCTP_INCR_LADDR_COUNT();
+ bzero(wi, sizeof(*wi));
+ (void)SCTP_GETTIME_TIMEVAL(&wi->start_time);
+ wi->ifa = sctp_ifap;
+ wi->action = SCTP_DEL_IP_ADDRESS;
+ SCTP_WQ_ADDR_LOCK();
+ /*
+ * Should this really be a tailq? As it is we will process
+ * the newest first :-0
+ */
+ LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
+ SCTP_WQ_ADDR_UNLOCK();
+
+ sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
+ (struct sctp_inpcb *)NULL,
+ (struct sctp_tcb *)NULL,
+ (struct sctp_nets *)NULL);
+ }
+ return;
+}
+
+
+static struct sctp_tcb *
+sctp_tcb_special_locate(struct sctp_inpcb **inp_p, struct sockaddr *from,
+ struct sockaddr *to, struct sctp_nets **netp, uint32_t vrf_id)
+{
+ /**** ASSUMES THE CALLER holds the INP_INFO_RLOCK */
+ /*
+ * If we support the TCP model, then we must now dig through to see
+ * if we can find our endpoint in the list of tcp ep's.
+ */
+ uint16_t lport, rport;
+ struct sctppcbhead *ephead;
+ struct sctp_inpcb *inp;
+ struct sctp_laddr *laddr;
+ struct sctp_tcb *stcb;
+ struct sctp_nets *net;
+
+ if ((to == NULL) || (from == NULL)) {
+ return (NULL);
+ }
+ if (to->sa_family == AF_INET && from->sa_family == AF_INET) {
+ lport = ((struct sockaddr_in *)to)->sin_port;
+ rport = ((struct sockaddr_in *)from)->sin_port;
+ } else if (to->sa_family == AF_INET6 && from->sa_family == AF_INET6) {
+ lport = ((struct sockaddr_in6 *)to)->sin6_port;
+ rport = ((struct sockaddr_in6 *)from)->sin6_port;
+ } else {
+ return NULL;
+ }
+ ephead = &SCTP_BASE_INFO(sctp_tcpephash)[SCTP_PCBHASH_ALLADDR((lport | rport), SCTP_BASE_INFO(hashtcpmark))];
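+	/*
+	 * The TCP-model hash bucket is keyed on the combination of the
+	 * local and remote ports.
+	 */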
+	/*
+	 * Ok, now for each of the guys in this bucket we must look and see:
+	 * - Does the remote port match?
+	 * - Do their single association's addresses match this address (to)?
+	 * If so we update p_ep to point to this ep and return the tcb from
+	 * it.
+	 */
+ LIST_FOREACH(inp, ephead, sctp_hash) {
+ SCTP_INP_RLOCK(inp);
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) {
+ SCTP_INP_RUNLOCK(inp);
+ continue;
+ }
+ if (lport != inp->sctp_lport) {
+ SCTP_INP_RUNLOCK(inp);
+ continue;
+ }
+ if (inp->def_vrf_id != vrf_id) {
+ SCTP_INP_RUNLOCK(inp);
+ continue;
+ }
+ /* check to see if the ep has one of the addresses */
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) == 0) {
+ /* We are NOT bound all, so look further */
+ int match = 0;
+
+ LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
+
+ if (laddr->ifa == NULL) {
+ SCTPDBG(SCTP_DEBUG_PCB1, "%s: NULL ifa\n", __FUNCTION__);
+ continue;
+ }
+ if (laddr->ifa->localifa_flags & SCTP_BEING_DELETED) {
+ SCTPDBG(SCTP_DEBUG_PCB1, "ifa being deleted\n");
+ continue;
+ }
+ if (laddr->ifa->address.sa.sa_family ==
+ to->sa_family) {
+ /* see if it matches */
+ struct sockaddr_in *intf_addr, *sin;
+
+ intf_addr = &laddr->ifa->address.sin;
+ sin = (struct sockaddr_in *)to;
+ if (from->sa_family == AF_INET) {
+ if (sin->sin_addr.s_addr ==
+ intf_addr->sin_addr.s_addr) {
+ match = 1;
+ break;
+ }
+ }
+#ifdef INET6
+ if (from->sa_family == AF_INET6) {
+ struct sockaddr_in6 *intf_addr6;
+ struct sockaddr_in6 *sin6;
+
+ sin6 = (struct sockaddr_in6 *)
+ to;
+ intf_addr6 = &laddr->ifa->address.sin6;
+
+ if (SCTP6_ARE_ADDR_EQUAL(sin6,
+ intf_addr6)) {
+ match = 1;
+ break;
+ }
+ }
+#endif
+ }
+ }
+ if (match == 0) {
+ /* This endpoint does not have this address */
+ SCTP_INP_RUNLOCK(inp);
+ continue;
+ }
+ }
+		/*
+		 * Ok, if we hit here the ep has the address; does it
+		 * hold the tcb?
+		 */
+
+ stcb = LIST_FIRST(&inp->sctp_asoc_list);
+ if (stcb == NULL) {
+ SCTP_INP_RUNLOCK(inp);
+ continue;
+ }
+ SCTP_TCB_LOCK(stcb);
+ if (stcb->rport != rport) {
+ /* remote port does not match. */
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_INP_RUNLOCK(inp);
+ continue;
+ }
+ if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_INP_RUNLOCK(inp);
+ continue;
+ }
+ /* Does this TCB have a matching address? */
+ TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+
+ if (net->ro._l_addr.sa.sa_family != from->sa_family) {
+ /* not the same family, can't be a match */
+ continue;
+ }
+ switch (from->sa_family) {
+ case AF_INET:
+ {
+ struct sockaddr_in *sin, *rsin;
+
+ sin = (struct sockaddr_in *)&net->ro._l_addr;
+ rsin = (struct sockaddr_in *)from;
+ if (sin->sin_addr.s_addr ==
+ rsin->sin_addr.s_addr) {
+ /* found it */
+ if (netp != NULL) {
+ *netp = net;
+ }
+ /*
+ * Update the endpoint
+ * pointer
+ */
+ *inp_p = inp;
+ SCTP_INP_RUNLOCK(inp);
+ return (stcb);
+ }
+ break;
+ }
+#ifdef INET6
+ case AF_INET6:
+ {
+ struct sockaddr_in6 *sin6, *rsin6;
+
+ sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
+ rsin6 = (struct sockaddr_in6 *)from;
+ if (SCTP6_ARE_ADDR_EQUAL(sin6,
+ rsin6)) {
+ /* found it */
+ if (netp != NULL) {
+ *netp = net;
+ }
+ /*
+ * Update the endpoint
+ * pointer
+ */
+ *inp_p = inp;
+ SCTP_INP_RUNLOCK(inp);
+ return (stcb);
+ }
+ break;
+ }
+#endif
+ default:
+ /* TSNH */
+ break;
+ }
+ }
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_INP_RUNLOCK(inp);
+ }
+ return (NULL);
+}
+
+static int
+sctp_does_stcb_own_this_addr(struct sctp_tcb *stcb, struct sockaddr *to)
+{
+ int loopback_scope, ipv4_local_scope, local_scope, site_scope;
+ int ipv4_addr_legal, ipv6_addr_legal;
+ struct sctp_vrf *vrf;
+ struct sctp_ifn *sctp_ifn;
+ struct sctp_ifa *sctp_ifa;
+
+ loopback_scope = stcb->asoc.loopback_scope;
+ ipv4_local_scope = stcb->asoc.ipv4_local_scope;
+ local_scope = stcb->asoc.local_scope;
+ site_scope = stcb->asoc.site_scope;
+ ipv4_addr_legal = ipv6_addr_legal = 0;
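+	/*
+	 * Which address families are usable follows from how the endpoint
+	 * is bound: a v6-bound endpoint may also use v4 unless it is
+	 * v6-only.
+	 */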
+ if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
+ ipv6_addr_legal = 1;
+ if (SCTP_IPV6_V6ONLY(stcb->sctp_ep) == 0) {
+ ipv4_addr_legal = 1;
+ }
+ } else {
+ ipv4_addr_legal = 1;
+ }
+
+ SCTP_IPI_ADDR_RLOCK();
+ vrf = sctp_find_vrf(stcb->asoc.vrf_id);
+ if (vrf == NULL) {
+ /* no vrf, no addresses */
+ SCTP_IPI_ADDR_RUNLOCK();
+ return (0);
+ }
+ if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
+ LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
+ if ((loopback_scope == 0) &&
+ SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
+ continue;
+ }
+ LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
+ if (sctp_is_addr_restricted(stcb, sctp_ifa))
+ continue;
+ switch (sctp_ifa->address.sa.sa_family) {
+#ifdef INET
+ case AF_INET:
+ if (ipv4_addr_legal) {
+ struct sockaddr_in *sin,
+ *rsin;
+
+ sin = &sctp_ifa->address.sin;
+ rsin = (struct sockaddr_in *)to;
+ if ((ipv4_local_scope == 0) &&
+ IN4_ISPRIVATE_ADDRESS(&sin->sin_addr)) {
+ continue;
+ }
+ if (sin->sin_addr.s_addr == rsin->sin_addr.s_addr) {
+ SCTP_IPI_ADDR_RUNLOCK();
+ return (1);
+ }
+ }
+ break;
+#endif
+#ifdef INET6
+ case AF_INET6:
+ if (ipv6_addr_legal) {
+ struct sockaddr_in6 *sin6,
+ *rsin6;
+
+ sin6 = &sctp_ifa->address.sin6;
+ rsin6 = (struct sockaddr_in6 *)to;
+ if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
+ if (local_scope == 0)
+ continue;
+ if (sin6->sin6_scope_id == 0) {
+ if (sa6_recoverscope(sin6) != 0)
+ continue;
+ }
+ }
+ if ((site_scope == 0) &&
+ (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
+ continue;
+ }
+ if (SCTP6_ARE_ADDR_EQUAL(sin6, rsin6)) {
+ SCTP_IPI_ADDR_RUNLOCK();
+ return (1);
+ }
+ }
+ break;
+#endif
+ default:
+ /* TSNH */
+ break;
+ }
+ }
+ }
+ } else {
+ struct sctp_laddr *laddr;
+
+ LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list, sctp_nxt_addr) {
+ if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
+ continue;
+ }
+ if (laddr->ifa->address.sa.sa_family != to->sa_family) {
+ continue;
+ }
+ switch (to->sa_family) {
+#ifdef INET
+ case AF_INET:
+ {
+ struct sockaddr_in *sin, *rsin;
+
+ sin = (struct sockaddr_in *)&laddr->ifa->address.sin;
+ rsin = (struct sockaddr_in *)to;
+ if (sin->sin_addr.s_addr == rsin->sin_addr.s_addr) {
+ SCTP_IPI_ADDR_RUNLOCK();
+ return (1);
+ }
+ break;
+ }
+#endif
+#ifdef INET6
+ case AF_INET6:
+ {
+ struct sockaddr_in6 *sin6, *rsin6;
+
+ sin6 = (struct sockaddr_in6 *)&laddr->ifa->address.sin6;
+ rsin6 = (struct sockaddr_in6 *)to;
+ if (SCTP6_ARE_ADDR_EQUAL(sin6, rsin6)) {
+ SCTP_IPI_ADDR_RUNLOCK();
+ return (1);
+ }
+ break;
+ }
+
+#endif
+ default:
+ /* TSNH */
+ break;
+ }
+
+ }
+ }
+ SCTP_IPI_ADDR_RUNLOCK();
+ return (0);
+}
+
+/*
+ * Rules for use:
+ *
+ * 1) If I return a NULL you must decrement any INP ref count.
+ * 2) If I find an stcb, both will be locked (locked_tcb and stcb) but the
+ *    decrement will be done (if locked_tcb == NULL).
+ * 3) Decrement happens on return ONLY if locked_tcb == NULL.
+ */
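+/*
+ * A minimal caller sketch of these rules (locals "inp", "src", "dst" and
+ * "net" are illustrative; the real pattern appears in
+ * sctp_findassociation_addr_sa() below):
+ *
+ *	inp = sctp_pcb_findep(dst, 0, 1, vrf_id);	// bumps the inp refcount
+ *	if (inp != NULL) {
+ *		stcb = sctp_findassociation_ep_addr(&inp, src, &net, dst, NULL);
+ *		if (stcb == NULL)
+ *			SCTP_INP_DECR_REF(inp);		// rule 1
+ *		else
+ *			SCTP_TCB_UNLOCK(stcb);		// rules 2/3: ref already dropped
+ *	}
+ */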
+
+struct sctp_tcb *
+sctp_findassociation_ep_addr(struct sctp_inpcb **inp_p, struct sockaddr *remote,
+ struct sctp_nets **netp, struct sockaddr *local, struct sctp_tcb *locked_tcb)
+{
+ struct sctpasochead *head;
+ struct sctp_inpcb *inp;
+ struct sctp_tcb *stcb = NULL;
+ struct sctp_nets *net;
+ uint16_t rport;
+
+ inp = *inp_p;
+ if (remote->sa_family == AF_INET) {
+ rport = (((struct sockaddr_in *)remote)->sin_port);
+ } else if (remote->sa_family == AF_INET6) {
+ rport = (((struct sockaddr_in6 *)remote)->sin6_port);
+ } else {
+ return (NULL);
+ }
+ if (locked_tcb) {
+		/*
+		 * Unlock so we can do proper locking here; this occurs when
+		 * called from load_addresses_from_init.
+		 */
+ atomic_add_int(&locked_tcb->asoc.refcnt, 1);
+ SCTP_TCB_UNLOCK(locked_tcb);
+ }
+ SCTP_INP_INFO_RLOCK();
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) {
+		/*-
+		 * Now this endpoint is either our listener or the
+		 * connector. If it is the one that issued the connect, then
+		 * its only chance is to be the first TCB in the list. If
+		 * it is the acceptor, then do the special_lookup to hash
+		 * and find the real inp.
+		 */
+ if ((inp->sctp_socket) && (inp->sctp_socket->so_qlimit)) {
+ /* to is peer addr, from is my addr */
+ stcb = sctp_tcb_special_locate(inp_p, remote, local,
+ netp, inp->def_vrf_id);
+ if ((stcb != NULL) && (locked_tcb == NULL)) {
+				/* we have a locked tcb, lower the inp refcount */
+ SCTP_INP_DECR_REF(inp);
+ }
+ if ((locked_tcb != NULL) && (locked_tcb != stcb)) {
+ SCTP_INP_RLOCK(locked_tcb->sctp_ep);
+ SCTP_TCB_LOCK(locked_tcb);
+ atomic_subtract_int(&locked_tcb->asoc.refcnt, 1);
+ SCTP_INP_RUNLOCK(locked_tcb->sctp_ep);
+ }
+ SCTP_INP_INFO_RUNLOCK();
+ return (stcb);
+ } else {
+ SCTP_INP_WLOCK(inp);
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) {
+ goto null_return;
+ }
+ stcb = LIST_FIRST(&inp->sctp_asoc_list);
+ if (stcb == NULL) {
+ goto null_return;
+ }
+ SCTP_TCB_LOCK(stcb);
+
+ if (stcb->rport != rport) {
+ /* remote port does not match. */
+ SCTP_TCB_UNLOCK(stcb);
+ goto null_return;
+ }
+ if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
+ SCTP_TCB_UNLOCK(stcb);
+ goto null_return;
+ }
+ if (local && !sctp_does_stcb_own_this_addr(stcb, local)) {
+ SCTP_TCB_UNLOCK(stcb);
+ goto null_return;
+ }
+ /* now look at the list of remote addresses */
+ TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+#ifdef INVARIANTS
+ if (net == (TAILQ_NEXT(net, sctp_next))) {
+ panic("Corrupt net list");
+ }
+#endif
+ if (net->ro._l_addr.sa.sa_family !=
+ remote->sa_family) {
+ /* not the same family */
+ continue;
+ }
+ switch (remote->sa_family) {
+ case AF_INET:
+ {
+ struct sockaddr_in *sin,
+ *rsin;
+
+ sin = (struct sockaddr_in *)
+ &net->ro._l_addr;
+ rsin = (struct sockaddr_in *)remote;
+ if (sin->sin_addr.s_addr ==
+ rsin->sin_addr.s_addr) {
+ /* found it */
+ if (netp != NULL) {
+ *netp = net;
+ }
+ if (locked_tcb == NULL) {
+ SCTP_INP_DECR_REF(inp);
+ } else if (locked_tcb != stcb) {
+ SCTP_TCB_LOCK(locked_tcb);
+ }
+ if (locked_tcb) {
+ atomic_subtract_int(&locked_tcb->asoc.refcnt, 1);
+ }
+ SCTP_INP_WUNLOCK(inp);
+ SCTP_INP_INFO_RUNLOCK();
+ return (stcb);
+ }
+ break;
+ }
+#ifdef INET6
+ case AF_INET6:
+ {
+ struct sockaddr_in6 *sin6,
+ *rsin6;
+
+ sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
+ rsin6 = (struct sockaddr_in6 *)remote;
+ if (SCTP6_ARE_ADDR_EQUAL(sin6,
+ rsin6)) {
+ /* found it */
+ if (netp != NULL) {
+ *netp = net;
+ }
+ if (locked_tcb == NULL) {
+ SCTP_INP_DECR_REF(inp);
+ } else if (locked_tcb != stcb) {
+ SCTP_TCB_LOCK(locked_tcb);
+ }
+ if (locked_tcb) {
+ atomic_subtract_int(&locked_tcb->asoc.refcnt, 1);
+ }
+ SCTP_INP_WUNLOCK(inp);
+ SCTP_INP_INFO_RUNLOCK();
+ return (stcb);
+ }
+ break;
+ }
+#endif
+ default:
+ /* TSNH */
+ break;
+ }
+ }
+ SCTP_TCB_UNLOCK(stcb);
+ }
+ } else {
+ SCTP_INP_WLOCK(inp);
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) {
+ goto null_return;
+ }
+ head = &inp->sctp_tcbhash[SCTP_PCBHASH_ALLADDR(rport,
+ inp->sctp_hashmark)];
+ if (head == NULL) {
+ goto null_return;
+ }
+ LIST_FOREACH(stcb, head, sctp_tcbhash) {
+ if (stcb->rport != rport) {
+ /* remote port does not match */
+ continue;
+ }
+ SCTP_TCB_LOCK(stcb);
+ if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
+ SCTP_TCB_UNLOCK(stcb);
+ continue;
+ }
+ if (local && !sctp_does_stcb_own_this_addr(stcb, local)) {
+ SCTP_TCB_UNLOCK(stcb);
+ continue;
+ }
+ /* now look at the list of remote addresses */
+ TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+#ifdef INVARIANTS
+ if (net == (TAILQ_NEXT(net, sctp_next))) {
+ panic("Corrupt net list");
+ }
+#endif
+ if (net->ro._l_addr.sa.sa_family !=
+ remote->sa_family) {
+ /* not the same family */
+ continue;
+ }
+ switch (remote->sa_family) {
+ case AF_INET:
+ {
+ struct sockaddr_in *sin,
+ *rsin;
+
+ sin = (struct sockaddr_in *)
+ &net->ro._l_addr;
+ rsin = (struct sockaddr_in *)remote;
+ if (sin->sin_addr.s_addr ==
+ rsin->sin_addr.s_addr) {
+ /* found it */
+ if (netp != NULL) {
+ *netp = net;
+ }
+ if (locked_tcb == NULL) {
+ SCTP_INP_DECR_REF(inp);
+ } else if (locked_tcb != stcb) {
+ SCTP_TCB_LOCK(locked_tcb);
+ }
+ if (locked_tcb) {
+ atomic_subtract_int(&locked_tcb->asoc.refcnt, 1);
+ }
+ SCTP_INP_WUNLOCK(inp);
+ SCTP_INP_INFO_RUNLOCK();
+ return (stcb);
+ }
+ break;
+ }
+#ifdef INET6
+ case AF_INET6:
+ {
+ struct sockaddr_in6 *sin6,
+ *rsin6;
+
+ sin6 = (struct sockaddr_in6 *)
+ &net->ro._l_addr;
+ rsin6 = (struct sockaddr_in6 *)remote;
+ if (SCTP6_ARE_ADDR_EQUAL(sin6,
+ rsin6)) {
+ /* found it */
+ if (netp != NULL) {
+ *netp = net;
+ }
+ if (locked_tcb == NULL) {
+ SCTP_INP_DECR_REF(inp);
+ } else if (locked_tcb != stcb) {
+ SCTP_TCB_LOCK(locked_tcb);
+ }
+ if (locked_tcb) {
+ atomic_subtract_int(&locked_tcb->asoc.refcnt, 1);
+ }
+ SCTP_INP_WUNLOCK(inp);
+ SCTP_INP_INFO_RUNLOCK();
+ return (stcb);
+ }
+ break;
+ }
+#endif
+ default:
+ /* TSNH */
+ break;
+ }
+ }
+ SCTP_TCB_UNLOCK(stcb);
+ }
+ }
+null_return:
+ /* clean up for returning null */
+ if (locked_tcb) {
+ SCTP_TCB_LOCK(locked_tcb);
+ atomic_subtract_int(&locked_tcb->asoc.refcnt, 1);
+ }
+ SCTP_INP_WUNLOCK(inp);
+ SCTP_INP_INFO_RUNLOCK();
+ /* not found */
+ return (NULL);
+}
+
+/*
+ * Find an association for a specific endpoint using the association id given
+ * out in the COMM_UP notification
+ */
+
+struct sctp_tcb *
+sctp_findasoc_ep_asocid_locked(struct sctp_inpcb *inp, sctp_assoc_t asoc_id, int want_lock)
+{
+	/*
+	 * Use the assoc_id to find an endpoint
+	 */
+ struct sctpasochead *head;
+ struct sctp_tcb *stcb;
+ uint32_t id;
+
+ if (inp == NULL) {
+ SCTP_PRINTF("TSNH ep_associd\n");
+ return (NULL);
+ }
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) {
+ SCTP_PRINTF("TSNH ep_associd0\n");
+ return (NULL);
+ }
+ id = (uint32_t) asoc_id;
+ head = &inp->sctp_asocidhash[SCTP_PCBHASH_ASOC(id, inp->hashasocidmark)];
+ if (head == NULL) {
+ /* invalid id TSNH */
+ SCTP_PRINTF("TSNH ep_associd1\n");
+ return (NULL);
+ }
+ LIST_FOREACH(stcb, head, sctp_tcbasocidhash) {
+ if (stcb->asoc.assoc_id == id) {
+ if (inp != stcb->sctp_ep) {
+				/*
+				 * some other endpoint has the same id active
+				 * (id collision?).
+				 */
+ SCTP_PRINTF("TSNH ep_associd2\n");
+ continue;
+ }
+ if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
+ continue;
+ }
+ if (want_lock) {
+ SCTP_TCB_LOCK(stcb);
+ }
+ return (stcb);
+ }
+ }
+ return (NULL);
+}
+
+
+struct sctp_tcb *
+sctp_findassociation_ep_asocid(struct sctp_inpcb *inp, sctp_assoc_t asoc_id, int want_lock)
+{
+ struct sctp_tcb *stcb;
+
+ SCTP_INP_RLOCK(inp);
+ stcb = sctp_findasoc_ep_asocid_locked(inp, asoc_id, want_lock);
+ SCTP_INP_RUNLOCK(inp);
+ return (stcb);
+}
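+/*
+ * Usage sketch (illustrative only): resolving the assoc id carried in a
+ * COMM_UP notification. With want_lock set the TCB comes back locked, so
+ * the caller must unlock it:
+ *
+ *	stcb = sctp_findassociation_ep_asocid(inp, assoc_id, 1);
+ *	if (stcb != NULL) {
+ *		... use the association ...
+ *		SCTP_TCB_UNLOCK(stcb);
+ *	}
+ */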
+
+
+static struct sctp_inpcb *
+sctp_endpoint_probe(struct sockaddr *nam, struct sctppcbhead *head,
+ uint16_t lport, uint32_t vrf_id)
+{
+ struct sctp_inpcb *inp;
+ struct sockaddr_in *sin;
+
+#ifdef INET6
+ struct sockaddr_in6 *sin6;
+
+#endif
+ struct sctp_laddr *laddr;
+
+#ifdef INET6
+ struct sockaddr_in6 *intf_addr6;
+
+#endif
+
+ int fnd;
+
+ /*
+ * Endpoint probe expects that the INP_INFO is locked.
+ */
+ sin = NULL;
+#ifdef INET6
+ sin6 = NULL;
+#endif
+ switch (nam->sa_family) {
+ case AF_INET:
+ sin = (struct sockaddr_in *)nam;
+ break;
+#ifdef INET6
+ case AF_INET6:
+ sin6 = (struct sockaddr_in6 *)nam;
+ break;
+#endif
+ default:
+ /* unsupported family */
+ return (NULL);
+ }
+
+ if (head == NULL)
+ return (NULL);
+
+ LIST_FOREACH(inp, head, sctp_hash) {
+ SCTP_INP_RLOCK(inp);
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) {
+ SCTP_INP_RUNLOCK(inp);
+ continue;
+ }
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) &&
+ (inp->sctp_lport == lport)) {
+ /* got it */
+ if ((nam->sa_family == AF_INET) &&
+ (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
+ SCTP_IPV6_V6ONLY(inp)) {
+				/* IPv4 on an IPv6 socket with ONLY IPv6 set */
+ SCTP_INP_RUNLOCK(inp);
+ continue;
+ }
+ /* A V6 address and the endpoint is NOT bound V6 */
+ if (nam->sa_family == AF_INET6 &&
+ (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
+ SCTP_INP_RUNLOCK(inp);
+ continue;
+ }
+ /* does a VRF id match? */
+ fnd = 0;
+ if (inp->def_vrf_id == vrf_id)
+ fnd = 1;
+
+ SCTP_INP_RUNLOCK(inp);
+ if (!fnd)
+ continue;
+ return (inp);
+ }
+ SCTP_INP_RUNLOCK(inp);
+ }
+ if ((nam->sa_family == AF_INET) &&
+ (sin->sin_addr.s_addr == INADDR_ANY)) {
+ /* Can't hunt for one that has no address specified */
+ return (NULL);
+ }
+#ifdef INET6
+ if ((nam->sa_family == AF_INET6) &&
+ (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr))) {
+ /* Can't hunt for one that has no address specified */
+ return (NULL);
+ }
+#endif
+	/*
+	 * ok, not bound to all, so see if we can find an EP bound to this
+	 * address.
+	 */
+ LIST_FOREACH(inp, head, sctp_hash) {
+ SCTP_INP_RLOCK(inp);
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) {
+ SCTP_INP_RUNLOCK(inp);
+ continue;
+ }
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL)) {
+ SCTP_INP_RUNLOCK(inp);
+ continue;
+ }
+		/*
+		 * Ok, this could be a likely candidate; look at all of its
+		 * addresses.
+		 */
+ if (inp->sctp_lport != lport) {
+ SCTP_INP_RUNLOCK(inp);
+ continue;
+ }
+ /* does a VRF id match? */
+ fnd = 0;
+ if (inp->def_vrf_id == vrf_id)
+ fnd = 1;
+
+ if (!fnd) {
+ SCTP_INP_RUNLOCK(inp);
+ continue;
+ }
+ LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
+ if (laddr->ifa == NULL) {
+ SCTPDBG(SCTP_DEBUG_PCB1, "%s: NULL ifa\n",
+ __FUNCTION__);
+ continue;
+ }
+ SCTPDBG(SCTP_DEBUG_PCB1, "Ok laddr->ifa:%p is possible, ",
+ laddr->ifa);
+ if (laddr->ifa->localifa_flags & SCTP_BEING_DELETED) {
+ SCTPDBG(SCTP_DEBUG_PCB1, "Huh IFA being deleted\n");
+ continue;
+ }
+ if (laddr->ifa->address.sa.sa_family == nam->sa_family) {
+ /* possible, see if it matches */
+ struct sockaddr_in *intf_addr;
+
+ intf_addr = &laddr->ifa->address.sin;
+ switch (nam->sa_family) {
+ case AF_INET:
+ if (sin->sin_addr.s_addr ==
+ intf_addr->sin_addr.s_addr) {
+ SCTP_INP_RUNLOCK(inp);
+ return (inp);
+ }
+ break;
+#ifdef INET6
+ case AF_INET6:
+ intf_addr6 = &laddr->ifa->address.sin6;
+ if (SCTP6_ARE_ADDR_EQUAL(sin6,
+ intf_addr6)) {
+ SCTP_INP_RUNLOCK(inp);
+ return (inp);
+ }
+ break;
+#endif
+ }
+ }
+ }
+ SCTP_INP_RUNLOCK(inp);
+ }
+ return (NULL);
+}
+
+
+static struct sctp_inpcb *
+sctp_isport_inuse(struct sctp_inpcb *inp, uint16_t lport, uint32_t vrf_id)
+{
+ struct sctppcbhead *head;
+ struct sctp_inpcb *t_inp;
+ int fnd;
+
+ head = &SCTP_BASE_INFO(sctp_ephash)[SCTP_PCBHASH_ALLADDR(lport,
+ SCTP_BASE_INFO(hashmark))];
+ LIST_FOREACH(t_inp, head, sctp_hash) {
+ if (t_inp->sctp_lport != lport) {
+ continue;
+ }
+ /* is it in the VRF in question */
+ fnd = 0;
+ if (t_inp->def_vrf_id == vrf_id)
+ fnd = 1;
+ if (!fnd)
+ continue;
+
+ /* This one is in use. */
+ /* check the v6/v4 binding issue */
+ if ((t_inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
+ SCTP_IPV6_V6ONLY(t_inp)) {
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
+ /* collision in V6 space */
+ return (t_inp);
+ } else {
+ /* inp is BOUND_V4 no conflict */
+ continue;
+ }
+ } else if (t_inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
+ /* t_inp is bound v4 and v6, conflict always */
+ return (t_inp);
+ } else {
+ /* t_inp is bound only V4 */
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
+ SCTP_IPV6_V6ONLY(inp)) {
+ /* no conflict */
+ continue;
+ }
+ /* else fall through to conflict */
+ }
+ return (t_inp);
+ }
+ return (NULL);
+}
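+/*
+ * Quick reference for the conflict rules above (derived directly from the
+ * code):
+ *
+ *	t_inp bound as		inp bound as		result
+ *	--------------		------------		--------
+ *	v6-only			v6 (any flavor)		conflict
+ *	v6-only			v4-only			no conflict
+ *	v4 and v6		anything		conflict
+ *	v4-only			v6-only			no conflict
+ *	v4-only			v4 (any flavor)		conflict
+ */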
+
+
+int
+sctp_swap_inpcb_for_listen(struct sctp_inpcb *inp)
+{
+ /* For 1-2-1 with port reuse */
+ struct sctppcbhead *head;
+ struct sctp_inpcb *tinp;
+
+ if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_PORTREUSE)) {
+ /* only works with port reuse on */
+ return (-1);
+ }
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) == 0) {
+ return (0);
+ }
+ SCTP_INP_RUNLOCK(inp);
+ head = &SCTP_BASE_INFO(sctp_ephash)[SCTP_PCBHASH_ALLADDR(inp->sctp_lport,
+ SCTP_BASE_INFO(hashmark))];
+ /* Kick out all non-listeners to the TCP hash */
+ LIST_FOREACH(tinp, head, sctp_hash) {
+ if (tinp->sctp_lport != inp->sctp_lport) {
+ continue;
+ }
+ if (tinp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) {
+ continue;
+ }
+ if (tinp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
+ continue;
+ }
+ if (tinp->sctp_socket->so_qlimit) {
+ continue;
+ }
+ SCTP_INP_WLOCK(tinp);
+ LIST_REMOVE(tinp, sctp_hash);
+ head = &SCTP_BASE_INFO(sctp_tcpephash)[SCTP_PCBHASH_ALLADDR(tinp->sctp_lport, SCTP_BASE_INFO(hashtcpmark))];
+ tinp->sctp_flags |= SCTP_PCB_FLAGS_IN_TCPPOOL;
+ LIST_INSERT_HEAD(head, tinp, sctp_hash);
+ SCTP_INP_WUNLOCK(tinp);
+ }
+ SCTP_INP_WLOCK(inp);
+	/* Pull it from where it was */
+ LIST_REMOVE(inp, sctp_hash);
+ inp->sctp_flags &= ~SCTP_PCB_FLAGS_IN_TCPPOOL;
+ head = &SCTP_BASE_INFO(sctp_ephash)[SCTP_PCBHASH_ALLADDR(inp->sctp_lport, SCTP_BASE_INFO(hashmark))];
+ LIST_INSERT_HEAD(head, inp, sctp_hash);
+ SCTP_INP_WUNLOCK(inp);
+ SCTP_INP_RLOCK(inp);
+ return (0);
+}
+
+
+struct sctp_inpcb *
+sctp_pcb_findep(struct sockaddr *nam, int find_tcp_pool, int have_lock,
+ uint32_t vrf_id)
+{
+	/*
+	 * First we check the hash table to see if someone has this port
+	 * bound, hashing on just the port.
+	 */
+ struct sctp_inpcb *inp;
+ struct sctppcbhead *head;
+ struct sockaddr_in *sin;
+ struct sockaddr_in6 *sin6;
+ int lport;
+ unsigned int i;
+
+ if (nam->sa_family == AF_INET) {
+ sin = (struct sockaddr_in *)nam;
+ lport = ((struct sockaddr_in *)nam)->sin_port;
+ } else if (nam->sa_family == AF_INET6) {
+ sin6 = (struct sockaddr_in6 *)nam;
+ lport = ((struct sockaddr_in6 *)nam)->sin6_port;
+ } else {
+ /* unsupported family */
+ return (NULL);
+ }
+	/*
+	 * We could cheat here and just cast to one of the types, but we
+	 * will do it right. This also provides the check against an
+	 * unsupported address family.
+	 */
+ /* Find the head of the ALLADDR chain */
+ if (have_lock == 0) {
+ SCTP_INP_INFO_RLOCK();
+ }
+ head = &SCTP_BASE_INFO(sctp_ephash)[SCTP_PCBHASH_ALLADDR(lport,
+ SCTP_BASE_INFO(hashmark))];
+ inp = sctp_endpoint_probe(nam, head, lport, vrf_id);
+
+	/*
+	 * If the TCP model exists it could be that the main listening
+	 * endpoint is gone but there still exists a connected socket for
+	 * this port. If so we can return the first one that we find. This
+	 * may NOT be the correct one, so the caller should be wary of the
+	 * returned INP. Currently the only caller that sets find_tcp_pool
+	 * is in bindx where we are verifying that a user CAN bind the
+	 * address. Either the user has bound it already, or someone else
+	 * has, or it is open to bind, so this is good enough.
+	 */
+ if (inp == NULL && find_tcp_pool) {
+ for (i = 0; i < SCTP_BASE_INFO(hashtcpmark) + 1; i++) {
+ head = &SCTP_BASE_INFO(sctp_tcpephash)[i];
+ inp = sctp_endpoint_probe(nam, head, lport, vrf_id);
+ if (inp) {
+ break;
+ }
+ }
+ }
+ if (inp) {
+ SCTP_INP_INCR_REF(inp);
+ }
+ if (have_lock == 0) {
+ SCTP_INP_INFO_RUNLOCK();
+ }
+ return (inp);
+}
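+/*
+ * Note for callers: on a non-NULL return the endpoint's reference count
+ * has been bumped (SCTP_INP_INCR_REF above), so a caller that is done
+ * with the endpoint must drop it again, e.g.:
+ *
+ *	inp = sctp_pcb_findep(addr, 0, 0, vrf_id);
+ *	if (inp != NULL) {
+ *		... inspect inp ...
+ *		SCTP_INP_DECR_REF(inp);
+ *	}
+ */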
+
+/*
+ * Find an association for an endpoint given a pointer to the address you
+ * want to send to and the endpoint pointer. The address can be IPv4 or
+ * IPv6. We may need to change the *to to some other struct like an mbuf...
+ */
+struct sctp_tcb *
+sctp_findassociation_addr_sa(struct sockaddr *to, struct sockaddr *from,
+ struct sctp_inpcb **inp_p, struct sctp_nets **netp, int find_tcp_pool,
+ uint32_t vrf_id)
+{
+ struct sctp_inpcb *inp = NULL;
+ struct sctp_tcb *retval;
+
+ SCTP_INP_INFO_RLOCK();
+ if (find_tcp_pool) {
+ if (inp_p != NULL) {
+ retval = sctp_tcb_special_locate(inp_p, from, to, netp,
+ vrf_id);
+ } else {
+ retval = sctp_tcb_special_locate(&inp, from, to, netp,
+ vrf_id);
+ }
+ if (retval != NULL) {
+ SCTP_INP_INFO_RUNLOCK();
+ return (retval);
+ }
+ }
+ inp = sctp_pcb_findep(to, 0, 1, vrf_id);
+ if (inp_p != NULL) {
+ *inp_p = inp;
+ }
+ SCTP_INP_INFO_RUNLOCK();
+
+ if (inp == NULL) {
+ return (NULL);
+ }
+	/*
+	 * ok, we have an endpoint, now let's find the assoc for it (if
+	 * any). We place the source address, "from", in the "to" slot of
+	 * the find-endpoint call, since in reality this chain is used from
+	 * the inbound packet side.
+	 */
+ if (inp_p != NULL) {
+ retval = sctp_findassociation_ep_addr(inp_p, from, netp, to,
+ NULL);
+ } else {
+ retval = sctp_findassociation_ep_addr(&inp, from, netp, to,
+ NULL);
+ }
+ return retval;
+}
+
+
+/*
+ * This routine will grub through the mbuf that is an INIT or INIT-ACK and
+ * find all addresses that the sender has specified in any address list.
+ * Each address will be used to look up the TCB and see if one exists.
+ */
+static struct sctp_tcb *
+sctp_findassociation_special_addr(struct mbuf *m, int iphlen, int offset,
+ struct sctphdr *sh, struct sctp_inpcb **inp_p, struct sctp_nets **netp,
+ struct sockaddr *dest)
+{
+ struct sockaddr_in sin4;
+ struct sockaddr_in6 sin6;
+ struct sctp_paramhdr *phdr, parm_buf;
+ struct sctp_tcb *retval;
+ uint32_t ptype, plen;
+
+ memset(&sin4, 0, sizeof(sin4));
+ memset(&sin6, 0, sizeof(sin6));
+ sin4.sin_len = sizeof(sin4);
+ sin4.sin_family = AF_INET;
+ sin4.sin_port = sh->src_port;
+ sin6.sin6_len = sizeof(sin6);
+ sin6.sin6_family = AF_INET6;
+ sin6.sin6_port = sh->src_port;
+
+ retval = NULL;
+ offset += sizeof(struct sctp_init_chunk);
+
+ phdr = sctp_get_next_param(m, offset, &parm_buf, sizeof(parm_buf));
+ while (phdr != NULL) {
+ /* now we must see if we want the parameter */
+ ptype = ntohs(phdr->param_type);
+ plen = ntohs(phdr->param_length);
+ if (plen == 0) {
+ break;
+ }
+ if (ptype == SCTP_IPV4_ADDRESS &&
+ plen == sizeof(struct sctp_ipv4addr_param)) {
+ /* Get the rest of the address */
+ struct sctp_ipv4addr_param ip4_parm, *p4;
+
+ phdr = sctp_get_next_param(m, offset,
+ (struct sctp_paramhdr *)&ip4_parm, min(plen, sizeof(ip4_parm)));
+ if (phdr == NULL) {
+ return (NULL);
+ }
+ p4 = (struct sctp_ipv4addr_param *)phdr;
+ memcpy(&sin4.sin_addr, &p4->addr, sizeof(p4->addr));
+ /* look it up */
+ retval = sctp_findassociation_ep_addr(inp_p,
+ (struct sockaddr *)&sin4, netp, dest, NULL);
+ if (retval != NULL) {
+ return (retval);
+ }
+ } else if (ptype == SCTP_IPV6_ADDRESS &&
+ plen == sizeof(struct sctp_ipv6addr_param)) {
+ /* Get the rest of the address */
+ struct sctp_ipv6addr_param ip6_parm, *p6;
+
+ phdr = sctp_get_next_param(m, offset,
+ (struct sctp_paramhdr *)&ip6_parm, min(plen, sizeof(ip6_parm)));
+ if (phdr == NULL) {
+ return (NULL);
+ }
+ p6 = (struct sctp_ipv6addr_param *)phdr;
+ memcpy(&sin6.sin6_addr, &p6->addr, sizeof(p6->addr));
+ /* look it up */
+ retval = sctp_findassociation_ep_addr(inp_p,
+ (struct sockaddr *)&sin6, netp, dest, NULL);
+ if (retval != NULL) {
+ return (retval);
+ }
+ }
+ offset += SCTP_SIZE32(plen);
+ phdr = sctp_get_next_param(m, offset, &parm_buf,
+ sizeof(parm_buf));
+ }
+ return (NULL);
+}
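+/*
+ * For reference, the address parameters walked above are plain TLVs that
+ * follow the INIT/INIT-ACK chunk (types and lengths per RFC 4960: type 5,
+ * 8 bytes for IPv4; type 6, 20 bytes for IPv6):
+ *
+ *	struct sctp_paramhdr {		// the "parm_buf" peeked at above
+ *		uint16_t param_type;	// SCTP_IPV4_ADDRESS or SCTP_IPV6_ADDRESS
+ *		uint16_t param_length;	// 8 (IPv4) or 20 (IPv6), incl. header
+ *	};				// followed by the raw address bytes
+ */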
+
+static struct sctp_tcb *
+sctp_findassoc_by_vtag(struct sockaddr *from, struct sockaddr *to, uint32_t vtag,
+ struct sctp_inpcb **inp_p, struct sctp_nets **netp, uint16_t rport,
+ uint16_t lport, int skip_src_check, uint32_t vrf_id, uint32_t remote_tag)
+{
+	/*
+	 * Use my vtag to hash. If we find it we then verify the source
+	 * addr is in the assoc. If all goes well we save a bit on receipt
+	 * of a packet.
+	 */
+ struct sctpasochead *head;
+ struct sctp_nets *net;
+ struct sctp_tcb *stcb;
+
+ *netp = NULL;
+ *inp_p = NULL;
+ SCTP_INP_INFO_RLOCK();
+ head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(vtag,
+ SCTP_BASE_INFO(hashasocmark))];
+ if (head == NULL) {
+ /* invalid vtag */
+ SCTP_INP_INFO_RUNLOCK();
+ return (NULL);
+ }
+ LIST_FOREACH(stcb, head, sctp_asocs) {
+ SCTP_INP_RLOCK(stcb->sctp_ep);
+ if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) {
+ SCTP_INP_RUNLOCK(stcb->sctp_ep);
+ continue;
+ }
+ SCTP_TCB_LOCK(stcb);
+ SCTP_INP_RUNLOCK(stcb->sctp_ep);
+ if (stcb->asoc.my_vtag == vtag) {
+ /* candidate */
+ if (stcb->rport != rport) {
+ SCTP_TCB_UNLOCK(stcb);
+ continue;
+ }
+ if (stcb->sctp_ep->sctp_lport != lport) {
+ SCTP_TCB_UNLOCK(stcb);
+ continue;
+ }
+ if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
+ SCTP_TCB_UNLOCK(stcb);
+ continue;
+ }
+			/* RRS: Need to-addr check here */
+ if (sctp_does_stcb_own_this_addr(stcb, to) == 0) {
+ /* Endpoint does not own this address */
+ SCTP_TCB_UNLOCK(stcb);
+ continue;
+ }
+ if (remote_tag) {
+ /*
+ * If we have both vtags that's all we match
+ * on
+ */
+ if (stcb->asoc.peer_vtag == remote_tag) {
+ /*
+ * If both tags match we consider it
+ * conclusive and check NO
+ * source/destination addresses
+ */
+ goto conclusive;
+ }
+ }
+ if (skip_src_check) {
+ conclusive:
+ if (from) {
+ net = sctp_findnet(stcb, from);
+ } else {
+ *netp = NULL; /* unknown */
+ }
+ if (inp_p)
+ *inp_p = stcb->sctp_ep;
+ SCTP_INP_INFO_RUNLOCK();
+ return (stcb);
+ }
+ net = sctp_findnet(stcb, from);
+ if (net) {
+				/* yep, it's a match. */
+ *netp = net;
+ SCTP_STAT_INCR(sctps_vtagexpress);
+ *inp_p = stcb->sctp_ep;
+ SCTP_INP_INFO_RUNLOCK();
+ return (stcb);
+ } else {
+				/*
+				 * not a match; this should only happen in
+				 * rare cases, so I peg it.
+				 */
+ SCTP_STAT_INCR(sctps_vtagbogus);
+ }
+ }
+ SCTP_TCB_UNLOCK(stcb);
+ }
+ SCTP_INP_INFO_RUNLOCK();
+ return (NULL);
+}
+
+/*
+ * Find an association given a pointer to the inbound IP packet. This can
+ * be an IPv4 or IPv6 packet.
+ */
+struct sctp_tcb *
+sctp_findassociation_addr(struct mbuf *m, int iphlen, int offset,
+ struct sctphdr *sh, struct sctp_chunkhdr *ch,
+ struct sctp_inpcb **inp_p, struct sctp_nets **netp, uint32_t vrf_id)
+{
+ int find_tcp_pool;
+ struct ip *iph;
+ struct sctp_tcb *retval;
+ struct sockaddr_storage to_store, from_store;
+ struct sockaddr *to = (struct sockaddr *)&to_store;
+ struct sockaddr *from = (struct sockaddr *)&from_store;
+ struct sctp_inpcb *inp;
+
+ iph = mtod(m, struct ip *);
+ switch (iph->ip_v) {
+ case IPVERSION:
+ {
+			/* it's IPv4 */
+ struct sockaddr_in *from4;
+
+ from4 = (struct sockaddr_in *)&from_store;
+ bzero(from4, sizeof(*from4));
+ from4->sin_family = AF_INET;
+ from4->sin_len = sizeof(struct sockaddr_in);
+ from4->sin_addr.s_addr = iph->ip_src.s_addr;
+ from4->sin_port = sh->src_port;
+ break;
+ }
+#ifdef INET6
+ case IPV6_VERSION >> 4:
+ {
+			/* it's IPv6 */
+ struct ip6_hdr *ip6;
+ struct sockaddr_in6 *from6;
+
+ ip6 = mtod(m, struct ip6_hdr *);
+ from6 = (struct sockaddr_in6 *)&from_store;
+ bzero(from6, sizeof(*from6));
+ from6->sin6_family = AF_INET6;
+ from6->sin6_len = sizeof(struct sockaddr_in6);
+ from6->sin6_addr = ip6->ip6_src;
+ from6->sin6_port = sh->src_port;
+			/* Get the scopes set properly in the sin6 addrs. */
+			/* We probably don't need these operations. */
+ (void)sa6_recoverscope(from6);
+ sa6_embedscope(from6, MODULE_GLOBAL(ip6_use_defzone));
+ break;
+ }
+#endif
+ default:
+ /* Currently not supported. */
+ return (NULL);
+ }
+
+
+ switch (iph->ip_v) {
+ case IPVERSION:
+ {
+			/* it's IPv4 */
+ struct sockaddr_in *to4;
+
+ to4 = (struct sockaddr_in *)&to_store;
+ bzero(to4, sizeof(*to4));
+ to4->sin_family = AF_INET;
+ to4->sin_len = sizeof(struct sockaddr_in);
+ to4->sin_addr.s_addr = iph->ip_dst.s_addr;
+ to4->sin_port = sh->dest_port;
+ break;
+ }
+#ifdef INET6
+ case IPV6_VERSION >> 4:
+ {
+			/* it's IPv6 */
+ struct ip6_hdr *ip6;
+ struct sockaddr_in6 *to6;
+
+ ip6 = mtod(m, struct ip6_hdr *);
+ to6 = (struct sockaddr_in6 *)&to_store;
+ bzero(to6, sizeof(*to6));
+ to6->sin6_family = AF_INET6;
+ to6->sin6_len = sizeof(struct sockaddr_in6);
+ to6->sin6_addr = ip6->ip6_dst;
+ to6->sin6_port = sh->dest_port;
+			/* Get the scopes set properly in the sin6 addrs. */
+			/* We probably don't need these operations. */
+ (void)sa6_recoverscope(to6);
+ sa6_embedscope(to6, MODULE_GLOBAL(ip6_use_defzone));
+ break;
+ }
+#endif
+ default:
+ /* TSNH */
+ break;
+ }
+ if (sh->v_tag) {
+ /* we only go down this path if vtag is non-zero */
+ retval = sctp_findassoc_by_vtag(from, to, ntohl(sh->v_tag),
+ inp_p, netp, sh->src_port, sh->dest_port, 0, vrf_id, 0);
+ if (retval) {
+ return (retval);
+ }
+ }
+ find_tcp_pool = 0;
+ if ((ch->chunk_type != SCTP_INITIATION) &&
+ (ch->chunk_type != SCTP_INITIATION_ACK) &&
+ (ch->chunk_type != SCTP_COOKIE_ACK) &&
+ (ch->chunk_type != SCTP_COOKIE_ECHO)) {
+ /* Other chunk types go to the tcp pool. */
+ find_tcp_pool = 1;
+ }
+ if (inp_p) {
+ retval = sctp_findassociation_addr_sa(to, from, inp_p, netp,
+ find_tcp_pool, vrf_id);
+ inp = *inp_p;
+ } else {
+ retval = sctp_findassociation_addr_sa(to, from, &inp, netp,
+ find_tcp_pool, vrf_id);
+ }
+ SCTPDBG(SCTP_DEBUG_PCB1, "retval:%p inp:%p\n", retval, inp);
+ if (retval == NULL && inp) {
+		/* Found an EP but not this address */
+ if ((ch->chunk_type == SCTP_INITIATION) ||
+ (ch->chunk_type == SCTP_INITIATION_ACK)) {
+			/*-
+			 * special hook: we do NOT return an inp or an
+			 * association that is linked to an existing
+			 * association that is under the TCP pool (i.e. no
+			 * listener exists). The endpoint finding routine
+			 * will always find a listener before examining the
+			 * TCP pool.
+			 */
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) {
+ if (inp_p) {
+ *inp_p = NULL;
+ }
+ return (NULL);
+ }
+ retval = sctp_findassociation_special_addr(m, iphlen,
+ offset, sh, &inp, netp, to);
+ if (inp_p != NULL) {
+ *inp_p = inp;
+ }
+ }
+ }
+ SCTPDBG(SCTP_DEBUG_PCB1, "retval is %p\n", retval);
+ return (retval);
+}
+
+/*
+ * Look up an association by an ASCONF lookup address.
+ * If the lookup address is 0.0.0.0 or ::0, use the vtag to do the lookup.
+ */
+struct sctp_tcb *
+sctp_findassociation_ep_asconf(struct mbuf *m, int iphlen, int offset,
+ struct sctphdr *sh, struct sctp_inpcb **inp_p, struct sctp_nets **netp, uint32_t vrf_id)
+{
+ struct sctp_tcb *stcb;
+ struct sockaddr_in *sin;
+
+#ifdef INET6
+ struct sockaddr_in6 *sin6;
+
+#endif
+ struct sockaddr_storage local_store, remote_store;
+ struct sockaddr *to;
+ struct ip *iph;
+
+#ifdef INET6
+ struct ip6_hdr *ip6;
+
+#endif
+ struct sctp_paramhdr parm_buf, *phdr;
+ int ptype;
+ int zero_address = 0;
+
+
+ memset(&local_store, 0, sizeof(local_store));
+ memset(&remote_store, 0, sizeof(remote_store));
+ to = (struct sockaddr *)&local_store;
+	/* First get the destination address set up too. */
+ iph = mtod(m, struct ip *);
+ switch (iph->ip_v) {
+ case IPVERSION:
+		/* it's IPv4 */
+ sin = (struct sockaddr_in *)&local_store;
+ sin->sin_family = AF_INET;
+ sin->sin_len = sizeof(*sin);
+ sin->sin_port = sh->dest_port;
+ sin->sin_addr.s_addr = iph->ip_dst.s_addr;
+ break;
+#ifdef INET6
+ case IPV6_VERSION >> 4:
+		/* it's IPv6 */
+ ip6 = mtod(m, struct ip6_hdr *);
+ sin6 = (struct sockaddr_in6 *)&local_store;
+ sin6->sin6_family = AF_INET6;
+ sin6->sin6_len = sizeof(*sin6);
+ sin6->sin6_port = sh->dest_port;
+ sin6->sin6_addr = ip6->ip6_dst;
+ break;
+#endif
+ default:
+ return NULL;
+ }
+
+ phdr = sctp_get_next_param(m, offset + sizeof(struct sctp_asconf_chunk),
+ &parm_buf, sizeof(struct sctp_paramhdr));
+ if (phdr == NULL) {
+ SCTPDBG(SCTP_DEBUG_INPUT3, "%s: failed to get asconf lookup addr\n",
+ __FUNCTION__);
+ return NULL;
+ }
+ ptype = (int)((uint32_t) ntohs(phdr->param_type));
+ /* get the correlation address */
+ switch (ptype) {
+#ifdef INET6
+ case SCTP_IPV6_ADDRESS:
+ {
+ /* ipv6 address param */
+ struct sctp_ipv6addr_param *p6, p6_buf;
+
+ if (ntohs(phdr->param_length) != sizeof(struct sctp_ipv6addr_param)) {
+ return NULL;
+ }
+ p6 = (struct sctp_ipv6addr_param *)sctp_get_next_param(m,
+ offset + sizeof(struct sctp_asconf_chunk),
+ &p6_buf.ph, sizeof(*p6));
+ if (p6 == NULL) {
+ SCTPDBG(SCTP_DEBUG_INPUT3, "%s: failed to get asconf v6 lookup addr\n",
+ __FUNCTION__);
+ return (NULL);
+ }
+ sin6 = (struct sockaddr_in6 *)&remote_store;
+ sin6->sin6_family = AF_INET6;
+ sin6->sin6_len = sizeof(*sin6);
+ sin6->sin6_port = sh->src_port;
+ memcpy(&sin6->sin6_addr, &p6->addr, sizeof(struct in6_addr));
+ if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr))
+ zero_address = 1;
+ break;
+ }
+#endif
+ case SCTP_IPV4_ADDRESS:
+ {
+ /* ipv4 address param */
+ struct sctp_ipv4addr_param *p4, p4_buf;
+
+ if (ntohs(phdr->param_length) != sizeof(struct sctp_ipv4addr_param)) {
+ return NULL;
+ }
+ p4 = (struct sctp_ipv4addr_param *)sctp_get_next_param(m,
+ offset + sizeof(struct sctp_asconf_chunk),
+ &p4_buf.ph, sizeof(*p4));
+ if (p4 == NULL) {
+ SCTPDBG(SCTP_DEBUG_INPUT3, "%s: failed to get asconf v4 lookup addr\n",
+ __FUNCTION__);
+ return (NULL);
+ }
+ sin = (struct sockaddr_in *)&remote_store;
+ sin->sin_family = AF_INET;
+ sin->sin_len = sizeof(*sin);
+ sin->sin_port = sh->src_port;
+ memcpy(&sin->sin_addr, &p4->addr, sizeof(struct in_addr));
+ if (sin->sin_addr.s_addr == INADDR_ANY)
+ zero_address = 1;
+ break;
+ }
+ default:
+ /* invalid address param type */
+ return NULL;
+ }
+
+ if (zero_address) {
+ stcb = sctp_findassoc_by_vtag(NULL, to, ntohl(sh->v_tag), inp_p,
+ netp, sh->src_port, sh->dest_port, 1, vrf_id, 0);
+ /*
+ * printf("findassociation_ep_asconf: zero lookup address
+ * finds stcb 0x%x\n", (uint32_t)stcb);
+ */
+ } else {
+ stcb = sctp_findassociation_ep_addr(inp_p,
+ (struct sockaddr *)&remote_store, netp,
+ to, NULL);
+ }
+ return (stcb);
+}
+
+
+/*
+ * Allocate a sctp_inpcb and set up a temporary binding to a port/all
+ * addresses. This way if we don't get a bind we by default pick an
+ * ephemeral port with all addresses bound.
+ */
+int
+sctp_inpcb_alloc(struct socket *so, uint32_t vrf_id)
+{
+	/*
+	 * We get called when a new endpoint starts up. We need to allocate
+	 * the sctp_inpcb structure from the zone and init it. Mark it as
+	 * unbound and find a port that we can use as an ephemeral with
+	 * INADDR_ANY. If the user binds later, no problem, we can then add
+	 * in the specific addresses. Also set up the default parameters
+	 * for the EP.
+	 */
+ int i, error;
+ struct sctp_inpcb *inp;
+ struct sctp_pcb *m;
+ struct timeval time;
+ sctp_sharedkey_t *null_key;
+
+ error = 0;
+
+ SCTP_INP_INFO_WLOCK();
+ inp = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_ep), struct sctp_inpcb);
+ if (inp == NULL) {
+ SCTP_PRINTF("Out of SCTP-INPCB structures - no resources\n");
+ SCTP_INP_INFO_WUNLOCK();
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, ENOBUFS);
+ return (ENOBUFS);
+ }
+ /* zap it */
+ bzero(inp, sizeof(*inp));
+
+ /* bump generations */
+	/* set up socket pointers */
+ inp->sctp_socket = so;
+ inp->ip_inp.inp.inp_socket = so;
+ inp->sctp_associd_counter = 1;
+ inp->partial_delivery_point = SCTP_SB_LIMIT_RCV(so) >> SCTP_PARTIAL_DELIVERY_SHIFT;
+ inp->sctp_frag_point = SCTP_DEFAULT_MAXSEGMENT;
+ inp->sctp_cmt_on_off = SCTP_BASE_SYSCTL(sctp_cmt_on_off);
+ /* init the small hash table we use to track asocid <-> tcb */
+ inp->sctp_asocidhash = SCTP_HASH_INIT(SCTP_STACK_VTAG_HASH_SIZE, &inp->hashasocidmark);
+ if (inp->sctp_asocidhash == NULL) {
+ SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_ep), inp);
+ SCTP_INP_INFO_WUNLOCK();
+ return (ENOBUFS);
+ }
+#ifdef IPSEC
+ {
+ struct inpcbpolicy *pcb_sp = NULL;
+
+ error = ipsec_init_policy(so, &pcb_sp);
+ /* Arrange to share the policy */
+ inp->ip_inp.inp.inp_sp = pcb_sp;
+ ((struct in6pcb *)(&inp->ip_inp.inp))->in6p_sp = pcb_sp;
+ }
+ if (error != 0) {
+ SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_ep), inp);
+ SCTP_INP_INFO_WUNLOCK();
+ return error;
+ }
+#endif /* IPSEC */
+ SCTP_INCR_EP_COUNT();
+ inp->ip_inp.inp.inp_ip_ttl = MODULE_GLOBAL(ip_defttl);
+ SCTP_INP_INFO_WUNLOCK();
+
+ so->so_pcb = (caddr_t)inp;
+
+ if ((SCTP_SO_TYPE(so) == SOCK_DGRAM) ||
+ (SCTP_SO_TYPE(so) == SOCK_SEQPACKET)) {
+ /* UDP style socket */
+ inp->sctp_flags = (SCTP_PCB_FLAGS_UDPTYPE |
+ SCTP_PCB_FLAGS_UNBOUND);
+ /* Be sure it is NON-BLOCKING IO for UDP */
+ /* SCTP_SET_SO_NBIO(so); */
+ } else if (SCTP_SO_TYPE(so) == SOCK_STREAM) {
+ /* TCP style socket */
+ inp->sctp_flags = (SCTP_PCB_FLAGS_TCPTYPE |
+ SCTP_PCB_FLAGS_UNBOUND);
+ /* Be sure we have blocking IO by default */
+ SCTP_CLEAR_SO_NBIO(so);
+ } else {
+		/*
+		 * unsupported socket type (RAW, etc.) - in case we missed
+		 * it in protosw
+		 */
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EOPNOTSUPP);
+ so->so_pcb = NULL;
+ SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_ep), inp);
+ return (EOPNOTSUPP);
+ }
+ if (SCTP_BASE_SYSCTL(sctp_default_frag_interleave) == SCTP_FRAG_LEVEL_1) {
+ sctp_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE);
+ sctp_feature_off(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS);
+ } else if (SCTP_BASE_SYSCTL(sctp_default_frag_interleave) == SCTP_FRAG_LEVEL_2) {
+ sctp_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE);
+ sctp_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS);
+ } else if (SCTP_BASE_SYSCTL(sctp_default_frag_interleave) == SCTP_FRAG_LEVEL_0) {
+ sctp_feature_off(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE);
+ sctp_feature_off(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS);
+ }
+ inp->sctp_tcbhash = SCTP_HASH_INIT(SCTP_BASE_SYSCTL(sctp_pcbtblsize),
+ &inp->sctp_hashmark);
+ if (inp->sctp_tcbhash == NULL) {
+ SCTP_PRINTF("Out of SCTP-INPCB->hashinit - no resources\n");
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, ENOBUFS);
+ so->so_pcb = NULL;
+ SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_ep), inp);
+ return (ENOBUFS);
+ }
+ inp->def_vrf_id = vrf_id;
+
+ SCTP_INP_INFO_WLOCK();
+ SCTP_INP_LOCK_INIT(inp);
+ INP_LOCK_INIT(&inp->ip_inp.inp, "inp", "sctpinp");
+ SCTP_INP_READ_INIT(inp);
+ SCTP_ASOC_CREATE_LOCK_INIT(inp);
+ /* lock the new ep */
+ SCTP_INP_WLOCK(inp);
+
+ /* add it to the info area */
+ LIST_INSERT_HEAD(&SCTP_BASE_INFO(listhead), inp, sctp_list);
+ SCTP_INP_INFO_WUNLOCK();
+
+ TAILQ_INIT(&inp->read_queue);
+ LIST_INIT(&inp->sctp_addr_list);
+
+ LIST_INIT(&inp->sctp_asoc_list);
+
+#ifdef SCTP_TRACK_FREED_ASOCS
+ /* TEMP CODE */
+ LIST_INIT(&inp->sctp_asoc_free_list);
+#endif
+ /* Init the timer structure for signature change */
+ SCTP_OS_TIMER_INIT(&inp->sctp_ep.signature_change.timer);
+ inp->sctp_ep.signature_change.type = SCTP_TIMER_TYPE_NEWCOOKIE;
+
+ /* now init the actual endpoint default data */
+ m = &inp->sctp_ep;
+
+	/* set up the base timeout information */
+ m->sctp_timeoutticks[SCTP_TIMER_SEND] = SEC_TO_TICKS(SCTP_SEND_SEC); /* needed ? */
+ m->sctp_timeoutticks[SCTP_TIMER_INIT] = SEC_TO_TICKS(SCTP_INIT_SEC); /* needed ? */
+ m->sctp_timeoutticks[SCTP_TIMER_RECV] = MSEC_TO_TICKS(SCTP_BASE_SYSCTL(sctp_delayed_sack_time_default));
+ m->sctp_timeoutticks[SCTP_TIMER_HEARTBEAT] = MSEC_TO_TICKS(SCTP_BASE_SYSCTL(sctp_heartbeat_interval_default));
+ m->sctp_timeoutticks[SCTP_TIMER_PMTU] = SEC_TO_TICKS(SCTP_BASE_SYSCTL(sctp_pmtu_raise_time_default));
+ m->sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN] = SEC_TO_TICKS(SCTP_BASE_SYSCTL(sctp_shutdown_guard_time_default));
+ m->sctp_timeoutticks[SCTP_TIMER_SIGNATURE] = SEC_TO_TICKS(SCTP_BASE_SYSCTL(sctp_secret_lifetime_default));
+	/* all max/min RTO values are in ms */
+ m->sctp_maxrto = SCTP_BASE_SYSCTL(sctp_rto_max_default);
+ m->sctp_minrto = SCTP_BASE_SYSCTL(sctp_rto_min_default);
+ m->initial_rto = SCTP_BASE_SYSCTL(sctp_rto_initial_default);
+ m->initial_init_rto_max = SCTP_BASE_SYSCTL(sctp_init_rto_max_default);
+ m->sctp_sack_freq = SCTP_BASE_SYSCTL(sctp_sack_freq_default);
+
+ m->max_open_streams_intome = MAX_SCTP_STREAMS;
+
+ m->max_init_times = SCTP_BASE_SYSCTL(sctp_init_rtx_max_default);
+ m->max_send_times = SCTP_BASE_SYSCTL(sctp_assoc_rtx_max_default);
+ m->def_net_failure = SCTP_BASE_SYSCTL(sctp_path_rtx_max_default);
+ m->sctp_sws_sender = SCTP_SWS_SENDER_DEF;
+ m->sctp_sws_receiver = SCTP_SWS_RECEIVER_DEF;
+ m->max_burst = SCTP_BASE_SYSCTL(sctp_max_burst_default);
+ if ((SCTP_BASE_SYSCTL(sctp_default_cc_module) >= SCTP_CC_RFC2581) &&
+ (SCTP_BASE_SYSCTL(sctp_default_cc_module) <= SCTP_CC_HTCP)) {
+ m->sctp_default_cc_module = SCTP_BASE_SYSCTL(sctp_default_cc_module);
+ } else {
+		/* sysctl set with an invalid value, default to RFC 2581 */
+ m->sctp_default_cc_module = SCTP_CC_RFC2581;
+ }
+	/* number of streams to pre-open on an association */
+ m->pre_open_stream_count = SCTP_BASE_SYSCTL(sctp_nr_outgoing_streams_default);
+
+ /* Add adaptation cookie */
+ m->adaptation_layer_indicator = 0x504C5253;
+
+ /* seed random number generator */
+ m->random_counter = 1;
+ m->store_at = SCTP_SIGNATURE_SIZE;
+ SCTP_READ_RANDOM(m->random_numbers, sizeof(m->random_numbers));
+ sctp_fill_random_store(m);
+
+ /* Minimum cookie size */
+ m->size_of_a_cookie = (sizeof(struct sctp_init_msg) * 2) +
+ sizeof(struct sctp_state_cookie);
+ m->size_of_a_cookie += SCTP_SIGNATURE_SIZE;
+
+	/* Set up the initial secret */
+ (void)SCTP_GETTIME_TIMEVAL(&time);
+ m->time_of_secret_change = time.tv_sec;
+
+ for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
+ m->secret_key[0][i] = sctp_select_initial_TSN(m);
+ }
+ sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, NULL, NULL);
+
+	/* How long is a cookie good for? */
+ m->def_cookie_life = MSEC_TO_TICKS(SCTP_BASE_SYSCTL(sctp_valid_cookie_life_default));
+ /*
+ * Initialize authentication parameters
+ */
+ m->local_hmacs = sctp_default_supported_hmaclist();
+ m->local_auth_chunks = sctp_alloc_chunklist();
+ sctp_auth_set_default_chunks(m->local_auth_chunks);
+ LIST_INIT(&m->shared_keys);
+ /* add default NULL key as key id 0 */
+ null_key = sctp_alloc_sharedkey();
+ sctp_insert_sharedkey(&m->shared_keys, null_key);
+ SCTP_INP_WUNLOCK(inp);
+#ifdef SCTP_LOG_CLOSING
+ sctp_log_closing(inp, NULL, 12);
+#endif
+ return (error);
+}
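+/*
+ * Sketch of a typical caller (the protocol attach hook; error handling
+ * trimmed, names illustrative):
+ *
+ *	error = sctp_inpcb_alloc(so, SCTP_DEFAULT_VRFID);
+ *	if (error)
+ *		return (error);
+ *	// the endpoint now exists unbound; an explicit bind() or the
+ *	// ephemeral-port path in sctp_inpcb_bind() picks the port later
+ */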
+
+
+void
+sctp_move_pcb_and_assoc(struct sctp_inpcb *old_inp, struct sctp_inpcb *new_inp,
+ struct sctp_tcb *stcb)
+{
+ struct sctp_nets *net;
+ uint16_t lport, rport;
+ struct sctppcbhead *head;
+ struct sctp_laddr *laddr, *oladdr;
+
+ atomic_add_int(&stcb->asoc.refcnt, 1);
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_INP_INFO_WLOCK();
+ SCTP_INP_WLOCK(old_inp);
+ SCTP_INP_WLOCK(new_inp);
+ SCTP_TCB_LOCK(stcb);
+ atomic_subtract_int(&stcb->asoc.refcnt, 1);
+
+ new_inp->sctp_ep.time_of_secret_change =
+ old_inp->sctp_ep.time_of_secret_change;
+ memcpy(new_inp->sctp_ep.secret_key, old_inp->sctp_ep.secret_key,
+ sizeof(old_inp->sctp_ep.secret_key));
+ new_inp->sctp_ep.current_secret_number =
+ old_inp->sctp_ep.current_secret_number;
+ new_inp->sctp_ep.last_secret_number =
+ old_inp->sctp_ep.last_secret_number;
+ new_inp->sctp_ep.size_of_a_cookie = old_inp->sctp_ep.size_of_a_cookie;
+
+ /* make it so new data pours into the new socket */
+ stcb->sctp_socket = new_inp->sctp_socket;
+ stcb->sctp_ep = new_inp;
+
+ /* Copy the port across */
+ lport = new_inp->sctp_lport = old_inp->sctp_lport;
+ rport = stcb->rport;
+ /* Pull the tcb from the old association */
+ LIST_REMOVE(stcb, sctp_tcbhash);
+ LIST_REMOVE(stcb, sctp_tcblist);
+ if (stcb->asoc.in_asocid_hash) {
+ LIST_REMOVE(stcb, sctp_tcbasocidhash);
+ }
+ /* Now insert the new_inp into the TCP connected hash */
+ head = &SCTP_BASE_INFO(sctp_tcpephash)[SCTP_PCBHASH_ALLADDR((lport | rport), SCTP_BASE_INFO(hashtcpmark))];
+
+ LIST_INSERT_HEAD(head, new_inp, sctp_hash);
+	/* It's safe to access */
+ new_inp->sctp_flags &= ~SCTP_PCB_FLAGS_UNBOUND;
+
+ /* Now move the tcb into the endpoint list */
+ LIST_INSERT_HEAD(&new_inp->sctp_asoc_list, stcb, sctp_tcblist);
+	/*
+	 * Question: do we even need to worry about the ep-hash since we
+	 * only have one connection? Probably not :> so let's get rid of it
+	 * and not suck up any kernel memory in that.
+	 */
+ if (stcb->asoc.in_asocid_hash) {
+ struct sctpasochead *lhd;
+
+ lhd = &new_inp->sctp_asocidhash[SCTP_PCBHASH_ASOC(stcb->asoc.assoc_id,
+ new_inp->hashasocidmark)];
+ LIST_INSERT_HEAD(lhd, stcb, sctp_tcbasocidhash);
+ }
+	/* Ok. Let's restart the timers. */
+ TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+ sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, new_inp,
+ stcb, net);
+ }
+
+ SCTP_INP_INFO_WUNLOCK();
+ if (new_inp->sctp_tcbhash != NULL) {
+ SCTP_HASH_FREE(new_inp->sctp_tcbhash, new_inp->sctp_hashmark);
+ new_inp->sctp_tcbhash = NULL;
+ }
+ if ((new_inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) == 0) {
+ /* Subset bound, so copy in the laddr list from the old_inp */
+ LIST_FOREACH(oladdr, &old_inp->sctp_addr_list, sctp_nxt_addr) {
+ laddr = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
+ if (laddr == NULL) {
+ /*
+ * Gak, what can we do? This assoc is really
+ * HOSED. We probably should send an abort
+ * here.
+ */
+ SCTPDBG(SCTP_DEBUG_PCB1, "Association hosed in TCP model, out of laddr memory\n");
+ continue;
+ }
+ SCTP_INCR_LADDR_COUNT();
+ bzero(laddr, sizeof(*laddr));
+ (void)SCTP_GETTIME_TIMEVAL(&laddr->start_time);
+ laddr->ifa = oladdr->ifa;
+ atomic_add_int(&laddr->ifa->refcount, 1);
+ LIST_INSERT_HEAD(&new_inp->sctp_addr_list, laddr,
+ sctp_nxt_addr);
+ new_inp->laddr_count++;
+ }
+ }
+	/*
+	 * Now any running timers need to be adjusted. Since we really
+	 * don't care whether they are running or not, just blast the
+	 * new_inp into all of them.
+	 */
+
+ stcb->asoc.hb_timer.ep = (void *)new_inp;
+ stcb->asoc.dack_timer.ep = (void *)new_inp;
+ stcb->asoc.asconf_timer.ep = (void *)new_inp;
+ stcb->asoc.strreset_timer.ep = (void *)new_inp;
+ stcb->asoc.shut_guard_timer.ep = (void *)new_inp;
+ stcb->asoc.autoclose_timer.ep = (void *)new_inp;
+ stcb->asoc.delayed_event_timer.ep = (void *)new_inp;
+ stcb->asoc.delete_prim_timer.ep = (void *)new_inp;
+ /* now what about the nets? */
+ TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+ net->pmtu_timer.ep = (void *)new_inp;
+ net->rxt_timer.ep = (void *)new_inp;
+ net->fr_timer.ep = (void *)new_inp;
+ }
+ SCTP_INP_WUNLOCK(new_inp);
+ SCTP_INP_WUNLOCK(old_inp);
+}
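+/*
+ * (This migration is used on the TCP-model accept path: the association
+ * built under the listening endpoint is moved to the endpoint of the
+ * newly accepted socket.)
+ */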
+
+
+
+
+/* sctp_ifap is used to bypass normal local address validation checks */
+int
+sctp_inpcb_bind(struct socket *so, struct sockaddr *addr,
+ struct sctp_ifa *sctp_ifap, struct thread *p)
+{
+	/* bind an ep to a socket address */
+ struct sctppcbhead *head;
+ struct sctp_inpcb *inp, *inp_tmp;
+ struct inpcb *ip_inp;
+ int port_reuse_active = 0;
+ int bindall;
+ uint16_t lport;
+ int error;
+ uint32_t vrf_id;
+
+ lport = 0;
+ error = 0;
+ bindall = 1;
+ inp = (struct sctp_inpcb *)so->so_pcb;
+ ip_inp = (struct inpcb *)so->so_pcb;
+#ifdef SCTP_DEBUG
+ if (addr) {
+ SCTPDBG(SCTP_DEBUG_PCB1, "Bind called port:%d\n",
+ ntohs(((struct sockaddr_in *)addr)->sin_port));
+ SCTPDBG(SCTP_DEBUG_PCB1, "Addr :");
+ SCTPDBG_ADDR(SCTP_DEBUG_PCB1, addr);
+ }
+#endif
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) == 0) {
+		/* already did a bind, subsequent binds NOT allowed! */
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EINVAL);
+ return (EINVAL);
+ }
+#ifdef INVARIANTS
+ if (p == NULL)
+ panic("null proc/thread");
+#endif
+ if (addr != NULL) {
+ switch (addr->sa_family) {
+ case AF_INET:
+ {
+ struct sockaddr_in *sin;
+
+ /* IPV6_V6ONLY socket? */
+ if (SCTP_IPV6_V6ONLY(ip_inp)) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EINVAL);
+ return (EINVAL);
+ }
+ if (addr->sa_len != sizeof(*sin)) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EINVAL);
+ return (EINVAL);
+ }
+ sin = (struct sockaddr_in *)addr;
+ lport = sin->sin_port;
+ /*
+ * For LOOPBACK the prison_local_ip4() call
+ * will transmute the ip address to the
+ * proper value.
+ */
+ if (p && (error = prison_local_ip4(p->td_ucred, &sin->sin_addr)) != 0) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, error);
+ return (error);
+ }
+ if (sin->sin_addr.s_addr != INADDR_ANY) {
+ bindall = 0;
+ }
+ break;
+ }
+#ifdef INET6
+ case AF_INET6:
+ {
+				/*
+				 * Only for pure IPv6 addresses (no
+				 * IPv4-mapped addresses!).
+				 */
+ struct sockaddr_in6 *sin6;
+
+ sin6 = (struct sockaddr_in6 *)addr;
+
+ if (addr->sa_len != sizeof(*sin6)) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EINVAL);
+ return (EINVAL);
+ }
+ lport = sin6->sin6_port;
+
+ /*
+ * For LOOPBACK the prison_local_ip6() call
+ * will transmute the ipv6 address to the
+ * proper value.
+ */
+ if (p && (error = prison_local_ip6(p->td_ucred, &sin6->sin6_addr,
+ (SCTP_IPV6_V6ONLY(inp) != 0))) != 0) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, error);
+ return (error);
+ }
+ if (!IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
+ bindall = 0;
+ /* KAME hack: embed scopeid */
+ if (sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone)) != 0) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EINVAL);
+ return (EINVAL);
+ }
+ }
+ /* this must be cleared for ifa_ifwithaddr() */
+ sin6->sin6_scope_id = 0;
+ break;
+ }
+#endif
+ default:
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EAFNOSUPPORT);
+ return (EAFNOSUPPORT);
+ }
+ }
+ SCTP_INP_INFO_WLOCK();
+ SCTP_INP_WLOCK(inp);
+	/* Set up a vrf_id to be the default for the non-bind-all case. */
+ vrf_id = inp->def_vrf_id;
+
+ /* increase our count due to the unlock we do */
+ SCTP_INP_INCR_REF(inp);
+ if (lport) {
+		/*
+		 * Did the caller specify a port? If so, we must see if an
+		 * ep already has this one bound.
+		 */
+ /* got to be root to get at low ports */
+ if (ntohs(lport) < IPPORT_RESERVED) {
+ if (p && (error =
+ priv_check(p, PRIV_NETINET_RESERVEDPORT)
+ )) {
+ SCTP_INP_DECR_REF(inp);
+ SCTP_INP_WUNLOCK(inp);
+ SCTP_INP_INFO_WUNLOCK();
+ return (error);
+ }
+ }
+ if (p == NULL) {
+ SCTP_INP_DECR_REF(inp);
+ SCTP_INP_WUNLOCK(inp);
+ SCTP_INP_INFO_WUNLOCK();
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, error);
+ return (error);
+ }
+ SCTP_INP_WUNLOCK(inp);
+ if (bindall) {
+ vrf_id = inp->def_vrf_id;
+ inp_tmp = sctp_pcb_findep(addr, 0, 1, vrf_id);
+ if (inp_tmp != NULL) {
+				/*
+				 * The lookup returned and bumped the ref
+				 * count. Note that we are not bound, so
+				 * inp_tmp should NEVER be inp; it is inp_tmp
+				 * that got the reference bump, so we must
+				 * lower it.
+				 */
+ SCTP_INP_DECR_REF(inp_tmp);
+ /* unlock info */
+ if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_PORTREUSE)) &&
+ (sctp_is_feature_on(inp_tmp, SCTP_PCB_FLAGS_PORTREUSE))) {
+ /*
+ * Ok, must be one-2-one and
+ * allowing port re-use
+ */
+ port_reuse_active = 1;
+ goto continue_anyway;
+ }
+ SCTP_INP_DECR_REF(inp);
+ SCTP_INP_INFO_WUNLOCK();
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EADDRINUSE);
+ return (EADDRINUSE);
+ }
+ } else {
+ inp_tmp = sctp_pcb_findep(addr, 0, 1, vrf_id);
+ if (inp_tmp != NULL) {
+				/*
+				 * The lookup returned and bumped the ref
+				 * count. Note that we are not bound, so
+				 * inp_tmp should NEVER be inp; it is inp_tmp
+				 * that got the reference bump, so we must
+				 * lower it.
+				 */
+ SCTP_INP_DECR_REF(inp_tmp);
+ /* unlock info */
+ if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_PORTREUSE)) &&
+ (sctp_is_feature_on(inp_tmp, SCTP_PCB_FLAGS_PORTREUSE))) {
+ /*
+ * Ok, must be one-2-one and
+ * allowing port re-use
+ */
+ port_reuse_active = 1;
+ goto continue_anyway;
+ }
+ SCTP_INP_DECR_REF(inp);
+ SCTP_INP_INFO_WUNLOCK();
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EADDRINUSE);
+ return (EADDRINUSE);
+ }
+ }
+continue_anyway:
+ SCTP_INP_WLOCK(inp);
+ if (bindall) {
+			/* verify that the lport is not used by a singleton */
+ if ((port_reuse_active == 0) &&
+ (inp_tmp = sctp_isport_inuse(inp, lport, vrf_id))
+ ) {
+ /* Sorry someone already has this one bound */
+ if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_PORTREUSE)) &&
+ (sctp_is_feature_on(inp_tmp, SCTP_PCB_FLAGS_PORTREUSE))) {
+ port_reuse_active = 1;
+ } else {
+ SCTP_INP_DECR_REF(inp);
+ SCTP_INP_WUNLOCK(inp);
+ SCTP_INP_INFO_WUNLOCK();
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EADDRINUSE);
+ return (EADDRINUSE);
+ }
+ }
+ }
+ } else {
+ uint16_t first, last, candidate;
+ uint16_t count;
+ int done;
+
+ if (ip_inp->inp_flags & INP_HIGHPORT) {
+ first = MODULE_GLOBAL(ipport_hifirstauto);
+ last = MODULE_GLOBAL(ipport_hilastauto);
+ } else if (ip_inp->inp_flags & INP_LOWPORT) {
+ if (p && (error =
+ priv_check(p, PRIV_NETINET_RESERVEDPORT)
+ )) {
+ SCTP_INP_DECR_REF(inp);
+ SCTP_INP_WUNLOCK(inp);
+ SCTP_INP_INFO_WUNLOCK();
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, error);
+ return (error);
+ }
+ first = MODULE_GLOBAL(ipport_lowfirstauto);
+ last = MODULE_GLOBAL(ipport_lowlastauto);
+ } else {
+ first = MODULE_GLOBAL(ipport_firstauto);
+ last = MODULE_GLOBAL(ipport_lastauto);
+ }
+ if (first > last) {
+ uint16_t temp;
+
+ temp = first;
+ first = last;
+ last = temp;
+ }
+ count = last - first + 1; /* number of candidates */
+ candidate = first + sctp_select_initial_TSN(&inp->sctp_ep) % (count);
+
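+		/*
+		 * Start at a random candidate within [first, last] and
+		 * probe linearly, wrapping from last back to first, until
+		 * a free port is found or all "count" candidates have been
+		 * tried, in which case EADDRINUSE is returned.
+		 */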
+ done = 0;
+ while (!done) {
+ if (sctp_isport_inuse(inp, htons(candidate), inp->def_vrf_id) == NULL) {
+ done = 1;
+ }
+ if (!done) {
+ if (--count == 0) {
+ SCTP_INP_DECR_REF(inp);
+ SCTP_INP_WUNLOCK(inp);
+ SCTP_INP_INFO_WUNLOCK();
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EADDRINUSE);
+ return (EADDRINUSE);
+ }
+ if (candidate == last)
+ candidate = first;
+ else
+ candidate = candidate + 1;
+ }
+ }
+ lport = htons(candidate);
+ }
+ SCTP_INP_DECR_REF(inp);
+ if (inp->sctp_flags & (SCTP_PCB_FLAGS_SOCKET_GONE |
+ SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
+		/*
+		 * This really should not happen. The user did a
+		 * non-blocking bind and then did a close at the same time.
+		 */
+ SCTP_INP_WUNLOCK(inp);
+ SCTP_INP_INFO_WUNLOCK();
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EINVAL);
+ return (EINVAL);
+ }
+	/* ok, we look clear to give out this port, so let's set up the binding */
+ if (bindall) {
+ /* binding to all addresses, so just set in the proper flags */
+ inp->sctp_flags |= SCTP_PCB_FLAGS_BOUNDALL;
+ /* set the automatic addr changes from kernel flag */
+ if (SCTP_BASE_SYSCTL(sctp_auto_asconf) == 0) {
+ sctp_feature_off(inp, SCTP_PCB_FLAGS_DO_ASCONF);
+ sctp_feature_off(inp, SCTP_PCB_FLAGS_AUTO_ASCONF);
+ } else {
+ sctp_feature_on(inp, SCTP_PCB_FLAGS_DO_ASCONF);
+ sctp_feature_on(inp, SCTP_PCB_FLAGS_AUTO_ASCONF);
+ }
+ if (SCTP_BASE_SYSCTL(sctp_multiple_asconfs) == 0) {
+ sctp_feature_off(inp, SCTP_PCB_FLAGS_MULTIPLE_ASCONFS);
+ } else {
+ sctp_feature_on(inp, SCTP_PCB_FLAGS_MULTIPLE_ASCONFS);
+ }
+ /*
+ * set the automatic mobility_base from kernel flag (by
+ * micchie)
+ */
+ if (SCTP_BASE_SYSCTL(sctp_mobility_base) == 0) {
+ sctp_mobility_feature_off(inp, SCTP_MOBILITY_BASE);
+ sctp_mobility_feature_off(inp, SCTP_MOBILITY_PRIM_DELETED);
+ } else {
+ sctp_mobility_feature_on(inp, SCTP_MOBILITY_BASE);
+ sctp_mobility_feature_off(inp, SCTP_MOBILITY_PRIM_DELETED);
+ }
+ /*
+ * set the automatic mobility_fasthandoff from kernel flag
+ * (by micchie)
+ */
+ if (SCTP_BASE_SYSCTL(sctp_mobility_fasthandoff) == 0) {
+ sctp_mobility_feature_off(inp, SCTP_MOBILITY_FASTHANDOFF);
+ sctp_mobility_feature_off(inp, SCTP_MOBILITY_PRIM_DELETED);
+ } else {
+ sctp_mobility_feature_on(inp, SCTP_MOBILITY_FASTHANDOFF);
+ sctp_mobility_feature_off(inp, SCTP_MOBILITY_PRIM_DELETED);
+ }
+ } else {
+		/*
+		 * bind specific: make sure the BOUNDALL flag is off and add
+		 * a new address structure to the sctp_addr_list inside the
+		 * ep structure.
+		 *
+		 * We will need to allocate one and insert it at the head.
+		 * The socketopt call can just insert new addresses in there
+		 * as well. It will also have to do the embed-scope kame
+		 * hack (before adding).
+		 */
+ struct sctp_ifa *ifa;
+ struct sockaddr_storage store_sa;
+
+ memset(&store_sa, 0, sizeof(store_sa));
+ if (addr->sa_family == AF_INET) {
+ struct sockaddr_in *sin;
+
+ sin = (struct sockaddr_in *)&store_sa;
+ memcpy(sin, addr, sizeof(struct sockaddr_in));
+ sin->sin_port = 0;
+ } else if (addr->sa_family == AF_INET6) {
+ struct sockaddr_in6 *sin6;
+
+ sin6 = (struct sockaddr_in6 *)&store_sa;
+ memcpy(sin6, addr, sizeof(struct sockaddr_in6));
+ sin6->sin6_port = 0;
+ }
+		/*
+		 * First find the interface with the bound address; we need
+		 * to zero out the port to find the address (yuck!). We
+		 * can't do this earlier since we need the port for
+		 * sctp_pcb_findep().
+		 */
+ if (sctp_ifap != NULL)
+ ifa = sctp_ifap;
+ else {
+			/*
+			 * Note: for BSD we always hit here; other OSes will
+			 * pass things in via the sctp_ifap argument
+			 * (Panda).
+			 */
+ ifa = sctp_find_ifa_by_addr((struct sockaddr *)&store_sa,
+ vrf_id, SCTP_ADDR_NOT_LOCKED);
+ }
+ if (ifa == NULL) {
+ /* Can't find an interface with that address */
+ SCTP_INP_WUNLOCK(inp);
+ SCTP_INP_INFO_WUNLOCK();
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EADDRNOTAVAIL);
+ return (EADDRNOTAVAIL);
+ }
+ if (addr->sa_family == AF_INET6) {
+ /* GAK, more FIXME IFA lock? */
+ if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
+ /* Can't bind a non-existent addr. */
+ SCTP_INP_WUNLOCK(inp);
+ SCTP_INP_INFO_WUNLOCK();
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EINVAL);
+ return (EINVAL);
+ }
+ }
+ /* we're not bound all */
+ inp->sctp_flags &= ~SCTP_PCB_FLAGS_BOUNDALL;
+ /* allow bindx() to send ASCONF's for binding changes */
+ sctp_feature_on(inp, SCTP_PCB_FLAGS_DO_ASCONF);
+ /* clear automatic addr changes from kernel flag */
+ sctp_feature_off(inp, SCTP_PCB_FLAGS_AUTO_ASCONF);
+
+ /* add this address to the endpoint list */
+ error = sctp_insert_laddr(&inp->sctp_addr_list, ifa, 0);
+ if (error != 0) {
+ SCTP_INP_WUNLOCK(inp);
+ SCTP_INP_INFO_WUNLOCK();
+ return (error);
+ }
+ inp->laddr_count++;
+ }
+ /* find the bucket */
+ if (port_reuse_active) {
+ /* Put it into tcp 1-2-1 hash */
+ head = &SCTP_BASE_INFO(sctp_tcpephash)[SCTP_PCBHASH_ALLADDR(lport, SCTP_BASE_INFO(hashtcpmark))];
+ inp->sctp_flags |= SCTP_PCB_FLAGS_IN_TCPPOOL;
+ } else {
+ head = &SCTP_BASE_INFO(sctp_ephash)[SCTP_PCBHASH_ALLADDR(lport, SCTP_BASE_INFO(hashmark))];
+ }
+ /* put it in the bucket */
+ LIST_INSERT_HEAD(head, inp, sctp_hash);
+ SCTPDBG(SCTP_DEBUG_PCB1, "Main hash to bind at head:%p, bound port:%d - in tcp_pool=%d\n",
+ head, ntohs(lport), port_reuse_active);
+ /* set in the port */
+ inp->sctp_lport = lport;
+
+ /* turn off just the unbound flag */
+ inp->sctp_flags &= ~SCTP_PCB_FLAGS_UNBOUND;
+ SCTP_INP_WUNLOCK(inp);
+ SCTP_INP_INFO_WUNLOCK();
+ return (0);
+}
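+/*
+ * From user space this path is driven by an ordinary bind(); a port of 0
+ * takes the ephemeral-port branch above (illustrative sketch, not part of
+ * this file):
+ *
+ *	int fd = socket(AF_INET, SOCK_SEQPACKET, IPPROTO_SCTP);
+ *	struct sockaddr_in sin;
+ *
+ *	memset(&sin, 0, sizeof(sin));
+ *	sin.sin_family = AF_INET;
+ *	sin.sin_len = sizeof(sin);
+ *	sin.sin_port = 0;			// ask for an ephemeral port
+ *	sin.sin_addr.s_addr = INADDR_ANY;	// bind to all addresses
+ *	bind(fd, (struct sockaddr *)&sin, sizeof(sin));
+ */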
+
+
+static void
+sctp_iterator_inp_being_freed(struct sctp_inpcb *inp)
+{
+ struct sctp_iterator *it, *nit;
+
+	/*
+	 * We enter with only the ITERATOR_LOCK in place and a write lock
+	 * on the inp_info stuff.
+	 */
+ it = sctp_it_ctl.cur_it;
+ if (it && (it->vn != curvnet)) {
+		/* It's not looking at our VNET */
+ return;
+ }
+ if (it && (it->inp == inp)) {
+		/*
+		 * This is tricky: we hold the iterator lock, but when it
+		 * returns and gets the lock (when we release it) the
+		 * iterator will try to operate on inp. We need to stop that
+		 * from happening. But of course the iterator has a
+		 * reference on the stcb and inp. We can mark it and it will
+		 * stop.
+		 *
+		 * If it's a single-iterator situation, we set the
+		 * end-iterator flag. Otherwise we set the iterator to go to
+		 * the next inp.
+		 */
+ if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
+ sctp_it_ctl.iterator_flags |= SCTP_ITERATOR_STOP_CUR_IT;
+ } else {
+ sctp_it_ctl.iterator_flags |= SCTP_ITERATOR_STOP_CUR_INP;
+ }
+ }
+ /*
+ * Now go through and remove any single reference to our inp
+ * that may still be pending on the list.
+ */
+ SCTP_IPI_ITERATOR_WQ_LOCK();
+ it = TAILQ_FIRST(&sctp_it_ctl.iteratorhead);
+ while (it) {
+ nit = TAILQ_NEXT(it, sctp_nxt_itr);
+ if (it->vn != curvnet) {
+ it = nit;
+ continue;
+ }
+ if (it->inp == inp) {
+ /* This one points to me; is it inp-specific? */
+ if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
+ /* Remove and free this one */
+ TAILQ_REMOVE(&sctp_it_ctl.iteratorhead,
+ it, sctp_nxt_itr);
+ if (it->function_atend != NULL) {
+ (*it->function_atend) (it->pointer, it->val);
+ }
+ SCTP_FREE(it, SCTP_M_ITER);
+ } else {
+ it->inp = LIST_NEXT(it->inp, sctp_list);
+ if (it->inp) {
+ SCTP_INP_INCR_REF(it->inp);
+ }
+ }
+ /*
+ * The refcnt is incremented when the iterator is
+ * put on the list, so decrement it here.
+ */
+ SCTP_INP_DECR_REF(inp);
+ }
+ it = nit;
+ }
+ SCTP_IPI_ITERATOR_WQ_UNLOCK();
+}
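+
+/*
+ * Editor's note (sketch, not part of the imported source): the walk
+ * above caches TAILQ_NEXT() before the current element can be freed,
+ * which is the classic safe-removal idiom.  A minimal standalone
+ * version with queue(3) and hypothetical types:
+ */
+#if 0	/* illustrative only */
+#include <sys/queue.h>
+#include <stdlib.h>
+
+struct node {
+	TAILQ_ENTRY(node) link;
+	int dead;
+};
+TAILQ_HEAD(nodehead, node);
+
+static void
+prune(struct nodehead *head)
+{
+	struct node *n, *nnext;
+
+	n = TAILQ_FIRST(head);
+	while (n != NULL) {
+		/* grab the successor first; freeing n invalidates its links */
+		nnext = TAILQ_NEXT(n, link);
+		if (n->dead) {
+			TAILQ_REMOVE(head, n, link);
+			free(n);
+		}
+		n = nnext;
+	}
+}
+#endif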
+
+/* release the sctp_inpcb and unbind the port */
+void
+sctp_inpcb_free(struct sctp_inpcb *inp, int immediate, int from)
+{
+ /*
+ * Here we free an endpoint. We must find it (if it is in the hash
+ * table) and remove it from there. Then we must also find it in
+ * the overall list and remove it from there. After all removals
+ * are complete, any timers have to be stopped. Then start the
+ * actual freeing: a) any local lists, b) any associations, c) the
+ * hash of all associations, and d) finally the ep itself.
+ */
+ struct sctp_pcb *m;
+ struct sctp_tcb *asoc, *nasoc;
+ struct sctp_laddr *laddr, *nladdr;
+ struct inpcb *ip_pcb;
+ struct socket *so;
+ int being_refed = 0;
+ struct sctp_queued_to_read *sq;
+ int cnt;
+ sctp_sharedkey_t *shared_key;
+
+#ifdef SCTP_LOG_CLOSING
+ sctp_log_closing(inp, NULL, 0);
+#endif
+ SCTP_ITERATOR_LOCK();
+ /* mark any iterators on the list or being processed */
+ sctp_iterator_inp_being_freed(inp);
+ SCTP_ITERATOR_UNLOCK();
+ so = inp->sctp_socket;
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) {
+ /* been here before.. eeks.. get out of here */
+ SCTP_PRINTF("This conflict in free SHOULD not be happening! from %d, imm %d\n", from, immediate);
+#ifdef SCTP_LOG_CLOSING
+ sctp_log_closing(inp, NULL, 1);
+#endif
+ return;
+ }
+ SCTP_ASOC_CREATE_LOCK(inp);
+ SCTP_INP_INFO_WLOCK();
+
+ SCTP_INP_WLOCK(inp);
+ if (from == SCTP_CALLED_AFTER_CMPSET_OFCLOSE) {
+ inp->sctp_flags &= ~SCTP_PCB_FLAGS_CLOSE_IP;
+ /* socket is gone, so no more wakeups allowed */
+ inp->sctp_flags |= SCTP_PCB_FLAGS_DONT_WAKE;
+ inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAKEINPUT;
+ inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAKEOUTPUT;
+
+ }
+ /* First time through we have the socket lock, after that no more. */
+ sctp_timer_stop(SCTP_TIMER_TYPE_NEWCOOKIE, inp, NULL, NULL,
+ SCTP_FROM_SCTP_PCB + SCTP_LOC_1);
+
+ if (inp->control) {
+ sctp_m_freem(inp->control);
+ inp->control = NULL;
+ }
+ if (inp->pkt) {
+ sctp_m_freem(inp->pkt);
+ inp->pkt = NULL;
+ }
+ m = &inp->sctp_ep;
+ ip_pcb = &inp->ip_inp.inp; /* we could just cast the main pointer
+ * here but I will be nice :> (i.e.
+ * ip_pcb = ep;) */
+ if (immediate == SCTP_FREE_SHOULD_USE_GRACEFUL_CLOSE) {
+ int cnt_in_sd;
+
+ cnt_in_sd = 0;
+ for ((asoc = LIST_FIRST(&inp->sctp_asoc_list)); asoc != NULL;
+ asoc = nasoc) {
+ SCTP_TCB_LOCK(asoc);
+ nasoc = LIST_NEXT(asoc, sctp_tcblist);
+ if (asoc->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
+ /* Skip guys being freed */
+ cnt_in_sd++;
+ if (asoc->asoc.state & SCTP_STATE_IN_ACCEPT_QUEUE) {
+ /*
+ * Special case - we did not start a
+ * kill timer on the asoc because it
+ * was not closed. So go ahead and
+ * start it now.
+ */
+ asoc->asoc.state &= ~SCTP_STATE_IN_ACCEPT_QUEUE;
+ sctp_timer_start(SCTP_TIMER_TYPE_ASOCKILL, inp, asoc, NULL);
+ }
+ SCTP_TCB_UNLOCK(asoc);
+ continue;
+ }
+ if (((SCTP_GET_STATE(&asoc->asoc) == SCTP_STATE_COOKIE_WAIT) ||
+ (SCTP_GET_STATE(&asoc->asoc) == SCTP_STATE_COOKIE_ECHOED)) &&
+ (asoc->asoc.total_output_queue_size == 0)) {
+ /*
+ * If we have data queued, we don't want to
+ * just free, since the app may have done
+ * send()/close or connect/send/close and
+ * wants the data to get across first.
+ */
+ /* Just abandon things in the front states */
+ if (sctp_free_assoc(inp, asoc, SCTP_PCBFREE_NOFORCE,
+ SCTP_FROM_SCTP_PCB + SCTP_LOC_2) == 0) {
+ cnt_in_sd++;
+ }
+ continue;
+ }
+ /* Disconnect the socket please */
+ asoc->sctp_socket = NULL;
+ asoc->asoc.state |= SCTP_STATE_CLOSED_SOCKET;
+ if ((asoc->asoc.size_on_reasm_queue > 0) ||
+ (asoc->asoc.control_pdapi) ||
+ (asoc->asoc.size_on_all_streams > 0) ||
+ (so && (so->so_rcv.sb_cc > 0))
+ ) {
+ /* Left with Data unread */
+ struct mbuf *op_err;
+
+ op_err = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
+ 0, M_DONTWAIT, 1, MT_DATA);
+ if (op_err) {
+ /* Fill in the user initiated abort */
+ struct sctp_paramhdr *ph;
+ uint32_t *ippp;
+
+ SCTP_BUF_LEN(op_err) =
+ sizeof(struct sctp_paramhdr) + sizeof(uint32_t);
+ ph = mtod(op_err,
+ struct sctp_paramhdr *);
+ ph->param_type = htons(
+ SCTP_CAUSE_USER_INITIATED_ABT);
+ ph->param_length = htons(SCTP_BUF_LEN(op_err));
+ ippp = (uint32_t *) (ph + 1);
+ *ippp = htonl(SCTP_FROM_SCTP_PCB + SCTP_LOC_3);
+ }
+ asoc->sctp_ep->last_abort_code = SCTP_FROM_SCTP_PCB + SCTP_LOC_3;
+#if defined(SCTP_PANIC_ON_ABORT)
+ panic("inpcb_free does an abort");
+#endif
+ sctp_send_abort_tcb(asoc, op_err, SCTP_SO_LOCKED);
+ SCTP_STAT_INCR_COUNTER32(sctps_aborted);
+ if ((SCTP_GET_STATE(&asoc->asoc) == SCTP_STATE_OPEN) ||
+ (SCTP_GET_STATE(&asoc->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
+ SCTP_STAT_DECR_GAUGE32(sctps_currestab);
+ }
+ if (sctp_free_assoc(inp, asoc,
+ SCTP_PCBFREE_NOFORCE, SCTP_FROM_SCTP_PCB + SCTP_LOC_4) == 0) {
+ cnt_in_sd++;
+ }
+ continue;
+ } else if (TAILQ_EMPTY(&asoc->asoc.send_queue) &&
+ TAILQ_EMPTY(&asoc->asoc.sent_queue) &&
+ (asoc->asoc.stream_queue_cnt == 0)
+ ) {
+ if (asoc->asoc.locked_on_sending) {
+ goto abort_anyway;
+ }
+ if ((SCTP_GET_STATE(&asoc->asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
+ (SCTP_GET_STATE(&asoc->asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
+ /*
+ * there is nothing queued to send,
+ * so I send shutdown
+ */
+ sctp_send_shutdown(asoc, asoc->asoc.primary_destination);
+ if ((SCTP_GET_STATE(&asoc->asoc) == SCTP_STATE_OPEN) ||
+ (SCTP_GET_STATE(&asoc->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
+ SCTP_STAT_DECR_GAUGE32(sctps_currestab);
+ }
+ SCTP_SET_STATE(&asoc->asoc, SCTP_STATE_SHUTDOWN_SENT);
+ SCTP_CLEAR_SUBSTATE(&asoc->asoc, SCTP_STATE_SHUTDOWN_PENDING);
+ sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, asoc->sctp_ep, asoc,
+ asoc->asoc.primary_destination);
+ sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, asoc->sctp_ep, asoc,
+ asoc->asoc.primary_destination);
+ sctp_chunk_output(inp, asoc, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_LOCKED);
+ }
+ } else {
+ /* mark into shutdown pending */
+ struct sctp_stream_queue_pending *sp;
+
+ asoc->asoc.state |= SCTP_STATE_SHUTDOWN_PENDING;
+ sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, asoc->sctp_ep, asoc,
+ asoc->asoc.primary_destination);
+ if (asoc->asoc.locked_on_sending) {
+ sp = TAILQ_LAST(&((asoc->asoc.locked_on_sending)->outqueue),
+ sctp_streamhead);
+ if (sp == NULL) {
+ SCTP_PRINTF("Error, sp is NULL, locked on sending is %p strm:%d\n",
+ asoc->asoc.locked_on_sending,
+ asoc->asoc.locked_on_sending->stream_no);
+ } else {
+ if ((sp->length == 0) && (sp->msg_is_complete == 0))
+ asoc->asoc.state |= SCTP_STATE_PARTIAL_MSG_LEFT;
+ }
+ }
+ if (TAILQ_EMPTY(&asoc->asoc.send_queue) &&
+ TAILQ_EMPTY(&asoc->asoc.sent_queue) &&
+ (asoc->asoc.state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
+ struct mbuf *op_err;
+
+ abort_anyway:
+ op_err = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
+ 0, M_DONTWAIT, 1, MT_DATA);
+ if (op_err) {
+ /*
+ * Fill in the user
+ * initiated abort
+ */
+ struct sctp_paramhdr *ph;
+ uint32_t *ippp;
+
+ SCTP_BUF_LEN(op_err) =
+ (sizeof(struct sctp_paramhdr) +
+ sizeof(uint32_t));
+ ph = mtod(op_err,
+ struct sctp_paramhdr *);
+ ph->param_type = htons(
+ SCTP_CAUSE_USER_INITIATED_ABT);
+ ph->param_length = htons(SCTP_BUF_LEN(op_err));
+ ippp = (uint32_t *) (ph + 1);
+ *ippp = htonl(SCTP_FROM_SCTP_PCB + SCTP_LOC_5);
+ }
+ asoc->sctp_ep->last_abort_code = SCTP_FROM_SCTP_PCB + SCTP_LOC_5;
+#if defined(SCTP_PANIC_ON_ABORT)
+ panic("inpcb_free does an abort");
+#endif
+
+ sctp_send_abort_tcb(asoc, op_err, SCTP_SO_LOCKED);
+ SCTP_STAT_INCR_COUNTER32(sctps_aborted);
+ if ((SCTP_GET_STATE(&asoc->asoc) == SCTP_STATE_OPEN) ||
+ (SCTP_GET_STATE(&asoc->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
+ SCTP_STAT_DECR_GAUGE32(sctps_currestab);
+ }
+ if (sctp_free_assoc(inp, asoc,
+ SCTP_PCBFREE_NOFORCE,
+ SCTP_FROM_SCTP_PCB + SCTP_LOC_6) == 0) {
+ cnt_in_sd++;
+ }
+ continue;
+ } else {
+ sctp_chunk_output(inp, asoc, SCTP_OUTPUT_FROM_CLOSING, SCTP_SO_LOCKED);
+ }
+ }
+ cnt_in_sd++;
+ SCTP_TCB_UNLOCK(asoc);
+ }
+ /* now is there some left in our SHUTDOWN state? */
+ if (cnt_in_sd) {
+#ifdef SCTP_LOG_CLOSING
+ sctp_log_closing(inp, NULL, 2);
+#endif
+ inp->sctp_socket = NULL;
+ SCTP_INP_WUNLOCK(inp);
+ SCTP_ASOC_CREATE_UNLOCK(inp);
+ SCTP_INP_INFO_WUNLOCK();
+ return;
+ }
+ }
+ inp->sctp_socket = NULL;
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) !=
+ SCTP_PCB_FLAGS_UNBOUND) {
+ /*
+ * ok, this guy has been bound. Its port is somewhere in
+ * the SCTP_BASE_INFO hash table. Remove it!
+ */
+ LIST_REMOVE(inp, sctp_hash);
+ inp->sctp_flags |= SCTP_PCB_FLAGS_UNBOUND;
+ }
+ /*
+ * If there is a timer running to kill us, forget it, since it may
+ * be contending for the INP lock... which would cause us to die...
+ */
+ cnt = 0;
+ for ((asoc = LIST_FIRST(&inp->sctp_asoc_list)); asoc != NULL;
+ asoc = nasoc) {
+ SCTP_TCB_LOCK(asoc);
+ nasoc = LIST_NEXT(asoc, sctp_tcblist);
+ if (asoc->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
+ if (asoc->asoc.state & SCTP_STATE_IN_ACCEPT_QUEUE) {
+ asoc->asoc.state &= ~SCTP_STATE_IN_ACCEPT_QUEUE;
+ sctp_timer_start(SCTP_TIMER_TYPE_ASOCKILL, inp, asoc, NULL);
+ }
+ cnt++;
+ SCTP_TCB_UNLOCK(asoc);
+ continue;
+ }
+ /* Free associations that are NOT killing us */
+ if ((SCTP_GET_STATE(&asoc->asoc) != SCTP_STATE_COOKIE_WAIT) &&
+ ((asoc->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) == 0)) {
+ struct mbuf *op_err;
+ uint32_t *ippp;
+
+ op_err = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
+ 0, M_DONTWAIT, 1, MT_DATA);
+ if (op_err) {
+ /* Fill in the user initiated abort */
+ struct sctp_paramhdr *ph;
+
+ SCTP_BUF_LEN(op_err) = (sizeof(struct sctp_paramhdr) +
+ sizeof(uint32_t));
+ ph = mtod(op_err, struct sctp_paramhdr *);
+ ph->param_type = htons(
+ SCTP_CAUSE_USER_INITIATED_ABT);
+ ph->param_length = htons(SCTP_BUF_LEN(op_err));
+ ippp = (uint32_t *) (ph + 1);
+ *ippp = htonl(SCTP_FROM_SCTP_PCB + SCTP_LOC_7);
+
+ }
+ asoc->sctp_ep->last_abort_code = SCTP_FROM_SCTP_PCB + SCTP_LOC_7;
+#if defined(SCTP_PANIC_ON_ABORT)
+ panic("inpcb_free does an abort");
+#endif
+ sctp_send_abort_tcb(asoc, op_err, SCTP_SO_LOCKED);
+ SCTP_STAT_INCR_COUNTER32(sctps_aborted);
+ } else if (asoc->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
+ cnt++;
+ SCTP_TCB_UNLOCK(asoc);
+ continue;
+ }
+ if ((SCTP_GET_STATE(&asoc->asoc) == SCTP_STATE_OPEN) ||
+ (SCTP_GET_STATE(&asoc->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
+ SCTP_STAT_DECR_GAUGE32(sctps_currestab);
+ }
+ if (sctp_free_assoc(inp, asoc, SCTP_PCBFREE_FORCE, SCTP_FROM_SCTP_PCB + SCTP_LOC_8) == 0) {
+ cnt++;
+ }
+ }
+ if (cnt) {
+ /* Ok we have someone out there that will kill us */
+ (void)SCTP_OS_TIMER_STOP(&inp->sctp_ep.signature_change.timer);
+#ifdef SCTP_LOG_CLOSING
+ sctp_log_closing(inp, NULL, 3);
+#endif
+ SCTP_INP_WUNLOCK(inp);
+ SCTP_ASOC_CREATE_UNLOCK(inp);
+ SCTP_INP_INFO_WUNLOCK();
+ return;
+ }
+#ifndef __rtems__
+ if (SCTP_INP_LOCK_CONTENDED(inp))
+ being_refed++;
+ if (SCTP_INP_READ_CONTENDED(inp))
+ being_refed++;
+ if (SCTP_ASOC_CREATE_LOCK_CONTENDED(inp))
+ being_refed++;
+#endif
+
+ if ((inp->refcount) ||
+ (being_refed) ||
+ (inp->sctp_flags & SCTP_PCB_FLAGS_CLOSE_IP)) {
+ (void)SCTP_OS_TIMER_STOP(&inp->sctp_ep.signature_change.timer);
+#ifdef SCTP_LOG_CLOSING
+ sctp_log_closing(inp, NULL, 4);
+#endif
+ sctp_timer_start(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL);
+ SCTP_INP_WUNLOCK(inp);
+ SCTP_ASOC_CREATE_UNLOCK(inp);
+ SCTP_INP_INFO_WUNLOCK();
+ return;
+ }
+ inp->sctp_ep.signature_change.type = 0;
+ inp->sctp_flags |= SCTP_PCB_FLAGS_SOCKET_ALLGONE;
+ /*
+ * Remove it from the list .. last thing we need a lock for.
+ */
+ LIST_REMOVE(inp, sctp_list);
+ SCTP_INP_WUNLOCK(inp);
+ SCTP_ASOC_CREATE_UNLOCK(inp);
+ SCTP_INP_INFO_WUNLOCK();
+ /*
+ * Now we release all locks, since this INP cannot be found anymore
+ * except possibly by the kill timer that might be running. We call
+ * the drain function here. It should hit the case where it sees the
+ * ACTIVE flag cleared and exit out, freeing us to proceed and
+ * destroy everything.
+ */
+ if (from != SCTP_CALLED_FROM_INPKILL_TIMER) {
+ (void)SCTP_OS_TIMER_STOP_DRAIN(&inp->sctp_ep.signature_change.timer);
+ } else {
+ /* Probably un-needed */
+ (void)SCTP_OS_TIMER_STOP(&inp->sctp_ep.signature_change.timer);
+ }
+
+#ifdef SCTP_LOG_CLOSING
+ sctp_log_closing(inp, NULL, 5);
+#endif
+
+
+ if ((inp->sctp_asocidhash) != NULL) {
+ SCTP_HASH_FREE(inp->sctp_asocidhash, inp->hashasocidmark);
+ inp->sctp_asocidhash = NULL;
+ }
+ /* sa_ignore FREED_MEMORY */
+ while ((sq = TAILQ_FIRST(&inp->read_queue)) != NULL) {
+ /* It's only abandoned if it had data left */
+ if (sq->length)
+ SCTP_STAT_INCR(sctps_left_abandon);
+
+ TAILQ_REMOVE(&inp->read_queue, sq, next);
+ sctp_free_remote_addr(sq->whoFrom);
+ if (so)
+ so->so_rcv.sb_cc -= sq->length;
+ if (sq->data) {
+ sctp_m_freem(sq->data);
+ sq->data = NULL;
+ }
+ /*
+ * no need to free the net count, since at this point all
+ * assoc's are gone.
+ */
+ SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_readq), sq);
+ SCTP_DECR_READQ_COUNT();
+ }
+ /* Now the sctp_pcb things */
+ /*
+ * Free each asoc if it is not already closed/freed. We can't use
+ * the macro here since le_next will get freed as part of the
+ * sctp_free_assoc() call.
+ */
+ cnt = 0;
+ if (so) {
+#ifdef IPSEC
+ ipsec_delete_pcbpolicy(ip_pcb);
+#endif /* IPSEC */
+
+ /* Unlocks not needed since the socket is gone now */
+ }
+ if (ip_pcb->inp_options) {
+ (void)sctp_m_free(ip_pcb->inp_options);
+ ip_pcb->inp_options = 0;
+ }
+ if (ip_pcb->inp_moptions) {
+ inp_freemoptions(ip_pcb->inp_moptions);
+ ip_pcb->inp_moptions = 0;
+ }
+#ifdef INET6
+ if (ip_pcb->inp_vflag & INP_IPV6) {
+ struct in6pcb *in6p;
+
+ in6p = (struct in6pcb *)inp;
+ ip6_freepcbopts(in6p->in6p_outputopts);
+ }
+#endif /* INET6 */
+ ip_pcb->inp_vflag = 0;
+ /* free up authentication fields */
+ if (inp->sctp_ep.local_auth_chunks != NULL)
+ sctp_free_chunklist(inp->sctp_ep.local_auth_chunks);
+ if (inp->sctp_ep.local_hmacs != NULL)
+ sctp_free_hmaclist(inp->sctp_ep.local_hmacs);
+
+ shared_key = LIST_FIRST(&inp->sctp_ep.shared_keys);
+ while (shared_key) {
+ LIST_REMOVE(shared_key, next);
+ sctp_free_sharedkey(shared_key);
+ /* sa_ignore FREED_MEMORY */
+ shared_key = LIST_FIRST(&inp->sctp_ep.shared_keys);
+ }
+
+ /*
+ * If we have an address list, the following will free the list of
+ * ifaddr's that are set into this ep. Again macro limitations
+ * here, since using LIST_FOREACH could be a bad idea.
+ */
+ for ((laddr = LIST_FIRST(&inp->sctp_addr_list)); laddr != NULL;
+ laddr = nladdr) {
+ nladdr = LIST_NEXT(laddr, sctp_nxt_addr);
+ sctp_remove_laddr(laddr);
+ }
+
+#ifdef SCTP_TRACK_FREED_ASOCS
+ /* TEMP CODE */
+ for ((asoc = LIST_FIRST(&inp->sctp_asoc_free_list)); asoc != NULL;
+ asoc = nasoc) {
+ nasoc = LIST_NEXT(asoc, sctp_tcblist);
+ LIST_REMOVE(asoc, sctp_tcblist);
+ SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_asoc), asoc);
+ SCTP_DECR_ASOC_COUNT();
+ }
+ /* *** END TEMP CODE *** */
+#endif
+ /* Now let's see about freeing the EP hash table. */
+ if (inp->sctp_tcbhash != NULL) {
+ SCTP_HASH_FREE(inp->sctp_tcbhash, inp->sctp_hashmark);
+ inp->sctp_tcbhash = NULL;
+ }
+ /* Now we must put the ep memory back into the zone pool */
+ INP_LOCK_DESTROY(&inp->ip_inp.inp);
+ SCTP_INP_LOCK_DESTROY(inp);
+ SCTP_INP_READ_DESTROY(inp);
+ SCTP_ASOC_CREATE_LOCK_DESTROY(inp);
+ SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_ep), inp);
+ SCTP_DECR_EP_COUNT();
+}
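+
+/*
+ * Editor's note (sketch, not part of the imported source): the
+ * read-queue cleanup above drains by repeatedly detaching the head
+ * rather than iterating, since every free invalidates the cursor.
+ * Reduced to its skeleton with hypothetical types:
+ */
+#if 0	/* illustrative only */
+#include <sys/queue.h>
+#include <stdlib.h>
+
+struct entry {
+	TAILQ_ENTRY(entry) next;
+};
+TAILQ_HEAD(entryhead, entry);
+
+static void
+drain(struct entryhead *head)
+{
+	struct entry *e;
+
+	/* always take the current head; removal re-heads the list */
+	while ((e = TAILQ_FIRST(head)) != NULL) {
+		TAILQ_REMOVE(head, e, next);
+		free(e);
+	}
+}
+#endif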
+
+
+struct sctp_nets *
+sctp_findnet(struct sctp_tcb *stcb, struct sockaddr *addr)
+{
+ struct sctp_nets *net;
+
+ /* locate the address */
+ TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+ if (sctp_cmpaddr(addr, (struct sockaddr *)&net->ro._l_addr))
+ return (net);
+ }
+ return (NULL);
+}
+
+
+int
+sctp_is_address_on_local_host(struct sockaddr *addr, uint32_t vrf_id)
+{
+ struct sctp_ifa *sctp_ifa;
+
+ sctp_ifa = sctp_find_ifa_by_addr(addr, vrf_id, SCTP_ADDR_NOT_LOCKED);
+ if (sctp_ifa) {
+ return (1);
+ } else {
+ return (0);
+ }
+}
+
+/*
+ * Adds a remote endpoint address. This is done with the INIT/INIT-ACK as
+ * well as when an ASCONF arrives that adds it. It will also initialize
+ * all the cwnd stats and the like.
+ */
+int
+sctp_add_remote_addr(struct sctp_tcb *stcb, struct sockaddr *newaddr,
+ int set_scope, int from)
+{
+ /*
+ * The following is redundant to the same lines in
+ * sctp_aloc_assoc(), but is needed since others call the add
+ * address function directly.
+ */
+ struct sctp_nets *net, *netfirst;
+ int addr_inscope;
+
+ SCTPDBG(SCTP_DEBUG_PCB1, "Adding an address (from:%d) to the peer: ",
+ from);
+ SCTPDBG_ADDR(SCTP_DEBUG_PCB1, newaddr);
+
+ netfirst = sctp_findnet(stcb, newaddr);
+ if (netfirst) {
+ /*
+ * Lie and return ok; we don't want to make the association
+ * go away for this behavior. This will happen in the TCP
+ * model with a connected socket: the association does not
+ * reach the hash table until after it is built, so it can't
+ * be found. Mark it as reachable, since the initial creation
+ * will have been cleared and the NOT_IN_ASSOC flag will have
+ * been added... and we don't want to end up removing it back
+ * out.
+ */
+ if (netfirst->dest_state & SCTP_ADDR_UNCONFIRMED) {
+ netfirst->dest_state = (SCTP_ADDR_REACHABLE |
+ SCTP_ADDR_UNCONFIRMED);
+ } else {
+ netfirst->dest_state = SCTP_ADDR_REACHABLE;
+ }
+
+ return (0);
+ }
+ addr_inscope = 1;
+ if (newaddr->sa_family == AF_INET) {
+ struct sockaddr_in *sin;
+
+ sin = (struct sockaddr_in *)newaddr;
+ if (sin->sin_addr.s_addr == 0) {
+ /* Invalid address */
+ return (-1);
+ }
+ /* zero out the bzero area */
+ memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
+
+ /* assure len is set */
+ sin->sin_len = sizeof(struct sockaddr_in);
+ if (set_scope) {
+#ifdef SCTP_DONT_DO_PRIVADDR_SCOPE
+ stcb->asoc.ipv4_local_scope = 1;
+#else
+ if (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr)) {
+ stcb->asoc.ipv4_local_scope = 1;
+ }
+#endif /* SCTP_DONT_DO_PRIVADDR_SCOPE */
+ } else {
+ /* Validate the address is in scope */
+ if ((IN4_ISPRIVATE_ADDRESS(&sin->sin_addr)) &&
+ (stcb->asoc.ipv4_local_scope == 0)) {
+ addr_inscope = 0;
+ }
+ }
+#ifdef INET6
+ } else if (newaddr->sa_family == AF_INET6) {
+ struct sockaddr_in6 *sin6;
+
+ sin6 = (struct sockaddr_in6 *)newaddr;
+ if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
+ /* Invalid address */
+ return (-1);
+ }
+ /* assure len is set */
+ sin6->sin6_len = sizeof(struct sockaddr_in6);
+ if (set_scope) {
+ if (sctp_is_address_on_local_host(newaddr, stcb->asoc.vrf_id)) {
+ stcb->asoc.loopback_scope = 1;
+ stcb->asoc.local_scope = 0;
+ stcb->asoc.ipv4_local_scope = 1;
+ stcb->asoc.site_scope = 1;
+ } else if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
+ /*
+ * If the new destination is LINK_LOCAL we
+ * must have common site scope. Don't set
+ * the local scope since we may not share
+ * all links; only loopback can do this.
+ * Links on the local network would also
+ * be on our private network for v4.
+ */
+ stcb->asoc.ipv4_local_scope = 1;
+ stcb->asoc.site_scope = 1;
+ } else if (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr)) {
+ /*
+ * If the new destination is SITE_LOCAL then
+ * we must have site scope in common.
+ */
+ stcb->asoc.site_scope = 1;
+ }
+ } else {
+ /* Validate the address is in scope */
+ if (IN6_IS_ADDR_LOOPBACK(&sin6->sin6_addr) &&
+ (stcb->asoc.loopback_scope == 0)) {
+ addr_inscope = 0;
+ } else if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr) &&
+ (stcb->asoc.local_scope == 0)) {
+ addr_inscope = 0;
+ } else if (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr) &&
+ (stcb->asoc.site_scope == 0)) {
+ addr_inscope = 0;
+ }
+ }
+#endif
+ } else {
+ /* not supported family type */
+ return (-1);
+ }
+ net = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_net), struct sctp_nets);
+ if (net == NULL) {
+ return (-1);
+ }
+ SCTP_INCR_RADDR_COUNT();
+ bzero(net, sizeof(*net));
+ (void)SCTP_GETTIME_TIMEVAL(&net->start_time);
+ memcpy(&net->ro._l_addr, newaddr, newaddr->sa_len);
+ if (newaddr->sa_family == AF_INET) {
+ ((struct sockaddr_in *)&net->ro._l_addr)->sin_port = stcb->rport;
+ } else if (newaddr->sa_family == AF_INET6) {
+ ((struct sockaddr_in6 *)&net->ro._l_addr)->sin6_port = stcb->rport;
+ }
+ net->addr_is_local = sctp_is_address_on_local_host(newaddr, stcb->asoc.vrf_id);
+ if (net->addr_is_local && ((set_scope || (from == SCTP_ADDR_IS_CONFIRMED)))) {
+ stcb->asoc.loopback_scope = 1;
+ stcb->asoc.ipv4_local_scope = 1;
+ stcb->asoc.local_scope = 0;
+ stcb->asoc.site_scope = 1;
+ addr_inscope = 1;
+ }
+ net->failure_threshold = stcb->asoc.def_net_failure;
+ if (addr_inscope == 0) {
+ net->dest_state = (SCTP_ADDR_REACHABLE |
+ SCTP_ADDR_OUT_OF_SCOPE);
+ } else {
+ if (from == SCTP_ADDR_IS_CONFIRMED)
+ /* SCTP_ADDR_IS_CONFIRMED is passed by connect_x */
+ net->dest_state = SCTP_ADDR_REACHABLE;
+ else
+ net->dest_state = SCTP_ADDR_REACHABLE |
+ SCTP_ADDR_UNCONFIRMED;
+ }
+ /*
+ * We set this to 0; the timer code knows that this means it's an
+ * initial value.
+ */
+ net->RTO = 0;
+ net->RTO_measured = 0;
+ stcb->asoc.numnets++;
+ *(&net->ref_count) = 1;
+ net->tos_flowlabel = 0;
+ if (SCTP_BASE_SYSCTL(sctp_udp_tunneling_for_client_enable)) {
+ net->port = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
+ } else {
+ net->port = 0;
+ }
+#ifdef INET
+ if (newaddr->sa_family == AF_INET)
+ net->tos_flowlabel = stcb->asoc.default_tos;
+#endif
+#ifdef INET6
+ if (newaddr->sa_family == AF_INET6)
+ net->tos_flowlabel = stcb->asoc.default_flowlabel;
+#endif
+ /* Init the timer structure */
+ SCTP_OS_TIMER_INIT(&net->rxt_timer.timer);
+ SCTP_OS_TIMER_INIT(&net->fr_timer.timer);
+ SCTP_OS_TIMER_INIT(&net->pmtu_timer.timer);
+
+ /* Now generate a route for this guy */
+#ifdef INET6
+ /* KAME hack: embed scopeid */
+ if (newaddr->sa_family == AF_INET6) {
+ struct sockaddr_in6 *sin6;
+
+ sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
+ (void)sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone));
+ sin6->sin6_scope_id = 0;
+ }
+#endif
+ SCTP_RTALLOC((sctp_route_t *) & net->ro, stcb->asoc.vrf_id);
+
+ if (SCTP_ROUTE_HAS_VALID_IFN(&net->ro)) {
+ /* Get source address */
+ net->ro._s_addr = sctp_source_address_selection(stcb->sctp_ep,
+ stcb,
+ (sctp_route_t *) & net->ro,
+ net,
+ 0,
+ stcb->asoc.vrf_id);
+ /* Now get the interface MTU */
+ if (net->ro._s_addr && net->ro._s_addr->ifn_p) {
+ net->mtu = SCTP_GATHER_MTU_FROM_INTFC(net->ro._s_addr->ifn_p);
+ } else {
+ net->mtu = 0;
+ }
+ if (net->mtu == 0) {
+ /* Huh ?? */
+ net->mtu = SCTP_DEFAULT_MTU;
+ } else {
+ uint32_t rmtu;
+
+ rmtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._l_addr.sa, net->ro.ro_rt);
+ if (rmtu == 0) {
+ /*
+ * Start things off to match the MTU
+ * of the interface.
+ */
+ SCTP_SET_MTU_OF_ROUTE(&net->ro._l_addr.sa,
+ net->ro.ro_rt, net->mtu);
+ } else {
+ /*
+ * We take the route MTU over the interface's,
+ * since the route may lead out the loopback
+ * or a different interface.
+ */
+ net->mtu = rmtu;
+ }
+ }
+ if (from == SCTP_ALLOC_ASOC) {
+ stcb->asoc.smallest_mtu = net->mtu;
+ }
+ } else {
+ net->mtu = stcb->asoc.smallest_mtu;
+ }
+#ifdef INET6
+ if (newaddr->sa_family == AF_INET6) {
+ struct sockaddr_in6 *sin6;
+
+ sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
+ (void)sa6_recoverscope(sin6);
+ }
+#endif
+ if (net->port) {
+ net->mtu -= sizeof(struct udphdr);
+ }
+ if (stcb->asoc.smallest_mtu > net->mtu) {
+ stcb->asoc.smallest_mtu = net->mtu;
+ }
+ /* JRS - Use the congestion control given in the CC module */
+ stcb->asoc.cc_functions.sctp_set_initial_cc_param(stcb, net);
+
+ /*
+ * CMT: CUC algo - set find_pseudo_cumack to TRUE (1) at beginning
+ * of assoc (2005/06/27, iyengar@cis.udel.edu)
+ */
+ net->find_pseudo_cumack = 1;
+ net->find_rtx_pseudo_cumack = 1;
+ net->src_addr_selected = 0;
+ netfirst = TAILQ_FIRST(&stcb->asoc.nets);
+ if (net->ro.ro_rt == NULL) {
+ /* Since we have no route put it at the back */
+ TAILQ_INSERT_TAIL(&stcb->asoc.nets, net, sctp_next);
+ } else if (netfirst == NULL) {
+ /* We are the first one in the pool. */
+ TAILQ_INSERT_HEAD(&stcb->asoc.nets, net, sctp_next);
+ } else if (netfirst->ro.ro_rt == NULL) {
+ /*
+ * First one has NO route. Place this one ahead of the first
+ * one.
+ */
+ TAILQ_INSERT_HEAD(&stcb->asoc.nets, net, sctp_next);
+ } else if (net->ro.ro_rt->rt_ifp != netfirst->ro.ro_rt->rt_ifp) {
+ /*
+ * This one has a different interface than the one at the
+ * top of the list. Place it ahead.
+ */
+ TAILQ_INSERT_HEAD(&stcb->asoc.nets, net, sctp_next);
+ } else {
+ /*
+ * Ok, we have the same interface as the first one. Move
+ * forward until we find either a) one with a NULL route:
+ * insert ahead of that; b) one with a different ifp:
+ * insert after that; or c) the end of the list: insert at
+ * the tail.
+ */
+ struct sctp_nets *netlook;
+
+ do {
+ netlook = TAILQ_NEXT(netfirst, sctp_next);
+ if (netlook == NULL) {
+ /* End of the list */
+ TAILQ_INSERT_TAIL(&stcb->asoc.nets, net, sctp_next);
+ break;
+ } else if (netlook->ro.ro_rt == NULL) {
+ /* next one has NO route */
+ TAILQ_INSERT_BEFORE(netfirst, net, sctp_next);
+ break;
+ } else if (netlook->ro.ro_rt->rt_ifp != net->ro.ro_rt->rt_ifp) {
+ TAILQ_INSERT_AFTER(&stcb->asoc.nets, netlook,
+ net, sctp_next);
+ break;
+ }
+ /* Shift forward */
+ netfirst = netlook;
+ } while (netlook != NULL);
+ }
+
+ /* got to have a primary set */
+ if (stcb->asoc.primary_destination == 0) {
+ stcb->asoc.primary_destination = net;
+ } else if ((stcb->asoc.primary_destination->ro.ro_rt == NULL) &&
+ (net->ro.ro_rt) &&
+ ((net->dest_state & SCTP_ADDR_UNCONFIRMED) == 0)) {
+ /* No route to current primary; adopt new primary */
+ stcb->asoc.primary_destination = net;
+ }
+ sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, stcb->sctp_ep, stcb,
+ net);
+ /* Validate primary is first */
+ net = TAILQ_FIRST(&stcb->asoc.nets);
+ if ((net != stcb->asoc.primary_destination) &&
+ (stcb->asoc.primary_destination)) {
+ /*
+ * The first one on the list is NOT the primary.
+ * sctp_cmpaddr() is much more efficient if the primary is
+ * first on the list, so make it so.
+ */
+ TAILQ_REMOVE(&stcb->asoc.nets,
+ stcb->asoc.primary_destination, sctp_next);
+ TAILQ_INSERT_HEAD(&stcb->asoc.nets,
+ stcb->asoc.primary_destination, sctp_next);
+ }
+ return (0);
+}
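+
+/*
+ * Editor's note (sketch, not part of the imported source): the net
+ * insertion policy above, reduced to a standalone model in which the
+ * route and interface are plain pointers.  Assumption, taken from the
+ * comments in the code: entries with a route sort ahead of routeless
+ * ones, and entries sharing an interface stay grouped.
+ */
+#if 0	/* illustrative only */
+#include <sys/queue.h>
+#include <stddef.h>
+
+struct mnet {
+	TAILQ_ENTRY(mnet) link;
+	void *rt;		/* NULL == no route */
+	void *ifp;		/* interface the route leaves by */
+};
+TAILQ_HEAD(mnethead, mnet);
+
+static void
+insert_net(struct mnethead *head, struct mnet *n)
+{
+	struct mnet *first, *look;
+
+	first = TAILQ_FIRST(head);
+	if (n->rt == NULL) {
+		/* no route: park it at the back */
+		TAILQ_INSERT_TAIL(head, n, link);
+	} else if (first == NULL || first->rt == NULL ||
+	    first->ifp != n->ifp) {
+		/* empty list, routeless head, or a new ifp: lead */
+		TAILQ_INSERT_HEAD(head, n, link);
+	} else {
+		/* same ifp as the head: walk past its group */
+		do {
+			look = TAILQ_NEXT(first, link);
+			if (look == NULL) {
+				TAILQ_INSERT_TAIL(head, n, link);
+				break;
+			} else if (look->rt == NULL) {
+				TAILQ_INSERT_BEFORE(first, n, link);
+				break;
+			} else if (look->ifp != n->ifp) {
+				TAILQ_INSERT_AFTER(head, look, n, link);
+				break;
+			}
+			first = look;
+		} while (look != NULL);
+	}
+}
+#endif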
+
+
+static uint32_t
+sctp_aloc_a_assoc_id(struct sctp_inpcb *inp, struct sctp_tcb *stcb)
+{
+ uint32_t id;
+ struct sctpasochead *head;
+ struct sctp_tcb *lstcb;
+
+ SCTP_INP_WLOCK(inp);
+try_again:
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) {
+ /* TSNH */
+ SCTP_INP_WUNLOCK(inp);
+ return (0);
+ }
+ /*
+ * We don't allow the assoc id to be 0; otherwise, if the id were
+ * to wrap, we would have issues with some socket options.
+ */
+ if (inp->sctp_associd_counter == 0) {
+ inp->sctp_associd_counter++;
+ }
+ id = inp->sctp_associd_counter;
+ inp->sctp_associd_counter++;
+ lstcb = sctp_findasoc_ep_asocid_locked(inp, (sctp_assoc_t) id, 0);
+ if (lstcb) {
+ goto try_again;
+ }
+ head = &inp->sctp_asocidhash[SCTP_PCBHASH_ASOC(id, inp->hashasocidmark)];
+ LIST_INSERT_HEAD(head, stcb, sctp_tcbasocidhash);
+ stcb->asoc.in_asocid_hash = 1;
+ SCTP_INP_WUNLOCK(inp);
+ return id;
+}
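+
+/*
+ * Editor's note (sketch, not part of the imported source): the
+ * allocator above reserves id 0 and retries on collision, so a
+ * wrapped counter can never hand out an id that is still live.  A
+ * user-space reduction, with in_use() standing in for the asocid
+ * hash lookup (hypothetical):
+ */
+#if 0	/* illustrative only */
+#include <stdint.h>
+#include <stdbool.h>
+
+extern bool in_use(uint32_t id);	/* hypothetical hash probe */
+
+static uint32_t
+alloc_id(uint32_t *counter)
+{
+	uint32_t id;
+
+	do {
+		if (*counter == 0)	/* 0 is reserved; step over it */
+			(*counter)++;
+		id = (*counter)++;
+	} while (in_use(id));
+	return (id);
+}
+#endif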
+
+/*
+ * Allocate an association and add it to the endpoint. The caller must be
+ * careful to add all additional addresses right away, once they are known,
+ * or else the assoc may experience a blackout scenario.
+ */
+struct sctp_tcb *
+sctp_aloc_assoc(struct sctp_inpcb *inp, struct sockaddr *firstaddr,
+ int *error, uint32_t override_tag, uint32_t vrf_id,
+ struct thread *p
+)
+{
+ /* note the p argument is only valid in unbound sockets */
+
+ struct sctp_tcb *stcb;
+ struct sctp_association *asoc;
+ struct sctpasochead *head;
+ uint16_t rport;
+ int err;
+
+ /*
+ * Assumption made here: Caller has done a
+ * sctp_findassociation_ep_addr(ep, addr's); to make sure the
+ * address does not exist already.
+ */
+ if (SCTP_BASE_INFO(ipi_count_asoc) >= SCTP_MAX_NUM_OF_ASOC) {
+ /* Hit max assoc, sorry no more */
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, ENOBUFS);
+ *error = ENOBUFS;
+ return (NULL);
+ }
+ if (firstaddr == NULL) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EINVAL);
+ *error = EINVAL;
+ return (NULL);
+ }
+ SCTP_INP_RLOCK(inp);
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) &&
+ ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_PORTREUSE)) ||
+ (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) {
+ /*
+ * If it's in the TCP pool, it's NOT allowed to create an
+ * association. The parent listener needs to call
+ * sctp_aloc_assoc... or the one-to-many socket. If a peeled
+ * off or connected one does this, it's an error.
+ */
+ SCTP_INP_RUNLOCK(inp);
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EINVAL);
+ *error = EINVAL;
+ return (NULL);
+ }
+ SCTPDBG(SCTP_DEBUG_PCB3, "Allocate an association for peer:");
+#ifdef SCTP_DEBUG
+ if (firstaddr) {
+ SCTPDBG_ADDR(SCTP_DEBUG_PCB3, firstaddr);
+ SCTPDBG(SCTP_DEBUG_PCB3, "Port:%d\n",
+ ntohs(((struct sockaddr_in *)firstaddr)->sin_port));
+ } else {
+ SCTPDBG(SCTP_DEBUG_PCB3, "None\n");
+ }
+#endif /* SCTP_DEBUG */
+ if (firstaddr->sa_family == AF_INET) {
+ struct sockaddr_in *sin;
+
+ sin = (struct sockaddr_in *)firstaddr;
+ if ((sin->sin_port == 0) || (sin->sin_addr.s_addr == 0)) {
+ /* Invalid address */
+ SCTP_INP_RUNLOCK(inp);
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EINVAL);
+ *error = EINVAL;
+ return (NULL);
+ }
+ rport = sin->sin_port;
+ } else if (firstaddr->sa_family == AF_INET6) {
+ struct sockaddr_in6 *sin6;
+
+ sin6 = (struct sockaddr_in6 *)firstaddr;
+ if ((sin6->sin6_port == 0) ||
+ (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr))) {
+ /* Invalid address */
+ SCTP_INP_RUNLOCK(inp);
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EINVAL);
+ *error = EINVAL;
+ return (NULL);
+ }
+ rport = sin6->sin6_port;
+ } else {
+ /* not supported family type */
+ SCTP_INP_RUNLOCK(inp);
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EINVAL);
+ *error = EINVAL;
+ return (NULL);
+ }
+ SCTP_INP_RUNLOCK(inp);
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
+ /*
+ * If you have not performed a bind, then we need to do the
+ * ephemeral bind for you.
+ */
+ if ((err = sctp_inpcb_bind(inp->sctp_socket,
+ (struct sockaddr *)NULL,
+ (struct sctp_ifa *)NULL,
+ p
+ ))) {
+ /* bind error, probably perm */
+ *error = err;
+ return (NULL);
+ }
+ }
+ stcb = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_asoc), struct sctp_tcb);
+ if (stcb == NULL) {
+ /* out of memory? */
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, ENOMEM);
+ *error = ENOMEM;
+ return (NULL);
+ }
+ SCTP_INCR_ASOC_COUNT();
+
+ bzero(stcb, sizeof(*stcb));
+ asoc = &stcb->asoc;
+
+ asoc->assoc_id = sctp_aloc_a_assoc_id(inp, stcb);
+ SCTP_TCB_LOCK_INIT(stcb);
+ SCTP_TCB_SEND_LOCK_INIT(stcb);
+ stcb->rport = rport;
+ /* set up back pointers */
+ stcb->sctp_ep = inp;
+ stcb->sctp_socket = inp->sctp_socket;
+ if ((err = sctp_init_asoc(inp, stcb, override_tag, vrf_id))) {
+ /* failed */
+ SCTP_TCB_LOCK_DESTROY(stcb);
+ SCTP_TCB_SEND_LOCK_DESTROY(stcb);
+ LIST_REMOVE(stcb, sctp_tcbasocidhash);
+ SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_asoc), stcb);
+ SCTP_DECR_ASOC_COUNT();
+ *error = err;
+ return (NULL);
+ }
+ /* and the port */
+ SCTP_INP_INFO_WLOCK();
+ SCTP_INP_WLOCK(inp);
+ if (inp->sctp_flags & (SCTP_PCB_FLAGS_SOCKET_GONE | SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
+ /* inpcb freed while alloc going on */
+ SCTP_TCB_LOCK_DESTROY(stcb);
+ SCTP_TCB_SEND_LOCK_DESTROY(stcb);
+ LIST_REMOVE(stcb, sctp_tcbasocidhash);
+ SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_asoc), stcb);
+ SCTP_INP_WUNLOCK(inp);
+ SCTP_INP_INFO_WUNLOCK();
+ SCTP_DECR_ASOC_COUNT();
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EINVAL);
+ *error = EINVAL;
+ return (NULL);
+ }
+ SCTP_TCB_LOCK(stcb);
+
+ /* now that my_vtag is set, add it to the hash */
+ head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(stcb->asoc.my_vtag, SCTP_BASE_INFO(hashasocmark))];
+ /* put it in the bucket in the vtag hash of assoc's for the system */
+ LIST_INSERT_HEAD(head, stcb, sctp_asocs);
+ SCTP_INP_INFO_WUNLOCK();
+
+ if ((err = sctp_add_remote_addr(stcb, firstaddr, SCTP_DO_SETSCOPE, SCTP_ALLOC_ASOC))) {
+ /* failure.. memory error? */
+ if (asoc->strmout) {
+ SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
+ asoc->strmout = NULL;
+ }
+ if (asoc->mapping_array) {
+ SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
+ asoc->mapping_array = NULL;
+ }
+ if (asoc->nr_mapping_array) {
+ SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
+ asoc->nr_mapping_array = NULL;
+ }
+ SCTP_DECR_ASOC_COUNT();
+ SCTP_TCB_LOCK_DESTROY(stcb);
+ SCTP_TCB_SEND_LOCK_DESTROY(stcb);
+ LIST_REMOVE(stcb, sctp_tcbasocidhash);
+ SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_asoc), stcb);
+ SCTP_INP_WUNLOCK(inp);
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, ENOBUFS);
+ *error = ENOBUFS;
+ return (NULL);
+ }
+ /* Init all the timers */
+ SCTP_OS_TIMER_INIT(&asoc->hb_timer.timer);
+ SCTP_OS_TIMER_INIT(&asoc->dack_timer.timer);
+ SCTP_OS_TIMER_INIT(&asoc->strreset_timer.timer);
+ SCTP_OS_TIMER_INIT(&asoc->asconf_timer.timer);
+ SCTP_OS_TIMER_INIT(&asoc->shut_guard_timer.timer);
+ SCTP_OS_TIMER_INIT(&asoc->autoclose_timer.timer);
+ SCTP_OS_TIMER_INIT(&asoc->delayed_event_timer.timer);
+ SCTP_OS_TIMER_INIT(&asoc->delete_prim_timer.timer);
+
+ LIST_INSERT_HEAD(&inp->sctp_asoc_list, stcb, sctp_tcblist);
+ /* now file the port under the hash as well */
+ if (inp->sctp_tcbhash != NULL) {
+ head = &inp->sctp_tcbhash[SCTP_PCBHASH_ALLADDR(stcb->rport,
+ inp->sctp_hashmark)];
+ LIST_INSERT_HEAD(head, stcb, sctp_tcbhash);
+ }
+ SCTP_INP_WUNLOCK(inp);
+ SCTPDBG(SCTP_DEBUG_PCB1, "Association %p now allocated\n", stcb);
+ return (stcb);
+}
+
+
+void
+sctp_remove_net(struct sctp_tcb *stcb, struct sctp_nets *net)
+{
+ struct sctp_association *asoc;
+
+ asoc = &stcb->asoc;
+ asoc->numnets--;
+ TAILQ_REMOVE(&asoc->nets, net, sctp_next);
+ if (net == asoc->primary_destination) {
+ /* Reset primary */
+ struct sctp_nets *lnet;
+
+ lnet = TAILQ_FIRST(&asoc->nets);
+ /*
+ * Mobility adaptation: ideally, if the deleted destination
+ * is the primary, it becomes a fast retransmission trigger
+ * by the subsequent SET PRIMARY. (by micchie)
+ */
+ if (sctp_is_mobility_feature_on(stcb->sctp_ep,
+ SCTP_MOBILITY_BASE) ||
+ sctp_is_mobility_feature_on(stcb->sctp_ep,
+ SCTP_MOBILITY_FASTHANDOFF)) {
+ SCTPDBG(SCTP_DEBUG_ASCONF1, "remove_net: primary dst is deleting\n");
+ if (asoc->deleted_primary != NULL) {
+ SCTPDBG(SCTP_DEBUG_ASCONF1, "remove_net: deleted primary may be already stored\n");
+ goto out;
+ }
+ asoc->deleted_primary = net;
+ atomic_add_int(&net->ref_count, 1);
+ memset(&net->lastsa, 0, sizeof(net->lastsa));
+ memset(&net->lastsv, 0, sizeof(net->lastsv));
+ sctp_mobility_feature_on(stcb->sctp_ep,
+ SCTP_MOBILITY_PRIM_DELETED);
+ sctp_timer_start(SCTP_TIMER_TYPE_PRIM_DELETED,
+ stcb->sctp_ep, stcb, NULL);
+ }
+out:
+ /* Try to find a confirmed primary */
+ asoc->primary_destination = sctp_find_alternate_net(stcb, lnet, 0);
+ }
+ if (net == asoc->last_data_chunk_from) {
+ /* Reset last_data_chunk_from */
+ asoc->last_data_chunk_from = TAILQ_FIRST(&asoc->nets);
+ }
+ if (net == asoc->last_control_chunk_from) {
+ /* Clear net */
+ asoc->last_control_chunk_from = NULL;
+ }
+ sctp_free_remote_addr(net);
+}
+
+/*
+ * Remove a remote endpoint address from an association; it will fail if
+ * the address does not exist.
+ */
+int
+sctp_del_remote_addr(struct sctp_tcb *stcb, struct sockaddr *remaddr)
+{
+ /*
+ * Here we need to remove a remote address. This is quite simple:
+ * we first find it in the list of addresses for the association
+ * (stcb->asoc.nets) and then, if it is there, we remove that
+ * item from the list. Note we do not allow it to be removed if
+ * there are no other addresses.
+ */
+ struct sctp_association *asoc;
+ struct sctp_nets *net, *net_tmp;
+
+ asoc = &stcb->asoc;
+
+ /* locate the address */
+ for (net = TAILQ_FIRST(&asoc->nets); net != NULL; net = net_tmp) {
+ net_tmp = TAILQ_NEXT(net, sctp_next);
+ if (net->ro._l_addr.sa.sa_family != remaddr->sa_family) {
+ continue;
+ }
+ if (sctp_cmpaddr((struct sockaddr *)&net->ro._l_addr,
+ remaddr)) {
+ /* we found the guy */
+ if (asoc->numnets < 2) {
+ /* Must have at LEAST two remote addresses */
+ return (-1);
+ } else {
+ sctp_remove_net(stcb, net);
+ return (0);
+ }
+ }
+ }
+ /* not found. */
+ return (-2);
+}
+
+void
+sctp_delete_from_timewait(uint32_t tag, uint16_t lport, uint16_t rport)
+{
+ struct sctpvtaghead *chain;
+ struct sctp_tagblock *twait_block;
+ int found = 0;
+ int i;
+
+ chain = &SCTP_BASE_INFO(vtag_timewait)[(tag % SCTP_STACK_VTAG_HASH_SIZE)];
+ if (!LIST_EMPTY(chain)) {
+ LIST_FOREACH(twait_block, chain, sctp_nxt_tagblock) {
+ for (i = 0; i < SCTP_NUMBER_IN_VTAG_BLOCK; i++) {
+ if ((twait_block->vtag_block[i].v_tag == tag) &&
+ (twait_block->vtag_block[i].lport == lport) &&
+ (twait_block->vtag_block[i].rport == rport)) {
+ twait_block->vtag_block[i].tv_sec_at_expire = 0;
+ twait_block->vtag_block[i].v_tag = 0;
+ twait_block->vtag_block[i].lport = 0;
+ twait_block->vtag_block[i].rport = 0;
+ found = 1;
+ break;
+ }
+ }
+ if (found)
+ break;
+ }
+ }
+}
+
+int
+sctp_is_in_timewait(uint32_t tag, uint16_t lport, uint16_t rport)
+{
+ struct sctpvtaghead *chain;
+ struct sctp_tagblock *twait_block;
+ int found = 0;
+ int i;
+
+ SCTP_INP_INFO_WLOCK();
+ chain = &SCTP_BASE_INFO(vtag_timewait)[(tag % SCTP_STACK_VTAG_HASH_SIZE)];
+ if (!LIST_EMPTY(chain)) {
+ LIST_FOREACH(twait_block, chain, sctp_nxt_tagblock) {
+ for (i = 0; i < SCTP_NUMBER_IN_VTAG_BLOCK; i++) {
+ if ((twait_block->vtag_block[i].v_tag == tag) &&
+ (twait_block->vtag_block[i].lport == lport) &&
+ (twait_block->vtag_block[i].rport == rport)) {
+ found = 1;
+ break;
+ }
+ }
+ if (found)
+ break;
+ }
+ }
+ SCTP_INP_INFO_WUNLOCK();
+ return (found);
+}
+
+
+void
+sctp_add_vtag_to_timewait(uint32_t tag, uint32_t time, uint16_t lport, uint16_t rport)
+{
+ struct sctpvtaghead *chain;
+ struct sctp_tagblock *twait_block;
+ struct timeval now;
+ int set, i;
+
+ if (time == 0) {
+ /* It's disabled */
+ return;
+ }
+ (void)SCTP_GETTIME_TIMEVAL(&now);
+ chain = &SCTP_BASE_INFO(vtag_timewait)[(tag % SCTP_STACK_VTAG_HASH_SIZE)];
+ set = 0;
+ if (!LIST_EMPTY(chain)) {
+ /* Block(s) present; let's find space and expire on the fly */
+ LIST_FOREACH(twait_block, chain, sctp_nxt_tagblock) {
+ for (i = 0; i < SCTP_NUMBER_IN_VTAG_BLOCK; i++) {
+ if ((twait_block->vtag_block[i].v_tag == 0) &&
+ !set) {
+ twait_block->vtag_block[i].tv_sec_at_expire =
+ now.tv_sec + time;
+ twait_block->vtag_block[i].v_tag = tag;
+ twait_block->vtag_block[i].lport = lport;
+ twait_block->vtag_block[i].rport = rport;
+ set = 1;
+ } else if ((twait_block->vtag_block[i].v_tag) &&
+ ((long)twait_block->vtag_block[i].tv_sec_at_expire < now.tv_sec)) {
+ /* Audit expires this guy */
+ twait_block->vtag_block[i].tv_sec_at_expire = 0;
+ twait_block->vtag_block[i].v_tag = 0;
+ twait_block->vtag_block[i].lport = 0;
+ twait_block->vtag_block[i].rport = 0;
+ if (set == 0) {
+ /* Reuse it for my new tag */
+ twait_block->vtag_block[i].tv_sec_at_expire = now.tv_sec + time;
+ twait_block->vtag_block[i].v_tag = tag;
+ twait_block->vtag_block[i].lport = lport;
+ twait_block->vtag_block[i].rport = rport;
+ set = 1;
+ }
+ }
+ }
+ if (set) {
+ /*
+ * We only audit up to the block where we
+ * can place our tag.
+ */
+ break;
+ }
+ }
+ }
+ /* Need to add a new block to chain */
+ if (!set) {
+ SCTP_MALLOC(twait_block, struct sctp_tagblock *,
+ sizeof(struct sctp_tagblock), SCTP_M_TIMW);
+ if (twait_block == NULL) {
+#ifdef INVARIANTS
+ panic("Can not alloc tagblock");
+#endif
+ return;
+ }
+ memset(twait_block, 0, sizeof(struct sctp_tagblock));
+ LIST_INSERT_HEAD(chain, twait_block, sctp_nxt_tagblock);
+ twait_block->vtag_block[0].tv_sec_at_expire = now.tv_sec + time;
+ twait_block->vtag_block[0].v_tag = tag;
+ twait_block->vtag_block[0].lport = lport;
+ twait_block->vtag_block[0].rport = rport;
+ }
+}
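+
+/*
+ * Editor's note (sketch, not part of the imported source): the scan
+ * above does double duty, reaping expired entries as a side effect
+ * while hunting for a free slot.  One block of that pass, with the
+ * vtag_block fields abbreviated (names hypothetical):
+ */
+#if 0	/* illustrative only */
+#include <stdint.h>
+
+#define NSLOTS	15	/* stand-in for SCTP_NUMBER_IN_VTAG_BLOCK */
+
+struct twslot {
+	uint32_t tag;	/* 0 == slot is free */
+	long expire;	/* absolute expiry time */
+};
+
+static int
+place_tag(struct twslot *blk, long now, uint32_t tag, long hold)
+{
+	int i, set = 0;
+
+	for (i = 0; i < NSLOTS; i++) {
+		if (blk[i].tag != 0 && blk[i].expire < now) {
+			/* expired: reap it in passing */
+			blk[i].tag = 0;
+			blk[i].expire = 0;
+		}
+		if (!set && blk[i].tag == 0) {
+			blk[i].tag = tag;
+			blk[i].expire = now + hold;
+			set = 1;
+		}
+	}
+	return (set);	/* 0 means the caller must add a new block */
+}
+#endif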
+
+
+
+/*-
+ * Free the association after un-hashing the remote port. This
+ * function ALWAYS returns holding NO LOCK on the stcb. It DOES
+ * expect that the input to this function IS a locked TCB.
+ * It will return 0 if it did NOT destroy the association (instead
+ * it unlocks it). It will return NON-zero if it either destroyed
+ * the association OR the association was already destroyed.
+ */
+int
+sctp_free_assoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int from_inpcbfree, int from_location)
+{
+ int i;
+ struct sctp_association *asoc;
+ struct sctp_nets *net, *prev;
+ struct sctp_laddr *laddr;
+ struct sctp_tmit_chunk *chk;
+ struct sctp_asconf_addr *aparam;
+ struct sctp_asconf_ack *aack;
+ struct sctp_stream_reset_list *liste;
+ struct sctp_queued_to_read *sq;
+ struct sctp_stream_queue_pending *sp;
+ sctp_sharedkey_t *shared_key;
+ struct socket *so;
+ int ccnt = 0;
+ int cnt = 0;
+
+ /* first, let's purge the entry from the hash table. */
+
+#ifdef SCTP_LOG_CLOSING
+ sctp_log_closing(inp, stcb, 6);
+#endif
+ if (stcb->asoc.state == 0) {
+#ifdef SCTP_LOG_CLOSING
+ sctp_log_closing(inp, NULL, 7);
+#endif
+ /* there is no asoc, really TSNH :-0 */
+ return (1);
+ }
+ /* TEMP CODE */
+ if (stcb->freed_from_where == 0) {
+ /* Only record the first place free happened from */
+ stcb->freed_from_where = from_location;
+ }
+ /* TEMP CODE */
+
+ asoc = &stcb->asoc;
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
+ (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE))
+ /* nothing around */
+ so = NULL;
+ else
+ so = inp->sctp_socket;
+
+ /*
+ * We use timer-based freeing if a reader or writer is in the way.
+ * So we first check if we are actually being called from a timer;
+ * if so, we abort early if a reader or writer is still in the way.
+ */
+ if ((stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) &&
+ (from_inpcbfree == SCTP_NORMAL_PROC)) {
+ /*
+ * is it the timer driving us? if so are the reader/writers
+ * gone?
+ */
+ if (stcb->asoc.refcnt) {
+ /* nope, reader or writer in the way */
+ sctp_timer_start(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL);
+ /* no asoc destroyed */
+ SCTP_TCB_UNLOCK(stcb);
+#ifdef SCTP_LOG_CLOSING
+ sctp_log_closing(inp, stcb, 8);
+#endif
+ return (0);
+ }
+ }
+ /* now clean up any other timers */
+ (void)SCTP_OS_TIMER_STOP(&asoc->hb_timer.timer);
+ asoc->hb_timer.self = NULL;
+ (void)SCTP_OS_TIMER_STOP(&asoc->dack_timer.timer);
+ asoc->dack_timer.self = NULL;
+ (void)SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer);
+ /*-
+ * For stream reset we don't blast this unless
+ * it is a str-reset timer; it might be the
+ * free-asoc timer, which we DON'T want to
+ * disturb.
+ */
+ if (asoc->strreset_timer.type == SCTP_TIMER_TYPE_STRRESET)
+ asoc->strreset_timer.self = NULL;
+ (void)SCTP_OS_TIMER_STOP(&asoc->asconf_timer.timer);
+ asoc->asconf_timer.self = NULL;
+ (void)SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer);
+ asoc->autoclose_timer.self = NULL;
+ (void)SCTP_OS_TIMER_STOP(&asoc->shut_guard_timer.timer);
+ asoc->shut_guard_timer.self = NULL;
+ (void)SCTP_OS_TIMER_STOP(&asoc->delayed_event_timer.timer);
+ asoc->delayed_event_timer.self = NULL;
+ /* Mobility adaptation */
+ (void)SCTP_OS_TIMER_STOP(&asoc->delete_prim_timer.timer);
+ asoc->delete_prim_timer.self = NULL;
+ TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+ (void)SCTP_OS_TIMER_STOP(&net->fr_timer.timer);
+ net->fr_timer.self = NULL;
+ (void)SCTP_OS_TIMER_STOP(&net->rxt_timer.timer);
+ net->rxt_timer.self = NULL;
+ (void)SCTP_OS_TIMER_STOP(&net->pmtu_timer.timer);
+ net->pmtu_timer.self = NULL;
+ }
+ /* Now the read queue needs to be cleaned up (only once) */
+ cnt = 0;
+ if ((stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) == 0) {
+ stcb->asoc.state |= SCTP_STATE_ABOUT_TO_BE_FREED;
+ SCTP_INP_READ_LOCK(inp);
+ TAILQ_FOREACH(sq, &inp->read_queue, next) {
+ if (sq->stcb == stcb) {
+ sq->do_not_ref_stcb = 1;
+ sq->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
+ /*
+ * If there is no end, there never will be
+ * now.
+ */
+ if (sq->end_added == 0) {
+ /* Held for PD-API; clear that. */
+ sq->pdapi_aborted = 1;
+ sq->held_length = 0;
+ if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_PDAPIEVNT) && (so != NULL)) {
+ /*
+ * Need to add a PD-API
+ * aborted indication.
+ * Setting the control_pdapi
+ * assures that it will be
+ * added right after this
+ * msg.
+ */
+ uint32_t strseq;
+
+ stcb->asoc.control_pdapi = sq;
+ strseq = (sq->sinfo_stream << 16) | sq->sinfo_ssn;
+ sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
+ stcb,
+ SCTP_PARTIAL_DELIVERY_ABORTED,
+ (void *)&strseq,
+ SCTP_SO_LOCKED);
+ stcb->asoc.control_pdapi = NULL;
+ }
+ }
+ /* Add an end to wake them */
+ sq->end_added = 1;
+ cnt++;
+ }
+ }
+ SCTP_INP_READ_UNLOCK(inp);
+ if (stcb->block_entry) {
+ cnt++;
+ SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_PCB, ECONNRESET);
+ stcb->block_entry->error = ECONNRESET;
+ stcb->block_entry = NULL;
+ }
+ }
+ if ((stcb->asoc.refcnt) || (stcb->asoc.state & SCTP_STATE_IN_ACCEPT_QUEUE)) {
+ /*
+ * Someone holds a reference OR the socket is not yet
+ * accepted.
+ */
+ if ((stcb->asoc.refcnt) ||
+ (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
+ (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
+ stcb->asoc.state &= ~SCTP_STATE_IN_ACCEPT_QUEUE;
+ sctp_timer_start(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL);
+ }
+ SCTP_TCB_UNLOCK(stcb);
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
+ (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE))
+ /* nothing around */
+ so = NULL;
+ if (so) {
+ /* Wake any reader/writers */
+ sctp_sorwakeup(inp, so);
+ sctp_sowwakeup(inp, so);
+ }
+#ifdef SCTP_LOG_CLOSING
+ sctp_log_closing(inp, stcb, 9);
+#endif
+ /* no asoc destroyed */
+ return (0);
+ }
+#ifdef SCTP_LOG_CLOSING
+ sctp_log_closing(inp, stcb, 10);
+#endif
+ /*
+ * When I reach here, no others want to kill the assoc yet... and I
+ * own the lock. Now it's possible an abort comes in when I do the
+ * lock exchange below to grab all the locks to do the final take
+ * out. To prevent this we increment the count, which will start a
+ * timer and blow out above, thus assuring us that we hold exclusive
+ * killing of the asoc. Note that after getting back the TCB lock we
+ * will go ahead and decrement the counter back down and stop any
+ * timer a passing stranger may have started :-S
+ */
+ if (from_inpcbfree == SCTP_NORMAL_PROC) {
+ atomic_add_int(&stcb->asoc.refcnt, 1);
+
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_INP_INFO_WLOCK();
+ SCTP_INP_WLOCK(inp);
+ SCTP_TCB_LOCK(stcb);
+ }
+ /* Double check the GONE flag */
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
+ (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE))
+ /* nothing around */
+ so = NULL;
+
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+ (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
+ /*
+ * For the TCP type we need special handling when we are
+ * connected. We also include the peeled-off ones too.
+ */
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
+ inp->sctp_flags &= ~SCTP_PCB_FLAGS_CONNECTED;
+ inp->sctp_flags |= SCTP_PCB_FLAGS_WAS_CONNECTED;
+ if (so) {
+ SOCK_LOCK(so);
+ if (so->so_rcv.sb_cc == 0) {
+ so->so_state &= ~(SS_ISCONNECTING |
+ SS_ISDISCONNECTING |
+ SS_ISCONFIRMING |
+ SS_ISCONNECTED);
+ }
+ socantrcvmore_locked(so);
+ sctp_sowwakeup(inp, so);
+ sctp_sorwakeup(inp, so);
+ SCTP_SOWAKEUP(so);
+ }
+ }
+ }
+ /*
+ * Make it invalid too, so that if it's about to run it will abort
+ * and return.
+ */
+ /* drop the extra reference we took above */
+ if (from_inpcbfree == SCTP_NORMAL_PROC) {
+ atomic_add_int(&stcb->asoc.refcnt, -1);
+ }
+ if (stcb->asoc.refcnt) {
+ stcb->asoc.state &= ~SCTP_STATE_IN_ACCEPT_QUEUE;
+ sctp_timer_start(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL);
+ if (from_inpcbfree == SCTP_NORMAL_PROC) {
+ SCTP_INP_INFO_WUNLOCK();
+ SCTP_INP_WUNLOCK(inp);
+ }
+ SCTP_TCB_UNLOCK(stcb);
+ return (0);
+ }
+ asoc->state = 0;
+ if (inp->sctp_tcbhash) {
+ LIST_REMOVE(stcb, sctp_tcbhash);
+ }
+ if (stcb->asoc.in_asocid_hash) {
+ LIST_REMOVE(stcb, sctp_tcbasocidhash);
+ }
+ /* Now let's remove it from the list of ALL associations in the EP */
+ LIST_REMOVE(stcb, sctp_tcblist);
+ if (from_inpcbfree == SCTP_NORMAL_PROC) {
+ SCTP_INP_INCR_REF(inp);
+ SCTP_INP_WUNLOCK(inp);
+ }
+ /* pull from vtag hash */
+ LIST_REMOVE(stcb, sctp_asocs);
+ sctp_add_vtag_to_timewait(asoc->my_vtag, SCTP_BASE_SYSCTL(sctp_vtag_time_wait),
+ inp->sctp_lport, stcb->rport);
+
+ /*
+ * Now re-stop the timers to be sure: this is paranoia at its finest!
+ */
+ (void)SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer);
+ (void)SCTP_OS_TIMER_STOP(&asoc->hb_timer.timer);
+ (void)SCTP_OS_TIMER_STOP(&asoc->dack_timer.timer);
+ (void)SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer);
+ (void)SCTP_OS_TIMER_STOP(&asoc->asconf_timer.timer);
+ (void)SCTP_OS_TIMER_STOP(&asoc->shut_guard_timer.timer);
+ (void)SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer);
+ (void)SCTP_OS_TIMER_STOP(&asoc->delayed_event_timer.timer);
+ TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+ (void)SCTP_OS_TIMER_STOP(&net->fr_timer.timer);
+ (void)SCTP_OS_TIMER_STOP(&net->rxt_timer.timer);
+ (void)SCTP_OS_TIMER_STOP(&net->pmtu_timer.timer);
+ }
+
+ asoc->strreset_timer.type = SCTP_TIMER_TYPE_NONE;
+ prev = NULL;
+ /*
+ * The chunk lists and such SHOULD be empty but we check them just
+ * in case.
+ */
+ /* anything on the wheel needs to be removed */
+ for (i = 0; i < asoc->streamoutcnt; i++) {
+ struct sctp_stream_out *outs;
+
+ outs = &asoc->strmout[i];
+ /* now clean up any chunks here */
+ sp = TAILQ_FIRST(&outs->outqueue);
+ while (sp) {
+ TAILQ_REMOVE(&outs->outqueue, sp, next);
+ if (sp->data) {
+ if (so) {
+ /* Still an open socket - report */
+ sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb,
+ SCTP_NOTIFY_DATAGRAM_UNSENT,
+ (void *)sp, SCTP_SO_LOCKED);
+ }
+ if (sp->data) {
+ sctp_m_freem(sp->data);
+ sp->data = NULL;
+ sp->tail_mbuf = NULL;
+ }
+ }
+ if (sp->net) {
+ sctp_free_remote_addr(sp->net);
+ sp->net = NULL;
+ }
+ sctp_free_spbufspace(stcb, asoc, sp);
+ if (sp->holds_key_ref)
+ sctp_auth_key_release(stcb, sp->auth_keyid);
+ /* Free the zone stuff */
+ SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_strmoq), sp);
+ SCTP_DECR_STRMOQ_COUNT();
+ /* sa_ignore FREED_MEMORY */
+ sp = TAILQ_FIRST(&outs->outqueue);
+ }
+ }
+
+ /* sa_ignore FREED_MEMORY */
+ while ((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) {
+ TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
+ SCTP_FREE(liste, SCTP_M_STRESET);
+ }
+
+ sq = TAILQ_FIRST(&asoc->pending_reply_queue);
+ while (sq) {
+ TAILQ_REMOVE(&asoc->pending_reply_queue, sq, next);
+ if (sq->data) {
+ sctp_m_freem(sq->data);
+ sq->data = NULL;
+ }
+ sctp_free_remote_addr(sq->whoFrom);
+ sq->whoFrom = NULL;
+ sq->stcb = NULL;
+ /* Free the ctl entry */
+ SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_readq), sq);
+ SCTP_DECR_READQ_COUNT();
+ /* sa_ignore FREED_MEMORY */
+ sq = TAILQ_FIRST(&asoc->pending_reply_queue);
+ }
+
+ chk = TAILQ_FIRST(&asoc->free_chunks);
+ while (chk) {
+ TAILQ_REMOVE(&asoc->free_chunks, chk, sctp_next);
+ if (chk->data) {
+ sctp_m_freem(chk->data);
+ chk->data = NULL;
+ }
+ if (chk->holds_key_ref)
+ sctp_auth_key_release(stcb, chk->auth_keyid);
+ ccnt++;
+ SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_chunk), chk);
+ SCTP_DECR_CHK_COUNT();
+ atomic_subtract_int(&SCTP_BASE_INFO(ipi_free_chunks), 1);
+ asoc->free_chunk_cnt--;
+ /* sa_ignore FREED_MEMORY */
+ chk = TAILQ_FIRST(&asoc->free_chunks);
+ }
+ /* pending send queue SHOULD be empty */
+ if (!TAILQ_EMPTY(&asoc->send_queue)) {
+ chk = TAILQ_FIRST(&asoc->send_queue);
+ while (chk) {
+ TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
+ if (chk->data) {
+ if (so) {
+ /* Still a socket? */
+ sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb,
+ SCTP_NOTIFY_DATAGRAM_UNSENT, chk, SCTP_SO_LOCKED);
+ }
+ if (chk->data) {
+ sctp_m_freem(chk->data);
+ chk->data = NULL;
+ }
+ }
+ if (chk->holds_key_ref)
+ sctp_auth_key_release(stcb, chk->auth_keyid);
+ ccnt++;
+ if (chk->whoTo) {
+ sctp_free_remote_addr(chk->whoTo);
+ chk->whoTo = NULL;
+ }
+ SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_chunk), chk);
+ SCTP_DECR_CHK_COUNT();
+ /* sa_ignore FREED_MEMORY */
+ chk = TAILQ_FIRST(&asoc->send_queue);
+ }
+ }
+/*
+ if (ccnt) {
+ printf("Freed %d from send_queue\n", ccnt);
+ ccnt = 0;
+ }
+*/
+ /* sent queue SHOULD be empty */
+ if (!TAILQ_EMPTY(&asoc->sent_queue)) {
+ chk = TAILQ_FIRST(&asoc->sent_queue);
+ while (chk) {
+ TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
+ if (chk->data) {
+ if (so) {
+ /* Still a socket? */
+ sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb,
+ SCTP_NOTIFY_DATAGRAM_SENT, chk, SCTP_SO_LOCKED);
+ }
+ if (chk->data) {
+ sctp_m_freem(chk->data);
+ chk->data = NULL;
+ }
+ }
+ if (chk->holds_key_ref)
+ sctp_auth_key_release(stcb, chk->auth_keyid);
+ ccnt++;
+ sctp_free_remote_addr(chk->whoTo);
+ SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_chunk), chk);
+ SCTP_DECR_CHK_COUNT();
+ /* sa_ignore FREED_MEMORY */
+ chk = TAILQ_FIRST(&asoc->sent_queue);
+ }
+ }
+/*
+ if (ccnt) {
+ printf("Freed %d from sent_queue\n", ccnt);
+ ccnt = 0;
+ }
+*/
+ /* control queue MAY not be empty */
+ if (!TAILQ_EMPTY(&asoc->control_send_queue)) {
+ chk = TAILQ_FIRST(&asoc->control_send_queue);
+ while (chk) {
+ TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
+ if (chk->data) {
+ sctp_m_freem(chk->data);
+ chk->data = NULL;
+ }
+ if (chk->holds_key_ref)
+ sctp_auth_key_release(stcb, chk->auth_keyid);
+ ccnt++;
+ sctp_free_remote_addr(chk->whoTo);
+ SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_chunk), chk);
+ SCTP_DECR_CHK_COUNT();
+ /* sa_ignore FREED_MEMORY */
+ chk = TAILQ_FIRST(&asoc->control_send_queue);
+ }
+ }
+/*
+ if (ccnt) {
+ printf("Freed %d from ctrl_queue\n", ccnt);
+ ccnt = 0;
+ }
+*/
+
+ /* ASCONF queue MAY not be empty */
+ if (!TAILQ_EMPTY(&asoc->asconf_send_queue)) {
+ chk = TAILQ_FIRST(&asoc->asconf_send_queue);
+ while (chk) {
+ TAILQ_REMOVE(&asoc->asconf_send_queue, chk, sctp_next);
+ if (chk->data) {
+ sctp_m_freem(chk->data);
+ chk->data = NULL;
+ }
+ if (chk->holds_key_ref)
+ sctp_auth_key_release(stcb, chk->auth_keyid);
+ ccnt++;
+ sctp_free_remote_addr(chk->whoTo);
+ SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_chunk), chk);
+ SCTP_DECR_CHK_COUNT();
+ /* sa_ignore FREED_MEMORY */
+ chk = TAILQ_FIRST(&asoc->asconf_send_queue);
+ }
+ }
+/*
+ if (ccnt) {
+ printf("Freed %d from asconf_queue\n", ccnt);
+ ccnt = 0;
+ }
+*/
+ if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
+ chk = TAILQ_FIRST(&asoc->reasmqueue);
+ while (chk) {
+ TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
+ if (chk->data) {
+ sctp_m_freem(chk->data);
+ chk->data = NULL;
+ }
+ if (chk->holds_key_ref)
+ sctp_auth_key_release(stcb, chk->auth_keyid);
+ sctp_free_remote_addr(chk->whoTo);
+ ccnt++;
+ SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_chunk), chk);
+ SCTP_DECR_CHK_COUNT();
+ /* sa_ignore FREED_MEMORY */
+ chk = TAILQ_FIRST(&asoc->reasmqueue);
+ }
+ }
+/*
+ if (ccnt) {
+ printf("Freed %d from reasm_queue\n", ccnt);
+ ccnt = 0;
+ }
+*/
+ if (asoc->mapping_array) {
+ SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
+ asoc->mapping_array = NULL;
+ }
+ if (asoc->nr_mapping_array) {
+ SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
+ asoc->nr_mapping_array = NULL;
+ }
+ /* the stream outs */
+ if (asoc->strmout) {
+ SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
+ asoc->strmout = NULL;
+ }
+ asoc->strm_realoutsize = asoc->streamoutcnt = 0;
+ if (asoc->strmin) {
+ struct sctp_queued_to_read *ctl;
+
+ for (i = 0; i < asoc->streamincnt; i++) {
+ if (!TAILQ_EMPTY(&asoc->strmin[i].inqueue)) {
+ /* We have something on the stream-in queue */
+ ctl = TAILQ_FIRST(&asoc->strmin[i].inqueue);
+ while (ctl) {
+ TAILQ_REMOVE(&asoc->strmin[i].inqueue,
+ ctl, next);
+ sctp_free_remote_addr(ctl->whoFrom);
+ if (ctl->data) {
+ sctp_m_freem(ctl->data);
+ ctl->data = NULL;
+ }
+ /*
+ * We don't free the address here
+ * since all the net's were freed
+ * above.
+ */
+ SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_readq), ctl);
+ SCTP_DECR_READQ_COUNT();
+ ctl = TAILQ_FIRST(&asoc->strmin[i].inqueue);
+ }
+ }
+ }
+ SCTP_FREE(asoc->strmin, SCTP_M_STRMI);
+ asoc->strmin = NULL;
+ }
+ asoc->streamincnt = 0;
+ while (!TAILQ_EMPTY(&asoc->nets)) {
+ /* sa_ignore FREED_MEMORY */
+ net = TAILQ_FIRST(&asoc->nets);
+ /* pull from list */
+ if ((SCTP_BASE_INFO(ipi_count_raddr) == 0) || (prev == net)) {
+#ifdef INVARIANTS
+ panic("no net's left alloc'ed, or list points to itself");
+#endif
+ break;
+ }
+ prev = net;
+ TAILQ_REMOVE(&asoc->nets, net, sctp_next);
+ sctp_free_remote_addr(net);
+ }
+
+ while (!LIST_EMPTY(&asoc->sctp_restricted_addrs)) {
+ /* sa_ignore FREED_MEMORY */
+ laddr = LIST_FIRST(&asoc->sctp_restricted_addrs);
+ sctp_remove_laddr(laddr);
+ }
+
+ /* pending asconf (address) parameters */
+ while (!TAILQ_EMPTY(&asoc->asconf_queue)) {
+ /* sa_ignore FREED_MEMORY */
+ aparam = TAILQ_FIRST(&asoc->asconf_queue);
+ TAILQ_REMOVE(&asoc->asconf_queue, aparam, next);
+ SCTP_FREE(aparam, SCTP_M_ASC_ADDR);
+ }
+ while (!TAILQ_EMPTY(&asoc->asconf_ack_sent)) {
+ /* sa_ignore FREED_MEMORY */
+ aack = TAILQ_FIRST(&asoc->asconf_ack_sent);
+ TAILQ_REMOVE(&asoc->asconf_ack_sent, aack, next);
+ if (aack->data != NULL) {
+ sctp_m_freem(aack->data);
+ }
+ SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_asconf_ack), aack);
+ }
+ /* clean up auth stuff */
+ if (asoc->local_hmacs)
+ sctp_free_hmaclist(asoc->local_hmacs);
+ if (asoc->peer_hmacs)
+ sctp_free_hmaclist(asoc->peer_hmacs);
+
+ if (asoc->local_auth_chunks)
+ sctp_free_chunklist(asoc->local_auth_chunks);
+ if (asoc->peer_auth_chunks)
+ sctp_free_chunklist(asoc->peer_auth_chunks);
+
+ sctp_free_authinfo(&asoc->authinfo);
+
+ shared_key = LIST_FIRST(&asoc->shared_keys);
+ while (shared_key) {
+ LIST_REMOVE(shared_key, next);
+ sctp_free_sharedkey(shared_key);
+ /* sa_ignore FREED_MEMORY */
+ shared_key = LIST_FIRST(&asoc->shared_keys);
+ }
+
+ /* Insert new items here :> */
+
+ /* Get rid of LOCK */
+ SCTP_TCB_LOCK_DESTROY(stcb);
+ SCTP_TCB_SEND_LOCK_DESTROY(stcb);
+ if (from_inpcbfree == SCTP_NORMAL_PROC) {
+ SCTP_INP_INFO_WUNLOCK();
+ SCTP_INP_RLOCK(inp);
+ }
+#ifdef SCTP_TRACK_FREED_ASOCS
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
+ /* now clean up the asoc itself */
+ SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_asoc), stcb);
+ SCTP_DECR_ASOC_COUNT();
+ } else {
+ LIST_INSERT_HEAD(&inp->sctp_asoc_free_list, stcb, sctp_tcblist);
+ }
+#else
+ SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_asoc), stcb);
+ SCTP_DECR_ASOC_COUNT();
+#endif
+ if (from_inpcbfree == SCTP_NORMAL_PROC) {
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
+ /*
+ * If it's NOT inp_free calling us AND sctp_close
+ * has been called, we call back...
+ */
+ SCTP_INP_RUNLOCK(inp);
+ /*
+ * This will start the kill timer (if we are the
+ * last one) since we still hold an increment on the
+ * refcount. But this is the only safe way to do
+ * this, since otherwise, if the socket closes at
+ * the same time we are here, we might collide in
+ * the cleanup.
+ */
+ sctp_inpcb_free(inp,
+ SCTP_FREE_SHOULD_USE_GRACEFUL_CLOSE,
+ SCTP_CALLED_DIRECTLY_NOCMPSET);
+ SCTP_INP_DECR_REF(inp);
+ goto out_of;
+ } else {
+ /* The socket is still open. */
+ SCTP_INP_DECR_REF(inp);
+ }
+ }
+ if (from_inpcbfree == SCTP_NORMAL_PROC) {
+ SCTP_INP_RUNLOCK(inp);
+ }
+out_of:
+ /* destroyed the asoc */
+#ifdef SCTP_LOG_CLOSING
+ sctp_log_closing(inp, NULL, 11);
+#endif
+ return (1);
+}
+
+
+
+/*
+ * determine if a destination is "reachable" based upon the addresses bound
+ * to the current endpoint (e.g. only v4 or v6 currently bound)
+ */
+/*
+ * FIX: if we allow assoc-level bindx(), then this needs to be fixed to use
+ * assoc level v4/v6 flags, as the assoc *may* not have the same address
+ * types bound as its endpoint
+ */
+int
+sctp_destination_is_reachable(struct sctp_tcb *stcb, struct sockaddr *destaddr)
+{
+ struct sctp_inpcb *inp;
+ int answer;
+
+ /*
+ * No locks here: the TCB, in all cases, is already locked and an
+ * assoc is up. Either an INP lock is applied by the caller (in the
+ * asconf case, when deleting an address) or NOT (in the HB case);
+ * however, if HB, the INP refcount is up and the INP will not be
+ * removed (on top of the fact that we have a TCB lock). So we only
+ * want to read sctp_flags, which says either bound-all or not; no
+ * protection is needed since, once an assoc is up, you can't change
+ * your binding.
+ */
+ inp = stcb->sctp_ep;
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
+ /* if bound all, destination is not restricted */
+ /*
+ * RRS: Question during lock work: Is this correct? If you
+ * are bound-all you still might need to obey the V4--V6
+ * flags??? IMO this bound-all stuff needs to be removed!
+ */
+ return (1);
+ }
+ /* NOTE: all "scope" checks are done when local addresses are added */
+ if (destaddr->sa_family == AF_INET6) {
+ answer = inp->ip_inp.inp.inp_vflag & INP_IPV6;
+ } else if (destaddr->sa_family == AF_INET) {
+ answer = inp->ip_inp.inp.inp_vflag & INP_IPV4;
+ } else {
+ /* invalid family, so it's unreachable */
+ answer = 0;
+ }
+ return (answer);
+}
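+
+/*
+ * Usage sketch (illustrative, not from the original sources): with the
+ * TCB locked, a caller walking the nets list can skip destinations whose
+ * address family is not bound on the endpoint:
+ *
+ *	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+ *		if (!sctp_destination_is_reachable(stcb,
+ *		    (struct sockaddr *)&net->ro._l_addr))
+ *			continue;
+ *		... use net as a candidate destination ...
+ *	}
+ *
+ * sctp_select_primary_destination() below uses this same pattern.
+ */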
+
+/*
+ * update the inp_vflags on an endpoint
+ */
+static void
+sctp_update_ep_vflag(struct sctp_inpcb *inp)
+{
+ struct sctp_laddr *laddr;
+
+ /* first clear the flag */
+ inp->ip_inp.inp.inp_vflag = 0;
+ /* set the flag based on addresses on the ep list */
+ LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
+ if (laddr->ifa == NULL) {
+ SCTPDBG(SCTP_DEBUG_PCB1, "%s: NULL ifa\n",
+ __FUNCTION__);
+ continue;
+ }
+ if (laddr->ifa->localifa_flags & SCTP_BEING_DELETED) {
+ continue;
+ }
+ if (laddr->ifa->address.sa.sa_family == AF_INET6) {
+ inp->ip_inp.inp.inp_vflag |= INP_IPV6;
+ } else if (laddr->ifa->address.sa.sa_family == AF_INET) {
+ inp->ip_inp.inp.inp_vflag |= INP_IPV4;
+ }
+ }
+}
+
+/*
+ * Add the address to the endpoint local address list. There is nothing
+ * to be done if we are bound to all addresses.
+ */
+void
+sctp_add_local_addr_ep(struct sctp_inpcb *inp, struct sctp_ifa *ifa, uint32_t action)
+{
+ struct sctp_laddr *laddr;
+ int fnd, error = 0;
+
+ fnd = 0;
+
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
+ /* You are already bound to all. You have it already */
+ return;
+ }
+ if (ifa->address.sa.sa_family == AF_INET6) {
+ if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
+ /* Can't bind a non-useable addr. */
+ return;
+ }
+ }
+ /* first, is it already present? */
+ LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
+ if (laddr->ifa == ifa) {
+ fnd = 1;
+ break;
+ }
+ }
+
+ if (fnd == 0) {
+ /* Not in the ep list */
+ error = sctp_insert_laddr(&inp->sctp_addr_list, ifa, action);
+ if (error != 0)
+ return;
+ inp->laddr_count++;
+ /* update inp_vflag flags */
+ if (ifa->address.sa.sa_family == AF_INET6) {
+ inp->ip_inp.inp.inp_vflag |= INP_IPV6;
+ } else if (ifa->address.sa.sa_family == AF_INET) {
+ inp->ip_inp.inp.inp_vflag |= INP_IPV4;
+ }
+ }
+ return;
+}
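+
+/*
+ * Note: adding an address can only widen inp_vflag (the code above ORs
+ * in INP_IPV4 or INP_IPV6), so the flags are updated in place; deleting
+ * an address may narrow them, which is why sctp_del_local_addr_ep()
+ * below recomputes the flags from scratch via sctp_update_ep_vflag().
+ */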
+
+
+/*
+ * select a new (hopefully reachable) destination net (should only be used
+ * when we deleted an ep addr that is the only usable source address to reach
+ * the destination net)
+ */
+static void
+sctp_select_primary_destination(struct sctp_tcb *stcb)
+{
+ struct sctp_nets *net;
+
+ TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+ /* for now, we'll just pick the first reachable one we find */
+ if (net->dest_state & SCTP_ADDR_UNCONFIRMED)
+ continue;
+ if (sctp_destination_is_reachable(stcb,
+ (struct sockaddr *)&net->ro._l_addr)) {
+ /* found a reachable destination */
+ stcb->asoc.primary_destination = net;
+ break;
+ }
+ }
+ /* I can't get there from here! ...we're gonna die shortly... */
+}
+
+
+/*
+ * Delete the address from the endpoint local address list. There is
+ * nothing to be done if we are bound to all addresses.
+ */
+void
+sctp_del_local_addr_ep(struct sctp_inpcb *inp, struct sctp_ifa *ifa)
+{
+ struct sctp_laddr *laddr;
+ int fnd;
+
+ fnd = 0;
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
+ /* You are already bound to all. You have it already */
+ return;
+ }
+ LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
+ if (laddr->ifa == ifa) {
+ fnd = 1;
+ break;
+ }
+ }
+ if (fnd && (inp->laddr_count < 2)) {
+ /* can't delete unless there are at LEAST 2 addresses */
+ return;
+ }
+ if (fnd) {
+ /*
+ * Clean up any use of this address: go through our
+ * associations and clear any last_used_address that matches
+ * this one; for each assoc, see if a new primary_destination
+ * is needed.
+ */
+ struct sctp_tcb *stcb;
+
+ /* clean up "next_addr_touse" */
+ if (inp->next_addr_touse == laddr)
+ /* delete this address */
+ inp->next_addr_touse = NULL;
+
+ /* clean up "last_used_address" */
+ LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
+ struct sctp_nets *net;
+
+ SCTP_TCB_LOCK(stcb);
+ if (stcb->asoc.last_used_address == laddr)
+ /* delete this address */
+ stcb->asoc.last_used_address = NULL;
+ /*
+ * Now spin through all the nets and purge any ref
+ * to laddr
+ */
+ TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+ if (net->ro._s_addr &&
+ (net->ro._s_addr->ifa == laddr->ifa)) {
+ /* Yep, purge src address selected */
+ sctp_rtentry_t *rt;
+
+ /* delete this address if cached */
+ rt = net->ro.ro_rt;
+ if (rt != NULL) {
+ RTFREE(rt);
+ net->ro.ro_rt = NULL;
+ }
+ sctp_free_ifa(net->ro._s_addr);
+ net->ro._s_addr = NULL;
+ net->src_addr_selected = 0;
+ }
+ }
+ SCTP_TCB_UNLOCK(stcb);
+ } /* for each tcb */
+ /* remove it from the ep list */
+ sctp_remove_laddr(laddr);
+ inp->laddr_count--;
+ /* update inp_vflag flags */
+ sctp_update_ep_vflag(inp);
+ }
+ return;
+}
+
+/*
+ * Add the address to the TCB local address restricted list.
+ * This is a "pending" address list (eg. addresses waiting for an
+ * ASCONF-ACK response) and cannot be used as a valid source address.
+ */
+void
+sctp_add_local_addr_restricted(struct sctp_tcb *stcb, struct sctp_ifa *ifa)
+{
+ struct sctp_inpcb *inp;
+ struct sctp_laddr *laddr;
+ struct sctpladdr *list;
+
+ /*
+ * Assumes the TCB is locked, and possibly the INP. May need to
+ * confirm/fix that if we need it and it is not the case.
+ */
+ list = &stcb->asoc.sctp_restricted_addrs;
+
+ inp = stcb->sctp_ep;
+ if (ifa->address.sa.sa_family == AF_INET6) {
+ if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
+ /* Can't bind a non-useable addr. */
+ return;
+ }
+ }
+ /* does the address already exist? */
+ LIST_FOREACH(laddr, list, sctp_nxt_addr) {
+ if (laddr->ifa == ifa) {
+ return;
+ }
+ }
+
+ /* add to the list */
+ (void)sctp_insert_laddr(list, ifa, 0);
+ return;
+}
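+
+/*
+ * Flow sketch (illustrative, not from the original sources): an address
+ * being added by ASCONF is parked on this restricted list until the peer
+ * confirms it:
+ *
+ *	sctp_add_local_addr_restricted(stcb, ifa);
+ *	... send the ASCONF ADD-IP, wait for the ASCONF-ACK ...
+ *	sctp_del_local_addr_restricted(stcb, ifa);
+ *
+ * While on the list, the address must not be picked as a source address.
+ */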
+
+/*
+ * insert an laddr entry with the given ifa for the desired list
+ */
+int
+sctp_insert_laddr(struct sctpladdr *list, struct sctp_ifa *ifa, uint32_t act)
+{
+ struct sctp_laddr *laddr;
+
+ laddr = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
+ if (laddr == NULL) {
+ /* out of memory? */
+ SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_PCB, EINVAL);
+ return (EINVAL);
+ }
+ SCTP_INCR_LADDR_COUNT();
+ bzero(laddr, sizeof(*laddr));
+ (void)SCTP_GETTIME_TIMEVAL(&laddr->start_time);
+ laddr->ifa = ifa;
+ laddr->action = act;
+ atomic_add_int(&ifa->refcount, 1);
+ /* insert it */
+ LIST_INSERT_HEAD(list, laddr, sctp_nxt_addr);
+
+ return (0);
+}
+
+/*
+ * Remove an laddr entry from the local address list (on an assoc)
+ */
+void
+sctp_remove_laddr(struct sctp_laddr *laddr)
+{
+
+ /* remove from the list */
+ LIST_REMOVE(laddr, sctp_nxt_addr);
+ sctp_free_ifa(laddr->ifa);
+ SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_laddr), laddr);
+ SCTP_DECR_LADDR_COUNT();
+}
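+
+/*
+ * Note that sctp_insert_laddr() above takes a reference on the ifa
+ * (atomic_add_int(&ifa->refcount, 1)) and sctp_remove_laddr() releases
+ * it via sctp_free_ifa(), so every insert must eventually be paired with
+ * a remove.
+ */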
+
+/*
+ * Remove a local address from the TCB local address restricted list
+ */
+void
+sctp_del_local_addr_restricted(struct sctp_tcb *stcb, struct sctp_ifa *ifa)
+{
+ struct sctp_inpcb *inp;
+ struct sctp_laddr *laddr;
+
+ /*
+ * This is called by asconf work. It is assumed that a) the TCB is
+ * locked and b) the INP is locked. This is true insofar as I can
+ * trace through the asconf entry code where I did these locks.
+ * Again, the ASCONF code is a bit different in that it often does
+ * lock the INP during its work. This must be so, since we don't
+ * want other procs looking up things while what they are looking
+ * up is changing :-D
+ */
+
+ inp = stcb->sctp_ep;
+ /* if subset bound and don't allow ASCONF's, can't delete last */
+ if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) == 0) &&
+ sctp_is_feature_off(inp, SCTP_PCB_FLAGS_DO_ASCONF)) {
+ if (stcb->sctp_ep->laddr_count < 2) {
+ /* can't delete last address */
+ return;
+ }
+ }
+ LIST_FOREACH(laddr, &stcb->asoc.sctp_restricted_addrs, sctp_nxt_addr) {
+ /* remove the address if it exists */
+ if (laddr->ifa == NULL)
+ continue;
+ if (laddr->ifa == ifa) {
+ sctp_remove_laddr(laddr);
+ return;
+ }
+ }
+
+ /* address not found! */
+ return;
+}
+
+/*
+ * Temporarily remove for __APPLE__ until we use the Tiger equivalents
+ */
+/* sysctl */
+static int sctp_max_number_of_assoc = SCTP_MAX_NUM_OF_ASOC;
+static int sctp_scale_up_for_address = SCTP_SCALE_FOR_ADDR;
+
+void
+sctp_pcb_init()
+{
+ /*
+ * SCTP initialization for the PCB structures; should be called by
+ * the sctp_init() function.
+ */
+ int i;
+ struct timeval tv;
+
+ if (SCTP_BASE_VAR(sctp_pcb_initialized) != 0) {
+ /* error I was called twice */
+ return;
+ }
+ SCTP_BASE_VAR(sctp_pcb_initialized) = 1;
+
+#if defined(SCTP_LOCAL_TRACE_BUF)
+ bzero(&SCTP_BASE_SYSCTL(sctp_log), sizeof(struct sctp_log));
+#endif
+ (void)SCTP_GETTIME_TIMEVAL(&tv);
+#if defined(__FreeBSD__) && defined(SMP) && defined(SCTP_USE_PERCPU_STAT)
+ SCTP_BASE_STATS[PCPU_GET(cpuid)].sctps_discontinuitytime.tv_sec = (uint32_t) tv.tv_sec;
+ SCTP_BASE_STATS[PCPU_GET(cpuid)].sctps_discontinuitytime.tv_usec = (uint32_t) tv.tv_usec;
+#else
+ SCTP_BASE_STAT(sctps_discontinuitytime).tv_sec = (uint32_t) tv.tv_sec;
+ SCTP_BASE_STAT(sctps_discontinuitytime).tv_usec = (uint32_t) tv.tv_usec;
+#endif
+ /* init the empty list of (All) Endpoints */
+ LIST_INIT(&SCTP_BASE_INFO(listhead));
+
+
+ /* init the hash table of endpoints */
+ TUNABLE_INT_FETCH("net.inet.sctp.tcbhashsize", &SCTP_BASE_SYSCTL(sctp_hashtblsize));
+ TUNABLE_INT_FETCH("net.inet.sctp.pcbhashsize", &SCTP_BASE_SYSCTL(sctp_pcbtblsize));
+ TUNABLE_INT_FETCH("net.inet.sctp.chunkscale", &SCTP_BASE_SYSCTL(sctp_chunkscale));
+ SCTP_BASE_INFO(sctp_asochash) = SCTP_HASH_INIT((SCTP_BASE_SYSCTL(sctp_hashtblsize) * 31),
+ &SCTP_BASE_INFO(hashasocmark));
+ SCTP_BASE_INFO(sctp_ephash) = SCTP_HASH_INIT(SCTP_BASE_SYSCTL(sctp_hashtblsize),
+ &SCTP_BASE_INFO(hashmark));
+ SCTP_BASE_INFO(sctp_tcpephash) = SCTP_HASH_INIT(SCTP_BASE_SYSCTL(sctp_hashtblsize),
+ &SCTP_BASE_INFO(hashtcpmark));
+ SCTP_BASE_INFO(hashtblsize) = SCTP_BASE_SYSCTL(sctp_hashtblsize);
+
+
+ SCTP_BASE_INFO(sctp_vrfhash) = SCTP_HASH_INIT(SCTP_SIZE_OF_VRF_HASH,
+ &SCTP_BASE_INFO(hashvrfmark));
+
+ SCTP_BASE_INFO(vrf_ifn_hash) = SCTP_HASH_INIT(SCTP_VRF_IFN_HASH_SIZE,
+ &SCTP_BASE_INFO(vrf_ifn_hashmark));
+ /* init the zones */
+ /*
+ * FIX ME: Should check for NULL returns, but if it does fail we are
+ * doomed to panic anyways... add later maybe.
+ */
+ SCTP_ZONE_INIT(SCTP_BASE_INFO(ipi_zone_ep), "sctp_ep",
+ sizeof(struct sctp_inpcb), maxsockets);
+
+ SCTP_ZONE_INIT(SCTP_BASE_INFO(ipi_zone_asoc), "sctp_asoc",
+ sizeof(struct sctp_tcb), sctp_max_number_of_assoc);
+
+ SCTP_ZONE_INIT(SCTP_BASE_INFO(ipi_zone_laddr), "sctp_laddr",
+ sizeof(struct sctp_laddr),
+ (sctp_max_number_of_assoc * sctp_scale_up_for_address));
+
+ SCTP_ZONE_INIT(SCTP_BASE_INFO(ipi_zone_net), "sctp_raddr",
+ sizeof(struct sctp_nets),
+ (sctp_max_number_of_assoc * sctp_scale_up_for_address));
+
+ SCTP_ZONE_INIT(SCTP_BASE_INFO(ipi_zone_chunk), "sctp_chunk",
+ sizeof(struct sctp_tmit_chunk),
+ (sctp_max_number_of_assoc * SCTP_BASE_SYSCTL(sctp_chunkscale)));
+
+ SCTP_ZONE_INIT(SCTP_BASE_INFO(ipi_zone_readq), "sctp_readq",
+ sizeof(struct sctp_queued_to_read),
+ (sctp_max_number_of_assoc * SCTP_BASE_SYSCTL(sctp_chunkscale)));
+
+ SCTP_ZONE_INIT(SCTP_BASE_INFO(ipi_zone_strmoq), "sctp_stream_msg_out",
+ sizeof(struct sctp_stream_queue_pending),
+ (sctp_max_number_of_assoc * SCTP_BASE_SYSCTL(sctp_chunkscale)));
+
+ SCTP_ZONE_INIT(SCTP_BASE_INFO(ipi_zone_asconf), "sctp_asconf",
+ sizeof(struct sctp_asconf),
+ (sctp_max_number_of_assoc * SCTP_BASE_SYSCTL(sctp_chunkscale)));
+
+ SCTP_ZONE_INIT(SCTP_BASE_INFO(ipi_zone_asconf_ack), "sctp_asconf_ack",
+ sizeof(struct sctp_asconf_ack),
+ (sctp_max_number_of_assoc * SCTP_BASE_SYSCTL(sctp_chunkscale)));
+
+
+ /* Master Lock INIT for info structure */
+ SCTP_INP_INFO_LOCK_INIT();
+ SCTP_STATLOG_INIT_LOCK();
+
+ SCTP_IPI_COUNT_INIT();
+ SCTP_IPI_ADDR_INIT();
+#ifdef SCTP_PACKET_LOGGING
+ SCTP_IP_PKTLOG_INIT();
+#endif
+ LIST_INIT(&SCTP_BASE_INFO(addr_wq));
+
+ SCTP_WQ_ADDR_INIT();
+ /* not sure if we need all the counts */
+ SCTP_BASE_INFO(ipi_count_ep) = 0;
+ /* assoc/tcb zone info */
+ SCTP_BASE_INFO(ipi_count_asoc) = 0;
+ /* local addrlist zone info */
+ SCTP_BASE_INFO(ipi_count_laddr) = 0;
+ /* remote addrlist zone info */
+ SCTP_BASE_INFO(ipi_count_raddr) = 0;
+ /* chunk info */
+ SCTP_BASE_INFO(ipi_count_chunk) = 0;
+
+ /* socket queue zone info */
+ SCTP_BASE_INFO(ipi_count_readq) = 0;
+
+ /* stream out queue count */
+ SCTP_BASE_INFO(ipi_count_strmoq) = 0;
+
+ SCTP_BASE_INFO(ipi_free_strmoq) = 0;
+ SCTP_BASE_INFO(ipi_free_chunks) = 0;
+
+ SCTP_OS_TIMER_INIT(&SCTP_BASE_INFO(addr_wq_timer.timer));
+
+ /* Init the TIMEWAIT list */
+ for (i = 0; i < SCTP_STACK_VTAG_HASH_SIZE; i++) {
+ LIST_INIT(&SCTP_BASE_INFO(vtag_timewait)[i]);
+ }
+
+ sctp_startup_iterator();
+
+ /*
+ * Init the default VRF, which for BSD is the only one; other OSes
+ * may have more. But initially they must start with one and then
+ * add VRFs as addresses are added.
+ */
+ sctp_init_vrf_list(SCTP_DEFAULT_VRF);
+
+}
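+
+/*
+ * Pairing sketch (illustrative, not from the original sources): the stack
+ * glue is expected to call these once per load/unload cycle:
+ *
+ *	sctp_pcb_init();
+ *	... stack runs, endpoints and associations come and go ...
+ *	sctp_pcb_finish();
+ *
+ * A second call to sctp_pcb_init() is a no-op thanks to the
+ * sctp_pcb_initialized guard above.
+ */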
+
+/*
+ * Assumes that the SCTP_BASE_INFO() lock is NOT held.
+ */
+void
+sctp_pcb_finish(void)
+{
+ struct sctp_vrflist *vrf_bucket;
+ struct sctp_vrf *vrf;
+ struct sctp_ifn *ifn;
+ struct sctp_ifa *ifa;
+ struct sctpvtaghead *chain;
+ struct sctp_tagblock *twait_block, *prev_twait_block;
+ struct sctp_laddr *wi;
+ int i;
+
+ /*
+ * In FreeBSD the iterator thread never exits, but we do clean up.
+ * The only way FreeBSD reaches here is if we have VRFs, but we
+ * still add the ifdef to make it compile on old versions.
+ */
+ {
+ struct sctp_iterator *it, *nit;
+
+ SCTP_IPI_ITERATOR_WQ_LOCK();
+ it = TAILQ_FIRST(&sctp_it_ctl.iteratorhead);
+ while (it) {
+ nit = TAILQ_NEXT(it, sctp_nxt_itr);
+ if (it->vn != curvnet) {
+ it = nit;
+ continue;
+ }
+ TAILQ_REMOVE(&sctp_it_ctl.iteratorhead,
+ it, sctp_nxt_itr);
+ if (it->function_atend != NULL) {
+ (*it->function_atend) (it->pointer, it->val);
+ }
+ SCTP_FREE(it, SCTP_M_ITER);
+ it = nit;
+ }
+ SCTP_IPI_ITERATOR_WQ_UNLOCK();
+ SCTP_ITERATOR_LOCK();
+ if ((sctp_it_ctl.cur_it) &&
+ (sctp_it_ctl.cur_it->vn == curvnet)) {
+ sctp_it_ctl.iterator_flags |= SCTP_ITERATOR_STOP_CUR_IT;
+ }
+ SCTP_ITERATOR_UNLOCK();
+ }
+
+ SCTP_OS_TIMER_STOP(&SCTP_BASE_INFO(addr_wq_timer.timer));
+ SCTP_WQ_ADDR_LOCK();
+ while ((wi = LIST_FIRST(&SCTP_BASE_INFO(addr_wq))) != NULL) {
+ LIST_REMOVE(wi, sctp_nxt_addr);
+ SCTP_DECR_LADDR_COUNT();
+ SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_laddr), wi);
+ }
+ SCTP_WQ_ADDR_UNLOCK();
+
+ /*
+ * free the vrf/ifn/ifa lists and hashes (be sure address monitor is
+ * destroyed first).
+ */
+ vrf_bucket = &SCTP_BASE_INFO(sctp_vrfhash)[(SCTP_DEFAULT_VRFID & SCTP_BASE_INFO(hashvrfmark))];
+ while ((vrf = LIST_FIRST(vrf_bucket)) != NULL) {
+ while ((ifn = LIST_FIRST(&vrf->ifnlist)) != NULL) {
+ while ((ifa = LIST_FIRST(&ifn->ifalist)) != NULL) {
+ /* free the ifa */
+ LIST_REMOVE(ifa, next_bucket);
+ LIST_REMOVE(ifa, next_ifa);
+ SCTP_FREE(ifa, SCTP_M_IFA);
+ }
+ /* free the ifn */
+ LIST_REMOVE(ifn, next_bucket);
+ LIST_REMOVE(ifn, next_ifn);
+ SCTP_FREE(ifn, SCTP_M_IFN);
+ }
+ SCTP_HASH_FREE(vrf->vrf_addr_hash, vrf->vrf_addr_hashmark);
+ /* free the vrf */
+ LIST_REMOVE(vrf, next_vrf);
+ SCTP_FREE(vrf, SCTP_M_VRF);
+ }
+ /* free the vrf hashes */
+ SCTP_HASH_FREE(SCTP_BASE_INFO(sctp_vrfhash), SCTP_BASE_INFO(hashvrfmark));
+ SCTP_HASH_FREE(SCTP_BASE_INFO(vrf_ifn_hash), SCTP_BASE_INFO(vrf_ifn_hashmark));
+
+ /*
+ * free the TIMEWAIT list elements malloc'd in the function
+ * sctp_add_vtag_to_timewait()...
+ */
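+ /*
+ * Note: LIST_FOREACH() fetches the next pointer from the current
+ * element, so each tag block is freed one iteration late (via
+ * prev_twait_block) and the last block is freed after the loop.
+ */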
+ for (i = 0; i < SCTP_STACK_VTAG_HASH_SIZE; i++) {
+ chain = &SCTP_BASE_INFO(vtag_timewait)[i];
+ if (!LIST_EMPTY(chain)) {
+ prev_twait_block = NULL;
+ LIST_FOREACH(twait_block, chain, sctp_nxt_tagblock) {
+ if (prev_twait_block) {
+ SCTP_FREE(prev_twait_block, SCTP_M_TIMW);
+ }
+ prev_twait_block = twait_block;
+ }
+ SCTP_FREE(prev_twait_block, SCTP_M_TIMW);
+ }
+ }
+
+ /* free the locks and mutexes */
+#ifdef SCTP_PACKET_LOGGING
+ SCTP_IP_PKTLOG_DESTROY();
+#endif
+ SCTP_IPI_ADDR_DESTROY();
+ SCTP_STATLOG_DESTROY();
+ SCTP_INP_INFO_LOCK_DESTROY();
+
+ SCTP_WQ_ADDR_DESTROY();
+
+ SCTP_ZONE_DESTROY(SCTP_BASE_INFO(ipi_zone_ep));
+ SCTP_ZONE_DESTROY(SCTP_BASE_INFO(ipi_zone_asoc));
+ SCTP_ZONE_DESTROY(SCTP_BASE_INFO(ipi_zone_laddr));
+ SCTP_ZONE_DESTROY(SCTP_BASE_INFO(ipi_zone_net));
+ SCTP_ZONE_DESTROY(SCTP_BASE_INFO(ipi_zone_chunk));
+ SCTP_ZONE_DESTROY(SCTP_BASE_INFO(ipi_zone_readq));
+ SCTP_ZONE_DESTROY(SCTP_BASE_INFO(ipi_zone_strmoq));
+ SCTP_ZONE_DESTROY(SCTP_BASE_INFO(ipi_zone_asconf));
+ SCTP_ZONE_DESTROY(SCTP_BASE_INFO(ipi_zone_asconf_ack));
+ /* Get rid of other stuff too */
+ if (SCTP_BASE_INFO(sctp_asochash) != NULL)
+ SCTP_HASH_FREE(SCTP_BASE_INFO(sctp_asochash), SCTP_BASE_INFO(hashasocmark));
+ if (SCTP_BASE_INFO(sctp_ephash) != NULL)
+ SCTP_HASH_FREE(SCTP_BASE_INFO(sctp_ephash), SCTP_BASE_INFO(hashmark));
+ if (SCTP_BASE_INFO(sctp_tcpephash) != NULL)
+ SCTP_HASH_FREE(SCTP_BASE_INFO(sctp_tcpephash), SCTP_BASE_INFO(hashtcpmark));
+
+}
+
+
+int
+sctp_load_addresses_from_init(struct sctp_tcb *stcb, struct mbuf *m,
+ int iphlen, int offset, int limit, struct sctphdr *sh,
+ struct sockaddr *altsa)
+{
+ /*
+ * Grub through the INIT pulling addresses and loading them to the
+ * nets structure in the asoc. The from address in the mbuf should
+ * also be loaded (if it is not already). This routine can be called
+ * with either an INIT or an INIT-ACK, as long as m points to the IP
+ * packet and offset points to the beginning of the parameters.
+ */
+ struct sctp_inpcb *inp, *l_inp;
+ struct sctp_nets *net, *net_tmp;
+ struct ip *iph;
+ struct sctp_paramhdr *phdr, parm_buf;
+ struct sctp_tcb *stcb_tmp;
+ uint16_t ptype, plen;
+ struct sockaddr *sa;
+ struct sockaddr_storage dest_store;
+ struct sockaddr *local_sa = (struct sockaddr *)&dest_store;
+ struct sockaddr_in sin;
+ struct sockaddr_in6 sin6;
+ uint8_t random_store[SCTP_PARAM_BUFFER_SIZE];
+ struct sctp_auth_random *p_random = NULL;
+ uint16_t random_len = 0;
+ uint8_t hmacs_store[SCTP_PARAM_BUFFER_SIZE];
+ struct sctp_auth_hmac_algo *hmacs = NULL;
+ uint16_t hmacs_len = 0;
+ uint8_t saw_asconf = 0;
+ uint8_t saw_asconf_ack = 0;
+ uint8_t chunks_store[SCTP_PARAM_BUFFER_SIZE];
+ struct sctp_auth_chunk_list *chunks = NULL;
+ uint16_t num_chunks = 0;
+ sctp_key_t *new_key;
+ uint32_t keylen;
+ int got_random = 0, got_hmacs = 0, got_chklist = 0;
+
+ /* First get the destination address setup too. */
+ memset(&sin, 0, sizeof(sin));
+ memset(&sin6, 0, sizeof(sin6));
+
+ sin.sin_family = AF_INET;
+ sin.sin_len = sizeof(sin);
+ sin.sin_port = stcb->rport;
+
+ sin6.sin6_family = AF_INET6;
+ sin6.sin6_len = sizeof(struct sockaddr_in6);
+ sin6.sin6_port = stcb->rport;
+ if (altsa == NULL) {
+ iph = mtod(m, struct ip *);
+ switch (iph->ip_v) {
+ case IPVERSION:
+ {
+ /* it's IPv4 */
+ struct sockaddr_in *sin_2;
+
+ sin_2 = (struct sockaddr_in *)(local_sa);
+ memset(sin_2, 0, sizeof(sin));
+ sin_2->sin_family = AF_INET;
+ sin_2->sin_len = sizeof(sin);
+ sin_2->sin_port = sh->dest_port;
+ sin_2->sin_addr.s_addr = iph->ip_dst.s_addr;
+ sin.sin_addr = iph->ip_src;
+ sa = (struct sockaddr *)&sin;
+ break;
+ }
+#ifdef INET6
+ case IPV6_VERSION >> 4:
+ {
+ /* it's IPv6 */
+ struct ip6_hdr *ip6;
+ struct sockaddr_in6 *sin6_2;
+
+ ip6 = mtod(m, struct ip6_hdr *);
+ sin6_2 = (struct sockaddr_in6 *)(local_sa);
+ memset(sin6_2, 0, sizeof(sin6));
+ sin6_2->sin6_family = AF_INET6;
+ sin6_2->sin6_len = sizeof(struct sockaddr_in6);
+ sin6_2->sin6_port = sh->dest_port;
+ sin6.sin6_addr = ip6->ip6_src;
+ sa = (struct sockaddr *)&sin6;
+ break;
+ }
+#endif
+ default:
+ return (-1);
+ break;
+ }
+ } else {
+ /*
+ * For cookies we use the src address NOT from the packet
+ * but from the original INIT
+ */
+ sa = altsa;
+ }
+ /* Turn off ECN until we get through all params */
+ stcb->asoc.ecn_allowed = 0;
+ TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+ /* mark all addresses that we have currently on the list */
+ net->dest_state |= SCTP_ADDR_NOT_IN_ASSOC;
+ }
+ /* does the source address already exist? if so skip it */
+ l_inp = inp = stcb->sctp_ep;
+
+ atomic_add_int(&stcb->asoc.refcnt, 1);
+ stcb_tmp = sctp_findassociation_ep_addr(&inp, sa, &net_tmp, local_sa, stcb);
+ atomic_add_int(&stcb->asoc.refcnt, -1);
+
+ if ((stcb_tmp == NULL && inp == stcb->sctp_ep) || inp == NULL) {
+ /* we must add the source address */
+ /* no scope set here since we have a tcb already. */
+ if ((sa->sa_family == AF_INET) &&
+ (stcb->asoc.ipv4_addr_legal)) {
+ if (sctp_add_remote_addr(stcb, sa, SCTP_DONOT_SETSCOPE, SCTP_LOAD_ADDR_2)) {
+ return (-1);
+ }
+ } else if ((sa->sa_family == AF_INET6) &&
+ (stcb->asoc.ipv6_addr_legal)) {
+ if (sctp_add_remote_addr(stcb, sa, SCTP_DONOT_SETSCOPE, SCTP_LOAD_ADDR_3)) {
+ return (-2);
+ }
+ }
+ } else {
+ if (net_tmp != NULL && stcb_tmp == stcb) {
+ net_tmp->dest_state &= ~SCTP_ADDR_NOT_IN_ASSOC;
+ } else if (stcb_tmp != stcb) {
+ /* It belongs to another association? */
+ if (stcb_tmp)
+ SCTP_TCB_UNLOCK(stcb_tmp);
+ return (-3);
+ }
+ }
+ if (stcb->asoc.state == 0) {
+ /* the assoc was freed? */
+ return (-4);
+ }
+ /*
+ * peer must explicitly turn this on. This may have been initialized
+ * to be "on" in order to allow local addr changes while INIT's are
+ * in flight.
+ */
+ stcb->asoc.peer_supports_asconf = 0;
+ /* now we must go through each of the params. */
+ phdr = sctp_get_next_param(m, offset, &parm_buf, sizeof(parm_buf));
+ while (phdr) {
+ ptype = ntohs(phdr->param_type);
+ plen = ntohs(phdr->param_length);
+ /*
+ * printf("ptype => %0x, plen => %d\n", (uint32_t)ptype,
+ * (int)plen);
+ */
+ if (offset + plen > limit) {
+ break;
+ }
+ if (plen == 0) {
+ break;
+ }
+ if (ptype == SCTP_IPV4_ADDRESS) {
+ if (stcb->asoc.ipv4_addr_legal) {
+ struct sctp_ipv4addr_param *p4, p4_buf;
+
+ /* ok get the v4 address and check/add */
+ phdr = sctp_get_next_param(m, offset,
+ (struct sctp_paramhdr *)&p4_buf,
+ sizeof(p4_buf));
+ if (plen != sizeof(struct sctp_ipv4addr_param) ||
+ phdr == NULL) {
+ return (-5);
+ }
+ p4 = (struct sctp_ipv4addr_param *)phdr;
+ sin.sin_addr.s_addr = p4->addr;
+ if (IN_MULTICAST(ntohl(sin.sin_addr.s_addr))) {
+ /* Skip multi-cast addresses */
+ goto next_param;
+ }
+ if ((sin.sin_addr.s_addr == INADDR_BROADCAST) ||
+ (sin.sin_addr.s_addr == INADDR_ANY)) {
+ goto next_param;
+ }
+ sa = (struct sockaddr *)&sin;
+ inp = stcb->sctp_ep;
+ atomic_add_int(&stcb->asoc.refcnt, 1);
+ stcb_tmp = sctp_findassociation_ep_addr(&inp, sa, &net,
+ local_sa, stcb);
+ atomic_add_int(&stcb->asoc.refcnt, -1);
+
+ if ((stcb_tmp == NULL && inp == stcb->sctp_ep) ||
+ inp == NULL) {
+ /* we must add the source address */
+ /*
+ * no scope set since we have a tcb
+ * already
+ */
+
+ /*
+ * we must validate the state again
+ * here
+ */
+ add_it_now:
+ if (stcb->asoc.state == 0) {
+ /* the assoc was freed? */
+ return (-7);
+ }
+ if (sctp_add_remote_addr(stcb, sa, SCTP_DONOT_SETSCOPE, SCTP_LOAD_ADDR_4)) {
+ return (-8);
+ }
+ } else if (stcb_tmp == stcb) {
+ if (stcb->asoc.state == 0) {
+ /* the assoc was freed? */
+ return (-10);
+ }
+ if (net != NULL) {
+ /* clear flag */
+ net->dest_state &=
+ ~SCTP_ADDR_NOT_IN_ASSOC;
+ }
+ } else {
+ /*
+ * strange, address is in another
+ * assoc? straighten out locks.
+ */
+ if (stcb_tmp) {
+ if (SCTP_GET_STATE(&stcb_tmp->asoc) & SCTP_STATE_COOKIE_WAIT) {
+ /*
+ * in setup state we
+ * abort this guy
+ */
+ sctp_abort_an_association(stcb_tmp->sctp_ep,
+ stcb_tmp, 1, NULL, 0);
+ goto add_it_now;
+ }
+ SCTP_TCB_UNLOCK(stcb_tmp);
+ }
+ if (stcb->asoc.state == 0) {
+ /* the assoc was freed? */
+ return (-12);
+ }
+ return (-13);
+ }
+ }
+ } else if (ptype == SCTP_IPV6_ADDRESS) {
+ if (stcb->asoc.ipv6_addr_legal) {
+ /* ok get the v6 address and check/add */
+ struct sctp_ipv6addr_param *p6, p6_buf;
+
+ phdr = sctp_get_next_param(m, offset,
+ (struct sctp_paramhdr *)&p6_buf,
+ sizeof(p6_buf));
+ if (plen != sizeof(struct sctp_ipv6addr_param) ||
+ phdr == NULL) {
+ return (-14);
+ }
+ p6 = (struct sctp_ipv6addr_param *)phdr;
+ memcpy((caddr_t)&sin6.sin6_addr, p6->addr,
+ sizeof(p6->addr));
+ if (IN6_IS_ADDR_MULTICAST(&sin6.sin6_addr)) {
+ /* Skip multi-cast addresses */
+ goto next_param;
+ }
+ if (IN6_IS_ADDR_LINKLOCAL(&sin6.sin6_addr)) {
+ /*
+ * Link-local makes no sense without
+ * a scope
+ */
+ goto next_param;
+ }
+ sa = (struct sockaddr *)&sin6;
+ inp = stcb->sctp_ep;
+ atomic_add_int(&stcb->asoc.refcnt, 1);
+ stcb_tmp = sctp_findassociation_ep_addr(&inp, sa, &net,
+ local_sa, stcb);
+ atomic_add_int(&stcb->asoc.refcnt, -1);
+ if (stcb_tmp == NULL &&
+ (inp == stcb->sctp_ep || inp == NULL)) {
+ /*
+ * we must validate the state again
+ * here
+ */
+ add_it_now6:
+ if (stcb->asoc.state == 0) {
+ /* the assoc was freed? */
+ return (-16);
+ }
+ /*
+ * we must add the address, no scope
+ * set
+ */
+ if (sctp_add_remote_addr(stcb, sa, SCTP_DONOT_SETSCOPE, SCTP_LOAD_ADDR_5)) {
+ return (-17);
+ }
+ } else if (stcb_tmp == stcb) {
+ /*
+ * we must validate the state again
+ * here
+ */
+ if (stcb->asoc.state == 0) {
+ /* the assoc was freed? */
+ return (-19);
+ }
+ if (net != NULL) {
+ /* clear flag */
+ net->dest_state &=
+ ~SCTP_ADDR_NOT_IN_ASSOC;
+ }
+ } else {
+ /*
+ * strange, address is in another
+ * assoc? straighten out locks.
+ */
+ if (stcb_tmp)
+ if (SCTP_GET_STATE(&stcb_tmp->asoc) & SCTP_STATE_COOKIE_WAIT) {
+ /*
+ * in setup state we
+ * abort this guy
+ */
+ sctp_abort_an_association(stcb_tmp->sctp_ep,
+ stcb_tmp, 1, NULL, 0);
+ goto add_it_now6;
+ }
+ SCTP_TCB_UNLOCK(stcb_tmp);
+
+ if (stcb->asoc.state == 0) {
+ /* the assoc was freed? */
+ return (-21);
+ }
+ return (-22);
+ }
+ }
+ } else if (ptype == SCTP_ECN_CAPABLE) {
+ stcb->asoc.ecn_allowed = 1;
+ } else if (ptype == SCTP_ULP_ADAPTATION) {
+ if (stcb->asoc.state != SCTP_STATE_OPEN) {
+ struct sctp_adaptation_layer_indication ai,
+ *aip;
+
+ phdr = sctp_get_next_param(m, offset,
+ (struct sctp_paramhdr *)&ai, sizeof(ai));
+ aip = (struct sctp_adaptation_layer_indication *)phdr;
+ if (aip) {
+ stcb->asoc.peers_adaptation = ntohl(aip->indication);
+ stcb->asoc.adaptation_needed = 1;
+ }
+ }
+ } else if (ptype == SCTP_SET_PRIM_ADDR) {
+ struct sctp_asconf_addr_param lstore, *fee;
+ struct sctp_asconf_addrv4_param *fii;
+ int lptype;
+ struct sockaddr *lsa = NULL;
+
+ stcb->asoc.peer_supports_asconf = 1;
+ if (plen > sizeof(lstore)) {
+ return (-23);
+ }
+ phdr = sctp_get_next_param(m, offset,
+ (struct sctp_paramhdr *)&lstore,
+ min(plen, sizeof(lstore)));
+ if (phdr == NULL) {
+ return (-24);
+ }
+ fee = (struct sctp_asconf_addr_param *)phdr;
+ lptype = ntohs(fee->addrp.ph.param_type);
+ if (lptype == SCTP_IPV4_ADDRESS) {
+ if (plen !=
+ sizeof(struct sctp_asconf_addrv4_param)) {
+ SCTP_PRINTF("Sizeof setprim in init/init ack not %d but %d - ignored\n",
+ (int)sizeof(struct sctp_asconf_addrv4_param),
+ plen);
+ } else {
+ fii = (struct sctp_asconf_addrv4_param *)fee;
+ sin.sin_addr.s_addr = fii->addrp.addr;
+ lsa = (struct sockaddr *)&sin;
+ }
+ } else if (lptype == SCTP_IPV6_ADDRESS) {
+ if (plen !=
+ sizeof(struct sctp_asconf_addr_param)) {
+ SCTP_PRINTF("Sizeof setprim (v6) in init/init ack not %d but %d - ignored\n",
+ (int)sizeof(struct sctp_asconf_addr_param),
+ plen);
+ } else {
+ memcpy(sin6.sin6_addr.s6_addr,
+ fee->addrp.addr,
+ sizeof(fee->addrp.addr));
+ lsa = (struct sockaddr *)&sin6;
+ }
+ }
+ if (lsa) {
+ (void)sctp_set_primary_addr(stcb, lsa, NULL);
+ }
+ } else if (ptype == SCTP_HAS_NAT_SUPPORT) {
+ stcb->asoc.peer_supports_nat = 1;
+ } else if (ptype == SCTP_PRSCTP_SUPPORTED) {
+ /* Peer supports pr-sctp */
+ stcb->asoc.peer_supports_prsctp = 1;
+ } else if (ptype == SCTP_SUPPORTED_CHUNK_EXT) {
+ /* A supported extension chunk */
+ struct sctp_supported_chunk_types_param *pr_supported;
+ uint8_t local_store[SCTP_PARAM_BUFFER_SIZE];
+ int num_ent, i;
+
+ phdr = sctp_get_next_param(m, offset,
+ (struct sctp_paramhdr *)&local_store, min(sizeof(local_store), plen));
+ if (phdr == NULL) {
+ return (-25);
+ }
+ stcb->asoc.peer_supports_asconf = 0;
+ stcb->asoc.peer_supports_prsctp = 0;
+ stcb->asoc.peer_supports_pktdrop = 0;
+ stcb->asoc.peer_supports_strreset = 0;
+ stcb->asoc.peer_supports_nr_sack = 0;
+ stcb->asoc.peer_supports_auth = 0;
+ pr_supported = (struct sctp_supported_chunk_types_param *)phdr;
+ num_ent = plen - sizeof(struct sctp_paramhdr);
+ for (i = 0; i < num_ent; i++) {
+ switch (pr_supported->chunk_types[i]) {
+ case SCTP_ASCONF:
+ case SCTP_ASCONF_ACK:
+ stcb->asoc.peer_supports_asconf = 1;
+ break;
+ case SCTP_FORWARD_CUM_TSN:
+ stcb->asoc.peer_supports_prsctp = 1;
+ break;
+ case SCTP_PACKET_DROPPED:
+ stcb->asoc.peer_supports_pktdrop = 1;
+ break;
+ case SCTP_NR_SELECTIVE_ACK:
+ stcb->asoc.peer_supports_nr_sack = 1;
+ break;
+ case SCTP_STREAM_RESET:
+ stcb->asoc.peer_supports_strreset = 1;
+ break;
+ case SCTP_AUTHENTICATION:
+ stcb->asoc.peer_supports_auth = 1;
+ break;
+ default:
+ /* one I have not learned yet */
+ break;
+
+ }
+ }
+ } else if (ptype == SCTP_ECN_NONCE_SUPPORTED) {
+ /* Peer supports ECN-nonce */
+ stcb->asoc.peer_supports_ecn_nonce = 1;
+ stcb->asoc.ecn_nonce_allowed = 1;
+ } else if (ptype == SCTP_RANDOM) {
+ if (plen > sizeof(random_store))
+ break;
+ if (got_random) {
+ /* already processed a RANDOM */
+ goto next_param;
+ }
+ phdr = sctp_get_next_param(m, offset,
+ (struct sctp_paramhdr *)random_store,
+ min(sizeof(random_store), plen));
+ if (phdr == NULL)
+ return (-26);
+ p_random = (struct sctp_auth_random *)phdr;
+ random_len = plen - sizeof(*p_random);
+ /* enforce the random length */
+ if (random_len != SCTP_AUTH_RANDOM_SIZE_REQUIRED) {
+ SCTPDBG(SCTP_DEBUG_AUTH1, "SCTP: invalid RANDOM len\n");
+ return (-27);
+ }
+ got_random = 1;
+ } else if (ptype == SCTP_HMAC_LIST) {
+ int num_hmacs;
+ int i;
+
+ if (plen > sizeof(hmacs_store))
+ break;
+ if (got_hmacs) {
+ /* already processed a HMAC list */
+ goto next_param;
+ }
+ phdr = sctp_get_next_param(m, offset,
+ (struct sctp_paramhdr *)hmacs_store,
+ min(plen, sizeof(hmacs_store)));
+ if (phdr == NULL)
+ return (-28);
+ hmacs = (struct sctp_auth_hmac_algo *)phdr;
+ hmacs_len = plen - sizeof(*hmacs);
+ num_hmacs = hmacs_len / sizeof(hmacs->hmac_ids[0]);
+ /* validate the hmac list */
+ if (sctp_verify_hmac_param(hmacs, num_hmacs)) {
+ return (-29);
+ }
+ if (stcb->asoc.peer_hmacs != NULL)
+ sctp_free_hmaclist(stcb->asoc.peer_hmacs);
+ stcb->asoc.peer_hmacs = sctp_alloc_hmaclist(num_hmacs);
+ if (stcb->asoc.peer_hmacs != NULL) {
+ for (i = 0; i < num_hmacs; i++) {
+ (void)sctp_auth_add_hmacid(stcb->asoc.peer_hmacs,
+ ntohs(hmacs->hmac_ids[i]));
+ }
+ }
+ got_hmacs = 1;
+ } else if (ptype == SCTP_CHUNK_LIST) {
+ int i;
+
+ if (plen > sizeof(chunks_store))
+ break;
+ if (got_chklist) {
+ /* already processed a Chunks list */
+ goto next_param;
+ }
+ phdr = sctp_get_next_param(m, offset,
+ (struct sctp_paramhdr *)chunks_store,
+ min(plen, sizeof(chunks_store)));
+ if (phdr == NULL)
+ return (-30);
+ chunks = (struct sctp_auth_chunk_list *)phdr;
+ num_chunks = plen - sizeof(*chunks);
+ if (stcb->asoc.peer_auth_chunks != NULL)
+ sctp_clear_chunklist(stcb->asoc.peer_auth_chunks);
+ else
+ stcb->asoc.peer_auth_chunks = sctp_alloc_chunklist();
+ for (i = 0; i < num_chunks; i++) {
+ (void)sctp_auth_add_chunk(chunks->chunk_types[i],
+ stcb->asoc.peer_auth_chunks);
+ /* record asconf/asconf-ack if listed */
+ if (chunks->chunk_types[i] == SCTP_ASCONF)
+ saw_asconf = 1;
+ if (chunks->chunk_types[i] == SCTP_ASCONF_ACK)
+ saw_asconf_ack = 1;
+
+ }
+ got_chklist = 1;
+ } else if ((ptype == SCTP_HEARTBEAT_INFO) ||
+ (ptype == SCTP_STATE_COOKIE) ||
+ (ptype == SCTP_UNRECOG_PARAM) ||
+ (ptype == SCTP_COOKIE_PRESERVE) ||
+ (ptype == SCTP_SUPPORTED_ADDRTYPE) ||
+ (ptype == SCTP_ADD_IP_ADDRESS) ||
+ (ptype == SCTP_DEL_IP_ADDRESS) ||
+ (ptype == SCTP_ERROR_CAUSE_IND) ||
+ (ptype == SCTP_SUCCESS_REPORT)) {
+ /* don't care */ ;
+ } else {
+ if ((ptype & 0x8000) == 0x0000) {
+ /*
+ * must stop processing the rest of the
+ * param's. Any report bits were handled
+ * with the call to
+ * sctp_arethere_unrecognized_parameters()
+ * when the INIT or INIT-ACK was first seen.
+ */
+ break;
+ }
+ }
+
+next_param:
+ offset += SCTP_SIZE32(plen);
+ if (offset >= limit) {
+ break;
+ }
+ phdr = sctp_get_next_param(m, offset, &parm_buf,
+ sizeof(parm_buf));
+ }
+ /* Now check to see if we need to purge any addresses */
+ for (net = TAILQ_FIRST(&stcb->asoc.nets); net != NULL; net = net_tmp) {
+ net_tmp = TAILQ_NEXT(net, sctp_next);
+ if ((net->dest_state & SCTP_ADDR_NOT_IN_ASSOC) ==
+ SCTP_ADDR_NOT_IN_ASSOC) {
+ /* This address has been removed from the asoc */
+ /* remove and free it */
+ stcb->asoc.numnets--;
+ TAILQ_REMOVE(&stcb->asoc.nets, net, sctp_next);
+ sctp_free_remote_addr(net);
+ if (net == stcb->asoc.primary_destination) {
+ stcb->asoc.primary_destination = NULL;
+ sctp_select_primary_destination(stcb);
+ }
+ }
+ }
+ /* validate authentication required parameters */
+ if (got_random && got_hmacs) {
+ stcb->asoc.peer_supports_auth = 1;
+ } else {
+ stcb->asoc.peer_supports_auth = 0;
+ }
+ if (!stcb->asoc.peer_supports_auth && got_chklist) {
+ /* peer does not support auth but sent a chunks list? */
+ return (-31);
+ }
+ if (!SCTP_BASE_SYSCTL(sctp_asconf_auth_nochk) && stcb->asoc.peer_supports_asconf &&
+ !stcb->asoc.peer_supports_auth) {
+ /* peer supports asconf but not auth? */
+ return (-32);
+ } else if ((stcb->asoc.peer_supports_asconf) && (stcb->asoc.peer_supports_auth) &&
+ ((saw_asconf == 0) || (saw_asconf_ack == 0))) {
+ return (-33);
+ }
+ /* concatenate the full random key */
+ keylen = sizeof(*p_random) + random_len + sizeof(*hmacs) + hmacs_len;
+ if (chunks != NULL) {
+ keylen += sizeof(*chunks) + num_chunks;
+ }
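+ /*
+ * Per the AUTH key derivation in RFC 4895, the key built below is
+ * the concatenation of the received parameters:
+ *
+ *	[ RANDOM ][ CHUNKS (if present) ][ HMAC-ALGO ]
+ *
+ * which is exactly the layout the bcopy() calls produce.
+ */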
+ new_key = sctp_alloc_key(keylen);
+ if (new_key != NULL) {
+ /* copy in the RANDOM */
+ if (p_random != NULL) {
+ keylen = sizeof(*p_random) + random_len;
+ bcopy(p_random, new_key->key, keylen);
+ }
+ /* append in the AUTH chunks */
+ if (chunks != NULL) {
+ bcopy(chunks, new_key->key + keylen,
+ sizeof(*chunks) + num_chunks);
+ keylen += sizeof(*chunks) + num_chunks;
+ }
+ /* append in the HMACs */
+ if (hmacs != NULL) {
+ bcopy(hmacs, new_key->key + keylen,
+ sizeof(*hmacs) + hmacs_len);
+ }
+ } else {
+ /* failed to get memory for the key */
+ return (-34);
+ }
+ if (stcb->asoc.authinfo.peer_random != NULL)
+ sctp_free_key(stcb->asoc.authinfo.peer_random);
+ stcb->asoc.authinfo.peer_random = new_key;
+ sctp_clear_cachedkeys(stcb, stcb->asoc.authinfo.assoc_keyid);
+ sctp_clear_cachedkeys(stcb, stcb->asoc.authinfo.recv_keyid);
+
+ return (0);
+}
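+
+/*
+ * The parameter walk above follows the generic SCTP TLV pattern; a minimal
+ * sketch of it (illustrative, not from the original sources):
+ *
+ *	phdr = sctp_get_next_param(m, offset, &parm_buf, sizeof(parm_buf));
+ *	while (phdr) {
+ *		ptype = ntohs(phdr->param_type);
+ *		plen = ntohs(phdr->param_length);
+ *		if (plen == 0 || offset + plen > limit)
+ *			break;
+ *		... dispatch on ptype ...
+ *		offset += SCTP_SIZE32(plen);	(params are 32-bit padded)
+ *		phdr = sctp_get_next_param(m, offset, &parm_buf,
+ *		    sizeof(parm_buf));
+ *	}
+ */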
+
+int
+sctp_set_primary_addr(struct sctp_tcb *stcb, struct sockaddr *sa,
+ struct sctp_nets *net)
+{
+ /* make sure the requested primary address exists in the assoc */
+ if (net == NULL && sa)
+ net = sctp_findnet(stcb, sa);
+
+ if (net == NULL) {
+ /* didn't find the requested primary address! */
+ return (-1);
+ } else {
+ /* set the primary address */
+ if (net->dest_state & SCTP_ADDR_UNCONFIRMED) {
+ /* Must be confirmed, so queue to set */
+ net->dest_state |= SCTP_ADDR_REQ_PRIMARY;
+ return (0);
+ }
+ stcb->asoc.primary_destination = net;
+ net->dest_state &= ~SCTP_ADDR_WAS_PRIMARY;
+ net = TAILQ_FIRST(&stcb->asoc.nets);
+ if (net != stcb->asoc.primary_destination) {
+ /*
+ * The first one on the list is NOT the primary.
+ * sctp_cmpaddr() is much more efficient if the
+ * primary is the first on the list, so make it so.
+ */
+ TAILQ_REMOVE(&stcb->asoc.nets, stcb->asoc.primary_destination, sctp_next);
+ TAILQ_INSERT_HEAD(&stcb->asoc.nets, stcb->asoc.primary_destination, sctp_next);
+ }
+ return (0);
+ }
+}
+
+int
+sctp_is_vtag_good(struct sctp_inpcb *inp, uint32_t tag, uint16_t lport, uint16_t rport, struct timeval *now, int save_in_twait)
+{
+ /*
+ * This function serves two purposes: it checks whether a TAG can be
+ * re-used, returning 1 for "yes, it is ok" and 0 for "don't use that
+ * tag". As a secondary function, it purges out old tags that can be
+ * removed.
+ */
+ struct sctpvtaghead *chain;
+ struct sctp_tagblock *twait_block;
+ struct sctpasochead *head;
+ struct sctp_tcb *stcb;
+ int i;
+
+ SCTP_INP_INFO_RLOCK();
+ head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(tag,
+ SCTP_BASE_INFO(hashasocmark))];
+ if (head == NULL) {
+ /* invalid vtag */
+ goto skip_vtag_check;
+ }
+ LIST_FOREACH(stcb, head, sctp_asocs) {
+ /*
+ * We choose not to lock anything here. TCB's can't be
+ * removed since we have the read lock, so they can't be
+ * freed on us, same thing for the INP. I may be wrong with
+ * this assumption, but we will go with it for now :-)
+ */
+ if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) {
+ continue;
+ }
+ if (stcb->asoc.my_vtag == tag) {
+ /* candidate */
+ if (stcb->rport != rport) {
+ continue;
+ }
+ if (stcb->sctp_ep->sctp_lport != lport) {
+ continue;
+ }
+ /* It's a used tag set */
+ SCTP_INP_INFO_RUNLOCK();
+ return (0);
+ }
+ }
+skip_vtag_check:
+
+ chain = &SCTP_BASE_INFO(vtag_timewait)[(tag % SCTP_STACK_VTAG_HASH_SIZE)];
+ /* Now, what about timed wait? */
+ if (!LIST_EMPTY(chain)) {
+ /*
+ * Block(s) are present; let's see if we have this tag in
+ * the list.
+ */
+ LIST_FOREACH(twait_block, chain, sctp_nxt_tagblock) {
+ for (i = 0; i < SCTP_NUMBER_IN_VTAG_BLOCK; i++) {
+ if (twait_block->vtag_block[i].v_tag == 0) {
+ /* not used */
+ continue;
+ } else if ((long)twait_block->vtag_block[i].tv_sec_at_expire <
+ now->tv_sec) {
+ /* Audit expires this guy */
+ twait_block->vtag_block[i].tv_sec_at_expire = 0;
+ twait_block->vtag_block[i].v_tag = 0;
+ twait_block->vtag_block[i].lport = 0;
+ twait_block->vtag_block[i].rport = 0;
+ } else if ((twait_block->vtag_block[i].v_tag == tag) &&
+ (twait_block->vtag_block[i].lport == lport) &&
+ (twait_block->vtag_block[i].rport == rport)) {
+ /* Bad tag, sorry :< */
+ SCTP_INP_INFO_RUNLOCK();
+ return (0);
+ }
+ }
+ }
+ }
+ SCTP_INP_INFO_RUNLOCK();
+ return (1);
+}
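+
+/*
+ * Usage sketch (illustrative, not from the original sources): a caller
+ * picking a fresh verification tag can simply loop until the check passes:
+ *
+ *	do {
+ *		tag = ... random draw ...;
+ *	} while (sctp_is_vtag_good(inp, tag, lport, rport, &now, 1) == 0);
+ *
+ * The random-draw step is assumed here, not quoted from the sources.
+ */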
+
+
+static sctp_assoc_t reneged_asoc_ids[256];
+static uint8_t reneged_at = 0;
+
+
+static void
+sctp_drain_mbufs(struct sctp_inpcb *inp, struct sctp_tcb *stcb)
+{
+ /*
+ * We must hunt this association for MBUF's past the cumack (i.e.
+ * out of order data that we can renege on).
+ */
+ struct sctp_association *asoc;
+ struct sctp_tmit_chunk *chk, *nchk;
+ uint32_t cumulative_tsn_p1;
+ struct sctp_queued_to_read *ctl, *nctl;
+ int cnt, strmat;
+ uint32_t gap, i;
+ int fnd = 0;
+
+ /* We look for anything larger than the cum-ack + 1 */
+
+ asoc = &stcb->asoc;
+ if (asoc->cumulative_tsn == asoc->highest_tsn_inside_map) {
+ /* none we can reneg on. */
+ return;
+ }
+ SCTP_STAT_INCR(sctps_protocol_drains_done);
+ cumulative_tsn_p1 = asoc->cumulative_tsn + 1;
+ cnt = 0;
+ /* First look in the re-assembly queue */
+ chk = TAILQ_FIRST(&asoc->reasmqueue);
+ while (chk) {
+ /* Get the next one */
+ nchk = TAILQ_NEXT(chk, sctp_next);
+ if (compare_with_wrap(chk->rec.data.TSN_seq,
+ cumulative_tsn_p1, MAX_TSN)) {
+ /* Yep it is above cum-ack */
+ cnt++;
+ SCTP_CALC_TSN_TO_GAP(gap, chk->rec.data.TSN_seq, asoc->mapping_array_base_tsn);
+ asoc->size_on_reasm_queue = sctp_sbspace_sub(asoc->size_on_reasm_queue, chk->send_size);
+ sctp_ucount_decr(asoc->cnt_on_reasm_queue);
+ SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
+ TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
+ if (chk->data) {
+ sctp_m_freem(chk->data);
+ chk->data = NULL;
+ }
+ sctp_free_a_chunk(stcb, chk);
+ }
+ chk = nchk;
+ }
+ /* Ok, that was fun; now we will drain all the inbound streams. */
+ for (strmat = 0; strmat < asoc->streamincnt; strmat++) {
+ ctl = TAILQ_FIRST(&asoc->strmin[strmat].inqueue);
+ while (ctl) {
+ nctl = TAILQ_NEXT(ctl, next);
+ if (compare_with_wrap(ctl->sinfo_tsn,
+ cumulative_tsn_p1, MAX_TSN)) {
+ /* Yep it is above cum-ack */
+ cnt++;
+ SCTP_CALC_TSN_TO_GAP(gap, ctl->sinfo_tsn, asoc->mapping_array_base_tsn);
+ asoc->size_on_all_streams = sctp_sbspace_sub(asoc->size_on_all_streams, ctl->length);
+ sctp_ucount_decr(asoc->cnt_on_all_streams);
+ SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
+ TAILQ_REMOVE(&asoc->strmin[strmat].inqueue, ctl, next);
+ if (ctl->data) {
+ sctp_m_freem(ctl->data);
+ ctl->data = NULL;
+ }
+ sctp_free_remote_addr(ctl->whoFrom);
+ SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_readq), ctl);
+ SCTP_DECR_READQ_COUNT();
+ }
+ ctl = nctl;
+ }
+ }
+ if (cnt) {
+ /* We must back down to see what the new highest is */
+ for (i = asoc->highest_tsn_inside_map;
+ (compare_with_wrap(i, asoc->mapping_array_base_tsn, MAX_TSN) || (i == asoc->mapping_array_base_tsn));
+ i--) {
+ SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
+ if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
+ asoc->highest_tsn_inside_map = i;
+ fnd = 1;
+ break;
+ }
+ }
+ if (!fnd) {
+ asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
+ }
+ /*
+ * Question, should we go through the delivery queue? The
+ * only reason things are on here is the app not reading OR
+ * a p-d-api up. An attacker COULD send enough in to
+ * initiate the PD-API and then send a bunch of stuff to
+ * other streams... these would wind up on the delivery
+ * queue.. and then we would not get to them. But in order
+ * to do this I then have to back-track and un-deliver
+ * sequence numbers in streams.. el-yucko. I think for now
+ * we will NOT look at the delivery queue and leave it to be
+ * something to consider later. An alternative would be to
+ * abort the P-D-API with a notification and then deliver
+ * the data.... Or another method might be to keep track of
+ * how many times the situation occurs and if we see a
+ * possible attack underway just abort the association.
+ */
+#ifdef SCTP_DEBUG
+ SCTPDBG(SCTP_DEBUG_PCB1, "Freed %d chunks from reneg harvest\n", cnt);
+#endif
+ /*
+ * Now do we need to find a new
+ * asoc->highest_tsn_inside_map?
+ */
+ asoc->last_revoke_count = cnt;
+ (void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
+ /* sa_ignore NO_NULL_CHK */
+ sctp_send_sack(stcb);
+ sctp_chunk_output(stcb->sctp_ep, stcb, SCTP_OUTPUT_FROM_DRAIN, SCTP_SO_NOT_LOCKED);
+ reneged_asoc_ids[reneged_at] = sctp_get_associd(stcb);
+ reneged_at++;
+ }
+ /*
+ * Another issue, in un-setting the TSN's in the mapping array we
+ * DID NOT adjust the highest_tsn marker. This will cause one of
+ * two things to occur. It may cause us to do extra work in checking
+ * for our mapping array movement. More importantly it may cause us
+ * to SACK every datagram. This may not be a bad thing though since
+ * we will recover once we get our cum-ack above and all this stuff
+ * we dumped recovered.
+ */
+}
+
+void
+sctp_drain()
+{
+ /*
+ * We must walk the PCB lists for ALL associations here. The system
+ * is LOW on MBUF's and needs help. This is where reneging will
+ * occur. We really hope this does NOT happen!
+ */
+ VNET_ITERATOR_DECL(vnet_iter);
+ VNET_LIST_RLOCK_NOSLEEP();
+ VNET_FOREACH(vnet_iter) {
+ CURVNET_SET(vnet_iter);
+ struct sctp_inpcb *inp;
+ struct sctp_tcb *stcb;
+
+ SCTP_STAT_INCR(sctps_protocol_drain_calls);
+ if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
+#ifdef VIMAGE
+ continue;
+#else
+ return;
+#endif
+ }
+ SCTP_INP_INFO_RLOCK();
+ LIST_FOREACH(inp, &SCTP_BASE_INFO(listhead), sctp_list) {
+ /* For each endpoint */
+ SCTP_INP_RLOCK(inp);
+ LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
+ /* For each association */
+ SCTP_TCB_LOCK(stcb);
+ sctp_drain_mbufs(inp, stcb);
+ SCTP_TCB_UNLOCK(stcb);
+ }
+ SCTP_INP_RUNLOCK(inp);
+ }
+ SCTP_INP_INFO_RUNLOCK();
+ CURVNET_RESTORE();
+ }
+ VNET_LIST_RUNLOCK_NOSLEEP();
+}
+
+/*
+ * Start a new iterator.
+ * It iterates through all endpoints and associations based on the pcb_state
+ * flags and asoc_state. "af" (mandatory) is executed for all matching
+ * assocs and "ef" (optional) is executed when the iterator completes.
+ * "inpf" (optional) is executed for each new endpoint as it is being
+ * iterated through. "inpe" (optional) is called when the inp completes
+ * its way through all the stcbs.
+ */
+int
+sctp_initiate_iterator(inp_func inpf,
+ asoc_func af,
+ inp_func inpe,
+ uint32_t pcb_state,
+ uint32_t pcb_features,
+ uint32_t asoc_state,
+ void *argp,
+ uint32_t argi,
+ end_func ef,
+ struct sctp_inpcb *s_inp,
+ uint8_t chunk_output_off)
+{
+ struct sctp_iterator *it = NULL;
+
+ if (af == NULL) {
+ return (-1);
+ }
+ SCTP_MALLOC(it, struct sctp_iterator *, sizeof(struct sctp_iterator),
+ SCTP_M_ITER);
+ if (it == NULL) {
+ SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_PCB, ENOMEM);
+ return (ENOMEM);
+ }
+ memset(it, 0, sizeof(*it));
+ it->function_assoc = af;
+ it->function_inp = inpf;
+ if (inpf)
+ it->done_current_ep = 0;
+ else
+ it->done_current_ep = 1;
+ it->function_atend = ef;
+ it->pointer = argp;
+ it->val = argi;
+ it->pcb_flags = pcb_state;
+ it->pcb_features = pcb_features;
+ it->asoc_state = asoc_state;
+ it->function_inp_end = inpe;
+ it->no_chunk_output = chunk_output_off;
+ it->vn = curvnet;
+ if (s_inp) {
+ /* Assume lock is held here */
+ it->inp = s_inp;
+ SCTP_INP_INCR_REF(it->inp);
+ it->iterator_flags = SCTP_ITERATOR_DO_SINGLE_INP;
+ } else {
+ SCTP_INP_INFO_RLOCK();
+ it->inp = LIST_FIRST(&SCTP_BASE_INFO(listhead));
+ if (it->inp) {
+ SCTP_INP_INCR_REF(it->inp);
+ }
+ SCTP_INP_INFO_RUNLOCK();
+ it->iterator_flags = SCTP_ITERATOR_DO_ALL_INP;
+
+ }
+ SCTP_IPI_ITERATOR_WQ_LOCK();
+
+ TAILQ_INSERT_TAIL(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr);
+ if (sctp_it_ctl.iterator_running == 0) {
+ sctp_wakeup_iterator();
+ }
+ SCTP_IPI_ITERATOR_WQ_UNLOCK();
+ /* sa_ignore MEMLEAK {memory is put on the tailq for the iterator} */
+ return (0);
+}
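+
+/*
+ * Usage sketch (illustrative, not from the original sources): to run a
+ * function over every association in the system a caller might do:
+ *
+ *	static void
+ *	my_asoc_func(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
+ *	    void *ptr, uint32_t val)
+ *	{
+ *		... per-assoc work; the iterator holds the TCB lock ...
+ *	}
+ *
+ *	(void)sctp_initiate_iterator(NULL, my_asoc_func, NULL, 0, 0, 0,
+ *	    NULL, 0, NULL, NULL, 0);
+ *
+ * my_asoc_func is a hypothetical name; the callback signature is assumed
+ * from the asoc_func typedef used by the iterator.
+ */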
diff --git a/rtems/freebsd/netinet/sctp_pcb.h b/rtems/freebsd/netinet/sctp_pcb.h
new file mode 100644
index 00000000..8653f3e0
--- /dev/null
+++ b/rtems/freebsd/netinet/sctp_pcb.h
@@ -0,0 +1,632 @@
+/*-
+ * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* $KAME: sctp_pcb.h,v 1.21 2005/07/16 01:18:47 suz Exp $ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#ifndef __sctp_pcb_h__
+#define __sctp_pcb_h__
+
+#include <rtems/freebsd/netinet/sctp_os.h>
+#include <rtems/freebsd/netinet/sctp.h>
+#include <rtems/freebsd/netinet/sctp_constants.h>
+#include <rtems/freebsd/netinet/sctp_sysctl.h>
+
+LIST_HEAD(sctppcbhead, sctp_inpcb);
+LIST_HEAD(sctpasochead, sctp_tcb);
+LIST_HEAD(sctpladdr, sctp_laddr);
+LIST_HEAD(sctpvtaghead, sctp_tagblock);
+LIST_HEAD(sctp_vrflist, sctp_vrf);
+LIST_HEAD(sctp_ifnlist, sctp_ifn);
+LIST_HEAD(sctp_ifalist, sctp_ifa);
+TAILQ_HEAD(sctp_readhead, sctp_queued_to_read);
+TAILQ_HEAD(sctp_streamhead, sctp_stream_queue_pending);
+
+#include <rtems/freebsd/netinet/sctp_structs.h>
+#include <rtems/freebsd/netinet/sctp_auth.h>
+
+#define SCTP_PCBHASH_ALLADDR(port, mask) (port & mask)
+#define SCTP_PCBHASH_ASOC(tag, mask) (tag & mask)
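+
+/*
+ * Example (illustrative, not from the original sources): the masks come
+ * from SCTP_HASH_INIT(), so a bucket lookup is a simple AND, e.g.:
+ *
+ *	head = &SCTP_BASE_INFO(sctp_ephash)[SCTP_PCBHASH_ALLADDR(lport,
+ *	    SCTP_BASE_INFO(hashmark))];
+ */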
+
+struct sctp_vrf {
+ LIST_ENTRY(sctp_vrf) next_vrf;
+ struct sctp_ifalist *vrf_addr_hash;
+ struct sctp_ifnlist ifnlist;
+ uint32_t vrf_id;
+ uint32_t tbl_id_v4; /* default v4 table id */
+ uint32_t tbl_id_v6; /* default v6 table id */
+ uint32_t total_ifa_count;
+ u_long vrf_addr_hashmark;
+ uint32_t refcount;
+};
+
+struct sctp_ifn {
+ struct sctp_ifalist ifalist;
+ struct sctp_vrf *vrf;
+ LIST_ENTRY(sctp_ifn) next_ifn;
+ LIST_ENTRY(sctp_ifn) next_bucket;
+ void *ifn_p; /* never access without appropriate lock */
+ uint32_t ifn_mtu;
+ uint32_t ifn_type;
+ uint32_t ifn_index; /* shorthand way to look at ifn for reference */
+ uint32_t refcount; /* number of references held; should be
+ * >= ifa_count */
+ uint32_t ifa_count; /* IFA's we hold (in our list - ifalist) */
+ uint32_t num_v6; /* number of v6 addresses */
+ uint32_t num_v4; /* number of v4 addresses */
+ uint32_t registered_af; /* registered address family for i/f events */
+ char ifn_name[SCTP_IFNAMSIZ];
+};
+
+/* SCTP local IFA flags */
+#define SCTP_ADDR_VALID 0x00000001 /* its up and active */
+#define SCTP_BEING_DELETED 0x00000002 /* being deleted, when
+ * refcount = 0. Note that it
+ * is pulled from the ifn list
+ * and ifa_p is nulled right
+ * away but it cannot be freed
+ * until the last *net
+ * pointing to it is deleted. */
+#define SCTP_ADDR_DEFER_USE 0x00000004 /* Hold off using this one */
+#define SCTP_ADDR_IFA_UNUSEABLE 0x00000008
+
+struct sctp_ifa {
+ LIST_ENTRY(sctp_ifa) next_ifa;
+ LIST_ENTRY(sctp_ifa) next_bucket;
+ struct sctp_ifn *ifn_p; /* back pointer to parent ifn */
+ void *ifa; /* pointer to ifa, needed for flag updates,
+ * for which we MUST hold the appropriate
+ * locks. This is for V6. */
+ union sctp_sockstore address;
+ uint32_t refcount; /* number of folks referring to this */
+ uint32_t flags;
+ uint32_t localifa_flags;
+ uint32_t vrf_id; /* vrf_id of this addr (for deleting) */
+ uint8_t src_is_loop;
+ uint8_t src_is_priv;
+ uint8_t src_is_glob;
+ uint8_t resv;
+};
+
+struct sctp_laddr {
+ LIST_ENTRY(sctp_laddr) sctp_nxt_addr; /* next in list */
+ struct sctp_ifa *ifa;
+ uint32_t action; /* Used during asconf and adding; if
+ * non-zero, src-addr selection will not
+ * consider this address. */
+ struct timeval start_time; /* time when this address was created */
+};
+
+struct sctp_block_entry {
+ int error;
+};
+
+struct sctp_timewait {
+ uint32_t tv_sec_at_expire; /* the seconds from boot to expire */
+ uint32_t v_tag; /* the vtag that can not be reused */
+ uint16_t lport; /* the local port used in vtag */
+ uint16_t rport; /* the remote port used in vtag */
+};
+
+struct sctp_tagblock {
+ LIST_ENTRY(sctp_tagblock) sctp_nxt_tagblock;
+ struct sctp_timewait vtag_block[SCTP_NUMBER_IN_VTAG_BLOCK];
+};
+
+
+struct sctp_epinfo {
+ struct socket *udp_tun_socket;
+ struct sctpasochead *sctp_asochash;
+ u_long hashasocmark;
+
+ struct sctppcbhead *sctp_ephash;
+ u_long hashmark;
+
+ /*-
+ * The TCP model represents a substantial overhead in that we get an
+ * additional hash table to keep explicit connections in. The
+ * listening TCP endpoint will exist in the usual ephash above and
+ * accept only INIT's. It will be incapable of sending off an INIT.
+ * When a dg arrives we must look in the normal ephash. If we find
+ * a TCP endpoint, that will tell us to go to the specific endpoint
+ * hash and re-hash to find the right assoc/socket. If we find a
+ * UDP model socket we then must complete the lookup. If this
+ * fails, i.e. no association can be found, then we must continue to see if
+ * a sctp_peeloff()'d socket is in the tcpephash (a spun off socket
+ * acts like a TCP model connected socket).
+ */
+ struct sctppcbhead *sctp_tcpephash;
+ u_long hashtcpmark;
+ uint32_t hashtblsize;
+
+ struct sctp_vrflist *sctp_vrfhash;
+ u_long hashvrfmark;
+
+ struct sctp_ifnlist *vrf_ifn_hash;
+ u_long vrf_ifn_hashmark;
+
+ struct sctppcbhead listhead;
+ struct sctpladdr addr_wq;
+
+ /* ep zone info */
+ sctp_zone_t ipi_zone_ep;
+ sctp_zone_t ipi_zone_asoc;
+ sctp_zone_t ipi_zone_laddr;
+ sctp_zone_t ipi_zone_net;
+ sctp_zone_t ipi_zone_chunk;
+ sctp_zone_t ipi_zone_readq;
+ sctp_zone_t ipi_zone_strmoq;
+ sctp_zone_t ipi_zone_asconf;
+ sctp_zone_t ipi_zone_asconf_ack;
+
+ struct rwlock ipi_ep_mtx;
+ struct mtx ipi_iterator_wq_mtx;
+ struct rwlock ipi_addr_mtx;
+ struct mtx ipi_pktlog_mtx;
+ struct mtx wq_addr_mtx;
+ uint32_t ipi_count_ep;
+
+ /* assoc/tcb zone info */
+ uint32_t ipi_count_asoc;
+
+ /* local addrlist zone info */
+ uint32_t ipi_count_laddr;
+
+ /* remote addrlist zone info */
+ uint32_t ipi_count_raddr;
+
+ /* chunk structure list for output */
+ uint32_t ipi_count_chunk;
+
+ /* socket queue zone info */
+ uint32_t ipi_count_readq;
+
+ /* socket queue zone info */
+ uint32_t ipi_count_strmoq;
+
+ /* Number of vrfs */
+ uint32_t ipi_count_vrfs;
+
+ /* Number of ifns */
+ uint32_t ipi_count_ifns;
+
+ /* Number of ifas */
+ uint32_t ipi_count_ifas;
+
+ /* system wide number of free chunks hanging around */
+ uint32_t ipi_free_chunks;
+ uint32_t ipi_free_strmoq;
+
+ struct sctpvtaghead vtag_timewait[SCTP_STACK_VTAG_HASH_SIZE];
+
+ /* address work queue handling */
+ struct sctp_timer addr_wq_timer;
+
+};
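+
+/*
+ * To make the lookup comment above concrete, a simplified (hypothetical)
+ * ordering of the probes for an inbound packet is:
+ *
+ *	look in sctp_ephash for the local port;
+ *	if a listening TCP-model endpoint is found
+ *		re-hash into sctp_tcpephash for the connected socket;
+ *	else if a UDP-model endpoint is found
+ *		search that endpoint for the association;
+ *	if still nothing
+ *		check sctp_tcpephash for an sctp_peeloff()'d socket;
+ */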
+
+
+struct sctp_base_info {
+ /*
+ * All static structures that anchor the system must be here.
+ */
+ struct sctp_epinfo sctppcbinfo;
+#if defined(__FreeBSD__) && defined(SMP) && defined(SCTP_USE_PERCPU_STAT)
+ struct sctpstat sctpstat[MAXCPU];
+#else
+ struct sctpstat sctpstat;
+#endif
+ struct sctp_sysctl sctpsysctl;
+ uint8_t first_time;
+ char sctp_pcb_initialized;
+#if defined(SCTP_PACKET_LOGGING)
+ int packet_log_writers;
+ int packet_log_end;
+ uint8_t packet_log_buffer[SCTP_PACKET_LOG_SIZE];
+#endif
+};
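+
+/*
+ * A hedged sketch of how the stat field above is meant to be used: under
+ * SCTP_USE_PERCPU_STAT each CPU bumps its own sctpstat slot (the counter
+ * name sctps_sendpackets is assumed for illustration, and the VNET
+ * indirection on system_base_info is omitted).
+ *
+ *	#if defined(__FreeBSD__) && defined(SMP) && defined(SCTP_USE_PERCPU_STAT)
+ *	system_base_info.sctpstat[curcpu].sctps_sendpackets++;
+ *	#else
+ *	system_base_info.sctpstat.sctps_sendpackets++;
+ *	#endif
+ */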
+
+/*-
+ * Here we have all the relevant information for each SCTP entity created. We
+ * will need to modify this as appropriate. We also need to figure out how to
+ * access /dev/random.
+ */
+struct sctp_pcb {
+ unsigned int time_of_secret_change; /* number of seconds from
+ * timeval.tv_sec */
+ uint32_t secret_key[SCTP_HOW_MANY_SECRETS][SCTP_NUMBER_OF_SECRETS];
+ unsigned int size_of_a_cookie;
+
+ unsigned int sctp_timeoutticks[SCTP_NUM_TMRS];
+ unsigned int sctp_minrto;
+ unsigned int sctp_maxrto;
+ unsigned int initial_rto;
+ int initial_init_rto_max;
+
+ unsigned int sctp_sack_freq;
+ uint32_t sctp_sws_sender;
+ uint32_t sctp_sws_receiver;
+
+ uint32_t sctp_default_cc_module;
+ /* authentication related fields */
+ struct sctp_keyhead shared_keys;
+ sctp_auth_chklist_t *local_auth_chunks;
+ sctp_hmaclist_t *local_hmacs;
+ uint16_t default_keyid;
+
+ /* various thresholds */
+ /* Max times I will init at a guy */
+ uint16_t max_init_times;
+
+ /* Max times I will send before we consider someone dead */
+ uint16_t max_send_times;
+
+ uint16_t def_net_failure;
+
+ /* number of streams to pre-open on an association */
+ uint16_t pre_open_stream_count;
+ uint16_t max_open_streams_intome;
+
+ /* random number generator */
+ uint32_t random_counter;
+ uint8_t random_numbers[SCTP_SIGNATURE_ALOC_SIZE];
+ uint8_t random_store[SCTP_SIGNATURE_ALOC_SIZE];
+
+ /*
+ * This timer is kept running per endpoint. When it fires it will
+ * change the secret key. The default is once an hour.
+ */
+ struct sctp_timer signature_change;
+
+ /* Zero copy full buffer timer */
+ struct sctp_timer zero_copy_timer;
+ /* Zero copy app to transport (sendq) read repulse timer */
+ struct sctp_timer zero_copy_sendq_timer;
+ uint32_t def_cookie_life;
+ /* defaults to 0 */
+ int auto_close_time;
+ uint32_t initial_sequence_debug;
+ uint32_t adaptation_layer_indicator;
+ uint32_t store_at;
+ uint8_t max_burst;
+ char current_secret_number;
+ char last_secret_number;
+};
+
+#ifndef SCTP_ALIGNMENT
+#define SCTP_ALIGNMENT 32
+#endif
+
+#ifndef SCTP_ALIGNM1
+#define SCTP_ALIGNM1 (SCTP_ALIGNMENT-1)
+#endif
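+
+/*
+ * The pad in struct sctp_inpcb below uses the classic round-up idiom;
+ * for example, with SCTP_ALIGNMENT 32 and a size of 50:
+ *
+ *	(50 + SCTP_ALIGNM1) & ~SCTP_ALIGNM1 == (50 + 31) & ~31 == 64
+ *
+ * i.e. sizes are rounded up to the next multiple of SCTP_ALIGNMENT.
+ */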
+
+#define sctp_lport ip_inp.inp.inp_lport
+
+struct sctp_pcbtsn_rlog {
+ uint32_t vtag;
+ uint16_t strm;
+ uint16_t seq;
+ uint16_t sz;
+ uint16_t flgs;
+};
+
+#define SCTP_READ_LOG_SIZE 135 /* we choose the number to make a pcb a page */
+
+
+struct sctp_inpcb {
+ /*-
+ * put an inpcb in front of it all; kind of a waste, but we need it
+ * for compatibility with all the other stuff.
+ */
+ union {
+ struct inpcb inp;
+ char align[(sizeof(struct in6pcb) + SCTP_ALIGNM1) &
+ ~SCTP_ALIGNM1];
+ } ip_inp;
+
+
+ /* Socket buffer lock protects read_queue and of course sb_cc */
+ struct sctp_readhead read_queue;
+
+ LIST_ENTRY(sctp_inpcb) sctp_list; /* lists all endpoints */
+ /* hash of all endpoints for model */
+ LIST_ENTRY(sctp_inpcb) sctp_hash;
+ /* count of local addresses bound, 0 if bound all */
+ int laddr_count;
+
+ /* list of addrs in use by the EP, NULL if bound-all */
+ struct sctpladdr sctp_addr_list;
+ /*
+ * used for source address selection rotation when we are subset
+ * bound
+ */
+ struct sctp_laddr *next_addr_touse;
+
+ /* back pointer to our socket */
+ struct socket *sctp_socket;
+ uint32_t sctp_flags; /* INP state flag set */
+ uint32_t sctp_features; /* Feature flags */
+ uint32_t sctp_mobility_features; /* Mobility Feature flags */
+ struct sctp_pcb sctp_ep;/* SCTP ep data */
+ /* head of the hash of all associations */
+ struct sctpasochead *sctp_tcbhash;
+ u_long sctp_hashmark;
+ /* head of the list of all associations */
+ struct sctpasochead sctp_asoc_list;
+#ifdef SCTP_TRACK_FREED_ASOCS
+ struct sctpasochead sctp_asoc_free_list;
+#endif
+ struct sctp_iterator *inp_starting_point_for_iterator;
+ uint32_t sctp_frag_point;
+ uint32_t partial_delivery_point;
+ uint32_t sctp_context;
+ uint32_t sctp_cmt_on_off;
+ struct sctp_nonpad_sndrcvinfo def_send;
+ /*-
+ * These three are here for the sosend_dgram
+ * (pkt, pkt_last and control).
+ * routine. However, I don't think anyone in
+ * the current FreeBSD kernel calls this. So
+ * they are candidates with sctp_sendm for
+ * de-supporting.
+ */
+ struct mbuf *pkt, *pkt_last;
+ struct mbuf *control;
+ struct mtx inp_mtx;
+ struct mtx inp_create_mtx;
+ struct mtx inp_rdata_mtx;
+ int32_t refcount;
+ uint32_t def_vrf_id;
+ uint32_t total_sends;
+ uint32_t total_recvs;
+ uint32_t last_abort_code;
+ uint32_t total_nospaces;
+ struct sctpasochead *sctp_asocidhash;
+ u_long hashasocidmark;
+ uint32_t sctp_associd_counter;
+
+#ifdef SCTP_ASOCLOG_OF_TSNS
+ struct sctp_pcbtsn_rlog readlog[SCTP_READ_LOG_SIZE];
+ uint32_t readlog_index;
+#endif
+};
+
+struct sctp_tcb {
+ struct socket *sctp_socket; /* back pointer to socket */
+ struct sctp_inpcb *sctp_ep; /* back pointer to ep */
+ LIST_ENTRY(sctp_tcb) sctp_tcbhash; /* next link in hash
+ * table */
+ LIST_ENTRY(sctp_tcb) sctp_tcblist; /* list of all of the
+ * TCB's */
+ LIST_ENTRY(sctp_tcb) sctp_tcbasocidhash; /* next link in asocid
+ * hash table */
+ LIST_ENTRY(sctp_tcb) sctp_asocs; /* vtag hash list */
+ struct sctp_block_entry *block_entry; /* pointer locked by socket
+ * send buffer */
+ struct sctp_association asoc;
+ /*-
+ * freed_by_sorcv_sincelast is protected by the sockbuf_lock NOT the
+ * tcb_lock. Its special in this way to help avoid extra mutex calls
+ * in the reading of data.
+ */
+ uint32_t freed_by_sorcv_sincelast;
+ uint32_t total_sends;
+ uint32_t total_recvs;
+ int freed_from_where;
+ uint16_t rport; /* remote port in network format */
+ uint16_t resv;
+ struct mtx tcb_mtx;
+ struct mtx tcb_send_mtx;
+};
+
+
+
+#include <rtems/freebsd/netinet/sctp_lock_bsd.h>
+
+
+/* TODO where to put non-_KERNEL things for __Userspace__? */
+#if defined(_KERNEL) || defined(__Userspace__)
+
+/* Attention Julian, this is the extern that
+ * goes with the base info. sctp_pcb.c has
+ * the real definition.
+ */
+VNET_DECLARE(struct sctp_base_info, system_base_info);
+
+#ifdef INET6
+int SCTP6_ARE_ADDR_EQUAL(struct sockaddr_in6 *a, struct sockaddr_in6 *b);
+
+#endif
+
+void sctp_fill_pcbinfo(struct sctp_pcbinfo *);
+
+struct sctp_ifn *
+ sctp_find_ifn(void *ifn, uint32_t ifn_index);
+
+struct sctp_vrf *sctp_allocate_vrf(int vrfid);
+struct sctp_vrf *sctp_find_vrf(uint32_t vrfid);
+void sctp_free_vrf(struct sctp_vrf *vrf);
+
+/*-
+ * Change address state, can be used if
+ * O/S supports telling transports about
+ * changes to IFA/IFN's (link layer triggers).
+ * If a ifn goes down, we will do src-addr-selection
+ * and NOT use that, as a source address. This does
+ * not stop the routing system from routing out
+ * that interface, but we won't put it as a source.
+ */
+void sctp_mark_ifa_addr_down(uint32_t vrf_id, struct sockaddr *addr, const char *if_name, uint32_t ifn_index);
+void sctp_mark_ifa_addr_up(uint32_t vrf_id, struct sockaddr *addr, const char *if_name, uint32_t ifn_index);
+
+struct sctp_ifa *
+sctp_add_addr_to_vrf(uint32_t vrfid,
+ void *ifn, uint32_t ifn_index, uint32_t ifn_type,
+ const char *if_name,
+ void *ifa, struct sockaddr *addr, uint32_t ifa_flags,
+ int dynamic_add);
+
+void sctp_update_ifn_mtu(uint32_t ifn_index, uint32_t mtu);
+
+void sctp_free_ifn(struct sctp_ifn *sctp_ifnp);
+void sctp_free_ifa(struct sctp_ifa *sctp_ifap);
+
+
+void
+sctp_del_addr_from_vrf(uint32_t vrfid, struct sockaddr *addr,
+ uint32_t ifn_index, const char *if_name);
+
+
+
+struct sctp_nets *sctp_findnet(struct sctp_tcb *, struct sockaddr *);
+
+struct sctp_inpcb *sctp_pcb_findep(struct sockaddr *, int, int, uint32_t);
+
+int
+sctp_inpcb_bind(struct socket *, struct sockaddr *,
+ struct sctp_ifa *, struct thread *);
+
+struct sctp_tcb *
+sctp_findassociation_addr(struct mbuf *, int, int,
+ struct sctphdr *, struct sctp_chunkhdr *, struct sctp_inpcb **,
+ struct sctp_nets **, uint32_t vrf_id);
+
+struct sctp_tcb *
+sctp_findassociation_addr_sa(struct sockaddr *,
+ struct sockaddr *, struct sctp_inpcb **, struct sctp_nets **, int, uint32_t);
+
+void
+sctp_move_pcb_and_assoc(struct sctp_inpcb *, struct sctp_inpcb *,
+ struct sctp_tcb *);
+
+/*-
+ * For this ep_addr call, the 'to' is the destination endpoint address of the
+ * peer (relative to outbound). The 'from' field is only used if the TCP model
+ * is enabled and helps distinguish among the subset-bound (non-boundall)
+ * endpoints. The TCP model MAY change the actual ep field; this is why it is
+ * passed.
+ */
+struct sctp_tcb *
+sctp_findassociation_ep_addr(struct sctp_inpcb **,
+ struct sockaddr *, struct sctp_nets **, struct sockaddr *,
+ struct sctp_tcb *);
+
+struct sctp_tcb *
+ sctp_findasoc_ep_asocid_locked(struct sctp_inpcb *inp, sctp_assoc_t asoc_id, int want_lock);
+
+struct sctp_tcb *
+sctp_findassociation_ep_asocid(struct sctp_inpcb *,
+ sctp_assoc_t, int);
+
+struct sctp_tcb *
+sctp_findassociation_ep_asconf(struct mbuf *, int, int,
+ struct sctphdr *, struct sctp_inpcb **, struct sctp_nets **, uint32_t vrf_id);
+
+int sctp_inpcb_alloc(struct socket *so, uint32_t vrf_id);
+
+int sctp_is_address_on_local_host(struct sockaddr *addr, uint32_t vrf_id);
+
+void sctp_inpcb_free(struct sctp_inpcb *, int, int);
+
+struct sctp_tcb *
+sctp_aloc_assoc(struct sctp_inpcb *, struct sockaddr *,
+ int *, uint32_t, uint32_t, struct thread *);
+
+int sctp_free_assoc(struct sctp_inpcb *, struct sctp_tcb *, int, int);
+
+
+void sctp_delete_from_timewait(uint32_t, uint16_t, uint16_t);
+
+int sctp_is_in_timewait(uint32_t tag, uint16_t lport, uint16_t rport);
+
+void
+ sctp_add_vtag_to_timewait(uint32_t tag, uint32_t time, uint16_t lport, uint16_t rport);
+
+void sctp_add_local_addr_ep(struct sctp_inpcb *, struct sctp_ifa *, uint32_t);
+
+int sctp_insert_laddr(struct sctpladdr *, struct sctp_ifa *, uint32_t);
+
+void sctp_remove_laddr(struct sctp_laddr *);
+
+void sctp_del_local_addr_ep(struct sctp_inpcb *, struct sctp_ifa *);
+
+int sctp_add_remote_addr(struct sctp_tcb *, struct sockaddr *, int, int);
+
+void sctp_remove_net(struct sctp_tcb *, struct sctp_nets *);
+
+int sctp_del_remote_addr(struct sctp_tcb *, struct sockaddr *);
+
+void sctp_pcb_init(void);
+
+void sctp_pcb_finish(void);
+
+void sctp_add_local_addr_restricted(struct sctp_tcb *, struct sctp_ifa *);
+void sctp_del_local_addr_restricted(struct sctp_tcb *, struct sctp_ifa *);
+
+int
+sctp_load_addresses_from_init(struct sctp_tcb *, struct mbuf *, int, int,
+ int, struct sctphdr *, struct sockaddr *);
+
+int
+sctp_set_primary_addr(struct sctp_tcb *, struct sockaddr *,
+ struct sctp_nets *);
+
+int sctp_is_vtag_good(struct sctp_inpcb *, uint32_t, uint16_t lport, uint16_t rport, struct timeval *, int);
+
+/* void sctp_drain(void); */
+
+int sctp_destination_is_reachable(struct sctp_tcb *, struct sockaddr *);
+
+int sctp_swap_inpcb_for_listen(struct sctp_inpcb *inp);
+
+/*-
+ * A NULL inpcb in the last arg indicates run on ALL eps; a specific inp in
+ * the last arg indicates run ONLY on assocs of that endpoint.
+ */
+int
+sctp_initiate_iterator(inp_func inpf,
+ asoc_func af,
+ inp_func inpe,
+ uint32_t, uint32_t,
+ uint32_t, void *,
+ uint32_t,
+ end_func ef,
+ struct sctp_inpcb *,
+ uint8_t co_off);
+
+#ifdef INVARIANTS
+void
+ sctp_validate_no_locks(struct sctp_inpcb *inp);
+
+#endif
+
+#endif /* _KERNEL || __Userspace__ */
+#endif /* !__sctp_pcb_h__ */
diff --git a/rtems/freebsd/netinet/sctp_peeloff.c b/rtems/freebsd/netinet/sctp_peeloff.c
new file mode 100644
index 00000000..f622ca6d
--- /dev/null
+++ b/rtems/freebsd/netinet/sctp_peeloff.c
@@ -0,0 +1,240 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+/* $KAME: sctp_peeloff.c,v 1.13 2005/03/06 16:04:18 itojun Exp $ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+#include <rtems/freebsd/netinet/sctp_os.h>
+#include <rtems/freebsd/netinet/sctp_pcb.h>
+#include <rtems/freebsd/netinet/sctputil.h>
+#include <rtems/freebsd/netinet/sctp_var.h>
+#include <rtems/freebsd/netinet/sctp_sysctl.h>
+#include <rtems/freebsd/netinet/sctp.h>
+#include <rtems/freebsd/netinet/sctp_uio.h>
+#include <rtems/freebsd/netinet/sctp_peeloff.h>
+#include <rtems/freebsd/netinet/sctp_auth.h>
+
+
+int
+sctp_can_peel_off(struct socket *head, sctp_assoc_t assoc_id)
+{
+ struct sctp_inpcb *inp;
+ struct sctp_tcb *stcb;
+ uint32_t state;
+
+ inp = (struct sctp_inpcb *)head->so_pcb;
+ if (inp == NULL) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PEELOFF, EFAULT);
+ return (EFAULT);
+ }
+ stcb = sctp_findassociation_ep_asocid(inp, assoc_id, 1);
+ if (stcb == NULL) {
+ SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_PEELOFF, ENOENT);
+ return (ENOENT);
+ }
+ state = SCTP_GET_STATE((&stcb->asoc));
+ if ((state == SCTP_STATE_EMPTY) ||
+ (state == SCTP_STATE_INUSE) ||
+ (state == SCTP_STATE_COOKIE_WAIT) ||
+ (state == SCTP_STATE_COOKIE_ECHOED)) {
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_PEELOFF, ENOTCONN);
+ return (ENOTCONN);
+ }
+ SCTP_TCB_UNLOCK(stcb);
+ /* We are clear to peel this one off */
+ return (0);
+}
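+
+/*
+ * A hypothetical caller (simplified; the real path goes through the
+ * socket layer) would gate the actual peel-off on this check:
+ *
+ *	error = sctp_can_peel_off(head, assoc_id);
+ *	if (error == 0)
+ *		newso = sctp_get_peeloff(head, assoc_id, &error);
+ */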
+
+int
+sctp_do_peeloff(struct socket *head, struct socket *so, sctp_assoc_t assoc_id)
+{
+ struct sctp_inpcb *inp, *n_inp;
+ struct sctp_tcb *stcb;
+ uint32_t state;
+
+ inp = (struct sctp_inpcb *)head->so_pcb;
+ if (inp == NULL) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PEELOFF, EFAULT);
+ return (EFAULT);
+ }
+ stcb = sctp_findassociation_ep_asocid(inp, assoc_id, 1);
+ if (stcb == NULL) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PEELOFF, ENOTCONN);
+ return (ENOTCONN);
+ }
+ state = SCTP_GET_STATE((&stcb->asoc));
+ if ((state == SCTP_STATE_EMPTY) ||
+ (state == SCTP_STATE_INUSE) ||
+ (state == SCTP_STATE_COOKIE_WAIT) ||
+ (state == SCTP_STATE_COOKIE_ECHOED)) {
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PEELOFF, ENOTCONN);
+ return (ENOTCONN);
+ }
+ n_inp = (struct sctp_inpcb *)so->so_pcb;
+ n_inp->sctp_flags = (SCTP_PCB_FLAGS_UDPTYPE |
+ SCTP_PCB_FLAGS_CONNECTED |
+ SCTP_PCB_FLAGS_IN_TCPPOOL | /* Turn on Blocking IO */
+ (SCTP_PCB_COPY_FLAGS & inp->sctp_flags));
+ n_inp->sctp_socket = so;
+ n_inp->sctp_features = inp->sctp_features;
+ n_inp->sctp_mobility_features = inp->sctp_mobility_features;
+ n_inp->sctp_frag_point = inp->sctp_frag_point;
+ n_inp->sctp_cmt_on_off = inp->sctp_cmt_on_off;
+ n_inp->partial_delivery_point = inp->partial_delivery_point;
+ n_inp->sctp_context = inp->sctp_context;
+ n_inp->inp_starting_point_for_iterator = NULL;
+ /* copy in the authentication parameters from the original endpoint */
+ if (n_inp->sctp_ep.local_hmacs)
+ sctp_free_hmaclist(n_inp->sctp_ep.local_hmacs);
+ n_inp->sctp_ep.local_hmacs =
+ sctp_copy_hmaclist(inp->sctp_ep.local_hmacs);
+ if (n_inp->sctp_ep.local_auth_chunks)
+ sctp_free_chunklist(n_inp->sctp_ep.local_auth_chunks);
+ n_inp->sctp_ep.local_auth_chunks =
+ sctp_copy_chunklist(inp->sctp_ep.local_auth_chunks);
+ (void)sctp_copy_skeylist(&inp->sctp_ep.shared_keys,
+ &n_inp->sctp_ep.shared_keys);
+ /*
+ * Now we must move it from one hash table to another and get the
+ * stcb in the right place.
+ */
+ sctp_move_pcb_and_assoc(inp, n_inp, stcb);
+ atomic_add_int(&stcb->asoc.refcnt, 1);
+ SCTP_TCB_UNLOCK(stcb);
+
+ sctp_pull_off_control_to_new_inp(inp, n_inp, stcb, SBL_WAIT);
+ atomic_subtract_int(&stcb->asoc.refcnt, 1);
+
+ return (0);
+}
+
+
+struct socket *
+sctp_get_peeloff(struct socket *head, sctp_assoc_t assoc_id, int *error)
+{
+ struct socket *newso;
+ struct sctp_inpcb *inp, *n_inp;
+ struct sctp_tcb *stcb;
+
+ SCTPDBG(SCTP_DEBUG_PEEL1, "SCTP peel-off called\n");
+ inp = (struct sctp_inpcb *)head->so_pcb;
+ if (inp == NULL) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PEELOFF, EFAULT);
+ *error = EFAULT;
+ return (NULL);
+ }
+ stcb = sctp_findassociation_ep_asocid(inp, assoc_id, 1);
+ if (stcb == NULL) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PEELOFF, ENOTCONN);
+ *error = ENOTCONN;
+ return (NULL);
+ }
+ atomic_add_int(&stcb->asoc.refcnt, 1);
+ SCTP_TCB_UNLOCK(stcb);
+ newso = sonewconn(head, SS_ISCONNECTED);
+ if (newso == NULL) {
+ SCTPDBG(SCTP_DEBUG_PEEL1, "sctp_peeloff:sonewconn failed\n");
+ SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_PEELOFF, ENOMEM);
+ *error = ENOMEM;
+ atomic_subtract_int(&stcb->asoc.refcnt, 1);
+ return (NULL);
+ }
+ SCTP_TCB_LOCK(stcb);
+ atomic_subtract_int(&stcb->asoc.refcnt, 1);
+ n_inp = (struct sctp_inpcb *)newso->so_pcb;
+ SOCK_LOCK(head);
+ n_inp->sctp_flags = (SCTP_PCB_FLAGS_UDPTYPE |
+ SCTP_PCB_FLAGS_CONNECTED |
+ SCTP_PCB_FLAGS_IN_TCPPOOL | /* Turn on Blocking IO */
+ (SCTP_PCB_COPY_FLAGS & inp->sctp_flags));
+ n_inp->sctp_features = inp->sctp_features;
+ n_inp->sctp_frag_point = inp->sctp_frag_point;
+ n_inp->sctp_cmt_on_off = inp->sctp_cmt_on_off;
+ n_inp->partial_delivery_point = inp->partial_delivery_point;
+ n_inp->sctp_context = inp->sctp_context;
+ n_inp->inp_starting_point_for_iterator = NULL;
+
+ /* copy in the authentication parameters from the original endpoint */
+ if (n_inp->sctp_ep.local_hmacs)
+ sctp_free_hmaclist(n_inp->sctp_ep.local_hmacs);
+ n_inp->sctp_ep.local_hmacs =
+ sctp_copy_hmaclist(inp->sctp_ep.local_hmacs);
+ if (n_inp->sctp_ep.local_auth_chunks)
+ sctp_free_chunklist(n_inp->sctp_ep.local_auth_chunks);
+ n_inp->sctp_ep.local_auth_chunks =
+ sctp_copy_chunklist(inp->sctp_ep.local_auth_chunks);
+ (void)sctp_copy_skeylist(&inp->sctp_ep.shared_keys,
+ &n_inp->sctp_ep.shared_keys);
+
+ n_inp->sctp_socket = newso;
+ if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) {
+ sctp_feature_off(n_inp, SCTP_PCB_FLAGS_AUTOCLOSE);
+ n_inp->sctp_ep.auto_close_time = 0;
+ sctp_timer_stop(SCTP_TIMER_TYPE_AUTOCLOSE, n_inp, stcb, NULL,
+ SCTP_FROM_SCTP_PEELOFF + SCTP_LOC_1);
+ }
+ /* Turn off any non-blocking semantic. */
+ SCTP_CLEAR_SO_NBIO(newso);
+ newso->so_state |= SS_ISCONNECTED;
+ /* We remove it right away */
+
+#ifdef SCTP_LOCK_LOGGING
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE) {
+ sctp_log_lock(inp, (struct sctp_tcb *)NULL, SCTP_LOG_LOCK_SOCK);
+ }
+#endif
+ TAILQ_REMOVE(&head->so_comp, newso, so_list);
+ head->so_qlen--;
+ SOCK_UNLOCK(head);
+ /*
+ * Now we must move it from one hash table to another and get the
+ * stcb in the right place.
+ */
+ sctp_move_pcb_and_assoc(inp, n_inp, stcb);
+ atomic_add_int(&stcb->asoc.refcnt, 1);
+ SCTP_TCB_UNLOCK(stcb);
+ /*
+ * And now the final hack. We move data in the pending side i.e.
+ * head to the new socket buffer. Let the GRUBBING begin :-0
+ */
+ sctp_pull_off_control_to_new_inp(inp, n_inp, stcb, SBL_WAIT);
+ atomic_subtract_int(&stcb->asoc.refcnt, 1);
+ return (newso);
+}
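+
+/*
+ * Note the recurring pattern above: the assoc refcount is bumped before
+ * the TCB lock is dropped so the tcb cannot be freed across the
+ * potentially blocking sctp_pull_off_control_to_new_inp() call, and is
+ * only released afterwards:
+ *
+ *	atomic_add_int(&stcb->asoc.refcnt, 1);
+ *	SCTP_TCB_UNLOCK(stcb);
+ *	-- work that may sleep --
+ *	atomic_subtract_int(&stcb->asoc.refcnt, 1);
+ */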
diff --git a/rtems/freebsd/netinet/sctp_peeloff.h b/rtems/freebsd/netinet/sctp_peeloff.h
new file mode 100644
index 00000000..28c5c30b
--- /dev/null
+++ b/rtems/freebsd/netinet/sctp_peeloff.h
@@ -0,0 +1,52 @@
+/*-
+ * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* $KAME: sctp_peeloff.h,v 1.6 2005/03/06 16:04:18 itojun Exp $ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#ifndef __sctp_peeloff_h__
+#define __sctp_peeloff_h__
+
+
+
+
+#if defined(_KERNEL)
+
+int sctp_can_peel_off(struct socket *, sctp_assoc_t);
+int sctp_do_peeloff(struct socket *, struct socket *, sctp_assoc_t);
+struct socket *sctp_get_peeloff(struct socket *, sctp_assoc_t, int *);
+
+
+
+#endif /* _KERNEL */
+
+#endif
diff --git a/rtems/freebsd/netinet/sctp_structs.h b/rtems/freebsd/netinet/sctp_structs.h
new file mode 100644
index 00000000..18285053
--- /dev/null
+++ b/rtems/freebsd/netinet/sctp_structs.h
@@ -0,0 +1,1094 @@
+/*-
+ * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* $KAME: sctp_structs.h,v 1.13 2005/03/06 16:04:18 itojun Exp $ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#ifndef __sctp_structs_h__
+#define __sctp_structs_h__
+
+#include <rtems/freebsd/netinet/sctp_os.h>
+#include <rtems/freebsd/netinet/sctp_header.h>
+#include <rtems/freebsd/netinet/sctp_auth.h>
+
+struct sctp_timer {
+ sctp_os_timer_t timer;
+
+ int type;
+ /*
+ * Depending on the timer type these will be setup and cast with the
+ * appropriate entity.
+ */
+ void *ep;
+ void *tcb;
+ void *net;
+ void *vnet;
+
+ /* for sanity checking */
+ void *self;
+ uint32_t ticks;
+ uint32_t stopped_from;
+};
+
+
+struct sctp_foo_stuff {
+ struct sctp_inpcb *inp;
+ uint32_t lineno;
+ uint32_t ticks;
+ int updown;
+};
+
+
+/*
+ * This is the information we track on each interface that we know about from
+ * the distant end.
+ */
+TAILQ_HEAD(sctpnetlisthead, sctp_nets);
+
+struct sctp_stream_reset_list {
+ TAILQ_ENTRY(sctp_stream_reset_list) next_resp;
+ uint32_t tsn;
+ int number_entries;
+ struct sctp_stream_reset_out_request req;
+};
+
+TAILQ_HEAD(sctp_resethead, sctp_stream_reset_list);
+
+/*
+ * Users of the iterator need to malloc an iterator with a call to
+ * sctp_initiate_iterator(inp_func, assoc_func, inp_func, pcb_flags, pcb_features,
+ * asoc_state, void-ptr-arg, uint32-arg, end_func, inp);
+ *
+ * Use the following two defines if you don't care what pcb flags are on the EP
+ * and/or you don't care what state the association is in.
+ *
+ * Note that if you specify an INP as the last argument then ONLY each
+ * association of that single INP will be executed upon. Note that the pcb
+ * flags STILL apply, so if the inp you specify has pcb_flags different from
+ * what you put in pcb_flags, nothing will happen. Use SCTP_PCB_ANY_FLAGS to
+ * assure the inp you specify gets treated.
+ */
+#define SCTP_PCB_ANY_FLAGS 0x00000000
+#define SCTP_PCB_ANY_FEATURES 0x00000000
+#define SCTP_ASOC_ANY_STATE 0x00000000
+
+typedef void (*asoc_func) (struct sctp_inpcb *, struct sctp_tcb *, void *ptr,
+ uint32_t val);
+typedef int (*inp_func) (struct sctp_inpcb *, void *ptr, uint32_t val);
+typedef void (*end_func) (void *ptr, uint32_t val);
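+
+/*
+ * A hedged sketch of starting an iterator over every endpoint and every
+ * association (the callback names my_asoc_cb/my_done_cb and the argument
+ * my_arg are hypothetical):
+ *
+ *	(void)sctp_initiate_iterator(NULL, my_asoc_cb, NULL,
+ *	    SCTP_PCB_ANY_FLAGS, SCTP_PCB_ANY_FEATURES, SCTP_ASOC_ANY_STATE,
+ *	    my_arg, 0, my_done_cb, NULL, 0);
+ */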
+
+struct sctp_iterator {
+ TAILQ_ENTRY(sctp_iterator) sctp_nxt_itr;
+ struct vnet *vn;
+ struct sctp_timer tmr;
+ struct sctp_inpcb *inp; /* current endpoint */
+ struct sctp_tcb *stcb; /* current assoc */
+ struct sctp_inpcb *next_inp; /* special hook to skip to */
+ asoc_func function_assoc; /* per assoc function */
+ inp_func function_inp; /* per endpoint function */
+ inp_func function_inp_end; /* end INP function */
+ end_func function_atend;/* iterator completion function */
+ void *pointer; /* pointer for apply func to use */
+ uint32_t val; /* value for apply func to use */
+ uint32_t pcb_flags; /* endpoint flags being checked */
+ uint32_t pcb_features; /* endpoint features being checked */
+ uint32_t asoc_state; /* assoc state being checked */
+ uint32_t iterator_flags;
+ uint8_t no_chunk_output;
+ uint8_t done_current_ep;
+};
+
+/* iterator_flags values */
+#define SCTP_ITERATOR_DO_ALL_INP 0x00000001
+#define SCTP_ITERATOR_DO_SINGLE_INP 0x00000002
+
+
+TAILQ_HEAD(sctpiterators, sctp_iterator);
+
+struct sctp_copy_all {
+ struct sctp_inpcb *inp; /* ep */
+ struct mbuf *m;
+ struct sctp_sndrcvinfo sndrcv;
+ int sndlen;
+ int cnt_sent;
+ int cnt_failed;
+};
+
+struct sctp_asconf_iterator {
+ struct sctpladdr list_of_work;
+ int cnt;
+};
+
+struct iterator_control {
+ struct mtx ipi_iterator_wq_mtx;
+ struct mtx it_mtx;
+ SCTP_PROCESS_STRUCT thread_proc;
+ struct sctpiterators iteratorhead;
+ struct sctp_iterator *cur_it;
+ uint32_t iterator_running;
+ uint32_t iterator_flags;
+};
+
+#define SCTP_ITERATOR_MUST_EXIT 0x00000001
+#define SCTP_ITERATOR_STOP_CUR_IT 0x00000002
+#define SCTP_ITERATOR_STOP_CUR_INP 0x00000004
+
+struct sctp_net_route {
+ sctp_rtentry_t *ro_rt;
+ void *ro_lle;
+ union sctp_sockstore _l_addr; /* remote peer addr */
+ struct sctp_ifa *_s_addr; /* our selected src addr */
+};
+
+struct htcp {
+ uint16_t alpha; /* Fixed point arith, << 7 */
+ uint8_t beta; /* Fixed point arith, << 7 */
+ uint8_t modeswitch; /* Delay modeswitch until we have had at
+ * least one congestion event */
+ uint32_t last_cong; /* Time since last congestion event end */
+ uint32_t undo_last_cong;
+ uint16_t bytes_acked;
+ uint32_t bytecount;
+ uint32_t minRTT;
+ uint32_t maxRTT;
+
+ uint32_t undo_maxRTT;
+ uint32_t undo_old_maxB;
+
+ /* Bandwidth estimation */
+ uint32_t minB;
+ uint32_t maxB;
+ uint32_t old_maxB;
+ uint32_t Bi;
+ uint32_t lasttime;
+};
+
+
+struct sctp_nets {
+ TAILQ_ENTRY(sctp_nets) sctp_next; /* next link */
+
+ /*
+ * Things on the top half may be able to be split into a common
+ * structure shared by all.
+ */
+ struct sctp_timer pmtu_timer;
+
+ /*
+ * The following two in combination equate to a route entry for v6
+ * or v4.
+ */
+ struct sctp_net_route ro;
+
+ /* mtu discovered so far */
+ uint32_t mtu;
+ uint32_t ssthresh; /* not sure about this one for split */
+
+ /* smoothed average things for RTT and RTO itself */
+ int lastsa;
+ int lastsv;
+ int rtt; /* last measured rtt value in ms */
+ unsigned int RTO;
+
+ /* This is used for SHUTDOWN/SHUTDOWN-ACK/SEND or INIT timers */
+ struct sctp_timer rxt_timer;
+ struct sctp_timer fr_timer; /* for early fr */
+
+ /* last time in seconds I sent to it */
+ struct timeval last_sent_time;
+ int ref_count;
+
+ /* Congestion stats per destination */
+ /*
+ * flight size variables and such, sorry Vern, I could not avoid
+ * this if I wanted performance :>
+ */
+ uint32_t flight_size;
+ uint32_t cwnd; /* actual cwnd */
+ uint32_t prev_cwnd; /* cwnd before any processing */
+ uint32_t partial_bytes_acked; /* in CA tracks when to incr a MTU */
+ uint32_t prev_rtt;
+ /* tracking variables to avoid the alloc/free in sack processing */
+ unsigned int net_ack;
+ unsigned int net_ack2;
+
+ /*
+ * JRS - 5/8/07 - Variable to track last time a destination was
+ * active for CMT PF
+ */
+ uint32_t last_active;
+
+ /*
+ * CMT variables (iyengar@cis.udel.edu)
+ */
+ uint32_t this_sack_highest_newack; /* tracks highest TSN newly
+ * acked for a given dest in
+ * the current SACK. Used in
+ * SFR and HTNA algos */
+ uint32_t pseudo_cumack; /* CMT CUC algorithm. Maintains next expected
+ * pseudo-cumack for this destination */
+ uint32_t rtx_pseudo_cumack; /* CMT CUC algorithm. Maintains next
+ * expected pseudo-cumack for this
+ * destination */
+
+ /* CMT fast recovery variables */
+ uint32_t fast_recovery_tsn;
+ uint32_t heartbeat_random1;
+ uint32_t heartbeat_random2;
+ uint32_t tos_flowlabel;
+
+ struct timeval start_time; /* time when this net was created */
+
+ uint32_t marked_retrans;/* number of DATA chunks marked for timer
+ * based retransmissions */
+ uint32_t marked_fastretrans;
+
+ /* if this guy is ok or not ... status */
+ uint16_t dest_state;
+ /* number of transmit failures to down this guy */
+ uint16_t failure_threshold;
+ /* error stats on destination */
+ uint16_t error_count;
+ /* UDP port number in case of UDP tunneling */
+ uint16_t port;
+
+ uint8_t fast_retran_loss_recovery;
+ uint8_t will_exit_fast_recovery;
+ /* Flags that probably can be combined into dest_state */
+ uint8_t fast_retran_ip; /* fast retransmit in progress */
+ uint8_t hb_responded;
+ uint8_t saw_newack; /* CMT's SFR algorithm flag */
+ uint8_t src_addr_selected; /* if we split we move */
+ uint8_t indx_of_eligible_next_to_use;
+ uint8_t addr_is_local; /* it's a local address (if known); could move
+ * in split */
+
+ /*
+ * CMT variables (iyengar@cis.udel.edu)
+ */
+ uint8_t find_pseudo_cumack; /* CMT CUC algorithm. Flag used to
+ * find a new pseudocumack. This flag
+ * is set after a new pseudo-cumack
+ * has been received and indicates
+ * that the sender should find the
+ * next pseudo-cumack expected for
+ * this destination */
+ uint8_t find_rtx_pseudo_cumack; /* CMT CUCv2 algorithm. Flag used to
+ * find a new rtx-pseudocumack. This
+ * flag is set after a new
+ * rtx-pseudo-cumack has been received
+ * and indicates that the sender
+ * should find the next
+ * rtx-pseudo-cumack expected for this
+ * destination */
+ uint8_t new_pseudo_cumack; /* CMT CUC algorithm. Flag used to
+ * indicate if a new pseudo-cumack or
+ * rtx-pseudo-cumack has been received */
+ uint8_t window_probe; /* Doing a window probe? */
+ uint8_t RTO_measured; /* Have we done the first measure */
+ uint8_t last_hs_used; /* index into the last HS table entry we used */
+ /* JRS - struct used in HTCP algorithm */
+ struct htcp htcp_ca;
+};
+
+
+struct sctp_data_chunkrec {
+ uint32_t TSN_seq; /* the TSN of this transmit */
+ uint16_t stream_seq; /* the stream sequence number of this transmit */
+ uint16_t stream_number; /* the stream number of this guy */
+ uint32_t payloadtype;
+ uint32_t context; /* from send */
+
+ /* ECN Nonce: Nonce Value for this chunk */
+ uint8_t ect_nonce;
+ uint8_t fwd_tsn_cnt;
+ /*
+ * part of the Highest sacked algorithm to be able to stroke counts
+ * on ones that are FR'd.
+ */
+ uint32_t fast_retran_tsn; /* sending_seq at the time of FR */
+ struct timeval timetodrop; /* time we drop it from queue */
+ uint8_t doing_fast_retransmit;
+ uint8_t rcv_flags; /* flags pulled from data chunk on inbound for
+ * outbound holds sending flags for PR-SCTP. */
+ uint8_t state_flags;
+ uint8_t chunk_was_revoked;
+};
+
+TAILQ_HEAD(sctpchunk_listhead, sctp_tmit_chunk);
+
+/* The lower byte is used to enumerate PR_SCTP policies */
+#define CHUNK_FLAGS_PR_SCTP_TTL SCTP_PR_SCTP_TTL
+#define CHUNK_FLAGS_PR_SCTP_BUF SCTP_PR_SCTP_BUF
+#define CHUNK_FLAGS_PR_SCTP_RTX SCTP_PR_SCTP_RTX
+
+/* The upper byte is used as a bit mask */
+#define CHUNK_FLAGS_FRAGMENT_OK 0x0100
+
+struct chk_id {
+ uint16_t id;
+ uint16_t can_take_data;
+};
+
+
+struct sctp_tmit_chunk {
+ union {
+ struct sctp_data_chunkrec data;
+ struct chk_id chunk_id;
+ } rec;
+ struct sctp_association *asoc; /* bp to asoc this belongs to */
+ struct timeval sent_rcv_time; /* filled in if RTT being calculated */
+ struct mbuf *data; /* pointer to mbuf chain of data */
+ struct mbuf *last_mbuf; /* pointer to last mbuf in chain */
+ struct sctp_nets *whoTo;
+ TAILQ_ENTRY(sctp_tmit_chunk) sctp_next; /* next link */
+ int32_t sent; /* the send status */
+ uint16_t snd_count; /* number of times I sent */
+ uint16_t flags; /* flags, such as FRAGMENT_OK */
+ uint16_t send_size;
+ uint16_t book_size;
+ uint16_t mbcnt;
+ uint16_t auth_keyid;
+ uint8_t holds_key_ref; /* flag if auth keyid refcount is held */
+ uint8_t pad_inplace;
+ uint8_t do_rtt;
+ uint8_t book_size_scale;
+ uint8_t no_fr_allowed;
+ uint8_t pr_sctp_on;
+ uint8_t copy_by_ref;
+ uint8_t window_probe;
+};
+
+/*
+ * The first part of this structure MUST be the entire sinfo structure. Maybe
+ * I should have made it a sub structure... we can circle back later and do
+ * that if we want.
+ */
+struct sctp_queued_to_read { /* sinfo structure plus more */
+ uint16_t sinfo_stream; /* off the wire */
+ uint16_t sinfo_ssn; /* off the wire */
+ uint16_t sinfo_flags; /* SCTP_UNORDERED from wire use SCTP_EOF for
+ * EOR */
+ uint32_t sinfo_ppid; /* off the wire */
+ uint32_t sinfo_context; /* pick this up from assoc def context? */
+ uint32_t sinfo_timetolive; /* not used by kernel */
+ uint32_t sinfo_tsn; /* Use this in reassembly as first TSN */
+ uint32_t sinfo_cumtsn; /* Use this in reassembly as last TSN */
+ sctp_assoc_t sinfo_assoc_id; /* our assoc id */
+ /* Non sinfo stuff */
+ uint32_t length; /* length of data */
+ uint32_t held_length; /* length held in sb */
+ struct sctp_nets *whoFrom; /* where it came from */
+ struct mbuf *data; /* front of the mbuf chain of data with
+ * PKT_HDR */
+ struct mbuf *tail_mbuf; /* used for multi-part data */
+ struct mbuf *aux_data; /* used to hold/cache control if o/s does not
+ * take it from us */
+ struct sctp_tcb *stcb; /* assoc, used for window update */
+ TAILQ_ENTRY(sctp_queued_to_read) next;
+ uint16_t port_from;
+ uint16_t spec_flags; /* Flags to hold the notification field */
+ uint8_t do_not_ref_stcb;
+ uint8_t end_added;
+ uint8_t pdapi_aborted;
+ uint8_t some_taken;
+};
+
+/* This data structure will be on the outbound
+ * stream queues. Data will be pulled off from
+ * the front of the mbuf data and chunk-ified
+ * by the output routines. We will custom
+ * fit every chunk we pull to the send/sent
+ * queue to make up the next full packet
+ * if we can. An entry cannot be removed
+ * from the stream_out queue until
+ * the msg_is_complete flag is set. This
+ * means at times data/tail_mbuf MIGHT
+ * be NULL. If that occurs, it happens
+ * for one of two reasons. Either the user
+ * is blocked on a send() call and has not
+ * awoken to copy more data down... OR
+ * the user is in the explicit MSG_EOR mode
+ * and wrote some data, but has not completed
+ * sending.
+ */
+struct sctp_stream_queue_pending {
+ struct mbuf *data;
+ struct mbuf *tail_mbuf;
+ struct timeval ts;
+ struct sctp_nets *net;
+ TAILQ_ENTRY(sctp_stream_queue_pending) next;
+ uint32_t length;
+ uint32_t timetolive;
+ uint32_t ppid;
+ uint32_t context;
+ uint16_t sinfo_flags;
+ uint16_t stream;
+ uint16_t strseq;
+ uint16_t act_flags;
+ uint16_t auth_keyid;
+ uint8_t holds_key_ref;
+ uint8_t msg_is_complete;
+ uint8_t some_taken;
+ uint8_t pr_sctp_on;
+ uint8_t sender_all_done;
+ uint8_t put_last_out;
+ uint8_t discard_rest;
+};
+
+/*
+ * this struct contains info that is used to track inbound stream data and
+ * help with ordering.
+ */
+TAILQ_HEAD(sctpwheelunrel_listhead, sctp_stream_in);
+struct sctp_stream_in {
+ struct sctp_readhead inqueue;
+ uint16_t stream_no;
+ uint16_t last_sequence_delivered; /* used for re-order */
+ uint8_t delivery_started;
+};
+
+/* This struct is used to track the traffic on outbound streams */
+TAILQ_HEAD(sctpwheel_listhead, sctp_stream_out);
+struct sctp_stream_out {
+ struct sctp_streamhead outqueue;
+ TAILQ_ENTRY(sctp_stream_out) next_spoke; /* next link in wheel */
+ uint16_t stream_no;
+ uint16_t next_sequence_sent; /* next one I expect to send out */
+ uint8_t last_msg_incomplete;
+};
+
+/* used to keep track of the addresses yet to try to add/delete */
+TAILQ_HEAD(sctp_asconf_addrhead, sctp_asconf_addr);
+struct sctp_asconf_addr {
+ TAILQ_ENTRY(sctp_asconf_addr) next;
+ struct sctp_asconf_addr_param ap;
+ struct sctp_ifa *ifa; /* save the ifa for add/del ip */
+ uint8_t sent; /* has this been sent yet? */
+ uint8_t special_del; /* not to be used in lookup */
+};
+
+struct sctp_scoping {
+ uint8_t ipv4_addr_legal;
+ uint8_t ipv6_addr_legal;
+ uint8_t loopback_scope;
+ uint8_t ipv4_local_scope;
+ uint8_t local_scope;
+ uint8_t site_scope;
+};
+
+#define SCTP_TSN_LOG_SIZE 40
+
+struct sctp_tsn_log {
+ void *stcb;
+ uint32_t tsn;
+ uint16_t strm;
+ uint16_t seq;
+ uint16_t sz;
+ uint16_t flgs;
+ uint16_t in_pos;
+ uint16_t in_out;
+};
+
+#define SCTP_FS_SPEC_LOG_SIZE 200
+struct sctp_fs_spec_log {
+ uint32_t sent;
+ uint32_t total_flight;
+ uint32_t tsn;
+ uint16_t book;
+ uint8_t incr;
+ uint8_t decr;
+};
+
+/* This struct is here to cut out the compatibility
+ * pad that bulks up both the inp and stcb. The non
+ * pad portion MUST stay in complete sync with
+ * sctp_sndrcvinfo... i.e. if sinfo_xxxx is added
+ * this must be done here too.
+ */
+struct sctp_nonpad_sndrcvinfo {
+ uint16_t sinfo_stream;
+ uint16_t sinfo_ssn;
+ uint16_t sinfo_flags;
+ uint32_t sinfo_ppid;
+ uint32_t sinfo_context;
+ uint32_t sinfo_timetolive;
+ uint32_t sinfo_tsn;
+ uint32_t sinfo_cumtsn;
+ sctp_assoc_t sinfo_assoc_id;
+};
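+
+/*
+ * Since this layout must stay in lock-step with sctp_sndrcvinfo, a
+ * defensive compile-time check is one way to pin the shared prefix
+ * (sketch only; CTASSERT is FreeBSD's static assertion macro):
+ *
+ *	CTASSERT(offsetof(struct sctp_nonpad_sndrcvinfo, sinfo_assoc_id) ==
+ *	    offsetof(struct sctp_sndrcvinfo, sinfo_assoc_id));
+ */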
+
+/*
+ * JRS - Structure to hold function pointers to the functions responsible
+ * for congestion control.
+ */
+
+struct sctp_cc_functions {
+ void (*sctp_set_initial_cc_param) (struct sctp_tcb *stcb, struct sctp_nets *net);
+ void (*sctp_cwnd_update_after_sack) (struct sctp_tcb *stcb,
+ struct sctp_association *asoc,
+ int accum_moved, int reneged_all, int will_exit);
+ void (*sctp_cwnd_update_after_fr) (struct sctp_tcb *stcb,
+ struct sctp_association *asoc);
+ void (*sctp_cwnd_update_after_timeout) (struct sctp_tcb *stcb,
+ struct sctp_nets *net);
+ void (*sctp_cwnd_update_after_ecn_echo) (struct sctp_tcb *stcb,
+ struct sctp_nets *net);
+ void (*sctp_cwnd_update_after_packet_dropped) (struct sctp_tcb *stcb,
+ struct sctp_nets *net, struct sctp_pktdrop_chunk *cp,
+ uint32_t * bottle_bw, uint32_t * on_queue);
+ void (*sctp_cwnd_update_after_output) (struct sctp_tcb *stcb,
+ struct sctp_nets *net, int burst_limit);
+ void (*sctp_cwnd_update_after_fr_timer) (struct sctp_inpcb *inp,
+ struct sctp_tcb *stcb, struct sctp_nets *net);
+};
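+
+/*
+ * These pointers are installed per association (see cc_functions in
+ * struct sctp_association below), so call sites dispatch without knowing
+ * which congestion control module is loaded; a sketch of one such call:
+ *
+ *	stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout(stcb, net);
+ */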
+
+/* used to save ASCONF chunks for retransmission */
+TAILQ_HEAD(sctp_asconf_head, sctp_asconf);
+struct sctp_asconf {
+ TAILQ_ENTRY(sctp_asconf) next;
+ uint32_t serial_number;
+ uint16_t snd_count;
+ struct mbuf *data;
+ uint16_t len;
+};
+
+/* used to save ASCONF-ACK chunks for retransmission */
+TAILQ_HEAD(sctp_asconf_ackhead, sctp_asconf_ack);
+struct sctp_asconf_ack {
+ TAILQ_ENTRY(sctp_asconf_ack) next;
+ uint32_t serial_number;
+ struct sctp_nets *last_sent_to;
+ struct mbuf *data;
+ uint16_t len;
+};
+
+/*
+ * Here we have information about each individual association that we track.
+ * We probably in production would be more dynamic. But for ease of
+ * implementation we will have a fixed array that we hunt for in a linear
+ * fashion.
+ */
+struct sctp_association {
+ /* association state */
+ int state;
+
+ /* queue of pending addrs to add/delete */
+ struct sctp_asconf_addrhead asconf_queue;
+
+ struct timeval time_entered; /* time we entered state */
+ struct timeval time_last_rcvd;
+ struct timeval time_last_sent;
+ struct timeval time_last_sat_advance;
+ struct sctp_nonpad_sndrcvinfo def_send;
+
+ /* timers and such */
+ struct sctp_timer hb_timer; /* hb timer */
+ struct sctp_timer dack_timer; /* Delayed ack timer */
+ struct sctp_timer asconf_timer; /* asconf */
+ struct sctp_timer strreset_timer; /* stream reset */
+ struct sctp_timer shut_guard_timer; /* shutdown guard */
+ struct sctp_timer autoclose_timer; /* automatic close timer */
+ struct sctp_timer delayed_event_timer; /* timer for delayed events */
+ struct sctp_timer delete_prim_timer; /* deleting primary dst */
+
+ /* list of restricted local addresses */
+ struct sctpladdr sctp_restricted_addrs;
+
+ /* last local address pending deletion (waiting for an address add) */
+ struct sctp_ifa *asconf_addr_del_pending;
+ /* Deleted primary destination (used to stop timer) */
+ struct sctp_nets *deleted_primary;
+
+ struct sctpnetlisthead nets; /* remote address list */
+
+ /* Free chunk list */
+ struct sctpchunk_listhead free_chunks;
+
+ /* Control chunk queue */
+ struct sctpchunk_listhead control_send_queue;
+
+ /* ASCONF chunk queue */
+ struct sctpchunk_listhead asconf_send_queue;
+
+ /*
+ * Once a TSN hits the wire it is moved to the sent_queue. We
+ * maintain two counts here (don't know if any but retran_cnt is
+ * needed). The idea is that the sent_queue_retran_cnt reflects how
+ * many chunks have been marked for retransmission by either T3-rxt
+ * or FR.
+ */
+ struct sctpchunk_listhead sent_queue;
+ struct sctpchunk_listhead send_queue;
+
+ /* re-assembly queue for fragmented chunks on the inbound path */
+ struct sctpchunk_listhead reasmqueue;
+
+ /*
+ * this queue is used when we reach a condition that we can NOT put
+ * data into the socket buffer. We track the size of this queue and
+ * set our rwnd to the space in the socket minus also the
+ * size_on_delivery_queue.
+ */
+ struct sctpwheel_listhead out_wheel;
+
+ /*
+ * This pointer will be set to NULL most of the time. But when we
+ * have a fragmented message, where we could not get out all of the
+ * message at the last send then this will point to the stream to go
+ * get data from.
+ */
+ struct sctp_stream_out *locked_on_sending;
+
+ /* If an iterator is looking at me, this is it */
+ struct sctp_iterator *stcb_starting_point_for_iterator;
+
+ /* ASCONF save the last ASCONF-ACK so we can resend it if necessary */
+ struct sctp_asconf_ackhead asconf_ack_sent;
+
+ /*
+ * pointer to last stream reset queued to control queue by us with
+ * requests.
+ */
+ struct sctp_tmit_chunk *str_reset;
+ /*
+ * if Source Address Selection happening, this will rotate through
+ * the link list.
+ */
+ struct sctp_laddr *last_used_address;
+
+ /* stream arrays */
+ struct sctp_stream_in *strmin;
+ struct sctp_stream_out *strmout;
+ uint8_t *mapping_array;
+ /* primary destination to use */
+ struct sctp_nets *primary_destination;
+ /* For CMT */
+ struct sctp_nets *last_net_cmt_send_started;
+ /* last place I got a data chunk from */
+ struct sctp_nets *last_data_chunk_from;
+ /* last place I got a control from */
+ struct sctp_nets *last_control_chunk_from;
+
+ /* circular looking for output selection */
+ struct sctp_stream_out *last_out_stream;
+
+ /*
+ * wait to the point the cum-ack passes req->send_reset_at_tsn for
+ * any req on the list.
+ */
+ struct sctp_resethead resetHead;
+
+ /* queue of chunks waiting to be sent into the local stack */
+ struct sctp_readhead pending_reply_queue;
+
+ /* JRS - the congestion control functions are in this struct */
+ struct sctp_cc_functions cc_functions;
+ /*
+ * JRS - value to store the currently loaded congestion control
+ * module
+ */
+ uint32_t congestion_control_module;
+
+ uint32_t vrf_id;
+
+ uint32_t cookie_preserve_req;
+ /* ASCONF next seq I am sending out, inits at init-tsn */
+ uint32_t asconf_seq_out;
+ uint32_t asconf_seq_out_acked;
+ /* ASCONF last received ASCONF from peer, starts at peer's TSN-1 */
+ uint32_t asconf_seq_in;
+
+ /* next seq I am sending in str reset messages */
+ uint32_t str_reset_seq_out;
+ /* next seq I am expecting in str reset messages */
+ uint32_t str_reset_seq_in;
+
+ /* various verification tag information */
+ uint32_t my_vtag; /* The tag to be used. If the assoc is
+ * re-initiated by the remote end, and I have
+ * unlocked, this will be regenerated to a
+ * new random value. */
+ uint32_t peer_vtag; /* The peers last tag */
+
+ uint32_t my_vtag_nonce;
+ uint32_t peer_vtag_nonce;
+
+ uint32_t assoc_id;
+
+ /* This is the SCTP fragmentation threshold */
+ uint32_t smallest_mtu;
+
+ /*
+ * Special hook for Fast retransmit, allows us to track the highest
+ * TSN that is NEW in this SACK if gap ack blocks are present.
+ */
+ uint32_t this_sack_highest_gap;
+
+ /*
+ * The highest consecutive TSN that has been acked by peer on my
+ * sends
+ */
+ uint32_t last_acked_seq;
+
+ /* The next TSN that I will use in sending. */
+ uint32_t sending_seq;
+
+ /* Original seq number I used ??questionable to keep?? */
+ uint32_t init_seq_number;
+
+
+ /* The Advanced Peer Ack Point, as required by the PR-SCTP */
+ /* (A1 in Section 4.2) */
+ uint32_t advanced_peer_ack_point;
+
+ /*
+ * The highest consecutive TSN at the bottom of the mapping array
+ * (for his sends).
+ */
+ uint32_t cumulative_tsn;
+ /*
+ * Used to track the mapping array and its offset bits. This MAY be
+ * lower than cumulative_tsn.
+ */
+ uint32_t mapping_array_base_tsn;
+ /*
+ * used to track highest TSN we have received and is listed in the
+ * mapping array.
+ */
+ uint32_t highest_tsn_inside_map;
+
+ /* EY - new NR variables used for nr_sack based on mapping_array */
+ uint8_t *nr_mapping_array;
+ uint32_t highest_tsn_inside_nr_map;
+
+ uint32_t last_echo_tsn;
+ uint32_t last_cwr_tsn;
+ uint32_t fast_recovery_tsn;
+ uint32_t sat_t3_recovery_tsn;
+ uint32_t tsn_last_delivered;
+ /*
+ * For the pd-api we should re-write this to be a bit more efficient. We
+ * could have multiple sctp_queued_to_read's that we are building at
+ * once. Now we only do this when we get ready to deliver to the
+ * socket buffer. Note that we depend on the fact that the struct is
+ * "stuck" on the read queue until we finish all the pd-api.
+ */
+ struct sctp_queued_to_read *control_pdapi;
+
+ uint32_t tsn_of_pdapi_last_delivered;
+ uint32_t pdapi_ppid;
+ uint32_t context;
+ uint32_t last_reset_action[SCTP_MAX_RESET_PARAMS];
+ uint32_t last_sending_seq[SCTP_MAX_RESET_PARAMS];
+ uint32_t last_base_tsnsent[SCTP_MAX_RESET_PARAMS];
+#ifdef SCTP_ASOCLOG_OF_TSNS
+ /*
+ * special log - This adds considerable size to the asoc, but
+ * provides a log that you can use to detect problems via kgdb.
+ */
+ struct sctp_tsn_log in_tsnlog[SCTP_TSN_LOG_SIZE];
+ struct sctp_tsn_log out_tsnlog[SCTP_TSN_LOG_SIZE];
+ uint32_t cumack_log[SCTP_TSN_LOG_SIZE];
+ uint32_t cumack_logsnt[SCTP_TSN_LOG_SIZE];
+ uint16_t tsn_in_at;
+ uint16_t tsn_out_at;
+ uint16_t tsn_in_wrapped;
+ uint16_t tsn_out_wrapped;
+ uint16_t cumack_log_at;
+ uint16_t cumack_log_atsnt;
+#endif /* SCTP_ASOCLOG_OF_TSNS */
+#ifdef SCTP_FS_SPEC_LOG
+ struct sctp_fs_spec_log fslog[SCTP_FS_SPEC_LOG_SIZE];
+ uint16_t fs_index;
+#endif
+
+ /*
+ * window state information and smallest MTU that I use to bound
+ * segmentation
+ */
+ uint32_t peers_rwnd;
+ uint32_t my_rwnd;
+ uint32_t my_last_reported_rwnd;
+ uint32_t sctp_frag_point;
+
+ uint32_t total_output_queue_size;
+
+ uint32_t sb_cc; /* shadow of sb_cc */
+ uint32_t sb_send_resv; /* amount reserved on a send */
+ uint32_t my_rwnd_control_len; /* shadow of sb_mbcnt used for rwnd
+ * control */
+ /* 32 bit nonce stuff */
+ uint32_t nonce_resync_tsn;
+ uint32_t nonce_wait_tsn;
+ uint32_t default_flowlabel;
+ uint32_t pr_sctp_cnt;
+ int ctrl_queue_cnt; /* could be removed REM */
+ /*
+ * All outbound datagrams queue into this list from the individual
+ * stream queue. Here they get assigned a TSN and then await
+ * sending. The stream seq comes when it is first put in the
+ * individual str queue
+ */
+ unsigned int stream_queue_cnt;
+ unsigned int send_queue_cnt;
+ unsigned int sent_queue_cnt;
+ unsigned int sent_queue_cnt_removeable;
+ /*
+ * Number on sent queue that are marked for retran until this value
+ * is 0 we only send one packet of retran'ed data.
+ */
+ unsigned int sent_queue_retran_cnt;
+
+ unsigned int size_on_reasm_queue;
+ unsigned int cnt_on_reasm_queue;
+ unsigned int fwd_tsn_cnt;
+ /* amount of data (bytes) currently in flight (on all destinations) */
+ unsigned int total_flight;
+ /* Total book size in flight */
+ unsigned int total_flight_count; /* count of chunks used with
+ * book total */
+ /* count of destination nets and list of destination nets */
+ unsigned int numnets;
+
+ /* Total error count on this association */
+ unsigned int overall_error_count;
+
+ unsigned int cnt_msg_on_sb;
+
+ /* All stream count of chunks for delivery */
+ unsigned int size_on_all_streams;
+ unsigned int cnt_on_all_streams;
+
+ /* Heart Beat delay in ticks */
+ unsigned int heart_beat_delay;
+
+ /* autoclose */
+ unsigned int sctp_autoclose_ticks;
+
+ /* how many preopen streams we have */
+ unsigned int pre_open_streams;
+
+ /* How many streams I support coming into me */
+ unsigned int max_inbound_streams;
+
+ /* the cookie life I award for any cookie, in seconds */
+ unsigned int cookie_life;
+ /* time to delay acks for */
+ unsigned int delayed_ack;
+ unsigned int old_delayed_ack;
+ unsigned int sack_freq;
+ unsigned int data_pkts_seen;
+
+ unsigned int numduptsns;
+ int dup_tsns[SCTP_MAX_DUP_TSNS];
+ unsigned int initial_init_rto_max; /* initial RTO for INIT's */
+ unsigned int initial_rto; /* initial send RTO */
+ unsigned int minrto; /* per assoc RTO-MIN */
+ unsigned int maxrto; /* per assoc RTO-MAX */
+
+ /* authentication fields */
+ sctp_auth_chklist_t *local_auth_chunks;
+ sctp_auth_chklist_t *peer_auth_chunks;
+ sctp_hmaclist_t *local_hmacs; /* local HMACs supported */
+ sctp_hmaclist_t *peer_hmacs; /* peer HMACs supported */
+ struct sctp_keyhead shared_keys; /* assoc's shared keys */
+ sctp_authinfo_t authinfo; /* randoms, cached keys */
+ /*
+ * refcnt to block freeing while a sender or receiver is busy copying
+ * user data in.
+ */
+ uint32_t refcnt;
+ uint32_t chunks_on_out_queue; /* total chunks floating around,
+ * locked by send socket buffer */
+ uint32_t peers_adaptation;
+ uint16_t peer_hmac_id; /* peer HMAC id to send */
+
+ /*
+ * Being that we have no bag to collect stale cookies, and that we
+ * really would not want to anyway.. we will count them in this
+ * counter. We of course feed them to the pigeons right away (I have
+ * always thought of pigeons as flying rats).
+ */
+ uint16_t stale_cookie_count;
+
+ /*
+ * For the partial delivery API: if invoked, these record the stream
+ * and SSN I last delivered.
+ */
+ uint16_t str_of_pdapi;
+ uint16_t ssn_of_pdapi;
+
+ /* counts of actually built streams; allocation may be more, however */
+ /* could re-arrange to optimize space here. */
+ uint16_t streamincnt;
+ uint16_t streamoutcnt;
+ uint16_t strm_realoutsize;
+ /* my maximum number of retrans of INIT and SEND */
+ /* copied from SCTP but should be individually settable */
+ uint16_t max_init_times;
+ uint16_t max_send_times;
+
+ uint16_t def_net_failure;
+
+ /*
+ * lock flag: 0 is ok to send, 1+ (doubles as a retran count) is
+ * awaiting ACK
+ */
+ uint16_t mapping_array_size;
+
+ uint16_t last_strm_seq_delivered;
+ uint16_t last_strm_no_delivered;
+
+ uint16_t last_revoke_count;
+ int16_t num_send_timers_up;
+
+ uint16_t stream_locked_on;
+ uint16_t ecn_echo_cnt_onq;
+
+ uint16_t free_chunk_cnt;
+
+ uint8_t stream_locked;
+ uint8_t authenticated; /* packet authenticated ok */
+ /*
+ * This flag indicates that a SACK needs to be sent. Initially this
+ * is 1 to send the first SACK immediately.
+ */
+ uint8_t send_sack;
+
+ /* max burst after fast retransmit completes */
+ uint8_t max_burst;
+
+ uint8_t sat_network; /* RTT is in range of sat net or greater */
+ uint8_t sat_network_lockout; /* lockout code */
+ uint8_t burst_limit_applied; /* Burst limit in effect at last send? */
+ /* flag goes on when we are doing a partial delivery api */
+ uint8_t hb_random_values[4];
+ uint8_t fragmented_delivery_inprogress;
+ uint8_t fragment_flags;
+ uint8_t last_flags_delivered;
+ uint8_t hb_ect_randombit;
+ uint8_t hb_random_idx;
+ uint8_t hb_is_disabled; /* is the hb disabled? */
+ uint8_t default_tos;
+ uint8_t asconf_del_pending; /* asconf delete last addr pending */
+
+ /* ECN Nonce stuff */
+ uint8_t receiver_nonce_sum; /* nonce I sum and put in my sack */
+ uint8_t ecn_nonce_allowed; /* Tells us if ECN nonce is on */
+ uint8_t nonce_sum_check;/* On off switch used during re-sync */
+ uint8_t nonce_wait_for_ecne; /* flag when we expect a ECN */
+ uint8_t peer_supports_ecn_nonce;
+
+ /*
+ * This value, plus all other ack'd but above cum-ack is added
+ * together to cross check against the bit that we have yet to
+ * define (probably in the SACK). When the cum-ack is updated, this
+ * sum is updated as well.
+ */
+ uint8_t nonce_sum_expect_base;
+ /* Flag to tell if ECN is allowed */
+ uint8_t ecn_allowed;
+
+ /* flag to indicate if peer can do asconf */
+ uint8_t peer_supports_asconf;
+ /* EY - flag to indicate if peer can do nr_sack */
+ uint8_t peer_supports_nr_sack;
+ /* pr-sctp support flag */
+ uint8_t peer_supports_prsctp;
+ /* peer authentication support flag */
+ uint8_t peer_supports_auth;
+ /* stream resets are supported by the peer */
+ uint8_t peer_supports_strreset;
+
+ uint8_t peer_supports_nat;
+ /*
+ * packet drop's are supported by the peer, we don't really care
+ * about this but we bookkeep it anyway.
+ */
+ uint8_t peer_supports_pktdrop;
+
+ /* Do we allow V6/V4? */
+ uint8_t ipv4_addr_legal;
+ uint8_t ipv6_addr_legal;
+ /* Address scoping flags */
+ /* scope value for IPv4 */
+ uint8_t ipv4_local_scope;
+ /* scope values for IPv6 */
+ uint8_t local_scope;
+ uint8_t site_scope;
+ /* loopback scope */
+ uint8_t loopback_scope;
+ /* flags to handle send alternate net tracking */
+ uint8_t used_alt_onsack;
+ uint8_t used_alt_asconfack;
+ uint8_t fast_retran_loss_recovery;
+ uint8_t sat_t3_loss_recovery;
+ uint8_t dropped_special_cnt;
+ uint8_t seen_a_sack_this_pkt;
+ uint8_t stream_reset_outstanding;
+ uint8_t stream_reset_out_is_outstanding;
+ uint8_t delayed_connection;
+ uint8_t ifp_had_enobuf;
+ uint8_t saw_sack_with_frags;
+ uint8_t saw_sack_with_nr_frags;
+ uint8_t in_asocid_hash;
+ uint8_t assoc_up_sent;
+ uint8_t adaptation_needed;
+ uint8_t adaptation_sent;
+ /* CMT variables */
+ uint8_t cmt_dac_pkts_rcvd;
+ uint8_t sctp_cmt_on_off;
+ uint8_t iam_blocking;
+ uint8_t cookie_how[8];
+ /* EY 05/05/08 - NR_SACK variable */
+ uint8_t sctp_nr_sack_on_off;
+ /* JRS 5/21/07 - CMT PF variable */
+ uint8_t sctp_cmt_pf;
+ /*
+ * The mapping array is used to track out-of-order sequences above
+ * last_acked_seq: 0 indicates a packet is missing, 1 that it has
+ * been received. We slide it up every time we raise last_acked_seq
+ * and zero the trailing locations out. If a TSN arrives beyond the
+ * array size (mapping_array_size), the datagram is discarded and
+ * retransmission is left to happen. (A small sketch of this
+ * bookkeeping follows the structure definition.)
+ */
+ uint32_t marked_retrans;
+ uint32_t timoinit;
+ uint32_t timodata;
+ uint32_t timosack;
+ uint32_t timoshutdown;
+ uint32_t timoheartbeat;
+ uint32_t timocookie;
+ uint32_t timoshutdownack;
+ struct timeval start_time;
+ struct timeval discontinuity_time;
+};
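+
+/*
+ * Illustrative sketch only (not part of the original sources): one way
+ * the mapping array described above can record an out-of-order TSN.
+ * The parameter names mirror the structure members; sketch_mark_tsn()
+ * is a hypothetical helper, and serial-number wrap-around of the TSN
+ * space is ignored for brevity.
+ */
+#ifdef SCTP_MAPPING_ARRAY_SKETCH
+static int
+sketch_mark_tsn(uint8_t *mapping_array, uint16_t mapping_array_size,
+    uint32_t last_acked_seq, uint32_t tsn)
+{
+	uint32_t gap;
+
+	gap = tsn - last_acked_seq - 1;	/* offset above the cum-ack */
+	if (gap >= (uint32_t)mapping_array_size * 8) {
+		/* beyond the window: discard and let retransmit happen */
+		return (-1);
+	}
+	mapping_array[gap >> 3] |= (uint8_t)(1 << (gap & 7));	/* 1 = rec'd */
+	return (0);
+}
+#endif /* SCTP_MAPPING_ARRAY_SKETCH */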
+
+#endif
diff --git a/rtems/freebsd/netinet/sctp_sysctl.c b/rtems/freebsd/netinet/sctp_sysctl.c
new file mode 100644
index 00000000..c5cf76f8
--- /dev/null
+++ b/rtems/freebsd/netinet/sctp_sysctl.c
@@ -0,0 +1,1108 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 2007, by Cisco Systems, Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/freebsd/netinet/sctp_os.h>
+#include <rtems/freebsd/netinet/sctp.h>
+#include <rtems/freebsd/netinet/sctp_constants.h>
+#include <rtems/freebsd/netinet/sctp_sysctl.h>
+#include <rtems/freebsd/netinet/sctp_pcb.h>
+#include <rtems/freebsd/netinet/sctputil.h>
+#include <rtems/freebsd/netinet/sctp_output.h>
+#include <rtems/freebsd/sys/smp.h>
+
+/*
+ * sysctl tunable variables
+ */
+
+void
+sctp_init_sysctls(void)
+{
+ SCTP_BASE_SYSCTL(sctp_sendspace) = SCTPCTL_MAXDGRAM_DEFAULT;
+ SCTP_BASE_SYSCTL(sctp_recvspace) = SCTPCTL_RECVSPACE_DEFAULT;
+ SCTP_BASE_SYSCTL(sctp_auto_asconf) = SCTPCTL_AUTOASCONF_DEFAULT;
+ SCTP_BASE_SYSCTL(sctp_multiple_asconfs) = SCTPCTL_MULTIPLEASCONFS_DEFAULT;
+ SCTP_BASE_SYSCTL(sctp_ecn_enable) = SCTPCTL_ECN_ENABLE_DEFAULT;
+ SCTP_BASE_SYSCTL(sctp_ecn_nonce) = SCTPCTL_ECN_NONCE_DEFAULT;
+ SCTP_BASE_SYSCTL(sctp_strict_sacks) = SCTPCTL_STRICT_SACKS_DEFAULT;
+#if !defined(SCTP_WITH_NO_CSUM)
+ SCTP_BASE_SYSCTL(sctp_no_csum_on_loopback) = SCTPCTL_LOOPBACK_NOCSUM_DEFAULT;
+#endif
+ SCTP_BASE_SYSCTL(sctp_strict_init) = SCTPCTL_STRICT_INIT_DEFAULT;
+ SCTP_BASE_SYSCTL(sctp_peer_chunk_oh) = SCTPCTL_PEER_CHKOH_DEFAULT;
+ SCTP_BASE_SYSCTL(sctp_max_burst_default) = SCTPCTL_MAXBURST_DEFAULT;
+ SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue) = SCTPCTL_MAXCHUNKS_DEFAULT;
+ SCTP_BASE_SYSCTL(sctp_hashtblsize) = SCTPCTL_TCBHASHSIZE_DEFAULT;
+ SCTP_BASE_SYSCTL(sctp_pcbtblsize) = SCTPCTL_PCBHASHSIZE_DEFAULT;
+ SCTP_BASE_SYSCTL(sctp_min_split_point) = SCTPCTL_MIN_SPLIT_POINT_DEFAULT;
+ SCTP_BASE_SYSCTL(sctp_chunkscale) = SCTPCTL_CHUNKSCALE_DEFAULT;
+ SCTP_BASE_SYSCTL(sctp_delayed_sack_time_default) = SCTPCTL_DELAYED_SACK_TIME_DEFAULT;
+ SCTP_BASE_SYSCTL(sctp_sack_freq_default) = SCTPCTL_SACK_FREQ_DEFAULT;
+ SCTP_BASE_SYSCTL(sctp_system_free_resc_limit) = SCTPCTL_SYS_RESOURCE_DEFAULT;
+ SCTP_BASE_SYSCTL(sctp_asoc_free_resc_limit) = SCTPCTL_ASOC_RESOURCE_DEFAULT;
+ SCTP_BASE_SYSCTL(sctp_heartbeat_interval_default) = SCTPCTL_HEARTBEAT_INTERVAL_DEFAULT;
+ SCTP_BASE_SYSCTL(sctp_pmtu_raise_time_default) = SCTPCTL_PMTU_RAISE_TIME_DEFAULT;
+ SCTP_BASE_SYSCTL(sctp_shutdown_guard_time_default) = SCTPCTL_SHUTDOWN_GUARD_TIME_DEFAULT;
+ SCTP_BASE_SYSCTL(sctp_secret_lifetime_default) = SCTPCTL_SECRET_LIFETIME_DEFAULT;
+ SCTP_BASE_SYSCTL(sctp_rto_max_default) = SCTPCTL_RTO_MAX_DEFAULT;
+ SCTP_BASE_SYSCTL(sctp_rto_min_default) = SCTPCTL_RTO_MIN_DEFAULT;
+ SCTP_BASE_SYSCTL(sctp_rto_initial_default) = SCTPCTL_RTO_INITIAL_DEFAULT;
+ SCTP_BASE_SYSCTL(sctp_init_rto_max_default) = SCTPCTL_INIT_RTO_MAX_DEFAULT;
+ SCTP_BASE_SYSCTL(sctp_valid_cookie_life_default) = SCTPCTL_VALID_COOKIE_LIFE_DEFAULT;
+ SCTP_BASE_SYSCTL(sctp_init_rtx_max_default) = SCTPCTL_INIT_RTX_MAX_DEFAULT;
+ SCTP_BASE_SYSCTL(sctp_assoc_rtx_max_default) = SCTPCTL_ASSOC_RTX_MAX_DEFAULT;
+ SCTP_BASE_SYSCTL(sctp_path_rtx_max_default) = SCTPCTL_PATH_RTX_MAX_DEFAULT;
+ SCTP_BASE_SYSCTL(sctp_add_more_threshold) = SCTPCTL_ADD_MORE_ON_OUTPUT_DEFAULT;
+ SCTP_BASE_SYSCTL(sctp_nr_outgoing_streams_default) = SCTPCTL_OUTGOING_STREAMS_DEFAULT;
+ SCTP_BASE_SYSCTL(sctp_cmt_on_off) = SCTPCTL_CMT_ON_OFF_DEFAULT;
+ /* EY */
+ SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) = SCTPCTL_NR_SACK_ON_OFF_DEFAULT;
+ SCTP_BASE_SYSCTL(sctp_cmt_use_dac) = SCTPCTL_CMT_USE_DAC_DEFAULT;
+ SCTP_BASE_SYSCTL(sctp_cmt_pf) = SCTPCTL_CMT_PF_DEFAULT;
+ SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst) = SCTPCTL_CWND_MAXBURST_DEFAULT;
+ SCTP_BASE_SYSCTL(sctp_early_fr) = SCTPCTL_EARLY_FAST_RETRAN_DEFAULT;
+ SCTP_BASE_SYSCTL(sctp_early_fr_msec) = SCTPCTL_EARLY_FAST_RETRAN_MSEC_DEFAULT;
+ SCTP_BASE_SYSCTL(sctp_asconf_auth_nochk) = SCTPCTL_ASCONF_AUTH_NOCHK_DEFAULT;
+ SCTP_BASE_SYSCTL(sctp_auth_disable) = SCTPCTL_AUTH_DISABLE_DEFAULT;
+ SCTP_BASE_SYSCTL(sctp_nat_friendly) = SCTPCTL_NAT_FRIENDLY_DEFAULT;
+ SCTP_BASE_SYSCTL(sctp_L2_abc_variable) = SCTPCTL_ABC_L_VAR_DEFAULT;
+ SCTP_BASE_SYSCTL(sctp_mbuf_threshold_count) = SCTPCTL_MAX_CHAINED_MBUFS_DEFAULT;
+ SCTP_BASE_SYSCTL(sctp_do_drain) = SCTPCTL_DO_SCTP_DRAIN_DEFAULT;
+ SCTP_BASE_SYSCTL(sctp_hb_maxburst) = SCTPCTL_HB_MAX_BURST_DEFAULT;
+ SCTP_BASE_SYSCTL(sctp_abort_if_one_2_one_hits_limit) = SCTPCTL_ABORT_AT_LIMIT_DEFAULT;
+ SCTP_BASE_SYSCTL(sctp_strict_data_order) = SCTPCTL_STRICT_DATA_ORDER_DEFAULT;
+ SCTP_BASE_SYSCTL(sctp_min_residual) = SCTPCTL_MIN_RESIDUAL_DEFAULT;
+ SCTP_BASE_SYSCTL(sctp_max_retran_chunk) = SCTPCTL_MAX_RETRAN_CHUNK_DEFAULT;
+ SCTP_BASE_SYSCTL(sctp_logging_level) = SCTPCTL_LOGGING_LEVEL_DEFAULT;
+ /* JRS - Variable for default congestion control module */
+ SCTP_BASE_SYSCTL(sctp_default_cc_module) = SCTPCTL_DEFAULT_CC_MODULE_DEFAULT;
+ SCTP_BASE_SYSCTL(sctp_default_frag_interleave) = SCTPCTL_DEFAULT_FRAG_INTERLEAVE_DEFAULT;
+ SCTP_BASE_SYSCTL(sctp_mobility_base) = SCTPCTL_MOBILITY_BASE_DEFAULT;
+ SCTP_BASE_SYSCTL(sctp_mobility_fasthandoff) = SCTPCTL_MOBILITY_FASTHANDOFF_DEFAULT;
+ SCTP_BASE_SYSCTL(sctp_vtag_time_wait) = SCTPCTL_TIME_WAIT_DEFAULT;
+ SCTP_BASE_SYSCTL(sctp_buffer_splitting) = SCTPCTL_BUFFER_SPLITTING_DEFAULT;
+ SCTP_BASE_SYSCTL(sctp_initial_cwnd) = SCTPCTL_INITIAL_CWND_DEFAULT;
+#if defined(SCTP_LOCAL_TRACE_BUF)
+ memset(&SCTP_BASE_SYSCTL(sctp_log), 0, sizeof(struct sctp_log));
+#endif
+ SCTP_BASE_SYSCTL(sctp_udp_tunneling_for_client_enable) = SCTPCTL_UDP_TUNNELING_FOR_CLIENT_ENABLE_DEFAULT;
+ SCTP_BASE_SYSCTL(sctp_udp_tunneling_port) = SCTPCTL_UDP_TUNNELING_PORT_DEFAULT;
+ SCTP_BASE_SYSCTL(sctp_enable_sack_immediately) = SCTPCTL_SACK_IMMEDIATELY_ENABLE_DEFAULT;
+ SCTP_BASE_SYSCTL(sctp_inits_include_nat_friendly) = SCTPCTL_NAT_FRIENDLY_INITS_DEFAULT;
+#if defined(SCTP_DEBUG)
+ SCTP_BASE_SYSCTL(sctp_debug_on) = SCTPCTL_DEBUG_DEFAULT;
+#endif
+#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+ SCTP_BASE_SYSCTL(sctp_output_unlocked) = SCTPCTL_OUTPUT_UNLOCKED_DEFAULT;
+#endif
+}
+
+
+/* It returns an upper limit. No filtering is done here */
+static unsigned int
+number_of_addresses(struct sctp_inpcb *inp)
+{
+ int cnt;
+ struct sctp_vrf *vrf;
+ struct sctp_ifn *sctp_ifn;
+ struct sctp_ifa *sctp_ifa;
+ struct sctp_laddr *laddr;
+
+ cnt = 0;
+ /* neither Mac OS X nor FreeBSD supports multiple routing functions */
+ if ((vrf = sctp_find_vrf(inp->def_vrf_id)) == NULL) {
+ return (0);
+ }
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
+ LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
+ LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
+ if ((sctp_ifa->address.sa.sa_family == AF_INET) ||
+ (sctp_ifa->address.sa.sa_family == AF_INET6)) {
+ cnt++;
+ }
+ }
+ }
+ } else {
+ LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
+ if ((laddr->ifa->address.sa.sa_family == AF_INET) ||
+ (laddr->ifa->address.sa.sa_family == AF_INET6)) {
+ cnt++;
+ }
+ }
+ }
+ return (cnt);
+}
+
+static int
+copy_out_local_addresses(struct sctp_inpcb *inp, struct sctp_tcb *stcb, struct sysctl_req *req)
+{
+ struct sctp_ifn *sctp_ifn;
+ struct sctp_ifa *sctp_ifa;
+ int loopback_scope, ipv4_local_scope, local_scope, site_scope;
+ int ipv4_addr_legal, ipv6_addr_legal;
+ struct sctp_vrf *vrf;
+ struct xsctp_laddr xladdr;
+ struct sctp_laddr *laddr;
+ int error;
+
+ /* Turn on all the appropriate scopes */
+ if (stcb) {
+ /* use association specific values */
+ loopback_scope = stcb->asoc.loopback_scope;
+ ipv4_local_scope = stcb->asoc.ipv4_local_scope;
+ local_scope = stcb->asoc.local_scope;
+ site_scope = stcb->asoc.site_scope;
+ } else {
+ /* use generic values for endpoints */
+ loopback_scope = 1;
+ ipv4_local_scope = 1;
+ local_scope = 1;
+ site_scope = 1;
+ }
+
+ /* use only address families of interest */
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
+ ipv6_addr_legal = 1;
+ if (SCTP_IPV6_V6ONLY(inp)) {
+ ipv4_addr_legal = 0;
+ } else {
+ ipv4_addr_legal = 1;
+ }
+ } else {
+ ipv4_addr_legal = 1;
+ ipv6_addr_legal = 0;
+ }
+
+ /* neither Mac OS X nor FreeBSD supports multiple routing functions */
+ if ((vrf = sctp_find_vrf(inp->def_vrf_id)) == NULL) {
+ SCTP_INP_RUNLOCK(inp);
+ SCTP_INP_INFO_RUNLOCK();
+ return (-1);
+ }
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
+ LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
+ if ((loopback_scope == 0) && SCTP_IFN_IS_IFT_LOOP(sctp_ifn))
+ /* Skip loopback if loopback_scope not set */
+ continue;
+ LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
+ if (stcb) {
+ /* ignore if blacklisted at association level */
+ if (sctp_is_addr_restricted(stcb, sctp_ifa))
+ continue;
+ }
+ switch (sctp_ifa->address.sa.sa_family) {
+ case AF_INET:
+ if (ipv4_addr_legal) {
+ struct sockaddr_in *sin;
+
+ sin = (struct sockaddr_in *)&sctp_ifa->address.sa;
+ if (sin->sin_addr.s_addr == 0)
+ continue;
+ if ((ipv4_local_scope == 0) && (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr)))
+ continue;
+ } else {
+ continue;
+ }
+ break;
+#ifdef INET6
+ case AF_INET6:
+ if (ipv6_addr_legal) {
+ struct sockaddr_in6 *sin6;
+
+ sin6 = (struct sockaddr_in6 *)&sctp_ifa->address.sa;
+ if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr))
+ continue;
+ if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
+ if (local_scope == 0)
+ continue;
+ if (sin6->sin6_scope_id == 0) {
+ /* bad link-local address */
+ if (sa6_recoverscope(sin6) != 0)
+ continue;
+ }
+ }
+ if ((site_scope == 0) && (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr)))
+ continue;
+ } else {
+ continue;
+ }
+ break;
+#endif
+ default:
+ continue;
+ }
+ memset((void *)&xladdr, 0, sizeof(struct xsctp_laddr));
+ memcpy((void *)&xladdr.address, (const void *)&sctp_ifa->address, sizeof(union sctp_sockstore));
+ SCTP_INP_RUNLOCK(inp);
+ SCTP_INP_INFO_RUNLOCK();
+ error = SYSCTL_OUT(req, &xladdr, sizeof(struct xsctp_laddr));
+ if (error) {
+ return (error);
+ } else {
+ SCTP_INP_INFO_RLOCK();
+ SCTP_INP_RLOCK(inp);
+ }
+ }
+ }
+ } else {
+ LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
+ /* ignore if blacklisted at association level */
+ if (stcb && sctp_is_addr_restricted(stcb, laddr->ifa))
+ continue;
+ memset((void *)&xladdr, 0, sizeof(struct xsctp_laddr));
+ memcpy((void *)&xladdr.address, (const void *)&laddr->ifa->address, sizeof(union sctp_sockstore));
+ xladdr.start_time.tv_sec = (uint32_t) laddr->start_time.tv_sec;
+ xladdr.start_time.tv_usec = (uint32_t) laddr->start_time.tv_usec;
+ SCTP_INP_RUNLOCK(inp);
+ SCTP_INP_INFO_RUNLOCK();
+ error = SYSCTL_OUT(req, &xladdr, sizeof(struct xsctp_laddr));
+ if (error) {
+ return (error);
+ } else {
+ SCTP_INP_INFO_RLOCK();
+ SCTP_INP_RLOCK(inp);
+ }
+ }
+ }
+ memset((void *)&xladdr, 0, sizeof(struct xsctp_laddr));
+ xladdr.last = 1;
+ SCTP_INP_RUNLOCK(inp);
+ SCTP_INP_INFO_RUNLOCK();
+ error = SYSCTL_OUT(req, &xladdr, sizeof(struct xsctp_laddr));
+
+ if (error) {
+ return (error);
+ } else {
+ SCTP_INP_INFO_RLOCK();
+ SCTP_INP_RLOCK(inp);
+ return (0);
+ }
+}
+
+/*
+ * sysctl functions
+ */
+static int
+sctp_assoclist(SYSCTL_HANDLER_ARGS)
+{
+ unsigned int number_of_endpoints;
+ unsigned int number_of_local_addresses;
+ unsigned int number_of_associations;
+ unsigned int number_of_remote_addresses;
+ unsigned int n;
+ int error;
+ struct sctp_inpcb *inp;
+ struct sctp_tcb *stcb;
+ struct sctp_nets *net;
+ struct xsctp_inpcb xinpcb;
+ struct xsctp_tcb xstcb;
+ struct xsctp_raddr xraddr;
+ struct socket *so;
+
+ number_of_endpoints = 0;
+ number_of_local_addresses = 0;
+ number_of_associations = 0;
+ number_of_remote_addresses = 0;
+
+ SCTP_INP_INFO_RLOCK();
+ if (req->oldptr == USER_ADDR_NULL) {
+ LIST_FOREACH(inp, &SCTP_BASE_INFO(listhead), sctp_list) {
+ SCTP_INP_RLOCK(inp);
+ number_of_endpoints++;
+ number_of_local_addresses += number_of_addresses(inp);
+ LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
+ number_of_associations++;
+ number_of_local_addresses += number_of_addresses(inp);
+ TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+ number_of_remote_addresses++;
+ }
+ }
+ SCTP_INP_RUNLOCK(inp);
+ }
+ SCTP_INP_INFO_RUNLOCK();
+ n = (number_of_endpoints + 1) * sizeof(struct xsctp_inpcb) +
+ (number_of_local_addresses + number_of_endpoints + number_of_associations) * sizeof(struct xsctp_laddr) +
+ (number_of_associations + number_of_endpoints) * sizeof(struct xsctp_tcb) +
+ (number_of_remote_addresses + number_of_associations) * sizeof(struct xsctp_raddr);
+
+ /* request some more memory than needed */
+ req->oldidx = (n + n / 8);
+ return 0;
+ }
+ if (req->newptr != USER_ADDR_NULL) {
+ SCTP_INP_INFO_RUNLOCK();
+ SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_SYSCTL, EPERM);
+ return EPERM;
+ }
+ LIST_FOREACH(inp, &SCTP_BASE_INFO(listhead), sctp_list) {
+ SCTP_INP_RLOCK(inp);
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) {
+ /* if it's all gone it is being freed - skip it */
+ goto skip;
+ }
+ xinpcb.last = 0;
+ xinpcb.local_port = ntohs(inp->sctp_lport);
+ xinpcb.flags = inp->sctp_flags;
+ xinpcb.features = inp->sctp_features;
+ xinpcb.total_sends = inp->total_sends;
+ xinpcb.total_recvs = inp->total_recvs;
+ xinpcb.total_nospaces = inp->total_nospaces;
+ xinpcb.fragmentation_point = inp->sctp_frag_point;
+ so = inp->sctp_socket;
+ if ((so == NULL) ||
+ (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
+ xinpcb.qlen = 0;
+ xinpcb.maxqlen = 0;
+ } else {
+ xinpcb.qlen = so->so_qlen;
+ xinpcb.maxqlen = so->so_qlimit;
+ }
+ SCTP_INP_INCR_REF(inp);
+ SCTP_INP_RUNLOCK(inp);
+ SCTP_INP_INFO_RUNLOCK();
+ error = SYSCTL_OUT(req, &xinpcb, sizeof(struct xsctp_inpcb));
+ if (error) {
+ SCTP_INP_DECR_REF(inp);
+ return error;
+ }
+ SCTP_INP_INFO_RLOCK();
+ SCTP_INP_RLOCK(inp);
+ error = copy_out_local_addresses(inp, NULL, req);
+ if (error) {
+ SCTP_INP_DECR_REF(inp);
+ return error;
+ }
+ LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
+ SCTP_TCB_LOCK(stcb);
+ atomic_add_int(&stcb->asoc.refcnt, 1);
+ SCTP_TCB_UNLOCK(stcb);
+ xstcb.last = 0;
+ xstcb.local_port = ntohs(inp->sctp_lport);
+ xstcb.remote_port = ntohs(stcb->rport);
+ if (stcb->asoc.primary_destination != NULL)
+ xstcb.primary_addr = stcb->asoc.primary_destination->ro._l_addr;
+ xstcb.heartbeat_interval = stcb->asoc.heart_beat_delay;
+ xstcb.state = SCTP_GET_STATE(&stcb->asoc); /* FIXME */
+ /* 7.0 does not support these */
+ xstcb.assoc_id = sctp_get_associd(stcb);
+ xstcb.peers_rwnd = stcb->asoc.peers_rwnd;
+ xstcb.in_streams = stcb->asoc.streamincnt;
+ xstcb.out_streams = stcb->asoc.streamoutcnt;
+ xstcb.max_nr_retrans = stcb->asoc.overall_error_count;
+ xstcb.primary_process = 0; /* not really supported
+ * yet */
+ xstcb.T1_expireries = stcb->asoc.timoinit + stcb->asoc.timocookie;
+ xstcb.T2_expireries = stcb->asoc.timoshutdown + stcb->asoc.timoshutdownack;
+ xstcb.retransmitted_tsns = stcb->asoc.marked_retrans;
+ xstcb.start_time.tv_sec = (uint32_t) stcb->asoc.start_time.tv_sec;
+ xstcb.start_time.tv_usec = (uint32_t) stcb->asoc.start_time.tv_usec;
+ xstcb.discontinuity_time.tv_sec = (uint32_t) stcb->asoc.discontinuity_time.tv_sec;
+ xstcb.discontinuity_time.tv_usec = (uint32_t) stcb->asoc.discontinuity_time.tv_usec;
+ xstcb.total_sends = stcb->total_sends;
+ xstcb.total_recvs = stcb->total_recvs;
+ xstcb.local_tag = stcb->asoc.my_vtag;
+ xstcb.remote_tag = stcb->asoc.peer_vtag;
+ xstcb.initial_tsn = stcb->asoc.init_seq_number;
+ xstcb.highest_tsn = stcb->asoc.sending_seq - 1;
+ xstcb.cumulative_tsn = stcb->asoc.last_acked_seq;
+ xstcb.cumulative_tsn_ack = stcb->asoc.cumulative_tsn;
+ xstcb.mtu = stcb->asoc.smallest_mtu;
+ xstcb.refcnt = stcb->asoc.refcnt;
+ SCTP_INP_RUNLOCK(inp);
+ SCTP_INP_INFO_RUNLOCK();
+ error = SYSCTL_OUT(req, &xstcb, sizeof(struct xsctp_tcb));
+ if (error) {
+ SCTP_INP_DECR_REF(inp);
+ atomic_subtract_int(&stcb->asoc.refcnt, 1);
+ return error;
+ }
+ SCTP_INP_INFO_RLOCK();
+ SCTP_INP_RLOCK(inp);
+ error = copy_out_local_addresses(inp, stcb, req);
+ if (error) {
+ SCTP_INP_DECR_REF(inp);
+ atomic_subtract_int(&stcb->asoc.refcnt, 1);
+ return error;
+ }
+ TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+ xraddr.last = 0;
+ xraddr.address = net->ro._l_addr;
+ xraddr.active = ((net->dest_state & SCTP_ADDR_REACHABLE) == SCTP_ADDR_REACHABLE);
+ xraddr.confirmed = ((net->dest_state & SCTP_ADDR_UNCONFIRMED) == 0);
+ xraddr.heartbeat_enabled = ((net->dest_state & SCTP_ADDR_NOHB) == 0);
+ xraddr.rto = net->RTO;
+ xraddr.max_path_rtx = net->failure_threshold;
+ xraddr.rtx = net->marked_retrans;
+ xraddr.error_counter = net->error_count;
+ xraddr.cwnd = net->cwnd;
+ xraddr.flight_size = net->flight_size;
+ xraddr.mtu = net->mtu;
+ xraddr.rtt = net->rtt;
+ xraddr.start_time.tv_sec = (uint32_t) net->start_time.tv_sec;
+ xraddr.start_time.tv_usec = (uint32_t) net->start_time.tv_usec;
+ SCTP_INP_RUNLOCK(inp);
+ SCTP_INP_INFO_RUNLOCK();
+ error = SYSCTL_OUT(req, &xraddr, sizeof(struct xsctp_raddr));
+ if (error) {
+ SCTP_INP_DECR_REF(inp);
+ atomic_subtract_int(&stcb->asoc.refcnt, 1);
+ return error;
+ }
+ SCTP_INP_INFO_RLOCK();
+ SCTP_INP_RLOCK(inp);
+ }
+ atomic_subtract_int(&stcb->asoc.refcnt, 1);
+ memset((void *)&xraddr, 0, sizeof(struct xsctp_raddr));
+ xraddr.last = 1;
+ SCTP_INP_RUNLOCK(inp);
+ SCTP_INP_INFO_RUNLOCK();
+ error = SYSCTL_OUT(req, &xraddr, sizeof(struct xsctp_raddr));
+ if (error) {
+ SCTP_INP_DECR_REF(inp);
+ return error;
+ }
+ SCTP_INP_INFO_RLOCK();
+ SCTP_INP_RLOCK(inp);
+ }
+ SCTP_INP_DECR_REF(inp);
+ SCTP_INP_RUNLOCK(inp);
+ SCTP_INP_INFO_RUNLOCK();
+ memset((void *)&xstcb, 0, sizeof(struct xsctp_tcb));
+ xstcb.last = 1;
+ error = SYSCTL_OUT(req, &xstcb, sizeof(struct xsctp_tcb));
+ if (error) {
+ return error;
+ }
+skip:
+ SCTP_INP_INFO_RLOCK();
+ }
+ SCTP_INP_INFO_RUNLOCK();
+
+ memset((void *)&xinpcb, 0, sizeof(struct xsctp_inpcb));
+ xinpcb.last = 1;
+ error = SYSCTL_OUT(req, &xinpcb, sizeof(struct xsctp_inpcb));
+ return error;
+}
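+
+/*
+ * Hedged usage sketch (hypothetical userland helper, never compiled
+ * here): consumers drive the two-phase protocol implemented above. A
+ * first sysctlbyname() call with a NULL buffer takes the
+ * req->oldptr == NULL branch and returns the padded size estimate; the
+ * second call fills the buffer, which is then walked as
+ * xsctp_inpcb/xsctp_tcb/xsctp_raddr records terminated by entries with
+ * .last set.
+ */
+#if 0
+#include <sys/types.h>
+#include <sys/sysctl.h>
+#include <stdlib.h>
+
+static void *
+sketch_read_assoclist(size_t *lenp)
+{
+	void *buf;
+	size_t len = 0;
+
+	/* probe: a NULL old pointer asks the handler for a size estimate */
+	if (sysctlbyname("net.inet.sctp.assoclist", NULL, &len, NULL, 0) == -1)
+		return (NULL);
+	if ((buf = malloc(len)) == NULL)
+		return (NULL);
+	if (sysctlbyname("net.inet.sctp.assoclist", buf, &len, NULL, 0) == -1) {
+		free(buf);
+		return (NULL);
+	}
+	*lenp = len;
+	return (buf);
+}
+#endif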
+
+
+#define RANGECHK(var, min, max) \
+ if ((var) < (min)) { (var) = (min); } \
+ else if ((var) > (max)) { (var) = (max); }
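+
+/*
+ * Aside (a sketch, not the form used below): because RANGECHK expands
+ * to a bare if/else, it can bind to the wrong else after an unbraced
+ * if statement. The usual hardening is the do { } while (0) idiom:
+ *
+ *	#define RANGECHK(var, min, max) \
+ *		do { \
+ *			if ((var) < (min)) \
+ *				(var) = (min); \
+ *			else if ((var) > (max)) \
+ *				(var) = (max); \
+ *		} while (0)
+ */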
+
+static int
+sysctl_sctp_udp_tunneling_check(SYSCTL_HANDLER_ARGS)
+{
+ int error;
+ uint32_t old_sctp_udp_tunneling_port;
+
+ SCTP_INP_INFO_RLOCK();
+ old_sctp_udp_tunneling_port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port);
+ SCTP_INP_INFO_RUNLOCK();
+ error = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2, req);
+ if (error == 0) {
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port), SCTPCTL_UDP_TUNNELING_PORT_MIN, SCTPCTL_UDP_TUNNELING_PORT_MAX);
+ if (old_sctp_udp_tunneling_port == SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)) {
+ error = 0;
+ goto out;
+ }
+ SCTP_INP_INFO_WLOCK();
+ if (old_sctp_udp_tunneling_port) {
+ sctp_over_udp_stop();
+ }
+ if (SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)) {
+ if (sctp_over_udp_start()) {
+ SCTP_BASE_SYSCTL(sctp_udp_tunneling_port) = 0;
+ }
+ }
+ SCTP_INP_INFO_WUNLOCK();
+ }
+out:
+ return (error);
+}
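+
+/*
+ * Hedged usage sketch (hypothetical userland helper): a write through
+ * this OID is what drives the handler above, i.e. sysctl_handle_int(),
+ * the range check, and then the sctp_over_udp_stop()/
+ * sctp_over_udp_start() sequence under the write lock.
+ */
+#if 0
+#include <sys/types.h>
+#include <sys/sysctl.h>
+#include <stdint.h>
+
+static int
+sketch_set_udp_tunneling_port(uint32_t port)
+{
+	/* a non-NULL new pointer makes sysctl_handle_int() store the value */
+	return (sysctlbyname("net.inet.sctp.udp_tunneling_port",
+	    NULL, NULL, &port, sizeof(port)));
+}
+#endif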
+
+
+static int
+sysctl_sctp_check(SYSCTL_HANDLER_ARGS)
+{
+ int error;
+
+ error = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2, req);
+ if (error == 0) {
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_sendspace), SCTPCTL_MAXDGRAM_MIN, SCTPCTL_MAXDGRAM_MAX);
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_recvspace), SCTPCTL_RECVSPACE_MIN, SCTPCTL_RECVSPACE_MAX);
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_auto_asconf), SCTPCTL_AUTOASCONF_MIN, SCTPCTL_AUTOASCONF_MAX);
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_ecn_enable), SCTPCTL_ECN_ENABLE_MIN, SCTPCTL_ECN_ENABLE_MAX);
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_ecn_nonce), SCTPCTL_ECN_NONCE_MIN, SCTPCTL_ECN_NONCE_MAX);
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_strict_sacks), SCTPCTL_STRICT_SACKS_MIN, SCTPCTL_STRICT_SACKS_MAX);
+#if !defined(SCTP_WITH_NO_CSUM)
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_no_csum_on_loopback), SCTPCTL_LOOPBACK_NOCSUM_MIN, SCTPCTL_LOOPBACK_NOCSUM_MAX);
+#endif
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_strict_init), SCTPCTL_STRICT_INIT_MIN, SCTPCTL_STRICT_INIT_MAX);
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_peer_chunk_oh), SCTPCTL_PEER_CHKOH_MIN, SCTPCTL_PEER_CHKOH_MAX);
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_max_burst_default), SCTPCTL_MAXBURST_MIN, SCTPCTL_MAXBURST_MAX);
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue), SCTPCTL_MAXCHUNKS_MIN, SCTPCTL_MAXCHUNKS_MAX);
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_hashtblsize), SCTPCTL_TCBHASHSIZE_MIN, SCTPCTL_TCBHASHSIZE_MAX);
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_pcbtblsize), SCTPCTL_PCBHASHSIZE_MIN, SCTPCTL_PCBHASHSIZE_MAX);
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_min_split_point), SCTPCTL_MIN_SPLIT_POINT_MIN, SCTPCTL_MIN_SPLIT_POINT_MAX);
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_chunkscale), SCTPCTL_CHUNKSCALE_MIN, SCTPCTL_CHUNKSCALE_MAX);
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_delayed_sack_time_default), SCTPCTL_DELAYED_SACK_TIME_MIN, SCTPCTL_DELAYED_SACK_TIME_MAX);
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_sack_freq_default), SCTPCTL_SACK_FREQ_MIN, SCTPCTL_SACK_FREQ_MAX);
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_system_free_resc_limit), SCTPCTL_SYS_RESOURCE_MIN, SCTPCTL_SYS_RESOURCE_MAX);
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_asoc_free_resc_limit), SCTPCTL_ASOC_RESOURCE_MIN, SCTPCTL_ASOC_RESOURCE_MAX);
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_heartbeat_interval_default), SCTPCTL_HEARTBEAT_INTERVAL_MIN, SCTPCTL_HEARTBEAT_INTERVAL_MAX);
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_pmtu_raise_time_default), SCTPCTL_PMTU_RAISE_TIME_MIN, SCTPCTL_PMTU_RAISE_TIME_MAX);
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_shutdown_guard_time_default), SCTPCTL_SHUTDOWN_GUARD_TIME_MIN, SCTPCTL_SHUTDOWN_GUARD_TIME_MAX);
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_secret_lifetime_default), SCTPCTL_SECRET_LIFETIME_MIN, SCTPCTL_SECRET_LIFETIME_MAX);
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_rto_max_default), SCTPCTL_RTO_MAX_MIN, SCTPCTL_RTO_MAX_MAX);
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_rto_min_default), SCTPCTL_RTO_MIN_MIN, SCTPCTL_RTO_MIN_MAX);
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_rto_initial_default), SCTPCTL_RTO_INITIAL_MIN, SCTPCTL_RTO_INITIAL_MAX);
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_init_rto_max_default), SCTPCTL_INIT_RTO_MAX_MIN, SCTPCTL_INIT_RTO_MAX_MAX);
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_valid_cookie_life_default), SCTPCTL_VALID_COOKIE_LIFE_MIN, SCTPCTL_VALID_COOKIE_LIFE_MAX);
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_init_rtx_max_default), SCTPCTL_INIT_RTX_MAX_MIN, SCTPCTL_INIT_RTX_MAX_MAX);
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_assoc_rtx_max_default), SCTPCTL_ASSOC_RTX_MAX_MIN, SCTPCTL_ASSOC_RTX_MAX_MAX);
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_path_rtx_max_default), SCTPCTL_PATH_RTX_MAX_MIN, SCTPCTL_PATH_RTX_MAX_MAX);
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_add_more_threshold), SCTPCTL_ADD_MORE_ON_OUTPUT_MIN, SCTPCTL_ADD_MORE_ON_OUTPUT_MAX);
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_nr_outgoing_streams_default), SCTPCTL_OUTGOING_STREAMS_MIN, SCTPCTL_OUTGOING_STREAMS_MAX);
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_cmt_on_off), SCTPCTL_CMT_ON_OFF_MIN, SCTPCTL_CMT_ON_OFF_MAX);
+ /* EY */
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_nr_sack_on_off), SCTPCTL_NR_SACK_ON_OFF_MIN, SCTPCTL_NR_SACK_ON_OFF_MAX);
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_cmt_use_dac), SCTPCTL_CMT_USE_DAC_MIN, SCTPCTL_CMT_USE_DAC_MAX);
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_cmt_pf), SCTPCTL_CMT_PF_MIN, SCTPCTL_CMT_PF_MAX);
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst), SCTPCTL_CWND_MAXBURST_MIN, SCTPCTL_CWND_MAXBURST_MAX);
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_early_fr), SCTPCTL_EARLY_FAST_RETRAN_MIN, SCTPCTL_EARLY_FAST_RETRAN_MAX);
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_early_fr_msec), SCTPCTL_EARLY_FAST_RETRAN_MSEC_MIN, SCTPCTL_EARLY_FAST_RETRAN_MSEC_MAX);
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_asconf_auth_nochk), SCTPCTL_ASCONF_AUTH_NOCHK_MIN, SCTPCTL_ASCONF_AUTH_NOCHK_MAX);
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_auth_disable), SCTPCTL_AUTH_DISABLE_MIN, SCTPCTL_AUTH_DISABLE_MAX);
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_nat_friendly), SCTPCTL_NAT_FRIENDLY_MIN, SCTPCTL_NAT_FRIENDLY_MAX);
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_L2_abc_variable), SCTPCTL_ABC_L_VAR_MIN, SCTPCTL_ABC_L_VAR_MAX);
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_mbuf_threshold_count), SCTPCTL_MAX_CHAINED_MBUFS_MIN, SCTPCTL_MAX_CHAINED_MBUFS_MAX);
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_do_drain), SCTPCTL_DO_SCTP_DRAIN_MIN, SCTPCTL_DO_SCTP_DRAIN_MAX);
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_hb_maxburst), SCTPCTL_HB_MAX_BURST_MIN, SCTPCTL_HB_MAX_BURST_MAX);
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_abort_if_one_2_one_hits_limit), SCTPCTL_ABORT_AT_LIMIT_MIN, SCTPCTL_ABORT_AT_LIMIT_MAX);
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_strict_data_order), SCTPCTL_STRICT_DATA_ORDER_MIN, SCTPCTL_STRICT_DATA_ORDER_MAX);
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_min_residual), SCTPCTL_MIN_RESIDUAL_MIN, SCTPCTL_MIN_RESIDUAL_MAX);
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_max_retran_chunk), SCTPCTL_MAX_RETRAN_CHUNK_MIN, SCTPCTL_MAX_RETRAN_CHUNK_MAX);
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_logging_level), SCTPCTL_LOGGING_LEVEL_MIN, SCTPCTL_LOGGING_LEVEL_MAX);
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_default_cc_module), SCTPCTL_DEFAULT_CC_MODULE_MIN, SCTPCTL_DEFAULT_CC_MODULE_MAX);
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_default_frag_interleave), SCTPCTL_DEFAULT_FRAG_INTERLEAVE_MIN, SCTPCTL_DEFAULT_FRAG_INTERLEAVE_MAX);
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_vtag_time_wait), SCTPCTL_TIME_WAIT_MIN, SCTPCTL_TIME_WAIT_MAX);
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_buffer_splitting), SCTPCTL_BUFFER_SPLITTING_MIN, SCTPCTL_BUFFER_SPLITTING_MAX);
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_initial_cwnd), SCTPCTL_INITIAL_CWND_MIN, SCTPCTL_INITIAL_CWND_MAX);
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_mobility_base), SCTPCTL_MOBILITY_BASE_MIN, SCTPCTL_MOBILITY_BASE_MAX);
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_mobility_fasthandoff), SCTPCTL_MOBILITY_FASTHANDOFF_MIN, SCTPCTL_MOBILITY_FASTHANDOFF_MAX);
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_udp_tunneling_for_client_enable), SCTPCTL_UDP_TUNNELING_FOR_CLIENT_ENABLE_MIN, SCTPCTL_UDP_TUNNELING_FOR_CLIENT_ENABLE_MAX);
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_enable_sack_immediately), SCTPCTL_SACK_IMMEDIATELY_ENABLE_MIN, SCTPCTL_SACK_IMMEDIATELY_ENABLE_MAX);
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_inits_include_nat_friendly), SCTPCTL_NAT_FRIENDLY_INITS_MIN, SCTPCTL_NAT_FRIENDLY_INITS_MAX);
+
+#ifdef SCTP_DEBUG
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_debug_on), SCTPCTL_DEBUG_MIN, SCTPCTL_DEBUG_MAX);
+#endif
+#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+ RANGECHK(SCTP_BASE_SYSCTL(sctp_output_unlocked), SCTPCTL_OUTPUT_UNLOCKED_MIN, SCTPCTL_OUTPUT_UNLOCKED_MAX);
+#endif
+ }
+ return (error);
+}
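+
+/*
+ * Hedged illustration (hypothetical userland helper): a consequence of
+ * the RANGECHK clamping above is that an out-of-range write succeeds
+ * silently with the clamped value, so a caller should read the
+ * variable back to learn what actually took effect.
+ */
+#if 0
+#include <sys/types.h>
+#include <sys/sysctl.h>
+#include <stdint.h>
+
+static int
+sketch_set_and_confirm(const char *oid, uint32_t want, uint32_t *got)
+{
+	size_t len = sizeof(*got);
+
+	/* accepted even if `want' lies outside the [MIN, MAX] range */
+	if (sysctlbyname(oid, NULL, NULL, &want, sizeof(want)) == -1)
+		return (-1);
+	/* read back the possibly clamped value */
+	return (sysctlbyname(oid, got, &len, NULL, 0));
+}
+#endif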
+
+#if defined(__FreeBSD__) && defined(SMP) && defined(SCTP_USE_PERCPU_STAT)
+static int
+sysctl_stat_get(SYSCTL_HANDLER_ARGS)
+{
+ int cpu, error;
+ struct sctpstat sb, *sarry;
+
+ memset(&sb, 0, sizeof(sb));
+ for (cpu = 0; cpu < mp_ncpus; cpu++) {
+ sarry = &SCTP_BASE_STATS[cpu];
+ if (sarry->sctps_discontinuitytime.tv_sec > sb.sctps_discontinuitytime.tv_sec) {
+ sb.sctps_discontinuitytime.tv_sec = sarry->sctps_discontinuitytime.tv_sec;
+ sb.sctps_discontinuitytime.tv_usec = sarry->sctps_discontinuitytime.tv_usec;
+ }
+ sb.sctps_currestab += sarry->sctps_currestab;
+ sb.sctps_activeestab += sarry->sctps_activeestab;
+ sb.sctps_restartestab += sarry->sctps_restartestab;
+ sb.sctps_collisionestab += sarry->sctps_collisionestab;
+ sb.sctps_passiveestab += sarry->sctps_passiveestab;
+ sb.sctps_aborted += sarry->sctps_aborted;
+ sb.sctps_shutdown += sarry->sctps_shutdown;
+ sb.sctps_outoftheblue += sarry->sctps_outoftheblue;
+ sb.sctps_checksumerrors += sarry->sctps_checksumerrors;
+ sb.sctps_outcontrolchunks += sarry->sctps_outcontrolchunks;
+ sb.sctps_outorderchunks += sarry->sctps_outorderchunks;
+ sb.sctps_outunorderchunks += sarry->sctps_outunorderchunks;
+ sb.sctps_incontrolchunks += sarry->sctps_incontrolchunks;
+ sb.sctps_inorderchunks += sarry->sctps_inorderchunks;
+ sb.sctps_inunorderchunks += sarry->sctps_inunorderchunks;
+ sb.sctps_fragusrmsgs += sarry->sctps_fragusrmsgs;
+ sb.sctps_reasmusrmsgs += sarry->sctps_reasmusrmsgs;
+ sb.sctps_outpackets += sarry->sctps_outpackets;
+ sb.sctps_inpackets += sarry->sctps_inpackets;
+ sb.sctps_recvpackets += sarry->sctps_recvpackets;
+ sb.sctps_recvdatagrams += sarry->sctps_recvdatagrams;
+ sb.sctps_recvpktwithdata += sarry->sctps_recvpktwithdata;
+ sb.sctps_recvsacks += sarry->sctps_recvsacks;
+ sb.sctps_recvdata += sarry->sctps_recvdata;
+ sb.sctps_recvdupdata += sarry->sctps_recvdupdata;
+ sb.sctps_recvheartbeat += sarry->sctps_recvheartbeat;
+ sb.sctps_recvheartbeatack += sarry->sctps_recvheartbeatack;
+ sb.sctps_recvecne += sarry->sctps_recvecne;
+ sb.sctps_recvauth += sarry->sctps_recvauth;
+ sb.sctps_recvauthmissing += sarry->sctps_recvauthmissing;
+ sb.sctps_recvivalhmacid += sarry->sctps_recvivalhmacid;
+ sb.sctps_recvivalkeyid += sarry->sctps_recvivalkeyid;
+ sb.sctps_recvauthfailed += sarry->sctps_recvauthfailed;
+ sb.sctps_recvexpress += sarry->sctps_recvexpress;
+ sb.sctps_recvexpressm += sarry->sctps_recvexpressm;
+ sb.sctps_recvnocrc += sarry->sctps_recvnocrc;
+ sb.sctps_recvswcrc += sarry->sctps_recvswcrc;
+ sb.sctps_recvhwcrc += sarry->sctps_recvhwcrc;
+ sb.sctps_sendpackets += sarry->sctps_sendpackets;
+ sb.sctps_sendsacks += sarry->sctps_sendsacks;
+ sb.sctps_senddata += sarry->sctps_senddata;
+ sb.sctps_sendretransdata += sarry->sctps_sendretransdata;
+ sb.sctps_sendfastretrans += sarry->sctps_sendfastretrans;
+ sb.sctps_sendmultfastretrans += sarry->sctps_sendmultfastretrans;
+ sb.sctps_sendheartbeat += sarry->sctps_sendheartbeat;
+ sb.sctps_sendecne += sarry->sctps_sendecne;
+ sb.sctps_sendauth += sarry->sctps_sendauth;
+ sb.sctps_senderrors += sarry->sctps_senderrors;
+ sb.sctps_sendnocrc += sarry->sctps_sendnocrc;
+ sb.sctps_sendswcrc += sarry->sctps_sendswcrc;
+ sb.sctps_sendhwcrc += sarry->sctps_sendhwcrc;
+ sb.sctps_pdrpfmbox += sarry->sctps_pdrpfmbox;
+ sb.sctps_pdrpfehos += sarry->sctps_pdrpfehos;
+ sb.sctps_pdrpmbda += sarry->sctps_pdrpmbda;
+ sb.sctps_pdrpmbct += sarry->sctps_pdrpmbct;
+ sb.sctps_pdrpbwrpt += sarry->sctps_pdrpbwrpt;
+ sb.sctps_pdrpcrupt += sarry->sctps_pdrpcrupt;
+ sb.sctps_pdrpnedat += sarry->sctps_pdrpnedat;
+ sb.sctps_pdrppdbrk += sarry->sctps_pdrppdbrk;
+ sb.sctps_pdrptsnnf += sarry->sctps_pdrptsnnf;
+ sb.sctps_pdrpdnfnd += sarry->sctps_pdrpdnfnd;
+ sb.sctps_pdrpdiwnp += sarry->sctps_pdrpdiwnp;
+ sb.sctps_pdrpdizrw += sarry->sctps_pdrpdizrw;
+ sb.sctps_pdrpbadd += sarry->sctps_pdrpbadd;
+ sb.sctps_pdrpmark += sarry->sctps_pdrpmark;
+ sb.sctps_timoiterator += sarry->sctps_timoiterator;
+ sb.sctps_timodata += sarry->sctps_timodata;
+ sb.sctps_timowindowprobe += sarry->sctps_timowindowprobe;
+ sb.sctps_timoinit += sarry->sctps_timoinit;
+ sb.sctps_timosack += sarry->sctps_timosack;
+ sb.sctps_timoshutdown += sarry->sctps_timoshutdown;
+ sb.sctps_timoheartbeat += sarry->sctps_timoheartbeat;
+ sb.sctps_timocookie += sarry->sctps_timocookie;
+ sb.sctps_timosecret += sarry->sctps_timosecret;
+ sb.sctps_timopathmtu += sarry->sctps_timopathmtu;
+ sb.sctps_timoshutdownack += sarry->sctps_timoshutdownack;
+ sb.sctps_timoshutdownguard += sarry->sctps_timoshutdownguard;
+ sb.sctps_timostrmrst += sarry->sctps_timostrmrst;
+ sb.sctps_timoearlyfr += sarry->sctps_timoearlyfr;
+ sb.sctps_timoasconf += sarry->sctps_timoasconf;
+ sb.sctps_timodelprim += sarry->sctps_timodelprim;
+ sb.sctps_timoautoclose += sarry->sctps_timoautoclose;
+ sb.sctps_timoassockill += sarry->sctps_timoassockill;
+ sb.sctps_timoinpkill += sarry->sctps_timoinpkill;
+ sb.sctps_earlyfrstart += sarry->sctps_earlyfrstart;
+ sb.sctps_earlyfrstop += sarry->sctps_earlyfrstop;
+ sb.sctps_earlyfrmrkretrans += sarry->sctps_earlyfrmrkretrans;
+ sb.sctps_earlyfrstpout += sarry->sctps_earlyfrstpout;
+ sb.sctps_earlyfrstpidsck1 += sarry->sctps_earlyfrstpidsck1;
+ sb.sctps_earlyfrstpidsck2 += sarry->sctps_earlyfrstpidsck2;
+ sb.sctps_earlyfrstpidsck3 += sarry->sctps_earlyfrstpidsck3;
+ sb.sctps_earlyfrstpidsck4 += sarry->sctps_earlyfrstpidsck4;
+ sb.sctps_earlyfrstrid += sarry->sctps_earlyfrstrid;
+ sb.sctps_earlyfrstrout += sarry->sctps_earlyfrstrout;
+ sb.sctps_earlyfrstrtmr += sarry->sctps_earlyfrstrtmr;
+ sb.sctps_hdrops += sarry->sctps_hdrops;
+ sb.sctps_badsum += sarry->sctps_badsum;
+ sb.sctps_noport += sarry->sctps_noport;
+ sb.sctps_badvtag += sarry->sctps_badvtag;
+ sb.sctps_badsid += sarry->sctps_badsid;
+ sb.sctps_nomem += sarry->sctps_nomem;
+ sb.sctps_fastretransinrtt += sarry->sctps_fastretransinrtt;
+ sb.sctps_markedretrans += sarry->sctps_markedretrans;
+ sb.sctps_naglesent += sarry->sctps_naglesent;
+ sb.sctps_naglequeued += sarry->sctps_naglequeued;
+ sb.sctps_maxburstqueued += sarry->sctps_maxburstqueued;
+ sb.sctps_ifnomemqueued += sarry->sctps_ifnomemqueued;
+ sb.sctps_windowprobed += sarry->sctps_windowprobed;
+ sb.sctps_lowlevelerr += sarry->sctps_lowlevelerr;
+ sb.sctps_lowlevelerrusr += sarry->sctps_lowlevelerrusr;
+ sb.sctps_datadropchklmt += sarry->sctps_datadropchklmt;
+ sb.sctps_datadroprwnd += sarry->sctps_datadroprwnd;
+ sb.sctps_ecnereducedcwnd += sarry->sctps_ecnereducedcwnd;
+ sb.sctps_vtagexpress += sarry->sctps_vtagexpress;
+ sb.sctps_vtagbogus += sarry->sctps_vtagbogus;
+ sb.sctps_primary_randry += sarry->sctps_primary_randry;
+ sb.sctps_cmt_randry += sarry->sctps_cmt_randry;
+ sb.sctps_slowpath_sack += sarry->sctps_slowpath_sack;
+ sb.sctps_wu_sacks_sent += sarry->sctps_wu_sacks_sent;
+ sb.sctps_sends_with_flags += sarry->sctps_sends_with_flags;
+ sb.sctps_sends_with_unord += sarry->sctps_sends_with_unord;
+ sb.sctps_sends_with_eof += sarry->sctps_sends_with_eof;
+ sb.sctps_sends_with_abort += sarry->sctps_sends_with_abort;
+ sb.sctps_protocol_drain_calls += sarry->sctps_protocol_drain_calls;
+ sb.sctps_protocol_drains_done += sarry->sctps_protocol_drains_done;
+ sb.sctps_read_peeks += sarry->sctps_read_peeks;
+ sb.sctps_cached_chk += sarry->sctps_cached_chk;
+ sb.sctps_cached_strmoq += sarry->sctps_cached_strmoq;
+ sb.sctps_left_abandon += sarry->sctps_left_abandon;
+ sb.sctps_send_burst_avoid += sarry->sctps_send_burst_avoid;
+ sb.sctps_send_cwnd_avoid += sarry->sctps_send_cwnd_avoid;
+ sb.sctps_fwdtsn_map_over += sarry->sctps_fwdtsn_map_over;
+ }
+ error = SYSCTL_OUT(req, &sb, sizeof(sb));
+ return (error);
+}
+
+#endif
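+
+/*
+ * Hedged usage sketch (hypothetical userland helper; the header that
+ * exports struct sctpstat to userland is assumed): consumers always
+ * see a single struct sctpstat snapshot, since on SMP builds the
+ * handler above folds the per-CPU arrays together before copying out.
+ */
+#if 0
+#include <sys/types.h>
+#include <sys/sysctl.h>
+
+static int
+sketch_read_sctpstat(struct sctpstat *st)
+{
+	size_t len = sizeof(*st);
+
+	return (sysctlbyname("net.inet.sctp.stats", st, &len, NULL, 0));
+}
+#endif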
+
+#if defined(SCTP_LOCAL_TRACE_BUF)
+static int
+sysctl_sctp_cleartrace(SYSCTL_HANDLER_ARGS)
+{
+ int error = 0;
+
+ memset(&SCTP_BASE_SYSCTL(sctp_log), 0, sizeof(struct sctp_log));
+ return (error);
+}
+
+#endif
+
+
+/*
+ * sysctl definitions
+ */
+
+SYSCTL_PROC(_net_inet_sctp, OID_AUTO, sendspace, CTLTYPE_INT | CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_sendspace), 0, sysctl_sctp_check, "IU",
+ SCTPCTL_MAXDGRAM_DESC);
+
+SYSCTL_PROC(_net_inet_sctp, OID_AUTO, recvspace, CTLTYPE_INT | CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_recvspace), 0, sysctl_sctp_check, "IU",
+ SCTPCTL_RECVSPACE_DESC);
+
+SYSCTL_PROC(_net_inet_sctp, OID_AUTO, auto_asconf, CTLTYPE_INT | CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_auto_asconf), 0, sysctl_sctp_check, "IU",
+ SCTPCTL_AUTOASCONF_DESC);
+
+SYSCTL_PROC(_net_inet_sctp, OID_AUTO, ecn_enable, CTLTYPE_INT | CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_ecn_enable), 0, sysctl_sctp_check, "IU",
+ SCTPCTL_ECN_ENABLE_DESC);
+
+SYSCTL_PROC(_net_inet_sctp, OID_AUTO, ecn_nonce, CTLTYPE_INT | CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_ecn_nonce), 0, sysctl_sctp_check, "IU",
+ SCTPCTL_ECN_NONCE_DESC);
+
+SYSCTL_PROC(_net_inet_sctp, OID_AUTO, strict_sacks, CTLTYPE_INT | CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_strict_sacks), 0, sysctl_sctp_check, "IU",
+ SCTPCTL_STRICT_SACKS_DESC);
+
+#if !defined(SCTP_WITH_NO_CSUM)
+SYSCTL_PROC(_net_inet_sctp, OID_AUTO, loopback_nocsum, CTLTYPE_INT | CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_no_csum_on_loopback), 0, sysctl_sctp_check, "IU",
+ SCTPCTL_LOOPBACK_NOCSUM_DESC);
+#endif
+
+SYSCTL_PROC(_net_inet_sctp, OID_AUTO, strict_init, CTLTYPE_INT | CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_strict_init), 0, sysctl_sctp_check, "IU",
+ SCTPCTL_STRICT_INIT_DESC);
+
+SYSCTL_PROC(_net_inet_sctp, OID_AUTO, peer_chkoh, CTLTYPE_INT | CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_peer_chunk_oh), 0, sysctl_sctp_check, "IU",
+ SCTPCTL_PEER_CHKOH_DESC);
+
+SYSCTL_PROC(_net_inet_sctp, OID_AUTO, maxburst, CTLTYPE_INT | CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_max_burst_default), 0, sysctl_sctp_check, "IU",
+ SCTPCTL_MAXBURST_DESC);
+
+SYSCTL_PROC(_net_inet_sctp, OID_AUTO, maxchunks, CTLTYPE_INT | CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue), 0, sysctl_sctp_check, "IU",
+ SCTPCTL_MAXCHUNKS_DESC);
+
+SYSCTL_PROC(_net_inet_sctp, OID_AUTO, tcbhashsize, CTLTYPE_INT | CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_hashtblsize), 0, sysctl_sctp_check, "IU",
+ SCTPCTL_TCBHASHSIZE_DESC);
+
+SYSCTL_PROC(_net_inet_sctp, OID_AUTO, pcbhashsize, CTLTYPE_INT | CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_pcbtblsize), 0, sysctl_sctp_check, "IU",
+ SCTPCTL_PCBHASHSIZE_DESC);
+
+SYSCTL_PROC(_net_inet_sctp, OID_AUTO, min_split_point, CTLTYPE_INT | CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_min_split_point), 0, sysctl_sctp_check, "IU",
+ SCTPCTL_MIN_SPLIT_POINT_DESC);
+
+SYSCTL_PROC(_net_inet_sctp, OID_AUTO, chunkscale, CTLTYPE_INT | CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_chunkscale), 0, sysctl_sctp_check, "IU",
+ SCTPCTL_CHUNKSCALE_DESC);
+
+SYSCTL_PROC(_net_inet_sctp, OID_AUTO, delayed_sack_time, CTLTYPE_INT | CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_delayed_sack_time_default), 0, sysctl_sctp_check, "IU",
+ SCTPCTL_DELAYED_SACK_TIME_DESC);
+
+SYSCTL_PROC(_net_inet_sctp, OID_AUTO, sack_freq, CTLTYPE_INT | CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_sack_freq_default), 0, sysctl_sctp_check, "IU",
+ SCTPCTL_SACK_FREQ_DESC);
+
+SYSCTL_PROC(_net_inet_sctp, OID_AUTO, sys_resource, CTLTYPE_INT | CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_system_free_resc_limit), 0, sysctl_sctp_check, "IU",
+ SCTPCTL_SYS_RESOURCE_DESC);
+
+SYSCTL_PROC(_net_inet_sctp, OID_AUTO, asoc_resource, CTLTYPE_INT | CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_asoc_free_resc_limit), 0, sysctl_sctp_check, "IU",
+ SCTPCTL_ASOC_RESOURCE_DESC);
+
+SYSCTL_PROC(_net_inet_sctp, OID_AUTO, heartbeat_interval, CTLTYPE_INT | CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_heartbeat_interval_default), 0, sysctl_sctp_check, "IU",
+ SCTPCTL_HEARTBEAT_INTERVAL_DESC);
+
+SYSCTL_PROC(_net_inet_sctp, OID_AUTO, pmtu_raise_time, CTLTYPE_INT | CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_pmtu_raise_time_default), 0, sysctl_sctp_check, "IU",
+ SCTPCTL_PMTU_RAISE_TIME_DESC);
+
+SYSCTL_PROC(_net_inet_sctp, OID_AUTO, shutdown_guard_time, CTLTYPE_INT | CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_shutdown_guard_time_default), 0, sysctl_sctp_check, "IU",
+ SCTPCTL_SHUTDOWN_GUARD_TIME_DESC);
+
+SYSCTL_PROC(_net_inet_sctp, OID_AUTO, secret_lifetime, CTLTYPE_INT | CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_secret_lifetime_default), 0, sysctl_sctp_check, "IU",
+ SCTPCTL_SECRET_LIFETIME_DESC);
+
+SYSCTL_PROC(_net_inet_sctp, OID_AUTO, rto_max, CTLTYPE_INT | CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_rto_max_default), 0, sysctl_sctp_check, "IU",
+ SCTPCTL_RTO_MAX_DESC);
+
+SYSCTL_PROC(_net_inet_sctp, OID_AUTO, rto_min, CTLTYPE_INT | CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_rto_min_default), 0, sysctl_sctp_check, "IU",
+ SCTPCTL_RTO_MIN_DESC);
+
+SYSCTL_PROC(_net_inet_sctp, OID_AUTO, rto_initial, CTLTYPE_INT | CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_rto_initial_default), 0, sysctl_sctp_check, "IU",
+ SCTPCTL_RTO_INITIAL_DESC);
+
+SYSCTL_PROC(_net_inet_sctp, OID_AUTO, init_rto_max, CTLTYPE_INT | CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_init_rto_max_default), 0, sysctl_sctp_check, "IU",
+ SCTPCTL_INIT_RTO_MAX_DESC);
+
+SYSCTL_PROC(_net_inet_sctp, OID_AUTO, valid_cookie_life, CTLTYPE_INT | CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_valid_cookie_life_default), 0, sysctl_sctp_check, "IU",
+ SCTPCTL_VALID_COOKIE_LIFE_DESC);
+
+SYSCTL_PROC(_net_inet_sctp, OID_AUTO, init_rtx_max, CTLTYPE_INT | CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_init_rtx_max_default), 0, sysctl_sctp_check, "IU",
+ SCTPCTL_INIT_RTX_MAX_DESC);
+
+SYSCTL_PROC(_net_inet_sctp, OID_AUTO, assoc_rtx_max, CTLTYPE_INT | CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_assoc_rtx_max_default), 0, sysctl_sctp_check, "IU",
+ SCTPCTL_ASSOC_RTX_MAX_DESC);
+
+SYSCTL_PROC(_net_inet_sctp, OID_AUTO, path_rtx_max, CTLTYPE_INT | CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_path_rtx_max_default), 0, sysctl_sctp_check, "IU",
+ SCTPCTL_PATH_RTX_MAX_DESC);
+
+SYSCTL_PROC(_net_inet_sctp, OID_AUTO, add_more_on_output, CTLTYPE_INT | CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_add_more_threshold), 0, sysctl_sctp_check, "IU",
+ SCTPCTL_ADD_MORE_ON_OUTPUT_DESC);
+
+SYSCTL_PROC(_net_inet_sctp, OID_AUTO, outgoing_streams, CTLTYPE_INT | CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_nr_outgoing_streams_default), 0, sysctl_sctp_check, "IU",
+ SCTPCTL_OUTGOING_STREAMS_DESC);
+
+SYSCTL_PROC(_net_inet_sctp, OID_AUTO, cmt_on_off, CTLTYPE_INT | CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_cmt_on_off), 0, sysctl_sctp_check, "IU",
+ SCTPCTL_CMT_ON_OFF_DESC);
+
+/* EY */
+SYSCTL_PROC(_net_inet_sctp, OID_AUTO, nr_sack_on_off, CTLTYPE_INT | CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_nr_sack_on_off), 0, sysctl_sctp_check, "IU",
+ SCTPCTL_NR_SACK_ON_OFF_DESC);
+
+SYSCTL_PROC(_net_inet_sctp, OID_AUTO, cmt_use_dac, CTLTYPE_INT | CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_cmt_use_dac), 0, sysctl_sctp_check, "IU",
+ SCTPCTL_CMT_USE_DAC_DESC);
+
+SYSCTL_PROC(_net_inet_sctp, OID_AUTO, cmt_pf, CTLTYPE_INT | CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_cmt_pf), 0, sysctl_sctp_check, "IU",
+ SCTPCTL_CMT_PF_DESC);
+
+SYSCTL_PROC(_net_inet_sctp, OID_AUTO, cwnd_maxburst, CTLTYPE_INT | CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst), 0, sysctl_sctp_check, "IU",
+ SCTPCTL_CWND_MAXBURST_DESC);
+
+SYSCTL_PROC(_net_inet_sctp, OID_AUTO, early_fast_retran, CTLTYPE_INT | CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_early_fr), 0, sysctl_sctp_check, "IU",
+ SCTPCTL_EARLY_FAST_RETRAN_DESC);
+
+SYSCTL_PROC(_net_inet_sctp, OID_AUTO, early_fast_retran_msec, CTLTYPE_INT | CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_early_fr_msec), 0, sysctl_sctp_check, "IU",
+ SCTPCTL_EARLY_FAST_RETRAN_MSEC_DESC);
+
+SYSCTL_PROC(_net_inet_sctp, OID_AUTO, asconf_auth_nochk, CTLTYPE_INT | CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_asconf_auth_nochk), 0, sysctl_sctp_check, "IU",
+ SCTPCTL_ASCONF_AUTH_NOCHK_DESC);
+
+SYSCTL_PROC(_net_inet_sctp, OID_AUTO, auth_disable, CTLTYPE_INT | CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_auth_disable), 0, sysctl_sctp_check, "IU",
+ SCTPCTL_AUTH_DISABLE_DESC);
+
+SYSCTL_PROC(_net_inet_sctp, OID_AUTO, nat_friendly, CTLTYPE_INT | CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_nat_friendly), 0, sysctl_sctp_check, "IU",
+ SCTPCTL_NAT_FRIENDLY_DESC);
+
+SYSCTL_PROC(_net_inet_sctp, OID_AUTO, abc_l_var, CTLTYPE_INT | CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_L2_abc_variable), 0, sysctl_sctp_check, "IU",
+ SCTPCTL_ABC_L_VAR_DESC);
+
+SYSCTL_PROC(_net_inet_sctp, OID_AUTO, max_chained_mbufs, CTLTYPE_INT | CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_mbuf_threshold_count), 0, sysctl_sctp_check, "IU",
+ SCTPCTL_MAX_CHAINED_MBUFS_DESC);
+
+SYSCTL_PROC(_net_inet_sctp, OID_AUTO, do_sctp_drain, CTLTYPE_INT | CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_do_drain), 0, sysctl_sctp_check, "IU",
+ SCTPCTL_DO_SCTP_DRAIN_DESC);
+
+SYSCTL_PROC(_net_inet_sctp, OID_AUTO, hb_max_burst, CTLTYPE_INT | CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_hb_maxburst), 0, sysctl_sctp_check, "IU",
+ SCTPCTL_HB_MAX_BURST_DESC);
+
+SYSCTL_PROC(_net_inet_sctp, OID_AUTO, abort_at_limit, CTLTYPE_INT | CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_abort_if_one_2_one_hits_limit), 0, sysctl_sctp_check, "IU",
+ SCTPCTL_ABORT_AT_LIMIT_DESC);
+
+SYSCTL_PROC(_net_inet_sctp, OID_AUTO, strict_data_order, CTLTYPE_INT | CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_strict_data_order), 0, sysctl_sctp_check, "IU",
+ SCTPCTL_STRICT_DATA_ORDER_DESC);
+
+SYSCTL_PROC(_net_inet_sctp, OID_AUTO, min_residual, CTLTYPE_INT | CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_min_residual), 0, sysctl_sctp_check, "IU",
+ SCTPCTL_MIN_RESIDUAL_DESC);
+
+SYSCTL_PROC(_net_inet_sctp, OID_AUTO, max_retran_chunk, CTLTYPE_INT | CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_max_retran_chunk), 0, sysctl_sctp_check, "IU",
+ SCTPCTL_MAX_RETRAN_CHUNK_DESC);
+
+SYSCTL_PROC(_net_inet_sctp, OID_AUTO, log_level, CTLTYPE_INT | CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_logging_level), 0, sysctl_sctp_check, "IU",
+ SCTPCTL_LOGGING_LEVEL_DESC);
+
+SYSCTL_PROC(_net_inet_sctp, OID_AUTO, default_cc_module, CTLTYPE_INT | CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_default_cc_module), 0, sysctl_sctp_check, "IU",
+ SCTPCTL_DEFAULT_CC_MODULE_DESC);
+
+SYSCTL_PROC(_net_inet_sctp, OID_AUTO, default_frag_interleave, CTLTYPE_INT | CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_default_frag_interleave), 0, sysctl_sctp_check, "IU",
+ SCTPCTL_DEFAULT_FRAG_INTERLEAVE_DESC);
+
+SYSCTL_PROC(_net_inet_sctp, OID_AUTO, mobility_base, CTLTYPE_INT | CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_mobility_base), 0, sysctl_sctp_check, "IU",
+ SCTPCTL_MOBILITY_BASE_DESC);
+
+SYSCTL_PROC(_net_inet_sctp, OID_AUTO, mobility_fasthandoff, CTLTYPE_INT | CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_mobility_fasthandoff), 0, sysctl_sctp_check, "IU",
+ SCTPCTL_MOBILITY_FASTHANDOFF_DESC);
+
+#if defined(SCTP_LOCAL_TRACE_BUF)
+SYSCTL_STRUCT(_net_inet_sctp, OID_AUTO, log, CTLFLAG_RD,
+ &SCTP_BASE_SYSCTL(sctp_log), sctp_log,
+ "SCTP logging (struct sctp_log)");
+
+SYSCTL_PROC(_net_inet_sctp, OID_AUTO, clear_trace, CTLTYPE_OPAQUE | CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_log), 0, sysctl_sctp_cleartrace, "IU",
+ "Clear SCTP Logging buffer");
+
+
+#endif
+
+SYSCTL_PROC(_net_inet_sctp, OID_AUTO, udp_tunneling_for_client_enable, CTLTYPE_INT | CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_udp_tunneling_for_client_enable), 0, sysctl_sctp_check, "IU",
+ SCTPCTL_UDP_TUNNELING_FOR_CLIENT_ENABLE_DESC);
+
+SYSCTL_PROC(_net_inet_sctp, OID_AUTO, udp_tunneling_port, CTLTYPE_INT | CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_udp_tunneling_port), 0, sysctl_sctp_udp_tunneling_check, "IU",
+ SCTPCTL_UDP_TUNNELING_PORT_DESC);
+
+SYSCTL_PROC(_net_inet_sctp, OID_AUTO, enable_sack_immediately, CTLTYPE_INT | CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_enable_sack_immediately), 0, sysctl_sctp_check, "IU",
+ SCTPCTL_SACK_IMMEDIATELY_ENABLE_DESC);
+
+SYSCTL_PROC(_net_inet_sctp, OID_AUTO, nat_friendly_init, CTLTYPE_INT | CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_inits_include_nat_friendly), 0, sysctl_sctp_check, "IU",
+ SCTPCTL_NAT_FRIENDLY_INITS_DESC);
+
+SYSCTL_PROC(_net_inet_sctp, OID_AUTO, vtag_time_wait, CTLTYPE_INT | CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_vtag_time_wait), 0, sysctl_sctp_check, "IU",
+ SCTPCTL_TIME_WAIT_DESC);
+
+SYSCTL_PROC(_net_inet_sctp, OID_AUTO, buffer_splitting, CTLTYPE_INT | CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_buffer_splitting), 0, sysctl_sctp_check, "IU",
+ SCTPCTL_BUFFER_SPLITTING_DESC);
+
+SYSCTL_PROC(_net_inet_sctp, OID_AUTO, initial_cwnd, CTLTYPE_INT | CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_initial_cwnd), 0, sysctl_sctp_check, "IU",
+ SCTPCTL_INITIAL_CWND_DESC);
+
+#ifdef SCTP_DEBUG
+SYSCTL_PROC(_net_inet_sctp, OID_AUTO, debug, CTLTYPE_INT | CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_debug_on), 0, sysctl_sctp_check, "IU",
+ SCTPCTL_DEBUG_DESC);
+#endif /* SCTP_DEBUG */
+
+
+#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+SYSCTL_PROC(_net_inet_sctp, OID_AUTO, output_unlocked, CTLTYPE_INT | CTLFLAG_RW,
+ &SCTP_BASE_SYSCTL(sctp_output_unlocked), 0, sysctl_sctp_check, "IU",
+ SCTPCTL_OUTPUT_UNLOCKED_DESC);
+#endif
+#if defined(__FreeBSD__) && defined(SMP) && defined(SCTP_USE_PERCPU_STAT)
+SYSCTL_PROC(_net_inet_sctp, OID_AUTO, stats,
+ CTLTYPE_STRUCT | CTLFLAG_RD,
+ 0, 0, sysctl_stat_get, "S,sctpstat",
+ "SCTP statistics (struct sctp_stat)");
+#else
+SYSCTL_STRUCT(_net_inet_sctp, OID_AUTO, stats, CTLFLAG_RW,
+ &SCTP_BASE_STATS_SYSCTL, sctpstat,
+ "SCTP statistics (struct sctp_stat)");
+#endif
+
+SYSCTL_PROC(_net_inet_sctp, OID_AUTO, assoclist, CTLFLAG_RD,
+ 0, 0, sctp_assoclist,
+ "S,xassoc", "List of active SCTP associations");
diff --git a/rtems/freebsd/netinet/sctp_sysctl.h b/rtems/freebsd/netinet/sctp_sysctl.h
new file mode 100644
index 00000000..914c4e59
--- /dev/null
+++ b/rtems/freebsd/netinet/sctp_sysctl.h
@@ -0,0 +1,532 @@
+/*-
+ * Copyright (c) 2007, by Cisco Systems, Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#ifndef __sctp_sysctl_h__
+#define __sctp_sysctl_h__
+
+#include <rtems/freebsd/netinet/sctp_os.h>
+#include <rtems/freebsd/netinet/sctp_constants.h>
+
+struct sctp_sysctl {
+ uint32_t sctp_sendspace;
+ uint32_t sctp_recvspace;
+ uint32_t sctp_auto_asconf;
+ uint32_t sctp_multiple_asconfs;
+ uint32_t sctp_ecn_enable;
+ uint32_t sctp_ecn_nonce;
+ uint32_t sctp_strict_sacks;
+#if !defined(SCTP_WITH_NO_CSUM)
+ uint32_t sctp_no_csum_on_loopback;
+#endif
+ uint32_t sctp_strict_init;
+ uint32_t sctp_peer_chunk_oh;
+ uint32_t sctp_max_burst_default;
+ uint32_t sctp_max_chunks_on_queue;
+ uint32_t sctp_hashtblsize;
+ uint32_t sctp_pcbtblsize;
+ uint32_t sctp_min_split_point;
+ uint32_t sctp_chunkscale;
+ uint32_t sctp_delayed_sack_time_default;
+ uint32_t sctp_sack_freq_default;
+ uint32_t sctp_system_free_resc_limit;
+ uint32_t sctp_asoc_free_resc_limit;
+ uint32_t sctp_heartbeat_interval_default;
+ uint32_t sctp_pmtu_raise_time_default;
+ uint32_t sctp_shutdown_guard_time_default;
+ uint32_t sctp_secret_lifetime_default;
+ uint32_t sctp_rto_max_default;
+ uint32_t sctp_rto_min_default;
+ uint32_t sctp_rto_initial_default;
+ uint32_t sctp_init_rto_max_default;
+ uint32_t sctp_valid_cookie_life_default;
+ uint32_t sctp_init_rtx_max_default;
+ uint32_t sctp_assoc_rtx_max_default;
+ uint32_t sctp_path_rtx_max_default;
+ uint32_t sctp_add_more_threshold;
+ uint32_t sctp_nr_outgoing_streams_default;
+ uint32_t sctp_cmt_on_off;
+ uint32_t sctp_cmt_use_dac;
+ /* EY 5/5/08 - nr_sack flag variable */
+ uint32_t sctp_nr_sack_on_off;
+ uint32_t sctp_cmt_pf;
+ uint32_t sctp_use_cwnd_based_maxburst;
+ uint32_t sctp_early_fr;
+ uint32_t sctp_early_fr_msec;
+ uint32_t sctp_asconf_auth_nochk;
+ uint32_t sctp_auth_disable;
+ uint32_t sctp_nat_friendly;
+ uint32_t sctp_L2_abc_variable;
+ uint32_t sctp_mbuf_threshold_count;
+ uint32_t sctp_do_drain;
+ uint32_t sctp_hb_maxburst;
+ uint32_t sctp_abort_if_one_2_one_hits_limit;
+ uint32_t sctp_strict_data_order;
+ uint32_t sctp_min_residual;
+ uint32_t sctp_max_retran_chunk;
+ uint32_t sctp_logging_level;
+ /* JRS - Variable for default congestion control module */
+ uint32_t sctp_default_cc_module;
+ uint32_t sctp_default_frag_interleave;
+ uint32_t sctp_mobility_base;
+ uint32_t sctp_mobility_fasthandoff;
+ uint32_t sctp_inits_include_nat_friendly;
+#if defined(SCTP_LOCAL_TRACE_BUF)
+ struct sctp_log sctp_log;
+#endif
+ uint32_t sctp_udp_tunneling_for_client_enable;
+ uint32_t sctp_udp_tunneling_port;
+ uint32_t sctp_enable_sack_immediately;
+ uint32_t sctp_vtag_time_wait;
+ uint32_t sctp_buffer_splitting;
+ uint32_t sctp_initial_cwnd;
+#if defined(SCTP_DEBUG)
+ uint32_t sctp_debug_on;
+#endif
+#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+ uint32_t sctp_output_unlocked;
+#endif
+};
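+
+/*
+ * For orientation, a sketch of how SCTP_BASE_SYSCTL is commonly
+ * defined (an assumption; the real definition lives in the OS glue
+ * headers and may indirect differently, e.g. per-vnet under VIMAGE):
+ *
+ *	#define SCTP_BASE_SYSCTL(__m) (system_base_info.sctpsysctl.__m)
+ */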
+
+/*
+ * limits for the sysctl variables
+ */
+/* maxdgram: Maximum outgoing SCTP buffer size */
+#define SCTPCTL_MAXDGRAM_DESC "Maximum outgoing SCTP buffer size"
+#define SCTPCTL_MAXDGRAM_MIN 0
+#define SCTPCTL_MAXDGRAM_MAX 0xFFFFFFFF
+#define SCTPCTL_MAXDGRAM_DEFAULT 262144 /* 256k */
+
+/* recvspace: Maximum incoming SCTP buffer size */
+#define SCTPCTL_RECVSPACE_DESC "Maximum incoming SCTP buffer size"
+#define SCTPCTL_RECVSPACE_MIN 0
+#define SCTPCTL_RECVSPACE_MAX 0xFFFFFFFF
+#define SCTPCTL_RECVSPACE_DEFAULT 262144 /* 256k */
+
+/* autoasconf: Enable SCTP Auto-ASCONF */
+#define SCTPCTL_AUTOASCONF_DESC "Enable SCTP Auto-ASCONF"
+#define SCTPCTL_AUTOASCONF_MIN 0
+#define SCTPCTL_AUTOASCONF_MAX 1
+#define SCTPCTL_AUTOASCONF_DEFAULT SCTP_DEFAULT_AUTO_ASCONF
+
+/* multiple_asconfs: Enable SCTP Multiple-ASCONFs */
+#define SCTPCTL_MULTIPLEASCONFS_DESC "Enable SCTP Multiple-ASCONFs"
+#define SCTPCTL_MULTIPLEASCONFS_MIN 0
+#define SCTPCTL_MULTIPLEASCONFS_MAX 1
+#define SCTPCTL_MULTIPLEASCONFS_DEFAULT SCTP_DEFAULT_MULTIPLE_ASCONFS
+
+/* ecn_enable: Enable SCTP ECN */
+#define SCTPCTL_ECN_ENABLE_DESC "Enable SCTP ECN"
+#define SCTPCTL_ECN_ENABLE_MIN 0
+#define SCTPCTL_ECN_ENABLE_MAX 1
+#define SCTPCTL_ECN_ENABLE_DEFAULT 1
+
+/* ecn_nonce: Enable SCTP ECN Nonce */
+#define SCTPCTL_ECN_NONCE_DESC "Enable SCTP ECN Nonce"
+#define SCTPCTL_ECN_NONCE_MIN 0
+#define SCTPCTL_ECN_NONCE_MAX 1
+#define SCTPCTL_ECN_NONCE_DEFAULT 0
+
+/* strict_sacks: Enable SCTP Strict SACK checking */
+#define SCTPCTL_STRICT_SACKS_DESC "Enable SCTP Strict SACK checking"
+#define SCTPCTL_STRICT_SACKS_MIN 0
+#define SCTPCTL_STRICT_SACKS_MAX 1
+#define SCTPCTL_STRICT_SACKS_DEFAULT 1
+
+/* loopback_nocsum: Enable NO Csum on packets sent on loopback */
+#define SCTPCTL_LOOPBACK_NOCSUM_DESC "Enable NO Csum on packets sent on loopback"
+#define SCTPCTL_LOOPBACK_NOCSUM_MIN 0
+#define SCTPCTL_LOOPBACK_NOCSUM_MAX 1
+#define SCTPCTL_LOOPBACK_NOCSUM_DEFAULT 1
+
+/* strict_init: Enable strict INIT/INIT-ACK singleton enforcement */
+#define SCTPCTL_STRICT_INIT_DESC "Enable strict INIT/INIT-ACK singleton enforcement"
+#define SCTPCTL_STRICT_INIT_MIN 0
+#define SCTPCTL_STRICT_INIT_MAX 1
+#define SCTPCTL_STRICT_INIT_DEFAULT 1
+
+/* peer_chkoh: Amount to debit peers rwnd per chunk sent */
+#define SCTPCTL_PEER_CHKOH_DESC "Amount to debit peers rwnd per chunk sent"
+#define SCTPCTL_PEER_CHKOH_MIN 0
+#define SCTPCTL_PEER_CHKOH_MAX 0xFFFFFFFF
+#define SCTPCTL_PEER_CHKOH_DEFAULT 256
+
+/* maxburst: Default max burst for sctp endpoints */
+#define SCTPCTL_MAXBURST_DESC "Default max burst for sctp endpoints"
+#define SCTPCTL_MAXBURST_MIN 1
+#define SCTPCTL_MAXBURST_MAX 0xFFFFFFFF
+#define SCTPCTL_MAXBURST_DEFAULT SCTP_DEF_MAX_BURST
+
+/* maxchunks: Default max chunks on queue per asoc */
+#define SCTPCTL_MAXCHUNKS_DESC "Default max chunks on queue per asoc"
+#define SCTPCTL_MAXCHUNKS_MIN 0
+#define SCTPCTL_MAXCHUNKS_MAX 0xFFFFFFFF
+#define SCTPCTL_MAXCHUNKS_DEFAULT SCTP_ASOC_MAX_CHUNKS_ON_QUEUE
+
+/* tcbhashsize: Tunable for TCB hash table sizes */
+#define SCTPCTL_TCBHASHSIZE_DESC "Tunable for TCB hash table sizes"
+#define SCTPCTL_TCBHASHSIZE_MIN 1
+#define SCTPCTL_TCBHASHSIZE_MAX 0xFFFFFFFF
+#define SCTPCTL_TCBHASHSIZE_DEFAULT SCTP_TCBHASHSIZE
+
+/* pcbhashsize: Tunable for PCB hash table sizes */
+#define SCTPCTL_PCBHASHSIZE_DESC "Tunable for PCB hash table sizes"
+#define SCTPCTL_PCBHASHSIZE_MIN 1
+#define SCTPCTL_PCBHASHSIZE_MAX 0xFFFFFFFF
+#define SCTPCTL_PCBHASHSIZE_DEFAULT SCTP_PCBHASHSIZE
+
+/* min_split_point: Minimum size when splitting a chunk */
+#define SCTPCTL_MIN_SPLIT_POINT_DESC "Minimum size when splitting a chunk"
+#define SCTPCTL_MIN_SPLIT_POINT_MIN 0
+#define SCTPCTL_MIN_SPLIT_POINT_MAX 0xFFFFFFFF
+#define SCTPCTL_MIN_SPLIT_POINT_DEFAULT SCTP_DEFAULT_SPLIT_POINT_MIN
+
+/* chunkscale: Tuneable for Scaling of number of chunks and messages */
+#define SCTPCTL_CHUNKSCALE_DESC "Tuneable for Scaling of number of chunks and messages"
+#define SCTPCTL_CHUNKSCALE_MIN 1
+#define SCTPCTL_CHUNKSCALE_MAX 0xFFFFFFFF
+#define SCTPCTL_CHUNKSCALE_DEFAULT SCTP_CHUNKQUEUE_SCALE
+
+/* delayed_sack_time: Default delayed SACK timer in msec */
+#define SCTPCTL_DELAYED_SACK_TIME_DESC "Default delayed SACK timer in msec"
+#define SCTPCTL_DELAYED_SACK_TIME_MIN 0
+#define SCTPCTL_DELAYED_SACK_TIME_MAX 0xFFFFFFFF
+#define SCTPCTL_DELAYED_SACK_TIME_DEFAULT SCTP_RECV_MSEC
+
+/* sack_freq: Default SACK frequency */
+#define SCTPCTL_SACK_FREQ_DESC "Default SACK frequency"
+#define SCTPCTL_SACK_FREQ_MIN 0
+#define SCTPCTL_SACK_FREQ_MAX 0xFFFFFFFF
+#define SCTPCTL_SACK_FREQ_DEFAULT SCTP_DEFAULT_SACK_FREQ
+
+/* sys_resource: Max number of cached resources in the system */
+#define SCTPCTL_SYS_RESOURCE_DESC "Max number of cached resources in the system"
+#define SCTPCTL_SYS_RESOURCE_MIN 0
+#define SCTPCTL_SYS_RESOURCE_MAX 0xFFFFFFFF
+#define SCTPCTL_SYS_RESOURCE_DEFAULT SCTP_DEF_SYSTEM_RESC_LIMIT
+
+/* asoc_resource: Max number of cached resources in an asoc */
+#define SCTPCTL_ASOC_RESOURCE_DESC "Max number of cached resources in an asoc"
+#define SCTPCTL_ASOC_RESOURCE_MIN 0
+#define SCTPCTL_ASOC_RESOURCE_MAX 0xFFFFFFFF
+#define SCTPCTL_ASOC_RESOURCE_DEFAULT SCTP_DEF_ASOC_RESC_LIMIT
+
+/* heartbeat_interval: Default heartbeat interval in msec */
+#define SCTPCTL_HEARTBEAT_INTERVAL_DESC "Default heartbeat interval in msec"
+#define SCTPCTL_HEARTBEAT_INTERVAL_MIN 0
+#define SCTPCTL_HEARTBEAT_INTERVAL_MAX 0xFFFFFFFF
+#define SCTPCTL_HEARTBEAT_INTERVAL_DEFAULT SCTP_HB_DEFAULT_MSEC
+
+/* pmtu_raise_time: Default PMTU raise timer in sec */
+#define SCTPCTL_PMTU_RAISE_TIME_DESC "Default PMTU raise timer in sec"
+#define SCTPCTL_PMTU_RAISE_TIME_MIN 0
+#define SCTPCTL_PMTU_RAISE_TIME_MAX 0xFFFFFFFF
+#define SCTPCTL_PMTU_RAISE_TIME_DEFAULT SCTP_DEF_PMTU_RAISE_SEC
+
+/* shutdown_guard_time: Default shutdown guard timer in sec */
+#define SCTPCTL_SHUTDOWN_GUARD_TIME_DESC "Default shutdown guard timer in sec"
+#define SCTPCTL_SHUTDOWN_GUARD_TIME_MIN 0
+#define SCTPCTL_SHUTDOWN_GUARD_TIME_MAX 0xFFFFFFFF
+#define SCTPCTL_SHUTDOWN_GUARD_TIME_DEFAULT SCTP_DEF_MAX_SHUTDOWN_SEC
+
+/* secret_lifetime: Default secret lifetime in sec */
+#define SCTPCTL_SECRET_LIFETIME_DESC "Default secret lifetime in sec"
+#define SCTPCTL_SECRET_LIFETIME_MIN 0
+#define SCTPCTL_SECRET_LIFETIME_MAX 0xFFFFFFFF
+#define SCTPCTL_SECRET_LIFETIME_DEFAULT SCTP_DEFAULT_SECRET_LIFE_SEC
+
+/* rto_max: Default maximum retransmission timeout in msec */
+#define SCTPCTL_RTO_MAX_DESC "Default maximum retransmission timeout in msec"
+#define SCTPCTL_RTO_MAX_MIN 0
+#define SCTPCTL_RTO_MAX_MAX 0xFFFFFFFF
+#define SCTPCTL_RTO_MAX_DEFAULT SCTP_RTO_UPPER_BOUND
+
+/* rto_min: Default minimum retransmission timeout in msec */
+#define SCTPCTL_RTO_MIN_DESC "Default minimum retransmission timeout in msec"
+#define SCTPCTL_RTO_MIN_MIN 0
+#define SCTPCTL_RTO_MIN_MAX 0xFFFFFFFF
+#define SCTPCTL_RTO_MIN_DEFAULT SCTP_RTO_LOWER_BOUND
+
+/* rto_initial: Default initial retransmission timeout in msec */
+#define SCTPCTL_RTO_INITIAL_DESC "Default initial retransmission timeout in msec"
+#define SCTPCTL_RTO_INITIAL_MIN 0
+#define SCTPCTL_RTO_INITIAL_MAX 0xFFFFFFFF
+#define SCTPCTL_RTO_INITIAL_DEFAULT SCTP_RTO_INITIAL
+
+/* init_rto_max: Default maximum retransmission timeout during association setup in msec */
+#define SCTPCTL_INIT_RTO_MAX_DESC "Default maximum retransmission timeout during association setup in msec"
+#define SCTPCTL_INIT_RTO_MAX_MIN 0
+#define SCTPCTL_INIT_RTO_MAX_MAX 0xFFFFFFFF
+#define SCTPCTL_INIT_RTO_MAX_DEFAULT SCTP_RTO_UPPER_BOUND
+
+/* valid_cookie_life: Default cookie lifetime in sec */
+#define SCTPCTL_VALID_COOKIE_LIFE_DESC "Default cookie lifetime in sec"
+#define SCTPCTL_VALID_COOKIE_LIFE_MIN 0
+#define SCTPCTL_VALID_COOKIE_LIFE_MAX 0xFFFFFFFF
+#define SCTPCTL_VALID_COOKIE_LIFE_DEFAULT SCTP_DEFAULT_COOKIE_LIFE
+
+/* init_rtx_max: Default maximum number of retransmission for INIT chunks */
+#define SCTPCTL_INIT_RTX_MAX_DESC "Default maximum number of retransmission for INIT chunks"
+#define SCTPCTL_INIT_RTX_MAX_MIN 0
+#define SCTPCTL_INIT_RTX_MAX_MAX 0xFFFFFFFF
+#define SCTPCTL_INIT_RTX_MAX_DEFAULT SCTP_DEF_MAX_INIT
+
+/* assoc_rtx_max: Default maximum number of retransmissions per association */
+#define SCTPCTL_ASSOC_RTX_MAX_DESC "Default maximum number of retransmissions per association"
+#define SCTPCTL_ASSOC_RTX_MAX_MIN 0
+#define SCTPCTL_ASSOC_RTX_MAX_MAX 0xFFFFFFFF
+#define SCTPCTL_ASSOC_RTX_MAX_DEFAULT SCTP_DEF_MAX_SEND
+
+/* path_rtx_max: Default maximum of retransmissions per path */
+#define SCTPCTL_PATH_RTX_MAX_DESC "Default maximum of retransmissions per path"
+#define SCTPCTL_PATH_RTX_MAX_MIN 0
+#define SCTPCTL_PATH_RTX_MAX_MAX 0xFFFFFFFF
+#define SCTPCTL_PATH_RTX_MAX_DEFAULT SCTP_DEF_MAX_PATH_RTX
+
+/* add_more_on_output: When space wise is it worthwhile to try to add more to a socket send buffer */
+#define SCTPCTL_ADD_MORE_ON_OUTPUT_DESC "When space wise is it worthwhile to try to add more to a socket send buffer"
+#define SCTPCTL_ADD_MORE_ON_OUTPUT_MIN 0
+#define SCTPCTL_ADD_MORE_ON_OUTPUT_MAX 0xFFFFFFFF
+#define SCTPCTL_ADD_MORE_ON_OUTPUT_DEFAULT SCTP_DEFAULT_ADD_MORE
+
+/* outgoing_streams: Default number of outgoing streams */
+#define SCTPCTL_OUTGOING_STREAMS_DESC "Default number of outgoing streams"
+#define SCTPCTL_OUTGOING_STREAMS_MIN 1
+#define SCTPCTL_OUTGOING_STREAMS_MAX 65535
+#define SCTPCTL_OUTGOING_STREAMS_DEFAULT SCTP_OSTREAM_INITIAL
+
+/* cmt_on_off: CMT on/off flag */
+#define SCTPCTL_CMT_ON_OFF_DESC "CMT on/off flag"
+#define SCTPCTL_CMT_ON_OFF_MIN 0
+#define SCTPCTL_CMT_ON_OFF_MAX 1
+#define SCTPCTL_CMT_ON_OFF_DEFAULT 0
+
+/* EY - nr_sack_on_off: NR_SACK on/off flag */
+#define SCTPCTL_NR_SACK_ON_OFF_DESC "NR_SACK on/off flag"
+#define SCTPCTL_NR_SACK_ON_OFF_MIN 0
+#define SCTPCTL_NR_SACK_ON_OFF_MAX 1
+#define SCTPCTL_NR_SACK_ON_OFF_DEFAULT 0
+
+/* cmt_use_dac: CMT DAC on/off flag */
+#define SCTPCTL_CMT_USE_DAC_DESC "CMT DAC on/off flag"
+#define SCTPCTL_CMT_USE_DAC_MIN 0
+#define SCTPCTL_CMT_USE_DAC_MAX 1
+#define SCTPCTL_CMT_USE_DAC_DEFAULT 0
+
+/* JRS 5/21/07 - CMT PF type flag */
+#define SCTPCTL_CMT_PF_DESC "CMT PF type flag"
+#define SCTPCTL_CMT_PF_MIN 0
+#define SCTPCTL_CMT_PF_MAX 2
+#define SCTPCTL_CMT_PF_DEFAULT 0
+
+/* cwnd_maxburst: Use a CWND adjusting maxburst */
+#define SCTPCTL_CWND_MAXBURST_DESC "Use a CWND adjusting maxburst"
+#define SCTPCTL_CWND_MAXBURST_MIN 0
+#define SCTPCTL_CWND_MAXBURST_MAX 1
+#define SCTPCTL_CWND_MAXBURST_DEFAULT 1
+
+/* early_fast_retran: Early Fast Retransmit with timer */
+#define SCTPCTL_EARLY_FAST_RETRAN_DESC "Early Fast Retransmit with timer"
+#define SCTPCTL_EARLY_FAST_RETRAN_MIN 0
+#define SCTPCTL_EARLY_FAST_RETRAN_MAX 0xFFFFFFFF
+#define SCTPCTL_EARLY_FAST_RETRAN_DEFAULT 0
+
+/* early_fast_retran_msec: Early Fast Retransmit minimum timer value */
+#define SCTPCTL_EARLY_FAST_RETRAN_MSEC_DESC "Early Fast Retransmit minimum timer value"
+#define SCTPCTL_EARLY_FAST_RETRAN_MSEC_MIN 0
+#define SCTPCTL_EARLY_FAST_RETRAN_MSEC_MAX 0xFFFFFFFF
+#define SCTPCTL_EARLY_FAST_RETRAN_MSEC_DEFAULT SCTP_MINFR_MSEC_TIMER
+
+/* asconf_auth_nochk: Disable SCTP ASCONF AUTH requirement */
+#define SCTPCTL_ASCONF_AUTH_NOCHK_DESC "Disable SCTP ASCONF AUTH requirement"
+#define SCTPCTL_ASCONF_AUTH_NOCHK_MIN 0
+#define SCTPCTL_ASCONF_AUTH_NOCHK_MAX 1
+#define SCTPCTL_ASCONF_AUTH_NOCHK_DEFAULT 0
+
+/* auth_disable: Disable SCTP AUTH function */
+#define SCTPCTL_AUTH_DISABLE_DESC "Disable SCTP AUTH function"
+#define SCTPCTL_AUTH_DISABLE_MIN 0
+#define SCTPCTL_AUTH_DISABLE_MAX 1
+#define SCTPCTL_AUTH_DISABLE_DEFAULT 0
+
+/* nat_friendly: SCTP NAT friendly operation */
+#define SCTPCTL_NAT_FRIENDLY_DESC "SCTP NAT friendly operation"
+#define SCTPCTL_NAT_FRIENDLY_MIN 0
+#define SCTPCTL_NAT_FRIENDLY_MAX 1
+#define SCTPCTL_NAT_FRIENDLY_DEFAULT 1
+
+/* abc_l_var: SCTP ABC max increase per SACK (L) */
+#define SCTPCTL_ABC_L_VAR_DESC "SCTP ABC max increase per SACK (L)"
+#define SCTPCTL_ABC_L_VAR_MIN 0
+#define SCTPCTL_ABC_L_VAR_MAX 0xFFFFFFFF
+#define SCTPCTL_ABC_L_VAR_DEFAULT 1
+
+/* max_chained_mbufs: Default max number of small mbufs on a chain */
+#define SCTPCTL_MAX_CHAINED_MBUFS_DESC "Default max number of small mbufs on a chain"
+#define SCTPCTL_MAX_CHAINED_MBUFS_MIN 0
+#define SCTPCTL_MAX_CHAINED_MBUFS_MAX 0xFFFFFFFF
+#define SCTPCTL_MAX_CHAINED_MBUFS_DEFAULT SCTP_DEFAULT_MBUFS_IN_CHAIN
+
+/* do_sctp_drain: Should SCTP respond to the drain calls */
+#define SCTPCTL_DO_SCTP_DRAIN_DESC "Should SCTP respond to the drain calls"
+#define SCTPCTL_DO_SCTP_DRAIN_MIN 0
+#define SCTPCTL_DO_SCTP_DRAIN_MAX 1
+#define SCTPCTL_DO_SCTP_DRAIN_DEFAULT 1
+
+/* hb_max_burst: Confirmation Heartbeat max burst */
+#define SCTPCTL_HB_MAX_BURST_DESC "Confirmation Heartbeat max burst"
+#define SCTPCTL_HB_MAX_BURST_MIN 1
+#define SCTPCTL_HB_MAX_BURST_MAX 0xFFFFFFFF
+#define SCTPCTL_HB_MAX_BURST_DEFAULT SCTP_DEF_MAX_BURST
+
+/* abort_at_limit: When one-2-one hits qlimit abort */
+#define SCTPCTL_ABORT_AT_LIMIT_DESC "When one-2-one hits qlimit abort"
+#define SCTPCTL_ABORT_AT_LIMIT_MIN 0
+#define SCTPCTL_ABORT_AT_LIMIT_MAX 1
+#define SCTPCTL_ABORT_AT_LIMIT_DEFAULT 0
+
+/* strict_data_order: Enforce strict data ordering, abort if control inside data */
+#define SCTPCTL_STRICT_DATA_ORDER_DESC "Enforce strict data ordering, abort if control inside data"
+#define SCTPCTL_STRICT_DATA_ORDER_MIN 0
+#define SCTPCTL_STRICT_DATA_ORDER_MAX 1
+#define SCTPCTL_STRICT_DATA_ORDER_DEFAULT 0
+
+/* min_residual: min residual in a data fragment leftover */
+#define SCTPCTL_MIN_RESIDUAL_DESC "Minimum residual data chunk in second part of split"
+#define SCTPCTL_MIN_RESIDUAL_MIN 20
+#define SCTPCTL_MIN_RESIDUAL_MAX 65535
+#define SCTPCTL_MIN_RESIDUAL_DEFAULT 1452
+
+/* max_retran_chunk: max chunk retransmissions */
+#define SCTPCTL_MAX_RETRAN_CHUNK_DESC "Maximum times an unlucky chunk can be retran'd before assoc abort"
+#define SCTPCTL_MAX_RETRAN_CHUNK_MIN 0
+#define SCTPCTL_MAX_RETRAN_CHUNK_MAX 65535
+#define SCTPCTL_MAX_RETRAN_CHUNK_DEFAULT 30
+
+/* sctp_logging: This gives us logging when the options are enabled */
+#define SCTPCTL_LOGGING_LEVEL_DESC "Ltrace/KTR trace logging level"
+#define SCTPCTL_LOGGING_LEVEL_MIN 0
+#define SCTPCTL_LOGGING_LEVEL_MAX 0xffffffff
+#define SCTPCTL_LOGGING_LEVEL_DEFAULT 0
+
+/* JRS - default congestion control module sysctl */
+#define SCTPCTL_DEFAULT_CC_MODULE_DESC "Default congestion control module"
+#define SCTPCTL_DEFAULT_CC_MODULE_MIN 0
+#define SCTPCTL_DEFAULT_CC_MODULE_MAX 2
+#define SCTPCTL_DEFAULT_CC_MODULE_DEFAULT 0
+
+/* RRS - default fragment interleave */
+#define SCTPCTL_DEFAULT_FRAG_INTERLEAVE_DESC "Default fragment interleave level"
+#define SCTPCTL_DEFAULT_FRAG_INTERLEAVE_MIN 0
+#define SCTPCTL_DEFAULT_FRAG_INTERLEAVE_MAX 2
+#define SCTPCTL_DEFAULT_FRAG_INTERLEAVE_DEFAULT 1
+
+/* mobility_base: Enable SCTP mobility support */
+#define SCTPCTL_MOBILITY_BASE_DESC "Enable SCTP base mobility"
+#define SCTPCTL_MOBILITY_BASE_MIN 0
+#define SCTPCTL_MOBILITY_BASE_MAX 1
+#define SCTPCTL_MOBILITY_BASE_DEFAULT SCTP_DEFAULT_MOBILITY_BASE
+
+/* mobility_fasthandoff: Enable SCTP fast handoff support */
+#define SCTPCTL_MOBILITY_FASTHANDOFF_DESC "Enable SCTP fast handoff"
+#define SCTPCTL_MOBILITY_FASTHANDOFF_MIN 0
+#define SCTPCTL_MOBILITY_FASTHANDOFF_MAX 1
+#define SCTPCTL_MOBILITY_FASTHANDOFF_DEFAULT SCTP_DEFAULT_MOBILITY_FASTHANDOFF
+
+/* Enable SCTP/UDP tunneling for clients */
+#define SCTPCTL_UDP_TUNNELING_FOR_CLIENT_ENABLE_DESC "Enable SCTP/UDP tunneling for client"
+#define SCTPCTL_UDP_TUNNELING_FOR_CLIENT_ENABLE_MIN 0
+#define SCTPCTL_UDP_TUNNELING_FOR_CLIENT_ENABLE_MAX 1
+#define SCTPCTL_UDP_TUNNELING_FOR_CLIENT_ENABLE_DEFAULT SCTPCTL_UDP_TUNNELING_FOR_CLIENT_ENABLE_MIN
+
+/* SCTP/UDP tunneling port */
+#define SCTPCTL_UDP_TUNNELING_PORT_DESC "Set the SCTP/UDP tunneling port"
+#define SCTPCTL_UDP_TUNNELING_PORT_MIN 0
+#define SCTPCTL_UDP_TUNNELING_PORT_MAX 65535
+#define SCTPCTL_UDP_TUNNELING_PORT_DEFAULT SCTP_OVER_UDP_TUNNELING_PORT
+
+/* Enable sending of the SACK-IMMEDIATELY bit */
+#define SCTPCTL_SACK_IMMEDIATELY_ENABLE_DESC "Enable sending of the SACK-IMMEDIATELY-bit."
+#define SCTPCTL_SACK_IMMEDIATELY_ENABLE_MIN 0
+#define SCTPCTL_SACK_IMMEDIATELY_ENABLE_MAX 1
+#define SCTPCTL_SACK_IMMEDIATELY_ENABLE_DEFAULT SCTPCTL_SACK_IMMEDIATELY_ENABLE_MIN
+
+/* Enable sending of the NAT-FRIENDLY message */
+#define SCTPCTL_NAT_FRIENDLY_INITS_DESC "Enable sending of the nat-friendly SCTP option on INITs."
+#define SCTPCTL_NAT_FRIENDLY_INITS_MIN 0
+#define SCTPCTL_NAT_FRIENDLY_INITS_MAX 1
+#define SCTPCTL_NAT_FRIENDLY_INITS_DEFAULT SCTPCTL_NAT_FRIENDLY_INITS_MIN
+
+/* Vtag time wait in seconds */
+#define SCTPCTL_TIME_WAIT_DESC "Vtag time wait time in seconds, 0 disables it."
+#define SCTPCTL_TIME_WAIT_MIN 0
+#define SCTPCTL_TIME_WAIT_MAX 0xffffffff
+#define SCTPCTL_TIME_WAIT_DEFAULT SCTP_TIME_WAIT
+
+/* Enable Send/Receive buffer splitting */
+#define SCTPCTL_BUFFER_SPLITTING_DESC "Enable send/receive buffer splitting."
+#define SCTPCTL_BUFFER_SPLITTING_MIN 0
+#define SCTPCTL_BUFFER_SPLITTING_MAX 0x3
+#define SCTPCTL_BUFFER_SPLITTING_DEFAULT SCTPCTL_BUFFER_SPLITTING_MIN
+
+/* Initial congestion window in MTUs */
+#define SCTPCTL_INITIAL_CWND_DESC "Initial congestion window in MTUs"
+#define SCTPCTL_INITIAL_CWND_MIN 1
+#define SCTPCTL_INITIAL_CWND_MAX 0xffffffff
+#define SCTPCTL_INITIAL_CWND_DEFAULT 3
+
+#if defined(SCTP_DEBUG)
+/* debug: Configure debug output */
+#define SCTPCTL_DEBUG_DESC "Configure debug output"
+#define SCTPCTL_DEBUG_MIN 0
+#define SCTPCTL_DEBUG_MAX 0xFFFFFFFF
+#define SCTPCTL_DEBUG_DEFAULT 0
+#endif
+
+
+#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+#define SCTPCTL_OUTPUT_UNLOCKED_DESC "Unlock socket when sending packets down to IP."
+#define SCTPCTL_OUTPUT_UNLOCKED_MIN 0
+#define SCTPCTL_OUTPUT_UNLOCKED_MAX 1
+#define SCTPCTL_OUTPUT_UNLOCKED_DEFAULT SCTPCTL_OUTPUT_UNLOCKED_MIN
+#endif
+
+
+#if defined(_KERNEL) || defined(__Userspace__)
+#if defined(SYSCTL_DECL)
+SYSCTL_DECL(_net_inet_sctp);
+#endif
+
+void sctp_init_sysctls(void);
+
+#endif /* _KERNEL */
+#endif /* __sctp_sysctl_h__ */
diff --git a/rtems/freebsd/netinet/sctp_timer.c b/rtems/freebsd/netinet/sctp_timer.c
new file mode 100644
index 00000000..a4468e10
--- /dev/null
+++ b/rtems/freebsd/netinet/sctp_timer.c
@@ -0,0 +1,1804 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* $KAME: sctp_timer.c,v 1.29 2005/03/06 16:04:18 itojun Exp $ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#define _IP_VHL
+#include <rtems/freebsd/netinet/sctp_os.h>
+#include <rtems/freebsd/netinet/sctp_pcb.h>
+#ifdef INET6
+#endif
+#include <rtems/freebsd/netinet/sctp_var.h>
+#include <rtems/freebsd/netinet/sctp_sysctl.h>
+#include <rtems/freebsd/netinet/sctp_timer.h>
+#include <rtems/freebsd/netinet/sctputil.h>
+#include <rtems/freebsd/netinet/sctp_output.h>
+#include <rtems/freebsd/netinet/sctp_header.h>
+#include <rtems/freebsd/netinet/sctp_indata.h>
+#include <rtems/freebsd/netinet/sctp_asconf.h>
+#include <rtems/freebsd/netinet/sctp_input.h>
+#include <rtems/freebsd/netinet/sctp.h>
+#include <rtems/freebsd/netinet/sctp_uio.h>
+#include <rtems/freebsd/netinet/udp.h>
+
+
+void
+sctp_early_fr_timer(struct sctp_inpcb *inp,
+ struct sctp_tcb *stcb,
+ struct sctp_nets *net)
+{
+ struct sctp_tmit_chunk *chk, *tp2;
+ struct timeval now, min_wait, tv;
+ unsigned int cur_rtt, cnt = 0, cnt_resend = 0;
+
+ /* an early FR is occurring. */
+ (void)SCTP_GETTIME_TIMEVAL(&now);
+ /* get cur rto in micro-seconds */
+ if (net->lastsa == 0) {
+ /* Hmm no rtt estimate yet? */
+ cur_rtt = stcb->asoc.initial_rto >> 2;
+ } else {
+ cur_rtt = ((net->lastsa >> 2) + net->lastsv) >> 1;
+ }
+ if (cur_rtt < SCTP_BASE_SYSCTL(sctp_early_fr_msec)) {
+ cur_rtt = SCTP_BASE_SYSCTL(sctp_early_fr_msec);
+ }
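+ /* cur_rtt is in msec up to this point; scale to usec for the timeval split below. */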
+ cur_rtt *= 1000;
+ tv.tv_sec = cur_rtt / 1000000;
+ tv.tv_usec = cur_rtt % 1000000;
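+ /*
+ * min_wait = now - RTO; only chunks sent at or before this instant
+ * have been outstanding long enough to be marked.
+ */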
+ min_wait = now;
+ timevalsub(&min_wait, &tv);
+ if (min_wait.tv_sec < 0 || min_wait.tv_usec < 0) {
+ /*
+ * if we hit here, we don't have enough seconds on the clock
+ * to account for the RTO. We just let the lower seconds be
+ * the bounds and don't worry about it. This may mean we
+ * will mark a lot more than we should.
+ */
+ min_wait.tv_sec = min_wait.tv_usec = 0;
+ }
+ chk = TAILQ_LAST(&stcb->asoc.sent_queue, sctpchunk_listhead);
+ for (; chk != NULL; chk = tp2) {
+ tp2 = TAILQ_PREV(chk, sctpchunk_listhead, sctp_next);
+ if (chk->whoTo != net) {
+ continue;
+ }
+ if (chk->sent == SCTP_DATAGRAM_RESEND)
+ cnt_resend++;
+ else if ((chk->sent > SCTP_DATAGRAM_UNSENT) &&
+ (chk->sent < SCTP_DATAGRAM_RESEND)) {
+ /* pending, may need retran */
+ if (chk->sent_rcv_time.tv_sec > min_wait.tv_sec) {
+ /*
+ * we have reached a chunk that was sent
+ * some seconds past our min; forget it, we
+ * will find no more to send.
+ */
+ continue;
+ } else if (chk->sent_rcv_time.tv_sec == min_wait.tv_sec) {
+ /*
+ * we must look at the micro seconds to
+ * know.
+ */
+ if (chk->sent_rcv_time.tv_usec >= min_wait.tv_usec) {
+ /*
+ * ok it was sent after our boundary
+ * time.
+ */
+ continue;
+ }
+ }
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_EARLYFR_LOGGING_ENABLE) {
+ sctp_log_fr(chk->rec.data.TSN_seq, chk->snd_count,
+ 4, SCTP_FR_MARKED_EARLY);
+ }
+ SCTP_STAT_INCR(sctps_earlyfrmrkretrans);
+ chk->sent = SCTP_DATAGRAM_RESEND;
+ sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
+ /* double book size since we are doing an early FR */
+ chk->book_size_scale++;
+ cnt += chk->send_size;
+ if ((cnt + net->flight_size) > net->cwnd) {
+ /* Mark all we could possibly resend */
+ break;
+ }
+ }
+ }
+ if (cnt) {
+ /*
+ * JRS - Use the congestion control given in the congestion
+ * control module
+ */
+ stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer(inp, stcb, net);
+ } else if (cnt_resend) {
+ sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_EARLY_FR_TMR, SCTP_SO_NOT_LOCKED);
+ }
+ /* Restart it? */
+ if (net->flight_size < net->cwnd) {
+ SCTP_STAT_INCR(sctps_earlyfrstrtmr);
+ sctp_timer_start(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net);
+ }
+}
+
+void
+sctp_audit_retranmission_queue(struct sctp_association *asoc)
+{
+ struct sctp_tmit_chunk *chk;
+
+ SCTPDBG(SCTP_DEBUG_TIMER4, "Audit invoked on send queue cnt:%d onqueue:%d\n",
+ asoc->sent_queue_retran_cnt,
+ asoc->sent_queue_cnt);
+ asoc->sent_queue_retran_cnt = 0;
+ asoc->sent_queue_cnt = 0;
+ TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
+ if (chk->sent == SCTP_DATAGRAM_RESEND) {
+ sctp_ucount_incr(asoc->sent_queue_retran_cnt);
+ }
+ asoc->sent_queue_cnt++;
+ }
+ TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
+ if (chk->sent == SCTP_DATAGRAM_RESEND) {
+ sctp_ucount_incr(asoc->sent_queue_retran_cnt);
+ }
+ }
+ TAILQ_FOREACH(chk, &asoc->asconf_send_queue, sctp_next) {
+ if (chk->sent == SCTP_DATAGRAM_RESEND) {
+ sctp_ucount_incr(asoc->sent_queue_retran_cnt);
+ }
+ }
+ SCTPDBG(SCTP_DEBUG_TIMER4, "Audit completes retran:%d onqueue:%d\n",
+ asoc->sent_queue_retran_cnt,
+ asoc->sent_queue_cnt);
+}
+
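+/*
+ * Bumps the error counters for the net and the association; returns 1
+ * if the threshold was exceeded and the association was aborted,
+ * 0 otherwise.
+ */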
+int
+sctp_threshold_management(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
+ struct sctp_nets *net, uint16_t threshold)
+{
+ if (net) {
+ net->error_count++;
+ SCTPDBG(SCTP_DEBUG_TIMER4, "Error count for %p now %d thresh:%d\n",
+ net, net->error_count,
+ net->failure_threshold);
+ if (net->error_count > net->failure_threshold) {
+ /* We had a threshold failure */
+ if (net->dest_state & SCTP_ADDR_REACHABLE) {
+ net->dest_state &= ~SCTP_ADDR_REACHABLE;
+ net->dest_state |= SCTP_ADDR_NOT_REACHABLE;
+ net->dest_state &= ~SCTP_ADDR_REQ_PRIMARY;
+ if (net == stcb->asoc.primary_destination) {
+ net->dest_state |= SCTP_ADDR_WAS_PRIMARY;
+ }
+ /*
+ * JRS 5/14/07 - If a destination is
+ * unreachable, the PF bit is turned off.
+ * This allows an unambiguous use of the PF
+ * bit for destinations that are reachable
+ * but potentially failed. If the
+ * destination is set to the unreachable
+ * state, also set the destination to the PF
+ * state.
+ */
+ /*
+ * Add debug message here if destination is
+ * not in PF state.
+ */
+ /* Stop any running T3 timers here? */
+ if ((stcb->asoc.sctp_cmt_on_off == 1) &&
+ (stcb->asoc.sctp_cmt_pf > 0)) {
+ net->dest_state &= ~SCTP_ADDR_PF;
+ SCTPDBG(SCTP_DEBUG_TIMER4, "Destination %p moved from PF to unreachable.\n",
+ net);
+ }
+ sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN,
+ stcb,
+ SCTP_FAILED_THRESHOLD,
+ (void *)net, SCTP_SO_NOT_LOCKED);
+ }
+ }
+ /*********HOLD THIS COMMENT FOR PATCH OF ALTERNATE
+ *********ROUTING CODE
+ */
+ /*********HOLD THIS COMMENT FOR END OF PATCH OF ALTERNATE
+ *********ROUTING CODE
+ */
+ }
+ if (stcb == NULL)
+ return (0);
+
+ if (net) {
+ if ((net->dest_state & SCTP_ADDR_UNCONFIRMED) == 0) {
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
+ sctp_misc_ints(SCTP_THRESHOLD_INCR,
+ stcb->asoc.overall_error_count,
+ (stcb->asoc.overall_error_count + 1),
+ SCTP_FROM_SCTP_TIMER,
+ __LINE__);
+ }
+ stcb->asoc.overall_error_count++;
+ }
+ } else {
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
+ sctp_misc_ints(SCTP_THRESHOLD_INCR,
+ stcb->asoc.overall_error_count,
+ (stcb->asoc.overall_error_count + 1),
+ SCTP_FROM_SCTP_TIMER,
+ __LINE__);
+ }
+ stcb->asoc.overall_error_count++;
+ }
+ SCTPDBG(SCTP_DEBUG_TIMER4, "Overall error count for %p now %d thresh:%u state:%x\n",
+ &stcb->asoc, stcb->asoc.overall_error_count,
+ (uint32_t) threshold,
+ ((net == NULL) ? (uint32_t) 0 : (uint32_t) net->dest_state));
+ /*
+ * We specifically do not do >= to give the assoc one more chance
+ * before we fail it.
+ */
+ if (stcb->asoc.overall_error_count > threshold) {
+ /* Abort notification sends a ULP notify */
+ struct mbuf *oper;
+
+ oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
+ 0, M_DONTWAIT, 1, MT_DATA);
+ if (oper) {
+ struct sctp_paramhdr *ph;
+ uint32_t *ippp;
+
+ SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
+ sizeof(uint32_t);
+ ph = mtod(oper, struct sctp_paramhdr *);
+ ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
+ ph->param_length = htons(SCTP_BUF_LEN(oper));
+ ippp = (uint32_t *) (ph + 1);
+ *ippp = htonl(SCTP_FROM_SCTP_TIMER + SCTP_LOC_1);
+ }
+ inp->last_abort_code = SCTP_FROM_SCTP_TIMER + SCTP_LOC_1;
+ sctp_abort_an_association(inp, stcb, SCTP_FAILED_THRESHOLD, oper, SCTP_SO_NOT_LOCKED);
+ return (1);
+ }
+ return (0);
+}
+
+/*
+ * sctp_find_alternate_net() returns a non-NULL pointer as long
+ * as the argument net is non-NULL.
+ */
+struct sctp_nets *
+sctp_find_alternate_net(struct sctp_tcb *stcb,
+ struct sctp_nets *net,
+ int mode)
+{
+ /* Find and return an alternate network if possible */
+ struct sctp_nets *alt, *mnet, *min_errors_net = NULL, *max_cwnd_net = NULL;
+ int once;
+
+ /* JRS 5/14/07 - Initialize min_errors to an impossible value. */
+ int min_errors = -1;
+ uint32_t max_cwnd = 0;
+
+ if (stcb->asoc.numnets == 1) {
+ /* No others but net */
+ return (TAILQ_FIRST(&stcb->asoc.nets));
+ }
+ /*
+ * JRS 5/14/07 - If mode is set to 2, use the CMT PF find alternate
+ * net algorithm. This algorithm chooses the active destination (not
+ * in PF state) with the largest cwnd value. If all destinations are
+ * in PF state, unreachable, or unconfirmed, choose the destination
+ * that is in PF state with the lowest error count. In case of a
+ * tie, choose the destination that was most recently active.
+ */
+ if (mode == 2) {
+ TAILQ_FOREACH(mnet, &stcb->asoc.nets, sctp_next) {
+ /*
+ * JRS 5/14/07 - If the destination is unreachable
+ * or unconfirmed, skip it.
+ */
+ if (((mnet->dest_state & SCTP_ADDR_REACHABLE) != SCTP_ADDR_REACHABLE) ||
+ (mnet->dest_state & SCTP_ADDR_UNCONFIRMED)) {
+ continue;
+ }
+ /*
+ * JRS 5/14/07 - If the destination is reachable
+ * but in PF state, compare the error count of the
+ * destination to the minimum error count seen thus
+ * far. Store the destination with the lower error
+ * count. If the error counts are equal, store the
+ * destination that was most recently active.
+ */
+ if (mnet->dest_state & SCTP_ADDR_PF) {
+ /*
+ * JRS 5/14/07 - If the destination under
+ * consideration is the current destination,
+ * work as if the error count is one higher.
+ * The actual error count will not be
+ * incremented until later in the t3
+ * handler.
+ */
+ if (mnet == net) {
+ if (min_errors == -1) {
+ min_errors = mnet->error_count + 1;
+ min_errors_net = mnet;
+ } else if (mnet->error_count + 1 < min_errors) {
+ min_errors = mnet->error_count + 1;
+ min_errors_net = mnet;
+ } else if (mnet->error_count + 1 == min_errors
+ && mnet->last_active > min_errors_net->last_active) {
+ min_errors_net = mnet;
+ min_errors = mnet->error_count + 1;
+ }
+ continue;
+ } else {
+ if (min_errors == -1) {
+ min_errors = mnet->error_count;
+ min_errors_net = mnet;
+ } else if (mnet->error_count < min_errors) {
+ min_errors = mnet->error_count;
+ min_errors_net = mnet;
+ } else if (mnet->error_count == min_errors
+ && mnet->last_active > min_errors_net->last_active) {
+ min_errors_net = mnet;
+ min_errors = mnet->error_count;
+ }
+ continue;
+ }
+ }
+ /*
+ * JRS 5/14/07 - If the destination is reachable and
+ * not in PF state, compare the cwnd of the
+ * destination to the highest cwnd seen thus far.
+ * Store the destination with the higher cwnd value.
+ * If the cwnd values are equal, randomly choose one
+ * of the two destinations.
+ */
+ if (max_cwnd < mnet->cwnd) {
+ max_cwnd_net = mnet;
+ max_cwnd = mnet->cwnd;
+ } else if (max_cwnd == mnet->cwnd) {
+ uint32_t rndval;
+ uint8_t this_random;
+
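+ /*
+ * Tie on cwnd: consume one byte from the cached HB
+ * random pool (refilled every four uses) and let its
+ * low bit break the tie.
+ */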
+ if (stcb->asoc.hb_random_idx > 3) {
+ rndval = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep);
+ memcpy(stcb->asoc.hb_random_values, &rndval, sizeof(stcb->asoc.hb_random_values));
+ this_random = stcb->asoc.hb_random_values[0];
+ stcb->asoc.hb_random_idx++;
+ stcb->asoc.hb_ect_randombit = 0;
+ } else {
+ this_random = stcb->asoc.hb_random_values[stcb->asoc.hb_random_idx];
+ stcb->asoc.hb_random_idx++;
+ stcb->asoc.hb_ect_randombit = 0;
+ }
+ if (this_random % 2 == 1) {
+ max_cwnd_net = mnet;
+ max_cwnd = mnet->cwnd; /* Useless? */
+ }
+ }
+ }
+ /*
+ * JRS 5/14/07 - After all destination have been considered
+ * as alternates, check to see if there was some active
+ * destination (not in PF state). If not, check to see if
+ * there was some PF destination with the minimum number of
+ * errors. If not, return the original destination. If
+ * there is a min_errors_net, remove the PF flag from that
+ * destination, set the cwnd to one or two MTUs, and return
+ * the destination as an alt. If there was some active
+ * destination with a highest cwnd, return the destination
+ * as an alt.
+ */
+ if (max_cwnd_net == NULL) {
+ if (min_errors_net == NULL) {
+ return (net);
+ }
+ min_errors_net->dest_state &= ~SCTP_ADDR_PF;
+ min_errors_net->cwnd = min_errors_net->mtu * stcb->asoc.sctp_cmt_pf;
+ if (SCTP_OS_TIMER_PENDING(&min_errors_net->rxt_timer.timer)) {
+ sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
+ stcb, min_errors_net,
+ SCTP_FROM_SCTP_TIMER + SCTP_LOC_2);
+ }
+ SCTPDBG(SCTP_DEBUG_TIMER4, "Destination %p moved from PF to active with %d errors.\n",
+ min_errors_net, min_errors_net->error_count);
+ return (min_errors_net);
+ } else {
+ return (max_cwnd_net);
+ }
+ }
+ /*
+ * JRS 5/14/07 - If mode is set to 1, use the CMT policy for
+ * choosing an alternate net.
+ */
+ else if (mode == 1) {
+ TAILQ_FOREACH(mnet, &stcb->asoc.nets, sctp_next) {
+ if (((mnet->dest_state & SCTP_ADDR_REACHABLE) != SCTP_ADDR_REACHABLE) ||
+ (mnet->dest_state & SCTP_ADDR_UNCONFIRMED)) {
+ /*
+ * will skip ones that are not-reachable or
+ * unconfirmed
+ */
+ continue;
+ }
+ if (max_cwnd < mnet->cwnd) {
+ max_cwnd_net = mnet;
+ max_cwnd = mnet->cwnd;
+ } else if (max_cwnd == mnet->cwnd) {
+ uint32_t rndval;
+ uint8_t this_random;
+
+ if (stcb->asoc.hb_random_idx > 3) {
+ rndval = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep);
+ memcpy(stcb->asoc.hb_random_values, &rndval,
+ sizeof(stcb->asoc.hb_random_values));
+ this_random = stcb->asoc.hb_random_values[0];
+ stcb->asoc.hb_random_idx = 0;
+ stcb->asoc.hb_ect_randombit = 0;
+ } else {
+ this_random = stcb->asoc.hb_random_values[stcb->asoc.hb_random_idx];
+ stcb->asoc.hb_random_idx++;
+ stcb->asoc.hb_ect_randombit = 0;
+ }
+ if (this_random % 2) {
+ max_cwnd_net = mnet;
+ max_cwnd = mnet->cwnd;
+ }
+ }
+ }
+ if (max_cwnd_net) {
+ return (max_cwnd_net);
+ }
+ }
+ mnet = net;
+ once = 0;
+
+ if (mnet == NULL) {
+ mnet = TAILQ_FIRST(&stcb->asoc.nets);
+ if (mnet == NULL) {
+ return (NULL);
+ }
+ }
+ do {
+ alt = TAILQ_NEXT(mnet, sctp_next);
+ if (alt == NULL) {
+ once++;
+ if (once > 1) {
+ break;
+ }
+ alt = TAILQ_FIRST(&stcb->asoc.nets);
+ if (alt == NULL) {
+ return (NULL);
+ }
+ }
+ if (alt->ro.ro_rt == NULL) {
+ if (alt->ro._s_addr) {
+ sctp_free_ifa(alt->ro._s_addr);
+ alt->ro._s_addr = NULL;
+ }
+ alt->src_addr_selected = 0;
+ }
+ /* sa_ignore NO_NULL_CHK */
+ if (((alt->dest_state & SCTP_ADDR_REACHABLE) == SCTP_ADDR_REACHABLE) &&
+ (alt->ro.ro_rt != NULL) &&
+ (!(alt->dest_state & SCTP_ADDR_UNCONFIRMED))) {
+ /* Found a reachable address */
+ break;
+ }
+ mnet = alt;
+ } while (alt != NULL);
+
+ if (alt == NULL) {
+ /* Case where NO in-service network exists (dormant state) */
+ /* we rotate destinations */
+ once = 0;
+ mnet = net;
+ do {
+ if (mnet == NULL) {
+ return (TAILQ_FIRST(&stcb->asoc.nets));
+ }
+ alt = TAILQ_NEXT(mnet, sctp_next);
+ if (alt == NULL) {
+ once++;
+ if (once > 1) {
+ break;
+ }
+ alt = TAILQ_FIRST(&stcb->asoc.nets);
+ }
+ /* sa_ignore NO_NULL_CHK */
+ if ((!(alt->dest_state & SCTP_ADDR_UNCONFIRMED)) &&
+ (alt != net)) {
+ /* Found an alternate address */
+ break;
+ }
+ mnet = alt;
+ } while (alt != NULL);
+ }
+ if (alt == NULL) {
+ return (net);
+ }
+ return (alt);
+}
+
+static void
+sctp_backoff_on_timeout(struct sctp_tcb *stcb,
+ struct sctp_nets *net,
+ int win_probe,
+ int num_marked, int num_abandoned)
+{
+ if (net->RTO == 0) {
+ net->RTO = stcb->asoc.minrto;
+ }
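+ /* Exponential backoff: double the RTO, capped at the association's maxrto below. */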
+ net->RTO <<= 1;
+ if (net->RTO > stcb->asoc.maxrto) {
+ net->RTO = stcb->asoc.maxrto;
+ }
+ if ((win_probe == 0) && (num_marked || num_abandoned)) {
+ /* We don't apply penalty to window probe scenarios */
+ /* JRS - Use the congestion control given in the CC module */
+ stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout(stcb, net);
+ }
+}
+
+#ifndef INVARIANTS
+static void
+sctp_recover_sent_list(struct sctp_tcb *stcb)
+{
+ struct sctp_tmit_chunk *chk, *tp2;
+ struct sctp_association *asoc;
+
+ asoc = &stcb->asoc;
+ chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
+ for (; chk != NULL; chk = tp2) {
+ tp2 = TAILQ_NEXT(chk, sctp_next);
+ if ((compare_with_wrap(stcb->asoc.last_acked_seq,
+ chk->rec.data.TSN_seq,
+ MAX_TSN)) ||
+ (stcb->asoc.last_acked_seq == chk->rec.data.TSN_seq)) {
+
+ SCTP_PRINTF("Found chk:%p tsn:%x <= last_acked_seq:%x\n",
+ chk, chk->rec.data.TSN_seq, stcb->asoc.last_acked_seq);
+ TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
+ if (chk->pr_sctp_on) {
+ if (asoc->pr_sctp_cnt != 0)
+ asoc->pr_sctp_cnt--;
+ }
+ if (chk->data) {
+ /* sa_ignore NO_NULL_CHK */
+ sctp_free_bufspace(stcb, asoc, chk, 1);
+ sctp_m_freem(chk->data);
+ if (asoc->peer_supports_prsctp && PR_SCTP_BUF_ENABLED(chk->flags)) {
+ asoc->sent_queue_cnt_removeable--;
+ }
+ }
+ chk->data = NULL;
+ asoc->sent_queue_cnt--;
+ sctp_free_a_chunk(stcb, chk);
+ }
+ }
+ SCTP_PRINTF("after recover order is as follows\n");
+ chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
+ for (; chk != NULL; chk = tp2) {
+ tp2 = TAILQ_NEXT(chk, sctp_next);
+ SCTP_PRINTF("chk:%p TSN:%x\n", chk, chk->rec.data.TSN_seq);
+ }
+}
+
+#endif
+
+static int
+sctp_mark_all_for_resend(struct sctp_tcb *stcb,
+ struct sctp_nets *net,
+ struct sctp_nets *alt,
+ int window_probe,
+ int *num_marked,
+ int *num_abandoned)
+{
+
+ /*
+ * Mark all chunks (well not all) that were sent to *net for
+ * retransmission. Move them to alt for their destination as well...
+ * We only mark chunks that have been outstanding long enough to
+ * have received feedback.
+ */
+ struct sctp_tmit_chunk *chk, *tp2;
+ struct sctp_nets *lnets;
+ struct timeval now, min_wait, tv;
+ int cur_rtt;
+ int cnt_abandoned;
+ int audit_tf, num_mk, fir;
+ unsigned int cnt_mk;
+ uint32_t orig_flight, orig_tf;
+ uint32_t tsnlast, tsnfirst;
+ int recovery_cnt = 0;
+
+
+ /* none in flight now */
+ audit_tf = 0;
+ fir = 0;
+ /*
+ * figure out how long a data chunk must be pending before we can
+ * mark it ..
+ */
+ (void)SCTP_GETTIME_TIMEVAL(&now);
+ /* get cur rto in micro-seconds */
+ cur_rtt = (((net->lastsa >> 2) + net->lastsv) >> 1);
+ cur_rtt *= 1000;
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_EARLYFR_LOGGING_ENABLE | SCTP_FR_LOGGING_ENABLE)) {
+ sctp_log_fr(cur_rtt,
+ stcb->asoc.peers_rwnd,
+ window_probe,
+ SCTP_FR_T3_MARK_TIME);
+ sctp_log_fr(net->flight_size,
+ SCTP_OS_TIMER_PENDING(&net->fr_timer.timer),
+ SCTP_OS_TIMER_ACTIVE(&net->fr_timer.timer),
+ SCTP_FR_CWND_REPORT);
+ sctp_log_fr(net->flight_size, net->cwnd, stcb->asoc.total_flight, SCTP_FR_CWND_REPORT);
+ }
+ tv.tv_sec = cur_rtt / 1000000;
+ tv.tv_usec = cur_rtt % 1000000;
+ min_wait = now;
+ timevalsub(&min_wait, &tv);
+ if (min_wait.tv_sec < 0 || min_wait.tv_usec < 0) {
+ /*
+ * if we hit here, we don't have enough seconds on the clock
+ * to account for the RTO. We just let the lower seconds be
+ * the bounds and don't worry about it. This may mean we
+ * will mark a lot more than we should.
+ */
+ min_wait.tv_sec = min_wait.tv_usec = 0;
+ }
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_EARLYFR_LOGGING_ENABLE | SCTP_FR_LOGGING_ENABLE)) {
+ sctp_log_fr(cur_rtt, now.tv_sec, now.tv_usec, SCTP_FR_T3_MARK_TIME);
+ sctp_log_fr(0, min_wait.tv_sec, min_wait.tv_usec, SCTP_FR_T3_MARK_TIME);
+ }
+ /*
+ * Our rwnd will be incorrect here since we are not adding back the
+ * cnt * mbuf but we will fix that down below.
+ */
+ orig_flight = net->flight_size;
+ orig_tf = stcb->asoc.total_flight;
+
+ net->fast_retran_ip = 0;
+ /* Now on to each chunk */
+ cnt_abandoned = 0;
+ num_mk = cnt_mk = 0;
+ tsnfirst = tsnlast = 0;
+#ifndef INVARIANTS
+start_again:
+#endif
+ chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
+ for (; chk != NULL; chk = tp2) {
+ tp2 = TAILQ_NEXT(chk, sctp_next);
+ if ((compare_with_wrap(stcb->asoc.last_acked_seq,
+ chk->rec.data.TSN_seq,
+ MAX_TSN)) ||
+ (stcb->asoc.last_acked_seq == chk->rec.data.TSN_seq)) {
+ /* Strange case our list got out of order? */
+ SCTP_PRINTF("Our list is out of order? last_acked:%x chk:%x",
+ (unsigned int)stcb->asoc.last_acked_seq, (unsigned int)chk->rec.data.TSN_seq);
+ recovery_cnt++;
+#ifdef INVARIANTS
+ panic("last acked >= chk on sent-Q");
+#else
+ SCTP_PRINTF("Recover attempts a restart cnt:%d\n", recovery_cnt);
+ sctp_recover_sent_list(stcb);
+ if (recovery_cnt < 10) {
+ goto start_again;
+ } else {
+ SCTP_PRINTF("Recovery fails %d times??\n", recovery_cnt);
+ }
+#endif
+ }
+ if ((chk->whoTo == net) && (chk->sent < SCTP_DATAGRAM_ACKED)) {
+ /*
+ * found one to mark: If it is less than
+ * DATAGRAM_ACKED it MUST not be a skipped or marked
+ * TSN but instead one that is either already set
+ * for retransmission OR one that needs
+ * retransmission.
+ */
+
+ /* validate its been outstanding long enough */
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_EARLYFR_LOGGING_ENABLE | SCTP_FR_LOGGING_ENABLE)) {
+ sctp_log_fr(chk->rec.data.TSN_seq,
+ chk->sent_rcv_time.tv_sec,
+ chk->sent_rcv_time.tv_usec,
+ SCTP_FR_T3_MARK_TIME);
+ }
+ if ((chk->sent_rcv_time.tv_sec > min_wait.tv_sec) && (window_probe == 0)) {
+ /*
+ * we have reached a chunk that was sent
+ * some seconds past our min; forget it, we
+ * will find no more to send.
+ */
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_EARLYFR_LOGGING_ENABLE | SCTP_FR_LOGGING_ENABLE)) {
+ sctp_log_fr(0,
+ chk->sent_rcv_time.tv_sec,
+ chk->sent_rcv_time.tv_usec,
+ SCTP_FR_T3_STOPPED);
+ }
+ continue;
+ } else if ((chk->sent_rcv_time.tv_sec == min_wait.tv_sec) &&
+ (window_probe == 0)) {
+ /*
+ * we must look at the micro seconds to
+ * know.
+ */
+ if (chk->sent_rcv_time.tv_usec >= min_wait.tv_usec) {
+ /*
+ * ok it was sent after our boundary
+ * time.
+ */
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_EARLYFR_LOGGING_ENABLE | SCTP_FR_LOGGING_ENABLE)) {
+ sctp_log_fr(0,
+ chk->sent_rcv_time.tv_sec,
+ chk->sent_rcv_time.tv_usec,
+ SCTP_FR_T3_STOPPED);
+ }
+ continue;
+ }
+ }
+ if (stcb->asoc.peer_supports_prsctp && PR_SCTP_TTL_ENABLED(chk->flags)) {
+ /* Is it expired? */
+ if (timevalcmp(&now, &chk->rec.data.timetodrop, >)) {
+ /* Yes so drop it */
+ if (chk->data) {
+ (void)sctp_release_pr_sctp_chunk(stcb,
+ chk,
+ (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
+ SCTP_SO_NOT_LOCKED);
+ cnt_abandoned++;
+ }
+ continue;
+ }
+ }
+ if (stcb->asoc.peer_supports_prsctp && PR_SCTP_RTX_ENABLED(chk->flags)) {
+ /* Has it been retransmitted tv_sec times? */
+ if (chk->snd_count > chk->rec.data.timetodrop.tv_sec) {
+ if (chk->data) {
+ (void)sctp_release_pr_sctp_chunk(stcb,
+ chk,
+ (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
+ SCTP_SO_NOT_LOCKED);
+ cnt_abandoned++;
+ }
+ continue;
+ }
+ }
+ if (chk->sent < SCTP_DATAGRAM_RESEND) {
+ sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
+ num_mk++;
+ if (fir == 0) {
+ fir = 1;
+ tsnfirst = chk->rec.data.TSN_seq;
+ }
+ tsnlast = chk->rec.data.TSN_seq;
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_EARLYFR_LOGGING_ENABLE | SCTP_FR_LOGGING_ENABLE)) {
+ sctp_log_fr(chk->rec.data.TSN_seq, chk->snd_count,
+ 0, SCTP_FR_T3_MARKED);
+ }
+ if (chk->rec.data.chunk_was_revoked) {
+ /* deflate the cwnd */
+ chk->whoTo->cwnd -= chk->book_size;
+ chk->rec.data.chunk_was_revoked = 0;
+ }
+ net->marked_retrans++;
+ stcb->asoc.marked_retrans++;
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
+ sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND_TO,
+ chk->whoTo->flight_size,
+ chk->book_size,
+ (uintptr_t) chk->whoTo,
+ chk->rec.data.TSN_seq);
+ }
+ sctp_flight_size_decrease(chk);
+ sctp_total_flight_decrease(stcb, chk);
+ stcb->asoc.peers_rwnd += chk->send_size;
+ stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh);
+ }
+ chk->sent = SCTP_DATAGRAM_RESEND;
+ SCTP_STAT_INCR(sctps_markedretrans);
+
+ /* reset the TSN for striking and other FR stuff */
+ chk->rec.data.doing_fast_retransmit = 0;
+ /* Clear any time so NO RTT is being done */
+ chk->do_rtt = 0;
+ if (alt != net) {
+ sctp_free_remote_addr(chk->whoTo);
+ chk->no_fr_allowed = 1;
+ chk->whoTo = alt;
+ atomic_add_int(&alt->ref_count, 1);
+ } else {
+ chk->no_fr_allowed = 0;
+ if (TAILQ_EMPTY(&stcb->asoc.send_queue)) {
+ chk->rec.data.fast_retran_tsn = stcb->asoc.sending_seq;
+ } else {
+ chk->rec.data.fast_retran_tsn = (TAILQ_FIRST(&stcb->asoc.send_queue))->rec.data.TSN_seq;
+ }
+ }
+ /*
+ * CMT: Do not allow FRs on retransmitted TSNs.
+ */
+ if (stcb->asoc.sctp_cmt_on_off == 1) {
+ chk->no_fr_allowed = 1;
+ }
+#ifdef THIS_SHOULD_NOT_BE_DONE
+ } else if (chk->sent == SCTP_DATAGRAM_ACKED) {
+ /* remember highest acked one */
+ could_be_sent = chk;
+#endif
+ }
+ if (chk->sent == SCTP_DATAGRAM_RESEND) {
+ cnt_mk++;
+ }
+ }
+ if ((orig_flight - net->flight_size) != (orig_tf - stcb->asoc.total_flight)) {
+ /* we did not subtract the same things? */
+ audit_tf = 1;
+ }
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_EARLYFR_LOGGING_ENABLE | SCTP_FR_LOGGING_ENABLE)) {
+ sctp_log_fr(tsnfirst, tsnlast, num_mk, SCTP_FR_T3_TIMEOUT);
+ }
+#ifdef SCTP_DEBUG
+ if (num_mk) {
+ SCTPDBG(SCTP_DEBUG_TIMER1, "LAST TSN marked was %x\n",
+ tsnlast);
+ SCTPDBG(SCTP_DEBUG_TIMER1, "Num marked for retransmission was %d peer-rwd:%ld\n",
+ num_mk, (u_long)stcb->asoc.peers_rwnd);
+ SCTPDBG(SCTP_DEBUG_TIMER1, "LAST TSN marked was %x\n",
+ tsnlast);
+ SCTPDBG(SCTP_DEBUG_TIMER1, "Num marked for retransmission was %d peer-rwd:%d\n",
+ num_mk,
+ (int)stcb->asoc.peers_rwnd);
+ }
+#endif
+ *num_marked = num_mk;
+ *num_abandoned = cnt_abandoned;
+ /*
+ * Now check for an ECN Echo that may be stranded, and include any
+ * control-queue resends in the cnt_mk count.
+ */
+ TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) {
+ if (chk->sent == SCTP_DATAGRAM_RESEND) {
+ cnt_mk++;
+ }
+ if ((chk->whoTo == net) &&
+ (chk->rec.chunk_id.id == SCTP_ECN_ECHO)) {
+ sctp_free_remote_addr(chk->whoTo);
+ chk->whoTo = alt;
+ if (chk->sent != SCTP_DATAGRAM_RESEND) {
+ chk->sent = SCTP_DATAGRAM_RESEND;
+ sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
+ cnt_mk++;
+ }
+ atomic_add_int(&alt->ref_count, 1);
+ }
+ }
+#ifdef THIS_SHOULD_NOT_BE_DONE
+ if ((stcb->asoc.sent_queue_retran_cnt == 0) && (could_be_sent)) {
+ /* fix it so we retransmit the highest acked anyway */
+ sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
+ cnt_mk++;
+ could_be_sent->sent = SCTP_DATAGRAM_RESEND;
+ }
+#endif
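+ /*
+ * Cross-check: the locally counted RESEND chunks should equal the
+ * association's retransmit counter; resync it unless an auditing
+ * build wants to preserve the discrepancy.
+ */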
+ if (stcb->asoc.sent_queue_retran_cnt != cnt_mk) {
+#ifdef INVARIANTS
+ SCTP_PRINTF("Local Audit says there are %d for retran asoc cnt:%d we marked:%d this time\n",
+ cnt_mk, stcb->asoc.sent_queue_retran_cnt, num_mk);
+#endif
+#ifndef SCTP_AUDITING_ENABLED
+ stcb->asoc.sent_queue_retran_cnt = cnt_mk;
+#endif
+ }
+ if (audit_tf) {
+ SCTPDBG(SCTP_DEBUG_TIMER4,
+ "Audit total flight due to negative value net:%p\n",
+ net);
+ stcb->asoc.total_flight = 0;
+ stcb->asoc.total_flight_count = 0;
+ /* Clear all networks flight size */
+ TAILQ_FOREACH(lnets, &stcb->asoc.nets, sctp_next) {
+ lnets->flight_size = 0;
+ SCTPDBG(SCTP_DEBUG_TIMER4,
+ "Net:%p c-f cwnd:%d ssthresh:%d\n",
+ lnets, lnets->cwnd, lnets->ssthresh);
+ }
+ TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
+ if (chk->sent < SCTP_DATAGRAM_RESEND) {
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
+ sctp_misc_ints(SCTP_FLIGHT_LOG_UP,
+ chk->whoTo->flight_size,
+ chk->book_size,
+ (uintptr_t) chk->whoTo,
+ chk->rec.data.TSN_seq);
+ }
+ sctp_flight_size_increase(chk);
+ sctp_total_flight_increase(stcb, chk);
+ }
+ }
+ }
+ /*
+ * Set up the ECN nonce re-sync point. We do this since
+ * retransmissions are NOT set up for ECN. This means that, due to
+ * Karn's rule, we don't know the total of the peer's ECN bits.
+ */
+ chk = TAILQ_FIRST(&stcb->asoc.send_queue);
+ if (chk == NULL) {
+ stcb->asoc.nonce_resync_tsn = stcb->asoc.sending_seq;
+ } else {
+ stcb->asoc.nonce_resync_tsn = chk->rec.data.TSN_seq;
+ }
+ stcb->asoc.nonce_wait_for_ecne = 0;
+ stcb->asoc.nonce_sum_check = 0;
+ /* Note: we always return 0 here; the caller computes the window-probe case itself. */
+ return (0);
+}
+
+
+int
+sctp_t3rxt_timer(struct sctp_inpcb *inp,
+ struct sctp_tcb *stcb,
+ struct sctp_nets *net)
+{
+ struct sctp_nets *alt;
+ int win_probe, num_mk, num_abandoned;
+
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
+ sctp_log_fr(0, 0, 0, SCTP_FR_T3_TIMEOUT);
+ }
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
+ struct sctp_nets *lnet;
+
+ TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
+ if (net == lnet) {
+ sctp_log_cwnd(stcb, lnet, 1, SCTP_CWND_LOG_FROM_T3);
+ } else {
+ sctp_log_cwnd(stcb, lnet, 0, SCTP_CWND_LOG_FROM_T3);
+ }
+ }
+ }
+ /* Find an alternate and mark those for retransmission */
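+ /*
+ * A window probe is outstanding when the peer has advertised a
+ * zero rwnd and less than one MTU of data is in flight.
+ */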
+ if ((stcb->asoc.peers_rwnd == 0) &&
+ (stcb->asoc.total_flight < net->mtu)) {
+ SCTP_STAT_INCR(sctps_timowindowprobe);
+ win_probe = 1;
+ } else {
+ win_probe = 0;
+ }
+
+ /*
+ * JRS 5/14/07 - If CMT PF is on and the destination is not already
+ * in PF state, set the destination to PF state and store the
+ * current time as the time that the destination was last active. In
+ * addition, find an alternate destination with PF-based
+ * find_alt_net().
+ */
+ if ((stcb->asoc.sctp_cmt_on_off == 1) &&
+ (stcb->asoc.sctp_cmt_pf > 0)) {
+ if ((net->dest_state & SCTP_ADDR_PF) != SCTP_ADDR_PF) {
+ net->dest_state |= SCTP_ADDR_PF;
+ net->last_active = sctp_get_tick_count();
+ SCTPDBG(SCTP_DEBUG_TIMER4, "Destination %p moved from active to PF.\n",
+ net);
+ }
+ alt = sctp_find_alternate_net(stcb, net, 2);
+ } else if (stcb->asoc.sctp_cmt_on_off == 1) {
+ /*
+ * CMT: Using RTX_SSTHRESH policy for CMT. If CMT is being
+ * used, then pick dest with largest ssthresh for any
+ * retransmission.
+ */
+ alt = sctp_find_alternate_net(stcb, net, 1);
+ /*
+ * CUCv2: If a different dest is picked for the
+ * retransmission, then new (rtx-)pseudo_cumack needs to be
+ * tracked for orig dest. Let CUCv2 track new (rtx-)
+ * pseudo-cumack always.
+ */
+ net->find_pseudo_cumack = 1;
+ net->find_rtx_pseudo_cumack = 1;
+ } else { /* CMT is OFF */
+ alt = sctp_find_alternate_net(stcb, net, 0);
+ }
+ num_mk = 0;
+ num_abandoned = 0;
+ (void)sctp_mark_all_for_resend(stcb, net, alt, win_probe,
+ &num_mk, &num_abandoned);
+ /* FR Loss recovery just ended with the T3. */
+ stcb->asoc.fast_retran_loss_recovery = 0;
+
+ /* CMT FR loss recovery ended with the T3 */
+ net->fast_retran_loss_recovery = 0;
+
+ /*
+ * setup the sat loss recovery that prevents satellite cwnd advance.
+ */
+ stcb->asoc.sat_t3_loss_recovery = 1;
+ stcb->asoc.sat_t3_recovery_tsn = stcb->asoc.sending_seq;
+
+ /* Backoff the timer and cwnd */
+ sctp_backoff_on_timeout(stcb, net, win_probe, num_mk, num_abandoned);
+ if (win_probe == 0) {
+ /* We don't do normal threshold management on window probes */
+ if (sctp_threshold_management(inp, stcb, net,
+ stcb->asoc.max_send_times)) {
+ /* Association was destroyed */
+ return (1);
+ } else {
+ if (net != stcb->asoc.primary_destination) {
+ /* send an immediate HB if our RTO is stale */
+ struct timeval now;
+ unsigned int ms_goneby;
+
+ (void)SCTP_GETTIME_TIMEVAL(&now);
+ if (net->last_sent_time.tv_sec) {
+ ms_goneby = (now.tv_sec - net->last_sent_time.tv_sec) * 1000;
+ } else {
+ ms_goneby = 0;
+ }
+ if ((ms_goneby > net->RTO) || (net->RTO == 0)) {
+ /*
+ * no recent feedback in an RTO or
+ * more; request an RTT update
+ */
+ if (sctp_send_hb(stcb, 1, net) < 0)
+ /*
+ * Less than 0 means we lost
+ * the assoc
+ */
+ return (1);
+ }
+ }
+ }
+ } else {
+ /*
+ * For a window probe we don't penalize the nets but only
+ * the association. This may fail it if SACKs are not coming
+ * back. If SACKs are coming with rwnd locked at 0, we will
+ * continue to hold things waiting for rwnd to rise.
+ */
+ if (sctp_threshold_management(inp, stcb, NULL,
+ stcb->asoc.max_send_times)) {
+ /* Association was destroyed */
+ return (1);
+ }
+ }
+ if (net->dest_state & SCTP_ADDR_NOT_REACHABLE) {
+ /* Move all pending over too */
+ sctp_move_chunks_from_net(stcb, net);
+
+ /*
+ * Get the address that failed, to force a new src address
+ * selection and a route allocation.
+ */
+ if (net->ro._s_addr) {
+ sctp_free_ifa(net->ro._s_addr);
+ net->ro._s_addr = NULL;
+ }
+ net->src_addr_selected = 0;
+
+ /* Force a route allocation too */
+ if (net->ro.ro_rt) {
+ RTFREE(net->ro.ro_rt);
+ net->ro.ro_rt = NULL;
+ }
+ /* Was it our primary? */
+ if ((stcb->asoc.primary_destination == net) && (alt != net)) {
+ /*
+ * Yes, note it as such and find an alternate. Note:
+ * this means the HB code must use this flag to restore
+ * the primary if it goes active again, AND if someone
+ * does a change-primary then this flag must be cleared
+ * from any net structures.
+ */
+ if (sctp_set_primary_addr(stcb,
+ (struct sockaddr *)NULL,
+ alt) == 0) {
+ net->dest_state |= SCTP_ADDR_WAS_PRIMARY;
+ }
+ }
+ } else if ((stcb->asoc.sctp_cmt_on_off == 1) &&
+ (stcb->asoc.sctp_cmt_pf > 0) &&
+ ((net->dest_state & SCTP_ADDR_PF) == SCTP_ADDR_PF)) {
+ /*
+ * JRS 5/14/07 - If the destination hasn't failed completely
+ * but is in PF state, a PF-heartbeat needs to be sent
+ * manually.
+ */
+ if (sctp_send_hb(stcb, 1, net) < 0)
+ /* Return less than 0 means we lost the association */
+ return (1);
+ }
+ /*
+ * Special case for cookie-echo'ed case, we don't do output but must
+ * await the COOKIE-ACK before retransmission
+ */
+ if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED) {
+ /*
+ * Here we just reset the timer and start again since we
+ * have not established the asoc
+ */
+ sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
+ return (0);
+ }
+ if (stcb->asoc.peer_supports_prsctp) {
+ struct sctp_tmit_chunk *lchk;
+
+ lchk = sctp_try_advance_peer_ack_point(stcb, &stcb->asoc);
+ /* C3. See if we need to send a Fwd-TSN */
+ if (compare_with_wrap(stcb->asoc.advanced_peer_ack_point,
+ stcb->asoc.last_acked_seq, MAX_TSN)) {
+ /*
+ * ISSUE with ECN, see FWD-TSN processing for notes
+ * on issues that will occur when the ECN NONCE
+ * stuff is put into SCTP for cross checking.
+ */
+ send_forward_tsn(stcb, &stcb->asoc);
+ if (lchk) {
+ /* Assure a timer is up */
+ sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, lchk->whoTo);
+ }
+ }
+ }
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
+ sctp_log_cwnd(stcb, net, net->cwnd, SCTP_CWND_LOG_FROM_RTX);
+ }
+ return (0);
+}
+
+int
+sctp_t1init_timer(struct sctp_inpcb *inp,
+ struct sctp_tcb *stcb,
+ struct sctp_nets *net)
+{
+ /* bump the thresholds */
+ if (stcb->asoc.delayed_connection) {
+ /*
+ * special hook for delayed connection. The library did NOT
+ * complete the rest of its sends.
+ */
+ stcb->asoc.delayed_connection = 0;
+ sctp_send_initiate(inp, stcb, SCTP_SO_NOT_LOCKED);
+ return (0);
+ }
+ if (SCTP_GET_STATE((&stcb->asoc)) != SCTP_STATE_COOKIE_WAIT) {
+ return (0);
+ }
+ if (sctp_threshold_management(inp, stcb, net,
+ stcb->asoc.max_init_times)) {
+ /* Association was destroyed */
+ return (1);
+ }
+ stcb->asoc.dropped_special_cnt = 0;
+ sctp_backoff_on_timeout(stcb, stcb->asoc.primary_destination, 1, 0, 0);
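+ /* Cap the backed-off RTO at the configured INIT-phase maximum. */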
+ if (stcb->asoc.initial_init_rto_max < net->RTO) {
+ net->RTO = stcb->asoc.initial_init_rto_max;
+ }
+ if (stcb->asoc.numnets > 1) {
+ /* If we have more than one addr use it */
+ struct sctp_nets *alt;
+
+ alt = sctp_find_alternate_net(stcb, stcb->asoc.primary_destination, 0);
+ if (alt != stcb->asoc.primary_destination) {
+ sctp_move_chunks_from_net(stcb, stcb->asoc.primary_destination);
+ stcb->asoc.primary_destination = alt;
+ }
+ }
+ /* Send out a new init */
+ sctp_send_initiate(inp, stcb, SCTP_SO_NOT_LOCKED);
+ return (0);
+}
+
+/*
+ * For cookie and asconf we actually need to find and mark for resend, then
+ * increment the resend counter (after all the threshold management stuff of
+ * course).
+ */
+int
+sctp_cookie_timer(struct sctp_inpcb *inp,
+ struct sctp_tcb *stcb,
+ struct sctp_nets *net)
+{
+ struct sctp_nets *alt;
+ struct sctp_tmit_chunk *cookie;
+
+ /* first before all else we must find the cookie */
+ TAILQ_FOREACH(cookie, &stcb->asoc.control_send_queue, sctp_next) {
+ if (cookie->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
+ break;
+ }
+ }
+ if (cookie == NULL) {
+ if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED) {
+ /* FOOBAR! */
+ struct mbuf *oper;
+
+ oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
+ 0, M_DONTWAIT, 1, MT_DATA);
+ if (oper) {
+ struct sctp_paramhdr *ph;
+ uint32_t *ippp;
+
+ SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
+ sizeof(uint32_t);
+ ph = mtod(oper, struct sctp_paramhdr *);
+ ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
+ ph->param_length = htons(SCTP_BUF_LEN(oper));
+ ippp = (uint32_t *) (ph + 1);
+ *ippp = htonl(SCTP_FROM_SCTP_TIMER + SCTP_LOC_3);
+ }
+ inp->last_abort_code = SCTP_FROM_SCTP_TIMER + SCTP_LOC_4;
+ sctp_abort_an_association(inp, stcb, SCTP_INTERNAL_ERROR,
+ oper, SCTP_SO_NOT_LOCKED);
+ } else {
+#ifdef INVARIANTS
+ panic("Cookie timer expires in wrong state?");
+#else
+ SCTP_PRINTF("Strange in state %d not cookie-echoed yet c-e timer expires?\n", SCTP_GET_STATE(&stcb->asoc));
+ return (0);
+#endif
+ }
+ return (0);
+ }
+ /* Ok we found the cookie, threshold management next */
+ if (sctp_threshold_management(inp, stcb, cookie->whoTo,
+ stcb->asoc.max_init_times)) {
+ /* Assoc is over */
+ return (1);
+ }
+ /*
+ * cleared threshold management; now let's back off the address and select
+ * an alternate
+ */
+ stcb->asoc.dropped_special_cnt = 0;
+ sctp_backoff_on_timeout(stcb, cookie->whoTo, 1, 0, 0);
+ alt = sctp_find_alternate_net(stcb, cookie->whoTo, 0);
+ if (alt != cookie->whoTo) {
+ sctp_free_remote_addr(cookie->whoTo);
+ cookie->whoTo = alt;
+ atomic_add_int(&alt->ref_count, 1);
+ }
+ /* Now mark the retran info */
+ if (cookie->sent != SCTP_DATAGRAM_RESEND) {
+ sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
+ }
+ cookie->sent = SCTP_DATAGRAM_RESEND;
+ /*
+ * Now call the output routine to kick out the cookie again. Note we
+ * don't mark any chunks for retran so that FR will need to kick in
+ * to move these (or a send timer).
+ */
+ return (0);
+}
+
+int
+sctp_strreset_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
+ struct sctp_nets *net)
+{
+ struct sctp_nets *alt;
+ struct sctp_tmit_chunk *strrst = NULL, *chk = NULL;
+
+ if (stcb->asoc.stream_reset_outstanding == 0) {
+ return (0);
+ }
+ /* find the existing STRRESET, we use the seq number we sent out on */
+ (void)sctp_find_stream_reset(stcb, stcb->asoc.str_reset_seq_out, &strrst);
+ if (strrst == NULL) {
+ return (0);
+ }
+ /* do threshold management */
+ if (sctp_threshold_management(inp, stcb, strrst->whoTo,
+ stcb->asoc.max_send_times)) {
+ /* Assoc is over */
+ return (1);
+ }
+ /*
+ * cleared threshold management; now let's back off the address and select
+ * an alternate
+ */
+ sctp_backoff_on_timeout(stcb, strrst->whoTo, 1, 0, 0);
+ alt = sctp_find_alternate_net(stcb, strrst->whoTo, 0);
+ sctp_free_remote_addr(strrst->whoTo);
+ strrst->whoTo = alt;
+ atomic_add_int(&alt->ref_count, 1);
+
+ /* See if an ECN Echo is also stranded */
+ TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) {
+ if ((chk->whoTo == net) &&
+ (chk->rec.chunk_id.id == SCTP_ECN_ECHO)) {
+ sctp_free_remote_addr(chk->whoTo);
+ if (chk->sent != SCTP_DATAGRAM_RESEND) {
+ chk->sent = SCTP_DATAGRAM_RESEND;
+ sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
+ }
+ chk->whoTo = alt;
+ atomic_add_int(&alt->ref_count, 1);
+ }
+ }
+ if (net->dest_state & SCTP_ADDR_NOT_REACHABLE) {
+ /*
+ * If the address went un-reachable, we need to move to
+ * alternates for ALL chk's in queue
+ */
+ sctp_move_chunks_from_net(stcb, net);
+ }
+ /* mark the retran info */
+ if (strrst->sent != SCTP_DATAGRAM_RESEND)
+ sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
+ strrst->sent = SCTP_DATAGRAM_RESEND;
+
+ /* restart the timer */
+ sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, inp, stcb, strrst->whoTo);
+ return (0);
+}
+
+int
+sctp_asconf_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
+ struct sctp_nets *net)
+{
+ struct sctp_nets *alt;
+ struct sctp_tmit_chunk *asconf, *chk, *nchk;
+
+ /* is this a first send, or a retransmission? */
+ if (TAILQ_EMPTY(&stcb->asoc.asconf_send_queue)) {
+ /* compose a new ASCONF chunk and send it */
+ sctp_send_asconf(stcb, net, SCTP_ADDR_NOT_LOCKED);
+ } else {
+ /*
+ * Retransmission of the existing ASCONF is needed
+ */
+
+ /* find the existing ASCONF */
+ asconf = TAILQ_FIRST(&stcb->asoc.asconf_send_queue);
+ if (asconf == NULL) {
+ return (0);
+ }
+ /* do threshold management */
+ if (sctp_threshold_management(inp, stcb, asconf->whoTo,
+ stcb->asoc.max_send_times)) {
+ /* Assoc is over */
+ return (1);
+ }
+ if (asconf->snd_count > stcb->asoc.max_send_times) {
+ /*
+ * Something is rotten: our peer is not responding
+ * to ASCONFs but apparently is to other chunks.
+ * i.e. it is not properly handling the chunk type's
+ * upper bits. Mark this peer as ASCONF-incapable
+ * and clean up.
+ */
+ SCTPDBG(SCTP_DEBUG_TIMER1, "asconf_timer: Peer has not responded to our repeated ASCONFs\n");
+ sctp_asconf_cleanup(stcb, net);
+ return (0);
+ }
+ /*
+ * cleared threshold management, so now backoff the net and
+ * select an alternate
+ */
+ sctp_backoff_on_timeout(stcb, asconf->whoTo, 1, 0, 0);
+ alt = sctp_find_alternate_net(stcb, asconf->whoTo, 0);
+ if (asconf->whoTo != alt) {
+ sctp_free_remote_addr(asconf->whoTo);
+ asconf->whoTo = alt;
+ atomic_add_int(&alt->ref_count, 1);
+ }
+ /* See if an ECN Echo is also stranded */
+ TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) {
+ if ((chk->whoTo == net) &&
+ (chk->rec.chunk_id.id == SCTP_ECN_ECHO)) {
+ sctp_free_remote_addr(chk->whoTo);
+ chk->whoTo = alt;
+ if (chk->sent != SCTP_DATAGRAM_RESEND) {
+ chk->sent = SCTP_DATAGRAM_RESEND;
+ sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
+ }
+ atomic_add_int(&alt->ref_count, 1);
+ }
+ }
+ for (chk = asconf; chk; chk = nchk) {
+ nchk = TAILQ_NEXT(chk, sctp_next);
+ if (chk->whoTo != alt) {
+ sctp_free_remote_addr(chk->whoTo);
+ chk->whoTo = alt;
+ atomic_add_int(&alt->ref_count, 1);
+ }
+ if (asconf->sent != SCTP_DATAGRAM_RESEND && chk->sent != SCTP_DATAGRAM_UNSENT)
+ sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
+ chk->sent = SCTP_DATAGRAM_RESEND;
+ }
+ if (net->dest_state & SCTP_ADDR_NOT_REACHABLE) {
+ /*
+ * If the address went un-reachable, we need to move
+ * to the alternate for ALL chunks in queue
+ */
+ sctp_move_chunks_from_net(stcb, net);
+ }
+ /* mark the retran info */
+ if (asconf->sent != SCTP_DATAGRAM_RESEND)
+ sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
+ asconf->sent = SCTP_DATAGRAM_RESEND;
+
+ /* send another ASCONF if any are queued and we can do so */
+ sctp_send_asconf(stcb, alt, SCTP_ADDR_NOT_LOCKED);
+ }
+ return (0);
+}
+
+/* Mobility adaptation */
+void
+sctp_delete_prim_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
+ struct sctp_nets *net)
+{
+ if (stcb->asoc.deleted_primary == NULL) {
+ SCTPDBG(SCTP_DEBUG_ASCONF1, "delete_prim_timer: deleted_primary is not stored...\n");
+ sctp_mobility_feature_off(inp, SCTP_MOBILITY_PRIM_DELETED);
+ return;
+ }
+ SCTPDBG(SCTP_DEBUG_ASCONF1, "delete_prim_timer: finished to keep deleted primary ");
+ SCTPDBG_ADDR(SCTP_DEBUG_ASCONF1, &stcb->asoc.deleted_primary->ro._l_addr.sa);
+ sctp_free_remote_addr(stcb->asoc.deleted_primary);
+ stcb->asoc.deleted_primary = NULL;
+ sctp_mobility_feature_off(inp, SCTP_MOBILITY_PRIM_DELETED);
+ return;
+}
+
+/*
+ * For the shutdown and shutdown-ack, we do not keep one around on the
+ * control queue. This means we must generate a new one and call the general
+ * chunk output routine, AFTER having done threshold management.
+ * It is assumed that net is non-NULL.
+ */
+int
+sctp_shutdown_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
+ struct sctp_nets *net)
+{
+ struct sctp_nets *alt;
+
+ /* first, threshold management */
+ if (sctp_threshold_management(inp, stcb, net, stcb->asoc.max_send_times)) {
+ /* Assoc is over */
+ return (1);
+ }
+ sctp_backoff_on_timeout(stcb, net, 1, 0, 0);
+ /* second select an alternative */
+ alt = sctp_find_alternate_net(stcb, net, 0);
+
+ /* third, generate a shutdown into the queue for our net */
+ sctp_send_shutdown(stcb, alt);
+
+ /* fourth restart timer */
+ sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, inp, stcb, alt);
+ return (0);
+}
+
+int
+sctp_shutdownack_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
+ struct sctp_nets *net)
+{
+ struct sctp_nets *alt;
+
+ /* first, threshold management */
+ if (sctp_threshold_management(inp, stcb, net, stcb->asoc.max_send_times)) {
+ /* Assoc is over */
+ return (1);
+ }
+ sctp_backoff_on_timeout(stcb, net, 1, 0, 0);
+ /* second select an alternative */
+ alt = sctp_find_alternate_net(stcb, net, 0);
+
+ /* third, generate a shutdown into the queue for our net */
+ sctp_send_shutdown_ack(stcb, alt);
+
+ /* fourth restart timer */
+ sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK, inp, stcb, alt);
+ return (0);
+}
+
+static void
+sctp_audit_stream_queues_for_size(struct sctp_inpcb *inp,
+ struct sctp_tcb *stcb)
+{
+ struct sctp_stream_out *outs;
+ struct sctp_stream_queue_pending *sp;
+ unsigned int chks_in_queue = 0;
+ int being_filled = 0;
+
+ /*
+ * This function is ONLY called when the send/sent queues are empty.
+ */
+ if ((stcb == NULL) || (inp == NULL))
+ return;
+
+ if (stcb->asoc.sent_queue_retran_cnt) {
+ SCTP_PRINTF("Hmm, sent_queue_retran_cnt is non-zero %d\n",
+ stcb->asoc.sent_queue_retran_cnt);
+ stcb->asoc.sent_queue_retran_cnt = 0;
+ }
+ SCTP_TCB_SEND_LOCK(stcb);
+ if (TAILQ_EMPTY(&stcb->asoc.out_wheel)) {
+ int i, cnt = 0;
+
+ /* Check to see if a spoke fell off the wheel */
+ for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
+ if (!TAILQ_EMPTY(&stcb->asoc.strmout[i].outqueue)) {
+ sctp_insert_on_wheel(stcb, &stcb->asoc, &stcb->asoc.strmout[i], 1);
+ cnt++;
+ }
+ }
+ if (cnt) {
+ /* yep, we lost a spoke or two */
+ SCTP_PRINTF("Found an additional %d streams NOT on outwheel, corrected\n", cnt);
+ } else {
+ /* no spokes lost */
+ stcb->asoc.total_output_queue_size = 0;
+ }
+ SCTP_TCB_SEND_UNLOCK(stcb);
+ return;
+ }
+ SCTP_TCB_SEND_UNLOCK(stcb);
+ /* Check to see if some data is queued; if so, report it */
+ TAILQ_FOREACH(outs, &stcb->asoc.out_wheel, next_spoke) {
+ if (!TAILQ_EMPTY(&outs->outqueue)) {
+ TAILQ_FOREACH(sp, &outs->outqueue, next) {
+ if (sp->msg_is_complete)
+ being_filled++;
+ chks_in_queue++;
+ }
+ }
+ }
+ if (chks_in_queue != stcb->asoc.stream_queue_cnt) {
+ SCTP_PRINTF("Hmm, stream queue cnt at %d I counted %d in stream out wheel\n",
+ stcb->asoc.stream_queue_cnt, chks_in_queue);
+ }
+ if (chks_in_queue) {
+ /* call the output queue function */
+ sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
+ if ((TAILQ_EMPTY(&stcb->asoc.send_queue)) &&
+ (TAILQ_EMPTY(&stcb->asoc.sent_queue))) {
+ /*
+ * Probably should go in and make it go back through
+ * and add fragments allowed
+ */
+ if (being_filled == 0) {
+ SCTP_PRINTF("Still nothing moved %d chunks are stuck\n",
+ chks_in_queue);
+ }
+ }
+ } else {
+ SCTP_PRINTF("Found no chunks on any queue tot:%lu\n",
+ (u_long)stcb->asoc.total_output_queue_size);
+ stcb->asoc.total_output_queue_size = 0;
+ }
+}
+
+int
+sctp_heartbeat_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
+ struct sctp_nets *net, int cnt_of_unconf)
+{
+ int ret;
+
+ if (net) {
+ if (net->hb_responded == 0) {
+ if (net->ro._s_addr) {
+ /*
+ * Invalidate the src address if we did not
+ * get a response last time.
+ */
+ sctp_free_ifa(net->ro._s_addr);
+ net->ro._s_addr = NULL;
+ net->src_addr_selected = 0;
+ }
+ sctp_backoff_on_timeout(stcb, net, 1, 0, 0);
+ }
+ /* Zero PBA, if it needs it */
+ if (net->partial_bytes_acked) {
+ net->partial_bytes_acked = 0;
+ }
+ }
+ if ((stcb->asoc.total_output_queue_size > 0) &&
+ (TAILQ_EMPTY(&stcb->asoc.send_queue)) &&
+ (TAILQ_EMPTY(&stcb->asoc.sent_queue))) {
+ sctp_audit_stream_queues_for_size(inp, stcb);
+ }
+ /* Send a new HB; this will do threshold management and pick a new dest */
+ if (cnt_of_unconf == 0) {
+ if (sctp_send_hb(stcb, 0, NULL) < 0) {
+ return (1);
+ }
+ } else {
+ /*
+ * this will send out extra hb's up to maxburst if there are
+ * any unconfirmed addresses.
+ */
+ uint32_t cnt_sent = 0;
+
+ TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+ if ((net->dest_state & SCTP_ADDR_UNCONFIRMED) &&
+ (net->dest_state & SCTP_ADDR_REACHABLE)) {
+ cnt_sent++;
+ if (net->hb_responded == 0) {
+ /* No HB response last time, clear the cached src */
+ if (net->ro._s_addr) {
+ sctp_free_ifa(net->ro._s_addr);
+ net->ro._s_addr = NULL;
+ net->src_addr_selected = 0;
+ }
+ }
+ ret = sctp_send_hb(stcb, 1, net);
+ if (ret < 0)
+ return (1);
+ else if (ret == 0) {
+ break;
+ }
+ if (cnt_sent >= SCTP_BASE_SYSCTL(sctp_hb_maxburst))
+ break;
+ }
+ }
+ }
+ return (0);
+}
+
+void
+sctp_pathmtu_timer(struct sctp_inpcb *inp,
+ struct sctp_tcb *stcb,
+ struct sctp_nets *net)
+{
+ uint32_t next_mtu, mtu;
+
+ next_mtu = sctp_get_next_mtu(inp, net->mtu);
+
+ if ((next_mtu > net->mtu) && (net->port == 0)) {
+ if ((net->src_addr_selected == 0) ||
+ (net->ro._s_addr == NULL) ||
+ (net->ro._s_addr->localifa_flags & SCTP_BEING_DELETED)) {
+ if ((net->ro._s_addr != NULL) && (net->ro._s_addr->localifa_flags & SCTP_BEING_DELETED)) {
+ sctp_free_ifa(net->ro._s_addr);
+ net->ro._s_addr = NULL;
+ net->src_addr_selected = 0;
+ } else if (net->ro._s_addr == NULL) {
+#if defined(INET6) && defined(SCTP_EMBEDDED_V6_SCOPE)
+ if (net->ro._l_addr.sa.sa_family == AF_INET6) {
+ struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
+
+ /* KAME hack: embed scopeid */
+ (void)sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone));
+ }
+#endif
+
+ net->ro._s_addr = sctp_source_address_selection(inp,
+ stcb,
+ (sctp_route_t *) & net->ro,
+ net, 0, stcb->asoc.vrf_id);
+#if defined(INET6) && defined(SCTP_EMBEDDED_V6_SCOPE)
+ if (net->ro._l_addr.sa.sa_family == AF_INET6) {
+ struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
+
+ (void)sa6_recoverscope(sin6);
+ }
+#endif /* INET6 */
+ }
+ if (net->ro._s_addr)
+ net->src_addr_selected = 1;
+ }
+ if (net->ro._s_addr) {
+ mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._l_addr.sa, net->ro.ro_rt);
+ if (net->port) {
+ mtu -= sizeof(struct udphdr);
+ }
+ if (mtu > next_mtu) {
+ net->mtu = next_mtu;
+ }
+ }
+ }
+ /* restart the timer */
+ sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net);
+}
+
+void
+sctp_autoclose_timer(struct sctp_inpcb *inp,
+ struct sctp_tcb *stcb,
+ struct sctp_nets *net)
+{
+ struct timeval tn, *tim_touse;
+ struct sctp_association *asoc;
+ int ticks_gone_by;
+
+ (void)SCTP_GETTIME_TIMEVAL(&tn);
+ if (stcb->asoc.sctp_autoclose_ticks &&
+ sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) {
+ /* Auto close is on */
+ asoc = &stcb->asoc;
+ /* pick the time to use */
+ if (asoc->time_last_rcvd.tv_sec >
+ asoc->time_last_sent.tv_sec) {
+ tim_touse = &asoc->time_last_rcvd;
+ } else {
+ tim_touse = &asoc->time_last_sent;
+ }
+ /* Now, has enough time transpired for autoclose? */
+ ticks_gone_by = SEC_TO_TICKS(tn.tv_sec - tim_touse->tv_sec);
+ if ((ticks_gone_by > 0) &&
+ (ticks_gone_by >= (int)asoc->sctp_autoclose_ticks)) {
+ /*
+ * autoclose time has hit; call the output routine,
+ * which should do nothing, just to be SURE we don't
+ * have hanging data. We can then safely check the
+ * queues and know that we are clear to send a
+ * shutdown.
+ */
+ sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED);
+ /* Are we clean? */
+ if (TAILQ_EMPTY(&asoc->send_queue) &&
+ TAILQ_EMPTY(&asoc->sent_queue)) {
+ /*
+ * there is nothing queued to send, so I'm
+ * done...
+ */
+ if (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) {
+ /* only send SHUTDOWN 1st time thru */
+ sctp_send_shutdown(stcb, stcb->asoc.primary_destination);
+ if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
+ (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
+ SCTP_STAT_DECR_GAUGE32(sctps_currestab);
+ }
+ SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
+ SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
+ sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
+ stcb->sctp_ep, stcb,
+ asoc->primary_destination);
+ sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
+ stcb->sctp_ep, stcb,
+ asoc->primary_destination);
+ }
+ }
+ } else {
+ /*
+ * No auto close at this time, reset t-o to check
+ * later
+ */
+ int tmp;
+
+ /* fool the timer startup to use the time left */
+ tmp = asoc->sctp_autoclose_ticks;
+ asoc->sctp_autoclose_ticks -= ticks_gone_by;
+ sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb,
+ net);
+ /* restore the real tick value */
+ asoc->sctp_autoclose_ticks = tmp;
+ }
+ }
+}
diff --git a/rtems/freebsd/netinet/sctp_timer.h b/rtems/freebsd/netinet/sctp_timer.h
new file mode 100644
index 00000000..9d14da8c
--- /dev/null
+++ b/rtems/freebsd/netinet/sctp_timer.h
@@ -0,0 +1,101 @@
+/*-
+ * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* $KAME: sctp_timer.h,v 1.6 2005/03/06 16:04:18 itojun Exp $ */
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#ifndef __sctp_timer_h__
+#define __sctp_timer_h__
+
+#if defined(_KERNEL) || defined(__Userspace__)
+
+#define SCTP_RTT_SHIFT 3
+#define SCTP_RTT_VAR_SHIFT 2
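+
+/*
+ * These shifts give the classic RFC 2988 smoothing gains: SRTT moves by
+ * 1/8 of the measurement error and RTTVAR by 1/4. A sketch of the
+ * conventional integer update (not the literal code in the RTO
+ * calculation):
+ *
+ *	rttvar += (abs(rtt - srtt) - rttvar) >> SCTP_RTT_VAR_SHIFT;
+ *	srtt   += (rtt - srtt) >> SCTP_RTT_SHIFT;
+ */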
+
+void
+sctp_early_fr_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
+ struct sctp_nets *net);
+
+struct sctp_nets *
+sctp_find_alternate_net(struct sctp_tcb *,
+ struct sctp_nets *, int mode);
+
+int
+sctp_threshold_management(struct sctp_inpcb *, struct sctp_tcb *,
+ struct sctp_nets *, uint16_t);
+
+int
+sctp_t3rxt_timer(struct sctp_inpcb *, struct sctp_tcb *,
+ struct sctp_nets *);
+int
+sctp_t1init_timer(struct sctp_inpcb *, struct sctp_tcb *,
+ struct sctp_nets *);
+int
+sctp_shutdown_timer(struct sctp_inpcb *, struct sctp_tcb *,
+ struct sctp_nets *);
+int
+sctp_heartbeat_timer(struct sctp_inpcb *, struct sctp_tcb *,
+ struct sctp_nets *, int);
+
+int
+sctp_cookie_timer(struct sctp_inpcb *, struct sctp_tcb *,
+ struct sctp_nets *);
+
+void
+sctp_pathmtu_timer(struct sctp_inpcb *, struct sctp_tcb *,
+ struct sctp_nets *);
+
+int
+sctp_shutdownack_timer(struct sctp_inpcb *, struct sctp_tcb *,
+ struct sctp_nets *);
+int
+sctp_strreset_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
+ struct sctp_nets *net);
+
+int
+sctp_asconf_timer(struct sctp_inpcb *, struct sctp_tcb *,
+ struct sctp_nets *);
+
+void
+sctp_delete_prim_timer(struct sctp_inpcb *, struct sctp_tcb *,
+ struct sctp_nets *);
+
+void
+sctp_autoclose_timer(struct sctp_inpcb *, struct sctp_tcb *,
+ struct sctp_nets *net);
+
+void sctp_audit_retranmission_queue(struct sctp_association *);
+
+void sctp_iterator_timer(struct sctp_iterator *it);
+
+
+#endif
+#endif
diff --git a/rtems/freebsd/netinet/sctp_uio.h b/rtems/freebsd/netinet/sctp_uio.h
new file mode 100644
index 00000000..76a3bb53
--- /dev/null
+++ b/rtems/freebsd/netinet/sctp_uio.h
@@ -0,0 +1,1166 @@
+/*-
+ * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* $KAME: sctp_uio.h,v 1.11 2005/03/06 16:04:18 itojun Exp $ */
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#ifndef __sctp_uio_h__
+#define __sctp_uio_h__
+
+
+#if ! defined(_KERNEL)
+#include <rtems/freebsd/stdint.h>
+#endif
+#include <rtems/freebsd/sys/types.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/netinet/in.h>
+
+typedef uint32_t sctp_assoc_t;
+
+/* Compatibility to previous define's */
+#define sctp_stream_reset_events sctp_stream_reset_event
+
+/* On/Off setup for subscription to events */
+struct sctp_event_subscribe {
+ uint8_t sctp_data_io_event;
+ uint8_t sctp_association_event;
+ uint8_t sctp_address_event;
+ uint8_t sctp_send_failure_event;
+ uint8_t sctp_peer_error_event;
+ uint8_t sctp_shutdown_event;
+ uint8_t sctp_partial_delivery_event;
+ uint8_t sctp_adaptation_layer_event;
+ uint8_t sctp_authentication_event;
+ uint8_t sctp_sender_dry_event;
+ uint8_t sctp_stream_reset_event;
+};
+
+/* ancillary data types */
+#define SCTP_INIT 0x0001
+#define SCTP_SNDRCV 0x0002
+#define SCTP_EXTRCV 0x0003
+/*
+ * ancillary data structures
+ */
+struct sctp_initmsg {
+ uint16_t sinit_num_ostreams;
+ uint16_t sinit_max_instreams;
+ uint16_t sinit_max_attempts;
+ uint16_t sinit_max_init_timeo;
+};
+
+/* We add 96 bytes to the size of sctp_sndrcvinfo.
+ * This makes the current structure 128 bytes long,
+ * which is nicely 64-bit aligned but also leaves room
+ * for us to add more fields and keep ABI compatibility
+ * (see the size sketch below the structure). For
+ * example, we already have the sctp_extrcvinfo when
+ * enabled, which is 48 bytes.
+ */
+
+/*
+ * The assoc up needs a verfid;
+ * all sendrcvinfo's need a verfid for SENDING only.
+ */
+
+
+#define SCTP_ALIGN_RESV_PAD 96
+#define SCTP_ALIGN_RESV_PAD_SHORT 80
+
+struct sctp_sndrcvinfo {
+ uint16_t sinfo_stream;
+ uint16_t sinfo_ssn;
+ uint16_t sinfo_flags;
+ uint32_t sinfo_ppid;
+ uint32_t sinfo_context;
+ uint32_t sinfo_timetolive;
+ uint32_t sinfo_tsn;
+ uint32_t sinfo_cumtsn;
+ sctp_assoc_t sinfo_assoc_id;
+ uint8_t __reserve_pad[SCTP_ALIGN_RESV_PAD];
+};
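+
+/*
+ * Size sketch (illustrative, assuming no unusual compiler padding): the
+ * fixed fields above pack to 32 bytes (including 2 bytes of alignment
+ * padding after sinfo_flags), so with the 96 reserve-pad bytes the
+ * structure comes out to the advertised 128, which a build could assert
+ * with e.g.:
+ *
+ *	CTASSERT(sizeof(struct sctp_sndrcvinfo) == 128);
+ */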
+
+struct sctp_extrcvinfo {
+ uint16_t sinfo_stream;
+ uint16_t sinfo_ssn;
+ uint16_t sinfo_flags;
+ uint16_t sinfo_pr_policy;
+ uint32_t sinfo_ppid;
+ uint32_t sinfo_context;
+ uint32_t sinfo_timetolive;
+ uint32_t sinfo_tsn;
+ uint32_t sinfo_cumtsn;
+ sctp_assoc_t sinfo_assoc_id;
+ uint16_t sreinfo_next_flags;
+ uint16_t sreinfo_next_stream;
+ uint32_t sreinfo_next_aid;
+ uint32_t sreinfo_next_length;
+ uint32_t sreinfo_next_ppid;
+ uint8_t __reserve_pad[SCTP_ALIGN_RESV_PAD_SHORT];
+};
+
+#define SCTP_NO_NEXT_MSG 0x0000
+#define SCTP_NEXT_MSG_AVAIL 0x0001
+#define SCTP_NEXT_MSG_ISCOMPLETE 0x0002
+#define SCTP_NEXT_MSG_IS_UNORDERED 0x0004
+#define SCTP_NEXT_MSG_IS_NOTIFICATION 0x0008
+
+struct sctp_snd_all_completes {
+ uint16_t sall_stream;
+ uint16_t sall_flags;
+ uint32_t sall_ppid;
+ uint32_t sall_context;
+ uint32_t sall_num_sent;
+ uint32_t sall_num_failed;
+};
+
+/* Flags that go into the sinfo->sinfo_flags field */
+#define SCTP_EOF 0x0100 /* Start shutdown procedures */
+#define SCTP_ABORT 0x0200 /* Send an ABORT to peer */
+#define SCTP_UNORDERED 0x0400 /* Message is un-ordered */
+#define SCTP_ADDR_OVER 0x0800 /* Override the primary-address */
+#define SCTP_SENDALL 0x1000 /* Send this on all associations */
+#define SCTP_EOR 0x2000 /* end of message signal */
+#define SCTP_SACK_IMMEDIATELY 0x4000 /* Set I-Bit */
+
+#define INVALID_SINFO_FLAG(x) (((x) & 0xffffff00 \
+ & ~(SCTP_EOF | SCTP_ABORT | SCTP_UNORDERED |\
+ SCTP_ADDR_OVER | SCTP_SENDALL | SCTP_EOR |\
+ SCTP_SACK_IMMEDIATELY)) != 0)
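+
+/*
+ * Illustrative use (a sketch, not code from this header): a send path
+ * would typically reject unknown upper-byte flags before queueing, e.g.:
+ *
+ *	if (INVALID_SINFO_FLAG(sinfo->sinfo_flags))
+ *		return (EINVAL);
+ */
+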
+/* for the endpoint */
+
+/* The lower byte is an enumeration of PR-SCTP policies */
+#define SCTP_PR_SCTP_TTL 0x0001/* Time based PR-SCTP */
+#define SCTP_PR_SCTP_BUF 0x0002/* Buffer based PR-SCTP */
+#define SCTP_PR_SCTP_RTX 0x0003/* Number of retransmissions based PR-SCTP */
+
+#define PR_SCTP_POLICY(x) ((x) & 0xff)
+#define PR_SCTP_ENABLED(x) (PR_SCTP_POLICY(x) != 0)
+#define PR_SCTP_TTL_ENABLED(x) (PR_SCTP_POLICY(x) == SCTP_PR_SCTP_TTL)
+#define PR_SCTP_BUF_ENABLED(x) (PR_SCTP_POLICY(x) == SCTP_PR_SCTP_BUF)
+#define PR_SCTP_RTX_ENABLED(x) (PR_SCTP_POLICY(x) == SCTP_PR_SCTP_RTX)
+#define PR_SCTP_INVALID_POLICY(x) (PR_SCTP_POLICY(x) > SCTP_PR_SCTP_RTX)
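+
+/*
+ * Illustrative use (a sketch; the policy rides in the lower byte of
+ * sinfo_flags alongside the 0xff00 flag bits above), e.g. a time-based
+ * PR-SCTP send with a 1000 ms lifetime:
+ *
+ *	sinfo.sinfo_flags |= SCTP_PR_SCTP_TTL;
+ *	sinfo.sinfo_timetolive = 1000;
+ */
+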
+/* Stat's */
+struct sctp_pcbinfo {
+ uint32_t ep_count;
+ uint32_t asoc_count;
+ uint32_t laddr_count;
+ uint32_t raddr_count;
+ uint32_t chk_count;
+ uint32_t readq_count;
+ uint32_t free_chunks;
+ uint32_t stream_oque;
+};
+
+struct sctp_sockstat {
+ sctp_assoc_t ss_assoc_id;
+ uint32_t ss_total_sndbuf;
+ uint32_t ss_total_recv_buf;
+};
+
+/*
+ * notification event structures
+ */
+
+/*
+ * association change event
+ */
+struct sctp_assoc_change {
+ uint16_t sac_type;
+ uint16_t sac_flags;
+ uint32_t sac_length;
+ uint16_t sac_state;
+ uint16_t sac_error;
+ uint16_t sac_outbound_streams;
+ uint16_t sac_inbound_streams;
+ sctp_assoc_t sac_assoc_id;
+};
+
+/* sac_state values */
+#define SCTP_COMM_UP 0x0001
+#define SCTP_COMM_LOST 0x0002
+#define SCTP_RESTART 0x0003
+#define SCTP_SHUTDOWN_COMP 0x0004
+#define SCTP_CANT_STR_ASSOC 0x0005
+
+
+/*
+ * Address event
+ */
+struct sctp_paddr_change {
+ uint16_t spc_type;
+ uint16_t spc_flags;
+ uint32_t spc_length;
+ struct sockaddr_storage spc_aaddr;
+ uint32_t spc_state;
+ uint32_t spc_error;
+ sctp_assoc_t spc_assoc_id;
+ uint8_t spc_padding[4];
+};
+
+/* paddr state values */
+#define SCTP_ADDR_AVAILABLE 0x0001
+#define SCTP_ADDR_UNREACHABLE 0x0002
+#define SCTP_ADDR_REMOVED 0x0003
+#define SCTP_ADDR_ADDED 0x0004
+#define SCTP_ADDR_MADE_PRIM 0x0005
+#define SCTP_ADDR_CONFIRMED 0x0006
+
+/*
+ * CAUTION: these are user exposed SCTP addr reachability states must be
+ * compatible with SCTP_ADDR states in sctp_constants.h
+ */
+#ifdef SCTP_ACTIVE
+#undef SCTP_ACTIVE
+#endif
+#define SCTP_ACTIVE 0x0001 /* SCTP_ADDR_REACHABLE */
+
+#ifdef SCTP_INACTIVE
+#undef SCTP_INACTIVE
+#endif
+#define SCTP_INACTIVE 0x0002 /* SCTP_ADDR_NOT_REACHABLE */
+
+#ifdef SCTP_UNCONFIRMED
+#undef SCTP_UNCONFIRMED
+#endif
+#define SCTP_UNCONFIRMED 0x0200 /* SCTP_ADDR_UNCONFIRMED */
+
+#ifdef SCTP_NOHEARTBEAT
+#undef SCTP_NOHEARTBEAT
+#endif
+#define SCTP_NOHEARTBEAT 0x0040 /* SCTP_ADDR_NOHB */
+
+
+/* remote error events */
+struct sctp_remote_error {
+ uint16_t sre_type;
+ uint16_t sre_flags;
+ uint32_t sre_length;
+ uint16_t sre_error;
+ sctp_assoc_t sre_assoc_id;
+ uint8_t sre_data[4];
+};
+
+/* data send failure event */
+struct sctp_send_failed {
+ uint16_t ssf_type;
+ uint16_t ssf_flags;
+ uint32_t ssf_length;
+ uint32_t ssf_error;
+ struct sctp_sndrcvinfo ssf_info;
+ sctp_assoc_t ssf_assoc_id;
+ uint8_t ssf_data[];
+};
+
+/* flag that indicates state of data */
+#define SCTP_DATA_UNSENT 0x0001 /* inqueue never on wire */
+#define SCTP_DATA_SENT 0x0002 /* on wire at failure */
+
+/* shutdown event */
+struct sctp_shutdown_event {
+ uint16_t sse_type;
+ uint16_t sse_flags;
+ uint32_t sse_length;
+ sctp_assoc_t sse_assoc_id;
+};
+
+/* Adaptation layer indication stuff */
+struct sctp_adaptation_event {
+ uint16_t sai_type;
+ uint16_t sai_flags;
+ uint32_t sai_length;
+ uint32_t sai_adaptation_ind;
+ sctp_assoc_t sai_assoc_id;
+};
+
+struct sctp_setadaptation {
+ uint32_t ssb_adaptation_ind;
+};
+
+/* compatible old spelling */
+struct sctp_adaption_event {
+ uint16_t sai_type;
+ uint16_t sai_flags;
+ uint32_t sai_length;
+ uint32_t sai_adaption_ind;
+ sctp_assoc_t sai_assoc_id;
+};
+
+struct sctp_setadaption {
+ uint32_t ssb_adaption_ind;
+};
+
+
+/*
+ * Partial Delivery API event
+ */
+struct sctp_pdapi_event {
+ uint16_t pdapi_type;
+ uint16_t pdapi_flags;
+ uint32_t pdapi_length;
+ uint32_t pdapi_indication;
+ uint16_t pdapi_stream;
+ uint16_t pdapi_seq;
+ sctp_assoc_t pdapi_assoc_id;
+};
+
+/* indication values */
+#define SCTP_PARTIAL_DELIVERY_ABORTED 0x0001
+
+
+/*
+ * authentication key event
+ */
+struct sctp_authkey_event {
+ uint16_t auth_type;
+ uint16_t auth_flags;
+ uint32_t auth_length;
+ uint16_t auth_keynumber;
+ uint16_t auth_altkeynumber;
+ uint32_t auth_indication;
+ sctp_assoc_t auth_assoc_id;
+};
+
+/* indication values */
+#define SCTP_AUTH_NEWKEY 0x0001
+#define SCTP_AUTH_NO_AUTH 0x0002
+#define SCTP_AUTH_FREE_KEY 0x0003
+
+
+struct sctp_sender_dry_event {
+ uint16_t sender_dry_type;
+ uint16_t sender_dry_flags;
+ uint32_t sender_dry_length;
+ sctp_assoc_t sender_dry_assoc_id;
+};
+
+
+/*
+ * stream reset event
+ */
+struct sctp_stream_reset_event {
+ uint16_t strreset_type;
+ uint16_t strreset_flags;
+ uint32_t strreset_length;
+ sctp_assoc_t strreset_assoc_id;
+ uint16_t strreset_list[];
+};
+
+/* flags in strreset_flags field */
+#define SCTP_STRRESET_INBOUND_STR 0x0001
+#define SCTP_STRRESET_OUTBOUND_STR 0x0002
+#define SCTP_STRRESET_ALL_STREAMS 0x0004
+#define SCTP_STRRESET_STREAM_LIST 0x0008
+#define SCTP_STRRESET_FAILED 0x0010
+#define SCTP_STRRESET_ADD_STREAM 0x0020
+
+/* SCTP notification event */
+struct sctp_tlv {
+ uint16_t sn_type;
+ uint16_t sn_flags;
+ uint32_t sn_length;
+};
+
+union sctp_notification {
+ struct sctp_tlv sn_header;
+ struct sctp_assoc_change sn_assoc_change;
+ struct sctp_paddr_change sn_paddr_change;
+ struct sctp_remote_error sn_remote_error;
+ struct sctp_send_failed sn_send_failed;
+ struct sctp_shutdown_event sn_shutdown_event;
+ struct sctp_adaptation_event sn_adaptation_event;
+ /* compatibility same as above */
+ struct sctp_adaption_event sn_adaption_event;
+ struct sctp_pdapi_event sn_pdapi_event;
+ struct sctp_authkey_event sn_auth_event;
+ struct sctp_sender_dry_event sn_sender_dry_event;
+ struct sctp_stream_reset_event sn_strreset_event;
+};
+
+/* notification types */
+#define SCTP_ASSOC_CHANGE 0x0001
+#define SCTP_PEER_ADDR_CHANGE 0x0002
+#define SCTP_REMOTE_ERROR 0x0003
+#define SCTP_SEND_FAILED 0x0004
+#define SCTP_SHUTDOWN_EVENT 0x0005
+#define SCTP_ADAPTATION_INDICATION 0x0006
+/* same as above */
+#define SCTP_ADAPTION_INDICATION 0x0006
+#define SCTP_PARTIAL_DELIVERY_EVENT 0x0007
+#define SCTP_AUTHENTICATION_EVENT 0x0008
+#define SCTP_STREAM_RESET_EVENT 0x0009
+#define SCTP_SENDER_DRY_EVENT 0x000a
+#define SCTP__NOTIFICATIONS_STOPPED_EVENT 0x000b /* we don't send this */
+/*
+ * socket option structs
+ */
+
+struct sctp_paddrparams {
+ struct sockaddr_storage spp_address;
+ sctp_assoc_t spp_assoc_id;
+ uint32_t spp_hbinterval;
+ uint32_t spp_pathmtu;
+ uint32_t spp_flags;
+ uint32_t spp_ipv6_flowlabel;
+ uint16_t spp_pathmaxrxt;
+ uint8_t spp_ipv4_tos;
+};
+
+#define SPP_HB_ENABLE 0x00000001
+#define SPP_HB_DISABLE 0x00000002
+#define SPP_HB_DEMAND 0x00000004
+#define SPP_PMTUD_ENABLE 0x00000008
+#define SPP_PMTUD_DISABLE 0x00000010
+#define SPP_HB_TIME_IS_ZERO 0x00000080
+#define SPP_IPV6_FLOWLABEL 0x00000100
+#define SPP_IPV4_TOS 0x00000200
+
+struct sctp_paddrinfo {
+ struct sockaddr_storage spinfo_address;
+ sctp_assoc_t spinfo_assoc_id;
+ int32_t spinfo_state;
+ uint32_t spinfo_cwnd;
+ uint32_t spinfo_srtt;
+ uint32_t spinfo_rto;
+ uint32_t spinfo_mtu;
+};
+
+struct sctp_rtoinfo {
+ sctp_assoc_t srto_assoc_id;
+ uint32_t srto_initial;
+ uint32_t srto_max;
+ uint32_t srto_min;
+};
+
+struct sctp_assocparams {
+ sctp_assoc_t sasoc_assoc_id;
+ uint32_t sasoc_peer_rwnd;
+ uint32_t sasoc_local_rwnd;
+ uint32_t sasoc_cookie_life;
+ uint16_t sasoc_asocmaxrxt;
+ uint16_t sasoc_number_peer_destinations;
+};
+
+struct sctp_setprim {
+ struct sockaddr_storage ssp_addr;
+ sctp_assoc_t ssp_assoc_id;
+ uint8_t ssp_padding[4];
+};
+
+struct sctp_setpeerprim {
+ struct sockaddr_storage sspp_addr;
+ sctp_assoc_t sspp_assoc_id;
+ uint8_t sspp_padding[4];
+};
+
+struct sctp_getaddresses {
+ sctp_assoc_t sget_assoc_id;
+ /* addr is filled in for N * sockaddr_storage */
+ struct sockaddr addr[1];
+};
+
+struct sctp_setstrm_timeout {
+ sctp_assoc_t ssto_assoc_id;
+ uint32_t ssto_timeout;
+ uint32_t ssto_streamid_start;
+ uint32_t ssto_streamid_end;
+};
+
+struct sctp_status {
+ sctp_assoc_t sstat_assoc_id;
+ int32_t sstat_state;
+ uint32_t sstat_rwnd;
+ uint16_t sstat_unackdata;
+ uint16_t sstat_penddata;
+ uint16_t sstat_instrms;
+ uint16_t sstat_outstrms;
+ uint32_t sstat_fragmentation_point;
+ struct sctp_paddrinfo sstat_primary;
+};
+
+/*
+ * AUTHENTICATION support
+ */
+/* SCTP_AUTH_CHUNK */
+struct sctp_authchunk {
+ uint8_t sauth_chunk;
+};
+
+/* SCTP_AUTH_KEY */
+struct sctp_authkey {
+ sctp_assoc_t sca_assoc_id;
+ uint16_t sca_keynumber;
+ uint8_t sca_key[];
+};
+
+/* SCTP_HMAC_IDENT */
+struct sctp_hmacalgo {
+ uint32_t shmac_number_of_idents;
+ uint16_t shmac_idents[];
+};
+
+/* AUTH hmac_id */
+#define SCTP_AUTH_HMAC_ID_RSVD 0x0000
+#define SCTP_AUTH_HMAC_ID_SHA1 0x0001 /* default, mandatory */
+#define SCTP_AUTH_HMAC_ID_SHA256 0x0003
+#define SCTP_AUTH_HMAC_ID_SHA224 0x0004
+#define SCTP_AUTH_HMAC_ID_SHA384 0x0005
+#define SCTP_AUTH_HMAC_ID_SHA512 0x0006
+
+
+/* SCTP_AUTH_ACTIVE_KEY / SCTP_AUTH_DELETE_KEY */
+struct sctp_authkeyid {
+ sctp_assoc_t scact_assoc_id;
+ uint16_t scact_keynumber;
+};
+
+/* SCTP_PEER_AUTH_CHUNKS / SCTP_LOCAL_AUTH_CHUNKS */
+struct sctp_authchunks {
+ sctp_assoc_t gauth_assoc_id;
+ uint8_t gauth_chunks[];
+};
+
+struct sctp_assoc_value {
+ sctp_assoc_t assoc_id;
+ uint32_t assoc_value;
+};
+
+struct sctp_assoc_ids {
+ uint32_t gaids_number_of_ids;
+ sctp_assoc_t gaids_assoc_id[];
+};
+
+struct sctp_sack_info {
+ sctp_assoc_t sack_assoc_id;
+ uint32_t sack_delay;
+ uint32_t sack_freq;
+};
+
+struct sctp_timeouts {
+ sctp_assoc_t stimo_assoc_id;
+ uint32_t stimo_init;
+ uint32_t stimo_data;
+ uint32_t stimo_sack;
+ uint32_t stimo_shutdown;
+ uint32_t stimo_heartbeat;
+ uint32_t stimo_cookie;
+ uint32_t stimo_shutdownack;
+};
+
+struct sctp_cwnd_args {
+ struct sctp_nets *net; /* network to *//* FIXME: LP64 issue */
+ uint32_t cwnd_new_value;/* cwnd in k */
+ uint32_t pseudo_cumack;
+ uint16_t inflight; /* flightsize in k */
+ uint16_t cwnd_augment; /* increment to it */
+ uint8_t meets_pseudo_cumack;
+ uint8_t need_new_pseudo_cumack;
+ uint8_t cnt_in_send;
+ uint8_t cnt_in_str;
+};
+
+struct sctp_blk_args {
+ uint32_t onsb; /* in 1k bytes */
+ uint32_t sndlen; /* len of send being attempted */
+ uint32_t peer_rwnd; /* rwnd of peer */
+ uint16_t send_sent_qcnt;/* chnk cnt */
+ uint16_t stream_qcnt; /* chnk cnt */
+ uint16_t chunks_on_oque;/* chunks out */
+ uint16_t flight_size; /* flight size in k */
+};
+
+/*
+ * Max we can reset in one setting. Note this is dictated not by the define
+ * but by the size of an mbuf cluster, so don't change this define and think
+ * you can specify more. You must do multiple resets if you want to reset
+ * more than SCTP_MAX_EXPLICT_STR_RESET (the define below).
+ */
+#define SCTP_MAX_EXPLICT_STR_RESET 1000
+
+#define SCTP_RESET_LOCAL_RECV 0x0001
+#define SCTP_RESET_LOCAL_SEND 0x0002
+#define SCTP_RESET_BOTH 0x0003
+#define SCTP_RESET_TSN 0x0004
+#define SCTP_RESET_ADD_STREAMS 0x0005
+
+struct sctp_stream_reset {
+ sctp_assoc_t strrst_assoc_id;
+ uint16_t strrst_flags;
+ uint16_t strrst_num_streams; /* 0 == ALL */
+ uint16_t strrst_list[]; /* list if strrst_num_streams is not 0 */
+};
+
+
+struct sctp_get_nonce_values {
+ sctp_assoc_t gn_assoc_id;
+ uint32_t gn_peers_tag;
+ uint32_t gn_local_tag;
+};
+
+/* Debugging logs */
+struct sctp_str_log {
+ void *stcb; /* FIXME: LP64 issue */
+ uint32_t n_tsn;
+ uint32_t e_tsn;
+ uint16_t n_sseq;
+ uint16_t e_sseq;
+ uint16_t strm;
+};
+
+struct sctp_sb_log {
+ void *stcb; /* FIXME: LP64 issue */
+ uint32_t so_sbcc;
+ uint32_t stcb_sbcc;
+ uint32_t incr;
+};
+
+struct sctp_fr_log {
+ uint32_t largest_tsn;
+ uint32_t largest_new_tsn;
+ uint32_t tsn;
+};
+
+struct sctp_fr_map {
+ uint32_t base;
+ uint32_t cum;
+ uint32_t high;
+};
+
+struct sctp_rwnd_log {
+ uint32_t rwnd;
+ uint32_t send_size;
+ uint32_t overhead;
+ uint32_t new_rwnd;
+};
+
+struct sctp_mbcnt_log {
+ uint32_t total_queue_size;
+ uint32_t size_change;
+ uint32_t total_queue_mb_size;
+ uint32_t mbcnt_change;
+};
+
+struct sctp_sack_log {
+ uint32_t cumack;
+ uint32_t oldcumack;
+ uint32_t tsn;
+ uint16_t numGaps;
+ uint16_t numDups;
+};
+
+struct sctp_lock_log {
+ void *sock; /* FIXME: LP64 issue */
+ void *inp; /* FIXME: LP64 issue */
+ uint8_t tcb_lock;
+ uint8_t inp_lock;
+ uint8_t info_lock;
+ uint8_t sock_lock;
+ uint8_t sockrcvbuf_lock;
+ uint8_t socksndbuf_lock;
+ uint8_t create_lock;
+ uint8_t resv;
+};
+
+struct sctp_rto_log {
+ void *net; /* FIXME: LP64 issue */
+ uint32_t rtt;
+};
+
+struct sctp_nagle_log {
+ void *stcb; /* FIXME: LP64 issue */
+ uint32_t total_flight;
+ uint32_t total_in_queue;
+ uint16_t count_in_queue;
+ uint16_t count_in_flight;
+};
+
+struct sctp_sbwake_log {
+ void *stcb; /* FIXME: LP64 issue */
+ uint16_t send_q;
+ uint16_t sent_q;
+ uint16_t flight;
+ uint16_t wake_cnt;
+ uint8_t stream_qcnt; /* chnk cnt */
+ uint8_t chunks_on_oque; /* chunks out */
+ uint8_t sbflags;
+ uint8_t sctpflags;
+};
+
+struct sctp_misc_info {
+ uint32_t log1;
+ uint32_t log2;
+ uint32_t log3;
+ uint32_t log4;
+};
+
+struct sctp_log_closing {
+ void *inp; /* FIXME: LP64 issue */
+ void *stcb; /* FIXME: LP64 issue */
+ uint32_t sctp_flags;
+ uint16_t state;
+ int16_t loc;
+};
+
+struct sctp_mbuf_log {
+ struct mbuf *mp; /* FIXME: LP64 issue */
+ caddr_t ext;
+ caddr_t data;
+ uint16_t size;
+ uint8_t refcnt;
+ uint8_t mbuf_flags;
+};
+
+struct sctp_cwnd_log {
+ uint64_t time_event;
+ uint8_t from;
+ uint8_t event_type;
+ uint8_t resv[2];
+ union {
+ struct sctp_log_closing close;
+ struct sctp_blk_args blk;
+ struct sctp_cwnd_args cwnd;
+ struct sctp_str_log strlog;
+ struct sctp_fr_log fr;
+ struct sctp_fr_map map;
+ struct sctp_rwnd_log rwnd;
+ struct sctp_mbcnt_log mbcnt;
+ struct sctp_sack_log sack;
+ struct sctp_lock_log lock;
+ struct sctp_rto_log rto;
+ struct sctp_sb_log sb;
+ struct sctp_nagle_log nagle;
+ struct sctp_sbwake_log wake;
+ struct sctp_mbuf_log mb;
+ struct sctp_misc_info misc;
+ } x;
+};
+
+struct sctp_cwnd_log_req {
+ int32_t num_in_log; /* Number in log */
+ int32_t num_ret; /* Number returned */
+ int32_t start_at; /* start at this one */
+ int32_t end_at; /* end at this one */
+ struct sctp_cwnd_log log[];
+};
+
+struct sctp_timeval {
+ uint32_t tv_sec;
+ uint32_t tv_usec;
+};
+
+struct sctpstat {
+ struct sctp_timeval sctps_discontinuitytime; /* sctpStats 18
+ * (TimeStamp) */
+ /* MIB according to RFC 3873 */
+ uint32_t sctps_currestab; /* sctpStats 1 (Gauge32) */
+ uint32_t sctps_activeestab; /* sctpStats 2 (Counter32) */
+ uint32_t sctps_restartestab;
+ uint32_t sctps_collisionestab;
+ uint32_t sctps_passiveestab; /* sctpStats 3 (Counter32) */
+ uint32_t sctps_aborted; /* sctpStats 4 (Counter32) */
+ uint32_t sctps_shutdown;/* sctpStats 5 (Counter32) */
+ uint32_t sctps_outoftheblue; /* sctpStats 6 (Counter32) */
+ uint32_t sctps_checksumerrors; /* sctpStats 7 (Counter32) */
+ uint32_t sctps_outcontrolchunks; /* sctpStats 8 (Counter64) */
+ uint32_t sctps_outorderchunks; /* sctpStats 9 (Counter64) */
+ uint32_t sctps_outunorderchunks; /* sctpStats 10 (Counter64) */
+ uint32_t sctps_incontrolchunks; /* sctpStats 11 (Counter64) */
+ uint32_t sctps_inorderchunks; /* sctpStats 12 (Counter64) */
+ uint32_t sctps_inunorderchunks; /* sctpStats 13 (Counter64) */
+ uint32_t sctps_fragusrmsgs; /* sctpStats 14 (Counter64) */
+ uint32_t sctps_reasmusrmsgs; /* sctpStats 15 (Counter64) */
+ uint32_t sctps_outpackets; /* sctpStats 16 (Counter64) */
+ uint32_t sctps_inpackets; /* sctpStats 17 (Counter64) */
+
+ /* input statistics: */
+ uint32_t sctps_recvpackets; /* total input packets */
+ uint32_t sctps_recvdatagrams; /* total input datagrams */
+ uint32_t sctps_recvpktwithdata; /* total packets that had data */
+ uint32_t sctps_recvsacks; /* total input SACK chunks */
+ uint32_t sctps_recvdata;/* total input DATA chunks */
+ uint32_t sctps_recvdupdata; /* total input duplicate DATA chunks */
+ uint32_t sctps_recvheartbeat; /* total input HB chunks */
+ uint32_t sctps_recvheartbeatack; /* total input HB-ACK chunks */
+ uint32_t sctps_recvecne;/* total input ECNE chunks */
+ uint32_t sctps_recvauth;/* total input AUTH chunks */
+ uint32_t sctps_recvauthmissing; /* total input chunks missing AUTH */
+ uint32_t sctps_recvivalhmacid; /* total number of invalid HMAC ids
+ * received */
+ uint32_t sctps_recvivalkeyid; /* total number of invalid secret ids
+ * received */
+ uint32_t sctps_recvauthfailed; /* total number of auth failed */
+ uint32_t sctps_recvexpress; /* total fast path receives all one
+ * chunk */
+ uint32_t sctps_recvexpressm; /* total fast path multi-part data */
+ uint32_t sctps_recvnocrc;
+ uint32_t sctps_recvswcrc;
+ uint32_t sctps_recvhwcrc;
+
+ /* output statistics: */
+ uint32_t sctps_sendpackets; /* total output packets */
+ uint32_t sctps_sendsacks; /* total output SACKs */
+ uint32_t sctps_senddata;/* total output DATA chunks */
+ uint32_t sctps_sendretransdata; /* total output retransmitted DATA
+ * chunks */
+ uint32_t sctps_sendfastretrans; /* total output fast retransmitted
+ * DATA chunks */
+ uint32_t sctps_sendmultfastretrans; /* total FR's that happened
+ * more than once to same
+ * chunk (u-del multi-fr
+ * algo). */
+ uint32_t sctps_sendheartbeat; /* total output HB chunks */
+ uint32_t sctps_sendecne;/* total output ECNE chunks */
+ uint32_t sctps_sendauth;/* total output AUTH chunks FIXME */
+ uint32_t sctps_senderrors; /* ip_output error counter */
+ uint32_t sctps_sendnocrc;
+ uint32_t sctps_sendswcrc;
+ uint32_t sctps_sendhwcrc;
+ /* PCKDROPREP statistics: */
+ uint32_t sctps_pdrpfmbox; /* Packet drop from middle box */
+ uint32_t sctps_pdrpfehos; /* P-drop from end host */
+ uint32_t sctps_pdrpmbda;/* P-drops with data */
+ uint32_t sctps_pdrpmbct;/* P-drops, non-data, non-endhost */
+ uint32_t sctps_pdrpbwrpt; /* P-drop, non-endhost, bandwidth rep
+ * only */
+ uint32_t sctps_pdrpcrupt; /* P-drop, not enough for chunk header */
+ uint32_t sctps_pdrpnedat; /* P-drop, not enough data to confirm */
+ uint32_t sctps_pdrppdbrk; /* P-drop, where process_chunk_drop
+ * said break */
+ uint32_t sctps_pdrptsnnf; /* P-drop, could not find TSN */
+ uint32_t sctps_pdrpdnfnd; /* P-drop, attempt reverse TSN lookup */
+ uint32_t sctps_pdrpdiwnp; /* P-drop, e-host confirms zero-rwnd */
+ uint32_t sctps_pdrpdizrw; /* P-drop, midbox confirms no space */
+ uint32_t sctps_pdrpbadd;/* P-drop, data did not match TSN */
+ uint32_t sctps_pdrpmark;/* P-drop, TSN's marked for Fast Retran */
+ /* timeouts */
+ uint32_t sctps_timoiterator; /* Number of iterator timers that
+ * fired */
+ uint32_t sctps_timodata;/* Number of T3 data time outs */
+ uint32_t sctps_timowindowprobe; /* Number of window probe (T3) timers
+ * that fired */
+ uint32_t sctps_timoinit;/* Number of INIT timers that fired */
+ uint32_t sctps_timosack;/* Number of sack timers that fired */
+ uint32_t sctps_timoshutdown; /* Number of shutdown timers that
+ * fired */
+ uint32_t sctps_timoheartbeat; /* Number of heartbeat timers that
+ * fired */
+ uint32_t sctps_timocookie; /* Number of times a cookie timeout
+ * fired */
+ uint32_t sctps_timosecret; /* Number of times an endpoint changed
+ * its cookie secret */
+ uint32_t sctps_timopathmtu; /* Number of PMTU timers that fired */
+ uint32_t sctps_timoshutdownack; /* Number of shutdown ack timers that
+ * fired */
+ uint32_t sctps_timoshutdownguard; /* Number of shutdown guard
+ * timers that fired */
+ uint32_t sctps_timostrmrst; /* Number of stream reset timers that
+ * fired */
+ uint32_t sctps_timoearlyfr; /* Number of early FR timers that
+ * fired */
+ uint32_t sctps_timoasconf; /* Number of times an asconf timer
+ * fired */
+ uint32_t sctps_timodelprim; /* Number of times a prim_deleted
+ * timer fired */
+ uint32_t sctps_timoautoclose; /* Number of times auto close timer
+ * fired */
+ uint32_t sctps_timoassockill; /* Number of asoc free timers expired */
+ uint32_t sctps_timoinpkill; /* Number of inp free timers expired */
+ /* Early fast retransmission counters */
+ uint32_t sctps_earlyfrstart;
+ uint32_t sctps_earlyfrstop;
+ uint32_t sctps_earlyfrmrkretrans;
+ uint32_t sctps_earlyfrstpout;
+ uint32_t sctps_earlyfrstpidsck1;
+ uint32_t sctps_earlyfrstpidsck2;
+ uint32_t sctps_earlyfrstpidsck3;
+ uint32_t sctps_earlyfrstpidsck4;
+ uint32_t sctps_earlyfrstrid;
+ uint32_t sctps_earlyfrstrout;
+ uint32_t sctps_earlyfrstrtmr;
+ /* others */
+ uint32_t sctps_hdrops; /* packet shorter than header */
+ uint32_t sctps_badsum; /* checksum error */
+ uint32_t sctps_noport; /* no endpoint for port */
+ uint32_t sctps_badvtag; /* bad v-tag */
+ uint32_t sctps_badsid; /* bad SID */
+ uint32_t sctps_nomem; /* no memory */
+ uint32_t sctps_fastretransinrtt; /* number of multiple FR in a
+ * RTT window */
+ uint32_t sctps_markedretrans;
+ uint32_t sctps_naglesent; /* nagle allowed sending */
+ uint32_t sctps_naglequeued; /* nagle doesn't allow sending */
+ uint32_t sctps_maxburstqueued; /* max burst doesn't allow sending */
+ uint32_t sctps_ifnomemqueued; /* look ahead tells us no memory in
+ * interface ring buffer OR we had a
+ * send error and are queuing one
+ * send. */
+ uint32_t sctps_windowprobed; /* total number of window probes sent */
+ uint32_t sctps_lowlevelerr; /* total times an output error causes
+ * us to clamp down on next user send. */
+ uint32_t sctps_lowlevelerrusr; /* total times sctp_senderrors were
+ * caused from a user send from a user
+ * invoked send not a sack response */
+ uint32_t sctps_datadropchklmt; /* Number of in data drops due to
+ * chunk limit reached */
+ uint32_t sctps_datadroprwnd; /* Number of in data drops due to rwnd
+ * limit reached */
+ uint32_t sctps_ecnereducedcwnd; /* Number of times a ECN reduced the
+ * cwnd */
+ uint32_t sctps_vtagexpress; /* Used express lookup via vtag */
+ uint32_t sctps_vtagbogus; /* Collision in express lookup. */
+ uint32_t sctps_primary_randry; /* Number of times the sender ran dry
+ * of user data on primary */
+ uint32_t sctps_cmt_randry; /* Same as above, for CMT */
+ uint32_t sctps_slowpath_sack; /* Sacks the slow way */
+ uint32_t sctps_wu_sacks_sent; /* Window Update only sacks sent */
+ uint32_t sctps_sends_with_flags; /* number of sends with
+ * sinfo_flags !=0 */
+ uint32_t sctps_sends_with_unord; /* number of unordered sends */
+ uint32_t sctps_sends_with_eof; /* number of sends with EOF flag set */
+ uint32_t sctps_sends_with_abort; /* number of sends with ABORT
+ * flag set */
+ uint32_t sctps_protocol_drain_calls; /* number of times protocol
+ * drain called */
+ uint32_t sctps_protocol_drains_done; /* number of times we did a
+ * protocol drain */
+ uint32_t sctps_read_peeks; /* Number of times recv was called
+ * with peek */
+ uint32_t sctps_cached_chk; /* Number of cached chunks used */
+ uint32_t sctps_cached_strmoq; /* Number of cached stream oq's used */
+ uint32_t sctps_left_abandon; /* Number of unread messages abandoned
+ * by close */
+ uint32_t sctps_send_burst_avoid; /* Unused */
+ uint32_t sctps_send_cwnd_avoid; /* Send cwnd full avoidance, already
+ * max burst inflight to net */
+ uint32_t sctps_fwdtsn_map_over; /* number of map array over-runs via
+ * fwd-tsn's */
+
+ uint32_t sctps_reserved[32]; /* Future ABI compat - remove int's
+ * from here when adding new */
+};
+
+#define SCTP_STAT_INCR(_x) SCTP_STAT_INCR_BY(_x,1)
+#define SCTP_STAT_DECR(_x) SCTP_STAT_DECR_BY(_x,1)
+#if defined(__FreeBSD__) && defined(SMP) && defined(SCTP_USE_PERCPU_STAT)
+#define SCTP_STAT_INCR_BY(_x,_d) (SCTP_BASE_STATS[PCPU_GET(cpuid)]._x += _d)
+#define SCTP_STAT_DECR_BY(_x,_d) (SCTP_BASE_STATS[PCPU_GET(cpuid)]._x -= _d)
+#else
+#define SCTP_STAT_INCR_BY(_x,_d) atomic_add_int(&SCTP_BASE_STAT(_x), _d)
+#define SCTP_STAT_DECR_BY(_x,_d) atomic_subtract_int(&SCTP_BASE_STAT(_x), _d)
+#endif
+/* The following macros are for handling MIB values. */
+#define SCTP_STAT_INCR_COUNTER32(_x) SCTP_STAT_INCR(_x)
+#define SCTP_STAT_INCR_COUNTER64(_x) SCTP_STAT_INCR(_x)
+#define SCTP_STAT_INCR_GAUGE32(_x) SCTP_STAT_INCR(_x)
+#define SCTP_STAT_DECR_COUNTER32(_x) SCTP_STAT_DECR(_x)
+#define SCTP_STAT_DECR_COUNTER64(_x) SCTP_STAT_DECR(_x)
+#define SCTP_STAT_DECR_GAUGE32(_x) SCTP_STAT_DECR(_x)
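+
+/*
+ * Illustrative use (a sketch): MIB counters are bumped through these
+ * wrappers so the per-CPU and plain atomic variants stay
+ * interchangeable, e.g.:
+ *
+ *	SCTP_STAT_INCR_COUNTER32(sctps_activeestab);
+ *	SCTP_STAT_INCR_GAUGE32(sctps_currestab);
+ */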
+
+union sctp_sockstore {
+#if defined(INET) || !defined(_KERNEL)
+ struct sockaddr_in sin;
+#endif
+#if defined(INET6) || !defined(_KERNEL)
+ struct sockaddr_in6 sin6;
+#endif
+ struct sockaddr sa;
+};
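+
+/*
+ * Illustrative use (a sketch): store an address once, then dispatch on
+ * the common family field, e.g.:
+ *
+ *	if (store.sa.sa_family == AF_INET)
+ *		port = store.sin.sin_port;
+ */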
+
+
+/***********************************/
+/* And something for us old timers */
+/***********************************/
+
+#ifndef ntohll
+#include <rtems/freebsd/sys/endian.h>
+#define ntohll(x) be64toh(x)
+#endif
+
+#ifndef htonll
+#include <rtems/freebsd/sys/endian.h>
+#define htonll(x) htobe64(x)
+#endif
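+
+/*
+ * Illustrative round trip (a sketch): 64-bit quantities cross the wire
+ * big-endian, e.g.:
+ *
+ *	uint64_t wire = htonll(host64);
+ *	uint64_t host64_again = ntohll(wire);
+ */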
+/***********************************/
+
+
+struct xsctp_inpcb {
+ uint32_t last;
+ uint32_t flags;
+ uint32_t features;
+ uint32_t total_sends;
+ uint32_t total_recvs;
+ uint32_t total_nospaces;
+ uint32_t fragmentation_point;
+ uint16_t local_port;
+ uint16_t qlen;
+ uint16_t maxqlen;
+ uint32_t extra_padding[32]; /* future */
+};
+
+struct xsctp_tcb {
+ union sctp_sockstore primary_addr; /* sctpAssocEntry 5/6 */
+ uint32_t last;
+ uint32_t heartbeat_interval; /* sctpAssocEntry 7 */
+ uint32_t state; /* sctpAssocEntry 8 */
+ uint32_t in_streams; /* sctpAssocEntry 9 */
+ uint32_t out_streams; /* sctpAssocEntry 10 */
+ uint32_t max_nr_retrans;/* sctpAssocEntry 11 */
+ uint32_t primary_process; /* sctpAssocEntry 12 */
+ uint32_t T1_expireries; /* sctpAssocEntry 13 */
+ uint32_t T2_expireries; /* sctpAssocEntry 14 */
+ uint32_t retransmitted_tsns; /* sctpAssocEntry 15 */
+ uint32_t total_sends;
+ uint32_t total_recvs;
+ uint32_t local_tag;
+ uint32_t remote_tag;
+ uint32_t initial_tsn;
+ uint32_t highest_tsn;
+ uint32_t cumulative_tsn;
+ uint32_t cumulative_tsn_ack;
+ uint32_t mtu;
+ uint32_t refcnt;
+ uint16_t local_port; /* sctpAssocEntry 3 */
+ uint16_t remote_port; /* sctpAssocEntry 4 */
+ struct sctp_timeval start_time; /* sctpAssocEntry 16 */
+ struct sctp_timeval discontinuity_time; /* sctpAssocEntry 17 */
+ uint32_t peers_rwnd;
+ sctp_assoc_t assoc_id; /* sctpAssocEntry 1 */
+ uint32_t extra_padding[32]; /* future */
+};
+
+struct xsctp_laddr {
+ union sctp_sockstore address; /* sctpAssocLocalAddrEntry 1/2 */
+ uint32_t last;
+ struct sctp_timeval start_time; /* sctpAssocLocalAddrEntry 3 */
+ uint32_t extra_padding[32]; /* future */
+};
+
+struct xsctp_raddr {
+ union sctp_sockstore address; /* sctpAssocLocalRemEntry 1/2 */
+ uint32_t last;
+ uint32_t rto; /* sctpAssocLocalRemEntry 5 */
+ uint32_t max_path_rtx; /* sctpAssocLocalRemEntry 6 */
+ uint32_t rtx; /* sctpAssocLocalRemEntry 7 */
+ uint32_t error_counter; /* */
+ uint32_t cwnd; /* */
+ uint32_t flight_size; /* */
+ uint32_t mtu; /* */
+ uint8_t active; /* sctpAssocLocalRemEntry 3 */
+ uint8_t confirmed; /* */
+ uint8_t heartbeat_enabled; /* sctpAssocLocalRemEntry 4 */
+ struct sctp_timeval start_time; /* sctpAssocLocalRemEntry 8 */
+ uint32_t rtt;
+ uint32_t extra_padding[32]; /* future */
+};
+
+#define SCTP_MAX_LOGGING_SIZE 30000
+#define SCTP_TRACE_PARAMS 6 /* This number MUST be even */
+
+struct sctp_log_entry {
+ uint64_t timestamp;
+ uint32_t subsys;
+ uint32_t padding;
+ uint32_t params[SCTP_TRACE_PARAMS];
+};
+
+struct sctp_log {
+ struct sctp_log_entry entry[SCTP_MAX_LOGGING_SIZE];
+ uint32_t index;
+ uint32_t padding;
+};
+
+/*
+ * Kernel defined for sctp_send
+ */
+#if defined(_KERNEL) || defined(__Userspace__)
+int
+sctp_lower_sosend(struct socket *so,
+ struct sockaddr *addr,
+ struct uio *uio,
+ struct mbuf *i_pak,
+ struct mbuf *control,
+ int flags,
+ struct sctp_sndrcvinfo *srcv
+ ,struct thread *p
+);
+
+int
+sctp_sorecvmsg(struct socket *so,
+ struct uio *uio,
+ struct mbuf **mp,
+ struct sockaddr *from,
+ int fromlen,
+ int *msg_flags,
+ struct sctp_sndrcvinfo *sinfo,
+ int filling_sinfo);
+
+#endif
+
+/*
+ * API system calls
+ */
+#if !(defined(_KERNEL)) && !(defined(__Userspace__))
+
+__BEGIN_DECLS
+int sctp_peeloff __P((int, sctp_assoc_t));
+int sctp_bindx __P((int, struct sockaddr *, int, int));
+int sctp_connectx __P((int, const struct sockaddr *, int, sctp_assoc_t *));
+int sctp_getaddrlen __P((sa_family_t));
+int sctp_getpaddrs __P((int, sctp_assoc_t, struct sockaddr **));
+void sctp_freepaddrs __P((struct sockaddr *));
+int sctp_getladdrs __P((int, sctp_assoc_t, struct sockaddr **));
+void sctp_freeladdrs __P((struct sockaddr *));
+int sctp_opt_info __P((int, sctp_assoc_t, int, void *, socklen_t *));
+
+ssize_t sctp_sendmsg
+__P((int, const void *, size_t,
+ const struct sockaddr *,
+ socklen_t, uint32_t, uint32_t, uint16_t, uint32_t, uint32_t));
+
+ ssize_t sctp_send __P((int sd, const void *msg, size_t len,
+ const struct sctp_sndrcvinfo *sinfo, int flags));
+
+ ssize_t sctp_sendx __P((int sd, const void *msg, size_t len,
+ struct sockaddr *addrs, int addrcnt,
+ struct sctp_sndrcvinfo *sinfo, int flags));
+
+ ssize_t sctp_sendmsgx __P((int sd, const void *, size_t,
+ struct sockaddr *, int,
+ uint32_t, uint32_t, uint16_t, uint32_t, uint32_t));
+
+ sctp_assoc_t sctp_getassocid __P((int sd, struct sockaddr *sa));
+
+ ssize_t sctp_recvmsg __P((int, void *, size_t, struct sockaddr *,
+ socklen_t *, struct sctp_sndrcvinfo *, int *));
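+
+/*
+ * Illustrative userland call (a sketch with hypothetical variables;
+ * error handling omitted):
+ *
+ *	ssize_t n = sctp_sendmsg(sd, buf, len,
+ *	    (struct sockaddr *)&dst, sizeof(dst),
+ *	    htonl(ppid), 0, stream, timetolive, 0);
+ */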
+
+__END_DECLS
+
+#endif /* !_KERNEL */
+#endif /* !__sctp_uio_h__ */
diff --git a/rtems/freebsd/netinet/sctp_usrreq.c b/rtems/freebsd/netinet/sctp_usrreq.c
new file mode 100644
index 00000000..e7252da8
--- /dev/null
+++ b/rtems/freebsd/netinet/sctp_usrreq.c
@@ -0,0 +1,4918 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* $KAME: sctp_usrreq.c,v 1.48 2005/03/07 23:26:08 itojun Exp $ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+#include <rtems/freebsd/netinet/sctp_os.h>
+#include <rtems/freebsd/sys/proc.h>
+#include <rtems/freebsd/netinet/sctp_pcb.h>
+#include <rtems/freebsd/netinet/sctp_header.h>
+#include <rtems/freebsd/netinet/sctp_var.h>
+#if defined(INET6)
+#endif
+#include <rtems/freebsd/netinet/sctp_sysctl.h>
+#include <rtems/freebsd/netinet/sctp_output.h>
+#include <rtems/freebsd/netinet/sctp_uio.h>
+#include <rtems/freebsd/netinet/sctp_asconf.h>
+#include <rtems/freebsd/netinet/sctputil.h>
+#include <rtems/freebsd/netinet/sctp_indata.h>
+#include <rtems/freebsd/netinet/sctp_timer.h>
+#include <rtems/freebsd/netinet/sctp_auth.h>
+#include <rtems/freebsd/netinet/sctp_bsd_addr.h>
+#include <rtems/freebsd/netinet/sctp_cc_functions.h>
+#include <rtems/freebsd/netinet/udp.h>
+
+
+
+
+void
+sctp_init(void)
+{
+ u_long sb_max_adj;
+
+ bzero(&SCTP_BASE_STATS, sizeof(struct sctpstat));
+
+ /* Initialize and modify the sysctled variables */
+ sctp_init_sysctls();
+ if ((nmbclusters / 8) > SCTP_ASOC_MAX_CHUNKS_ON_QUEUE)
+ SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue) = (nmbclusters / 8);
+ /*
+ * Allow a user to take no more than 1/2 the number of clusters or
+ * the SB_MAX whichever is smaller for the send window.
+ */
+ sb_max_adj = (u_long)((u_quad_t) (SB_MAX) * MCLBYTES / (MSIZE + MCLBYTES));
+ SCTP_BASE_SYSCTL(sctp_sendspace) = min(sb_max_adj,
+ (((uint32_t) nmbclusters / 2) * SCTP_DEFAULT_MAXSEGMENT));
+ /*
+ * Now for the recv window: should we take the same amount, or
+ * should I do 1/2 the SB_MAX instead in the SB_MAX min above? For
+ * now I will just copy.
+ */
+ SCTP_BASE_SYSCTL(sctp_recvspace) = SCTP_BASE_SYSCTL(sctp_sendspace);
+
+ SCTP_BASE_VAR(first_time) = 0;
+ SCTP_BASE_VAR(sctp_pcb_initialized) = 0;
+ sctp_pcb_init();
+#if defined(SCTP_PACKET_LOGGING)
+ SCTP_BASE_VAR(packet_log_writers) = 0;
+ SCTP_BASE_VAR(packet_log_end) = 0;
+ bzero(&SCTP_BASE_VAR(packet_log_buffer), SCTP_PACKET_LOG_SIZE);
+#endif
+
+
+}
+
+void
+sctp_finish(void)
+{
+ sctp_pcb_finish();
+}
+
+
+
+void
+sctp_pathmtu_adjustment(struct sctp_inpcb *inp,
+ struct sctp_tcb *stcb,
+ struct sctp_nets *net,
+ uint16_t nxtsz)
+{
+ struct sctp_tmit_chunk *chk;
+ uint16_t overhead;
+
+ /* Adjust that too */
+ stcb->asoc.smallest_mtu = nxtsz;
+ /* now off to subtract IP_DF flag if needed */
+ overhead = IP_HDR_SIZE;
+ if (sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks)) {
+ overhead += sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
+ }
+ TAILQ_FOREACH(chk, &stcb->asoc.send_queue, sctp_next) {
+ if ((chk->send_size + overhead) > nxtsz) {
+ chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
+ }
+ }
+ TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
+ if ((chk->send_size + overhead) > nxtsz) {
+ /*
+ * For this guy we also mark for immediate resend
+ * since we sent too big a chunk
+ */
+ chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
+ if (chk->sent < SCTP_DATAGRAM_RESEND) {
+ sctp_flight_size_decrease(chk);
+ sctp_total_flight_decrease(stcb, chk);
+ }
+ if (chk->sent != SCTP_DATAGRAM_RESEND) {
+ sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
+ }
+ chk->sent = SCTP_DATAGRAM_RESEND;
+ chk->rec.data.doing_fast_retransmit = 0;
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
+ sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_PMTU,
+ chk->whoTo->flight_size,
+ chk->book_size,
+ (uintptr_t) chk->whoTo,
+ chk->rec.data.TSN_seq);
+ }
+ /* Clear any time so NO RTT is being done */
+ chk->do_rtt = 0;
+ }
+ }
+}
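+
+/*
+ * Worked example (the numbers are illustrative assumptions, not from the
+ * source): if an ICMP need-frag reports nxtsz = 1280 and the overhead is
+ * the 20-byte IPv4 header (plus the AUTH chunk length when DATA must be
+ * authenticated), every queued chunk with send_size > 1260 is flagged
+ * CHUNK_FLAGS_FRAGMENT_OK, and chunks already on the sent queue are in
+ * addition marked SCTP_DATAGRAM_RESEND and removed from the flight-size
+ * accounting.
+ */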
+
+static void
+sctp_notify_mbuf(struct sctp_inpcb *inp,
+ struct sctp_tcb *stcb,
+ struct sctp_nets *net,
+ struct ip *ip,
+ struct sctphdr *sh)
+{
+ struct icmp *icmph;
+ int totsz, tmr_stopped = 0;
+ uint16_t nxtsz;
+
+ /* protection */
+ if ((inp == NULL) || (stcb == NULL) || (net == NULL) ||
+ (ip == NULL) || (sh == NULL)) {
+ if (stcb != NULL) {
+ SCTP_TCB_UNLOCK(stcb);
+ }
+ return;
+ }
+ /* First job is to verify the vtag matches what I would send */
+ if (ntohl(sh->v_tag) != (stcb->asoc.peer_vtag)) {
+ SCTP_TCB_UNLOCK(stcb);
+ return;
+ }
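+	/*
+	 * 'ip' points at the copy of the offending IP header carried in
+	 * the body of the ICMP error. Since struct icmp ends with that
+	 * embedded struct ip, stepping back by the size difference
+	 * recovers the enclosing ICMP header.
+	 */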
+ icmph = (struct icmp *)((caddr_t)ip - (sizeof(struct icmp) -
+ sizeof(struct ip)));
+ if (icmph->icmp_type != ICMP_UNREACH) {
+ /* We only care about unreachable */
+ SCTP_TCB_UNLOCK(stcb);
+ return;
+ }
+ if (icmph->icmp_code != ICMP_UNREACH_NEEDFRAG) {
+		/* not an unreachable message due to fragmentation */
+ SCTP_TCB_UNLOCK(stcb);
+ return;
+ }
+ totsz = ip->ip_len;
+
+ nxtsz = ntohs(icmph->icmp_nextmtu);
+ if (nxtsz == 0) {
+		/*
+		 * Old-style router that does not tell us what the next
+		 * MTU is; we will have to guess (in an educated fashion,
+		 * of course).
+		 */
+ nxtsz = sctp_get_prev_mtu(totsz);
+ }
+ /* Stop any PMTU timer */
+ if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) {
+ tmr_stopped = 1;
+ sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net,
+ SCTP_FROM_SCTP_USRREQ + SCTP_LOC_1);
+ }
+ /* Adjust destination size limit */
+ if (net->mtu > nxtsz) {
+ net->mtu = nxtsz;
+ if (net->port) {
+ net->mtu -= sizeof(struct udphdr);
+ }
+ }
+ /* now what about the ep? */
+ if (stcb->asoc.smallest_mtu > nxtsz) {
+ sctp_pathmtu_adjustment(inp, stcb, net, nxtsz);
+ }
+ if (tmr_stopped)
+ sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net);
+
+ SCTP_TCB_UNLOCK(stcb);
+}
+
+
+void
+sctp_notify(struct sctp_inpcb *inp,
+ struct ip *ip,
+ struct sctphdr *sh,
+ struct sockaddr *to,
+ struct sctp_tcb *stcb,
+ struct sctp_nets *net)
+{
+#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+ struct socket *so;
+
+#endif
+ /* protection */
+ int reason;
+ struct icmp *icmph;
+
+
+ if ((inp == NULL) || (stcb == NULL) || (net == NULL) ||
+ (sh == NULL) || (to == NULL)) {
+ if (stcb)
+ SCTP_TCB_UNLOCK(stcb);
+ return;
+ }
+ /* First job is to verify the vtag matches what I would send */
+ if (ntohl(sh->v_tag) != (stcb->asoc.peer_vtag)) {
+ SCTP_TCB_UNLOCK(stcb);
+ return;
+ }
+ icmph = (struct icmp *)((caddr_t)ip - (sizeof(struct icmp) -
+ sizeof(struct ip)));
+ if (icmph->icmp_type != ICMP_UNREACH) {
+ /* We only care about unreachable */
+ SCTP_TCB_UNLOCK(stcb);
+ return;
+ }
+ if ((icmph->icmp_code == ICMP_UNREACH_NET) ||
+ (icmph->icmp_code == ICMP_UNREACH_HOST) ||
+ (icmph->icmp_code == ICMP_UNREACH_NET_UNKNOWN) ||
+ (icmph->icmp_code == ICMP_UNREACH_HOST_UNKNOWN) ||
+ (icmph->icmp_code == ICMP_UNREACH_ISOLATED) ||
+ (icmph->icmp_code == ICMP_UNREACH_NET_PROHIB) ||
+ (icmph->icmp_code == ICMP_UNREACH_HOST_PROHIB) ||
+ (icmph->icmp_code == ICMP_UNREACH_FILTER_PROHIB)) {
+
+		/*
+		 * Reachability problems we must examine closely. If the
+		 * destination is not reachable, we may have lost a network.
+		 * Or if there is NO protocol named SCTP at the other end,
+		 * we consider it an OOTB abort.
+		 */
+ if (net->dest_state & SCTP_ADDR_REACHABLE) {
+ /* Ok that destination is NOT reachable */
+ SCTP_PRINTF("ICMP (thresh %d/%d) takes interface %p down\n",
+ net->error_count,
+ net->failure_threshold,
+ net);
+
+ net->dest_state &= ~SCTP_ADDR_REACHABLE;
+ net->dest_state |= SCTP_ADDR_NOT_REACHABLE;
+ /*
+ * JRS 5/14/07 - If a destination is unreachable,
+ * the PF bit is turned off. This allows an
+ * unambiguous use of the PF bit for destinations
+ * that are reachable but potentially failed. If the
+ * destination is set to the unreachable state, also
+ * set the destination to the PF state.
+ */
+ /*
+ * Add debug message here if destination is not in
+ * PF state.
+ */
+ /* Stop any running T3 timers here? */
+ if ((stcb->asoc.sctp_cmt_on_off == 1) &&
+ (stcb->asoc.sctp_cmt_pf > 0)) {
+ net->dest_state &= ~SCTP_ADDR_PF;
+ SCTPDBG(SCTP_DEBUG_TIMER4, "Destination %p moved from PF to unreachable.\n",
+ net);
+ }
+ net->error_count = net->failure_threshold + 1;
+ sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN,
+ stcb, SCTP_FAILED_THRESHOLD,
+ (void *)net, SCTP_SO_NOT_LOCKED);
+ }
+ SCTP_TCB_UNLOCK(stcb);
+ } else if ((icmph->icmp_code == ICMP_UNREACH_PROTOCOL) ||
+ (icmph->icmp_code == ICMP_UNREACH_PORT)) {
+		/*
+		 * Here the peer is either playing tricks on us, including
+		 * using an address that belongs to someone who does not
+		 * support SCTP, OR it was a userland implementation that
+		 * shut down and now is dead. In either case treat it like
+		 * an OOTB abort with no TCB.
+		 */
+ reason = SCTP_PEER_FAULTY;
+ sctp_abort_notification(stcb, reason, SCTP_SO_NOT_LOCKED);
+#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+ so = SCTP_INP_SO(inp);
+ atomic_add_int(&stcb->asoc.refcnt, 1);
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_SOCKET_LOCK(so, 1);
+ SCTP_TCB_LOCK(stcb);
+ atomic_subtract_int(&stcb->asoc.refcnt, 1);
+#endif
+ (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_2);
+#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+ SCTP_SOCKET_UNLOCK(so, 1);
+ /* SCTP_TCB_UNLOCK(stcb); MT: I think this is not needed. */
+#endif
+ /* no need to unlock here, since the TCB is gone */
+ } else {
+ SCTP_TCB_UNLOCK(stcb);
+ }
+}
+
+void
+sctp_ctlinput(int cmd, struct sockaddr *sa, void *vip)
+{
+ struct ip *ip = vip;
+ struct sctphdr *sh;
+ uint32_t vrf_id;
+
+ /* FIX, for non-bsd is this right? */
+ vrf_id = SCTP_DEFAULT_VRFID;
+ if (sa->sa_family != AF_INET ||
+ ((struct sockaddr_in *)sa)->sin_addr.s_addr == INADDR_ANY) {
+ return;
+ }
+ if (PRC_IS_REDIRECT(cmd)) {
+		ip = NULL;
+ } else if ((unsigned)cmd >= PRC_NCMDS || inetctlerrmap[cmd] == 0) {
+ return;
+ }
+ if (ip) {
+ struct sctp_inpcb *inp = NULL;
+ struct sctp_tcb *stcb = NULL;
+ struct sctp_nets *net = NULL;
+ struct sockaddr_in to, from;
+
+ sh = (struct sctphdr *)((caddr_t)ip + (ip->ip_hl << 2));
+ bzero(&to, sizeof(to));
+ bzero(&from, sizeof(from));
+ from.sin_family = to.sin_family = AF_INET;
+ from.sin_len = to.sin_len = sizeof(to);
+ from.sin_port = sh->src_port;
+ from.sin_addr = ip->ip_src;
+ to.sin_port = sh->dest_port;
+ to.sin_addr = ip->ip_dst;
+
+ /*
+ * 'to' holds the dest of the packet that failed to be sent.
+ * 'from' holds our local endpoint address. Thus we reverse
+ * the to and the from in the lookup.
+ */
+ stcb = sctp_findassociation_addr_sa((struct sockaddr *)&from,
+ (struct sockaddr *)&to,
+ &inp, &net, 1, vrf_id);
+ if (stcb != NULL && inp && (inp->sctp_socket != NULL)) {
+ if (cmd != PRC_MSGSIZE) {
+ sctp_notify(inp, ip, sh,
+ (struct sockaddr *)&to, stcb,
+ net);
+ } else {
+ /* handle possible ICMP size messages */
+ sctp_notify_mbuf(inp, stcb, net, ip, sh);
+ }
+ } else {
+ if ((stcb == NULL) && (inp != NULL)) {
+ /* reduce ref-count */
+ SCTP_INP_WLOCK(inp);
+ SCTP_INP_DECR_REF(inp);
+ SCTP_INP_WUNLOCK(inp);
+ }
+ if (stcb) {
+ SCTP_TCB_UNLOCK(stcb);
+ }
+ }
+ }
+ return;
+}
+
+static int
+sctp_getcred(SYSCTL_HANDLER_ARGS)
+{
+ struct xucred xuc;
+ struct sockaddr_in addrs[2];
+ struct sctp_inpcb *inp;
+ struct sctp_nets *net;
+ struct sctp_tcb *stcb;
+ int error;
+ uint32_t vrf_id;
+
+ /* FIX, for non-bsd is this right? */
+ vrf_id = SCTP_DEFAULT_VRFID;
+
+ error = priv_check(req->td, PRIV_NETINET_GETCRED);
+
+ if (error)
+ return (error);
+
+ error = SYSCTL_IN(req, addrs, sizeof(addrs));
+ if (error)
+ return (error);
+
+ stcb = sctp_findassociation_addr_sa(sintosa(&addrs[0]),
+ sintosa(&addrs[1]),
+ &inp, &net, 1, vrf_id);
+ if (stcb == NULL || inp == NULL || inp->sctp_socket == NULL) {
+ if ((inp != NULL) && (stcb == NULL)) {
+ /* reduce ref-count */
+ SCTP_INP_WLOCK(inp);
+ SCTP_INP_DECR_REF(inp);
+ goto cred_can_cont;
+ }
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT);
+ error = ENOENT;
+ goto out;
+ }
+ SCTP_TCB_UNLOCK(stcb);
+	/*
+	 * We use the write lock here only because the error leg needs
+	 * it. If we used RLOCK, then we would have to
+	 * wlock/decr/unlock/rlock, which in theory could create a hole.
+	 * Better to take the stronger write lock up front.
+	 */
+ SCTP_INP_WLOCK(inp);
+cred_can_cont:
+ error = cr_canseesocket(req->td->td_ucred, inp->sctp_socket);
+ if (error) {
+ SCTP_INP_WUNLOCK(inp);
+ goto out;
+ }
+ cru2x(inp->sctp_socket->so_cred, &xuc);
+ SCTP_INP_WUNLOCK(inp);
+ error = SYSCTL_OUT(req, &xuc, sizeof(struct xucred));
+out:
+ return (error);
+}
+
+SYSCTL_PROC(_net_inet_sctp, OID_AUTO, getcred, CTLTYPE_OPAQUE | CTLFLAG_RW,
+ 0, 0, sctp_getcred, "S,ucred", "Get the ucred of a SCTP connection");
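+
+/*
+ * Minimal (untested) userland sketch of the sysctl registered above: pass
+ * the two endpoint addresses of an association in and read a struct
+ * xucred back. Which element is local and which is foreign follows the
+ * sctp_findassociation_addr_sa() lookup in the handler.
+ *
+ *	struct sockaddr_in addrs[2];
+ *	struct xucred xuc;
+ *	size_t len = sizeof(xuc);
+ *
+ *	// ... fill in addrs[0] and addrs[1] ...
+ *	if (sysctlbyname("net.inet.sctp.getcred", &xuc, &len,
+ *	    addrs, sizeof(addrs)) == 0)
+ *		printf("owner uid: %u\n", (unsigned)xuc.cr_uid);
+ */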
+
+
+static void
+sctp_abort(struct socket *so)
+{
+ struct sctp_inpcb *inp;
+ uint32_t flags;
+
+ inp = (struct sctp_inpcb *)so->so_pcb;
+	if (inp == NULL) {
+ return;
+ }
+sctp_must_try_again:
+ flags = inp->sctp_flags;
+#ifdef SCTP_LOG_CLOSING
+ sctp_log_closing(inp, NULL, 17);
+#endif
+ if (((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) &&
+ (atomic_cmpset_int(&inp->sctp_flags, flags, (flags | SCTP_PCB_FLAGS_SOCKET_GONE | SCTP_PCB_FLAGS_CLOSE_IP)))) {
+#ifdef SCTP_LOG_CLOSING
+ sctp_log_closing(inp, NULL, 16);
+#endif
+ sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
+ SCTP_CALLED_AFTER_CMPSET_OFCLOSE);
+ SOCK_LOCK(so);
+ SCTP_SB_CLEAR(so->so_snd);
+ /*
+ * same for the rcv ones, they are only here for the
+ * accounting/select.
+ */
+ SCTP_SB_CLEAR(so->so_rcv);
+
+ /* Now null out the reference, we are completely detached. */
+ so->so_pcb = NULL;
+ SOCK_UNLOCK(so);
+ } else {
+ flags = inp->sctp_flags;
+ if ((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) {
+ goto sctp_must_try_again;
+ }
+ }
+ return;
+}
+
+static int
+sctp_attach(struct socket *so, int proto, struct thread *p)
+{
+ struct sctp_inpcb *inp;
+ struct inpcb *ip_inp;
+ int error;
+ uint32_t vrf_id = SCTP_DEFAULT_VRFID;
+
+#ifdef IPSEC
+ uint32_t flags;
+
+#endif
+
+ inp = (struct sctp_inpcb *)so->so_pcb;
+	if (inp != NULL) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ return EINVAL;
+ }
+ if (so->so_snd.sb_hiwat == 0 || so->so_rcv.sb_hiwat == 0) {
+ error = SCTP_SORESERVE(so, SCTP_BASE_SYSCTL(sctp_sendspace), SCTP_BASE_SYSCTL(sctp_recvspace));
+ if (error) {
+ return error;
+ }
+ }
+ error = sctp_inpcb_alloc(so, vrf_id);
+ if (error) {
+ return error;
+ }
+ inp = (struct sctp_inpcb *)so->so_pcb;
+ SCTP_INP_WLOCK(inp);
+ inp->sctp_flags &= ~SCTP_PCB_FLAGS_BOUND_V6; /* I'm not v6! */
+ ip_inp = &inp->ip_inp.inp;
+ ip_inp->inp_vflag |= INP_IPV4;
+ ip_inp->inp_ip_ttl = MODULE_GLOBAL(ip_defttl);
+#ifdef IPSEC
+ error = ipsec_init_policy(so, &ip_inp->inp_sp);
+#ifdef SCTP_LOG_CLOSING
+ sctp_log_closing(inp, NULL, 17);
+#endif
+ if (error != 0) {
+try_again:
+ flags = inp->sctp_flags;
+ if (((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) &&
+ (atomic_cmpset_int(&inp->sctp_flags, flags, (flags | SCTP_PCB_FLAGS_SOCKET_GONE | SCTP_PCB_FLAGS_CLOSE_IP)))) {
+#ifdef SCTP_LOG_CLOSING
+ sctp_log_closing(inp, NULL, 15);
+#endif
+ SCTP_INP_WUNLOCK(inp);
+ sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
+ SCTP_CALLED_AFTER_CMPSET_OFCLOSE);
+ } else {
+ flags = inp->sctp_flags;
+ if ((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) {
+ goto try_again;
+ } else {
+ SCTP_INP_WUNLOCK(inp);
+ }
+ }
+ return error;
+ }
+#endif /* IPSEC */
+ SCTP_INP_WUNLOCK(inp);
+ return 0;
+}
+
+static int
+sctp_bind(struct socket *so, struct sockaddr *addr, struct thread *p)
+{
+ struct sctp_inpcb *inp = NULL;
+ int error;
+
+#ifdef INET6
+ if (addr && addr->sa_family != AF_INET) {
+ /* must be a v4 address! */
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ return EINVAL;
+ }
+#endif /* INET6 */
+ if (addr && (addr->sa_len != sizeof(struct sockaddr_in))) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ return EINVAL;
+ }
+ inp = (struct sctp_inpcb *)so->so_pcb;
+	if (inp == NULL) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ return EINVAL;
+ }
+ error = sctp_inpcb_bind(so, addr, NULL, p);
+ return error;
+}
+
+void
+sctp_close(struct socket *so)
+{
+ struct sctp_inpcb *inp;
+ uint32_t flags;
+
+ inp = (struct sctp_inpcb *)so->so_pcb;
+	if (inp == NULL)
+ return;
+
+ /*
+ * Inform all the lower layer assoc that we are done.
+ */
+sctp_must_try_again:
+ flags = inp->sctp_flags;
+#ifdef SCTP_LOG_CLOSING
+ sctp_log_closing(inp, NULL, 17);
+#endif
+ if (((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) &&
+ (atomic_cmpset_int(&inp->sctp_flags, flags, (flags | SCTP_PCB_FLAGS_SOCKET_GONE | SCTP_PCB_FLAGS_CLOSE_IP)))) {
+ if (((so->so_options & SO_LINGER) && (so->so_linger == 0)) ||
+ (so->so_rcv.sb_cc > 0)) {
+#ifdef SCTP_LOG_CLOSING
+ sctp_log_closing(inp, NULL, 13);
+#endif
+ sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
+ SCTP_CALLED_AFTER_CMPSET_OFCLOSE);
+ } else {
+#ifdef SCTP_LOG_CLOSING
+ sctp_log_closing(inp, NULL, 14);
+#endif
+ sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_GRACEFUL_CLOSE,
+ SCTP_CALLED_AFTER_CMPSET_OFCLOSE);
+ }
+ /*
+ * The socket is now detached, no matter what the state of
+ * the SCTP association.
+ */
+ SOCK_LOCK(so);
+ SCTP_SB_CLEAR(so->so_snd);
+ /*
+ * same for the rcv ones, they are only here for the
+ * accounting/select.
+ */
+ SCTP_SB_CLEAR(so->so_rcv);
+
+ /* Now null out the reference, we are completely detached. */
+ so->so_pcb = NULL;
+ SOCK_UNLOCK(so);
+ } else {
+ flags = inp->sctp_flags;
+ if ((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) {
+ goto sctp_must_try_again;
+ }
+ }
+ return;
+}
+
+
+int
+sctp_sendm(struct socket *so, int flags, struct mbuf *m, struct sockaddr *addr,
+ struct mbuf *control, struct thread *p);
+
+
+int
+sctp_sendm(struct socket *so, int flags, struct mbuf *m, struct sockaddr *addr,
+ struct mbuf *control, struct thread *p)
+{
+ struct sctp_inpcb *inp;
+ int error;
+
+ inp = (struct sctp_inpcb *)so->so_pcb;
+	if (inp == NULL) {
+ if (control) {
+ sctp_m_freem(control);
+ control = NULL;
+ }
+ SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ sctp_m_freem(m);
+ return EINVAL;
+ }
+	/* Must have a destination address if we are NOT a connected socket */
+ if ((addr == NULL) &&
+ ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) ||
+ (inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE))
+ ) {
+ goto connected_type;
+ } else if (addr == NULL) {
+ SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EDESTADDRREQ);
+ error = EDESTADDRREQ;
+ sctp_m_freem(m);
+ if (control) {
+ sctp_m_freem(control);
+ control = NULL;
+ }
+ return (error);
+ }
+#ifdef INET6
+ if (addr->sa_family != AF_INET) {
+ /* must be a v4 address! */
+ SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EDESTADDRREQ);
+ sctp_m_freem(m);
+ if (control) {
+ sctp_m_freem(control);
+ control = NULL;
+ }
+		return (EDESTADDRREQ);
+ }
+#endif /* INET6 */
+connected_type:
+ /* now what about control */
+ if (control) {
+ if (inp->control) {
+ SCTP_PRINTF("huh? control set?\n");
+ sctp_m_freem(inp->control);
+ inp->control = NULL;
+ }
+ inp->control = control;
+ }
+ /* Place the data */
+ if (inp->pkt) {
+ SCTP_BUF_NEXT(inp->pkt_last) = m;
+ inp->pkt_last = m;
+ } else {
+ inp->pkt_last = inp->pkt = m;
+ }
+ if (
+ /* FreeBSD uses a flag passed */
+ ((flags & PRUS_MORETOCOME) == 0)
+ ) {
+		/*
+		 * Note: with the current version this code will only be
+		 * used by OpenBSD; NetBSD, FreeBSD, and MacOS have methods
+		 * for re-defining sosend to use sctp_sosend. One can
+		 * optionally switch back to this code (by changing back the
+		 * definitions), but this is not advisable. This code is
+		 * used by FreeBSD when sending a file with sendfile(),
+		 * though.
+		 */
+ int ret;
+
+ ret = sctp_output(inp, inp->pkt, addr, inp->control, p, flags);
+ inp->pkt = NULL;
+ inp->control = NULL;
+ return (ret);
+ } else {
+ return (0);
+ }
+}
+
+int
+sctp_disconnect(struct socket *so)
+{
+ struct sctp_inpcb *inp;
+
+ inp = (struct sctp_inpcb *)so->so_pcb;
+ if (inp == NULL) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTCONN);
+ return (ENOTCONN);
+ }
+ SCTP_INP_RLOCK(inp);
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+ (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
+ if (LIST_EMPTY(&inp->sctp_asoc_list)) {
+ /* No connection */
+ SCTP_INP_RUNLOCK(inp);
+ return (0);
+ } else {
+ struct sctp_association *asoc;
+ struct sctp_tcb *stcb;
+
+ stcb = LIST_FIRST(&inp->sctp_asoc_list);
+ if (stcb == NULL) {
+ SCTP_INP_RUNLOCK(inp);
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ return (EINVAL);
+ }
+ SCTP_TCB_LOCK(stcb);
+ asoc = &stcb->asoc;
+ if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
+ /* We are about to be freed, out of here */
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_INP_RUNLOCK(inp);
+ return (0);
+ }
+ if (((so->so_options & SO_LINGER) &&
+ (so->so_linger == 0)) ||
+ (so->so_rcv.sb_cc > 0)) {
+ if (SCTP_GET_STATE(asoc) !=
+ SCTP_STATE_COOKIE_WAIT) {
+ /* Left with Data unread */
+ struct mbuf *err;
+
+ err = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 0, M_DONTWAIT, 1, MT_DATA);
+ if (err) {
+ /*
+ * Fill in the user
+ * initiated abort
+ */
+ struct sctp_paramhdr *ph;
+
+ ph = mtod(err, struct sctp_paramhdr *);
+ SCTP_BUF_LEN(err) = sizeof(struct sctp_paramhdr);
+ ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
+ ph->param_length = htons(SCTP_BUF_LEN(err));
+ }
+#if defined(SCTP_PANIC_ON_ABORT)
+ panic("disconnect does an abort");
+#endif
+ sctp_send_abort_tcb(stcb, err, SCTP_SO_LOCKED);
+ SCTP_STAT_INCR_COUNTER32(sctps_aborted);
+ }
+ SCTP_INP_RUNLOCK(inp);
+ if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
+ (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
+ SCTP_STAT_DECR_GAUGE32(sctps_currestab);
+ }
+ (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_3);
+				/* No unlock needed, the TCB and association are gone */
+ return (0);
+ }
+ if (TAILQ_EMPTY(&asoc->send_queue) &&
+ TAILQ_EMPTY(&asoc->sent_queue) &&
+ (asoc->stream_queue_cnt == 0)) {
+ /* there is nothing queued to send, so done */
+ if (asoc->locked_on_sending) {
+ goto abort_anyway;
+ }
+ if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
+ (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
+ /* only send SHUTDOWN 1st time thru */
+ sctp_stop_timers_for_shutdown(stcb);
+ sctp_send_shutdown(stcb,
+ stcb->asoc.primary_destination);
+ sctp_chunk_output(stcb->sctp_ep, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_LOCKED);
+ if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
+ (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
+ SCTP_STAT_DECR_GAUGE32(sctps_currestab);
+ }
+ SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
+ SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
+ sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
+ stcb->sctp_ep, stcb,
+ asoc->primary_destination);
+ sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
+ stcb->sctp_ep, stcb,
+ asoc->primary_destination);
+ }
+ } else {
+ /*
+ * we still got (or just got) data to send,
+ * so set SHUTDOWN_PENDING
+ */
+ /*
+ * XXX sockets draft says that SCTP_EOF
+ * should be sent with no data. currently,
+ * we will allow user data to be sent first
+ * and move to SHUTDOWN-PENDING
+ */
+ asoc->state |= SCTP_STATE_SHUTDOWN_PENDING;
+ sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
+ asoc->primary_destination);
+ if (asoc->locked_on_sending) {
+ /* Locked to send out the data */
+ struct sctp_stream_queue_pending *sp;
+
+ sp = TAILQ_LAST(&asoc->locked_on_sending->outqueue, sctp_streamhead);
+ if (sp == NULL) {
+ SCTP_PRINTF("Error, sp is NULL, locked on sending is non-null strm:%d\n",
+ asoc->locked_on_sending->stream_no);
+ } else {
+ if ((sp->length == 0) && (sp->msg_is_complete == 0))
+ asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
+ }
+ }
+ if (TAILQ_EMPTY(&asoc->send_queue) &&
+ TAILQ_EMPTY(&asoc->sent_queue) &&
+ (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
+ struct mbuf *op_err;
+
+ abort_anyway:
+ op_err = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
+ 0, M_DONTWAIT, 1, MT_DATA);
+ if (op_err) {
+ /*
+ * Fill in the user
+ * initiated abort
+ */
+ struct sctp_paramhdr *ph;
+ uint32_t *ippp;
+
+ SCTP_BUF_LEN(op_err) =
+ (sizeof(struct sctp_paramhdr) + sizeof(uint32_t));
+ ph = mtod(op_err,
+ struct sctp_paramhdr *);
+ ph->param_type = htons(
+ SCTP_CAUSE_USER_INITIATED_ABT);
+ ph->param_length = htons(SCTP_BUF_LEN(op_err));
+ ippp = (uint32_t *) (ph + 1);
+ *ippp = htonl(SCTP_FROM_SCTP_USRREQ + SCTP_LOC_4);
+ }
+#if defined(SCTP_PANIC_ON_ABORT)
+ panic("disconnect does an abort");
+#endif
+
+ stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_USRREQ + SCTP_LOC_4;
+ sctp_send_abort_tcb(stcb, op_err, SCTP_SO_LOCKED);
+ SCTP_STAT_INCR_COUNTER32(sctps_aborted);
+ if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
+ (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
+ SCTP_STAT_DECR_GAUGE32(sctps_currestab);
+ }
+ SCTP_INP_RUNLOCK(inp);
+ (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_5);
+ return (0);
+ } else {
+ sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CLOSING, SCTP_SO_LOCKED);
+ }
+ }
+ soisdisconnecting(so);
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_INP_RUNLOCK(inp);
+ return (0);
+ }
+ /* not reached */
+ } else {
+ /* UDP model does not support this */
+ SCTP_INP_RUNLOCK(inp);
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP);
+ return EOPNOTSUPP;
+ }
+}
+
+int
+sctp_flush(struct socket *so, int how)
+{
+ /*
+ * We will just clear out the values and let subsequent close clear
+ * out the data, if any. Note if the user did a shutdown(SHUT_RD)
+ * they will not be able to read the data, the socket will block
+ * that from happening.
+ */
+ struct sctp_inpcb *inp;
+
+ inp = (struct sctp_inpcb *)so->so_pcb;
+ if (inp == NULL) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ return EINVAL;
+ }
+ SCTP_INP_RLOCK(inp);
+ /* For the 1 to many model this does nothing */
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) {
+ SCTP_INP_RUNLOCK(inp);
+ return (0);
+ }
+ SCTP_INP_RUNLOCK(inp);
+ if ((how == PRU_FLUSH_RD) || (how == PRU_FLUSH_RDWR)) {
+		/*
+		 * First make sure the socket buffer will be consistent; we
+		 * don't use these fields except maybe for the count.
+		 */
+ SCTP_INP_WLOCK(inp);
+ SCTP_INP_READ_LOCK(inp);
+ inp->sctp_flags |= SCTP_PCB_FLAGS_SOCKET_CANT_READ;
+ SCTP_INP_READ_UNLOCK(inp);
+ SCTP_INP_WUNLOCK(inp);
+ so->so_rcv.sb_cc = 0;
+ so->so_rcv.sb_mbcnt = 0;
+ so->so_rcv.sb_mb = NULL;
+ }
+ if ((how == PRU_FLUSH_WR) || (how == PRU_FLUSH_RDWR)) {
+		/*
+		 * First make sure the socket buffer will be consistent; we
+		 * don't use these fields except maybe for the count.
+		 */
+ so->so_snd.sb_cc = 0;
+ so->so_snd.sb_mbcnt = 0;
+ so->so_snd.sb_mb = NULL;
+
+ }
+ return (0);
+}
+
+int
+sctp_shutdown(struct socket *so)
+{
+ struct sctp_inpcb *inp;
+
+ inp = (struct sctp_inpcb *)so->so_pcb;
+	if (inp == NULL) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ return EINVAL;
+ }
+ SCTP_INP_RLOCK(inp);
+	/* For the UDP model this is an invalid call */
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) {
+ /* Restore the flags that the soshutdown took away. */
+ SOCKBUF_LOCK(&so->so_rcv);
+ so->so_rcv.sb_state &= ~SBS_CANTRCVMORE;
+ SOCKBUF_UNLOCK(&so->so_rcv);
+ /* This proc will wakeup for read and do nothing (I hope) */
+ SCTP_INP_RUNLOCK(inp);
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP);
+ return (EOPNOTSUPP);
+ }
+	/*
+	 * OK, if we reach here it is the TCP model and the request is
+	 * either a SHUT_WR or a SHUT_RDWR. This means we put the
+	 * shutdown flag against the association.
+	 */
+ {
+ struct sctp_tcb *stcb;
+ struct sctp_association *asoc;
+
+ if ((so->so_state &
+ (SS_ISCONNECTED | SS_ISCONNECTING | SS_ISDISCONNECTING)) == 0) {
+ SCTP_INP_RUNLOCK(inp);
+ return (ENOTCONN);
+ }
+ socantsendmore(so);
+
+ stcb = LIST_FIRST(&inp->sctp_asoc_list);
+ if (stcb == NULL) {
+ /*
+ * Ok we hit the case that the shutdown call was
+ * made after an abort or something. Nothing to do
+ * now.
+ */
+ SCTP_INP_RUNLOCK(inp);
+ return (0);
+ }
+ SCTP_TCB_LOCK(stcb);
+ asoc = &stcb->asoc;
+ if (TAILQ_EMPTY(&asoc->send_queue) &&
+ TAILQ_EMPTY(&asoc->sent_queue) &&
+ (asoc->stream_queue_cnt == 0)) {
+ if (asoc->locked_on_sending) {
+ goto abort_anyway;
+ }
+ /* there is nothing queued to send, so I'm done... */
+ if (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) {
+ /* only send SHUTDOWN the first time through */
+ sctp_stop_timers_for_shutdown(stcb);
+ sctp_send_shutdown(stcb,
+ stcb->asoc.primary_destination);
+ sctp_chunk_output(stcb->sctp_ep, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_LOCKED);
+ if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
+ (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
+ SCTP_STAT_DECR_GAUGE32(sctps_currestab);
+ }
+ SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
+ SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
+ sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
+ stcb->sctp_ep, stcb,
+ asoc->primary_destination);
+ sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
+ stcb->sctp_ep, stcb,
+ asoc->primary_destination);
+ }
+ } else {
+ /*
+ * we still got (or just got) data to send, so set
+ * SHUTDOWN_PENDING
+ */
+ asoc->state |= SCTP_STATE_SHUTDOWN_PENDING;
+ sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
+ asoc->primary_destination);
+
+ if (asoc->locked_on_sending) {
+ /* Locked to send out the data */
+ struct sctp_stream_queue_pending *sp;
+
+ sp = TAILQ_LAST(&asoc->locked_on_sending->outqueue, sctp_streamhead);
+ if (sp == NULL) {
+ SCTP_PRINTF("Error, sp is NULL, locked on sending is non-null strm:%d\n",
+ asoc->locked_on_sending->stream_no);
+ } else {
+ if ((sp->length == 0) && (sp->msg_is_complete == 0)) {
+ asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
+ }
+ }
+ }
+ if (TAILQ_EMPTY(&asoc->send_queue) &&
+ TAILQ_EMPTY(&asoc->sent_queue) &&
+ (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
+ struct mbuf *op_err;
+
+ abort_anyway:
+ op_err = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
+ 0, M_DONTWAIT, 1, MT_DATA);
+ if (op_err) {
+ /* Fill in the user initiated abort */
+ struct sctp_paramhdr *ph;
+ uint32_t *ippp;
+
+ SCTP_BUF_LEN(op_err) =
+ sizeof(struct sctp_paramhdr) + sizeof(uint32_t);
+ ph = mtod(op_err,
+ struct sctp_paramhdr *);
+ ph->param_type = htons(
+ SCTP_CAUSE_USER_INITIATED_ABT);
+ ph->param_length = htons(SCTP_BUF_LEN(op_err));
+ ippp = (uint32_t *) (ph + 1);
+ *ippp = htonl(SCTP_FROM_SCTP_USRREQ + SCTP_LOC_6);
+ }
+#if defined(SCTP_PANIC_ON_ABORT)
+ panic("shutdown does an abort");
+#endif
+ stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_USRREQ + SCTP_LOC_6;
+ sctp_abort_an_association(stcb->sctp_ep, stcb,
+ SCTP_RESPONSE_TO_USER_REQ,
+ op_err, SCTP_SO_LOCKED);
+ goto skip_unlock;
+ } else {
+ sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CLOSING, SCTP_SO_LOCKED);
+ }
+ }
+ SCTP_TCB_UNLOCK(stcb);
+ }
+skip_unlock:
+ SCTP_INP_RUNLOCK(inp);
+ return 0;
+}
+
+/*
+ * Copies a "user" presentable address and removes embedded scope, etc.
+ * Returns 0 on success, 1 on error.
+ */
+static uint32_t
+sctp_fill_user_address(struct sockaddr_storage *ss, struct sockaddr *sa)
+{
+#ifdef INET6
+ struct sockaddr_in6 lsa6;
+
+ sa = (struct sockaddr *)sctp_recover_scope((struct sockaddr_in6 *)sa,
+ &lsa6);
+#endif
+ memcpy(ss, sa, sa->sa_len);
+ return (0);
+}
+
+
+
+/*
+ * NOTE: assumes addr lock is held
+ */
+static size_t
+sctp_fill_up_addresses_vrf(struct sctp_inpcb *inp,
+ struct sctp_tcb *stcb,
+ size_t limit,
+ struct sockaddr_storage *sas,
+ uint32_t vrf_id)
+{
+ struct sctp_ifn *sctp_ifn;
+ struct sctp_ifa *sctp_ifa;
+ int loopback_scope, ipv4_local_scope, local_scope, site_scope;
+ size_t actual;
+ int ipv4_addr_legal, ipv6_addr_legal;
+ struct sctp_vrf *vrf;
+
+ actual = 0;
+ if (limit <= 0)
+ return (actual);
+
+ if (stcb) {
+ /* Turn on all the appropriate scope */
+ loopback_scope = stcb->asoc.loopback_scope;
+ ipv4_local_scope = stcb->asoc.ipv4_local_scope;
+ local_scope = stcb->asoc.local_scope;
+ site_scope = stcb->asoc.site_scope;
+ } else {
+ /* Turn on ALL scope, since we look at the EP */
+ loopback_scope = ipv4_local_scope = local_scope =
+ site_scope = 1;
+ }
+ ipv4_addr_legal = ipv6_addr_legal = 0;
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
+ ipv6_addr_legal = 1;
+ if (SCTP_IPV6_V6ONLY(inp) == 0) {
+ ipv4_addr_legal = 1;
+ }
+ } else {
+ ipv4_addr_legal = 1;
+ }
+ vrf = sctp_find_vrf(vrf_id);
+ if (vrf == NULL) {
+ return (0);
+ }
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
+ LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
+ if ((loopback_scope == 0) &&
+ SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
+ /* Skip loopback if loopback_scope not set */
+ continue;
+ }
+ LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
+ if (stcb) {
+					/*
+					 * For the BOUND-ALL case, the list
+					 * associated with a TCB is always
+					 * considered a reverse list, i.e.
+					 * it lists addresses that are NOT
+					 * part of the association. If this
+					 * is one of those, we must skip it.
+					 */
+ if (sctp_is_addr_restricted(stcb,
+ sctp_ifa)) {
+ continue;
+ }
+ }
+ switch (sctp_ifa->address.sa.sa_family) {
+ case AF_INET:
+ if (ipv4_addr_legal) {
+ struct sockaddr_in *sin;
+
+ sin = (struct sockaddr_in *)&sctp_ifa->address.sa;
+ if (sin->sin_addr.s_addr == 0) {
+						/* we skip unspecified addresses */
+ continue;
+ }
+ if ((ipv4_local_scope == 0) &&
+ (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
+ continue;
+ }
+#ifdef INET6
+ if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) {
+ in6_sin_2_v4mapsin6(sin, (struct sockaddr_in6 *)sas);
+ ((struct sockaddr_in6 *)sas)->sin6_port = inp->sctp_lport;
+ sas = (struct sockaddr_storage *)((caddr_t)sas + sizeof(struct sockaddr_in6));
+ actual += sizeof(struct sockaddr_in6);
+ } else {
+#endif
+ memcpy(sas, sin, sizeof(*sin));
+ ((struct sockaddr_in *)sas)->sin_port = inp->sctp_lport;
+ sas = (struct sockaddr_storage *)((caddr_t)sas + sizeof(*sin));
+ actual += sizeof(*sin);
+#ifdef INET6
+ }
+#endif
+ if (actual >= limit) {
+ return (actual);
+ }
+ } else {
+ continue;
+ }
+ break;
+#ifdef INET6
+ case AF_INET6:
+ if (ipv6_addr_legal) {
+ struct sockaddr_in6 *sin6;
+
+ sin6 = (struct sockaddr_in6 *)&sctp_ifa->address.sa;
+ if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
+						/* we skip unspecified addresses */
+ continue;
+ }
+ if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
+ if (local_scope == 0)
+ continue;
+ if (sin6->sin6_scope_id == 0) {
+ if (sa6_recoverscope(sin6) != 0)
+							/* bad link local address */
+ continue;
+ }
+ }
+ if ((site_scope == 0) &&
+ (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
+ continue;
+ }
+ memcpy(sas, sin6, sizeof(*sin6));
+ ((struct sockaddr_in6 *)sas)->sin6_port = inp->sctp_lport;
+ sas = (struct sockaddr_storage *)((caddr_t)sas + sizeof(*sin6));
+ actual += sizeof(*sin6);
+ if (actual >= limit) {
+ return (actual);
+ }
+ } else {
+ continue;
+ }
+ break;
+#endif
+ default:
+ /* TSNH */
+ break;
+ }
+ }
+ }
+ } else {
+ struct sctp_laddr *laddr;
+
+ LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
+ if (stcb) {
+ if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
+ continue;
+ }
+ }
+ if (sctp_fill_user_address(sas, &laddr->ifa->address.sa))
+ continue;
+
+ ((struct sockaddr_in6 *)sas)->sin6_port = inp->sctp_lport;
+ sas = (struct sockaddr_storage *)((caddr_t)sas +
+ laddr->ifa->address.sa.sa_len);
+ actual += laddr->ifa->address.sa.sa_len;
+ if (actual >= limit) {
+ return (actual);
+ }
+ }
+ }
+ return (actual);
+}
+
+static size_t
+sctp_fill_up_addresses(struct sctp_inpcb *inp,
+ struct sctp_tcb *stcb,
+ size_t limit,
+ struct sockaddr_storage *sas)
+{
+ size_t size = 0;
+
+ SCTP_IPI_ADDR_RLOCK();
+ /* fill up addresses for the endpoint's default vrf */
+ size = sctp_fill_up_addresses_vrf(inp, stcb, limit, sas,
+ inp->def_vrf_id);
+ SCTP_IPI_ADDR_RUNLOCK();
+ return (size);
+}
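+
+/*
+ * The buffer filled above is a packed run of variable-length sockaddrs
+ * (sockaddr_in and sockaddr_in6 mixed). A consumer is expected to walk
+ * it by sa_len, along these lines (an illustrative sketch only):
+ *
+ *	struct sockaddr *sa = (struct sockaddr *)sas;
+ *
+ *	while (actual > 0) {
+ *		// ... use sa ...
+ *		actual -= sa->sa_len;
+ *		sa = (struct sockaddr *)((caddr_t)sa + sa->sa_len);
+ *	}
+ */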
+
+/*
+ * NOTE: assumes addr lock is held
+ */
+static int
+sctp_count_max_addresses_vrf(struct sctp_inpcb *inp, uint32_t vrf_id)
+{
+ int cnt = 0;
+ struct sctp_vrf *vrf = NULL;
+
+	/*
+	 * In both the sub-set bound and bound-all cases we return the
+	 * MAXIMUM number of addresses that you COULD get. In reality the
+	 * sub-set bound case may have an exclusion list for a given TCB,
+	 * OR in the bound-all case a TCB may NOT include the loopback or
+	 * other addresses as well.
+	 */
+ vrf = sctp_find_vrf(vrf_id);
+ if (vrf == NULL) {
+ return (0);
+ }
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
+ struct sctp_ifn *sctp_ifn;
+ struct sctp_ifa *sctp_ifa;
+
+ LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
+ LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
+ /* Count them if they are the right type */
+ if (sctp_ifa->address.sa.sa_family == AF_INET) {
+ if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4))
+ cnt += sizeof(struct sockaddr_in6);
+ else
+ cnt += sizeof(struct sockaddr_in);
+
+ } else if (sctp_ifa->address.sa.sa_family == AF_INET6)
+ cnt += sizeof(struct sockaddr_in6);
+ }
+ }
+ } else {
+ struct sctp_laddr *laddr;
+
+ LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
+ if (laddr->ifa->address.sa.sa_family == AF_INET) {
+ if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4))
+ cnt += sizeof(struct sockaddr_in6);
+ else
+ cnt += sizeof(struct sockaddr_in);
+
+ } else if (laddr->ifa->address.sa.sa_family == AF_INET6)
+ cnt += sizeof(struct sockaddr_in6);
+ }
+ }
+ return (cnt);
+}
+
+static int
+sctp_count_max_addresses(struct sctp_inpcb *inp)
+{
+ int cnt = 0;
+
+ SCTP_IPI_ADDR_RLOCK();
+ /* count addresses for the endpoint's default VRF */
+ cnt = sctp_count_max_addresses_vrf(inp, inp->def_vrf_id);
+ SCTP_IPI_ADDR_RUNLOCK();
+ return (cnt);
+}
+
+static int
+sctp_do_connect_x(struct socket *so, struct sctp_inpcb *inp, void *optval,
+ size_t optsize, void *p, int delay)
+{
+ int error = 0;
+ int creat_lock_on = 0;
+ struct sctp_tcb *stcb = NULL;
+ struct sockaddr *sa;
+ int num_v6 = 0, num_v4 = 0, *totaddrp, totaddr;
+ int added = 0;
+ uint32_t vrf_id;
+ int bad_addresses = 0;
+ sctp_assoc_t *a_id;
+
+ SCTPDBG(SCTP_DEBUG_PCB1, "Connectx called\n");
+
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
+ (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED)) {
+ /* We are already connected AND the TCP model */
+ SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EADDRINUSE);
+ return (EADDRINUSE);
+ }
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) &&
+ (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_PORTREUSE))) {
+ SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ return (EINVAL);
+ }
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
+ SCTP_INP_RLOCK(inp);
+ stcb = LIST_FIRST(&inp->sctp_asoc_list);
+ SCTP_INP_RUNLOCK(inp);
+ }
+ if (stcb) {
+ SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EALREADY);
+ return (EALREADY);
+ }
+ SCTP_INP_INCR_REF(inp);
+ SCTP_ASOC_CREATE_LOCK(inp);
+ creat_lock_on = 1;
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
+ (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
+ SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EFAULT);
+ error = EFAULT;
+ goto out_now;
+ }
+ totaddrp = (int *)optval;
+ totaddr = *totaddrp;
+ sa = (struct sockaddr *)(totaddrp + 1);
+ stcb = sctp_connectx_helper_find(inp, sa, &totaddr, &num_v4, &num_v6, &error, (optsize - sizeof(int)), &bad_addresses);
+ if ((stcb != NULL) || bad_addresses) {
+		/* Already have or am bringing up an association */
+ SCTP_ASOC_CREATE_UNLOCK(inp);
+ creat_lock_on = 0;
+ if (stcb)
+ SCTP_TCB_UNLOCK(stcb);
+ if (bad_addresses == 0) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EALREADY);
+ error = EALREADY;
+ }
+ goto out_now;
+ }
+#ifdef INET6
+ if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) &&
+ (num_v6 > 0)) {
+ error = EINVAL;
+ goto out_now;
+ }
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
+ (num_v4 > 0)) {
+ struct in6pcb *inp6;
+
+ inp6 = (struct in6pcb *)inp;
+ if (SCTP_IPV6_V6ONLY(inp6)) {
+ /*
+ * if IPV6_V6ONLY flag, ignore connections destined
+ * to a v4 addr or v4-mapped addr
+ */
+ SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ goto out_now;
+ }
+ }
+#endif /* INET6 */
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) ==
+ SCTP_PCB_FLAGS_UNBOUND) {
+		/* Bind an ephemeral port */
+ error = sctp_inpcb_bind(so, NULL, NULL, p);
+ if (error) {
+ goto out_now;
+ }
+ }
+ /* FIX ME: do we want to pass in a vrf on the connect call? */
+ vrf_id = inp->def_vrf_id;
+
+
+ /* We are GOOD to go */
+ stcb = sctp_aloc_assoc(inp, sa, &error, 0, vrf_id,
+ (struct thread *)p
+ );
+ if (stcb == NULL) {
+ /* Gak! no memory */
+ goto out_now;
+ }
+ SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_COOKIE_WAIT);
+ /* move to second address */
+ if (sa->sa_family == AF_INET)
+ sa = (struct sockaddr *)((caddr_t)sa + sizeof(struct sockaddr_in));
+ else
+ sa = (struct sockaddr *)((caddr_t)sa + sizeof(struct sockaddr_in6));
+
+ error = 0;
+ added = sctp_connectx_helper_add(stcb, sa, (totaddr - 1), &error);
+ /* Fill in the return id */
+ if (error) {
+ (void)sctp_free_assoc(inp, stcb, SCTP_PCBFREE_FORCE, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_6);
+ goto out_now;
+ }
+ a_id = (sctp_assoc_t *) optval;
+ *a_id = sctp_get_associd(stcb);
+
+ /* initialize authentication parameters for the assoc */
+ sctp_initialize_auth_params(inp, stcb);
+
+ if (delay) {
+ /* doing delayed connection */
+ stcb->asoc.delayed_connection = 1;
+ sctp_timer_start(SCTP_TIMER_TYPE_INIT, inp, stcb, stcb->asoc.primary_destination);
+ } else {
+ (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);
+ sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED);
+ }
+ SCTP_TCB_UNLOCK(stcb);
+ if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) {
+ stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
+ /* Set the connected flag so we can queue data */
+ soisconnecting(so);
+ }
+out_now:
+ if (creat_lock_on) {
+ SCTP_ASOC_CREATE_UNLOCK(inp);
+ }
+ SCTP_INP_DECR_REF(inp);
+ return error;
+}
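+
+/*
+ * For reference, the optval buffer consumed above is laid out as a
+ * leading int holding the address count, followed immediately by the
+ * packed sockaddrs themselves:
+ *
+ *	[ int totaddr | struct sockaddr_in / sockaddr_in6 ... ]
+ *
+ * and on success the first bytes are overwritten with the new
+ * sctp_assoc_t, so the caller gets the association id back in place.
+ */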
+
+#define SCTP_FIND_STCB(inp, stcb, assoc_id) { \
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||\
+ (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { \
+ SCTP_INP_RLOCK(inp); \
+ stcb = LIST_FIRST(&inp->sctp_asoc_list); \
+ if (stcb) { \
+ SCTP_TCB_LOCK(stcb); \
+ } \
+ SCTP_INP_RUNLOCK(inp); \
+ } else if (assoc_id != 0) { \
+ stcb = sctp_findassociation_ep_asocid(inp, assoc_id, 1); \
+ if (stcb == NULL) { \
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT); \
+ error = ENOENT; \
+ break; \
+ } \
+ } else { \
+ stcb = NULL; \
+ } \
+ }
+
+
+#define SCTP_CHECK_AND_CAST(destp, srcp, type, size) {\
+ if (size < sizeof(type)) { \
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); \
+ error = EINVAL; \
+ break; \
+ } else { \
+ destp = (type *)srcp; \
+ } \
+ }
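+
+/*
+ * Usage sketch for the two helper macros above, mirroring the real cases
+ * in the option handlers below (SCTP_SOME_OPT is an invented name for
+ * illustration only):
+ *
+ *	case SCTP_SOME_OPT:
+ *	{
+ *		struct sctp_assoc_value *av;
+ *
+ *		SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize);
+ *		SCTP_FIND_STCB(inp, stcb, av->assoc_id);
+ *		... read or write the value ...
+ *	}
+ *	break;
+ *
+ * Note that both macros 'break' out of the enclosing switch after
+ * setting 'error', so they are only safe directly inside a case.
+ */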
+
+static int
+sctp_getopt(struct socket *so, int optname, void *optval, size_t *optsize,
+ void *p)
+{
+ struct sctp_inpcb *inp = NULL;
+ int error, val = 0;
+ struct sctp_tcb *stcb = NULL;
+
+ if (optval == NULL) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ return (EINVAL);
+ }
+ inp = (struct sctp_inpcb *)so->so_pcb;
+	if (inp == NULL) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ return EINVAL;
+ }
+ error = 0;
+
+ switch (optname) {
+ case SCTP_NODELAY:
+ case SCTP_AUTOCLOSE:
+ case SCTP_EXPLICIT_EOR:
+ case SCTP_AUTO_ASCONF:
+ case SCTP_DISABLE_FRAGMENTS:
+ case SCTP_I_WANT_MAPPED_V4_ADDR:
+ case SCTP_USE_EXT_RCVINFO:
+ SCTP_INP_RLOCK(inp);
+ switch (optname) {
+ case SCTP_DISABLE_FRAGMENTS:
+ val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NO_FRAGMENT);
+ break;
+ case SCTP_I_WANT_MAPPED_V4_ADDR:
+ val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4);
+ break;
+ case SCTP_AUTO_ASCONF:
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
+ /* only valid for bound all sockets */
+ val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTO_ASCONF);
+ } else {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ goto flags_out;
+ }
+ break;
+ case SCTP_EXPLICIT_EOR:
+ val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
+ break;
+ case SCTP_NODELAY:
+ val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NODELAY);
+ break;
+ case SCTP_USE_EXT_RCVINFO:
+ val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO);
+ break;
+ case SCTP_AUTOCLOSE:
+ if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE))
+ val = TICKS_TO_SEC(inp->sctp_ep.auto_close_time);
+ else
+ val = 0;
+ break;
+
+ default:
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOPROTOOPT);
+ error = ENOPROTOOPT;
+		} /* end switch (optname) */
+ if (optname != SCTP_AUTOCLOSE) {
+ /* make it an "on/off" value */
+ val = (val != 0);
+ }
+ if (*optsize < sizeof(val)) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ }
+flags_out:
+ SCTP_INP_RUNLOCK(inp);
+ if (error == 0) {
+ /* return the option value */
+ *(int *)optval = val;
+ *optsize = sizeof(val);
+ }
+ break;
+ case SCTP_GET_PACKET_LOG:
+ {
+#ifdef SCTP_PACKET_LOGGING
+ uint8_t *target;
+ int ret;
+
+ SCTP_CHECK_AND_CAST(target, optval, uint8_t, *optsize);
+ ret = sctp_copy_out_packet_log(target, (int)*optsize);
+ *optsize = ret;
+#else
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP);
+ error = EOPNOTSUPP;
+#endif
+ break;
+ }
+ case SCTP_REUSE_PORT:
+ {
+ uint32_t *value;
+
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE)) {
+ /* Can't do this for a 1-m socket */
+ error = EINVAL;
+ break;
+ }
+ SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize);
+ *value = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_PORTREUSE);
+ *optsize = sizeof(uint32_t);
+ }
+ break;
+ case SCTP_PARTIAL_DELIVERY_POINT:
+ {
+ uint32_t *value;
+
+ SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize);
+ *value = inp->partial_delivery_point;
+ *optsize = sizeof(uint32_t);
+ }
+ break;
+ case SCTP_FRAGMENT_INTERLEAVE:
+ {
+ uint32_t *value;
+
+ SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize);
+ if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) {
+ if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) {
+ *value = SCTP_FRAG_LEVEL_2;
+ } else {
+ *value = SCTP_FRAG_LEVEL_1;
+ }
+ } else {
+ *value = SCTP_FRAG_LEVEL_0;
+ }
+ *optsize = sizeof(uint32_t);
+ }
+ break;
+ case SCTP_CMT_ON_OFF:
+ {
+ struct sctp_assoc_value *av;
+
+ SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize);
+ SCTP_FIND_STCB(inp, stcb, av->assoc_id);
+ if (stcb) {
+ av->assoc_value = stcb->asoc.sctp_cmt_on_off;
+ SCTP_TCB_UNLOCK(stcb);
+ } else {
+ SCTP_INP_RLOCK(inp);
+ av->assoc_value = inp->sctp_cmt_on_off;
+ SCTP_INP_RUNLOCK(inp);
+ }
+ *optsize = sizeof(*av);
+ }
+ break;
+ /* JRS - Get socket option for pluggable congestion control */
+ case SCTP_PLUGGABLE_CC:
+ {
+ struct sctp_assoc_value *av;
+
+ SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize);
+ SCTP_FIND_STCB(inp, stcb, av->assoc_id);
+ if (stcb) {
+ av->assoc_value = stcb->asoc.congestion_control_module;
+ SCTP_TCB_UNLOCK(stcb);
+ } else {
+ av->assoc_value = inp->sctp_ep.sctp_default_cc_module;
+ }
+ *optsize = sizeof(*av);
+ }
+ break;
+ case SCTP_GET_ADDR_LEN:
+ {
+ struct sctp_assoc_value *av;
+
+ SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize);
+ error = EINVAL;
+#ifdef INET
+ if (av->assoc_value == AF_INET) {
+ av->assoc_value = sizeof(struct sockaddr_in);
+ error = 0;
+ }
+#endif
+#ifdef INET6
+ if (av->assoc_value == AF_INET6) {
+ av->assoc_value = sizeof(struct sockaddr_in6);
+ error = 0;
+ }
+#endif
+ if (error) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
+ }
+ *optsize = sizeof(*av);
+ }
+ break;
+ case SCTP_GET_ASSOC_NUMBER:
+ {
+ uint32_t *value, cnt;
+
+ SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize);
+ cnt = 0;
+ SCTP_INP_RLOCK(inp);
+ LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
+ cnt++;
+ }
+ SCTP_INP_RUNLOCK(inp);
+ *value = cnt;
+ *optsize = sizeof(uint32_t);
+ }
+ break;
+
+ case SCTP_GET_ASSOC_ID_LIST:
+ {
+ struct sctp_assoc_ids *ids;
+ unsigned int at, limit;
+
+ SCTP_CHECK_AND_CAST(ids, optval, struct sctp_assoc_ids, *optsize);
+ at = 0;
+ limit = (*optsize - sizeof(uint32_t)) / sizeof(sctp_assoc_t);
+ SCTP_INP_RLOCK(inp);
+ LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
+ if (at < limit) {
+ ids->gaids_assoc_id[at++] = sctp_get_associd(stcb);
+ } else {
+ error = EINVAL;
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
+ break;
+ }
+ }
+ SCTP_INP_RUNLOCK(inp);
+ ids->gaids_number_of_ids = at;
+ *optsize = ((at * sizeof(sctp_assoc_t)) + sizeof(uint32_t));
+ }
+ break;
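+	/*
+	 * A caller is expected to size the id list in two steps, along
+	 * these lines (an illustrative, untested sketch):
+	 *
+	 *	uint32_t n;
+	 *	socklen_t len = sizeof(n);
+	 *	struct sctp_assoc_ids *ids;
+	 *
+	 *	getsockopt(fd, IPPROTO_SCTP, SCTP_GET_ASSOC_NUMBER, &n, &len);
+	 *	len = sizeof(uint32_t) + n * sizeof(sctp_assoc_t);
+	 *	ids = malloc(len);
+	 *	getsockopt(fd, IPPROTO_SCTP, SCTP_GET_ASSOC_ID_LIST, ids, &len);
+	 */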
+ case SCTP_CONTEXT:
+ {
+ struct sctp_assoc_value *av;
+
+ SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize);
+ SCTP_FIND_STCB(inp, stcb, av->assoc_id);
+
+ if (stcb) {
+ av->assoc_value = stcb->asoc.context;
+ SCTP_TCB_UNLOCK(stcb);
+ } else {
+ SCTP_INP_RLOCK(inp);
+ av->assoc_value = inp->sctp_context;
+ SCTP_INP_RUNLOCK(inp);
+ }
+ *optsize = sizeof(*av);
+ }
+ break;
+ case SCTP_VRF_ID:
+ {
+ uint32_t *default_vrfid;
+
+ SCTP_CHECK_AND_CAST(default_vrfid, optval, uint32_t, *optsize);
+ *default_vrfid = inp->def_vrf_id;
+ break;
+ }
+ case SCTP_GET_ASOC_VRF:
+ {
+ struct sctp_assoc_value *id;
+
+ SCTP_CHECK_AND_CAST(id, optval, struct sctp_assoc_value, *optsize);
+ SCTP_FIND_STCB(inp, stcb, id->assoc_id);
+ if (stcb == NULL) {
+ error = EINVAL;
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
+ break;
+ }
+ id->assoc_value = stcb->asoc.vrf_id;
+ break;
+ }
+ case SCTP_GET_VRF_IDS:
+ {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP);
+ error = EOPNOTSUPP;
+ break;
+ }
+ case SCTP_GET_NONCE_VALUES:
+ {
+ struct sctp_get_nonce_values *gnv;
+
+ SCTP_CHECK_AND_CAST(gnv, optval, struct sctp_get_nonce_values, *optsize);
+ SCTP_FIND_STCB(inp, stcb, gnv->gn_assoc_id);
+
+ if (stcb) {
+ gnv->gn_peers_tag = stcb->asoc.peer_vtag;
+ gnv->gn_local_tag = stcb->asoc.my_vtag;
+ SCTP_TCB_UNLOCK(stcb);
+ } else {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTCONN);
+ error = ENOTCONN;
+ }
+ *optsize = sizeof(*gnv);
+ }
+ break;
+ case SCTP_DELAYED_SACK:
+ {
+ struct sctp_sack_info *sack;
+
+ SCTP_CHECK_AND_CAST(sack, optval, struct sctp_sack_info, *optsize);
+ SCTP_FIND_STCB(inp, stcb, sack->sack_assoc_id);
+ if (stcb) {
+ sack->sack_delay = stcb->asoc.delayed_ack;
+ sack->sack_freq = stcb->asoc.sack_freq;
+ SCTP_TCB_UNLOCK(stcb);
+ } else {
+ SCTP_INP_RLOCK(inp);
+ sack->sack_delay = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
+ sack->sack_freq = inp->sctp_ep.sctp_sack_freq;
+ SCTP_INP_RUNLOCK(inp);
+ }
+ *optsize = sizeof(*sack);
+ }
+ break;
+
+ case SCTP_GET_SNDBUF_USE:
+ {
+ struct sctp_sockstat *ss;
+
+ SCTP_CHECK_AND_CAST(ss, optval, struct sctp_sockstat, *optsize);
+ SCTP_FIND_STCB(inp, stcb, ss->ss_assoc_id);
+
+ if (stcb) {
+ ss->ss_total_sndbuf = stcb->asoc.total_output_queue_size;
+ ss->ss_total_recv_buf = (stcb->asoc.size_on_reasm_queue +
+ stcb->asoc.size_on_all_streams);
+ SCTP_TCB_UNLOCK(stcb);
+ } else {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTCONN);
+ error = ENOTCONN;
+ }
+ *optsize = sizeof(struct sctp_sockstat);
+ }
+ break;
+ case SCTP_MAX_BURST:
+ {
+ uint8_t *value;
+
+ SCTP_CHECK_AND_CAST(value, optval, uint8_t, *optsize);
+
+ SCTP_INP_RLOCK(inp);
+ *value = inp->sctp_ep.max_burst;
+ SCTP_INP_RUNLOCK(inp);
+ *optsize = sizeof(uint8_t);
+ }
+ break;
+ case SCTP_MAXSEG:
+ {
+ struct sctp_assoc_value *av;
+ int ovh;
+
+ SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize);
+ SCTP_FIND_STCB(inp, stcb, av->assoc_id);
+
+ if (stcb) {
+ av->assoc_value = sctp_get_frag_point(stcb, &stcb->asoc);
+ SCTP_TCB_UNLOCK(stcb);
+ } else {
+ SCTP_INP_RLOCK(inp);
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
+ ovh = SCTP_MED_OVERHEAD;
+ } else {
+ ovh = SCTP_MED_V4_OVERHEAD;
+ }
+ if (inp->sctp_frag_point >= SCTP_DEFAULT_MAXSEGMENT)
+ av->assoc_value = 0;
+ else
+ av->assoc_value = inp->sctp_frag_point - ovh;
+ SCTP_INP_RUNLOCK(inp);
+ }
+ *optsize = sizeof(struct sctp_assoc_value);
+ }
+ break;
+ case SCTP_GET_STAT_LOG:
+ error = sctp_fill_stat_log(optval, optsize);
+ break;
+ case SCTP_EVENTS:
+ {
+ struct sctp_event_subscribe *events;
+
+ SCTP_CHECK_AND_CAST(events, optval, struct sctp_event_subscribe, *optsize);
+ memset(events, 0, sizeof(*events));
+ SCTP_INP_RLOCK(inp);
+ if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT))
+ events->sctp_data_io_event = 1;
+
+ if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVASSOCEVNT))
+ events->sctp_association_event = 1;
+
+ if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVPADDREVNT))
+ events->sctp_address_event = 1;
+
+ if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVSENDFAILEVNT))
+ events->sctp_send_failure_event = 1;
+
+ if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVPEERERR))
+ events->sctp_peer_error_event = 1;
+
+ if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT))
+ events->sctp_shutdown_event = 1;
+
+ if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_PDAPIEVNT))
+ events->sctp_partial_delivery_event = 1;
+
+ if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ADAPTATIONEVNT))
+ events->sctp_adaptation_layer_event = 1;
+
+ if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTHEVNT))
+ events->sctp_authentication_event = 1;
+
+ if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_DRYEVNT))
+ events->sctp_sender_dry_event = 1;
+
+ if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_STREAM_RESETEVNT))
+ events->sctp_stream_reset_event = 1;
+ SCTP_INP_RUNLOCK(inp);
+ *optsize = sizeof(struct sctp_event_subscribe);
+ }
+ break;
+
+ case SCTP_ADAPTATION_LAYER:
+ {
+ uint32_t *value;
+
+ SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize);
+
+ SCTP_INP_RLOCK(inp);
+ *value = inp->sctp_ep.adaptation_layer_indicator;
+ SCTP_INP_RUNLOCK(inp);
+ *optsize = sizeof(uint32_t);
+ }
+ break;
+ case SCTP_SET_INITIAL_DBG_SEQ:
+ {
+ uint32_t *value;
+
+ SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize);
+ SCTP_INP_RLOCK(inp);
+ *value = inp->sctp_ep.initial_sequence_debug;
+ SCTP_INP_RUNLOCK(inp);
+ *optsize = sizeof(uint32_t);
+ }
+ break;
+ case SCTP_GET_LOCAL_ADDR_SIZE:
+ {
+ uint32_t *value;
+
+ SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize);
+ SCTP_INP_RLOCK(inp);
+ *value = sctp_count_max_addresses(inp);
+ SCTP_INP_RUNLOCK(inp);
+ *optsize = sizeof(uint32_t);
+ }
+ break;
+ case SCTP_GET_REMOTE_ADDR_SIZE:
+ {
+ uint32_t *value;
+ size_t size;
+ struct sctp_nets *net;
+
+ SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize);
+ /* FIXME MT: change to sctp_assoc_value? */
+ SCTP_FIND_STCB(inp, stcb, (sctp_assoc_t) * value);
+
+ if (stcb) {
+ size = 0;
+ /* Count the sizes */
+ TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+ if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) ||
+ (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET6)) {
+ size += sizeof(struct sockaddr_in6);
+ } else if (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET) {
+ size += sizeof(struct sockaddr_in);
+ } else {
+ /* huh */
+ break;
+ }
+ }
+ SCTP_TCB_UNLOCK(stcb);
+ *value = (uint32_t) size;
+ } else {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTCONN);
+ error = ENOTCONN;
+ }
+ *optsize = sizeof(uint32_t);
+ }
+ break;
+ case SCTP_GET_PEER_ADDRESSES:
+		/*
+		 * Get the address information; an array is passed in for
+		 * us to fill up, and we pack it.
+		 */
+ {
+ size_t cpsz, left;
+ struct sockaddr_storage *sas;
+ struct sctp_nets *net;
+ struct sctp_getaddresses *saddr;
+
+ SCTP_CHECK_AND_CAST(saddr, optval, struct sctp_getaddresses, *optsize);
+ SCTP_FIND_STCB(inp, stcb, saddr->sget_assoc_id);
+
+ if (stcb) {
+ left = (*optsize) - sizeof(struct sctp_getaddresses);
+ *optsize = sizeof(struct sctp_getaddresses);
+ sas = (struct sockaddr_storage *)&saddr->addr[0];
+
+ TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+ if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) ||
+ (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET6)) {
+ cpsz = sizeof(struct sockaddr_in6);
+ } else if (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET) {
+ cpsz = sizeof(struct sockaddr_in);
+ } else {
+ /* huh */
+ break;
+ }
+ if (left < cpsz) {
+ /* not enough room. */
+ break;
+ }
+#ifdef INET6
+ if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) &&
+ (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET)) {
+ /* Must map the address */
+ in6_sin_2_v4mapsin6((struct sockaddr_in *)&net->ro._l_addr,
+ (struct sockaddr_in6 *)sas);
+ } else {
+#endif
+ memcpy(sas, &net->ro._l_addr, cpsz);
+#ifdef INET6
+ }
+#endif
+ ((struct sockaddr_in *)sas)->sin_port = stcb->rport;
+
+ sas = (struct sockaddr_storage *)((caddr_t)sas + cpsz);
+ left -= cpsz;
+ *optsize += cpsz;
+ }
+ SCTP_TCB_UNLOCK(stcb);
+ } else {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT);
+ error = ENOENT;
+ }
+ }
+ break;
+ case SCTP_GET_LOCAL_ADDRESSES:
+ {
+ size_t limit, actual;
+ struct sockaddr_storage *sas;
+ struct sctp_getaddresses *saddr;
+
+ SCTP_CHECK_AND_CAST(saddr, optval, struct sctp_getaddresses, *optsize);
+ SCTP_FIND_STCB(inp, stcb, saddr->sget_assoc_id);
+
+ sas = (struct sockaddr_storage *)&saddr->addr[0];
+ limit = *optsize - sizeof(sctp_assoc_t);
+ actual = sctp_fill_up_addresses(inp, stcb, limit, sas);
+ if (stcb) {
+ SCTP_TCB_UNLOCK(stcb);
+ }
+ *optsize = sizeof(struct sockaddr_storage) + actual;
+ }
+ break;
+ case SCTP_PEER_ADDR_PARAMS:
+ {
+ struct sctp_paddrparams *paddrp;
+ struct sctp_nets *net;
+
+ SCTP_CHECK_AND_CAST(paddrp, optval, struct sctp_paddrparams, *optsize);
+ SCTP_FIND_STCB(inp, stcb, paddrp->spp_assoc_id);
+
+ net = NULL;
+ if (stcb) {
+ net = sctp_findnet(stcb, (struct sockaddr *)&paddrp->spp_address);
+ } else {
+				/*
+				 * We increment here since
+				 * sctp_findassociation_ep_addr() will do a
+				 * decrement if it finds the stcb, as long
+				 * as the locked tcb (last argument) is NOT
+				 * a TCB, aka NULL.
+				 */
+ SCTP_INP_INCR_REF(inp);
+ stcb = sctp_findassociation_ep_addr(&inp, (struct sockaddr *)&paddrp->spp_address, &net, NULL, NULL);
+ if (stcb == NULL) {
+ SCTP_INP_DECR_REF(inp);
+ }
+ }
+ if (stcb && (net == NULL)) {
+ struct sockaddr *sa;
+
+ sa = (struct sockaddr *)&paddrp->spp_address;
+ if (sa->sa_family == AF_INET) {
+ struct sockaddr_in *sin;
+
+ sin = (struct sockaddr_in *)sa;
+ if (sin->sin_addr.s_addr) {
+ error = EINVAL;
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
+ break;
+ }
+ } else if (sa->sa_family == AF_INET6) {
+ struct sockaddr_in6 *sin6;
+
+ sin6 = (struct sockaddr_in6 *)sa;
+ if (!IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
+ error = EINVAL;
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
+ break;
+ }
+ } else {
+ error = EAFNOSUPPORT;
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
+ break;
+ }
+ }
+ if (stcb) {
+			/* Applies to the specific association */
+ paddrp->spp_flags = 0;
+ if (net) {
+ int ovh;
+
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
+ ovh = SCTP_MED_OVERHEAD;
+ } else {
+ ovh = SCTP_MED_V4_OVERHEAD;
+ }
+
+
+ paddrp->spp_pathmaxrxt = net->failure_threshold;
+ paddrp->spp_pathmtu = net->mtu - ovh;
+ /* get flags for HB */
+ if (net->dest_state & SCTP_ADDR_NOHB)
+ paddrp->spp_flags |= SPP_HB_DISABLE;
+ else
+ paddrp->spp_flags |= SPP_HB_ENABLE;
+ /* get flags for PMTU */
+ if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) {
+ paddrp->spp_flags |= SPP_PMTUD_ENABLE;
+ } else {
+ paddrp->spp_flags |= SPP_PMTUD_DISABLE;
+ }
+#ifdef INET
+ if (net->ro._l_addr.sin.sin_family == AF_INET) {
+ paddrp->spp_ipv4_tos = net->tos_flowlabel & 0x000000fc;
+ paddrp->spp_flags |= SPP_IPV4_TOS;
+ }
+#endif
+#ifdef INET6
+ if (net->ro._l_addr.sin6.sin6_family == AF_INET6) {
+ paddrp->spp_ipv6_flowlabel = net->tos_flowlabel;
+ paddrp->spp_flags |= SPP_IPV6_FLOWLABEL;
+ }
+#endif
+ } else {
+ /*
+ * No destination so return default
+ * value
+ */
+ int cnt = 0;
+
+ paddrp->spp_pathmaxrxt = stcb->asoc.def_net_failure;
+ paddrp->spp_pathmtu = sctp_get_frag_point(stcb, &stcb->asoc);
+#ifdef INET
+ paddrp->spp_ipv4_tos = stcb->asoc.default_tos & 0x000000fc;
+ paddrp->spp_flags |= SPP_IPV4_TOS;
+#endif
+#ifdef INET6
+ paddrp->spp_ipv6_flowlabel = stcb->asoc.default_flowlabel;
+ paddrp->spp_flags |= SPP_IPV6_FLOWLABEL;
+#endif
+ /* default settings should be these */
+ if (stcb->asoc.hb_is_disabled == 0) {
+ paddrp->spp_flags |= SPP_HB_ENABLE;
+ } else {
+ paddrp->spp_flags |= SPP_HB_DISABLE;
+ }
+ TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+ if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) {
+ cnt++;
+ }
+ }
+ if (cnt) {
+ paddrp->spp_flags |= SPP_PMTUD_ENABLE;
+ }
+ }
+ paddrp->spp_hbinterval = stcb->asoc.heart_beat_delay;
+ paddrp->spp_assoc_id = sctp_get_associd(stcb);
+ SCTP_TCB_UNLOCK(stcb);
+ } else {
+ /* Use endpoint defaults */
+				SCTP_INP_RLOCK(inp);
+				paddrp->spp_flags = 0;
+				paddrp->spp_pathmaxrxt = inp->sctp_ep.def_net_failure;
+				paddrp->spp_hbinterval = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
+				paddrp->spp_assoc_id = (sctp_assoc_t) 0;
+ /* get inp's default */
+#ifdef INET
+ paddrp->spp_ipv4_tos = inp->ip_inp.inp.inp_ip_tos;
+ paddrp->spp_flags |= SPP_IPV4_TOS;
+#endif
+#ifdef INET6
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
+ paddrp->spp_ipv6_flowlabel = ((struct in6pcb *)inp)->in6p_flowinfo;
+ paddrp->spp_flags |= SPP_IPV6_FLOWLABEL;
+ }
+#endif
+				/* can't return this */
+				paddrp->spp_pathmtu = 0;
+
+				/*
+				 * Default behavior with no stcb; use |= so
+				 * the TOS/flowlabel flags set above are
+				 * preserved.
+				 */
+				paddrp->spp_flags |= SPP_PMTUD_ENABLE;
+
+ if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_DONOT_HEARTBEAT)) {
+ paddrp->spp_flags |= SPP_HB_ENABLE;
+ } else {
+ paddrp->spp_flags |= SPP_HB_DISABLE;
+ }
+ SCTP_INP_RUNLOCK(inp);
+ }
+ *optsize = sizeof(struct sctp_paddrparams);
+ }
+ break;
+ case SCTP_GET_PEER_ADDR_INFO:
+ {
+ struct sctp_paddrinfo *paddri;
+ struct sctp_nets *net;
+
+ SCTP_CHECK_AND_CAST(paddri, optval, struct sctp_paddrinfo, *optsize);
+ SCTP_FIND_STCB(inp, stcb, paddri->spinfo_assoc_id);
+
+ net = NULL;
+ if (stcb) {
+ net = sctp_findnet(stcb, (struct sockaddr *)&paddri->spinfo_address);
+ } else {
+			/*
+			 * We increment here since
+			 * sctp_findassociation_ep_addr() will do a
+			 * decrement if it finds the stcb as long as
+			 * the locked tcb (last argument) is NOT a
+			 * TCB, i.e. NULL.
+			 */
+ SCTP_INP_INCR_REF(inp);
+ stcb = sctp_findassociation_ep_addr(&inp, (struct sockaddr *)&paddri->spinfo_address, &net, NULL, NULL);
+ if (stcb == NULL) {
+ SCTP_INP_DECR_REF(inp);
+ }
+ }
+
+ if ((stcb) && (net)) {
+ paddri->spinfo_state = net->dest_state & (SCTP_REACHABLE_MASK | SCTP_ADDR_NOHB);
+ paddri->spinfo_cwnd = net->cwnd;
+ paddri->spinfo_srtt = ((net->lastsa >> 2) + net->lastsv) >> 1;
+ paddri->spinfo_rto = net->RTO;
+ paddri->spinfo_assoc_id = sctp_get_associd(stcb);
+ SCTP_TCB_UNLOCK(stcb);
+ } else {
+ if (stcb) {
+ SCTP_TCB_UNLOCK(stcb);
+ }
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT);
+ error = ENOENT;
+ }
+ *optsize = sizeof(struct sctp_paddrinfo);
+ }
+ break;
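
The handler above reports per-destination transport state: the reachability bits, the congestion window, a smoothed RTT derived from lastsa/lastsv, and the current RTO. A minimal userland sketch of querying it through getsockopt(); this is not part of the diff, it assumes the usual <netinet/sctp.h> userland header, and the helper name is hypothetical:

    #include <string.h>
    #include <stdio.h>
    #include <sys/socket.h>
    #include <netinet/in.h>
    #include <netinet/sctp.h>

    /* Hypothetical helper: query kernel state for one peer address. */
    static int
    query_peer_info(int sd, const struct sockaddr *peer, socklen_t peerlen)
    {
        struct sctp_paddrinfo pinfo;
        socklen_t len = sizeof(pinfo);

        memset(&pinfo, 0, sizeof(pinfo));
        memcpy(&pinfo.spinfo_address, peer, peerlen);
        if (getsockopt(sd, IPPROTO_SCTP, SCTP_GET_PEER_ADDR_INFO,
            &pinfo, &len) < 0)
            return (-1);
        printf("state=%d cwnd=%u srtt=%u rto=%u\n",
            (int)pinfo.spinfo_state, (unsigned)pinfo.spinfo_cwnd,
            (unsigned)pinfo.spinfo_srtt, (unsigned)pinfo.spinfo_rto);
        return (0);
    }
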
+ case SCTP_PCB_STATUS:
+ {
+ struct sctp_pcbinfo *spcb;
+
+ SCTP_CHECK_AND_CAST(spcb, optval, struct sctp_pcbinfo, *optsize);
+ sctp_fill_pcbinfo(spcb);
+ *optsize = sizeof(struct sctp_pcbinfo);
+ }
+ break;
+
+ case SCTP_STATUS:
+ {
+ struct sctp_nets *net;
+ struct sctp_status *sstat;
+
+ SCTP_CHECK_AND_CAST(sstat, optval, struct sctp_status, *optsize);
+ SCTP_FIND_STCB(inp, stcb, sstat->sstat_assoc_id);
+
+ if (stcb == NULL) {
+			error = EINVAL;
+			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
+ break;
+ }
+		/*
+		 * Passing the state is fine since sctp_constants.h
+		 * will be available to userland.
+		 */
+ sstat->sstat_state = stcb->asoc.state;
+ sstat->sstat_assoc_id = sctp_get_associd(stcb);
+ sstat->sstat_rwnd = stcb->asoc.peers_rwnd;
+ sstat->sstat_unackdata = stcb->asoc.sent_queue_cnt;
+ /*
+ * We can't include chunks that have been passed to
+ * the socket layer. Only things in queue.
+ */
+ sstat->sstat_penddata = (stcb->asoc.cnt_on_reasm_queue +
+ stcb->asoc.cnt_on_all_streams);
+
+ sstat->sstat_instrms = stcb->asoc.streamincnt;
+ sstat->sstat_outstrms = stcb->asoc.streamoutcnt;
+ sstat->sstat_fragmentation_point = sctp_get_frag_point(stcb, &stcb->asoc);
+ memcpy(&sstat->sstat_primary.spinfo_address,
+ &stcb->asoc.primary_destination->ro._l_addr,
+ ((struct sockaddr *)(&stcb->asoc.primary_destination->ro._l_addr))->sa_len);
+ net = stcb->asoc.primary_destination;
+ ((struct sockaddr_in *)&sstat->sstat_primary.spinfo_address)->sin_port = stcb->rport;
+		/*
+		 * Again, userland can consult sctp_constants.h for
+		 * the meaning of the network state.
+		 */
+ sstat->sstat_primary.spinfo_state = net->dest_state & SCTP_REACHABLE_MASK;
+ sstat->sstat_primary.spinfo_cwnd = net->cwnd;
+ sstat->sstat_primary.spinfo_srtt = net->lastsa;
+ sstat->sstat_primary.spinfo_rto = net->RTO;
+ sstat->sstat_primary.spinfo_mtu = net->mtu;
+ sstat->sstat_primary.spinfo_assoc_id = sctp_get_associd(stcb);
+ SCTP_TCB_UNLOCK(stcb);
+ *optsize = sizeof(*sstat);
+ }
+ break;
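
The SCTP_STATUS case copies association-wide counters plus a snapshot of the primary destination. A hedged userland sketch of reading it, assuming a connected one-to-one socket (so sstat_assoc_id can stay 0), the headers from the earlier sketch, and sd being an SCTP socket descriptor:

    struct sctp_status st;
    socklen_t len = sizeof(st);

    memset(&st, 0, sizeof(st));
    if (getsockopt(sd, IPPROTO_SCTP, SCTP_STATUS, &st, &len) == 0)
        printf("unacked=%u pending=%u streams in/out=%u/%u frag=%u\n",
            (unsigned)st.sstat_unackdata, (unsigned)st.sstat_penddata,
            (unsigned)st.sstat_instrms, (unsigned)st.sstat_outstrms,
            (unsigned)st.sstat_fragmentation_point);
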
+ case SCTP_RTOINFO:
+ {
+ struct sctp_rtoinfo *srto;
+
+ SCTP_CHECK_AND_CAST(srto, optval, struct sctp_rtoinfo, *optsize);
+ SCTP_FIND_STCB(inp, stcb, srto->srto_assoc_id);
+
+ if (stcb) {
+ srto->srto_initial = stcb->asoc.initial_rto;
+ srto->srto_max = stcb->asoc.maxrto;
+ srto->srto_min = stcb->asoc.minrto;
+ SCTP_TCB_UNLOCK(stcb);
+ } else {
+ SCTP_INP_RLOCK(inp);
+ srto->srto_initial = inp->sctp_ep.initial_rto;
+ srto->srto_max = inp->sctp_ep.sctp_maxrto;
+ srto->srto_min = inp->sctp_ep.sctp_minrto;
+ SCTP_INP_RUNLOCK(inp);
+ }
+ *optsize = sizeof(*srto);
+ }
+ break;
+ case SCTP_TIMEOUTS:
+ {
+ struct sctp_timeouts *stimo;
+
+ SCTP_CHECK_AND_CAST(stimo, optval, struct sctp_timeouts, *optsize);
+ SCTP_FIND_STCB(inp, stcb, stimo->stimo_assoc_id);
+
+ if (stcb) {
+ stimo->stimo_init = stcb->asoc.timoinit;
+ stimo->stimo_data = stcb->asoc.timodata;
+ stimo->stimo_sack = stcb->asoc.timosack;
+ stimo->stimo_shutdown = stcb->asoc.timoshutdown;
+ stimo->stimo_heartbeat = stcb->asoc.timoheartbeat;
+ stimo->stimo_cookie = stcb->asoc.timocookie;
+ stimo->stimo_shutdownack = stcb->asoc.timoshutdownack;
+ SCTP_TCB_UNLOCK(stcb);
+ } else {
+			error = EINVAL;
+			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
+ }
+ *optsize = sizeof(*stimo);
+ }
+ break;
+ case SCTP_ASSOCINFO:
+ {
+ struct sctp_assocparams *sasoc;
+
+ SCTP_CHECK_AND_CAST(sasoc, optval, struct sctp_assocparams, *optsize);
+ SCTP_FIND_STCB(inp, stcb, sasoc->sasoc_assoc_id);
+
+ if (stcb) {
+ sasoc->sasoc_cookie_life = TICKS_TO_MSEC(stcb->asoc.cookie_life);
+ sasoc->sasoc_asocmaxrxt = stcb->asoc.max_send_times;
+ sasoc->sasoc_number_peer_destinations = stcb->asoc.numnets;
+ sasoc->sasoc_peer_rwnd = stcb->asoc.peers_rwnd;
+ sasoc->sasoc_local_rwnd = stcb->asoc.my_rwnd;
+ SCTP_TCB_UNLOCK(stcb);
+ } else {
+ SCTP_INP_RLOCK(inp);
+ sasoc->sasoc_cookie_life = TICKS_TO_MSEC(inp->sctp_ep.def_cookie_life);
+ sasoc->sasoc_asocmaxrxt = inp->sctp_ep.max_send_times;
+ sasoc->sasoc_number_peer_destinations = 0;
+ sasoc->sasoc_peer_rwnd = 0;
+ sasoc->sasoc_local_rwnd = sbspace(&inp->sctp_socket->so_rcv);
+ SCTP_INP_RUNLOCK(inp);
+ }
+ *optsize = sizeof(*sasoc);
+ }
+ break;
+ case SCTP_DEFAULT_SEND_PARAM:
+ {
+ struct sctp_sndrcvinfo *s_info;
+
+ SCTP_CHECK_AND_CAST(s_info, optval, struct sctp_sndrcvinfo, *optsize);
+ SCTP_FIND_STCB(inp, stcb, s_info->sinfo_assoc_id);
+
+ if (stcb) {
+ memcpy(s_info, &stcb->asoc.def_send, sizeof(stcb->asoc.def_send));
+ SCTP_TCB_UNLOCK(stcb);
+ } else {
+ SCTP_INP_RLOCK(inp);
+ memcpy(s_info, &inp->def_send, sizeof(inp->def_send));
+ SCTP_INP_RUNLOCK(inp);
+ }
+ *optsize = sizeof(*s_info);
+ }
+ break;
+ case SCTP_INITMSG:
+ {
+ struct sctp_initmsg *sinit;
+
+ SCTP_CHECK_AND_CAST(sinit, optval, struct sctp_initmsg, *optsize);
+ SCTP_INP_RLOCK(inp);
+ sinit->sinit_num_ostreams = inp->sctp_ep.pre_open_stream_count;
+ sinit->sinit_max_instreams = inp->sctp_ep.max_open_streams_intome;
+ sinit->sinit_max_attempts = inp->sctp_ep.max_init_times;
+ sinit->sinit_max_init_timeo = inp->sctp_ep.initial_init_rto_max;
+ SCTP_INP_RUNLOCK(inp);
+ *optsize = sizeof(*sinit);
+ }
+ break;
+ case SCTP_PRIMARY_ADDR:
+ /* we allow a "get" operation on this */
+ {
+ struct sctp_setprim *ssp;
+
+ SCTP_CHECK_AND_CAST(ssp, optval, struct sctp_setprim, *optsize);
+ SCTP_FIND_STCB(inp, stcb, ssp->ssp_assoc_id);
+
+ if (stcb) {
+ /* simply copy out the sockaddr_storage... */
+ int len;
+
+ len = *optsize;
+ if (len > stcb->asoc.primary_destination->ro._l_addr.sa.sa_len)
+ len = stcb->asoc.primary_destination->ro._l_addr.sa.sa_len;
+
+ memcpy(&ssp->ssp_addr,
+ &stcb->asoc.primary_destination->ro._l_addr,
+ len);
+ SCTP_TCB_UNLOCK(stcb);
+ } else {
+			error = EINVAL;
+			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
+ }
+ *optsize = sizeof(*ssp);
+ }
+ break;
+
+ case SCTP_HMAC_IDENT:
+ {
+ struct sctp_hmacalgo *shmac;
+ sctp_hmaclist_t *hmaclist;
+ uint32_t size;
+ int i;
+
+ SCTP_CHECK_AND_CAST(shmac, optval, struct sctp_hmacalgo, *optsize);
+
+ SCTP_INP_RLOCK(inp);
+ hmaclist = inp->sctp_ep.local_hmacs;
+ if (hmaclist == NULL) {
+ /* no HMACs to return */
+ *optsize = sizeof(*shmac);
+ SCTP_INP_RUNLOCK(inp);
+ break;
+ }
+ /* is there room for all of the hmac ids? */
+ size = sizeof(*shmac) + (hmaclist->num_algo *
+ sizeof(shmac->shmac_idents[0]));
+ if ((size_t)(*optsize) < size) {
+			error = EINVAL;
+			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
+ SCTP_INP_RUNLOCK(inp);
+ break;
+ }
+		/* copy out the list */
+ shmac->shmac_number_of_idents = hmaclist->num_algo;
+ for (i = 0; i < hmaclist->num_algo; i++) {
+ shmac->shmac_idents[i] = hmaclist->hmac[i];
+ }
+ SCTP_INP_RUNLOCK(inp);
+ *optsize = size;
+ break;
+ }
+ case SCTP_AUTH_ACTIVE_KEY:
+ {
+ struct sctp_authkeyid *scact;
+
+ SCTP_CHECK_AND_CAST(scact, optval, struct sctp_authkeyid, *optsize);
+ SCTP_FIND_STCB(inp, stcb, scact->scact_assoc_id);
+
+ if (stcb) {
+ /* get the active key on the assoc */
+ scact->scact_keynumber = stcb->asoc.authinfo.active_keyid;
+ SCTP_TCB_UNLOCK(stcb);
+ } else {
+ /* get the endpoint active key */
+ SCTP_INP_RLOCK(inp);
+ scact->scact_keynumber = inp->sctp_ep.default_keyid;
+ SCTP_INP_RUNLOCK(inp);
+ }
+ *optsize = sizeof(*scact);
+ break;
+ }
+ case SCTP_LOCAL_AUTH_CHUNKS:
+ {
+ struct sctp_authchunks *sac;
+ sctp_auth_chklist_t *chklist = NULL;
+ size_t size = 0;
+
+ SCTP_CHECK_AND_CAST(sac, optval, struct sctp_authchunks, *optsize);
+ SCTP_FIND_STCB(inp, stcb, sac->gauth_assoc_id);
+
+ if (stcb) {
+ /* get off the assoc */
+ chklist = stcb->asoc.local_auth_chunks;
+ /* is there enough space? */
+ size = sctp_auth_get_chklist_size(chklist);
+ if (*optsize < (sizeof(struct sctp_authchunks) + size)) {
+ error = EINVAL;
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
+ } else {
+				/* copy out the chunks */
+ (void)sctp_serialize_auth_chunks(chklist, sac->gauth_chunks);
+ }
+ SCTP_TCB_UNLOCK(stcb);
+ } else {
+ /* get off the endpoint */
+ SCTP_INP_RLOCK(inp);
+ chklist = inp->sctp_ep.local_auth_chunks;
+ /* is there enough space? */
+ size = sctp_auth_get_chklist_size(chklist);
+ if (*optsize < (sizeof(struct sctp_authchunks) + size)) {
+ error = EINVAL;
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
+ } else {
+				/* copy out the chunks */
+ (void)sctp_serialize_auth_chunks(chklist, sac->gauth_chunks);
+ }
+ SCTP_INP_RUNLOCK(inp);
+ }
+ *optsize = sizeof(struct sctp_authchunks) + size;
+ break;
+ }
+ case SCTP_PEER_AUTH_CHUNKS:
+ {
+ struct sctp_authchunks *sac;
+ sctp_auth_chklist_t *chklist = NULL;
+ size_t size = 0;
+
+ SCTP_CHECK_AND_CAST(sac, optval, struct sctp_authchunks, *optsize);
+ SCTP_FIND_STCB(inp, stcb, sac->gauth_assoc_id);
+
+ if (stcb) {
+ /* get off the assoc */
+ chklist = stcb->asoc.peer_auth_chunks;
+ /* is there enough space? */
+ size = sctp_auth_get_chklist_size(chklist);
+ if (*optsize < (sizeof(struct sctp_authchunks) + size)) {
+ error = EINVAL;
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
+ } else {
+				/* copy out the chunks */
+ (void)sctp_serialize_auth_chunks(chklist, sac->gauth_chunks);
+ }
+ SCTP_TCB_UNLOCK(stcb);
+ } else {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT);
+ error = ENOENT;
+ }
+ *optsize = sizeof(struct sctp_authchunks) + size;
+ break;
+ }
+
+ default:
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOPROTOOPT);
+ error = ENOPROTOOPT;
+ *optsize = 0;
+ break;
+ } /* end switch (sopt->sopt_name) */
+ return (error);
+}
+
+static int
+sctp_setopt(struct socket *so, int optname, void *optval, size_t optsize,
+ void *p)
+{
+ int error, set_opt;
+ uint32_t *mopt;
+ struct sctp_tcb *stcb = NULL;
+ struct sctp_inpcb *inp = NULL;
+ uint32_t vrf_id;
+
+ if (optval == NULL) {
+ SCTP_PRINTF("optval is NULL\n");
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ return (EINVAL);
+ }
+ inp = (struct sctp_inpcb *)so->so_pcb;
+	if (inp == NULL) {
+		SCTP_PRINTF("inp is NULL?\n");
+		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+		return (EINVAL);
+ }
+ vrf_id = inp->def_vrf_id;
+
+ error = 0;
+ switch (optname) {
+ case SCTP_NODELAY:
+ case SCTP_AUTOCLOSE:
+ case SCTP_AUTO_ASCONF:
+ case SCTP_EXPLICIT_EOR:
+ case SCTP_DISABLE_FRAGMENTS:
+ case SCTP_USE_EXT_RCVINFO:
+ case SCTP_I_WANT_MAPPED_V4_ADDR:
+ /* copy in the option value */
+ SCTP_CHECK_AND_CAST(mopt, optval, uint32_t, optsize);
+ set_opt = 0;
+ if (error)
+ break;
+ switch (optname) {
+ case SCTP_DISABLE_FRAGMENTS:
+ set_opt = SCTP_PCB_FLAGS_NO_FRAGMENT;
+ break;
+ case SCTP_AUTO_ASCONF:
+ /*
+ * NOTE: we don't really support this flag
+ */
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
+ /* only valid for bound all sockets */
+ set_opt = SCTP_PCB_FLAGS_AUTO_ASCONF;
+ } else {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ return (EINVAL);
+ }
+ break;
+ case SCTP_EXPLICIT_EOR:
+ set_opt = SCTP_PCB_FLAGS_EXPLICIT_EOR;
+ break;
+ case SCTP_USE_EXT_RCVINFO:
+ set_opt = SCTP_PCB_FLAGS_EXT_RCVINFO;
+ break;
+ case SCTP_I_WANT_MAPPED_V4_ADDR:
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
+ set_opt = SCTP_PCB_FLAGS_NEEDS_MAPPED_V4;
+ } else {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ return (EINVAL);
+ }
+ break;
+ case SCTP_NODELAY:
+ set_opt = SCTP_PCB_FLAGS_NODELAY;
+ break;
+ case SCTP_AUTOCLOSE:
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+ (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ return (EINVAL);
+ }
+ set_opt = SCTP_PCB_FLAGS_AUTOCLOSE;
+ /*
+			 * The value is in ticks. Note this does not affect
+ * old associations, only new ones.
+ */
+ inp->sctp_ep.auto_close_time = SEC_TO_TICKS(*mopt);
+ break;
+ }
+ SCTP_INP_WLOCK(inp);
+ if (*mopt != 0) {
+ sctp_feature_on(inp, set_opt);
+ } else {
+ sctp_feature_off(inp, set_opt);
+ }
+ SCTP_INP_WUNLOCK(inp);
+ break;
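
All of the options in the group above share one path: a uint32_t is copied in, mapped to a PCB feature flag via set_opt, and toggled with sctp_feature_on()/sctp_feature_off(). From userland this is just a boolean setsockopt(); a sketch under the same assumptions as the earlier examples (sd is an SCTP socket):

    uint32_t on = 1;

    /* Maps to SCTP_PCB_FLAGS_NODELAY in the switch above. */
    if (setsockopt(sd, IPPROTO_SCTP, SCTP_NODELAY, &on, sizeof(on)) < 0)
        perror("SCTP_NODELAY");
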
+ case SCTP_REUSE_PORT:
+ {
+ SCTP_CHECK_AND_CAST(mopt, optval, uint32_t, optsize);
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) == 0) {
+ /* Can't set it after we are bound */
+ error = EINVAL;
+ break;
+ }
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE)) {
+ /* Can't do this for a 1-m socket */
+ error = EINVAL;
+ break;
+ }
+			if (*mopt != 0)
+ sctp_feature_on(inp, SCTP_PCB_FLAGS_PORTREUSE);
+ else
+ sctp_feature_off(inp, SCTP_PCB_FLAGS_PORTREUSE);
+ }
+ break;
+ case SCTP_PARTIAL_DELIVERY_POINT:
+ {
+ uint32_t *value;
+
+ SCTP_CHECK_AND_CAST(value, optval, uint32_t, optsize);
+ if (*value > SCTP_SB_LIMIT_RCV(so)) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ break;
+ }
+ inp->partial_delivery_point = *value;
+ }
+ break;
+ case SCTP_FRAGMENT_INTERLEAVE:
+ /* not yet until we re-write sctp_recvmsg() */
+ {
+ uint32_t *level;
+
+ SCTP_CHECK_AND_CAST(level, optval, uint32_t, optsize);
+ if (*level == SCTP_FRAG_LEVEL_2) {
+ sctp_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE);
+ sctp_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS);
+ } else if (*level == SCTP_FRAG_LEVEL_1) {
+ sctp_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE);
+ sctp_feature_off(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS);
+ } else if (*level == SCTP_FRAG_LEVEL_0) {
+ sctp_feature_off(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE);
+ sctp_feature_off(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS);
+			} else {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ }
+ }
+ break;
+ case SCTP_CMT_ON_OFF:
+ if (SCTP_BASE_SYSCTL(sctp_cmt_on_off)) {
+ struct sctp_assoc_value *av;
+
+ SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize);
+ SCTP_FIND_STCB(inp, stcb, av->assoc_id);
+ if (stcb) {
+ if (av->assoc_value != 0)
+ stcb->asoc.sctp_cmt_on_off = 1;
+ else
+ stcb->asoc.sctp_cmt_on_off = 0;
+ SCTP_TCB_UNLOCK(stcb);
+ } else {
+ SCTP_INP_WLOCK(inp);
+ if (av->assoc_value != 0)
+ inp->sctp_cmt_on_off = 1;
+ else
+ inp->sctp_cmt_on_off = 0;
+ SCTP_INP_WUNLOCK(inp);
+ }
+ } else {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOPROTOOPT);
+ error = ENOPROTOOPT;
+ }
+ break;
+ /* JRS - Set socket option for pluggable congestion control */
+ case SCTP_PLUGGABLE_CC:
+ {
+ struct sctp_assoc_value *av;
+
+ SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize);
+ SCTP_FIND_STCB(inp, stcb, av->assoc_id);
+ if (stcb) {
+ switch (av->assoc_value) {
+ /*
+ * JRS - Standard TCP congestion
+ * control
+ */
+ case SCTP_CC_RFC2581:
+ {
+ stcb->asoc.congestion_control_module = SCTP_CC_RFC2581;
+ stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_set_initial_cc_param;
+ stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_cwnd_update_after_sack;
+ stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_cwnd_update_after_fr;
+ stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_cwnd_update_after_timeout;
+ stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_cwnd_update_after_ecn_echo;
+ stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped;
+ stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output;
+ stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_cwnd_update_after_fr_timer;
+ SCTP_TCB_UNLOCK(stcb);
+ break;
+ }
+ /*
+ * JRS - High Speed TCP congestion
+ * control (Floyd)
+ */
+ case SCTP_CC_HSTCP:
+ {
+ stcb->asoc.congestion_control_module = SCTP_CC_HSTCP;
+ stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_set_initial_cc_param;
+ stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_hs_cwnd_update_after_sack;
+ stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_hs_cwnd_update_after_fr;
+ stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_cwnd_update_after_timeout;
+ stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_cwnd_update_after_ecn_echo;
+ stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped;
+ stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output;
+ stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_cwnd_update_after_fr_timer;
+ SCTP_TCB_UNLOCK(stcb);
+ break;
+ }
+ /* JRS - HTCP congestion control */
+ case SCTP_CC_HTCP:
+ {
+ stcb->asoc.congestion_control_module = SCTP_CC_HTCP;
+ stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_htcp_set_initial_cc_param;
+ stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_htcp_cwnd_update_after_sack;
+ stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_htcp_cwnd_update_after_fr;
+ stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_htcp_cwnd_update_after_timeout;
+ stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_htcp_cwnd_update_after_ecn_echo;
+ stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped;
+ stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output;
+ stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_htcp_cwnd_update_after_fr_timer;
+ SCTP_TCB_UNLOCK(stcb);
+ break;
+ }
+ /*
+ * JRS - All other values are
+ * invalid
+ */
+ default:
+ {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ SCTP_TCB_UNLOCK(stcb);
+ break;
+ }
+ }
+ } else {
+ switch (av->assoc_value) {
+ case SCTP_CC_RFC2581:
+ case SCTP_CC_HSTCP:
+ case SCTP_CC_HTCP:
+ inp->sctp_ep.sctp_default_cc_module = av->assoc_value;
+ break;
+ default:
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ break;
+				}
+ }
+ }
+ break;
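
With an association the handler swaps the entire cc_functions vtable in place; without one it only records the endpoint default module for future associations. A sketch selecting HTCP, same assumptions as the earlier examples:

    struct sctp_assoc_value av;

    memset(&av, 0, sizeof(av));
    av.assoc_id = 0;               /* no matching assoc: endpoint default */
    av.assoc_value = SCTP_CC_HTCP;
    if (setsockopt(sd, IPPROTO_SCTP, SCTP_PLUGGABLE_CC, &av, sizeof(av)) < 0)
        perror("SCTP_PLUGGABLE_CC");
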
+ case SCTP_CLR_STAT_LOG:
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP);
+ error = EOPNOTSUPP;
+ break;
+ case SCTP_CONTEXT:
+ {
+ struct sctp_assoc_value *av;
+
+ SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize);
+ SCTP_FIND_STCB(inp, stcb, av->assoc_id);
+
+ if (stcb) {
+ stcb->asoc.context = av->assoc_value;
+ SCTP_TCB_UNLOCK(stcb);
+ } else {
+ SCTP_INP_WLOCK(inp);
+ inp->sctp_context = av->assoc_value;
+ SCTP_INP_WUNLOCK(inp);
+ }
+ }
+ break;
+ case SCTP_VRF_ID:
+ {
+ uint32_t *default_vrfid;
+
+ SCTP_CHECK_AND_CAST(default_vrfid, optval, uint32_t, optsize);
+ if (*default_vrfid > SCTP_MAX_VRF_ID) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ break;
+ }
+ inp->def_vrf_id = *default_vrfid;
+ break;
+ }
+ case SCTP_DEL_VRF_ID:
+ {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP);
+ error = EOPNOTSUPP;
+ break;
+ }
+ case SCTP_ADD_VRF_ID:
+ {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP);
+ error = EOPNOTSUPP;
+ break;
+ }
+ case SCTP_DELAYED_SACK:
+ {
+ struct sctp_sack_info *sack;
+
+ SCTP_CHECK_AND_CAST(sack, optval, struct sctp_sack_info, optsize);
+ SCTP_FIND_STCB(inp, stcb, sack->sack_assoc_id);
+ if (sack->sack_delay) {
+ if (sack->sack_delay > SCTP_MAX_SACK_DELAY)
+ sack->sack_delay = SCTP_MAX_SACK_DELAY;
+ }
+ if (stcb) {
+ if (sack->sack_delay) {
+ if (MSEC_TO_TICKS(sack->sack_delay) < 1) {
+ sack->sack_delay = TICKS_TO_MSEC(1);
+ }
+ stcb->asoc.delayed_ack = sack->sack_delay;
+ }
+ if (sack->sack_freq) {
+ stcb->asoc.sack_freq = sack->sack_freq;
+ }
+ SCTP_TCB_UNLOCK(stcb);
+ } else {
+ SCTP_INP_WLOCK(inp);
+ if (sack->sack_delay) {
+ if (MSEC_TO_TICKS(sack->sack_delay) < 1) {
+ sack->sack_delay = TICKS_TO_MSEC(1);
+ }
+ inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV] = MSEC_TO_TICKS(sack->sack_delay);
+ }
+ if (sack->sack_freq) {
+ inp->sctp_ep.sctp_sack_freq = sack->sack_freq;
+ }
+ SCTP_INP_WUNLOCK(inp);
+ }
+ break;
+ }
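
Note the clamping above: a nonzero delay is capped at SCTP_MAX_SACK_DELAY and rounded up to at least one tick, while zero fields leave the current setting untouched. A sketch, same assumptions as the earlier examples:

    struct sctp_sack_info si;

    memset(&si, 0, sizeof(si));
    si.sack_assoc_id = 0; /* endpoint defaults when no assoc matches */
    si.sack_delay = 100;  /* ms, clamped to SCTP_MAX_SACK_DELAY above */
    si.sack_freq = 2;     /* SACK every second packet */
    if (setsockopt(sd, IPPROTO_SCTP, SCTP_DELAYED_SACK, &si, sizeof(si)) < 0)
        perror("SCTP_DELAYED_SACK");
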
+ case SCTP_AUTH_CHUNK:
+ {
+ struct sctp_authchunk *sauth;
+
+ SCTP_CHECK_AND_CAST(sauth, optval, struct sctp_authchunk, optsize);
+
+ SCTP_INP_WLOCK(inp);
+ if (sctp_auth_add_chunk(sauth->sauth_chunk, inp->sctp_ep.local_auth_chunks)) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ }
+ SCTP_INP_WUNLOCK(inp);
+ break;
+ }
+ case SCTP_AUTH_KEY:
+ {
+ struct sctp_authkey *sca;
+ struct sctp_keyhead *shared_keys;
+ sctp_sharedkey_t *shared_key;
+ sctp_key_t *key = NULL;
+ size_t size;
+
+ SCTP_CHECK_AND_CAST(sca, optval, struct sctp_authkey, optsize);
+ SCTP_FIND_STCB(inp, stcb, sca->sca_assoc_id);
+ size = optsize - sizeof(*sca);
+
+ if (stcb) {
+ /* set it on the assoc */
+ shared_keys = &stcb->asoc.shared_keys;
+ /* clear the cached keys for this key id */
+ sctp_clear_cachedkeys(stcb, sca->sca_keynumber);
+ /*
+ * create the new shared key and
+ * insert/replace it
+ */
+ if (size > 0) {
+ key = sctp_set_key(sca->sca_key, (uint32_t) size);
+ if (key == NULL) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOMEM);
+ error = ENOMEM;
+ SCTP_TCB_UNLOCK(stcb);
+ break;
+ }
+ }
+ shared_key = sctp_alloc_sharedkey();
+ if (shared_key == NULL) {
+ sctp_free_key(key);
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOMEM);
+ error = ENOMEM;
+ SCTP_TCB_UNLOCK(stcb);
+ break;
+ }
+ shared_key->key = key;
+ shared_key->keyid = sca->sca_keynumber;
+ error = sctp_insert_sharedkey(shared_keys, shared_key);
+ SCTP_TCB_UNLOCK(stcb);
+ } else {
+ /* set it on the endpoint */
+ SCTP_INP_WLOCK(inp);
+ shared_keys = &inp->sctp_ep.shared_keys;
+ /*
+ * clear the cached keys on all assocs for
+ * this key id
+ */
+ sctp_clear_cachedkeys_ep(inp, sca->sca_keynumber);
+ /*
+ * create the new shared key and
+ * insert/replace it
+ */
+ if (size > 0) {
+ key = sctp_set_key(sca->sca_key, (uint32_t) size);
+ if (key == NULL) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOMEM);
+ error = ENOMEM;
+ SCTP_INP_WUNLOCK(inp);
+ break;
+ }
+ }
+ shared_key = sctp_alloc_sharedkey();
+ if (shared_key == NULL) {
+ sctp_free_key(key);
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOMEM);
+ error = ENOMEM;
+ SCTP_INP_WUNLOCK(inp);
+ break;
+ }
+ shared_key->key = key;
+ shared_key->keyid = sca->sca_keynumber;
+ error = sctp_insert_sharedkey(shared_keys, shared_key);
+ SCTP_INP_WUNLOCK(inp);
+ }
+ break;
+ }
+ case SCTP_HMAC_IDENT:
+ {
+ struct sctp_hmacalgo *shmac;
+ sctp_hmaclist_t *hmaclist;
+ uint16_t hmacid;
+ uint32_t i;
+		size_t found;
+
+ SCTP_CHECK_AND_CAST(shmac, optval, struct sctp_hmacalgo, optsize);
+ if (optsize < sizeof(struct sctp_hmacalgo) + shmac->shmac_number_of_idents * sizeof(uint16_t)) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ break;
+ }
+ hmaclist = sctp_alloc_hmaclist(shmac->shmac_number_of_idents);
+ if (hmaclist == NULL) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOMEM);
+ error = ENOMEM;
+ break;
+ }
+ for (i = 0; i < shmac->shmac_number_of_idents; i++) {
+ hmacid = shmac->shmac_idents[i];
+ if (sctp_auth_add_hmacid(hmaclist, hmacid)) {
+				/* invalid HMACs were found */
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ sctp_free_hmaclist(hmaclist);
+ goto sctp_set_hmac_done;
+ }
+ }
+ found = 0;
+ for (i = 0; i < hmaclist->num_algo; i++) {
+ if (hmaclist->hmac[i] == SCTP_AUTH_HMAC_ID_SHA1) {
+ /* already in list */
+ found = 1;
+ }
+ }
+ if (!found) {
+ sctp_free_hmaclist(hmaclist);
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ break;
+ }
+ /* set it on the endpoint */
+ SCTP_INP_WLOCK(inp);
+ if (inp->sctp_ep.local_hmacs)
+ sctp_free_hmaclist(inp->sctp_ep.local_hmacs);
+ inp->sctp_ep.local_hmacs = hmaclist;
+ SCTP_INP_WUNLOCK(inp);
+ sctp_set_hmac_done:
+ break;
+ }
+ case SCTP_AUTH_ACTIVE_KEY:
+ {
+ struct sctp_authkeyid *scact;
+
+ SCTP_CHECK_AND_CAST(scact, optval, struct sctp_authkeyid,
+ optsize);
+ SCTP_FIND_STCB(inp, stcb, scact->scact_assoc_id);
+
+ /* set the active key on the right place */
+ if (stcb) {
+ /* set the active key on the assoc */
+ if (sctp_auth_setactivekey(stcb,
+ scact->scact_keynumber)) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL,
+ SCTP_FROM_SCTP_USRREQ,
+ EINVAL);
+ error = EINVAL;
+ }
+ SCTP_TCB_UNLOCK(stcb);
+ } else {
+ /* set the active key on the endpoint */
+ SCTP_INP_WLOCK(inp);
+ if (sctp_auth_setactivekey_ep(inp,
+ scact->scact_keynumber)) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL,
+ SCTP_FROM_SCTP_USRREQ,
+ EINVAL);
+ error = EINVAL;
+ }
+ SCTP_INP_WUNLOCK(inp);
+ }
+ break;
+ }
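
Activation is separate from installation: SCTP_AUTH_KEY (above) inserts the shared key material, and this case then makes a key number the active one, either per association or endpoint-wide. A sketch, same assumptions as the earlier examples; key number 1 is illustrative:

    struct sctp_authkeyid akid;

    memset(&akid, 0, sizeof(akid));
    akid.scact_assoc_id = 0;  /* endpoint-wide */
    akid.scact_keynumber = 1; /* previously installed via SCTP_AUTH_KEY */
    if (setsockopt(sd, IPPROTO_SCTP, SCTP_AUTH_ACTIVE_KEY,
        &akid, sizeof(akid)) < 0)
        perror("SCTP_AUTH_ACTIVE_KEY");
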
+ case SCTP_AUTH_DELETE_KEY:
+ {
+ struct sctp_authkeyid *scdel;
+
+ SCTP_CHECK_AND_CAST(scdel, optval, struct sctp_authkeyid,
+ optsize);
+ SCTP_FIND_STCB(inp, stcb, scdel->scact_assoc_id);
+
+ /* delete the key from the right place */
+ if (stcb) {
+ if (sctp_delete_sharedkey(stcb,
+ scdel->scact_keynumber)) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL,
+ SCTP_FROM_SCTP_USRREQ,
+ EINVAL);
+ error = EINVAL;
+ }
+ SCTP_TCB_UNLOCK(stcb);
+ } else {
+ SCTP_INP_WLOCK(inp);
+ if (sctp_delete_sharedkey_ep(inp,
+ scdel->scact_keynumber)) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL,
+ SCTP_FROM_SCTP_USRREQ,
+ EINVAL);
+ error = EINVAL;
+ }
+ SCTP_INP_WUNLOCK(inp);
+ }
+ break;
+ }
+ case SCTP_AUTH_DEACTIVATE_KEY:
+ {
+ struct sctp_authkeyid *keyid;
+
+ SCTP_CHECK_AND_CAST(keyid, optval, struct sctp_authkeyid,
+ optsize);
+ SCTP_FIND_STCB(inp, stcb, keyid->scact_assoc_id);
+
+ /* deactivate the key from the right place */
+ if (stcb) {
+ if (sctp_deact_sharedkey(stcb,
+ keyid->scact_keynumber)) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL,
+ SCTP_FROM_SCTP_USRREQ,
+ EINVAL);
+ error = EINVAL;
+ }
+ SCTP_TCB_UNLOCK(stcb);
+ } else {
+ SCTP_INP_WLOCK(inp);
+ if (sctp_deact_sharedkey_ep(inp,
+ keyid->scact_keynumber)) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL,
+ SCTP_FROM_SCTP_USRREQ,
+ EINVAL);
+ error = EINVAL;
+ }
+ SCTP_INP_WUNLOCK(inp);
+ }
+ break;
+ }
+
+ case SCTP_RESET_STREAMS:
+ {
+ struct sctp_stream_reset *strrst;
+ uint8_t send_in = 0, send_tsn = 0, send_out = 0,
+ addstream = 0;
+ uint16_t addstrmcnt = 0;
+ int i;
+
+ SCTP_CHECK_AND_CAST(strrst, optval, struct sctp_stream_reset, optsize);
+ SCTP_FIND_STCB(inp, stcb, strrst->strrst_assoc_id);
+
+ if (stcb == NULL) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT);
+ error = ENOENT;
+ break;
+ }
+ if (stcb->asoc.peer_supports_strreset == 0) {
+ /*
+ * Peer does not support it, we return
+ * protocol not supported since this is true
+ * for this feature and this peer, not the
+ * socket request in general.
+ */
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EPROTONOSUPPORT);
+ error = EPROTONOSUPPORT;
+ SCTP_TCB_UNLOCK(stcb);
+ break;
+ }
+ if (stcb->asoc.stream_reset_outstanding) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EALREADY);
+ error = EALREADY;
+ SCTP_TCB_UNLOCK(stcb);
+ break;
+ }
+ if (strrst->strrst_flags == SCTP_RESET_LOCAL_RECV) {
+ send_in = 1;
+ } else if (strrst->strrst_flags == SCTP_RESET_LOCAL_SEND) {
+ send_out = 1;
+ } else if (strrst->strrst_flags == SCTP_RESET_BOTH) {
+ send_in = 1;
+ send_out = 1;
+ } else if (strrst->strrst_flags == SCTP_RESET_TSN) {
+ send_tsn = 1;
+ } else if (strrst->strrst_flags == SCTP_RESET_ADD_STREAMS) {
+ if (send_tsn ||
+ send_in ||
+ send_out) {
+ /* We can't do that and add streams */
+ error = EINVAL;
+ goto skip_stuff;
+ }
+ if (stcb->asoc.stream_reset_outstanding) {
+ error = EBUSY;
+ goto skip_stuff;
+ }
+ addstream = 1;
+ /* We allocate here */
+ addstrmcnt = strrst->strrst_num_streams;
+ if ((int)(addstrmcnt + stcb->asoc.streamoutcnt) > 0xffff) {
+ /* You can't have more than 64k */
+ error = EINVAL;
+ goto skip_stuff;
+ }
+ if ((stcb->asoc.strm_realoutsize - stcb->asoc.streamoutcnt) < addstrmcnt) {
+ /* Need to allocate more */
+ struct sctp_stream_out *oldstream;
+ struct sctp_stream_queue_pending *sp;
+ int removed;
+
+ oldstream = stcb->asoc.strmout;
+ /* get some more */
+ SCTP_MALLOC(stcb->asoc.strmout, struct sctp_stream_out *,
+ ((stcb->asoc.streamoutcnt + addstrmcnt) * sizeof(struct sctp_stream_out)),
+ SCTP_M_STRMO);
+ if (stcb->asoc.strmout == NULL) {
+ stcb->asoc.strmout = oldstream;
+ error = ENOMEM;
+ goto skip_stuff;
+ }
+					/*
+					 * Now copy over the old outgoing
+					 * stream state and initialize the
+					 * new streams.
+					 */
+ SCTP_TCB_SEND_LOCK(stcb);
+ for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
+ TAILQ_INIT(&stcb->asoc.strmout[i].outqueue);
+ stcb->asoc.strmout[i].next_sequence_sent = oldstream[i].next_sequence_sent;
+ stcb->asoc.strmout[i].last_msg_incomplete = oldstream[i].last_msg_incomplete;
+ stcb->asoc.strmout[i].stream_no = i;
+ if (oldstream[i].next_spoke.tqe_next) {
+ sctp_remove_from_wheel(stcb, &stcb->asoc, &oldstream[i], 1);
+ stcb->asoc.strmout[i].next_spoke.tqe_next = NULL;
+ stcb->asoc.strmout[i].next_spoke.tqe_prev = NULL;
+ removed = 1;
+ } else {
+ /* not on out wheel */
+ stcb->asoc.strmout[i].next_spoke.tqe_next = NULL;
+ stcb->asoc.strmout[i].next_spoke.tqe_prev = NULL;
+ removed = 0;
+ }
+ /*
+ * now anything on those
+ * queues?
+ */
+ while (TAILQ_EMPTY(&oldstream[i].outqueue) == 0) {
+ sp = TAILQ_FIRST(&oldstream[i].outqueue);
+ TAILQ_REMOVE(&oldstream[i].outqueue, sp, next);
+ TAILQ_INSERT_TAIL(&stcb->asoc.strmout[i].outqueue, sp, next);
+ }
+ /* Did we disrupt the wheel? */
+ if (removed) {
+ sctp_insert_on_wheel(stcb,
+ &stcb->asoc,
+ &stcb->asoc.strmout[i],
+ 1);
+ }
+ /*
+ * Now move assoc pointers
+ * too
+ */
+ if (stcb->asoc.last_out_stream == &oldstream[i]) {
+ stcb->asoc.last_out_stream = &stcb->asoc.strmout[i];
+ }
+ if (stcb->asoc.locked_on_sending == &oldstream[i]) {
+ stcb->asoc.locked_on_sending = &stcb->asoc.strmout[i];
+ }
+ }
+ /* now the new streams */
+ for (i = stcb->asoc.streamoutcnt; i < (stcb->asoc.streamoutcnt + addstrmcnt); i++) {
+ stcb->asoc.strmout[i].next_sequence_sent = 0x0;
+ TAILQ_INIT(&stcb->asoc.strmout[i].outqueue);
+ stcb->asoc.strmout[i].stream_no = i;
+ stcb->asoc.strmout[i].last_msg_incomplete = 0;
+ stcb->asoc.strmout[i].next_spoke.tqe_next = NULL;
+ stcb->asoc.strmout[i].next_spoke.tqe_prev = NULL;
+ }
+					stcb->asoc.strm_realoutsize = stcb->asoc.streamoutcnt + addstrmcnt;
+					SCTP_FREE(oldstream, SCTP_M_STRMO);
+					/* Release the send lock taken for the reallocation. */
+					SCTP_TCB_SEND_UNLOCK(stcb);
+				}
+				goto skip_stuff;
+ } else {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ SCTP_TCB_UNLOCK(stcb);
+ break;
+ }
+ for (i = 0; i < strrst->strrst_num_streams; i++) {
+			if ((send_in) &&
+			    (strrst->strrst_list[i] > stcb->asoc.streamincnt)) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ goto get_out;
+ }
+ if ((send_out) &&
+ (strrst->strrst_list[i] > stcb->asoc.streamoutcnt)) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ goto get_out;
+ }
+ }
+ skip_stuff:
+ if (error) {
+ get_out:
+ SCTP_TCB_UNLOCK(stcb);
+ break;
+ }
+ error = sctp_send_str_reset_req(stcb, strrst->strrst_num_streams,
+ strrst->strrst_list,
+ send_out, (stcb->asoc.str_reset_seq_in - 3),
+ send_in, send_tsn, addstream, addstrmcnt);
+
+ sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_REQ, SCTP_SO_LOCKED);
+ SCTP_TCB_UNLOCK(stcb);
+ }
+ break;
+
+ case SCTP_CONNECT_X:
+ if (optsize < (sizeof(int) + sizeof(struct sockaddr_in))) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ break;
+ }
+ error = sctp_do_connect_x(so, inp, optval, optsize, p, 0);
+ break;
+
+ case SCTP_CONNECT_X_DELAYED:
+ if (optsize < (sizeof(int) + sizeof(struct sockaddr_in))) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ break;
+ }
+ error = sctp_do_connect_x(so, inp, optval, optsize, p, 1);
+ break;
+
+ case SCTP_CONNECT_X_COMPLETE:
+ {
+ struct sockaddr *sa;
+ struct sctp_nets *net;
+
+ /* FIXME MT: check correct? */
+ SCTP_CHECK_AND_CAST(sa, optval, struct sockaddr, optsize);
+
+ /* find tcb */
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
+ SCTP_INP_RLOCK(inp);
+ stcb = LIST_FIRST(&inp->sctp_asoc_list);
+ if (stcb) {
+ SCTP_TCB_LOCK(stcb);
+ net = sctp_findnet(stcb, sa);
+ }
+ SCTP_INP_RUNLOCK(inp);
+ } else {
+			/*
+			 * We increment here since
+			 * sctp_findassociation_ep_addr() will do a
+			 * decrement if it finds the stcb as long as
+			 * the locked tcb (last argument) is NOT a
+			 * TCB, i.e. NULL.
+			 */
+ SCTP_INP_INCR_REF(inp);
+ stcb = sctp_findassociation_ep_addr(&inp, sa, &net, NULL, NULL);
+ if (stcb == NULL) {
+ SCTP_INP_DECR_REF(inp);
+ }
+ }
+
+ if (stcb == NULL) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT);
+ error = ENOENT;
+ break;
+ }
+ if (stcb->asoc.delayed_connection == 1) {
+ stcb->asoc.delayed_connection = 0;
+ (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);
+ sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb,
+ stcb->asoc.primary_destination,
+ SCTP_FROM_SCTP_USRREQ + SCTP_LOC_9);
+ sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED);
+ } else {
+ /*
+ * already expired or did not use delayed
+ * connectx
+ */
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EALREADY);
+ error = EALREADY;
+ }
+ SCTP_TCB_UNLOCK(stcb);
+ }
+ break;
+ case SCTP_MAX_BURST:
+ {
+ uint8_t *burst;
+
+ SCTP_CHECK_AND_CAST(burst, optval, uint8_t, optsize);
+
+ SCTP_INP_WLOCK(inp);
+ if (*burst) {
+ inp->sctp_ep.max_burst = *burst;
+ }
+ SCTP_INP_WUNLOCK(inp);
+ }
+ break;
+ case SCTP_MAXSEG:
+ {
+ struct sctp_assoc_value *av;
+ int ovh;
+
+ SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize);
+ SCTP_FIND_STCB(inp, stcb, av->assoc_id);
+
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
+ ovh = SCTP_MED_OVERHEAD;
+ } else {
+ ovh = SCTP_MED_V4_OVERHEAD;
+ }
+ if (stcb) {
+ if (av->assoc_value) {
+ stcb->asoc.sctp_frag_point = (av->assoc_value + ovh);
+ } else {
+ stcb->asoc.sctp_frag_point = SCTP_DEFAULT_MAXSEGMENT;
+ }
+ SCTP_TCB_UNLOCK(stcb);
+ } else {
+ SCTP_INP_WLOCK(inp);
+ /*
+ * FIXME MT: I think this is not in tune
+ * with the API ID
+ */
+ if (av->assoc_value) {
+ inp->sctp_frag_point = (av->assoc_value + ovh);
+ } else {
+ inp->sctp_frag_point = SCTP_DEFAULT_MAXSEGMENT;
+ }
+ SCTP_INP_WUNLOCK(inp);
+ }
+ }
+ break;
+ case SCTP_EVENTS:
+ {
+ struct sctp_event_subscribe *events;
+
+ SCTP_CHECK_AND_CAST(events, optval, struct sctp_event_subscribe, optsize);
+
+ SCTP_INP_WLOCK(inp);
+ if (events->sctp_data_io_event) {
+ sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT);
+ } else {
+ sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT);
+ }
+
+ if (events->sctp_association_event) {
+ sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVASSOCEVNT);
+ } else {
+ sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVASSOCEVNT);
+ }
+
+ if (events->sctp_address_event) {
+ sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVPADDREVNT);
+ } else {
+ sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVPADDREVNT);
+ }
+
+ if (events->sctp_send_failure_event) {
+ sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVSENDFAILEVNT);
+ } else {
+ sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVSENDFAILEVNT);
+ }
+
+ if (events->sctp_peer_error_event) {
+ sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVPEERERR);
+ } else {
+ sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVPEERERR);
+ }
+
+ if (events->sctp_shutdown_event) {
+ sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT);
+ } else {
+ sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT);
+ }
+
+ if (events->sctp_partial_delivery_event) {
+ sctp_feature_on(inp, SCTP_PCB_FLAGS_PDAPIEVNT);
+ } else {
+ sctp_feature_off(inp, SCTP_PCB_FLAGS_PDAPIEVNT);
+ }
+
+ if (events->sctp_adaptation_layer_event) {
+ sctp_feature_on(inp, SCTP_PCB_FLAGS_ADAPTATIONEVNT);
+ } else {
+ sctp_feature_off(inp, SCTP_PCB_FLAGS_ADAPTATIONEVNT);
+ }
+
+ if (events->sctp_authentication_event) {
+ sctp_feature_on(inp, SCTP_PCB_FLAGS_AUTHEVNT);
+ } else {
+ sctp_feature_off(inp, SCTP_PCB_FLAGS_AUTHEVNT);
+ }
+
+ if (events->sctp_sender_dry_event) {
+ sctp_feature_on(inp, SCTP_PCB_FLAGS_DRYEVNT);
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+ (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
+ stcb = LIST_FIRST(&inp->sctp_asoc_list);
+ if (stcb) {
+ SCTP_TCB_LOCK(stcb);
+ }
+ if (stcb &&
+ TAILQ_EMPTY(&stcb->asoc.send_queue) &&
+ TAILQ_EMPTY(&stcb->asoc.sent_queue) &&
+ (stcb->asoc.stream_queue_cnt == 0)) {
+ sctp_ulp_notify(SCTP_NOTIFY_SENDER_DRY, stcb, 0, NULL, SCTP_SO_LOCKED);
+ }
+ if (stcb) {
+ SCTP_TCB_UNLOCK(stcb);
+ }
+ }
+ } else {
+ sctp_feature_off(inp, SCTP_PCB_FLAGS_DRYEVNT);
+ }
+
+ if (events->sctp_stream_reset_event) {
+ sctp_feature_on(inp, SCTP_PCB_FLAGS_STREAM_RESETEVNT);
+ } else {
+ sctp_feature_off(inp, SCTP_PCB_FLAGS_STREAM_RESETEVNT);
+ }
+ SCTP_INP_WUNLOCK(inp);
+ }
+ break;
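
Each member of struct sctp_event_subscribe flips exactly one feature flag; the sender-dry case additionally fires an immediate notification when the send queues are already empty. Subscribing from userland, a sketch under the same assumptions as the earlier examples:

    struct sctp_event_subscribe ev;

    memset(&ev, 0, sizeof(ev));    /* all other events stay off */
    ev.sctp_data_io_event = 1;     /* sctp_sndrcvinfo per received message */
    ev.sctp_association_event = 1; /* association up/down notifications */
    if (setsockopt(sd, IPPROTO_SCTP, SCTP_EVENTS, &ev, sizeof(ev)) < 0)
        perror("SCTP_EVENTS");
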
+
+ case SCTP_ADAPTATION_LAYER:
+ {
+ struct sctp_setadaptation *adap_bits;
+
+ SCTP_CHECK_AND_CAST(adap_bits, optval, struct sctp_setadaptation, optsize);
+ SCTP_INP_WLOCK(inp);
+ inp->sctp_ep.adaptation_layer_indicator = adap_bits->ssb_adaptation_ind;
+ SCTP_INP_WUNLOCK(inp);
+ }
+ break;
+#ifdef SCTP_DEBUG
+ case SCTP_SET_INITIAL_DBG_SEQ:
+ {
+ uint32_t *vvv;
+
+ SCTP_CHECK_AND_CAST(vvv, optval, uint32_t, optsize);
+ SCTP_INP_WLOCK(inp);
+ inp->sctp_ep.initial_sequence_debug = *vvv;
+ SCTP_INP_WUNLOCK(inp);
+ }
+ break;
+#endif
+ case SCTP_DEFAULT_SEND_PARAM:
+ {
+ struct sctp_sndrcvinfo *s_info;
+
+ SCTP_CHECK_AND_CAST(s_info, optval, struct sctp_sndrcvinfo, optsize);
+ SCTP_FIND_STCB(inp, stcb, s_info->sinfo_assoc_id);
+
+ if (stcb) {
+				if (s_info->sinfo_stream < stcb->asoc.streamoutcnt) {
+ memcpy(&stcb->asoc.def_send, s_info, min(optsize, sizeof(stcb->asoc.def_send)));
+ } else {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ }
+ SCTP_TCB_UNLOCK(stcb);
+ } else {
+ SCTP_INP_WLOCK(inp);
+ memcpy(&inp->def_send, s_info, min(optsize, sizeof(inp->def_send)));
+ SCTP_INP_WUNLOCK(inp);
+ }
+ }
+ break;
+ case SCTP_PEER_ADDR_PARAMS:
+		/* Applies to the specific association */
+ {
+ struct sctp_paddrparams *paddrp;
+ struct sctp_nets *net;
+
+ SCTP_CHECK_AND_CAST(paddrp, optval, struct sctp_paddrparams, optsize);
+ SCTP_FIND_STCB(inp, stcb, paddrp->spp_assoc_id);
+ net = NULL;
+ if (stcb) {
+ net = sctp_findnet(stcb, (struct sockaddr *)&paddrp->spp_address);
+ } else {
+			/*
+			 * We increment here since
+			 * sctp_findassociation_ep_addr() will do a
+			 * decrement if it finds the stcb as long as
+			 * the locked tcb (last argument) is NOT a
+			 * TCB, i.e. NULL.
+			 */
+ SCTP_INP_INCR_REF(inp);
+ stcb = sctp_findassociation_ep_addr(&inp,
+ (struct sockaddr *)&paddrp->spp_address,
+ &net, NULL, NULL);
+ if (stcb == NULL) {
+ SCTP_INP_DECR_REF(inp);
+ }
+ }
+ if (stcb && (net == NULL)) {
+ struct sockaddr *sa;
+
+ sa = (struct sockaddr *)&paddrp->spp_address;
+ if (sa->sa_family == AF_INET) {
+ struct sockaddr_in *sin;
+
+ sin = (struct sockaddr_in *)sa;
+ if (sin->sin_addr.s_addr) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ SCTP_TCB_UNLOCK(stcb);
+ error = EINVAL;
+ break;
+ }
+ } else if (sa->sa_family == AF_INET6) {
+ struct sockaddr_in6 *sin6;
+
+ sin6 = (struct sockaddr_in6 *)sa;
+ if (!IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ SCTP_TCB_UNLOCK(stcb);
+ error = EINVAL;
+ break;
+ }
+ } else {
+ error = EAFNOSUPPORT;
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
+ break;
+ }
+ }
+ /* sanity checks */
+ if ((paddrp->spp_flags & SPP_HB_ENABLE) && (paddrp->spp_flags & SPP_HB_DISABLE)) {
+ if (stcb)
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ return (EINVAL);
+ }
+ if ((paddrp->spp_flags & SPP_PMTUD_ENABLE) && (paddrp->spp_flags & SPP_PMTUD_DISABLE)) {
+ if (stcb)
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ return (EINVAL);
+ }
+ if (stcb) {
+ /************************TCB SPECIFIC SET ******************/
+ /*
+ * do we change the timer for HB, we run
+ * only one?
+ */
+ int ovh = 0;
+
+ if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
+ ovh = SCTP_MED_OVERHEAD;
+ } else {
+ ovh = SCTP_MED_V4_OVERHEAD;
+ }
+
+ if (paddrp->spp_hbinterval)
+ stcb->asoc.heart_beat_delay = paddrp->spp_hbinterval;
+ else if (paddrp->spp_flags & SPP_HB_TIME_IS_ZERO)
+ stcb->asoc.heart_beat_delay = 0;
+
+ /* network sets ? */
+ if (net) {
+ /************************NET SPECIFIC SET ******************/
+ if (paddrp->spp_flags & SPP_HB_DEMAND) {
+ /* on demand HB */
+ if (sctp_send_hb(stcb, 1, net) < 0) {
+ /* asoc destroyed */
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ break;
+ }
+ }
+ if (paddrp->spp_flags & SPP_HB_DISABLE) {
+ net->dest_state |= SCTP_ADDR_NOHB;
+ }
+ if (paddrp->spp_flags & SPP_HB_ENABLE) {
+ net->dest_state &= ~SCTP_ADDR_NOHB;
+ }
+ if ((paddrp->spp_flags & SPP_PMTUD_DISABLE) && (paddrp->spp_pathmtu >= SCTP_SMALLEST_PMTU)) {
+ if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) {
+ sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net,
+ SCTP_FROM_SCTP_USRREQ + SCTP_LOC_10);
+ }
+ if (paddrp->spp_pathmtu > SCTP_DEFAULT_MINSEGMENT) {
+ net->mtu = paddrp->spp_pathmtu + ovh;
+ if (net->mtu < stcb->asoc.smallest_mtu) {
+ sctp_pathmtu_adjustment(inp, stcb, net, net->mtu);
+ }
+ }
+ }
+ if (paddrp->spp_flags & SPP_PMTUD_ENABLE) {
+						if (!SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) {
+ sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net);
+ }
+ }
+ if (paddrp->spp_pathmaxrxt)
+ net->failure_threshold = paddrp->spp_pathmaxrxt;
+#ifdef INET
+ if (paddrp->spp_flags & SPP_IPV4_TOS) {
+ if (net->ro._l_addr.sin.sin_family == AF_INET) {
+ net->tos_flowlabel = paddrp->spp_ipv4_tos & 0x000000fc;
+ }
+ }
+#endif
+#ifdef INET6
+ if (paddrp->spp_flags & SPP_IPV6_FLOWLABEL) {
+ if (net->ro._l_addr.sin6.sin6_family == AF_INET6) {
+ net->tos_flowlabel = paddrp->spp_ipv6_flowlabel;
+ }
+ }
+#endif
+ } else {
+ /************************ASSOC ONLY -- NO NET SPECIFIC SET ******************/
+ if (paddrp->spp_pathmaxrxt)
+ stcb->asoc.def_net_failure = paddrp->spp_pathmaxrxt;
+
+ if (paddrp->spp_flags & SPP_HB_ENABLE) {
+ /* Turn back on the timer */
+ stcb->asoc.hb_is_disabled = 0;
+ sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net);
+ }
+ if ((paddrp->spp_flags & SPP_PMTUD_DISABLE) && (paddrp->spp_pathmtu >= SCTP_SMALLEST_PMTU)) {
+ TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+ if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) {
+ sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net,
+ SCTP_FROM_SCTP_USRREQ + SCTP_LOC_10);
+ }
+ if (paddrp->spp_pathmtu > SCTP_DEFAULT_MINSEGMENT) {
+ net->mtu = paddrp->spp_pathmtu + ovh;
+ if (net->mtu < stcb->asoc.smallest_mtu) {
+ sctp_pathmtu_adjustment(inp, stcb, net, net->mtu);
+ }
+ }
+ }
+ }
+ if (paddrp->spp_flags & SPP_PMTUD_ENABLE) {
+ TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+							if (!SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) {
+ sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net);
+ }
+ }
+ }
+ if (paddrp->spp_flags & SPP_HB_DISABLE) {
+ int cnt_of_unconf = 0;
+ struct sctp_nets *lnet;
+
+ stcb->asoc.hb_is_disabled = 1;
+ TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
+ if (lnet->dest_state & SCTP_ADDR_UNCONFIRMED) {
+ cnt_of_unconf++;
+ }
+ }
+ /*
+ * stop the timer ONLY if we
+ * have no unconfirmed
+ * addresses
+ */
+ if (cnt_of_unconf == 0) {
+ TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+ sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net,
+ SCTP_FROM_SCTP_USRREQ + SCTP_LOC_11);
+ }
+ }
+ }
+ if (paddrp->spp_flags & SPP_HB_ENABLE) {
+ /* start up the timer. */
+ TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+ sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net);
+ }
+ }
+#ifdef INET
+ if (paddrp->spp_flags & SPP_IPV4_TOS)
+ stcb->asoc.default_tos = paddrp->spp_ipv4_tos & 0x000000fc;
+#endif
+#ifdef INET6
+ if (paddrp->spp_flags & SPP_IPV6_FLOWLABEL)
+ stcb->asoc.default_flowlabel = paddrp->spp_ipv6_flowlabel;
+#endif
+
+ }
+ SCTP_TCB_UNLOCK(stcb);
+ } else {
+ /************************NO TCB, SET TO default stuff ******************/
+ SCTP_INP_WLOCK(inp);
+ /*
+ * For the TOS/FLOWLABEL stuff you set it
+ * with the options on the socket
+ */
+ if (paddrp->spp_pathmaxrxt) {
+ inp->sctp_ep.def_net_failure = paddrp->spp_pathmaxrxt;
+ }
+ if (paddrp->spp_flags & SPP_HB_TIME_IS_ZERO)
+ inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT] = 0;
+ else if (paddrp->spp_hbinterval) {
+ if (paddrp->spp_hbinterval > SCTP_MAX_HB_INTERVAL)
+ paddrp->spp_hbinterval = SCTP_MAX_HB_INTERVAL;
+ inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT] = MSEC_TO_TICKS(paddrp->spp_hbinterval);
+ }
+ if (paddrp->spp_flags & SPP_HB_ENABLE) {
+ sctp_feature_off(inp, SCTP_PCB_FLAGS_DONOT_HEARTBEAT);
+				} else if (paddrp->spp_flags & SPP_HB_DISABLE) {
+ sctp_feature_on(inp, SCTP_PCB_FLAGS_DONOT_HEARTBEAT);
+ }
+ SCTP_INP_WUNLOCK(inp);
+ }
+ }
+ break;
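
The sanity checks above reject requests that set both members of a flag pair (SPP_HB_ENABLE with SPP_HB_DISABLE, or the PMTUD pair), and a zeroed spp_address applies the values association- or endpoint-wide rather than to one destination. A sketch enabling heartbeats with a 5 second interval on the endpoint, same assumptions as the earlier examples:

    struct sctp_paddrparams pp;

    memset(&pp, 0, sizeof(pp)); /* zeroed spp_address: no specific net */
    pp.spp_hbinterval = 5000;   /* ms */
    pp.spp_flags = SPP_HB_ENABLE;
    if (setsockopt(sd, IPPROTO_SCTP, SCTP_PEER_ADDR_PARAMS,
        &pp, sizeof(pp)) < 0)
        perror("SCTP_PEER_ADDR_PARAMS");
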
+ case SCTP_RTOINFO:
+ {
+ struct sctp_rtoinfo *srto;
+ uint32_t new_init, new_min, new_max;
+
+ SCTP_CHECK_AND_CAST(srto, optval, struct sctp_rtoinfo, optsize);
+ SCTP_FIND_STCB(inp, stcb, srto->srto_assoc_id);
+
+ if (stcb) {
+ if (srto->srto_initial)
+ new_init = srto->srto_initial;
+ else
+ new_init = stcb->asoc.initial_rto;
+ if (srto->srto_max)
+ new_max = srto->srto_max;
+ else
+ new_max = stcb->asoc.maxrto;
+ if (srto->srto_min)
+ new_min = srto->srto_min;
+ else
+ new_min = stcb->asoc.minrto;
+ if ((new_min <= new_init) && (new_init <= new_max)) {
+ stcb->asoc.initial_rto = new_init;
+ stcb->asoc.maxrto = new_max;
+ stcb->asoc.minrto = new_min;
+ } else {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ }
+ SCTP_TCB_UNLOCK(stcb);
+ } else {
+ SCTP_INP_WLOCK(inp);
+ if (srto->srto_initial)
+ new_init = srto->srto_initial;
+ else
+ new_init = inp->sctp_ep.initial_rto;
+ if (srto->srto_max)
+ new_max = srto->srto_max;
+ else
+ new_max = inp->sctp_ep.sctp_maxrto;
+ if (srto->srto_min)
+ new_min = srto->srto_min;
+ else
+ new_min = inp->sctp_ep.sctp_minrto;
+ if ((new_min <= new_init) && (new_init <= new_max)) {
+ inp->sctp_ep.initial_rto = new_init;
+ inp->sctp_ep.sctp_maxrto = new_max;
+ inp->sctp_ep.sctp_minrto = new_min;
+ } else {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ }
+ SCTP_INP_WUNLOCK(inp);
+ }
+ }
+ break;
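
Zero fields keep their current values, and the merged result must satisfy min <= initial <= max or the whole update is rejected with EINVAL. A sketch, same assumptions as the earlier examples; values are in milliseconds:

    struct sctp_rtoinfo rto;

    memset(&rto, 0, sizeof(rto)); /* zero fields keep current values */
    rto.srto_initial = 1000;
    rto.srto_min = 500;
    rto.srto_max = 10000;         /* must hold: min <= initial <= max */
    if (setsockopt(sd, IPPROTO_SCTP, SCTP_RTOINFO, &rto, sizeof(rto)) < 0)
        perror("SCTP_RTOINFO");
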
+ case SCTP_ASSOCINFO:
+ {
+ struct sctp_assocparams *sasoc;
+
+ SCTP_CHECK_AND_CAST(sasoc, optval, struct sctp_assocparams, optsize);
+ SCTP_FIND_STCB(inp, stcb, sasoc->sasoc_assoc_id);
+ if (sasoc->sasoc_cookie_life) {
+ /* boundary check the cookie life */
+ if (sasoc->sasoc_cookie_life < 1000)
+ sasoc->sasoc_cookie_life = 1000;
+ if (sasoc->sasoc_cookie_life > SCTP_MAX_COOKIE_LIFE) {
+ sasoc->sasoc_cookie_life = SCTP_MAX_COOKIE_LIFE;
+ }
+ }
+ if (stcb) {
+ if (sasoc->sasoc_asocmaxrxt)
+ stcb->asoc.max_send_times = sasoc->sasoc_asocmaxrxt;
+ sasoc->sasoc_number_peer_destinations = stcb->asoc.numnets;
+ sasoc->sasoc_peer_rwnd = 0;
+ sasoc->sasoc_local_rwnd = 0;
+ if (sasoc->sasoc_cookie_life) {
+ stcb->asoc.cookie_life = MSEC_TO_TICKS(sasoc->sasoc_cookie_life);
+ }
+ SCTP_TCB_UNLOCK(stcb);
+ } else {
+ SCTP_INP_WLOCK(inp);
+ if (sasoc->sasoc_asocmaxrxt)
+ inp->sctp_ep.max_send_times = sasoc->sasoc_asocmaxrxt;
+ sasoc->sasoc_number_peer_destinations = 0;
+ sasoc->sasoc_peer_rwnd = 0;
+ sasoc->sasoc_local_rwnd = 0;
+ if (sasoc->sasoc_cookie_life) {
+ inp->sctp_ep.def_cookie_life = MSEC_TO_TICKS(sasoc->sasoc_cookie_life);
+ }
+ SCTP_INP_WUNLOCK(inp);
+ }
+ }
+ break;
+ case SCTP_INITMSG:
+ {
+ struct sctp_initmsg *sinit;
+
+ SCTP_CHECK_AND_CAST(sinit, optval, struct sctp_initmsg, optsize);
+ SCTP_INP_WLOCK(inp);
+ if (sinit->sinit_num_ostreams)
+ inp->sctp_ep.pre_open_stream_count = sinit->sinit_num_ostreams;
+
+ if (sinit->sinit_max_instreams)
+ inp->sctp_ep.max_open_streams_intome = sinit->sinit_max_instreams;
+
+ if (sinit->sinit_max_attempts)
+ inp->sctp_ep.max_init_times = sinit->sinit_max_attempts;
+
+ if (sinit->sinit_max_init_timeo)
+ inp->sctp_ep.initial_init_rto_max = sinit->sinit_max_init_timeo;
+ SCTP_INP_WUNLOCK(inp);
+ }
+ break;
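
These are endpoint defaults consumed when a later INIT is built, so they affect only associations created afterwards; zero fields are ignored. A sketch, same assumptions as the earlier examples:

    struct sctp_initmsg im;

    memset(&im, 0, sizeof(im));  /* zero fields are left unchanged */
    im.sinit_num_ostreams = 10;  /* request 10 outbound streams */
    im.sinit_max_instreams = 10; /* accept at most 10 inbound streams */
    im.sinit_max_attempts = 5;   /* give up after 5 INIT attempts */
    if (setsockopt(sd, IPPROTO_SCTP, SCTP_INITMSG, &im, sizeof(im)) < 0)
        perror("SCTP_INITMSG");
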
+ case SCTP_PRIMARY_ADDR:
+ {
+ struct sctp_setprim *spa;
+ struct sctp_nets *net, *lnet;
+
+ SCTP_CHECK_AND_CAST(spa, optval, struct sctp_setprim, optsize);
+ SCTP_FIND_STCB(inp, stcb, spa->ssp_assoc_id);
+
+ net = NULL;
+ if (stcb) {
+ net = sctp_findnet(stcb, (struct sockaddr *)&spa->ssp_addr);
+ } else {
+ /*
+ * We increment here since
+ * sctp_findassociation_ep_addr() wil do a
+ * decrement if it finds the stcb as long as
+ * the locked tcb (last argument) is NOT a
+ * TCB.. aka NULL.
+ */
+ SCTP_INP_INCR_REF(inp);
+ stcb = sctp_findassociation_ep_addr(&inp,
+ (struct sockaddr *)&spa->ssp_addr,
+ &net, NULL, NULL);
+ if (stcb == NULL) {
+ SCTP_INP_DECR_REF(inp);
+ }
+ }
+
+ if ((stcb) && (net)) {
+ if ((net != stcb->asoc.primary_destination) &&
+ (!(net->dest_state & SCTP_ADDR_UNCONFIRMED))) {
+ /* Ok we need to set it */
+ lnet = stcb->asoc.primary_destination;
+ if (sctp_set_primary_addr(stcb, (struct sockaddr *)NULL, net) == 0) {
+ if (net->dest_state & SCTP_ADDR_SWITCH_PRIMARY) {
+ net->dest_state |= SCTP_ADDR_DOUBLE_SWITCH;
+ }
+ net->dest_state |= SCTP_ADDR_SWITCH_PRIMARY;
+ }
+ }
+ } else {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ }
+ if (stcb) {
+ SCTP_TCB_UNLOCK(stcb);
+ }
+ }
+ break;
+ case SCTP_SET_DYNAMIC_PRIMARY:
+ {
+ union sctp_sockstore *ss;
+
+ error = priv_check(curthread,
+ PRIV_NETINET_RESERVEDPORT);
+ if (error)
+ break;
+
+ SCTP_CHECK_AND_CAST(ss, optval, union sctp_sockstore, optsize);
+			/* privilege already checked via priv_check() above */
+ error = sctp_dynamic_set_primary(&ss->sa, vrf_id);
+ }
+ break;
+ case SCTP_SET_PEER_PRIMARY_ADDR:
+ {
+ struct sctp_setpeerprim *sspp;
+
+ SCTP_CHECK_AND_CAST(sspp, optval, struct sctp_setpeerprim, optsize);
+ SCTP_FIND_STCB(inp, stcb, sspp->sspp_assoc_id);
+ if (stcb != NULL) {
+ struct sctp_ifa *ifa;
+
+ ifa = sctp_find_ifa_by_addr((struct sockaddr *)&sspp->sspp_addr,
+ stcb->asoc.vrf_id, SCTP_ADDR_NOT_LOCKED);
+ if (ifa == NULL) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ goto out_of_it;
+ }
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) == 0) {
+ /*
+ * Must validate the ifa found is in
+ * our ep
+ */
+ struct sctp_laddr *laddr;
+ int found = 0;
+
+ LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
+ if (laddr->ifa == NULL) {
+ SCTPDBG(SCTP_DEBUG_OUTPUT1, "%s: NULL ifa\n",
+ __FUNCTION__);
+ continue;
+ }
+ if (laddr->ifa == ifa) {
+ found = 1;
+ break;
+ }
+ }
+ if (!found) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ goto out_of_it;
+ }
+ }
+ if (sctp_set_primary_ip_address_sa(stcb,
+ (struct sockaddr *)&sspp->sspp_addr) != 0) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ }
+ out_of_it:
+ SCTP_TCB_UNLOCK(stcb);
+ } else {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ }
+
+ }
+ break;
+ case SCTP_BINDX_ADD_ADDR:
+ {
+ struct sctp_getaddresses *addrs;
+ size_t sz;
+ struct thread *td;
+
+ td = (struct thread *)p;
+ SCTP_CHECK_AND_CAST(addrs, optval, struct sctp_getaddresses,
+ optsize);
+ if (addrs->addr->sa_family == AF_INET) {
+ sz = sizeof(struct sctp_getaddresses) - sizeof(struct sockaddr) + sizeof(struct sockaddr_in);
+ if (optsize < sz) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ break;
+ }
+ if (td != NULL && (error = prison_local_ip4(td->td_ucred, &(((struct sockaddr_in *)(addrs->addr))->sin_addr)))) {
+ SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, error);
+ break;
+ }
+#ifdef INET6
+ } else if (addrs->addr->sa_family == AF_INET6) {
+ sz = sizeof(struct sctp_getaddresses) - sizeof(struct sockaddr) + sizeof(struct sockaddr_in6);
+ if (optsize < sz) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ break;
+ }
+ if (td != NULL && (error = prison_local_ip6(td->td_ucred, &(((struct sockaddr_in6 *)(addrs->addr))->sin6_addr),
+ (SCTP_IPV6_V6ONLY(inp) != 0))) != 0) {
+ SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, error);
+ break;
+ }
+#endif
+ } else {
+ error = EAFNOSUPPORT;
+ break;
+ }
+ sctp_bindx_add_address(so, inp, addrs->addr,
+ addrs->sget_assoc_id, vrf_id,
+ &error, p);
+ }
+ break;
+ case SCTP_BINDX_REM_ADDR:
+ {
+ struct sctp_getaddresses *addrs;
+ size_t sz;
+ struct thread *td;
+
+ td = (struct thread *)p;
+
+ SCTP_CHECK_AND_CAST(addrs, optval, struct sctp_getaddresses, optsize);
+ if (addrs->addr->sa_family == AF_INET) {
+ sz = sizeof(struct sctp_getaddresses) - sizeof(struct sockaddr) + sizeof(struct sockaddr_in);
+ if (optsize < sz) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ break;
+ }
+ if (td != NULL && (error = prison_local_ip4(td->td_ucred, &(((struct sockaddr_in *)(addrs->addr))->sin_addr)))) {
+ SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, error);
+ break;
+ }
+#ifdef INET6
+ } else if (addrs->addr->sa_family == AF_INET6) {
+ sz = sizeof(struct sctp_getaddresses) - sizeof(struct sockaddr) + sizeof(struct sockaddr_in6);
+ if (optsize < sz) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ break;
+ }
+ if (td != NULL && (error = prison_local_ip6(td->td_ucred, &(((struct sockaddr_in6 *)(addrs->addr))->sin6_addr),
+ (SCTP_IPV6_V6ONLY(inp) != 0))) != 0) {
+ SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, error);
+ break;
+ }
+#endif
+ } else {
+ error = EAFNOSUPPORT;
+ break;
+ }
+ sctp_bindx_delete_address(so, inp, addrs->addr,
+ addrs->sget_assoc_id, vrf_id,
+ &error);
+ }
+ break;
+ default:
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOPROTOOPT);
+ error = ENOPROTOOPT;
+ break;
+ } /* end switch (opt) */
+ return (error);
+}
+
+int
+sctp_ctloutput(struct socket *so, struct sockopt *sopt)
+{
+ void *optval = NULL;
+ size_t optsize = 0;
+ struct sctp_inpcb *inp;
+ void *p;
+ int error = 0;
+
+ inp = (struct sctp_inpcb *)so->so_pcb;
+	if (inp == NULL) {
+		/* Behave like TCP when the PCB is not set up. */
+		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ECONNRESET);
+		return (ECONNRESET);
+ }
+ if (sopt->sopt_level != IPPROTO_SCTP) {
+ /* wrong proto level... send back up to IP */
+#ifdef INET6
+ if (INP_CHECK_SOCKAF(so, AF_INET6))
+ error = ip6_ctloutput(so, sopt);
+ else
+#endif /* INET6 */
+ error = ip_ctloutput(so, sopt);
+ return (error);
+ }
+ optsize = sopt->sopt_valsize;
+ if (optsize) {
+ SCTP_MALLOC(optval, void *, optsize, SCTP_M_SOCKOPT);
+ if (optval == NULL) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOBUFS);
+ return (ENOBUFS);
+ }
+ error = sooptcopyin(sopt, optval, optsize, optsize);
+ if (error) {
+ SCTP_FREE(optval, SCTP_M_SOCKOPT);
+ goto out;
+ }
+ }
+ p = (void *)sopt->sopt_td;
+ if (sopt->sopt_dir == SOPT_SET) {
+ error = sctp_setopt(so, sopt->sopt_name, optval, optsize, p);
+ } else if (sopt->sopt_dir == SOPT_GET) {
+ error = sctp_getopt(so, sopt->sopt_name, optval, &optsize, p);
+ } else {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ }
+ if ((error == 0) && (optval != NULL)) {
+ error = sooptcopyout(sopt, optval, optsize);
+ SCTP_FREE(optval, SCTP_M_SOCKOPT);
+ } else if (optval != NULL) {
+ SCTP_FREE(optval, SCTP_M_SOCKOPT);
+ }
+out:
+ return (error);
+}
+
+
+static int
+sctp_connect(struct socket *so, struct sockaddr *addr, struct thread *p)
+{
+ int error = 0;
+ int create_lock_on = 0;
+ uint32_t vrf_id;
+ struct sctp_inpcb *inp;
+ struct sctp_tcb *stcb = NULL;
+
+ inp = (struct sctp_inpcb *)so->so_pcb;
+ if (inp == 0) {
+		/* The pcb is not set up; fail the same way TCP does. */
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ return (ECONNRESET);
+ }
+ if (addr == NULL) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ return EINVAL;
+ }
+#ifdef INET6
+ if (addr->sa_family == AF_INET6) {
+ struct sockaddr_in6 *sin6p;
+
+ if (addr->sa_len != sizeof(struct sockaddr_in6)) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ return (EINVAL);
+ }
+ sin6p = (struct sockaddr_in6 *)addr;
+ if (p != NULL && (error = prison_remote_ip6(p->td_ucred, &sin6p->sin6_addr)) != 0) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
+ return (error);
+ }
+ } else
+#endif
+ if (addr->sa_family == AF_INET) {
+ struct sockaddr_in *sinp;
+
+ if (addr->sa_len != sizeof(struct sockaddr_in)) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ return (EINVAL);
+ }
+ sinp = (struct sockaddr_in *)addr;
+ if (p != NULL && (error = prison_remote_ip4(p->td_ucred, &sinp->sin_addr)) != 0) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
+ return (error);
+ }
+ } else {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EAFNOSUPPORT);
+ return (EAFNOSUPPORT);
+ }
+ SCTP_INP_INCR_REF(inp);
+ SCTP_ASOC_CREATE_LOCK(inp);
+ create_lock_on = 1;
+
+
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
+ (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
+		/* Should I really unlock? */
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EFAULT);
+ error = EFAULT;
+ goto out_now;
+ }
+#ifdef INET6
+ if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) &&
+ (addr->sa_family == AF_INET6)) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ goto out_now;
+ }
+#endif /* INET6 */
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) ==
+ SCTP_PCB_FLAGS_UNBOUND) {
+		/* Bind an ephemeral port */
+ error = sctp_inpcb_bind(so, NULL, NULL, p);
+ if (error) {
+ goto out_now;
+ }
+ }
+ /* Now do we connect? */
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) &&
+ (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_PORTREUSE))) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ error = EINVAL;
+ goto out_now;
+ }
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
+ (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED)) {
+ /* We are already connected AND the TCP model */
+ SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EADDRINUSE);
+ error = EADDRINUSE;
+ goto out_now;
+ }
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
+ SCTP_INP_RLOCK(inp);
+ stcb = LIST_FIRST(&inp->sctp_asoc_list);
+ SCTP_INP_RUNLOCK(inp);
+ } else {
+		/*
+		 * We increment here since sctp_findassociation_ep_addr()
+		 * will do a decrement if it finds the stcb, as long as the
+		 * locked tcb (last argument) is NULL.
+		 */
+ SCTP_INP_INCR_REF(inp);
+ stcb = sctp_findassociation_ep_addr(&inp, addr, NULL, NULL, NULL);
+ if (stcb == NULL) {
+ SCTP_INP_DECR_REF(inp);
+ } else {
+ SCTP_TCB_UNLOCK(stcb);
+ }
+ }
+ if (stcb != NULL) {
+		/* We already have, or are bringing up, an association */
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EALREADY);
+ error = EALREADY;
+ goto out_now;
+ }
+ vrf_id = inp->def_vrf_id;
+ /* We are GOOD to go */
+ stcb = sctp_aloc_assoc(inp, addr, &error, 0, vrf_id, p);
+ if (stcb == NULL) {
+ /* Gak! no memory */
+ goto out_now;
+ }
+ if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) {
+ stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
+ /* Set the connected flag so we can queue data */
+ SOCKBUF_LOCK(&so->so_rcv);
+ so->so_rcv.sb_state &= ~SBS_CANTRCVMORE;
+ SOCKBUF_UNLOCK(&so->so_rcv);
+ SOCKBUF_LOCK(&so->so_snd);
+ so->so_snd.sb_state &= ~SBS_CANTSENDMORE;
+ SOCKBUF_UNLOCK(&so->so_snd);
+ SOCK_LOCK(so);
+ so->so_state &= ~SS_ISDISCONNECTING;
+ SOCK_UNLOCK(so);
+ soisconnecting(so);
+ }
+ SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_COOKIE_WAIT);
+ (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);
+
+ /* initialize authentication parameters for the assoc */
+ sctp_initialize_auth_params(inp, stcb);
+
+ sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED);
+ SCTP_TCB_UNLOCK(stcb);
+out_now:
+ if (create_lock_on) {
+ SCTP_ASOC_CREATE_UNLOCK(inp);
+ }
+ SCTP_INP_DECR_REF(inp);
+ return error;
+}
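+
+/*
+ * Illustrative userland sketch, not part of the original import: driving
+ * the COOKIE-WAIT path above with a one-to-one style SCTP socket.  The
+ * peer address and port are made up for the example.
+ */
+#if 0
+#include <string.h>
+#include <sys/socket.h>
+#include <netinet/in.h>
+#include <arpa/inet.h>
+
+static int
+example_sctp_connect(void)
+{
+	struct sockaddr_in peer;
+	int sd;
+
+	sd = socket(AF_INET, SOCK_STREAM, IPPROTO_SCTP);
+	if (sd < 0)
+		return (-1);
+	memset(&peer, 0, sizeof(peer));
+	peer.sin_family = AF_INET;
+	peer.sin_len = sizeof(peer);
+	peer.sin_port = htons(7);	/* hypothetical peer port */
+	peer.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
+	/*
+	 * connect(2) lands in sctp_connect() above: the INIT is sent and
+	 * the association waits in SCTP_STATE_COOKIE_WAIT until the
+	 * handshake completes (a non-blocking socket sees EINPROGRESS).
+	 */
+	return (connect(sd, (struct sockaddr *)&peer, sizeof(peer)));
+}
+#endif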
+
+int
+sctp_listen(struct socket *so, int backlog, struct thread *p)
+{
+ /*
+ * Note this module depends on the protocol processing being called
+ * AFTER any socket level flags and backlog are applied to the
+ * socket. The traditional way that the socket flags are applied is
+ * AFTER protocol processing. We have made a change to the
+ * sys/kern/uipc_socket.c module to reverse this but this MUST be in
+ * place if the socket API for SCTP is to work properly.
+ */
+
+ int error = 0;
+ struct sctp_inpcb *inp;
+
+ inp = (struct sctp_inpcb *)so->so_pcb;
+ if (inp == 0) {
+		/* The pcb is not set up; fail the same way TCP does. */
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ return (ECONNRESET);
+ }
+ if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_PORTREUSE)) {
+ /* See if we have a listener */
+ struct sctp_inpcb *tinp;
+ union sctp_sockstore store, *sp;
+
+ sp = &store;
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) == 0) {
+ /* not bound all */
+ struct sctp_laddr *laddr;
+
+ LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
+ memcpy(&store, &laddr->ifa->address, sizeof(store));
+ sp->sin.sin_port = inp->sctp_lport;
+ tinp = sctp_pcb_findep(&sp->sa, 0, 0, inp->def_vrf_id);
+ if (tinp && (tinp != inp) &&
+ ((tinp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) == 0) &&
+ ((tinp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) &&
+ (tinp->sctp_socket->so_qlimit)) {
+					/*
+					 * We have a listener already and
+					 * it's not this inp.
+					 */
+ SCTP_INP_DECR_REF(tinp);
+ return (EADDRINUSE);
+ } else if (tinp) {
+ SCTP_INP_DECR_REF(tinp);
+ }
+ }
+ } else {
+ /* Setup a local addr bound all */
+ memset(&store, 0, sizeof(store));
+ store.sin.sin_port = inp->sctp_lport;
+#ifdef INET6
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
+ store.sa.sa_family = AF_INET6;
+ store.sa.sa_len = sizeof(struct sockaddr_in6);
+ }
+#endif
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
+ store.sa.sa_family = AF_INET;
+ store.sa.sa_len = sizeof(struct sockaddr_in);
+ }
+ tinp = sctp_pcb_findep(&sp->sa, 0, 0, inp->def_vrf_id);
+ if (tinp && (tinp != inp) &&
+ ((tinp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) == 0) &&
+ ((tinp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) &&
+ (tinp->sctp_socket->so_qlimit)) {
+				/*
+				 * We have a listener already and it's not
+				 * this inp.
+				 */
+ SCTP_INP_DECR_REF(tinp);
+ return (EADDRINUSE);
+ } else if (tinp) {
+				SCTP_INP_DECR_REF(tinp);	/* drop the ref from sctp_pcb_findep() */
+ }
+ }
+ }
+ SCTP_INP_RLOCK(inp);
+#ifdef SCTP_LOCK_LOGGING
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE) {
+ sctp_log_lock(inp, (struct sctp_tcb *)NULL, SCTP_LOG_LOCK_SOCK);
+ }
+#endif
+ SOCK_LOCK(so);
+ error = solisten_proto_check(so);
+ if (error) {
+ SOCK_UNLOCK(so);
+ SCTP_INP_RUNLOCK(inp);
+ return (error);
+ }
+ if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_PORTREUSE)) &&
+ (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
+		/*
+		 * The unlucky case: we are in the TCP pool with this
+		 * endpoint while someone else holds the main inp slot, so
+		 * we must move this listener into the main slot and move
+		 * the previous occupant into the TCP pool.
+		 */
+ if (sctp_swap_inpcb_for_listen(inp)) {
+ goto in_use;
+ }
+ }
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
+ (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED)) {
+ /* We are already connected AND the TCP model */
+in_use:
+ SCTP_INP_RUNLOCK(inp);
+ SOCK_UNLOCK(so);
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EADDRINUSE);
+ return (EADDRINUSE);
+ }
+ SCTP_INP_RUNLOCK(inp);
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
+ /* We must do a bind. */
+ SOCK_UNLOCK(so);
+ if ((error = sctp_inpcb_bind(so, NULL, NULL, p))) {
+ /* bind error, probably perm */
+ return (error);
+ }
+ SOCK_LOCK(so);
+ }
+	/* It appears that from 7.0 on, we must always call this. */
+ solisten_proto(so, backlog);
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) {
+ /* remove the ACCEPTCONN flag for one-to-many sockets */
+ so->so_options &= ~SO_ACCEPTCONN;
+ }
+ if (backlog == 0) {
+ /* turning off listen */
+ so->so_options &= ~SO_ACCEPTCONN;
+ }
+ SOCK_UNLOCK(so);
+ return (error);
+}
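+
+/*
+ * Note added for clarity: listen(2) reaches this function through
+ * solisten() and the pru_listen entry in sctp_usrreqs below.  Per the
+ * handling above, a one-to-many (UDP-model) socket never keeps
+ * SO_ACCEPTCONN set, and a zero backlog clears it again, so
+ * "listen(sd, 0)" can be used to stop listening on an endpoint.
+ */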
+
+static int sctp_defered_wakeup_cnt = 0;
+
+int
+sctp_accept(struct socket *so, struct sockaddr **addr)
+{
+ struct sctp_tcb *stcb;
+ struct sctp_inpcb *inp;
+ union sctp_sockstore store;
+
+#ifdef INET6
+ int error;
+
+#endif
+ inp = (struct sctp_inpcb *)so->so_pcb;
+
+ if (inp == 0) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ return (ECONNRESET);
+ }
+ SCTP_INP_RLOCK(inp);
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) {
+ SCTP_INP_RUNLOCK(inp);
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP);
+ return (EOPNOTSUPP);
+ }
+ if (so->so_state & SS_ISDISCONNECTED) {
+ SCTP_INP_RUNLOCK(inp);
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ECONNABORTED);
+ return (ECONNABORTED);
+ }
+ stcb = LIST_FIRST(&inp->sctp_asoc_list);
+ if (stcb == NULL) {
+ SCTP_INP_RUNLOCK(inp);
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ return (ECONNRESET);
+ }
+ SCTP_TCB_LOCK(stcb);
+ SCTP_INP_RUNLOCK(inp);
+ store = stcb->asoc.primary_destination->ro._l_addr;
+ stcb->asoc.state &= ~SCTP_STATE_IN_ACCEPT_QUEUE;
+ SCTP_TCB_UNLOCK(stcb);
+ switch (store.sa.sa_family) {
+ case AF_INET:
+ {
+ struct sockaddr_in *sin;
+
+ SCTP_MALLOC_SONAME(sin, struct sockaddr_in *, sizeof *sin);
+ if (sin == NULL)
+ return (ENOMEM);
+ sin->sin_family = AF_INET;
+ sin->sin_len = sizeof(*sin);
+ sin->sin_port = ((struct sockaddr_in *)&store)->sin_port;
+ sin->sin_addr = ((struct sockaddr_in *)&store)->sin_addr;
+ *addr = (struct sockaddr *)sin;
+ break;
+ }
+#ifdef INET6
+ case AF_INET6:
+ {
+ struct sockaddr_in6 *sin6;
+
+ SCTP_MALLOC_SONAME(sin6, struct sockaddr_in6 *, sizeof *sin6);
+ if (sin6 == NULL)
+ return (ENOMEM);
+ sin6->sin6_family = AF_INET6;
+ sin6->sin6_len = sizeof(*sin6);
+ sin6->sin6_port = ((struct sockaddr_in6 *)&store)->sin6_port;
+
+ sin6->sin6_addr = ((struct sockaddr_in6 *)&store)->sin6_addr;
+ if ((error = sa6_recoverscope(sin6)) != 0) {
+ SCTP_FREE_SONAME(sin6);
+ return (error);
+ }
+ *addr = (struct sockaddr *)sin6;
+ break;
+ }
+#endif
+ default:
+ /* TSNH */
+ break;
+ }
+ /* Wake any delayed sleep action */
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE) {
+ SCTP_INP_WLOCK(inp);
+ inp->sctp_flags &= ~SCTP_PCB_FLAGS_DONT_WAKE;
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT) {
+ inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAKEOUTPUT;
+ SCTP_INP_WUNLOCK(inp);
+ SOCKBUF_LOCK(&inp->sctp_socket->so_snd);
+ if (sowriteable(inp->sctp_socket)) {
+ sowwakeup_locked(inp->sctp_socket);
+ } else {
+ SOCKBUF_UNLOCK(&inp->sctp_socket->so_snd);
+ }
+ SCTP_INP_WLOCK(inp);
+ }
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT) {
+ inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAKEINPUT;
+ SCTP_INP_WUNLOCK(inp);
+ SOCKBUF_LOCK(&inp->sctp_socket->so_rcv);
+ if (soreadable(inp->sctp_socket)) {
+ sctp_defered_wakeup_cnt++;
+ sorwakeup_locked(inp->sctp_socket);
+ } else {
+ SOCKBUF_UNLOCK(&inp->sctp_socket->so_rcv);
+ }
+ SCTP_INP_WLOCK(inp);
+ }
+ SCTP_INP_WUNLOCK(inp);
+ }
+ if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
+ SCTP_TCB_LOCK(stcb);
+ sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_7);
+ }
+ return (0);
+}
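+
+/*
+ * Note added for clarity: the wakeup block above completes the deferred
+ * wakeup scheme.  While SCTP_PCB_FLAGS_DONT_WAKE is set, pending socket
+ * wakeups are only recorded in SCTP_PCB_FLAGS_WAKEOUTPUT and
+ * SCTP_PCB_FLAGS_WAKEINPUT; accept clears DONT_WAKE and replays whatever
+ * wakeups were suppressed while it was set.
+ */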
+
+int
+sctp_ingetaddr(struct socket *so, struct sockaddr **addr)
+{
+ struct sockaddr_in *sin;
+ uint32_t vrf_id;
+ struct sctp_inpcb *inp;
+ struct sctp_ifa *sctp_ifa;
+
+ /*
+ * Do the malloc first in case it blocks.
+ */
+ SCTP_MALLOC_SONAME(sin, struct sockaddr_in *, sizeof *sin);
+ if (sin == NULL)
+ return (ENOMEM);
+ sin->sin_family = AF_INET;
+ sin->sin_len = sizeof(*sin);
+ inp = (struct sctp_inpcb *)so->so_pcb;
+ if (!inp) {
+ SCTP_FREE_SONAME(sin);
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ return ECONNRESET;
+ }
+ SCTP_INP_RLOCK(inp);
+ sin->sin_port = inp->sctp_lport;
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
+ struct sctp_tcb *stcb;
+ struct sockaddr_in *sin_a;
+ struct sctp_nets *net;
+ int fnd;
+
+ stcb = LIST_FIRST(&inp->sctp_asoc_list);
+ if (stcb == NULL) {
+ goto notConn;
+ }
+ fnd = 0;
+ sin_a = NULL;
+ SCTP_TCB_LOCK(stcb);
+ TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+ sin_a = (struct sockaddr_in *)&net->ro._l_addr;
+ if (sin_a == NULL)
+ /* this will make coverity happy */
+ continue;
+
+ if (sin_a->sin_family == AF_INET) {
+ fnd = 1;
+ break;
+ }
+ }
+ if ((!fnd) || (sin_a == NULL)) {
+ /* punt */
+ SCTP_TCB_UNLOCK(stcb);
+ goto notConn;
+ }
+ vrf_id = inp->def_vrf_id;
+ sctp_ifa = sctp_source_address_selection(inp,
+ stcb,
+ (sctp_route_t *) & net->ro,
+ net, 0, vrf_id);
+ if (sctp_ifa) {
+ sin->sin_addr = sctp_ifa->address.sin.sin_addr;
+ sctp_free_ifa(sctp_ifa);
+ }
+ SCTP_TCB_UNLOCK(stcb);
+ } else {
+ /* For the bound all case you get back 0 */
+ notConn:
+ sin->sin_addr.s_addr = 0;
+ }
+
+ } else {
+ /* Take the first IPv4 address in the list */
+ struct sctp_laddr *laddr;
+ int fnd = 0;
+
+ LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
+ if (laddr->ifa->address.sa.sa_family == AF_INET) {
+ struct sockaddr_in *sin_a;
+
+ sin_a = (struct sockaddr_in *)&laddr->ifa->address.sa;
+ sin->sin_addr = sin_a->sin_addr;
+ fnd = 1;
+ break;
+ }
+ }
+ if (!fnd) {
+ SCTP_FREE_SONAME(sin);
+ SCTP_INP_RUNLOCK(inp);
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT);
+ return ENOENT;
+ }
+ }
+ SCTP_INP_RUNLOCK(inp);
+ (*addr) = (struct sockaddr *)sin;
+ return (0);
+}
+
+int
+sctp_peeraddr(struct socket *so, struct sockaddr **addr)
+{
+	struct sockaddr_in *sin;
+ int fnd;
+ struct sockaddr_in *sin_a;
+ struct sctp_inpcb *inp;
+ struct sctp_tcb *stcb;
+ struct sctp_nets *net;
+
+	/* Verify the pcb before the malloc below, which can block. */
+ inp = (struct sctp_inpcb *)so->so_pcb;
+ if ((inp == NULL) ||
+ ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)) {
+ /* UDP type and listeners will drop out here */
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTCONN);
+ return (ENOTCONN);
+ }
+ SCTP_MALLOC_SONAME(sin, struct sockaddr_in *, sizeof *sin);
+ if (sin == NULL)
+ return (ENOMEM);
+ sin->sin_family = AF_INET;
+ sin->sin_len = sizeof(*sin);
+
+	/* We must recapture the pcb in case we blocked */
+ inp = (struct sctp_inpcb *)so->so_pcb;
+ if (!inp) {
+ SCTP_FREE_SONAME(sin);
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ return ECONNRESET;
+ }
+ SCTP_INP_RLOCK(inp);
+ stcb = LIST_FIRST(&inp->sctp_asoc_list);
+ if (stcb) {
+ SCTP_TCB_LOCK(stcb);
+ }
+ SCTP_INP_RUNLOCK(inp);
+ if (stcb == NULL) {
+ SCTP_FREE_SONAME(sin);
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+ return ECONNRESET;
+ }
+ fnd = 0;
+ TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+ sin_a = (struct sockaddr_in *)&net->ro._l_addr;
+ if (sin_a->sin_family == AF_INET) {
+ fnd = 1;
+ sin->sin_port = stcb->rport;
+ sin->sin_addr = sin_a->sin_addr;
+ break;
+ }
+ }
+ SCTP_TCB_UNLOCK(stcb);
+ if (!fnd) {
+ /* No IPv4 address */
+ SCTP_FREE_SONAME(sin);
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT);
+ return ENOENT;
+ }
+ (*addr) = (struct sockaddr *)sin;
+ return (0);
+}
+
+struct pr_usrreqs sctp_usrreqs = {
+ .pru_abort = sctp_abort,
+ .pru_accept = sctp_accept,
+ .pru_attach = sctp_attach,
+ .pru_bind = sctp_bind,
+ .pru_connect = sctp_connect,
+ .pru_control = in_control,
+ .pru_close = sctp_close,
+ .pru_detach = sctp_close,
+ .pru_sopoll = sopoll_generic,
+ .pru_flush = sctp_flush,
+ .pru_disconnect = sctp_disconnect,
+ .pru_listen = sctp_listen,
+ .pru_peeraddr = sctp_peeraddr,
+ .pru_send = sctp_sendm,
+ .pru_shutdown = sctp_shutdown,
+ .pru_sockaddr = sctp_ingetaddr,
+ .pru_sosend = sctp_sosend,
+ .pru_soreceive = sctp_soreceive
+};
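+
+/*
+ * Note added for clarity: this table wires the generic socket layer into
+ * SCTP; e.g. listen(2) reaches sctp_listen() and accept(2) reaches
+ * sctp_accept() through the pru_ entries above.  Both pru_close and
+ * pru_detach map to sctp_close().
+ */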
diff --git a/rtems/freebsd/netinet/sctp_var.h b/rtems/freebsd/netinet/sctp_var.h
new file mode 100644
index 00000000..2fed2e23
--- /dev/null
+++ b/rtems/freebsd/netinet/sctp_var.h
@@ -0,0 +1,336 @@
+/*-
+ * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* $KAME: sctp_var.h,v 1.24 2005/03/06 16:04:19 itojun Exp $ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#ifndef _NETINET_SCTP_VAR_HH_
+#define _NETINET_SCTP_VAR_HH_
+
+#include <rtems/freebsd/netinet/sctp_uio.h>
+
+#if defined(_KERNEL) || defined(__Userspace__)
+
+extern struct pr_usrreqs sctp_usrreqs;
+
+
+#define sctp_feature_on(inp, feature) (inp->sctp_features |= feature)
+#define sctp_feature_off(inp, feature) (inp->sctp_features &= ~feature)
+#define sctp_is_feature_on(inp, feature) ((inp->sctp_features & feature) == feature)
+#define sctp_is_feature_off(inp, feature) ((inp->sctp_features & feature) == 0)
+
+
+/* managing mobility_feature in inpcb (by micchie) */
+#define sctp_mobility_feature_on(inp, feature) (inp->sctp_mobility_features |= feature)
+#define sctp_mobility_feature_off(inp, feature) (inp->sctp_mobility_features &= ~feature)
+#define sctp_is_mobility_feature_on(inp, feature) (inp->sctp_mobility_features & feature)
+#define sctp_is_mobility_feature_off(inp, feature) ((inp->sctp_mobility_features & feature) == 0)
+
+#define sctp_maxspace(sb) (max((sb)->sb_hiwat,SCTP_MINIMAL_RWND))
+
+#define sctp_sbspace(asoc, sb) ((long) ((sctp_maxspace(sb) > (asoc)->sb_cc) ? (sctp_maxspace(sb) - (asoc)->sb_cc) : 0))
+
+#define sctp_sbspace_failedmsgs(sb) ((long) ((sctp_maxspace(sb) > (sb)->sb_cc) ? (sctp_maxspace(sb) - (sb)->sb_cc) : 0))
+
+#define sctp_sbspace_sub(a,b) ((a > b) ? (a - b) : 0)
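+
+/*
+ * Worked example, added for clarity (assuming SCTP_MINIMAL_RWND is 4096):
+ * with sb_hiwat = 64000 and asoc->sb_cc = 10000, sctp_maxspace() is
+ * max(64000, 4096) = 64000 and sctp_sbspace() is 64000 - 10000 = 54000.
+ * Once sb_cc exceeds the high-water mark, the macros clamp the result to
+ * 0 instead of going negative.
+ */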
+
+/*
+ * I tried to cache the readq entries at one point. But the reality
+ * is that it did not add any performance since this meant we had to
+ * lock the STCB on read. And at that point once you have to do an
+ * extra lock, it really does not matter if the lock is in the ZONE
+ * stuff or in our code. Note that this same problem would occur with
+ * an mbuf cache as well so it is not really worth doing, at least
+ * right now :-D
+ */
+
+#define sctp_free_a_readq(_stcb, _readq) { \
+ SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_readq), (_readq)); \
+ SCTP_DECR_READQ_COUNT(); \
+}
+
+#define sctp_alloc_a_readq(_stcb, _readq) { \
+ (_readq) = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_readq), struct sctp_queued_to_read); \
+ if ((_readq)) { \
+ SCTP_INCR_READQ_COUNT(); \
+ } \
+}
+
+#define sctp_free_a_strmoq(_stcb, _strmoq) { \
+ if ((_strmoq)->holds_key_ref) { \
+ sctp_auth_key_release(stcb, sp->auth_keyid); \
+ (_strmoq)->holds_key_ref = 0; \
+ } \
+ SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_strmoq), (_strmoq)); \
+ SCTP_DECR_STRMOQ_COUNT(); \
+}
+
+#define sctp_alloc_a_strmoq(_stcb, _strmoq) { \
+ (_strmoq) = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_strmoq), struct sctp_stream_queue_pending); \
+ if ((_strmoq)) { \
+ memset(_strmoq, 0, sizeof(struct sctp_stream_queue_pending)); \
+ SCTP_INCR_STRMOQ_COUNT(); \
+ (_strmoq)->holds_key_ref = 0; \
+ } \
+}
+
+#define sctp_free_a_chunk(_stcb, _chk) { \
+ if ((_chk)->holds_key_ref) {\
+ sctp_auth_key_release((_stcb), (_chk)->auth_keyid); \
+ (_chk)->holds_key_ref = 0; \
+ } \
+ if (_stcb) { \
+ SCTP_TCB_LOCK_ASSERT((_stcb)); \
+ if ((_chk)->whoTo) { \
+ sctp_free_remote_addr((_chk)->whoTo); \
+ (_chk)->whoTo = NULL; \
+ } \
+ if (((_stcb)->asoc.free_chunk_cnt > SCTP_BASE_SYSCTL(sctp_asoc_free_resc_limit)) || \
+ (SCTP_BASE_INFO(ipi_free_chunks) > SCTP_BASE_SYSCTL(sctp_system_free_resc_limit))) { \
+ SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_chunk), (_chk)); \
+ SCTP_DECR_CHK_COUNT(); \
+ } else { \
+ TAILQ_INSERT_TAIL(&(_stcb)->asoc.free_chunks, (_chk), sctp_next); \
+ (_stcb)->asoc.free_chunk_cnt++; \
+ atomic_add_int(&SCTP_BASE_INFO(ipi_free_chunks), 1); \
+ } \
+ } else { \
+ SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_chunk), (_chk)); \
+ SCTP_DECR_CHK_COUNT(); \
+ } \
+}
+
+#define sctp_alloc_a_chunk(_stcb, _chk) { \
+ if (TAILQ_EMPTY(&(_stcb)->asoc.free_chunks)) { \
+ (_chk) = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_chunk), struct sctp_tmit_chunk); \
+ if ((_chk)) { \
+ SCTP_INCR_CHK_COUNT(); \
+ (_chk)->whoTo = NULL; \
+ (_chk)->holds_key_ref = 0; \
+ } \
+ } else { \
+ (_chk) = TAILQ_FIRST(&(_stcb)->asoc.free_chunks); \
+ TAILQ_REMOVE(&(_stcb)->asoc.free_chunks, (_chk), sctp_next); \
+ atomic_subtract_int(&SCTP_BASE_INFO(ipi_free_chunks), 1); \
+ (_chk)->holds_key_ref = 0; \
+ SCTP_STAT_INCR(sctps_cached_chk); \
+ (_stcb)->asoc.free_chunk_cnt--; \
+ } \
+}
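+
+/*
+ * Note added for clarity: the two macros above form a small free-list
+ * cache.  sctp_free_a_chunk() only returns a chunk to the zone allocator
+ * once the per-association or system-wide limits (both sysctl-driven)
+ * are exceeded; otherwise the chunk is parked on asoc.free_chunks so the
+ * next sctp_alloc_a_chunk() can reuse it without a zone round trip.
+ */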
+
+
+#define sctp_free_remote_addr(__net) { \
+ if ((__net)) { \
+ if (SCTP_DECREMENT_AND_CHECK_REFCOUNT(&(__net)->ref_count)) { \
+ (void)SCTP_OS_TIMER_STOP(&(__net)->rxt_timer.timer); \
+ (void)SCTP_OS_TIMER_STOP(&(__net)->pmtu_timer.timer); \
+ (void)SCTP_OS_TIMER_STOP(&(__net)->fr_timer.timer); \
+ if ((__net)->ro.ro_rt) { \
+ RTFREE((__net)->ro.ro_rt); \
+ (__net)->ro.ro_rt = NULL; \
+ } \
+ if ((__net)->src_addr_selected) { \
+ sctp_free_ifa((__net)->ro._s_addr); \
+ (__net)->ro._s_addr = NULL; \
+ } \
+ (__net)->src_addr_selected = 0; \
+ (__net)->dest_state = SCTP_ADDR_NOT_REACHABLE; \
+ SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_net), (__net)); \
+ SCTP_DECR_RADDR_COUNT(); \
+ } \
+ } \
+}
+
+#define sctp_sbfree(ctl, stcb, sb, m) { \
+ SCTP_SAVE_ATOMIC_DECREMENT(&(sb)->sb_cc, SCTP_BUF_LEN((m))); \
+ SCTP_SAVE_ATOMIC_DECREMENT(&(sb)->sb_mbcnt, MSIZE); \
+ if (((ctl)->do_not_ref_stcb == 0) && stcb) {\
+ SCTP_SAVE_ATOMIC_DECREMENT(&(stcb)->asoc.sb_cc, SCTP_BUF_LEN((m))); \
+ SCTP_SAVE_ATOMIC_DECREMENT(&(stcb)->asoc.my_rwnd_control_len, MSIZE); \
+ } \
+ if (SCTP_BUF_TYPE(m) != MT_DATA && SCTP_BUF_TYPE(m) != MT_HEADER && \
+ SCTP_BUF_TYPE(m) != MT_OOBDATA) \
+ atomic_subtract_int(&(sb)->sb_ctl,SCTP_BUF_LEN((m))); \
+}
+
+#define sctp_sballoc(stcb, sb, m) { \
+ atomic_add_int(&(sb)->sb_cc,SCTP_BUF_LEN((m))); \
+ atomic_add_int(&(sb)->sb_mbcnt, MSIZE); \
+ if (stcb) { \
+ atomic_add_int(&(stcb)->asoc.sb_cc,SCTP_BUF_LEN((m))); \
+ atomic_add_int(&(stcb)->asoc.my_rwnd_control_len, MSIZE); \
+ } \
+ if (SCTP_BUF_TYPE(m) != MT_DATA && SCTP_BUF_TYPE(m) != MT_HEADER && \
+ SCTP_BUF_TYPE(m) != MT_OOBDATA) \
+ atomic_add_int(&(sb)->sb_ctl,SCTP_BUF_LEN((m))); \
+}
+
+
+#define sctp_ucount_incr(val) { \
+ val++; \
+}
+
+#define sctp_ucount_decr(val) { \
+ if (val > 0) { \
+ val--; \
+ } else { \
+ val = 0; \
+ } \
+}
+
+#define sctp_mbuf_crush(data) do { \
+ struct mbuf *_m; \
+ _m = (data); \
+ while(_m && (SCTP_BUF_LEN(_m) == 0)) { \
+ (data) = SCTP_BUF_NEXT(_m); \
+ SCTP_BUF_NEXT(_m) = NULL; \
+ sctp_m_free(_m); \
+ _m = (data); \
+ } \
+} while (0)
+
+#define sctp_flight_size_decrease(tp1) do { \
+ if (tp1->whoTo->flight_size >= tp1->book_size) \
+ tp1->whoTo->flight_size -= tp1->book_size; \
+ else \
+ tp1->whoTo->flight_size = 0; \
+} while (0)
+
+#define sctp_flight_size_increase(tp1) do { \
+ (tp1)->whoTo->flight_size += (tp1)->book_size; \
+} while (0)
+
+#ifdef SCTP_FS_SPEC_LOG
+#define sctp_total_flight_decrease(stcb, tp1) do { \
+	if (stcb->asoc.fs_index >= SCTP_FS_SPEC_LOG_SIZE) \
+ stcb->asoc.fs_index = 0;\
+ stcb->asoc.fslog[stcb->asoc.fs_index].total_flight = stcb->asoc.total_flight; \
+ stcb->asoc.fslog[stcb->asoc.fs_index].tsn = tp1->rec.data.TSN_seq; \
+ stcb->asoc.fslog[stcb->asoc.fs_index].book = tp1->book_size; \
+ stcb->asoc.fslog[stcb->asoc.fs_index].sent = tp1->sent; \
+ stcb->asoc.fslog[stcb->asoc.fs_index].incr = 0; \
+ stcb->asoc.fslog[stcb->asoc.fs_index].decr = 1; \
+ stcb->asoc.fs_index++; \
+ tp1->window_probe = 0; \
+ if (stcb->asoc.total_flight >= tp1->book_size) { \
+ stcb->asoc.total_flight -= tp1->book_size; \
+ if (stcb->asoc.total_flight_count > 0) \
+ stcb->asoc.total_flight_count--; \
+ } else { \
+ stcb->asoc.total_flight = 0; \
+ stcb->asoc.total_flight_count = 0; \
+ } \
+} while (0)
+
+#define sctp_total_flight_increase(stcb, tp1) do { \
+	if (stcb->asoc.fs_index >= SCTP_FS_SPEC_LOG_SIZE) \
+ stcb->asoc.fs_index = 0;\
+ stcb->asoc.fslog[stcb->asoc.fs_index].total_flight = stcb->asoc.total_flight; \
+ stcb->asoc.fslog[stcb->asoc.fs_index].tsn = tp1->rec.data.TSN_seq; \
+ stcb->asoc.fslog[stcb->asoc.fs_index].book = tp1->book_size; \
+ stcb->asoc.fslog[stcb->asoc.fs_index].sent = tp1->sent; \
+ stcb->asoc.fslog[stcb->asoc.fs_index].incr = 1; \
+ stcb->asoc.fslog[stcb->asoc.fs_index].decr = 0; \
+ stcb->asoc.fs_index++; \
+ (stcb)->asoc.total_flight_count++; \
+ (stcb)->asoc.total_flight += (tp1)->book_size; \
+} while (0)
+
+#else
+
+#define sctp_total_flight_decrease(stcb, tp1) do { \
+ tp1->window_probe = 0; \
+ if (stcb->asoc.total_flight >= tp1->book_size) { \
+ stcb->asoc.total_flight -= tp1->book_size; \
+ if (stcb->asoc.total_flight_count > 0) \
+ stcb->asoc.total_flight_count--; \
+ } else { \
+ stcb->asoc.total_flight = 0; \
+ stcb->asoc.total_flight_count = 0; \
+ } \
+} while (0)
+
+#define sctp_total_flight_increase(stcb, tp1) do { \
+ (stcb)->asoc.total_flight_count++; \
+ (stcb)->asoc.total_flight += (tp1)->book_size; \
+} while (0)
+
+#endif
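+
+/*
+ * Worked example, added for clarity: with tp1->book_size = 1200 and
+ * whoTo->flight_size = 800, sctp_flight_size_decrease() clamps
+ * flight_size to 0 rather than letting it underflow, and
+ * sctp_total_flight_decrease() applies the same clamp to the
+ * association-wide total_flight and total_flight_count.
+ */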
+
+
+struct sctp_nets;
+struct sctp_inpcb;
+struct sctp_tcb;
+struct sctphdr;
+
+
+void sctp_close(struct socket *so);
+int sctp_disconnect(struct socket *so);
+
+void sctp_ctlinput __P((int, struct sockaddr *, void *));
+int sctp_ctloutput __P((struct socket *, struct sockopt *));
+void sctp_input_with_port __P((struct mbuf *, int, uint16_t));
+void sctp_input __P((struct mbuf *, int));
+void sctp_pathmtu_adjustment __P((struct sctp_inpcb *, struct sctp_tcb *, struct sctp_nets *, uint16_t));
+void sctp_drain __P((void));
+void sctp_init __P((void));
+
+void sctp_finish(void);
+
+int sctp_flush(struct socket *, int);
+int sctp_shutdown __P((struct socket *));
+void sctp_notify __P((struct sctp_inpcb *, struct ip *ip, struct sctphdr *,
+	struct sockaddr *, struct sctp_tcb *, struct sctp_nets *));
+
+int sctp_bindx(struct socket *, int, struct sockaddr_storage *,
+	int, int, struct proc *);
+
+/* can't use sctp_assoc_t here */
+int sctp_peeloff(struct socket *, struct socket *, int, caddr_t, int *);
+
+int sctp_ingetaddr(struct socket *, struct sockaddr **);
+
+int sctp_peeraddr(struct socket *, struct sockaddr **);
+
+int sctp_listen(struct socket *, int, struct thread *);
+
+int sctp_accept(struct socket *, struct sockaddr **);
+
+#endif /* _KERNEL || __Userspace__ */
+
+#endif /* !_NETINET_SCTP_VAR_HH_ */
diff --git a/rtems/freebsd/netinet/sctputil.c b/rtems/freebsd/netinet/sctputil.c
new file mode 100644
index 00000000..aa0b690e
--- /dev/null
+++ b/rtems/freebsd/netinet/sctputil.c
@@ -0,0 +1,6977 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* $KAME: sctputil.c,v 1.37 2005/03/07 23:26:09 itojun Exp $ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/freebsd/netinet/sctp_os.h>
+#include <rtems/freebsd/netinet/sctp_pcb.h>
+#include <rtems/freebsd/netinet/sctputil.h>
+#include <rtems/freebsd/netinet/sctp_var.h>
+#include <rtems/freebsd/netinet/sctp_sysctl.h>
+#ifdef INET6
+#endif
+#include <rtems/freebsd/netinet/sctp_header.h>
+#include <rtems/freebsd/netinet/sctp_output.h>
+#include <rtems/freebsd/netinet/sctp_uio.h>
+#include <rtems/freebsd/netinet/sctp_timer.h>
+#include <rtems/freebsd/netinet/sctp_indata.h>	/* for sctp_deliver_data() */
+#include <rtems/freebsd/netinet/sctp_auth.h>
+#include <rtems/freebsd/netinet/sctp_asconf.h>
+#include <rtems/freebsd/netinet/sctp_cc_functions.h>
+#include <rtems/freebsd/netinet/sctp_bsd_addr.h>
+
+
+#ifndef KTR_SCTP
+#define KTR_SCTP KTR_SUBSYS
+#endif
+
+void
+sctp_sblog(struct sockbuf *sb,
+ struct sctp_tcb *stcb, int from, int incr)
+{
+ struct sctp_cwnd_log sctp_clog;
+
+ sctp_clog.x.sb.stcb = stcb;
+ sctp_clog.x.sb.so_sbcc = sb->sb_cc;
+ if (stcb)
+ sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
+ else
+ sctp_clog.x.sb.stcb_sbcc = 0;
+ sctp_clog.x.sb.incr = incr;
+ SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
+ SCTP_LOG_EVENT_SB,
+ from,
+ sctp_clog.x.misc.log1,
+ sctp_clog.x.misc.log2,
+ sctp_clog.x.misc.log3,
+ sctp_clog.x.misc.log4);
+}
+
+void
+sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
+{
+ struct sctp_cwnd_log sctp_clog;
+
+ sctp_clog.x.close.inp = (void *)inp;
+ sctp_clog.x.close.sctp_flags = inp->sctp_flags;
+ if (stcb) {
+ sctp_clog.x.close.stcb = (void *)stcb;
+ sctp_clog.x.close.state = (uint16_t) stcb->asoc.state;
+ } else {
+ sctp_clog.x.close.stcb = 0;
+ sctp_clog.x.close.state = 0;
+ }
+ sctp_clog.x.close.loc = loc;
+ SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
+ SCTP_LOG_EVENT_CLOSE,
+ 0,
+ sctp_clog.x.misc.log1,
+ sctp_clog.x.misc.log2,
+ sctp_clog.x.misc.log3,
+ sctp_clog.x.misc.log4);
+}
+
+
+void
+rto_logging(struct sctp_nets *net, int from)
+{
+ struct sctp_cwnd_log sctp_clog;
+
+ memset(&sctp_clog, 0, sizeof(sctp_clog));
+ sctp_clog.x.rto.net = (void *)net;
+ sctp_clog.x.rto.rtt = net->prev_rtt;
+ SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
+ SCTP_LOG_EVENT_RTT,
+ from,
+ sctp_clog.x.misc.log1,
+ sctp_clog.x.misc.log2,
+ sctp_clog.x.misc.log3,
+ sctp_clog.x.misc.log4);
+
+}
+
+void
+sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
+{
+ struct sctp_cwnd_log sctp_clog;
+
+ sctp_clog.x.strlog.stcb = stcb;
+ sctp_clog.x.strlog.n_tsn = tsn;
+ sctp_clog.x.strlog.n_sseq = sseq;
+ sctp_clog.x.strlog.e_tsn = 0;
+ sctp_clog.x.strlog.e_sseq = 0;
+ sctp_clog.x.strlog.strm = stream;
+ SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
+ SCTP_LOG_EVENT_STRM,
+ from,
+ sctp_clog.x.misc.log1,
+ sctp_clog.x.misc.log2,
+ sctp_clog.x.misc.log3,
+ sctp_clog.x.misc.log4);
+
+}
+
+void
+sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
+{
+ struct sctp_cwnd_log sctp_clog;
+
+ sctp_clog.x.nagle.stcb = (void *)stcb;
+ sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
+ sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
+ sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
+ sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
+ SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
+ SCTP_LOG_EVENT_NAGLE,
+ action,
+ sctp_clog.x.misc.log1,
+ sctp_clog.x.misc.log2,
+ sctp_clog.x.misc.log3,
+ sctp_clog.x.misc.log4);
+}
+
+
+void
+sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
+{
+ struct sctp_cwnd_log sctp_clog;
+
+ sctp_clog.x.sack.cumack = cumack;
+ sctp_clog.x.sack.oldcumack = old_cumack;
+ sctp_clog.x.sack.tsn = tsn;
+ sctp_clog.x.sack.numGaps = gaps;
+ sctp_clog.x.sack.numDups = dups;
+ SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
+ SCTP_LOG_EVENT_SACK,
+ from,
+ sctp_clog.x.misc.log1,
+ sctp_clog.x.misc.log2,
+ sctp_clog.x.misc.log3,
+ sctp_clog.x.misc.log4);
+}
+
+void
+sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
+{
+ struct sctp_cwnd_log sctp_clog;
+
+ memset(&sctp_clog, 0, sizeof(sctp_clog));
+ sctp_clog.x.map.base = map;
+ sctp_clog.x.map.cum = cum;
+ sctp_clog.x.map.high = high;
+ SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
+ SCTP_LOG_EVENT_MAP,
+ from,
+ sctp_clog.x.misc.log1,
+ sctp_clog.x.misc.log2,
+ sctp_clog.x.misc.log3,
+ sctp_clog.x.misc.log4);
+}
+
+void
+sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn,
+ int from)
+{
+ struct sctp_cwnd_log sctp_clog;
+
+ memset(&sctp_clog, 0, sizeof(sctp_clog));
+ sctp_clog.x.fr.largest_tsn = biggest_tsn;
+ sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
+ sctp_clog.x.fr.tsn = tsn;
+ SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
+ SCTP_LOG_EVENT_FR,
+ from,
+ sctp_clog.x.misc.log1,
+ sctp_clog.x.misc.log2,
+ sctp_clog.x.misc.log3,
+ sctp_clog.x.misc.log4);
+
+}
+
+
+void
+sctp_log_mb(struct mbuf *m, int from)
+{
+ struct sctp_cwnd_log sctp_clog;
+
+ sctp_clog.x.mb.mp = m;
+ sctp_clog.x.mb.mbuf_flags = (uint8_t) (SCTP_BUF_GET_FLAGS(m));
+ sctp_clog.x.mb.size = (uint16_t) (SCTP_BUF_LEN(m));
+ sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
+ if (SCTP_BUF_IS_EXTENDED(m)) {
+ sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
+ sctp_clog.x.mb.refcnt = (uint8_t) (SCTP_BUF_EXTEND_REFCNT(m));
+ } else {
+ sctp_clog.x.mb.ext = 0;
+ sctp_clog.x.mb.refcnt = 0;
+ }
+ SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
+ SCTP_LOG_EVENT_MBUF,
+ from,
+ sctp_clog.x.misc.log1,
+ sctp_clog.x.misc.log2,
+ sctp_clog.x.misc.log3,
+ sctp_clog.x.misc.log4);
+}
+
+
+void
+sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk,
+ int from)
+{
+ struct sctp_cwnd_log sctp_clog;
+
+ if (control == NULL) {
+ SCTP_PRINTF("Gak log of NULL?\n");
+ return;
+ }
+ sctp_clog.x.strlog.stcb = control->stcb;
+ sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
+ sctp_clog.x.strlog.n_sseq = control->sinfo_ssn;
+ sctp_clog.x.strlog.strm = control->sinfo_stream;
+ if (poschk != NULL) {
+ sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
+ sctp_clog.x.strlog.e_sseq = poschk->sinfo_ssn;
+ } else {
+ sctp_clog.x.strlog.e_tsn = 0;
+ sctp_clog.x.strlog.e_sseq = 0;
+ }
+ SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
+ SCTP_LOG_EVENT_STRM,
+ from,
+ sctp_clog.x.misc.log1,
+ sctp_clog.x.misc.log2,
+ sctp_clog.x.misc.log3,
+ sctp_clog.x.misc.log4);
+
+}
+
+void
+sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
+{
+ struct sctp_cwnd_log sctp_clog;
+
+ sctp_clog.x.cwnd.net = net;
+ if (stcb->asoc.send_queue_cnt > 255)
+ sctp_clog.x.cwnd.cnt_in_send = 255;
+ else
+ sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
+ if (stcb->asoc.stream_queue_cnt > 255)
+ sctp_clog.x.cwnd.cnt_in_str = 255;
+ else
+ sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
+
+ if (net) {
+ sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
+ sctp_clog.x.cwnd.inflight = net->flight_size;
+ sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
+ sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
+ sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
+ }
+ if (SCTP_CWNDLOG_PRESEND == from) {
+ sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
+ }
+ sctp_clog.x.cwnd.cwnd_augment = augment;
+ SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
+ SCTP_LOG_EVENT_CWND,
+ from,
+ sctp_clog.x.misc.log1,
+ sctp_clog.x.misc.log2,
+ sctp_clog.x.misc.log3,
+ sctp_clog.x.misc.log4);
+
+}
+
+void
+sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
+{
+ struct sctp_cwnd_log sctp_clog;
+
+ memset(&sctp_clog, 0, sizeof(sctp_clog));
+ if (inp) {
+ sctp_clog.x.lock.sock = (void *)inp->sctp_socket;
+
+ } else {
+ sctp_clog.x.lock.sock = (void *)NULL;
+ }
+ sctp_clog.x.lock.inp = (void *)inp;
+ if (stcb) {
+ sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
+ } else {
+ sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
+ }
+ if (inp) {
+ sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
+ sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
+ } else {
+ sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
+ sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
+ }
+ sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx));
+ if (inp && (inp->sctp_socket)) {
+ sctp_clog.x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
+ sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
+ sctp_clog.x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx));
+ } else {
+ sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
+ sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
+ sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
+ }
+ SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
+ SCTP_LOG_LOCK_EVENT,
+ from,
+ sctp_clog.x.misc.log1,
+ sctp_clog.x.misc.log2,
+ sctp_clog.x.misc.log3,
+ sctp_clog.x.misc.log4);
+
+}
+
+void
+sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
+{
+ struct sctp_cwnd_log sctp_clog;
+
+ memset(&sctp_clog, 0, sizeof(sctp_clog));
+ sctp_clog.x.cwnd.net = net;
+ sctp_clog.x.cwnd.cwnd_new_value = error;
+ sctp_clog.x.cwnd.inflight = net->flight_size;
+ sctp_clog.x.cwnd.cwnd_augment = burst;
+ if (stcb->asoc.send_queue_cnt > 255)
+ sctp_clog.x.cwnd.cnt_in_send = 255;
+ else
+ sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
+ if (stcb->asoc.stream_queue_cnt > 255)
+ sctp_clog.x.cwnd.cnt_in_str = 255;
+ else
+ sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
+ SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
+ SCTP_LOG_EVENT_MAXBURST,
+ from,
+ sctp_clog.x.misc.log1,
+ sctp_clog.x.misc.log2,
+ sctp_clog.x.misc.log3,
+ sctp_clog.x.misc.log4);
+
+}
+
+void
+sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
+{
+ struct sctp_cwnd_log sctp_clog;
+
+ sctp_clog.x.rwnd.rwnd = peers_rwnd;
+ sctp_clog.x.rwnd.send_size = snd_size;
+ sctp_clog.x.rwnd.overhead = overhead;
+ sctp_clog.x.rwnd.new_rwnd = 0;
+ SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
+ SCTP_LOG_EVENT_RWND,
+ from,
+ sctp_clog.x.misc.log1,
+ sctp_clog.x.misc.log2,
+ sctp_clog.x.misc.log3,
+ sctp_clog.x.misc.log4);
+}
+
+void
+sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
+{
+ struct sctp_cwnd_log sctp_clog;
+
+ sctp_clog.x.rwnd.rwnd = peers_rwnd;
+ sctp_clog.x.rwnd.send_size = flight_size;
+ sctp_clog.x.rwnd.overhead = overhead;
+ sctp_clog.x.rwnd.new_rwnd = a_rwndval;
+ SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
+ SCTP_LOG_EVENT_RWND,
+ from,
+ sctp_clog.x.misc.log1,
+ sctp_clog.x.misc.log2,
+ sctp_clog.x.misc.log3,
+ sctp_clog.x.misc.log4);
+}
+
+void
+sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
+{
+ struct sctp_cwnd_log sctp_clog;
+
+ sctp_clog.x.mbcnt.total_queue_size = total_oq;
+ sctp_clog.x.mbcnt.size_change = book;
+ sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
+ sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
+ SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
+ SCTP_LOG_EVENT_MBCNT,
+ from,
+ sctp_clog.x.misc.log1,
+ sctp_clog.x.misc.log2,
+ sctp_clog.x.misc.log3,
+ sctp_clog.x.misc.log4);
+
+}
+
+void
+sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
+{
+ SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
+ SCTP_LOG_MISC_EVENT,
+ from,
+ a, b, c, d);
+}
+
+void
+sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t cumtsn, uint32_t wake_cnt, int from)
+{
+ struct sctp_cwnd_log sctp_clog;
+
+ sctp_clog.x.wake.stcb = (void *)stcb;
+ sctp_clog.x.wake.wake_cnt = wake_cnt;
+ sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
+ sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
+ sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;
+
+ if (stcb->asoc.stream_queue_cnt < 0xff)
+ sctp_clog.x.wake.stream_qcnt = (uint8_t) stcb->asoc.stream_queue_cnt;
+ else
+ sctp_clog.x.wake.stream_qcnt = 0xff;
+
+ if (stcb->asoc.chunks_on_out_queue < 0xff)
+ sctp_clog.x.wake.chunks_on_oque = (uint8_t) stcb->asoc.chunks_on_out_queue;
+ else
+ sctp_clog.x.wake.chunks_on_oque = 0xff;
+
+ sctp_clog.x.wake.sctpflags = 0;
+	/* set in the deferred wakeup path */
+ if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
+ sctp_clog.x.wake.sctpflags |= 1;
+ if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
+ sctp_clog.x.wake.sctpflags |= 2;
+ if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
+ sctp_clog.x.wake.sctpflags |= 4;
+ /* what about the sb */
+ if (stcb->sctp_socket) {
+ struct socket *so = stcb->sctp_socket;
+
+ sctp_clog.x.wake.sbflags = (uint8_t) ((so->so_snd.sb_flags & 0x00ff));
+ } else {
+ sctp_clog.x.wake.sbflags = 0xff;
+ }
+ SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
+ SCTP_LOG_EVENT_WAKE,
+ from,
+ sctp_clog.x.misc.log1,
+ sctp_clog.x.misc.log2,
+ sctp_clog.x.misc.log3,
+ sctp_clog.x.misc.log4);
+
+}
+
+void
+sctp_log_block(uint8_t from, struct socket *so, struct sctp_association *asoc, int sendlen)
+{
+ struct sctp_cwnd_log sctp_clog;
+
+ sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
+ sctp_clog.x.blk.send_sent_qcnt = (uint16_t) (asoc->send_queue_cnt + asoc->sent_queue_cnt);
+ sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
+ sctp_clog.x.blk.stream_qcnt = (uint16_t) asoc->stream_queue_cnt;
+ sctp_clog.x.blk.chunks_on_oque = (uint16_t) asoc->chunks_on_out_queue;
+ sctp_clog.x.blk.flight_size = (uint16_t) (asoc->total_flight / 1024);
+ sctp_clog.x.blk.sndlen = sendlen;
+ SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
+ SCTP_LOG_EVENT_BLOCK,
+ from,
+ sctp_clog.x.misc.log1,
+ sctp_clog.x.misc.log2,
+ sctp_clog.x.misc.log3,
+ sctp_clog.x.misc.log4);
+
+}
+
+int
+sctp_fill_stat_log(void *optval, size_t *optsize)
+{
+ /* May need to fix this if ktrdump does not work */
+ return (0);
+}
+
+#ifdef SCTP_AUDITING_ENABLED
+uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
+static int sctp_audit_indx = 0;
+
+static
+void
+sctp_print_audit_report(void)
+{
+ int i;
+ int cnt;
+
+ cnt = 0;
+ for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
+ if ((sctp_audit_data[i][0] == 0xe0) &&
+ (sctp_audit_data[i][1] == 0x01)) {
+ cnt = 0;
+ SCTP_PRINTF("\n");
+ } else if (sctp_audit_data[i][0] == 0xf0) {
+ cnt = 0;
+ SCTP_PRINTF("\n");
+ } else if ((sctp_audit_data[i][0] == 0xc0) &&
+ (sctp_audit_data[i][1] == 0x01)) {
+ SCTP_PRINTF("\n");
+ cnt = 0;
+ }
+ SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
+ (uint32_t) sctp_audit_data[i][1]);
+ cnt++;
+ if ((cnt % 14) == 0)
+ SCTP_PRINTF("\n");
+ }
+ for (i = 0; i < sctp_audit_indx; i++) {
+ if ((sctp_audit_data[i][0] == 0xe0) &&
+ (sctp_audit_data[i][1] == 0x01)) {
+ cnt = 0;
+ SCTP_PRINTF("\n");
+ } else if (sctp_audit_data[i][0] == 0xf0) {
+ cnt = 0;
+ SCTP_PRINTF("\n");
+ } else if ((sctp_audit_data[i][0] == 0xc0) &&
+ (sctp_audit_data[i][1] == 0x01)) {
+ SCTP_PRINTF("\n");
+ cnt = 0;
+ }
+ SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
+ (uint32_t) sctp_audit_data[i][1]);
+ cnt++;
+ if ((cnt % 14) == 0)
+ SCTP_PRINTF("\n");
+ }
+ SCTP_PRINTF("\n");
+}
+
+void
+sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
+ struct sctp_nets *net)
+{
+ int resend_cnt, tot_out, rep, tot_book_cnt;
+ struct sctp_nets *lnet;
+ struct sctp_tmit_chunk *chk;
+
+ sctp_audit_data[sctp_audit_indx][0] = 0xAA;
+ sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
+ sctp_audit_indx++;
+ if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
+ sctp_audit_indx = 0;
+ }
+ if (inp == NULL) {
+ sctp_audit_data[sctp_audit_indx][0] = 0xAF;
+ sctp_audit_data[sctp_audit_indx][1] = 0x01;
+ sctp_audit_indx++;
+ if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
+ sctp_audit_indx = 0;
+ }
+ return;
+ }
+ if (stcb == NULL) {
+ sctp_audit_data[sctp_audit_indx][0] = 0xAF;
+ sctp_audit_data[sctp_audit_indx][1] = 0x02;
+ sctp_audit_indx++;
+ if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
+ sctp_audit_indx = 0;
+ }
+ return;
+ }
+ sctp_audit_data[sctp_audit_indx][0] = 0xA1;
+ sctp_audit_data[sctp_audit_indx][1] =
+ (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
+ sctp_audit_indx++;
+ if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
+ sctp_audit_indx = 0;
+ }
+ rep = 0;
+ tot_book_cnt = 0;
+ resend_cnt = tot_out = 0;
+ TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
+ if (chk->sent == SCTP_DATAGRAM_RESEND) {
+ resend_cnt++;
+ } else if (chk->sent < SCTP_DATAGRAM_RESEND) {
+ tot_out += chk->book_size;
+ tot_book_cnt++;
+ }
+ }
+ if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
+ sctp_audit_data[sctp_audit_indx][0] = 0xAF;
+ sctp_audit_data[sctp_audit_indx][1] = 0xA1;
+ sctp_audit_indx++;
+ if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
+ sctp_audit_indx = 0;
+ }
+ SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
+ resend_cnt, stcb->asoc.sent_queue_retran_cnt);
+ rep = 1;
+ stcb->asoc.sent_queue_retran_cnt = resend_cnt;
+ sctp_audit_data[sctp_audit_indx][0] = 0xA2;
+ sctp_audit_data[sctp_audit_indx][1] =
+ (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
+ sctp_audit_indx++;
+ if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
+ sctp_audit_indx = 0;
+ }
+ }
+ if (tot_out != stcb->asoc.total_flight) {
+ sctp_audit_data[sctp_audit_indx][0] = 0xAF;
+ sctp_audit_data[sctp_audit_indx][1] = 0xA2;
+ sctp_audit_indx++;
+ if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
+ sctp_audit_indx = 0;
+ }
+ rep = 1;
+ SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
+ (int)stcb->asoc.total_flight);
+ stcb->asoc.total_flight = tot_out;
+ }
+ if (tot_book_cnt != stcb->asoc.total_flight_count) {
+ sctp_audit_data[sctp_audit_indx][0] = 0xAF;
+ sctp_audit_data[sctp_audit_indx][1] = 0xA5;
+ sctp_audit_indx++;
+ if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
+ sctp_audit_indx = 0;
+ }
+ rep = 1;
+ SCTP_PRINTF("tot_flt_book:%d\n", tot_book_cnt);
+
+ stcb->asoc.total_flight_count = tot_book_cnt;
+ }
+ tot_out = 0;
+ TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
+ tot_out += lnet->flight_size;
+ }
+ if (tot_out != stcb->asoc.total_flight) {
+ sctp_audit_data[sctp_audit_indx][0] = 0xAF;
+ sctp_audit_data[sctp_audit_indx][1] = 0xA3;
+ sctp_audit_indx++;
+ if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
+ sctp_audit_indx = 0;
+ }
+ rep = 1;
+ SCTP_PRINTF("real flight:%d net total was %d\n",
+ stcb->asoc.total_flight, tot_out);
+ /* now corrective action */
+ TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
+
+ tot_out = 0;
+ TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
+ if ((chk->whoTo == lnet) &&
+ (chk->sent < SCTP_DATAGRAM_RESEND)) {
+ tot_out += chk->book_size;
+ }
+ }
+ if (lnet->flight_size != tot_out) {
+ SCTP_PRINTF("net:%p flight was %d corrected to %d\n",
+ lnet, lnet->flight_size,
+ tot_out);
+ lnet->flight_size = tot_out;
+ }
+ }
+ }
+ if (rep) {
+ sctp_print_audit_report();
+ }
+}
+
+void
+sctp_audit_log(uint8_t ev, uint8_t fd)
+{
+
+ sctp_audit_data[sctp_audit_indx][0] = ev;
+ sctp_audit_data[sctp_audit_indx][1] = fd;
+ sctp_audit_indx++;
+ if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
+ sctp_audit_indx = 0;
+ }
+}
+
+#endif
+
+/*
+ * sctp_stop_timers_for_shutdown() should be called
+ * when entering the SHUTDOWN_SENT or SHUTDOWN_ACK_SENT
+ * state to make sure that all timers are stopped.
+ */
+void
+sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
+{
+ struct sctp_association *asoc;
+ struct sctp_nets *net;
+
+ asoc = &stcb->asoc;
+
+ (void)SCTP_OS_TIMER_STOP(&asoc->hb_timer.timer);
+ (void)SCTP_OS_TIMER_STOP(&asoc->dack_timer.timer);
+ (void)SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer);
+ (void)SCTP_OS_TIMER_STOP(&asoc->asconf_timer.timer);
+ (void)SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer);
+ (void)SCTP_OS_TIMER_STOP(&asoc->delayed_event_timer.timer);
+ TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+ (void)SCTP_OS_TIMER_STOP(&net->fr_timer.timer);
+ (void)SCTP_OS_TIMER_STOP(&net->pmtu_timer.timer);
+ }
+}
+
+/*
+ * A list of sizes based on typical MTUs, used only when the next hop does
+ * not return a size.
+ */
+static uint32_t sctp_mtu_sizes[] = {
+ 68,
+ 296,
+ 508,
+ 512,
+ 544,
+ 576,
+ 1006,
+ 1492,
+ 1500,
+ 1536,
+ 2002,
+ 2048,
+ 4352,
+ 4464,
+ 8166,
+ 17914,
+ 32000,
+ 65535
+};
+
+/*
+ * Return the largest MTU in the table smaller than val.  If val is not
+ * larger than the smallest entry, just return val.
+ */
+uint32_t
+sctp_get_prev_mtu(uint32_t val)
+{
+ uint32_t i;
+
+ if (val <= sctp_mtu_sizes[0]) {
+ return (val);
+ }
+ for (i = 1; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
+ if (val <= sctp_mtu_sizes[i]) {
+ break;
+ }
+ }
+ return (sctp_mtu_sizes[i - 1]);
+}
+
+/*
+ * Return the smallest MTU larger than val. If there is no
+ * entry, just return val.
+ */
+uint32_t
+sctp_get_next_mtu(struct sctp_inpcb *inp, uint32_t val)
+{
+ /* select another MTU that is just bigger than this one */
+ uint32_t i;
+
+ for (i = 0; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
+ if (val < sctp_mtu_sizes[i]) {
+ return (sctp_mtu_sizes[i]);
+ }
+ }
+ return (val);
+}
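+
+/*
+ * Worked examples, added for clarity: sctp_get_prev_mtu(1400) scans the
+ * table until 1492 >= 1400 and returns the preceding entry, 1006, while
+ * sctp_get_next_mtu(inp, 1400) returns the first strictly larger entry,
+ * 1492.  sctp_get_next_mtu() hands val back unchanged once val >= 65535,
+ * and sctp_get_prev_mtu() does so only when val <= 68.
+ */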
+
+void
+sctp_fill_random_store(struct sctp_pcb *m)
+{
+ /*
+	 * Here we use MD5/SHA-1 to hash our good random numbers together
+	 * with our counter. The result becomes our new good random numbers
+	 * and we then set up to hand these out. Note that we do no locking
+	 * to protect this. That is OK, since if competing callers get here
+	 * we will just get more gobbledygook in the random store, which is
+	 * what we want. There is a danger that two callers will use the
+	 * same random numbers, but that's OK too since that is random as
+	 * well :->
+ */
+ m->store_at = 0;
+ (void)sctp_hmac(SCTP_HMAC, (uint8_t *) m->random_numbers,
+ sizeof(m->random_numbers), (uint8_t *) & m->random_counter,
+ sizeof(m->random_counter), (uint8_t *) m->random_store);
+ m->random_counter++;
+}
+
+uint32_t
+sctp_select_initial_TSN(struct sctp_pcb *inp)
+{
+ /*
+	 * A true implementation should use a random selection process to
+	 * get the initial stream sequence number, using RFC 1750 as a good
+	 * guideline.
+ */
+ uint32_t x, *xp;
+ uint8_t *p;
+ int store_at, new_store;
+
+ if (inp->initial_sequence_debug != 0) {
+ uint32_t ret;
+
+ ret = inp->initial_sequence_debug;
+ inp->initial_sequence_debug++;
+ return (ret);
+ }
+retry:
+ store_at = inp->store_at;
+ new_store = store_at + sizeof(uint32_t);
+ if (new_store >= (SCTP_SIGNATURE_SIZE - 3)) {
+ new_store = 0;
+ }
+ if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
+ goto retry;
+ }
+ if (new_store == 0) {
+ /* Refill the random store */
+ sctp_fill_random_store(inp);
+ }
+ p = &inp->random_store[store_at];
+ xp = (uint32_t *) p;
+ x = *xp;
+ return (x);
+}
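+
+/*
+ * Note added for clarity: the atomic_cmpset_int() loop above reserves a
+ * four-byte slot of the random store per caller without taking a lock;
+ * two racing callers either obtain distinct offsets or one loses the
+ * compare-and-set and retries.  A wrap back to offset 0 triggers a refill
+ * via sctp_fill_random_store().
+ */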
+
+uint32_t
+sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int save_in_twait)
+{
+ uint32_t x, not_done;
+ struct timeval now;
+
+ (void)SCTP_GETTIME_TIMEVAL(&now);
+ not_done = 1;
+ while (not_done) {
+ x = sctp_select_initial_TSN(&inp->sctp_ep);
+ if (x == 0) {
+ /* we never use 0 */
+ continue;
+ }
+ if (sctp_is_vtag_good(inp, x, lport, rport, &now, save_in_twait)) {
+ not_done = 0;
+ }
+ }
+ return (x);
+}
+
+int
+sctp_init_asoc(struct sctp_inpcb *m, struct sctp_tcb *stcb,
+ uint32_t override_tag, uint32_t vrf_id)
+{
+ struct sctp_association *asoc;
+
+ /*
+ * Anything set to zero is taken care of by the allocation routine's
+ * bzero
+ */
+
+ /*
+	 * Up front, select what scoping to apply on addresses I tell my
+	 * peer. Not sure what to do with these right now; we will need to
+	 * come up with a way to set them. We may need to pass them through
+	 * from the caller in the sctp_aloc_assoc() function.
+ */
+ int i;
+
+ asoc = &stcb->asoc;
+ /* init all variables to a known value. */
+ SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_INUSE);
+ asoc->max_burst = m->sctp_ep.max_burst;
+ asoc->heart_beat_delay = TICKS_TO_MSEC(m->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
+ asoc->cookie_life = m->sctp_ep.def_cookie_life;
+ asoc->sctp_cmt_on_off = m->sctp_cmt_on_off;
+ asoc->sctp_nr_sack_on_off = (uint8_t) SCTP_BASE_SYSCTL(sctp_nr_sack_on_off);
+ asoc->sctp_cmt_pf = (uint8_t) SCTP_BASE_SYSCTL(sctp_cmt_pf);
+ asoc->sctp_frag_point = m->sctp_frag_point;
+#ifdef INET
+ asoc->default_tos = m->ip_inp.inp.inp_ip_tos;
+#else
+ asoc->default_tos = 0;
+#endif
+
+#ifdef INET6
+ asoc->default_flowlabel = ((struct in6pcb *)m)->in6p_flowinfo;
+#else
+ asoc->default_flowlabel = 0;
+#endif
+ asoc->sb_send_resv = 0;
+ if (override_tag) {
+ asoc->my_vtag = override_tag;
+ } else {
+ asoc->my_vtag = sctp_select_a_tag(m, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
+ }
+ /* Get the nonce tags */
+ asoc->my_vtag_nonce = sctp_select_a_tag(m, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
+ asoc->peer_vtag_nonce = sctp_select_a_tag(m, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
+ asoc->vrf_id = vrf_id;
+
+ if (sctp_is_feature_on(m, SCTP_PCB_FLAGS_DONOT_HEARTBEAT))
+ asoc->hb_is_disabled = 1;
+ else
+ asoc->hb_is_disabled = 0;
+
+#ifdef SCTP_ASOCLOG_OF_TSNS
+ asoc->tsn_in_at = 0;
+ asoc->tsn_out_at = 0;
+ asoc->tsn_in_wrapped = 0;
+ asoc->tsn_out_wrapped = 0;
+ asoc->cumack_log_at = 0;
+ asoc->cumack_log_atsnt = 0;
+#endif
+#ifdef SCTP_FS_SPEC_LOG
+ asoc->fs_index = 0;
+#endif
+ asoc->refcnt = 0;
+ asoc->assoc_up_sent = 0;
+ asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
+ sctp_select_initial_TSN(&m->sctp_ep);
+ asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
+	/* we are optimistic here */
+ asoc->peer_supports_pktdrop = 1;
+ asoc->peer_supports_nat = 0;
+ asoc->sent_queue_retran_cnt = 0;
+
+ /* for CMT */
+ asoc->last_net_cmt_send_started = NULL;
+
+ /* This will need to be adjusted */
+ asoc->last_cwr_tsn = asoc->init_seq_number - 1;
+ asoc->last_acked_seq = asoc->init_seq_number - 1;
+ asoc->advanced_peer_ack_point = asoc->last_acked_seq;
+ asoc->asconf_seq_in = asoc->last_acked_seq;
+
+ /* here we are different, we hold the next one we expect */
+ asoc->str_reset_seq_in = asoc->last_acked_seq + 1;
+
+ asoc->initial_init_rto_max = m->sctp_ep.initial_init_rto_max;
+ asoc->initial_rto = m->sctp_ep.initial_rto;
+
+ asoc->max_init_times = m->sctp_ep.max_init_times;
+ asoc->max_send_times = m->sctp_ep.max_send_times;
+ asoc->def_net_failure = m->sctp_ep.def_net_failure;
+ asoc->free_chunk_cnt = 0;
+
+ asoc->iam_blocking = 0;
+ /* ECN Nonce initialization */
+ asoc->context = m->sctp_context;
+ asoc->def_send = m->def_send;
+ asoc->ecn_nonce_allowed = 0;
+ asoc->receiver_nonce_sum = 1;
+ asoc->nonce_sum_expect_base = 1;
+ asoc->nonce_sum_check = 1;
+ asoc->nonce_resync_tsn = 0;
+ asoc->nonce_wait_for_ecne = 0;
+ asoc->nonce_wait_tsn = 0;
+ asoc->delayed_ack = TICKS_TO_MSEC(m->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
+ asoc->sack_freq = m->sctp_ep.sctp_sack_freq;
+ asoc->pr_sctp_cnt = 0;
+ asoc->total_output_queue_size = 0;
+
+ if (m->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
+ struct in6pcb *inp6;
+
+		/* It's a V6 socket */
+ inp6 = (struct in6pcb *)m;
+ asoc->ipv6_addr_legal = 1;
+ /* Now look at the binding flag to see if V4 will be legal */
+ if (SCTP_IPV6_V6ONLY(inp6) == 0) {
+ asoc->ipv4_addr_legal = 1;
+ } else {
+ /* V4 addresses are NOT legal on the association */
+ asoc->ipv4_addr_legal = 0;
+ }
+ } else {
+		/* It's a V4 socket, no V6 */
+ asoc->ipv4_addr_legal = 1;
+ asoc->ipv6_addr_legal = 0;
+ }
+
+ asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(m->sctp_socket), SCTP_MINIMAL_RWND);
+ asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(m->sctp_socket);
+
+ asoc->smallest_mtu = m->sctp_frag_point;
+ asoc->minrto = m->sctp_ep.sctp_minrto;
+ asoc->maxrto = m->sctp_ep.sctp_maxrto;
+
+ asoc->locked_on_sending = NULL;
+ asoc->stream_locked_on = 0;
+ asoc->ecn_echo_cnt_onq = 0;
+ asoc->stream_locked = 0;
+
+ asoc->send_sack = 1;
+
+ LIST_INIT(&asoc->sctp_restricted_addrs);
+
+ TAILQ_INIT(&asoc->nets);
+ TAILQ_INIT(&asoc->pending_reply_queue);
+ TAILQ_INIT(&asoc->asconf_ack_sent);
+ /* Setup to fill the hb random cache at first HB */
+ asoc->hb_random_idx = 4;
+
+ asoc->sctp_autoclose_ticks = m->sctp_ep.auto_close_time;
+
+ /*
+ * JRS - Pick the default congestion control module based on the
+ * sysctl.
+ */
+ switch (m->sctp_ep.sctp_default_cc_module) {
+ /* JRS - Standard TCP congestion control */
+ case SCTP_CC_RFC2581:
+ {
+ stcb->asoc.congestion_control_module = SCTP_CC_RFC2581;
+ stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_set_initial_cc_param;
+ stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_cwnd_update_after_sack;
+ stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_cwnd_update_after_fr;
+ stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_cwnd_update_after_timeout;
+ stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_cwnd_update_after_ecn_echo;
+ stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped;
+ stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output;
+ stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_cwnd_update_after_fr_timer;
+ break;
+ }
+ /* JRS - High Speed TCP congestion control (Floyd) */
+ case SCTP_CC_HSTCP:
+ {
+ stcb->asoc.congestion_control_module = SCTP_CC_HSTCP;
+ stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_set_initial_cc_param;
+ stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_hs_cwnd_update_after_sack;
+ stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_hs_cwnd_update_after_fr;
+ stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_cwnd_update_after_timeout;
+ stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_cwnd_update_after_ecn_echo;
+ stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped;
+ stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output;
+ stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_cwnd_update_after_fr_timer;
+ break;
+ }
+ /* JRS - HTCP congestion control */
+ case SCTP_CC_HTCP:
+ {
+ stcb->asoc.congestion_control_module = SCTP_CC_HTCP;
+ stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_htcp_set_initial_cc_param;
+ stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_htcp_cwnd_update_after_sack;
+ stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_htcp_cwnd_update_after_fr;
+ stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_htcp_cwnd_update_after_timeout;
+ stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_htcp_cwnd_update_after_ecn_echo;
+ stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped;
+ stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output;
+ stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_htcp_cwnd_update_after_fr_timer;
+ break;
+ }
+ /* JRS - By default, use RFC2581 */
+ default:
+ {
+ stcb->asoc.congestion_control_module = SCTP_CC_RFC2581;
+ stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_set_initial_cc_param;
+ stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_cwnd_update_after_sack;
+ stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_cwnd_update_after_fr;
+ stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_cwnd_update_after_timeout;
+ stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_cwnd_update_after_ecn_echo;
+ stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped;
+ stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output;
+ stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_cwnd_update_after_fr_timer;
+ break;
+ }
+ }
+
+ /*
+ * Now the stream parameters, here we allocate space for all streams
+ * that we request by default.
+ */
+ asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams =
+ m->sctp_ep.pre_open_stream_count;
+ SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
+ asoc->streamoutcnt * sizeof(struct sctp_stream_out),
+ SCTP_M_STRMO);
+ if (asoc->strmout == NULL) {
+ /* big trouble no memory */
+ SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
+ return (ENOMEM);
+ }
+ for (i = 0; i < asoc->streamoutcnt; i++) {
+		/*
+		 * The inbound side must be set to 0xffff. Also NOTE: when
+		 * we get the INIT-ACK back (for the INIT sender) we MUST
+		 * reduce the count (streamoutcnt), but first check whether
+		 * we sent to any of the upper streams that were dropped (if
+		 * some were). Those that were dropped must be reported to
+		 * the upper layer as failed to send.
+		 */
+ asoc->strmout[i].next_sequence_sent = 0x0;
+ TAILQ_INIT(&asoc->strmout[i].outqueue);
+ asoc->strmout[i].stream_no = i;
+ asoc->strmout[i].last_msg_incomplete = 0;
+ asoc->strmout[i].next_spoke.tqe_next = 0;
+ asoc->strmout[i].next_spoke.tqe_prev = 0;
+ }
+ /* Now the mapping array */
+ asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
+ SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
+ SCTP_M_MAP);
+ if (asoc->mapping_array == NULL) {
+ SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
+ SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
+ return (ENOMEM);
+ }
+ memset(asoc->mapping_array, 0, asoc->mapping_array_size);
+ SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->mapping_array_size,
+ SCTP_M_MAP);
+ if (asoc->nr_mapping_array == NULL) {
+ SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
+ SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
+ SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
+ return (ENOMEM);
+ }
+ memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size);
+
+ /* Now the init of the other outqueues */
+ TAILQ_INIT(&asoc->free_chunks);
+ TAILQ_INIT(&asoc->out_wheel);
+ TAILQ_INIT(&asoc->control_send_queue);
+ TAILQ_INIT(&asoc->asconf_send_queue);
+ TAILQ_INIT(&asoc->send_queue);
+ TAILQ_INIT(&asoc->sent_queue);
+ TAILQ_INIT(&asoc->reasmqueue);
+ TAILQ_INIT(&asoc->resetHead);
+ asoc->max_inbound_streams = m->sctp_ep.max_open_streams_intome;
+ TAILQ_INIT(&asoc->asconf_queue);
+ /* authentication fields */
+ asoc->authinfo.random = NULL;
+ asoc->authinfo.active_keyid = 0;
+ asoc->authinfo.assoc_key = NULL;
+ asoc->authinfo.assoc_keyid = 0;
+ asoc->authinfo.recv_key = NULL;
+ asoc->authinfo.recv_keyid = 0;
+ LIST_INIT(&asoc->shared_keys);
+ asoc->marked_retrans = 0;
+ asoc->timoinit = 0;
+ asoc->timodata = 0;
+ asoc->timosack = 0;
+ asoc->timoshutdown = 0;
+ asoc->timoheartbeat = 0;
+ asoc->timocookie = 0;
+ asoc->timoshutdownack = 0;
+ (void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
+ asoc->discontinuity_time = asoc->start_time;
+	/*
+	 * sa_ignore MEMLEAK: memory is put in the assoc mapping array and
+	 * freed later when the association is freed.
+	 */
+ return (0);
+}
+
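+/*
+ * Debug helper: dump both mapping arrays in hex, 16 bytes per line,
+ * omitting the run of all-zero entries at the tail of each array.
+ */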
+void
+sctp_print_mapping_array(struct sctp_association *asoc)
+{
+ unsigned int i, limit;
+
+ printf("Mapping array size: %d, baseTSN: %8.8x, cumAck: %8.8x, highestTSN: (%8.8x, %8.8x).\n",
+ asoc->mapping_array_size,
+ asoc->mapping_array_base_tsn,
+ asoc->cumulative_tsn,
+ asoc->highest_tsn_inside_map,
+ asoc->highest_tsn_inside_nr_map);
+ for (limit = asoc->mapping_array_size; limit > 1; limit--) {
+ if (asoc->mapping_array[limit - 1]) {
+ break;
+ }
+ }
+ printf("Renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
+ for (i = 0; i < limit; i++) {
+ printf("%2.2x%c", asoc->mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
+ }
+ if (limit % 16)
+ printf("\n");
+ for (limit = asoc->mapping_array_size; limit > 1; limit--) {
+ if (asoc->nr_mapping_array[limit - 1]) {
+ break;
+ }
+ }
+ printf("Non renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
+ for (i = 0; i < limit; i++) {
+ printf("%2.2x%c", asoc->nr_mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
+ }
+ if (limit % 16)
+ printf("\n");
+}
+
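+/*
+ * Grow both mapping arrays in lockstep so they always stay the same size.
+ * On allocation failure, free whatever was obtained and leave the original
+ * arrays untouched.
+ */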
+int
+sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
+{
+ /* mapping array needs to grow */
+ uint8_t *new_array1, *new_array2;
+ uint32_t new_size;
+
+ new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR);
+ SCTP_MALLOC(new_array1, uint8_t *, new_size, SCTP_M_MAP);
+ SCTP_MALLOC(new_array2, uint8_t *, new_size, SCTP_M_MAP);
+ if ((new_array1 == NULL) || (new_array2 == NULL)) {
+ /* can't get more, forget it */
+ SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n", new_size);
+ if (new_array1) {
+ SCTP_FREE(new_array1, SCTP_M_MAP);
+ }
+ if (new_array2) {
+ SCTP_FREE(new_array2, SCTP_M_MAP);
+ }
+ return (-1);
+ }
+ memset(new_array1, 0, new_size);
+ memset(new_array2, 0, new_size);
+ memcpy(new_array1, asoc->mapping_array, asoc->mapping_array_size);
+ memcpy(new_array2, asoc->nr_mapping_array, asoc->mapping_array_size);
+ SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
+ SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
+ asoc->mapping_array = new_array1;
+ asoc->nr_mapping_array = new_array2;
+ asoc->mapping_array_size = new_size;
+ return (0);
+}
+
+
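+/*
+ * Core of the endpoint/association iterator: walk every inp whose flags
+ * and features match, run the per-inp function once, then run the
+ * per-assoc function on each matching stcb. Every
+ * SCTP_ITERATOR_MAX_AT_ONCE associations we drop and re-take the locks so
+ * others can make progress, honoring any stop/exit flags raised meanwhile.
+ */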
+static void
+sctp_iterator_work(struct sctp_iterator *it)
+{
+ int iteration_count = 0;
+ int inp_skip = 0;
+ int first_in = 1;
+ struct sctp_inpcb *tinp;
+
+ SCTP_INP_INFO_RLOCK();
+ SCTP_ITERATOR_LOCK();
+ if (it->inp) {
+ SCTP_INP_RLOCK(it->inp);
+ SCTP_INP_DECR_REF(it->inp);
+ }
+ if (it->inp == NULL) {
+ /* iterator is complete */
+done_with_iterator:
+ SCTP_ITERATOR_UNLOCK();
+ SCTP_INP_INFO_RUNLOCK();
+ if (it->function_atend != NULL) {
+ (*it->function_atend) (it->pointer, it->val);
+ }
+ SCTP_FREE(it, SCTP_M_ITER);
+ return;
+ }
+select_a_new_ep:
+ if (first_in) {
+ first_in = 0;
+ } else {
+ SCTP_INP_RLOCK(it->inp);
+ }
+ while (((it->pcb_flags) &&
+ ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
+ ((it->pcb_features) &&
+ ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
+ /* endpoint flags or features don't match, so keep looking */
+ if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
+ SCTP_INP_RUNLOCK(it->inp);
+ goto done_with_iterator;
+ }
+ tinp = it->inp;
+ it->inp = LIST_NEXT(it->inp, sctp_list);
+ SCTP_INP_RUNLOCK(tinp);
+ if (it->inp == NULL) {
+ goto done_with_iterator;
+ }
+ SCTP_INP_RLOCK(it->inp);
+ }
+ /* now go through each assoc which is in the desired state */
+ if (it->done_current_ep == 0) {
+ if (it->function_inp != NULL)
+ inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val);
+ it->done_current_ep = 1;
+ }
+ if (it->stcb == NULL) {
+ /* run the per instance function */
+ it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
+ }
+ if ((inp_skip) || it->stcb == NULL) {
+ if (it->function_inp_end != NULL) {
+ inp_skip = (*it->function_inp_end) (it->inp,
+ it->pointer,
+ it->val);
+ }
+ SCTP_INP_RUNLOCK(it->inp);
+ goto no_stcb;
+ }
+ while (it->stcb) {
+ SCTP_TCB_LOCK(it->stcb);
+ if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
+ /* not in the right state... keep looking */
+ SCTP_TCB_UNLOCK(it->stcb);
+ goto next_assoc;
+ }
+ /* see if we have limited out the iterator loop */
+ iteration_count++;
+ if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
+ /* Pause to let others grab the lock */
+ atomic_add_int(&it->stcb->asoc.refcnt, 1);
+ SCTP_TCB_UNLOCK(it->stcb);
+ SCTP_INP_INCR_REF(it->inp);
+ SCTP_INP_RUNLOCK(it->inp);
+ SCTP_ITERATOR_UNLOCK();
+ SCTP_INP_INFO_RUNLOCK();
+ SCTP_INP_INFO_RLOCK();
+ SCTP_ITERATOR_LOCK();
+ if (sctp_it_ctl.iterator_flags) {
+ /* We won't be staying here */
+ SCTP_INP_DECR_REF(it->inp);
+ atomic_add_int(&it->stcb->asoc.refcnt, -1);
+ if (sctp_it_ctl.iterator_flags &
+ SCTP_ITERATOR_MUST_EXIT) {
+ goto done_with_iterator;
+ }
+ if (sctp_it_ctl.iterator_flags &
+ SCTP_ITERATOR_STOP_CUR_IT) {
+ sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_IT;
+ goto done_with_iterator;
+ }
+ if (sctp_it_ctl.iterator_flags &
+ SCTP_ITERATOR_STOP_CUR_INP) {
+ sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_INP;
+ goto no_stcb;
+ }
+				/* If we reach here... huh? */
+ printf("Unknown it ctl flag %x\n",
+ sctp_it_ctl.iterator_flags);
+ sctp_it_ctl.iterator_flags = 0;
+ }
+ SCTP_INP_RLOCK(it->inp);
+ SCTP_INP_DECR_REF(it->inp);
+ SCTP_TCB_LOCK(it->stcb);
+ atomic_add_int(&it->stcb->asoc.refcnt, -1);
+ iteration_count = 0;
+ }
+ /* run function on this one */
+ (*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);
+
+		/*
+		 * we lie here, it really needs to have its own type but
+		 * first I must verify that this won't affect things :-0
+		 */
+ if (it->no_chunk_output == 0)
+ sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
+
+ SCTP_TCB_UNLOCK(it->stcb);
+next_assoc:
+ it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
+ if (it->stcb == NULL) {
+ /* Run last function */
+ if (it->function_inp_end != NULL) {
+ inp_skip = (*it->function_inp_end) (it->inp,
+ it->pointer,
+ it->val);
+ }
+ }
+ }
+ SCTP_INP_RUNLOCK(it->inp);
+no_stcb:
+ /* done with all assocs on this endpoint, move on to next endpoint */
+ it->done_current_ep = 0;
+ if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
+ it->inp = NULL;
+ } else {
+ it->inp = LIST_NEXT(it->inp, sctp_list);
+ }
+ if (it->inp == NULL) {
+ goto done_with_iterator;
+ }
+ goto select_a_new_ep;
+}
+
+void
+sctp_iterator_worker(void)
+{
+ struct sctp_iterator *it = NULL;
+
+ /* This function is called with the WQ lock in place */
+
+ sctp_it_ctl.iterator_running = 1;
+ sctp_it_ctl.cur_it = it = TAILQ_FIRST(&sctp_it_ctl.iteratorhead);
+ while (it) {
+ /* now lets work on this one */
+ TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr);
+ SCTP_IPI_ITERATOR_WQ_UNLOCK();
+ CURVNET_SET(it->vn);
+ sctp_iterator_work(it);
+
+ CURVNET_RESTORE();
+ SCTP_IPI_ITERATOR_WQ_LOCK();
+ if (sctp_it_ctl.iterator_flags & SCTP_ITERATOR_MUST_EXIT) {
+ sctp_it_ctl.cur_it = NULL;
+ break;
+ }
+ /* sa_ignore FREED_MEMORY */
+ sctp_it_ctl.cur_it = it = TAILQ_FIRST(&sctp_it_ctl.iteratorhead);
+ }
+ sctp_it_ctl.iterator_running = 0;
+ return;
+}
+
+
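+/*
+ * Drain the address work queue (filled from routing-socket events) into a
+ * freshly allocated asconf iterator and kick off an iteration over all
+ * bound-all endpoints; if no memory is available, re-arm the ADDR_WQ timer
+ * and try again later.
+ */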
+static void
+sctp_handle_addr_wq(void)
+{
+ /* deal with the ADDR wq from the rtsock calls */
+ struct sctp_laddr *wi;
+ struct sctp_asconf_iterator *asc;
+
+ SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
+ sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
+ if (asc == NULL) {
+ /* Try later, no memory */
+ sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
+ (struct sctp_inpcb *)NULL,
+ (struct sctp_tcb *)NULL,
+ (struct sctp_nets *)NULL);
+ return;
+ }
+ LIST_INIT(&asc->list_of_work);
+ asc->cnt = 0;
+
+ SCTP_WQ_ADDR_LOCK();
+ wi = LIST_FIRST(&SCTP_BASE_INFO(addr_wq));
+ while (wi != NULL) {
+ LIST_REMOVE(wi, sctp_nxt_addr);
+ LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
+ asc->cnt++;
+ wi = LIST_FIRST(&SCTP_BASE_INFO(addr_wq));
+ }
+ SCTP_WQ_ADDR_UNLOCK();
+
+ if (asc->cnt == 0) {
+ SCTP_FREE(asc, SCTP_M_ASC_IT);
+ } else {
+ (void)sctp_initiate_iterator(sctp_asconf_iterator_ep,
+ sctp_asconf_iterator_stcb,
+ NULL, /* No ep end for boundall */
+ SCTP_PCB_FLAGS_BOUNDALL,
+ SCTP_PCB_ANY_FEATURES,
+ SCTP_ASOC_ANY_STATE,
+ (void *)asc, 0,
+ sctp_asconf_iterator_end, NULL, 0);
+ }
+}
+
+int retcode = 0;
+int cur_oerr = 0;
+
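+/*
+ * Central timer callback: validate that the timer is still live (self
+ * pointer, type, inp/stcb references), take the references and locks it
+ * needs, then dispatch on tmr->type to the matching per-timer handler,
+ * usually followed by a call to sctp_chunk_output().
+ */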
+void
+sctp_timeout_handler(void *t)
+{
+ struct sctp_inpcb *inp;
+ struct sctp_tcb *stcb;
+ struct sctp_nets *net;
+ struct sctp_timer *tmr;
+
+#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+ struct socket *so;
+
+#endif
+ int did_output, type;
+
+ tmr = (struct sctp_timer *)t;
+ inp = (struct sctp_inpcb *)tmr->ep;
+ stcb = (struct sctp_tcb *)tmr->tcb;
+ net = (struct sctp_nets *)tmr->net;
+ CURVNET_SET((struct vnet *)tmr->vnet);
+ did_output = 1;
+
+#ifdef SCTP_AUDITING_ENABLED
+ sctp_audit_log(0xF0, (uint8_t) tmr->type);
+ sctp_auditing(3, inp, stcb, net);
+#endif
+
+ /* sanity checks... */
+ if (tmr->self != (void *)tmr) {
+ /*
+ * SCTP_PRINTF("Stale SCTP timer fired (%p), ignoring...\n",
+ * tmr);
+ */
+ CURVNET_RESTORE();
+ return;
+ }
+ tmr->stopped_from = 0xa001;
+ if (!SCTP_IS_TIMER_TYPE_VALID(tmr->type)) {
+ /*
+ * SCTP_PRINTF("SCTP timer fired with invalid type: 0x%x\n",
+ * tmr->type);
+ */
+ CURVNET_RESTORE();
+ return;
+ }
+ tmr->stopped_from = 0xa002;
+ if ((tmr->type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL)) {
+ CURVNET_RESTORE();
+ return;
+ }
+ /* if this is an iterator timeout, get the struct and clear inp */
+ tmr->stopped_from = 0xa003;
+ type = tmr->type;
+ if (inp) {
+ SCTP_INP_INCR_REF(inp);
+		if ((inp->sctp_socket == NULL) &&
+ ((tmr->type != SCTP_TIMER_TYPE_INPKILL) &&
+ (tmr->type != SCTP_TIMER_TYPE_INIT) &&
+ (tmr->type != SCTP_TIMER_TYPE_SEND) &&
+ (tmr->type != SCTP_TIMER_TYPE_RECV) &&
+ (tmr->type != SCTP_TIMER_TYPE_HEARTBEAT) &&
+ (tmr->type != SCTP_TIMER_TYPE_SHUTDOWN) &&
+ (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNACK) &&
+ (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNGUARD) &&
+ (tmr->type != SCTP_TIMER_TYPE_ASOCKILL))
+ ) {
+ SCTP_INP_DECR_REF(inp);
+ CURVNET_RESTORE();
+ return;
+ }
+ }
+ tmr->stopped_from = 0xa004;
+ if (stcb) {
+ atomic_add_int(&stcb->asoc.refcnt, 1);
+ if (stcb->asoc.state == 0) {
+ atomic_add_int(&stcb->asoc.refcnt, -1);
+ if (inp) {
+ SCTP_INP_DECR_REF(inp);
+ }
+ CURVNET_RESTORE();
+ return;
+ }
+ }
+ tmr->stopped_from = 0xa005;
+ SCTPDBG(SCTP_DEBUG_TIMER1, "Timer type %d goes off\n", tmr->type);
+ if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
+ if (inp) {
+ SCTP_INP_DECR_REF(inp);
+ }
+ if (stcb) {
+ atomic_add_int(&stcb->asoc.refcnt, -1);
+ }
+ CURVNET_RESTORE();
+ return;
+ }
+ tmr->stopped_from = 0xa006;
+
+ if (stcb) {
+ SCTP_TCB_LOCK(stcb);
+ atomic_add_int(&stcb->asoc.refcnt, -1);
+ if ((tmr->type != SCTP_TIMER_TYPE_ASOCKILL) &&
+ ((stcb->asoc.state == 0) ||
+ (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
+ SCTP_TCB_UNLOCK(stcb);
+ if (inp) {
+ SCTP_INP_DECR_REF(inp);
+ }
+ CURVNET_RESTORE();
+ return;
+ }
+ }
+	/* record in stopped_from which timeout occurred */
+ tmr->stopped_from = tmr->type;
+
+ /* mark as being serviced now */
+ if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
+ /*
+ * Callout has been rescheduled.
+ */
+ goto get_out;
+ }
+ if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
+ /*
+ * Not active, so no action.
+ */
+ goto get_out;
+ }
+ SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);
+
+ /* call the handler for the appropriate timer type */
+ switch (tmr->type) {
+ case SCTP_TIMER_TYPE_ZERO_COPY:
+ if (inp == NULL) {
+ break;
+ }
+ if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
+ SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
+ }
+ break;
+ case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
+ if (inp == NULL) {
+ break;
+ }
+ if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
+ SCTP_ZERO_COPY_SENDQ_EVENT(inp, inp->sctp_socket);
+ }
+ break;
+ case SCTP_TIMER_TYPE_ADDR_WQ:
+ sctp_handle_addr_wq();
+ break;
+ case SCTP_TIMER_TYPE_SEND:
+ if ((stcb == NULL) || (inp == NULL)) {
+ break;
+ }
+ SCTP_STAT_INCR(sctps_timodata);
+ stcb->asoc.timodata++;
+ stcb->asoc.num_send_timers_up--;
+ if (stcb->asoc.num_send_timers_up < 0) {
+ stcb->asoc.num_send_timers_up = 0;
+ }
+ SCTP_TCB_LOCK_ASSERT(stcb);
+ cur_oerr = stcb->asoc.overall_error_count;
+ retcode = sctp_t3rxt_timer(inp, stcb, net);
+ if (retcode) {
+ /* no need to unlock on tcb its gone */
+
+ goto out_decr;
+ }
+ SCTP_TCB_LOCK_ASSERT(stcb);
+#ifdef SCTP_AUDITING_ENABLED
+ sctp_auditing(4, inp, stcb, net);
+#endif
+ sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
+ if ((stcb->asoc.num_send_timers_up == 0) &&
+ (stcb->asoc.sent_queue_cnt > 0)
+ ) {
+ struct sctp_tmit_chunk *chk;
+
+			/*
+			 * Safeguard. If there are chunks on the sent queue
+			 * but no timers running, something is wrong... so we
+			 * start a timer on the first chunk on the sent queue
+			 * on whatever net it was sent to.
+			 */
+ chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
+ sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb,
+ chk->whoTo);
+ }
+ break;
+ case SCTP_TIMER_TYPE_INIT:
+ if ((stcb == NULL) || (inp == NULL)) {
+ break;
+ }
+ SCTP_STAT_INCR(sctps_timoinit);
+ stcb->asoc.timoinit++;
+ if (sctp_t1init_timer(inp, stcb, net)) {
+ /* no need to unlock on tcb its gone */
+ goto out_decr;
+ }
+ /* We do output but not here */
+ did_output = 0;
+ break;
+ case SCTP_TIMER_TYPE_RECV:
+ if ((stcb == NULL) || (inp == NULL)) {
+ break;
+		}
+		SCTP_STAT_INCR(sctps_timosack);
+		stcb->asoc.timosack++;
+		sctp_send_sack(stcb);
+#ifdef SCTP_AUDITING_ENABLED
+ sctp_auditing(4, inp, stcb, net);
+#endif
+ sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED);
+ break;
+ case SCTP_TIMER_TYPE_SHUTDOWN:
+ if ((stcb == NULL) || (inp == NULL)) {
+ break;
+ }
+ if (sctp_shutdown_timer(inp, stcb, net)) {
+ /* no need to unlock on tcb its gone */
+ goto out_decr;
+ }
+ SCTP_STAT_INCR(sctps_timoshutdown);
+ stcb->asoc.timoshutdown++;
+#ifdef SCTP_AUDITING_ENABLED
+ sctp_auditing(4, inp, stcb, net);
+#endif
+ sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED);
+ break;
+ case SCTP_TIMER_TYPE_HEARTBEAT:
+ {
+ struct sctp_nets *lnet;
+ int cnt_of_unconf = 0;
+
+ if ((stcb == NULL) || (inp == NULL)) {
+ break;
+ }
+ SCTP_STAT_INCR(sctps_timoheartbeat);
+ stcb->asoc.timoheartbeat++;
+ TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
+ if ((lnet->dest_state & SCTP_ADDR_UNCONFIRMED) &&
+ (lnet->dest_state & SCTP_ADDR_REACHABLE)) {
+ cnt_of_unconf++;
+ }
+ }
+ if (cnt_of_unconf == 0) {
+ if (sctp_heartbeat_timer(inp, stcb, lnet,
+ cnt_of_unconf)) {
+ /* no need to unlock on tcb its gone */
+ goto out_decr;
+ }
+ }
+#ifdef SCTP_AUDITING_ENABLED
+ sctp_auditing(4, inp, stcb, lnet);
+#endif
+ sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT,
+ stcb->sctp_ep, stcb, lnet);
+ sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED);
+ }
+ break;
+ case SCTP_TIMER_TYPE_COOKIE:
+ if ((stcb == NULL) || (inp == NULL)) {
+ break;
+ }
+ if (sctp_cookie_timer(inp, stcb, net)) {
+ /* no need to unlock on tcb its gone */
+ goto out_decr;
+ }
+ SCTP_STAT_INCR(sctps_timocookie);
+ stcb->asoc.timocookie++;
+#ifdef SCTP_AUDITING_ENABLED
+ sctp_auditing(4, inp, stcb, net);
+#endif
+		/*
+		 * We consider the T3 and Cookie timers pretty much the same
+		 * with respect to where output comes from in chunk_output.
+		 */
+ sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
+ break;
+ case SCTP_TIMER_TYPE_NEWCOOKIE:
+ {
+ struct timeval tv;
+ int i, secret;
+
+ if (inp == NULL) {
+ break;
+ }
+ SCTP_STAT_INCR(sctps_timosecret);
+ (void)SCTP_GETTIME_TIMEVAL(&tv);
+ SCTP_INP_WLOCK(inp);
+ inp->sctp_ep.time_of_secret_change = tv.tv_sec;
+ inp->sctp_ep.last_secret_number =
+ inp->sctp_ep.current_secret_number;
+ inp->sctp_ep.current_secret_number++;
+ if (inp->sctp_ep.current_secret_number >=
+ SCTP_HOW_MANY_SECRETS) {
+ inp->sctp_ep.current_secret_number = 0;
+ }
+ secret = (int)inp->sctp_ep.current_secret_number;
+ for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
+ inp->sctp_ep.secret_key[secret][i] =
+ sctp_select_initial_TSN(&inp->sctp_ep);
+ }
+ SCTP_INP_WUNLOCK(inp);
+ sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, stcb, net);
+ }
+ did_output = 0;
+ break;
+ case SCTP_TIMER_TYPE_PATHMTURAISE:
+ if ((stcb == NULL) || (inp == NULL)) {
+ break;
+ }
+ SCTP_STAT_INCR(sctps_timopathmtu);
+ sctp_pathmtu_timer(inp, stcb, net);
+ did_output = 0;
+ break;
+ case SCTP_TIMER_TYPE_SHUTDOWNACK:
+ if ((stcb == NULL) || (inp == NULL)) {
+ break;
+ }
+ if (sctp_shutdownack_timer(inp, stcb, net)) {
+ /* no need to unlock on tcb its gone */
+ goto out_decr;
+ }
+ SCTP_STAT_INCR(sctps_timoshutdownack);
+ stcb->asoc.timoshutdownack++;
+#ifdef SCTP_AUDITING_ENABLED
+ sctp_auditing(4, inp, stcb, net);
+#endif
+ sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED);
+ break;
+ case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
+ if ((stcb == NULL) || (inp == NULL)) {
+ break;
+ }
+ SCTP_STAT_INCR(sctps_timoshutdownguard);
+ sctp_abort_an_association(inp, stcb,
+ SCTP_SHUTDOWN_GUARD_EXPIRES, NULL, SCTP_SO_NOT_LOCKED);
+ /* no need to unlock on tcb its gone */
+ goto out_decr;
+
+ case SCTP_TIMER_TYPE_STRRESET:
+ if ((stcb == NULL) || (inp == NULL)) {
+ break;
+ }
+ if (sctp_strreset_timer(inp, stcb, net)) {
+ /* no need to unlock on tcb its gone */
+ goto out_decr;
+ }
+ SCTP_STAT_INCR(sctps_timostrmrst);
+ sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED);
+ break;
+ case SCTP_TIMER_TYPE_EARLYFR:
+ /* Need to do FR of things for net */
+ if ((stcb == NULL) || (inp == NULL)) {
+ break;
+ }
+ SCTP_STAT_INCR(sctps_timoearlyfr);
+ sctp_early_fr_timer(inp, stcb, net);
+ break;
+ case SCTP_TIMER_TYPE_ASCONF:
+ if ((stcb == NULL) || (inp == NULL)) {
+ break;
+ }
+ if (sctp_asconf_timer(inp, stcb, net)) {
+ /* no need to unlock on tcb its gone */
+ goto out_decr;
+ }
+ SCTP_STAT_INCR(sctps_timoasconf);
+#ifdef SCTP_AUDITING_ENABLED
+ sctp_auditing(4, inp, stcb, net);
+#endif
+ sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED);
+ break;
+ case SCTP_TIMER_TYPE_PRIM_DELETED:
+ if ((stcb == NULL) || (inp == NULL)) {
+ break;
+ }
+ sctp_delete_prim_timer(inp, stcb, net);
+ SCTP_STAT_INCR(sctps_timodelprim);
+ break;
+
+ case SCTP_TIMER_TYPE_AUTOCLOSE:
+ if ((stcb == NULL) || (inp == NULL)) {
+ break;
+ }
+ SCTP_STAT_INCR(sctps_timoautoclose);
+ sctp_autoclose_timer(inp, stcb, net);
+ sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED);
+ did_output = 0;
+ break;
+ case SCTP_TIMER_TYPE_ASOCKILL:
+ if ((stcb == NULL) || (inp == NULL)) {
+ break;
+ }
+ SCTP_STAT_INCR(sctps_timoassockill);
+ /* Can we free it yet? */
+ SCTP_INP_DECR_REF(inp);
+ sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_1);
+#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+ so = SCTP_INP_SO(inp);
+ atomic_add_int(&stcb->asoc.refcnt, 1);
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_SOCKET_LOCK(so, 1);
+ SCTP_TCB_LOCK(stcb);
+ atomic_subtract_int(&stcb->asoc.refcnt, 1);
+#endif
+ (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_2);
+#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+ SCTP_SOCKET_UNLOCK(so, 1);
+#endif
+		/*
+		 * free asoc, always unlocks (or destroys) so prevent
+		 * duplicate unlock or unlock of a free mtx :-0
+		 */
+ stcb = NULL;
+ goto out_no_decr;
+ case SCTP_TIMER_TYPE_INPKILL:
+ SCTP_STAT_INCR(sctps_timoinpkill);
+ if (inp == NULL) {
+ break;
+ }
+ /*
+ * special case, take away our increment since WE are the
+ * killer
+ */
+ SCTP_INP_DECR_REF(inp);
+ sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_3);
+ sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
+ SCTP_CALLED_FROM_INPKILL_TIMER);
+ inp = NULL;
+ goto out_no_decr;
+ default:
+ SCTPDBG(SCTP_DEBUG_TIMER1, "sctp_timeout_handler:unknown timer %d\n",
+ tmr->type);
+ break;
+	}
+#ifdef SCTP_AUDITING_ENABLED
+ sctp_audit_log(0xF1, (uint8_t) tmr->type);
+ if (inp)
+ sctp_auditing(5, inp, stcb, net);
+#endif
+ if ((did_output) && stcb) {
+ /*
+ * Now we need to clean up the control chunk chain if an
+ * ECNE is on it. It must be marked as UNSENT again so next
+ * call will continue to send it until such time that we get
+ * a CWR, to remove it. It is, however, less likely that we
+ * will find a ecn echo on the chain though.
+ */
+ sctp_fix_ecn_echo(&stcb->asoc);
+ }
+get_out:
+ if (stcb) {
+ SCTP_TCB_UNLOCK(stcb);
+ }
+out_decr:
+ if (inp) {
+ SCTP_INP_DECR_REF(inp);
+ }
+out_no_decr:
+ SCTPDBG(SCTP_DEBUG_TIMER1, "Timer now complete (type %d)\n",
+ type);
+ CURVNET_RESTORE();
+}
+
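+/*
+ * Start a timer of the given type: pick the sctp_timer structure that
+ * backs this type (per-net, per-assoc, or per-endpoint) and compute its
+ * duration in ticks, typically from the destination RTO or an endpoint
+ * default. A timer that is already pending is left running unchanged.
+ */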
+void
+sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
+ struct sctp_nets *net)
+{
+ int to_ticks;
+ struct sctp_timer *tmr;
+
+ if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL))
+ return;
+
+ to_ticks = 0;
+
+ tmr = NULL;
+ if (stcb) {
+ SCTP_TCB_LOCK_ASSERT(stcb);
+ }
+ switch (t_type) {
+ case SCTP_TIMER_TYPE_ZERO_COPY:
+ tmr = &inp->sctp_ep.zero_copy_timer;
+ to_ticks = SCTP_ZERO_COPY_TICK_DELAY;
+ break;
+ case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
+ tmr = &inp->sctp_ep.zero_copy_sendq_timer;
+ to_ticks = SCTP_ZERO_COPY_SENDQ_TICK_DELAY;
+ break;
+ case SCTP_TIMER_TYPE_ADDR_WQ:
+ /* Only 1 tick away :-) */
+ tmr = &SCTP_BASE_INFO(addr_wq_timer);
+ to_ticks = SCTP_ADDRESS_TICK_DELAY;
+ break;
+ case SCTP_TIMER_TYPE_SEND:
+ /* Here we use the RTO timer */
+ {
+ int rto_val;
+
+ if ((stcb == NULL) || (net == NULL)) {
+ return;
+ }
+ tmr = &net->rxt_timer;
+ if (net->RTO == 0) {
+ rto_val = stcb->asoc.initial_rto;
+ } else {
+ rto_val = net->RTO;
+ }
+ to_ticks = MSEC_TO_TICKS(rto_val);
+ }
+ break;
+ case SCTP_TIMER_TYPE_INIT:
+		/*
+		 * Here we use the INIT timer default, usually about 1
+		 * minute.
+		 */
+ if ((stcb == NULL) || (net == NULL)) {
+ return;
+ }
+ tmr = &net->rxt_timer;
+ if (net->RTO == 0) {
+ to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
+ } else {
+ to_ticks = MSEC_TO_TICKS(net->RTO);
+ }
+ break;
+ case SCTP_TIMER_TYPE_RECV:
+		/*
+		 * Here we use the Delayed-Ack timer value from the inp,
+		 * usually about 200ms.
+		 */
+ if (stcb == NULL) {
+ return;
+ }
+ tmr = &stcb->asoc.dack_timer;
+ to_ticks = MSEC_TO_TICKS(stcb->asoc.delayed_ack);
+ break;
+ case SCTP_TIMER_TYPE_SHUTDOWN:
+ /* Here we use the RTO of the destination. */
+ if ((stcb == NULL) || (net == NULL)) {
+ return;
+ }
+ if (net->RTO == 0) {
+ to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
+ } else {
+ to_ticks = MSEC_TO_TICKS(net->RTO);
+ }
+ tmr = &net->rxt_timer;
+ break;
+ case SCTP_TIMER_TYPE_HEARTBEAT:
+		/*
+		 * The net is used here so that we can add in the RTO, even
+		 * though we use a different timer. We also add the HB delay
+		 * PLUS a random jitter.
+		 */
+ if ((inp == NULL) || (stcb == NULL)) {
+ return;
+ } else {
+ uint32_t rndval;
+ uint8_t this_random;
+ int cnt_of_unconf = 0;
+ struct sctp_nets *lnet;
+
+ TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
+ if ((lnet->dest_state & SCTP_ADDR_UNCONFIRMED) &&
+ (lnet->dest_state & SCTP_ADDR_REACHABLE)) {
+ cnt_of_unconf++;
+ }
+ }
+ if (cnt_of_unconf) {
+ net = lnet = NULL;
+ (void)sctp_heartbeat_timer(inp, stcb, lnet, cnt_of_unconf);
+ }
+ if (stcb->asoc.hb_random_idx > 3) {
+ rndval = sctp_select_initial_TSN(&inp->sctp_ep);
+ memcpy(stcb->asoc.hb_random_values, &rndval,
+ sizeof(stcb->asoc.hb_random_values));
+ stcb->asoc.hb_random_idx = 0;
+ }
+ this_random = stcb->asoc.hb_random_values[stcb->asoc.hb_random_idx];
+ stcb->asoc.hb_random_idx++;
+ stcb->asoc.hb_ect_randombit = 0;
+			/*
+			 * this_random will be 0 - 255 ms; RTO is in ms.
+			 */
+ if ((stcb->asoc.hb_is_disabled) &&
+ (cnt_of_unconf == 0)) {
+ return;
+ }
+ if (net) {
+ int delay;
+
+ delay = stcb->asoc.heart_beat_delay;
+ TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
+ if ((lnet->dest_state & SCTP_ADDR_UNCONFIRMED) &&
+ ((lnet->dest_state & SCTP_ADDR_OUT_OF_SCOPE) == 0) &&
+ (lnet->dest_state & SCTP_ADDR_REACHABLE)) {
+ delay = 0;
+ }
+ }
+ if (net->RTO == 0) {
+ /* Never been checked */
+ to_ticks = this_random + stcb->asoc.initial_rto + delay;
+ } else {
+ /* set rto_val to the ms */
+ to_ticks = delay + net->RTO + this_random;
+ }
+ } else {
+ if (cnt_of_unconf) {
+ to_ticks = this_random + stcb->asoc.initial_rto;
+ } else {
+ to_ticks = stcb->asoc.heart_beat_delay + this_random + stcb->asoc.initial_rto;
+ }
+ }
+			/*
+			 * Now convert to_ticks, currently in ms, into
+			 * ticks.
+			 */
+ to_ticks = MSEC_TO_TICKS(to_ticks);
+ tmr = &stcb->asoc.hb_timer;
+ }
+ break;
+ case SCTP_TIMER_TYPE_COOKIE:
+		/*
+		 * Here we can use the RTO timer from the network since one
+		 * RTT was complete. If a retransmission happened then we
+		 * will be using the RTO initial value.
+		 */
+ if ((stcb == NULL) || (net == NULL)) {
+ return;
+ }
+ if (net->RTO == 0) {
+ to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
+ } else {
+ to_ticks = MSEC_TO_TICKS(net->RTO);
+ }
+ tmr = &net->rxt_timer;
+ break;
+ case SCTP_TIMER_TYPE_NEWCOOKIE:
+		/*
+		 * Nothing needed but the endpoint here; usually about 60
+		 * minutes.
+		 */
+ if (inp == NULL) {
+ return;
+ }
+ tmr = &inp->sctp_ep.signature_change;
+ to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE];
+ break;
+ case SCTP_TIMER_TYPE_ASOCKILL:
+ if (stcb == NULL) {
+ return;
+ }
+ tmr = &stcb->asoc.strreset_timer;
+ to_ticks = MSEC_TO_TICKS(SCTP_ASOC_KILL_TIMEOUT);
+ break;
+ case SCTP_TIMER_TYPE_INPKILL:
+		/*
+		 * The inp is set up to die. We re-use the signature_change
+		 * timer since that has stopped and we are in the GONE
+		 * state.
+		 */
+ if (inp == NULL) {
+ return;
+ }
+ tmr = &inp->sctp_ep.signature_change;
+ to_ticks = MSEC_TO_TICKS(SCTP_INP_KILL_TIMEOUT);
+ break;
+ case SCTP_TIMER_TYPE_PATHMTURAISE:
+		/*
+		 * Here we use the value found in the EP for PMTU, usually
+		 * about 10 minutes.
+		 */
+ if ((stcb == NULL) || (inp == NULL)) {
+ return;
+ }
+ if (net == NULL) {
+ return;
+ }
+ to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU];
+ tmr = &net->pmtu_timer;
+ break;
+ case SCTP_TIMER_TYPE_SHUTDOWNACK:
+ /* Here we use the RTO of the destination */
+ if ((stcb == NULL) || (net == NULL)) {
+ return;
+ }
+ if (net->RTO == 0) {
+ to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
+ } else {
+ to_ticks = MSEC_TO_TICKS(net->RTO);
+ }
+ tmr = &net->rxt_timer;
+ break;
+ case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
+		/*
+		 * Here we use the endpoint's shutdown guard timer, usually
+		 * about 3 minutes.
+		 */
+ if ((inp == NULL) || (stcb == NULL)) {
+ return;
+ }
+ to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN];
+ tmr = &stcb->asoc.shut_guard_timer;
+ break;
+ case SCTP_TIMER_TYPE_STRRESET:
+ /*
+ * Here the timer comes from the stcb but its value is from
+ * the net's RTO.
+ */
+ if ((stcb == NULL) || (net == NULL)) {
+ return;
+ }
+ if (net->RTO == 0) {
+ to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
+ } else {
+ to_ticks = MSEC_TO_TICKS(net->RTO);
+ }
+ tmr = &stcb->asoc.strreset_timer;
+ break;
+
+ case SCTP_TIMER_TYPE_EARLYFR:
+ {
+ unsigned int msec;
+
+ if ((stcb == NULL) || (net == NULL)) {
+ return;
+ }
+ if (net->flight_size > net->cwnd) {
+ /* no need to start */
+ return;
+ }
+ SCTP_STAT_INCR(sctps_earlyfrstart);
+ if (net->lastsa == 0) {
+ /* Hmm no rtt estimate yet? */
+ msec = stcb->asoc.initial_rto >> 2;
+ } else {
+ msec = ((net->lastsa >> 2) + net->lastsv) >> 1;
+ }
+ if (msec < SCTP_BASE_SYSCTL(sctp_early_fr_msec)) {
+ msec = SCTP_BASE_SYSCTL(sctp_early_fr_msec);
+ if (msec < SCTP_MINFR_MSEC_FLOOR) {
+ msec = SCTP_MINFR_MSEC_FLOOR;
+ }
+ }
+ to_ticks = MSEC_TO_TICKS(msec);
+ tmr = &net->fr_timer;
+ }
+ break;
+ case SCTP_TIMER_TYPE_ASCONF:
+ /*
+ * Here the timer comes from the stcb but its value is from
+ * the net's RTO.
+ */
+ if ((stcb == NULL) || (net == NULL)) {
+ return;
+ }
+ if (net->RTO == 0) {
+ to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
+ } else {
+ to_ticks = MSEC_TO_TICKS(net->RTO);
+ }
+ tmr = &stcb->asoc.asconf_timer;
+ break;
+ case SCTP_TIMER_TYPE_PRIM_DELETED:
+ if ((stcb == NULL) || (net != NULL)) {
+ return;
+ }
+ to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
+ tmr = &stcb->asoc.delete_prim_timer;
+ break;
+ case SCTP_TIMER_TYPE_AUTOCLOSE:
+ if (stcb == NULL) {
+ return;
+ }
+ if (stcb->asoc.sctp_autoclose_ticks == 0) {
+ /*
+ * Really an error since stcb is NOT set to
+ * autoclose
+ */
+ return;
+ }
+ to_ticks = stcb->asoc.sctp_autoclose_ticks;
+ tmr = &stcb->asoc.autoclose_timer;
+ break;
+ default:
+ SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
+ __FUNCTION__, t_type);
+		return;
+	}
+ if ((to_ticks <= 0) || (tmr == NULL)) {
+ SCTPDBG(SCTP_DEBUG_TIMER1, "%s: %d:software error to_ticks:%d tmr:%p not set ??\n",
+ __FUNCTION__, t_type, to_ticks, tmr);
+ return;
+ }
+ if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
+		/*
+		 * We do NOT allow the timer to already be running; if it
+		 * is, we leave the current one up unchanged.
+		 */
+ return;
+ }
+ /* At this point we can proceed */
+ if (t_type == SCTP_TIMER_TYPE_SEND) {
+ stcb->asoc.num_send_timers_up++;
+ }
+ tmr->stopped_from = 0;
+ tmr->type = t_type;
+ tmr->ep = (void *)inp;
+ tmr->tcb = (void *)stcb;
+ tmr->net = (void *)net;
+ tmr->self = (void *)tmr;
+ tmr->vnet = (void *)curvnet;
+ tmr->ticks = sctp_get_tick_count();
+ (void)SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr);
+ return;
+}
+
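+/*
+ * Stop a timer of the given type. Since some sctp_timer structures are
+ * shared between types (e.g. the stream-reset timer also backs ASOCKILL),
+ * we only stop the callout if it is actually running as the requested
+ * type.
+ */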
+void
+sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
+ struct sctp_nets *net, uint32_t from)
+{
+ struct sctp_timer *tmr;
+
+ if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) &&
+ (inp == NULL))
+ return;
+
+ tmr = NULL;
+ if (stcb) {
+ SCTP_TCB_LOCK_ASSERT(stcb);
+ }
+ switch (t_type) {
+ case SCTP_TIMER_TYPE_ZERO_COPY:
+ tmr = &inp->sctp_ep.zero_copy_timer;
+ break;
+ case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
+ tmr = &inp->sctp_ep.zero_copy_sendq_timer;
+ break;
+ case SCTP_TIMER_TYPE_ADDR_WQ:
+ tmr = &SCTP_BASE_INFO(addr_wq_timer);
+ break;
+ case SCTP_TIMER_TYPE_EARLYFR:
+ if ((stcb == NULL) || (net == NULL)) {
+ return;
+ }
+ tmr = &net->fr_timer;
+ SCTP_STAT_INCR(sctps_earlyfrstop);
+ break;
+ case SCTP_TIMER_TYPE_SEND:
+ if ((stcb == NULL) || (net == NULL)) {
+ return;
+ }
+ tmr = &net->rxt_timer;
+ break;
+ case SCTP_TIMER_TYPE_INIT:
+ if ((stcb == NULL) || (net == NULL)) {
+ return;
+ }
+ tmr = &net->rxt_timer;
+ break;
+ case SCTP_TIMER_TYPE_RECV:
+ if (stcb == NULL) {
+ return;
+ }
+ tmr = &stcb->asoc.dack_timer;
+ break;
+ case SCTP_TIMER_TYPE_SHUTDOWN:
+ if ((stcb == NULL) || (net == NULL)) {
+ return;
+ }
+ tmr = &net->rxt_timer;
+ break;
+ case SCTP_TIMER_TYPE_HEARTBEAT:
+ if (stcb == NULL) {
+ return;
+ }
+ tmr = &stcb->asoc.hb_timer;
+ break;
+ case SCTP_TIMER_TYPE_COOKIE:
+ if ((stcb == NULL) || (net == NULL)) {
+ return;
+ }
+ tmr = &net->rxt_timer;
+ break;
+ case SCTP_TIMER_TYPE_NEWCOOKIE:
+ /* nothing needed but the endpoint here */
+ tmr = &inp->sctp_ep.signature_change;
+		/*
+		 * We re-use the newcookie timer for the INP kill timer. We
+		 * must ensure that we do not kill it by accident.
+		 */
+ break;
+ case SCTP_TIMER_TYPE_ASOCKILL:
+ /*
+ * Stop the asoc kill timer.
+ */
+ if (stcb == NULL) {
+ return;
+ }
+ tmr = &stcb->asoc.strreset_timer;
+ break;
+
+ case SCTP_TIMER_TYPE_INPKILL:
+		/*
+		 * The inp is set up to die. We re-use the signature_change
+		 * timer since that has stopped and we are in the GONE
+		 * state.
+		 */
+ tmr = &inp->sctp_ep.signature_change;
+ break;
+ case SCTP_TIMER_TYPE_PATHMTURAISE:
+ if ((stcb == NULL) || (net == NULL)) {
+ return;
+ }
+ tmr = &net->pmtu_timer;
+ break;
+ case SCTP_TIMER_TYPE_SHUTDOWNACK:
+ if ((stcb == NULL) || (net == NULL)) {
+ return;
+ }
+ tmr = &net->rxt_timer;
+ break;
+ case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
+ if (stcb == NULL) {
+ return;
+ }
+ tmr = &stcb->asoc.shut_guard_timer;
+ break;
+ case SCTP_TIMER_TYPE_STRRESET:
+ if (stcb == NULL) {
+ return;
+ }
+ tmr = &stcb->asoc.strreset_timer;
+ break;
+ case SCTP_TIMER_TYPE_ASCONF:
+ if (stcb == NULL) {
+ return;
+ }
+ tmr = &stcb->asoc.asconf_timer;
+ break;
+ case SCTP_TIMER_TYPE_PRIM_DELETED:
+ if (stcb == NULL) {
+ return;
+ }
+ tmr = &stcb->asoc.delete_prim_timer;
+ break;
+ case SCTP_TIMER_TYPE_AUTOCLOSE:
+ if (stcb == NULL) {
+ return;
+ }
+ tmr = &stcb->asoc.autoclose_timer;
+ break;
+ default:
+ SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
+ __FUNCTION__, t_type);
+ break;
+	}
+ if (tmr == NULL) {
+ return;
+ }
+ if ((tmr->type != t_type) && tmr->type) {
+		/*
+		 * Ok, we have a timer that is under joint use, perchance
+		 * the Cookie timer with the SEND timer. We therefore are
+		 * NOT running the timer that the caller wants stopped, so
+		 * just return.
+		 */
+ return;
+ }
+ if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) {
+ stcb->asoc.num_send_timers_up--;
+ if (stcb->asoc.num_send_timers_up < 0) {
+ stcb->asoc.num_send_timers_up = 0;
+ }
+ }
+ tmr->self = NULL;
+ tmr->stopped_from = from;
+ (void)SCTP_OS_TIMER_STOP(&tmr->timer);
+ return;
+}
+
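+/*
+ * Sum SCTP_BUF_LEN() over an mbuf chain to get the total payload length.
+ */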
+uint32_t
+sctp_calculate_len(struct mbuf *m)
+{
+ uint32_t tlen = 0;
+ struct mbuf *at;
+
+ at = m;
+ while (at) {
+ tlen += SCTP_BUF_LEN(at);
+ at = SCTP_BUF_NEXT(at);
+ }
+ return (tlen);
+}
+
+void
+sctp_mtu_size_reset(struct sctp_inpcb *inp,
+ struct sctp_association *asoc, uint32_t mtu)
+{
+	/*
+	 * Reset the P-MTU size on this association. This involves changing
+	 * the asoc MTU and going through ANY chunk+overhead larger than mtu
+	 * to allow the DF flag to be cleared.
+	 */
+ struct sctp_tmit_chunk *chk;
+ unsigned int eff_mtu, ovh;
+
+ asoc->smallest_mtu = mtu;
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
+ ovh = SCTP_MIN_OVERHEAD;
+ } else {
+ ovh = SCTP_MIN_V4_OVERHEAD;
+ }
+ eff_mtu = mtu - ovh;
+ TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
+ if (chk->send_size > eff_mtu) {
+ chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
+ }
+ }
+ TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
+ if (chk->send_size > eff_mtu) {
+ chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
+ }
+ }
+}
+
+
+/*
+ * Given an association and the starting time of the current RTT period,
+ * return the RTO in number of msecs. net should point to the current
+ * network.
+ */
+uint32_t
+sctp_calculate_rto(struct sctp_tcb *stcb,
+ struct sctp_association *asoc,
+ struct sctp_nets *net,
+ struct timeval *told,
+ int safe)
+{
+	/*-
+	 * Given an association and the starting time of the current RTT
+	 * period (in *told), return the RTO in number of msecs.
+	 */
+ int calc_time = 0;
+ int o_calctime;
+ uint32_t new_rto = 0;
+ int first_measure = 0;
+ struct timeval now, then, *old;
+
+ /* Copy it out for sparc64 */
+ if (safe == sctp_align_unsafe_makecopy) {
+ old = &then;
+ memcpy(&then, told, sizeof(struct timeval));
+ } else if (safe == sctp_align_safe_nocopy) {
+ old = told;
+ } else {
+ /* error */
+ SCTP_PRINTF("Huh, bad rto calc call\n");
+ return (0);
+ }
+ /************************/
+ /* 1. calculate new RTT */
+ /************************/
+ /* get the current time */
+ (void)SCTP_GETTIME_TIMEVAL(&now);
+ /* compute the RTT value */
+ if ((u_long)now.tv_sec > (u_long)old->tv_sec) {
+ calc_time = ((u_long)now.tv_sec - (u_long)old->tv_sec) * 1000;
+ if ((u_long)now.tv_usec > (u_long)old->tv_usec) {
+ calc_time += (((u_long)now.tv_usec -
+ (u_long)old->tv_usec) / 1000);
+ } else if ((u_long)now.tv_usec < (u_long)old->tv_usec) {
+ /* Borrow 1,000ms from current calculation */
+ calc_time -= 1000;
+ /* Add in the slop over */
+ calc_time += ((int)now.tv_usec / 1000);
+ /* Add in the pre-second ms's */
+ calc_time += (((int)1000000 - (int)old->tv_usec) / 1000);
+ }
+ } else if ((u_long)now.tv_sec == (u_long)old->tv_sec) {
+ if ((u_long)now.tv_usec > (u_long)old->tv_usec) {
+ calc_time = ((u_long)now.tv_usec -
+ (u_long)old->tv_usec) / 1000;
+ } else if ((u_long)now.tv_usec < (u_long)old->tv_usec) {
+			/* impossible... garbage in, nothing out */
+ goto calc_rto;
+ } else if ((u_long)now.tv_usec == (u_long)old->tv_usec) {
+ /*
+ * We have to have 1 usec :-D this must be the
+ * loopback.
+ */
+ calc_time = 1;
+ } else {
+			/* impossible... garbage in, nothing out */
+ goto calc_rto;
+ }
+ } else {
+ /* Clock wrapped? */
+ goto calc_rto;
+ }
+ /***************************/
+ /* 2. update RTTVAR & SRTT */
+ /***************************/
+ net->rtt = o_calctime = calc_time;
+ /* this is Van Jacobson's integer version */
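+	/*
+	 * With SCTP_RTT_SHIFT == 3 and SCTP_RTT_VAR_SHIFT == 2 (the values
+	 * the shift comments below assume), lastsa holds 8*SRTT and lastsv
+	 * holds 4*RTTVAR, so the updates below are the scaled forms of
+	 *   SRTT   += (rtt - SRTT) / 8
+	 *   RTTVAR += (|rtt - SRTT| - RTTVAR) / 4
+	 * and the RTO computed at calc_rto is SRTT + 4*RTTVAR.
+	 */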
+ if (net->RTO_measured) {
+ calc_time -= (net->lastsa >> SCTP_RTT_SHIFT); /* take away 1/8th when
+ * shift=3 */
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
+ rto_logging(net, SCTP_LOG_RTTVAR);
+ }
+ net->prev_rtt = o_calctime;
+ net->lastsa += calc_time; /* add 7/8th into sa when
+ * shift=3 */
+ if (calc_time < 0) {
+ calc_time = -calc_time;
+ }
+ calc_time -= (net->lastsv >> SCTP_RTT_VAR_SHIFT); /* take away 1/4 when
+ * VAR shift=2 */
+ net->lastsv += calc_time;
+ if (net->lastsv == 0) {
+ net->lastsv = SCTP_CLOCK_GRANULARITY;
+ }
+ } else {
+		/* First RTO measurement */
+ net->RTO_measured = 1;
+ net->lastsa = calc_time << SCTP_RTT_SHIFT; /* Multiply by 8 when
+ * shift=3 */
+ net->lastsv = calc_time;
+ if (net->lastsv == 0) {
+ net->lastsv = SCTP_CLOCK_GRANULARITY;
+ }
+ first_measure = 1;
+ net->prev_rtt = o_calctime;
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
+ rto_logging(net, SCTP_LOG_INITIAL_RTT);
+ }
+ }
+calc_rto:
+ new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
+ if ((new_rto > SCTP_SAT_NETWORK_MIN) &&
+ (stcb->asoc.sat_network_lockout == 0)) {
+ stcb->asoc.sat_network = 1;
+ } else if ((!first_measure) && stcb->asoc.sat_network) {
+ stcb->asoc.sat_network = 0;
+ stcb->asoc.sat_network_lockout = 1;
+ }
+ /* bound it, per C6/C7 in Section 5.3.1 */
+ if (new_rto < stcb->asoc.minrto) {
+ new_rto = stcb->asoc.minrto;
+ }
+ if (new_rto > stcb->asoc.maxrto) {
+ new_rto = stcb->asoc.maxrto;
+ }
+ /* we are now returning the RTO */
+ return (new_rto);
+}
+
+/*
+ * Return a pointer to a contiguous piece of data from the given mbuf chain
+ * starting at 'off' for 'len' bytes. If the desired piece spans more than
+ * one mbuf, a copy is made into 'in_ptr'. The caller must ensure that the
+ * buffer size is >= 'len'. Returns NULL if there aren't 'len' bytes in the
+ * chain.
+ */
+caddr_t
+sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t * in_ptr)
+{
+ uint32_t count;
+ uint8_t *ptr;
+
+ ptr = in_ptr;
+ if ((off < 0) || (len <= 0))
+ return (NULL);
+
+ /* find the desired start location */
+ while ((m != NULL) && (off > 0)) {
+ if (off < SCTP_BUF_LEN(m))
+ break;
+ off -= SCTP_BUF_LEN(m);
+ m = SCTP_BUF_NEXT(m);
+ }
+ if (m == NULL)
+ return (NULL);
+
+	/* is the current mbuf large enough (i.e., contiguous)? */
+ if ((SCTP_BUF_LEN(m) - off) >= len) {
+ return (mtod(m, caddr_t)+off);
+ } else {
+ /* else, it spans more than one mbuf, so save a temp copy... */
+ while ((m != NULL) && (len > 0)) {
+ count = min(SCTP_BUF_LEN(m) - off, len);
+ bcopy(mtod(m, caddr_t)+off, ptr, count);
+ len -= count;
+ ptr += count;
+ off = 0;
+ m = SCTP_BUF_NEXT(m);
+ }
+ if ((m == NULL) && (len > 0))
+ return (NULL);
+ else
+ return ((caddr_t)in_ptr);
+ }
+}
+
+
+
+struct sctp_paramhdr *
+sctp_get_next_param(struct mbuf *m,
+ int offset,
+ struct sctp_paramhdr *pull,
+ int pull_limit)
+{
+ /* This just provides a typed signature to Peter's Pull routine */
+ return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit,
+ (uint8_t *) pull));
+}
+
+
+int
+sctp_add_pad_tombuf(struct mbuf *m, int padlen)
+{
+	/*
+	 * Add padlen bytes of zero-filled padding to the end of the mbuf.
+	 * If padlen is > 3, this routine will fail.
+	 */
+ uint8_t *dp;
+ int i;
+
+ if (padlen > 3) {
+ SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
+ return (ENOBUFS);
+ }
+ if (padlen <= M_TRAILINGSPACE(m)) {
+ /*
+ * The easy way. We hope the majority of the time we hit
+ * here :)
+ */
+ dp = (uint8_t *) (mtod(m, caddr_t)+SCTP_BUF_LEN(m));
+ SCTP_BUF_LEN(m) += padlen;
+ } else {
+ /* Hard way we must grow the mbuf */
+ struct mbuf *tmp;
+
+ tmp = sctp_get_mbuf_for_msg(padlen, 0, M_DONTWAIT, 1, MT_DATA);
+ if (tmp == NULL) {
+ /* Out of space GAK! we are in big trouble. */
+ SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
+ return (ENOSPC);
+ }
+ /* setup and insert in middle */
+ SCTP_BUF_LEN(tmp) = padlen;
+ SCTP_BUF_NEXT(tmp) = NULL;
+ SCTP_BUF_NEXT(m) = tmp;
+ dp = mtod(tmp, uint8_t *);
+ }
+ /* zero out the pad */
+ for (i = 0; i < padlen; i++) {
+ *dp = 0;
+ dp++;
+ }
+ return (0);
+}
+
+int
+sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf)
+{
+ /* find the last mbuf in chain and pad it */
+ struct mbuf *m_at;
+
+ m_at = m;
+ if (last_mbuf) {
+ return (sctp_add_pad_tombuf(last_mbuf, padval));
+ } else {
+ while (m_at) {
+ if (SCTP_BUF_NEXT(m_at) == NULL) {
+ return (sctp_add_pad_tombuf(m_at, padval));
+ }
+ m_at = SCTP_BUF_NEXT(m_at);
+ }
+ }
+ SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
+ return (EFAULT);
+}
+
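+/*
+ * Queue an SCTP_ASSOC_CHANGE notification on the socket's read queue. For
+ * TCP-model (and connected UDP-model) sockets, a COMM_LOST or
+ * CANT_STR_ASSOC event additionally sets so_error (ECONNREFUSED while
+ * still in COOKIE_WAIT, ECONNRESET otherwise) and wakes any sleepers.
+ */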
+static void
+sctp_notify_assoc_change(uint32_t event, struct sctp_tcb *stcb,
+ uint32_t error, void *data, int so_locked
+#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
+ SCTP_UNUSED
+#endif
+)
+{
+ struct mbuf *m_notify;
+ struct sctp_assoc_change *sac;
+ struct sctp_queued_to_read *control;
+
+#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+ struct socket *so;
+
+#endif
+
+ /*
+ * For TCP model AND UDP connected sockets we will send an error up
+ * when an ABORT comes in.
+ */
+ if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+ (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
+ ((event == SCTP_COMM_LOST) || (event == SCTP_CANT_STR_ASSOC))) {
+ if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) {
+ SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED);
+ stcb->sctp_socket->so_error = ECONNREFUSED;
+ } else {
+ SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
+ stcb->sctp_socket->so_error = ECONNRESET;
+ }
+ /* Wake ANY sleepers */
+#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+ so = SCTP_INP_SO(stcb->sctp_ep);
+ if (!so_locked) {
+ atomic_add_int(&stcb->asoc.refcnt, 1);
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_SOCKET_LOCK(so, 1);
+ SCTP_TCB_LOCK(stcb);
+ atomic_subtract_int(&stcb->asoc.refcnt, 1);
+ if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
+ SCTP_SOCKET_UNLOCK(so, 1);
+ return;
+ }
+ }
+#endif
+ socantrcvmore(stcb->sctp_socket);
+ sorwakeup(stcb->sctp_socket);
+ sowwakeup(stcb->sctp_socket);
+#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+ if (!so_locked) {
+ SCTP_SOCKET_UNLOCK(so, 1);
+ }
+#endif
+ }
+ if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVASSOCEVNT)) {
+ /* event not enabled */
+ return;
+ }
+ m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_change), 0, M_DONTWAIT, 1, MT_DATA);
+ if (m_notify == NULL)
+ /* no space left */
+ return;
+ SCTP_BUF_LEN(m_notify) = 0;
+
+ sac = mtod(m_notify, struct sctp_assoc_change *);
+ sac->sac_type = SCTP_ASSOC_CHANGE;
+ sac->sac_flags = 0;
+ sac->sac_length = sizeof(struct sctp_assoc_change);
+ sac->sac_state = event;
+ sac->sac_error = error;
+ /* XXX verify these stream counts */
+ sac->sac_outbound_streams = stcb->asoc.streamoutcnt;
+ sac->sac_inbound_streams = stcb->asoc.streamincnt;
+ sac->sac_assoc_id = sctp_get_associd(stcb);
+ SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_change);
+ SCTP_BUF_NEXT(m_notify) = NULL;
+ control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
+ 0, 0, 0, 0, 0, 0,
+ m_notify);
+ if (control == NULL) {
+ /* no memory */
+ sctp_m_freem(m_notify);
+ return;
+ }
+ control->length = SCTP_BUF_LEN(m_notify);
+ /* not that we need this */
+ control->tail_mbuf = m_notify;
+ control->spec_flags = M_NOTIFICATION;
+ sctp_add_to_readq(stcb->sctp_ep, stcb,
+ control,
+ &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD,
+ so_locked);
+ if (event == SCTP_COMM_LOST) {
+ /* Wake up any sleeper */
+#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+ so = SCTP_INP_SO(stcb->sctp_ep);
+ if (!so_locked) {
+ atomic_add_int(&stcb->asoc.refcnt, 1);
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_SOCKET_LOCK(so, 1);
+ SCTP_TCB_LOCK(stcb);
+ atomic_subtract_int(&stcb->asoc.refcnt, 1);
+ if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
+ SCTP_SOCKET_UNLOCK(so, 1);
+ return;
+ }
+ }
+#endif
+ sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
+#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+ if (!so_locked) {
+ SCTP_SOCKET_UNLOCK(so, 1);
+ }
+#endif
+ }
+}
+
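+/*
+ * Queue an SCTP_PEER_ADDR_CHANGE notification carrying the affected
+ * address (with any embedded IPv6 scope recovered or cleared for the
+ * user), the new state, and the error code.
+ */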
+static void
+sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state,
+ struct sockaddr *sa, uint32_t error)
+{
+ struct mbuf *m_notify;
+ struct sctp_paddr_change *spc;
+ struct sctp_queued_to_read *control;
+
+ if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVPADDREVNT)) {
+ /* event not enabled */
+ return;
+ }
+ m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_DONTWAIT, 1, MT_DATA);
+ if (m_notify == NULL)
+ return;
+ SCTP_BUF_LEN(m_notify) = 0;
+ spc = mtod(m_notify, struct sctp_paddr_change *);
+ spc->spc_type = SCTP_PEER_ADDR_CHANGE;
+ spc->spc_flags = 0;
+ spc->spc_length = sizeof(struct sctp_paddr_change);
+ switch (sa->sa_family) {
+ case AF_INET:
+ memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
+ break;
+#ifdef INET6
+ case AF_INET6:
+ {
+ struct sockaddr_in6 *sin6;
+
+ memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6));
+
+ sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr;
+ if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) {
+ if (sin6->sin6_scope_id == 0) {
+ /* recover scope_id for user */
+ (void)sa6_recoverscope(sin6);
+ } else {
+ /* clear embedded scope_id for user */
+ in6_clearscope(&sin6->sin6_addr);
+ }
+ }
+ break;
+ }
+#endif
+ default:
+ /* TSNH */
+ break;
+ }
+ spc->spc_state = state;
+ spc->spc_error = error;
+ spc->spc_assoc_id = sctp_get_associd(stcb);
+
+ SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change);
+ SCTP_BUF_NEXT(m_notify) = NULL;
+
+ /* append to socket */
+ control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
+ 0, 0, 0, 0, 0, 0,
+ m_notify);
+ if (control == NULL) {
+ /* no memory */
+ sctp_m_freem(m_notify);
+ return;
+ }
+ control->length = SCTP_BUF_LEN(m_notify);
+ control->spec_flags = M_NOTIFICATION;
+ /* not that we need this */
+ control->tail_mbuf = m_notify;
+ sctp_add_to_readq(stcb->sctp_ep, stcb,
+ control,
+ &stcb->sctp_socket->so_rcv, 1,
+ SCTP_READ_LOCK_NOT_HELD,
+ SCTP_SO_NOT_LOCKED);
+}
+
+
+static void
+sctp_notify_send_failed(struct sctp_tcb *stcb, uint32_t error,
+ struct sctp_tmit_chunk *chk, int so_locked
+#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
+ SCTP_UNUSED
+#endif
+)
+{
+ struct mbuf *m_notify;
+ struct sctp_send_failed *ssf;
+ struct sctp_queued_to_read *control;
+ int length;
+
+ if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSENDFAILEVNT)) {
+ /* event not enabled */
+ return;
+ }
+ m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_send_failed), 0, M_DONTWAIT, 1, MT_DATA);
+ if (m_notify == NULL)
+ /* no space left */
+ return;
+ length = sizeof(struct sctp_send_failed) + chk->send_size;
+ length -= sizeof(struct sctp_data_chunk);
+ SCTP_BUF_LEN(m_notify) = 0;
+ ssf = mtod(m_notify, struct sctp_send_failed *);
+ ssf->ssf_type = SCTP_SEND_FAILED;
+ if (error == SCTP_NOTIFY_DATAGRAM_UNSENT)
+ ssf->ssf_flags = SCTP_DATA_UNSENT;
+ else
+ ssf->ssf_flags = SCTP_DATA_SENT;
+ ssf->ssf_length = length;
+ ssf->ssf_error = error;
+ /* not exactly what the user sent in, but should be close :) */
+ bzero(&ssf->ssf_info, sizeof(ssf->ssf_info));
+ ssf->ssf_info.sinfo_stream = chk->rec.data.stream_number;
+ ssf->ssf_info.sinfo_ssn = chk->rec.data.stream_seq;
+ ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags;
+ ssf->ssf_info.sinfo_ppid = chk->rec.data.payloadtype;
+ ssf->ssf_info.sinfo_context = chk->rec.data.context;
+ ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
+ ssf->ssf_assoc_id = sctp_get_associd(stcb);
+
+ if (chk->data) {
+ /*
+		 * trim off the sctp chunk header (it should be there)
+ */
+ if (chk->send_size >= sizeof(struct sctp_data_chunk)) {
+ m_adj(chk->data, sizeof(struct sctp_data_chunk));
+ sctp_mbuf_crush(chk->data);
+ chk->send_size -= sizeof(struct sctp_data_chunk);
+ }
+ }
+ SCTP_BUF_NEXT(m_notify) = chk->data;
+ SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);
+ /* Steal off the mbuf */
+ chk->data = NULL;
+ /*
+ * For this case, we check the actual socket buffer, since the assoc
+ * is going away we don't want to overfill the socket buffer for a
+ * non-reader
+ */
+ if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
+ sctp_m_freem(m_notify);
+ return;
+ }
+ /* append to socket */
+ control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
+ 0, 0, 0, 0, 0, 0,
+ m_notify);
+ if (control == NULL) {
+ /* no memory */
+ sctp_m_freem(m_notify);
+ return;
+ }
+ control->spec_flags = M_NOTIFICATION;
+ sctp_add_to_readq(stcb->sctp_ep, stcb,
+ control,
+ &stcb->sctp_socket->so_rcv, 1,
+ SCTP_READ_LOCK_NOT_HELD,
+ so_locked);
+}
+
+
+static void
+sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error,
+ struct sctp_stream_queue_pending *sp, int so_locked
+#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
+ SCTP_UNUSED
+#endif
+)
+{
+ struct mbuf *m_notify;
+ struct sctp_send_failed *ssf;
+ struct sctp_queued_to_read *control;
+ int length;
+
+ if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSENDFAILEVNT)) {
+ /* event not enabled */
+ return;
+ }
+ length = sizeof(struct sctp_send_failed) + sp->length;
+ m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_send_failed), 0, M_DONTWAIT, 1, MT_DATA);
+ if (m_notify == NULL)
+ /* no space left */
+ return;
+ SCTP_BUF_LEN(m_notify) = 0;
+ ssf = mtod(m_notify, struct sctp_send_failed *);
+ ssf->ssf_type = SCTP_SEND_FAILED;
+ if (error == SCTP_NOTIFY_DATAGRAM_UNSENT)
+ ssf->ssf_flags = SCTP_DATA_UNSENT;
+ else
+ ssf->ssf_flags = SCTP_DATA_SENT;
+ ssf->ssf_length = length;
+ ssf->ssf_error = error;
+ /* not exactly what the user sent in, but should be close :) */
+ bzero(&ssf->ssf_info, sizeof(ssf->ssf_info));
+ ssf->ssf_info.sinfo_stream = sp->stream;
+ ssf->ssf_info.sinfo_ssn = sp->strseq;
+ if (sp->some_taken) {
+ ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG;
+ } else {
+ ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG;
+ }
+ ssf->ssf_info.sinfo_ppid = sp->ppid;
+ ssf->ssf_info.sinfo_context = sp->context;
+ ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
+ ssf->ssf_assoc_id = sctp_get_associd(stcb);
+ SCTP_BUF_NEXT(m_notify) = sp->data;
+ SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);
+
+ /* Steal off the mbuf */
+ sp->data = NULL;
+ /*
+ * For this case, we check the actual socket buffer, since the assoc
+ * is going away we don't want to overfill the socket buffer for a
+ * non-reader
+ */
+ if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
+ sctp_m_freem(m_notify);
+ return;
+ }
+ /* append to socket */
+ control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
+ 0, 0, 0, 0, 0, 0,
+ m_notify);
+ if (control == NULL) {
+ /* no memory */
+ sctp_m_freem(m_notify);
+ return;
+ }
+ control->spec_flags = M_NOTIFICATION;
+ sctp_add_to_readq(stcb->sctp_ep, stcb,
+ control,
+ &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
+}
+
+
+
+static void
+sctp_notify_adaptation_layer(struct sctp_tcb *stcb,
+ uint32_t error)
+{
+ struct mbuf *m_notify;
+ struct sctp_adaptation_event *sai;
+ struct sctp_queued_to_read *control;
+
+ if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) {
+ /* event not enabled */
+ return;
+ }
+	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaptation_event), 0, M_DONTWAIT, 1, MT_DATA);
+ if (m_notify == NULL)
+ /* no space left */
+ return;
+ SCTP_BUF_LEN(m_notify) = 0;
+ sai = mtod(m_notify, struct sctp_adaptation_event *);
+ sai->sai_type = SCTP_ADAPTATION_INDICATION;
+ sai->sai_flags = 0;
+ sai->sai_length = sizeof(struct sctp_adaptation_event);
+ sai->sai_adaptation_ind = stcb->asoc.peers_adaptation;
+ sai->sai_assoc_id = sctp_get_associd(stcb);
+
+ SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event);
+ SCTP_BUF_NEXT(m_notify) = NULL;
+
+ /* append to socket */
+ control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
+ 0, 0, 0, 0, 0, 0,
+ m_notify);
+ if (control == NULL) {
+ /* no memory */
+ sctp_m_freem(m_notify);
+ return;
+ }
+ control->length = SCTP_BUF_LEN(m_notify);
+ control->spec_flags = M_NOTIFICATION;
+ /* not that we need this */
+ control->tail_mbuf = m_notify;
+ sctp_add_to_readq(stcb->sctp_ep, stcb,
+ control,
+ &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
+}
+
+/* This must always be called with the read-queue LOCKED in the INP */
+static void
+sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error,
+ uint32_t val, int so_locked
+#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
+ SCTP_UNUSED
+#endif
+)
+{
+ struct mbuf *m_notify;
+ struct sctp_pdapi_event *pdapi;
+ struct sctp_queued_to_read *control;
+ struct sockbuf *sb;
+
+ if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_PDAPIEVNT)) {
+ /* event not enabled */
+ return;
+ }
+ if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
+ return;
+ }
+ m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_DONTWAIT, 1, MT_DATA);
+ if (m_notify == NULL)
+ /* no space left */
+ return;
+ SCTP_BUF_LEN(m_notify) = 0;
+ pdapi = mtod(m_notify, struct sctp_pdapi_event *);
+ pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
+ pdapi->pdapi_flags = 0;
+ pdapi->pdapi_length = sizeof(struct sctp_pdapi_event);
+ pdapi->pdapi_indication = error;
+ pdapi->pdapi_stream = (val >> 16);
+ pdapi->pdapi_seq = (val & 0x0000ffff);
+ pdapi->pdapi_assoc_id = sctp_get_associd(stcb);
+
+ SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event);
+ SCTP_BUF_NEXT(m_notify) = NULL;
+ control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
+ 0, 0, 0, 0, 0, 0,
+ m_notify);
+ if (control == NULL) {
+ /* no memory */
+ sctp_m_freem(m_notify);
+ return;
+ }
+ control->spec_flags = M_NOTIFICATION;
+ control->length = SCTP_BUF_LEN(m_notify);
+ /* not that we need this */
+ control->tail_mbuf = m_notify;
+ control->held_length = 0;
+ control->length = 0;
+ sb = &stcb->sctp_socket->so_rcv;
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
+ sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify));
+ }
+ sctp_sballoc(stcb, sb, m_notify);
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
+ sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
+ }
+ atomic_add_int(&control->length, SCTP_BUF_LEN(m_notify));
+ control->end_added = 1;
+ if (stcb->asoc.control_pdapi)
+ TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next);
+ else {
+ /* we really should not see this case */
+ TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next);
+ }
+ if (stcb->sctp_ep && stcb->sctp_socket) {
+ /* This should always be the case */
+#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+ struct socket *so;
+
+ so = SCTP_INP_SO(stcb->sctp_ep);
+ if (!so_locked) {
+ atomic_add_int(&stcb->asoc.refcnt, 1);
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_SOCKET_LOCK(so, 1);
+ SCTP_TCB_LOCK(stcb);
+ atomic_subtract_int(&stcb->asoc.refcnt, 1);
+ if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
+ SCTP_SOCKET_UNLOCK(so, 1);
+ return;
+ }
+ }
+#endif
+ sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
+#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+ if (!so_locked) {
+ SCTP_SOCKET_UNLOCK(so, 1);
+ }
+#endif
+ }
+}
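+/*
+ * Note: unlike its siblings, the routine above appends to the read
+ * queue by hand (sctp_sballoc() plus a TAILQ insert) instead of going
+ * through sctp_add_to_readq(), because its contract is that the caller
+ * already holds the INP read-queue lock and the entry must land
+ * directly behind any in-progress partial-delivery control.
+ */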
+
+static void
+sctp_notify_shutdown_event(struct sctp_tcb *stcb)
+{
+ struct mbuf *m_notify;
+ struct sctp_shutdown_event *sse;
+ struct sctp_queued_to_read *control;
+
+ /*
+ * For TCP model AND UDP connected sockets we will send an error up
+ * when an SHUTDOWN completes
+ */
+ if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+ (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
+ /* mark socket closed for read/write and wakeup! */
+#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+ struct socket *so;
+
+ so = SCTP_INP_SO(stcb->sctp_ep);
+ atomic_add_int(&stcb->asoc.refcnt, 1);
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_SOCKET_LOCK(so, 1);
+ SCTP_TCB_LOCK(stcb);
+ atomic_subtract_int(&stcb->asoc.refcnt, 1);
+ if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
+ SCTP_SOCKET_UNLOCK(so, 1);
+ return;
+ }
+#endif
+ socantsendmore(stcb->sctp_socket);
+#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+ SCTP_SOCKET_UNLOCK(so, 1);
+#endif
+ }
+ if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) {
+ /* event not enabled */
+ return;
+ }
+ m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_DONTWAIT, 1, MT_DATA);
+ if (m_notify == NULL)
+ /* no space left */
+ return;
+ sse = mtod(m_notify, struct sctp_shutdown_event *);
+ sse->sse_type = SCTP_SHUTDOWN_EVENT;
+ sse->sse_flags = 0;
+ sse->sse_length = sizeof(struct sctp_shutdown_event);
+ sse->sse_assoc_id = sctp_get_associd(stcb);
+
+ SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event);
+ SCTP_BUF_NEXT(m_notify) = NULL;
+
+ /* append to socket */
+ control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
+ 0, 0, 0, 0, 0, 0,
+ m_notify);
+ if (control == NULL) {
+ /* no memory */
+ sctp_m_freem(m_notify);
+ return;
+ }
+ control->spec_flags = M_NOTIFICATION;
+ control->length = SCTP_BUF_LEN(m_notify);
+ /* not that we need this */
+ control->tail_mbuf = m_notify;
+ sctp_add_to_readq(stcb->sctp_ep, stcb,
+ control,
+ &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
+}
+
+static void
+sctp_notify_sender_dry_event(struct sctp_tcb *stcb,
+ int so_locked
+#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
+ SCTP_UNUSED
+#endif
+)
+{
+ struct mbuf *m_notify;
+ struct sctp_sender_dry_event *event;
+ struct sctp_queued_to_read *control;
+
+ if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_DRYEVNT)) {
+ /* event not enabled */
+ return;
+ }
+ m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_DONTWAIT, 1, MT_DATA);
+ if (m_notify == NULL) {
+ /* no space left */
+ return;
+ }
+ SCTP_BUF_LEN(m_notify) = 0;
+ event = mtod(m_notify, struct sctp_sender_dry_event *);
+ event->sender_dry_type = SCTP_SENDER_DRY_EVENT;
+ event->sender_dry_flags = 0;
+ event->sender_dry_length = sizeof(struct sctp_sender_dry_event);
+ event->sender_dry_assoc_id = sctp_get_associd(stcb);
+
+ SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event);
+ SCTP_BUF_NEXT(m_notify) = NULL;
+
+ /* append to socket */
+ control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
+ 0, 0, 0, 0, 0, 0, m_notify);
+ if (control == NULL) {
+ /* no memory */
+ sctp_m_freem(m_notify);
+ return;
+ }
+ control->length = SCTP_BUF_LEN(m_notify);
+ control->spec_flags = M_NOTIFICATION;
+ /* not that we need this */
+ control->tail_mbuf = m_notify;
+ sctp_add_to_readq(stcb->sctp_ep, stcb, control,
+ &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
+}
+
+
+static void
+sctp_notify_stream_reset_add(struct sctp_tcb *stcb, int number_entries, int flag)
+{
+ struct mbuf *m_notify;
+ struct sctp_queued_to_read *control;
+ struct sctp_stream_reset_event *strreset;
+ int len;
+
+ if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_STREAM_RESETEVNT)) {
+ /* event not enabled */
+ return;
+ }
+ m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
+ if (m_notify == NULL)
+ /* no space left */
+ return;
+ SCTP_BUF_LEN(m_notify) = 0;
+ len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
+ if (len > M_TRAILINGSPACE(m_notify)) {
+ /* never enough room */
+ sctp_m_freem(m_notify);
+ return;
+ }
+ strreset = mtod(m_notify, struct sctp_stream_reset_event *);
+ strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
+ strreset->strreset_flags = SCTP_STRRESET_ADD_STREAM | flag;
+ strreset->strreset_length = len;
+ strreset->strreset_assoc_id = sctp_get_associd(stcb);
+ strreset->strreset_list[0] = number_entries;
+
+ SCTP_BUF_LEN(m_notify) = len;
+ SCTP_BUF_NEXT(m_notify) = NULL;
+ if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
+ /* no space */
+ sctp_m_freem(m_notify);
+ return;
+ }
+ /* append to socket */
+ control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
+ 0, 0, 0, 0, 0, 0,
+ m_notify);
+ if (control == NULL) {
+ /* no memory */
+ sctp_m_freem(m_notify);
+ return;
+ }
+ control->spec_flags = M_NOTIFICATION;
+ control->length = SCTP_BUF_LEN(m_notify);
+ /* not that we need this */
+ control->tail_mbuf = m_notify;
+ sctp_add_to_readq(stcb->sctp_ep, stcb,
+ control,
+ &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
+}
+
+
+static void
+sctp_notify_stream_reset(struct sctp_tcb *stcb,
+ int number_entries, uint16_t * list, int flag)
+{
+ struct mbuf *m_notify;
+ struct sctp_queued_to_read *control;
+ struct sctp_stream_reset_event *strreset;
+ int len;
+
+ if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_STREAM_RESETEVNT)) {
+ /* event not enabled */
+ return;
+ }
+ m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
+ if (m_notify == NULL)
+ /* no space left */
+ return;
+ SCTP_BUF_LEN(m_notify) = 0;
+ len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
+ if (len > M_TRAILINGSPACE(m_notify)) {
+ /* never enough room */
+ sctp_m_freem(m_notify);
+ return;
+ }
+ strreset = mtod(m_notify, struct sctp_stream_reset_event *);
+ strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
+ if (number_entries == 0) {
+ strreset->strreset_flags = flag | SCTP_STRRESET_ALL_STREAMS;
+ } else {
+ strreset->strreset_flags = flag | SCTP_STRRESET_STREAM_LIST;
+ }
+ strreset->strreset_length = len;
+ strreset->strreset_assoc_id = sctp_get_associd(stcb);
+ if (number_entries) {
+ int i;
+
+ for (i = 0; i < number_entries; i++) {
+ strreset->strreset_list[i] = ntohs(list[i]);
+ }
+ }
+ SCTP_BUF_LEN(m_notify) = len;
+ SCTP_BUF_NEXT(m_notify) = NULL;
+ if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
+ /* no space */
+ sctp_m_freem(m_notify);
+ return;
+ }
+ /* append to socket */
+ control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
+ 0, 0, 0, 0, 0, 0,
+ m_notify);
+ if (control == NULL) {
+ /* no memory */
+ sctp_m_freem(m_notify);
+ return;
+ }
+ control->spec_flags = M_NOTIFICATION;
+ control->length = SCTP_BUF_LEN(m_notify);
+ /* not that we need this */
+ control->tail_mbuf = m_notify;
+ sctp_add_to_readq(stcb->sctp_ep, stcb,
+ control,
+ &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
+}
+
+
+void
+sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb,
+ uint32_t error, void *data, int so_locked
+#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
+ SCTP_UNUSED
+#endif
+)
+{
+ if ((stcb == NULL) ||
+ (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
+ (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
+ (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
+ /* If the socket is gone we are out of here */
+ return;
+ }
+ if (stcb->sctp_socket->so_rcv.sb_state & SBS_CANTRCVMORE) {
+ return;
+ }
+ if (stcb && ((stcb->asoc.state & SCTP_STATE_COOKIE_WAIT) ||
+ (stcb->asoc.state & SCTP_STATE_COOKIE_ECHOED))) {
+ if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) ||
+ (notification == SCTP_NOTIFY_INTERFACE_UP) ||
+ (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) {
+ /* Don't report these in front states */
+ return;
+ }
+ }
+ switch (notification) {
+ case SCTP_NOTIFY_ASSOC_UP:
+ if (stcb->asoc.assoc_up_sent == 0) {
+ sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, so_locked);
+ stcb->asoc.assoc_up_sent = 1;
+ }
+ if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) {
+ sctp_notify_adaptation_layer(stcb, error);
+ }
+ if (stcb->asoc.peer_supports_auth == 0) {
+ sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
+ NULL, so_locked);
+ }
+ break;
+ case SCTP_NOTIFY_ASSOC_DOWN:
+ sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, so_locked);
+ break;
+ case SCTP_NOTIFY_INTERFACE_DOWN:
+ {
+ struct sctp_nets *net;
+
+ net = (struct sctp_nets *)data;
+ sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE,
+ (struct sockaddr *)&net->ro._l_addr, error);
+ break;
+ }
+ case SCTP_NOTIFY_INTERFACE_UP:
+ {
+ struct sctp_nets *net;
+
+ net = (struct sctp_nets *)data;
+ sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE,
+ (struct sockaddr *)&net->ro._l_addr, error);
+ break;
+ }
+ case SCTP_NOTIFY_INTERFACE_CONFIRMED:
+ {
+ struct sctp_nets *net;
+
+ net = (struct sctp_nets *)data;
+ sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED,
+ (struct sockaddr *)&net->ro._l_addr, error);
+ break;
+ }
+ case SCTP_NOTIFY_SPECIAL_SP_FAIL:
+ sctp_notify_send_failed2(stcb, error,
+ (struct sctp_stream_queue_pending *)data, so_locked);
+ break;
+ case SCTP_NOTIFY_DG_FAIL:
+ sctp_notify_send_failed(stcb, error,
+ (struct sctp_tmit_chunk *)data, so_locked);
+ break;
+ case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION:
+ {
+ uint32_t val;
+
+ val = *((uint32_t *) data);
+
+ sctp_notify_partial_delivery_indication(stcb, error, val, so_locked);
+ break;
+ }
+ case SCTP_NOTIFY_STRDATA_ERR:
+ break;
+ case SCTP_NOTIFY_ASSOC_ABORTED:
+ if ((stcb) && (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
+ ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED))) {
+ sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, NULL, so_locked);
+ } else {
+ sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, NULL, so_locked);
+ }
+ break;
+ case SCTP_NOTIFY_PEER_OPENED_STREAM:
+ break;
+ case SCTP_NOTIFY_STREAM_OPENED_OK:
+ break;
+ case SCTP_NOTIFY_ASSOC_RESTART:
+ sctp_notify_assoc_change(SCTP_RESTART, stcb, error, data, so_locked);
+ if (stcb->asoc.peer_supports_auth == 0) {
+ sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
+ NULL, so_locked);
+ }
+ break;
+ case SCTP_NOTIFY_HB_RESP:
+ break;
+ case SCTP_NOTIFY_STR_RESET_INSTREAM_ADD_OK:
+ sctp_notify_stream_reset_add(stcb, error, SCTP_STRRESET_INBOUND_STR);
+ break;
+ case SCTP_NOTIFY_STR_RESET_ADD_OK:
+ sctp_notify_stream_reset_add(stcb, error, SCTP_STRRESET_OUTBOUND_STR);
+ break;
+ case SCTP_NOTIFY_STR_RESET_ADD_FAIL:
+ sctp_notify_stream_reset_add(stcb, error, (SCTP_STRRESET_FAILED | SCTP_STRRESET_OUTBOUND_STR));
+ break;
+
+ case SCTP_NOTIFY_STR_RESET_SEND:
+ sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STRRESET_OUTBOUND_STR);
+ break;
+ case SCTP_NOTIFY_STR_RESET_RECV:
+ sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STRRESET_INBOUND_STR);
+ break;
+ case SCTP_NOTIFY_STR_RESET_FAILED_OUT:
+ sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), (SCTP_STRRESET_OUTBOUND_STR | SCTP_STRRESET_FAILED));
+ break;
+ case SCTP_NOTIFY_STR_RESET_FAILED_IN:
+ sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), (SCTP_STRRESET_INBOUND_STR | SCTP_STRRESET_FAILED));
+ break;
+ case SCTP_NOTIFY_ASCONF_ADD_IP:
+ sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data,
+ error);
+ break;
+ case SCTP_NOTIFY_ASCONF_DELETE_IP:
+ sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data,
+ error);
+ break;
+ case SCTP_NOTIFY_ASCONF_SET_PRIMARY:
+ sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data,
+ error);
+ break;
+ case SCTP_NOTIFY_ASCONF_SUCCESS:
+ break;
+ case SCTP_NOTIFY_ASCONF_FAILED:
+ break;
+ case SCTP_NOTIFY_PEER_SHUTDOWN:
+ sctp_notify_shutdown_event(stcb);
+ break;
+ case SCTP_NOTIFY_AUTH_NEW_KEY:
+ sctp_notify_authentication(stcb, SCTP_AUTH_NEWKEY, error,
+ (uint16_t) (uintptr_t) data,
+ so_locked);
+ break;
+ case SCTP_NOTIFY_AUTH_FREE_KEY:
+ sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error,
+ (uint16_t) (uintptr_t) data,
+ so_locked);
+ break;
+ case SCTP_NOTIFY_NO_PEER_AUTH:
+ sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error,
+ (uint16_t) (uintptr_t) data,
+ so_locked);
+ break;
+ case SCTP_NOTIFY_SENDER_DRY:
+ sctp_notify_sender_dry_event(stcb, so_locked);
+ break;
+ default:
+ SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n",
+ __FUNCTION__, notification, notification);
+ break;
+ } /* end switch */
+}
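+/*
+ * Usage sketch (illustrative only, not a new call site): a caller that
+ * has just confirmed a destination address would typically hold the TCB
+ * lock and do
+ *
+ *	sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED, stcb, 0,
+ *	    (void *)net, SCTP_SO_NOT_LOCKED);
+ *
+ * "data" is interpreted per notification type, as dispatched above: a
+ * struct sctp_nets * here, a struct sctp_tmit_chunk * for
+ * SCTP_NOTIFY_DG_FAIL, a uint16_t list for the stream-reset cases, etc.
+ */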
+
+void
+sctp_report_all_outbound(struct sctp_tcb *stcb, int holds_lock, int so_locked
+#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
+ SCTP_UNUSED
+#endif
+)
+{
+ struct sctp_association *asoc;
+ struct sctp_stream_out *outs;
+ struct sctp_tmit_chunk *chk;
+ struct sctp_stream_queue_pending *sp;
+ int i;
+
+	if (stcb == NULL) {
+		return;
+	}
+	asoc = &stcb->asoc;
+
+ if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
+ /* already being freed */
+ return;
+ }
+ if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
+ (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
+ (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
+ return;
+ }
+	/* now go through all the gunk, freeing chunks */
+ if (holds_lock == 0) {
+ SCTP_TCB_SEND_LOCK(stcb);
+ }
+ /* sent queue SHOULD be empty */
+ if (!TAILQ_EMPTY(&asoc->sent_queue)) {
+ chk = TAILQ_FIRST(&asoc->sent_queue);
+ while (chk) {
+ TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
+ asoc->sent_queue_cnt--;
+ if (chk->data != NULL) {
+ sctp_free_bufspace(stcb, asoc, chk, 1);
+ sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb,
+ SCTP_NOTIFY_DATAGRAM_SENT, chk, so_locked);
+ if (chk->data) {
+ sctp_m_freem(chk->data);
+ chk->data = NULL;
+ }
+ }
+ sctp_free_a_chunk(stcb, chk);
+ /* sa_ignore FREED_MEMORY */
+ chk = TAILQ_FIRST(&asoc->sent_queue);
+ }
+ }
+ /* pending send queue SHOULD be empty */
+ if (!TAILQ_EMPTY(&asoc->send_queue)) {
+ chk = TAILQ_FIRST(&asoc->send_queue);
+ while (chk) {
+ TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
+ asoc->send_queue_cnt--;
+ if (chk->data != NULL) {
+ sctp_free_bufspace(stcb, asoc, chk, 1);
+ sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb,
+ SCTP_NOTIFY_DATAGRAM_UNSENT, chk, so_locked);
+ if (chk->data) {
+ sctp_m_freem(chk->data);
+ chk->data = NULL;
+ }
+ }
+ sctp_free_a_chunk(stcb, chk);
+ /* sa_ignore FREED_MEMORY */
+ chk = TAILQ_FIRST(&asoc->send_queue);
+ }
+ }
+ for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
+ /* For each stream */
+ outs = &stcb->asoc.strmout[i];
+ /* clean up any sends there */
+ stcb->asoc.locked_on_sending = NULL;
+ sp = TAILQ_FIRST(&outs->outqueue);
+ while (sp) {
+ stcb->asoc.stream_queue_cnt--;
+ TAILQ_REMOVE(&outs->outqueue, sp, next);
+ sctp_free_spbufspace(stcb, asoc, sp);
+ if (sp->data) {
+ sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb,
+ SCTP_NOTIFY_DATAGRAM_UNSENT, (void *)sp, so_locked);
+ if (sp->data) {
+ sctp_m_freem(sp->data);
+ sp->data = NULL;
+ }
+ }
+ if (sp->net) {
+ sctp_free_remote_addr(sp->net);
+ sp->net = NULL;
+ }
+ /* Free the chunk */
+ sctp_free_a_strmoq(stcb, sp);
+ /* sa_ignore FREED_MEMORY */
+ sp = TAILQ_FIRST(&outs->outqueue);
+ }
+ }
+
+ if (holds_lock == 0) {
+ SCTP_TCB_SEND_UNLOCK(stcb);
+ }
+}
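+/*
+ * Note the notification split above: chunks that reached the sent queue
+ * are reported with SCTP_NOTIFY_DATAGRAM_SENT, while anything still on
+ * the send or stream-out queues is reported with
+ * SCTP_NOTIFY_DATAGRAM_UNSENT, matching the ssf_flags selection in
+ * sctp_notify_send_failed() above.
+ */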
+
+void
+sctp_abort_notification(struct sctp_tcb *stcb, int error, int so_locked
+#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
+ SCTP_UNUSED
+#endif
+)
+{
+
+ if (stcb == NULL) {
+ return;
+ }
+ if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
+ (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
+ (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
+ return;
+ }
+ /* Tell them we lost the asoc */
+ sctp_report_all_outbound(stcb, 1, so_locked);
+ if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
+ ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
+ (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) {
+ stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED;
+ }
+ sctp_ulp_notify(SCTP_NOTIFY_ASSOC_ABORTED, stcb, error, NULL, so_locked);
+}
+
+void
+sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
+ struct mbuf *m, int iphlen, struct sctphdr *sh, struct mbuf *op_err,
+ uint32_t vrf_id, uint16_t port)
+{
+ uint32_t vtag;
+
+#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+ struct socket *so;
+
+#endif
+
+ vtag = 0;
+ if (stcb != NULL) {
+ /* We have a TCB to abort, send notification too */
+ vtag = stcb->asoc.peer_vtag;
+ sctp_abort_notification(stcb, 0, SCTP_SO_NOT_LOCKED);
+ /* get the assoc vrf id and table id */
+ vrf_id = stcb->asoc.vrf_id;
+ stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
+ }
+ sctp_send_abort(m, iphlen, sh, vtag, op_err, vrf_id, port);
+ if (stcb != NULL) {
+ /* Ok, now lets free it */
+#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+ so = SCTP_INP_SO(inp);
+ atomic_add_int(&stcb->asoc.refcnt, 1);
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_SOCKET_LOCK(so, 1);
+ SCTP_TCB_LOCK(stcb);
+ atomic_subtract_int(&stcb->asoc.refcnt, 1);
+#endif
+ SCTP_STAT_INCR_COUNTER32(sctps_aborted);
+ if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
+ (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
+ SCTP_STAT_DECR_GAUGE32(sctps_currestab);
+ }
+ (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_4);
+#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+ SCTP_SOCKET_UNLOCK(so, 1);
+#endif
+ }
+}
+
+#ifdef SCTP_ASOCLOG_OF_TSNS
+void
+sctp_print_out_track_log(struct sctp_tcb *stcb)
+{
+#ifdef NOISY_PRINTS
+ int i;
+
+ SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code);
+ SCTP_PRINTF("IN bound TSN log-aaa\n");
+ if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) {
+ SCTP_PRINTF("None rcvd\n");
+ goto none_in;
+ }
+ if (stcb->asoc.tsn_in_wrapped) {
+ for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) {
+ SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
+ stcb->asoc.in_tsnlog[i].tsn,
+ stcb->asoc.in_tsnlog[i].strm,
+ stcb->asoc.in_tsnlog[i].seq,
+ stcb->asoc.in_tsnlog[i].flgs,
+ stcb->asoc.in_tsnlog[i].sz);
+ }
+ }
+ if (stcb->asoc.tsn_in_at) {
+ for (i = 0; i < stcb->asoc.tsn_in_at; i++) {
+ SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
+ stcb->asoc.in_tsnlog[i].tsn,
+ stcb->asoc.in_tsnlog[i].strm,
+ stcb->asoc.in_tsnlog[i].seq,
+ stcb->asoc.in_tsnlog[i].flgs,
+ stcb->asoc.in_tsnlog[i].sz);
+ }
+ }
+none_in:
+ SCTP_PRINTF("OUT bound TSN log-aaa\n");
+ if ((stcb->asoc.tsn_out_at == 0) &&
+ (stcb->asoc.tsn_out_wrapped == 0)) {
+ SCTP_PRINTF("None sent\n");
+ }
+ if (stcb->asoc.tsn_out_wrapped) {
+ for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) {
+ SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
+ stcb->asoc.out_tsnlog[i].tsn,
+ stcb->asoc.out_tsnlog[i].strm,
+ stcb->asoc.out_tsnlog[i].seq,
+ stcb->asoc.out_tsnlog[i].flgs,
+ stcb->asoc.out_tsnlog[i].sz);
+ }
+ }
+ if (stcb->asoc.tsn_out_at) {
+ for (i = 0; i < stcb->asoc.tsn_out_at; i++) {
+ SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
+ stcb->asoc.out_tsnlog[i].tsn,
+ stcb->asoc.out_tsnlog[i].strm,
+ stcb->asoc.out_tsnlog[i].seq,
+ stcb->asoc.out_tsnlog[i].flgs,
+ stcb->asoc.out_tsnlog[i].sz);
+ }
+ }
+#endif
+}
+
+#endif
+
+void
+sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
+ int error, struct mbuf *op_err,
+ int so_locked
+#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
+ SCTP_UNUSED
+#endif
+)
+{
+ uint32_t vtag;
+
+#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+ struct socket *so;
+
+#endif
+
+#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+ so = SCTP_INP_SO(inp);
+#endif
+ if (stcb == NULL) {
+ /* Got to have a TCB */
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
+ if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
+ sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
+ SCTP_CALLED_DIRECTLY_NOCMPSET);
+ }
+ }
+ return;
+ } else {
+ stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
+ }
+ vtag = stcb->asoc.peer_vtag;
+ /* notify the ulp */
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0)
+ sctp_abort_notification(stcb, error, so_locked);
+ /* notify the peer */
+#if defined(SCTP_PANIC_ON_ABORT)
+ panic("aborting an association");
+#endif
+ sctp_send_abort_tcb(stcb, op_err, so_locked);
+ SCTP_STAT_INCR_COUNTER32(sctps_aborted);
+ if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
+ (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
+ SCTP_STAT_DECR_GAUGE32(sctps_currestab);
+ }
+ /* now free the asoc */
+#ifdef SCTP_ASOCLOG_OF_TSNS
+ sctp_print_out_track_log(stcb);
+#endif
+#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+ if (!so_locked) {
+ atomic_add_int(&stcb->asoc.refcnt, 1);
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_SOCKET_LOCK(so, 1);
+ SCTP_TCB_LOCK(stcb);
+ atomic_subtract_int(&stcb->asoc.refcnt, 1);
+ }
+#endif
+ (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_5);
+#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+ if (!so_locked) {
+ SCTP_SOCKET_UNLOCK(so, 1);
+ }
+#endif
+}
+
+void
+sctp_handle_ootb(struct mbuf *m, int iphlen, int offset, struct sctphdr *sh,
+ struct sctp_inpcb *inp, struct mbuf *op_err, uint32_t vrf_id, uint16_t port)
+{
+ struct sctp_chunkhdr *ch, chunk_buf;
+ unsigned int chk_length;
+
+ SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue);
+ /* Generate a TO address for future reference */
+ if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
+ if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
+ sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
+ SCTP_CALLED_DIRECTLY_NOCMPSET);
+ }
+ }
+ ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
+ sizeof(*ch), (uint8_t *) & chunk_buf);
+ while (ch != NULL) {
+ chk_length = ntohs(ch->chunk_length);
+ if (chk_length < sizeof(*ch)) {
+ /* break to abort land */
+ break;
+ }
+ switch (ch->chunk_type) {
+ case SCTP_COOKIE_ECHO:
+ /* We hit here only if the assoc is being freed */
+ return;
+ case SCTP_PACKET_DROPPED:
+ /* we don't respond to pkt-dropped */
+ return;
+ case SCTP_ABORT_ASSOCIATION:
+ /* we don't respond with an ABORT to an ABORT */
+ return;
+ case SCTP_SHUTDOWN_COMPLETE:
+ /*
+ * we ignore it since we are not waiting for it and
+ * peer is gone
+ */
+ return;
+ case SCTP_SHUTDOWN_ACK:
+ sctp_send_shutdown_complete2(m, iphlen, sh, vrf_id, port);
+ return;
+ default:
+ break;
+ }
+ offset += SCTP_SIZE32(chk_length);
+ ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
+ sizeof(*ch), (uint8_t *) & chunk_buf);
+ }
+ sctp_send_abort(m, iphlen, sh, 0, op_err, vrf_id, port);
+}
+
+/*
+ * check the inbound datagram to make sure there is not an abort inside it,
+ * if there is return 1, else return 0.
+ */
+int
+sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t * vtagfill)
+{
+ struct sctp_chunkhdr *ch;
+ struct sctp_init_chunk *init_chk, chunk_buf;
+ int offset;
+ unsigned int chk_length;
+
+ offset = iphlen + sizeof(struct sctphdr);
+ ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch),
+ (uint8_t *) & chunk_buf);
+ while (ch != NULL) {
+ chk_length = ntohs(ch->chunk_length);
+ if (chk_length < sizeof(*ch)) {
+ /* packet is probably corrupt */
+ break;
+ }
+ /* we seem to be ok, is it an abort? */
+ if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) {
+ /* yep, tell them */
+ return (1);
+ }
+ if (ch->chunk_type == SCTP_INITIATION) {
+ /* need to update the Vtag */
+ init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
+ offset, sizeof(*init_chk), (uint8_t *) & chunk_buf);
+ if (init_chk != NULL) {
+ *vtagfill = ntohl(init_chk->init.initiate_tag);
+ }
+ }
+ /* Nope, move to the next chunk */
+ offset += SCTP_SIZE32(chk_length);
+ ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
+ sizeof(*ch), (uint8_t *) & chunk_buf);
+ }
+ return (0);
+}
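+/*
+ * Usage sketch (illustrative only): an out-of-the-blue handler can use
+ * this before replying, e.g.
+ *
+ *	uint32_t vtag = 0;
+ *
+ *	if (sctp_is_there_an_abort_here(m, iphlen, &vtag)) {
+ *		return;		(never answer an ABORT with an ABORT)
+ *	}
+ *
+ * vtag is also filled from any INIT chunk seen, so a reply such as an
+ * ABORT can carry the peer's initiate tag.
+ */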
+
+/*
+ * currently (2/02), ifa_addr embeds the scope_id and doesn't have
+ * sin6_scope_id set (i.e. it's 0), so we created this function to
+ * compare link-local scopes
+ */
+#ifdef INET6
+uint32_t
+sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2)
+{
+ struct sockaddr_in6 a, b;
+
+ /* save copies */
+ a = *addr1;
+ b = *addr2;
+
+ if (a.sin6_scope_id == 0)
+ if (sa6_recoverscope(&a)) {
+ /* can't get scope, so can't match */
+ return (0);
+ }
+ if (b.sin6_scope_id == 0)
+ if (sa6_recoverscope(&b)) {
+ /* can't get scope, so can't match */
+ return (0);
+ }
+ if (a.sin6_scope_id != b.sin6_scope_id)
+ return (0);
+
+ return (1);
+}
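+/*
+ * Example (sketch): fe80::1%em0 and fe80::1%em1 compare as different
+ * here (return 0) because their recovered scope ids differ, even though
+ * the raw 128-bit addresses are identical; the interface names are
+ * illustrative only.
+ */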
+
+/*
+ * returns a sockaddr_in6 with embedded scope recovered and removed
+ */
+struct sockaddr_in6 *
+sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store)
+{
+ /* check and strip embedded scope junk */
+ if (addr->sin6_family == AF_INET6) {
+ if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) {
+ if (addr->sin6_scope_id == 0) {
+ *store = *addr;
+ if (!sa6_recoverscope(store)) {
+ /* use the recovered scope */
+ addr = store;
+ }
+ } else {
+ /* else, return the original "to" addr */
+ in6_clearscope(&addr->sin6_addr);
+ }
+ }
+ }
+ return (addr);
+}
+
+#endif
+
+/*
+ * are the two addresses the same? currently a "scopeless" check returns: 1
+ * if same, 0 if not
+ */
+int
+sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2)
+{
+
+ /* must be valid */
+ if (sa1 == NULL || sa2 == NULL)
+ return (0);
+
+ /* must be the same family */
+ if (sa1->sa_family != sa2->sa_family)
+ return (0);
+
+ switch (sa1->sa_family) {
+#ifdef INET6
+ case AF_INET6:
+ {
+ /* IPv6 addresses */
+ struct sockaddr_in6 *sin6_1, *sin6_2;
+
+ sin6_1 = (struct sockaddr_in6 *)sa1;
+ sin6_2 = (struct sockaddr_in6 *)sa2;
+ return (SCTP6_ARE_ADDR_EQUAL(sin6_1,
+ sin6_2));
+ }
+#endif
+ case AF_INET:
+ {
+ /* IPv4 addresses */
+ struct sockaddr_in *sin_1, *sin_2;
+
+ sin_1 = (struct sockaddr_in *)sa1;
+ sin_2 = (struct sockaddr_in *)sa2;
+ return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr);
+ }
+ default:
+ /* we don't do these... */
+ return (0);
+ }
+}
+
+void
+sctp_print_address(struct sockaddr *sa)
+{
+#ifdef INET6
+ char ip6buf[INET6_ADDRSTRLEN];
+
+ ip6buf[0] = 0;
+#endif
+
+ switch (sa->sa_family) {
+#ifdef INET6
+ case AF_INET6:
+ {
+ struct sockaddr_in6 *sin6;
+
+ sin6 = (struct sockaddr_in6 *)sa;
+ SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
+ ip6_sprintf(ip6buf, &sin6->sin6_addr),
+ ntohs(sin6->sin6_port),
+ sin6->sin6_scope_id);
+ break;
+ }
+#endif
+ case AF_INET:
+ {
+ struct sockaddr_in *sin;
+ unsigned char *p;
+
+ sin = (struct sockaddr_in *)sa;
+ p = (unsigned char *)&sin->sin_addr;
+ SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n",
+ p[0], p[1], p[2], p[3], ntohs(sin->sin_port));
+ break;
+ }
+ default:
+ SCTP_PRINTF("?\n");
+ break;
+ }
+}
+
+void
+sctp_print_address_pkt(struct ip *iph, struct sctphdr *sh)
+{
+ switch (iph->ip_v) {
+ case IPVERSION:
+ {
+ struct sockaddr_in lsa, fsa;
+
+ bzero(&lsa, sizeof(lsa));
+ lsa.sin_len = sizeof(lsa);
+ lsa.sin_family = AF_INET;
+ lsa.sin_addr = iph->ip_src;
+ lsa.sin_port = sh->src_port;
+ bzero(&fsa, sizeof(fsa));
+ fsa.sin_len = sizeof(fsa);
+ fsa.sin_family = AF_INET;
+ fsa.sin_addr = iph->ip_dst;
+ fsa.sin_port = sh->dest_port;
+ SCTP_PRINTF("src: ");
+ sctp_print_address((struct sockaddr *)&lsa);
+ SCTP_PRINTF("dest: ");
+ sctp_print_address((struct sockaddr *)&fsa);
+ break;
+ }
+#ifdef INET6
+ case IPV6_VERSION >> 4:
+ {
+ struct ip6_hdr *ip6;
+ struct sockaddr_in6 lsa6, fsa6;
+
+ ip6 = (struct ip6_hdr *)iph;
+ bzero(&lsa6, sizeof(lsa6));
+ lsa6.sin6_len = sizeof(lsa6);
+ lsa6.sin6_family = AF_INET6;
+ lsa6.sin6_addr = ip6->ip6_src;
+ lsa6.sin6_port = sh->src_port;
+ bzero(&fsa6, sizeof(fsa6));
+ fsa6.sin6_len = sizeof(fsa6);
+ fsa6.sin6_family = AF_INET6;
+ fsa6.sin6_addr = ip6->ip6_dst;
+ fsa6.sin6_port = sh->dest_port;
+ SCTP_PRINTF("src: ");
+ sctp_print_address((struct sockaddr *)&lsa6);
+ SCTP_PRINTF("dest: ");
+ sctp_print_address((struct sockaddr *)&fsa6);
+ break;
+ }
+#endif
+ default:
+ /* TSNH */
+ break;
+ }
+}
+
+void
+sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
+ struct sctp_inpcb *new_inp,
+ struct sctp_tcb *stcb,
+ int waitflags)
+{
+	/*
+	 * go through our old INP and pull off any control structures that
+	 * belong to stcb and move them to the new inp.
+	 */
+ struct socket *old_so, *new_so;
+ struct sctp_queued_to_read *control, *nctl;
+ struct sctp_readhead tmp_queue;
+ struct mbuf *m;
+ int error = 0;
+
+ old_so = old_inp->sctp_socket;
+ new_so = new_inp->sctp_socket;
+ TAILQ_INIT(&tmp_queue);
+ error = sblock(&old_so->so_rcv, waitflags);
+ if (error) {
+		/*
+		 * Gak, can't get sblock, we have a problem. Data will be
+		 * left stranded, and we don't dare look at it since the
+		 * other thread may be reading something. Oh well, it's a
+		 * screwed up app that does a peeloff OR an accept while
+		 * reading from the main socket... actually it's only the
+		 * peeloff() case, since read should fail on a listening
+		 * socket.
+		 */
+ return;
+ }
+ /* lock the socket buffers */
+ SCTP_INP_READ_LOCK(old_inp);
+ control = TAILQ_FIRST(&old_inp->read_queue);
+	/* Pull off all for our target stcb */
+ while (control) {
+ nctl = TAILQ_NEXT(control, next);
+ if (control->stcb == stcb) {
+ /* remove it we want it */
+ TAILQ_REMOVE(&old_inp->read_queue, control, next);
+ TAILQ_INSERT_TAIL(&tmp_queue, control, next);
+ m = control->data;
+ while (m) {
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
+ sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
+ }
+ sctp_sbfree(control, stcb, &old_so->so_rcv, m);
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
+ sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
+ }
+ m = SCTP_BUF_NEXT(m);
+ }
+ }
+ control = nctl;
+ }
+ SCTP_INP_READ_UNLOCK(old_inp);
+ /* Remove the sb-lock on the old socket */
+
+ sbunlock(&old_so->so_rcv);
+ /* Now we move them over to the new socket buffer */
+ control = TAILQ_FIRST(&tmp_queue);
+ SCTP_INP_READ_LOCK(new_inp);
+ while (control) {
+ nctl = TAILQ_NEXT(control, next);
+ TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next);
+ m = control->data;
+ while (m) {
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
+ sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
+ }
+ sctp_sballoc(stcb, &new_so->so_rcv, m);
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
+ sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
+ }
+ m = SCTP_BUF_NEXT(m);
+ }
+ control = nctl;
+ }
+ SCTP_INP_READ_UNLOCK(new_inp);
+}
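+/*
+ * Note: the two-pass structure above is deliberate. Entries are first
+ * moved to the private tmp_queue under the old INP's read lock, with
+ * sctp_sbfree() uncharging them from the old socket, and only then
+ * charged to the new socket under the new INP's read lock, so neither
+ * read lock is ever held across both sockets at once.
+ */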
+
+void
+sctp_add_to_readq(struct sctp_inpcb *inp,
+ struct sctp_tcb *stcb,
+ struct sctp_queued_to_read *control,
+ struct sockbuf *sb,
+ int end,
+ int inp_read_lock_held,
+ int so_locked
+#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
+ SCTP_UNUSED
+#endif
+)
+{
+ /*
+ * Here we must place the control on the end of the socket read
+ * queue AND increment sb_cc so that select will work properly on
+ * read.
+ */
+ struct mbuf *m, *prev = NULL;
+
+ if (inp == NULL) {
+ /* Gak, TSNH!! */
+#ifdef INVARIANTS
+ panic("Gak, inp NULL on add_to_readq");
+#endif
+ return;
+ }
+ if (inp_read_lock_held == 0)
+ SCTP_INP_READ_LOCK(inp);
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
+ sctp_free_remote_addr(control->whoFrom);
+ if (control->data) {
+ sctp_m_freem(control->data);
+ control->data = NULL;
+ }
+ SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_readq), control);
+ if (inp_read_lock_held == 0)
+ SCTP_INP_READ_UNLOCK(inp);
+ return;
+ }
+ if (!(control->spec_flags & M_NOTIFICATION)) {
+ atomic_add_int(&inp->total_recvs, 1);
+ if (!control->do_not_ref_stcb) {
+ atomic_add_int(&stcb->total_recvs, 1);
+ }
+ }
+ m = control->data;
+ control->held_length = 0;
+ control->length = 0;
+ while (m) {
+ if (SCTP_BUF_LEN(m) == 0) {
+ /* Skip mbufs with NO length */
+ if (prev == NULL) {
+ /* First one */
+ control->data = sctp_m_free(m);
+ m = control->data;
+ } else {
+ SCTP_BUF_NEXT(prev) = sctp_m_free(m);
+ m = SCTP_BUF_NEXT(prev);
+ }
+ if (m == NULL) {
+ control->tail_mbuf = prev;
+ }
+ continue;
+ }
+ prev = m;
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
+ sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
+ }
+ sctp_sballoc(stcb, sb, m);
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
+ sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
+ }
+ atomic_add_int(&control->length, SCTP_BUF_LEN(m));
+ m = SCTP_BUF_NEXT(m);
+ }
+ if (prev != NULL) {
+ control->tail_mbuf = prev;
+ } else {
+ /* Everything got collapsed out?? */
+ sctp_free_remote_addr(control->whoFrom);
+ SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_readq), control);
+ if (inp_read_lock_held == 0)
+ SCTP_INP_READ_UNLOCK(inp);
+ return;
+ }
+ if (end) {
+ control->end_added = 1;
+ }
+ TAILQ_INSERT_TAIL(&inp->read_queue, control, next);
+ if (inp_read_lock_held == 0)
+ SCTP_INP_READ_UNLOCK(inp);
+ if (inp && inp->sctp_socket) {
+ if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
+ SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
+ } else {
+#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+ struct socket *so;
+
+ so = SCTP_INP_SO(inp);
+ if (!so_locked) {
+ atomic_add_int(&stcb->asoc.refcnt, 1);
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_SOCKET_LOCK(so, 1);
+ SCTP_TCB_LOCK(stcb);
+ atomic_subtract_int(&stcb->asoc.refcnt, 1);
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
+ SCTP_SOCKET_UNLOCK(so, 1);
+ return;
+ }
+ }
+#endif
+ sctp_sorwakeup(inp, inp->sctp_socket);
+#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+ if (!so_locked) {
+ SCTP_SOCKET_UNLOCK(so, 1);
+ }
+#endif
+ }
+ }
+}
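+/*
+ * Caller sketch (mirrors the notification routines above): build the
+ * control with sctp_build_readq_entry() and then hand it off with
+ *
+ *	sctp_add_to_readq(stcb->sctp_ep, stcb, control,
+ *	    &stcb->sctp_socket->so_rcv, 1,
+ *	    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
+ *
+ * where end=1 marks the message complete in one shot.
+ */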
+
+
+int
+sctp_append_to_readq(struct sctp_inpcb *inp,
+ struct sctp_tcb *stcb,
+ struct sctp_queued_to_read *control,
+ struct mbuf *m,
+ int end,
+ int ctls_cumack,
+ struct sockbuf *sb)
+{
+ /*
+ * A partial delivery API event is underway. OR we are appending on
+ * the reassembly queue.
+ *
+ * If PDAPI this means we need to add m to the end of the data.
+ * Increase the length in the control AND increment the sb_cc.
+ * Otherwise sb is NULL and all we need to do is put it at the end
+ * of the mbuf chain.
+ */
+ int len = 0;
+ struct mbuf *mm, *tail = NULL, *prev = NULL;
+
+ if (inp) {
+ SCTP_INP_READ_LOCK(inp);
+ }
+ if (control == NULL) {
+get_out:
+ if (inp) {
+ SCTP_INP_READ_UNLOCK(inp);
+ }
+ return (-1);
+ }
+ if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ)) {
+ SCTP_INP_READ_UNLOCK(inp);
+		return (0);
+ }
+ if (control->end_added) {
+ /* huh this one is complete? */
+ goto get_out;
+ }
+ mm = m;
+ if (mm == NULL) {
+ goto get_out;
+ }
+ while (mm) {
+ if (SCTP_BUF_LEN(mm) == 0) {
+			/* Skip mbufs with NO length */
+ if (prev == NULL) {
+ /* First one */
+ m = sctp_m_free(mm);
+ mm = m;
+ } else {
+ SCTP_BUF_NEXT(prev) = sctp_m_free(mm);
+ mm = SCTP_BUF_NEXT(prev);
+ }
+ continue;
+ }
+ prev = mm;
+ len += SCTP_BUF_LEN(mm);
+ if (sb) {
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
+ sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(mm));
+ }
+ sctp_sballoc(stcb, sb, mm);
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
+ sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
+ }
+ }
+ mm = SCTP_BUF_NEXT(mm);
+ }
+ if (prev) {
+ tail = prev;
+ } else {
+ /* Really there should always be a prev */
+ if (m == NULL) {
+ /* Huh nothing left? */
+#ifdef INVARIANTS
+ panic("Nothing left to add?");
+#else
+ goto get_out;
+#endif
+ }
+ tail = m;
+ }
+ if (control->tail_mbuf) {
+ /* append */
+ SCTP_BUF_NEXT(control->tail_mbuf) = m;
+ control->tail_mbuf = tail;
+ } else {
+ /* nothing there */
+#ifdef INVARIANTS
+ if (control->data != NULL) {
+ panic("This should NOT happen");
+ }
+#endif
+ control->data = m;
+ control->tail_mbuf = tail;
+ }
+ atomic_add_int(&control->length, len);
+ if (end) {
+ /* message is complete */
+ if (stcb && (control == stcb->asoc.control_pdapi)) {
+ stcb->asoc.control_pdapi = NULL;
+ }
+ control->held_length = 0;
+ control->end_added = 1;
+ }
+ if (stcb == NULL) {
+ control->do_not_ref_stcb = 1;
+ }
+ /*
+ * When we are appending in partial delivery, the cum-ack is used
+ * for the actual pd-api highest tsn on this mbuf. The true cum-ack
+ * is populated in the outbound sinfo structure from the true cumack
+ * if the association exists...
+ */
+ control->sinfo_tsn = control->sinfo_cumtsn = ctls_cumack;
+ if (inp) {
+ SCTP_INP_READ_UNLOCK(inp);
+ }
+ if (inp && inp->sctp_socket) {
+ if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
+ SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
+ } else {
+#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+ struct socket *so;
+
+ so = SCTP_INP_SO(inp);
+ atomic_add_int(&stcb->asoc.refcnt, 1);
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_SOCKET_LOCK(so, 1);
+ SCTP_TCB_LOCK(stcb);
+ atomic_subtract_int(&stcb->asoc.refcnt, 1);
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
+ SCTP_SOCKET_UNLOCK(so, 1);
+ return (0);
+ }
+#endif
+ sctp_sorwakeup(inp, inp->sctp_socket);
+#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+ SCTP_SOCKET_UNLOCK(so, 1);
+#endif
+ }
+ }
+ return (0);
+}
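+/*
+ * Note the contrast with sctp_add_to_readq(): here the control is
+ * already on the read queue and partially delivered, so only the newly
+ * appended mbufs are charged to the socket buffer and control->length,
+ * and ctls_cumack records the highest TSN represented by this chain
+ * (see the comment at the top of the function).
+ */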
+
+
+
+/*************HOLD THIS COMMENT FOR PATCH FILE OF
+ *************ALTERNATE ROUTING CODE
+ */
+
+/*************HOLD THIS COMMENT FOR END OF PATCH FILE OF
+ *************ALTERNATE ROUTING CODE
+ */
+
+struct mbuf *
+sctp_generate_invmanparam(int err)
+{
+	/* Return an mbuf with an invalid mandatory parameter */
+ struct mbuf *m;
+
+ m = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 0, M_DONTWAIT, 1, MT_DATA);
+ if (m) {
+ struct sctp_paramhdr *ph;
+
+ SCTP_BUF_LEN(m) = sizeof(struct sctp_paramhdr);
+ ph = mtod(m, struct sctp_paramhdr *);
+ ph->param_length = htons(sizeof(struct sctp_paramhdr));
+ ph->param_type = htons(err);
+ }
+ return (m);
+}
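+/*
+ * Sketch (illustrative; assumes the usual SCTP_CAUSE_* constants from
+ * the SCTP headers): a handler rejecting a packet over a missing
+ * mandatory parameter could build its operational error with
+ *
+ *	struct mbuf *op_err;
+ *
+ *	op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
+ *	sctp_abort_association(inp, NULL, m, iphlen, sh, op_err,
+ *	    vrf_id, port);
+ *
+ * note that the cause code lands in param_type and the header length in
+ * param_length, both in network byte order.
+ */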
+
+#ifdef SCTP_MBCNT_LOGGING
+void
+sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc,
+ struct sctp_tmit_chunk *tp1, int chk_cnt)
+{
+ if (tp1->data == NULL) {
+ return;
+ }
+ asoc->chunks_on_out_queue -= chk_cnt;
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) {
+ sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE,
+ asoc->total_output_queue_size,
+ tp1->book_size,
+ 0,
+ tp1->mbcnt);
+ }
+ if (asoc->total_output_queue_size >= tp1->book_size) {
+ atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size);
+ } else {
+ asoc->total_output_queue_size = 0;
+ }
+
+ if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) ||
+ ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) {
+ if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) {
+ stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size;
+ } else {
+ stcb->sctp_socket->so_snd.sb_cc = 0;
+
+ }
+ }
+}
+
+#endif
+
+int
+sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
+ int reason, int so_locked
+#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
+ SCTP_UNUSED
+#endif
+)
+{
+ struct sctp_stream_out *strq;
+ struct sctp_tmit_chunk *chk = NULL;
+ struct sctp_stream_queue_pending *sp;
+ uint16_t stream = 0, seq = 0;
+ uint8_t foundeom = 0;
+ int ret_sz = 0;
+ int notdone;
+ int do_wakeup_routine = 0;
+
+ stream = tp1->rec.data.stream_number;
+ seq = tp1->rec.data.stream_seq;
+ do {
+ ret_sz += tp1->book_size;
+ if (tp1->data != NULL) {
+ if (tp1->sent < SCTP_DATAGRAM_RESEND) {
+ sctp_flight_size_decrease(tp1);
+ sctp_total_flight_decrease(stcb, tp1);
+ }
+ sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
+ stcb->asoc.peers_rwnd += tp1->send_size;
+ stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh);
+ sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, reason, tp1, so_locked);
+ if (tp1->data) {
+ sctp_m_freem(tp1->data);
+ tp1->data = NULL;
+ }
+ do_wakeup_routine = 1;
+ if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
+ stcb->asoc.sent_queue_cnt_removeable--;
+ }
+ }
+ tp1->sent = SCTP_FORWARD_TSN_SKIP;
+ if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
+ SCTP_DATA_NOT_FRAG) {
+			/* not fragmented, we are done */
+ notdone = 0;
+ foundeom = 1;
+ } else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
+ /* end of frag, we are done */
+ notdone = 0;
+ foundeom = 1;
+ } else {
+ /*
+ * Its a begin or middle piece, we must mark all of
+ * it
+ */
+ notdone = 1;
+ tp1 = TAILQ_NEXT(tp1, sctp_next);
+ }
+ } while (tp1 && notdone);
+ if (foundeom == 0) {
+ /*
+ * The multi-part message was scattered across the send and
+ * sent queue.
+ */
+next_on_sent:
+ tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
+	/*
+	 * search through the send_queue too, starting at the
+	 * beginning.
+	 */
+ if ((tp1) &&
+ (tp1->rec.data.stream_number == stream) &&
+ (tp1->rec.data.stream_seq == seq)) {
+ /*
+ * save to chk in case we have some on stream out
+ * queue. If so and we have an un-transmitted one we
+ * don't have to fudge the TSN.
+ */
+ chk = tp1;
+ ret_sz += tp1->book_size;
+ sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
+ sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, reason, tp1, so_locked);
+ if (tp1->data) {
+ sctp_m_freem(tp1->data);
+ tp1->data = NULL;
+ }
+			/* No flight involved here; book the size to 0 */
+ tp1->book_size = 0;
+ if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
+ foundeom = 1;
+ }
+ do_wakeup_routine = 1;
+ tp1->sent = SCTP_FORWARD_TSN_SKIP;
+ TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next);
+ /*
+ * on to the sent queue so we can wait for it to be
+ * passed by.
+ */
+ TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1,
+ sctp_next);
+ stcb->asoc.send_queue_cnt--;
+ stcb->asoc.sent_queue_cnt++;
+ goto next_on_sent;
+ }
+ }
+ if (foundeom == 0) {
+ /*
+ * Still no eom found. That means there is stuff left on the
+ * stream out queue.. yuck.
+ */
+ strq = &stcb->asoc.strmout[stream];
+ SCTP_TCB_SEND_LOCK(stcb);
+ sp = TAILQ_FIRST(&strq->outqueue);
+		while ((sp != NULL) && (sp->strseq <= seq)) {
+ /* Check if its our SEQ */
+ if (sp->strseq == seq) {
+ sp->discard_rest = 1;
+ /*
+ * We may need to put a chunk on the queue
+ * that holds the TSN that would have been
+ * sent with the LAST bit.
+ */
+ if (chk == NULL) {
+ /* Yep, we have to */
+ sctp_alloc_a_chunk(stcb, chk);
+ if (chk == NULL) {
+ /*
+ * we are hosed. All we can
+ * do is nothing.. which
+ * will cause an abort if
+ * the peer is paying
+ * attention.
+ */
+ goto oh_well;
+ }
+ memset(chk, 0, sizeof(*chk));
+ chk->rec.data.rcv_flags = SCTP_DATA_LAST_FRAG;
+ chk->sent = SCTP_FORWARD_TSN_SKIP;
+ chk->asoc = &stcb->asoc;
+ chk->rec.data.stream_seq = sp->strseq;
+ chk->rec.data.stream_number = sp->stream;
+ chk->rec.data.payloadtype = sp->ppid;
+ chk->rec.data.context = sp->context;
+ chk->flags = sp->act_flags;
+ if (sp->net)
+ chk->whoTo = sp->net;
+ else
+ chk->whoTo = stcb->asoc.primary_destination;
+ atomic_add_int(&chk->whoTo->ref_count, 1);
+ chk->rec.data.TSN_seq = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1);
+ stcb->asoc.pr_sctp_cnt++;
+ chk->pr_sctp_on = 1;
+ TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next);
+ stcb->asoc.sent_queue_cnt++;
+ stcb->asoc.pr_sctp_cnt++;
+ } else {
+ chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG;
+ }
+ oh_well:
+ if (sp->data) {
+					/*
+					 * Pull any data to free up the SB
+					 * and allow the sender to "add more"
+					 * while we throw this away :-)
+					 */
+ sctp_free_spbufspace(stcb, &stcb->asoc,
+ sp);
+ ret_sz += sp->length;
+ do_wakeup_routine = 1;
+ sp->some_taken = 1;
+ sctp_m_freem(sp->data);
+ sp->length = 0;
+ sp->data = NULL;
+ sp->tail_mbuf = NULL;
+ }
+ break;
+ } else {
+ /* Next one please */
+ sp = TAILQ_NEXT(sp, next);
+ }
+ } /* End while */
+ SCTP_TCB_SEND_UNLOCK(stcb);
+ }
+ if (do_wakeup_routine) {
+#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+ struct socket *so;
+
+ so = SCTP_INP_SO(stcb->sctp_ep);
+ if (!so_locked) {
+ atomic_add_int(&stcb->asoc.refcnt, 1);
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_SOCKET_LOCK(so, 1);
+ SCTP_TCB_LOCK(stcb);
+ atomic_subtract_int(&stcb->asoc.refcnt, 1);
+ if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
+ /* assoc was freed while we were unlocked */
+ SCTP_SOCKET_UNLOCK(so, 1);
+ return (ret_sz);
+ }
+ }
+#endif
+ sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
+#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+ if (!so_locked) {
+ SCTP_SOCKET_UNLOCK(so, 1);
+ }
+#endif
+ }
+ return (ret_sz);
+}
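+/*
+ * Note: the routine above works in three stages: (1) walk the sent
+ * queue marking every fragment of the message SCTP_FORWARD_TSN_SKIP,
+ * (2) if no EOM was seen, sweep matching fragments off the send queue
+ * onto the sent queue, and (3) if there is still no EOM, discard the
+ * unsent remainder on the stream-out queue, manufacturing a LAST_FRAG
+ * chunk when needed so a FORWARD-TSN can cover the whole message.
+ */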
+
+/*
+ * checks to see if the given address, addr, is one that is currently known
+ * by the kernel. note: can't distinguish the same address on multiple
+ * interfaces and doesn't handle multiple addresses with different
+ * zone/scope id's. note: ifa_ifwithaddr() compares the entire sockaddr
+ * struct
+ */
+struct sctp_ifa *
+sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr,
+ int holds_lock)
+{
+ struct sctp_laddr *laddr;
+
+ if (holds_lock == 0) {
+ SCTP_INP_RLOCK(inp);
+ }
+ LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
+ if (laddr->ifa == NULL)
+ continue;
+ if (addr->sa_family != laddr->ifa->address.sa.sa_family)
+ continue;
+ if (addr->sa_family == AF_INET) {
+ if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
+ laddr->ifa->address.sin.sin_addr.s_addr) {
+ /* found him. */
+ if (holds_lock == 0) {
+ SCTP_INP_RUNLOCK(inp);
+ }
+ return (laddr->ifa);
+ break;
+ }
+ }
+#ifdef INET6
+ if (addr->sa_family == AF_INET6) {
+ if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
+ &laddr->ifa->address.sin6)) {
+ /* found him. */
+ if (holds_lock == 0) {
+ SCTP_INP_RUNLOCK(inp);
+ }
+ return (laddr->ifa);
+ break;
+ }
+ }
+#endif
+ }
+ if (holds_lock == 0) {
+ SCTP_INP_RUNLOCK(inp);
+ }
+ return (NULL);
+}
+
+uint32_t
+sctp_get_ifa_hash_val(struct sockaddr *addr)
+{
+ if (addr->sa_family == AF_INET) {
+ struct sockaddr_in *sin;
+
+ sin = (struct sockaddr_in *)addr;
+ return (sin->sin_addr.s_addr ^ (sin->sin_addr.s_addr >> 16));
+ } else if (addr->sa_family == AF_INET6) {
+ struct sockaddr_in6 *sin6;
+ uint32_t hash_of_addr;
+
+ sin6 = (struct sockaddr_in6 *)addr;
+ hash_of_addr = (sin6->sin6_addr.s6_addr32[0] +
+ sin6->sin6_addr.s6_addr32[1] +
+ sin6->sin6_addr.s6_addr32[2] +
+ sin6->sin6_addr.s6_addr32[3]);
+ hash_of_addr = (hash_of_addr ^ (hash_of_addr >> 16));
+ return (hash_of_addr);
+ }
+ return (0);
+}
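+/*
+ * Note: both branches above fold entropy into the low bits with
+ * x ^ (x >> 16), which suits the bucket mask applied by the caller
+ * below (hash_of_addr & vrf->vrf_addr_hashmark); the IPv6 variant first
+ * sums the four 32-bit words of the address.
+ */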
+
+struct sctp_ifa *
+sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock)
+{
+ struct sctp_ifa *sctp_ifap;
+ struct sctp_vrf *vrf;
+ struct sctp_ifalist *hash_head;
+ uint32_t hash_of_addr;
+
+ if (holds_lock == 0)
+ SCTP_IPI_ADDR_RLOCK();
+
+ vrf = sctp_find_vrf(vrf_id);
+ if (vrf == NULL) {
+stage_right:
+ if (holds_lock == 0)
+ SCTP_IPI_ADDR_RUNLOCK();
+ return (NULL);
+ }
+ hash_of_addr = sctp_get_ifa_hash_val(addr);
+
+ hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)];
+ if (hash_head == NULL) {
+ SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ",
+ hash_of_addr, (uint32_t) vrf->vrf_addr_hashmark,
+ (uint32_t) (hash_of_addr & vrf->vrf_addr_hashmark));
+ sctp_print_address(addr);
+ SCTP_PRINTF("No such bucket for address\n");
+ if (holds_lock == 0)
+ SCTP_IPI_ADDR_RUNLOCK();
+
+ return (NULL);
+ }
+ LIST_FOREACH(sctp_ifap, hash_head, next_bucket) {
+ if (sctp_ifap == NULL) {
+#ifdef INVARIANTS
+ panic("Huh LIST_FOREACH corrupt");
+ goto stage_right;
+#else
+ SCTP_PRINTF("LIST corrupt of sctp_ifap's?\n");
+ goto stage_right;
+#endif
+ }
+ if (addr->sa_family != sctp_ifap->address.sa.sa_family)
+ continue;
+ if (addr->sa_family == AF_INET) {
+ if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
+ sctp_ifap->address.sin.sin_addr.s_addr) {
+ /* found him. */
+ if (holds_lock == 0)
+ SCTP_IPI_ADDR_RUNLOCK();
+ return (sctp_ifap);
+ break;
+ }
+ }
+#ifdef INET6
+ if (addr->sa_family == AF_INET6) {
+ if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
+ &sctp_ifap->address.sin6)) {
+ /* found him. */
+ if (holds_lock == 0)
+ SCTP_IPI_ADDR_RUNLOCK();
+ return (sctp_ifap);
+ break;
+ }
+ }
+#endif
+ }
+ if (holds_lock == 0)
+ SCTP_IPI_ADDR_RUNLOCK();
+ return (NULL);
+}
+
+static void
+sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t * freed_so_far, int hold_rlock,
+ uint32_t rwnd_req)
+{
+ /* User pulled some data, do we need a rwnd update? */
+ int r_unlocked = 0;
+ uint32_t dif, rwnd;
+ struct socket *so = NULL;
+
+ if (stcb == NULL)
+ return;
+
+ atomic_add_int(&stcb->asoc.refcnt, 1);
+
+ if (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED |
+ SCTP_STATE_SHUTDOWN_RECEIVED |
+ SCTP_STATE_SHUTDOWN_ACK_SENT)) {
+		/* Pre-check: if we are freeing, no update is needed */
+ goto no_lock;
+ }
+ SCTP_INP_INCR_REF(stcb->sctp_ep);
+ if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
+ (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
+ goto out;
+ }
+ so = stcb->sctp_socket;
+ if (so == NULL) {
+ goto out;
+ }
+ atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far);
+	/* Have you freed enough to warrant a look? */
+	*freed_so_far = 0;
+	/* Yep, it's worth a look and the lock overhead */
+
+ /* Figure out what the rwnd would be */
+ rwnd = sctp_calc_rwnd(stcb, &stcb->asoc);
+ if (rwnd >= stcb->asoc.my_last_reported_rwnd) {
+ dif = rwnd - stcb->asoc.my_last_reported_rwnd;
+ } else {
+ dif = 0;
+ }
+ if (dif >= rwnd_req) {
+ if (hold_rlock) {
+ SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
+ r_unlocked = 1;
+ }
+ if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
+			/*
+			 * One last check before we allow the guy possibly
+			 * to get in. There is a race where the guy has not
+			 * reached the gate; in that case just bail out.
+			 */
+ goto out;
+ }
+ SCTP_TCB_LOCK(stcb);
+ if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
+ /* No reports here */
+ SCTP_TCB_UNLOCK(stcb);
+ goto out;
+ }
+ SCTP_STAT_INCR(sctps_wu_sacks_sent);
+ sctp_send_sack(stcb);
+
+ sctp_chunk_output(stcb->sctp_ep, stcb,
+ SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED);
+ /* make sure no timer is running */
+ sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_6);
+ SCTP_TCB_UNLOCK(stcb);
+ } else {
+ /* Update how much we have pending */
+ stcb->freed_by_sorcv_sincelast = dif;
+ }
+out:
+ if (so && r_unlocked && hold_rlock) {
+ SCTP_INP_READ_LOCK(stcb->sctp_ep);
+ }
+ SCTP_INP_DECR_REF(stcb->sctp_ep);
+no_lock:
+ atomic_add_int(&stcb->asoc.refcnt, -1);
+ return;
+}
+
+int
+sctp_sorecvmsg(struct socket *so,
+ struct uio *uio,
+ struct mbuf **mp,
+ struct sockaddr *from,
+ int fromlen,
+ int *msg_flags,
+ struct sctp_sndrcvinfo *sinfo,
+ int filling_sinfo)
+{
+ /*
+ * MSG flags we will look at:
+ * MSG_DONTWAIT - non-blocking IO.
+ * MSG_PEEK - look, don't touch (only valid without an mbuf
+ * copy out, i.e. mp == NULL, so uio is the copy method to
+ * userland).
+ * MSG_WAITALL - ??
+ * On the way out we may send out any combination of:
+ * MSG_NOTIFICATION MSG_EOR
+ */
+ struct sctp_inpcb *inp = NULL;
+ int my_len = 0;
+ int cp_len = 0, error = 0;
+ struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL;
+ struct mbuf *m = NULL;
+ struct sctp_tcb *stcb = NULL;
+ int wakeup_read_socket = 0;
+ int freecnt_applied = 0;
+ int out_flags = 0, in_flags = 0;
+ int block_allowed = 1;
+ uint32_t freed_so_far = 0;
+ uint32_t copied_so_far = 0;
+ int in_eeor_mode = 0;
+ int no_rcv_needed = 0;
+ uint32_t rwnd_req = 0;
+ int hold_sblock = 0;
+ int hold_rlock = 0;
+ int slen = 0;
+ uint32_t held_length = 0;
+ int sockbuf_lock = 0;
+
+ if (uio == NULL) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
+ return (EINVAL);
+ }
+ if (msg_flags) {
+ in_flags = *msg_flags;
+ if (in_flags & MSG_PEEK)
+ SCTP_STAT_INCR(sctps_read_peeks);
+ } else {
+ in_flags = 0;
+ }
+ slen = uio->uio_resid;
+
+ /* Pull in and set up our int flags */
+ if (in_flags & MSG_OOB) {
+ /* Out-of-band data is NOT supported */
+ return (EOPNOTSUPP);
+ }
+ if ((in_flags & MSG_PEEK) && (mp != NULL)) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
+ return (EINVAL);
+ }
+ if ((in_flags & (MSG_DONTWAIT | MSG_NBIO)) ||
+ SCTP_SO_IS_NBIO(so)) {
+ block_allowed = 0;
+ }
+ /* setup the endpoint */
+ inp = (struct sctp_inpcb *)so->so_pcb;
+ if (inp == NULL) {
+ SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
+ return (EFAULT);
+ }
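+ /*
+ * The window-update threshold: a 1/(2^SCTP_RWND_HIWAT_SHIFT)
+ * fraction of the receive buffer limit must be freed before we
+ * bother the peer with an update.
+ */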
+ rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT);
+ /* Must be at least a MTU's worth */
+ if (rwnd_req < SCTP_MIN_RWND)
+ rwnd_req = SCTP_MIN_RWND;
+ in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
+ sctp_misc_ints(SCTP_SORECV_ENTER,
+ rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, uio->uio_resid);
+ }
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
+ sctp_misc_ints(SCTP_SORECV_ENTERPL,
+ rwnd_req, block_allowed, so->so_rcv.sb_cc, uio->uio_resid);
+ }
+ error = sblock(&so->so_rcv, (block_allowed ? SBL_WAIT : 0));
+ sockbuf_lock = 1;
+ if (error) {
+ goto release_unlocked;
+ }
+restart:
+
+
+restart_nosblocks:
+ if (hold_sblock == 0) {
+ SOCKBUF_LOCK(&so->so_rcv);
+ hold_sblock = 1;
+ }
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
+ (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
+ goto out;
+ }
+ if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
+ if (so->so_error) {
+ error = so->so_error;
+ if ((in_flags & MSG_PEEK) == 0)
+ so->so_error = 0;
+ goto out;
+ } else {
+ if (so->so_rcv.sb_cc == 0) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
+ /* indicate EOF */
+ error = 0;
+ goto out;
+ }
+ }
+ }
+ if ((so->so_rcv.sb_cc <= held_length) && block_allowed) {
+ /* we need to wait for data */
+ if ((so->so_rcv.sb_cc == 0) &&
+ ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+ (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
+ /*
+ * For the active open side, clear flags
+ * for re-use; the passive open side is
+ * blocked by connect.
+ */
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
+ /*
+ * You were aborted, passive side
+ * always hits here
+ */
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
+ error = ECONNRESET;
+ /*
+ * You get this once if you are
+ * the active open side.
+ */
+ if (!(inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
+ /*
+ * Remove flag if on the
+ * active open side
+ */
+ inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_ABORTED;
+ }
+ }
+ so->so_state &= ~(SS_ISCONNECTING |
+ SS_ISDISCONNECTING |
+ SS_ISCONFIRMING |
+ SS_ISCONNECTED);
+ if (error == 0) {
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
+ error = ENOTCONN;
+ } else {
+ inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_CONNECTED;
+ }
+ }
+ goto out;
+ }
+ }
+ error = sbwait(&so->so_rcv);
+ if (error) {
+ goto out;
+ }
+ held_length = 0;
+ goto restart_nosblocks;
+ } else if (so->so_rcv.sb_cc == 0) {
+ if (so->so_error) {
+ error = so->so_error;
+ if ((in_flags & MSG_PEEK) == 0)
+ so->so_error = 0;
+ } else {
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+ (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
+ /*
+ * For the active open side, clear
+ * flags for re-use; the passive open
+ * side is blocked by connect.
+ */
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
+ /*
+ * You were aborted, passive
+ * side always hits here
+ */
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
+ error = ECONNRESET;
+ /*
+ * You get this once if you
+ * are the active open side.
+ */
+ if (!(inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
+ /*
+ * Remove flag if on
+ * the active open
+ * side
+ */
+ inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_ABORTED;
+ }
+ }
+ so->so_state &= ~(SS_ISCONNECTING |
+ SS_ISDISCONNECTING |
+ SS_ISCONFIRMING |
+ SS_ISCONNECTED);
+ if (error == 0) {
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
+ error = ENOTCONN;
+ } else {
+ inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_CONNECTED;
+ }
+ }
+ goto out;
+ }
+ }
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK);
+ error = EWOULDBLOCK;
+ }
+ goto out;
+ }
+ if (hold_sblock == 1) {
+ SOCKBUF_UNLOCK(&so->so_rcv);
+ hold_sblock = 0;
+ }
+ /* we possibly have data we can read */
+ /* sa_ignore FREED_MEMORY */
+ control = TAILQ_FIRST(&inp->read_queue);
+ if (control == NULL) {
+ /*
+ * This could be happening since the appender did the
+ * increment but has not yet done the tailq insert onto
+ * the read_queue.
+ */
+ if (hold_rlock == 0) {
+ SCTP_INP_READ_LOCK(inp);
+ hold_rlock = 1;
+ }
+ control = TAILQ_FIRST(&inp->read_queue);
+ if ((control == NULL) && (so->so_rcv.sb_cc != 0)) {
+#ifdef INVARIANTS
+ panic("Huh, its non zero and nothing on control?");
+#endif
+ so->so_rcv.sb_cc = 0;
+ }
+ SCTP_INP_READ_UNLOCK(inp);
+ hold_rlock = 0;
+ goto restart;
+ }
+ if ((control->length == 0) &&
+ (control->do_not_ref_stcb)) {
+ /*
+ * Clean-up code for a freed assoc that left behind a
+ * pdapi... maybe a peer in EEOR mode that just closed
+ * after sending and never indicated an EOR.
+ */
+ if (hold_rlock == 0) {
+ hold_rlock = 1;
+ SCTP_INP_READ_LOCK(inp);
+ }
+ control->held_length = 0;
+ if (control->data) {
+ /* Hmm there is data here .. fix */
+ struct mbuf *m_tmp;
+ int cnt = 0;
+
+ m_tmp = control->data;
+ while (m_tmp) {
+ cnt += SCTP_BUF_LEN(m_tmp);
+ if (SCTP_BUF_NEXT(m_tmp) == NULL) {
+ control->tail_mbuf = m_tmp;
+ control->end_added = 1;
+ }
+ m_tmp = SCTP_BUF_NEXT(m_tmp);
+ }
+ control->length = cnt;
+ } else {
+ /* remove it */
+ TAILQ_REMOVE(&inp->read_queue, control, next);
+ /* Add back any hidden data */
+ sctp_free_remote_addr(control->whoFrom);
+ sctp_free_a_readq(stcb, control);
+ }
+ if (hold_rlock) {
+ hold_rlock = 0;
+ SCTP_INP_READ_UNLOCK(inp);
+ }
+ goto restart;
+ }
+ if ((control->length == 0) &&
+ (control->end_added == 1)) {
+ /*
+ * Do we also need to check for (control->pdapi_aborted ==
+ * 1)?
+ */
+ if (hold_rlock == 0) {
+ hold_rlock = 1;
+ SCTP_INP_READ_LOCK(inp);
+ }
+ TAILQ_REMOVE(&inp->read_queue, control, next);
+ if (control->data) {
+#ifdef INVARIANTS
+ panic("control->data not null but control->length == 0");
+#else
+ SCTP_PRINTF("Strange, data left in the control buffer. Cleaning up.\n");
+ sctp_m_freem(control->data);
+ control->data = NULL;
+#endif
+ }
+ if (control->aux_data) {
+ sctp_m_free(control->aux_data);
+ control->aux_data = NULL;
+ }
+ sctp_free_remote_addr(control->whoFrom);
+ sctp_free_a_readq(stcb, control);
+ if (hold_rlock) {
+ hold_rlock = 0;
+ SCTP_INP_READ_UNLOCK(inp);
+ }
+ goto restart;
+ }
+ if (control->length == 0) {
+ if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) &&
+ (filling_sinfo)) {
+ /* find a more suitable one than this */
+ ctl = TAILQ_NEXT(control, next);
+ while (ctl) {
+ if ((ctl->stcb != control->stcb) && (ctl->length) &&
+ (ctl->some_taken ||
+ (ctl->spec_flags & M_NOTIFICATION) ||
+ ((ctl->do_not_ref_stcb == 0) &&
+ (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
+ ) {
+ /*-
+ * If the next entry has a different TCB and there is data
+ * present, and either we have already taken some (pdapi) OR
+ * we can ref the tcb and no delivery has started on this
+ * stream, we take it. Note we allow a notification on a
+ * different assoc to be delivered.
+ */
+ control = ctl;
+ goto found_one;
+ } else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) &&
+ (ctl->length) &&
+ ((ctl->some_taken) ||
+ ((ctl->do_not_ref_stcb == 0) &&
+ ((ctl->spec_flags & M_NOTIFICATION) == 0) &&
+ (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))) {
+ /*-
+ * If we have the same tcb, there is data present, and we
+ * have the strm interleave feature on: then if we have
+ * taken some (pdapi), or we can refer to that tcb AND we
+ * have not started a delivery for this stream, we can
+ * take it. Note we do NOT allow a notification on the
+ * same assoc to be delivered.
+ */
+ control = ctl;
+ goto found_one;
+ }
+ ctl = TAILQ_NEXT(ctl, next);
+ }
+ }
+ /*
+ * If we reach here, no suitable replacement is available
+ * <or> fragment interleave is NOT on. So stuff the sb_cc
+ * into our held count, and it's time to sleep again.
+ */
+ held_length = so->so_rcv.sb_cc;
+ control->held_length = so->so_rcv.sb_cc;
+ goto restart;
+ }
+ /* Clear the held length since there is something to read */
+ control->held_length = 0;
+ if (hold_rlock) {
+ SCTP_INP_READ_UNLOCK(inp);
+ hold_rlock = 0;
+ }
+found_one:
+ /*
+ * If we reach here, control has some data for us to read off.
+ * Note that stcb COULD be NULL.
+ */
+ control->some_taken++;
+ if (hold_sblock) {
+ SOCKBUF_UNLOCK(&so->so_rcv);
+ hold_sblock = 0;
+ }
+ stcb = control->stcb;
+ if (stcb) {
+ if ((control->do_not_ref_stcb == 0) &&
+ (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
+ if (freecnt_applied == 0)
+ stcb = NULL;
+ } else if (control->do_not_ref_stcb == 0) {
+ /* you can't free it on me please */
+ /*
+ * The lock on the socket buffer protects us so the
+ * free code will stop. But since we used the
+ * socketbuf lock and the sender uses the tcb_lock
+ * to increment, we need to use the atomic add to
+ * the refcnt
+ */
+ if (freecnt_applied) {
+#ifdef INVARIANTS
+ panic("refcnt already incremented");
+#else
+ SCTP_PRINTF("refcnt already incremented?\n");
+#endif
+ } else {
+ atomic_add_int(&stcb->asoc.refcnt, 1);
+ freecnt_applied = 1;
+ }
+ /*
+ * Setup to remember how much we have not yet told
+ * the peer our rwnd has opened up. Note we grab the
+ * value from the tcb from last time. Note too that
+ * sack sending clears this when a sack is sent,
+ * which is fine. Once we hit the rwnd_req, we then
+ * will go to the sctp_user_rcvd() that will not
+ * lock until it KNOWs it MUST send a WUP-SACK.
+ */
+ freed_so_far = stcb->freed_by_sorcv_sincelast;
+ stcb->freed_by_sorcv_sincelast = 0;
+ }
+ }
+ if (stcb &&
+ ((control->spec_flags & M_NOTIFICATION) == 0) &&
+ control->do_not_ref_stcb == 0) {
+ stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1;
+ }
+ /* First lets get off the sinfo and sockaddr info */
+ if ((sinfo) && filling_sinfo) {
+ memcpy(sinfo, control, sizeof(struct sctp_nonpad_sndrcvinfo));
+ nxt = TAILQ_NEXT(control, next);
+ if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
+ struct sctp_extrcvinfo *s_extra;
+
+ s_extra = (struct sctp_extrcvinfo *)sinfo;
+ if ((nxt) &&
+ (nxt->length)) {
+ s_extra->sreinfo_next_flags = SCTP_NEXT_MSG_AVAIL;
+ if (nxt->sinfo_flags & SCTP_UNORDERED) {
+ s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED;
+ }
+ if (nxt->spec_flags & M_NOTIFICATION) {
+ s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION;
+ }
+ s_extra->sreinfo_next_aid = nxt->sinfo_assoc_id;
+ s_extra->sreinfo_next_length = nxt->length;
+ s_extra->sreinfo_next_ppid = nxt->sinfo_ppid;
+ s_extra->sreinfo_next_stream = nxt->sinfo_stream;
+ if (nxt->tail_mbuf != NULL) {
+ if (nxt->end_added) {
+ s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE;
+ }
+ }
+ } else {
+ /*
+ * We explicitly zero this, since the memcpy
+ * picked up fields beyond the older sinfo_
+ * members that live on the control structure.
+ */
+ nxt = NULL;
+ s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG;
+ s_extra->sreinfo_next_aid = 0;
+ s_extra->sreinfo_next_length = 0;
+ s_extra->sreinfo_next_ppid = 0;
+ s_extra->sreinfo_next_stream = 0;
+ }
+ }
+ /*
+ * update off the real current cum-ack, if we have an stcb.
+ */
+ if ((control->do_not_ref_stcb == 0) && stcb)
+ sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
+ /*
+ * mask off the high bits, we keep the actual chunk bits in
+ * there.
+ */
+ sinfo->sinfo_flags &= 0x00ff;
+ if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
+ sinfo->sinfo_flags |= SCTP_UNORDERED;
+ }
+ }
+#ifdef SCTP_ASOCLOG_OF_TSNS
+ {
+ int index, newindex;
+ struct sctp_pcbtsn_rlog *entry;
+
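+ /* Claim the next read-log slot with a CAS loop so that
+ * concurrent readers never write to the same entry. */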
+ do {
+ index = inp->readlog_index;
+ newindex = index + 1;
+ if (newindex >= SCTP_READ_LOG_SIZE) {
+ newindex = 0;
+ }
+ } while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0);
+ entry = &inp->readlog[index];
+ entry->vtag = control->sinfo_assoc_id;
+ entry->strm = control->sinfo_stream;
+ entry->seq = control->sinfo_ssn;
+ entry->sz = control->length;
+ entry->flgs = control->sinfo_flags;
+ }
+#endif
+ if (fromlen && from) {
+ struct sockaddr *to;
+
+#ifdef INET
+ cp_len = min((size_t)fromlen, (size_t)control->whoFrom->ro._l_addr.sin.sin_len);
+ memcpy(from, &control->whoFrom->ro._l_addr, cp_len);
+ ((struct sockaddr_in *)from)->sin_port = control->port_from;
+#else
+ /* No AF_INET, use AF_INET6 */
+ cp_len = min((size_t)fromlen, (size_t)control->whoFrom->ro._l_addr.sin6.sin6_len);
+ memcpy(from, &control->whoFrom->ro._l_addr, cp_len);
+ ((struct sockaddr_in6 *)from)->sin6_port = control->port_from;
+#endif
+
+ to = from;
+#if defined(INET) && defined(INET6)
+ if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) &&
+ (to->sa_family == AF_INET) &&
+ ((size_t)fromlen >= sizeof(struct sockaddr_in6))) {
+ struct sockaddr_in *sin;
+ struct sockaddr_in6 sin6;
+
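+ /* Build a v4-mapped IPv6 address (::ffff:a.b.c.d)
+ * from the IPv4 source for the application. */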
+ sin = (struct sockaddr_in *)to;
+ bzero(&sin6, sizeof(sin6));
+ sin6.sin6_family = AF_INET6;
+ sin6.sin6_len = sizeof(struct sockaddr_in6);
+ sin6.sin6_addr.s6_addr32[2] = htonl(0xffff);
+ bcopy(&sin->sin_addr,
+ &sin6.sin6_addr.s6_addr32[3],
+ sizeof(sin6.sin6_addr.s6_addr32[3]));
+ sin6.sin6_port = sin->sin_port;
+ memcpy(from, (caddr_t)&sin6, sizeof(sin6));
+ }
+#endif
+#if defined(INET6)
+ {
+ struct sockaddr_in6 lsa6, *to6;
+
+ to6 = (struct sockaddr_in6 *)to;
+ sctp_recover_scope_mac(to6, (&lsa6));
+ }
+#endif
+ }
+ /* now copy out what data we can */
+ if (mp == NULL) {
+ /* copy out each mbuf in the chain up to length */
+get_more_data:
+ m = control->data;
+ while (m) {
+ /* Move out all we can */
+ cp_len = (int)uio->uio_resid;
+ my_len = (int)SCTP_BUF_LEN(m);
+ if (cp_len > my_len) {
+ /* not enough in this buf */
+ cp_len = my_len;
+ }
+ if (hold_rlock) {
+ SCTP_INP_READ_UNLOCK(inp);
+ hold_rlock = 0;
+ }
+ if (cp_len > 0)
+ error = uiomove(mtod(m, char *), cp_len, uio);
+ /* re-read */
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
+ goto release;
+ }
+ if ((control->do_not_ref_stcb == 0) && stcb &&
+ stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
+ no_rcv_needed = 1;
+ }
+ if (error) {
+ /* error we are out of here */
+ goto release;
+ }
+ if ((SCTP_BUF_NEXT(m) == NULL) &&
+ (cp_len >= SCTP_BUF_LEN(m)) &&
+ ((control->end_added == 0) ||
+ (control->end_added &&
+ (TAILQ_NEXT(control, next) == NULL)))
+ ) {
+ SCTP_INP_READ_LOCK(inp);
+ hold_rlock = 1;
+ }
+ if (cp_len == SCTP_BUF_LEN(m)) {
+ if ((SCTP_BUF_NEXT(m) == NULL) &&
+ (control->end_added)) {
+ out_flags |= MSG_EOR;
+ if ((control->do_not_ref_stcb == 0) &&
+ (control->stcb != NULL) &&
+ ((control->spec_flags & M_NOTIFICATION) == 0))
+ control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
+ }
+ if (control->spec_flags & M_NOTIFICATION) {
+ out_flags |= MSG_NOTIFICATION;
+ }
+ /* we ate up the mbuf */
+ if (in_flags & MSG_PEEK) {
+ /* just looking */
+ m = SCTP_BUF_NEXT(m);
+ copied_so_far += cp_len;
+ } else {
+ /* dispose of the mbuf */
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
+ sctp_sblog(&so->so_rcv,
+ control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
+ }
+ sctp_sbfree(control, stcb, &so->so_rcv, m);
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
+ sctp_sblog(&so->so_rcv,
+ control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
+ }
+ copied_so_far += cp_len;
+ freed_so_far += cp_len;
+ freed_so_far += MSIZE;
+ atomic_subtract_int(&control->length, cp_len);
+ control->data = sctp_m_free(m);
+ m = control->data;
+ /*
+ * Been through it all; we must hold the
+ * sb lock, so it is OK to null the tail.
+ */
+ if (control->data == NULL) {
+#ifdef INVARIANTS
+ if ((control->end_added == 0) ||
+ (TAILQ_NEXT(control, next) == NULL)) {
+ /*
+ * If the end is not added, OR the
+ * next is NULL, we MUST have the
+ * lock.
+ */
+ if (mtx_owned(&inp->inp_rdata_mtx) == 0) {
+ panic("Hmm we don't own the lock?");
+ }
+ }
+#endif
+ control->tail_mbuf = NULL;
+#ifdef INVARIANTS
+ if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) {
+ panic("end_added, nothing left and no MSG_EOR");
+ }
+#endif
+ }
+ }
+ } else {
+ /* Do we need to trim the mbuf? */
+ if (control->spec_flags & M_NOTIFICATION) {
+ out_flags |= MSG_NOTIFICATION;
+ }
+ if ((in_flags & MSG_PEEK) == 0) {
+ SCTP_BUF_RESV_UF(m, cp_len);
+ SCTP_BUF_LEN(m) -= cp_len;
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
+ sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, cp_len);
+ }
+ atomic_subtract_int(&so->so_rcv.sb_cc, cp_len);
+ if ((control->do_not_ref_stcb == 0) &&
+ stcb) {
+ atomic_subtract_int(&stcb->asoc.sb_cc, cp_len);
+ }
+ copied_so_far += cp_len;
+ freed_so_far += cp_len;
+ freed_so_far += MSIZE;
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
+ sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb,
+ SCTP_LOG_SBRESULT, 0);
+ }
+ atomic_subtract_int(&control->length, cp_len);
+ } else {
+ copied_so_far += cp_len;
+ }
+ }
+ if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) {
+ break;
+ }
+ if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
+ (control->do_not_ref_stcb == 0) &&
+ (freed_so_far >= rwnd_req)) {
+ sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
+ }
+ } /* end while(m) */
+ /*
+ * At this point we have looked at it all: we either have a
+ * MSG_EOR, or we read all the user wants... <OR>
+ * control->length == 0.
+ */
+ if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) {
+ /* we are done with this control */
+ if (control->length == 0) {
+ if (control->data) {
+#ifdef INVARIANTS
+ panic("control->data not null at read eor?");
+#else
+ SCTP_PRINTF("Strange, data left in the control buffer .. invarients would panic?\n");
+ sctp_m_freem(control->data);
+ control->data = NULL;
+#endif
+ }
+ done_with_control:
+ if (TAILQ_NEXT(control, next) == NULL) {
+ /*
+ * If we don't have a next we need the
+ * lock; if there is a next, the
+ * interrupt side is filling ahead of us
+ * and we don't need the lock to
+ * remove this guy (which is the
+ * head of the queue).
+ */
+ if (hold_rlock == 0) {
+ SCTP_INP_READ_LOCK(inp);
+ hold_rlock = 1;
+ }
+ }
+ TAILQ_REMOVE(&inp->read_queue, control, next);
+ /* Add back any hidden data */
+ if (control->held_length) {
+ held_length = 0;
+ control->held_length = 0;
+ wakeup_read_socket = 1;
+ }
+ if (control->aux_data) {
+ sctp_m_free(control->aux_data);
+ control->aux_data = NULL;
+ }
+ no_rcv_needed = control->do_not_ref_stcb;
+ sctp_free_remote_addr(control->whoFrom);
+ control->data = NULL;
+ sctp_free_a_readq(stcb, control);
+ control = NULL;
+ if ((freed_so_far >= rwnd_req) &&
+ (no_rcv_needed == 0))
+ sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
+
+ } else {
+ /*
+ * The user did not read all of this
+ * message, turn off the returned MSG_EOR
+ * since we are leaving more behind on the
+ * control to read.
+ */
+#ifdef INVARIANTS
+ if (control->end_added &&
+ (control->data == NULL) &&
+ (control->tail_mbuf == NULL)) {
+ panic("Gak, control->length is corrupt?");
+ }
+#endif
+ no_rcv_needed = control->do_not_ref_stcb;
+ out_flags &= ~MSG_EOR;
+ }
+ }
+ if (out_flags & MSG_EOR) {
+ goto release;
+ }
+ if ((uio->uio_resid == 0) ||
+ ((in_eeor_mode) && (copied_so_far >= max(so->so_rcv.sb_lowat, 1)))
+ ) {
+ goto release;
+ }
+ /*
+ * If I hit here the receiver wants more and this message is
+ * NOT done (pd-api). So two questions: Can we block? If not,
+ * we are done. Did the user NOT set MSG_WAITALL?
+ */
+ if (block_allowed == 0) {
+ goto release;
+ }
+ /*
+ * We need to wait for more data. A few things: - We don't
+ * sbunlock() so no one else can start reading. - We must
+ * be sure to account for the case where what is added on
+ * wakeup is NOT for our control.
+ */
+
+ /*
+ * Do we need to tell the transport a rwnd update might be
+ * needed before we go to sleep?
+ */
+ if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
+ ((freed_so_far >= rwnd_req) &&
+ (control->do_not_ref_stcb == 0) &&
+ (no_rcv_needed == 0))) {
+ sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
+ }
+wait_some_more:
+ if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
+ goto release;
+ }
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)
+ goto release;
+
+ if (hold_rlock == 1) {
+ SCTP_INP_READ_UNLOCK(inp);
+ hold_rlock = 0;
+ }
+ if (hold_sblock == 0) {
+ SOCKBUF_LOCK(&so->so_rcv);
+ hold_sblock = 1;
+ }
+ if ((copied_so_far) && (control->length == 0) &&
+ (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))) {
+ goto release;
+ }
+ if (so->so_rcv.sb_cc <= control->held_length) {
+ error = sbwait(&so->so_rcv);
+ if (error) {
+ goto release;
+ }
+ control->held_length = 0;
+ }
+ if (hold_sblock) {
+ SOCKBUF_UNLOCK(&so->so_rcv);
+ hold_sblock = 0;
+ }
+ if (control->length == 0) {
+ /* still nothing here */
+ if (control->end_added == 1) {
+ /* he aborted, or is done, i.e. did a shutdown */
+ out_flags |= MSG_EOR;
+ if (control->pdapi_aborted) {
+ if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
+ control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
+
+ out_flags |= MSG_TRUNC;
+ } else {
+ if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
+ control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
+ }
+ goto done_with_control;
+ }
+ if (so->so_rcv.sb_cc > held_length) {
+ control->held_length = so->so_rcv.sb_cc;
+ held_length = 0;
+ }
+ goto wait_some_more;
+ } else if (control->data == NULL) {
+ /*
+ * we must re-sync since data is probably being
+ * added
+ */
+ SCTP_INP_READ_LOCK(inp);
+ if ((control->length > 0) && (control->data == NULL)) {
+ /*
+ * big trouble... we have the lock and it's
+ * corrupt?
+ */
+#ifdef INVARIANTS
+ panic("Impossible data==NULL length !=0");
+#endif
+ out_flags |= MSG_EOR;
+ out_flags |= MSG_TRUNC;
+ control->length = 0;
+ SCTP_INP_READ_UNLOCK(inp);
+ goto done_with_control;
+ }
+ SCTP_INP_READ_UNLOCK(inp);
+ /* We will fall around to get more data */
+ }
+ goto get_more_data;
+ } else {
+ /*-
+ * Give caller back the mbuf chain,
+ * store in uio_resid the length
+ */
+ wakeup_read_socket = 0;
+ if ((control->end_added == 0) ||
+ (TAILQ_NEXT(control, next) == NULL)) {
+ /* Need to get rlock */
+ if (hold_rlock == 0) {
+ SCTP_INP_READ_LOCK(inp);
+ hold_rlock = 1;
+ }
+ }
+ if (control->end_added) {
+ out_flags |= MSG_EOR;
+ if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
+ control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
+ }
+ if (control->spec_flags & M_NOTIFICATION) {
+ out_flags |= MSG_NOTIFICATION;
+ }
+ uio->uio_resid = control->length;
+ *mp = control->data;
+ m = control->data;
+ while (m) {
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
+ sctp_sblog(&so->so_rcv,
+ control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
+ }
+ sctp_sbfree(control, stcb, &so->so_rcv, m);
+ freed_so_far += SCTP_BUF_LEN(m);
+ freed_so_far += MSIZE;
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
+ sctp_sblog(&so->so_rcv,
+ control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
+ }
+ m = SCTP_BUF_NEXT(m);
+ }
+ control->data = control->tail_mbuf = NULL;
+ control->length = 0;
+ if (out_flags & MSG_EOR) {
+ /* Done with this control */
+ goto done_with_control;
+ }
+ }
+release:
+ if (hold_rlock == 1) {
+ SCTP_INP_READ_UNLOCK(inp);
+ hold_rlock = 0;
+ }
+ if (hold_sblock == 1) {
+ SOCKBUF_UNLOCK(&so->so_rcv);
+ hold_sblock = 0;
+ }
+ sbunlock(&so->so_rcv);
+ sockbuf_lock = 0;
+
+release_unlocked:
+ if (hold_sblock) {
+ SOCKBUF_UNLOCK(&so->so_rcv);
+ hold_sblock = 0;
+ }
+ if ((stcb) && (in_flags & MSG_PEEK) == 0) {
+ if ((freed_so_far >= rwnd_req) &&
+ (control && (control->do_not_ref_stcb == 0)) &&
+ (no_rcv_needed == 0))
+ sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
+ }
+out:
+ if (msg_flags) {
+ *msg_flags = out_flags;
+ }
+ if (((out_flags & MSG_EOR) == 0) &&
+ ((in_flags & MSG_PEEK) == 0) &&
+ (sinfo) &&
+ (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO))) {
+ struct sctp_extrcvinfo *s_extra;
+
+ s_extra = (struct sctp_extrcvinfo *)sinfo;
+ s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG;
+ }
+ if (hold_rlock == 1) {
+ SCTP_INP_READ_UNLOCK(inp);
+ hold_rlock = 0;
+ }
+ if (hold_sblock) {
+ SOCKBUF_UNLOCK(&so->so_rcv);
+ hold_sblock = 0;
+ }
+ if (sockbuf_lock) {
+ sbunlock(&so->so_rcv);
+ }
+ if (freecnt_applied) {
+ /*
+ * The lock on the socket buffer protects us so the free
+ * code will stop. But since we used the socketbuf lock and
+ * the sender uses the tcb_lock to increment, we need to use
+ * the atomic add to the refcnt.
+ */
+ if (stcb == NULL) {
+#ifdef INVARIANTS
+ panic("stcb for refcnt has gone NULL?");
+ goto stage_left;
+#else
+ goto stage_left;
+#endif
+ }
+ atomic_add_int(&stcb->asoc.refcnt, -1);
+ freecnt_applied = 0;
+ /* Save the value back for next time */
+ stcb->freed_by_sorcv_sincelast = freed_so_far;
+ }
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
+ if (stcb) {
+ sctp_misc_ints(SCTP_SORECV_DONE,
+ freed_so_far,
+ ((uio) ? (slen - uio->uio_resid) : slen),
+ stcb->asoc.my_rwnd,
+ so->so_rcv.sb_cc);
+ } else {
+ sctp_misc_ints(SCTP_SORECV_DONE,
+ freed_so_far,
+ ((uio) ? (slen - uio->uio_resid) : slen),
+ 0,
+ so->so_rcv.sb_cc);
+ }
+ }
+stage_left:
+ if (wakeup_read_socket) {
+ sctp_sorwakeup(inp, so);
+ }
+ return (error);
+}
+
+
+#ifdef SCTP_MBUF_LOGGING
+struct mbuf *
+sctp_m_free(struct mbuf *m)
+{
+ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
+ if (SCTP_BUF_IS_EXTENDED(m)) {
+ sctp_log_mb(m, SCTP_MBUF_IFREE);
+ }
+ }
+ return (m_free(m));
+}
+
+void
+sctp_m_freem(struct mbuf *mb)
+{
+ while (mb != NULL)
+ mb = sctp_m_free(mb);
+}
+
+#endif
+
+int
+sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id)
+{
+ /*
+ * Given a local address, request a peer-set-primary for all
+ * associations that hold the address.
+ */
+ struct sctp_ifa *ifa;
+ struct sctp_laddr *wi;
+
+ ifa = sctp_find_ifa_by_addr(sa, vrf_id, 0);
+ if (ifa == NULL) {
+ SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL);
+ return (EADDRNOTAVAIL);
+ }
+ /*
+ * Now that we have the ifa we must awaken the iterator with this
+ * message.
+ */
+ wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
+ if (wi == NULL) {
+ SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
+ return (ENOMEM);
+ }
+ /* Now incr the count and init the wi structure */
+ SCTP_INCR_LADDR_COUNT();
+ bzero(wi, sizeof(*wi));
+ (void)SCTP_GETTIME_TIMEVAL(&wi->start_time);
+ wi->ifa = ifa;
+ wi->action = SCTP_SET_PRIM_ADDR;
+ atomic_add_int(&ifa->refcount, 1);
+
+ /* Now add it to the work queue */
+ SCTP_WQ_ADDR_LOCK();
+ /*
+ * Should this really be a tailq? As it is we will process the
+ * newest first :-0
+ */
+ LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
+ SCTP_WQ_ADDR_UNLOCK();
+ sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
+ (struct sctp_inpcb *)NULL,
+ (struct sctp_tcb *)NULL,
+ (struct sctp_nets *)NULL);
+ return (0);
+}
+
+
+int
+sctp_soreceive(struct socket *so,
+ struct sockaddr **psa,
+ struct uio *uio,
+ struct mbuf **mp0,
+ struct mbuf **controlp,
+ int *flagsp)
+{
+ int error, fromlen;
+ uint8_t sockbuf[256];
+ struct sockaddr *from;
+ struct sctp_extrcvinfo sinfo;
+ int filling_sinfo = 1;
+ struct sctp_inpcb *inp;
+
+ inp = (struct sctp_inpcb *)so->so_pcb;
+ /* pickup the assoc we are reading from */
+ if (inp == NULL) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
+ return (EINVAL);
+ }
+ if ((sctp_is_feature_off(inp,
+ SCTP_PCB_FLAGS_RECVDATAIOEVNT)) ||
+ (controlp == NULL)) {
+ /* user does not want the sndrcv ctl */
+ filling_sinfo = 0;
+ }
+ if (psa) {
+ from = (struct sockaddr *)sockbuf;
+ fromlen = sizeof(sockbuf);
+ from->sa_len = 0;
+ } else {
+ from = NULL;
+ fromlen = 0;
+ }
+
+ error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, flagsp,
+ (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo);
+ if ((controlp) && (filling_sinfo)) {
+ /* copy back the sinfo in a CMSG format */
+ *controlp = sctp_build_ctl_nchunk(inp,
+ (struct sctp_sndrcvinfo *)&sinfo);
+ }
+ if (psa) {
+ /* copy back the address info */
+ if (from && from->sa_len) {
+ *psa = sodupsockaddr(from, M_NOWAIT);
+ } else {
+ *psa = NULL;
+ }
+ }
+ return (error);
+}
+
+
+int
+sctp_l_soreceive(struct socket *so,
+ struct sockaddr **name,
+ struct uio *uio,
+ char **controlp,
+ int *controllen,
+ int *flag)
+{
+ int error, fromlen;
+ uint8_t sockbuf[256];
+ struct sockaddr *from;
+ struct sctp_extrcvinfo sinfo;
+ int filling_sinfo = 1;
+ struct sctp_inpcb *inp;
+
+ inp = (struct sctp_inpcb *)so->so_pcb;
+ /* pickup the assoc we are reading from */
+ if (inp == NULL) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
+ return (EINVAL);
+ }
+ if ((sctp_is_feature_off(inp,
+ SCTP_PCB_FLAGS_RECVDATAIOEVNT)) ||
+ (controlp == NULL)) {
+ /* user does not want the sndrcv ctl */
+ filling_sinfo = 0;
+ }
+ if (name) {
+ from = (struct sockaddr *)sockbuf;
+ fromlen = sizeof(sockbuf);
+ from->sa_len = 0;
+ } else {
+ from = NULL;
+ fromlen = 0;
+ }
+
+ error = sctp_sorecvmsg(so, uio,
+ (struct mbuf **)NULL,
+ from, fromlen, flag,
+ (struct sctp_sndrcvinfo *)&sinfo,
+ filling_sinfo);
+ if ((controlp) && (filling_sinfo)) {
+ /*
+ * copy back the sinfo in a CMSG format; note that the
+ * caller has responsibility for freeing the memory.
+ */
+ *controlp = sctp_build_ctl_cchunk(inp,
+ controllen,
+ (struct sctp_sndrcvinfo *)&sinfo);
+ }
+ if (name) {
+ /* copy back the address info */
+ if (from && from->sa_len) {
+ *name = sodupsockaddr(from, M_WAIT);
+ } else {
+ *name = NULL;
+ }
+ }
+ return (error);
+}
+
+
+
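+/*
+ * Walk the packed address list handed to connectx() and add each v4/v6
+ * entry to the association. On failure the assoc is freed, *error is
+ * set to ENOBUFS, and the number of addresses added so far is returned.
+ */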
+int
+sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr,
+ int totaddr, int *error)
+{
+ int added = 0;
+ int i;
+ struct sctp_inpcb *inp;
+ struct sockaddr *sa;
+ size_t incr = 0;
+
+ sa = addr;
+ inp = stcb->sctp_ep;
+ *error = 0;
+ for (i = 0; i < totaddr; i++) {
+ if (sa->sa_family == AF_INET) {
+ incr = sizeof(struct sockaddr_in);
+ if (sctp_add_remote_addr(stcb, sa, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
+ /* assoc gone no un-lock */
+ SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
+ (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_7);
+ *error = ENOBUFS;
+ goto out_now;
+ }
+ added++;
+ } else if (sa->sa_family == AF_INET6) {
+ incr = sizeof(struct sockaddr_in6);
+ if (sctp_add_remote_addr(stcb, sa, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
+ /* assoc gone no un-lock */
+ SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
+ (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_8);
+ *error = ENOBUFS;
+ goto out_now;
+ }
+ added++;
+ }
+ sa = (struct sockaddr *)((caddr_t)sa + incr);
+ }
+out_now:
+ return (added);
+}
+
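+/*
+ * Validate and count the v4/v6 entries in a packed connectx() address
+ * list. If any address already belongs to an association on this
+ * endpoint, that association is returned; otherwise NULL.
+ */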
+struct sctp_tcb *
+sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr,
+ int *totaddr, int *num_v4, int *num_v6, int *error,
+ int limit, int *bad_addr)
+{
+ struct sockaddr *sa;
+ struct sctp_tcb *stcb = NULL;
+ size_t incr, at, i;
+
+ at = incr = 0;
+ sa = addr;
+ *error = *num_v6 = *num_v4 = 0;
+ /* account and validate addresses */
+ for (i = 0; i < (size_t)*totaddr; i++) {
+ if (sa->sa_family == AF_INET) {
+ (*num_v4) += 1;
+ incr = sizeof(struct sockaddr_in);
+ if (sa->sa_len != incr) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
+ *error = EINVAL;
+ *bad_addr = 1;
+ return (NULL);
+ }
+ } else if (sa->sa_family == AF_INET6) {
+ struct sockaddr_in6 *sin6;
+
+ sin6 = (struct sockaddr_in6 *)sa;
+ if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
+ /* Must be non-mapped for connectx */
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
+ *error = EINVAL;
+ *bad_addr = 1;
+ return (NULL);
+ }
+ (*num_v6) += 1;
+ incr = sizeof(struct sockaddr_in6);
+ if (sa->sa_len != incr) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
+ *error = EINVAL;
+ *bad_addr = 1;
+ return (NULL);
+ }
+ } else {
+ *totaddr = i;
+ /* we are done */
+ break;
+ }
+ SCTP_INP_INCR_REF(inp);
+ stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL);
+ if (stcb != NULL) {
+ /* Already have or am bringing up an association */
+ return (stcb);
+ } else {
+ SCTP_INP_DECR_REF(inp);
+ }
+ if ((at + incr) > (size_t)limit) {
+ *totaddr = i;
+ break;
+ }
+ sa = (struct sockaddr *)((caddr_t)sa + incr);
+ }
+ return ((struct sctp_tcb *)NULL);
+}
+
+/*
+ * sctp_bindx(ADD) for one address.
+ * assumes all arguments are valid/checked by caller.
+ */
+void
+sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp,
+ struct sockaddr *sa, sctp_assoc_t assoc_id,
+ uint32_t vrf_id, int *error, void *p)
+{
+ struct sockaddr *addr_touse;
+
+#ifdef INET6
+ struct sockaddr_in sin;
+
+#endif
+
+ /* see if we're bound all already! */
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
+ *error = EINVAL;
+ return;
+ }
+ addr_touse = sa;
+#if defined(INET6) && !defined(__Userspace__) /* TODO port in6_sin6_2_sin */
+ if (sa->sa_family == AF_INET6) {
+ struct sockaddr_in6 *sin6;
+
+ if (sa->sa_len != sizeof(struct sockaddr_in6)) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
+ *error = EINVAL;
+ return;
+ }
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
+ /* can only bind v6 on PF_INET6 sockets */
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
+ *error = EINVAL;
+ return;
+ }
+ sin6 = (struct sockaddr_in6 *)addr_touse;
+ if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
+ SCTP_IPV6_V6ONLY(inp)) {
+ /* can't bind a v4-mapped address on a v6-only socket */
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
+ *error = EINVAL;
+ return;
+ }
+ in6_sin6_2_sin(&sin, sin6);
+ addr_touse = (struct sockaddr *)&sin;
+ }
+ }
+#endif
+ if (sa->sa_family == AF_INET) {
+ if (sa->sa_len != sizeof(struct sockaddr_in)) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
+ *error = EINVAL;
+ return;
+ }
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
+ SCTP_IPV6_V6ONLY(inp)) {
+ /* can't bind a v4 address on a v6-only socket */
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
+ *error = EINVAL;
+ return;
+ }
+ }
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
+ if (p == NULL) {
+ /* Can't get proc for Net/Open BSD */
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
+ *error = EINVAL;
+ return;
+ }
+ *error = sctp_inpcb_bind(so, addr_touse, NULL, p);
+ return;
+ }
+ /*
+ * No locks required here since bind and mgmt_ep_sa all do their own
+ * locking. If we do something for the FIX: below we may need to
+ * lock in that case.
+ */
+ if (assoc_id == 0) {
+ /* add the address */
+ struct sctp_inpcb *lep;
+ struct sockaddr_in *lsin = (struct sockaddr_in *)addr_touse;
+
+ /* validate the incoming port */
+ if ((lsin->sin_port != 0) &&
+ (lsin->sin_port != inp->sctp_lport)) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
+ *error = EINVAL;
+ return;
+ } else {
+ /* user specified 0 port, set it to existing port */
+ lsin->sin_port = inp->sctp_lport;
+ }
+
+ lep = sctp_pcb_findep(addr_touse, 1, 0, vrf_id);
+ if (lep != NULL) {
+ /*
+ * We must decrement the refcount since we have the
+ * ep already and are binding. No remove going on
+ * here.
+ */
+ SCTP_INP_DECR_REF(lep);
+ }
+ if (lep == inp) {
+ /* already bound to it.. ok */
+ return;
+ } else if (lep == NULL) {
+ ((struct sockaddr_in *)addr_touse)->sin_port = 0;
+ *error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
+ SCTP_ADD_IP_ADDRESS,
+ vrf_id, NULL);
+ } else {
+ *error = EADDRINUSE;
+ }
+ if (*error)
+ return;
+ } else {
+ /*
+ * FIX: decide whether we allow assoc based bindx
+ */
+ }
+}
+
+/*
+ * sctp_bindx(DELETE) for one address.
+ * assumes all arguments are valid/checked by caller.
+ */
+void
+sctp_bindx_delete_address(struct socket *so, struct sctp_inpcb *inp,
+ struct sockaddr *sa, sctp_assoc_t assoc_id,
+ uint32_t vrf_id, int *error)
+{
+ struct sockaddr *addr_touse;
+
+#ifdef INET6
+ struct sockaddr_in sin;
+
+#endif
+
+ /* see if we're bound all already! */
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
+ *error = EINVAL;
+ return;
+ }
+ addr_touse = sa;
+#if defined(INET6) && !defined(__Userspace__) /* TODO port in6_sin6_2_sin */
+ if (sa->sa_family == AF_INET6) {
+ struct sockaddr_in6 *sin6;
+
+ if (sa->sa_len != sizeof(struct sockaddr_in6)) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
+ *error = EINVAL;
+ return;
+ }
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
+ /* can only bind v6 on PF_INET6 sockets */
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
+ *error = EINVAL;
+ return;
+ }
+ sin6 = (struct sockaddr_in6 *)addr_touse;
+ if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
+ SCTP_IPV6_V6ONLY(inp)) {
+ /* can't bind a v4-mapped address on a v6-only socket */
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
+ *error = EINVAL;
+ return;
+ }
+ in6_sin6_2_sin(&sin, sin6);
+ addr_touse = (struct sockaddr *)&sin;
+ }
+ }
+#endif
+ if (sa->sa_family == AF_INET) {
+ if (sa->sa_len != sizeof(struct sockaddr_in)) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
+ *error = EINVAL;
+ return;
+ }
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
+ SCTP_IPV6_V6ONLY(inp)) {
+ /* can't bind a v4 address on a v6-only socket */
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
+ *error = EINVAL;
+ return;
+ }
+ }
+ /*
+ * No lock required mgmt_ep_sa does its own locking. If the FIX:
+ * below is ever changed we may need to lock before calling
+ * association level binding.
+ */
+ if (assoc_id == 0) {
+ /* delete the address */
+ *error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
+ SCTP_DEL_IP_ADDRESS,
+ vrf_id, NULL);
+ } else {
+ /*
+ * FIX: decide whether we allow assoc based bindx
+ */
+ }
+}
+
+/*
+ * returns the valid local address count for an assoc, taking into account
+ * all scoping rules
+ */
+int
+sctp_local_addr_count(struct sctp_tcb *stcb)
+{
+ int loopback_scope, ipv4_local_scope, local_scope, site_scope;
+ int ipv4_addr_legal, ipv6_addr_legal;
+ struct sctp_vrf *vrf;
+ struct sctp_ifn *sctp_ifn;
+ struct sctp_ifa *sctp_ifa;
+ int count = 0;
+
+ /* Turn on all the appropriate scopes */
+ loopback_scope = stcb->asoc.loopback_scope;
+ ipv4_local_scope = stcb->asoc.ipv4_local_scope;
+ local_scope = stcb->asoc.local_scope;
+ site_scope = stcb->asoc.site_scope;
+ ipv4_addr_legal = ipv6_addr_legal = 0;
+ if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
+ ipv6_addr_legal = 1;
+ if (SCTP_IPV6_V6ONLY(stcb->sctp_ep) == 0) {
+ ipv4_addr_legal = 1;
+ }
+ } else {
+ ipv4_addr_legal = 1;
+ }
+
+ SCTP_IPI_ADDR_RLOCK();
+ vrf = sctp_find_vrf(stcb->asoc.vrf_id);
+ if (vrf == NULL) {
+ /* no vrf, no addresses */
+ SCTP_IPI_ADDR_RUNLOCK();
+ return (0);
+ }
+ if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
+ /*
+ * bound all case: go through all ifns on the vrf
+ */
+ LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
+ if ((loopback_scope == 0) &&
+ SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
+ continue;
+ }
+ LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
+ if (sctp_is_addr_restricted(stcb, sctp_ifa))
+ continue;
+ switch (sctp_ifa->address.sa.sa_family) {
+ case AF_INET:
+ if (ipv4_addr_legal) {
+ struct sockaddr_in *sin;
+
+ sin = (struct sockaddr_in *)&sctp_ifa->address.sa;
+ if (sin->sin_addr.s_addr == 0) {
+ /*
+ * skip unspecified
+ * addrs
+ */
+ continue;
+ }
+ if ((ipv4_local_scope == 0) &&
+ (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
+ continue;
+ }
+ /* count this one */
+ count++;
+ } else {
+ continue;
+ }
+ break;
+#ifdef INET6
+ case AF_INET6:
+ if (ipv6_addr_legal) {
+ struct sockaddr_in6 *sin6;
+
+ sin6 = (struct sockaddr_in6 *)&sctp_ifa->address.sa;
+ if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
+ continue;
+ }
+ if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
+ if (local_scope == 0)
+ continue;
+ if (sin6->sin6_scope_id == 0) {
+ if (sa6_recoverscope(sin6) != 0)
+ /* bad link local address */
+ continue;
+ }
+ }
+ if ((site_scope == 0) &&
+ (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
+ continue;
+ }
+ /* count this one */
+ count++;
+ }
+ break;
+#endif
+ default:
+ /* TSNH */
+ break;
+ }
+ }
+ }
+ } else {
+ /*
+ * subset bound case
+ */
+ struct sctp_laddr *laddr;
+
+ LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list,
+ sctp_nxt_addr) {
+ if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
+ continue;
+ }
+ /* count this one */
+ count++;
+ }
+ }
+ SCTP_IPI_ADDR_RUNLOCK();
+ return (count);
+}
+
+#if defined(SCTP_LOCAL_TRACE_BUF)
+
+void
+sctp_log_trace(uint32_t subsys, const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f)
+{
+ uint32_t saveindex, newindex;
+
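+ /* Reserve a trace slot with a CAS loop; the shared index wraps
+ * back to the start once SCTP_MAX_LOGGING_SIZE is reached. */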
+ do {
+ saveindex = SCTP_BASE_SYSCTL(sctp_log).index;
+ if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
+ newindex = 1;
+ } else {
+ newindex = saveindex + 1;
+ }
+ } while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0);
+ if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
+ saveindex = 0;
+ }
+ SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT;
+ SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys;
+ SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a;
+ SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b;
+ SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c;
+ SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d;
+ SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e;
+ SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f;
+}
+
+#endif
+/* We will need to add support
+ * to bind the ports and such here
+ * so we can do UDP tunneling. In
+ * the meantime, we return an error.
+ */
+#include <rtems/freebsd/netinet/udp.h>
+#include <rtems/freebsd/netinet/udp_var.h>
+#include <rtems/freebsd/sys/proc.h>
+#ifdef INET6
+#include <rtems/freebsd/netinet6/sctp6_var.h>
+#endif
+
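+/*
+ * Strip the UDP encapsulation from a tunneled packet and feed the
+ * remaining IP/SCTP packet to sctp_input(), passing along the peer's
+ * UDP source port.
+ */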
+static void
+sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *ignored)
+{
+ struct ip *iph;
+ struct mbuf *sp, *last;
+ struct udphdr *uhdr;
+ uint16_t port = 0, len;
+ int header_size = sizeof(struct udphdr) + sizeof(struct sctphdr);
+
+ /*
+ * Split out the mbuf chain. Leave the IP header in m, place the
+ * rest in sp.
+ */
+ if ((m->m_flags & M_PKTHDR) == 0) {
+ /* Can't handle one that is not a pkt hdr */
+ goto out;
+ }
+ /* pull the src port */
+ iph = mtod(m, struct ip *);
+ uhdr = (struct udphdr *)((caddr_t)iph + off);
+
+ port = uhdr->uh_sport;
+ sp = m_split(m, off, M_DONTWAIT);
+ if (sp == NULL) {
+ /* Gak, drop packet, we can't do a split */
+ goto out;
+ }
+ if (sp->m_pkthdr.len < header_size) {
+ /* Gak, packet can't have an SCTP header in it - too small */
+ m_freem(sp);
+ goto out;
+ }
+ /* ok now pull up the UDP header and SCTP header together */
+ sp = m_pullup(sp, header_size);
+ if (sp == NULL) {
+ /* Gak pullup failed */
+ goto out;
+ }
+ /* trim out the UDP header */
+ m_adj(sp, sizeof(struct udphdr));
+
+ /* Now reconstruct the mbuf chain */
+ /* 1) find last one */
+ last = m;
+ while (last->m_next != NULL) {
+ last = last->m_next;
+ }
+ last->m_next = sp;
+ m->m_pkthdr.len += sp->m_pkthdr.len;
+ /* Now it's ready for sctp_input or sctp6_input */
+ iph = mtod(m, struct ip *);
+ switch (iph->ip_v) {
+ case IPVERSION:
+ {
+ /* it's IPv4 */
+ len = SCTP_GET_IPV4_LENGTH(iph);
+ len -= sizeof(struct udphdr);
+ SCTP_GET_IPV4_LENGTH(iph) = len;
+ sctp_input_with_port(m, off, port);
+ break;
+ }
+#ifdef INET6
+ case IPV6_VERSION >> 4:
+ {
+ /* it's IPv6 - NOT supported */
+ goto out;
+ }
+#endif
+ default:
+ {
+ m_freem(m);
+ break;
+ }
+ }
+ return;
+out:
+ m_freem(m);
+}
+
+void
+sctp_over_udp_stop(void)
+{
+ struct socket *sop;
+
+ /*
+ * This function assumes the sysctl caller holds sctp_sysctl_info_lock()
+ * for writing!
+ */
+ if (SCTP_BASE_INFO(udp_tun_socket) == NULL) {
+ /* Nothing to do */
+ return;
+ }
+ sop = SCTP_BASE_INFO(udp_tun_socket);
+ soclose(sop);
+ SCTP_BASE_INFO(udp_tun_socket) = NULL;
+}
+
+int
+sctp_over_udp_start(void)
+{
+ uint16_t port;
+ int ret;
+ struct sockaddr_in sin;
+ struct socket *sop = NULL;
+ struct thread *th;
+ struct ucred *cred;
+
+ /*
+ * This function assumes the sysctl caller holds sctp_sysctl_info_lock()
+ * for writing!
+ */
+ port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port);
+ if (port == 0) {
+ /* Must have a port set */
+ return (EINVAL);
+ }
+ if (SCTP_BASE_INFO(udp_tun_socket) != NULL) {
+ /* Already running -- must stop first */
+ return (EALREADY);
+ }
+ th = curthread;
+ cred = th->td_ucred;
+ if ((ret = socreate(PF_INET, &sop,
+ SOCK_DGRAM, IPPROTO_UDP, cred, th))) {
+ return (ret);
+ }
+ SCTP_BASE_INFO(udp_tun_socket) = sop;
+ /* call the special UDP hook */
+ ret = udp_set_kernel_tunneling(sop, sctp_recv_udp_tunneled_packet);
+ if (ret) {
+ goto exit_stage_left;
+ }
+ /* Ok we have a socket, bind it to the port */
+ memset(&sin, 0, sizeof(sin));
+ sin.sin_len = sizeof(sin);
+ sin.sin_family = AF_INET;
+ sin.sin_port = htons(port);
+ ret = sobind(sop, (struct sockaddr *)&sin, th);
+ if (ret) {
+ /* Close up, we can't get the port */
+exit_stage_left:
+ sctp_over_udp_stop();
+ return (ret);
+ }
+ /*
+ * Ok, we should now get UDP packets directly to our input routine
+ * sctp_recv_udp_tunneled_packet().
+ */
+ return (0);
+}
diff --git a/rtems/freebsd/netinet/sctputil.h b/rtems/freebsd/netinet/sctputil.h
new file mode 100644
index 00000000..690e8840
--- /dev/null
+++ b/rtems/freebsd/netinet/sctputil.h
@@ -0,0 +1,392 @@
+/*-
+ * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+/* $KAME: sctputil.h,v 1.15 2005/03/06 16:04:19 itojun Exp $ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+#ifndef __sctputil_h__
+#define __sctputil_h__
+
+
+#if defined(_KERNEL) || defined(__Userspace__)
+
+#define SCTP_READ_LOCK_HELD 1
+#define SCTP_READ_LOCK_NOT_HELD 0
+
+#ifdef SCTP_ASOCLOG_OF_TSNS
+void sctp_print_out_track_log(struct sctp_tcb *stcb);
+
+#endif
+
+#ifdef SCTP_MBUF_LOGGING
+struct mbuf *sctp_m_free(struct mbuf *m);
+void sctp_m_freem(struct mbuf *m);
+
+#else
+#define sctp_m_free m_free
+#define sctp_m_freem m_freem
+#endif
+
+#if defined(SCTP_LOCAL_TRACE_BUF) || defined(__APPLE__)
+void
+ sctp_log_trace(uint32_t fr, const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f);
+
+#endif
+
+#define sctp_get_associd(stcb) ((sctp_assoc_t)stcb->asoc.assoc_id)
+
+
+/*
+ * Function prototypes
+ */
+uint32_t
+sctp_get_ifa_hash_val(struct sockaddr *addr);
+
+struct sctp_ifa *
+ sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr, int hold_lock);
+
+struct sctp_ifa *
+ sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock);
+
+uint32_t sctp_select_initial_TSN(struct sctp_pcb *);
+
+uint32_t sctp_select_a_tag(struct sctp_inpcb *, uint16_t lport, uint16_t rport, int);
+
+int sctp_init_asoc(struct sctp_inpcb *, struct sctp_tcb *, uint32_t, uint32_t);
+
+void sctp_fill_random_store(struct sctp_pcb *);
+
+void
+sctp_timer_start(int, struct sctp_inpcb *, struct sctp_tcb *,
+ struct sctp_nets *);
+
+void
+sctp_timer_stop(int, struct sctp_inpcb *, struct sctp_tcb *,
+ struct sctp_nets *, uint32_t);
+
+int
+ sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id);
+
+void
+ sctp_mtu_size_reset(struct sctp_inpcb *, struct sctp_association *, uint32_t);
+
+void
+sctp_add_to_readq(struct sctp_inpcb *inp,
+ struct sctp_tcb *stcb,
+ struct sctp_queued_to_read *control,
+ struct sockbuf *sb,
+ int end,
+ int inpread_locked,
+ int so_locked
+#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
+ SCTP_UNUSED
+#endif
+);
+
+int
+sctp_append_to_readq(struct sctp_inpcb *inp,
+ struct sctp_tcb *stcb,
+ struct sctp_queued_to_read *control,
+ struct mbuf *m,
+ int end,
+ int new_cumack,
+ struct sockbuf *sb);
+
+
+void sctp_iterator_worker(void);
+
+uint32_t sctp_get_prev_mtu(uint32_t);
+uint32_t sctp_get_next_mtu(struct sctp_inpcb *, uint32_t);
+
+void
+ sctp_timeout_handler(void *);
+
+uint32_t
+sctp_calculate_rto(struct sctp_tcb *, struct sctp_association *,
+ struct sctp_nets *, struct timeval *, int);
+
+uint32_t sctp_calculate_len(struct mbuf *);
+
+caddr_t sctp_m_getptr(struct mbuf *, int, int, uint8_t *);
+
+struct sctp_paramhdr *
+sctp_get_next_param(struct mbuf *, int,
+ struct sctp_paramhdr *, int);
+
+int sctp_add_pad_tombuf(struct mbuf *, int);
+
+int sctp_pad_lastmbuf(struct mbuf *, int, struct mbuf *);
+
+void
+sctp_ulp_notify(uint32_t, struct sctp_tcb *, uint32_t, void *, int
+#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
+ SCTP_UNUSED
+#endif
+);
+
+void
+sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
+ struct sctp_inpcb *new_inp,
+ struct sctp_tcb *stcb, int waitflags);
+
+
+void sctp_stop_timers_for_shutdown(struct sctp_tcb *);
+
+void
+sctp_report_all_outbound(struct sctp_tcb *, int, int
+#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
+ SCTP_UNUSED
+#endif
+);
+
+int sctp_expand_mapping_array(struct sctp_association *, uint32_t);
+
+void
+sctp_abort_notification(struct sctp_tcb *, int, int
+#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
+ SCTP_UNUSED
+#endif
+);
+
+/* We abort responding to an IP packet for some reason */
+void
+sctp_abort_association(struct sctp_inpcb *, struct sctp_tcb *,
+ struct mbuf *, int, struct sctphdr *, struct mbuf *, uint32_t, uint16_t);
+
+
+/* We choose to abort via user input */
+void
+sctp_abort_an_association(struct sctp_inpcb *, struct sctp_tcb *, int,
+ struct mbuf *, int
+#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
+ SCTP_UNUSED
+#endif
+);
+
+void
+sctp_handle_ootb(struct mbuf *, int, int, struct sctphdr *,
+ struct sctp_inpcb *, struct mbuf *, uint32_t, uint16_t);
+
+int
+sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr,
+ int totaddr, int *error);
+
+struct sctp_tcb *
+sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr,
+ int *totaddr, int *num_v4, int *num_v6, int *error, int limit, int *bad_addr);
+
+int sctp_is_there_an_abort_here(struct mbuf *, int, uint32_t *);
+
+#ifdef INET6
+uint32_t sctp_is_same_scope(struct sockaddr_in6 *, struct sockaddr_in6 *);
+
+struct sockaddr_in6 *
+ sctp_recover_scope(struct sockaddr_in6 *, struct sockaddr_in6 *);
+
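+/*
+ * Macro form of sctp_recover_scope() that uses caller-provided storage
+ * ("store") to recover or clear the embedded scope of a link-local
+ * IPv6 address without allocating.
+ */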
+#define sctp_recover_scope_mac(addr, store) do { \
+ if ((addr->sin6_family == AF_INET6) && \
+ (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr))) { \
+ *store = *addr; \
+ if (addr->sin6_scope_id == 0) { \
+ if (!sa6_recoverscope(store)) { \
+ addr = store; \
+ } \
+ } else { \
+ in6_clearscope(&addr->sin6_addr); \
+ addr = store; \
+ } \
+ } \
+} while (0)
+#endif
+
+int sctp_cmpaddr(struct sockaddr *, struct sockaddr *);
+
+void sctp_print_address(struct sockaddr *);
+void sctp_print_address_pkt(struct ip *, struct sctphdr *);
+
+int
+sctp_release_pr_sctp_chunk(struct sctp_tcb *, struct sctp_tmit_chunk *,
+ int, int
+#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
+ SCTP_UNUSED
+#endif
+);
+
+struct mbuf *sctp_generate_invmanparam(int);
+
+void
+sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp,
+ struct sockaddr *sa, sctp_assoc_t assoc_id,
+ uint32_t vrf_id, int *error, void *p);
+void
+sctp_bindx_delete_address(struct socket *so, struct sctp_inpcb *inp,
+ struct sockaddr *sa, sctp_assoc_t assoc_id,
+ uint32_t vrf_id, int *error);
+
+int sctp_local_addr_count(struct sctp_tcb *stcb);
+
+#ifdef SCTP_MBCNT_LOGGING
+void
+sctp_free_bufspace(struct sctp_tcb *, struct sctp_association *,
+ struct sctp_tmit_chunk *, int);
+
+#else
+#define sctp_free_bufspace(stcb, asoc, tp1, chk_cnt) \
+do { \
+ if (tp1->data != NULL) { \
+ atomic_subtract_int(&((asoc)->chunks_on_out_queue), chk_cnt); \
+ if ((asoc)->total_output_queue_size >= tp1->book_size) { \
+ atomic_subtract_int(&((asoc)->total_output_queue_size), tp1->book_size); \
+ } else { \
+ (asoc)->total_output_queue_size = 0; \
+ } \
+ if (stcb->sctp_socket && ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || \
+ (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) { \
+ if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) { \
+ atomic_subtract_int(&((stcb)->sctp_socket->so_snd.sb_cc), tp1->book_size); \
+ } else { \
+ stcb->sctp_socket->so_snd.sb_cc = 0; \
+ } \
+ } \
+ } \
+} while (0)
+
+#endif
+
+#define sctp_free_spbufspace(stcb, asoc, sp) \
+do { \
+ if (sp->data != NULL) { \
+ if ((asoc)->total_output_queue_size >= sp->length) { \
+ atomic_subtract_int(&(asoc)->total_output_queue_size, sp->length); \
+ } else { \
+ (asoc)->total_output_queue_size = 0; \
+ } \
+ if (stcb->sctp_socket && ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || \
+ (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) { \
+ if (stcb->sctp_socket->so_snd.sb_cc >= sp->length) { \
+ atomic_subtract_int(&stcb->sctp_socket->so_snd.sb_cc,sp->length); \
+ } else { \
+ stcb->sctp_socket->so_snd.sb_cc = 0; \
+ } \
+ } \
+ } \
+} while (0)
+
+#define sctp_snd_sb_alloc(stcb, sz) \
+do { \
+ atomic_add_int(&stcb->asoc.total_output_queue_size,sz); \
+ if ((stcb->sctp_socket != NULL) && \
+ ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || \
+ (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) { \
+ atomic_add_int(&stcb->sctp_socket->so_snd.sb_cc,sz); \
+ } \
+} while (0)
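
All three macros above share one saturating-accounting pattern: subtract a chunk's book-kept size from a counter, but clamp at zero rather than underflow if the books have drifted. A standalone sketch of just that pattern, with illustrative names that are not part of the SCTP API:

    #include <stdint.h>
    #include <stdio.h>

    /* Clamp-to-zero subtraction, as in sctp_free_bufspace()/sctp_free_spbufspace(). */
    static void
    queue_size_sub(uint32_t *queue_size, uint32_t book_size)
    {
        if (*queue_size >= book_size)
            *queue_size -= book_size;   /* the kernel uses atomic_subtract_int() */
        else
            *queue_size = 0;            /* books drifted; do not underflow */
    }

    int
    main(void)
    {
        uint32_t total = 1000;

        queue_size_sub(&total, 600);    /* 400 left */
        queue_size_sub(&total, 600);    /* would underflow; clamps to 0 */
        printf("%u\n", total);          /* prints 0 */
        return (0);
    }
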
+
+/* new functions to start/stop udp tunneling */
+void sctp_over_udp_stop(void);
+int sctp_over_udp_start(void);
+
+int
+sctp_soreceive(struct socket *so, struct sockaddr **psa,
+ struct uio *uio,
+ struct mbuf **mp0,
+ struct mbuf **controlp,
+ int *flagsp);
+
+
+/* For callers not passing mbufs, this does the
+ * translations for you. The caller owns the memory
+ * of size controllen returned in controlp.
+ */
+int
+sctp_l_soreceive(struct socket *so,
+ struct sockaddr **name,
+ struct uio *uio,
+ char **controlp,
+ int *controllen,
+ int *flag);
+
+
+void
+ sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d);
+
+void
+sctp_wakeup_log(struct sctp_tcb *stcb,
+ uint32_t cumtsn,
+ uint32_t wake_cnt, int from);
+
+void sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t, uint16_t, uint16_t, int);
+
+void sctp_log_nagle_event(struct sctp_tcb *stcb, int action);
+
+
+void
+ sctp_log_mb(struct mbuf *m, int from);
+
+void
+sctp_sblog(struct sockbuf *sb,
+ struct sctp_tcb *stcb, int from, int incr);
+
+void
+sctp_log_strm_del(struct sctp_queued_to_read *control,
+ struct sctp_queued_to_read *poschk,
+ int from);
+void sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *, int, uint8_t);
+void rto_logging(struct sctp_nets *net, int from);
+
+void sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc);
+
+void sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from);
+void sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *, int, int, uint8_t);
+void sctp_log_block(uint8_t, struct socket *, struct sctp_association *, int);
+void sctp_log_rwnd(uint8_t, uint32_t, uint32_t, uint32_t);
+void sctp_log_mbcnt(uint8_t, uint32_t, uint32_t, uint32_t, uint32_t);
+void sctp_log_rwnd_set(uint8_t, uint32_t, uint32_t, uint32_t, uint32_t);
+int sctp_fill_stat_log(void *, size_t *);
+void sctp_log_fr(uint32_t, uint32_t, uint32_t, int);
+void sctp_log_sack(uint32_t, uint32_t, uint32_t, uint16_t, uint16_t, int);
+void sctp_log_map(uint32_t, uint32_t, uint32_t, int);
+void sctp_print_mapping_array(struct sctp_association *asoc);
+void sctp_clr_stat_log(void);
+
+
+#ifdef SCTP_AUDITING_ENABLED
+void
+sctp_auditing(int, struct sctp_inpcb *, struct sctp_tcb *,
+ struct sctp_nets *);
+void sctp_audit_log(uint8_t, uint8_t);
+
+#endif
+
+
+#endif /* _KERNEL */
+#endif
diff --git a/rtems/freebsd/netinet/tcp.h b/rtems/freebsd/netinet/tcp.h
new file mode 100644
index 00000000..c965022b
--- /dev/null
+++ b/rtems/freebsd/netinet/tcp.h
@@ -0,0 +1,226 @@
+/*-
+ * Copyright (c) 1982, 1986, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)tcp.h 8.1 (Berkeley) 6/10/93
+ * $FreeBSD$
+ */
+
+#ifndef _NETINET_TCP_HH_
+#define _NETINET_TCP_HH_
+
+#include <rtems/freebsd/sys/cdefs.h>
+
+#if __BSD_VISIBLE
+
+typedef u_int32_t tcp_seq;
+
+#define tcp6_seq tcp_seq /* for KAME src sync over BSD*'s */
+#define tcp6hdr tcphdr /* for KAME src sync over BSD*'s */
+
+/*
+ * TCP header.
+ * Per RFC 793, September, 1981.
+ */
+struct tcphdr {
+ u_short th_sport; /* source port */
+ u_short th_dport; /* destination port */
+ tcp_seq th_seq; /* sequence number */
+ tcp_seq th_ack; /* acknowledgement number */
+#if BYTE_ORDER == LITTLE_ENDIAN
+ u_int th_x2:4, /* (unused) */
+ th_off:4; /* data offset */
+#endif
+#if BYTE_ORDER == BIG_ENDIAN
+ u_int th_off:4, /* data offset */
+ th_x2:4; /* (unused) */
+#endif
+ u_char th_flags;
+#define TH_FIN 0x01
+#define TH_SYN 0x02
+#define TH_RST 0x04
+#define TH_PUSH 0x08
+#define TH_ACK 0x10
+#define TH_URG 0x20
+#define TH_ECE 0x40
+#define TH_CWR 0x80
+#define TH_FLAGS (TH_FIN|TH_SYN|TH_RST|TH_PUSH|TH_ACK|TH_URG|TH_ECE|TH_CWR)
+#define PRINT_TH_FLAGS "\20\1FIN\2SYN\3RST\4PUSH\5ACK\6URG\7ECE\10CWR"
+
+ u_short th_win; /* window */
+ u_short th_sum; /* checksum */
+ u_short th_urp; /* urgent pointer */
+};
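
Because th_off counts 32-bit words, the header length in bytes is th_off * 4, between sizeof(struct tcphdr) (20) and TCP_MAXHLEN (60). A small user-space sketch that pulls the offset and flags out of a raw segment without depending on the bitfield layout: byte 12 of the header carries the data offset in its high nibble, and byte 13 carries the flags (helper names here are hypothetical):

    #include <stdint.h>
    #include <stdio.h>

    /* raw points at the start of a TCP header in wire format. */
    static unsigned
    tcp_raw_hdrlen(const uint8_t *raw)
    {
        return ((unsigned)(raw[12] >> 4)) * 4;   /* data offset, in 32-bit words */
    }

    static uint8_t
    tcp_raw_flags(const uint8_t *raw)
    {
        return raw[13];                          /* TH_FIN..TH_CWR bits */
    }

    int
    main(void)
    {
        /* 20-byte header: offset nibble 5, flags SYN|ACK (0x12). */
        uint8_t seg[20] = { 0 };

        seg[12] = 5 << 4;
        seg[13] = 0x02 | 0x10;
        printf("hdrlen=%u flags=%#x\n", tcp_raw_hdrlen(seg), tcp_raw_flags(seg));
        return (0);
    }
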
+
+#define TCPOPT_EOL 0
+#define TCPOLEN_EOL 1
+#define TCPOPT_PAD 0 /* padding after EOL */
+#define TCPOLEN_PAD 1
+#define TCPOPT_NOP 1
+#define TCPOLEN_NOP 1
+#define TCPOPT_MAXSEG 2
+#define TCPOLEN_MAXSEG 4
+#define TCPOPT_WINDOW 3
+#define TCPOLEN_WINDOW 3
+#define TCPOPT_SACK_PERMITTED 4
+#define TCPOLEN_SACK_PERMITTED 2
+#define TCPOPT_SACK 5
+#define TCPOLEN_SACKHDR 2
+#define TCPOLEN_SACK 8 /* 2*sizeof(tcp_seq) */
+#define TCPOPT_TIMESTAMP 8
+#define TCPOLEN_TIMESTAMP 10
+#define TCPOLEN_TSTAMP_APPA (TCPOLEN_TIMESTAMP+2) /* appendix A */
+#define TCPOPT_SIGNATURE 19 /* Keyed MD5: RFC 2385 */
+#define TCPOLEN_SIGNATURE 18
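
Except for EOL and NOP, every option is a kind byte followed by a length byte that covers the whole option. As a hedged sketch (not code from this tree), here is one plausible layout of the option block a SYN might carry: MSS, window scale aligned by a NOP, and SACK-permitted, padded to a multiple of 4 bytes as th_off requires:

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical builder; returns bytes written (a multiple of 4). */
    static size_t
    build_syn_options(uint8_t *buf, uint16_t mss, uint8_t wscale)
    {
        size_t i = 0;

        buf[i++] = 2;                     /* TCPOPT_MAXSEG */
        buf[i++] = 4;                     /* TCPOLEN_MAXSEG */
        buf[i++] = (uint8_t)(mss >> 8);   /* MSS, network byte order */
        buf[i++] = (uint8_t)(mss & 0xff);

        buf[i++] = 1;                     /* TCPOPT_NOP: align next option */
        buf[i++] = 3;                     /* TCPOPT_WINDOW */
        buf[i++] = 3;                     /* TCPOLEN_WINDOW */
        buf[i++] = wscale;                /* shift count, <= TCP_MAX_WINSHIFT */

        buf[i++] = 1;                     /* TCPOPT_NOP */
        buf[i++] = 1;                     /* TCPOPT_NOP */
        buf[i++] = 4;                     /* TCPOPT_SACK_PERMITTED */
        buf[i++] = 2;                     /* TCPOLEN_SACK_PERMITTED */

        return i;                         /* 12 bytes: th_off grows by 3 words */
    }

    int
    main(void)
    {
        uint8_t opts[40];

        printf("%zu option bytes\n", build_syn_options(opts, 1460, 7));
        return (0);
    }
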
+
+/* Miscellaneous constants */
+#define MAX_SACK_BLKS 6 /* Max # SACK blocks stored at receiver side */
+#define TCP_MAX_SACK 4 /* MAX # SACKs sent in any segment */
+
+
+/*
+ * Default maximum segment size for TCP.
+ * With an IP MTU of 576, this is 536,
+ * but 512 is probably more convenient.
+ * This should be defined as MIN(512, IP_MSS - sizeof (struct tcpiphdr)).
+ */
+#define TCP_MSS 512
+/*
+ * TCP_MINMSS is defined to be 216 which is fine for the smallest
+ * link MTU (256 bytes, AX.25 packet radio) in the Internet.
+ * However it is very unlikely to come across such low MTU interfaces
+ * these days (anno dato 2003).
+ * See tcp_subr.c tcp_minmss SYSCTL declaration for more comments.
+ * Setting this to "0" disables the minmss check.
+ */
+#define TCP_MINMSS 216
+
+/*
+ * Default maximum segment size for TCP6.
+ * With an IP6 MSS of 1280, this is 1220,
+ * but 1024 is probably more convenient. (xxx kazu in doubt)
+ * This should be defined as MIN(1024, IP6_MSS - sizeof (struct tcpip6hdr))
+ */
+#define TCP6_MSS 1024
+
+#define TCP_MAXWIN 65535 /* largest value for (unscaled) window */
+#define TTCP_CLIENT_SND_WND 4096 /* dflt send window for T/TCP client */
+
+#define TCP_MAX_WINSHIFT 14 /* maximum window shift */
+
+#define TCP_MAXBURST 4 /* maximum segments in a burst */
+
+#define TCP_MAXHLEN (0xf<<2) /* max length of header in bytes */
+#define TCP_MAXOLEN (TCP_MAXHLEN - sizeof(struct tcphdr))
+ /* max space left for options */
+#endif /* __BSD_VISIBLE */
+
+/*
+ * User-settable options (used with setsockopt).
+ */
+#define TCP_NODELAY 0x01 /* don't delay send to coalesce packets */
+#if __BSD_VISIBLE
+#define TCP_MAXSEG 0x02 /* set maximum segment size */
+#define TCP_NOPUSH 0x04 /* don't push last block of write */
+#define TCP_NOOPT 0x08 /* don't use TCP options */
+#define TCP_MD5SIG 0x10 /* use MD5 digests (RFC2385) */
+#define TCP_INFO 0x20 /* retrieve tcp_info structure */
+#define TCP_CONGESTION 0x40 /* get/set congestion control algorithm */
+
+#define TCP_CA_NAME_MAX 16 /* max congestion control name length */
+
+#define TCPI_OPT_TIMESTAMPS 0x01
+#define TCPI_OPT_SACK 0x02
+#define TCPI_OPT_WSCALE 0x04
+#define TCPI_OPT_ECN 0x08
+#define TCPI_OPT_TOE 0x10
+
+/*
+ * The TCP_INFO socket option comes from the Linux 2.6 TCP API, and permits
+ * the caller to query certain information about the state of a TCP
+ * connection. We provide an overlapping set of fields with the Linux
+ * implementation, but since this is a fixed size structure, room has been
+ * left for growth. In order to maximize potential future compatibility with
+ * the Linux API, the same variable names and order have been adopted, and
+ * padding left to make room for omitted fields in case they are added later.
+ *
+ * XXX: This is currently an unstable ABI/API, in that it is expected to
+ * change.
+ */
+struct tcp_info {
+ u_int8_t tcpi_state; /* TCP FSM state. */
+ u_int8_t __tcpi_ca_state;
+ u_int8_t __tcpi_retransmits;
+ u_int8_t __tcpi_probes;
+ u_int8_t __tcpi_backoff;
+ u_int8_t tcpi_options; /* Options enabled on conn. */
+ u_int8_t tcpi_snd_wscale:4, /* RFC1323 send shift value. */
+ tcpi_rcv_wscale:4; /* RFC1323 recv shift value. */
+
+ u_int32_t tcpi_rto; /* Retransmission timeout (usec). */
+ u_int32_t __tcpi_ato;
+ u_int32_t tcpi_snd_mss; /* Max segment size for send. */
+ u_int32_t tcpi_rcv_mss; /* Max segment size for receive. */
+
+ u_int32_t __tcpi_unacked;
+ u_int32_t __tcpi_sacked;
+ u_int32_t __tcpi_lost;
+ u_int32_t __tcpi_retrans;
+ u_int32_t __tcpi_fackets;
+
+ /* Times; measurements in usecs. */
+ u_int32_t __tcpi_last_data_sent;
+ u_int32_t __tcpi_last_ack_sent; /* Also unimpl. on Linux? */
+ u_int32_t tcpi_last_data_recv; /* Time since last recv data. */
+ u_int32_t __tcpi_last_ack_recv;
+
+ /* Metrics; variable units. */
+ u_int32_t __tcpi_pmtu;
+ u_int32_t __tcpi_rcv_ssthresh;
+ u_int32_t tcpi_rtt; /* Smoothed RTT in usecs. */
+ u_int32_t tcpi_rttvar; /* RTT variance in usecs. */
+ u_int32_t tcpi_snd_ssthresh; /* Slow start threshold. */
+ u_int32_t tcpi_snd_cwnd; /* Send congestion window. */
+ u_int32_t __tcpi_advmss;
+ u_int32_t __tcpi_reordering;
+
+ u_int32_t __tcpi_rcv_rtt;
+ u_int32_t tcpi_rcv_space; /* Advertised recv window. */
+
+ /* FreeBSD extensions to tcp_info. */
+ u_int32_t tcpi_snd_wnd; /* Advertised send window. */
+ u_int32_t tcpi_snd_bwnd; /* Bandwidth send window. */
+ u_int32_t tcpi_snd_nxt; /* Next egress seqno */
+ u_int32_t tcpi_rcv_nxt; /* Next ingress seqno */
+ u_int32_t tcpi_toe_tid; /* HWTID for TOE endpoints */
+
+ /* Padding to grow without breaking ABI. */
+ u_int32_t __tcpi_pad[29]; /* Padding. */
+};
+#endif
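
On FreeBSD, a connected socket can be queried for this structure via getsockopt() at level IPPROTO_TCP with the TCP_INFO option defined above. A minimal user-space sketch (fd is assumed to be a connected TCP socket; only a few of the implemented fields are printed, since the comment above warns the ABI may change):

    #include <sys/socket.h>
    #include <netinet/in.h>
    #include <netinet/tcp.h>
    #include <stdio.h>

    /* Hypothetical helper: dump a few tcp_info fields for a connected fd. */
    static int
    print_tcp_info(int fd)
    {
        struct tcp_info ti;
        socklen_t len = sizeof(ti);

        if (getsockopt(fd, IPPROTO_TCP, TCP_INFO, &ti, &len) == -1)
            return (-1);
        printf("rtt=%uus rttvar=%uus snd_cwnd=%u snd_mss=%u\n",
            ti.tcpi_rtt, ti.tcpi_rttvar, ti.tcpi_snd_cwnd, ti.tcpi_snd_mss);
        return (0);
    }
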
+
+#endif /* !_NETINET_TCP_HH_ */
diff --git a/rtems/freebsd/netinet/tcp_debug.c b/rtems/freebsd/netinet/tcp_debug.c
new file mode 100644
index 00000000..4b2b425f
--- /dev/null
+++ b/rtems/freebsd/netinet/tcp_debug.c
@@ -0,0 +1,226 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 1982, 1986, 1993
+ * The Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)tcp_debug.c 8.1 (Berkeley) 6/10/93
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/freebsd/local/opt_inet.h>
+#include <rtems/freebsd/local/opt_inet6.h>
+#include <rtems/freebsd/local/opt_tcpdebug.h>
+
+#ifdef TCPDEBUG
+/* load symbolic names */
+#define PRUREQUESTS
+#define TCPSTATES
+#define TCPTIMERS
+#define TANAMES
+#endif
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/mutex.h>
+#include <rtems/freebsd/sys/protosw.h>
+#include <rtems/freebsd/sys/socket.h>
+
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/in_systm.h>
+#include <rtems/freebsd/netinet/ip.h>
+#ifdef INET6
+#include <rtems/freebsd/netinet/ip6.h>
+#endif
+#include <rtems/freebsd/netinet/ip_var.h>
+#include <rtems/freebsd/netinet/tcp.h>
+#include <rtems/freebsd/netinet/tcp_fsm.h>
+#include <rtems/freebsd/netinet/tcp_timer.h>
+#include <rtems/freebsd/netinet/tcp_var.h>
+#include <rtems/freebsd/netinet/tcpip.h>
+#include <rtems/freebsd/netinet/tcp_debug.h>
+
+#ifdef TCPDEBUG
+static int tcpconsdebug = 0;
+#endif
+
+/*
+ * Global ring buffer of TCP debugging state. Each entry captures a snapshot
+ * of TCP connection state at any given moment. tcp_debx indexes the
+ * next available slot. There is no explicit export of this data structure;
+ * it will be read via /dev/kmem by debugging tools.
+ */
+static struct tcp_debug tcp_debug[TCP_NDEBUG];
+static int tcp_debx;
+
+/*
+ * All global state is protected by tcp_debug_mtx; tcp_trace() is split into
+ * two parts, one of which saves connection and other state into the global
+ * array (locked by tcp_debug_mtx).
+ */
+struct mtx tcp_debug_mtx;
+MTX_SYSINIT(tcp_debug_mtx, &tcp_debug_mtx, "tcp_debug_mtx", MTX_DEF);
+
+/*
+ * Save TCP state at a given moment; optionally, both tcpcb and TCP packet
+ * header state will be saved.
+ */
+void
+tcp_trace(short act, short ostate, struct tcpcb *tp, void *ipgen,
+ struct tcphdr *th, int req)
+{
+#ifdef INET6
+ int isipv6;
+#endif /* INET6 */
+ tcp_seq seq, ack;
+ int len, flags;
+ struct tcp_debug *td;
+
+ mtx_lock(&tcp_debug_mtx);
+ td = &tcp_debug[tcp_debx++];
+ if (tcp_debx == TCP_NDEBUG)
+ tcp_debx = 0;
+ bzero(td, sizeof(*td));
+#ifdef INET6
+ isipv6 = (ipgen != NULL && ((struct ip *)ipgen)->ip_v == 6) ? 1 : 0;
+#endif /* INET6 */
+ td->td_family =
+#ifdef INET6
+ (isipv6 != 0) ? AF_INET6 :
+#endif
+ AF_INET;
+#ifdef INET
+ td->td_time = iptime();
+#endif
+ td->td_act = act;
+ td->td_ostate = ostate;
+ td->td_tcb = (caddr_t)tp;
+ if (tp != NULL)
+ td->td_cb = *tp;
+ if (ipgen != NULL) {
+ switch (td->td_family) {
+#ifdef INET
+ case AF_INET:
+ bcopy(ipgen, &td->td_ti.ti_i, sizeof(td->td_ti.ti_i));
+ break;
+#endif
+#ifdef INET6
+ case AF_INET6:
+ bcopy(ipgen, td->td_ip6buf, sizeof(td->td_ip6buf));
+ break;
+#endif
+ }
+ }
+ if (th != NULL) {
+ switch (td->td_family) {
+#ifdef INET
+ case AF_INET:
+ td->td_ti.ti_t = *th;
+ break;
+#endif
+#ifdef INET6
+ case AF_INET6:
+ td->td_ti6.th = *th;
+ break;
+#endif
+ }
+ }
+ td->td_req = req;
+ mtx_unlock(&tcp_debug_mtx);
+#ifdef TCPDEBUG
+ if (tcpconsdebug == 0)
+ return;
+ if (tp != NULL)
+ printf("%p %s:", tp, tcpstates[ostate]);
+ else
+ printf("???????? ");
+ printf("%s ", tanames[act]);
+ switch (act) {
+ case TA_INPUT:
+ case TA_OUTPUT:
+ case TA_DROP:
+ if (ipgen == NULL || th == NULL)
+ break;
+ seq = th->th_seq;
+ ack = th->th_ack;
+ len =
+#ifdef INET6
+ isipv6 ? ntohs(((struct ip6_hdr *)ipgen)->ip6_plen) :
+#endif
+ ((struct ip *)ipgen)->ip_len;
+ if (act == TA_OUTPUT) {
+ seq = ntohl(seq);
+ ack = ntohl(ack);
+ len = ntohs((u_short)len);
+ }
+ if (act == TA_OUTPUT)
+ len -= sizeof (struct tcphdr);
+ if (len)
+ printf("[%x..%x)", seq, seq+len);
+ else
+ printf("%x", seq);
+ printf("@%x, urp=%x", ack, th->th_urp);
+ flags = th->th_flags;
+ if (flags) {
+ char *cp = "<";
+#define pf(f) { \
+ if (th->th_flags & TH_##f) { \
+ printf("%s%s", cp, #f); \
+ cp = ","; \
+ } \
+}
+ pf(SYN); pf(ACK); pf(FIN); pf(RST); pf(PUSH); pf(URG);
+ printf(">");
+ }
+ break;
+
+ case TA_USER:
+ printf("%s", prurequests[req&0xff]);
+ if ((req & 0xff) == PRU_SLOWTIMO)
+ printf("<%s>", tcptimers[req>>8]);
+ break;
+ }
+ if (tp != NULL)
+ printf(" -> %s", tcpstates[tp->t_state]);
+ /* print out internal state of tp !?! */
+ printf("\n");
+ if (tp == NULL)
+ return;
+ printf(
+ "\trcv_(nxt,wnd,up) (%lx,%lx,%lx) snd_(una,nxt,max) (%lx,%lx,%lx)\n",
+ (u_long)tp->rcv_nxt, tp->rcv_wnd, (u_long)tp->rcv_up,
+ (u_long)tp->snd_una, (u_long)tp->snd_nxt, (u_long)tp->snd_max);
+ printf("\tsnd_(wl1,wl2,wnd) (%lx,%lx,%lx)\n",
+ (u_long)tp->snd_wl1, (u_long)tp->snd_wl2, tp->snd_wnd);
+#endif /* TCPDEBUG */
+}
diff --git a/rtems/freebsd/netinet/tcp_debug.h b/rtems/freebsd/netinet/tcp_debug.h
new file mode 100644
index 00000000..0c103958
--- /dev/null
+++ b/rtems/freebsd/netinet/tcp_debug.h
@@ -0,0 +1,80 @@
+/*-
+ * Copyright (c) 1982, 1986, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)tcp_debug.h 8.1 (Berkeley) 6/10/93
+ * $FreeBSD$
+ */
+
+#ifndef _NETINET_TCP_DEBUG_HH_
+#define _NETINET_TCP_DEBUG_HH_
+
+struct tcp_debug {
+ uint32_t td_time; /* network format */
+ short td_act;
+ short td_ostate;
+ caddr_t td_tcb;
+ int td_family;
+ /*
+ * Co-existence of td_ti and td_ti6 below is ugly, but it is necessary
+ * to achieve backward compatibility to some extent.
+ */
+ struct tcpiphdr td_ti;
+ struct {
+#define IP6_HDR_LEN 40 /* sizeof(struct ip6_hdr) */
+#if !defined(_KERNEL) && defined(INET6)
+ struct ip6_hdr ip6;
+#else
+ u_char ip6buf[IP6_HDR_LEN];
+#endif
+ struct tcphdr th;
+ } td_ti6;
+#define td_ip6buf td_ti6.ip6buf
+ short td_req;
+ struct tcpcb td_cb;
+};
+
+#define TA_INPUT 0
+#define TA_OUTPUT 1
+#define TA_USER 2
+#define TA_RESPOND 3
+#define TA_DROP 4
+
+#ifdef TANAMES
+static const char *tanames[] =
+ { "input", "output", "user", "respond", "drop" };
+#endif
+
+#define TCP_NDEBUG 100
+
+#ifndef _KERNEL
+/* XXX common variables for broken applications. */
+struct tcp_debug tcp_debug[TCP_NDEBUG];
+int tcp_debx;
+#endif
+
+#endif /* !_NETINET_TCP_DEBUG_HH_ */
diff --git a/rtems/freebsd/netinet/tcp_fsm.h b/rtems/freebsd/netinet/tcp_fsm.h
new file mode 100644
index 00000000..253e53d4
--- /dev/null
+++ b/rtems/freebsd/netinet/tcp_fsm.h
@@ -0,0 +1,112 @@
+/*-
+ * Copyright (c) 1982, 1986, 1993
+ * The Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)tcp_fsm.h 8.1 (Berkeley) 6/10/93
+ * $FreeBSD$
+ */
+
+#ifndef _NETINET_TCP_FSM_HH_
+#define _NETINET_TCP_FSM_HH_
+
+/*
+ * TCP FSM state definitions.
+ *
+ * Per RFC793, September, 1981.
+ */
+
+#define TCP_NSTATES 11
+
+#define TCPS_CLOSED 0 /* closed */
+#define TCPS_LISTEN 1 /* listening for connection */
+#define TCPS_SYN_SENT 2 /* active, have sent syn */
+#define TCPS_SYN_RECEIVED 3 /* have sent and received syn */
+/* states < TCPS_ESTABLISHED are those where connections are not established */
+#define TCPS_ESTABLISHED 4 /* established */
+#define TCPS_CLOSE_WAIT 5 /* rcvd fin, waiting for close */
+/* states > TCPS_CLOSE_WAIT are those where user has closed */
+#define TCPS_FIN_WAIT_1 6 /* have closed, sent fin */
+#define TCPS_CLOSING 7 /* closed xchd FIN; await FIN ACK */
+#define TCPS_LAST_ACK 8 /* had fin and close; await FIN ACK */
+/* states > TCPS_CLOSE_WAIT && < TCPS_FIN_WAIT_2 await ACK of FIN */
+#define TCPS_FIN_WAIT_2 9 /* have closed, fin is acked */
+#define TCPS_TIME_WAIT 10 /* in 2*msl quiet wait after close */
+
+/* for KAME src sync over BSD*'s */
+#define TCP6_NSTATES TCP_NSTATES
+#define TCP6S_CLOSED TCPS_CLOSED
+#define TCP6S_LISTEN TCPS_LISTEN
+#define TCP6S_SYN_SENT TCPS_SYN_SENT
+#define TCP6S_SYN_RECEIVED TCPS_SYN_RECEIVED
+#define TCP6S_ESTABLISHED TCPS_ESTABLISHED
+#define TCP6S_CLOSE_WAIT TCPS_CLOSE_WAIT
+#define TCP6S_FIN_WAIT_1 TCPS_FIN_WAIT_1
+#define TCP6S_CLOSING TCPS_CLOSING
+#define TCP6S_LAST_ACK TCPS_LAST_ACK
+#define TCP6S_FIN_WAIT_2 TCPS_FIN_WAIT_2
+#define TCP6S_TIME_WAIT TCPS_TIME_WAIT
+
+#define TCPS_HAVERCVDSYN(s) ((s) >= TCPS_SYN_RECEIVED)
+#define TCPS_HAVEESTABLISHED(s) ((s) >= TCPS_ESTABLISHED)
+#define TCPS_HAVERCVDFIN(s) ((s) >= TCPS_TIME_WAIT)
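
These predicates rely purely on the numeric ordering of the state values above, so each is a single integer comparison. For example, for a connection in CLOSE_WAIT:

    /* Illustration only; the values come from the definitions above. */
    int s = TCPS_CLOSE_WAIT;                      /* 5 */
    int has_syn = TCPS_HAVERCVDSYN(s);            /* (5 >= 3)  -> 1 */
    int established = TCPS_HAVEESTABLISHED(s);    /* (5 >= 4)  -> 1 */
    int fin_done = TCPS_HAVERCVDFIN(s);           /* (5 >= 10) -> 0 by this test */
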
+
+#ifdef TCPOUTFLAGS
+/*
+ * Flags used when sending segments in tcp_output. Basic flags (TH_RST,
+ * TH_ACK, TH_SYN, TH_FIN) are totally determined by state, with the proviso
+ * that TH_FIN is sent only if all data queued for output is included in the
+ * segment.
+ */
+static u_char tcp_outflags[TCP_NSTATES] = {
+ TH_RST|TH_ACK, /* 0, CLOSED */
+ 0, /* 1, LISTEN */
+ TH_SYN, /* 2, SYN_SENT */
+ TH_SYN|TH_ACK, /* 3, SYN_RECEIVED */
+ TH_ACK, /* 4, ESTABLISHED */
+ TH_ACK, /* 5, CLOSE_WAIT */
+ TH_FIN|TH_ACK, /* 6, FIN_WAIT_1 */
+ TH_FIN|TH_ACK, /* 7, CLOSING */
+ TH_FIN|TH_ACK, /* 8, LAST_ACK */
+ TH_ACK, /* 9, FIN_WAIT_2 */
+ TH_ACK, /* 10, TIME_WAIT */
+};
+#endif
+
+#ifdef KPROF
+int tcp_acounts[TCP_NSTATES][PRU_NREQ];
+#endif
+
+#ifdef TCPSTATES
+static char const * const tcpstates[] = {
+ "CLOSED", "LISTEN", "SYN_SENT", "SYN_RCVD",
+ "ESTABLISHED", "CLOSE_WAIT", "FIN_WAIT_1", "CLOSING",
+ "LAST_ACK", "FIN_WAIT_2", "TIME_WAIT",
+};
+#endif
+
+#endif
diff --git a/rtems/freebsd/netinet/tcp_hostcache.h b/rtems/freebsd/netinet/tcp_hostcache.h
new file mode 100644
index 00000000..a494ed03
--- /dev/null
+++ b/rtems/freebsd/netinet/tcp_hostcache.h
@@ -0,0 +1,82 @@
+/*-
+ * Copyright (c) 2002 Andre Oppermann, Internet Business Solutions AG
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * Many thanks to jlemon for the basic structure of tcp_syncache, which is
+ * followed here.
+ */
+
+#ifndef _NETINET_TCP_HOSTCACHE_HH_
+#define _NETINET_TCP_HOSTCACHE_HH_
+
+TAILQ_HEAD(hc_qhead, hc_metrics);
+
+struct hc_head {
+ struct hc_qhead hch_bucket;
+ u_int hch_length;
+ struct mtx hch_mtx;
+};
+
+struct hc_metrics {
+ /* housekeeping */
+ TAILQ_ENTRY(hc_metrics) rmx_q;
+ struct hc_head *rmx_head; /* head of bucket tail queue */
+ struct in_addr ip4; /* IP address */
+ struct in6_addr ip6; /* IP6 address */
+ /* endpoint specific values for tcp */
+ u_long rmx_mtu; /* MTU for this path */
+ u_long rmx_ssthresh; /* outbound gateway buffer limit */
+ u_long rmx_rtt; /* estimated round trip time */
+ u_long rmx_rttvar; /* estimated rtt variance */
+ u_long rmx_bandwidth; /* estimated bandwidth */
+ u_long rmx_cwnd; /* congestion window */
+ u_long rmx_sendpipe; /* outbound delay-bandwidth product */
+ u_long rmx_recvpipe; /* inbound delay-bandwidth product */
+ /* TCP hostcache internal data */
+ int rmx_expire; /* lifetime for object */
+ u_long rmx_hits; /* number of hits */
+ u_long rmx_updates; /* number of updates */
+};
+
+struct tcp_hostcache {
+ struct hc_head *hashbase;
+ uma_zone_t zone;
+ u_int hashsize;
+ u_int hashmask;
+ u_int bucket_limit;
+ u_int cache_count;
+ u_int cache_limit;
+ int expire;
+ int prune;
+ int purgeall;
+};
+
+#endif /* !_NETINET_TCP_HOSTCACHE_HH_*/
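
Lookup against these structures is a conventional hash-bucket walk: mask a hash of the address with hashmask to pick a hc_head, take its hch_mtx, and scan the TAILQ for a matching entry. A hedged kernel-context sketch of the IPv4 path (the hash here is illustrative, not the mixing tcp_hostcache.c actually uses):

    /* Illustrative lookup under the structures above (kernel context). */
    static struct hc_metrics *
    hc_lookup_v4(struct tcp_hostcache *hc, struct in_addr ip4)
    {
        struct hc_head *head;
        struct hc_metrics *hcm;

        /* Hypothetical hash; pick the bucket via hashmask. */
        head = &hc->hashbase[ntohl(ip4.s_addr) & hc->hashmask];

        mtx_lock(&head->hch_mtx);
        TAILQ_FOREACH(hcm, &head->hch_bucket, rmx_q) {
            if (hcm->ip4.s_addr == ip4.s_addr) {
                hcm->rmx_hits++;
                break;
            }
        }
        /* NB: the real code keeps hch_mtx held while the entry is used. */
        mtx_unlock(&head->hch_mtx);
        return (hcm);               /* NULL if the bucket had no match */
    }
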
diff --git a/rtems/freebsd/netinet/tcp_input.c b/rtems/freebsd/netinet/tcp_input.c
new file mode 100644
index 00000000..e8454718
--- /dev/null
+++ b/rtems/freebsd/netinet/tcp_input.c
@@ -0,0 +1,3453 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1994, 1995
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)tcp_input.c 8.12 (Berkeley) 5/24/95
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/freebsd/local/opt_ipfw.h> /* for ipfw_fwd */
+#include <rtems/freebsd/local/opt_inet.h>
+#include <rtems/freebsd/local/opt_inet6.h>
+#include <rtems/freebsd/local/opt_ipsec.h>
+#include <rtems/freebsd/local/opt_tcpdebug.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/proc.h> /* for proc0 declaration */
+#include <rtems/freebsd/sys/protosw.h>
+#include <rtems/freebsd/sys/signalvar.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/socketvar.h>
+#include <rtems/freebsd/sys/sysctl.h>
+#include <rtems/freebsd/sys/syslog.h>
+#include <rtems/freebsd/sys/systm.h>
+
+#include <rtems/freebsd/machine/cpu.h> /* before tcp_seq.h, for tcp_random18() */
+
+#include <rtems/freebsd/vm/uma.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/route.h>
+#include <rtems/freebsd/net/vnet.h>
+
+#define TCPSTATES /* for logging */
+
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/in_pcb.h>
+#include <rtems/freebsd/netinet/in_systm.h>
+#include <rtems/freebsd/netinet/in_var.h>
+#include <rtems/freebsd/netinet/ip.h>
+#include <rtems/freebsd/netinet/ip_icmp.h> /* required for icmp_var.h */
+#include <rtems/freebsd/netinet/icmp_var.h> /* for ICMP_BANDLIM */
+#include <rtems/freebsd/netinet/ip_var.h>
+#include <rtems/freebsd/netinet/ip_options.h>
+#include <rtems/freebsd/netinet/ip6.h>
+#include <rtems/freebsd/netinet/icmp6.h>
+#include <rtems/freebsd/netinet6/in6_pcb.h>
+#include <rtems/freebsd/netinet6/ip6_var.h>
+#include <rtems/freebsd/netinet6/nd6.h>
+#include <rtems/freebsd/netinet/tcp.h>
+#include <rtems/freebsd/netinet/tcp_fsm.h>
+#include <rtems/freebsd/netinet/tcp_seq.h>
+#include <rtems/freebsd/netinet/tcp_timer.h>
+#include <rtems/freebsd/netinet/tcp_var.h>
+#include <rtems/freebsd/netinet6/tcp6_var.h>
+#include <rtems/freebsd/netinet/tcpip.h>
+#include <rtems/freebsd/netinet/tcp_syncache.h>
+#ifdef TCPDEBUG
+#include <rtems/freebsd/netinet/tcp_debug.h>
+#endif /* TCPDEBUG */
+
+#ifdef IPSEC
+#include <rtems/freebsd/netipsec/ipsec.h>
+#include <rtems/freebsd/netipsec/ipsec6.h>
+#endif /*IPSEC*/
+
+#include <rtems/freebsd/machine/in_cksum.h>
+
+#include <rtems/freebsd/security/mac/mac_framework.h>
+
+static const int tcprexmtthresh = 3;
+
+VNET_DEFINE(struct tcpstat, tcpstat);
+SYSCTL_VNET_STRUCT(_net_inet_tcp, TCPCTL_STATS, stats, CTLFLAG_RW,
+ &VNET_NAME(tcpstat), tcpstat,
+ "TCP statistics (struct tcpstat, netinet/tcp_var.h)");
+
+int tcp_log_in_vain = 0;
+SYSCTL_INT(_net_inet_tcp, OID_AUTO, log_in_vain, CTLFLAG_RW,
+ &tcp_log_in_vain, 0,
+ "Log all incoming TCP segments to closed ports");
+
+VNET_DEFINE(int, blackhole) = 0;
+#define V_blackhole VNET(blackhole)
+SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, blackhole, CTLFLAG_RW,
+ &VNET_NAME(blackhole), 0,
+ "Do not send RST on segments to closed ports");
+
+VNET_DEFINE(int, tcp_delack_enabled) = 1;
+SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, delayed_ack, CTLFLAG_RW,
+ &VNET_NAME(tcp_delack_enabled), 0,
+ "Delay ACK to try and piggyback it onto a data packet");
+
+VNET_DEFINE(int, drop_synfin) = 0;
+#define V_drop_synfin VNET(drop_synfin)
+SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, drop_synfin, CTLFLAG_RW,
+ &VNET_NAME(drop_synfin), 0,
+ "Drop TCP packets with SYN+FIN set");
+
+VNET_DEFINE(int, tcp_do_rfc3042) = 1;
+#define V_tcp_do_rfc3042 VNET(tcp_do_rfc3042)
+SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, rfc3042, CTLFLAG_RW,
+ &VNET_NAME(tcp_do_rfc3042), 0,
+ "Enable RFC 3042 (Limited Transmit)");
+
+VNET_DEFINE(int, tcp_do_rfc3390) = 1;
+#define V_tcp_do_rfc3390 VNET(tcp_do_rfc3390)
+SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, rfc3390, CTLFLAG_RW,
+ &VNET_NAME(tcp_do_rfc3390), 0,
+ "Enable RFC 3390 (Increasing TCP's Initial Congestion Window)");
+
+VNET_DEFINE(int, tcp_do_rfc3465) = 1;
+#define V_tcp_do_rfc3465 VNET(tcp_do_rfc3465)
+SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, rfc3465, CTLFLAG_RW,
+ &VNET_NAME(tcp_do_rfc3465), 0,
+ "Enable RFC 3465 (Appropriate Byte Counting)");
+
+VNET_DEFINE(int, tcp_abc_l_var) = 2;
+#define V_tcp_abc_l_var VNET(tcp_abc_l_var)
+SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, abc_l_var, CTLFLAG_RW,
+ &VNET_NAME(tcp_abc_l_var), 2,
+ "Cap the max cwnd increment during slow-start to this number of segments");
+
+SYSCTL_NODE(_net_inet_tcp, OID_AUTO, ecn, CTLFLAG_RW, 0, "TCP ECN");
+
+VNET_DEFINE(int, tcp_do_ecn) = 0;
+SYSCTL_VNET_INT(_net_inet_tcp_ecn, OID_AUTO, enable, CTLFLAG_RW,
+ &VNET_NAME(tcp_do_ecn), 0,
+ "TCP ECN support");
+
+VNET_DEFINE(int, tcp_ecn_maxretries) = 1;
+SYSCTL_VNET_INT(_net_inet_tcp_ecn, OID_AUTO, maxretries, CTLFLAG_RW,
+ &VNET_NAME(tcp_ecn_maxretries), 0,
+ "Max retries before giving up on ECN");
+
+VNET_DEFINE(int, tcp_insecure_rst) = 0;
+#define V_tcp_insecure_rst VNET(tcp_insecure_rst)
+SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, insecure_rst, CTLFLAG_RW,
+ &VNET_NAME(tcp_insecure_rst), 0,
+ "Follow the old (insecure) criteria for accepting RST packets");
+
+VNET_DEFINE(int, tcp_do_autorcvbuf) = 1;
+#define V_tcp_do_autorcvbuf VNET(tcp_do_autorcvbuf)
+SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, recvbuf_auto, CTLFLAG_RW,
+ &VNET_NAME(tcp_do_autorcvbuf), 0,
+ "Enable automatic receive buffer sizing");
+
+VNET_DEFINE(int, tcp_autorcvbuf_inc) = 16*1024;
+#define V_tcp_autorcvbuf_inc VNET(tcp_autorcvbuf_inc)
+SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, recvbuf_inc, CTLFLAG_RW,
+ &VNET_NAME(tcp_autorcvbuf_inc), 0,
+ "Incrementor step size of automatic receive buffer");
+
+VNET_DEFINE(int, tcp_autorcvbuf_max) = 256*1024;
+#define V_tcp_autorcvbuf_max VNET(tcp_autorcvbuf_max)
+SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, recvbuf_max, CTLFLAG_RW,
+ &VNET_NAME(tcp_autorcvbuf_max), 0,
+ "Max size of automatic receive buffer");
+
+int tcp_read_locking = 1;
+SYSCTL_INT(_net_inet_tcp, OID_AUTO, read_locking, CTLFLAG_RW,
+ &tcp_read_locking, 0, "Enable read locking strategy");
+
+VNET_DEFINE(struct inpcbhead, tcb);
+#define tcb6 tcb /* for KAME src sync over BSD*'s */
+VNET_DEFINE(struct inpcbinfo, tcbinfo);
+
+static void tcp_dooptions(struct tcpopt *, u_char *, int, int);
+static void tcp_do_segment(struct mbuf *, struct tcphdr *,
+ struct socket *, struct tcpcb *, int, int, uint8_t,
+ int);
+static void tcp_dropwithreset(struct mbuf *, struct tcphdr *,
+ struct tcpcb *, int, int);
+static void tcp_pulloutofband(struct socket *,
+ struct tcphdr *, struct mbuf *, int);
+static void tcp_xmit_timer(struct tcpcb *, int);
+static void tcp_newreno_partial_ack(struct tcpcb *, struct tcphdr *);
+static void inline
+ tcp_congestion_exp(struct tcpcb *);
+
+/*
+ * Kernel module interface for updating tcpstat. The argument is an index
+ * into tcpstat treated as an array of u_long. While this encodes the
+ * general layout of tcpstat into the caller, it doesn't encode its location,
+ * so that future changes to add, for example, per-CPU stats support won't
+ * cause binary compatibility problems for kernel modules.
+ */
+void
+kmod_tcpstat_inc(int statnum)
+{
+
+ (*((u_long *)&V_tcpstat + statnum))++;
+}
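
Given that the index is a u_long offset into the structure, a module-side wrapper would plausibly derive it with offsetof(); a sketch of that pattern (tcp_var.h provides the real module-facing TCPSTAT_INC macro):

    /* Module-side sketch: bump a tcpstat field via its u_long index. */
    #define KMOD_TCPSTAT_INC(name) \
        kmod_tcpstat_inc(offsetof(struct tcpstat, name) / sizeof(u_long))

    /* Usage: KMOD_TCPSTAT_INC(tcps_rcvtotal); */
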
+
+static void inline
+tcp_congestion_exp(struct tcpcb *tp)
+{
+ u_int win;
+
+ win = min(tp->snd_wnd, tp->snd_cwnd) /
+ 2 / tp->t_maxseg;
+ if (win < 2)
+ win = 2;
+ tp->snd_ssthresh = win * tp->t_maxseg;
+ ENTER_FASTRECOVERY(tp);
+ tp->snd_recover = tp->snd_max;
+ if (tp->t_flags & TF_ECN_PERMIT)
+ tp->t_flags |= TF_ECN_SND_CWR;
+}
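
Concretely: with snd_wnd = 65535, snd_cwnd = 32768, and t_maxseg = 1460, the window term is min(65535, 32768) / 2 / 1460 = 11 segments, so snd_ssthresh becomes 11 * 1460 = 16060 bytes; the clamp to 2 segments only matters for very small windows. A standalone replay of the arithmetic:

    #include <stdio.h>

    /* Replays the ssthresh computation from tcp_congestion_exp(). */
    int
    main(void)
    {
        unsigned snd_wnd = 65535, snd_cwnd = 32768, t_maxseg = 1460;
        unsigned win;

        win = (snd_wnd < snd_cwnd ? snd_wnd : snd_cwnd) / 2 / t_maxseg;
        if (win < 2)
            win = 2;
        printf("ssthresh = %u\n", win * t_maxseg);   /* prints 16060 */
        return (0);
    }
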
+
+/* Neighbor Discovery, Neighbor Unreachability Detection Upper layer hint. */
+#ifdef INET6
+#define ND6_HINT(tp) \
+do { \
+ if ((tp) && (tp)->t_inpcb && \
+ ((tp)->t_inpcb->inp_vflag & INP_IPV6) != 0) \
+ nd6_nud_hint(NULL, NULL, 0); \
+} while (0)
+#else
+#define ND6_HINT(tp)
+#endif
+
+/*
+ * Indicate whether this ack should be delayed. We can delay the ack if
+ * - there is no delayed ack timer in progress and
+ * - our last ack wasn't a 0-sized window. We never want to delay
+ * the ack that opens up a 0-sized window and
+ * - delayed acks are enabled or
+ * - this is a half-synchronized T/TCP connection.
+ */
+#define DELAY_ACK(tp) \
+ ((!tcp_timer_active(tp, TT_DELACK) && \
+ (tp->t_flags & TF_RXWIN0SENT) == 0) && \
+ (V_tcp_delack_enabled || (tp->t_flags & TF_NEEDSYN)))
+
+/*
+ * TCP input handling is split into multiple parts:
+ * tcp6_input is a thin wrapper around tcp_input for the extended
+ * ip6_protox[] call format in ip6_input
+ * tcp_input handles primary segment validation, inpcb lookup and
+ * SYN processing on listen sockets
+ * tcp_do_segment processes the ACK and text of the segment for
+ * establishing, established and closing connections
+ */
+#ifdef INET6
+int
+tcp6_input(struct mbuf **mp, int *offp, int proto)
+{
+ struct mbuf *m = *mp;
+ struct in6_ifaddr *ia6;
+
+ IP6_EXTHDR_CHECK(m, *offp, sizeof(struct tcphdr), IPPROTO_DONE);
+
+ /*
+ * draft-itojun-ipv6-tcp-to-anycast
+ * Is there a better place to put this?
+ */
+ ia6 = ip6_getdstifaddr(m);
+ if (ia6 && (ia6->ia6_flags & IN6_IFF_ANYCAST)) {
+ struct ip6_hdr *ip6;
+
+ ifa_free(&ia6->ia_ifa);
+ ip6 = mtod(m, struct ip6_hdr *);
+ icmp6_error(m, ICMP6_DST_UNREACH, ICMP6_DST_UNREACH_ADDR,
+ (caddr_t)&ip6->ip6_dst - (caddr_t)ip6);
+ return IPPROTO_DONE;
+ }
+
+ tcp_input(m, *offp);
+ return IPPROTO_DONE;
+}
+#endif
+
+void
+tcp_input(struct mbuf *m, int off0)
+{
+ struct tcphdr *th;
+ struct ip *ip = NULL;
+ struct ipovly *ipov;
+ struct inpcb *inp = NULL;
+ struct tcpcb *tp = NULL;
+ struct socket *so = NULL;
+ u_char *optp = NULL;
+ int optlen = 0;
+ int len, tlen, off;
+ int drop_hdrlen;
+ int thflags;
+ int rstreason = 0; /* For badport_bandlim accounting purposes */
+ uint8_t iptos;
+#ifdef IPFIREWALL_FORWARD
+ struct m_tag *fwd_tag;
+#endif
+#ifdef INET6
+ struct ip6_hdr *ip6 = NULL;
+ int isipv6;
+#else
+ const void *ip6 = NULL;
+ const int isipv6 = 0;
+#endif
+ struct tcpopt to; /* options in this segment */
+ char *s = NULL; /* address and port logging */
+ int ti_locked;
+#define TI_UNLOCKED 1
+#define TI_RLOCKED 2
+#define TI_WLOCKED 3
+
+#ifdef TCPDEBUG
+ /*
+ * The size of tcp_saveipgen must be the size of the max ip header,
+ * now IPv6.
+ */
+ u_char tcp_saveipgen[IP6_HDR_LEN];
+ struct tcphdr tcp_savetcp;
+ short ostate = 0;
+#endif
+
+#ifdef INET6
+ isipv6 = (mtod(m, struct ip *)->ip_v == 6) ? 1 : 0;
+#endif
+
+ to.to_flags = 0;
+ TCPSTAT_INC(tcps_rcvtotal);
+
+ if (isipv6) {
+#ifdef INET6
+ /* IP6_EXTHDR_CHECK() is already done at tcp6_input(). */
+ ip6 = mtod(m, struct ip6_hdr *);
+ tlen = sizeof(*ip6) + ntohs(ip6->ip6_plen) - off0;
+ if (in6_cksum(m, IPPROTO_TCP, off0, tlen)) {
+ TCPSTAT_INC(tcps_rcvbadsum);
+ goto drop;
+ }
+ th = (struct tcphdr *)((caddr_t)ip6 + off0);
+
+ /*
+ * Be proactive about an unspecified IPv6 address in the source.
+ * As we use all-zero to indicate an unbound/unconnected pcb,
+ * an unspecified IPv6 address can be used to confuse us.
+ *
+ * Note that packets with an unspecified IPv6 destination are
+ * already dropped in ip6_input.
+ */
+ if (IN6_IS_ADDR_UNSPECIFIED(&ip6->ip6_src)) {
+ /* XXX stat */
+ goto drop;
+ }
+#else
+ th = NULL; /* XXX: Avoid compiler warning. */
+#endif
+ } else {
+ /*
+ * Get IP and TCP header together in first mbuf.
+ * Note: IP leaves IP header in first mbuf.
+ */
+ if (off0 > sizeof (struct ip)) {
+ ip_stripoptions(m, (struct mbuf *)0);
+ off0 = sizeof(struct ip);
+ }
+ if (m->m_len < sizeof (struct tcpiphdr)) {
+ if ((m = m_pullup(m, sizeof (struct tcpiphdr)))
+ == NULL) {
+ TCPSTAT_INC(tcps_rcvshort);
+ return;
+ }
+ }
+ ip = mtod(m, struct ip *);
+ ipov = (struct ipovly *)ip;
+ th = (struct tcphdr *)((caddr_t)ip + off0);
+ tlen = ip->ip_len;
+
+ if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) {
+ if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR)
+ th->th_sum = m->m_pkthdr.csum_data;
+ else
+ th->th_sum = in_pseudo(ip->ip_src.s_addr,
+ ip->ip_dst.s_addr,
+ htonl(m->m_pkthdr.csum_data +
+ ip->ip_len +
+ IPPROTO_TCP));
+ th->th_sum ^= 0xffff;
+#ifdef TCPDEBUG
+ ipov->ih_len = (u_short)tlen;
+ ipov->ih_len = htons(ipov->ih_len);
+#endif
+ } else {
+ /*
+ * Checksum extended TCP header and data.
+ */
+ len = sizeof (struct ip) + tlen;
+ bzero(ipov->ih_x1, sizeof(ipov->ih_x1));
+ ipov->ih_len = (u_short)tlen;
+ ipov->ih_len = htons(ipov->ih_len);
+ th->th_sum = in_cksum(m, len);
+ }
+ if (th->th_sum) {
+ TCPSTAT_INC(tcps_rcvbadsum);
+ goto drop;
+ }
+ /* Re-initialization for later version check */
+ ip->ip_v = IPVERSION;
+ }
+
+#ifdef INET6
+ if (isipv6)
+ iptos = (ntohl(ip6->ip6_flow) >> 20) & 0xff;
+ else
+#endif
+ iptos = ip->ip_tos;
+
+ /*
+ * Check that TCP offset makes sense,
+ * pull out TCP options and adjust length. XXX
+ */
+ off = th->th_off << 2;
+ if (off < sizeof (struct tcphdr) || off > tlen) {
+ TCPSTAT_INC(tcps_rcvbadoff);
+ goto drop;
+ }
+ tlen -= off; /* tlen is used instead of ti->ti_len */
+ if (off > sizeof (struct tcphdr)) {
+ if (isipv6) {
+#ifdef INET6
+ IP6_EXTHDR_CHECK(m, off0, off, );
+ ip6 = mtod(m, struct ip6_hdr *);
+ th = (struct tcphdr *)((caddr_t)ip6 + off0);
+#endif
+ } else {
+ if (m->m_len < sizeof(struct ip) + off) {
+ if ((m = m_pullup(m, sizeof (struct ip) + off))
+ == NULL) {
+ TCPSTAT_INC(tcps_rcvshort);
+ return;
+ }
+ ip = mtod(m, struct ip *);
+ ipov = (struct ipovly *)ip;
+ th = (struct tcphdr *)((caddr_t)ip + off0);
+ }
+ }
+ optlen = off - sizeof (struct tcphdr);
+ optp = (u_char *)(th + 1);
+ }
+ thflags = th->th_flags;
+
+ /*
+ * Convert TCP protocol specific fields to host format.
+ */
+ th->th_seq = ntohl(th->th_seq);
+ th->th_ack = ntohl(th->th_ack);
+ th->th_win = ntohs(th->th_win);
+ th->th_urp = ntohs(th->th_urp);
+
+ /*
+ * Delay dropping TCP, IP headers, IPv6 ext headers, and TCP options.
+ */
+ drop_hdrlen = off0 + off;
+
+ /*
+ * Locate pcb for segment, which requires a lock on tcbinfo.
+ * Optimistically acquire a global read lock rather than a write lock
+ * unless header flags necessarily imply a state change. There are
+ * two cases where we might discover later we need a write lock
+ * despite the flags: ACKs moving a connection out of the syncache,
+ * and ACKs for a connection in TIMEWAIT.
+ */
+ if ((thflags & (TH_SYN | TH_FIN | TH_RST)) != 0 ||
+ tcp_read_locking == 0) {
+ INP_INFO_WLOCK(&V_tcbinfo);
+ ti_locked = TI_WLOCKED;
+ } else {
+ INP_INFO_RLOCK(&V_tcbinfo);
+ ti_locked = TI_RLOCKED;
+ }
+
+findpcb:
+#ifdef INVARIANTS
+ if (ti_locked == TI_RLOCKED)
+ INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
+ else if (ti_locked == TI_WLOCKED)
+ INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
+ else
+ panic("%s: findpcb ti_locked %d\n", __func__, ti_locked);
+#endif
+
+#ifdef IPFIREWALL_FORWARD
+ /*
+ * Grab info from PACKET_TAG_IPFORWARD tag prepended to the chain.
+ */
+ fwd_tag = m_tag_find(m, PACKET_TAG_IPFORWARD, NULL);
+
+ if (fwd_tag != NULL && isipv6 == 0) { /* IPv6 support is not yet */
+ struct sockaddr_in *next_hop;
+
+ next_hop = (struct sockaddr_in *)(fwd_tag+1);
+ /*
+ * Transparently forwarded. Pretend to be the destination.
+ * Have we already got one like this?
+ */
+ inp = in_pcblookup_hash(&V_tcbinfo,
+ ip->ip_src, th->th_sport,
+ ip->ip_dst, th->th_dport,
+ 0, m->m_pkthdr.rcvif);
+ if (!inp) {
+ /* It's new. Try to find the ambushing socket. */
+ inp = in_pcblookup_hash(&V_tcbinfo,
+ ip->ip_src, th->th_sport,
+ next_hop->sin_addr,
+ next_hop->sin_port ?
+ ntohs(next_hop->sin_port) :
+ th->th_dport,
+ INPLOOKUP_WILDCARD,
+ m->m_pkthdr.rcvif);
+ }
+ /* Remove the tag from the packet. We don't need it anymore. */
+ m_tag_delete(m, fwd_tag);
+ } else
+#endif /* IPFIREWALL_FORWARD */
+ {
+ if (isipv6) {
+#ifdef INET6
+ inp = in6_pcblookup_hash(&V_tcbinfo,
+ &ip6->ip6_src, th->th_sport,
+ &ip6->ip6_dst, th->th_dport,
+ INPLOOKUP_WILDCARD,
+ m->m_pkthdr.rcvif);
+#endif
+ } else
+ inp = in_pcblookup_hash(&V_tcbinfo,
+ ip->ip_src, th->th_sport,
+ ip->ip_dst, th->th_dport,
+ INPLOOKUP_WILDCARD,
+ m->m_pkthdr.rcvif);
+ }
+
+ /*
+ * If the INPCB does not exist then all data in the incoming
+ * segment is discarded and an appropriate RST is sent back.
+ * XXX MRT Send RST using which routing table?
+ */
+ if (inp == NULL) {
+ /*
+ * Log communication attempts to ports that are not
+ * in use.
+ */
+ if ((tcp_log_in_vain == 1 && (thflags & TH_SYN)) ||
+ tcp_log_in_vain == 2) {
+ if ((s = tcp_log_vain(NULL, th, (void *)ip, ip6)))
+ log(LOG_INFO, "%s; %s: Connection attempt "
+ "to closed port\n", s, __func__);
+ }
+ /*
+ * When blackholing do not respond with a RST but
+ * completely ignore the segment and drop it.
+ */
+ if ((V_blackhole == 1 && (thflags & TH_SYN)) ||
+ V_blackhole == 2)
+ goto dropunlock;
+
+ rstreason = BANDLIM_RST_CLOSEDPORT;
+ goto dropwithreset;
+ }
+ INP_WLOCK(inp);
+ if (!(inp->inp_flags & INP_HW_FLOWID)
+ && (m->m_flags & M_FLOWID)
+ && ((inp->inp_socket == NULL)
+ || !(inp->inp_socket->so_options & SO_ACCEPTCONN))) {
+ inp->inp_flags |= INP_HW_FLOWID;
+ inp->inp_flags &= ~INP_SW_FLOWID;
+ inp->inp_flowid = m->m_pkthdr.flowid;
+ }
+#ifdef IPSEC
+#ifdef INET6
+ if (isipv6 && ipsec6_in_reject(m, inp)) {
+ V_ipsec6stat.in_polvio++;
+ goto dropunlock;
+ } else
+#endif /* INET6 */
+ if (ipsec4_in_reject(m, inp) != 0) {
+ V_ipsec4stat.in_polvio++;
+ goto dropunlock;
+ }
+#endif /* IPSEC */
+
+ /*
+ * Check the minimum TTL for socket.
+ */
+ if (inp->inp_ip_minttl != 0) {
+#ifdef INET6
+ if (isipv6 && inp->inp_ip_minttl > ip6->ip6_hlim)
+ goto dropunlock;
+ else
+#endif
+ if (inp->inp_ip_minttl > ip->ip_ttl)
+ goto dropunlock;
+ }
+
+ /*
+ * A previous connection in TIMEWAIT state is supposed to catch stray
+ * or duplicate segments arriving late. If this segment was a
+ * legitimate new connection attempt the old INPCB gets removed and
+ * we can try again to find a listening socket.
+ *
+ * At this point, due to earlier optimism, we may hold a read lock on
+ * the inpcbinfo, rather than a write lock. If so, we need to
+ * upgrade, or if that fails, acquire a reference on the inpcb, drop
+ * all locks, acquire a global write lock, and then re-acquire the
+ * inpcb lock. We may at that point discover that another thread has
+ * tried to free the inpcb, in which case we need to loop back and
+ * try to find a new inpcb to deliver to.
+ */
+relocked:
+ if (inp->inp_flags & INP_TIMEWAIT) {
+ KASSERT(ti_locked == TI_RLOCKED || ti_locked == TI_WLOCKED,
+ ("%s: INP_TIMEWAIT ti_locked %d", __func__, ti_locked));
+
+ if (ti_locked == TI_RLOCKED) {
+ if (INP_INFO_TRY_UPGRADE(&V_tcbinfo) == 0) {
+ in_pcbref(inp);
+ INP_WUNLOCK(inp);
+ INP_INFO_RUNLOCK(&V_tcbinfo);
+ INP_INFO_WLOCK(&V_tcbinfo);
+ ti_locked = TI_WLOCKED;
+ INP_WLOCK(inp);
+ if (in_pcbrele(inp)) {
+ inp = NULL;
+ goto findpcb;
+ }
+ } else
+ ti_locked = TI_WLOCKED;
+ }
+ INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
+
+ if (thflags & TH_SYN)
+ tcp_dooptions(&to, optp, optlen, TO_SYN);
+ /*
+ * NB: tcp_twcheck unlocks the INP and frees the mbuf.
+ */
+ if (tcp_twcheck(inp, &to, th, m, tlen))
+ goto findpcb;
+ INP_INFO_WUNLOCK(&V_tcbinfo);
+ return;
+ }
+ /*
+ * The TCPCB may no longer exist if the connection is winding
+ * down or it is in the CLOSED state. Either way we drop the
+ * segment and send an appropriate response.
+ */
+ tp = intotcpcb(inp);
+ if (tp == NULL || tp->t_state == TCPS_CLOSED) {
+ rstreason = BANDLIM_RST_CLOSEDPORT;
+ goto dropwithreset;
+ }
+
+ /*
+ * We've identified a valid inpcb, but it could be that we need an
+ * inpcbinfo write lock and have only a read lock. In this case,
+ * attempt to upgrade/relock using the same strategy as the TIMEWAIT
+ * case above. If we relock, we have to jump back to 'relocked' as
+ * the connection might now be in TIMEWAIT.
+ */
+ if (tp->t_state != TCPS_ESTABLISHED ||
+ (thflags & (TH_SYN | TH_FIN | TH_RST)) != 0 ||
+ tcp_read_locking == 0) {
+ KASSERT(ti_locked == TI_RLOCKED || ti_locked == TI_WLOCKED,
+ ("%s: upgrade check ti_locked %d", __func__, ti_locked));
+
+ if (ti_locked == TI_RLOCKED) {
+ if (INP_INFO_TRY_UPGRADE(&V_tcbinfo) == 0) {
+ in_pcbref(inp);
+ INP_WUNLOCK(inp);
+ INP_INFO_RUNLOCK(&V_tcbinfo);
+ INP_INFO_WLOCK(&V_tcbinfo);
+ ti_locked = TI_WLOCKED;
+ INP_WLOCK(inp);
+ if (in_pcbrele(inp)) {
+ inp = NULL;
+ goto findpcb;
+ }
+ goto relocked;
+ } else
+ ti_locked = TI_WLOCKED;
+ }
+ INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
+ }
+
+#ifdef MAC
+ INP_WLOCK_ASSERT(inp);
+ if (mac_inpcb_check_deliver(inp, m))
+ goto dropunlock;
+#endif
+ so = inp->inp_socket;
+ KASSERT(so != NULL, ("%s: so == NULL", __func__));
+#ifdef TCPDEBUG
+ if (so->so_options & SO_DEBUG) {
+ ostate = tp->t_state;
+ if (isipv6) {
+#ifdef INET6
+ bcopy((char *)ip6, (char *)tcp_saveipgen, sizeof(*ip6));
+#endif
+ } else
+ bcopy((char *)ip, (char *)tcp_saveipgen, sizeof(*ip));
+ tcp_savetcp = *th;
+ }
+#endif
+ /*
+ * When the socket is accepting connections (the INPCB is in LISTEN
+ * state) we look into the SYN cache if this is a new connection
+ * attempt or the completion of a previous one.
+ */
+ if (so->so_options & SO_ACCEPTCONN) {
+ struct in_conninfo inc;
+
+ KASSERT(tp->t_state == TCPS_LISTEN, ("%s: so accepting but "
+ "tp not listening", __func__));
+
+ bzero(&inc, sizeof(inc));
+#ifdef INET6
+ if (isipv6) {
+ inc.inc_flags |= INC_ISIPV6;
+ inc.inc6_faddr = ip6->ip6_src;
+ inc.inc6_laddr = ip6->ip6_dst;
+ } else
+#endif
+ {
+ inc.inc_faddr = ip->ip_src;
+ inc.inc_laddr = ip->ip_dst;
+ }
+ inc.inc_fport = th->th_sport;
+ inc.inc_lport = th->th_dport;
+ inc.inc_fibnum = so->so_fibnum;
+
+ /*
+ * Check for an existing connection attempt in syncache if
+ * the flag is only ACK. A successful lookup creates a new
+ * socket appended to the listen queue in SYN_RECEIVED state.
+ */
+ if ((thflags & (TH_RST|TH_ACK|TH_SYN)) == TH_ACK) {
+ /*
+ * Parse the TCP options here because
+ * syncookies need access to the reflected
+ * timestamp.
+ */
+ tcp_dooptions(&to, optp, optlen, 0);
+ /*
+ * NB: syncache_expand() doesn't unlock
+ * inp and tcpinfo locks.
+ */
+ if (!syncache_expand(&inc, &to, th, &so, m)) {
+ /*
+ * No syncache entry or ACK was not
+ * for our SYN/ACK. Send a RST.
+ * NB: syncache did its own logging
+ * of the failure cause.
+ */
+ rstreason = BANDLIM_RST_OPENPORT;
+ goto dropwithreset;
+ }
+ if (so == NULL) {
+ /*
+ * We completed the 3-way handshake
+ * but could not allocate a socket
+ * either due to memory shortage,
+ * listen queue length limits or
+ * global socket limits. Send RST
+ * or wait and have the remote end
+ * retransmit the ACK for another
+ * try.
+ */
+ if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
+ log(LOG_DEBUG, "%s; %s: Listen socket: "
+ "Socket allocation failed due to "
+ "limits or memory shortage, %s\n",
+ s, __func__,
+ V_tcp_sc_rst_sock_fail ?
+ "sending RST" : "try again");
+ if (V_tcp_sc_rst_sock_fail) {
+ rstreason = BANDLIM_UNLIMITED;
+ goto dropwithreset;
+ } else
+ goto dropunlock;
+ }
+ /*
+ * Socket is created in state SYN_RECEIVED.
+ * Unlock the listen socket, lock the newly
+ * created socket and update the tp variable.
+ */
+ INP_WUNLOCK(inp); /* listen socket */
+ inp = sotoinpcb(so);
+ INP_WLOCK(inp); /* new connection */
+ tp = intotcpcb(inp);
+ KASSERT(tp->t_state == TCPS_SYN_RECEIVED,
+ ("%s: ", __func__));
+ /*
+ * Process the segment and the data it
+ * contains. tcp_do_segment() consumes
+ * the mbuf chain and unlocks the inpcb.
+ */
+ tcp_do_segment(m, th, so, tp, drop_hdrlen, tlen,
+ iptos, ti_locked);
+ INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
+ return;
+ }
+ /*
+ * Segment flag validation for new connection attempts:
+ *
+ * Our (SYN|ACK) response was rejected.
+ * Check with syncache and remove entry to prevent
+ * retransmits.
+ *
+ * NB: syncache_chkrst does its own logging of failure
+ * causes.
+ */
+ if (thflags & TH_RST) {
+ syncache_chkrst(&inc, th);
+ goto dropunlock;
+ }
+ /*
+ * We can't do anything without SYN.
+ */
+ if ((thflags & TH_SYN) == 0) {
+ if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
+ log(LOG_DEBUG, "%s; %s: Listen socket: "
+ "SYN is missing, segment ignored\n",
+ s, __func__);
+ TCPSTAT_INC(tcps_badsyn);
+ goto dropunlock;
+ }
+ /*
+ * (SYN|ACK) is bogus on a listen socket.
+ */
+ if (thflags & TH_ACK) {
+ if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
+ log(LOG_DEBUG, "%s; %s: Listen socket: "
+ "SYN|ACK invalid, segment rejected\n",
+ s, __func__);
+ syncache_badack(&inc); /* XXX: Not needed! */
+ TCPSTAT_INC(tcps_badsyn);
+ rstreason = BANDLIM_RST_OPENPORT;
+ goto dropwithreset;
+ }
+ /*
+ * If the drop_synfin option is enabled, drop all
+ * segments with both the SYN and FIN bits set.
+ * This prevents e.g. nmap from identifying the
+ * TCP/IP stack.
+ * XXX: Poor reasoning. nmap has other methods
+ * and is constantly refining its stack detection
+ * strategies.
+ * XXX: This is a violation of the TCP specification
+ * and was used by RFC1644.
+ */
+ if ((thflags & TH_FIN) && V_drop_synfin) {
+ if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
+ log(LOG_DEBUG, "%s; %s: Listen socket: "
+ "SYN|FIN segment ignored (based on "
+ "sysctl setting)\n", s, __func__);
+ TCPSTAT_INC(tcps_badsyn);
+ goto dropunlock;
+ }
+ /*
+ * Segment's flags are (SYN) or (SYN|FIN).
+ *
+ * TH_PUSH, TH_URG, TH_ECE, TH_CWR are ignored
+ * as they do not affect the state of the TCP FSM.
+ * The data pointed to by TH_URG and th_urp is ignored.
+ */
+ KASSERT((thflags & (TH_RST|TH_ACK)) == 0,
+ ("%s: Listen socket: TH_RST or TH_ACK set", __func__));
+ KASSERT(thflags & (TH_SYN),
+ ("%s: Listen socket: TH_SYN not set", __func__));
+#ifdef INET6
+ /*
+ * If deprecated address is forbidden,
+ * we do not accept SYN to deprecated interface
+ * address to prevent any new inbound connection from
+ * getting established.
+ * When we do not accept SYN, we send a TCP RST,
+ * with deprecated source address (instead of dropping
+ * it). We accept this compromise because it is much better
+ * for the peer to receive a RST, and the RST will be the
+ * final packet of the exchange.
+ *
+ * If we do not forbid deprecated addresses, we accept
+ * the SYN packet. RFC2462 does not suggest dropping
+ * SYN in this case.
+ * If we decipher RFC2462 5.5.4, it says the following:
+ * 1. use of deprecated addr with existing
+ * communication is okay - "SHOULD continue to be
+ * used"
+ * 2. use of it with new communication:
+ * (2a) "SHOULD NOT be used if alternate address
+ * with sufficient scope is available"
+ * (2b) nothing mentioned otherwise.
+ * Here we fall into (2b) case as we have no choice in
+ * our source address selection - we must obey the peer.
+ *
+ * The wording in RFC2462 is confusing: deprecated
+ * address handling is described in several places,
+ * and worse, the descriptions do not exactly agree.
+ * We consider 5.5.4 the clearest, so we follow 5.5.4.
+ */
+ if (isipv6 && !V_ip6_use_deprecated) {
+ struct in6_ifaddr *ia6;
+
+ ia6 = ip6_getdstifaddr(m);
+ if (ia6 != NULL &&
+ (ia6->ia6_flags & IN6_IFF_DEPRECATED)) {
+ ifa_free(&ia6->ia_ifa);
+ if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
+ log(LOG_DEBUG, "%s; %s: Listen socket: "
+ "Connection attempt to deprecated "
+ "IPv6 address rejected\n",
+ s, __func__);
+ rstreason = BANDLIM_RST_OPENPORT;
+ goto dropwithreset;
+ }
+ ifa_free(&ia6->ia_ifa);
+ }
+#endif
+ /*
+ * Basic sanity checks on incoming SYN requests:
+ * Don't respond if the destination is a link layer
+ * broadcast according to RFC1122 4.2.3.10, p. 104.
+ * If it is from this socket it must be forged.
+ * Don't respond if the source or destination is a
+ * global or subnet broad- or multicast address.
+ * Note that it is quite possible to receive unicast
+ * link-layer packets with a broadcast IP address. Use
+ * in_broadcast() to find them.
+ */
+ if (m->m_flags & (M_BCAST|M_MCAST)) {
+ if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
+ log(LOG_DEBUG, "%s; %s: Listen socket: "
+ "Connection attempt from broad- or multicast "
+ "link layer address ignored\n", s, __func__);
+ goto dropunlock;
+ }
+ if (isipv6) {
+#ifdef INET6
+ if (th->th_dport == th->th_sport &&
+ IN6_ARE_ADDR_EQUAL(&ip6->ip6_dst, &ip6->ip6_src)) {
+ if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
+ log(LOG_DEBUG, "%s; %s: Listen socket: "
+ "Connection attempt to/from self "
+ "ignored\n", s, __func__);
+ goto dropunlock;
+ }
+ if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) ||
+ IN6_IS_ADDR_MULTICAST(&ip6->ip6_src)) {
+ if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
+ log(LOG_DEBUG, "%s; %s: Listen socket: "
+ "Connection attempt from/to multicast "
+ "address ignored\n", s, __func__);
+ goto dropunlock;
+ }
+#endif
+ } else {
+ if (th->th_dport == th->th_sport &&
+ ip->ip_dst.s_addr == ip->ip_src.s_addr) {
+ if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
+ log(LOG_DEBUG, "%s; %s: Listen socket: "
+ "Connection attempt from/to self "
+ "ignored\n", s, __func__);
+ goto dropunlock;
+ }
+ if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) ||
+ IN_MULTICAST(ntohl(ip->ip_src.s_addr)) ||
+ ip->ip_src.s_addr == htonl(INADDR_BROADCAST) ||
+ in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif)) {
+ if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
+ log(LOG_DEBUG, "%s; %s: Listen socket: "
+ "Connection attempt from/to broad- "
+ "or multicast address ignored\n",
+ s, __func__);
+ goto dropunlock;
+ }
+ }
+ /*
+ * SYN appears to be valid. Create compressed TCP state
+ * for syncache.
+ */
+#ifdef TCPDEBUG
+ if (so->so_options & SO_DEBUG)
+ tcp_trace(TA_INPUT, ostate, tp,
+ (void *)tcp_saveipgen, &tcp_savetcp, 0);
+#endif
+ tcp_dooptions(&to, optp, optlen, TO_SYN);
+ syncache_add(&inc, &to, th, inp, &so, m);
+ /*
+ * Entry added to syncache and mbuf consumed.
+ * Everything already unlocked by syncache_add().
+ */
+ INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
+ return;
+ }
+
+ /*
+ * Segment belongs to a connection in SYN_SENT, ESTABLISHED or later
+ * state. tcp_do_segment() always consumes the mbuf chain, unlocks
+ * the inpcb, and unlocks pcbinfo.
+ */
+ tcp_do_segment(m, th, so, tp, drop_hdrlen, tlen, iptos, ti_locked);
+ INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
+ return;
+
+dropwithreset:
+ if (ti_locked == TI_RLOCKED)
+ INP_INFO_RUNLOCK(&V_tcbinfo);
+ else if (ti_locked == TI_WLOCKED)
+ INP_INFO_WUNLOCK(&V_tcbinfo);
+ else
+ panic("%s: dropwithreset ti_locked %d", __func__, ti_locked);
+ ti_locked = TI_UNLOCKED;
+
+ if (inp != NULL) {
+ tcp_dropwithreset(m, th, tp, tlen, rstreason);
+ INP_WUNLOCK(inp);
+ } else
+ tcp_dropwithreset(m, th, NULL, tlen, rstreason);
+ m = NULL; /* mbuf chain got consumed. */
+ goto drop;
+
+dropunlock:
+ if (ti_locked == TI_RLOCKED)
+ INP_INFO_RUNLOCK(&V_tcbinfo);
+ else if (ti_locked == TI_WLOCKED)
+ INP_INFO_WUNLOCK(&V_tcbinfo);
+ else
+ panic("%s: dropunlock ti_locked %d", __func__, ti_locked);
+ ti_locked = TI_UNLOCKED;
+
+ if (inp != NULL)
+ INP_WUNLOCK(inp);
+
+drop:
+ INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
+ if (s != NULL)
+ free(s, M_TCPLOG);
+ if (m != NULL)
+ m_freem(m);
+}
+
+static void
+tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
+ struct tcpcb *tp, int drop_hdrlen, int tlen, uint8_t iptos,
+ int ti_locked)
+{
+ int thflags, acked, ourfinisacked, needoutput = 0;
+ int rstreason, todrop, win;
+ u_long tiwin;
+ struct tcpopt to;
+
+#ifdef TCPDEBUG
+ /*
+ * The size of tcp_saveipgen must be the size of the max ip header,
+ * now IPv6.
+ */
+ u_char tcp_saveipgen[IP6_HDR_LEN];
+ struct tcphdr tcp_savetcp;
+ short ostate = 0;
+#endif
+ thflags = th->th_flags;
+
+ /*
+ * Require a global write lock on tcbinfo for state-changing
+ * packets (SYN/FIN/RST) and for connections that are not yet
+ * established; otherwise accept either a read or a write lock,
+ * as we may have conservatively acquired a write lock in
+ * certain cases in tcp_input() (is this still true?). Currently
+ * we will never enter with no lock, so we try to drop it quickly
+ * in the common pure ack/pure data cases.
+ */
+ if ((thflags & (TH_SYN | TH_FIN | TH_RST)) != 0 ||
+ tp->t_state != TCPS_ESTABLISHED) {
+ KASSERT(ti_locked == TI_WLOCKED, ("%s ti_locked %d for "
+ "SYN/FIN/RST/!EST", __func__, ti_locked));
+ INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
+ } else {
+#ifdef INVARIANTS
+ if (ti_locked == TI_RLOCKED)
+ INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
+ else if (ti_locked == TI_WLOCKED)
+ INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
+ else
+ panic("%s: ti_locked %d for EST", __func__,
+ ti_locked);
+#endif
+ }
+ INP_WLOCK_ASSERT(tp->t_inpcb);
+ KASSERT(tp->t_state > TCPS_LISTEN, ("%s: TCPS_LISTEN",
+ __func__));
+ KASSERT(tp->t_state != TCPS_TIME_WAIT, ("%s: TCPS_TIME_WAIT",
+ __func__));
+
+ /*
+ * Segment received on connection.
+ * Reset idle time and keep-alive timer.
+ * XXX: This should be done after segment
+ * validation to ignore broken/spoofed segs.
+ */
+ tp->t_rcvtime = ticks;
+ if (TCPS_HAVEESTABLISHED(tp->t_state))
+ tcp_timer_activate(tp, TT_KEEP, tcp_keepidle);
+
+ /*
+ * Unscale the window into a 32-bit value.
+ * For the SYN_SENT state the scale is zero.
+ */
+ tiwin = th->th_win << tp->snd_scale;
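+ /*
+ * Example: a peer that negotiated a window scale of 7 and now
+ * advertises th_win == 65535 yields tiwin == 65535 << 7, i.e.
+ * roughly an 8 MB effective window; with no scaling in effect
+ * tiwin is simply th_win.
+ */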
+
+ /*
+ * TCP ECN processing.
+ */
+ if (tp->t_flags & TF_ECN_PERMIT) {
+ if (thflags & TH_CWR)
+ tp->t_flags &= ~TF_ECN_SND_ECE;
+ switch (iptos & IPTOS_ECN_MASK) {
+ case IPTOS_ECN_CE:
+ tp->t_flags |= TF_ECN_SND_ECE;
+ TCPSTAT_INC(tcps_ecn_ce);
+ break;
+ case IPTOS_ECN_ECT0:
+ TCPSTAT_INC(tcps_ecn_ect0);
+ break;
+ case IPTOS_ECN_ECT1:
+ TCPSTAT_INC(tcps_ecn_ect1);
+ break;
+ }
+ /*
+ * Congestion experienced.
+ * Ignore if we are already trying to recover.
+ */
+ if ((thflags & TH_ECE) &&
+ SEQ_LEQ(th->th_ack, tp->snd_recover)) {
+ TCPSTAT_INC(tcps_ecn_rcwnd);
+ tcp_congestion_exp(tp);
+ }
+ }
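+ /*
+ * Example of the exchange above: a data segment whose IP header
+ * carries IPTOS_ECN_CE sets TF_ECN_SND_ECE, so every ACK we
+ * send echoes ECE until the peer acknowledges the congestion
+ * signal with CWR, which clears the flag again.
+ */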
+
+ /*
+ * Parse options on any incoming segment.
+ */
+ tcp_dooptions(&to, (u_char *)(th + 1),
+ (th->th_off << 2) - sizeof(struct tcphdr),
+ (thflags & TH_SYN) ? TO_SYN : 0);
+
+ /*
+ * If echoed timestamp is later than the current time,
+ * fall back to non RFC1323 RTT calculation. Normalize
+ * timestamp if syncookies were used when this connection
+ * was established.
+ */
+ if ((to.to_flags & TOF_TS) && (to.to_tsecr != 0)) {
+ to.to_tsecr -= tp->ts_offset;
+ if (TSTMP_GT(to.to_tsecr, ticks))
+ to.to_tsecr = 0;
+ }
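+ /*
+ * Example: if syncookies stamped this connection with a random
+ * ts_offset of 1000, the peer echoes our tsval + 1000;
+ * subtracting ts_offset above recovers our original clock, and
+ * an echo that still lies in the future is zeroed so it cannot
+ * produce a bogus RTT sample.
+ */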
+
+ /*
+ * Process options only when we get SYN/ACK back. The SYN case
+ * for incoming connections is handled in tcp_syncache.
+ * According to RFC1323 the window field in a SYN (i.e., a <SYN>
+ * or <SYN,ACK>) segment itself is never scaled.
+ * XXX this is traditional behavior, may need to be cleaned up.
+ */
+ if (tp->t_state == TCPS_SYN_SENT && (thflags & TH_SYN)) {
+ if ((to.to_flags & TOF_SCALE) &&
+ (tp->t_flags & TF_REQ_SCALE)) {
+ tp->t_flags |= TF_RCVD_SCALE;
+ tp->snd_scale = to.to_wscale;
+ }
+ /*
+ * Initial send window. It will be updated with
+ * the next incoming segment to the scaled value.
+ */
+ tp->snd_wnd = th->th_win;
+ if (to.to_flags & TOF_TS) {
+ tp->t_flags |= TF_RCVD_TSTMP;
+ tp->ts_recent = to.to_tsval;
+ tp->ts_recent_age = ticks;
+ }
+ if (to.to_flags & TOF_MSS)
+ tcp_mss(tp, to.to_mss);
+ if ((tp->t_flags & TF_SACK_PERMIT) &&
+ (to.to_flags & TOF_SACKPERM) == 0)
+ tp->t_flags &= ~TF_SACK_PERMIT;
+ }
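+ /*
+ * Example: a <SYN,ACK> carrying "wscale 7, timestamp, MSS 1460"
+ * sets TF_RCVD_SCALE with snd_scale = 7, seeds ts_recent from
+ * the peer's tsval and hands 1460 to tcp_mss(); if it lacks
+ * SACK-PERMITTED, TF_SACK_PERMIT is cleared for this connection.
+ */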
+
+ /*
+ * Header prediction: check for the two common cases
+ * of a uni-directional data xfer. If the packet has
+ * no control flags, is in-sequence, the window didn't
+ * change and we're not retransmitting, it's a
+ * candidate. If the length is zero and the ack moved
+ * forward, we're the sender side of the xfer. Just
+ * free the data acked & wake any higher level process
+ * that was blocked waiting for space. If the length
+ * is non-zero and the ack didn't move, we're the
+ * receiver side. If we're getting packets in-order
+ * (the reassembly queue is empty), add the data to
+ * the socket buffer and note that we need a delayed ack.
+ * Make sure that the hidden state-flags are also off.
+ * Since we check for TCPS_ESTABLISHED first, it can only
+ * be TH_NEEDSYN.
+ */
+ if (tp->t_state == TCPS_ESTABLISHED &&
+ th->th_seq == tp->rcv_nxt &&
+ (thflags & (TH_SYN|TH_FIN|TH_RST|TH_URG|TH_ACK)) == TH_ACK &&
+ tp->snd_nxt == tp->snd_max &&
+ tiwin && tiwin == tp->snd_wnd &&
+ ((tp->t_flags & (TF_NEEDSYN|TF_NEEDFIN)) == 0) &&
+ LIST_EMPTY(&tp->t_segq) &&
+ ((to.to_flags & TOF_TS) == 0 ||
+ TSTMP_GEQ(to.to_tsval, tp->ts_recent))) {
+
+ /*
+ * If last ACK falls within this segment's sequence numbers,
+ * record the timestamp.
+ * NOTE that the test is modified according to the latest
+ * proposal of the tcplw@cray.com list (Braden 1993/04/26).
+ */
+ if ((to.to_flags & TOF_TS) != 0 &&
+ SEQ_LEQ(th->th_seq, tp->last_ack_sent)) {
+ tp->ts_recent_age = ticks;
+ tp->ts_recent = to.to_tsval;
+ }
+
+ if (tlen == 0) {
+ if (SEQ_GT(th->th_ack, tp->snd_una) &&
+ SEQ_LEQ(th->th_ack, tp->snd_max) &&
+ tp->snd_cwnd >= tp->snd_wnd &&
+ ((!V_tcp_do_newreno &&
+ !(tp->t_flags & TF_SACK_PERMIT) &&
+ tp->t_dupacks < tcprexmtthresh) ||
+ ((V_tcp_do_newreno ||
+ (tp->t_flags & TF_SACK_PERMIT)) &&
+ !IN_FASTRECOVERY(tp) &&
+ (to.to_flags & TOF_SACK) == 0 &&
+ TAILQ_EMPTY(&tp->snd_holes)))) {
+ /*
+ * This is a pure ack for outstanding data.
+ */
+ if (ti_locked == TI_RLOCKED)
+ INP_INFO_RUNLOCK(&V_tcbinfo);
+ else if (ti_locked == TI_WLOCKED)
+ INP_INFO_WUNLOCK(&V_tcbinfo);
+ else
+ panic("%s: ti_locked %d on pure ACK",
+ __func__, ti_locked);
+ ti_locked = TI_UNLOCKED;
+
+ TCPSTAT_INC(tcps_predack);
+
+ /*
+ * "bad retransmit" recovery.
+ */
+ if (tp->t_rxtshift == 1 &&
+ (int)(ticks - tp->t_badrxtwin) < 0) {
+ TCPSTAT_INC(tcps_sndrexmitbad);
+ tp->snd_cwnd = tp->snd_cwnd_prev;
+ tp->snd_ssthresh =
+ tp->snd_ssthresh_prev;
+ tp->snd_recover = tp->snd_recover_prev;
+ if (tp->t_flags & TF_WASFRECOVERY)
+ ENTER_FASTRECOVERY(tp);
+ tp->snd_nxt = tp->snd_max;
+ tp->t_badrxtwin = 0;
+ }
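+ /*
+ * Illustration: t_badrxtwin is armed when the first
+ * retransmit is sent; an ACK arriving before it
+ * expires means the original transmission was merely
+ * delayed, so the pre-retransmit cwnd, ssthresh and
+ * recovery state are restored above.
+ */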
+
+ /*
+ * Recalculate the transmit timer / rtt.
+ *
+ * Some boxes send broken timestamp replies
+ * during the SYN+ACK phase; ignore timestamps
+ * of 0, or we could calculate a huge RTT and
+ * blow up the retransmit timer.
+ */
+ if ((to.to_flags & TOF_TS) != 0 &&
+ to.to_tsecr) {
+ if (!tp->t_rttlow ||
+ tp->t_rttlow > ticks - to.to_tsecr)
+ tp->t_rttlow = ticks - to.to_tsecr;
+ tcp_xmit_timer(tp,
+ ticks - to.to_tsecr + 1);
+ } else if (tp->t_rtttime &&
+ SEQ_GT(th->th_ack, tp->t_rtseq)) {
+ if (!tp->t_rttlow ||
+ tp->t_rttlow > ticks - tp->t_rtttime)
+ tp->t_rttlow = ticks - tp->t_rtttime;
+ tcp_xmit_timer(tp,
+ ticks - tp->t_rtttime);
+ }
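+ /*
+ * Example (assuming hz = 1000 purely for
+ * illustration): a tsecr echoed 42 ticks ago
+ * yields a 43 ms RTT sample for tcp_xmit_timer(),
+ * while t_rttlow tracks the smallest sample seen
+ * so far.
+ */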
+ tcp_xmit_bandwidth_limit(tp, th->th_ack);
+ acked = th->th_ack - tp->snd_una;
+ TCPSTAT_INC(tcps_rcvackpack);
+ TCPSTAT_ADD(tcps_rcvackbyte, acked);
+ sbdrop(&so->so_snd, acked);
+ if (SEQ_GT(tp->snd_una, tp->snd_recover) &&
+ SEQ_LEQ(th->th_ack, tp->snd_recover))
+ tp->snd_recover = th->th_ack - 1;
+ tp->snd_una = th->th_ack;
+ /*
+ * Pull snd_wl2 up to prevent seq wrap relative
+ * to th_ack.
+ */
+ tp->snd_wl2 = th->th_ack;
+ tp->t_dupacks = 0;
+ m_freem(m);
+ ND6_HINT(tp); /* Some progress has been made. */
+
+ /*
+ * If all outstanding data are acked, stop
+ * retransmit timer, otherwise restart timer
+ * using current (possibly backed-off) value.
+ * If process is waiting for space,
+ * wakeup/selwakeup/signal. If data
+ * are ready to send, let tcp_output
+ * decide between more output or persist.
+ */
+#ifdef TCPDEBUG
+ if (so->so_options & SO_DEBUG)
+ tcp_trace(TA_INPUT, ostate, tp,
+ (void *)tcp_saveipgen,
+ &tcp_savetcp, 0);
+#endif
+ if (tp->snd_una == tp->snd_max)
+ tcp_timer_activate(tp, TT_REXMT, 0);
+ else if (!tcp_timer_active(tp, TT_PERSIST))
+ tcp_timer_activate(tp, TT_REXMT,
+ tp->t_rxtcur);
+ sowwakeup(so);
+ if (so->so_snd.sb_cc)
+ (void) tcp_output(tp);
+ goto check_delack;
+ }
+ } else if (th->th_ack == tp->snd_una &&
+ tlen <= sbspace(&so->so_rcv)) {
+ int newsize = 0; /* automatic sockbuf scaling */
+
+ /*
+ * This is a pure, in-sequence data packet with
+ * nothing on the reassembly queue and we have enough
+ * buffer space to take it.
+ */
+ if (ti_locked == TI_RLOCKED)
+ INP_INFO_RUNLOCK(&V_tcbinfo);
+ else if (ti_locked == TI_WLOCKED)
+ INP_INFO_WUNLOCK(&V_tcbinfo);
+ else
+ panic("%s: ti_locked %d on pure data "
+ "segment", __func__, ti_locked);
+ ti_locked = TI_UNLOCKED;
+
+ /* Clean receiver SACK report if present */
+ if ((tp->t_flags & TF_SACK_PERMIT) && tp->rcv_numsacks)
+ tcp_clean_sackreport(tp);
+ TCPSTAT_INC(tcps_preddat);
+ tp->rcv_nxt += tlen;
+ /*
+ * Pull snd_wl1 up to prevent seq wrap relative to
+ * th_seq.
+ */
+ tp->snd_wl1 = th->th_seq;
+ /*
+ * Pull rcv_up up to prevent seq wrap relative to
+ * rcv_nxt.
+ */
+ tp->rcv_up = tp->rcv_nxt;
+ TCPSTAT_INC(tcps_rcvpack);
+ TCPSTAT_ADD(tcps_rcvbyte, tlen);
+ ND6_HINT(tp); /* Some progress has been made */
+#ifdef TCPDEBUG
+ if (so->so_options & SO_DEBUG)
+ tcp_trace(TA_INPUT, ostate, tp,
+ (void *)tcp_saveipgen, &tcp_savetcp, 0);
+#endif
+ /*
+ * Automatic sizing of receive socket buffer. Often the send
+ * buffer size is not optimally adjusted to the actual network
+ * conditions at hand (delay bandwidth product). Setting the
+ * buffer size too small limits throughput on links with high
+ * bandwidth and high delay (e.g. trans-continental/oceanic links).
+ *
+ * On the receive side the socket buffer memory is only rarely
+ * used to any significant extent. This allows us to be much
+ * more aggressive in scaling the receive socket buffer. For
+ * the case that the buffer space is actually used to a large
+ * extent and we run out of kernel memory, we can simply drop
+ * the new segments; the sender's TCP will just retransmit them
+ * later. Setting the buffer size too big may only consume too
+ * much kernel memory if the application doesn't read() from
+ * the socket, or if packet loss or reordering makes use of the
+ * reassembly queue.
+ *
+ * The criteria to step up the receive buffer one notch are:
+ * 1. the number of bytes received during the time it takes
+ * one timestamp to be reflected back to us (the RTT);
+ * 2. received bytes per RTT is within seven-eighths of the
+ * current socket buffer size;
+ * 3. receive buffer size has not hit maximal automatic size;
+ *
+ * This algorithm does one step per RTT at most and only if
+ * we receive a bulk stream w/o packet losses or reorderings.
+ * Shrinking the buffer during idle times is not necessary as
+ * it doesn't consume any memory when idle.
+ *
+ * TODO: Only step up if the application is actually serving
+ * the buffer to better manage the socket buffer resources.
+ */
+ if (V_tcp_do_autorcvbuf &&
+ to.to_tsecr &&
+ (so->so_rcv.sb_flags & SB_AUTOSIZE)) {
+ if (TSTMP_GT(to.to_tsecr, tp->rfbuf_ts) &&
+ to.to_tsecr - tp->rfbuf_ts < hz) {
+ if (tp->rfbuf_cnt >
+ (so->so_rcv.sb_hiwat / 8 * 7) &&
+ so->so_rcv.sb_hiwat <
+ V_tcp_autorcvbuf_max) {
+ newsize =
+ min(so->so_rcv.sb_hiwat +
+ V_tcp_autorcvbuf_inc,
+ V_tcp_autorcvbuf_max);
+ }
+ /* Start over with next RTT. */
+ tp->rfbuf_ts = 0;
+ tp->rfbuf_cnt = 0;
+ } else
+ tp->rfbuf_cnt += tlen; /* add up */
+ }
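+ /*
+ * Worked example: with sb_hiwat = 65536 the buffer is stepped
+ * up only if more than 7/8 * 65536 = 57344 bytes arrived
+ * within the last RTT; it then grows by tcp_autorcvbuf_inc,
+ * capped at tcp_autorcvbuf_max.
+ */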
+
+ /* Add data to socket buffer. */
+ SOCKBUF_LOCK(&so->so_rcv);
+ if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
+ m_freem(m);
+ } else {
+ /*
+ * Set new socket buffer size.
+ * Give up when limit is reached.
+ */
+ if (newsize)
+ if (!sbreserve_locked(&so->so_rcv,
+ newsize, so, NULL))
+ so->so_rcv.sb_flags &= ~SB_AUTOSIZE;
+ m_adj(m, drop_hdrlen); /* delayed header drop */
+ sbappendstream_locked(&so->so_rcv, m);
+ }
+ /* NB: sorwakeup_locked() does an implicit unlock. */
+ sorwakeup_locked(so);
+ if (DELAY_ACK(tp)) {
+ tp->t_flags |= TF_DELACK;
+ } else {
+ tp->t_flags |= TF_ACKNOW;
+ tcp_output(tp);
+ }
+ goto check_delack;
+ }
+ }
+
+ /*
+ * Calculate amount of space in receive window,
+ * and then do TCP input processing.
+ * Receive window is amount of space in rcv queue,
+ * but not less than advertised window.
+ */
+ win = sbspace(&so->so_rcv);
+ if (win < 0)
+ win = 0;
+ tp->rcv_wnd = imax(win, (int)(tp->rcv_adv - tp->rcv_nxt));
+
+ /* Reset receive buffer auto scaling when not in bulk receive mode. */
+ tp->rfbuf_ts = 0;
+ tp->rfbuf_cnt = 0;
+
+ switch (tp->t_state) {
+
+ /*
+ * If the state is SYN_RECEIVED:
+ * if seg contains an ACK, but not for our SYN/ACK, send a RST.
+ */
+ case TCPS_SYN_RECEIVED:
+ if ((thflags & TH_ACK) &&
+ (SEQ_LEQ(th->th_ack, tp->snd_una) ||
+ SEQ_GT(th->th_ack, tp->snd_max))) {
+ rstreason = BANDLIM_RST_OPENPORT;
+ goto dropwithreset;
+ }
+ break;
+
+ /*
+ * If the state is SYN_SENT:
+ * if seg contains an ACK, but not for our SYN, drop the input.
+ * if seg contains a RST, then drop the connection.
+ * if seg does not contain SYN, then drop it.
+ * Otherwise this is an acceptable SYN segment
+ * initialize tp->rcv_nxt and tp->irs
+ * if seg contains ack then advance tp->snd_una
+ * if seg contains an ECE and ECN support is enabled, the stream
+ * is ECN capable.
+ * if SYN has been acked change to ESTABLISHED else SYN_RCVD state
+ * arrange for segment to be acked (eventually)
+ * continue processing rest of data/controls, beginning with URG
+ */
+ case TCPS_SYN_SENT:
+ if ((thflags & TH_ACK) &&
+ (SEQ_LEQ(th->th_ack, tp->iss) ||
+ SEQ_GT(th->th_ack, tp->snd_max))) {
+ rstreason = BANDLIM_UNLIMITED;
+ goto dropwithreset;
+ }
+ if ((thflags & (TH_ACK|TH_RST)) == (TH_ACK|TH_RST))
+ tp = tcp_drop(tp, ECONNREFUSED);
+ if (thflags & TH_RST)
+ goto drop;
+ if (!(thflags & TH_SYN))
+ goto drop;
+
+ tp->irs = th->th_seq;
+ tcp_rcvseqinit(tp);
+ if (thflags & TH_ACK) {
+ TCPSTAT_INC(tcps_connects);
+ soisconnected(so);
+#ifdef MAC
+ mac_socketpeer_set_from_mbuf(m, so);
+#endif
+ /* Do window scaling on this connection? */
+ if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) ==
+ (TF_RCVD_SCALE|TF_REQ_SCALE)) {
+ tp->rcv_scale = tp->request_r_scale;
+ }
+ tp->rcv_adv += tp->rcv_wnd;
+ tp->snd_una++; /* SYN is acked */
+ /*
+ * If there's data, delay ACK; if there's also a FIN
+ * ACKNOW will be turned on later.
+ */
+ if (DELAY_ACK(tp) && tlen != 0)
+ tcp_timer_activate(tp, TT_DELACK,
+ tcp_delacktime);
+ else
+ tp->t_flags |= TF_ACKNOW;
+
+ if ((thflags & TH_ECE) && V_tcp_do_ecn) {
+ tp->t_flags |= TF_ECN_PERMIT;
+ TCPSTAT_INC(tcps_ecn_shs);
+ }
+
+ /*
+ * Received <SYN,ACK> in SYN_SENT[*] state.
+ * Transitions:
+ * SYN_SENT --> ESTABLISHED
+ * SYN_SENT* --> FIN_WAIT_1
+ */
+ tp->t_starttime = ticks;
+ if (tp->t_flags & TF_NEEDFIN) {
+ tp->t_state = TCPS_FIN_WAIT_1;
+ tp->t_flags &= ~TF_NEEDFIN;
+ thflags &= ~TH_SYN;
+ } else {
+ tp->t_state = TCPS_ESTABLISHED;
+ tcp_timer_activate(tp, TT_KEEP, tcp_keepidle);
+ }
+ } else {
+ /*
+ * Received initial SYN in SYN-SENT[*] state =>
+ * simultaneous open. If segment contains CC option
+ * and there is a cached CC, apply TAO test.
+ * If it succeeds, connection is half-synchronized.
+ * Otherwise, do 3-way handshake:
+ * SYN-SENT -> SYN-RECEIVED
+ * SYN-SENT* -> SYN-RECEIVED*
+ * If there was no CC option, clear cached CC value.
+ */
+ tp->t_flags |= (TF_ACKNOW | TF_NEEDSYN);
+ tcp_timer_activate(tp, TT_REXMT, 0);
+ tp->t_state = TCPS_SYN_RECEIVED;
+ }
+
+ KASSERT(ti_locked == TI_WLOCKED, ("%s: trimthenstep6: "
+ "ti_locked %d", __func__, ti_locked));
+ INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
+ INP_WLOCK_ASSERT(tp->t_inpcb);
+
+ /*
+ * Advance th->th_seq to correspond to first data byte.
+ * If data, trim to stay within window,
+ * dropping FIN if necessary.
+ */
+ th->th_seq++;
+ if (tlen > tp->rcv_wnd) {
+ todrop = tlen - tp->rcv_wnd;
+ m_adj(m, -todrop);
+ tlen = tp->rcv_wnd;
+ thflags &= ~TH_FIN;
+ TCPSTAT_INC(tcps_rcvpackafterwin);
+ TCPSTAT_ADD(tcps_rcvbyteafterwin, todrop);
+ }
+ tp->snd_wl1 = th->th_seq - 1;
+ tp->rcv_up = th->th_seq;
+ /*
+ * Client side of transaction: already sent SYN and data.
+ * If the remote host used T/TCP to validate the SYN,
+ * our data will be ACK'd; if so, enter normal data segment
+ * processing in the middle of step 5, ack processing.
+ * Otherwise, goto step 6.
+ */
+ if (thflags & TH_ACK)
+ goto process_ACK;
+
+ goto step6;
+
+ /*
+ * If the state is LAST_ACK or CLOSING or TIME_WAIT:
+ * do normal processing.
+ *
+ * NB: Leftover from RFC1644 T/TCP. Cases to be reused later.
+ */
+ case TCPS_LAST_ACK:
+ case TCPS_CLOSING:
+ break; /* continue normal processing */
+ }
+
+ /*
+ * States other than LISTEN or SYN_SENT.
+ * First check the RST flag and sequence number since reset segments
+ * are exempt from the timestamp and connection count tests. This
+ * fixes a bug introduced by the Stevens, vol. 2, p. 960 bugfix
+ * below which allowed reset segments in half the sequence space
+ * to fall though and be processed (which gives forged reset
+ * segments with a random sequence number a 50 percent chance of
+ * killing a connection).
+ * Then check timestamp, if present.
+ * Then check the connection count, if present.
+ * Then check that at least some bytes of segment are within
+ * receive window. If segment begins before rcv_nxt,
+ * drop leading data (and SYN); if nothing left, just ack.
+ *
+ * If the RST bit is set, check the sequence number to see
+ * if this is a valid reset segment.
+ * RFC 793 page 37:
+ * In all states except SYN-SENT, all reset (RST) segments
+ * are validated by checking their SEQ-fields. A reset is
+ * valid if its sequence number is in the window.
+ * Note: this does not take into account delayed ACKs, so
+ * we should test against last_ack_sent instead of rcv_nxt.
+ * The sequence number in the reset segment is normally an
+ * echo of our outgoing acknowledgement numbers, but some hosts
+ * send a reset with the sequence number at the rightmost edge
+ * of our receive window, and we have to handle this case.
+ * Note 2: Paul Watson's paper "Slipping in the Window" has shown
+ * that brute force RST attacks are possible. To combat this,
+ * we use a much stricter check while in the ESTABLISHED state,
+ * only accepting RSTs where the sequence number is equal to
+ * last_ack_sent. In all other states (the states in which a
+ * RST is more likely), the more permissive check is used.
+ * If we have multiple segments in flight, the initial reset
+ * segment sequence numbers will be to the left of last_ack_sent,
+ * but they will eventually catch up.
+ * In any case, it never made sense to trim reset segments to
+ * fit the receive window since RFC 1122 says:
+ * 4.2.2.12 RST Segment: RFC-793 Section 3.4
+ *
+ * A TCP SHOULD allow a received RST segment to include data.
+ *
+ * DISCUSSION
+ * It has been suggested that a RST segment could contain
+ * ASCII text that encoded and explained the cause of the
+ * RST. No standard has yet been established for such
+ * data.
+ *
+ * If the reset segment passes the sequence number test examine
+ * the state:
+ * SYN_RECEIVED STATE:
+ * If passive open, return to LISTEN state.
+ * If active open, inform user that connection was refused.
+ * ESTABLISHED, FIN_WAIT_1, FIN_WAIT_2, CLOSE_WAIT STATES:
+ * Inform user that connection was reset, and close tcb.
+ * CLOSING, LAST_ACK STATES:
+ * Close the tcb.
+ * TIME_WAIT STATE:
+ * Drop the segment - see Stevens, vol. 2, p. 964 and
+ * RFC 1337.
+ */
+ if (thflags & TH_RST) {
+ if (SEQ_GEQ(th->th_seq, tp->last_ack_sent - 1) &&
+ SEQ_LEQ(th->th_seq, tp->last_ack_sent + tp->rcv_wnd)) {
+ switch (tp->t_state) {
+
+ case TCPS_SYN_RECEIVED:
+ so->so_error = ECONNREFUSED;
+ goto close;
+
+ case TCPS_ESTABLISHED:
+ if (V_tcp_insecure_rst == 0 &&
+ !(SEQ_GEQ(th->th_seq, tp->rcv_nxt - 1) &&
+ SEQ_LEQ(th->th_seq, tp->rcv_nxt + 1)) &&
+ !(SEQ_GEQ(th->th_seq, tp->last_ack_sent - 1) &&
+ SEQ_LEQ(th->th_seq, tp->last_ack_sent + 1))) {
+ TCPSTAT_INC(tcps_badrst);
+ goto drop;
+ }
+ /* FALLTHROUGH */
+ case TCPS_FIN_WAIT_1:
+ case TCPS_FIN_WAIT_2:
+ case TCPS_CLOSE_WAIT:
+ so->so_error = ECONNRESET;
+ close:
+ KASSERT(ti_locked == TI_WLOCKED,
+ ("tcp_do_segment: TH_RST 1 ti_locked %d",
+ ti_locked));
+ INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
+
+ tp->t_state = TCPS_CLOSED;
+ TCPSTAT_INC(tcps_drops);
+ tp = tcp_close(tp);
+ break;
+
+ case TCPS_CLOSING:
+ case TCPS_LAST_ACK:
+ KASSERT(ti_locked == TI_WLOCKED,
+ ("tcp_do_segment: TH_RST 2 ti_locked %d",
+ ti_locked));
+ INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
+
+ tp = tcp_close(tp);
+ break;
+ }
+ }
+ goto drop;
+ }
+
+ /*
+ * RFC 1323 PAWS: If we have a timestamp reply on this segment
+ * and it's less than ts_recent, drop it.
+ */
+ if ((to.to_flags & TOF_TS) != 0 && tp->ts_recent &&
+ TSTMP_LT(to.to_tsval, tp->ts_recent)) {
+
+ /* Check to see if ts_recent is over 24 days old. */
+ if (ticks - tp->ts_recent_age > TCP_PAWS_IDLE) {
+ /*
+ * Invalidate ts_recent. If this segment updates
+ * ts_recent, the age will be reset later and ts_recent
+ * will get a valid value. If it does not, setting
+ * ts_recent to zero will at least satisfy the
+ * requirement that zero be placed in the timestamp
+ * echo reply when ts_recent isn't valid. The
+ * age isn't reset until we get a valid ts_recent
+ * because we don't want out-of-order segments to be
+ * dropped when ts_recent is old.
+ */
+ tp->ts_recent = 0;
+ } else {
+ TCPSTAT_INC(tcps_rcvduppack);
+ TCPSTAT_ADD(tcps_rcvdupbyte, tlen);
+ TCPSTAT_INC(tcps_pawsdrop);
+ if (tlen)
+ goto dropafterack;
+ goto drop;
+ }
+ }
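+ /*
+ * Rationale for the 24 day limit above: a timestamp clock
+ * ticking once per millisecond (the fastest rate RFC1323
+ * recommends) wraps its signed 32-bit space in roughly 24.8
+ * days, so a ts_recent not refreshed within TCP_PAWS_IDLE can
+ * no longer be compared reliably and is invalidated rather
+ * than used to drop live segments.
+ */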
+
+ /*
+ * In the SYN-RECEIVED state, validate that the packet belongs to
+ * this connection before trimming the data to fit the receive
+ * window. Check the sequence number versus IRS since we know
+ * the sequence numbers haven't wrapped. This is a partial fix
+ * for the "LAND" DoS attack.
+ */
+ if (tp->t_state == TCPS_SYN_RECEIVED && SEQ_LT(th->th_seq, tp->irs)) {
+ rstreason = BANDLIM_RST_OPENPORT;
+ goto dropwithreset;
+ }
+
+ todrop = tp->rcv_nxt - th->th_seq;
+ if (todrop > 0) {
+ /*
+ * If this is a duplicate SYN for our current connection,
+ * advance over it and pretend it's not a SYN.
+ */
+ if (thflags & TH_SYN && th->th_seq == tp->irs) {
+ thflags &= ~TH_SYN;
+ th->th_seq++;
+ if (th->th_urp > 1)
+ th->th_urp--;
+ else
+ thflags &= ~TH_URG;
+ todrop--;
+ }
+ /*
+ * Following if statement from Stevens, vol. 2, p. 960.
+ */
+ if (todrop > tlen
+ || (todrop == tlen && (thflags & TH_FIN) == 0)) {
+ /*
+ * Any valid FIN must be to the left of the window.
+ * At this point the FIN must be a duplicate or out
+ * of sequence; drop it.
+ */
+ thflags &= ~TH_FIN;
+
+ /*
+ * Send an ACK to resynchronize and drop any data.
+ * But keep on processing for RST or ACK.
+ */
+ tp->t_flags |= TF_ACKNOW;
+ todrop = tlen;
+ TCPSTAT_INC(tcps_rcvduppack);
+ TCPSTAT_ADD(tcps_rcvdupbyte, todrop);
+ } else {
+ TCPSTAT_INC(tcps_rcvpartduppack);
+ TCPSTAT_ADD(tcps_rcvpartdupbyte, todrop);
+ }
+ drop_hdrlen += todrop; /* drop from the top afterwards */
+ th->th_seq += todrop;
+ tlen -= todrop;
+ if (th->th_urp > todrop)
+ th->th_urp -= todrop;
+ else {
+ thflags &= ~TH_URG;
+ th->th_urp = 0;
+ }
+ }
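+ /*
+ * Worked example: with rcv_nxt = 1000, th_seq = 900 and
+ * tlen = 300, todrop = 100; the first 100 bytes are stripped
+ * via drop_hdrlen, th_seq advances to 1000 and tlen shrinks to
+ * 200, leaving only the new data for the steps below.
+ */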
+
+ /*
+ * If new data are received on a connection after the
+ * user processes are gone, then RST the other end.
+ */
+ if ((so->so_state & SS_NOFDREF) &&
+ tp->t_state > TCPS_CLOSE_WAIT && tlen) {
+ char *s;
+
+ KASSERT(ti_locked == TI_WLOCKED, ("%s: SS_NOFDEREF && "
+ "CLOSE_WAIT && tlen ti_locked %d", __func__, ti_locked));
+ INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
+
+ if ((s = tcp_log_addrs(&tp->t_inpcb->inp_inc, th, NULL, NULL))) {
+ log(LOG_DEBUG, "%s; %s: %s: Received %d bytes of data after socket "
+ "was closed, sending RST and removing tcpcb\n",
+ s, __func__, tcpstates[tp->t_state], tlen);
+ free(s, M_TCPLOG);
+ }
+ tp = tcp_close(tp);
+ TCPSTAT_INC(tcps_rcvafterclose);
+ rstreason = BANDLIM_UNLIMITED;
+ goto dropwithreset;
+ }
+
+ /*
+ * If segment ends after window, drop trailing data
+ * (and PUSH and FIN); if nothing left, just ACK.
+ */
+ todrop = (th->th_seq + tlen) - (tp->rcv_nxt + tp->rcv_wnd);
+ if (todrop > 0) {
+ TCPSTAT_INC(tcps_rcvpackafterwin);
+ if (todrop >= tlen) {
+ TCPSTAT_ADD(tcps_rcvbyteafterwin, tlen);
+ /*
+ * If window is closed can only take segments at
+ * window edge, and have to drop data and PUSH from
+ * incoming segments. Continue processing, but
+ * remember to ack. Otherwise, drop segment
+ * and ack.
+ */
+ if (tp->rcv_wnd == 0 && th->th_seq == tp->rcv_nxt) {
+ tp->t_flags |= TF_ACKNOW;
+ TCPSTAT_INC(tcps_rcvwinprobe);
+ } else
+ goto dropafterack;
+ } else
+ TCPSTAT_ADD(tcps_rcvbyteafterwin, todrop);
+ m_adj(m, -todrop);
+ tlen -= todrop;
+ thflags &= ~(TH_PUSH|TH_FIN);
+ }
+
+ /*
+ * If last ACK falls within this segment's sequence numbers,
+ * record its timestamp.
+ * NOTE:
+ * 1) That the test incorporates suggestions from the latest
+ * proposal of the tcplw@cray.com list (Braden 1993/04/26).
+ * 2) That updating only on newer timestamps interferes with
+ * our earlier PAWS tests, so this check should be solely
+ * predicated on the sequence space of this segment.
+ * 3) That we modify the segment boundary check to be
+ * Last.ACK.Sent <= SEG.SEQ + SEG.Len
+ * instead of RFC1323's
+ * Last.ACK.Sent < SEG.SEQ + SEG.Len.
+ * This modified check allows us to overcome RFC1323's
+ * limitations as described in Stevens TCP/IP Illustrated
+ * Vol. 2 p.869. In such cases, we can still calculate the
+ * RTT correctly when RCV.NXT == Last.ACK.Sent.
+ */
+ if ((to.to_flags & TOF_TS) != 0 &&
+ SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
+ SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
+ ((thflags & (TH_SYN|TH_FIN)) != 0))) {
+ tp->ts_recent_age = ticks;
+ tp->ts_recent = to.to_tsval;
+ }
+
+ /*
+ * If a SYN is in the window, then this is an
+ * error and we send an RST and drop the connection.
+ */
+ if (thflags & TH_SYN) {
+ KASSERT(ti_locked == TI_WLOCKED,
+ ("tcp_do_segment: TH_SYN ti_locked %d", ti_locked));
+ INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
+
+ tp = tcp_drop(tp, ECONNRESET);
+ rstreason = BANDLIM_UNLIMITED;
+ goto drop;
+ }
+
+ /*
+ * If the ACK bit is off: if in SYN-RECEIVED state or the
+ * TF_NEEDSYN flag is on (half-synchronized state), then queue
+ * data for later processing; else drop segment and return.
+ */
+ if ((thflags & TH_ACK) == 0) {
+ if (tp->t_state == TCPS_SYN_RECEIVED ||
+ (tp->t_flags & TF_NEEDSYN))
+ goto step6;
+ else if (tp->t_flags & TF_ACKNOW)
+ goto dropafterack;
+ else
+ goto drop;
+ }
+
+ /*
+ * Ack processing.
+ */
+ switch (tp->t_state) {
+
+ /*
+ * In SYN_RECEIVED state, the ack ACKs our SYN, so enter
+ * ESTABLISHED state and continue processing.
+ * The ACK was checked above.
+ */
+ case TCPS_SYN_RECEIVED:
+
+ TCPSTAT_INC(tcps_connects);
+ soisconnected(so);
+ /* Do window scaling? */
+ if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) ==
+ (TF_RCVD_SCALE|TF_REQ_SCALE)) {
+ tp->rcv_scale = tp->request_r_scale;
+ tp->snd_wnd = tiwin;
+ }
+ /*
+ * Make transitions:
+ * SYN-RECEIVED -> ESTABLISHED
+ * SYN-RECEIVED* -> FIN-WAIT-1
+ */
+ tp->t_starttime = ticks;
+ if (tp->t_flags & TF_NEEDFIN) {
+ tp->t_state = TCPS_FIN_WAIT_1;
+ tp->t_flags &= ~TF_NEEDFIN;
+ } else {
+ tp->t_state = TCPS_ESTABLISHED;
+ tcp_timer_activate(tp, TT_KEEP, tcp_keepidle);
+ }
+ /*
+ * If segment contains data or ACK, will call tcp_reass()
+ * later; if not, do so now to pass queued data to user.
+ */
+ if (tlen == 0 && (thflags & TH_FIN) == 0)
+ (void) tcp_reass(tp, (struct tcphdr *)0, 0,
+ (struct mbuf *)0);
+ tp->snd_wl1 = th->th_seq - 1;
+ /* FALLTHROUGH */
+
+ /*
+ * In ESTABLISHED state: drop duplicate ACKs; respond with an
+ * ACK to out-of-range ACKs. If the ack is in the range
+ * tp->snd_una < th->th_ack <= tp->snd_max
+ * then advance tp->snd_una to th->th_ack and drop
+ * data from the retransmission queue. If this ACK reflects
+ * more up-to-date window information, we update ours.
+ */
+ case TCPS_ESTABLISHED:
+ case TCPS_FIN_WAIT_1:
+ case TCPS_FIN_WAIT_2:
+ case TCPS_CLOSE_WAIT:
+ case TCPS_CLOSING:
+ case TCPS_LAST_ACK:
+ if (SEQ_GT(th->th_ack, tp->snd_max)) {
+ TCPSTAT_INC(tcps_rcvacktoomuch);
+ goto dropafterack;
+ }
+ if ((tp->t_flags & TF_SACK_PERMIT) &&
+ ((to.to_flags & TOF_SACK) ||
+ !TAILQ_EMPTY(&tp->snd_holes)))
+ tcp_sack_doack(tp, &to, th->th_ack);
+ if (SEQ_LEQ(th->th_ack, tp->snd_una)) {
+ if (tlen == 0 && tiwin == tp->snd_wnd) {
+ TCPSTAT_INC(tcps_rcvdupack);
+ /*
+ * If we have outstanding data (other than
+ * a window probe), this is a completely
+ * duplicate ack (ie, window info didn't
+ * change), the ack is the biggest we've
+ * seen and we've seen exactly our rexmt
+ * threshold of them, assume a packet
+ * has been dropped and retransmit it.
+ * Kludge snd_nxt & the congestion
+ * window so we send only this one
+ * packet.
+ *
+ * We know we're losing at the current
+ * window size so do congestion avoidance
+ * (set ssthresh to half the current window
+ * and pull our congestion window back to
+ * the new ssthresh).
+ *
+ * Dup acks mean that packets have left the
+ * network (they're now cached at the receiver)
+ * so bump cwnd by the amount in the receiver
+ * to keep a constant cwnd packets in the
+ * network.
+ *
+ * When using TCP ECN, notify the peer that
+ * we reduced the cwnd.
+ */
+ if (!tcp_timer_active(tp, TT_REXMT) ||
+ th->th_ack != tp->snd_una)
+ tp->t_dupacks = 0;
+ else if (++tp->t_dupacks > tcprexmtthresh ||
+ ((V_tcp_do_newreno ||
+ (tp->t_flags & TF_SACK_PERMIT)) &&
+ IN_FASTRECOVERY(tp))) {
+ if ((tp->t_flags & TF_SACK_PERMIT) &&
+ IN_FASTRECOVERY(tp)) {
+ int awnd;
+
+ /*
+ * Compute the amount of data in flight first.
+ * We can inject new data into the pipe iff
+ * we have less than 1/2 the original window's
+ * worth of data in flight.
+ */
+ awnd = (tp->snd_nxt - tp->snd_fack) +
+ tp->sackhint.sack_bytes_rexmit;
+ if (awnd < tp->snd_ssthresh) {
+ tp->snd_cwnd += tp->t_maxseg;
+ if (tp->snd_cwnd > tp->snd_ssthresh)
+ tp->snd_cwnd = tp->snd_ssthresh;
+ }
+ } else
+ tp->snd_cwnd += tp->t_maxseg;
+ (void) tcp_output(tp);
+ goto drop;
+ } else if (tp->t_dupacks == tcprexmtthresh) {
+ tcp_seq onxt = tp->snd_nxt;
+
+ /*
+ * If we're doing sack, check to
+ * see if we're already in sack
+ * recovery. If we're not doing sack,
+ * check to see if we're in newreno
+ * recovery.
+ */
+ if (tp->t_flags & TF_SACK_PERMIT) {
+ if (IN_FASTRECOVERY(tp)) {
+ tp->t_dupacks = 0;
+ break;
+ }
+ } else if (V_tcp_do_newreno ||
+ V_tcp_do_ecn) {
+ if (SEQ_LEQ(th->th_ack,
+ tp->snd_recover)) {
+ tp->t_dupacks = 0;
+ break;
+ }
+ }
+ tcp_congestion_exp(tp);
+ tcp_timer_activate(tp, TT_REXMT, 0);
+ tp->t_rtttime = 0;
+ if (tp->t_flags & TF_SACK_PERMIT) {
+ TCPSTAT_INC(
+ tcps_sack_recovery_episode);
+ tp->sack_newdata = tp->snd_nxt;
+ tp->snd_cwnd = tp->t_maxseg;
+ (void) tcp_output(tp);
+ goto drop;
+ }
+ tp->snd_nxt = th->th_ack;
+ tp->snd_cwnd = tp->t_maxseg;
+ (void) tcp_output(tp);
+ KASSERT(tp->snd_limited <= 2,
+ ("%s: tp->snd_limited too big",
+ __func__));
+ tp->snd_cwnd = tp->snd_ssthresh +
+ tp->t_maxseg *
+ (tp->t_dupacks - tp->snd_limited);
+ if (SEQ_GT(onxt, tp->snd_nxt))
+ tp->snd_nxt = onxt;
+ goto drop;
+ } else if (V_tcp_do_rfc3042) {
+ u_long oldcwnd = tp->snd_cwnd;
+ tcp_seq oldsndmax = tp->snd_max;
+ u_int sent;
+
+ KASSERT(tp->t_dupacks == 1 ||
+ tp->t_dupacks == 2,
+ ("%s: dupacks not 1 or 2",
+ __func__));
+ if (tp->t_dupacks == 1)
+ tp->snd_limited = 0;
+ tp->snd_cwnd =
+ (tp->snd_nxt - tp->snd_una) +
+ (tp->t_dupacks - tp->snd_limited) *
+ tp->t_maxseg;
+ (void) tcp_output(tp);
+ sent = tp->snd_max - oldsndmax;
+ if (sent > tp->t_maxseg) {
+ KASSERT((tp->t_dupacks == 2 &&
+ tp->snd_limited == 0) ||
+ (sent == tp->t_maxseg + 1 &&
+ tp->t_flags & TF_SENTFIN),
+ ("%s: sent too much",
+ __func__));
+ tp->snd_limited = 2;
+ } else if (sent > 0)
+ ++tp->snd_limited;
+ tp->snd_cwnd = oldcwnd;
+ goto drop;
+ }
+ } else
+ tp->t_dupacks = 0;
+ break;
+ }
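+ /*
+ * Illustration of the duplicate ACK ladder above: dupacks 1
+ * and 2 may trigger RFC3042 limited transmit (one new segment
+ * each, with cwnd restored afterwards); dupack 3
+ * (tcprexmtthresh) enters fast retransmit/recovery; every
+ * further dupack inflates cwnd by one maxseg so new data keeps
+ * flowing while the loss is repaired.
+ */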
+
+ KASSERT(SEQ_GT(th->th_ack, tp->snd_una),
+ ("%s: th_ack <= snd_una", __func__));
+
+ /*
+ * If the congestion window was inflated to account
+ * for the other side's cached packets, retract it.
+ */
+ if (V_tcp_do_newreno || (tp->t_flags & TF_SACK_PERMIT)) {
+ if (IN_FASTRECOVERY(tp)) {
+ if (SEQ_LT(th->th_ack, tp->snd_recover)) {
+ if (tp->t_flags & TF_SACK_PERMIT)
+ tcp_sack_partialack(tp, th);
+ else
+ tcp_newreno_partial_ack(tp, th);
+ } else {
+ /*
+ * Out of fast recovery.
+ * Window inflation should have left us
+ * with approximately snd_ssthresh
+ * outstanding data.
+ * But in case we would be inclined to
+ * send a burst, better to do it via
+ * the slow start mechanism.
+ */
+ if (SEQ_GT(th->th_ack +
+ tp->snd_ssthresh,
+ tp->snd_max))
+ tp->snd_cwnd = tp->snd_max -
+ th->th_ack +
+ tp->t_maxseg;
+ else
+ tp->snd_cwnd = tp->snd_ssthresh;
+ }
+ }
+ } else {
+ if (tp->t_dupacks >= tcprexmtthresh &&
+ tp->snd_cwnd > tp->snd_ssthresh)
+ tp->snd_cwnd = tp->snd_ssthresh;
+ }
+ tp->t_dupacks = 0;
+ /*
+ * If we reach this point, ACK is not a duplicate,
+ * i.e., it ACKs something we sent.
+ */
+ if (tp->t_flags & TF_NEEDSYN) {
+ /*
+ * T/TCP: Connection was half-synchronized, and our
+ * SYN has been ACK'd (so connection is now fully
+ * synchronized). Go to non-starred state,
+ * increment snd_una for ACK of SYN, and check if
+ * we can do window scaling.
+ */
+ tp->t_flags &= ~TF_NEEDSYN;
+ tp->snd_una++;
+ /* Do window scaling? */
+ if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) ==
+ (TF_RCVD_SCALE|TF_REQ_SCALE)) {
+ tp->rcv_scale = tp->request_r_scale;
+ /* Send window already scaled. */
+ }
+ }
+
+process_ACK:
+ INP_INFO_LOCK_ASSERT(&V_tcbinfo);
+ KASSERT(ti_locked == TI_RLOCKED || ti_locked == TI_WLOCKED,
+ ("tcp_input: process_ACK ti_locked %d", ti_locked));
+ INP_WLOCK_ASSERT(tp->t_inpcb);
+
+ acked = th->th_ack - tp->snd_una;
+ TCPSTAT_INC(tcps_rcvackpack);
+ TCPSTAT_ADD(tcps_rcvackbyte, acked);
+
+ /*
+ * If we just performed our first retransmit, and the ACK
+ * arrives within our recovery window, then it was a mistake
+ * to do the retransmit in the first place. Recover our
+ * original cwnd and ssthresh, and proceed to transmit where
+ * we left off.
+ */
+ if (tp->t_rxtshift == 1 && (int)(ticks - tp->t_badrxtwin) < 0) {
+ TCPSTAT_INC(tcps_sndrexmitbad);
+ tp->snd_cwnd = tp->snd_cwnd_prev;
+ tp->snd_ssthresh = tp->snd_ssthresh_prev;
+ tp->snd_recover = tp->snd_recover_prev;
+ if (tp->t_flags & TF_WASFRECOVERY)
+ ENTER_FASTRECOVERY(tp);
+ tp->snd_nxt = tp->snd_max;
+ tp->t_badrxtwin = 0; /* XXX probably not required */
+ }
+
+ /*
+ * If we have a timestamp reply, update smoothed
+ * round trip time. If no timestamp is present but
+ * transmit timer is running and timed sequence
+ * number was acked, update smoothed round trip time.
+ * Since we now have an rtt measurement, cancel the
+ * timer backoff (cf., Phil Karn's retransmit alg.).
+ * Recompute the initial retransmit timer.
+ *
+ * Some boxes send broken timestamp replies
+ * during the SYN+ACK phase; ignore timestamps
+ * of 0, or we could calculate a huge RTT and
+ * blow up the retransmit timer.
+ */
+ if ((to.to_flags & TOF_TS) != 0 &&
+ to.to_tsecr) {
+ if (!tp->t_rttlow || tp->t_rttlow > ticks - to.to_tsecr)
+ tp->t_rttlow = ticks - to.to_tsecr;
+ tcp_xmit_timer(tp, ticks - to.to_tsecr + 1);
+ } else if (tp->t_rtttime && SEQ_GT(th->th_ack, tp->t_rtseq)) {
+ if (!tp->t_rttlow || tp->t_rttlow > ticks - tp->t_rtttime)
+ tp->t_rttlow = ticks - tp->t_rtttime;
+ tcp_xmit_timer(tp, ticks - tp->t_rtttime);
+ }
+ tcp_xmit_bandwidth_limit(tp, th->th_ack);
+
+ /*
+ * If all outstanding data is acked, stop retransmit
+ * timer and remember to restart (more output or persist).
+ * If there is more data to be acked, restart retransmit
+ * timer, using current (possibly backed-off) value.
+ */
+ if (th->th_ack == tp->snd_max) {
+ tcp_timer_activate(tp, TT_REXMT, 0);
+ needoutput = 1;
+ } else if (!tcp_timer_active(tp, TT_PERSIST))
+ tcp_timer_activate(tp, TT_REXMT, tp->t_rxtcur);
+
+ /*
+ * If no data (only SYN) was ACK'd,
+ * skip rest of ACK processing.
+ */
+ if (acked == 0)
+ goto step6;
+
+ /*
+ * When new data is acked, open the congestion window.
+ * Method depends on which congestion control state we're
+ * in (slow start or cong avoid) and if ABC (RFC 3465) is
+ * enabled.
+ *
+ * slow start: cwnd <= ssthresh
+ * cong avoid: cwnd > ssthresh
+ *
+ * slow start and ABC (RFC 3465):
+ * Grow cwnd exponentially by the amount of data
+ * ACKed capping the max increment per ACK to
+ * (abc_l_var * maxseg) bytes.
+ *
+ * slow start without ABC (RFC 2581):
+ * Grow cwnd exponentially by maxseg per ACK.
+ *
+ * cong avoid and ABC (RFC 3465):
+ * Grow cwnd linearly by maxseg per RTT for each
+ * cwnd worth of ACKed data.
+ *
+ * cong avoid without ABC (RFC 2581):
+ * Grow cwnd linearly by approximately maxseg per RTT using
+ * maxseg^2 / cwnd per ACK as the increment.
+ * If cwnd > maxseg^2, fix the cwnd increment at 1 byte to
+ * avoid capping cwnd.
+ */
+ if ((!V_tcp_do_newreno && !(tp->t_flags & TF_SACK_PERMIT)) ||
+ !IN_FASTRECOVERY(tp)) {
+ u_int cw = tp->snd_cwnd;
+ u_int incr = tp->t_maxseg;
+ /* In congestion avoidance? */
+ if (cw > tp->snd_ssthresh) {
+ if (V_tcp_do_rfc3465) {
+ tp->t_bytes_acked += acked;
+ if (tp->t_bytes_acked >= tp->snd_cwnd)
+ tp->t_bytes_acked -= cw;
+ else
+ incr = 0;
+ }
+ else
+ incr = max((incr * incr / cw), 1);
+ /*
+ * In slow-start with ABC enabled and no RTO in sight?
+ * (Must not use abc_l_var > 1 if slow starting after an
+ * RTO. On RTO, snd_nxt = snd_una, so the snd_nxt ==
+ * snd_max check is sufficient to handle this).
+ */
+ } else if (V_tcp_do_rfc3465 &&
+ tp->snd_nxt == tp->snd_max)
+ incr = min(acked,
+ V_tcp_abc_l_var * tp->t_maxseg);
+ /* ABC is on by default, so (incr == 0) frequently. */
+ if (incr > 0)
+ tp->snd_cwnd = min(cw+incr, TCP_MAXWIN<<tp->snd_scale);
+ }
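+ /*
+ * Worked example (no ABC): with t_maxseg = 1460 and
+ * cwnd = 100000 > ssthresh, congestion avoidance adds
+ * 1460 * 1460 / 100000 ~= 21 bytes per ACK, i.e. about one
+ * maxseg per cwnd's worth of ACKs; in slow start each ACK
+ * grows cwnd by a full maxseg instead.
+ */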
+ SOCKBUF_LOCK(&so->so_snd);
+ if (acked > so->so_snd.sb_cc) {
+ tp->snd_wnd -= so->so_snd.sb_cc;
+ sbdrop_locked(&so->so_snd, (int)so->so_snd.sb_cc);
+ ourfinisacked = 1;
+ } else {
+ sbdrop_locked(&so->so_snd, acked);
+ tp->snd_wnd -= acked;
+ ourfinisacked = 0;
+ }
+ /* NB: sowwakeup_locked() does an implicit unlock. */
+ sowwakeup_locked(so);
+ /* Detect una wraparound. */
+ if ((V_tcp_do_newreno || (tp->t_flags & TF_SACK_PERMIT)) &&
+ !IN_FASTRECOVERY(tp) &&
+ SEQ_GT(tp->snd_una, tp->snd_recover) &&
+ SEQ_LEQ(th->th_ack, tp->snd_recover))
+ tp->snd_recover = th->th_ack - 1;
+ if ((V_tcp_do_newreno || (tp->t_flags & TF_SACK_PERMIT)) &&
+ IN_FASTRECOVERY(tp) &&
+ SEQ_GEQ(th->th_ack, tp->snd_recover)) {
+ EXIT_FASTRECOVERY(tp);
+ tp->t_bytes_acked = 0;
+ }
+ tp->snd_una = th->th_ack;
+ if (tp->t_flags & TF_SACK_PERMIT) {
+ if (SEQ_GT(tp->snd_una, tp->snd_recover))
+ tp->snd_recover = tp->snd_una;
+ }
+ if (SEQ_LT(tp->snd_nxt, tp->snd_una))
+ tp->snd_nxt = tp->snd_una;
+
+ switch (tp->t_state) {
+
+ /*
+ * In FIN_WAIT_1 STATE in addition to the processing
+ * for the ESTABLISHED state if our FIN is now acknowledged
+ * then enter FIN_WAIT_2.
+ */
+ case TCPS_FIN_WAIT_1:
+ if (ourfinisacked) {
+ /*
+ * If we can't receive any more
+ * data, then closing user can proceed.
+ * Starting the timer is contrary to the
+ * specification, but if we don't get a FIN
+ * we'll hang forever.
+ *
+ * XXXjl:
+ * we should release the tp also, and use a
+ * compressed state.
+ */
+ if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
+ int timeout;
+
+ soisdisconnected(so);
+ timeout = (tcp_fast_finwait2_recycle) ?
+ tcp_finwait2_timeout : tcp_maxidle;
+ tcp_timer_activate(tp, TT_2MSL, timeout);
+ }
+ tp->t_state = TCPS_FIN_WAIT_2;
+ }
+ break;
+
+ /*
+ * In CLOSING STATE in addition to the processing for
+ * the ESTABLISHED state if the ACK acknowledges our FIN
+ * then enter the TIME-WAIT state, otherwise ignore
+ * the segment.
+ */
+ case TCPS_CLOSING:
+ if (ourfinisacked) {
+ INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
+ tcp_twstart(tp);
+ INP_INFO_WUNLOCK(&V_tcbinfo);
+ m_freem(m);
+ return;
+ }
+ break;
+
+ /*
+ * In LAST_ACK, we may still be waiting for data to drain
+ * and/or to be acked, as well as for the ack of our FIN.
+ * If our FIN is now acknowledged, delete the TCB,
+ * enter the closed state and return.
+ */
+ case TCPS_LAST_ACK:
+ if (ourfinisacked) {
+ INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
+ tp = tcp_close(tp);
+ goto drop;
+ }
+ break;
+ }
+ }
+
+step6:
+ INP_INFO_LOCK_ASSERT(&V_tcbinfo);
+ KASSERT(ti_locked == TI_RLOCKED || ti_locked == TI_WLOCKED,
+ ("tcp_do_segment: step6 ti_locked %d", ti_locked));
+ INP_WLOCK_ASSERT(tp->t_inpcb);
+
+ /*
+ * Update window information.
+ * Don't look at window if no ACK: TACs send garbage on first SYN.
+ */
+ if ((thflags & TH_ACK) &&
+ (SEQ_LT(tp->snd_wl1, th->th_seq) ||
+ (tp->snd_wl1 == th->th_seq && (SEQ_LT(tp->snd_wl2, th->th_ack) ||
+ (tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd))))) {
+ /* keep track of pure window updates */
+ if (tlen == 0 &&
+ tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd)
+ TCPSTAT_INC(tcps_rcvwinupd);
+ tp->snd_wnd = tiwin;
+ tp->snd_wl1 = th->th_seq;
+ tp->snd_wl2 = th->th_ack;
+ if (tp->snd_wnd > tp->max_sndwnd)
+ tp->max_sndwnd = tp->snd_wnd;
+ needoutput = 1;
+ }
+
+ /*
+ * Process segments with URG.
+ */
+ if ((thflags & TH_URG) && th->th_urp &&
+ TCPS_HAVERCVDFIN(tp->t_state) == 0) {
+ /*
+ * This is a kludge, but if we receive and accept
+ * random urgent pointers, we'll crash in
+ * soreceive. It's hard to imagine someone
+ * actually wanting to send this much urgent data.
+ */
+ SOCKBUF_LOCK(&so->so_rcv);
+ if (th->th_urp + so->so_rcv.sb_cc > sb_max) {
+ th->th_urp = 0; /* XXX */
+ thflags &= ~TH_URG; /* XXX */
+ SOCKBUF_UNLOCK(&so->so_rcv); /* XXX */
+ goto dodata; /* XXX */
+ }
+ /*
+ * If this segment advances the known urgent pointer,
+ * then mark the data stream. This should not happen
+ * in CLOSE_WAIT, CLOSING, LAST_ACK or TIME_WAIT STATES since
+ * a FIN has been received from the remote side.
+ * In these states we ignore the URG.
+ *
+ * According to RFC961 (Assigned Protocols),
+ * the urgent pointer points to the last octet
+ * of urgent data. We continue, however,
+ * to consider it to indicate the first octet
+ * of data past the urgent section as the original
+ * spec states (in one of two places).
+ */
+ if (SEQ_GT(th->th_seq+th->th_urp, tp->rcv_up)) {
+ tp->rcv_up = th->th_seq + th->th_urp;
+ so->so_oobmark = so->so_rcv.sb_cc +
+ (tp->rcv_up - tp->rcv_nxt) - 1;
+ if (so->so_oobmark == 0)
+ so->so_rcv.sb_state |= SBS_RCVATMARK;
+ sohasoutofband(so);
+ tp->t_oobflags &= ~(TCPOOB_HAVEDATA | TCPOOB_HADDATA);
+ }
+ SOCKBUF_UNLOCK(&so->so_rcv);
+ /*
+ * Remove out-of-band data so it doesn't get presented to the user.
+ * This can happen independent of advancing the URG pointer,
+ * but if two URG's are pending at once, some out-of-band
+ * data may creep in... ick.
+ */
+ if (th->th_urp <= (u_long)tlen &&
+ !(so->so_options & SO_OOBINLINE)) {
+ /* hdr drop is delayed */
+ tcp_pulloutofband(so, th, m, drop_hdrlen);
+ }
+ } else {
+ /*
+ * If no out of band data is expected,
+ * pull receive urgent pointer along
+ * with the receive window.
+ */
+ if (SEQ_GT(tp->rcv_nxt, tp->rcv_up))
+ tp->rcv_up = tp->rcv_nxt;
+ }
+dodata: /* XXX */
+ INP_INFO_LOCK_ASSERT(&V_tcbinfo);
+ KASSERT(ti_locked == TI_RLOCKED || ti_locked == TI_WLOCKED,
+ ("tcp_do_segment: dodata ti_locked %d", ti_locked));
+ INP_WLOCK_ASSERT(tp->t_inpcb);
+
+ /*
+ * Process the segment text, merging it into the TCP sequencing queue,
+ * and arranging for acknowledgment of receipt if necessary.
+ * This process logically involves adjusting tp->rcv_wnd as data
+ * is presented to the user (this happens in tcp_usrreq.c,
+ * case PRU_RCVD). If a FIN has already been received on this
+ * connection then we just ignore the text.
+ */
+ if ((tlen || (thflags & TH_FIN)) &&
+ TCPS_HAVERCVDFIN(tp->t_state) == 0) {
+ tcp_seq save_start = th->th_seq;
+ m_adj(m, drop_hdrlen); /* delayed header drop */
+ /*
+ * Insert segment which includes th into TCP reassembly queue
+ * with control block tp. Set thflags to whether reassembly now
+ * includes a segment with FIN. This handles the common case
+ * inline (segment is the next to be received on an established
+ * connection, and the queue is empty), avoiding linkage into
+ * and removal from the queue and repetition of various
+ * conversions.
+ * Set DELACK for segments received in order, but ack
+ * immediately when segments are out of order (so
+ * fast retransmit can work).
+ */
+ if (th->th_seq == tp->rcv_nxt &&
+ LIST_EMPTY(&tp->t_segq) &&
+ TCPS_HAVEESTABLISHED(tp->t_state)) {
+ if (DELAY_ACK(tp))
+ tp->t_flags |= TF_DELACK;
+ else
+ tp->t_flags |= TF_ACKNOW;
+ tp->rcv_nxt += tlen;
+ thflags = th->th_flags & TH_FIN;
+ TCPSTAT_INC(tcps_rcvpack);
+ TCPSTAT_ADD(tcps_rcvbyte, tlen);
+ ND6_HINT(tp);
+ SOCKBUF_LOCK(&so->so_rcv);
+ if (so->so_rcv.sb_state & SBS_CANTRCVMORE)
+ m_freem(m);
+ else
+ sbappendstream_locked(&so->so_rcv, m);
+ /* NB: sorwakeup_locked() does an implicit unlock. */
+ sorwakeup_locked(so);
+ } else {
+ /*
+ * XXX: Due to the header drop above, "th" is
+ * theoretically invalid by now. Fortunately
+ * m_adj() doesn't actually free any mbufs
+ * when trimming from the head.
+ */
+ thflags = tcp_reass(tp, th, &tlen, m);
+ tp->t_flags |= TF_ACKNOW;
+ }
+ if (tlen > 0 && (tp->t_flags & TF_SACK_PERMIT))
+ tcp_update_sack_list(tp, save_start, save_start + tlen);
+#if 0
+ /*
+ * Note the amount of data that peer has sent into
+ * our window, in order to estimate the sender's
+ * buffer size.
+ * XXX: Unused.
+ */
+ len = so->so_rcv.sb_hiwat - (tp->rcv_adv - tp->rcv_nxt);
+#endif
+ } else {
+ m_freem(m);
+ thflags &= ~TH_FIN;
+ }
+
+ /*
+ * If FIN is received ACK the FIN and let the user know
+ * that the connection is closing.
+ */
+ if (thflags & TH_FIN) {
+ if (TCPS_HAVERCVDFIN(tp->t_state) == 0) {
+ socantrcvmore(so);
+ /*
+ * If connection is half-synchronized
+ * (ie NEEDSYN flag on) then delay ACK,
+ * so it may be piggybacked when SYN is sent.
+ * Otherwise, since we received a FIN then no
+ * more input can be expected, send ACK now.
+ */
+ if (tp->t_flags & TF_NEEDSYN)
+ tp->t_flags |= TF_DELACK;
+ else
+ tp->t_flags |= TF_ACKNOW;
+ tp->rcv_nxt++;
+ }
+ switch (tp->t_state) {
+
+ /*
+ * In SYN_RECEIVED and ESTABLISHED STATES
+ * enter the CLOSE_WAIT state.
+ */
+ case TCPS_SYN_RECEIVED:
+ tp->t_starttime = ticks;
+ /* FALLTHROUGH */
+ case TCPS_ESTABLISHED:
+ tp->t_state = TCPS_CLOSE_WAIT;
+ break;
+
+ /*
+ * If still in FIN_WAIT_1 STATE FIN has not been acked so
+ * enter the CLOSING state.
+ */
+ case TCPS_FIN_WAIT_1:
+ tp->t_state = TCPS_CLOSING;
+ break;
+
+ /*
+ * In FIN_WAIT_2 state enter the TIME_WAIT state,
+ * starting the time-wait timer, turning off the other
+ * standard timers.
+ */
+ case TCPS_FIN_WAIT_2:
+ INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
+ KASSERT(ti_locked == TI_WLOCKED, ("%s: dodata "
+ "TCP_FIN_WAIT_2 ti_locked: %d", __func__,
+ ti_locked));
+
+ tcp_twstart(tp);
+ INP_INFO_WUNLOCK(&V_tcbinfo);
+ return;
+ }
+ }
+ if (ti_locked == TI_RLOCKED)
+ INP_INFO_RUNLOCK(&V_tcbinfo);
+ else if (ti_locked == TI_WLOCKED)
+ INP_INFO_WUNLOCK(&V_tcbinfo);
+ else
+ panic("%s: dodata epilogue ti_locked %d", __func__,
+ ti_locked);
+ ti_locked = TI_UNLOCKED;
+
+#ifdef TCPDEBUG
+ if (so->so_options & SO_DEBUG)
+ tcp_trace(TA_INPUT, ostate, tp, (void *)tcp_saveipgen,
+ &tcp_savetcp, 0);
+#endif
+
+ /*
+ * Return any desired output.
+ */
+ if (needoutput || (tp->t_flags & TF_ACKNOW))
+ (void) tcp_output(tp);
+
+check_delack:
+ KASSERT(ti_locked == TI_UNLOCKED, ("%s: check_delack ti_locked %d",
+ __func__, ti_locked));
+ INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
+ INP_WLOCK_ASSERT(tp->t_inpcb);
+
+ if (tp->t_flags & TF_DELACK) {
+ tp->t_flags &= ~TF_DELACK;
+ tcp_timer_activate(tp, TT_DELACK, tcp_delacktime);
+ }
+ INP_WUNLOCK(tp->t_inpcb);
+ return;
+
+dropafterack:
+ KASSERT(ti_locked == TI_RLOCKED || ti_locked == TI_WLOCKED,
+ ("tcp_do_segment: dropafterack ti_locked %d", ti_locked));
+
+ /*
+ * Generate an ACK dropping incoming segment if it occupies
+ * sequence space, where the ACK reflects our state.
+ *
+ * We can now skip the test for the RST flag since all
+ * paths to this code happen after packets containing
+ * RST have been dropped.
+ *
+ * In the SYN-RECEIVED state, don't send an ACK unless the
+ * segment we received passes the SYN-RECEIVED ACK test.
+ * If it fails send a RST. This breaks the loop in the
+ * "LAND" DoS attack, and also prevents an ACK storm
+ * between two listening ports that have been sent forged
+ * SYN segments, each with the source address of the other.
+ */
+ if (tp->t_state == TCPS_SYN_RECEIVED && (thflags & TH_ACK) &&
+ (SEQ_GT(tp->snd_una, th->th_ack) ||
+ SEQ_GT(th->th_ack, tp->snd_max)) ) {
+ rstreason = BANDLIM_RST_OPENPORT;
+ goto dropwithreset;
+ }
+#ifdef TCPDEBUG
+ if (so->so_options & SO_DEBUG)
+ tcp_trace(TA_DROP, ostate, tp, (void *)tcp_saveipgen,
+ &tcp_savetcp, 0);
+#endif
+ if (ti_locked == TI_RLOCKED)
+ INP_INFO_RUNLOCK(&V_tcbinfo);
+ else if (ti_locked == TI_WLOCKED)
+ INP_INFO_WUNLOCK(&V_tcbinfo);
+ else
+ panic("%s: dropafterack epilogue ti_locked %d", __func__,
+ ti_locked);
+ ti_locked = TI_UNLOCKED;
+
+ tp->t_flags |= TF_ACKNOW;
+ (void) tcp_output(tp);
+ INP_WUNLOCK(tp->t_inpcb);
+ m_freem(m);
+ return;
+
+dropwithreset:
+ if (ti_locked == TI_RLOCKED)
+ INP_INFO_RUNLOCK(&V_tcbinfo);
+ else if (ti_locked == TI_WLOCKED)
+ INP_INFO_WUNLOCK(&V_tcbinfo);
+ else
+ panic("%s: dropwithreset ti_locked %d", __func__, ti_locked);
+ ti_locked = TI_UNLOCKED;
+
+ if (tp != NULL) {
+ tcp_dropwithreset(m, th, tp, tlen, rstreason);
+ INP_WUNLOCK(tp->t_inpcb);
+ } else
+ tcp_dropwithreset(m, th, NULL, tlen, rstreason);
+ return;
+
+drop:
+ if (ti_locked == TI_RLOCKED)
+ INP_INFO_RUNLOCK(&V_tcbinfo);
+ else if (ti_locked == TI_WLOCKED)
+ INP_INFO_WUNLOCK(&V_tcbinfo);
+#ifdef INVARIANTS
+ else
+ INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
+#endif
+ ti_locked = TI_UNLOCKED;
+
+ /*
+ * Drop space held by incoming segment and return.
+ */
+#ifdef TCPDEBUG
+ if (tp == NULL || (tp->t_inpcb->inp_socket->so_options & SO_DEBUG))
+ tcp_trace(TA_DROP, ostate, tp, (void *)tcp_saveipgen,
+ &tcp_savetcp, 0);
+#endif
+ if (tp != NULL)
+ INP_WUNLOCK(tp->t_inpcb);
+ m_freem(m);
+}
+
+/*
+ * Issue RST and make ACK acceptable to originator of segment.
+ * The mbuf must still include the original packet header.
+ * tp may be NULL.
+ */
+static void
+tcp_dropwithreset(struct mbuf *m, struct tcphdr *th, struct tcpcb *tp,
+ int tlen, int rstreason)
+{
+ struct ip *ip;
+#ifdef INET6
+ struct ip6_hdr *ip6;
+#endif
+
+ if (tp != NULL) {
+ INP_WLOCK_ASSERT(tp->t_inpcb);
+ }
+
+ /* Don't bother if destination was broadcast/multicast. */
+ if ((th->th_flags & TH_RST) || m->m_flags & (M_BCAST|M_MCAST))
+ goto drop;
+#ifdef INET6
+ if (mtod(m, struct ip *)->ip_v == 6) {
+ ip6 = mtod(m, struct ip6_hdr *);
+ if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) ||
+ IN6_IS_ADDR_MULTICAST(&ip6->ip6_src))
+ goto drop;
+ /* IPv6 anycast check is done at tcp6_input() */
+ } else
+#endif
+ {
+ ip = mtod(m, struct ip *);
+ if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) ||
+ IN_MULTICAST(ntohl(ip->ip_src.s_addr)) ||
+ ip->ip_src.s_addr == htonl(INADDR_BROADCAST) ||
+ in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif))
+ goto drop;
+ }
+
+ /* Perform bandwidth limiting. */
+ if (badport_bandlim(rstreason) < 0)
+ goto drop;
+
+ /* tcp_respond consumes the mbuf chain. */
+ if (th->th_flags & TH_ACK) {
+ tcp_respond(tp, mtod(m, void *), th, m, (tcp_seq)0,
+ th->th_ack, TH_RST);
+ } else {
+ if (th->th_flags & TH_SYN)
+ tlen++;
+ tcp_respond(tp, mtod(m, void *), th, m, th->th_seq+tlen,
+ (tcp_seq)0, TH_RST|TH_ACK);
+ }
+ return;
+drop:
+ m_freem(m);
+}
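+
+/*
+ * Illustration of the reset generation above, following the RFC793
+ * rules: a segment carrying ACK is answered with
+ * <SEQ=SEG.ACK><CTL=RST>; a segment without ACK is answered with
+ * <SEQ=0><ACK=SEG.SEQ+SEG.LEN><CTL=RST,ACK>, counting the SYN flag as
+ * one octet of sequence space (hence the tlen++ above).
+ */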
+
+/*
+ * Parse TCP options and place in tcpopt.
+ */
+static void
+tcp_dooptions(struct tcpopt *to, u_char *cp, int cnt, int flags)
+{
+ int opt, optlen;
+
+ to->to_flags = 0;
+ for (; cnt > 0; cnt -= optlen, cp += optlen) {
+ opt = cp[0];
+ if (opt == TCPOPT_EOL)
+ break;
+ if (opt == TCPOPT_NOP)
+ optlen = 1;
+ else {
+ if (cnt < 2)
+ break;
+ optlen = cp[1];
+ if (optlen < 2 || optlen > cnt)
+ break;
+ }
+ switch (opt) {
+ case TCPOPT_MAXSEG:
+ if (optlen != TCPOLEN_MAXSEG)
+ continue;
+ if (!(flags & TO_SYN))
+ continue;
+ to->to_flags |= TOF_MSS;
+ bcopy((char *)cp + 2,
+ (char *)&to->to_mss, sizeof(to->to_mss));
+ to->to_mss = ntohs(to->to_mss);
+ break;
+ case TCPOPT_WINDOW:
+ if (optlen != TCPOLEN_WINDOW)
+ continue;
+ if (!(flags & TO_SYN))
+ continue;
+ to->to_flags |= TOF_SCALE;
+ to->to_wscale = min(cp[2], TCP_MAX_WINSHIFT);
+ break;
+ case TCPOPT_TIMESTAMP:
+ if (optlen != TCPOLEN_TIMESTAMP)
+ continue;
+ to->to_flags |= TOF_TS;
+ bcopy((char *)cp + 2,
+ (char *)&to->to_tsval, sizeof(to->to_tsval));
+ to->to_tsval = ntohl(to->to_tsval);
+ bcopy((char *)cp + 6,
+ (char *)&to->to_tsecr, sizeof(to->to_tsecr));
+ to->to_tsecr = ntohl(to->to_tsecr);
+ break;
+#ifdef TCP_SIGNATURE
+ /*
+ * XXX In order to reply to a host which has set the
+ * TCP_SIGNATURE option in its initial SYN, we have to
+ * record the fact that the option was observed here
+ * for the syncache code to perform the correct response.
+ */
+ case TCPOPT_SIGNATURE:
+ if (optlen != TCPOLEN_SIGNATURE)
+ continue;
+ to->to_flags |= TOF_SIGNATURE;
+ to->to_signature = cp + 2;
+ break;
+#endif
+ case TCPOPT_SACK_PERMITTED:
+ if (optlen != TCPOLEN_SACK_PERMITTED)
+ continue;
+ if (!(flags & TO_SYN))
+ continue;
+ if (!V_tcp_do_sack)
+ continue;
+ to->to_flags |= TOF_SACKPERM;
+ break;
+ case TCPOPT_SACK:
+ if (optlen <= 2 || (optlen - 2) % TCPOLEN_SACK != 0)
+ continue;
+ if (flags & TO_SYN)
+ continue;
+ to->to_flags |= TOF_SACK;
+ to->to_nsacks = (optlen - 2) / TCPOLEN_SACK;
+ to->to_sacks = cp + 2;
+ TCPSTAT_INC(tcps_sack_rcv_blocks);
+ break;
+ default:
+ continue;
+ }
+ }
+}
+
+/*
+ * Pull out of band byte out of a segment so
+ * it doesn't appear in the user's data queue.
+ * It is still reflected in the segment length for
+ * sequencing purposes.
+ */
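+/*
+ * The urgent pointer (th_urp) counts bytes of urgent data from the
+ * start of the segment, and BSD treats the last such byte as the
+ * out-of-band byte, so its offset within the chain is
+ * off + th_urp - 1, which is how cnt is initialized below.
+ */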
+static void
+tcp_pulloutofband(struct socket *so, struct tcphdr *th, struct mbuf *m,
+ int off)
+{
+ int cnt = off + th->th_urp - 1;
+
+ while (cnt >= 0) {
+ if (m->m_len > cnt) {
+ char *cp = mtod(m, caddr_t) + cnt;
+ struct tcpcb *tp = sototcpcb(so);
+
+ INP_WLOCK_ASSERT(tp->t_inpcb);
+
+ tp->t_iobc = *cp;
+ tp->t_oobflags |= TCPOOB_HAVEDATA;
+ bcopy(cp+1, cp, (unsigned)(m->m_len - cnt - 1));
+ m->m_len--;
+ if (m->m_flags & M_PKTHDR)
+ m->m_pkthdr.len--;
+ return;
+ }
+ cnt -= m->m_len;
+ m = m->m_next;
+ if (m == NULL)
+ break;
+ }
+ panic("tcp_pulloutofband");
+}
+
+/*
+ * Collect new round-trip time estimate
+ * and update averages and current timeout.
+ */
+static void
+tcp_xmit_timer(struct tcpcb *tp, int rtt)
+{
+ int delta;
+
+ INP_WLOCK_ASSERT(tp->t_inpcb);
+
+ TCPSTAT_INC(tcps_rttupdated);
+ tp->t_rttupdated++;
+ if (tp->t_srtt != 0) {
+ /*
+ * srtt is stored as fixed point with 5 bits after the
+ * binary point (i.e., scaled by 32). The following magic
+ * is equivalent to the smoothing algorithm in rfc793 with
+ * an alpha of .875 (srtt = rtt/8 + srtt*7/8 in fixed
+ * point). Adjust rtt to origin 0.
+ */
+ delta = ((rtt - 1) << TCP_DELTA_SHIFT)
+ - (tp->t_srtt >> (TCP_RTT_SHIFT - TCP_DELTA_SHIFT));
+
+ if ((tp->t_srtt += delta) <= 0)
+ tp->t_srtt = 1;
+
+ /*
+ * We accumulate a smoothed rtt variance (actually, a
+ * smoothed mean difference), then set the retransmit
+ * timer to smoothed rtt + 4 times the smoothed variance.
+ * rttvar is stored as fixed point with 4 bits after the
+ * binary point (scaled by 16). The following is
+ * equivalent to rfc793 smoothing with an alpha of .75
+ * (rttvar = rttvar*3/4 + |delta| / 4). This replaces
+ * rfc793's wired-in beta.
+ */
+ if (delta < 0)
+ delta = -delta;
+ delta -= tp->t_rttvar >> (TCP_RTTVAR_SHIFT - TCP_DELTA_SHIFT);
+ if ((tp->t_rttvar += delta) <= 0)
+ tp->t_rttvar = 1;
+ if (tp->t_rttbest > tp->t_srtt + tp->t_rttvar)
+ tp->t_rttbest = tp->t_srtt + tp->t_rttvar;
+ } else {
+ /*
+ * No rtt measurement yet - use the unsmoothed rtt.
+ * Set the variance to half the rtt (so our first
+ * retransmit happens at 3*rtt).
+ */
+ tp->t_srtt = rtt << TCP_RTT_SHIFT;
+ tp->t_rttvar = rtt << (TCP_RTTVAR_SHIFT - 1);
+ tp->t_rttbest = tp->t_srtt + tp->t_rttvar;
+ }
+ tp->t_rtttime = 0;
+ tp->t_rxtshift = 0;
+
+ /*
+ * the retransmit should happen at rtt + 4 * rttvar.
+ * Because of the way we do the smoothing, srtt and rttvar
+ * will each average +1/2 tick of bias. When we compute
+ * the retransmit timer, we want 1/2 tick of rounding and
+ * 1 extra tick because of +-1/2 tick uncertainty in the
+ * firing of the timer. The bias will give us exactly the
+ * 1.5 tick we need. But, because the bias is
+ * statistical, we have to test that we don't drop below
+ * the minimum feasible timer (which is 2 ticks).
+ */
+ TCPT_RANGESET(tp->t_rxtcur, TCP_REXMTVAL(tp),
+ max(tp->t_rttmin, rtt + 2), TCPTV_REXMTMAX);
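+ /*
+ * For example, with srtt around 8 ticks and rttvar around 2 ticks,
+ * the timer lands near 8 + 4 * 2 = 16 ticks, clamped to the range
+ * [max(t_rttmin, rtt + 2), TCPTV_REXMTMAX].
+ */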
+
+ /*
+ * We received an ack for a packet that wasn't retransmitted;
+ * it is probably safe to discard any error indications we've
+ * received recently. This isn't quite right, but close enough
+ * for now (a route might have failed after we sent a segment,
+ * and the return path might not be symmetrical).
+ */
+ tp->t_softerror = 0;
+}
+
+/*
+ * Determine a reasonable value for maxseg size.
+ * If the route is known, check route for mtu.
+ * If none, use an mss that can be handled on the outgoing
+ * interface without forcing IP to fragment; if bigger than
+ * an mbuf cluster (MCLBYTES), round down to nearest multiple of MCLBYTES
+ * to utilize large mbufs. If no route is found, route has no mtu,
+ * or the destination isn't local, use a default, hopefully conservative
+ * size (usually 512 or the default IP max size, but no more than the mtu
+ * of the interface), as we can't discover anything about intervening
+ * gateways or networks. We also initialize the congestion/slow start
+ * window to be a single segment if the destination isn't local.
+ * While looking at the routing entry, we also initialize other path-dependent
+ * parameters from pre-set or cached values in the routing entry.
+ *
+ * Also take into account the space needed for options that we
+ * send regularly. Make maxseg shorter by that amount to assure
+ * that we can send maxseg amount of data even when the options
+ * are present. Store the upper limit of the length of options plus
+ * data in maxopd.
+ *
+ * In case of T/TCP, we call this routine during implicit connection
+ * setup as well (offer = -1), to initialize maxseg from the cached
+ * MSS of our peer.
+ *
+ * NOTE that this routine is only called when we process an incoming
+ * segment. Outgoing SYN/ACK MSS settings are handled in tcp_mssopt().
+ */
+void
+tcp_mss_update(struct tcpcb *tp, int offer,
+ struct hc_metrics_lite *metricptr, int *mtuflags)
+{
+ int mss;
+ u_long maxmtu;
+ struct inpcb *inp = tp->t_inpcb;
+ struct hc_metrics_lite metrics;
+ int origoffer = offer;
+#ifdef INET6
+ int isipv6 = ((inp->inp_vflag & INP_IPV6) != 0) ? 1 : 0;
+ size_t min_protoh = isipv6 ?
+ sizeof (struct ip6_hdr) + sizeof (struct tcphdr) :
+ sizeof (struct tcpiphdr);
+#else
+ const size_t min_protoh = sizeof(struct tcpiphdr);
+#endif
+
+ INP_WLOCK_ASSERT(tp->t_inpcb);
+
+ /* Initialize. */
+#ifdef INET6
+ if (isipv6) {
+ maxmtu = tcp_maxmtu6(&inp->inp_inc, mtuflags);
+ tp->t_maxopd = tp->t_maxseg = V_tcp_v6mssdflt;
+ } else
+#endif
+ {
+ maxmtu = tcp_maxmtu(&inp->inp_inc, mtuflags);
+ tp->t_maxopd = tp->t_maxseg = V_tcp_mssdflt;
+ }
+
+ /*
+ * No route to sender, stay with default mss and return.
+ */
+ if (maxmtu == 0) {
+ /*
+ * In case we return early we need to initialize metrics
+ * to a defined state as tcp_hc_get() would do for us
+ * if there was no cache hit.
+ */
+ if (metricptr != NULL)
+ bzero(metricptr, sizeof(struct hc_metrics_lite));
+ return;
+ }
+
+ /* What have we got? */
+ switch (offer) {
+ case 0:
+ /*
+ * Offer == 0 means that there was no MSS on the SYN
+ * segment; in this case we use tcp_mssdflt as
+ * already assigned to t_maxopd above.
+ */
+ offer = tp->t_maxopd;
+ break;
+
+ case -1:
+ /*
+ * Offer == -1 means that we didn't receive SYN yet.
+ */
+ /* FALLTHROUGH */
+
+ default:
+ /*
+ * Prevent DoS attack with too small MSS. Round up
+ * to at least minmss.
+ */
+ offer = max(offer, V_tcp_minmss);
+ }
+
+ /*
+ * rmx information is now retrieved from tcp_hostcache.
+ */
+ tcp_hc_get(&inp->inp_inc, &metrics);
+ if (metricptr != NULL)
+ bcopy(&metrics, metricptr, sizeof(struct hc_metrics_lite));
+
+ /*
+ * If there's a discovered MTU in the tcp hostcache, use it;
+ * otherwise, use the link MTU.
+ */
+ if (metrics.rmx_mtu)
+ mss = min(metrics.rmx_mtu, maxmtu) - min_protoh;
+ else {
+#ifdef INET6
+ if (isipv6) {
+ mss = maxmtu - min_protoh;
+ if (!V_path_mtu_discovery &&
+ !in6_localaddr(&inp->in6p_faddr))
+ mss = min(mss, V_tcp_v6mssdflt);
+ } else
+#endif
+ {
+ mss = maxmtu - min_protoh;
+ if (!V_path_mtu_discovery &&
+ !in_localaddr(inp->inp_faddr))
+ mss = min(mss, V_tcp_mssdflt);
+ }
+ /*
+ * XXX - The above conditional (mss = maxmtu - min_protoh)
+ * probably violates the TCP spec.
+ * The problem is that, since we don't know the
+ * other end's MSS, we are supposed to use a conservative
+ * default. But, if we do that, then MTU discovery will
+ * never actually take place, because the conservative
+ * default is much less than the MTUs typically seen
+ * on the Internet today. For the moment, we'll sweep
+ * this under the carpet.
+ *
+ * The conservative default might not actually be a problem
+ * if the only case this occurs is when sending an initial
+ * SYN with options and data to a host we've never talked
+ * to before. Then, they will reply with an MSS value which
+ * will get recorded and the new parameters should get
+ * recomputed. For Further Study.
+ */
+ }
+ mss = min(mss, offer);
+
+ /*
+ * Sanity check: make sure that maxopd will be large
+ * enough to allow some data on segments even if all
+ * the option space is used (40 bytes). Otherwise
+ * funny things may happen in tcp_output.
+ */
+ mss = max(mss, 64);
+
+ /*
+ * maxopd stores the maximum length of data AND options
+ * in a segment; maxseg is the amount of data in a normal
+ * segment. We need to store this value (maxopd) apart
+ * from maxseg, because now every segment carries options
+ * and thus we normally have somewhat less data in segments.
+ */
+ tp->t_maxopd = mss;
+
+ /*
+ * origoffer==-1 indicates that no segments were received yet.
+ * In this case we just guess.
+ */
+ if ((tp->t_flags & (TF_REQ_TSTMP|TF_NOOPT)) == TF_REQ_TSTMP &&
+ (origoffer == -1 ||
+ (tp->t_flags & TF_RCVD_TSTMP) == TF_RCVD_TSTMP))
+ mss -= TCPOLEN_TSTAMP_APPA;
+
+#if (MCLBYTES & (MCLBYTES - 1)) == 0
+ if (mss > MCLBYTES)
+ mss &= ~(MCLBYTES-1);
+#else
+ if (mss > MCLBYTES)
+ mss = mss / MCLBYTES * MCLBYTES;
+#endif
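+ /*
+ * As a worked example: a typical Ethernet MTU of 1500 less 40 bytes
+ * of IP+TCP headers leaves an offered mss of 1460; subtracting 12
+ * bytes (TCPOLEN_TSTAMP_APPA) for timestamps leaves 1448. With the
+ * common 2048-byte MCLBYTES this is below the rounding threshold,
+ * so 1448 is what lands in t_maxseg.
+ */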
+ tp->t_maxseg = mss;
+}
+
+void
+tcp_mss(struct tcpcb *tp, int offer)
+{
+ int rtt, mss;
+ u_long bufsize;
+ struct inpcb *inp;
+ struct socket *so;
+ struct hc_metrics_lite metrics;
+ int mtuflags = 0;
+#ifdef INET6
+ int isipv6;
+#endif
+ KASSERT(tp != NULL, ("%s: tp == NULL", __func__));
+
+ tcp_mss_update(tp, offer, &metrics, &mtuflags);
+
+ mss = tp->t_maxseg;
+ inp = tp->t_inpcb;
+#ifdef INET6
+ isipv6 = ((inp->inp_vflag & INP_IPV6) != 0) ? 1 : 0;
+#endif
+
+ /*
+ * If there's a pipesize, change the socket buffer to that size;
+ * don't change it if sb_hiwat differs from the default (then it
+ * has been changed on purpose with setsockopt()).
+ * Make the socket buffers an integral number of mss units;
+ * if the mss is larger than the socket buffer, decrease the mss.
+ */
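+ /*
+ * E.g., with an mss of 1448 and a 32 kB send buffer,
+ * roundup(32768, 1448) = 33304, so the buffer grows by roughly one
+ * segment (subject to the global sb_max limit).
+ */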
+ so = inp->inp_socket;
+ SOCKBUF_LOCK(&so->so_snd);
+ if ((so->so_snd.sb_hiwat == tcp_sendspace) && metrics.rmx_sendpipe)
+ bufsize = metrics.rmx_sendpipe;
+ else
+ bufsize = so->so_snd.sb_hiwat;
+ if (bufsize < mss)
+ mss = bufsize;
+ else {
+ bufsize = roundup(bufsize, mss);
+ if (bufsize > sb_max)
+ bufsize = sb_max;
+ if (bufsize > so->so_snd.sb_hiwat)
+ (void)sbreserve_locked(&so->so_snd, bufsize, so, NULL);
+ }
+ SOCKBUF_UNLOCK(&so->so_snd);
+ tp->t_maxseg = mss;
+
+ SOCKBUF_LOCK(&so->so_rcv);
+ if ((so->so_rcv.sb_hiwat == tcp_recvspace) && metrics.rmx_recvpipe)
+ bufsize = metrics.rmx_recvpipe;
+ else
+ bufsize = so->so_rcv.sb_hiwat;
+ if (bufsize > mss) {
+ bufsize = roundup(bufsize, mss);
+ if (bufsize > sb_max)
+ bufsize = sb_max;
+ if (bufsize > so->so_rcv.sb_hiwat)
+ (void)sbreserve_locked(&so->so_rcv, bufsize, so, NULL);
+ }
+ SOCKBUF_UNLOCK(&so->so_rcv);
+ /*
+ * While we're here, check the others too.
+ */
+ if (tp->t_srtt == 0 && (rtt = metrics.rmx_rtt)) {
+ tp->t_srtt = rtt;
+ tp->t_rttbest = tp->t_srtt + TCP_RTT_SCALE;
+ TCPSTAT_INC(tcps_usedrtt);
+ if (metrics.rmx_rttvar) {
+ tp->t_rttvar = metrics.rmx_rttvar;
+ TCPSTAT_INC(tcps_usedrttvar);
+ } else {
+ /* default variation is +- 1 rtt */
+ tp->t_rttvar =
+ tp->t_srtt * TCP_RTTVAR_SCALE / TCP_RTT_SCALE;
+ }
+ TCPT_RANGESET(tp->t_rxtcur,
+ ((tp->t_srtt >> 2) + tp->t_rttvar) >> 1,
+ tp->t_rttmin, TCPTV_REXMTMAX);
+ }
+ if (metrics.rmx_ssthresh) {
+ /*
+ * There's some sort of gateway or interface
+ * buffer limit on the path. Use this to set
+ * the slow start threshold, but set the
+ * threshold to no less than 2*mss.
+ */
+ tp->snd_ssthresh = max(2 * mss, metrics.rmx_ssthresh);
+ TCPSTAT_INC(tcps_usedssthresh);
+ }
+ if (metrics.rmx_bandwidth)
+ tp->snd_bandwidth = metrics.rmx_bandwidth;
+
+ /*
+ * Set the slow-start flight size depending on whether this
+ * is a local network or not.
+ *
+ * Extend this so we cache the cwnd too and retrieve it here.
+ * Make cwnd even bigger than RFC3390 suggests but only if we
+ * have previous experience with the remote host. Be careful
+ * not to make cwnd bigger than the remote receive window or our
+ * own send socket buffer. Maybe put some additional upper bound
+ * on the retrieved cwnd. Should do incremental updates to
+ * hostcache when cwnd collapses so the next connection doesn't
+ * overload the path again.
+ *
+ * XXXAO: Initializing the CWND from the hostcache is broken
+ * and in its current form not RFC conformant. It is disabled
+ * until fixed or removed entirely.
+ *
+ * RFC3390 says only do this if SYN or SYN/ACK didn't get lost.
+ * We currently check only in syncache_socket for that.
+ */
+/* #define TCP_METRICS_CWND */
+#ifdef TCP_METRICS_CWND
+ if (metrics.rmx_cwnd)
+ tp->snd_cwnd = max(mss,
+ min(metrics.rmx_cwnd / 2,
+ min(tp->snd_wnd, so->so_snd.sb_hiwat)));
+ else
+#endif
+ if (V_tcp_do_rfc3390)
+ tp->snd_cwnd = min(4 * mss, max(2 * mss, 4380));
+#ifdef INET6
+ else if ((isipv6 && in6_localaddr(&inp->in6p_faddr)) ||
+ (!isipv6 && in_localaddr(inp->inp_faddr)))
+#else
+ else if (in_localaddr(inp->inp_faddr))
+#endif
+ tp->snd_cwnd = mss * V_ss_fltsz_local;
+ else
+ tp->snd_cwnd = mss * V_ss_fltsz;
+
+ /* Check the interface for TSO capabilities. */
+ if (mtuflags & CSUM_TSO)
+ tp->t_flags |= TF_TSO;
+}
+
+/*
+ * Determine the MSS option to send on an outgoing SYN.
+ */
+int
+tcp_mssopt(struct in_conninfo *inc)
+{
+ int mss = 0;
+ u_long maxmtu = 0;
+ u_long thcmtu = 0;
+ size_t min_protoh;
+
+ KASSERT(inc != NULL, ("tcp_mssopt with NULL in_conninfo pointer"));
+
+#ifdef INET6
+ if (inc->inc_flags & INC_ISIPV6) {
+ mss = V_tcp_v6mssdflt;
+ maxmtu = tcp_maxmtu6(inc, NULL);
+ thcmtu = tcp_hc_getmtu(inc); /* IPv4 and IPv6 */
+ min_protoh = sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
+ } else
+#endif
+ {
+ mss = V_tcp_mssdflt;
+ maxmtu = tcp_maxmtu(inc, NULL);
+ thcmtu = tcp_hc_getmtu(inc); /* IPv4 and IPv6 */
+ min_protoh = sizeof(struct tcpiphdr);
+ }
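+ /*
+ * If both the interface MTU and a cached path MTU are known, the
+ * smaller governs; if only one is known, the other is 0, so max()
+ * simply selects the known value.
+ */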
+ if (maxmtu && thcmtu)
+ mss = min(maxmtu, thcmtu) - min_protoh;
+ else if (maxmtu || thcmtu)
+ mss = max(maxmtu, thcmtu) - min_protoh;
+
+ return (mss);
+}
+
+/*
+ * When a partial ack arrives, force the retransmission of the
+ * next unacknowledged segment. Do not clear tp->t_dupacks.
+ * By setting snd_nxt to th_ack, this forces the retransmission timer
+ * to be started again.
+ */
+static void
+tcp_newreno_partial_ack(struct tcpcb *tp, struct tcphdr *th)
+{
+ tcp_seq onxt = tp->snd_nxt;
+ u_long ocwnd = tp->snd_cwnd;
+
+ INP_WLOCK_ASSERT(tp->t_inpcb);
+
+ tcp_timer_activate(tp, TT_REXMT, 0);
+ tp->t_rtttime = 0;
+ tp->snd_nxt = th->th_ack;
+ /*
+ * Set snd_cwnd to one segment beyond acknowledged offset.
+ * (tp->snd_una has not yet been updated when this function is called.)
+ */
+ tp->snd_cwnd = tp->t_maxseg + (th->th_ack - tp->snd_una);
+ tp->t_flags |= TF_ACKNOW;
+ (void) tcp_output(tp);
+ tp->snd_cwnd = ocwnd;
+ if (SEQ_GT(onxt, tp->snd_nxt))
+ tp->snd_nxt = onxt;
+ /*
+ * Partial window deflation. Relies on the fact that tp->snd_una
+ * has not yet been updated.
+ */
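+ /*
+ * E.g., if cwnd was 10 segments and the partial ack covers 3
+ * segments' worth of data, cwnd becomes 10 - 3 + 1 = 8 segments:
+ * deflate by what was acked, then allow one new segment out.
+ */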
+ if (tp->snd_cwnd > th->th_ack - tp->snd_una)
+ tp->snd_cwnd -= th->th_ack - tp->snd_una;
+ else
+ tp->snd_cwnd = 0;
+ tp->snd_cwnd += tp->t_maxseg;
+}
diff --git a/rtems/freebsd/netinet/tcp_lro.c b/rtems/freebsd/netinet/tcp_lro.c
new file mode 100644
index 00000000..b2a5435e
--- /dev/null
+++ b/rtems/freebsd/netinet/tcp_lro.c
@@ -0,0 +1,389 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/******************************************************************************
+
+Copyright (c) 2007, Myricom Inc.
+Copyright (c) 2008, Intel Corporation.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Neither the name of the Myricom Inc, nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+ 3. Neither the name of the Intel Corporation, nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+$FreeBSD$
+***************************************************************************/
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/endian.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/socket.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/ethernet.h>
+#include <rtems/freebsd/net/if_media.h>
+
+#include <rtems/freebsd/netinet/in_systm.h>
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/ip.h>
+#include <rtems/freebsd/netinet/tcp.h>
+#include <rtems/freebsd/netinet/tcp_lro.h>
+
+#include <rtems/freebsd/machine/bus.h>
+#include <rtems/freebsd/machine/in_cksum.h>
+
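+/*
+ * Plain 16-bit ones-complement sum over a buffer. Note the loop
+ * consumes four bytes (two 16-bit words) per iteration, so callers
+ * must pass a length that is a multiple of 4, which holds for the
+ * IP and TCP headers this file feeds it.
+ */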
+static uint16_t do_csum_data(uint16_t *raw, int len)
+{
+ uint32_t csum;
+ csum = 0;
+ while (len > 0) {
+ csum += *raw;
+ raw++;
+ csum += *raw;
+ raw++;
+ len -= 4;
+ }
+ csum = (csum >> 16) + (csum & 0xffff);
+ csum = (csum >> 16) + (csum & 0xffff);
+ return (uint16_t)csum;
+}
+
+/*
+ * Allocate and init the LRO data structures
+ */
+int
+tcp_lro_init(struct lro_ctrl *cntl)
+{
+ struct lro_entry *lro;
+ int i, error = 0;
+
+ SLIST_INIT(&cntl->lro_free);
+ SLIST_INIT(&cntl->lro_active);
+
+ cntl->lro_bad_csum = 0;
+ cntl->lro_queued = 0;
+ cntl->lro_flushed = 0;
+
+ for (i = 0; i < LRO_ENTRIES; i++) {
+ lro = (struct lro_entry *) malloc(sizeof (struct lro_entry),
+ M_DEVBUF, M_NOWAIT | M_ZERO);
+ if (lro == NULL) {
+ if (i == 0)
+ error = ENOMEM;
+ break;
+ }
+ cntl->lro_cnt = i;
+ SLIST_INSERT_HEAD(&cntl->lro_free, lro, next);
+ }
+
+ return (error);
+}
+
+void
+tcp_lro_free(struct lro_ctrl *cntl)
+{
+ struct lro_entry *entry;
+
+ while (!SLIST_EMPTY(&cntl->lro_free)) {
+ entry = SLIST_FIRST(&cntl->lro_free);
+ SLIST_REMOVE_HEAD(&cntl->lro_free, next);
+ free(entry, M_DEVBUF);
+ }
+}
+
+void
+tcp_lro_flush(struct lro_ctrl *cntl, struct lro_entry *lro)
+{
+ struct ifnet *ifp;
+ struct ip *ip;
+ struct tcphdr *tcp;
+ uint32_t *ts_ptr;
+ uint32_t tcplen, tcp_csum;
+
+ if (lro->append_cnt) {
+ /* incorporate the new len into the ip header and
+ * re-calculate the checksum */
+ ip = lro->ip;
+ ip->ip_len = htons(lro->len - ETHER_HDR_LEN);
+ ip->ip_sum = 0;
+ ip->ip_sum = 0xffff ^
+ do_csum_data((uint16_t*)ip,
+ sizeof (*ip));
+
+ lro->m_head->m_pkthdr.csum_flags = CSUM_IP_CHECKED |
+ CSUM_IP_VALID | CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
+ lro->m_head->m_pkthdr.csum_data = 0xffff;
+ lro->m_head->m_pkthdr.len = lro->len;
+
+ /* incorporate the latest ack into the tcp header */
+ tcp = (struct tcphdr *) (ip + 1);
+ tcp->th_ack = lro->ack_seq;
+ tcp->th_win = lro->window;
+ /* incorporate latest timestamp into the tcp header */
+ if (lro->timestamp) {
+ ts_ptr = (uint32_t *)(tcp + 1);
+ ts_ptr[1] = htonl(lro->tsval);
+ ts_ptr[2] = lro->tsecr;
+ }
+ /*
+ * update checksum in tcp header by re-calculating the
+ * tcp pseudoheader checksum, and adding it to the checksum
+ * of the tcp payload data
+ */
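+ /*
+ * The two add-and-fold steps below collapse the 32-bit
+ * accumulator back into 16 bits; folding twice handles the
+ * carry that the first fold itself can generate.
+ */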
+ tcp->th_sum = 0;
+ tcplen = lro->len - sizeof(*ip) - ETHER_HDR_LEN;
+ tcp_csum = lro->data_csum;
+ tcp_csum += in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
+ htons(tcplen + IPPROTO_TCP));
+ tcp_csum += do_csum_data((uint16_t*)tcp,
+ tcp->th_off << 2);
+ tcp_csum = (tcp_csum & 0xffff) + (tcp_csum >> 16);
+ tcp_csum = (tcp_csum & 0xffff) + (tcp_csum >> 16);
+ tcp->th_sum = 0xffff ^ tcp_csum;
+ }
+ ifp = cntl->ifp;
+ (*ifp->if_input)(cntl->ifp, lro->m_head);
+ cntl->lro_queued += lro->append_cnt + 1;
+ cntl->lro_flushed++;
+ lro->m_head = NULL;
+ lro->timestamp = 0;
+ lro->append_cnt = 0;
+ SLIST_INSERT_HEAD(&cntl->lro_free, lro, next);
+}
+
+int
+tcp_lro_rx(struct lro_ctrl *cntl, struct mbuf *m_head, uint32_t csum)
+{
+ struct ether_header *eh;
+ struct ip *ip;
+ struct tcphdr *tcp;
+ uint32_t *ts_ptr;
+ struct mbuf *m_nxt, *m_tail;
+ struct lro_entry *lro;
+ int hlen, ip_len, tcp_hdr_len, tcp_data_len, tot_len;
+ int opt_bytes, trim, csum_flags;
+ uint32_t seq, tmp_csum, device_mtu;
+
+ eh = mtod(m_head, struct ether_header *);
+ if (eh->ether_type != htons(ETHERTYPE_IP))
+ return 1;
+ ip = (struct ip *) (eh + 1);
+ if (ip->ip_p != IPPROTO_TCP)
+ return 1;
+
+ /* ensure there are no options */
+ if ((ip->ip_hl << 2) != sizeof (*ip))
+ return -1;
+
+ /* ... and the packet is not fragmented */
+ if (ip->ip_off & htons(IP_MF|IP_OFFMASK))
+ return -1;
+
+ /* verify that the IP header checksum is correct */
+ csum_flags = m_head->m_pkthdr.csum_flags;
+ if (csum_flags & CSUM_IP_CHECKED) {
+ if (__predict_false((csum_flags & CSUM_IP_VALID) == 0)) {
+ cntl->lro_bad_csum++;
+ return -1;
+ }
+ } else {
+ tmp_csum = do_csum_data((uint16_t *)ip, sizeof (*ip));
+ if (__predict_false((tmp_csum ^ 0xffff) != 0)) {
+ cntl->lro_bad_csum++;
+ return -1;
+ }
+ }
+
+ /* find the TCP header */
+ tcp = (struct tcphdr *) (ip + 1);
+
+ /* Get the TCP checksum if we don't have it */
+ if (!csum)
+ csum = tcp->th_sum;
+
+ /* ensure no bits set besides ack or psh */
+ if ((tcp->th_flags & ~(TH_ACK | TH_PUSH)) != 0)
+ return -1;
+
+ /*
+ * Check for timestamps. Since the only option we handle is
+ * timestamps, we only have to handle the simple case of
+ * aligned timestamps.
+ */
+
+ opt_bytes = (tcp->th_off << 2) - sizeof (*tcp);
+ tcp_hdr_len = sizeof (*tcp) + opt_bytes;
+ ts_ptr = (uint32_t *)(tcp + 1);
+ if (opt_bytes != 0) {
+ if (__predict_false(opt_bytes != TCPOLEN_TSTAMP_APPA) ||
+ (*ts_ptr != ntohl(TCPOPT_NOP<<24|TCPOPT_NOP<<16|
+ TCPOPT_TIMESTAMP<<8|TCPOLEN_TIMESTAMP)))
+ return -1;
+ }
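+ /*
+ * With the standard layout the first four option bytes are
+ * 01 01 08 0a: NOP, NOP, TIMESTAMP kind (8), length (10). That
+ * single-word compare above is all the parsing LRO needs.
+ */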
+
+ ip_len = ntohs(ip->ip_len);
+ tcp_data_len = ip_len - (tcp->th_off << 2) - sizeof (*ip);
+
+ /*
+ * If frame is padded beyond the end of the IP packet,
+ * then we must trim the extra bytes off the end.
+ */
+ tot_len = m_head->m_pkthdr.len;
+ trim = tot_len - (ip_len + ETHER_HDR_LEN);
+ if (trim != 0) {
+ if (trim < 0) {
+ /* truncated packet */
+ return -1;
+ }
+ m_adj(m_head, -trim);
+ tot_len = m_head->m_pkthdr.len;
+ }
+
+ m_nxt = m_head;
+ m_tail = NULL; /* -Wuninitialized */
+ while (m_nxt != NULL) {
+ m_tail = m_nxt;
+ m_nxt = m_tail->m_next;
+ }
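+ /*
+ * m_tail now points at the last mbuf in the chain; it becomes the
+ * tail of the aggregated chain if this packet joins or starts an
+ * LRO entry.
+ */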
+
+ hlen = ip_len + ETHER_HDR_LEN - tcp_data_len;
+ seq = ntohl(tcp->th_seq);
+
+ SLIST_FOREACH(lro, &cntl->lro_active, next) {
+ if (lro->source_port == tcp->th_sport &&
+ lro->dest_port == tcp->th_dport &&
+ lro->source_ip == ip->ip_src.s_addr &&
+ lro->dest_ip == ip->ip_dst.s_addr) {
+ /* Try to append it */
+
+ if (__predict_false(seq != lro->next_seq)) {
+ /* out of order packet */
+ SLIST_REMOVE(&cntl->lro_active, lro,
+ lro_entry, next);
+ tcp_lro_flush(cntl, lro);
+ return -1;
+ }
+
+ if (opt_bytes) {
+ uint32_t tsval = ntohl(*(ts_ptr + 1));
+ /* make sure timestamp values are increasing */
+ if (__predict_false(lro->tsval > tsval ||
+ *(ts_ptr + 2) == 0)) {
+ return -1;
+ }
+ lro->tsval = tsval;
+ lro->tsecr = *(ts_ptr + 2);
+ }
+
+ lro->next_seq += tcp_data_len;
+ lro->ack_seq = tcp->th_ack;
+ lro->window = tcp->th_win;
+ lro->append_cnt++;
+ if (tcp_data_len == 0) {
+ m_freem(m_head);
+ return 0;
+ }
+ /*
+ * Subtract off the checksum of the tcp header
+ * from the hardware checksum, and add it to the
+ * stored tcp data checksum. Byteswap the checksum
+ * if the total length so far is odd.
+ */
+ tmp_csum = do_csum_data((uint16_t*)tcp,
+ tcp_hdr_len);
+ csum = csum + (tmp_csum ^ 0xffff);
+ csum = (csum & 0xffff) + (csum >> 16);
+ csum = (csum & 0xffff) + (csum >> 16);
+ if (lro->len & 0x1) {
+ /* Odd number of bytes so far, flip bytes */
+ csum = ((csum << 8) | (csum >> 8)) & 0xffff;
+ }
+ csum = csum + lro->data_csum;
+ csum = (csum & 0xffff) + (csum >> 16);
+ csum = (csum & 0xffff) + (csum >> 16);
+ lro->data_csum = csum;
+
+ lro->len += tcp_data_len;
+
+ /* adjust mbuf so that m->m_data points to
+ the first byte of the payload */
+ m_adj(m_head, hlen);
+ /* append mbuf chain */
+ lro->m_tail->m_next = m_head;
+ /* advance the last pointer */
+ lro->m_tail = m_tail;
+ /* flush packet if required */
+ device_mtu = cntl->ifp->if_mtu;
+ if (lro->len > (65535 - device_mtu)) {
+ SLIST_REMOVE(&cntl->lro_active, lro,
+ lro_entry, next);
+ tcp_lro_flush(cntl, lro);
+ }
+ return 0;
+ }
+ }
+
+ if (SLIST_EMPTY(&cntl->lro_free))
+ return -1;
+
+ /* start a new chain */
+ lro = SLIST_FIRST(&cntl->lro_free);
+ SLIST_REMOVE_HEAD(&cntl->lro_free, next);
+ SLIST_INSERT_HEAD(&cntl->lro_active, lro, next);
+ lro->source_port = tcp->th_sport;
+ lro->dest_port = tcp->th_dport;
+ lro->source_ip = ip->ip_src.s_addr;
+ lro->dest_ip = ip->ip_dst.s_addr;
+ lro->next_seq = seq + tcp_data_len;
+ lro->mss = tcp_data_len;
+ lro->ack_seq = tcp->th_ack;
+ lro->window = tcp->th_win;
+
+ /*
+ * Save the checksum of just the TCP payload by
+ * subtracting off the checksum of the TCP header from
+ * the entire hardware checksum.
+ * Since the IP header checksum is correct, the checksum
+ * over the IP header is -0. Subtracting -0 is unnecessary.
+ */
+ tmp_csum = do_csum_data((uint16_t*)tcp, tcp_hdr_len);
+ csum = csum + (tmp_csum ^ 0xffff);
+ csum = (csum & 0xffff) + (csum >> 16);
+ csum = (csum & 0xffff) + (csum >> 16);
+ lro->data_csum = csum;
+
+ lro->ip = ip;
+ /* record timestamp if it is present */
+ if (opt_bytes) {
+ lro->timestamp = 1;
+ lro->tsval = ntohl(*(ts_ptr + 1));
+ lro->tsecr = *(ts_ptr + 2);
+ }
+ lro->len = tot_len;
+ lro->m_head = m_head;
+ lro->m_tail = m_tail;
+ return 0;
+}
diff --git a/rtems/freebsd/netinet/tcp_lro.h b/rtems/freebsd/netinet/tcp_lro.h
new file mode 100644
index 00000000..20cfb7cf
--- /dev/null
+++ b/rtems/freebsd/netinet/tcp_lro.h
@@ -0,0 +1,85 @@
+/*******************************************************************************
+
+Copyright (c) 2006, Myricom Inc.
+Copyright (c) 2008, Intel Corporation.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Neither the name of the Myricom Inc, nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+ 3. Neither the name of the Intel Corporation, nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+
+$FreeBSD$
+
+***************************************************************************/
+#ifndef _TCP_LRO_HH_
+#define _TCP_LRO_HH_
+
+struct lro_entry;
+struct lro_entry
+{
+ SLIST_ENTRY(lro_entry) next;
+ struct mbuf *m_head;
+ struct mbuf *m_tail;
+ int timestamp;
+ struct ip *ip;
+ uint32_t tsval;
+ uint32_t tsecr;
+ uint32_t source_ip;
+ uint32_t dest_ip;
+ uint32_t next_seq;
+ uint32_t ack_seq;
+ uint32_t len;
+ uint32_t data_csum;
+ uint16_t window;
+ uint16_t source_port;
+ uint16_t dest_port;
+ uint16_t append_cnt;
+ uint16_t mss;
+};
+SLIST_HEAD(lro_head, lro_entry);
+
+struct lro_ctrl {
+ struct ifnet *ifp;
+ int lro_queued;
+ int lro_flushed;
+ int lro_bad_csum;
+ int lro_cnt;
+
+ struct lro_head lro_active;
+ struct lro_head lro_free;
+};
+
+int tcp_lro_init(struct lro_ctrl *);
+void tcp_lro_free(struct lro_ctrl *);
+void tcp_lro_flush(struct lro_ctrl *, struct lro_entry *);
+int tcp_lro_rx(struct lro_ctrl *, struct mbuf *, uint32_t);
+
+/* Number of LRO entries - these are per rx queue */
+#define LRO_ENTRIES 8
+
+#endif /* _TCP_LRO_HH_ */
diff --git a/rtems/freebsd/netinet/tcp_offload.c b/rtems/freebsd/netinet/tcp_offload.c
new file mode 100644
index 00000000..8524a766
--- /dev/null
+++ b/rtems/freebsd/netinet/tcp_offload.c
@@ -0,0 +1,147 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 2007, Chelsio Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Neither the name of the Chelsio Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/types.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/sysctl.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/socketvar.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/if_types.h>
+#include <rtems/freebsd/net/if_var.h>
+#include <rtems/freebsd/net/route.h>
+#include <rtems/freebsd/net/vnet.h>
+
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/in_systm.h>
+#include <rtems/freebsd/netinet/in_pcb.h>
+#include <rtems/freebsd/netinet/tcp.h>
+#include <rtems/freebsd/netinet/tcp_var.h>
+#include <rtems/freebsd/netinet/tcp_offload.h>
+#include <rtems/freebsd/netinet/toedev.h>
+
+uint32_t toedev_registration_count;
+
+int
+tcp_offload_connect(struct socket *so, struct sockaddr *nam)
+{
+ struct ifnet *ifp;
+ struct toedev *tdev;
+ struct rtentry *rt;
+ int error;
+
+ if (toedev_registration_count == 0)
+ return (EINVAL);
+
+ /*
+ * Look up the route used for the connection to
+ * determine if it uses an interface capable of
+ * offloading the connection.
+ */
+ rt = rtalloc1(nam, 0 /*report*/, 0 /*ignflags*/);
+ if (rt)
+ RT_UNLOCK(rt);
+ else
+ return (EHOSTUNREACH);
+
+ ifp = rt->rt_ifp;
+ if ((ifp->if_capenable & IFCAP_TOE) == 0) {
+ error = EINVAL;
+ goto fail;
+ }
+
+ tdev = TOEDEV(ifp);
+ if (tdev == NULL) {
+ error = EPERM;
+ goto fail;
+ }
+
+ if (tdev->tod_can_offload(tdev, so) == 0) {
+ error = EPERM;
+ goto fail;
+ }
+
+ return (tdev->tod_connect(tdev, so, rt, nam));
+fail:
+ RTFREE(rt);
+ return (error);
+}
+
+/*
+ * This file contains code as a short-term staging area before it is moved
+ * into sys/netinet/tcp_offload.c.
+ */
+
+void
+tcp_offload_twstart(struct tcpcb *tp)
+{
+
+ INP_INFO_WLOCK(&V_tcbinfo);
+ INP_WLOCK(tp->t_inpcb);
+ tcp_twstart(tp);
+ INP_INFO_WUNLOCK(&V_tcbinfo);
+}
+
+struct tcpcb *
+tcp_offload_close(struct tcpcb *tp)
+{
+
+ INP_INFO_WLOCK(&V_tcbinfo);
+ INP_WLOCK(tp->t_inpcb);
+ tp = tcp_close(tp);
+ INP_INFO_WUNLOCK(&V_tcbinfo);
+ if (tp)
+ INP_WUNLOCK(tp->t_inpcb);
+
+ return (tp);
+}
+
+struct tcpcb *
+tcp_offload_drop(struct tcpcb *tp, int error)
+{
+
+ INP_INFO_WLOCK(&V_tcbinfo);
+ INP_WLOCK(tp->t_inpcb);
+ tp = tcp_drop(tp, error);
+ INP_INFO_WUNLOCK(&V_tcbinfo);
+ if (tp)
+ INP_WUNLOCK(tp->t_inpcb);
+
+ return (tp);
+}
diff --git a/rtems/freebsd/netinet/tcp_offload.h b/rtems/freebsd/netinet/tcp_offload.h
new file mode 100644
index 00000000..f2a35a58
--- /dev/null
+++ b/rtems/freebsd/netinet/tcp_offload.h
@@ -0,0 +1,354 @@
+/*-
+ * Copyright (c) 2007, Chelsio Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Neither the name of the Chelsio Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _NETINET_TCP_OFFLOAD_HH_
+#define _NETINET_TCP_OFFLOAD_HH_
+
+#ifndef _KERNEL
+#error "no user-serviceable parts inside"
+#endif
+
+/*
+ * A driver publishes that it provides offload services
+ * by setting IFCAP_TOE in the ifnet. The offload connect
+ * will bypass any further work if the interface that a
+ * connection would use does not support TCP offload.
+ *
+ * The TOE API assumes that the TCP offload engine can offload the
+ * entire connection from setup to teardown, with some provision
+ * made to allow the software stack to handle time wait. If
+ * the device does not meet these criteria, it is the driver's responsibility
+ * to overload the functions that it needs to in tcp_usrreqs and make
+ * its own calls to tcp_output if it needs to do so.
+ *
+ * There is currently no provision for the device to advertise the
+ * congestion control algorithms it supports, as there is currently no
+ * API for querying an operating system for the protocols that it has
+ * loaded. This is a desirable future extension.
+ *
+ * It is assumed that individuals deploying TOE will want connections
+ * to be offloaded without software changes, so all connections on an
+ * interface providing TOE are offloaded unless the SO_NO_OFFLOAD
+ * flag is set on the socket.
+ *
+ * The toe_usrreqs structure constitutes the TOE driver's
+ * interface to the TCP stack for functionality that doesn't
+ * interact directly with userspace. If one wants to provide
+ * (optional) functionality to do zero-copy to/from
+ * userspace one still needs to override soreceive/sosend
+ * with functions that fault in and pin the user buffers.
+ *
+ * + tu_send
+ * - tells the driver that new data may have been added to the
+ * socket's send buffer - the driver should not fail if the
+ * buffer is in fact unchanged
+ * - the driver is responsible for providing credits (bytes in the send window)
+ * back to the socket by calling sbdrop() as segments are acknowledged.
+ * - The driver expects the inpcb lock to be held - the driver is expected
+ * not to drop the lock. Hence the driver is not allowed to acquire the
+ * pcbinfo lock during this call.
+ *
+ * + tu_rcvd
+ * - returns credits to the driver and triggers window updates
+ * to the peer (a credit as used here is a byte in the peer's receive window)
+ * - the driver is expected to determine how many bytes have been
+ * consumed and credit that back to the card so that it can grow
+ * the window again by maintaining its own state between invocations.
+ * - In principle this could be used to shrink the window as well as
+ * grow the window, although it is not used for that now.
+ * - this function needs to correctly handle being called any number of
+ * times without any bytes being consumed from the receive buffer.
+ * - The driver expects the inpcb lock to be held - the driver is expected
+ * not to drop the lock. Hence the driver is not allowed to acquire the
+ * pcbinfo lock during this call.
+ *
+ * + tu_disconnect
+ * - tells the driver to send FIN to peer
+ * - driver is expected to send the remaining data and then do a clean half close
+ * - disconnect implies at least half-close so only send, reset, and detach
+ * are legal
+ * - the driver is expected to handle transition through the shutdown
+ * state machine and allow the stack to support SO_LINGER.
+ * - The driver expects the inpcb lock to be held - the driver is expected
+ * not to drop the lock. Hence the driver is not allowed to acquire the
+ * pcbinfo lock during this call.
+ *
+ * + tu_reset
+ * - closes the connection and sends a RST to peer
+ * - driver is expected to trigger an RST and detach the toepcb
+ * - no further calls are legal after reset
+ * - The driver expects the inpcb lock to be held - the driver is expected
+ * not to drop the lock. Hence the driver is not allowed to acquire the
+ * pcbinfo lock during this call.
+ *
+ * The following fields in the tcpcb are expected to be referenced by the driver:
+ * + iss
+ * + rcv_nxt
+ * + rcv_wnd
+ * + snd_isn
+ * + snd_max
+ * + snd_nxt
+ * + snd_una
+ * + t_flags
+ * + t_inpcb
+ * + t_maxseg
+ * + t_toe
+ *
+ * The following fields in the inpcb are expected to be referenced by the driver:
+ * + inp_lport
+ * + inp_fport
+ * + inp_laddr
+ * + inp_fport
+ * + inp_socket
+ * + inp_ip_tos
+ *
+ * The following fields in the socket are expected to be referenced by the
+ * driver:
+ * + so_comp
+ * + so_error
+ * + so_linger
+ * + so_options
+ * + so_rcv
+ * + so_snd
+ * + so_state
+ * + so_timeo
+ *
+ * These functions all return 0 on success and can return the following errors
+ * as appropriate:
+ * + EPERM:
+ * + ENOBUFS: memory allocation failed
+ * + EMSGSIZE: MTU changed during the call
+ * + EHOSTDOWN:
+ * + EHOSTUNREACH:
+ * + ENETDOWN:
+ * + ENETUNREACH: the peer is no longer reachable
+ *
+ * + tu_detach
+ * - tells driver that the socket is going away so disconnect
+ * the toepcb and free appropriate resources
+ * - allows the driver to cleanly handle the case of connection state
+ * outliving the socket
+ * - no further calls are legal after detach
+ * - the driver is expected to provide its own synchronization between
+ * detach and receiving new data.
+ *
+ * + tu_syncache_event
+ * - even if it is not actually needed, the driver is expected to
+ * call syncache_add for the initial SYN and then syncache_expand
+ * for the SYN,ACK
+ * - tells driver that a connection either has not been added or has
+ * been dropped from the syncache
+ * - the driver is expected to maintain state that lives outside the
+ * software stack so the syncache needs to be able to notify the
+ * toe driver that the software stack is not going to create a connection
+ * for a received SYN
+ * - The driver is responsible for any synchronization required between
+ * the syncache dropping an entry and the driver processing the SYN,ACK.
+ *
+ */
+struct toe_usrreqs {
+ int (*tu_send)(struct tcpcb *tp);
+ int (*tu_rcvd)(struct tcpcb *tp);
+ int (*tu_disconnect)(struct tcpcb *tp);
+ int (*tu_reset)(struct tcpcb *tp);
+ void (*tu_detach)(struct tcpcb *tp);
+ void (*tu_syncache_event)(int event, void *toep);
+};
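+/*
+ * A TOE driver would typically provide a static instance wired to its
+ * own handlers, e.g. (illustrative names only, not a real driver):
+ *
+ *	static struct toe_usrreqs foo_usrreqs = {
+ *		.tu_send = foo_send,
+ *		.tu_rcvd = foo_rcvd,
+ *		.tu_disconnect = foo_disconnect,
+ *		.tu_reset = foo_reset,
+ *		.tu_detach = foo_detach,
+ *		.tu_syncache_event = foo_syncache_event,
+ *	};
+ *
+ * and store a pointer to it in tp->t_tu when a connection is offloaded.
+ */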
+
+/*
+ * Proxy for struct tcpopt between TOE drivers and TCP functions.
+ */
+struct toeopt {
+ u_int64_t to_flags; /* see tcpopt in tcp_var.h */
+ u_int16_t to_mss; /* maximum segment size */
+ u_int8_t to_wscale; /* window scaling */
+
+ u_int8_t _pad1; /* explicit pad for 64bit alignment */
+ u_int32_t _pad2; /* explicit pad for 64bit alignment */
+ u_int64_t _pad3[4]; /* TBD */
+};
+
+#define TOE_SC_ENTRY_PRESENT 1 /* 4-tuple already present */
+#define TOE_SC_DROP 2 /* connection was timed out */
+
+/*
+ * Because listen is a one-to-many relationship (a socket can be listening
+ * on all interfaces on a machine some of which may be using different TCP
+ * offload devices), listen uses a publish/subscribe mechanism. The TCP
+ * offload driver registers a listen notification function with the stack.
+ * When a listen socket is created all TCP offload devices are notified
+ * so that they can do the appropriate set up to offload connections on the
+ * port to which the socket is bound. When the listen socket is closed,
+ * the offload devices are notified so that they will stop listening on that
+ * port and free any associated resources as well as sending RSTs on any
+ * connections in the SYN_RCVD state.
+ *
+ */
+
+typedef void (*tcp_offload_listen_start_fn)(void *, struct tcpcb *);
+typedef void (*tcp_offload_listen_stop_fn)(void *, struct tcpcb *);
+
+EVENTHANDLER_DECLARE(tcp_offload_listen_start, tcp_offload_listen_start_fn);
+EVENTHANDLER_DECLARE(tcp_offload_listen_stop, tcp_offload_listen_stop_fn);
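+/*
+ * A driver subscribes with the generic eventhandler(9) mechanism; a
+ * minimal sketch (handler names and softc argument are illustrative):
+ *
+ *	EVENTHANDLER_REGISTER(tcp_offload_listen_start,
+ *	    foo_listen_start, sc, EVENTHANDLER_PRI_ANY);
+ *	EVENTHANDLER_REGISTER(tcp_offload_listen_stop,
+ *	    foo_listen_stop, sc, EVENTHANDLER_PRI_ANY);
+ */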
+
+/*
+ * Check if the socket can be offloaded by the following steps:
+ * - determine the egress interface
+ * - check the interface for TOE capability and TOE is enabled
+ * - check if the device has resources to offload the connection
+ */
+int tcp_offload_connect(struct socket *so, struct sockaddr *nam);
+
+/*
+ * The tcp_output_* routines are wrappers around the toe_usrreqs calls
+ * which trigger packet transmission. In the non-offloaded case they
+ * translate to tcp_output. The tcp_offload_* routines notify TOE
+ * of specific events. In the non-offloaded case they are no-ops.
+ *
+ * Listen is a special case because it is a 1 to many relationship
+ * and there can be more than one offload driver in the system.
+ */
+
+/*
+ * Connection is offloaded
+ */
+#define tp_offload(tp) ((tp)->t_flags & TF_TOE)
+
+/*
+ * Hackish way of allowing this file to also be included by TOE
+ * code, which needs to be kept ignorant of socket implementation
+ * details.
+ */
+#ifdef _SYS_SOCKETVAR_HH_
+/*
+ * The socket has not been marked as "do not offload"
+ */
+#define SO_OFFLOADABLE(so) ((so->so_options & SO_NO_OFFLOAD) == 0)
+
+static __inline int
+tcp_output_connect(struct socket *so, struct sockaddr *nam)
+{
+ struct tcpcb *tp = sototcpcb(so);
+ int error;
+
+ /*
+ * If offload has been disabled for this socket or the
+ * connection cannot be offloaded just call tcp_output
+ * to start the TCP state machine.
+ */
+#ifndef TCP_OFFLOAD_DISABLE
+ if (!SO_OFFLOADABLE(so) || (error = tcp_offload_connect(so, nam)) != 0)
+#endif
+ error = tcp_output(tp);
+ return (error);
+}
+
+static __inline int
+tcp_output_send(struct tcpcb *tp)
+{
+
+#ifndef TCP_OFFLOAD_DISABLE
+ if (tp_offload(tp))
+ return (tp->t_tu->tu_send(tp));
+#endif
+ return (tcp_output(tp));
+}
+
+static __inline int
+tcp_output_rcvd(struct tcpcb *tp)
+{
+
+#ifndef TCP_OFFLOAD_DISABLE
+ if (tp_offload(tp))
+ return (tp->t_tu->tu_rcvd(tp));
+#endif
+ return (tcp_output(tp));
+}
+
+static __inline int
+tcp_output_disconnect(struct tcpcb *tp)
+{
+
+#ifndef TCP_OFFLOAD_DISABLE
+ if (tp_offload(tp))
+ return (tp->t_tu->tu_disconnect(tp));
+#endif
+ return (tcp_output(tp));
+}
+
+static __inline int
+tcp_output_reset(struct tcpcb *tp)
+{
+
+#ifndef TCP_OFFLOAD_DISABLE
+ if (tp_offload(tp))
+ return (tp->t_tu->tu_reset(tp));
+#endif
+ return (tcp_output(tp));
+}
+
+static __inline void
+tcp_offload_detach(struct tcpcb *tp)
+{
+
+#ifndef TCP_OFFLOAD_DISABLE
+ if (tp_offload(tp))
+ tp->t_tu->tu_detach(tp);
+#endif
+}
+
+static __inline void
+tcp_offload_listen_open(struct tcpcb *tp)
+{
+
+#ifndef TCP_OFFLOAD_DISABLE
+ if (SO_OFFLOADABLE(tp->t_inpcb->inp_socket))
+ EVENTHANDLER_INVOKE(tcp_offload_listen_start, tp);
+#endif
+}
+
+static __inline void
+tcp_offload_listen_close(struct tcpcb *tp)
+{
+
+#ifndef TCP_OFFLOAD_DISABLE
+ EVENTHANDLER_INVOKE(tcp_offload_listen_stop, tp);
+#endif
+}
+#undef SO_OFFLOADABLE
+#endif /* _SYS_SOCKETVAR_HH_ */
+#undef tp_offload
+
+void tcp_offload_twstart(struct tcpcb *tp);
+struct tcpcb *tcp_offload_close(struct tcpcb *tp);
+struct tcpcb *tcp_offload_drop(struct tcpcb *tp, int error);
+
+#endif /* _NETINET_TCP_OFFLOAD_HH_ */
diff --git a/rtems/freebsd/netinet/tcp_output.c b/rtems/freebsd/netinet/tcp_output.c
new file mode 100644
index 00000000..596b4d79
--- /dev/null
+++ b/rtems/freebsd/netinet/tcp_output.c
@@ -0,0 +1,1485 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)tcp_output.c 8.4 (Berkeley) 5/24/95
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/freebsd/local/opt_inet.h>
+#include <rtems/freebsd/local/opt_inet6.h>
+#include <rtems/freebsd/local/opt_ipsec.h>
+#include <rtems/freebsd/local/opt_tcpdebug.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/domain.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/mutex.h>
+#include <rtems/freebsd/sys/protosw.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/socketvar.h>
+#include <rtems/freebsd/sys/sysctl.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/route.h>
+#include <rtems/freebsd/net/vnet.h>
+
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/in_systm.h>
+#include <rtems/freebsd/netinet/ip.h>
+#include <rtems/freebsd/netinet/in_pcb.h>
+#include <rtems/freebsd/netinet/ip_var.h>
+#include <rtems/freebsd/netinet/ip_options.h>
+#ifdef INET6
+#include <rtems/freebsd/netinet6/in6_pcb.h>
+#include <rtems/freebsd/netinet/ip6.h>
+#include <rtems/freebsd/netinet6/ip6_var.h>
+#endif
+#include <rtems/freebsd/netinet/tcp.h>
+#define TCPOUTFLAGS
+#include <rtems/freebsd/netinet/tcp_fsm.h>
+#include <rtems/freebsd/netinet/tcp_seq.h>
+#include <rtems/freebsd/netinet/tcp_timer.h>
+#include <rtems/freebsd/netinet/tcp_var.h>
+#include <rtems/freebsd/netinet/tcpip.h>
+#ifdef TCPDEBUG
+#include <rtems/freebsd/netinet/tcp_debug.h>
+#endif
+
+#ifdef IPSEC
+#include <rtems/freebsd/netipsec/ipsec.h>
+#endif /*IPSEC*/
+
+#include <rtems/freebsd/machine/in_cksum.h>
+
+#include <rtems/freebsd/security/mac/mac_framework.h>
+
+#ifdef notyet
+extern struct mbuf *m_copypack();
+#endif
+
+VNET_DEFINE(int, path_mtu_discovery) = 1;
+SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, path_mtu_discovery, CTLFLAG_RW,
+ &VNET_NAME(path_mtu_discovery), 1,
+ "Enable Path MTU Discovery");
+
+VNET_DEFINE(int, ss_fltsz) = 1;
+SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, slowstart_flightsize, CTLFLAG_RW,
+ &VNET_NAME(ss_fltsz), 1,
+ "Slow start flight size");
+
+VNET_DEFINE(int, ss_fltsz_local) = 4;
+SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, local_slowstart_flightsize,
+ CTLFLAG_RW, &VNET_NAME(ss_fltsz_local), 1,
+ "Slow start flight size for local networks");
+
+VNET_DEFINE(int, tcp_do_newreno) = 1;
+SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, newreno, CTLFLAG_RW,
+ &VNET_NAME(tcp_do_newreno), 0,
+ "Enable NewReno Algorithms");
+
+VNET_DEFINE(int, tcp_do_tso) = 1;
+#define V_tcp_do_tso VNET(tcp_do_tso)
+SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, tso, CTLFLAG_RW,
+ &VNET_NAME(tcp_do_tso), 0,
+ "Enable TCP Segmentation Offload");
+
+VNET_DEFINE(int, tcp_do_autosndbuf) = 1;
+#define V_tcp_do_autosndbuf VNET(tcp_do_autosndbuf)
+SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, sendbuf_auto, CTLFLAG_RW,
+ &VNET_NAME(tcp_do_autosndbuf), 0,
+ "Enable automatic send buffer sizing");
+
+VNET_DEFINE(int, tcp_autosndbuf_inc) = 8*1024;
+#define V_tcp_autosndbuf_inc VNET(tcp_autosndbuf_inc)
+SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, sendbuf_inc, CTLFLAG_RW,
+ &VNET_NAME(tcp_autosndbuf_inc), 0,
+ "Incrementor step size of automatic send buffer");
+
+VNET_DEFINE(int, tcp_autosndbuf_max) = 256*1024;
+#define V_tcp_autosndbuf_max VNET(tcp_autosndbuf_max)
+SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, sendbuf_max, CTLFLAG_RW,
+ &VNET_NAME(tcp_autosndbuf_max), 0,
+ "Max size of automatic send buffer");
+
+
+/*
+ * TCP output routine: figure out what should be sent and send it.
+ */
+int
+tcp_output(struct tcpcb *tp)
+{
+ struct socket *so = tp->t_inpcb->inp_socket;
+ long len, recwin, sendwin;
+ int off, flags, error, rw;
+ struct mbuf *m;
+ struct ip *ip = NULL;
+ struct ipovly *ipov = NULL;
+ struct tcphdr *th;
+ u_char opt[TCP_MAXOLEN];
+ unsigned ipoptlen, optlen, hdrlen;
+#ifdef IPSEC
+ unsigned ipsec_optlen = 0;
+#endif
+ int idle, sendalot;
+ int sack_rxmit, sack_bytes_rxmt;
+ struct sackhole *p;
+ int tso;
+ struct tcpopt to;
+#if 0
+ int maxburst = TCP_MAXBURST;
+#endif
+#ifdef INET6
+ struct ip6_hdr *ip6 = NULL;
+ int isipv6;
+
+ isipv6 = (tp->t_inpcb->inp_vflag & INP_IPV6) != 0;
+#endif
+
+ INP_WLOCK_ASSERT(tp->t_inpcb);
+
+ /*
+ * Determine length of data that should be transmitted,
+ * and flags that will be used.
+ * If there is some data or critical controls (SYN, RST)
+ * to send, then transmit; otherwise, investigate further.
+ */
+ idle = (tp->t_flags & TF_LASTIDLE) || (tp->snd_max == tp->snd_una);
+ if (idle && ticks - tp->t_rcvtime >= tp->t_rxtcur) {
+ /*
+ * If we've been idle for more than one retransmit
+ * timeout the old congestion window is no longer
+ * current and we have to reduce it to the restart
+ * window before we can transmit again.
+ *
+ * The restart window is the initial window or the last
+ * CWND, whichever is smaller.
+ *
+ * This is done to prevent us from flooding the path with
+ * a full CWND at wirespeed, overloading router and switch
+ * buffers along the way.
+ *
+ * See RFC5681 Section 4.1. "Restarting Idle Connections".
+ */
+ if (V_tcp_do_rfc3390)
+ rw = min(4 * tp->t_maxseg,
+ max(2 * tp->t_maxseg, 4380));
+#ifdef INET6
+ else if ((isipv6 ? in6_localaddr(&tp->t_inpcb->in6p_faddr) :
+ in_localaddr(tp->t_inpcb->inp_faddr)))
+#else
+ else if (in_localaddr(tp->t_inpcb->inp_faddr))
+#endif
+ rw = V_ss_fltsz_local * tp->t_maxseg;
+ else
+ rw = V_ss_fltsz * tp->t_maxseg;
+
+ tp->snd_cwnd = min(rw, tp->snd_cwnd);
+ }
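+ /*
+ * E.g., with a 1460-byte MSS the RFC3390 branch above computes
+ * min(5840, max(2920, 4380)) = 4380 bytes, i.e. three full-sized
+ * segments for the restart window.
+ */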
+ tp->t_flags &= ~TF_LASTIDLE;
+ if (idle) {
+ if (tp->t_flags & TF_MORETOCOME) {
+ tp->t_flags |= TF_LASTIDLE;
+ idle = 0;
+ }
+ }
+again:
+ /*
+ * If we've recently taken a timeout, snd_max will be greater than
+ * snd_nxt. There may be SACK information that allows us to avoid
+ * resending already delivered data. Adjust snd_nxt accordingly.
+ */
+ if ((tp->t_flags & TF_SACK_PERMIT) &&
+ SEQ_LT(tp->snd_nxt, tp->snd_max))
+ tcp_sack_adjust(tp);
+ sendalot = 0;
+ tso = 0;
+ off = tp->snd_nxt - tp->snd_una;
+ sendwin = min(tp->snd_wnd, tp->snd_cwnd);
+ sendwin = min(sendwin, tp->snd_bwnd);
+
+ flags = tcp_outflags[tp->t_state];
+ /*
+ * Send any SACK-generated retransmissions. If we're explicitly trying
+ * to send out new data (when sendalot is 1), bypass this function.
+ * If we retransmit in fast recovery mode, decrement snd_cwnd, since
+ * we're replacing a (future) new transmission with a retransmission
+ * now, and we previously incremented snd_cwnd in tcp_input().
+ */
+ /*
+ * Still in SACK recovery: reset the rxmit flag to zero.
+ */
+ sack_rxmit = 0;
+ sack_bytes_rxmt = 0;
+ len = 0;
+ p = NULL;
+ if ((tp->t_flags & TF_SACK_PERMIT) && IN_FASTRECOVERY(tp) &&
+ (p = tcp_sack_output(tp, &sack_bytes_rxmt))) {
+ long cwin;
+
+ cwin = min(tp->snd_wnd, tp->snd_cwnd) - sack_bytes_rxmt;
+ if (cwin < 0)
+ cwin = 0;
+ /* Do not retransmit SACK segments beyond snd_recover */
+ if (SEQ_GT(p->end, tp->snd_recover)) {
+ /*
+ * (At least) part of sack hole extends beyond
+ * snd_recover. Check to see if we can rexmit data
+ * for this hole.
+ */
+ if (SEQ_GEQ(p->rxmit, tp->snd_recover)) {
+ /*
+ * Can't rexmit any more data for this hole.
+ * That data will be rexmitted in the next
+ * sack recovery episode, when snd_recover
+ * moves past p->rxmit.
+ */
+ p = NULL;
+ goto after_sack_rexmit;
+ } else
+ /* Can rexmit part of the current hole */
+ len = ((long)ulmin(cwin,
+ tp->snd_recover - p->rxmit));
+ } else
+ len = ((long)ulmin(cwin, p->end - p->rxmit));
+ off = p->rxmit - tp->snd_una;
+ KASSERT(off >= 0,("%s: sack block to the left of una : %d",
+ __func__, off));
+ if (len > 0) {
+ sack_rxmit = 1;
+ sendalot = 1;
+ TCPSTAT_INC(tcps_sack_rexmits);
+ TCPSTAT_ADD(tcps_sack_rexmit_bytes,
+ min(len, tp->t_maxseg));
+ }
+ }
+after_sack_rexmit:
+ /*
+ * Get standard flags, and add SYN or FIN if requested by 'hidden'
+ * state flags.
+ */
+ if (tp->t_flags & TF_NEEDFIN)
+ flags |= TH_FIN;
+ if (tp->t_flags & TF_NEEDSYN)
+ flags |= TH_SYN;
+
+ SOCKBUF_LOCK(&so->so_snd);
+ /*
+ * If in persist timeout with window of 0, send 1 byte.
+ * Otherwise, if window is small but nonzero
+ * and timer expired, we will send what we can
+ * and go to transmit state.
+ */
+ if (tp->t_flags & TF_FORCEDATA) {
+ if (sendwin == 0) {
+ /*
+ * If we still have some data to send, then
+ * clear the FIN bit. Usually this would
+ * happen below when it realizes that we
+ * aren't sending all the data. However,
+ * if we have exactly 1 byte of unsent data,
+ * then it won't clear the FIN bit below,
+ * and if we are in persist state, we wind
+ * up sending the packet without recording
+ * that we sent the FIN bit.
+ *
+ * We can't just blindly clear the FIN bit,
+ * because if we don't have any more data
+ * to send then the probe will be the FIN
+ * itself.
+ */
+ if (off < so->so_snd.sb_cc)
+ flags &= ~TH_FIN;
+ sendwin = 1;
+ } else {
+ tcp_timer_activate(tp, TT_PERSIST, 0);
+ tp->t_rxtshift = 0;
+ }
+ }
+
+ /*
+ * If snd_nxt == snd_max and we have transmitted a FIN, the
+ * offset will be > 0 even if so_snd.sb_cc is 0, resulting in
+ * a negative length. This can also occur when TCP opens up
+ * its congestion window while receiving additional duplicate
+ * acks after fast-retransmit because TCP will reset snd_nxt
+ * to snd_max after the fast-retransmit.
+ *
+ * In the normal retransmit-FIN-only case, however, snd_nxt will
+ * be set to snd_una, the offset will be 0, and the length may
+ * wind up 0.
+ *
+ * If sack_rxmit is true we are retransmitting from the scoreboard
+ * in which case len is already set.
+ */
+ if (sack_rxmit == 0) {
+ if (sack_bytes_rxmt == 0)
+ len = ((long)ulmin(so->so_snd.sb_cc, sendwin) - off);
+ else {
+ long cwin;
+
+ /*
+ * We are inside of a SACK recovery episode and are
+ * sending new data, having retransmitted all the
+ * data possible in the scoreboard.
+ */
+ len = ((long)ulmin(so->so_snd.sb_cc, tp->snd_wnd)
+ - off);
+ /*
+ * Don't remove this (len > 0) check!
+ * We explicitly check for len > 0 here (although it
+ * isn't really necessary), to work around a gcc
+ * optimization issue - to force gcc to compute
+ * len above. Without this check, the computation
+ * of len is bungled by the optimizer.
+ */
+ if (len > 0) {
+ cwin = tp->snd_cwnd -
+ (tp->snd_nxt - tp->sack_newdata) -
+ sack_bytes_rxmt;
+ if (cwin < 0)
+ cwin = 0;
+ len = lmin(len, cwin);
+ }
+ }
+ }
+
+ /*
+ * Lop off SYN bit if it has already been sent. However, if this
+ * is SYN-SENT state and if segment contains data and if we don't
+ * know that foreign host supports TAO, suppress sending segment.
+ */
+ if ((flags & TH_SYN) && SEQ_GT(tp->snd_nxt, tp->snd_una)) {
+ if (tp->t_state != TCPS_SYN_RECEIVED)
+ flags &= ~TH_SYN;
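+ /*
+ * The SYN consumed one sequence number but no socket
+ * buffer byte, so pull the buffer offset back by one
+ * and return that byte to len, which was computed
+ * from the unadjusted off.
+ */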
+ off--, len++;
+ }
+
+ /*
+ * Be careful not to send data and/or FIN on SYN segments.
+ * This measure is needed to prevent interoperability problems
+ * with not fully conformant TCP implementations.
+ */
+ if ((flags & TH_SYN) && (tp->t_flags & TF_NOOPT)) {
+ len = 0;
+ flags &= ~TH_FIN;
+ }
+
+ if (len < 0) {
+ /*
+ * If FIN has been sent but not acked,
+ * but we haven't been called to retransmit,
+ * len will be < 0. Otherwise, window shrank
+ * after we sent into it. If window shrank to 0,
+ * cancel pending retransmit, pull snd_nxt back
+ * to (closed) window, and set the persist timer
+ * if it isn't already going. If the window didn't
+ * close completely, just wait for an ACK.
+ */
+ len = 0;
+ if (sendwin == 0) {
+ tcp_timer_activate(tp, TT_REXMT, 0);
+ tp->t_rxtshift = 0;
+ tp->snd_nxt = tp->snd_una;
+ if (!tcp_timer_active(tp, TT_PERSIST))
+ tcp_setpersist(tp);
+ }
+ }
+
+ /* len will be >= 0 after this point. */
+ KASSERT(len >= 0, ("[%s:%d]: len < 0", __func__, __LINE__));
+
+ /*
+ * Automatic sizing of send socket buffer. Often the send buffer
+ * size is not optimally adjusted to the actual network conditions
+ * at hand (delay bandwidth product). Setting the buffer size too
+ * small limits throughput on links with high bandwidth and high
+ * delay (e.g. trans-continental/oceanic links). Setting the
+ * buffer size too big consumes too much real kernel memory,
+ * especially with many connections on busy servers.
+ *
+ * The criteria to step up the send buffer one notch are:
+ * 1. receive window of remote host is larger than send buffer
+ * (with a fudge factor of 5/4th);
+ * 2. send buffer is filled to 7/8th with data (so we actually
+ * have data to make use of it);
+ * 3. send buffer fill has not hit maximal automatic size;
+ * 4. our send window (slow start and congestion controlled) is
+ * larger than sent but unacknowledged data in send buffer.
+ *
+ * The remote host receive window scaling factor may limit the
+ * growing of the send buffer before it reaches its allowed
+ * maximum.
+ *
+ * It scales directly with slow start or congestion window
+ * and does at most one step per received ACK. This fast
+ * scaling has the drawback of growing the send buffer beyond
+ * what is strictly necessary to make full use of a given
+ * delay*bandwidth product. However, testing has shown this not
+ * to be much of a problem. At worst we trade wasted available
+ * bandwidth (leaving it unused) for some wasted socket buffer
+ * memory.
+ *
+ * TODO: Shrink send buffer during idle periods together
+ * with congestion window. Requires another timer. Has to
+ * wait for upcoming tcp timer rewrite.
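+ *
+ * Worked example (stock FreeBSD defaults assumed:
+ * tcp_autosndbuf_inc = 8k, tcp_autosndbuf_max = 256k): with
+ * sb_hiwat = 32k the buffer grows by 8k once the peer advertises
+ * at least 40k (5/4 of 32k) and at least 28k (7/8 of 32k) of the
+ * buffer is occupied.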
+ */
+ if (V_tcp_do_autosndbuf && so->so_snd.sb_flags & SB_AUTOSIZE) {
+ if ((tp->snd_wnd / 4 * 5) >= so->so_snd.sb_hiwat &&
+ so->so_snd.sb_cc >= (so->so_snd.sb_hiwat / 8 * 7) &&
+ so->so_snd.sb_cc < V_tcp_autosndbuf_max &&
+ sendwin >= (so->so_snd.sb_cc - (tp->snd_nxt - tp->snd_una))) {
+ if (!sbreserve_locked(&so->so_snd,
+ min(so->so_snd.sb_hiwat + V_tcp_autosndbuf_inc,
+ V_tcp_autosndbuf_max), so, curthread))
+ so->so_snd.sb_flags &= ~SB_AUTOSIZE;
+ }
+ }
+
+ /*
+ * Truncate to the maximum segment length or enable TCP Segmentation
+ * Offloading (if supported by hardware) and ensure that FIN is removed
+ * if the length no longer contains the last data byte.
+ *
+ * TSO may only be used if we are in a pure bulk sending state. The
+ * presence of TCP-MD5, SACK retransmits, SACK advertisements and
+ * IP options prevent using TSO. With TSO the TCP header is the same
+ * (except for the sequence number) for all generated packets. This
+ * makes it impossible to transmit any options which vary per generated
+ * segment or packet.
+ *
+ * The length of TSO bursts is limited to TCP_MAXWIN. That limit and
+ * removal of FIN (if not already caught here) are handled later after
+ * the exact length of the TCP options is known.
+ */
+#ifdef IPSEC
+ /*
+ * Pre-calculate here as we save another lookup into the darknesses
+ * of IPsec that way and can actually decide if TSO is ok.
+ */
+ ipsec_optlen = ipsec_hdrsiz_tcp(tp);
+#endif
+ if (len > tp->t_maxseg) {
+ if ((tp->t_flags & TF_TSO) && V_tcp_do_tso &&
+ ((tp->t_flags & TF_SIGNATURE) == 0) &&
+ tp->rcv_numsacks == 0 && sack_rxmit == 0 &&
+ tp->t_inpcb->inp_options == NULL &&
+ tp->t_inpcb->in6p_options == NULL
+#ifdef IPSEC
+ && ipsec_optlen == 0
+#endif
+ ) {
+ tso = 1;
+ } else {
+ len = tp->t_maxseg;
+ sendalot = 1;
+ }
+ }
+
+ if (sack_rxmit) {
+ if (SEQ_LT(p->rxmit + len, tp->snd_una + so->so_snd.sb_cc))
+ flags &= ~TH_FIN;
+ } else {
+ if (SEQ_LT(tp->snd_nxt + len, tp->snd_una + so->so_snd.sb_cc))
+ flags &= ~TH_FIN;
+ }
+
+ recwin = sbspace(&so->so_rcv);
+
+ /*
+ * Sender silly window avoidance. We transmit under the following
+ * conditions when len is non-zero:
+ *
+ * - We have a full segment (or more with TSO)
+ * - This is the last buffer in a write()/send() and we are
+ * either idle or running NODELAY
+ * - we've timed out (e.g. persist timer)
+ * - we have more than 1/2 the maximum send window's worth of
+ * data (the receiver may be limited by the window size)
+ * - we need to retransmit
+ */
+ if (len) {
+ if (len >= tp->t_maxseg)
+ goto send;
+ /*
+ * NOTE! on localhost connections an 'ack' from the remote
+ * end may occur synchronously with the output and cause
+ * us to flush a buffer queued with moretocome. XXX
+ *
+ * note: the len + off check is almost certainly unnecessary.
+ */
+ if (!(tp->t_flags & TF_MORETOCOME) && /* normal case */
+ (idle || (tp->t_flags & TF_NODELAY)) &&
+ len + off >= so->so_snd.sb_cc &&
+ (tp->t_flags & TF_NOPUSH) == 0) {
+ goto send;
+ }
+ if (tp->t_flags & TF_FORCEDATA) /* typ. timeout case */
+ goto send;
+ if (len >= tp->max_sndwnd / 2 && tp->max_sndwnd > 0)
+ goto send;
+ if (SEQ_LT(tp->snd_nxt, tp->snd_max)) /* retransmit case */
+ goto send;
+ if (sack_rxmit)
+ goto send;
+ }
+
+ /*
+ * Compare available window to amount of window
+ * known to peer (as advertised window less
+ * next expected input). If the difference is at least two
+ * max size segments, or at least 50% of the maximum possible
+ * window, then want to send a window update to peer.
+ * Skip this if the connection is in T/TCP half-open state.
+ * Don't send pure window updates when the peer has closed
+ * the connection and won't ever send more data.
+ */
+ if (recwin > 0 && !(tp->t_flags & TF_NEEDSYN) &&
+ !TCPS_HAVERCVDFIN(tp->t_state)) {
+ /*
+ * "adv" is the amount we can increase the window,
+ * taking into account that we are limited by
+ * TCP_MAXWIN << tp->rcv_scale.
+ */
+ long adv = min(recwin, (long)TCP_MAXWIN << tp->rcv_scale) -
+ (tp->rcv_adv - tp->rcv_nxt);
+
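+ /*
+ * E.g. with t_maxseg = 1460 an update goes out once adv
+ * reaches 2 * 1460 = 2920 bytes, or once it covers half
+ * of so_rcv.sb_hiwat, whichever comes first.
+ */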
+ if (adv >= (long) (2 * tp->t_maxseg))
+ goto send;
+ if (2 * adv >= (long) so->so_rcv.sb_hiwat)
+ goto send;
+ }
+
+ /*
+ * Send if we owe the peer an ACK, RST, SYN, or urgent data. ACKNOW
+ * is also a catch-all for the retransmit timer timeout case.
+ */
+ if (tp->t_flags & TF_ACKNOW)
+ goto send;
+ if ((flags & TH_RST) ||
+ ((flags & TH_SYN) && (tp->t_flags & TF_NEEDSYN) == 0))
+ goto send;
+ if (SEQ_GT(tp->snd_up, tp->snd_una))
+ goto send;
+ /*
+ * If our state indicates that FIN should be sent
+ * and we have not yet done so, then we need to send.
+ */
+ if (flags & TH_FIN &&
+ ((tp->t_flags & TF_SENTFIN) == 0 || tp->snd_nxt == tp->snd_una))
+ goto send;
+ /*
+ * In SACK, it is possible for tcp_output to fail to send a segment
+ * after the retransmission timer has been turned off. Make sure
+ * that the retransmission timer is set.
+ */
+ if ((tp->t_flags & TF_SACK_PERMIT) &&
+ SEQ_GT(tp->snd_max, tp->snd_una) &&
+ !tcp_timer_active(tp, TT_REXMT) &&
+ !tcp_timer_active(tp, TT_PERSIST)) {
+ tcp_timer_activate(tp, TT_REXMT, tp->t_rxtcur);
+ goto just_return;
+ }
+ /*
+ * TCP window updates are not reliable; rather, a polling protocol
+ * using ``persist'' packets is used to ensure receipt of window
+ * updates. The three ``states'' for the output side are:
+ * idle not doing retransmits or persists
+ * persisting to move a small or zero window
+ * (re)transmitting and thereby not persisting
+ *
+ * tcp_timer_active(tp, TT_PERSIST)
+ * is true when we are in persist state.
+ * (tp->t_flags & TF_FORCEDATA)
+ * is set when we are called to send a persist packet.
+ * tcp_timer_active(tp, TT_REXMT)
+ * is set when we are retransmitting
+ * The output side is idle when both timers are zero.
+ *
+ * If send window is too small, there is data to transmit, and no
+ * retransmit or persist is pending, then go to persist state.
+ * If nothing happens soon, send when timer expires:
+ * if window is nonzero, transmit what we can,
+ * otherwise force out a byte.
+ */
+ if (so->so_snd.sb_cc && !tcp_timer_active(tp, TT_REXMT) &&
+ !tcp_timer_active(tp, TT_PERSIST)) {
+ tp->t_rxtshift = 0;
+ tcp_setpersist(tp);
+ }
+
+ /*
+ * No reason to send a segment, just return.
+ */
+just_return:
+ SOCKBUF_UNLOCK(&so->so_snd);
+ return (0);
+
+send:
+ SOCKBUF_LOCK_ASSERT(&so->so_snd);
+ /*
+ * Before ESTABLISHED, force sending of initial options
+ * unless TCP set not to do any options.
+ * NOTE: we assume that the IP/TCP header plus TCP options
+ * always fit in a single mbuf, leaving room for a maximum
+ * link header, i.e.
+ * max_linkhdr + sizeof (struct tcpiphdr) + optlen <= MCLBYTES
+ */
+ optlen = 0;
+#ifdef INET6
+ if (isipv6)
+ hdrlen = sizeof (struct ip6_hdr) + sizeof (struct tcphdr);
+ else
+#endif
+ hdrlen = sizeof (struct tcpiphdr);
+
+ /*
+ * Compute options for segment.
+ * We only have to care about SYN and established connection
+ * segments. Options for SYN-ACK segments are handled in TCP
+ * syncache.
+ */
+ if ((tp->t_flags & TF_NOOPT) == 0) {
+ to.to_flags = 0;
+ /* Maximum segment size. */
+ if (flags & TH_SYN) {
+ tp->snd_nxt = tp->iss;
+ to.to_mss = tcp_mssopt(&tp->t_inpcb->inp_inc);
+ to.to_flags |= TOF_MSS;
+ }
+ /* Window scaling. */
+ if ((flags & TH_SYN) && (tp->t_flags & TF_REQ_SCALE)) {
+ to.to_wscale = tp->request_r_scale;
+ to.to_flags |= TOF_SCALE;
+ }
+ /* Timestamps. */
+ if ((tp->t_flags & TF_RCVD_TSTMP) ||
+ ((flags & TH_SYN) && (tp->t_flags & TF_REQ_TSTMP))) {
+ to.to_tsval = ticks + tp->ts_offset;
+ to.to_tsecr = tp->ts_recent;
+ to.to_flags |= TOF_TS;
+ /* Set receive buffer autosizing timestamp. */
+ if (tp->rfbuf_ts == 0 &&
+ (so->so_rcv.sb_flags & SB_AUTOSIZE))
+ tp->rfbuf_ts = ticks;
+ }
+ /* Selective ACK's. */
+ if (tp->t_flags & TF_SACK_PERMIT) {
+ if (flags & TH_SYN)
+ to.to_flags |= TOF_SACKPERM;
+ else if (TCPS_HAVEESTABLISHED(tp->t_state) &&
+ (tp->t_flags & TF_SACK_PERMIT) &&
+ tp->rcv_numsacks > 0) {
+ to.to_flags |= TOF_SACK;
+ to.to_nsacks = tp->rcv_numsacks;
+ to.to_sacks = (u_char *)tp->sackblks;
+ }
+ }
+#ifdef TCP_SIGNATURE
+ /* TCP-MD5 (RFC2385). */
+ if (tp->t_flags & TF_SIGNATURE)
+ to.to_flags |= TOF_SIGNATURE;
+#endif /* TCP_SIGNATURE */
+
+ /* Processing the options. */
+ hdrlen += optlen = tcp_addoptions(&to, opt);
+ }
+
+#ifdef INET6
+ if (isipv6)
+ ipoptlen = ip6_optlen(tp->t_inpcb);
+ else
+#endif
+ if (tp->t_inpcb->inp_options)
+ ipoptlen = tp->t_inpcb->inp_options->m_len -
+ offsetof(struct ipoption, ipopt_list);
+ else
+ ipoptlen = 0;
+#ifdef IPSEC
+ ipoptlen += ipsec_optlen;
+#endif
+
+ /*
+ * Adjust data length if insertion of options will
+ * bump the packet length beyond the t_maxopd length.
+ * Clear the FIN bit because we cut off the tail of
+ * the segment.
+ *
+ * When doing TSO limit a burst to TCP_MAXWIN minus the
+ * IP, TCP and Options length to keep ip->ip_len from
+ * overflowing. Prevent the last segment from being
+ * fractional thus making them all equal sized and set
+ * the flag to continue sending. TSO is disabled when
+ * IP options or IPSEC are present.
+ */
+ if (len + optlen + ipoptlen > tp->t_maxopd) {
+ flags &= ~TH_FIN;
+ if (tso) {
+ if (len > TCP_MAXWIN - hdrlen - optlen) {
+ len = TCP_MAXWIN - hdrlen - optlen;
+ len = len - (len % (tp->t_maxopd - optlen));
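+ /*
+ * E.g. IPv4 with hdrlen = 40, optlen = 12 and
+ * t_maxopd = 1460: len is first capped at
+ * 65535 - 40 - 12 = 65483, then rounded down to a
+ * multiple of 1448, giving 65160 (45 full segments).
+ */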
+ sendalot = 1;
+ } else if (tp->t_flags & TF_NEEDFIN)
+ sendalot = 1;
+ } else {
+ len = tp->t_maxopd - optlen - ipoptlen;
+ sendalot = 1;
+ }
+ }
+
+/*#ifdef DIAGNOSTIC*/
+#ifdef INET6
+ if (max_linkhdr + hdrlen > MCLBYTES)
+#else
+ if (max_linkhdr + hdrlen > MHLEN)
+#endif
+ panic("tcphdr too big");
+/*#endif*/
+
+ /*
+ * This KASSERT is here to catch edge cases at a well defined place.
+ * Before, those had triggered (random) panic conditions further down.
+ */
+ KASSERT(len >= 0, ("[%s:%d]: len < 0", __func__, __LINE__));
+
+ /*
+ * Grab a header mbuf, attaching a copy of data to
+ * be transmitted, and initialize the header from
+ * the template for sends on this connection.
+ */
+ if (len) {
+ struct mbuf *mb;
+ u_int moff;
+
+ if ((tp->t_flags & TF_FORCEDATA) && len == 1)
+ TCPSTAT_INC(tcps_sndprobe);
+ else if (SEQ_LT(tp->snd_nxt, tp->snd_max) || sack_rxmit) {
+ TCPSTAT_INC(tcps_sndrexmitpack);
+ TCPSTAT_ADD(tcps_sndrexmitbyte, len);
+ } else {
+ TCPSTAT_INC(tcps_sndpack);
+ TCPSTAT_ADD(tcps_sndbyte, len);
+ }
+#ifdef notyet
+ if ((m = m_copypack(so->so_snd.sb_mb, off,
+ (int)len, max_linkhdr + hdrlen)) == 0) {
+ SOCKBUF_UNLOCK(&so->so_snd);
+ error = ENOBUFS;
+ goto out;
+ }
+ /*
+ * m_copypack left space for our hdr; use it.
+ */
+ m->m_len += hdrlen;
+ m->m_data -= hdrlen;
+#else
+ MGETHDR(m, M_DONTWAIT, MT_DATA);
+ if (m == NULL) {
+ SOCKBUF_UNLOCK(&so->so_snd);
+ error = ENOBUFS;
+ goto out;
+ }
+#ifdef INET6
+ if (MHLEN < hdrlen + max_linkhdr) {
+ MCLGET(m, M_DONTWAIT);
+ if ((m->m_flags & M_EXT) == 0) {
+ SOCKBUF_UNLOCK(&so->so_snd);
+ m_freem(m);
+ error = ENOBUFS;
+ goto out;
+ }
+ }
+#endif
+ m->m_data += max_linkhdr;
+ m->m_len = hdrlen;
+
+ /*
+ * Start the m_copy functions from the closest mbuf
+ * to the offset in the socket buffer chain.
+ */
+ mb = sbsndptr(&so->so_snd, off, len, &moff);
+
+ if (len <= MHLEN - hdrlen - max_linkhdr) {
+ m_copydata(mb, moff, (int)len,
+ mtod(m, caddr_t) + hdrlen);
+ m->m_len += len;
+ } else {
+ m->m_next = m_copy(mb, moff, (int)len);
+ if (m->m_next == NULL) {
+ SOCKBUF_UNLOCK(&so->so_snd);
+ (void) m_free(m);
+ error = ENOBUFS;
+ goto out;
+ }
+ }
+#endif
+ /*
+ * If we're sending everything we've got, set PUSH.
+ * (This will keep happy those implementations which only
+ * give data to the user when a buffer fills or
+ * a PUSH comes in.)
+ */
+ if (off + len == so->so_snd.sb_cc)
+ flags |= TH_PUSH;
+ SOCKBUF_UNLOCK(&so->so_snd);
+ } else {
+ SOCKBUF_UNLOCK(&so->so_snd);
+ if (tp->t_flags & TF_ACKNOW)
+ TCPSTAT_INC(tcps_sndacks);
+ else if (flags & (TH_SYN|TH_FIN|TH_RST))
+ TCPSTAT_INC(tcps_sndctrl);
+ else if (SEQ_GT(tp->snd_up, tp->snd_una))
+ TCPSTAT_INC(tcps_sndurg);
+ else
+ TCPSTAT_INC(tcps_sndwinup);
+
+ MGETHDR(m, M_DONTWAIT, MT_DATA);
+ if (m == NULL) {
+ error = ENOBUFS;
+ goto out;
+ }
+#ifdef INET6
+ if (isipv6 && (MHLEN < hdrlen + max_linkhdr) &&
+ MHLEN >= hdrlen) {
+ MH_ALIGN(m, hdrlen);
+ } else
+#endif
+ m->m_data += max_linkhdr;
+ m->m_len = hdrlen;
+ }
+ SOCKBUF_UNLOCK_ASSERT(&so->so_snd);
+ m->m_pkthdr.rcvif = (struct ifnet *)0;
+#ifdef MAC
+ mac_inpcb_create_mbuf(tp->t_inpcb, m);
+#endif
+#ifdef INET6
+ if (isipv6) {
+ ip6 = mtod(m, struct ip6_hdr *);
+ th = (struct tcphdr *)(ip6 + 1);
+ tcpip_fillheaders(tp->t_inpcb, ip6, th);
+ } else
+#endif /* INET6 */
+ {
+ ip = mtod(m, struct ip *);
+ ipov = (struct ipovly *)ip;
+ th = (struct tcphdr *)(ip + 1);
+ tcpip_fillheaders(tp->t_inpcb, ip, th);
+ }
+
+ /*
+ * Fill in fields, remembering maximum advertised
+ * window for use in delaying messages about window sizes.
+ * If resending a FIN, be sure not to use a new sequence number.
+ */
+ if (flags & TH_FIN && tp->t_flags & TF_SENTFIN &&
+ tp->snd_nxt == tp->snd_max)
+ tp->snd_nxt--;
+ /*
+ * If we are starting a connection, send ECN setup
+ * SYN packet. If we are on a retransmit, we may
+ * resend those bits a number of times as per
+ * RFC 3168.
+ */
+ if (tp->t_state == TCPS_SYN_SENT && V_tcp_do_ecn) {
+ if (tp->t_rxtshift >= 1) {
+ if (tp->t_rxtshift <= V_tcp_ecn_maxretries)
+ flags |= TH_ECE|TH_CWR;
+ } else
+ flags |= TH_ECE|TH_CWR;
+ }
+
+ if (tp->t_state == TCPS_ESTABLISHED &&
+ (tp->t_flags & TF_ECN_PERMIT)) {
+ /*
+ * If the peer has ECN, mark data packets with
+ * ECN capable transmission (ECT).
+ * Ignore pure ack packets, retransmissions and window probes.
+ */
+ if (len > 0 && SEQ_GEQ(tp->snd_nxt, tp->snd_max) &&
+ !((tp->t_flags & TF_FORCEDATA) && len == 1)) {
+#ifdef INET6
+ if (isipv6)
+ ip6->ip6_flow |= htonl(IPTOS_ECN_ECT0 << 20);
+ else
+#endif
+ ip->ip_tos |= IPTOS_ECN_ECT0;
+ TCPSTAT_INC(tcps_ecn_ect0);
+ }
+
+ /*
+ * Reply with proper ECN notifications.
+ */
+ if (tp->t_flags & TF_ECN_SND_CWR) {
+ flags |= TH_CWR;
+ tp->t_flags &= ~TF_ECN_SND_CWR;
+ }
+ if (tp->t_flags & TF_ECN_SND_ECE)
+ flags |= TH_ECE;
+ }
+
+ /*
+ * If we are doing retransmissions, then snd_nxt will
+ * not reflect the first unsent octet. For ACK only
+ * packets, we do not want the sequence number of the
+ * retransmitted packet, we want the sequence number
+ * of the next unsent octet. So, if there is no data
+ * (and no SYN or FIN), use snd_max instead of snd_nxt
+ * when filling in ti_seq. But if we are in persist
+ * state, snd_max might reflect one byte beyond the
+ * right edge of the window, so use snd_nxt in that
+ * case, since we know we aren't doing a retransmission.
+ * (retransmit and persist are mutually exclusive...)
+ */
+ if (sack_rxmit == 0) {
+ if (len || (flags & (TH_SYN|TH_FIN)) ||
+ tcp_timer_active(tp, TT_PERSIST))
+ th->th_seq = htonl(tp->snd_nxt);
+ else
+ th->th_seq = htonl(tp->snd_max);
+ } else {
+ th->th_seq = htonl(p->rxmit);
+ p->rxmit += len;
+ tp->sackhint.sack_bytes_rexmit += len;
+ }
+ th->th_ack = htonl(tp->rcv_nxt);
+ if (optlen) {
+ bcopy(opt, th + 1, optlen);
+ th->th_off = (sizeof (struct tcphdr) + optlen) >> 2;
+ }
+ th->th_flags = flags;
+ /*
+ * Calculate receive window. Don't shrink window,
+ * but avoid silly window syndrome.
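+ * A window smaller than one full segment and less than 1/4 of
+ * the receive buffer is advertised as 0; the window is never
+ * pulled below what was already advertised and never exceeds
+ * TCP_MAXWIN << rcv_scale.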
+ */
+ if (recwin < (long)(so->so_rcv.sb_hiwat / 4) &&
+ recwin < (long)tp->t_maxseg)
+ recwin = 0;
+ if (recwin < (long)(tp->rcv_adv - tp->rcv_nxt))
+ recwin = (long)(tp->rcv_adv - tp->rcv_nxt);
+ if (recwin > (long)TCP_MAXWIN << tp->rcv_scale)
+ recwin = (long)TCP_MAXWIN << tp->rcv_scale;
+
+ /*
+ * According to RFC1323 the window field in a SYN (i.e., a <SYN>
+ * or <SYN,ACK>) segment itself is never scaled. The <SYN,ACK>
+ * case is handled in syncache.
+ */
+ if (flags & TH_SYN)
+ th->th_win = htons((u_short)
+ (min(sbspace(&so->so_rcv), TCP_MAXWIN)));
+ else
+ th->th_win = htons((u_short)(recwin >> tp->rcv_scale));
+
+ /*
+ * Adjust the RXWIN0SENT flag - indicate that we have advertised
+ * a 0 window. This may cause the remote transmitter to stall. This
+ * flag tells soreceive() to disable delayed acknowledgements when
+ * draining the buffer. This can occur if the receiver is attempting
+ * to read more data than can be buffered prior to transmitting on
+ * the connection.
+ */
+ if (th->th_win == 0)
+ tp->t_flags |= TF_RXWIN0SENT;
+ else
+ tp->t_flags &= ~TF_RXWIN0SENT;
+ if (SEQ_GT(tp->snd_up, tp->snd_nxt)) {
+ th->th_urp = htons((u_short)(tp->snd_up - tp->snd_nxt));
+ th->th_flags |= TH_URG;
+ } else
+ /*
+ * If no urgent pointer to send, then we pull
+ * the urgent pointer to the left edge of the send window
+ * so that it doesn't drift into the send window on sequence
+ * number wraparound.
+ */
+ tp->snd_up = tp->snd_una; /* drag it along */
+
+#ifdef TCP_SIGNATURE
+ if (tp->t_flags & TF_SIGNATURE) {
+ int sigoff = to.to_signature - opt;
+ tcp_signature_compute(m, 0, len, optlen,
+ (u_char *)(th + 1) + sigoff, IPSEC_DIR_OUTBOUND);
+ }
+#endif
+
+ /*
+ * Put TCP length in extended header, and then
+ * checksum extended header and data.
+ */
+ m->m_pkthdr.len = hdrlen + len; /* in6_cksum() needs this */
+#ifdef INET6
+ if (isipv6)
+ /*
+ * ip6_plen need not be filled in now; it will be filled
+ * in later by ip6_output.
+ */
+ th->th_sum = in6_cksum(m, IPPROTO_TCP, sizeof(struct ip6_hdr),
+ sizeof(struct tcphdr) + optlen + len);
+ else
+#endif /* INET6 */
+ {
+ m->m_pkthdr.csum_flags = CSUM_TCP;
+ m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
+ th->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
+ htons(sizeof(struct tcphdr) + IPPROTO_TCP + len + optlen));
+
+ /* IP version must be set here for ipv4/ipv6 checking later */
+ KASSERT(ip->ip_v == IPVERSION,
+ ("%s: IP version incorrect: %d", __func__, ip->ip_v));
+ }
+
+ /*
+ * Enable TSO and specify the size of the segments.
+ * The TCP pseudo header checksum is always provided.
+ * XXX: Fixme: This is currently not the case for IPv6.
+ */
+ if (tso) {
+ KASSERT(len > tp->t_maxopd - optlen,
+ ("%s: len <= tso_segsz", __func__));
+ m->m_pkthdr.csum_flags |= CSUM_TSO;
+ m->m_pkthdr.tso_segsz = tp->t_maxopd - optlen;
+ }
+
+ /*
+ * In transmit state, time the transmission and arrange for
+ * the retransmit. In persist state, just set snd_max.
+ */
+ if ((tp->t_flags & TF_FORCEDATA) == 0 ||
+ !tcp_timer_active(tp, TT_PERSIST)) {
+ tcp_seq startseq = tp->snd_nxt;
+
+ /*
+ * Advance snd_nxt over sequence space of this segment.
+ */
+ if (flags & (TH_SYN|TH_FIN)) {
+ if (flags & TH_SYN)
+ tp->snd_nxt++;
+ if (flags & TH_FIN) {
+ tp->snd_nxt++;
+ tp->t_flags |= TF_SENTFIN;
+ }
+ }
+ if (sack_rxmit)
+ goto timer;
+ tp->snd_nxt += len;
+ if (SEQ_GT(tp->snd_nxt, tp->snd_max)) {
+ tp->snd_max = tp->snd_nxt;
+ /*
+ * Time this transmission if not a retransmission and
+ * not currently timing anything.
+ */
+ if (tp->t_rtttime == 0) {
+ tp->t_rtttime = ticks;
+ tp->t_rtseq = startseq;
+ TCPSTAT_INC(tcps_segstimed);
+ }
+ }
+
+ /*
+ * Set retransmit timer if not currently set,
+ * and not doing a pure ack or a keep-alive probe.
+ * Initial value for retransmit timer is smoothed
+ * round-trip time + 2 * round-trip time variance.
+ * Initialize shift counter which is used for backoff
+ * of retransmit time.
+ */
+timer:
+ if (!tcp_timer_active(tp, TT_REXMT) &&
+ ((sack_rxmit && tp->snd_nxt != tp->snd_max) ||
+ (tp->snd_nxt != tp->snd_una))) {
+ if (tcp_timer_active(tp, TT_PERSIST)) {
+ tcp_timer_activate(tp, TT_PERSIST, 0);
+ tp->t_rxtshift = 0;
+ }
+ tcp_timer_activate(tp, TT_REXMT, tp->t_rxtcur);
+ }
+ } else {
+ /*
+ * Persist case, update snd_max but since we are in
+ * persist mode (no window) we do not update snd_nxt.
+ */
+ int xlen = len;
+ if (flags & TH_SYN)
+ ++xlen;
+ if (flags & TH_FIN) {
+ ++xlen;
+ tp->t_flags |= TF_SENTFIN;
+ }
+ if (SEQ_GT(tp->snd_nxt + xlen, tp->snd_max))
+ tp->snd_max = tp->snd_nxt + len;
+ }
+
+#ifdef TCPDEBUG
+ /*
+ * Trace.
+ */
+ if (so->so_options & SO_DEBUG) {
+ u_short save = 0;
+#ifdef INET6
+ if (!isipv6)
+#endif
+ {
+ save = ipov->ih_len;
+ ipov->ih_len = htons(m->m_pkthdr.len /* - hdrlen + (th->th_off << 2) */);
+ }
+ tcp_trace(TA_OUTPUT, tp->t_state, tp, mtod(m, void *), th, 0);
+#ifdef INET6
+ if (!isipv6)
+#endif
+ ipov->ih_len = save;
+ }
+#endif
+
+ /*
+ * Fill in IP length and desired time to live and
+ * send to IP level. There should be a better way
+ * to handle ttl and tos; we could keep them in
+ * the template, but need a way to checksum without them.
+ */
+ /*
+ * m->m_pkthdr.len should have been set before the checksum
+ * calculation, because in6_cksum() needs it.
+ */
+#ifdef INET6
+ if (isipv6) {
+ /*
+ * we separately set hoplimit for every segment, since the
+ * user might want to change the value via setsockopt.
+ * Also, desired default hop limit might be changed via
+ * Neighbor Discovery.
+ */
+ ip6->ip6_hlim = in6_selecthlim(tp->t_inpcb, NULL);
+
+ /* TODO: IPv6 IP6TOS_ECT bit on */
+ error = ip6_output(m,
+ tp->t_inpcb->in6p_outputopts, NULL,
+ ((so->so_options & SO_DONTROUTE) ?
+ IP_ROUTETOIF : 0), NULL, NULL, tp->t_inpcb);
+ } else
+#endif /* INET6 */
+ {
+ ip->ip_len = m->m_pkthdr.len;
+#ifdef INET6
+ if (tp->t_inpcb->inp_vflag & INP_IPV6PROTO)
+ ip->ip_ttl = in6_selecthlim(tp->t_inpcb, NULL);
+#endif /* INET6 */
+ /*
+ * If we do path MTU discovery, then we set DF on every packet.
+ * This might not be the best thing to do according to RFC3390
+ * Section 2. However, the tcp hostcache mitigates the problem
+ * so it affects only the first tcp connection with a host.
+ *
+ * NB: Don't set DF on small MTU/MSS to have a safe fallback.
+ */
+ if (V_path_mtu_discovery && tp->t_maxopd > V_tcp_minmss)
+ ip->ip_off |= IP_DF;
+
+ error = ip_output(m, tp->t_inpcb->inp_options, NULL,
+ ((so->so_options & SO_DONTROUTE) ? IP_ROUTETOIF : 0), 0,
+ tp->t_inpcb);
+ }
+ if (error) {
+
+ /*
+ * We know that the packet was lost, so back out the
+ * sequence number advance, if any.
+ *
+ * If the error is EPERM the packet got blocked by the
+ * local firewall. Normally we should terminate the
+ * connection but the blocking may have been spurious
+ * due to a firewall reconfiguration cycle. So we treat
+ * it like a packet loss and let the retransmit timer and
+ * timeouts do their work over time.
+ * XXX: It is a POLA question whether calling tcp_drop right
+ * away would be the really correct behavior instead.
+ */
+ if (((tp->t_flags & TF_FORCEDATA) == 0 ||
+ !tcp_timer_active(tp, TT_PERSIST)) &&
+ ((flags & TH_SYN) == 0) &&
+ (error != EPERM)) {
+ if (sack_rxmit) {
+ p->rxmit -= len;
+ tp->sackhint.sack_bytes_rexmit -= len;
+ KASSERT(tp->sackhint.sack_bytes_rexmit >= 0,
+ ("sackhint bytes rtx >= 0"));
+ } else
+ tp->snd_nxt -= len;
+ }
+out:
+ SOCKBUF_UNLOCK_ASSERT(&so->so_snd); /* Check gotos. */
+ switch (error) {
+ case EPERM:
+ tp->t_softerror = error;
+ return (error);
+ case ENOBUFS:
+ if (!tcp_timer_active(tp, TT_REXMT) &&
+ !tcp_timer_active(tp, TT_PERSIST))
+ tcp_timer_activate(tp, TT_REXMT, tp->t_rxtcur);
+ tp->snd_cwnd = tp->t_maxseg;
+ return (0);
+ case EMSGSIZE:
+ /*
+ * For some reason the interface we used initially
+ * to send segments changed to another or lowered
+ * its MTU.
+ *
+ * tcp_mtudisc() will find out the new MTU and, as
+ * its last action, initiate retransmission, so it
+ * is important not to do so here.
+ *
+ * If TSO was active we either got an interface
+ * without TSO capabilities or TSO was turned off.
+ * Disable it for this connection too and
+ * immediately retry with MSS sized segments generated
+ * by this function.
+ */
+ if (tso)
+ tp->t_flags &= ~TF_TSO;
+ tcp_mtudisc(tp->t_inpcb, 0);
+ return (0);
+ case EHOSTDOWN:
+ case EHOSTUNREACH:
+ case ENETDOWN:
+ case ENETUNREACH:
+ if (TCPS_HAVERCVDSYN(tp->t_state)) {
+ tp->t_softerror = error;
+ return (0);
+ }
+ /* FALLTHROUGH */
+ default:
+ return (error);
+ }
+ }
+ TCPSTAT_INC(tcps_sndtotal);
+
+ /*
+ * Data sent (as far as we can tell).
+ * If this advertises a larger window than any other segment,
+ * then remember the size of the advertised window.
+ * Any pending ACK has now been sent.
+ */
+ if (recwin > 0 && SEQ_GT(tp->rcv_nxt + recwin, tp->rcv_adv))
+ tp->rcv_adv = tp->rcv_nxt + recwin;
+ tp->last_ack_sent = tp->rcv_nxt;
+ tp->t_flags &= ~(TF_ACKNOW | TF_DELACK);
+ if (tcp_timer_active(tp, TT_DELACK))
+ tcp_timer_activate(tp, TT_DELACK, 0);
+#if 0
+ /*
+ * This completely breaks TCP if newreno is turned on. What happens
+ * is that if delayed-acks are turned on on the receiver, this code
+ * on the transmitter effectively destroys the TCP window, forcing
+ * it to four packets (1.5Kx4 = 6K window).
+ */
+ if (sendalot && (!V_tcp_do_newreno || --maxburst))
+ goto again;
+#endif
+ if (sendalot)
+ goto again;
+ return (0);
+}
+
+void
+tcp_setpersist(struct tcpcb *tp)
+{
+ int t = ((tp->t_srtt >> 2) + tp->t_rttvar) >> 1;
+ int tt;
+
+ if (tcp_timer_active(tp, TT_REXMT))
+ panic("tcp_setpersist: retransmit pending");
+ /*
+ * Start/restart persistence timer.
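+ * The interval t * tcp_backoff[t_rxtshift] roughly doubles per
+ * unanswered probe (the backoff factor is capped at 64) and is
+ * clamped to [TCPTV_PERSMIN, TCPTV_PERSMAX], 5 and 60 seconds in
+ * stock FreeBSD.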
+ */
+ TCPT_RANGESET(tt, t * tcp_backoff[tp->t_rxtshift],
+ TCPTV_PERSMIN, TCPTV_PERSMAX);
+ tcp_timer_activate(tp, TT_PERSIST, tt);
+ if (tp->t_rxtshift < TCP_MAXRXTSHIFT)
+ tp->t_rxtshift++;
+}
+
+/*
+ * Insert TCP options according to the supplied parameters to the place
+ * optp in a consistent way. Can handle unaligned destinations.
+ *
+ * The order of the option processing is crucial for optimal packing and
+ * alignment for the scarce option space.
+ *
+ * The optimal order for a SYN/SYN-ACK segment is:
+ * MSS (4) + NOP (1) + Window scale (3) + SACK permitted (2) +
+ * Timestamp (10) + Signature (18) = 38 bytes out of a maximum of 40.
+ *
+ * The SACK options should be last. SACK blocks consume 8*n+2 bytes.
+ * So a full size SACK blocks option is 34 bytes (with 4 SACK blocks).
+ * At minimum we need 10 bytes (to generate 1 SACK block). If both
+ * TCP Timestamps (12 bytes) and TCP Signatures (18 bytes) are present,
+ * we only have 10 bytes for SACK options (40 - (12 + 18)).
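+ *
+ * The NOP padding loops below position each option deliberately:
+ * the timestamp option, for instance, is begun at an offset with
+ * optlen % 4 == 2 so that, together with its two leading NOPs, the
+ * 4-byte tsval and tsecr fields land on 4-byte boundaries (the
+ * classic <NOP,NOP,Timestamp> layout).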
+ */
+int
+tcp_addoptions(struct tcpopt *to, u_char *optp)
+{
+ u_int mask, optlen = 0;
+
+ for (mask = 1; mask < TOF_MAXOPT; mask <<= 1) {
+ if ((to->to_flags & mask) != mask)
+ continue;
+ if (optlen == TCP_MAXOLEN)
+ break;
+ switch (to->to_flags & mask) {
+ case TOF_MSS:
+ while (optlen % 4) {
+ optlen += TCPOLEN_NOP;
+ *optp++ = TCPOPT_NOP;
+ }
+ if (TCP_MAXOLEN - optlen < TCPOLEN_MAXSEG)
+ continue;
+ optlen += TCPOLEN_MAXSEG;
+ *optp++ = TCPOPT_MAXSEG;
+ *optp++ = TCPOLEN_MAXSEG;
+ to->to_mss = htons(to->to_mss);
+ bcopy((u_char *)&to->to_mss, optp, sizeof(to->to_mss));
+ optp += sizeof(to->to_mss);
+ break;
+ case TOF_SCALE:
+ while (!optlen || optlen % 2 != 1) {
+ optlen += TCPOLEN_NOP;
+ *optp++ = TCPOPT_NOP;
+ }
+ if (TCP_MAXOLEN - optlen < TCPOLEN_WINDOW)
+ continue;
+ optlen += TCPOLEN_WINDOW;
+ *optp++ = TCPOPT_WINDOW;
+ *optp++ = TCPOLEN_WINDOW;
+ *optp++ = to->to_wscale;
+ break;
+ case TOF_SACKPERM:
+ while (optlen % 2) {
+ optlen += TCPOLEN_NOP;
+ *optp++ = TCPOPT_NOP;
+ }
+ if (TCP_MAXOLEN - optlen < TCPOLEN_SACK_PERMITTED)
+ continue;
+ optlen += TCPOLEN_SACK_PERMITTED;
+ *optp++ = TCPOPT_SACK_PERMITTED;
+ *optp++ = TCPOLEN_SACK_PERMITTED;
+ break;
+ case TOF_TS:
+ while (!optlen || optlen % 4 != 2) {
+ optlen += TCPOLEN_NOP;
+ *optp++ = TCPOPT_NOP;
+ }
+ if (TCP_MAXOLEN - optlen < TCPOLEN_TIMESTAMP)
+ continue;
+ optlen += TCPOLEN_TIMESTAMP;
+ *optp++ = TCPOPT_TIMESTAMP;
+ *optp++ = TCPOLEN_TIMESTAMP;
+ to->to_tsval = htonl(to->to_tsval);
+ to->to_tsecr = htonl(to->to_tsecr);
+ bcopy((u_char *)&to->to_tsval, optp, sizeof(to->to_tsval));
+ optp += sizeof(to->to_tsval);
+ bcopy((u_char *)&to->to_tsecr, optp, sizeof(to->to_tsecr));
+ optp += sizeof(to->to_tsecr);
+ break;
+ case TOF_SIGNATURE:
+ {
+ int siglen = TCPOLEN_SIGNATURE - 2;
+
+ while (!optlen || optlen % 4 != 2) {
+ optlen += TCPOLEN_NOP;
+ *optp++ = TCPOPT_NOP;
+ }
+ if (TCP_MAXOLEN - optlen < TCPOLEN_SIGNATURE)
+ continue;
+ optlen += TCPOLEN_SIGNATURE;
+ *optp++ = TCPOPT_SIGNATURE;
+ *optp++ = TCPOLEN_SIGNATURE;
+ to->to_signature = optp;
+ while (siglen--)
+ *optp++ = 0;
+ break;
+ }
+ case TOF_SACK:
+ {
+ int sackblks = 0;
+ struct sackblk *sack = (struct sackblk *)to->to_sacks;
+ tcp_seq sack_seq;
+
+ while (!optlen || optlen % 4 != 2) {
+ optlen += TCPOLEN_NOP;
+ *optp++ = TCPOPT_NOP;
+ }
+ if (TCP_MAXOLEN - optlen < TCPOLEN_SACKHDR + TCPOLEN_SACK)
+ continue;
+ optlen += TCPOLEN_SACKHDR;
+ *optp++ = TCPOPT_SACK;
+ sackblks = min(to->to_nsacks,
+ (TCP_MAXOLEN - optlen) / TCPOLEN_SACK);
+ *optp++ = TCPOLEN_SACKHDR + sackblks * TCPOLEN_SACK;
+ while (sackblks--) {
+ sack_seq = htonl(sack->start);
+ bcopy((u_char *)&sack_seq, optp, sizeof(sack_seq));
+ optp += sizeof(sack_seq);
+ sack_seq = htonl(sack->end);
+ bcopy((u_char *)&sack_seq, optp, sizeof(sack_seq));
+ optp += sizeof(sack_seq);
+ optlen += TCPOLEN_SACK;
+ sack++;
+ }
+ TCPSTAT_INC(tcps_sack_send_blocks);
+ break;
+ }
+ default:
+ panic("%s: unknown TCP option type", __func__);
+ break;
+ }
+ }
+
+ /* Terminate and pad TCP options to a 4 byte boundary. */
+ if (optlen % 4) {
+ optlen += TCPOLEN_EOL;
+ *optp++ = TCPOPT_EOL;
+ }
+ /*
+ * According to RFC 793 (STD0007):
+ * "The content of the header beyond the End-of-Option option
+ * must be header padding (i.e., zero)."
+ * and later: "The padding is composed of zeros."
+ */
+ while (optlen % 4) {
+ optlen += TCPOLEN_PAD;
+ *optp++ = TCPOPT_PAD;
+ }
+
+ KASSERT(optlen <= TCP_MAXOLEN, ("%s: TCP options too long", __func__));
+ return (optlen);
+}
diff --git a/rtems/freebsd/netinet/tcp_reass.c b/rtems/freebsd/netinet/tcp_reass.c
new file mode 100644
index 00000000..bf80443c
--- /dev/null
+++ b/rtems/freebsd/netinet/tcp_reass.c
@@ -0,0 +1,335 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1994, 1995
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)tcp_input.c 8.12 (Berkeley) 5/24/95
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/freebsd/local/opt_inet.h>
+#include <rtems/freebsd/local/opt_inet6.h>
+#include <rtems/freebsd/local/opt_tcpdebug.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/socketvar.h>
+#include <rtems/freebsd/sys/sysctl.h>
+#include <rtems/freebsd/sys/syslog.h>
+#include <rtems/freebsd/sys/systm.h>
+
+#include <rtems/freebsd/vm/uma.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/route.h>
+#include <rtems/freebsd/net/vnet.h>
+
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/in_pcb.h>
+#include <rtems/freebsd/netinet/in_systm.h>
+#include <rtems/freebsd/netinet/in_var.h>
+#include <rtems/freebsd/netinet/ip.h>
+#include <rtems/freebsd/netinet/ip_var.h>
+#include <rtems/freebsd/netinet/ip_options.h>
+#include <rtems/freebsd/netinet/ip6.h>
+#include <rtems/freebsd/netinet6/in6_pcb.h>
+#include <rtems/freebsd/netinet6/ip6_var.h>
+#include <rtems/freebsd/netinet6/nd6.h>
+#include <rtems/freebsd/netinet/tcp.h>
+#include <rtems/freebsd/netinet/tcp_fsm.h>
+#include <rtems/freebsd/netinet/tcp_seq.h>
+#include <rtems/freebsd/netinet/tcp_timer.h>
+#include <rtems/freebsd/netinet/tcp_var.h>
+#include <rtems/freebsd/netinet6/tcp6_var.h>
+#include <rtems/freebsd/netinet/tcpip.h>
+#ifdef TCPDEBUG
+#include <rtems/freebsd/netinet/tcp_debug.h>
+#endif /* TCPDEBUG */
+
+static int tcp_reass_sysctl_maxseg(SYSCTL_HANDLER_ARGS);
+static int tcp_reass_sysctl_qsize(SYSCTL_HANDLER_ARGS);
+
+SYSCTL_NODE(_net_inet_tcp, OID_AUTO, reass, CTLFLAG_RW, 0,
+ "TCP Segment Reassembly Queue");
+
+static VNET_DEFINE(int, tcp_reass_maxseg) = 0;
+#define V_tcp_reass_maxseg VNET(tcp_reass_maxseg)
+SYSCTL_VNET_PROC(_net_inet_tcp_reass, OID_AUTO, maxsegments, CTLFLAG_RDTUN,
+ &VNET_NAME(tcp_reass_maxseg), 0, &tcp_reass_sysctl_maxseg, "I",
+ "Global maximum number of TCP Segments in Reassembly Queue");
+
+static VNET_DEFINE(int, tcp_reass_qsize) = 0;
+#define V_tcp_reass_qsize VNET(tcp_reass_qsize)
+SYSCTL_VNET_PROC(_net_inet_tcp_reass, OID_AUTO, cursegments, CTLFLAG_RD,
+ &VNET_NAME(tcp_reass_qsize), 0, &tcp_reass_sysctl_qsize, "I",
+ "Global number of TCP Segments currently in Reassembly Queue");
+
+static VNET_DEFINE(int, tcp_reass_overflows) = 0;
+#define V_tcp_reass_overflows VNET(tcp_reass_overflows)
+SYSCTL_VNET_INT(_net_inet_tcp_reass, OID_AUTO, overflows, CTLFLAG_RD,
+ &VNET_NAME(tcp_reass_overflows), 0,
+ "Global number of TCP Segment Reassembly Queue Overflows");
+
+static VNET_DEFINE(uma_zone_t, tcp_reass_zone);
+#define V_tcp_reass_zone VNET(tcp_reass_zone)
+
+/* Initialize TCP reassembly queue */
+static void
+tcp_reass_zone_change(void *tag)
+{
+
+ V_tcp_reass_maxseg = nmbclusters / 16;
+ uma_zone_set_max(V_tcp_reass_zone, V_tcp_reass_maxseg);
+}
+
+void
+tcp_reass_init(void)
+{
+
+ V_tcp_reass_maxseg = nmbclusters / 16;
+ TUNABLE_INT_FETCH("net.inet.tcp.reass.maxsegments",
+ &V_tcp_reass_maxseg);
+ V_tcp_reass_zone = uma_zcreate("tcpreass", sizeof (struct tseg_qent),
+ NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
+ uma_zone_set_max(V_tcp_reass_zone, V_tcp_reass_maxseg);
+ EVENTHANDLER_REGISTER(nmbclusters_change,
+ tcp_reass_zone_change, NULL, EVENTHANDLER_PRI_ANY);
+}
+
+#ifdef VIMAGE
+void
+tcp_reass_destroy(void)
+{
+
+ uma_zdestroy(V_tcp_reass_zone);
+}
+#endif
+
+void
+tcp_reass_flush(struct tcpcb *tp)
+{
+ struct tseg_qent *qe;
+
+ INP_WLOCK_ASSERT(tp->t_inpcb);
+
+ while ((qe = LIST_FIRST(&tp->t_segq)) != NULL) {
+ LIST_REMOVE(qe, tqe_q);
+ m_freem(qe->tqe_m);
+ uma_zfree(V_tcp_reass_zone, qe);
+ tp->t_segqlen--;
+ }
+
+ KASSERT((tp->t_segqlen == 0),
+ ("TCP reass queue %p segment count is %d instead of 0 after flush.",
+ tp, tp->t_segqlen));
+}
+
+static int
+tcp_reass_sysctl_maxseg(SYSCTL_HANDLER_ARGS)
+{
+ V_tcp_reass_maxseg = uma_zone_get_max(V_tcp_reass_zone);
+ return (sysctl_handle_int(oidp, arg1, arg2, req));
+}
+
+static int
+tcp_reass_sysctl_qsize(SYSCTL_HANDLER_ARGS)
+{
+ V_tcp_reass_qsize = uma_zone_get_cur(V_tcp_reass_zone);
+ return (sysctl_handle_int(oidp, arg1, arg2, req));
+}
+
+int
+tcp_reass(struct tcpcb *tp, struct tcphdr *th, int *tlenp, struct mbuf *m)
+{
+ struct tseg_qent *q;
+ struct tseg_qent *p = NULL;
+ struct tseg_qent *nq;
+ struct tseg_qent *te = NULL;
+ struct socket *so = tp->t_inpcb->inp_socket;
+ int flags;
+
+ INP_WLOCK_ASSERT(tp->t_inpcb);
+
+ /*
+ * XXX: tcp_reass() is rather inefficient with its data structures
+ * and should be rewritten (see NetBSD for optimizations).
+ */
+
+ /*
+ * Call with th == NULL after the connection becomes established,
+ * to force pre-ESTABLISHED data up to the user socket.
+ */
+ if (th == NULL)
+ goto present;
+
+ /*
+ * Limit the number of segments that can be queued to reduce the
+ * potential for mbuf exhaustion. For best performance, we want to be
+ * able to queue a full window's worth of segments. The size of the
+ * socket receive buffer determines our advertised window and grows
+ * automatically when socket buffer autotuning is enabled. Use it as the
+ * basis for our queue limit.
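+ * (E.g. a 64k receive buffer with t_maxseg = 1460 admits
+ * 65536 / 1460 + 1 = 45 queued segments.)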
+ * Always let through the missing segment that caused this queue.
+ * NB: Access to the socket buffer is left intentionally unlocked as we
+ * can tolerate stale information here.
+ *
+ * XXXLAS: Using sbspace(so->so_rcv) instead of so->so_rcv.sb_hiwat
+ * should work but causes packets to be dropped when they shouldn't.
+ * Investigate why and re-evaluate the below limit after the behaviour
+ * is understood.
+ */
+ if (th->th_seq != tp->rcv_nxt &&
+ tp->t_segqlen >= (so->so_rcv.sb_hiwat / tp->t_maxseg) + 1) {
+ V_tcp_reass_overflows++;
+ TCPSTAT_INC(tcps_rcvmemdrop);
+ m_freem(m);
+ *tlenp = 0;
+ return (0);
+ }
+
+ /*
+ * Allocate a new queue entry. If we can't, or we hit the zone
+ * limit, just drop the packet.
+ */
+ te = uma_zalloc(V_tcp_reass_zone, M_NOWAIT);
+ if (te == NULL) {
+ TCPSTAT_INC(tcps_rcvmemdrop);
+ m_freem(m);
+ *tlenp = 0;
+ return (0);
+ }
+ tp->t_segqlen++;
+
+ /*
+ * Find a segment which begins after this one does.
+ */
+ LIST_FOREACH(q, &tp->t_segq, tqe_q) {
+ if (SEQ_GT(q->tqe_th->th_seq, th->th_seq))
+ break;
+ p = q;
+ }
+
+ /*
+ * If there is a preceding segment, it may provide some of
+ * our data already. If so, drop the data from the incoming
+ * segment. If it provides all of our data, drop us.
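+ * E.g. a queued segment [100, 150) and an arrival at 120 give
+ * i = 100 + 50 - 120 = 30: the first 30 bytes of the new
+ * segment are duplicates and are trimmed below.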
+ */
+ if (p != NULL) {
+ int i;
+ /* conversion to int (in i) handles seq wraparound */
+ i = p->tqe_th->th_seq + p->tqe_len - th->th_seq;
+ if (i > 0) {
+ if (i >= *tlenp) {
+ TCPSTAT_INC(tcps_rcvduppack);
+ TCPSTAT_ADD(tcps_rcvdupbyte, *tlenp);
+ m_freem(m);
+ uma_zfree(V_tcp_reass_zone, te);
+ tp->t_segqlen--;
+ /*
+ * Try to present any queued data
+ * at the left window edge to the user.
+ * This is needed after the 3-WHS
+ * completes.
+ */
+ goto present; /* ??? */
+ }
+ m_adj(m, i);
+ *tlenp -= i;
+ th->th_seq += i;
+ }
+ }
+ TCPSTAT_INC(tcps_rcvoopack);
+ TCPSTAT_ADD(tcps_rcvoobyte, *tlenp);
+
+ /*
+ * While we overlap succeeding segments, trim them or,
+ * if they are completely covered, dequeue them.
+ */
+ while (q) {
+ int i = (th->th_seq + *tlenp) - q->tqe_th->th_seq;
+ if (i <= 0)
+ break;
+ if (i < q->tqe_len) {
+ q->tqe_th->th_seq += i;
+ q->tqe_len -= i;
+ m_adj(q->tqe_m, i);
+ break;
+ }
+
+ nq = LIST_NEXT(q, tqe_q);
+ LIST_REMOVE(q, tqe_q);
+ m_freem(q->tqe_m);
+ uma_zfree(V_tcp_reass_zone, q);
+ tp->t_segqlen--;
+ q = nq;
+ }
+
+ /* Insert the new segment queue entry into place. */
+ te->tqe_m = m;
+ te->tqe_th = th;
+ te->tqe_len = *tlenp;
+
+ if (p == NULL) {
+ LIST_INSERT_HEAD(&tp->t_segq, te, tqe_q);
+ } else {
+ LIST_INSERT_AFTER(p, te, tqe_q);
+ }
+
+present:
+ /*
+ * Present data to user, advancing rcv_nxt through
+ * completed sequence space.
+ */
+ if (!TCPS_HAVEESTABLISHED(tp->t_state))
+ return (0);
+ q = LIST_FIRST(&tp->t_segq);
+ if (!q || q->tqe_th->th_seq != tp->rcv_nxt)
+ return (0);
+ SOCKBUF_LOCK(&so->so_rcv);
+ do {
+ tp->rcv_nxt += q->tqe_len;
+ flags = q->tqe_th->th_flags & TH_FIN;
+ nq = LIST_NEXT(q, tqe_q);
+ LIST_REMOVE(q, tqe_q);
+ if (so->so_rcv.sb_state & SBS_CANTRCVMORE)
+ m_freem(q->tqe_m);
+ else
+ sbappendstream_locked(&so->so_rcv, q->tqe_m);
+ uma_zfree(V_tcp_reass_zone, q);
+ tp->t_segqlen--;
+ q = nq;
+ } while (q && q->tqe_th->th_seq == tp->rcv_nxt);
+ ND6_HINT(tp);
+ sorwakeup_locked(so);
+ return (flags);
+}
diff --git a/rtems/freebsd/netinet/tcp_sack.c b/rtems/freebsd/netinet/tcp_sack.c
new file mode 100644
index 00000000..c995c270
--- /dev/null
+++ b/rtems/freebsd/netinet/tcp_sack.c
@@ -0,0 +1,687 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1994, 1995
+ * The Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)tcp_sack.c 8.12 (Berkeley) 5/24/95
+ */
+
+/*-
+ * @@(#)COPYRIGHT 1.1 (NRL) 17 January 1995
+ *
+ * NRL grants permission for redistribution and use in source and binary
+ * forms, with or without modification, of the software and documentation
+ * created at NRL provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgements:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * This product includes software developed at the Information
+ * Technology Division, US Naval Research Laboratory.
+ * 4. Neither the name of the NRL nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THE SOFTWARE PROVIDED BY NRL IS PROVIDED BY NRL AND CONTRIBUTORS ``AS
+ * IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NRL OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation
+ * are those of the authors and should not be interpreted as representing
+ * official policies, either expressed or implied, of the US Naval
+ * Research Laboratory (NRL).
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/freebsd/local/opt_inet.h>
+#include <rtems/freebsd/local/opt_inet6.h>
+#include <rtems/freebsd/local/opt_tcpdebug.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/sysctl.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/proc.h> /* for proc0 declaration */
+#include <rtems/freebsd/sys/protosw.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/socketvar.h>
+#include <rtems/freebsd/sys/syslog.h>
+
+#include <rtems/freebsd/machine/cpu.h> /* before tcp_seq.h, for tcp_random18() */
+
+#include <rtems/freebsd/vm/uma.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/route.h>
+#include <rtems/freebsd/net/vnet.h>
+
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/in_systm.h>
+#include <rtems/freebsd/netinet/ip.h>
+#include <rtems/freebsd/netinet/in_var.h>
+#include <rtems/freebsd/netinet/in_pcb.h>
+#include <rtems/freebsd/netinet/ip_var.h>
+#include <rtems/freebsd/netinet/ip6.h>
+#include <rtems/freebsd/netinet/icmp6.h>
+#include <rtems/freebsd/netinet6/nd6.h>
+#include <rtems/freebsd/netinet6/ip6_var.h>
+#include <rtems/freebsd/netinet6/in6_pcb.h>
+#include <rtems/freebsd/netinet/tcp.h>
+#include <rtems/freebsd/netinet/tcp_fsm.h>
+#include <rtems/freebsd/netinet/tcp_seq.h>
+#include <rtems/freebsd/netinet/tcp_timer.h>
+#include <rtems/freebsd/netinet/tcp_var.h>
+#include <rtems/freebsd/netinet6/tcp6_var.h>
+#include <rtems/freebsd/netinet/tcpip.h>
+#ifdef TCPDEBUG
+#include <rtems/freebsd/netinet/tcp_debug.h>
+#endif /* TCPDEBUG */
+
+#include <rtems/freebsd/machine/in_cksum.h>
+
+VNET_DECLARE(struct uma_zone *, sack_hole_zone);
+#define V_sack_hole_zone VNET(sack_hole_zone)
+
+SYSCTL_NODE(_net_inet_tcp, OID_AUTO, sack, CTLFLAG_RW, 0, "TCP SACK");
+VNET_DEFINE(int, tcp_do_sack) = 1;
+#define V_tcp_do_sack VNET(tcp_do_sack)
+SYSCTL_VNET_INT(_net_inet_tcp_sack, OID_AUTO, enable, CTLFLAG_RW,
+ &VNET_NAME(tcp_do_sack), 0, "Enable/Disable TCP SACK support");
+
+VNET_DEFINE(int, tcp_sack_maxholes) = 128;
+#define V_tcp_sack_maxholes VNET(tcp_sack_maxholes)
+SYSCTL_VNET_INT(_net_inet_tcp_sack, OID_AUTO, maxholes, CTLFLAG_RW,
+ &VNET_NAME(tcp_sack_maxholes), 0,
+ "Maximum number of TCP SACK holes allowed per connection");
+
+VNET_DEFINE(int, tcp_sack_globalmaxholes) = 65536;
+#define V_tcp_sack_globalmaxholes VNET(tcp_sack_globalmaxholes)
+SYSCTL_VNET_INT(_net_inet_tcp_sack, OID_AUTO, globalmaxholes, CTLFLAG_RW,
+ &VNET_NAME(tcp_sack_globalmaxholes), 0,
+ "Global maximum number of TCP SACK holes");
+
+VNET_DEFINE(int, tcp_sack_globalholes) = 0;
+#define V_tcp_sack_globalholes VNET(tcp_sack_globalholes)
+SYSCTL_VNET_INT(_net_inet_tcp_sack, OID_AUTO, globalholes, CTLFLAG_RD,
+ &VNET_NAME(tcp_sack_globalholes), 0,
+ "Global number of TCP SACK holes currently allocated");
+
+/*
+ * This function is called upon receipt of new valid data (while not in
+ * header prediction mode), and it updates the ordered list of sacks.
+ */
+void
+tcp_update_sack_list(struct tcpcb *tp, tcp_seq rcv_start, tcp_seq rcv_end)
+{
+ /*
+ * First reported block MUST be the most recent one. Subsequent
+ * blocks SHOULD be in the order in which they arrived at the
+ * receiver. These two conditions make the implementation fully
+ * compliant with RFC 2018.
+ */
+ struct sackblk head_blk, saved_blks[MAX_SACK_BLKS];
+ int num_head, num_saved, i;
+
+ INP_WLOCK_ASSERT(tp->t_inpcb);
+
+ /* Check arguments. */
+ KASSERT(SEQ_LT(rcv_start, rcv_end), ("rcv_start < rcv_end"));
+
+ /* SACK block for the received segment. */
+ head_blk.start = rcv_start;
+ head_blk.end = rcv_end;
+
+ /*
+ * Merge updated SACK blocks into head_blk, and save unchanged SACK
+ * blocks into saved_blks[]. num_saved will hold the number of
+ * saved SACK blocks.
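+ * E.g. with rcv_nxt = 1000, an existing block [2000, 3000) and a
+ * new segment [1500, 2000), the two touch and merge into
+ * head_blk = [1500, 3000).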
+ */
+ num_saved = 0;
+ for (i = 0; i < tp->rcv_numsacks; i++) {
+ tcp_seq start = tp->sackblks[i].start;
+ tcp_seq end = tp->sackblks[i].end;
+ if (SEQ_GEQ(start, end) || SEQ_LEQ(start, tp->rcv_nxt)) {
+ /*
+ * Discard this SACK block.
+ */
+ } else if (SEQ_LEQ(head_blk.start, end) &&
+ SEQ_GEQ(head_blk.end, start)) {
+ /*
+ * Merge this SACK block into head_blk. This SACK
+ * block itself will be discarded.
+ */
+ if (SEQ_GT(head_blk.start, start))
+ head_blk.start = start;
+ if (SEQ_LT(head_blk.end, end))
+ head_blk.end = end;
+ } else {
+ /*
+ * Save this SACK block.
+ */
+ saved_blks[num_saved].start = start;
+ saved_blks[num_saved].end = end;
+ num_saved++;
+ }
+ }
+
+ /*
+ * Update SACK list in tp->sackblks[].
+ */
+ num_head = 0;
+ if (SEQ_GT(head_blk.start, tp->rcv_nxt)) {
+ /*
+ * The received data segment is an out-of-order segment. Put
+ * head_blk at the top of SACK list.
+ */
+ tp->sackblks[0] = head_blk;
+ num_head = 1;
+ /*
+ * If the number of saved SACK blocks exceeds its limit,
+ * discard the last SACK block.
+ */
+ if (num_saved >= MAX_SACK_BLKS)
+ num_saved--;
+ }
+ if (num_saved > 0) {
+ /*
+ * Copy the saved SACK blocks back.
+ */
+ bcopy(saved_blks, &tp->sackblks[num_head],
+ sizeof(struct sackblk) * num_saved);
+ }
+
+ /* Save the number of SACK blocks. */
+ tp->rcv_numsacks = num_head + num_saved;
+}
+
+/*
+ * Delete all receiver-side SACK information.
+ */
+void
+tcp_clean_sackreport(struct tcpcb *tp)
+{
+ int i;
+
+ INP_WLOCK_ASSERT(tp->t_inpcb);
+ tp->rcv_numsacks = 0;
+ for (i = 0; i < MAX_SACK_BLKS; i++)
+ tp->sackblks[i].start = tp->sackblks[i].end = 0;
+}
+
+/*
+ * Allocate struct sackhole.
+ */
+static struct sackhole *
+tcp_sackhole_alloc(struct tcpcb *tp, tcp_seq start, tcp_seq end)
+{
+ struct sackhole *hole;
+
+ if (tp->snd_numholes >= V_tcp_sack_maxholes ||
+ V_tcp_sack_globalholes >= V_tcp_sack_globalmaxholes) {
+ TCPSTAT_INC(tcps_sack_sboverflow);
+ return NULL;
+ }
+
+ hole = (struct sackhole *)uma_zalloc(V_sack_hole_zone, M_NOWAIT);
+ if (hole == NULL)
+ return NULL;
+
+ hole->start = start;
+ hole->end = end;
+ hole->rxmit = start;
+
+ tp->snd_numholes++;
+ atomic_add_int(&V_tcp_sack_globalholes, 1);
+
+ return hole;
+}
+
+/*
+ * Free struct sackhole.
+ */
+static void
+tcp_sackhole_free(struct tcpcb *tp, struct sackhole *hole)
+{
+
+ uma_zfree(V_sack_hole_zone, hole);
+
+ tp->snd_numholes--;
+ atomic_subtract_int(&V_tcp_sack_globalholes, 1);
+
+ KASSERT(tp->snd_numholes >= 0, ("tp->snd_numholes >= 0"));
+ KASSERT(V_tcp_sack_globalholes >= 0, ("tcp_sack_globalholes >= 0"));
+}
+
+/*
+ * Insert new SACK hole into scoreboard.
+ */
+static struct sackhole *
+tcp_sackhole_insert(struct tcpcb *tp, tcp_seq start, tcp_seq end,
+ struct sackhole *after)
+{
+ struct sackhole *hole;
+
+ /* Allocate a new SACK hole. */
+ hole = tcp_sackhole_alloc(tp, start, end);
+ if (hole == NULL)
+ return NULL;
+
+ /* Insert the new SACK hole into scoreboard. */
+ if (after != NULL)
+ TAILQ_INSERT_AFTER(&tp->snd_holes, after, hole, scblink);
+ else
+ TAILQ_INSERT_TAIL(&tp->snd_holes, hole, scblink);
+
+ /* Update SACK hint. */
+ if (tp->sackhint.nexthole == NULL)
+ tp->sackhint.nexthole = hole;
+
+ return hole;
+}
+
+/*
+ * Remove SACK hole from scoreboard.
+ */
+static void
+tcp_sackhole_remove(struct tcpcb *tp, struct sackhole *hole)
+{
+
+ /* Update SACK hint. */
+ if (tp->sackhint.nexthole == hole)
+ tp->sackhint.nexthole = TAILQ_NEXT(hole, scblink);
+
+ /* Remove this SACK hole. */
+ TAILQ_REMOVE(&tp->snd_holes, hole, scblink);
+
+ /* Free this SACK hole. */
+ tcp_sackhole_free(tp, hole);
+}
+
+/*
+ * Process cumulative ACK and the TCP SACK option to update the scoreboard.
+ * tp->snd_holes is an ordered list of holes (oldest to newest, in terms of
+ * the sequence space).
+ */
+void
+tcp_sack_doack(struct tcpcb *tp, struct tcpopt *to, tcp_seq th_ack)
+{
+ struct sackhole *cur, *temp;
+ struct sackblk sack, sack_blocks[TCP_MAX_SACK + 1], *sblkp;
+ int i, j, num_sack_blks;
+
+ INP_WLOCK_ASSERT(tp->t_inpcb);
+
+ num_sack_blks = 0;
+ /*
+ * If SND.UNA will be advanced by SEG.ACK, and if SACK holes exist,
+ * treat [SND.UNA, SEG.ACK) as if it is a SACK block.
+ */
+ if (SEQ_LT(tp->snd_una, th_ack) && !TAILQ_EMPTY(&tp->snd_holes)) {
+ sack_blocks[num_sack_blks].start = tp->snd_una;
+ sack_blocks[num_sack_blks++].end = th_ack;
+ }
+ /*
+ * Append received valid SACK blocks to sack_blocks[], but only if we
+ * received new blocks from the other side.
+ */
+ if (to->to_flags & TOF_SACK) {
+ for (i = 0; i < to->to_nsacks; i++) {
+ bcopy((to->to_sacks + i * TCPOLEN_SACK),
+ &sack, sizeof(sack));
+ sack.start = ntohl(sack.start);
+ sack.end = ntohl(sack.end);
+ if (SEQ_GT(sack.end, sack.start) &&
+ SEQ_GT(sack.start, tp->snd_una) &&
+ SEQ_GT(sack.start, th_ack) &&
+ SEQ_LT(sack.start, tp->snd_max) &&
+ SEQ_GT(sack.end, tp->snd_una) &&
+ SEQ_LEQ(sack.end, tp->snd_max))
+ sack_blocks[num_sack_blks++] = sack;
+ }
+ }
+ /*
+ * Return if SND.UNA is not advanced and no valid SACK block is
+ * received.
+ */
+ if (num_sack_blks == 0)
+ return;
+
+ /*
+ * Sort the SACK blocks so we can update the scoreboard with just one
+	 * pass. The overhead of sorting up to 4+1 elements is less than
+	 * making up to 4+1 passes over the scoreboard.
+ */
+ for (i = 0; i < num_sack_blks; i++) {
+ for (j = i + 1; j < num_sack_blks; j++) {
+ if (SEQ_GT(sack_blocks[i].end, sack_blocks[j].end)) {
+ sack = sack_blocks[i];
+ sack_blocks[i] = sack_blocks[j];
+ sack_blocks[j] = sack;
+ }
+ }
+ }
+ if (TAILQ_EMPTY(&tp->snd_holes))
+ /*
+ * Empty scoreboard. Need to initialize snd_fack (it may be
+ * uninitialized or have a bogus value). Scoreboard holes
+ * (from the sack blocks received) are created later below
+ * (in the logic that adds holes to the tail of the
+ * scoreboard).
+ */
+ tp->snd_fack = SEQ_MAX(tp->snd_una, th_ack);
+ /*
+ * In the while-loop below, incoming SACK blocks (sack_blocks[]) and
+ * SACK holes (snd_holes) are traversed from their tails with just
+	 * one pass in order to reduce the number of compares, especially when
+ * the bandwidth-delay product is large.
+ *
+ * Note: Typically, in the first RTT of SACK recovery, the highest
+ * three or four SACK blocks with the same ack number are received.
+ * In the second RTT, if retransmitted data segments are not lost,
+ * the highest three or four SACK blocks with ack number advancing
+ * are received.
+ */
+ sblkp = &sack_blocks[num_sack_blks - 1]; /* Last SACK block */
+ if (SEQ_LT(tp->snd_fack, sblkp->start)) {
+ /*
+ * The highest SACK block is beyond fack. Append new SACK
+ * hole at the tail. If the second or later highest SACK
+ * blocks are also beyond the current fack, they will be
+ * inserted by way of hole splitting in the while-loop below.
+ */
+		temp = tcp_sackhole_insert(tp, tp->snd_fack, sblkp->start,
+		    NULL);
+ if (temp != NULL) {
+ tp->snd_fack = sblkp->end;
+ /* Go to the previous sack block. */
+ sblkp--;
+ } else {
+ /*
+ * We failed to add a new hole based on the current
+ * sack block. Skip over all the sack blocks that
+ * fall completely to the right of snd_fack and
+ * proceed to trim the scoreboard based on the
+ * remaining sack blocks. This also trims the
+ * scoreboard for th_ack (which is sack_blocks[0]).
+ */
+ while (sblkp >= sack_blocks &&
+ SEQ_LT(tp->snd_fack, sblkp->start))
+ sblkp--;
+ if (sblkp >= sack_blocks &&
+ SEQ_LT(tp->snd_fack, sblkp->end))
+ tp->snd_fack = sblkp->end;
+ }
+ } else if (SEQ_LT(tp->snd_fack, sblkp->end))
+ /* fack is advanced. */
+ tp->snd_fack = sblkp->end;
+ /* We must have at least one SACK hole in scoreboard. */
+ KASSERT(!TAILQ_EMPTY(&tp->snd_holes),
+ ("SACK scoreboard must not be empty"));
+ cur = TAILQ_LAST(&tp->snd_holes, sackhole_head); /* Last SACK hole. */
+ /*
+ * Since the incoming sack blocks are sorted, we can process them
+ * making one sweep of the scoreboard.
+ */
+ while (sblkp >= sack_blocks && cur != NULL) {
+ if (SEQ_GEQ(sblkp->start, cur->end)) {
+ /*
+ * SACKs data beyond the current hole. Go to the
+ * previous sack block.
+ */
+ sblkp--;
+ continue;
+ }
+ if (SEQ_LEQ(sblkp->end, cur->start)) {
+ /*
+ * SACKs data before the current hole. Go to the
+ * previous hole.
+ */
+ cur = TAILQ_PREV(cur, sackhole_head, scblink);
+ continue;
+ }
+ tp->sackhint.sack_bytes_rexmit -= (cur->rxmit - cur->start);
+ KASSERT(tp->sackhint.sack_bytes_rexmit >= 0,
+ ("sackhint bytes rtx >= 0"));
+ if (SEQ_LEQ(sblkp->start, cur->start)) {
+ /* Data acks at least the beginning of hole. */
+ if (SEQ_GEQ(sblkp->end, cur->end)) {
+ /* Acks entire hole, so delete hole. */
+ temp = cur;
+ cur = TAILQ_PREV(cur, sackhole_head, scblink);
+ tcp_sackhole_remove(tp, temp);
+ /*
+ * The sack block may ack all or part of the
+ * next hole too, so continue onto the next
+ * hole.
+ */
+ continue;
+ } else {
+ /* Move start of hole forward. */
+ cur->start = sblkp->end;
+ cur->rxmit = SEQ_MAX(cur->rxmit, cur->start);
+ }
+ } else {
+ /* Data acks at least the end of hole. */
+ if (SEQ_GEQ(sblkp->end, cur->end)) {
+ /* Move end of hole backward. */
+ cur->end = sblkp->start;
+ cur->rxmit = SEQ_MIN(cur->rxmit, cur->end);
+ } else {
+ /*
+ * ACKs some data in middle of a hole; need
+ * to split current hole
+ */
+ temp = tcp_sackhole_insert(tp, sblkp->end,
+ cur->end, cur);
+ if (temp != NULL) {
+ if (SEQ_GT(cur->rxmit, temp->rxmit)) {
+ temp->rxmit = cur->rxmit;
+ tp->sackhint.sack_bytes_rexmit
+ += (temp->rxmit
+ - temp->start);
+ }
+ cur->end = sblkp->start;
+ cur->rxmit = SEQ_MIN(cur->rxmit,
+ cur->end);
+ }
+ }
+ }
+ tp->sackhint.sack_bytes_rexmit += (cur->rxmit - cur->start);
+ /*
+ * Testing sblkp->start against cur->start tells us whether
+ * we're done with the sack block or the sack hole.
+ * Accordingly, we advance one or the other.
+ */
+ if (SEQ_LEQ(sblkp->start, cur->start))
+ cur = TAILQ_PREV(cur, sackhole_head, scblink);
+ else
+ sblkp--;
+ }
+}
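+
+/*
+ * Illustrative walk-through of the split branch above, with hypothetical
+ * numbers: given a single hole [1000,3000) with rxmit = 1000 and an
+ * incoming SACK block [1500,2000), a new hole [2000,3000) is inserted
+ * after the current one and the current hole is trimmed to [1000,1500);
+ * rxmit stays at 1000, since SEQ_MIN(1000, 1500) is 1000.  The scoreboard
+ * then records exactly the two regions that remain unacknowledged.
+ */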
+
+/*
+ * Free all SACK holes to clear the scoreboard.
+ */
+void
+tcp_free_sackholes(struct tcpcb *tp)
+{
+ struct sackhole *q;
+
+ INP_WLOCK_ASSERT(tp->t_inpcb);
+ while ((q = TAILQ_FIRST(&tp->snd_holes)) != NULL)
+ tcp_sackhole_remove(tp, q);
+ tp->sackhint.sack_bytes_rexmit = 0;
+
+ KASSERT(tp->snd_numholes == 0, ("tp->snd_numholes == 0"));
+ KASSERT(tp->sackhint.nexthole == NULL,
+ ("tp->sackhint.nexthole == NULL"));
+}
+
+/*
+ * Partial ack handling within a sack recovery episode. Keeping this very
+ * simple for now. When a partial ack is received, force snd_cwnd to a value
+ * that will allow the sender to transmit no more than 2 segments. If
+ * necessary, a better scheme can be adopted at a later point, but for now,
+ * the goal is to prevent the sender from bursting a large amount of data in
+ * the midst of sack recovery.
+ */
+void
+tcp_sack_partialack(struct tcpcb *tp, struct tcphdr *th)
+{
+ int num_segs = 1;
+
+ INP_WLOCK_ASSERT(tp->t_inpcb);
+ tcp_timer_activate(tp, TT_REXMT, 0);
+ tp->t_rtttime = 0;
+ /* Send one or 2 segments based on how much new data was acked. */
+ if (((th->th_ack - tp->snd_una) / tp->t_maxseg) > 2)
+ num_segs = 2;
+ tp->snd_cwnd = (tp->sackhint.sack_bytes_rexmit +
+ (tp->snd_nxt - tp->sack_newdata) + num_segs * tp->t_maxseg);
+ if (tp->snd_cwnd > tp->snd_ssthresh)
+ tp->snd_cwnd = tp->snd_ssthresh;
+ tp->t_flags |= TF_ACKNOW;
+ (void) tcp_output(tp);
+}
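+
+/*
+ * Worked example, assuming t_maxseg = 1460: if a partial ack advances
+ * snd_una by 7300 bytes, then 7300 / 1460 = 5 > 2, so num_segs becomes 2
+ * and snd_cwnd is set to sack_bytes_rexmit + (snd_nxt - sack_newdata) +
+ * 2 * 1460, i.e. just enough to cover the data outstanding in recovery
+ * plus two new segments, capped at snd_ssthresh.
+ */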
+
+#if 0
+/*
+ * Debug version of tcp_sack_output() that walks the scoreboard. Used for
+ * now to sanity check the hint.
+ */
+static struct sackhole *
+tcp_sack_output_debug(struct tcpcb *tp, int *sack_bytes_rexmt)
+{
+ struct sackhole *p;
+
+ INP_WLOCK_ASSERT(tp->t_inpcb);
+ *sack_bytes_rexmt = 0;
+ TAILQ_FOREACH(p, &tp->snd_holes, scblink) {
+ if (SEQ_LT(p->rxmit, p->end)) {
+ if (SEQ_LT(p->rxmit, tp->snd_una)) {/* old SACK hole */
+ continue;
+ }
+ *sack_bytes_rexmt += (p->rxmit - p->start);
+ break;
+ }
+ *sack_bytes_rexmt += (p->rxmit - p->start);
+ }
+ return (p);
+}
+#endif
+
+/*
+ * Returns the next hole to retransmit and the number of retransmitted bytes
+ * from the scoreboard. We store both the next hole and the number of
+ * retransmitted bytes as hints (and recompute these on the fly upon SACK/ACK
+ * reception). This avoids scoreboard traversals completely.
+ *
+ * The loop here will traverse *at most* one link. Here's the argument. For
+ * the loop to traverse more than 1 link before finding the next hole to
+ * retransmit, we would need to have at least 1 node following the current
+ * hint with (rxmit == end). But, for all holes following the current hint,
+ * (start == rxmit), since we have not yet retransmitted from them.
+ * Therefore, in order to traverse more than 1 link in the loop below, we
+ * need at least one node following the current hint with (start == rxmit ==
+ * end). But that can't happen, because (start == end) means that all the
+ * data in that hole has been SACKed, in which case the hole would have been
+ * removed from the scoreboard.
+ */
+struct sackhole *
+tcp_sack_output(struct tcpcb *tp, int *sack_bytes_rexmt)
+{
+ struct sackhole *hole = NULL;
+
+ INP_WLOCK_ASSERT(tp->t_inpcb);
+ *sack_bytes_rexmt = tp->sackhint.sack_bytes_rexmit;
+ hole = tp->sackhint.nexthole;
+ if (hole == NULL || SEQ_LT(hole->rxmit, hole->end))
+ goto out;
+ while ((hole = TAILQ_NEXT(hole, scblink)) != NULL) {
+ if (SEQ_LT(hole->rxmit, hole->end)) {
+ tp->sackhint.nexthole = hole;
+ break;
+ }
+ }
+out:
+ return (hole);
+}
+
+/*
+ * After a timeout, the SACK list may be rebuilt. This SACK information
+ * should be used to avoid retransmitting SACKed data. This function
+ * traverses the SACK list to see if snd_nxt should be moved forward.
+ */
+void
+tcp_sack_adjust(struct tcpcb *tp)
+{
+ struct sackhole *p, *cur = TAILQ_FIRST(&tp->snd_holes);
+
+ INP_WLOCK_ASSERT(tp->t_inpcb);
+ if (cur == NULL)
+ return; /* No holes */
+ if (SEQ_GEQ(tp->snd_nxt, tp->snd_fack))
+ return; /* We're already beyond any SACKed blocks */
+ /*-
+ * Two cases for which we want to advance snd_nxt:
+ * i) snd_nxt lies between end of one hole and beginning of another
+ * ii) snd_nxt lies between end of last hole and snd_fack
+ */
+ while ((p = TAILQ_NEXT(cur, scblink)) != NULL) {
+ if (SEQ_LT(tp->snd_nxt, cur->end))
+ return;
+ if (SEQ_GEQ(tp->snd_nxt, p->start))
+ cur = p;
+ else {
+ tp->snd_nxt = p->start;
+ return;
+ }
+ }
+ if (SEQ_LT(tp->snd_nxt, cur->end))
+ return;
+ tp->snd_nxt = tp->snd_fack;
+}
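+
+/*
+ * Illustrative example with hypothetical numbers: with holes [1000,2000)
+ * and [3000,4000), snd_fack = 5000 and snd_nxt = 2500, the loop above
+ * finds snd_nxt between the end of the first hole and the start of the
+ * second, so it advances snd_nxt to 3000 and avoids retransmitting the
+ * already-SACKed range [2000,3000).
+ */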
diff --git a/rtems/freebsd/netinet/tcp_seq.h b/rtems/freebsd/netinet/tcp_seq.h
new file mode 100644
index 00000000..8af7b0ab
--- /dev/null
+++ b/rtems/freebsd/netinet/tcp_seq.h
@@ -0,0 +1,68 @@
+/*-
+ * Copyright (c) 1982, 1986, 1993, 1995
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)tcp_seq.h 8.3 (Berkeley) 6/21/95
+ * $FreeBSD$
+ */
+
+#ifndef _NETINET_TCP_SEQ_HH_
+#define _NETINET_TCP_SEQ_HH_
+/*
+ * TCP sequence numbers are 32 bit integers operated
+ * on with modular arithmetic. These macros can be
+ * used to compare such integers.
+ */
+#define SEQ_LT(a,b) ((int)((a)-(b)) < 0)
+#define SEQ_LEQ(a,b) ((int)((a)-(b)) <= 0)
+#define SEQ_GT(a,b) ((int)((a)-(b)) > 0)
+#define SEQ_GEQ(a,b) ((int)((a)-(b)) >= 0)
+
+#define SEQ_MIN(a, b) ((SEQ_LT(a, b)) ? (a) : (b))
+#define SEQ_MAX(a, b) ((SEQ_GT(a, b)) ? (a) : (b))
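+
+/*
+ * Illustrative example: the cast to int is what makes these comparisons
+ * robust across 32-bit wrap-around.  With a = 10 and b = 0xfffffff0,
+ * (int)(a - b) evaluates to 26 > 0, so SEQ_GT(a, b) correctly reports a
+ * as "newer" even though a < b as plain unsigned values; a naive a > b
+ * comparison would get this wrong.
+ */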
+
+/* for modulo comparisons of timestamps */
+#define TSTMP_LT(a,b) ((int)((a)-(b)) < 0)
+#define TSTMP_GT(a,b) ((int)((a)-(b)) > 0)
+#define TSTMP_GEQ(a,b) ((int)((a)-(b)) >= 0)
+
+/*
+ * Macros to initialize tcp sequence numbers for
+ * send and receive from initial send and receive
+ * sequence numbers.
+ */
+#define tcp_rcvseqinit(tp) \
+ (tp)->rcv_adv = (tp)->rcv_nxt = (tp)->irs + 1
+
+#define tcp_sendseqinit(tp) \
+ (tp)->snd_una = (tp)->snd_nxt = (tp)->snd_max = (tp)->snd_up = \
+ (tp)->snd_recover = (tp)->iss
+
+#define TCP_PAWS_IDLE (24 * 24 * 60 * 60 * hz)
+ /* timestamp wrap-around time */
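+
+/*
+ * 24 * 24 * 60 * 60 seconds is 24 days, which stays inside the roughly
+ * 24.8-day wrap time of a 1 ms PAWS timestamp clock (2^31 ms);
+ * multiplying by hz expresses the limit in clock ticks.
+ */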
+
+#endif /* _NETINET_TCP_SEQ_HH_ */
diff --git a/rtems/freebsd/netinet/tcp_subr.c b/rtems/freebsd/netinet/tcp_subr.c
new file mode 100644
index 00000000..b4bdb0db
--- /dev/null
+++ b/rtems/freebsd/netinet/tcp_subr.c
@@ -0,0 +1,2315 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)tcp_subr.c 8.2 (Berkeley) 5/24/95
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/freebsd/local/opt_compat.h>
+#include <rtems/freebsd/local/opt_inet.h>
+#include <rtems/freebsd/local/opt_inet6.h>
+#include <rtems/freebsd/local/opt_ipsec.h>
+#include <rtems/freebsd/local/opt_tcpdebug.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/callout.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/sysctl.h>
+#include <rtems/freebsd/sys/jail.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#ifdef INET6
+#include <rtems/freebsd/sys/domain.h>
+#endif
+#include <rtems/freebsd/sys/priv.h>
+#include <rtems/freebsd/sys/proc.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/socketvar.h>
+#include <rtems/freebsd/sys/protosw.h>
+#include <rtems/freebsd/sys/random.h>
+
+#include <rtems/freebsd/vm/uma.h>
+
+#include <rtems/freebsd/net/route.h>
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/vnet.h>
+
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/in_systm.h>
+#include <rtems/freebsd/netinet/ip.h>
+#ifdef INET6
+#include <rtems/freebsd/netinet/ip6.h>
+#endif
+#include <rtems/freebsd/netinet/in_pcb.h>
+#ifdef INET6
+#include <rtems/freebsd/netinet6/in6_pcb.h>
+#endif
+#include <rtems/freebsd/netinet/in_var.h>
+#include <rtems/freebsd/netinet/ip_var.h>
+#ifdef INET6
+#include <rtems/freebsd/netinet6/ip6_var.h>
+#include <rtems/freebsd/netinet6/scope6_var.h>
+#include <rtems/freebsd/netinet6/nd6.h>
+#endif
+#include <rtems/freebsd/netinet/ip_icmp.h>
+#include <rtems/freebsd/netinet/tcp.h>
+#include <rtems/freebsd/netinet/tcp_fsm.h>
+#include <rtems/freebsd/netinet/tcp_seq.h>
+#include <rtems/freebsd/netinet/tcp_timer.h>
+#include <rtems/freebsd/netinet/tcp_var.h>
+#include <rtems/freebsd/netinet/tcp_syncache.h>
+#include <rtems/freebsd/netinet/tcp_offload.h>
+#ifdef INET6
+#include <rtems/freebsd/netinet6/tcp6_var.h>
+#endif
+#include <rtems/freebsd/netinet/tcpip.h>
+#ifdef TCPDEBUG
+#include <rtems/freebsd/netinet/tcp_debug.h>
+#endif
+#include <rtems/freebsd/netinet6/ip6protosw.h>
+
+#ifdef IPSEC
+#include <rtems/freebsd/netipsec/ipsec.h>
+#include <rtems/freebsd/netipsec/xform.h>
+#ifdef INET6
+#include <rtems/freebsd/netipsec/ipsec6.h>
+#endif
+#include <rtems/freebsd/netipsec/key.h>
+#include <rtems/freebsd/sys/syslog.h>
+#endif /*IPSEC*/
+
+#include <rtems/freebsd/machine/in_cksum.h>
+#include <rtems/freebsd/sys/md5.h>
+
+#include <rtems/freebsd/security/mac/mac_framework.h>
+
+VNET_DEFINE(int, tcp_mssdflt) = TCP_MSS;
+#ifdef INET6
+VNET_DEFINE(int, tcp_v6mssdflt) = TCP6_MSS;
+#endif
+
+static int
+sysctl_net_inet_tcp_mss_check(SYSCTL_HANDLER_ARGS)
+{
+ int error, new;
+
+ new = V_tcp_mssdflt;
+ error = sysctl_handle_int(oidp, &new, 0, req);
+ if (error == 0 && req->newptr) {
+ if (new < TCP_MINMSS)
+ error = EINVAL;
+ else
+ V_tcp_mssdflt = new;
+ }
+ return (error);
+}
+
+SYSCTL_VNET_PROC(_net_inet_tcp, TCPCTL_MSSDFLT, mssdflt,
+ CTLTYPE_INT|CTLFLAG_RW, &VNET_NAME(tcp_mssdflt), 0,
+ &sysctl_net_inet_tcp_mss_check, "I",
+ "Default TCP Maximum Segment Size");
+
+#ifdef INET6
+static int
+sysctl_net_inet_tcp_mss_v6_check(SYSCTL_HANDLER_ARGS)
+{
+ int error, new;
+
+ new = V_tcp_v6mssdflt;
+ error = sysctl_handle_int(oidp, &new, 0, req);
+ if (error == 0 && req->newptr) {
+ if (new < TCP_MINMSS)
+ error = EINVAL;
+ else
+ V_tcp_v6mssdflt = new;
+ }
+ return (error);
+}
+
+SYSCTL_VNET_PROC(_net_inet_tcp, TCPCTL_V6MSSDFLT, v6mssdflt,
+ CTLTYPE_INT|CTLFLAG_RW, &VNET_NAME(tcp_v6mssdflt), 0,
+ &sysctl_net_inet_tcp_mss_v6_check, "I",
+ "Default TCP Maximum Segment Size for IPv6");
+#endif
+
+static int
+vnet_sysctl_msec_to_ticks(SYSCTL_HANDLER_ARGS)
+{
+
+ VNET_SYSCTL_ARG(req, arg1);
+ return (sysctl_msec_to_ticks(oidp, arg1, arg2, req));
+}
+
+/*
+ * Minimum MSS we accept and use. This prevents DoS attacks where
+ * we are forced to a ridiculously low MSS like 20 and send hundreds
+ * of packets instead of one. The effect scales with the available
+ * bandwidth and quickly saturates the CPU and network interface
+ * with packet generation and sending. Set to zero to disable MINMSS
+ * checking. This setting prevents us from sending too small packets.
+ */
+VNET_DEFINE(int, tcp_minmss) = TCP_MINMSS;
+SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, minmss, CTLFLAG_RW,
+ &VNET_NAME(tcp_minmss), 0,
+    "Minimum TCP Maximum Segment Size");
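+
+/*
+ * Worked example of the attack described above: moving 1 Mbyte at an MSS
+ * of 20 bytes takes roughly 52,429 packets, versus about 719 packets at
+ * a typical Ethernet MSS of 1460, a 73x increase in per-packet work for
+ * the same payload.
+ */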
+
+VNET_DEFINE(int, tcp_do_rfc1323) = 1;
+SYSCTL_VNET_INT(_net_inet_tcp, TCPCTL_DO_RFC1323, rfc1323, CTLFLAG_RW,
+ &VNET_NAME(tcp_do_rfc1323), 0,
+ "Enable rfc1323 (high performance TCP) extensions");
+
+static int tcp_log_debug = 0;
+SYSCTL_INT(_net_inet_tcp, OID_AUTO, log_debug, CTLFLAG_RW,
+ &tcp_log_debug, 0, "Log errors caused by incoming TCP segments");
+
+static int tcp_tcbhashsize = 0;
+SYSCTL_INT(_net_inet_tcp, OID_AUTO, tcbhashsize, CTLFLAG_RDTUN,
+ &tcp_tcbhashsize, 0, "Size of TCP control-block hashtable");
+
+static int do_tcpdrain = 1;
+SYSCTL_INT(_net_inet_tcp, OID_AUTO, do_tcpdrain, CTLFLAG_RW, &do_tcpdrain, 0,
+ "Enable tcp_drain routine for extra help when low on mbufs");
+
+SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, pcbcount, CTLFLAG_RD,
+ &VNET_NAME(tcbinfo.ipi_count), 0, "Number of active PCBs");
+
+static VNET_DEFINE(int, icmp_may_rst) = 1;
+#define V_icmp_may_rst VNET(icmp_may_rst)
+SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, icmp_may_rst, CTLFLAG_RW,
+ &VNET_NAME(icmp_may_rst), 0,
+ "Certain ICMP unreachable messages may abort connections in SYN_SENT");
+
+static VNET_DEFINE(int, tcp_isn_reseed_interval) = 0;
+#define V_tcp_isn_reseed_interval VNET(tcp_isn_reseed_interval)
+SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, isn_reseed_interval, CTLFLAG_RW,
+ &VNET_NAME(tcp_isn_reseed_interval), 0,
+ "Seconds between reseeding of ISN secret");
+
+/*
+ * TCP bandwidth limiting sysctls. Note that the default lower bound of
+ * 1024 exists only for debugging. A good production default would be
+ * something like 6100.
+ */
+SYSCTL_NODE(_net_inet_tcp, OID_AUTO, inflight, CTLFLAG_RW, 0,
+ "TCP inflight data limiting");
+
+static VNET_DEFINE(int, tcp_inflight_enable) = 0;
+#define V_tcp_inflight_enable VNET(tcp_inflight_enable)
+SYSCTL_VNET_INT(_net_inet_tcp_inflight, OID_AUTO, enable, CTLFLAG_RW,
+ &VNET_NAME(tcp_inflight_enable), 0,
+ "Enable automatic TCP inflight data limiting");
+
+static int tcp_inflight_debug = 0;
+SYSCTL_INT(_net_inet_tcp_inflight, OID_AUTO, debug, CTLFLAG_RW,
+ &tcp_inflight_debug, 0,
+ "Debug TCP inflight calculations");
+
+static VNET_DEFINE(int, tcp_inflight_rttthresh);
+#define V_tcp_inflight_rttthresh VNET(tcp_inflight_rttthresh)
+SYSCTL_VNET_PROC(_net_inet_tcp_inflight, OID_AUTO, rttthresh,
+ CTLTYPE_INT|CTLFLAG_RW, &VNET_NAME(tcp_inflight_rttthresh), 0,
+ vnet_sysctl_msec_to_ticks, "I",
+ "RTT threshold below which inflight will deactivate itself");
+
+static VNET_DEFINE(int, tcp_inflight_min) = 6144;
+#define V_tcp_inflight_min VNET(tcp_inflight_min)
+SYSCTL_VNET_INT(_net_inet_tcp_inflight, OID_AUTO, min, CTLFLAG_RW,
+ &VNET_NAME(tcp_inflight_min), 0,
+ "Lower-bound for TCP inflight window");
+
+static VNET_DEFINE(int, tcp_inflight_max) = TCP_MAXWIN << TCP_MAX_WINSHIFT;
+#define V_tcp_inflight_max VNET(tcp_inflight_max)
+SYSCTL_VNET_INT(_net_inet_tcp_inflight, OID_AUTO, max, CTLFLAG_RW,
+ &VNET_NAME(tcp_inflight_max), 0,
+ "Upper-bound for TCP inflight window");
+
+static VNET_DEFINE(int, tcp_inflight_stab) = 20;
+#define V_tcp_inflight_stab VNET(tcp_inflight_stab)
+SYSCTL_VNET_INT(_net_inet_tcp_inflight, OID_AUTO, stab, CTLFLAG_RW,
+ &VNET_NAME(tcp_inflight_stab), 0,
+ "Inflight Algorithm Stabilization 20 = 2 packets");
+
+VNET_DEFINE(uma_zone_t, sack_hole_zone);
+#define V_sack_hole_zone VNET(sack_hole_zone)
+
+static struct inpcb *tcp_notify(struct inpcb *, int);
+static void tcp_isn_tick(void *);
+static char * tcp_log_addr(struct in_conninfo *inc, struct tcphdr *th,
+ void *ip4hdr, const void *ip6hdr);
+
+/*
+ * Target size of TCP PCB hash tables. Must be a power of two.
+ *
+ * Note that this can be overridden by the kernel environment
+ * variable net.inet.tcp.tcbhashsize
+ */
+#ifndef TCBHASHSIZE
+#define TCBHASHSIZE 512
+#endif
+
+/*
+ * XXX
+ * Callouts should be moved into struct tcp directly. They are currently
+ * separate because the tcpcb structure is exported to userland for sysctl
+ * parsing purposes, which do not know about callouts.
+ */
+struct tcpcb_mem {
+ struct tcpcb tcb;
+ struct tcp_timer tt;
+};
+
+static VNET_DEFINE(uma_zone_t, tcpcb_zone);
+#define V_tcpcb_zone VNET(tcpcb_zone)
+
+MALLOC_DEFINE(M_TCPLOG, "tcplog", "TCP address and flags print buffers");
+struct callout isn_callout;
+static struct mtx isn_mtx;
+
+#define ISN_LOCK_INIT() mtx_init(&isn_mtx, "isn_mtx", NULL, MTX_DEF)
+#define ISN_LOCK() mtx_lock(&isn_mtx)
+#define ISN_UNLOCK() mtx_unlock(&isn_mtx)
+
+/*
+ * TCP initialization.
+ */
+static void
+tcp_zone_change(void *tag)
+{
+
+ uma_zone_set_max(V_tcbinfo.ipi_zone, maxsockets);
+ uma_zone_set_max(V_tcpcb_zone, maxsockets);
+ tcp_tw_zone_change();
+}
+
+static int
+tcp_inpcb_init(void *mem, int size, int flags)
+{
+ struct inpcb *inp = mem;
+
+ INP_LOCK_INIT(inp, "inp", "tcpinp");
+ return (0);
+}
+
+void
+tcp_init(void)
+{
+ int hashsize;
+
+ INP_INFO_LOCK_INIT(&V_tcbinfo, "tcp");
+ LIST_INIT(&V_tcb);
+#ifdef VIMAGE
+ V_tcbinfo.ipi_vnet = curvnet;
+#endif
+ V_tcbinfo.ipi_listhead = &V_tcb;
+ hashsize = TCBHASHSIZE;
+ TUNABLE_INT_FETCH("net.inet.tcp.tcbhashsize", &hashsize);
+ if (!powerof2(hashsize)) {
+ printf("WARNING: TCB hash size not a power of 2\n");
+ hashsize = 512; /* safe default */
+ }
+ V_tcbinfo.ipi_hashbase = hashinit(hashsize, M_PCB,
+ &V_tcbinfo.ipi_hashmask);
+ V_tcbinfo.ipi_porthashbase = hashinit(hashsize, M_PCB,
+ &V_tcbinfo.ipi_porthashmask);
+ V_tcbinfo.ipi_zone = uma_zcreate("tcp_inpcb", sizeof(struct inpcb),
+ NULL, NULL, tcp_inpcb_init, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
+ uma_zone_set_max(V_tcbinfo.ipi_zone, maxsockets);
+ V_tcp_inflight_rttthresh = TCPTV_INFLIGHT_RTTTHRESH;
+
+ /*
+ * These have to be type stable for the benefit of the timers.
+ */
+ V_tcpcb_zone = uma_zcreate("tcpcb", sizeof(struct tcpcb_mem),
+ NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
+ uma_zone_set_max(V_tcpcb_zone, maxsockets);
+
+ tcp_tw_init();
+ syncache_init();
+ tcp_hc_init();
+ tcp_reass_init();
+
+ TUNABLE_INT_FETCH("net.inet.tcp.sack.enable", &V_tcp_do_sack);
+ V_sack_hole_zone = uma_zcreate("sackhole", sizeof(struct sackhole),
+ NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
+
+ /* Skip initialization of globals for non-default instances. */
+ if (!IS_DEFAULT_VNET(curvnet))
+ return;
+
+	/* XXX virtualize those below? */
+ tcp_delacktime = TCPTV_DELACK;
+ tcp_keepinit = TCPTV_KEEP_INIT;
+ tcp_keepidle = TCPTV_KEEP_IDLE;
+ tcp_keepintvl = TCPTV_KEEPINTVL;
+ tcp_maxpersistidle = TCPTV_KEEP_IDLE;
+ tcp_msl = TCPTV_MSL;
+ tcp_rexmit_min = TCPTV_MIN;
+ if (tcp_rexmit_min < 1)
+ tcp_rexmit_min = 1;
+ tcp_rexmit_slop = TCPTV_CPU_VAR;
+ tcp_finwait2_timeout = TCPTV_FINWAIT2_TIMEOUT;
+ tcp_tcbhashsize = hashsize;
+
+#ifdef INET6
+#define TCP_MINPROTOHDR (sizeof(struct ip6_hdr) + sizeof(struct tcphdr))
+#else /* INET6 */
+#define TCP_MINPROTOHDR (sizeof(struct tcpiphdr))
+#endif /* INET6 */
+ if (max_protohdr < TCP_MINPROTOHDR)
+ max_protohdr = TCP_MINPROTOHDR;
+ if (max_linkhdr + TCP_MINPROTOHDR > MHLEN)
+ panic("tcp_init");
+#undef TCP_MINPROTOHDR
+
+ ISN_LOCK_INIT();
+ callout_init(&isn_callout, CALLOUT_MPSAFE);
+ callout_reset(&isn_callout, hz/100, tcp_isn_tick, NULL);
+ EVENTHANDLER_REGISTER(shutdown_pre_sync, tcp_fini, NULL,
+ SHUTDOWN_PRI_DEFAULT);
+ EVENTHANDLER_REGISTER(maxsockets_change, tcp_zone_change, NULL,
+ EVENTHANDLER_PRI_ANY);
+}
+
+#ifdef VIMAGE
+void
+tcp_destroy(void)
+{
+
+ tcp_reass_destroy();
+ tcp_hc_destroy();
+ syncache_destroy();
+ tcp_tw_destroy();
+
+ /* XXX check that hashes are empty! */
+ hashdestroy(V_tcbinfo.ipi_hashbase, M_PCB,
+ V_tcbinfo.ipi_hashmask);
+ hashdestroy(V_tcbinfo.ipi_porthashbase, M_PCB,
+ V_tcbinfo.ipi_porthashmask);
+
+ uma_zdestroy(V_sack_hole_zone);
+ uma_zdestroy(V_tcpcb_zone);
+ uma_zdestroy(V_tcbinfo.ipi_zone);
+
+ INP_INFO_LOCK_DESTROY(&V_tcbinfo);
+}
+#endif
+
+void
+tcp_fini(void *xtp)
+{
+
+ callout_stop(&isn_callout);
+}
+
+/*
+ * Fill in the IP and TCP headers for an outgoing packet, given the tcpcb.
+ * tcp_template used to store this data in mbufs, but we now recopy it out
+ * of the tcpcb each time to conserve mbufs.
+ */
+void
+tcpip_fillheaders(struct inpcb *inp, void *ip_ptr, void *tcp_ptr)
+{
+ struct tcphdr *th = (struct tcphdr *)tcp_ptr;
+
+ INP_WLOCK_ASSERT(inp);
+
+#ifdef INET6
+ if ((inp->inp_vflag & INP_IPV6) != 0) {
+ struct ip6_hdr *ip6;
+
+ ip6 = (struct ip6_hdr *)ip_ptr;
+ ip6->ip6_flow = (ip6->ip6_flow & ~IPV6_FLOWINFO_MASK) |
+ (inp->inp_flow & IPV6_FLOWINFO_MASK);
+ ip6->ip6_vfc = (ip6->ip6_vfc & ~IPV6_VERSION_MASK) |
+ (IPV6_VERSION & IPV6_VERSION_MASK);
+ ip6->ip6_nxt = IPPROTO_TCP;
+ ip6->ip6_plen = htons(sizeof(struct tcphdr));
+ ip6->ip6_src = inp->in6p_laddr;
+ ip6->ip6_dst = inp->in6p_faddr;
+ } else
+#endif
+ {
+ struct ip *ip;
+
+ ip = (struct ip *)ip_ptr;
+ ip->ip_v = IPVERSION;
+ ip->ip_hl = 5;
+ ip->ip_tos = inp->inp_ip_tos;
+ ip->ip_len = 0;
+ ip->ip_id = 0;
+ ip->ip_off = 0;
+ ip->ip_ttl = inp->inp_ip_ttl;
+ ip->ip_sum = 0;
+ ip->ip_p = IPPROTO_TCP;
+ ip->ip_src = inp->inp_laddr;
+ ip->ip_dst = inp->inp_faddr;
+ }
+ th->th_sport = inp->inp_lport;
+ th->th_dport = inp->inp_fport;
+ th->th_seq = 0;
+ th->th_ack = 0;
+ th->th_x2 = 0;
+ th->th_off = 5;
+ th->th_flags = 0;
+ th->th_win = 0;
+ th->th_urp = 0;
+ th->th_sum = 0; /* in_pseudo() is called later for ipv4 */
+}
+
+/*
+ * Create template to be used to send tcp packets on a connection.
+ * Allocates an mbuf and fills in a skeletal tcp/ip header. The only
+ * use for this function is in keepalives, which use tcp_respond.
+ */
+struct tcptemp *
+tcpip_maketemplate(struct inpcb *inp)
+{
+ struct tcptemp *t;
+
+ t = malloc(sizeof(*t), M_TEMP, M_NOWAIT);
+ if (t == NULL)
+ return (NULL);
+ tcpip_fillheaders(inp, (void *)&t->tt_ipgen, (void *)&t->tt_t);
+ return (t);
+}
+
+/*
+ * Send a single message to the TCP at address specified by
+ * the given TCP/IP header. If m == NULL, then we make a copy
+ * of the tcpiphdr at ti and send directly to the addressed host.
+ * This is used to force keep alive messages out using the TCP
+ * template for a connection. If flags are given then we send
+ * a message back to the TCP which originated the segment ti,
+ * and discard the mbuf containing it and any other attached mbufs.
+ *
+ * In any case the ack and sequence number of the transmitted
+ * segment are as specified by the parameters.
+ *
+ * NOTE: If m != NULL, then ti must point to *inside* the mbuf.
+ */
+void
+tcp_respond(struct tcpcb *tp, void *ipgen, struct tcphdr *th, struct mbuf *m,
+ tcp_seq ack, tcp_seq seq, int flags)
+{
+ int tlen;
+ int win = 0;
+ struct ip *ip;
+ struct tcphdr *nth;
+#ifdef INET6
+ struct ip6_hdr *ip6;
+ int isipv6;
+#endif /* INET6 */
+ int ipflags = 0;
+ struct inpcb *inp;
+
+ KASSERT(tp != NULL || m != NULL, ("tcp_respond: tp and m both NULL"));
+
+#ifdef INET6
+ isipv6 = ((struct ip *)ipgen)->ip_v == 6;
+ ip6 = ipgen;
+#endif /* INET6 */
+ ip = ipgen;
+
+ if (tp != NULL) {
+ inp = tp->t_inpcb;
+ KASSERT(inp != NULL, ("tcp control block w/o inpcb"));
+ INP_WLOCK_ASSERT(inp);
+ } else
+ inp = NULL;
+
+ if (tp != NULL) {
+ if (!(flags & TH_RST)) {
+ win = sbspace(&inp->inp_socket->so_rcv);
+ if (win > (long)TCP_MAXWIN << tp->rcv_scale)
+ win = (long)TCP_MAXWIN << tp->rcv_scale;
+ }
+ }
+ if (m == NULL) {
+ m = m_gethdr(M_DONTWAIT, MT_DATA);
+ if (m == NULL)
+ return;
+ tlen = 0;
+ m->m_data += max_linkhdr;
+#ifdef INET6
+ if (isipv6) {
+ bcopy((caddr_t)ip6, mtod(m, caddr_t),
+ sizeof(struct ip6_hdr));
+ ip6 = mtod(m, struct ip6_hdr *);
+ nth = (struct tcphdr *)(ip6 + 1);
+ } else
+#endif /* INET6 */
+ {
+ bcopy((caddr_t)ip, mtod(m, caddr_t), sizeof(struct ip));
+ ip = mtod(m, struct ip *);
+ nth = (struct tcphdr *)(ip + 1);
+ }
+ bcopy((caddr_t)th, (caddr_t)nth, sizeof(struct tcphdr));
+ flags = TH_ACK;
+ } else {
+ /*
+ * reuse the mbuf.
+	 * XXX MRT We inherit the FIB, which is lucky.
+ */
+ m_freem(m->m_next);
+ m->m_next = NULL;
+ m->m_data = (caddr_t)ipgen;
+ /* m_len is set later */
+ tlen = 0;
+#define xchg(a,b,type) { type t; t=a; a=b; b=t; }
+#ifdef INET6
+ if (isipv6) {
+ xchg(ip6->ip6_dst, ip6->ip6_src, struct in6_addr);
+ nth = (struct tcphdr *)(ip6 + 1);
+ } else
+#endif /* INET6 */
+ {
+ xchg(ip->ip_dst.s_addr, ip->ip_src.s_addr, uint32_t);
+ nth = (struct tcphdr *)(ip + 1);
+ }
+ if (th != nth) {
+ /*
+			 * This is usually the case when an extension header
+			 * exists between the IPv6 header and the
+			 * TCP header.
+ */
+ nth->th_sport = th->th_sport;
+ nth->th_dport = th->th_dport;
+ }
+ xchg(nth->th_dport, nth->th_sport, uint16_t);
+#undef xchg
+ }
+#ifdef INET6
+ if (isipv6) {
+ ip6->ip6_flow = 0;
+ ip6->ip6_vfc = IPV6_VERSION;
+ ip6->ip6_nxt = IPPROTO_TCP;
+ ip6->ip6_plen = htons((u_short)(sizeof (struct tcphdr) +
+ tlen));
+ tlen += sizeof (struct ip6_hdr) + sizeof (struct tcphdr);
+ } else
+#endif
+ {
+ tlen += sizeof (struct tcpiphdr);
+ ip->ip_len = tlen;
+ ip->ip_ttl = V_ip_defttl;
+ if (V_path_mtu_discovery)
+ ip->ip_off |= IP_DF;
+ }
+ m->m_len = tlen;
+ m->m_pkthdr.len = tlen;
+ m->m_pkthdr.rcvif = NULL;
+#ifdef MAC
+ if (inp != NULL) {
+ /*
+ * Packet is associated with a socket, so allow the
+ * label of the response to reflect the socket label.
+ */
+ INP_WLOCK_ASSERT(inp);
+ mac_inpcb_create_mbuf(inp, m);
+ } else {
+ /*
+ * Packet is not associated with a socket, so possibly
+ * update the label in place.
+ */
+ mac_netinet_tcp_reply(m);
+ }
+#endif
+ nth->th_seq = htonl(seq);
+ nth->th_ack = htonl(ack);
+ nth->th_x2 = 0;
+ nth->th_off = sizeof (struct tcphdr) >> 2;
+ nth->th_flags = flags;
+ if (tp != NULL)
+ nth->th_win = htons((u_short) (win >> tp->rcv_scale));
+ else
+ nth->th_win = htons((u_short)win);
+ nth->th_urp = 0;
+#ifdef INET6
+ if (isipv6) {
+ nth->th_sum = 0;
+ nth->th_sum = in6_cksum(m, IPPROTO_TCP,
+ sizeof(struct ip6_hdr),
+ tlen - sizeof(struct ip6_hdr));
+ ip6->ip6_hlim = in6_selecthlim(tp != NULL ? tp->t_inpcb :
+ NULL, NULL);
+ } else
+#endif /* INET6 */
+ {
+ nth->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
+ htons((u_short)(tlen - sizeof(struct ip) + ip->ip_p)));
+ m->m_pkthdr.csum_flags = CSUM_TCP;
+ m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
+ }
+#ifdef TCPDEBUG
+ if (tp == NULL || (inp->inp_socket->so_options & SO_DEBUG))
+ tcp_trace(TA_OUTPUT, 0, tp, mtod(m, void *), th, 0);
+#endif
+#ifdef INET6
+ if (isipv6)
+ (void) ip6_output(m, NULL, NULL, ipflags, NULL, NULL, inp);
+ else
+#endif /* INET6 */
+ (void) ip_output(m, NULL, NULL, ipflags, NULL, inp);
+}
+
+/*
+ * Create a new TCP control block, making an
+ * empty reassembly queue and hooking it to the argument
+ * protocol control block. The `inp' parameter must have
+ * come from the zone allocator set up in tcp_init().
+ */
+struct tcpcb *
+tcp_newtcpcb(struct inpcb *inp)
+{
+ struct tcpcb_mem *tm;
+ struct tcpcb *tp;
+#ifdef INET6
+ int isipv6 = (inp->inp_vflag & INP_IPV6) != 0;
+#endif /* INET6 */
+
+ tm = uma_zalloc(V_tcpcb_zone, M_NOWAIT | M_ZERO);
+ if (tm == NULL)
+ return (NULL);
+ tp = &tm->tcb;
+#ifdef VIMAGE
+ tp->t_vnet = inp->inp_vnet;
+#endif
+ tp->t_timers = &tm->tt;
+ /* LIST_INIT(&tp->t_segq); */ /* XXX covered by M_ZERO */
+ tp->t_maxseg = tp->t_maxopd =
+#ifdef INET6
+ isipv6 ? V_tcp_v6mssdflt :
+#endif /* INET6 */
+ V_tcp_mssdflt;
+
+ /* Set up our timeouts. */
+ callout_init(&tp->t_timers->tt_rexmt, CALLOUT_MPSAFE);
+ callout_init(&tp->t_timers->tt_persist, CALLOUT_MPSAFE);
+ callout_init(&tp->t_timers->tt_keep, CALLOUT_MPSAFE);
+ callout_init(&tp->t_timers->tt_2msl, CALLOUT_MPSAFE);
+ callout_init(&tp->t_timers->tt_delack, CALLOUT_MPSAFE);
+
+ if (V_tcp_do_rfc1323)
+ tp->t_flags = (TF_REQ_SCALE|TF_REQ_TSTMP);
+ if (V_tcp_do_sack)
+ tp->t_flags |= TF_SACK_PERMIT;
+ TAILQ_INIT(&tp->snd_holes);
+ tp->t_inpcb = inp; /* XXX */
+ /*
+ * Init srtt to TCPTV_SRTTBASE (0), so we can tell that we have no
+ * rtt estimate. Set rttvar so that srtt + 4 * rttvar gives
+ * reasonable initial retransmit time.
+ */
+ tp->t_srtt = TCPTV_SRTTBASE;
+ tp->t_rttvar = ((TCPTV_RTOBASE - TCPTV_SRTTBASE) << TCP_RTTVAR_SHIFT) / 4;
+ tp->t_rttmin = tcp_rexmit_min;
+ tp->t_rxtcur = TCPTV_RTOBASE;
+ tp->snd_cwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT;
+ tp->snd_bwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT;
+ tp->snd_ssthresh = TCP_MAXWIN << TCP_MAX_WINSHIFT;
+ tp->t_rcvtime = ticks;
+ tp->t_bw_rtttime = ticks;
+ /*
+ * IPv4 TTL initialization is necessary for an IPv6 socket as well,
+ * because the socket may be bound to an IPv6 wildcard address,
+ * which may match an IPv4-mapped IPv6 address.
+ */
+ inp->inp_ip_ttl = V_ip_defttl;
+ inp->inp_ppcb = tp;
+ return (tp); /* XXX */
+}
+
+/*
+ * Drop a TCP connection, reporting
+ * the specified error. If connection is synchronized,
+ * then send a RST to peer.
+ */
+struct tcpcb *
+tcp_drop(struct tcpcb *tp, int errno)
+{
+ struct socket *so = tp->t_inpcb->inp_socket;
+
+ INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
+ INP_WLOCK_ASSERT(tp->t_inpcb);
+
+ if (TCPS_HAVERCVDSYN(tp->t_state)) {
+ tp->t_state = TCPS_CLOSED;
+ (void) tcp_output_reset(tp);
+ TCPSTAT_INC(tcps_drops);
+ } else
+ TCPSTAT_INC(tcps_conndrops);
+ if (errno == ETIMEDOUT && tp->t_softerror)
+ errno = tp->t_softerror;
+ so->so_error = errno;
+ return (tcp_close(tp));
+}
+
+void
+tcp_discardcb(struct tcpcb *tp)
+{
+ struct inpcb *inp = tp->t_inpcb;
+ struct socket *so = inp->inp_socket;
+#ifdef INET6
+ int isipv6 = (inp->inp_vflag & INP_IPV6) != 0;
+#endif /* INET6 */
+
+ INP_WLOCK_ASSERT(inp);
+
+ /*
+ * Make sure that all of our timers are stopped before we
+ * delete the PCB.
+ */
+ callout_stop(&tp->t_timers->tt_rexmt);
+ callout_stop(&tp->t_timers->tt_persist);
+ callout_stop(&tp->t_timers->tt_keep);
+ callout_stop(&tp->t_timers->tt_2msl);
+ callout_stop(&tp->t_timers->tt_delack);
+
+ /*
+ * If we got enough samples through the srtt filter,
+ * save the rtt and rttvar in the routing entry.
+ * 'Enough' is arbitrarily defined as 4 rtt samples.
+ * 4 samples is enough for the srtt filter to converge
+ * to within enough % of the correct value; fewer samples
+ * and we could save a bogus rtt. The danger is not high
+ * as tcp quickly recovers from everything.
+ * XXX: Works very well but needs some more statistics!
+ */
+ if (tp->t_rttupdated >= 4) {
+ struct hc_metrics_lite metrics;
+ u_long ssthresh;
+
+ bzero(&metrics, sizeof(metrics));
+ /*
+		 * Always update the ssthresh when the conditions below
+		 * are satisfied. This gives us a better starting value
+		 * for congestion avoidance on new connections.
+		 * ssthresh is only set if packet loss occurred on a session.
+ *
+ * XXXRW: 'so' may be NULL here, and/or socket buffer may be
+ * being torn down. Ideally this code would not use 'so'.
+ */
+ ssthresh = tp->snd_ssthresh;
+ if (ssthresh != 0 && ssthresh < so->so_snd.sb_hiwat / 2) {
+ /*
+ * convert the limit from user data bytes to
+ * packets then to packet data bytes.
+ */
+ ssthresh = (ssthresh + tp->t_maxseg / 2) / tp->t_maxseg;
+ if (ssthresh < 2)
+ ssthresh = 2;
+ ssthresh *= (u_long)(tp->t_maxseg +
+#ifdef INET6
+ (isipv6 ? sizeof (struct ip6_hdr) +
+ sizeof (struct tcphdr) :
+#endif
+ sizeof (struct tcpiphdr)
+#ifdef INET6
+ )
+#endif
+ );
+ } else
+ ssthresh = 0;
+ metrics.rmx_ssthresh = ssthresh;
+
+ metrics.rmx_rtt = tp->t_srtt;
+ metrics.rmx_rttvar = tp->t_rttvar;
+ /* XXX: This wraps if the pipe is more than 4 Gbit per second */
+ metrics.rmx_bandwidth = tp->snd_bandwidth;
+ metrics.rmx_cwnd = tp->snd_cwnd;
+ metrics.rmx_sendpipe = 0;
+ metrics.rmx_recvpipe = 0;
+
+ tcp_hc_update(&inp->inp_inc, &metrics);
+ }
+
+ /* free the reassembly queue, if any */
+ tcp_reass_flush(tp);
+ /* Disconnect offload device, if any. */
+ tcp_offload_detach(tp);
+
+ tcp_free_sackholes(tp);
+ inp->inp_ppcb = NULL;
+ tp->t_inpcb = NULL;
+ uma_zfree(V_tcpcb_zone, tp);
+}
+
+/*
+ * Attempt to close a TCP control block, marking it as dropped, and freeing
+ * the socket if we hold the only reference.
+ */
+struct tcpcb *
+tcp_close(struct tcpcb *tp)
+{
+ struct inpcb *inp = tp->t_inpcb;
+ struct socket *so;
+
+ INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
+ INP_WLOCK_ASSERT(inp);
+
+ /* Notify any offload devices of listener close */
+ if (tp->t_state == TCPS_LISTEN)
+ tcp_offload_listen_close(tp);
+ in_pcbdrop(inp);
+ TCPSTAT_INC(tcps_closed);
+ KASSERT(inp->inp_socket != NULL, ("tcp_close: inp_socket NULL"));
+ so = inp->inp_socket;
+ soisdisconnected(so);
+ if (inp->inp_flags & INP_SOCKREF) {
+ KASSERT(so->so_state & SS_PROTOREF,
+ ("tcp_close: !SS_PROTOREF"));
+ inp->inp_flags &= ~INP_SOCKREF;
+ INP_WUNLOCK(inp);
+ ACCEPT_LOCK();
+ SOCK_LOCK(so);
+ so->so_state &= ~SS_PROTOREF;
+ sofree(so);
+ return (NULL);
+ }
+ return (tp);
+}
+
+void
+tcp_drain(void)
+{
+ VNET_ITERATOR_DECL(vnet_iter);
+
+ if (!do_tcpdrain)
+ return;
+
+ VNET_LIST_RLOCK_NOSLEEP();
+ VNET_FOREACH(vnet_iter) {
+ CURVNET_SET(vnet_iter);
+ struct inpcb *inpb;
+ struct tcpcb *tcpb;
+
+ /*
+		 * Walk the tcpcbs, if any exist, and flush the reassembly
+		 * queue of each...
+		 * XXX: The "Net/3" implementation doesn't imply that the TCP
+		 * reassembly queue should be flushed, but in a situation
+		 * where we're really low on mbufs, this is potentially
+		 * useful.
+ */
+ INP_INFO_RLOCK(&V_tcbinfo);
+ LIST_FOREACH(inpb, V_tcbinfo.ipi_listhead, inp_list) {
+ if (inpb->inp_flags & INP_TIMEWAIT)
+ continue;
+ INP_WLOCK(inpb);
+ if ((tcpb = intotcpcb(inpb)) != NULL) {
+ tcp_reass_flush(tcpb);
+ tcp_clean_sackreport(tcpb);
+ }
+ INP_WUNLOCK(inpb);
+ }
+ INP_INFO_RUNLOCK(&V_tcbinfo);
+ CURVNET_RESTORE();
+ }
+ VNET_LIST_RUNLOCK_NOSLEEP();
+}
+
+/*
+ * Notify a tcp user of an asynchronous error;
+ * store error as soft error, but wake up user
+ * (for now, won't do anything until can select for soft error).
+ *
+ * Do not wake up user since there currently is no mechanism for
+ * reporting soft errors (yet - a kqueue filter may be added).
+ */
+static struct inpcb *
+tcp_notify(struct inpcb *inp, int error)
+{
+ struct tcpcb *tp;
+
+ INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
+ INP_WLOCK_ASSERT(inp);
+
+ if ((inp->inp_flags & INP_TIMEWAIT) ||
+ (inp->inp_flags & INP_DROPPED))
+ return (inp);
+
+ tp = intotcpcb(inp);
+ KASSERT(tp != NULL, ("tcp_notify: tp == NULL"));
+
+ /*
+ * Ignore some errors if we are hooked up.
+ * If connection hasn't completed, has retransmitted several times,
+ * and receives a second error, give up now. This is better
+ * than waiting a long time to establish a connection that
+ * can never complete.
+ */
+ if (tp->t_state == TCPS_ESTABLISHED &&
+ (error == EHOSTUNREACH || error == ENETUNREACH ||
+ error == EHOSTDOWN)) {
+ return (inp);
+ } else if (tp->t_state < TCPS_ESTABLISHED && tp->t_rxtshift > 3 &&
+ tp->t_softerror) {
+ tp = tcp_drop(tp, error);
+ if (tp != NULL)
+ return (inp);
+ else
+ return (NULL);
+ } else {
+ tp->t_softerror = error;
+ return (inp);
+ }
+#if 0
+ wakeup( &so->so_timeo);
+ sorwakeup(so);
+ sowwakeup(so);
+#endif
+}
+
+static int
+tcp_pcblist(SYSCTL_HANDLER_ARGS)
+{
+ int error, i, m, n, pcb_count;
+ struct inpcb *inp, **inp_list;
+ inp_gen_t gencnt;
+ struct xinpgen xig;
+
+ /*
+ * The process of preparing the TCB list is too time-consuming and
+ * resource-intensive to repeat twice on every request.
+ */
+ if (req->oldptr == NULL) {
+ n = V_tcbinfo.ipi_count + syncache_pcbcount();
+ n += imax(n / 8, 10);
+ req->oldidx = 2 * (sizeof xig) + n * sizeof(struct xtcpcb);
+ return (0);
+ }
+
+ if (req->newptr != NULL)
+ return (EPERM);
+
+ /*
+ * OK, now we're committed to doing something.
+ */
+ INP_INFO_RLOCK(&V_tcbinfo);
+ gencnt = V_tcbinfo.ipi_gencnt;
+ n = V_tcbinfo.ipi_count;
+ INP_INFO_RUNLOCK(&V_tcbinfo);
+
+ m = syncache_pcbcount();
+
+ error = sysctl_wire_old_buffer(req, 2 * (sizeof xig)
+ + (n + m) * sizeof(struct xtcpcb));
+ if (error != 0)
+ return (error);
+
+ xig.xig_len = sizeof xig;
+ xig.xig_count = n + m;
+ xig.xig_gen = gencnt;
+ xig.xig_sogen = so_gencnt;
+ error = SYSCTL_OUT(req, &xig, sizeof xig);
+ if (error)
+ return (error);
+
+ error = syncache_pcblist(req, m, &pcb_count);
+ if (error)
+ return (error);
+
+ inp_list = malloc(n * sizeof *inp_list, M_TEMP, M_WAITOK);
+ if (inp_list == NULL)
+ return (ENOMEM);
+
+ INP_INFO_RLOCK(&V_tcbinfo);
+ for (inp = LIST_FIRST(V_tcbinfo.ipi_listhead), i = 0;
+ inp != NULL && i < n; inp = LIST_NEXT(inp, inp_list)) {
+ INP_WLOCK(inp);
+ if (inp->inp_gencnt <= gencnt) {
+ /*
+ * XXX: This use of cr_cansee(), introduced with
+ * TCP state changes, is not quite right, but for
+ * now, better than nothing.
+ */
+ if (inp->inp_flags & INP_TIMEWAIT) {
+ if (intotw(inp) != NULL)
+ error = cr_cansee(req->td->td_ucred,
+ intotw(inp)->tw_cred);
+ else
+ error = EINVAL; /* Skip this inp. */
+ } else
+ error = cr_canseeinpcb(req->td->td_ucred, inp);
+ if (error == 0) {
+ in_pcbref(inp);
+ inp_list[i++] = inp;
+ }
+ }
+ INP_WUNLOCK(inp);
+ }
+ INP_INFO_RUNLOCK(&V_tcbinfo);
+ n = i;
+
+ error = 0;
+ for (i = 0; i < n; i++) {
+ inp = inp_list[i];
+ INP_RLOCK(inp);
+ if (inp->inp_gencnt <= gencnt) {
+ struct xtcpcb xt;
+ void *inp_ppcb;
+
+ bzero(&xt, sizeof(xt));
+ xt.xt_len = sizeof xt;
+ /* XXX should avoid extra copy */
+ bcopy(inp, &xt.xt_inp, sizeof *inp);
+ inp_ppcb = inp->inp_ppcb;
+ if (inp_ppcb == NULL)
+ bzero((char *) &xt.xt_tp, sizeof xt.xt_tp);
+ else if (inp->inp_flags & INP_TIMEWAIT) {
+ bzero((char *) &xt.xt_tp, sizeof xt.xt_tp);
+ xt.xt_tp.t_state = TCPS_TIME_WAIT;
+ } else
+ bcopy(inp_ppcb, &xt.xt_tp, sizeof xt.xt_tp);
+ if (inp->inp_socket != NULL)
+ sotoxsocket(inp->inp_socket, &xt.xt_socket);
+ else {
+ bzero(&xt.xt_socket, sizeof xt.xt_socket);
+ xt.xt_socket.xso_protocol = IPPROTO_TCP;
+ }
+ xt.xt_inp.inp_gencnt = inp->inp_gencnt;
+ INP_RUNLOCK(inp);
+ error = SYSCTL_OUT(req, &xt, sizeof xt);
+ } else
+ INP_RUNLOCK(inp);
+ }
+ INP_INFO_WLOCK(&V_tcbinfo);
+ for (i = 0; i < n; i++) {
+ inp = inp_list[i];
+ INP_WLOCK(inp);
+ if (!in_pcbrele(inp))
+ INP_WUNLOCK(inp);
+ }
+ INP_INFO_WUNLOCK(&V_tcbinfo);
+
+ if (!error) {
+ /*
+ * Give the user an updated idea of our state.
+ * If the generation differs from what we told
+ * her before, she knows that something happened
+ * while we were processing this request, and it
+ * might be necessary to retry.
+ */
+ INP_INFO_RLOCK(&V_tcbinfo);
+ xig.xig_gen = V_tcbinfo.ipi_gencnt;
+ xig.xig_sogen = so_gencnt;
+ xig.xig_count = V_tcbinfo.ipi_count + pcb_count;
+ INP_INFO_RUNLOCK(&V_tcbinfo);
+ error = SYSCTL_OUT(req, &xig, sizeof xig);
+ }
+ free(inp_list, M_TEMP);
+ return (error);
+}
+
+SYSCTL_PROC(_net_inet_tcp, TCPCTL_PCBLIST, pcblist, CTLFLAG_RD, 0, 0,
+ tcp_pcblist, "S,xtcpcb", "List of active TCP connections");
+
+static int
+tcp_getcred(SYSCTL_HANDLER_ARGS)
+{
+ struct xucred xuc;
+ struct sockaddr_in addrs[2];
+ struct inpcb *inp;
+ int error;
+
+ error = priv_check(req->td, PRIV_NETINET_GETCRED);
+ if (error)
+ return (error);
+ error = SYSCTL_IN(req, addrs, sizeof(addrs));
+ if (error)
+ return (error);
+ INP_INFO_RLOCK(&V_tcbinfo);
+ inp = in_pcblookup_hash(&V_tcbinfo, addrs[1].sin_addr,
+ addrs[1].sin_port, addrs[0].sin_addr, addrs[0].sin_port, 0, NULL);
+ if (inp != NULL) {
+ INP_RLOCK(inp);
+ INP_INFO_RUNLOCK(&V_tcbinfo);
+ if (inp->inp_socket == NULL)
+ error = ENOENT;
+ if (error == 0)
+ error = cr_canseeinpcb(req->td->td_ucred, inp);
+ if (error == 0)
+ cru2x(inp->inp_cred, &xuc);
+ INP_RUNLOCK(inp);
+ } else {
+ INP_INFO_RUNLOCK(&V_tcbinfo);
+ error = ENOENT;
+ }
+ if (error == 0)
+ error = SYSCTL_OUT(req, &xuc, sizeof(struct xucred));
+ return (error);
+}
+
+SYSCTL_PROC(_net_inet_tcp, OID_AUTO, getcred,
+ CTLTYPE_OPAQUE|CTLFLAG_RW|CTLFLAG_PRISON, 0, 0,
+ tcp_getcred, "S,xucred", "Get the xucred of a TCP connection");
+
+#ifdef INET6
+static int
+tcp6_getcred(SYSCTL_HANDLER_ARGS)
+{
+ struct xucred xuc;
+ struct sockaddr_in6 addrs[2];
+ struct inpcb *inp;
+ int error, mapped = 0;
+
+ error = priv_check(req->td, PRIV_NETINET_GETCRED);
+ if (error)
+ return (error);
+ error = SYSCTL_IN(req, addrs, sizeof(addrs));
+ if (error)
+ return (error);
+ if ((error = sa6_embedscope(&addrs[0], V_ip6_use_defzone)) != 0 ||
+ (error = sa6_embedscope(&addrs[1], V_ip6_use_defzone)) != 0) {
+ return (error);
+ }
+ if (IN6_IS_ADDR_V4MAPPED(&addrs[0].sin6_addr)) {
+ if (IN6_IS_ADDR_V4MAPPED(&addrs[1].sin6_addr))
+ mapped = 1;
+ else
+ return (EINVAL);
+ }
+
+ INP_INFO_RLOCK(&V_tcbinfo);
+ if (mapped == 1)
+ inp = in_pcblookup_hash(&V_tcbinfo,
+ *(struct in_addr *)&addrs[1].sin6_addr.s6_addr[12],
+ addrs[1].sin6_port,
+ *(struct in_addr *)&addrs[0].sin6_addr.s6_addr[12],
+ addrs[0].sin6_port,
+ 0, NULL);
+ else
+ inp = in6_pcblookup_hash(&V_tcbinfo,
+ &addrs[1].sin6_addr, addrs[1].sin6_port,
+ &addrs[0].sin6_addr, addrs[0].sin6_port, 0, NULL);
+ if (inp != NULL) {
+ INP_RLOCK(inp);
+ INP_INFO_RUNLOCK(&V_tcbinfo);
+ if (inp->inp_socket == NULL)
+ error = ENOENT;
+ if (error == 0)
+ error = cr_canseeinpcb(req->td->td_ucred, inp);
+ if (error == 0)
+ cru2x(inp->inp_cred, &xuc);
+ INP_RUNLOCK(inp);
+ } else {
+ INP_INFO_RUNLOCK(&V_tcbinfo);
+ error = ENOENT;
+ }
+ if (error == 0)
+ error = SYSCTL_OUT(req, &xuc, sizeof(struct xucred));
+ return (error);
+}
+
+SYSCTL_PROC(_net_inet6_tcp6, OID_AUTO, getcred,
+ CTLTYPE_OPAQUE|CTLFLAG_RW|CTLFLAG_PRISON, 0, 0,
+ tcp6_getcred, "S,xucred", "Get the xucred of a TCP6 connection");
+#endif
+
+void
+tcp_ctlinput(int cmd, struct sockaddr *sa, void *vip)
+{
+ struct ip *ip = vip;
+ struct tcphdr *th;
+ struct in_addr faddr;
+ struct inpcb *inp;
+ struct tcpcb *tp;
+ struct inpcb *(*notify)(struct inpcb *, int) = tcp_notify;
+ struct icmp *icp;
+ struct in_conninfo inc;
+ tcp_seq icmp_tcp_seq;
+ int mtu;
+
+ faddr = ((struct sockaddr_in *)sa)->sin_addr;
+ if (sa->sa_family != AF_INET || faddr.s_addr == INADDR_ANY)
+ return;
+
+ if (cmd == PRC_MSGSIZE)
+ notify = tcp_mtudisc;
+ else if (V_icmp_may_rst && (cmd == PRC_UNREACH_ADMIN_PROHIB ||
+ cmd == PRC_UNREACH_PORT || cmd == PRC_TIMXCEED_INTRANS) && ip)
+ notify = tcp_drop_syn_sent;
+ /*
+ * Redirects don't need to be handled up here.
+ */
+ else if (PRC_IS_REDIRECT(cmd))
+ return;
+ /*
+	 * Source quench is deprecated.
+ */
+ else if (cmd == PRC_QUENCH)
+ return;
+ /*
+ * Hostdead is ugly because it goes linearly through all PCBs.
+ * XXX: We never get this from ICMP, otherwise it makes an
+ * excellent DoS attack on machines with many connections.
+ */
+ else if (cmd == PRC_HOSTDEAD)
+ ip = NULL;
+ else if ((unsigned)cmd >= PRC_NCMDS || inetctlerrmap[cmd] == 0)
+ return;
+ if (ip != NULL) {
+ icp = (struct icmp *)((caddr_t)ip
+ - offsetof(struct icmp, icmp_ip));
+ th = (struct tcphdr *)((caddr_t)ip
+ + (ip->ip_hl << 2));
+ INP_INFO_WLOCK(&V_tcbinfo);
+ inp = in_pcblookup_hash(&V_tcbinfo, faddr, th->th_dport,
+ ip->ip_src, th->th_sport, 0, NULL);
+ if (inp != NULL) {
+ INP_WLOCK(inp);
+ if (!(inp->inp_flags & INP_TIMEWAIT) &&
+ !(inp->inp_flags & INP_DROPPED) &&
+ !(inp->inp_socket == NULL)) {
+ icmp_tcp_seq = htonl(th->th_seq);
+ tp = intotcpcb(inp);
+ if (SEQ_GEQ(icmp_tcp_seq, tp->snd_una) &&
+ SEQ_LT(icmp_tcp_seq, tp->snd_max)) {
+ if (cmd == PRC_MSGSIZE) {
+ /*
+ * MTU discovery:
+ * If we got a needfrag set the MTU
+ * in the route to the suggested new
+ * value (if given) and then notify.
+ */
+ bzero(&inc, sizeof(inc));
+ inc.inc_faddr = faddr;
+ inc.inc_fibnum =
+ inp->inp_inc.inc_fibnum;
+
+ mtu = ntohs(icp->icmp_nextmtu);
+ /*
+ * If no alternative MTU was
+ * proposed, try the next smaller
+ * one. ip->ip_len has already
+ * been swapped in icmp_input().
+ */
+ if (!mtu)
+ mtu = ip_next_mtu(ip->ip_len,
+ 1);
+ if (mtu < V_tcp_minmss
+ + sizeof(struct tcpiphdr))
+ mtu = V_tcp_minmss
+ + sizeof(struct tcpiphdr);
+ /*
+					 * Only cache the MTU if it
+					 * is smaller than the interface
+					 * or route MTU. tcp_mtudisc()
+					 * will do the right thing by itself.
+ */
+ if (mtu <= tcp_maxmtu(&inc, NULL))
+ tcp_hc_updatemtu(&inc, mtu);
+ }
+
+ inp = (*notify)(inp, inetctlerrmap[cmd]);
+ }
+ }
+ if (inp != NULL)
+ INP_WUNLOCK(inp);
+ } else {
+ bzero(&inc, sizeof(inc));
+ inc.inc_fport = th->th_dport;
+ inc.inc_lport = th->th_sport;
+ inc.inc_faddr = faddr;
+ inc.inc_laddr = ip->ip_src;
+ syncache_unreach(&inc, th);
+ }
+ INP_INFO_WUNLOCK(&V_tcbinfo);
+ } else
+ in_pcbnotifyall(&V_tcbinfo, faddr, inetctlerrmap[cmd], notify);
+}
+
+#ifdef INET6
+void
+tcp6_ctlinput(int cmd, struct sockaddr *sa, void *d)
+{
+ struct tcphdr th;
+ struct inpcb *(*notify)(struct inpcb *, int) = tcp_notify;
+ struct ip6_hdr *ip6;
+ struct mbuf *m;
+ struct ip6ctlparam *ip6cp = NULL;
+ const struct sockaddr_in6 *sa6_src = NULL;
+ int off;
+ struct tcp_portonly {
+ u_int16_t th_sport;
+ u_int16_t th_dport;
+ } *thp;
+
+ if (sa->sa_family != AF_INET6 ||
+ sa->sa_len != sizeof(struct sockaddr_in6))
+ return;
+
+ if (cmd == PRC_MSGSIZE)
+ notify = tcp_mtudisc;
+ else if (!PRC_IS_REDIRECT(cmd) &&
+ ((unsigned)cmd >= PRC_NCMDS || inet6ctlerrmap[cmd] == 0))
+ return;
+	/* Source quench is deprecated. */
+ else if (cmd == PRC_QUENCH)
+ return;
+
+ /* if the parameter is from icmp6, decode it. */
+ if (d != NULL) {
+ ip6cp = (struct ip6ctlparam *)d;
+ m = ip6cp->ip6c_m;
+ ip6 = ip6cp->ip6c_ip6;
+ off = ip6cp->ip6c_off;
+ sa6_src = ip6cp->ip6c_src;
+ } else {
+ m = NULL;
+ ip6 = NULL;
+ off = 0; /* fool gcc */
+ sa6_src = &sa6_any;
+ }
+
+ if (ip6 != NULL) {
+ struct in_conninfo inc;
+ /*
+		 * XXX: We assume that when ip6 is non-NULL,
+ * M and OFF are valid.
+ */
+
+ /* check if we can safely examine src and dst ports */
+ if (m->m_pkthdr.len < off + sizeof(*thp))
+ return;
+
+ bzero(&th, sizeof(th));
+ m_copydata(m, off, sizeof(*thp), (caddr_t)&th);
+
+ in6_pcbnotify(&V_tcbinfo, sa, th.th_dport,
+ (struct sockaddr *)ip6cp->ip6c_src,
+ th.th_sport, cmd, NULL, notify);
+
+ bzero(&inc, sizeof(inc));
+ inc.inc_fport = th.th_dport;
+ inc.inc_lport = th.th_sport;
+ inc.inc6_faddr = ((struct sockaddr_in6 *)sa)->sin6_addr;
+ inc.inc6_laddr = ip6cp->ip6c_src->sin6_addr;
+ inc.inc_flags |= INC_ISIPV6;
+ INP_INFO_WLOCK(&V_tcbinfo);
+ syncache_unreach(&inc, &th);
+ INP_INFO_WUNLOCK(&V_tcbinfo);
+ } else
+ in6_pcbnotify(&V_tcbinfo, sa, 0, (const struct sockaddr *)sa6_src,
+ 0, cmd, NULL, notify);
+}
+#endif /* INET6 */
+
+/*
+ * Following is where TCP initial sequence number generation occurs.
+ *
+ * There are two places where we must use initial sequence numbers:
+ * 1. In SYN-ACK packets.
+ * 2. In SYN packets.
+ *
+ * All ISNs for SYN-ACK packets are generated by the syncache. See
+ * tcp_syncache.c for details.
+ *
+ * The ISNs in SYN packets must be monotonic; TIME_WAIT recycling
+ * depends on this property. In addition, these ISNs should be
+ * unguessable so as to prevent connection hijacking. To satisfy
+ * the requirements of this situation, the algorithm outlined in
+ * RFC 1948 is used, with only small modifications.
+ *
+ * Implementation details:
+ *
+ * Time is based off the system timer, and is corrected so that it
+ * increases by one megabyte per second. This allows for proper
+ * recycling on high speed LANs while still leaving over an hour
+ * before rollover.
+ *
+ * As reading the *exact* system time is too expensive to be done
+ * whenever setting up a TCP connection, we increment the time
+ * offset in two ways. First, a small random positive increment
+ * is added to isn_offset for each connection that is set up.
+ * Second, the function tcp_isn_tick fires once per clock tick
+ * and increments isn_offset as necessary so that sequence numbers
+ * are incremented at approximately ISN_BYTES_PER_SECOND. The
+ * random positive increments serve only to ensure that the same
+ * exact sequence number is never sent out twice (as could otherwise
+ * happen when a port is recycled in less than the system tick
+ * interval.)
+ *
+ * net.inet.tcp.isn_reseed_interval controls the number of seconds
+ * between seeding of isn_secret. This is normally set to zero,
+ * as reseeding should not be necessary.
+ *
+ * Locking of the global variables isn_secret, isn_last_reseed, isn_offset,
+ * and isn_offset_old is performed using the dedicated ISN lock (isn_mtx),
+ * acquired via the ISN_LOCK()/ISN_UNLOCK() macros above.
+ */
+
+#define ISN_BYTES_PER_SECOND 1048576
+#define ISN_STATIC_INCREMENT 4096
+#define ISN_RANDOM_INCREMENT (4096 - 1)
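+
+/*
+ * The arithmetic behind "over an hour before rollover" above: at
+ * ISN_BYTES_PER_SECOND = 2^20, the 32-bit sequence space wraps after
+ * 2^32 / 2^20 = 4096 seconds, about 68 minutes.  Each run of
+ * tcp_isn_tick() (100 times per second) advances the offset to the next
+ * ISN_BYTES_PER_SECOND / 100 = 10485 byte boundary, unless the random
+ * per-connection increments have already pushed it past that point.
+ */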
+
+static VNET_DEFINE(u_char, isn_secret[32]);
+static VNET_DEFINE(int, isn_last_reseed);
+static VNET_DEFINE(u_int32_t, isn_offset);
+static VNET_DEFINE(u_int32_t, isn_offset_old);
+
+#define V_isn_secret VNET(isn_secret)
+#define V_isn_last_reseed VNET(isn_last_reseed)
+#define V_isn_offset VNET(isn_offset)
+#define V_isn_offset_old VNET(isn_offset_old)
+
+tcp_seq
+tcp_new_isn(struct tcpcb *tp)
+{
+ MD5_CTX isn_ctx;
+ u_int32_t md5_buffer[4];
+ tcp_seq new_isn;
+
+ INP_WLOCK_ASSERT(tp->t_inpcb);
+
+ ISN_LOCK();
+ /* Seed if this is the first use, reseed if requested. */
+ if ((V_isn_last_reseed == 0) || ((V_tcp_isn_reseed_interval > 0) &&
+ (((u_int)V_isn_last_reseed + (u_int)V_tcp_isn_reseed_interval*hz)
+ < (u_int)ticks))) {
+ read_random(&V_isn_secret, sizeof(V_isn_secret));
+ V_isn_last_reseed = ticks;
+ }
+
+ /* Compute the md5 hash and return the ISN. */
+ MD5Init(&isn_ctx);
+ MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_fport, sizeof(u_short));
+ MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_lport, sizeof(u_short));
+#ifdef INET6
+ if ((tp->t_inpcb->inp_vflag & INP_IPV6) != 0) {
+ MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->in6p_faddr,
+ sizeof(struct in6_addr));
+ MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->in6p_laddr,
+ sizeof(struct in6_addr));
+ } else
+#endif
+ {
+ MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_faddr,
+ sizeof(struct in_addr));
+ MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_laddr,
+ sizeof(struct in_addr));
+ }
+ MD5Update(&isn_ctx, (u_char *) &V_isn_secret, sizeof(V_isn_secret));
+ MD5Final((u_char *) &md5_buffer, &isn_ctx);
+ new_isn = (tcp_seq) md5_buffer[0];
+ V_isn_offset += ISN_STATIC_INCREMENT +
+ (arc4random() & ISN_RANDOM_INCREMENT);
+ new_isn += V_isn_offset;
+ ISN_UNLOCK();
+ return (new_isn);
+}
+
+/*
+ * Increment the offset to the next ISN_BYTES_PER_SECOND / 100 boundary
+ * to keep time flowing at a relatively constant rate. If the random
+ * increments have already pushed us past the projected offset, do nothing.
+ */
+static void
+tcp_isn_tick(void *xtp)
+{
+ VNET_ITERATOR_DECL(vnet_iter);
+ u_int32_t projected_offset;
+
+ VNET_LIST_RLOCK_NOSLEEP();
+ ISN_LOCK();
+ VNET_FOREACH(vnet_iter) {
+ CURVNET_SET(vnet_iter); /* XXX appease INVARIANTS */
+ projected_offset =
+ V_isn_offset_old + ISN_BYTES_PER_SECOND / 100;
+
+ if (SEQ_GT(projected_offset, V_isn_offset))
+ V_isn_offset = projected_offset;
+
+ V_isn_offset_old = V_isn_offset;
+ CURVNET_RESTORE();
+ }
+ ISN_UNLOCK();
+ VNET_LIST_RUNLOCK_NOSLEEP();
+ callout_reset(&isn_callout, hz/100, tcp_isn_tick, NULL);
+}
+
+/*
+ * When a specific ICMP unreachable message is received and the
+ * connection state is SYN-SENT, drop the connection. This behavior
+ * is controlled by the icmp_may_rst sysctl.
+ */
+struct inpcb *
+tcp_drop_syn_sent(struct inpcb *inp, int errno)
+{
+ struct tcpcb *tp;
+
+ INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
+ INP_WLOCK_ASSERT(inp);
+
+ if ((inp->inp_flags & INP_TIMEWAIT) ||
+ (inp->inp_flags & INP_DROPPED))
+ return (inp);
+
+ tp = intotcpcb(inp);
+ if (tp->t_state != TCPS_SYN_SENT)
+ return (inp);
+
+ tp = tcp_drop(tp, errno);
+ if (tp != NULL)
+ return (inp);
+ else
+ return (NULL);
+}
+
+/*
+ * When `need fragmentation' ICMP is received, update our idea of the MSS
+ * based on the new value in the route. Also nudge TCP to send something,
+ * since we know the packet we just sent was dropped.
+ * This duplicates some code in the tcp_mss() function in tcp_input.c.
+ */
+struct inpcb *
+tcp_mtudisc(struct inpcb *inp, int errno)
+{
+ struct tcpcb *tp;
+ struct socket *so;
+
+ INP_WLOCK_ASSERT(inp);
+ if ((inp->inp_flags & INP_TIMEWAIT) ||
+ (inp->inp_flags & INP_DROPPED))
+ return (inp);
+
+ tp = intotcpcb(inp);
+ KASSERT(tp != NULL, ("tcp_mtudisc: tp == NULL"));
+
+ tcp_mss_update(tp, -1, NULL, NULL);
+
+ so = inp->inp_socket;
+ SOCKBUF_LOCK(&so->so_snd);
+ /* If the mss is larger than the socket buffer, decrease the mss. */
+ if (so->so_snd.sb_hiwat < tp->t_maxseg)
+ tp->t_maxseg = so->so_snd.sb_hiwat;
+ SOCKBUF_UNLOCK(&so->so_snd);
+
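+ /* Rewind snd_nxt to snd_una so the lost segment is resent with the new MSS. */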
+ TCPSTAT_INC(tcps_mturesent);
+ tp->t_rtttime = 0;
+ tp->snd_nxt = tp->snd_una;
+ tcp_free_sackholes(tp);
+ tp->snd_recover = tp->snd_max;
+ if (tp->t_flags & TF_SACK_PERMIT)
+ EXIT_FASTRECOVERY(tp);
+ tcp_output_send(tp);
+ return (inp);
+}
+
+/*
+ * Look-up the routing entry to the peer of this inpcb. If no route
+ * is found and it cannot be allocated, then return 0. This routine
+ * is called by TCP routines that access the rmx structure and by
+ * tcp_mss_update to get the peer/interface MTU.
+ */
+u_long
+tcp_maxmtu(struct in_conninfo *inc, int *flags)
+{
+ struct route sro;
+ struct sockaddr_in *dst;
+ struct ifnet *ifp;
+ u_long maxmtu = 0;
+
+ KASSERT(inc != NULL, ("tcp_maxmtu with NULL in_conninfo pointer"));
+
+ bzero(&sro, sizeof(sro));
+ if (inc->inc_faddr.s_addr != INADDR_ANY) {
+ dst = (struct sockaddr_in *)&sro.ro_dst;
+ dst->sin_family = AF_INET;
+ dst->sin_len = sizeof(*dst);
+ dst->sin_addr = inc->inc_faddr;
+ in_rtalloc_ign(&sro, 0, inc->inc_fibnum);
+ }
+ if (sro.ro_rt != NULL) {
+ ifp = sro.ro_rt->rt_ifp;
+ if (sro.ro_rt->rt_rmx.rmx_mtu == 0)
+ maxmtu = ifp->if_mtu;
+ else
+ maxmtu = min(sro.ro_rt->rt_rmx.rmx_mtu, ifp->if_mtu);
+
+ /* Report additional interface capabilities. */
+ if (flags != NULL) {
+ if (ifp->if_capenable & IFCAP_TSO4 &&
+ ifp->if_hwassist & CSUM_TSO)
+ *flags |= CSUM_TSO;
+ }
+ RTFREE(sro.ro_rt);
+ }
+ return (maxmtu);
+}
+
+#ifdef INET6
+u_long
+tcp_maxmtu6(struct in_conninfo *inc, int *flags)
+{
+ struct route_in6 sro6;
+ struct ifnet *ifp;
+ u_long maxmtu = 0;
+
+ KASSERT(inc != NULL, ("tcp_maxmtu6 with NULL in_conninfo pointer"));
+
+ bzero(&sro6, sizeof(sro6));
+ if (!IN6_IS_ADDR_UNSPECIFIED(&inc->inc6_faddr)) {
+ sro6.ro_dst.sin6_family = AF_INET6;
+ sro6.ro_dst.sin6_len = sizeof(struct sockaddr_in6);
+ sro6.ro_dst.sin6_addr = inc->inc6_faddr;
+ rtalloc_ign((struct route *)&sro6, 0);
+ }
+ if (sro6.ro_rt != NULL) {
+ ifp = sro6.ro_rt->rt_ifp;
+ if (sro6.ro_rt->rt_rmx.rmx_mtu == 0)
+ maxmtu = IN6_LINKMTU(sro6.ro_rt->rt_ifp);
+ else
+ maxmtu = min(sro6.ro_rt->rt_rmx.rmx_mtu,
+ IN6_LINKMTU(sro6.ro_rt->rt_ifp));
+
+ /* Report additional interface capabilities. */
+ if (flags != NULL) {
+ if (ifp->if_capenable & IFCAP_TSO6 &&
+ ifp->if_hwassist & CSUM_TSO)
+ *flags |= CSUM_TSO;
+ }
+ RTFREE(sro6.ro_rt);
+ }
+
+ return (maxmtu);
+}
+#endif /* INET6 */
+
+#ifdef IPSEC
+/* compute ESP/AH header size for TCP, including outer IP header. */
+size_t
+ipsec_hdrsiz_tcp(struct tcpcb *tp)
+{
+ struct inpcb *inp;
+ struct mbuf *m;
+ size_t hdrsiz;
+ struct ip *ip;
+#ifdef INET6
+ struct ip6_hdr *ip6;
+#endif
+ struct tcphdr *th;
+
+ if ((tp == NULL) || ((inp = tp->t_inpcb) == NULL))
+ return (0);
+ MGETHDR(m, M_DONTWAIT, MT_DATA);
+ if (!m)
+ return (0);
+
+#ifdef INET6
+ if ((inp->inp_vflag & INP_IPV6) != 0) {
+ ip6 = mtod(m, struct ip6_hdr *);
+ th = (struct tcphdr *)(ip6 + 1);
+ m->m_pkthdr.len = m->m_len =
+ sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
+ tcpip_fillheaders(inp, ip6, th);
+ hdrsiz = ipsec_hdrsiz(m, IPSEC_DIR_OUTBOUND, inp);
+ } else
+#endif /* INET6 */
+ {
+ ip = mtod(m, struct ip *);
+ th = (struct tcphdr *)(ip + 1);
+ m->m_pkthdr.len = m->m_len = sizeof(struct tcpiphdr);
+ tcpip_fillheaders(inp, ip, th);
+ hdrsiz = ipsec_hdrsiz(m, IPSEC_DIR_OUTBOUND, inp);
+ }
+
+ m_free(m);
+ return (hdrsiz);
+}
+#endif /* IPSEC */
+
+/*
+ * TCP BANDWIDTH DELAY PRODUCT WINDOW LIMITING
+ *
+ * This code attempts to calculate the bandwidth-delay product as a
+ * means of determining the optimal window size to maximize bandwidth,
+ * minimize RTT, and avoid the over-allocation of buffers on interfaces and
+ * routers. This code also does a fairly good job keeping RTTs in check
+ * across slow links like modems. We implement an algorithm which is very
+ * similar to (but not meant to be) TCP/Vegas. The code operates on the
+ * transmitter side of a TCP connection and so only affects the transmit
+ * side of the connection.
+ *
+ * BACKGROUND: TCP makes no provision for the management of buffer space
+ * at the end points or at the intermediate routers and switches. A TCP
+ * stream, whether using NewReno or not, will eventually buffer as
+ * many packets as it is able and the only reason this typically works is
+ * due to the fairly small default buffers made available for a connection
+ * (typically 16K or 32K). As machines use larger windows and/or window
+ * scaling it is now fairly easy for even a single TCP connection to blow-out
+ * all available buffer space not only on the local interface, but on
+ * intermediate routers and switches as well. NewReno makes a misguided
+ * attempt to 'solve' this problem by waiting for an actual failure to occur,
+ * then backing off, then steadily increasing the window again until another
+ * failure occurs, ad-infinitum. This results in terrible oscillation that
+ * is only made worse as network loads increase and the idea of intentionally
+ * blowing out network buffers is, frankly, a terrible way to manage network
+ * resources.
+ *
+ * It is far better to limit the transmit window prior to the failure
+ * condition being achieved. There are two general ways to do this: First
+ * you can 'scan' through different transmit window sizes and locate the
+ * point where the RTT stops increasing, indicating that you have filled the
+ * pipe, then scan backwards until you note that RTT stops decreasing, then
+ * repeat ad-infinitum. This method works in principle but has severe
+ * implementation issues due to RTT variances, timer granularity, and
+ * instability in the algorithm which can lead to many false positives and
+ * create oscillations as well as interact badly with other TCP streams
+ * implementing the same algorithm.
+ *
+ * The second method is to limit the window to the bandwidth delay product
+ * of the link. This is the method we implement. RTT variances and our
+ * own manipulation of the congestion window, bwnd, can potentially
+ * destabilize the algorithm. For this reason we have to stabilize the
+ * elements used to calculate the window. We do this by using the minimum
+ * observed RTT, the long term average of the observed bandwidth, and
+ * by adding two segments worth of slop. It isn't perfect but it is able
+ * to react to changing conditions and gives us a very stable basis on
+ * which to extend the algorithm.
+ */
+void
+tcp_xmit_bandwidth_limit(struct tcpcb *tp, tcp_seq ack_seq)
+{
+ u_long bw;
+ u_long bwnd;
+ int save_ticks;
+
+ INP_WLOCK_ASSERT(tp->t_inpcb);
+
+ /*
+ * If inflight_enable is disabled in the middle of a tcp connection,
+ * make sure snd_bwnd is effectively disabled.
+ */
+ if (V_tcp_inflight_enable == 0 ||
+ tp->t_rttlow < V_tcp_inflight_rttthresh) {
+ tp->snd_bwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT;
+ tp->snd_bandwidth = 0;
+ return;
+ }
+
+ /*
+ * Figure out the bandwidth. Due to the tick granularity this
+ * is a very rough number and it MUST be averaged over a fairly
+ * long period of time. XXX we need to take into account a link
+ * that is not using all available bandwidth, but for now our
+ * slop will ramp us up if this case occurs and the bandwidth later
+ * increases.
+ *
+ * Note: if ticks rolls over, 'bw' may wind up negative. We must
+ * effectively reset t_bw_rtttime for this case.
+ */
+ save_ticks = ticks;
+ if ((u_int)(save_ticks - tp->t_bw_rtttime) < 1)
+ return;
+
+ bw = (int64_t)(ack_seq - tp->t_bw_rtseq) * hz /
+ (save_ticks - tp->t_bw_rtttime);
+ tp->t_bw_rtttime = save_ticks;
+ tp->t_bw_rtseq = ack_seq;
+ if (tp->t_bw_rtttime == 0 || (int)bw < 0)
+ return;
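+ /*
+ * Smooth the raw sample into snd_bandwidth with a 15/16
+ * exponential moving average: new = (old * 15 + sample) / 16.
+ */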
+ bw = ((int64_t)tp->snd_bandwidth * 15 + bw) >> 4;
+
+ tp->snd_bandwidth = bw;
+
+ /*
+ * Calculate the semi-static bandwidth delay product, plus two maximal
+ * segments. The additional slop puts us squarely in the sweet
+ * spot and also handles the bandwidth run-up case and stabilization.
+ * Without the slop we could be locking ourselves into a lower
+ * bandwidth.
+ *
+ * Situations Handled:
+ * (1) Prevents over-queueing of packets on LANs, especially on
+ * high speed LANs, allowing larger TCP buffers to be
+ * specified, and also does a good job preventing
+ * over-queueing of packets over choke points like modems
+ * (at least for the transmit side).
+ *
+ * (2) Is able to handle changing network loads (bandwidth
+ * drops so bwnd drops, bandwidth increases so bwnd
+ * increases).
+ *
+ * (3) Theoretically should stabilize in the face of multiple
+ * connections implementing the same algorithm (this may need
+ * a little work).
+ *
+ * (4) Stability value (defaults to 20 = 2 maximal packets) can
+ * be adjusted with a sysctl, but typically only needs to be
+ * changed on very slow connections. A value no smaller than 5
+ * should be used, but only reduce this default if you have
+ * no other choice.
+ */
+#define USERTT ((tp->t_srtt + tp->t_rttbest) / 2)
+ bwnd = (int64_t)bw * USERTT / (hz << TCP_RTT_SHIFT) + V_tcp_inflight_stab * tp->t_maxseg / 10;
+#undef USERTT
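+
+ /*
+ * Worked example (illustrative numbers only, assuming hz = 100 and
+ * TCP_RTT_SHIFT = 5): with bw = 1,000,000 bytes/sec and a smoothed
+ * RTT of 50 ms, t_srtt is about 5 << 5 = 160, so USERTT is about
+ * 160 and bw * USERTT / (hz << 5) = 1,000,000 * 160 / 3200 =
+ * 50,000 bytes, i.e. the classic bandwidth * delay product. The
+ * default stab of 20 then adds 2 * t_maxseg of slop (e.g. 2920
+ * bytes at a 1460-byte MSS).
+ */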
+
+ if (tcp_inflight_debug > 0) {
+ static int ltime;
+ if ((u_int)(ticks - ltime) >= hz / tcp_inflight_debug) {
+ ltime = ticks;
+ printf("%p bw %ld rttbest %d srtt %d bwnd %ld\n",
+ tp,
+ bw,
+ tp->t_rttbest,
+ tp->t_srtt,
+ bwnd
+ );
+ }
+ }
+ if ((long)bwnd < V_tcp_inflight_min)
+ bwnd = V_tcp_inflight_min;
+ if (bwnd > V_tcp_inflight_max)
+ bwnd = V_tcp_inflight_max;
+ if ((long)bwnd < tp->t_maxseg * 2)
+ bwnd = tp->t_maxseg * 2;
+ tp->snd_bwnd = bwnd;
+}
+
+#ifdef TCP_SIGNATURE
+/*
+ * Callback function invoked by m_apply() to digest TCP segment data
+ * contained within an mbuf chain.
+ */
+static int
+tcp_signature_apply(void *fstate, void *data, u_int len)
+{
+
+ MD5Update(fstate, (u_char *)data, len);
+ return (0);
+}
+
+/*
+ * Compute TCP-MD5 hash of a TCP segment. (RFC2385)
+ *
+ * Parameters:
+ * m pointer to head of mbuf chain
+ * _unused
+ * len length of TCP segment data, excluding options
+ * optlen length of TCP segment options
+ * buf pointer to storage for computed MD5 digest
+ * direction direction of flow (IPSEC_DIR_INBOUND or OUTBOUND)
+ *
+ * We do this over ip, tcphdr, segment data, and the key in the SADB.
+ * When called from tcp_input(), we can be sure that th_sum has been
+ * zeroed out and verified already.
+ *
+ * Return 0 if successful, otherwise return -1.
+ *
+ * XXX The key is retrieved from the system's PF_KEY SADB, by keying a
+ * search with the destination IP address, and a 'magic SPI' to be
+ * determined by the application. This is hardcoded elsewhere to 1179
+ * right now. Another branch of this code exists which uses the SPD to
+ * specify per-application flows but it is unstable.
+ */
+int
+tcp_signature_compute(struct mbuf *m, int _unused, int len, int optlen,
+ u_char *buf, u_int direction)
+{
+ union sockaddr_union dst;
+ struct ippseudo ippseudo;
+ MD5_CTX ctx;
+ int doff;
+ struct ip *ip;
+ struct ipovly *ipovly;
+ struct secasvar *sav;
+ struct tcphdr *th;
+#ifdef INET6
+ struct ip6_hdr *ip6;
+ struct in6_addr in6;
+ char ip6buf[INET6_ADDRSTRLEN];
+ uint32_t plen;
+ uint16_t nhdr;
+#endif
+ u_short savecsum;
+
+ KASSERT(m != NULL, ("NULL mbuf chain"));
+ KASSERT(buf != NULL, ("NULL signature pointer"));
+
+ /* Extract the destination from the IP header in the mbuf. */
+ bzero(&dst, sizeof(union sockaddr_union));
+ ip = mtod(m, struct ip *);
+#ifdef INET6
+ ip6 = NULL; /* Make the compiler happy. */
+#endif
+ switch (ip->ip_v) {
+ case IPVERSION:
+ dst.sa.sa_len = sizeof(struct sockaddr_in);
+ dst.sa.sa_family = AF_INET;
+ dst.sin.sin_addr = (direction == IPSEC_DIR_INBOUND) ?
+ ip->ip_src : ip->ip_dst;
+ break;
+#ifdef INET6
+ case (IPV6_VERSION >> 4):
+ ip6 = mtod(m, struct ip6_hdr *);
+ dst.sa.sa_len = sizeof(struct sockaddr_in6);
+ dst.sa.sa_family = AF_INET6;
+ dst.sin6.sin6_addr = (direction == IPSEC_DIR_INBOUND) ?
+ ip6->ip6_src : ip6->ip6_dst;
+ break;
+#endif
+ default:
+ return (EINVAL);
+ /* NOTREACHED */
+ break;
+ }
+
+ /* Look up an SADB entry which matches the address of the peer. */
+ sav = KEY_ALLOCSA(&dst, IPPROTO_TCP, htonl(TCP_SIG_SPI));
+ if (sav == NULL) {
+ ipseclog((LOG_ERR, "%s: SADB lookup failed for %s\n", __func__,
+ (ip->ip_v == IPVERSION) ? inet_ntoa(dst.sin.sin_addr) :
+#ifdef INET6
+ (ip->ip_v == (IPV6_VERSION >> 4)) ?
+ ip6_sprintf(ip6buf, &dst.sin6.sin6_addr) :
+#endif
+ "(unsupported)"));
+ return (EINVAL);
+ }
+
+ MD5Init(&ctx);
+ /*
+ * Step 1: Update MD5 hash with IP(v6) pseudo-header.
+ *
+ * XXX The ippseudo header MUST be digested in network byte order,
+ * or else we'll fail the regression test. Assume all fields we've
+ * been doing arithmetic on have been in host byte order.
+ * XXX One cannot depend on ipovly->ih_len here. When called from
+ * tcp_output(), the underlying ip_len member has not yet been set.
+ */
+ switch (ip->ip_v) {
+ case IPVERSION:
+ ipovly = (struct ipovly *)ip;
+ ippseudo.ippseudo_src = ipovly->ih_src;
+ ippseudo.ippseudo_dst = ipovly->ih_dst;
+ ippseudo.ippseudo_pad = 0;
+ ippseudo.ippseudo_p = IPPROTO_TCP;
+ ippseudo.ippseudo_len = htons(len + sizeof(struct tcphdr) +
+ optlen);
+ MD5Update(&ctx, (char *)&ippseudo, sizeof(struct ippseudo));
+
+ th = (struct tcphdr *)((u_char *)ip + sizeof(struct ip));
+ doff = sizeof(struct ip) + sizeof(struct tcphdr) + optlen;
+ break;
+#ifdef INET6
+ /*
+ * RFC 2385, 2.0 Proposal
+ * For IPv6, the pseudo-header is as described in RFC 2460, namely the
+ * 128-bit source IPv6 address, 128-bit destination IPv6 address, zero-
+ * extended next header value (to form 32 bits), and 32-bit segment
+ * length.
+ * Note: Upper-Layer Packet Length comes before Next Header.
+ */
+ case (IPV6_VERSION >> 4):
+ in6 = ip6->ip6_src;
+ in6_clearscope(&in6);
+ MD5Update(&ctx, (char *)&in6, sizeof(struct in6_addr));
+ in6 = ip6->ip6_dst;
+ in6_clearscope(&in6);
+ MD5Update(&ctx, (char *)&in6, sizeof(struct in6_addr));
+ plen = htonl(len + sizeof(struct tcphdr) + optlen);
+ MD5Update(&ctx, (char *)&plen, sizeof(uint32_t));
+ nhdr = 0;
+ MD5Update(&ctx, (char *)&nhdr, sizeof(uint8_t));
+ MD5Update(&ctx, (char *)&nhdr, sizeof(uint8_t));
+ MD5Update(&ctx, (char *)&nhdr, sizeof(uint8_t));
+ nhdr = IPPROTO_TCP;
+ MD5Update(&ctx, (char *)&nhdr, sizeof(uint8_t));
+
+ th = (struct tcphdr *)((u_char *)ip6 + sizeof(struct ip6_hdr));
+ doff = sizeof(struct ip6_hdr) + sizeof(struct tcphdr) + optlen;
+ break;
+#endif
+ default:
+ return (EINVAL);
+ /* NOTREACHED */
+ break;
+ }
+
+ /*
+ * Step 2: Update MD5 hash with TCP header, excluding options.
+ * The TCP checksum must be set to zero.
+ */
+ savecsum = th->th_sum;
+ th->th_sum = 0;
+ MD5Update(&ctx, (char *)th, sizeof(struct tcphdr));
+ th->th_sum = savecsum;
+
+ /*
+ * Step 3: Update MD5 hash with TCP segment data.
+ * Use m_apply() to avoid an early m_pullup().
+ */
+ if (len > 0)
+ m_apply(m, doff, len, tcp_signature_apply, &ctx);
+
+ /*
+ * Step 4: Update MD5 hash with shared secret.
+ */
+ MD5Update(&ctx, sav->key_auth->key_data, _KEYLEN(sav->key_auth));
+ MD5Final(buf, &ctx);
+
+ key_sa_recordxfer(sav, m);
+ KEY_FREESAV(&sav);
+ return (0);
+}
+#endif /* TCP_SIGNATURE */
+
+static int
+sysctl_drop(SYSCTL_HANDLER_ARGS)
+{
+ /* addrs[0] is a foreign socket, addrs[1] is a local one. */
+ struct sockaddr_storage addrs[2];
+ struct inpcb *inp;
+ struct tcpcb *tp;
+ struct tcptw *tw;
+ struct sockaddr_in *fin, *lin;
+#ifdef INET6
+ struct sockaddr_in6 *fin6, *lin6;
+#endif
+ int error;
+
+ inp = NULL;
+ fin = lin = NULL;
+#ifdef INET6
+ fin6 = lin6 = NULL;
+#endif
+ error = 0;
+
+ if (req->oldptr != NULL || req->oldlen != 0)
+ return (EINVAL);
+ if (req->newptr == NULL)
+ return (EPERM);
+ if (req->newlen < sizeof(addrs))
+ return (ENOMEM);
+ error = SYSCTL_IN(req, &addrs, sizeof(addrs));
+ if (error)
+ return (error);
+
+ switch (addrs[0].ss_family) {
+#ifdef INET6
+ case AF_INET6:
+ fin6 = (struct sockaddr_in6 *)&addrs[0];
+ lin6 = (struct sockaddr_in6 *)&addrs[1];
+ if (fin6->sin6_len != sizeof(struct sockaddr_in6) ||
+ lin6->sin6_len != sizeof(struct sockaddr_in6))
+ return (EINVAL);
+ if (IN6_IS_ADDR_V4MAPPED(&fin6->sin6_addr)) {
+ if (!IN6_IS_ADDR_V4MAPPED(&lin6->sin6_addr))
+ return (EINVAL);
+ in6_sin6_2_sin_in_sock((struct sockaddr *)&addrs[0]);
+ in6_sin6_2_sin_in_sock((struct sockaddr *)&addrs[1]);
+ fin = (struct sockaddr_in *)&addrs[0];
+ lin = (struct sockaddr_in *)&addrs[1];
+ break;
+ }
+ error = sa6_embedscope(fin6, V_ip6_use_defzone);
+ if (error)
+ return (error);
+ error = sa6_embedscope(lin6, V_ip6_use_defzone);
+ if (error)
+ return (error);
+ break;
+#endif
+ case AF_INET:
+ fin = (struct sockaddr_in *)&addrs[0];
+ lin = (struct sockaddr_in *)&addrs[1];
+ if (fin->sin_len != sizeof(struct sockaddr_in) ||
+ lin->sin_len != sizeof(struct sockaddr_in))
+ return (EINVAL);
+ break;
+ default:
+ return (EINVAL);
+ }
+ INP_INFO_WLOCK(&V_tcbinfo);
+ switch (addrs[0].ss_family) {
+#ifdef INET6
+ case AF_INET6:
+ inp = in6_pcblookup_hash(&V_tcbinfo, &fin6->sin6_addr,
+ fin6->sin6_port, &lin6->sin6_addr, lin6->sin6_port, 0,
+ NULL);
+ break;
+#endif
+ case AF_INET:
+ inp = in_pcblookup_hash(&V_tcbinfo, fin->sin_addr,
+ fin->sin_port, lin->sin_addr, lin->sin_port, 0, NULL);
+ break;
+ }
+ if (inp != NULL) {
+ INP_WLOCK(inp);
+ if (inp->inp_flags & INP_TIMEWAIT) {
+ /*
+ * XXXRW: There currently exists a state where an
+ * inpcb is present, but its timewait state has been
+ * discarded. For now, don't allow dropping of this
+ * type of inpcb.
+ */
+ tw = intotw(inp);
+ if (tw != NULL)
+ tcp_twclose(tw, 0);
+ else
+ INP_WUNLOCK(inp);
+ } else if (!(inp->inp_flags & INP_DROPPED) &&
+ !(inp->inp_socket->so_options & SO_ACCEPTCONN)) {
+ tp = intotcpcb(inp);
+ tp = tcp_drop(tp, ECONNABORTED);
+ if (tp != NULL)
+ INP_WUNLOCK(inp);
+ } else
+ INP_WUNLOCK(inp);
+ } else
+ error = ESRCH;
+ INP_INFO_WUNLOCK(&V_tcbinfo);
+ return (error);
+}
+
+SYSCTL_PROC(_net_inet_tcp, TCPCTL_DROP, drop,
+ CTLTYPE_STRUCT|CTLFLAG_WR|CTLFLAG_SKIP, NULL,
+ 0, sysctl_drop, "", "Drop TCP connection");
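+
+/*
+ * Hypothetical (untested) userland usage sketch for the handler above;
+ * the OID name follows from the SYSCTL_PROC() declaration and the input
+ * layout from sysctl_drop() itself:
+ *
+ *	struct sockaddr_storage addrs[2];
+ *
+ *	// addrs[0] = foreign endpoint, addrs[1] = local endpoint,
+ *	// both fully populated sockaddr_in or sockaddr_in6.
+ *	sysctlbyname("net.inet.tcp.drop", NULL, NULL,
+ *	    addrs, sizeof(addrs));
+ *
+ * Note the handler requires oldptr == NULL and newlen >= sizeof(addrs).
+ */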
+
+/*
+ * Generate a standardized TCP log line for use throughout the
+ * tcp subsystem. Memory allocation is done with M_NOWAIT to
+ * allow use in the interrupt context.
+ *
+ * NB: The caller MUST free(s, M_TCPLOG) the returned string.
+ * NB: The function may return NULL if memory allocation failed.
+ *
+ * Due to header inclusion and ordering limitations the struct ip
+ * and ip6_hdr pointers have to be passed as void pointers.
+ */
+char *
+tcp_log_vain(struct in_conninfo *inc, struct tcphdr *th, void *ip4hdr,
+ const void *ip6hdr)
+{
+
+ /* Is logging enabled? */
+ if (tcp_log_in_vain == 0)
+ return (NULL);
+
+ return (tcp_log_addr(inc, th, ip4hdr, ip6hdr));
+}
+
+char *
+tcp_log_addrs(struct in_conninfo *inc, struct tcphdr *th, void *ip4hdr,
+ const void *ip6hdr)
+{
+
+ /* Is logging enabled? */
+ if (tcp_log_debug == 0)
+ return (NULL);
+
+ return (tcp_log_addr(inc, th, ip4hdr, ip6hdr));
+}
+
+static char *
+tcp_log_addr(struct in_conninfo *inc, struct tcphdr *th, void *ip4hdr,
+ const void *ip6hdr)
+{
+ char *s, *sp;
+ size_t size;
+ struct ip *ip;
+#ifdef INET6
+ const struct ip6_hdr *ip6;
+
+ ip6 = (const struct ip6_hdr *)ip6hdr;
+#endif /* INET6 */
+ ip = (struct ip *)ip4hdr;
+
+ /*
+ * The log line looks like this:
+ * "TCP: [1.2.3.4]:50332 to [1.2.3.4]:80 tcpflags 0x2<SYN>"
+ */
+ size = sizeof("TCP: []:12345 to []:12345 tcpflags 0x2<>") +
+ sizeof(PRINT_TH_FLAGS) + 1 +
+#ifdef INET6
+ 2 * INET6_ADDRSTRLEN;
+#else
+ 2 * INET_ADDRSTRLEN;
+#endif /* INET6 */
+
+ s = malloc(size, M_TCPLOG, M_ZERO|M_NOWAIT);
+ if (s == NULL)
+ return (NULL);
+
+ strcat(s, "TCP: [");
+ sp = s + strlen(s);
+
+ if (inc && ((inc->inc_flags & INC_ISIPV6) == 0)) {
+ inet_ntoa_r(inc->inc_faddr, sp);
+ sp = s + strlen(s);
+ sprintf(sp, "]:%i to [", ntohs(inc->inc_fport));
+ sp = s + strlen(s);
+ inet_ntoa_r(inc->inc_laddr, sp);
+ sp = s + strlen(s);
+ sprintf(sp, "]:%i", ntohs(inc->inc_lport));
+#ifdef INET6
+ } else if (inc) {
+ ip6_sprintf(sp, &inc->inc6_faddr);
+ sp = s + strlen(s);
+ sprintf(sp, "]:%i to [", ntohs(inc->inc_fport));
+ sp = s + strlen(s);
+ ip6_sprintf(sp, &inc->inc6_laddr);
+ sp = s + strlen(s);
+ sprintf(sp, "]:%i", ntohs(inc->inc_lport));
+ } else if (ip6 && th) {
+ ip6_sprintf(sp, &ip6->ip6_src);
+ sp = s + strlen(s);
+ sprintf(sp, "]:%i to [", ntohs(th->th_sport));
+ sp = s + strlen(s);
+ ip6_sprintf(sp, &ip6->ip6_dst);
+ sp = s + strlen(s);
+ sprintf(sp, "]:%i", ntohs(th->th_dport));
+#endif /* INET6 */
+ } else if (ip && th) {
+ inet_ntoa_r(ip->ip_src, sp);
+ sp = s + strlen(s);
+ sprintf(sp, "]:%i to [", ntohs(th->th_sport));
+ sp = s + strlen(s);
+ inet_ntoa_r(ip->ip_dst, sp);
+ sp = s + strlen(s);
+ sprintf(sp, "]:%i", ntohs(th->th_dport));
+ } else {
+ free(s, M_TCPLOG);
+ return (NULL);
+ }
+ sp = s + strlen(s);
+ if (th)
+ sprintf(sp, " tcpflags 0x%b", th->th_flags, PRINT_TH_FLAGS);
+ if (*(s + size - 1) != '\0')
+ panic("%s: string too long", __func__);
+ return (s);
+}
diff --git a/rtems/freebsd/netinet/tcp_syncache.c b/rtems/freebsd/netinet/tcp_syncache.c
new file mode 100644
index 00000000..8b29936e
--- /dev/null
+++ b/rtems/freebsd/netinet/tcp_syncache.c
@@ -0,0 +1,1812 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 2001 McAfee, Inc.
+ * Copyright (c) 2006 Andre Oppermann, Internet Business Solutions AG
+ * All rights reserved.
+ *
+ * This software was developed for the FreeBSD Project by Jonathan Lemon
+ * and McAfee Research, the Security Research Division of McAfee, Inc. under
+ * DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the
+ * DARPA CHATS research program.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/freebsd/local/opt_inet.h>
+#include <rtems/freebsd/local/opt_inet6.h>
+#include <rtems/freebsd/local/opt_ipsec.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/sysctl.h>
+#include <rtems/freebsd/sys/limits.h>
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/mutex.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/md5.h>
+#include <rtems/freebsd/sys/proc.h> /* for proc0 declaration */
+#include <rtems/freebsd/sys/random.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/socketvar.h>
+#include <rtems/freebsd/sys/syslog.h>
+#include <rtems/freebsd/sys/ucred.h>
+
+#include <rtems/freebsd/vm/uma.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/route.h>
+#include <rtems/freebsd/net/vnet.h>
+
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/in_systm.h>
+#include <rtems/freebsd/netinet/ip.h>
+#include <rtems/freebsd/netinet/in_var.h>
+#include <rtems/freebsd/netinet/in_pcb.h>
+#include <rtems/freebsd/netinet/ip_var.h>
+#include <rtems/freebsd/netinet/ip_options.h>
+#ifdef INET6
+#include <rtems/freebsd/netinet/ip6.h>
+#include <rtems/freebsd/netinet/icmp6.h>
+#include <rtems/freebsd/netinet6/nd6.h>
+#include <rtems/freebsd/netinet6/ip6_var.h>
+#include <rtems/freebsd/netinet6/in6_pcb.h>
+#endif
+#include <rtems/freebsd/netinet/tcp.h>
+#include <rtems/freebsd/netinet/tcp_fsm.h>
+#include <rtems/freebsd/netinet/tcp_seq.h>
+#include <rtems/freebsd/netinet/tcp_timer.h>
+#include <rtems/freebsd/netinet/tcp_var.h>
+#include <rtems/freebsd/netinet/tcp_syncache.h>
+#include <rtems/freebsd/netinet/tcp_offload.h>
+#ifdef INET6
+#include <rtems/freebsd/netinet6/tcp6_var.h>
+#endif
+
+#ifdef IPSEC
+#include <rtems/freebsd/netipsec/ipsec.h>
+#ifdef INET6
+#include <rtems/freebsd/netipsec/ipsec6.h>
+#endif
+#include <rtems/freebsd/netipsec/key.h>
+#endif /*IPSEC*/
+
+#include <rtems/freebsd/machine/in_cksum.h>
+
+#include <rtems/freebsd/security/mac/mac_framework.h>
+
+static VNET_DEFINE(int, tcp_syncookies) = 1;
+#define V_tcp_syncookies VNET(tcp_syncookies)
+SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, syncookies, CTLFLAG_RW,
+ &VNET_NAME(tcp_syncookies), 0,
+ "Use TCP SYN cookies if the syncache overflows");
+
+static VNET_DEFINE(int, tcp_syncookiesonly) = 0;
+#define V_tcp_syncookiesonly VNET(tcp_syncookiesonly)
+SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, syncookies_only, CTLFLAG_RW,
+ &VNET_NAME(tcp_syncookiesonly), 0,
+ "Use only TCP SYN cookies");
+
+#ifdef TCP_OFFLOAD_DISABLE
+#define TOEPCB_ISSET(sc) (0)
+#else
+#define TOEPCB_ISSET(sc) ((sc)->sc_toepcb != NULL)
+#endif
+
+static void syncache_drop(struct syncache *, struct syncache_head *);
+static void syncache_free(struct syncache *);
+static void syncache_insert(struct syncache *, struct syncache_head *);
+struct syncache *syncache_lookup(struct in_conninfo *, struct syncache_head **);
+static int syncache_respond(struct syncache *);
+static struct socket *syncache_socket(struct syncache *, struct socket *,
+ struct mbuf *m);
+static void syncache_timeout(struct syncache *sc, struct syncache_head *sch,
+ int docallout);
+static void syncache_timer(void *);
+static void syncookie_generate(struct syncache_head *, struct syncache *,
+ u_int32_t *);
+static struct syncache
+ *syncookie_lookup(struct in_conninfo *, struct syncache_head *,
+ struct syncache *, struct tcpopt *, struct tcphdr *,
+ struct socket *);
+
+/*
+ * Transmit the SYN,ACK fewer times than TCP_MAXRXTSHIFT specifies.
+ * 3 retransmits correspond to a timeout of 3 * (1 + 2 + 4 + 8) == 45 seconds;
+ * the odds are that the user has given up attempting to connect by then.
+ */
+#define SYNCACHE_MAXREXMTS 3
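+
+/*
+ * Illustration (assumes the stock TCPTV_RTOBASE of 3 * hz): the timer
+ * arithmetic in syncache_timeout() yields SYN|ACK retransmits at
+ * t = 3, 9 and 21 seconds and final expiry of the entry at
+ * t = 3 * (1 + 2 + 4 + 8) = 45 seconds, as described above.
+ */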
+
+/* Arbitrary values */
+#define TCP_SYNCACHE_HASHSIZE 512
+#define TCP_SYNCACHE_BUCKETLIMIT 30
+
+static VNET_DEFINE(struct tcp_syncache, tcp_syncache);
+#define V_tcp_syncache VNET(tcp_syncache)
+
+SYSCTL_NODE(_net_inet_tcp, OID_AUTO, syncache, CTLFLAG_RW, 0, "TCP SYN cache");
+
+SYSCTL_VNET_INT(_net_inet_tcp_syncache, OID_AUTO, bucketlimit, CTLFLAG_RDTUN,
+ &VNET_NAME(tcp_syncache.bucket_limit), 0,
+ "Per-bucket hash limit for syncache");
+
+SYSCTL_VNET_INT(_net_inet_tcp_syncache, OID_AUTO, cachelimit, CTLFLAG_RDTUN,
+ &VNET_NAME(tcp_syncache.cache_limit), 0,
+ "Overall entry limit for syncache");
+
+SYSCTL_VNET_INT(_net_inet_tcp_syncache, OID_AUTO, count, CTLFLAG_RD,
+ &VNET_NAME(tcp_syncache.cache_count), 0,
+ "Current number of entries in syncache");
+
+SYSCTL_VNET_INT(_net_inet_tcp_syncache, OID_AUTO, hashsize, CTLFLAG_RDTUN,
+ &VNET_NAME(tcp_syncache.hashsize), 0,
+ "Size of TCP syncache hashtable");
+
+SYSCTL_VNET_INT(_net_inet_tcp_syncache, OID_AUTO, rexmtlimit, CTLFLAG_RW,
+ &VNET_NAME(tcp_syncache.rexmt_limit), 0,
+ "Limit on SYN/ACK retransmissions");
+
+VNET_DEFINE(int, tcp_sc_rst_sock_fail) = 1;
+SYSCTL_VNET_INT(_net_inet_tcp_syncache, OID_AUTO, rst_on_sock_fail,
+ CTLFLAG_RW, &VNET_NAME(tcp_sc_rst_sock_fail), 0,
+ "Send reset on socket allocation failure");
+
+static MALLOC_DEFINE(M_SYNCACHE, "syncache", "TCP syncache");
+
+#define SYNCACHE_HASH(inc, mask) \
+ ((V_tcp_syncache.hash_secret ^ \
+ (inc)->inc_faddr.s_addr ^ \
+ ((inc)->inc_faddr.s_addr >> 16) ^ \
+ (inc)->inc_fport ^ (inc)->inc_lport) & mask)
+
+#define SYNCACHE_HASH6(inc, mask) \
+ ((V_tcp_syncache.hash_secret ^ \
+ (inc)->inc6_faddr.s6_addr32[0] ^ \
+ (inc)->inc6_faddr.s6_addr32[3] ^ \
+ (inc)->inc_fport ^ (inc)->inc_lport) & mask)
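+
+/*
+ * Note: the "& mask" form above assumes hashmask == hashsize - 1 with
+ * hashsize a power of 2; syncache_init() below enforces exactly that.
+ */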
+
+#define ENDPTS_EQ(a, b) ( \
+ (a)->ie_fport == (b)->ie_fport && \
+ (a)->ie_lport == (b)->ie_lport && \
+ (a)->ie_faddr.s_addr == (b)->ie_faddr.s_addr && \
+ (a)->ie_laddr.s_addr == (b)->ie_laddr.s_addr \
+)
+
+#define ENDPTS6_EQ(a, b) (memcmp(a, b, sizeof(*a)) == 0)
+
+#define SCH_LOCK(sch) mtx_lock(&(sch)->sch_mtx)
+#define SCH_UNLOCK(sch) mtx_unlock(&(sch)->sch_mtx)
+#define SCH_LOCK_ASSERT(sch) mtx_assert(&(sch)->sch_mtx, MA_OWNED)
+
+/*
+ * Requires the syncache entry to be already removed from the bucket list.
+ */
+static void
+syncache_free(struct syncache *sc)
+{
+
+ if (sc->sc_ipopts)
+ (void) m_free(sc->sc_ipopts);
+ if (sc->sc_cred)
+ crfree(sc->sc_cred);
+#ifdef MAC
+ mac_syncache_destroy(&sc->sc_label);
+#endif
+
+ uma_zfree(V_tcp_syncache.zone, sc);
+}
+
+void
+syncache_init(void)
+{
+ int i;
+
+ V_tcp_syncache.cache_count = 0;
+ V_tcp_syncache.hashsize = TCP_SYNCACHE_HASHSIZE;
+ V_tcp_syncache.bucket_limit = TCP_SYNCACHE_BUCKETLIMIT;
+ V_tcp_syncache.rexmt_limit = SYNCACHE_MAXREXMTS;
+ V_tcp_syncache.hash_secret = arc4random();
+
+ TUNABLE_INT_FETCH("net.inet.tcp.syncache.hashsize",
+ &V_tcp_syncache.hashsize);
+ TUNABLE_INT_FETCH("net.inet.tcp.syncache.bucketlimit",
+ &V_tcp_syncache.bucket_limit);
+ if (!powerof2(V_tcp_syncache.hashsize) ||
+ V_tcp_syncache.hashsize == 0) {
+ printf("WARNING: syncache hash size is not a power of 2.\n");
+ V_tcp_syncache.hashsize = TCP_SYNCACHE_HASHSIZE;
+ }
+ V_tcp_syncache.hashmask = V_tcp_syncache.hashsize - 1;
+
+ /* Set limits. */
+ V_tcp_syncache.cache_limit =
+ V_tcp_syncache.hashsize * V_tcp_syncache.bucket_limit;
+ TUNABLE_INT_FETCH("net.inet.tcp.syncache.cachelimit",
+ &V_tcp_syncache.cache_limit);
+
+ /* Allocate the hash table. */
+ V_tcp_syncache.hashbase = malloc(V_tcp_syncache.hashsize *
+ sizeof(struct syncache_head), M_SYNCACHE, M_WAITOK | M_ZERO);
+
+ /* Initialize the hash buckets. */
+ for (i = 0; i < V_tcp_syncache.hashsize; i++) {
+#ifdef VIMAGE
+ V_tcp_syncache.hashbase[i].sch_vnet = curvnet;
+#endif
+ TAILQ_INIT(&V_tcp_syncache.hashbase[i].sch_bucket);
+ mtx_init(&V_tcp_syncache.hashbase[i].sch_mtx, "tcp_sc_head",
+ NULL, MTX_DEF);
+ callout_init_mtx(&V_tcp_syncache.hashbase[i].sch_timer,
+ &V_tcp_syncache.hashbase[i].sch_mtx, 0);
+ V_tcp_syncache.hashbase[i].sch_length = 0;
+ }
+
+ /* Create the syncache entry zone. */
+ V_tcp_syncache.zone = uma_zcreate("syncache", sizeof(struct syncache),
+ NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
+ uma_zone_set_max(V_tcp_syncache.zone, V_tcp_syncache.cache_limit);
+}
+
+#ifdef VIMAGE
+void
+syncache_destroy(void)
+{
+ struct syncache_head *sch;
+ struct syncache *sc, *nsc;
+ int i;
+
+ /* Cleanup hash buckets: stop timers, free entries, destroy locks. */
+ for (i = 0; i < V_tcp_syncache.hashsize; i++) {
+
+ sch = &V_tcp_syncache.hashbase[i];
+ callout_drain(&sch->sch_timer);
+
+ SCH_LOCK(sch);
+ TAILQ_FOREACH_SAFE(sc, &sch->sch_bucket, sc_hash, nsc)
+ syncache_drop(sc, sch);
+ SCH_UNLOCK(sch);
+ KASSERT(TAILQ_EMPTY(&sch->sch_bucket),
+ ("%s: sch->sch_bucket not empty", __func__));
+ KASSERT(sch->sch_length == 0, ("%s: sch->sch_length %d not 0",
+ __func__, sch->sch_length));
+ mtx_destroy(&sch->sch_mtx);
+ }
+
+ KASSERT(V_tcp_syncache.cache_count == 0, ("%s: cache_count %d not 0",
+ __func__, V_tcp_syncache.cache_count));
+
+ /* Free the allocated global resources. */
+ uma_zdestroy(V_tcp_syncache.zone);
+ free(V_tcp_syncache.hashbase, M_SYNCACHE);
+}
+#endif
+
+/*
+ * Inserts a syncache entry into the specified bucket row.
+ * Locks and unlocks the syncache_head autonomously.
+ */
+static void
+syncache_insert(struct syncache *sc, struct syncache_head *sch)
+{
+ struct syncache *sc2;
+
+ SCH_LOCK(sch);
+
+ /*
+ * Make sure that we don't overflow the per-bucket limit.
+ * If the bucket is full, toss the oldest element.
+ */
+ if (sch->sch_length >= V_tcp_syncache.bucket_limit) {
+ KASSERT(!TAILQ_EMPTY(&sch->sch_bucket),
+ ("sch->sch_length incorrect"));
+ sc2 = TAILQ_LAST(&sch->sch_bucket, sch_head);
+ syncache_drop(sc2, sch);
+ TCPSTAT_INC(tcps_sc_bucketoverflow);
+ }
+
+ /* Put it into the bucket. */
+ TAILQ_INSERT_HEAD(&sch->sch_bucket, sc, sc_hash);
+ sch->sch_length++;
+
+ /* Reinitialize the bucket row's timer. */
+ if (sch->sch_length == 1)
+ sch->sch_nextc = ticks + INT_MAX;
+ syncache_timeout(sc, sch, 1);
+
+ SCH_UNLOCK(sch);
+
+ V_tcp_syncache.cache_count++;
+ TCPSTAT_INC(tcps_sc_added);
+}
+
+/*
+ * Remove and free entry from syncache bucket row.
+ * Expects locked syncache head.
+ */
+static void
+syncache_drop(struct syncache *sc, struct syncache_head *sch)
+{
+
+ SCH_LOCK_ASSERT(sch);
+
+ TAILQ_REMOVE(&sch->sch_bucket, sc, sc_hash);
+ sch->sch_length--;
+
+#ifndef TCP_OFFLOAD_DISABLE
+ if (sc->sc_tu)
+ sc->sc_tu->tu_syncache_event(TOE_SC_DROP, sc->sc_toepcb);
+#endif
+ syncache_free(sc);
+ V_tcp_syncache.cache_count--;
+}
+
+/*
+ * Engage/reengage timer on bucket row.
+ */
+static void
+syncache_timeout(struct syncache *sc, struct syncache_head *sch, int docallout)
+{
+ sc->sc_rxttime = ticks +
+ TCPTV_RTOBASE * (tcp_backoff[sc->sc_rxmits]);
+ sc->sc_rxmits++;
+ if (TSTMP_LT(sc->sc_rxttime, sch->sch_nextc)) {
+ sch->sch_nextc = sc->sc_rxttime;
+ if (docallout)
+ callout_reset(&sch->sch_timer, sch->sch_nextc - ticks,
+ syncache_timer, (void *)sch);
+ }
+}
+
+/*
+ * Walk the timer queues, looking for SYN,ACKs that need to be retransmitted.
+ * If we have retransmitted an entry the maximum number of times, expire it.
+ * One separate timer for each bucket row.
+ */
+static void
+syncache_timer(void *xsch)
+{
+ struct syncache_head *sch = (struct syncache_head *)xsch;
+ struct syncache *sc, *nsc;
+ int tick = ticks;
+ char *s;
+
+ CURVNET_SET(sch->sch_vnet);
+
+ /* NB: syncache_head has already been locked by the callout. */
+ SCH_LOCK_ASSERT(sch);
+
+ /*
+ * In the following cycle we may remove some entries and/or
+ * advance some timeouts, so re-initialize the bucket timer.
+ */
+ sch->sch_nextc = tick + INT_MAX;
+
+ TAILQ_FOREACH_SAFE(sc, &sch->sch_bucket, sc_hash, nsc) {
+ /*
+ * We do not check if the listen socket still exists
+ * and accept the case where the listen socket may be
+ * gone by the time we resend the SYN/ACK. We do
+ * not expect this to happen often. If it does,
+ * then the RST will be sent by the time the remote
+ * host does the SYN/ACK->ACK.
+ */
+ if (TSTMP_GT(sc->sc_rxttime, tick)) {
+ if (TSTMP_LT(sc->sc_rxttime, sch->sch_nextc))
+ sch->sch_nextc = sc->sc_rxttime;
+ continue;
+ }
+ if (sc->sc_rxmits > V_tcp_syncache.rexmt_limit) {
+ if ((s = tcp_log_addrs(&sc->sc_inc, NULL, NULL, NULL))) {
+ log(LOG_DEBUG, "%s; %s: Retransmits exhausted, "
+ "giving up and removing syncache entry\n",
+ s, __func__);
+ free(s, M_TCPLOG);
+ }
+ syncache_drop(sc, sch);
+ TCPSTAT_INC(tcps_sc_stale);
+ continue;
+ }
+ if ((s = tcp_log_addrs(&sc->sc_inc, NULL, NULL, NULL))) {
+ log(LOG_DEBUG, "%s; %s: Response timeout, "
+ "retransmitting (%u) SYN|ACK\n",
+ s, __func__, sc->sc_rxmits);
+ free(s, M_TCPLOG);
+ }
+
+ (void) syncache_respond(sc);
+ TCPSTAT_INC(tcps_sc_retransmitted);
+ syncache_timeout(sc, sch, 0);
+ }
+ if (!TAILQ_EMPTY(&(sch)->sch_bucket))
+ callout_reset(&(sch)->sch_timer, (sch)->sch_nextc - tick,
+ syncache_timer, (void *)(sch));
+ CURVNET_RESTORE();
+}
+
+/*
+ * Find an entry in the syncache.
+ * Always returns with the syncache_head locked, plus a matching entry or NULL.
+ */
+struct syncache *
+syncache_lookup(struct in_conninfo *inc, struct syncache_head **schp)
+{
+ struct syncache *sc;
+ struct syncache_head *sch;
+
+#ifdef INET6
+ if (inc->inc_flags & INC_ISIPV6) {
+ sch = &V_tcp_syncache.hashbase[
+ SYNCACHE_HASH6(inc, V_tcp_syncache.hashmask)];
+ *schp = sch;
+
+ SCH_LOCK(sch);
+
+ /* Circle through bucket row to find matching entry. */
+ TAILQ_FOREACH(sc, &sch->sch_bucket, sc_hash) {
+ if (ENDPTS6_EQ(&inc->inc_ie, &sc->sc_inc.inc_ie))
+ return (sc);
+ }
+ } else
+#endif
+ {
+ sch = &V_tcp_syncache.hashbase[
+ SYNCACHE_HASH(inc, V_tcp_syncache.hashmask)];
+ *schp = sch;
+
+ SCH_LOCK(sch);
+
+ /* Circle through bucket row to find matching entry. */
+ TAILQ_FOREACH(sc, &sch->sch_bucket, sc_hash) {
+#ifdef INET6
+ if (sc->sc_inc.inc_flags & INC_ISIPV6)
+ continue;
+#endif
+ if (ENDPTS_EQ(&inc->inc_ie, &sc->sc_inc.inc_ie))
+ return (sc);
+ }
+ }
+ SCH_LOCK_ASSERT(*schp);
+ return (NULL); /* always returns with locked sch */
+}
+
+/*
+ * This function is called when we get a RST for a
+ * non-existent connection, so that we can see if the
+ * connection is in the syn cache. If it is, zap it.
+ */
+void
+syncache_chkrst(struct in_conninfo *inc, struct tcphdr *th)
+{
+ struct syncache *sc;
+ struct syncache_head *sch;
+ char *s = NULL;
+
+ sc = syncache_lookup(inc, &sch); /* returns locked sch */
+ SCH_LOCK_ASSERT(sch);
+
+ /*
+ * Any RST to our SYN|ACK must not carry ACK, SYN or FIN flags.
+ * See RFC 793 page 65, section SEGMENT ARRIVES.
+ */
+ if (th->th_flags & (TH_ACK|TH_SYN|TH_FIN)) {
+ if ((s = tcp_log_addrs(inc, th, NULL, NULL)))
+ log(LOG_DEBUG, "%s; %s: Spurious RST with ACK, SYN or "
+ "FIN flag set, segment ignored\n", s, __func__);
+ TCPSTAT_INC(tcps_badrst);
+ goto done;
+ }
+
+ /*
+ * No corresponding connection was found in syncache.
+ * If syncookies are enabled and possibly exclusively
+ * used, or we are under memory pressure, a valid RST
+ * may not find a syncache entry. In that case we're
+ * done and no SYN|ACK retransmissions will happen.
+ * Otherwise the RST was misdirected or spoofed.
+ */
+ if (sc == NULL) {
+ if ((s = tcp_log_addrs(inc, th, NULL, NULL)))
+ log(LOG_DEBUG, "%s; %s: Spurious RST without matching "
+ "syncache entry (possibly syncookie only), "
+ "segment ignored\n", s, __func__);
+ TCPSTAT_INC(tcps_badrst);
+ goto done;
+ }
+
+ /*
+ * If the RST bit is set, check the sequence number to see
+ * if this is a valid reset segment.
+ * RFC 793 page 37:
+ * In all states except SYN-SENT, all reset (RST) segments
+ * are validated by checking their SEQ-fields. A reset is
+ * valid if its sequence number is in the window.
+ *
+ * The sequence number in the reset segment is normally an
+ * echo of our outgoing acknowledgement numbers, but some hosts
+ * send a reset with the sequence number at the rightmost edge
+ * of our receive window, and we have to handle this case.
+ */
+ if (SEQ_GEQ(th->th_seq, sc->sc_irs) &&
+ SEQ_LEQ(th->th_seq, sc->sc_irs + sc->sc_wnd)) {
+ syncache_drop(sc, sch);
+ if ((s = tcp_log_addrs(inc, th, NULL, NULL)))
+ log(LOG_DEBUG, "%s; %s: Our SYN|ACK was rejected, "
+ "connection attempt aborted by remote endpoint\n",
+ s, __func__);
+ TCPSTAT_INC(tcps_sc_reset);
+ } else {
+ if ((s = tcp_log_addrs(inc, th, NULL, NULL)))
+ log(LOG_DEBUG, "%s; %s: RST with invalid SEQ %u != "
+ "IRS %u (+WND %u), segment ignored\n",
+ s, __func__, th->th_seq, sc->sc_irs, sc->sc_wnd);
+ TCPSTAT_INC(tcps_badrst);
+ }
+
+done:
+ if (s != NULL)
+ free(s, M_TCPLOG);
+ SCH_UNLOCK(sch);
+}
+
+void
+syncache_badack(struct in_conninfo *inc)
+{
+ struct syncache *sc;
+ struct syncache_head *sch;
+
+ sc = syncache_lookup(inc, &sch); /* returns locked sch */
+ SCH_LOCK_ASSERT(sch);
+ if (sc != NULL) {
+ syncache_drop(sc, sch);
+ TCPSTAT_INC(tcps_sc_badack);
+ }
+ SCH_UNLOCK(sch);
+}
+
+void
+syncache_unreach(struct in_conninfo *inc, struct tcphdr *th)
+{
+ struct syncache *sc;
+ struct syncache_head *sch;
+
+ sc = syncache_lookup(inc, &sch); /* returns locked sch */
+ SCH_LOCK_ASSERT(sch);
+ if (sc == NULL)
+ goto done;
+
+ /* If the sequence number != sc_iss, then it's a bogus ICMP msg */
+ if (ntohl(th->th_seq) != sc->sc_iss)
+ goto done;
+
+ /*
+ * If we've retransmitted 3 times and this is our second error,
+ * we remove the entry. Otherwise, we allow it to continue on.
+ * This prevents us from incorrectly nuking an entry during a
+ * spurious network outage.
+ *
+ * See tcp_notify().
+ */
+ if ((sc->sc_flags & SCF_UNREACH) == 0 || sc->sc_rxmits < 3 + 1) {
+ sc->sc_flags |= SCF_UNREACH;
+ goto done;
+ }
+ syncache_drop(sc, sch);
+ TCPSTAT_INC(tcps_sc_unreach);
+done:
+ SCH_UNLOCK(sch);
+}
+
+/*
+ * Build a new TCP socket structure from a syncache entry.
+ */
+static struct socket *
+syncache_socket(struct syncache *sc, struct socket *lso, struct mbuf *m)
+{
+ struct inpcb *inp = NULL;
+ struct socket *so;
+ struct tcpcb *tp;
+ int error = 0;
+ char *s;
+
+ INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
+
+ /*
+ * Ok, create the full-blown connection, and set things up
+ * as they would have been set up if we had created the
+ * connection when the SYN arrived. If we can't create
+ * the connection, abort it.
+ */
+ so = sonewconn(lso, SS_ISCONNECTED);
+ if (so == NULL) {
+ /*
+ * Drop the connection; we will either send a RST or
+ * have the peer retransmit its SYN again after its
+ * RTO and try again.
+ */
+ TCPSTAT_INC(tcps_listendrop);
+ if ((s = tcp_log_addrs(&sc->sc_inc, NULL, NULL, NULL))) {
+ log(LOG_DEBUG, "%s; %s: Socket create failed "
+ "due to limits or memory shortage\n",
+ s, __func__);
+ free(s, M_TCPLOG);
+ }
+ goto abort2;
+ }
+#ifdef MAC
+ mac_socketpeer_set_from_mbuf(m, so);
+#endif
+
+ inp = sotoinpcb(so);
+ inp->inp_inc.inc_fibnum = so->so_fibnum;
+ INP_WLOCK(inp);
+
+ /* Insert new socket into PCB hash list. */
+ inp->inp_inc.inc_flags = sc->sc_inc.inc_flags;
+#ifdef INET6
+ if (sc->sc_inc.inc_flags & INC_ISIPV6) {
+ inp->in6p_laddr = sc->sc_inc.inc6_laddr;
+ } else {
+ inp->inp_vflag &= ~INP_IPV6;
+ inp->inp_vflag |= INP_IPV4;
+#endif
+ inp->inp_laddr = sc->sc_inc.inc_laddr;
+#ifdef INET6
+ }
+#endif
+ inp->inp_lport = sc->sc_inc.inc_lport;
+ if ((error = in_pcbinshash(inp)) != 0) {
+ /*
+ * Undo the assignments above if we failed to
+ * put the PCB on the hash lists.
+ */
+#ifdef INET6
+ if (sc->sc_inc.inc_flags & INC_ISIPV6)
+ inp->in6p_laddr = in6addr_any;
+ else
+#endif
+ inp->inp_laddr.s_addr = INADDR_ANY;
+ inp->inp_lport = 0;
+ if ((s = tcp_log_addrs(&sc->sc_inc, NULL, NULL, NULL))) {
+ log(LOG_DEBUG, "%s; %s: in_pcbinshash failed "
+ "with error %i\n",
+ s, __func__, error);
+ free(s, M_TCPLOG);
+ }
+ goto abort;
+ }
+#ifdef IPSEC
+ /* Copy old policy into new socket's. */
+ if (ipsec_copy_policy(sotoinpcb(lso)->inp_sp, inp->inp_sp))
+ printf("syncache_socket: could not copy policy\n");
+#endif
+#ifdef INET6
+ if (sc->sc_inc.inc_flags & INC_ISIPV6) {
+ struct inpcb *oinp = sotoinpcb(lso);
+ struct in6_addr laddr6;
+ struct sockaddr_in6 sin6;
+ /*
+ * Inherit socket options from the listening socket.
+ * Note that in6p_inputopts are not (and should not be)
+ * copied, since it stores previously received options and is
+ * used to detect if each new option is different than the
+ * previous one and hence should be passed to a user.
+ * If we copied in6p_inputopts, a user would not be able to
+ * receive options just after calling the accept system call.
+ */
+ inp->inp_flags |= oinp->inp_flags & INP_CONTROLOPTS;
+ if (oinp->in6p_outputopts)
+ inp->in6p_outputopts =
+ ip6_copypktopts(oinp->in6p_outputopts, M_NOWAIT);
+
+ sin6.sin6_family = AF_INET6;
+ sin6.sin6_len = sizeof(sin6);
+ sin6.sin6_addr = sc->sc_inc.inc6_faddr;
+ sin6.sin6_port = sc->sc_inc.inc_fport;
+ sin6.sin6_flowinfo = sin6.sin6_scope_id = 0;
+ laddr6 = inp->in6p_laddr;
+ if (IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr))
+ inp->in6p_laddr = sc->sc_inc.inc6_laddr;
+ if ((error = in6_pcbconnect(inp, (struct sockaddr *)&sin6,
+ thread0.td_ucred)) != 0) {
+ inp->in6p_laddr = laddr6;
+ if ((s = tcp_log_addrs(&sc->sc_inc, NULL, NULL, NULL))) {
+ log(LOG_DEBUG, "%s; %s: in6_pcbconnect failed "
+ "with error %i\n",
+ s, __func__, error);
+ free(s, M_TCPLOG);
+ }
+ goto abort;
+ }
+ /* Override flowlabel from in6_pcbconnect. */
+ inp->inp_flow &= ~IPV6_FLOWLABEL_MASK;
+ inp->inp_flow |= sc->sc_flowlabel;
+ } else
+#endif
+ {
+ struct in_addr laddr;
+ struct sockaddr_in sin;
+
+ inp->inp_options = (m) ? ip_srcroute(m) : NULL;
+
+ if (inp->inp_options == NULL) {
+ inp->inp_options = sc->sc_ipopts;
+ sc->sc_ipopts = NULL;
+ }
+
+ sin.sin_family = AF_INET;
+ sin.sin_len = sizeof(sin);
+ sin.sin_addr = sc->sc_inc.inc_faddr;
+ sin.sin_port = sc->sc_inc.inc_fport;
+ bzero((caddr_t)sin.sin_zero, sizeof(sin.sin_zero));
+ laddr = inp->inp_laddr;
+ if (inp->inp_laddr.s_addr == INADDR_ANY)
+ inp->inp_laddr = sc->sc_inc.inc_laddr;
+ if ((error = in_pcbconnect(inp, (struct sockaddr *)&sin,
+ thread0.td_ucred)) != 0) {
+ inp->inp_laddr = laddr;
+ if ((s = tcp_log_addrs(&sc->sc_inc, NULL, NULL, NULL))) {
+ log(LOG_DEBUG, "%s; %s: in_pcbconnect failed "
+ "with error %i\n",
+ s, __func__, error);
+ free(s, M_TCPLOG);
+ }
+ goto abort;
+ }
+ }
+ tp = intotcpcb(inp);
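+ /* Initialize the tcpcb state from the cached SYN exchange. */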
+ tp->t_state = TCPS_SYN_RECEIVED;
+ tp->iss = sc->sc_iss;
+ tp->irs = sc->sc_irs;
+ tcp_rcvseqinit(tp);
+ tcp_sendseqinit(tp);
+ tp->snd_wl1 = sc->sc_irs;
+ tp->snd_max = tp->iss + 1;
+ tp->snd_nxt = tp->iss + 1;
+ tp->rcv_up = sc->sc_irs + 1;
+ tp->rcv_wnd = sc->sc_wnd;
+ tp->rcv_adv += tp->rcv_wnd;
+ tp->last_ack_sent = tp->rcv_nxt;
+
+ tp->t_flags = sototcpcb(lso)->t_flags & (TF_NOPUSH|TF_NODELAY);
+ if (sc->sc_flags & SCF_NOOPT)
+ tp->t_flags |= TF_NOOPT;
+ else {
+ if (sc->sc_flags & SCF_WINSCALE) {
+ tp->t_flags |= TF_REQ_SCALE|TF_RCVD_SCALE;
+ tp->snd_scale = sc->sc_requested_s_scale;
+ tp->request_r_scale = sc->sc_requested_r_scale;
+ }
+ if (sc->sc_flags & SCF_TIMESTAMP) {
+ tp->t_flags |= TF_REQ_TSTMP|TF_RCVD_TSTMP;
+ tp->ts_recent = sc->sc_tsreflect;
+ tp->ts_recent_age = ticks;
+ tp->ts_offset = sc->sc_tsoff;
+ }
+#ifdef TCP_SIGNATURE
+ if (sc->sc_flags & SCF_SIGNATURE)
+ tp->t_flags |= TF_SIGNATURE;
+#endif
+ if (sc->sc_flags & SCF_SACK)
+ tp->t_flags |= TF_SACK_PERMIT;
+ }
+
+ if (sc->sc_flags & SCF_ECN)
+ tp->t_flags |= TF_ECN_PERMIT;
+
+ /*
+ * Set up MSS and get cached values from tcp_hostcache.
+ * This might overwrite some of the defaults we just set.
+ */
+ tcp_mss(tp, sc->sc_peer_mss);
+
+ /*
+ * If the SYN,ACK was retransmitted, reset cwnd to 1 segment.
+ * NB: sc_rxmits counts all SYN,ACK transmits, not just retransmits.
+ */
+ if (sc->sc_rxmits > 1)
+ tp->snd_cwnd = tp->t_maxseg;
+ tcp_timer_activate(tp, TT_KEEP, tcp_keepinit);
+
+ INP_WUNLOCK(inp);
+
+ TCPSTAT_INC(tcps_accepts);
+ return (so);
+
+abort:
+ INP_WUNLOCK(inp);
+abort2:
+ if (so != NULL)
+ soabort(so);
+ return (NULL);
+}
+
+/*
+ * This function gets called when we receive an ACK for a
+ * socket in the LISTEN state. We look up the connection
+ * in the syncache, and if it's there, we pull it out of
+ * the cache and turn it into a full-blown connection in
+ * the SYN-RECEIVED state.
+ */
+int
+syncache_expand(struct in_conninfo *inc, struct tcpopt *to, struct tcphdr *th,
+ struct socket **lsop, struct mbuf *m)
+{
+ struct syncache *sc;
+ struct syncache_head *sch;
+ struct syncache scs;
+ char *s;
+
+ /*
+ * Global TCP locks are held because we manipulate the PCB lists
+ * and create a new socket.
+ */
+ INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
+ KASSERT((th->th_flags & (TH_RST|TH_ACK|TH_SYN)) == TH_ACK,
+ ("%s: can handle only ACK", __func__));
+
+ sc = syncache_lookup(inc, &sch); /* returns locked sch */
+ SCH_LOCK_ASSERT(sch);
+ if (sc == NULL) {
+ /*
+ * There is no syncache entry, so see if this ACK is
+ * a returning syncookie. To do this, first:
+ * A. See if this socket has had a syncache entry dropped in
+ * the past. We don't want to accept a bogus syncookie
+ * if we've never received a SYN.
+ * B. check that the syncookie is valid. If it is, then
+ * cobble up a fake syncache entry, and return.
+ */
+ if (!V_tcp_syncookies) {
+ SCH_UNLOCK(sch);
+ if ((s = tcp_log_addrs(inc, th, NULL, NULL)))
+ log(LOG_DEBUG, "%s; %s: Spurious ACK, "
+ "segment rejected (syncookies disabled)\n",
+ s, __func__);
+ goto failed;
+ }
+ bzero(&scs, sizeof(scs));
+ sc = syncookie_lookup(inc, sch, &scs, to, th, *lsop);
+ SCH_UNLOCK(sch);
+ if (sc == NULL) {
+ if ((s = tcp_log_addrs(inc, th, NULL, NULL)))
+ log(LOG_DEBUG, "%s; %s: Segment failed "
+ "SYNCOOKIE authentication, segment rejected "
+ "(probably spoofed)\n", s, __func__);
+ goto failed;
+ }
+ } else {
+ /* Pull out the entry to unlock the bucket row. */
+ TAILQ_REMOVE(&sch->sch_bucket, sc, sc_hash);
+ sch->sch_length--;
+ V_tcp_syncache.cache_count--;
+ SCH_UNLOCK(sch);
+ }
+
+ /*
+ * Segment validation:
+ * ACK must match our initial sequence number + 1 (the SYN|ACK).
+ */
+ if (th->th_ack != sc->sc_iss + 1 && !TOEPCB_ISSET(sc)) {
+ if ((s = tcp_log_addrs(inc, th, NULL, NULL)))
+ log(LOG_DEBUG, "%s; %s: ACK %u != ISS+1 %u, segment "
+ "rejected\n", s, __func__, th->th_ack, sc->sc_iss);
+ goto failed;
+ }
+
+ /*
+ * The SEQ must fall in the window starting at the received
+ * initial receive sequence number + 1 (the SYN).
+ */
+ if ((SEQ_LEQ(th->th_seq, sc->sc_irs) ||
+ SEQ_GT(th->th_seq, sc->sc_irs + sc->sc_wnd)) &&
+ !TOEPCB_ISSET(sc)) {
+ if ((s = tcp_log_addrs(inc, th, NULL, NULL)))
+ log(LOG_DEBUG, "%s; %s: SEQ %u != IRS+1 %u, segment "
+ "rejected\n", s, __func__, th->th_seq, sc->sc_irs);
+ goto failed;
+ }
+
+ if (!(sc->sc_flags & SCF_TIMESTAMP) && (to->to_flags & TOF_TS)) {
+ if ((s = tcp_log_addrs(inc, th, NULL, NULL)))
+ log(LOG_DEBUG, "%s; %s: Timestamp not expected, "
+ "segment rejected\n", s, __func__);
+ goto failed;
+ }
+ /*
+ * If timestamps were negotiated the reflected timestamp
+ * must be equal to what we actually sent in the SYN|ACK.
+ */
+ if ((to->to_flags & TOF_TS) && to->to_tsecr != sc->sc_ts &&
+ !TOEPCB_ISSET(sc)) {
+ if ((s = tcp_log_addrs(inc, th, NULL, NULL)))
+ log(LOG_DEBUG, "%s; %s: TSECR %u != TS %u, "
+ "segment rejected\n",
+ s, __func__, to->to_tsecr, sc->sc_ts);
+ goto failed;
+ }
+
+ *lsop = syncache_socket(sc, *lsop, m);
+
+ if (*lsop == NULL)
+ TCPSTAT_INC(tcps_sc_aborted);
+ else
+ TCPSTAT_INC(tcps_sc_completed);
+
+/* how do we find the inp for the new socket? */
+ if (sc != &scs)
+ syncache_free(sc);
+ return (1);
+failed:
+ if (sc != NULL && sc != &scs)
+ syncache_free(sc);
+ if (s != NULL)
+ free(s, M_TCPLOG);
+ *lsop = NULL;
+ return (0);
+}
+
+int
+tcp_offload_syncache_expand(struct in_conninfo *inc, struct toeopt *toeo,
+ struct tcphdr *th, struct socket **lsop, struct mbuf *m)
+{
+ struct tcpopt to;
+ int rc;
+
+ bzero(&to, sizeof(struct tcpopt));
+ to.to_mss = toeo->to_mss;
+ to.to_wscale = toeo->to_wscale;
+ to.to_flags = toeo->to_flags;
+
+ INP_INFO_WLOCK(&V_tcbinfo);
+ rc = syncache_expand(inc, &to, th, lsop, m);
+ INP_INFO_WUNLOCK(&V_tcbinfo);
+
+ return (rc);
+}
+
+/*
+ * Given a LISTEN socket and an inbound SYN request, add
+ * this to the syn cache, and send back a segment:
+ * <SEQ=ISS><ACK=RCV_NXT><CTL=SYN,ACK>
+ * to the source.
+ *
+ * IMPORTANT NOTE: We do _NOT_ ACK data that might accompany the SYN.
+ * Doing so would require that we hold onto the data and deliver it
+ * to the application. However, if we are the target of a SYN-flood
+ * DoS attack, an attacker could send data which would eventually
+ * consume all available buffer space if it were ACKed. By not ACKing
+ * the data, we avoid this DoS scenario.
+ */
+static void
+_syncache_add(struct in_conninfo *inc, struct tcpopt *to, struct tcphdr *th,
+ struct inpcb *inp, struct socket **lsop, struct mbuf *m,
+ struct toe_usrreqs *tu, void *toepcb)
+{
+ struct tcpcb *tp;
+ struct socket *so;
+ struct syncache *sc = NULL;
+ struct syncache_head *sch;
+ struct mbuf *ipopts = NULL;
+ u_int32_t flowtmp;
+ int win, sb_hiwat, ip_ttl, ip_tos, noopt;
+ char *s;
+#ifdef INET6
+ int autoflowlabel = 0;
+#endif
+#ifdef MAC
+ struct label *maclabel;
+#endif
+ struct syncache scs;
+ struct ucred *cred;
+
+ INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
+ INP_WLOCK_ASSERT(inp); /* listen socket */
+ KASSERT((th->th_flags & (TH_RST|TH_ACK|TH_SYN)) == TH_SYN,
+ ("%s: unexpected tcp flags", __func__));
+
+ /*
+ * Combine all so/tp operations very early to drop the INP lock as
+ * soon as possible.
+ */
+ so = *lsop;
+ tp = sototcpcb(so);
+ cred = crhold(so->so_cred);
+
+#ifdef INET6
+ if ((inc->inc_flags & INC_ISIPV6) &&
+ (inp->inp_flags & IN6P_AUTOFLOWLABEL))
+ autoflowlabel = 1;
+#endif
+ ip_ttl = inp->inp_ip_ttl;
+ ip_tos = inp->inp_ip_tos;
+ win = sbspace(&so->so_rcv);
+ sb_hiwat = so->so_rcv.sb_hiwat;
+ noopt = (tp->t_flags & TF_NOOPT);
+
+ /* By the time we drop the lock these should no longer be used. */
+ so = NULL;
+ tp = NULL;
+
+#ifdef MAC
+ if (mac_syncache_init(&maclabel) != 0) {
+ INP_WUNLOCK(inp);
+ INP_INFO_WUNLOCK(&V_tcbinfo);
+ goto done;
+ } else
+ mac_syncache_create(maclabel, inp);
+#endif
+ INP_WUNLOCK(inp);
+ INP_INFO_WUNLOCK(&V_tcbinfo);
+
+ /*
+ * Remember the IP options, if any.
+ */
+#ifdef INET6
+ if (!(inc->inc_flags & INC_ISIPV6))
+#endif
+ ipopts = (m) ? ip_srcroute(m) : NULL;
+
+ /*
+ * See if we already have an entry for this connection.
+ * If we do, resend the SYN,ACK, and reset the retransmit timer.
+ *
+ * XXX: should the syncache be re-initialized with the contents
+ * of the new SYN here (which may have different options?)
+ *
+ * XXX: We do not check the sequence number to see if this is a
+ * real retransmit or a new connection attempt. The question is
+ * how to handle such a case; either ignore it as spoofed, or
+ * drop the current entry and create a new one?
+ */
+ sc = syncache_lookup(inc, &sch); /* returns locked entry */
+ SCH_LOCK_ASSERT(sch);
+ if (sc != NULL) {
+#ifndef TCP_OFFLOAD_DISABLE
+ if (sc->sc_tu)
+ sc->sc_tu->tu_syncache_event(TOE_SC_ENTRY_PRESENT,
+ sc->sc_toepcb);
+#endif
+ TCPSTAT_INC(tcps_sc_dupsyn);
+ if (ipopts) {
+ /*
+ * If we were remembering a previous source route,
+ * forget it and use the new one we've been given.
+ */
+ if (sc->sc_ipopts)
+ (void) m_free(sc->sc_ipopts);
+ sc->sc_ipopts = ipopts;
+ }
+ /*
+ * Update timestamp if present.
+ */
+ if ((sc->sc_flags & SCF_TIMESTAMP) && (to->to_flags & TOF_TS))
+ sc->sc_tsreflect = to->to_tsval;
+ else
+ sc->sc_flags &= ~SCF_TIMESTAMP;
+#ifdef MAC
+ /*
+ * Since we have already unconditionally allocated label
+ * storage, free it up. The syncache entry will already
+ * have an initialized label we can use.
+ */
+ mac_syncache_destroy(&maclabel);
+#endif
+ /* Retransmit SYN|ACK and reset retransmit count. */
+ if ((s = tcp_log_addrs(&sc->sc_inc, th, NULL, NULL))) {
+ log(LOG_DEBUG, "%s; %s: Received duplicate SYN, "
+ "resetting timer and retransmitting SYN|ACK\n",
+ s, __func__);
+ free(s, M_TCPLOG);
+ }
+ if (!TOEPCB_ISSET(sc) && syncache_respond(sc) == 0) {
+ sc->sc_rxmits = 0;
+ syncache_timeout(sc, sch, 1);
+ TCPSTAT_INC(tcps_sndacks);
+ TCPSTAT_INC(tcps_sndtotal);
+ }
+ SCH_UNLOCK(sch);
+ goto done;
+ }
+
+ sc = uma_zalloc(V_tcp_syncache.zone, M_NOWAIT | M_ZERO);
+ if (sc == NULL) {
+ /*
+ * The zone allocator couldn't provide more entries.
+ * Treat this as if the cache was full; drop the oldest
+ * entry and insert the new one.
+ */
+ TCPSTAT_INC(tcps_sc_zonefail);
+ if ((sc = TAILQ_LAST(&sch->sch_bucket, sch_head)) != NULL)
+ syncache_drop(sc, sch);
+ sc = uma_zalloc(V_tcp_syncache.zone, M_NOWAIT | M_ZERO);
+ if (sc == NULL) {
+ if (V_tcp_syncookies) {
+ bzero(&scs, sizeof(scs));
+ sc = &scs;
+ } else {
+ SCH_UNLOCK(sch);
+ if (ipopts)
+ (void) m_free(ipopts);
+ goto done;
+ }
+ }
+ }
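+	/*
+	 * NB: sc may now point at the on-stack entry scs rather than a
+	 * zone allocation; in that case no syncache state is retained
+	 * after we return and the connection can only be recovered from
+	 * the syncookie embedded in our SYN|ACK.
+	 */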
+
+ /*
+ * Fill in the syncache values.
+ */
+#ifdef MAC
+ sc->sc_label = maclabel;
+#endif
+ sc->sc_cred = cred;
+ cred = NULL;
+ sc->sc_ipopts = ipopts;
+ bcopy(inc, &sc->sc_inc, sizeof(struct in_conninfo));
+#ifdef INET6
+ if (!(inc->inc_flags & INC_ISIPV6))
+#endif
+ {
+ sc->sc_ip_tos = ip_tos;
+ sc->sc_ip_ttl = ip_ttl;
+ }
+#ifndef TCP_OFFLOAD_DISABLE
+ sc->sc_tu = tu;
+ sc->sc_toepcb = toepcb;
+#endif
+ sc->sc_irs = th->th_seq;
+ sc->sc_iss = arc4random();
+ sc->sc_flags = 0;
+ sc->sc_flowlabel = 0;
+
+ /*
+ * Initial receive window: clip sbspace to [0 .. TCP_MAXWIN].
+ * win was derived from socket earlier in the function.
+ */
+ win = imax(win, 0);
+ win = imin(win, TCP_MAXWIN);
+ sc->sc_wnd = win;
+
+ if (V_tcp_do_rfc1323) {
+ /*
+ * A timestamp received in a SYN makes
+ * it ok to send timestamp requests and replies.
+ */
+ if (to->to_flags & TOF_TS) {
+ sc->sc_tsreflect = to->to_tsval;
+ sc->sc_ts = ticks;
+ sc->sc_flags |= SCF_TIMESTAMP;
+ }
+ if (to->to_flags & TOF_SCALE) {
+ int wscale = 0;
+
+ /*
+ * Pick the smallest possible scaling factor that
+ * will still allow us to scale up to sb_max, aka
+ * kern.ipc.maxsockbuf.
+ *
+ * We do this because there are broken firewalls that
+ * will corrupt the window scale option, leading to
+ * the other endpoint believing that our advertised
+ * window is unscaled. At scale factors larger than
+ * 5 the unscaled window will drop below 1500 bytes,
+ * leading to serious problems when traversing these
+ * broken firewalls.
+ *
+ * With the default maxsockbuf of 256K, a scale factor
+ * of 3 will be chosen by this algorithm. Those who
+ * choose a larger maxsockbuf should watch out
+	 * for the compatibility problems mentioned above.
+ *
+ * RFC1323: The Window field in a SYN (i.e., a <SYN>
+ * or <SYN,ACK>) segment itself is never scaled.
+ */
+ while (wscale < TCP_MAX_WINSHIFT &&
+ (TCP_MAXWIN << wscale) < sb_max)
+ wscale++;
+ sc->sc_requested_r_scale = wscale;
+ sc->sc_requested_s_scale = to->to_wscale;
+ sc->sc_flags |= SCF_WINSCALE;
+ }
+ }
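+	/*
+	 * Worked example (illustrative): with TCP_MAXWIN = 65535 and the
+	 * default sb_max of 256K (262144), the loop above stops at
+	 * wscale = 3, since 65535 << 2 = 262140 is still below 262144
+	 * while 65535 << 3 is not.
+	 */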
+#ifdef TCP_SIGNATURE
+ /*
+ * If listening socket requested TCP digests, and received SYN
+ * contains the option, flag this in the syncache so that
+ * syncache_respond() will do the right thing with the SYN+ACK.
+ * XXX: Currently we always record the option by default and will
+ * attempt to use it in syncache_respond().
+ */
+ if (to->to_flags & TOF_SIGNATURE)
+ sc->sc_flags |= SCF_SIGNATURE;
+#endif
+ if (to->to_flags & TOF_SACKPERM)
+ sc->sc_flags |= SCF_SACK;
+ if (to->to_flags & TOF_MSS)
+ sc->sc_peer_mss = to->to_mss; /* peer mss may be zero */
+ if (noopt)
+ sc->sc_flags |= SCF_NOOPT;
+ if ((th->th_flags & (TH_ECE|TH_CWR)) && V_tcp_do_ecn)
+ sc->sc_flags |= SCF_ECN;
+
+ if (V_tcp_syncookies) {
+ syncookie_generate(sch, sc, &flowtmp);
+#ifdef INET6
+ if (autoflowlabel)
+ sc->sc_flowlabel = flowtmp;
+#endif
+ } else {
+#ifdef INET6
+ if (autoflowlabel)
+ sc->sc_flowlabel =
+ (htonl(ip6_randomflowlabel()) & IPV6_FLOWLABEL_MASK);
+#endif
+ }
+ SCH_UNLOCK(sch);
+
+ /*
+ * Do a standard 3-way handshake.
+ */
+ if (TOEPCB_ISSET(sc) || syncache_respond(sc) == 0) {
+ if (V_tcp_syncookies && V_tcp_syncookiesonly && sc != &scs)
+ syncache_free(sc);
+ else if (sc != &scs)
+ syncache_insert(sc, sch); /* locks and unlocks sch */
+ TCPSTAT_INC(tcps_sndacks);
+ TCPSTAT_INC(tcps_sndtotal);
+ } else {
+ if (sc != &scs)
+ syncache_free(sc);
+ TCPSTAT_INC(tcps_sc_dropped);
+ }
+
+done:
+ if (cred != NULL)
+ crfree(cred);
+#ifdef MAC
+ if (sc == &scs)
+ mac_syncache_destroy(&maclabel);
+#endif
+ if (m) {
+ *lsop = NULL;
+ m_freem(m);
+ }
+}
+
+static int
+syncache_respond(struct syncache *sc)
+{
+ struct ip *ip = NULL;
+ struct mbuf *m;
+ struct tcphdr *th;
+ int optlen, error;
+ u_int16_t hlen, tlen, mssopt;
+ struct tcpopt to;
+#ifdef INET6
+ struct ip6_hdr *ip6 = NULL;
+#endif
+
+ hlen =
+#ifdef INET6
+ (sc->sc_inc.inc_flags & INC_ISIPV6) ? sizeof(struct ip6_hdr) :
+#endif
+ sizeof(struct ip);
+ tlen = hlen + sizeof(struct tcphdr);
+
+	/* Determine the MSS we advertise to the other end of the connection. */
+	mssopt = tcp_mssopt(&sc->sc_inc);
+	if (sc->sc_peer_mss)
+		mssopt = max(min(sc->sc_peer_mss, mssopt), V_tcp_minmss);
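+	/*
+	 * Worked example (illustrative): if the peer advertised an MSS
+	 * of 1400 and tcp_mssopt() yields 1460 locally, we advertise
+	 * min(1400, 1460) = 1400, clamped from below by V_tcp_minmss.
+	 */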
+
+ /* XXX: Assume that the entire packet will fit in a header mbuf. */
+ KASSERT(max_linkhdr + tlen + TCP_MAXOLEN <= MHLEN,
+ ("syncache: mbuf too small"));
+
+ /* Create the IP+TCP header from scratch. */
+ m = m_gethdr(M_DONTWAIT, MT_DATA);
+ if (m == NULL)
+ return (ENOBUFS);
+#ifdef MAC
+ mac_syncache_create_mbuf(sc->sc_label, m);
+#endif
+ m->m_data += max_linkhdr;
+ m->m_len = tlen;
+ m->m_pkthdr.len = tlen;
+ m->m_pkthdr.rcvif = NULL;
+
+#ifdef INET6
+ if (sc->sc_inc.inc_flags & INC_ISIPV6) {
+ ip6 = mtod(m, struct ip6_hdr *);
+ ip6->ip6_vfc = IPV6_VERSION;
+ ip6->ip6_nxt = IPPROTO_TCP;
+ ip6->ip6_src = sc->sc_inc.inc6_laddr;
+ ip6->ip6_dst = sc->sc_inc.inc6_faddr;
+ ip6->ip6_plen = htons(tlen - hlen);
+ /* ip6_hlim is set after checksum */
+ ip6->ip6_flow &= ~IPV6_FLOWLABEL_MASK;
+ ip6->ip6_flow |= sc->sc_flowlabel;
+
+ th = (struct tcphdr *)(ip6 + 1);
+ } else
+#endif
+ {
+ ip = mtod(m, struct ip *);
+ ip->ip_v = IPVERSION;
+ ip->ip_hl = sizeof(struct ip) >> 2;
+ ip->ip_len = tlen;
+ ip->ip_id = 0;
+ ip->ip_off = 0;
+ ip->ip_sum = 0;
+ ip->ip_p = IPPROTO_TCP;
+ ip->ip_src = sc->sc_inc.inc_laddr;
+ ip->ip_dst = sc->sc_inc.inc_faddr;
+ ip->ip_ttl = sc->sc_ip_ttl;
+ ip->ip_tos = sc->sc_ip_tos;
+
+ /*
+	 * See if we should do MTU discovery.  Route lookups are
+	 * expensive, so we will only leave the DF bit unset if either:
+	 *
+	 *	1) path_mtu_discovery is disabled, or
+	 *	2) the SCF_UNREACH flag has been set.
+ */
+ if (V_path_mtu_discovery && ((sc->sc_flags & SCF_UNREACH) == 0))
+ ip->ip_off |= IP_DF;
+
+ th = (struct tcphdr *)(ip + 1);
+ }
+ th->th_sport = sc->sc_inc.inc_lport;
+ th->th_dport = sc->sc_inc.inc_fport;
+
+ th->th_seq = htonl(sc->sc_iss);
+ th->th_ack = htonl(sc->sc_irs + 1);
+ th->th_off = sizeof(struct tcphdr) >> 2;
+ th->th_x2 = 0;
+ th->th_flags = TH_SYN|TH_ACK;
+ th->th_win = htons(sc->sc_wnd);
+ th->th_urp = 0;
+
+ if (sc->sc_flags & SCF_ECN) {
+ th->th_flags |= TH_ECE;
+ TCPSTAT_INC(tcps_ecn_shs);
+ }
+
+ /* Tack on the TCP options. */
+ if ((sc->sc_flags & SCF_NOOPT) == 0) {
+ to.to_flags = 0;
+
+ to.to_mss = mssopt;
+ to.to_flags = TOF_MSS;
+ if (sc->sc_flags & SCF_WINSCALE) {
+ to.to_wscale = sc->sc_requested_r_scale;
+ to.to_flags |= TOF_SCALE;
+ }
+ if (sc->sc_flags & SCF_TIMESTAMP) {
+ /* Virgin timestamp or TCP cookie enhanced one. */
+ to.to_tsval = sc->sc_ts;
+ to.to_tsecr = sc->sc_tsreflect;
+ to.to_flags |= TOF_TS;
+ }
+ if (sc->sc_flags & SCF_SACK)
+ to.to_flags |= TOF_SACKPERM;
+#ifdef TCP_SIGNATURE
+ if (sc->sc_flags & SCF_SIGNATURE)
+ to.to_flags |= TOF_SIGNATURE;
+#endif
+ optlen = tcp_addoptions(&to, (u_char *)(th + 1));
+
+ /* Adjust headers by option size. */
+ th->th_off = (sizeof(struct tcphdr) + optlen) >> 2;
+ m->m_len += optlen;
+ m->m_pkthdr.len += optlen;
+
+#ifdef TCP_SIGNATURE
+ if (sc->sc_flags & SCF_SIGNATURE)
+ tcp_signature_compute(m, 0, 0, optlen,
+ to.to_signature, IPSEC_DIR_OUTBOUND);
+#endif
+#ifdef INET6
+ if (sc->sc_inc.inc_flags & INC_ISIPV6)
+ ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) + optlen);
+ else
+#endif
+ ip->ip_len += optlen;
+ } else
+ optlen = 0;
+
+ M_SETFIB(m, sc->sc_inc.inc_fibnum);
+#ifdef INET6
+ if (sc->sc_inc.inc_flags & INC_ISIPV6) {
+ th->th_sum = 0;
+ th->th_sum = in6_cksum(m, IPPROTO_TCP, hlen,
+ tlen + optlen - hlen);
+ ip6->ip6_hlim = in6_selecthlim(NULL, NULL);
+ error = ip6_output(m, NULL, NULL, 0, NULL, NULL, NULL);
+ } else
+#endif
+ {
+ th->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
+ htons(tlen + optlen - hlen + IPPROTO_TCP));
+ m->m_pkthdr.csum_flags = CSUM_TCP;
+ m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
+ error = ip_output(m, sc->sc_ipopts, NULL, 0, NULL, NULL);
+ }
+ return (error);
+}
+
+void
+syncache_add(struct in_conninfo *inc, struct tcpopt *to, struct tcphdr *th,
+ struct inpcb *inp, struct socket **lsop, struct mbuf *m)
+{
+ _syncache_add(inc, to, th, inp, lsop, m, NULL, NULL);
+}
+
+void
+tcp_offload_syncache_add(struct in_conninfo *inc, struct toeopt *toeo,
+ struct tcphdr *th, struct inpcb *inp, struct socket **lsop,
+ struct toe_usrreqs *tu, void *toepcb)
+{
+ struct tcpopt to;
+
+ bzero(&to, sizeof(struct tcpopt));
+ to.to_mss = toeo->to_mss;
+ to.to_wscale = toeo->to_wscale;
+ to.to_flags = toeo->to_flags;
+
+ INP_INFO_WLOCK(&V_tcbinfo);
+ INP_WLOCK(inp);
+
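+	/* NB: _syncache_add() releases both locks taken above. */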
+ _syncache_add(inc, &to, th, inp, lsop, NULL, tu, toepcb);
+}
+
+/*
+ * The purpose of SYN cookies is to avoid keeping track of all SYN's we
+ * receive and to be able to handle SYN floods from bogus source addresses
+ * (where we will never receive any reply). SYN floods try to exhaust all
+ * our memory and available slots in the SYN cache table to cause a denial
+ * of service to legitimate users of the local host.
+ *
+ * The idea of SYN cookies is to encode and include all necessary information
+ * about the connection setup state within the SYN-ACK we send back and thus
+ * to get along without keeping any local state until the ACK to the SYN-ACK
+ * arrives (if ever). Everything we need to know should be available from
+ * the information we encoded in the SYN-ACK.
+ *
+ * More information about the theory behind SYN cookies and its first
+ * discussion and specification can be found at:
+ * http://cr.yp.to/syncookies.html (overview)
+ * http://cr.yp.to/syncookies/archive (gory details)
+ *
+ * This implementation extends the original idea and FreeBSD's first
+ * implementation by using not only the initial sequence number field to store
+ * information but also the timestamp field if present. This way we can
+ * keep track of the entire state we need to know to recreate the session in
+ * its original form. Almost all TCP speakers implement RFC1323 timestamps
+ * these days. For those that do not we still have to live with the known
+ * shortcomings of the ISN only SYN cookies.
+ *
+ * Cookie layers:
+ *
+ * Initial sequence number we send:
+ * 31|................................|0
+ * DDDDDDDDDDDDDDDDDDDDDDDDDMMMRRRP
+ * D = MD5 Digest (first dword)
+ * M = MSS index
+ * R = Rotation of secret
+ * P = Odd or Even secret
+ *
+ * The MD5 digest is computed over the following parameters:
+ * a) randomly rotated secret
+ * b) struct in_conninfo containing the remote/local ip/port (IPv4&IPv6)
+ * c) the received initial sequence number from remote host
+ * d) the rotation offset and odd/even bit
+ *
+ * Timestamp we send:
+ * 31|................................|0
+ * DDDDDDDDDDDDDDDDDDDDDDSSSSRRRRA5
+ * D = MD5 Digest (third dword) (only as filler)
+ * S = Requested send window scale
+ * R = Requested receive window scale
+ * A = SACK allowed
+ * 5 = TCP-MD5 enabled (not implemented yet)
+ *	XORed with MD5 Digest (fourth dword)
+ *
+ * The timestamp isn't cryptographically secure and doesn't need to be.
+ * The double use of the MD5 digest dwords ties it to a specific remote/
+ * local host/port, remote initial sequence number and our local time
+ * limited secret. A received timestamp is reverted (XORed) and then
+ * the contained MD5 dword is compared to the computed one to ensure the
+ * timestamp belongs to the SYN-ACK we sent. The other parameters may
+ * have been tampered with but this isn't different from supplying bogus
+ * values in the SYN in the first place.
+ *
+ * Some problems with SYN cookies remain however:
+ * Consider the problem of a recreated (and retransmitted) cookie. If the
+ * original SYN was accepted, the connection is established. The second
+ * SYN is inflight, and if it arrives with an ISN that falls within the
+ * receive window, the connection is killed.
+ *
+ * Notes:
+ * A heuristic to determine when to accept syn cookies is not necessary.
+ * An ACK flood would cause the syncookie verification to be attempted,
+ * but a SYN flood causes syncookies to be generated. Both are of equal
+ * cost, so there's no point in trying to optimize the ACK flood case.
+ * Also, if you don't process certain ACKs for some reason, then all someone
+ * would have to do is launch a SYN and ACK flood at the same time, which
+ * would stop cookie verification and defeat the entire purpose of syncookies.
+ */
+static int tcp_sc_msstab[] = { 0, 256, 468, 536, 996, 1452, 1460, 8960 };
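+
+/*
+ * Illustrative sketch (not part of the original source): how the low
+ * seven bits of the cookie ISS are packed and later recovered.  The
+ * helper names are hypothetical; the shifts mirror syncookie_generate()
+ * and syncookie_lookup() below.
+ */
+#if 0
+static u_int32_t
+syncookie_pack_bits(u_int oddeven, u_int off, u_int mssidx)
+{
+	u_int32_t data;
+
+	data = oddeven & 0x1;		/* odd or even secret, 1 bit */
+	data |= (off & 0x7) << 1;	/* secret rotation offset, 3 bits */
+	data |= (mssidx & 0x7) << 4;	/* tcp_sc_msstab[] index, 3 bits */
+	return (data);			/* MD5 digest fills bits 7..31 */
+}
+
+static void
+syncookie_unpack_bits(u_int32_t iss, u_int *oddeven, u_int *off,
+    u_int *mssidx)
+{
+	*oddeven = iss & 0x1;
+	*off = (iss >> 1) & 0x7;
+	*mssidx = (iss >> 4) & 0x7;
+}
+#endif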
+
+static void
+syncookie_generate(struct syncache_head *sch, struct syncache *sc,
+ u_int32_t *flowlabel)
+{
+ MD5_CTX ctx;
+ u_int32_t md5_buffer[MD5_DIGEST_LENGTH / sizeof(u_int32_t)];
+ u_int32_t data;
+ u_int32_t *secbits;
+ u_int off, pmss, mss;
+ int i;
+
+ SCH_LOCK_ASSERT(sch);
+
+ /* Which of the two secrets to use. */
+ secbits = sch->sch_oddeven ?
+ sch->sch_secbits_odd : sch->sch_secbits_even;
+
+ /* Reseed secret if too old. */
+ if (sch->sch_reseed < time_uptime) {
+ sch->sch_oddeven = sch->sch_oddeven ? 0 : 1; /* toggle */
+ secbits = sch->sch_oddeven ?
+ sch->sch_secbits_odd : sch->sch_secbits_even;
+ for (i = 0; i < SYNCOOKIE_SECRET_SIZE; i++)
+ secbits[i] = arc4random();
+ sch->sch_reseed = time_uptime + SYNCOOKIE_LIFETIME;
+ }
+
+ /* Secret rotation offset. */
+ off = sc->sc_iss & 0x7; /* iss was randomized before */
+
+ /* Maximum segment size calculation. */
+	pmss =
+	    max(min(sc->sc_peer_mss, tcp_mssopt(&sc->sc_inc)), V_tcp_minmss);
+ for (mss = sizeof(tcp_sc_msstab) / sizeof(int) - 1; mss > 0; mss--)
+ if (tcp_sc_msstab[mss] <= pmss)
+ break;
+
+ /* Fold parameters and MD5 digest into the ISN we will send. */
+	data = sch->sch_oddeven;	/* odd or even secret, 1 bit */
+ data |= off << 1; /* secret offset, derived from iss, 3 bits */
+ data |= mss << 4; /* mss, 3 bits */
+
+ MD5Init(&ctx);
+ MD5Update(&ctx, ((u_int8_t *)secbits) + off,
+ SYNCOOKIE_SECRET_SIZE * sizeof(*secbits) - off);
+ MD5Update(&ctx, secbits, off);
+ MD5Update(&ctx, &sc->sc_inc, sizeof(sc->sc_inc));
+ MD5Update(&ctx, &sc->sc_irs, sizeof(sc->sc_irs));
+ MD5Update(&ctx, &data, sizeof(data));
+ MD5Final((u_int8_t *)&md5_buffer, &ctx);
+
+ data |= (md5_buffer[0] << 7);
+ sc->sc_iss = data;
+
+#ifdef INET6
+ *flowlabel = md5_buffer[1] & IPV6_FLOWLABEL_MASK;
+#endif
+
+ /* Additional parameters are stored in the timestamp if present. */
+ if (sc->sc_flags & SCF_TIMESTAMP) {
+ data = ((sc->sc_flags & SCF_SIGNATURE) ? 1 : 0); /* TCP-MD5, 1 bit */
+ data |= ((sc->sc_flags & SCF_SACK) ? 1 : 0) << 1; /* SACK, 1 bit */
+ data |= sc->sc_requested_s_scale << 2; /* SWIN scale, 4 bits */
+ data |= sc->sc_requested_r_scale << 6; /* RWIN scale, 4 bits */
+ data |= md5_buffer[2] << 10; /* more digest bits */
+ data ^= md5_buffer[3];
+ sc->sc_ts = data;
+ sc->sc_tsoff = data - ticks; /* after XOR */
+ }
+
+ TCPSTAT_INC(tcps_sc_sendcookie);
+}
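+
+/*
+ * Illustrative note (not part of the original source): to recover the
+ * option bits from a reflected timestamp, syncookie_lookup() below first
+ * XORs the received TSecr with md5_buffer[3]; the low ten bits then
+ * decode as TCP-MD5 (bit 0), SACK (bit 1), send window scale (bits 2-5)
+ * and receive window scale (bits 6-9), while bits 10..31 must match
+ * md5_buffer[2] for the cookie to be accepted.
+ */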
+
+static struct syncache *
+syncookie_lookup(struct in_conninfo *inc, struct syncache_head *sch,
+ struct syncache *sc, struct tcpopt *to, struct tcphdr *th,
+ struct socket *so)
+{
+ MD5_CTX ctx;
+ u_int32_t md5_buffer[MD5_DIGEST_LENGTH / sizeof(u_int32_t)];
+ u_int32_t data = 0;
+ u_int32_t *secbits;
+ tcp_seq ack, seq;
+ int off, mss, wnd, flags;
+
+ SCH_LOCK_ASSERT(sch);
+
+ /*
+ * Pull information out of SYN-ACK/ACK and
+ * revert sequence number advances.
+ */
+ ack = th->th_ack - 1;
+ seq = th->th_seq - 1;
+ off = (ack >> 1) & 0x7;
+ mss = (ack >> 4) & 0x7;
+ flags = ack & 0x7f;
+
+ /* Which of the two secrets to use. */
+ secbits = (flags & 0x1) ? sch->sch_secbits_odd : sch->sch_secbits_even;
+
+ /*
+	 * The secret hasn't been updated within the lifetime of a syncookie,
+	 * so this SYN-ACK/ACK is either too old (a replay) or totally bogus.
+ */
+ if (sch->sch_reseed + SYNCOOKIE_LIFETIME < time_uptime) {
+ return (NULL);
+ }
+
+ /* Recompute the digest so we can compare it. */
+ MD5Init(&ctx);
+ MD5Update(&ctx, ((u_int8_t *)secbits) + off,
+ SYNCOOKIE_SECRET_SIZE * sizeof(*secbits) - off);
+ MD5Update(&ctx, secbits, off);
+ MD5Update(&ctx, inc, sizeof(*inc));
+ MD5Update(&ctx, &seq, sizeof(seq));
+ MD5Update(&ctx, &flags, sizeof(flags));
+ MD5Final((u_int8_t *)&md5_buffer, &ctx);
+
+	/* Does the digest part of our ACK'ed ISS match? */
+ if ((ack & (~0x7f)) != (md5_buffer[0] << 7))
+ return (NULL);
+
+ /* Does the digest part of our reflected timestamp match? */
+ if (to->to_flags & TOF_TS) {
+ data = md5_buffer[3] ^ to->to_tsecr;
+ if ((data & (~0x3ff)) != (md5_buffer[2] << 10))
+ return (NULL);
+ }
+
+ /* Fill in the syncache values. */
+ bcopy(inc, &sc->sc_inc, sizeof(struct in_conninfo));
+ sc->sc_ipopts = NULL;
+
+ sc->sc_irs = seq;
+ sc->sc_iss = ack;
+
+#ifdef INET6
+ if (inc->inc_flags & INC_ISIPV6) {
+ if (sotoinpcb(so)->inp_flags & IN6P_AUTOFLOWLABEL)
+ sc->sc_flowlabel = md5_buffer[1] & IPV6_FLOWLABEL_MASK;
+ } else
+#endif
+ {
+ sc->sc_ip_ttl = sotoinpcb(so)->inp_ip_ttl;
+ sc->sc_ip_tos = sotoinpcb(so)->inp_ip_tos;
+ }
+
+ /* Additional parameters that were encoded in the timestamp. */
+ if (data) {
+ sc->sc_flags |= SCF_TIMESTAMP;
+ sc->sc_tsreflect = to->to_tsval;
+ sc->sc_ts = to->to_tsecr;
+ sc->sc_tsoff = to->to_tsecr - ticks;
+ sc->sc_flags |= (data & 0x1) ? SCF_SIGNATURE : 0;
+ sc->sc_flags |= ((data >> 1) & 0x1) ? SCF_SACK : 0;
+ sc->sc_requested_s_scale = min((data >> 2) & 0xf,
+ TCP_MAX_WINSHIFT);
+ sc->sc_requested_r_scale = min((data >> 6) & 0xf,
+ TCP_MAX_WINSHIFT);
+ if (sc->sc_requested_s_scale || sc->sc_requested_r_scale)
+ sc->sc_flags |= SCF_WINSCALE;
+ } else
+ sc->sc_flags |= SCF_NOOPT;
+
+ wnd = sbspace(&so->so_rcv);
+ wnd = imax(wnd, 0);
+ wnd = imin(wnd, TCP_MAXWIN);
+ sc->sc_wnd = wnd;
+
+ sc->sc_rxmits = 0;
+ sc->sc_peer_mss = tcp_sc_msstab[mss];
+
+ TCPSTAT_INC(tcps_sc_recvcookie);
+ return (sc);
+}
+
+/*
+ * Returns the current number of syncache entries. This number
+ * will probably change before you get around to calling
+ * syncache_pcblist.
+ */
+int
+syncache_pcbcount(void)
+{
+ struct syncache_head *sch;
+ int count, i;
+
+ for (count = 0, i = 0; i < V_tcp_syncache.hashsize; i++) {
+ /* No need to lock for a read. */
+ sch = &V_tcp_syncache.hashbase[i];
+ count += sch->sch_length;
+ }
+	return (count);
+}
+
+/*
+ * Exports the syncache entries to userland so that netstat can display
+ * them alongside the other sockets. This function is intended to be
+ * called only from tcp_pcblist.
+ *
+ * Due to concurrency on an active system, the number of pcbs exported
+ * may have no relation to max_pcbs. max_pcbs merely indicates the
+ * amount of space the caller allocated for this function to use.
+ */
+int
+syncache_pcblist(struct sysctl_req *req, int max_pcbs, int *pcbs_exported)
+{
+ struct xtcpcb xt;
+ struct syncache *sc;
+ struct syncache_head *sch;
+ int count, error, i;
+
+ for (count = 0, error = 0, i = 0; i < V_tcp_syncache.hashsize; i++) {
+ sch = &V_tcp_syncache.hashbase[i];
+ SCH_LOCK(sch);
+ TAILQ_FOREACH(sc, &sch->sch_bucket, sc_hash) {
+ if (count >= max_pcbs) {
+ SCH_UNLOCK(sch);
+ goto exit;
+ }
+ if (cr_cansee(req->td->td_ucred, sc->sc_cred) != 0)
+ continue;
+ bzero(&xt, sizeof(xt));
+ xt.xt_len = sizeof(xt);
+ if (sc->sc_inc.inc_flags & INC_ISIPV6)
+ xt.xt_inp.inp_vflag = INP_IPV6;
+ else
+ xt.xt_inp.inp_vflag = INP_IPV4;
+			bcopy(&sc->sc_inc, &xt.xt_inp.inp_inc,
+			    sizeof(struct in_conninfo));
+ xt.xt_tp.t_inpcb = &xt.xt_inp;
+ xt.xt_tp.t_state = TCPS_SYN_RECEIVED;
+ xt.xt_socket.xso_protocol = IPPROTO_TCP;
+ xt.xt_socket.xso_len = sizeof (struct xsocket);
+ xt.xt_socket.so_type = SOCK_STREAM;
+ xt.xt_socket.so_state = SS_ISCONNECTING;
+ error = SYSCTL_OUT(req, &xt, sizeof xt);
+ if (error) {
+ SCH_UNLOCK(sch);
+ goto exit;
+ }
+ count++;
+ }
+ SCH_UNLOCK(sch);
+ }
+exit:
+ *pcbs_exported = count;
+	return (error);
+}
diff --git a/rtems/freebsd/netinet/tcp_syncache.h b/rtems/freebsd/netinet/tcp_syncache.h
new file mode 100644
index 00000000..96ba1535
--- /dev/null
+++ b/rtems/freebsd/netinet/tcp_syncache.h
@@ -0,0 +1,127 @@
+/*-
+ * Copyright (c) 1982, 1986, 1993, 1994, 1995
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)tcp_var.h 8.4 (Berkeley) 5/24/95
+ * $FreeBSD$
+ */
+
+#ifndef _NETINET_TCP_SYNCACHE_HH_
+#define _NETINET_TCP_SYNCACHE_HH_
+#ifdef _KERNEL
+
+struct toeopt;
+
+void syncache_init(void);
+#ifdef VIMAGE
+void syncache_destroy(void);
+#endif
+void syncache_unreach(struct in_conninfo *, struct tcphdr *);
+int syncache_expand(struct in_conninfo *, struct tcpopt *,
+ struct tcphdr *, struct socket **, struct mbuf *);
+int tcp_offload_syncache_expand(struct in_conninfo *inc, struct toeopt *toeo,
+ struct tcphdr *th, struct socket **lsop, struct mbuf *m);
+void syncache_add(struct in_conninfo *, struct tcpopt *,
+ struct tcphdr *, struct inpcb *, struct socket **, struct mbuf *);
+void tcp_offload_syncache_add(struct in_conninfo *, struct toeopt *,
+ struct tcphdr *, struct inpcb *, struct socket **,
+ struct toe_usrreqs *tu, void *toepcb);
+
+void syncache_chkrst(struct in_conninfo *, struct tcphdr *);
+void syncache_badack(struct in_conninfo *);
+int syncache_pcbcount(void);
+int syncache_pcblist(struct sysctl_req *req, int max_pcbs, int *pcbs_exported);
+
+struct syncache {
+ TAILQ_ENTRY(syncache) sc_hash;
+ struct in_conninfo sc_inc; /* addresses */
+ int sc_rxttime; /* retransmit time */
+ u_int16_t sc_rxmits; /* retransmit counter */
+ u_int32_t sc_tsreflect; /* timestamp to reflect */
+ u_int32_t sc_ts; /* our timestamp to send */
+ u_int32_t sc_tsoff; /* ts offset w/ syncookies */
+ u_int32_t sc_flowlabel; /* IPv6 flowlabel */
+ tcp_seq sc_irs; /* seq from peer */
+ tcp_seq sc_iss; /* our ISS */
+ struct mbuf *sc_ipopts; /* source route */
+ u_int16_t sc_peer_mss; /* peer's MSS */
+ u_int16_t sc_wnd; /* advertised window */
+ u_int8_t sc_ip_ttl; /* IPv4 TTL */
+ u_int8_t sc_ip_tos; /* IPv4 TOS */
+ u_int8_t sc_requested_s_scale:4,
+ sc_requested_r_scale:4;
+ u_int16_t sc_flags;
+#ifndef TCP_OFFLOAD_DISABLE
+ struct toe_usrreqs *sc_tu; /* TOE operations */
+ void *sc_toepcb; /* TOE protocol block */
+#endif
+ struct label *sc_label; /* MAC label reference */
+ struct ucred *sc_cred; /* cred cache for jail checks */
+};
+
+/*
+ * Flags for the sc_flags field.
+ */
+#define SCF_NOOPT 0x01 /* no TCP options */
+#define SCF_WINSCALE 0x02 /* negotiated window scaling */
+#define SCF_TIMESTAMP 0x04 /* negotiated timestamps */
+ /* MSS is implicit */
+#define SCF_UNREACH 0x10 /* icmp unreachable received */
+#define SCF_SIGNATURE 0x20 /* send MD5 digests */
+#define SCF_SACK 0x80 /* send SACK option */
+#define SCF_ECN 0x100 /* send ECN setup packet */
+
+#define SYNCOOKIE_SECRET_SIZE 8 /* dwords */
+#define SYNCOOKIE_LIFETIME 16 /* seconds */
+
+struct syncache_head {
+ struct vnet *sch_vnet;
+ struct mtx sch_mtx;
+ TAILQ_HEAD(sch_head, syncache) sch_bucket;
+ struct callout sch_timer;
+ int sch_nextc;
+ u_int sch_length;
+ u_int sch_oddeven;
+ u_int32_t sch_secbits_odd[SYNCOOKIE_SECRET_SIZE];
+ u_int32_t sch_secbits_even[SYNCOOKIE_SECRET_SIZE];
+ u_int sch_reseed; /* time_uptime, seconds */
+};
+
+struct tcp_syncache {
+ struct syncache_head *hashbase;
+ uma_zone_t zone;
+ u_int hashsize;
+ u_int hashmask;
+ u_int bucket_limit;
+ u_int cache_count; /* XXX: unprotected */
+ u_int cache_limit;
+ u_int rexmt_limit;
+ u_int hash_secret;
+};
+
+#endif /* _KERNEL */
+#endif /* !_NETINET_TCP_SYNCACHE_HH_ */
diff --git a/rtems/freebsd/netinet/tcp_timer.c b/rtems/freebsd/netinet/tcp_timer.c
new file mode 100644
index 00000000..5cc8f6cd
--- /dev/null
+++ b/rtems/freebsd/netinet/tcp_timer.c
@@ -0,0 +1,660 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)tcp_timer.c 8.2 (Berkeley) 5/24/95
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/freebsd/local/opt_inet6.h>
+#include <rtems/freebsd/local/opt_tcpdebug.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/mutex.h>
+#include <rtems/freebsd/sys/protosw.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/socketvar.h>
+#include <rtems/freebsd/sys/sysctl.h>
+#include <rtems/freebsd/sys/systm.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/route.h>
+#include <rtems/freebsd/net/vnet.h>
+
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/in_pcb.h>
+#include <rtems/freebsd/netinet/in_systm.h>
+#ifdef INET6
+#include <rtems/freebsd/netinet6/in6_pcb.h>
+#endif
+#include <rtems/freebsd/netinet/ip_var.h>
+#include <rtems/freebsd/netinet/tcp.h>
+#include <rtems/freebsd/netinet/tcp_fsm.h>
+#include <rtems/freebsd/netinet/tcp_timer.h>
+#include <rtems/freebsd/netinet/tcp_var.h>
+#include <rtems/freebsd/netinet/tcpip.h>
+#ifdef TCPDEBUG
+#include <rtems/freebsd/netinet/tcp_debug.h>
+#endif
+
+int tcp_keepinit;
+SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPINIT, keepinit, CTLTYPE_INT|CTLFLAG_RW,
+ &tcp_keepinit, 0, sysctl_msec_to_ticks, "I", "time to establish connection");
+
+int tcp_keepidle;
+SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPIDLE, keepidle, CTLTYPE_INT|CTLFLAG_RW,
+ &tcp_keepidle, 0, sysctl_msec_to_ticks, "I", "time before keepalive probes begin");
+
+int tcp_keepintvl;
+SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPINTVL, keepintvl, CTLTYPE_INT|CTLFLAG_RW,
+ &tcp_keepintvl, 0, sysctl_msec_to_ticks, "I", "time between keepalive probes");
+
+int tcp_delacktime;
+SYSCTL_PROC(_net_inet_tcp, TCPCTL_DELACKTIME, delacktime, CTLTYPE_INT|CTLFLAG_RW,
+ &tcp_delacktime, 0, sysctl_msec_to_ticks, "I",
+ "Time before a delayed ACK is sent");
+
+int tcp_msl;
+SYSCTL_PROC(_net_inet_tcp, OID_AUTO, msl, CTLTYPE_INT|CTLFLAG_RW,
+ &tcp_msl, 0, sysctl_msec_to_ticks, "I", "Maximum segment lifetime");
+
+int tcp_rexmit_min;
+SYSCTL_PROC(_net_inet_tcp, OID_AUTO, rexmit_min, CTLTYPE_INT|CTLFLAG_RW,
+ &tcp_rexmit_min, 0, sysctl_msec_to_ticks, "I",
+ "Minimum Retransmission Timeout");
+
+int tcp_rexmit_slop;
+SYSCTL_PROC(_net_inet_tcp, OID_AUTO, rexmit_slop, CTLTYPE_INT|CTLFLAG_RW,
+ &tcp_rexmit_slop, 0, sysctl_msec_to_ticks, "I",
+ "Retransmission Timer Slop");
+
+static int always_keepalive = 1;
+SYSCTL_INT(_net_inet_tcp, OID_AUTO, always_keepalive, CTLFLAG_RW,
+    &always_keepalive, 0, "Assume SO_KEEPALIVE on all TCP connections");
+
+int tcp_fast_finwait2_recycle = 0;
+SYSCTL_INT(_net_inet_tcp, OID_AUTO, fast_finwait2_recycle, CTLFLAG_RW,
+ &tcp_fast_finwait2_recycle, 0,
+ "Recycle closed FIN_WAIT_2 connections faster");
+
+int tcp_finwait2_timeout;
+SYSCTL_PROC(_net_inet_tcp, OID_AUTO, finwait2_timeout, CTLTYPE_INT|CTLFLAG_RW,
+ &tcp_finwait2_timeout, 0, sysctl_msec_to_ticks, "I", "FIN-WAIT2 timeout");
+
+static int tcp_keepcnt = TCPTV_KEEPCNT;	/* max idle probes */
+int	tcp_maxpersistidle;		/* max idle time in persist */
+int	tcp_maxidle;
+
+/*
+ * TCP protocol timeout routine called every 500 ms.
+ * Updates timestamps used for TCP and causes finite state machine
+ * actions if timers expire.
+ */
+void
+tcp_slowtimo(void)
+{
+ VNET_ITERATOR_DECL(vnet_iter);
+
+ VNET_LIST_RLOCK_NOSLEEP();
+ VNET_FOREACH(vnet_iter) {
+ CURVNET_SET(vnet_iter);
+ tcp_maxidle = tcp_keepcnt * tcp_keepintvl;
+ INP_INFO_WLOCK(&V_tcbinfo);
+ (void) tcp_tw_2msl_scan(0);
+ INP_INFO_WUNLOCK(&V_tcbinfo);
+ CURVNET_RESTORE();
+ }
+ VNET_LIST_RUNLOCK_NOSLEEP();
+}
+
+int tcp_syn_backoff[TCP_MAXRXTSHIFT + 1] =
+ { 1, 1, 1, 1, 1, 2, 4, 8, 16, 32, 64, 64, 64 };
+
+int tcp_backoff[TCP_MAXRXTSHIFT + 1] =
+ { 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 512, 512, 512 };
+
+static int tcp_totbackoff = 2559; /* sum of tcp_backoff[] */
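+
+/*
+ * Worked example (illustrative): on the fourth timeout of an
+ * established connection (t_rxtshift == 4), tcp_timer_rexmt() below
+ * rearms the timer for TCP_REXMTVAL(tp) * tcp_backoff[4], i.e. sixteen
+ * times the smoothed RTO, clamped by TCPT_RANGESET() to no more than
+ * TCPTV_REXMTMAX.
+ */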
+
+static int tcp_timer_race;
+SYSCTL_INT(_net_inet_tcp, OID_AUTO, timer_race, CTLFLAG_RD, &tcp_timer_race,
+ 0, "Count of t_inpcb races on tcp_discardcb");
+
+/*
+ * TCP timer processing.
+ */
+
+void
+tcp_timer_delack(void *xtp)
+{
+ struct tcpcb *tp = xtp;
+ struct inpcb *inp;
+ CURVNET_SET(tp->t_vnet);
+
+ inp = tp->t_inpcb;
+ /*
+ * XXXRW: While this assert is in fact correct, bugs in the tcpcb
+ * tear-down mean we need it as a work-around for races between
+ * timers and tcp_discardcb().
+ *
+ * KASSERT(inp != NULL, ("tcp_timer_delack: inp == NULL"));
+ */
+ if (inp == NULL) {
+ tcp_timer_race++;
+ CURVNET_RESTORE();
+ return;
+ }
+ INP_WLOCK(inp);
+ if ((inp->inp_flags & INP_DROPPED) || callout_pending(&tp->t_timers->tt_delack)
+ || !callout_active(&tp->t_timers->tt_delack)) {
+ INP_WUNLOCK(inp);
+ CURVNET_RESTORE();
+ return;
+ }
+ callout_deactivate(&tp->t_timers->tt_delack);
+
+ tp->t_flags |= TF_ACKNOW;
+ TCPSTAT_INC(tcps_delack);
+ (void) tcp_output(tp);
+ INP_WUNLOCK(inp);
+ CURVNET_RESTORE();
+}
+
+void
+tcp_timer_2msl(void *xtp)
+{
+ struct tcpcb *tp = xtp;
+ struct inpcb *inp;
+ CURVNET_SET(tp->t_vnet);
+#ifdef TCPDEBUG
+ int ostate;
+
+ ostate = tp->t_state;
+#endif
+ /*
+ * XXXRW: Does this actually happen?
+ */
+ INP_INFO_WLOCK(&V_tcbinfo);
+ inp = tp->t_inpcb;
+ /*
+ * XXXRW: While this assert is in fact correct, bugs in the tcpcb
+ * tear-down mean we need it as a work-around for races between
+ * timers and tcp_discardcb().
+ *
+ * KASSERT(inp != NULL, ("tcp_timer_2msl: inp == NULL"));
+ */
+ if (inp == NULL) {
+ tcp_timer_race++;
+ INP_INFO_WUNLOCK(&V_tcbinfo);
+ CURVNET_RESTORE();
+ return;
+ }
+ INP_WLOCK(inp);
+ tcp_free_sackholes(tp);
+ if ((inp->inp_flags & INP_DROPPED) || callout_pending(&tp->t_timers->tt_2msl) ||
+ !callout_active(&tp->t_timers->tt_2msl)) {
+ INP_WUNLOCK(tp->t_inpcb);
+ INP_INFO_WUNLOCK(&V_tcbinfo);
+ CURVNET_RESTORE();
+ return;
+ }
+ callout_deactivate(&tp->t_timers->tt_2msl);
+ /*
+ * 2 MSL timeout in shutdown went off. If we're closed but
+ * still waiting for peer to close and connection has been idle
+ * too long, or if 2MSL time is up from TIME_WAIT, delete connection
+ * control block. Otherwise, check again in a bit.
+ *
+	 * If fast FIN_WAIT_2 recycling is enabled, we are in FIN_WAIT_2, and
+	 * the receiver has closed, there's no point in hanging onto the
+	 * FIN_WAIT_2 socket; just close it.  Ignore the fact that there were
+	 * recent incoming segments.
+ */
+ if (tcp_fast_finwait2_recycle && tp->t_state == TCPS_FIN_WAIT_2 &&
+ tp->t_inpcb && tp->t_inpcb->inp_socket &&
+ (tp->t_inpcb->inp_socket->so_rcv.sb_state & SBS_CANTRCVMORE)) {
+ TCPSTAT_INC(tcps_finwait2_drops);
+ tp = tcp_close(tp);
+ } else {
+ if (tp->t_state != TCPS_TIME_WAIT &&
+ ticks - tp->t_rcvtime <= tcp_maxidle)
+ callout_reset(&tp->t_timers->tt_2msl, tcp_keepintvl,
+ tcp_timer_2msl, tp);
+ else
+ tp = tcp_close(tp);
+ }
+
+#ifdef TCPDEBUG
+ if (tp != NULL && (tp->t_inpcb->inp_socket->so_options & SO_DEBUG))
+ tcp_trace(TA_USER, ostate, tp, (void *)0, (struct tcphdr *)0,
+ PRU_SLOWTIMO);
+#endif
+ if (tp != NULL)
+ INP_WUNLOCK(inp);
+ INP_INFO_WUNLOCK(&V_tcbinfo);
+ CURVNET_RESTORE();
+}
+
+void
+tcp_timer_keep(void *xtp)
+{
+ struct tcpcb *tp = xtp;
+ struct tcptemp *t_template;
+ struct inpcb *inp;
+ CURVNET_SET(tp->t_vnet);
+#ifdef TCPDEBUG
+ int ostate;
+
+ ostate = tp->t_state;
+#endif
+ INP_INFO_WLOCK(&V_tcbinfo);
+ inp = tp->t_inpcb;
+ /*
+ * XXXRW: While this assert is in fact correct, bugs in the tcpcb
+ * tear-down mean we need it as a work-around for races between
+ * timers and tcp_discardcb().
+ *
+ * KASSERT(inp != NULL, ("tcp_timer_keep: inp == NULL"));
+ */
+ if (inp == NULL) {
+ tcp_timer_race++;
+ INP_INFO_WUNLOCK(&V_tcbinfo);
+ CURVNET_RESTORE();
+ return;
+ }
+ INP_WLOCK(inp);
+ if ((inp->inp_flags & INP_DROPPED) || callout_pending(&tp->t_timers->tt_keep)
+ || !callout_active(&tp->t_timers->tt_keep)) {
+ INP_WUNLOCK(inp);
+ INP_INFO_WUNLOCK(&V_tcbinfo);
+ CURVNET_RESTORE();
+ return;
+ }
+ callout_deactivate(&tp->t_timers->tt_keep);
+ /*
+ * Keep-alive timer went off; send something
+ * or drop connection if idle for too long.
+ */
+ TCPSTAT_INC(tcps_keeptimeo);
+ if (tp->t_state < TCPS_ESTABLISHED)
+ goto dropit;
+ if ((always_keepalive || inp->inp_socket->so_options & SO_KEEPALIVE) &&
+ tp->t_state <= TCPS_CLOSING) {
+ if (ticks - tp->t_rcvtime >= tcp_keepidle + tcp_maxidle)
+ goto dropit;
+ /*
+ * Send a packet designed to force a response
+ * if the peer is up and reachable:
+ * either an ACK if the connection is still alive,
+ * or an RST if the peer has closed the connection
+ * due to timeout or reboot.
+ * Using sequence number tp->snd_una-1
+ * causes the transmitted zero-length segment
+ * to lie outside the receive window;
+ * by the protocol spec, this requires the
+ * correspondent TCP to respond.
+ */
+ TCPSTAT_INC(tcps_keepprobe);
+ t_template = tcpip_maketemplate(inp);
+ if (t_template) {
+ tcp_respond(tp, t_template->tt_ipgen,
+ &t_template->tt_t, (struct mbuf *)NULL,
+ tp->rcv_nxt, tp->snd_una - 1, 0);
+ free(t_template, M_TEMP);
+ }
+ callout_reset(&tp->t_timers->tt_keep, tcp_keepintvl, tcp_timer_keep, tp);
+ } else
+ callout_reset(&tp->t_timers->tt_keep, tcp_keepidle, tcp_timer_keep, tp);
+
+#ifdef TCPDEBUG
+ if (inp->inp_socket->so_options & SO_DEBUG)
+ tcp_trace(TA_USER, ostate, tp, (void *)0, (struct tcphdr *)0,
+ PRU_SLOWTIMO);
+#endif
+ INP_WUNLOCK(inp);
+ INP_INFO_WUNLOCK(&V_tcbinfo);
+ CURVNET_RESTORE();
+ return;
+
+dropit:
+ TCPSTAT_INC(tcps_keepdrops);
+ tp = tcp_drop(tp, ETIMEDOUT);
+
+#ifdef TCPDEBUG
+ if (tp != NULL && (tp->t_inpcb->inp_socket->so_options & SO_DEBUG))
+ tcp_trace(TA_USER, ostate, tp, (void *)0, (struct tcphdr *)0,
+ PRU_SLOWTIMO);
+#endif
+ if (tp != NULL)
+ INP_WUNLOCK(tp->t_inpcb);
+ INP_INFO_WUNLOCK(&V_tcbinfo);
+ CURVNET_RESTORE();
+}
+
+void
+tcp_timer_persist(void *xtp)
+{
+ struct tcpcb *tp = xtp;
+ struct inpcb *inp;
+ CURVNET_SET(tp->t_vnet);
+#ifdef TCPDEBUG
+ int ostate;
+
+ ostate = tp->t_state;
+#endif
+ INP_INFO_WLOCK(&V_tcbinfo);
+ inp = tp->t_inpcb;
+ /*
+ * XXXRW: While this assert is in fact correct, bugs in the tcpcb
+ * tear-down mean we need it as a work-around for races between
+ * timers and tcp_discardcb().
+ *
+ * KASSERT(inp != NULL, ("tcp_timer_persist: inp == NULL"));
+ */
+ if (inp == NULL) {
+ tcp_timer_race++;
+ INP_INFO_WUNLOCK(&V_tcbinfo);
+ CURVNET_RESTORE();
+ return;
+ }
+ INP_WLOCK(inp);
+ if ((inp->inp_flags & INP_DROPPED) || callout_pending(&tp->t_timers->tt_persist)
+ || !callout_active(&tp->t_timers->tt_persist)) {
+ INP_WUNLOCK(inp);
+ INP_INFO_WUNLOCK(&V_tcbinfo);
+ CURVNET_RESTORE();
+ return;
+ }
+ callout_deactivate(&tp->t_timers->tt_persist);
+ /*
+	 * Persistence timer into zero window.
+ * Force a byte to be output, if possible.
+ */
+ TCPSTAT_INC(tcps_persisttimeo);
+ /*
+ * Hack: if the peer is dead/unreachable, we do not
+ * time out if the window is closed. After a full
+ * backoff, drop the connection if the idle time
+ * (no responses to probes) reaches the maximum
+ * backoff that we would use if retransmitting.
+ */
+ if (tp->t_rxtshift == TCP_MAXRXTSHIFT &&
+ (ticks - tp->t_rcvtime >= tcp_maxpersistidle ||
+ ticks - tp->t_rcvtime >= TCP_REXMTVAL(tp) * tcp_totbackoff)) {
+ TCPSTAT_INC(tcps_persistdrop);
+ tp = tcp_drop(tp, ETIMEDOUT);
+ goto out;
+ }
+ tcp_setpersist(tp);
+ tp->t_flags |= TF_FORCEDATA;
+ (void) tcp_output(tp);
+ tp->t_flags &= ~TF_FORCEDATA;
+
+out:
+#ifdef TCPDEBUG
+	if (tp != NULL && (tp->t_inpcb->inp_socket->so_options & SO_DEBUG))
+ tcp_trace(TA_USER, ostate, tp, NULL, NULL, PRU_SLOWTIMO);
+#endif
+ if (tp != NULL)
+ INP_WUNLOCK(inp);
+ INP_INFO_WUNLOCK(&V_tcbinfo);
+ CURVNET_RESTORE();
+}
+
+void
+tcp_timer_rexmt(void *xtp)
+{
+ struct tcpcb *tp = xtp;
+ CURVNET_SET(tp->t_vnet);
+ int rexmt;
+ int headlocked;
+ struct inpcb *inp;
+#ifdef TCPDEBUG
+ int ostate;
+
+ ostate = tp->t_state;
+#endif
+ INP_INFO_WLOCK(&V_tcbinfo);
+ headlocked = 1;
+ inp = tp->t_inpcb;
+ /*
+ * XXXRW: While this assert is in fact correct, bugs in the tcpcb
+ * tear-down mean we need it as a work-around for races between
+ * timers and tcp_discardcb().
+ *
+ * KASSERT(inp != NULL, ("tcp_timer_rexmt: inp == NULL"));
+ */
+ if (inp == NULL) {
+ tcp_timer_race++;
+ INP_INFO_WUNLOCK(&V_tcbinfo);
+ CURVNET_RESTORE();
+ return;
+ }
+ INP_WLOCK(inp);
+ if ((inp->inp_flags & INP_DROPPED) || callout_pending(&tp->t_timers->tt_rexmt)
+ || !callout_active(&tp->t_timers->tt_rexmt)) {
+ INP_WUNLOCK(inp);
+ INP_INFO_WUNLOCK(&V_tcbinfo);
+ CURVNET_RESTORE();
+ return;
+ }
+ callout_deactivate(&tp->t_timers->tt_rexmt);
+ tcp_free_sackholes(tp);
+ /*
+ * Retransmission timer went off. Message has not
+ * been acked within retransmit interval. Back off
+ * to a longer retransmit interval and retransmit one segment.
+ */
+ if (++tp->t_rxtshift > TCP_MAXRXTSHIFT) {
+ tp->t_rxtshift = TCP_MAXRXTSHIFT;
+ TCPSTAT_INC(tcps_timeoutdrop);
+ tp = tcp_drop(tp, tp->t_softerror ?
+ tp->t_softerror : ETIMEDOUT);
+ goto out;
+ }
+ INP_INFO_WUNLOCK(&V_tcbinfo);
+ headlocked = 0;
+ if (tp->t_rxtshift == 1) {
+ /*
+ * first retransmit; record ssthresh and cwnd so they can
+ * be recovered if this turns out to be a "bad" retransmit.
+ * A retransmit is considered "bad" if an ACK for this
+ * segment is received within RTT/2 interval; the assumption
+ * here is that the ACK was already in flight. See
+ * "On Estimating End-to-End Network Path Properties" by
+ * Allman and Paxson for more details.
+ */
+ tp->snd_cwnd_prev = tp->snd_cwnd;
+ tp->snd_ssthresh_prev = tp->snd_ssthresh;
+ tp->snd_recover_prev = tp->snd_recover;
+ if (IN_FASTRECOVERY(tp))
+ tp->t_flags |= TF_WASFRECOVERY;
+ else
+ tp->t_flags &= ~TF_WASFRECOVERY;
+ tp->t_badrxtwin = ticks + (tp->t_srtt >> (TCP_RTT_SHIFT + 1));
+ }
+ TCPSTAT_INC(tcps_rexmttimeo);
+ if (tp->t_state == TCPS_SYN_SENT)
+ rexmt = TCP_REXMTVAL(tp) * tcp_syn_backoff[tp->t_rxtshift];
+ else
+ rexmt = TCP_REXMTVAL(tp) * tcp_backoff[tp->t_rxtshift];
+ TCPT_RANGESET(tp->t_rxtcur, rexmt,
+ tp->t_rttmin, TCPTV_REXMTMAX);
+ /*
+	 * Disable rfc1323 if we haven't got any response to
+	 * our third SYN, to work around some broken terminal servers
+ * (most of which have hopefully been retired) that have bad VJ
+ * header compression code which trashes TCP segments containing
+ * unknown-to-them TCP options.
+ */
+ if ((tp->t_state == TCPS_SYN_SENT) && (tp->t_rxtshift == 3))
+ tp->t_flags &= ~(TF_REQ_SCALE|TF_REQ_TSTMP);
+ /*
+ * If we backed off this far, our srtt estimate is probably bogus.
+ * Clobber it so we'll take the next rtt measurement as our srtt;
+ * move the current srtt into rttvar to keep the current
+ * retransmit times until then.
+ */
+ if (tp->t_rxtshift > TCP_MAXRXTSHIFT / 4) {
+#ifdef INET6
+ if ((tp->t_inpcb->inp_vflag & INP_IPV6) != 0)
+ in6_losing(tp->t_inpcb);
+ else
+#endif
+ tp->t_rttvar += (tp->t_srtt >> TCP_RTT_SHIFT);
+ tp->t_srtt = 0;
+ }
+ tp->snd_nxt = tp->snd_una;
+ tp->snd_recover = tp->snd_max;
+ /*
+ * Force a segment to be sent.
+ */
+ tp->t_flags |= TF_ACKNOW;
+ /*
+ * If timing a segment in this window, stop the timer.
+ */
+ tp->t_rtttime = 0;
+ /*
+ * Close the congestion window down to one segment
+ * (we'll open it by one segment for each ack we get).
+ * Since we probably have a window's worth of unacked
+ * data accumulated, this "slow start" keeps us from
+ * dumping all that data as back-to-back packets (which
+ * might overwhelm an intermediate gateway).
+ *
+ * There are two phases to the opening: Initially we
+ * open by one mss on each ack. This makes the window
+ * size increase exponentially with time. If the
+ * window is larger than the path can handle, this
+ * exponential growth results in dropped packet(s)
+ * almost immediately. To get more time between
+ * drops but still "push" the network to take advantage
+ * of improving conditions, we switch from exponential
+	 * to linear window opening at some threshold size.
+	 * For a threshold, we use half the current window
+	 * size, truncated to a multiple of the mss.
+	 *
+	 * (the minimum cwnd that will give us exponential
+	 * growth is 2 mss.  We don't allow the threshold
+	 * to go below this.)
+ */
+ {
+ u_int win = min(tp->snd_wnd, tp->snd_cwnd) / 2 / tp->t_maxseg;
+ if (win < 2)
+ win = 2;
+ tp->snd_cwnd = tp->t_maxseg;
+ tp->snd_ssthresh = win * tp->t_maxseg;
+ tp->t_dupacks = 0;
+ }
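+	/*
+	 * Worked example (illustrative): with snd_wnd = snd_cwnd = 64 KiB
+	 * and t_maxseg = 1460, win = 65536 / 2 / 1460 = 22, so cwnd
+	 * restarts at one segment while ssthresh becomes 22 * 1460 bytes.
+	 */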
+ EXIT_FASTRECOVERY(tp);
+ tp->t_bytes_acked = 0;
+ (void) tcp_output(tp);
+
+out:
+#ifdef TCPDEBUG
+ if (tp != NULL && (tp->t_inpcb->inp_socket->so_options & SO_DEBUG))
+ tcp_trace(TA_USER, ostate, tp, (void *)0, (struct tcphdr *)0,
+ PRU_SLOWTIMO);
+#endif
+ if (tp != NULL)
+ INP_WUNLOCK(inp);
+ if (headlocked)
+ INP_INFO_WUNLOCK(&V_tcbinfo);
+ CURVNET_RESTORE();
+}
+
+void
+tcp_timer_activate(struct tcpcb *tp, int timer_type, u_int delta)
+{
+ struct callout *t_callout;
+ void *f_callout;
+
+ switch (timer_type) {
+ case TT_DELACK:
+ t_callout = &tp->t_timers->tt_delack;
+ f_callout = tcp_timer_delack;
+ break;
+ case TT_REXMT:
+ t_callout = &tp->t_timers->tt_rexmt;
+ f_callout = tcp_timer_rexmt;
+ break;
+ case TT_PERSIST:
+ t_callout = &tp->t_timers->tt_persist;
+ f_callout = tcp_timer_persist;
+ break;
+ case TT_KEEP:
+ t_callout = &tp->t_timers->tt_keep;
+ f_callout = tcp_timer_keep;
+ break;
+ case TT_2MSL:
+ t_callout = &tp->t_timers->tt_2msl;
+ f_callout = tcp_timer_2msl;
+ break;
+ default:
+ panic("bad timer_type");
+ }
+ if (delta == 0) {
+ callout_stop(t_callout);
+ } else {
+ callout_reset(t_callout, delta, f_callout, tp);
+ }
+}
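+
+/*
+ * Usage sketch (illustrative): callers arm a timer by passing a tick
+ * count, e.g. tcp_timer_activate(tp, TT_REXMT, tp->t_rxtcur) to start
+ * the retransmit timer, and stop it again with a delta of zero, e.g.
+ * tcp_timer_activate(tp, TT_REXMT, 0).
+ */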
+
+int
+tcp_timer_active(struct tcpcb *tp, int timer_type)
+{
+ struct callout *t_callout;
+
+ switch (timer_type) {
+ case TT_DELACK:
+ t_callout = &tp->t_timers->tt_delack;
+ break;
+ case TT_REXMT:
+ t_callout = &tp->t_timers->tt_rexmt;
+ break;
+ case TT_PERSIST:
+ t_callout = &tp->t_timers->tt_persist;
+ break;
+ case TT_KEEP:
+ t_callout = &tp->t_timers->tt_keep;
+ break;
+ case TT_2MSL:
+ t_callout = &tp->t_timers->tt_2msl;
+ break;
+ default:
+ panic("bad timer_type");
+ }
+	return (callout_active(t_callout));
+}
diff --git a/rtems/freebsd/netinet/tcp_timer.h b/rtems/freebsd/netinet/tcp_timer.h
new file mode 100644
index 00000000..1514a293
--- /dev/null
+++ b/rtems/freebsd/netinet/tcp_timer.h
@@ -0,0 +1,183 @@
+/*-
+ * Copyright (c) 1982, 1986, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)tcp_timer.h 8.1 (Berkeley) 6/10/93
+ * $FreeBSD$
+ */
+
+#ifndef _NETINET_TCP_TIMER_HH_
+#define _NETINET_TCP_TIMER_HH_
+
+/*
+ * The TCPT_REXMT timer is used to force retransmissions.
+ * The TCP has the TCPT_REXMT timer set whenever segments
+ * have been sent for which ACKs are expected but not yet
+ * received. If an ACK is received which advances tp->snd_una,
+ * then the retransmit timer is cleared (if there are no more
+ * outstanding segments) or reset to the base value (if there
+ * are more ACKs expected). Whenever the retransmit timer goes off,
+ * we retransmit one unacknowledged segment, and do a backoff
+ * on the retransmit timer.
+ *
+ * The TCPT_PERSIST timer is used to keep window size information
+ * flowing even if the window goes shut. If all previous transmissions
+ * have been acknowledged (so that there are no retransmissions in progress),
+ * and the window is too small to bother sending anything, then we start
+ * the TCPT_PERSIST timer. When it expires, if the window is nonzero,
+ * we go to transmit state. Otherwise, at intervals send a single byte
+ * into the peer's window to force it to update our window information.
+ * We do this at most as often as TCPT_PERSMIN time intervals,
+ * but no more frequently than the current estimate of round-trip
+ * packet time. The TCPT_PERSIST timer is cleared whenever we receive
+ * a window update from the peer.
+ *
+ * The TCPT_KEEP timer is used to keep connections alive.  If a
+ * connection is idle (no segments received) for TCPTV_KEEP_INIT amount
+ * of time, but not yet established, then we drop the connection.
+ * Once the connection is established, if it is idle for TCPTV_KEEP_IDLE time
+ * (and keepalives have been enabled on the socket), we begin to probe
+ * the connection. We force the peer to send us a segment by sending:
+ * <SEQ=SND.UNA-1><ACK=RCV.NXT><CTL=ACK>
+ * This segment is (deliberately) outside the window, and should elicit
+ * an ack segment in response from the peer.  If, despite the TCPT_KEEP-
+ * initiated segments, we cannot elicit a response from the peer in
+ * TCPT_MAXIDLE amount of time probing, then we drop the connection.
+ */
+
+/*
+ * Time constants.
+ */
+#define TCPTV_MSL ( 30*hz) /* max seg lifetime (hah!) */
+#define TCPTV_SRTTBASE 0 /* base roundtrip time;
+ if 0, no idea yet */
+#define TCPTV_RTOBASE ( 3*hz) /* assumed RTO if no info */
+#define TCPTV_SRTTDFLT ( 3*hz) /* assumed RTT if no info */
+
+#define TCPTV_PERSMIN ( 5*hz) /* retransmit persistence */
+#define TCPTV_PERSMAX ( 60*hz) /* maximum persist interval */
+
+#define TCPTV_KEEP_INIT ( 75*hz) /* initial connect keepalive */
+#define TCPTV_KEEP_IDLE (120*60*hz) /* dflt time before probing */
+#define TCPTV_KEEPINTVL ( 75*hz) /* default probe interval */
+#define TCPTV_KEEPCNT 8 /* max probes before drop */
+
+#define TCPTV_INFLIGHT_RTTTHRESH (10*hz/1000) /* below which inflight
+ disengages, in msec */
+
+#define TCPTV_FINWAIT2_TIMEOUT (60*hz) /* FIN_WAIT_2 timeout if no receiver */
+
+/*
+ * Minimum retransmit timer is 3 ticks, for algorithmic stability.
+ * TCPT_RANGESET() will add another TCPTV_CPU_VAR to deal with
+ * the expected worst-case processing variances by the kernels
+ * representing the end points. Such variances do not always show
+ * up in the srtt because the timestamp is often calculated at
+ * the interface rather than at the TCP layer.  This value is
+ * typically 50ms. However, it is also possible that delayed
+ * acks (typically 100ms) could create issues so we set the slop
+ * to 200ms to try to cover it. Note that, properly speaking,
+ * delayed-acks should not create a major issue for interactive
+ * environments which 'P'ush the last segment, at least as
+ * long as implementations do the required 'at least one ack
+ * for every two packets' for the non-interactive streaming case.
+ * (maybe the RTO calculation should use 2*RTT instead of RTT
+ * to handle the ack-every-other-packet case).
+ *
+ * The prior minimum of 1*hz (1 second) badly breaks throughput on any
+ * network faster than a modem when there is minor (e.g. 1%) packet loss.
+ */
+#define TCPTV_MIN ( hz/33 ) /* minimum allowable value */
+#define TCPTV_CPU_VAR ( hz/5 ) /* cpu variance allowed (200ms) */
+#define TCPTV_REXMTMAX ( 64*hz) /* max allowable REXMT value */
+
+#define TCPTV_TWTRUNC 8 /* RTO factor to truncate TW */
+
+#define TCP_LINGERTIME 120 /* linger at most 2 minutes */
+
+#define TCP_MAXRXTSHIFT 12 /* maximum retransmits */
+
+#define TCPTV_DELACK (hz / PR_FASTHZ / 2) /* 100ms timeout */
+
+#ifdef TCPTIMERS
+static const char *tcptimers[] =
+ { "REXMT", "PERSIST", "KEEP", "2MSL" };
+#endif
+
+/*
+ * Force a time value to be in a certain range.
+ */
+#define TCPT_RANGESET(tv, value, tvmin, tvmax) do { \
+ (tv) = (value) + tcp_rexmit_slop; \
+ if ((u_long)(tv) < (u_long)(tvmin)) \
+ (tv) = (tvmin); \
+ if ((u_long)(tv) > (u_long)(tvmax)) \
+ (tv) = (tvmax); \
+} while(0)
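+
+/*
+ * Worked example (illustrative), assuming hz = 1000 and the default
+ * 200 ms rexmit slop: TCPT_RANGESET(tv, 100, 30, 64 * hz) yields
+ * tv = 100 + 200 = 300 ticks, which already lies within the bounds and
+ * is left unchanged.
+ */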
+
+#ifdef _KERNEL
+
+struct tcp_timer {
+ struct callout tt_rexmt; /* retransmit timer */
+ struct callout tt_persist; /* retransmit persistence */
+ struct callout tt_keep; /* keepalive */
+ struct callout tt_2msl; /* 2*msl TIME_WAIT timer */
+ struct callout tt_delack; /* delayed ACK timer */
+};
+#define TT_DELACK 0x01
+#define TT_REXMT 0x02
+#define TT_PERSIST 0x04
+#define TT_KEEP 0x08
+#define TT_2MSL 0x10
+
+extern int tcp_keepinit; /* time to establish connection */
+extern int tcp_keepidle; /* time before keepalive probes begin */
+extern int tcp_keepintvl; /* time between keepalive probes */
+extern int tcp_maxidle; /* time to drop after starting probes */
+extern int tcp_delacktime; /* time before sending a delayed ACK */
+extern int tcp_maxpersistidle;
+extern int tcp_rexmit_min;
+extern int tcp_rexmit_slop;
+extern int tcp_msl;
+extern int tcp_ttl; /* time to live for TCP segs */
+extern int tcp_backoff[];
+
+extern int tcp_finwait2_timeout;
+extern int tcp_fast_finwait2_recycle;
+
+void tcp_timer_init(void);
+void tcp_timer_2msl(void *xtp);
+struct tcptw *
+ tcp_tw_2msl_scan(int _reuse); /* XXX temporary */
+void tcp_timer_keep(void *xtp);
+void tcp_timer_persist(void *xtp);
+void tcp_timer_rexmt(void *xtp);
+void tcp_timer_delack(void *xtp);
+
+#endif /* _KERNEL */
+
+#endif /* !_NETINET_TCP_TIMER_HH_ */
diff --git a/rtems/freebsd/netinet/tcp_timewait.c b/rtems/freebsd/netinet/tcp_timewait.c
new file mode 100644
index 00000000..d917ed2c
--- /dev/null
+++ b/rtems/freebsd/netinet/tcp_timewait.c
@@ -0,0 +1,618 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)tcp_subr.c 8.2 (Berkeley) 5/24/95
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/freebsd/local/opt_inet.h>
+#include <rtems/freebsd/local/opt_inet6.h>
+#include <rtems/freebsd/local/opt_tcpdebug.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/callout.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/sysctl.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/priv.h>
+#include <rtems/freebsd/sys/proc.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/socketvar.h>
+#include <rtems/freebsd/sys/protosw.h>
+#include <rtems/freebsd/sys/random.h>
+
+#include <rtems/freebsd/vm/uma.h>
+
+#include <rtems/freebsd/net/route.h>
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/vnet.h>
+
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/in_systm.h>
+#include <rtems/freebsd/netinet/ip.h>
+#ifdef INET6
+#include <rtems/freebsd/netinet/ip6.h>
+#endif
+#include <rtems/freebsd/netinet/in_pcb.h>
+#ifdef INET6
+#include <rtems/freebsd/netinet6/in6_pcb.h>
+#endif
+#include <rtems/freebsd/netinet/in_var.h>
+#include <rtems/freebsd/netinet/ip_var.h>
+#ifdef INET6
+#include <rtems/freebsd/netinet6/ip6_var.h>
+#include <rtems/freebsd/netinet6/scope6_var.h>
+#include <rtems/freebsd/netinet6/nd6.h>
+#endif
+#include <rtems/freebsd/netinet/ip_icmp.h>
+#include <rtems/freebsd/netinet/tcp.h>
+#include <rtems/freebsd/netinet/tcp_fsm.h>
+#include <rtems/freebsd/netinet/tcp_seq.h>
+#include <rtems/freebsd/netinet/tcp_timer.h>
+#include <rtems/freebsd/netinet/tcp_var.h>
+#ifdef INET6
+#include <rtems/freebsd/netinet6/tcp6_var.h>
+#endif
+#include <rtems/freebsd/netinet/tcpip.h>
+#ifdef TCPDEBUG
+#include <rtems/freebsd/netinet/tcp_debug.h>
+#endif
+#include <rtems/freebsd/netinet6/ip6protosw.h>
+
+#include <rtems/freebsd/machine/in_cksum.h>
+
+#include <rtems/freebsd/security/mac/mac_framework.h>
+
+static VNET_DEFINE(uma_zone_t, tcptw_zone);
+#define V_tcptw_zone VNET(tcptw_zone)
+static int maxtcptw;
+
+/*
+ * The timed wait queue contains references to each of the TCP sessions
+ * currently in the TIME_WAIT state. The queue pointers, including the
+ * queue pointers in each tcptw structure, are protected using the global
+ * tcbinfo lock, which must be held over queue iteration and modification.
+ */
+static VNET_DEFINE(TAILQ_HEAD(, tcptw), twq_2msl);
+#define V_twq_2msl VNET(twq_2msl)
+
+static void tcp_tw_2msl_reset(struct tcptw *, int);
+static void tcp_tw_2msl_stop(struct tcptw *);
+
+static int
+tcptw_auto_size(void)
+{
+ int halfrange;
+
+ /*
+ * Max out at half the ephemeral port range so that TIME_WAIT
+ * sockets don't tie up too many ephemeral ports.
+ */
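+ /*
+ * For illustration (hypothetical values): an ephemeral range of
+ * 49152..65535 gives halfrange = 8191, so the cap becomes
+ * min(8191, maxsockets / 5).
+ */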
+ if (V_ipport_lastauto > V_ipport_firstauto)
+ halfrange = (V_ipport_lastauto - V_ipport_firstauto) / 2;
+ else
+ halfrange = (V_ipport_firstauto - V_ipport_lastauto) / 2;
+ /* Protect against goofy port ranges smaller than 32. */
+ return (imin(imax(halfrange, 32), maxsockets / 5));
+}
+
+static int
+sysctl_maxtcptw(SYSCTL_HANDLER_ARGS)
+{
+ int error, new;
+
+ if (maxtcptw == 0)
+ new = tcptw_auto_size();
+ else
+ new = maxtcptw;
+ error = sysctl_handle_int(oidp, &new, 0, req);
+ if (error == 0 && req->newptr)
+ if (new >= 32) {
+ maxtcptw = new;
+ uma_zone_set_max(V_tcptw_zone, maxtcptw);
+ }
+ return (error);
+}
+
+SYSCTL_PROC(_net_inet_tcp, OID_AUTO, maxtcptw, CTLTYPE_INT|CTLFLAG_RW,
+ &maxtcptw, 0, sysctl_maxtcptw, "IU",
+ "Maximum number of compressed TCP TIME_WAIT entries");
+
+VNET_DEFINE(int, nolocaltimewait) = 0;
+#define V_nolocaltimewait VNET(nolocaltimewait)
+SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, nolocaltimewait, CTLFLAG_RW,
+ &VNET_NAME(nolocaltimewait), 0,
+ "Do not create compressed TCP TIME_WAIT entries for local connections");
+
+void
+tcp_tw_zone_change(void)
+{
+
+ if (maxtcptw == 0)
+ uma_zone_set_max(V_tcptw_zone, tcptw_auto_size());
+}
+
+void
+tcp_tw_init(void)
+{
+
+ V_tcptw_zone = uma_zcreate("tcptw", sizeof(struct tcptw),
+ NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
+ TUNABLE_INT_FETCH("net.inet.tcp.maxtcptw", &maxtcptw);
+ if (maxtcptw == 0)
+ uma_zone_set_max(V_tcptw_zone, tcptw_auto_size());
+ else
+ uma_zone_set_max(V_tcptw_zone, maxtcptw);
+ TAILQ_INIT(&V_twq_2msl);
+}
+
+#ifdef VIMAGE
+void
+tcp_tw_destroy(void)
+{
+ struct tcptw *tw;
+
+ INP_INFO_WLOCK(&V_tcbinfo);
+ while ((tw = TAILQ_FIRST(&V_twq_2msl)) != NULL)
+ tcp_twclose(tw, 0);
+ INP_INFO_WUNLOCK(&V_tcbinfo);
+
+ uma_zdestroy(V_tcptw_zone);
+}
+#endif
+
+/*
+ * Move a TCP connection into TIME_WAIT state.
+ * tcbinfo is locked.
+ * inp is locked, and is unlocked before returning.
+ */
+void
+tcp_twstart(struct tcpcb *tp)
+{
+ struct tcptw *tw;
+ struct inpcb *inp = tp->t_inpcb;
+ int acknow;
+ struct socket *so;
+
+ INP_INFO_WLOCK_ASSERT(&V_tcbinfo); /* tcp_tw_2msl_reset(). */
+ INP_WLOCK_ASSERT(inp);
+
+ if (V_nolocaltimewait && in_localip(inp->inp_faddr)) {
+ tp = tcp_close(tp);
+ if (tp != NULL)
+ INP_WUNLOCK(inp);
+ return;
+ }
+
+ tw = uma_zalloc(V_tcptw_zone, M_NOWAIT);
+ if (tw == NULL) {
+ tw = tcp_tw_2msl_scan(1);
+ if (tw == NULL) {
+ tp = tcp_close(tp);
+ if (tp != NULL)
+ INP_WUNLOCK(inp);
+ return;
+ }
+ }
+ tw->tw_inpcb = inp;
+
+ /*
+ * Recover last window size sent.
+ */
+ tw->last_win = (tp->rcv_adv - tp->rcv_nxt) >> tp->rcv_scale;
+
+ /*
+ * Set t_recent if timestamps are used on the connection.
+ */
+ if ((tp->t_flags & (TF_REQ_TSTMP|TF_RCVD_TSTMP|TF_NOOPT)) ==
+ (TF_REQ_TSTMP|TF_RCVD_TSTMP)) {
+ tw->t_recent = tp->ts_recent;
+ tw->ts_offset = tp->ts_offset;
+ } else {
+ tw->t_recent = 0;
+ tw->ts_offset = 0;
+ }
+
+ tw->snd_nxt = tp->snd_nxt;
+ tw->rcv_nxt = tp->rcv_nxt;
+ tw->iss = tp->iss;
+ tw->irs = tp->irs;
+ tw->t_starttime = tp->t_starttime;
+ tw->tw_time = 0;
+
+ /*
+ * XXX: If this code is also to be used for the FIN_WAIT_2 state,
+ * we may need ts_recent from the last segment.
+ */
+ acknow = tp->t_flags & TF_ACKNOW;
+
+ /*
+ * First, discard tcpcb state, which includes stopping its timers and
+ * freeing it. tcp_discardcb() used to also release the inpcb, but
+ * that work is now done in the caller.
+ *
+ * Note: soisdisconnected() call used to be made in tcp_discardcb(),
+ * and might not be needed here any longer.
+ */
+ tcp_discardcb(tp);
+ so = inp->inp_socket;
+ soisdisconnected(so);
+ tw->tw_cred = crhold(so->so_cred);
+ SOCK_LOCK(so);
+ tw->tw_so_options = so->so_options;
+ SOCK_UNLOCK(so);
+ if (acknow)
+ tcp_twrespond(tw, TH_ACK);
+ inp->inp_ppcb = tw;
+ inp->inp_flags |= INP_TIMEWAIT;
+ tcp_tw_2msl_reset(tw, 0);
+
+ /*
+ * If the inpcb owns the sole reference to the socket, then we can
+ * detach and free the socket as it is not needed in time wait.
+ */
+ if (inp->inp_flags & INP_SOCKREF) {
+ KASSERT(so->so_state & SS_PROTOREF,
+ ("tcp_twstart: !SS_PROTOREF"));
+ inp->inp_flags &= ~INP_SOCKREF;
+ INP_WUNLOCK(inp);
+ ACCEPT_LOCK();
+ SOCK_LOCK(so);
+ so->so_state &= ~SS_PROTOREF;
+ sofree(so);
+ } else
+ INP_WUNLOCK(inp);
+}
+
+#if 0
+/*
+ * The approximate rate of ISN increase used by Microsoft TCP stacks;
+ * the actual rate is slightly higher due to the addition of
+ * random positive increments.
+ *
+ * Most other new OSes use semi-randomized ISN values, so we
+ * do not need to worry about them.
+ */
+#define MS_ISN_BYTES_PER_SECOND 250000
+
+/*
+ * Determine if the ISN we will generate has advanced beyond the last
+ * sequence number used by the previous connection. If so, indicate
+ * that it is safe to recycle this tw socket by returning 1.
+ */
+int
+tcp_twrecycleable(struct tcptw *tw)
+{
+ tcp_seq new_iss = tw->iss;
+ tcp_seq new_irs = tw->irs;
+
+ INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
+ new_iss += (ticks - tw->t_starttime) * (ISN_BYTES_PER_SECOND / hz);
+ new_irs += (ticks - tw->t_starttime) * (MS_ISN_BYTES_PER_SECOND / hz);
+
+ if (SEQ_GT(new_iss, tw->snd_nxt) && SEQ_GT(new_irs, tw->rcv_nxt))
+ return (1);
+ else
+ return (0);
+}
+#endif
+
+/*
+ * Returns 1 if the TIME_WAIT state was killed and we should start over,
+ * looking for a pcb in the listen state. Returns 0 otherwise.
+ */
+int
+tcp_twcheck(struct inpcb *inp, struct tcpopt *to, struct tcphdr *th,
+ struct mbuf *m, int tlen)
+{
+ struct tcptw *tw;
+ int thflags;
+ tcp_seq seq;
+
+ /* tcbinfo lock required for tcp_twclose(), tcp_tw_2msl_reset(). */
+ INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
+ INP_WLOCK_ASSERT(inp);
+
+ /*
+ * XXXRW: Time wait state for inpcb has been recycled, but inpcb is
+ * still present. This is undesirable, but temporarily necessary
+ * until we work out how to handle inpcbs whose timewait state has
+ * been removed.
+ */
+ tw = intotw(inp);
+ if (tw == NULL)
+ goto drop;
+
+ thflags = th->th_flags;
+
+ /*
+ * NOTE: for FIN_WAIT_2 (to be added later),
+ * must validate sequence number before accepting RST
+ */
+
+ /*
+ * If the segment contains RST:
+ * Drop the segment - see Stevens, vol. 2, p. 964 and
+ * RFC 1337.
+ */
+ if (thflags & TH_RST)
+ goto drop;
+
+#if 0
+/* PAWS not needed at the moment */
+ /*
+ * RFC 1323 PAWS: If we have a timestamp reply on this segment
+ * and it's less than ts_recent, drop it.
+ */
+ if ((to.to_flags & TOF_TS) != 0 && tp->ts_recent &&
+ TSTMP_LT(to.to_tsval, tp->ts_recent)) {
+ if ((thflags & TH_ACK) == 0)
+ goto drop;
+ goto ack;
+ }
+ /*
+ * ts_recent is never updated because we never accept new segments.
+ */
+#endif
+
+ /*
+ * If a new connection request is received
+ * while in TIME_WAIT, drop the old connection
+ * and start over if the sequence numbers
+ * are above the previous ones.
+ */
+ if ((thflags & TH_SYN) && SEQ_GT(th->th_seq, tw->rcv_nxt)) {
+ tcp_twclose(tw, 0);
+ return (1);
+ }
+
+ /*
+ * Drop the segment if it does not contain an ACK.
+ */
+ if ((thflags & TH_ACK) == 0)
+ goto drop;
+
+ /*
+ * Reset the 2MSL timer if this is a duplicate FIN.
+ */
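+ /*
+ * seq + 1 accounts for the FIN octet itself; a match means the
+ * peer has retransmitted the FIN we already acknowledged.
+ */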
+ if (thflags & TH_FIN) {
+ seq = th->th_seq + tlen + (thflags & TH_SYN ? 1 : 0);
+ if (seq + 1 == tw->rcv_nxt)
+ tcp_tw_2msl_reset(tw, 1);
+ }
+
+ /*
+ * Acknowledge the segment if it has data or is not a duplicate ACK.
+ */
+ if (thflags != TH_ACK || tlen != 0 ||
+ th->th_seq != tw->rcv_nxt || th->th_ack != tw->snd_nxt)
+ tcp_twrespond(tw, TH_ACK);
+drop:
+ INP_WUNLOCK(inp);
+ m_freem(m);
+ return (0);
+}
+
+void
+tcp_twclose(struct tcptw *tw, int reuse)
+{
+ struct socket *so;
+ struct inpcb *inp;
+
+ /*
+ * At this point, we are in one of two situations:
+ *
+ * (1) We have no socket, just an inpcb<->twtcp pair. We can free
+ * all state.
+ *
+ * (2) We have a socket -- if we own a reference, release it and
+ * notify the socket layer.
+ */
+ inp = tw->tw_inpcb;
+ KASSERT((inp->inp_flags & INP_TIMEWAIT), ("tcp_twclose: !timewait"));
+ KASSERT(intotw(inp) == tw, ("tcp_twclose: inp_ppcb != tw"));
+ INP_INFO_WLOCK_ASSERT(&V_tcbinfo); /* tcp_tw_2msl_stop(). */
+ INP_WLOCK_ASSERT(inp);
+
+ tw->tw_inpcb = NULL;
+ tcp_tw_2msl_stop(tw);
+ inp->inp_ppcb = NULL;
+ in_pcbdrop(inp);
+
+ so = inp->inp_socket;
+ if (so != NULL) {
+ /*
+ * If there's a socket, handle two cases: either we own a
+ * strong reference, which we now release, or we do not, in
+ * which case another reference exists (XXXRW: think about
+ * this more) and no action is needed.
+ */
+ if (inp->inp_flags & INP_SOCKREF) {
+ inp->inp_flags &= ~INP_SOCKREF;
+ INP_WUNLOCK(inp);
+ ACCEPT_LOCK();
+ SOCK_LOCK(so);
+ KASSERT(so->so_state & SS_PROTOREF,
+ ("tcp_twclose: INP_SOCKREF && !SS_PROTOREF"));
+ so->so_state &= ~SS_PROTOREF;
+ sofree(so);
+ } else {
+ /*
+ * If we don't own the only reference, the socket and
+ * inpcb need to be left around to be handled by
+ * tcp_usr_detach() later.
+ */
+ INP_WUNLOCK(inp);
+ }
+ } else
+ in_pcbfree(inp);
+ TCPSTAT_INC(tcps_closed);
+ crfree(tw->tw_cred);
+ tw->tw_cred = NULL;
+ if (reuse)
+ return;
+ uma_zfree(V_tcptw_zone, tw);
+}
+
+int
+tcp_twrespond(struct tcptw *tw, int flags)
+{
+ struct inpcb *inp = tw->tw_inpcb;
+ struct tcphdr *th;
+ struct mbuf *m;
+ struct ip *ip = NULL;
+ u_int hdrlen, optlen;
+ int error;
+ struct tcpopt to;
+#ifdef INET6
+ struct ip6_hdr *ip6 = NULL;
+ int isipv6 = inp->inp_inc.inc_flags & INC_ISIPV6;
+#endif
+
+ INP_WLOCK_ASSERT(inp);
+
+ m = m_gethdr(M_DONTWAIT, MT_DATA);
+ if (m == NULL)
+ return (ENOBUFS);
+ m->m_data += max_linkhdr;
+
+#ifdef MAC
+ mac_inpcb_create_mbuf(inp, m);
+#endif
+
+#ifdef INET6
+ if (isipv6) {
+ hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
+ ip6 = mtod(m, struct ip6_hdr *);
+ th = (struct tcphdr *)(ip6 + 1);
+ tcpip_fillheaders(inp, ip6, th);
+ } else
+#endif
+ {
+ hdrlen = sizeof(struct tcpiphdr);
+ ip = mtod(m, struct ip *);
+ th = (struct tcphdr *)(ip + 1);
+ tcpip_fillheaders(inp, ip, th);
+ }
+ to.to_flags = 0;
+
+ /*
+ * Send a timestamp and echo-reply if both our side and our peer
+ * have sent timestamps in our SYNs and this is not a RST.
+ */
+ if (tw->t_recent && flags == TH_ACK) {
+ to.to_flags |= TOF_TS;
+ to.to_tsval = ticks + tw->ts_offset;
+ to.to_tsecr = tw->t_recent;
+ }
+ optlen = tcp_addoptions(&to, (u_char *)(th + 1));
+
+ m->m_len = hdrlen + optlen;
+ m->m_pkthdr.len = m->m_len;
+
+ KASSERT(max_linkhdr + m->m_len <= MHLEN, ("tcptw: mbuf too small"));
+
+ th->th_seq = htonl(tw->snd_nxt);
+ th->th_ack = htonl(tw->rcv_nxt);
+ th->th_off = (sizeof(struct tcphdr) + optlen) >> 2;
+ th->th_flags = flags;
+ th->th_win = htons(tw->last_win);
+
+#ifdef INET6
+ if (isipv6) {
+ th->th_sum = in6_cksum(m, IPPROTO_TCP, sizeof(struct ip6_hdr),
+ sizeof(struct tcphdr) + optlen);
+ ip6->ip6_hlim = in6_selecthlim(inp, NULL);
+ error = ip6_output(m, inp->in6p_outputopts, NULL,
+ (tw->tw_so_options & SO_DONTROUTE), NULL, NULL, inp);
+ } else
+#endif
+ {
+ th->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
+ htons(sizeof(struct tcphdr) + optlen + IPPROTO_TCP));
+ m->m_pkthdr.csum_flags = CSUM_TCP;
+ m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
+ ip->ip_len = m->m_pkthdr.len;
+ if (V_path_mtu_discovery)
+ ip->ip_off |= IP_DF;
+ error = ip_output(m, inp->inp_options, NULL,
+ ((tw->tw_so_options & SO_DONTROUTE) ? IP_ROUTETOIF : 0),
+ NULL, inp);
+ }
+ if (flags & TH_ACK)
+ TCPSTAT_INC(tcps_sndacks);
+ else
+ TCPSTAT_INC(tcps_sndctrl);
+ TCPSTAT_INC(tcps_sndtotal);
+ return (error);
+}
+
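+/*
+ * Arm or re-arm the 2MSL timer for a timewait entry. Entries expire in
+ * insertion order, so a re-armed entry is removed and re-queued at the
+ * tail to keep the queue sorted by expiry time.
+ */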
+static void
+tcp_tw_2msl_reset(struct tcptw *tw, int rearm)
+{
+
+ INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
+ INP_WLOCK_ASSERT(tw->tw_inpcb);
+ if (rearm)
+ TAILQ_REMOVE(&V_twq_2msl, tw, tw_2msl);
+ tw->tw_time = ticks + 2 * tcp_msl;
+ TAILQ_INSERT_TAIL(&V_twq_2msl, tw, tw_2msl);
+}
+
+static void
+tcp_tw_2msl_stop(struct tcptw *tw)
+{
+
+ INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
+ TAILQ_REMOVE(&V_twq_2msl, tw, tw_2msl);
+}
+
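+/*
+ * Reap expired entries from the head of the 2MSL queue. When reuse is
+ * set, the oldest entry is closed unconditionally and returned so the
+ * caller can recycle its tcptw storage; otherwise NULL is returned once
+ * no expired entries remain.
+ */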
+struct tcptw *
+tcp_tw_2msl_scan(int reuse)
+{
+ struct tcptw *tw;
+
+ INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
+ for (;;) {
+ tw = TAILQ_FIRST(&V_twq_2msl);
+ if (tw == NULL || (!reuse && (tw->tw_time - ticks) > 0))
+ break;
+ INP_WLOCK(tw->tw_inpcb);
+ tcp_twclose(tw, reuse);
+ if (reuse)
+ return (tw);
+ }
+ return (NULL);
+}
diff --git a/rtems/freebsd/netinet/tcp_usrreq.c b/rtems/freebsd/netinet/tcp_usrreq.c
new file mode 100644
index 00000000..dd50a318
--- /dev/null
+++ b/rtems/freebsd/netinet/tcp_usrreq.c
@@ -0,0 +1,1886 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 1982, 1986, 1988, 1993
+ * The Regents of the University of California.
+ * Copyright (c) 2006-2007 Robert N. M. Watson
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * From: @(#)tcp_usrreq.c 8.2 (Berkeley) 1/3/94
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/freebsd/local/opt_ddb.h>
+#include <rtems/freebsd/local/opt_inet.h>
+#include <rtems/freebsd/local/opt_inet6.h>
+#include <rtems/freebsd/local/opt_tcpdebug.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/sysctl.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#ifdef INET6
+#include <rtems/freebsd/sys/domain.h>
+#endif /* INET6 */
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/socketvar.h>
+#include <rtems/freebsd/sys/protosw.h>
+#include <rtems/freebsd/sys/proc.h>
+#include <rtems/freebsd/sys/jail.h>
+
+#ifdef DDB
+#include <rtems/freebsd/ddb/ddb.h>
+#endif
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/route.h>
+#include <rtems/freebsd/net/vnet.h>
+
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/in_systm.h>
+#ifdef INET6
+#include <rtems/freebsd/netinet/ip6.h>
+#endif
+#include <rtems/freebsd/netinet/in_pcb.h>
+#ifdef INET6
+#include <rtems/freebsd/netinet6/in6_pcb.h>
+#endif
+#include <rtems/freebsd/netinet/in_var.h>
+#include <rtems/freebsd/netinet/ip_var.h>
+#ifdef INET6
+#include <rtems/freebsd/netinet6/ip6_var.h>
+#include <rtems/freebsd/netinet6/scope6_var.h>
+#endif
+#include <rtems/freebsd/netinet/tcp.h>
+#include <rtems/freebsd/netinet/tcp_fsm.h>
+#include <rtems/freebsd/netinet/tcp_seq.h>
+#include <rtems/freebsd/netinet/tcp_timer.h>
+#include <rtems/freebsd/netinet/tcp_var.h>
+#include <rtems/freebsd/netinet/tcpip.h>
+#ifdef TCPDEBUG
+#include <rtems/freebsd/netinet/tcp_debug.h>
+#endif
+#include <rtems/freebsd/netinet/tcp_offload.h>
+
+/*
+ * TCP protocol interface to socket abstraction.
+ */
+static int tcp_attach(struct socket *);
+static int tcp_connect(struct tcpcb *, struct sockaddr *,
+ struct thread *td);
+#ifdef INET6
+static int tcp6_connect(struct tcpcb *, struct sockaddr *,
+ struct thread *td);
+#endif /* INET6 */
+static void tcp_disconnect(struct tcpcb *);
+static void tcp_usrclosed(struct tcpcb *);
+static void tcp_fill_info(struct tcpcb *, struct tcp_info *);
+
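+/*
+ * The TCPDEBUG* macros snapshot the connection state on entry so that
+ * tcp_trace() can record the transition when SO_DEBUG is set on the
+ * socket.
+ */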
+#ifdef TCPDEBUG
+#define TCPDEBUG0 int ostate = 0
+#define TCPDEBUG1() ostate = tp ? tp->t_state : 0
+#define TCPDEBUG2(req) if (tp && (so->so_options & SO_DEBUG)) \
+ tcp_trace(TA_USER, ostate, tp, 0, 0, req)
+#else
+#define TCPDEBUG0
+#define TCPDEBUG1()
+#define TCPDEBUG2(req)
+#endif
+
+/*
+ * TCP attaches to the socket via pru_attach(), reserving buffer space
+ * and allocating an internet control block.
+ */
+static int
+tcp_usr_attach(struct socket *so, int proto, struct thread *td)
+{
+ struct inpcb *inp;
+ struct tcpcb *tp = NULL;
+ int error;
+ TCPDEBUG0;
+
+ inp = sotoinpcb(so);
+ KASSERT(inp == NULL, ("tcp_usr_attach: inp != NULL"));
+ TCPDEBUG1();
+
+ error = tcp_attach(so);
+ if (error)
+ goto out;
+
+ if ((so->so_options & SO_LINGER) && so->so_linger == 0)
+ so->so_linger = TCP_LINGERTIME;
+
+ inp = sotoinpcb(so);
+ tp = intotcpcb(inp);
+out:
+ TCPDEBUG2(PRU_ATTACH);
+ return error;
+}
+
+/*
+ * tcp_detach is called when the socket layer loses its final reference
+ * to the socket, be it a file descriptor reference, a reference from TCP,
+ * etc. At this point, there is only one case in which we will keep around
+ * inpcb state: time wait.
+ *
+ * This function can probably be re-absorbed back into tcp_usr_detach() now
+ * that there is a single detach path.
+ */
+static void
+tcp_detach(struct socket *so, struct inpcb *inp)
+{
+ struct tcpcb *tp;
+
+ INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
+ INP_WLOCK_ASSERT(inp);
+
+ KASSERT(so->so_pcb == inp, ("tcp_detach: so_pcb != inp"));
+ KASSERT(inp->inp_socket == so, ("tcp_detach: inp_socket != so"));
+
+ tp = intotcpcb(inp);
+
+ if (inp->inp_flags & INP_TIMEWAIT) {
+ /*
+ * There are two cases to handle: one in which the time wait
+ * state is being discarded (INP_DROPPED), and one in which
+ * this connection will remain in timewait. In the former,
+ * it is time to discard all state (except tcptw, which has
+ * already been discarded by the timewait close code, which
+ * should be further up the call stack somewhere). In the
+ * latter case, we detach from the socket, but leave the pcb
+ * present until timewait ends.
+ *
+ * XXXRW: Would it be cleaner to free the tcptw here?
+ */
+ if (inp->inp_flags & INP_DROPPED) {
+ KASSERT(tp == NULL, ("tcp_detach: INP_TIMEWAIT && "
+ "INP_DROPPED && tp != NULL"));
+ in_pcbdetach(inp);
+ in_pcbfree(inp);
+ } else {
+ in_pcbdetach(inp);
+ INP_WUNLOCK(inp);
+ }
+ } else {
+ /*
+ * If the connection is not in timewait, we consider two
+ * conditions: one in which no further processing is
+ * necessary (dropped || embryonic), and one in which TCP is
+ * not yet done, but no longer requires the socket, so the
+ * pcb will persist for the time being.
+ *
+ * XXXRW: Does the second case still occur?
+ */
+ if (inp->inp_flags & INP_DROPPED ||
+ tp->t_state < TCPS_SYN_SENT) {
+ tcp_discardcb(tp);
+ in_pcbdetach(inp);
+ in_pcbfree(inp);
+ } else
+ in_pcbdetach(inp);
+ }
+}
+
+/*
+ * pru_detach() detaches the TCP protocol from the socket.
+ * If the protocol state is non-embryonic, then can't
+ * do this directly: have to initiate a pru_disconnect(),
+ * which may finish later; embryonic TCB's can just
+ * be discarded here.
+ */
+static void
+tcp_usr_detach(struct socket *so)
+{
+ struct inpcb *inp;
+
+ inp = sotoinpcb(so);
+ KASSERT(inp != NULL, ("tcp_usr_detach: inp == NULL"));
+ INP_INFO_WLOCK(&V_tcbinfo);
+ INP_WLOCK(inp);
+ KASSERT(inp->inp_socket != NULL,
+ ("tcp_usr_detach: inp_socket == NULL"));
+ tcp_detach(so, inp);
+ INP_INFO_WUNLOCK(&V_tcbinfo);
+}
+
+/*
+ * Give the socket an address.
+ */
+static int
+tcp_usr_bind(struct socket *so, struct sockaddr *nam, struct thread *td)
+{
+ int error = 0;
+ struct inpcb *inp;
+ struct tcpcb *tp = NULL;
+ struct sockaddr_in *sinp;
+
+ sinp = (struct sockaddr_in *)nam;
+ if (nam->sa_len != sizeof (*sinp))
+ return (EINVAL);
+ /*
+ * Must check for multicast addresses and disallow binding
+ * to them.
+ */
+ if (sinp->sin_family == AF_INET &&
+ IN_MULTICAST(ntohl(sinp->sin_addr.s_addr)))
+ return (EAFNOSUPPORT);
+
+ TCPDEBUG0;
+ INP_INFO_WLOCK(&V_tcbinfo);
+ inp = sotoinpcb(so);
+ KASSERT(inp != NULL, ("tcp_usr_bind: inp == NULL"));
+ INP_WLOCK(inp);
+ if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {
+ error = EINVAL;
+ goto out;
+ }
+ tp = intotcpcb(inp);
+ TCPDEBUG1();
+ error = in_pcbbind(inp, nam, td->td_ucred);
+out:
+ TCPDEBUG2(PRU_BIND);
+ INP_WUNLOCK(inp);
+ INP_INFO_WUNLOCK(&V_tcbinfo);
+
+ return (error);
+}
+
+#ifdef INET6
+static int
+tcp6_usr_bind(struct socket *so, struct sockaddr *nam, struct thread *td)
+{
+ int error = 0;
+ struct inpcb *inp;
+ struct tcpcb *tp = NULL;
+ struct sockaddr_in6 *sin6p;
+
+ sin6p = (struct sockaddr_in6 *)nam;
+ if (nam->sa_len != sizeof (*sin6p))
+ return (EINVAL);
+ /*
+ * Must check for multicast addresses and disallow binding
+ * to them.
+ */
+ if (sin6p->sin6_family == AF_INET6 &&
+ IN6_IS_ADDR_MULTICAST(&sin6p->sin6_addr))
+ return (EAFNOSUPPORT);
+
+ TCPDEBUG0;
+ INP_INFO_WLOCK(&V_tcbinfo);
+ inp = sotoinpcb(so);
+ KASSERT(inp != NULL, ("tcp6_usr_bind: inp == NULL"));
+ INP_WLOCK(inp);
+ if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {
+ error = EINVAL;
+ goto out;
+ }
+ tp = intotcpcb(inp);
+ TCPDEBUG1();
+ inp->inp_vflag &= ~INP_IPV4;
+ inp->inp_vflag |= INP_IPV6;
+ if ((inp->inp_flags & IN6P_IPV6_V6ONLY) == 0) {
+ if (IN6_IS_ADDR_UNSPECIFIED(&sin6p->sin6_addr))
+ inp->inp_vflag |= INP_IPV4;
+ else if (IN6_IS_ADDR_V4MAPPED(&sin6p->sin6_addr)) {
+ struct sockaddr_in sin;
+
+ in6_sin6_2_sin(&sin, sin6p);
+ inp->inp_vflag |= INP_IPV4;
+ inp->inp_vflag &= ~INP_IPV6;
+ error = in_pcbbind(inp, (struct sockaddr *)&sin,
+ td->td_ucred);
+ goto out;
+ }
+ }
+ error = in6_pcbbind(inp, nam, td->td_ucred);
+out:
+ TCPDEBUG2(PRU_BIND);
+ INP_WUNLOCK(inp);
+ INP_INFO_WUNLOCK(&V_tcbinfo);
+ return (error);
+}
+#endif /* INET6 */
+
+/*
+ * Prepare to accept connections.
+ */
+static int
+tcp_usr_listen(struct socket *so, int backlog, struct thread *td)
+{
+ int error = 0;
+ struct inpcb *inp;
+ struct tcpcb *tp = NULL;
+
+ TCPDEBUG0;
+ INP_INFO_WLOCK(&V_tcbinfo);
+ inp = sotoinpcb(so);
+ KASSERT(inp != NULL, ("tcp_usr_listen: inp == NULL"));
+ INP_WLOCK(inp);
+ if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {
+ error = EINVAL;
+ goto out;
+ }
+ tp = intotcpcb(inp);
+ TCPDEBUG1();
+ SOCK_LOCK(so);
+ error = solisten_proto_check(so);
+ if (error == 0 && inp->inp_lport == 0)
+ error = in_pcbbind(inp, (struct sockaddr *)0, td->td_ucred);
+ if (error == 0) {
+ tp->t_state = TCPS_LISTEN;
+ solisten_proto(so, backlog);
+ tcp_offload_listen_open(tp);
+ }
+ SOCK_UNLOCK(so);
+
+out:
+ TCPDEBUG2(PRU_LISTEN);
+ INP_WUNLOCK(inp);
+ INP_INFO_WUNLOCK(&V_tcbinfo);
+ return (error);
+}
+
+#ifdef INET6
+static int
+tcp6_usr_listen(struct socket *so, int backlog, struct thread *td)
+{
+ int error = 0;
+ struct inpcb *inp;
+ struct tcpcb *tp = NULL;
+
+ TCPDEBUG0;
+ INP_INFO_WLOCK(&V_tcbinfo);
+ inp = sotoinpcb(so);
+ KASSERT(inp != NULL, ("tcp6_usr_listen: inp == NULL"));
+ INP_WLOCK(inp);
+ if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {
+ error = EINVAL;
+ goto out;
+ }
+ tp = intotcpcb(inp);
+ TCPDEBUG1();
+ SOCK_LOCK(so);
+ error = solisten_proto_check(so);
+ if (error == 0 && inp->inp_lport == 0) {
+ inp->inp_vflag &= ~INP_IPV4;
+ if ((inp->inp_flags & IN6P_IPV6_V6ONLY) == 0)
+ inp->inp_vflag |= INP_IPV4;
+ error = in6_pcbbind(inp, (struct sockaddr *)0, td->td_ucred);
+ }
+ if (error == 0) {
+ tp->t_state = TCPS_LISTEN;
+ solisten_proto(so, backlog);
+ }
+ SOCK_UNLOCK(so);
+
+out:
+ TCPDEBUG2(PRU_LISTEN);
+ INP_WUNLOCK(inp);
+ INP_INFO_WUNLOCK(&V_tcbinfo);
+ return (error);
+}
+#endif /* INET6 */
+
+/*
+ * Initiate connection to peer.
+ * Create a template for use in transmissions on this connection.
+ * Enter SYN_SENT state, and mark socket as connecting.
+ * Start keep-alive timer, and seed output sequence space.
+ * Send initial segment on connection.
+ */
+static int
+tcp_usr_connect(struct socket *so, struct sockaddr *nam, struct thread *td)
+{
+ int error = 0;
+ struct inpcb *inp;
+ struct tcpcb *tp = NULL;
+ struct sockaddr_in *sinp;
+
+ sinp = (struct sockaddr_in *)nam;
+ if (nam->sa_len != sizeof (*sinp))
+ return (EINVAL);
+ /*
+ * Must disallow TCP ``connections'' to multicast addresses.
+ */
+ if (sinp->sin_family == AF_INET
+ && IN_MULTICAST(ntohl(sinp->sin_addr.s_addr)))
+ return (EAFNOSUPPORT);
+ if ((error = prison_remote_ip4(td->td_ucred, &sinp->sin_addr)) != 0)
+ return (error);
+
+ TCPDEBUG0;
+ INP_INFO_WLOCK(&V_tcbinfo);
+ inp = sotoinpcb(so);
+ KASSERT(inp != NULL, ("tcp_usr_connect: inp == NULL"));
+ INP_WLOCK(inp);
+ if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {
+ error = EINVAL;
+ goto out;
+ }
+ tp = intotcpcb(inp);
+ TCPDEBUG1();
+ if ((error = tcp_connect(tp, nam, td)) != 0)
+ goto out;
+ error = tcp_output_connect(so, nam);
+out:
+ TCPDEBUG2(PRU_CONNECT);
+ INP_WUNLOCK(inp);
+ INP_INFO_WUNLOCK(&V_tcbinfo);
+ return (error);
+}
+
+#ifdef INET6
+static int
+tcp6_usr_connect(struct socket *so, struct sockaddr *nam, struct thread *td)
+{
+ int error = 0;
+ struct inpcb *inp;
+ struct tcpcb *tp = NULL;
+ struct sockaddr_in6 *sin6p;
+
+ TCPDEBUG0;
+
+ sin6p = (struct sockaddr_in6 *)nam;
+ if (nam->sa_len != sizeof (*sin6p))
+ return (EINVAL);
+ /*
+ * Must disallow TCP ``connections'' to multicast addresses.
+ */
+ if (sin6p->sin6_family == AF_INET6
+ && IN6_IS_ADDR_MULTICAST(&sin6p->sin6_addr))
+ return (EAFNOSUPPORT);
+
+ INP_INFO_WLOCK(&V_tcbinfo);
+ inp = sotoinpcb(so);
+ KASSERT(inp != NULL, ("tcp6_usr_connect: inp == NULL"));
+ INP_WLOCK(inp);
+ if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {
+ error = EINVAL;
+ goto out;
+ }
+ tp = intotcpcb(inp);
+ TCPDEBUG1();
+ if (IN6_IS_ADDR_V4MAPPED(&sin6p->sin6_addr)) {
+ struct sockaddr_in sin;
+
+ if ((inp->inp_flags & IN6P_IPV6_V6ONLY) != 0) {
+ error = EINVAL;
+ goto out;
+ }
+
+ in6_sin6_2_sin(&sin, sin6p);
+ inp->inp_vflag |= INP_IPV4;
+ inp->inp_vflag &= ~INP_IPV6;
+ if ((error = prison_remote_ip4(td->td_ucred,
+ &sin.sin_addr)) != 0)
+ goto out;
+ if ((error = tcp_connect(tp, (struct sockaddr *)&sin, td)) != 0)
+ goto out;
+ error = tcp_output_connect(so, nam);
+ goto out;
+ }
+ inp->inp_vflag &= ~INP_IPV4;
+ inp->inp_vflag |= INP_IPV6;
+ inp->inp_inc.inc_flags |= INC_ISIPV6;
+ if ((error = prison_remote_ip6(td->td_ucred, &sin6p->sin6_addr)) != 0)
+ goto out;
+ if ((error = tcp6_connect(tp, nam, td)) != 0)
+ goto out;
+ error = tcp_output_connect(so, nam);
+
+out:
+ TCPDEBUG2(PRU_CONNECT);
+ INP_WUNLOCK(inp);
+ INP_INFO_WUNLOCK(&V_tcbinfo);
+ return (error);
+}
+#endif /* INET6 */
+
+/*
+ * Initiate disconnect from peer.
+ * If connection never passed embryonic stage, just drop;
+ * else if don't need to let data drain, then can just drop anyway,
+ * else have to begin TCP shutdown process: mark socket disconnecting,
+ * drain unread data, state switch to reflect user close, and
+ * send segment (e.g. FIN) to peer. Socket will be really disconnected
+ * when peer sends FIN and acks ours.
+ *
+ * SHOULD IMPLEMENT LATER PRU_CONNECT VIA REALLOC TCPCB.
+ */
+static int
+tcp_usr_disconnect(struct socket *so)
+{
+ struct inpcb *inp;
+ struct tcpcb *tp = NULL;
+ int error = 0;
+
+ TCPDEBUG0;
+ INP_INFO_WLOCK(&V_tcbinfo);
+ inp = sotoinpcb(so);
+ KASSERT(inp != NULL, ("tcp_usr_disconnect: inp == NULL"));
+ INP_WLOCK(inp);
+ if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {
+ error = ECONNRESET;
+ goto out;
+ }
+ tp = intotcpcb(inp);
+ TCPDEBUG1();
+ tcp_disconnect(tp);
+out:
+ TCPDEBUG2(PRU_DISCONNECT);
+ INP_WUNLOCK(inp);
+ INP_INFO_WUNLOCK(&V_tcbinfo);
+ return (error);
+}
+
+/*
+ * Accept a connection. Essentially all the work is done at higher levels;
+ * just return the address of the peer, storing through addr.
+ *
+ * The rationale for acquiring the tcbinfo lock here is somewhat complicated,
+ * and is described in detail in the commit log entry for r175612. Acquiring
+ * it delays an accept(2) racing with sonewconn(), which inserts the socket
+ * before the inpcb address/port fields are initialized. A better fix would
+ * prevent the socket from being placed in the listen queue until all fields
+ * are fully initialized.
+ */
+static int
+tcp_usr_accept(struct socket *so, struct sockaddr **nam)
+{
+ int error = 0;
+ struct inpcb *inp = NULL;
+ struct tcpcb *tp = NULL;
+ struct in_addr addr;
+ in_port_t port = 0;
+ TCPDEBUG0;
+
+ if (so->so_state & SS_ISDISCONNECTED)
+ return (ECONNABORTED);
+
+ inp = sotoinpcb(so);
+ KASSERT(inp != NULL, ("tcp_usr_accept: inp == NULL"));
+ INP_INFO_RLOCK(&V_tcbinfo);
+ INP_WLOCK(inp);
+ if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {
+ error = ECONNABORTED;
+ goto out;
+ }
+ tp = intotcpcb(inp);
+ TCPDEBUG1();
+
+ /*
+ * We inline in_getpeeraddr and COMMON_END here, so that we can
+ * copy the data of interest and defer the malloc until after we
+ * release the lock.
+ */
+ port = inp->inp_fport;
+ addr = inp->inp_faddr;
+
+out:
+ TCPDEBUG2(PRU_ACCEPT);
+ INP_WUNLOCK(inp);
+ INP_INFO_RUNLOCK(&V_tcbinfo);
+ if (error == 0)
+ *nam = in_sockaddr(port, &addr);
+ return error;
+}
+
+#ifdef INET6
+static int
+tcp6_usr_accept(struct socket *so, struct sockaddr **nam)
+{
+ struct inpcb *inp = NULL;
+ int error = 0;
+ struct tcpcb *tp = NULL;
+ struct in_addr addr;
+ struct in6_addr addr6;
+ in_port_t port = 0;
+ int v4 = 0;
+ TCPDEBUG0;
+
+ if (so->so_state & SS_ISDISCONNECTED)
+ return (ECONNABORTED);
+
+ inp = sotoinpcb(so);
+ KASSERT(inp != NULL, ("tcp6_usr_accept: inp == NULL"));
+ INP_WLOCK(inp);
+ if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {
+ error = ECONNABORTED;
+ goto out;
+ }
+ tp = intotcpcb(inp);
+ TCPDEBUG1();
+
+ /*
+ * We inline in6_mapped_peeraddr and COMMON_END here, so that we can
+ * copy the data of interest and defer the malloc until after we
+ * release the lock.
+ */
+ if (inp->inp_vflag & INP_IPV4) {
+ v4 = 1;
+ port = inp->inp_fport;
+ addr = inp->inp_faddr;
+ } else {
+ port = inp->inp_fport;
+ addr6 = inp->in6p_faddr;
+ }
+
+out:
+ TCPDEBUG2(PRU_ACCEPT);
+ INP_WUNLOCK(inp);
+ if (error == 0) {
+ if (v4)
+ *nam = in6_v4mapsin6_sockaddr(port, &addr);
+ else
+ *nam = in6_sockaddr(port, &addr6);
+ }
+ return error;
+}
+#endif /* INET6 */
+
+/*
+ * Mark the connection as being incapable of further output.
+ */
+static int
+tcp_usr_shutdown(struct socket *so)
+{
+ int error = 0;
+ struct inpcb *inp;
+ struct tcpcb *tp = NULL;
+
+ TCPDEBUG0;
+ INP_INFO_WLOCK(&V_tcbinfo);
+ inp = sotoinpcb(so);
+ KASSERT(inp != NULL, ("inp == NULL"));
+ INP_WLOCK(inp);
+ if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {
+ error = ECONNRESET;
+ goto out;
+ }
+ tp = intotcpcb(inp);
+ TCPDEBUG1();
+ socantsendmore(so);
+ tcp_usrclosed(tp);
+ if (!(inp->inp_flags & INP_DROPPED))
+ error = tcp_output_disconnect(tp);
+
+out:
+ TCPDEBUG2(PRU_SHUTDOWN);
+ INP_WUNLOCK(inp);
+ INP_INFO_WUNLOCK(&V_tcbinfo);
+
+ return (error);
+}
+
+/*
+ * After a receive, possibly send window update to peer.
+ */
+static int
+tcp_usr_rcvd(struct socket *so, int flags)
+{
+ struct inpcb *inp;
+ struct tcpcb *tp = NULL;
+ int error = 0;
+
+ TCPDEBUG0;
+ inp = sotoinpcb(so);
+ KASSERT(inp != NULL, ("tcp_usr_rcvd: inp == NULL"));
+ INP_WLOCK(inp);
+ if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {
+ error = ECONNRESET;
+ goto out;
+ }
+ tp = intotcpcb(inp);
+ TCPDEBUG1();
+ tcp_output_rcvd(tp);
+
+out:
+ TCPDEBUG2(PRU_RCVD);
+ INP_WUNLOCK(inp);
+ return (error);
+}
+
+/*
+ * Do a send by putting data in output queue and updating urgent
+ * marker if URG set. Possibly send more data. Unlike the other
+ * pru_*() routines, the mbuf chains are our responsibility. We
+ * must either enqueue them or free them. The other pru_* routines
+ * generally are caller-frees.
+ */
+static int
+tcp_usr_send(struct socket *so, int flags, struct mbuf *m,
+ struct sockaddr *nam, struct mbuf *control, struct thread *td)
+{
+ int error = 0;
+ struct inpcb *inp;
+ struct tcpcb *tp = NULL;
+ int headlocked = 0;
+#ifdef INET6
+ int isipv6;
+#endif
+ TCPDEBUG0;
+
+ /*
+ * We require the pcbinfo lock in two cases:
+ *
+ * (1) An implied connect is taking place, which can result in
+ * binding IPs and ports and hence modification of the pcb hash
+ * chains.
+ *
+ * (2) PRUS_EOF is set, resulting in explicit close on the send.
+ */
+ if ((nam != NULL) || (flags & PRUS_EOF)) {
+ INP_INFO_WLOCK(&V_tcbinfo);
+ headlocked = 1;
+ }
+ inp = sotoinpcb(so);
+ KASSERT(inp != NULL, ("tcp_usr_send: inp == NULL"));
+ INP_WLOCK(inp);
+ if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {
+ if (control)
+ m_freem(control);
+ if (m)
+ m_freem(m);
+ error = ECONNRESET;
+ goto out;
+ }
+#ifdef INET6
+ isipv6 = nam && nam->sa_family == AF_INET6;
+#endif /* INET6 */
+ tp = intotcpcb(inp);
+ TCPDEBUG1();
+ if (control) {
+ /* TCP doesn't do control messages (rights, creds, etc) */
+ if (control->m_len) {
+ m_freem(control);
+ if (m)
+ m_freem(m);
+ error = EINVAL;
+ goto out;
+ }
+ m_freem(control); /* empty control, just free it */
+ }
+ if (!(flags & PRUS_OOB)) {
+ sbappendstream(&so->so_snd, m);
+ if (nam && tp->t_state < TCPS_SYN_SENT) {
+ /*
+ * Do implied connect if not yet connected,
+ * initialize window to default value, and
+ * initialize maxseg/maxopd using peer's cached
+ * MSS.
+ */
+ INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
+#ifdef INET6
+ if (isipv6)
+ error = tcp6_connect(tp, nam, td);
+ else
+#endif /* INET6 */
+ error = tcp_connect(tp, nam, td);
+ if (error)
+ goto out;
+ tp->snd_wnd = TTCP_CLIENT_SND_WND;
+ tcp_mss(tp, -1);
+ }
+ if (flags & PRUS_EOF) {
+ /*
+ * Close the send side of the connection after
+ * the data is sent.
+ */
+ INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
+ socantsendmore(so);
+ tcp_usrclosed(tp);
+ }
+ if (headlocked) {
+ INP_INFO_WUNLOCK(&V_tcbinfo);
+ headlocked = 0;
+ }
+ if (!(inp->inp_flags & INP_DROPPED)) {
+ if (flags & PRUS_MORETOCOME)
+ tp->t_flags |= TF_MORETOCOME;
+ error = tcp_output_send(tp);
+ if (flags & PRUS_MORETOCOME)
+ tp->t_flags &= ~TF_MORETOCOME;
+ }
+ } else {
+ /*
+ * XXXRW: PRUS_EOF not implemented with PRUS_OOB?
+ */
+ SOCKBUF_LOCK(&so->so_snd);
+ if (sbspace(&so->so_snd) < -512) {
+ SOCKBUF_UNLOCK(&so->so_snd);
+ m_freem(m);
+ error = ENOBUFS;
+ goto out;
+ }
+ /*
+ * According to RFC961 (Assigned Protocols),
+ * the urgent pointer points to the last octet
+ * of urgent data. We continue, however,
+ * to consider it to indicate the first octet
+ * of data past the urgent section.
+ * Otherwise, snd_up should be one lower.
+ */
+ sbappendstream_locked(&so->so_snd, m);
+ SOCKBUF_UNLOCK(&so->so_snd);
+ if (nam && tp->t_state < TCPS_SYN_SENT) {
+ /*
+ * Do implied connect if not yet connected,
+ * initialize window to default value, and
+ * initialize maxseg/maxopd using peer's cached
+ * MSS.
+ */
+ INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
+#ifdef INET6
+ if (isipv6)
+ error = tcp6_connect(tp, nam, td);
+ else
+#endif /* INET6 */
+ error = tcp_connect(tp, nam, td);
+ if (error)
+ goto out;
+ tp->snd_wnd = TTCP_CLIENT_SND_WND;
+ tcp_mss(tp, -1);
+ INP_INFO_WUNLOCK(&V_tcbinfo);
+ headlocked = 0;
+ } else if (nam) {
+ INP_INFO_WUNLOCK(&V_tcbinfo);
+ headlocked = 0;
+ }
+ tp->snd_up = tp->snd_una + so->so_snd.sb_cc;
+ tp->t_flags |= TF_FORCEDATA;
+ error = tcp_output_send(tp);
+ tp->t_flags &= ~TF_FORCEDATA;
+ }
+out:
+ TCPDEBUG2((flags & PRUS_OOB) ? PRU_SENDOOB :
+ ((flags & PRUS_EOF) ? PRU_SEND_EOF : PRU_SEND));
+ INP_WUNLOCK(inp);
+ if (headlocked)
+ INP_INFO_WUNLOCK(&V_tcbinfo);
+ return (error);
+}
+
+/*
+ * Abort the TCP. Drop the connection abruptly.
+ */
+static void
+tcp_usr_abort(struct socket *so)
+{
+ struct inpcb *inp;
+ struct tcpcb *tp = NULL;
+ TCPDEBUG0;
+
+ inp = sotoinpcb(so);
+ KASSERT(inp != NULL, ("tcp_usr_abort: inp == NULL"));
+
+ INP_INFO_WLOCK(&V_tcbinfo);
+ INP_WLOCK(inp);
+ KASSERT(inp->inp_socket != NULL,
+ ("tcp_usr_abort: inp_socket == NULL"));
+
+ /*
+ * If we still have full TCP state, and we're not dropped, drop.
+ */
+ if (!(inp->inp_flags & INP_TIMEWAIT) &&
+ !(inp->inp_flags & INP_DROPPED)) {
+ tp = intotcpcb(inp);
+ TCPDEBUG1();
+ tcp_drop(tp, ECONNABORTED);
+ TCPDEBUG2(PRU_ABORT);
+ }
+ if (!(inp->inp_flags & INP_DROPPED)) {
+ SOCK_LOCK(so);
+ so->so_state |= SS_PROTOREF;
+ SOCK_UNLOCK(so);
+ inp->inp_flags |= INP_SOCKREF;
+ }
+ INP_WUNLOCK(inp);
+ INP_INFO_WUNLOCK(&V_tcbinfo);
+}
+
+/*
+ * TCP socket is closed. Start friendly disconnect.
+ */
+static void
+tcp_usr_close(struct socket *so)
+{
+ struct inpcb *inp;
+ struct tcpcb *tp = NULL;
+ TCPDEBUG0;
+
+ inp = sotoinpcb(so);
+ KASSERT(inp != NULL, ("tcp_usr_close: inp == NULL"));
+
+ INP_INFO_WLOCK(&V_tcbinfo);
+ INP_WLOCK(inp);
+ KASSERT(inp->inp_socket != NULL,
+ ("tcp_usr_close: inp_socket == NULL"));
+
+ /*
+ * If we still have full TCP state, and we're not dropped, initiate
+ * a disconnect.
+ */
+ if (!(inp->inp_flags & INP_TIMEWAIT) &&
+ !(inp->inp_flags & INP_DROPPED)) {
+ tp = intotcpcb(inp);
+ TCPDEBUG1();
+ tcp_disconnect(tp);
+ TCPDEBUG2(PRU_CLOSE);
+ }
+ if (!(inp->inp_flags & INP_DROPPED)) {
+ SOCK_LOCK(so);
+ so->so_state |= SS_PROTOREF;
+ SOCK_UNLOCK(so);
+ inp->inp_flags |= INP_SOCKREF;
+ }
+ INP_WUNLOCK(inp);
+ INP_INFO_WUNLOCK(&V_tcbinfo);
+}
+
+/*
+ * Receive out-of-band data.
+ */
+static int
+tcp_usr_rcvoob(struct socket *so, struct mbuf *m, int flags)
+{
+ int error = 0;
+ struct inpcb *inp;
+ struct tcpcb *tp = NULL;
+
+ TCPDEBUG0;
+ inp = sotoinpcb(so);
+ KASSERT(inp != NULL, ("tcp_usr_rcvoob: inp == NULL"));
+ INP_WLOCK(inp);
+ if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {
+ error = ECONNRESET;
+ goto out;
+ }
+ tp = intotcpcb(inp);
+ TCPDEBUG1();
+ if ((so->so_oobmark == 0 &&
+ (so->so_rcv.sb_state & SBS_RCVATMARK) == 0) ||
+ so->so_options & SO_OOBINLINE ||
+ tp->t_oobflags & TCPOOB_HADDATA) {
+ error = EINVAL;
+ goto out;
+ }
+ if ((tp->t_oobflags & TCPOOB_HAVEDATA) == 0) {
+ error = EWOULDBLOCK;
+ goto out;
+ }
+ m->m_len = 1;
+ *mtod(m, caddr_t) = tp->t_iobc;
+ if ((flags & MSG_PEEK) == 0)
+ tp->t_oobflags ^= (TCPOOB_HAVEDATA | TCPOOB_HADDATA);
+
+out:
+ TCPDEBUG2(PRU_RCVOOB);
+ INP_WUNLOCK(inp);
+ return (error);
+}
+
+struct pr_usrreqs tcp_usrreqs = {
+ .pru_abort = tcp_usr_abort,
+ .pru_accept = tcp_usr_accept,
+ .pru_attach = tcp_usr_attach,
+ .pru_bind = tcp_usr_bind,
+ .pru_connect = tcp_usr_connect,
+ .pru_control = in_control,
+ .pru_detach = tcp_usr_detach,
+ .pru_disconnect = tcp_usr_disconnect,
+ .pru_listen = tcp_usr_listen,
+ .pru_peeraddr = in_getpeeraddr,
+ .pru_rcvd = tcp_usr_rcvd,
+ .pru_rcvoob = tcp_usr_rcvoob,
+ .pru_send = tcp_usr_send,
+ .pru_shutdown = tcp_usr_shutdown,
+ .pru_sockaddr = in_getsockaddr,
+#if 0
+ .pru_soreceive = soreceive_stream,
+#endif
+ .pru_sosetlabel = in_pcbsosetlabel,
+ .pru_close = tcp_usr_close,
+};
+
+#ifdef INET6
+struct pr_usrreqs tcp6_usrreqs = {
+ .pru_abort = tcp_usr_abort,
+ .pru_accept = tcp6_usr_accept,
+ .pru_attach = tcp_usr_attach,
+ .pru_bind = tcp6_usr_bind,
+ .pru_connect = tcp6_usr_connect,
+ .pru_control = in6_control,
+ .pru_detach = tcp_usr_detach,
+ .pru_disconnect = tcp_usr_disconnect,
+ .pru_listen = tcp6_usr_listen,
+ .pru_peeraddr = in6_mapped_peeraddr,
+ .pru_rcvd = tcp_usr_rcvd,
+ .pru_rcvoob = tcp_usr_rcvoob,
+ .pru_send = tcp_usr_send,
+ .pru_shutdown = tcp_usr_shutdown,
+ .pru_sockaddr = in6_mapped_sockaddr,
+#if 0
+ .pru_soreceive = soreceive_stream,
+#endif
+ .pru_sosetlabel = in_pcbsosetlabel,
+ .pru_close = tcp_usr_close,
+};
+#endif /* INET6 */
+
+/*
+ * Common subroutine to open a TCP connection to the remote host
+ * specified by the struct sockaddr_in pointed to by nam. Call in_pcbbind to assign a local
+ * port number if needed. Call in_pcbconnect_setup to do the routing and
+ * to choose a local host address (interface). If there is an existing
+ * incarnation of the same connection in TIME-WAIT state and if the remote
+ * host was sending CC options and if the connection duration was < MSL, then
+ * truncate the previous TIME-WAIT state and proceed.
+ * Initialize connection parameters and enter SYN-SENT state.
+ */
+static int
+tcp_connect(struct tcpcb *tp, struct sockaddr *nam, struct thread *td)
+{
+ struct inpcb *inp = tp->t_inpcb, *oinp;
+ struct socket *so = inp->inp_socket;
+ struct in_addr laddr;
+ u_short lport;
+ int error;
+
+ INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
+ INP_WLOCK_ASSERT(inp);
+
+ if (inp->inp_lport == 0) {
+ error = in_pcbbind(inp, (struct sockaddr *)0, td->td_ucred);
+ if (error)
+ return error;
+ }
+
+ /*
+ * Cannot simply call in_pcbconnect, because there might be an
+ * earlier incarnation of this same connection still in
+ * TIME_WAIT state, creating an ADDRINUSE error.
+ */
+ laddr = inp->inp_laddr;
+ lport = inp->inp_lport;
+ error = in_pcbconnect_setup(inp, nam, &laddr.s_addr, &lport,
+ &inp->inp_faddr.s_addr, &inp->inp_fport, &oinp, td->td_ucred);
+ if (error && oinp == NULL)
+ return error;
+ if (oinp)
+ return EADDRINUSE;
+ inp->inp_laddr = laddr;
+ in_pcbrehash(inp);
+
+ /*
+ * Compute window scaling to request:
+ * Scale to fit into sweet spot. See tcp_syncache.c.
+ * XXX: This should move to tcp_output().
+ */
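+ /*
+ * For illustration, assuming an sb_max of 256 kB: 65535 << 2 is
+ * still below 256 kB, so the loop settles on request_r_scale = 3.
+ */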
+ while (tp->request_r_scale < TCP_MAX_WINSHIFT &&
+ (TCP_MAXWIN << tp->request_r_scale) < sb_max)
+ tp->request_r_scale++;
+
+ soisconnecting(so);
+ TCPSTAT_INC(tcps_connattempt);
+ tp->t_state = TCPS_SYN_SENT;
+ tcp_timer_activate(tp, TT_KEEP, tcp_keepinit);
+ tp->iss = tcp_new_isn(tp);
+ tp->t_bw_rtseq = tp->iss;
+ tcp_sendseqinit(tp);
+
+ return 0;
+}
+
+#ifdef INET6
+static int
+tcp6_connect(struct tcpcb *tp, struct sockaddr *nam, struct thread *td)
+{
+ struct inpcb *inp = tp->t_inpcb, *oinp;
+ struct socket *so = inp->inp_socket;
+ struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)nam;
+ struct in6_addr addr6;
+ int error;
+
+ INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
+ INP_WLOCK_ASSERT(inp);
+
+ if (inp->inp_lport == 0) {
+ error = in6_pcbbind(inp, (struct sockaddr *)0, td->td_ucred);
+ if (error)
+ return error;
+ }
+
+ /*
+ * Cannot simply call in_pcbconnect, because there might be an
+ * earlier incarnation of this same connection still in
+ * TIME_WAIT state, creating an ADDRINUSE error.
+ * in6_pcbladdr() also handles scope zone IDs.
+ */
+ error = in6_pcbladdr(inp, nam, &addr6);
+ if (error)
+ return error;
+ oinp = in6_pcblookup_hash(inp->inp_pcbinfo,
+ &sin6->sin6_addr, sin6->sin6_port,
+ IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr)
+ ? &addr6
+ : &inp->in6p_laddr,
+ inp->inp_lport, 0, NULL);
+ if (oinp)
+ return EADDRINUSE;
+ if (IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr))
+ inp->in6p_laddr = addr6;
+ inp->in6p_faddr = sin6->sin6_addr;
+ inp->inp_fport = sin6->sin6_port;
+ /* update flowinfo - draft-itojun-ipv6-flowlabel-api-00 */
+ inp->inp_flow &= ~IPV6_FLOWLABEL_MASK;
+ if (inp->inp_flags & IN6P_AUTOFLOWLABEL)
+ inp->inp_flow |=
+ (htonl(ip6_randomflowlabel()) & IPV6_FLOWLABEL_MASK);
+ in_pcbrehash(inp);
+
+ /* Compute window scaling to request. */
+ while (tp->request_r_scale < TCP_MAX_WINSHIFT &&
+ (TCP_MAXWIN << tp->request_r_scale) < sb_max)
+ tp->request_r_scale++;
+
+ soisconnecting(so);
+ TCPSTAT_INC(tcps_connattempt);
+ tp->t_state = TCPS_SYN_SENT;
+ tcp_timer_activate(tp, TT_KEEP, tcp_keepinit);
+ tp->iss = tcp_new_isn(tp);
+ tp->t_bw_rtseq = tp->iss;
+ tcp_sendseqinit(tp);
+
+ return 0;
+}
+#endif /* INET6 */
+
+/*
+ * Export TCP internal state information via a struct tcp_info, based on the
+ * Linux 2.6 API. Not ABI compatible as our constants are mapped differently
+ * (TCP state machine, etc). We export all information using FreeBSD-native
+ * constants -- for example, the numeric values for tcpi_state will differ
+ * from Linux.
+ */
+static void
+tcp_fill_info(struct tcpcb *tp, struct tcp_info *ti)
+{
+
+ INP_WLOCK_ASSERT(tp->t_inpcb);
+ bzero(ti, sizeof(*ti));
+
+ ti->tcpi_state = tp->t_state;
+ if ((tp->t_flags & TF_REQ_TSTMP) && (tp->t_flags & TF_RCVD_TSTMP))
+ ti->tcpi_options |= TCPI_OPT_TIMESTAMPS;
+ if (tp->t_flags & TF_SACK_PERMIT)
+ ti->tcpi_options |= TCPI_OPT_SACK;
+ if ((tp->t_flags & TF_REQ_SCALE) && (tp->t_flags & TF_RCVD_SCALE)) {
+ ti->tcpi_options |= TCPI_OPT_WSCALE;
+ ti->tcpi_snd_wscale = tp->snd_scale;
+ ti->tcpi_rcv_wscale = tp->rcv_scale;
+ }
+
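+ /*
+ * t_srtt and t_rttvar are fixed point with TCP_RTT_SHIFT and
+ * TCP_RTTVAR_SHIFT fractional bits; scaling by tick (microseconds
+ * per clock tick) before the shift yields microseconds.
+ */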
+ ti->tcpi_rto = tp->t_rxtcur * tick;
+ ti->tcpi_last_data_recv = (long)(ticks - (int)tp->t_rcvtime) * tick;
+ ti->tcpi_rtt = ((u_int64_t)tp->t_srtt * tick) >> TCP_RTT_SHIFT;
+ ti->tcpi_rttvar = ((u_int64_t)tp->t_rttvar * tick) >> TCP_RTTVAR_SHIFT;
+
+ ti->tcpi_snd_ssthresh = tp->snd_ssthresh;
+ ti->tcpi_snd_cwnd = tp->snd_cwnd;
+
+ /*
+ * FreeBSD-specific extension fields for tcp_info.
+ */
+ ti->tcpi_rcv_space = tp->rcv_wnd;
+ ti->tcpi_rcv_nxt = tp->rcv_nxt;
+ ti->tcpi_snd_wnd = tp->snd_wnd;
+ ti->tcpi_snd_bwnd = tp->snd_bwnd;
+ ti->tcpi_snd_nxt = tp->snd_nxt;
+ ti->tcpi_snd_mss = tp->t_maxseg;
+ ti->tcpi_rcv_mss = tp->t_maxseg;
+ if (tp->t_flags & TF_TOE)
+ ti->tcpi_options |= TCPI_OPT_TOE;
+}
+
+/*
+ * tcp_ctloutput() must drop the inpcb lock before performing copyin on
+ * socket option arguments. When it re-acquires the lock after the copy, it
+ * has to revalidate that the connection is still valid for the socket
+ * option.
+ */
+#define INP_WLOCK_RECHECK(inp) do { \
+ INP_WLOCK(inp); \
+ if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) { \
+ INP_WUNLOCK(inp); \
+ return (ECONNRESET); \
+ } \
+ tp = intotcpcb(inp); \
+} while(0)
+
+int
+tcp_ctloutput(struct socket *so, struct sockopt *sopt)
+{
+ int error, opt, optval;
+ struct inpcb *inp;
+ struct tcpcb *tp;
+ struct tcp_info ti;
+
+ error = 0;
+ inp = sotoinpcb(so);
+ KASSERT(inp != NULL, ("tcp_ctloutput: inp == NULL"));
+ INP_WLOCK(inp);
+ if (sopt->sopt_level != IPPROTO_TCP) {
+#ifdef INET6
+ if (inp->inp_vflag & INP_IPV6PROTO) {
+ INP_WUNLOCK(inp);
+ error = ip6_ctloutput(so, sopt);
+ } else {
+#endif /* INET6 */
+ INP_WUNLOCK(inp);
+ error = ip_ctloutput(so, sopt);
+#ifdef INET6
+ }
+#endif
+ return (error);
+ }
+ if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {
+ INP_WUNLOCK(inp);
+ return (ECONNRESET);
+ }
+
+ switch (sopt->sopt_dir) {
+ case SOPT_SET:
+ switch (sopt->sopt_name) {
+#ifdef TCP_SIGNATURE
+ case TCP_MD5SIG:
+ INP_WUNLOCK(inp);
+ error = sooptcopyin(sopt, &optval, sizeof optval,
+ sizeof optval);
+ if (error)
+ return (error);
+
+ INP_WLOCK_RECHECK(inp);
+ if (optval > 0)
+ tp->t_flags |= TF_SIGNATURE;
+ else
+ tp->t_flags &= ~TF_SIGNATURE;
+ INP_WUNLOCK(inp);
+ break;
+#endif /* TCP_SIGNATURE */
+ case TCP_NODELAY:
+ case TCP_NOOPT:
+ INP_WUNLOCK(inp);
+ error = sooptcopyin(sopt, &optval, sizeof optval,
+ sizeof optval);
+ if (error)
+ return (error);
+
+ INP_WLOCK_RECHECK(inp);
+ switch (sopt->sopt_name) {
+ case TCP_NODELAY:
+ opt = TF_NODELAY;
+ break;
+ case TCP_NOOPT:
+ opt = TF_NOOPT;
+ break;
+ default:
+ opt = 0; /* dead code to fool gcc */
+ break;
+ }
+
+ if (optval)
+ tp->t_flags |= opt;
+ else
+ tp->t_flags &= ~opt;
+ INP_WUNLOCK(inp);
+ break;
+
+ case TCP_NOPUSH:
+ INP_WUNLOCK(inp);
+ error = sooptcopyin(sopt, &optval, sizeof optval,
+ sizeof optval);
+ if (error)
+ return (error);
+
+ INP_WLOCK_RECHECK(inp);
+ if (optval)
+ tp->t_flags |= TF_NOPUSH;
+ else if (tp->t_flags & TF_NOPUSH) {
+ tp->t_flags &= ~TF_NOPUSH;
+ if (TCPS_HAVEESTABLISHED(tp->t_state))
+ error = tcp_output(tp);
+ }
+ INP_WUNLOCK(inp);
+ break;
+
+ case TCP_MAXSEG:
+ INP_WUNLOCK(inp);
+ error = sooptcopyin(sopt, &optval, sizeof optval,
+ sizeof optval);
+ if (error)
+ return (error);
+
+ INP_WLOCK_RECHECK(inp);
+ if (optval > 0 && optval <= tp->t_maxseg &&
+ optval + 40 >= V_tcp_minmss)
+ tp->t_maxseg = optval;
+ else
+ error = EINVAL;
+ INP_WUNLOCK(inp);
+ break;
+
+ case TCP_INFO:
+ INP_WUNLOCK(inp);
+ error = EINVAL;
+ break;
+
+ default:
+ INP_WUNLOCK(inp);
+ error = ENOPROTOOPT;
+ break;
+ }
+ break;
+
+ case SOPT_GET:
+ tp = intotcpcb(inp);
+ switch (sopt->sopt_name) {
+#ifdef TCP_SIGNATURE
+ case TCP_MD5SIG:
+ optval = (tp->t_flags & TF_SIGNATURE) ? 1 : 0;
+ INP_WUNLOCK(inp);
+ error = sooptcopyout(sopt, &optval, sizeof optval);
+ break;
+#endif
+
+ case TCP_NODELAY:
+ optval = tp->t_flags & TF_NODELAY;
+ INP_WUNLOCK(inp);
+ error = sooptcopyout(sopt, &optval, sizeof optval);
+ break;
+ case TCP_MAXSEG:
+ optval = tp->t_maxseg;
+ INP_WUNLOCK(inp);
+ error = sooptcopyout(sopt, &optval, sizeof optval);
+ break;
+ case TCP_NOOPT:
+ optval = tp->t_flags & TF_NOOPT;
+ INP_WUNLOCK(inp);
+ error = sooptcopyout(sopt, &optval, sizeof optval);
+ break;
+ case TCP_NOPUSH:
+ optval = tp->t_flags & TF_NOPUSH;
+ INP_WUNLOCK(inp);
+ error = sooptcopyout(sopt, &optval, sizeof optval);
+ break;
+ case TCP_INFO:
+ tcp_fill_info(tp, &ti);
+ INP_WUNLOCK(inp);
+ error = sooptcopyout(sopt, &ti, sizeof ti);
+ break;
+ default:
+ INP_WUNLOCK(inp);
+ error = ENOPROTOOPT;
+ break;
+ }
+ break;
+ }
+ return (error);
+}
+#undef INP_WLOCK_RECHECK
+
+/*
+ * tcp_sendspace and tcp_recvspace are the default send and receive window
+ * sizes, respectively. These are obsolescent (this information should
+ * be set by the route).
+ */
+u_long tcp_sendspace = 1024*32;
+SYSCTL_ULONG(_net_inet_tcp, TCPCTL_SENDSPACE, sendspace, CTLFLAG_RW,
+ &tcp_sendspace , 0, "Maximum outgoing TCP datagram size");
+u_long tcp_recvspace = 1024*64;
+SYSCTL_ULONG(_net_inet_tcp, TCPCTL_RECVSPACE, recvspace, CTLFLAG_RW,
+ &tcp_recvspace , 0, "Maximum incoming TCP datagram size");
+
+/*
+ * Attach TCP protocol to socket, allocating
+ * internet protocol control block, tcp control block,
+ * buffer space, and entering LISTEN state to accept connections.
+ */
+static int
+tcp_attach(struct socket *so)
+{
+ struct tcpcb *tp;
+ struct inpcb *inp;
+ int error;
+
+ if (so->so_snd.sb_hiwat == 0 || so->so_rcv.sb_hiwat == 0) {
+ error = soreserve(so, tcp_sendspace, tcp_recvspace);
+ if (error)
+ return (error);
+ }
+ so->so_rcv.sb_flags |= SB_AUTOSIZE;
+ so->so_snd.sb_flags |= SB_AUTOSIZE;
+ INP_INFO_WLOCK(&V_tcbinfo);
+ error = in_pcballoc(so, &V_tcbinfo);
+ if (error) {
+ INP_INFO_WUNLOCK(&V_tcbinfo);
+ return (error);
+ }
+ inp = sotoinpcb(so);
+#ifdef INET6
+ if (inp->inp_vflag & INP_IPV6PROTO) {
+ inp->inp_vflag |= INP_IPV6;
+ inp->in6p_hops = -1; /* use kernel default */
+ }
+ else
+#endif
+ inp->inp_vflag |= INP_IPV4;
+ tp = tcp_newtcpcb(inp);
+ if (tp == NULL) {
+ in_pcbdetach(inp);
+ in_pcbfree(inp);
+ INP_INFO_WUNLOCK(&V_tcbinfo);
+ return (ENOBUFS);
+ }
+ tp->t_state = TCPS_CLOSED;
+ INP_WUNLOCK(inp);
+ INP_INFO_WUNLOCK(&V_tcbinfo);
+ return (0);
+}
+
+/*
+ * Initiate (or continue) disconnect.
+ * If embryonic state, just send reset (once).
+ * If in ``let data drain'' option and linger null, just drop.
+ * Otherwise (hard), mark socket disconnecting and drop
+ * current input data; switch states based on user close, and
+ * send segment to peer (with FIN).
+ */
+static void
+tcp_disconnect(struct tcpcb *tp)
+{
+ struct inpcb *inp = tp->t_inpcb;
+ struct socket *so = inp->inp_socket;
+
+ INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
+ INP_WLOCK_ASSERT(inp);
+
+ /*
+ * Neither tcp_close() nor tcp_drop() should return NULL, as the
+ * socket is still open.
+ */
+ if (tp->t_state < TCPS_ESTABLISHED) {
+ tp = tcp_close(tp);
+ KASSERT(tp != NULL,
+ ("tcp_disconnect: tcp_close() returned NULL"));
+ } else if ((so->so_options & SO_LINGER) && so->so_linger == 0) {
+ tp = tcp_drop(tp, 0);
+ KASSERT(tp != NULL,
+ ("tcp_disconnect: tcp_drop() returned NULL"));
+ } else {
+ soisdisconnecting(so);
+ sbflush(&so->so_rcv);
+ tcp_usrclosed(tp);
+ if (!(inp->inp_flags & INP_DROPPED))
+ tcp_output_disconnect(tp);
+ }
+}
+
+/*
+ * User issued close, and wish to trail through shutdown states:
+ * if never received SYN, just forget it. If got a SYN from peer,
+ * but haven't sent FIN, then go to FIN_WAIT_1 state to send peer a FIN.
+ * If already got a FIN from peer, then almost done; go to LAST_ACK
+ * state. In all other cases, have already sent FIN to peer (e.g.
+ * after PRU_SHUTDOWN), and just have to play the tedious game of waiting
+ * for peer to send FIN or not respond to keep-alives, etc.
+ * We can let the user exit from the close as soon as the FIN is acked.
+ */
+static void
+tcp_usrclosed(struct tcpcb *tp)
+{
+
+ INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
+ INP_WLOCK_ASSERT(tp->t_inpcb);
+
+ switch (tp->t_state) {
+ case TCPS_LISTEN:
+ tcp_offload_listen_close(tp);
+ /* FALLTHROUGH */
+ case TCPS_CLOSED:
+ tp->t_state = TCPS_CLOSED;
+ tp = tcp_close(tp);
+ /*
+ * tcp_close() should never return NULL here as the socket is
+ * still open.
+ */
+ KASSERT(tp != NULL,
+ ("tcp_usrclosed: tcp_close() returned NULL"));
+ break;
+
+ case TCPS_SYN_SENT:
+ case TCPS_SYN_RECEIVED:
+ tp->t_flags |= TF_NEEDFIN;
+ break;
+
+ case TCPS_ESTABLISHED:
+ tp->t_state = TCPS_FIN_WAIT_1;
+ break;
+
+ case TCPS_CLOSE_WAIT:
+ tp->t_state = TCPS_LAST_ACK;
+ break;
+ }
+ if (tp->t_state >= TCPS_FIN_WAIT_2) {
+ soisdisconnected(tp->t_inpcb->inp_socket);
+ /* Prevent the connection hanging in FIN_WAIT_2 forever. */
+ if (tp->t_state == TCPS_FIN_WAIT_2) {
+ int timeout;
+
+ timeout = (tcp_fast_finwait2_recycle) ?
+ tcp_finwait2_timeout : tcp_maxidle;
+ tcp_timer_activate(tp, TT_2MSL, timeout);
+ }
+ }
+}
+
+#ifdef DDB
+static void
+db_print_indent(int indent)
+{
+ int i;
+
+ for (i = 0; i < indent; i++)
+ db_printf(" ");
+}
+
+static void
+db_print_tstate(int t_state)
+{
+
+ switch (t_state) {
+ case TCPS_CLOSED:
+ db_printf("TCPS_CLOSED");
+ return;
+
+ case TCPS_LISTEN:
+ db_printf("TCPS_LISTEN");
+ return;
+
+ case TCPS_SYN_SENT:
+ db_printf("TCPS_SYN_SENT");
+ return;
+
+ case TCPS_SYN_RECEIVED:
+ db_printf("TCPS_SYN_RECEIVED");
+ return;
+
+ case TCPS_ESTABLISHED:
+ db_printf("TCPS_ESTABLISHED");
+ return;
+
+ case TCPS_CLOSE_WAIT:
+ db_printf("TCPS_CLOSE_WAIT");
+ return;
+
+ case TCPS_FIN_WAIT_1:
+ db_printf("TCPS_FIN_WAIT_1");
+ return;
+
+ case TCPS_CLOSING:
+ db_printf("TCPS_CLOSING");
+ return;
+
+ case TCPS_LAST_ACK:
+ db_printf("TCPS_LAST_ACK");
+ return;
+
+ case TCPS_FIN_WAIT_2:
+ db_printf("TCPS_FIN_WAIT_2");
+ return;
+
+ case TCPS_TIME_WAIT:
+ db_printf("TCPS_TIME_WAIT");
+ return;
+
+ default:
+ db_printf("unknown");
+ return;
+ }
+}
+
+static void
+db_print_tflags(u_int t_flags)
+{
+ int comma;
+
+ comma = 0;
+ if (t_flags & TF_ACKNOW) {
+ db_printf("%sTF_ACKNOW", comma ? ", " : "");
+ comma = 1;
+ }
+ if (t_flags & TF_DELACK) {
+ db_printf("%sTF_DELACK", comma ? ", " : "");
+ comma = 1;
+ }
+ if (t_flags & TF_NODELAY) {
+ db_printf("%sTF_NODELAY", comma ? ", " : "");
+ comma = 1;
+ }
+ if (t_flags & TF_NOOPT) {
+ db_printf("%sTF_NOOPT", comma ? ", " : "");
+ comma = 1;
+ }
+ if (t_flags & TF_SENTFIN) {
+ db_printf("%sTF_SENTFIN", comma ? ", " : "");
+ comma = 1;
+ }
+ if (t_flags & TF_REQ_SCALE) {
+ db_printf("%sTF_REQ_SCALE", comma ? ", " : "");
+ comma = 1;
+ }
+ if (t_flags & TF_RCVD_SCALE) {
+ db_printf("%sTF_RECVD_SCALE", comma ? ", " : "");
+ comma = 1;
+ }
+ if (t_flags & TF_REQ_TSTMP) {
+ db_printf("%sTF_REQ_TSTMP", comma ? ", " : "");
+ comma = 1;
+ }
+ if (t_flags & TF_RCVD_TSTMP) {
+ db_printf("%sTF_RCVD_TSTMP", comma ? ", " : "");
+ comma = 1;
+ }
+ if (t_flags & TF_SACK_PERMIT) {
+ db_printf("%sTF_SACK_PERMIT", comma ? ", " : "");
+ comma = 1;
+ }
+ if (t_flags & TF_NEEDSYN) {
+ db_printf("%sTF_NEEDSYN", comma ? ", " : "");
+ comma = 1;
+ }
+ if (t_flags & TF_NEEDFIN) {
+ db_printf("%sTF_NEEDFIN", comma ? ", " : "");
+ comma = 1;
+ }
+ if (t_flags & TF_NOPUSH) {
+ db_printf("%sTF_NOPUSH", comma ? ", " : "");
+ comma = 1;
+ }
+ if (t_flags & TF_MORETOCOME) {
+ db_printf("%sTF_MORETOCOME", comma ? ", " : "");
+ comma = 1;
+ }
+ if (t_flags & TF_LQ_OVERFLOW) {
+ db_printf("%sTF_LQ_OVERFLOW", comma ? ", " : "");
+ comma = 1;
+ }
+ if (t_flags & TF_LASTIDLE) {
+ db_printf("%sTF_LASTIDLE", comma ? ", " : "");
+ comma = 1;
+ }
+ if (t_flags & TF_RXWIN0SENT) {
+ db_printf("%sTF_RXWIN0SENT", comma ? ", " : "");
+ comma = 1;
+ }
+ if (t_flags & TF_FASTRECOVERY) {
+ db_printf("%sTF_FASTRECOVERY", comma ? ", " : "");
+ comma = 1;
+ }
+ if (t_flags & TF_WASFRECOVERY) {
+ db_printf("%sTF_WASFRECOVERY", comma ? ", " : "");
+ comma = 1;
+ }
+ if (t_flags & TF_SIGNATURE) {
+ db_printf("%sTF_SIGNATURE", comma ? ", " : "");
+ comma = 1;
+ }
+ if (t_flags & TF_FORCEDATA) {
+ db_printf("%sTF_FORCEDATA", comma ? ", " : "");
+ comma = 1;
+ }
+ if (t_flags & TF_TSO) {
+ db_printf("%sTF_TSO", comma ? ", " : "");
+ comma = 1;
+ }
+ if (t_flags & TF_ECN_PERMIT) {
+ db_printf("%sTF_ECN_PERMIT", comma ? ", " : "");
+ comma = 1;
+ }
+}
+
+static void
+db_print_toobflags(char t_oobflags)
+{
+ int comma;
+
+ comma = 0;
+ if (t_oobflags & TCPOOB_HAVEDATA) {
+ db_printf("%sTCPOOB_HAVEDATA", comma ? ", " : "");
+ comma = 1;
+ }
+ if (t_oobflags & TCPOOB_HADDATA) {
+ db_printf("%sTCPOOB_HADDATA", comma ? ", " : "");
+ comma = 1;
+ }
+}
+
+static void
+db_print_tcpcb(struct tcpcb *tp, const char *name, int indent)
+{
+
+ db_print_indent(indent);
+ db_printf("%s at %p\n", name, tp);
+
+ indent += 2;
+
+ db_print_indent(indent);
+ db_printf("t_segq first: %p t_segqlen: %d t_dupacks: %d\n",
+ LIST_FIRST(&tp->t_segq), tp->t_segqlen, tp->t_dupacks);
+
+ db_print_indent(indent);
+ db_printf("tt_rexmt: %p tt_persist: %p tt_keep: %p\n",
+ &tp->t_timers->tt_rexmt, &tp->t_timers->tt_persist, &tp->t_timers->tt_keep);
+
+ db_print_indent(indent);
+ db_printf("tt_2msl: %p tt_delack: %p t_inpcb: %p\n", &tp->t_timers->tt_2msl,
+ &tp->t_timers->tt_delack, tp->t_inpcb);
+
+ db_print_indent(indent);
+ db_printf("t_state: %d (", tp->t_state);
+ db_print_tstate(tp->t_state);
+ db_printf(")\n");
+
+ db_print_indent(indent);
+ db_printf("t_flags: 0x%x (", tp->t_flags);
+ db_print_tflags(tp->t_flags);
+ db_printf(")\n");
+
+ db_print_indent(indent);
+ db_printf("snd_una: 0x%08x snd_max: 0x%08x snd_nxt: x0%08x\n",
+ tp->snd_una, tp->snd_max, tp->snd_nxt);
+
+ db_print_indent(indent);
+ db_printf("snd_up: 0x%08x snd_wl1: 0x%08x snd_wl2: 0x%08x\n",
+ tp->snd_up, tp->snd_wl1, tp->snd_wl2);
+
+ db_print_indent(indent);
+ db_printf("iss: 0x%08x irs: 0x%08x rcv_nxt: 0x%08x\n",
+ tp->iss, tp->irs, tp->rcv_nxt);
+
+ db_print_indent(indent);
+ db_printf("rcv_adv: 0x%08x rcv_wnd: %lu rcv_up: 0x%08x\n",
+ tp->rcv_adv, tp->rcv_wnd, tp->rcv_up);
+
+ db_print_indent(indent);
+ db_printf("snd_wnd: %lu snd_cwnd: %lu snd_bwnd: %lu\n",
+ tp->snd_wnd, tp->snd_cwnd, tp->snd_bwnd);
+
+ db_print_indent(indent);
+ db_printf("snd_ssthresh: %lu snd_bandwidth: %lu snd_recover: "
+ "0x%08x\n", tp->snd_ssthresh, tp->snd_bandwidth,
+ tp->snd_recover);
+
+ db_print_indent(indent);
+ db_printf("t_maxopd: %u t_rcvtime: %u t_startime: %u\n",
+ tp->t_maxopd, tp->t_rcvtime, tp->t_starttime);
+
+ db_print_indent(indent);
+ db_printf("t_rttime: %u t_rtsq: 0x%08x t_bw_rtttime: %u\n",
+ tp->t_rtttime, tp->t_rtseq, tp->t_bw_rtttime);
+
+ db_print_indent(indent);
+ db_printf("t_bw_rtseq: 0x%08x t_rxtcur: %d t_maxseg: %u "
+ "t_srtt: %d\n", tp->t_bw_rtseq, tp->t_rxtcur, tp->t_maxseg,
+ tp->t_srtt);
+
+ db_print_indent(indent);
+ db_printf("t_rttvar: %d t_rxtshift: %d t_rttmin: %u "
+ "t_rttbest: %u\n", tp->t_rttvar, tp->t_rxtshift, tp->t_rttmin,
+ tp->t_rttbest);
+
+ db_print_indent(indent);
+ db_printf("t_rttupdated: %lu max_sndwnd: %lu t_softerror: %d\n",
+ tp->t_rttupdated, tp->max_sndwnd, tp->t_softerror);
+
+ db_print_indent(indent);
+ db_printf("t_oobflags: 0x%x (", tp->t_oobflags);
+ db_print_toobflags(tp->t_oobflags);
+ db_printf(") t_iobc: 0x%02x\n", tp->t_iobc);
+
+ db_print_indent(indent);
+ db_printf("snd_scale: %u rcv_scale: %u request_r_scale: %u\n",
+ tp->snd_scale, tp->rcv_scale, tp->request_r_scale);
+
+ db_print_indent(indent);
+ db_printf("ts_recent: %u ts_recent_age: %u\n",
+ tp->ts_recent, tp->ts_recent_age);
+
+ db_print_indent(indent);
+ db_printf("ts_offset: %u last_ack_sent: 0x%08x snd_cwnd_prev: "
+ "%lu\n", tp->ts_offset, tp->last_ack_sent, tp->snd_cwnd_prev);
+
+ db_print_indent(indent);
+ db_printf("snd_ssthresh_prev: %lu snd_recover_prev: 0x%08x "
+ "t_badrxtwin: %u\n", tp->snd_ssthresh_prev,
+ tp->snd_recover_prev, tp->t_badrxtwin);
+
+ db_print_indent(indent);
+ db_printf("snd_numholes: %d snd_holes first: %p\n",
+ tp->snd_numholes, TAILQ_FIRST(&tp->snd_holes));
+
+ db_print_indent(indent);
+ db_printf("snd_fack: 0x%08x rcv_numsacks: %d sack_newdata: "
+ "0x%08x\n", tp->snd_fack, tp->rcv_numsacks, tp->sack_newdata);
+
+ /* Skip sackblks, sackhint. */
+
+ db_print_indent(indent);
+ db_printf("t_rttlow: %d rfbuf_ts: %u rfbuf_cnt: %d\n",
+ tp->t_rttlow, tp->rfbuf_ts, tp->rfbuf_cnt);
+}
+
+DB_SHOW_COMMAND(tcpcb, db_show_tcpcb)
+{
+ struct tcpcb *tp;
+
+ if (!have_addr) {
+ db_printf("usage: show tcpcb <addr>\n");
+ return;
+ }
+ tp = (struct tcpcb *)addr;
+
+ db_print_tcpcb(tp, "tcpcb", 0);
+}
+#endif
diff --git a/rtems/freebsd/netinet/tcp_var.h b/rtems/freebsd/netinet/tcp_var.h
new file mode 100644
index 00000000..cb8b8604
--- /dev/null
+++ b/rtems/freebsd/netinet/tcp_var.h
@@ -0,0 +1,687 @@
+/*-
+ * Copyright (c) 1982, 1986, 1993, 1994, 1995
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)tcp_var.h 8.4 (Berkeley) 5/24/95
+ * $FreeBSD$
+ */
+
+#ifndef _NETINET_TCP_VAR_HH_
+#define _NETINET_TCP_VAR_HH_
+
+#include <rtems/freebsd/netinet/tcp.h>
+
+#ifdef _KERNEL
+#include <rtems/freebsd/net/vnet.h>
+
+/*
+ * Kernel variables for tcp.
+ */
+VNET_DECLARE(int, tcp_do_rfc1323);
+#define V_tcp_do_rfc1323 VNET(tcp_do_rfc1323)
+
+#endif /* _KERNEL */
+
+/* TCP segment queue entry */
+struct tseg_qent {
+ LIST_ENTRY(tseg_qent) tqe_q;
+ int tqe_len; /* TCP segment data length */
+ struct tcphdr *tqe_th; /* a pointer to tcp header */
+ struct mbuf *tqe_m; /* mbuf contains packet */
+};
+LIST_HEAD(tsegqe_head, tseg_qent);
+
+struct sackblk {
+ tcp_seq start; /* start seq no. of sack block */
+ tcp_seq end; /* end seq no. */
+};
+
+struct sackhole {
+ tcp_seq start; /* start seq no. of hole */
+ tcp_seq end; /* end seq no. */
+ tcp_seq rxmit; /* next seq. no in hole to be retransmitted */
+ TAILQ_ENTRY(sackhole) scblink; /* scoreboard linkage */
+};
+
+struct sackhint {
+ struct sackhole *nexthole;
+ int sack_bytes_rexmit;
+
+ int ispare; /* explicit pad for 64bit alignment */
+ uint64_t _pad[2]; /* 1 sacked_bytes, 1 TBD */
+};
+
+struct tcptemp {
+ u_char tt_ipgen[40]; /* sized for the largest IP header, now IPv6 */
+ struct tcphdr tt_t;
+};
+
+#define tcp6cb tcpcb /* for KAME src sync over BSD*'s */
+
+/* Neighbor Discovery, Neighbor Unreachability Detection Upper layer hint. */
+#ifdef INET6
+#define ND6_HINT(tp) \
+do { \
+ if ((tp) && (tp)->t_inpcb && \
+ ((tp)->t_inpcb->inp_vflag & INP_IPV6) != 0) \
+ nd6_nud_hint(NULL, NULL, 0); \
+} while (0)
+#else
+#define ND6_HINT(tp)
+#endif
+
+/*
+ * Tcp control block, one per tcp; fields:
+ * Organized for 16 byte cacheline efficiency.
+ */
+struct tcpcb {
+ struct tsegqe_head t_segq; /* segment reassembly queue */
+ void *t_pspare[2]; /* new reassembly queue */
+ int t_segqlen; /* segment reassembly queue length */
+ int t_dupacks; /* consecutive dup acks recd */
+
+ struct tcp_timer *t_timers; /* All the TCP timers in one struct */
+
+ struct inpcb *t_inpcb; /* back pointer to internet pcb */
+ int t_state; /* state of this connection */
+ u_int t_flags;
+
+ struct vnet *t_vnet; /* back pointer to parent vnet */
+
+ tcp_seq snd_una; /* send unacknowledged */
+ tcp_seq snd_max; /* highest sequence number sent;
+ * used to recognize retransmits
+ */
+ tcp_seq snd_nxt; /* send next */
+ tcp_seq snd_up; /* send urgent pointer */
+
+ tcp_seq snd_wl1; /* window update seg seq number */
+ tcp_seq snd_wl2; /* window update seg ack number */
+ tcp_seq iss; /* initial send sequence number */
+ tcp_seq irs; /* initial receive sequence number */
+
+ tcp_seq rcv_nxt; /* receive next */
+ tcp_seq rcv_adv; /* advertised window */
+ u_long rcv_wnd; /* receive window */
+ tcp_seq rcv_up; /* receive urgent pointer */
+
+ u_long snd_wnd; /* send window */
+ u_long snd_cwnd; /* congestion-controlled window */
+ u_long snd_bwnd; /* bandwidth-controlled window */
+ u_long snd_ssthresh; /* snd_cwnd size threshold for
+ * for slow start exponential to
+ * linear switch
+ */
+ u_long snd_bandwidth; /* calculated bandwidth or 0 */
+ tcp_seq snd_recover; /* for use in NewReno Fast Recovery */
+
+ u_int t_maxopd; /* mss plus options */
+
+ u_int t_rcvtime; /* inactivity time */
+ u_int t_starttime; /* time connection was established */
+ u_int t_rtttime; /* RTT measurement start time */
+ tcp_seq t_rtseq; /* sequence number being timed */
+
+ u_int t_bw_rtttime; /* used for bandwidth calculation */
+ tcp_seq t_bw_rtseq; /* used for bandwidth calculation */
+
+ int t_rxtcur; /* current retransmit value (ticks) */
+ u_int t_maxseg; /* maximum segment size */
+ int t_srtt; /* smoothed round-trip time */
+ int t_rttvar; /* variance in round-trip time */
+
+ int t_rxtshift; /* log(2) of rexmt exp. backoff */
+ u_int t_rttmin; /* minimum rtt allowed */
+ u_int t_rttbest; /* best rtt we've seen */
+ u_long t_rttupdated; /* number of times rtt sampled */
+ u_long max_sndwnd; /* largest window peer has offered */
+
+ int t_softerror; /* possible error not yet reported */
+/* out-of-band data */
+ char t_oobflags; /* have some */
+ char t_iobc; /* input character */
+/* RFC 1323 variables */
+ u_char snd_scale; /* window scaling for send window */
+ u_char rcv_scale; /* window scaling for recv window */
+ u_char request_r_scale; /* pending window scaling */
+ u_int32_t ts_recent; /* timestamp echo data */
+ u_int ts_recent_age; /* when last updated */
+ u_int32_t ts_offset; /* our timestamp offset */
+
+ tcp_seq last_ack_sent;
+/* experimental */
+ u_long snd_cwnd_prev; /* cwnd prior to retransmit */
+ u_long snd_ssthresh_prev; /* ssthresh prior to retransmit */
+ tcp_seq snd_recover_prev; /* snd_recover prior to retransmit */
+ u_int t_badrxtwin; /* window for retransmit recovery */
+ u_char snd_limited; /* segments limited transmitted */
+/* SACK related state */
+ int snd_numholes; /* number of holes seen by sender */
+ TAILQ_HEAD(sackhole_head, sackhole) snd_holes;
+ /* SACK scoreboard (sorted) */
+ tcp_seq snd_fack; /* last seq number(+1) sack'd by rcv'r*/
+ int rcv_numsacks; /* # distinct sack blks present */
+ struct sackblk sackblks[MAX_SACK_BLKS]; /* seq nos. of sack blocks */
+ tcp_seq sack_newdata; /* New data xmitted in this recovery
+ episode starts at this seq number */
+ struct sackhint sackhint; /* SACK scoreboard hint */
+ int t_rttlow; /* smallest observed RTT */
+ u_int32_t rfbuf_ts; /* recv buffer autoscaling timestamp */
+ int rfbuf_cnt; /* recv buffer autoscaling byte count */
+ struct toe_usrreqs *t_tu; /* offload operations vector */
+ void *t_toe; /* TOE pcb pointer */
+ int t_bytes_acked; /* # bytes acked during current RTT */
+
+ int t_ispare; /* explicit pad for 64bit alignment */
+ void *t_pspare2[6]; /* 2 CC / 4 TBD */
+ uint64_t _pad[12]; /* 7 UTO, 5 TBD (1-2 CC/RTT?) */
+};
+
+/*
+ * Flags and utility macros for the t_flags field.
+ */
+#define TF_ACKNOW 0x000001 /* ack peer immediately */
+#define TF_DELACK 0x000002 /* ack, but try to delay it */
+#define TF_NODELAY 0x000004 /* don't delay packets to coalesce */
+#define TF_NOOPT 0x000008 /* don't use tcp options */
+#define TF_SENTFIN 0x000010 /* have sent FIN */
+#define TF_REQ_SCALE 0x000020 /* have/will request window scaling */
+#define TF_RCVD_SCALE 0x000040 /* other side has requested scaling */
+#define TF_REQ_TSTMP 0x000080 /* have/will request timestamps */
+#define TF_RCVD_TSTMP 0x000100 /* a timestamp was received in SYN */
+#define TF_SACK_PERMIT 0x000200 /* other side said I could SACK */
+#define TF_NEEDSYN 0x000400 /* send SYN (implicit state) */
+#define TF_NEEDFIN 0x000800 /* send FIN (implicit state) */
+#define TF_NOPUSH 0x001000 /* don't push */
+#define TF_MORETOCOME 0x010000 /* More data to be appended to sock */
+#define TF_LQ_OVERFLOW 0x020000 /* listen queue overflow */
+#define TF_LASTIDLE 0x040000 /* connection was previously idle */
+#define TF_RXWIN0SENT 0x080000 /* sent a receiver win 0 in response */
+#define TF_FASTRECOVERY 0x100000 /* in NewReno Fast Recovery */
+#define TF_WASFRECOVERY 0x200000 /* was in NewReno Fast Recovery */
+#define TF_SIGNATURE 0x400000 /* require MD5 digests (RFC2385) */
+#define TF_FORCEDATA 0x800000 /* force out a byte */
+#define TF_TSO 0x1000000 /* TSO enabled on this connection */
+#define TF_TOE 0x2000000 /* this connection is offloaded */
+#define TF_ECN_PERMIT 0x4000000 /* connection ECN-ready */
+#define TF_ECN_SND_CWR 0x8000000 /* ECN CWR in queue */
+#define TF_ECN_SND_ECE 0x10000000 /* ECN ECE in queue */
+
+#define IN_FASTRECOVERY(tp) (tp->t_flags & TF_FASTRECOVERY)
+#define ENTER_FASTRECOVERY(tp) tp->t_flags |= TF_FASTRECOVERY
+#define EXIT_FASTRECOVERY(tp) tp->t_flags &= ~TF_FASTRECOVERY
+
+/*
+ * Flags for the t_oobflags field.
+ */
+#define TCPOOB_HAVEDATA 0x01
+#define TCPOOB_HADDATA 0x02
+
+#ifdef TCP_SIGNATURE
+/*
+ * Defines which are needed by the xform_tcp module and tcp_[in|out]put
+ * for SADB verification and lookup.
+ */
+#define TCP_SIGLEN 16 /* length of computed digest in bytes */
+#define TCP_KEYLEN_MIN 1 /* minimum length of TCP-MD5 key */
+#define TCP_KEYLEN_MAX 80 /* maximum length of TCP-MD5 key */
+/*
+ * Only a single SA per host may be specified at this time. An SPI is
+ * needed in order for the KEY_ALLOCSA() lookup to work.
+ */
+#define TCP_SIG_SPI 0x1000
+#endif /* TCP_SIGNATURE */
+
+/*
+ * Structure to hold TCP options that are only used during segment
+ * processing (in tcp_input), but not held in the tcpcb.
+ * It's basically used to reduce the number of parameters
+ * to tcp_dooptions and tcp_addoptions.
+ * The binary order of the to_flags is relevant for packing of the
+ * options in tcp_addoptions.
+ */
+struct tcpopt {
+ u_int64_t to_flags; /* which options are present */
+#define TOF_MSS 0x0001 /* maximum segment size */
+#define TOF_SCALE 0x0002 /* window scaling */
+#define TOF_SACKPERM 0x0004 /* SACK permitted */
+#define TOF_TS 0x0010 /* timestamp */
+#define TOF_SIGNATURE 0x0040 /* TCP-MD5 signature option (RFC2385) */
+#define TOF_SACK 0x0080 /* Peer sent SACK option */
+#define TOF_MAXOPT 0x0100
+ u_int32_t to_tsval; /* new timestamp */
+ u_int32_t to_tsecr; /* reflected timestamp */
+ u_char *to_sacks; /* pointer to the first SACK blocks */
+ u_char *to_signature; /* pointer to the TCP-MD5 signature */
+ u_int16_t to_mss; /* maximum segment size */
+ u_int8_t to_wscale; /* window scaling */
+ u_int8_t to_nsacks; /* number of SACK blocks */
+};
+
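Because every option is guarded by a TOF_* bit in to_flags, a field of struct tcpopt is only meaningful when its bit is set. An editorial sketch of the consumer-side idiom, assuming a struct already filled in by tcp_dooptions() (the helper name is invented):

    /* Return the peer's advertised MSS, or a caller-supplied
     * default when no MSS option was present in the segment. */
    static u_int
    effective_mss(const struct tcpopt *to, u_int dflt)
    {
            if (to->to_flags & TOF_MSS)
                    return (to->to_mss);
            return (dflt);
    }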
+/*
+ * Flags for tcp_dooptions.
+ */
+#define TO_SYN 0x01 /* parse SYN-only options */
+
+struct hc_metrics_lite { /* must stay in sync with hc_metrics */
+ u_long rmx_mtu; /* MTU for this path */
+ u_long rmx_ssthresh; /* outbound gateway buffer limit */
+ u_long rmx_rtt; /* estimated round trip time */
+ u_long rmx_rttvar; /* estimated rtt variance */
+ u_long rmx_bandwidth; /* estimated bandwidth */
+ u_long rmx_cwnd; /* congestion window */
+ u_long rmx_sendpipe; /* outbound delay-bandwidth product */
+ u_long rmx_recvpipe; /* inbound delay-bandwidth product */
+};
+
+#ifndef _NETINET_IN_PCB_HH_
+struct in_conninfo;
+#endif /* _NETINET_IN_PCB_HH_ */
+
+struct tcptw {
+ struct inpcb *tw_inpcb; /* XXX back pointer to internet pcb */
+ tcp_seq snd_nxt;
+ tcp_seq rcv_nxt;
+ tcp_seq iss;
+ tcp_seq irs;
+ u_short last_win; /* cached window value */
+ u_short tw_so_options; /* copy of so_options */
+ struct ucred *tw_cred; /* user credentials */
+ u_int32_t t_recent;
+ u_int32_t ts_offset; /* our timestamp offset */
+ u_int t_starttime;
+ int tw_time;
+ TAILQ_ENTRY(tcptw) tw_2msl;
+};
+
+#define intotcpcb(ip) ((struct tcpcb *)(ip)->inp_ppcb)
+#define intotw(ip) ((struct tcptw *)(ip)->inp_ppcb)
+#define sototcpcb(so) (intotcpcb(sotoinpcb(so)))
+
+/*
+ * The smoothed round-trip time and estimated variance
+ * are stored as fixed point numbers scaled by the values below.
+ * For convenience, these scales are also used in smoothing the average
+ * (smoothed = (1/scale)sample + ((scale-1)/scale)smoothed).
+ * With these scales, srtt has 5 bits to the right of the binary point,
+ * and is smoothed with an "ALPHA" of 0.875. rttvar has 4 bits to the
+ * right of the binary point, and is smoothed with an ALPHA of 0.75.
+ */
+#define TCP_RTT_SCALE 32 /* multiplier for srtt; 5 bits frac. */
+#define TCP_RTT_SHIFT 5 /* shift for srtt; 5 bits frac. */
+#define TCP_RTTVAR_SCALE 16 /* multiplier for rttvar; 4 bits */
+#define TCP_RTTVAR_SHIFT 4 /* shift for rttvar; 4 bits */
+#define TCP_DELTA_SHIFT 2 /* see tcp_input.c */
+
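Since TCP_RTT_SCALE is 2^5 and TCP_RTTVAR_SCALE is 2^4, recovering whole ticks from the stored fixed-point fields is a plain right shift. An editorial sketch with one worked value each (helper names invented):

    /* t_srtt == 96 encodes 96/32 == 3 ticks. */
    static int
    srtt_ticks(const struct tcpcb *tp)
    {
            return (tp->t_srtt >> TCP_RTT_SHIFT);
    }

    /* t_rttvar == 24 encodes 24/16 == 1.5 ticks (truncated to 1). */
    static int
    rttvar_ticks(const struct tcpcb *tp)
    {
            return (tp->t_rttvar >> TCP_RTTVAR_SHIFT);
    }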
+/*
+ * The initial retransmission should happen at rtt + 4 * rttvar.
+ * Because of the way we do the smoothing, srtt and rttvar
+ * will each average +1/2 tick of bias. When we compute
+ * the retransmit timer, we want 1/2 tick of rounding and
+ * 1 extra tick because of +-1/2 tick uncertainty in the
+ * firing of the timer. The bias will give us exactly the
+ * 1.5 tick we need. But, because the bias is
+ * statistical, we have to test that we don't drop below
+ * the minimum feasible timer (which is 2 ticks).
+ * This version of the macro is adapted from a paper by Lawrence
+ * Brakmo and Larry Peterson, which outlines a problem caused
+ * by insufficient precision in the original implementation,
+ * which results in inappropriately large RTO values for very
+ * fast networks.
+ */
+#define TCP_REXMTVAL(tp) \
+ max((tp)->t_rttmin, (((tp)->t_srtt >> (TCP_RTT_SHIFT - TCP_DELTA_SHIFT)) \
+ + (tp)->t_rttvar) >> TCP_DELTA_SHIFT)
+
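Worked example (editorial): take t_srtt = 96 (3 ticks at 5 fractional bits), t_rttvar = 24 (1.5 ticks at 4 fractional bits) and t_rttmin = 3. The first shift, 96 >> (5 - 2), rescales srtt down to 2 fractional bits; adding t_rttvar without rescaling it reads a 4-fractional-bit value on that 2-fractional-bit scale, which is exactly the implicit multiplication by 4; the final shift strips the remaining fraction:

    (96 >> 3) = 12            srtt, now at 2 fractional bits
    12 + 24   = 36            plus rttvar, implicitly times 4
    36 >> 2   = 9 ticks       equals 3 + 4 * 1.5, as intended

Nine ticks exceeds t_rttmin, so the max() is a no-op here; it only bites when the sum would drop below the minimum feasible timer.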
+/*
+ * TCP statistics.
+ * Many of these should be kept per connection,
+ * but that's inconvenient at the moment.
+ */
+struct tcpstat {
+ u_long tcps_connattempt; /* connections initiated */
+ u_long tcps_accepts; /* connections accepted */
+ u_long tcps_connects; /* connections established */
+ u_long tcps_drops; /* connections dropped */
+ u_long tcps_conndrops; /* embryonic connections dropped */
+ u_long tcps_minmssdrops; /* average minmss too low drops */
+ u_long tcps_closed; /* conn. closed (includes drops) */
+ u_long tcps_segstimed; /* segs where we tried to get rtt */
+ u_long tcps_rttupdated; /* times we succeeded */
+ u_long tcps_delack; /* delayed acks sent */
+ u_long tcps_timeoutdrop; /* conn. dropped in rxmt timeout */
+ u_long tcps_rexmttimeo; /* retransmit timeouts */
+ u_long tcps_persisttimeo; /* persist timeouts */
+ u_long tcps_keeptimeo; /* keepalive timeouts */
+ u_long tcps_keepprobe; /* keepalive probes sent */
+ u_long tcps_keepdrops; /* connections dropped in keepalive */
+
+ u_long tcps_sndtotal; /* total packets sent */
+ u_long tcps_sndpack; /* data packets sent */
+ u_long tcps_sndbyte; /* data bytes sent */
+ u_long tcps_sndrexmitpack; /* data packets retransmitted */
+ u_long tcps_sndrexmitbyte; /* data bytes retransmitted */
+ u_long tcps_sndrexmitbad; /* unnecessary packet retransmissions */
+ u_long tcps_sndacks; /* ack-only packets sent */
+ u_long tcps_sndprobe; /* window probes sent */
+ u_long tcps_sndurg; /* packets sent with URG only */
+ u_long tcps_sndwinup; /* window update-only packets sent */
+ u_long tcps_sndctrl; /* control (SYN|FIN|RST) packets sent */
+
+ u_long tcps_rcvtotal; /* total packets received */
+ u_long tcps_rcvpack; /* packets received in sequence */
+ u_long tcps_rcvbyte; /* bytes received in sequence */
+ u_long tcps_rcvbadsum; /* packets received with cksum errs */
+ u_long tcps_rcvbadoff; /* packets received with bad offset */
+ u_long tcps_rcvmemdrop; /* packets dropped for lack of memory */
+ u_long tcps_rcvshort; /* packets received too short */
+ u_long tcps_rcvduppack; /* duplicate-only packets received */
+ u_long tcps_rcvdupbyte; /* duplicate-only bytes received */
+ u_long tcps_rcvpartduppack; /* packets with some duplicate data */
+ u_long tcps_rcvpartdupbyte; /* dup. bytes in part-dup. packets */
+ u_long tcps_rcvoopack; /* out-of-order packets received */
+ u_long tcps_rcvoobyte; /* out-of-order bytes received */
+ u_long tcps_rcvpackafterwin; /* packets with data after window */
+ u_long tcps_rcvbyteafterwin; /* bytes rcvd after window */
+ u_long tcps_rcvafterclose; /* packets rcvd after "close" */
+ u_long tcps_rcvwinprobe; /* rcvd window probe packets */
+ u_long tcps_rcvdupack; /* rcvd duplicate acks */
+ u_long tcps_rcvacktoomuch; /* rcvd acks for unsent data */
+ u_long tcps_rcvackpack; /* rcvd ack packets */
+ u_long tcps_rcvackbyte; /* bytes acked by rcvd acks */
+ u_long tcps_rcvwinupd; /* rcvd window update packets */
+ u_long tcps_pawsdrop; /* segments dropped due to PAWS */
+ u_long tcps_predack; /* times hdr predict ok for acks */
+ u_long tcps_preddat; /* times hdr predict ok for data pkts */
+ u_long tcps_pcbcachemiss;
+ u_long tcps_cachedrtt; /* times cached RTT in route updated */
+ u_long tcps_cachedrttvar; /* times cached rttvar updated */
+ u_long tcps_cachedssthresh; /* times cached ssthresh updated */
+ u_long tcps_usedrtt; /* times RTT initialized from route */
+ u_long tcps_usedrttvar; /* times RTTVAR initialized from rt */
+ u_long tcps_usedssthresh; /* times ssthresh initialized from rt*/
+ u_long tcps_persistdrop; /* timeout in persist state */
+ u_long tcps_badsyn; /* bogus SYN, e.g. premature ACK */
+ u_long tcps_mturesent; /* resends due to MTU discovery */
+ u_long tcps_listendrop; /* listen queue overflows */
+ u_long tcps_badrst; /* ignored RSTs in the window */
+
+ u_long tcps_sc_added; /* entry added to syncache */
+ u_long tcps_sc_retransmitted; /* syncache entry was retransmitted */
+ u_long tcps_sc_dupsyn; /* duplicate SYN packet */
+ u_long tcps_sc_dropped; /* could not reply to packet */
+ u_long tcps_sc_completed; /* successful extraction of entry */
+ u_long tcps_sc_bucketoverflow; /* syncache per-bucket limit hit */
+ u_long tcps_sc_cacheoverflow; /* syncache cache limit hit */
+ u_long tcps_sc_reset; /* RST removed entry from syncache */
+ u_long tcps_sc_stale; /* timed out or listen socket gone */
+ u_long tcps_sc_aborted; /* syncache entry aborted */
+ u_long tcps_sc_badack; /* removed due to bad ACK */
+ u_long tcps_sc_unreach; /* ICMP unreachable received */
+ u_long tcps_sc_zonefail; /* zalloc() failed */
+ u_long tcps_sc_sendcookie; /* SYN cookie sent */
+ u_long tcps_sc_recvcookie; /* SYN cookie received */
+
+ u_long tcps_hc_added; /* entry added to hostcache */
+ u_long tcps_hc_bucketoverflow; /* hostcache per bucket limit hit */
+
+ u_long tcps_finwait2_drops; /* Drop FIN_WAIT_2 connection after time limit */
+
+ /* SACK related stats */
+ u_long tcps_sack_recovery_episode; /* SACK recovery episodes */
+ u_long tcps_sack_rexmits; /* SACK rexmit segments */
+ u_long tcps_sack_rexmit_bytes; /* SACK rexmit bytes */
+ u_long tcps_sack_rcv_blocks; /* SACK blocks (options) received */
+ u_long tcps_sack_send_blocks; /* SACK blocks (options) sent */
+ u_long tcps_sack_sboverflow; /* times scoreboard overflowed */
+
+ /* ECN related stats */
+ u_long tcps_ecn_ce; /* ECN Congestion Experienced */
+ u_long tcps_ecn_ect0; /* ECN Capable Transport (ECT0) */
+ u_long tcps_ecn_ect1; /* ECN Capable Transport (ECT1) */
+ u_long tcps_ecn_shs; /* ECN successful handshakes */
+ u_long tcps_ecn_rcwnd; /* # times ECN reduced the cwnd */
+
+ u_long _pad[12]; /* 6 UTO, 6 TBD */
+};
+
+#ifdef _KERNEL
+/*
+ * In-kernel consumers can use these accessor macros directly to update
+ * stats.
+ */
+#define TCPSTAT_ADD(name, val) V_tcpstat.name += (val)
+#define TCPSTAT_INC(name) TCPSTAT_ADD(name, 1)
+
+/*
+ * Kernel module consumers must use this accessor macro.
+ */
+void kmod_tcpstat_inc(int statnum);
+#define KMOD_TCPSTAT_INC(name) \
+ kmod_tcpstat_inc(offsetof(struct tcpstat, name) / sizeof(u_long))
+#endif
+
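Both accessor families bump the same struct tcpstat field; they differ only in how the field is located. Statically linked code dereferences V_tcpstat directly, while a module reduces the field name to a u_long index at compile time and lets kmod_tcpstat_inc() find the struct at run time. An editorial usage sketch (the wrapper function is invented):

    static void
    count_connect_attempt(void)
    {
            TCPSTAT_INC(tcps_connattempt);          /* in-kernel code */
            KMOD_TCPSTAT_INC(tcps_connattempt);     /* kernel module code */
    }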
+/*
+ * TCB structure exported to user-land via sysctl(3).
+ * Evil hack: declare only if in_pcb.h and sys/socketvar.h have been
+ * included. Not all of our clients do.
+ */
+#if defined(_NETINET_IN_PCB_HH_) && defined(_SYS_SOCKETVAR_HH_)
+struct xtcpcb {
+ size_t xt_len;
+ struct inpcb xt_inp;
+ struct tcpcb xt_tp;
+ struct xsocket xt_socket;
+ u_quad_t xt_alignment_hack;
+};
+#endif
+
+/*
+ * Names for TCP sysctl objects
+ */
+#define TCPCTL_DO_RFC1323 1 /* use RFC-1323 extensions */
+#define TCPCTL_MSSDFLT 3 /* MSS default */
+#define TCPCTL_STATS 4 /* statistics (read-only) */
+#define TCPCTL_RTTDFLT 5 /* default RTT estimate */
+#define TCPCTL_KEEPIDLE 6 /* keepalive idle timer */
+#define TCPCTL_KEEPINTVL 7 /* interval to send keepalives */
+#define TCPCTL_SENDSPACE 8 /* send buffer space */
+#define TCPCTL_RECVSPACE 9 /* receive buffer space */
+#define TCPCTL_KEEPINIT 10 /* timeout for establishing syn */
+#define TCPCTL_PCBLIST 11 /* list of all outstanding PCBs */
+#define TCPCTL_DELACKTIME 12 /* time before sending delayed ACK */
+#define TCPCTL_V6MSSDFLT 13 /* MSS default for IPv6 */
+#define TCPCTL_SACK 14 /* Selective Acknowledgement, RFC 2018 */
+#define TCPCTL_DROP 15 /* drop tcp connection */
+#define TCPCTL_MAXID 16
+#define TCPCTL_FINWAIT2_TIMEOUT 17
+
+#define TCPCTL_NAMES { \
+ { 0, 0 }, \
+ { "rfc1323", CTLTYPE_INT }, \
+ { "mssdflt", CTLTYPE_INT }, \
+ { "stats", CTLTYPE_STRUCT }, \
+ { "rttdflt", CTLTYPE_INT }, \
+ { "keepidle", CTLTYPE_INT }, \
+ { "keepintvl", CTLTYPE_INT }, \
+ { "sendspace", CTLTYPE_INT }, \
+ { "recvspace", CTLTYPE_INT }, \
+ { "keepinit", CTLTYPE_INT }, \
+ { "pcblist", CTLTYPE_STRUCT }, \
+ { "delacktime", CTLTYPE_INT }, \
+ { "v6mssdflt", CTLTYPE_INT }, \
+ { "maxid", CTLTYPE_INT }, \
+}
+
+
+#ifdef _KERNEL
+#ifdef SYSCTL_DECL
+SYSCTL_DECL(_net_inet_tcp);
+SYSCTL_DECL(_net_inet_tcp_sack);
+MALLOC_DECLARE(M_TCPLOG);
+#endif
+
+VNET_DECLARE(struct inpcbhead, tcb); /* queue of active tcpcb's */
+VNET_DECLARE(struct inpcbinfo, tcbinfo);
+VNET_DECLARE(struct tcpstat, tcpstat); /* tcp statistics */
+extern int tcp_log_in_vain;
+VNET_DECLARE(int, tcp_mssdflt); /* XXX */
+VNET_DECLARE(int, tcp_minmss);
+VNET_DECLARE(int, tcp_delack_enabled);
+VNET_DECLARE(int, tcp_do_rfc3390);
+VNET_DECLARE(int, tcp_do_newreno);
+VNET_DECLARE(int, path_mtu_discovery);
+VNET_DECLARE(int, ss_fltsz);
+VNET_DECLARE(int, ss_fltsz_local);
+#define V_tcb VNET(tcb)
+#define V_tcbinfo VNET(tcbinfo)
+#define V_tcpstat VNET(tcpstat)
+#define V_tcp_mssdflt VNET(tcp_mssdflt)
+#define V_tcp_minmss VNET(tcp_minmss)
+#define V_tcp_delack_enabled VNET(tcp_delack_enabled)
+#define V_tcp_do_rfc3390 VNET(tcp_do_rfc3390)
+#define V_tcp_do_newreno VNET(tcp_do_newreno)
+#define V_path_mtu_discovery VNET(path_mtu_discovery)
+#define V_ss_fltsz VNET(ss_fltsz)
+#define V_ss_fltsz_local VNET(ss_fltsz_local)
+
+VNET_DECLARE(int, tcp_do_sack); /* SACK enabled/disabled */
+VNET_DECLARE(int, tcp_sc_rst_sock_fail); /* RST on sock alloc failure */
+#define V_tcp_do_sack VNET(tcp_do_sack)
+#define V_tcp_sc_rst_sock_fail VNET(tcp_sc_rst_sock_fail)
+
+VNET_DECLARE(int, tcp_do_ecn); /* TCP ECN enabled/disabled */
+VNET_DECLARE(int, tcp_ecn_maxretries);
+#define V_tcp_do_ecn VNET(tcp_do_ecn)
+#define V_tcp_ecn_maxretries VNET(tcp_ecn_maxretries)
+
+int tcp_addoptions(struct tcpopt *, u_char *);
+struct tcpcb *
+ tcp_close(struct tcpcb *);
+void tcp_discardcb(struct tcpcb *);
+void tcp_twstart(struct tcpcb *);
+#if 0
+int tcp_twrecycleable(struct tcptw *tw);
+#endif
+void tcp_twclose(struct tcptw *_tw, int _reuse);
+void tcp_ctlinput(int, struct sockaddr *, void *);
+int tcp_ctloutput(struct socket *, struct sockopt *);
+#ifndef __rtems__
+struct tcpcb *
+ tcp_drop(struct tcpcb *, int);
+#else
+struct tcpcb *
+tcp_drop(struct tcpcb *tp, int errno);
+#endif
+void tcp_drain(void);
+void tcp_init(void);
+#ifdef VIMAGE
+void tcp_destroy(void);
+#endif
+void tcp_fini(void *);
+char *tcp_log_addrs(struct in_conninfo *, struct tcphdr *, void *,
+ const void *);
+char *tcp_log_vain(struct in_conninfo *, struct tcphdr *, void *,
+ const void *);
+int tcp_reass(struct tcpcb *, struct tcphdr *, int *, struct mbuf *);
+void tcp_reass_init(void);
+void tcp_reass_flush(struct tcpcb *);
+#ifdef VIMAGE
+void tcp_reass_destroy(void);
+#endif
+void tcp_input(struct mbuf *, int);
+u_long tcp_maxmtu(struct in_conninfo *, int *);
+u_long tcp_maxmtu6(struct in_conninfo *, int *);
+void tcp_mss_update(struct tcpcb *, int, struct hc_metrics_lite *, int *);
+void tcp_mss(struct tcpcb *, int);
+int tcp_mssopt(struct in_conninfo *);
+#ifndef __rtems__
+struct inpcb *
+ tcp_drop_syn_sent(struct inpcb *, int);
+struct inpcb *
+ tcp_mtudisc(struct inpcb *, int);
+#else
+struct inpcb *
+tcp_drop_syn_sent(struct inpcb *inp, int errno);
+struct inpcb *
+tcp_mtudisc(struct inpcb *inp, int errno);
+#endif
+struct tcpcb *
+ tcp_newtcpcb(struct inpcb *);
+int tcp_output(struct tcpcb *);
+void tcp_respond(struct tcpcb *, void *,
+ struct tcphdr *, struct mbuf *, tcp_seq, tcp_seq, int);
+void tcp_tw_init(void);
+#ifdef VIMAGE
+void tcp_tw_destroy(void);
+#endif
+void tcp_tw_zone_change(void);
+int tcp_twcheck(struct inpcb *, struct tcpopt *, struct tcphdr *,
+ struct mbuf *, int);
+int tcp_twrespond(struct tcptw *, int);
+void tcp_setpersist(struct tcpcb *);
+#ifdef TCP_SIGNATURE
+int tcp_signature_compute(struct mbuf *, int, int, int, u_char *, u_int);
+#endif
+void tcp_slowtimo(void);
+struct tcptemp *
+ tcpip_maketemplate(struct inpcb *);
+void tcpip_fillheaders(struct inpcb *, void *, void *);
+void tcp_timer_activate(struct tcpcb *, int, u_int);
+int tcp_timer_active(struct tcpcb *, int);
+void tcp_trace(short, short, struct tcpcb *, void *, struct tcphdr *, int);
+void tcp_xmit_bandwidth_limit(struct tcpcb *tp, tcp_seq ack_seq);
+/*
+ * All tcp_hc_* functions are IPv4 and IPv6 (via in_conninfo)
+ */
+void tcp_hc_init(void);
+#ifdef VIMAGE
+void tcp_hc_destroy(void);
+#endif
+void tcp_hc_get(struct in_conninfo *, struct hc_metrics_lite *);
+u_long tcp_hc_getmtu(struct in_conninfo *);
+void tcp_hc_updatemtu(struct in_conninfo *, u_long);
+void tcp_hc_update(struct in_conninfo *, struct hc_metrics_lite *);
+
+extern struct pr_usrreqs tcp_usrreqs;
+extern u_long tcp_sendspace;
+extern u_long tcp_recvspace;
+tcp_seq tcp_new_isn(struct tcpcb *);
+
+void tcp_sack_doack(struct tcpcb *, struct tcpopt *, tcp_seq);
+void tcp_update_sack_list(struct tcpcb *tp, tcp_seq rcv_laststart, tcp_seq rcv_lastend);
+void tcp_clean_sackreport(struct tcpcb *tp);
+void tcp_sack_adjust(struct tcpcb *tp);
+struct sackhole *tcp_sack_output(struct tcpcb *tp, int *sack_bytes_rexmt);
+void tcp_sack_partialack(struct tcpcb *, struct tcphdr *);
+void tcp_free_sackholes(struct tcpcb *tp);
+int tcp_newreno(struct tcpcb *, struct tcphdr *);
+u_long tcp_seq_subtract(u_long, u_long );
+
+#endif /* _KERNEL */
+
+#endif /* _NETINET_TCP_VAR_HH_ */
diff --git a/rtems/freebsd/netinet/tcpip.h b/rtems/freebsd/netinet/tcpip.h
new file mode 100644
index 00000000..337c07a6
--- /dev/null
+++ b/rtems/freebsd/netinet/tcpip.h
@@ -0,0 +1,59 @@
+/*-
+ * Copyright (c) 1982, 1986, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)tcpip.h 8.1 (Berkeley) 6/10/93
+ * $FreeBSD$
+ */
+
+#ifndef _NETINET_TCPIP_HH_
+#define _NETINET_TCPIP_HH_
+
+/*
+ * Tcp+ip header, after ip options removed.
+ */
+struct tcpiphdr {
+ struct ipovly ti_i; /* overlaid ip structure */
+ struct tcphdr ti_t; /* tcp header */
+};
+#define ti_x1 ti_i.ih_x1
+#define ti_pr ti_i.ih_pr
+#define ti_len ti_i.ih_len
+#define ti_src ti_i.ih_src
+#define ti_dst ti_i.ih_dst
+#define ti_sport ti_t.th_sport
+#define ti_dport ti_t.th_dport
+#define ti_seq ti_t.th_seq
+#define ti_ack ti_t.th_ack
+#define ti_x2 ti_t.th_x2
+#define ti_off ti_t.th_off
+#define ti_flags ti_t.th_flags
+#define ti_win ti_t.th_win
+#define ti_sum ti_t.th_sum
+#define ti_urp ti_t.th_urp
+
+#endif
diff --git a/rtems/freebsd/netinet/toedev.h b/rtems/freebsd/netinet/toedev.h
new file mode 100644
index 00000000..4623845c
--- /dev/null
+++ b/rtems/freebsd/netinet/toedev.h
@@ -0,0 +1,162 @@
+/*-
+ * Copyright (c) 2007, Chelsio Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Neither the name of the Chelsio Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _NETINET_TOEDEV_HH_
+#define _NETINET_TOEDEV_HH_
+
+#ifndef _KERNEL
+#error "no user-serviceable parts inside"
+#endif
+
+extern uint32_t toedev_registration_count;
+
+/* Parameter values for offload_get_phys_egress(). */
+enum {
+ TOE_OPEN,
+ TOE_FAILOVER,
+};
+
+/* Parameter values for toe_failover(). */
+enum {
+ TOE_ACTIVE_SLAVE,
+ TOE_LINK_DOWN,
+ TOE_LINK_UP,
+ TOE_RELEASE,
+ TOE_RELEASE_ALL,
+};
+
+#define TOENAMSIZ 16
+
+/* Get the toedev associated with an ifnet. */
+#define TOEDEV(ifp) ((ifp)->if_llsoftc)
+
+struct offload_id {
+ unsigned int id;
+ unsigned long data;
+};
+
+struct ifnet;
+struct rt_entry;
+struct tom_info;
+struct sysctl_oid;
+struct socket;
+struct mbuf;
+
+struct toedev {
+ TAILQ_ENTRY(toedev) entry;
+ char tod_name[TOENAMSIZ]; /* TOE device name */
+ unsigned int tod_ttid; /* TOE type id */
+ unsigned long tod_flags; /* device flags */
+ unsigned int tod_mtu; /* max TX offloaded data */
+ unsigned int tod_nconn; /* max # of offloaded
+ * connections
+ */
+ struct ifnet *tod_lldev; /* first interface */
+ const struct tom_info *tod_offload_mod; /* TCP offload module */
+
+ /*
+ * This TOE device is capable of offloading the connection for socket so
+ */
+ int (*tod_can_offload)(struct toedev *dev, struct socket *so);
+
+ /*
+ * Establish a connection to nam using the TOE device dev
+ */
+ int (*tod_connect)(struct toedev *dev, struct socket *so,
+ struct rtentry *rt, struct sockaddr *nam);
+ /*
+ * Send an mbuf down to the toe device
+ */
+ int (*tod_send)(struct toedev *dev, struct mbuf *m);
+ /*
+ * Receive an array of mbufs from the TOE device dev
+ */
+ int (*tod_recv)(struct toedev *dev, struct mbuf **m, int n);
+ /*
+ * Device specific ioctl interface
+ */
+ int (*tod_ctl)(struct toedev *dev, unsigned int req, void *data);
+ /*
+ * Update L2 entry in toedev
+ */
+ void (*tod_arp_update)(struct toedev *dev, struct rtentry *neigh);
+ /*
+ * Failover from one toe device to another
+ */
+ void (*tod_failover)(struct toedev *dev, struct ifnet *bond_ifp,
+ struct ifnet *ndev, int event);
+ void *tod_priv; /* driver private data */
+ void *tod_l2opt; /* optional layer 2 data */
+ void *tod_l3opt; /* optional layer 3 data */
+ void *tod_l4opt; /* optional layer 4 data */
+ void *tod_ulp; /* upper level protocol */
+};
+
+struct tom_info {
+ TAILQ_ENTRY(tom_info) entry;
+ int (*ti_attach)(struct toedev *dev,
+ const struct offload_id *entry);
+ int (*ti_detach)(struct toedev *dev);
+ const char *ti_name;
+ const struct offload_id *ti_id_table;
+};
+
+static __inline void
+init_offload_dev(struct toedev *dev)
+{
+}
+
+int register_tom(struct tom_info *t);
+int unregister_tom(struct tom_info *t);
+int register_toedev(struct toedev *dev, const char *name);
+int unregister_toedev(struct toedev *dev);
+int activate_offload(struct toedev *dev);
+int toe_send(struct toedev *dev, struct mbuf *m);
+void toe_arp_update(struct rtentry *rt);
+struct ifnet *offload_get_phys_egress(struct ifnet *ifp,
+ struct socket *so, int context);
+int toe_receive_mbuf(struct toedev *dev, struct mbuf **m, int n);
+
+static __inline void
+toe_neigh_update(struct ifnet *ifp)
+{
+}
+
+static __inline void
+toe_failover(struct ifnet *bond_ifp, struct ifnet *fail_ifp, int event)
+{
+}
+
+static __inline int
+toe_enslave(struct ifnet *bond_ifp, struct ifnet *slave_ifp)
+{
+ return (0);
+}
+
+#endif /* _NETINET_TOEDEV_HH_ */
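For orientation, an editorial sketch of the registration sequence a TOE driver would follow against the declarations above. Everything named example_* is invented; init_offload_dev(), register_toedev() and activate_offload() are the interfaces this header declares.

    /* A do-nothing offload policy: decline every connection. */
    static int
    example_can_offload(struct toedev *dev, struct socket *so)
    {
            return (0);
    }

    static struct toedev example_dev;

    static int
    example_attach(void)
    {
            int error;

            init_offload_dev(&example_dev);
            example_dev.tod_can_offload = example_can_offload;
            error = register_toedev(&example_dev, "example0");
            if (error != 0)
                    return (error);
            return (activate_offload(&example_dev));
    }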
diff --git a/rtems/freebsd/netinet/udp.h b/rtems/freebsd/netinet/udp.h
new file mode 100644
index 00000000..66f153d9
--- /dev/null
+++ b/rtems/freebsd/netinet/udp.h
@@ -0,0 +1,67 @@
+/*-
+ * Copyright (c) 1982, 1986, 1993
+ * The Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)udp.h 8.1 (Berkeley) 6/10/93
+ * $FreeBSD$
+ */
+
+#ifndef _NETINET_UDP_HH_
+#define _NETINET_UDP_HH_
+
+/*
+ * UDP protocol header.
+ * Per RFC 768, August, 1980.
+ */
+struct udphdr {
+ u_short uh_sport; /* source port */
+ u_short uh_dport; /* destination port */
+ u_short uh_ulen; /* udp length */
+ u_short uh_sum; /* udp checksum */
+};
+
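All four header fields are 16-bit quantities carried in network byte order, and uh_ulen counts the 8-byte header as well as the payload. An editorial sketch of filling one in (the helper name is invented):

    #include <sys/types.h>
    #include <netinet/udp.h>        /* struct udphdr */
    #include <arpa/inet.h>          /* htons() */

    /* Build a UDP header for "len" payload bytes. */
    static void
    fill_udphdr(struct udphdr *uh, u_short sport, u_short dport, u_short len)
    {
            uh->uh_sport = htons(sport);
            uh->uh_dport = htons(dport);
            uh->uh_ulen = htons(sizeof(struct udphdr) + len);
            uh->uh_sum = 0;         /* 0: checksum not computed (IPv4 only) */
    }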
+/*
+ * User-settable options (used with setsockopt).
+ */
+#define UDP_ENCAP 0x01
+
+
+/*
+ * UDP Encapsulation of IPsec Packets options.
+ */
+/* Encapsulation types. */
+#define UDP_ENCAP_ESPINUDP_NON_IKE 1 /* draft-ietf-ipsec-nat-t-ike-00/01 */
+#define UDP_ENCAP_ESPINUDP 2 /* draft-ietf-ipsec-udp-encaps-02+ */
+
+/* Default ESP in UDP encapsulation port. */
+#define UDP_ENCAP_ESPINUDP_PORT 500
+
+/* Maximum UDP fragment size for ESP over UDP. */
+#define UDP_ENCAP_ESPINUDP_MAXFRAGLEN 552
+
+#endif
diff --git a/rtems/freebsd/netinet/udp_usrreq.c b/rtems/freebsd/netinet/udp_usrreq.c
new file mode 100644
index 00000000..342ed61f
--- /dev/null
+++ b/rtems/freebsd/netinet/udp_usrreq.c
@@ -0,0 +1,1633 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
+ * The Regents of the University of California.
+ * Copyright (c) 2008 Robert N. M. Watson
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)udp_usrreq.c 8.6 (Berkeley) 5/23/95
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/freebsd/local/opt_ipfw.h>
+#include <rtems/freebsd/local/opt_inet6.h>
+#include <rtems/freebsd/local/opt_ipsec.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/domain.h>
+#include <rtems/freebsd/sys/eventhandler.h>
+#include <rtems/freebsd/sys/jail.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/priv.h>
+#include <rtems/freebsd/sys/proc.h>
+#include <rtems/freebsd/sys/protosw.h>
+#include <rtems/freebsd/sys/signalvar.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/socketvar.h>
+#include <rtems/freebsd/sys/sx.h>
+#include <rtems/freebsd/sys/sysctl.h>
+#include <rtems/freebsd/sys/syslog.h>
+#include <rtems/freebsd/sys/systm.h>
+
+#include <rtems/freebsd/vm/uma.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/route.h>
+
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/in_pcb.h>
+#include <rtems/freebsd/netinet/in_systm.h>
+#include <rtems/freebsd/netinet/in_var.h>
+#include <rtems/freebsd/netinet/ip.h>
+#ifdef INET6
+#include <rtems/freebsd/netinet/ip6.h>
+#endif
+#include <rtems/freebsd/netinet/ip_icmp.h>
+#include <rtems/freebsd/netinet/icmp_var.h>
+#include <rtems/freebsd/netinet/ip_var.h>
+#include <rtems/freebsd/netinet/ip_options.h>
+#ifdef INET6
+#include <rtems/freebsd/netinet6/ip6_var.h>
+#endif
+#include <rtems/freebsd/netinet/udp.h>
+#include <rtems/freebsd/netinet/udp_var.h>
+
+#ifdef IPSEC
+#include <rtems/freebsd/netipsec/ipsec.h>
+#include <rtems/freebsd/netipsec/esp.h>
+#endif
+
+#include <rtems/freebsd/machine/in_cksum.h>
+
+#include <rtems/freebsd/security/mac/mac_framework.h>
+
+/*
+ * UDP protocol implementation.
+ * Per RFC 768, August, 1980.
+ */
+
+/*
+ * BSD 4.2 defaulted the udp checksum to be off. Turning off udp checksums
+ * removes the only data integrity mechanism for packets; malformed packets
+ * that would otherwise be discarded due to bad checksums are then delivered
+ * and may cause problems (especially for NFS data blocks).
+ */
+static int udp_cksum = 1;
+SYSCTL_INT(_net_inet_udp, UDPCTL_CHECKSUM, checksum, CTLFLAG_RW, &udp_cksum,
+ 0, "compute udp checksum");
+
+int udp_log_in_vain = 0;
+SYSCTL_INT(_net_inet_udp, OID_AUTO, log_in_vain, CTLFLAG_RW,
+ &udp_log_in_vain, 0, "Log all incoming UDP packets");
+
+VNET_DEFINE(int, udp_blackhole) = 0;
+SYSCTL_VNET_INT(_net_inet_udp, OID_AUTO, blackhole, CTLFLAG_RW,
+ &VNET_NAME(udp_blackhole), 0,
+ "Do not send port unreachables for refused connects");
+
+u_long udp_sendspace = 9216; /* really max datagram size */
+SYSCTL_ULONG(_net_inet_udp, UDPCTL_MAXDGRAM, maxdgram, CTLFLAG_RW,
+ &udp_sendspace, 0, "Maximum outgoing UDP datagram size");
+
+u_long udp_recvspace = 40 * (1024 +
+#ifdef INET6
+ sizeof(struct sockaddr_in6)
+#else
+ sizeof(struct sockaddr_in)
+#endif
+ ); /* 40 1K datagrams */
+
+SYSCTL_ULONG(_net_inet_udp, UDPCTL_RECVSPACE, recvspace, CTLFLAG_RW,
+ &udp_recvspace, 0, "Maximum space for incoming UDP datagrams");
+
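Worked sizing (editorial): each queued datagram is stored together with a copy of the sender's address, so the reservation is 40 slots of 1 KiB payload plus one sockaddr each. With INET6 (sizeof(struct sockaddr_in6) is 28 on this code base) that is 40 * (1024 + 28) = 42080 bytes; with plain sockaddr_in (16 bytes) it is 40 * (1024 + 16) = 41600 bytes.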
+VNET_DEFINE(struct inpcbhead, udb); /* from udp_var.h */
+VNET_DEFINE(struct inpcbinfo, udbinfo);
+static VNET_DEFINE(uma_zone_t, udpcb_zone);
+#define V_udpcb_zone VNET(udpcb_zone)
+
+#ifndef UDBHASHSIZE
+#define UDBHASHSIZE 128
+#endif
+
+VNET_DEFINE(struct udpstat, udpstat); /* from udp_var.h */
+SYSCTL_VNET_STRUCT(_net_inet_udp, UDPCTL_STATS, stats, CTLFLAG_RW,
+ &VNET_NAME(udpstat), udpstat,
+ "UDP statistics (struct udpstat, netinet/udp_var.h)");
+
+static void udp_detach(struct socket *so);
+static int udp_output(struct inpcb *, struct mbuf *, struct sockaddr *,
+ struct mbuf *, struct thread *);
+#ifdef IPSEC
+#ifdef IPSEC_NAT_T
+#define UF_ESPINUDP_ALL (UF_ESPINUDP_NON_IKE|UF_ESPINUDP)
+#ifdef INET
+static struct mbuf *udp4_espdecap(struct inpcb *, struct mbuf *, int);
+#endif
+#endif /* IPSEC_NAT_T */
+#endif /* IPSEC */
+
+static void
+udp_zone_change(void *tag)
+{
+
+ uma_zone_set_max(V_udbinfo.ipi_zone, maxsockets);
+ uma_zone_set_max(V_udpcb_zone, maxsockets);
+}
+
+static int
+udp_inpcb_init(void *mem, int size, int flags)
+{
+ struct inpcb *inp;
+
+ inp = mem;
+ INP_LOCK_INIT(inp, "inp", "udpinp");
+ return (0);
+}
+
+void
+udp_init(void)
+{
+
+ INP_INFO_LOCK_INIT(&V_udbinfo, "udp");
+ LIST_INIT(&V_udb);
+#ifdef VIMAGE
+ V_udbinfo.ipi_vnet = curvnet;
+#endif
+ V_udbinfo.ipi_listhead = &V_udb;
+ V_udbinfo.ipi_hashbase = hashinit(UDBHASHSIZE, M_PCB,
+ &V_udbinfo.ipi_hashmask);
+ V_udbinfo.ipi_porthashbase = hashinit(UDBHASHSIZE, M_PCB,
+ &V_udbinfo.ipi_porthashmask);
+ V_udbinfo.ipi_zone = uma_zcreate("udp_inpcb", sizeof(struct inpcb),
+ NULL, NULL, udp_inpcb_init, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
+ uma_zone_set_max(V_udbinfo.ipi_zone, maxsockets);
+
+ V_udpcb_zone = uma_zcreate("udpcb", sizeof(struct udpcb),
+ NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
+ uma_zone_set_max(V_udpcb_zone, maxsockets);
+
+ EVENTHANDLER_REGISTER(maxsockets_change, udp_zone_change, NULL,
+ EVENTHANDLER_PRI_ANY);
+}
+
+/*
+ * Kernel module interface for updating udpstat. The argument is an index
+ * into udpstat treated as an array of u_long. While this encodes the
+ * general layout of udpstat into the caller, it doesn't encode its location,
+ * so that future changes to add, for example, per-CPU stats support won't
+ * cause binary compatibility problems for kernel modules.
+ */
+void
+kmod_udpstat_inc(int statnum)
+{
+
+ (*((u_long *)&V_udpstat + statnum))++;
+}
+
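The index argument is the field's offset measured in u_long slots, exactly as the comment above describes. An editorial sketch of the wrapper a module would use; the macro shown is modelled on KMOD_TCPSTAT_INC from tcp_var.h earlier in this patch, and udp_var.h's own spelling may differ:

    #define EXAMPLE_UDPSTAT_INC(name) \
        kmod_udpstat_inc(offsetof(struct udpstat, name) / sizeof(u_long))

    /* EXAMPLE_UDPSTAT_INC(udps_ipackets) bumps the same counter that
     * statically linked code reaches as V_udpstat.udps_ipackets. */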
+int
+udp_newudpcb(struct inpcb *inp)
+{
+ struct udpcb *up;
+
+ up = uma_zalloc(V_udpcb_zone, M_NOWAIT | M_ZERO);
+ if (up == NULL)
+ return (ENOBUFS);
+ inp->inp_ppcb = up;
+ return (0);
+}
+
+void
+udp_discardcb(struct udpcb *up)
+{
+
+ uma_zfree(V_udpcb_zone, up);
+}
+
+#ifdef VIMAGE
+void
+udp_destroy(void)
+{
+
+ hashdestroy(V_udbinfo.ipi_hashbase, M_PCB,
+ V_udbinfo.ipi_hashmask);
+ hashdestroy(V_udbinfo.ipi_porthashbase, M_PCB,
+ V_udbinfo.ipi_porthashmask);
+
+ uma_zdestroy(V_udpcb_zone);
+ uma_zdestroy(V_udbinfo.ipi_zone);
+ INP_INFO_LOCK_DESTROY(&V_udbinfo);
+}
+#endif
+
+/*
+ * Subroutine of udp_input(), which appends the provided mbuf chain to the
+ * passed pcb/socket. The caller must provide a sockaddr_in via udp_in that
+ * contains the source address. If the socket ends up being an IPv6 socket,
+ * udp_append() will convert to a sockaddr_in6 before passing the address
+ * into the socket code.
+ */
+static void
+udp_append(struct inpcb *inp, struct ip *ip, struct mbuf *n, int off,
+ struct sockaddr_in *udp_in)
+{
+ struct sockaddr *append_sa;
+ struct socket *so;
+ struct mbuf *opts = 0;
+#ifdef INET6
+ struct sockaddr_in6 udp_in6;
+#endif
+#ifdef IPSEC
+#ifdef IPSEC_NAT_T
+#ifdef INET
+ struct udpcb *up;
+#endif
+#endif
+#endif
+
+ INP_RLOCK_ASSERT(inp);
+
+#ifdef IPSEC
+ /* Check AH/ESP integrity. */
+ if (ipsec4_in_reject(n, inp)) {
+ m_freem(n);
+ V_ipsec4stat.in_polvio++;
+ return;
+ }
+#ifdef IPSEC_NAT_T
+#ifdef INET
+ up = intoudpcb(inp);
+ KASSERT(up != NULL, ("%s: udpcb NULL", __func__));
+ if (up->u_flags & UF_ESPINUDP_ALL) { /* IPSec UDP encaps. */
+ n = udp4_espdecap(inp, n, off);
+ if (n == NULL) /* Consumed. */
+ return;
+ }
+#endif /* INET */
+#endif /* IPSEC_NAT_T */
+#endif /* IPSEC */
+#ifdef MAC
+ if (mac_inpcb_check_deliver(inp, n) != 0) {
+ m_freem(n);
+ return;
+ }
+#endif
+ if (inp->inp_flags & INP_CONTROLOPTS ||
+ inp->inp_socket->so_options & (SO_TIMESTAMP | SO_BINTIME)) {
+#ifdef INET6
+ if (inp->inp_vflag & INP_IPV6)
+ (void)ip6_savecontrol_v4(inp, n, &opts, NULL);
+ else
+#endif
+ ip_savecontrol(inp, &opts, ip, n);
+ }
+#ifdef INET6
+ if (inp->inp_vflag & INP_IPV6) {
+ bzero(&udp_in6, sizeof(udp_in6));
+ udp_in6.sin6_len = sizeof(udp_in6);
+ udp_in6.sin6_family = AF_INET6;
+ in6_sin_2_v4mapsin6(udp_in, &udp_in6);
+ append_sa = (struct sockaddr *)&udp_in6;
+ } else
+#endif
+ append_sa = (struct sockaddr *)udp_in;
+ m_adj(n, off);
+
+ so = inp->inp_socket;
+ SOCKBUF_LOCK(&so->so_rcv);
+ if (sbappendaddr_locked(&so->so_rcv, append_sa, n, opts) == 0) {
+ SOCKBUF_UNLOCK(&so->so_rcv);
+ m_freem(n);
+ if (opts)
+ m_freem(opts);
+ UDPSTAT_INC(udps_fullsock);
+ } else
+ sorwakeup_locked(so);
+}
+
+void
+udp_input(struct mbuf *m, int off)
+{
+ int iphlen = off;
+ struct ip *ip;
+ struct udphdr *uh;
+ struct ifnet *ifp;
+ struct inpcb *inp;
+ struct udpcb *up;
+ int len;
+ struct ip save_ip;
+ struct sockaddr_in udp_in;
+#ifdef IPFIREWALL_FORWARD
+ struct m_tag *fwd_tag;
+#endif
+
+ ifp = m->m_pkthdr.rcvif;
+ UDPSTAT_INC(udps_ipackets);
+
+ /*
+ * Strip IP options, if any; should skip this, make available to
+ * user, and use on returned packets, but we don't yet have a way to
+ * check the checksum with options still present.
+ */
+ if (iphlen > sizeof (struct ip)) {
+ ip_stripoptions(m, (struct mbuf *)0);
+ iphlen = sizeof(struct ip);
+ }
+
+ /*
+ * Get IP and UDP header together in first mbuf.
+ */
+ ip = mtod(m, struct ip *);
+ if (m->m_len < iphlen + sizeof(struct udphdr)) {
+ if ((m = m_pullup(m, iphlen + sizeof(struct udphdr))) == 0) {
+ UDPSTAT_INC(udps_hdrops);
+ return;
+ }
+ ip = mtod(m, struct ip *);
+ }
+ uh = (struct udphdr *)((caddr_t)ip + iphlen);
+
+ /*
+ * Destination port of 0 is illegal, based on RFC 768.
+ */
+ if (uh->uh_dport == 0)
+ goto badunlocked;
+
+ /*
+ * Construct sockaddr format source address. Stuff source address
+ * and datagram in user buffer.
+ */
+ bzero(&udp_in, sizeof(udp_in));
+ udp_in.sin_len = sizeof(udp_in);
+ udp_in.sin_family = AF_INET;
+ udp_in.sin_port = uh->uh_sport;
+ udp_in.sin_addr = ip->ip_src;
+
+ /*
+ * Make mbuf data length reflect UDP length. If not enough data to
+ * reflect UDP length, drop.
+ */
+ len = ntohs((u_short)uh->uh_ulen);
+ if (ip->ip_len != len) {
+ if (len > ip->ip_len || len < sizeof(struct udphdr)) {
+ UDPSTAT_INC(udps_badlen);
+ goto badunlocked;
+ }
+ m_adj(m, len - ip->ip_len);
+ /* ip->ip_len = len; */
+ }
+
+ /*
+ * Save a copy of the IP header in case we want restore it for
+ * sending an ICMP error message in response.
+ */
+ if (!V_udp_blackhole)
+ save_ip = *ip;
+ else
+ memset(&save_ip, 0, sizeof(save_ip));
+
+ /*
+ * Checksum extended UDP header and data.
+ */
+ if (uh->uh_sum) {
+ u_short uh_sum;
+
+ if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) {
+ if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR)
+ uh_sum = m->m_pkthdr.csum_data;
+ else
+ uh_sum = in_pseudo(ip->ip_src.s_addr,
+ ip->ip_dst.s_addr, htonl((u_short)len +
+ m->m_pkthdr.csum_data + IPPROTO_UDP));
+ uh_sum ^= 0xffff;
+ } else {
+ char b[9];
+
+ bcopy(((struct ipovly *)ip)->ih_x1, b, 9);
+ bzero(((struct ipovly *)ip)->ih_x1, 9);
+ ((struct ipovly *)ip)->ih_len = uh->uh_ulen;
+ uh_sum = in_cksum(m, len + sizeof (struct ip));
+ bcopy(b, ((struct ipovly *)ip)->ih_x1, 9);
+ }
+ if (uh_sum) {
+ UDPSTAT_INC(udps_badsum);
+ m_freem(m);
+ return;
+ }
+ } else
+ UDPSTAT_INC(udps_nosum);
+
+#ifdef IPFIREWALL_FORWARD
+ /*
+ * Grab info from PACKET_TAG_IPFORWARD tag prepended to the chain.
+ */
+ fwd_tag = m_tag_find(m, PACKET_TAG_IPFORWARD, NULL);
+ if (fwd_tag != NULL) {
+ struct sockaddr_in *next_hop;
+
+ /*
+ * Do the hack.
+ */
+ next_hop = (struct sockaddr_in *)(fwd_tag + 1);
+ ip->ip_dst = next_hop->sin_addr;
+ uh->uh_dport = ntohs(next_hop->sin_port);
+
+ /*
+ * Remove the tag from the packet. We don't need it anymore.
+ */
+ m_tag_delete(m, fwd_tag);
+ }
+#endif
+
+ INP_INFO_RLOCK(&V_udbinfo);
+ if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) ||
+ in_broadcast(ip->ip_dst, ifp)) {
+ struct inpcb *last;
+ struct ip_moptions *imo;
+
+ last = NULL;
+ LIST_FOREACH(inp, &V_udb, inp_list) {
+ if (inp->inp_lport != uh->uh_dport)
+ continue;
+#ifdef INET6
+ if ((inp->inp_vflag & INP_IPV4) == 0)
+ continue;
+#endif
+ if (inp->inp_laddr.s_addr != INADDR_ANY &&
+ inp->inp_laddr.s_addr != ip->ip_dst.s_addr)
+ continue;
+ if (inp->inp_faddr.s_addr != INADDR_ANY &&
+ inp->inp_faddr.s_addr != ip->ip_src.s_addr)
+ continue;
+ if (inp->inp_fport != 0 &&
+ inp->inp_fport != uh->uh_sport)
+ continue;
+
+ INP_RLOCK(inp);
+
+ /*
+ * Handle socket delivery policy for any-source
+ * and source-specific multicast. [RFC3678]
+ */
+ imo = inp->inp_moptions;
+ if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) &&
+ imo != NULL) {
+ struct sockaddr_in group;
+ int blocked;
+
+ bzero(&group, sizeof(struct sockaddr_in));
+ group.sin_len = sizeof(struct sockaddr_in);
+ group.sin_family = AF_INET;
+ group.sin_addr = ip->ip_dst;
+
+ blocked = imo_multi_filter(imo, ifp,
+ (struct sockaddr *)&group,
+ (struct sockaddr *)&udp_in);
+ if (blocked != MCAST_PASS) {
+ if (blocked == MCAST_NOTGMEMBER)
+ IPSTAT_INC(ips_notmember);
+ if (blocked == MCAST_NOTSMEMBER ||
+ blocked == MCAST_MUTED)
+ UDPSTAT_INC(udps_filtermcast);
+ INP_RUNLOCK(inp);
+ continue;
+ }
+ }
+ if (last != NULL) {
+ struct mbuf *n;
+
+ n = m_copy(m, 0, M_COPYALL);
+ up = intoudpcb(last);
+ if (up->u_tun_func == NULL) {
+ if (n != NULL)
+ udp_append(last,
+ ip, n,
+ iphlen +
+ sizeof(struct udphdr),
+ &udp_in);
+ } else {
+ /*
+				 * Engage the tunneling protocol; we
+				 * must leave the info lock held,
+				 * since we are hunting through
+				 * multiple UDP inpcbs.
+ */
+
+ (*up->u_tun_func)(n, iphlen, last);
+ }
+ INP_RUNLOCK(last);
+ }
+ last = inp;
+ /*
+ * Don't look for additional matches if this one does
+ * not have either the SO_REUSEPORT or SO_REUSEADDR
+ * socket options set. This heuristic avoids
+ * searching through all pcbs in the common case of a
+ * non-shared port. It assumes that an application
+ * will never clear these options after setting them.
+ */
+ if ((last->inp_socket->so_options &
+ (SO_REUSEPORT|SO_REUSEADDR)) == 0)
+ break;
+ }
+
+ if (last == NULL) {
+ /*
+ * No matching pcb found; discard datagram. (No need
+ * to send an ICMP Port Unreachable for a broadcast
+		 * or multicast datagram.)
+ */
+ UDPSTAT_INC(udps_noportbcast);
+ goto badheadlocked;
+ }
+ up = intoudpcb(last);
+ if (up->u_tun_func == NULL) {
+ udp_append(last, ip, m, iphlen + sizeof(struct udphdr),
+ &udp_in);
+ } else {
+ /*
+ * Engage the tunneling protocol.
+ */
+ (*up->u_tun_func)(m, iphlen, last);
+ }
+ INP_RUNLOCK(last);
+ INP_INFO_RUNLOCK(&V_udbinfo);
+ return;
+ }
+
+ /*
+ * Locate pcb for datagram.
+ */
+ inp = in_pcblookup_hash(&V_udbinfo, ip->ip_src, uh->uh_sport,
+ ip->ip_dst, uh->uh_dport, 1, ifp);
+ if (inp == NULL) {
+ if (udp_log_in_vain) {
+ char buf[4*sizeof "123"];
+
+ strcpy(buf, inet_ntoa(ip->ip_dst));
+ log(LOG_INFO,
+ "Connection attempt to UDP %s:%d from %s:%d\n",
+ buf, ntohs(uh->uh_dport), inet_ntoa(ip->ip_src),
+ ntohs(uh->uh_sport));
+ }
+ UDPSTAT_INC(udps_noport);
+ if (m->m_flags & (M_BCAST | M_MCAST)) {
+ UDPSTAT_INC(udps_noportbcast);
+ goto badheadlocked;
+ }
+ if (V_udp_blackhole)
+ goto badheadlocked;
+ if (badport_bandlim(BANDLIM_ICMP_UNREACH) < 0)
+ goto badheadlocked;
+ *ip = save_ip;
+ ip->ip_len += iphlen;
+ icmp_error(m, ICMP_UNREACH, ICMP_UNREACH_PORT, 0, 0);
+ INP_INFO_RUNLOCK(&V_udbinfo);
+ return;
+ }
+
+ /*
+ * Check the minimum TTL for socket.
+ */
+ INP_RLOCK(inp);
+ INP_INFO_RUNLOCK(&V_udbinfo);
+ if (inp->inp_ip_minttl && inp->inp_ip_minttl > ip->ip_ttl) {
+ INP_RUNLOCK(inp);
+ goto badunlocked;
+ }
+ up = intoudpcb(inp);
+ if (up->u_tun_func == NULL) {
+ udp_append(inp, ip, m, iphlen + sizeof(struct udphdr), &udp_in);
+ } else {
+ /*
+ * Engage the tunneling protocol.
+ */
+
+ (*up->u_tun_func)(m, iphlen, inp);
+ }
+ INP_RUNLOCK(inp);
+ return;
+
+badheadlocked:
+ if (inp)
+ INP_RUNLOCK(inp);
+ INP_INFO_RUNLOCK(&V_udbinfo);
+badunlocked:
+ m_freem(m);
+}
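+
+/*
+ * Illustrative sketch (not part of the port): the software checksum
+ * path in udp_input() above folds the RFC 768 IPv4 pseudo-header into
+ * the UDP checksum.  A minimal standalone version of that computation,
+ * assuming a contiguous buffer rather than an mbuf chain and an even
+ * 'len' (an odd trailing byte must be zero-padded), might look like
+ * this; the example_* names are hypothetical.
+ */
+#if 0
+/* One's complement sum over a buffer of network-order 16-bit words. */
+static uint32_t
+example_sum16(const void *buf, int len, uint32_t sum)
+{
+	const uint16_t *p = buf;
+
+	while (len > 1) {
+		sum += *p++;
+		len -= 2;
+	}
+	return (sum);
+}
+
+static uint16_t
+example_udp_cksum(struct in_addr src, struct in_addr dst,
+    const void *udp, uint16_t len)
+{
+	uint16_t pseudo[2];
+	uint32_t sum;
+
+	/* Pseudo-header: source, destination, zero/protocol, UDP length. */
+	pseudo[0] = htons(IPPROTO_UDP);
+	pseudo[1] = htons(len);
+	sum = example_sum16(&src, sizeof(src), 0);
+	sum = example_sum16(&dst, sizeof(dst), sum);
+	sum = example_sum16(pseudo, sizeof(pseudo), sum);
+	/* UDP header plus payload, 'len' bytes in total. */
+	sum = example_sum16(udp, len, sum);
+
+	/* Fold the carries and take the one's complement. */
+	while (sum >> 16)
+		sum = (sum & 0xffff) + (sum >> 16);
+	sum = ~sum & 0xffff;
+	/* RFC 768: an all-zero result is transmitted as all ones. */
+	return (sum != 0 ? (uint16_t)sum : 0xffff);
+}
+#endif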
+
+/*
+ * Notify a udp user of an asynchronous error; just wake up so that they can
+ * collect error status.
+ */
+struct inpcb *
+udp_notify(struct inpcb *inp, int errno)
+{
+
+ /*
+ * While udp_ctlinput() always calls udp_notify() with a read lock
+ * when invoking it directly, in_pcbnotifyall() currently uses write
+ * locks due to sharing code with TCP. For now, accept either a read
+ * or a write lock, but a read lock is sufficient.
+ */
+ INP_LOCK_ASSERT(inp);
+
+ inp->inp_socket->so_error = errno;
+ sorwakeup(inp->inp_socket);
+ sowwakeup(inp->inp_socket);
+ return (inp);
+}
+
+void
+udp_ctlinput(int cmd, struct sockaddr *sa, void *vip)
+{
+ struct ip *ip = vip;
+ struct udphdr *uh;
+ struct in_addr faddr;
+ struct inpcb *inp;
+
+ faddr = ((struct sockaddr_in *)sa)->sin_addr;
+ if (sa->sa_family != AF_INET || faddr.s_addr == INADDR_ANY)
+ return;
+
+ /*
+ * Redirects don't need to be handled up here.
+ */
+ if (PRC_IS_REDIRECT(cmd))
+ return;
+
+ /*
+ * Hostdead is ugly because it goes linearly through all PCBs.
+ *
+ * XXX: We never get this from ICMP, otherwise it makes an excellent
+ * DoS attack on machines with many connections.
+ */
+ if (cmd == PRC_HOSTDEAD)
+ ip = NULL;
+ else if ((unsigned)cmd >= PRC_NCMDS || inetctlerrmap[cmd] == 0)
+ return;
+ if (ip != NULL) {
+ uh = (struct udphdr *)((caddr_t)ip + (ip->ip_hl << 2));
+ INP_INFO_RLOCK(&V_udbinfo);
+ inp = in_pcblookup_hash(&V_udbinfo, faddr, uh->uh_dport,
+ ip->ip_src, uh->uh_sport, 0, NULL);
+ if (inp != NULL) {
+ INP_RLOCK(inp);
+ if (inp->inp_socket != NULL) {
+ udp_notify(inp, inetctlerrmap[cmd]);
+ }
+ INP_RUNLOCK(inp);
+ }
+ INP_INFO_RUNLOCK(&V_udbinfo);
+ } else
+ in_pcbnotifyall(&V_udbinfo, faddr, inetctlerrmap[cmd],
+ udp_notify);
+}
+
+static int
+udp_pcblist(SYSCTL_HANDLER_ARGS)
+{
+ int error, i, n;
+ struct inpcb *inp, **inp_list;
+ inp_gen_t gencnt;
+ struct xinpgen xig;
+
+ /*
+ * The process of preparing the PCB list is too time-consuming and
+ * resource-intensive to repeat twice on every request.
+ */
+ if (req->oldptr == 0) {
+ n = V_udbinfo.ipi_count;
+ n += imax(n / 8, 10);
+ req->oldidx = 2 * (sizeof xig) + n * sizeof(struct xinpcb);
+ return (0);
+ }
+
+ if (req->newptr != 0)
+ return (EPERM);
+
+ /*
+ * OK, now we're committed to doing something.
+ */
+ INP_INFO_RLOCK(&V_udbinfo);
+ gencnt = V_udbinfo.ipi_gencnt;
+ n = V_udbinfo.ipi_count;
+ INP_INFO_RUNLOCK(&V_udbinfo);
+
+ error = sysctl_wire_old_buffer(req, 2 * (sizeof xig)
+ + n * sizeof(struct xinpcb));
+ if (error != 0)
+ return (error);
+
+ xig.xig_len = sizeof xig;
+ xig.xig_count = n;
+ xig.xig_gen = gencnt;
+ xig.xig_sogen = so_gencnt;
+ error = SYSCTL_OUT(req, &xig, sizeof xig);
+ if (error)
+ return (error);
+
+ inp_list = malloc(n * sizeof *inp_list, M_TEMP, M_WAITOK);
+ if (inp_list == 0)
+ return (ENOMEM);
+
+ INP_INFO_RLOCK(&V_udbinfo);
+ for (inp = LIST_FIRST(V_udbinfo.ipi_listhead), i = 0; inp && i < n;
+ inp = LIST_NEXT(inp, inp_list)) {
+ INP_WLOCK(inp);
+ if (inp->inp_gencnt <= gencnt &&
+ cr_canseeinpcb(req->td->td_ucred, inp) == 0) {
+ in_pcbref(inp);
+ inp_list[i++] = inp;
+ }
+ INP_WUNLOCK(inp);
+ }
+ INP_INFO_RUNLOCK(&V_udbinfo);
+ n = i;
+
+ error = 0;
+ for (i = 0; i < n; i++) {
+ inp = inp_list[i];
+ INP_RLOCK(inp);
+ if (inp->inp_gencnt <= gencnt) {
+ struct xinpcb xi;
+
+ bzero(&xi, sizeof(xi));
+ xi.xi_len = sizeof xi;
+ /* XXX should avoid extra copy */
+ bcopy(inp, &xi.xi_inp, sizeof *inp);
+ if (inp->inp_socket)
+ sotoxsocket(inp->inp_socket, &xi.xi_socket);
+ xi.xi_inp.inp_gencnt = inp->inp_gencnt;
+ INP_RUNLOCK(inp);
+ error = SYSCTL_OUT(req, &xi, sizeof xi);
+ } else
+ INP_RUNLOCK(inp);
+ }
+ INP_INFO_WLOCK(&V_udbinfo);
+ for (i = 0; i < n; i++) {
+ inp = inp_list[i];
+ INP_WLOCK(inp);
+ if (!in_pcbrele(inp))
+ INP_WUNLOCK(inp);
+ }
+ INP_INFO_WUNLOCK(&V_udbinfo);
+
+ if (!error) {
+ /*
+ * Give the user an updated idea of our state. If the
+ * generation differs from what we told her before, she knows
+ * that something happened while we were processing this
+ * request, and it might be necessary to retry.
+ */
+ INP_INFO_RLOCK(&V_udbinfo);
+ xig.xig_gen = V_udbinfo.ipi_gencnt;
+ xig.xig_sogen = so_gencnt;
+ xig.xig_count = V_udbinfo.ipi_count;
+ INP_INFO_RUNLOCK(&V_udbinfo);
+ error = SYSCTL_OUT(req, &xig, sizeof xig);
+ }
+ free(inp_list, M_TEMP);
+ return (error);
+}
+
+SYSCTL_PROC(_net_inet_udp, UDPCTL_PCBLIST, pcblist, CTLFLAG_RD, 0, 0,
+ udp_pcblist, "S,xinpcb", "List of active UDP sockets");
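+
+/*
+ * Illustrative sketch (not part of the port): a userland consumer
+ * would drive the two-pass protocol above with a NULL buffer first to
+ * obtain the size estimate, then a second call for the actual
+ * xinpgen/xinpcb records.  Hypothetical code, assuming the standard
+ * sysctlbyname(3), malloc(3) and free(3) interfaces.
+ */
+#if 0
+static void *
+example_fetch_udp_pcblist(size_t *lenp)
+{
+	void *buf;
+	size_t len = 0;
+
+	/* First call: the handler returns only the estimated size. */
+	if (sysctlbyname("net.inet.udp.pcblist", NULL, &len, NULL, 0) < 0)
+		return (NULL);
+	if ((buf = malloc(len)) == NULL)
+		return (NULL);
+	/* Second call: fetch the records themselves. */
+	if (sysctlbyname("net.inet.udp.pcblist", buf, &len, NULL, 0) < 0) {
+		free(buf);
+		return (NULL);
+	}
+	*lenp = len;
+	return (buf);
+}
+#endif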
+
+static int
+udp_getcred(SYSCTL_HANDLER_ARGS)
+{
+ struct xucred xuc;
+ struct sockaddr_in addrs[2];
+ struct inpcb *inp;
+ int error;
+
+ error = priv_check(req->td, PRIV_NETINET_GETCRED);
+ if (error)
+ return (error);
+ error = SYSCTL_IN(req, addrs, sizeof(addrs));
+ if (error)
+ return (error);
+ INP_INFO_RLOCK(&V_udbinfo);
+ inp = in_pcblookup_hash(&V_udbinfo, addrs[1].sin_addr, addrs[1].sin_port,
+ addrs[0].sin_addr, addrs[0].sin_port, 1, NULL);
+ if (inp != NULL) {
+ INP_RLOCK(inp);
+ INP_INFO_RUNLOCK(&V_udbinfo);
+ if (inp->inp_socket == NULL)
+ error = ENOENT;
+ if (error == 0)
+ error = cr_canseeinpcb(req->td->td_ucred, inp);
+ if (error == 0)
+ cru2x(inp->inp_cred, &xuc);
+ INP_RUNLOCK(inp);
+ } else {
+ INP_INFO_RUNLOCK(&V_udbinfo);
+ error = ENOENT;
+ }
+ if (error == 0)
+ error = SYSCTL_OUT(req, &xuc, sizeof(struct xucred));
+ return (error);
+}
+
+SYSCTL_PROC(_net_inet_udp, OID_AUTO, getcred,
+ CTLTYPE_OPAQUE|CTLFLAG_RW|CTLFLAG_PRISON, 0, 0,
+ udp_getcred, "S,xucred", "Get the xucred of a UDP connection");
+
+int
+udp_ctloutput(struct socket *so, struct sockopt *sopt)
+{
+ int error = 0, optval;
+ struct inpcb *inp;
+#ifdef IPSEC_NAT_T
+ struct udpcb *up;
+#endif
+
+ inp = sotoinpcb(so);
+ KASSERT(inp != NULL, ("%s: inp == NULL", __func__));
+ INP_WLOCK(inp);
+ if (sopt->sopt_level != IPPROTO_UDP) {
+#ifdef INET6
+ if (INP_CHECK_SOCKAF(so, AF_INET6)) {
+ INP_WUNLOCK(inp);
+ error = ip6_ctloutput(so, sopt);
+ } else {
+#endif
+ INP_WUNLOCK(inp);
+ error = ip_ctloutput(so, sopt);
+#ifdef INET6
+ }
+#endif
+ return (error);
+ }
+
+ switch (sopt->sopt_dir) {
+ case SOPT_SET:
+ switch (sopt->sopt_name) {
+ case UDP_ENCAP:
+ INP_WUNLOCK(inp);
+ error = sooptcopyin(sopt, &optval, sizeof optval,
+ sizeof optval);
+ if (error)
+ break;
+ inp = sotoinpcb(so);
+ KASSERT(inp != NULL, ("%s: inp == NULL", __func__));
+ INP_WLOCK(inp);
+#ifdef IPSEC_NAT_T
+ up = intoudpcb(inp);
+ KASSERT(up != NULL, ("%s: up == NULL", __func__));
+#endif
+ switch (optval) {
+ case 0:
+ /* Clear all UDP encap. */
+#ifdef IPSEC_NAT_T
+ up->u_flags &= ~UF_ESPINUDP_ALL;
+#endif
+ break;
+#ifdef IPSEC_NAT_T
+ case UDP_ENCAP_ESPINUDP:
+ case UDP_ENCAP_ESPINUDP_NON_IKE:
+ up->u_flags &= ~UF_ESPINUDP_ALL;
+ if (optval == UDP_ENCAP_ESPINUDP)
+ up->u_flags |= UF_ESPINUDP;
+ else if (optval == UDP_ENCAP_ESPINUDP_NON_IKE)
+ up->u_flags |= UF_ESPINUDP_NON_IKE;
+ break;
+#endif
+ default:
+ error = EINVAL;
+ break;
+ }
+ INP_WUNLOCK(inp);
+ break;
+ default:
+ INP_WUNLOCK(inp);
+ error = ENOPROTOOPT;
+ break;
+ }
+ break;
+ case SOPT_GET:
+ switch (sopt->sopt_name) {
+#ifdef IPSEC_NAT_T
+ case UDP_ENCAP:
+ up = intoudpcb(inp);
+ KASSERT(up != NULL, ("%s: up == NULL", __func__));
+ optval = up->u_flags & UF_ESPINUDP_ALL;
+ INP_WUNLOCK(inp);
+ error = sooptcopyout(sopt, &optval, sizeof optval);
+ break;
+#endif
+ default:
+ INP_WUNLOCK(inp);
+ error = ENOPROTOOPT;
+ break;
+ }
+ break;
+ }
+ return (error);
+}
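+
+/*
+ * Illustrative sketch (not part of the port): a NAT-T aware IKE
+ * daemon would enable ESP-in-UDP decapsulation through the socket
+ * option handled above.  Hypothetical userland code; UDP_ENCAP and
+ * UDP_ENCAP_ESPINUDP come from <netinet/udp.h> and take effect only
+ * on kernels built with IPSEC_NAT_T.
+ */
+#if 0
+static int
+example_enable_natt(int s)
+{
+	int opt = UDP_ENCAP_ESPINUDP;
+
+	/* Level IPPROTO_UDP routes the request to udp_ctloutput(). */
+	return (setsockopt(s, IPPROTO_UDP, UDP_ENCAP, &opt, sizeof(opt)));
+}
+#endif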
+
+static int
+udp_output(struct inpcb *inp, struct mbuf *m, struct sockaddr *addr,
+ struct mbuf *control, struct thread *td)
+{
+ struct udpiphdr *ui;
+ int len = m->m_pkthdr.len;
+ struct in_addr faddr, laddr;
+ struct cmsghdr *cm;
+ struct sockaddr_in *sin, src;
+ int error = 0;
+ int ipflags;
+ u_short fport, lport;
+ int unlock_udbinfo;
+
+ /*
+ * udp_output() may need to temporarily bind or connect the current
+ * inpcb. As such, we don't know up front whether we will need the
+ * pcbinfo lock or not. Do any work to decide what is needed up
+ * front before acquiring any locks.
+ */
+ if (len + sizeof(struct udpiphdr) > IP_MAXPACKET) {
+ if (control)
+ m_freem(control);
+ m_freem(m);
+ return (EMSGSIZE);
+ }
+
+ src.sin_family = 0;
+ if (control != NULL) {
+ /*
+ * XXX: Currently, we assume all the optional information is
+ * stored in a single mbuf.
+ */
+ if (control->m_next) {
+ m_freem(control);
+ m_freem(m);
+ return (EINVAL);
+ }
+ for (; control->m_len > 0;
+ control->m_data += CMSG_ALIGN(cm->cmsg_len),
+ control->m_len -= CMSG_ALIGN(cm->cmsg_len)) {
+ cm = mtod(control, struct cmsghdr *);
+ if (control->m_len < sizeof(*cm) || cm->cmsg_len == 0
+ || cm->cmsg_len > control->m_len) {
+ error = EINVAL;
+ break;
+ }
+ if (cm->cmsg_level != IPPROTO_IP)
+ continue;
+
+ switch (cm->cmsg_type) {
+ case IP_SENDSRCADDR:
+ if (cm->cmsg_len !=
+ CMSG_LEN(sizeof(struct in_addr))) {
+ error = EINVAL;
+ break;
+ }
+ bzero(&src, sizeof(src));
+ src.sin_family = AF_INET;
+ src.sin_len = sizeof(src);
+ src.sin_port = inp->inp_lport;
+ src.sin_addr =
+ *(struct in_addr *)CMSG_DATA(cm);
+ break;
+
+ default:
+ error = ENOPROTOOPT;
+ break;
+ }
+ if (error)
+ break;
+ }
+ m_freem(control);
+ }
+ if (error) {
+ m_freem(m);
+ return (error);
+ }
+
+ /*
+ * Depending on whether or not the application has bound or connected
+ * the socket, we may have to do varying levels of work. The optimal
+ * case is for a connected UDP socket, as a global lock isn't
+ * required at all.
+ *
+ * In order to decide which we need, we require stability of the
+ * inpcb binding, which we ensure by acquiring a read lock on the
+ * inpcb. This doesn't strictly follow the lock order, so we play
+ * the trylock and retry game; note that we may end up with more
+ * conservative locks than required the second time around, so later
+ * assertions have to accept that. Further analysis of the number of
+ * misses under contention is required.
+ */
+ sin = (struct sockaddr_in *)addr;
+ INP_RLOCK(inp);
+ if (sin != NULL &&
+ (inp->inp_laddr.s_addr == INADDR_ANY && inp->inp_lport == 0)) {
+ INP_RUNLOCK(inp);
+ INP_INFO_WLOCK(&V_udbinfo);
+ INP_WLOCK(inp);
+ unlock_udbinfo = 2;
+ } else if ((sin != NULL && (
+ (sin->sin_addr.s_addr == INADDR_ANY) ||
+ (sin->sin_addr.s_addr == INADDR_BROADCAST) ||
+ (inp->inp_laddr.s_addr == INADDR_ANY) ||
+ (inp->inp_lport == 0))) ||
+ (src.sin_family == AF_INET)) {
+ if (!INP_INFO_TRY_RLOCK(&V_udbinfo)) {
+ INP_RUNLOCK(inp);
+ INP_INFO_RLOCK(&V_udbinfo);
+ INP_RLOCK(inp);
+ }
+ unlock_udbinfo = 1;
+ } else
+ unlock_udbinfo = 0;
+
+ /*
+ * If the IP_SENDSRCADDR control message was specified, override the
+ * source address for this datagram. Its use is invalidated if the
+ * address thus specified is incomplete or clobbers other inpcbs.
+ */
+ laddr = inp->inp_laddr;
+ lport = inp->inp_lport;
+ if (src.sin_family == AF_INET) {
+ INP_INFO_LOCK_ASSERT(&V_udbinfo);
+ if ((lport == 0) ||
+ (laddr.s_addr == INADDR_ANY &&
+ src.sin_addr.s_addr == INADDR_ANY)) {
+ error = EINVAL;
+ goto release;
+ }
+ error = in_pcbbind_setup(inp, (struct sockaddr *)&src,
+ &laddr.s_addr, &lport, td->td_ucred);
+ if (error)
+ goto release;
+ }
+
+ /*
+ * If a UDP socket has been connected, then a local address/port will
+ * have been selected and bound.
+ *
+ * If a UDP socket has not been connected to, then an explicit
+ * destination address must be used, in which case a local
+ * address/port may not have been selected and bound.
+ */
+ if (sin != NULL) {
+ INP_LOCK_ASSERT(inp);
+ if (inp->inp_faddr.s_addr != INADDR_ANY) {
+ error = EISCONN;
+ goto release;
+ }
+
+ /*
+ * Jail may rewrite the destination address, so let it do
+ * that before we use it.
+ */
+ error = prison_remote_ip4(td->td_ucred, &sin->sin_addr);
+ if (error)
+ goto release;
+
+ /*
+ * If a local address or port hasn't yet been selected, or if
+ * the destination address needs to be rewritten due to using
+ * a special INADDR_ constant, invoke in_pcbconnect_setup()
+ * to do the heavy lifting. Once a port is selected, we
+ * commit the binding back to the socket; we also commit the
+ * binding of the address if in jail.
+ *
+ * If we already have a valid binding and we're not
+ * requesting a destination address rewrite, use a fast path.
+ */
+ if (inp->inp_laddr.s_addr == INADDR_ANY ||
+ inp->inp_lport == 0 ||
+ sin->sin_addr.s_addr == INADDR_ANY ||
+ sin->sin_addr.s_addr == INADDR_BROADCAST) {
+ INP_INFO_LOCK_ASSERT(&V_udbinfo);
+ error = in_pcbconnect_setup(inp, addr, &laddr.s_addr,
+ &lport, &faddr.s_addr, &fport, NULL,
+ td->td_ucred);
+ if (error)
+ goto release;
+
+ /*
+ * XXXRW: Why not commit the port if the address is
+ * !INADDR_ANY?
+ */
+ /* Commit the local port if newly assigned. */
+ if (inp->inp_laddr.s_addr == INADDR_ANY &&
+ inp->inp_lport == 0) {
+ INP_INFO_WLOCK_ASSERT(&V_udbinfo);
+ INP_WLOCK_ASSERT(inp);
+ /*
+ * Remember addr if jailed, to prevent
+ * rebinding.
+ */
+ if (prison_flag(td->td_ucred, PR_IP4))
+ inp->inp_laddr = laddr;
+ inp->inp_lport = lport;
+ if (in_pcbinshash(inp) != 0) {
+ inp->inp_lport = 0;
+ error = EAGAIN;
+ goto release;
+ }
+ inp->inp_flags |= INP_ANONPORT;
+ }
+ } else {
+ faddr = sin->sin_addr;
+ fport = sin->sin_port;
+ }
+ } else {
+ INP_LOCK_ASSERT(inp);
+ faddr = inp->inp_faddr;
+ fport = inp->inp_fport;
+ if (faddr.s_addr == INADDR_ANY) {
+ error = ENOTCONN;
+ goto release;
+ }
+ }
+
+ /*
+	 * Calculate data length and get an mbuf for UDP, IP, and possible
+	 * link-layer headers.  Immediately slide the data pointer forward
+	 * again, since we won't use that space at this layer.
+ */
+ M_PREPEND(m, sizeof(struct udpiphdr) + max_linkhdr, M_DONTWAIT);
+ if (m == NULL) {
+ error = ENOBUFS;
+ goto release;
+ }
+ m->m_data += max_linkhdr;
+ m->m_len -= max_linkhdr;
+ m->m_pkthdr.len -= max_linkhdr;
+
+ /*
+	 * Fill in the mbuf with the extended UDP header, with the addresses
+	 * and length in network byte order.
+ */
+ ui = mtod(m, struct udpiphdr *);
+ bzero(ui->ui_x1, sizeof(ui->ui_x1)); /* XXX still needed? */
+ ui->ui_pr = IPPROTO_UDP;
+ ui->ui_src = laddr;
+ ui->ui_dst = faddr;
+ ui->ui_sport = lport;
+ ui->ui_dport = fport;
+ ui->ui_ulen = htons((u_short)len + sizeof(struct udphdr));
+
+ /*
+ * Set the Don't Fragment bit in the IP header.
+ */
+ if (inp->inp_flags & INP_DONTFRAG) {
+ struct ip *ip;
+
+ ip = (struct ip *)&ui->ui_i;
+ ip->ip_off |= IP_DF;
+ }
+
+ ipflags = 0;
+ if (inp->inp_socket->so_options & SO_DONTROUTE)
+ ipflags |= IP_ROUTETOIF;
+ if (inp->inp_socket->so_options & SO_BROADCAST)
+ ipflags |= IP_ALLOWBROADCAST;
+ if (inp->inp_flags & INP_ONESBCAST)
+ ipflags |= IP_SENDONES;
+
+#ifdef MAC
+ mac_inpcb_create_mbuf(inp, m);
+#endif
+
+ /*
+ * Set up checksum and output datagram.
+ */
+ if (udp_cksum) {
+ if (inp->inp_flags & INP_ONESBCAST)
+ faddr.s_addr = INADDR_BROADCAST;
+ ui->ui_sum = in_pseudo(ui->ui_src.s_addr, faddr.s_addr,
+ htons((u_short)len + sizeof(struct udphdr) + IPPROTO_UDP));
+ m->m_pkthdr.csum_flags = CSUM_UDP;
+ m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
+ } else
+ ui->ui_sum = 0;
+ ((struct ip *)ui)->ip_len = sizeof (struct udpiphdr) + len;
+ ((struct ip *)ui)->ip_ttl = inp->inp_ip_ttl; /* XXX */
+ ((struct ip *)ui)->ip_tos = inp->inp_ip_tos; /* XXX */
+ UDPSTAT_INC(udps_opackets);
+
+ if (unlock_udbinfo == 2)
+ INP_INFO_WUNLOCK(&V_udbinfo);
+ else if (unlock_udbinfo == 1)
+ INP_INFO_RUNLOCK(&V_udbinfo);
+ error = ip_output(m, inp->inp_options, NULL, ipflags,
+ inp->inp_moptions, inp);
+ if (unlock_udbinfo == 2)
+ INP_WUNLOCK(inp);
+ else
+ INP_RUNLOCK(inp);
+ return (error);
+
+release:
+ if (unlock_udbinfo == 2) {
+ INP_WUNLOCK(inp);
+ INP_INFO_WUNLOCK(&V_udbinfo);
+ } else if (unlock_udbinfo == 1) {
+ INP_RUNLOCK(inp);
+ INP_INFO_RUNLOCK(&V_udbinfo);
+ } else
+ INP_RUNLOCK(inp);
+ m_freem(m);
+ return (error);
+}
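+
+/*
+ * Illustrative sketch (not part of the port): the IP_SENDSRCADDR
+ * control message parsed by udp_output() above is supplied from
+ * userland via sendmsg(2).  Hypothetical code, assuming a socket
+ * already bound to a local port on INADDR_ANY and the usual
+ * <sys/socket.h>/<netinet/in.h> definitions.
+ */
+#if 0
+static ssize_t
+example_send_from(int s, struct in_addr src,
+    const struct sockaddr_in *dst, const void *data, size_t len)
+{
+	char cbuf[CMSG_SPACE(sizeof(struct in_addr))];
+	struct msghdr msg;
+	struct iovec iov;
+	struct cmsghdr *cm;
+
+	memset(&msg, 0, sizeof(msg));
+	iov.iov_base = __DECONST(void *, data);
+	iov.iov_len = len;
+	msg.msg_name = __DECONST(void *, dst);
+	msg.msg_namelen = sizeof(*dst);
+	msg.msg_iov = &iov;
+	msg.msg_iovlen = 1;
+	msg.msg_control = cbuf;
+	msg.msg_controllen = sizeof(cbuf);
+
+	/* Ask udp_output() to use 'src' as the source address. */
+	cm = CMSG_FIRSTHDR(&msg);
+	cm->cmsg_level = IPPROTO_IP;
+	cm->cmsg_type = IP_SENDSRCADDR;
+	cm->cmsg_len = CMSG_LEN(sizeof(struct in_addr));
+	memcpy(CMSG_DATA(cm), &src, sizeof(src));
+
+	return (sendmsg(s, &msg, 0));
+}
+#endif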
+
+
+#if defined(IPSEC) && defined(IPSEC_NAT_T)
+#ifdef INET
+/*
+ * Potentially decap ESP in UDP frame. Check for an ESP header
+ * and optional marker; if present, strip the UDP header and
+ * push the result through IPSec.
+ *
+ * Returns mbuf to be processed (potentially re-allocated) or
+ * NULL if consumed and/or processed.
+ */
+static struct mbuf *
+udp4_espdecap(struct inpcb *inp, struct mbuf *m, int off)
+{
+ size_t minlen, payload, skip, iphlen;
+ caddr_t data;
+ struct udpcb *up;
+ struct m_tag *tag;
+ struct udphdr *udphdr;
+ struct ip *ip;
+
+ INP_RLOCK_ASSERT(inp);
+
+ /*
+ * Pull up data so the longest case is contiguous:
+ * IP/UDP hdr + non ESP marker + ESP hdr.
+ */
+ minlen = off + sizeof(uint64_t) + sizeof(struct esp);
+ if (minlen > m->m_pkthdr.len)
+ minlen = m->m_pkthdr.len;
+ if ((m = m_pullup(m, minlen)) == NULL) {
+ V_ipsec4stat.in_inval++;
+ return (NULL); /* Bypass caller processing. */
+ }
+ data = mtod(m, caddr_t); /* Points to ip header. */
+ payload = m->m_len - off; /* Size of payload. */
+
+ if (payload == 1 && data[off] == '\xff')
+ return (m); /* NB: keepalive packet, no decap. */
+
+ up = intoudpcb(inp);
+ KASSERT(up != NULL, ("%s: udpcb NULL", __func__));
+ KASSERT((up->u_flags & UF_ESPINUDP_ALL) != 0,
+ ("u_flags 0x%x", up->u_flags));
+
+ /*
+ * Check that the payload is large enough to hold an
+ * ESP header and compute the amount of data to remove.
+ *
+ * NB: the caller has already done a pullup for us.
+ * XXX can we assume alignment and eliminate bcopys?
+ */
+ if (up->u_flags & UF_ESPINUDP_NON_IKE) {
+ /*
+ * draft-ietf-ipsec-nat-t-ike-0[01].txt and
+ * draft-ietf-ipsec-udp-encaps-(00/)01.txt, ignoring
+ * possible AH mode non-IKE marker+non-ESP marker
+ * from draft-ietf-ipsec-udp-encaps-00.txt.
+ */
+ uint64_t marker;
+
+ if (payload <= sizeof(uint64_t) + sizeof(struct esp))
+ return (m); /* NB: no decap. */
+ bcopy(data + off, &marker, sizeof(uint64_t));
+ if (marker != 0) /* Non-IKE marker. */
+ return (m); /* NB: no decap. */
+ skip = sizeof(uint64_t) + sizeof(struct udphdr);
+ } else {
+ uint32_t spi;
+
+ if (payload <= sizeof(struct esp)) {
+ V_ipsec4stat.in_inval++;
+ m_freem(m);
+ return (NULL); /* Discard. */
+ }
+ bcopy(data + off, &spi, sizeof(uint32_t));
+ if (spi == 0) /* Non-ESP marker. */
+ return (m); /* NB: no decap. */
+ skip = sizeof(struct udphdr);
+ }
+
+ /*
+	 * Set up a PACKET_TAG_IPSEC_NAT_T_PORTS tag to remember
+	 * the UDP ports.  This is required if we want to select
+	 * the right SPD for multiple hosts behind the same NAT.
+ *
+ * NB: ports are maintained in network byte order everywhere
+ * in the NAT-T code.
+ */
+ tag = m_tag_get(PACKET_TAG_IPSEC_NAT_T_PORTS,
+ 2 * sizeof(uint16_t), M_NOWAIT);
+ if (tag == NULL) {
+ V_ipsec4stat.in_nomem++;
+ m_freem(m);
+ return (NULL); /* Discard. */
+ }
+ iphlen = off - sizeof(struct udphdr);
+ udphdr = (struct udphdr *)(data + iphlen);
+ ((uint16_t *)(tag + 1))[0] = udphdr->uh_sport;
+ ((uint16_t *)(tag + 1))[1] = udphdr->uh_dport;
+ m_tag_prepend(m, tag);
+
+ /*
+	 * Remove the UDP header (and possibly the non-ESP marker).
+	 * The IP header length is iphlen.
+ * Before:
+ * <--- off --->
+ * +----+------+-----+
+ * | IP | UDP | ESP |
+ * +----+------+-----+
+ * <-skip->
+ * After:
+ * +----+-----+
+ * | IP | ESP |
+ * +----+-----+
+ * <-skip->
+ */
+ ovbcopy(data, data + skip, iphlen);
+ m_adj(m, skip);
+
+ ip = mtod(m, struct ip *);
+ ip->ip_len -= skip;
+ ip->ip_p = IPPROTO_ESP;
+
+ /*
+ * We cannot yet update the cksums so clear any
+ * h/w cksum flags as they are no longer valid.
+ */
+ if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID)
+ m->m_pkthdr.csum_flags &= ~(CSUM_DATA_VALID|CSUM_PSEUDO_HDR);
+
+ (void) ipsec4_common_input(m, iphlen, ip->ip_p);
+ return (NULL); /* NB: consumed, bypass processing. */
+}
+#endif /* INET */
+#endif /* defined(IPSEC) && defined(IPSEC_NAT_T) */
+
+static void
+udp_abort(struct socket *so)
+{
+ struct inpcb *inp;
+
+ inp = sotoinpcb(so);
+ KASSERT(inp != NULL, ("udp_abort: inp == NULL"));
+ INP_INFO_WLOCK(&V_udbinfo);
+ INP_WLOCK(inp);
+ if (inp->inp_faddr.s_addr != INADDR_ANY) {
+ in_pcbdisconnect(inp);
+ inp->inp_laddr.s_addr = INADDR_ANY;
+ soisdisconnected(so);
+ }
+ INP_WUNLOCK(inp);
+ INP_INFO_WUNLOCK(&V_udbinfo);
+}
+
+static int
+udp_attach(struct socket *so, int proto, struct thread *td)
+{
+ struct inpcb *inp;
+ int error;
+
+ inp = sotoinpcb(so);
+ KASSERT(inp == NULL, ("udp_attach: inp != NULL"));
+ error = soreserve(so, udp_sendspace, udp_recvspace);
+ if (error)
+ return (error);
+ INP_INFO_WLOCK(&V_udbinfo);
+ error = in_pcballoc(so, &V_udbinfo);
+ if (error) {
+ INP_INFO_WUNLOCK(&V_udbinfo);
+ return (error);
+ }
+
+ inp = sotoinpcb(so);
+ inp->inp_vflag |= INP_IPV4;
+ inp->inp_ip_ttl = V_ip_defttl;
+
+ error = udp_newudpcb(inp);
+ if (error) {
+ in_pcbdetach(inp);
+ in_pcbfree(inp);
+ INP_INFO_WUNLOCK(&V_udbinfo);
+ return (error);
+ }
+
+ INP_WUNLOCK(inp);
+ INP_INFO_WUNLOCK(&V_udbinfo);
+ return (0);
+}
+
+int
+udp_set_kernel_tunneling(struct socket *so, udp_tun_func_t f)
+{
+ struct inpcb *inp;
+ struct udpcb *up;
+
+ KASSERT(so->so_type == SOCK_DGRAM,
+ ("udp_set_kernel_tunneling: !dgram"));
+ inp = sotoinpcb(so);
+ KASSERT(inp != NULL, ("udp_set_kernel_tunneling: inp == NULL"));
+ INP_WLOCK(inp);
+ up = intoudpcb(inp);
+ if (up->u_tun_func != NULL) {
+ INP_WUNLOCK(inp);
+ return (EBUSY);
+ }
+ up->u_tun_func = f;
+ INP_WUNLOCK(inp);
+ return (0);
+}
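+
+/*
+ * Illustrative sketch (not part of the port): an in-kernel consumer,
+ * e.g. a tunneling driver, would register its input handler with the
+ * function above.  Hypothetical names; the callback matches
+ * udp_tun_func_t and must dispose of the mbuf itself, since normal
+ * UDP delivery is bypassed for this socket.
+ */
+#if 0
+static void
+example_tun_input(struct mbuf *m, int off, struct inpcb *inp)
+{
+
+	/* 'off' is the length of the IP header preceding the UDP header. */
+	m_freem(m);		/* Consume the datagram. */
+}
+
+static int
+example_tun_attach(struct socket *so)
+{
+
+	return (udp_set_kernel_tunneling(so, example_tun_input));
+}
+#endif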
+
+static int
+udp_bind(struct socket *so, struct sockaddr *nam, struct thread *td)
+{
+ struct inpcb *inp;
+ int error;
+
+ inp = sotoinpcb(so);
+ KASSERT(inp != NULL, ("udp_bind: inp == NULL"));
+ INP_INFO_WLOCK(&V_udbinfo);
+ INP_WLOCK(inp);
+ error = in_pcbbind(inp, nam, td->td_ucred);
+ INP_WUNLOCK(inp);
+ INP_INFO_WUNLOCK(&V_udbinfo);
+ return (error);
+}
+
+static void
+udp_close(struct socket *so)
+{
+ struct inpcb *inp;
+
+ inp = sotoinpcb(so);
+ KASSERT(inp != NULL, ("udp_close: inp == NULL"));
+ INP_INFO_WLOCK(&V_udbinfo);
+ INP_WLOCK(inp);
+ if (inp->inp_faddr.s_addr != INADDR_ANY) {
+ in_pcbdisconnect(inp);
+ inp->inp_laddr.s_addr = INADDR_ANY;
+ soisdisconnected(so);
+ }
+ INP_WUNLOCK(inp);
+ INP_INFO_WUNLOCK(&V_udbinfo);
+}
+
+static int
+udp_connect(struct socket *so, struct sockaddr *nam, struct thread *td)
+{
+ struct inpcb *inp;
+ int error;
+ struct sockaddr_in *sin;
+
+ inp = sotoinpcb(so);
+ KASSERT(inp != NULL, ("udp_connect: inp == NULL"));
+ INP_INFO_WLOCK(&V_udbinfo);
+ INP_WLOCK(inp);
+ if (inp->inp_faddr.s_addr != INADDR_ANY) {
+ INP_WUNLOCK(inp);
+ INP_INFO_WUNLOCK(&V_udbinfo);
+ return (EISCONN);
+ }
+ sin = (struct sockaddr_in *)nam;
+ error = prison_remote_ip4(td->td_ucred, &sin->sin_addr);
+ if (error != 0) {
+ INP_WUNLOCK(inp);
+ INP_INFO_WUNLOCK(&V_udbinfo);
+ return (error);
+ }
+ error = in_pcbconnect(inp, nam, td->td_ucred);
+ if (error == 0)
+ soisconnected(so);
+ INP_WUNLOCK(inp);
+ INP_INFO_WUNLOCK(&V_udbinfo);
+ return (error);
+}
+
+static void
+udp_detach(struct socket *so)
+{
+ struct inpcb *inp;
+ struct udpcb *up;
+
+ inp = sotoinpcb(so);
+ KASSERT(inp != NULL, ("udp_detach: inp == NULL"));
+ KASSERT(inp->inp_faddr.s_addr == INADDR_ANY,
+ ("udp_detach: not disconnected"));
+ INP_INFO_WLOCK(&V_udbinfo);
+ INP_WLOCK(inp);
+ up = intoudpcb(inp);
+ KASSERT(up != NULL, ("%s: up == NULL", __func__));
+ inp->inp_ppcb = NULL;
+ in_pcbdetach(inp);
+ in_pcbfree(inp);
+ INP_INFO_WUNLOCK(&V_udbinfo);
+ udp_discardcb(up);
+}
+
+static int
+udp_disconnect(struct socket *so)
+{
+ struct inpcb *inp;
+
+ inp = sotoinpcb(so);
+ KASSERT(inp != NULL, ("udp_disconnect: inp == NULL"));
+ INP_INFO_WLOCK(&V_udbinfo);
+ INP_WLOCK(inp);
+ if (inp->inp_faddr.s_addr == INADDR_ANY) {
+ INP_WUNLOCK(inp);
+ INP_INFO_WUNLOCK(&V_udbinfo);
+ return (ENOTCONN);
+ }
+
+ in_pcbdisconnect(inp);
+ inp->inp_laddr.s_addr = INADDR_ANY;
+ SOCK_LOCK(so);
+ so->so_state &= ~SS_ISCONNECTED; /* XXX */
+ SOCK_UNLOCK(so);
+ INP_WUNLOCK(inp);
+ INP_INFO_WUNLOCK(&V_udbinfo);
+ return (0);
+}
+
+static int
+udp_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *addr,
+ struct mbuf *control, struct thread *td)
+{
+ struct inpcb *inp;
+
+ inp = sotoinpcb(so);
+ KASSERT(inp != NULL, ("udp_send: inp == NULL"));
+ return (udp_output(inp, m, addr, control, td));
+}
+
+int
+udp_shutdown(struct socket *so)
+{
+ struct inpcb *inp;
+
+ inp = sotoinpcb(so);
+ KASSERT(inp != NULL, ("udp_shutdown: inp == NULL"));
+ INP_WLOCK(inp);
+ socantsendmore(so);
+ INP_WUNLOCK(inp);
+ return (0);
+}
+
+struct pr_usrreqs udp_usrreqs = {
+ .pru_abort = udp_abort,
+ .pru_attach = udp_attach,
+ .pru_bind = udp_bind,
+ .pru_connect = udp_connect,
+ .pru_control = in_control,
+ .pru_detach = udp_detach,
+ .pru_disconnect = udp_disconnect,
+ .pru_peeraddr = in_getpeeraddr,
+ .pru_send = udp_send,
+ .pru_soreceive = soreceive_dgram,
+ .pru_sosend = sosend_dgram,
+ .pru_shutdown = udp_shutdown,
+ .pru_sockaddr = in_getsockaddr,
+ .pru_sosetlabel = in_pcbsosetlabel,
+ .pru_close = udp_close,
+};
diff --git a/rtems/freebsd/netinet/udp_var.h b/rtems/freebsd/netinet/udp_var.h
new file mode 100644
index 00000000..0bff6ea9
--- /dev/null
+++ b/rtems/freebsd/netinet/udp_var.h
@@ -0,0 +1,161 @@
+/*-
+ * Copyright (c) 1982, 1986, 1989, 1993
+ * The Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)udp_var.h 8.1 (Berkeley) 6/10/93
+ * $FreeBSD$
+ */
+
+#ifndef _NETINET_UDP_VAR_HH_
+#define _NETINET_UDP_VAR_HH_
+
+/*
+ * UDP kernel structures and variables.
+ */
+struct udpiphdr {
+ struct ipovly ui_i; /* overlaid ip structure */
+ struct udphdr ui_u; /* udp header */
+};
+#define ui_x1 ui_i.ih_x1
+#define ui_pr ui_i.ih_pr
+#define ui_len ui_i.ih_len
+#define ui_src ui_i.ih_src
+#define ui_dst ui_i.ih_dst
+#define ui_sport ui_u.uh_sport
+#define ui_dport ui_u.uh_dport
+#define ui_ulen ui_u.uh_ulen
+#define ui_sum ui_u.uh_sum
+
+typedef void(*udp_tun_func_t)(struct mbuf *, int off, struct inpcb *);
+
+/*
+ * UDP control block; one per udp.
+ */
+struct udpcb {
+ udp_tun_func_t u_tun_func; /* UDP kernel tunneling callback. */
+ u_int u_flags; /* Generic UDP flags. */
+};
+
+#define intoudpcb(ip) ((struct udpcb *)(ip)->inp_ppcb)
+#define sotoudpcb(so) (intoudpcb(sotoinpcb(so)))
+
+ /* IPsec: ESP in UDP tunneling: */
+#define UF_ESPINUDP_NON_IKE 0x00000001 /* w/ non-IKE marker .. */
+ /* .. per draft-ietf-ipsec-nat-t-ike-0[01],
+ * and draft-ietf-ipsec-udp-encaps-(00/)01.txt */
+#define UF_ESPINUDP 0x00000002 /* w/ non-ESP marker. */
+
+struct udpstat {
+ /* input statistics: */
+ u_long udps_ipackets; /* total input packets */
+ u_long udps_hdrops; /* packet shorter than header */
+ u_long udps_badsum; /* checksum error */
+ u_long udps_nosum; /* no checksum */
+ u_long udps_badlen; /* data length larger than packet */
+ u_long udps_noport; /* no socket on port */
+ u_long udps_noportbcast; /* of above, arrived as broadcast */
+ u_long udps_fullsock; /* not delivered, input socket full */
+ u_long udpps_pcbcachemiss; /* input packets missing pcb cache */
+ u_long udpps_pcbhashmiss; /* input packets not for hashed pcb */
+ /* output statistics: */
+ u_long udps_opackets; /* total output packets */
+ u_long udps_fastout; /* output packets on fast path */
+ /* of no socket on port, arrived as multicast */
+ u_long udps_noportmcast;
+ u_long udps_filtermcast; /* blocked by multicast filter */
+};
+
+#ifdef _KERNEL
+/*
+ * In-kernel consumers can use these accessor macros directly to update
+ * stats.
+ */
+#define UDPSTAT_ADD(name, val) V_udpstat.name += (val)
+#define UDPSTAT_INC(name) UDPSTAT_ADD(name, 1)
+
+/*
+ * Kernel module consumers must use this accessor macro.
+ */
+void kmod_udpstat_inc(int statnum);
+#define KMOD_UDPSTAT_INC(name) \
+ kmod_udpstat_inc(offsetof(struct udpstat, name) / sizeof(u_long))
+#endif
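+
+/*
+ * Illustrative sketch (not part of the header): a kernel module,
+ * which must not touch V_udpstat directly, bumps a counter through
+ * the kmod accessor above; the macro maps the field name to an index
+ * into the u_long array underlying struct udpstat.
+ */
+#if 0
+static void
+example_count_output_packet(void)
+{
+
+	KMOD_UDPSTAT_INC(udps_opackets);
+}
+#endif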
+
+/*
+ * Names for UDP sysctl objects.
+ */
+#define UDPCTL_CHECKSUM 1 /* checksum UDP packets */
+#define UDPCTL_STATS 2 /* statistics (read-only) */
+#define UDPCTL_MAXDGRAM 3 /* max datagram size */
+#define UDPCTL_RECVSPACE 4 /* default receive buffer space */
+#define UDPCTL_PCBLIST 5 /* list of PCBs for UDP sockets */
+#define UDPCTL_MAXID 6
+
+#define UDPCTL_NAMES { \
+ { 0, 0 }, \
+ { "checksum", CTLTYPE_INT }, \
+ { "stats", CTLTYPE_STRUCT }, \
+ { "maxdgram", CTLTYPE_INT }, \
+ { "recvspace", CTLTYPE_INT }, \
+ { "pcblist", CTLTYPE_STRUCT }, \
+}
+
+#ifdef _KERNEL
+SYSCTL_DECL(_net_inet_udp);
+
+extern struct pr_usrreqs udp_usrreqs;
+VNET_DECLARE(struct inpcbhead, udb);
+VNET_DECLARE(struct inpcbinfo, udbinfo);
+#define V_udb VNET(udb)
+#define V_udbinfo VNET(udbinfo)
+
+extern u_long udp_sendspace;
+extern u_long udp_recvspace;
+VNET_DECLARE(struct udpstat, udpstat);
+VNET_DECLARE(int, udp_blackhole);
+#define V_udpstat VNET(udpstat)
+#define V_udp_blackhole VNET(udp_blackhole)
+extern int udp_log_in_vain;
+
+int udp_newudpcb(struct inpcb *);
+void udp_discardcb(struct udpcb *);
+
+void udp_ctlinput(int, struct sockaddr *, void *);
+int udp_ctloutput(struct socket *, struct sockopt *);
+void udp_init(void);
+#ifdef VIMAGE
+void udp_destroy(void);
+#endif
+void udp_input(struct mbuf *, int);
+struct inpcb *udp_notify(struct inpcb *inp, int errno);
+int udp_shutdown(struct socket *so);
+
+int udp_set_kernel_tunneling(struct socket *so, udp_tun_func_t f);
+#endif
+
+#endif
diff --git a/rtems/freebsd/netinet6/dest6.c b/rtems/freebsd/netinet6/dest6.c
new file mode 100644
index 00000000..c7824dc8
--- /dev/null
+++ b/rtems/freebsd/netinet6/dest6.c
@@ -0,0 +1,125 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the project nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $KAME: dest6.c,v 1.59 2003/07/11 13:21:16 t-momose Exp $
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/freebsd/local/opt_inet.h>
+#include <rtems/freebsd/local/opt_inet6.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/domain.h>
+#include <rtems/freebsd/sys/protosw.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/errno.h>
+#include <rtems/freebsd/sys/time.h>
+#include <rtems/freebsd/sys/kernel.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/route.h>
+
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/in_var.h>
+#include <rtems/freebsd/netinet/ip6.h>
+#include <rtems/freebsd/netinet6/ip6_var.h>
+#include <rtems/freebsd/netinet/icmp6.h>
+
+/*
+ * Destination options header processing.
+ */
+int
+dest6_input(struct mbuf **mp, int *offp, int proto)
+{
+ struct mbuf *m = *mp;
+ int off = *offp, dstoptlen, optlen;
+ struct ip6_dest *dstopts;
+ u_int8_t *opt;
+
+ /* validation of the length of the header */
+#ifndef PULLDOWN_TEST
+ IP6_EXTHDR_CHECK(m, off, sizeof(*dstopts), IPPROTO_DONE);
+ dstopts = (struct ip6_dest *)(mtod(m, caddr_t) + off);
+#else
+ IP6_EXTHDR_GET(dstopts, struct ip6_dest *, m, off, sizeof(*dstopts));
+ if (dstopts == NULL)
+ return IPPROTO_DONE;
+#endif
+ dstoptlen = (dstopts->ip6d_len + 1) << 3;
+
+#ifndef PULLDOWN_TEST
+ IP6_EXTHDR_CHECK(m, off, dstoptlen, IPPROTO_DONE);
+ dstopts = (struct ip6_dest *)(mtod(m, caddr_t) + off);
+#else
+ IP6_EXTHDR_GET(dstopts, struct ip6_dest *, m, off, dstoptlen);
+ if (dstopts == NULL)
+ return IPPROTO_DONE;
+#endif
+ off += dstoptlen;
+ dstoptlen -= sizeof(struct ip6_dest);
+ opt = (u_int8_t *)dstopts + sizeof(struct ip6_dest);
+
+ /* search header for all options. */
+ for (optlen = 0; dstoptlen > 0; dstoptlen -= optlen, opt += optlen) {
+ if (*opt != IP6OPT_PAD1 &&
+ (dstoptlen < IP6OPT_MINLEN || *(opt + 1) + 2 > dstoptlen)) {
+ V_ip6stat.ip6s_toosmall++;
+ goto bad;
+ }
+
+ switch (*opt) {
+ case IP6OPT_PAD1:
+ optlen = 1;
+ break;
+ case IP6OPT_PADN:
+ optlen = *(opt + 1) + 2;
+ break;
+ default: /* unknown option */
+ optlen = ip6_unknown_opt(opt, m,
+ opt - mtod(m, u_int8_t *));
+ if (optlen == -1)
+ return (IPPROTO_DONE);
+ optlen += 2;
+ break;
+ }
+ }
+
+ *offp = off;
+ return (dstopts->ip6d_nxt);
+
+ bad:
+ m_freem(m);
+ return (IPPROTO_DONE);
+}
diff --git a/rtems/freebsd/netinet6/frag6.c b/rtems/freebsd/netinet6/frag6.c
new file mode 100644
index 00000000..b2303e60
--- /dev/null
+++ b/rtems/freebsd/netinet6/frag6.c
@@ -0,0 +1,781 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the project nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $KAME: frag6.c,v 1.33 2002/01/07 11:34:48 kjc Exp $
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/domain.h>
+#include <rtems/freebsd/sys/protosw.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/errno.h>
+#include <rtems/freebsd/sys/time.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/syslog.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/route.h>
+#include <rtems/freebsd/net/vnet.h>
+
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/in_var.h>
+#include <rtems/freebsd/netinet/ip6.h>
+#include <rtems/freebsd/netinet6/ip6_var.h>
+#include <rtems/freebsd/netinet/icmp6.h>
+#include <rtems/freebsd/netinet/in_systm.h> /* for ECN definitions */
+#include <rtems/freebsd/netinet/ip.h> /* for ECN definitions */
+
+#include <rtems/freebsd/security/mac/mac_framework.h>
+
+/*
+ * Define it to get correct behavior for per-interface statistics.
+ * You will need to perform an extra routing table lookup, per fragment,
+ * to do it.  This may or may not be a performance hit.
+ */
+#define IN6_IFSTAT_STRICT
+
+static void frag6_enq(struct ip6asfrag *, struct ip6asfrag *);
+static void frag6_deq(struct ip6asfrag *);
+static void frag6_insque(struct ip6q *, struct ip6q *);
+static void frag6_remque(struct ip6q *);
+static void frag6_freef(struct ip6q *);
+
+static struct mtx ip6qlock;
+/*
+ * These fields are all protected by ip6qlock.
+ */
+static VNET_DEFINE(u_int, frag6_nfragpackets);
+static VNET_DEFINE(u_int, frag6_nfrags);
+static VNET_DEFINE(struct ip6q, ip6q); /* ip6 reassemble queue */
+
+#define V_frag6_nfragpackets VNET(frag6_nfragpackets)
+#define V_frag6_nfrags VNET(frag6_nfrags)
+#define V_ip6q VNET(ip6q)
+
+#define IP6Q_LOCK_INIT() mtx_init(&ip6qlock, "ip6qlock", NULL, MTX_DEF);
+#define IP6Q_LOCK() mtx_lock(&ip6qlock)
+#define IP6Q_TRYLOCK() mtx_trylock(&ip6qlock)
+#define IP6Q_LOCK_ASSERT() mtx_assert(&ip6qlock, MA_OWNED)
+#define IP6Q_UNLOCK() mtx_unlock(&ip6qlock)
+
+static MALLOC_DEFINE(M_FTABLE, "fragment", "fragment reassembly header");
+
+/*
+ * Initialise reassembly queue and fragment identifier.
+ */
+static void
+frag6_change(void *tag)
+{
+
+ V_ip6_maxfragpackets = nmbclusters / 4;
+ V_ip6_maxfrags = nmbclusters / 4;
+}
+
+void
+frag6_init(void)
+{
+
+ V_ip6_maxfragpackets = nmbclusters / 4;
+ V_ip6_maxfrags = nmbclusters / 4;
+ V_ip6q.ip6q_next = V_ip6q.ip6q_prev = &V_ip6q;
+
+ if (!IS_DEFAULT_VNET(curvnet))
+ return;
+
+ EVENTHANDLER_REGISTER(nmbclusters_change,
+ frag6_change, NULL, EVENTHANDLER_PRI_ANY);
+
+ IP6Q_LOCK_INIT();
+}
+
+/*
+ * In RFC 2460, the fragment and reassembly rules do not agree with each
+ * other in terms of next header field handling in the fragment header.
+ * While the sender will use the same value for all of the fragmented
+ * packets, the receiver is advised not to check for consistency.
+ *
+ * fragment rule (p20):
+ * (2) A Fragment header containing:
+ * The Next Header value that identifies the first header of
+ * the Fragmentable Part of the original packet.
+ * -> next header field is same for all fragments
+ *
+ * reassembly rule (p21):
+ * The Next Header field of the last header of the Unfragmentable
+ * Part is obtained from the Next Header field of the first
+ * fragment's Fragment header.
+ * -> should grab it from the first fragment only
+ *
+ * The following note also contradicts the fragment rule - no one is going
+ * to send fragments of the same packet with different next header fields.
+ *
+ * additional note (p22):
+ * The Next Header values in the Fragment headers of different
+ * fragments of the same original packet may differ. Only the value
+ * from the Offset zero fragment packet is used for reassembly.
+ * -> should grab it from the first fragment only
+ *
+ * There is no explicit reason given in the RFC.  A historical reason, maybe?
+ */
+/*
+ * Fragment input
+ */
+int
+frag6_input(struct mbuf **mp, int *offp, int proto)
+{
+ struct mbuf *m = *mp, *t;
+ struct ip6_hdr *ip6;
+ struct ip6_frag *ip6f;
+ struct ip6q *q6;
+ struct ip6asfrag *af6, *ip6af, *af6dwn;
+#ifdef IN6_IFSTAT_STRICT
+ struct in6_ifaddr *ia;
+#endif
+ int offset = *offp, nxt, i, next;
+ int first_frag = 0;
+ int fragoff, frgpartlen; /* must be larger than u_int16_t */
+ struct ifnet *dstifp;
+ u_int8_t ecn, ecn0;
+#if 0
+ char ip6buf[INET6_ADDRSTRLEN];
+#endif
+
+ ip6 = mtod(m, struct ip6_hdr *);
+#ifndef PULLDOWN_TEST
+ IP6_EXTHDR_CHECK(m, offset, sizeof(struct ip6_frag), IPPROTO_DONE);
+ ip6f = (struct ip6_frag *)((caddr_t)ip6 + offset);
+#else
+ IP6_EXTHDR_GET(ip6f, struct ip6_frag *, m, offset, sizeof(*ip6f));
+ if (ip6f == NULL)
+ return (IPPROTO_DONE);
+#endif
+
+ dstifp = NULL;
+#ifdef IN6_IFSTAT_STRICT
+ /* find the destination interface of the packet. */
+ if ((ia = ip6_getdstifaddr(m)) != NULL) {
+ dstifp = ia->ia_ifp;
+ ifa_free(&ia->ia_ifa);
+ }
+#else
+ /* we are violating the spec, this is not the destination interface */
+ if ((m->m_flags & M_PKTHDR) != 0)
+ dstifp = m->m_pkthdr.rcvif;
+#endif
+
+ /* jumbo payload can't contain a fragment header */
+ if (ip6->ip6_plen == 0) {
+ icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER, offset);
+ in6_ifstat_inc(dstifp, ifs6_reass_fail);
+ return IPPROTO_DONE;
+ }
+
+ /*
+	 * Check whether the fragment packet's fragment length is a
+	 * multiple of 8 octets.
+	 * sizeof(struct ip6_frag) == 8
+	 * sizeof(struct ip6_hdr) == 40
+ */
+ if ((ip6f->ip6f_offlg & IP6F_MORE_FRAG) &&
+ (((ntohs(ip6->ip6_plen) - offset) & 0x7) != 0)) {
+ icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER,
+ offsetof(struct ip6_hdr, ip6_plen));
+ in6_ifstat_inc(dstifp, ifs6_reass_fail);
+ return IPPROTO_DONE;
+ }
+
+ V_ip6stat.ip6s_fragments++;
+ in6_ifstat_inc(dstifp, ifs6_reass_reqd);
+
+ /* offset now points to data portion */
+ offset += sizeof(struct ip6_frag);
+
+ IP6Q_LOCK();
+
+ /*
+ * Enforce upper bound on number of fragments.
+ * If maxfrag is 0, never accept fragments.
+ * If maxfrag is -1, accept all fragments without limitation.
+ */
+ if (V_ip6_maxfrags < 0)
+ ;
+ else if (V_frag6_nfrags >= (u_int)V_ip6_maxfrags)
+ goto dropfrag;
+
+ for (q6 = V_ip6q.ip6q_next; q6 != &V_ip6q; q6 = q6->ip6q_next)
+ if (ip6f->ip6f_ident == q6->ip6q_ident &&
+ IN6_ARE_ADDR_EQUAL(&ip6->ip6_src, &q6->ip6q_src) &&
+ IN6_ARE_ADDR_EQUAL(&ip6->ip6_dst, &q6->ip6q_dst)
+#ifdef MAC
+ && mac_ip6q_match(m, q6)
+#endif
+ )
+ break;
+
+ if (q6 == &V_ip6q) {
+ /*
+		 * This is the first fragment to arrive; create a reassembly queue.
+ */
+ first_frag = 1;
+
+ /*
+ * Enforce upper bound on number of fragmented packets
+ * for which we attempt reassembly;
+ * If maxfragpackets is 0, never accept fragments.
+ * If maxfragpackets is -1, accept all fragments without
+ * limitation.
+ */
+ if (V_ip6_maxfragpackets < 0)
+ ;
+ else if (V_frag6_nfragpackets >= (u_int)V_ip6_maxfragpackets)
+ goto dropfrag;
+ V_frag6_nfragpackets++;
+ q6 = (struct ip6q *)malloc(sizeof(struct ip6q), M_FTABLE,
+ M_NOWAIT);
+ if (q6 == NULL)
+ goto dropfrag;
+ bzero(q6, sizeof(*q6));
+#ifdef MAC
+ if (mac_ip6q_init(q6, M_NOWAIT) != 0) {
+ free(q6, M_FTABLE);
+ goto dropfrag;
+ }
+ mac_ip6q_create(m, q6);
+#endif
+ frag6_insque(q6, &V_ip6q);
+
+ /* ip6q_nxt will be filled afterwards, from 1st fragment */
+ q6->ip6q_down = q6->ip6q_up = (struct ip6asfrag *)q6;
+#ifdef notyet
+ q6->ip6q_nxtp = (u_char *)nxtp;
+#endif
+ q6->ip6q_ident = ip6f->ip6f_ident;
+ q6->ip6q_ttl = IPV6_FRAGTTL;
+ q6->ip6q_src = ip6->ip6_src;
+ q6->ip6q_dst = ip6->ip6_dst;
+ q6->ip6q_ecn =
+ (ntohl(ip6->ip6_flow) >> 20) & IPTOS_ECN_MASK;
+ q6->ip6q_unfrglen = -1; /* The 1st fragment has not arrived. */
+
+ q6->ip6q_nfrag = 0;
+ }
+
+ /*
+ * If it's the 1st fragment, record the length of the
+ * unfragmentable part and the next header of the fragment header.
+ */
+ fragoff = ntohs(ip6f->ip6f_offlg & IP6F_OFF_MASK);
+ if (fragoff == 0) {
+ q6->ip6q_unfrglen = offset - sizeof(struct ip6_hdr) -
+ sizeof(struct ip6_frag);
+ q6->ip6q_nxt = ip6f->ip6f_nxt;
+ }
+
+ /*
+ * Check that the reassembled packet would not exceed 65535 bytes
+ * in size.
+ * If it would exceed, discard the fragment and return an ICMP error.
+ */
+ frgpartlen = sizeof(struct ip6_hdr) + ntohs(ip6->ip6_plen) - offset;
+ if (q6->ip6q_unfrglen >= 0) {
+ /* The 1st fragment has already arrived. */
+ if (q6->ip6q_unfrglen + fragoff + frgpartlen > IPV6_MAXPACKET) {
+ icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER,
+ offset - sizeof(struct ip6_frag) +
+ offsetof(struct ip6_frag, ip6f_offlg));
+ IP6Q_UNLOCK();
+ return (IPPROTO_DONE);
+ }
+ } else if (fragoff + frgpartlen > IPV6_MAXPACKET) {
+ icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER,
+ offset - sizeof(struct ip6_frag) +
+ offsetof(struct ip6_frag, ip6f_offlg));
+ IP6Q_UNLOCK();
+ return (IPPROTO_DONE);
+ }
+ /*
+ * If it's the first fragment, do the above check for each
+ * fragment already stored in the reassembly queue.
+ */
+ if (fragoff == 0) {
+ for (af6 = q6->ip6q_down; af6 != (struct ip6asfrag *)q6;
+ af6 = af6dwn) {
+ af6dwn = af6->ip6af_down;
+
+ if (q6->ip6q_unfrglen + af6->ip6af_off + af6->ip6af_frglen >
+ IPV6_MAXPACKET) {
+ struct mbuf *merr = IP6_REASS_MBUF(af6);
+ struct ip6_hdr *ip6err;
+ int erroff = af6->ip6af_offset;
+
+ /* dequeue the fragment. */
+ frag6_deq(af6);
+ free(af6, M_FTABLE);
+
+ /* adjust pointer. */
+ ip6err = mtod(merr, struct ip6_hdr *);
+
+ /*
+ * Restore source and destination addresses
+ * in the erroneous IPv6 header.
+ */
+ ip6err->ip6_src = q6->ip6q_src;
+ ip6err->ip6_dst = q6->ip6q_dst;
+
+ icmp6_error(merr, ICMP6_PARAM_PROB,
+ ICMP6_PARAMPROB_HEADER,
+ erroff - sizeof(struct ip6_frag) +
+ offsetof(struct ip6_frag, ip6f_offlg));
+ }
+ }
+ }
+
+ ip6af = (struct ip6asfrag *)malloc(sizeof(struct ip6asfrag), M_FTABLE,
+ M_NOWAIT);
+ if (ip6af == NULL)
+ goto dropfrag;
+ bzero(ip6af, sizeof(*ip6af));
+ ip6af->ip6af_mff = ip6f->ip6f_offlg & IP6F_MORE_FRAG;
+ ip6af->ip6af_off = fragoff;
+ ip6af->ip6af_frglen = frgpartlen;
+ ip6af->ip6af_offset = offset;
+ IP6_REASS_MBUF(ip6af) = m;
+
+ if (first_frag) {
+ af6 = (struct ip6asfrag *)q6;
+ goto insert;
+ }
+
+ /*
+ * Handle ECN by comparing this segment with the first one;
+ * if CE is set, do not lose CE.
+ * drop if CE and not-ECT are mixed for the same packet.
+ */
+ ecn = (ntohl(ip6->ip6_flow) >> 20) & IPTOS_ECN_MASK;
+ ecn0 = q6->ip6q_ecn;
+ if (ecn == IPTOS_ECN_CE) {
+ if (ecn0 == IPTOS_ECN_NOTECT) {
+ free(ip6af, M_FTABLE);
+ goto dropfrag;
+ }
+ if (ecn0 != IPTOS_ECN_CE)
+ q6->ip6q_ecn = IPTOS_ECN_CE;
+ }
+ if (ecn == IPTOS_ECN_NOTECT && ecn0 != IPTOS_ECN_NOTECT) {
+ free(ip6af, M_FTABLE);
+ goto dropfrag;
+ }
+
+ /*
+ * Find a segment which begins after this one does.
+ */
+ for (af6 = q6->ip6q_down; af6 != (struct ip6asfrag *)q6;
+ af6 = af6->ip6af_down)
+ if (af6->ip6af_off > ip6af->ip6af_off)
+ break;
+
+#if 0
+ /*
+ * If there is a preceding segment, it may provide some of
+ * our data already. If so, drop the data from the incoming
+ * segment. If it provides all of our data, drop us.
+ */
+ if (af6->ip6af_up != (struct ip6asfrag *)q6) {
+ i = af6->ip6af_up->ip6af_off + af6->ip6af_up->ip6af_frglen
+ - ip6af->ip6af_off;
+ if (i > 0) {
+ if (i >= ip6af->ip6af_frglen)
+ goto dropfrag;
+ m_adj(IP6_REASS_MBUF(ip6af), i);
+ ip6af->ip6af_off += i;
+ ip6af->ip6af_frglen -= i;
+ }
+ }
+
+ /*
+ * While we overlap succeeding segments trim them or,
+ * if they are completely covered, dequeue them.
+ */
+ while (af6 != (struct ip6asfrag *)q6 &&
+ ip6af->ip6af_off + ip6af->ip6af_frglen > af6->ip6af_off) {
+ i = (ip6af->ip6af_off + ip6af->ip6af_frglen) - af6->ip6af_off;
+ if (i < af6->ip6af_frglen) {
+ af6->ip6af_frglen -= i;
+ af6->ip6af_off += i;
+ m_adj(IP6_REASS_MBUF(af6), i);
+ break;
+ }
+ af6 = af6->ip6af_down;
+ m_freem(IP6_REASS_MBUF(af6->ip6af_up));
+ frag6_deq(af6->ip6af_up);
+ }
+#else
+ /*
+	 * If the incoming fragment overlaps some existing fragments in
+	 * the reassembly queue, drop it, since it is dangerous to override
+	 * existing fragments from a security point of view.
+	 * We don't know which fragment is the bad guy - here we trust the
+	 * fragment that came in earlier, with no real reason.
+ *
+ * Note: due to changes after disabling this part, mbuf passed to
+ * m_adj() below now does not meet the requirement.
+ */
+ if (af6->ip6af_up != (struct ip6asfrag *)q6) {
+ i = af6->ip6af_up->ip6af_off + af6->ip6af_up->ip6af_frglen
+ - ip6af->ip6af_off;
+ if (i > 0) {
+#if 0 /* suppress the noisy log */
+ log(LOG_ERR, "%d bytes of a fragment from %s "
+ "overlaps the previous fragment\n",
+ i, ip6_sprintf(ip6buf, &q6->ip6q_src));
+#endif
+ free(ip6af, M_FTABLE);
+ goto dropfrag;
+ }
+ }
+ if (af6 != (struct ip6asfrag *)q6) {
+ i = (ip6af->ip6af_off + ip6af->ip6af_frglen) - af6->ip6af_off;
+ if (i > 0) {
+#if 0 /* suppress the noisy log */
+ log(LOG_ERR, "%d bytes of a fragment from %s "
+ "overlaps the succeeding fragment",
+ i, ip6_sprintf(ip6buf, &q6->ip6q_src));
+#endif
+ free(ip6af, M_FTABLE);
+ goto dropfrag;
+ }
+ }
+#endif
+
+insert:
+#ifdef MAC
+ if (!first_frag)
+ mac_ip6q_update(m, q6);
+#endif
+
+ /*
+ * Stick new segment in its place;
+ * check for complete reassembly.
+ * Move to front of packet queue, as we are
+ * the most recently active fragmented packet.
+ */
+ frag6_enq(ip6af, af6->ip6af_up);
+ V_frag6_nfrags++;
+ q6->ip6q_nfrag++;
+#if 0 /* xxx */
+ if (q6 != V_ip6q.ip6q_next) {
+ frag6_remque(q6);
+ frag6_insque(q6, &V_ip6q);
+ }
+#endif
+ next = 0;
+ for (af6 = q6->ip6q_down; af6 != (struct ip6asfrag *)q6;
+ af6 = af6->ip6af_down) {
+ if (af6->ip6af_off != next) {
+ IP6Q_UNLOCK();
+ return IPPROTO_DONE;
+ }
+ next += af6->ip6af_frglen;
+ }
+ if (af6->ip6af_up->ip6af_mff) {
+ IP6Q_UNLOCK();
+ return IPPROTO_DONE;
+ }
+
+ /*
+ * Reassembly is complete; concatenate fragments.
+ */
+ ip6af = q6->ip6q_down;
+ t = m = IP6_REASS_MBUF(ip6af);
+ af6 = ip6af->ip6af_down;
+ frag6_deq(ip6af);
+ while (af6 != (struct ip6asfrag *)q6) {
+ af6dwn = af6->ip6af_down;
+ frag6_deq(af6);
+ while (t->m_next)
+ t = t->m_next;
+ t->m_next = IP6_REASS_MBUF(af6);
+ m_adj(t->m_next, af6->ip6af_offset);
+ free(af6, M_FTABLE);
+ af6 = af6dwn;
+ }
+
+ /* adjust offset to point where the original next header starts */
+ offset = ip6af->ip6af_offset - sizeof(struct ip6_frag);
+ free(ip6af, M_FTABLE);
+ ip6 = mtod(m, struct ip6_hdr *);
+ ip6->ip6_plen = htons((u_short)next + offset - sizeof(struct ip6_hdr));
+ if (q6->ip6q_ecn == IPTOS_ECN_CE)
+ ip6->ip6_flow |= htonl(IPTOS_ECN_CE << 20);
+ nxt = q6->ip6q_nxt;
+#ifdef notyet
+ *q6->ip6q_nxtp = (u_char)(nxt & 0xff);
+#endif
+
+ /* Delete frag6 header */
+ if (m->m_len >= offset + sizeof(struct ip6_frag)) {
+ /* This is the only possible case with !PULLDOWN_TEST */
+ ovbcopy((caddr_t)ip6, (caddr_t)ip6 + sizeof(struct ip6_frag),
+ offset);
+ m->m_data += sizeof(struct ip6_frag);
+ m->m_len -= sizeof(struct ip6_frag);
+ } else {
+ /* this comes with no copy if the boundary is on cluster */
+ if ((t = m_split(m, offset, M_DONTWAIT)) == NULL) {
+ frag6_remque(q6);
+ V_frag6_nfrags -= q6->ip6q_nfrag;
+#ifdef MAC
+ mac_ip6q_destroy(q6);
+#endif
+ free(q6, M_FTABLE);
+ V_frag6_nfragpackets--;
+ goto dropfrag;
+ }
+ m_adj(t, sizeof(struct ip6_frag));
+ m_cat(m, t);
+ }
+
+ /*
+ * Store NXT to the original.
+ */
+ {
+ char *prvnxtp = ip6_get_prevhdr(m, offset); /* XXX */
+ *prvnxtp = nxt;
+ }
+
+ frag6_remque(q6);
+ V_frag6_nfrags -= q6->ip6q_nfrag;
+#ifdef MAC
+ mac_ip6q_reassemble(q6, m);
+ mac_ip6q_destroy(q6);
+#endif
+ free(q6, M_FTABLE);
+ V_frag6_nfragpackets--;
+
+ if (m->m_flags & M_PKTHDR) { /* Isn't it always true? */
+ int plen = 0;
+ for (t = m; t; t = t->m_next)
+ plen += t->m_len;
+ m->m_pkthdr.len = plen;
+ }
+
+ V_ip6stat.ip6s_reassembled++;
+ in6_ifstat_inc(dstifp, ifs6_reass_ok);
+
+ /*
+ * Tell launch routine the next header
+ */
+
+ *mp = m;
+ *offp = offset;
+
+ IP6Q_UNLOCK();
+ return nxt;
+
+ dropfrag:
+ IP6Q_UNLOCK();
+ in6_ifstat_inc(dstifp, ifs6_reass_fail);
+ V_ip6stat.ip6s_fragdropped++;
+ m_freem(m);
+ return IPPROTO_DONE;
+}
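
The completeness test above walks the fragment list once, requiring each
fragment's offset to equal the running total of the preceding lengths, and
the last fragment to have its more-fragments bit clear. A minimal
user-space sketch of that check, with a hypothetical struct frag standing
in for struct ip6asfrag:

    #include <stdio.h>

    /* Hypothetical stand-in for struct ip6asfrag. */
    struct frag {
        int off;    /* fragment offset in the original payload */
        int len;    /* fragment payload length */
        int mff;    /* more fragments follow this one */
    };

    /*
     * Return the reassembled payload length if the sorted fragment list
     * is complete (contiguous from offset 0, final fragment seen);
     * return -1 for the "wait for more fragments" case.
     */
    static int
    reass_complete(const struct frag *f, int n)
    {
        int i, next = 0;

        for (i = 0; i < n; i++) {
            if (f[i].off != next)
                return (-1);    /* hole in the coverage */
            next += f[i].len;
        }
        if (n == 0 || f[n - 1].mff)
            return (-1);        /* final fragment not yet received */
        return (next);
    }

    int
    main(void)
    {
        struct frag got[] = { { 0, 1232, 1 }, { 1232, 1232, 1 },
            { 2464, 400, 0 } };

        printf("%d\n", reass_complete(got, 3));     /* prints 2864 */
        return (0);
    }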
+
+/*
+ * Free a fragment reassembly header and all
+ * associated datagrams.
+ */
+void
+frag6_freef(struct ip6q *q6)
+{
+ struct ip6asfrag *af6, *down6;
+
+ IP6Q_LOCK_ASSERT();
+
+ for (af6 = q6->ip6q_down; af6 != (struct ip6asfrag *)q6;
+ af6 = down6) {
+ struct mbuf *m = IP6_REASS_MBUF(af6);
+
+ down6 = af6->ip6af_down;
+ frag6_deq(af6);
+
+ /*
+ * Return ICMP time exceeded error for the 1st fragment.
+ * Just free other fragments.
+ */
+ if (af6->ip6af_off == 0) {
+ struct ip6_hdr *ip6;
+
+ /* adjust pointer */
+ ip6 = mtod(m, struct ip6_hdr *);
+
+ /* restore source and destination addresses */
+ ip6->ip6_src = q6->ip6q_src;
+ ip6->ip6_dst = q6->ip6q_dst;
+
+ icmp6_error(m, ICMP6_TIME_EXCEEDED,
+ ICMP6_TIME_EXCEED_REASSEMBLY, 0);
+ } else
+ m_freem(m);
+ free(af6, M_FTABLE);
+ }
+ frag6_remque(q6);
+ V_frag6_nfrags -= q6->ip6q_nfrag;
+#ifdef MAC
+ mac_ip6q_destroy(q6);
+#endif
+ free(q6, M_FTABLE);
+ V_frag6_nfragpackets--;
+}
+
+/*
+ * Put an IPv6 fragment on a reassembly chain.
+ * Like insque, but with the pointers in the middle of the structure.
+ */
+void
+frag6_enq(struct ip6asfrag *af6, struct ip6asfrag *up6)
+{
+
+ IP6Q_LOCK_ASSERT();
+
+ af6->ip6af_up = up6;
+ af6->ip6af_down = up6->ip6af_down;
+ up6->ip6af_down->ip6af_up = af6;
+ up6->ip6af_down = af6;
+}
+
+/*
+ * frag6_deq is to frag6_enq as remque is to insque.
+ */
+void
+frag6_deq(struct ip6asfrag *af6)
+{
+
+ IP6Q_LOCK_ASSERT();
+
+ af6->ip6af_up->ip6af_down = af6->ip6af_down;
+ af6->ip6af_down->ip6af_up = af6->ip6af_up;
+}
+
+void
+frag6_insque(struct ip6q *new, struct ip6q *old)
+{
+
+ IP6Q_LOCK_ASSERT();
+
+ new->ip6q_prev = old;
+ new->ip6q_next = old->ip6q_next;
+ old->ip6q_next->ip6q_prev = new;
+ old->ip6q_next = new;
+}
+
+void
+frag6_remque(struct ip6q *p6)
+{
+
+ IP6Q_LOCK_ASSERT();
+
+ p6->ip6q_prev->ip6q_next = p6->ip6q_next;
+ p6->ip6q_next->ip6q_prev = p6->ip6q_prev;
+}
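
frag6_insque()/frag6_remque() and frag6_enq()/frag6_deq() are the classic
circular doubly-linked list with a sentinel: the head structure itself
terminates every walk, so no NULL checks are needed. A self-contained
sketch of the same idiom on a hypothetical node type:

    #include <stdio.h>

    struct node {
        struct node *next, *prev;
        int val;
    };

    static void
    q_init(struct node *head)
    {
        head->next = head->prev = head;     /* empty: points at itself */
    }

    static void
    q_insque(struct node *n, struct node *after)
    {
        n->prev = after;
        n->next = after->next;
        after->next->prev = n;
        after->next = n;
    }

    static void
    q_remque(struct node *n)
    {
        n->prev->next = n->next;
        n->next->prev = n->prev;
    }

    int
    main(void)
    {
        struct node head, a = { NULL, NULL, 1 }, b = { NULL, NULL, 2 };
        struct node *n;

        q_init(&head);
        q_insque(&a, &head);        /* head -> a */
        q_insque(&b, &head);        /* head -> b -> a: newest first */
        q_remque(&a);
        for (n = head.next; n != &head; n = n->next)
            printf("%d\n", n->val); /* prints 2 */
        return (0);
    }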
+
+/*
+ * IPv6 reassembling timer processing;
+ * if a timer expires on a reassembly
+ * queue, discard it.
+ */
+void
+frag6_slowtimo(void)
+{
+ VNET_ITERATOR_DECL(vnet_iter);
+ struct ip6q *q6;
+
+ VNET_LIST_RLOCK_NOSLEEP();
+ IP6Q_LOCK();
+ VNET_FOREACH(vnet_iter) {
+ CURVNET_SET(vnet_iter);
+ q6 = V_ip6q.ip6q_next;
+ if (q6)
+ while (q6 != &V_ip6q) {
+ --q6->ip6q_ttl;
+ q6 = q6->ip6q_next;
+ if (q6->ip6q_prev->ip6q_ttl == 0) {
+ V_ip6stat.ip6s_fragtimeout++;
+ /* XXX in6_ifstat_inc(ifp, ifs6_reass_fail) */
+ frag6_freef(q6->ip6q_prev);
+ }
+ }
+ /*
+ * If we are over the maximum number of fragments
+ * (due to the limit being lowered), drain off
+ * enough to get down to the new limit.
+ */
+ while (V_frag6_nfragpackets > (u_int)V_ip6_maxfragpackets &&
+ V_ip6q.ip6q_prev) {
+ V_ip6stat.ip6s_fragoverflow++;
+ /* XXX in6_ifstat_inc(ifp, ifs6_reass_fail) */
+ frag6_freef(V_ip6q.ip6q_prev);
+ }
+ CURVNET_RESTORE();
+ }
+ IP6Q_UNLOCK();
+ VNET_LIST_RUNLOCK_NOSLEEP();
+}
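
Note the traversal order in frag6_slowtimo(): the cursor is advanced past
an entry before that entry's TTL is inspected, so frag6_freef() can unlink
and free q6->ip6q_prev without invalidating the iterator. A user-space
sketch of the pattern, with a hypothetical struct q in place of struct
ip6q:

    #include <stdio.h>
    #include <stdlib.h>

    struct q {
        struct q *next, *prev;
        int ttl;
    };

    /*
     * Decrement every TTL and free entries that reach zero. The cursor
     * is moved forward before the expiry check, so removing q6->prev
     * never touches the node the loop will visit next.
     */
    static void
    sweep(struct q *head)
    {
        struct q *q6 = head->next;

        while (q6 != head) {
            --q6->ttl;
            q6 = q6->next;
            if (q6->prev->ttl == 0) {
                struct q *dead = q6->prev;

                dead->prev->next = dead->next;  /* remque */
                dead->next->prev = dead->prev;
                free(dead);
            }
        }
    }

    int
    main(void)
    {
        struct q head = { &head, &head, 0 };
        struct q *a = malloc(sizeof(*a));

        if (a == NULL)
            return (1);
        a->ttl = 1;                     /* will expire on this sweep */
        a->prev = &head;
        a->next = head.next;
        head.next->prev = a;
        head.next = a;
        sweep(&head);
        printf("%s\n", head.next == &head ? "empty" : "non-empty");
        return (0);
    }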
+
+/*
+ * Drain off all datagram fragments.
+ */
+void
+frag6_drain(void)
+{
+ VNET_ITERATOR_DECL(vnet_iter);
+
+ VNET_LIST_RLOCK_NOSLEEP();
+ if (IP6Q_TRYLOCK() == 0) {
+ VNET_LIST_RUNLOCK_NOSLEEP();
+ return;
+ }
+ VNET_FOREACH(vnet_iter) {
+ CURVNET_SET(vnet_iter);
+ while (V_ip6q.ip6q_next != &V_ip6q) {
+ V_ip6stat.ip6s_fragdropped++;
+ /* XXX in6_ifstat_inc(ifp, ifs6_reass_fail) */
+ frag6_freef(V_ip6q.ip6q_next);
+ }
+ CURVNET_RESTORE();
+ }
+ IP6Q_UNLOCK();
+ VNET_LIST_RUNLOCK_NOSLEEP();
+}
diff --git a/rtems/freebsd/netinet6/icmp6.c b/rtems/freebsd/netinet6/icmp6.c
new file mode 100644
index 00000000..eb17ff02
--- /dev/null
+++ b/rtems/freebsd/netinet6/icmp6.c
@@ -0,0 +1,2857 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the project nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $KAME: icmp6.c,v 1.211 2001/04/04 05:56:20 itojun Exp $
+ */
+
+/*-
+ * Copyright (c) 1982, 1986, 1988, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)ip_icmp.c 8.2 (Berkeley) 1/4/94
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/freebsd/local/opt_inet.h>
+#include <rtems/freebsd/local/opt_inet6.h>
+#include <rtems/freebsd/local/opt_ipsec.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/domain.h>
+#include <rtems/freebsd/sys/jail.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/proc.h>
+#include <rtems/freebsd/sys/protosw.h>
+#include <rtems/freebsd/sys/signalvar.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/socketvar.h>
+#include <rtems/freebsd/sys/sx.h>
+#include <rtems/freebsd/sys/syslog.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/time.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/if_dl.h>
+#include <rtems/freebsd/net/if_llatbl.h>
+#include <rtems/freebsd/net/if_types.h>
+#include <rtems/freebsd/net/route.h>
+#include <rtems/freebsd/net/vnet.h>
+
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/in_pcb.h>
+#include <rtems/freebsd/netinet/in_var.h>
+#include <rtems/freebsd/netinet/ip6.h>
+#include <rtems/freebsd/netinet/icmp6.h>
+#include <rtems/freebsd/netinet/tcp_var.h>
+
+#include <rtems/freebsd/netinet6/in6_ifattach.h>
+#include <rtems/freebsd/netinet6/in6_pcb.h>
+#include <rtems/freebsd/netinet6/ip6protosw.h>
+#include <rtems/freebsd/netinet6/ip6_var.h>
+#include <rtems/freebsd/netinet6/scope6_var.h>
+#include <rtems/freebsd/netinet6/mld6_var.h>
+#include <rtems/freebsd/netinet6/nd6.h>
+
+#ifdef IPSEC
+#include <rtems/freebsd/netipsec/ipsec.h>
+#include <rtems/freebsd/netipsec/key.h>
+#endif
+
+extern struct domain inet6domain;
+
+VNET_DEFINE(struct icmp6stat, icmp6stat);
+
+VNET_DECLARE(struct inpcbinfo, ripcbinfo);
+VNET_DECLARE(struct inpcbhead, ripcb);
+VNET_DECLARE(int, icmp6errppslim);
+static VNET_DEFINE(int, icmp6errpps_count) = 0;
+static VNET_DEFINE(struct timeval, icmp6errppslim_last);
+VNET_DECLARE(int, icmp6_nodeinfo);
+
+#define V_ripcbinfo VNET(ripcbinfo)
+#define V_ripcb VNET(ripcb)
+#define V_icmp6errppslim VNET(icmp6errppslim)
+#define V_icmp6errpps_count VNET(icmp6errpps_count)
+#define V_icmp6errppslim_last VNET(icmp6errppslim_last)
+#define V_icmp6_nodeinfo VNET(icmp6_nodeinfo)
+
+static void icmp6_errcount(struct icmp6errstat *, int, int);
+static int icmp6_rip6_input(struct mbuf **, int);
+static int icmp6_ratelimit(const struct in6_addr *, const int, const int);
+static const char *icmp6_redirect_diag __P((struct in6_addr *,
+ struct in6_addr *, struct in6_addr *));
+static struct mbuf *ni6_input(struct mbuf *, int);
+static struct mbuf *ni6_nametodns(const char *, int, int);
+static int ni6_dnsmatch(const char *, int, const char *, int);
+static int ni6_addrs __P((struct icmp6_nodeinfo *, struct mbuf *,
+ struct ifnet **, struct in6_addr *));
+static int ni6_store_addrs __P((struct icmp6_nodeinfo *, struct icmp6_nodeinfo *,
+ struct ifnet *, int));
+static int icmp6_notify_error(struct mbuf **, int, int, int);
+
+/*
+ * Kernel module interface for updating icmp6stat. The argument is an index
+ * into icmp6stat treated as an array of u_quad_t. While this encodes the
+ * general layout of icmp6stat into the caller, it doesn't encode its
+ * location, so that future changes to add, for example, per-CPU stats
+ * support won't cause binary compatibility problems for kernel modules.
+ */
+void
+kmod_icmp6stat_inc(int statnum)
+{
+
+ (*((u_quad_t *)&V_icmp6stat + statnum))++;
+}
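
kmod_icmp6stat_inc() depends on every member of struct icmp6stat being a
u_quad_t, so that a field can be reached as an index into an array of
counters laid over the struct. A small sketch of the same trick on a
hypothetical three-field statistics struct:

    #include <stdio.h>

    typedef unsigned long long u_quad_t;

    /* Hypothetical miniature of icmp6stat: u_quad_t fields only. */
    struct stat6 {
        u_quad_t error;         /* index 0 */
        u_quad_t canterror;     /* index 1 */
        u_quad_t toofreq;       /* index 2 */
    };

    static struct stat6 st;

    static void
    stat_inc(int statnum)
    {
        (*((u_quad_t *)&st + statnum))++;
    }

    int
    main(void)
    {
        stat_inc(1);                        /* bump canterror */
        printf("%llu\n", st.canterror);     /* prints 1 */
        return (0);
    }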
+
+static void
+icmp6_errcount(struct icmp6errstat *stat, int type, int code)
+{
+ switch (type) {
+ case ICMP6_DST_UNREACH:
+ switch (code) {
+ case ICMP6_DST_UNREACH_NOROUTE:
+ stat->icp6errs_dst_unreach_noroute++;
+ return;
+ case ICMP6_DST_UNREACH_ADMIN:
+ stat->icp6errs_dst_unreach_admin++;
+ return;
+ case ICMP6_DST_UNREACH_BEYONDSCOPE:
+ stat->icp6errs_dst_unreach_beyondscope++;
+ return;
+ case ICMP6_DST_UNREACH_ADDR:
+ stat->icp6errs_dst_unreach_addr++;
+ return;
+ case ICMP6_DST_UNREACH_NOPORT:
+ stat->icp6errs_dst_unreach_noport++;
+ return;
+ }
+ break;
+ case ICMP6_PACKET_TOO_BIG:
+ stat->icp6errs_packet_too_big++;
+ return;
+ case ICMP6_TIME_EXCEEDED:
+ switch (code) {
+ case ICMP6_TIME_EXCEED_TRANSIT:
+ stat->icp6errs_time_exceed_transit++;
+ return;
+ case ICMP6_TIME_EXCEED_REASSEMBLY:
+ stat->icp6errs_time_exceed_reassembly++;
+ return;
+ }
+ break;
+ case ICMP6_PARAM_PROB:
+ switch (code) {
+ case ICMP6_PARAMPROB_HEADER:
+ stat->icp6errs_paramprob_header++;
+ return;
+ case ICMP6_PARAMPROB_NEXTHEADER:
+ stat->icp6errs_paramprob_nextheader++;
+ return;
+ case ICMP6_PARAMPROB_OPTION:
+ stat->icp6errs_paramprob_option++;
+ return;
+ }
+ break;
+ case ND_REDIRECT:
+ stat->icp6errs_redirect++;
+ return;
+ }
+ stat->icp6errs_unknown++;
+}
+
+/*
+ * A wrapper function for icmp6_error(), necessary when the erroneous packet
+ * may not contain enough scope zone information.
+ */
+void
+icmp6_error2(struct mbuf *m, int type, int code, int param,
+ struct ifnet *ifp)
+{
+ struct ip6_hdr *ip6;
+
+ if (ifp == NULL)
+ return;
+
+#ifndef PULLDOWN_TEST
+ IP6_EXTHDR_CHECK(m, 0, sizeof(struct ip6_hdr), );
+#else
+ if (m->m_len < sizeof(struct ip6_hdr)) {
+ m = m_pullup(m, sizeof(struct ip6_hdr));
+ if (m == NULL)
+ return;
+ }
+#endif
+
+ ip6 = mtod(m, struct ip6_hdr *);
+
+ if (in6_setscope(&ip6->ip6_src, ifp, NULL) != 0)
+ return;
+ if (in6_setscope(&ip6->ip6_dst, ifp, NULL) != 0)
+ return;
+
+ icmp6_error(m, type, code, param);
+}
+
+/*
+ * Generate an error packet of type error in response to bad IP6 packet.
+ */
+void
+icmp6_error(struct mbuf *m, int type, int code, int param)
+{
+ struct ip6_hdr *oip6, *nip6;
+ struct icmp6_hdr *icmp6;
+ u_int preplen;
+ int off;
+ int nxt;
+
+ ICMP6STAT_INC(icp6s_error);
+
+ /* count per-type-code statistics */
+ icmp6_errcount(&V_icmp6stat.icp6s_outerrhist, type, code);
+
+#ifdef M_DECRYPTED /*not openbsd*/
+ if (m->m_flags & M_DECRYPTED) {
+ ICMP6STAT_INC(icp6s_canterror);
+ goto freeit;
+ }
+#endif
+
+#ifndef PULLDOWN_TEST
+ IP6_EXTHDR_CHECK(m, 0, sizeof(struct ip6_hdr), );
+#else
+ if (m->m_len < sizeof(struct ip6_hdr)) {
+ m = m_pullup(m, sizeof(struct ip6_hdr));
+ if (m == NULL)
+ return;
+ }
+#endif
+ oip6 = mtod(m, struct ip6_hdr *);
+
+ /*
+ * If the destination address of the erroneous packet is a multicast
+ * address, or the packet was sent using link-layer multicast,
+ * we should basically suppress sending an error (RFC 2463, Section
+ * 2.4).
+ * We have two exceptions (the item e.2 in that section):
+ * - the Packet Too Big message can be sent for path MTU discovery.
+ * - a Parameter Problem Message can be allowed as an icmp6 error
+ * for an unrecognized value in the option type field. This check
+ * has been done in ip6_unknown_opt(), so we can just check the
+ * type and code.
+ */
+ if ((m->m_flags & (M_BCAST|M_MCAST) ||
+ IN6_IS_ADDR_MULTICAST(&oip6->ip6_dst)) &&
+ (type != ICMP6_PACKET_TOO_BIG &&
+ (type != ICMP6_PARAM_PROB ||
+ code != ICMP6_PARAMPROB_OPTION)))
+ goto freeit;
+
+ /*
+ * RFC 2463, 2.4 (e.5): source address check.
+ * XXX: the case of anycast source?
+ */
+ if (IN6_IS_ADDR_UNSPECIFIED(&oip6->ip6_src) ||
+ IN6_IS_ADDR_MULTICAST(&oip6->ip6_src))
+ goto freeit;
+
+ /*
+ * If we are about to send ICMPv6 against ICMPv6 error/redirect,
+ * don't do it.
+ */
+ nxt = -1;
+ off = ip6_lasthdr(m, 0, IPPROTO_IPV6, &nxt);
+ if (off >= 0 && nxt == IPPROTO_ICMPV6) {
+ struct icmp6_hdr *icp;
+
+#ifndef PULLDOWN_TEST
+ IP6_EXTHDR_CHECK(m, 0, off + sizeof(struct icmp6_hdr), );
+ icp = (struct icmp6_hdr *)(mtod(m, caddr_t) + off);
+#else
+ IP6_EXTHDR_GET(icp, struct icmp6_hdr *, m, off,
+ sizeof(*icp));
+ if (icp == NULL) {
+ ICMP6STAT_INC(icp6s_tooshort);
+ return;
+ }
+#endif
+ if (icp->icmp6_type < ICMP6_ECHO_REQUEST ||
+ icp->icmp6_type == ND_REDIRECT) {
+ /*
+ * ICMPv6 error
+ * Special case: for redirect (which is
+ * informational) we must not send icmp6 error.
+ */
+ ICMP6STAT_INC(icp6s_canterror);
+ goto freeit;
+ } else {
+ /* ICMPv6 informational - send the error */
+ }
+ } else {
+ /* non-ICMPv6 - send the error */
+ }
+
+ oip6 = mtod(m, struct ip6_hdr *); /* adjust pointer */
+
+ /* Finally, do rate limitation check. */
+ if (icmp6_ratelimit(&oip6->ip6_src, type, code)) {
+ ICMP6STAT_INC(icp6s_toofreq);
+ goto freeit;
+ }
+
+ /*
+ * OK, ICMP6 can be generated.
+ */
+
+ if (m->m_pkthdr.len >= ICMPV6_PLD_MAXLEN)
+ m_adj(m, ICMPV6_PLD_MAXLEN - m->m_pkthdr.len);
+
+ preplen = sizeof(struct ip6_hdr) + sizeof(struct icmp6_hdr);
+ M_PREPEND(m, preplen, M_DONTWAIT);
+ if (m && m->m_len < preplen)
+ m = m_pullup(m, preplen);
+ if (m == NULL) {
+ nd6log((LOG_DEBUG, "ENOBUFS in icmp6_error %d\n", __LINE__));
+ return;
+ }
+
+ nip6 = mtod(m, struct ip6_hdr *);
+ nip6->ip6_src = oip6->ip6_src;
+ nip6->ip6_dst = oip6->ip6_dst;
+
+ in6_clearscope(&oip6->ip6_src);
+ in6_clearscope(&oip6->ip6_dst);
+
+ icmp6 = (struct icmp6_hdr *)(nip6 + 1);
+ icmp6->icmp6_type = type;
+ icmp6->icmp6_code = code;
+ icmp6->icmp6_pptr = htonl((u_int32_t)param);
+
+ /*
+ * icmp6_reflect() is designed to be in the input path.
+ * icmp6_error() can be called from both input and output path,
+ * and if we are in the output path rcvif could contain a bogus value.
+ * Clear m->m_pkthdr.rcvif for safety; we should have enough scope
+ * information in the IP header (nip6).
+ */
+ m->m_pkthdr.rcvif = NULL;
+
+ ICMP6STAT_INC(icp6s_outhist[type]);
+ icmp6_reflect(m, sizeof(struct ip6_hdr)); /* header order: IPv6 - ICMPv6 */
+
+ return;
+
+ freeit:
+ /*
+ * If we can't tell whether or not we can generate ICMP6, free it.
+ */
+ m_freem(m);
+}
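
The truncation above passes m_adj() a non-positive count; by the m_adj()
convention a negative count trims bytes from the tail of the chain rather
than the head, so the offending packet is cut down to ICMPV6_PLD_MAXLEN
(1232 bytes, assuming the usual definition of IPV6_MMTU minus the IPv6 and
ICMPv6 header sizes) before the new headers are prepended. A sketch of the
arithmetic with plain integers standing in for mbuf lengths:

    #include <stdio.h>

    #define PLD_MAXLEN 1232     /* 1280 - 40 (ip6_hdr) - 8 (icmp6_hdr) */

    static int
    trim_tail(int pktlen)
    {
        int adj = PLD_MAXLEN - pktlen;  /* <= 0 when truncation needed */

        if (adj < 0)
            pktlen += adj;      /* m_adj(m, adj) drops -adj tail bytes */
        return (pktlen);
    }

    int
    main(void)
    {
        printf("%d\n", trim_tail(4000));    /* prints 1232 */
        printf("%d\n", trim_tail(600));     /* prints 600, untouched */
        return (0);
    }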
+
+/*
+ * Process a received ICMP6 message.
+ */
+int
+icmp6_input(struct mbuf **mp, int *offp, int proto)
+{
+ struct mbuf *m = *mp, *n;
+ struct ifnet *ifp;
+ struct ip6_hdr *ip6, *nip6;
+ struct icmp6_hdr *icmp6, *nicmp6;
+ int off = *offp;
+ int icmp6len = m->m_pkthdr.len - *offp;
+ int code, sum, noff;
+ char ip6bufs[INET6_ADDRSTRLEN], ip6bufd[INET6_ADDRSTRLEN];
+
+ ifp = m->m_pkthdr.rcvif;
+
+#ifndef PULLDOWN_TEST
+ IP6_EXTHDR_CHECK(m, off, sizeof(struct icmp6_hdr), IPPROTO_DONE);
+ /* m might change if M_LOOP. So, call mtod after this */
+#endif
+
+ /*
+ * Locate the icmp6 structure in the mbuf, and check
+ * that it is not corrupted and is of at least minimum length.
+ */
+
+ ip6 = mtod(m, struct ip6_hdr *);
+ if (icmp6len < sizeof(struct icmp6_hdr)) {
+ ICMP6STAT_INC(icp6s_tooshort);
+ goto freeit;
+ }
+
+ /*
+ * Check multicast group membership.
+ * Note: SSM filters are not applied for ICMPv6 traffic.
+ */
+ if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst)) {
+ struct in6_multi *inm;
+
+ inm = in6m_lookup(ifp, &ip6->ip6_dst);
+ if (inm == NULL) {
+ IP6STAT_INC(ip6s_notmember);
+ in6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_discard);
+ goto freeit;
+ }
+ }
+
+ /*
+ * calculate the checksum
+ */
+#ifndef PULLDOWN_TEST
+ icmp6 = (struct icmp6_hdr *)((caddr_t)ip6 + off);
+#else
+ IP6_EXTHDR_GET(icmp6, struct icmp6_hdr *, m, off, sizeof(*icmp6));
+ if (icmp6 == NULL) {
+ ICMP6STAT_INC(icp6s_tooshort);
+ return IPPROTO_DONE;
+ }
+#endif
+ code = icmp6->icmp6_code;
+
+ if ((sum = in6_cksum(m, IPPROTO_ICMPV6, off, icmp6len)) != 0) {
+ nd6log((LOG_ERR,
+ "ICMP6 checksum error(%d|%x) %s\n",
+ icmp6->icmp6_type, sum,
+ ip6_sprintf(ip6bufs, &ip6->ip6_src)));
+ ICMP6STAT_INC(icp6s_checksum);
+ goto freeit;
+ }
+
+ if (faithprefix_p != NULL && (*faithprefix_p)(&ip6->ip6_dst)) {
+ /*
+ * Deliver very specific ICMP6 type only.
+ * This is important to deliver TOOBIG. Otherwise PMTUD
+ * will not work.
+ */
+ switch (icmp6->icmp6_type) {
+ case ICMP6_DST_UNREACH:
+ case ICMP6_PACKET_TOO_BIG:
+ case ICMP6_TIME_EXCEEDED:
+ break;
+ default:
+ goto freeit;
+ }
+ }
+
+ ICMP6STAT_INC(icp6s_inhist[icmp6->icmp6_type]);
+ icmp6_ifstat_inc(ifp, ifs6_in_msg);
+ if (icmp6->icmp6_type < ICMP6_INFOMSG_MASK)
+ icmp6_ifstat_inc(ifp, ifs6_in_error);
+
+ switch (icmp6->icmp6_type) {
+ case ICMP6_DST_UNREACH:
+ icmp6_ifstat_inc(ifp, ifs6_in_dstunreach);
+ switch (code) {
+ case ICMP6_DST_UNREACH_NOROUTE:
+ code = PRC_UNREACH_NET;
+ break;
+ case ICMP6_DST_UNREACH_ADMIN:
+ icmp6_ifstat_inc(ifp, ifs6_in_adminprohib);
+ code = PRC_UNREACH_PROTOCOL; /* is this a good code? */
+ break;
+ case ICMP6_DST_UNREACH_ADDR:
+ code = PRC_HOSTDEAD;
+ break;
+ case ICMP6_DST_UNREACH_BEYONDSCOPE:
+ /* I mean "source address was incorrect." */
+ code = PRC_PARAMPROB;
+ break;
+ case ICMP6_DST_UNREACH_NOPORT:
+ code = PRC_UNREACH_PORT;
+ break;
+ default:
+ goto badcode;
+ }
+ goto deliver;
+ break;
+
+ case ICMP6_PACKET_TOO_BIG:
+ icmp6_ifstat_inc(ifp, ifs6_in_pkttoobig);
+
+ /* validation is made in icmp6_mtudisc_update */
+
+ code = PRC_MSGSIZE;
+
+ /*
+ * Updating the path MTU will be done after examining
+ * intermediate extension headers.
+ */
+ goto deliver;
+ break;
+
+ case ICMP6_TIME_EXCEEDED:
+ icmp6_ifstat_inc(ifp, ifs6_in_timeexceed);
+ switch (code) {
+ case ICMP6_TIME_EXCEED_TRANSIT:
+ code = PRC_TIMXCEED_INTRANS;
+ break;
+ case ICMP6_TIME_EXCEED_REASSEMBLY:
+ code = PRC_TIMXCEED_REASS;
+ break;
+ default:
+ goto badcode;
+ }
+ goto deliver;
+ break;
+
+ case ICMP6_PARAM_PROB:
+ icmp6_ifstat_inc(ifp, ifs6_in_paramprob);
+ switch (code) {
+ case ICMP6_PARAMPROB_NEXTHEADER:
+ code = PRC_UNREACH_PROTOCOL;
+ break;
+ case ICMP6_PARAMPROB_HEADER:
+ case ICMP6_PARAMPROB_OPTION:
+ code = PRC_PARAMPROB;
+ break;
+ default:
+ goto badcode;
+ }
+ goto deliver;
+ break;
+
+ case ICMP6_ECHO_REQUEST:
+ icmp6_ifstat_inc(ifp, ifs6_in_echo);
+ if (code != 0)
+ goto badcode;
+ if ((n = m_copy(m, 0, M_COPYALL)) == NULL) {
+ /* Give up remote */
+ break;
+ }
+ if ((n->m_flags & M_EXT) != 0
+ || n->m_len < off + sizeof(struct icmp6_hdr)) {
+ struct mbuf *n0 = n;
+ const int maxlen = sizeof(*nip6) + sizeof(*nicmp6);
+ int n0len;
+
+ MGETHDR(n, M_DONTWAIT, n0->m_type);
+ n0len = n0->m_pkthdr.len; /* save for use below */
+ if (n)
+ M_MOVE_PKTHDR(n, n0);
+ if (n && maxlen >= MHLEN) {
+ MCLGET(n, M_DONTWAIT);
+ if ((n->m_flags & M_EXT) == 0) {
+ m_free(n);
+ n = NULL;
+ }
+ }
+ if (n == NULL) {
+ /* Give up remote */
+ m_freem(n0);
+ break;
+ }
+ /*
+ * Copy IPv6 and ICMPv6 only.
+ */
+ nip6 = mtod(n, struct ip6_hdr *);
+ bcopy(ip6, nip6, sizeof(struct ip6_hdr));
+ nicmp6 = (struct icmp6_hdr *)(nip6 + 1);
+ bcopy(icmp6, nicmp6, sizeof(struct icmp6_hdr));
+ noff = sizeof(struct ip6_hdr);
+ /* new mbuf contains only ipv6+icmpv6 headers */
+ n->m_len = noff + sizeof(struct icmp6_hdr);
+ /*
+ * Adjust mbuf. ip6_plen will be adjusted in
+ * ip6_output().
+ */
+ m_adj(n0, off + sizeof(struct icmp6_hdr));
+ /* recalculate complete packet size */
+ n->m_pkthdr.len = n0len + (noff - off);
+ n->m_next = n0;
+ } else {
+ nip6 = mtod(n, struct ip6_hdr *);
+ IP6_EXTHDR_GET(nicmp6, struct icmp6_hdr *, n, off,
+ sizeof(*nicmp6));
+ noff = off;
+ }
+ nicmp6->icmp6_type = ICMP6_ECHO_REPLY;
+ nicmp6->icmp6_code = 0;
+ if (n) {
+ ICMP6STAT_INC(icp6s_reflect);
+ ICMP6STAT_INC(icp6s_outhist[ICMP6_ECHO_REPLY]);
+ icmp6_reflect(n, noff);
+ }
+ break;
+
+ case ICMP6_ECHO_REPLY:
+ icmp6_ifstat_inc(ifp, ifs6_in_echoreply);
+ if (code != 0)
+ goto badcode;
+ break;
+
+ case MLD_LISTENER_QUERY:
+ case MLD_LISTENER_REPORT:
+ case MLD_LISTENER_DONE:
+ case MLDV2_LISTENER_REPORT:
+ /*
+ * Drop MLD traffic which is not link-local, has a hop limit
+ * of greater than 1 hop, or which does not have the
+ * IPv6 HBH Router Alert option.
+ * As IPv6 HBH options are stripped in ip6_input() we must
+ * check an mbuf header flag.
+ * XXX Should we also sanity check that these messages
+ * were directed to a link-local multicast prefix?
+ */
+ if ((ip6->ip6_hlim != 1) || (m->m_flags & M_RTALERT_MLD) == 0)
+ goto freeit;
+ if (mld_input(m, off, icmp6len) != 0)
+ return (IPPROTO_DONE);
+ /* m stays. */
+ break;
+
+ case ICMP6_WRUREQUEST: /* ICMP6_FQDN_QUERY */
+ {
+ enum { WRU, FQDN } mode;
+
+ if (!V_icmp6_nodeinfo)
+ break;
+
+ if (icmp6len == sizeof(struct icmp6_hdr) + 4)
+ mode = WRU;
+ else if (icmp6len >= sizeof(struct icmp6_nodeinfo))
+ mode = FQDN;
+ else
+ goto badlen;
+
+ if (mode == FQDN) {
+#ifndef PULLDOWN_TEST
+ IP6_EXTHDR_CHECK(m, off, sizeof(struct icmp6_nodeinfo),
+ IPPROTO_DONE);
+#endif
+ n = m_copy(m, 0, M_COPYALL);
+ if (n)
+ n = ni6_input(n, off);
+ /* XXX meaningless if n == NULL */
+ noff = sizeof(struct ip6_hdr);
+ } else {
+ struct prison *pr;
+ u_char *p;
+ int maxlen, maxhlen, hlen;
+
+ /*
+ * XXX: this combination of flags is pointless,
+ * but should we keep this for compatibility?
+ */
+ if ((V_icmp6_nodeinfo & 5) != 5)
+ break;
+
+ if (code != 0)
+ goto badcode;
+ maxlen = sizeof(*nip6) + sizeof(*nicmp6) + 4;
+ if (maxlen >= MCLBYTES) {
+ /* Give up remote */
+ break;
+ }
+ MGETHDR(n, M_DONTWAIT, m->m_type);
+ if (n && maxlen > MHLEN) {
+ MCLGET(n, M_DONTWAIT);
+ if ((n->m_flags & M_EXT) == 0) {
+ m_free(n);
+ n = NULL;
+ }
+ }
+ if (n && !m_dup_pkthdr(n, m, M_DONTWAIT)) {
+ /*
+ * Previous code did a blind M_COPY_PKTHDR
+ * and said "just for rcvif". If true, then
+ * we could tolerate the dup failing (due to
+ * the deep copy of the tag chain). For now
+ * be conservative and just fail.
+ */
+ m_free(n);
+ n = NULL;
+ }
+ if (n == NULL) {
+ /* Give up remote */
+ break;
+ }
+ n->m_pkthdr.rcvif = NULL;
+ n->m_len = 0;
+ maxhlen = M_TRAILINGSPACE(n) - maxlen;
+ pr = curthread->td_ucred->cr_prison;
+ mtx_lock(&pr->pr_mtx);
+ hlen = strlen(pr->pr_hostname);
+ if (maxhlen > hlen)
+ maxhlen = hlen;
+ /*
+ * Copy IPv6 and ICMPv6 only.
+ */
+ nip6 = mtod(n, struct ip6_hdr *);
+ bcopy(ip6, nip6, sizeof(struct ip6_hdr));
+ nicmp6 = (struct icmp6_hdr *)(nip6 + 1);
+ bcopy(icmp6, nicmp6, sizeof(struct icmp6_hdr));
+ p = (u_char *)(nicmp6 + 1);
+ bzero(p, 4);
+ /* meaningless TTL */
+ bcopy(pr->pr_hostname, p + 4, maxhlen);
+ mtx_unlock(&pr->pr_mtx);
+ noff = sizeof(struct ip6_hdr);
+ n->m_pkthdr.len = n->m_len = sizeof(struct ip6_hdr) +
+ sizeof(struct icmp6_hdr) + 4 + maxhlen;
+ nicmp6->icmp6_type = ICMP6_WRUREPLY;
+ nicmp6->icmp6_code = 0;
+ }
+ if (n) {
+ ICMP6STAT_INC(icp6s_reflect);
+ ICMP6STAT_INC(icp6s_outhist[ICMP6_WRUREPLY]);
+ icmp6_reflect(n, noff);
+ }
+ break;
+ }
+
+ case ICMP6_WRUREPLY:
+ if (code != 0)
+ goto badcode;
+ break;
+
+ case ND_ROUTER_SOLICIT:
+ icmp6_ifstat_inc(ifp, ifs6_in_routersolicit);
+ if (code != 0)
+ goto badcode;
+ if (icmp6len < sizeof(struct nd_router_solicit))
+ goto badlen;
+ if ((n = m_copym(m, 0, M_COPYALL, M_DONTWAIT)) == NULL) {
+ /* give up local */
+ nd6_rs_input(m, off, icmp6len);
+ m = NULL;
+ goto freeit;
+ }
+ nd6_rs_input(n, off, icmp6len);
+ /* m stays. */
+ break;
+
+ case ND_ROUTER_ADVERT:
+ icmp6_ifstat_inc(ifp, ifs6_in_routeradvert);
+ if (code != 0)
+ goto badcode;
+ if (icmp6len < sizeof(struct nd_router_advert))
+ goto badlen;
+ if ((n = m_copym(m, 0, M_COPYALL, M_DONTWAIT)) == NULL) {
+ /* give up local */
+ nd6_ra_input(m, off, icmp6len);
+ m = NULL;
+ goto freeit;
+ }
+ nd6_ra_input(n, off, icmp6len);
+ /* m stays. */
+ break;
+
+ case ND_NEIGHBOR_SOLICIT:
+ icmp6_ifstat_inc(ifp, ifs6_in_neighborsolicit);
+ if (code != 0)
+ goto badcode;
+ if (icmp6len < sizeof(struct nd_neighbor_solicit))
+ goto badlen;
+ if ((n = m_copym(m, 0, M_COPYALL, M_DONTWAIT)) == NULL) {
+ /* give up local */
+ nd6_ns_input(m, off, icmp6len);
+ m = NULL;
+ goto freeit;
+ }
+ nd6_ns_input(n, off, icmp6len);
+ /* m stays. */
+ break;
+
+ case ND_NEIGHBOR_ADVERT:
+ icmp6_ifstat_inc(ifp, ifs6_in_neighboradvert);
+ if (code != 0)
+ goto badcode;
+ if (icmp6len < sizeof(struct nd_neighbor_advert))
+ goto badlen;
+ if ((n = m_copym(m, 0, M_COPYALL, M_DONTWAIT)) == NULL) {
+ /* give up local */
+ nd6_na_input(m, off, icmp6len);
+ m = NULL;
+ goto freeit;
+ }
+ nd6_na_input(n, off, icmp6len);
+ /* m stays. */
+ break;
+
+ case ND_REDIRECT:
+ icmp6_ifstat_inc(ifp, ifs6_in_redirect);
+ if (code != 0)
+ goto badcode;
+ if (icmp6len < sizeof(struct nd_redirect))
+ goto badlen;
+ if ((n = m_copym(m, 0, M_COPYALL, M_DONTWAIT)) == NULL) {
+ /* give up local */
+ icmp6_redirect_input(m, off);
+ m = NULL;
+ goto freeit;
+ }
+ icmp6_redirect_input(n, off);
+ /* m stays. */
+ break;
+
+ case ICMP6_ROUTER_RENUMBERING:
+ if (code != ICMP6_ROUTER_RENUMBERING_COMMAND &&
+ code != ICMP6_ROUTER_RENUMBERING_RESULT)
+ goto badcode;
+ if (icmp6len < sizeof(struct icmp6_router_renum))
+ goto badlen;
+ break;
+
+ default:
+ nd6log((LOG_DEBUG,
+ "icmp6_input: unknown type %d(src=%s, dst=%s, ifid=%d)\n",
+ icmp6->icmp6_type, ip6_sprintf(ip6bufs, &ip6->ip6_src),
+ ip6_sprintf(ip6bufd, &ip6->ip6_dst),
+ ifp ? ifp->if_index : 0));
+ if (icmp6->icmp6_type < ICMP6_ECHO_REQUEST) {
+ /* ICMPv6 error: MUST deliver it by spec... */
+ code = PRC_NCMDS;
+ /* deliver */
+ } else {
+ /* ICMPv6 informational: MUST not deliver */
+ break;
+ }
+ deliver:
+ if (icmp6_notify_error(&m, off, icmp6len, code) != 0) {
+ /* In this case, m should've been freed. */
+ return (IPPROTO_DONE);
+ }
+ break;
+
+ badcode:
+ ICMP6STAT_INC(icp6s_badcode);
+ break;
+
+ badlen:
+ ICMP6STAT_INC(icp6s_badlen);
+ break;
+ }
+
+ /* deliver the packet to appropriate sockets */
+ icmp6_rip6_input(&m, *offp);
+
+ return IPPROTO_DONE;
+
+ freeit:
+ m_freem(m);
+ return IPPROTO_DONE;
+}
+
+static int
+icmp6_notify_error(struct mbuf **mp, int off, int icmp6len, int code)
+{
+ struct mbuf *m = *mp;
+ struct icmp6_hdr *icmp6;
+ struct ip6_hdr *eip6;
+ u_int32_t notifymtu;
+ struct sockaddr_in6 icmp6src, icmp6dst;
+
+ if (icmp6len < sizeof(struct icmp6_hdr) + sizeof(struct ip6_hdr)) {
+ ICMP6STAT_INC(icp6s_tooshort);
+ goto freeit;
+ }
+#ifndef PULLDOWN_TEST
+ IP6_EXTHDR_CHECK(m, off,
+ sizeof(struct icmp6_hdr) + sizeof(struct ip6_hdr), -1);
+ icmp6 = (struct icmp6_hdr *)(mtod(m, caddr_t) + off);
+#else
+ IP6_EXTHDR_GET(icmp6, struct icmp6_hdr *, m, off,
+ sizeof(*icmp6) + sizeof(struct ip6_hdr));
+ if (icmp6 == NULL) {
+ ICMP6STAT_INC(icp6s_tooshort);
+ return (-1);
+ }
+#endif
+ eip6 = (struct ip6_hdr *)(icmp6 + 1);
+
+ /* Detect the upper level protocol */
+ {
+ void (*ctlfunc)(int, struct sockaddr *, void *);
+ u_int8_t nxt = eip6->ip6_nxt;
+ int eoff = off + sizeof(struct icmp6_hdr) +
+ sizeof(struct ip6_hdr);
+ struct ip6ctlparam ip6cp;
+ struct in6_addr *finaldst = NULL;
+ int icmp6type = icmp6->icmp6_type;
+ struct ip6_frag *fh;
+ struct ip6_rthdr *rth;
+ struct ip6_rthdr0 *rth0;
+ int rthlen;
+
+ while (1) { /* XXX: should avoid infinite loop explicitly? */
+ struct ip6_ext *eh;
+
+ switch (nxt) {
+ case IPPROTO_HOPOPTS:
+ case IPPROTO_DSTOPTS:
+ case IPPROTO_AH:
+#ifndef PULLDOWN_TEST
+ IP6_EXTHDR_CHECK(m, 0,
+ eoff + sizeof(struct ip6_ext), -1);
+ eh = (struct ip6_ext *)(mtod(m, caddr_t) + eoff);
+#else
+ IP6_EXTHDR_GET(eh, struct ip6_ext *, m,
+ eoff, sizeof(*eh));
+ if (eh == NULL) {
+ ICMP6STAT_INC(icp6s_tooshort);
+ return (-1);
+ }
+#endif
+
+ if (nxt == IPPROTO_AH)
+ eoff += (eh->ip6e_len + 2) << 2;
+ else
+ eoff += (eh->ip6e_len + 1) << 3;
+ nxt = eh->ip6e_nxt;
+ break;
+ case IPPROTO_ROUTING:
+ /*
+ * When the erroneous packet contains a
+ * routing header, we should examine the
+ * header to determine the final destination.
+ * Otherwise, we can't properly update
+ * information that depends on the final
+ * destination (e.g. path MTU).
+ */
+#ifndef PULLDOWN_TEST
+ IP6_EXTHDR_CHECK(m, 0, eoff + sizeof(*rth), -1);
+ rth = (struct ip6_rthdr *)
+ (mtod(m, caddr_t) + eoff);
+#else
+ IP6_EXTHDR_GET(rth, struct ip6_rthdr *, m,
+ eoff, sizeof(*rth));
+ if (rth == NULL) {
+ ICMP6STAT_INC(icp6s_tooshort);
+ return (-1);
+ }
+#endif
+ rthlen = (rth->ip6r_len + 1) << 3;
+ /*
+ * XXX: currently there is no
+ * officially defined type other
+ * than type-0.
+ * Note that if the segment left field
+ * is 0, all intermediate hops must
+ * have been passed.
+ */
+ if (rth->ip6r_segleft &&
+ rth->ip6r_type == IPV6_RTHDR_TYPE_0) {
+ int hops;
+
+#ifndef PULLDOWN_TEST
+ IP6_EXTHDR_CHECK(m, 0, eoff + rthlen, -1);
+ rth0 = (struct ip6_rthdr0 *)
+ (mtod(m, caddr_t) + eoff);
+#else
+ IP6_EXTHDR_GET(rth0,
+ struct ip6_rthdr0 *, m,
+ eoff, rthlen);
+ if (rth0 == NULL) {
+ ICMP6STAT_INC(icp6s_tooshort);
+ return (-1);
+ }
+#endif
+ /* just ignore a bogus header */
+ if ((rth0->ip6r0_len % 2) == 0 &&
+ (hops = rth0->ip6r0_len/2))
+ finaldst = (struct in6_addr *)(rth0 + 1) + (hops - 1);
+ }
+ eoff += rthlen;
+ nxt = rth->ip6r_nxt;
+ break;
+ case IPPROTO_FRAGMENT:
+#ifndef PULLDOWN_TEST
+ IP6_EXTHDR_CHECK(m, 0, eoff +
+ sizeof(struct ip6_frag), -1);
+ fh = (struct ip6_frag *)(mtod(m, caddr_t) +
+ eoff);
+#else
+ IP6_EXTHDR_GET(fh, struct ip6_frag *, m,
+ eoff, sizeof(*fh));
+ if (fh == NULL) {
+ ICMP6STAT_INC(icp6s_tooshort);
+ return (-1);
+ }
+#endif
+ /*
+ * Data after a fragment header is meaningless
+ * unless it is the first fragment, but
+ * we'll go to the notify label for path MTU
+ * discovery.
+ */
+ if (fh->ip6f_offlg & IP6F_OFF_MASK)
+ goto notify;
+
+ eoff += sizeof(struct ip6_frag);
+ nxt = fh->ip6f_nxt;
+ break;
+ default:
+ /*
+ * This case includes ESP and the No Next
+ * Header. In such cases going to the notify
+ * label does not have any meaning
+ * (i.e. ctlfunc will be NULL), but we go
+ * anyway since we might have to update
+ * path MTU information.
+ */
+ goto notify;
+ }
+ }
+ notify:
+#ifndef PULLDOWN_TEST
+ icmp6 = (struct icmp6_hdr *)(mtod(m, caddr_t) + off);
+#else
+ IP6_EXTHDR_GET(icmp6, struct icmp6_hdr *, m, off,
+ sizeof(*icmp6) + sizeof(struct ip6_hdr));
+ if (icmp6 == NULL) {
+ ICMP6STAT_INC(icp6s_tooshort);
+ return (-1);
+ }
+#endif
+
+ /*
+ * retrieve parameters from the inner IPv6 header, and convert
+ * them into sockaddr structures.
+ * XXX: there is no guarantee that the source or destination
+ * addresses of the inner packet are in the same scope as
+ * the addresses of the icmp packet. But there is no other
+ * way to determine the zone.
+ */
+ eip6 = (struct ip6_hdr *)(icmp6 + 1);
+
+ bzero(&icmp6dst, sizeof(icmp6dst));
+ icmp6dst.sin6_len = sizeof(struct sockaddr_in6);
+ icmp6dst.sin6_family = AF_INET6;
+ if (finaldst == NULL)
+ icmp6dst.sin6_addr = eip6->ip6_dst;
+ else
+ icmp6dst.sin6_addr = *finaldst;
+ if (in6_setscope(&icmp6dst.sin6_addr, m->m_pkthdr.rcvif, NULL))
+ goto freeit;
+ bzero(&icmp6src, sizeof(icmp6src));
+ icmp6src.sin6_len = sizeof(struct sockaddr_in6);
+ icmp6src.sin6_family = AF_INET6;
+ icmp6src.sin6_addr = eip6->ip6_src;
+ if (in6_setscope(&icmp6src.sin6_addr, m->m_pkthdr.rcvif, NULL))
+ goto freeit;
+ icmp6src.sin6_flowinfo =
+ (eip6->ip6_flow & IPV6_FLOWLABEL_MASK);
+
+ if (finaldst == NULL)
+ finaldst = &eip6->ip6_dst;
+ ip6cp.ip6c_m = m;
+ ip6cp.ip6c_icmp6 = icmp6;
+ ip6cp.ip6c_ip6 = (struct ip6_hdr *)(icmp6 + 1);
+ ip6cp.ip6c_off = eoff;
+ ip6cp.ip6c_finaldst = finaldst;
+ ip6cp.ip6c_src = &icmp6src;
+ ip6cp.ip6c_nxt = nxt;
+
+ if (icmp6type == ICMP6_PACKET_TOO_BIG) {
+ notifymtu = ntohl(icmp6->icmp6_mtu);
+ ip6cp.ip6c_cmdarg = (void *)&notifymtu;
+ icmp6_mtudisc_update(&ip6cp, 1); /*XXX*/
+ }
+
+ ctlfunc = (void (*)(int, struct sockaddr *, void *))
+ (inet6sw[ip6_protox[nxt]].pr_ctlinput);
+ if (ctlfunc) {
+ (void) (*ctlfunc)(code, (struct sockaddr *)&icmp6dst,
+ &ip6cp);
+ }
+ }
+ *mp = m;
+ return (0);
+
+ freeit:
+ m_freem(m);
+ return (-1);
+}
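
The parsing loop above steps through the extension headers with the
standard IPv6 length encoding: ip6e_len counts 8-octet units beyond the
first 8 octets, except for AH, where it counts 4-octet units beyond the
first two. A sketch of the arithmetic:

    #include <stdio.h>

    /* Size in bytes of an extension header given its ip6e_len field. */
    static int
    ext_hdr_bytes(int is_ah, unsigned ip6e_len)
    {
        return (is_ah ? (ip6e_len + 2) << 2 : (ip6e_len + 1) << 3);
    }

    int
    main(void)
    {
        printf("%d\n", ext_hdr_bytes(0, 0));    /* minimal HBH: 8 */
        printf("%d\n", ext_hdr_bytes(1, 4));    /* AH w/ 96-bit ICV: 24 */
        return (0);
    }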
+
+void
+icmp6_mtudisc_update(struct ip6ctlparam *ip6cp, int validated)
+{
+ struct in6_addr *dst = ip6cp->ip6c_finaldst;
+ struct icmp6_hdr *icmp6 = ip6cp->ip6c_icmp6;
+ struct mbuf *m = ip6cp->ip6c_m; /* will be necessary for scope issue */
+ u_int mtu = ntohl(icmp6->icmp6_mtu);
+ struct in_conninfo inc;
+
+#if 0
+ /*
+ * RFC2460 section 5, last paragraph.
+ * even though minimum link MTU for IPv6 is IPV6_MMTU,
+ * we may see ICMPv6 too big with mtu < IPV6_MMTU
+ * due to packet translator in the middle.
+ * see ip6_output() and ip6_getpmtu() "alwaysfrag" case for
+ * special handling.
+ */
+ if (mtu < IPV6_MMTU)
+ return;
+#endif
+
+ /*
+ * we reject ICMPv6 too big with abnormally small value.
+ * XXX what is the good definition of "abnormally small"?
+ */
+ if (mtu < sizeof(struct ip6_hdr) + sizeof(struct ip6_frag) + 8)
+ return;
+
+ if (!validated)
+ return;
+
+ /*
+ * In case the suggested mtu is less than IPV6_MMTU, we
+ * only need to remember that it was for the above-mentioned
+ * "alwaysfrag" case.
+ * Try to be as close to the spec as possible.
+ */
+ if (mtu < IPV6_MMTU)
+ mtu = IPV6_MMTU - 8;
+
+ bzero(&inc, sizeof(inc));
+ inc.inc_flags |= INC_ISIPV6;
+ inc.inc6_faddr = *dst;
+ if (in6_setscope(&inc.inc6_faddr, m->m_pkthdr.rcvif, NULL))
+ return;
+
+ if (mtu < tcp_maxmtu6(&inc, NULL)) {
+ tcp_hc_updatemtu(&inc, mtu);
+ ICMP6STAT_INC(icp6s_pmtuchg);
+ }
+}
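
icmp6_mtudisc_update() therefore applies two clamps: advertisements
smaller than an IPv6 header plus a fragment header plus 8 bytes of payload
are rejected outright, and values below the 1280-byte minimum link MTU are
recorded as IPV6_MMTU - 8 to cover the "alwaysfrag" case. A sketch of the
policy:

    #include <stdio.h>

    #define IPV6_MMTU 1280

    /* Returns the MTU to record, or -1 to reject the advertisement. */
    static int
    clamp_mtu(unsigned mtu)
    {
        if (mtu < 40 + 8 + 8)       /* ip6_hdr + ip6_frag + 8 bytes */
            return (-1);            /* abnormally small: ignore */
        if (mtu < IPV6_MMTU)
            return (IPV6_MMTU - 8); /* remember only the alwaysfrag hint */
        return ((int)mtu);
    }

    int
    main(void)
    {
        printf("%d\n", clamp_mtu(40));      /* -1: rejected */
        printf("%d\n", clamp_mtu(1000));    /* 1272 */
        printf("%d\n", clamp_mtu(1500));    /* 1500 */
        return (0);
    }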
+
+/*
+ * Process a Node Information Query packet, based on
+ * draft-ietf-ipngwg-icmp-name-lookups-07.
+ *
+ * Spec incompatibilities:
+ * - IPv6 Subject address handling
+ * - IPv4 Subject address handling support missing
+ * - Proxy reply (answer even if it's not for me)
+ * - joins NI group address at in6_ifattach() time only, does not cope
+ * with hostname changes by sethostname(3)
+ */
+static struct mbuf *
+ni6_input(struct mbuf *m, int off)
+{
+ struct icmp6_nodeinfo *ni6, *nni6;
+ struct mbuf *n = NULL;
+ struct prison *pr;
+ u_int16_t qtype;
+ int subjlen;
+ int replylen = sizeof(struct ip6_hdr) + sizeof(struct icmp6_nodeinfo);
+ struct ni_reply_fqdn *fqdn;
+ int addrs; /* for NI_QTYPE_NODEADDR */
+ struct ifnet *ifp = NULL; /* for NI_QTYPE_NODEADDR */
+ struct in6_addr in6_subj; /* subject address */
+ struct ip6_hdr *ip6;
+ int oldfqdn = 0; /* if 1, return pascal string (03 draft) */
+ char *subj = NULL;
+ struct in6_ifaddr *ia6 = NULL;
+
+ ip6 = mtod(m, struct ip6_hdr *);
+#ifndef PULLDOWN_TEST
+ ni6 = (struct icmp6_nodeinfo *)(mtod(m, caddr_t) + off);
+#else
+ IP6_EXTHDR_GET(ni6, struct icmp6_nodeinfo *, m, off, sizeof(*ni6));
+ if (ni6 == NULL) {
+ /* m is already reclaimed */
+ return (NULL);
+ }
+#endif
+
+ /*
+ * Validate IPv6 source address.
+ * The default configuration MUST be to refuse answering queries from
+ * global-scope addresses according to RFC4602.
+ * Notes:
+ * - it's not very clear what "refuse" means; this implementation
+ * simply drops it.
+ * - it's not very easy to identify global-scope (unicast) addresses
+ * since there are many prefixes for them. It should be safer
+ * and in practice sufficient to check "all" but loopback and
+ * link-local (note that site-local unicast was deprecated and
+ * ULA is defined as global scope-wise)
+ */
+ if ((V_icmp6_nodeinfo & ICMP6_NODEINFO_GLOBALOK) == 0 &&
+ !IN6_IS_ADDR_LOOPBACK(&ip6->ip6_src) &&
+ !IN6_IS_ADDR_LINKLOCAL(&ip6->ip6_src))
+ goto bad;
+
+ /*
+ * Validate IPv6 destination address.
+ *
+ * The Responder must discard the Query without further processing
+ * unless it is one of the Responder's unicast or anycast addresses, or
+ * a link-local scope multicast address which the Responder has joined.
+ * [RFC4602, Section 5.]
+ */
+ if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst)) {
+ if (!IN6_IS_ADDR_MC_LINKLOCAL(&ip6->ip6_dst))
+ goto bad;
+ /* else it's a link-local multicast, fine */
+ } else { /* unicast or anycast */
+ if ((ia6 = ip6_getdstifaddr(m)) == NULL)
+ goto bad; /* XXX impossible */
+
+ if ((ia6->ia6_flags & IN6_IFF_TEMPORARY) &&
+ !(V_icmp6_nodeinfo & ICMP6_NODEINFO_TMPADDROK)) {
+ ifa_free(&ia6->ia_ifa);
+ nd6log((LOG_DEBUG, "ni6_input: ignore node info to "
+ "a temporary address in %s:%d",
+ __FILE__, __LINE__));
+ goto bad;
+ }
+ ifa_free(&ia6->ia_ifa);
+ }
+
+ /* validate query Subject field. */
+ qtype = ntohs(ni6->ni_qtype);
+ subjlen = m->m_pkthdr.len - off - sizeof(struct icmp6_nodeinfo);
+ switch (qtype) {
+ case NI_QTYPE_NOOP:
+ case NI_QTYPE_SUPTYPES:
+ /* 07 draft */
+ if (ni6->ni_code == ICMP6_NI_SUBJ_FQDN && subjlen == 0)
+ break;
+ /* FALLTHROUGH */
+ case NI_QTYPE_FQDN:
+ case NI_QTYPE_NODEADDR:
+ case NI_QTYPE_IPV4ADDR:
+ switch (ni6->ni_code) {
+ case ICMP6_NI_SUBJ_IPV6:
+#if ICMP6_NI_SUBJ_IPV6 != 0
+ case 0:
+#endif
+ /*
+ * backward compatibility - try to accept 03 draft
+ * format, where no Subject is present.
+ */
+ if (qtype == NI_QTYPE_FQDN && ni6->ni_code == 0 &&
+ subjlen == 0) {
+ oldfqdn++;
+ break;
+ }
+#if ICMP6_NI_SUBJ_IPV6 != 0
+ if (ni6->ni_code != ICMP6_NI_SUBJ_IPV6)
+ goto bad;
+#endif
+
+ if (subjlen != sizeof(struct in6_addr))
+ goto bad;
+
+ /*
+ * Validate Subject address.
+ *
+ * Not sure what exactly "address belongs to the node"
+ * means in the spec, is it just unicast, or what?
+ *
+ * At this moment we consider Subject address as
+ * "belong to the node" if the Subject address equals
+ * to the IPv6 destination address; validation for
+ * IPv6 destination address should have done enough
+ * check for us.
+ *
+ * We do not do proxy at this moment.
+ */
+ /* m_pulldown instead of copy? */
+ m_copydata(m, off + sizeof(struct icmp6_nodeinfo),
+ subjlen, (caddr_t)&in6_subj);
+ if (in6_setscope(&in6_subj, m->m_pkthdr.rcvif, NULL))
+ goto bad;
+
+ subj = (char *)&in6_subj;
+ if (IN6_ARE_ADDR_EQUAL(&ip6->ip6_dst, &in6_subj))
+ break;
+
+ /*
+ * XXX if we are to allow other cases, we should really
+ * be careful about scope here.
+ * basically, we should disallow queries toward IPv6
+ * destination X with subject Y,
+ * if scope(X) > scope(Y).
+ * if we allow scope(X) > scope(Y), it will result in
+ * information leakage across scope boundary.
+ */
+ goto bad;
+
+ case ICMP6_NI_SUBJ_FQDN:
+ /*
+ * Validate Subject name with gethostname(3).
+ *
+ * The behavior may need some debate, since:
+ * - we are not sure if the node has FQDN as
+ * hostname (returned by gethostname(3)).
+ * - the code does wildcard match for truncated names.
+ * however, we are not sure if we want to perform
+ * wildcard match, if gethostname(3) side has
+ * truncated hostname.
+ */
+ pr = curthread->td_ucred->cr_prison;
+ mtx_lock(&pr->pr_mtx);
+ n = ni6_nametodns(pr->pr_hostname,
+ strlen(pr->pr_hostname), 0);
+ mtx_unlock(&pr->pr_mtx);
+ if (!n || n->m_next || n->m_len == 0)
+ goto bad;
+ IP6_EXTHDR_GET(subj, char *, m,
+ off + sizeof(struct icmp6_nodeinfo), subjlen);
+ if (subj == NULL)
+ goto bad;
+ if (!ni6_dnsmatch(subj, subjlen, mtod(n, const char *),
+ n->m_len)) {
+ goto bad;
+ }
+ m_freem(n);
+ n = NULL;
+ break;
+
+ case ICMP6_NI_SUBJ_IPV4: /* XXX: to be implemented? */
+ default:
+ goto bad;
+ }
+ break;
+ }
+
+ /* refuse based on configuration. XXX ICMP6_NI_REFUSED? */
+ switch (qtype) {
+ case NI_QTYPE_FQDN:
+ if ((V_icmp6_nodeinfo & ICMP6_NODEINFO_FQDNOK) == 0)
+ goto bad;
+ break;
+ case NI_QTYPE_NODEADDR:
+ case NI_QTYPE_IPV4ADDR:
+ if ((V_icmp6_nodeinfo & ICMP6_NODEINFO_NODEADDROK) == 0)
+ goto bad;
+ break;
+ }
+
+ /* guess reply length */
+ switch (qtype) {
+ case NI_QTYPE_NOOP:
+ break; /* no reply data */
+ case NI_QTYPE_SUPTYPES:
+ replylen += sizeof(u_int32_t);
+ break;
+ case NI_QTYPE_FQDN:
+ /* XXX will append an mbuf */
+ replylen += offsetof(struct ni_reply_fqdn, ni_fqdn_namelen);
+ break;
+ case NI_QTYPE_NODEADDR:
+ addrs = ni6_addrs(ni6, m, &ifp, (struct in6_addr *)subj);
+ if ((replylen += addrs * (sizeof(struct in6_addr) +
+ sizeof(u_int32_t))) > MCLBYTES)
+ replylen = MCLBYTES; /* XXX: will truncate pkt later */
+ break;
+ case NI_QTYPE_IPV4ADDR:
+ /* unsupported - should respond with unknown Qtype? */
+ break;
+ default:
+ /*
+ * XXX: We must return a reply with the ICMP6 code
+ * `unknown Qtype' in this case. However we regard the case
+ * as an FQDN query for backward compatibility.
+ * Older versions set a random value to this field,
+ * so it rarely varies in the defined qtypes.
+ * But the mechanism is not reliable...
+ * maybe we should obsolete older versions.
+ */
+ qtype = NI_QTYPE_FQDN;
+ /* XXX will append an mbuf */
+ replylen += offsetof(struct ni_reply_fqdn, ni_fqdn_namelen);
+ oldfqdn++;
+ break;
+ }
+
+ /* allocate an mbuf to reply. */
+ MGETHDR(n, M_DONTWAIT, m->m_type);
+ if (n == NULL) {
+ m_freem(m);
+ return (NULL);
+ }
+ M_MOVE_PKTHDR(n, m); /* just for recvif */
+ if (replylen > MHLEN) {
+ if (replylen > MCLBYTES) {
+ /*
+ * XXX: should we try to allocate more? But MCLBYTES
+ * is probably much larger than IPV6_MMTU...
+ */
+ goto bad;
+ }
+ MCLGET(n, M_DONTWAIT);
+ if ((n->m_flags & M_EXT) == 0) {
+ goto bad;
+ }
+ }
+ n->m_pkthdr.len = n->m_len = replylen;
+
+ /* copy mbuf header and IPv6 + Node Information base headers */
+ bcopy(mtod(m, caddr_t), mtod(n, caddr_t), sizeof(struct ip6_hdr));
+ nni6 = (struct icmp6_nodeinfo *)(mtod(n, struct ip6_hdr *) + 1);
+ bcopy((caddr_t)ni6, (caddr_t)nni6, sizeof(struct icmp6_nodeinfo));
+
+ /* qtype dependent procedure */
+ switch (qtype) {
+ case NI_QTYPE_NOOP:
+ nni6->ni_code = ICMP6_NI_SUCCESS;
+ nni6->ni_flags = 0;
+ break;
+ case NI_QTYPE_SUPTYPES:
+ {
+ u_int32_t v;
+ nni6->ni_code = ICMP6_NI_SUCCESS;
+ nni6->ni_flags = htons(0x0000); /* raw bitmap */
+ /* supports NOOP, SUPTYPES, FQDN, and NODEADDR */
+ v = (u_int32_t)htonl(0x0000000f);
+ bcopy(&v, nni6 + 1, sizeof(u_int32_t));
+ break;
+ }
+ case NI_QTYPE_FQDN:
+ nni6->ni_code = ICMP6_NI_SUCCESS;
+ fqdn = (struct ni_reply_fqdn *)(mtod(n, caddr_t) +
+ sizeof(struct ip6_hdr) + sizeof(struct icmp6_nodeinfo));
+ nni6->ni_flags = 0; /* XXX: meaningless TTL */
+ fqdn->ni_fqdn_ttl = 0; /* ditto. */
+ /*
+ * XXX do we really have FQDN in hostname?
+ */
+ pr = curthread->td_ucred->cr_prison;
+ mtx_lock(&pr->pr_mtx);
+ n->m_next = ni6_nametodns(pr->pr_hostname,
+ strlen(pr->pr_hostname), oldfqdn);
+ mtx_unlock(&pr->pr_mtx);
+ if (n->m_next == NULL)
+ goto bad;
+ /* XXX we assume that n->m_next is not a chain */
+ if (n->m_next->m_next != NULL)
+ goto bad;
+ n->m_pkthdr.len += n->m_next->m_len;
+ break;
+ case NI_QTYPE_NODEADDR:
+ {
+ int lenlim, copied;
+
+ nni6->ni_code = ICMP6_NI_SUCCESS;
+ n->m_pkthdr.len = n->m_len =
+ sizeof(struct ip6_hdr) + sizeof(struct icmp6_nodeinfo);
+ lenlim = M_TRAILINGSPACE(n);
+ copied = ni6_store_addrs(ni6, nni6, ifp, lenlim);
+ /* XXX: reset mbuf length */
+ n->m_pkthdr.len = n->m_len = sizeof(struct ip6_hdr) +
+ sizeof(struct icmp6_nodeinfo) + copied;
+ break;
+ }
+ default:
+ break; /* XXX impossible! */
+ }
+
+ nni6->ni_type = ICMP6_NI_REPLY;
+ m_freem(m);
+ return (n);
+
+ bad:
+ m_freem(m);
+ if (n)
+ m_freem(n);
+ return (NULL);
+}
+
+/*
+ * Make an mbuf with a DNS-encoded string. No compression support.
+ *
+ * XXX names with less than 2 dots (like "foo" or "foo.section") will be
+ * treated as a truncated name (two \0 at the end). This is a wild guess.
+ *
+ * old - return pascal string if non-zero
+ */
+static struct mbuf *
+ni6_nametodns(const char *name, int namelen, int old)
+{
+ struct mbuf *m;
+ char *cp, *ep;
+ const char *p, *q;
+ int i, len, nterm;
+
+ if (old)
+ len = namelen + 1;
+ else
+ len = MCLBYTES;
+
+ /* because MAXHOSTNAMELEN is usually 256, we use cluster mbuf */
+ MGET(m, M_DONTWAIT, MT_DATA);
+ if (m && len > MLEN) {
+ MCLGET(m, M_DONTWAIT);
+ if ((m->m_flags & M_EXT) == 0)
+ goto fail;
+ }
+ if (!m)
+ goto fail;
+ m->m_next = NULL;
+
+ if (old) {
+ m->m_len = len;
+ *mtod(m, char *) = namelen;
+ bcopy(name, mtod(m, char *) + 1, namelen);
+ return m;
+ } else {
+ m->m_len = 0;
+ cp = mtod(m, char *);
+ ep = mtod(m, char *) + M_TRAILINGSPACE(m);
+
+ /* if not certain about my name, return empty buffer */
+ if (namelen == 0)
+ return m;
+
+ /*
+ * guess if it looks like shortened hostname, or FQDN.
+ * shortened hostname needs two trailing "\0".
+ */
+ i = 0;
+ for (p = name; p < name + namelen; p++) {
+ if (*p && *p == '.')
+ i++;
+ }
+ if (i < 2)
+ nterm = 2;
+ else
+ nterm = 1;
+
+ p = name;
+ while (cp < ep && p < name + namelen) {
+ i = 0;
+ for (q = p; q < name + namelen && *q && *q != '.'; q++)
+ i++;
+ /* result does not fit into mbuf */
+ if (cp + i + 1 >= ep)
+ goto fail;
+ /*
+ * DNS label length restriction, RFC1035 page 8.
+ * "i == 0" case is included here to avoid returning
+ * 0-length label on "foo..bar".
+ */
+ if (i <= 0 || i >= 64)
+ goto fail;
+ *cp++ = i;
+ bcopy(p, cp, i);
+ cp += i;
+ p = q;
+ if (p < name + namelen && *p == '.')
+ p++;
+ }
+ /* termination */
+ if (cp + nterm >= ep)
+ goto fail;
+ while (nterm-- > 0)
+ *cp++ = '\0';
+ m->m_len = cp - mtod(m, char *);
+ return m;
+ }
+
+ panic("should not reach here");
+ /* NOTREACHED */
+
+ fail:
+ if (m)
+ m_freem(m);
+ return NULL;
+}
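
The wire format ni6_nametodns() produces is the standard uncompressed DNS
name encoding: each dot-separated label becomes a length byte followed by
the label itself, closed by a zero byte (two zero bytes when the name is
guessed to be a truncated hostname). A user-space sketch of the encoder;
dns_encode is a hypothetical helper, not a kernel function:

    #include <stdio.h>
    #include <string.h>

    /* Encode a dotted name; returns bytes written or -1 on error. */
    static int
    dns_encode(const char *name, unsigned char *out, int outlen)
    {
        const char *p = name, *end = name + strlen(name);
        int w = 0;

        while (p < end) {
            const char *q = p;
            int l;

            while (q < end && *q != '.')
                q++;
            l = (int)(q - p);
            /* RFC 1035 label limits: 1..63 bytes, plus terminator room */
            if (l <= 0 || l >= 64 || w + 1 + l + 1 > outlen)
                return (-1);
            out[w++] = (unsigned char)l;
            memcpy(out + w, p, l);
            w += l;
            p = (q < end) ? q + 1 : q;
        }
        out[w++] = 0;                       /* root terminator */
        return (w);
    }

    int
    main(void)
    {
        unsigned char buf[64];
        int i, n = dns_encode("foo.example.com", buf, sizeof(buf));

        for (i = 0; i < n; i++)
            printf("%02x ", buf[i]);
        printf("\n");   /* 03 66 6f 6f 07 65 78 61 6d 70 6c 65 ... 00 */
        return (0);
    }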
+
+/*
+ * Check if two DNS-encoded strings match. Takes care of the truncated
+ * form (with \0\0 at the end). No compression support.
+ * XXX upper/lowercase match (see RFC2065)
+ */
+static int
+ni6_dnsmatch(const char *a, int alen, const char *b, int blen)
+{
+ const char *a0, *b0;
+ int l;
+
+ /* simplest case - need validation? */
+ if (alen == blen && bcmp(a, b, alen) == 0)
+ return 1;
+
+ a0 = a;
+ b0 = b;
+
+ /* termination is mandatory */
+ if (alen < 2 || blen < 2)
+ return 0;
+ if (a0[alen - 1] != '\0' || b0[blen - 1] != '\0')
+ return 0;
+ alen--;
+ blen--;
+
+ while (a - a0 < alen && b - b0 < blen) {
+ if (a - a0 + 1 > alen || b - b0 + 1 > blen)
+ return 0;
+
+ if ((signed char)a[0] < 0 || (signed char)b[0] < 0)
+ return 0;
+ /* we don't support compression yet */
+ if (a[0] >= 64 || b[0] >= 64)
+ return 0;
+
+ /* truncated case */
+ if (a[0] == 0 && a - a0 == alen - 1)
+ return 1;
+ if (b[0] == 0 && b - b0 == blen - 1)
+ return 1;
+ if (a[0] == 0 || b[0] == 0)
+ return 0;
+
+ if (a[0] != b[0])
+ return 0;
+ l = a[0];
+ if (a - a0 + 1 + l > alen || b - b0 + 1 + l > blen)
+ return 0;
+ if (bcmp(a + 1, b + 1, l) != 0)
+ return 0;
+
+ a += 1 + l;
+ b += 1 + l;
+ }
+
+ if (a - a0 == alen && b - b0 == blen)
+ return 1;
+ else
+ return 0;
+}
+
+/*
+ * calculate the number of addresses to be returned in the node info reply.
+ */
+static int
+ni6_addrs(struct icmp6_nodeinfo *ni6, struct mbuf *m, struct ifnet **ifpp,
+ struct in6_addr *subj)
+{
+ struct ifnet *ifp;
+ struct in6_ifaddr *ifa6;
+ struct ifaddr *ifa;
+ int addrs = 0, addrsofif, iffound = 0;
+ int niflags = ni6->ni_flags;
+
+ if ((niflags & NI_NODEADDR_FLAG_ALL) == 0) {
+ switch (ni6->ni_code) {
+ case ICMP6_NI_SUBJ_IPV6:
+ if (subj == NULL) /* must be impossible... */
+ return (0);
+ break;
+ default:
+ /*
+ * XXX: we only support IPv6 subject address for
+ * this Qtype.
+ */
+ return (0);
+ }
+ }
+
+ IFNET_RLOCK_NOSLEEP();
+ for (ifp = TAILQ_FIRST(&V_ifnet); ifp; ifp = TAILQ_NEXT(ifp, if_list)) {
+ addrsofif = 0;
+ IF_ADDR_LOCK(ifp);
+ TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
+ if (ifa->ifa_addr->sa_family != AF_INET6)
+ continue;
+ ifa6 = (struct in6_ifaddr *)ifa;
+
+ if ((niflags & NI_NODEADDR_FLAG_ALL) == 0 &&
+ IN6_ARE_ADDR_EQUAL(subj, &ifa6->ia_addr.sin6_addr))
+ iffound = 1;
+
+ /*
+ * IPv4-mapped addresses can only be returned by a
+ * Node Information proxy, since they represent
+ * addresses of IPv4-only nodes, which perforce do
+ * not implement this protocol.
+ * [icmp-name-lookups-07, Section 5.4]
+ * So we don't support NI_NODEADDR_FLAG_COMPAT in
+ * this function at this moment.
+ */
+
+ /* What do we have to do about ::1? */
+ switch (in6_addrscope(&ifa6->ia_addr.sin6_addr)) {
+ case IPV6_ADDR_SCOPE_LINKLOCAL:
+ if ((niflags & NI_NODEADDR_FLAG_LINKLOCAL) == 0)
+ continue;
+ break;
+ case IPV6_ADDR_SCOPE_SITELOCAL:
+ if ((niflags & NI_NODEADDR_FLAG_SITELOCAL) == 0)
+ continue;
+ break;
+ case IPV6_ADDR_SCOPE_GLOBAL:
+ if ((niflags & NI_NODEADDR_FLAG_GLOBAL) == 0)
+ continue;
+ break;
+ default:
+ continue;
+ }
+
+ /*
+ * check if anycast is okay.
+ * XXX: just experimental. not in the spec.
+ */
+ if ((ifa6->ia6_flags & IN6_IFF_ANYCAST) != 0 &&
+ (niflags & NI_NODEADDR_FLAG_ANYCAST) == 0)
+ continue; /* we need only unicast addresses */
+ if ((ifa6->ia6_flags & IN6_IFF_TEMPORARY) != 0 &&
+ (V_icmp6_nodeinfo & ICMP6_NODEINFO_TMPADDROK) == 0) {
+ continue;
+ }
+ addrsofif++; /* count the address */
+ }
+ IF_ADDR_UNLOCK(ifp);
+ if (iffound) {
+ *ifpp = ifp;
+ IFNET_RUNLOCK_NOSLEEP();
+ return (addrsofif);
+ }
+
+ addrs += addrsofif;
+ }
+ IFNET_RUNLOCK_NOSLEEP();
+
+ return (addrs);
+}
+
+static int
+ni6_store_addrs(struct icmp6_nodeinfo *ni6, struct icmp6_nodeinfo *nni6,
+ struct ifnet *ifp0, int resid)
+{
+ struct ifnet *ifp = ifp0 ? ifp0 : TAILQ_FIRST(&V_ifnet);
+ struct in6_ifaddr *ifa6;
+ struct ifaddr *ifa;
+ struct ifnet *ifp_dep = NULL;
+ int copied = 0, allow_deprecated = 0;
+ u_char *cp = (u_char *)(nni6 + 1);
+ int niflags = ni6->ni_flags;
+ u_int32_t ltime;
+
+ if (ifp0 == NULL && !(niflags & NI_NODEADDR_FLAG_ALL))
+ return (0); /* needless to copy */
+
+ IFNET_RLOCK_NOSLEEP();
+ again:
+
+ for (; ifp; ifp = TAILQ_NEXT(ifp, if_list)) {
+ IF_ADDR_LOCK(ifp);
+ TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
+ if (ifa->ifa_addr->sa_family != AF_INET6)
+ continue;
+ ifa6 = (struct in6_ifaddr *)ifa;
+
+ if ((ifa6->ia6_flags & IN6_IFF_DEPRECATED) != 0 &&
+ allow_deprecated == 0) {
+ /*
+ * preferred addresses should be put before
+ * deprecated addresses.
+ */
+
+ /* record the interface for later search */
+ if (ifp_dep == NULL)
+ ifp_dep = ifp;
+
+ continue;
+ } else if ((ifa6->ia6_flags & IN6_IFF_DEPRECATED) == 0 &&
+ allow_deprecated != 0)
+ continue; /* we now collect deprecated addrs */
+
+ /* What do we have to do about ::1? */
+ switch (in6_addrscope(&ifa6->ia_addr.sin6_addr)) {
+ case IPV6_ADDR_SCOPE_LINKLOCAL:
+ if ((niflags & NI_NODEADDR_FLAG_LINKLOCAL) == 0)
+ continue;
+ break;
+ case IPV6_ADDR_SCOPE_SITELOCAL:
+ if ((niflags & NI_NODEADDR_FLAG_SITELOCAL) == 0)
+ continue;
+ break;
+ case IPV6_ADDR_SCOPE_GLOBAL:
+ if ((niflags & NI_NODEADDR_FLAG_GLOBAL) == 0)
+ continue;
+ break;
+ default:
+ continue;
+ }
+
+ /*
+ * check if anycast is okay.
+ * XXX: just experimental. not in the spec.
+ */
+ if ((ifa6->ia6_flags & IN6_IFF_ANYCAST) != 0 &&
+ (niflags & NI_NODEADDR_FLAG_ANYCAST) == 0)
+ continue;
+ if ((ifa6->ia6_flags & IN6_IFF_TEMPORARY) != 0 &&
+ (V_icmp6_nodeinfo & ICMP6_NODEINFO_TMPADDROK) == 0) {
+ continue;
+ }
+
+ /* now we can copy the address */
+ if (resid < sizeof(struct in6_addr) +
+ sizeof(u_int32_t)) {
+ IF_ADDR_UNLOCK(ifp);
+ /*
+ * Give up copying any further addresses.
+ * Set the truncate flag and return.
+ */
+ nni6->ni_flags |= NI_NODEADDR_FLAG_TRUNCATE;
+ IFNET_RUNLOCK_NOSLEEP();
+ return (copied);
+ }
+
+ /*
+ * Set the TTL of the address.
+ * The TTL value should be one of the following
+ * according to the specification:
+ *
+ * 1. The remaining lifetime of a DHCP lease on the
+ * address, or
+ * 2. The remaining Valid Lifetime of a prefix from
+ * which the address was derived through Stateless
+ * Autoconfiguration.
+ *
+ * Note that we currently do not support stateful
+ * address configuration by DHCPv6, so the former
+ * case can't happen.
+ */
+ if (ifa6->ia6_lifetime.ia6t_expire == 0)
+ ltime = ND6_INFINITE_LIFETIME;
+ else {
+ if (ifa6->ia6_lifetime.ia6t_expire >
+ time_second)
+ ltime = htonl(ifa6->ia6_lifetime.ia6t_expire - time_second);
+ else
+ ltime = 0;
+ }
+
+ bcopy(&ltime, cp, sizeof(u_int32_t));
+ cp += sizeof(u_int32_t);
+
+ /* copy the address itself */
+ bcopy(&ifa6->ia_addr.sin6_addr, cp,
+ sizeof(struct in6_addr));
+ in6_clearscope((struct in6_addr *)cp); /* XXX */
+ cp += sizeof(struct in6_addr);
+
+ resid -= (sizeof(struct in6_addr) + sizeof(u_int32_t));
+ copied += (sizeof(struct in6_addr) + sizeof(u_int32_t));
+ }
+ IF_ADDR_UNLOCK(ifp);
+ if (ifp0) /* we need search only on the specified IF */
+ break;
+ }
+
+ if (allow_deprecated == 0 && ifp_dep != NULL) {
+ ifp = ifp_dep;
+ allow_deprecated = 1;
+
+ goto again;
+ }
+
+ IFNET_RUNLOCK_NOSLEEP();
+
+ return (copied);
+}
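
The per-address TTL written by ni6_store_addrs() is the remaining valid
lifetime in seconds: an expiry of 0 encodes an infinite lifetime, an
already-expired address reports 0, and the value goes out in network byte
order. A sketch; ttl_for is a hypothetical helper:

    #include <stdio.h>
    #include <time.h>
    #include <arpa/inet.h>

    #define ND6_INFINITE_LIFETIME 0xffffffffU

    static unsigned
    ttl_for(time_t expire, time_t now)
    {
        if (expire == 0)
            return (ND6_INFINITE_LIFETIME); /* same in either byte order */
        return (expire > now ? htonl((unsigned)(expire - now)) : 0);
    }

    int
    main(void)
    {
        time_t now = time(NULL);

        printf("%u\n", (unsigned)ntohl(ttl_for(now + 600, now))); /* 600 */
        printf("%#x\n", ttl_for(0, now));   /* 0xffffffff */
        return (0);
    }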
+
+/*
+ * XXX almost dup'ed code with rip6_input.
+ */
+static int
+icmp6_rip6_input(struct mbuf **mp, int off)
+{
+ struct mbuf *m = *mp;
+ struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *);
+ struct inpcb *in6p;
+ struct inpcb *last = NULL;
+ struct sockaddr_in6 fromsa;
+ struct icmp6_hdr *icmp6;
+ struct mbuf *opts = NULL;
+
+#ifndef PULLDOWN_TEST
+ /* this is assumed to be safe. */
+ icmp6 = (struct icmp6_hdr *)((caddr_t)ip6 + off);
+#else
+ IP6_EXTHDR_GET(icmp6, struct icmp6_hdr *, m, off, sizeof(*icmp6));
+ if (icmp6 == NULL) {
+ /* m is already reclaimed */
+ return (IPPROTO_DONE);
+ }
+#endif
+
+ /*
+ * XXX: the address may have embedded scope zone ID, which should be
+ * hidden from applications.
+ */
+ bzero(&fromsa, sizeof(fromsa));
+ fromsa.sin6_family = AF_INET6;
+ fromsa.sin6_len = sizeof(struct sockaddr_in6);
+ fromsa.sin6_addr = ip6->ip6_src;
+ if (sa6_recoverscope(&fromsa)) {
+ m_freem(m);
+ return (IPPROTO_DONE);
+ }
+
+ INP_INFO_RLOCK(&V_ripcbinfo);
+ LIST_FOREACH(in6p, &V_ripcb, inp_list) {
+ if ((in6p->inp_vflag & INP_IPV6) == 0)
+ continue;
+ if (in6p->inp_ip_p != IPPROTO_ICMPV6)
+ continue;
+ if (!IN6_IS_ADDR_UNSPECIFIED(&in6p->in6p_laddr) &&
+ !IN6_ARE_ADDR_EQUAL(&in6p->in6p_laddr, &ip6->ip6_dst))
+ continue;
+ if (!IN6_IS_ADDR_UNSPECIFIED(&in6p->in6p_faddr) &&
+ !IN6_ARE_ADDR_EQUAL(&in6p->in6p_faddr, &ip6->ip6_src))
+ continue;
+ INP_RLOCK(in6p);
+ if (ICMP6_FILTER_WILLBLOCK(icmp6->icmp6_type,
+ in6p->in6p_icmp6filt)) {
+ INP_RUNLOCK(in6p);
+ continue;
+ }
+ if (last != NULL) {
+ struct mbuf *n = NULL;
+
+ /*
+ * Recent network drivers tend to allocate a single
+ * mbuf cluster, rather than to make a couple of
+ * mbufs without clusters. Also, since the IPv6 code
+ * path tries to avoid m_pullup(), it is highly
+ * probable that we still have an mbuf cluster here
+ * even though the necessary length can be stored in an
+ * mbuf's internal buffer.
+ * Meanwhile, the default size of the receive socket
+ * buffer for raw sockets is not so large. This means
+ * the possibility of packet loss is relatively higher
+ * than before. To avoid this scenario, we copy the
+ * received data to a separate mbuf that does not use
+ * a cluster, if possible.
+ * XXX: it is better to copy the data after stripping
+ * intermediate headers.
+ */
+ if ((m->m_flags & M_EXT) && m->m_next == NULL &&
+ m->m_len <= MHLEN) {
+ MGET(n, M_DONTWAIT, m->m_type);
+ if (n != NULL) {
+ if (m_dup_pkthdr(n, m, M_NOWAIT)) {
+ bcopy(m->m_data, n->m_data,
+ m->m_len);
+ n->m_len = m->m_len;
+ } else {
+ m_free(n);
+ n = NULL;
+ }
+ }
+ }
+ if (n != NULL ||
+ (n = m_copy(m, 0, (int)M_COPYALL)) != NULL) {
+ if (last->inp_flags & INP_CONTROLOPTS)
+ ip6_savecontrol(last, n, &opts);
+ /* strip intermediate headers */
+ m_adj(n, off);
+ SOCKBUF_LOCK(&last->inp_socket->so_rcv);
+ if (sbappendaddr_locked(
+ &last->inp_socket->so_rcv,
+ (struct sockaddr *)&fromsa, n, opts)
+ == 0) {
+ /* should notify about lost packet */
+ m_freem(n);
+ if (opts) {
+ m_freem(opts);
+ }
+ SOCKBUF_UNLOCK(
+ &last->inp_socket->so_rcv);
+ } else
+ sorwakeup_locked(last->inp_socket);
+ opts = NULL;
+ }
+ INP_RUNLOCK(last);
+ }
+ last = in6p;
+ }
+ INP_INFO_RUNLOCK(&V_ripcbinfo);
+ if (last != NULL) {
+ if (last->inp_flags & INP_CONTROLOPTS)
+ ip6_savecontrol(last, m, &opts);
+ /* strip intermediate headers */
+ m_adj(m, off);
+
+ /* avoid using mbuf clusters if possible (see above) */
+ if ((m->m_flags & M_EXT) && m->m_next == NULL &&
+ m->m_len <= MHLEN) {
+ struct mbuf *n;
+
+ MGET(n, M_DONTWAIT, m->m_type);
+ if (n != NULL) {
+ if (m_dup_pkthdr(n, m, M_NOWAIT)) {
+ bcopy(m->m_data, n->m_data, m->m_len);
+ n->m_len = m->m_len;
+
+ m_freem(m);
+ m = n;
+ } else {
+ m_freem(n);
+ n = NULL;
+ }
+ }
+ }
+ SOCKBUF_LOCK(&last->inp_socket->so_rcv);
+ if (sbappendaddr_locked(&last->inp_socket->so_rcv,
+ (struct sockaddr *)&fromsa, m, opts) == 0) {
+ m_freem(m);
+ if (opts)
+ m_freem(opts);
+ SOCKBUF_UNLOCK(&last->inp_socket->so_rcv);
+ } else
+ sorwakeup_locked(last->inp_socket);
+ INP_RUNLOCK(last);
+ } else {
+ m_freem(m);
+ IP6STAT_DEC(ip6s_delivered);
+ }
+ return IPPROTO_DONE;
+}
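
Both compaction blocks in icmp6_rip6_input() gate on the same test before copying a cluster-backed packet into a plain mbuf. Restated as a stand-alone predicate (a sketch; the hypothetical worth_compacting() is open-coded in the function above):

    /*
     * A single-mbuf chain backed by an external cluster (M_EXT)
     * whose payload fits in a plain mbuf's internal buffer (MHLEN)
     * is worth copying: the receiving socket buffer is otherwise
     * charged for the whole cluster.
     */
    static int
    worth_compacting(const struct mbuf *m)
    {
    	return ((m->m_flags & M_EXT) != 0 &&	/* cluster-backed */
    	    m->m_next == NULL &&		/* single mbuf */
    	    m->m_len <= MHLEN);			/* fits internal buffer */
    }
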
+
+/*
+ * Reflect the ip6 packet back to the source.
+ * OFF points to the icmp6 header, counted from the top of the mbuf.
+ */
+void
+icmp6_reflect(struct mbuf *m, size_t off)
+{
+ struct ip6_hdr *ip6;
+ struct icmp6_hdr *icmp6;
+ struct in6_ifaddr *ia = NULL;
+ int plen;
+ int type, code;
+ struct ifnet *outif = NULL;
+ struct in6_addr origdst, src, *srcp = NULL;
+
+ /* too short to reflect */
+ if (off < sizeof(struct ip6_hdr)) {
+ nd6log((LOG_DEBUG,
+ "sanity fail: off=%lx, sizeof(ip6)=%lx in %s:%d\n",
+ (u_long)off, (u_long)sizeof(struct ip6_hdr),
+ __FILE__, __LINE__));
+ goto bad;
+ }
+
+ /*
+ * If there are extra headers between IPv6 and ICMPv6, strip
+ * off that header first.
+ */
+#ifdef DIAGNOSTIC
+ if (sizeof(struct ip6_hdr) + sizeof(struct icmp6_hdr) > MHLEN)
+ panic("assumption failed in icmp6_reflect");
+#endif
+ if (off > sizeof(struct ip6_hdr)) {
+ size_t l;
+ struct ip6_hdr nip6;
+
+ l = off - sizeof(struct ip6_hdr);
+ m_copydata(m, 0, sizeof(nip6), (caddr_t)&nip6);
+ m_adj(m, l);
+ l = sizeof(struct ip6_hdr) + sizeof(struct icmp6_hdr);
+ if (m->m_len < l) {
+ if ((m = m_pullup(m, l)) == NULL)
+ return;
+ }
+ bcopy((caddr_t)&nip6, mtod(m, caddr_t), sizeof(nip6));
+ } else /* off == sizeof(struct ip6_hdr) */ {
+ size_t l;
+ l = sizeof(struct ip6_hdr) + sizeof(struct icmp6_hdr);
+ if (m->m_len < l) {
+ if ((m = m_pullup(m, l)) == NULL)
+ return;
+ }
+ }
+ plen = m->m_pkthdr.len - sizeof(struct ip6_hdr);
+ ip6 = mtod(m, struct ip6_hdr *);
+ ip6->ip6_nxt = IPPROTO_ICMPV6;
+ icmp6 = (struct icmp6_hdr *)(ip6 + 1);
+ type = icmp6->icmp6_type; /* keep type for statistics */
+ code = icmp6->icmp6_code; /* ditto. */
+
+ origdst = ip6->ip6_dst;
+ /*
+ * ip6_input() drops a packet if its src is multicast.
+ * So, the src is never multicast.
+ */
+ ip6->ip6_dst = ip6->ip6_src;
+
+ /*
+ * If the incoming packet was addressed directly to us (i.e. unicast),
+ * use dst as the src for the reply.
+ * The IN6_IFF_NOTREADY case should be VERY rare, but is possible
+ * (for example) when we encounter an error during the forwarding
+ * procedure of a packet destined to a duplicated address of ours.
+ * Note that ip6_getdstifaddr() may fail if we are in an error handling
+ * procedure of an outgoing packet of our own, in which case we need
+ * to search in the ifaddr list.
+ */
+ if (!IN6_IS_ADDR_MULTICAST(&origdst)) {
+ if ((ia = ip6_getdstifaddr(m))) {
+ if (!(ia->ia6_flags &
+ (IN6_IFF_ANYCAST|IN6_IFF_NOTREADY)))
+ srcp = &ia->ia_addr.sin6_addr;
+ } else {
+ struct sockaddr_in6 d;
+
+ bzero(&d, sizeof(d));
+ d.sin6_family = AF_INET6;
+ d.sin6_len = sizeof(d);
+ d.sin6_addr = origdst;
+ ia = (struct in6_ifaddr *)
+ ifa_ifwithaddr((struct sockaddr *)&d);
+ if (ia &&
+ !(ia->ia6_flags &
+ (IN6_IFF_ANYCAST|IN6_IFF_NOTREADY))) {
+ srcp = &ia->ia_addr.sin6_addr;
+ }
+ }
+ }
+
+ if ((srcp != NULL) &&
+ (in6_addrscope(srcp) != in6_addrscope(&ip6->ip6_src)))
+ srcp = NULL;
+
+ if (srcp == NULL) {
+ int e;
+ struct sockaddr_in6 sin6;
+ struct route_in6 ro;
+
+ /*
+ * This case matches multicasts, our anycast addresses, or unicasts
+ * that we do not own. Select a source address based on the
+ * source address of the erroneous packet.
+ */
+ bzero(&sin6, sizeof(sin6));
+ sin6.sin6_family = AF_INET6;
+ sin6.sin6_len = sizeof(sin6);
+ sin6.sin6_addr = ip6->ip6_dst; /* zone ID should be embedded */
+
+ bzero(&ro, sizeof(ro));
+ e = in6_selectsrc(&sin6, NULL, NULL, &ro, NULL, &outif, &src);
+ if (ro.ro_rt)
+ RTFREE(ro.ro_rt); /* XXX: we could use this */
+ if (e) {
+ char ip6buf[INET6_ADDRSTRLEN];
+ nd6log((LOG_DEBUG,
+ "icmp6_reflect: source can't be determined: "
+ "dst=%s, error=%d\n",
+ ip6_sprintf(ip6buf, &sin6.sin6_addr), e));
+ goto bad;
+ }
+ srcp = &src;
+ }
+
+ ip6->ip6_src = *srcp;
+ ip6->ip6_flow = 0;
+ ip6->ip6_vfc &= ~IPV6_VERSION_MASK;
+ ip6->ip6_vfc |= IPV6_VERSION;
+ ip6->ip6_nxt = IPPROTO_ICMPV6;
+ if (outif)
+ ip6->ip6_hlim = ND_IFINFO(outif)->chlim;
+ else if (m->m_pkthdr.rcvif) {
+ /* XXX: This may not be the outgoing interface */
+ ip6->ip6_hlim = ND_IFINFO(m->m_pkthdr.rcvif)->chlim;
+ } else
+ ip6->ip6_hlim = V_ip6_defhlim;
+
+ icmp6->icmp6_cksum = 0;
+ icmp6->icmp6_cksum = in6_cksum(m, IPPROTO_ICMPV6,
+ sizeof(struct ip6_hdr), plen);
+
+ /*
+ * XXX option handling
+ */
+
+ m->m_flags &= ~(M_BCAST|M_MCAST);
+
+ ip6_output(m, NULL, NULL, 0, NULL, &outif, NULL);
+ if (outif)
+ icmp6_ifoutstat_inc(outif, type, code);
+
+ if (ia != NULL)
+ ifa_free(&ia->ia_ifa);
+ return;
+
+ bad:
+ if (ia != NULL)
+ ifa_free(&ia->ia_ifa);
+ m_freem(m);
+ return;
+}
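
The in6_cksum() call above recomputes the ICMPv6 checksum over the RFC 2460 pseudo-header (source and destination addresses, upper-layer length, next-header value) plus the ICMPv6 message. A user-space sketch of the same computation (hypothetical helper; assumes a 2-byte-aligned payload, and the pseudo-header's three zero pad bytes are omitted since they add nothing to the sum):

    #include <stdint.h>
    #include <netinet/in.h>	/* IPPROTO_ICMPV6, struct in6_addr */
    #include <arpa/inet.h>	/* htons() */

    static uint16_t
    icmp6_cksum_sketch(const struct in6_addr *src, const struct in6_addr *dst,
        const uint8_t *payload, uint32_t plen)
    {
    	uint64_t sum = 0;
    	const uint16_t *p;
    	uint32_t i;

    	for (p = (const uint16_t *)src, i = 0; i < 8; i++)	/* src addr */
    		sum += p[i];
    	for (p = (const uint16_t *)dst, i = 0; i < 8; i++)	/* dst addr */
    		sum += p[i];
    	sum += htons(plen >> 16) + htons(plen & 0xffff);	/* length */
    	sum += htons(IPPROTO_ICMPV6);				/* next header */
    	for (i = 0; i + 1 < plen; i += 2)			/* payload */
    		sum += *(const uint16_t *)(payload + i);
    	if (plen & 1)				/* odd byte, zero-padded */
    		sum += htons((uint16_t)(payload[plen - 1] << 8));
    	while (sum >> 16)			/* fold the carries */
    		sum = (sum & 0xffff) + (sum >> 16);
    	return (~(uint16_t)sum);
    }
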
+
+void
+icmp6_fasttimo(void)
+{
+
+ mld_fasttimo();
+}
+
+void
+icmp6_slowtimo(void)
+{
+
+ mld_slowtimo();
+}
+
+static const char *
+icmp6_redirect_diag(struct in6_addr *src6, struct in6_addr *dst6,
+ struct in6_addr *tgt6)
+{
+ static char buf[1024];
+ char ip6bufs[INET6_ADDRSTRLEN];
+ char ip6bufd[INET6_ADDRSTRLEN];
+ char ip6buft[INET6_ADDRSTRLEN];
+ snprintf(buf, sizeof(buf), "(src=%s dst=%s tgt=%s)",
+ ip6_sprintf(ip6bufs, src6), ip6_sprintf(ip6bufd, dst6),
+ ip6_sprintf(ip6buft, tgt6));
+ return buf;
+}
+
+void
+icmp6_redirect_input(struct mbuf *m, int off)
+{
+ struct ifnet *ifp;
+ struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *);
+ struct nd_redirect *nd_rd;
+ int icmp6len = ntohs(ip6->ip6_plen);
+ char *lladdr = NULL;
+ int lladdrlen = 0;
+ u_char *redirhdr = NULL;
+ int redirhdrlen = 0;
+ struct rtentry *rt = NULL;
+ int is_router;
+ int is_onlink;
+ struct in6_addr src6 = ip6->ip6_src;
+ struct in6_addr redtgt6;
+ struct in6_addr reddst6;
+ union nd_opts ndopts;
+ char ip6buf[INET6_ADDRSTRLEN];
+
+ if (!m)
+ return;
+
+ ifp = m->m_pkthdr.rcvif;
+
+ if (!ifp)
+ return;
+
+ /* XXX if we are a router, we don't update the route on icmp6 redirects */
+ if (V_ip6_forwarding)
+ goto freeit;
+ if (!V_icmp6_rediraccept)
+ goto freeit;
+
+#ifndef PULLDOWN_TEST
+ IP6_EXTHDR_CHECK(m, off, icmp6len,);
+ nd_rd = (struct nd_redirect *)((caddr_t)ip6 + off);
+#else
+ IP6_EXTHDR_GET(nd_rd, struct nd_redirect *, m, off, icmp6len);
+ if (nd_rd == NULL) {
+ ICMP6STAT_INC(icp6s_tooshort);
+ return;
+ }
+#endif
+ redtgt6 = nd_rd->nd_rd_target;
+ reddst6 = nd_rd->nd_rd_dst;
+
+ if (in6_setscope(&redtgt6, m->m_pkthdr.rcvif, NULL) ||
+ in6_setscope(&reddst6, m->m_pkthdr.rcvif, NULL)) {
+ goto freeit;
+ }
+
+ /* validation */
+ if (!IN6_IS_ADDR_LINKLOCAL(&src6)) {
+ nd6log((LOG_ERR,
+ "ICMP6 redirect sent from %s rejected; "
+ "must be from linklocal\n",
+ ip6_sprintf(ip6buf, &src6)));
+ goto bad;
+ }
+ if (ip6->ip6_hlim != 255) {
+ nd6log((LOG_ERR,
+ "ICMP6 redirect sent from %s rejected; "
+ "hlim=%d (must be 255)\n",
+ ip6_sprintf(ip6buf, &src6), ip6->ip6_hlim));
+ goto bad;
+ }
+ {
+ /* ip6->ip6_src must be equal to gw for icmp6->icmp6_reddst */
+ struct sockaddr_in6 sin6;
+ struct in6_addr *gw6;
+
+ bzero(&sin6, sizeof(sin6));
+ sin6.sin6_family = AF_INET6;
+ sin6.sin6_len = sizeof(struct sockaddr_in6);
+ bcopy(&reddst6, &sin6.sin6_addr, sizeof(reddst6));
+ rt = rtalloc1((struct sockaddr *)&sin6, 0, 0UL);
+ if (rt) {
+ if (rt->rt_gateway == NULL ||
+ rt->rt_gateway->sa_family != AF_INET6) {
+ nd6log((LOG_ERR,
+ "ICMP6 redirect rejected; no route "
+ "with inet6 gateway found for redirect dst: %s\n",
+ icmp6_redirect_diag(&src6, &reddst6, &redtgt6)));
+ RTFREE_LOCKED(rt);
+ goto bad;
+ }
+
+ gw6 = &(((struct sockaddr_in6 *)rt->rt_gateway)->sin6_addr);
+ if (bcmp(&src6, gw6, sizeof(struct in6_addr)) != 0) {
+ nd6log((LOG_ERR,
+ "ICMP6 redirect rejected; "
+ "not equal to gw-for-src=%s (must be same): "
+ "%s\n",
+ ip6_sprintf(ip6buf, gw6),
+ icmp6_redirect_diag(&src6, &reddst6, &redtgt6)));
+ RTFREE_LOCKED(rt);
+ goto bad;
+ }
+ } else {
+ nd6log((LOG_ERR,
+ "ICMP6 redirect rejected; "
+ "no route found for redirect dst: %s\n",
+ icmp6_redirect_diag(&src6, &reddst6, &redtgt6)));
+ goto bad;
+ }
+ RTFREE_LOCKED(rt);
+ rt = NULL;
+ }
+ if (IN6_IS_ADDR_MULTICAST(&reddst6)) {
+ nd6log((LOG_ERR,
+ "ICMP6 redirect rejected; "
+ "redirect dst must be unicast: %s\n",
+ icmp6_redirect_diag(&src6, &reddst6, &redtgt6)));
+ goto bad;
+ }
+
+ is_router = is_onlink = 0;
+ if (IN6_IS_ADDR_LINKLOCAL(&redtgt6))
+ is_router = 1; /* router case */
+ if (bcmp(&redtgt6, &reddst6, sizeof(redtgt6)) == 0)
+ is_onlink = 1; /* on-link destination case */
+ if (!is_router && !is_onlink) {
+ nd6log((LOG_ERR,
+ "ICMP6 redirect rejected; "
+ "neither router case nor onlink case: %s\n",
+ icmp6_redirect_diag(&src6, &reddst6, &redtgt6)));
+ goto bad;
+ }
+ /* validation passed */
+
+ icmp6len -= sizeof(*nd_rd);
+ nd6_option_init(nd_rd + 1, icmp6len, &ndopts);
+ if (nd6_options(&ndopts) < 0) {
+ nd6log((LOG_INFO, "icmp6_redirect_input: "
+ "invalid ND option, rejected: %s\n",
+ icmp6_redirect_diag(&src6, &reddst6, &redtgt6)));
+ /* nd6_options have incremented stats */
+ goto freeit;
+ }
+
+ if (ndopts.nd_opts_tgt_lladdr) {
+ lladdr = (char *)(ndopts.nd_opts_tgt_lladdr + 1);
+ lladdrlen = ndopts.nd_opts_tgt_lladdr->nd_opt_len << 3;
+ }
+
+ if (ndopts.nd_opts_rh) {
+ redirhdrlen = ndopts.nd_opts_rh->nd_opt_rh_len;
+ redirhdr = (u_char *)(ndopts.nd_opts_rh + 1); /* xxx */
+ }
+
+ if (lladdr && ((ifp->if_addrlen + 2 + 7) & ~7) != lladdrlen) {
+ nd6log((LOG_INFO,
+ "icmp6_redirect_input: lladdrlen mismatch for %s "
+ "(if %d, icmp6 packet %d): %s\n",
+ ip6_sprintf(ip6buf, &redtgt6),
+ ifp->if_addrlen, lladdrlen - 2,
+ icmp6_redirect_diag(&src6, &reddst6, &redtgt6)));
+ goto bad;
+ }
+
+ /* RFC 2461 8.3 */
+ nd6_cache_lladdr(ifp, &redtgt6, lladdr, lladdrlen, ND_REDIRECT,
+ is_onlink ? ND_REDIRECT_ONLINK : ND_REDIRECT_ROUTER);
+
+ if (!is_onlink) { /* better router case. perform rtredirect. */
+ /* perform rtredirect */
+ struct sockaddr_in6 sdst;
+ struct sockaddr_in6 sgw;
+ struct sockaddr_in6 ssrc;
+
+ bzero(&sdst, sizeof(sdst));
+ bzero(&sgw, sizeof(sgw));
+ bzero(&ssrc, sizeof(ssrc));
+ sdst.sin6_family = sgw.sin6_family = ssrc.sin6_family = AF_INET6;
+ sdst.sin6_len = sgw.sin6_len = ssrc.sin6_len =
+ sizeof(struct sockaddr_in6);
+ bcopy(&redtgt6, &sgw.sin6_addr, sizeof(struct in6_addr));
+ bcopy(&reddst6, &sdst.sin6_addr, sizeof(struct in6_addr));
+ bcopy(&src6, &ssrc.sin6_addr, sizeof(struct in6_addr));
+ rtredirect((struct sockaddr *)&sdst, (struct sockaddr *)&sgw,
+ (struct sockaddr *)NULL, RTF_GATEWAY | RTF_HOST,
+ (struct sockaddr *)&ssrc);
+ }
+ /* finally update cached route in each socket via pfctlinput */
+ {
+ struct sockaddr_in6 sdst;
+
+ bzero(&sdst, sizeof(sdst));
+ sdst.sin6_family = AF_INET6;
+ sdst.sin6_len = sizeof(struct sockaddr_in6);
+ bcopy(&reddst6, &sdst.sin6_addr, sizeof(struct in6_addr));
+ pfctlinput(PRC_REDIRECT_HOST, (struct sockaddr *)&sdst);
+#ifdef IPSEC
+ key_sa_routechange((struct sockaddr *)&sdst);
+#endif /* IPSEC */
+ }
+
+ freeit:
+ m_freem(m);
+ return;
+
+ bad:
+ ICMP6STAT_INC(icp6s_badredirect);
+ m_freem(m);
+}
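
The checks above are the RFC 2461 section 8.1 validity tests for received redirects. Condensed into one hypothetical predicate (gw6 being the current next hop for reddst6 from the routing table, as looked up above):

    #include <netinet/in.h>
    #include <netinet/ip6.h>	/* struct ip6_hdr */

    static int
    redirect_acceptable(const struct ip6_hdr *ip6,
        const struct in6_addr *redtgt6, const struct in6_addr *reddst6,
        const struct in6_addr *gw6)
    {
    	if (!IN6_IS_ADDR_LINKLOCAL(&ip6->ip6_src))
    		return (0);	/* must come from a link-local source */
    	if (ip6->ip6_hlim != 255)
    		return (0);	/* must not have been forwarded */
    	if (!IN6_ARE_ADDR_EQUAL(&ip6->ip6_src, gw6))
    		return (0);	/* sender must be our current next hop */
    	if (IN6_IS_ADDR_MULTICAST(reddst6))
    		return (0);	/* redirect dst must be unicast */
    	if (!IN6_IS_ADDR_LINKLOCAL(redtgt6) &&
    	    !IN6_ARE_ADDR_EQUAL(redtgt6, reddst6))
    		return (0);	/* neither router nor on-link case */
    	return (1);
    }
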
+
+void
+icmp6_redirect_output(struct mbuf *m0, struct rtentry *rt)
+{
+ struct ifnet *ifp; /* my outgoing interface */
+ struct in6_addr *ifp_ll6;
+ struct in6_addr *router_ll6;
+ struct ip6_hdr *sip6; /* m0 as struct ip6_hdr */
+ struct mbuf *m = NULL; /* newly allocated one */
+ struct ip6_hdr *ip6; /* m as struct ip6_hdr */
+ struct nd_redirect *nd_rd;
+ struct llentry *ln = NULL;
+ size_t maxlen;
+ u_char *p;
+ struct ifnet *outif = NULL;
+ struct sockaddr_in6 src_sa;
+
+ icmp6_errcount(&V_icmp6stat.icp6s_outerrhist, ND_REDIRECT, 0);
+
+ /* if we are not a router, we don't send icmp6 redirects */
+ if (!V_ip6_forwarding)
+ goto fail;
+
+ /* sanity check */
+ if (!m0 || !rt || !(rt->rt_flags & RTF_UP) || !(ifp = rt->rt_ifp))
+ goto fail;
+
+ /*
+ * Address check:
+ * the source address must identify a neighbor, and
+ * the destination address must not be a multicast address
+ * [RFC 2461, sec 8.2]
+ */
+ sip6 = mtod(m0, struct ip6_hdr *);
+ bzero(&src_sa, sizeof(src_sa));
+ src_sa.sin6_family = AF_INET6;
+ src_sa.sin6_len = sizeof(src_sa);
+ src_sa.sin6_addr = sip6->ip6_src;
+ if (nd6_is_addr_neighbor(&src_sa, ifp) == 0)
+ goto fail;
+ if (IN6_IS_ADDR_MULTICAST(&sip6->ip6_dst))
+ goto fail; /* what should we do here? */
+
+ /* rate limit */
+ if (icmp6_ratelimit(&sip6->ip6_src, ND_REDIRECT, 0))
+ goto fail;
+
+ /*
+ * Since we are going to append up to 1280 bytes (= IPV6_MMTU),
+ * we almost always ask for an mbuf cluster for simplicity.
+ * (MHLEN < IPV6_MMTU is almost always true)
+ */
+#if IPV6_MMTU >= MCLBYTES
+# error assumption failed about IPV6_MMTU and MCLBYTES
+#endif
+ MGETHDR(m, M_DONTWAIT, MT_HEADER);
+ if (m && IPV6_MMTU >= MHLEN)
+ MCLGET(m, M_DONTWAIT);
+ if (!m)
+ goto fail;
+ m->m_pkthdr.rcvif = NULL;
+ m->m_len = 0;
+ maxlen = M_TRAILINGSPACE(m);
+ maxlen = min(IPV6_MMTU, maxlen);
+ /* just for safety */
+ if (maxlen < sizeof(struct ip6_hdr) + sizeof(struct icmp6_hdr) +
+ ((sizeof(struct nd_opt_hdr) + ifp->if_addrlen + 7) & ~7)) {
+ goto fail;
+ }
+
+ {
+ /* get the ip6 link-local address for ifp (my outgoing interface). */
+ struct in6_ifaddr *ia;
+ if ((ia = in6ifa_ifpforlinklocal(ifp,
+ IN6_IFF_NOTREADY|
+ IN6_IFF_ANYCAST)) == NULL)
+ goto fail;
+ ifp_ll6 = &ia->ia_addr.sin6_addr;
+ /* XXXRW: reference released prematurely. */
+ ifa_free(&ia->ia_ifa);
+ }
+
+ /* get ip6 linklocal address for the router. */
+ if (rt->rt_gateway && (rt->rt_flags & RTF_GATEWAY)) {
+ struct sockaddr_in6 *sin6;
+ sin6 = (struct sockaddr_in6 *)rt->rt_gateway;
+ router_ll6 = &sin6->sin6_addr;
+ if (!IN6_IS_ADDR_LINKLOCAL(router_ll6))
+ router_ll6 = (struct in6_addr *)NULL;
+ } else
+ router_ll6 = (struct in6_addr *)NULL;
+
+ /* ip6 */
+ ip6 = mtod(m, struct ip6_hdr *);
+ ip6->ip6_flow = 0;
+ ip6->ip6_vfc &= ~IPV6_VERSION_MASK;
+ ip6->ip6_vfc |= IPV6_VERSION;
+ /* ip6->ip6_plen will be set later */
+ ip6->ip6_nxt = IPPROTO_ICMPV6;
+ ip6->ip6_hlim = 255;
+ /* ip6->ip6_src must be linklocal addr for my outgoing if. */
+ bcopy(ifp_ll6, &ip6->ip6_src, sizeof(struct in6_addr));
+ bcopy(&sip6->ip6_src, &ip6->ip6_dst, sizeof(struct in6_addr));
+
+ /* ND Redirect */
+ nd_rd = (struct nd_redirect *)(ip6 + 1);
+ nd_rd->nd_rd_type = ND_REDIRECT;
+ nd_rd->nd_rd_code = 0;
+ nd_rd->nd_rd_reserved = 0;
+ if (rt->rt_flags & RTF_GATEWAY) {
+ /*
+ * nd_rd->nd_rd_target must be a link-local address in
+ * better router cases.
+ */
+ if (!router_ll6)
+ goto fail;
+ bcopy(router_ll6, &nd_rd->nd_rd_target,
+ sizeof(nd_rd->nd_rd_target));
+ bcopy(&sip6->ip6_dst, &nd_rd->nd_rd_dst,
+ sizeof(nd_rd->nd_rd_dst));
+ } else {
+ /* make sure redtgt == reddst */
+ bcopy(&sip6->ip6_dst, &nd_rd->nd_rd_target,
+ sizeof(nd_rd->nd_rd_target));
+ bcopy(&sip6->ip6_dst, &nd_rd->nd_rd_dst,
+ sizeof(nd_rd->nd_rd_dst));
+ }
+
+ p = (u_char *)(nd_rd + 1);
+
+ if (!router_ll6)
+ goto nolladdropt;
+
+ {
+ /* target lladdr option */
+ int len;
+ struct nd_opt_hdr *nd_opt;
+ char *lladdr;
+
+ IF_AFDATA_LOCK(ifp);
+ ln = nd6_lookup(router_ll6, 0, ifp);
+ IF_AFDATA_UNLOCK(ifp);
+ if (ln == NULL)
+ goto nolladdropt;
+
+ len = sizeof(*nd_opt) + ifp->if_addrlen;
+ len = (len + 7) & ~7; /* round by 8 */
+ /* safety check */
+ if (len + (p - (u_char *)ip6) > maxlen)
+ goto nolladdropt;
+
+ if (ln->la_flags & LLE_VALID) {
+ nd_opt = (struct nd_opt_hdr *)p;
+ nd_opt->nd_opt_type = ND_OPT_TARGET_LINKADDR;
+ nd_opt->nd_opt_len = len >> 3;
+ lladdr = (char *)(nd_opt + 1);
+ bcopy(&ln->ll_addr, lladdr, ifp->if_addrlen);
+ p += len;
+ }
+ }
+nolladdropt:
+ if (ln != NULL)
+ LLE_RUNLOCK(ln);
+
+ m->m_pkthdr.len = m->m_len = p - (u_char *)ip6;
+
+ /* just to be safe */
+#ifdef M_DECRYPTED /*not openbsd*/
+ if (m0->m_flags & M_DECRYPTED)
+ goto noredhdropt;
+#endif
+ if (p - (u_char *)ip6 > maxlen)
+ goto noredhdropt;
+
+ {
+ /* redirected header option */
+ int len;
+ struct nd_opt_rd_hdr *nd_opt_rh;
+
+ /*
+ * compute the maximum size for icmp6 redirect header option.
+ * XXX room for auth header?
+ */
+ len = maxlen - (p - (u_char *)ip6);
+ len &= ~7;
+
+ /* This is just for simplicity. */
+ if (m0->m_pkthdr.len != m0->m_len) {
+ if (m0->m_next) {
+ m_freem(m0->m_next);
+ m0->m_next = NULL;
+ }
+ m0->m_pkthdr.len = m0->m_len;
+ }
+
+ /*
+ * The redirected header option spec (RFC 2461 4.6.3) says nothing
+ * about the padding/truncation rules for the original IP packet.
+ * From the discussion on IPv6imp in Feb 1999, the consensus was:
+ * - "attach as much as possible" is the goal
+ * - pad if not aligned (the original size can be recovered from
+ * the original ip6 header)
+ * The following code adds the padding if it is simple enough,
+ * and truncates otherwise.
+ */
+ if (m0->m_next || m0->m_pkthdr.len != m0->m_len)
+ panic("assumption failed in %s:%d", __FILE__,
+ __LINE__);
+
+ if (len - sizeof(*nd_opt_rh) < m0->m_pkthdr.len) {
+ /* not enough room, truncate */
+ m0->m_pkthdr.len = m0->m_len = len -
+ sizeof(*nd_opt_rh);
+ } else {
+ /* enough room, pad or truncate */
+ size_t extra;
+
+ extra = m0->m_pkthdr.len % 8;
+ if (extra) {
+ /* pad if easy enough, truncate if not */
+ if (8 - extra <= M_TRAILINGSPACE(m0)) {
+ /* pad */
+ m0->m_len += (8 - extra);
+ m0->m_pkthdr.len += (8 - extra);
+ } else {
+ /* truncate */
+ m0->m_pkthdr.len -= extra;
+ m0->m_len -= extra;
+ }
+ }
+ len = m0->m_pkthdr.len + sizeof(*nd_opt_rh);
+ m0->m_pkthdr.len = m0->m_len = len -
+ sizeof(*nd_opt_rh);
+ }
+
+ nd_opt_rh = (struct nd_opt_rd_hdr *)p;
+ bzero(nd_opt_rh, sizeof(*nd_opt_rh));
+ nd_opt_rh->nd_opt_rh_type = ND_OPT_REDIRECTED_HEADER;
+ nd_opt_rh->nd_opt_rh_len = len >> 3;
+ p += sizeof(*nd_opt_rh);
+ m->m_pkthdr.len = m->m_len = p - (u_char *)ip6;
+
+ /* connect m0 to m */
+ m_tag_delete_chain(m0, NULL);
+ m0->m_flags &= ~M_PKTHDR;
+ m->m_next = m0;
+ m->m_pkthdr.len = m->m_len + m0->m_len;
+ m0 = NULL;
+ }
+noredhdropt:;
+ if (m0) {
+ m_freem(m0);
+ m0 = NULL;
+ }
+
+ /* XXX: clear embedded link IDs in the inner header */
+ in6_clearscope(&sip6->ip6_src);
+ in6_clearscope(&sip6->ip6_dst);
+ in6_clearscope(&nd_rd->nd_rd_target);
+ in6_clearscope(&nd_rd->nd_rd_dst);
+
+ ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(struct ip6_hdr));
+
+ nd_rd->nd_rd_cksum = 0;
+ nd_rd->nd_rd_cksum = in6_cksum(m, IPPROTO_ICMPV6,
+ sizeof(*ip6), ntohs(ip6->ip6_plen));
+
+ /* send the packet to outside... */
+ ip6_output(m, NULL, NULL, 0, NULL, &outif, NULL);
+ if (outif) {
+ icmp6_ifstat_inc(outif, ifs6_out_msg);
+ icmp6_ifstat_inc(outif, ifs6_out_redirect);
+ }
+ ICMP6STAT_INC(icp6s_outhist[ND_REDIRECT]);
+
+ return;
+
+fail:
+ if (m)
+ m_freem(m);
+ if (m0)
+ m_freem(m0);
+}
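
The redirected-header sizing above boils down to a small arithmetic rule: keep the option a multiple of 8 bytes, pad the original packet up when it fits, truncate it down otherwise. A simplified sketch (hypothetical helper; it ignores the extra check above that the pad bytes actually fit in the mbuf's trailing space, where the code truncates instead):

    #include <stddef.h>

    /*
     * room: space left in the redirect message, already a multiple
     * of 8. pktlen: length of the original packet. Returns the
     * number of payload bytes to attach after the 8-byte
     * nd_opt_rd_hdr.
     */
    static size_t
    rdhdr_payload_len(size_t room, size_t pktlen)
    {
    	size_t avail = room - 8;	/* sizeof(struct nd_opt_rd_hdr) */

    	if (pktlen > avail)
    		return (avail);			/* truncate */
    	return ((pktlen + 7) & ~(size_t)7);	/* pad to 8-byte multiple */
    }
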
+
+/*
+ * ICMPv6 socket option processing.
+ */
+int
+icmp6_ctloutput(struct socket *so, struct sockopt *sopt)
+{
+ int error = 0;
+ int optlen;
+ struct inpcb *inp = sotoinpcb(so);
+ int level, op, optname;
+
+ if (sopt) {
+ level = sopt->sopt_level;
+ op = sopt->sopt_dir;
+ optname = sopt->sopt_name;
+ optlen = sopt->sopt_valsize;
+ } else
+ level = op = optname = optlen = 0;
+
+ if (level != IPPROTO_ICMPV6) {
+ return EINVAL;
+ }
+
+ switch (op) {
+ case PRCO_SETOPT:
+ switch (optname) {
+ case ICMP6_FILTER:
+ {
+ struct icmp6_filter ic6f;
+
+ if (optlen != sizeof(ic6f)) {
+ error = EMSGSIZE;
+ break;
+ }
+ error = sooptcopyin(sopt, &ic6f, optlen, optlen);
+ if (error == 0) {
+ INP_WLOCK(inp);
+ *inp->in6p_icmp6filt = ic6f;
+ INP_WUNLOCK(inp);
+ }
+ break;
+ }
+
+ default:
+ error = ENOPROTOOPT;
+ break;
+ }
+ break;
+
+ case PRCO_GETOPT:
+ switch (optname) {
+ case ICMP6_FILTER:
+ {
+ struct icmp6_filter ic6f;
+
+ INP_RLOCK(inp);
+ ic6f = *inp->in6p_icmp6filt;
+ INP_RUNLOCK(inp);
+ error = sooptcopyout(sopt, &ic6f, sizeof(ic6f));
+ break;
+ }
+
+ default:
+ error = ENOPROTOOPT;
+ break;
+ }
+ break;
+ }
+
+ return (error);
+}
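
The PRCO_SETOPT path above is what a userland ICMP6_FILTER request lands on. The usual client side, per the RFC 3542 API (a sketch with error handling omitted; raw IPv6 sockets require sufficient privilege):

    #include <sys/socket.h>
    #include <netinet/in.h>
    #include <netinet/icmp6.h>

    /* Restrict a raw ICMPv6 socket to echo replies only. */
    int
    open_ping6_socket(void)
    {
    	struct icmp6_filter filt;
    	int s;

    	s = socket(AF_INET6, SOCK_RAW, IPPROTO_ICMPV6);
    	ICMP6_FILTER_SETBLOCKALL(&filt);
    	ICMP6_FILTER_SETPASS(ICMP6_ECHO_REPLY, &filt);
    	setsockopt(s, IPPROTO_ICMPV6, ICMP6_FILTER, &filt, sizeof(filt));
    	return (s);
    }
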
+
+/*
+ * Perform rate limit check.
+ * Returns 0 if it is okay to send the icmp6 packet.
+ * Returns 1 if the router SHOULD NOT send this icmp6 packet due to rate
+ * limitation.
+ *
+ * XXX per-destination/type check necessary?
+ *
+ * dst - not used at this moment
+ * type - not used at this moment
+ * code - not used at this moment
+ */
+static int
+icmp6_ratelimit(const struct in6_addr *dst, const int type,
+ const int code)
+{
+ int ret;
+
+ ret = 0; /* okay to send */
+
+ /* PPS limit */
+ if (!ppsratecheck(&V_icmp6errppslim_last, &V_icmp6errpps_count,
+ V_icmp6errppslim)) {
+ /* The packet is subject to rate limit */
+ ret++;
+ }
+
+ return ret;
+}
diff --git a/rtems/freebsd/netinet6/icmp6.h b/rtems/freebsd/netinet6/icmp6.h
new file mode 100644
index 00000000..a6414efc
--- /dev/null
+++ b/rtems/freebsd/netinet6/icmp6.h
@@ -0,0 +1,4 @@
+/* $FreeBSD$ */
+/* $KAME: icmp6.h,v 1.17 2000/06/11 17:23:40 jinmei Exp $ */
+
+#error "netinet6/icmp6.h is obsolete. use netinet/icmp6.h"
diff --git a/rtems/freebsd/netinet6/in6.c b/rtems/freebsd/netinet6/in6.c
new file mode 100644
index 00000000..60f112f0
--- /dev/null
+++ b/rtems/freebsd/netinet6/in6.c
@@ -0,0 +1,2671 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the project nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $KAME: in6.c,v 1.259 2002/01/21 11:37:50 keiichi Exp $
+ */
+
+/*-
+ * Copyright (c) 1982, 1986, 1991, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)in.c 8.2 (Berkeley) 11/15/93
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/freebsd/local/opt_compat.h>
+#include <rtems/freebsd/local/opt_inet.h>
+#include <rtems/freebsd/local/opt_inet6.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/errno.h>
+#include <rtems/freebsd/sys/jail.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/socketvar.h>
+#include <rtems/freebsd/sys/sockio.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/priv.h>
+#include <rtems/freebsd/sys/proc.h>
+#include <rtems/freebsd/sys/time.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/syslog.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/if_var.h>
+#include <rtems/freebsd/net/if_types.h>
+#include <rtems/freebsd/net/route.h>
+#include <rtems/freebsd/net/if_dl.h>
+#include <rtems/freebsd/net/vnet.h>
+
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/in_var.h>
+#include <rtems/freebsd/net/if_llatbl.h>
+#include <rtems/freebsd/netinet/if_ether.h>
+#include <rtems/freebsd/netinet/in_systm.h>
+#include <rtems/freebsd/netinet/ip.h>
+#include <rtems/freebsd/netinet/in_pcb.h>
+
+#include <rtems/freebsd/netinet/ip6.h>
+#include <rtems/freebsd/netinet6/ip6_var.h>
+#include <rtems/freebsd/netinet6/nd6.h>
+#include <rtems/freebsd/netinet6/mld6_var.h>
+#include <rtems/freebsd/netinet6/ip6_mroute.h>
+#include <rtems/freebsd/netinet6/in6_ifattach.h>
+#include <rtems/freebsd/netinet6/scope6_var.h>
+#include <rtems/freebsd/netinet6/in6_pcb.h>
+
+/*
+ * Definitions of some constant IPv6 addresses.
+ */
+const struct in6_addr in6addr_any = IN6ADDR_ANY_INIT;
+const struct in6_addr in6addr_loopback = IN6ADDR_LOOPBACK_INIT;
+const struct in6_addr in6addr_nodelocal_allnodes =
+ IN6ADDR_NODELOCAL_ALLNODES_INIT;
+const struct in6_addr in6addr_linklocal_allnodes =
+ IN6ADDR_LINKLOCAL_ALLNODES_INIT;
+const struct in6_addr in6addr_linklocal_allrouters =
+ IN6ADDR_LINKLOCAL_ALLROUTERS_INIT;
+const struct in6_addr in6addr_linklocal_allv2routers =
+ IN6ADDR_LINKLOCAL_ALLV2ROUTERS_INIT;
+
+const struct in6_addr in6mask0 = IN6MASK0;
+const struct in6_addr in6mask32 = IN6MASK32;
+const struct in6_addr in6mask64 = IN6MASK64;
+const struct in6_addr in6mask96 = IN6MASK96;
+const struct in6_addr in6mask128 = IN6MASK128;
+
+const struct sockaddr_in6 sa6_any =
+ { sizeof(sa6_any), AF_INET6, 0, 0, IN6ADDR_ANY_INIT, 0 };
+
+static int in6_lifaddr_ioctl __P((struct socket *, u_long, caddr_t,
+ struct ifnet *, struct thread *));
+static int in6_ifinit __P((struct ifnet *, struct in6_ifaddr *,
+ struct sockaddr_in6 *, int));
+static void in6_unlink_ifa(struct in6_ifaddr *, struct ifnet *);
+
+int (*faithprefix_p)(struct in6_addr *);
+
+int
+in6_mask2len(struct in6_addr *mask, u_char *lim0)
+{
+ int x = 0, y;
+ u_char *lim = lim0, *p;
+
+ /* ignore the scope_id part */
+ if (lim0 == NULL || lim0 - (u_char *)mask > sizeof(*mask))
+ lim = (u_char *)mask + sizeof(*mask);
+ for (p = (u_char *)mask; p < lim; x++, p++) {
+ if (*p != 0xff)
+ break;
+ }
+ y = 0;
+ if (p < lim) {
+ for (y = 0; y < 8; y++) {
+ if ((*p & (0x80 >> y)) == 0)
+ break;
+ }
+ }
+
+ /*
+ * when the limit pointer is given, do a stricter check on the
+ * remaining bits.
+ */
+ if (p < lim) {
+ if (y != 0 && (*p & (0x00ff >> y)) != 0)
+ return (-1);
+ for (p = p + 1; p < lim; p++)
+ if (*p != 0)
+ return (-1);
+ }
+
+ return x * 8 + y;
+}
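
For example, the mask ffff:ffff:ffff:e000:: has six 0xff bytes (48 bits) followed by 0xe0 (three more set bits), so the function returns 51. A quick user-space check that re-derives the count the same way (in6_mask2len() itself is kernel-only):

    #include <stdio.h>
    #include <arpa/inet.h>	/* inet_pton() */
    #include <netinet/in.h>

    int
    main(void)
    {
    	struct in6_addr mask;
    	const unsigned char *p = mask.s6_addr;
    	int len = 0, i, b;

    	inet_pton(AF_INET6, "ffff:ffff:ffff:e000::", &mask);
    	for (i = 0; i < 16 && p[i] == 0xff; i++)	/* whole 0xff bytes */
    		len += 8;
    	if (i < 16)					/* leading set bits */
    		for (b = 0; b < 8 && (p[i] & (0x80 >> b)); b++)
    			len++;
    	printf("%d\n", len);	/* prints 51 */
    	return (0);
    }
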
+
+#define ifa2ia6(ifa) ((struct in6_ifaddr *)(ifa))
+#define ia62ifa(ia6) (&((ia6)->ia_ifa))
+
+#ifdef COMPAT_FREEBSD32
+struct in6_ndifreq32 {
+ char ifname[IFNAMSIZ];
+ uint32_t ifindex;
+};
+#define SIOCGDEFIFACE32_IN6 _IOWR('i', 86, struct in6_ndifreq32)
+#endif
+
+int
+in6_control(struct socket *so, u_long cmd, caddr_t data,
+ struct ifnet *ifp, struct thread *td)
+{
+ struct in6_ifreq *ifr = (struct in6_ifreq *)data;
+ struct in6_ifaddr *ia = NULL;
+ struct in6_aliasreq *ifra = (struct in6_aliasreq *)data;
+ struct sockaddr_in6 *sa6;
+ int error;
+
+ switch (cmd) {
+ case SIOCGETSGCNT_IN6:
+ case SIOCGETMIFCNT_IN6:
+ return (mrt6_ioctl ? mrt6_ioctl(cmd, data) : EOPNOTSUPP);
+ }
+
+ switch(cmd) {
+ case SIOCAADDRCTL_POLICY:
+ case SIOCDADDRCTL_POLICY:
+ if (td != NULL) {
+ error = priv_check(td, PRIV_NETINET_ADDRCTRL6);
+ if (error)
+ return (error);
+ }
+ return (in6_src_ioctl(cmd, data));
+ }
+
+ if (ifp == NULL)
+ return (EOPNOTSUPP);
+
+ switch (cmd) {
+ case SIOCSNDFLUSH_IN6:
+ case SIOCSPFXFLUSH_IN6:
+ case SIOCSRTRFLUSH_IN6:
+ case SIOCSDEFIFACE_IN6:
+ case SIOCSIFINFO_FLAGS:
+ case SIOCSIFINFO_IN6:
+ if (td != NULL) {
+ error = priv_check(td, PRIV_NETINET_ND6);
+ if (error)
+ return (error);
+ }
+ /* FALLTHROUGH */
+ case OSIOCGIFINFO_IN6:
+ case SIOCGIFINFO_IN6:
+ case SIOCGDRLST_IN6:
+ case SIOCGPRLST_IN6:
+ case SIOCGNBRINFO_IN6:
+ case SIOCGDEFIFACE_IN6:
+ return (nd6_ioctl(cmd, data, ifp));
+
+#ifdef COMPAT_FREEBSD32
+ case SIOCGDEFIFACE32_IN6:
+ {
+ struct in6_ndifreq ndif;
+ struct in6_ndifreq32 *ndif32;
+
+ error = nd6_ioctl(SIOCGDEFIFACE_IN6, (caddr_t)&ndif,
+ ifp);
+ if (error)
+ return (error);
+ ndif32 = (struct in6_ndifreq32 *)data;
+ ndif32->ifindex = ndif.ifindex;
+ return (0);
+ }
+#endif
+ }
+
+ switch (cmd) {
+ case SIOCSIFPREFIX_IN6:
+ case SIOCDIFPREFIX_IN6:
+ case SIOCAIFPREFIX_IN6:
+ case SIOCCIFPREFIX_IN6:
+ case SIOCSGIFPREFIX_IN6:
+ case SIOCGIFPREFIX_IN6:
+ log(LOG_NOTICE,
+ "prefix ioctls are now invalidated. "
+ "please use ifconfig.\n");
+ return (EOPNOTSUPP);
+ }
+
+ switch (cmd) {
+ case SIOCSSCOPE6:
+ if (td != NULL) {
+ error = priv_check(td, PRIV_NETINET_SCOPE6);
+ if (error)
+ return (error);
+ }
+ return (scope6_set(ifp,
+ (struct scope6_id *)ifr->ifr_ifru.ifru_scope_id));
+ case SIOCGSCOPE6:
+ return (scope6_get(ifp,
+ (struct scope6_id *)ifr->ifr_ifru.ifru_scope_id));
+ case SIOCGSCOPE6DEF:
+ return (scope6_get_default((struct scope6_id *)
+ ifr->ifr_ifru.ifru_scope_id));
+ }
+
+ switch (cmd) {
+ case SIOCALIFADDR:
+ if (td != NULL) {
+ error = priv_check(td, PRIV_NET_ADDIFADDR);
+ if (error)
+ return (error);
+ }
+ return in6_lifaddr_ioctl(so, cmd, data, ifp, td);
+
+ case SIOCDLIFADDR:
+ if (td != NULL) {
+ error = priv_check(td, PRIV_NET_DELIFADDR);
+ if (error)
+ return (error);
+ }
+ /* FALLTHROUGH */
+ case SIOCGLIFADDR:
+ return in6_lifaddr_ioctl(so, cmd, data, ifp, td);
+ }
+
+ /*
+ * Find address for this interface, if it exists.
+ *
+ * In netinet code, we have checked ifra_addr in SIOCSIF*ADDR operation
+ * only, and used the first interface address as the target of other
+ * operations (without checking ifra_addr). This was because netinet
+ * code/API assumed at most 1 interface address per interface.
+ * Since IPv6 allows a node to assign multiple addresses
+ * on a single interface, we almost always check for the
+ * presence of ifra_addr and reject invalid ones here.
+ * This also reduces duplicated code among the SIOC*_IN6 operations.
+ */
+ switch (cmd) {
+ case SIOCAIFADDR_IN6:
+ case SIOCSIFPHYADDR_IN6:
+ sa6 = &ifra->ifra_addr;
+ break;
+ case SIOCSIFADDR_IN6:
+ case SIOCGIFADDR_IN6:
+ case SIOCSIFDSTADDR_IN6:
+ case SIOCSIFNETMASK_IN6:
+ case SIOCGIFDSTADDR_IN6:
+ case SIOCGIFNETMASK_IN6:
+ case SIOCDIFADDR_IN6:
+ case SIOCGIFPSRCADDR_IN6:
+ case SIOCGIFPDSTADDR_IN6:
+ case SIOCGIFAFLAG_IN6:
+ case SIOCSNDFLUSH_IN6:
+ case SIOCSPFXFLUSH_IN6:
+ case SIOCSRTRFLUSH_IN6:
+ case SIOCGIFALIFETIME_IN6:
+ case SIOCSIFALIFETIME_IN6:
+ case SIOCGIFSTAT_IN6:
+ case SIOCGIFSTAT_ICMP6:
+ sa6 = &ifr->ifr_addr;
+ break;
+ default:
+ sa6 = NULL;
+ break;
+ }
+ if (sa6 && sa6->sin6_family == AF_INET6) {
+ if (sa6->sin6_scope_id != 0)
+ error = sa6_embedscope(sa6, 0);
+ else
+ error = in6_setscope(&sa6->sin6_addr, ifp, NULL);
+ if (error != 0)
+ return (error);
+ if (td != NULL && (error = prison_check_ip6(td->td_ucred,
+ &sa6->sin6_addr)) != 0)
+ return (error);
+ ia = in6ifa_ifpwithaddr(ifp, &sa6->sin6_addr);
+ } else
+ ia = NULL;
+
+ switch (cmd) {
+ case SIOCSIFADDR_IN6:
+ case SIOCSIFDSTADDR_IN6:
+ case SIOCSIFNETMASK_IN6:
+ /*
+ * Since IPv6 allows a node to assign multiple addresses
+ * on a single interface, SIOCSIFxxx ioctls are deprecated.
+ */
+ /* we decided to obsolete this command (20000704) */
+ error = EINVAL;
+ goto out;
+
+ case SIOCDIFADDR_IN6:
+ /*
+ * for IPv4, we look for existing in_ifaddr here to allow
+ * "ifconfig if0 delete" to remove the first IPv4 address on
+ * the interface. For IPv6, as the spec has allowed multiple
+ * addresses per interface from day one, we consider "remove the
+ * first one" semantics not preferable.
+ */
+ if (ia == NULL) {
+ error = EADDRNOTAVAIL;
+ goto out;
+ }
+ /* FALLTHROUGH */
+ case SIOCAIFADDR_IN6:
+ /*
+ * We always require users to specify a valid IPv6 address for
+ * the corresponding operation.
+ */
+ if (ifra->ifra_addr.sin6_family != AF_INET6 ||
+ ifra->ifra_addr.sin6_len != sizeof(struct sockaddr_in6)) {
+ error = EAFNOSUPPORT;
+ goto out;
+ }
+
+ if (td != NULL) {
+ error = priv_check(td, (cmd == SIOCDIFADDR_IN6) ?
+ PRIV_NET_DELIFADDR : PRIV_NET_ADDIFADDR);
+ if (error)
+ goto out;
+ }
+ break;
+
+ case SIOCGIFADDR_IN6:
+ /* This interface is basically deprecated. use SIOCGIFCONF. */
+ /* FALLTHROUGH */
+ case SIOCGIFAFLAG_IN6:
+ case SIOCGIFNETMASK_IN6:
+ case SIOCGIFDSTADDR_IN6:
+ case SIOCGIFALIFETIME_IN6:
+ /* must think again about its semantics */
+ if (ia == NULL) {
+ error = EADDRNOTAVAIL;
+ goto out;
+ }
+ break;
+
+ case SIOCSIFALIFETIME_IN6:
+ {
+ struct in6_addrlifetime *lt;
+
+ if (td != NULL) {
+ error = priv_check(td, PRIV_NETINET_ALIFETIME6);
+ if (error)
+ goto out;
+ }
+ if (ia == NULL) {
+ error = EADDRNOTAVAIL;
+ goto out;
+ }
+ /* sanity for overflow - beware unsigned */
+ lt = &ifr->ifr_ifru.ifru_lifetime;
+ if (lt->ia6t_vltime != ND6_INFINITE_LIFETIME &&
+ lt->ia6t_vltime + time_second < time_second) {
+ error = EINVAL;
+ goto out;
+ }
+ if (lt->ia6t_pltime != ND6_INFINITE_LIFETIME &&
+ lt->ia6t_pltime + time_second < time_second) {
+ error = EINVAL;
+ goto out;
+ }
+ break;
+ }
+ }
+
+ switch (cmd) {
+ case SIOCGIFADDR_IN6:
+ ifr->ifr_addr = ia->ia_addr;
+ if ((error = sa6_recoverscope(&ifr->ifr_addr)) != 0)
+ goto out;
+ break;
+
+ case SIOCGIFDSTADDR_IN6:
+ if ((ifp->if_flags & IFF_POINTOPOINT) == 0) {
+ error = EINVAL;
+ goto out;
+ }
+ /*
+ * XXX: should we check if ifa_dstaddr is NULL and return
+ * an error?
+ */
+ ifr->ifr_dstaddr = ia->ia_dstaddr;
+ if ((error = sa6_recoverscope(&ifr->ifr_dstaddr)) != 0)
+ goto out;
+ break;
+
+ case SIOCGIFNETMASK_IN6:
+ ifr->ifr_addr = ia->ia_prefixmask;
+ break;
+
+ case SIOCGIFAFLAG_IN6:
+ ifr->ifr_ifru.ifru_flags6 = ia->ia6_flags;
+ break;
+
+ case SIOCGIFSTAT_IN6:
+ if (ifp == NULL) {
+ error = EINVAL;
+ goto out;
+ }
+ bzero(&ifr->ifr_ifru.ifru_stat,
+ sizeof(ifr->ifr_ifru.ifru_stat));
+ ifr->ifr_ifru.ifru_stat =
+ *((struct in6_ifextra *)ifp->if_afdata[AF_INET6])->in6_ifstat;
+ break;
+
+ case SIOCGIFSTAT_ICMP6:
+ if (ifp == NULL) {
+ error = EINVAL;
+ goto out;
+ }
+ bzero(&ifr->ifr_ifru.ifru_icmp6stat,
+ sizeof(ifr->ifr_ifru.ifru_icmp6stat));
+ ifr->ifr_ifru.ifru_icmp6stat =
+ *((struct in6_ifextra *)ifp->if_afdata[AF_INET6])->icmp6_ifstat;
+ break;
+
+ case SIOCGIFALIFETIME_IN6:
+ ifr->ifr_ifru.ifru_lifetime = ia->ia6_lifetime;
+ if (ia->ia6_lifetime.ia6t_vltime != ND6_INFINITE_LIFETIME) {
+ time_t maxexpire;
+ struct in6_addrlifetime *retlt =
+ &ifr->ifr_ifru.ifru_lifetime;
+
+ /*
+ * XXX: adjust expiration time assuming time_t is
+ * signed.
+ */
+ maxexpire = (-1) &
+ ~((time_t)1 << ((sizeof(maxexpire) * 8) - 1));
+ if (ia->ia6_lifetime.ia6t_vltime <
+ maxexpire - ia->ia6_updatetime) {
+ retlt->ia6t_expire = ia->ia6_updatetime +
+ ia->ia6_lifetime.ia6t_vltime;
+ } else
+ retlt->ia6t_expire = maxexpire;
+ }
+ if (ia->ia6_lifetime.ia6t_pltime != ND6_INFINITE_LIFETIME) {
+ time_t maxexpire;
+ struct in6_addrlifetime *retlt =
+ &ifr->ifr_ifru.ifru_lifetime;
+
+ /*
+ * XXX: adjust expiration time assuming time_t is
+ * signed.
+ */
+ maxexpire = (-1) &
+ ~((time_t)1 << ((sizeof(maxexpire) * 8) - 1));
+ if (ia->ia6_lifetime.ia6t_pltime <
+ maxexpire - ia->ia6_updatetime) {
+ retlt->ia6t_preferred = ia->ia6_updatetime +
+ ia->ia6_lifetime.ia6t_pltime;
+ } else
+ retlt->ia6t_preferred = maxexpire;
+ }
+ break;
+
+ case SIOCSIFALIFETIME_IN6:
+ ia->ia6_lifetime = ifr->ifr_ifru.ifru_lifetime;
+ /* for sanity */
+ if (ia->ia6_lifetime.ia6t_vltime != ND6_INFINITE_LIFETIME) {
+ ia->ia6_lifetime.ia6t_expire =
+ time_second + ia->ia6_lifetime.ia6t_vltime;
+ } else
+ ia->ia6_lifetime.ia6t_expire = 0;
+ if (ia->ia6_lifetime.ia6t_pltime != ND6_INFINITE_LIFETIME) {
+ ia->ia6_lifetime.ia6t_preferred =
+ time_second + ia->ia6_lifetime.ia6t_pltime;
+ } else
+ ia->ia6_lifetime.ia6t_preferred = 0;
+ break;
+
+ case SIOCAIFADDR_IN6:
+ {
+ int i;
+ struct nd_prefixctl pr0;
+ struct nd_prefix *pr;
+
+ /*
+ * first, make or update the interface address structure,
+ * and link it to the list.
+ */
+ if ((error = in6_update_ifa(ifp, ifra, ia, 0)) != 0)
+ goto out;
+ if (ia != NULL)
+ ifa_free(&ia->ia_ifa);
+ if ((ia = in6ifa_ifpwithaddr(ifp, &ifra->ifra_addr.sin6_addr))
+ == NULL) {
+ /*
+ * this can happen when the user specifies a valid
+ * lifetime of 0.
+ */
+ break;
+ }
+
+ /*
+ * then, make the prefix on-link on the interface.
+ * XXX: we'd rather create the prefix before the address, but
+ * we need at least one address to install the corresponding
+ * interface route, so we configure the address first.
+ */
+
+ /*
+ * convert the mask to a prefix length (the prefix mask has
+ * already been validated in in6_update_ifa()).
+ */
+ bzero(&pr0, sizeof(pr0));
+ pr0.ndpr_ifp = ifp;
+ pr0.ndpr_plen = in6_mask2len(&ifra->ifra_prefixmask.sin6_addr,
+ NULL);
+ if (pr0.ndpr_plen == 128) {
+ break; /* we don't need to install a host route. */
+ }
+ pr0.ndpr_prefix = ifra->ifra_addr;
+ /* apply the mask for safety. */
+ for (i = 0; i < 4; i++) {
+ pr0.ndpr_prefix.sin6_addr.s6_addr32[i] &=
+ ifra->ifra_prefixmask.sin6_addr.s6_addr32[i];
+ }
+ /*
+ * XXX: since we don't have an API to set prefix (not address)
+ * lifetimes, we just use the same lifetimes as addresses.
+ * The (temporarily) installed lifetimes can be overridden by
+ * later advertised RAs (when accept_rtadv is non 0), which is
+ * an intended behavior.
+ */
+ pr0.ndpr_raf_onlink = 1; /* should be configurable? */
+ pr0.ndpr_raf_auto =
+ ((ifra->ifra_flags & IN6_IFF_AUTOCONF) != 0);
+ pr0.ndpr_vltime = ifra->ifra_lifetime.ia6t_vltime;
+ pr0.ndpr_pltime = ifra->ifra_lifetime.ia6t_pltime;
+
+ /* add the prefix if not yet. */
+ if ((pr = nd6_prefix_lookup(&pr0)) == NULL) {
+ /*
+ * nd6_prelist_add will install the corresponding
+ * interface route.
+ */
+ if ((error = nd6_prelist_add(&pr0, NULL, &pr)) != 0)
+ goto out;
+ if (pr == NULL) {
+ log(LOG_ERR, "nd6_prelist_add succeeded but "
+ "no prefix\n");
+ error = EINVAL;
+ goto out;
+ }
+ }
+
+ /* relate the address to the prefix */
+ if (ia->ia6_ndpr == NULL) {
+ ia->ia6_ndpr = pr;
+ pr->ndpr_refcnt++;
+
+ /*
+ * If this is the first autoconf address from the
+ * prefix, create a temporary address as well
+ * (when required).
+ */
+ if ((ia->ia6_flags & IN6_IFF_AUTOCONF) &&
+ V_ip6_use_tempaddr && pr->ndpr_refcnt == 1) {
+ int e;
+ if ((e = in6_tmpifadd(ia, 1, 0)) != 0) {
+ log(LOG_NOTICE, "in6_control: failed "
+ "to create a temporary address, "
+ "errno=%d\n", e);
+ }
+ }
+ }
+
+ /*
+ * this might affect the status of autoconfigured addresses,
+ * that is, this address might make other addresses detached.
+ */
+ pfxlist_onlink_check();
+ if (error == 0 && ia)
+ EVENTHANDLER_INVOKE(ifaddr_event, ifp);
+ break;
+ }
+
+ case SIOCDIFADDR_IN6:
+ {
+ struct nd_prefix *pr;
+
+ /*
+ * If the address being deleted is the only one that owns
+ * the corresponding prefix, expire the prefix as well.
+ * XXX: theoretically, we don't have to worry about such
+ * relationship, since we separate the address management
+ * and the prefix management. We do this, however, to provide
+ * as much backward compatibility as possible in terms of
+ * the ioctl operation.
+ * Note that in6_purgeaddr() will decrement ndpr_refcnt.
+ */
+ pr = ia->ia6_ndpr;
+ in6_purgeaddr(&ia->ia_ifa);
+ if (pr && pr->ndpr_refcnt == 0)
+ prelist_remove(pr);
+ EVENTHANDLER_INVOKE(ifaddr_event, ifp);
+ break;
+ }
+
+ default:
+ if (ifp == NULL || ifp->if_ioctl == 0) {
+ error = EOPNOTSUPP;
+ goto out;
+ }
+ error = (*ifp->if_ioctl)(ifp, cmd, data);
+ goto out;
+ }
+
+ error = 0;
+out:
+ if (ia != NULL)
+ ifa_free(&ia->ia_ifa);
+ return (error);
+}
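
For reference, the SIOCGIF*_IN6 requests dispatched above are driven from userland roughly as follows (a hedged sketch modeled on ifconfig-style tools; error handling omitted, and s is any AF_INET6 socket):

    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <string.h>
    #include <netinet/in.h>
    #include <netinet6/in6_var.h>	/* struct in6_ifreq, SIOCGIFAFLAG_IN6 */

    /* Query the IN6_IFF_* flags of one configured address. */
    int
    get_addr_flags(int s, const char *ifname, const struct sockaddr_in6 *addr)
    {
    	struct in6_ifreq ifr6;

    	memset(&ifr6, 0, sizeof(ifr6));
    	strlcpy(ifr6.ifr_name, ifname, sizeof(ifr6.ifr_name));
    	ifr6.ifr_ifru.ifru_addr = *addr;
    	ioctl(s, SIOCGIFAFLAG_IN6, &ifr6);
    	return (ifr6.ifr_ifru.ifru_flags6);	/* e.g. IN6_IFF_TENTATIVE */
    }
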
+
+/*
+ * Update parameters of an IPv6 interface address.
+ * If necessary, a new entry is created and linked into address chains.
+ * This function is separated from in6_control().
+ * XXX: should this be performed under splnet()?
+ */
+int
+in6_update_ifa(struct ifnet *ifp, struct in6_aliasreq *ifra,
+ struct in6_ifaddr *ia, int flags)
+{
+ int error = 0, hostIsNew = 0, plen = -1;
+ struct sockaddr_in6 dst6;
+ struct in6_addrlifetime *lt;
+ struct in6_multi_mship *imm;
+ struct in6_multi *in6m_sol;
+ struct rtentry *rt;
+ int delay;
+ char ip6buf[INET6_ADDRSTRLEN];
+
+ /* Validate parameters */
+ if (ifp == NULL || ifra == NULL) /* this may be redundant */
+ return (EINVAL);
+
+ /*
+ * The destination address for a p2p link must have a family
+ * of AF_UNSPEC or AF_INET6.
+ */
+ if ((ifp->if_flags & IFF_POINTOPOINT) != 0 &&
+ ifra->ifra_dstaddr.sin6_family != AF_INET6 &&
+ ifra->ifra_dstaddr.sin6_family != AF_UNSPEC)
+ return (EAFNOSUPPORT);
+ /*
+ * validate ifra_prefixmask. don't check sin6_family, netmask
+ * does not carry fields other than sin6_len.
+ */
+ if (ifra->ifra_prefixmask.sin6_len > sizeof(struct sockaddr_in6))
+ return (EINVAL);
+ /*
+ * Because the IPv6 address architecture is classless, we require
+ * users to specify a (non-zero) prefix length (mask) for a new address.
+ * We also require that the prefix mask (when specified) is valid, and
+ * thus reject a non-contiguous mask.
+ */
+ if (ia == NULL && ifra->ifra_prefixmask.sin6_len == 0)
+ return (EINVAL);
+ if (ifra->ifra_prefixmask.sin6_len != 0) {
+ plen = in6_mask2len(&ifra->ifra_prefixmask.sin6_addr,
+ (u_char *)&ifra->ifra_prefixmask +
+ ifra->ifra_prefixmask.sin6_len);
+ if (plen <= 0)
+ return (EINVAL);
+ } else {
+ /*
+ * In this case, ia must not be NULL. We just use its prefix
+ * length.
+ */
+ plen = in6_mask2len(&ia->ia_prefixmask.sin6_addr, NULL);
+ }
+ /*
+ * If the destination address on a p2p interface is specified,
+ * and the address is a scoped one, validate/set the scope
+ * zone identifier.
+ */
+ dst6 = ifra->ifra_dstaddr;
+ if ((ifp->if_flags & (IFF_POINTOPOINT|IFF_LOOPBACK)) != 0 &&
+ (dst6.sin6_family == AF_INET6)) {
+ struct in6_addr in6_tmp;
+ u_int32_t zoneid;
+
+ in6_tmp = dst6.sin6_addr;
+ if (in6_setscope(&in6_tmp, ifp, &zoneid))
+ return (EINVAL); /* XXX: should be impossible */
+
+ if (dst6.sin6_scope_id != 0) {
+ if (dst6.sin6_scope_id != zoneid)
+ return (EINVAL);
+ } else /* the user omitted the ID. */
+ dst6.sin6_scope_id = zoneid;
+
+ /* convert into the internal form */
+ if (sa6_embedscope(&dst6, 0))
+ return (EINVAL); /* XXX: should be impossible */
+ }
+ /*
+ * The destination address can be specified only for a p2p or a
+ * loopback interface. If specified, the corresponding prefix length
+ * must be 128.
+ */
+ if (ifra->ifra_dstaddr.sin6_family == AF_INET6) {
+ if ((ifp->if_flags & (IFF_POINTOPOINT|IFF_LOOPBACK)) == 0) {
+ /* XXX: noisy message */
+ nd6log((LOG_INFO, "in6_update_ifa: a destination can "
+ "be specified for a p2p or a loopback IF only\n"));
+ return (EINVAL);
+ }
+ if (plen != 128) {
+ nd6log((LOG_INFO, "in6_update_ifa: prefixlen should "
+ "be 128 when dstaddr is specified\n"));
+ return (EINVAL);
+ }
+ }
+ /* lifetime consistency check */
+ lt = &ifra->ifra_lifetime;
+ if (lt->ia6t_pltime > lt->ia6t_vltime)
+ return (EINVAL);
+ if (lt->ia6t_vltime == 0) {
+ /*
+ * the following log might be noisy, but this is a typical
+ * configuration mistake or a tool's bug.
+ */
+ nd6log((LOG_INFO,
+ "in6_update_ifa: valid lifetime is 0 for %s\n",
+ ip6_sprintf(ip6buf, &ifra->ifra_addr.sin6_addr)));
+
+ if (ia == NULL)
+ return (0); /* there's nothing to do */
+ }
+
+ /*
+ * If this is a new address, allocate a new ifaddr and link it
+ * into chains.
+ */
+ if (ia == NULL) {
+ hostIsNew = 1;
+ /*
+ * When in6_update_ifa() is called while processing a received
+ * RA, it runs in an interrupt context, so we must call
+ * malloc with M_NOWAIT.
+ */
+ ia = (struct in6_ifaddr *) malloc(sizeof(*ia), M_IFADDR,
+ M_NOWAIT);
+ if (ia == NULL)
+ return (ENOBUFS);
+ bzero((caddr_t)ia, sizeof(*ia));
+ ifa_init(&ia->ia_ifa);
+ LIST_INIT(&ia->ia6_memberships);
+ /* Initialize the address and masks, and put time stamp */
+ ia->ia_ifa.ifa_addr = (struct sockaddr *)&ia->ia_addr;
+ ia->ia_addr.sin6_family = AF_INET6;
+ ia->ia_addr.sin6_len = sizeof(ia->ia_addr);
+ ia->ia6_createtime = time_second;
+ if ((ifp->if_flags & (IFF_POINTOPOINT | IFF_LOOPBACK)) != 0) {
+ /*
+ * XXX: some functions expect that ifa_dstaddr is not
+ * NULL for p2p interfaces.
+ */
+ ia->ia_ifa.ifa_dstaddr =
+ (struct sockaddr *)&ia->ia_dstaddr;
+ } else {
+ ia->ia_ifa.ifa_dstaddr = NULL;
+ }
+ ia->ia_ifa.ifa_netmask = (struct sockaddr *)&ia->ia_prefixmask;
+ ia->ia_ifp = ifp;
+ ifa_ref(&ia->ia_ifa); /* if_addrhead */
+ IF_ADDR_LOCK(ifp);
+ TAILQ_INSERT_TAIL(&ifp->if_addrhead, &ia->ia_ifa, ifa_link);
+ IF_ADDR_UNLOCK(ifp);
+
+ ifa_ref(&ia->ia_ifa); /* in6_ifaddrhead */
+ IN6_IFADDR_WLOCK();
+ TAILQ_INSERT_TAIL(&V_in6_ifaddrhead, ia, ia_link);
+ IN6_IFADDR_WUNLOCK();
+ }
+
+ /* update timestamp */
+ ia->ia6_updatetime = time_second;
+
+ /* set prefix mask */
+ if (ifra->ifra_prefixmask.sin6_len) {
+ /*
+ * We prohibit changing the prefix length of an existing
+ * address, because
+ * + such an operation should be rare in IPv6, and
+ * + the operation would confuse prefix management.
+ */
+ if (ia->ia_prefixmask.sin6_len &&
+ in6_mask2len(&ia->ia_prefixmask.sin6_addr, NULL) != plen) {
+ nd6log((LOG_INFO, "in6_update_ifa: the prefix length of an"
+ " existing (%s) address should not be changed\n",
+ ip6_sprintf(ip6buf, &ia->ia_addr.sin6_addr)));
+ error = EINVAL;
+ goto unlink;
+ }
+ ia->ia_prefixmask = ifra->ifra_prefixmask;
+ }
+
+ /*
+ * If a new destination address is specified, scrub the old one and
+ * install the new destination. Note that the interface must be
+ * p2p or loopback (see the check above.)
+ */
+ if (dst6.sin6_family == AF_INET6 &&
+ !IN6_ARE_ADDR_EQUAL(&dst6.sin6_addr, &ia->ia_dstaddr.sin6_addr)) {
+ int e;
+
+ if ((ia->ia_flags & IFA_ROUTE) != 0 &&
+ (e = rtinit(&(ia->ia_ifa), (int)RTM_DELETE, RTF_HOST)) != 0) {
+ nd6log((LOG_ERR, "in6_update_ifa: failed to remove "
+ "a route to the old destination: %s\n",
+ ip6_sprintf(ip6buf, &ia->ia_addr.sin6_addr)));
+ /* proceed anyway... */
+ } else
+ ia->ia_flags &= ~IFA_ROUTE;
+ ia->ia_dstaddr = dst6;
+ }
+
+ /*
+ * Set lifetimes. We do not refer to ia6t_expire and ia6t_preferred
+ * to see if the address is deprecated or invalidated, but initialize
+ * these members for applications.
+ */
+ ia->ia6_lifetime = ifra->ifra_lifetime;
+ if (ia->ia6_lifetime.ia6t_vltime != ND6_INFINITE_LIFETIME) {
+ ia->ia6_lifetime.ia6t_expire =
+ time_second + ia->ia6_lifetime.ia6t_vltime;
+ } else
+ ia->ia6_lifetime.ia6t_expire = 0;
+ if (ia->ia6_lifetime.ia6t_pltime != ND6_INFINITE_LIFETIME) {
+ ia->ia6_lifetime.ia6t_preferred =
+ time_second + ia->ia6_lifetime.ia6t_pltime;
+ } else
+ ia->ia6_lifetime.ia6t_preferred = 0;
+
+ /* reset the interface and routing table appropriately. */
+ if ((error = in6_ifinit(ifp, ia, &ifra->ifra_addr, hostIsNew)) != 0)
+ goto unlink;
+
+ /*
+ * configure address flags.
+ */
+ ia->ia6_flags = ifra->ifra_flags;
+ /*
+ * backward compatibility: if IN6_IFF_DEPRECATED is set from
+ * userland, make the address deprecated.
+ */
+ if ((ifra->ifra_flags & IN6_IFF_DEPRECATED) != 0) {
+ ia->ia6_lifetime.ia6t_pltime = 0;
+ ia->ia6_lifetime.ia6t_preferred = time_second;
+ }
+ /*
+ * Make the address tentative before joining multicast addresses,
+ * so that corresponding MLD responses would not have a tentative
+ * source address.
+ */
+ ia->ia6_flags &= ~IN6_IFF_DUPLICATED; /* safety */
+ if (hostIsNew && in6if_do_dad(ifp))
+ ia->ia6_flags |= IN6_IFF_TENTATIVE;
+
+ /*
+ * We are done if we have simply modified an existing address.
+ */
+ if (!hostIsNew)
+ return (error);
+
+ /*
+ * Beyond this point, we should call in6_purgeaddr upon an error,
+ * not just go to unlink.
+ */
+
+ /* Join necessary multicast groups */
+ in6m_sol = NULL;
+ if ((ifp->if_flags & IFF_MULTICAST) != 0) {
+ struct sockaddr_in6 mltaddr, mltmask;
+ struct in6_addr llsol;
+
+ /* join solicited multicast addr for new host id */
+ bzero(&llsol, sizeof(struct in6_addr));
+ llsol.s6_addr32[0] = IPV6_ADDR_INT32_MLL;
+ llsol.s6_addr32[1] = 0;
+ llsol.s6_addr32[2] = htonl(1);
+ llsol.s6_addr32[3] = ifra->ifra_addr.sin6_addr.s6_addr32[3];
+ llsol.s6_addr8[12] = 0xff;
+ if ((error = in6_setscope(&llsol, ifp, NULL)) != 0) {
+ /* XXX: should not happen */
+ log(LOG_ERR, "in6_update_ifa: "
+ "in6_setscope failed\n");
+ goto cleanup;
+ }
+ delay = 0;
+ if ((flags & IN6_IFAUPDATE_DADDELAY)) {
+ /*
+ * We need a random delay for DAD on the address
+ * being configured. It also means delaying
+ * transmission of the corresponding MLD report to
+ * avoid report collision.
+ * [draft-ietf-ipv6-rfc2462bis-02.txt]
+ */
+ delay = arc4random() %
+ (MAX_RTR_SOLICITATION_DELAY * hz);
+ }
+ imm = in6_joingroup(ifp, &llsol, &error, delay);
+ if (imm == NULL) {
+ nd6log((LOG_WARNING,
+ "in6_update_ifa: addmulti failed for "
+ "%s on %s (errno=%d)\n",
+ ip6_sprintf(ip6buf, &llsol), if_name(ifp),
+ error));
+ goto cleanup;
+ }
+ LIST_INSERT_HEAD(&ia->ia6_memberships,
+ imm, i6mm_chain);
+ in6m_sol = imm->i6mm_maddr;
+
+ bzero(&mltmask, sizeof(mltmask));
+ mltmask.sin6_len = sizeof(struct sockaddr_in6);
+ mltmask.sin6_family = AF_INET6;
+ mltmask.sin6_addr = in6mask32;
+#define MLTMASK_LEN 4 /* mltmask's masklen (=32bit=4octet) */
+
+ /*
+ * join link-local all-nodes address
+ */
+ bzero(&mltaddr, sizeof(mltaddr));
+ mltaddr.sin6_len = sizeof(struct sockaddr_in6);
+ mltaddr.sin6_family = AF_INET6;
+ mltaddr.sin6_addr = in6addr_linklocal_allnodes;
+ if ((error = in6_setscope(&mltaddr.sin6_addr, ifp, NULL)) !=
+ 0)
+ goto cleanup; /* XXX: should not fail */
+
+ /*
+ * XXX: do we really need these automatic routes?
+ * We should probably reconsider this stuff. Most applications
+ * actually do not need the routes, since they usually specify
+ * the outgoing interface.
+ */
+ rt = rtalloc1((struct sockaddr *)&mltaddr, 0, 0UL);
+ if (rt) {
+ /* XXX: only works in !SCOPEDROUTING case. */
+ if (memcmp(&mltaddr.sin6_addr,
+ &((struct sockaddr_in6 *)rt_key(rt))->sin6_addr,
+ MLTMASK_LEN)) {
+ RTFREE_LOCKED(rt);
+ rt = NULL;
+ }
+ }
+ if (!rt) {
+ error = rtrequest(RTM_ADD, (struct sockaddr *)&mltaddr,
+ (struct sockaddr *)&ia->ia_addr,
+ (struct sockaddr *)&mltmask, RTF_UP,
+ (struct rtentry **)0);
+ if (error)
+ goto cleanup;
+ } else {
+ RTFREE_LOCKED(rt);
+ }
+
+ imm = in6_joingroup(ifp, &mltaddr.sin6_addr, &error, 0);
+ if (!imm) {
+ nd6log((LOG_WARNING,
+ "in6_update_ifa: addmulti failed for "
+ "%s on %s (errno=%d)\n",
+ ip6_sprintf(ip6buf, &mltaddr.sin6_addr),
+ if_name(ifp), error));
+ goto cleanup;
+ }
+ LIST_INSERT_HEAD(&ia->ia6_memberships, imm, i6mm_chain);
+
+ /*
+ * join node information group address
+ */
+ delay = 0;
+ if ((flags & IN6_IFAUPDATE_DADDELAY)) {
+ /*
+ * The spec doesn't say anything about delay for this
+ * group, but the same logic should apply.
+ */
+ delay = arc4random() %
+ (MAX_RTR_SOLICITATION_DELAY * hz);
+ }
+ if (in6_nigroup(ifp, NULL, -1, &mltaddr.sin6_addr) == 0) {
+ imm = in6_joingroup(ifp, &mltaddr.sin6_addr, &error,
+ delay); /* XXX jinmei */
+ if (!imm) {
+ nd6log((LOG_WARNING, "in6_update_ifa: "
+ "addmulti failed for %s on %s "
+ "(errno=%d)\n",
+ ip6_sprintf(ip6buf, &mltaddr.sin6_addr),
+ if_name(ifp), error));
+ /* XXX not very fatal, go on... */
+ } else {
+ LIST_INSERT_HEAD(&ia->ia6_memberships,
+ imm, i6mm_chain);
+ }
+ }
+
+ /*
+ * join interface-local all-nodes address.
+ * (ff01::1%ifN, and ff01::%ifN/32)
+ */
+ mltaddr.sin6_addr = in6addr_nodelocal_allnodes;
+ if ((error = in6_setscope(&mltaddr.sin6_addr, ifp, NULL))
+ != 0)
+ goto cleanup; /* XXX: should not fail */
+ /* XXX: again, do we really need the route? */
+ rt = rtalloc1((struct sockaddr *)&mltaddr, 0, 0UL);
+ if (rt) {
+ if (memcmp(&mltaddr.sin6_addr,
+ &((struct sockaddr_in6 *)rt_key(rt))->sin6_addr,
+ MLTMASK_LEN)) {
+ RTFREE_LOCKED(rt);
+ rt = NULL;
+ }
+ }
+ if (!rt) {
+ error = rtrequest(RTM_ADD, (struct sockaddr *)&mltaddr,
+ (struct sockaddr *)&ia->ia_addr,
+ (struct sockaddr *)&mltmask, RTF_UP,
+ (struct rtentry **)0);
+ if (error)
+ goto cleanup;
+ } else
+ RTFREE_LOCKED(rt);
+
+ imm = in6_joingroup(ifp, &mltaddr.sin6_addr, &error, 0);
+ if (!imm) {
+ nd6log((LOG_WARNING, "in6_update_ifa: "
+ "addmulti failed for %s on %s "
+ "(errno=%d)\n",
+ ip6_sprintf(ip6buf, &mltaddr.sin6_addr),
+ if_name(ifp), error));
+ goto cleanup;
+ }
+ LIST_INSERT_HEAD(&ia->ia6_memberships, imm, i6mm_chain);
+#undef MLTMASK_LEN
+ }
+
+ /*
+ * Perform DAD, if needed.
+ * XXX It might be useful to be able to administratively
+ * disable DAD.
+ */
+ if (in6if_do_dad(ifp) && ((ifra->ifra_flags & IN6_IFF_NODAD) == 0) &&
+ (ia->ia6_flags & IN6_IFF_TENTATIVE))
+ {
+ int mindelay, maxdelay;
+
+ delay = 0;
+ if ((flags & IN6_IFAUPDATE_DADDELAY)) {
+ /*
+ * We need to impose a delay before sending an NS
+ * for DAD. Check if we also needed a delay for the
+ * corresponding MLD message. If we did, the delay
+ * should be larger than the MLD delay (this could be
+ * relaxed a bit, but this simple logic is at least
+ * safe).
+ * XXX: Break data hiding guidelines and look at
+ * state for the solicited multicast group.
+ */
+ mindelay = 0;
+ if (in6m_sol != NULL &&
+ in6m_sol->in6m_state == MLD_REPORTING_MEMBER) {
+ mindelay = in6m_sol->in6m_timer;
+ }
+ maxdelay = MAX_RTR_SOLICITATION_DELAY * hz;
+ if (maxdelay - mindelay == 0)
+ delay = 0;
+ else {
+ delay =
+ (arc4random() % (maxdelay - mindelay)) +
+ mindelay;
+ }
+ }
+ nd6_dad_start((struct ifaddr *)ia, delay);
+ }
+
+ KASSERT(hostIsNew, ("in6_update_ifa: !hostIsNew"));
+ ifa_free(&ia->ia_ifa);
+ return (error);
+
+ unlink:
+ /*
+ * XXX: if a change of an existing address failed, keep the entry
+ * anyway.
+ */
+ if (hostIsNew) {
+ in6_unlink_ifa(ia, ifp);
+ ifa_free(&ia->ia_ifa);
+ }
+ return (error);
+
+ cleanup:
+ KASSERT(hostIsNew, ("in6_update_ifa: cleanup: !hostIsNew"));
+ ifa_free(&ia->ia_ifa);
+ in6_purgeaddr(&ia->ia_ifa);
+ return error;
+}
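
The llsol construction in the multicast-join block above implements the solicited-node mapping of RFC 4291 section 2.7.1: ff02::1:ffXX:XXXX, where XX:XXXX are the low-order 24 bits of the unicast address being configured. The same mapping as a stand-alone sketch (hypothetical helper):

    #include <string.h>
    #include <netinet/in.h>

    static void
    solicited_node(struct in6_addr *sol, const struct in6_addr *unicast)
    {
    	memset(sol, 0, sizeof(*sol));
    	sol->s6_addr[0] = 0xff;		/* multicast */
    	sol->s6_addr[1] = 0x02;		/* link-local scope */
    	sol->s6_addr[11] = 0x01;
    	sol->s6_addr[12] = 0xff;
    	/* low-order 24 bits of the unicast address */
    	memcpy(&sol->s6_addr[13], &unicast->s6_addr[13], 3);
    }
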
+
+void
+in6_purgeaddr(struct ifaddr *ifa)
+{
+ struct ifnet *ifp = ifa->ifa_ifp;
+ struct in6_ifaddr *ia = (struct in6_ifaddr *) ifa;
+ struct in6_multi_mship *imm;
+ struct sockaddr_in6 mltaddr, mltmask;
+ struct rtentry rt0;
+ struct sockaddr_dl gateway;
+ struct sockaddr_in6 mask, addr;
+ int plen, error;
+ struct rtentry *rt;
+ struct ifaddr *ifa0, *nifa;
+
+ /*
+ * find another IPv6 address as the gateway for the
+ * link-local and node-local all-nodes multicast
+ * address routes
+ */
+ IF_ADDR_LOCK(ifp);
+ TAILQ_FOREACH_SAFE(ifa0, &ifp->if_addrhead, ifa_link, nifa) {
+ if ((ifa0->ifa_addr->sa_family != AF_INET6) ||
+ memcmp(&satosin6(ifa0->ifa_addr)->sin6_addr,
+ &ia->ia_addr.sin6_addr,
+ sizeof(struct in6_addr)) == 0)
+ continue;
+ else
+ break;
+ }
+ if (ifa0 != NULL)
+ ifa_ref(ifa0);
+ IF_ADDR_UNLOCK(ifp);
+
+ /*
+ * Remove the loopback route to the interface address.
+ * The check for the current setting of "nd6_useloopback"
+ * is not needed.
+ */
+ if (ia->ia_flags & IFA_RTSELF) {
+ error = ifa_del_loopback_route((struct ifaddr *)ia,
+ (struct sockaddr *)&ia->ia_addr);
+ if (error == 0)
+ ia->ia_flags &= ~IFA_RTSELF;
+ }
+
+ /* stop DAD processing */
+ nd6_dad_stop(ifa);
+
+ IF_AFDATA_LOCK(ifp);
+ lla_lookup(LLTABLE6(ifp), (LLE_DELETE | LLE_IFADDR),
+ (struct sockaddr *)&ia->ia_addr);
+ IF_AFDATA_UNLOCK(ifp);
+
+ /*
+ * initialize for rtmsg generation
+ */
+ bzero(&gateway, sizeof(gateway));
+ gateway.sdl_len = sizeof(gateway);
+ gateway.sdl_family = AF_LINK;
+ gateway.sdl_nlen = 0;
+ gateway.sdl_alen = ifp->if_addrlen;
+ /* */
+ bzero(&rt0, sizeof(rt0));
+ rt0.rt_gateway = (struct sockaddr *)&gateway;
+ memcpy(&mask, &ia->ia_prefixmask, sizeof(ia->ia_prefixmask));
+ memcpy(&addr, &ia->ia_addr, sizeof(ia->ia_addr));
+ rt_mask(&rt0) = (struct sockaddr *)&mask;
+ rt_key(&rt0) = (struct sockaddr *)&addr;
+ rt0.rt_flags = RTF_HOST | RTF_STATIC;
+ rt_newaddrmsg(RTM_DELETE, ifa, 0, &rt0);
+
+ /*
+ * leave from multicast groups we have joined for the interface
+ */
+ while ((imm = ia->ia6_memberships.lh_first) != NULL) {
+ LIST_REMOVE(imm, i6mm_chain);
+ in6_leavegroup(imm);
+ }
+
+ /*
+ * remove the link-local all-nodes address
+ */
+ bzero(&mltmask, sizeof(mltmask));
+ mltmask.sin6_len = sizeof(struct sockaddr_in6);
+ mltmask.sin6_family = AF_INET6;
+ mltmask.sin6_addr = in6mask32;
+
+ bzero(&mltaddr, sizeof(mltaddr));
+ mltaddr.sin6_len = sizeof(struct sockaddr_in6);
+ mltaddr.sin6_family = AF_INET6;
+ mltaddr.sin6_addr = in6addr_linklocal_allnodes;
+
+ if ((error = in6_setscope(&mltaddr.sin6_addr, ifp, NULL)) !=
+ 0)
+ goto cleanup;
+
+ rt = rtalloc1((struct sockaddr *)&mltaddr, 0, 0UL);
+ if (rt != NULL && rt->rt_gateway != NULL &&
+ (memcmp(&satosin6(rt->rt_gateway)->sin6_addr,
+ &ia->ia_addr.sin6_addr,
+ sizeof(ia->ia_addr.sin6_addr)) == 0)) {
+ /*
+ * if no more IPv6 address exists on this interface
+ * then remove the multicast address route
+ */
+ if (ifa0 == NULL) {
+ memcpy(&mltaddr.sin6_addr, &satosin6(rt_key(rt))->sin6_addr,
+ sizeof(mltaddr.sin6_addr));
+ RTFREE_LOCKED(rt);
+ error = rtrequest(RTM_DELETE, (struct sockaddr *)&mltaddr,
+ (struct sockaddr *)&ia->ia_addr,
+ (struct sockaddr *)&mltmask, RTF_UP,
+ (struct rtentry **)0);
+ if (error)
+ log(LOG_INFO, "in6_purgeaddr: link-local all-nodes"
+ "multicast address deletion error\n");
+ } else {
+ /*
+ * replace the gateway of the route
+ */
+ struct sockaddr_in6 sa;
+
+ bzero(&sa, sizeof(sa));
+ sa.sin6_len = sizeof(struct sockaddr_in6);
+ sa.sin6_family = AF_INET6;
+ memcpy(&sa.sin6_addr, &satosin6(ifa0->ifa_addr)->sin6_addr,
+ sizeof(sa.sin6_addr));
+ in6_setscope(&sa.sin6_addr, ifa0->ifa_ifp, NULL);
+ memcpy(rt->rt_gateway, &sa, sizeof(sa));
+ RTFREE_LOCKED(rt);
+ }
+ } else {
+ if (rt != NULL)
+ RTFREE_LOCKED(rt);
+ }
+
+ /*
+ * remove the node-local all-nodes address
+ */
+ mltaddr.sin6_addr = in6addr_nodelocal_allnodes;
+ if ((error = in6_setscope(&mltaddr.sin6_addr, ifp, NULL)) !=
+ 0)
+ goto cleanup;
+
+ rt = rtalloc1((struct sockaddr *)&mltaddr, 0, 0UL);
+ if (rt != NULL && rt->rt_gateway != NULL &&
+ (memcmp(&satosin6(rt->rt_gateway)->sin6_addr,
+ &ia->ia_addr.sin6_addr,
+ sizeof(ia->ia_addr.sin6_addr)) == 0)) {
+ /*
+ * if no more IPv6 address exists on this interface
+ * then remove the multicast address route
+ */
+ if (ifa0 == NULL) {
+ memcpy(&mltaddr.sin6_addr, &satosin6(rt_key(rt))->sin6_addr,
+ sizeof(mltaddr.sin6_addr));
+
+ RTFREE_LOCKED(rt);
+ error = rtrequest(RTM_DELETE, (struct sockaddr *)&mltaddr,
+ (struct sockaddr *)&ia->ia_addr,
+ (struct sockaddr *)&mltmask, RTF_UP,
+ (struct rtentry **)0);
+
+ if (error)
+ log(LOG_INFO, "in6_purgeaddr: node-local all-nodes"
+ "multicast address deletion error\n");
+ } else {
+ /*
+ * replace the gateway of the route
+ */
+ struct sockaddr_in6 sa;
+
+ bzero(&sa, sizeof(sa));
+ sa.sin6_len = sizeof(struct sockaddr_in6);
+ sa.sin6_family = AF_INET6;
+ memcpy(&sa.sin6_addr, &satosin6(ifa0->ifa_addr)->sin6_addr,
+ sizeof(sa.sin6_addr));
+ in6_setscope(&sa.sin6_addr, ifa0->ifa_ifp, NULL);
+ memcpy(rt->rt_gateway, &sa, sizeof(sa));
+ RTFREE_LOCKED(rt);
+ }
+ } else {
+ if (rt != NULL)
+ RTFREE_LOCKED(rt);
+ }
+
+cleanup:
+
+ plen = in6_mask2len(&ia->ia_prefixmask.sin6_addr, NULL); /* XXX */
+ if ((ia->ia_flags & IFA_ROUTE) && plen == 128) {
+ int error;
+ struct sockaddr *dstaddr;
+
+ /*
+ * use the interface address if configuring an
+ * interface address with a /128 prefix len
+ */
+ if (ia->ia_dstaddr.sin6_family == AF_INET6)
+ dstaddr = (struct sockaddr *)&ia->ia_dstaddr;
+ else
+ dstaddr = (struct sockaddr *)&ia->ia_addr;
+
+ error = rtrequest(RTM_DELETE,
+ (struct sockaddr *)dstaddr,
+ (struct sockaddr *)&ia->ia_addr,
+ (struct sockaddr *)&ia->ia_prefixmask,
+ ia->ia_flags | RTF_HOST, NULL);
+ if (error != 0)
+ return;
+ ia->ia_flags &= ~IFA_ROUTE;
+ }
+ if (ifa0 != NULL)
+ ifa_free(ifa0);
+
+ in6_unlink_ifa(ia, ifp);
+}
+
+static void
+in6_unlink_ifa(struct in6_ifaddr *ia, struct ifnet *ifp)
+{
+ int s = splnet();
+
+ IF_ADDR_LOCK(ifp);
+ TAILQ_REMOVE(&ifp->if_addrhead, &ia->ia_ifa, ifa_link);
+ IF_ADDR_UNLOCK(ifp);
+ ifa_free(&ia->ia_ifa); /* if_addrhead */
+
+ /*
+ * Defer the release of what might be the last reference to the
+ * in6_ifaddr so that it can't be freed before the remainder of the
+ * cleanup.
+ */
+ IN6_IFADDR_WLOCK();
+ TAILQ_REMOVE(&V_in6_ifaddrhead, ia, ia_link);
+ IN6_IFADDR_WUNLOCK();
+
+ /*
+ * Release the reference to the base prefix. There should be a
+ * positive reference.
+ */
+ if (ia->ia6_ndpr == NULL) {
+ nd6log((LOG_NOTICE,
+ "in6_unlink_ifa: autoconf'ed address "
+ "%p has no prefix\n", ia));
+ } else {
+ ia->ia6_ndpr->ndpr_refcnt--;
+ ia->ia6_ndpr = NULL;
+ }
+
+ /*
+ * Also, if the address being removed is autoconf'ed, call
+ * pfxlist_onlink_check() since the release might affect the status of
+ * other (detached) addresses.
+ */
+ if ((ia->ia6_flags & IN6_IFF_AUTOCONF)) {
+ pfxlist_onlink_check();
+ }
+ ifa_free(&ia->ia_ifa); /* in6_ifaddrhead */
+ splx(s);
+}
+
+void
+in6_purgeif(struct ifnet *ifp)
+{
+ struct ifaddr *ifa, *nifa;
+
+ TAILQ_FOREACH_SAFE(ifa, &ifp->if_addrhead, ifa_link, nifa) {
+ if (ifa->ifa_addr->sa_family != AF_INET6)
+ continue;
+ in6_purgeaddr(ifa);
+ }
+
+ in6_ifdetach(ifp);
+}
+
+/*
+ * SIOC[GAD]LIFADDR.
+ * SIOCGLIFADDR: get first address. (?)
+ * SIOCGLIFADDR with IFLR_PREFIX:
+ * get first address that matches the specified prefix.
+ * SIOCALIFADDR: add the specified address.
+ * SIOCALIFADDR with IFLR_PREFIX:
+ * add the specified prefix, filling hostid part from
+ * the first link-local address. prefixlen must be <= 64.
+ * SIOCDLIFADDR: delete the specified address.
+ * SIOCDLIFADDR with IFLR_PREFIX:
+ * delete the first address that matches the specified prefix.
+ * return values:
+ * EINVAL on invalid parameters
+ * EADDRNOTAVAIL on prefix match failed/specified address not found
+ * other values may be returned from in6_ioctl()
+ *
+ * NOTE: SIOCALIFADDR (with IFLR_PREFIX set) allows prefixlen less than 64.
+ * This is to accommodate address naming schemes other than RFC2374
+ * in the future.
+ * RFC2373 defines the interface id to be 64 bits, but it allows non-RFC2374
+ * address encoding schemes. (see figure on page 8)
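+ *
+ * Worked example (a sketch, not from the spec): with link-local
+ * fe80::211:22ff:fe33:4455 on the interface, SIOCALIFADDR with IFLR_PREFIX
+ * and addr 2001:db8:aa:bb::/64 would configure the address
+ * 2001:db8:aa:bb:211:22ff:fe33:4455; the hostid words (s6_addr32[2] and
+ * s6_addr32[3]) are taken from the link-local address.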
+ */
+static int
+in6_lifaddr_ioctl(struct socket *so, u_long cmd, caddr_t data,
+ struct ifnet *ifp, struct thread *td)
+{
+ struct if_laddrreq *iflr = (struct if_laddrreq *)data;
+ struct ifaddr *ifa;
+ struct sockaddr *sa;
+
+ /* sanity checks */
+ if (!data || !ifp) {
+ panic("invalid argument to in6_lifaddr_ioctl");
+ /* NOTREACHED */
+ }
+
+ switch (cmd) {
+ case SIOCGLIFADDR:
+ /* address must be specified on GET with IFLR_PREFIX */
+ if ((iflr->flags & IFLR_PREFIX) == 0)
+ break;
+ /* FALLTHROUGH */
+ case SIOCALIFADDR:
+ case SIOCDLIFADDR:
+ /* address must be specified on ADD and DELETE */
+ sa = (struct sockaddr *)&iflr->addr;
+ if (sa->sa_family != AF_INET6)
+ return EINVAL;
+ if (sa->sa_len != sizeof(struct sockaddr_in6))
+ return EINVAL;
+ /* XXX need improvement */
+ sa = (struct sockaddr *)&iflr->dstaddr;
+ if (sa->sa_family && sa->sa_family != AF_INET6)
+ return EINVAL;
+ if (sa->sa_len && sa->sa_len != sizeof(struct sockaddr_in6))
+ return EINVAL;
+ break;
+ default: /* shouldn't happen */
+#if 0
+ panic("invalid cmd to in6_lifaddr_ioctl");
+ /* NOTREACHED */
+#else
+ return EOPNOTSUPP;
+#endif
+ }
+ if (sizeof(struct in6_addr) * 8 < iflr->prefixlen)
+ return EINVAL;
+
+ switch (cmd) {
+ case SIOCALIFADDR:
+ {
+ struct in6_aliasreq ifra;
+ struct in6_addr *hostid = NULL;
+ int prefixlen;
+
+ ifa = NULL;
+ if ((iflr->flags & IFLR_PREFIX) != 0) {
+ struct sockaddr_in6 *sin6;
+
+ /*
+ * hostid is to fill in the hostid part of the
+ * address. hostid points to the first link-local
+ * address attached to the interface.
+ */
+ ifa = (struct ifaddr *)in6ifa_ifpforlinklocal(ifp, 0);
+ if (!ifa)
+ return EADDRNOTAVAIL;
+ hostid = IFA_IN6(ifa);
+
+ /* prefixlen must be <= 64. */
+ if (64 < iflr->prefixlen)
+ return EINVAL;
+ prefixlen = iflr->prefixlen;
+
+ /* hostid part must be zero. */
+ sin6 = (struct sockaddr_in6 *)&iflr->addr;
+ if (sin6->sin6_addr.s6_addr32[2] != 0 ||
+ sin6->sin6_addr.s6_addr32[3] != 0) {
+ return EINVAL;
+ }
+ } else
+ prefixlen = iflr->prefixlen;
+
+ /* copy args to in6_aliasreq, perform ioctl(SIOCAIFADDR_IN6). */
+ bzero(&ifra, sizeof(ifra));
+ bcopy(iflr->iflr_name, ifra.ifra_name, sizeof(ifra.ifra_name));
+
+ bcopy(&iflr->addr, &ifra.ifra_addr,
+ ((struct sockaddr *)&iflr->addr)->sa_len);
+ if (hostid) {
+ /* fill in hostid part */
+ ifra.ifra_addr.sin6_addr.s6_addr32[2] =
+ hostid->s6_addr32[2];
+ ifra.ifra_addr.sin6_addr.s6_addr32[3] =
+ hostid->s6_addr32[3];
+ }
+
+ if (((struct sockaddr *)&iflr->dstaddr)->sa_family) { /* XXX */
+ bcopy(&iflr->dstaddr, &ifra.ifra_dstaddr,
+ ((struct sockaddr *)&iflr->dstaddr)->sa_len);
+ if (hostid) {
+ ifra.ifra_dstaddr.sin6_addr.s6_addr32[2] =
+ hostid->s6_addr32[2];
+ ifra.ifra_dstaddr.sin6_addr.s6_addr32[3] =
+ hostid->s6_addr32[3];
+ }
+ }
+ if (ifa != NULL)
+ ifa_free(ifa);
+
+ ifra.ifra_prefixmask.sin6_len = sizeof(struct sockaddr_in6);
+ in6_prefixlen2mask(&ifra.ifra_prefixmask.sin6_addr, prefixlen);
+
+ ifra.ifra_flags = iflr->flags & ~IFLR_PREFIX;
+ return in6_control(so, SIOCAIFADDR_IN6, (caddr_t)&ifra, ifp, td);
+ }
+ case SIOCGLIFADDR:
+ case SIOCDLIFADDR:
+ {
+ struct in6_ifaddr *ia;
+ struct in6_addr mask, candidate, match;
+ struct sockaddr_in6 *sin6;
+ int cmp;
+
+ bzero(&mask, sizeof(mask));
+ if (iflr->flags & IFLR_PREFIX) {
+ /* lookup a prefix rather than address. */
+ in6_prefixlen2mask(&mask, iflr->prefixlen);
+
+ sin6 = (struct sockaddr_in6 *)&iflr->addr;
+ bcopy(&sin6->sin6_addr, &match, sizeof(match));
+ match.s6_addr32[0] &= mask.s6_addr32[0];
+ match.s6_addr32[1] &= mask.s6_addr32[1];
+ match.s6_addr32[2] &= mask.s6_addr32[2];
+ match.s6_addr32[3] &= mask.s6_addr32[3];
+
+ /* if you set extra bits, that's wrong */
+ if (bcmp(&match, &sin6->sin6_addr, sizeof(match)))
+ return EINVAL;
+
+ cmp = 1;
+ } else {
+ if (cmd == SIOCGLIFADDR) {
+ /* on getting an address, take the 1st match */
+ cmp = 0; /* XXX */
+ } else {
+ /* on deleting an address, do exact match */
+ in6_prefixlen2mask(&mask, 128);
+ sin6 = (struct sockaddr_in6 *)&iflr->addr;
+ bcopy(&sin6->sin6_addr, &match, sizeof(match));
+
+ cmp = 1;
+ }
+ }
+
+ IF_ADDR_LOCK(ifp);
+ TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
+ if (ifa->ifa_addr->sa_family != AF_INET6)
+ continue;
+ if (!cmp)
+ break;
+
+ /*
+			 * XXX: this is ad hoc, but is necessary to allow
+ * a user to specify fe80::/64 (not /10) for a
+ * link-local address.
+ */
+ bcopy(IFA_IN6(ifa), &candidate, sizeof(candidate));
+ in6_clearscope(&candidate);
+ candidate.s6_addr32[0] &= mask.s6_addr32[0];
+ candidate.s6_addr32[1] &= mask.s6_addr32[1];
+ candidate.s6_addr32[2] &= mask.s6_addr32[2];
+ candidate.s6_addr32[3] &= mask.s6_addr32[3];
+ if (IN6_ARE_ADDR_EQUAL(&candidate, &match))
+ break;
+ }
+ IF_ADDR_UNLOCK(ifp);
+ if (!ifa)
+ return EADDRNOTAVAIL;
+ ia = ifa2ia6(ifa);
+
+ if (cmd == SIOCGLIFADDR) {
+ int error;
+
+ /* fill in the if_laddrreq structure */
+ bcopy(&ia->ia_addr, &iflr->addr, ia->ia_addr.sin6_len);
+ error = sa6_recoverscope(
+ (struct sockaddr_in6 *)&iflr->addr);
+ if (error != 0)
+ return (error);
+
+ if ((ifp->if_flags & IFF_POINTOPOINT) != 0) {
+ bcopy(&ia->ia_dstaddr, &iflr->dstaddr,
+ ia->ia_dstaddr.sin6_len);
+ error = sa6_recoverscope(
+ (struct sockaddr_in6 *)&iflr->dstaddr);
+ if (error != 0)
+ return (error);
+ } else
+ bzero(&iflr->dstaddr, sizeof(iflr->dstaddr));
+
+ iflr->prefixlen =
+ in6_mask2len(&ia->ia_prefixmask.sin6_addr, NULL);
+
+ iflr->flags = ia->ia6_flags; /* XXX */
+
+ return 0;
+ } else {
+ struct in6_aliasreq ifra;
+
+ /* fill in6_aliasreq and do ioctl(SIOCDIFADDR_IN6) */
+ bzero(&ifra, sizeof(ifra));
+ bcopy(iflr->iflr_name, ifra.ifra_name,
+ sizeof(ifra.ifra_name));
+
+ bcopy(&ia->ia_addr, &ifra.ifra_addr,
+ ia->ia_addr.sin6_len);
+ if ((ifp->if_flags & IFF_POINTOPOINT) != 0) {
+ bcopy(&ia->ia_dstaddr, &ifra.ifra_dstaddr,
+ ia->ia_dstaddr.sin6_len);
+ } else {
+ bzero(&ifra.ifra_dstaddr,
+ sizeof(ifra.ifra_dstaddr));
+ }
+			bcopy(&ia->ia_prefixmask, &ifra.ifra_prefixmask,
+ ia->ia_prefixmask.sin6_len);
+
+ ifra.ifra_flags = ia->ia6_flags;
+ return in6_control(so, SIOCDIFADDR_IN6, (caddr_t)&ifra,
+ ifp, td);
+ }
+ }
+ }
+
+ return EOPNOTSUPP; /* just for safety */
+}
+
+/*
+ * Initialize an interface's internet6 address
+ * and routing table entry.
+ */
+static int
+in6_ifinit(struct ifnet *ifp, struct in6_ifaddr *ia,
+ struct sockaddr_in6 *sin6, int newhost)
+{
+ int error = 0, plen, ifacount = 0;
+ int s = splimp();
+ struct ifaddr *ifa;
+
+ /*
+ * Give the interface a chance to initialize
+ * if this is its first address,
+ * and to validate the address if necessary.
+ */
+ IF_ADDR_LOCK(ifp);
+ TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
+ if (ifa->ifa_addr->sa_family != AF_INET6)
+ continue;
+ ifacount++;
+ }
+ IF_ADDR_UNLOCK(ifp);
+
+ ia->ia_addr = *sin6;
+
+ if (ifacount <= 1 && ifp->if_ioctl) {
+ error = (*ifp->if_ioctl)(ifp, SIOCSIFADDR, (caddr_t)ia);
+ if (error) {
+ splx(s);
+ return (error);
+ }
+ }
+ splx(s);
+
+ ia->ia_ifa.ifa_metric = ifp->if_metric;
+
+ /* we could do in(6)_socktrim here, but just omit it at this moment. */
+
+ /*
+ * Special case:
+ * If a new destination address is specified for a point-to-point
+ * interface, install a route to the destination as an interface
+ * direct route.
+ * XXX: the logic below rejects assigning multiple addresses on a p2p
+ * interface that share the same destination.
+ */
+ plen = in6_mask2len(&ia->ia_prefixmask.sin6_addr, NULL); /* XXX */
+ if (!(ia->ia_flags & IFA_ROUTE) && plen == 128 &&
+ ia->ia_dstaddr.sin6_family == AF_INET6) {
+ int rtflags = RTF_UP | RTF_HOST;
+
+ error = rtrequest(RTM_ADD,
+ (struct sockaddr *)&ia->ia_dstaddr,
+ (struct sockaddr *)&ia->ia_addr,
+ (struct sockaddr *)&ia->ia_prefixmask,
+ ia->ia_flags | rtflags, NULL);
+ if (error != 0)
+ return (error);
+ ia->ia_flags |= IFA_ROUTE;
+ }
+
+ /*
+ * add a loopback route to self
+ */
+ if (!(ia->ia_flags & IFA_ROUTE)
+ && (V_nd6_useloopback
+ || (ifp->if_flags & IFF_LOOPBACK))) {
+ error = ifa_add_loopback_route((struct ifaddr *)ia,
+ (struct sockaddr *)&ia->ia_addr);
+ if (error == 0)
+ ia->ia_flags |= IFA_RTSELF;
+ }
+
+ /* Add ownaddr as loopback rtentry, if necessary (ex. on p2p link). */
+ if (newhost) {
+ struct llentry *ln;
+ struct rtentry rt;
+ struct sockaddr_dl gateway;
+ struct sockaddr_in6 mask, addr;
+
+ IF_AFDATA_LOCK(ifp);
+ ia->ia_ifa.ifa_rtrequest = NULL;
+
+ /* XXX QL
+ * we need to report rt_newaddrmsg
+ */
+ ln = lla_lookup(LLTABLE6(ifp), (LLE_CREATE | LLE_IFADDR | LLE_EXCLUSIVE),
+ (struct sockaddr *)&ia->ia_addr);
+ IF_AFDATA_UNLOCK(ifp);
+ if (ln != NULL) {
+ ln->la_expire = 0; /* for IPv6 this means permanent */
+ ln->ln_state = ND6_LLINFO_REACHABLE;
+ /*
+ * initialize for rtmsg generation
+ */
+ bzero(&gateway, sizeof(gateway));
+ gateway.sdl_len = sizeof(gateway);
+ gateway.sdl_family = AF_LINK;
+ gateway.sdl_nlen = 0;
+ gateway.sdl_alen = 6;
+ memcpy(gateway.sdl_data, &ln->ll_addr.mac_aligned, sizeof(ln->ll_addr));
+ /* */
+ LLE_WUNLOCK(ln);
+ }
+
+ bzero(&rt, sizeof(rt));
+ rt.rt_gateway = (struct sockaddr *)&gateway;
+ memcpy(&mask, &ia->ia_prefixmask, sizeof(ia->ia_prefixmask));
+ memcpy(&addr, &ia->ia_addr, sizeof(ia->ia_addr));
+ rt_mask(&rt) = (struct sockaddr *)&mask;
+ rt_key(&rt) = (struct sockaddr *)&addr;
+ rt.rt_flags = RTF_UP | RTF_HOST | RTF_STATIC;
+ rt_newaddrmsg(RTM_ADD, &ia->ia_ifa, 0, &rt);
+ }
+
+ return (error);
+}
+
+/*
+ * Find an IPv6 interface link-local address specific to an interface.
+ * ifaddr is returned referenced.
+ */
+struct in6_ifaddr *
+in6ifa_ifpforlinklocal(struct ifnet *ifp, int ignoreflags)
+{
+ struct ifaddr *ifa;
+
+ IF_ADDR_LOCK(ifp);
+ TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
+ if (ifa->ifa_addr->sa_family != AF_INET6)
+ continue;
+ if (IN6_IS_ADDR_LINKLOCAL(IFA_IN6(ifa))) {
+ if ((((struct in6_ifaddr *)ifa)->ia6_flags &
+ ignoreflags) != 0)
+ continue;
+ ifa_ref(ifa);
+ break;
+ }
+ }
+ IF_ADDR_UNLOCK(ifp);
+
+ return ((struct in6_ifaddr *)ifa);
+}
+
+
+/*
+ * find the internet address corresponding to a given interface and address.
+ * ifaddr is returned referenced.
+ */
+struct in6_ifaddr *
+in6ifa_ifpwithaddr(struct ifnet *ifp, struct in6_addr *addr)
+{
+ struct ifaddr *ifa;
+
+ IF_ADDR_LOCK(ifp);
+ TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
+ if (ifa->ifa_addr->sa_family != AF_INET6)
+ continue;
+ if (IN6_ARE_ADDR_EQUAL(addr, IFA_IN6(ifa))) {
+ ifa_ref(ifa);
+ break;
+ }
+ }
+ IF_ADDR_UNLOCK(ifp);
+
+ return ((struct in6_ifaddr *)ifa);
+}
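+
+/*
+ * Usage sketch for the two lookups above (hypothetical caller): both
+ * return the ifaddr referenced, so the caller must release it:
+ *
+ *	struct in6_ifaddr *ia6;
+ *
+ *	ia6 = in6ifa_ifpforlinklocal(ifp, IN6_IFF_NOTREADY);
+ *	if (ia6 != NULL) {
+ *		... use ia6 ...
+ *		ifa_free(&ia6->ia_ifa);
+ *	}
+ */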
+
+/*
+ * Convert IP6 address to printable (loggable) representation. Caller
+ * has to make sure that ip6buf is at least INET6_ADDRSTRLEN long.
+ */
+static char digits[] = "0123456789abcdef";
+char *
+ip6_sprintf(char *ip6buf, const struct in6_addr *addr)
+{
+ int i;
+ char *cp;
+ const u_int16_t *a = (const u_int16_t *)addr;
+ const u_int8_t *d;
+ int dcolon = 0, zero = 0;
+
+ cp = ip6buf;
+
+ for (i = 0; i < 8; i++) {
+ if (dcolon == 1) {
+ if (*a == 0) {
+ if (i == 7)
+ *cp++ = ':';
+ a++;
+ continue;
+ } else
+ dcolon = 2;
+ }
+ if (*a == 0) {
+ if (dcolon == 0 && *(a + 1) == 0) {
+ if (i == 0)
+ *cp++ = ':';
+ *cp++ = ':';
+ dcolon = 1;
+ } else {
+ *cp++ = '0';
+ *cp++ = ':';
+ }
+ a++;
+ continue;
+ }
+ d = (const u_char *)a;
+ /* Try to eliminate leading zeros in printout like in :0001. */
+ zero = 1;
+ *cp = digits[*d >> 4];
+ if (*cp != '0') {
+ zero = 0;
+ cp++;
+ }
+ *cp = digits[*d++ & 0xf];
+ if (zero == 0 || (*cp != '0')) {
+ zero = 0;
+ cp++;
+ }
+ *cp = digits[*d >> 4];
+ if (zero == 0 || (*cp != '0')) {
+ zero = 0;
+ cp++;
+ }
+ *cp++ = digits[*d & 0xf];
+ *cp++ = ':';
+ a++;
+ }
+ *--cp = '\0';
+ return (ip6buf);
+}
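+
+/*
+ * Usage sketch (as in in6_lltable_rtcheck() below): the buffer is supplied
+ * by the caller, so concurrent callers do not share state:
+ *
+ *	char ip6buf[INET6_ADDRSTRLEN];
+ *
+ *	log(LOG_INFO, "addr %s\n", ip6_sprintf(ip6buf, &sin6->sin6_addr));
+ */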
+
+int
+in6_localaddr(struct in6_addr *in6)
+{
+ struct in6_ifaddr *ia;
+
+ if (IN6_IS_ADDR_LOOPBACK(in6) || IN6_IS_ADDR_LINKLOCAL(in6))
+ return 1;
+
+ IN6_IFADDR_RLOCK();
+ TAILQ_FOREACH(ia, &V_in6_ifaddrhead, ia_link) {
+ if (IN6_ARE_MASKED_ADDR_EQUAL(in6, &ia->ia_addr.sin6_addr,
+ &ia->ia_prefixmask.sin6_addr)) {
+ IN6_IFADDR_RUNLOCK();
+ return 1;
+ }
+ }
+ IN6_IFADDR_RUNLOCK();
+
+ return (0);
+}
+
+int
+in6_is_addr_deprecated(struct sockaddr_in6 *sa6)
+{
+ struct in6_ifaddr *ia;
+
+ IN6_IFADDR_RLOCK();
+ TAILQ_FOREACH(ia, &V_in6_ifaddrhead, ia_link) {
+ if (IN6_ARE_ADDR_EQUAL(&ia->ia_addr.sin6_addr,
+ &sa6->sin6_addr) &&
+ (ia->ia6_flags & IN6_IFF_DEPRECATED) != 0) {
+ IN6_IFADDR_RUNLOCK();
+ return (1); /* true */
+ }
+
+ /* XXX: do we still have to go thru the rest of the list? */
+ }
+ IN6_IFADDR_RUNLOCK();
+
+ return (0); /* false */
+}
+
+/*
+ * Return the length (in bits) of the initial part in which dst and src
+ * are equal (hard coded for the 16-byte IPv6 address).
+ */
+int
+in6_matchlen(struct in6_addr *src, struct in6_addr *dst)
+{
+ int match = 0;
+ u_char *s = (u_char *)src, *d = (u_char *)dst;
+ u_char *lim = s + 16, r;
+
+ while (s < lim)
+ if ((r = (*d++ ^ *s++)) != 0) {
+ while (r < 128) {
+ match++;
+ r <<= 1;
+ }
+ break;
+ } else
+ match += 8;
+ return match;
+}
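+
+/*
+ * Example (a sketch): for src = 2001:db8:: and dst = 2001:db8:8000::, the
+ * first differing byte is byte 4 (0x00 vs 0x80); its XOR already has the
+ * top bit set, so in6_matchlen() returns 4 * 8 = 32.
+ */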
+
+/* XXX: to be scope conscious */
+int
+in6_are_prefix_equal(struct in6_addr *p1, struct in6_addr *p2, int len)
+{
+ int bytelen, bitlen;
+
+ /* sanity check */
+ if (0 > len || len > 128) {
+ log(LOG_ERR, "in6_are_prefix_equal: invalid prefix length(%d)\n",
+ len);
+ return (0);
+ }
+
+ bytelen = len / 8;
+ bitlen = len % 8;
+
+ if (bcmp(&p1->s6_addr, &p2->s6_addr, bytelen))
+ return (0);
+ if (bitlen != 0 &&
+ p1->s6_addr[bytelen] >> (8 - bitlen) !=
+ p2->s6_addr[bytelen] >> (8 - bitlen))
+ return (0);
+
+ return (1);
+}
+
+void
+in6_prefixlen2mask(struct in6_addr *maskp, int len)
+{
+ u_char maskarray[8] = {0x80, 0xc0, 0xe0, 0xf0, 0xf8, 0xfc, 0xfe, 0xff};
+ int bytelen, bitlen, i;
+
+ /* sanity check */
+ if (0 > len || len > 128) {
+ log(LOG_ERR, "in6_prefixlen2mask: invalid prefix length(%d)\n",
+ len);
+ return;
+ }
+
+ bzero(maskp, sizeof(*maskp));
+ bytelen = len / 8;
+ bitlen = len % 8;
+ for (i = 0; i < bytelen; i++)
+ maskp->s6_addr[i] = 0xff;
+ if (bitlen)
+ maskp->s6_addr[bytelen] = maskarray[bitlen - 1];
+}
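+
+/*
+ * Example: in6_prefixlen2mask(&mask, 20) computes bytelen = 2 and
+ * bitlen = 4, so the mask becomes ff:ff:f0:00:...:00, i.e. a /20.
+ */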
+
+/*
+ * Return the best address out of the same scope.  If no address was
+ * found, return the first valid address from the designated interface.
+ */
+struct in6_ifaddr *
+in6_ifawithifp(struct ifnet *ifp, struct in6_addr *dst)
+{
+ int dst_scope = in6_addrscope(dst), blen = -1, tlen;
+ struct ifaddr *ifa;
+ struct in6_ifaddr *besta = 0;
+ struct in6_ifaddr *dep[2]; /* last-resort: deprecated */
+
+ dep[0] = dep[1] = NULL;
+
+ /*
+ * We first look for addresses in the same scope.
+ * If there is one, return it.
+ * If two or more, return one which matches the dst longest.
+ * If none, return one of global addresses assigned other ifs.
+ */
+ IF_ADDR_LOCK(ifp);
+ TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
+ if (ifa->ifa_addr->sa_family != AF_INET6)
+ continue;
+ if (((struct in6_ifaddr *)ifa)->ia6_flags & IN6_IFF_ANYCAST)
+ continue; /* XXX: is there any case to allow anycast? */
+ if (((struct in6_ifaddr *)ifa)->ia6_flags & IN6_IFF_NOTREADY)
+ continue; /* don't use this interface */
+ if (((struct in6_ifaddr *)ifa)->ia6_flags & IN6_IFF_DETACHED)
+ continue;
+ if (((struct in6_ifaddr *)ifa)->ia6_flags & IN6_IFF_DEPRECATED) {
+ if (V_ip6_use_deprecated)
+ dep[0] = (struct in6_ifaddr *)ifa;
+ continue;
+ }
+
+ if (dst_scope == in6_addrscope(IFA_IN6(ifa))) {
+ /*
+ * call in6_matchlen() as few as possible
+ */
+ if (besta) {
+ if (blen == -1)
+ blen = in6_matchlen(&besta->ia_addr.sin6_addr, dst);
+ tlen = in6_matchlen(IFA_IN6(ifa), dst);
+ if (tlen > blen) {
+ blen = tlen;
+ besta = (struct in6_ifaddr *)ifa;
+ }
+ } else
+ besta = (struct in6_ifaddr *)ifa;
+ }
+ }
+ if (besta) {
+ ifa_ref(&besta->ia_ifa);
+ IF_ADDR_UNLOCK(ifp);
+ return (besta);
+ }
+ IF_ADDR_UNLOCK(ifp);
+
+ IN6_IFADDR_RLOCK();
+ TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
+ if (ifa->ifa_addr->sa_family != AF_INET6)
+ continue;
+ if (((struct in6_ifaddr *)ifa)->ia6_flags & IN6_IFF_ANYCAST)
+ continue; /* XXX: is there any case to allow anycast? */
+ if (((struct in6_ifaddr *)ifa)->ia6_flags & IN6_IFF_NOTREADY)
+ continue; /* don't use this interface */
+ if (((struct in6_ifaddr *)ifa)->ia6_flags & IN6_IFF_DETACHED)
+ continue;
+ if (((struct in6_ifaddr *)ifa)->ia6_flags & IN6_IFF_DEPRECATED) {
+ if (V_ip6_use_deprecated)
+ dep[1] = (struct in6_ifaddr *)ifa;
+ continue;
+ }
+
+ if (ifa != NULL)
+ ifa_ref(ifa);
+ IN6_IFADDR_RUNLOCK();
+ return (struct in6_ifaddr *)ifa;
+ }
+ IN6_IFADDR_RUNLOCK();
+
+	/* use the last-resort values, that is, deprecated addresses */
+ if (dep[0])
+ return dep[0];
+ if (dep[1])
+ return dep[1];
+
+ return NULL;
+}
+
+/*
+ * perform DAD when interface becomes IFF_UP.
+ */
+void
+in6_if_up(struct ifnet *ifp)
+{
+ struct ifaddr *ifa;
+ struct in6_ifaddr *ia;
+
+ IF_ADDR_LOCK(ifp);
+ TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
+ if (ifa->ifa_addr->sa_family != AF_INET6)
+ continue;
+ ia = (struct in6_ifaddr *)ifa;
+ if (ia->ia6_flags & IN6_IFF_TENTATIVE) {
+ /*
+ * The TENTATIVE flag was likely set by hand
+ * beforehand, implicitly indicating the need for DAD.
+ * We may be able to skip the random delay in this
+ * case, but we impose delays just in case.
+ */
+ nd6_dad_start(ifa,
+ arc4random() % (MAX_RTR_SOLICITATION_DELAY * hz));
+ }
+ }
+ IF_ADDR_UNLOCK(ifp);
+
+ /*
+ * special cases, like 6to4, are handled in in6_ifattach
+ */
+ in6_ifattach(ifp, NULL);
+}
+
+int
+in6if_do_dad(struct ifnet *ifp)
+{
+ if ((ifp->if_flags & IFF_LOOPBACK) != 0)
+ return (0);
+
+ switch (ifp->if_type) {
+#ifdef IFT_DUMMY
+ case IFT_DUMMY:
+#endif
+ case IFT_FAITH:
+ /*
+ * These interfaces do not have the IFF_LOOPBACK flag,
+ * but loop packets back. We do not have to do DAD on such
+ * interfaces. We should even omit it, because loop-backed
+ * NS would confuse the DAD procedure.
+ */
+ return (0);
+ default:
+ /*
+		 * Our DAD routine requires the interface to be up and running.
+		 * However, some interfaces can be up before reaching the
+		 * RUNNING status.  Additionally, users may try to assign
+		 * addresses before the interface becomes up (or running).
+		 * We simply skip DAD in such cases as a workaround.
+ * XXX: we should rather mark "tentative" on such addresses,
+ * and do DAD after the interface becomes ready.
+ */
+ if (!((ifp->if_flags & IFF_UP) &&
+ (ifp->if_drv_flags & IFF_DRV_RUNNING)))
+ return (0);
+
+ return (1);
+ }
+}
+
+/*
+ * Calculate the maximum IPv6 MTU over all the interfaces and store it
+ * in in6_maxmtu.
+ */
+void
+in6_setmaxmtu(void)
+{
+ unsigned long maxmtu = 0;
+ struct ifnet *ifp;
+
+ IFNET_RLOCK_NOSLEEP();
+ for (ifp = TAILQ_FIRST(&V_ifnet); ifp;
+ ifp = TAILQ_NEXT(ifp, if_list)) {
+ /* this function can be called during ifnet initialization */
+ if (!ifp->if_afdata[AF_INET6])
+ continue;
+ if ((ifp->if_flags & IFF_LOOPBACK) == 0 &&
+ IN6_LINKMTU(ifp) > maxmtu)
+ maxmtu = IN6_LINKMTU(ifp);
+ }
+ IFNET_RUNLOCK_NOSLEEP();
+ if (maxmtu) /* update only when maxmtu is positive */
+ V_in6_maxmtu = maxmtu;
+}
+
+/*
+ * Provide the length of interface identifiers to be used for the link attached
+ * to the given interface.  The length should be defined in the "IPv6 over
+ * xxx-link" document.  Note that the address architecture might also define
+ * the length for a particular set of address prefixes, regardless of the
+ * link type.  As clarified in rfc2462bis, those two definitions should be
+ * consistent, and they really are as of August 2004.
+ */
+int
+in6_if2idlen(struct ifnet *ifp)
+{
+ switch (ifp->if_type) {
+ case IFT_ETHER: /* RFC2464 */
+#ifdef IFT_PROPVIRTUAL
+ case IFT_PROPVIRTUAL: /* XXX: no RFC. treat it as ether */
+#endif
+#ifdef IFT_L2VLAN
+ case IFT_L2VLAN: /* ditto */
+#endif
+#ifdef IFT_IEEE80211
+ case IFT_IEEE80211: /* ditto */
+#endif
+#ifdef IFT_MIP
+ case IFT_MIP: /* ditto */
+#endif
+ return (64);
+ case IFT_FDDI: /* RFC2467 */
+ return (64);
+ case IFT_ISO88025: /* RFC2470 (IPv6 over Token Ring) */
+ return (64);
+ case IFT_PPP: /* RFC2472 */
+ return (64);
+ case IFT_ARCNET: /* RFC2497 */
+ return (64);
+ case IFT_FRELAY: /* RFC2590 */
+ return (64);
+ case IFT_IEEE1394: /* RFC3146 */
+ return (64);
+ case IFT_GIF:
+ return (64); /* draft-ietf-v6ops-mech-v2-07 */
+ case IFT_LOOP:
+ return (64); /* XXX: is this really correct? */
+ default:
+ /*
+ * Unknown link type:
+		 * It might be controversial to use today's common constant
+ * of 64 for these cases unconditionally. For full compliance,
+ * we should return an error in this case. On the other hand,
+ * if we simply miss the standard for the link type or a new
+ * standard is defined for a new link type, the IFID length
+ * is very likely to be the common constant. As a compromise,
+ * we always use the constant, but make an explicit notice
+ * indicating the "unknown" case.
+ */
+ printf("in6_if2idlen: unknown link type (%d)\n", ifp->if_type);
+ return (64);
+ }
+}
+
+#include <rtems/freebsd/sys/sysctl.h>
+
+struct in6_llentry {
+ struct llentry base;
+ struct sockaddr_in6 l3_addr6;
+};
+
+static struct llentry *
+in6_lltable_new(const struct sockaddr *l3addr, u_int flags)
+{
+ struct in6_llentry *lle;
+
+ lle = malloc(sizeof(struct in6_llentry), M_LLTABLE,
+ M_DONTWAIT | M_ZERO);
+ if (lle == NULL) /* NB: caller generates msg */
+ return NULL;
+
+ lle->l3_addr6 = *(const struct sockaddr_in6 *)l3addr;
+ lle->base.lle_refcnt = 1;
+ LLE_LOCK_INIT(&lle->base);
+ callout_init_rw(&lle->base.ln_timer_ch, &lle->base.lle_lock,
+ CALLOUT_RETURNUNLOCKED);
+
+ return &lle->base;
+}
+
+/*
+ * Deletes an address from the address table.
+ * This function is called by the timer functions
+ * such as arptimer() and nd6_llinfo_timer(), and
+ * the caller does the locking.
+ */
+static void
+in6_lltable_free(struct lltable *llt, struct llentry *lle)
+{
+ LLE_WUNLOCK(lle);
+ LLE_LOCK_DESTROY(lle);
+ free(lle, M_LLTABLE);
+}
+
+static void
+in6_lltable_prefix_free(struct lltable *llt,
+ const struct sockaddr *prefix,
+ const struct sockaddr *mask)
+{
+ const struct sockaddr_in6 *pfx = (const struct sockaddr_in6 *)prefix;
+ const struct sockaddr_in6 *msk = (const struct sockaddr_in6 *)mask;
+ struct llentry *lle, *next;
+ register int i;
+
+ for (i=0; i < LLTBL_HASHTBL_SIZE; i++) {
+ LIST_FOREACH_SAFE(lle, &llt->lle_head[i], lle_next, next) {
+ if (IN6_ARE_MASKED_ADDR_EQUAL(
+ &((struct sockaddr_in6 *)L3_ADDR(lle))->sin6_addr,
+ &pfx->sin6_addr,
+ &msk->sin6_addr)) {
+ int canceled;
+
+ canceled = callout_drain(&lle->la_timer);
+ LLE_WLOCK(lle);
+ if (canceled)
+ LLE_REMREF(lle);
+ llentry_free(lle);
+ }
+ }
+ }
+}
+
+static int
+in6_lltable_rtcheck(struct ifnet *ifp,
+ u_int flags,
+ const struct sockaddr *l3addr)
+{
+ struct rtentry *rt;
+ char ip6buf[INET6_ADDRSTRLEN];
+
+ KASSERT(l3addr->sa_family == AF_INET6,
+ ("sin_family %d", l3addr->sa_family));
+
+ /* XXX rtalloc1 should take a const param */
+ rt = rtalloc1(__DECONST(struct sockaddr *, l3addr), 0, 0);
+ if (rt == NULL || (rt->rt_flags & RTF_GATEWAY) || rt->rt_ifp != ifp) {
+ struct ifaddr *ifa;
+ /*
+ * Create an ND6 cache for an IPv6 neighbor
+ * that is not covered by our own prefix.
+ */
+ /* XXX ifaof_ifpforaddr should take a const param */
+ ifa = ifaof_ifpforaddr(__DECONST(struct sockaddr *, l3addr), ifp);
+ if (ifa != NULL) {
+ ifa_free(ifa);
+ if (rt != NULL)
+ RTFREE_LOCKED(rt);
+ return 0;
+ }
+ log(LOG_INFO, "IPv6 address: \"%s\" is not on the network\n",
+ ip6_sprintf(ip6buf, &((const struct sockaddr_in6 *)l3addr)->sin6_addr));
+ if (rt != NULL)
+ RTFREE_LOCKED(rt);
+ return EINVAL;
+ }
+ RTFREE_LOCKED(rt);
+ return 0;
+}
+
+static struct llentry *
+in6_lltable_lookup(struct lltable *llt, u_int flags,
+ const struct sockaddr *l3addr)
+{
+ const struct sockaddr_in6 *sin6 = (const struct sockaddr_in6 *)l3addr;
+ struct ifnet *ifp = llt->llt_ifp;
+ struct llentry *lle;
+ struct llentries *lleh;
+ u_int hashkey;
+
+ IF_AFDATA_LOCK_ASSERT(ifp);
+ KASSERT(l3addr->sa_family == AF_INET6,
+ ("sin_family %d", l3addr->sa_family));
+
+ hashkey = sin6->sin6_addr.s6_addr32[3];
+ lleh = &llt->lle_head[LLATBL_HASH(hashkey, LLTBL_HASHMASK)];
+ LIST_FOREACH(lle, lleh, lle_next) {
+ struct sockaddr_in6 *sa6 = (struct sockaddr_in6 *)L3_ADDR(lle);
+ if (lle->la_flags & LLE_DELETED)
+ continue;
+ if (bcmp(&sa6->sin6_addr, &sin6->sin6_addr,
+ sizeof(struct in6_addr)) == 0)
+ break;
+ }
+
+ if (lle == NULL) {
+ if (!(flags & LLE_CREATE))
+ return (NULL);
+ /*
+ * A route that covers the given address must have
+		 * been installed first because we are doing a resolution;
+		 * verify this.
+ */
+ if (!(flags & LLE_IFADDR) &&
+ in6_lltable_rtcheck(ifp, flags, l3addr) != 0)
+ return NULL;
+
+ lle = in6_lltable_new(l3addr, flags);
+ if (lle == NULL) {
+ log(LOG_INFO, "lla_lookup: new lle malloc failed\n");
+ return NULL;
+ }
+ lle->la_flags = flags & ~LLE_CREATE;
+ if ((flags & (LLE_CREATE | LLE_IFADDR)) == (LLE_CREATE | LLE_IFADDR)) {
+ bcopy(IF_LLADDR(ifp), &lle->ll_addr, ifp->if_addrlen);
+ lle->la_flags |= (LLE_VALID | LLE_STATIC);
+ }
+
+ lle->lle_tbl = llt;
+ lle->lle_head = lleh;
+ LIST_INSERT_HEAD(lleh, lle, lle_next);
+ } else if (flags & LLE_DELETE) {
+ if (!(lle->la_flags & LLE_IFADDR) || (flags & LLE_IFADDR)) {
+ LLE_WLOCK(lle);
+ lle->la_flags = LLE_DELETED;
+ LLE_WUNLOCK(lle);
+#ifdef DIAGNOSTIC
+ log(LOG_INFO, "ifaddr cache = %p is deleted\n", lle);
+#endif
+ }
+ lle = (void *)-1;
+ }
+ if (LLE_IS_VALID(lle)) {
+ if (flags & LLE_EXCLUSIVE)
+ LLE_WLOCK(lle);
+ else
+ LLE_RLOCK(lle);
+ }
+ return (lle);
+}
+
+static int
+in6_lltable_dump(struct lltable *llt, struct sysctl_req *wr)
+{
+ struct ifnet *ifp = llt->llt_ifp;
+ struct llentry *lle;
+ /* XXX stack use */
+ struct {
+ struct rt_msghdr rtm;
+ struct sockaddr_in6 sin6;
+ /*
+ * ndp.c assumes that sdl is word aligned
+ */
+#ifdef __LP64__
+ uint32_t pad;
+#endif
+ struct sockaddr_dl sdl;
+ } ndpc;
+ int i, error;
+
+ if (ifp->if_flags & IFF_LOOPBACK)
+ return 0;
+
+ LLTABLE_LOCK_ASSERT();
+
+ error = 0;
+ for (i = 0; i < LLTBL_HASHTBL_SIZE; i++) {
+ LIST_FOREACH(lle, &llt->lle_head[i], lle_next) {
+ struct sockaddr_dl *sdl;
+
+ /* skip deleted or invalid entries */
+ if ((lle->la_flags & (LLE_DELETED|LLE_VALID)) != LLE_VALID)
+ continue;
+ /* Skip if jailed and not a valid IP of the prison. */
+ if (prison_if(wr->td->td_ucred, L3_ADDR(lle)) != 0)
+ continue;
+ /*
+ * produce a msg made of:
+ * struct rt_msghdr;
+ * struct sockaddr_in6 (IPv6)
+ * struct sockaddr_dl;
+ */
+ bzero(&ndpc, sizeof(ndpc));
+ ndpc.rtm.rtm_msglen = sizeof(ndpc);
+ ndpc.rtm.rtm_version = RTM_VERSION;
+ ndpc.rtm.rtm_type = RTM_GET;
+ ndpc.rtm.rtm_flags = RTF_UP;
+ ndpc.rtm.rtm_addrs = RTA_DST | RTA_GATEWAY;
+ ndpc.sin6.sin6_family = AF_INET6;
+ ndpc.sin6.sin6_len = sizeof(ndpc.sin6);
+ bcopy(L3_ADDR(lle), &ndpc.sin6, L3_ADDR_LEN(lle));
+
+ /* publish */
+ if (lle->la_flags & LLE_PUB)
+ ndpc.rtm.rtm_flags |= RTF_ANNOUNCE;
+
+ sdl = &ndpc.sdl;
+ sdl->sdl_family = AF_LINK;
+ sdl->sdl_len = sizeof(*sdl);
+ sdl->sdl_alen = ifp->if_addrlen;
+ sdl->sdl_index = ifp->if_index;
+ sdl->sdl_type = ifp->if_type;
+ bcopy(&lle->ll_addr, LLADDR(sdl), ifp->if_addrlen);
+ ndpc.rtm.rtm_rmx.rmx_expire =
+ lle->la_flags & LLE_STATIC ? 0 : lle->la_expire;
+ ndpc.rtm.rtm_flags |= (RTF_HOST | RTF_LLDATA);
+ if (lle->la_flags & LLE_STATIC)
+ ndpc.rtm.rtm_flags |= RTF_STATIC;
+ ndpc.rtm.rtm_index = ifp->if_index;
+ error = SYSCTL_OUT(wr, &ndpc, sizeof(ndpc));
+ if (error)
+ break;
+ }
+ }
+ return error;
+}
+
+void *
+in6_domifattach(struct ifnet *ifp)
+{
+ struct in6_ifextra *ext;
+
+ ext = (struct in6_ifextra *)malloc(sizeof(*ext), M_IFADDR, M_WAITOK);
+ bzero(ext, sizeof(*ext));
+
+ ext->in6_ifstat = (struct in6_ifstat *)malloc(sizeof(struct in6_ifstat),
+ M_IFADDR, M_WAITOK);
+ bzero(ext->in6_ifstat, sizeof(*ext->in6_ifstat));
+
+ ext->icmp6_ifstat =
+ (struct icmp6_ifstat *)malloc(sizeof(struct icmp6_ifstat),
+ M_IFADDR, M_WAITOK);
+ bzero(ext->icmp6_ifstat, sizeof(*ext->icmp6_ifstat));
+
+ ext->nd_ifinfo = nd6_ifattach(ifp);
+ ext->scope6_id = scope6_ifattach(ifp);
+ ext->lltable = lltable_init(ifp, AF_INET6);
+ if (ext->lltable != NULL) {
+ ext->lltable->llt_new = in6_lltable_new;
+ ext->lltable->llt_free = in6_lltable_free;
+ ext->lltable->llt_prefix_free = in6_lltable_prefix_free;
+ ext->lltable->llt_rtcheck = in6_lltable_rtcheck;
+ ext->lltable->llt_lookup = in6_lltable_lookup;
+ ext->lltable->llt_dump = in6_lltable_dump;
+ }
+
+ ext->mld_ifinfo = mld_domifattach(ifp);
+
+ return ext;
+}
+
+void
+in6_domifdetach(struct ifnet *ifp, void *aux)
+{
+ struct in6_ifextra *ext = (struct in6_ifextra *)aux;
+
+ mld_domifdetach(ifp);
+ scope6_ifdetach(ext->scope6_id);
+ nd6_ifdetach(ext->nd_ifinfo);
+ lltable_free(ext->lltable);
+ free(ext->in6_ifstat, M_IFADDR);
+ free(ext->icmp6_ifstat, M_IFADDR);
+ free(ext, M_IFADDR);
+}
+
+/*
+ * Convert sockaddr_in6 to sockaddr_in. Original sockaddr_in6 must be
+ * v4 mapped addr or v4 compat addr
+ */
+void
+in6_sin6_2_sin(struct sockaddr_in *sin, struct sockaddr_in6 *sin6)
+{
+
+ bzero(sin, sizeof(*sin));
+ sin->sin_len = sizeof(struct sockaddr_in);
+ sin->sin_family = AF_INET;
+ sin->sin_port = sin6->sin6_port;
+ sin->sin_addr.s_addr = sin6->sin6_addr.s6_addr32[3];
+}
+
+/* Convert sockaddr_in to sockaddr_in6 in v4 mapped addr format. */
+void
+in6_sin_2_v4mapsin6(struct sockaddr_in *sin, struct sockaddr_in6 *sin6)
+{
+ bzero(sin6, sizeof(*sin6));
+ sin6->sin6_len = sizeof(struct sockaddr_in6);
+ sin6->sin6_family = AF_INET6;
+ sin6->sin6_port = sin->sin_port;
+ sin6->sin6_addr.s6_addr32[0] = 0;
+ sin6->sin6_addr.s6_addr32[1] = 0;
+ sin6->sin6_addr.s6_addr32[2] = IPV6_ADDR_INT32_SMP;
+ sin6->sin6_addr.s6_addr32[3] = sin->sin_addr.s_addr;
+}
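+
+/*
+ * Example (a sketch): an IPv4 sockaddr for 192.0.2.1 port 80 becomes the
+ * v4-mapped IPv6 sockaddr ::ffff:192.0.2.1 port 80; IPV6_ADDR_INT32_SMP
+ * supplies the 0000:ffff marker words in the correct byte order.
+ */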
+
+/* Convert sockaddr_in6 into sockaddr_in. */
+void
+in6_sin6_2_sin_in_sock(struct sockaddr *nam)
+{
+ struct sockaddr_in *sin_p;
+ struct sockaddr_in6 sin6;
+
+ /*
+ * Save original sockaddr_in6 addr and convert it
+ * to sockaddr_in.
+ */
+ sin6 = *(struct sockaddr_in6 *)nam;
+ sin_p = (struct sockaddr_in *)nam;
+ in6_sin6_2_sin(sin_p, &sin6);
+}
+
+/* Convert sockaddr_in into sockaddr_in6 in v4 mapped addr format. */
+void
+in6_sin_2_v4mapsin6_in_sock(struct sockaddr **nam)
+{
+ struct sockaddr_in *sin_p;
+ struct sockaddr_in6 *sin6_p;
+
+ sin6_p = malloc(sizeof *sin6_p, M_SONAME,
+ M_WAITOK);
+ sin_p = (struct sockaddr_in *)*nam;
+ in6_sin_2_v4mapsin6(sin_p, sin6_p);
+ free(*nam, M_SONAME);
+ *nam = (struct sockaddr *)sin6_p;
+}
diff --git a/rtems/freebsd/netinet6/in6.h b/rtems/freebsd/netinet6/in6.h
new file mode 100644
index 00000000..8d241116
--- /dev/null
+++ b/rtems/freebsd/netinet6/in6.h
@@ -0,0 +1,708 @@
+/*-
+ * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the project nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $KAME: in6.h,v 1.89 2001/05/27 13:28:35 itojun Exp $
+ */
+
+/*-
+ * Copyright (c) 1982, 1986, 1990, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)in.h 8.3 (Berkeley) 1/3/94
+ * $FreeBSD$
+ */
+
+#ifndef __KAME_NETINET_IN_HH_INCLUDED_
+#error "do not include netinet6/in6.h directly, include netinet/in.h. see RFC2553"
+#endif
+
+#ifndef _NETINET6_IN6_HH_
+#define _NETINET6_IN6_HH_
+
+/*
+ * Identification of the network protocol stack
+ * for *BSD-current/release: http://www.kame.net/dev/cvsweb.cgi/kame/COVERAGE
+ * has the table of implementation/integration differences.
+ */
+#define __KAME__
+#define __KAME_VERSION "FreeBSD"
+
+/*
+ * IPv6 port allocation rules should mirror the IPv4 rules and are controlled
+ * by the net.inet.ip.portrange sysctl tree.  The following defines exist
+ * for compatibility with userland applications that need them.
+ */
+#if __BSD_VISIBLE
+#define IPV6PORT_RESERVED 1024
+#define IPV6PORT_ANONMIN 49152
+#define IPV6PORT_ANONMAX 65535
+#define IPV6PORT_RESERVEDMIN 600
+#define IPV6PORT_RESERVEDMAX (IPV6PORT_RESERVED-1)
+#endif
+
+/*
+ * IPv6 address
+ */
+struct in6_addr {
+ union {
+ uint8_t __u6_addr8[16];
+ uint16_t __u6_addr16[8];
+ uint32_t __u6_addr32[4];
+ } __u6_addr; /* 128-bit IP6 address */
+};
+
+#define s6_addr __u6_addr.__u6_addr8
+#ifdef _KERNEL /* XXX nonstandard */
+#define s6_addr8 __u6_addr.__u6_addr8
+#define s6_addr16 __u6_addr.__u6_addr16
+#define s6_addr32 __u6_addr.__u6_addr32
+#endif
+
+#define INET6_ADDRSTRLEN 46
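+/*
+ * 46 covers the longest textual form including the terminating NUL:
+ * "ffff:ffff:ffff:ffff:ffff:ffff:255.255.255.255" is 45 characters.
+ */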
+
+/*
+ * XXX missing POSIX.1-2001 macro IPPROTO_IPV6.
+ */
+
+/*
+ * Socket address for IPv6
+ */
+#if __BSD_VISIBLE
+#define SIN6_LEN
+#endif
+
+struct sockaddr_in6 {
+ uint8_t sin6_len; /* length of this struct */
+ sa_family_t sin6_family; /* AF_INET6 */
+ in_port_t sin6_port; /* Transport layer port # */
+ uint32_t sin6_flowinfo; /* IP6 flow information */
+ struct in6_addr sin6_addr; /* IP6 address */
+ uint32_t sin6_scope_id; /* scope zone index */
+};
+
+/*
+ * Local definition for masks
+ */
+#ifdef _KERNEL /* XXX nonstandard */
+#define IN6MASK0 {{{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }}}
+#define IN6MASK32 {{{ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, \
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }}}
+#define IN6MASK64 {{{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, \
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }}}
+#define IN6MASK96 {{{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, \
+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 }}}
+#define IN6MASK128 {{{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, \
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }}}
+#endif
+
+#ifdef _KERNEL
+extern const struct sockaddr_in6 sa6_any;
+
+extern const struct in6_addr in6mask0;
+extern const struct in6_addr in6mask32;
+extern const struct in6_addr in6mask64;
+extern const struct in6_addr in6mask96;
+extern const struct in6_addr in6mask128;
+#endif /* _KERNEL */
+
+/*
+ * Macros starting with IPV6_ADDR are KAME-local
+ */
+#ifdef _KERNEL /* XXX nonstandard */
+#if _BYTE_ORDER == _BIG_ENDIAN
+#define IPV6_ADDR_INT32_ONE 1
+#define IPV6_ADDR_INT32_TWO 2
+#define IPV6_ADDR_INT32_MNL 0xff010000
+#define IPV6_ADDR_INT32_MLL 0xff020000
+#define IPV6_ADDR_INT32_SMP 0x0000ffff
+#define IPV6_ADDR_INT16_ULL 0xfe80
+#define IPV6_ADDR_INT16_USL 0xfec0
+#define IPV6_ADDR_INT16_MLL 0xff02
+#elif _BYTE_ORDER == _LITTLE_ENDIAN
+#define IPV6_ADDR_INT32_ONE 0x01000000
+#define IPV6_ADDR_INT32_TWO 0x02000000
+#define IPV6_ADDR_INT32_MNL 0x000001ff
+#define IPV6_ADDR_INT32_MLL 0x000002ff
+#define IPV6_ADDR_INT32_SMP 0xffff0000
+#define IPV6_ADDR_INT16_ULL 0x80fe
+#define IPV6_ADDR_INT16_USL 0xc0fe
+#define IPV6_ADDR_INT16_MLL 0x02ff
+#endif
+#endif
+
+/*
+ * Definition of some useful macros to handle IP6 addresses
+ */
+#if __BSD_VISIBLE
+#define IN6ADDR_ANY_INIT \
+ {{{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }}}
+#define IN6ADDR_LOOPBACK_INIT \
+ {{{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01 }}}
+#define IN6ADDR_NODELOCAL_ALLNODES_INIT \
+ {{{ 0xff, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01 }}}
+#define IN6ADDR_INTFACELOCAL_ALLNODES_INIT \
+ {{{ 0xff, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01 }}}
+#define IN6ADDR_LINKLOCAL_ALLNODES_INIT \
+ {{{ 0xff, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01 }}}
+#define IN6ADDR_LINKLOCAL_ALLROUTERS_INIT \
+ {{{ 0xff, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02 }}}
+#define IN6ADDR_LINKLOCAL_ALLV2ROUTERS_INIT \
+ {{{ 0xff, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x16 }}}
+#endif
+
+extern const struct in6_addr in6addr_any;
+extern const struct in6_addr in6addr_loopback;
+#if __BSD_VISIBLE
+extern const struct in6_addr in6addr_nodelocal_allnodes;
+extern const struct in6_addr in6addr_linklocal_allnodes;
+extern const struct in6_addr in6addr_linklocal_allrouters;
+extern const struct in6_addr in6addr_linklocal_allv2routers;
+#endif
+
+/*
+ * Equality
+ * NOTE: Some kernel programming environments (for example, openbsd/sparc)
+ * do not supply memcmp().  For userland, memcmp() is preferred as it is
+ * in the ANSI standard.
+ */
+#ifdef _KERNEL
+#define IN6_ARE_ADDR_EQUAL(a, b) \
+ (bcmp(&(a)->s6_addr[0], &(b)->s6_addr[0], sizeof(struct in6_addr)) == 0)
+#else
+#if __BSD_VISIBLE
+#define IN6_ARE_ADDR_EQUAL(a, b) \
+ (memcmp(&(a)->s6_addr[0], &(b)->s6_addr[0], sizeof(struct in6_addr)) == 0)
+#endif
+#endif
+
+/*
+ * Unspecified
+ */
+#define IN6_IS_ADDR_UNSPECIFIED(a) \
+ ((*(const u_int32_t *)(const void *)(&(a)->s6_addr[0]) == 0) && \
+ (*(const u_int32_t *)(const void *)(&(a)->s6_addr[4]) == 0) && \
+ (*(const u_int32_t *)(const void *)(&(a)->s6_addr[8]) == 0) && \
+ (*(const u_int32_t *)(const void *)(&(a)->s6_addr[12]) == 0))
+
+/*
+ * Loopback
+ */
+#define IN6_IS_ADDR_LOOPBACK(a) \
+ ((*(const u_int32_t *)(const void *)(&(a)->s6_addr[0]) == 0) && \
+ (*(const u_int32_t *)(const void *)(&(a)->s6_addr[4]) == 0) && \
+ (*(const u_int32_t *)(const void *)(&(a)->s6_addr[8]) == 0) && \
+ (*(const u_int32_t *)(const void *)(&(a)->s6_addr[12]) == ntohl(1)))
+
+/*
+ * IPv4 compatible
+ */
+#define IN6_IS_ADDR_V4COMPAT(a) \
+ ((*(const u_int32_t *)(const void *)(&(a)->s6_addr[0]) == 0) && \
+ (*(const u_int32_t *)(const void *)(&(a)->s6_addr[4]) == 0) && \
+ (*(const u_int32_t *)(const void *)(&(a)->s6_addr[8]) == 0) && \
+ (*(const u_int32_t *)(const void *)(&(a)->s6_addr[12]) != 0) && \
+ (*(const u_int32_t *)(const void *)(&(a)->s6_addr[12]) != ntohl(1)))
+
+/*
+ * Mapped
+ */
+#define IN6_IS_ADDR_V4MAPPED(a) \
+ ((*(const u_int32_t *)(const void *)(&(a)->s6_addr[0]) == 0) && \
+ (*(const u_int32_t *)(const void *)(&(a)->s6_addr[4]) == 0) && \
+ (*(const u_int32_t *)(const void *)(&(a)->s6_addr[8]) == ntohl(0x0000ffff)))
+
+/*
+ * KAME Scope Values
+ */
+
+#ifdef _KERNEL /* XXX nonstandard */
+#define IPV6_ADDR_SCOPE_NODELOCAL 0x01
+#define IPV6_ADDR_SCOPE_INTFACELOCAL 0x01
+#define IPV6_ADDR_SCOPE_LINKLOCAL 0x02
+#define IPV6_ADDR_SCOPE_SITELOCAL 0x05
+#define IPV6_ADDR_SCOPE_ORGLOCAL 0x08 /* just used in this file */
+#define IPV6_ADDR_SCOPE_GLOBAL 0x0e
+#else
+#define __IPV6_ADDR_SCOPE_NODELOCAL 0x01
+#define __IPV6_ADDR_SCOPE_INTFACELOCAL 0x01
+#define __IPV6_ADDR_SCOPE_LINKLOCAL 0x02
+#define __IPV6_ADDR_SCOPE_SITELOCAL 0x05
+#define __IPV6_ADDR_SCOPE_ORGLOCAL 0x08 /* just used in this file */
+#define __IPV6_ADDR_SCOPE_GLOBAL 0x0e
+#endif
+
+/*
+ * Unicast Scope
+ * Note that we must check topmost 10 bits only, not 16 bits (see RFC2373).
+ */
+#define IN6_IS_ADDR_LINKLOCAL(a) \
+ (((a)->s6_addr[0] == 0xfe) && (((a)->s6_addr[1] & 0xc0) == 0x80))
+#define IN6_IS_ADDR_SITELOCAL(a) \
+ (((a)->s6_addr[0] == 0xfe) && (((a)->s6_addr[1] & 0xc0) == 0xc0))
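+/*
+ * (Example: the link-local test accepts a first byte of 0xfe with the top
+ * two bits of the second byte equal to 10, i.e. second byte 0x80-0xbf, so
+ * fe80:: through febf:: match; fec0:: satisfies the site-local test instead.)
+ */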
+
+/*
+ * Multicast
+ */
+#define IN6_IS_ADDR_MULTICAST(a) ((a)->s6_addr[0] == 0xff)
+
+#ifdef _KERNEL /* XXX nonstandard */
+#define IPV6_ADDR_MC_SCOPE(a) ((a)->s6_addr[1] & 0x0f)
+#else
+#define __IPV6_ADDR_MC_SCOPE(a) ((a)->s6_addr[1] & 0x0f)
+#endif
+
+/*
+ * Multicast Scope
+ */
+#ifdef _KERNEL /* refers nonstandard items */
+#define IN6_IS_ADDR_MC_NODELOCAL(a) \
+ (IN6_IS_ADDR_MULTICAST(a) && \
+ (IPV6_ADDR_MC_SCOPE(a) == IPV6_ADDR_SCOPE_NODELOCAL))
+#define IN6_IS_ADDR_MC_INTFACELOCAL(a) \
+ (IN6_IS_ADDR_MULTICAST(a) && \
+ (IPV6_ADDR_MC_SCOPE(a) == IPV6_ADDR_SCOPE_INTFACELOCAL))
+#define IN6_IS_ADDR_MC_LINKLOCAL(a) \
+ (IN6_IS_ADDR_MULTICAST(a) && \
+ (IPV6_ADDR_MC_SCOPE(a) == IPV6_ADDR_SCOPE_LINKLOCAL))
+#define IN6_IS_ADDR_MC_SITELOCAL(a) \
+ (IN6_IS_ADDR_MULTICAST(a) && \
+ (IPV6_ADDR_MC_SCOPE(a) == IPV6_ADDR_SCOPE_SITELOCAL))
+#define IN6_IS_ADDR_MC_ORGLOCAL(a) \
+ (IN6_IS_ADDR_MULTICAST(a) && \
+ (IPV6_ADDR_MC_SCOPE(a) == IPV6_ADDR_SCOPE_ORGLOCAL))
+#define IN6_IS_ADDR_MC_GLOBAL(a) \
+ (IN6_IS_ADDR_MULTICAST(a) && \
+ (IPV6_ADDR_MC_SCOPE(a) == IPV6_ADDR_SCOPE_GLOBAL))
+#else
+#define IN6_IS_ADDR_MC_NODELOCAL(a) \
+ (IN6_IS_ADDR_MULTICAST(a) && \
+ (__IPV6_ADDR_MC_SCOPE(a) == __IPV6_ADDR_SCOPE_NODELOCAL))
+#define IN6_IS_ADDR_MC_LINKLOCAL(a) \
+ (IN6_IS_ADDR_MULTICAST(a) && \
+ (__IPV6_ADDR_MC_SCOPE(a) == __IPV6_ADDR_SCOPE_LINKLOCAL))
+#define IN6_IS_ADDR_MC_SITELOCAL(a) \
+ (IN6_IS_ADDR_MULTICAST(a) && \
+ (__IPV6_ADDR_MC_SCOPE(a) == __IPV6_ADDR_SCOPE_SITELOCAL))
+#define IN6_IS_ADDR_MC_ORGLOCAL(a) \
+ (IN6_IS_ADDR_MULTICAST(a) && \
+ (__IPV6_ADDR_MC_SCOPE(a) == __IPV6_ADDR_SCOPE_ORGLOCAL))
+#define IN6_IS_ADDR_MC_GLOBAL(a) \
+ (IN6_IS_ADDR_MULTICAST(a) && \
+ (__IPV6_ADDR_MC_SCOPE(a) == __IPV6_ADDR_SCOPE_GLOBAL))
+#endif
+
+#ifdef _KERNEL /* nonstandard */
+/*
+ * KAME Scope
+ */
+#define IN6_IS_SCOPE_LINKLOCAL(a) \
+ ((IN6_IS_ADDR_LINKLOCAL(a)) || \
+ (IN6_IS_ADDR_MC_LINKLOCAL(a)))
+#define IN6_IS_SCOPE_EMBED(a) \
+ ((IN6_IS_ADDR_LINKLOCAL(a)) || \
+ (IN6_IS_ADDR_MC_LINKLOCAL(a)) || \
+ (IN6_IS_ADDR_MC_INTFACELOCAL(a)))
+
+#define IFA6_IS_DEPRECATED(a) \
+ ((a)->ia6_lifetime.ia6t_pltime != ND6_INFINITE_LIFETIME && \
+ (u_int32_t)((time_second - (a)->ia6_updatetime)) > \
+ (a)->ia6_lifetime.ia6t_pltime)
+#define IFA6_IS_INVALID(a) \
+ ((a)->ia6_lifetime.ia6t_vltime != ND6_INFINITE_LIFETIME && \
+ (u_int32_t)((time_second - (a)->ia6_updatetime)) > \
+ (a)->ia6_lifetime.ia6t_vltime)
+#endif /* _KERNEL */
+
+/*
+ * IP6 route structure
+ */
+#if __BSD_VISIBLE
+struct route_in6 {
+ struct rtentry *ro_rt;
+ struct llentry *ro_lle;
+ struct sockaddr_in6 ro_dst;
+};
+#endif
+
+/*
+ * Options for use with [gs]etsockopt at the IPV6 level.
+ * First word of comment is data type; bool is stored in int.
+ */
+/* no hdrincl */
+#if 0 /* the following are relics in IPv4 and hence are disabled */
+#define IPV6_OPTIONS 1 /* buf/ip6_opts; set/get IP6 options */
+#define IPV6_RECVOPTS 5 /* bool; receive all IP6 opts w/dgram */
+#define IPV6_RECVRETOPTS 6 /* bool; receive IP6 opts for response */
+#define IPV6_RECVDSTADDR 7 /* bool; receive IP6 dst addr w/dgram */
+#define IPV6_RETOPTS 8 /* ip6_opts; set/get IP6 options */
+#endif
+#define IPV6_SOCKOPT_RESERVED1 3 /* reserved for future use */
+#define IPV6_UNICAST_HOPS 4 /* int; IP6 hops */
+#define IPV6_MULTICAST_IF 9 /* u_int; set/get IP6 multicast i/f */
+#define IPV6_MULTICAST_HOPS 10 /* int; set/get IP6 multicast hops */
+#define IPV6_MULTICAST_LOOP 11 /* u_int; set/get IP6 multicast loopback */
+#define IPV6_JOIN_GROUP 12 /* ip6_mreq; join a group membership */
+#define IPV6_LEAVE_GROUP 13 /* ip6_mreq; leave a group membership */
+#define IPV6_PORTRANGE 14 /* int; range to choose for unspec port */
+#define ICMP6_FILTER 18 /* icmp6_filter; icmp6 filter */
+/* RFC2292 options */
+#ifdef _KERNEL
+#define IPV6_2292PKTINFO 19 /* bool; send/recv if, src/dst addr */
+#define IPV6_2292HOPLIMIT 20 /* bool; hop limit */
+#define IPV6_2292NEXTHOP 21 /* bool; next hop addr */
+#define IPV6_2292HOPOPTS 22 /* bool; hop-by-hop option */
+#define IPV6_2292DSTOPTS	23 /* bool; destination option */
+#define IPV6_2292RTHDR 24 /* bool; routing header */
+#define IPV6_2292PKTOPTIONS 25 /* buf/cmsghdr; set/get IPv6 options */
+#endif
+
+#define IPV6_CHECKSUM 26 /* int; checksum offset for raw socket */
+#define IPV6_V6ONLY 27 /* bool; make AF_INET6 sockets v6 only */
+#ifndef _KERNEL
+#define IPV6_BINDV6ONLY IPV6_V6ONLY
+#endif
+
+#if 1 /* IPSEC */
+#define IPV6_IPSEC_POLICY 28 /* struct; get/set security policy */
+#endif /* IPSEC */
+
+#define IPV6_FAITH 29 /* bool; accept FAITH'ed connections */
+
+#if 1 /* IPV6FIREWALL */
+#define IPV6_FW_ADD 30 /* add a firewall rule to chain */
+#define IPV6_FW_DEL 31 /* delete a firewall rule from chain */
+#define IPV6_FW_FLUSH 32 /* flush firewall rule chain */
+#define IPV6_FW_ZERO 33 /* clear single/all firewall counter(s) */
+#define IPV6_FW_GET 34 /* get entire firewall rule chain */
+#endif
+
+/* new socket options introduced in RFC3542 */
+#define IPV6_RTHDRDSTOPTS 35 /* ip6_dest; send dst option before rthdr */
+
+#define IPV6_RECVPKTINFO 36 /* bool; recv if, dst addr */
+#define IPV6_RECVHOPLIMIT 37 /* bool; recv hop limit */
+#define IPV6_RECVRTHDR 38 /* bool; recv routing header */
+#define IPV6_RECVHOPOPTS 39 /* bool; recv hop-by-hop option */
+#define IPV6_RECVDSTOPTS 40 /* bool; recv dst option after rthdr */
+#ifdef _KERNEL
+#define IPV6_RECVRTHDRDSTOPTS 41 /* bool; recv dst option before rthdr */
+#endif
+
+#define IPV6_USE_MIN_MTU 42 /* bool; send packets at the minimum MTU */
+#define IPV6_RECVPATHMTU 43 /* bool; notify an according MTU */
+
+#define IPV6_PATHMTU 44 /* mtuinfo; get the current path MTU (sopt),
+ 4 bytes int; MTU notification (cmsg) */
+#if 0 /*obsoleted during 2292bis -> 3542*/
+#define IPV6_REACHCONF 45 /* no data; ND reachability confirm
+				   (cmsg only/not in RFC3542) */
+#endif
+
+/* more new socket options introduced in RFC3542 */
+#define IPV6_PKTINFO 46 /* in6_pktinfo; send if, src addr */
+#define IPV6_HOPLIMIT 47 /* int; send hop limit */
+#define IPV6_NEXTHOP 48 /* sockaddr; next hop addr */
+#define IPV6_HOPOPTS 49 /* ip6_hbh; send hop-by-hop option */
+#define IPV6_DSTOPTS		50 /* ip6_dest; send dst option before rthdr */
+#define IPV6_RTHDR 51 /* ip6_rthdr; send routing header */
+#if 0
+#define IPV6_PKTOPTIONS 52 /* buf/cmsghdr; set/get IPv6 options */
+ /* obsoleted by RFC3542 */
+#endif
+
+#define IPV6_RECVTCLASS 57 /* bool; recv traffic class values */
+
+#define IPV6_AUTOFLOWLABEL 59 /* bool; attach flowlabel automagically */
+
+#define IPV6_TCLASS 61 /* int; send traffic class value */
+#define IPV6_DONTFRAG 62 /* bool; disable IPv6 fragmentation */
+
+#define IPV6_PREFER_TEMPADDR 63 /* int; prefer temporary addresses as
+ * the source address.
+ */
+
+#define IPV6_BINDANY 64 /* bool: allow bind to any address */
+
+/*
+ * The following option is private; do not use it from user applications.
+ * It is deliberately defined to the same value as IP_MSFILTER.
+ */
+#define IPV6_MSFILTER 74 /* struct __msfilterreq;
+ * set/get multicast source filter list.
+ */
+
+/* to define items, should talk with KAME guys first, for *BSD compatibility */
+
+#define IPV6_RTHDR_LOOSE 0 /* this hop need not be a neighbor. XXX old spec */
+#define IPV6_RTHDR_STRICT 1 /* this hop must be a neighbor. XXX old spec */
+#define IPV6_RTHDR_TYPE_0 0 /* IPv6 routing header type 0 */
+
+/*
+ * Defaults and limits for options
+ */
+#define IPV6_DEFAULT_MULTICAST_HOPS 1 /* normally limit m'casts to 1 hop */
+#define IPV6_DEFAULT_MULTICAST_LOOP 1 /* normally hear sends if a member */
+
+/*
+ * The im6o_membership vector for each socket is now dynamically allocated at
+ * run-time, bounded by USHRT_MAX, and is reallocated when needed, sized
+ * according to a power-of-two increment.
+ */
+#define IPV6_MIN_MEMBERSHIPS 31
+#define IPV6_MAX_MEMBERSHIPS 4095
+
+/*
+ * Default resource limits for IPv6 multicast source filtering.
+ * These may be modified by sysctl.
+ */
+#define IPV6_MAX_GROUP_SRC_FILTER 512 /* sources per group */
+#define IPV6_MAX_SOCK_SRC_FILTER 128 /* sources per socket/group */
+
+/*
+ * Argument structure for IPV6_JOIN_GROUP and IPV6_LEAVE_GROUP.
+ */
+struct ipv6_mreq {
+ struct in6_addr ipv6mr_multiaddr;
+ unsigned int ipv6mr_interface;
+};
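
For illustration, a user application joins a group with this structure via
the standard setsockopt() interface. A minimal sketch, assuming a POSIX
sockets environment (join_group is a hypothetical helper; error handling
beyond the return value is omitted):

    #include <arpa/inet.h>
    #include <netinet/in.h>
    #include <string.h>
    #include <sys/socket.h>

    /* Join the given IPv6 multicast group on one interface (0 = any). */
    static int
    join_group(int s, const char *group, unsigned int ifindex)
    {
            struct ipv6_mreq mreq;

            memset(&mreq, 0, sizeof(mreq));
            if (inet_pton(AF_INET6, group, &mreq.ipv6mr_multiaddr) != 1)
                    return (-1);
            mreq.ipv6mr_interface = ifindex;
            return (setsockopt(s, IPPROTO_IPV6, IPV6_JOIN_GROUP,
                &mreq, sizeof(mreq)));
    }
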
+
+/*
+ * IPV6_PKTINFO: Packet information (RFC2292 sec 5)
+ */
+struct in6_pktinfo {
+ struct in6_addr ipi6_addr; /* src/dst IPv6 address */
+ unsigned int ipi6_ifindex; /* send/recv interface index */
+};
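
The RFC 3542 usage pattern for this structure, sketched below for a UDP
socket: enable IPV6_RECVPKTINFO, then walk the ancillary data returned by
recvmsg() for an IPV6_PKTINFO message (recv_with_pktinfo is a hypothetical
helper; error handling is minimal):

    #include <netinet/in.h>
    #include <string.h>
    #include <sys/socket.h>
    #include <sys/types.h>
    #include <sys/uio.h>

    /* Receive one datagram; report its destination address/interface. */
    static ssize_t
    recv_with_pktinfo(int s, void *buf, size_t len, struct in6_pktinfo *pi)
    {
            int on = 1;
            char cbuf[CMSG_SPACE(sizeof(struct in6_pktinfo))];
            struct iovec iov = { buf, len };
            struct msghdr msg;
            struct cmsghdr *cm;
            ssize_t n;

            /* harmless to repeat; normally done once after socket() */
            setsockopt(s, IPPROTO_IPV6, IPV6_RECVPKTINFO, &on, sizeof(on));

            memset(&msg, 0, sizeof(msg));
            msg.msg_iov = &iov;
            msg.msg_iovlen = 1;
            msg.msg_control = cbuf;
            msg.msg_controllen = sizeof(cbuf);
            if ((n = recvmsg(s, &msg, 0)) < 0)
                    return (n);
            for (cm = CMSG_FIRSTHDR(&msg); cm != NULL;
                cm = CMSG_NXTHDR(&msg, cm))
                    if (cm->cmsg_level == IPPROTO_IPV6 &&
                        cm->cmsg_type == IPV6_PKTINFO)
                            memcpy(pi, CMSG_DATA(cm), sizeof(*pi));
            return (n);
    }
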
+
+/*
+ * Control structure for IPV6_RECVPATHMTU socket option.
+ */
+struct ip6_mtuinfo {
+ struct sockaddr_in6 ip6m_addr; /* or sockaddr_storage? */
+ uint32_t ip6m_mtu;
+};
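
A sketch of how these notifications arrive: with IPV6_RECVPATHMTU enabled, a
path MTU change is delivered as an IPV6_PATHMTU ancillary message carrying
this structure (get_pathmtu is a hypothetical helper operating on a msghdr
already filled in by recvmsg()):

    #include <netinet/in.h>
    #include <string.h>
    #include <sys/socket.h>

    /* Return 1 and fill *mtu if msg carries a path-MTU notification. */
    static int
    get_pathmtu(struct msghdr *msg, struct ip6_mtuinfo *mtu)
    {
            struct cmsghdr *cm;

            for (cm = CMSG_FIRSTHDR(msg); cm != NULL;
                cm = CMSG_NXTHDR(msg, cm))
                    if (cm->cmsg_level == IPPROTO_IPV6 &&
                        cm->cmsg_type == IPV6_PATHMTU) {
                            memcpy(mtu, CMSG_DATA(cm), sizeof(*mtu));
                            /* mtu->ip6m_mtu: new MTU toward ip6m_addr */
                            return (1);
                    }
            return (0);
    }
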
+
+/*
+ * Argument for IPV6_PORTRANGE:
+ * - which range to search when port is unspecified at bind() or connect()
+ */
+#define IPV6_PORTRANGE_DEFAULT 0 /* default range */
+#define IPV6_PORTRANGE_HIGH 1 /* "high" - request firewall bypass */
+#define IPV6_PORTRANGE_LOW 2 /* "low" - vouchsafe security */
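
Selecting a range is a plain int option set before bind(); a minimal sketch
of the BSD-specific usage (use_high_portrange is a hypothetical helper):

    #include <netinet/in.h>
    #include <sys/socket.h>

    /* Ask for ephemeral ports from the "high" range on later bind(). */
    static int
    use_high_portrange(int s)
    {
            int range = IPV6_PORTRANGE_HIGH;

            return (setsockopt(s, IPPROTO_IPV6, IPV6_PORTRANGE,
                &range, sizeof(range)));
    }
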
+
+#if __BSD_VISIBLE
+/*
+ * Definitions for inet6 sysctl operations.
+ *
+ * Third level is protocol number.
+ * Fourth level is desired variable within that protocol.
+ */
+#define IPV6PROTO_MAXID (IPPROTO_PIM + 1) /* don't list up to IPV6PROTO_MAX */
+
+/*
+ * Names for IP sysctl objects
+ */
+#define IPV6CTL_FORWARDING 1 /* act as router */
+#define IPV6CTL_SENDREDIRECTS 2 /* may send redirects when forwarding*/
+#define IPV6CTL_DEFHLIM 3 /* default Hop-Limit */
+#ifdef notyet
+#define IPV6CTL_DEFMTU 4 /* default MTU */
+#endif
+#define IPV6CTL_FORWSRCRT 5 /* forward source-routed dgrams */
+#define IPV6CTL_STATS 6 /* stats */
+#define IPV6CTL_MRTSTATS 7 /* multicast forwarding stats */
+#define IPV6CTL_MRTPROTO 8 /* multicast routing protocol */
+#define IPV6CTL_MAXFRAGPACKETS 9 /* max packets reassembly queue */
+#define IPV6CTL_SOURCECHECK 10 /* verify source route and intf */
+#define IPV6CTL_SOURCECHECK_LOGINT 11 /* minimum logging interval */
+#define IPV6CTL_ACCEPT_RTADV 12
+#define IPV6CTL_KEEPFAITH 13
+#define IPV6CTL_LOG_INTERVAL 14
+#define IPV6CTL_HDRNESTLIMIT 15
+#define IPV6CTL_DAD_COUNT 16
+#define IPV6CTL_AUTO_FLOWLABEL 17
+#define IPV6CTL_DEFMCASTHLIM 18
+#define IPV6CTL_GIF_HLIM 19 /* default HLIM for gif encap packet */
+#define IPV6CTL_KAME_VERSION 20
+#define IPV6CTL_USE_DEPRECATED 21 /* use deprecated addr (RFC2462 5.5.4) */
+#define IPV6CTL_RR_PRUNE 22 /* walk timer for router renumbering */
+#if 0 /* obsolete */
+#define IPV6CTL_MAPPED_ADDR 23
+#endif
+#define IPV6CTL_V6ONLY 24
+#define IPV6CTL_RTEXPIRE 25 /* cloned route expiration time */
+#define IPV6CTL_RTMINEXPIRE 26 /* min value for expiration time */
+#define IPV6CTL_RTMAXCACHE 27 /* trigger level for dynamic expire */
+
+#define IPV6CTL_USETEMPADDR 32 /* use temporary addresses (RFC3041) */
+#define IPV6CTL_TEMPPLTIME 33 /* preferred lifetime for tmpaddrs */
+#define IPV6CTL_TEMPVLTIME 34 /* valid lifetime for tmpaddrs */
+#define IPV6CTL_AUTO_LINKLOCAL 35 /* automatic link-local addr assign */
+#define IPV6CTL_RIP6STATS 36 /* raw_ip6 stats */
+#define IPV6CTL_PREFER_TEMPADDR 37 /* prefer temporary addr as src */
+#define IPV6CTL_ADDRCTLPOLICY 38 /* get/set address selection policy */
+#define IPV6CTL_USE_DEFAULTZONE 39 /* use default scope zone */
+
+#define IPV6CTL_MAXFRAGS 41 /* max fragments */
+#if 0
+#define IPV6CTL_IFQ 42 /* ip6intrq node */
+#define IPV6CTL_ISATAPRTR 43 /* isatap router */
+#endif
+#define IPV6CTL_MCAST_PMTU 44 /* enable pMTU discovery for multicast? */
+
+/* New entries should be added here from current IPV6CTL_MAXID value. */
+/* to define items, should talk with KAME guys first, for *BSD compatibility */
+#define IPV6CTL_STEALTH 45
+
+#define ICMPV6CTL_ND6_ONLINKNSRFC4861 47
+#define IPV6CTL_MAXID 48
+#endif /* __BSD_VISIBLE */
+
+/*
+ * Redefinition of mbuf flags
+ */
+#define M_AUTHIPHDR M_PROTO2
+#define M_DECRYPTED M_PROTO3
+#define M_LOOP M_PROTO4
+#define M_AUTHIPDGM M_PROTO5
+#define M_RTALERT_MLD M_PROTO6
+
+#ifdef _KERNEL
+struct cmsghdr;
+
+int in6_cksum __P((struct mbuf *, u_int8_t, u_int32_t, u_int32_t));
+int in6_localaddr __P((struct in6_addr *));
+int in6_addrscope __P((struct in6_addr *));
+struct in6_ifaddr *in6_ifawithifp __P((struct ifnet *, struct in6_addr *));
+extern void in6_if_up __P((struct ifnet *));
+struct sockaddr;
+extern u_char ip6_protox[];
+
+void in6_sin6_2_sin __P((struct sockaddr_in *sin,
+ struct sockaddr_in6 *sin6));
+void in6_sin_2_v4mapsin6 __P((struct sockaddr_in *sin,
+ struct sockaddr_in6 *sin6));
+void in6_sin6_2_sin_in_sock __P((struct sockaddr *nam));
+void in6_sin_2_v4mapsin6_in_sock __P((struct sockaddr **nam));
+extern void addrsel_policy_init __P((void));
+
+#define satosin6(sa) ((struct sockaddr_in6 *)(sa))
+#define sin6tosa(sin6) ((struct sockaddr *)(sin6))
+#define ifatoia6(ifa) ((struct in6_ifaddr *)(ifa))
+
+extern int (*faithprefix_p)(struct in6_addr *);
+#endif /* _KERNEL */
+
+#ifndef _SIZE_T_DECLARED
+typedef __size_t size_t;
+#define _SIZE_T_DECLARED
+#endif
+
+#ifndef _SOCKLEN_T_DECLARED
+typedef __socklen_t socklen_t;
+#define _SOCKLEN_T_DECLARED
+#endif
+
+#if __BSD_VISIBLE
+
+__BEGIN_DECLS
+struct cmsghdr;
+
+extern int inet6_option_space __P((int));
+extern int inet6_option_init __P((void *, struct cmsghdr **, int));
+extern int inet6_option_append __P((struct cmsghdr *, const uint8_t *,
+ int, int));
+extern uint8_t *inet6_option_alloc __P((struct cmsghdr *, int, int, int));
+extern int inet6_option_next __P((const struct cmsghdr *, uint8_t **));
+extern int inet6_option_find __P((const struct cmsghdr *, uint8_t **, int));
+
+extern size_t inet6_rthdr_space __P((int, int));
+extern struct cmsghdr *inet6_rthdr_init __P((void *, int));
+extern int inet6_rthdr_add __P((struct cmsghdr *, const struct in6_addr *,
+ unsigned int));
+extern int inet6_rthdr_lasthop __P((struct cmsghdr *, unsigned int));
+#if 0 /* not implemented yet */
+extern int inet6_rthdr_reverse __P((const struct cmsghdr *, struct cmsghdr *));
+#endif
+extern int inet6_rthdr_segments __P((const struct cmsghdr *));
+extern struct in6_addr *inet6_rthdr_getaddr __P((struct cmsghdr *, int));
+extern int inet6_rthdr_getflags __P((const struct cmsghdr *, int));
+
+extern int inet6_opt_init __P((void *, socklen_t));
+extern int inet6_opt_append __P((void *, socklen_t, int, uint8_t, socklen_t,
+ uint8_t, void **));
+extern int inet6_opt_finish __P((void *, socklen_t, int));
+extern int inet6_opt_set_val __P((void *, int, void *, socklen_t));
+
+extern int inet6_opt_next __P((void *, socklen_t, int, uint8_t *, socklen_t *,
+ void **));
+extern int inet6_opt_find __P((void *, socklen_t, int, uint8_t, socklen_t *,
+ void **));
+extern int inet6_opt_get_val __P((void *, int, void *, socklen_t));
+extern socklen_t inet6_rth_space __P((int, int));
+extern void *inet6_rth_init __P((void *, socklen_t, int, int));
+extern int inet6_rth_add __P((void *, const struct in6_addr *));
+extern int inet6_rth_reverse __P((const void *, void *));
+extern int inet6_rth_segments __P((const void *));
+extern struct in6_addr *inet6_rth_getaddr __P((const void *, int));
+__END_DECLS
+
+#endif /* __BSD_VISIBLE */
+
+#endif /* !_NETINET6_IN6_HH_ */
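
The inet6_rth_* functions declared above build and parse RFC 3542 routing
headers. A minimal sketch of composing a type 0 header with them
(build_rthdr is a hypothetical helper; note that RFC 5095 later deprecated
type 0 routing headers):

    #include <netinet/in.h>
    #include <stdlib.h>

    /* Build a type 0 routing header visiting nhops intermediate hops. */
    static void *
    build_rthdr(const struct in6_addr *hops, int nhops, socklen_t *lenp)
    {
            socklen_t len = inet6_rth_space(IPV6_RTHDR_TYPE_0, nhops);
            void *buf;
            int i;

            if (len == 0 || (buf = malloc(len)) == NULL)
                    return (NULL);
            if (inet6_rth_init(buf, len, IPV6_RTHDR_TYPE_0, nhops) == NULL)
                    goto fail;
            for (i = 0; i < nhops; i++)
                    if (inet6_rth_add(buf, &hops[i]) == -1)
                            goto fail;
            *lenp = len;    /* pass buf via IPV6_RTHDR sockopt or cmsg */
            return (buf);
    fail:
            free(buf);
            return (NULL);
    }
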
diff --git a/rtems/freebsd/netinet6/in6_cksum.c b/rtems/freebsd/netinet6/in6_cksum.c
new file mode 100644
index 00000000..de60fdf4
--- /dev/null
+++ b/rtems/freebsd/netinet6/in6_cksum.c
@@ -0,0 +1,303 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the project nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $KAME: in6_cksum.c,v 1.10 2000/12/03 00:53:59 itojun Exp $
+ */
+
+/*-
+ * Copyright (c) 1988, 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)in_cksum.c 8.1 (Berkeley) 6/10/93
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/ip6.h>
+#include <rtems/freebsd/netinet6/scope6_var.h>
+
+/*
+ * Checksum routine for Internet Protocol family headers (Portable Version).
+ *
+ * This routine is very heavily used in the network
+ * code and should be modified for each CPU to be as fast as possible.
+ */
+
+#define ADDCARRY(x) (x > 65535 ? x -= 65535 : x)
+#define REDUCE {l_util.l = sum; sum = l_util.s[0] + l_util.s[1]; ADDCARRY(sum);}
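
ADDCARRY and REDUCE implement the end-around-carry fold of the
one's-complement Internet checksum. The same folding as a self-contained
user-space sketch (cksum16 is illustrative only, assuming an even-length,
16-bit-aligned buffer):

    #include <stddef.h>
    #include <stdint.h>

    /* One's-complement checksum over nwords 16-bit words. */
    static uint16_t
    cksum16(const uint16_t *w, size_t nwords)
    {
            uint32_t sum = 0;

            while (nwords-- > 0)
                    sum += *w++;
            sum = (sum >> 16) + (sum & 0xffff);     /* fold, like REDUCE */
            sum += (sum >> 16);                     /* absorb final carry */
            return ((uint16_t)~sum);
    }
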
+
+/*
+ * m MUST contain a contiguous IPv6 header.
+ * off is the offset where the TCP/UDP/ICMP6 header starts.
+ * len is the total length of the transport segment
+ * (e.g. TCP header + TCP payload).
+ */
+int
+in6_cksum(struct mbuf *m, u_int8_t nxt, u_int32_t off, u_int32_t len)
+{
+ u_int16_t *w;
+ int sum = 0;
+ int mlen = 0;
+ int byte_swapped = 0;
+ struct ip6_hdr *ip6;
+ struct in6_addr in6;
+ union {
+ u_int16_t phs[4];
+ struct {
+ u_int32_t ph_len;
+ u_int8_t ph_zero[3];
+ u_int8_t ph_nxt;
+ } __packed ph;
+ } uph;
+ union {
+ u_int8_t c[2];
+ u_int16_t s;
+ } s_util;
+ union {
+ u_int16_t s[2];
+ u_int32_t l;
+ } l_util;
+
+ /* sanity check */
+ if (m->m_pkthdr.len < off + len) {
+ panic("in6_cksum: mbuf len (%d) < off+len (%d+%d)",
+ m->m_pkthdr.len, off, len);
+ }
+
+ bzero(&uph, sizeof(uph));
+
+ /*
+ * First, build the IPv6 pseudo header and add it into the sum.
+ */
+ ip6 = mtod(m, struct ip6_hdr *);
+ uph.ph.ph_len = htonl(len);
+ uph.ph.ph_nxt = nxt;
+
+ /*
+ * IPv6 source address.
+ * XXX: we'd like to avoid copying the address, but we can't due to
+ * the possibly embedded scope zone ID.
+ */
+ in6 = ip6->ip6_src;
+ in6_clearscope(&in6);
+ w = (u_int16_t *)&in6;
+ sum += w[0]; sum += w[1]; sum += w[2]; sum += w[3];
+ sum += w[4]; sum += w[5]; sum += w[6]; sum += w[7];
+
+ /* IPv6 destination address */
+ in6 = ip6->ip6_dst;
+ in6_clearscope(&in6);
+ w = (u_int16_t *)&in6;
+ sum += w[0]; sum += w[1]; sum += w[2]; sum += w[3];
+ sum += w[4]; sum += w[5]; sum += w[6]; sum += w[7];
+
+ /* Payload length and upper layer identifier */
+ sum += uph.phs[0]; sum += uph.phs[1];
+ sum += uph.phs[2]; sum += uph.phs[3];
+
+ /*
+ * Second, sum the first mbuf, skipping the first off bytes.
+ */
+ while (off > 0) {
+ if (m->m_len <= off)
+ off -= m->m_len;
+ else
+ break;
+ m = m->m_next;
+ }
+ w = (u_int16_t *)(mtod(m, u_char *) + off);
+ mlen = m->m_len - off;
+ if (len < mlen)
+ mlen = len;
+ len -= mlen;
+ /*
+ * Force to even boundary.
+ */
+ if ((1 & (long) w) && (mlen > 0)) {
+ REDUCE;
+ sum <<= 8;
+ s_util.c[0] = *(u_char *)w;
+ w = (u_int16_t *)((char *)w + 1);
+ mlen--;
+ byte_swapped = 1;
+ }
+ /*
+ * Unroll the loop to make overhead from
+ * branches &c small.
+ */
+ while ((mlen -= 32) >= 0) {
+ sum += w[0]; sum += w[1]; sum += w[2]; sum += w[3];
+ sum += w[4]; sum += w[5]; sum += w[6]; sum += w[7];
+ sum += w[8]; sum += w[9]; sum += w[10]; sum += w[11];
+ sum += w[12]; sum += w[13]; sum += w[14]; sum += w[15];
+ w += 16;
+ }
+ mlen += 32;
+ while ((mlen -= 8) >= 0) {
+ sum += w[0]; sum += w[1]; sum += w[2]; sum += w[3];
+ w += 4;
+ }
+ mlen += 8;
+ if (mlen == 0 && byte_swapped == 0)
+ goto next;
+ REDUCE;
+ while ((mlen -= 2) >= 0) {
+ sum += *w++;
+ }
+ if (byte_swapped) {
+ REDUCE;
+ sum <<= 8;
+ byte_swapped = 0;
+ if (mlen == -1) {
+ s_util.c[1] = *(char *)w;
+ sum += s_util.s;
+ mlen = 0;
+ } else
+ mlen = -1;
+ } else if (mlen == -1)
+ s_util.c[0] = *(char *)w;
+ next:
+ m = m->m_next;
+
+ /*
+ * Finally, sum the remaining mbufs.
+ */
+
+ for (;m && len; m = m->m_next) {
+ if (m->m_len == 0)
+ continue;
+ w = mtod(m, u_int16_t *);
+ if (mlen == -1) {
+ /*
+ * The first byte of this mbuf is the continuation
+ * of a word spanning between this mbuf and the
+ * last mbuf.
+ *
+ * s_util.c[0] is already saved when scanning previous
+ * mbuf.
+ */
+ s_util.c[1] = *(char *)w;
+ sum += s_util.s;
+ w = (u_int16_t *)((char *)w + 1);
+ mlen = m->m_len - 1;
+ len--;
+ } else
+ mlen = m->m_len;
+ if (len < mlen)
+ mlen = len;
+ len -= mlen;
+ /*
+ * Force to even boundary.
+ */
+ if ((1 & (long) w) && (mlen > 0)) {
+ REDUCE;
+ sum <<= 8;
+ s_util.c[0] = *(u_char *)w;
+ w = (u_int16_t *)((char *)w + 1);
+ mlen--;
+ byte_swapped = 1;
+ }
+ /*
+ * Unroll the loop to make overhead from
+ * branches &c small.
+ */
+ while ((mlen -= 32) >= 0) {
+ sum += w[0]; sum += w[1]; sum += w[2]; sum += w[3];
+ sum += w[4]; sum += w[5]; sum += w[6]; sum += w[7];
+ sum += w[8]; sum += w[9]; sum += w[10]; sum += w[11];
+ sum += w[12]; sum += w[13]; sum += w[14]; sum += w[15];
+ w += 16;
+ }
+ mlen += 32;
+ while ((mlen -= 8) >= 0) {
+ sum += w[0]; sum += w[1]; sum += w[2]; sum += w[3];
+ w += 4;
+ }
+ mlen += 8;
+ if (mlen == 0 && byte_swapped == 0)
+ continue;
+ REDUCE;
+ while ((mlen -= 2) >= 0) {
+ sum += *w++;
+ }
+ if (byte_swapped) {
+ REDUCE;
+ sum <<= 8;
+ byte_swapped = 0;
+ if (mlen == -1) {
+ s_util.c[1] = *(char *)w;
+ sum += s_util.s;
+ mlen = 0;
+ } else
+ mlen = -1;
+ } else if (mlen == -1)
+ s_util.c[0] = *(char *)w;
+ }
+ if (len)
+ panic("in6_cksum: out of data");
+ if (mlen == -1) {
+ /* The last mbuf has odd # of bytes. Follow the
+ standard (the odd byte may be shifted left by 8 bits
+ or not as determined by endian-ness of the machine) */
+ s_util.c[1] = 0;
+ sum += s_util.s;
+ }
+ REDUCE;
+ return (~sum & 0xffff);
+}
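
For comparison, the same computation over one contiguous buffer, with no
mbuf walking or scope handling, can be sketched as follows (in6_cksum_flat
is illustrative only; it assumes an even len and 16-bit alignment, and the
result is already in network byte order when stored verbatim):

    #include <stdint.h>
    #include <netinet/in.h>
    #include <netinet/ip6.h>

    /* Checksum of one contiguous transport segment after an IPv6 header. */
    static uint16_t
    in6_cksum_flat(const struct ip6_hdr *ip6, uint8_t nxt,
        const uint8_t *seg, uint32_t len)
    {
            const uint16_t *w = (const uint16_t *)&ip6->ip6_src;
            uint32_t sum = 0;
            uint32_t i;

            for (i = 0; i < 16; i++)        /* 8 words src + 8 words dst */
                    sum += w[i];
            sum += htons(len >> 16) + htons(len & 0xffff);
            sum += htons(nxt);              /* zero pad + next header */

            w = (const uint16_t *)seg;
            for (i = 0; i < len / 2; i++)
                    sum += w[i];

            sum = (sum >> 16) + (sum & 0xffff);
            sum += (sum >> 16);
            return ((uint16_t)~sum);
    }
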
diff --git a/rtems/freebsd/netinet6/in6_gif.c b/rtems/freebsd/netinet6/in6_gif.c
new file mode 100644
index 00000000..36a3a8c2
--- /dev/null
+++ b/rtems/freebsd/netinet6/in6_gif.c
@@ -0,0 +1,466 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the project nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $KAME: in6_gif.c,v 1.49 2001/05/14 14:02:17 itojun Exp $
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/freebsd/local/opt_inet.h>
+#include <rtems/freebsd/local/opt_inet6.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/sockio.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/errno.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/queue.h>
+#include <rtems/freebsd/sys/syslog.h>
+#include <rtems/freebsd/sys/sysctl.h>
+#include <rtems/freebsd/sys/protosw.h>
+#include <rtems/freebsd/sys/malloc.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/route.h>
+
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/in_systm.h>
+#ifdef INET
+#include <rtems/freebsd/netinet/ip.h>
+#endif
+#include <rtems/freebsd/netinet/ip_encap.h>
+#ifdef INET6
+#include <rtems/freebsd/netinet/ip6.h>
+#include <rtems/freebsd/netinet6/ip6_var.h>
+#include <rtems/freebsd/netinet6/in6_gif.h>
+#include <rtems/freebsd/netinet6/in6_var.h>
+#endif
+#include <rtems/freebsd/netinet6/ip6protosw.h>
+#include <rtems/freebsd/netinet/ip_ecn.h>
+#ifdef INET6
+#include <rtems/freebsd/netinet6/ip6_ecn.h>
+#endif
+
+#include <rtems/freebsd/net/if_gif.h>
+
+VNET_DEFINE(int, ip6_gif_hlim) = GIF_HLIM;
+#define V_ip6_gif_hlim VNET(ip6_gif_hlim)
+
+SYSCTL_DECL(_net_inet6_ip6);
+SYSCTL_VNET_INT(_net_inet6_ip6, IPV6CTL_GIF_HLIM, gifhlim, CTLFLAG_RW,
+ &VNET_NAME(ip6_gif_hlim), 0, "");
+
+static int gif_validate6(const struct ip6_hdr *, struct gif_softc *,
+ struct ifnet *);
+
+extern struct domain inet6domain;
+struct ip6protosw in6_gif_protosw = {
+ .pr_type = SOCK_RAW,
+ .pr_domain = &inet6domain,
+ .pr_protocol = 0, /* IPPROTO_IPV[46] */
+ .pr_flags = PR_ATOMIC|PR_ADDR,
+ .pr_input = in6_gif_input,
+ .pr_output = rip6_output,
+ .pr_ctloutput = rip6_ctloutput,
+ .pr_usrreqs = &rip6_usrreqs
+};
+
+int
+in6_gif_output(struct ifnet *ifp,
+ int family, /* family of the packet to be encapsulated */
+ struct mbuf *m)
+{
+ struct gif_softc *sc = ifp->if_softc;
+ struct sockaddr_in6 *dst = (struct sockaddr_in6 *)&sc->gif_ro6.ro_dst;
+ struct sockaddr_in6 *sin6_src = (struct sockaddr_in6 *)sc->gif_psrc;
+ struct sockaddr_in6 *sin6_dst = (struct sockaddr_in6 *)sc->gif_pdst;
+ struct ip6_hdr *ip6;
+ struct etherip_header eiphdr;
+ int error, len, proto;
+ u_int8_t itos, otos;
+
+ GIF_LOCK_ASSERT(sc);
+
+ if (sin6_src == NULL || sin6_dst == NULL ||
+ sin6_src->sin6_family != AF_INET6 ||
+ sin6_dst->sin6_family != AF_INET6) {
+ m_freem(m);
+ return EAFNOSUPPORT;
+ }
+
+ switch (family) {
+#ifdef INET
+ case AF_INET:
+ {
+ struct ip *ip;
+
+ proto = IPPROTO_IPV4;
+ if (m->m_len < sizeof(*ip)) {
+ m = m_pullup(m, sizeof(*ip));
+ if (!m)
+ return ENOBUFS;
+ }
+ ip = mtod(m, struct ip *);
+ itos = ip->ip_tos;
+ break;
+ }
+#endif
+#ifdef INET6
+ case AF_INET6:
+ {
+ struct ip6_hdr *ip6;
+ proto = IPPROTO_IPV6;
+ if (m->m_len < sizeof(*ip6)) {
+ m = m_pullup(m, sizeof(*ip6));
+ if (!m)
+ return ENOBUFS;
+ }
+ ip6 = mtod(m, struct ip6_hdr *);
+ itos = (ntohl(ip6->ip6_flow) >> 20) & 0xff;
+ break;
+ }
+#endif
+ case AF_LINK:
+ proto = IPPROTO_ETHERIP;
+
+ /*
+ * GIF_SEND_REVETHIP (disabled by default) intentionally
+ * sends an EtherIP packet with a reversed version field in
+ * the header. This is a knob for backward compatibility
+ * with FreeBSD 7.2R or prior.
+ */
+ if ((sc->gif_options & GIF_SEND_REVETHIP)) {
+ eiphdr.eip_ver = 0;
+ eiphdr.eip_resvl = ETHERIP_VERSION;
+ eiphdr.eip_resvh = 0;
+ } else {
+ eiphdr.eip_ver = ETHERIP_VERSION;
+ eiphdr.eip_resvl = 0;
+ eiphdr.eip_resvh = 0;
+ }
+ /* prepend Ethernet-in-IP header */
+ M_PREPEND(m, sizeof(struct etherip_header), M_DONTWAIT);
+ if (m && m->m_len < sizeof(struct etherip_header))
+ m = m_pullup(m, sizeof(struct etherip_header));
+ if (m == NULL)
+ return ENOBUFS;
+ bcopy(&eiphdr, mtod(m, struct etherip_header *),
+ sizeof(struct etherip_header));
+ break;
+
+ default:
+#ifdef DEBUG
+ printf("in6_gif_output: warning: unknown family %d passed\n",
+ family);
+#endif
+ m_freem(m);
+ return EAFNOSUPPORT;
+ }
+
+ /* prepend new IP header */
+ len = sizeof(struct ip6_hdr);
+#ifndef __NO_STRICT_ALIGNMENT
+ if (family == AF_LINK)
+ len += ETHERIP_ALIGN;
+#endif
+ M_PREPEND(m, len, M_DONTWAIT);
+ if (m != NULL && m->m_len < len)
+ m = m_pullup(m, len);
+ if (m == NULL) {
+ printf("ENOBUFS in in6_gif_output %d\n", __LINE__);
+ return ENOBUFS;
+ }
+#ifndef __NO_STRICT_ALIGNMENT
+ if (family == AF_LINK) {
+ len = mtod(m, vm_offset_t) & 3;
+ KASSERT(len == 0 || len == ETHERIP_ALIGN,
+ ("in6_gif_output: unexpected misalignment"));
+ m->m_data += len;
+ m->m_len -= ETHERIP_ALIGN;
+ }
+#endif
+
+ ip6 = mtod(m, struct ip6_hdr *);
+ ip6->ip6_flow = 0;
+ ip6->ip6_vfc &= ~IPV6_VERSION_MASK;
+ ip6->ip6_vfc |= IPV6_VERSION;
+ ip6->ip6_plen = htons((u_short)m->m_pkthdr.len);
+ ip6->ip6_nxt = proto;
+ ip6->ip6_hlim = V_ip6_gif_hlim;
+ ip6->ip6_src = sin6_src->sin6_addr;
+ /* bidirectional configured tunnel mode */
+ if (!IN6_IS_ADDR_UNSPECIFIED(&sin6_dst->sin6_addr))
+ ip6->ip6_dst = sin6_dst->sin6_addr;
+ else {
+ m_freem(m);
+ return ENETUNREACH;
+ }
+ ip_ecn_ingress((ifp->if_flags & IFF_LINK1) ? ECN_ALLOWED : ECN_NOCARE,
+ &otos, &itos);
+ ip6->ip6_flow &= ~htonl(0xff << 20);
+ ip6->ip6_flow |= htonl((u_int32_t)otos << 20);
+
+ if (dst->sin6_family != sin6_dst->sin6_family ||
+ !IN6_ARE_ADDR_EQUAL(&dst->sin6_addr, &sin6_dst->sin6_addr)) {
+ /* cache route doesn't match */
+ bzero(dst, sizeof(*dst));
+ dst->sin6_family = sin6_dst->sin6_family;
+ dst->sin6_len = sizeof(struct sockaddr_in6);
+ dst->sin6_addr = sin6_dst->sin6_addr;
+ if (sc->gif_ro6.ro_rt) {
+ RTFREE(sc->gif_ro6.ro_rt);
+ sc->gif_ro6.ro_rt = NULL;
+ }
+#if 0
+ GIF2IFP(sc)->if_mtu = GIF_MTU;
+#endif
+ }
+
+ if (sc->gif_ro6.ro_rt == NULL) {
+ rtalloc((struct route *)&sc->gif_ro6);
+ if (sc->gif_ro6.ro_rt == NULL) {
+ m_freem(m);
+ return ENETUNREACH;
+ }
+
+ /* if it constitutes infinite encapsulation, punt. */
+ if (sc->gif_ro6.ro_rt->rt_ifp == ifp) {
+ m_freem(m);
+ return ENETUNREACH; /*XXX*/
+ }
+#if 0
+ ifp->if_mtu = sc->gif_ro6.ro_rt->rt_ifp->if_mtu
+ - sizeof(struct ip6_hdr);
+#endif
+ }
+
+#ifdef IPV6_MINMTU
+ /*
+ * Force fragmentation at the minimum MTU to avoid path MTU
+ * discovery; asking for a resend of the inner packet to do
+ * path MTU discovery for encapsulated packets is too painful.
+ */
+ error = ip6_output(m, 0, &sc->gif_ro6, IPV6_MINMTU, 0, NULL, NULL);
+#else
+ error = ip6_output(m, 0, &sc->gif_ro6, 0, 0, NULL, NULL);
+#endif
+
+ if (!(GIF2IFP(sc)->if_flags & IFF_LINK0) &&
+ sc->gif_ro6.ro_rt != NULL) {
+ RTFREE(sc->gif_ro6.ro_rt);
+ sc->gif_ro6.ro_rt = NULL;
+ }
+
+ return (error);
+}
+
+int
+in6_gif_input(struct mbuf **mp, int *offp, int proto)
+{
+ struct mbuf *m = *mp;
+ struct ifnet *gifp = NULL;
+ struct gif_softc *sc;
+ struct ip6_hdr *ip6;
+ int af = 0;
+ u_int32_t otos;
+
+ ip6 = mtod(m, struct ip6_hdr *);
+
+ sc = (struct gif_softc *)encap_getarg(m);
+ if (sc == NULL) {
+ m_freem(m);
+ V_ip6stat.ip6s_nogif++;
+ return IPPROTO_DONE;
+ }
+
+ gifp = GIF2IFP(sc);
+ if (gifp == NULL || (gifp->if_flags & IFF_UP) == 0) {
+ m_freem(m);
+ V_ip6stat.ip6s_nogif++;
+ return IPPROTO_DONE;
+ }
+
+ otos = ip6->ip6_flow;
+ m_adj(m, *offp);
+
+ switch (proto) {
+#ifdef INET
+ case IPPROTO_IPV4:
+ {
+ struct ip *ip;
+ u_int8_t otos8;
+ af = AF_INET;
+ otos8 = (ntohl(otos) >> 20) & 0xff;
+ if (m->m_len < sizeof(*ip)) {
+ m = m_pullup(m, sizeof(*ip));
+ if (!m)
+ return IPPROTO_DONE;
+ }
+ ip = mtod(m, struct ip *);
+ if (ip_ecn_egress((gifp->if_flags & IFF_LINK1) ?
+ ECN_ALLOWED : ECN_NOCARE,
+ &otos8, &ip->ip_tos) == 0) {
+ m_freem(m);
+ return IPPROTO_DONE;
+ }
+ break;
+ }
+#endif /* INET */
+#ifdef INET6
+ case IPPROTO_IPV6:
+ {
+ struct ip6_hdr *ip6;
+ af = AF_INET6;
+ if (m->m_len < sizeof(*ip6)) {
+ m = m_pullup(m, sizeof(*ip6));
+ if (!m)
+ return IPPROTO_DONE;
+ }
+ ip6 = mtod(m, struct ip6_hdr *);
+ if (ip6_ecn_egress((gifp->if_flags & IFF_LINK1) ?
+ ECN_ALLOWED : ECN_NOCARE,
+ &otos, &ip6->ip6_flow) == 0) {
+ m_freem(m);
+ return IPPROTO_DONE;
+ }
+ break;
+ }
+#endif
+ case IPPROTO_ETHERIP:
+ af = AF_LINK;
+ break;
+
+ default:
+ V_ip6stat.ip6s_nogif++;
+ m_freem(m);
+ return IPPROTO_DONE;
+ }
+
+ gif_input(m, af, gifp);
+ return IPPROTO_DONE;
+}
+
+/*
+ * validate outer address.
+ */
+static int
+gif_validate6(const struct ip6_hdr *ip6, struct gif_softc *sc,
+ struct ifnet *ifp)
+{
+ struct sockaddr_in6 *src, *dst;
+
+ src = (struct sockaddr_in6 *)sc->gif_psrc;
+ dst = (struct sockaddr_in6 *)sc->gif_pdst;
+
+ /*
+ * Check for address match. Note that the check is for an incoming
+ * packet. We should compare the *source* address in our configuration
+ * and the *destination* address of the packet, and vice versa.
+ */
+ if (!IN6_ARE_ADDR_EQUAL(&src->sin6_addr, &ip6->ip6_dst) ||
+ !IN6_ARE_ADDR_EQUAL(&dst->sin6_addr, &ip6->ip6_src))
+ return 0;
+
+ /* martian filters on outer source - done in ip6_input */
+
+ /* ingress filters on outer source */
+ if ((GIF2IFP(sc)->if_flags & IFF_LINK2) == 0 && ifp) {
+ struct sockaddr_in6 sin6;
+ struct rtentry *rt;
+
+ bzero(&sin6, sizeof(sin6));
+ sin6.sin6_family = AF_INET6;
+ sin6.sin6_len = sizeof(struct sockaddr_in6);
+ sin6.sin6_addr = ip6->ip6_src;
+ sin6.sin6_scope_id = 0; /* XXX */
+
+ rt = rtalloc1((struct sockaddr *)&sin6, 0, 0UL);
+ if (!rt || rt->rt_ifp != ifp) {
+#if 0
+ char ip6buf[INET6_ADDRSTRLEN];
+ log(LOG_WARNING, "%s: packet from %s dropped "
+ "due to ingress filter\n", if_name(GIF2IFP(sc)),
+ ip6_sprintf(ip6buf, &sin6.sin6_addr));
+#endif
+ if (rt)
+ RTFREE_LOCKED(rt);
+ return 0;
+ }
+ RTFREE_LOCKED(rt);
+ }
+
+ return 128 * 2;
+}
+
+/*
+ * we know that we are in IFF_UP, outer address available, and outer family
+ * matched the physical addr family. see gif_encapcheck().
+ * sanity check for arg should have been done in the caller.
+ */
+int
+gif_encapcheck6(const struct mbuf *m, int off, int proto, void *arg)
+{
+ struct ip6_hdr ip6;
+ struct gif_softc *sc;
+ struct ifnet *ifp;
+
+ /* sanity check done in caller */
+ sc = (struct gif_softc *)arg;
+
+ /* LINTED const cast */
+ m_copydata(m, 0, sizeof(ip6), (caddr_t)&ip6);
+ ifp = ((m->m_flags & M_PKTHDR) != 0) ? m->m_pkthdr.rcvif : NULL;
+
+ return gif_validate6(&ip6, sc, ifp);
+}
+
+int
+in6_gif_attach(struct gif_softc *sc)
+{
+ sc->encap_cookie6 = encap_attach_func(AF_INET6, -1, gif_encapcheck,
+ (void *)&in6_gif_protosw, sc);
+ if (sc->encap_cookie6 == NULL)
+ return EEXIST;
+ return 0;
+}
+
+int
+in6_gif_detach(struct gif_softc *sc)
+{
+ int error;
+
+ error = encap_detach(sc->encap_cookie6);
+ if (error == 0)
+ sc->encap_cookie6 = NULL;
+ return error;
+}
diff --git a/rtems/freebsd/netinet6/in6_gif.h b/rtems/freebsd/netinet6/in6_gif.h
new file mode 100644
index 00000000..ed566112
--- /dev/null
+++ b/rtems/freebsd/netinet6/in6_gif.h
@@ -0,0 +1,45 @@
+/*-
+ * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the project nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $KAME: in6_gif.h,v 1.5 2000/04/14 08:36:03 itojun Exp $
+ * $FreeBSD$
+ */
+
+#ifndef _NETINET6_IN6_GIF_HH_
+#define _NETINET6_IN6_GIF_HH_
+
+#define GIF_HLIM 30
+
+struct gif_softc;
+int in6_gif_input __P((struct mbuf **, int *, int));
+int in6_gif_output __P((struct ifnet *, int, struct mbuf *));
+int gif_encapcheck6 __P((const struct mbuf *, int, int, void *));
+int in6_gif_attach __P((struct gif_softc *));
+int in6_gif_detach __P((struct gif_softc *));
+
+#endif /* _NETINET6_IN6_GIF_HH_ */
diff --git a/rtems/freebsd/netinet6/in6_ifattach.c b/rtems/freebsd/netinet6/in6_ifattach.c
new file mode 100644
index 00000000..64c4a58d
--- /dev/null
+++ b/rtems/freebsd/netinet6/in6_ifattach.c
@@ -0,0 +1,971 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the project nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $KAME: in6_ifattach.c,v 1.118 2001/05/24 07:44:00 itojun Exp $
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/sockio.h>
+#include <rtems/freebsd/sys/jail.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/proc.h>
+#include <rtems/freebsd/sys/syslog.h>
+#include <rtems/freebsd/sys/md5.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/if_dl.h>
+#include <rtems/freebsd/net/if_types.h>
+#include <rtems/freebsd/net/route.h>
+#include <rtems/freebsd/net/vnet.h>
+
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/in_var.h>
+#include <rtems/freebsd/netinet/if_ether.h>
+#include <rtems/freebsd/netinet/in_pcb.h>
+#include <rtems/freebsd/netinet/ip_var.h>
+#include <rtems/freebsd/netinet/udp.h>
+#include <rtems/freebsd/netinet/udp_var.h>
+
+#include <rtems/freebsd/netinet/ip6.h>
+#include <rtems/freebsd/netinet6/ip6_var.h>
+#include <rtems/freebsd/netinet6/in6_var.h>
+#include <rtems/freebsd/netinet6/in6_pcb.h>
+#include <rtems/freebsd/netinet6/in6_ifattach.h>
+#include <rtems/freebsd/netinet6/nd6.h>
+#include <rtems/freebsd/netinet6/mld6_var.h>
+#include <rtems/freebsd/netinet6/scope6_var.h>
+
+VNET_DEFINE(unsigned long, in6_maxmtu) = 0;
+
+#ifdef IP6_AUTO_LINKLOCAL
+VNET_DEFINE(int, ip6_auto_linklocal) = IP6_AUTO_LINKLOCAL;
+#else
+VNET_DEFINE(int, ip6_auto_linklocal) = 1; /* enabled by default */
+#endif
+
+VNET_DEFINE(struct callout, in6_tmpaddrtimer_ch);
+#define V_in6_tmpaddrtimer_ch VNET(in6_tmpaddrtimer_ch)
+
+VNET_DECLARE(struct inpcbinfo, ripcbinfo);
+#define V_ripcbinfo VNET(ripcbinfo)
+
+static int get_rand_ifid(struct ifnet *, struct in6_addr *);
+static int generate_tmp_ifid(u_int8_t *, const u_int8_t *, u_int8_t *);
+static int get_ifid(struct ifnet *, struct ifnet *, struct in6_addr *);
+static int in6_ifattach_linklocal(struct ifnet *, struct ifnet *);
+static int in6_ifattach_loopback(struct ifnet *);
+static void in6_purgemaddrs(struct ifnet *);
+
+#define EUI64_GBIT 0x01
+#define EUI64_UBIT 0x02
+#define EUI64_TO_IFID(in6) do {(in6)->s6_addr[8] ^= EUI64_UBIT; } while (0)
+#define EUI64_GROUP(in6) ((in6)->s6_addr[8] & EUI64_GBIT)
+#define EUI64_INDIVIDUAL(in6) (!EUI64_GROUP(in6))
+#define EUI64_LOCAL(in6) ((in6)->s6_addr[8] & EUI64_UBIT)
+#define EUI64_UNIVERSAL(in6) (!EUI64_LOCAL(in6))
+
+#define IFID_LOCAL(in6) (!EUI64_LOCAL(in6))
+#define IFID_UNIVERSAL(in6) (!EUI64_UNIVERSAL(in6))
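
Concretely, EUI64_TO_IFID flips the universal/local bit (EUI64_UBIT) to turn
an EUI64 into a modified EUI-64 interface identifier; for a 48-bit MAC the
kernel also inserts ff:fe in the middle, so 00:11:22:33:44:55 becomes the
identifier 0211:22ff:fe33:4455. A sketch of that derivation (mac_to_ifid is
illustrative only):

    #include <stdint.h>

    /* Modified EUI-64 interface id from a 48-bit MAC address. */
    static void
    mac_to_ifid(const uint8_t mac[6], uint8_t ifid[8])
    {
            ifid[0] = mac[0] ^ 0x02;        /* flip the U/L bit (EUI64_UBIT) */
            ifid[1] = mac[1];
            ifid[2] = mac[2];
            ifid[3] = 0xff;                 /* fixed filler, as built by */
            ifid[4] = 0xfe;                 /* in6_get_hw_ifid() below */
            ifid[5] = mac[3];
            ifid[6] = mac[4];
            ifid[7] = mac[5];
    }
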
+
+/*
+ * Generate a last-resort interface identifier, when the machine has no
+ * IEEE802/EUI64 address sources.
+ * The goal here is to get an interface identifier that is
+ * (1) random enough and (2) does not change across reboot.
+ * We currently use MD5(hostname) for it.
+ *
+ * in6 - upper 64bits are preserved
+ */
+static int
+get_rand_ifid(struct ifnet *ifp, struct in6_addr *in6)
+{
+ MD5_CTX ctxt;
+ struct prison *pr;
+ u_int8_t digest[16];
+ int hostnamelen;
+
+ pr = curthread->td_ucred->cr_prison;
+ mtx_lock(&pr->pr_mtx);
+ hostnamelen = strlen(pr->pr_hostname);
+#if 0
+ /* we need at least several letters as seed for ifid */
+ if (hostnamelen < 3) {
+ mtx_unlock(&pr->pr_mtx);
+ return -1;
+ }
+#endif
+
+ /* generate 8 bytes of pseudo-random value. */
+ bzero(&ctxt, sizeof(ctxt));
+ MD5Init(&ctxt);
+ MD5Update(&ctxt, pr->pr_hostname, hostnamelen);
+ mtx_unlock(&pr->pr_mtx);
+ MD5Final(digest, &ctxt);
+
+ /* assumes sizeof(digest) > sizeof(ifid) */
+ bcopy(digest, &in6->s6_addr[8], 8);
+
+ /* make sure to set "u" bit to local, and "g" bit to individual. */
+ in6->s6_addr[8] &= ~EUI64_GBIT; /* g bit to "individual" */
+ in6->s6_addr[8] |= EUI64_UBIT; /* u bit to "local" */
+
+ /* convert EUI64 into IPv6 interface identifier */
+ EUI64_TO_IFID(in6);
+
+ return 0;
+}
+
+static int
+generate_tmp_ifid(u_int8_t *seed0, const u_int8_t *seed1, u_int8_t *ret)
+{
+ MD5_CTX ctxt;
+ u_int8_t seed[16], digest[16], nullbuf[8];
+ u_int32_t val32;
+
+ /* If there's no history, start with a random seed. */
+ bzero(nullbuf, sizeof(nullbuf));
+ if (bcmp(nullbuf, seed0, sizeof(nullbuf)) == 0) {
+ int i;
+
+ for (i = 0; i < 2; i++) {
+ val32 = arc4random();
+ bcopy(&val32, seed + sizeof(val32) * i, sizeof(val32));
+ }
+ } else
+ bcopy(seed0, seed, 8);
+
+ /* copy the right-most 64-bits of the given address */
+ /* XXX assumption on the size of IFID */
+ bcopy(seed1, &seed[8], 8);
+
+ if (0) { /* for debugging purposes only */
+ int i;
+
+ printf("generate_tmp_ifid: new randomized ID from: ");
+ for (i = 0; i < 16; i++)
+ printf("%02x", seed[i]);
+ printf(" ");
+ }
+
+ /* generate 16 bytes of pseudo-random value. */
+ bzero(&ctxt, sizeof(ctxt));
+ MD5Init(&ctxt);
+ MD5Update(&ctxt, seed, sizeof(seed));
+ MD5Final(digest, &ctxt);
+
+ /*
+ * RFC 3041 3.2.1. (3)
+ * Take the left-most 64-bits of the MD5 digest and set bit 6 (the
+ * left-most bit is numbered 0) to zero.
+ */
+ bcopy(digest, ret, 8);
+ ret[0] &= ~EUI64_UBIT;
+
+ /*
+ * XXX: we'd like to ensure that the generated value is not zero
+ * for simplicity. If the calculated digest happens to be zero,
+ * use a random non-zero value as the last resort.
+ */
+ if (bcmp(nullbuf, ret, sizeof(nullbuf)) == 0) {
+ nd6log((LOG_INFO,
+ "generate_tmp_ifid: computed MD5 value is zero.\n"));
+
+ val32 = arc4random();
+ val32 = 1 + (val32 % (0xffffffff - 1));
+ /* use the random value so the returned ifid is non-zero */
+ bcopy(&val32, ret, sizeof(val32));
+ }
+
+ /*
+ * RFC 3041 3.2.1. (4)
+ * Take the rightmost 64-bits of the MD5 digest and save them in
+ * stable storage as the history value to be used in the next
+ * iteration of the algorithm.
+ */
+ bcopy(&digest[8], seed0, 8);
+
+ if (0) { /* for debugging purposes only */
+ int i;
+
+ printf("to: ");
+ for (i = 0; i < 16; i++)
+ printf("%02x", digest[i]);
+ printf("\n");
+ }
+
+ return 0;
+}
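
The same RFC 3041 step (3)/(4) flow in user-space form, omitting the
first-time random seeding (tmp_ifid_step is illustrative only, and md5() is
a hypothetical helper standing in for the MD5_* calls above):

    #include <stdint.h>
    #include <string.h>

    /* Hypothetical helper: MD5 of buf[0..len) into digest[16]. */
    void md5(const void *buf, size_t len, uint8_t digest[16]);

    /* One RFC 3041 3.2.1 iteration: new temporary ifid + next history. */
    static void
    tmp_ifid_step(uint8_t history[8], const uint8_t ifid[8], uint8_t out[8])
    {
            uint8_t seed[16], digest[16];

            memcpy(seed, history, 8);       /* previous history value */
            memcpy(seed + 8, ifid, 8);      /* current interface id */
            md5(seed, sizeof(seed), digest);
            memcpy(out, digest, 8);         /* step (3): left-most 64 bits */
            out[0] &= ~0x02;                /* ... with the u bit cleared */
            memcpy(history, digest + 8, 8); /* step (4): right-most 64 bits */
    }
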
+
+/*
+ * Get interface identifier for the specified interface.
+ * XXX assumes a single sockaddr_dl (AF_LINK address) per interface
+ *
+ * in6 - upper 64bits are preserved
+ */
+int
+in6_get_hw_ifid(struct ifnet *ifp, struct in6_addr *in6)
+{
+ struct ifaddr *ifa;
+ struct sockaddr_dl *sdl;
+ u_int8_t *addr;
+ size_t addrlen;
+ static u_int8_t allzero[8] = { 0, 0, 0, 0, 0, 0, 0, 0 };
+ static u_int8_t allone[8] =
+ { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
+
+ IF_ADDR_LOCK(ifp);
+ TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
+ if (ifa->ifa_addr->sa_family != AF_LINK)
+ continue;
+ sdl = (struct sockaddr_dl *)ifa->ifa_addr;
+ if (sdl == NULL)
+ continue;
+ if (sdl->sdl_alen == 0)
+ continue;
+
+ goto found;
+ }
+ IF_ADDR_UNLOCK(ifp);
+
+ return -1;
+
+found:
+ IF_ADDR_LOCK_ASSERT(ifp);
+ addr = LLADDR(sdl);
+ addrlen = sdl->sdl_alen;
+
+ /* get EUI64 */
+ switch (ifp->if_type) {
+ case IFT_ETHER:
+ case IFT_FDDI:
+ case IFT_ISO88025:
+ case IFT_ATM:
+ case IFT_IEEE1394:
+#ifdef IFT_IEEE80211
+ case IFT_IEEE80211:
+#endif
+ /* IEEE802/EUI64 cases - what others? */
+ /* IEEE1394 uses a 16-byte address that starts with the EUI64 */
+ if (addrlen > 8)
+ addrlen = 8;
+
+ /* look at IEEE802/EUI64 only */
+ if (addrlen != 8 && addrlen != 6) {
+ IF_ADDR_UNLOCK(ifp);
+ return -1;
+ }
+
+ /*
+ * check for invalid MAC address - on bsdi, we see it a lot
+ * since wildboar configures all-zero MAC on pccard before
+ * card insertion.
+ */
+ if (bcmp(addr, allzero, addrlen) == 0) {
+ IF_ADDR_UNLOCK(ifp);
+ return -1;
+ }
+ if (bcmp(addr, allone, addrlen) == 0) {
+ IF_ADDR_UNLOCK(ifp);
+ return -1;
+ }
+
+ /* make EUI64 address */
+ if (addrlen == 8)
+ bcopy(addr, &in6->s6_addr[8], 8);
+ else if (addrlen == 6) {
+ in6->s6_addr[8] = addr[0];
+ in6->s6_addr[9] = addr[1];
+ in6->s6_addr[10] = addr[2];
+ in6->s6_addr[11] = 0xff;
+ in6->s6_addr[12] = 0xfe;
+ in6->s6_addr[13] = addr[3];
+ in6->s6_addr[14] = addr[4];
+ in6->s6_addr[15] = addr[5];
+ }
+ break;
+
+ case IFT_ARCNET:
+ if (addrlen != 1) {
+ IF_ADDR_UNLOCK(ifp);
+ return -1;
+ }
+ if (!addr[0]) {
+ IF_ADDR_UNLOCK(ifp);
+ return -1;
+ }
+
+ bzero(&in6->s6_addr[8], 8);
+ in6->s6_addr[15] = addr[0];
+
+ /*
+ * due to insufficient bitwidth, we mark it local.
+ */
+ in6->s6_addr[8] &= ~EUI64_GBIT; /* g bit to "individual" */
+ in6->s6_addr[8] |= EUI64_UBIT; /* u bit to "local" */
+ break;
+
+ case IFT_GIF:
+#ifdef IFT_STF
+ case IFT_STF:
+#endif
+ /*
+ * RFC2893 says: "SHOULD use IPv4 address as ifid source".
+ * However, an IPv4 address is not a very suitable unique
+ * identifier source (it can be renumbered), so we don't do
+ * this.
+ */
+ IF_ADDR_UNLOCK(ifp);
+ return -1;
+
+ default:
+ IF_ADDR_UNLOCK(ifp);
+ return -1;
+ }
+
+ /* sanity check: g bit must not indicate "group" */
+ if (EUI64_GROUP(in6)) {
+ IF_ADDR_UNLOCK(ifp);
+ return -1;
+ }
+
+ /* convert EUI64 into IPv6 interface identifier */
+ EUI64_TO_IFID(in6);
+
+ /*
+ * sanity check: ifid must not be all zero, avoid conflict with
+ * subnet router anycast
+ */
+ if ((in6->s6_addr[8] & ~(EUI64_GBIT | EUI64_UBIT)) == 0x00 &&
+ bcmp(&in6->s6_addr[9], allzero, 7) == 0) {
+ IF_ADDR_UNLOCK(ifp);
+ return -1;
+ }
+
+ IF_ADDR_UNLOCK(ifp);
+ return 0;
+}
+
+/*
+ * Get interface identifier for the specified interface. If it is not
+ * available on ifp0, borrow interface identifier from other information
+ * sources.
+ *
+ * altifp - secondary EUI64 source
+ */
+static int
+get_ifid(struct ifnet *ifp0, struct ifnet *altifp,
+ struct in6_addr *in6)
+{
+ struct ifnet *ifp;
+
+ /* first, try to get it from the interface itself */
+ if (in6_get_hw_ifid(ifp0, in6) == 0) {
+ nd6log((LOG_DEBUG, "%s: got interface identifier from itself\n",
+ if_name(ifp0)));
+ goto success;
+ }
+
+ /* try the secondary EUI64 source; this is basically for ATM PVC */
+ if (altifp && in6_get_hw_ifid(altifp, in6) == 0) {
+ nd6log((LOG_DEBUG, "%s: got interface identifier from %s\n",
+ if_name(ifp0), if_name(altifp)));
+ goto success;
+ }
+
+ /* next, try to get it from some other hardware interface */
+ IFNET_RLOCK_NOSLEEP();
+ for (ifp = V_ifnet.tqh_first; ifp; ifp = ifp->if_list.tqe_next) {
+ if (ifp == ifp0)
+ continue;
+ if (in6_get_hw_ifid(ifp, in6) != 0)
+ continue;
+
+ /*
+ * to borrow ifid from other interface, ifid needs to be
+ * globally unique
+ */
+ if (IFID_UNIVERSAL(in6)) {
+ nd6log((LOG_DEBUG,
+ "%s: borrow interface identifier from %s\n",
+ if_name(ifp0), if_name(ifp)));
+ IFNET_RUNLOCK_NOSLEEP();
+ goto success;
+ }
+ }
+ IFNET_RUNLOCK_NOSLEEP();
+
+ /* last resort: get from random number source */
+ if (get_rand_ifid(ifp, in6) == 0) {
+ nd6log((LOG_DEBUG,
+ "%s: interface identifier generated by random number\n",
+ if_name(ifp0)));
+ goto success;
+ }
+
+ printf("%s: failed to get interface identifier\n", if_name(ifp0));
+ return -1;
+
+success:
+ nd6log((LOG_INFO, "%s: ifid: %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n",
+ if_name(ifp0), in6->s6_addr[8], in6->s6_addr[9], in6->s6_addr[10],
+ in6->s6_addr[11], in6->s6_addr[12], in6->s6_addr[13],
+ in6->s6_addr[14], in6->s6_addr[15]));
+ return 0;
+}
+
+/*
+ * altifp - secondary EUI64 source
+ */
+static int
+in6_ifattach_linklocal(struct ifnet *ifp, struct ifnet *altifp)
+{
+ struct in6_ifaddr *ia;
+ struct in6_aliasreq ifra;
+ struct nd_prefixctl pr0;
+ int i, error;
+
+ /*
+ * configure link-local address.
+ */
+ bzero(&ifra, sizeof(ifra));
+
+ /*
+ * in6_update_ifa() does not use ifra_name, but we set it
+ * accurately for safety.
+ */
+ strncpy(ifra.ifra_name, if_name(ifp), sizeof(ifra.ifra_name));
+
+ ifra.ifra_addr.sin6_family = AF_INET6;
+ ifra.ifra_addr.sin6_len = sizeof(struct sockaddr_in6);
+ ifra.ifra_addr.sin6_addr.s6_addr32[0] = htonl(0xfe800000);
+ ifra.ifra_addr.sin6_addr.s6_addr32[1] = 0;
+ if ((ifp->if_flags & IFF_LOOPBACK) != 0) {
+ ifra.ifra_addr.sin6_addr.s6_addr32[2] = 0;
+ ifra.ifra_addr.sin6_addr.s6_addr32[3] = htonl(1);
+ } else {
+ if (get_ifid(ifp, altifp, &ifra.ifra_addr.sin6_addr) != 0) {
+ nd6log((LOG_ERR,
+ "%s: no ifid available\n", if_name(ifp)));
+ return (-1);
+ }
+ }
+ if (in6_setscope(&ifra.ifra_addr.sin6_addr, ifp, NULL))
+ return (-1);
+
+ ifra.ifra_prefixmask.sin6_len = sizeof(struct sockaddr_in6);
+ ifra.ifra_prefixmask.sin6_family = AF_INET6;
+ ifra.ifra_prefixmask.sin6_addr = in6mask64;
+ /* link-local addresses should NEVER expire. */
+ ifra.ifra_lifetime.ia6t_vltime = ND6_INFINITE_LIFETIME;
+ ifra.ifra_lifetime.ia6t_pltime = ND6_INFINITE_LIFETIME;
+
+ /*
+ * Now call in6_update_ifa() to do a bunch of procedures to configure
+ * a link-local address. We can set the 3rd argument to NULL, because
+ * we know there's no other link-local address on the interface
+ * and therefore we are adding one (instead of updating one).
+ */
+ if ((error = in6_update_ifa(ifp, &ifra, NULL,
+ IN6_IFAUPDATE_DADDELAY)) != 0) {
+ /*
+ * XXX: When the interface does not support IPv6, this call
+ * would fail in the SIOCSIFADDR ioctl. I believe the
+ * notification is rather confusing in this case, so just
+ * suppress it. (jinmei@kame.net 20010130)
+ */
+ if (error != EAFNOSUPPORT)
+ nd6log((LOG_NOTICE, "in6_ifattach_linklocal: failed to "
+ "configure a link-local address on %s "
+ "(errno=%d)\n",
+ if_name(ifp), error));
+ return (-1);
+ }
+
+ ia = in6ifa_ifpforlinklocal(ifp, 0); /* ia must not be NULL */
+#ifdef DIAGNOSTIC
+ if (!ia) {
+ panic("ia == NULL in in6_ifattach_linklocal");
+ /* NOTREACHED */
+ }
+#endif
+ ifa_free(&ia->ia_ifa);
+
+ /*
+ * Mark the link-local prefix (fe80::%link/64) as on-link.
+ * Since we'd like to manage prefixes separately from addresses,
+ * we make an ND6 prefix structure for the link-local prefix,
+ * and add it to the prefix list as a never-expire prefix.
+ * XXX: this change might affect some existing code base...
+ */
+ bzero(&pr0, sizeof(pr0));
+ pr0.ndpr_ifp = ifp;
+ /* this should be 64 at this moment. */
+ pr0.ndpr_plen = in6_mask2len(&ifra.ifra_prefixmask.sin6_addr, NULL);
+ pr0.ndpr_prefix = ifra.ifra_addr;
+ /* apply the mask for safety. (nd6_prelist_add will apply it again) */
+ for (i = 0; i < 4; i++) {
+ pr0.ndpr_prefix.sin6_addr.s6_addr32[i] &=
+ in6mask64.s6_addr32[i];
+ }
+ /*
+ * Initialize parameters. The link-local prefix must always be
+ * on-link, and its lifetimes never expire.
+ */
+ pr0.ndpr_raf_onlink = 1;
+ pr0.ndpr_raf_auto = 1; /* probably meaningless */
+ pr0.ndpr_vltime = ND6_INFINITE_LIFETIME;
+ pr0.ndpr_pltime = ND6_INFINITE_LIFETIME;
+ /*
+ * Since there are no other link-local addresses, nd6_prefix_lookup()
+ * probably returns NULL. However, we cannot always rely on that
+ * result. For example, if we first remove the (only) existing
+ * link-local address and then reconfigure another one, the prefix
+ * is still valid, referring to the old link-local address.
+ */
+ if (nd6_prefix_lookup(&pr0) == NULL) {
+ if ((error = nd6_prelist_add(&pr0, NULL, NULL)) != 0)
+ return (error);
+ }
+
+ return 0;
+}
+
+/*
+ * ifp - must be IFT_LOOP
+ */
+static int
+in6_ifattach_loopback(struct ifnet *ifp)
+{
+ struct in6_aliasreq ifra;
+ int error;
+
+ bzero(&ifra, sizeof(ifra));
+
+ /*
+ * in6_update_ifa() does not use ifra_name, but we set it
+ * accurately for safety.
+ */
+ strncpy(ifra.ifra_name, if_name(ifp), sizeof(ifra.ifra_name));
+
+ ifra.ifra_prefixmask.sin6_len = sizeof(struct sockaddr_in6);
+ ifra.ifra_prefixmask.sin6_family = AF_INET6;
+ ifra.ifra_prefixmask.sin6_addr = in6mask128;
+
+ /*
+ * Always initialize ia_dstaddr (= broadcast address) to loopback
+ * address. Follows IPv4 practice - see in_ifinit().
+ */
+ ifra.ifra_dstaddr.sin6_len = sizeof(struct sockaddr_in6);
+ ifra.ifra_dstaddr.sin6_family = AF_INET6;
+ ifra.ifra_dstaddr.sin6_addr = in6addr_loopback;
+
+ ifra.ifra_addr.sin6_len = sizeof(struct sockaddr_in6);
+ ifra.ifra_addr.sin6_family = AF_INET6;
+ ifra.ifra_addr.sin6_addr = in6addr_loopback;
+
+ /* the loopback address should NEVER expire. */
+ ifra.ifra_lifetime.ia6t_vltime = ND6_INFINITE_LIFETIME;
+ ifra.ifra_lifetime.ia6t_pltime = ND6_INFINITE_LIFETIME;
+
+ /* we don't need to perform DAD on loopback interfaces. */
+ ifra.ifra_flags |= IN6_IFF_NODAD;
+
+ /* skip registration to the prefix list. XXX should be temporary. */
+ ifra.ifra_flags |= IN6_IFF_NOPFX;
+
+ /*
+ * We are sure that this is a newly assigned address, so we can
+ * pass NULL as the 3rd arg.
+ */
+ if ((error = in6_update_ifa(ifp, &ifra, NULL, 0)) != 0) {
+ nd6log((LOG_ERR, "in6_ifattach_loopback: failed to configure "
+ "the loopback address on %s (errno=%d)\n",
+ if_name(ifp), error));
+ return (-1);
+ }
+
+ return 0;
+}
+
+/*
+ * compute NI group address, based on the current hostname setting.
+ * see draft-ietf-ipngwg-icmp-name-lookup-* (04 and later).
+ *
+ * when ifp == NULL, the caller is responsible for filling scopeid.
+ */
+int
+in6_nigroup(struct ifnet *ifp, const char *name, int namelen,
+ struct in6_addr *in6)
+{
+ struct prison *pr;
+ const char *p;
+ u_char *q;
+ MD5_CTX ctxt;
+ u_int8_t digest[16];
+ char l;
+ char n[64]; /* a single label must not exceed 63 chars */
+
+ /*
+ * If no name is given and namelen is -1,
+ * we try to do the hostname lookup ourselves.
+ */
+ if (!name && namelen == -1) {
+ pr = curthread->td_ucred->cr_prison;
+ mtx_lock(&pr->pr_mtx);
+ name = pr->pr_hostname;
+ namelen = strlen(name);
+ } else
+ pr = NULL;
+ if (!name || !namelen) {
+ if (pr != NULL)
+ mtx_unlock(&pr->pr_mtx);
+ return -1;
+ }
+
+ p = name;
+ while (p && *p && *p != '.' && p - name < namelen)
+ p++;
+ if (p == name || p - name > sizeof(n) - 1) {
+ if (pr != NULL)
+ mtx_unlock(&pr->pr_mtx);
+ return -1; /* label too long */
+ }
+ l = p - name;
+ strncpy(n, name, l);
+ if (pr != NULL)
+ mtx_unlock(&pr->pr_mtx);
+ n[(int)l] = '\0';
+ for (q = n; *q; q++) {
+ if ('A' <= *q && *q <= 'Z')
+ *q = *q - 'A' + 'a';
+ }
+
+ /* generate 8 bytes of pseudo-random value. */
+ bzero(&ctxt, sizeof(ctxt));
+ MD5Init(&ctxt);
+ MD5Update(&ctxt, &l, sizeof(l));
+ MD5Update(&ctxt, n, l);
+ MD5Final(digest, &ctxt);
+
+ bzero(in6, sizeof(*in6));
+ in6->s6_addr16[0] = IPV6_ADDR_INT16_MLL;
+ in6->s6_addr8[11] = 2;
+ bcopy(digest, &in6->s6_addr32[3], sizeof(in6->s6_addr32[3]));
+ if (in6_setscope(in6, ifp, NULL))
+ return (-1); /* XXX: should not fail */
+
+ return 0;
+}
+
+/*
+ * XXX multiple loopback interfaces need more care. For instance,
+ * the node-local address needs to be configured on only one of them.
+ * XXX multiple link-local address case
+ *
+ * altifp - secondary EUI64 source
+ */
+void
+in6_ifattach(struct ifnet *ifp, struct ifnet *altifp)
+{
+ struct in6_ifaddr *ia;
+ struct in6_addr in6;
+
+ /* some of the interfaces are inherently not IPv6 capable */
+ switch (ifp->if_type) {
+ case IFT_PFLOG:
+ case IFT_PFSYNC:
+ case IFT_CARP:
+ return;
+ }
+
+ /*
+ * quirks based on interface type
+ */
+ switch (ifp->if_type) {
+#ifdef IFT_STF
+ case IFT_STF:
+ /*
+ * The 6to4 interface is a very special kind of beast:
+ * no multicast, no linklocal. RFC2529 specifies how to make
+ * linklocals for a 6to4 interface, but there is no use for
+ * one and it is rather harmful to have one.
+ */
+ goto statinit;
+#endif
+ default:
+ break;
+ }
+
+ /*
+ * we usually require multicast capability on the interface
+ */
+ if ((ifp->if_flags & IFF_MULTICAST) == 0) {
+ nd6log((LOG_INFO, "in6_ifattach: "
+ "%s is not multicast capable, IPv6 not enabled\n",
+ if_name(ifp)));
+ return;
+ }
+
+ /*
+ * assign loopback address for loopback interface.
+ * XXX multiple loopback interface case.
+ */
+ if ((ifp->if_flags & IFF_LOOPBACK) != 0) {
+ struct ifaddr *ifa;
+
+ in6 = in6addr_loopback;
+ ifa = (struct ifaddr *)in6ifa_ifpwithaddr(ifp, &in6);
+ if (ifa == NULL) {
+ if (in6_ifattach_loopback(ifp) != 0)
+ return;
+ } else
+ ifa_free(ifa);
+ }
+
+ /*
+ * assign a link-local address, if there's none.
+ */
+ if (V_ip6_auto_linklocal && ifp->if_type != IFT_BRIDGE) {
+ ia = in6ifa_ifpforlinklocal(ifp, 0);
+ if (ia == NULL) {
+ if (in6_ifattach_linklocal(ifp, altifp) == 0) {
+ /* linklocal address assigned */
+ } else {
+ /* failed to assign linklocal address. bark? */
+ }
+ } else
+ ifa_free(&ia->ia_ifa);
+ }
+
+#ifdef IFT_STF /* XXX */
+statinit:
+#endif
+
+ /* update dynamically. */
+ if (V_in6_maxmtu < ifp->if_mtu)
+ V_in6_maxmtu = ifp->if_mtu;
+}
+
+/*
+ * NOTE: in6_ifdetach() does not support loopback interfaces at this
+ * moment. We don't need this function in bsdi, because interfaces
+ * are never removed from the ifnet list in bsdi.
+ */
+void
+in6_ifdetach(struct ifnet *ifp)
+{
+ struct in6_ifaddr *ia;
+ struct ifaddr *ifa, *next;
+ struct radix_node_head *rnh;
+ struct rtentry *rt;
+ short rtflags;
+ struct sockaddr_in6 sin6;
+ struct in6_multi_mship *imm;
+
+ /* remove neighbor management table */
+ nd6_purge(ifp);
+
+ /* nuke any of IPv6 addresses we have */
+ TAILQ_FOREACH_SAFE(ifa, &ifp->if_addrhead, ifa_link, next) {
+ if (ifa->ifa_addr->sa_family != AF_INET6)
+ continue;
+ in6_purgeaddr(ifa);
+ }
+
+ /* undo everything done by in6_ifattach(), just in case */
+ TAILQ_FOREACH_SAFE(ifa, &ifp->if_addrhead, ifa_link, next) {
+ if (ifa->ifa_addr->sa_family != AF_INET6
+ || !IN6_IS_ADDR_LINKLOCAL(&satosin6(&ifa->ifa_addr)->sin6_addr)) {
+ continue;
+ }
+
+ ia = (struct in6_ifaddr *)ifa;
+
+ /*
+ * leave from multicast groups we have joined for the interface
+ */
+ while ((imm = ia->ia6_memberships.lh_first) != NULL) {
+ LIST_REMOVE(imm, i6mm_chain);
+ in6_leavegroup(imm);
+ }
+
+ /* remove from the routing table */
+ if ((ia->ia_flags & IFA_ROUTE) &&
+ (rt = rtalloc1((struct sockaddr *)&ia->ia_addr, 0, 0UL))) {
+ rtflags = rt->rt_flags;
+ RTFREE_LOCKED(rt);
+ rtrequest(RTM_DELETE, (struct sockaddr *)&ia->ia_addr,
+ (struct sockaddr *)&ia->ia_addr,
+ (struct sockaddr *)&ia->ia_prefixmask,
+ rtflags, (struct rtentry **)0);
+ }
+
+ /* remove from the linked list */
+ IF_ADDR_LOCK(ifp);
+ TAILQ_REMOVE(&ifp->if_addrhead, ifa, ifa_link);
+ IF_ADDR_UNLOCK(ifp);
+ ifa_free(ifa); /* if_addrhead */
+
+ IN6_IFADDR_WLOCK();
+ TAILQ_REMOVE(&V_in6_ifaddrhead, ia, ia_link);
+ IN6_IFADDR_WUNLOCK();
+ ifa_free(ifa);
+ }
+
+ in6_pcbpurgeif0(&V_udbinfo, ifp);
+ in6_pcbpurgeif0(&V_ripcbinfo, ifp);
+ /* leave from all multicast groups joined */
+ in6_purgemaddrs(ifp);
+
+ /*
+ * remove neighbor management table. we call it twice just to make
+ * sure we nuke everything. maybe we need just one call.
+ * XXX: since the first call did not release addresses, some prefixes
+ * might remain. We should call nd6_purge() again to release the
+ * prefixes after removing all addresses above.
+ * (Or can we just delay calling nd6_purge until at this point?)
+ */
+ nd6_purge(ifp);
+
+ /* remove route to link-local allnodes multicast (ff02::1) */
+ bzero(&sin6, sizeof(sin6));
+ sin6.sin6_len = sizeof(struct sockaddr_in6);
+ sin6.sin6_family = AF_INET6;
+ sin6.sin6_addr = in6addr_linklocal_allnodes;
+ if (in6_setscope(&sin6.sin6_addr, ifp, NULL))
+ /* XXX: should not fail */
+ return;
+ /* XXX grab lock first to avoid LOR */
+ rnh = rt_tables_get_rnh(0, AF_INET6);
+ if (rnh != NULL) {
+ RADIX_NODE_HEAD_LOCK(rnh);
+ rt = rtalloc1((struct sockaddr *)&sin6, 0, RTF_RNH_LOCKED);
+ if (rt) {
+ if (rt->rt_ifp == ifp)
+ rtexpunge(rt);
+ RTFREE_LOCKED(rt);
+ }
+ RADIX_NODE_HEAD_UNLOCK(rnh);
+ }
+}
+
+int
+in6_get_tmpifid(struct ifnet *ifp, u_int8_t *retbuf,
+ const u_int8_t *baseid, int generate)
+{
+ u_int8_t nullbuf[8];
+ struct nd_ifinfo *ndi = ND_IFINFO(ifp);
+
+ bzero(nullbuf, sizeof(nullbuf));
+ if (bcmp(ndi->randomid, nullbuf, sizeof(nullbuf)) == 0) {
+ /* we've never created a random ID. Create a new one. */
+ generate = 1;
+ }
+
+ if (generate) {
+ bcopy(baseid, ndi->randomseed1, sizeof(ndi->randomseed1));
+
+ /* generate_tmp_ifid will update seedn and buf */
+ (void)generate_tmp_ifid(ndi->randomseed0, ndi->randomseed1,
+ ndi->randomid);
+ }
+ bcopy(ndi->randomid, retbuf, 8);
+
+ return (0);
+}
+
+void
+in6_tmpaddrtimer(void *arg)
+{
+ CURVNET_SET((struct vnet *) arg);
+ struct nd_ifinfo *ndi;
+ u_int8_t nullbuf[8];
+ struct ifnet *ifp;
+
+ callout_reset(&V_in6_tmpaddrtimer_ch,
+ (V_ip6_temp_preferred_lifetime - V_ip6_desync_factor -
+ V_ip6_temp_regen_advance) * hz, in6_tmpaddrtimer, curvnet);
+
+ bzero(nullbuf, sizeof(nullbuf));
+ for (ifp = TAILQ_FIRST(&V_ifnet); ifp;
+ ifp = TAILQ_NEXT(ifp, if_list)) {
+ ndi = ND_IFINFO(ifp);
+ if (bcmp(ndi->randomid, nullbuf, sizeof(nullbuf)) != 0) {
+ /*
+ * We've been generating a random ID on this interface.
+ * Create a new one.
+ */
+ (void)generate_tmp_ifid(ndi->randomseed0,
+ ndi->randomseed1, ndi->randomid);
+ }
+ }
+
+ CURVNET_RESTORE();
+}
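+
+/*
+ * Illustrative arithmetic for the callout interval above, a sketch
+ * assuming typical defaults (the real values are configured
+ * elsewhere): with a temporary preferred lifetime of 86400s, a
+ * desync factor of, say, 600s and a regen advance of 5s, the timer
+ * fires every (86400 - 600 - 5) * hz ticks, early enough that a
+ * fresh random ID exists before the current temporary addresses
+ * are deprecated.
+ */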
+
+static void
+in6_purgemaddrs(struct ifnet *ifp)
+{
+ LIST_HEAD(,in6_multi) purgeinms;
+ struct in6_multi *inm, *tinm;
+ struct ifmultiaddr *ifma;
+
+ LIST_INIT(&purgeinms);
+ IN6_MULTI_LOCK();
+
+ /*
+ * Extract list of in6_multi associated with the detaching ifp
+ * which the PF_INET6 layer is about to release.
+ * We need to do this as IF_ADDR_LOCK() may be re-acquired
+ * by code further down.
+ */
+ IF_ADDR_LOCK(ifp);
+ TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
+ if (ifma->ifma_addr->sa_family != AF_INET6 ||
+ ifma->ifma_protospec == NULL)
+ continue;
+ inm = (struct in6_multi *)ifma->ifma_protospec;
+ LIST_INSERT_HEAD(&purgeinms, inm, in6m_entry);
+ }
+ IF_ADDR_UNLOCK(ifp);
+
+ LIST_FOREACH_SAFE(inm, &purgeinms, in6m_entry, tinm) {
+ LIST_REMOVE(inm, in6m_entry);
+ in6m_release_locked(inm);
+ }
+ mld_ifdetach(ifp);
+
+ IN6_MULTI_UNLOCK();
+}
diff --git a/rtems/freebsd/netinet6/in6_ifattach.h b/rtems/freebsd/netinet6/in6_ifattach.h
new file mode 100644
index 00000000..d7db4e47
--- /dev/null
+++ b/rtems/freebsd/netinet6/in6_ifattach.h
@@ -0,0 +1,45 @@
+/*-
+ * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the project nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $KAME: in6_ifattach.h,v 1.14 2001/02/08 12:48:39 jinmei Exp $
+ * $FreeBSD$
+ */
+
+#ifndef _NETINET6_IN6_IFATTACH_HH_
+#define _NETINET6_IN6_IFATTACH_HH_
+
+#ifdef _KERNEL
+void in6_ifattach __P((struct ifnet *, struct ifnet *));
+void in6_ifdetach __P((struct ifnet *));
+int in6_get_tmpifid __P((struct ifnet *, u_int8_t *, const u_int8_t *, int));
+void in6_tmpaddrtimer __P((void *));
+int in6_get_hw_ifid __P((struct ifnet *, struct in6_addr *));
+int in6_nigroup __P((struct ifnet *, const char *, int, struct in6_addr *));
+#endif /* _KERNEL */
+
+#endif /* _NETINET6_IN6_IFATTACH_HH_ */
diff --git a/rtems/freebsd/netinet6/in6_mcast.c b/rtems/freebsd/netinet6/in6_mcast.c
new file mode 100644
index 00000000..86c51f45
--- /dev/null
+++ b/rtems/freebsd/netinet6/in6_mcast.c
@@ -0,0 +1,2840 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*
+ * Copyright (c) 2009 Bruce Simpson.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * IPv6 multicast socket, group, and socket option processing module.
+ * Normative references: RFC 2292, RFC 3493, RFC 3542, RFC 3678, RFC 3810.
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/freebsd/local/opt_inet6.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/protosw.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/socketvar.h>
+#include <rtems/freebsd/sys/sysctl.h>
+#include <rtems/freebsd/sys/priv.h>
+#include <rtems/freebsd/sys/ktr.h>
+#include <rtems/freebsd/sys/tree.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/if_dl.h>
+#include <rtems/freebsd/net/route.h>
+#include <rtems/freebsd/net/vnet.h>
+
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/in_var.h>
+#include <rtems/freebsd/netinet6/in6_var.h>
+#include <rtems/freebsd/netinet/ip6.h>
+#include <rtems/freebsd/netinet/icmp6.h>
+#include <rtems/freebsd/netinet6/ip6_var.h>
+#include <rtems/freebsd/netinet/in_pcb.h>
+#include <rtems/freebsd/netinet/tcp_var.h>
+#include <rtems/freebsd/netinet6/nd6.h>
+#include <rtems/freebsd/netinet6/mld6_var.h>
+#include <rtems/freebsd/netinet6/scope6_var.h>
+
+#ifndef KTR_MLD
+#define KTR_MLD KTR_INET6
+#endif
+
+#ifndef __SOCKUNION_DECLARED
+union sockunion {
+ struct sockaddr_storage ss;
+ struct sockaddr sa;
+ struct sockaddr_dl sdl;
+ struct sockaddr_in6 sin6;
+};
+typedef union sockunion sockunion_t;
+#define __SOCKUNION_DECLARED
+#endif /* __SOCKUNION_DECLARED */
+
+static MALLOC_DEFINE(M_IN6MFILTER, "in6_mfilter",
+ "IPv6 multicast PCB-layer source filter");
+static MALLOC_DEFINE(M_IP6MADDR, "in6_multi", "IPv6 multicast group");
+static MALLOC_DEFINE(M_IP6MOPTS, "ip6_moptions", "IPv6 multicast options");
+static MALLOC_DEFINE(M_IP6MSOURCE, "ip6_msource",
+ "IPv6 multicast MLD-layer source filter");
+
+RB_GENERATE(ip6_msource_tree, ip6_msource, im6s_link, ip6_msource_cmp);
+
+/*
+ * Locking:
+ * - Lock order is: Giant, INP_WLOCK, IN6_MULTI_LOCK, MLD_LOCK, IF_ADDR_LOCK.
+ * - The IF_ADDR_LOCK is implicitly taken by in6m_lookup() earlier, however
+ * it can be taken by code in net/if.c also.
+ * - ip6_moptions and in6_mfilter are covered by the INP_WLOCK.
+ *
+ * struct in6_multi is covered by IN6_MULTI_LOCK. There isn't strictly
+ * any need for in6_multi itself to be virtualized -- it is bound to an ifp
+ * anyway no matter what happens.
+ */
+struct mtx in6_multi_mtx;
+MTX_SYSINIT(in6_multi_mtx, &in6_multi_mtx, "in6_multi_mtx", MTX_DEF);
+
+static void im6f_commit(struct in6_mfilter *);
+static int im6f_get_source(struct in6_mfilter *imf,
+ const struct sockaddr_in6 *psin,
+ struct in6_msource **);
+static struct in6_msource *
+ im6f_graft(struct in6_mfilter *, const uint8_t,
+ const struct sockaddr_in6 *);
+static void im6f_leave(struct in6_mfilter *);
+static int im6f_prune(struct in6_mfilter *, const struct sockaddr_in6 *);
+static void im6f_purge(struct in6_mfilter *);
+static void im6f_rollback(struct in6_mfilter *);
+static void im6f_reap(struct in6_mfilter *);
+static int im6o_grow(struct ip6_moptions *);
+static size_t im6o_match_group(const struct ip6_moptions *,
+ const struct ifnet *, const struct sockaddr *);
+static struct in6_msource *
+ im6o_match_source(const struct ip6_moptions *, const size_t,
+ const struct sockaddr *);
+static void im6s_merge(struct ip6_msource *ims,
+ const struct in6_msource *lims, const int rollback);
+static int in6_mc_get(struct ifnet *, const struct in6_addr *,
+ struct in6_multi **);
+static int in6m_get_source(struct in6_multi *inm,
+ const struct in6_addr *addr, const int noalloc,
+ struct ip6_msource **pims);
+static int in6m_is_ifp_detached(const struct in6_multi *);
+static int in6m_merge(struct in6_multi *, /*const*/ struct in6_mfilter *);
+static void in6m_purge(struct in6_multi *);
+static void in6m_reap(struct in6_multi *);
+static struct ip6_moptions *
+ in6p_findmoptions(struct inpcb *);
+static int in6p_get_source_filters(struct inpcb *, struct sockopt *);
+static int in6p_join_group(struct inpcb *, struct sockopt *);
+static int in6p_leave_group(struct inpcb *, struct sockopt *);
+static struct ifnet *
+ in6p_lookup_mcast_ifp(const struct inpcb *,
+ const struct sockaddr_in6 *);
+static int in6p_block_unblock_source(struct inpcb *, struct sockopt *);
+static int in6p_set_multicast_if(struct inpcb *, struct sockopt *);
+static int in6p_set_source_filters(struct inpcb *, struct sockopt *);
+static int sysctl_ip6_mcast_filters(SYSCTL_HANDLER_ARGS);
+
+SYSCTL_DECL(_net_inet6_ip6); /* XXX Not in any common header. */
+
+SYSCTL_NODE(_net_inet6_ip6, OID_AUTO, mcast, CTLFLAG_RW, 0, "IPv6 multicast");
+
+static u_long in6_mcast_maxgrpsrc = IPV6_MAX_GROUP_SRC_FILTER;
+SYSCTL_ULONG(_net_inet6_ip6_mcast, OID_AUTO, maxgrpsrc,
+ CTLFLAG_RW | CTLFLAG_TUN, &in6_mcast_maxgrpsrc, 0,
+ "Max source filters per group");
+TUNABLE_ULONG("net.inet6.ip6.mcast.maxgrpsrc", &in6_mcast_maxgrpsrc);
+
+static u_long in6_mcast_maxsocksrc = IPV6_MAX_SOCK_SRC_FILTER;
+SYSCTL_ULONG(_net_inet6_ip6_mcast, OID_AUTO, maxsocksrc,
+ CTLFLAG_RW | CTLFLAG_TUN, &in6_mcast_maxsocksrc, 0,
+ "Max source filters per socket");
+TUNABLE_ULONG("net.inet6.ip6.mcast.maxsocksrc", &in6_mcast_maxsocksrc);
+
+/* TODO Virtualize this switch. */
+int in6_mcast_loop = IPV6_DEFAULT_MULTICAST_LOOP;
+SYSCTL_INT(_net_inet6_ip6_mcast, OID_AUTO, loop, CTLFLAG_RW | CTLFLAG_TUN,
+ &in6_mcast_loop, 0, "Loopback multicast datagrams by default");
+TUNABLE_INT("net.inet6.ip6.mcast.loop", &in6_mcast_loop);
+
+SYSCTL_NODE(_net_inet6_ip6_mcast, OID_AUTO, filters,
+ CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_ip6_mcast_filters,
+ "Per-interface stack-wide source filters");
+
+/*
+ * Inline function which wraps assertions for a valid ifp.
+ * The ifnet layer will set the ifma's ifp pointer to NULL if the ifp
+ * is detached.
+ */
+static int __inline
+in6m_is_ifp_detached(const struct in6_multi *inm)
+{
+ struct ifnet *ifp;
+
+ KASSERT(inm->in6m_ifma != NULL, ("%s: no ifma", __func__));
+ ifp = inm->in6m_ifma->ifma_ifp;
+ if (ifp != NULL) {
+ /*
+ * Sanity check that network-layer notion of ifp is the
+ * same as that of link-layer.
+ */
+ KASSERT(inm->in6m_ifp == ifp, ("%s: bad ifp", __func__));
+ }
+
+ return (ifp == NULL);
+}
+
+/*
+ * Initialize an in6_mfilter structure to a known state at t0, t1
+ * with an empty source filter list.
+ */
+static __inline void
+im6f_init(struct in6_mfilter *imf, const int st0, const int st1)
+{
+ memset(imf, 0, sizeof(struct in6_mfilter));
+ RB_INIT(&imf->im6f_sources);
+ imf->im6f_st[0] = st0;
+ imf->im6f_st[1] = st1;
+}
+
+/*
+ * Resize the ip6_moptions vector to the next power-of-two minus 1.
+ * May be called with locks held; do not sleep.
+ */
+static int
+im6o_grow(struct ip6_moptions *imo)
+{
+ struct in6_multi **nmships;
+ struct in6_multi **omships;
+ struct in6_mfilter *nmfilters;
+ struct in6_mfilter *omfilters;
+ size_t idx;
+ size_t newmax;
+ size_t oldmax;
+
+ nmships = NULL;
+ nmfilters = NULL;
+ omships = imo->im6o_membership;
+ omfilters = imo->im6o_mfilters;
+ oldmax = imo->im6o_max_memberships;
+ newmax = ((oldmax + 1) * 2) - 1;
+
+ if (newmax <= IPV6_MAX_MEMBERSHIPS) {
+ nmships = (struct in6_multi **)realloc(omships,
+ sizeof(struct in6_multi *) * newmax, M_IP6MOPTS, M_NOWAIT);
+ nmfilters = (struct in6_mfilter *)realloc(omfilters,
+ sizeof(struct in6_mfilter) * newmax, M_IN6MFILTER,
+ M_NOWAIT);
+ if (nmships != NULL && nmfilters != NULL) {
+ /* Initialize newly allocated source filter heads. */
+ for (idx = oldmax; idx < newmax; idx++) {
+ im6f_init(&nmfilters[idx], MCAST_UNDEFINED,
+ MCAST_EXCLUDE);
+ }
+ imo->im6o_max_memberships = newmax;
+ imo->im6o_membership = nmships;
+ imo->im6o_mfilters = nmfilters;
+ }
+ }
+
+ if (nmships == NULL || nmfilters == NULL) {
+ if (nmships != NULL)
+ free(nmships, M_IP6MOPTS);
+ if (nmfilters != NULL)
+ free(nmfilters, M_IN6MFILTER);
+ return (ETOOMANYREFS);
+ }
+
+ return (0);
+}
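+
+/*
+ * Worked example for im6o_grow(), assuming the customary values of
+ * IPV6_MIN_MEMBERSHIPS = 31 and IPV6_MAX_MEMBERSHIPS = 4095 (defined
+ * in a header not shown here): newmax = ((oldmax + 1) * 2) - 1 walks
+ * 31 -> 63 -> 127 -> ... -> 4095, always one less than a power of
+ * two, and a request to double past the maximum fails with
+ * ETOOMANYREFS.
+ */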
+
+/*
+ * Find an IPv6 multicast group entry for this ip6_moptions instance
+ * which matches the specified group, and optionally an interface.
+ * Return its index into the array, or -1 if not found.
+ */
+static size_t
+im6o_match_group(const struct ip6_moptions *imo, const struct ifnet *ifp,
+ const struct sockaddr *group)
+{
+ const struct sockaddr_in6 *gsin6;
+ struct in6_multi **pinm;
+ int idx;
+ int nmships;
+
+ gsin6 = (const struct sockaddr_in6 *)group;
+
+	/* The im6o_membership array may be lazily allocated. */
+ if (imo->im6o_membership == NULL || imo->im6o_num_memberships == 0)
+ return (-1);
+
+ nmships = imo->im6o_num_memberships;
+ pinm = &imo->im6o_membership[0];
+ for (idx = 0; idx < nmships; idx++, pinm++) {
+ if (*pinm == NULL)
+ continue;
+ if ((ifp == NULL || ((*pinm)->in6m_ifp == ifp)) &&
+ IN6_ARE_ADDR_EQUAL(&(*pinm)->in6m_addr,
+ &gsin6->sin6_addr)) {
+ break;
+ }
+ }
+ if (idx >= nmships)
+ idx = -1;
+
+ return (idx);
+}
+
+/*
+ * Find an IPv6 multicast source entry for this imo which matches
+ * the given group index for this socket, and source address.
+ *
+ * XXX TODO: The scope ID, if present in src, is stripped before
+ * any comparison. We SHOULD enforce scope/zone checks where the source
+ * filter entry has a link scope.
+ *
+ * NOTE: This does not check if the entry is in-mode, merely if
+ * it exists, which may not be the desired behaviour.
+ */
+static struct in6_msource *
+im6o_match_source(const struct ip6_moptions *imo, const size_t gidx,
+ const struct sockaddr *src)
+{
+ struct ip6_msource find;
+ struct in6_mfilter *imf;
+ struct ip6_msource *ims;
+ const sockunion_t *psa;
+
+ KASSERT(src->sa_family == AF_INET6, ("%s: !AF_INET6", __func__));
+ KASSERT(gidx != -1 && gidx < imo->im6o_num_memberships,
+ ("%s: invalid index %d\n", __func__, (int)gidx));
+
+	/* The im6o_mfilters array may be lazily allocated. */
+ if (imo->im6o_mfilters == NULL)
+ return (NULL);
+ imf = &imo->im6o_mfilters[gidx];
+
+ psa = (const sockunion_t *)src;
+ find.im6s_addr = psa->sin6.sin6_addr;
+ in6_clearscope(&find.im6s_addr); /* XXX */
+ ims = RB_FIND(ip6_msource_tree, &imf->im6f_sources, &find);
+
+ return ((struct in6_msource *)ims);
+}
+
+/*
+ * Perform filtering for multicast datagrams on a socket by group and source.
+ *
+ * Returns 0 if a datagram should be allowed through, or various error codes
+ * if the socket was not a member of the group, or the source was muted, etc.
+ */
+int
+im6o_mc_filter(const struct ip6_moptions *imo, const struct ifnet *ifp,
+ const struct sockaddr *group, const struct sockaddr *src)
+{
+ size_t gidx;
+ struct in6_msource *ims;
+ int mode;
+
+ KASSERT(ifp != NULL, ("%s: null ifp", __func__));
+
+ gidx = im6o_match_group(imo, ifp, group);
+ if (gidx == -1)
+ return (MCAST_NOTGMEMBER);
+
+ /*
+ * Check if the source was included in an (S,G) join.
+ * Allow reception on exclusive memberships by default,
+ * reject reception on inclusive memberships by default.
+ * Exclude source only if an in-mode exclude filter exists.
+ * Include source only if an in-mode include filter exists.
+ * NOTE: We are comparing group state here at MLD t1 (now)
+ * with socket-layer t0 (since last downcall).
+ */
+ mode = imo->im6o_mfilters[gidx].im6f_st[1];
+ ims = im6o_match_source(imo, gidx, src);
+
+ if ((ims == NULL && mode == MCAST_INCLUDE) ||
+ (ims != NULL && ims->im6sl_st[0] != mode))
+ return (MCAST_NOTSMEMBER);
+
+ return (MCAST_PASS);
+}
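+
+/*
+ * Worked example of the checks above: an INCLUDE-mode membership on
+ * group G drops datagrams from any unlisted source S
+ * (MCAST_NOTSMEMBER), while an EXCLUDE-mode (ASM) membership passes
+ * unlisted sources by default; a listed source is then judged by
+ * comparing its socket-layer state at t0 against the current filter
+ * mode at t1.  A socket which never joined G on this ifp gets
+ * MCAST_NOTGMEMBER.
+ */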
+
+/*
+ * Find and return a reference to an in6_multi record for (ifp, group),
+ * and bump its reference count.
+ * If one does not exist, try to allocate it, and update link-layer multicast
+ * filters on ifp to listen for group.
+ * Assumes the IN6_MULTI lock is held across the call.
+ * Return 0 if successful, otherwise return an appropriate error code.
+ */
+static int
+in6_mc_get(struct ifnet *ifp, const struct in6_addr *group,
+ struct in6_multi **pinm)
+{
+ struct sockaddr_in6 gsin6;
+ struct ifmultiaddr *ifma;
+ struct in6_multi *inm;
+ int error;
+
+ error = 0;
+
+ /*
+ * XXX: Accesses to ifma_protospec must be covered by IF_ADDR_LOCK;
+ * if_addmulti() takes this mutex itself, so we must drop and
+ * re-acquire around the call.
+ */
+ IN6_MULTI_LOCK_ASSERT();
+ IF_ADDR_LOCK(ifp);
+
+ inm = in6m_lookup_locked(ifp, group);
+ if (inm != NULL) {
+ /*
+ * If we already joined this group, just bump the
+ * refcount and return it.
+ */
+ KASSERT(inm->in6m_refcount >= 1,
+ ("%s: bad refcount %d", __func__, inm->in6m_refcount));
+ ++inm->in6m_refcount;
+ *pinm = inm;
+ goto out_locked;
+ }
+
+ memset(&gsin6, 0, sizeof(gsin6));
+ gsin6.sin6_family = AF_INET6;
+ gsin6.sin6_len = sizeof(struct sockaddr_in6);
+ gsin6.sin6_addr = *group;
+
+ /*
+ * Check if a link-layer group is already associated
+ * with this network-layer group on the given ifnet.
+ */
+ IF_ADDR_UNLOCK(ifp);
+ error = if_addmulti(ifp, (struct sockaddr *)&gsin6, &ifma);
+ if (error != 0)
+ return (error);
+ IF_ADDR_LOCK(ifp);
+
+ /*
+ * If something other than netinet6 is occupying the link-layer
+ * group, print a meaningful error message and back out of
+ * the allocation.
+ * Otherwise, bump the refcount on the existing network-layer
+ * group association and return it.
+ */
+ if (ifma->ifma_protospec != NULL) {
+ inm = (struct in6_multi *)ifma->ifma_protospec;
+#ifdef INVARIANTS
+ KASSERT(ifma->ifma_addr != NULL, ("%s: no ifma_addr",
+ __func__));
+ KASSERT(ifma->ifma_addr->sa_family == AF_INET6,
+ ("%s: ifma not AF_INET6", __func__));
+ KASSERT(inm != NULL, ("%s: no ifma_protospec", __func__));
+ if (inm->in6m_ifma != ifma || inm->in6m_ifp != ifp ||
+ !IN6_ARE_ADDR_EQUAL(&inm->in6m_addr, group))
+ panic("%s: ifma %p is inconsistent with %p (%p)",
+ __func__, ifma, inm, group);
+#endif
+ ++inm->in6m_refcount;
+ *pinm = inm;
+ goto out_locked;
+ }
+
+ IF_ADDR_LOCK_ASSERT(ifp);
+
+ /*
+ * A new in6_multi record is needed; allocate and initialize it.
+ * We DO NOT perform an MLD join as the in6_ layer may need to
+ * push an initial source list down to MLD to support SSM.
+ *
+	 * The initial source filter state is INCLUDE {}, as per the RFC.
+ * Pending state-changes per group are subject to a bounds check.
+ */
+ inm = malloc(sizeof(*inm), M_IP6MADDR, M_NOWAIT | M_ZERO);
+ if (inm == NULL) {
+ if_delmulti_ifma(ifma);
+ error = ENOMEM;
+ goto out_locked;
+ }
+ inm->in6m_addr = *group;
+ inm->in6m_ifp = ifp;
+ inm->in6m_mli = MLD_IFINFO(ifp);
+ inm->in6m_ifma = ifma;
+ inm->in6m_refcount = 1;
+ inm->in6m_state = MLD_NOT_MEMBER;
+ IFQ_SET_MAXLEN(&inm->in6m_scq, MLD_MAX_STATE_CHANGES);
+
+ inm->in6m_st[0].iss_fmode = MCAST_UNDEFINED;
+ inm->in6m_st[1].iss_fmode = MCAST_UNDEFINED;
+ RB_INIT(&inm->in6m_srcs);
+
+ ifma->ifma_protospec = inm;
+ *pinm = inm;
+
+out_locked:
+ IF_ADDR_UNLOCK(ifp);
+ return (error);
+}
+
+/*
+ * Drop a reference to an in6_multi record.
+ *
+ * If the refcount drops to 0, free the in6_multi record and
+ * delete the underlying link-layer membership.
+ */
+void
+in6m_release_locked(struct in6_multi *inm)
+{
+ struct ifmultiaddr *ifma;
+
+ IN6_MULTI_LOCK_ASSERT();
+
+ CTR2(KTR_MLD, "%s: refcount is %d", __func__, inm->in6m_refcount);
+
+ if (--inm->in6m_refcount > 0) {
+ CTR2(KTR_MLD, "%s: refcount is now %d", __func__,
+ inm->in6m_refcount);
+ return;
+ }
+
+ CTR2(KTR_MLD, "%s: freeing inm %p", __func__, inm);
+
+ ifma = inm->in6m_ifma;
+
+ /* XXX this access is not covered by IF_ADDR_LOCK */
+ CTR2(KTR_MLD, "%s: purging ifma %p", __func__, ifma);
+ KASSERT(ifma->ifma_protospec == inm,
+ ("%s: ifma_protospec != inm", __func__));
+ ifma->ifma_protospec = NULL;
+
+ in6m_purge(inm);
+
+ free(inm, M_IP6MADDR);
+
+ if_delmulti_ifma(ifma);
+}
+
+/*
+ * Clear recorded source entries for a group.
+ * Used by the MLD code. Caller must hold the IN6_MULTI lock.
+ * FIXME: Should reap.
+ */
+void
+in6m_clear_recorded(struct in6_multi *inm)
+{
+ struct ip6_msource *ims;
+
+ IN6_MULTI_LOCK_ASSERT();
+
+ RB_FOREACH(ims, ip6_msource_tree, &inm->in6m_srcs) {
+ if (ims->im6s_stp) {
+ ims->im6s_stp = 0;
+ --inm->in6m_st[1].iss_rec;
+ }
+ }
+ KASSERT(inm->in6m_st[1].iss_rec == 0,
+ ("%s: iss_rec %d not 0", __func__, inm->in6m_st[1].iss_rec));
+}
+
+/*
+ * Record a source as pending for a Source-Group MLDv2 query.
+ * This lives here as it modifies the shared tree.
+ *
+ * inm is the group descriptor.
+ * naddr is the address of the source to record in network-byte order.
+ *
+ * If the net.inet6.mld.sgalloc sysctl is non-zero, we will
+ * lazy-allocate a source node in response to an SG query.
+ * Otherwise, no allocation is performed. This saves some memory
+ * with the trade-off that the source will not be reported to the
+ * router if joined in the window between the query response and
+ * the group actually being joined on the local host.
+ *
+ * VIMAGE: XXX: Currently the mld_sgalloc feature has been removed.
+ * This turns off the allocation of a recorded source entry if
+ * the group has not been joined.
+ *
+ * Return 0 if the source didn't exist or was already marked as recorded.
+ * Return 1 if the source was marked as recorded by this function.
+ * Return <0 if any error occurred (negated errno code).
+ */
+int
+in6m_record_source(struct in6_multi *inm, const struct in6_addr *addr)
+{
+ struct ip6_msource find;
+ struct ip6_msource *ims, *nims;
+
+ IN6_MULTI_LOCK_ASSERT();
+
+ find.im6s_addr = *addr;
+ ims = RB_FIND(ip6_msource_tree, &inm->in6m_srcs, &find);
+ if (ims && ims->im6s_stp)
+ return (0);
+ if (ims == NULL) {
+ if (inm->in6m_nsrc == in6_mcast_maxgrpsrc)
+ return (-ENOSPC);
+ nims = malloc(sizeof(struct ip6_msource), M_IP6MSOURCE,
+ M_NOWAIT | M_ZERO);
+ if (nims == NULL)
+ return (-ENOMEM);
+ nims->im6s_addr = find.im6s_addr;
+ RB_INSERT(ip6_msource_tree, &inm->in6m_srcs, nims);
+ ++inm->in6m_nsrc;
+ ims = nims;
+ }
+
+ /*
+ * Mark the source as recorded and update the recorded
+ * source count.
+ */
+ ++ims->im6s_stp;
+ ++inm->in6m_st[1].iss_rec;
+
+ return (1);
+}
+
+/*
+ * Return a pointer to an in6_msource owned by an in6_mfilter,
+ * given its source address.
+ * Lazy-allocate if needed. If this is a new entry its filter state is
+ * undefined at t0.
+ *
+ * imf is the filter set being modified.
+ * addr is the source address.
+ *
+ * SMPng: May be called with locks held; malloc must not block.
+ */
+static int
+im6f_get_source(struct in6_mfilter *imf, const struct sockaddr_in6 *psin,
+ struct in6_msource **plims)
+{
+ struct ip6_msource find;
+ struct ip6_msource *ims, *nims;
+ struct in6_msource *lims;
+ int error;
+
+ error = 0;
+ ims = NULL;
+ lims = NULL;
+
+ find.im6s_addr = psin->sin6_addr;
+ ims = RB_FIND(ip6_msource_tree, &imf->im6f_sources, &find);
+ lims = (struct in6_msource *)ims;
+ if (lims == NULL) {
+ if (imf->im6f_nsrc == in6_mcast_maxsocksrc)
+ return (ENOSPC);
+ nims = malloc(sizeof(struct in6_msource), M_IN6MFILTER,
+ M_NOWAIT | M_ZERO);
+ if (nims == NULL)
+ return (ENOMEM);
+ lims = (struct in6_msource *)nims;
+ lims->im6s_addr = find.im6s_addr;
+ lims->im6sl_st[0] = MCAST_UNDEFINED;
+ RB_INSERT(ip6_msource_tree, &imf->im6f_sources, nims);
+ ++imf->im6f_nsrc;
+ }
+
+ *plims = lims;
+
+ return (error);
+}
+
+/*
+ * Graft a source entry into an existing socket-layer filter set,
+ * maintaining any required invariants and checking allocations.
+ *
+ * The source is marked as being in the new filter mode at t1.
+ *
+ * Return the pointer to the new node, otherwise return NULL.
+ */
+static struct in6_msource *
+im6f_graft(struct in6_mfilter *imf, const uint8_t st1,
+ const struct sockaddr_in6 *psin)
+{
+ struct ip6_msource *nims;
+ struct in6_msource *lims;
+
+ nims = malloc(sizeof(struct in6_msource), M_IN6MFILTER,
+ M_NOWAIT | M_ZERO);
+ if (nims == NULL)
+ return (NULL);
+ lims = (struct in6_msource *)nims;
+ lims->im6s_addr = psin->sin6_addr;
+ lims->im6sl_st[0] = MCAST_UNDEFINED;
+ lims->im6sl_st[1] = st1;
+ RB_INSERT(ip6_msource_tree, &imf->im6f_sources, nims);
+ ++imf->im6f_nsrc;
+
+ return (lims);
+}
+
+/*
+ * Prune a source entry from an existing socket-layer filter set,
+ * maintaining any required invariants and checking allocations.
+ *
+ * The source is marked as being left at t1, it is not freed.
+ *
+ * Return 0 if no error occurred, otherwise return an errno value.
+ */
+static int
+im6f_prune(struct in6_mfilter *imf, const struct sockaddr_in6 *psin)
+{
+ struct ip6_msource find;
+ struct ip6_msource *ims;
+ struct in6_msource *lims;
+
+ find.im6s_addr = psin->sin6_addr;
+ ims = RB_FIND(ip6_msource_tree, &imf->im6f_sources, &find);
+ if (ims == NULL)
+ return (ENOENT);
+ lims = (struct in6_msource *)ims;
+ lims->im6sl_st[1] = MCAST_UNDEFINED;
+ return (0);
+}
+
+/*
+ * Revert socket-layer filter set deltas at t1 to t0 state.
+ */
+static void
+im6f_rollback(struct in6_mfilter *imf)
+{
+ struct ip6_msource *ims, *tims;
+ struct in6_msource *lims;
+
+ RB_FOREACH_SAFE(ims, ip6_msource_tree, &imf->im6f_sources, tims) {
+ lims = (struct in6_msource *)ims;
+ if (lims->im6sl_st[0] == lims->im6sl_st[1]) {
+ /* no change at t1 */
+ continue;
+ } else if (lims->im6sl_st[0] != MCAST_UNDEFINED) {
+ /* revert change to existing source at t1 */
+ lims->im6sl_st[1] = lims->im6sl_st[0];
+ } else {
+			/* revert source added at t1 */
+ CTR2(KTR_MLD, "%s: free ims %p", __func__, ims);
+ RB_REMOVE(ip6_msource_tree, &imf->im6f_sources, ims);
+ free(ims, M_IN6MFILTER);
+ imf->im6f_nsrc--;
+ }
+ }
+ imf->im6f_st[1] = imf->im6f_st[0];
+}
+
+/*
+ * Mark socket-layer filter set as INCLUDE {} at t1.
+ */
+static void
+im6f_leave(struct in6_mfilter *imf)
+{
+ struct ip6_msource *ims;
+ struct in6_msource *lims;
+
+ RB_FOREACH(ims, ip6_msource_tree, &imf->im6f_sources) {
+ lims = (struct in6_msource *)ims;
+ lims->im6sl_st[1] = MCAST_UNDEFINED;
+ }
+ imf->im6f_st[1] = MCAST_INCLUDE;
+}
+
+/*
+ * Mark socket-layer filter set deltas as committed.
+ */
+static void
+im6f_commit(struct in6_mfilter *imf)
+{
+ struct ip6_msource *ims;
+ struct in6_msource *lims;
+
+ RB_FOREACH(ims, ip6_msource_tree, &imf->im6f_sources) {
+ lims = (struct in6_msource *)ims;
+ lims->im6sl_st[0] = lims->im6sl_st[1];
+ }
+ imf->im6f_st[0] = imf->im6f_st[1];
+}
+
+/*
+ * Reap unreferenced sources from socket-layer filter set.
+ */
+static void
+im6f_reap(struct in6_mfilter *imf)
+{
+ struct ip6_msource *ims, *tims;
+ struct in6_msource *lims;
+
+ RB_FOREACH_SAFE(ims, ip6_msource_tree, &imf->im6f_sources, tims) {
+ lims = (struct in6_msource *)ims;
+ if ((lims->im6sl_st[0] == MCAST_UNDEFINED) &&
+ (lims->im6sl_st[1] == MCAST_UNDEFINED)) {
+ CTR2(KTR_MLD, "%s: free lims %p", __func__, ims);
+ RB_REMOVE(ip6_msource_tree, &imf->im6f_sources, ims);
+ free(ims, M_IN6MFILTER);
+ imf->im6f_nsrc--;
+ }
+ }
+}
+
+/*
+ * Purge socket-layer filter set.
+ */
+static void
+im6f_purge(struct in6_mfilter *imf)
+{
+ struct ip6_msource *ims, *tims;
+
+ RB_FOREACH_SAFE(ims, ip6_msource_tree, &imf->im6f_sources, tims) {
+ CTR2(KTR_MLD, "%s: free ims %p", __func__, ims);
+ RB_REMOVE(ip6_msource_tree, &imf->im6f_sources, ims);
+ free(ims, M_IN6MFILTER);
+ imf->im6f_nsrc--;
+ }
+ imf->im6f_st[0] = imf->im6f_st[1] = MCAST_UNDEFINED;
+ KASSERT(RB_EMPTY(&imf->im6f_sources),
+ ("%s: im6f_sources not empty", __func__));
+}
+
+/*
+ * Look up a source filter entry for a multicast group.
+ *
+ * inm is the group descriptor to work with.
+ * addr is the IPv6 address to look up.
+ * noalloc may be non-zero to suppress allocation of sources.
+ * *pims will be set to the address of the retrieved or allocated source.
+ *
+ * SMPng: NOTE: may be called with locks held.
+ * Return 0 if successful, otherwise return a non-zero error code.
+ */
+static int
+in6m_get_source(struct in6_multi *inm, const struct in6_addr *addr,
+ const int noalloc, struct ip6_msource **pims)
+{
+ struct ip6_msource find;
+ struct ip6_msource *ims, *nims;
+#ifdef KTR
+ char ip6tbuf[INET6_ADDRSTRLEN];
+#endif
+
+ find.im6s_addr = *addr;
+ ims = RB_FIND(ip6_msource_tree, &inm->in6m_srcs, &find);
+ if (ims == NULL && !noalloc) {
+ if (inm->in6m_nsrc == in6_mcast_maxgrpsrc)
+ return (ENOSPC);
+ nims = malloc(sizeof(struct ip6_msource), M_IP6MSOURCE,
+ M_NOWAIT | M_ZERO);
+ if (nims == NULL)
+ return (ENOMEM);
+ nims->im6s_addr = *addr;
+ RB_INSERT(ip6_msource_tree, &inm->in6m_srcs, nims);
+ ++inm->in6m_nsrc;
+ ims = nims;
+ CTR3(KTR_MLD, "%s: allocated %s as %p", __func__,
+ ip6_sprintf(ip6tbuf, addr), ims);
+ }
+
+ *pims = ims;
+ return (0);
+}
+
+/*
+ * Merge socket-layer source into MLD-layer source.
+ * If rollback is non-zero, perform the inverse of the merge.
+ */
+static void
+im6s_merge(struct ip6_msource *ims, const struct in6_msource *lims,
+ const int rollback)
+{
+ int n = rollback ? -1 : 1;
+#ifdef KTR
+ char ip6tbuf[INET6_ADDRSTRLEN];
+
+ ip6_sprintf(ip6tbuf, &lims->im6s_addr);
+#endif
+
+ if (lims->im6sl_st[0] == MCAST_EXCLUDE) {
+ CTR3(KTR_MLD, "%s: t1 ex -= %d on %s", __func__, n, ip6tbuf);
+ ims->im6s_st[1].ex -= n;
+ } else if (lims->im6sl_st[0] == MCAST_INCLUDE) {
+ CTR3(KTR_MLD, "%s: t1 in -= %d on %s", __func__, n, ip6tbuf);
+ ims->im6s_st[1].in -= n;
+ }
+
+ if (lims->im6sl_st[1] == MCAST_EXCLUDE) {
+ CTR3(KTR_MLD, "%s: t1 ex += %d on %s", __func__, n, ip6tbuf);
+ ims->im6s_st[1].ex += n;
+ } else if (lims->im6sl_st[1] == MCAST_INCLUDE) {
+ CTR3(KTR_MLD, "%s: t1 in += %d on %s", __func__, n, ip6tbuf);
+ ims->im6s_st[1].in += n;
+ }
+}
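+
+/*
+ * Worked example for im6s_merge(): a socket-layer source moving from
+ * INCLUDE at t0 to EXCLUDE at t1 (rollback == 0, so n == 1) yields
+ * im6s_st[1].in -= 1 followed by im6s_st[1].ex += 1 on the MLD-layer
+ * node, i.e. exactly one reference migrates between the per-source
+ * counters; the same call with rollback != 0 (n == -1) undoes it.
+ */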
+
+/*
+ * Atomically update the global in6_multi state, when a membership's
+ * filter list is being updated in any way.
+ *
+ * imf is the per-inpcb-membership group filter pointer.
+ * A fake imf may be passed for in-kernel consumers.
+ *
+ * XXX This is a candidate for a set-symmetric-difference style loop
+ * which would eliminate the repeated lookup from root of ims nodes,
+ * as they share the same key space.
+ *
+ * If any error occurred this function will back out of refcounts
+ * and return a non-zero value.
+ */
+static int
+in6m_merge(struct in6_multi *inm, /*const*/ struct in6_mfilter *imf)
+{
+ struct ip6_msource *ims, *nims;
+ struct in6_msource *lims;
+ int schanged, error;
+ int nsrc0, nsrc1;
+
+ schanged = 0;
+ error = 0;
+ nsrc1 = nsrc0 = 0;
+
+ /*
+ * Update the source filters first, as this may fail.
+ * Maintain count of in-mode filters at t0, t1. These are
+ * used to work out if we transition into ASM mode or not.
+ * Maintain a count of source filters whose state was
+ * actually modified by this operation.
+ */
+ RB_FOREACH(ims, ip6_msource_tree, &imf->im6f_sources) {
+ lims = (struct in6_msource *)ims;
+ if (lims->im6sl_st[0] == imf->im6f_st[0]) nsrc0++;
+ if (lims->im6sl_st[1] == imf->im6f_st[1]) nsrc1++;
+ if (lims->im6sl_st[0] == lims->im6sl_st[1]) continue;
+ error = in6m_get_source(inm, &lims->im6s_addr, 0, &nims);
+ ++schanged;
+ if (error)
+ break;
+ im6s_merge(nims, lims, 0);
+ }
+ if (error) {
+ struct ip6_msource *bims;
+
+ RB_FOREACH_REVERSE_FROM(ims, ip6_msource_tree, nims) {
+ lims = (struct in6_msource *)ims;
+ if (lims->im6sl_st[0] == lims->im6sl_st[1])
+ continue;
+ (void)in6m_get_source(inm, &lims->im6s_addr, 1, &bims);
+ if (bims == NULL)
+ continue;
+ im6s_merge(bims, lims, 1);
+ }
+ goto out_reap;
+ }
+
+ CTR3(KTR_MLD, "%s: imf filters in-mode: %d at t0, %d at t1",
+ __func__, nsrc0, nsrc1);
+
+ /* Handle transition between INCLUDE {n} and INCLUDE {} on socket. */
+ if (imf->im6f_st[0] == imf->im6f_st[1] &&
+ imf->im6f_st[1] == MCAST_INCLUDE) {
+ if (nsrc1 == 0) {
+ CTR1(KTR_MLD, "%s: --in on inm at t1", __func__);
+ --inm->in6m_st[1].iss_in;
+ }
+ }
+
+ /* Handle filter mode transition on socket. */
+ if (imf->im6f_st[0] != imf->im6f_st[1]) {
+ CTR3(KTR_MLD, "%s: imf transition %d to %d",
+ __func__, imf->im6f_st[0], imf->im6f_st[1]);
+
+ if (imf->im6f_st[0] == MCAST_EXCLUDE) {
+ CTR1(KTR_MLD, "%s: --ex on inm at t1", __func__);
+ --inm->in6m_st[1].iss_ex;
+ } else if (imf->im6f_st[0] == MCAST_INCLUDE) {
+ CTR1(KTR_MLD, "%s: --in on inm at t1", __func__);
+ --inm->in6m_st[1].iss_in;
+ }
+
+ if (imf->im6f_st[1] == MCAST_EXCLUDE) {
+ CTR1(KTR_MLD, "%s: ex++ on inm at t1", __func__);
+ inm->in6m_st[1].iss_ex++;
+ } else if (imf->im6f_st[1] == MCAST_INCLUDE && nsrc1 > 0) {
+ CTR1(KTR_MLD, "%s: in++ on inm at t1", __func__);
+ inm->in6m_st[1].iss_in++;
+ }
+ }
+
+ /*
+ * Track inm filter state in terms of listener counts.
+ * If there are any exclusive listeners, stack-wide
+ * membership is exclusive.
+ * Otherwise, if only inclusive listeners, stack-wide is inclusive.
+ * If no listeners remain, state is undefined at t1,
+ * and the MLD lifecycle for this group should finish.
+ */
+ if (inm->in6m_st[1].iss_ex > 0) {
+ CTR1(KTR_MLD, "%s: transition to EX", __func__);
+ inm->in6m_st[1].iss_fmode = MCAST_EXCLUDE;
+ } else if (inm->in6m_st[1].iss_in > 0) {
+ CTR1(KTR_MLD, "%s: transition to IN", __func__);
+ inm->in6m_st[1].iss_fmode = MCAST_INCLUDE;
+ } else {
+ CTR1(KTR_MLD, "%s: transition to UNDEF", __func__);
+ inm->in6m_st[1].iss_fmode = MCAST_UNDEFINED;
+ }
+
+ /* Decrement ASM listener count on transition out of ASM mode. */
+	if (imf->im6f_st[0] == MCAST_EXCLUDE && nsrc0 == 0) {
+		if ((imf->im6f_st[1] != MCAST_EXCLUDE) ||
+		    (imf->im6f_st[1] == MCAST_EXCLUDE && nsrc1 > 0)) {
+			CTR1(KTR_MLD, "%s: --asm on inm at t1", __func__);
+			--inm->in6m_st[1].iss_asm;
+		}
+	}
+
+ /* Increment ASM listener count on transition to ASM mode. */
+ if (imf->im6f_st[1] == MCAST_EXCLUDE && nsrc1 == 0) {
+ CTR1(KTR_MLD, "%s: asm++ on inm at t1", __func__);
+ inm->in6m_st[1].iss_asm++;
+ }
+
+ CTR3(KTR_MLD, "%s: merged imf %p to inm %p", __func__, imf, inm);
+ in6m_print(inm);
+
+out_reap:
+ if (schanged > 0) {
+ CTR1(KTR_MLD, "%s: sources changed; reaping", __func__);
+ in6m_reap(inm);
+ }
+ return (error);
+}
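+
+/*
+ * Worked example for in6m_merge(): when the last socket leaves an
+ * ASM (EXCLUDE {}) membership, the mode transition runs --iss_ex and
+ * the ASM case runs --iss_asm; with both listener counts at zero,
+ * iss_fmode becomes MCAST_UNDEFINED at t1 and the subsequent
+ * mld_change_state() downcall emits the MLD leave for the group.
+ */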
+
+/*
+ * Mark an in6_multi's filter set deltas as committed.
+ * Called by MLD after a state change has been enqueued.
+ */
+void
+in6m_commit(struct in6_multi *inm)
+{
+ struct ip6_msource *ims;
+
+ CTR2(KTR_MLD, "%s: commit inm %p", __func__, inm);
+ CTR1(KTR_MLD, "%s: pre commit:", __func__);
+ in6m_print(inm);
+
+ RB_FOREACH(ims, ip6_msource_tree, &inm->in6m_srcs) {
+ ims->im6s_st[0] = ims->im6s_st[1];
+ }
+ inm->in6m_st[0] = inm->in6m_st[1];
+}
+
+/*
+ * Reap unreferenced nodes from an in6_multi's filter set.
+ */
+static void
+in6m_reap(struct in6_multi *inm)
+{
+ struct ip6_msource *ims, *tims;
+
+ RB_FOREACH_SAFE(ims, ip6_msource_tree, &inm->in6m_srcs, tims) {
+ if (ims->im6s_st[0].ex > 0 || ims->im6s_st[0].in > 0 ||
+ ims->im6s_st[1].ex > 0 || ims->im6s_st[1].in > 0 ||
+ ims->im6s_stp != 0)
+ continue;
+ CTR2(KTR_MLD, "%s: free ims %p", __func__, ims);
+ RB_REMOVE(ip6_msource_tree, &inm->in6m_srcs, ims);
+ free(ims, M_IP6MSOURCE);
+ inm->in6m_nsrc--;
+ }
+}
+
+/*
+ * Purge all source nodes from an in6_multi's filter set.
+ */
+static void
+in6m_purge(struct in6_multi *inm)
+{
+ struct ip6_msource *ims, *tims;
+
+ RB_FOREACH_SAFE(ims, ip6_msource_tree, &inm->in6m_srcs, tims) {
+ CTR2(KTR_MLD, "%s: free ims %p", __func__, ims);
+ RB_REMOVE(ip6_msource_tree, &inm->in6m_srcs, ims);
+ free(ims, M_IP6MSOURCE);
+ inm->in6m_nsrc--;
+ }
+}
+
+/*
+ * Join a multicast address w/o sources.
+ * KAME compatibility entry point.
+ *
+ * SMPng: Assume no mc locks held by caller.
+ */
+struct in6_multi_mship *
+in6_joingroup(struct ifnet *ifp, struct in6_addr *mcaddr,
+ int *errorp, int delay)
+{
+ struct in6_multi_mship *imm;
+ int error;
+
+ imm = malloc(sizeof(*imm), M_IP6MADDR, M_NOWAIT);
+ if (imm == NULL) {
+ *errorp = ENOBUFS;
+ return (NULL);
+ }
+
+ delay = (delay * PR_FASTHZ) / hz;
+
+ error = in6_mc_join(ifp, mcaddr, NULL, &imm->i6mm_maddr, delay);
+ if (error) {
+ *errorp = error;
+ free(imm, M_IP6MADDR);
+ return (NULL);
+ }
+
+ return (imm);
+}
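+
+/*
+ * A minimal usage sketch for the KAME-compatibility entry point
+ * above, mirroring how address-configuration code joins e.g. a
+ * solicited-node group ("mltaddr" is a hypothetical struct in6_addr
+ * and "ia" the owning in6_ifaddr; neither is defined here):
+ *
+ *	struct in6_multi_mship *imm;
+ *	int error;
+ *
+ *	imm = in6_joingroup(ifp, &mltaddr, &error, 0);
+ *	if (imm == NULL)
+ *		return (error);
+ *	LIST_INSERT_HEAD(&ia->ia6_memberships, imm, i6mm_chain);
+ */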
+
+/*
+ * Leave a multicast address w/o sources.
+ * KAME compatibility entry point.
+ *
+ * SMPng: Assume no mc locks held by caller.
+ */
+int
+in6_leavegroup(struct in6_multi_mship *imm)
+{
+
+ if (imm->i6mm_maddr != NULL)
+ in6_mc_leave(imm->i6mm_maddr, NULL);
+ free(imm, M_IP6MADDR);
+ return 0;
+}
+
+/*
+ * Join a multicast group; unlocked entry point.
+ *
+ * SMPng: XXX: in6_mc_join() is called from in6_control() when upper
+ * locks are not held. Fortunately, ifp is unlikely to have been detached
+ * at this point, so we assume it's OK to recurse.
+ */
+int
+in6_mc_join(struct ifnet *ifp, const struct in6_addr *mcaddr,
+ /*const*/ struct in6_mfilter *imf, struct in6_multi **pinm,
+ const int delay)
+{
+ int error;
+
+ IN6_MULTI_LOCK();
+ error = in6_mc_join_locked(ifp, mcaddr, imf, pinm, delay);
+ IN6_MULTI_UNLOCK();
+
+ return (error);
+}
+
+/*
+ * Join a multicast group; real entry point.
+ *
+ * Only preserves atomicity at inm level.
+ * NOTE: imf argument cannot be const due to sys/tree.h limitations.
+ *
+ * If the MLD downcall fails, the group is not joined, and an error
+ * code is returned.
+ */
+int
+in6_mc_join_locked(struct ifnet *ifp, const struct in6_addr *mcaddr,
+ /*const*/ struct in6_mfilter *imf, struct in6_multi **pinm,
+ const int delay)
+{
+ struct in6_mfilter timf;
+ struct in6_multi *inm;
+ int error;
+#ifdef KTR
+ char ip6tbuf[INET6_ADDRSTRLEN];
+#endif
+
+#ifdef INVARIANTS
+ /*
+ * Sanity: Check scope zone ID was set for ifp, if and
+ * only if group is scoped to an interface.
+ */
+ KASSERT(IN6_IS_ADDR_MULTICAST(mcaddr),
+ ("%s: not a multicast address", __func__));
+ if (IN6_IS_ADDR_MC_LINKLOCAL(mcaddr) ||
+ IN6_IS_ADDR_MC_INTFACELOCAL(mcaddr)) {
+ KASSERT(mcaddr->s6_addr16[1] != 0,
+ ("%s: scope zone ID not set", __func__));
+ }
+#endif
+
+ IN6_MULTI_LOCK_ASSERT();
+
+ CTR4(KTR_MLD, "%s: join %s on %p(%s))", __func__,
+ ip6_sprintf(ip6tbuf, mcaddr), ifp, ifp->if_xname);
+
+ error = 0;
+ inm = NULL;
+
+ /*
+ * If no imf was specified (i.e. kernel consumer),
+ * fake one up and assume it is an ASM join.
+ */
+ if (imf == NULL) {
+ im6f_init(&timf, MCAST_UNDEFINED, MCAST_EXCLUDE);
+ imf = &timf;
+ }
+
+ error = in6_mc_get(ifp, mcaddr, &inm);
+ if (error) {
+ CTR1(KTR_MLD, "%s: in6_mc_get() failure", __func__);
+ return (error);
+ }
+
+ CTR1(KTR_MLD, "%s: merge inm state", __func__);
+ error = in6m_merge(inm, imf);
+ if (error) {
+ CTR1(KTR_MLD, "%s: failed to merge inm state", __func__);
+ goto out_in6m_release;
+ }
+
+ CTR1(KTR_MLD, "%s: doing mld downcall", __func__);
+ error = mld_change_state(inm, delay);
+ if (error) {
+ CTR1(KTR_MLD, "%s: failed to update source", __func__);
+ goto out_in6m_release;
+ }
+
+out_in6m_release:
+ if (error) {
+ CTR2(KTR_MLD, "%s: dropping ref on %p", __func__, inm);
+ in6m_release_locked(inm);
+ } else {
+ *pinm = inm;
+ }
+
+ return (error);
+}
+
+/*
+ * Leave a multicast group; unlocked entry point.
+ */
+int
+in6_mc_leave(struct in6_multi *inm, /*const*/ struct in6_mfilter *imf)
+{
+ struct ifnet *ifp;
+ int error;
+
+ ifp = inm->in6m_ifp;
+
+ IN6_MULTI_LOCK();
+ error = in6_mc_leave_locked(inm, imf);
+ IN6_MULTI_UNLOCK();
+
+ return (error);
+}
+
+/*
+ * Leave a multicast group; real entry point.
+ * All source filters will be expunged.
+ *
+ * Only preserves atomicity at inm level.
+ *
+ * Holding the write lock for the INP which contains imf
+ * is highly advisable. We can't assert for it as imf does not
+ * contain a back-pointer to the owning inp.
+ *
+ * Note: This is not the same as in6m_release(*) as this function also
+ * makes a state change downcall into MLD.
+ */
+int
+in6_mc_leave_locked(struct in6_multi *inm, /*const*/ struct in6_mfilter *imf)
+{
+ struct in6_mfilter timf;
+ int error;
+#ifdef KTR
+ char ip6tbuf[INET6_ADDRSTRLEN];
+#endif
+
+ error = 0;
+
+ IN6_MULTI_LOCK_ASSERT();
+
+ CTR5(KTR_MLD, "%s: leave inm %p, %s/%s, imf %p", __func__,
+ inm, ip6_sprintf(ip6tbuf, &inm->in6m_addr),
+ (in6m_is_ifp_detached(inm) ? "null" : inm->in6m_ifp->if_xname),
+ imf);
+
+ /*
+ * If no imf was specified (i.e. kernel consumer),
+ * fake one up and assume it is an ASM join.
+ */
+ if (imf == NULL) {
+ im6f_init(&timf, MCAST_EXCLUDE, MCAST_UNDEFINED);
+ imf = &timf;
+ }
+
+ /*
+ * Begin state merge transaction at MLD layer.
+ *
+ * As this particular invocation should not cause any memory
+ * to be allocated, and there is no opportunity to roll back
+ * the transaction, it MUST NOT fail.
+ */
+ CTR1(KTR_MLD, "%s: merge inm state", __func__);
+ error = in6m_merge(inm, imf);
+ KASSERT(error == 0, ("%s: failed to merge inm state", __func__));
+
+ CTR1(KTR_MLD, "%s: doing mld downcall", __func__);
+ error = mld_change_state(inm, 0);
+ if (error)
+ CTR1(KTR_MLD, "%s: failed mld downcall", __func__);
+
+ CTR2(KTR_MLD, "%s: dropping ref on %p", __func__, inm);
+ in6m_release_locked(inm);
+
+ return (error);
+}
+
+/*
+ * Block or unblock an ASM multicast source on an inpcb.
+ * This implements the delta-based API described in RFC 3678.
+ *
+ * The delta-based API applies only to exclusive-mode memberships.
+ * An MLD downcall will be performed.
+ *
+ * SMPng: NOTE: Must take Giant as a join may create a new ifma.
+ *
+ * Return 0 if successful, otherwise return an appropriate error code.
+ */
+static int
+in6p_block_unblock_source(struct inpcb *inp, struct sockopt *sopt)
+{
+ struct group_source_req gsr;
+ sockunion_t *gsa, *ssa;
+ struct ifnet *ifp;
+ struct in6_mfilter *imf;
+ struct ip6_moptions *imo;
+ struct in6_msource *ims;
+ struct in6_multi *inm;
+ size_t idx;
+ uint16_t fmode;
+ int error, doblock;
+#ifdef KTR
+ char ip6tbuf[INET6_ADDRSTRLEN];
+#endif
+
+ ifp = NULL;
+ error = 0;
+ doblock = 0;
+
+ memset(&gsr, 0, sizeof(struct group_source_req));
+ gsa = (sockunion_t *)&gsr.gsr_group;
+ ssa = (sockunion_t *)&gsr.gsr_source;
+
+ switch (sopt->sopt_name) {
+ case MCAST_BLOCK_SOURCE:
+ case MCAST_UNBLOCK_SOURCE:
+ error = sooptcopyin(sopt, &gsr,
+ sizeof(struct group_source_req),
+ sizeof(struct group_source_req));
+ if (error)
+ return (error);
+
+ if (gsa->sin6.sin6_family != AF_INET6 ||
+ gsa->sin6.sin6_len != sizeof(struct sockaddr_in6))
+ return (EINVAL);
+
+ if (ssa->sin6.sin6_family != AF_INET6 ||
+ ssa->sin6.sin6_len != sizeof(struct sockaddr_in6))
+ return (EINVAL);
+
+ if (gsr.gsr_interface == 0 || V_if_index < gsr.gsr_interface)
+ return (EADDRNOTAVAIL);
+
+ ifp = ifnet_byindex(gsr.gsr_interface);
+
+ if (sopt->sopt_name == MCAST_BLOCK_SOURCE)
+ doblock = 1;
+ break;
+
+ default:
+ CTR2(KTR_MLD, "%s: unknown sopt_name %d",
+ __func__, sopt->sopt_name);
+ return (EOPNOTSUPP);
+ break;
+ }
+
+ if (!IN6_IS_ADDR_MULTICAST(&gsa->sin6.sin6_addr))
+ return (EINVAL);
+
+ (void)in6_setscope(&gsa->sin6.sin6_addr, ifp, NULL);
+
+ /*
+ * Check if we are actually a member of this group.
+ */
+ imo = in6p_findmoptions(inp);
+ idx = im6o_match_group(imo, ifp, &gsa->sa);
+ if (idx == -1 || imo->im6o_mfilters == NULL) {
+ error = EADDRNOTAVAIL;
+ goto out_in6p_locked;
+ }
+
+ KASSERT(imo->im6o_mfilters != NULL,
+ ("%s: im6o_mfilters not allocated", __func__));
+ imf = &imo->im6o_mfilters[idx];
+ inm = imo->im6o_membership[idx];
+
+ /*
+	 * Attempting to use the delta-based API on a
+	 * non-exclusive-mode membership is an error.
+ */
+ fmode = imf->im6f_st[0];
+ if (fmode != MCAST_EXCLUDE) {
+ error = EINVAL;
+ goto out_in6p_locked;
+ }
+
+ /*
+ * Deal with error cases up-front:
+ * Asked to block, but already blocked; or
+ * Asked to unblock, but nothing to unblock.
+ * If adding a new block entry, allocate it.
+ */
+ ims = im6o_match_source(imo, idx, &ssa->sa);
+ if ((ims != NULL && doblock) || (ims == NULL && !doblock)) {
+ CTR3(KTR_MLD, "%s: source %s %spresent", __func__,
+ ip6_sprintf(ip6tbuf, &ssa->sin6.sin6_addr),
+ doblock ? "" : "not ");
+ error = EADDRNOTAVAIL;
+ goto out_in6p_locked;
+ }
+
+ INP_WLOCK_ASSERT(inp);
+
+ /*
+ * Begin state merge transaction at socket layer.
+ */
+ if (doblock) {
+ CTR2(KTR_MLD, "%s: %s source", __func__, "block");
+ ims = im6f_graft(imf, fmode, &ssa->sin6);
+ if (ims == NULL)
+ error = ENOMEM;
+ } else {
+ CTR2(KTR_MLD, "%s: %s source", __func__, "allow");
+ error = im6f_prune(imf, &ssa->sin6);
+ }
+
+ if (error) {
+ CTR1(KTR_MLD, "%s: merge imf state failed", __func__);
+ goto out_im6f_rollback;
+ }
+
+ /*
+ * Begin state merge transaction at MLD layer.
+ */
+ IN6_MULTI_LOCK();
+
+ CTR1(KTR_MLD, "%s: merge inm state", __func__);
+ error = in6m_merge(inm, imf);
+ if (error) {
+ CTR1(KTR_MLD, "%s: failed to merge inm state", __func__);
+ goto out_im6f_rollback;
+ }
+
+ CTR1(KTR_MLD, "%s: doing mld downcall", __func__);
+ error = mld_change_state(inm, 0);
+ if (error)
+ CTR1(KTR_MLD, "%s: failed mld downcall", __func__);
+
+ IN6_MULTI_UNLOCK();
+
+out_im6f_rollback:
+ if (error)
+ im6f_rollback(imf);
+ else
+ im6f_commit(imf);
+
+ im6f_reap(imf);
+
+out_in6p_locked:
+ INP_WUNLOCK(inp);
+ return (error);
+}
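+
+/*
+ * Userland view of the handler above, a sketch using the standard
+ * RFC 3678 delta API (socket "s", the interface name and the
+ * addresses are placeholders):
+ *
+ *	struct group_source_req gsr;
+ *	struct sockaddr_in6 *grp = (struct sockaddr_in6 *)&gsr.gsr_group;
+ *	struct sockaddr_in6 *src = (struct sockaddr_in6 *)&gsr.gsr_source;
+ *
+ *	memset(&gsr, 0, sizeof(gsr));
+ *	gsr.gsr_interface = if_nametoindex("em0");
+ *	grp->sin6_family = src->sin6_family = AF_INET6;
+ *	grp->sin6_len = src->sin6_len = sizeof(struct sockaddr_in6);
+ *	inet_pton(AF_INET6, "ff05::1", &grp->sin6_addr);
+ *	inet_pton(AF_INET6, "2001:db8::1", &src->sin6_addr);
+ *	if (setsockopt(s, IPPROTO_IPV6, MCAST_BLOCK_SOURCE,
+ *	    &gsr, sizeof(gsr)) != 0)
+ *		err(1, "MCAST_BLOCK_SOURCE");
+ *
+ * This succeeds only on an existing exclusive-mode membership, as
+ * enforced by the im6f_st[0] check above.
+ */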
+
+/*
+ * Given an inpcb, return its multicast options structure pointer. Accepts
+ * an unlocked inpcb pointer, but will return it locked. May sleep.
+ *
+ * SMPng: NOTE: Potentially calls malloc(M_WAITOK) with Giant held.
+ * SMPng: NOTE: Returns with the INP write lock held.
+ */
+static struct ip6_moptions *
+in6p_findmoptions(struct inpcb *inp)
+{
+ struct ip6_moptions *imo;
+ struct in6_multi **immp;
+ struct in6_mfilter *imfp;
+ size_t idx;
+
+ INP_WLOCK(inp);
+ if (inp->in6p_moptions != NULL)
+ return (inp->in6p_moptions);
+
+ INP_WUNLOCK(inp);
+
+ imo = malloc(sizeof(*imo), M_IP6MOPTS, M_WAITOK);
+ immp = malloc(sizeof(*immp) * IPV6_MIN_MEMBERSHIPS, M_IP6MOPTS,
+ M_WAITOK | M_ZERO);
+ imfp = malloc(sizeof(struct in6_mfilter) * IPV6_MIN_MEMBERSHIPS,
+ M_IN6MFILTER, M_WAITOK);
+
+ imo->im6o_multicast_ifp = NULL;
+ imo->im6o_multicast_hlim = V_ip6_defmcasthlim;
+ imo->im6o_multicast_loop = in6_mcast_loop;
+ imo->im6o_num_memberships = 0;
+ imo->im6o_max_memberships = IPV6_MIN_MEMBERSHIPS;
+ imo->im6o_membership = immp;
+
+ /* Initialize per-group source filters. */
+ for (idx = 0; idx < IPV6_MIN_MEMBERSHIPS; idx++)
+ im6f_init(&imfp[idx], MCAST_UNDEFINED, MCAST_EXCLUDE);
+ imo->im6o_mfilters = imfp;
+
+ INP_WLOCK(inp);
+ if (inp->in6p_moptions != NULL) {
+ free(imfp, M_IN6MFILTER);
+ free(immp, M_IP6MOPTS);
+ free(imo, M_IP6MOPTS);
+ return (inp->in6p_moptions);
+ }
+ inp->in6p_moptions = imo;
+ return (imo);
+}
+
+/*
+ * Discard the IPv6 multicast options (and source filters).
+ *
+ * SMPng: NOTE: assumes INP write lock is held.
+ */
+void
+ip6_freemoptions(struct ip6_moptions *imo)
+{
+ struct in6_mfilter *imf;
+ size_t idx, nmships;
+
+ KASSERT(imo != NULL, ("%s: ip6_moptions is NULL", __func__));
+
+ nmships = imo->im6o_num_memberships;
+ for (idx = 0; idx < nmships; ++idx) {
+ imf = imo->im6o_mfilters ? &imo->im6o_mfilters[idx] : NULL;
+ if (imf)
+ im6f_leave(imf);
+ /* XXX this will thrash the lock(s) */
+ (void)in6_mc_leave(imo->im6o_membership[idx], imf);
+ if (imf)
+ im6f_purge(imf);
+ }
+
+ if (imo->im6o_mfilters)
+ free(imo->im6o_mfilters, M_IN6MFILTER);
+ free(imo->im6o_membership, M_IP6MOPTS);
+ free(imo, M_IP6MOPTS);
+}
+
+/*
+ * Atomically get source filters on a socket for an IPv6 multicast group.
+ * Called with INP lock held; returns with lock released.
+ */
+static int
+in6p_get_source_filters(struct inpcb *inp, struct sockopt *sopt)
+{
+ struct __msfilterreq msfr;
+ sockunion_t *gsa;
+ struct ifnet *ifp;
+ struct ip6_moptions *imo;
+ struct in6_mfilter *imf;
+ struct ip6_msource *ims;
+ struct in6_msource *lims;
+ struct sockaddr_in6 *psin;
+ struct sockaddr_storage *ptss;
+ struct sockaddr_storage *tss;
+ int error;
+ size_t idx, nsrcs, ncsrcs;
+
+ INP_WLOCK_ASSERT(inp);
+
+ imo = inp->in6p_moptions;
+ KASSERT(imo != NULL, ("%s: null ip6_moptions", __func__));
+
+ INP_WUNLOCK(inp);
+
+ error = sooptcopyin(sopt, &msfr, sizeof(struct __msfilterreq),
+ sizeof(struct __msfilterreq));
+ if (error)
+ return (error);
+
+ if (msfr.msfr_group.ss_family != AF_INET6 ||
+ msfr.msfr_group.ss_len != sizeof(struct sockaddr_in6))
+ return (EINVAL);
+
+ gsa = (sockunion_t *)&msfr.msfr_group;
+ if (!IN6_IS_ADDR_MULTICAST(&gsa->sin6.sin6_addr))
+ return (EINVAL);
+
+ if (msfr.msfr_ifindex == 0 || V_if_index < msfr.msfr_ifindex)
+ return (EADDRNOTAVAIL);
+ ifp = ifnet_byindex(msfr.msfr_ifindex);
+ if (ifp == NULL)
+ return (EADDRNOTAVAIL);
+ (void)in6_setscope(&gsa->sin6.sin6_addr, ifp, NULL);
+
+ INP_WLOCK(inp);
+
+ /*
+ * Lookup group on the socket.
+ */
+ idx = im6o_match_group(imo, ifp, &gsa->sa);
+ if (idx == -1 || imo->im6o_mfilters == NULL) {
+ INP_WUNLOCK(inp);
+ return (EADDRNOTAVAIL);
+ }
+ imf = &imo->im6o_mfilters[idx];
+
+ /*
+ * Ignore memberships which are in limbo.
+ */
+ if (imf->im6f_st[1] == MCAST_UNDEFINED) {
+ INP_WUNLOCK(inp);
+ return (EAGAIN);
+ }
+ msfr.msfr_fmode = imf->im6f_st[1];
+
+ /*
+ * If the user specified a buffer, copy out the source filter
+ * entries to userland gracefully.
+ * We only copy out the number of entries which userland
+ * has asked for, but we always tell userland how big the
+ * buffer really needs to be.
+ */
+ tss = NULL;
+ if (msfr.msfr_srcs != NULL && msfr.msfr_nsrcs > 0) {
+ tss = malloc(sizeof(struct sockaddr_storage) * msfr.msfr_nsrcs,
+ M_TEMP, M_NOWAIT | M_ZERO);
+ if (tss == NULL) {
+ INP_WUNLOCK(inp);
+ return (ENOBUFS);
+ }
+ }
+
+ /*
+ * Count number of sources in-mode at t0.
+ * If buffer space exists and remains, copy out source entries.
+ */
+ nsrcs = msfr.msfr_nsrcs;
+ ncsrcs = 0;
+ ptss = tss;
+ RB_FOREACH(ims, ip6_msource_tree, &imf->im6f_sources) {
+ lims = (struct in6_msource *)ims;
+ if (lims->im6sl_st[0] == MCAST_UNDEFINED ||
+ lims->im6sl_st[0] != imf->im6f_st[0])
+ continue;
+ ++ncsrcs;
+ if (tss != NULL && nsrcs > 0) {
+ psin = (struct sockaddr_in6 *)ptss;
+ psin->sin6_family = AF_INET6;
+ psin->sin6_len = sizeof(struct sockaddr_in6);
+ psin->sin6_addr = lims->im6s_addr;
+ psin->sin6_port = 0;
+ --nsrcs;
+ ++ptss;
+ }
+ }
+
+ INP_WUNLOCK(inp);
+
+ if (tss != NULL) {
+ error = copyout(tss, msfr.msfr_srcs,
+ sizeof(struct sockaddr_storage) * msfr.msfr_nsrcs);
+ free(tss, M_TEMP);
+ if (error)
+ return (error);
+ }
+
+ msfr.msfr_nsrcs = ncsrcs;
+ error = sooptcopyout(sopt, &msfr, sizeof(struct __msfilterreq));
+
+ return (error);
+}
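+
+/*
+ * From userland the state above is normally read via the RFC 3678
+ * getsourcefilter(3) wrapper rather than a raw IPV6_MSFILTER
+ * getsockopt; a sketch ("grp" and "ifindex" are placeholders):
+ *
+ *	uint32_t fmode, nsrcs = 0;
+ *
+ *	// A first call with nsrcs == 0 only reports the entry count.
+ *	if (getsourcefilter(s, ifindex, (struct sockaddr *)&grp,
+ *	    grp.sin6_len, &fmode, &nsrcs, NULL) != 0)
+ *		err(1, "getsourcefilter");
+ *
+ * A second call with a sockaddr_storage array of nsrcs elements then
+ * retrieves the filter entries themselves.
+ */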
+
+/*
+ * Return the IP multicast options in response to user getsockopt().
+ */
+int
+ip6_getmoptions(struct inpcb *inp, struct sockopt *sopt)
+{
+ struct ip6_moptions *im6o;
+ int error;
+ u_int optval;
+
+ INP_WLOCK(inp);
+ im6o = inp->in6p_moptions;
+ /*
+	 * If the socket is neither of type SOCK_RAW nor SOCK_DGRAM,
+	 * or is a divert socket, reject it.
+ */
+ if (inp->inp_socket->so_proto->pr_protocol == IPPROTO_DIVERT ||
+ (inp->inp_socket->so_proto->pr_type != SOCK_RAW &&
+ inp->inp_socket->so_proto->pr_type != SOCK_DGRAM)) {
+ INP_WUNLOCK(inp);
+ return (EOPNOTSUPP);
+ }
+
+ error = 0;
+ switch (sopt->sopt_name) {
+ case IPV6_MULTICAST_IF:
+ if (im6o == NULL || im6o->im6o_multicast_ifp == NULL) {
+ optval = 0;
+ } else {
+ optval = im6o->im6o_multicast_ifp->if_index;
+ }
+ INP_WUNLOCK(inp);
+ error = sooptcopyout(sopt, &optval, sizeof(u_int));
+ break;
+
+ case IPV6_MULTICAST_HOPS:
+ if (im6o == NULL)
+ optval = V_ip6_defmcasthlim;
+ else
+			optval = im6o->im6o_multicast_hlim;
+ INP_WUNLOCK(inp);
+ error = sooptcopyout(sopt, &optval, sizeof(u_int));
+ break;
+
+ case IPV6_MULTICAST_LOOP:
+ if (im6o == NULL)
+ optval = in6_mcast_loop; /* XXX VIMAGE */
+ else
+ optval = im6o->im6o_multicast_loop;
+ INP_WUNLOCK(inp);
+ error = sooptcopyout(sopt, &optval, sizeof(u_int));
+ break;
+
+ case IPV6_MSFILTER:
+ if (im6o == NULL) {
+ error = EADDRNOTAVAIL;
+ INP_WUNLOCK(inp);
+ } else {
+ error = in6p_get_source_filters(inp, sopt);
+ }
+ break;
+
+ default:
+ INP_WUNLOCK(inp);
+ error = ENOPROTOOPT;
+ break;
+ }
+
+ INP_UNLOCK_ASSERT(inp);
+
+ return (error);
+}
+
+/*
+ * Look up the ifnet to use for a multicast group membership,
+ * given the address of an IPv6 group.
+ *
+ * This routine exists to support legacy IPv6 multicast applications.
+ *
+ * If inp is non-NULL, use this socket's current FIB number for any
+ * required FIB lookup. Look up the group address in the unicast FIB,
+ * and use its ifp; usually, this points to the default next-hop.
+ * If the FIB lookup fails, return NULL.
+ *
+ * FUTURE: Support multiple forwarding tables for IPv6.
+ *
+ * Returns NULL if no ifp could be found.
+ */
+static struct ifnet *
+in6p_lookup_mcast_ifp(const struct inpcb *in6p __unused,
+ const struct sockaddr_in6 *gsin6)
+{
+ struct route_in6 ro6;
+ struct ifnet *ifp;
+
+ KASSERT(in6p->inp_vflag & INP_IPV6,
+ ("%s: not INP_IPV6 inpcb", __func__));
+ KASSERT(gsin6->sin6_family == AF_INET6,
+ ("%s: not AF_INET6 group", __func__));
+ KASSERT(IN6_IS_ADDR_MULTICAST(&gsin6->sin6_addr),
+ ("%s: not multicast", __func__));
+
+ ifp = NULL;
+ memset(&ro6, 0, sizeof(struct route_in6));
+ memcpy(&ro6.ro_dst, gsin6, sizeof(struct sockaddr_in6));
+#ifdef notyet
+ rtalloc_ign_fib(&ro6, 0, inp ? inp->inp_inc.inc_fibnum : 0);
+#else
+ rtalloc_ign((struct route *)&ro6, 0);
+#endif
+ if (ro6.ro_rt != NULL) {
+ ifp = ro6.ro_rt->rt_ifp;
+ KASSERT(ifp != NULL, ("%s: null ifp", __func__));
+ RTFREE(ro6.ro_rt);
+ }
+
+ return (ifp);
+}
+
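+/*
+ * Userland counterpart of in6p_join_group() below; a sketch in which
+ * the interface name and group address are placeholders:
+ *
+ *	struct ipv6_mreq mreq;
+ *
+ *	memset(&mreq, 0, sizeof(mreq));
+ *	inet_pton(AF_INET6, "ff15::1234", &mreq.ipv6mr_multiaddr);
+ *	mreq.ipv6mr_interface = if_nametoindex("em0");
+ *	if (setsockopt(s, IPPROTO_IPV6, IPV6_JOIN_GROUP,
+ *	    &mreq, sizeof(mreq)) != 0)
+ *		err(1, "IPV6_JOIN_GROUP");
+ *
+ * Passing ipv6mr_interface == 0 defers interface selection to the
+ * in6p_lookup_mcast_ifp() route lookup above.
+ */
+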
+/*
+ * Join an IPv6 multicast group, possibly with a source.
+ *
+ * FIXME: The KAME use of the unspecified address (::)
+ * to join *all* multicast groups is currently unsupported.
+ */
+static int
+in6p_join_group(struct inpcb *inp, struct sockopt *sopt)
+{
+ struct group_source_req gsr;
+ sockunion_t *gsa, *ssa;
+ struct ifnet *ifp;
+ struct in6_mfilter *imf;
+ struct ip6_moptions *imo;
+ struct in6_multi *inm;
+ struct in6_msource *lims;
+ size_t idx;
+ int error, is_new;
+
+ ifp = NULL;
+ imf = NULL;
+ lims = NULL;
+ error = 0;
+ is_new = 0;
+
+ memset(&gsr, 0, sizeof(struct group_source_req));
+ gsa = (sockunion_t *)&gsr.gsr_group;
+ gsa->ss.ss_family = AF_UNSPEC;
+ ssa = (sockunion_t *)&gsr.gsr_source;
+ ssa->ss.ss_family = AF_UNSPEC;
+
+ /*
+ * Chew everything into struct group_source_req.
+ * Overwrite the port field if present, as the sockaddr
+ * being copied in may be matched with a binary comparison.
+ * Ignore passed-in scope ID.
+ */
+ switch (sopt->sopt_name) {
+ case IPV6_JOIN_GROUP: {
+ struct ipv6_mreq mreq;
+
+ error = sooptcopyin(sopt, &mreq, sizeof(struct ipv6_mreq),
+ sizeof(struct ipv6_mreq));
+ if (error)
+ return (error);
+
+ gsa->sin6.sin6_family = AF_INET6;
+ gsa->sin6.sin6_len = sizeof(struct sockaddr_in6);
+ gsa->sin6.sin6_addr = mreq.ipv6mr_multiaddr;
+
+ if (mreq.ipv6mr_interface == 0) {
+ ifp = in6p_lookup_mcast_ifp(inp, &gsa->sin6);
+ } else {
+ if (mreq.ipv6mr_interface < 0 ||
+ V_if_index < mreq.ipv6mr_interface)
+ return (EADDRNOTAVAIL);
+ ifp = ifnet_byindex(mreq.ipv6mr_interface);
+ }
+ CTR3(KTR_MLD, "%s: ipv6mr_interface = %d, ifp = %p",
+ __func__, mreq.ipv6mr_interface, ifp);
+ } break;
+
+ case MCAST_JOIN_GROUP:
+ case MCAST_JOIN_SOURCE_GROUP:
+ if (sopt->sopt_name == MCAST_JOIN_GROUP) {
+ error = sooptcopyin(sopt, &gsr,
+ sizeof(struct group_req),
+ sizeof(struct group_req));
+ } else if (sopt->sopt_name == MCAST_JOIN_SOURCE_GROUP) {
+ error = sooptcopyin(sopt, &gsr,
+ sizeof(struct group_source_req),
+ sizeof(struct group_source_req));
+ }
+ if (error)
+ return (error);
+
+ if (gsa->sin6.sin6_family != AF_INET6 ||
+ gsa->sin6.sin6_len != sizeof(struct sockaddr_in6))
+ return (EINVAL);
+
+ if (sopt->sopt_name == MCAST_JOIN_SOURCE_GROUP) {
+ if (ssa->sin6.sin6_family != AF_INET6 ||
+ ssa->sin6.sin6_len != sizeof(struct sockaddr_in6))
+ return (EINVAL);
+ if (IN6_IS_ADDR_MULTICAST(&ssa->sin6.sin6_addr))
+ return (EINVAL);
+ /*
+ * TODO: Validate embedded scope ID in source
+ * list entry against passed-in ifp, if and only
+ * if source list filter entry is iface or node local.
+ */
+ in6_clearscope(&ssa->sin6.sin6_addr);
+ ssa->sin6.sin6_port = 0;
+ ssa->sin6.sin6_scope_id = 0;
+ }
+
+ if (gsr.gsr_interface == 0 || V_if_index < gsr.gsr_interface)
+ return (EADDRNOTAVAIL);
+ ifp = ifnet_byindex(gsr.gsr_interface);
+ break;
+
+ default:
+ CTR2(KTR_MLD, "%s: unknown sopt_name %d",
+ __func__, sopt->sopt_name);
+ return (EOPNOTSUPP);
+ break;
+ }
+
+ if (!IN6_IS_ADDR_MULTICAST(&gsa->sin6.sin6_addr))
+ return (EINVAL);
+
+ if (ifp == NULL || (ifp->if_flags & IFF_MULTICAST) == 0)
+ return (EADDRNOTAVAIL);
+
+ gsa->sin6.sin6_port = 0;
+ gsa->sin6.sin6_scope_id = 0;
+
+ /*
+ * Always set the scope zone ID on memberships created from userland.
+ * Use the passed-in ifp to do this.
+ * XXX The in6_setscope() return value is meaningless.
+ * XXX SCOPE6_LOCK() is taken by in6_setscope().
+ */
+ (void)in6_setscope(&gsa->sin6.sin6_addr, ifp, NULL);
+
+ imo = in6p_findmoptions(inp);
+ idx = im6o_match_group(imo, ifp, &gsa->sa);
+ if (idx == -1) {
+ is_new = 1;
+ } else {
+ inm = imo->im6o_membership[idx];
+ imf = &imo->im6o_mfilters[idx];
+ if (ssa->ss.ss_family != AF_UNSPEC) {
+ /*
+ * MCAST_JOIN_SOURCE_GROUP on an exclusive membership
+ * is an error. On an existing inclusive membership,
+ * it just adds the source to the filter list.
+ */
+ if (imf->im6f_st[1] != MCAST_INCLUDE) {
+ error = EINVAL;
+ goto out_in6p_locked;
+ }
+ /*
+ * Throw out duplicates.
+ *
+ * XXX FIXME: This makes a naive assumption that
+ * even if entries exist for *ssa in this imf,
+ * they will be rejected as dupes, even if they
+ * are not valid in the current mode (in-mode).
+ *
+ * in6_msource is transactioned just as for anything
+ * else in SSM -- but note naive use of in6m_graft()
+ * below for allocating new filter entries.
+ *
+ * This is only an issue if someone mixes the
+ * full-state SSM API with the delta-based API,
+ * which is discouraged in the relevant RFCs.
+ */
+ lims = im6o_match_source(imo, idx, &ssa->sa);
+ if (lims != NULL /*&&
+ lims->im6sl_st[1] == MCAST_INCLUDE*/) {
+ error = EADDRNOTAVAIL;
+ goto out_in6p_locked;
+ }
+ } else {
+ /*
+ * MCAST_JOIN_GROUP alone, on any existing membership,
+ * is rejected, to stop the same inpcb tying up
+ * multiple refs to the in_multi.
+ * On an existing inclusive membership, this is also
+ * an error; if you want to change filter mode,
+ * you must use the userland API setsourcefilter().
+ * XXX We don't reject this for imf in UNDEFINED
+ * state at t1, because allocation of a filter
+ * is atomic with allocation of a membership.
+ */
+ error = EINVAL;
+ goto out_in6p_locked;
+ }
+ }
+
+ /*
+ * Begin state merge transaction at socket layer.
+ */
+ INP_WLOCK_ASSERT(inp);
+
+ if (is_new) {
+ if (imo->im6o_num_memberships == imo->im6o_max_memberships) {
+ error = im6o_grow(imo);
+ if (error)
+ goto out_in6p_locked;
+ }
+ /*
+ * Allocate the new slot upfront so we can deal with
+ * grafting the new source filter in same code path
+ * as for join-source on existing membership.
+ */
+ idx = imo->im6o_num_memberships;
+ imo->im6o_membership[idx] = NULL;
+ imo->im6o_num_memberships++;
+ KASSERT(imo->im6o_mfilters != NULL,
+ ("%s: im6f_mfilters vector was not allocated", __func__));
+ imf = &imo->im6o_mfilters[idx];
+ KASSERT(RB_EMPTY(&imf->im6f_sources),
+ ("%s: im6f_sources not empty", __func__));
+ }
+
+ /*
+ * Graft new source into filter list for this inpcb's
+ * membership of the group. The in6_multi may not have
+ * been allocated yet if this is a new membership; however,
+ * the in6_mfilter slot will be allocated and must be initialized.
+ *
+ * Note: Grafting of exclusive mode filters doesn't happen
+ * in this path.
+ * XXX: Should check for non-NULL lims (node exists but may
+ * not be in-mode) for interop with full-state API.
+ */
+ if (ssa->ss.ss_family != AF_UNSPEC) {
+ /* Membership starts in IN mode */
+ if (is_new) {
+ CTR1(KTR_MLD, "%s: new join w/source", __func__);
+ im6f_init(imf, MCAST_UNDEFINED, MCAST_INCLUDE);
+ } else {
+ CTR2(KTR_MLD, "%s: %s source", __func__, "allow");
+ }
+ lims = im6f_graft(imf, MCAST_INCLUDE, &ssa->sin6);
+ if (lims == NULL) {
+ CTR1(KTR_MLD, "%s: merge imf state failed",
+ __func__);
+ error = ENOMEM;
+ goto out_im6o_free;
+ }
+ } else {
+ /* No address specified; Membership starts in EX mode */
+ if (is_new) {
+ CTR1(KTR_MLD, "%s: new join w/o source", __func__);
+ im6f_init(imf, MCAST_UNDEFINED, MCAST_EXCLUDE);
+ }
+ }
+
+ /*
+ * Begin state merge transaction at MLD layer.
+ */
+ IN6_MULTI_LOCK();
+
+ if (is_new) {
+ error = in6_mc_join_locked(ifp, &gsa->sin6.sin6_addr, imf,
+ &inm, 0);
+ if (error)
+ goto out_im6o_free;
+ imo->im6o_membership[idx] = inm;
+ } else {
+ CTR1(KTR_MLD, "%s: merge inm state", __func__);
+ error = in6m_merge(inm, imf);
+ if (error) {
+ CTR1(KTR_MLD, "%s: failed to merge inm state",
+ __func__);
+ goto out_im6f_rollback;
+ }
+ CTR1(KTR_MLD, "%s: doing mld downcall", __func__);
+ error = mld_change_state(inm, 0);
+ if (error) {
+ CTR1(KTR_MLD, "%s: failed mld downcall",
+ __func__);
+ goto out_im6f_rollback;
+ }
+ }
+
+ IN6_MULTI_UNLOCK();
+
+out_im6f_rollback:
+ INP_WLOCK_ASSERT(inp);
+ if (error) {
+ im6f_rollback(imf);
+ if (is_new)
+ im6f_purge(imf);
+ else
+ im6f_reap(imf);
+ } else {
+ im6f_commit(imf);
+ }
+
+out_im6o_free:
+ if (error && is_new) {
+ imo->im6o_membership[idx] = NULL;
+ --imo->im6o_num_memberships;
+ }
+
+out_in6p_locked:
+ INP_WUNLOCK(inp);
+ return (error);
+}
+
+/*
+ * Leave an IPv6 multicast group on an inpcb, possibly with a source.
+ */
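+/*
+ * Illustrative userland usage (a hedged sketch; 's', 'ifindex' and the
+ * group address are hypothetical). MCAST_LEAVE_SOURCE_GROUP is analogous
+ * but takes a struct group_source_req carrying the source as well:
+ *
+ *     struct group_req greq;
+ *     struct sockaddr_in6 *g6 = (struct sockaddr_in6 *)&greq.gr_group;
+ *     memset(&greq, 0, sizeof(greq));
+ *     greq.gr_interface = ifindex;
+ *     g6->sin6_family = AF_INET6;
+ *     g6->sin6_len = sizeof(*g6);
+ *     inet_pton(AF_INET6, "ff02::101", &g6->sin6_addr);
+ *     setsockopt(s, IPPROTO_IPV6, MCAST_LEAVE_GROUP, &greq, sizeof(greq));
+ */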
+static int
+in6p_leave_group(struct inpcb *inp, struct sockopt *sopt)
+{
+ struct ipv6_mreq mreq;
+ struct group_source_req gsr;
+ sockunion_t *gsa, *ssa;
+ struct ifnet *ifp;
+ struct in6_mfilter *imf;
+ struct ip6_moptions *imo;
+ struct in6_msource *ims;
+ struct in6_multi *inm;
+ uint32_t ifindex;
+ size_t idx;
+ int error, is_final;
+#ifdef KTR
+ char ip6tbuf[INET6_ADDRSTRLEN];
+#endif
+
+ ifp = NULL;
+ ifindex = 0;
+ error = 0;
+ is_final = 1;
+
+ memset(&gsr, 0, sizeof(struct group_source_req));
+ gsa = (sockunion_t *)&gsr.gsr_group;
+ gsa->ss.ss_family = AF_UNSPEC;
+ ssa = (sockunion_t *)&gsr.gsr_source;
+ ssa->ss.ss_family = AF_UNSPEC;
+
+ /*
+ * Chew everything passed in up into a struct group_source_req
+ * as that is easier to process.
+ * Note: Any embedded scope ID in the multicast group passed
+ * in by userland is ignored; the interface index is the recommended
+ * mechanism to specify an interface; see below.
+ */
+ switch (sopt->sopt_name) {
+ case IPV6_LEAVE_GROUP:
+ error = sooptcopyin(sopt, &mreq, sizeof(struct ipv6_mreq),
+ sizeof(struct ipv6_mreq));
+ if (error)
+ return (error);
+ gsa->sin6.sin6_family = AF_INET6;
+ gsa->sin6.sin6_len = sizeof(struct sockaddr_in6);
+ gsa->sin6.sin6_addr = mreq.ipv6mr_multiaddr;
+ gsa->sin6.sin6_port = 0;
+ gsa->sin6.sin6_scope_id = 0;
+ ifindex = mreq.ipv6mr_interface;
+ break;
+
+ case MCAST_LEAVE_GROUP:
+ case MCAST_LEAVE_SOURCE_GROUP:
+ if (sopt->sopt_name == MCAST_LEAVE_GROUP) {
+ error = sooptcopyin(sopt, &gsr,
+ sizeof(struct group_req),
+ sizeof(struct group_req));
+ } else if (sopt->sopt_name == MCAST_LEAVE_SOURCE_GROUP) {
+ error = sooptcopyin(sopt, &gsr,
+ sizeof(struct group_source_req),
+ sizeof(struct group_source_req));
+ }
+ if (error)
+ return (error);
+
+ if (gsa->sin6.sin6_family != AF_INET6 ||
+ gsa->sin6.sin6_len != sizeof(struct sockaddr_in6))
+ return (EINVAL);
+ if (sopt->sopt_name == MCAST_LEAVE_SOURCE_GROUP) {
+ if (ssa->sin6.sin6_family != AF_INET6 ||
+ ssa->sin6.sin6_len != sizeof(struct sockaddr_in6))
+ return (EINVAL);
+ if (IN6_IS_ADDR_MULTICAST(&ssa->sin6.sin6_addr))
+ return (EINVAL);
+ /*
+ * TODO: Validate embedded scope ID in source
+ * list entry against passed-in ifp, if and only
+ * if source list filter entry is iface or node local.
+ */
+ in6_clearscope(&ssa->sin6.sin6_addr);
+ }
+ gsa->sin6.sin6_port = 0;
+ gsa->sin6.sin6_scope_id = 0;
+ ifindex = gsr.gsr_interface;
+ break;
+
+ default:
+ CTR2(KTR_MLD, "%s: unknown sopt_name %d",
+ __func__, sopt->sopt_name);
+ return (EOPNOTSUPP);
+ break;
+ }
+
+ if (!IN6_IS_ADDR_MULTICAST(&gsa->sin6.sin6_addr))
+ return (EINVAL);
+
+ /*
+ * Validate interface index if provided. If no interface index
+ * was provided separately, attempt to look the membership up
+ * from the default scope as a last resort to disambiguate
+ * the membership we are being asked to leave.
+ * XXX SCOPE6 lock potentially taken here.
+ */
+ if (ifindex != 0) {
+ if (ifindex < 0 || V_if_index < ifindex)
+ return (EADDRNOTAVAIL);
+ ifp = ifnet_byindex(ifindex);
+ if (ifp == NULL)
+ return (EADDRNOTAVAIL);
+ (void)in6_setscope(&gsa->sin6.sin6_addr, ifp, NULL);
+ } else {
+ error = sa6_embedscope(&gsa->sin6, V_ip6_use_defzone);
+ if (error)
+ return (EADDRNOTAVAIL);
+ /*
+ * Some badly behaved applications don't pass an ifindex
+ * or a scope ID, which is an API violation. In this case,
+ * perform a lookup as per a v6 join.
+ *
+ * XXX For now, stomp on zone ID for the corner case.
+ * This is not the 'KAME way', but we need to see the ifp
+ * directly until such time as this implementation is
+ * refactored, assuming the scope IDs are the way to go.
+ */
+ ifindex = ntohs(gsa->sin6.sin6_addr.s6_addr16[1]);
+ if (ifindex == 0) {
+ CTR2(KTR_MLD, "%s: warning: no ifindex, looking up "
+ "ifp for group %s.", __func__,
+ ip6_sprintf(ip6tbuf, &gsa->sin6.sin6_addr));
+ ifp = in6p_lookup_mcast_ifp(inp, &gsa->sin6);
+ } else {
+ ifp = ifnet_byindex(ifindex);
+ }
+ if (ifp == NULL)
+ return (EADDRNOTAVAIL);
+ }
+
+ CTR2(KTR_MLD, "%s: ifp = %p", __func__, ifp);
+ KASSERT(ifp != NULL, ("%s: ifp did not resolve", __func__));
+
+ /*
+ * Find the membership in the membership array.
+ */
+ imo = in6p_findmoptions(inp);
+ idx = im6o_match_group(imo, ifp, &gsa->sa);
+ if (idx == -1) {
+ error = EADDRNOTAVAIL;
+ goto out_in6p_locked;
+ }
+ inm = imo->im6o_membership[idx];
+ imf = &imo->im6o_mfilters[idx];
+
+ if (ssa->ss.ss_family != AF_UNSPEC)
+ is_final = 0;
+
+ /*
+ * Begin state merge transaction at socket layer.
+ */
+ INP_WLOCK_ASSERT(inp);
+
+ /*
+ * If we were instructed only to leave a given source, do so.
+ * MCAST_LEAVE_SOURCE_GROUP is only valid for inclusive memberships.
+ */
+ if (is_final) {
+ im6f_leave(imf);
+ } else {
+ if (imf->im6f_st[0] == MCAST_EXCLUDE) {
+ error = EADDRNOTAVAIL;
+ goto out_in6p_locked;
+ }
+ ims = im6o_match_source(imo, idx, &ssa->sa);
+ if (ims == NULL) {
+ CTR3(KTR_MLD, "%s: source %p %spresent", __func__,
+ ip6_sprintf(ip6tbuf, &ssa->sin6.sin6_addr),
+ "not ");
+ error = EADDRNOTAVAIL;
+ goto out_in6p_locked;
+ }
+ CTR2(KTR_MLD, "%s: %s source", __func__, "block");
+ error = im6f_prune(imf, &ssa->sin6);
+ if (error) {
+ CTR1(KTR_MLD, "%s: merge imf state failed",
+ __func__);
+ goto out_in6p_locked;
+ }
+ }
+
+ /*
+ * Begin state merge transaction at MLD layer.
+ */
+ IN6_MULTI_LOCK();
+
+ if (is_final) {
+ /*
+ * Give up the multicast address record to which
+ * the membership points.
+ */
+ (void)in6_mc_leave_locked(inm, imf);
+ } else {
+ CTR1(KTR_MLD, "%s: merge inm state", __func__);
+ error = in6m_merge(inm, imf);
+ if (error) {
+ CTR1(KTR_MLD, "%s: failed to merge inm state",
+ __func__);
+ goto out_im6f_rollback;
+ }
+
+ CTR1(KTR_MLD, "%s: doing mld downcall", __func__);
+ error = mld_change_state(inm, 0);
+ if (error) {
+ CTR1(KTR_MLD, "%s: failed mld downcall",
+ __func__);
+ }
+ }
+
+ IN6_MULTI_UNLOCK();
+
+out_im6f_rollback:
+ if (error)
+ im6f_rollback(imf);
+ else
+ im6f_commit(imf);
+
+ im6f_reap(imf);
+
+ if (is_final) {
+ /* Remove the gap in the membership array. */
+ for (++idx; idx < imo->im6o_num_memberships; ++idx) {
+ imo->im6o_membership[idx-1] = imo->im6o_membership[idx];
+ imo->im6o_mfilters[idx-1] = imo->im6o_mfilters[idx];
+ }
+ imo->im6o_num_memberships--;
+ }
+
+out_in6p_locked:
+ INP_WUNLOCK(inp);
+ return (error);
+}
+
+/*
+ * Select the interface for transmitting IPv6 multicast datagrams.
+ *
+ * Only an interface index (a u_int) may be passed to this socket option;
+ * an interface index of 0 is used to remove a previous selection.
+ * When no interface is selected, one is chosen for every send.
+ */
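+/*
+ * Illustrative userland usage (a hedged sketch; 's' and the interface
+ * name are hypothetical):
+ *
+ *     u_int ifindex = if_nametoindex("em0");
+ *     setsockopt(s, IPPROTO_IPV6, IPV6_MULTICAST_IF,
+ *         &ifindex, sizeof(ifindex));
+ */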
+static int
+in6p_set_multicast_if(struct inpcb *inp, struct sockopt *sopt)
+{
+ struct ifnet *ifp;
+ struct ip6_moptions *imo;
+ u_int ifindex;
+ int error;
+
+ if (sopt->sopt_valsize != sizeof(u_int))
+ return (EINVAL);
+
+ error = sooptcopyin(sopt, &ifindex, sizeof(u_int), sizeof(u_int));
+ if (error)
+ return (error);
+ if (ifindex < 0 || V_if_index < ifindex)
+ return (EINVAL);
+
+ ifp = ifnet_byindex(ifindex);
+ if (ifp == NULL || (ifp->if_flags & IFF_MULTICAST) == 0)
+ return (EADDRNOTAVAIL);
+
+ imo = in6p_findmoptions(inp);
+ imo->im6o_multicast_ifp = ifp;
+ INP_WUNLOCK(inp);
+
+ return (0);
+}
+
+/*
+ * Atomically set source filters on a socket for an IPv6 multicast group.
+ *
+ * SMPng: NOTE: Potentially calls malloc(M_WAITOK) with Giant held.
+ */
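+/*
+ * Userland normally reaches this handler through setsourcefilter(3),
+ * which marshals its arguments into the struct __msfilterreq consumed
+ * below. A hedged sketch ('s', 'group' and the interface name are
+ * hypothetical):
+ *
+ *     struct sockaddr_storage srcs[1];    (one AF_INET6 source address)
+ *     setsourcefilter(s, if_nametoindex("em0"),
+ *         (struct sockaddr *)&group, sizeof(struct sockaddr_in6),
+ *         MCAST_INCLUDE, 1, srcs);
+ */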
+static int
+in6p_set_source_filters(struct inpcb *inp, struct sockopt *sopt)
+{
+ struct __msfilterreq msfr;
+ sockunion_t *gsa;
+ struct ifnet *ifp;
+ struct in6_mfilter *imf;
+ struct ip6_moptions *imo;
+ struct in6_multi *inm;
+ size_t idx;
+ int error;
+
+ error = sooptcopyin(sopt, &msfr, sizeof(struct __msfilterreq),
+ sizeof(struct __msfilterreq));
+ if (error)
+ return (error);
+
+ if (msfr.msfr_nsrcs > in6_mcast_maxsocksrc)
+ return (ENOBUFS);
+
+ if (msfr.msfr_fmode != MCAST_EXCLUDE &&
+ msfr.msfr_fmode != MCAST_INCLUDE)
+ return (EINVAL);
+
+ if (msfr.msfr_group.ss_family != AF_INET6 ||
+ msfr.msfr_group.ss_len != sizeof(struct sockaddr_in6))
+ return (EINVAL);
+
+ gsa = (sockunion_t *)&msfr.msfr_group;
+ if (!IN6_IS_ADDR_MULTICAST(&gsa->sin6.sin6_addr))
+ return (EINVAL);
+
+ gsa->sin6.sin6_port = 0; /* ignore port */
+
+ if (msfr.msfr_ifindex == 0 || V_if_index < msfr.msfr_ifindex)
+ return (EADDRNOTAVAIL);
+ ifp = ifnet_byindex(msfr.msfr_ifindex);
+ if (ifp == NULL)
+ return (EADDRNOTAVAIL);
+ (void)in6_setscope(&gsa->sin6.sin6_addr, ifp, NULL);
+
+ /*
+ * Take the INP write lock.
+ * Check if this socket is a member of this group.
+ */
+ imo = in6p_findmoptions(inp);
+ idx = im6o_match_group(imo, ifp, &gsa->sa);
+ if (idx == -1 || imo->im6o_mfilters == NULL) {
+ error = EADDRNOTAVAIL;
+ goto out_in6p_locked;
+ }
+ inm = imo->im6o_membership[idx];
+ imf = &imo->im6o_mfilters[idx];
+
+ /*
+ * Begin state merge transaction at socket layer.
+ */
+ INP_WLOCK_ASSERT(inp);
+
+ imf->im6f_st[1] = msfr.msfr_fmode;
+
+ /*
+ * Apply any new source filters, if present.
+ * Make a copy of the user-space source vector so
+ * that we may copy them with a single copyin. This
+ * allows us to deal with page faults up-front.
+ */
+ if (msfr.msfr_nsrcs > 0) {
+ struct in6_msource *lims;
+ struct sockaddr_in6 *psin;
+ struct sockaddr_storage *kss, *pkss;
+ int i;
+
+ INP_WUNLOCK(inp);
+
+ CTR2(KTR_MLD, "%s: loading %lu source list entries",
+ __func__, (unsigned long)msfr.msfr_nsrcs);
+ kss = malloc(sizeof(struct sockaddr_storage) * msfr.msfr_nsrcs,
+ M_TEMP, M_WAITOK);
+ error = copyin(msfr.msfr_srcs, kss,
+ sizeof(struct sockaddr_storage) * msfr.msfr_nsrcs);
+ if (error) {
+ free(kss, M_TEMP);
+ return (error);
+ }
+
+ INP_WLOCK(inp);
+
+ /*
+ * Mark all source filters as UNDEFINED at t1.
+ * Restore new group filter mode, as im6f_leave()
+ * will set it to INCLUDE.
+ */
+ im6f_leave(imf);
+ imf->im6f_st[1] = msfr.msfr_fmode;
+
+ /*
+ * Update socket layer filters at t1, lazy-allocating
+ * new entries. This saves a bunch of memory at the
+ * cost of one RB_FIND() per source entry; duplicate
+ * entries in the msfr_nsrcs vector are ignored.
+ * If we encounter an error, rollback transaction.
+ *
+ * XXX This too could be replaced with a set-symmetric
+ * difference like loop to avoid walking from root
+ * every time, as the key space is common.
+ */
+ for (i = 0, pkss = kss; i < msfr.msfr_nsrcs; i++, pkss++) {
+ psin = (struct sockaddr_in6 *)pkss;
+ if (psin->sin6_family != AF_INET6) {
+ error = EAFNOSUPPORT;
+ break;
+ }
+ if (psin->sin6_len != sizeof(struct sockaddr_in6)) {
+ error = EINVAL;
+ break;
+ }
+ if (IN6_IS_ADDR_MULTICAST(&psin->sin6_addr)) {
+ error = EINVAL;
+ break;
+ }
+ /*
+ * TODO: Validate embedded scope ID in source
+ * list entry against passed-in ifp, if and only
+ * if source list filter entry is iface or node local.
+ */
+ in6_clearscope(&psin->sin6_addr);
+ error = im6f_get_source(imf, psin, &lims);
+ if (error)
+ break;
+ lims->im6sl_st[1] = imf->im6f_st[1];
+ }
+ free(kss, M_TEMP);
+ }
+
+ if (error)
+ goto out_im6f_rollback;
+
+ INP_WLOCK_ASSERT(inp);
+ IN6_MULTI_LOCK();
+
+ /*
+ * Begin state merge transaction at MLD layer.
+ */
+ CTR1(KTR_MLD, "%s: merge inm state", __func__);
+ error = in6m_merge(inm, imf);
+ if (error) {
+ CTR1(KTR_MLD, "%s: failed to merge inm state", __func__);
+ goto out_im6f_rollback;
+ }
+
+ CTR1(KTR_MLD, "%s: doing mld downcall", __func__);
+ error = mld_change_state(inm, 0);
+ if (error)
+ CTR1(KTR_MLD, "%s: failed mld downcall", __func__);
+
+ IN6_MULTI_UNLOCK();
+
+out_im6f_rollback:
+ if (error)
+ im6f_rollback(imf);
+ else
+ im6f_commit(imf);
+
+ im6f_reap(imf);
+
+out_in6p_locked:
+ INP_WUNLOCK(inp);
+ return (error);
+}
+
+/*
+ * Set the IP multicast options in response to user setsockopt().
+ *
+ * Many of the socket options handled in this function duplicate the
+ * functionality of socket options in the regular unicast API. However,
+ * it is not possible to merge the duplicate code, because the idempotence
+ * of the IPv6 multicast part of the BSD Sockets API must be preserved;
+ * the effects of these options must be treated as separate and distinct.
+ *
+ * SMPng: XXX: Unlocked read of inp_socket believed OK.
+ */
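+/*
+ * For example (a hedged sketch; 's' is hypothetical), restricting
+ * outgoing multicast datagrams to the local link maps to the
+ * IPV6_MULTICAST_HOPS case below:
+ *
+ *     int hlim = 1;
+ *     setsockopt(s, IPPROTO_IPV6, IPV6_MULTICAST_HOPS,
+ *         &hlim, sizeof(hlim));
+ */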
+int
+ip6_setmoptions(struct inpcb *inp, struct sockopt *sopt)
+{
+ struct ip6_moptions *im6o;
+ int error;
+
+ error = 0;
+
+ /*
+ * If the socket is neither of type SOCK_RAW nor SOCK_DGRAM,
+ * or is a divert socket, reject it.
+ */
+ if (inp->inp_socket->so_proto->pr_protocol == IPPROTO_DIVERT ||
+ (inp->inp_socket->so_proto->pr_type != SOCK_RAW &&
+ inp->inp_socket->so_proto->pr_type != SOCK_DGRAM))
+ return (EOPNOTSUPP);
+
+ switch (sopt->sopt_name) {
+ case IPV6_MULTICAST_IF:
+ error = in6p_set_multicast_if(inp, sopt);
+ break;
+
+ case IPV6_MULTICAST_HOPS: {
+ int hlim;
+
+ if (sopt->sopt_valsize != sizeof(int)) {
+ error = EINVAL;
+ break;
+ }
+ error = sooptcopyin(sopt, &hlim, sizeof(hlim), sizeof(int));
+ if (error)
+ break;
+ if (hlim < -1 || hlim > 255) {
+ error = EINVAL;
+ break;
+ } else if (hlim == -1) {
+ hlim = V_ip6_defmcasthlim;
+ }
+ im6o = in6p_findmoptions(inp);
+ im6o->im6o_multicast_hlim = hlim;
+ INP_WUNLOCK(inp);
+ break;
+ }
+
+ case IPV6_MULTICAST_LOOP: {
+ u_int loop;
+
+ /*
+ * Set the loopback flag for outgoing multicast packets.
+ * Must be zero or one.
+ */
+ if (sopt->sopt_valsize != sizeof(u_int)) {
+ error = EINVAL;
+ break;
+ }
+ error = sooptcopyin(sopt, &loop, sizeof(u_int), sizeof(u_int));
+ if (error)
+ break;
+ if (loop > 1) {
+ error = EINVAL;
+ break;
+ }
+ im6o = in6p_findmoptions(inp);
+ im6o->im6o_multicast_loop = loop;
+ INP_WUNLOCK(inp);
+ break;
+ }
+
+ case IPV6_JOIN_GROUP:
+ case MCAST_JOIN_GROUP:
+ case MCAST_JOIN_SOURCE_GROUP:
+ error = in6p_join_group(inp, sopt);
+ break;
+
+ case IPV6_LEAVE_GROUP:
+ case MCAST_LEAVE_GROUP:
+ case MCAST_LEAVE_SOURCE_GROUP:
+ error = in6p_leave_group(inp, sopt);
+ break;
+
+ case MCAST_BLOCK_SOURCE:
+ case MCAST_UNBLOCK_SOURCE:
+ error = in6p_block_unblock_source(inp, sopt);
+ break;
+
+ case IPV6_MSFILTER:
+ error = in6p_set_source_filters(inp, sopt);
+ break;
+
+ default:
+ error = EOPNOTSUPP;
+ break;
+ }
+
+ INP_UNLOCK_ASSERT(inp);
+
+ return (error);
+}
+
+/*
+ * Expose MLD's multicast filter mode and source list(s) to userland,
+ * keyed by (ifindex, group).
+ * The filter mode is written out as a uint32_t, followed by
+ * 0..n of struct in6_addr.
+ * For use by ifmcstat(8).
+ * SMPng: NOTE: unlocked read of ifindex space.
+ */
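+/*
+ * Sketch of the reply layout written by the SYSCTL_OUT() calls below
+ * (one record per matching group on the interface):
+ *
+ *     uint32_t        fmode;      MCAST_INCLUDE or MCAST_EXCLUDE
+ *     struct in6_addr srcs[];     in-mode sources only, 0..n entries
+ */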
+static int
+sysctl_ip6_mcast_filters(SYSCTL_HANDLER_ARGS)
+{
+ struct in6_addr mcaddr;
+ struct in6_addr src;
+ struct ifnet *ifp;
+ struct ifmultiaddr *ifma;
+ struct in6_multi *inm;
+ struct ip6_msource *ims;
+ int *name;
+ int retval;
+ u_int namelen;
+ uint32_t fmode, ifindex;
+#ifdef KTR
+ char ip6tbuf[INET6_ADDRSTRLEN];
+#endif
+
+ name = (int *)arg1;
+ namelen = arg2;
+
+ if (req->newptr != NULL)
+ return (EPERM);
+
+ /* int: ifindex + 4 * 32 bits of IPv6 address */
+ if (namelen != 5)
+ return (EINVAL);
+
+ ifindex = name[0];
+ if (ifindex <= 0 || ifindex > V_if_index) {
+ CTR2(KTR_MLD, "%s: ifindex %u out of range",
+ __func__, ifindex);
+ return (ENOENT);
+ }
+
+ memcpy(&mcaddr, &name[1], sizeof(struct in6_addr));
+ if (!IN6_IS_ADDR_MULTICAST(&mcaddr)) {
+ CTR2(KTR_MLD, "%s: group %s is not multicast",
+ __func__, ip6_sprintf(ip6tbuf, &mcaddr));
+ return (EINVAL);
+ }
+
+ ifp = ifnet_byindex(ifindex);
+ if (ifp == NULL) {
+ CTR2(KTR_MLD, "%s: no ifp for ifindex %u",
+ __func__, ifindex);
+ return (ENOENT);
+ }
+ /*
+ * Internal MLD lookups require that scope/zone ID is set.
+ */
+ (void)in6_setscope(&mcaddr, ifp, NULL);
+
+ retval = sysctl_wire_old_buffer(req,
+ sizeof(uint32_t) + (in6_mcast_maxgrpsrc * sizeof(struct in6_addr)));
+ if (retval)
+ return (retval);
+
+ IN6_MULTI_LOCK();
+
+ IF_ADDR_LOCK(ifp);
+ TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
+ if (ifma->ifma_addr->sa_family != AF_INET6 ||
+ ifma->ifma_protospec == NULL)
+ continue;
+ inm = (struct in6_multi *)ifma->ifma_protospec;
+ if (!IN6_ARE_ADDR_EQUAL(&inm->in6m_addr, &mcaddr))
+ continue;
+ fmode = inm->in6m_st[1].iss_fmode;
+ retval = SYSCTL_OUT(req, &fmode, sizeof(uint32_t));
+ if (retval != 0)
+ break;
+ RB_FOREACH(ims, ip6_msource_tree, &inm->in6m_srcs) {
+ CTR2(KTR_MLD, "%s: visit node %p", __func__, ims);
+ /*
+ * Only copy-out sources which are in-mode.
+ */
+ if (fmode != im6s_get_mode(inm, ims, 1)) {
+ CTR1(KTR_MLD, "%s: skip non-in-mode",
+ __func__);
+ continue;
+ }
+ src = ims->im6s_addr;
+ retval = SYSCTL_OUT(req, &src,
+ sizeof(struct in6_addr));
+ if (retval != 0)
+ break;
+ }
+ }
+ IF_ADDR_UNLOCK(ifp);
+
+ IN6_MULTI_UNLOCK();
+
+ return (retval);
+}
+
+#ifdef KTR
+
+static const char *in6m_modestrs[] = { "un", "in", "ex" };
+
+static const char *
+in6m_mode_str(const int mode)
+{
+
+ if (mode >= MCAST_UNDEFINED && mode <= MCAST_EXCLUDE)
+ return (in6m_modestrs[mode]);
+ return ("??");
+}
+
+static const char *in6m_statestrs[] = {
+ "not-member",
+ "silent",
+ "idle",
+ "lazy",
+ "sleeping",
+ "awakening",
+ "query-pending",
+ "sg-query-pending",
+ "leaving"
+};
+
+static const char *
+in6m_state_str(const int state)
+{
+
+ if (state >= MLD_NOT_MEMBER && state <= MLD_LEAVING_MEMBER)
+ return (in6m_statestrs[state]);
+ return ("??");
+}
+
+/*
+ * Dump an in6_multi structure to the console.
+ */
+void
+in6m_print(const struct in6_multi *inm)
+{
+ int t;
+ char ip6tbuf[INET6_ADDRSTRLEN];
+
+ if ((ktr_mask & KTR_MLD) == 0)
+ return;
+
+ printf("%s: --- begin in6m %p ---\n", __func__, inm);
+ printf("addr %s ifp %p(%s) ifma %p\n",
+ ip6_sprintf(ip6tbuf, &inm->in6m_addr),
+ inm->in6m_ifp,
+ inm->in6m_ifp->if_xname,
+ inm->in6m_ifma);
+ printf("timer %u state %s refcount %u scq.len %u\n",
+ inm->in6m_timer,
+ in6m_state_str(inm->in6m_state),
+ inm->in6m_refcount,
+ inm->in6m_scq.ifq_len);
+ printf("mli %p nsrc %lu sctimer %u scrv %u\n",
+ inm->in6m_mli,
+ inm->in6m_nsrc,
+ inm->in6m_sctimer,
+ inm->in6m_scrv);
+ for (t = 0; t < 2; t++) {
+ printf("t%d: fmode %s asm %u ex %u in %u rec %u\n", t,
+ in6m_mode_str(inm->in6m_st[t].iss_fmode),
+ inm->in6m_st[t].iss_asm,
+ inm->in6m_st[t].iss_ex,
+ inm->in6m_st[t].iss_in,
+ inm->in6m_st[t].iss_rec);
+ }
+ printf("%s: --- end in6m %p ---\n", __func__, inm);
+}
+
+#else /* !KTR */
+
+void
+in6m_print(const struct in6_multi *inm)
+{
+
+}
+
+#endif /* KTR */
diff --git a/rtems/freebsd/netinet6/in6_pcb.c b/rtems/freebsd/netinet6/in6_pcb.c
new file mode 100644
index 00000000..a92f000b
--- /dev/null
+++ b/rtems/freebsd/netinet6/in6_pcb.c
@@ -0,0 +1,936 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the project nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $KAME: in6_pcb.c,v 1.31 2001/05/21 05:45:10 jinmei Exp $
+ */
+
+/*-
+ * Copyright (c) 1982, 1986, 1991, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)in_pcb.c 8.2 (Berkeley) 1/4/94
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/freebsd/local/opt_inet.h>
+#include <rtems/freebsd/local/opt_inet6.h>
+#include <rtems/freebsd/local/opt_ipsec.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/domain.h>
+#include <rtems/freebsd/sys/protosw.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/socketvar.h>
+#include <rtems/freebsd/sys/sockio.h>
+#include <rtems/freebsd/sys/errno.h>
+#include <rtems/freebsd/sys/time.h>
+#include <rtems/freebsd/sys/priv.h>
+#include <rtems/freebsd/sys/proc.h>
+#include <rtems/freebsd/sys/jail.h>
+
+#include <rtems/freebsd/vm/uma.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/if_types.h>
+#include <rtems/freebsd/net/route.h>
+
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/in_var.h>
+#include <rtems/freebsd/netinet/in_systm.h>
+#include <rtems/freebsd/netinet/tcp_var.h>
+#include <rtems/freebsd/netinet/ip6.h>
+#include <rtems/freebsd/netinet/ip_var.h>
+
+#include <rtems/freebsd/netinet6/ip6_var.h>
+#include <rtems/freebsd/netinet6/nd6.h>
+#include <rtems/freebsd/netinet/in_pcb.h>
+#include <rtems/freebsd/netinet6/in6_pcb.h>
+#include <rtems/freebsd/netinet6/scope6_var.h>
+
+#include <rtems/freebsd/security/mac/mac_framework.h>
+
+struct in6_addr zeroin6_addr;
+
+int
+in6_pcbbind(register struct inpcb *inp, struct sockaddr *nam,
+ struct ucred *cred)
+{
+ struct socket *so = inp->inp_socket;
+ struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)NULL;
+ struct inpcbinfo *pcbinfo = inp->inp_pcbinfo;
+ u_short lport = 0;
+ int error, wild = 0, reuseport = (so->so_options & SO_REUSEPORT);
+
+ INP_INFO_WLOCK_ASSERT(pcbinfo);
+ INP_WLOCK_ASSERT(inp);
+
+ if (TAILQ_EMPTY(&V_in6_ifaddrhead)) /* XXX broken! */
+ return (EADDRNOTAVAIL);
+ if (inp->inp_lport || !IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr))
+ return (EINVAL);
+ if ((so->so_options & (SO_REUSEADDR|SO_REUSEPORT)) == 0)
+ wild = INPLOOKUP_WILDCARD;
+ if (nam == NULL) {
+ if ((error = prison_local_ip6(cred, &inp->in6p_laddr,
+ ((inp->inp_flags & IN6P_IPV6_V6ONLY) != 0))) != 0)
+ return (error);
+ } else {
+ sin6 = (struct sockaddr_in6 *)nam;
+ if (nam->sa_len != sizeof(*sin6))
+ return (EINVAL);
+ /*
+ * family check.
+ */
+ if (nam->sa_family != AF_INET6)
+ return (EAFNOSUPPORT);
+
+ if ((error = sa6_embedscope(sin6, V_ip6_use_defzone)) != 0)
+ return(error);
+
+ if ((error = prison_local_ip6(cred, &sin6->sin6_addr,
+ ((inp->inp_flags & IN6P_IPV6_V6ONLY) != 0))) != 0)
+ return (error);
+
+ lport = sin6->sin6_port;
+ if (IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) {
+ /*
+ * Treat SO_REUSEADDR as SO_REUSEPORT for multicast;
+ * allow complete duplication of binding if
+ * SO_REUSEPORT is set, or if SO_REUSEADDR is set
+ * and a multicast address is bound on both
+ * new and duplicated sockets.
+ */
+ if (so->so_options & SO_REUSEADDR)
+ reuseport = SO_REUSEADDR|SO_REUSEPORT;
+ } else if (!IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
+ struct ifaddr *ifa;
+
+ sin6->sin6_port = 0; /* yech... */
+ if ((ifa = ifa_ifwithaddr((struct sockaddr *)sin6)) ==
+ NULL &&
+ (inp->inp_flags & INP_BINDANY) == 0) {
+ return (EADDRNOTAVAIL);
+ }
+
+ /*
+ * XXX: binding to an anycast address might accidentally
+ * cause a packet to be sent with an anycast source address.
+ * We should allow binding to a deprecated address, since
+ * the application dares to use it.
+ */
+ if (ifa != NULL &&
+ ((struct in6_ifaddr *)ifa)->ia6_flags &
+ (IN6_IFF_ANYCAST|IN6_IFF_NOTREADY|IN6_IFF_DETACHED)) {
+ ifa_free(ifa);
+ return (EADDRNOTAVAIL);
+ }
+ if (ifa != NULL)
+ ifa_free(ifa);
+ }
+ if (lport) {
+ struct inpcb *t;
+
+ /* GROSS */
+ if (ntohs(lport) <= V_ipport_reservedhigh &&
+ ntohs(lport) >= V_ipport_reservedlow &&
+ priv_check_cred(cred, PRIV_NETINET_RESERVEDPORT,
+ 0))
+ return (EACCES);
+ if (!IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr) &&
+ priv_check_cred(inp->inp_cred,
+ PRIV_NETINET_REUSEPORT, 0) != 0) {
+ t = in6_pcblookup_local(pcbinfo,
+ &sin6->sin6_addr, lport,
+ INPLOOKUP_WILDCARD, cred);
+ if (t &&
+ ((t->inp_flags & INP_TIMEWAIT) == 0) &&
+ (so->so_type != SOCK_STREAM ||
+ IN6_IS_ADDR_UNSPECIFIED(&t->in6p_faddr)) &&
+ (!IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr) ||
+ !IN6_IS_ADDR_UNSPECIFIED(&t->in6p_laddr) ||
+ (t->inp_socket->so_options & SO_REUSEPORT)
+ == 0) && (inp->inp_cred->cr_uid !=
+ t->inp_cred->cr_uid))
+ return (EADDRINUSE);
+ if ((inp->inp_flags & IN6P_IPV6_V6ONLY) == 0 &&
+ IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
+ struct sockaddr_in sin;
+
+ in6_sin6_2_sin(&sin, sin6);
+ t = in_pcblookup_local(pcbinfo,
+ sin.sin_addr, lport,
+ INPLOOKUP_WILDCARD, cred);
+ if (t &&
+ ((t->inp_flags &
+ INP_TIMEWAIT) == 0) &&
+ (so->so_type != SOCK_STREAM ||
+ ntohl(t->inp_faddr.s_addr) ==
+ INADDR_ANY) &&
+ (inp->inp_cred->cr_uid !=
+ t->inp_cred->cr_uid))
+ return (EADDRINUSE);
+ }
+ }
+ t = in6_pcblookup_local(pcbinfo, &sin6->sin6_addr,
+ lport, wild, cred);
+ if (t && (reuseport & ((t->inp_flags & INP_TIMEWAIT) ?
+ intotw(t)->tw_so_options :
+ t->inp_socket->so_options)) == 0)
+ return (EADDRINUSE);
+ if ((inp->inp_flags & IN6P_IPV6_V6ONLY) == 0 &&
+ IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
+ struct sockaddr_in sin;
+
+ in6_sin6_2_sin(&sin, sin6);
+ t = in_pcblookup_local(pcbinfo, sin.sin_addr,
+ lport, wild, cred);
+ if (t && t->inp_flags & INP_TIMEWAIT) {
+ if ((reuseport &
+ intotw(t)->tw_so_options) == 0 &&
+ (ntohl(t->inp_laddr.s_addr) !=
+ INADDR_ANY || ((inp->inp_vflag &
+ INP_IPV6PROTO) ==
+ (t->inp_vflag & INP_IPV6PROTO))))
+ return (EADDRINUSE);
+ }
+ else if (t &&
+ (reuseport & t->inp_socket->so_options)
+ == 0 && (ntohl(t->inp_laddr.s_addr) !=
+ INADDR_ANY || INP_SOCKAF(so) ==
+ INP_SOCKAF(t->inp_socket)))
+ return (EADDRINUSE);
+ }
+ }
+ inp->in6p_laddr = sin6->sin6_addr;
+ }
+ if (lport == 0) {
+ if ((error = in6_pcbsetport(&inp->in6p_laddr, inp, cred)) != 0)
+ return (error);
+ } else {
+ inp->inp_lport = lport;
+ if (in_pcbinshash(inp) != 0) {
+ inp->in6p_laddr = in6addr_any;
+ inp->inp_lport = 0;
+ return (EAGAIN);
+ }
+ }
+ return (0);
+}
+
+/*
+ * Transform old in6_pcbconnect() into an inner subroutine for new
+ * in6_pcbconnect(): Do some validity-checking on the remote
+ * address (in mbuf 'nam') and then determine local host address
+ * (i.e., which interface) to use to access that remote host.
+ *
+ * This preserves definition of in6_pcbconnect(), while supporting a
+ * slightly different version for T/TCP. (This is more than
+ * a bit of a kludge, but cleaning up the internal interfaces would
+ * have forced minor changes in every protocol).
+ */
+int
+in6_pcbladdr(register struct inpcb *inp, struct sockaddr *nam,
+ struct in6_addr *plocal_addr6)
+{
+ register struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)nam;
+ int error = 0;
+ struct ifnet *ifp = NULL;
+ int scope_ambiguous = 0;
+ struct in6_addr in6a;
+
+ INP_INFO_WLOCK_ASSERT(inp->inp_pcbinfo);
+ INP_WLOCK_ASSERT(inp);
+
+ if (nam->sa_len != sizeof (*sin6))
+ return (EINVAL);
+ if (sin6->sin6_family != AF_INET6)
+ return (EAFNOSUPPORT);
+ if (sin6->sin6_port == 0)
+ return (EADDRNOTAVAIL);
+
+ if (sin6->sin6_scope_id == 0 && !V_ip6_use_defzone)
+ scope_ambiguous = 1;
+ if ((error = sa6_embedscope(sin6, V_ip6_use_defzone)) != 0)
+ return(error);
+
+ if (!TAILQ_EMPTY(&V_in6_ifaddrhead)) {
+ /*
+ * If the destination address is the unspecified address,
+ * use the loopback address, e.g., ::1.
+ */
+ if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr))
+ sin6->sin6_addr = in6addr_loopback;
+ }
+ if ((error = prison_remote_ip6(inp->inp_cred, &sin6->sin6_addr)) != 0)
+ return (error);
+
+ error = in6_selectsrc(sin6, inp->in6p_outputopts,
+ inp, NULL, inp->inp_cred, &ifp, &in6a);
+ if (error)
+ return (error);
+
+ if (ifp && scope_ambiguous &&
+ (error = in6_setscope(&sin6->sin6_addr, ifp, NULL)) != 0) {
+ return(error);
+ }
+
+ /*
+ * Do not update this earlier, in case we return with an error.
+ *
+ * XXX: this in6_selectsrc result might replace the bound local
+ * address with the address specified by setsockopt(IPV6_PKTINFO).
+ * Is it the intended behavior?
+ */
+ *plocal_addr6 = in6a;
+
+ /*
+ * Don't do a pcblookup call here; return the interface in
+ * plocal_addr6 and exit to the caller, which will do the lookup.
+ */
+
+ return (0);
+}
+
+/*
+ * Outer subroutine:
+ * Connect from a socket to a specified address.
+ * Both address and port must be specified in argument sin.
+ * If we don't have a local address for this socket yet,
+ * then pick one.
+ */
+int
+in6_pcbconnect(register struct inpcb *inp, struct sockaddr *nam,
+ struct ucred *cred)
+{
+ register struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)nam;
+ struct in6_addr addr6;
+ int error;
+
+ INP_INFO_WLOCK_ASSERT(inp->inp_pcbinfo);
+ INP_WLOCK_ASSERT(inp);
+
+ /*
+ * Call inner routine, to assign local interface address.
+ * in6_pcbladdr() may automatically fill in sin6_scope_id.
+ */
+ if ((error = in6_pcbladdr(inp, nam, &addr6)) != 0)
+ return (error);
+
+ if (in6_pcblookup_hash(inp->inp_pcbinfo, &sin6->sin6_addr,
+ sin6->sin6_port,
+ IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr)
+ ? &addr6 : &inp->in6p_laddr,
+ inp->inp_lport, 0, NULL) != NULL) {
+ return (EADDRINUSE);
+ }
+ if (IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr)) {
+ if (inp->inp_lport == 0) {
+ error = in6_pcbbind(inp, (struct sockaddr *)0, cred);
+ if (error)
+ return (error);
+ }
+ inp->in6p_laddr = addr6;
+ }
+ inp->in6p_faddr = sin6->sin6_addr;
+ inp->inp_fport = sin6->sin6_port;
+ /* update flowinfo - draft-itojun-ipv6-flowlabel-api-00 */
+ inp->inp_flow &= ~IPV6_FLOWLABEL_MASK;
+ if (inp->inp_flags & IN6P_AUTOFLOWLABEL)
+ inp->inp_flow |=
+ (htonl(ip6_randomflowlabel()) & IPV6_FLOWLABEL_MASK);
+
+ in_pcbrehash(inp);
+
+ return (0);
+}
+
+void
+in6_pcbdisconnect(struct inpcb *inp)
+{
+
+ INP_INFO_WLOCK_ASSERT(inp->inp_pcbinfo);
+ INP_WLOCK_ASSERT(inp);
+
+ bzero((caddr_t)&inp->in6p_faddr, sizeof(inp->in6p_faddr));
+ inp->inp_fport = 0;
+ /* clear flowinfo - draft-itojun-ipv6-flowlabel-api-00 */
+ inp->inp_flow &= ~IPV6_FLOWLABEL_MASK;
+ in_pcbrehash(inp);
+}
+
+struct sockaddr *
+in6_sockaddr(in_port_t port, struct in6_addr *addr_p)
+{
+ struct sockaddr_in6 *sin6;
+
+ sin6 = malloc(sizeof *sin6, M_SONAME, M_WAITOK);
+ bzero(sin6, sizeof *sin6);
+ sin6->sin6_family = AF_INET6;
+ sin6->sin6_len = sizeof(*sin6);
+ sin6->sin6_port = port;
+ sin6->sin6_addr = *addr_p;
+ (void)sa6_recoverscope(sin6); /* XXX: should catch errors */
+
+ return (struct sockaddr *)sin6;
+}
+
+struct sockaddr *
+in6_v4mapsin6_sockaddr(in_port_t port, struct in_addr *addr_p)
+{
+ struct sockaddr_in sin;
+ struct sockaddr_in6 *sin6_p;
+
+ bzero(&sin, sizeof sin);
+ sin.sin_family = AF_INET;
+ sin.sin_len = sizeof(sin);
+ sin.sin_port = port;
+ sin.sin_addr = *addr_p;
+
+ sin6_p = malloc(sizeof *sin6_p, M_SONAME,
+ M_WAITOK);
+ in6_sin_2_v4mapsin6(&sin, sin6_p);
+
+ return (struct sockaddr *)sin6_p;
+}
+
+int
+in6_getsockaddr(struct socket *so, struct sockaddr **nam)
+{
+ register struct inpcb *inp;
+ struct in6_addr addr;
+ in_port_t port;
+
+ inp = sotoinpcb(so);
+ KASSERT(inp != NULL, ("in6_getsockaddr: inp == NULL"));
+
+ INP_RLOCK(inp);
+ port = inp->inp_lport;
+ addr = inp->in6p_laddr;
+ INP_RUNLOCK(inp);
+
+ *nam = in6_sockaddr(port, &addr);
+ return 0;
+}
+
+int
+in6_getpeeraddr(struct socket *so, struct sockaddr **nam)
+{
+ struct inpcb *inp;
+ struct in6_addr addr;
+ in_port_t port;
+
+ inp = sotoinpcb(so);
+ KASSERT(inp != NULL, ("in6_getpeeraddr: inp == NULL"));
+
+ INP_RLOCK(inp);
+ port = inp->inp_fport;
+ addr = inp->in6p_faddr;
+ INP_RUNLOCK(inp);
+
+ *nam = in6_sockaddr(port, &addr);
+ return 0;
+}
+
+int
+in6_mapped_sockaddr(struct socket *so, struct sockaddr **nam)
+{
+ struct inpcb *inp;
+ int error;
+
+ inp = sotoinpcb(so);
+ KASSERT(inp != NULL, ("in6_mapped_sockaddr: inp == NULL"));
+
+ if ((inp->inp_vflag & (INP_IPV4 | INP_IPV6)) == INP_IPV4) {
+ error = in_getsockaddr(so, nam);
+ if (error == 0)
+ in6_sin_2_v4mapsin6_in_sock(nam);
+ } else {
+ /* scope issues will be handled in in6_getsockaddr(). */
+ error = in6_getsockaddr(so, nam);
+ }
+
+ return error;
+}
+
+int
+in6_mapped_peeraddr(struct socket *so, struct sockaddr **nam)
+{
+ struct inpcb *inp;
+ int error;
+
+ inp = sotoinpcb(so);
+ KASSERT(inp != NULL, ("in6_mapped_peeraddr: inp == NULL"));
+
+ if ((inp->inp_vflag & (INP_IPV4 | INP_IPV6)) == INP_IPV4) {
+ error = in_getpeeraddr(so, nam);
+ if (error == 0)
+ in6_sin_2_v4mapsin6_in_sock(nam);
+ } else
+ /* scope issues will be handled in in6_getpeeraddr(). */
+ error = in6_getpeeraddr(so, nam);
+
+ return error;
+}
+
+/*
+ * Pass some notification to all connections of a protocol
+ * associated with address dst. The local address and/or port numbers
+ * may be specified to limit the search. The "usual action" will be
+ * taken, depending on the ctlinput cmd. The caller must filter any
+ * cmds that are uninteresting (e.g., no error in the map).
+ * Call the protocol specific routine (if any) to report
+ * any errors for each matching socket.
+ */
+void
+in6_pcbnotify(struct inpcbinfo *pcbinfo, struct sockaddr *dst,
+ u_int fport_arg, const struct sockaddr *src, u_int lport_arg,
+ int cmd, void *cmdarg,
+ struct inpcb *(*notify)(struct inpcb *, int))
+{
+ struct inpcb *inp, *inp_temp;
+ struct sockaddr_in6 sa6_src, *sa6_dst;
+ u_short fport = fport_arg, lport = lport_arg;
+ u_int32_t flowinfo;
+ int errno;
+
+ if ((unsigned)cmd >= PRC_NCMDS || dst->sa_family != AF_INET6)
+ return;
+
+ sa6_dst = (struct sockaddr_in6 *)dst;
+ if (IN6_IS_ADDR_UNSPECIFIED(&sa6_dst->sin6_addr))
+ return;
+
+ /*
+ * Note that src can be NULL when we are notified by local fragmentation.
+ */
+ sa6_src = (src == NULL) ? sa6_any : *(const struct sockaddr_in6 *)src;
+ flowinfo = sa6_src.sin6_flowinfo;
+
+ /*
+ * Redirects go to all references to the destination,
+ * and use in6_rtchange to invalidate the route cache.
+ * Dead host indications: also use in6_rtchange to invalidate
+ * the cache, and deliver the error to all the sockets.
+ * Otherwise, if we have knowledge of the local port and address,
+ * deliver only to that socket.
+ */
+ if (PRC_IS_REDIRECT(cmd) || cmd == PRC_HOSTDEAD) {
+ fport = 0;
+ lport = 0;
+ bzero((caddr_t)&sa6_src.sin6_addr, sizeof(sa6_src.sin6_addr));
+
+ if (cmd != PRC_HOSTDEAD)
+ notify = in6_rtchange;
+ }
+ errno = inet6ctlerrmap[cmd];
+ INP_INFO_WLOCK(pcbinfo);
+ LIST_FOREACH_SAFE(inp, pcbinfo->ipi_listhead, inp_list, inp_temp) {
+ INP_WLOCK(inp);
+ if ((inp->inp_vflag & INP_IPV6) == 0) {
+ INP_WUNLOCK(inp);
+ continue;
+ }
+
+ /*
+ * If the error designates a new path MTU for a destination
+ * and the application (associated with this socket) wanted to
+ * know the value, notify. Note that we notify for all
+ * disconnected sockets if the corresponding application
+ * wanted it. This is because some UDP applications keep
+ * sending on disconnected sockets.
+ * XXX: should we avoid notifying the value to TCP sockets?
+ */
+ if (cmd == PRC_MSGSIZE && (inp->inp_flags & IN6P_MTU) != 0 &&
+ (IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_faddr) ||
+ IN6_ARE_ADDR_EQUAL(&inp->in6p_faddr, &sa6_dst->sin6_addr))) {
+ ip6_notify_pmtu(inp, (struct sockaddr_in6 *)dst,
+ (u_int32_t *)cmdarg);
+ }
+
+ /*
+ * Detect if we should notify the error. If no source and
+ * destination ports are specified, but non-zero flowinfo and
+ * local address match, notify the error. This is the case
+ * when the error is delivered with an encrypted buffer
+ * by ESP. Otherwise, just compare addresses and ports
+ * as usual.
+ */
+ if (lport == 0 && fport == 0 && flowinfo &&
+ inp->inp_socket != NULL &&
+ flowinfo == (inp->inp_flow & IPV6_FLOWLABEL_MASK) &&
+ IN6_ARE_ADDR_EQUAL(&inp->in6p_laddr, &sa6_src.sin6_addr))
+ goto do_notify;
+ else if (!IN6_ARE_ADDR_EQUAL(&inp->in6p_faddr,
+ &sa6_dst->sin6_addr) ||
+ inp->inp_socket == 0 ||
+ (lport && inp->inp_lport != lport) ||
+ (!IN6_IS_ADDR_UNSPECIFIED(&sa6_src.sin6_addr) &&
+ !IN6_ARE_ADDR_EQUAL(&inp->in6p_laddr,
+ &sa6_src.sin6_addr)) ||
+ (fport && inp->inp_fport != fport)) {
+ INP_WUNLOCK(inp);
+ continue;
+ }
+
+ do_notify:
+ if (notify) {
+ if ((*notify)(inp, errno))
+ INP_WUNLOCK(inp);
+ } else
+ INP_WUNLOCK(inp);
+ }
+ INP_INFO_WUNLOCK(pcbinfo);
+}
+
+/*
+ * Lookup a PCB based on the local address and port.
+ */
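+/*
+ * In the wildcard-permitting branch below, each candidate PCB is scored
+ * by how many of its address components are wildcards (0..2); matchwild
+ * starts above that range at 3, the candidate with the fewest wildcards
+ * wins, and a score of 0 is an exact match that ends the scan early.
+ */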
+struct inpcb *
+in6_pcblookup_local(struct inpcbinfo *pcbinfo, struct in6_addr *laddr,
+ u_short lport, int wild_okay, struct ucred *cred)
+{
+ register struct inpcb *inp;
+ int matchwild = 3, wildcard;
+
+ INP_INFO_WLOCK_ASSERT(pcbinfo);
+
+ if (!wild_okay) {
+ struct inpcbhead *head;
+ /*
+ * Look for an unconnected (wildcard foreign addr) PCB that
+ * matches the local address and port we're looking for.
+ */
+ head = &pcbinfo->ipi_hashbase[INP_PCBHASH(INADDR_ANY, lport,
+ 0, pcbinfo->ipi_hashmask)];
+ LIST_FOREACH(inp, head, inp_hash) {
+ /* XXX inp locking */
+ if ((inp->inp_vflag & INP_IPV6) == 0)
+ continue;
+ if (IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_faddr) &&
+ IN6_ARE_ADDR_EQUAL(&inp->in6p_laddr, laddr) &&
+ inp->inp_lport == lport) {
+ /* Found. */
+ if (cred == NULL ||
+ prison_equal_ip6(cred->cr_prison,
+ inp->inp_cred->cr_prison))
+ return (inp);
+ }
+ }
+ /*
+ * Not found.
+ */
+ return (NULL);
+ } else {
+ struct inpcbporthead *porthash;
+ struct inpcbport *phd;
+ struct inpcb *match = NULL;
+ /*
+ * Best fit PCB lookup.
+ *
+ * First see if this local port is in use by looking on the
+ * port hash list.
+ */
+ porthash = &pcbinfo->ipi_porthashbase[INP_PCBPORTHASH(lport,
+ pcbinfo->ipi_porthashmask)];
+ LIST_FOREACH(phd, porthash, phd_hash) {
+ if (phd->phd_port == lport)
+ break;
+ }
+ if (phd != NULL) {
+ /*
+ * Port is in use by one or more PCBs. Look for best
+ * fit.
+ */
+ LIST_FOREACH(inp, &phd->phd_pcblist, inp_portlist) {
+ wildcard = 0;
+ if (cred != NULL &&
+ !prison_equal_ip6(cred->cr_prison,
+ inp->inp_cred->cr_prison))
+ continue;
+ /* XXX inp locking */
+ if ((inp->inp_vflag & INP_IPV6) == 0)
+ continue;
+ if (!IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_faddr))
+ wildcard++;
+ if (!IN6_IS_ADDR_UNSPECIFIED(
+ &inp->in6p_laddr)) {
+ if (IN6_IS_ADDR_UNSPECIFIED(laddr))
+ wildcard++;
+ else if (!IN6_ARE_ADDR_EQUAL(
+ &inp->in6p_laddr, laddr))
+ continue;
+ } else {
+ if (!IN6_IS_ADDR_UNSPECIFIED(laddr))
+ wildcard++;
+ }
+ if (wildcard < matchwild) {
+ match = inp;
+ matchwild = wildcard;
+ if (matchwild == 0)
+ break;
+ }
+ }
+ }
+ return (match);
+ }
+}
+
+void
+in6_pcbpurgeif0(struct inpcbinfo *pcbinfo, struct ifnet *ifp)
+{
+ struct inpcb *in6p;
+ struct ip6_moptions *im6o;
+ int i, gap;
+
+ INP_INFO_RLOCK(pcbinfo);
+ LIST_FOREACH(in6p, pcbinfo->ipi_listhead, inp_list) {
+ INP_WLOCK(in6p);
+ im6o = in6p->in6p_moptions;
+ if ((in6p->inp_vflag & INP_IPV6) && im6o != NULL) {
+ /*
+ * Unselect the outgoing ifp for multicast if it
+ * is being detached.
+ */
+ if (im6o->im6o_multicast_ifp == ifp)
+ im6o->im6o_multicast_ifp = NULL;
+ /*
+ * Drop multicast group membership if we joined
+ * through the interface being detached.
+ */
+ gap = 0;
+ for (i = 0; i < im6o->im6o_num_memberships; i++) {
+ if (im6o->im6o_membership[i]->in6m_ifp ==
+ ifp) {
+ in6_mc_leave(im6o->im6o_membership[i],
+ NULL);
+ gap++;
+ } else if (gap != 0) {
+ im6o->im6o_membership[i - gap] =
+ im6o->im6o_membership[i];
+ }
+ }
+ im6o->im6o_num_memberships -= gap;
+ }
+ INP_WUNLOCK(in6p);
+ }
+ INP_INFO_RUNLOCK(pcbinfo);
+}
+
+/*
+ * Check for alternatives when higher level complains
+ * about service problems. For now, invalidate cached
+ * routing information. If the route was created dynamically
+ * (by a redirect), time to try a default gateway again.
+ */
+void
+in6_losing(struct inpcb *in6p)
+{
+
+ /*
+ * We don't store route pointers in the inpcb anymore.
+ */
+ return;
+}
+
+/*
+ * After a routing change, flush old routing
+ * and allocate a (hopefully) better one.
+ */
+struct inpcb *
+in6_rtchange(struct inpcb *inp, int errno)
+{
+ /*
+ * We don't store route pointers in the inpcb anymore.
+ */
+ return inp;
+}
+
+/*
+ * Lookup PCB in hash list.
+ */
+struct inpcb *
+in6_pcblookup_hash(struct inpcbinfo *pcbinfo, struct in6_addr *faddr,
+ u_int fport_arg, struct in6_addr *laddr, u_int lport_arg, int wildcard,
+ struct ifnet *ifp)
+{
+ struct inpcbhead *head;
+ struct inpcb *inp, *tmpinp;
+ u_short fport = fport_arg, lport = lport_arg;
+ int faith;
+
+ INP_INFO_LOCK_ASSERT(pcbinfo);
+
+ if (faithprefix_p != NULL)
+ faith = (*faithprefix_p)(laddr);
+ else
+ faith = 0;
+
+ /*
+ * First look for an exact match.
+ */
+ tmpinp = NULL;
+ head = &pcbinfo->ipi_hashbase[
+ INP_PCBHASH(faddr->s6_addr32[3] /* XXX */, lport, fport,
+ pcbinfo->ipi_hashmask)];
+ LIST_FOREACH(inp, head, inp_hash) {
+ /* XXX inp locking */
+ if ((inp->inp_vflag & INP_IPV6) == 0)
+ continue;
+ if (IN6_ARE_ADDR_EQUAL(&inp->in6p_faddr, faddr) &&
+ IN6_ARE_ADDR_EQUAL(&inp->in6p_laddr, laddr) &&
+ inp->inp_fport == fport &&
+ inp->inp_lport == lport) {
+ /*
+ * XXX We should be able to directly return
+ * the inp here, without any checks.
+ * Well unless both bound with SO_REUSEPORT?
+ */
+ if (prison_flag(inp->inp_cred, PR_IP6))
+ return (inp);
+ if (tmpinp == NULL)
+ tmpinp = inp;
+ }
+ }
+ if (tmpinp != NULL)
+ return (tmpinp);
+
+ /*
+ * Then look for a wildcard match, if requested.
+ */
+ if (wildcard == INPLOOKUP_WILDCARD) {
+ struct inpcb *local_wild = NULL, *local_exact = NULL;
+ struct inpcb *jail_wild = NULL;
+ int injail;
+
+ /*
+ * Order of socket selection - we always prefer jails.
+ * 1. jailed, non-wild.
+ * 2. jailed, wild.
+ * 3. non-jailed, non-wild.
+ * 4. non-jailed, wild.
+ */
+ head = &pcbinfo->ipi_hashbase[INP_PCBHASH(INADDR_ANY, lport,
+ 0, pcbinfo->ipi_hashmask)];
+ LIST_FOREACH(inp, head, inp_hash) {
+ /* XXX inp locking */
+ if ((inp->inp_vflag & INP_IPV6) == 0)
+ continue;
+
+ if (!IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_faddr) ||
+ inp->inp_lport != lport) {
+ continue;
+ }
+
+ /* XXX inp locking */
+ if (faith && (inp->inp_flags & INP_FAITH) == 0)
+ continue;
+
+ injail = prison_flag(inp->inp_cred, PR_IP6);
+ if (injail) {
+ if (prison_check_ip6(inp->inp_cred,
+ laddr) != 0)
+ continue;
+ } else {
+ if (local_exact != NULL)
+ continue;
+ }
+
+ if (IN6_ARE_ADDR_EQUAL(&inp->in6p_laddr, laddr)) {
+ if (injail)
+ return (inp);
+ else
+ local_exact = inp;
+ } else if (IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr)) {
+ if (injail)
+ jail_wild = inp;
+ else
+ local_wild = inp;
+ }
+ } /* LIST_FOREACH */
+
+ if (jail_wild != NULL)
+ return (jail_wild);
+ if (local_exact != NULL)
+ return (local_exact);
+ if (local_wild != NULL)
+ return (local_wild);
+ } /* if (wildcard == INPLOOKUP_WILDCARD) */
+
+ /*
+ * Not found.
+ */
+ return (NULL);
+}
+
+void
+init_sin6(struct sockaddr_in6 *sin6, struct mbuf *m)
+{
+ struct ip6_hdr *ip;
+
+ ip = mtod(m, struct ip6_hdr *);
+ bzero(sin6, sizeof(*sin6));
+ sin6->sin6_len = sizeof(*sin6);
+ sin6->sin6_family = AF_INET6;
+ sin6->sin6_addr = ip->ip6_src;
+
+ (void)sa6_recoverscope(sin6); /* XXX: should catch errors... */
+
+ return;
+}
diff --git a/rtems/freebsd/netinet6/in6_pcb.h b/rtems/freebsd/netinet6/in6_pcb.h
new file mode 100644
index 00000000..abc4a318
--- /dev/null
+++ b/rtems/freebsd/netinet6/in6_pcb.h
@@ -0,0 +1,109 @@
+/*-
+ * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the project nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $KAME: in6_pcb.h,v 1.13 2001/02/06 09:16:53 itojun Exp $
+ */
+
+/*-
+ * Copyright (c) 1982, 1986, 1990, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)in_pcb.h 8.1 (Berkeley) 6/10/93
+ * $FreeBSD$
+ */
+
+#ifndef _NETINET6_IN6_PCB_HH_
+#define _NETINET6_IN6_PCB_HH_
+
+#ifdef _KERNEL
+#define satosin6(sa) ((struct sockaddr_in6 *)(sa))
+#define sin6tosa(sin6) ((struct sockaddr *)(sin6))
+#define ifatoia6(ifa) ((struct in6_ifaddr *)(ifa))
+
+void in6_pcbpurgeif0 __P((struct inpcbinfo *, struct ifnet *));
+void in6_losing __P((struct inpcb *));
+int in6_pcbbind __P((struct inpcb *, struct sockaddr *, struct ucred *));
+int in6_pcbconnect __P((struct inpcb *, struct sockaddr *, struct ucred *));
+void in6_pcbdisconnect __P((struct inpcb *));
+int in6_pcbladdr(struct inpcb *, struct sockaddr *, struct in6_addr *);
+struct inpcb *
+ in6_pcblookup_local __P((struct inpcbinfo *,
+ struct in6_addr *, u_short, int,
+ struct ucred *));
+struct inpcb *
+ in6_pcblookup_hash __P((struct inpcbinfo *,
+ struct in6_addr *, u_int, struct in6_addr *,
+ u_int, int, struct ifnet *));
+void in6_pcbnotify __P((struct inpcbinfo *, struct sockaddr *,
+ u_int, const struct sockaddr *, u_int, int, void *,
+ struct inpcb *(*)(struct inpcb *, int)));
+#ifndef __rtems__
+struct inpcb *
+ in6_rtchange __P((struct inpcb *, int));
+#else
+struct inpcb *
+ in6_rtchange(struct inpcb *inp, int errno);
+#endif
+struct sockaddr *
+ in6_sockaddr __P((in_port_t port, struct in6_addr *addr_p));
+struct sockaddr *
+ in6_v4mapsin6_sockaddr __P((in_port_t port, struct in_addr *addr_p));
+int in6_getpeeraddr __P((struct socket *so, struct sockaddr **nam));
+int in6_getsockaddr __P((struct socket *so, struct sockaddr **nam));
+int in6_mapped_sockaddr __P((struct socket *so, struct sockaddr **nam));
+int in6_mapped_peeraddr __P((struct socket *so, struct sockaddr **nam));
+int in6_selecthlim __P((struct in6pcb *, struct ifnet *));
+int in6_pcbsetport __P((struct in6_addr *, struct inpcb *, struct ucred *));
+void init_sin6 __P((struct sockaddr_in6 *sin6, struct mbuf *m));
+#endif /* _KERNEL */
+
+#endif /* !_NETINET6_IN6_PCB_HH_ */
diff --git a/rtems/freebsd/netinet6/in6_proto.c b/rtems/freebsd/netinet6/in6_proto.c
new file mode 100644
index 00000000..85e72c8c
--- /dev/null
+++ b/rtems/freebsd/netinet6/in6_proto.c
@@ -0,0 +1,597 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the project nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $KAME: in6_proto.c,v 1.91 2001/05/27 13:28:35 itojun Exp $
+ */
+
+/*-
+ * Copyright (c) 1982, 1986, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)in_proto.c 8.1 (Berkeley) 6/10/93
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/freebsd/local/opt_inet.h>
+#include <rtems/freebsd/local/opt_inet6.h>
+#include <rtems/freebsd/local/opt_ipsec.h>
+#include <rtems/freebsd/local/opt_ipstealth.h>
+#include <rtems/freebsd/local/opt_sctp.h>
+#include <rtems/freebsd/local/opt_mpath.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/socketvar.h>
+#include <rtems/freebsd/sys/proc.h>
+#include <rtems/freebsd/sys/protosw.h>
+#include <rtems/freebsd/sys/jail.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/domain.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/sysctl.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/radix.h>
+#include <rtems/freebsd/net/route.h>
+#ifdef RADIX_MPATH
+#include <rtems/freebsd/net/radix_mpath.h>
+#endif
+
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/in_systm.h>
+#include <rtems/freebsd/netinet/in_var.h>
+#include <rtems/freebsd/netinet/ip_encap.h>
+#include <rtems/freebsd/netinet/ip.h>
+#include <rtems/freebsd/netinet/ip_var.h>
+#include <rtems/freebsd/netinet/ip6.h>
+#include <rtems/freebsd/netinet6/ip6_var.h>
+#include <rtems/freebsd/netinet/icmp6.h>
+
+#include <rtems/freebsd/netinet/tcp.h>
+#include <rtems/freebsd/netinet/tcp_timer.h>
+#include <rtems/freebsd/netinet/tcp_var.h>
+#include <rtems/freebsd/netinet/udp.h>
+#include <rtems/freebsd/netinet/udp_var.h>
+#include <rtems/freebsd/netinet6/tcp6_var.h>
+#include <rtems/freebsd/netinet6/raw_ip6.h>
+#include <rtems/freebsd/netinet6/udp6_var.h>
+#include <rtems/freebsd/netinet6/pim6_var.h>
+#include <rtems/freebsd/netinet6/nd6.h>
+
+#ifdef SCTP
+#include <rtems/freebsd/netinet/in_pcb.h>
+#include <rtems/freebsd/netinet/sctp_pcb.h>
+#include <rtems/freebsd/netinet/sctp.h>
+#include <rtems/freebsd/netinet/sctp_var.h>
+#include <rtems/freebsd/netinet6/sctp6_var.h>
+#endif /* SCTP */
+
+#ifdef IPSEC
+#include <rtems/freebsd/netipsec/ipsec.h>
+#include <rtems/freebsd/netipsec/ipsec6.h>
+#endif /* IPSEC */
+
+#include <rtems/freebsd/netinet6/ip6protosw.h>
+
+/*
+ * TCP/IP protocol family: IP6, ICMP6, UDP, TCP.
+ */
+
+extern struct domain inet6domain;
+static struct pr_usrreqs nousrreqs;
+
+#define PR_LISTEN 0
+#define PR_ABRTACPTDIS 0
+
+/* Spacer for loadable protocols. */
+#define IP6PROTOSPACER \
+{ \
+ .pr_domain = &inet6domain, \
+ .pr_protocol = PROTO_SPACER, \
+ .pr_usrreqs = &nousrreqs \
+}
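+
+/*
+ * Each spacer is an inert slot (PROTO_SPACER) in inet6sw[] that a
+ * protocol module loaded at run time can later claim; in stock
+ * FreeBSD this is done through pf_proto_register().
+ */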
+
+struct ip6protosw inet6sw[] = {
+{
+ .pr_type = 0,
+ .pr_domain = &inet6domain,
+ .pr_protocol = IPPROTO_IPV6,
+ .pr_init = ip6_init,
+#ifdef VIMAGE
+ .pr_destroy = ip6_destroy,
+#endif
+ .pr_slowtimo = frag6_slowtimo,
+ .pr_drain = frag6_drain,
+ .pr_usrreqs = &nousrreqs,
+},
+{
+ .pr_type = SOCK_DGRAM,
+ .pr_domain = &inet6domain,
+ .pr_protocol = IPPROTO_UDP,
+ .pr_flags = PR_ATOMIC|PR_ADDR,
+ .pr_input = udp6_input,
+ .pr_ctlinput = udp6_ctlinput,
+ .pr_ctloutput = ip6_ctloutput,
+ .pr_usrreqs = &udp6_usrreqs,
+},
+{
+ .pr_type = SOCK_STREAM,
+ .pr_domain = &inet6domain,
+ .pr_protocol = IPPROTO_TCP,
+ .pr_flags = PR_CONNREQUIRED|PR_WANTRCVD|PR_LISTEN,
+ .pr_input = tcp6_input,
+ .pr_ctlinput = tcp6_ctlinput,
+ .pr_ctloutput = tcp_ctloutput,
+#ifndef INET /* don't call initialization and timeout routines twice */
+ .pr_init = tcp_init,
+ .pr_slowtimo = tcp_slowtimo,
+#endif
+ .pr_drain = tcp_drain,
+ .pr_usrreqs = &tcp6_usrreqs,
+},
+#ifdef SCTP
+{
+ .pr_type = SOCK_DGRAM,
+ .pr_domain = &inet6domain,
+ .pr_protocol = IPPROTO_SCTP,
+ .pr_flags = PR_WANTRCVD,
+ .pr_input = sctp6_input,
+ .pr_ctlinput = sctp6_ctlinput,
+ .pr_ctloutput = sctp_ctloutput,
+ .pr_drain = sctp_drain,
+ .pr_usrreqs = &sctp6_usrreqs
+},
+{
+ .pr_type = SOCK_SEQPACKET,
+ .pr_domain = &inet6domain,
+ .pr_protocol = IPPROTO_SCTP,
+ .pr_flags = PR_WANTRCVD,
+ .pr_input = sctp6_input,
+ .pr_ctlinput = sctp6_ctlinput,
+ .pr_ctloutput = sctp_ctloutput,
+ .pr_drain = sctp_drain,
+ .pr_usrreqs = &sctp6_usrreqs
+},
+
+{
+ .pr_type = SOCK_STREAM,
+ .pr_domain = &inet6domain,
+ .pr_protocol = IPPROTO_SCTP,
+ .pr_flags = PR_WANTRCVD,
+ .pr_input = sctp6_input,
+ .pr_ctlinput = sctp6_ctlinput,
+ .pr_ctloutput = sctp_ctloutput,
+ .pr_drain = sctp_drain,
+ .pr_usrreqs = &sctp6_usrreqs
+},
+#endif /* SCTP */
+{
+ .pr_type = SOCK_RAW,
+ .pr_domain = &inet6domain,
+ .pr_protocol = IPPROTO_RAW,
+ .pr_flags = PR_ATOMIC|PR_ADDR,
+ .pr_input = rip6_input,
+ .pr_output = rip6_output,
+ .pr_ctlinput = rip6_ctlinput,
+ .pr_ctloutput = rip6_ctloutput,
+ .pr_usrreqs = &rip6_usrreqs
+},
+{
+ .pr_type = SOCK_RAW,
+ .pr_domain = &inet6domain,
+ .pr_protocol = IPPROTO_ICMPV6,
+ .pr_flags = PR_ATOMIC|PR_ADDR|PR_LASTHDR,
+ .pr_input = icmp6_input,
+ .pr_output = rip6_output,
+ .pr_ctlinput = rip6_ctlinput,
+ .pr_ctloutput = rip6_ctloutput,
+ .pr_fasttimo = icmp6_fasttimo,
+ .pr_slowtimo = icmp6_slowtimo,
+ .pr_usrreqs = &rip6_usrreqs
+},
+{
+ .pr_type = SOCK_RAW,
+ .pr_domain = &inet6domain,
+ .pr_protocol = IPPROTO_DSTOPTS,
+ .pr_flags = PR_ATOMIC|PR_ADDR,
+ .pr_input = dest6_input,
+ .pr_usrreqs = &nousrreqs
+},
+{
+ .pr_type = SOCK_RAW,
+ .pr_domain = &inet6domain,
+ .pr_protocol = IPPROTO_ROUTING,
+ .pr_flags = PR_ATOMIC|PR_ADDR,
+ .pr_input = route6_input,
+ .pr_usrreqs = &nousrreqs
+},
+{
+ .pr_type = SOCK_RAW,
+ .pr_domain = &inet6domain,
+ .pr_protocol = IPPROTO_FRAGMENT,
+ .pr_flags = PR_ATOMIC|PR_ADDR,
+ .pr_input = frag6_input,
+ .pr_usrreqs = &nousrreqs
+},
+#ifdef IPSEC
+{
+ .pr_type = SOCK_RAW,
+ .pr_domain = &inet6domain,
+ .pr_protocol = IPPROTO_AH,
+ .pr_flags = PR_ATOMIC|PR_ADDR,
+ .pr_input = ipsec6_common_input,
+ .pr_usrreqs = &nousrreqs,
+},
+{
+ .pr_type = SOCK_RAW,
+ .pr_domain = &inet6domain,
+ .pr_protocol = IPPROTO_ESP,
+ .pr_flags = PR_ATOMIC|PR_ADDR,
+ .pr_input = ipsec6_common_input,
+ .pr_ctlinput = esp6_ctlinput,
+ .pr_usrreqs = &nousrreqs,
+},
+{
+ .pr_type = SOCK_RAW,
+ .pr_domain = &inet6domain,
+ .pr_protocol = IPPROTO_IPCOMP,
+ .pr_flags = PR_ATOMIC|PR_ADDR,
+ .pr_input = ipsec6_common_input,
+ .pr_usrreqs = &nousrreqs,
+},
+#endif /* IPSEC */
+#ifdef INET
+{
+ .pr_type = SOCK_RAW,
+ .pr_domain = &inet6domain,
+ .pr_protocol = IPPROTO_IPV4,
+ .pr_flags = PR_ATOMIC|PR_ADDR|PR_LASTHDR,
+ .pr_input = encap6_input,
+ .pr_output = rip6_output,
+ .pr_ctloutput = rip6_ctloutput,
+ .pr_init = encap_init,
+ .pr_usrreqs = &rip6_usrreqs
+},
+#endif /* INET */
+{
+ .pr_type = SOCK_RAW,
+ .pr_domain = &inet6domain,
+ .pr_protocol = IPPROTO_IPV6,
+ .pr_flags = PR_ATOMIC|PR_ADDR|PR_LASTHDR,
+ .pr_input = encap6_input,
+ .pr_output = rip6_output,
+ .pr_ctloutput = rip6_ctloutput,
+ .pr_init = encap_init,
+ .pr_usrreqs = &rip6_usrreqs
+},
+{
+ .pr_type = SOCK_RAW,
+ .pr_domain = &inet6domain,
+ .pr_protocol = IPPROTO_PIM,
+ .pr_flags = PR_ATOMIC|PR_ADDR|PR_LASTHDR,
+ .pr_input = encap6_input,
+ .pr_output = rip6_output,
+ .pr_ctloutput = rip6_ctloutput,
+ .pr_usrreqs = &rip6_usrreqs
+},
+/* Spacer n-times for loadable protocols. */
+IP6PROTOSPACER,
+IP6PROTOSPACER,
+IP6PROTOSPACER,
+IP6PROTOSPACER,
+IP6PROTOSPACER,
+IP6PROTOSPACER,
+IP6PROTOSPACER,
+IP6PROTOSPACER,
+/* raw wildcard */
+{
+ .pr_type = SOCK_RAW,
+ .pr_domain = &inet6domain,
+ .pr_flags = PR_ATOMIC|PR_ADDR,
+ .pr_input = rip6_input,
+ .pr_output = rip6_output,
+ .pr_ctloutput = rip6_ctloutput,
+ .pr_usrreqs = &rip6_usrreqs
+},
+};
+
+extern int in6_inithead(void **, int);
+#ifdef VIMAGE
+extern int in6_detachhead(void **, int);
+#endif
+
+struct domain inet6domain = {
+ .dom_family = AF_INET6,
+ .dom_name = "internet6",
+ .dom_protosw = (struct protosw *)inet6sw,
+ .dom_protoswNPROTOSW = (struct protosw *)
+ &inet6sw[sizeof(inet6sw)/sizeof(inet6sw[0])],
+#ifdef RADIX_MPATH
+ .dom_rtattach = rn6_mpath_inithead,
+#else
+ .dom_rtattach = in6_inithead,
+#endif
+#ifdef VIMAGE
+ .dom_rtdetach = in6_detachhead,
+#endif
+	.dom_rtoffset =		offsetof(struct sockaddr_in6, sin6_addr) << 3,	/* offset of sin6_addr, in bits */
+ .dom_maxrtkey = sizeof(struct sockaddr_in6),
+ .dom_ifattach = in6_domifattach,
+ .dom_ifdetach = in6_domifdetach
+};
+
+VNET_DOMAIN_SET(inet6);
+
+/*
+ * Internet configuration info
+ */
+#ifndef IPV6FORWARDING
+#ifdef GATEWAY6
+#define IPV6FORWARDING 1 /* forward IP6 packets not for us */
+#else
+#define IPV6FORWARDING 0 /* don't forward IP6 packets not for us */
+#endif /* GATEWAY6 */
+#endif /* !IPV6FORWARDING */
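+
+/*
+ * Example: a kernel built with 'options GATEWAY6' therefore defaults
+ * IPV6FORWARDING to 1; either default can still be changed at run time
+ * through the net.inet6.ip6.forwarding sysctl declared further below.
+ */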
+
+#ifndef IPV6_SENDREDIRECTS
+#define IPV6_SENDREDIRECTS 1
+#endif
+
+VNET_DEFINE(int, ip6_forwarding) = IPV6FORWARDING; /* act as router? */
+VNET_DEFINE(int, ip6_sendredirects) = IPV6_SENDREDIRECTS;
+VNET_DEFINE(int, ip6_defhlim) = IPV6_DEFHLIM;
+VNET_DEFINE(int, ip6_defmcasthlim) = IPV6_DEFAULT_MULTICAST_HOPS;
+VNET_DEFINE(int, ip6_accept_rtadv) = 0;
+VNET_DEFINE(int, ip6_maxfragpackets); /* initialized in frag6.c:frag6_init() */
+VNET_DEFINE(int, ip6_maxfrags); /* initialized in frag6.c:frag6_init() */
+VNET_DEFINE(int, ip6_log_interval) = 5;
+VNET_DEFINE(int, ip6_hdrnestlimit) = 15;/* How many header options will we
+ * process? */
+VNET_DEFINE(int, ip6_dad_count) = 1; /* DupAddrDetectionTransmits */
+VNET_DEFINE(int, ip6_auto_flowlabel) = 1;
+VNET_DEFINE(int, ip6_use_deprecated) = 1;/* allow deprecated addr
+ * (RFC2462 5.5.4) */
+VNET_DEFINE(int, ip6_rr_prune) = 5; /* router renumbering prefix
+ * walk list every 5 sec. */
+VNET_DEFINE(int, ip6_mcast_pmtu) = 0; /* enable pMTU discovery for multicast? */
+VNET_DEFINE(int, ip6_v6only) = 1;
+
+VNET_DEFINE(int, ip6_keepfaith) = 0;
+VNET_DEFINE(time_t, ip6_log_time) = (time_t)0L;
+#ifdef IPSTEALTH
+VNET_DEFINE(int, ip6stealth) = 0;
+#endif
+VNET_DEFINE(int, nd6_onlink_ns_rfc4861) = 0;/* allow 'on-link' nd6 NS
+ * (RFC 4861) */
+
+/* icmp6 */
+/*
+ * BSDI4 defines these variables in in_proto.c...
+ * XXX: what if we don't define INET? Should we define pmtu6_expire
+ * or so? (jinmei@kame.net 19990310)
+ */
+VNET_DEFINE(int, pmtu_expire) = 60*10;
+VNET_DEFINE(int, pmtu_probe) = 60*2;
+
+/* raw IP6 parameters */
+/*
+ * Nominal space allocated to a raw ip socket.
+ */
+#define RIPV6SNDQ 8192
+#define RIPV6RCVQ 8192
+
+VNET_DEFINE(u_long, rip6_sendspace) = RIPV6SNDQ;
+VNET_DEFINE(u_long, rip6_recvspace) = RIPV6RCVQ;
+
+/* ICMPV6 parameters */
+VNET_DEFINE(int, icmp6_rediraccept) = 1;/* accept and process redirects */
+VNET_DEFINE(int, icmp6_redirtimeout) = 10 * 60; /* 10 minutes */
+VNET_DEFINE(int, icmp6errppslim) = 100; /* 100pps */
+/* control how to respond to NI queries */
+VNET_DEFINE(int, icmp6_nodeinfo) =
+ (ICMP6_NODEINFO_FQDNOK|ICMP6_NODEINFO_NODEADDROK);
+
+/* UDP on IP6 parameters */
+VNET_DEFINE(int, udp6_sendspace) = 9216;/* really max datagram size */
+VNET_DEFINE(int, udp6_recvspace) = 40 * (1024 + sizeof(struct sockaddr_in6));
+ /* 40 1K datagrams */
+
+/*
+ * sysctl related items.
+ */
+SYSCTL_NODE(_net, PF_INET6, inet6, CTLFLAG_RW, 0,
+ "Internet6 Family");
+
+/* net.inet6 */
+SYSCTL_NODE(_net_inet6, IPPROTO_IPV6, ip6, CTLFLAG_RW, 0, "IP6");
+SYSCTL_NODE(_net_inet6, IPPROTO_ICMPV6, icmp6, CTLFLAG_RW, 0, "ICMP6");
+SYSCTL_NODE(_net_inet6, IPPROTO_UDP, udp6, CTLFLAG_RW, 0, "UDP6");
+SYSCTL_NODE(_net_inet6, IPPROTO_TCP, tcp6, CTLFLAG_RW, 0, "TCP6");
+#ifdef SCTP
+SYSCTL_NODE(_net_inet6, IPPROTO_SCTP, sctp6, CTLFLAG_RW, 0, "SCTP6");
+#endif
+#ifdef IPSEC
+SYSCTL_NODE(_net_inet6, IPPROTO_ESP, ipsec6, CTLFLAG_RW, 0, "IPSEC6");
+#endif /* IPSEC */
+
+/* net.inet6.ip6 */
+static int
+sysctl_ip6_temppltime(SYSCTL_HANDLER_ARGS)
+{
+ int error = 0;
+ int old;
+
+ VNET_SYSCTL_ARG(req, arg1);
+
+ error = SYSCTL_OUT(req, arg1, sizeof(int));
+ if (error || !req->newptr)
+ return (error);
+ old = V_ip6_temp_preferred_lifetime;
+ error = SYSCTL_IN(req, arg1, sizeof(int));
+ if (V_ip6_temp_preferred_lifetime <
+ V_ip6_desync_factor + V_ip6_temp_regen_advance) {
+ V_ip6_temp_preferred_lifetime = old;
+ return (EINVAL);
+ }
+ return (error);
+}
+
+static int
+sysctl_ip6_tempvltime(SYSCTL_HANDLER_ARGS)
+{
+ int error = 0;
+ int old;
+
+ VNET_SYSCTL_ARG(req, arg1);
+
+ error = SYSCTL_OUT(req, arg1, sizeof(int));
+ if (error || !req->newptr)
+ return (error);
+ old = V_ip6_temp_valid_lifetime;
+ error = SYSCTL_IN(req, arg1, sizeof(int));
+ if (V_ip6_temp_valid_lifetime < V_ip6_temp_preferred_lifetime) {
+		V_ip6_temp_valid_lifetime = old;
+ return (EINVAL);
+ }
+ return (error);
+}
+
+SYSCTL_VNET_INT(_net_inet6_ip6, IPV6CTL_FORWARDING, forwarding, CTLFLAG_RW,
+ &VNET_NAME(ip6_forwarding), 0, "");
+SYSCTL_VNET_INT(_net_inet6_ip6, IPV6CTL_SENDREDIRECTS, redirect, CTLFLAG_RW,
+ &VNET_NAME(ip6_sendredirects), 0, "");
+SYSCTL_VNET_INT(_net_inet6_ip6, IPV6CTL_DEFHLIM, hlim, CTLFLAG_RW,
+ &VNET_NAME(ip6_defhlim), 0, "");
+SYSCTL_VNET_STRUCT(_net_inet6_ip6, IPV6CTL_STATS, stats, CTLFLAG_RD,
+ &VNET_NAME(ip6stat), ip6stat, "");
+SYSCTL_VNET_INT(_net_inet6_ip6, IPV6CTL_MAXFRAGPACKETS, maxfragpackets,
+ CTLFLAG_RW, &VNET_NAME(ip6_maxfragpackets), 0, "");
+SYSCTL_VNET_INT(_net_inet6_ip6, IPV6CTL_ACCEPT_RTADV, accept_rtadv,
+ CTLFLAG_RW, &VNET_NAME(ip6_accept_rtadv), 0, "");
+SYSCTL_VNET_INT(_net_inet6_ip6, IPV6CTL_KEEPFAITH, keepfaith, CTLFLAG_RW,
+ &VNET_NAME(ip6_keepfaith), 0, "");
+SYSCTL_VNET_INT(_net_inet6_ip6, IPV6CTL_LOG_INTERVAL, log_interval,
+ CTLFLAG_RW, &VNET_NAME(ip6_log_interval), 0, "");
+SYSCTL_VNET_INT(_net_inet6_ip6, IPV6CTL_HDRNESTLIMIT, hdrnestlimit,
+ CTLFLAG_RW, &VNET_NAME(ip6_hdrnestlimit), 0, "");
+SYSCTL_VNET_INT(_net_inet6_ip6, IPV6CTL_DAD_COUNT, dad_count, CTLFLAG_RW,
+ &VNET_NAME(ip6_dad_count), 0, "");
+SYSCTL_VNET_INT(_net_inet6_ip6, IPV6CTL_AUTO_FLOWLABEL, auto_flowlabel,
+ CTLFLAG_RW, &VNET_NAME(ip6_auto_flowlabel), 0, "");
+SYSCTL_VNET_INT(_net_inet6_ip6, IPV6CTL_DEFMCASTHLIM, defmcasthlim,
+ CTLFLAG_RW, &VNET_NAME(ip6_defmcasthlim), 0, "");
+SYSCTL_STRING(_net_inet6_ip6, IPV6CTL_KAME_VERSION, kame_version,
+ CTLFLAG_RD, __KAME_VERSION, 0, "");
+SYSCTL_VNET_INT(_net_inet6_ip6, IPV6CTL_USE_DEPRECATED, use_deprecated,
+ CTLFLAG_RW, &VNET_NAME(ip6_use_deprecated), 0, "");
+SYSCTL_VNET_INT(_net_inet6_ip6, IPV6CTL_RR_PRUNE, rr_prune, CTLFLAG_RW,
+ &VNET_NAME(ip6_rr_prune), 0, "");
+SYSCTL_VNET_INT(_net_inet6_ip6, IPV6CTL_USETEMPADDR, use_tempaddr,
+ CTLFLAG_RW, &VNET_NAME(ip6_use_tempaddr), 0, "");
+SYSCTL_VNET_PROC(_net_inet6_ip6, IPV6CTL_TEMPPLTIME, temppltime,
+ CTLTYPE_INT|CTLFLAG_RW, &VNET_NAME(ip6_temp_preferred_lifetime), 0,
+ sysctl_ip6_temppltime, "I", "");
+SYSCTL_VNET_PROC(_net_inet6_ip6, IPV6CTL_TEMPVLTIME, tempvltime,
+ CTLTYPE_INT|CTLFLAG_RW, &VNET_NAME(ip6_temp_valid_lifetime), 0,
+ sysctl_ip6_tempvltime, "I", "");
+SYSCTL_VNET_INT(_net_inet6_ip6, IPV6CTL_V6ONLY, v6only, CTLFLAG_RW,
+ &VNET_NAME(ip6_v6only), 0, "");
+SYSCTL_VNET_INT(_net_inet6_ip6, IPV6CTL_AUTO_LINKLOCAL, auto_linklocal,
+ CTLFLAG_RW, &VNET_NAME(ip6_auto_linklocal), 0, "");
+SYSCTL_VNET_STRUCT(_net_inet6_ip6, IPV6CTL_RIP6STATS, rip6stats, CTLFLAG_RD,
+ &VNET_NAME(rip6stat), rip6stat, "");
+SYSCTL_VNET_INT(_net_inet6_ip6, IPV6CTL_PREFER_TEMPADDR, prefer_tempaddr,
+ CTLFLAG_RW, &VNET_NAME(ip6_prefer_tempaddr), 0, "");
+SYSCTL_VNET_INT(_net_inet6_ip6, IPV6CTL_USE_DEFAULTZONE, use_defaultzone,
+ CTLFLAG_RW, &VNET_NAME(ip6_use_defzone), 0,"");
+SYSCTL_VNET_INT(_net_inet6_ip6, IPV6CTL_MAXFRAGS, maxfrags, CTLFLAG_RW,
+ &VNET_NAME(ip6_maxfrags), 0, "");
+SYSCTL_VNET_INT(_net_inet6_ip6, IPV6CTL_MCAST_PMTU, mcast_pmtu, CTLFLAG_RW,
+ &VNET_NAME(ip6_mcast_pmtu), 0, "");
+#ifdef IPSTEALTH
+SYSCTL_VNET_INT(_net_inet6_ip6, IPV6CTL_STEALTH, stealth, CTLFLAG_RW,
+ &VNET_NAME(ip6stealth), 0, "");
+#endif
+
+/* net.inet6.icmp6 */
+SYSCTL_VNET_INT(_net_inet6_icmp6, ICMPV6CTL_REDIRACCEPT, rediraccept,
+ CTLFLAG_RW, &VNET_NAME(icmp6_rediraccept), 0, "");
+SYSCTL_VNET_INT(_net_inet6_icmp6, ICMPV6CTL_REDIRTIMEOUT, redirtimeout,
+ CTLFLAG_RW, &VNET_NAME(icmp6_redirtimeout), 0, "");
+SYSCTL_VNET_STRUCT(_net_inet6_icmp6, ICMPV6CTL_STATS, stats, CTLFLAG_RD,
+ &VNET_NAME(icmp6stat), icmp6stat, "");
+SYSCTL_VNET_INT(_net_inet6_icmp6, ICMPV6CTL_ND6_PRUNE, nd6_prune, CTLFLAG_RW,
+ &VNET_NAME(nd6_prune), 0, "");
+SYSCTL_VNET_INT(_net_inet6_icmp6, ICMPV6CTL_ND6_DELAY, nd6_delay, CTLFLAG_RW,
+ &VNET_NAME(nd6_delay), 0, "");
+SYSCTL_VNET_INT(_net_inet6_icmp6, ICMPV6CTL_ND6_UMAXTRIES, nd6_umaxtries,
+ CTLFLAG_RW, &VNET_NAME(nd6_umaxtries), 0, "");
+SYSCTL_VNET_INT(_net_inet6_icmp6, ICMPV6CTL_ND6_MMAXTRIES, nd6_mmaxtries,
+ CTLFLAG_RW, &VNET_NAME(nd6_mmaxtries), 0, "");
+SYSCTL_VNET_INT(_net_inet6_icmp6, ICMPV6CTL_ND6_USELOOPBACK, nd6_useloopback,
+ CTLFLAG_RW, &VNET_NAME(nd6_useloopback), 0, "");
+SYSCTL_VNET_INT(_net_inet6_icmp6, ICMPV6CTL_NODEINFO, nodeinfo, CTLFLAG_RW,
+ &VNET_NAME(icmp6_nodeinfo), 0, "");
+SYSCTL_VNET_INT(_net_inet6_icmp6, ICMPV6CTL_ERRPPSLIMIT, errppslimit,
+ CTLFLAG_RW, &VNET_NAME(icmp6errppslim), 0, "");
+SYSCTL_VNET_INT(_net_inet6_icmp6, ICMPV6CTL_ND6_MAXNUDHINT, nd6_maxnudhint,
+ CTLFLAG_RW, &VNET_NAME(nd6_maxnudhint), 0, "");
+SYSCTL_VNET_INT(_net_inet6_icmp6, ICMPV6CTL_ND6_DEBUG, nd6_debug, CTLFLAG_RW,
+ &VNET_NAME(nd6_debug), 0, "");
+SYSCTL_VNET_INT(_net_inet6_icmp6, ICMPV6CTL_ND6_ONLINKNSRFC4861,
+ nd6_onlink_ns_rfc4861, CTLFLAG_RW, &VNET_NAME(nd6_onlink_ns_rfc4861),
+ 0, "Accept 'on-link' nd6 NS in compliance with RFC 4861.");
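+
+/*
+ * Usage sketch (from userland, assuming the usual sysctl(8) tool):
+ *   sysctl net.inet6.ip6.forwarding=1      # act as an IPv6 router
+ *   sysctl net.inet6.icmp6.rediraccept=0   # ignore ICMPv6 redirects
+ */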
diff --git a/rtems/freebsd/netinet6/in6_rmx.c b/rtems/freebsd/netinet6/in6_rmx.c
new file mode 100644
index 00000000..17e73501
--- /dev/null
+++ b/rtems/freebsd/netinet6/in6_rmx.c
@@ -0,0 +1,449 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the project nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $KAME: in6_rmx.c,v 1.11 2001/07/26 06:53:16 jinmei Exp $
+ */
+
+/*-
+ * Copyright 1994, 1995 Massachusetts Institute of Technology
+ *
+ * Permission to use, copy, modify, and distribute this software and
+ * its documentation for any purpose and without fee is hereby
+ * granted, provided that both the above copyright notice and this
+ * permission notice appear in all copies, that both the above
+ * copyright notice and this permission notice appear in all
+ * supporting documentation, and that the name of M.I.T. not be used
+ * in advertising or publicity pertaining to distribution of the
+ * software without specific, written prior permission. M.I.T. makes
+ * no representations about the suitability of this software for any
+ * purpose. It is provided "as is" without express or implied
+ * warranty.
+ *
+ * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''. M.I.T. DISCLAIMS
+ * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT
+ * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+/*
+ * This code does two things necessary for the enhanced TCP metrics to
+ * function in a useful manner:
+ * 1) It marks all non-host routes as `cloning', thus ensuring that
+ * every actual reference to such a route actually gets turned
+ * into a reference to a host route to the specific destination
+ * requested.
+ * 2) When such routes lose all their references, it arranges for them
+ * to be deleted in some random collection of circumstances, so that
+ * a large quantity of stale routing data is not kept in kernel memory
+ * indefinitely. See in6_rtqtimo() below for the exact mechanism.
+ */
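+
+/*
+ * In this file, in6_addroute() and in6_matroute() are installed as the
+ * radix-tree hooks for AF_INET6 by in6_inithead() below, while
+ * in6_rtqtimo() and in6_mtutimo() are the periodic callouts that expire
+ * stale cloned routes and aged path-MTU information.
+ */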
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/sysctl.h>
+#include <rtems/freebsd/sys/queue.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/socketvar.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/rwlock.h>
+#include <rtems/freebsd/sys/syslog.h>
+#include <rtems/freebsd/sys/callout.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/route.h>
+
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/ip_var.h>
+#include <rtems/freebsd/netinet/in_var.h>
+
+#include <rtems/freebsd/netinet/ip6.h>
+#include <rtems/freebsd/netinet6/ip6_var.h>
+
+#include <rtems/freebsd/netinet/icmp6.h>
+#include <rtems/freebsd/netinet6/nd6.h>
+
+#include <rtems/freebsd/netinet/tcp.h>
+#include <rtems/freebsd/netinet/tcp_seq.h>
+#include <rtems/freebsd/netinet/tcp_timer.h>
+#include <rtems/freebsd/netinet/tcp_var.h>
+
+extern int in6_inithead(void **head, int off);
+#ifdef VIMAGE
+extern int in6_detachhead(void **head, int off);
+#endif
+
+#define RTPRF_OURS RTF_PROTO3 /* set on routes we manage */
+
+/*
+ * Do what we need to do when inserting a route.
+ */
+static struct radix_node *
+in6_addroute(void *v_arg, void *n_arg, struct radix_node_head *head,
+ struct radix_node *treenodes)
+{
+ struct rtentry *rt = (struct rtentry *)treenodes;
+ struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)rt_key(rt);
+ struct radix_node *ret;
+
+ RADIX_NODE_HEAD_WLOCK_ASSERT(head);
+ if (IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr))
+ rt->rt_flags |= RTF_MULTICAST;
+
+ /*
+ * A little bit of help for both IPv6 output and input:
+ * For local addresses, we make sure that RTF_LOCAL is set,
+ * with the thought that this might one day be used to speed up
+ * ip_input().
+ *
+ * We also mark routes to multicast addresses as such, because
+ * it's easy to do and might be useful (but this is much more
+ * dubious since it's so easy to inspect the address). (This
+ * is done above.)
+ *
+ * XXX
+ * should elaborate the code.
+ */
+ if (rt->rt_flags & RTF_HOST) {
+ if (IN6_ARE_ADDR_EQUAL(&satosin6(rt->rt_ifa->ifa_addr)
+ ->sin6_addr,
+ &sin6->sin6_addr)) {
+ rt->rt_flags |= RTF_LOCAL;
+ }
+ }
+
+ if (!rt->rt_rmx.rmx_mtu && rt->rt_ifp)
+ rt->rt_rmx.rmx_mtu = IN6_LINKMTU(rt->rt_ifp);
+
+ ret = rn_addroute(v_arg, n_arg, head, treenodes);
+ if (ret == NULL) {
+ struct rtentry *rt2;
+ /*
+ * We are trying to add a net route, but can't.
+ * The following case should be allowed, so we'll make a
+ * special check for this:
+		 * Two IPv6 addresses with the same prefix are assigned
+		 * to a single interface.
+ * # ifconfig if0 inet6 3ffe:0501::1 prefix 64 alias (*1)
+ * # ifconfig if0 inet6 3ffe:0501::2 prefix 64 alias (*2)
+ * In this case, (*1) and (*2) want to add the same
+ * net route entry, 3ffe:0501:: -> if0.
+ * This case should not raise an error.
+ */
+ rt2 = rtalloc1((struct sockaddr *)sin6, 0, RTF_RNH_LOCKED);
+ if (rt2) {
+ if (((rt2->rt_flags & (RTF_HOST|RTF_GATEWAY)) == 0)
+ && rt2->rt_gateway
+ && rt2->rt_gateway->sa_family == AF_LINK
+ && rt2->rt_ifp == rt->rt_ifp) {
+ ret = rt2->rt_nodes;
+ }
+ RTFREE_LOCKED(rt2);
+ }
+ }
+ return (ret);
+}
+
+/*
+ * This code is the inverse of in6_clsroute: on first reference, if we
+ * were managing the route, stop doing so and set the expiration timer
+ * back off again.
+ */
+static struct radix_node *
+in6_matroute(void *v_arg, struct radix_node_head *head)
+{
+ struct radix_node *rn = rn_match(v_arg, head);
+ struct rtentry *rt = (struct rtentry *)rn;
+
+ if (rt) {
+ RT_LOCK(rt);
+ if (rt->rt_flags & RTPRF_OURS) {
+ rt->rt_flags &= ~RTPRF_OURS;
+ rt->rt_rmx.rmx_expire = 0;
+ }
+ RT_UNLOCK(rt);
+ }
+ return rn;
+}
+
+SYSCTL_DECL(_net_inet6_ip6);
+
+static VNET_DEFINE(int, rtq_reallyold6) = 60*60;
+ /* one hour is ``really old'' */
+#define V_rtq_reallyold6 VNET(rtq_reallyold6)
+SYSCTL_VNET_INT(_net_inet6_ip6, IPV6CTL_RTEXPIRE, rtexpire, CTLFLAG_RW,
+ &VNET_NAME(rtq_reallyold6) , 0, "");
+
+static VNET_DEFINE(int, rtq_minreallyold6) = 10;
+ /* never automatically crank down to less */
+#define V_rtq_minreallyold6 VNET(rtq_minreallyold6)
+SYSCTL_VNET_INT(_net_inet6_ip6, IPV6CTL_RTMINEXPIRE, rtminexpire, CTLFLAG_RW,
+ &VNET_NAME(rtq_minreallyold6) , 0, "");
+
+static VNET_DEFINE(int, rtq_toomany6) = 128;
+ /* 128 cached routes is ``too many'' */
+#define V_rtq_toomany6 VNET(rtq_toomany6)
+SYSCTL_VNET_INT(_net_inet6_ip6, IPV6CTL_RTMAXCACHE, rtmaxcache, CTLFLAG_RW,
+ &VNET_NAME(rtq_toomany6) , 0, "");
+
+struct rtqk_arg {
+ struct radix_node_head *rnh;
+ int mode;
+ int updating;
+ int draining;
+ int killed;
+ int found;
+ time_t nextstop;
+};
+
+/*
+ * Get rid of old routes. When draining, this deletes everything, even when
+ * the timeout is not expired yet. When updating, this makes sure that
+ * nothing has a timeout longer than the current value of rtq_reallyold6.
+ */
+static int
+in6_rtqkill(struct radix_node *rn, void *rock)
+{
+ struct rtqk_arg *ap = rock;
+ struct rtentry *rt = (struct rtentry *)rn;
+ int err;
+
+ RADIX_NODE_HEAD_WLOCK_ASSERT(ap->rnh);
+
+ if (rt->rt_flags & RTPRF_OURS) {
+ ap->found++;
+
+ if (ap->draining || rt->rt_rmx.rmx_expire <= time_uptime) {
+ if (rt->rt_refcnt > 0)
+ panic("rtqkill route really not free");
+
+ err = rtrequest(RTM_DELETE,
+ (struct sockaddr *)rt_key(rt),
+ rt->rt_gateway, rt_mask(rt),
+ rt->rt_flags|RTF_RNH_LOCKED, 0);
+ if (err) {
+				log(LOG_WARNING, "in6_rtqkill: error %d\n", err);
+ } else {
+ ap->killed++;
+ }
+ } else {
+ if (ap->updating
+ && (rt->rt_rmx.rmx_expire - time_uptime
+ > V_rtq_reallyold6)) {
+ rt->rt_rmx.rmx_expire = time_uptime
+ + V_rtq_reallyold6;
+ }
+ ap->nextstop = lmin(ap->nextstop,
+ rt->rt_rmx.rmx_expire);
+ }
+ }
+
+ return 0;
+}
+
+#define RTQ_TIMEOUT 60*10 /* run no less than once every ten minutes */
+static VNET_DEFINE(int, rtq_timeout6) = RTQ_TIMEOUT;
+static VNET_DEFINE(struct callout, rtq_timer6);
+
+#define V_rtq_timeout6 VNET(rtq_timeout6)
+#define V_rtq_timer6 VNET(rtq_timer6)
+
+static void
+in6_rtqtimo(void *rock)
+{
+ CURVNET_SET_QUIET((struct vnet *) rock);
+ struct radix_node_head *rnh;
+ struct rtqk_arg arg;
+ struct timeval atv;
+ static time_t last_adjusted_timeout = 0;
+
+ rnh = rt_tables_get_rnh(0, AF_INET6);
+ if (rnh == NULL) {
+ CURVNET_RESTORE();
+ return;
+ }
+ arg.found = arg.killed = 0;
+ arg.rnh = rnh;
+ arg.nextstop = time_uptime + V_rtq_timeout6;
+ arg.draining = arg.updating = 0;
+ RADIX_NODE_HEAD_LOCK(rnh);
+ rnh->rnh_walktree(rnh, in6_rtqkill, &arg);
+ RADIX_NODE_HEAD_UNLOCK(rnh);
+
+ /*
+ * Attempt to be somewhat dynamic about this:
+ * If there are ``too many'' routes sitting around taking up space,
+ * then crank down the timeout, and see if we can't make some more
+ * go away. However, we make sure that we will never adjust more
+ * than once in rtq_timeout6 seconds, to keep from cranking down too
+ * hard.
+ */
+ if ((arg.found - arg.killed > V_rtq_toomany6)
+ && (time_uptime - last_adjusted_timeout >= V_rtq_timeout6)
+ && V_rtq_reallyold6 > V_rtq_minreallyold6) {
+ V_rtq_reallyold6 = 2*V_rtq_reallyold6 / 3;
+ if (V_rtq_reallyold6 < V_rtq_minreallyold6) {
+ V_rtq_reallyold6 = V_rtq_minreallyold6;
+ }
+
+ last_adjusted_timeout = time_uptime;
+#ifdef DIAGNOSTIC
+		log(LOG_DEBUG, "in6_rtqtimo: adjusted rtq_reallyold6 to %d\n",
+ V_rtq_reallyold6);
+#endif
+ arg.found = arg.killed = 0;
+ arg.updating = 1;
+ RADIX_NODE_HEAD_LOCK(rnh);
+ rnh->rnh_walktree(rnh, in6_rtqkill, &arg);
+ RADIX_NODE_HEAD_UNLOCK(rnh);
+ }
+
+ atv.tv_usec = 0;
+ atv.tv_sec = arg.nextstop - time_uptime;
+ callout_reset(&V_rtq_timer6, tvtohz(&atv), in6_rtqtimo, rock);
+ CURVNET_RESTORE();
+}
+
+/*
+ * Age old PMTUs.
+ */
+struct mtuex_arg {
+ struct radix_node_head *rnh;
+ time_t nextstop;
+};
+static VNET_DEFINE(struct callout, rtq_mtutimer);
+#define V_rtq_mtutimer VNET(rtq_mtutimer)
+
+static int
+in6_mtuexpire(struct radix_node *rn, void *rock)
+{
+ struct rtentry *rt = (struct rtentry *)rn;
+ struct mtuex_arg *ap = rock;
+
+ /* sanity */
+ if (!rt)
+ panic("rt == NULL in in6_mtuexpire");
+
+ if (rt->rt_rmx.rmx_expire && !(rt->rt_flags & RTF_PROBEMTU)) {
+ if (rt->rt_rmx.rmx_expire <= time_uptime) {
+ rt->rt_flags |= RTF_PROBEMTU;
+ } else {
+ ap->nextstop = lmin(ap->nextstop,
+ rt->rt_rmx.rmx_expire);
+ }
+ }
+
+ return 0;
+}
+
+#define MTUTIMO_DEFAULT (60*1)
+
+static void
+in6_mtutimo(void *rock)
+{
+ CURVNET_SET_QUIET((struct vnet *) rock);
+ struct radix_node_head *rnh;
+ struct mtuex_arg arg;
+ struct timeval atv;
+
+ rnh = rt_tables_get_rnh(0, AF_INET6);
+ if (rnh == NULL) {
+ CURVNET_RESTORE();
+ return;
+ }
+ arg.rnh = rnh;
+ arg.nextstop = time_uptime + MTUTIMO_DEFAULT;
+ RADIX_NODE_HEAD_LOCK(rnh);
+ rnh->rnh_walktree(rnh, in6_mtuexpire, &arg);
+ RADIX_NODE_HEAD_UNLOCK(rnh);
+
+ atv.tv_usec = 0;
+ atv.tv_sec = arg.nextstop - time_uptime;
+ if (atv.tv_sec < 0) {
+ printf("invalid mtu expiration time on routing table\n");
+ arg.nextstop = time_uptime + 30; /* last resort */
+ atv.tv_sec = 30;
+ }
+ callout_reset(&V_rtq_mtutimer, tvtohz(&atv), in6_mtutimo, rock);
+ CURVNET_RESTORE();
+}
+
+/*
+ * Initialize our routing tree.
+ * XXX MRT When off == 0, we are being called from vfs_export.c
+ * so just set up their table and leave. (we know what the correct
+ * value should be so just use that).. FIX AFTER RELENG_7 is MFC'd
+ * see also comments in in_inithead() vfs_export.c and domain.h
+ */
+int
+in6_inithead(void **head, int off)
+{
+ struct radix_node_head *rnh;
+
+ if (!rn_inithead(head, offsetof(struct sockaddr_in6, sin6_addr) << 3))
+ return 0; /* See above */
+
+ if (off == 0) /* See above */
+ return 1; /* only do the rest for the real thing */
+
+ rnh = *head;
+ KASSERT(rnh == rt_tables_get_rnh(0, AF_INET6), ("rnh?"));
+ rnh->rnh_addaddr = in6_addroute;
+ rnh->rnh_matchaddr = in6_matroute;
+ callout_init(&V_rtq_timer6, CALLOUT_MPSAFE);
+ callout_init(&V_rtq_mtutimer, CALLOUT_MPSAFE);
+ in6_rtqtimo(curvnet); /* kick off timeout first time */
+ in6_mtutimo(curvnet); /* kick off timeout first time */
+ return 1;
+}
+
+#ifdef VIMAGE
+int
+in6_detachhead(void **head, int off)
+{
+
+ callout_drain(&V_rtq_timer6);
+ callout_drain(&V_rtq_mtutimer);
+ return (1);
+}
+#endif
diff --git a/rtems/freebsd/netinet6/in6_src.c b/rtems/freebsd/netinet6/in6_src.c
new file mode 100644
index 00000000..549ce90a
--- /dev/null
+++ b/rtems/freebsd/netinet6/in6_src.c
@@ -0,0 +1,1204 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the project nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $KAME: in6_src.c,v 1.132 2003/08/26 04:42:27 keiichi Exp $
+ */
+
+/*-
+ * Copyright (c) 1982, 1986, 1991, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)in_pcb.c 8.2 (Berkeley) 1/4/94
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/freebsd/local/opt_inet.h>
+#include <rtems/freebsd/local/opt_inet6.h>
+#include <rtems/freebsd/local/opt_mpath.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/priv.h>
+#include <rtems/freebsd/sys/protosw.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/socketvar.h>
+#include <rtems/freebsd/sys/sockio.h>
+#include <rtems/freebsd/sys/sysctl.h>
+#include <rtems/freebsd/sys/errno.h>
+#include <rtems/freebsd/sys/time.h>
+#include <rtems/freebsd/sys/jail.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/sx.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/if_dl.h>
+#include <rtems/freebsd/net/route.h>
+#include <rtems/freebsd/net/if_llatbl.h>
+#ifdef RADIX_MPATH
+#include <rtems/freebsd/net/radix_mpath.h>
+#endif
+
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/in_var.h>
+#include <rtems/freebsd/netinet/in_systm.h>
+#include <rtems/freebsd/netinet/ip.h>
+#include <rtems/freebsd/netinet/in_pcb.h>
+#include <rtems/freebsd/netinet/ip_var.h>
+#include <rtems/freebsd/netinet/udp.h>
+#include <rtems/freebsd/netinet/udp_var.h>
+
+#include <rtems/freebsd/netinet6/in6_var.h>
+#include <rtems/freebsd/netinet/ip6.h>
+#include <rtems/freebsd/netinet6/in6_pcb.h>
+#include <rtems/freebsd/netinet6/ip6_var.h>
+#include <rtems/freebsd/netinet6/scope6_var.h>
+#include <rtems/freebsd/netinet6/nd6.h>
+
+static struct mtx addrsel_lock;
+#define ADDRSEL_LOCK_INIT() mtx_init(&addrsel_lock, "addrsel_lock", NULL, MTX_DEF)
+#define ADDRSEL_LOCK() mtx_lock(&addrsel_lock)
+#define ADDRSEL_UNLOCK() mtx_unlock(&addrsel_lock)
+#define ADDRSEL_LOCK_ASSERT() mtx_assert(&addrsel_lock, MA_OWNED)
+
+static struct sx addrsel_sxlock;
+#define ADDRSEL_SXLOCK_INIT() sx_init(&addrsel_sxlock, "addrsel_sxlock")
+#define ADDRSEL_SLOCK() sx_slock(&addrsel_sxlock)
+#define ADDRSEL_SUNLOCK() sx_sunlock(&addrsel_sxlock)
+#define ADDRSEL_XLOCK() sx_xlock(&addrsel_sxlock)
+#define ADDRSEL_XUNLOCK() sx_xunlock(&addrsel_sxlock)
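+
+/*
+ * Locking sketch: addrsel_lock appears to guard individual lookups in
+ * the address-selection policy queue, while addrsel_sxlock appears to
+ * serialize whole policy get/set operations that may sleep.
+ */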
+
+#define ADDR_LABEL_NOTAPP (-1)
+static VNET_DEFINE(struct in6_addrpolicy, defaultaddrpolicy);
+#define V_defaultaddrpolicy VNET(defaultaddrpolicy)
+
+VNET_DEFINE(int, ip6_prefer_tempaddr) = 0;
+
+static int selectroute __P((struct sockaddr_in6 *, struct ip6_pktopts *,
+ struct ip6_moptions *, struct route_in6 *, struct ifnet **,
+ struct rtentry **, int));
+static int in6_selectif __P((struct sockaddr_in6 *, struct ip6_pktopts *,
+ struct ip6_moptions *, struct route_in6 *ro, struct ifnet **));
+
+static struct in6_addrpolicy *lookup_addrsel_policy(struct sockaddr_in6 *);
+
+static void init_policy_queue(void);
+static int add_addrsel_policyent(struct in6_addrpolicy *);
+static int delete_addrsel_policyent(struct in6_addrpolicy *);
+static int walk_addrsel_policy __P((int (*)(struct in6_addrpolicy *, void *),
+ void *));
+static int dump_addrsel_policyent(struct in6_addrpolicy *, void *);
+static struct in6_addrpolicy *match_addrsel_policy(struct sockaddr_in6 *);
+
+/*
+ * Return the IPv6 address that is most appropriate for a given
+ * destination and the user-specified options.
+ * If necessary, this function looks up the routing table and returns
+ * an entry to the caller for later use.
+ */
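+
+/*
+ * The REPLACE/NEXT/BREAK macros below drive the source-address
+ * selection loop in in6_selectsrc().  Each body is wrapped in
+ * do { } while (0), so a plain 'continue' or 'break' would only
+ * terminate that dummy loop; the macros therefore jump to the
+ * replace:/next:/out: labels inside the candidate loop instead.
+ */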
+#define REPLACE(r) do {\
+ if ((r) < sizeof(V_ip6stat.ip6s_sources_rule) / \
+ sizeof(V_ip6stat.ip6s_sources_rule[0])) /* check for safety */ \
+ V_ip6stat.ip6s_sources_rule[(r)]++; \
+ /* { \
+ char ip6buf[INET6_ADDRSTRLEN], ip6b[INET6_ADDRSTRLEN]; \
+ printf("in6_selectsrc: replace %s with %s by %d\n", ia_best ? ip6_sprintf(ip6buf, &ia_best->ia_addr.sin6_addr) : "none", ip6_sprintf(ip6b, &ia->ia_addr.sin6_addr), (r)); \
+ } */ \
+ goto replace; \
+} while(0)
+#define NEXT(r) do {\
+ if ((r) < sizeof(V_ip6stat.ip6s_sources_rule) / \
+ sizeof(V_ip6stat.ip6s_sources_rule[0])) /* check for safety */ \
+ V_ip6stat.ip6s_sources_rule[(r)]++; \
+ /* { \
+ char ip6buf[INET6_ADDRSTRLEN], ip6b[INET6_ADDRSTRLEN]; \
+ printf("in6_selectsrc: keep %s against %s by %d\n", ia_best ? ip6_sprintf(ip6buf, &ia_best->ia_addr.sin6_addr) : "none", ip6_sprintf(ip6b, &ia->ia_addr.sin6_addr), (r)); \
+ } */ \
+ goto next; /* XXX: we can't use 'continue' here */ \
+} while(0)
+#define BREAK(r) do { \
+ if ((r) < sizeof(V_ip6stat.ip6s_sources_rule) / \
+ sizeof(V_ip6stat.ip6s_sources_rule[0])) /* check for safety */ \
+ V_ip6stat.ip6s_sources_rule[(r)]++; \
+ goto out; /* XXX: we can't use 'break' here */ \
+} while(0)
+
+int
+in6_selectsrc(struct sockaddr_in6 *dstsock, struct ip6_pktopts *opts,
+ struct inpcb *inp, struct route_in6 *ro, struct ucred *cred,
+ struct ifnet **ifpp, struct in6_addr *srcp)
+{
+ struct in6_addr dst, tmp;
+ struct ifnet *ifp = NULL;
+ struct in6_ifaddr *ia = NULL, *ia_best = NULL;
+ struct in6_pktinfo *pi = NULL;
+ int dst_scope = -1, best_scope = -1, best_matchlen = -1;
+ struct in6_addrpolicy *dst_policy = NULL, *best_policy = NULL;
+ u_int32_t odstzone;
+ int prefer_tempaddr;
+ int error;
+ struct ip6_moptions *mopts;
+
+ KASSERT(srcp != NULL, ("%s: srcp is NULL", __func__));
+
+ dst = dstsock->sin6_addr; /* make a copy for local operation */
+ if (ifpp)
+ *ifpp = NULL;
+
+ if (inp != NULL) {
+ INP_LOCK_ASSERT(inp);
+ mopts = inp->in6p_moptions;
+ } else {
+ mopts = NULL;
+ }
+
+ /*
+ * If the source address is explicitly specified by the caller,
+ * check if the requested source address is indeed a unicast address
+ * assigned to the node, and can be used as the packet's source
+ * address. If everything is okay, use the address as source.
+ */
+ if (opts && (pi = opts->ip6po_pktinfo) &&
+ !IN6_IS_ADDR_UNSPECIFIED(&pi->ipi6_addr)) {
+ struct sockaddr_in6 srcsock;
+ struct in6_ifaddr *ia6;
+
+ /* get the outgoing interface */
+ if ((error = in6_selectif(dstsock, opts, mopts, ro, &ifp)) != 0)
+ return (error);
+
+ /*
+ * determine the appropriate zone id of the source based on
+ * the zone of the destination and the outgoing interface.
+ * If the specified address is ambiguous wrt the scope zone,
+ * the interface must be specified; otherwise, ifa_ifwithaddr()
+ * will fail matching the address.
+ */
+ bzero(&srcsock, sizeof(srcsock));
+ srcsock.sin6_family = AF_INET6;
+ srcsock.sin6_len = sizeof(srcsock);
+ srcsock.sin6_addr = pi->ipi6_addr;
+ if (ifp) {
+ error = in6_setscope(&srcsock.sin6_addr, ifp, NULL);
+ if (error)
+ return (error);
+ }
+ if (cred != NULL && (error = prison_local_ip6(cred,
+ &srcsock.sin6_addr, (inp != NULL &&
+ (inp->inp_flags & IN6P_IPV6_V6ONLY) != 0))) != 0)
+ return (error);
+
+ ia6 = (struct in6_ifaddr *)ifa_ifwithaddr(
+ (struct sockaddr *)&srcsock);
+ if (ia6 == NULL ||
+ (ia6->ia6_flags & (IN6_IFF_ANYCAST | IN6_IFF_NOTREADY))) {
+ if (ia6 != NULL)
+ ifa_free(&ia6->ia_ifa);
+ return (EADDRNOTAVAIL);
+ }
+ pi->ipi6_addr = srcsock.sin6_addr; /* XXX: this overrides pi */
+ if (ifpp)
+ *ifpp = ifp;
+ bcopy(&ia6->ia_addr.sin6_addr, srcp, sizeof(*srcp));
+ ifa_free(&ia6->ia_ifa);
+ return (0);
+ }
+
+ /*
+ * Otherwise, if the socket has already bound the source, just use it.
+ */
+ if (inp != NULL && !IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr)) {
+ if (cred != NULL &&
+ (error = prison_local_ip6(cred, &inp->in6p_laddr,
+ ((inp->inp_flags & IN6P_IPV6_V6ONLY) != 0))) != 0)
+ return (error);
+ bcopy(&inp->in6p_laddr, srcp, sizeof(*srcp));
+ return (0);
+ }
+
+ /*
+ * Bypass source address selection and use the primary jail IP
+ * if requested.
+ */
+ if (cred != NULL && !prison_saddrsel_ip6(cred, srcp))
+ return (0);
+
+ /*
+ * If the address is not specified, choose the best one based on
+ * the outgoing interface and the destination address.
+ */
+ /* get the outgoing interface */
+ if ((error = in6_selectif(dstsock, opts, mopts, ro, &ifp)) != 0)
+ return (error);
+
+#ifdef DIAGNOSTIC
+ if (ifp == NULL) /* this should not happen */
+ panic("in6_selectsrc: NULL ifp");
+#endif
+ error = in6_setscope(&dst, ifp, &odstzone);
+ if (error)
+ return (error);
+
+ IN6_IFADDR_RLOCK();
+ TAILQ_FOREACH(ia, &V_in6_ifaddrhead, ia_link) {
+ int new_scope = -1, new_matchlen = -1;
+ struct in6_addrpolicy *new_policy = NULL;
+ u_int32_t srczone, osrczone, dstzone;
+ struct in6_addr src;
+ struct ifnet *ifp1 = ia->ia_ifp;
+
+ /*
+ * We'll never take an address that breaks the scope zone
+ * of the destination. We also skip an address if its zone
+ * does not contain the outgoing interface.
+ * XXX: we should probably use sin6_scope_id here.
+ */
+ if (in6_setscope(&dst, ifp1, &dstzone) ||
+ odstzone != dstzone) {
+ continue;
+ }
+ src = ia->ia_addr.sin6_addr;
+ if (in6_setscope(&src, ifp, &osrczone) ||
+ in6_setscope(&src, ifp1, &srczone) ||
+ osrczone != srczone) {
+ continue;
+ }
+
+ /* avoid unusable addresses */
+ if ((ia->ia6_flags &
+ (IN6_IFF_NOTREADY | IN6_IFF_ANYCAST | IN6_IFF_DETACHED))) {
+ continue;
+ }
+ if (!V_ip6_use_deprecated && IFA6_IS_DEPRECATED(ia))
+ continue;
+
+ /* If jailed only take addresses of the jail into account. */
+ if (cred != NULL &&
+ prison_check_ip6(cred, &ia->ia_addr.sin6_addr) != 0)
+ continue;
+
+ /* Rule 1: Prefer same address */
+ if (IN6_ARE_ADDR_EQUAL(&dst, &ia->ia_addr.sin6_addr)) {
+ ia_best = ia;
+ BREAK(1); /* there should be no better candidate */
+ }
+
+ if (ia_best == NULL)
+ REPLACE(0);
+
+ /* Rule 2: Prefer appropriate scope */
+ if (dst_scope < 0)
+ dst_scope = in6_addrscope(&dst);
+ new_scope = in6_addrscope(&ia->ia_addr.sin6_addr);
+ if (IN6_ARE_SCOPE_CMP(best_scope, new_scope) < 0) {
+ if (IN6_ARE_SCOPE_CMP(best_scope, dst_scope) < 0)
+ REPLACE(2);
+ NEXT(2);
+ } else if (IN6_ARE_SCOPE_CMP(new_scope, best_scope) < 0) {
+ if (IN6_ARE_SCOPE_CMP(new_scope, dst_scope) < 0)
+ NEXT(2);
+ REPLACE(2);
+ }
+
+ /*
+ * Rule 3: Avoid deprecated addresses. Note that the case of
+ * !ip6_use_deprecated is already rejected above.
+ */
+ if (!IFA6_IS_DEPRECATED(ia_best) && IFA6_IS_DEPRECATED(ia))
+ NEXT(3);
+ if (IFA6_IS_DEPRECATED(ia_best) && !IFA6_IS_DEPRECATED(ia))
+ REPLACE(3);
+
+ /* Rule 4: Prefer home addresses */
+ /*
+ * XXX: This is a TODO. We should probably merge the MIP6
+ * case above.
+ */
+
+ /* Rule 5: Prefer outgoing interface */
+ if (ia_best->ia_ifp == ifp && ia->ia_ifp != ifp)
+ NEXT(5);
+ if (ia_best->ia_ifp != ifp && ia->ia_ifp == ifp)
+ REPLACE(5);
+
+ /*
+ * Rule 6: Prefer matching label
+ * Note that best_policy should be non-NULL here.
+ */
+ if (dst_policy == NULL)
+ dst_policy = lookup_addrsel_policy(dstsock);
+ if (dst_policy->label != ADDR_LABEL_NOTAPP) {
+ new_policy = lookup_addrsel_policy(&ia->ia_addr);
+ if (dst_policy->label == best_policy->label &&
+ dst_policy->label != new_policy->label)
+ NEXT(6);
+ if (dst_policy->label != best_policy->label &&
+ dst_policy->label == new_policy->label)
+ REPLACE(6);
+ }
+
+ /*
+ * Rule 7: Prefer public addresses.
+ * We allow users to reverse the logic by configuring
+ * a sysctl variable, so that privacy conscious users can
+ * always prefer temporary addresses.
+ */
+ if (opts == NULL ||
+ opts->ip6po_prefer_tempaddr == IP6PO_TEMPADDR_SYSTEM) {
+ prefer_tempaddr = V_ip6_prefer_tempaddr;
+ } else if (opts->ip6po_prefer_tempaddr ==
+ IP6PO_TEMPADDR_NOTPREFER) {
+ prefer_tempaddr = 0;
+ } else
+ prefer_tempaddr = 1;
+ if (!(ia_best->ia6_flags & IN6_IFF_TEMPORARY) &&
+ (ia->ia6_flags & IN6_IFF_TEMPORARY)) {
+ if (prefer_tempaddr)
+ REPLACE(7);
+ else
+ NEXT(7);
+ }
+ if ((ia_best->ia6_flags & IN6_IFF_TEMPORARY) &&
+ !(ia->ia6_flags & IN6_IFF_TEMPORARY)) {
+ if (prefer_tempaddr)
+ NEXT(7);
+ else
+ REPLACE(7);
+ }
+
+ /*
+ * Rule 8: prefer addresses on alive interfaces.
+ * This is a KAME specific rule.
+ */
+ if ((ia_best->ia_ifp->if_flags & IFF_UP) &&
+ !(ia->ia_ifp->if_flags & IFF_UP))
+ NEXT(8);
+ if (!(ia_best->ia_ifp->if_flags & IFF_UP) &&
+ (ia->ia_ifp->if_flags & IFF_UP))
+ REPLACE(8);
+
+ /*
+ * Rule 14: Use longest matching prefix.
+ * Note: in the address selection draft, this rule is
+ * documented as "Rule 8". However, since it is also
+ * documented that this rule can be overridden, we assign
+ * a large number so that it is easy to assign smaller numbers
+ * to more preferred rules.
+ */
+ new_matchlen = in6_matchlen(&ia->ia_addr.sin6_addr, &dst);
+ if (best_matchlen < new_matchlen)
+ REPLACE(14);
+ if (new_matchlen < best_matchlen)
+ NEXT(14);
+
+ /* Rule 15 is reserved. */
+
+ /*
+ * Last resort: just keep the current candidate.
+ * Or, do we need more rules?
+ */
+ continue;
+
+ replace:
+ ia_best = ia;
+ best_scope = (new_scope >= 0 ? new_scope :
+ in6_addrscope(&ia_best->ia_addr.sin6_addr));
+ best_policy = (new_policy ? new_policy :
+ lookup_addrsel_policy(&ia_best->ia_addr));
+ best_matchlen = (new_matchlen >= 0 ? new_matchlen :
+ in6_matchlen(&ia_best->ia_addr.sin6_addr,
+ &dst));
+
+ next:
+ continue;
+
+ out:
+ break;
+ }
+
+ if ((ia = ia_best) == NULL) {
+ IN6_IFADDR_RUNLOCK();
+ return (EADDRNOTAVAIL);
+ }
+
+ /*
+ * At this point at least one of the addresses belonged to the jail
+	 * but it could still be that we want to further restrict it, e.g.
+	 * theoretically IN6_IS_ADDR_LOOPBACK.
+ * It must not be IN6_IS_ADDR_UNSPECIFIED anymore.
+ * prison_local_ip6() will fix an IN6_IS_ADDR_LOOPBACK but should
+ * let all others previously selected pass.
+ * Use tmp to not change ::1 on lo0 to the primary jail address.
+ */
+ tmp = ia->ia_addr.sin6_addr;
+ if (cred != NULL && prison_local_ip6(cred, &tmp, (inp != NULL &&
+ (inp->inp_flags & IN6P_IPV6_V6ONLY) != 0)) != 0) {
+ IN6_IFADDR_RUNLOCK();
+ return (EADDRNOTAVAIL);
+ }
+
+ if (ifpp)
+ *ifpp = ifp;
+
+ bcopy(&tmp, srcp, sizeof(*srcp));
+ IN6_IFADDR_RUNLOCK();
+ return (0);
+}
+
+/*
+ * clone - meaningful only for bsdi and freebsd
+ */
+static int
+selectroute(struct sockaddr_in6 *dstsock, struct ip6_pktopts *opts,
+ struct ip6_moptions *mopts, struct route_in6 *ro,
+ struct ifnet **retifp, struct rtentry **retrt, int norouteok)
+{
+ int error = 0;
+ struct ifnet *ifp = NULL;
+ struct rtentry *rt = NULL;
+ struct sockaddr_in6 *sin6_next;
+ struct in6_pktinfo *pi = NULL;
+ struct in6_addr *dst = &dstsock->sin6_addr;
+#if 0
+ char ip6buf[INET6_ADDRSTRLEN];
+
+ if (dstsock->sin6_addr.s6_addr32[0] == 0 &&
+ dstsock->sin6_addr.s6_addr32[1] == 0 &&
+ !IN6_IS_ADDR_LOOPBACK(&dstsock->sin6_addr)) {
+ printf("in6_selectroute: strange destination %s\n",
+ ip6_sprintf(ip6buf, &dstsock->sin6_addr));
+ } else {
+ printf("in6_selectroute: destination = %s%%%d\n",
+ ip6_sprintf(ip6buf, &dstsock->sin6_addr),
+ dstsock->sin6_scope_id); /* for debug */
+ }
+#endif
+
+	/* If the caller specifies the outgoing interface explicitly, use it. */
+ if (opts && (pi = opts->ip6po_pktinfo) != NULL && pi->ipi6_ifindex) {
+ /* XXX boundary check is assumed to be already done. */
+ ifp = ifnet_byindex(pi->ipi6_ifindex);
+ if (ifp != NULL &&
+ (norouteok || retrt == NULL ||
+ IN6_IS_ADDR_MULTICAST(dst))) {
+ /*
+ * we do not have to check or get the route for
+ * multicast.
+ */
+ goto done;
+ } else
+ goto getroute;
+ }
+
+ /*
+ * If the destination address is a multicast address and the outgoing
+ * interface for the address is specified by the caller, use it.
+ */
+ if (IN6_IS_ADDR_MULTICAST(dst) &&
+ mopts != NULL && (ifp = mopts->im6o_multicast_ifp) != NULL) {
+ goto done; /* we do not need a route for multicast. */
+ }
+
+ getroute:
+ /*
+ * If the next hop address for the packet is specified by the caller,
+ * use it as the gateway.
+ */
+ if (opts && opts->ip6po_nexthop) {
+ struct route_in6 *ron;
+ struct llentry *la;
+
+ sin6_next = satosin6(opts->ip6po_nexthop);
+
+ /* at this moment, we only support AF_INET6 next hops */
+ if (sin6_next->sin6_family != AF_INET6) {
+ error = EAFNOSUPPORT; /* or should we proceed? */
+ goto done;
+ }
+
+ /*
+ * If the next hop is an IPv6 address, then the node identified
+ * by that address must be a neighbor of the sending host.
+ */
+ ron = &opts->ip6po_nextroute;
+		/*
+		 * XXX: the next-hop validation here is incomplete (see the
+		 * disabled block below) and still needs to be fixed.
+		 */
+
+		if (ron->ro_rt == NULL) {
+			rtalloc((struct route *)ron); /* multi path case? */
+			if (ron->ro_rt == NULL) {
+				error = EHOSTUNREACH;
+				goto done;
+			}
+		}
+
+ rt = ron->ro_rt;
+ ifp = rt->rt_ifp;
+ IF_AFDATA_LOCK(ifp);
+ la = lla_lookup(LLTABLE6(ifp), 0, (struct sockaddr *)&sin6_next->sin6_addr);
+ IF_AFDATA_UNLOCK(ifp);
+ if (la != NULL)
+ LLE_RUNLOCK(la);
+ else {
+ error = EHOSTUNREACH;
+ goto done;
+ }
+#if 0
+ if ((ron->ro_rt &&
+ (ron->ro_rt->rt_flags & (RTF_UP | RTF_LLINFO)) !=
+ (RTF_UP | RTF_LLINFO)) ||
+ !IN6_ARE_ADDR_EQUAL(&satosin6(&ron->ro_dst)->sin6_addr,
+ &sin6_next->sin6_addr)) {
+ if (ron->ro_rt) {
+ RTFREE(ron->ro_rt);
+ ron->ro_rt = NULL;
+ }
+ *satosin6(&ron->ro_dst) = *sin6_next;
+ }
+ if (ron->ro_rt == NULL) {
+ rtalloc((struct route *)ron); /* multi path case? */
+ if (ron->ro_rt == NULL ||
+ !(ron->ro_rt->rt_flags & RTF_LLINFO)) {
+ if (ron->ro_rt) {
+ RTFREE(ron->ro_rt);
+ ron->ro_rt = NULL;
+ }
+ error = EHOSTUNREACH;
+ goto done;
+ }
+ }
+#endif
+
+ /*
+ * When cloning is required, try to allocate a route to the
+ * destination so that the caller can store path MTU
+ * information.
+ */
+ goto done;
+ }
+
+ /*
+ * Use a cached route if it exists and is valid, else try to allocate
+ * a new one. Note that we should check the address family of the
+	 * cached destination, in case the cache is shared with IPv4.
+ */
+ if (ro) {
+ if (ro->ro_rt &&
+ (!(ro->ro_rt->rt_flags & RTF_UP) ||
+ ((struct sockaddr *)(&ro->ro_dst))->sa_family != AF_INET6 ||
+ !IN6_ARE_ADDR_EQUAL(&satosin6(&ro->ro_dst)->sin6_addr,
+ dst))) {
+ RTFREE(ro->ro_rt);
+ ro->ro_rt = (struct rtentry *)NULL;
+ }
+ if (ro->ro_rt == (struct rtentry *)NULL) {
+ struct sockaddr_in6 *sa6;
+
+ /* No route yet, so try to acquire one */
+ bzero(&ro->ro_dst, sizeof(struct sockaddr_in6));
+ sa6 = (struct sockaddr_in6 *)&ro->ro_dst;
+ *sa6 = *dstsock;
+ sa6->sin6_scope_id = 0;
+
+#ifdef RADIX_MPATH
+ rtalloc_mpath((struct route *)ro,
+ ntohl(sa6->sin6_addr.s6_addr32[3]));
+#else
+ ro->ro_rt = rtalloc1(&((struct route *)ro)
+ ->ro_dst, 0, 0UL);
+ if (ro->ro_rt)
+ RT_UNLOCK(ro->ro_rt);
+#endif
+ }
+
+ /*
+ * do not care about the result if we have the nexthop
+ * explicitly specified.
+ */
+ if (opts && opts->ip6po_nexthop)
+ goto done;
+
+ if (ro->ro_rt) {
+ ifp = ro->ro_rt->rt_ifp;
+
+ if (ifp == NULL) { /* can this really happen? */
+ RTFREE(ro->ro_rt);
+ ro->ro_rt = NULL;
+ }
+ }
+ if (ro->ro_rt == NULL)
+ error = EHOSTUNREACH;
+ rt = ro->ro_rt;
+
+ /*
+ * Check if the outgoing interface conflicts with
+ * the interface specified by ipi6_ifindex (if specified).
+		 * Note that the loopback interface is always okay
+		 * (this may happen when we are sending a packet to one of
+		 * our own addresses).
+ */
+ if (ifp && opts && opts->ip6po_pktinfo &&
+ opts->ip6po_pktinfo->ipi6_ifindex) {
+ if (!(ifp->if_flags & IFF_LOOPBACK) &&
+ ifp->if_index !=
+ opts->ip6po_pktinfo->ipi6_ifindex) {
+ error = EHOSTUNREACH;
+ goto done;
+ }
+ }
+ }
+
+ done:
+ if (ifp == NULL && rt == NULL) {
+ /*
+		 * This can happen if the caller passed neither a cached route
+		 * nor any other hints.  We treat this case as an error.
+ */
+ error = EHOSTUNREACH;
+ }
+ if (error == EHOSTUNREACH)
+ V_ip6stat.ip6s_noroute++;
+
+ if (retifp != NULL) {
+ *retifp = ifp;
+
+ /*
+ * Adjust the "outgoing" interface. If we're going to loop
+ * the packet back to ourselves, the ifp would be the loopback
+		 * interface.  However, we'd rather know the interface
+		 * associated with the destination address (which should
+		 * probably be one of our own addresses).
+ */
+ if (rt) {
+ if ((rt->rt_ifp->if_flags & IFF_LOOPBACK) &&
+ (rt->rt_gateway->sa_family == AF_LINK))
+ *retifp =
+ ifnet_byindex(((struct sockaddr_dl *)
+ rt->rt_gateway)->sdl_index);
+ }
+ }
+
+ if (retrt != NULL)
+ *retrt = rt; /* rt may be NULL */
+
+ return (error);
+}
+
+static int
+in6_selectif(struct sockaddr_in6 *dstsock, struct ip6_pktopts *opts,
+ struct ip6_moptions *mopts, struct route_in6 *ro, struct ifnet **retifp)
+{
+ int error;
+ struct route_in6 sro;
+ struct rtentry *rt = NULL;
+
+ if (ro == NULL) {
+ bzero(&sro, sizeof(sro));
+ ro = &sro;
+ }
+
+ if ((error = selectroute(dstsock, opts, mopts, ro, retifp,
+ &rt, 1)) != 0) {
+ if (ro == &sro && rt && rt == sro.ro_rt)
+ RTFREE(rt);
+ return (error);
+ }
+
+ /*
+ * do not use a rejected or black hole route.
+ * XXX: this check should be done in the L2 output routine.
+ * However, if we skipped this check here, we'd see the following
+ * scenario:
+ * - install a rejected route for a scoped address prefix
+ * (like fe80::/10)
+ * - send a packet to a destination that matches the scoped prefix,
+ * with ambiguity about the scope zone.
+ * - pick the outgoing interface from the route, and disambiguate the
+ * scope zone with the interface.
+ * - ip6_output() would try to get another route with the "new"
+ * destination, which may be valid.
+ * - we'd see no error on output.
+ * Although this may not be very harmful, it is still confusing.
+ * We thus reject the case here.
+ */
+ if (rt && (rt->rt_flags & (RTF_REJECT | RTF_BLACKHOLE))) {
+ int flags = (rt->rt_flags & RTF_HOST ? EHOSTUNREACH : ENETUNREACH);
+
+ if (ro == &sro && rt && rt == sro.ro_rt)
+ RTFREE(rt);
+ return (flags);
+ }
+
+ if (ro == &sro && rt && rt == sro.ro_rt)
+ RTFREE(rt);
+ return (0);
+}
+
+/*
+ * clone - meaningful only for bsdi and freebsd
+ */
+int
+in6_selectroute(struct sockaddr_in6 *dstsock, struct ip6_pktopts *opts,
+ struct ip6_moptions *mopts, struct route_in6 *ro,
+ struct ifnet **retifp, struct rtentry **retrt)
+{
+
+ return (selectroute(dstsock, opts, mopts, ro, retifp,
+ retrt, 0));
+}
+
+/*
+ * Default hop limit selection. The precedence is as follows:
+ * 1. Hoplimit value specified via ioctl.
+ * 2. (If the outgoing interface is detected) the current
+ * hop limit of the interface specified by router advertisement.
+ * 3. The system default hoplimit.
+ */
+int
+in6_selecthlim(struct inpcb *in6p, struct ifnet *ifp)
+{
+
+ if (in6p && in6p->in6p_hops >= 0)
+ return (in6p->in6p_hops);
+ else if (ifp)
+ return (ND_IFINFO(ifp)->chlim);
+ else if (in6p && !IN6_IS_ADDR_UNSPECIFIED(&in6p->in6p_faddr)) {
+ struct route_in6 ro6;
+ struct ifnet *lifp;
+
+ bzero(&ro6, sizeof(ro6));
+ ro6.ro_dst.sin6_family = AF_INET6;
+ ro6.ro_dst.sin6_len = sizeof(struct sockaddr_in6);
+ ro6.ro_dst.sin6_addr = in6p->in6p_faddr;
+ rtalloc((struct route *)&ro6);
+ if (ro6.ro_rt) {
+ lifp = ro6.ro_rt->rt_ifp;
+ RTFREE(ro6.ro_rt);
+ if (lifp)
+ return (ND_IFINFO(lifp)->chlim);
+ } else
+ return (V_ip6_defhlim);
+ }
+ return (V_ip6_defhlim);
+}
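+
+/*
+ * Illustrative caller sketch for the precedence above (hypothetical,
+ * not part of this interface): a socket that never set
+ * IPV6_UNICAST_HOPS typically leaves in6p_hops at -1, so the hop
+ * limit comes from the outgoing interface's ND information, or from
+ * V_ip6_defhlim as the last resort:
+ *
+ *	ip6->ip6_hlim = in6_selecthlim(inp, ifp);
+ */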
+
+/*
+ * XXX: this is borrowed from in6_pcbbind(). If possible, we should
+ * share this function by all *bsd*...
+ */
+int
+in6_pcbsetport(struct in6_addr *laddr, struct inpcb *inp, struct ucred *cred)
+{
+ struct socket *so = inp->inp_socket;
+ u_int16_t lport = 0, first, last, *lastport;
+ int count, error, wild = 0, dorandom;
+ struct inpcbinfo *pcbinfo = inp->inp_pcbinfo;
+
+ INP_INFO_WLOCK_ASSERT(pcbinfo);
+ INP_WLOCK_ASSERT(inp);
+
+ error = prison_local_ip6(cred, laddr,
+ ((inp->inp_flags & IN6P_IPV6_V6ONLY) != 0));
+ if (error)
+ return (error);
+
+ /* XXX: this is redundant when called from in6_pcbbind */
+ if ((so->so_options & (SO_REUSEADDR|SO_REUSEPORT)) == 0)
+ wild = INPLOOKUP_WILDCARD;
+
+ inp->inp_flags |= INP_ANONPORT;
+
+ if (inp->inp_flags & INP_HIGHPORT) {
+ first = V_ipport_hifirstauto; /* sysctl */
+ last = V_ipport_hilastauto;
+ lastport = &pcbinfo->ipi_lasthi;
+ } else if (inp->inp_flags & INP_LOWPORT) {
+ error = priv_check_cred(cred, PRIV_NETINET_RESERVEDPORT, 0);
+ if (error)
+ return (error);
+ first = V_ipport_lowfirstauto; /* 1023 */
+ last = V_ipport_lowlastauto; /* 600 */
+ lastport = &pcbinfo->ipi_lastlow;
+ } else {
+ first = V_ipport_firstauto; /* sysctl */
+ last = V_ipport_lastauto;
+ lastport = &pcbinfo->ipi_lastport;
+ }
+
+ /*
+ * For UDP, use random port allocation as long as the user
+ * allows it. For TCP (and as of yet unknown) connections,
+ * use random port allocation only if the user allows it AND
+ * ipport_tick() allows it.
+ */
+ if (V_ipport_randomized &&
+ (!V_ipport_stoprandom || pcbinfo == &V_udbinfo))
+ dorandom = 1;
+ else
+ dorandom = 0;
+ /*
+ * It makes no sense to do random port allocation if
+ * we have only one port available.
+ */
+ if (first == last)
+ dorandom = 0;
+ /* Make sure to not include UDP packets in the count. */
+ if (pcbinfo != &V_udbinfo)
+ V_ipport_tcpallocs++;
+
+ /*
+ * Instead of having two loops further down counting up or down
+ * make sure that first is always <= last and go with only one
+ * code path implementing all logic.
+ */
+ if (first > last) {
+ u_int16_t aux;
+
+ aux = first;
+ first = last;
+ last = aux;
+ }
+
+ if (dorandom)
+ *lastport = first + (arc4random() % (last - first));
+
+ count = last - first;
+
+ do {
+ if (count-- < 0) { /* completely used? */
+ /* Undo an address bind that may have occurred. */
+ inp->in6p_laddr = in6addr_any;
+ return (EADDRNOTAVAIL);
+ }
+ ++*lastport;
+ if (*lastport < first || *lastport > last)
+ *lastport = first;
+ lport = htons(*lastport);
+ } while (in6_pcblookup_local(pcbinfo, &inp->in6p_laddr,
+ lport, wild, cred));
+
+ inp->inp_lport = lport;
+ if (in_pcbinshash(inp) != 0) {
+ inp->in6p_laddr = in6addr_any;
+ inp->inp_lport = 0;
+ return (EAGAIN);
+ }
+
+ return (0);
+}
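+
+/*
+ * Worked example of the allocation loop above (illustrative values):
+ * with first = 49152 and last = 65535, a randomized start might set
+ * *lastport to 53124; each pass then increments *lastport, wrapping
+ * back to first once it passes last, until in6_pcblookup_local()
+ * finds no conflicting PCB, or count underflows and EADDRNOTAVAIL is
+ * returned.
+ */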
+
+void
+addrsel_policy_init(void)
+{
+
+ init_policy_queue();
+
+ /* initialize the "last resort" policy */
+ bzero(&V_defaultaddrpolicy, sizeof(V_defaultaddrpolicy));
+ V_defaultaddrpolicy.label = ADDR_LABEL_NOTAPP;
+
+ if (!IS_DEFAULT_VNET(curvnet))
+ return;
+
+ ADDRSEL_LOCK_INIT();
+ ADDRSEL_SXLOCK_INIT();
+}
+
+static struct in6_addrpolicy *
+lookup_addrsel_policy(struct sockaddr_in6 *key)
+{
+ struct in6_addrpolicy *match = NULL;
+
+ ADDRSEL_LOCK();
+ match = match_addrsel_policy(key);
+
+ if (match == NULL)
+ match = &V_defaultaddrpolicy;
+ else
+ match->use++;
+ ADDRSEL_UNLOCK();
+
+ return (match);
+}
+
+/*
+ * Subroutines to manage the address selection policy table via sysctl.
+ */
+struct walkarg {
+ struct sysctl_req *w_req;
+};
+
+static int in6_src_sysctl(SYSCTL_HANDLER_ARGS);
+SYSCTL_DECL(_net_inet6_ip6);
+SYSCTL_NODE(_net_inet6_ip6, IPV6CTL_ADDRCTLPOLICY, addrctlpolicy,
+ CTLFLAG_RD, in6_src_sysctl, "");
+
+static int
+in6_src_sysctl(SYSCTL_HANDLER_ARGS)
+{
+ struct walkarg w;
+
+ if (req->newptr)
+ return EPERM;
+
+ bzero(&w, sizeof(w));
+ w.w_req = req;
+
+ return (walk_addrsel_policy(dump_addrsel_policyent, &w));
+}
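+
+/*
+ * The handler above is read-only: each in6_addrpolicy entry is copied
+ * out verbatim by dump_addrsel_policyent(), so userland (e.g. the
+ * ip6addrctl(8) utility) can dump the whole table in one sysctl read.
+ * Modifications go through in6_src_ioctl() below instead.
+ */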
+
+int
+in6_src_ioctl(u_long cmd, caddr_t data)
+{
+ int i;
+ struct in6_addrpolicy ent0;
+
+ if (cmd != SIOCAADDRCTL_POLICY && cmd != SIOCDADDRCTL_POLICY)
+ return (EOPNOTSUPP); /* check for safety */
+
+ ent0 = *(struct in6_addrpolicy *)data;
+
+ if (ent0.label == ADDR_LABEL_NOTAPP)
+ return (EINVAL);
+ /* check that the prefix mask is contiguous. */
+ if (in6_mask2len(&ent0.addrmask.sin6_addr, NULL) < 0)
+ return (EINVAL);
+ /* clear trailing garbage (if any) from the prefix address. */
+ for (i = 0; i < 4; i++) {
+ ent0.addr.sin6_addr.s6_addr32[i] &=
+ ent0.addrmask.sin6_addr.s6_addr32[i];
+ }
+ ent0.use = 0;
+
+ switch (cmd) {
+ case SIOCAADDRCTL_POLICY:
+ return (add_addrsel_policyent(&ent0));
+ case SIOCDADDRCTL_POLICY:
+ return (delete_addrsel_policyent(&ent0));
+ }
+
+ return (0); /* XXX: appease compilers */
+}
+
+/*
+ * The following is an implementation of the policy table using a
+ * simple tail queue.
+ * XXX: such details should be hidden.
+ * XXX: an implementation using a binary tree would be more efficient.
+ */
+struct addrsel_policyent {
+ TAILQ_ENTRY(addrsel_policyent) ape_entry;
+ struct in6_addrpolicy ape_policy;
+};
+
+TAILQ_HEAD(addrsel_policyhead, addrsel_policyent);
+
+static VNET_DEFINE(struct addrsel_policyhead, addrsel_policytab);
+#define V_addrsel_policytab VNET(addrsel_policytab)
+
+static void
+init_policy_queue(void)
+{
+
+ TAILQ_INIT(&V_addrsel_policytab);
+}
+
+static int
+add_addrsel_policyent(struct in6_addrpolicy *newpolicy)
+{
+ struct addrsel_policyent *new, *pol;
+
+ new = malloc(sizeof(*new), M_IFADDR,
+ M_WAITOK);
+ ADDRSEL_XLOCK();
+ ADDRSEL_LOCK();
+
+ /* duplication check */
+ TAILQ_FOREACH(pol, &V_addrsel_policytab, ape_entry) {
+ if (IN6_ARE_ADDR_EQUAL(&newpolicy->addr.sin6_addr,
+ &pol->ape_policy.addr.sin6_addr) &&
+ IN6_ARE_ADDR_EQUAL(&newpolicy->addrmask.sin6_addr,
+ &pol->ape_policy.addrmask.sin6_addr)) {
+ ADDRSEL_UNLOCK();
+ ADDRSEL_XUNLOCK();
+ free(new, M_IFADDR);
+ return (EEXIST); /* or override it? */
+ }
+ }
+
+ bzero(new, sizeof(*new));
+
+ /* XXX: should validate entry */
+ new->ape_policy = *newpolicy;
+
+ TAILQ_INSERT_TAIL(&V_addrsel_policytab, new, ape_entry);
+ ADDRSEL_UNLOCK();
+ ADDRSEL_XUNLOCK();
+
+ return (0);
+}
+
+static int
+delete_addrsel_policyent(struct in6_addrpolicy *key)
+{
+ struct addrsel_policyent *pol;
+
+ ADDRSEL_XLOCK();
+ ADDRSEL_LOCK();
+
+ /* search for the entry in the table */
+ TAILQ_FOREACH(pol, &V_addrsel_policytab, ape_entry) {
+ if (IN6_ARE_ADDR_EQUAL(&key->addr.sin6_addr,
+ &pol->ape_policy.addr.sin6_addr) &&
+ IN6_ARE_ADDR_EQUAL(&key->addrmask.sin6_addr,
+ &pol->ape_policy.addrmask.sin6_addr)) {
+ break;
+ }
+ }
+ if (pol == NULL) {
+ ADDRSEL_UNLOCK();
+ ADDRSEL_XUNLOCK();
+ return (ESRCH);
+ }
+
+ TAILQ_REMOVE(&V_addrsel_policytab, pol, ape_entry);
+ ADDRSEL_UNLOCK();
+ ADDRSEL_XUNLOCK();
+
+ return (0);
+}
+
+static int
+walk_addrsel_policy(int (*callback)(struct in6_addrpolicy *, void *),
+ void *w)
+{
+ struct addrsel_policyent *pol;
+ int error = 0;
+
+ ADDRSEL_SLOCK();
+ TAILQ_FOREACH(pol, &V_addrsel_policytab, ape_entry) {
+ if ((error = (*callback)(&pol->ape_policy, w)) != 0) {
+ ADDRSEL_SUNLOCK();
+ return (error);
+ }
+ }
+ ADDRSEL_SUNLOCK();
+ return (error);
+}
+
+static int
+dump_addrsel_policyent(struct in6_addrpolicy *pol, void *arg)
+{
+ int error = 0;
+ struct walkarg *w = arg;
+
+ error = SYSCTL_OUT(w->w_req, pol, sizeof(*pol));
+
+ return (error);
+}
+
+static struct in6_addrpolicy *
+match_addrsel_policy(struct sockaddr_in6 *key)
+{
+ struct addrsel_policyent *pent;
+ struct in6_addrpolicy *bestpol = NULL, *pol;
+ int matchlen, bestmatchlen = -1;
+ u_char *mp, *ep, *k, *p, m;
+
+ TAILQ_FOREACH(pent, &V_addrsel_policytab, ape_entry) {
+ matchlen = 0;
+
+ pol = &pent->ape_policy;
+ mp = (u_char *)&pol->addrmask.sin6_addr;
+ ep = mp + 16; /* XXX: scope field? */
+ k = (u_char *)&key->sin6_addr;
+ p = (u_char *)&pol->addr.sin6_addr;
+ for (; mp < ep && *mp; mp++, k++, p++) {
+ m = *mp;
+ if ((*k & m) != *p)
+ goto next; /* no match */
+ if (m == 0xff) /* shortcut for a typical case */
+ matchlen += 8;
+ else {
+ while (m >= 0x80) {
+ matchlen++;
+ m <<= 1;
+ }
+ }
+ }
+
+ /* matched. check if this is better than the current best. */
+ if (bestpol == NULL ||
+ matchlen > bestmatchlen) {
+ bestpol = pol;
+ bestmatchlen = matchlen;
+ }
+
+ next:
+ continue;
+ }
+
+ return (bestpol);
+}
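+
+/*
+ * Worked example of the longest-match selection above (illustrative,
+ * with two RFC 3484-style entries): given ::ffff:0:0/96 and ::/0, a
+ * key of ::ffff:192.0.2.1 matches both, but the /96 entry wins with
+ * matchlen 96 against the default's 0. On equal matchlen the earlier
+ * entry is kept, since a later match must be strictly longer to
+ * replace the current best.
+ */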
diff --git a/rtems/freebsd/netinet6/in6_var.h b/rtems/freebsd/netinet6/in6_var.h
new file mode 100644
index 00000000..f999191f
--- /dev/null
+++ b/rtems/freebsd/netinet6/in6_var.h
@@ -0,0 +1,786 @@
+/*-
+ * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the project nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $KAME: in6_var.h,v 1.56 2001/03/29 05:34:31 itojun Exp $
+ */
+
+/*-
+ * Copyright (c) 1985, 1986, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)in_var.h 8.1 (Berkeley) 6/10/93
+ * $FreeBSD$
+ */
+
+#ifndef _NETINET6_IN6_VAR_HH_
+#define _NETINET6_IN6_VAR_HH_
+
+#include <rtems/freebsd/sys/tree.h>
+
+#ifdef _KERNEL
+#include <rtems/freebsd/sys/libkern.h>
+#endif
+
+/*
+ * Interface address, Internet version. One of these structures
+ * is allocated for each interface with an Internet address.
+ * The ifaddr structure contains the protocol-independent part
+ * of the structure and is assumed to be first.
+ */
+
+/*
+ * pltime/vltime are just for future reference (required to implement
+ * the 2-hour rule for hosts). They should never be modified by
+ * nd6_timeout or anywhere else.
+ * userland -> kernel: accept pltime/vltime
+ * kernel -> userland: throw up everything
+ * in kernel: modify preferred/expire only
+ */
+struct in6_addrlifetime {
+ time_t ia6t_expire; /* valid lifetime expiration time */
+ time_t ia6t_preferred; /* preferred lifetime expiration time */
+ u_int32_t ia6t_vltime; /* valid lifetime */
+ u_int32_t ia6t_pltime; /* prefix lifetime */
+};
+
+struct nd_ifinfo;
+struct scope6_id;
+struct lltable;
+struct mld_ifinfo;
+
+struct in6_ifextra {
+ struct in6_ifstat *in6_ifstat;
+ struct icmp6_ifstat *icmp6_ifstat;
+ struct nd_ifinfo *nd_ifinfo;
+ struct scope6_id *scope6_id;
+ struct lltable *lltable;
+ struct mld_ifinfo *mld_ifinfo;
+};
+
+#define LLTABLE6(ifp) (((struct in6_ifextra *)(ifp)->if_afdata[AF_INET6])->lltable)
+
+struct in6_ifaddr {
+ struct ifaddr ia_ifa; /* protocol-independent info */
+#define ia_ifp ia_ifa.ifa_ifp
+#define ia_flags ia_ifa.ifa_flags
+ struct sockaddr_in6 ia_addr; /* interface address */
+ struct sockaddr_in6 ia_net; /* network number of interface */
+ struct sockaddr_in6 ia_dstaddr; /* space for destination addr */
+ struct sockaddr_in6 ia_prefixmask; /* prefix mask */
+ u_int32_t ia_plen; /* prefix length */
+ TAILQ_ENTRY(in6_ifaddr) ia_link; /* list of IPv6 addresses */
+ int ia6_flags;
+
+ struct in6_addrlifetime ia6_lifetime;
+ time_t ia6_createtime; /* the creation time of this address, which is
+ * currently used for temporary addresses only.
+ */
+ time_t ia6_updatetime;
+
+ /* back pointer to the ND prefix (for autoconfigured addresses only) */
+ struct nd_prefix *ia6_ndpr;
+
+ /* multicast addresses joined from the kernel */
+ LIST_HEAD(, in6_multi_mship) ia6_memberships;
+};
+
+/* List of in6_ifaddr's. */
+TAILQ_HEAD(in6_ifaddrhead, in6_ifaddr);
+
+/* control structure to manage address selection policy */
+struct in6_addrpolicy {
+ struct sockaddr_in6 addr; /* prefix address */
+ struct sockaddr_in6 addrmask; /* prefix mask */
+ int preced; /* precedence */
+ int label; /* matching label */
+ u_quad_t use; /* statistics */
+};
+
+/*
+ * IPv6 interface statistics, as defined in RFC2465 Ipv6IfStatsEntry (p12).
+ */
+struct in6_ifstat {
+ u_quad_t ifs6_in_receive; /* # of total input datagrams */
+ u_quad_t ifs6_in_hdrerr; /* # of datagrams with invalid hdr */
+ u_quad_t ifs6_in_toobig; /* # of datagrams exceeded MTU */
+ u_quad_t ifs6_in_noroute; /* # of datagrams with no route */
+ u_quad_t ifs6_in_addrerr; /* # of datagrams with invalid dst */
+ u_quad_t ifs6_in_protounknown; /* # of datagrams with unknown proto */
+ /* NOTE: increment on final dst if */
+ u_quad_t ifs6_in_truncated; /* # of truncated datagrams */
+ u_quad_t ifs6_in_discard; /* # of discarded datagrams */
+ /* NOTE: fragment timeout is not here */
+ u_quad_t ifs6_in_deliver; /* # of datagrams delivered to ULP */
+ /* NOTE: increment on final dst if */
+ u_quad_t ifs6_out_forward; /* # of datagrams forwarded */
+ /* NOTE: increment on outgoing if */
+ u_quad_t ifs6_out_request; /* # of outgoing datagrams from ULP */
+ /* NOTE: does not include forwards */
+ u_quad_t ifs6_out_discard; /* # of discarded datagrams */
+ u_quad_t ifs6_out_fragok; /* # of datagrams fragmented */
+ u_quad_t ifs6_out_fragfail; /* # of datagrams failed on fragment */
+ u_quad_t ifs6_out_fragcreat; /* # of fragment datagrams */
+ /* NOTE: this is # after fragment */
+ u_quad_t ifs6_reass_reqd; /* # of incoming fragmented packets */
+ /* NOTE: increment on final dst if */
+ u_quad_t ifs6_reass_ok; /* # of reassembled packets */
+ /* NOTE: this is # after reass */
+ /* NOTE: increment on final dst if */
+ u_quad_t ifs6_reass_fail; /* # of reass failures */
+ /* NOTE: may not be packet count */
+ /* NOTE: increment on final dst if */
+ u_quad_t ifs6_in_mcast; /* # of inbound multicast datagrams */
+ u_quad_t ifs6_out_mcast; /* # of outbound multicast datagrams */
+};
+
+/*
+ * ICMPv6 interface statistics, as defined in RFC2466 Ipv6IfIcmpEntry.
+ * XXX: I'm not sure if this file is the right place for this structure...
+ */
+struct icmp6_ifstat {
+ /*
+ * Input statistics
+ */
+ /* ipv6IfIcmpInMsgs, total # of input messages */
+ u_quad_t ifs6_in_msg;
+ /* ipv6IfIcmpInErrors, # of input error messages */
+ u_quad_t ifs6_in_error;
+ /* ipv6IfIcmpInDestUnreachs, # of input dest unreach errors */
+ u_quad_t ifs6_in_dstunreach;
+ /* ipv6IfIcmpInAdminProhibs, # of input administratively prohibited errs */
+ u_quad_t ifs6_in_adminprohib;
+ /* ipv6IfIcmpInTimeExcds, # of input time exceeded errors */
+ u_quad_t ifs6_in_timeexceed;
+ /* ipv6IfIcmpInParmProblems, # of input parameter problem errors */
+ u_quad_t ifs6_in_paramprob;
+ /* ipv6IfIcmpInPktTooBigs, # of input packet too big errors */
+ u_quad_t ifs6_in_pkttoobig;
+ /* ipv6IfIcmpInEchos, # of input echo requests */
+ u_quad_t ifs6_in_echo;
+ /* ipv6IfIcmpInEchoReplies, # of input echo replies */
+ u_quad_t ifs6_in_echoreply;
+ /* ipv6IfIcmpInRouterSolicits, # of input router solicitations */
+ u_quad_t ifs6_in_routersolicit;
+ /* ipv6IfIcmpInRouterAdvertisements, # of input router advertisements */
+ u_quad_t ifs6_in_routeradvert;
+ /* ipv6IfIcmpInNeighborSolicits, # of input neighbor solicitations */
+ u_quad_t ifs6_in_neighborsolicit;
+ /* ipv6IfIcmpInNeighborAdvertisements, # of input neighbor advertisements */
+ u_quad_t ifs6_in_neighboradvert;
+ /* ipv6IfIcmpInRedirects, # of input redirects */
+ u_quad_t ifs6_in_redirect;
+ /* ipv6IfIcmpInGroupMembQueries, # of input MLD queries */
+ u_quad_t ifs6_in_mldquery;
+ /* ipv6IfIcmpInGroupMembResponses, # of input MLD reports */
+ u_quad_t ifs6_in_mldreport;
+ /* ipv6IfIcmpInGroupMembReductions, # of input MLD done */
+ u_quad_t ifs6_in_mlddone;
+
+ /*
+ * Output statistics. We should solve the unresolved routing problem...
+ */
+ /* ipv6IfIcmpOutMsgs, total # of output messages */
+ u_quad_t ifs6_out_msg;
+ /* ipv6IfIcmpOutErrors, # of output error messages */
+ u_quad_t ifs6_out_error;
+ /* ipv6IfIcmpOutDestUnreachs, # of output dest unreach errors */
+ u_quad_t ifs6_out_dstunreach;
+ /* ipv6IfIcmpOutAdminProhibs, # of output administratively prohibited errs */
+ u_quad_t ifs6_out_adminprohib;
+ /* ipv6IfIcmpOutTimeExcds, # of output time exceeded errors */
+ u_quad_t ifs6_out_timeexceed;
+ /* ipv6IfIcmpOutParmProblems, # of output parameter problem errors */
+ u_quad_t ifs6_out_paramprob;
+ /* ipv6IfIcmpOutPktTooBigs, # of output packet too big errors */
+ u_quad_t ifs6_out_pkttoobig;
+ /* ipv6IfIcmpOutEchos, # of output echo requests */
+ u_quad_t ifs6_out_echo;
+ /* ipv6IfIcmpOutEchoReplies, # of output echo replies */
+ u_quad_t ifs6_out_echoreply;
+ /* ipv6IfIcmpOutRouterSolicits, # of output router solicitations */
+ u_quad_t ifs6_out_routersolicit;
+ /* ipv6IfIcmpOutRouterAdvertisements, # of output router advertisements */
+ u_quad_t ifs6_out_routeradvert;
+ /* ipv6IfIcmpOutNeighborSolicits, # of output neighbor solicitations */
+ u_quad_t ifs6_out_neighborsolicit;
+ /* ipv6IfIcmpOutNeighborAdvertisements, # of output neighbor advertisements */
+ u_quad_t ifs6_out_neighboradvert;
+ /* ipv6IfIcmpOutRedirects, # of output redirects */
+ u_quad_t ifs6_out_redirect;
+ /* ipv6IfIcmpOutGroupMembQueries, # of output MLD queries */
+ u_quad_t ifs6_out_mldquery;
+ /* ipv6IfIcmpOutGroupMembResponses, # of output MLD reports */
+ u_quad_t ifs6_out_mldreport;
+ /* ipv6IfIcmpOutGroupMembReductions, # of output MLD done */
+ u_quad_t ifs6_out_mlddone;
+};
+
+struct in6_ifreq {
+ char ifr_name[IFNAMSIZ];
+ union {
+ struct sockaddr_in6 ifru_addr;
+ struct sockaddr_in6 ifru_dstaddr;
+ int ifru_flags;
+ int ifru_flags6;
+ int ifru_metric;
+ caddr_t ifru_data;
+ struct in6_addrlifetime ifru_lifetime;
+ struct in6_ifstat ifru_stat;
+ struct icmp6_ifstat ifru_icmp6stat;
+ u_int32_t ifru_scope_id[16];
+ } ifr_ifru;
+};
+
+struct in6_aliasreq {
+ char ifra_name[IFNAMSIZ];
+ struct sockaddr_in6 ifra_addr;
+ struct sockaddr_in6 ifra_dstaddr;
+ struct sockaddr_in6 ifra_prefixmask;
+ int ifra_flags;
+ struct in6_addrlifetime ifra_lifetime;
+};
+
+/* prefix type macro */
+#define IN6_PREFIX_ND 1
+#define IN6_PREFIX_RR 2
+
+/*
+ * prefix related flags passed between kernel(NDP related part) and
+ * user land command(ifconfig) and daemon(rtadvd).
+ */
+struct in6_prflags {
+ struct prf_ra {
+ u_char onlink : 1;
+ u_char autonomous : 1;
+ u_char reserved : 6;
+ } prf_ra;
+ u_char prf_reserved1;
+ u_short prf_reserved2;
+ /* want to put this at a 4-byte offset */
+ struct prf_rr {
+ u_char decrvalid : 1;
+ u_char decrprefd : 1;
+ u_char reserved : 6;
+ } prf_rr;
+ u_char prf_reserved3;
+ u_short prf_reserved4;
+};
+
+struct in6_prefixreq {
+ char ipr_name[IFNAMSIZ];
+ u_char ipr_origin;
+ u_char ipr_plen;
+ u_int32_t ipr_vltime;
+ u_int32_t ipr_pltime;
+ struct in6_prflags ipr_flags;
+ struct sockaddr_in6 ipr_prefix;
+};
+
+#define PR_ORIG_RA 0
+#define PR_ORIG_RR 1
+#define PR_ORIG_STATIC 2
+#define PR_ORIG_KERNEL 3
+
+#define ipr_raf_onlink ipr_flags.prf_ra.onlink
+#define ipr_raf_auto ipr_flags.prf_ra.autonomous
+
+#define ipr_statef_onlink ipr_flags.prf_state.onlink
+
+#define ipr_rrf_decrvalid ipr_flags.prf_rr.decrvalid
+#define ipr_rrf_decrprefd ipr_flags.prf_rr.decrprefd
+
+struct in6_rrenumreq {
+ char irr_name[IFNAMSIZ];
+ u_char irr_origin;
+ u_char irr_m_len; /* match len for matchprefix */
+ u_char irr_m_minlen; /* minlen for matching prefix */
+ u_char irr_m_maxlen; /* maxlen for matching prefix */
+ u_char irr_u_uselen; /* uselen for adding prefix */
+ u_char irr_u_keeplen; /* keeplen from matching prefix */
+ struct irr_raflagmask {
+ u_char onlink : 1;
+ u_char autonomous : 1;
+ u_char reserved : 6;
+ } irr_raflagmask;
+ u_int32_t irr_vltime;
+ u_int32_t irr_pltime;
+ struct in6_prflags irr_flags;
+ struct sockaddr_in6 irr_matchprefix;
+ struct sockaddr_in6 irr_useprefix;
+};
+
+#define irr_raf_mask_onlink irr_raflagmask.onlink
+#define irr_raf_mask_auto irr_raflagmask.autonomous
+#define irr_raf_mask_reserved irr_raflagmask.reserved
+
+#define irr_raf_onlink irr_flags.prf_ra.onlink
+#define irr_raf_auto irr_flags.prf_ra.autonomous
+
+#define irr_statef_onlink irr_flags.prf_state.onlink
+
+#define irr_rrf irr_flags.prf_rr
+#define irr_rrf_decrvalid irr_flags.prf_rr.decrvalid
+#define irr_rrf_decrprefd irr_flags.prf_rr.decrprefd
+
+/*
+ * Given a pointer to an in6_ifaddr (ifaddr),
+ * return a pointer to the addr as a sockaddr_in6
+ */
+#define IA6_IN6(ia) (&((ia)->ia_addr.sin6_addr))
+#define IA6_DSTIN6(ia) (&((ia)->ia_dstaddr.sin6_addr))
+#define IA6_MASKIN6(ia) (&((ia)->ia_prefixmask.sin6_addr))
+#define IA6_SIN6(ia) (&((ia)->ia_addr))
+#define IA6_DSTSIN6(ia) (&((ia)->ia_dstaddr))
+#define IFA_IN6(x) (&((struct sockaddr_in6 *)((x)->ifa_addr))->sin6_addr)
+#define IFA_DSTIN6(x) (&((struct sockaddr_in6 *)((x)->ifa_dstaddr))->sin6_addr)
+
+#define IFPR_IN6(x) (&((struct sockaddr_in6 *)((x)->ifpr_prefix))->sin6_addr)
+
+#ifdef _KERNEL
+#define IN6_ARE_MASKED_ADDR_EQUAL(d, a, m) ( \
+ (((d)->s6_addr32[0] ^ (a)->s6_addr32[0]) & (m)->s6_addr32[0]) == 0 && \
+ (((d)->s6_addr32[1] ^ (a)->s6_addr32[1]) & (m)->s6_addr32[1]) == 0 && \
+ (((d)->s6_addr32[2] ^ (a)->s6_addr32[2]) & (m)->s6_addr32[2]) == 0 && \
+ (((d)->s6_addr32[3] ^ (a)->s6_addr32[3]) & (m)->s6_addr32[3]) == 0 )
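+
+/*
+ * Usage sketch (illustrative; a and b are hypothetical addresses):
+ *
+ *	struct in6_addr mask;
+ *
+ *	in6_prefixlen2mask(&mask, 64);
+ *	if (IN6_ARE_MASKED_ADDR_EQUAL(&a, &b, &mask))
+ *		same_prefix = 1; (a and b share the leading 64 bits)
+ */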
+#endif
+
+#define SIOCSIFADDR_IN6 _IOW('i', 12, struct in6_ifreq)
+#define SIOCGIFADDR_IN6 _IOWR('i', 33, struct in6_ifreq)
+
+#ifdef _KERNEL
+/*
+ * SIOCSxxx ioctls should be unused (see comments in in6.c), but
+ * we do not shift numbers for binary compatibility.
+ */
+#define SIOCSIFDSTADDR_IN6 _IOW('i', 14, struct in6_ifreq)
+#define SIOCSIFNETMASK_IN6 _IOW('i', 22, struct in6_ifreq)
+#endif
+
+#define SIOCGIFDSTADDR_IN6 _IOWR('i', 34, struct in6_ifreq)
+#define SIOCGIFNETMASK_IN6 _IOWR('i', 37, struct in6_ifreq)
+
+#define SIOCDIFADDR_IN6 _IOW('i', 25, struct in6_ifreq)
+#define SIOCAIFADDR_IN6 _IOW('i', 26, struct in6_aliasreq)
+
+#define SIOCSIFPHYADDR_IN6 _IOW('i', 70, struct in6_aliasreq)
+#define SIOCGIFPSRCADDR_IN6 _IOWR('i', 71, struct in6_ifreq)
+#define SIOCGIFPDSTADDR_IN6 _IOWR('i', 72, struct in6_ifreq)
+
+#define SIOCGIFAFLAG_IN6 _IOWR('i', 73, struct in6_ifreq)
+
+#define SIOCGDRLST_IN6 _IOWR('i', 74, struct in6_drlist)
+#ifdef _KERNEL
+/* XXX: SIOCGPRLST_IN6 is exposed in KAME but in6_oprlist is not. */
+#define SIOCGPRLST_IN6 _IOWR('i', 75, struct in6_oprlist)
+#endif
+#ifdef _KERNEL
+#define OSIOCGIFINFO_IN6 _IOWR('i', 76, struct in6_ondireq)
+#endif
+#define SIOCGIFINFO_IN6 _IOWR('i', 108, struct in6_ndireq)
+#define SIOCSIFINFO_IN6 _IOWR('i', 109, struct in6_ndireq)
+#define SIOCSNDFLUSH_IN6 _IOWR('i', 77, struct in6_ifreq)
+#define SIOCGNBRINFO_IN6 _IOWR('i', 78, struct in6_nbrinfo)
+#define SIOCSPFXFLUSH_IN6 _IOWR('i', 79, struct in6_ifreq)
+#define SIOCSRTRFLUSH_IN6 _IOWR('i', 80, struct in6_ifreq)
+
+#define SIOCGIFALIFETIME_IN6 _IOWR('i', 81, struct in6_ifreq)
+#define SIOCSIFALIFETIME_IN6 _IOWR('i', 82, struct in6_ifreq)
+#define SIOCGIFSTAT_IN6 _IOWR('i', 83, struct in6_ifreq)
+#define SIOCGIFSTAT_ICMP6 _IOWR('i', 84, struct in6_ifreq)
+
+#define SIOCSDEFIFACE_IN6 _IOWR('i', 85, struct in6_ndifreq)
+#define SIOCGDEFIFACE_IN6 _IOWR('i', 86, struct in6_ndifreq)
+
+#define SIOCSIFINFO_FLAGS _IOWR('i', 87, struct in6_ndireq) /* XXX */
+
+#define SIOCSSCOPE6 _IOW('i', 88, struct in6_ifreq)
+#define SIOCGSCOPE6 _IOWR('i', 89, struct in6_ifreq)
+#define SIOCGSCOPE6DEF _IOWR('i', 90, struct in6_ifreq)
+
+#define SIOCSIFPREFIX_IN6 _IOW('i', 100, struct in6_prefixreq) /* set */
+#define SIOCGIFPREFIX_IN6 _IOWR('i', 101, struct in6_prefixreq) /* get */
+#define SIOCDIFPREFIX_IN6 _IOW('i', 102, struct in6_prefixreq) /* del */
+#define SIOCAIFPREFIX_IN6 _IOW('i', 103, struct in6_rrenumreq) /* add */
+#define SIOCCIFPREFIX_IN6 _IOW('i', 104, \
+ struct in6_rrenumreq) /* change */
+#define SIOCSGIFPREFIX_IN6 _IOW('i', 105, \
+ struct in6_rrenumreq) /* set global */
+
+#define SIOCGETSGCNT_IN6 _IOWR('u', 106, \
+ struct sioc_sg_req6) /* get s,g pkt cnt */
+#define SIOCGETMIFCNT_IN6 _IOWR('u', 107, \
+ struct sioc_mif_req6) /* get pkt cnt per if */
+
+#define SIOCAADDRCTL_POLICY _IOW('u', 108, struct in6_addrpolicy)
+#define SIOCDADDRCTL_POLICY _IOW('u', 109, struct in6_addrpolicy)
+
+#define IN6_IFF_ANYCAST 0x01 /* anycast address */
+#define IN6_IFF_TENTATIVE 0x02 /* tentative address */
+#define IN6_IFF_DUPLICATED 0x04 /* DAD detected duplicate */
+#define IN6_IFF_DETACHED 0x08 /* may be detached from the link */
+#define IN6_IFF_DEPRECATED 0x10 /* deprecated address */
+#define IN6_IFF_NODAD 0x20 /* don't perform DAD on this address
+ * (used only at first SIOC* call)
+ */
+#define IN6_IFF_AUTOCONF 0x40 /* autoconfigurable address. */
+#define IN6_IFF_TEMPORARY 0x80 /* temporary (anonymous) address. */
+#define IN6_IFF_NOPFX 0x8000 /* skip kernel prefix management.
+ * XXX: this should be temporary.
+ */
+
+/* do not input/output */
+#define IN6_IFF_NOTREADY (IN6_IFF_TENTATIVE|IN6_IFF_DUPLICATED)
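+
+/*
+ * Example (illustrative): an address that is still tentative (DAD in
+ * progress) or has been found to be a duplicate must not be used for
+ * input/output:
+ *
+ *	if (ia->ia6_flags & IN6_IFF_NOTREADY)
+ *		continue; (skip this candidate address)
+ */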
+
+#ifdef _KERNEL
+#define IN6_ARE_SCOPE_CMP(a,b) ((a)-(b))
+#define IN6_ARE_SCOPE_EQUAL(a,b) ((a)==(b))
+#endif
+
+#ifdef _KERNEL
+VNET_DECLARE(struct in6_ifaddrhead, in6_ifaddrhead);
+#define V_in6_ifaddrhead VNET(in6_ifaddrhead)
+
+extern struct rwlock in6_ifaddr_lock;
+#define IN6_IFADDR_LOCK_ASSERT( ) rw_assert(&in6_ifaddr_lock, RA_LOCKED)
+#define IN6_IFADDR_RLOCK() rw_rlock(&in6_ifaddr_lock)
+#define IN6_IFADDR_RLOCK_ASSERT() rw_assert(&in6_ifaddr_lock, RA_RLOCKED)
+#define IN6_IFADDR_RUNLOCK() rw_runlock(&in6_ifaddr_lock)
+#define IN6_IFADDR_WLOCK() rw_wlock(&in6_ifaddr_lock)
+#define IN6_IFADDR_WLOCK_ASSERT() rw_assert(&in6_ifaddr_lock, RA_WLOCKED)
+#define IN6_IFADDR_WUNLOCK() rw_wunlock(&in6_ifaddr_lock)
+
+VNET_DECLARE(struct icmp6stat, icmp6stat);
+#define V_icmp6stat VNET(icmp6stat)
+#define in6_ifstat_inc(ifp, tag) \
+do { \
+ if (ifp) \
+ ((struct in6_ifextra *)((ifp)->if_afdata[AF_INET6]))->in6_ifstat->tag++; \
+} while (/*CONSTCOND*/ 0)
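+
+/*
+ * Example (illustrative): count a discarded inbound packet against
+ * the receiving interface's RFC 2465 statistics:
+ *
+ *	in6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_discard);
+ */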
+
+extern struct in6_addr zeroin6_addr;
+extern u_char inet6ctlerrmap[];
+VNET_DECLARE(unsigned long, in6_maxmtu);
+#define V_in6_maxmtu VNET(in6_maxmtu)
+#endif /* _KERNEL */
+
+/*
+ * IPv6 multicast MLD-layer source entry.
+ */
+struct ip6_msource {
+ RB_ENTRY(ip6_msource) im6s_link; /* RB tree links */
+ struct in6_addr im6s_addr;
+ struct im6s_st {
+ uint16_t ex; /* # of exclusive members */
+ uint16_t in; /* # of inclusive members */
+ } im6s_st[2]; /* state at t0, t1 */
+ uint8_t im6s_stp; /* pending query */
+};
+RB_HEAD(ip6_msource_tree, ip6_msource);
+
+/*
+ * IPv6 multicast PCB-layer source entry.
+ *
+ * NOTE: overlapping use of struct ip6_msource fields at start.
+ */
+struct in6_msource {
+ RB_ENTRY(ip6_msource) im6s_link; /* Common field */
+ struct in6_addr im6s_addr; /* Common field */
+ uint8_t im6sl_st[2]; /* state before/at commit */
+};
+
+#ifdef _KERNEL
+/*
+ * IPv6 source tree comparison function.
+ *
+ * An ordered predicate is necessary; bcmp() is not documented to return
+ * an indication of order, memcmp() is, and is an ISO C99 requirement.
+ */
+static __inline int
+ip6_msource_cmp(const struct ip6_msource *a, const struct ip6_msource *b)
+{
+
+ return (memcmp(&a->im6s_addr, &b->im6s_addr, sizeof(struct in6_addr)));
+}
+RB_PROTOTYPE(ip6_msource_tree, ip6_msource, im6s_link, ip6_msource_cmp);
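+
+/*
+ * Lookup sketch (illustrative; srcaddr is a hypothetical
+ * struct in6_addr pointer): a source entry is found by filling in
+ * only the address of a stack-local key node and using the RB_FIND()
+ * generated from the prototype above:
+ *
+ *	struct ip6_msource find, *ims;
+ *
+ *	find.im6s_addr = *srcaddr;
+ *	ims = RB_FIND(ip6_msource_tree, &inm->in6m_srcs, &find);
+ */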
+#endif /* _KERNEL */
+
+/*
+ * IPv6 multicast PCB-layer group filter descriptor.
+ */
+struct in6_mfilter {
+ struct ip6_msource_tree im6f_sources; /* source list for (S,G) */
+ u_long im6f_nsrc; /* # of source entries */
+ uint8_t im6f_st[2]; /* state before/at commit */
+};
+
+/*
+ * Legacy KAME IPv6 multicast membership descriptor.
+ */
+struct in6_multi_mship {
+ struct in6_multi *i6mm_maddr;
+ LIST_ENTRY(in6_multi_mship) i6mm_chain;
+};
+
+/*
+ * IPv6 group descriptor.
+ *
+ * For every entry on an ifnet's if_multiaddrs list which represents
+ * an IP multicast group, there is one of these structures.
+ *
+ * If any source filters are present, then a node will exist in the RB-tree
+ * to permit fast lookup by source whenever an operation takes place.
+ * This permits pre-order traversal when we issue reports.
+ * Source filter trees are kept separately from the socket layer to
+ * greatly simplify locking.
+ *
+ * When MLDv2 is active, in6m_timer is the group query response timer.
+ * The state-change timer in6m_sctimer is separate; whenever state changes
+ * for the group the state change record is generated and transmitted,
+ * and kept if retransmissions are necessary.
+ *
+ * FUTURE: in6m_link is now only used when groups are being purged
+ * on a detaching ifnet. It could be demoted to a SLIST_ENTRY, but
+ * because it is at the very start of the struct, we can't do this
+ * w/o breaking the ABI for ifmcstat.
+ */
+struct in6_multi {
+ LIST_ENTRY(in6_multi) in6m_entry; /* list glue */
+ struct in6_addr in6m_addr; /* IPv6 multicast address */
+ struct ifnet *in6m_ifp; /* back pointer to ifnet */
+ struct ifmultiaddr *in6m_ifma; /* back pointer to ifmultiaddr */
+ u_int in6m_refcount; /* reference count */
+ u_int in6m_state; /* state of the membership */
+ u_int in6m_timer; /* MLD6 listener report timer */
+
+ /* New fields for MLDv2 follow. */
+ struct mld_ifinfo *in6m_mli; /* MLD info */
+ SLIST_ENTRY(in6_multi) in6m_nrele; /* to-be-released by MLD */
+ struct ip6_msource_tree in6m_srcs; /* tree of sources */
+ u_long in6m_nsrc; /* # of tree entries */
+
+ struct ifqueue in6m_scq; /* queue of pending
+ * state-change packets */
+ struct timeval in6m_lastgsrtv; /* last G-S-R query */
+ uint16_t in6m_sctimer; /* state-change timer */
+ uint16_t in6m_scrv; /* state-change rexmit count */
+
+ /*
+ * SSM state counters which track state at T0 (the time the last
+ * state-change report's RV timer went to zero) and T1
+ * (time of pending report, i.e. now).
+ * Used for computing MLDv2 state-change reports. Several refcounts
+ * are maintained here to optimize for common use-cases.
+ */
+ struct in6m_st {
+ uint16_t iss_fmode; /* MLD filter mode */
+ uint16_t iss_asm; /* # of ASM listeners */
+ uint16_t iss_ex; /* # of exclusive members */
+ uint16_t iss_in; /* # of inclusive members */
+ uint16_t iss_rec; /* # of recorded sources */
+ } in6m_st[2]; /* state at t0, t1 */
+};
+
+/*
+ * Helper function to derive the filter mode on a source entry
+ * from its internal counters. Predicates are:
+ * A source is only excluded if all listeners exclude it.
+ * A source is only included if no listeners exclude it,
+ * and at least one listener includes it.
+ * May be used by ifmcstat(8).
+ */
+static __inline uint8_t
+im6s_get_mode(const struct in6_multi *inm, const struct ip6_msource *ims,
+ uint8_t t)
+{
+
+ t = !!t;
+ if (inm->in6m_st[t].iss_ex > 0 &&
+ inm->in6m_st[t].iss_ex == ims->im6s_st[t].ex)
+ return (MCAST_EXCLUDE);
+ else if (ims->im6s_st[t].in > 0 && ims->im6s_st[t].ex == 0)
+ return (MCAST_INCLUDE);
+ return (MCAST_UNDEFINED);
+}
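+
+/*
+ * Example of the predicates above (illustrative counts): if both of a
+ * group's two EXCLUDE-mode listeners exclude source S (iss_ex == 2
+ * and ims->im6s_st[t].ex == 2), S is reported as MCAST_EXCLUDE. If at
+ * least one listener includes S and none excludes it, S is
+ * MCAST_INCLUDE; anything else is MCAST_UNDEFINED.
+ */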
+
+#ifdef _KERNEL
+
+/*
+ * Lock macros for IPv6 layer multicast address lists. IPv6 lock goes
+ * before link layer multicast locks in the lock order. In most cases,
+ * consumers of IN_*_MULTI() macros should acquire the locks before
+ * calling them; users of the in_{add,del}multi() functions should not.
+ */
+extern struct mtx in6_multi_mtx;
+#define IN6_MULTI_LOCK() mtx_lock(&in6_multi_mtx)
+#define IN6_MULTI_UNLOCK() mtx_unlock(&in6_multi_mtx)
+#define IN6_MULTI_LOCK_ASSERT() mtx_assert(&in6_multi_mtx, MA_OWNED)
+#define IN6_MULTI_UNLOCK_ASSERT() mtx_assert(&in6_multi_mtx, MA_NOTOWNED)
+
+/*
+ * Look up an in6_multi record for an IPv6 multicast address
+ * on the interface ifp.
+ * If no record found, return NULL.
+ *
+ * SMPng: The IN6_MULTI_LOCK and IF_ADDR_LOCK on ifp must be held.
+ */
+static __inline struct in6_multi *
+in6m_lookup_locked(struct ifnet *ifp, const struct in6_addr *mcaddr)
+{
+ struct ifmultiaddr *ifma;
+ struct in6_multi *inm;
+
+ IN6_MULTI_LOCK_ASSERT();
+ IF_ADDR_LOCK_ASSERT(ifp);
+
+ inm = NULL;
+ TAILQ_FOREACH(ifma, &((ifp)->if_multiaddrs), ifma_link) {
+ if (ifma->ifma_addr->sa_family == AF_INET6) {
+ inm = (struct in6_multi *)ifma->ifma_protospec;
+ if (IN6_ARE_ADDR_EQUAL(&inm->in6m_addr, mcaddr))
+ break;
+ inm = NULL;
+ }
+ }
+ return (inm);
+}
+
+/*
+ * Wrapper for in6m_lookup_locked().
+ *
+ * SMPng: Assumes that neither the IN6_MULTI_LOCK() nor the IF_ADDR_LOCK() is held.
+ */
+static __inline struct in6_multi *
+in6m_lookup(struct ifnet *ifp, const struct in6_addr *mcaddr)
+{
+ struct in6_multi *inm;
+
+ IN6_MULTI_LOCK();
+ IF_ADDR_LOCK(ifp);
+ inm = in6m_lookup_locked(ifp, mcaddr);
+ IF_ADDR_UNLOCK(ifp);
+ IN6_MULTI_UNLOCK();
+
+ return (inm);
+}
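+
+/*
+ * Usage sketch (illustrative): check whether a group is joined on an
+ * interface without holding any multicast locks; the wrapper acquires
+ * and drops IN6_MULTI_LOCK() and the interface address lock itself:
+ *
+ *	if (in6m_lookup(ifp, &mcaddr) != NULL)
+ *		; (group is active on ifp)
+ */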
+
+/* Acquire an in6_multi record. */
+static __inline void
+in6m_acquire_locked(struct in6_multi *inm)
+{
+
+ IN6_MULTI_LOCK_ASSERT();
+ ++inm->in6m_refcount;
+}
+
+struct ip6_moptions;
+struct sockopt;
+
+/* Multicast KPIs. */
+int im6o_mc_filter(const struct ip6_moptions *, const struct ifnet *,
+ const struct sockaddr *, const struct sockaddr *);
+int in6_mc_join(struct ifnet *, const struct in6_addr *,
+ struct in6_mfilter *, struct in6_multi **, int);
+int in6_mc_join_locked(struct ifnet *, const struct in6_addr *,
+ struct in6_mfilter *, struct in6_multi **, int);
+int in6_mc_leave(struct in6_multi *, struct in6_mfilter *);
+int in6_mc_leave_locked(struct in6_multi *, struct in6_mfilter *);
+void in6m_clear_recorded(struct in6_multi *);
+void in6m_commit(struct in6_multi *);
+void in6m_print(const struct in6_multi *);
+int in6m_record_source(struct in6_multi *, const struct in6_addr *);
+void in6m_release_locked(struct in6_multi *);
+void ip6_freemoptions(struct ip6_moptions *);
+int ip6_getmoptions(struct inpcb *, struct sockopt *);
+int ip6_setmoptions(struct inpcb *, struct sockopt *);
+
+/* Legacy KAME multicast KPIs. */
+struct in6_multi_mship *
+ in6_joingroup(struct ifnet *, struct in6_addr *, int *, int);
+int in6_leavegroup(struct in6_multi_mship *);
+
+/* flags to in6_update_ifa */
+#define IN6_IFAUPDATE_DADDELAY 0x1 /* first time to configure an address */
+
+int in6_mask2len __P((struct in6_addr *, u_char *));
+int in6_control __P((struct socket *, u_long, caddr_t, struct ifnet *,
+ struct thread *));
+int in6_update_ifa __P((struct ifnet *, struct in6_aliasreq *,
+ struct in6_ifaddr *, int));
+void in6_purgeaddr __P((struct ifaddr *));
+int in6if_do_dad __P((struct ifnet *));
+void in6_purgeif __P((struct ifnet *));
+void in6_savemkludge __P((struct in6_ifaddr *));
+void *in6_domifattach __P((struct ifnet *));
+void in6_domifdetach __P((struct ifnet *, void *));
+void in6_setmaxmtu __P((void));
+int in6_if2idlen __P((struct ifnet *));
+struct in6_ifaddr *in6ifa_ifpforlinklocal __P((struct ifnet *, int));
+struct in6_ifaddr *in6ifa_ifpwithaddr __P((struct ifnet *, struct in6_addr *));
+char *ip6_sprintf __P((char *, const struct in6_addr *));
+int in6_addr2zoneid __P((struct ifnet *, struct in6_addr *, u_int32_t *));
+int in6_matchlen __P((struct in6_addr *, struct in6_addr *));
+int in6_are_prefix_equal __P((struct in6_addr *, struct in6_addr *, int));
+void in6_prefixlen2mask __P((struct in6_addr *, int));
+int in6_prefix_ioctl __P((struct socket *, u_long, caddr_t,
+ struct ifnet *));
+int in6_prefix_add_ifid __P((int, struct in6_ifaddr *));
+void in6_prefix_remove_ifid __P((int, struct in6_ifaddr *));
+void in6_purgeprefix __P((struct ifnet *));
+void in6_ifremloop(struct ifaddr *);
+void in6_ifaddloop(struct ifaddr *);
+
+int in6_is_addr_deprecated __P((struct sockaddr_in6 *));
+struct inpcb;
+int in6_src_ioctl __P((u_long, caddr_t));
+#endif /* _KERNEL */
+
+#endif /* _NETINET6_IN6_VAR_HH_ */
diff --git a/rtems/freebsd/netinet6/ip6.h b/rtems/freebsd/netinet6/ip6.h
new file mode 100644
index 00000000..9eec13fb
--- /dev/null
+++ b/rtems/freebsd/netinet6/ip6.h
@@ -0,0 +1,4 @@
+/* $FreeBSD$ */
+/* $KAME: ip6.h,v 1.7 2000/03/25 07:23:36 sumikawa Exp $ */
+
+#error "netinet6/ip6.h is obsolete. use netinet/ip6.h"
diff --git a/rtems/freebsd/netinet6/ip6_ecn.h b/rtems/freebsd/netinet6/ip6_ecn.h
new file mode 100644
index 00000000..27d3f34d
--- /dev/null
+++ b/rtems/freebsd/netinet6/ip6_ecn.h
@@ -0,0 +1,41 @@
+/*-
+ * Copyright (C) 1999 WIDE Project.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the project nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $KAME: ip_ecn.h,v 1.5 2000/03/27 04:58:38 sumikawa Exp $
+ * $FreeBSD$
+ */
+
+/*
+ * ECN consideration on tunnel ingress/egress operation.
+ * http://www.aciri.org/floyd/papers/draft-ipsec-ecn-00.txt
+ */
+
+#ifdef _KERNEL
+extern void ip6_ecn_ingress(int, u_int32_t *, const u_int32_t *);
+extern int ip6_ecn_egress(int, const u_int32_t *, u_int32_t *);
+#endif
diff --git a/rtems/freebsd/netinet6/ip6_forward.c b/rtems/freebsd/netinet6/ip6_forward.c
new file mode 100644
index 00000000..af342c07
--- /dev/null
+++ b/rtems/freebsd/netinet6/ip6_forward.c
@@ -0,0 +1,626 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the project nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $KAME: ip6_forward.c,v 1.69 2001/05/17 03:48:30 itojun Exp $
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/freebsd/local/opt_inet.h>
+#include <rtems/freebsd/local/opt_inet6.h>
+#include <rtems/freebsd/local/opt_ipsec.h>
+#include <rtems/freebsd/local/opt_ipstealth.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/domain.h>
+#include <rtems/freebsd/sys/protosw.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/errno.h>
+#include <rtems/freebsd/sys/time.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/syslog.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/route.h>
+#include <rtems/freebsd/net/pfil.h>
+
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/in_var.h>
+#include <rtems/freebsd/netinet/in_systm.h>
+#include <rtems/freebsd/netinet/ip.h>
+#include <rtems/freebsd/netinet/ip_var.h>
+#include <rtems/freebsd/netinet6/in6_var.h>
+#include <rtems/freebsd/netinet/ip6.h>
+#include <rtems/freebsd/netinet6/ip6_var.h>
+#include <rtems/freebsd/netinet6/scope6_var.h>
+#include <rtems/freebsd/netinet/icmp6.h>
+#include <rtems/freebsd/netinet6/nd6.h>
+
+#include <rtems/freebsd/netinet/in_pcb.h>
+
+#ifdef IPSEC
+#include <rtems/freebsd/netipsec/ipsec.h>
+#include <rtems/freebsd/netipsec/ipsec6.h>
+#include <rtems/freebsd/netipsec/key.h>
+#endif /* IPSEC */
+
+#include <rtems/freebsd/netinet6/ip6protosw.h>
+
+/*
+ * Forward a packet. If an error occurs, return an ICMP packet to
+ * the sender. Note that we can't always generate a meaningful ICMP
+ * message because ICMP doesn't have a large enough repertoire of
+ * codes and types.
+ *
+ * If not forwarding, just drop the packet. This could be confusing
+ * if ipforwarding was zero but some routing protocol was advancing
+ * us as a gateway to somewhere. However, we must let the routing
+ * protocol deal with that.
+ */
+void
+ip6_forward(struct mbuf *m, int srcrt)
+{
+ struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *);
+ struct sockaddr_in6 *dst = NULL;
+ struct rtentry *rt = NULL;
+ struct route_in6 rin6;
+ int error, type = 0, code = 0;
+ struct mbuf *mcopy = NULL;
+ struct ifnet *origifp; /* maybe unnecessary */
+ u_int32_t inzone, outzone;
+ struct in6_addr src_in6, dst_in6;
+#ifdef IPSEC
+ struct secpolicy *sp = NULL;
+ int ipsecrt = 0;
+#endif
+ char ip6bufs[INET6_ADDRSTRLEN], ip6bufd[INET6_ADDRSTRLEN];
+
+#ifdef IPSEC
+ /*
+ * Check AH/ESP integrity.
+ */
+ /*
+ * Don't increment ip6s_cantforward, because this check happens
+ * before the packet is actually forwarded.
+ */
+ if (ipsec6_in_reject(m, NULL)) {
+ V_ipsec6stat.in_polvio++;
+ m_freem(m);
+ return;
+ }
+#endif /* IPSEC */
+
+ /*
+ * Do not forward packets to a multicast destination (these should
+ * be handled by ip6_mforward()).
+ * Do not forward packets with an unspecified source; this was
+ * discussed in July 2000 on the ipngwg mailing list.
+ */
+ if ((m->m_flags & (M_BCAST|M_MCAST)) != 0 ||
+ IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) ||
+ IN6_IS_ADDR_UNSPECIFIED(&ip6->ip6_src)) {
+ V_ip6stat.ip6s_cantforward++;
+ /* XXX in6_ifstat_inc(rt->rt_ifp, ifs6_in_discard) */
+ if (V_ip6_log_time + V_ip6_log_interval < time_second) {
+ V_ip6_log_time = time_second;
+ log(LOG_DEBUG,
+ "cannot forward "
+ "from %s to %s nxt %d received on %s\n",
+ ip6_sprintf(ip6bufs, &ip6->ip6_src),
+ ip6_sprintf(ip6bufd, &ip6->ip6_dst),
+ ip6->ip6_nxt,
+ if_name(m->m_pkthdr.rcvif));
+ }
+ m_freem(m);
+ return;
+ }
+
+#ifdef IPSTEALTH
+ if (!V_ip6stealth) {
+#endif
+ if (ip6->ip6_hlim <= IPV6_HLIMDEC) {
+ /* XXX in6_ifstat_inc(rt->rt_ifp, ifs6_in_discard) */
+ icmp6_error(m, ICMP6_TIME_EXCEEDED,
+ ICMP6_TIME_EXCEED_TRANSIT, 0);
+ return;
+ }
+ ip6->ip6_hlim -= IPV6_HLIMDEC;
+
+#ifdef IPSTEALTH
+ }
+#endif
+
+ /*
+ * Save at most ICMPV6_PLD_MAXLEN (= the min IPv6 MTU -
+ * size of IPv6 + ICMPv6 headers) bytes of the packet in case
+ * we need to generate an ICMP6 message to the src.
+ * Thanks to M_EXT, in most cases a copy will not occur.
+ *
+ * It is important to save it before IPsec processing as IPsec
+ * processing may modify the mbuf.
+ */
+ mcopy = m_copy(m, 0, imin(m->m_pkthdr.len, ICMPV6_PLD_MAXLEN));
+
+#ifdef IPSEC
+ /* get a security policy for this packet */
+ sp = ipsec_getpolicybyaddr(m, IPSEC_DIR_OUTBOUND,
+ IP_FORWARDING, &error);
+ if (sp == NULL) {
+ V_ipsec6stat.out_inval++;
+ V_ip6stat.ip6s_cantforward++;
+ if (mcopy) {
+#if 0
+ /* XXX: what icmp ? */
+#else
+ m_freem(mcopy);
+#endif
+ }
+ m_freem(m);
+ return;
+ }
+
+ error = 0;
+
+ /* check policy */
+ switch (sp->policy) {
+ case IPSEC_POLICY_DISCARD:
+ /*
+ * This packet is just discarded.
+ */
+ V_ipsec6stat.out_polvio++;
+ V_ip6stat.ip6s_cantforward++;
+ KEY_FREESP(&sp);
+ if (mcopy) {
+#if 0
+ /* XXX: what icmp ? */
+#else
+ m_freem(mcopy);
+#endif
+ }
+ m_freem(m);
+ return;
+
+ case IPSEC_POLICY_BYPASS:
+ case IPSEC_POLICY_NONE:
+ /* no need to do IPsec. */
+ KEY_FREESP(&sp);
+ goto skip_ipsec;
+
+ case IPSEC_POLICY_IPSEC:
+ if (sp->req == NULL) {
+ /* XXX should be panic ? */
+ printf("ip6_forward: No IPsec request specified.\n");
+ V_ip6stat.ip6s_cantforward++;
+ KEY_FREESP(&sp);
+ if (mcopy) {
+#if 0
+ /* XXX: what icmp ? */
+#else
+ m_freem(mcopy);
+#endif
+ }
+ m_freem(m);
+ return;
+ }
+ /* do IPsec */
+ break;
+
+ case IPSEC_POLICY_ENTRUST:
+ default:
+ /* should be panic ?? */
+ printf("ip6_forward: Invalid policy found. %d\n", sp->policy);
+ KEY_FREESP(&sp);
+ goto skip_ipsec;
+ }
+
+ {
+ struct ipsecrequest *isr = NULL;
+ struct ipsec_output_state state;
+
+ /*
+ * When the kernel forwards a packet, it is not proper to apply
+ * IPsec transport mode to it; this check avoids doing so.
+ * At present, if there is even a transport mode SA request in the
+ * security policy, the kernel does not apply IPsec to the packet.
+ * This check is not sufficient, because the following case is valid:
+ *	ipsec esp/tunnel/xxx-xxx/require esp/transport//require;
+ */
+ for (isr = sp->req; isr; isr = isr->next) {
+ if (isr->saidx.mode == IPSEC_MODE_ANY)
+ goto doipsectunnel;
+ if (isr->saidx.mode == IPSEC_MODE_TUNNEL)
+ goto doipsectunnel;
+ }
+
+ /*
+ * if there's no need for tunnel mode IPsec, skip.
+ */
+ if (!isr)
+ goto skip_ipsec;
+
+ doipsectunnel:
+ /*
+ * All the extension headers will become inaccessible
+ * (since they can be encrypted).
+ * Don't panic: no more updates to the extension headers of the
+ * inner IPv6 packet are needed (since they are now encapsulated).
+ *
+ * IPv6 [ESP|AH] IPv6 [extension headers] payload
+ */
+ bzero(&state, sizeof(state));
+ state.m = m;
+ state.ro = NULL; /* update at ipsec6_output_tunnel() */
+ state.dst = NULL; /* update at ipsec6_output_tunnel() */
+
+ error = ipsec6_output_tunnel(&state, sp, 0);
+
+ m = state.m;
+ KEY_FREESP(&sp);
+
+ if (error) {
+ /* mbuf is already reclaimed in ipsec6_output_tunnel. */
+ switch (error) {
+ case EHOSTUNREACH:
+ case ENETUNREACH:
+ case EMSGSIZE:
+ case ENOBUFS:
+ case ENOMEM:
+ break;
+ default:
+ printf("ip6_output (ipsec): error code %d\n", error);
+ /* FALLTHROUGH */
+ case ENOENT:
+ /* don't show these error codes to the user */
+ break;
+ }
+ V_ip6stat.ip6s_cantforward++;
+ if (mcopy) {
+#if 0
+ /* XXX: what icmp ? */
+#else
+ m_freem(mcopy);
+#endif
+ }
+ m_freem(m);
+ return;
+ } else {
+ /*
+ * In the FAST IPSec case we have already
+ * re-injected the packet and it has been freed
+ * by the ipsec_done() function. So, just clean
+ * up after ourselves.
+ */
+ m = NULL;
+ goto freecopy;
+ }
+
+ if ((m != NULL) && (ip6 != mtod(m, struct ip6_hdr *))) {
+ /*
+ * Now tunnel mode headers have been added; we are originating
+ * the packet instead of forwarding it.
+ */
+ ip6_output(m, NULL, NULL, IPV6_FORWARDING/*XXX*/, NULL, NULL,
+ NULL);
+ goto freecopy;
+ }
+
+ /* adjust pointer */
+ dst = (struct sockaddr_in6 *)state.dst;
+ rt = state.ro ? state.ro->ro_rt : NULL;
+ if (dst != NULL && rt != NULL)
+ ipsecrt = 1;
+ }
+ if (ipsecrt)
+ goto skip_routing;
+skip_ipsec:
+#endif
+
+ bzero(&rin6, sizeof(struct route_in6));
+ dst = (struct sockaddr_in6 *)&rin6.ro_dst;
+ dst->sin6_len = sizeof(struct sockaddr_in6);
+ dst->sin6_family = AF_INET6;
+ dst->sin6_addr = ip6->ip6_dst;
+
+ rin6.ro_rt = rtalloc1((struct sockaddr *)dst, 0, 0);
+ if (rin6.ro_rt != NULL)
+ RT_UNLOCK(rin6.ro_rt);
+ else {
+ V_ip6stat.ip6s_noroute++;
+ in6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_noroute);
+ if (mcopy) {
+ icmp6_error(mcopy, ICMP6_DST_UNREACH,
+ ICMP6_DST_UNREACH_NOROUTE, 0);
+ }
+ goto bad;
+ }
+ rt = rin6.ro_rt;
+#ifdef IPSEC
+skip_routing:
+#endif
+
+ /*
+ * Source scope check: if a packet can't be delivered to its
+ * destination for the reason that the destination is beyond the scope
+ * of the source address, discard the packet and return an icmp6
+ * destination unreachable error with Code 2 (beyond scope of source
+ * address). We use a local copy of ip6_src, since in6_setscope()
+ * will possibly modify its first argument.
+ * [draft-ietf-ipngwg-icmp-v3-04.txt, Section 3.1]
+ */
+ src_in6 = ip6->ip6_src;
+ if (in6_setscope(&src_in6, rt->rt_ifp, &outzone)) {
+ /* XXX: this should not happen */
+ V_ip6stat.ip6s_cantforward++;
+ V_ip6stat.ip6s_badscope++;
+ goto bad;
+ }
+ if (in6_setscope(&src_in6, m->m_pkthdr.rcvif, &inzone)) {
+ V_ip6stat.ip6s_cantforward++;
+ V_ip6stat.ip6s_badscope++;
+ goto bad;
+ }
+ if (inzone != outzone
+#ifdef IPSEC
+ && !ipsecrt
+#endif
+ ) {
+ V_ip6stat.ip6s_cantforward++;
+ V_ip6stat.ip6s_badscope++;
+ in6_ifstat_inc(rt->rt_ifp, ifs6_in_discard);
+
+ if (V_ip6_log_time + V_ip6_log_interval < time_second) {
+ V_ip6_log_time = time_second;
+ log(LOG_DEBUG,
+ "cannot forward "
+ "src %s, dst %s, nxt %d, rcvif %s, outif %s\n",
+ ip6_sprintf(ip6bufs, &ip6->ip6_src),
+ ip6_sprintf(ip6bufd, &ip6->ip6_dst),
+ ip6->ip6_nxt,
+ if_name(m->m_pkthdr.rcvif), if_name(rt->rt_ifp));
+ }
+ if (mcopy)
+ icmp6_error(mcopy, ICMP6_DST_UNREACH,
+ ICMP6_DST_UNREACH_BEYONDSCOPE, 0);
+ goto bad;
+ }
+
+ /*
+ * Destination scope check: if a packet is going to break the scope
+ * zone of the packet's destination address, discard it. This case should
+ * usually be prevented by an appropriately configured routing table, but
+ * we need an explicit check because we may mistakenly forward the
+ * packet to a different zone by (e.g.) a default route.
+ */
+ dst_in6 = ip6->ip6_dst;
+ if (in6_setscope(&dst_in6, m->m_pkthdr.rcvif, &inzone) != 0 ||
+ in6_setscope(&dst_in6, rt->rt_ifp, &outzone) != 0 ||
+ inzone != outzone) {
+ V_ip6stat.ip6s_cantforward++;
+ V_ip6stat.ip6s_badscope++;
+ goto bad;
+ }
+
+ if (m->m_pkthdr.len > IN6_LINKMTU(rt->rt_ifp)) {
+ in6_ifstat_inc(rt->rt_ifp, ifs6_in_toobig);
+ if (mcopy) {
+ u_long mtu;
+#ifdef IPSEC
+ struct secpolicy *sp;
+ int ipsecerror;
+ size_t ipsechdrsiz;
+#endif /* IPSEC */
+
+ mtu = IN6_LINKMTU(rt->rt_ifp);
+#ifdef IPSEC
+ /*
+ * When we do IPsec tunnel ingress, we need to play
+ * with the link value (decrement IPsec header size
+ * from mtu value). The code is much simpler than v4
+ * case, as we have the outgoing interface for
+ * encapsulated packet as "rt->rt_ifp".
+ */
+ sp = ipsec_getpolicybyaddr(mcopy, IPSEC_DIR_OUTBOUND,
+ IP_FORWARDING, &ipsecerror);
+ if (sp) {
+ ipsechdrsiz = ipsec_hdrsiz(mcopy,
+ IPSEC_DIR_OUTBOUND, NULL);
+ if (ipsechdrsiz < mtu)
+ mtu -= ipsechdrsiz;
+ }
+
+ /*
+ * if the mtu becomes smaller than the minimum MTU,
+ * report the minimum MTU (the sender will then fragment).
+ */
+ if (mtu < IPV6_MMTU)
+ mtu = IPV6_MMTU;
+#endif /* IPSEC */
+ icmp6_error(mcopy, ICMP6_PACKET_TOO_BIG, 0, mtu);
+ }
+ goto bad;
+ }
+
+ if (rt->rt_flags & RTF_GATEWAY)
+ dst = (struct sockaddr_in6 *)rt->rt_gateway;
+
+ /*
+	 * If we are to forward the packet using the same interface
+	 * as the one we got the packet from, perhaps we should send a
+	 * redirect to the sender to shortcut a hop.
+	 * Only send a redirect if the source is sending directly to us,
+	 * and if the packet was not source routed (and has no options).
+	 * Also, don't send a redirect if forwarding using a route
+	 * modified by a redirect.
+ */
+ if (V_ip6_sendredirects && rt->rt_ifp == m->m_pkthdr.rcvif && !srcrt &&
+#ifdef IPSEC
+ !ipsecrt &&
+#endif /* IPSEC */
+ (rt->rt_flags & (RTF_DYNAMIC|RTF_MODIFIED)) == 0) {
+ if ((rt->rt_ifp->if_flags & IFF_POINTOPOINT) != 0) {
+ /*
+			 * If the incoming interface is equal to the outgoing
+			 * one, and the link attached to the interface is
+			 * point-to-point, then a routing loop is highly
+			 * probable.  Thus, we immediately drop the packet
+			 * and send an ICMPv6 error message.
+			 *
+			 * The type/code is based on a suggestion by Rich
+			 * Draves; it may not be the best pick.
+ */
+ icmp6_error(mcopy, ICMP6_DST_UNREACH,
+ ICMP6_DST_UNREACH_ADDR, 0);
+ goto bad;
+ }
+ type = ND_REDIRECT;
+ }
+
+ /*
+	 * Fake scoped addresses.  Note that even a link-local source or
+	 * destination can appear, if the originating node just sends the
+ * packet to us (without address resolution for the destination).
+ * Since both icmp6_error and icmp6_redirect_output fill the embedded
+ * link identifiers, we can do this stuff after making a copy for
+ * returning an error.
+ */
+ if ((rt->rt_ifp->if_flags & IFF_LOOPBACK) != 0) {
+ /*
+ * See corresponding comments in ip6_output.
+ * XXX: but is it possible that ip6_forward() sends a packet
+ * to a loopback interface? I don't think so, and thus
+ * I bark here. (jinmei@kame.net)
+ * XXX: it is common to route invalid packets to loopback.
+ * also, the codepath will be visited on use of ::1 in
+ * rthdr. (itojun)
+ */
+#if 1
+ if (0)
+#else
+ if ((rt->rt_flags & (RTF_BLACKHOLE|RTF_REJECT)) == 0)
+#endif
+ {
+ printf("ip6_forward: outgoing interface is loopback. "
+ "src %s, dst %s, nxt %d, rcvif %s, outif %s\n",
+ ip6_sprintf(ip6bufs, &ip6->ip6_src),
+ ip6_sprintf(ip6bufd, &ip6->ip6_dst),
+ ip6->ip6_nxt, if_name(m->m_pkthdr.rcvif),
+ if_name(rt->rt_ifp));
+ }
+
+ /* we can just use rcvif in forwarding. */
+ origifp = m->m_pkthdr.rcvif;
+ }
+ else
+ origifp = rt->rt_ifp;
+ /*
+ * clear embedded scope identifiers if necessary.
+ * in6_clearscope will touch the addresses only when necessary.
+ */
+ in6_clearscope(&ip6->ip6_src);
+ in6_clearscope(&ip6->ip6_dst);
+
+ /* Jump over all PFIL processing if hooks are not active. */
+ if (!PFIL_HOOKED(&V_inet6_pfil_hook))
+ goto pass;
+
+ /* Run through list of hooks for output packets. */
+ error = pfil_run_hooks(&V_inet6_pfil_hook, &m, rt->rt_ifp, PFIL_OUT, NULL);
+ if (error != 0)
+ goto senderr;
+ if (m == NULL)
+ goto freecopy;
+ ip6 = mtod(m, struct ip6_hdr *);
+
+pass:
+ error = nd6_output(rt->rt_ifp, origifp, m, dst, rt);
+ if (error) {
+ in6_ifstat_inc(rt->rt_ifp, ifs6_out_discard);
+ V_ip6stat.ip6s_cantforward++;
+ } else {
+ V_ip6stat.ip6s_forward++;
+ in6_ifstat_inc(rt->rt_ifp, ifs6_out_forward);
+ if (type)
+ V_ip6stat.ip6s_redirectsent++;
+ else {
+ if (mcopy)
+ goto freecopy;
+ }
+ }
+
+senderr:
+ if (mcopy == NULL)
+ goto out;
+ switch (error) {
+ case 0:
+ if (type == ND_REDIRECT) {
+ icmp6_redirect_output(mcopy, rt);
+ goto out;
+ }
+ goto freecopy;
+
+ case EMSGSIZE:
+ /* xxx MTU is constant in PPP? */
+ goto freecopy;
+
+ case ENOBUFS:
+ /* Tell source to slow down like source quench in IP? */
+ goto freecopy;
+
+ case ENETUNREACH: /* shouldn't happen, checked above */
+ case EHOSTUNREACH:
+ case ENETDOWN:
+ case EHOSTDOWN:
+ default:
+ type = ICMP6_DST_UNREACH;
+ code = ICMP6_DST_UNREACH_ADDR;
+ break;
+ }
+ icmp6_error(mcopy, type, code, 0);
+ goto out;
+
+ freecopy:
+ m_freem(mcopy);
+ goto out;
+bad:
+ m_freem(m);
+out:
+ if (rt != NULL
+#ifdef IPSEC
+ && !ipsecrt
+#endif
+ )
+ RTFREE(rt);
+}
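+
+/*
+ * Note on ownership (editor's summary of the code above, not part of
+ * the original source): on every exit path ip6_forward() either hands
+ * m to nd6_output() or frees it, and mcopy (the copy kept for ICMPv6
+ * errors) is likewise consumed or freed, so callers must not touch
+ * either mbuf afterwards.
+ */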
diff --git a/rtems/freebsd/netinet6/ip6_id.c b/rtems/freebsd/netinet6/ip6_id.c
new file mode 100644
index 00000000..e244256b
--- /dev/null
+++ b/rtems/freebsd/netinet6/ip6_id.c
@@ -0,0 +1,269 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (C) 2003 WIDE Project.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the project nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $KAME: ip6_id.c,v 1.13 2003/09/16 09:11:19 itojun Exp $
+ */
+
+/*-
+ * Copyright 1998 Niels Provos <provos@citi.umich.edu>
+ * All rights reserved.
+ *
+ * Theo de Raadt <deraadt@openbsd.org> came up with the idea of using
+ * such a mathematical system to generate more random (yet non-repeating)
+ * ids to solve the resolver/named problem. But Niels designed the
+ * actual system based on the constraints.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Niels Provos.
+ * 4. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $OpenBSD: ip_id.c,v 1.6 2002/03/15 18:19:52 millert Exp $
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * seed = random (bits - 1) bit
+ * n = prime, g0 = generator to n,
+ * j = random so that gcd(j,n-1) == 1
+ * g = g0^j mod n will be a generator again.
+ *
+ * X[0] = random seed.
+ * X[n] = a*X[n-1]+b mod m is a Linear Congruential Generator
+ * with a = 7^(even random) mod m,
+ * b = random with gcd(b,m) == 1
+ * m = constant and a maximal period of m-1.
+ *
+ * The transaction id is determined by:
+ * id[n] = seed xor (g^X[n] mod n)
+ *
+ * Effectively the id is restricted to the lower (bits - 1) bits, thus
+ * yielding two different cycles by toggling the msb on and off.
+ * This avoids reuse issues caused by reseeding.
+ */
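+
+/*
+ * Worked toy example (editor's illustration, not in the original
+ * source): with n = 11, g = 2, m = 16, a = 5, b = 3, seed = 9 and
+ * X[0] = 7, one LCG step gives X[1] = (5*7 + 3) mod 16 = 6, and the
+ * id becomes id[1] = 9 xor (2^6 mod 11) = 9 xor 9 = 0.  The tables
+ * below use much larger constants; the toy numbers only show the
+ * mechanics of the scheme.
+ */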
+
+#include <rtems/freebsd/sys/types.h>
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/libkern.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/route.h>
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/ip6.h>
+#include <rtems/freebsd/netinet6/ip6_var.h>
+
+#ifndef INT32_MAX
+#define INT32_MAX 0x7fffffffU
+#endif
+
+struct randomtab {
+ const int ru_bits; /* resulting bits */
+	const long	ru_out;		/* Time after which it will be reseeded */
+	const u_int32_t ru_max;		/* Unique cycle, avoid blackjack prediction */
+ const u_int32_t ru_gen; /* Starting generator */
+ const u_int32_t ru_n; /* ru_n: prime, ru_n - 1: product of pfacts[] */
+ const u_int32_t ru_agen; /* determine ru_a as ru_agen^(2*rand) */
+ const u_int32_t ru_m; /* ru_m = 2^x*3^y */
+ const u_int32_t pfacts[4]; /* factors of ru_n */
+
+ u_int32_t ru_counter;
+ u_int32_t ru_msb;
+
+ u_int32_t ru_x;
+ u_int32_t ru_seed, ru_seed2;
+ u_int32_t ru_a, ru_b;
+ u_int32_t ru_g;
+ long ru_reseed;
+};
+
+static struct randomtab randomtab_32 = {
+ 32, /* resulting bits */
+	180,			/* Time after which it will be reseeded */
+	1000000000,		/* Unique cycle, avoid blackjack prediction */
+ 2, /* Starting generator */
+ 2147483629, /* RU_N-1 = 2^2*3^2*59652323 */
+ 7, /* determine ru_a as RU_AGEN^(2*rand) */
+ 1836660096, /* RU_M = 2^7*3^15 - don't change */
+ { 2, 3, 59652323, 0 }, /* factors of ru_n */
+};
+
+static struct randomtab randomtab_20 = {
+ 20, /* resulting bits */
+	180,			/* Time after which it will be reseeded */
+	200000,			/* Unique cycle, avoid blackjack prediction */
+ 2, /* Starting generator */
+ 524269, /* RU_N-1 = 2^2*3^2*14563 */
+ 7, /* determine ru_a as RU_AGEN^(2*rand) */
+ 279936, /* RU_M = 2^7*3^7 - don't change */
+ { 2, 3, 14563, 0 }, /* factors of ru_n */
+};
+
+static u_int32_t pmod(u_int32_t, u_int32_t, u_int32_t);
+static void initid(struct randomtab *);
+static u_int32_t randomid(struct randomtab *);
+
+/*
+ * Do a fast modular exponentiation; the returned value will be in the
+ * range 0 - (mod-1).
+ */
+static u_int32_t
+pmod(u_int32_t gen, u_int32_t expo, u_int32_t mod)
+{
+ u_int64_t s, t, u;
+
+ s = 1;
+ t = gen;
+ u = expo;
+
+ while (u) {
+ if (u & 1)
+ s = (s * t) % mod;
+ u >>= 1;
+ t = (t * t) % mod;
+ }
+ return (s);
+}
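+
+/*
+ * Worked example (editor's illustration): pmod(3, 5, 7) scans the
+ * exponent bits 101 from the LSB: bit 0 sets s = 3 and t becomes
+ * 3*3 mod 7 = 2; bit 1 is clear, t becomes 4; bit 2 sets
+ * s = 3*4 mod 7 = 5.  Indeed 3^5 = 243 and 243 mod 7 = 5.
+ */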
+
+/*
+ * Initializes the seed and chooses a suitable generator.  Also toggles
+ * the msb flag.  The msb flag is used to generate two distinct
+ * cycles of random numbers, thus avoiding reuse of ids.
+ *
+ * This function is called from randomid() when needed; an
+ * application does not have to worry about it.
+ */
+static void
+initid(struct randomtab *p)
+{
+ u_int32_t j, i;
+ int noprime = 1;
+
+ p->ru_x = arc4random() % p->ru_m;
+
+ /* (bits - 1) bits of random seed */
+ p->ru_seed = arc4random() & (~0U >> (32 - p->ru_bits + 1));
+ p->ru_seed2 = arc4random() & (~0U >> (32 - p->ru_bits + 1));
+
+ /* Determine the LCG we use */
+ p->ru_b = (arc4random() & (~0U >> (32 - p->ru_bits))) | 1;
+ p->ru_a = pmod(p->ru_agen,
+ (arc4random() & (~0U >> (32 - p->ru_bits))) & (~1U), p->ru_m);
+ while (p->ru_b % 3 == 0)
+ p->ru_b += 2;
+
+ j = arc4random() % p->ru_n;
+
+ /*
+ * Do a fast gcd(j, RU_N - 1), so we can find a j with
+ * gcd(j, RU_N - 1) == 1, giving a new generator for
+ * RU_GEN^j mod RU_N
+ */
+ while (noprime) {
+ for (i = 0; p->pfacts[i] > 0; i++)
+ if (j % p->pfacts[i] == 0)
+ break;
+
+ if (p->pfacts[i] == 0)
+ noprime = 0;
+ else
+ j = (j + 1) % p->ru_n;
+ }
+
+ p->ru_g = pmod(p->ru_gen, j, p->ru_n);
+ p->ru_counter = 0;
+
+ p->ru_reseed = time_second + p->ru_out;
+ p->ru_msb = p->ru_msb ? 0 : (1U << (p->ru_bits - 1));
+}
+
+static u_int32_t
+randomid(struct randomtab *p)
+{
+ int i, n;
+ u_int32_t tmp;
+
+ if (p->ru_counter >= p->ru_max || time_second > p->ru_reseed)
+ initid(p);
+
+ tmp = arc4random();
+
+ /* Skip a random number of ids */
+ n = tmp & 0x3; tmp = tmp >> 2;
+ if (p->ru_counter + n >= p->ru_max)
+ initid(p);
+
+ for (i = 0; i <= n; i++) {
+ /* Linear Congruential Generator */
+ p->ru_x = (u_int32_t)((u_int64_t)p->ru_a * p->ru_x + p->ru_b) % p->ru_m;
+ }
+
+ p->ru_counter += i;
+
+ return (p->ru_seed ^ pmod(p->ru_g, p->ru_seed2 ^ p->ru_x, p->ru_n)) |
+ p->ru_msb;
+}
+
+u_int32_t
+ip6_randomid(void)
+{
+
+ return randomid(&randomtab_32);
+}
+
+u_int32_t
+ip6_randomflowlabel(void)
+{
+
+ return randomid(&randomtab_20) & 0xfffff;
+}
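+
+/*
+ * Usage sketch (hypothetical caller, for illustration only): a
+ * fragmenting path would typically assign
+ *
+ *	ip6f->ip6f_ident = htonl(ip6_randomid());
+ *
+ * while flow-label assignment masks the 20-bit result of
+ * ip6_randomflowlabel() into the ip6_flow field.  The exact call
+ * sites live elsewhere in the stack.
+ */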
diff --git a/rtems/freebsd/netinet6/ip6_input.c b/rtems/freebsd/netinet6/ip6_input.c
new file mode 100644
index 00000000..fd569be5
--- /dev/null
+++ b/rtems/freebsd/netinet6/ip6_input.c
@@ -0,0 +1,1759 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the project nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $KAME: ip6_input.c,v 1.259 2002/01/21 04:58:09 jinmei Exp $
+ */
+
+/*-
+ * Copyright (c) 1982, 1986, 1988, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)ip_input.c 8.2 (Berkeley) 1/4/94
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/freebsd/local/opt_inet.h>
+#include <rtems/freebsd/local/opt_inet6.h>
+#include <rtems/freebsd/local/opt_ipsec.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/proc.h>
+#include <rtems/freebsd/sys/domain.h>
+#include <rtems/freebsd/sys/protosw.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/socketvar.h>
+#include <rtems/freebsd/sys/errno.h>
+#include <rtems/freebsd/sys/time.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/syslog.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/if_types.h>
+#include <rtems/freebsd/net/if_dl.h>
+#include <rtems/freebsd/net/route.h>
+#include <rtems/freebsd/net/netisr.h>
+#include <rtems/freebsd/net/pfil.h>
+#include <rtems/freebsd/net/vnet.h>
+
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/in_systm.h>
+#include <rtems/freebsd/net/if_llatbl.h>
+#ifdef INET
+#include <rtems/freebsd/netinet/ip.h>
+#include <rtems/freebsd/netinet/ip_icmp.h>
+#endif /* INET */
+#include <rtems/freebsd/netinet/ip6.h>
+#include <rtems/freebsd/netinet6/in6_var.h>
+#include <rtems/freebsd/netinet6/ip6_var.h>
+#include <rtems/freebsd/netinet/in_pcb.h>
+#include <rtems/freebsd/netinet/icmp6.h>
+#include <rtems/freebsd/netinet6/scope6_var.h>
+#include <rtems/freebsd/netinet6/in6_ifattach.h>
+#include <rtems/freebsd/netinet6/nd6.h>
+
+#ifdef IPSEC
+#include <rtems/freebsd/netipsec/ipsec.h>
+#include <rtems/freebsd/netinet6/ip6_ipsec.h>
+#include <rtems/freebsd/netipsec/ipsec6.h>
+#endif /* IPSEC */
+
+#include <rtems/freebsd/netinet6/ip6protosw.h>
+
+extern struct domain inet6domain;
+
+u_char ip6_protox[IPPROTO_MAX];
+VNET_DEFINE(struct in6_ifaddrhead, in6_ifaddrhead);
+
+static struct netisr_handler ip6_nh = {
+ .nh_name = "ip6",
+ .nh_handler = ip6_input,
+ .nh_proto = NETISR_IPV6,
+ .nh_policy = NETISR_POLICY_FLOW,
+};
+
+VNET_DECLARE(struct callout, in6_tmpaddrtimer_ch);
+#define V_in6_tmpaddrtimer_ch VNET(in6_tmpaddrtimer_ch)
+
+VNET_DEFINE(struct pfil_head, inet6_pfil_hook);
+
+VNET_DEFINE(struct ip6stat, ip6stat);
+
+struct rwlock in6_ifaddr_lock;
+RW_SYSINIT(in6_ifaddr_lock, &in6_ifaddr_lock, "in6_ifaddr_lock");
+
+static void ip6_init2(void *);
+static struct ip6aux *ip6_setdstifaddr(struct mbuf *, struct in6_ifaddr *);
+static int ip6_hopopts_input(u_int32_t *, u_int32_t *, struct mbuf **, int *);
+#ifdef PULLDOWN_TEST
+static struct mbuf *ip6_pullexthdr(struct mbuf *, size_t, int);
+#endif
+
+/*
+ * IP6 initialization: fill in the IP6 protocol switch table.
+ * All protocols not implemented in the kernel go to the raw IP6 protocol
+ * handler.
+ */
+void
+ip6_init(void)
+{
+ struct ip6protosw *pr;
+ int i;
+
+ TUNABLE_INT_FETCH("net.inet6.ip6.auto_linklocal",
+ &V_ip6_auto_linklocal);
+
+ TAILQ_INIT(&V_in6_ifaddrhead);
+
+ /* Initialize packet filter hooks. */
+ V_inet6_pfil_hook.ph_type = PFIL_TYPE_AF;
+ V_inet6_pfil_hook.ph_af = AF_INET6;
+ if ((i = pfil_head_register(&V_inet6_pfil_hook)) != 0)
+ printf("%s: WARNING: unable to register pfil hook, "
+ "error %d\n", __func__, i);
+
+ scope6_init();
+ addrsel_policy_init();
+ nd6_init();
+ frag6_init();
+
+ V_ip6_desync_factor = arc4random() % MAX_TEMP_DESYNC_FACTOR;
+
+ /* Skip global initialization stuff for non-default instances. */
+ if (!IS_DEFAULT_VNET(curvnet))
+ return;
+
+#ifdef DIAGNOSTIC
+ if (sizeof(struct protosw) != sizeof(struct ip6protosw))
+ panic("sizeof(protosw) != sizeof(ip6protosw)");
+#endif
+ pr = (struct ip6protosw *)pffindproto(PF_INET6, IPPROTO_RAW, SOCK_RAW);
+ if (pr == NULL)
+ panic("ip6_init");
+
+ /* Initialize the entire ip6_protox[] array to IPPROTO_RAW. */
+ for (i = 0; i < IPPROTO_MAX; i++)
+ ip6_protox[i] = pr - inet6sw;
+ /*
+ * Cycle through IP protocols and put them into the appropriate place
+ * in ip6_protox[].
+ */
+ for (pr = (struct ip6protosw *)inet6domain.dom_protosw;
+ pr < (struct ip6protosw *)inet6domain.dom_protoswNPROTOSW; pr++)
+ if (pr->pr_domain->dom_family == PF_INET6 &&
+ pr->pr_protocol && pr->pr_protocol != IPPROTO_RAW) {
+ /* Be careful to only index valid IP protocols. */
+ if (pr->pr_protocol < IPPROTO_MAX)
+ ip6_protox[pr->pr_protocol] = pr - inet6sw;
+ }
+
+ netisr_register(&ip6_nh);
+}
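+
+/*
+ * Dispatch sketch (editor's illustration, with made-up indices): if
+ * inet6sw[] had the raw handler at index 0 and TCP at index 3, then
+ * after ip6_init() ip6_protox[IPPROTO_TCP] == 3 while every
+ * unassigned protocol number still maps to 0, so ip6_input() can
+ * dispatch unconditionally via
+ * (*inet6sw[ip6_protox[nxt]].pr_input)(&m, &off, nxt).
+ */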
+
+/*
+ * The protocol to be inserted into ip6_protox[] must be already registered
+ * in inet6sw[], either statically or through pf_proto_register().
+ */
+int
+ip6proto_register(short ip6proto)
+{
+ struct ip6protosw *pr;
+
+ /* Sanity checks. */
+ if (ip6proto <= 0 || ip6proto >= IPPROTO_MAX)
+ return (EPROTONOSUPPORT);
+
+ /*
+ * The protocol slot must not be occupied by another protocol
+ * already. An index pointing to IPPROTO_RAW is unused.
+ */
+ pr = (struct ip6protosw *)pffindproto(PF_INET6, IPPROTO_RAW, SOCK_RAW);
+ if (pr == NULL)
+ return (EPFNOSUPPORT);
+ if (ip6_protox[ip6proto] != pr - inet6sw) /* IPPROTO_RAW */
+ return (EEXIST);
+
+ /*
+ * Find the protocol position in inet6sw[] and set the index.
+ */
+ for (pr = (struct ip6protosw *)inet6domain.dom_protosw;
+ pr < (struct ip6protosw *)inet6domain.dom_protoswNPROTOSW; pr++) {
+ if (pr->pr_domain->dom_family == PF_INET6 &&
+ pr->pr_protocol && pr->pr_protocol == ip6proto) {
+ ip6_protox[pr->pr_protocol] = pr - inet6sw;
+ return (0);
+ }
+ }
+ return (EPROTONOSUPPORT);
+}
+
+int
+ip6proto_unregister(short ip6proto)
+{
+ struct ip6protosw *pr;
+
+ /* Sanity checks. */
+ if (ip6proto <= 0 || ip6proto >= IPPROTO_MAX)
+ return (EPROTONOSUPPORT);
+
+ /* Check if the protocol was indeed registered. */
+ pr = (struct ip6protosw *)pffindproto(PF_INET6, IPPROTO_RAW, SOCK_RAW);
+ if (pr == NULL)
+ return (EPFNOSUPPORT);
+ if (ip6_protox[ip6proto] == pr - inet6sw) /* IPPROTO_RAW */
+ return (ENOENT);
+
+ /* Reset the protocol slot to IPPROTO_RAW. */
+ ip6_protox[ip6proto] = pr - inet6sw;
+ return (0);
+}
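+
+/*
+ * Usage sketch (hypothetical module, for illustration only): a
+ * loadable protocol first adds its ip6protosw entry, e.g. via
+ * pf_proto_register(), and then claims its slot:
+ *
+ *	error = ip6proto_register(IPPROTO_SCTP);
+ *
+ * On unload it releases the slot with
+ * ip6proto_unregister(IPPROTO_SCTP), which resets the entry to the
+ * raw handler.
+ */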
+
+#ifdef VIMAGE
+void
+ip6_destroy()
+{
+
+ nd6_destroy();
+ callout_drain(&V_in6_tmpaddrtimer_ch);
+}
+#endif
+
+static int
+ip6_init2_vnet(const void *unused __unused)
+{
+
+ /* nd6_timer_init */
+ callout_init(&V_nd6_timer_ch, 0);
+ callout_reset(&V_nd6_timer_ch, hz, nd6_timer, curvnet);
+
+	/* timer for regeneration of temporary addresses' randomized ID */
+ callout_init(&V_in6_tmpaddrtimer_ch, 0);
+ callout_reset(&V_in6_tmpaddrtimer_ch,
+ (V_ip6_temp_preferred_lifetime - V_ip6_desync_factor -
+ V_ip6_temp_regen_advance) * hz,
+ in6_tmpaddrtimer, curvnet);
+
+ return (0);
+}
+
+static void
+ip6_init2(void *dummy)
+{
+
+ ip6_init2_vnet(NULL);
+}
+
+/* cheat */
+/* This must be after route_init(), which is now SI_ORDER_THIRD */
+SYSINIT(netinet6init2, SI_SUB_PROTO_DOMAIN, SI_ORDER_MIDDLE, ip6_init2, NULL);
+
+void
+ip6_input(struct mbuf *m)
+{
+ struct ip6_hdr *ip6;
+ int off = sizeof(struct ip6_hdr), nest;
+ u_int32_t plen;
+ u_int32_t rtalert = ~0;
+ int nxt, ours = 0;
+ struct ifnet *deliverifp = NULL, *ifp = NULL;
+ struct in6_addr odst;
+ struct route_in6 rin6;
+ int srcrt = 0;
+ struct llentry *lle = NULL;
+ struct sockaddr_in6 dst6, *dst;
+
+ bzero(&rin6, sizeof(struct route_in6));
+#ifdef IPSEC
+ /*
+ * should the inner packet be considered authentic?
+ * see comment in ah4_input().
+ * NB: m cannot be NULL when passed to the input routine
+ */
+
+ m->m_flags &= ~M_AUTHIPHDR;
+ m->m_flags &= ~M_AUTHIPDGM;
+
+#endif /* IPSEC */
+
+ /*
+	 * make sure we don't have stale onion-peeling information in an m_tag.
+ */
+ ip6_delaux(m);
+
+ /*
+ * mbuf statistics
+ */
+ if (m->m_flags & M_EXT) {
+ if (m->m_next)
+ V_ip6stat.ip6s_mext2m++;
+ else
+ V_ip6stat.ip6s_mext1++;
+ } else {
+#define M2MMAX (sizeof(V_ip6stat.ip6s_m2m)/sizeof(V_ip6stat.ip6s_m2m[0]))
+ if (m->m_next) {
+ if (m->m_flags & M_LOOP) {
+ V_ip6stat.ip6s_m2m[V_loif->if_index]++;
+ } else if (m->m_pkthdr.rcvif->if_index < M2MMAX)
+ V_ip6stat.ip6s_m2m[m->m_pkthdr.rcvif->if_index]++;
+ else
+ V_ip6stat.ip6s_m2m[0]++;
+ } else
+ V_ip6stat.ip6s_m1++;
+#undef M2MMAX
+ }
+
+ /* drop the packet if IPv6 operation is disabled on the IF */
+ if ((ND_IFINFO(m->m_pkthdr.rcvif)->flags & ND6_IFF_IFDISABLED)) {
+ m_freem(m);
+ return;
+ }
+
+ in6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_receive);
+ V_ip6stat.ip6s_total++;
+
+#ifndef PULLDOWN_TEST
+ /*
+	 * L2 bridge code and some other code can return an mbuf chain
+	 * that does not conform to the KAME requirement.  Too bad.
+ * XXX: fails to join if interface MTU > MCLBYTES. jumbogram?
+ */
+ if (m && m->m_next != NULL && m->m_pkthdr.len < MCLBYTES) {
+ struct mbuf *n;
+
+ MGETHDR(n, M_DONTWAIT, MT_HEADER);
+ if (n)
+ M_MOVE_PKTHDR(n, m);
+ if (n && n->m_pkthdr.len > MHLEN) {
+ MCLGET(n, M_DONTWAIT);
+ if ((n->m_flags & M_EXT) == 0) {
+ m_freem(n);
+ n = NULL;
+ }
+ }
+ if (n == NULL) {
+ m_freem(m);
+ return; /* ENOBUFS */
+ }
+
+ m_copydata(m, 0, n->m_pkthdr.len, mtod(n, caddr_t));
+ n->m_len = n->m_pkthdr.len;
+ m_freem(m);
+ m = n;
+ }
+ IP6_EXTHDR_CHECK(m, 0, sizeof(struct ip6_hdr), /* nothing */);
+#endif
+
+ if (m->m_len < sizeof(struct ip6_hdr)) {
+ struct ifnet *inifp;
+ inifp = m->m_pkthdr.rcvif;
+ if ((m = m_pullup(m, sizeof(struct ip6_hdr))) == NULL) {
+ V_ip6stat.ip6s_toosmall++;
+ in6_ifstat_inc(inifp, ifs6_in_hdrerr);
+ return;
+ }
+ }
+
+ ip6 = mtod(m, struct ip6_hdr *);
+
+ if ((ip6->ip6_vfc & IPV6_VERSION_MASK) != IPV6_VERSION) {
+ V_ip6stat.ip6s_badvers++;
+ in6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_hdrerr);
+ goto bad;
+ }
+
+ V_ip6stat.ip6s_nxthist[ip6->ip6_nxt]++;
+
+ /*
+ * Check against address spoofing/corruption.
+ */
+ if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_src) ||
+ IN6_IS_ADDR_UNSPECIFIED(&ip6->ip6_dst)) {
+ /*
+ * XXX: "badscope" is not very suitable for a multicast source.
+ */
+ V_ip6stat.ip6s_badscope++;
+ in6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_addrerr);
+ goto bad;
+ }
+ if (IN6_IS_ADDR_MC_INTFACELOCAL(&ip6->ip6_dst) &&
+ !(m->m_flags & M_LOOP)) {
+ /*
+ * In this case, the packet should come from the loopback
+ * interface. However, we cannot just check the if_flags,
+ * because ip6_mloopback() passes the "actual" interface
+ * as the outgoing/incoming interface.
+ */
+ V_ip6stat.ip6s_badscope++;
+ in6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_addrerr);
+ goto bad;
+ }
+
+#ifdef ALTQ
+ if (altq_input != NULL && (*altq_input)(m, AF_INET6) == 0) {
+ /* packet is dropped by traffic conditioner */
+ return;
+ }
+#endif
+ /*
+	 * The following check is not documented in the specs.  A malicious
+	 * party may be able to use an IPv4 mapped addr to confuse the tcp/udp
+	 * stack and bypass security checks (act as if it was from 127.0.0.1
+	 * by using IPv6 src ::ffff:127.0.0.1).  Be cautious.
+	 *
+	 * This check chokes if we are in an SIIT cloud.  As none of the BSDs
+	 * support IPv4-less kernel compilation, we cannot support an SIIT
+	 * environment at all.  So, it makes more sense for us to reject any
+	 * malicious packets in a non-SIIT environment than to try to support
+	 * SIIT partially.
+ */
+ if (IN6_IS_ADDR_V4MAPPED(&ip6->ip6_src) ||
+ IN6_IS_ADDR_V4MAPPED(&ip6->ip6_dst)) {
+ V_ip6stat.ip6s_badscope++;
+ in6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_addrerr);
+ goto bad;
+ }
+#if 0
+ /*
+ * Reject packets with IPv4 compatible addresses (auto tunnel).
+ *
+ * The code forbids auto tunnel relay case in RFC1933 (the check is
+ * stronger than RFC1933). We may want to re-enable it if mech-xx
+ * is revised to forbid relaying case.
+ */
+ if (IN6_IS_ADDR_V4COMPAT(&ip6->ip6_src) ||
+ IN6_IS_ADDR_V4COMPAT(&ip6->ip6_dst)) {
+ V_ip6stat.ip6s_badscope++;
+ in6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_addrerr);
+ goto bad;
+ }
+#endif
+
+ /*
+ * Run through list of hooks for input packets.
+ *
+ * NB: Beware of the destination address changing
+ * (e.g. by NAT rewriting). When this happens,
+ * tell ip6_forward to do the right thing.
+ */
+ odst = ip6->ip6_dst;
+
+ /* Jump over all PFIL processing if hooks are not active. */
+ if (!PFIL_HOOKED(&V_inet6_pfil_hook))
+ goto passin;
+
+ if (pfil_run_hooks(&V_inet6_pfil_hook, &m,
+ m->m_pkthdr.rcvif, PFIL_IN, NULL))
+ return;
+ if (m == NULL) /* consumed by filter */
+ return;
+ ip6 = mtod(m, struct ip6_hdr *);
+ srcrt = !IN6_ARE_ADDR_EQUAL(&odst, &ip6->ip6_dst);
+
+passin:
+ /*
+ * Disambiguate address scope zones (if there is ambiguity).
+ * We first make sure that the original source or destination address
+ * is not in our internal form for scoped addresses. Such addresses
+ * are not necessarily invalid spec-wise, but we cannot accept them due
+ * to the usage conflict.
+ * in6_setscope() then also checks and rejects the cases where src or
+ * dst are the loopback address and the receiving interface
+ * is not loopback.
+ */
+ if (in6_clearscope(&ip6->ip6_src) || in6_clearscope(&ip6->ip6_dst)) {
+ V_ip6stat.ip6s_badscope++; /* XXX */
+ goto bad;
+ }
+ if (in6_setscope(&ip6->ip6_src, m->m_pkthdr.rcvif, NULL) ||
+ in6_setscope(&ip6->ip6_dst, m->m_pkthdr.rcvif, NULL)) {
+ V_ip6stat.ip6s_badscope++;
+ goto bad;
+ }
+
+ /*
+ * Multicast check. Assume packet is for us to avoid
+ * prematurely taking locks.
+ */
+ if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst)) {
+ ours = 1;
+ in6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_mcast);
+ deliverifp = m->m_pkthdr.rcvif;
+ goto hbhcheck;
+ }
+
+ /*
+ * Unicast check
+ */
+
+ bzero(&dst6, sizeof(dst6));
+ dst6.sin6_family = AF_INET6;
+ dst6.sin6_len = sizeof(struct sockaddr_in6);
+ dst6.sin6_addr = ip6->ip6_dst;
+ ifp = m->m_pkthdr.rcvif;
+ IF_AFDATA_LOCK(ifp);
+ lle = lla_lookup(LLTABLE6(ifp), 0,
+ (struct sockaddr *)&dst6);
+ IF_AFDATA_UNLOCK(ifp);
+ if ((lle != NULL) && (lle->la_flags & LLE_IFADDR)) {
+ struct ifaddr *ifa;
+ struct in6_ifaddr *ia6;
+ int bad;
+
+ bad = 1;
+#define sa_equal(a1, a2) \
+ (bcmp((a1), (a2), ((a1))->sin6_len) == 0)
+ IF_ADDR_LOCK(ifp);
+ TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
+ if (ifa->ifa_addr->sa_family != dst6.sin6_family)
+ continue;
+ if (sa_equal(&dst6, ifa->ifa_addr))
+ break;
+ }
+ KASSERT(ifa != NULL, ("%s: ifa not found for lle %p",
+ __func__, lle));
+#undef sa_equal
+
+ ia6 = (struct in6_ifaddr *)ifa;
+ if (!(ia6->ia6_flags & IN6_IFF_NOTREADY)) {
+ /* Count the packet in the ip address stats */
+ ia6->ia_ifa.if_ipackets++;
+ ia6->ia_ifa.if_ibytes += m->m_pkthdr.len;
+
+ /*
+ * record address information into m_tag.
+ */
+ (void)ip6_setdstifaddr(m, ia6);
+
+ bad = 0;
+ } else {
+ char ip6bufs[INET6_ADDRSTRLEN];
+ char ip6bufd[INET6_ADDRSTRLEN];
+ /* address is not ready, so discard the packet. */
+ nd6log((LOG_INFO,
+ "ip6_input: packet to an unready address %s->%s\n",
+ ip6_sprintf(ip6bufs, &ip6->ip6_src),
+ ip6_sprintf(ip6bufd, &ip6->ip6_dst)));
+ }
+ IF_ADDR_UNLOCK(ifp);
+ LLE_RUNLOCK(lle);
+ if (bad)
+ goto bad;
+ else {
+ ours = 1;
+ deliverifp = ifp;
+ goto hbhcheck;
+ }
+ }
+ if (lle != NULL)
+ LLE_RUNLOCK(lle);
+
+ dst = &rin6.ro_dst;
+ dst->sin6_len = sizeof(struct sockaddr_in6);
+ dst->sin6_family = AF_INET6;
+ dst->sin6_addr = ip6->ip6_dst;
+ rin6.ro_rt = rtalloc1((struct sockaddr *)dst, 0, 0);
+ if (rin6.ro_rt)
+ RT_UNLOCK(rin6.ro_rt);
+
+#define rt6_key(r) ((struct sockaddr_in6 *)((r)->rt_nodes->rn_key))
+
+ /*
+ * Accept the packet if the forwarding interface to the destination
+ * according to the routing table is the loopback interface,
+ * unless the associated route has a gateway.
+	 * Note that this approach causes us to accept a packet if there is a
+ * route to the loopback interface for the destination of the packet.
+ * But we think it's even useful in some situations, e.g. when using
+ * a special daemon which wants to intercept the packet.
+ *
+ * XXX: some OSes automatically make a cloned route for the destination
+ * of an outgoing packet. If the outgoing interface of the packet
+ * is a loopback one, the kernel would consider the packet to be
+	 * accepted, even if we have no such address assigned on the interface.
+ * We check the cloned flag of the route entry to reject such cases,
+ * assuming that route entries for our own addresses are not made by
+ * cloning (it should be true because in6_addloop explicitly installs
+ * the host route). However, we might have to do an explicit check
+	 * the host route).  However, we might have to do an explicit check,
+	 * though it would be less efficient.  Or, should we rather install a
+ */
+ if (rin6.ro_rt &&
+ (rin6.ro_rt->rt_flags &
+ (RTF_HOST|RTF_GATEWAY)) == RTF_HOST &&
+#ifdef RTF_WASCLONED
+ !(rin6.ro_rt->rt_flags & RTF_WASCLONED) &&
+#endif
+#ifdef RTF_CLONED
+ !(rin6.ro_rt->rt_flags & RTF_CLONED) &&
+#endif
+#if 0
+ /*
+ * The check below is redundant since the comparison of
+ * the destination and the key of the rtentry has
+ * already done through looking up the routing table.
+ */
+ IN6_ARE_ADDR_EQUAL(&ip6->ip6_dst,
+ &rt6_key(rin6.ro_rt)->sin6_addr)
+#endif
+ rin6.ro_rt->rt_ifp->if_type == IFT_LOOP) {
+ int free_ia6 = 0;
+ struct in6_ifaddr *ia6;
+
+ /*
+ * found the loopback route to the interface address
+ */
+ if (rin6.ro_rt->rt_gateway->sa_family == AF_LINK) {
+ struct sockaddr_in6 dest6;
+
+ bzero(&dest6, sizeof(dest6));
+ dest6.sin6_family = AF_INET6;
+ dest6.sin6_len = sizeof(dest6);
+ dest6.sin6_addr = ip6->ip6_dst;
+ ia6 = (struct in6_ifaddr *)
+ ifa_ifwithaddr((struct sockaddr *)&dest6);
+ if (ia6 == NULL)
+ goto bad;
+ free_ia6 = 1;
+ }
+ else
+ ia6 = (struct in6_ifaddr *)rin6.ro_rt->rt_ifa;
+
+ /*
+ * record address information into m_tag.
+ */
+ (void)ip6_setdstifaddr(m, ia6);
+
+ /*
+ * packets to a tentative, duplicated, or somehow invalid
+ * address must not be accepted.
+ */
+ if (!(ia6->ia6_flags & IN6_IFF_NOTREADY)) {
+ /* this address is ready */
+ ours = 1;
+ deliverifp = ia6->ia_ifp; /* correct? */
+ /* Count the packet in the ip address stats */
+ ia6->ia_ifa.if_ipackets++;
+ ia6->ia_ifa.if_ibytes += m->m_pkthdr.len;
+ if (ia6 != NULL && free_ia6 != 0)
+ ifa_free(&ia6->ia_ifa);
+ goto hbhcheck;
+ } else {
+ char ip6bufs[INET6_ADDRSTRLEN];
+ char ip6bufd[INET6_ADDRSTRLEN];
+ /* address is not ready, so discard the packet. */
+ nd6log((LOG_INFO,
+ "ip6_input: packet to an unready address %s->%s\n",
+ ip6_sprintf(ip6bufs, &ip6->ip6_src),
+ ip6_sprintf(ip6bufd, &ip6->ip6_dst)));
+
+ if (ia6 != NULL && free_ia6 != 0)
+ ifa_free(&ia6->ia_ifa);
+ goto bad;
+ }
+ }
+
+ /*
+ * FAITH (Firewall Aided Internet Translator)
+ */
+ if (V_ip6_keepfaith) {
+ if (rin6.ro_rt && rin6.ro_rt->rt_ifp &&
+ rin6.ro_rt->rt_ifp->if_type == IFT_FAITH) {
+ /* XXX do we need more sanity checks? */
+ ours = 1;
+ deliverifp = rin6.ro_rt->rt_ifp; /* faith */
+ goto hbhcheck;
+ }
+ }
+
+ /*
+ * Now there is no reason to process the packet if it's not our own
+ * and we're not a router.
+ */
+ if (!V_ip6_forwarding) {
+ V_ip6stat.ip6s_cantforward++;
+ in6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_discard);
+ goto bad;
+ }
+
+ hbhcheck:
+ /*
+ * record address information into m_tag, if we don't have one yet.
+ * note that we are unable to record it, if the address is not listed
+ * as our interface address (e.g. multicast addresses, addresses
+ * within FAITH prefixes and such).
+ */
+ if (deliverifp && !ip6_getdstifaddr(m)) {
+ struct in6_ifaddr *ia6;
+
+ ia6 = in6_ifawithifp(deliverifp, &ip6->ip6_dst);
+ if (ia6) {
+ if (!ip6_setdstifaddr(m, ia6)) {
+ /*
+ * XXX maybe we should drop the packet here,
+ * as we could not provide enough information
+ * to the upper layers.
+ */
+ }
+ ifa_free(&ia6->ia_ifa);
+ }
+ }
+
+ /*
+ * Process Hop-by-Hop options header if it's contained.
+ * m may be modified in ip6_hopopts_input().
+ * If a JumboPayload option is included, plen will also be modified.
+ */
+ plen = (u_int32_t)ntohs(ip6->ip6_plen);
+ if (ip6->ip6_nxt == IPPROTO_HOPOPTS) {
+ struct ip6_hbh *hbh;
+
+ if (ip6_hopopts_input(&plen, &rtalert, &m, &off)) {
+#if 0 /*touches NULL pointer*/
+ in6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_discard);
+#endif
+			goto out;	/* m has already been freed */
+ }
+
+ /* adjust pointer */
+ ip6 = mtod(m, struct ip6_hdr *);
+
+ /*
+ * if the payload length field is 0 and the next header field
+ * indicates Hop-by-Hop Options header, then a Jumbo Payload
+ * option MUST be included.
+ */
+ if (ip6->ip6_plen == 0 && plen == 0) {
+ /*
+ * Note that if a valid jumbo payload option is
+ * contained, ip6_hopopts_input() must set a valid
+ * (non-zero) payload length to the variable plen.
+ */
+ V_ip6stat.ip6s_badoptions++;
+ in6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_discard);
+ in6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_hdrerr);
+ icmp6_error(m, ICMP6_PARAM_PROB,
+ ICMP6_PARAMPROB_HEADER,
+ (caddr_t)&ip6->ip6_plen - (caddr_t)ip6);
+ goto out;
+ }
+#ifndef PULLDOWN_TEST
+ /* ip6_hopopts_input() ensures that mbuf is contiguous */
+ hbh = (struct ip6_hbh *)(ip6 + 1);
+#else
+ IP6_EXTHDR_GET(hbh, struct ip6_hbh *, m, sizeof(struct ip6_hdr),
+ sizeof(struct ip6_hbh));
+ if (hbh == NULL) {
+ V_ip6stat.ip6s_tooshort++;
+ goto out;
+ }
+#endif
+ nxt = hbh->ip6h_nxt;
+
+ /*
+ * If we are acting as a router and the packet contains a
+ * router alert option, see if we know the option value.
+ * Currently, we only support the option value for MLD, in which
+ * case we should pass the packet to the multicast routing
+ * daemon.
+ */
+ if (rtalert != ~0) {
+ switch (rtalert) {
+ case IP6OPT_RTALERT_MLD:
+ if (V_ip6_forwarding)
+ ours = 1;
+ break;
+ default:
+ /*
+ * RFC2711 requires unrecognized values must be
+ * silently ignored.
+ */
+ break;
+ }
+ }
+ } else
+ nxt = ip6->ip6_nxt;
+
+ /*
+ * Check that the amount of data in the buffers
+	 * is at least as much as the IPv6 header would have us expect.
+ * Trim mbufs if longer than we expect.
+ * Drop packet if shorter than we expect.
+ */
+ if (m->m_pkthdr.len - sizeof(struct ip6_hdr) < plen) {
+ V_ip6stat.ip6s_tooshort++;
+ in6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_truncated);
+ goto bad;
+ }
+ if (m->m_pkthdr.len > sizeof(struct ip6_hdr) + plen) {
+ if (m->m_len == m->m_pkthdr.len) {
+ m->m_len = sizeof(struct ip6_hdr) + plen;
+ m->m_pkthdr.len = sizeof(struct ip6_hdr) + plen;
+ } else
+ m_adj(m, sizeof(struct ip6_hdr) + plen - m->m_pkthdr.len);
+ }
+
+ /*
+ * Forward if desirable.
+ */
+ if (V_ip6_mrouter &&
+ IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst)) {
+ /*
+ * If we are acting as a multicast router, all
+ * incoming multicast packets are passed to the
+ * kernel-level multicast forwarding function.
+ * The packet is returned (relatively) intact; if
+ * ip6_mforward() returns a non-zero value, the packet
+ * must be discarded, else it may be accepted below.
+ *
+ * XXX TODO: Check hlim and multicast scope here to avoid
+ * unnecessarily calling into ip6_mforward().
+ */
+ if (ip6_mforward &&
+ ip6_mforward(ip6, m->m_pkthdr.rcvif, m)) {
+ IP6STAT_INC(ip6s_cantforward);
+ in6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_discard);
+ goto bad;
+ }
+ } else if (!ours) {
+ ip6_forward(m, srcrt);
+ goto out;
+ }
+
+ ip6 = mtod(m, struct ip6_hdr *);
+
+ /*
+	 * A malicious party may be able to use an IPv4 mapped addr to confuse
+	 * the tcp/udp stack and bypass security checks (act as if it was from
+	 * 127.0.0.1 by using IPv6 src ::ffff:127.0.0.1).  Be cautious.
+ *
+ * For SIIT end node behavior, you may want to disable the check.
+ * However, you will become vulnerable to attacks using IPv4 mapped
+ * source.
+ */
+ if (IN6_IS_ADDR_V4MAPPED(&ip6->ip6_src) ||
+ IN6_IS_ADDR_V4MAPPED(&ip6->ip6_dst)) {
+ V_ip6stat.ip6s_badscope++;
+ in6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_addrerr);
+ goto bad;
+ }
+
+ /*
+ * Tell launch routine the next header
+ */
+ V_ip6stat.ip6s_delivered++;
+ in6_ifstat_inc(deliverifp, ifs6_in_deliver);
+ nest = 0;
+
+ while (nxt != IPPROTO_DONE) {
+ if (V_ip6_hdrnestlimit && (++nest > V_ip6_hdrnestlimit)) {
+ V_ip6stat.ip6s_toomanyhdr++;
+ goto bad;
+ }
+
+ /*
+		 * protection against faulty packets - there should be
+ * more sanity checks in header chain processing.
+ */
+ if (m->m_pkthdr.len < off) {
+ V_ip6stat.ip6s_tooshort++;
+ in6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_truncated);
+ goto bad;
+ }
+
+#ifdef IPSEC
+ /*
+		 * enforce IPsec policy checking if we are seeing the last
+		 * header.  Note that we do not visit this for protocols with
+		 * pcb-layer code, like udp/tcp/raw ip.
+ */
+ if (ip6_ipsec_input(m, nxt))
+ goto bad;
+#endif /* IPSEC */
+
+ /*
+ * Use mbuf flags to propagate Router Alert option to
+ * ICMPv6 layer, as hop-by-hop options have been stripped.
+ */
+ if (nxt == IPPROTO_ICMPV6 && rtalert != ~0)
+ m->m_flags |= M_RTALERT_MLD;
+
+ nxt = (*inet6sw[ip6_protox[nxt]].pr_input)(&m, &off, nxt);
+ }
+ goto out;
+bad:
+ m_freem(m);
+out:
+ if (rin6.ro_rt)
+ RTFREE(rin6.ro_rt);
+}
+
+/*
+ * set/grab the in6_ifaddr corresponding to the IPv6 destination address.
+ * XXX backward compatibility wrapper
+ *
+ * XXXRW: We should bump the refcount on ia6 before sticking it in the m_tag,
+ * and then bump it when the tag is copied, and release it when the tag is
+ * freed. Unfortunately, m_tags don't support deep copies (yet), so instead
+ * we just bump the ia refcount when we receive it. This should be fixed.
+ */
+static struct ip6aux *
+ip6_setdstifaddr(struct mbuf *m, struct in6_ifaddr *ia6)
+{
+ struct ip6aux *ip6a;
+
+ ip6a = ip6_addaux(m);
+ if (ip6a)
+ ip6a->ip6a_dstia6 = ia6;
+ return ip6a; /* NULL if failed to set */
+}
+
+struct in6_ifaddr *
+ip6_getdstifaddr(struct mbuf *m)
+{
+ struct ip6aux *ip6a;
+ struct in6_ifaddr *ia;
+
+ ip6a = ip6_findaux(m);
+ if (ip6a) {
+ ia = ip6a->ip6a_dstia6;
+ ifa_ref(&ia->ia_ifa);
+ return ia;
+ } else
+ return NULL;
+}
+
+/*
+ * Hop-by-Hop options header processing. If a valid jumbo payload option is
+ * included, the real payload length will be stored in plenp.
+ *
+ * rtalertp - XXX: should be stored in a smarter way
+ */
+static int
+ip6_hopopts_input(u_int32_t *plenp, u_int32_t *rtalertp,
+ struct mbuf **mp, int *offp)
+{
+ struct mbuf *m = *mp;
+ int off = *offp, hbhlen;
+ struct ip6_hbh *hbh;
+ u_int8_t *opt;
+
+ /* validation of the length of the header */
+#ifndef PULLDOWN_TEST
+ IP6_EXTHDR_CHECK(m, off, sizeof(*hbh), -1);
+ hbh = (struct ip6_hbh *)(mtod(m, caddr_t) + off);
+ hbhlen = (hbh->ip6h_len + 1) << 3;
+
+ IP6_EXTHDR_CHECK(m, off, hbhlen, -1);
+ hbh = (struct ip6_hbh *)(mtod(m, caddr_t) + off);
+#else
+ IP6_EXTHDR_GET(hbh, struct ip6_hbh *, m,
+ sizeof(struct ip6_hdr), sizeof(struct ip6_hbh));
+ if (hbh == NULL) {
+ V_ip6stat.ip6s_tooshort++;
+ return -1;
+ }
+ hbhlen = (hbh->ip6h_len + 1) << 3;
+ IP6_EXTHDR_GET(hbh, struct ip6_hbh *, m, sizeof(struct ip6_hdr),
+ hbhlen);
+ if (hbh == NULL) {
+ V_ip6stat.ip6s_tooshort++;
+ return -1;
+ }
+#endif
+ off += hbhlen;
+ hbhlen -= sizeof(struct ip6_hbh);
+ opt = (u_int8_t *)hbh + sizeof(struct ip6_hbh);
+
+ if (ip6_process_hopopts(m, (u_int8_t *)hbh + sizeof(struct ip6_hbh),
+ hbhlen, rtalertp, plenp) < 0)
+ return (-1);
+
+ *offp = off;
+ *mp = m;
+ return (0);
+}
+
+/*
+ * Search the header for all Hop-by-hop options and process each option.
+ * This function is separate from ip6_hopopts_input() in order to
+ * handle the case where the sending node itself processes its hop-by-hop
+ * options header.  In such a case, the function is called from ip6_output().
+ *
+ * The function assumes that the hbh header is located right after the IPv6
+ * header (RFC2460 p7), opthead is a pointer into the data content of m, and
+ * the region from opthead to opthead + hbhlen is contiguous in memory.
+ */
+int
+ip6_process_hopopts(struct mbuf *m, u_int8_t *opthead, int hbhlen,
+ u_int32_t *rtalertp, u_int32_t *plenp)
+{
+ struct ip6_hdr *ip6;
+ int optlen = 0;
+ u_int8_t *opt = opthead;
+ u_int16_t rtalert_val;
+ u_int32_t jumboplen;
+ const int erroff = sizeof(struct ip6_hdr) + sizeof(struct ip6_hbh);
+
+ for (; hbhlen > 0; hbhlen -= optlen, opt += optlen) {
+ switch (*opt) {
+ case IP6OPT_PAD1:
+ optlen = 1;
+ break;
+ case IP6OPT_PADN:
+ if (hbhlen < IP6OPT_MINLEN) {
+ V_ip6stat.ip6s_toosmall++;
+ goto bad;
+ }
+ optlen = *(opt + 1) + 2;
+ break;
+ case IP6OPT_ROUTER_ALERT:
+ /* XXX may need check for alignment */
+ if (hbhlen < IP6OPT_RTALERT_LEN) {
+ V_ip6stat.ip6s_toosmall++;
+ goto bad;
+ }
+ if (*(opt + 1) != IP6OPT_RTALERT_LEN - 2) {
+ /* XXX stat */
+ icmp6_error(m, ICMP6_PARAM_PROB,
+ ICMP6_PARAMPROB_HEADER,
+ erroff + opt + 1 - opthead);
+ return (-1);
+ }
+ optlen = IP6OPT_RTALERT_LEN;
+ bcopy((caddr_t)(opt + 2), (caddr_t)&rtalert_val, 2);
+ *rtalertp = ntohs(rtalert_val);
+ break;
+ case IP6OPT_JUMBO:
+ /* XXX may need check for alignment */
+ if (hbhlen < IP6OPT_JUMBO_LEN) {
+ V_ip6stat.ip6s_toosmall++;
+ goto bad;
+ }
+ if (*(opt + 1) != IP6OPT_JUMBO_LEN - 2) {
+ /* XXX stat */
+ icmp6_error(m, ICMP6_PARAM_PROB,
+ ICMP6_PARAMPROB_HEADER,
+ erroff + opt + 1 - opthead);
+ return (-1);
+ }
+ optlen = IP6OPT_JUMBO_LEN;
+
+ /*
+			 * IPv6 packets that have a non-zero payload length
+ * must not contain a jumbo payload option.
+ */
+ ip6 = mtod(m, struct ip6_hdr *);
+ if (ip6->ip6_plen) {
+ V_ip6stat.ip6s_badoptions++;
+ icmp6_error(m, ICMP6_PARAM_PROB,
+ ICMP6_PARAMPROB_HEADER,
+ erroff + opt - opthead);
+ return (-1);
+ }
+
+ /*
+ * We may see jumbolen in unaligned location, so
+ * we'd need to perform bcopy().
+ */
+ bcopy(opt + 2, &jumboplen, sizeof(jumboplen));
+ jumboplen = (u_int32_t)htonl(jumboplen);
+
+#if 1
+ /*
+ * if there are multiple jumbo payload options,
+ * *plenp will be non-zero and the packet will be
+ * rejected.
+			 * the behavior may need some debate in ipngwg -
+			 * multiple options do not make sense; however,
+			 * there's no explicit mention in the specification.
+ */
+ if (*plenp != 0) {
+ V_ip6stat.ip6s_badoptions++;
+ icmp6_error(m, ICMP6_PARAM_PROB,
+ ICMP6_PARAMPROB_HEADER,
+ erroff + opt + 2 - opthead);
+ return (-1);
+ }
+#endif
+
+ /*
+ * jumbo payload length must be larger than 65535.
+ */
+ if (jumboplen <= IPV6_MAXPACKET) {
+ V_ip6stat.ip6s_badoptions++;
+ icmp6_error(m, ICMP6_PARAM_PROB,
+ ICMP6_PARAMPROB_HEADER,
+ erroff + opt + 2 - opthead);
+ return (-1);
+ }
+ *plenp = jumboplen;
+
+ break;
+ default: /* unknown option */
+ if (hbhlen < IP6OPT_MINLEN) {
+ V_ip6stat.ip6s_toosmall++;
+ goto bad;
+ }
+ optlen = ip6_unknown_opt(opt, m,
+ erroff + opt - opthead);
+ if (optlen == -1)
+ return (-1);
+ optlen += 2;
+ break;
+ }
+ }
+
+ return (0);
+
+ bad:
+ m_freem(m);
+ return (-1);
+}
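+
+/*
+ * Option layout assumed by the walk above (editor's illustration, per
+ * RFC 2460): every option except Pad1 is a TLV
+ *
+ *	+--------+--------+-------- ... --------+
+ *	|  type  | length |    length octets    |
+ *	+--------+--------+-------- ... --------+
+ *
+ * so optlen = *(opt + 1) + 2 covers the type and length octets,
+ * while Pad1 is the single octet 0x00 (optlen = 1).
+ */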
+
+/*
+ * Unknown option processing.
+ * The third argument `off' is the offset from the IPv6 header to the option,
+ * which is necessary to return an ICMPv6 error when the IPv6 header and the
+ * option header are not contiguous.
+ */
+int
+ip6_unknown_opt(u_int8_t *optp, struct mbuf *m, int off)
+{
+ struct ip6_hdr *ip6;
+
+ switch (IP6OPT_TYPE(*optp)) {
+ case IP6OPT_TYPE_SKIP: /* ignore the option */
+ return ((int)*(optp + 1));
+ case IP6OPT_TYPE_DISCARD: /* silently discard */
+ m_freem(m);
+ return (-1);
+ case IP6OPT_TYPE_FORCEICMP: /* send ICMP even if multicasted */
+ V_ip6stat.ip6s_badoptions++;
+ icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_OPTION, off);
+ return (-1);
+ case IP6OPT_TYPE_ICMP: /* send ICMP if not multicasted */
+ V_ip6stat.ip6s_badoptions++;
+ ip6 = mtod(m, struct ip6_hdr *);
+ if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) ||
+ (m->m_flags & (M_BCAST|M_MCAST)))
+ m_freem(m);
+ else
+ icmp6_error(m, ICMP6_PARAM_PROB,
+ ICMP6_PARAMPROB_OPTION, off);
+ return (-1);
+ }
+
+ m_freem(m); /* XXX: NOTREACHED */
+ return (-1);
+}
+
+/*
+ * Create the "control" list for this pcb.
+ * These functions will not modify mbuf chain at all.
+ *
+ * With KAME mbuf chain restriction:
+ * The routine will be called from upper layer handlers like tcp6_input().
+ * Thus the routine assumes that the caller (tcp6_input) has already
+ * called IP6_EXTHDR_CHECK() and all the extension headers are located in the
+ * very first mbuf on the mbuf chain.
+ *
+ * ip6_savecontrol_v4 will handle those options that can be set on a
+ * v4-mapped socket.
+ * ip6_savecontrol will directly call ip6_savecontrol_v4 to handle those
+ * options and handle the v6-only ones itself.
+ */
+struct mbuf **
+ip6_savecontrol_v4(struct inpcb *inp, struct mbuf *m, struct mbuf **mp,
+ int *v4only)
+{
+ struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *);
+
+#ifdef SO_TIMESTAMP
+ if ((inp->inp_socket->so_options & SO_TIMESTAMP) != 0) {
+ struct timeval tv;
+
+ microtime(&tv);
+ *mp = sbcreatecontrol((caddr_t) &tv, sizeof(tv),
+ SCM_TIMESTAMP, SOL_SOCKET);
+ if (*mp)
+ mp = &(*mp)->m_next;
+ }
+#endif
+
+ if ((ip6->ip6_vfc & IPV6_VERSION_MASK) != IPV6_VERSION) {
+ if (v4only != NULL)
+ *v4only = 1;
+ return (mp);
+ }
+
+#define IS2292(inp, x, y) (((inp)->inp_flags & IN6P_RFC2292) ? (x) : (y))
+ /* RFC 2292 sec. 5 */
+ if ((inp->inp_flags & IN6P_PKTINFO) != 0) {
+ struct in6_pktinfo pi6;
+
+ bcopy(&ip6->ip6_dst, &pi6.ipi6_addr, sizeof(struct in6_addr));
+ in6_clearscope(&pi6.ipi6_addr); /* XXX */
+ pi6.ipi6_ifindex =
+ (m && m->m_pkthdr.rcvif) ? m->m_pkthdr.rcvif->if_index : 0;
+
+ *mp = sbcreatecontrol((caddr_t) &pi6,
+ sizeof(struct in6_pktinfo),
+ IS2292(inp, IPV6_2292PKTINFO, IPV6_PKTINFO), IPPROTO_IPV6);
+ if (*mp)
+ mp = &(*mp)->m_next;
+ }
+
+ if ((inp->inp_flags & IN6P_HOPLIMIT) != 0) {
+ int hlim = ip6->ip6_hlim & 0xff;
+
+ *mp = sbcreatecontrol((caddr_t) &hlim, sizeof(int),
+ IS2292(inp, IPV6_2292HOPLIMIT, IPV6_HOPLIMIT),
+ IPPROTO_IPV6);
+ if (*mp)
+ mp = &(*mp)->m_next;
+ }
+
+ if (v4only != NULL)
+ *v4only = 0;
+ return (mp);
+}
+
+void
+ip6_savecontrol(struct inpcb *in6p, struct mbuf *m, struct mbuf **mp)
+{
+ struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *);
+ int v4only = 0;
+
+ mp = ip6_savecontrol_v4(in6p, m, mp, &v4only);
+ if (v4only)
+ return;
+
+ if ((in6p->inp_flags & IN6P_TCLASS) != 0) {
+ u_int32_t flowinfo;
+ int tclass;
+
+ flowinfo = (u_int32_t)ntohl(ip6->ip6_flow & IPV6_FLOWINFO_MASK);
+ flowinfo >>= 20;
+
+ tclass = flowinfo & 0xff;
+ *mp = sbcreatecontrol((caddr_t) &tclass, sizeof(tclass),
+ IPV6_TCLASS, IPPROTO_IPV6);
+ if (*mp)
+ mp = &(*mp)->m_next;
+ }
+
+ /*
+ * IPV6_HOPOPTS socket option. Recall that we required super-user
+ * privilege for the option (see ip6_ctloutput), but it might be too
+ * strict, since there might be some hop-by-hop options which can be
+	 * returned to a normal user.
+ * See also RFC 2292 section 6 (or RFC 3542 section 8).
+ */
+ if ((in6p->inp_flags & IN6P_HOPOPTS) != 0) {
+ /*
+		 * Check if a hop-by-hop options header is contained in the
+ * received packet, and if so, store the options as ancillary
+ * data. Note that a hop-by-hop options header must be
+ * just after the IPv6 header, which is assured through the
+ * IPv6 input processing.
+ */
+ if (ip6->ip6_nxt == IPPROTO_HOPOPTS) {
+ struct ip6_hbh *hbh;
+ int hbhlen = 0;
+#ifdef PULLDOWN_TEST
+ struct mbuf *ext;
+#endif
+
+#ifndef PULLDOWN_TEST
+ hbh = (struct ip6_hbh *)(ip6 + 1);
+ hbhlen = (hbh->ip6h_len + 1) << 3;
+#else
+ ext = ip6_pullexthdr(m, sizeof(struct ip6_hdr),
+ ip6->ip6_nxt);
+ if (ext == NULL) {
+ V_ip6stat.ip6s_tooshort++;
+ return;
+ }
+ hbh = mtod(ext, struct ip6_hbh *);
+ hbhlen = (hbh->ip6h_len + 1) << 3;
+ if (hbhlen != ext->m_len) {
+ m_freem(ext);
+ V_ip6stat.ip6s_tooshort++;
+ return;
+ }
+#endif
+
+ /*
+ * XXX: We copy the whole header even if a
+ * jumbo payload option is included, the option which
+ * is to be removed before returning according to
+ * RFC2292.
+ * Note: this constraint is removed in RFC3542
+ */
+ *mp = sbcreatecontrol((caddr_t)hbh, hbhlen,
+ IS2292(in6p, IPV6_2292HOPOPTS, IPV6_HOPOPTS),
+ IPPROTO_IPV6);
+ if (*mp)
+ mp = &(*mp)->m_next;
+#ifdef PULLDOWN_TEST
+ m_freem(ext);
+#endif
+ }
+ }
+
+ if ((in6p->inp_flags & (IN6P_RTHDR | IN6P_DSTOPTS)) != 0) {
+ int nxt = ip6->ip6_nxt, off = sizeof(struct ip6_hdr);
+
+ /*
+ * Search for destination options headers or routing
+		 * header(s) through the header chain, and store each
+		 * header as ancillary data.
+		 * Note that the order of the headers is preserved in
+		 * the chain of ancillary data.
+ */
+ while (1) { /* is explicit loop prevention necessary? */
+ struct ip6_ext *ip6e = NULL;
+ int elen;
+#ifdef PULLDOWN_TEST
+ struct mbuf *ext = NULL;
+#endif
+
+ /*
+ * if it is not an extension header, don't try to
+ * pull it from the chain.
+ */
+ switch (nxt) {
+ case IPPROTO_DSTOPTS:
+ case IPPROTO_ROUTING:
+ case IPPROTO_HOPOPTS:
+ case IPPROTO_AH: /* is it possible? */
+ break;
+ default:
+ goto loopend;
+ }
+
+#ifndef PULLDOWN_TEST
+ if (off + sizeof(*ip6e) > m->m_len)
+ goto loopend;
+ ip6e = (struct ip6_ext *)(mtod(m, caddr_t) + off);
+ if (nxt == IPPROTO_AH)
+ elen = (ip6e->ip6e_len + 2) << 2;
+ else
+ elen = (ip6e->ip6e_len + 1) << 3;
+ if (off + elen > m->m_len)
+ goto loopend;
+#else
+ ext = ip6_pullexthdr(m, off, nxt);
+ if (ext == NULL) {
+ V_ip6stat.ip6s_tooshort++;
+ return;
+ }
+ ip6e = mtod(ext, struct ip6_ext *);
+ if (nxt == IPPROTO_AH)
+ elen = (ip6e->ip6e_len + 2) << 2;
+ else
+ elen = (ip6e->ip6e_len + 1) << 3;
+ if (elen != ext->m_len) {
+ m_freem(ext);
+ V_ip6stat.ip6s_tooshort++;
+ return;
+ }
+#endif
+
+ switch (nxt) {
+ case IPPROTO_DSTOPTS:
+ if (!(in6p->inp_flags & IN6P_DSTOPTS))
+ break;
+
+ *mp = sbcreatecontrol((caddr_t)ip6e, elen,
+ IS2292(in6p,
+ IPV6_2292DSTOPTS, IPV6_DSTOPTS),
+ IPPROTO_IPV6);
+ if (*mp)
+ mp = &(*mp)->m_next;
+ break;
+ case IPPROTO_ROUTING:
+				if (!(in6p->inp_flags & IN6P_RTHDR))
+ break;
+
+ *mp = sbcreatecontrol((caddr_t)ip6e, elen,
+ IS2292(in6p, IPV6_2292RTHDR, IPV6_RTHDR),
+ IPPROTO_IPV6);
+ if (*mp)
+ mp = &(*mp)->m_next;
+ break;
+ case IPPROTO_HOPOPTS:
+ case IPPROTO_AH: /* is it possible? */
+ break;
+
+ default:
+ /*
+				 * other cases have been filtered out above;
+				 * none should reach this case.  we supply
+				 * the code just in case (e.g. nxt was
+				 * overwritten).
+ */
+#ifdef PULLDOWN_TEST
+ m_freem(ext);
+#endif
+ goto loopend;
+
+ }
+
+ /* proceed with the next header. */
+ off += elen;
+ nxt = ip6e->ip6e_nxt;
+ ip6e = NULL;
+#ifdef PULLDOWN_TEST
+ m_freem(ext);
+ ext = NULL;
+#endif
+ }
+ loopend:
+ ;
+ }
+}
+#undef IS2292
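+
+/*
+ * Userland view (hypothetical example, not part of this file): the
+ * ancillary data built above is what an application receives after
+ * enabling e.g. setsockopt(s, IPPROTO_IPV6, IPV6_RECVPKTINFO, &on,
+ * sizeof(on)) and calling recvmsg(); each cmsghdr then carries an
+ * item such as IPV6_PKTINFO with a struct in6_pktinfo payload
+ * (RFC 3542).
+ */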
+
+void
+ip6_notify_pmtu(struct inpcb *in6p, struct sockaddr_in6 *dst, u_int32_t *mtu)
+{
+ struct socket *so;
+ struct mbuf *m_mtu;
+ struct ip6_mtuinfo mtuctl;
+
+ so = in6p->inp_socket;
+
+ if (mtu == NULL)
+ return;
+
+#ifdef DIAGNOSTIC
+ if (so == NULL) /* I believe this is impossible */
+ panic("ip6_notify_pmtu: socket is NULL");
+#endif
+
+ bzero(&mtuctl, sizeof(mtuctl)); /* zero-clear for safety */
+ mtuctl.ip6m_mtu = *mtu;
+ mtuctl.ip6m_addr = *dst;
+ if (sa6_recoverscope(&mtuctl.ip6m_addr))
+ return;
+
+ if ((m_mtu = sbcreatecontrol((caddr_t)&mtuctl, sizeof(mtuctl),
+ IPV6_PATHMTU, IPPROTO_IPV6)) == NULL)
+ return;
+
+ if (sbappendaddr(&so->so_rcv, (struct sockaddr *)dst, NULL, m_mtu)
+ == 0) {
+ m_freem(m_mtu);
+ /* XXX: should count statistics */
+ } else
+ sorwakeup(so);
+
+ return;
+}
+
+#ifdef PULLDOWN_TEST
+/*
+ * pull single extension header from mbuf chain. returns single mbuf that
+ * contains the result, or NULL on error.
+ */
+static struct mbuf *
+ip6_pullexthdr(struct mbuf *m, size_t off, int nxt)
+{
+ struct ip6_ext ip6e;
+ size_t elen;
+ struct mbuf *n;
+
+#ifdef DIAGNOSTIC
+ switch (nxt) {
+ case IPPROTO_DSTOPTS:
+ case IPPROTO_ROUTING:
+ case IPPROTO_HOPOPTS:
+ case IPPROTO_AH: /* is it possible? */
+ break;
+ default:
+ printf("ip6_pullexthdr: invalid nxt=%d\n", nxt);
+ }
+#endif
+
+ m_copydata(m, off, sizeof(ip6e), (caddr_t)&ip6e);
+ if (nxt == IPPROTO_AH)
+ elen = (ip6e.ip6e_len + 2) << 2;
+ else
+ elen = (ip6e.ip6e_len + 1) << 3;
+
+ MGET(n, M_DONTWAIT, MT_DATA);
+ if (n && elen >= MLEN) {
+ MCLGET(n, M_DONTWAIT);
+ if ((n->m_flags & M_EXT) == 0) {
+ m_free(n);
+ n = NULL;
+ }
+ }
+ if (!n)
+ return NULL;
+
+ n->m_len = 0;
+ if (elen >= M_TRAILINGSPACE(n)) {
+ m_free(n);
+ return NULL;
+ }
+
+ m_copydata(m, off, elen, mtod(n, caddr_t));
+ n->m_len = elen;
+ return n;
+}
+#endif
+
+/*
+ * Get a pointer to the next-header field of the header that precedes
+ * the header currently being processed.
+ * XXX: This function assumes that
+ *	M includes all headers,
+ *	the next header field and the header length field of each header
+ *	are valid, and
+ *	the sum of the header lengths equals OFF.
+ * Because of these assumptions, this function must be called very
+ * carefully.  Moreover, it will not be used in the near future when
+ * we develop a `neater' mechanism to process extension headers.
+ */
+char *
+ip6_get_prevhdr(struct mbuf *m, int off)
+{
+ struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *);
+
+ if (off == sizeof(struct ip6_hdr))
+ return (&ip6->ip6_nxt);
+ else {
+ int len, nxt;
+ struct ip6_ext *ip6e = NULL;
+
+ nxt = ip6->ip6_nxt;
+ len = sizeof(struct ip6_hdr);
+ while (len < off) {
+ ip6e = (struct ip6_ext *)(mtod(m, caddr_t) + len);
+
+ switch (nxt) {
+ case IPPROTO_FRAGMENT:
+ len += sizeof(struct ip6_frag);
+ break;
+ case IPPROTO_AH:
+ len += (ip6e->ip6e_len + 2) << 2;
+ break;
+ default:
+ len += (ip6e->ip6e_len + 1) << 3;
+ break;
+ }
+ nxt = ip6e->ip6e_nxt;
+ }
+ if (ip6e)
+ return (&ip6e->ip6e_nxt);
+ else
+ return NULL;
+ }
+}
+
+/*
+ * get next header offset. m will be retained.
+ */
+int
+ip6_nexthdr(struct mbuf *m, int off, int proto, int *nxtp)
+{
+ struct ip6_hdr ip6;
+ struct ip6_ext ip6e;
+ struct ip6_frag fh;
+
+ /* just in case */
+ if (m == NULL)
+ panic("ip6_nexthdr: m == NULL");
+ if ((m->m_flags & M_PKTHDR) == 0 || m->m_pkthdr.len < off)
+ return -1;
+
+ switch (proto) {
+ case IPPROTO_IPV6:
+ if (m->m_pkthdr.len < off + sizeof(ip6))
+ return -1;
+ m_copydata(m, off, sizeof(ip6), (caddr_t)&ip6);
+ if (nxtp)
+ *nxtp = ip6.ip6_nxt;
+ off += sizeof(ip6);
+ return off;
+
+ case IPPROTO_FRAGMENT:
+ /*
+		 * Terminate parsing if this is not the first fragment;
+		 * it does not make sense to parse through it.
+ */
+ if (m->m_pkthdr.len < off + sizeof(fh))
+ return -1;
+ m_copydata(m, off, sizeof(fh), (caddr_t)&fh);
+ /* IP6F_OFF_MASK = 0xfff8(BigEndian), 0xf8ff(LittleEndian) */
+ if (fh.ip6f_offlg & IP6F_OFF_MASK)
+ return -1;
+ if (nxtp)
+ *nxtp = fh.ip6f_nxt;
+ off += sizeof(struct ip6_frag);
+ return off;
+
+ case IPPROTO_AH:
+ if (m->m_pkthdr.len < off + sizeof(ip6e))
+ return -1;
+ m_copydata(m, off, sizeof(ip6e), (caddr_t)&ip6e);
+ if (nxtp)
+ *nxtp = ip6e.ip6e_nxt;
+ off += (ip6e.ip6e_len + 2) << 2;
+ return off;
+
+ case IPPROTO_HOPOPTS:
+ case IPPROTO_ROUTING:
+ case IPPROTO_DSTOPTS:
+ if (m->m_pkthdr.len < off + sizeof(ip6e))
+ return -1;
+ m_copydata(m, off, sizeof(ip6e), (caddr_t)&ip6e);
+ if (nxtp)
+ *nxtp = ip6e.ip6e_nxt;
+ off += (ip6e.ip6e_len + 1) << 3;
+ return off;
+
+ case IPPROTO_NONE:
+ case IPPROTO_ESP:
+ case IPPROTO_IPCOMP:
+ /* give up */
+ return -1;
+
+ default:
+ return -1;
+ }
+
+ return -1;
+}
+
+/*
+ * get offset for the last header in the chain. m will be kept untainted.
+ */
+int
+ip6_lasthdr(struct mbuf *m, int off, int proto, int *nxtp)
+{
+ int newoff;
+ int nxt;
+
+ if (!nxtp) {
+ nxt = -1;
+ nxtp = &nxt;
+ }
+ while (1) {
+ newoff = ip6_nexthdr(m, off, proto, nxtp);
+ if (newoff < 0)
+ return off;
+ else if (newoff < off)
+ return -1; /* invalid */
+ else if (newoff == off)
+ return newoff;
+
+ off = newoff;
+ proto = *nxtp;
+ }
+}
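+
+/*
+ * Usage sketch (hypothetical caller; assumes m holds a complete IPv6
+ * packet starting at offset 0): locate the transport header.
+ *
+ *	int nxt = -1;
+ *	int off = ip6_lasthdr(m, 0, IPPROTO_IPV6, &nxt);
+ *	if (off < 0)
+ *		...	(malformed extension header chain)
+ *	...		(nxt is now e.g. IPPROTO_TCP, off its offset)
+ */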
+
+struct ip6aux *
+ip6_addaux(struct mbuf *m)
+{
+ struct m_tag *mtag;
+
+ mtag = m_tag_find(m, PACKET_TAG_IPV6_INPUT, NULL);
+ if (!mtag) {
+ mtag = m_tag_get(PACKET_TAG_IPV6_INPUT, sizeof(struct ip6aux),
+ M_NOWAIT);
+ if (mtag) {
+ m_tag_prepend(m, mtag);
+ bzero(mtag + 1, sizeof(struct ip6aux));
+ }
+ }
+ return mtag ? (struct ip6aux *)(mtag + 1) : NULL;
+}
+
+struct ip6aux *
+ip6_findaux(struct mbuf *m)
+{
+ struct m_tag *mtag;
+
+ mtag = m_tag_find(m, PACKET_TAG_IPV6_INPUT, NULL);
+ return mtag ? (struct ip6aux *)(mtag + 1) : NULL;
+}
+
+void
+ip6_delaux(struct mbuf *m)
+{
+ struct m_tag *mtag;
+
+ mtag = m_tag_find(m, PACKET_TAG_IPV6_INPUT, NULL);
+ if (mtag)
+ m_tag_delete(m, mtag);
+}
+
+/*
+ * System control for IP6
+ */
+
+u_char inet6ctlerrmap[PRC_NCMDS] = {
+ 0, 0, 0, 0,
+ 0, EMSGSIZE, EHOSTDOWN, EHOSTUNREACH,
+ EHOSTUNREACH, EHOSTUNREACH, ECONNREFUSED, ECONNREFUSED,
+ EMSGSIZE, EHOSTUNREACH, 0, 0,
+ 0, 0, 0, 0,
+ ENOPROTOOPT
+};
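+
+/*
+ * (The table above is indexed by PRC_* code from <sys/protosw.h>; for
+ * example, inet6ctlerrmap[PRC_MSGSIZE] == EMSGSIZE and
+ * inet6ctlerrmap[PRC_PARAMPROB] == ENOPROTOOPT.)
+ */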
diff --git a/rtems/freebsd/netinet6/ip6_ipsec.c b/rtems/freebsd/netinet6/ip6_ipsec.c
new file mode 100644
index 00000000..736f9330
--- /dev/null
+++ b/rtems/freebsd/netinet6/ip6_ipsec.c
@@ -0,0 +1,386 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 1982, 1986, 1988, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/freebsd/local/opt_inet6.h>
+#include <rtems/freebsd/local/opt_ipsec.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/mac.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/protosw.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/socketvar.h>
+#include <rtems/freebsd/sys/sysctl.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/route.h>
+#include <rtems/freebsd/net/vnet.h>
+
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/in_systm.h>
+#include <rtems/freebsd/netinet/in_var.h>
+#include <rtems/freebsd/netinet/ip.h>
+#include <rtems/freebsd/netinet/ip6.h>
+#include <rtems/freebsd/netinet/in_pcb.h>
+#include <rtems/freebsd/netinet/ip_var.h>
+#include <rtems/freebsd/netinet/ip_options.h>
+
+#include <rtems/freebsd/machine/in_cksum.h>
+
+#ifdef IPSEC
+#include <rtems/freebsd/netipsec/ipsec.h>
+#include <rtems/freebsd/netipsec/ipsec6.h>
+#include <rtems/freebsd/netipsec/xform.h>
+#include <rtems/freebsd/netipsec/key.h>
+#ifdef IPSEC_DEBUG
+#include <rtems/freebsd/netipsec/key_debug.h>
+#else
+#define KEYDEBUG(lev,arg)
+#endif
+#endif /*IPSEC*/
+
+#include <rtems/freebsd/netinet6/ip6_ipsec.h>
+#include <rtems/freebsd/netinet6/ip6_var.h>
+
+extern struct protosw inet6sw[];
+
+
+#ifdef INET6
+#ifdef IPSEC
+#ifdef IPSEC_FILTERTUNNEL
+static VNET_DEFINE(int, ip6_ipsec6_filtertunnel) = 1;
+#else
+static VNET_DEFINE(int, ip6_ipsec6_filtertunnel) = 0;
+#endif
+#define V_ip6_ipsec6_filtertunnel VNET(ip6_ipsec6_filtertunnel)
+
+SYSCTL_DECL(_net_inet6_ipsec6);
+SYSCTL_VNET_INT(_net_inet6_ipsec6, OID_AUTO,
+ filtertunnel, CTLFLAG_RW, &VNET_NAME(ip6_ipsec6_filtertunnel), 0,
+ "If set filter packets from an IPsec tunnel.");
+#endif /* IPSEC */
+#endif /* INET6 */
+
+/*
+ * Check if we have to jump over firewall processing for this packet.
+ * Called from ip6_input().
+ * 1 = jump over firewall, 0 = packet goes through firewall.
+ */
+int
+ip6_ipsec_filtertunnel(struct mbuf *m)
+{
+#if defined(IPSEC)
+
+ /*
+ * Bypass packet filtering for packets from a tunnel.
+ */
+ if (!V_ip6_ipsec6_filtertunnel &&
+ m_tag_find(m, PACKET_TAG_IPSEC_IN_DONE, NULL) != NULL)
+ return 1;
+#endif
+ return 0;
+}
+
+/*
+ * Check if this packet has an active SA and needs to be dropped instead
+ * of forwarded.
+ * Called from ip6_forward().
+ * 1 = drop packet, 0 = forward packet.
+ */
+int
+ip6_ipsec_fwd(struct mbuf *m)
+{
+#ifdef IPSEC
+ struct m_tag *mtag;
+ struct tdb_ident *tdbi;
+ struct secpolicy *sp;
+ int s, error;
+ mtag = m_tag_find(m, PACKET_TAG_IPSEC_IN_DONE, NULL);
+ s = splnet();
+ if (mtag != NULL) {
+ tdbi = (struct tdb_ident *)(mtag + 1);
+ sp = ipsec_getpolicy(tdbi, IPSEC_DIR_INBOUND);
+ } else {
+ sp = ipsec_getpolicybyaddr(m, IPSEC_DIR_INBOUND,
+ IP_FORWARDING, &error);
+ }
+ if (sp == NULL) { /* NB: can happen if error */
+ splx(s);
+ /*XXX error stat???*/
+ DPRINTF(("ip_input: no SP for forwarding\n")); /*XXX*/
+ return 1;
+ }
+
+ /*
+ * Check security policy against packet attributes.
+ */
+ error = ipsec_in_reject(sp, m);
+ KEY_FREESP(&sp);
+ splx(s);
+ if (error) {
+ V_ip6stat.ip6s_cantforward++;
+ return 1;
+ }
+#endif /* IPSEC */
+ return 0;
+}
+
+/*
+ * Check whether the protocol type has no further header, and if so do
+ * IPsec decryption or rejection right now.  Protocols with further
+ * headers get their IPsec treatment within the protocol-specific
+ * processing.
+ * Called from ip6_input().
+ * 1 = drop packet, 0 = continue processing packet.
+ */
+int
+ip6_ipsec_input(struct mbuf *m, int nxt)
+{
+#ifdef IPSEC
+ struct m_tag *mtag;
+ struct tdb_ident *tdbi;
+ struct secpolicy *sp;
+ int s, error;
+ /*
+	 * Enforce IPsec policy checking if we are seeing the last header.
+	 * Note that we do not visit this with protocols that have pcb-layer
+	 * code, such as UDP/TCP/raw IP.
+ */
+ if ((inet6sw[ip6_protox[nxt]].pr_flags & PR_LASTHDR) != 0 &&
+ ipsec6_in_reject(m, NULL)) {
+
+ /*
+ * Check if the packet has already had IPsec processing
+ * done. If so, then just pass it along. This tag gets
+ * set during AH, ESP, etc. input handling, before the
+ * packet is returned to the ip input queue for delivery.
+ */
+ mtag = m_tag_find(m, PACKET_TAG_IPSEC_IN_DONE, NULL);
+ s = splnet();
+ if (mtag != NULL) {
+ tdbi = (struct tdb_ident *)(mtag + 1);
+ sp = ipsec_getpolicy(tdbi, IPSEC_DIR_INBOUND);
+ } else {
+ sp = ipsec_getpolicybyaddr(m, IPSEC_DIR_INBOUND,
+ IP_FORWARDING, &error);
+ }
+ if (sp != NULL) {
+ /*
+ * Check security policy against packet attributes.
+ */
+ error = ipsec_in_reject(sp, m);
+ KEY_FREESP(&sp);
+ } else {
+			/* XXX error stat??? */
+			splx(s);	/* do not leak the spl on this early return */
+			error = EINVAL;
+			DPRINTF(("ip_input: no SP, packet discarded\n"));/*XXX*/
+			return 1;
+ }
+ splx(s);
+ if (error)
+ return 1;
+ }
+#endif /* IPSEC */
+ return 0;
+}
+
+/*
+ * Called from ip6_output().
+ * 1 = drop packet, 0 = continue processing packet,
+ * -1 = packet was reinjected and stop processing packet
+ */
+
+int
+ip6_ipsec_output(struct mbuf **m, struct inpcb *inp, int *flags, int *error,
+ struct ifnet **ifp, struct secpolicy **sp)
+{
+#ifdef IPSEC
+ struct tdb_ident *tdbi;
+ struct m_tag *mtag;
+ /* XXX int s; */
+ if (sp == NULL)
+ return 1;
+ mtag = m_tag_find(*m, PACKET_TAG_IPSEC_PENDING_TDB, NULL);
+ if (mtag != NULL) {
+ tdbi = (struct tdb_ident *)(mtag + 1);
+ *sp = ipsec_getpolicy(tdbi, IPSEC_DIR_OUTBOUND);
+ if (*sp == NULL)
+ *error = -EINVAL; /* force silent drop */
+ m_tag_delete(*m, mtag);
+ } else {
+ *sp = ipsec4_checkpolicy(*m, IPSEC_DIR_OUTBOUND, *flags,
+ error, inp);
+ }
+
+ /*
+ * There are four return cases:
+ * sp != NULL apply IPsec policy
+ * sp == NULL, error == 0 no IPsec handling needed
+ * sp == NULL, error == -EINVAL discard packet w/o error
+ * sp == NULL, error != 0 discard packet, report error
+ */
+ if (*sp != NULL) {
+ /* Loop detection, check if ipsec processing already done */
+ KASSERT((*sp)->req != NULL, ("ip_output: no ipsec request"));
+ for (mtag = m_tag_first(*m); mtag != NULL;
+ mtag = m_tag_next(*m, mtag)) {
+ if (mtag->m_tag_cookie != MTAG_ABI_COMPAT)
+ continue;
+ if (mtag->m_tag_id != PACKET_TAG_IPSEC_OUT_DONE &&
+ mtag->m_tag_id != PACKET_TAG_IPSEC_OUT_CRYPTO_NEEDED)
+ continue;
+ /*
+ * Check if policy has an SA associated with it.
+ * This can happen when an SP has yet to acquire
+ * an SA; e.g. on first reference. If it occurs,
+ * then we let ipsec4_process_packet do its thing.
+ */
+ if ((*sp)->req->sav == NULL)
+ break;
+ tdbi = (struct tdb_ident *)(mtag + 1);
+ if (tdbi->spi == (*sp)->req->sav->spi &&
+ tdbi->proto == (*sp)->req->sav->sah->saidx.proto &&
+ bcmp(&tdbi->dst, &(*sp)->req->sav->sah->saidx.dst,
+ sizeof (union sockaddr_union)) == 0) {
+ /*
+ * No IPsec processing is needed, free
+ * reference to SP.
+ *
+ * NB: null pointer to avoid free at
+ * done: below.
+ */
+ KEY_FREESP(sp), *sp = NULL;
+ /* XXX splx(s); */
+ goto done;
+ }
+ }
+
+ /*
+ * Do delayed checksums now because we send before
+ * this is done in the normal processing path.
+ */
+ if ((*m)->m_pkthdr.csum_flags & CSUM_DELAY_DATA) {
+ in_delayed_cksum(*m);
+ (*m)->m_pkthdr.csum_flags &= ~CSUM_DELAY_DATA;
+ }
+
+ /*
+ * Preserve KAME behaviour: ENOENT can be returned
+ * when an SA acquire is in progress. Don't propagate
+ * this to user-level; it confuses applications.
+ *
+ * XXX this will go away when the SADB is redone.
+ */
+ if (*error == ENOENT)
+ *error = 0;
+ goto do_ipsec;
+ } else { /* sp == NULL */
+ if (*error != 0) {
+ /*
+ * Hack: -EINVAL is used to signal that a packet
+ * should be silently discarded. This is typically
+ * because we asked key management for an SA and
+ * it was delayed (e.g. kicked up to IKE).
+ */
+ if (*error == -EINVAL)
+ *error = 0;
+ goto bad;
+ } else {
+ /* No IPsec processing for this packet. */
+ }
+ }
+done:
+ return 0;
+do_ipsec:
+ return -1;
+bad:
+ return 1;
+#endif /* IPSEC */
+ return 0;
+}
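+
+/*
+ * Caller sketch (a minimal illustration, assuming the caller follows
+ * the three-way return convention documented above):
+ *
+ *	switch (ip6_ipsec_output(&m, inp, &flags, &error, &ifp, &sp)) {
+ *	case -1:	(packet consumed by IPsec; stop)
+ *		goto done;
+ *	case 1:		(drop packet)
+ *		goto bad;
+ *	case 0:		(continue normal processing)
+ *		break;
+ *	}
+ */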
+
+#if 0
+/*
+ * Compute the MTU for a forwarded packet that gets IPSEC encapsulated.
+ * Called from ip6_forward().
+ * Returns an MTU suggestion for an ICMPv6 too-big reply.
+ */
+int
+ip6_ipsec_mtu(struct mbuf *m)
+{
+ int mtu = 0;
+ /*
+ * If the packet is routed over IPsec tunnel, tell the
+ * originator the tunnel MTU.
+ * tunnel MTU = if MTU - sizeof(IP) - ESP/AH hdrsiz
+ * XXX quickhack!!!
+ */
+#ifdef IPSEC
+ struct secpolicy *sp = NULL;
+ int ipsecerror;
+ int ipsechdr;
+ struct route *ro;
+ sp = ipsec_getpolicybyaddr(m,
+ IPSEC_DIR_OUTBOUND,
+ IP_FORWARDING,
+ &ipsecerror);
+ if (sp != NULL) {
+ /* count IPsec header size */
+ ipsechdr = ipsec_hdrsiz(m, IPSEC_DIR_OUTBOUND, NULL);
+
+ /*
+ * find the correct route for outer IPv4
+ * header, compute tunnel MTU.
+ */
+ if (sp->req != NULL &&
+ sp->req->sav != NULL &&
+ sp->req->sav->sah != NULL) {
+ ro = &sp->req->sav->sah->route_cache.sa_route;
+ if (ro->ro_rt && ro->ro_rt->rt_ifp) {
+ mtu =
+ ro->ro_rt->rt_rmx.rmx_mtu ?
+ ro->ro_rt->rt_rmx.rmx_mtu :
+ ro->ro_rt->rt_ifp->if_mtu;
+ mtu -= ipsechdr;
+ }
+ }
+ KEY_FREESP(&sp);
+ }
+#endif /* IPSEC */
+ /* XXX else case missing. */
+ return mtu;
+}
+#endif
diff --git a/rtems/freebsd/netinet6/ip6_ipsec.h b/rtems/freebsd/netinet6/ip6_ipsec.h
new file mode 100644
index 00000000..e3049534
--- /dev/null
+++ b/rtems/freebsd/netinet6/ip6_ipsec.h
@@ -0,0 +1,43 @@
+/*-
+ * Copyright (c) 1982, 1986, 1988, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _NETINET_IP6_IPSEC_HH_
+#define _NETINET_IP6_IPSEC_HH_
+
+int ip6_ipsec_filtertunnel(struct mbuf *);
+int ip6_ipsec_fwd(struct mbuf *);
+int ip6_ipsec_input(struct mbuf *, int);
+int ip6_ipsec_output(struct mbuf **, struct inpcb *, int *, int *,
+ struct ifnet **, struct secpolicy **sp);
+#if 0
+int ip6_ipsec_mtu(struct mbuf *);
+#endif
+#endif
diff --git a/rtems/freebsd/netinet6/ip6_mroute.c b/rtems/freebsd/netinet6/ip6_mroute.c
new file mode 100644
index 00000000..35121d2f
--- /dev/null
+++ b/rtems/freebsd/netinet6/ip6_mroute.c
@@ -0,0 +1,2065 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (C) 1998 WIDE Project.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the project nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $KAME: ip6_mroute.c,v 1.58 2001/12/18 02:36:31 itojun Exp $
+ */
+
+/*-
+ * Copyright (c) 1989 Stephen Deering
+ * Copyright (c) 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Stephen Deering of Stanford University.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)ip_mroute.c 8.2 (Berkeley) 11/15/93
+ * BSDI ip_mroute.c,v 2.10 1996/11/14 00:29:52 jch Exp
+ */
+
+/*
+ * IP multicast forwarding procedures
+ *
+ * Written by David Waitzman, BBN Labs, August 1988.
+ * Modified by Steve Deering, Stanford, February 1989.
+ * Modified by Mark J. Steiglitz, Stanford, May, 1991
+ * Modified by Van Jacobson, LBL, January 1993
+ * Modified by Ajit Thyagarajan, PARC, August 1993
+ * Modified by Bill Fenner, PARC, April 1994
+ *
+ * MROUTING Revision: 3.5.1.2 + PIM-SMv2 (pimd) Support
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/freebsd/local/opt_inet.h>
+#include <rtems/freebsd/local/opt_inet6.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/callout.h>
+#include <rtems/freebsd/sys/errno.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/module.h>
+#include <rtems/freebsd/sys/domain.h>
+#include <rtems/freebsd/sys/protosw.h>
+#include <rtems/freebsd/sys/signalvar.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/socketvar.h>
+#include <rtems/freebsd/sys/sockio.h>
+#include <rtems/freebsd/sys/sx.h>
+#include <rtems/freebsd/sys/sysctl.h>
+#include <rtems/freebsd/sys/syslog.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/time.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/if_types.h>
+#include <rtems/freebsd/net/raw_cb.h>
+#include <rtems/freebsd/net/vnet.h>
+
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/in_var.h>
+#include <rtems/freebsd/netinet/icmp6.h>
+#include <rtems/freebsd/netinet/ip_encap.h>
+
+#include <rtems/freebsd/netinet/ip6.h>
+#include <rtems/freebsd/netinet6/ip6_var.h>
+#include <rtems/freebsd/netinet6/scope6_var.h>
+#include <rtems/freebsd/netinet6/nd6.h>
+#include <rtems/freebsd/netinet6/ip6_mroute.h>
+#include <rtems/freebsd/netinet6/ip6protosw.h>
+#include <rtems/freebsd/netinet6/pim6.h>
+#include <rtems/freebsd/netinet6/pim6_var.h>
+
+static MALLOC_DEFINE(M_MRTABLE6, "mf6c", "multicast forwarding cache entry");
+
+/* XXX: this is a very common idiom; move to <sys/mbuf.h> ? */
+#define M_HASCL(m) ((m)->m_flags & M_EXT)
+
+static int ip6_mdq(struct mbuf *, struct ifnet *, struct mf6c *);
+static void phyint_send(struct ip6_hdr *, struct mif6 *, struct mbuf *);
+static int register_send(struct ip6_hdr *, struct mif6 *, struct mbuf *);
+static int set_pim6(int *);
+static int socket_send(struct socket *, struct mbuf *,
+ struct sockaddr_in6 *);
+
+extern int in6_mcast_loop;
+extern struct domain inet6domain;
+
+static const struct encaptab *pim6_encap_cookie;
+static const struct ip6protosw in6_pim_protosw = {
+ .pr_type = SOCK_RAW,
+ .pr_domain = &inet6domain,
+ .pr_protocol = IPPROTO_PIM,
+ .pr_flags = PR_ATOMIC|PR_ADDR|PR_LASTHDR,
+ .pr_input = pim6_input,
+ .pr_output = rip6_output,
+ .pr_ctloutput = rip6_ctloutput,
+ .pr_usrreqs = &rip6_usrreqs
+};
+static int pim6_encapcheck(const struct mbuf *, int, int, void *);
+
+static VNET_DEFINE(int, ip6_mrouter_ver) = 0;
+#define V_ip6_mrouter_ver VNET(ip6_mrouter_ver)
+
+SYSCTL_DECL(_net_inet6);
+SYSCTL_DECL(_net_inet6_ip6);
+SYSCTL_NODE(_net_inet6, IPPROTO_PIM, pim, CTLFLAG_RW, 0, "PIM");
+
+static struct mrt6stat mrt6stat;
+SYSCTL_STRUCT(_net_inet6_ip6, OID_AUTO, mrt6stat, CTLFLAG_RW,
+ &mrt6stat, mrt6stat,
+ "Multicast Routing Statistics (struct mrt6stat, netinet6/ip6_mroute.h)");
+
+#define NO_RTE_FOUND 0x1
+#define RTE_FOUND 0x2
+
+static struct mtx mrouter6_mtx;
+#define MROUTER6_LOCK() mtx_lock(&mrouter6_mtx)
+#define MROUTER6_UNLOCK() mtx_unlock(&mrouter6_mtx)
+#define MROUTER6_LOCK_ASSERT() do { \
+ mtx_assert(&mrouter6_mtx, MA_OWNED); \
+ NET_ASSERT_GIANT(); \
+} while (0)
+#define MROUTER6_LOCK_INIT() \
+ mtx_init(&mrouter6_mtx, "IPv6 multicast forwarding", NULL, MTX_DEF)
+#define MROUTER6_LOCK_DESTROY() mtx_destroy(&mrouter6_mtx)
+
+static struct mf6c *mf6ctable[MF6CTBLSIZ];
+SYSCTL_OPAQUE(_net_inet6_ip6, OID_AUTO, mf6ctable, CTLFLAG_RD,
+ &mf6ctable, sizeof(mf6ctable), "S,*mf6ctable[MF6CTBLSIZ]",
+ "IPv6 Multicast Forwarding Table (struct *mf6ctable[MF6CTBLSIZ], "
+ "netinet6/ip6_mroute.h)");
+
+static struct mtx mfc6_mtx;
+#define MFC6_LOCK() mtx_lock(&mfc6_mtx)
+#define MFC6_UNLOCK() mtx_unlock(&mfc6_mtx)
+#define MFC6_LOCK_ASSERT() do { \
+ mtx_assert(&mfc6_mtx, MA_OWNED); \
+ NET_ASSERT_GIANT(); \
+} while (0)
+#define MFC6_LOCK_INIT() \
+ mtx_init(&mfc6_mtx, "IPv6 multicast forwarding cache", NULL, MTX_DEF)
+#define MFC6_LOCK_DESTROY() mtx_destroy(&mfc6_mtx)
+
+static u_char n6expire[MF6CTBLSIZ];
+
+static struct mif6 mif6table[MAXMIFS];
+SYSCTL_OPAQUE(_net_inet6_ip6, OID_AUTO, mif6table, CTLFLAG_RD,
+ &mif6table, sizeof(mif6table), "S,mif6[MAXMIFS]",
+ "IPv6 Multicast Interfaces (struct mif6[MAXMIFS], netinet6/ip6_mroute.h)");
+
+static struct mtx mif6_mtx;
+#define MIF6_LOCK() mtx_lock(&mif6_mtx)
+#define MIF6_UNLOCK() mtx_unlock(&mif6_mtx)
+#define MIF6_LOCK_ASSERT() mtx_assert(&mif6_mtx, MA_OWNED)
+#define MIF6_LOCK_INIT() \
+ mtx_init(&mif6_mtx, "IPv6 multicast interfaces", NULL, MTX_DEF)
+#define MIF6_LOCK_DESTROY() mtx_destroy(&mif6_mtx)
+
+#ifdef MRT6DEBUG
+static VNET_DEFINE(u_int, mrt6debug) = 0; /* debug level */
+#define V_mrt6debug VNET(mrt6debug)
+#define DEBUG_MFC 0x02
+#define DEBUG_FORWARD 0x04
+#define DEBUG_EXPIRE 0x08
+#define DEBUG_XMIT 0x10
+#define DEBUG_REG 0x20
+#define DEBUG_PIM 0x40
+#endif
+
+static void expire_upcalls(void *);
+#define EXPIRE_TIMEOUT (hz / 4) /* 4x / second */
+#define UPCALL_EXPIRE 6 /* number of timeouts */
+
+/*
+ * XXX TODO: maintain a count of if_allmulti() calls in struct ifnet.
+ */
+
+/*
+ * 'Interfaces' associated with the decapsulator (so we can tell
+ * packets that went through it from ones that get reflected
+ * by a broken gateway).  Unlike the IPv4 register_if, these
+ * interfaces are linked into the system ifnet list, because
+ * per-interface IPv6 statistics are maintained in ifp->if_afdata.
+ * But no routes point to them, i.e., packets can't be sent this
+ * way.  They exist only as a placeholder for multicast source
+ * verification.
+ */
+static struct ifnet *multicast_register_if6;
+
+#define ENCAP_HOPS 64
+
+/*
+ * Private variables.
+ */
+static mifi_t nummifs = 0;
+static mifi_t reg_mif_num = (mifi_t)-1;
+
+static struct pim6stat pim6stat;
+SYSCTL_STRUCT(_net_inet6_pim, PIM6CTL_STATS, stats, CTLFLAG_RD,
+ &pim6stat, pim6stat,
+ "PIM Statistics (struct pim6stat, netinet6/pim_var.h)");
+
+static VNET_DEFINE(int, pim6);
+#define V_pim6 VNET(pim6)
+
+/*
+ * Hash function for a source, group entry
+ */
+#define MF6CHASH(a, g) MF6CHASHMOD((a).s6_addr32[0] ^ (a).s6_addr32[1] ^ \
+ (a).s6_addr32[2] ^ (a).s6_addr32[3] ^ \
+ (g).s6_addr32[0] ^ (g).s6_addr32[1] ^ \
+ (g).s6_addr32[2] ^ (g).s6_addr32[3])
+
+/*
+ * Find a route for a given origin IPv6 address and Multicast group address.
+ */
+#define MF6CFIND(o, g, rt) do { \
+ struct mf6c *_rt = mf6ctable[MF6CHASH(o,g)]; \
+ rt = NULL; \
+ mrt6stat.mrt6s_mfc_lookups++; \
+ while (_rt) { \
+ if (IN6_ARE_ADDR_EQUAL(&_rt->mf6c_origin.sin6_addr, &(o)) && \
+ IN6_ARE_ADDR_EQUAL(&_rt->mf6c_mcastgrp.sin6_addr, &(g)) && \
+ (_rt->mf6c_stall == NULL)) { \
+ rt = _rt; \
+ break; \
+ } \
+ _rt = _rt->mf6c_next; \
+ } \
+ if (rt == NULL) { \
+ mrt6stat.mrt6s_mfc_misses++; \
+ } \
+} while (/*CONSTCOND*/ 0)
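+
+/*
+ * (MF6CFIND thus performs a simple chained-hash lookup: hash the
+ * (origin, group) pair with MF6CHASH, then walk the collision chain
+ * comparing both addresses, skipping entries that still have stalled
+ * upcall packets queued.)
+ */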
+
+/*
+ * Macros to compute elapsed time efficiently
+ * Borrowed from Van Jacobson's scheduling code
+ * XXX: replace with timersub() ?
+ */
+#define TV_DELTA(a, b, delta) do { \
+ int xxs; \
+ \
+ delta = (a).tv_usec - (b).tv_usec; \
+ if ((xxs = (a).tv_sec - (b).tv_sec)) { \
+ switch (xxs) { \
+ case 2: \
+ delta += 1000000; \
+ /* FALLTHROUGH */ \
+ case 1: \
+ delta += 1000000; \
+ break; \
+ default: \
+ delta += (1000000 * xxs); \
+ } \
+ } \
+} while (/*CONSTCOND*/ 0)
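+
+/*
+ * Worked example: for a = {5, 100} and b = {3, 900000} (tv_sec,
+ * tv_usec), xxs == 2, so delta = (100 - 900000) + 1000000 + 1000000
+ * = 1100100 microseconds, i.e. 1.100100 seconds.
+ */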
+
+/* XXX: replace with timercmp(a, b, <) ? */
+#define TV_LT(a, b) (((a).tv_usec < (b).tv_usec && \
+ (a).tv_sec <= (b).tv_sec) || (a).tv_sec < (b).tv_sec)
+
+#ifdef UPCALL_TIMING
+#define UPCALL_MAX 50
+static u_long upcall_data[UPCALL_MAX + 1];
+static void collate();
+#endif /* UPCALL_TIMING */
+
+static int ip6_mrouter_init(struct socket *, int, int);
+static int add_m6fc(struct mf6cctl *);
+static int add_m6if(struct mif6ctl *);
+static int del_m6fc(struct mf6cctl *);
+static int del_m6if(mifi_t *);
+static int del_m6if_locked(mifi_t *);
+static int get_mif6_cnt(struct sioc_mif_req6 *);
+static int get_sg_cnt(struct sioc_sg_req6 *);
+
+static struct callout expire_upcalls_ch;
+
+int X_ip6_mforward(struct ip6_hdr *, struct ifnet *, struct mbuf *);
+int X_ip6_mrouter_done(void);
+int X_ip6_mrouter_set(struct socket *, struct sockopt *);
+int X_ip6_mrouter_get(struct socket *, struct sockopt *);
+int X_mrt6_ioctl(u_long, caddr_t);
+
+/*
+ * Handle MRT setsockopt commands to modify the multicast routing tables.
+ */
+int
+X_ip6_mrouter_set(struct socket *so, struct sockopt *sopt)
+{
+ int error = 0;
+ int optval;
+ struct mif6ctl mifc;
+ struct mf6cctl mfcc;
+ mifi_t mifi;
+
+ if (so != V_ip6_mrouter && sopt->sopt_name != MRT6_INIT)
+ return (EACCES);
+
+ switch (sopt->sopt_name) {
+ case MRT6_INIT:
+#ifdef MRT6_OINIT
+ case MRT6_OINIT:
+#endif
+ error = sooptcopyin(sopt, &optval, sizeof(optval),
+ sizeof(optval));
+ if (error)
+ break;
+ error = ip6_mrouter_init(so, optval, sopt->sopt_name);
+ break;
+ case MRT6_DONE:
+ error = X_ip6_mrouter_done();
+ break;
+ case MRT6_ADD_MIF:
+ error = sooptcopyin(sopt, &mifc, sizeof(mifc), sizeof(mifc));
+ if (error)
+ break;
+ error = add_m6if(&mifc);
+ break;
+ case MRT6_ADD_MFC:
+ error = sooptcopyin(sopt, &mfcc, sizeof(mfcc), sizeof(mfcc));
+ if (error)
+ break;
+ error = add_m6fc(&mfcc);
+ break;
+ case MRT6_DEL_MFC:
+ error = sooptcopyin(sopt, &mfcc, sizeof(mfcc), sizeof(mfcc));
+ if (error)
+ break;
+ error = del_m6fc(&mfcc);
+ break;
+ case MRT6_DEL_MIF:
+ error = sooptcopyin(sopt, &mifi, sizeof(mifi), sizeof(mifi));
+ if (error)
+ break;
+ error = del_m6if(&mifi);
+ break;
+ case MRT6_PIM:
+ error = sooptcopyin(sopt, &optval, sizeof(optval),
+ sizeof(optval));
+ if (error)
+ break;
+ error = set_pim6(&optval);
+ break;
+ default:
+ error = EOPNOTSUPP;
+ break;
+ }
+
+ return (error);
+}
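+
+/*
+ * Userland sketch (hypothetical routing daemon; assumes the raw ICMPv6
+ * socket required by ip6_mrouter_init() below, a hypothetical interface
+ * name, and omits error handling):
+ *
+ *	int s = socket(AF_INET6, SOCK_RAW, IPPROTO_ICMPV6);
+ *	int v = 1;
+ *	struct mif6ctl mc;
+ *
+ *	setsockopt(s, IPPROTO_IPV6, MRT6_INIT, &v, sizeof(v));
+ *	memset(&mc, 0, sizeof(mc));
+ *	mc.mif6c_mifi = 0;
+ *	mc.mif6c_pifi = if_nametoindex("em0");	(physical interface)
+ *	setsockopt(s, IPPROTO_IPV6, MRT6_ADD_MIF, &mc, sizeof(mc));
+ */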
+
+/*
+ * Handle MRT getsockopt commands
+ */
+int
+X_ip6_mrouter_get(struct socket *so, struct sockopt *sopt)
+{
+ int error = 0;
+
+ if (so != V_ip6_mrouter)
+ return (EACCES);
+
+ switch (sopt->sopt_name) {
+ case MRT6_PIM:
+ error = sooptcopyout(sopt, &V_pim6, sizeof(V_pim6));
+ break;
+ }
+ return (error);
+}
+
+/*
+ * Handle ioctl commands to obtain information from the cache
+ */
+int
+X_mrt6_ioctl(u_long cmd, caddr_t data)
+{
+ int ret;
+
+ ret = EINVAL;
+
+ switch (cmd) {
+ case SIOCGETSGCNT_IN6:
+ ret = get_sg_cnt((struct sioc_sg_req6 *)data);
+ break;
+
+ case SIOCGETMIFCNT_IN6:
+ ret = get_mif6_cnt((struct sioc_mif_req6 *)data);
+ break;
+
+ default:
+ break;
+ }
+
+ return (ret);
+}
+
+/*
+ * returns the packet, byte, and rpf-failure counts for the source-group
+ * pair provided
+ */
+static int
+get_sg_cnt(struct sioc_sg_req6 *req)
+{
+ struct mf6c *rt;
+ int ret;
+
+ ret = 0;
+
+ MFC6_LOCK();
+
+ MF6CFIND(req->src.sin6_addr, req->grp.sin6_addr, rt);
+ if (rt == NULL) {
+ ret = ESRCH;
+ } else {
+ req->pktcnt = rt->mf6c_pkt_cnt;
+ req->bytecnt = rt->mf6c_byte_cnt;
+ req->wrong_if = rt->mf6c_wrong_if;
+ }
+
+ MFC6_UNLOCK();
+
+ return (ret);
+}
+
+/*
+ * returns the input and output packet and byte counts on the mif provided
+ */
+static int
+get_mif6_cnt(struct sioc_mif_req6 *req)
+{
+ mifi_t mifi;
+ int ret;
+
+ ret = 0;
+ mifi = req->mifi;
+
+ MIF6_LOCK();
+
+ if (mifi >= nummifs) {
+ ret = EINVAL;
+ } else {
+ req->icount = mif6table[mifi].m6_pkt_in;
+ req->ocount = mif6table[mifi].m6_pkt_out;
+ req->ibytes = mif6table[mifi].m6_bytes_in;
+ req->obytes = mif6table[mifi].m6_bytes_out;
+ }
+
+ MIF6_UNLOCK();
+
+ return (ret);
+}
+
+static int
+set_pim6(int *i)
+{
+ if ((*i != 1) && (*i != 0))
+ return (EINVAL);
+
+ V_pim6 = *i;
+
+ return (0);
+}
+
+/*
+ * Enable multicast routing
+ */
+static int
+ip6_mrouter_init(struct socket *so, int v, int cmd)
+{
+
+#ifdef MRT6DEBUG
+ if (V_mrt6debug)
+ log(LOG_DEBUG,
+ "ip6_mrouter_init: so_type = %d, pr_protocol = %d\n",
+ so->so_type, so->so_proto->pr_protocol);
+#endif
+
+ if (so->so_type != SOCK_RAW ||
+ so->so_proto->pr_protocol != IPPROTO_ICMPV6)
+ return (EOPNOTSUPP);
+
+ if (v != 1)
+ return (ENOPROTOOPT);
+
+ MROUTER6_LOCK();
+
+ if (V_ip6_mrouter != NULL) {
+ MROUTER6_UNLOCK();
+ return (EADDRINUSE);
+ }
+
+ V_ip6_mrouter = so;
+ V_ip6_mrouter_ver = cmd;
+
+ bzero((caddr_t)mf6ctable, sizeof(mf6ctable));
+ bzero((caddr_t)n6expire, sizeof(n6expire));
+
+	V_pim6 = 0;	/* used for stubbing out/in pim stuff */
+
+ callout_init(&expire_upcalls_ch, 0);
+ callout_reset(&expire_upcalls_ch, EXPIRE_TIMEOUT,
+ expire_upcalls, NULL);
+
+ MROUTER6_UNLOCK();
+
+#ifdef MRT6DEBUG
+ if (V_mrt6debug)
+ log(LOG_DEBUG, "ip6_mrouter_init\n");
+#endif
+
+ return (0);
+}
+
+/*
+ * Disable IPv6 multicast forwarding.
+ */
+int
+X_ip6_mrouter_done(void)
+{
+ mifi_t mifi;
+ int i;
+ struct mf6c *rt;
+ struct rtdetq *rte;
+
+ MROUTER6_LOCK();
+
+ if (V_ip6_mrouter == NULL) {
+ MROUTER6_UNLOCK();
+ return (EINVAL);
+ }
+
+ /*
+ * For each phyint in use, disable promiscuous reception of all IPv6
+ * multicasts.
+ */
+ for (mifi = 0; mifi < nummifs; mifi++) {
+ if (mif6table[mifi].m6_ifp &&
+ !(mif6table[mifi].m6_flags & MIFF_REGISTER)) {
+ if_allmulti(mif6table[mifi].m6_ifp, 0);
+ }
+ }
+ bzero((caddr_t)mif6table, sizeof(mif6table));
+ nummifs = 0;
+
+ V_pim6 = 0; /* used to stub out/in pim specific code */
+
+ callout_stop(&expire_upcalls_ch);
+
+ /*
+ * Free all multicast forwarding cache entries.
+ */
+ MFC6_LOCK();
+ for (i = 0; i < MF6CTBLSIZ; i++) {
+ rt = mf6ctable[i];
+ while (rt) {
+ struct mf6c *frt;
+
+ for (rte = rt->mf6c_stall; rte != NULL; ) {
+ struct rtdetq *n = rte->next;
+
+ m_free(rte->m);
+ free(rte, M_MRTABLE6);
+ rte = n;
+ }
+ frt = rt;
+ rt = rt->mf6c_next;
+ free(frt, M_MRTABLE6);
+ }
+ }
+ bzero((caddr_t)mf6ctable, sizeof(mf6ctable));
+ MFC6_UNLOCK();
+
+ /*
+ * Reset register interface
+ */
+ if (reg_mif_num != (mifi_t)-1 && multicast_register_if6 != NULL) {
+ if_detach(multicast_register_if6);
+ if_free(multicast_register_if6);
+ reg_mif_num = (mifi_t)-1;
+ multicast_register_if6 = NULL;
+ }
+
+ V_ip6_mrouter = NULL;
+ V_ip6_mrouter_ver = 0;
+
+ MROUTER6_UNLOCK();
+
+#ifdef MRT6DEBUG
+ if (V_mrt6debug)
+ log(LOG_DEBUG, "ip6_mrouter_done\n");
+#endif
+
+ return (0);
+}
+
+static struct sockaddr_in6 sin6 = { sizeof(sin6), AF_INET6 };
+
+/*
+ * Add a mif to the mif table
+ */
+static int
+add_m6if(struct mif6ctl *mifcp)
+{
+ struct mif6 *mifp;
+ struct ifnet *ifp;
+ int error;
+
+ MIF6_LOCK();
+
+ if (mifcp->mif6c_mifi >= MAXMIFS) {
+ MIF6_UNLOCK();
+ return (EINVAL);
+ }
+ mifp = mif6table + mifcp->mif6c_mifi;
+ if (mifp->m6_ifp != NULL) {
+ MIF6_UNLOCK();
+ return (EADDRINUSE); /* XXX: is it appropriate? */
+ }
+ if (mifcp->mif6c_pifi == 0 || mifcp->mif6c_pifi > V_if_index) {
+ MIF6_UNLOCK();
+ return (ENXIO);
+ }
+
+ ifp = ifnet_byindex(mifcp->mif6c_pifi);
+
+ if (mifcp->mif6c_flags & MIFF_REGISTER) {
+ if (reg_mif_num == (mifi_t)-1) {
+ ifp = if_alloc(IFT_OTHER);
+
+ if_initname(ifp, "register_mif", 0);
+ ifp->if_flags |= IFF_LOOPBACK;
+ if_attach(ifp);
+ multicast_register_if6 = ifp;
+ reg_mif_num = mifcp->mif6c_mifi;
+ /*
+			 * It is impossible to guess the ifindex of the
+			 * register interface, so mif6c_pifi is calculated
+			 * automatically.
+ */
+ mifcp->mif6c_pifi = ifp->if_index;
+ } else {
+ ifp = multicast_register_if6;
+ }
+ } else {
+ /* Make sure the interface supports multicast */
+ if ((ifp->if_flags & IFF_MULTICAST) == 0) {
+ MIF6_UNLOCK();
+ return (EOPNOTSUPP);
+ }
+
+ error = if_allmulti(ifp, 1);
+ if (error) {
+ MIF6_UNLOCK();
+ return (error);
+ }
+ }
+
+ mifp->m6_flags = mifcp->mif6c_flags;
+ mifp->m6_ifp = ifp;
+
+ /* initialize per mif pkt counters */
+ mifp->m6_pkt_in = 0;
+ mifp->m6_pkt_out = 0;
+ mifp->m6_bytes_in = 0;
+ mifp->m6_bytes_out = 0;
+ bzero(&mifp->m6_route, sizeof(mifp->m6_route));
+
+ /* Adjust nummifs up if the mifi is higher than nummifs */
+ if (nummifs <= mifcp->mif6c_mifi)
+ nummifs = mifcp->mif6c_mifi + 1;
+
+ MIF6_UNLOCK();
+
+#ifdef MRT6DEBUG
+ if (V_mrt6debug)
+ log(LOG_DEBUG,
+ "add_mif #%d, phyint %s\n",
+ mifcp->mif6c_mifi,
+ ifp->if_xname);
+#endif
+
+ return (0);
+}
+
+/*
+ * Delete a mif from the mif table
+ */
+static int
+del_m6if_locked(mifi_t *mifip)
+{
+ struct mif6 *mifp = mif6table + *mifip;
+ mifi_t mifi;
+ struct ifnet *ifp;
+
+ MIF6_LOCK_ASSERT();
+
+ if (*mifip >= nummifs)
+ return (EINVAL);
+ if (mifp->m6_ifp == NULL)
+ return (EINVAL);
+
+ if (!(mifp->m6_flags & MIFF_REGISTER)) {
+ /* XXX: TODO: Maintain an ALLMULTI refcount in struct ifnet. */
+ ifp = mifp->m6_ifp;
+ if_allmulti(ifp, 0);
+ } else {
+ if (reg_mif_num != (mifi_t)-1 &&
+ multicast_register_if6 != NULL) {
+ if_detach(multicast_register_if6);
+ if_free(multicast_register_if6);
+ reg_mif_num = (mifi_t)-1;
+ multicast_register_if6 = NULL;
+ }
+ }
+
+ bzero((caddr_t)mifp, sizeof(*mifp));
+
+ /* Adjust nummifs down */
+ for (mifi = nummifs; mifi > 0; mifi--)
+ if (mif6table[mifi - 1].m6_ifp)
+ break;
+ nummifs = mifi;
+
+#ifdef MRT6DEBUG
+ if (V_mrt6debug)
+ log(LOG_DEBUG, "del_m6if %d, nummifs %d\n", *mifip, nummifs);
+#endif
+
+ return (0);
+}
+
+static int
+del_m6if(mifi_t *mifip)
+{
+ int cc;
+
+ MIF6_LOCK();
+ cc = del_m6if_locked(mifip);
+ MIF6_UNLOCK();
+
+ return (cc);
+}
+
+/*
+ * Add an mfc entry
+ */
+static int
+add_m6fc(struct mf6cctl *mfccp)
+{
+ struct mf6c *rt;
+ u_long hash;
+ struct rtdetq *rte;
+ u_short nstl;
+ char ip6bufo[INET6_ADDRSTRLEN], ip6bufg[INET6_ADDRSTRLEN];
+
+ MFC6_LOCK();
+
+ MF6CFIND(mfccp->mf6cc_origin.sin6_addr,
+ mfccp->mf6cc_mcastgrp.sin6_addr, rt);
+
+ /* If an entry already exists, just update the fields */
+ if (rt) {
+#ifdef MRT6DEBUG
+ if (V_mrt6debug & DEBUG_MFC) {
+ log(LOG_DEBUG,
+ "add_m6fc no upcall h %d o %s g %s p %x\n",
+ ip6_sprintf(ip6bufo, &mfccp->mf6cc_origin.sin6_addr),
+ ip6_sprintf(ip6bufg, &mfccp->mf6cc_mcastgrp.sin6_addr),
+ mfccp->mf6cc_parent);
+ }
+#endif
+
+ rt->mf6c_parent = mfccp->mf6cc_parent;
+ rt->mf6c_ifset = mfccp->mf6cc_ifset;
+
+ MFC6_UNLOCK();
+ return (0);
+ }
+
+ /*
+ * Find the entry for which the upcall was made and update
+ */
+ hash = MF6CHASH(mfccp->mf6cc_origin.sin6_addr,
+ mfccp->mf6cc_mcastgrp.sin6_addr);
+ for (rt = mf6ctable[hash], nstl = 0; rt; rt = rt->mf6c_next) {
+ if (IN6_ARE_ADDR_EQUAL(&rt->mf6c_origin.sin6_addr,
+ &mfccp->mf6cc_origin.sin6_addr) &&
+ IN6_ARE_ADDR_EQUAL(&rt->mf6c_mcastgrp.sin6_addr,
+ &mfccp->mf6cc_mcastgrp.sin6_addr) &&
+ (rt->mf6c_stall != NULL)) {
+
+ if (nstl++)
+ log(LOG_ERR,
+ "add_m6fc: %s o %s g %s p %x dbx %p\n",
+ "multiple kernel entries",
+ ip6_sprintf(ip6bufo,
+ &mfccp->mf6cc_origin.sin6_addr),
+ ip6_sprintf(ip6bufg,
+ &mfccp->mf6cc_mcastgrp.sin6_addr),
+ mfccp->mf6cc_parent, rt->mf6c_stall);
+
+#ifdef MRT6DEBUG
+ if (V_mrt6debug & DEBUG_MFC)
+ log(LOG_DEBUG,
+ "add_m6fc o %s g %s p %x dbg %x\n",
+ ip6_sprintf(ip6bufo,
+ &mfccp->mf6cc_origin.sin6_addr),
+ ip6_sprintf(ip6bufg,
+ &mfccp->mf6cc_mcastgrp.sin6_addr),
+ mfccp->mf6cc_parent, rt->mf6c_stall);
+#endif
+
+ rt->mf6c_origin = mfccp->mf6cc_origin;
+ rt->mf6c_mcastgrp = mfccp->mf6cc_mcastgrp;
+ rt->mf6c_parent = mfccp->mf6cc_parent;
+ rt->mf6c_ifset = mfccp->mf6cc_ifset;
+ /* initialize pkt counters per src-grp */
+ rt->mf6c_pkt_cnt = 0;
+ rt->mf6c_byte_cnt = 0;
+ rt->mf6c_wrong_if = 0;
+
+ rt->mf6c_expire = 0; /* Don't clean this guy up */
+ n6expire[hash]--;
+
+ /* free packets Qed at the end of this entry */
+ for (rte = rt->mf6c_stall; rte != NULL; ) {
+ struct rtdetq *n = rte->next;
+ ip6_mdq(rte->m, rte->ifp, rt);
+ m_freem(rte->m);
+#ifdef UPCALL_TIMING
+ collate(&(rte->t));
+#endif /* UPCALL_TIMING */
+ free(rte, M_MRTABLE6);
+ rte = n;
+ }
+ rt->mf6c_stall = NULL;
+ }
+ }
+
+ /*
+ * It is possible that an entry is being inserted without an upcall
+ */
+ if (nstl == 0) {
+#ifdef MRT6DEBUG
+ if (V_mrt6debug & DEBUG_MFC)
+ log(LOG_DEBUG,
+ "add_mfc no upcall h %d o %s g %s p %x\n",
+ hash,
+ ip6_sprintf(ip6bufo, &mfccp->mf6cc_origin.sin6_addr),
+ ip6_sprintf(ip6bufg, &mfccp->mf6cc_mcastgrp.sin6_addr),
+ mfccp->mf6cc_parent);
+#endif
+
+ for (rt = mf6ctable[hash]; rt; rt = rt->mf6c_next) {
+
+ if (IN6_ARE_ADDR_EQUAL(&rt->mf6c_origin.sin6_addr,
+ &mfccp->mf6cc_origin.sin6_addr)&&
+ IN6_ARE_ADDR_EQUAL(&rt->mf6c_mcastgrp.sin6_addr,
+ &mfccp->mf6cc_mcastgrp.sin6_addr)) {
+
+ rt->mf6c_origin = mfccp->mf6cc_origin;
+ rt->mf6c_mcastgrp = mfccp->mf6cc_mcastgrp;
+ rt->mf6c_parent = mfccp->mf6cc_parent;
+ rt->mf6c_ifset = mfccp->mf6cc_ifset;
+ /* initialize pkt counters per src-grp */
+ rt->mf6c_pkt_cnt = 0;
+ rt->mf6c_byte_cnt = 0;
+ rt->mf6c_wrong_if = 0;
+
+ if (rt->mf6c_expire)
+ n6expire[hash]--;
+ rt->mf6c_expire = 0;
+ }
+ }
+ if (rt == NULL) {
+ /* no upcall, so make a new entry */
+ rt = (struct mf6c *)malloc(sizeof(*rt), M_MRTABLE6,
+ M_NOWAIT);
+ if (rt == NULL) {
+ MFC6_UNLOCK();
+ return (ENOBUFS);
+ }
+
+ /* insert new entry at head of hash chain */
+ rt->mf6c_origin = mfccp->mf6cc_origin;
+ rt->mf6c_mcastgrp = mfccp->mf6cc_mcastgrp;
+ rt->mf6c_parent = mfccp->mf6cc_parent;
+ rt->mf6c_ifset = mfccp->mf6cc_ifset;
+ /* initialize pkt counters per src-grp */
+ rt->mf6c_pkt_cnt = 0;
+ rt->mf6c_byte_cnt = 0;
+ rt->mf6c_wrong_if = 0;
+ rt->mf6c_expire = 0;
+ rt->mf6c_stall = NULL;
+
+ /* link into table */
+ rt->mf6c_next = mf6ctable[hash];
+ mf6ctable[hash] = rt;
+ }
+ }
+
+ MFC6_UNLOCK();
+ return (0);
+}
+
+#ifdef UPCALL_TIMING
+/*
+ * collect delay statistics on the upcalls
+ */
+static void
+collate(struct timeval *t)
+{
+ u_long d;
+ struct timeval tp;
+ u_long delta;
+
+ GET_TIME(tp);
+
+ if (TV_LT(*t, tp))
+ {
+ TV_DELTA(tp, *t, delta);
+
+ d = delta >> 10;
+ if (d > UPCALL_MAX)
+ d = UPCALL_MAX;
+
+ ++upcall_data[d];
+ }
+}
+#endif /* UPCALL_TIMING */
+
+/*
+ * Delete an mfc entry
+ */
+static int
+del_m6fc(struct mf6cctl *mfccp)
+{
+ struct sockaddr_in6 origin;
+ struct sockaddr_in6 mcastgrp;
+ struct mf6c *rt;
+ struct mf6c **nptr;
+ u_long hash;
+
+ origin = mfccp->mf6cc_origin;
+ mcastgrp = mfccp->mf6cc_mcastgrp;
+ hash = MF6CHASH(origin.sin6_addr, mcastgrp.sin6_addr);
+
+#ifdef MRT6DEBUG
+ if (V_mrt6debug & DEBUG_MFC) {
+ char ip6bufo[INET6_ADDRSTRLEN], ip6bufg[INET6_ADDRSTRLEN];
+ log(LOG_DEBUG,"del_m6fc orig %s mcastgrp %s\n",
+ ip6_sprintf(ip6bufo, &origin.sin6_addr),
+ ip6_sprintf(ip6bufg, &mcastgrp.sin6_addr));
+ }
+#endif
+
+ MFC6_LOCK();
+
+ nptr = &mf6ctable[hash];
+ while ((rt = *nptr) != NULL) {
+ if (IN6_ARE_ADDR_EQUAL(&origin.sin6_addr,
+ &rt->mf6c_origin.sin6_addr) &&
+ IN6_ARE_ADDR_EQUAL(&mcastgrp.sin6_addr,
+ &rt->mf6c_mcastgrp.sin6_addr) &&
+ rt->mf6c_stall == NULL)
+ break;
+
+ nptr = &rt->mf6c_next;
+ }
+ if (rt == NULL) {
+ MFC6_UNLOCK();
+ return (EADDRNOTAVAIL);
+ }
+
+ *nptr = rt->mf6c_next;
+ free(rt, M_MRTABLE6);
+
+ MFC6_UNLOCK();
+
+ return (0);
+}
+
+static int
+socket_send(struct socket *s, struct mbuf *mm, struct sockaddr_in6 *src)
+{
+
+ if (s) {
+ if (sbappendaddr(&s->so_rcv,
+ (struct sockaddr *)src,
+ mm, (struct mbuf *)0) != 0) {
+ sorwakeup(s);
+ return (0);
+ }
+ }
+ m_freem(mm);
+ return (-1);
+}
+
+/*
+ * IPv6 multicast forwarding function. This function assumes that the packet
+ * pointed to by "ip6" has arrived on (or is about to be sent to) the interface
+ * pointed to by "ifp", and the packet is to be relayed to other networks
+ * that have members of the packet's destination IPv6 multicast group.
+ *
+ * The packet is returned unscathed to the caller, unless it is
+ * erroneous, in which case a non-zero return value tells the caller to
+ * discard it.
+ *
+ * NOTE: this implementation assumes that m->m_pkthdr.rcvif is NULL iff
+ * this function is called in the originating context (i.e., not when
+ * forwarding a packet from another node).  ip6_output(), currently the
+ * only function that calls this one in the originating context,
+ * explicitly ensures this condition.  It is the caller's responsibility
+ * to ensure it if this function is ever called from somewhere else in
+ * the originating context.
+ */
+int
+X_ip6_mforward(struct ip6_hdr *ip6, struct ifnet *ifp, struct mbuf *m)
+{
+ struct mf6c *rt;
+ struct mif6 *mifp;
+ struct mbuf *mm;
+ mifi_t mifi;
+ char ip6bufs[INET6_ADDRSTRLEN], ip6bufd[INET6_ADDRSTRLEN];
+
+#ifdef MRT6DEBUG
+ if (V_mrt6debug & DEBUG_FORWARD)
+ log(LOG_DEBUG, "ip6_mforward: src %s, dst %s, ifindex %d\n",
+ ip6_sprintf(ip6bufs, &ip6->ip6_src),
+ ip6_sprintf(ip6bufd, &ip6->ip6_dst),
+ ifp->if_index);
+#endif
+
+ /*
+ * Don't forward a packet with Hop limit of zero or one,
+ * or a packet destined to a local-only group.
+ */
+ if (ip6->ip6_hlim <= 1 || IN6_IS_ADDR_MC_INTFACELOCAL(&ip6->ip6_dst) ||
+ IN6_IS_ADDR_MC_LINKLOCAL(&ip6->ip6_dst))
+ return (0);
+ ip6->ip6_hlim--;
+
+ /*
+	 * Source address check: do not forward packets with an unspecified
+	 * source.  This was discussed in July 2000 on the ipngwg mailing
+	 * list.  It is rather more serious than the unicast case, because
+	 * some MLD packets can be sent with the unspecified source address
+	 * (although such packets must normally set the hop limit field to 1).
+ */
+ if (IN6_IS_ADDR_UNSPECIFIED(&ip6->ip6_src)) {
+ V_ip6stat.ip6s_cantforward++;
+ if (V_ip6_log_time + V_ip6_log_interval < time_second) {
+ V_ip6_log_time = time_second;
+ log(LOG_DEBUG,
+ "cannot forward "
+ "from %s to %s nxt %d received on %s\n",
+ ip6_sprintf(ip6bufs, &ip6->ip6_src),
+ ip6_sprintf(ip6bufd, &ip6->ip6_dst),
+ ip6->ip6_nxt,
+ if_name(m->m_pkthdr.rcvif));
+ }
+ return (0);
+ }
+
+ MFC6_LOCK();
+
+ /*
+ * Determine forwarding mifs from the forwarding cache table
+ */
+ MF6CFIND(ip6->ip6_src, ip6->ip6_dst, rt);
+
+ /* Entry exists, so forward if necessary */
+ if (rt) {
+ MFC6_UNLOCK();
+ return (ip6_mdq(m, ifp, rt));
+ } else {
+ /*
+		 * If we don't have a route for the packet's origin,
+		 * make a copy of the packet and send a message
+		 * to the routing daemon.
+ */
+
+ struct mbuf *mb0;
+ struct rtdetq *rte;
+ u_long hash;
+/* int i, npkts;*/
+#ifdef UPCALL_TIMING
+ struct timeval tp;
+
+ GET_TIME(tp);
+#endif /* UPCALL_TIMING */
+
+ mrt6stat.mrt6s_no_route++;
+#ifdef MRT6DEBUG
+ if (V_mrt6debug & (DEBUG_FORWARD | DEBUG_MFC))
+ log(LOG_DEBUG, "ip6_mforward: no rte s %s g %s\n",
+ ip6_sprintf(ip6bufs, &ip6->ip6_src),
+ ip6_sprintf(ip6bufd, &ip6->ip6_dst));
+#endif
+
+ /*
+ * Allocate mbufs early so that we don't do extra work if we
+ * are just going to fail anyway.
+ */
+ rte = (struct rtdetq *)malloc(sizeof(*rte), M_MRTABLE6,
+ M_NOWAIT);
+ if (rte == NULL) {
+ MFC6_UNLOCK();
+ return (ENOBUFS);
+ }
+ mb0 = m_copy(m, 0, M_COPYALL);
+ /*
+ * Pullup packet header if needed before storing it,
+ * as other references may modify it in the meantime.
+ */
+ if (mb0 &&
+ (M_HASCL(mb0) || mb0->m_len < sizeof(struct ip6_hdr)))
+ mb0 = m_pullup(mb0, sizeof(struct ip6_hdr));
+ if (mb0 == NULL) {
+ free(rte, M_MRTABLE6);
+ MFC6_UNLOCK();
+ return (ENOBUFS);
+ }
+
+ /* is there an upcall waiting for this packet? */
+ hash = MF6CHASH(ip6->ip6_src, ip6->ip6_dst);
+ for (rt = mf6ctable[hash]; rt; rt = rt->mf6c_next) {
+ if (IN6_ARE_ADDR_EQUAL(&ip6->ip6_src,
+ &rt->mf6c_origin.sin6_addr) &&
+ IN6_ARE_ADDR_EQUAL(&ip6->ip6_dst,
+ &rt->mf6c_mcastgrp.sin6_addr) &&
+ (rt->mf6c_stall != NULL))
+ break;
+ }
+
+ if (rt == NULL) {
+ struct mrt6msg *im;
+#ifdef MRT6_OINIT
+ struct omrt6msg *oim;
+#endif
+
+ /* no upcall, so make a new entry */
+ rt = (struct mf6c *)malloc(sizeof(*rt), M_MRTABLE6,
+ M_NOWAIT);
+ if (rt == NULL) {
+ free(rte, M_MRTABLE6);
+ m_freem(mb0);
+ MFC6_UNLOCK();
+ return (ENOBUFS);
+ }
+ /*
+ * Make a copy of the header to send to the user
+ * level process
+ */
+ mm = m_copy(mb0, 0, sizeof(struct ip6_hdr));
+
+ if (mm == NULL) {
+ free(rte, M_MRTABLE6);
+ m_freem(mb0);
+ free(rt, M_MRTABLE6);
+ MFC6_UNLOCK();
+ return (ENOBUFS);
+ }
+
+ /*
+ * Send message to routing daemon
+ */
+ sin6.sin6_addr = ip6->ip6_src;
+
+ im = NULL;
+#ifdef MRT6_OINIT
+ oim = NULL;
+#endif
+ switch (V_ip6_mrouter_ver) {
+#ifdef MRT6_OINIT
+ case MRT6_OINIT:
+ oim = mtod(mm, struct omrt6msg *);
+ oim->im6_msgtype = MRT6MSG_NOCACHE;
+ oim->im6_mbz = 0;
+ break;
+#endif
+ case MRT6_INIT:
+ im = mtod(mm, struct mrt6msg *);
+ im->im6_msgtype = MRT6MSG_NOCACHE;
+ im->im6_mbz = 0;
+ break;
+ default:
+ free(rte, M_MRTABLE6);
+ m_freem(mb0);
+ free(rt, M_MRTABLE6);
+ MFC6_UNLOCK();
+ return (EINVAL);
+ }
+
+#ifdef MRT6DEBUG
+ if (V_mrt6debug & DEBUG_FORWARD)
+ log(LOG_DEBUG,
+ "getting the iif info in the kernel\n");
+#endif
+
+ for (mifp = mif6table, mifi = 0;
+ mifi < nummifs && mifp->m6_ifp != ifp;
+ mifp++, mifi++)
+ ;
+
+ switch (V_ip6_mrouter_ver) {
+#ifdef MRT6_OINIT
+ case MRT6_OINIT:
+ oim->im6_mif = mifi;
+ break;
+#endif
+ case MRT6_INIT:
+ im->im6_mif = mifi;
+ break;
+ }
+
+ if (socket_send(V_ip6_mrouter, mm, &sin6) < 0) {
+ log(LOG_WARNING, "ip6_mforward: ip6_mrouter "
+ "socket queue full\n");
+ mrt6stat.mrt6s_upq_sockfull++;
+ free(rte, M_MRTABLE6);
+ m_freem(mb0);
+ free(rt, M_MRTABLE6);
+ MFC6_UNLOCK();
+ return (ENOBUFS);
+ }
+
+ mrt6stat.mrt6s_upcalls++;
+
+ /* insert new entry at head of hash chain */
+ bzero(rt, sizeof(*rt));
+ rt->mf6c_origin.sin6_family = AF_INET6;
+ rt->mf6c_origin.sin6_len = sizeof(struct sockaddr_in6);
+ rt->mf6c_origin.sin6_addr = ip6->ip6_src;
+ rt->mf6c_mcastgrp.sin6_family = AF_INET6;
+ rt->mf6c_mcastgrp.sin6_len = sizeof(struct sockaddr_in6);
+ rt->mf6c_mcastgrp.sin6_addr = ip6->ip6_dst;
+ rt->mf6c_expire = UPCALL_EXPIRE;
+ n6expire[hash]++;
+ rt->mf6c_parent = MF6C_INCOMPLETE_PARENT;
+
+ /* link into table */
+ rt->mf6c_next = mf6ctable[hash];
+ mf6ctable[hash] = rt;
+ /* Add this entry to the end of the queue */
+ rt->mf6c_stall = rte;
+ } else {
+ /* determine if q has overflowed */
+ struct rtdetq **p;
+ int npkts = 0;
+
+ for (p = &rt->mf6c_stall; *p != NULL; p = &(*p)->next)
+ if (++npkts > MAX_UPQ6) {
+ mrt6stat.mrt6s_upq_ovflw++;
+ free(rte, M_MRTABLE6);
+ m_freem(mb0);
+ MFC6_UNLOCK();
+ return (0);
+ }
+
+ /* Add this entry to the end of the queue */
+ *p = rte;
+ }
+
+ rte->next = NULL;
+ rte->m = mb0;
+ rte->ifp = ifp;
+#ifdef UPCALL_TIMING
+ rte->t = tp;
+#endif /* UPCALL_TIMING */
+
+ MFC6_UNLOCK();
+
+ return (0);
+ }
+}
+
+/*
+ * Clean up cache entries if upcalls are not serviced.
+ * Called from the slow timeout mechanism every quarter second
+ * (EXPIRE_TIMEOUT).
+ */
+static void
+expire_upcalls(void *unused)
+{
+ struct rtdetq *rte;
+ struct mf6c *mfc, **nptr;
+ int i;
+
+ MFC6_LOCK();
+ for (i = 0; i < MF6CTBLSIZ; i++) {
+ if (n6expire[i] == 0)
+ continue;
+ nptr = &mf6ctable[i];
+ while ((mfc = *nptr) != NULL) {
+ rte = mfc->mf6c_stall;
+ /*
+			 * Skip real cache entries.
+			 * Make sure it wasn't marked not to expire
+			 * (shouldn't happen), and check whether it
+			 * expires now.
+ */
+ if (rte != NULL &&
+ mfc->mf6c_expire != 0 &&
+ --mfc->mf6c_expire == 0) {
+#ifdef MRT6DEBUG
+ if (V_mrt6debug & DEBUG_EXPIRE) {
+ char ip6bufo[INET6_ADDRSTRLEN];
+ char ip6bufg[INET6_ADDRSTRLEN];
+ log(LOG_DEBUG, "expire_upcalls: expiring (%s %s)\n",
+ ip6_sprintf(ip6bufo, &mfc->mf6c_origin.sin6_addr),
+ ip6_sprintf(ip6bufg, &mfc->mf6c_mcastgrp.sin6_addr));
+ }
+#endif
+ /*
+				 * Drop all the queued packets, freeing each
+				 * mbuf along with its packet, interface, and
+				 * timing info.
+ */
+ do {
+ struct rtdetq *n = rte->next;
+ m_freem(rte->m);
+ free(rte, M_MRTABLE6);
+ rte = n;
+ } while (rte != NULL);
+ mrt6stat.mrt6s_cache_cleanups++;
+ n6expire[i]--;
+
+ *nptr = mfc->mf6c_next;
+ free(mfc, M_MRTABLE6);
+ } else {
+ nptr = &mfc->mf6c_next;
+ }
+ }
+ }
+ MFC6_UNLOCK();
+ callout_reset(&expire_upcalls_ch, EXPIRE_TIMEOUT,
+ expire_upcalls, NULL);
+}
+
+/*
+ * Packet forwarding routine once entry in the cache is made
+ */
+static int
+ip6_mdq(struct mbuf *m, struct ifnet *ifp, struct mf6c *rt)
+{
+ struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *);
+ mifi_t mifi, iif;
+ struct mif6 *mifp;
+ int plen = m->m_pkthdr.len;
+ struct in6_addr src0, dst0; /* copies for local work */
+ u_int32_t iszone, idzone, oszone, odzone;
+ int error = 0;
+
+/*
+ * Macro to send packet on mif. Since RSVP packets don't get counted on
+ * input, they shouldn't get counted on output, so statistics keeping is
+ * separate.
+ */
+
+#define MC6_SEND(ip6, mifp, m) do { \
+ if ((mifp)->m6_flags & MIFF_REGISTER) \
+ register_send((ip6), (mifp), (m)); \
+ else \
+ phyint_send((ip6), (mifp), (m)); \
+} while (/*CONSTCOND*/ 0)
+
+ /*
+ * Don't forward if it didn't arrive from the parent mif
+ * for its origin.
+ */
+ mifi = rt->mf6c_parent;
+ if ((mifi >= nummifs) || (mif6table[mifi].m6_ifp != ifp)) {
+ /* came in the wrong interface */
+#ifdef MRT6DEBUG
+ if (V_mrt6debug & DEBUG_FORWARD)
+ log(LOG_DEBUG,
+ "wrong if: ifid %d mifi %d mififid %x\n",
+ ifp->if_index, mifi,
+ mif6table[mifi].m6_ifp->if_index);
+#endif
+ mrt6stat.mrt6s_wrong_if++;
+ rt->mf6c_wrong_if++;
+ /*
+ * If we are doing PIM processing, and we are forwarding
+ * packets on this interface, send a message to the
+ * routing daemon.
+ */
+ /* have to make sure this is a valid mif */
+ if (mifi < nummifs && mif6table[mifi].m6_ifp)
+ if (V_pim6 && (m->m_flags & M_LOOP) == 0) {
+ /*
+ * Check the M_LOOP flag to avoid an
+ * unnecessary PIM assert.
+ * XXX: M_LOOP is an ad-hoc hack...
+ */
+ static struct sockaddr_in6 sin6 =
+ { sizeof(sin6), AF_INET6 };
+
+ struct mbuf *mm;
+ struct mrt6msg *im;
+#ifdef MRT6_OINIT
+ struct omrt6msg *oim;
+#endif
+
+ mm = m_copy(m, 0, sizeof(struct ip6_hdr));
+ if (mm &&
+ (M_HASCL(mm) ||
+ mm->m_len < sizeof(struct ip6_hdr)))
+ mm = m_pullup(mm, sizeof(struct ip6_hdr));
+ if (mm == NULL)
+ return (ENOBUFS);
+
+#ifdef MRT6_OINIT
+ oim = NULL;
+#endif
+ im = NULL;
+ switch (V_ip6_mrouter_ver) {
+#ifdef MRT6_OINIT
+ case MRT6_OINIT:
+ oim = mtod(mm, struct omrt6msg *);
+ oim->im6_msgtype = MRT6MSG_WRONGMIF;
+ oim->im6_mbz = 0;
+ break;
+#endif
+ case MRT6_INIT:
+ im = mtod(mm, struct mrt6msg *);
+ im->im6_msgtype = MRT6MSG_WRONGMIF;
+ im->im6_mbz = 0;
+ break;
+ default:
+ m_freem(mm);
+ return (EINVAL);
+ }
+
+ for (mifp = mif6table, iif = 0;
+ iif < nummifs && mifp &&
+ mifp->m6_ifp != ifp;
+ mifp++, iif++)
+ ;
+
+ switch (V_ip6_mrouter_ver) {
+#ifdef MRT6_OINIT
+ case MRT6_OINIT:
+ oim->im6_mif = iif;
+ sin6.sin6_addr = oim->im6_src;
+ break;
+#endif
+ case MRT6_INIT:
+ im->im6_mif = iif;
+ sin6.sin6_addr = im->im6_src;
+ break;
+ }
+
+ mrt6stat.mrt6s_upcalls++;
+
+ if (socket_send(V_ip6_mrouter, mm, &sin6) < 0) {
+#ifdef MRT6DEBUG
+ if (V_mrt6debug)
+ log(LOG_WARNING, "mdq, ip6_mrouter socket queue full\n");
+#endif
+ ++mrt6stat.mrt6s_upq_sockfull;
+ return (ENOBUFS);
+ } /* if socket Q full */
+ } /* if PIM */
+ return (0);
+ } /* if wrong iif */
+
+ /* If I sourced this packet, it counts as output, else it was input. */
+ if (m->m_pkthdr.rcvif == NULL) {
+ /* XXX: is rcvif really NULL when output?? */
+ mif6table[mifi].m6_pkt_out++;
+ mif6table[mifi].m6_bytes_out += plen;
+ } else {
+ mif6table[mifi].m6_pkt_in++;
+ mif6table[mifi].m6_bytes_in += plen;
+ }
+ rt->mf6c_pkt_cnt++;
+ rt->mf6c_byte_cnt += plen;
+
+ /*
+ * For each mif, forward a copy of the packet if there are group
+ * members downstream on the interface.
+ */
+ src0 = ip6->ip6_src;
+ dst0 = ip6->ip6_dst;
+ if ((error = in6_setscope(&src0, ifp, &iszone)) != 0 ||
+ (error = in6_setscope(&dst0, ifp, &idzone)) != 0) {
+ V_ip6stat.ip6s_badscope++;
+ return (error);
+ }
+ for (mifp = mif6table, mifi = 0; mifi < nummifs; mifp++, mifi++) {
+ if (IF_ISSET(mifi, &rt->mf6c_ifset)) {
+ /*
+			 * Check whether the outgoing packet is going to break
+			 * a scope boundary.
+			 * XXX For packets through the PIM register tunnel
+			 * interface, we trust the routing daemon.
+ */
+ if (!(mif6table[rt->mf6c_parent].m6_flags &
+ MIFF_REGISTER) &&
+ !(mif6table[mifi].m6_flags & MIFF_REGISTER)) {
+ if (in6_setscope(&src0, mif6table[mifi].m6_ifp,
+ &oszone) ||
+ in6_setscope(&dst0, mif6table[mifi].m6_ifp,
+ &odzone) ||
+ iszone != oszone ||
+ idzone != odzone) {
+ V_ip6stat.ip6s_badscope++;
+ continue;
+ }
+ }
+
+ mifp->m6_pkt_out++;
+ mifp->m6_bytes_out += plen;
+ MC6_SEND(ip6, mifp, m);
+ }
+ }
+ return (0);
+}
+
+static void
+phyint_send(struct ip6_hdr *ip6, struct mif6 *mifp, struct mbuf *m)
+{
+ struct mbuf *mb_copy;
+ struct ifnet *ifp = mifp->m6_ifp;
+ int error = 0;
+ struct sockaddr_in6 *dst6;
+ u_long linkmtu;
+
+ dst6 = &mifp->m6_route.ro_dst;
+
+ /*
+ * Make a new reference to the packet; make sure that
+ * the IPv6 header is actually copied, not just referenced,
+ * so that ip6_output() only scribbles on the copy.
+ */
+ mb_copy = m_copy(m, 0, M_COPYALL);
+ if (mb_copy &&
+ (M_HASCL(mb_copy) || mb_copy->m_len < sizeof(struct ip6_hdr)))
+ mb_copy = m_pullup(mb_copy, sizeof(struct ip6_hdr));
+ if (mb_copy == NULL) {
+ return;
+ }
+ /* set MCAST flag to the outgoing packet */
+ mb_copy->m_flags |= M_MCAST;
+
+ /*
+ * If we sourced the packet, call ip6_output since we may divide
+ * the packet into fragments when the packet is too big for the
+ * outgoing interface.
+ * Otherwise, we can simply send the packet to the interface
+ * sending queue.
+ */
+ if (m->m_pkthdr.rcvif == NULL) {
+ struct ip6_moptions im6o;
+
+ im6o.im6o_multicast_ifp = ifp;
+ /* XXX: ip6_output will override ip6->ip6_hlim */
+ im6o.im6o_multicast_hlim = ip6->ip6_hlim;
+ im6o.im6o_multicast_loop = 1;
+ error = ip6_output(mb_copy, NULL, &mifp->m6_route,
+ IPV6_FORWARDING, &im6o, NULL, NULL);
+
+#ifdef MRT6DEBUG
+ if (V_mrt6debug & DEBUG_XMIT)
+ log(LOG_DEBUG, "phyint_send on mif %d err %d\n",
+ mifp - mif6table, error);
+#endif
+ return;
+ }
+
+ /*
+ * If configured to loop back multicasts by default,
+ * loop back a copy now.
+ */
+ if (in6_mcast_loop) {
+ dst6->sin6_len = sizeof(struct sockaddr_in6);
+ dst6->sin6_family = AF_INET6;
+ dst6->sin6_addr = ip6->ip6_dst;
+ ip6_mloopback(ifp, m, &mifp->m6_route.ro_dst);
+ }
+
+ /*
+ * Put the packet into the sending queue of the outgoing interface
+ * if it would fit in the MTU of the interface.
+ */
+ linkmtu = IN6_LINKMTU(ifp);
+ if (mb_copy->m_pkthdr.len <= linkmtu || linkmtu < IPV6_MMTU) {
+ dst6->sin6_len = sizeof(struct sockaddr_in6);
+ dst6->sin6_family = AF_INET6;
+ dst6->sin6_addr = ip6->ip6_dst;
+ /*
+ * We just call if_output instead of nd6_output here, since
+ * we need no ND for a multicast forwarded packet...right?
+ */
+ error = (*ifp->if_output)(ifp, mb_copy,
+ (struct sockaddr *)&mifp->m6_route.ro_dst, NULL);
+#ifdef MRT6DEBUG
+ if (V_mrt6debug & DEBUG_XMIT)
+ log(LOG_DEBUG, "phyint_send on mif %d err %d\n",
+ mifp - mif6table, error);
+#endif
+ } else {
+ /*
+ * Path MTU discovery is intentionally disabled by default, since
+ * various routers may advertise the path MTU via multicast and
+ * such notifications could be abused to mount a DoS attack on a router.
+ */
+ if (V_ip6_mcast_pmtu)
+ icmp6_error(mb_copy, ICMP6_PACKET_TOO_BIG, 0, linkmtu);
+ else {
+#ifdef MRT6DEBUG
+ if (V_mrt6debug & DEBUG_XMIT) {
+ char ip6bufs[INET6_ADDRSTRLEN];
+ char ip6bufd[INET6_ADDRSTRLEN];
+ log(LOG_DEBUG,
+ "phyint_send: packet too big on %s o %s "
+ "g %s size %d(discarded)\n",
+ if_name(ifp),
+ ip6_sprintf(ip6bufs, &ip6->ip6_src),
+ ip6_sprintf(ip6bufd, &ip6->ip6_dst),
+ mb_copy->m_pkthdr.len);
+ }
+#endif /* MRT6DEBUG */
+ m_freem(mb_copy); /* simply discard the packet */
+ }
+ }
+}
+
+static int
+register_send(struct ip6_hdr *ip6, struct mif6 *mif, struct mbuf *m)
+{
+ struct mbuf *mm;
+ int i, len = m->m_pkthdr.len;
+ static struct sockaddr_in6 sin6 = { sizeof(sin6), AF_INET6 };
+ struct mrt6msg *im6;
+
+#ifdef MRT6DEBUG
+ if (V_mrt6debug) {
+ char ip6bufs[INET6_ADDRSTRLEN], ip6bufd[INET6_ADDRSTRLEN];
+ log(LOG_DEBUG, "** IPv6 register_send **\n src %s dst %s\n",
+ ip6_sprintf(ip6bufs, &ip6->ip6_src),
+ ip6_sprintf(ip6bufd, &ip6->ip6_dst));
+ }
+#endif
+ ++pim6stat.pim6s_snd_registers;
+
+ /* Make a copy of the packet to send to the user level process */
+ MGETHDR(mm, M_DONTWAIT, MT_HEADER);
+ if (mm == NULL)
+ return (ENOBUFS);
+ mm->m_pkthdr.rcvif = NULL;
+ mm->m_data += max_linkhdr;
+ mm->m_len = sizeof(struct ip6_hdr);
+
+ if ((mm->m_next = m_copy(m, 0, M_COPYALL)) == NULL) {
+ m_freem(mm);
+ return (ENOBUFS);
+ }
+ i = MHLEN - M_LEADINGSPACE(mm);
+ if (i > len)
+ i = len;
+ mm = m_pullup(mm, i);
+ if (mm == NULL)
+ return (ENOBUFS);
+/* TODO: check it! */
+ mm->m_pkthdr.len = len + sizeof(struct ip6_hdr);
+
+ /*
+ * Send message to routing daemon
+ */
+ sin6.sin6_addr = ip6->ip6_src;
+
+ im6 = mtod(mm, struct mrt6msg *);
+ im6->im6_msgtype = MRT6MSG_WHOLEPKT;
+ im6->im6_mbz = 0;
+
+ im6->im6_mif = mif - mif6table;
+
+ /* iif info is not given for register encapsulation */
+ mrt6stat.mrt6s_upcalls++;
+
+ if (socket_send(V_ip6_mrouter, mm, &sin6) < 0) {
+#ifdef MRT6DEBUG
+ if (V_mrt6debug)
+ log(LOG_WARNING,
+ "register_send: ip6_mrouter socket queue full\n");
+#endif
+ ++mrt6stat.mrt6s_upq_sockfull;
+ return (ENOBUFS);
+ }
+ return (0);
+}
+
+/*
+ * pim6_encapcheck() is called by the encap6_input() path at runtime to
+ * determine if a packet is for PIM, allowing PIM to be dynamically loaded
+ * into the kernel.
+ */
+static int
+pim6_encapcheck(const struct mbuf *m, int off, int proto, void *arg)
+{
+
+#ifdef DIAGNOSTIC
+ KASSERT(proto == IPPROTO_PIM, ("not for IPPROTO_PIM"));
+#endif
+ if (proto != IPPROTO_PIM)
+ return 0; /* not for us; reject the datagram. */
+
+ return 64; /* claim the datagram. */
+}
+
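+/*
+ * encap6_input() treats the value returned here as a match strength:
+ * zero rejects the datagram, and among multiple claimants the largest
+ * value wins, so returning 64 outbids any generic matcher for PIM.
+ */
+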
+/*
+ * PIM sparse mode hook
+ * Receives the pim control messages, and passes them up to the listening
+ * socket, using rip6_input.
+ * The only message processed is the REGISTER pim message; the pim header
+ * is stripped off, and the inner packet is looped back onto the register mif.
+ */
+int
+pim6_input(struct mbuf **mp, int *offp, int proto)
+{
+ struct pim *pim; /* pointer to a pim struct */
+ struct ip6_hdr *ip6;
+ int pimlen;
+ struct mbuf *m = *mp;
+ int minlen;
+ int off = *offp;
+
+ ++pim6stat.pim6s_rcv_total;
+
+ ip6 = mtod(m, struct ip6_hdr *);
+ pimlen = m->m_pkthdr.len - *offp;
+
+ /*
+ * Validate lengths
+ */
+ if (pimlen < PIM_MINLEN) {
+ ++pim6stat.pim6s_rcv_tooshort;
+#ifdef MRT6DEBUG
+ if (V_mrt6debug & DEBUG_PIM)
+ log(LOG_DEBUG,"pim6_input: PIM packet too short\n");
+#endif
+ m_freem(m);
+ return (IPPROTO_DONE);
+ }
+
+ /*
+ * if the packet is at least as big as a REGISTER, go ahead
+ * and grab the PIM REGISTER header size, to avoid another
+ * possible m_pullup() later.
+ *
+ * PIM_MINLEN == pimhdr + u_int32 == 8
+ * PIM6_REG_MINLEN == pimhdr + reghdr + eip6hdr == 4 + 4 + 40
+ */
+ minlen = (pimlen >= PIM6_REG_MINLEN) ? PIM6_REG_MINLEN : PIM_MINLEN;
+
+ /*
+ * Make sure that the IP6 and PIM headers are in contiguous memory,
+ * and possibly the PIM REGISTER header as well
+ */
+#ifndef PULLDOWN_TEST
+ IP6_EXTHDR_CHECK(m, off, minlen, IPPROTO_DONE);
+ /* adjust pointer */
+ ip6 = mtod(m, struct ip6_hdr *);
+
+ /* adjust mbuf to point to the PIM header */
+ pim = (struct pim *)((caddr_t)ip6 + off);
+#else
+ IP6_EXTHDR_GET(pim, struct pim *, m, off, minlen);
+ if (pim == NULL) {
+ pim6stat.pim6s_rcv_tooshort++;
+ return (IPPROTO_DONE);
+ }
+#endif
+
+#define PIM6_CHECKSUM
+#ifdef PIM6_CHECKSUM
+ {
+ int cksumlen;
+
+ /*
+ * Validate checksum.
+ * If PIM REGISTER, exclude the data packet
+ */
+ if (pim->pim_type == PIM_REGISTER)
+ cksumlen = PIM_MINLEN;
+ else
+ cksumlen = pimlen;
+
+ if (in6_cksum(m, IPPROTO_PIM, off, cksumlen)) {
+ ++pim6stat.pim6s_rcv_badsum;
+#ifdef MRT6DEBUG
+ if (V_mrt6debug & DEBUG_PIM)
+ log(LOG_DEBUG,
+ "pim6_input: invalid checksum\n");
+#endif
+ m_freem(m);
+ return (IPPROTO_DONE);
+ }
+ }
+#endif /* PIM6_CHECKSUM */
+
+ /* PIM version check */
+ if (pim->pim_ver != PIM_VERSION) {
+ ++pim6stat.pim6s_rcv_badversion;
+#ifdef MRT6DEBUG
+ log(LOG_ERR,
+ "pim6_input: incorrect version %d, expecting %d\n",
+ pim->pim_ver, PIM_VERSION);
+#endif
+ m_freem(m);
+ return (IPPROTO_DONE);
+ }
+
+ if (pim->pim_type == PIM_REGISTER) {
+ /*
+ * since this is a REGISTER, we'll make a copy of the register
+ * headers ip6+pim+u_int32_t+encap_ip6, to be passed up to the
+ * routing daemon.
+ */
+ static struct sockaddr_in6 dst = { sizeof(dst), AF_INET6 };
+
+ struct mbuf *mcp;
+ struct ip6_hdr *eip6;
+ u_int32_t *reghdr;
+ int rc;
+#ifdef MRT6DEBUG
+ char ip6bufs[INET6_ADDRSTRLEN], ip6bufd[INET6_ADDRSTRLEN];
+#endif
+
+ ++pim6stat.pim6s_rcv_registers;
+
+ if ((reg_mif_num >= nummifs) || (reg_mif_num == (mifi_t) -1)) {
+#ifdef MRT6DEBUG
+ if (V_mrt6debug & DEBUG_PIM)
+ log(LOG_DEBUG,
+ "pim6_input: register mif not set: %d\n",
+ reg_mif_num);
+#endif
+ m_freem(m);
+ return (IPPROTO_DONE);
+ }
+
+ reghdr = (u_int32_t *)(pim + 1);
+
+ if ((ntohl(*reghdr) & PIM_NULL_REGISTER))
+ goto pim6_input_to_daemon;
+
+ /*
+ * Validate length
+ */
+ if (pimlen < PIM6_REG_MINLEN) {
+ ++pim6stat.pim6s_rcv_tooshort;
+ ++pim6stat.pim6s_rcv_badregisters;
+#ifdef MRT6DEBUG
+ log(LOG_ERR,
+ "pim6_input: register packet size too "
+ "small %d from %s\n",
+ pimlen, ip6_sprintf(ip6bufs, &ip6->ip6_src));
+#endif
+ m_freem(m);
+ return (IPPROTO_DONE);
+ }
+
+ eip6 = (struct ip6_hdr *) (reghdr + 1);
+#ifdef MRT6DEBUG
+ if (V_mrt6debug & DEBUG_PIM)
+ log(LOG_DEBUG,
+ "pim6_input[register], eip6: %s -> %s, "
+ "eip6 plen %d\n",
+ ip6_sprintf(ip6bufs, &eip6->ip6_src),
+ ip6_sprintf(ip6bufd, &eip6->ip6_dst),
+ ntohs(eip6->ip6_plen));
+#endif
+
+ /* verify the version number of the inner packet */
+ if ((eip6->ip6_vfc & IPV6_VERSION_MASK) != IPV6_VERSION) {
+ ++pim6stat.pim6s_rcv_badregisters;
+#ifdef MRT6DEBUG
+ log(LOG_DEBUG, "pim6_input: invalid IP version (%d) "
+ "of the inner packet\n",
+ (eip6->ip6_vfc & IPV6_VERSION));
+#endif
+ m_freem(m);
+ return (IPPROTO_NONE);
+ }
+
+ /* verify the inner packet is destined to a mcast group */
+ if (!IN6_IS_ADDR_MULTICAST(&eip6->ip6_dst)) {
+ ++pim6stat.pim6s_rcv_badregisters;
+#ifdef MRT6DEBUG
+ if (V_mrt6debug & DEBUG_PIM)
+ log(LOG_DEBUG,
+ "pim6_input: inner packet of register "
+ "is not multicast %s\n",
+ ip6_sprintf(ip6bufd, &eip6->ip6_dst));
+#endif
+ m_freem(m);
+ return (IPPROTO_DONE);
+ }
+
+ /*
+ * make a copy of the whole header to pass to the daemon later.
+ */
+ mcp = m_copy(m, 0, off + PIM6_REG_MINLEN);
+ if (mcp == NULL) {
+#ifdef MRT6DEBUG
+ log(LOG_ERR,
+ "pim6_input: pim register: "
+ "could not copy register head\n");
+#endif
+ m_freem(m);
+ return (IPPROTO_DONE);
+ }
+
+ /*
+ * forward the inner ip6 packet; point m_data at the inner ip6.
+ */
+ m_adj(m, off + PIM_MINLEN);
+#ifdef MRT6DEBUG
+ if (V_mrt6debug & DEBUG_PIM) {
+ log(LOG_DEBUG,
+ "pim6_input: forwarding decapsulated register: "
+ "src %s, dst %s, mif %d\n",
+ ip6_sprintf(ip6bufs, &eip6->ip6_src),
+ ip6_sprintf(ip6bufd, &eip6->ip6_dst),
+ reg_mif_num);
+ }
+#endif
+
+ rc = if_simloop(mif6table[reg_mif_num].m6_ifp, m,
+ dst.sin6_family, 0);
+
+ /* prepare the register head to send to the mrouting daemon */
+ m = mcp;
+ }
+
+ /*
+ * Pass the PIM message up to the daemon; if it is a register message
+ * pass the 'head' only up to the daemon. This includes the
+ * encapsulator ip6 header, pim header, register header and the
+ * encapsulated ip6 header.
+ */
+ pim6_input_to_daemon:
+ rip6_input(&m, offp, proto);
+ return (IPPROTO_DONE);
+}
+
+static int
+ip6_mroute_modevent(module_t mod, int type, void *unused)
+{
+
+ switch (type) {
+ case MOD_LOAD:
+ MROUTER6_LOCK_INIT();
+ MFC6_LOCK_INIT();
+ MIF6_LOCK_INIT();
+
+ pim6_encap_cookie = encap_attach_func(AF_INET6, IPPROTO_PIM,
+ pim6_encapcheck,
+ (const struct protosw *)&in6_pim_protosw, NULL);
+ if (pim6_encap_cookie == NULL) {
+ printf("ip6_mroute: unable to attach pim6 encap\n");
+ MIF6_LOCK_DESTROY();
+ MFC6_LOCK_DESTROY();
+ MROUTER6_LOCK_DESTROY();
+ return (EINVAL);
+ }
+
+ ip6_mforward = X_ip6_mforward;
+ ip6_mrouter_done = X_ip6_mrouter_done;
+ ip6_mrouter_get = X_ip6_mrouter_get;
+ ip6_mrouter_set = X_ip6_mrouter_set;
+ mrt6_ioctl = X_mrt6_ioctl;
+ break;
+
+ case MOD_UNLOAD:
+ if (V_ip6_mrouter != NULL)
+ return EINVAL;
+
+ if (pim6_encap_cookie) {
+ encap_detach(pim6_encap_cookie);
+ pim6_encap_cookie = NULL;
+ }
+ X_ip6_mrouter_done();
+ ip6_mforward = NULL;
+ ip6_mrouter_done = NULL;
+ ip6_mrouter_get = NULL;
+ ip6_mrouter_set = NULL;
+ mrt6_ioctl = NULL;
+
+ MIF6_LOCK_DESTROY();
+ MFC6_LOCK_DESTROY();
+ MROUTER6_LOCK_DESTROY();
+ break;
+
+ default:
+ return (EOPNOTSUPP);
+ }
+
+ return (0);
+}
+
+static moduledata_t ip6_mroutemod = {
+ "ip6_mroute",
+ ip6_mroute_modevent,
+ 0
+};
+
+DECLARE_MODULE(ip6_mroute, ip6_mroutemod, SI_SUB_PSEUDO, SI_ORDER_ANY);
diff --git a/rtems/freebsd/netinet6/ip6_mroute.h b/rtems/freebsd/netinet6/ip6_mroute.h
new file mode 100644
index 00000000..198659fa
--- /dev/null
+++ b/rtems/freebsd/netinet6/ip6_mroute.h
@@ -0,0 +1,271 @@
+/*-
+ * Copyright (C) 1998 WIDE Project.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the project nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $KAME: ip6_mroute.h,v 1.19 2001/06/14 06:12:55 suz Exp $
+ * $FreeBSD$
+ */
+
+/* BSDI ip_mroute.h,v 2.5 1996/10/11 16:01:48 pjd Exp */
+
+/*
+ * Definitions for IP multicast forwarding.
+ *
+ * Written by David Waitzman, BBN Labs, August 1988.
+ * Modified by Steve Deering, Stanford, February 1989.
+ * Modified by Ajit Thyagarajan, PARC, August 1993.
+ * Modified by Ajit Thyagarajan, PARC, August 1994.
+ * Modified by Ahmed Helmy, USC, September 1996.
+ *
+ * MROUTING Revision: 1.2
+ */
+
+#ifndef _NETINET6_IP6_MROUTE_HH_
+#define _NETINET6_IP6_MROUTE_HH_
+
+/*
+ * Multicast Routing set/getsockopt commands.
+ */
+#ifdef _KERNEL
+#define MRT6_OINIT 100 /* initialize forwarder (omrt6msg) */
+#endif
+#define MRT6_DONE 101 /* shut down forwarder */
+#define MRT6_ADD_MIF 102 /* add multicast interface */
+#define MRT6_DEL_MIF 103 /* delete multicast interface */
+#define MRT6_ADD_MFC 104 /* insert forwarding cache entry */
+#define MRT6_DEL_MFC 105 /* delete forwarding cache entry */
+#define MRT6_PIM 107 /* enable pim code */
+#define MRT6_INIT 108 /* initialize forwarder (mrt6msg) */
+
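+/*
+ * Minimal daemon-side sketch (assuming "s" is an IPPROTO_ICMPV6 raw
+ * socket; error handling omitted):
+ *
+ *	int v = 1;
+ *	setsockopt(s, IPPROTO_IPV6, MRT6_INIT, &v, sizeof(v));
+ */
+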
+#if BSD >= 199103
+#define GET_TIME(t) microtime(&t)
+#elif defined(sun)
+#define GET_TIME(t) uniqtime(&t)
+#else
+#define GET_TIME(t) ((t) = time)
+#endif
+
+/*
+ * Types and macros for handling bitmaps with one bit per multicast interface.
+ */
+typedef u_short mifi_t; /* type of a mif index */
+#define MAXMIFS 64
+
+#ifndef IF_SETSIZE
+#define IF_SETSIZE 256
+#endif
+
+typedef u_int32_t if_mask;
+#define NIFBITS (sizeof(if_mask) * NBBY) /* bits per mask */
+
+#ifndef howmany
+#define howmany(x, y) (((x) + ((y) - 1)) / (y))
+#endif
+
+typedef struct if_set {
+ if_mask ifs_bits[howmany(IF_SETSIZE, NIFBITS)];
+} if_set;
+
+#define IF_SET(n, p) ((p)->ifs_bits[(n)/NIFBITS] |= (1 << ((n) % NIFBITS)))
+#define IF_CLR(n, p) ((p)->ifs_bits[(n)/NIFBITS] &= ~(1 << ((n) % NIFBITS)))
+#define IF_ISSET(n, p) ((p)->ifs_bits[(n)/NIFBITS] & (1 << ((n) % NIFBITS)))
+#define IF_COPY(f, t) bcopy(f, t, sizeof(*(f)))
+#define IF_ZERO(p) bzero(p, sizeof(*(p)))
+
+/*
+ * Argument structure for MRT6_ADD_IF.
+ */
+struct mif6ctl {
+ mifi_t mif6c_mifi; /* the index of the mif to be added */
+ u_char mif6c_flags; /* MIFF_ flags defined below */
+ u_short mif6c_pifi; /* the index of the physical IF */
+};
+
+#define MIFF_REGISTER 0x1 /* mif represents a register end-point */
+
+/*
+ * Argument structure for MRT6_ADD_MFC and MRT6_DEL_MFC
+ */
+struct mf6cctl {
+ struct sockaddr_in6 mf6cc_origin; /* IPv6 origin of mcasts */
+ struct sockaddr_in6 mf6cc_mcastgrp; /* multicast group associated */
+ mifi_t mf6cc_parent; /* incoming ifindex */
+ struct if_set mf6cc_ifset; /* set of forwarding ifs */
+};
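+
+/*
+ * Hypothetical daemon-side sketch: install a cache entry that accepts
+ * (src, grp) on mif 0 and forwards out mifs 1 and 2 (src_sin6 and
+ * grp_sin6 are assumed, pre-filled sockaddr_in6 structures):
+ *
+ *	struct mf6cctl mc;
+ *	bzero(&mc, sizeof(mc));
+ *	mc.mf6cc_origin = src_sin6;
+ *	mc.mf6cc_mcastgrp = grp_sin6;
+ *	mc.mf6cc_parent = 0;
+ *	IF_SET(1, &mc.mf6cc_ifset);
+ *	IF_SET(2, &mc.mf6cc_ifset);
+ *	setsockopt(s, IPPROTO_IPV6, MRT6_ADD_MFC, &mc, sizeof(mc));
+ */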
+
+/*
+ * The kernel's multicast routing statistics.
+ */
+struct mrt6stat {
+ u_quad_t mrt6s_mfc_lookups; /* # forw. cache hash table hits */
+ u_quad_t mrt6s_mfc_misses; /* # forw. cache hash table misses */
+ u_quad_t mrt6s_upcalls; /* # calls to multicast routing daemon */
+ u_quad_t mrt6s_no_route; /* no route for packet's origin */
+ u_quad_t mrt6s_bad_tunnel; /* malformed tunnel options */
+ u_quad_t mrt6s_cant_tunnel; /* no room for tunnel options */
+ u_quad_t mrt6s_wrong_if; /* arrived on wrong interface */
+ u_quad_t mrt6s_upq_ovflw; /* upcall Q overflow */
+ u_quad_t mrt6s_cache_cleanups; /* # entries with no upcalls */
+ u_quad_t mrt6s_drop_sel; /* pkts dropped selectively */
+ u_quad_t mrt6s_q_overflow; /* pkts dropped - Q overflow */
+ u_quad_t mrt6s_pkt2large; /* pkts dropped - size > BKT SIZE */
+ u_quad_t mrt6s_upq_sockfull; /* upcalls dropped - socket full */
+};
+
+#ifdef MRT6_OINIT
+/*
+ * Struct used to communicate from kernel to multicast router;
+ * note the convenient similarity to an IPv6 header.
+ * XXX old version, superseded by mrt6msg.
+ */
+struct omrt6msg {
+ u_long unused1;
+ u_char im6_msgtype; /* what type of message */
+#if 0
+#define MRT6MSG_NOCACHE 1
+#define MRT6MSG_WRONGMIF 2
+#define MRT6MSG_WHOLEPKT 3 /* used for user level encap */
+#endif
+ u_char im6_mbz; /* must be zero */
+ u_char im6_mif; /* mif rec'd on */
+ u_char unused2;
+ struct in6_addr im6_src, im6_dst;
+};
+#endif
+
+/*
+ * Structure used to communicate from kernel to multicast router.
+ * We'll overlay the structure onto an MLD header (not an IPv6 header
+ * like the igmpmsg{} used for the IPv4 implementation). This is
+ * because this structure will be passed via an IPv6 raw socket, on
+ * which an application will only receive the payload, i.e., the data
+ * after the IPv6 header and all the extension headers. (see Section 3
+ * of RFC 3542)
+ */
+struct mrt6msg {
+#define MRT6MSG_NOCACHE 1
+#define MRT6MSG_WRONGMIF 2
+#define MRT6MSG_WHOLEPKT 3 /* used for user level encap */
+ u_char im6_mbz; /* must be zero */
+ u_char im6_msgtype; /* what type of message */
+ u_int16_t im6_mif; /* mif rec'd on */
+ u_int32_t im6_pad; /* padding for 64bit arch */
+ struct in6_addr im6_src, im6_dst;
+};
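+
+/*
+ * Daemon-side sketch (hypothetical names): each read from the raw
+ * socket yields one upcall, dispatched on im6_msgtype:
+ *
+ *	n = read(s, buf, sizeof(buf));
+ *	im6 = (struct mrt6msg *)buf;
+ *	if (im6->im6_msgtype == MRT6MSG_NOCACHE)
+ *		resolve (im6->im6_src, im6->im6_dst), then MRT6_ADD_MFC;
+ */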
+
+/*
+ * Argument structure used by multicast routing daemon to get src-grp
+ * packet counts
+ */
+struct sioc_sg_req6 {
+ struct sockaddr_in6 src;
+ struct sockaddr_in6 grp;
+ u_quad_t pktcnt;
+ u_quad_t bytecnt;
+ u_quad_t wrong_if;
+};
+
+/*
+ * Argument structure used by mrouted to get mif pkt counts
+ */
+struct sioc_mif_req6 {
+ mifi_t mifi; /* mif number */
+ u_quad_t icount; /* Input packet count on mif */
+ u_quad_t ocount; /* Output packet count on mif */
+ u_quad_t ibytes; /* Input byte count on mif */
+ u_quad_t obytes; /* Output byte count on mif */
+};
+
+#if defined(_KERNEL) || defined(KERNEL)
+/*
+ * The kernel's multicast-interface structure.
+ */
+struct mif6 {
+ u_char m6_flags; /* MIFF_ flags defined above */
+ u_int m6_rate_limit; /* max rate */
+ struct in6_addr m6_lcl_addr; /* local interface address */
+ struct ifnet *m6_ifp; /* pointer to interface */
+ u_quad_t m6_pkt_in; /* # pkts in on interface */
+ u_quad_t m6_pkt_out; /* # pkts out on interface */
+ u_quad_t m6_bytes_in; /* # bytes in on interface */
+ u_quad_t m6_bytes_out; /* # bytes out on interface */
+ struct route_in6 m6_route; /* cached route */
+#ifdef notyet
+ u_int m6_rsvp_on; /* RSVP listening on this vif */
+ struct socket *m6_rsvpd; /* RSVP daemon socket */
+#endif
+};
+
+/*
+ * The kernel's multicast forwarding cache entry structure
+ */
+struct mf6c {
+ struct sockaddr_in6 mf6c_origin; /* IPv6 origin of mcasts */
+ struct sockaddr_in6 mf6c_mcastgrp; /* multicast group associated*/
+ mifi_t mf6c_parent; /* incoming IF */
+ struct if_set mf6c_ifset; /* set of outgoing IFs */
+
+ u_quad_t mf6c_pkt_cnt; /* pkt count for src-grp */
+ u_quad_t mf6c_byte_cnt; /* byte count for src-grp */
+ u_quad_t mf6c_wrong_if; /* wrong if for src-grp */
+ int mf6c_expire; /* time to clean entry up */
+ struct timeval mf6c_last_assert; /* last time I sent an assert*/
+ struct rtdetq *mf6c_stall; /* pkts waiting for route */
+ struct mf6c *mf6c_next; /* hash table linkage */
+};
+
+#define MF6C_INCOMPLETE_PARENT ((mifi_t)-1)
+
+/*
+ * Argument structure used to pass packet info while an upcall is made
+ */
+#ifndef _NETINET_IP_MROUTE_HH_
+struct rtdetq { /* XXX: rtdetq is also defined in ip_mroute.h */
+ struct mbuf *m; /* A copy of the packet */
+ struct ifnet *ifp; /* Interface pkt came in on */
+#ifdef UPCALL_TIMING
+ struct timeval t; /* Timestamp */
+#endif /* UPCALL_TIMING */
+ struct rtdetq *next;
+};
+#endif /* _NETINET_IP_MROUTE_HH_ */
+
+#define MF6CTBLSIZ 256
+#if (MF6CTBLSIZ & (MF6CTBLSIZ - 1)) == 0 /* from sys:route.h */
+#define MF6CHASHMOD(h) ((h) & (MF6CTBLSIZ - 1))
+#else
+#define MF6CHASHMOD(h) ((h) % MF6CTBLSIZ)
+#endif
+
+#define MAX_UPQ6 4 /* max. no of pkts in upcall Q */
+
+extern int (*ip6_mrouter_set)(struct socket *so, struct sockopt *sopt);
+extern int (*ip6_mrouter_get)(struct socket *so, struct sockopt *sopt);
+extern int (*ip6_mrouter_done)(void);
+extern int (*mrt6_ioctl)(u_long, caddr_t);
+#endif /* _KERNEL */
+
+#endif /* !_NETINET6_IP6_MROUTE_HH_ */
diff --git a/rtems/freebsd/netinet6/ip6_output.c b/rtems/freebsd/netinet6/ip6_output.c
new file mode 100644
index 00000000..043bcaca
--- /dev/null
+++ b/rtems/freebsd/netinet6/ip6_output.c
@@ -0,0 +1,2928 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the project nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $KAME: ip6_output.c,v 1.279 2002/01/26 06:12:30 jinmei Exp $
+ */
+
+/*-
+ * Copyright (c) 1982, 1986, 1988, 1990, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)ip_output.c 8.3 (Berkeley) 1/21/94
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/freebsd/local/opt_inet.h>
+#include <rtems/freebsd/local/opt_inet6.h>
+#include <rtems/freebsd/local/opt_ipsec.h>
+#include <rtems/freebsd/local/opt_sctp.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/errno.h>
+#include <rtems/freebsd/sys/priv.h>
+#include <rtems/freebsd/sys/proc.h>
+#include <rtems/freebsd/sys/protosw.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/socketvar.h>
+#include <rtems/freebsd/sys/syslog.h>
+#include <rtems/freebsd/sys/ucred.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/netisr.h>
+#include <rtems/freebsd/net/route.h>
+#include <rtems/freebsd/net/pfil.h>
+#include <rtems/freebsd/net/vnet.h>
+
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/in_var.h>
+#include <rtems/freebsd/netinet6/in6_var.h>
+#include <rtems/freebsd/netinet/ip6.h>
+#include <rtems/freebsd/netinet/icmp6.h>
+#include <rtems/freebsd/netinet6/ip6_var.h>
+#include <rtems/freebsd/netinet/in_pcb.h>
+#include <rtems/freebsd/netinet/tcp_var.h>
+#include <rtems/freebsd/netinet6/nd6.h>
+
+#ifdef IPSEC
+#include <rtems/freebsd/netipsec/ipsec.h>
+#include <rtems/freebsd/netipsec/ipsec6.h>
+#include <rtems/freebsd/netipsec/key.h>
+#include <rtems/freebsd/netinet6/ip6_ipsec.h>
+#endif /* IPSEC */
+#ifdef SCTP
+#include <rtems/freebsd/netinet/sctp.h>
+#include <rtems/freebsd/netinet/sctp_crc32.h>
+#endif
+
+#include <rtems/freebsd/netinet6/ip6protosw.h>
+#include <rtems/freebsd/netinet6/scope6_var.h>
+
+extern int in6_mcast_loop;
+
+struct ip6_exthdrs {
+ struct mbuf *ip6e_ip6;
+ struct mbuf *ip6e_hbh;
+ struct mbuf *ip6e_dest1;
+ struct mbuf *ip6e_rthdr;
+ struct mbuf *ip6e_dest2;
+};
+
+static int ip6_pcbopt __P((int, u_char *, int, struct ip6_pktopts **,
+ struct ucred *, int));
+static int ip6_pcbopts __P((struct ip6_pktopts **, struct mbuf *,
+ struct socket *, struct sockopt *));
+static int ip6_getpcbopt(struct ip6_pktopts *, int, struct sockopt *);
+static int ip6_setpktopt __P((int, u_char *, int, struct ip6_pktopts *,
+ struct ucred *, int, int, int));
+
+static int ip6_copyexthdr(struct mbuf **, caddr_t, int);
+static int ip6_insertfraghdr __P((struct mbuf *, struct mbuf *, int,
+ struct ip6_frag **));
+static int ip6_insert_jumboopt(struct ip6_exthdrs *, u_int32_t);
+static int ip6_splithdr(struct mbuf *, struct ip6_exthdrs *);
+static int ip6_getpmtu __P((struct route_in6 *, struct route_in6 *,
+ struct ifnet *, struct in6_addr *, u_long *, int *));
+static int copypktopts(struct ip6_pktopts *, struct ip6_pktopts *, int);
+
+
+/*
+ * Make an extension header from option data. hp is the source, and
+ * mp is the destination.
+ */
+#define MAKE_EXTHDR(hp, mp) \
+ do { \
+ if (hp) { \
+ struct ip6_ext *eh = (struct ip6_ext *)(hp); \
+ error = ip6_copyexthdr((mp), (caddr_t)(hp), \
+ ((eh)->ip6e_len + 1) << 3); \
+ if (error) \
+ goto freehdrs; \
+ } \
+ } while (/*CONSTCOND*/ 0)
+
+/*
+ * Form a chain of extension headers.
+ * m is the extension header mbuf
+ * mp is the previous mbuf in the chain
+ * p is the next header
+ * i is the type of option.
+ */
+#define MAKE_CHAIN(m, mp, p, i)\
+ do {\
+ if (m) {\
+ if (!hdrsplit) \
+ panic("assumption failed: hdr not split"); \
+ *mtod((m), u_char *) = *(p);\
+ *(p) = (i);\
+ p = mtod((m), u_char *);\
+ (m)->m_next = (mp)->m_next;\
+ (mp)->m_next = (m);\
+ (mp) = (m);\
+ }\
+ } while (/*CONSTCOND*/ 0)
+
+/*
+ * IP6 output. The packet in mbuf chain m contains a skeletal IP6
+ * header (with pri, len, nxt, hlim, src, dst).
+ * This function may modify ver and hlim only.
+ * The mbuf chain containing the packet will be freed.
+ * The mbuf opt, if present, will not be freed.
+ *
+ * type of "mtu": rt_rmx.rmx_mtu is u_long, ifnet.ifr_mtu is int, and
+ * nd_ifinfo.linkmtu is u_int32_t. so we use u_long to hold the largest one,
+ * which is rt_rmx.rmx_mtu.
+ *
+ * ifpp - XXX: just for statistics
+ */
+int
+ip6_output(struct mbuf *m0, struct ip6_pktopts *opt,
+ struct route_in6 *ro, int flags, struct ip6_moptions *im6o,
+ struct ifnet **ifpp, struct inpcb *inp)
+{
+ struct ip6_hdr *ip6, *mhip6;
+ struct ifnet *ifp, *origifp;
+ struct mbuf *m = m0;
+ struct mbuf *mprev = NULL;
+ int hlen, tlen, len, off;
+ struct route_in6 ip6route;
+ struct rtentry *rt = NULL;
+ struct sockaddr_in6 *dst, src_sa, dst_sa;
+ struct in6_addr odst;
+ int error = 0;
+ struct in6_ifaddr *ia = NULL;
+ u_long mtu;
+ int alwaysfrag, dontfrag;
+ u_int32_t optlen = 0, plen = 0, unfragpartlen = 0;
+ struct ip6_exthdrs exthdrs;
+ struct in6_addr finaldst, src0, dst0;
+ u_int32_t zone;
+ struct route_in6 *ro_pmtu = NULL;
+ int hdrsplit = 0;
+ int needipsec = 0;
+#ifdef SCTP
+ int sw_csum;
+#endif
+#ifdef IPSEC
+ struct ipsec_output_state state;
+ struct ip6_rthdr *rh = NULL;
+ int needipsectun = 0;
+ int segleft_org = 0;
+ struct secpolicy *sp = NULL;
+#endif /* IPSEC */
+
+ ip6 = mtod(m, struct ip6_hdr *);
+ if (ip6 == NULL) {
+ printf ("ip6 is NULL");
+ goto bad;
+ }
+
+ finaldst = ip6->ip6_dst;
+
+ bzero(&exthdrs, sizeof(exthdrs));
+
+ if (opt) {
+ /* Hop-by-Hop options header */
+ MAKE_EXTHDR(opt->ip6po_hbh, &exthdrs.ip6e_hbh);
+ /* Destination options header(1st part) */
+ if (opt->ip6po_rthdr) {
+ /*
+ * Destination options header(1st part)
+ * This only makes sense with a routing header.
+ * See Section 9.2 of RFC 3542.
+ * Disabling this part just for MIP6 convenience is
+ * a bad idea. We need to think carefully about a
+ * way to make the advanced API coexist with MIP6
+ * options, which might automatically be inserted in
+ * the kernel.
+ */
+ MAKE_EXTHDR(opt->ip6po_dest1, &exthdrs.ip6e_dest1);
+ }
+ /* Routing header */
+ MAKE_EXTHDR(opt->ip6po_rthdr, &exthdrs.ip6e_rthdr);
+ /* Destination options header(2nd part) */
+ MAKE_EXTHDR(opt->ip6po_dest2, &exthdrs.ip6e_dest2);
+ }
+
+ /*
+ * IPSec checking which handles several cases.
+ * FAST IPSEC: We re-injected the packet.
+ */
+#ifdef IPSEC
+ switch(ip6_ipsec_output(&m, inp, &flags, &error, &ifp, &sp))
+ {
+ case 1: /* Bad packet */
+ goto freehdrs;
+ case -1: /* Do IPSec */
+ needipsec = 1;
+ case 0: /* No IPSec */
+ default:
+ break;
+ }
+#endif /* IPSEC */
+
+ /*
+ * Calculate the total length of the extension header chain.
+ * Keep the length of the unfragmentable part for fragmentation.
+ */
+ optlen = 0;
+ if (exthdrs.ip6e_hbh)
+ optlen += exthdrs.ip6e_hbh->m_len;
+ if (exthdrs.ip6e_dest1)
+ optlen += exthdrs.ip6e_dest1->m_len;
+ if (exthdrs.ip6e_rthdr)
+ optlen += exthdrs.ip6e_rthdr->m_len;
+ unfragpartlen = optlen + sizeof(struct ip6_hdr);
+
+ /* NOTE: we don't add AH/ESP length here. do that later. */
+ if (exthdrs.ip6e_dest2)
+ optlen += exthdrs.ip6e_dest2->m_len;
+
+ /*
+ * If we need IPsec, or there is at least one extension header,
+ * separate IP6 header from the payload.
+ */
+ if ((needipsec || optlen) && !hdrsplit) {
+ if ((error = ip6_splithdr(m, &exthdrs)) != 0) {
+ m = NULL;
+ goto freehdrs;
+ }
+ m = exthdrs.ip6e_ip6;
+ hdrsplit++;
+ }
+
+ /* adjust pointer */
+ ip6 = mtod(m, struct ip6_hdr *);
+
+ /* adjust mbuf packet header length */
+ m->m_pkthdr.len += optlen;
+ plen = m->m_pkthdr.len - sizeof(*ip6);
+
+ /* If this is a jumbo payload, insert a jumbo payload option. */
+ if (plen > IPV6_MAXPACKET) {
+ if (!hdrsplit) {
+ if ((error = ip6_splithdr(m, &exthdrs)) != 0) {
+ m = NULL;
+ goto freehdrs;
+ }
+ m = exthdrs.ip6e_ip6;
+ hdrsplit++;
+ }
+ /* adjust pointer */
+ ip6 = mtod(m, struct ip6_hdr *);
+ if ((error = ip6_insert_jumboopt(&exthdrs, plen)) != 0)
+ goto freehdrs;
+ ip6->ip6_plen = 0;
+ } else
+ ip6->ip6_plen = htons(plen);
+
+ /*
+ * Concatenate headers and fill in next header fields.
+ * Here we have, on "m"
+ * IPv6 payload
+ * and we insert headers accordingly. Finally, we should be getting:
+ * IPv6 hbh dest1 rthdr ah* [esp* dest2 payload]
+ *
+ * during the header composing process, "m" points to IPv6 header.
+ * "mprev" points to an extension header prior to esp.
+ */
+ u_char *nexthdrp = &ip6->ip6_nxt;
+ mprev = m;
+
+ /*
+ * we treat dest2 specially. this makes IPsec processing
+ * much easier. the goal here is to make mprev point to the
+ * mbuf prior to dest2.
+ *
+ * result: IPv6 dest2 payload
+ * m and mprev will point to IPv6 header.
+ */
+ if (exthdrs.ip6e_dest2) {
+ if (!hdrsplit)
+ panic("assumption failed: hdr not split");
+ exthdrs.ip6e_dest2->m_next = m->m_next;
+ m->m_next = exthdrs.ip6e_dest2;
+ *mtod(exthdrs.ip6e_dest2, u_char *) = ip6->ip6_nxt;
+ ip6->ip6_nxt = IPPROTO_DSTOPTS;
+ }
+
+ /*
+ * result: IPv6 hbh dest1 rthdr dest2 payload
+ * m will point to IPv6 header. mprev will point to the
+ * extension header prior to dest2 (rthdr in the above case).
+ */
+ MAKE_CHAIN(exthdrs.ip6e_hbh, mprev, nexthdrp, IPPROTO_HOPOPTS);
+ MAKE_CHAIN(exthdrs.ip6e_dest1, mprev, nexthdrp,
+ IPPROTO_DSTOPTS);
+ MAKE_CHAIN(exthdrs.ip6e_rthdr, mprev, nexthdrp,
+ IPPROTO_ROUTING);
+
+#ifdef IPSEC
+ if (!needipsec)
+ goto skip_ipsec2;
+
+ /*
+ * pointers after IPsec headers are not valid any more.
+ * other pointers need great care too.
+ * (IPsec routines should not mangle mbufs prior to AH/ESP)
+ */
+ exthdrs.ip6e_dest2 = NULL;
+
+ if (exthdrs.ip6e_rthdr) {
+ rh = mtod(exthdrs.ip6e_rthdr, struct ip6_rthdr *);
+ segleft_org = rh->ip6r_segleft;
+ rh->ip6r_segleft = 0;
+ }
+
+ bzero(&state, sizeof(state));
+ state.m = m;
+ error = ipsec6_output_trans(&state, nexthdrp, mprev, sp, flags,
+ &needipsectun);
+ m = state.m;
+ if (error == EJUSTRETURN) {
+ /*
+ * We had a SP with a level of 'use' and no SA. We
+ * will just continue to process the packet without
+ * IPsec processing.
+ */
+ ;
+ } else if (error) {
+ /* mbuf is already reclaimed in ipsec6_output_trans. */
+ m = NULL;
+ switch (error) {
+ case EHOSTUNREACH:
+ case ENETUNREACH:
+ case EMSGSIZE:
+ case ENOBUFS:
+ case ENOMEM:
+ break;
+ default:
+ printf("[%s:%d] (ipsec): error code %d\n",
+ __func__, __LINE__, error);
+ /* FALLTHROUGH */
+ case ENOENT:
+ /* don't show these error codes to the user */
+ error = 0;
+ break;
+ }
+ goto bad;
+ } else if (!needipsectun) {
+ /*
+ * In the FAST IPSec case we have already
+ * re-injected the packet and it has been freed
+ * by the ipsec_done() function. So, just clean
+ * up after ourselves.
+ */
+ m = NULL;
+ goto done;
+ }
+ if (exthdrs.ip6e_rthdr) {
+ /* ah6_output doesn't modify mbuf chain */
+ rh->ip6r_segleft = segleft_org;
+ }
+skip_ipsec2:;
+#endif /* IPSEC */
+
+ /*
+ * If there is a routing header, discard the packet.
+ */
+ if (exthdrs.ip6e_rthdr) {
+ error = EINVAL;
+ goto bad;
+ }
+
+ /* Source address validation */
+ if (IN6_IS_ADDR_UNSPECIFIED(&ip6->ip6_src) &&
+ (flags & IPV6_UNSPECSRC) == 0) {
+ error = EOPNOTSUPP;
+ V_ip6stat.ip6s_badscope++;
+ goto bad;
+ }
+ if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_src)) {
+ error = EOPNOTSUPP;
+ V_ip6stat.ip6s_badscope++;
+ goto bad;
+ }
+
+ V_ip6stat.ip6s_localout++;
+
+ /*
+ * Route packet.
+ */
+ if (ro == 0) {
+ ro = &ip6route;
+ bzero((caddr_t)ro, sizeof(*ro));
+ }
+ ro_pmtu = ro;
+ if (opt && opt->ip6po_rthdr)
+ ro = &opt->ip6po_route;
+ dst = (struct sockaddr_in6 *)&ro->ro_dst;
+
+again:
+ /*
+ * if specified, try to fill in the traffic class field.
+ * do not override if a non-zero value is already set.
+ * we check the diffserv field and the ecn field separately.
+ */
+ if (opt && opt->ip6po_tclass >= 0) {
+ int mask = 0;
+
+ if ((ip6->ip6_flow & htonl(0xfc << 20)) == 0)
+ mask |= 0xfc;
+ if ((ip6->ip6_flow & htonl(0x03 << 20)) == 0)
+ mask |= 0x03;
+ if (mask != 0)
+ ip6->ip6_flow |= htonl((opt->ip6po_tclass & mask) << 20);
+ }
+
+ /* fill in or override the hop limit field, if necessary. */
+ if (opt && opt->ip6po_hlim != -1)
+ ip6->ip6_hlim = opt->ip6po_hlim & 0xff;
+ else if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst)) {
+ if (im6o != NULL)
+ ip6->ip6_hlim = im6o->im6o_multicast_hlim;
+ else
+ ip6->ip6_hlim = V_ip6_defmcasthlim;
+ }
+
+#ifdef IPSEC
+ /*
+ * We may re-inject packets into the stack here.
+ */
+ if (needipsec && needipsectun) {
+ struct ipsec_output_state state;
+
+ /*
+ * All the extension headers will become inaccessible
+ * (since they can be encrypted).
+ * Don't panic, we need no more updates to extension headers
+ * on the inner IPv6 packet (since they are now encapsulated).
+ *
+ * IPv6 [ESP|AH] IPv6 [extension headers] payload
+ */
+ bzero(&exthdrs, sizeof(exthdrs));
+ exthdrs.ip6e_ip6 = m;
+
+ bzero(&state, sizeof(state));
+ state.m = m;
+ state.ro = (struct route *)ro;
+ state.dst = (struct sockaddr *)dst;
+
+ error = ipsec6_output_tunnel(&state, sp, flags);
+
+ m = state.m;
+ ro = (struct route_in6 *)state.ro;
+ dst = (struct sockaddr_in6 *)state.dst;
+ if (error == EJUSTRETURN) {
+ /*
+ * We had a SP with a level of 'use' and no SA. We
+ * will just continue to process the packet without
+ * IPsec processing.
+ */
+ ;
+ } else if (error) {
+ /* mbuf is already reclaimed in ipsec6_output_tunnel. */
+ m0 = m = NULL;
+ switch (error) {
+ case EHOSTUNREACH:
+ case ENETUNREACH:
+ case EMSGSIZE:
+ case ENOBUFS:
+ case ENOMEM:
+ break;
+ default:
+ printf("[%s:%d] (ipsec): error code %d\n",
+ __func__, __LINE__, error);
+ /* FALLTHROUGH */
+ case ENOENT:
+ /* don't show these error codes to the user */
+ error = 0;
+ break;
+ }
+ goto bad;
+ } else {
+ /*
+ * In the FAST IPSec case we have already
+ * re-injected the packet and it has been freed
+ * by the ipsec_done() function. So, just clean
+ * up after ourselves.
+ */
+ m = NULL;
+ goto done;
+ }
+
+ exthdrs.ip6e_ip6 = m;
+ }
+#endif /* IPSEC */
+
+ /* adjust pointer */
+ ip6 = mtod(m, struct ip6_hdr *);
+
+ bzero(&dst_sa, sizeof(dst_sa));
+ dst_sa.sin6_family = AF_INET6;
+ dst_sa.sin6_len = sizeof(dst_sa);
+ dst_sa.sin6_addr = ip6->ip6_dst;
+ if ((error = in6_selectroute(&dst_sa, opt, im6o, ro,
+ &ifp, &rt)) != 0) {
+ switch (error) {
+ case EHOSTUNREACH:
+ V_ip6stat.ip6s_noroute++;
+ break;
+ case EADDRNOTAVAIL:
+ default:
+ break; /* XXX statistics? */
+ }
+ if (ifp != NULL)
+ in6_ifstat_inc(ifp, ifs6_out_discard);
+ goto bad;
+ }
+ if (rt == NULL) {
+ /*
+ * If in6_selectroute() does not return a route entry,
+ * dst may not have been updated.
+ */
+ *dst = dst_sa; /* XXX */
+ }
+
+ /*
+ * At this point, rt (for unicast) and ifp must be non-NULL valid values.
+ */
+ if ((flags & IPV6_FORWARDING) == 0) {
+ /* XXX: the FORWARDING flag can be set for mrouting. */
+ in6_ifstat_inc(ifp, ifs6_out_request);
+ }
+ if (rt != NULL) {
+ ia = (struct in6_ifaddr *)(rt->rt_ifa);
+ rt->rt_use++;
+ }
+
+
+ /*
+ * The outgoing interface must be in the zone of source and
+ * destination addresses.
+ */
+ origifp = ifp;
+
+ src0 = ip6->ip6_src;
+ if (in6_setscope(&src0, origifp, &zone))
+ goto badscope;
+ bzero(&src_sa, sizeof(src_sa));
+ src_sa.sin6_family = AF_INET6;
+ src_sa.sin6_len = sizeof(src_sa);
+ src_sa.sin6_addr = ip6->ip6_src;
+ if (sa6_recoverscope(&src_sa) || zone != src_sa.sin6_scope_id)
+ goto badscope;
+
+ dst0 = ip6->ip6_dst;
+ if (in6_setscope(&dst0, origifp, &zone))
+ goto badscope;
+ /* re-initialize to be sure */
+ bzero(&dst_sa, sizeof(dst_sa));
+ dst_sa.sin6_family = AF_INET6;
+ dst_sa.sin6_len = sizeof(dst_sa);
+ dst_sa.sin6_addr = ip6->ip6_dst;
+ if (sa6_recoverscope(&dst_sa) || zone != dst_sa.sin6_scope_id) {
+ goto badscope;
+ }
+
+ /*
+ * We should use ia_ifp to support the case of
+ * sending packets to an address of our own.
+ */
+ if (ia != NULL && ia->ia_ifp)
+ ifp = ia->ia_ifp;
+
+ /* scope check is done. */
+ goto routefound;
+
+ badscope:
+ V_ip6stat.ip6s_badscope++;
+ in6_ifstat_inc(origifp, ifs6_out_discard);
+ if (error == 0)
+ error = EHOSTUNREACH; /* XXX */
+ goto bad;
+
+ routefound:
+ if (rt && !IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst)) {
+ if (opt && opt->ip6po_nextroute.ro_rt) {
+ /*
+ * The nexthop is explicitly specified by the
+ * application. We assume the next hop is an IPv6
+ * address.
+ */
+ dst = (struct sockaddr_in6 *)opt->ip6po_nexthop;
+ }
+ else if ((rt->rt_flags & RTF_GATEWAY))
+ dst = (struct sockaddr_in6 *)rt->rt_gateway;
+ }
+
+ if (!IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst)) {
+ m->m_flags &= ~(M_BCAST | M_MCAST); /* just in case */
+ } else {
+ m->m_flags = (m->m_flags & ~M_BCAST) | M_MCAST;
+ in6_ifstat_inc(ifp, ifs6_out_mcast);
+ /*
+ * Confirm that the outgoing interface supports multicast.
+ */
+ if (!(ifp->if_flags & IFF_MULTICAST)) {
+ V_ip6stat.ip6s_noroute++;
+ in6_ifstat_inc(ifp, ifs6_out_discard);
+ error = ENETUNREACH;
+ goto bad;
+ }
+ if ((im6o == NULL && in6_mcast_loop) ||
+ (im6o && im6o->im6o_multicast_loop)) {
+ /*
+ * Loop back multicast datagram if not expressly
+ * forbidden to do so, even if we have not joined
+ * the address; protocols will filter it later,
+ * thus deferring a hash lookup and lock acquisition
+ * at the expense of an m_copym().
+ */
+ ip6_mloopback(ifp, m, dst);
+ } else {
+ /*
+ * If we are acting as a multicast router, perform
+ * multicast forwarding as if the packet had just
+ * arrived on the interface to which we are about
+ * to send. The multicast forwarding function
+ * recursively calls this function, using the
+ * IPV6_FORWARDING flag to prevent infinite recursion.
+ *
+ * Multicasts that are looped back by ip6_mloopback(),
+ * above, will be forwarded by the ip6_input() routine,
+ * if necessary.
+ */
+ if (V_ip6_mrouter && (flags & IPV6_FORWARDING) == 0) {
+ /*
+ * XXX: ip6_mforward expects that rcvif is NULL
+ * when it is called from the originating path.
+ * However, it is not always the case, since
+ * some versions of MGETHDR() does not
+ * initialize the field.
+ */
+ m->m_pkthdr.rcvif = NULL;
+ if (ip6_mforward(ip6, ifp, m) != 0) {
+ m_freem(m);
+ goto done;
+ }
+ }
+ }
+ /*
+ * Multicasts with a hoplimit of zero may be looped back,
+ * above, but must not be transmitted on a network.
+ * Also, multicasts addressed to the loopback interface
+ * are not sent -- the above call to ip6_mloopback() will
+ * loop back a copy if this host actually belongs to the
+ * destination group on the loopback interface.
+ */
+ if (ip6->ip6_hlim == 0 || (ifp->if_flags & IFF_LOOPBACK) ||
+ IN6_IS_ADDR_MC_INTFACELOCAL(&ip6->ip6_dst)) {
+ m_freem(m);
+ goto done;
+ }
+ }
+
+ /*
+ * Fill in the outgoing interface to tell the upper layer
+ * to increment per-interface statistics.
+ */
+ if (ifpp)
+ *ifpp = ifp;
+
+ /* Determine path MTU. */
+ if ((error = ip6_getpmtu(ro_pmtu, ro, ifp, &finaldst, &mtu,
+ &alwaysfrag)) != 0)
+ goto bad;
+
+ /*
+ * The caller of this function may specify to use the minimum MTU
+ * in some cases.
+ * An advanced API option (IPV6_USE_MIN_MTU) can also override MTU
+ * setting. The logic is a bit complicated; by default, unicast
+ * packets will follow path MTU while multicast packets will be sent at
+ * the minimum MTU. If IP6PO_MINMTU_ALL is specified, all packets
+ * including unicast ones will be sent at the minimum MTU. Multicast
+ * packets will always be sent at the minimum MTU unless
+ * IP6PO_MINMTU_DISABLE is explicitly specified.
+ * See RFC 3542 for more details.
+ */
+ if (mtu > IPV6_MMTU) {
+ if ((flags & IPV6_MINMTU))
+ mtu = IPV6_MMTU;
+ else if (opt && opt->ip6po_minmtu == IP6PO_MINMTU_ALL)
+ mtu = IPV6_MMTU;
+ else if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) &&
+ (opt == NULL ||
+ opt->ip6po_minmtu != IP6PO_MINMTU_DISABLE)) {
+ mtu = IPV6_MMTU;
+ }
+ }
+
+ /*
+ * clear embedded scope identifiers if necessary.
+ * in6_clearscope will touch the addresses only when necessary.
+ */
+ in6_clearscope(&ip6->ip6_src);
+ in6_clearscope(&ip6->ip6_dst);
+
+ /*
+ * If the outgoing packet contains a hop-by-hop options header,
+ * it must be examined and processed even by the source node.
+ * (RFC 2460, section 4.)
+ */
+ if (exthdrs.ip6e_hbh) {
+ struct ip6_hbh *hbh = mtod(exthdrs.ip6e_hbh, struct ip6_hbh *);
+ u_int32_t dummy; /* XXX unused */
+ u_int32_t plen = 0; /* XXX: ip6_process will check the value */
+
+#ifdef DIAGNOSTIC
+ if ((hbh->ip6h_len + 1) << 3 > exthdrs.ip6e_hbh->m_len)
+ panic("ip6e_hbh is not continuous");
+#endif
+ /*
+ * XXX: if we have to send an ICMPv6 error to the sender,
+ * we need the M_LOOP flag since icmp6_error() expects that
+ * the IPv6 header and the hop-by-hop options header are
+ * contiguous unless the flag is set.
+ */
+ m->m_flags |= M_LOOP;
+ m->m_pkthdr.rcvif = ifp;
+ if (ip6_process_hopopts(m, (u_int8_t *)(hbh + 1),
+ ((hbh->ip6h_len + 1) << 3) - sizeof(struct ip6_hbh),
+ &dummy, &plen) < 0) {
+ /* m was already freed at this point */
+ error = EINVAL;/* better error? */
+ goto done;
+ }
+ m->m_flags &= ~M_LOOP; /* XXX */
+ m->m_pkthdr.rcvif = NULL;
+ }
+
+ /* Jump over all PFIL processing if hooks are not active. */
+ if (!PFIL_HOOKED(&V_inet6_pfil_hook))
+ goto passout;
+
+ odst = ip6->ip6_dst;
+ /* Run through list of hooks for output packets. */
+ error = pfil_run_hooks(&V_inet6_pfil_hook, &m, ifp, PFIL_OUT, inp);
+ if (error != 0 || m == NULL)
+ goto done;
+ ip6 = mtod(m, struct ip6_hdr *);
+
+ /* See if destination IP address was changed by packet filter. */
+ if (!IN6_ARE_ADDR_EQUAL(&odst, &ip6->ip6_dst)) {
+ m->m_flags |= M_SKIP_FIREWALL;
+ /* If destination is now ourself drop to ip6_input(). */
+ if (in6_localaddr(&ip6->ip6_dst)) {
+ if (m->m_pkthdr.rcvif == NULL)
+ m->m_pkthdr.rcvif = V_loif;
+ if (m->m_pkthdr.csum_flags & CSUM_DELAY_DATA) {
+ m->m_pkthdr.csum_flags |=
+ CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
+ m->m_pkthdr.csum_data = 0xffff;
+ }
+ m->m_pkthdr.csum_flags |=
+ CSUM_IP_CHECKED | CSUM_IP_VALID;
+#ifdef SCTP
+ if (m->m_pkthdr.csum_flags & CSUM_SCTP)
+ m->m_pkthdr.csum_flags |= CSUM_SCTP_VALID;
+#endif
+ error = netisr_queue(NETISR_IPV6, m);
+ goto done;
+ } else
+ goto again; /* Redo the routing table lookup. */
+ }
+
+ /* XXX: IPFIREWALL_FORWARD */
+
+passout:
+ /*
+ * Send the packet to the outgoing interface.
+ * If necessary, do IPv6 fragmentation before sending.
+ *
+ * the logic here is rather complex:
+ * 1: normal case (dontfrag == 0, alwaysfrag == 0)
+ * 1-a: send as is if tlen <= path mtu
+ * 1-b: fragment if tlen > path mtu
+ *
+ * 2: if user asks us not to fragment (dontfrag == 1)
+ * 2-a: send as is if tlen <= interface mtu
+ * 2-b: error if tlen > interface mtu
+ *
+ * 3: if we always need to attach fragment header (alwaysfrag == 1)
+ * always fragment
+ *
+ * 4: if dontfrag == 1 && alwaysfrag == 1
+ * error, as we cannot handle this conflicting request
+ */
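+ /*
+ * Worked example for case 1-b (illustrative numbers only): with
+ * tlen = 3000, mtu = 1500 and unfragpartlen = 40, the code below
+ * computes len = (1500 - 40 - 8) & ~7 = 1448, so the payload goes
+ * out as fragments at offsets 0, 1448 and 2896 (the last short).
+ */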
+#ifdef SCTP
+ sw_csum = m->m_pkthdr.csum_flags & ~ifp->if_hwassist;
+ if (sw_csum & CSUM_SCTP) {
+ sctp_delayed_cksum(m, sizeof(struct ip6_hdr));
+ sw_csum &= ~CSUM_SCTP;
+ }
+#endif
+ tlen = m->m_pkthdr.len;
+
+ if (opt && (opt->ip6po_flags & IP6PO_DONTFRAG))
+ dontfrag = 1;
+ else
+ dontfrag = 0;
+ if (dontfrag && alwaysfrag) { /* case 4 */
+ /* conflicting request - can't transmit */
+ error = EMSGSIZE;
+ goto bad;
+ }
+ if (dontfrag && tlen > IN6_LINKMTU(ifp)) { /* case 2-b */
+ /*
+ * Even if the DONTFRAG option is specified, we cannot send the
+ * packet when the data length is larger than the MTU of the
+ * outgoing interface.
+ * Notify the error by sending IPV6_PATHMTU ancillary data as
+ * well as returning an error code (the latter is not described
+ * in the API spec.)
+ */
+ u_int32_t mtu32;
+ struct ip6ctlparam ip6cp;
+
+ mtu32 = (u_int32_t)mtu;
+ bzero(&ip6cp, sizeof(ip6cp));
+ ip6cp.ip6c_cmdarg = (void *)&mtu32;
+ pfctlinput2(PRC_MSGSIZE, (struct sockaddr *)&ro_pmtu->ro_dst,
+ (void *)&ip6cp);
+
+ error = EMSGSIZE;
+ goto bad;
+ }
+
+ /*
+ * transmit packet without fragmentation
+ */
+ if (dontfrag || (!alwaysfrag && tlen <= mtu)) { /* case 1-a and 2-a */
+ struct in6_ifaddr *ia6;
+
+ ip6 = mtod(m, struct ip6_hdr *);
+ ia6 = in6_ifawithifp(ifp, &ip6->ip6_src);
+ if (ia6) {
+ /* Record statistics for this interface address. */
+ ia6->ia_ifa.if_opackets++;
+ ia6->ia_ifa.if_obytes += m->m_pkthdr.len;
+ ifa_free(&ia6->ia_ifa);
+ }
+ error = nd6_output(ifp, origifp, m, dst, ro->ro_rt);
+ goto done;
+ }
+
+ /*
+ * try to fragment the packet. case 1-b and 3
+ */
+ if (mtu < IPV6_MMTU) {
+ /* path MTU cannot be less than IPV6_MMTU */
+ error = EMSGSIZE;
+ in6_ifstat_inc(ifp, ifs6_out_fragfail);
+ goto bad;
+ } else if (ip6->ip6_plen == 0) {
+ /* jumbo payload cannot be fragmented */
+ error = EMSGSIZE;
+ in6_ifstat_inc(ifp, ifs6_out_fragfail);
+ goto bad;
+ } else {
+ struct mbuf **mnext, *m_frgpart;
+ struct ip6_frag *ip6f;
+ u_int32_t id = htonl(ip6_randomid());
+ u_char nextproto;
+
+ int qslots = ifp->if_snd.ifq_maxlen - ifp->if_snd.ifq_len;
+
+ /*
+ * Too large for the destination or interface;
+ * fragment if possible.
+ * Must be able to put at least 8 bytes per fragment.
+ */
+ hlen = unfragpartlen;
+ if (mtu > IPV6_MAXPACKET)
+ mtu = IPV6_MAXPACKET;
+
+ len = (mtu - hlen - sizeof(struct ip6_frag)) & ~7;
+ if (len < 8) {
+ error = EMSGSIZE;
+ in6_ifstat_inc(ifp, ifs6_out_fragfail);
+ goto bad;
+ }
+
+ /*
+ * Verify that we have any chance at all of being able to queue
+ * the packet or packet fragments
+ */
+ if (qslots <= 0 || ((u_int)qslots * (mtu - hlen)
+ < tlen /* - hlen */)) {
+ error = ENOBUFS;
+ V_ip6stat.ip6s_odropped++;
+ goto bad;
+ }
+
+ mnext = &m->m_nextpkt;
+
+ /*
+ * Change the next header field of the last header in the
+ * unfragmentable part.
+ */
+ if (exthdrs.ip6e_rthdr) {
+ nextproto = *mtod(exthdrs.ip6e_rthdr, u_char *);
+ *mtod(exthdrs.ip6e_rthdr, u_char *) = IPPROTO_FRAGMENT;
+ } else if (exthdrs.ip6e_dest1) {
+ nextproto = *mtod(exthdrs.ip6e_dest1, u_char *);
+ *mtod(exthdrs.ip6e_dest1, u_char *) = IPPROTO_FRAGMENT;
+ } else if (exthdrs.ip6e_hbh) {
+ nextproto = *mtod(exthdrs.ip6e_hbh, u_char *);
+ *mtod(exthdrs.ip6e_hbh, u_char *) = IPPROTO_FRAGMENT;
+ } else {
+ nextproto = ip6->ip6_nxt;
+ ip6->ip6_nxt = IPPROTO_FRAGMENT;
+ }
+
+ /*
+ * Loop through the length of the segment after the first fragment;
+ * make a new header, copy the data of each part, and link it onto
+ * the chain.
+ */
+ m0 = m;
+ for (off = hlen; off < tlen; off += len) {
+ MGETHDR(m, M_DONTWAIT, MT_HEADER);
+ if (!m) {
+ error = ENOBUFS;
+ V_ip6stat.ip6s_odropped++;
+ goto sendorfree;
+ }
+ m->m_pkthdr.rcvif = NULL;
+ m->m_flags = m0->m_flags & M_COPYFLAGS;
+ *mnext = m;
+ mnext = &m->m_nextpkt;
+ m->m_data += max_linkhdr;
+ mhip6 = mtod(m, struct ip6_hdr *);
+ *mhip6 = *ip6;
+ m->m_len = sizeof(*mhip6);
+ error = ip6_insertfraghdr(m0, m, hlen, &ip6f);
+ if (error) {
+ V_ip6stat.ip6s_odropped++;
+ goto sendorfree;
+ }
+ ip6f->ip6f_offlg = htons((u_short)((off - hlen) & ~7));
+ if (off + len >= tlen)
+ len = tlen - off;
+ else
+ ip6f->ip6f_offlg |= IP6F_MORE_FRAG;
+ mhip6->ip6_plen = htons((u_short)(len + hlen +
+ sizeof(*ip6f) - sizeof(struct ip6_hdr)));
+ if ((m_frgpart = m_copy(m0, off, len)) == 0) {
+ error = ENOBUFS;
+ V_ip6stat.ip6s_odropped++;
+ goto sendorfree;
+ }
+ m_cat(m, m_frgpart);
+ m->m_pkthdr.len = len + hlen + sizeof(*ip6f);
+ m->m_pkthdr.rcvif = NULL;
+ ip6f->ip6f_reserved = 0;
+ ip6f->ip6f_ident = id;
+ ip6f->ip6f_nxt = nextproto;
+ V_ip6stat.ip6s_ofragments++;
+ in6_ifstat_inc(ifp, ifs6_out_fragcreat);
+ }
+
+ in6_ifstat_inc(ifp, ifs6_out_fragok);
+ }
+
+ /*
+ * Remove leading garbage.
+ */
+sendorfree:
+ m = m0->m_nextpkt;
+ m0->m_nextpkt = 0;
+ m_freem(m0);
+ for (m0 = m; m; m = m0) {
+ m0 = m->m_nextpkt;
+ m->m_nextpkt = 0;
+ if (error == 0) {
+ /* Record statistics for this interface address. */
+ if (ia) {
+ ia->ia_ifa.if_opackets++;
+ ia->ia_ifa.if_obytes += m->m_pkthdr.len;
+ }
+ error = nd6_output(ifp, origifp, m, dst, ro->ro_rt);
+ } else
+ m_freem(m);
+ }
+
+ if (error == 0)
+ V_ip6stat.ip6s_fragmented++;
+
+done:
+ if (ro == &ip6route && ro->ro_rt) { /* brace necessary for RTFREE */
+ RTFREE(ro->ro_rt);
+ } else if (ro_pmtu == &ip6route && ro_pmtu->ro_rt) {
+ RTFREE(ro_pmtu->ro_rt);
+ }
+#ifdef IPSEC
+ if (sp != NULL)
+ KEY_FREESP(&sp);
+#endif
+
+ return (error);
+
+freehdrs:
+ m_freem(exthdrs.ip6e_hbh); /* m_freem will check if mbuf is 0 */
+ m_freem(exthdrs.ip6e_dest1);
+ m_freem(exthdrs.ip6e_rthdr);
+ m_freem(exthdrs.ip6e_dest2);
+ /* FALLTHROUGH */
+bad:
+ if (m)
+ m_freem(m);
+ goto done;
+}
+
+static int
+ip6_copyexthdr(struct mbuf **mp, caddr_t hdr, int hlen)
+{
+ struct mbuf *m;
+
+ if (hlen > MCLBYTES)
+ return (ENOBUFS); /* XXX */
+
+ MGET(m, M_DONTWAIT, MT_DATA);
+ if (!m)
+ return (ENOBUFS);
+
+ if (hlen > MLEN) {
+ MCLGET(m, M_DONTWAIT);
+ if ((m->m_flags & M_EXT) == 0) {
+ m_free(m);
+ return (ENOBUFS);
+ }
+ }
+ m->m_len = hlen;
+ if (hdr)
+ bcopy(hdr, mtod(m, caddr_t), hlen);
+
+ *mp = m;
+ return (0);
+}
+
+/*
+ * Insert jumbo payload option.
+ */
+static int
+ip6_insert_jumboopt(struct ip6_exthdrs *exthdrs, u_int32_t plen)
+{
+ struct mbuf *mopt;
+ u_char *optbuf;
+ u_int32_t v;
+
+#define JUMBOOPTLEN 8 /* length of jumbo payload option and padding */
+
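+/*
+ * Sketch of the 8 bytes filled in below: optbuf[0..1] hold either the
+ * two-byte hop-by-hop header (next header, length) for a fresh header,
+ * or a PadN option when appending; optbuf[2..7] hold the jumbo option:
+ * type, length 4, and the 32-bit payload length.
+ */
+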
+ /*
+ * If there is no hop-by-hop options header, allocate a new one.
+ * If there is one but it doesn't have enough space to store the
+ * jumbo payload option, allocate a cluster to store the whole options.
+ * Otherwise, use it to store the options.
+ */
+ if (exthdrs->ip6e_hbh == 0) {
+ MGET(mopt, M_DONTWAIT, MT_DATA);
+ if (mopt == 0)
+ return (ENOBUFS);
+ mopt->m_len = JUMBOOPTLEN;
+ optbuf = mtod(mopt, u_char *);
+ optbuf[1] = 0; /* = ((JUMBOOPTLEN) >> 3) - 1 */
+ exthdrs->ip6e_hbh = mopt;
+ } else {
+ struct ip6_hbh *hbh;
+
+ mopt = exthdrs->ip6e_hbh;
+ if (M_TRAILINGSPACE(mopt) < JUMBOOPTLEN) {
+ /*
+ * XXX assumption:
+ * - exthdrs->ip6e_hbh is not referenced from places
+ * other than exthdrs.
+ * - exthdrs->ip6e_hbh is not an mbuf chain.
+ */
+ int oldoptlen = mopt->m_len;
+ struct mbuf *n;
+
+ /*
+ * XXX: give up if the whole (new) hbh header does
+ * not fit even in an mbuf cluster.
+ */
+ if (oldoptlen + JUMBOOPTLEN > MCLBYTES)
+ return (ENOBUFS);
+
+ /*
+ * As a consequence, we must always prepare a cluster
+ * at this point.
+ */
+ MGET(n, M_DONTWAIT, MT_DATA);
+ if (n) {
+ MCLGET(n, M_DONTWAIT);
+ if ((n->m_flags & M_EXT) == 0) {
+ m_freem(n);
+ n = NULL;
+ }
+ }
+ if (!n)
+ return (ENOBUFS);
+ n->m_len = oldoptlen + JUMBOOPTLEN;
+ bcopy(mtod(mopt, caddr_t), mtod(n, caddr_t),
+ oldoptlen);
+ optbuf = mtod(n, caddr_t) + oldoptlen;
+ m_freem(mopt);
+ mopt = exthdrs->ip6e_hbh = n;
+ } else {
+ optbuf = mtod(mopt, u_char *) + mopt->m_len;
+ mopt->m_len += JUMBOOPTLEN;
+ }
+ optbuf[0] = IP6OPT_PADN;
+ optbuf[1] = 1;
+
+ /*
+ * Adjust the header length according to the pad and
+ * the jumbo payload option.
+ */
+ hbh = mtod(mopt, struct ip6_hbh *);
+ hbh->ip6h_len += (JUMBOOPTLEN >> 3);
+ }
+
+ /* fill in the option. */
+ optbuf[2] = IP6OPT_JUMBO;
+ optbuf[3] = 4;
+ v = (u_int32_t)htonl(plen + JUMBOOPTLEN);
+ bcopy(&v, &optbuf[4], sizeof(u_int32_t));
+
+ /* finally, adjust the packet header length */
+ exthdrs->ip6e_ip6->m_pkthdr.len += JUMBOOPTLEN;
+
+ return (0);
+#undef JUMBOOPTLEN
+}
+
+/*
+ * Insert fragment header and copy unfragmentable header portions.
+ */
+static int
+ip6_insertfraghdr(struct mbuf *m0, struct mbuf *m, int hlen,
+ struct ip6_frag **frghdrp)
+{
+ struct mbuf *n, *mlast;
+
+ if (hlen > sizeof(struct ip6_hdr)) {
+ n = m_copym(m0, sizeof(struct ip6_hdr),
+ hlen - sizeof(struct ip6_hdr), M_DONTWAIT);
+ if (n == 0)
+ return (ENOBUFS);
+ m->m_next = n;
+ } else
+ n = m;
+
+ /* Search for the last mbuf of unfragmentable part. */
+ for (mlast = n; mlast->m_next; mlast = mlast->m_next)
+ ;
+
+ if ((mlast->m_flags & M_EXT) == 0 &&
+ M_TRAILINGSPACE(mlast) >= sizeof(struct ip6_frag)) {
+ /* use the trailing space of the last mbuf for the fragment hdr */
+ *frghdrp = (struct ip6_frag *)(mtod(mlast, caddr_t) +
+ mlast->m_len);
+ mlast->m_len += sizeof(struct ip6_frag);
+ m->m_pkthdr.len += sizeof(struct ip6_frag);
+ } else {
+ /* allocate a new mbuf for the fragment header */
+ struct mbuf *mfrg;
+
+ MGET(mfrg, M_DONTWAIT, MT_DATA);
+ if (mfrg == 0)
+ return (ENOBUFS);
+ mfrg->m_len = sizeof(struct ip6_frag);
+ *frghdrp = mtod(mfrg, struct ip6_frag *);
+ mlast->m_next = mfrg;
+ }
+
+ return (0);
+}
+
+static int
+ip6_getpmtu(struct route_in6 *ro_pmtu, struct route_in6 *ro,
+ struct ifnet *ifp, struct in6_addr *dst, u_long *mtup,
+ int *alwaysfragp)
+{
+ u_int32_t mtu = 0;
+ int alwaysfrag = 0;
+ int error = 0;
+
+ if (ro_pmtu != ro) {
+ /* The first hop and the final destination may differ. */
+ struct sockaddr_in6 *sa6_dst =
+ (struct sockaddr_in6 *)&ro_pmtu->ro_dst;
+ if (ro_pmtu->ro_rt &&
+ ((ro_pmtu->ro_rt->rt_flags & RTF_UP) == 0 ||
+ !IN6_ARE_ADDR_EQUAL(&sa6_dst->sin6_addr, dst))) {
+ RTFREE(ro_pmtu->ro_rt);
+ ro_pmtu->ro_rt = (struct rtentry *)NULL;
+ }
+ if (ro_pmtu->ro_rt == NULL) {
+ bzero(sa6_dst, sizeof(*sa6_dst));
+ sa6_dst->sin6_family = AF_INET6;
+ sa6_dst->sin6_len = sizeof(struct sockaddr_in6);
+ sa6_dst->sin6_addr = *dst;
+
+ rtalloc((struct route *)ro_pmtu);
+ }
+ }
+ if (ro_pmtu->ro_rt) {
+ u_int32_t ifmtu;
+ struct in_conninfo inc;
+
+ bzero(&inc, sizeof(inc));
+ inc.inc_flags |= INC_ISIPV6;
+ inc.inc6_faddr = *dst;
+
+ if (ifp == NULL)
+ ifp = ro_pmtu->ro_rt->rt_ifp;
+ ifmtu = IN6_LINKMTU(ifp);
+ mtu = tcp_hc_getmtu(&inc);
+ if (mtu)
+ mtu = min(mtu, ro_pmtu->ro_rt->rt_rmx.rmx_mtu);
+ else
+ mtu = ro_pmtu->ro_rt->rt_rmx.rmx_mtu;
+ if (mtu == 0)
+ mtu = ifmtu;
+ else if (mtu < IPV6_MMTU) {
+ /*
+ * RFC2460 section 5, last paragraph:
+			 * if we record an ICMPv6 Packet Too Big message with
+			 * mtu < IPV6_MMTU, transmit packets sized IPV6_MMTU
+			 * or smaller, with a fragment header attached.
+			 * (A fragment header is needed regardless of the
+			 * packet size, for translators to identify packets.)
+ */
+ alwaysfrag = 1;
+ mtu = IPV6_MMTU;
+ } else if (mtu > ifmtu) {
+ /*
+ * The MTU on the route is larger than the MTU on
+ * the interface! This shouldn't happen, unless the
+ * MTU of the interface has been changed after the
+ * interface was brought up. Change the MTU in the
+ * route to match the interface MTU (as long as the
+ * field isn't locked).
+ */
+ mtu = ifmtu;
+ ro_pmtu->ro_rt->rt_rmx.rmx_mtu = mtu;
+ }
+ } else if (ifp) {
+ mtu = IN6_LINKMTU(ifp);
+ } else
+ error = EHOSTUNREACH; /* XXX */
+
+ *mtup = mtu;
+ if (alwaysfragp)
+ *alwaysfragp = alwaysfrag;
+ return (error);
+}
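+
+/*
+ * Example (minimal sketch): a caller that only needs the path MTU toward
+ * a destination dst and holds no cached route; this mirrors the
+ * IPV6_PATHMTU handling in ip6_ctloutput() below:
+ *
+ *	struct route_in6 sro;
+ *	u_long pmtu;
+ *
+ *	bzero(&sro, sizeof(sro));
+ *	error = ip6_getpmtu(&sro, NULL, NULL, &dst, &pmtu, NULL);
+ *	if (sro.ro_rt)
+ *		RTFREE(sro.ro_rt);
+ */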
+
+/*
+ * IP6 socket option processing.
+ */
+int
+ip6_ctloutput(struct socket *so, struct sockopt *sopt)
+{
+ int optdatalen, uproto;
+ void *optdata;
+ struct inpcb *in6p = sotoinpcb(so);
+ int error, optval;
+ int level, op, optname;
+ int optlen;
+ struct thread *td;
+
+ level = sopt->sopt_level;
+ op = sopt->sopt_dir;
+ optname = sopt->sopt_name;
+ optlen = sopt->sopt_valsize;
+ td = sopt->sopt_td;
+ error = 0;
+ optval = 0;
+ uproto = (int)so->so_proto->pr_protocol;
+
+ if (level == IPPROTO_IPV6) {
+ switch (op) {
+
+ case SOPT_SET:
+ switch (optname) {
+ case IPV6_2292PKTOPTIONS:
+#ifdef IPV6_PKTOPTIONS
+ case IPV6_PKTOPTIONS:
+#endif
+ {
+ struct mbuf *m;
+
+ error = soopt_getm(sopt, &m); /* XXX */
+ if (error != 0)
+ break;
+ error = soopt_mcopyin(sopt, m); /* XXX */
+ if (error != 0)
+ break;
+ error = ip6_pcbopts(&in6p->in6p_outputopts,
+ m, so, sopt);
+ m_freem(m); /* XXX */
+ break;
+ }
+
+ /*
+ * Use of some Hop-by-Hop options or some
+ * Destination options, might require special
+ * privilege. That is, normal applications
+ * (without special privilege) might be forbidden
+ * from setting certain options in outgoing packets,
+ * and might never see certain options in received
+ * packets. [RFC 2292 Section 6]
+ * KAME specific note:
+ * KAME prevents non-privileged users from sending or
+ * receiving ANY hbh/dst options in order to avoid
+ * overhead of parsing options in the kernel.
+ */
+ case IPV6_RECVHOPOPTS:
+ case IPV6_RECVDSTOPTS:
+ case IPV6_RECVRTHDRDSTOPTS:
+ if (td != NULL) {
+ error = priv_check(td,
+ PRIV_NETINET_SETHDROPTS);
+ if (error)
+ break;
+ }
+ /* FALLTHROUGH */
+ case IPV6_UNICAST_HOPS:
+ case IPV6_HOPLIMIT:
+ case IPV6_FAITH:
+
+ case IPV6_RECVPKTINFO:
+ case IPV6_RECVHOPLIMIT:
+ case IPV6_RECVRTHDR:
+ case IPV6_RECVPATHMTU:
+ case IPV6_RECVTCLASS:
+ case IPV6_V6ONLY:
+ case IPV6_AUTOFLOWLABEL:
+ case IPV6_BINDANY:
+ if (optname == IPV6_BINDANY && td != NULL) {
+ error = priv_check(td,
+ PRIV_NETINET_BINDANY);
+ if (error)
+ break;
+ }
+
+ if (optlen != sizeof(int)) {
+ error = EINVAL;
+ break;
+ }
+ error = sooptcopyin(sopt, &optval,
+ sizeof optval, sizeof optval);
+ if (error)
+ break;
+ switch (optname) {
+
+ case IPV6_UNICAST_HOPS:
+ if (optval < -1 || optval >= 256)
+ error = EINVAL;
+ else {
+ /* -1 = kernel default */
+ in6p->in6p_hops = optval;
+ if ((in6p->inp_vflag &
+ INP_IPV4) != 0)
+ in6p->inp_ip_ttl = optval;
+ }
+ break;
+#define OPTSET(bit) \
+do { \
+ if (optval) \
+ in6p->inp_flags |= (bit); \
+ else \
+ in6p->inp_flags &= ~(bit); \
+} while (/*CONSTCOND*/ 0)
+#define OPTSET2292(bit) \
+do { \
+ in6p->inp_flags |= IN6P_RFC2292; \
+ if (optval) \
+ in6p->inp_flags |= (bit); \
+ else \
+ in6p->inp_flags &= ~(bit); \
+} while (/*CONSTCOND*/ 0)
+#define OPTBIT(bit) (in6p->inp_flags & (bit) ? 1 : 0)
+
+ case IPV6_RECVPKTINFO:
+ /* cannot mix with RFC2292 */
+ if (OPTBIT(IN6P_RFC2292)) {
+ error = EINVAL;
+ break;
+ }
+ OPTSET(IN6P_PKTINFO);
+ break;
+
+ case IPV6_HOPLIMIT:
+ {
+ struct ip6_pktopts **optp;
+
+ /* cannot mix with RFC2292 */
+ if (OPTBIT(IN6P_RFC2292)) {
+ error = EINVAL;
+ break;
+ }
+ optp = &in6p->in6p_outputopts;
+ error = ip6_pcbopt(IPV6_HOPLIMIT,
+ (u_char *)&optval, sizeof(optval),
+ optp, (td != NULL) ? td->td_ucred :
+ NULL, uproto);
+ break;
+ }
+
+ case IPV6_RECVHOPLIMIT:
+ /* cannot mix with RFC2292 */
+ if (OPTBIT(IN6P_RFC2292)) {
+ error = EINVAL;
+ break;
+ }
+ OPTSET(IN6P_HOPLIMIT);
+ break;
+
+ case IPV6_RECVHOPOPTS:
+ /* cannot mix with RFC2292 */
+ if (OPTBIT(IN6P_RFC2292)) {
+ error = EINVAL;
+ break;
+ }
+ OPTSET(IN6P_HOPOPTS);
+ break;
+
+ case IPV6_RECVDSTOPTS:
+ /* cannot mix with RFC2292 */
+ if (OPTBIT(IN6P_RFC2292)) {
+ error = EINVAL;
+ break;
+ }
+ OPTSET(IN6P_DSTOPTS);
+ break;
+
+ case IPV6_RECVRTHDRDSTOPTS:
+ /* cannot mix with RFC2292 */
+ if (OPTBIT(IN6P_RFC2292)) {
+ error = EINVAL;
+ break;
+ }
+ OPTSET(IN6P_RTHDRDSTOPTS);
+ break;
+
+ case IPV6_RECVRTHDR:
+ /* cannot mix with RFC2292 */
+ if (OPTBIT(IN6P_RFC2292)) {
+ error = EINVAL;
+ break;
+ }
+ OPTSET(IN6P_RTHDR);
+ break;
+
+ case IPV6_FAITH:
+ OPTSET(INP_FAITH);
+ break;
+
+ case IPV6_RECVPATHMTU:
+ /*
+ * We ignore this option for TCP
+ * sockets.
+ * (RFC3542 leaves this case
+ * unspecified.)
+ */
+ if (uproto != IPPROTO_TCP)
+ OPTSET(IN6P_MTU);
+ break;
+
+ case IPV6_V6ONLY:
+ /*
+ * make setsockopt(IPV6_V6ONLY)
+ * available only prior to bind(2).
+ * see ipng mailing list, Jun 22 2001.
+ */
+ if (in6p->inp_lport ||
+ !IN6_IS_ADDR_UNSPECIFIED(&in6p->in6p_laddr)) {
+ error = EINVAL;
+ break;
+ }
+ OPTSET(IN6P_IPV6_V6ONLY);
+ if (optval)
+ in6p->inp_vflag &= ~INP_IPV4;
+ else
+ in6p->inp_vflag |= INP_IPV4;
+ break;
+ case IPV6_RECVTCLASS:
+ /* cannot mix with RFC2292 XXX */
+ if (OPTBIT(IN6P_RFC2292)) {
+ error = EINVAL;
+ break;
+ }
+ OPTSET(IN6P_TCLASS);
+ break;
+ case IPV6_AUTOFLOWLABEL:
+ OPTSET(IN6P_AUTOFLOWLABEL);
+ break;
+
+ case IPV6_BINDANY:
+ OPTSET(INP_BINDANY);
+ break;
+ }
+ break;
+
+ case IPV6_TCLASS:
+ case IPV6_DONTFRAG:
+ case IPV6_USE_MIN_MTU:
+ case IPV6_PREFER_TEMPADDR:
+ if (optlen != sizeof(optval)) {
+ error = EINVAL;
+ break;
+ }
+ error = sooptcopyin(sopt, &optval,
+ sizeof optval, sizeof optval);
+ if (error)
+ break;
+ {
+ struct ip6_pktopts **optp;
+ optp = &in6p->in6p_outputopts;
+ error = ip6_pcbopt(optname,
+ (u_char *)&optval, sizeof(optval),
+ optp, (td != NULL) ? td->td_ucred :
+ NULL, uproto);
+ break;
+ }
+
+ case IPV6_2292PKTINFO:
+ case IPV6_2292HOPLIMIT:
+ case IPV6_2292HOPOPTS:
+ case IPV6_2292DSTOPTS:
+ case IPV6_2292RTHDR:
+ /* RFC 2292 */
+ if (optlen != sizeof(int)) {
+ error = EINVAL;
+ break;
+ }
+ error = sooptcopyin(sopt, &optval,
+ sizeof optval, sizeof optval);
+ if (error)
+ break;
+ switch (optname) {
+ case IPV6_2292PKTINFO:
+ OPTSET2292(IN6P_PKTINFO);
+ break;
+ case IPV6_2292HOPLIMIT:
+ OPTSET2292(IN6P_HOPLIMIT);
+ break;
+ case IPV6_2292HOPOPTS:
+ /*
+ * Check super-user privilege.
+ * See comments for IPV6_RECVHOPOPTS.
+ */
+ if (td != NULL) {
+ error = priv_check(td,
+ PRIV_NETINET_SETHDROPTS);
+ if (error)
+ return (error);
+ }
+ OPTSET2292(IN6P_HOPOPTS);
+ break;
+ case IPV6_2292DSTOPTS:
+ if (td != NULL) {
+ error = priv_check(td,
+ PRIV_NETINET_SETHDROPTS);
+ if (error)
+ return (error);
+ }
+ OPTSET2292(IN6P_DSTOPTS|IN6P_RTHDRDSTOPTS); /* XXX */
+ break;
+ case IPV6_2292RTHDR:
+ OPTSET2292(IN6P_RTHDR);
+ break;
+ }
+ break;
+ case IPV6_PKTINFO:
+ case IPV6_HOPOPTS:
+ case IPV6_RTHDR:
+ case IPV6_DSTOPTS:
+ case IPV6_RTHDRDSTOPTS:
+ case IPV6_NEXTHOP:
+ {
+ /* new advanced API (RFC3542) */
+ u_char *optbuf;
+ u_char optbuf_storage[MCLBYTES];
+ int optlen;
+ struct ip6_pktopts **optp;
+
+ /* cannot mix with RFC2292 */
+ if (OPTBIT(IN6P_RFC2292)) {
+ error = EINVAL;
+ break;
+ }
+
+ /*
+ * We only ensure valsize is not too large
+ * here. Further validation will be done
+ * later.
+ */
+ error = sooptcopyin(sopt, optbuf_storage,
+ sizeof(optbuf_storage), 0);
+ if (error)
+ break;
+ optlen = sopt->sopt_valsize;
+ optbuf = optbuf_storage;
+ optp = &in6p->in6p_outputopts;
+ error = ip6_pcbopt(optname, optbuf, optlen,
+ optp, (td != NULL) ? td->td_ucred : NULL,
+ uproto);
+ break;
+ }
+#undef OPTSET
+
+ case IPV6_MULTICAST_IF:
+ case IPV6_MULTICAST_HOPS:
+ case IPV6_MULTICAST_LOOP:
+ case IPV6_JOIN_GROUP:
+ case IPV6_LEAVE_GROUP:
+ case IPV6_MSFILTER:
+ case MCAST_BLOCK_SOURCE:
+ case MCAST_UNBLOCK_SOURCE:
+ case MCAST_JOIN_GROUP:
+ case MCAST_LEAVE_GROUP:
+ case MCAST_JOIN_SOURCE_GROUP:
+ case MCAST_LEAVE_SOURCE_GROUP:
+ error = ip6_setmoptions(in6p, sopt);
+ break;
+
+ case IPV6_PORTRANGE:
+ error = sooptcopyin(sopt, &optval,
+ sizeof optval, sizeof optval);
+ if (error)
+ break;
+
+ switch (optval) {
+ case IPV6_PORTRANGE_DEFAULT:
+ in6p->inp_flags &= ~(INP_LOWPORT);
+ in6p->inp_flags &= ~(INP_HIGHPORT);
+ break;
+
+ case IPV6_PORTRANGE_HIGH:
+ in6p->inp_flags &= ~(INP_LOWPORT);
+ in6p->inp_flags |= INP_HIGHPORT;
+ break;
+
+ case IPV6_PORTRANGE_LOW:
+ in6p->inp_flags &= ~(INP_HIGHPORT);
+ in6p->inp_flags |= INP_LOWPORT;
+ break;
+
+ default:
+ error = EINVAL;
+ break;
+ }
+ break;
+
+#ifdef IPSEC
+ case IPV6_IPSEC_POLICY:
+ {
+ caddr_t req;
+ struct mbuf *m;
+
+ if ((error = soopt_getm(sopt, &m)) != 0) /* XXX */
+ break;
+ if ((error = soopt_mcopyin(sopt, m)) != 0) /* XXX */
+ break;
+ req = mtod(m, caddr_t);
+ error = ipsec_set_policy(in6p, optname, req,
+ m->m_len, (sopt->sopt_td != NULL) ?
+ sopt->sopt_td->td_ucred : NULL);
+ m_freem(m);
+ break;
+ }
+#endif /* IPSEC */
+
+ default:
+ error = ENOPROTOOPT;
+ break;
+ }
+ break;
+
+ case SOPT_GET:
+ switch (optname) {
+
+ case IPV6_2292PKTOPTIONS:
+#ifdef IPV6_PKTOPTIONS
+ case IPV6_PKTOPTIONS:
+#endif
+ /*
+ * RFC3542 (effectively) deprecated the
+ * semantics of the 2292-style pktoptions.
+			 * Since it was not reliable in nature (i.e.,
+			 * applications had to expect that some
+			 * information could be missing), we simplify
+			 * this part by always returning empty data.
+ */
+ sopt->sopt_valsize = 0;
+ break;
+
+ case IPV6_RECVHOPOPTS:
+ case IPV6_RECVDSTOPTS:
+ case IPV6_RECVRTHDRDSTOPTS:
+ case IPV6_UNICAST_HOPS:
+ case IPV6_RECVPKTINFO:
+ case IPV6_RECVHOPLIMIT:
+ case IPV6_RECVRTHDR:
+ case IPV6_RECVPATHMTU:
+
+ case IPV6_FAITH:
+ case IPV6_V6ONLY:
+ case IPV6_PORTRANGE:
+ case IPV6_RECVTCLASS:
+ case IPV6_AUTOFLOWLABEL:
+ case IPV6_BINDANY:
+ switch (optname) {
+
+ case IPV6_RECVHOPOPTS:
+ optval = OPTBIT(IN6P_HOPOPTS);
+ break;
+
+ case IPV6_RECVDSTOPTS:
+ optval = OPTBIT(IN6P_DSTOPTS);
+ break;
+
+ case IPV6_RECVRTHDRDSTOPTS:
+ optval = OPTBIT(IN6P_RTHDRDSTOPTS);
+ break;
+
+ case IPV6_UNICAST_HOPS:
+ optval = in6p->in6p_hops;
+ break;
+
+ case IPV6_RECVPKTINFO:
+ optval = OPTBIT(IN6P_PKTINFO);
+ break;
+
+ case IPV6_RECVHOPLIMIT:
+ optval = OPTBIT(IN6P_HOPLIMIT);
+ break;
+
+ case IPV6_RECVRTHDR:
+ optval = OPTBIT(IN6P_RTHDR);
+ break;
+
+ case IPV6_RECVPATHMTU:
+ optval = OPTBIT(IN6P_MTU);
+ break;
+
+ case IPV6_FAITH:
+ optval = OPTBIT(INP_FAITH);
+ break;
+
+ case IPV6_V6ONLY:
+ optval = OPTBIT(IN6P_IPV6_V6ONLY);
+ break;
+
+ case IPV6_PORTRANGE:
+ {
+ int flags;
+ flags = in6p->inp_flags;
+ if (flags & INP_HIGHPORT)
+ optval = IPV6_PORTRANGE_HIGH;
+ else if (flags & INP_LOWPORT)
+ optval = IPV6_PORTRANGE_LOW;
+ else
+ optval = 0;
+ break;
+ }
+ case IPV6_RECVTCLASS:
+ optval = OPTBIT(IN6P_TCLASS);
+ break;
+
+ case IPV6_AUTOFLOWLABEL:
+ optval = OPTBIT(IN6P_AUTOFLOWLABEL);
+ break;
+
+ case IPV6_BINDANY:
+ optval = OPTBIT(INP_BINDANY);
+ break;
+ }
+ if (error)
+ break;
+ error = sooptcopyout(sopt, &optval,
+ sizeof optval);
+ break;
+
+ case IPV6_PATHMTU:
+ {
+ u_long pmtu = 0;
+ struct ip6_mtuinfo mtuinfo;
+ struct route_in6 sro;
+
+ bzero(&sro, sizeof(sro));
+
+ if (!(so->so_state & SS_ISCONNECTED))
+ return (ENOTCONN);
+ /*
+			 * XXX: we do not consider the case of source
+ * routing, or optional information to specify
+ * the outgoing interface.
+ */
+ error = ip6_getpmtu(&sro, NULL, NULL,
+ &in6p->in6p_faddr, &pmtu, NULL);
+ if (sro.ro_rt)
+ RTFREE(sro.ro_rt);
+ if (error)
+ break;
+ if (pmtu > IPV6_MAXPACKET)
+ pmtu = IPV6_MAXPACKET;
+
+ bzero(&mtuinfo, sizeof(mtuinfo));
+ mtuinfo.ip6m_mtu = (u_int32_t)pmtu;
+ optdata = (void *)&mtuinfo;
+ optdatalen = sizeof(mtuinfo);
+ error = sooptcopyout(sopt, optdata,
+ optdatalen);
+ break;
+ }
+
+ case IPV6_2292PKTINFO:
+ case IPV6_2292HOPLIMIT:
+ case IPV6_2292HOPOPTS:
+ case IPV6_2292RTHDR:
+ case IPV6_2292DSTOPTS:
+ switch (optname) {
+ case IPV6_2292PKTINFO:
+ optval = OPTBIT(IN6P_PKTINFO);
+ break;
+ case IPV6_2292HOPLIMIT:
+ optval = OPTBIT(IN6P_HOPLIMIT);
+ break;
+ case IPV6_2292HOPOPTS:
+ optval = OPTBIT(IN6P_HOPOPTS);
+ break;
+ case IPV6_2292RTHDR:
+ optval = OPTBIT(IN6P_RTHDR);
+ break;
+ case IPV6_2292DSTOPTS:
+ optval = OPTBIT(IN6P_DSTOPTS|IN6P_RTHDRDSTOPTS);
+ break;
+ }
+ error = sooptcopyout(sopt, &optval,
+ sizeof optval);
+ break;
+ case IPV6_PKTINFO:
+ case IPV6_HOPOPTS:
+ case IPV6_RTHDR:
+ case IPV6_DSTOPTS:
+ case IPV6_RTHDRDSTOPTS:
+ case IPV6_NEXTHOP:
+ case IPV6_TCLASS:
+ case IPV6_DONTFRAG:
+ case IPV6_USE_MIN_MTU:
+ case IPV6_PREFER_TEMPADDR:
+ error = ip6_getpcbopt(in6p->in6p_outputopts,
+ optname, sopt);
+ break;
+
+ case IPV6_MULTICAST_IF:
+ case IPV6_MULTICAST_HOPS:
+ case IPV6_MULTICAST_LOOP:
+ case IPV6_MSFILTER:
+ error = ip6_getmoptions(in6p, sopt);
+ break;
+
+#ifdef IPSEC
+ case IPV6_IPSEC_POLICY:
+ {
+ caddr_t req = NULL;
+ size_t len = 0;
+ struct mbuf *m = NULL;
+ struct mbuf **mp = &m;
+ size_t ovalsize = sopt->sopt_valsize;
+ caddr_t oval = (caddr_t)sopt->sopt_val;
+
+ error = soopt_getm(sopt, &m); /* XXX */
+ if (error != 0)
+ break;
+ error = soopt_mcopyin(sopt, m); /* XXX */
+ if (error != 0)
+ break;
+ sopt->sopt_valsize = ovalsize;
+ sopt->sopt_val = oval;
+ if (m) {
+ req = mtod(m, caddr_t);
+ len = m->m_len;
+ }
+ error = ipsec_get_policy(in6p, req, len, mp);
+ if (error == 0)
+ error = soopt_mcopyout(sopt, m); /* XXX */
+ if (error == 0 && m)
+ m_freem(m);
+ break;
+ }
+#endif /* IPSEC */
+
+ default:
+ error = ENOPROTOOPT;
+ break;
+ }
+ break;
+ }
+ } else { /* level != IPPROTO_IPV6 */
+ error = EINVAL;
+ }
+ return (error);
+}
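+
+/*
+ * Example (from the application side, given a socket descriptor s): the
+ * SOPT_SET path above services a standard RFC 3542 setsockopt(2), e.g.
+ *
+ *	int on = 1;
+ *	setsockopt(s, IPPROTO_IPV6, IPV6_RECVPKTINFO, &on, sizeof(on));
+ *
+ * which lands in the IPV6_RECVPKTINFO case and sets IN6P_PKTINFO via
+ * OPTSET().
+ */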
+
+int
+ip6_raw_ctloutput(struct socket *so, struct sockopt *sopt)
+{
+ int error = 0, optval, optlen;
+ const int icmp6off = offsetof(struct icmp6_hdr, icmp6_cksum);
+ struct inpcb *in6p = sotoinpcb(so);
+ int level, op, optname;
+
+ level = sopt->sopt_level;
+ op = sopt->sopt_dir;
+ optname = sopt->sopt_name;
+ optlen = sopt->sopt_valsize;
+
+ if (level != IPPROTO_IPV6) {
+ return (EINVAL);
+ }
+
+ switch (optname) {
+ case IPV6_CHECKSUM:
+ /*
+	 * For ICMPv6 sockets, no modification of the checksum offset
+	 * is allowed; "no change" values are permitted to help
+	 * existing apps.
+ *
+ * RFC3542 says: "An attempt to set IPV6_CHECKSUM
+ * for an ICMPv6 socket will fail."
+ * The current behavior does not meet RFC3542.
+ */
+ switch (op) {
+ case SOPT_SET:
+ if (optlen != sizeof(int)) {
+ error = EINVAL;
+ break;
+ }
+ error = sooptcopyin(sopt, &optval, sizeof(optval),
+ sizeof(optval));
+ if (error)
+ break;
+ if ((optval % 2) != 0) {
+ /* the API assumes even offset values */
+ error = EINVAL;
+ } else if (so->so_proto->pr_protocol ==
+ IPPROTO_ICMPV6) {
+ if (optval != icmp6off)
+ error = EINVAL;
+ } else
+ in6p->in6p_cksum = optval;
+ break;
+
+ case SOPT_GET:
+ if (so->so_proto->pr_protocol == IPPROTO_ICMPV6)
+ optval = icmp6off;
+ else
+ optval = in6p->in6p_cksum;
+
+ error = sooptcopyout(sopt, &optval, sizeof(optval));
+ break;
+
+ default:
+ error = EINVAL;
+ break;
+ }
+ break;
+
+ default:
+ error = ENOPROTOOPT;
+ break;
+ }
+
+ return (error);
+}
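+
+/*
+ * Example (sketch): a raw, non-ICMPv6 IPv6 socket asking the kernel to
+ * compute the checksum at byte offset 2 of its protocol header:
+ *
+ *	int off = 2;	(must be even, see above)
+ *	setsockopt(s, IPPROTO_IPV6, IPV6_CHECKSUM, &off, sizeof(off));
+ *
+ * For ICMPv6 sockets the only value accepted is the fixed offset of
+ * icmp6_cksum.
+ */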
+
+/*
+ * Set up IP6 options in pcb for insertion in output packets or
+ * specifying behavior of outgoing packets.
+ */
+static int
+ip6_pcbopts(struct ip6_pktopts **pktopt, struct mbuf *m,
+ struct socket *so, struct sockopt *sopt)
+{
+ struct ip6_pktopts *opt = *pktopt;
+ int error = 0;
+ struct thread *td = sopt->sopt_td;
+
+ /* turn off any old options. */
+ if (opt) {
+#ifdef DIAGNOSTIC
+ if (opt->ip6po_pktinfo || opt->ip6po_nexthop ||
+ opt->ip6po_hbh || opt->ip6po_dest1 || opt->ip6po_dest2 ||
+ opt->ip6po_rhinfo.ip6po_rhi_rthdr)
+ printf("ip6_pcbopts: all specified options are cleared.\n");
+#endif
+ ip6_clearpktopts(opt, -1);
+ } else
+ opt = malloc(sizeof(*opt), M_IP6OPT, M_WAITOK);
+ *pktopt = NULL;
+
+ if (!m || m->m_len == 0) {
+ /*
+		 * We are only turning off any previous options, regardless
+		 * of whether opt was just allocated or passed in.
+ */
+ free(opt, M_IP6OPT);
+ return (0);
+ }
+
+ /* set options specified by user. */
+ if ((error = ip6_setpktopts(m, opt, NULL, (td != NULL) ?
+ td->td_ucred : NULL, so->so_proto->pr_protocol)) != 0) {
+ ip6_clearpktopts(opt, -1); /* XXX: discard all options */
+ free(opt, M_IP6OPT);
+ return (error);
+ }
+ *pktopt = opt;
+ return (0);
+}
+
+/*
+ * initialize ip6_pktopts. beware that there are non-zero default values in
+ * the struct.
+ */
+void
+ip6_initpktopts(struct ip6_pktopts *opt)
+{
+
+ bzero(opt, sizeof(*opt));
+ opt->ip6po_hlim = -1; /* -1 means default hop limit */
+ opt->ip6po_tclass = -1; /* -1 means default traffic class */
+ opt->ip6po_minmtu = IP6PO_MINMTU_MCASTONLY;
+ opt->ip6po_prefer_tempaddr = IP6PO_TEMPADDR_SYSTEM;
+}
+
+static int
+ip6_pcbopt(int optname, u_char *buf, int len, struct ip6_pktopts **pktopt,
+ struct ucred *cred, int uproto)
+{
+ struct ip6_pktopts *opt;
+
+ if (*pktopt == NULL) {
+ *pktopt = malloc(sizeof(struct ip6_pktopts), M_IP6OPT,
+ M_WAITOK);
+ ip6_initpktopts(*pktopt);
+ }
+ opt = *pktopt;
+
+ return (ip6_setpktopt(optname, buf, len, opt, cred, 1, 0, uproto));
+}
+
+static int
+ip6_getpcbopt(struct ip6_pktopts *pktopt, int optname, struct sockopt *sopt)
+{
+ void *optdata = NULL;
+ int optdatalen = 0;
+ struct ip6_ext *ip6e;
+ int error = 0;
+ struct in6_pktinfo null_pktinfo;
+ int deftclass = 0, on;
+ int defminmtu = IP6PO_MINMTU_MCASTONLY;
+ int defpreftemp = IP6PO_TEMPADDR_SYSTEM;
+
+ switch (optname) {
+ case IPV6_PKTINFO:
+ if (pktopt && pktopt->ip6po_pktinfo)
+ optdata = (void *)pktopt->ip6po_pktinfo;
+ else {
+ /* XXX: we don't have to do this every time... */
+ bzero(&null_pktinfo, sizeof(null_pktinfo));
+ optdata = (void *)&null_pktinfo;
+ }
+ optdatalen = sizeof(struct in6_pktinfo);
+ break;
+ case IPV6_TCLASS:
+ if (pktopt && pktopt->ip6po_tclass >= 0)
+ optdata = (void *)&pktopt->ip6po_tclass;
+ else
+ optdata = (void *)&deftclass;
+ optdatalen = sizeof(int);
+ break;
+ case IPV6_HOPOPTS:
+ if (pktopt && pktopt->ip6po_hbh) {
+ optdata = (void *)pktopt->ip6po_hbh;
+ ip6e = (struct ip6_ext *)pktopt->ip6po_hbh;
+ optdatalen = (ip6e->ip6e_len + 1) << 3;
+ }
+ break;
+ case IPV6_RTHDR:
+ if (pktopt && pktopt->ip6po_rthdr) {
+ optdata = (void *)pktopt->ip6po_rthdr;
+ ip6e = (struct ip6_ext *)pktopt->ip6po_rthdr;
+ optdatalen = (ip6e->ip6e_len + 1) << 3;
+ }
+ break;
+ case IPV6_RTHDRDSTOPTS:
+ if (pktopt && pktopt->ip6po_dest1) {
+ optdata = (void *)pktopt->ip6po_dest1;
+ ip6e = (struct ip6_ext *)pktopt->ip6po_dest1;
+ optdatalen = (ip6e->ip6e_len + 1) << 3;
+ }
+ break;
+ case IPV6_DSTOPTS:
+ if (pktopt && pktopt->ip6po_dest2) {
+ optdata = (void *)pktopt->ip6po_dest2;
+ ip6e = (struct ip6_ext *)pktopt->ip6po_dest2;
+ optdatalen = (ip6e->ip6e_len + 1) << 3;
+ }
+ break;
+ case IPV6_NEXTHOP:
+ if (pktopt && pktopt->ip6po_nexthop) {
+ optdata = (void *)pktopt->ip6po_nexthop;
+ optdatalen = pktopt->ip6po_nexthop->sa_len;
+ }
+ break;
+ case IPV6_USE_MIN_MTU:
+ if (pktopt)
+ optdata = (void *)&pktopt->ip6po_minmtu;
+ else
+ optdata = (void *)&defminmtu;
+ optdatalen = sizeof(int);
+ break;
+ case IPV6_DONTFRAG:
+ if (pktopt && ((pktopt->ip6po_flags) & IP6PO_DONTFRAG))
+ on = 1;
+ else
+ on = 0;
+ optdata = (void *)&on;
+ optdatalen = sizeof(on);
+ break;
+ case IPV6_PREFER_TEMPADDR:
+ if (pktopt)
+ optdata = (void *)&pktopt->ip6po_prefer_tempaddr;
+ else
+ optdata = (void *)&defpreftemp;
+ optdatalen = sizeof(int);
+ break;
+ default: /* should not happen */
+#ifdef DIAGNOSTIC
+ panic("ip6_getpcbopt: unexpected option\n");
+#endif
+ return (ENOPROTOOPT);
+ }
+
+ error = sooptcopyout(sopt, optdata, optdatalen);
+
+ return (error);
+}
+
+void
+ip6_clearpktopts(struct ip6_pktopts *pktopt, int optname)
+{
+ if (pktopt == NULL)
+ return;
+
+ if (optname == -1 || optname == IPV6_PKTINFO) {
+ if (pktopt->ip6po_pktinfo)
+ free(pktopt->ip6po_pktinfo, M_IP6OPT);
+ pktopt->ip6po_pktinfo = NULL;
+ }
+ if (optname == -1 || optname == IPV6_HOPLIMIT)
+ pktopt->ip6po_hlim = -1;
+ if (optname == -1 || optname == IPV6_TCLASS)
+ pktopt->ip6po_tclass = -1;
+ if (optname == -1 || optname == IPV6_NEXTHOP) {
+ if (pktopt->ip6po_nextroute.ro_rt) {
+ RTFREE(pktopt->ip6po_nextroute.ro_rt);
+ pktopt->ip6po_nextroute.ro_rt = NULL;
+ }
+ if (pktopt->ip6po_nexthop)
+ free(pktopt->ip6po_nexthop, M_IP6OPT);
+ pktopt->ip6po_nexthop = NULL;
+ }
+ if (optname == -1 || optname == IPV6_HOPOPTS) {
+ if (pktopt->ip6po_hbh)
+ free(pktopt->ip6po_hbh, M_IP6OPT);
+ pktopt->ip6po_hbh = NULL;
+ }
+ if (optname == -1 || optname == IPV6_RTHDRDSTOPTS) {
+ if (pktopt->ip6po_dest1)
+ free(pktopt->ip6po_dest1, M_IP6OPT);
+ pktopt->ip6po_dest1 = NULL;
+ }
+ if (optname == -1 || optname == IPV6_RTHDR) {
+ if (pktopt->ip6po_rhinfo.ip6po_rhi_rthdr)
+ free(pktopt->ip6po_rhinfo.ip6po_rhi_rthdr, M_IP6OPT);
+ pktopt->ip6po_rhinfo.ip6po_rhi_rthdr = NULL;
+ if (pktopt->ip6po_route.ro_rt) {
+ RTFREE(pktopt->ip6po_route.ro_rt);
+ pktopt->ip6po_route.ro_rt = NULL;
+ }
+ }
+ if (optname == -1 || optname == IPV6_DSTOPTS) {
+ if (pktopt->ip6po_dest2)
+ free(pktopt->ip6po_dest2, M_IP6OPT);
+ pktopt->ip6po_dest2 = NULL;
+ }
+}
+
+#define PKTOPT_EXTHDRCPY(type) \
+do {\
+ if (src->type) {\
+ int hlen = (((struct ip6_ext *)src->type)->ip6e_len + 1) << 3;\
+ dst->type = malloc(hlen, M_IP6OPT, canwait);\
+ if (dst->type == NULL && canwait == M_NOWAIT)\
+ goto bad;\
+ bcopy(src->type, dst->type, hlen);\
+ }\
+} while (/*CONSTCOND*/ 0)
+
+static int
+copypktopts(struct ip6_pktopts *dst, struct ip6_pktopts *src, int canwait)
+{
+ if (dst == NULL || src == NULL) {
+ printf("ip6_clearpktopts: invalid argument\n");
+ return (EINVAL);
+ }
+
+ dst->ip6po_hlim = src->ip6po_hlim;
+ dst->ip6po_tclass = src->ip6po_tclass;
+ dst->ip6po_flags = src->ip6po_flags;
+ if (src->ip6po_pktinfo) {
+ dst->ip6po_pktinfo = malloc(sizeof(*dst->ip6po_pktinfo),
+ M_IP6OPT, canwait);
+ if (dst->ip6po_pktinfo == NULL)
+ goto bad;
+ *dst->ip6po_pktinfo = *src->ip6po_pktinfo;
+ }
+ if (src->ip6po_nexthop) {
+ dst->ip6po_nexthop = malloc(src->ip6po_nexthop->sa_len,
+ M_IP6OPT, canwait);
+ if (dst->ip6po_nexthop == NULL)
+ goto bad;
+ bcopy(src->ip6po_nexthop, dst->ip6po_nexthop,
+ src->ip6po_nexthop->sa_len);
+ }
+ PKTOPT_EXTHDRCPY(ip6po_hbh);
+ PKTOPT_EXTHDRCPY(ip6po_dest1);
+ PKTOPT_EXTHDRCPY(ip6po_dest2);
+	PKTOPT_EXTHDRCPY(ip6po_rthdr); /* does not copy the cached route */
+ return (0);
+
+ bad:
+ ip6_clearpktopts(dst, -1);
+ return (ENOBUFS);
+}
+#undef PKTOPT_EXTHDRCPY
+
+struct ip6_pktopts *
+ip6_copypktopts(struct ip6_pktopts *src, int canwait)
+{
+ int error;
+ struct ip6_pktopts *dst;
+
+ dst = malloc(sizeof(*dst), M_IP6OPT, canwait);
+ if (dst == NULL)
+ return (NULL);
+ ip6_initpktopts(dst);
+
+ if ((error = copypktopts(dst, src, canwait)) != 0) {
+ free(dst, M_IP6OPT);
+ return (NULL);
+ }
+
+ return (dst);
+}
+
+void
+ip6_freepcbopts(struct ip6_pktopts *pktopt)
+{
+ if (pktopt == NULL)
+ return;
+
+ ip6_clearpktopts(pktopt, -1);
+
+ free(pktopt, M_IP6OPT);
+}
+
+/*
+ * Set IPv6 outgoing packet options based on advanced API.
+ */
+int
+ip6_setpktopts(struct mbuf *control, struct ip6_pktopts *opt,
+ struct ip6_pktopts *stickyopt, struct ucred *cred, int uproto)
+{
+ struct cmsghdr *cm = 0;
+
+ if (control == NULL || opt == NULL)
+ return (EINVAL);
+
+ ip6_initpktopts(opt);
+ if (stickyopt) {
+ int error;
+
+ /*
+ * If stickyopt is provided, make a local copy of the options
+ * for this particular packet, then override them by ancillary
+ * objects.
+ * XXX: copypktopts() does not copy the cached route to a next
+ * hop (if any). This is not very good in terms of efficiency,
+ * but we can allow this since this option should be rarely
+ * used.
+ */
+ if ((error = copypktopts(opt, stickyopt, M_NOWAIT)) != 0)
+ return (error);
+ }
+
+ /*
+ * XXX: Currently, we assume all the optional information is stored
+ * in a single mbuf.
+ */
+ if (control->m_next)
+ return (EINVAL);
+
+ for (; control->m_len > 0; control->m_data += CMSG_ALIGN(cm->cmsg_len),
+ control->m_len -= CMSG_ALIGN(cm->cmsg_len)) {
+ int error;
+
+ if (control->m_len < CMSG_LEN(0))
+ return (EINVAL);
+
+ cm = mtod(control, struct cmsghdr *);
+ if (cm->cmsg_len == 0 || cm->cmsg_len > control->m_len)
+ return (EINVAL);
+ if (cm->cmsg_level != IPPROTO_IPV6)
+ continue;
+
+ error = ip6_setpktopt(cm->cmsg_type, CMSG_DATA(cm),
+ cm->cmsg_len - CMSG_LEN(0), opt, cred, 0, 1, uproto);
+ if (error)
+ return (error);
+ }
+
+ return (0);
+}
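+
+/*
+ * Example (minimal sketch): the control mbuf parsed above typically
+ * originates from a sendmsg(2) call carrying RFC 3542 ancillary data:
+ *
+ *	struct in6_pktinfo pi;	(fill in ipi6_addr and ipi6_ifindex)
+ *	char cbuf[CMSG_SPACE(sizeof(pi))];
+ *	struct cmsghdr *cm = (struct cmsghdr *)cbuf;
+ *
+ *	cm->cmsg_level = IPPROTO_IPV6;
+ *	cm->cmsg_type = IPV6_PKTINFO;
+ *	cm->cmsg_len = CMSG_LEN(sizeof(pi));
+ *	memcpy(CMSG_DATA(cm), &pi, sizeof(pi));
+ *
+ * Each such cmsghdr is handed to ip6_setpktopt() with sticky = 0 and
+ * cmsg = 1.
+ */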
+
+/*
+ * Set a particular packet option, as a sticky option or an ancillary data
+ * item. "len" can be 0 only when it's a sticky option.
+ * We have 4 cases of combination of "sticky" and "cmsg":
+ * "sticky=0, cmsg=0": impossible
+ * "sticky=0, cmsg=1": RFC2292 or RFC3542 ancillary data
+ * "sticky=1, cmsg=0": RFC3542 socket option
+ * "sticky=1, cmsg=1": RFC2292 socket option
+ */
+static int
+ip6_setpktopt(int optname, u_char *buf, int len, struct ip6_pktopts *opt,
+ struct ucred *cred, int sticky, int cmsg, int uproto)
+{
+ int minmtupolicy, preftemp;
+ int error;
+
+ if (!sticky && !cmsg) {
+#ifdef DIAGNOSTIC
+ printf("ip6_setpktopt: impossible case\n");
+#endif
+ return (EINVAL);
+ }
+
+ /*
+ * IPV6_2292xxx is for backward compatibility to RFC2292, and should
+ * not be specified in the context of RFC3542. Conversely,
+ * RFC3542 types should not be specified in the context of RFC2292.
+ */
+ if (!cmsg) {
+ switch (optname) {
+ case IPV6_2292PKTINFO:
+ case IPV6_2292HOPLIMIT:
+ case IPV6_2292NEXTHOP:
+ case IPV6_2292HOPOPTS:
+ case IPV6_2292DSTOPTS:
+ case IPV6_2292RTHDR:
+ case IPV6_2292PKTOPTIONS:
+ return (ENOPROTOOPT);
+ }
+ }
+ if (sticky && cmsg) {
+ switch (optname) {
+ case IPV6_PKTINFO:
+ case IPV6_HOPLIMIT:
+ case IPV6_NEXTHOP:
+ case IPV6_HOPOPTS:
+ case IPV6_DSTOPTS:
+ case IPV6_RTHDRDSTOPTS:
+ case IPV6_RTHDR:
+ case IPV6_USE_MIN_MTU:
+ case IPV6_DONTFRAG:
+ case IPV6_TCLASS:
+ case IPV6_PREFER_TEMPADDR: /* XXX: not an RFC3542 option */
+ return (ENOPROTOOPT);
+ }
+ }
+
+ switch (optname) {
+ case IPV6_2292PKTINFO:
+ case IPV6_PKTINFO:
+ {
+ struct ifnet *ifp = NULL;
+ struct in6_pktinfo *pktinfo;
+
+ if (len != sizeof(struct in6_pktinfo))
+ return (EINVAL);
+
+ pktinfo = (struct in6_pktinfo *)buf;
+
+ /*
+ * An application can clear any sticky IPV6_PKTINFO option by
+ * doing a "regular" setsockopt with ipi6_addr being
+ * in6addr_any and ipi6_ifindex being zero.
+ * [RFC 3542, Section 6]
+ */
+ if (optname == IPV6_PKTINFO && opt->ip6po_pktinfo &&
+ pktinfo->ipi6_ifindex == 0 &&
+ IN6_IS_ADDR_UNSPECIFIED(&pktinfo->ipi6_addr)) {
+ ip6_clearpktopts(opt, optname);
+ break;
+ }
+
+ if (uproto == IPPROTO_TCP && optname == IPV6_PKTINFO &&
+ sticky && !IN6_IS_ADDR_UNSPECIFIED(&pktinfo->ipi6_addr)) {
+ return (EINVAL);
+ }
+
+ /* validate the interface index if specified. */
+ if (pktinfo->ipi6_ifindex > V_if_index ||
+ pktinfo->ipi6_ifindex < 0) {
+ return (ENXIO);
+ }
+ if (pktinfo->ipi6_ifindex) {
+ ifp = ifnet_byindex(pktinfo->ipi6_ifindex);
+ if (ifp == NULL)
+ return (ENXIO);
+ }
+
+ /*
+ * We store the address anyway, and let in6_selectsrc()
+ * validate the specified address. This is because ipi6_addr
+ * may not have enough information about its scope zone, and
+ * we may need additional information (such as outgoing
+ * interface or the scope zone of a destination address) to
+ * disambiguate the scope.
+ * XXX: the delay of the validation may confuse the
+ * application when it is used as a sticky option.
+ */
+ if (opt->ip6po_pktinfo == NULL) {
+ opt->ip6po_pktinfo = malloc(sizeof(*pktinfo),
+ M_IP6OPT, M_NOWAIT);
+ if (opt->ip6po_pktinfo == NULL)
+ return (ENOBUFS);
+ }
+ bcopy(pktinfo, opt->ip6po_pktinfo, sizeof(*pktinfo));
+ break;
+ }
+
+ case IPV6_2292HOPLIMIT:
+ case IPV6_HOPLIMIT:
+ {
+ int *hlimp;
+
+ /*
+ * RFC 3542 deprecated the usage of sticky IPV6_HOPLIMIT
+ * to simplify the ordering among hoplimit options.
+ */
+ if (optname == IPV6_HOPLIMIT && sticky)
+ return (ENOPROTOOPT);
+
+ if (len != sizeof(int))
+ return (EINVAL);
+ hlimp = (int *)buf;
+ if (*hlimp < -1 || *hlimp > 255)
+ return (EINVAL);
+
+ opt->ip6po_hlim = *hlimp;
+ break;
+ }
+
+ case IPV6_TCLASS:
+ {
+ int tclass;
+
+ if (len != sizeof(int))
+ return (EINVAL);
+ tclass = *(int *)buf;
+ if (tclass < -1 || tclass > 255)
+ return (EINVAL);
+
+ opt->ip6po_tclass = tclass;
+ break;
+ }
+
+ case IPV6_2292NEXTHOP:
+ case IPV6_NEXTHOP:
+ if (cred != NULL) {
+ error = priv_check_cred(cred,
+ PRIV_NETINET_SETHDROPTS, 0);
+ if (error)
+ return (error);
+ }
+
+ if (len == 0) { /* just remove the option */
+ ip6_clearpktopts(opt, IPV6_NEXTHOP);
+ break;
+ }
+
+ /* check if cmsg_len is large enough for sa_len */
+ if (len < sizeof(struct sockaddr) || len < *buf)
+ return (EINVAL);
+
+ switch (((struct sockaddr *)buf)->sa_family) {
+ case AF_INET6:
+ {
+ struct sockaddr_in6 *sa6 = (struct sockaddr_in6 *)buf;
+ int error;
+
+ if (sa6->sin6_len != sizeof(struct sockaddr_in6))
+ return (EINVAL);
+
+ if (IN6_IS_ADDR_UNSPECIFIED(&sa6->sin6_addr) ||
+ IN6_IS_ADDR_MULTICAST(&sa6->sin6_addr)) {
+ return (EINVAL);
+ }
+ if ((error = sa6_embedscope(sa6, V_ip6_use_defzone))
+ != 0) {
+ return (error);
+ }
+ break;
+ }
+ case AF_LINK: /* should eventually be supported */
+ default:
+ return (EAFNOSUPPORT);
+ }
+
+ /* turn off the previous option, then set the new option. */
+ ip6_clearpktopts(opt, IPV6_NEXTHOP);
+ opt->ip6po_nexthop = malloc(*buf, M_IP6OPT, M_NOWAIT);
+ if (opt->ip6po_nexthop == NULL)
+ return (ENOBUFS);
+ bcopy(buf, opt->ip6po_nexthop, *buf);
+ break;
+
+ case IPV6_2292HOPOPTS:
+ case IPV6_HOPOPTS:
+ {
+ struct ip6_hbh *hbh;
+ int hbhlen;
+
+ /*
+ * XXX: We don't allow a non-privileged user to set ANY HbH
+ * options, since per-option restriction has too much
+ * overhead.
+ */
+ if (cred != NULL) {
+ error = priv_check_cred(cred,
+ PRIV_NETINET_SETHDROPTS, 0);
+ if (error)
+ return (error);
+ }
+
+ if (len == 0) {
+ ip6_clearpktopts(opt, IPV6_HOPOPTS);
+ break; /* just remove the option */
+ }
+
+ /* message length validation */
+ if (len < sizeof(struct ip6_hbh))
+ return (EINVAL);
+ hbh = (struct ip6_hbh *)buf;
+ hbhlen = (hbh->ip6h_len + 1) << 3;
+ if (len != hbhlen)
+ return (EINVAL);
+
+ /* turn off the previous option, then set the new option. */
+ ip6_clearpktopts(opt, IPV6_HOPOPTS);
+ opt->ip6po_hbh = malloc(hbhlen, M_IP6OPT, M_NOWAIT);
+ if (opt->ip6po_hbh == NULL)
+ return (ENOBUFS);
+ bcopy(hbh, opt->ip6po_hbh, hbhlen);
+
+ break;
+ }
+
+ case IPV6_2292DSTOPTS:
+ case IPV6_DSTOPTS:
+ case IPV6_RTHDRDSTOPTS:
+ {
+ struct ip6_dest *dest, **newdest = NULL;
+ int destlen;
+
+ if (cred != NULL) { /* XXX: see the comment for IPV6_HOPOPTS */
+ error = priv_check_cred(cred,
+ PRIV_NETINET_SETHDROPTS, 0);
+ if (error)
+ return (error);
+ }
+
+ if (len == 0) {
+ ip6_clearpktopts(opt, optname);
+ break; /* just remove the option */
+ }
+
+ /* message length validation */
+ if (len < sizeof(struct ip6_dest))
+ return (EINVAL);
+ dest = (struct ip6_dest *)buf;
+ destlen = (dest->ip6d_len + 1) << 3;
+ if (len != destlen)
+ return (EINVAL);
+
+ /*
+ * Determine the position that the destination options header
+ * should be inserted; before or after the routing header.
+ */
+ switch (optname) {
+ case IPV6_2292DSTOPTS:
+ /*
+			 * The old advanced API is ambiguous on this point.
+			 * Our approach is to determine the position
+			 * according to the existence of a routing header.
+ * Note, however, that this depends on the order of the
+ * extension headers in the ancillary data; the 1st
+ * part of the destination options header must appear
+ * before the routing header in the ancillary data,
+ * too.
+ * RFC3542 solved the ambiguity by introducing
+ * separate ancillary data or option types.
+ */
+ if (opt->ip6po_rthdr == NULL)
+ newdest = &opt->ip6po_dest1;
+ else
+ newdest = &opt->ip6po_dest2;
+ break;
+ case IPV6_RTHDRDSTOPTS:
+ newdest = &opt->ip6po_dest1;
+ break;
+ case IPV6_DSTOPTS:
+ newdest = &opt->ip6po_dest2;
+ break;
+ }
+
+ /* turn off the previous option, then set the new option. */
+ ip6_clearpktopts(opt, optname);
+ *newdest = malloc(destlen, M_IP6OPT, M_NOWAIT);
+ if (*newdest == NULL)
+ return (ENOBUFS);
+ bcopy(dest, *newdest, destlen);
+
+ break;
+ }
+
+ case IPV6_2292RTHDR:
+ case IPV6_RTHDR:
+ {
+ struct ip6_rthdr *rth;
+ int rthlen;
+
+ if (len == 0) {
+ ip6_clearpktopts(opt, IPV6_RTHDR);
+ break; /* just remove the option */
+ }
+
+ /* message length validation */
+ if (len < sizeof(struct ip6_rthdr))
+ return (EINVAL);
+ rth = (struct ip6_rthdr *)buf;
+ rthlen = (rth->ip6r_len + 1) << 3;
+ if (len != rthlen)
+ return (EINVAL);
+
+ switch (rth->ip6r_type) {
+ case IPV6_RTHDR_TYPE_0:
+ if (rth->ip6r_len == 0) /* must contain one addr */
+ return (EINVAL);
+ if (rth->ip6r_len % 2) /* length must be even */
+ return (EINVAL);
+ if (rth->ip6r_len / 2 != rth->ip6r_segleft)
+ return (EINVAL);
+ break;
+ default:
+ return (EINVAL); /* not supported */
+ }
+
+ /* turn off the previous option */
+ ip6_clearpktopts(opt, IPV6_RTHDR);
+ opt->ip6po_rthdr = malloc(rthlen, M_IP6OPT, M_NOWAIT);
+ if (opt->ip6po_rthdr == NULL)
+ return (ENOBUFS);
+ bcopy(rth, opt->ip6po_rthdr, rthlen);
+
+ break;
+ }
+
+ case IPV6_USE_MIN_MTU:
+ if (len != sizeof(int))
+ return (EINVAL);
+ minmtupolicy = *(int *)buf;
+ if (minmtupolicy != IP6PO_MINMTU_MCASTONLY &&
+ minmtupolicy != IP6PO_MINMTU_DISABLE &&
+ minmtupolicy != IP6PO_MINMTU_ALL) {
+ return (EINVAL);
+ }
+ opt->ip6po_minmtu = minmtupolicy;
+ break;
+
+ case IPV6_DONTFRAG:
+ if (len != sizeof(int))
+ return (EINVAL);
+
+ if (uproto == IPPROTO_TCP || *(int *)buf == 0) {
+ /*
+ * we ignore this option for TCP sockets.
+ * (RFC3542 leaves this case unspecified.)
+ */
+ opt->ip6po_flags &= ~IP6PO_DONTFRAG;
+ } else
+ opt->ip6po_flags |= IP6PO_DONTFRAG;
+ break;
+
+ case IPV6_PREFER_TEMPADDR:
+ if (len != sizeof(int))
+ return (EINVAL);
+ preftemp = *(int *)buf;
+ if (preftemp != IP6PO_TEMPADDR_SYSTEM &&
+ preftemp != IP6PO_TEMPADDR_NOTPREFER &&
+ preftemp != IP6PO_TEMPADDR_PREFER) {
+ return (EINVAL);
+ }
+ opt->ip6po_prefer_tempaddr = preftemp;
+ break;
+
+ default:
+ return (ENOPROTOOPT);
+ } /* end of switch */
+
+ return (0);
+}
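+
+/*
+ * Note on the sticky/cmsg matrix above: within this file, ip6_pcbopt()
+ * calls ip6_setpktopt() with (sticky = 1, cmsg = 0) for RFC 3542 socket
+ * options, and ip6_setpktopts() calls it with (sticky = 0, cmsg = 1) for
+ * ancillary data items.
+ */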
+
+/*
+ * Routine called from ip6_output() to loop back a copy of an IP6 multicast
+ * packet to the input queue of a specified interface. Note that this
+ * calls the output routine of the loopback "driver", but with an interface
+ * pointer that might NOT be &loif -- easier than replicating that code here.
+ */
+void
+ip6_mloopback(struct ifnet *ifp, struct mbuf *m, struct sockaddr_in6 *dst)
+{
+ struct mbuf *copym;
+ struct ip6_hdr *ip6;
+
+ copym = m_copy(m, 0, M_COPYALL);
+ if (copym == NULL)
+ return;
+
+ /*
+	 * Make sure to deep-copy the IPv6 header portion in case the data
+	 * is in an mbuf cluster, so that we can safely overwrite the IPv6
+	 * header later.
+ */
+ if ((copym->m_flags & M_EXT) != 0 ||
+ copym->m_len < sizeof(struct ip6_hdr)) {
+ copym = m_pullup(copym, sizeof(struct ip6_hdr));
+ if (copym == NULL)
+ return;
+ }
+
+#ifdef DIAGNOSTIC
+ if (copym->m_len < sizeof(*ip6)) {
+ m_freem(copym);
+ return;
+ }
+#endif
+
+ ip6 = mtod(copym, struct ip6_hdr *);
+ /*
+ * clear embedded scope identifiers if necessary.
+ * in6_clearscope will touch the addresses only when necessary.
+ */
+ in6_clearscope(&ip6->ip6_src);
+ in6_clearscope(&ip6->ip6_dst);
+
+ (void)if_simloop(ifp, copym, dst->sin6_family, 0);
+}
+
+/*
+ * Chop IPv6 header off from the payload.
+ */
+static int
+ip6_splithdr(struct mbuf *m, struct ip6_exthdrs *exthdrs)
+{
+ struct mbuf *mh;
+ struct ip6_hdr *ip6;
+
+ ip6 = mtod(m, struct ip6_hdr *);
+ if (m->m_len > sizeof(*ip6)) {
+ MGETHDR(mh, M_DONTWAIT, MT_HEADER);
+ if (mh == 0) {
+ m_freem(m);
+ return ENOBUFS;
+ }
+ M_MOVE_PKTHDR(mh, m);
+ MH_ALIGN(mh, sizeof(*ip6));
+ m->m_len -= sizeof(*ip6);
+ m->m_data += sizeof(*ip6);
+ mh->m_next = m;
+ m = mh;
+ m->m_len = sizeof(*ip6);
+ bcopy((caddr_t)ip6, mtod(m, caddr_t), sizeof(*ip6));
+ }
+ exthdrs->ip6e_ip6 = m;
+ return 0;
+}
+
+/*
+ * Compute IPv6 extension header length.
+ */
+int
+ip6_optlen(struct inpcb *in6p)
+{
+ int len;
+
+ if (!in6p->in6p_outputopts)
+ return 0;
+
+ len = 0;
+#define elen(x) \
+ (((struct ip6_ext *)(x)) ? (((struct ip6_ext *)(x))->ip6e_len + 1) << 3 : 0)
+
+ len += elen(in6p->in6p_outputopts->ip6po_hbh);
+ if (in6p->in6p_outputopts->ip6po_rthdr)
+ /* dest1 is valid with rthdr only */
+ len += elen(in6p->in6p_outputopts->ip6po_dest1);
+ len += elen(in6p->in6p_outputopts->ip6po_rthdr);
+ len += elen(in6p->in6p_outputopts->ip6po_dest2);
+ return len;
+#undef elen
+}
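+
+/*
+ * Example: for a type 0 routing header with ip6r_len == 2 (one 16-byte
+ * address), elen() above yields (2 + 1) << 3 == 24 octets.
+ */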
diff --git a/rtems/freebsd/netinet6/ip6_var.h b/rtems/freebsd/netinet6/ip6_var.h
new file mode 100644
index 00000000..50d8adcb
--- /dev/null
+++ b/rtems/freebsd/netinet6/ip6_var.h
@@ -0,0 +1,444 @@
+/*-
+ * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the project nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $KAME: ip6_var.h,v 1.62 2001/05/03 14:51:48 itojun Exp $
+ */
+
+/*-
+ * Copyright (c) 1982, 1986, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)ip_var.h 8.1 (Berkeley) 6/10/93
+ * $FreeBSD$
+ */
+
+#ifndef _NETINET6_IP6_VAR_HH_
+#define _NETINET6_IP6_VAR_HH_
+
+/*
+ * IP6 reassembly queue structure. Each fragment
+ * being reassembled is attached to one of these structures.
+ */
+struct ip6q {
+ struct ip6asfrag *ip6q_down;
+ struct ip6asfrag *ip6q_up;
+ u_int32_t ip6q_ident;
+ u_int8_t ip6q_nxt;
+ u_int8_t ip6q_ecn;
+ u_int8_t ip6q_ttl;
+ struct in6_addr ip6q_src, ip6q_dst;
+ struct ip6q *ip6q_next;
+ struct ip6q *ip6q_prev;
+ int ip6q_unfrglen; /* len of unfragmentable part */
+#ifdef notyet
+ u_char *ip6q_nxtp;
+#endif
+ int ip6q_nfrag; /* # of fragments */
+ struct label *ip6q_label;
+};
+
+struct ip6asfrag {
+ struct ip6asfrag *ip6af_down;
+ struct ip6asfrag *ip6af_up;
+ struct mbuf *ip6af_m;
+ int ip6af_offset; /* offset in ip6af_m to next header */
+ int ip6af_frglen; /* fragmentable part length */
+ int ip6af_off; /* fragment offset */
+ u_int16_t ip6af_mff; /* more fragment bit in frag off */
+};
+
+#define IP6_REASS_MBUF(ip6af) (*(struct mbuf **)&((ip6af)->ip6af_m))
+
+/*
+ * Structure attached to inpcb.in6p_moptions and
+ * passed to ip6_output when IPv6 multicast options are in use.
+ * This structure is lazy-allocated.
+ */
+struct ip6_moptions {
+ struct ifnet *im6o_multicast_ifp; /* ifp for outgoing multicasts */
+ u_char im6o_multicast_hlim; /* hoplimit for outgoing multicasts */
+	u_char	im6o_multicast_loop;	/* 1 => hear sends if a member */
+ u_short im6o_num_memberships; /* no. memberships this socket */
+ u_short im6o_max_memberships; /* max memberships this socket */
+ struct in6_multi **im6o_membership; /* group memberships */
+ struct in6_mfilter *im6o_mfilters; /* source filters */
+};
+
+/*
+ * Control options for outgoing packets
+ */
+
+/* Routing header related info */
+struct ip6po_rhinfo {
+ struct ip6_rthdr *ip6po_rhi_rthdr; /* Routing header */
+ struct route_in6 ip6po_rhi_route; /* Route to the 1st hop */
+};
+#define ip6po_rthdr ip6po_rhinfo.ip6po_rhi_rthdr
+#define ip6po_route ip6po_rhinfo.ip6po_rhi_route
+
+/* Nexthop related info */
+struct ip6po_nhinfo {
+ struct sockaddr *ip6po_nhi_nexthop;
+ struct route_in6 ip6po_nhi_route; /* Route to the nexthop */
+};
+#define ip6po_nexthop ip6po_nhinfo.ip6po_nhi_nexthop
+#define ip6po_nextroute ip6po_nhinfo.ip6po_nhi_route
+
+struct ip6_pktopts {
+ struct mbuf *ip6po_m; /* Pointer to mbuf storing the data */
+ int ip6po_hlim; /* Hoplimit for outgoing packets */
+
+ /* Outgoing IF/address information */
+ struct in6_pktinfo *ip6po_pktinfo;
+
+ /* Next-hop address information */
+ struct ip6po_nhinfo ip6po_nhinfo;
+
+ struct ip6_hbh *ip6po_hbh; /* Hop-by-Hop options header */
+
+ /* Destination options header (before a routing header) */
+ struct ip6_dest *ip6po_dest1;
+
+ /* Routing header related info. */
+ struct ip6po_rhinfo ip6po_rhinfo;
+
+ /* Destination options header (after a routing header) */
+ struct ip6_dest *ip6po_dest2;
+
+ int ip6po_tclass; /* traffic class */
+
+ int ip6po_minmtu; /* fragment vs PMTU discovery policy */
+#define IP6PO_MINMTU_MCASTONLY -1 /* default; send at min MTU for multicast*/
+#define IP6PO_MINMTU_DISABLE 0 /* always perform pmtu disc */
+#define IP6PO_MINMTU_ALL 1 /* always send at min MTU */
+
+ int ip6po_prefer_tempaddr; /* whether temporary addresses are
+ preferred as source address */
+#define IP6PO_TEMPADDR_SYSTEM -1 /* follow the system default */
+#define IP6PO_TEMPADDR_NOTPREFER 0 /* not prefer temporary address */
+#define IP6PO_TEMPADDR_PREFER 1 /* prefer temporary address */
+
+ int ip6po_flags;
+#if 0 /* parameters in this block is obsolete. do not reuse the values. */
+#define IP6PO_REACHCONF 0x01 /* upper-layer reachability confirmation. */
+#define IP6PO_MINMTU 0x02 /* use minimum MTU (IPV6_USE_MIN_MTU) */
+#endif
+#define IP6PO_DONTFRAG 0x04 /* disable fragmentation (IPV6_DONTFRAG) */
+#define IP6PO_USECOA 0x08 /* use care of address */
+};
+
+/*
+ * Control options for incoming packets
+ */
+
+struct ip6stat {
+ u_quad_t ip6s_total; /* total packets received */
+ u_quad_t ip6s_tooshort; /* packet too short */
+ u_quad_t ip6s_toosmall; /* not enough data */
+ u_quad_t ip6s_fragments; /* fragments received */
+ u_quad_t ip6s_fragdropped; /* frags dropped(dups, out of space) */
+ u_quad_t ip6s_fragtimeout; /* fragments timed out */
+ u_quad_t ip6s_fragoverflow; /* fragments that exceeded limit */
+ u_quad_t ip6s_forward; /* packets forwarded */
+ u_quad_t ip6s_cantforward; /* packets rcvd for unreachable dest */
+ u_quad_t ip6s_redirectsent; /* packets forwarded on same net */
+ u_quad_t ip6s_delivered; /* datagrams delivered to upper level*/
+ u_quad_t ip6s_localout; /* total ip packets generated here */
+ u_quad_t ip6s_odropped; /* lost packets due to nobufs, etc. */
+ u_quad_t ip6s_reassembled; /* total packets reassembled ok */
+ u_quad_t ip6s_fragmented; /* datagrams successfully fragmented */
+ u_quad_t ip6s_ofragments; /* output fragments created */
+ u_quad_t ip6s_cantfrag; /* don't fragment flag was set, etc. */
+ u_quad_t ip6s_badoptions; /* error in option processing */
+ u_quad_t ip6s_noroute; /* packets discarded due to no route */
+ u_quad_t ip6s_badvers; /* ip6 version != 6 */
+ u_quad_t ip6s_rawout; /* total raw ip packets generated */
+ u_quad_t ip6s_badscope; /* scope error */
+ u_quad_t ip6s_notmember; /* don't join this multicast group */
+ u_quad_t ip6s_nxthist[256]; /* next header history */
+ u_quad_t ip6s_m1; /* one mbuf */
+ u_quad_t ip6s_m2m[32]; /* two or more mbuf */
+ u_quad_t ip6s_mext1; /* one ext mbuf */
+ u_quad_t ip6s_mext2m; /* two or more ext mbuf */
+	u_quad_t ip6s_exthdrtoolong;	/* ext hdrs are not contiguous */
+ u_quad_t ip6s_nogif; /* no match gif found */
+ u_quad_t ip6s_toomanyhdr; /* discarded due to too many headers */
+
+ /*
+ * statistics for improvement of the source address selection
+ * algorithm:
+ * XXX: hardcoded 16 = # of ip6 multicast scope types + 1
+ */
+ /* number of times that address selection fails */
+ u_quad_t ip6s_sources_none;
+ /* number of times that an address on the outgoing I/F is chosen */
+ u_quad_t ip6s_sources_sameif[16];
+ /* number of times that an address on a non-outgoing I/F is chosen */
+ u_quad_t ip6s_sources_otherif[16];
+ /*
+ * number of times that an address that has the same scope
+ * from the destination is chosen.
+ */
+ u_quad_t ip6s_sources_samescope[16];
+ /*
+ * number of times that an address that has a different scope
+ * from the destination is chosen.
+ */
+ u_quad_t ip6s_sources_otherscope[16];
+ /* number of times that a deprecated address is chosen */
+ u_quad_t ip6s_sources_deprecated[16];
+
+ /* number of times that each rule of source selection is applied. */
+ u_quad_t ip6s_sources_rule[16];
+};
+
+#ifdef _KERNEL
+#define IP6STAT_ADD(name, val) V_ip6stat.name += (val)
+#define IP6STAT_SUB(name, val) V_ip6stat.name -= (val)
+#define IP6STAT_INC(name) IP6STAT_ADD(name, 1)
+#define IP6STAT_DEC(name) IP6STAT_SUB(name, 1)
+#endif
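+
+/* Example: IP6STAT_INC(ip6s_total) expands to V_ip6stat.ip6s_total += 1. */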
+
+#ifdef _KERNEL
+/*
+ * IPv6 onion peeling state.
+ * it will be initialized when we come into ip6_input().
+ * XXX do not make it a kitchen sink!
+ */
+struct ip6aux {
+ u_int32_t ip6a_flags;
+#define IP6A_SWAP 0x01 /* swapped home/care-of on packet */
+#define IP6A_HASEEN 0x02 /* HA was present */
+#define IP6A_BRUID 0x04 /* BR Unique Identifier was present */
+#define IP6A_RTALERTSEEN 0x08 /* rtalert present */
+
+ /* ip6.ip6_src */
+ struct in6_addr ip6a_careof; /* care-of address of the peer */
+ struct in6_addr ip6a_home; /* home address of the peer */
+ u_int16_t ip6a_bruid; /* BR unique identifier */
+
+ /* ip6.ip6_dst */
+ struct in6_ifaddr *ip6a_dstia6; /* my ifaddr that matches ip6_dst */
+
+ /* rtalert */
+ u_int16_t ip6a_rtalert; /* rtalert option value */
+
+ /*
+ * decapsulation history will be here.
+ * with IPsec it may not be accurate.
+ */
+};
+#endif
+
+#ifdef _KERNEL
+/* flags passed to ip6_output as last parameter */
+#define IPV6_UNSPECSRC 0x01 /* allow :: as the source address */
+#define IPV6_FORWARDING 0x02 /* most of IPv6 header exists */
+#define IPV6_MINMTU 0x04 /* use minimum MTU (IPV6_USE_MIN_MTU) */
+
+#ifdef __NO_STRICT_ALIGNMENT
+#define IP6_HDR_ALIGNED_P(ip) 1
+#else
+#define IP6_HDR_ALIGNED_P(ip) ((((intptr_t) (ip)) & 3) == 0)
+#endif
+
+VNET_DECLARE(struct ip6stat, ip6stat); /* statistics */
+VNET_DECLARE(int, ip6_defhlim); /* default hop limit */
+VNET_DECLARE(int, ip6_defmcasthlim); /* default multicast hop limit */
+VNET_DECLARE(int, ip6_forwarding); /* act as router? */
+VNET_DECLARE(int, ip6_use_deprecated); /* allow deprecated addr as source */
+VNET_DECLARE(int, ip6_rr_prune); /* router renumbering prefix
+ * walk list every 5 sec. */
+VNET_DECLARE(int, ip6_mcast_pmtu); /* enable pMTU discovery for multicast? */
+VNET_DECLARE(int, ip6_v6only);
+#define V_ip6stat VNET(ip6stat)
+#define V_ip6_defhlim VNET(ip6_defhlim)
+#define V_ip6_defmcasthlim VNET(ip6_defmcasthlim)
+#define V_ip6_forwarding VNET(ip6_forwarding)
+#define V_ip6_use_deprecated VNET(ip6_use_deprecated)
+#define V_ip6_rr_prune VNET(ip6_rr_prune)
+#define V_ip6_mcast_pmtu VNET(ip6_mcast_pmtu)
+#define V_ip6_v6only VNET(ip6_v6only)
+
+VNET_DECLARE(struct socket *, ip6_mrouter); /* multicast routing daemon */
+VNET_DECLARE(int, ip6_sendredirects); /* send IP redirects when forwarding? */
+VNET_DECLARE(int, ip6_maxfragpackets); /* Maximum packets in reassembly
+ * queue */
+VNET_DECLARE(int, ip6_maxfrags); /* Maximum fragments in reassembly
+ * queue */
+VNET_DECLARE(int, ip6_accept_rtadv);	/* Acts as a host, not a router */
+VNET_DECLARE(int, ip6_keepfaith); /* Firewall Aided Internet Translator */
+VNET_DECLARE(int, ip6_log_interval);
+VNET_DECLARE(time_t, ip6_log_time);
+VNET_DECLARE(int, ip6_hdrnestlimit); /* upper limit of # of extension
+ * headers */
+VNET_DECLARE(int, ip6_dad_count); /* DupAddrDetectionTransmits */
+#define V_ip6_mrouter VNET(ip6_mrouter)
+#define V_ip6_sendredirects VNET(ip6_sendredirects)
+#define V_ip6_maxfragpackets VNET(ip6_maxfragpackets)
+#define V_ip6_maxfrags VNET(ip6_maxfrags)
+#define V_ip6_accept_rtadv VNET(ip6_accept_rtadv)
+#define V_ip6_keepfaith VNET(ip6_keepfaith)
+#define V_ip6_log_interval VNET(ip6_log_interval)
+#define V_ip6_log_time VNET(ip6_log_time)
+#define V_ip6_hdrnestlimit VNET(ip6_hdrnestlimit)
+#define V_ip6_dad_count VNET(ip6_dad_count)
+
+VNET_DECLARE(int, ip6_auto_flowlabel);
+VNET_DECLARE(int, ip6_auto_linklocal);
+#define V_ip6_auto_flowlabel VNET(ip6_auto_flowlabel)
+#define V_ip6_auto_linklocal VNET(ip6_auto_linklocal)
+
+VNET_DECLARE(int, ip6_use_tempaddr); /* Whether to use temporary addresses */
+VNET_DECLARE(int, ip6_prefer_tempaddr); /* Whether to prefer temporary
+ * addresses in the source address
+ * selection */
+#define V_ip6_use_tempaddr VNET(ip6_use_tempaddr)
+#define V_ip6_prefer_tempaddr VNET(ip6_prefer_tempaddr)
+
+VNET_DECLARE(int, ip6_use_defzone); /* Whether to use the default scope
+ * zone when unspecified */
+#define V_ip6_use_defzone VNET(ip6_use_defzone)
+
+VNET_DECLARE (struct pfil_head, inet6_pfil_hook); /* packet filter hooks */
+#define V_inet6_pfil_hook VNET(inet6_pfil_hook)
+#ifdef IPSTEALTH
+VNET_DECLARE(int, ip6stealth);
+#define V_ip6stealth VNET(ip6stealth)
+#endif
+
+extern struct pr_usrreqs rip6_usrreqs;
+struct sockopt;
+
+struct inpcb;
+
+int icmp6_ctloutput __P((struct socket *, struct sockopt *sopt));
+
+struct in6_ifaddr;
+void ip6_init __P((void));
+#ifdef VIMAGE
+void ip6_destroy __P((void));
+#endif
+int ip6proto_register(short);
+int ip6proto_unregister(short);
+
+void ip6_input __P((struct mbuf *));
+struct in6_ifaddr *ip6_getdstifaddr __P((struct mbuf *));
+void ip6_freepcbopts __P((struct ip6_pktopts *));
+
+int ip6_unknown_opt __P((u_int8_t *, struct mbuf *, int));
+char * ip6_get_prevhdr __P((struct mbuf *, int));
+int ip6_nexthdr __P((struct mbuf *, int, int, int *));
+int ip6_lasthdr __P((struct mbuf *, int, int, int *));
+
+struct ip6aux *ip6_addaux __P((struct mbuf *));
+struct ip6aux *ip6_findaux __P((struct mbuf *));
+void ip6_delaux __P((struct mbuf *));
+
+extern int (*ip6_mforward)(struct ip6_hdr *, struct ifnet *,
+ struct mbuf *);
+
+int ip6_process_hopopts __P((struct mbuf *, u_int8_t *, int, u_int32_t *,
+ u_int32_t *));
+struct mbuf **ip6_savecontrol_v4(struct inpcb *, struct mbuf *,
+ struct mbuf **, int *);
+void ip6_savecontrol __P((struct inpcb *, struct mbuf *, struct mbuf **));
+void ip6_notify_pmtu __P((struct inpcb *, struct sockaddr_in6 *,
+ u_int32_t *));
+int ip6_sysctl __P((int *, u_int, void *, size_t *, void *, size_t));
+
+void ip6_forward __P((struct mbuf *, int));
+
+void ip6_mloopback __P((struct ifnet *, struct mbuf *, struct sockaddr_in6 *));
+int ip6_output __P((struct mbuf *, struct ip6_pktopts *,
+ struct route_in6 *,
+ int,
+ struct ip6_moptions *, struct ifnet **,
+ struct inpcb *));
+int ip6_ctloutput __P((struct socket *, struct sockopt *));
+int ip6_raw_ctloutput __P((struct socket *, struct sockopt *));
+void ip6_initpktopts __P((struct ip6_pktopts *));
+int ip6_setpktopts __P((struct mbuf *, struct ip6_pktopts *,
+ struct ip6_pktopts *, struct ucred *, int));
+void ip6_clearpktopts __P((struct ip6_pktopts *, int));
+struct ip6_pktopts *ip6_copypktopts __P((struct ip6_pktopts *, int));
+int ip6_optlen __P((struct inpcb *));
+
+int route6_input __P((struct mbuf **, int *, int));
+
+void frag6_init __P((void));
+int frag6_input __P((struct mbuf **, int *, int));
+void frag6_slowtimo __P((void));
+void frag6_drain __P((void));
+
+void rip6_init __P((void));
+int rip6_input __P((struct mbuf **, int *, int));
+void rip6_ctlinput __P((int, struct sockaddr *, void *));
+int rip6_ctloutput __P((struct socket *, struct sockopt *));
+int rip6_output __P((struct mbuf *, ...));
+int rip6_usrreq __P((struct socket *,
+ int, struct mbuf *, struct mbuf *, struct mbuf *, struct thread *));
+
+int dest6_input __P((struct mbuf **, int *, int));
+int none_input __P((struct mbuf **, int *, int));
+
+int in6_selectsrc(struct sockaddr_in6 *, struct ip6_pktopts *,
+ struct inpcb *inp, struct route_in6 *, struct ucred *cred,
+ struct ifnet **, struct in6_addr *);
+int in6_selectroute __P((struct sockaddr_in6 *, struct ip6_pktopts *,
+ struct ip6_moptions *, struct route_in6 *, struct ifnet **,
+ struct rtentry **));
+u_int32_t ip6_randomid __P((void));
+u_int32_t ip6_randomflowlabel __P((void));
+#endif /* _KERNEL */
+
+#endif /* !_NETINET6_IP6_VAR_HH_ */
diff --git a/rtems/freebsd/netinet6/ip6protosw.h b/rtems/freebsd/netinet6/ip6protosw.h
new file mode 100644
index 00000000..ebb38c8d
--- /dev/null
+++ b/rtems/freebsd/netinet6/ip6protosw.h
@@ -0,0 +1,148 @@
+/*-
+ * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the project nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $KAME: ip6protosw.h,v 1.25 2001/09/26 06:13:03 keiichi Exp $
+ */
+
+/*-
+ * Copyright (c) 1982, 1986, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)protosw.h 8.1 (Berkeley) 6/2/93
+ * BSDI protosw.h,v 2.3 1996/10/11 16:02:40 pjd Exp
+ * $FreeBSD$
+ */
+
+#ifndef _NETINET6_IP6PROTOSW_HH_
+#define _NETINET6_IP6PROTOSW_HH_
+
+/*
+ * Protocol switch table for IPv6.
+ * All other definitions should refer to sys/protosw.h
+ */
+
+struct mbuf;
+struct sockaddr;
+struct socket;
+struct domain;
+struct thread;
+struct ip6_hdr;
+struct icmp6_hdr;
+struct in6_addr;
+struct pr_usrreqs;
+
+/*
+ * Argument type for the last argument of pr_ctlinput().
+ * It should be consulted only for the AF_INET6 family.
+ *
+ * IPv6 ICMP IPv6 [exthdrs] finalhdr payload
+ * ^ ^ ^ ^
+ * | | ip6c_ip6 ip6c_off
+ * | ip6c_icmp6
+ * ip6c_m
+ *
+ * ip6c_finaldst usually points to ip6c_ip6->ip6_dst. If the original
+ * (internal) packet carries a routing header, it may point to the final
+ * destination address in the routing header.
+ *
+ * ip6c_src: ip6c_ip6->ip6_src + scope info + flowlabel in ip6c_ip6
+ * (beware of flowlabel, if you try to compare it against others)
+ * ip6c_dst: ip6c_finaldst + scope info
+ */
+struct ip6ctlparam {
+ struct mbuf *ip6c_m; /* start of mbuf chain */
+ struct icmp6_hdr *ip6c_icmp6; /* icmp6 header of target packet */
+ struct ip6_hdr *ip6c_ip6; /* ip6 header of target packet */
+ int ip6c_off; /* offset of the target proto header */
+ struct sockaddr_in6 *ip6c_src; /* srcaddr w/ additional info */
+ struct sockaddr_in6 *ip6c_dst; /* (final) dstaddr w/ additional info */
+ struct in6_addr *ip6c_finaldst; /* final destination address */
+ void *ip6c_cmdarg; /* control command dependent data */
+ u_int8_t ip6c_nxt; /* final next header field */
+};
+
+struct ip6protosw {
+ short pr_type; /* socket type used for */
+ struct domain *pr_domain; /* domain protocol a member of */
+ short pr_protocol; /* protocol number */
+ short pr_flags; /* see below */
+
+/* protocol-protocol hooks */
+ int (*pr_input) /* input to protocol (from below) */
+ __P((struct mbuf **, int *, int));
+ int (*pr_output) /* output to protocol (from above) */
+ __P((struct mbuf *, ...));
+ void (*pr_ctlinput) /* control input (from below) */
+ __P((int, struct sockaddr *, void *));
+ int (*pr_ctloutput) /* control output (from above) */
+ __P((struct socket *, struct sockopt *));
+
+/* utility hooks */
+ void (*pr_init) /* initialization hook */
+ __P((void));
+ void (*pr_destroy) /* cleanup hook */
+ __P((void));
+
+ void (*pr_fasttimo) /* fast timeout (200ms) */
+ __P((void));
+ void (*pr_slowtimo) /* slow timeout (500ms) */
+ __P((void));
+ void (*pr_drain) /* flush any excess space possible */
+ __P((void));
+ struct pr_usrreqs *pr_usrreqs; /* supersedes pr_usrreq() */
+};
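+
+/*
+ * Illustrative sketch only: how a protocol might populate this switch.
+ * Real entries live in the inet6 domain's protocol table elsewhere in
+ * the tree; the frag6_* hooks are declared in ip6_var.h, while the
+ * rip6_usrreqs name is assumed here purely for the example.
+ *
+ *	static struct ip6protosw frag6_protosw_sketch = {
+ *		.pr_type	= SOCK_RAW,
+ *		.pr_protocol	= IPPROTO_FRAGMENT,
+ *		.pr_flags	= PR_ATOMIC|PR_ADDR,
+ *		.pr_input	= frag6_input,
+ *		.pr_init	= frag6_init,
+ *		.pr_slowtimo	= frag6_slowtimo,
+ *		.pr_drain	= frag6_drain,
+ *		.pr_usrreqs	= &rip6_usrreqs,
+ *	};
+ */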
+
+#ifdef _KERNEL
+extern struct ip6protosw inet6sw[];
+#endif
+
+#endif /* !_NETINET6_IP6PROTOSW_HH_ */
diff --git a/rtems/freebsd/netinet6/mld6.c b/rtems/freebsd/netinet6/mld6.c
new file mode 100644
index 00000000..cf3b930c
--- /dev/null
+++ b/rtems/freebsd/netinet6/mld6.c
@@ -0,0 +1,3311 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 2009 Bruce Simpson.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $KAME: mld6.c,v 1.27 2001/04/04 05:17:30 itojun Exp $
+ */
+
+/*-
+ * Copyright (c) 1988 Stephen Deering.
+ * Copyright (c) 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Stephen Deering of Stanford University.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)igmp.c 8.1 (Berkeley) 7/19/93
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/freebsd/local/opt_inet.h>
+#include <rtems/freebsd/local/opt_inet6.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/protosw.h>
+#include <rtems/freebsd/sys/sysctl.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/callout.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/module.h>
+#include <rtems/freebsd/sys/ktr.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/route.h>
+#include <rtems/freebsd/net/vnet.h>
+
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/in_var.h>
+#include <rtems/freebsd/netinet6/in6_var.h>
+#include <rtems/freebsd/netinet/ip6.h>
+#include <rtems/freebsd/netinet6/ip6_var.h>
+#include <rtems/freebsd/netinet6/scope6_var.h>
+#include <rtems/freebsd/netinet/icmp6.h>
+#include <rtems/freebsd/netinet6/mld6.h>
+#include <rtems/freebsd/netinet6/mld6_var.h>
+
+#include <rtems/freebsd/security/mac/mac_framework.h>
+
+#ifndef KTR_MLD
+#define KTR_MLD KTR_INET6
+#endif
+
+static struct mld_ifinfo *
+ mli_alloc_locked(struct ifnet *);
+static void mli_delete_locked(const struct ifnet *);
+static void mld_dispatch_packet(struct mbuf *);
+static void mld_dispatch_queue(struct ifqueue *, int);
+static void mld_final_leave(struct in6_multi *, struct mld_ifinfo *);
+static void mld_fasttimo_vnet(void);
+static int mld_handle_state_change(struct in6_multi *,
+ struct mld_ifinfo *);
+static int mld_initial_join(struct in6_multi *, struct mld_ifinfo *,
+ const int);
+#ifdef KTR
+static char * mld_rec_type_to_str(const int);
+#endif
+static void mld_set_version(struct mld_ifinfo *, const int);
+static void mld_slowtimo_vnet(void);
+static int mld_v1_input_query(struct ifnet *, const struct ip6_hdr *,
+ /*const*/ struct mld_hdr *);
+static int mld_v1_input_report(struct ifnet *, const struct ip6_hdr *,
+ /*const*/ struct mld_hdr *);
+static void mld_v1_process_group_timer(struct in6_multi *, const int);
+static void mld_v1_process_querier_timers(struct mld_ifinfo *);
+static int mld_v1_transmit_report(struct in6_multi *, const int);
+static void mld_v1_update_group(struct in6_multi *, const int);
+static void mld_v2_cancel_link_timers(struct mld_ifinfo *);
+static void mld_v2_dispatch_general_query(struct mld_ifinfo *);
+static struct mbuf *
+ mld_v2_encap_report(struct ifnet *, struct mbuf *);
+static int mld_v2_enqueue_filter_change(struct ifqueue *,
+ struct in6_multi *);
+static int mld_v2_enqueue_group_record(struct ifqueue *,
+ struct in6_multi *, const int, const int, const int,
+ const int);
+static int mld_v2_input_query(struct ifnet *, const struct ip6_hdr *,
+ struct mbuf *, const int, const int);
+static int mld_v2_merge_state_changes(struct in6_multi *,
+ struct ifqueue *);
+static void mld_v2_process_group_timers(struct mld_ifinfo *,
+ struct ifqueue *, struct ifqueue *,
+ struct in6_multi *, const int);
+static int mld_v2_process_group_query(struct in6_multi *,
+ struct mld_ifinfo *mli, int, struct mbuf *, const int);
+static int sysctl_mld_gsr(SYSCTL_HANDLER_ARGS);
+static int sysctl_mld_ifinfo(SYSCTL_HANDLER_ARGS);
+
+/*
+ * Normative references: RFC 2710, RFC 3590, RFC 3810.
+ *
+ * Locking:
+ * * The MLD subsystem lock ends up being system-wide for the moment,
+ * but could be per-VIMAGE later on.
+ * * The permitted lock order is: IN6_MULTI_LOCK, MLD_LOCK, IF_ADDR_LOCK.
+ * Any may be taken independently; if any are held at the same
+ * time, the above lock order must be followed.
+ * * IN6_MULTI_LOCK covers in_multi.
+ * * MLD_LOCK covers per-link state and any global variables in this file.
+ * * IF_ADDR_LOCK covers if_multiaddrs, which is used for a variety of
+ * per-link state iterators.
+ *
+ * XXX LOR PREVENTION
+ * A special case for IPv6 is the in6_setscope() routine. ip6_output()
+ * will not accept an ifp; it wants an embedded scope ID, unlike
+ * ip_output(), which happily takes the ifp given to it. The embedded
+ * scope ID is only used by MLD to select the outgoing interface.
+ *
+ * During interface attach and detach, MLD will take MLD_LOCK *after*
+ * the IF_AFDATA_LOCK.
+ * As in6_setscope() takes IF_AFDATA_LOCK then SCOPE_LOCK, we can't call
+ * it with MLD_LOCK held without triggering an LOR. A netisr with indirect
+ * dispatch could work around this, but we'd rather not do that, as it
+ * can introduce other races.
+ *
+ * As such, we exploit the fact that the scope ID is just the interface
+ * index, and embed it in the IPv6 destination address accordingly.
+ * This is potentially NOT VALID for MLDv1 reports, as they
+ * are always sent to the multicast group itself; as MLDv2
+ * reports are always sent to ff02::16, this is not an issue
+ * when MLDv2 is in use.
+ *
+ * This does not however eliminate the LOR when ip6_output() itself
+ * calls in6_setscope() internally whilst MLD_LOCK is held. This will
+ * trigger a LOR warning in WITNESS when the ifnet is detached.
+ *
+ * The right answer is probably to make IF_AFDATA_LOCK an rwlock, given
+ * how it's used across the network stack. Here we're simply exploiting
+ * the fact that MLD runs at a similar layer in the stack to scope6.c.
+ *
+ * VIMAGE:
+ * * Each in6_multi corresponds to an ifp, and each ifp corresponds
+ * to a vnet in ifp->if_vnet.
+ */
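+/*
+ * A minimal sketch of the permitted lock order described above, as it
+ * appears in the query input paths later in this file:
+ *
+ *	IN6_MULTI_LOCK();
+ *	MLD_LOCK();
+ *	IF_ADDR_LOCK(ifp);
+ *	... examine or update per-link and per-group state ...
+ *	IF_ADDR_UNLOCK(ifp);
+ *	MLD_UNLOCK();
+ *	IN6_MULTI_UNLOCK();
+ */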
+static struct mtx mld_mtx;
+MALLOC_DEFINE(M_MLD, "mld", "mld state");
+
+#define MLD_EMBEDSCOPE(pin6, zoneid) \
+ if (IN6_IS_SCOPE_LINKLOCAL(pin6) || \
+ IN6_IS_ADDR_MC_INTFACELOCAL(pin6)) \
+ (pin6)->s6_addr16[1] = htons((zoneid) & 0xFFFF) \
+
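+/*
+ * Sketch of intended usage, with names taken from this file: embed the
+ * receiving interface's index as the KAME scope ID before handing a
+ * link-local destination to the output path:
+ *
+ *	struct in6_addr dst = inm->in6m_addr;
+ *	MLD_EMBEDSCOPE(&dst, ifp->if_index);
+ */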
+/*
+ * VIMAGE-wide globals.
+ */
+static VNET_DEFINE(struct timeval, mld_gsrdelay) = {10, 0};
+static VNET_DEFINE(LIST_HEAD(, mld_ifinfo), mli_head);
+static VNET_DEFINE(int, interface_timers_running6);
+static VNET_DEFINE(int, state_change_timers_running6);
+static VNET_DEFINE(int, current_state_timers_running6);
+
+#define V_mld_gsrdelay VNET(mld_gsrdelay)
+#define V_mli_head VNET(mli_head)
+#define V_interface_timers_running6 VNET(interface_timers_running6)
+#define V_state_change_timers_running6 VNET(state_change_timers_running6)
+#define V_current_state_timers_running6 VNET(current_state_timers_running6)
+
+SYSCTL_DECL(_net_inet6); /* Note: Not in any common header. */
+
+SYSCTL_NODE(_net_inet6, OID_AUTO, mld, CTLFLAG_RW, 0,
+ "IPv6 Multicast Listener Discovery");
+
+/*
+ * Virtualized sysctls.
+ */
+SYSCTL_VNET_PROC(_net_inet6_mld, OID_AUTO, gsrdelay,
+ CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
+ &VNET_NAME(mld_gsrdelay.tv_sec), 0, sysctl_mld_gsr, "I",
+ "Rate limit for MLDv2 Group-and-Source queries in seconds");
+
+/*
+ * Non-virtualized sysctls.
+ */
+SYSCTL_NODE(_net_inet6_mld, OID_AUTO, ifinfo, CTLFLAG_RD | CTLFLAG_MPSAFE,
+ sysctl_mld_ifinfo, "Per-interface MLDv2 state");
+
+static int mld_v1enable = 1;
+SYSCTL_INT(_net_inet6_mld, OID_AUTO, v1enable, CTLFLAG_RW,
+ &mld_v1enable, 0, "Enable fallback to MLDv1");
+TUNABLE_INT("net.inet6.mld.v1enable", &mld_v1enable);
+
+static int mld_use_allow = 1;
+SYSCTL_INT(_net_inet6_mld, OID_AUTO, use_allow, CTLFLAG_RW,
+ &mld_use_allow, 0, "Use ALLOW/BLOCK for RFC 4604 SSM joins/leaves");
+TUNABLE_INT("net.inet6.mld.use_allow", &mld_use_allow);
+
+/*
+ * Packed Router Alert option structure declaration.
+ */
+struct mld_raopt {
+ struct ip6_hbh hbh;
+ struct ip6_opt pad;
+ struct ip6_opt_router ra;
+} __packed;
+
+/*
+ * Router Alert hop-by-hop option header.
+ */
+static struct mld_raopt mld_ra = {
+ .hbh = { 0, 0 },
+ .pad = { .ip6o_type = IP6OPT_PADN, 0 },
+ .ra = {
+ .ip6or_type = IP6OPT_ROUTER_ALERT,
+ .ip6or_len = IP6OPT_RTALERT_LEN - 2,
+ .ip6or_value[0] = ((IP6OPT_RTALERT_MLD >> 8) & 0xFF),
+ .ip6or_value[1] = (IP6OPT_RTALERT_MLD & 0xFF)
+ }
+};
+static struct ip6_pktopts mld_po;
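+
+/*
+ * A sketch, under the assumption that initialization follows the usual
+ * FreeBSD pattern, of how mld_ra and mld_po would be wired together in
+ * the module's init path (not part of this hunk):
+ *
+ *	ip6_initpktopts(&mld_po);
+ *	mld_po.ip6po_hbh = &mld_ra.hbh;
+ *	mld_po.ip6po_prefer_tempaddr = IP6PO_TEMPADDR_NOTPREFER;
+ *	mld_po.ip6po_flags = IP6PO_DONTFRAG;
+ */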
+
+static __inline void
+mld_save_context(struct mbuf *m, struct ifnet *ifp)
+{
+
+#ifdef VIMAGE
+ m->m_pkthdr.header = ifp->if_vnet;
+#endif /* VIMAGE */
+ m->m_pkthdr.flowid = ifp->if_index;
+}
+
+static __inline void
+mld_scrub_context(struct mbuf *m)
+{
+
+ m->m_pkthdr.header = NULL;
+ m->m_pkthdr.flowid = 0;
+}
+
+/*
+ * Restore context from a queued output chain.
+ * Return saved ifindex.
+ *
+ * VIMAGE: The assertion is there to make sure that we
+ * actually called CURVNET_SET() with what's in the mbuf chain.
+ */
+static __inline uint32_t
+mld_restore_context(struct mbuf *m)
+{
+
+#if defined(VIMAGE) && defined(INVARIANTS)
+ KASSERT(curvnet == m->m_pkthdr.header,
+ ("%s: called when curvnet was not restored", __func__));
+#endif
+ return (m->m_pkthdr.flowid);
+}
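+
+/*
+ * Typical pairing, in sketch form: the output path stamps a chain with
+ * mld_save_context(m, ifp) before queueing it; the dispatch path later
+ * recovers the ifindex via mld_restore_context(m) and clears the saved
+ * pointers with mld_scrub_context(m) before transmission.
+ */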
+
+/*
+ * Retrieve or set threshold between group-source queries in seconds.
+ *
+ * VIMAGE: Assume curvnet set by caller.
+ * SMPng: NOTE: Serialized by MLD lock.
+ */
+static int
+sysctl_mld_gsr(SYSCTL_HANDLER_ARGS)
+{
+ int error;
+ int i;
+
+ error = sysctl_wire_old_buffer(req, sizeof(int));
+ if (error)
+ return (error);
+
+ MLD_LOCK();
+
+ i = V_mld_gsrdelay.tv_sec;
+
+ error = sysctl_handle_int(oidp, &i, 0, req);
+ if (error || !req->newptr)
+ goto out_locked;
+
+ if (i < -1 || i >= 60) {
+ error = EINVAL;
+ goto out_locked;
+ }
+
+ CTR2(KTR_MLD, "change mld_gsrdelay from %d to %d",
+ V_mld_gsrdelay.tv_sec, i);
+ V_mld_gsrdelay.tv_sec = i;
+
+out_locked:
+ MLD_UNLOCK();
+ return (error);
+}
+
+/*
+ * Expose struct mld_ifinfo to userland, keyed by ifindex.
+ * For use by ifmcstat(8).
+ *
+ * SMPng: NOTE: Does an unlocked ifindex space read.
+ * VIMAGE: Assume curvnet set by caller. The node handler itself
+ * is not directly virtualized.
+ */
+static int
+sysctl_mld_ifinfo(SYSCTL_HANDLER_ARGS)
+{
+ int *name;
+ int error;
+ u_int namelen;
+ struct ifnet *ifp;
+ struct mld_ifinfo *mli;
+
+ name = (int *)arg1;
+ namelen = arg2;
+
+ if (req->newptr != NULL)
+ return (EPERM);
+
+ if (namelen != 1)
+ return (EINVAL);
+
+ error = sysctl_wire_old_buffer(req, sizeof(struct mld_ifinfo));
+ if (error)
+ return (error);
+
+ IN6_MULTI_LOCK();
+ MLD_LOCK();
+
+ if (name[0] <= 0 || name[0] > V_if_index) {
+ error = ENOENT;
+ goto out_locked;
+ }
+
+ error = ENOENT;
+
+ ifp = ifnet_byindex(name[0]);
+ if (ifp == NULL)
+ goto out_locked;
+
+ LIST_FOREACH(mli, &V_mli_head, mli_link) {
+ if (ifp == mli->mli_ifp) {
+ error = SYSCTL_OUT(req, mli,
+ sizeof(struct mld_ifinfo));
+ break;
+ }
+ }
+
+out_locked:
+ MLD_UNLOCK();
+ IN6_MULTI_UNLOCK();
+ return (error);
+}
+
+/*
+ * Dispatch an entire queue of pending packet chains.
+ * VIMAGE: Assumes the vnet pointer has been set.
+ */
+static void
+mld_dispatch_queue(struct ifqueue *ifq, int limit)
+{
+ struct mbuf *m;
+
+ for (;;) {
+ _IF_DEQUEUE(ifq, m);
+ if (m == NULL)
+ break;
+ CTR3(KTR_MLD, "%s: dispatch %p from %p", __func__, m, ifq);
+ mld_dispatch_packet(m);
+ if (--limit == 0)
+ break;
+ }
+}
+
+/*
+ * Filter outgoing MLD report state by group.
+ *
+ * Reports are ALWAYS suppressed for ALL-HOSTS (ff02::1)
+ * and node-local addresses. However, kernel and socket consumers
+ * always embed the KAME scope ID in the address provided, so strip it
+ * when performing comparison.
+ * Note: This is not the same as the *multicast* scope.
+ *
+ * Return zero if the given group is one for which MLD reports
+ * should be suppressed, or non-zero if reports should be issued.
+ */
+static __inline int
+mld_is_addr_reported(const struct in6_addr *addr)
+{
+
+ KASSERT(IN6_IS_ADDR_MULTICAST(addr), ("%s: not multicast", __func__));
+
+ if (IPV6_ADDR_MC_SCOPE(addr) == IPV6_ADDR_SCOPE_NODELOCAL)
+ return (0);
+
+ if (IPV6_ADDR_MC_SCOPE(addr) == IPV6_ADDR_SCOPE_LINKLOCAL) {
+ struct in6_addr tmp = *addr;
+ in6_clearscope(&tmp);
+ if (IN6_ARE_ADDR_EQUAL(&tmp, &in6addr_linklocal_allnodes))
+ return (0);
+ }
+
+ return (1);
+}
+
+/*
+ * Attach MLD when PF_INET6 is attached to an interface.
+ *
+ * SMPng: Normally called with IF_AFDATA_LOCK held.
+ */
+struct mld_ifinfo *
+mld_domifattach(struct ifnet *ifp)
+{
+ struct mld_ifinfo *mli;
+
+ CTR3(KTR_MLD, "%s: called for ifp %p(%s)",
+ __func__, ifp, ifp->if_xname);
+
+ MLD_LOCK();
+
+ mli = mli_alloc_locked(ifp);
+ if (!(ifp->if_flags & IFF_MULTICAST))
+ mli->mli_flags |= MLIF_SILENT;
+ if (mld_use_allow)
+ mli->mli_flags |= MLIF_USEALLOW;
+
+ MLD_UNLOCK();
+
+ return (mli);
+}
+
+/*
+ * VIMAGE: assume curvnet set by caller.
+ */
+static struct mld_ifinfo *
+mli_alloc_locked(/*const*/ struct ifnet *ifp)
+{
+ struct mld_ifinfo *mli;
+
+ MLD_LOCK_ASSERT();
+
+ mli = malloc(sizeof(struct mld_ifinfo), M_MLD, M_NOWAIT|M_ZERO);
+ if (mli == NULL)
+ goto out;
+
+ mli->mli_ifp = ifp;
+ mli->mli_version = MLD_VERSION_2;
+ mli->mli_flags = 0;
+ mli->mli_rv = MLD_RV_INIT;
+ mli->mli_qi = MLD_QI_INIT;
+ mli->mli_qri = MLD_QRI_INIT;
+ mli->mli_uri = MLD_URI_INIT;
+
+ SLIST_INIT(&mli->mli_relinmhead);
+
+ /*
+ * Responses to general queries are subject to bounds.
+ */
+ IFQ_SET_MAXLEN(&mli->mli_gq, MLD_MAX_RESPONSE_PACKETS);
+
+ LIST_INSERT_HEAD(&V_mli_head, mli, mli_link);
+
+ CTR2(KTR_MLD, "allocate mld_ifinfo for ifp %p(%s)",
+ ifp, ifp->if_xname);
+
+out:
+ return (mli);
+}
+
+/*
+ * Hook for ifdetach.
+ *
+ * NOTE: Some finalization tasks need to run before the protocol domain
+ * is detached, but also before the link layer does its cleanup.
+ * Run before link-layer cleanup; clean up groups, but do not free MLD state.
+ *
+ * SMPng: Caller must hold IN6_MULTI_LOCK().
+ * Must take IF_ADDR_LOCK() to cover if_multiaddrs iterator.
+ * XXX This routine is also bitten by unlocked ifma_protospec access.
+ */
+void
+mld_ifdetach(struct ifnet *ifp)
+{
+ struct mld_ifinfo *mli;
+ struct ifmultiaddr *ifma;
+ struct in6_multi *inm, *tinm;
+
+ CTR3(KTR_MLD, "%s: called for ifp %p(%s)", __func__, ifp,
+ ifp->if_xname);
+
+ IN6_MULTI_LOCK_ASSERT();
+ MLD_LOCK();
+
+ mli = MLD_IFINFO(ifp);
+ if (mli->mli_version == MLD_VERSION_2) {
+ IF_ADDR_LOCK(ifp);
+ TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
+ if (ifma->ifma_addr->sa_family != AF_INET6 ||
+ ifma->ifma_protospec == NULL)
+ continue;
+ inm = (struct in6_multi *)ifma->ifma_protospec;
+ if (inm->in6m_state == MLD_LEAVING_MEMBER) {
+ SLIST_INSERT_HEAD(&mli->mli_relinmhead,
+ inm, in6m_nrele);
+ }
+ in6m_clear_recorded(inm);
+ }
+ IF_ADDR_UNLOCK(ifp);
+ SLIST_FOREACH_SAFE(inm, &mli->mli_relinmhead, in6m_nrele,
+ tinm) {
+ SLIST_REMOVE_HEAD(&mli->mli_relinmhead, in6m_nrele);
+ in6m_release_locked(inm);
+ }
+ }
+
+ MLD_UNLOCK();
+}
+
+/*
+ * Hook for domifdetach.
+ * Runs after link-layer cleanup; free MLD state.
+ *
+ * SMPng: Normally called with IF_AFDATA_LOCK held.
+ */
+void
+mld_domifdetach(struct ifnet *ifp)
+{
+
+ CTR3(KTR_MLD, "%s: called for ifp %p(%s)",
+ __func__, ifp, ifp->if_xname);
+
+ MLD_LOCK();
+ mli_delete_locked(ifp);
+ MLD_UNLOCK();
+}
+
+static void
+mli_delete_locked(const struct ifnet *ifp)
+{
+ struct mld_ifinfo *mli, *tmli;
+
+ CTR3(KTR_MLD, "%s: freeing mld_ifinfo for ifp %p(%s)",
+ __func__, ifp, ifp->if_xname);
+
+ MLD_LOCK_ASSERT();
+
+ LIST_FOREACH_SAFE(mli, &V_mli_head, mli_link, tmli) {
+ if (mli->mli_ifp == ifp) {
+ /*
+ * Free deferred General Query responses.
+ */
+ _IF_DRAIN(&mli->mli_gq);
+
+ LIST_REMOVE(mli, mli_link);
+
+ KASSERT(SLIST_EMPTY(&mli->mli_relinmhead),
+ ("%s: there are dangling in_multi references",
+ __func__));
+
+ free(mli, M_MLD);
+ return;
+ }
+ }
+#ifdef INVARIANTS
+ panic("%s: mld_ifinfo not found for ifp %p\n", __func__, ifp);
+#endif
+}
+
+/*
+ * Process a received MLDv1 general or address-specific query.
+ * Assumes that the query header has been pulled up to sizeof(mld_hdr).
+ *
+ * NOTE: Can't be fully const correct as we temporarily embed scope ID in
+ * mld_addr. This is OK as we own the mbuf chain.
+ */
+static int
+mld_v1_input_query(struct ifnet *ifp, const struct ip6_hdr *ip6,
+ /*const*/ struct mld_hdr *mld)
+{
+ struct ifmultiaddr *ifma;
+ struct mld_ifinfo *mli;
+ struct in6_multi *inm;
+ int is_general_query;
+ uint16_t timer;
+#ifdef KTR
+ char ip6tbuf[INET6_ADDRSTRLEN];
+#endif
+
+ is_general_query = 0;
+
+ if (!mld_v1enable) {
+ CTR3(KTR_MLD, "ignore v1 query %s on ifp %p(%s)",
+ ip6_sprintf(ip6tbuf, &mld->mld_addr),
+ ifp, ifp->if_xname);
+ return (0);
+ }
+
+ /*
+ * RFC3810 Section 6.2: MLD queries must originate from
+ * a router's link-local address.
+ */
+ if (!IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_src)) {
+ CTR3(KTR_MLD, "ignore v1 query src %s on ifp %p(%s)",
+ ip6_sprintf(ip6tbuf, &ip6->ip6_src),
+ ifp, ifp->if_xname);
+ return (0);
+ }
+
+ /*
+ * Do address field validation upfront before we accept
+ * the query.
+ */
+ if (IN6_IS_ADDR_UNSPECIFIED(&mld->mld_addr)) {
+ /*
+ * MLDv1 General Query.
+ * If this was not sent to the all-nodes group, ignore it.
+ */
+ struct in6_addr dst;
+
+ dst = ip6->ip6_dst;
+ in6_clearscope(&dst);
+ if (!IN6_ARE_ADDR_EQUAL(&dst, &in6addr_linklocal_allnodes))
+ return (EINVAL);
+ is_general_query = 1;
+ } else {
+ /*
+ * Embed scope ID of receiving interface in MLD query for
+ * lookup whilst we don't hold other locks.
+ */
+ in6_setscope(&mld->mld_addr, ifp, NULL);
+ }
+
+ IN6_MULTI_LOCK();
+ MLD_LOCK();
+ IF_ADDR_LOCK(ifp);
+
+ /*
+ * Switch to MLDv1 host compatibility mode.
+ */
+ mli = MLD_IFINFO(ifp);
+ KASSERT(mli != NULL, ("%s: no mld_ifinfo for ifp %p", __func__, ifp));
+ mld_set_version(mli, MLD_VERSION_1);
+
+ timer = (ntohs(mld->mld_maxdelay) * PR_FASTHZ) / MLD_TIMER_SCALE;
+ if (timer == 0)
+ timer = 1;
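+ /*
+ * For example, assuming PR_FASTHZ is 5 (200ms fast ticks) and
+ * MLD_TIMER_SCALE is 1000, a maximum response delay of 10000
+ * milliseconds maps to 50 fast-timeout ticks.
+ */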
+
+ if (is_general_query) {
+ /*
+ * For each reporting group joined on this
+ * interface, kick the report timer.
+ */
+ CTR2(KTR_MLD, "process v1 general query on ifp %p(%s)",
+ ifp, ifp->if_xname);
+ TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
+ if (ifma->ifma_addr->sa_family != AF_INET6 ||
+ ifma->ifma_protospec == NULL)
+ continue;
+ inm = (struct in6_multi *)ifma->ifma_protospec;
+ mld_v1_update_group(inm, timer);
+ }
+ } else {
+ /*
+ * MLDv1 Group-Specific Query.
+ * If this is a group-specific MLDv1 query, we need only
+ * look up the single group to process it.
+ */
+ inm = in6m_lookup_locked(ifp, &mld->mld_addr);
+ if (inm != NULL) {
+ CTR3(KTR_MLD, "process v1 query %s on ifp %p(%s)",
+ ip6_sprintf(ip6tbuf, &mld->mld_addr),
+ ifp, ifp->if_xname);
+ mld_v1_update_group(inm, timer);
+ }
+ /* XXX Clear embedded scope ID as userland won't expect it. */
+ in6_clearscope(&mld->mld_addr);
+ }
+
+ IF_ADDR_UNLOCK(ifp);
+ MLD_UNLOCK();
+ IN6_MULTI_UNLOCK();
+
+ return (0);
+}
+
+/*
+ * Update the report timer on a group in response to an MLDv1 query.
+ *
+ * If we are becoming the reporting member for this group, start the timer.
+ * If we already are the reporting member for this group, and timer is
+ * below the threshold, reset it.
+ *
+ * We may be updating the group for the first time since we switched
+ * to MLDv2. If we are, then we must clear any recorded source lists,
+ * and transition to REPORTING state; the group timer is overloaded
+ * for group and group-source query responses.
+ *
+ * Unlike MLDv2, the delay per group should be jittered
+ * to avoid bursts of MLDv1 reports.
+ */
+static void
+mld_v1_update_group(struct in6_multi *inm, const int timer)
+{
+#ifdef KTR
+ char ip6tbuf[INET6_ADDRSTRLEN];
+#endif
+
+ CTR4(KTR_MLD, "%s: %s/%s timer=%d", __func__,
+ ip6_sprintf(ip6tbuf, &inm->in6m_addr),
+ inm->in6m_ifp->if_xname, timer);
+
+ IN6_MULTI_LOCK_ASSERT();
+
+ switch (inm->in6m_state) {
+ case MLD_NOT_MEMBER:
+ case MLD_SILENT_MEMBER:
+ break;
+ case MLD_REPORTING_MEMBER:
+ if (inm->in6m_timer != 0 &&
+ inm->in6m_timer <= timer) {
+ CTR1(KTR_MLD, "%s: REPORTING and timer running, "
+ "skipping.", __func__);
+ break;
+ }
+ /* FALLTHROUGH */
+ case MLD_SG_QUERY_PENDING_MEMBER:
+ case MLD_G_QUERY_PENDING_MEMBER:
+ case MLD_IDLE_MEMBER:
+ case MLD_LAZY_MEMBER:
+ case MLD_AWAKENING_MEMBER:
+ CTR1(KTR_MLD, "%s: ->REPORTING", __func__);
+ inm->in6m_state = MLD_REPORTING_MEMBER;
+ inm->in6m_timer = MLD_RANDOM_DELAY(timer);
+ V_current_state_timers_running6 = 1;
+ break;
+ case MLD_SLEEPING_MEMBER:
+ CTR1(KTR_MLD, "%s: ->AWAKENING", __func__);
+ inm->in6m_state = MLD_AWAKENING_MEMBER;
+ break;
+ case MLD_LEAVING_MEMBER:
+ break;
+ }
+}
+
+/*
+ * Process a received MLDv2 general, group-specific or
+ * group-and-source-specific query.
+ *
+ * Assumes that the query header has been pulled up to sizeof(mldv2_query).
+ *
+ * Return 0 if successful, otherwise an appropriate error code is returned.
+ */
+static int
+mld_v2_input_query(struct ifnet *ifp, const struct ip6_hdr *ip6,
+ struct mbuf *m, const int off, const int icmp6len)
+{
+ struct mld_ifinfo *mli;
+ struct mldv2_query *mld;
+ struct in6_multi *inm;
+ uint32_t maxdelay, nsrc, qqi;
+ int is_general_query;
+ uint16_t timer;
+ uint8_t qrv;
+#ifdef KTR
+ char ip6tbuf[INET6_ADDRSTRLEN];
+#endif
+
+ is_general_query = 0;
+
+ /*
+ * RFC3810 Section 6.2: MLD queries must originate from
+ * a router's link-local address.
+ */
+ if (!IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_src)) {
+ CTR3(KTR_MLD, "ignore v1 query src %s on ifp %p(%s)",
+ ip6_sprintf(ip6tbuf, &ip6->ip6_src),
+ ifp, ifp->if_xname);
+ return (0);
+ }
+
+ CTR2(KTR_MLD, "input v2 query on ifp %p(%s)", ifp, ifp->if_xname);
+
+ mld = (struct mldv2_query *)(mtod(m, uint8_t *) + off);
+
+ maxdelay = ntohs(mld->mld_maxdelay); /* in 1/10ths of a second */
+ if (maxdelay >= 32768) {
+ maxdelay = (MLD_MRC_MANT(maxdelay) | 0x1000) <<
+ (MLD_MRC_EXP(maxdelay) + 3);
+ }
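+ /*
+ * Worked example, assuming the MLD_MRC_* macros follow the
+ * RFC 3810 Section 5.1.3 encoding (mantissa in the low 12
+ * bits, exponent in bits 12-14): a Maximum Response Code of
+ * 0x8400 decodes to (0x400 | 0x1000) << 3 = 40960 tenths of
+ * a second, i.e. 4096 seconds.
+ */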
+ timer = (maxdelay * PR_FASTHZ) / MLD_TIMER_SCALE;
+ if (timer == 0)
+ timer = 1;
+
+ qrv = MLD_QRV(mld->mld_misc);
+ if (qrv < 2) {
+ CTR3(KTR_MLD, "%s: clamping qrv %d to %d", __func__,
+ qrv, MLD_RV_INIT);
+ qrv = MLD_RV_INIT;
+ }
+
+ qqi = mld->mld_qqi;
+ if (qqi >= 128) {
+ qqi = MLD_QQIC_MANT(mld->mld_qqi) <<
+ (MLD_QQIC_EXP(mld->mld_qqi) + 3);
+ }
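+ /*
+ * QQIC values of 128 or more are likewise in mantissa/exponent
+ * form; assuming the MLD_QQIC_* macros follow RFC 3810 Section
+ * 5.1.9 (QQI = (mant | 0x10) << (exp + 3)), a QQIC of 0x84
+ * decodes to a 160 second Querier's Query Interval.
+ */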
+
+ nsrc = ntohs(mld->mld_numsrc);
+ if (nsrc > MLD_MAX_GS_SOURCES)
+ return (EMSGSIZE);
+ if (icmp6len < sizeof(struct mldv2_query) +
+ (nsrc * sizeof(struct in6_addr)))
+ return (EMSGSIZE);
+
+ /*
+ * Do further input validation upfront to avoid resetting timers
+ * should we need to discard this query.
+ */
+ if (IN6_IS_ADDR_UNSPECIFIED(&mld->mld_addr)) {
+ /*
+ * General Queries SHOULD be directed to ff02::1.
+ * A general query with a source list has undefined
+ * behaviour; discard it.
+ */
+ struct in6_addr dst;
+
+ dst = ip6->ip6_dst;
+ in6_clearscope(&dst);
+ if (!IN6_ARE_ADDR_EQUAL(&dst, &in6addr_linklocal_allnodes) ||
+ nsrc > 0)
+ return (EINVAL);
+ is_general_query = 1;
+ } else {
+ /*
+ * Embed scope ID of receiving interface in MLD query for
+ * lookup whilst we don't hold other locks (due to KAME
+ * locking lameness). We own this mbuf chain just now.
+ */
+ in6_setscope(&mld->mld_addr, ifp, NULL);
+ }
+
+ IN6_MULTI_LOCK();
+ MLD_LOCK();
+ IF_ADDR_LOCK(ifp);
+
+ mli = MLD_IFINFO(ifp);
+ KASSERT(mli != NULL, ("%s: no mld_ifinfo for ifp %p", __func__, ifp));
+
+ /*
+ * Discard the v2 query if we're in Compatibility Mode.
+ * The RFC is pretty clear that hosts need to stay in MLDv1 mode
+ * until the Old Version Querier Present timer expires.
+ */
+ if (mli->mli_version != MLD_VERSION_2)
+ goto out_locked;
+
+ mld_set_version(mli, MLD_VERSION_2);
+ mli->mli_rv = qrv;
+ mli->mli_qi = qqi;
+ mli->mli_qri = maxdelay;
+
+ CTR4(KTR_MLD, "%s: qrv %d qi %d maxdelay %d", __func__, qrv, qqi,
+ maxdelay);
+
+ if (is_general_query) {
+ /*
+ * MLDv2 General Query.
+ *
+ * Schedule a current-state report on this ifp for
+ * all groups, possibly containing source lists.
+ *
+ * If there is a pending General Query response
+ * scheduled earlier than the selected delay, do
+ * not schedule any other reports.
+ * Otherwise, reset the interface timer.
+ */
+ CTR2(KTR_MLD, "process v2 general query on ifp %p(%s)",
+ ifp, ifp->if_xname);
+ if (mli->mli_v2_timer == 0 || mli->mli_v2_timer >= timer) {
+ mli->mli_v2_timer = MLD_RANDOM_DELAY(timer);
+ V_interface_timers_running6 = 1;
+ }
+ } else {
+ /*
+ * MLDv2 Group-specific or Group-and-source-specific Query.
+ *
+ * Group-source-specific queries are throttled on
+ * a per-group basis to defeat denial-of-service attempts.
+ * Queries for groups we are not a member of on this
+ * link are simply ignored.
+ */
+ inm = in6m_lookup_locked(ifp, &mld->mld_addr);
+ if (inm == NULL)
+ goto out_locked;
+ if (nsrc > 0) {
+ if (!ratecheck(&inm->in6m_lastgsrtv,
+ &V_mld_gsrdelay)) {
+ CTR1(KTR_MLD, "%s: GS query throttled.",
+ __func__);
+ goto out_locked;
+ }
+ }
+ CTR2(KTR_MLD, "process v2 group query on ifp %p(%s)",
+ ifp, ifp->if_xname);
+ /*
+ * If there is a pending General Query response
+ * scheduled sooner than the selected delay, no
+ * further report need be scheduled.
+ * Otherwise, prepare to respond to the
+ * group-specific or group-and-source query.
+ */
+ if (mli->mli_v2_timer == 0 || mli->mli_v2_timer >= timer)
+ mld_v2_process_group_query(inm, mli, timer, m, off);
+
+ /* XXX Clear embedded scope ID as userland won't expect it. */
+ in6_clearscope(&mld->mld_addr);
+ }
+
+out_locked:
+ IF_ADDR_UNLOCK(ifp);
+ MLD_UNLOCK();
+ IN6_MULTI_UNLOCK();
+
+ return (0);
+}
+
+/*
+ * Process a received MLDv2 group-specific or group-and-source-specific
+ * query.
+ * Return <0 if any error occurred. Currently this is ignored.
+ */
+static int
+mld_v2_process_group_query(struct in6_multi *inm, struct mld_ifinfo *mli,
+ int timer, struct mbuf *m0, const int off)
+{
+ struct mldv2_query *mld;
+ int retval;
+ uint16_t nsrc;
+
+ IN6_MULTI_LOCK_ASSERT();
+ MLD_LOCK_ASSERT();
+
+ retval = 0;
+ mld = (struct mldv2_query *)(mtod(m0, uint8_t *) + off);
+
+ switch (inm->in6m_state) {
+ case MLD_NOT_MEMBER:
+ case MLD_SILENT_MEMBER:
+ case MLD_SLEEPING_MEMBER:
+ case MLD_LAZY_MEMBER:
+ case MLD_AWAKENING_MEMBER:
+ case MLD_IDLE_MEMBER:
+ case MLD_LEAVING_MEMBER:
+ return (retval);
+ break;
+ case MLD_REPORTING_MEMBER:
+ case MLD_G_QUERY_PENDING_MEMBER:
+ case MLD_SG_QUERY_PENDING_MEMBER:
+ break;
+ }
+
+ nsrc = ntohs(mld->mld_numsrc);
+
+ /*
+ * Deal with group-specific queries upfront.
+ * If any group query is already pending, purge any recorded
+ * source-list state if it exists, and schedule a query response
+ * for this group-specific query.
+ */
+ if (nsrc == 0) {
+ if (inm->in6m_state == MLD_G_QUERY_PENDING_MEMBER ||
+ inm->in6m_state == MLD_SG_QUERY_PENDING_MEMBER) {
+ in6m_clear_recorded(inm);
+ timer = min(inm->in6m_timer, timer);
+ }
+ inm->in6m_state = MLD_G_QUERY_PENDING_MEMBER;
+ inm->in6m_timer = MLD_RANDOM_DELAY(timer);
+ V_current_state_timers_running6 = 1;
+ return (retval);
+ }
+
+ /*
+ * Deal with the case where a group-and-source-specific query has
+ * been received but a group-specific query is already pending.
+ */
+ if (inm->in6m_state == MLD_G_QUERY_PENDING_MEMBER) {
+ timer = min(inm->in6m_timer, timer);
+ inm->in6m_timer = MLD_RANDOM_DELAY(timer);
+ V_current_state_timers_running6 = 1;
+ return (retval);
+ }
+
+ /*
+ * Finally, deal with the case where a group-and-source-specific
+ * query has been received, whether or not a response to a previous
+ * g-s-r query already exists.
+ * In this case, we need to parse the source list which the Querier
+ * has provided us with and check if we have any source-list filter
+ * entries at T1 for these sources. If we do not, there is no need
+ * to schedule a report and the query may be dropped.
+ * If we do, we must record them and schedule a current-state
+ * report for those sources.
+ */
+ if (inm->in6m_nsrc > 0) {
+ struct mbuf *m;
+ uint8_t *sp;
+ int i, nrecorded;
+ int soff;
+
+ m = m0;
+ soff = off + sizeof(struct mldv2_query);
+ nrecorded = 0;
+ for (i = 0; i < nsrc; i++) {
+ sp = mtod(m, uint8_t *) + soff;
+ retval = in6m_record_source(inm,
+ (const struct in6_addr *)sp);
+ if (retval < 0)
+ break;
+ nrecorded += retval;
+ soff += sizeof(struct in6_addr);
+ if (soff >= m->m_len) {
+ soff = soff - m->m_len;
+ m = m->m_next;
+ if (m == NULL)
+ break;
+ }
+ }
+ if (nrecorded > 0) {
+ CTR1(KTR_MLD,
+ "%s: schedule response to SG query", __func__);
+ inm->in6m_state = MLD_SG_QUERY_PENDING_MEMBER;
+ inm->in6m_timer = MLD_RANDOM_DELAY(timer);
+ V_current_state_timers_running6 = 1;
+ }
+ }
+
+ return (retval);
+}
+
+/*
+ * Process a received MLDv1 host membership report.
+ * Assumes mld points to mld_hdr in pulled up mbuf chain.
+ *
+ * NOTE: Can't be fully const correct as we temporarily embed scope ID in
+ * mld_addr. This is OK as we own the mbuf chain.
+ */
+static int
+mld_v1_input_report(struct ifnet *ifp, const struct ip6_hdr *ip6,
+ /*const*/ struct mld_hdr *mld)
+{
+ struct in6_addr src, dst;
+ struct in6_ifaddr *ia;
+ struct in6_multi *inm;
+#ifdef KTR
+ char ip6tbuf[INET6_ADDRSTRLEN];
+#endif
+
+ if (!mld_v1enable) {
+ CTR3(KTR_MLD, "ignore v1 report %s on ifp %p(%s)",
+ ip6_sprintf(ip6tbuf, &mld->mld_addr),
+ ifp, ifp->if_xname);
+ return (0);
+ }
+
+ if (ifp->if_flags & IFF_LOOPBACK)
+ return (0);
+
+ /*
+ * MLDv1 reports must originate from a host's link-local address,
+ * or the unspecified address (when booting).
+ */
+ src = ip6->ip6_src;
+ in6_clearscope(&src);
+ if (!IN6_IS_SCOPE_LINKLOCAL(&src) && !IN6_IS_ADDR_UNSPECIFIED(&src)) {
+ CTR3(KTR_MLD, "ignore v1 query src %s on ifp %p(%s)",
+ ip6_sprintf(ip6tbuf, &ip6->ip6_src),
+ ifp, ifp->if_xname);
+ return (EINVAL);
+ }
+
+ /*
+ * RFC2710 Section 4: MLDv1 reports must pertain to a multicast
+ * group, and must be directed to the group itself.
+ */
+ dst = ip6->ip6_dst;
+ in6_clearscope(&dst);
+ if (!IN6_IS_ADDR_MULTICAST(&mld->mld_addr) ||
+ !IN6_ARE_ADDR_EQUAL(&mld->mld_addr, &dst)) {
+ CTR3(KTR_MLD, "ignore v1 query dst %s on ifp %p(%s)",
+ ip6_sprintf(ip6tbuf, &ip6->ip6_dst),
+ ifp, ifp->if_xname);
+ return (EINVAL);
+ }
+
+ /*
+ * Make sure we don't hear our own membership report, as fast
+ * leave requires knowing that we are the only member of a
+ * group. Assume we used the link-local address if available,
+ * otherwise look for ::.
+ *
+ * XXX Note that scope ID comparison is needed for the address
+ * returned by in6ifa_ifpforlinklocal(), but SHOULD NOT be
+ * performed for the on-wire address.
+ */
+ ia = in6ifa_ifpforlinklocal(ifp, IN6_IFF_NOTREADY|IN6_IFF_ANYCAST);
+ if ((ia && IN6_ARE_ADDR_EQUAL(&ip6->ip6_src, IA6_IN6(ia))) ||
+ (ia == NULL && IN6_IS_ADDR_UNSPECIFIED(&src))) {
+ if (ia != NULL)
+ ifa_free(&ia->ia_ifa);
+ return (0);
+ }
+ if (ia != NULL)
+ ifa_free(&ia->ia_ifa);
+
+ CTR3(KTR_MLD, "process v1 report %s on ifp %p(%s)",
+ ip6_sprintf(ip6tbuf, &mld->mld_addr), ifp, ifp->if_xname);
+
+ /*
+ * Embed scope ID of receiving interface in MLD query for lookup
+ * whilst we don't hold other locks (due to KAME locking lameness).
+ */
+ if (!IN6_IS_ADDR_UNSPECIFIED(&mld->mld_addr))
+ in6_setscope(&mld->mld_addr, ifp, NULL);
+
+ IN6_MULTI_LOCK();
+ MLD_LOCK();
+ IF_ADDR_LOCK(ifp);
+
+ /*
+ * MLDv1 report suppression.
+ * If we are a member of this group, and our membership should be
+ * reported, and our group timer is pending or about to be reset,
+ * stop our group timer by transitioning to the 'lazy' state.
+ */
+ inm = in6m_lookup_locked(ifp, &mld->mld_addr);
+ if (inm != NULL) {
+ struct mld_ifinfo *mli;
+
+ mli = inm->in6m_mli;
+ KASSERT(mli != NULL,
+ ("%s: no mli for ifp %p", __func__, ifp));
+
+ /*
+ * If we are in MLDv2 host mode, do not allow the
+ * other host's MLDv1 report to suppress our reports.
+ */
+ if (mli->mli_version == MLD_VERSION_2)
+ goto out_locked;
+
+ inm->in6m_timer = 0;
+
+ switch (inm->in6m_state) {
+ case MLD_NOT_MEMBER:
+ case MLD_SILENT_MEMBER:
+ case MLD_SLEEPING_MEMBER:
+ break;
+ case MLD_REPORTING_MEMBER:
+ case MLD_IDLE_MEMBER:
+ case MLD_AWAKENING_MEMBER:
+ CTR3(KTR_MLD,
+ "report suppressed for %s on ifp %p(%s)",
+ ip6_sprintf(ip6tbuf, &mld->mld_addr),
+ ifp, ifp->if_xname);
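+ /* FALLTHROUGH */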
+ case MLD_LAZY_MEMBER:
+ inm->in6m_state = MLD_LAZY_MEMBER;
+ break;
+ case MLD_G_QUERY_PENDING_MEMBER:
+ case MLD_SG_QUERY_PENDING_MEMBER:
+ case MLD_LEAVING_MEMBER:
+ break;
+ }
+ }
+
+out_locked:
+ MLD_UNLOCK();
+ IF_ADDR_UNLOCK(ifp);
+ IN6_MULTI_UNLOCK();
+
+ /* XXX Clear embedded scope ID as userland won't expect it. */
+ in6_clearscope(&mld->mld_addr);
+
+ return (0);
+}
+
+/*
+ * MLD input path.
+ *
+ * Assume query messages which fit in a single ICMPv6 message header
+ * have been pulled up.
+ * Assume that userland will want to see the message, even if it
+ * otherwise fails kernel input validation; do not free it.
+ * Pullup may, however, free the mbuf chain m if it fails.
+ *
+ * Return IPPROTO_DONE if we freed m. Otherwise, return 0.
+ */
+int
+mld_input(struct mbuf *m, int off, int icmp6len)
+{
+ struct ifnet *ifp;
+ struct ip6_hdr *ip6;
+ struct mld_hdr *mld;
+ int mldlen;
+
+ CTR3(KTR_MLD, "%s: called w/mbuf (%p,%d)", __func__, m, off);
+
+ ifp = m->m_pkthdr.rcvif;
+
+ ip6 = mtod(m, struct ip6_hdr *);
+
+ /* Pullup to appropriate size. */
+ mld = (struct mld_hdr *)(mtod(m, uint8_t *) + off);
+ if (mld->mld_type == MLD_LISTENER_QUERY &&
+ icmp6len >= sizeof(struct mldv2_query)) {
+ mldlen = sizeof(struct mldv2_query);
+ } else {
+ mldlen = sizeof(struct mld_hdr);
+ }
+ IP6_EXTHDR_GET(mld, struct mld_hdr *, m, off, mldlen);
+ if (mld == NULL) {
+ ICMP6STAT_INC(icp6s_badlen);
+ return (IPPROTO_DONE);
+ }
+
+ /*
+ * Userland needs to see all of this traffic for implementing
+ * the endpoint discovery portion of multicast routing.
+ */
+ switch (mld->mld_type) {
+ case MLD_LISTENER_QUERY:
+ icmp6_ifstat_inc(ifp, ifs6_in_mldquery);
+ if (icmp6len == sizeof(struct mld_hdr)) {
+ if (mld_v1_input_query(ifp, ip6, mld) != 0)
+ return (0);
+ } else if (icmp6len >= sizeof(struct mldv2_query)) {
+ if (mld_v2_input_query(ifp, ip6, m, off,
+ icmp6len) != 0)
+ return (0);
+ }
+ break;
+ case MLD_LISTENER_REPORT:
+ icmp6_ifstat_inc(ifp, ifs6_in_mldreport);
+ if (mld_v1_input_report(ifp, ip6, mld) != 0)
+ return (0);
+ break;
+ case MLDV2_LISTENER_REPORT:
+ icmp6_ifstat_inc(ifp, ifs6_in_mldreport);
+ break;
+ case MLD_LISTENER_DONE:
+ icmp6_ifstat_inc(ifp, ifs6_in_mlddone);
+ break;
+ default:
+ break;
+ }
+
+ return (0);
+}
+
+/*
+ * Fast timeout handler (global).
+ * VIMAGE: Timeout handlers are expected to service all vimages.
+ */
+void
+mld_fasttimo(void)
+{
+ VNET_ITERATOR_DECL(vnet_iter);
+
+ VNET_LIST_RLOCK_NOSLEEP();
+ VNET_FOREACH(vnet_iter) {
+ CURVNET_SET(vnet_iter);
+ mld_fasttimo_vnet();
+ CURVNET_RESTORE();
+ }
+ VNET_LIST_RUNLOCK_NOSLEEP();
+}
+
+/*
+ * Fast timeout handler (per-vnet).
+ *
+ * VIMAGE: Assume caller has set up our curvnet.
+ */
+static void
+mld_fasttimo_vnet(void)
+{
+ struct ifqueue scq; /* State-change packets */
+ struct ifqueue qrq; /* Query response packets */
+ struct ifnet *ifp;
+ struct mld_ifinfo *mli;
+ struct ifmultiaddr *ifma, *tifma;
+ struct in6_multi *inm;
+ int uri_fasthz;
+
+ uri_fasthz = 0;
+
+ /*
+ * Quick check to see if any work needs to be done, in order to
+ * minimize the overhead of fasttimo processing.
+ * SMPng: XXX Unlocked reads.
+ */
+ if (!V_current_state_timers_running6 &&
+ !V_interface_timers_running6 &&
+ !V_state_change_timers_running6)
+ return;
+
+ IN6_MULTI_LOCK();
+ MLD_LOCK();
+
+ /*
+ * MLDv2 General Query response timer processing.
+ */
+ if (V_interface_timers_running6) {
+ CTR1(KTR_MLD, "%s: interface timers running", __func__);
+
+ V_interface_timers_running6 = 0;
+ LIST_FOREACH(mli, &V_mli_head, mli_link) {
+ if (mli->mli_v2_timer == 0) {
+ /* Do nothing. */
+ } else if (--mli->mli_v2_timer == 0) {
+ mld_v2_dispatch_general_query(mli);
+ } else {
+ V_interface_timers_running6 = 1;
+ }
+ }
+ }
+
+ if (!V_current_state_timers_running6 &&
+ !V_state_change_timers_running6)
+ goto out_locked;
+
+ V_current_state_timers_running6 = 0;
+ V_state_change_timers_running6 = 0;
+
+ CTR1(KTR_MLD, "%s: state change timers running", __func__);
+
+ /*
+ * MLD host report and state-change timer processing.
+ * Note: Processing a v2 group timer may remove a node.
+ */
+ LIST_FOREACH(mli, &V_mli_head, mli_link) {
+ ifp = mli->mli_ifp;
+
+ if (mli->mli_version == MLD_VERSION_2) {
+ uri_fasthz = MLD_RANDOM_DELAY(mli->mli_uri *
+ PR_FASTHZ);
+
+ memset(&qrq, 0, sizeof(struct ifqueue));
+ IFQ_SET_MAXLEN(&qrq, MLD_MAX_G_GS_PACKETS);
+
+ memset(&scq, 0, sizeof(struct ifqueue));
+ IFQ_SET_MAXLEN(&scq, MLD_MAX_STATE_CHANGE_PACKETS);
+ }
+
+ IF_ADDR_LOCK(ifp);
+ TAILQ_FOREACH_SAFE(ifma, &ifp->if_multiaddrs, ifma_link,
+ tifma) {
+ if (ifma->ifma_addr->sa_family != AF_INET6 ||
+ ifma->ifma_protospec == NULL)
+ continue;
+ inm = (struct in6_multi *)ifma->ifma_protospec;
+ switch (mli->mli_version) {
+ case MLD_VERSION_1:
+ /*
+ * XXX Drop IF_ADDR lock temporarily to
+ * avoid recursion caused by a potential
+ * call by in6ifa_ifpforlinklocal().
+ * rwlock candidate?
+ */
+ IF_ADDR_UNLOCK(ifp);
+ mld_v1_process_group_timer(inm,
+ mli->mli_version);
+ IF_ADDR_LOCK(ifp);
+ break;
+ case MLD_VERSION_2:
+ mld_v2_process_group_timers(mli, &qrq,
+ &scq, inm, uri_fasthz);
+ break;
+ }
+ }
+ IF_ADDR_UNLOCK(ifp);
+
+ if (mli->mli_version == MLD_VERSION_2) {
+ struct in6_multi *tinm;
+
+ mld_dispatch_queue(&qrq, 0);
+ mld_dispatch_queue(&scq, 0);
+
+ /*
+ * Free the in_multi reference(s) for
+ * this lifecycle.
+ */
+ SLIST_FOREACH_SAFE(inm, &mli->mli_relinmhead,
+ in6m_nrele, tinm) {
+ SLIST_REMOVE_HEAD(&mli->mli_relinmhead,
+ in6m_nrele);
+ in6m_release_locked(inm);
+ }
+ }
+ }
+
+out_locked:
+ MLD_UNLOCK();
+ IN6_MULTI_UNLOCK();
+}
+
+/*
+ * Update host report group timer.
+ * Will update the global pending timer flags.
+ */
+static void
+mld_v1_process_group_timer(struct in6_multi *inm, const int version)
+{
+ int report_timer_expired;
+
+ IN6_MULTI_LOCK_ASSERT();
+ MLD_LOCK_ASSERT();
+
+ if (inm->in6m_timer == 0) {
+ report_timer_expired = 0;
+ } else if (--inm->in6m_timer == 0) {
+ report_timer_expired = 1;
+ } else {
+ V_current_state_timers_running6 = 1;
+ return;
+ }
+
+ switch (inm->in6m_state) {
+ case MLD_NOT_MEMBER:
+ case MLD_SILENT_MEMBER:
+ case MLD_IDLE_MEMBER:
+ case MLD_LAZY_MEMBER:
+ case MLD_SLEEPING_MEMBER:
+ case MLD_AWAKENING_MEMBER:
+ break;
+ case MLD_REPORTING_MEMBER:
+ if (report_timer_expired) {
+ inm->in6m_state = MLD_IDLE_MEMBER;
+ (void)mld_v1_transmit_report(inm,
+ MLD_LISTENER_REPORT);
+ }
+ break;
+ case MLD_G_QUERY_PENDING_MEMBER:
+ case MLD_SG_QUERY_PENDING_MEMBER:
+ case MLD_LEAVING_MEMBER:
+ break;
+ }
+}
+
+/*
+ * Update a group's timers for MLDv2.
+ * Will update the global pending timer flags.
+ * Note: Unlocked read from mli.
+ */
+static void
+mld_v2_process_group_timers(struct mld_ifinfo *mli,
+ struct ifqueue *qrq, struct ifqueue *scq,
+ struct in6_multi *inm, const int uri_fasthz)
+{
+ int query_response_timer_expired;
+ int state_change_retransmit_timer_expired;
+#ifdef KTR
+ char ip6tbuf[INET6_ADDRSTRLEN];
+#endif
+
+ IN6_MULTI_LOCK_ASSERT();
+ MLD_LOCK_ASSERT();
+
+ query_response_timer_expired = 0;
+ state_change_retransmit_timer_expired = 0;
+
+ /*
+ * During a transition from compatibility mode back to MLDv2,
+ * a group record in REPORTING state may still have its group
+ * timer active. This is a no-op in this function; it is easier
+ * to deal with it here than to complicate the slow-timeout path.
+ */
+ if (inm->in6m_timer == 0) {
+ query_response_timer_expired = 0;
+ } else if (--inm->in6m_timer == 0) {
+ query_response_timer_expired = 1;
+ } else {
+ V_current_state_timers_running6 = 1;
+ }
+
+ if (inm->in6m_sctimer == 0) {
+ state_change_retransmit_timer_expired = 0;
+ } else if (--inm->in6m_sctimer == 0) {
+ state_change_retransmit_timer_expired = 1;
+ } else {
+ V_state_change_timers_running6 = 1;
+ }
+
+ /* We are in fasttimo, so be quick about it. */
+ if (!state_change_retransmit_timer_expired &&
+ !query_response_timer_expired)
+ return;
+
+ switch (inm->in6m_state) {
+ case MLD_NOT_MEMBER:
+ case MLD_SILENT_MEMBER:
+ case MLD_SLEEPING_MEMBER:
+ case MLD_LAZY_MEMBER:
+ case MLD_AWAKENING_MEMBER:
+ case MLD_IDLE_MEMBER:
+ break;
+ case MLD_G_QUERY_PENDING_MEMBER:
+ case MLD_SG_QUERY_PENDING_MEMBER:
+ /*
+ * Respond to a previously pending Group-Specific
+ * or Group-and-Source-Specific query by enqueueing
+ * the appropriate Current-State report for
+ * immediate transmission.
+ */
+ if (query_response_timer_expired) {
+ int retval;
+
+ retval = mld_v2_enqueue_group_record(qrq, inm, 0, 1,
+ (inm->in6m_state == MLD_SG_QUERY_PENDING_MEMBER),
+ 0);
+ CTR2(KTR_MLD, "%s: enqueue record = %d",
+ __func__, retval);
+ inm->in6m_state = MLD_REPORTING_MEMBER;
+ in6m_clear_recorded(inm);
+ }
+ /* FALLTHROUGH */
+ case MLD_REPORTING_MEMBER:
+ case MLD_LEAVING_MEMBER:
+ if (state_change_retransmit_timer_expired) {
+ /*
+ * State-change retransmission timer fired.
+ * If there are any further pending retransmissions,
+ * set the global pending state-change flag, and
+ * reset the timer.
+ */
+ if (--inm->in6m_scrv > 0) {
+ inm->in6m_sctimer = uri_fasthz;
+ V_state_change_timers_running6 = 1;
+ }
+ /*
+ * Retransmit the previously computed state-change
+ * report. If there are no further pending
+ * retransmissions, the mbuf queue will be consumed.
+ * Update T0 state to T1 as we have now sent
+ * a state-change.
+ */
+ (void)mld_v2_merge_state_changes(inm, scq);
+
+ in6m_commit(inm);
+ CTR3(KTR_MLD, "%s: T1 -> T0 for %s/%s", __func__,
+ ip6_sprintf(ip6tbuf, &inm->in6m_addr),
+ inm->in6m_ifp->if_xname);
+
+ /*
+ * If we are leaving the group for good, make sure
+ * we release MLD's reference to it.
+ * This release must be deferred using a SLIST,
+ * as we are called from a loop which traverses
+ * the in_ifmultiaddr TAILQ.
+ */
+ if (inm->in6m_state == MLD_LEAVING_MEMBER &&
+ inm->in6m_scrv == 0) {
+ inm->in6m_state = MLD_NOT_MEMBER;
+ SLIST_INSERT_HEAD(&mli->mli_relinmhead,
+ inm, in6m_nrele);
+ }
+ }
+ break;
+ }
+}
+
+/*
+ * Switch to a different version on the given interface,
+ * as per Section 9.12.
+ */
+static void
+mld_set_version(struct mld_ifinfo *mli, const int version)
+{
+ int old_version_timer;
+
+ MLD_LOCK_ASSERT();
+
+ CTR4(KTR_MLD, "%s: switching to v%d on ifp %p(%s)", __func__,
+ version, mli->mli_ifp, mli->mli_ifp->if_xname);
+
+ if (version == MLD_VERSION_1) {
+ /*
+ * Compute the "Older Version Querier Present" timer as per
+ * RFC 3810 Section 9.12.
+ */
+ old_version_timer = (mli->mli_rv * mli->mli_qi) + mli->mli_qri;
+ old_version_timer *= PR_SLOWHZ;
+ mli->mli_v1_timer = old_version_timer;
+ }
+
+ if (mli->mli_v1_timer > 0 && mli->mli_version != MLD_VERSION_1) {
+ mli->mli_version = MLD_VERSION_1;
+ mld_v2_cancel_link_timers(mli);
+ }
+}
+
+/*
+ * Cancel pending MLDv2 timers for the given link and all groups
+ * joined on it; state-change, general-query, and group-query timers.
+ */
+static void
+mld_v2_cancel_link_timers(struct mld_ifinfo *mli)
+{
+ struct ifmultiaddr *ifma;
+ struct ifnet *ifp;
+ struct in6_multi *inm;
+
+ CTR3(KTR_MLD, "%s: cancel v2 timers on ifp %p(%s)", __func__,
+ mli->mli_ifp, mli->mli_ifp->if_xname);
+
+ IN6_MULTI_LOCK_ASSERT();
+ MLD_LOCK_ASSERT();
+
+ /*
+ * Fast-track this potentially expensive operation
+ * by checking all the global 'timer pending' flags.
+ */
+ if (!V_interface_timers_running6 &&
+ !V_state_change_timers_running6 &&
+ !V_current_state_timers_running6)
+ return;
+
+ mli->mli_v2_timer = 0;
+
+ ifp = mli->mli_ifp;
+
+ IF_ADDR_LOCK(ifp);
+ TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
+ if (ifma->ifma_addr->sa_family != AF_INET6)
+ continue;
+ inm = (struct in6_multi *)ifma->ifma_protospec;
+ switch (inm->in6m_state) {
+ case MLD_NOT_MEMBER:
+ case MLD_SILENT_MEMBER:
+ case MLD_IDLE_MEMBER:
+ case MLD_LAZY_MEMBER:
+ case MLD_SLEEPING_MEMBER:
+ case MLD_AWAKENING_MEMBER:
+ break;
+ case MLD_LEAVING_MEMBER:
+ /*
+ * If we are leaving the group and switching
+ * version, we need to release the final
+ * reference held for issuing the INCLUDE {}.
+ *
+ * SMPNG: Must drop and re-acquire IF_ADDR_LOCK
+ * around in6m_release_locked(), as it is not
+ * a recursive mutex.
+ */
+ IF_ADDR_UNLOCK(ifp);
+ in6m_release_locked(inm);
+ IF_ADDR_LOCK(ifp);
+ /* FALLTHROUGH */
+ case MLD_G_QUERY_PENDING_MEMBER:
+ case MLD_SG_QUERY_PENDING_MEMBER:
+ in6m_clear_recorded(inm);
+ /* FALLTHROUGH */
+ case MLD_REPORTING_MEMBER:
+ inm->in6m_sctimer = 0;
+ inm->in6m_timer = 0;
+ inm->in6m_state = MLD_REPORTING_MEMBER;
+ /*
+ * Free any pending MLDv2 state-change records.
+ */
+ _IF_DRAIN(&inm->in6m_scq);
+ break;
+ }
+ }
+ IF_ADDR_UNLOCK(ifp);
+}
+
+/*
+ * Global slowtimo handler.
+ * VIMAGE: Timeout handlers are expected to service all vimages.
+ */
+void
+mld_slowtimo(void)
+{
+ VNET_ITERATOR_DECL(vnet_iter);
+
+ VNET_LIST_RLOCK_NOSLEEP();
+ VNET_FOREACH(vnet_iter) {
+ CURVNET_SET(vnet_iter);
+ mld_slowtimo_vnet();
+ CURVNET_RESTORE();
+ }
+ VNET_LIST_RUNLOCK_NOSLEEP();
+}
+
+/*
+ * Per-vnet slowtimo handler.
+ */
+static void
+mld_slowtimo_vnet(void)
+{
+ struct mld_ifinfo *mli;
+
+ MLD_LOCK();
+
+ LIST_FOREACH(mli, &V_mli_head, mli_link) {
+ mld_v1_process_querier_timers(mli);
+ }
+
+ MLD_UNLOCK();
+}
+
+/*
+ * Update the Older Version Querier Present timers for a link.
+ * See Section 9.12 of RFC 3810.
+ */
+static void
+mld_v1_process_querier_timers(struct mld_ifinfo *mli)
+{
+
+ MLD_LOCK_ASSERT();
+
+ if (mli->mli_version != MLD_VERSION_2 && --mli->mli_v1_timer == 0) {
+ /*
+ * MLDv1 Querier Present timer expired; revert to MLDv2.
+ */
+ CTR5(KTR_MLD,
+ "%s: transition from v%d -> v%d on %p(%s)",
+ __func__, mli->mli_version, MLD_VERSION_2,
+ mli->mli_ifp, mli->mli_ifp->if_xname);
+ mli->mli_version = MLD_VERSION_2;
+ }
+}
+
+/*
+ * Transmit an MLDv1 report immediately.
+ */
+static int
+mld_v1_transmit_report(struct in6_multi *in6m, const int type)
+{
+ struct ifnet *ifp;
+ struct in6_ifaddr *ia;
+ struct ip6_hdr *ip6;
+ struct mbuf *mh, *md;
+ struct mld_hdr *mld;
+
+ IN6_MULTI_LOCK_ASSERT();
+ MLD_LOCK_ASSERT();
+
+ ifp = in6m->in6m_ifp;
+ ia = in6ifa_ifpforlinklocal(ifp, IN6_IFF_NOTREADY|IN6_IFF_ANYCAST);
+ /* ia may be NULL if the link-local address is tentative. */
+
+ MGETHDR(mh, M_DONTWAIT, MT_HEADER);
+ if (mh == NULL) {
+ if (ia != NULL)
+ ifa_free(&ia->ia_ifa);
+ return (ENOMEM);
+ }
+ MGET(md, M_DONTWAIT, MT_DATA);
+ if (md == NULL) {
+ m_free(mh);
+ if (ia != NULL)
+ ifa_free(&ia->ia_ifa);
+ return (ENOMEM);
+ }
+ mh->m_next = md;
+
+ /*
+ * FUTURE: Consider increasing alignment by ETHER_HDR_LEN, so
+ * that ether_output() does not need to allocate another mbuf
+ * for the header in the most common case.
+ */
+ MH_ALIGN(mh, sizeof(struct ip6_hdr));
+ mh->m_pkthdr.len = sizeof(struct ip6_hdr) + sizeof(struct mld_hdr);
+ mh->m_len = sizeof(struct ip6_hdr);
+
+ ip6 = mtod(mh, struct ip6_hdr *);
+ ip6->ip6_flow = 0;
+ ip6->ip6_vfc &= ~IPV6_VERSION_MASK;
+ ip6->ip6_vfc |= IPV6_VERSION;
+ ip6->ip6_nxt = IPPROTO_ICMPV6;
+ ip6->ip6_src = ia ? ia->ia_addr.sin6_addr : in6addr_any;
+ ip6->ip6_dst = in6m->in6m_addr;
+
+ md->m_len = sizeof(struct mld_hdr);
+ mld = mtod(md, struct mld_hdr *);
+ mld->mld_type = type;
+ mld->mld_code = 0;
+ mld->mld_cksum = 0;
+ mld->mld_maxdelay = 0;
+ mld->mld_reserved = 0;
+ mld->mld_addr = in6m->in6m_addr;
+ in6_clearscope(&mld->mld_addr);
+ mld->mld_cksum = in6_cksum(mh, IPPROTO_ICMPV6,
+ sizeof(struct ip6_hdr), sizeof(struct mld_hdr));
+
+ mld_save_context(mh, ifp);
+ mh->m_flags |= M_MLDV1;
+
+ mld_dispatch_packet(mh);
+
+ if (ia != NULL)
+ ifa_free(&ia->ia_ifa);
+ return (0);
+}
+
+/*
+ * Process a state change from the upper layer for the given IPv6 group.
+ *
+ * Each socket holds a reference on the in_multi in its own ip_moptions.
+ * The socket layer will have made the necessary updates to the group
+ * state, it is now up to MLD to issue a state change report if there
+ * has been any change between T0 (when the last state-change was issued)
+ * and T1 (now).
+ *
+ * We use the MLDv2 state machine at the group level. The MLD module
+ * itself, however, decides which MLD protocol version to speak.
+ * A state change *from* INCLUDE {} always means an initial join.
+ * A state change *to* INCLUDE {} always means a final leave.
+ *
+ * If delay is non-zero, and the state change is an initial multicast
+ * join, the state change report will be delayed by 'delay' ticks
+ * in units of PR_FASTHZ if MLDv1 is active on the link; otherwise
+ * the initial MLDv2 state change report will be delayed by whichever
+ * is sooner, a pending state-change timer or delay itself.
+ *
+ * VIMAGE: curvnet should have been set by caller, as this routine
+ * is called from the socket option handlers.
+ */
+int
+mld_change_state(struct in6_multi *inm, const int delay)
+{
+ struct mld_ifinfo *mli;
+ struct ifnet *ifp;
+ int error;
+
+ IN6_MULTI_LOCK_ASSERT();
+
+ error = 0;
+
+ /*
+ * Try to detect if the upper layer just asked us to change state
+ * for an interface which has now gone away.
+ */
+ KASSERT(inm->in6m_ifma != NULL, ("%s: no ifma", __func__));
+ ifp = inm->in6m_ifma->ifma_ifp;
+ if (ifp != NULL) {
+ /*
+ * Sanity check that netinet6's notion of ifp is the
+ * same as net's.
+ */
+ KASSERT(inm->in6m_ifp == ifp, ("%s: bad ifp", __func__));
+ }
+
+ MLD_LOCK();
+
+ mli = MLD_IFINFO(ifp);
+ KASSERT(mli != NULL, ("%s: no mld_ifinfo for ifp %p", __func__, ifp));
+
+ /*
+ * If we detect a state transition to or from MCAST_UNDEFINED
+ * for this group, then we are starting or finishing an MLD
+ * life cycle for this group.
+ */
+ if (inm->in6m_st[1].iss_fmode != inm->in6m_st[0].iss_fmode) {
+ CTR3(KTR_MLD, "%s: inm transition %d -> %d", __func__,
+ inm->in6m_st[0].iss_fmode, inm->in6m_st[1].iss_fmode);
+ if (inm->in6m_st[0].iss_fmode == MCAST_UNDEFINED) {
+ CTR1(KTR_MLD, "%s: initial join", __func__);
+ error = mld_initial_join(inm, mli, delay);
+ goto out_locked;
+ } else if (inm->in6m_st[1].iss_fmode == MCAST_UNDEFINED) {
+ CTR1(KTR_MLD, "%s: final leave", __func__);
+ mld_final_leave(inm, mli);
+ goto out_locked;
+ }
+ } else {
+ CTR1(KTR_MLD, "%s: filter set change", __func__);
+ }
+
+ error = mld_handle_state_change(inm, mli);
+
+out_locked:
+ MLD_UNLOCK();
+ return (error);
+}
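+
+/*
+ * Illustrative caller pattern (a sketch, not part of these sources):
+ * the socket-layer join/leave paths are expected to update the group's
+ * t1 filter state first, then invoke mld_change_state() with the
+ * IN6_MULTI lock held, as the assertion above requires, e.g.:
+ *
+ *	IN6_MULTI_LOCK();
+ *	... update inm->in6m_st[1] via the in6_mcast filter code ...
+ *	error = mld_change_state(inm, 0);
+ *	IN6_MULTI_UNLOCK();
+ */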
+
+/*
+ * Perform the initial join for an MLD group.
+ *
+ * When joining a group:
+ * If the group should have its MLD traffic suppressed, do nothing.
+ * MLDv1 starts sending MLDv1 host membership reports.
+ * MLDv2 will schedule an MLDv2 state-change report containing the
+ * initial state of the membership.
+ *
+ * If the delay argument is non-zero, then we must delay sending the
+ * initial state change for delay ticks (in units of PR_FASTHZ).
+ */
+static int
+mld_initial_join(struct in6_multi *inm, struct mld_ifinfo *mli,
+ const int delay)
+{
+ struct ifnet *ifp;
+ struct ifqueue *ifq;
+ int error, retval, syncstates;
+ int odelay;
+#ifdef KTR
+ char ip6tbuf[INET6_ADDRSTRLEN];
+#endif
+
+ CTR4(KTR_MLD, "%s: initial join %s on ifp %p(%s)",
+ __func__, ip6_sprintf(ip6tbuf, &inm->in6m_addr),
+ inm->in6m_ifp, inm->in6m_ifp->if_xname);
+
+ error = 0;
+ syncstates = 1;
+
+ ifp = inm->in6m_ifp;
+
+ IN6_MULTI_LOCK_ASSERT();
+ MLD_LOCK_ASSERT();
+
+ KASSERT(mli && mli->mli_ifp == ifp, ("%s: inconsistent ifp", __func__));
+
+ /*
+ * Groups joined on loopback or marked as 'not reported',
+ * enter the MLD_SILENT_MEMBER state and
+ * are never reported in any protocol exchanges.
+ * All other groups enter the appropriate state machine
+ * for the version in use on this link.
+ * A link marked as MLIF_SILENT causes MLD to be completely
+ * disabled for the link.
+ */
+ if ((ifp->if_flags & IFF_LOOPBACK) ||
+ (mli->mli_flags & MLIF_SILENT) ||
+ !mld_is_addr_reported(&inm->in6m_addr)) {
+ CTR1(KTR_MLD,
+"%s: not kicking state machine for silent group", __func__);
+ inm->in6m_state = MLD_SILENT_MEMBER;
+ inm->in6m_timer = 0;
+ } else {
+ /*
+ * Deal with overlapping in_multi lifecycle.
+ * If this group was LEAVING, then make sure
+ * we drop the reference we picked up to keep the
+ * group around for the final INCLUDE {} enqueue.
+ */
+ if (mli->mli_version == MLD_VERSION_2 &&
+ inm->in6m_state == MLD_LEAVING_MEMBER)
+ in6m_release_locked(inm);
+
+ inm->in6m_state = MLD_REPORTING_MEMBER;
+
+ switch (mli->mli_version) {
+ case MLD_VERSION_1:
+ /*
+ * If a delay was provided, only use it if
+ * it is greater than the delay normally
+ * used for an MLDv1 state change report,
+ * and delay sending the initial MLDv1 report
+ * by not transitioning to the IDLE state.
+ */
+ odelay = MLD_RANDOM_DELAY(MLD_V1_MAX_RI * PR_FASTHZ);
+ if (delay) {
+ inm->in6m_timer = max(delay, odelay);
+ V_current_state_timers_running6 = 1;
+ } else {
+ inm->in6m_state = MLD_IDLE_MEMBER;
+ error = mld_v1_transmit_report(inm,
+ MLD_LISTENER_REPORT);
+ if (error == 0) {
+ inm->in6m_timer = odelay;
+ V_current_state_timers_running6 = 1;
+ }
+ }
+ break;
+
+ case MLD_VERSION_2:
+ /*
+ * Defer update of T0 to T1, until the first copy
+ * of the state change has been transmitted.
+ */
+ syncstates = 0;
+
+ /*
+ * Immediately enqueue a State-Change Report for
+ * this interface, freeing any previous reports.
+ * Don't kick the timers if there is nothing to do,
+ * or if an error occurred.
+ */
+ ifq = &inm->in6m_scq;
+ _IF_DRAIN(ifq);
+ retval = mld_v2_enqueue_group_record(ifq, inm, 1,
+ 0, 0, (mli->mli_flags & MLIF_USEALLOW));
+ CTR2(KTR_MLD, "%s: enqueue record = %d",
+ __func__, retval);
+ if (retval <= 0) {
+ error = retval * -1;
+ break;
+ }
+
+ /*
+ * Schedule transmission of pending state-change
+ * report up to RV times for this link. The timer
+ * will fire at the next mld_fasttimo (~200ms),
+ * giving us an opportunity to merge the reports.
+ *
+ * If a delay was provided to this function, only
+ * use this delay if sooner than the existing one.
+ */
+ KASSERT(mli->mli_rv > 1,
+ ("%s: invalid robustness %d", __func__,
+ mli->mli_rv));
+ inm->in6m_scrv = mli->mli_rv;
+ if (delay) {
+ if (inm->in6m_sctimer > 1) {
+ inm->in6m_sctimer =
+ min(inm->in6m_sctimer, delay);
+ } else
+ inm->in6m_sctimer = delay;
+ } else
+ inm->in6m_sctimer = 1;
+ V_state_change_timers_running6 = 1;
+
+ error = 0;
+ break;
+ }
+ }
+
+ /*
+ * Only update the T0 state if the state change is atomic,
+ * i.e. we don't need to wait for a timer to fire before we
+ * can consider the state change to have been communicated.
+ */
+ if (syncstates) {
+ in6m_commit(inm);
+ CTR3(KTR_MLD, "%s: T1 -> T0 for %s/%s", __func__,
+ ip6_sprintf(ip6tbuf, &inm->in6m_addr),
+ inm->in6m_ifp->if_xname);
+ }
+
+ return (error);
+}
+
+/*
+ * Issue an intermediate state change during the life-cycle.
+ */
+static int
+mld_handle_state_change(struct in6_multi *inm, struct mld_ifinfo *mli)
+{
+ struct ifnet *ifp;
+ int retval;
+#ifdef KTR
+ char ip6tbuf[INET6_ADDRSTRLEN];
+#endif
+
+ CTR4(KTR_MLD, "%s: state change for %s on ifp %p(%s)",
+ __func__, ip6_sprintf(ip6tbuf, &inm->in6m_addr),
+ inm->in6m_ifp, inm->in6m_ifp->if_xname);
+
+ ifp = inm->in6m_ifp;
+
+ IN6_MULTI_LOCK_ASSERT();
+ MLD_LOCK_ASSERT();
+
+ KASSERT(mli && mli->mli_ifp == ifp,
+ ("%s: inconsistent ifp", __func__));
+
+ if ((ifp->if_flags & IFF_LOOPBACK) ||
+ (mli->mli_flags & MLIF_SILENT) ||
+ !mld_is_addr_reported(&inm->in6m_addr) ||
+ (mli->mli_version != MLD_VERSION_2)) {
+ if (!mld_is_addr_reported(&inm->in6m_addr)) {
+ CTR1(KTR_MLD,
+"%s: not kicking state machine for silent group", __func__);
+ }
+ CTR1(KTR_MLD, "%s: nothing to do", __func__);
+ in6m_commit(inm);
+ CTR3(KTR_MLD, "%s: T1 -> T0 for %s/%s", __func__,
+ ip6_sprintf(ip6tbuf, &inm->in6m_addr),
+ inm->in6m_ifp->if_xname);
+ return (0);
+ }
+
+ _IF_DRAIN(&inm->in6m_scq);
+
+ retval = mld_v2_enqueue_group_record(&inm->in6m_scq, inm, 1, 0, 0,
+ (mli->mli_flags & MLIF_USEALLOW));
+ CTR2(KTR_MLD, "%s: enqueue record = %d", __func__, retval);
+ if (retval <= 0)
+ return (-retval);
+
+ /*
+ * If record(s) were enqueued, start the state-change
+ * report timer for this group.
+ */
+ inm->in6m_scrv = mli->mli_rv;
+ inm->in6m_sctimer = 1;
+ V_state_change_timers_running6 = 1;
+
+ return (0);
+}
+
+/*
+ * Perform the final leave for a multicast address.
+ *
+ * When leaving a group:
+ * MLDv1 sends a DONE message, if and only if we are the reporter.
+ * MLDv2 enqueues a state-change report containing a transition
+ * to INCLUDE {} for immediate transmission.
+ */
+static void
+mld_final_leave(struct in6_multi *inm, struct mld_ifinfo *mli)
+{
+ int syncstates;
+#ifdef KTR
+ char ip6tbuf[INET6_ADDRSTRLEN];
+#endif
+
+ syncstates = 1;
+
+ CTR4(KTR_MLD, "%s: final leave %s on ifp %p(%s)",
+ __func__, ip6_sprintf(ip6tbuf, &inm->in6m_addr),
+ inm->in6m_ifp, inm->in6m_ifp->if_xname);
+
+ IN6_MULTI_LOCK_ASSERT();
+ MLD_LOCK_ASSERT();
+
+ switch (inm->in6m_state) {
+ case MLD_NOT_MEMBER:
+ case MLD_SILENT_MEMBER:
+ case MLD_LEAVING_MEMBER:
+ /* Already leaving or left; do nothing. */
+ CTR1(KTR_MLD,
+"%s: not kicking state machine for silent group", __func__);
+ break;
+ case MLD_REPORTING_MEMBER:
+ case MLD_IDLE_MEMBER:
+ case MLD_G_QUERY_PENDING_MEMBER:
+ case MLD_SG_QUERY_PENDING_MEMBER:
+ if (mli->mli_version == MLD_VERSION_1) {
+#ifdef INVARIANTS
+ if (inm->in6m_state == MLD_G_QUERY_PENDING_MEMBER ||
+ inm->in6m_state == MLD_SG_QUERY_PENDING_MEMBER)
+ panic("%s: MLDv2 state reached, not MLDv2 mode",
+ __func__);
+#endif
+ mld_v1_transmit_report(inm, MLD_LISTENER_DONE);
+ inm->in6m_state = MLD_NOT_MEMBER;
+ } else if (mli->mli_version == MLD_VERSION_2) {
+ /*
+ * Stop group timer and all pending reports.
+ * Immediately enqueue a state-change report
+ * TO_IN {} to be sent on the next fast timeout,
+ * giving us an opportunity to merge reports.
+ */
+ _IF_DRAIN(&inm->in6m_scq);
+ inm->in6m_timer = 0;
+ inm->in6m_scrv = mli->mli_rv;
+ CTR4(KTR_MLD, "%s: Leaving %s/%s with %d "
+ "pending retransmissions.", __func__,
+ ip6_sprintf(ip6tbuf, &inm->in6m_addr),
+ inm->in6m_ifp->if_xname, inm->in6m_scrv);
+ if (inm->in6m_scrv == 0) {
+ inm->in6m_state = MLD_NOT_MEMBER;
+ inm->in6m_sctimer = 0;
+ } else {
+ int retval;
+
+ in6m_acquire_locked(inm);
+
+ retval = mld_v2_enqueue_group_record(
+ &inm->in6m_scq, inm, 1, 0, 0,
+ (mli->mli_flags & MLIF_USEALLOW));
+ KASSERT(retval != 0,
+ ("%s: enqueue record = %d", __func__,
+ retval));
+
+ inm->in6m_state = MLD_LEAVING_MEMBER;
+ inm->in6m_sctimer = 1;
+ V_state_change_timers_running6 = 1;
+ syncstates = 0;
+ }
+ break;
+ }
+ break;
+ case MLD_LAZY_MEMBER:
+ case MLD_SLEEPING_MEMBER:
+ case MLD_AWAKENING_MEMBER:
+ /* Our reports are suppressed; do nothing. */
+ break;
+ }
+
+ if (syncstates) {
+ in6m_commit(inm);
+ CTR3(KTR_MLD, "%s: T1 -> T0 for %s/%s", __func__,
+ ip6_sprintf(ip6tbuf, &inm->in6m_addr),
+ inm->in6m_ifp->if_xname);
+ inm->in6m_st[1].iss_fmode = MCAST_UNDEFINED;
+ CTR3(KTR_MLD, "%s: T1 now MCAST_UNDEFINED for %p/%s",
+ __func__, &inm->in6m_addr, inm->in6m_ifp->if_xname);
+ }
+}
+
+/*
+ * Enqueue an MLDv2 group record to the given output queue.
+ *
+ * If is_state_change is zero, a current-state record is appended.
+ * If is_state_change is non-zero, a state-change report is appended.
+ *
+ * If is_group_query is non-zero, an mbuf packet chain is allocated.
+ * If is_group_query is zero, and if there is a packet with free space
+ * at the tail of the queue, the record will be appended to that
+ * packet, provided there is enough free space.
+ * Otherwise a new mbuf packet chain is allocated.
+ *
+ * If is_source_query is non-zero, each source is checked to see if
+ * it was recorded for a Group-Source query, and will be omitted if
+ * it is not both in-mode and recorded.
+ *
+ * If use_block_allow is non-zero, state change reports for initial join
+ * and final leave, on an inclusive mode group with a source list, will be
+ * rewritten to use the ALLOW_NEW and BLOCK_OLD record types, respectively.
+ *
+ * The function will attempt to allocate leading space in the packet
+ * for the IPv6+ICMP headers to be prepended without fragmenting the chain.
+ *
+ * If successful the size of all data appended to the queue is returned,
+ * otherwise an error code less than zero is returned, or zero if
+ * no record(s) were appended.
+ */
+static int
+mld_v2_enqueue_group_record(struct ifqueue *ifq, struct in6_multi *inm,
+ const int is_state_change, const int is_group_query,
+ const int is_source_query, const int use_block_allow)
+{
+ struct mldv2_record mr;
+ struct mldv2_record *pmr;
+ struct ifnet *ifp;
+ struct ip6_msource *ims, *nims;
+ struct mbuf *m0, *m, *md;
+ int error, is_filter_list_change;
+ int minrec0len, m0srcs, msrcs, nbytes, off;
+ int record_has_sources;
+ int now;
+ int type;
+ uint8_t mode;
+#ifdef KTR
+ char ip6tbuf[INET6_ADDRSTRLEN];
+#endif
+
+ IN6_MULTI_LOCK_ASSERT();
+
+ error = 0;
+ ifp = inm->in6m_ifp;
+ is_filter_list_change = 0;
+ m = NULL;
+ m0 = NULL;
+ m0srcs = 0;
+ msrcs = 0;
+ nbytes = 0;
+ nims = NULL;
+ record_has_sources = 1;
+ pmr = NULL;
+ type = MLD_DO_NOTHING;
+ mode = inm->in6m_st[1].iss_fmode;
+
+ /*
+ * If we did not transition out of ASM mode during t0->t1,
+ * and there are no source nodes to process, we can skip
+ * the generation of source records.
+ */
+ if (inm->in6m_st[0].iss_asm > 0 && inm->in6m_st[1].iss_asm > 0 &&
+ inm->in6m_nsrc == 0)
+ record_has_sources = 0;
+
+ if (is_state_change) {
+ /*
+ * Queue a state change record.
+ * If the mode did not change, and there are non-ASM
+ * listeners or source filters present,
+ * we potentially need to issue two records for the group.
+ * If there are ASM listeners, and there was no filter
+ * mode transition of any kind, do nothing.
+ *
+ * If we are transitioning to MCAST_UNDEFINED, we need
+ * not send any sources. A transition to/from this state is
+ * considered inclusive with some special treatment.
+ *
+ * If we are rewriting initial joins/leaves to use
+ * ALLOW/BLOCK, and the group's membership is inclusive,
+ * we need to send sources in all cases.
+ */
+ if (mode != inm->in6m_st[0].iss_fmode) {
+ if (mode == MCAST_EXCLUDE) {
+ CTR1(KTR_MLD, "%s: change to EXCLUDE",
+ __func__);
+ type = MLD_CHANGE_TO_EXCLUDE_MODE;
+ } else {
+ CTR1(KTR_MLD, "%s: change to INCLUDE",
+ __func__);
+ if (use_block_allow) {
+ /*
+ * XXX
+ * Here we're interested in state
+ * edges either direction between
+ * MCAST_UNDEFINED and MCAST_INCLUDE.
+ * Perhaps we should just check
+ * the group state, rather than
+ * the filter mode.
+ */
+ if (mode == MCAST_UNDEFINED) {
+ type = MLD_BLOCK_OLD_SOURCES;
+ } else {
+ type = MLD_ALLOW_NEW_SOURCES;
+ }
+ } else {
+ type = MLD_CHANGE_TO_INCLUDE_MODE;
+ if (mode == MCAST_UNDEFINED)
+ record_has_sources = 0;
+ }
+ }
+ } else {
+ if (record_has_sources) {
+ is_filter_list_change = 1;
+ } else {
+ type = MLD_DO_NOTHING;
+ }
+ }
+ } else {
+ /*
+ * Queue a current state record.
+ */
+ if (mode == MCAST_EXCLUDE) {
+ type = MLD_MODE_IS_EXCLUDE;
+ } else if (mode == MCAST_INCLUDE) {
+ type = MLD_MODE_IS_INCLUDE;
+ KASSERT(inm->in6m_st[1].iss_asm == 0,
+ ("%s: inm %p is INCLUDE but ASM count is %d",
+ __func__, inm, inm->in6m_st[1].iss_asm));
+ }
+ }
+
+ /*
+ * Generate the filter list changes using a separate function.
+ */
+ if (is_filter_list_change)
+ return (mld_v2_enqueue_filter_change(ifq, inm));
+
+ if (type == MLD_DO_NOTHING) {
+ CTR3(KTR_MLD, "%s: nothing to do for %s/%s",
+ __func__, ip6_sprintf(ip6tbuf, &inm->in6m_addr),
+ inm->in6m_ifp->if_xname);
+ return (0);
+ }
+
+ /*
+ * If any sources are present, we must be able to fit at least
+ * one in the trailing space of the tail packet's mbuf,
+ * ideally more.
+ */
+ minrec0len = sizeof(struct mldv2_record);
+ if (record_has_sources)
+ minrec0len += sizeof(struct in6_addr);
+
+ CTR4(KTR_MLD, "%s: queueing %s for %s/%s", __func__,
+ mld_rec_type_to_str(type),
+ ip6_sprintf(ip6tbuf, &inm->in6m_addr),
+ inm->in6m_ifp->if_xname);
+
+ /*
+ * Check if we have a packet in the tail of the queue for this
+ * group into which the first group record for this group will fit.
+ * Otherwise allocate a new packet.
+ * Always allocate leading space for IP6+RA+ICMPV6+REPORT.
+ * Note: Group records for G/GSR query responses MUST be sent
+ * in their own packet.
+ */
+ m0 = ifq->ifq_tail;
+ if (!is_group_query &&
+ m0 != NULL &&
+ (m0->m_pkthdr.PH_vt.vt_nrecs + 1 <= MLD_V2_REPORT_MAXRECS) &&
+ (m0->m_pkthdr.len + minrec0len) <
+ (ifp->if_mtu - MLD_MTUSPACE)) {
+ m0srcs = (ifp->if_mtu - m0->m_pkthdr.len -
+ sizeof(struct mldv2_record)) /
+ sizeof(struct in6_addr);
+ m = m0;
+ CTR1(KTR_MLD, "%s: use existing packet", __func__);
+ } else {
+ if (_IF_QFULL(ifq)) {
+ CTR1(KTR_MLD, "%s: outbound queue full", __func__);
+ return (-ENOMEM);
+ }
+ m = NULL;
+ m0srcs = (ifp->if_mtu - MLD_MTUSPACE -
+ sizeof(struct mldv2_record)) / sizeof(struct in6_addr);
+ if (!is_state_change && !is_group_query)
+ m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
+ if (m == NULL)
+ m = m_gethdr(M_DONTWAIT, MT_DATA);
+ if (m == NULL)
+ return (-ENOMEM);
+
+ mld_save_context(m, ifp);
+
+ CTR1(KTR_MLD, "%s: allocated first packet", __func__);
+ }
+
+ /*
+ * Append group record.
+ * If we have sources, we don't know how many yet.
+ */
+ mr.mr_type = type;
+ mr.mr_datalen = 0;
+ mr.mr_numsrc = 0;
+ mr.mr_addr = inm->in6m_addr;
+ in6_clearscope(&mr.mr_addr);
+ if (!m_append(m, sizeof(struct mldv2_record), (void *)&mr)) {
+ if (m != m0)
+ m_freem(m);
+ CTR1(KTR_MLD, "%s: m_append() failed.", __func__);
+ return (-ENOMEM);
+ }
+ nbytes += sizeof(struct mldv2_record);
+
+ /*
+ * Append as many sources as will fit in the first packet.
+ * If we are appending to a new packet, the chain allocation
+ * may potentially use clusters; use m_getptr() in this case.
+ * If we are appending to an existing packet, we need to obtain
+ * a pointer to the group record after m_append(), in case a new
+ * mbuf was allocated.
+ *
+ * Only append sources which are in-mode at t1. If we are
+ * transitioning to MCAST_UNDEFINED state on the group, and
+ * use_block_allow is zero, do not include source entries.
+ * Otherwise, we need to include this source in the report.
+ *
+ * Only report recorded sources in our filter set when responding
+ * to a group-source query.
+ */
+ if (record_has_sources) {
+ if (m == m0) {
+ md = m_last(m);
+ pmr = (struct mldv2_record *)(mtod(md, uint8_t *) +
+ md->m_len - nbytes);
+ } else {
+ md = m_getptr(m, 0, &off);
+ pmr = (struct mldv2_record *)(mtod(md, uint8_t *) +
+ off);
+ }
+ msrcs = 0;
+ RB_FOREACH_SAFE(ims, ip6_msource_tree, &inm->in6m_srcs,
+ nims) {
+ CTR2(KTR_MLD, "%s: visit node %s", __func__,
+ ip6_sprintf(ip6tbuf, &ims->im6s_addr));
+ now = im6s_get_mode(inm, ims, 1);
+ CTR2(KTR_MLD, "%s: node is %d", __func__, now);
+ if ((now != mode) ||
+ (now == mode &&
+ (!use_block_allow && mode == MCAST_UNDEFINED))) {
+ CTR1(KTR_MLD, "%s: skip node", __func__);
+ continue;
+ }
+ if (is_source_query && ims->im6s_stp == 0) {
+ CTR1(KTR_MLD, "%s: skip unrecorded node",
+ __func__);
+ continue;
+ }
+ CTR1(KTR_MLD, "%s: append node", __func__);
+ if (!m_append(m, sizeof(struct in6_addr),
+ (void *)&ims->im6s_addr)) {
+ if (m != m0)
+ m_freem(m);
+ CTR1(KTR_MLD, "%s: m_append() failed.",
+ __func__);
+ return (-ENOMEM);
+ }
+ nbytes += sizeof(struct in6_addr);
+ ++msrcs;
+ if (msrcs == m0srcs)
+ break;
+ }
+ CTR2(KTR_MLD, "%s: msrcs is %d this packet", __func__,
+ msrcs);
+ pmr->mr_numsrc = htons(msrcs);
+ nbytes += (msrcs * sizeof(struct in6_addr));
+ }
+
+ if (is_source_query && msrcs == 0) {
+ CTR1(KTR_MLD, "%s: no recorded sources to report", __func__);
+ if (m != m0)
+ m_freem(m);
+ return (0);
+ }
+
+ /*
+ * We are good to go with first packet.
+ */
+ if (m != m0) {
+ CTR1(KTR_MLD, "%s: enqueueing first packet", __func__);
+ m->m_pkthdr.PH_vt.vt_nrecs = 1;
+ _IF_ENQUEUE(ifq, m);
+ } else
+ m->m_pkthdr.PH_vt.vt_nrecs++;
+
+ /*
+ * No further work needed if no source list in packet(s).
+ */
+ if (!record_has_sources)
+ return (nbytes);
+
+ /*
+ * Whilst sources remain to be announced, we need to allocate
+ * a new packet and fill out as many sources as will fit.
+ * Always try for a cluster first.
+ */
+ while (nims != NULL) {
+ if (_IF_QFULL(ifq)) {
+ CTR1(KTR_MLD, "%s: outbound queue full", __func__);
+ return (-ENOMEM);
+ }
+ m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
+ if (m == NULL)
+ m = m_gethdr(M_DONTWAIT, MT_DATA);
+ if (m == NULL)
+ return (-ENOMEM);
+ mld_save_context(m, ifp);
+ md = m_getptr(m, 0, &off);
+ pmr = (struct mldv2_record *)(mtod(md, uint8_t *) + off);
+ CTR1(KTR_MLD, "%s: allocated next packet", __func__);
+
+ if (!m_append(m, sizeof(struct mldv2_record), (void *)&mr)) {
+ if (m != m0)
+ m_freem(m);
+ CTR1(KTR_MLD, "%s: m_append() failed.", __func__);
+ return (-ENOMEM);
+ }
+ m->m_pkthdr.PH_vt.vt_nrecs = 1;
+ nbytes += sizeof(struct mldv2_record);
+
+ m0srcs = (ifp->if_mtu - MLD_MTUSPACE -
+ sizeof(struct mldv2_record)) / sizeof(struct in6_addr);
+
+ msrcs = 0;
+ RB_FOREACH_FROM(ims, ip6_msource_tree, nims) {
+ CTR2(KTR_MLD, "%s: visit node %s",
+ __func__, ip6_sprintf(ip6tbuf, &ims->im6s_addr));
+ now = im6s_get_mode(inm, ims, 1);
+ if ((now != mode) ||
+ (now == mode &&
+ (!use_block_allow && mode == MCAST_UNDEFINED))) {
+ CTR1(KTR_MLD, "%s: skip node", __func__);
+ continue;
+ }
+ if (is_source_query && ims->im6s_stp == 0) {
+ CTR1(KTR_MLD, "%s: skip unrecorded node",
+ __func__);
+ continue;
+ }
+ CTR1(KTR_MLD, "%s: append node", __func__);
+ if (!m_append(m, sizeof(struct in6_addr),
+ (void *)&ims->im6s_addr)) {
+ if (m != m0)
+ m_freem(m);
+ CTR1(KTR_MLD, "%s: m_append() failed.",
+ __func__);
+ return (-ENOMEM);
+ }
+ ++msrcs;
+ if (msrcs == m0srcs)
+ break;
+ }
+ pmr->mr_numsrc = htons(msrcs);
+ nbytes += (msrcs * sizeof(struct in6_addr));
+
+ CTR1(KTR_MLD, "%s: enqueueing next packet", __func__);
+ _IF_ENQUEUE(ifq, m);
+ }
+
+ return (nbytes);
+}
+
+/*
+ * Type used to mark record pass completion.
+ * We exploit the fact we can cast to this easily from the
+ * current filter modes on each ip_msource node.
+ */
+typedef enum {
+ REC_NONE = 0x00, /* MCAST_UNDEFINED */
+ REC_ALLOW = 0x01, /* MCAST_INCLUDE */
+ REC_BLOCK = 0x02, /* MCAST_EXCLUDE */
+ REC_FULL = REC_ALLOW | REC_BLOCK
+} rectype_t;
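+
+/*
+ * For illustration (a sketch, not part of these sources): with the
+ * MCAST_UNDEFINED/MCAST_INCLUDE/MCAST_EXCLUDE encoding of 0/1/2, a
+ * node's record type can be derived by a plain cast, inverting against
+ * the group's filter mode when the node is undefined at t1:
+ *
+ *	nrt = (rectype_t)im6s_get_mode(inm, ims, 1);
+ *	if (nrt == REC_NONE)
+ *		nrt = (rectype_t)(~mode & REC_FULL);
+ */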
+
+/*
+ * Enqueue an MLDv2 filter list change to the given output queue.
+ *
+ * Source list filter state is held in an RB-tree. When the filter list
+ * for a group is changed without changing its mode, we need to compute
+ * the deltas between T0 and T1 for each source in the filter set,
+ * and enqueue the appropriate ALLOW_NEW/BLOCK_OLD records.
+ *
+ * As we may potentially queue two record types, and the entire R-B tree
+ * needs to be walked at once, we break this out into its own function
+ * so we can generate a tightly packed queue of packets.
+ *
+ * XXX This could be written to only use one tree walk, although that makes
+ * serializing into the mbuf chains a bit harder. For now we do two walks
+ * which makes things easier on us, and it may or may not be harder on
+ * the L2 cache.
+ *
+ * If successful the size of all data appended to the queue is returned,
+ * otherwise an error code less than zero is returned, or zero if
+ * no record(s) were appended.
+ */
+static int
+mld_v2_enqueue_filter_change(struct ifqueue *ifq, struct in6_multi *inm)
+{
+ static const int MINRECLEN =
+ sizeof(struct mldv2_record) + sizeof(struct in6_addr);
+ struct ifnet *ifp;
+ struct mldv2_record mr;
+ struct mldv2_record *pmr;
+ struct ip6_msource *ims, *nims;
+ struct mbuf *m, *m0, *md;
+ int m0srcs, nbytes, npbytes, off, rsrcs, schanged;
+ int nallow, nblock;
+ uint8_t mode, now, then;
+ rectype_t crt, drt, nrt;
+#ifdef KTR
+ char ip6tbuf[INET6_ADDRSTRLEN];
+#endif
+
+ IN6_MULTI_LOCK_ASSERT();
+
+ if (inm->in6m_nsrc == 0 ||
+ (inm->in6m_st[0].iss_asm > 0 && inm->in6m_st[1].iss_asm > 0))
+ return (0);
+
+ ifp = inm->in6m_ifp; /* interface */
+ mode = inm->in6m_st[1].iss_fmode; /* filter mode at t1 */
+ crt = REC_NONE; /* current group record type */
+ drt = REC_NONE; /* mask of completed group record types */
+ nrt = REC_NONE; /* record type for current node */
+ m0srcs = 0; /* # sources which will fit in current mbuf chain */
+ npbytes = 0; /* # of bytes appended this packet */
+ nbytes = 0; /* # of bytes appended to group's state-change queue */
+ rsrcs = 0; /* # sources encoded in current record */
+ schanged = 0; /* # nodes encoded in overall filter change */
+ nallow = 0; /* # of source entries in ALLOW_NEW */
+ nblock = 0; /* # of source entries in BLOCK_OLD */
+ nims = NULL; /* next tree node pointer */
+
+ /*
+ * For each possible filter record mode.
+ * The first kind of source we encounter tells us which
+ * is the first kind of record we start appending.
+ * If a node transitioned to UNDEFINED at t1, its mode is treated
+ * as the inverse of the group's filter mode.
+ */
+ while (drt != REC_FULL) {
+ do {
+ m0 = ifq->ifq_tail;
+ if (m0 != NULL &&
+ (m0->m_pkthdr.PH_vt.vt_nrecs + 1 <=
+ MLD_V2_REPORT_MAXRECS) &&
+ (m0->m_pkthdr.len + MINRECLEN) <
+ (ifp->if_mtu - MLD_MTUSPACE)) {
+ m = m0;
+ m0srcs = (ifp->if_mtu - m0->m_pkthdr.len -
+ sizeof(struct mldv2_record)) /
+ sizeof(struct in6_addr);
+ CTR1(KTR_MLD,
+ "%s: use previous packet", __func__);
+ } else {
+ m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
+ if (m == NULL)
+ m = m_gethdr(M_DONTWAIT, MT_DATA);
+ if (m == NULL) {
+ CTR1(KTR_MLD,
+ "%s: m_get*() failed", __func__);
+ return (-ENOMEM);
+ }
+ m->m_pkthdr.PH_vt.vt_nrecs = 0;
+ mld_save_context(m, ifp);
+ m0srcs = (ifp->if_mtu - MLD_MTUSPACE -
+ sizeof(struct mldv2_record)) /
+ sizeof(struct in6_addr);
+ npbytes = 0;
+ CTR1(KTR_MLD,
+ "%s: allocated new packet", __func__);
+ }
+ /*
+ * Append the MLD group record header to the
+ * current packet's data area.
+ * Recalculate pointer to free space for next
+ * group record, in case m_append() allocated
+ * a new mbuf or cluster.
+ */
+ memset(&mr, 0, sizeof(mr));
+ mr.mr_addr = inm->in6m_addr;
+ in6_clearscope(&mr.mr_addr);
+ if (!m_append(m, sizeof(mr), (void *)&mr)) {
+ if (m != m0)
+ m_freem(m);
+ CTR1(KTR_MLD,
+ "%s: m_append() failed", __func__);
+ return (-ENOMEM);
+ }
+ npbytes += sizeof(struct mldv2_record);
+ if (m != m0) {
+ /* new packet; offset in chain */
+ md = m_getptr(m, npbytes -
+ sizeof(struct mldv2_record), &off);
+ pmr = (struct mldv2_record *)(mtod(md,
+ uint8_t *) + off);
+ } else {
+ /* current packet; offset from last append */
+ md = m_last(m);
+ pmr = (struct mldv2_record *)(mtod(md,
+ uint8_t *) + md->m_len -
+ sizeof(struct mldv2_record));
+ }
+ /*
+ * Begin walking the tree for this record type
+ * pass, or continue from where we left off
+ * previously if we had to allocate a new packet.
+ * Only report deltas in-mode at t1.
+ * We need not report included sources as allowed
+ * if we are in inclusive mode on the group,
+ * however the converse is not true.
+ */
+ rsrcs = 0;
+ if (nims == NULL) {
+ nims = RB_MIN(ip6_msource_tree,
+ &inm->in6m_srcs);
+ }
+ RB_FOREACH_FROM(ims, ip6_msource_tree, nims) {
+ CTR2(KTR_MLD, "%s: visit node %s", __func__,
+ ip6_sprintf(ip6tbuf, &ims->im6s_addr));
+ now = im6s_get_mode(inm, ims, 1);
+ then = im6s_get_mode(inm, ims, 0);
+ CTR3(KTR_MLD, "%s: mode: t0 %d, t1 %d",
+ __func__, then, now);
+ if (now == then) {
+ CTR1(KTR_MLD,
+ "%s: skip unchanged", __func__);
+ continue;
+ }
+ if (mode == MCAST_EXCLUDE &&
+ now == MCAST_INCLUDE) {
+ CTR1(KTR_MLD,
+ "%s: skip IN src on EX group",
+ __func__);
+ continue;
+ }
+ nrt = (rectype_t)now;
+ if (nrt == REC_NONE)
+ nrt = (rectype_t)(~mode & REC_FULL);
+ if (schanged++ == 0) {
+ crt = nrt;
+ } else if (crt != nrt)
+ continue;
+ if (!m_append(m, sizeof(struct in6_addr),
+ (void *)&ims->im6s_addr)) {
+ if (m != m0)
+ m_freem(m);
+ CTR1(KTR_MLD,
+ "%s: m_append() failed", __func__);
+ return (-ENOMEM);
+ }
+ nallow += !!(crt == REC_ALLOW);
+ nblock += !!(crt == REC_BLOCK);
+ if (++rsrcs == m0srcs)
+ break;
+ }
+ /*
+ * If we did not append any tree nodes on this
+ * pass, back out of allocations.
+ */
+ if (rsrcs == 0) {
+ npbytes -= sizeof(struct mldv2_record);
+ if (m != m0) {
+ CTR1(KTR_MLD,
+ "%s: m_free(m)", __func__);
+ m_freem(m);
+ } else {
+ CTR1(KTR_MLD,
+ "%s: m_adj(m, -mr)", __func__);
+ m_adj(m, -((int)sizeof(
+ struct mldv2_record)));
+ }
+ continue;
+ }
+ npbytes += (rsrcs * sizeof(struct in6_addr));
+ if (crt == REC_ALLOW)
+ pmr->mr_type = MLD_ALLOW_NEW_SOURCES;
+ else if (crt == REC_BLOCK)
+ pmr->mr_type = MLD_BLOCK_OLD_SOURCES;
+ pmr->mr_numsrc = htons(rsrcs);
+ /*
+ * Count the new group record, and enqueue this
+ * packet if it wasn't already queued.
+ */
+ m->m_pkthdr.PH_vt.vt_nrecs++;
+ if (m != m0)
+ _IF_ENQUEUE(ifq, m);
+ nbytes += npbytes;
+ } while (nims != NULL);
+ drt |= crt;
+ crt = (~crt & REC_FULL);
+ }
+
+ CTR3(KTR_MLD, "%s: queued %d ALLOW_NEW, %d BLOCK_OLD", __func__,
+ nallow, nblock);
+
+ return (nbytes);
+}
+
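+/*
+ * Merge the group's pending state-change report packets into the
+ * supplied queue, copying them first if further retransmissions
+ * remain, and coalescing records into the tail packet where the
+ * MTU and record-count limits allow.
+ */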
+static int
+mld_v2_merge_state_changes(struct in6_multi *inm, struct ifqueue *ifscq)
+{
+ struct ifqueue *gq;
+ struct mbuf *m; /* pending state-change */
+ struct mbuf *m0; /* copy of pending state-change */
+ struct mbuf *mt; /* last state-change in packet */
+ int docopy, domerge;
+ u_int recslen;
+
+ docopy = 0;
+ domerge = 0;
+ recslen = 0;
+
+ IN6_MULTI_LOCK_ASSERT();
+ MLD_LOCK_ASSERT();
+
+ /*
+ * If there are further pending retransmissions, make a writable
+ * copy of each queued state-change message before merging.
+ */
+ if (inm->in6m_scrv > 0)
+ docopy = 1;
+
+ gq = &inm->in6m_scq;
+#ifdef KTR
+ if (gq->ifq_head == NULL) {
+ CTR2(KTR_MLD, "%s: WARNING: queue for inm %p is empty",
+ __func__, inm);
+ }
+#endif
+
+ m = gq->ifq_head;
+ while (m != NULL) {
+ /*
+ * Only merge the report into the current packet if
+ * there is sufficient space to do so; an MLDv2 report
+ * packet may only contain 65,535 group records.
+ * Always use a simple mbuf chain concatenation to do this,
+ * as large state changes for single groups may have
+ * allocated clusters.
+ */
+ domerge = 0;
+ mt = ifscq->ifq_tail;
+ if (mt != NULL) {
+ recslen = m_length(m, NULL);
+
+ if ((mt->m_pkthdr.PH_vt.vt_nrecs +
+ m->m_pkthdr.PH_vt.vt_nrecs <=
+ MLD_V2_REPORT_MAXRECS) &&
+ (mt->m_pkthdr.len + recslen <=
+ (inm->in6m_ifp->if_mtu - MLD_MTUSPACE)))
+ domerge = 1;
+ }
+
+ if (!domerge && _IF_QFULL(gq)) {
+ CTR2(KTR_MLD,
+ "%s: outbound queue full, skipping whole packet %p",
+ __func__, m);
+ mt = m->m_nextpkt;
+ if (!docopy)
+ m_freem(m);
+ m = mt;
+ continue;
+ }
+
+ if (!docopy) {
+ CTR2(KTR_MLD, "%s: dequeueing %p", __func__, m);
+ _IF_DEQUEUE(gq, m0);
+ m = m0->m_nextpkt;
+ } else {
+ CTR2(KTR_MLD, "%s: copying %p", __func__, m);
+ m0 = m_dup(m, M_NOWAIT);
+ if (m0 == NULL)
+ return (ENOMEM);
+ m0->m_nextpkt = NULL;
+ m = m->m_nextpkt;
+ }
+
+ if (!domerge) {
+ CTR3(KTR_MLD, "%s: queueing %p to ifscq %p)",
+ __func__, m0, ifscq);
+ _IF_ENQUEUE(ifscq, m0);
+ } else {
+ struct mbuf *mtl; /* last mbuf of packet mt */
+
+ CTR3(KTR_MLD, "%s: merging %p with ifscq tail %p)",
+ __func__, m0, mt);
+
+ mtl = m_last(mt);
+ m0->m_flags &= ~M_PKTHDR;
+ mt->m_pkthdr.len += recslen;
+ mt->m_pkthdr.PH_vt.vt_nrecs +=
+ m0->m_pkthdr.PH_vt.vt_nrecs;
+
+ mtl->m_next = m0;
+ }
+ }
+
+ return (0);
+}
+
+/*
+ * Respond to a pending MLDv2 General Query.
+ */
+static void
+mld_v2_dispatch_general_query(struct mld_ifinfo *mli)
+{
+ struct ifmultiaddr *ifma, *tifma;
+ struct ifnet *ifp;
+ struct in6_multi *inm;
+ int retval;
+
+ IN6_MULTI_LOCK_ASSERT();
+ MLD_LOCK_ASSERT();
+
+ KASSERT(mli->mli_version == MLD_VERSION_2,
+ ("%s: called when version %d", __func__, mli->mli_version));
+
+ ifp = mli->mli_ifp;
+
+ IF_ADDR_LOCK(ifp);
+ TAILQ_FOREACH_SAFE(ifma, &ifp->if_multiaddrs, ifma_link, tifma) {
+ if (ifma->ifma_addr->sa_family != AF_INET6 ||
+ ifma->ifma_protospec == NULL)
+ continue;
+
+ inm = (struct in6_multi *)ifma->ifma_protospec;
+ KASSERT(ifp == inm->in6m_ifp,
+ ("%s: inconsistent ifp", __func__));
+
+ switch (inm->in6m_state) {
+ case MLD_NOT_MEMBER:
+ case MLD_SILENT_MEMBER:
+ break;
+ case MLD_REPORTING_MEMBER:
+ case MLD_IDLE_MEMBER:
+ case MLD_LAZY_MEMBER:
+ case MLD_SLEEPING_MEMBER:
+ case MLD_AWAKENING_MEMBER:
+ inm->in6m_state = MLD_REPORTING_MEMBER;
+ retval = mld_v2_enqueue_group_record(&mli->mli_gq,
+ inm, 0, 0, 0, 0);
+ CTR2(KTR_MLD, "%s: enqueue record = %d",
+ __func__, retval);
+ break;
+ case MLD_G_QUERY_PENDING_MEMBER:
+ case MLD_SG_QUERY_PENDING_MEMBER:
+ case MLD_LEAVING_MEMBER:
+ break;
+ }
+ }
+ IF_ADDR_UNLOCK(ifp);
+
+ mld_dispatch_queue(&mli->mli_gq, MLD_MAX_RESPONSE_BURST);
+
+ /*
+ * Slew transmission of bursts over 500ms intervals.
+ */
+ if (mli->mli_gq.ifq_head != NULL) {
+ mli->mli_v2_timer = 1 + MLD_RANDOM_DELAY(
+ MLD_RESPONSE_BURST_INTERVAL);
+ V_interface_timers_running6 = 1;
+ }
+}
+
+/*
+ * Transmit the next pending message in the output queue.
+ *
+ * VIMAGE: Needs to store/restore vnet pointer on a per-mbuf-chain basis.
+ * MRT: Nothing needs to be done, as MLD traffic is always local to
+ * a link and uses a link-scope multicast address.
+ */
+static void
+mld_dispatch_packet(struct mbuf *m)
+{
+ struct ip6_moptions im6o;
+ struct ifnet *ifp;
+ struct ifnet *oifp;
+ struct mbuf *m0;
+ struct mbuf *md;
+ struct ip6_hdr *ip6;
+ struct mld_hdr *mld;
+ int error;
+ int off;
+ int type;
+ uint32_t ifindex;
+
+ CTR2(KTR_MLD, "%s: transmit %p", __func__, m);
+
+ /*
+ * Set VNET image pointer from enqueued mbuf chain
+ * before doing anything else. Whilst we use interface
+ * indexes to guard against interface detach, they are
+ * unique to each VIMAGE and must be retrieved.
+ */
+ ifindex = mld_restore_context(m);
+
+ /*
+ * Check if the ifnet still exists. This limits the scope of
+ * any race in the absence of a global ifp lock for low cost
+ * (an array lookup).
+ */
+ ifp = ifnet_byindex(ifindex);
+ if (ifp == NULL) {
+ CTR3(KTR_MLD, "%s: dropped %p as ifindex %u went away.",
+ __func__, m, ifindex);
+ m_freem(m);
+ IP6STAT_INC(ip6s_noroute);
+ goto out;
+ }
+
+ im6o.im6o_multicast_hlim = 1;
+ im6o.im6o_multicast_loop = (V_ip6_mrouter != NULL);
+ im6o.im6o_multicast_ifp = ifp;
+
+ if (m->m_flags & M_MLDV1) {
+ m0 = m;
+ } else {
+ m0 = mld_v2_encap_report(ifp, m);
+ if (m0 == NULL) {
+ CTR2(KTR_MLD, "%s: dropped %p", __func__, m);
+ m_freem(m);
+ IP6STAT_INC(ip6s_odropped);
+ goto out;
+ }
+ }
+
+ mld_scrub_context(m0);
+ m->m_flags &= ~(M_PROTOFLAGS);
+ m0->m_pkthdr.rcvif = V_loif;
+
+ ip6 = mtod(m0, struct ip6_hdr *);
+#if 0
+ (void)in6_setscope(&ip6->ip6_dst, ifp, NULL); /* XXX LOR */
+#else
+ /*
+ * XXX XXX Break some KPI rules to prevent an LOR which would
+ * occur if we called in6_setscope() at transmission.
+ * See comments at top of file.
+ */
+ MLD_EMBEDSCOPE(&ip6->ip6_dst, ifp->if_index);
+#endif
+
+ /*
+ * Retrieve the ICMPv6 type before handoff to ip6_output(),
+ * so we can bump the stats.
+ */
+ md = m_getptr(m0, sizeof(struct ip6_hdr), &off);
+ mld = (struct mld_hdr *)(mtod(md, uint8_t *) + off);
+ type = mld->mld_type;
+
+ error = ip6_output(m0, &mld_po, NULL, IPV6_UNSPECSRC, &im6o,
+ &oifp, NULL);
+ if (error) {
+ CTR3(KTR_MLD, "%s: ip6_output(%p) = %d", __func__, m0, error);
+ goto out;
+ }
+ ICMP6STAT_INC(icp6s_outhist[type]);
+ if (oifp != NULL) {
+ icmp6_ifstat_inc(oifp, ifs6_out_msg);
+ switch (type) {
+ case MLD_LISTENER_REPORT:
+ case MLDV2_LISTENER_REPORT:
+ icmp6_ifstat_inc(oifp, ifs6_out_mldreport);
+ break;
+ case MLD_LISTENER_DONE:
+ icmp6_ifstat_inc(oifp, ifs6_out_mlddone);
+ break;
+ }
+ }
+out:
+ return;
+}
+
+/*
+ * Encapsulate an MLDv2 report.
+ *
+ * KAME IPv6 requires that hop-by-hop options be passed separately,
+ * and that the IPv6 header be prepended in a separate mbuf.
+ *
+ * Returns a pointer to the new mbuf chain head, or NULL if the
+ * allocation failed.
+ */
+static struct mbuf *
+mld_v2_encap_report(struct ifnet *ifp, struct mbuf *m)
+{
+ struct mbuf *mh;
+ struct mldv2_report *mld;
+ struct ip6_hdr *ip6;
+ struct in6_ifaddr *ia;
+ int mldreclen;
+
+ KASSERT(ifp != NULL, ("%s: null ifp", __func__));
+ KASSERT((m->m_flags & M_PKTHDR),
+ ("%s: mbuf chain %p is !M_PKTHDR", __func__, m));
+
+ /*
+ * RFC3590: OK to send as :: or tentative during DAD.
+ */
+ ia = in6ifa_ifpforlinklocal(ifp, IN6_IFF_NOTREADY|IN6_IFF_ANYCAST);
+ if (ia == NULL)
+ CTR1(KTR_MLD, "%s: warning: ia is NULL", __func__);
+
+ MGETHDR(mh, M_DONTWAIT, MT_HEADER);
+ if (mh == NULL) {
+ if (ia != NULL)
+ ifa_free(&ia->ia_ifa);
+ m_freem(m);
+ return (NULL);
+ }
+ MH_ALIGN(mh, sizeof(struct ip6_hdr) + sizeof(struct mldv2_report));
+
+ mldreclen = m_length(m, NULL);
+ CTR2(KTR_MLD, "%s: mldreclen is %d", __func__, mldreclen);
+
+ mh->m_len = sizeof(struct ip6_hdr) + sizeof(struct mldv2_report);
+ mh->m_pkthdr.len = sizeof(struct ip6_hdr) +
+ sizeof(struct mldv2_report) + mldreclen;
+
+ ip6 = mtod(mh, struct ip6_hdr *);
+ ip6->ip6_flow = 0;
+ ip6->ip6_vfc &= ~IPV6_VERSION_MASK;
+ ip6->ip6_vfc |= IPV6_VERSION;
+ ip6->ip6_nxt = IPPROTO_ICMPV6;
+ ip6->ip6_src = ia ? ia->ia_addr.sin6_addr : in6addr_any;
+ if (ia != NULL)
+ ifa_free(&ia->ia_ifa);
+ ip6->ip6_dst = in6addr_linklocal_allv2routers;
+ /* scope ID will be set in netisr */
+
+ mld = (struct mldv2_report *)(ip6 + 1);
+ mld->mld_type = MLDV2_LISTENER_REPORT;
+ mld->mld_code = 0;
+ mld->mld_cksum = 0;
+ mld->mld_v2_reserved = 0;
+ mld->mld_v2_numrecs = htons(m->m_pkthdr.PH_vt.vt_nrecs);
+ m->m_pkthdr.PH_vt.vt_nrecs = 0;
+
+ mh->m_next = m;
+ mld->mld_cksum = in6_cksum(mh, IPPROTO_ICMPV6,
+ sizeof(struct ip6_hdr), sizeof(struct mldv2_report) + mldreclen);
+ return (mh);
+}
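+
+/*
+ * Resulting chain layout (an illustrative sketch):
+ *
+ *	mh: [ip6_hdr][mldv2_report hdr] --> m: [group records ...]
+ *
+ * Note that the router alert hop-by-hop option is not part of this
+ * chain; ip6_output() inserts it from the packet options set up in
+ * mld_init() (mld_po.ip6po_hbh).
+ */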
+
+#ifdef KTR
+static char *
+mld_rec_type_to_str(const int type)
+{
+
+ switch (type) {
+ case MLD_CHANGE_TO_EXCLUDE_MODE:
+ return "TO_EX";
+ break;
+ case MLD_CHANGE_TO_INCLUDE_MODE:
+ return "TO_IN";
+ break;
+ case MLD_MODE_IS_EXCLUDE:
+ return "MODE_EX";
+ break;
+ case MLD_MODE_IS_INCLUDE:
+ return "MODE_IN";
+ break;
+ case MLD_ALLOW_NEW_SOURCES:
+ return "ALLOW_NEW";
+ break;
+ case MLD_BLOCK_OLD_SOURCES:
+ return "BLOCK_OLD";
+ break;
+ default:
+ break;
+ }
+ return "unknown";
+}
+#endif
+
+static void
+mld_init(void *unused __unused)
+{
+
+ CTR1(KTR_MLD, "%s: initializing", __func__);
+ MLD_LOCK_INIT();
+
+ ip6_initpktopts(&mld_po);
+ mld_po.ip6po_hlim = 1;
+ mld_po.ip6po_hbh = &mld_ra.hbh;
+ mld_po.ip6po_prefer_tempaddr = IP6PO_TEMPADDR_NOTPREFER;
+ mld_po.ip6po_flags = IP6PO_DONTFRAG;
+}
+SYSINIT(mld_init, SI_SUB_PSEUDO, SI_ORDER_MIDDLE, mld_init, NULL);
+
+static void
+mld_uninit(void *unused __unused)
+{
+
+ CTR1(KTR_MLD, "%s: tearing down", __func__);
+ MLD_LOCK_DESTROY();
+}
+SYSUNINIT(mld_uninit, SI_SUB_PSEUDO, SI_ORDER_MIDDLE, mld_uninit, NULL);
+
+static void
+vnet_mld_init(const void *unused __unused)
+{
+
+ CTR1(KTR_MLD, "%s: initializing", __func__);
+
+ LIST_INIT(&V_mli_head);
+}
+VNET_SYSINIT(vnet_mld_init, SI_SUB_PSEUDO, SI_ORDER_ANY, vnet_mld_init,
+ NULL);
+
+static void
+vnet_mld_uninit(const void *unused __unused)
+{
+
+ CTR1(KTR_MLD, "%s: tearing down", __func__);
+
+ KASSERT(LIST_EMPTY(&V_mli_head),
+ ("%s: mli list not empty; ifnets not detached?", __func__));
+}
+VNET_SYSUNINIT(vnet_mld_uninit, SI_SUB_PSEUDO, SI_ORDER_ANY, vnet_mld_uninit,
+ NULL);
+
+static int
+mld_modevent(module_t mod, int type, void *unused __unused)
+{
+
+ switch (type) {
+ case MOD_LOAD:
+ case MOD_UNLOAD:
+ break;
+ default:
+ return (EOPNOTSUPP);
+ }
+ return (0);
+}
+
+static moduledata_t mld_mod = {
+ "mld",
+ mld_modevent,
+ 0
+};
+DECLARE_MODULE(mld, mld_mod, SI_SUB_PSEUDO, SI_ORDER_ANY);
diff --git a/rtems/freebsd/netinet6/mld6.h b/rtems/freebsd/netinet6/mld6.h
new file mode 100644
index 00000000..1839b4f7
--- /dev/null
+++ b/rtems/freebsd/netinet6/mld6.h
@@ -0,0 +1,112 @@
+/*-
+ * Copyright (c) 2009 Bruce Simpson.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _NETINET6_MLD6_HH_
+#define _NETINET6_MLD6_HH_
+
+/*
+ * Multicast Listener Discovery (MLD) definitions.
+ */
+
+/* Minimum length of any MLD protocol message. */
+#define MLD_MINLEN sizeof(struct icmp6_hdr)
+
+/*
+ * MLD v2 query format.
+ * See <netinet/icmp6.h> for struct mld_hdr
+ * (MLDv1 query and host report format).
+ */
+struct mldv2_query {
+ struct icmp6_hdr mld_icmp6_hdr; /* ICMPv6 header */
+ struct in6_addr mld_addr; /* address being queried */
+ uint8_t mld_misc; /* reserved/suppress/robustness */
+ uint8_t mld_qqi; /* querier's query interval */
+ uint16_t mld_numsrc; /* number of sources */
+ /* followed by 1..numsrc source addresses */
+} __packed;
+#define MLD_V2_QUERY_MINLEN sizeof(struct mldv2_query)
+#define MLD_MRC_EXP(x) ((ntohs((x)) >> 12) & 0x0007)
+#define MLD_MRC_MANT(x) (ntohs((x)) & 0x0fff)
+#define MLD_QQIC_EXP(x) (((x) >> 4) & 0x07)
+#define MLD_QQIC_MANT(x) ((x) & 0x0f)
+#define MLD_QRESV(x) (((x) >> 4) & 0x0f)
+#define MLD_SFLAG(x) (((x) >> 3) & 0x01)
+#define MLD_QRV(x) ((x) & 0x07)
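+
+/*
+ * Illustrative decoding sketch (not part of this header): both the
+ * Maximum Response Code and the QQIC use a linear range with a
+ * floating-point escape, per RFC 3810 Sections 5.1.3 and 5.1.9.
+ * 'mrc' is taken in network byte order, as the MLD_MRC_* macros
+ * apply ntohs() themselves; the QQIC is a single octet.
+ */
+static __inline uint32_t
+mld_mrc_to_msec(uint16_t mrc)
+{
+
+	if (ntohs(mrc) < 32768)
+		return (ntohs(mrc));
+	return ((MLD_MRC_MANT(mrc) | 0x1000) << (MLD_MRC_EXP(mrc) + 3));
+}
+
+static __inline uint32_t
+mld_qqic_to_sec(uint8_t qqic)
+{
+
+	if (qqic < 128)
+		return (qqic);
+	return ((MLD_QQIC_MANT(qqic) | 0x10) << (MLD_QQIC_EXP(qqic) + 3));
+}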
+
+/*
+ * MLDv2 host membership report header.
+ * mld_type: MLDV2_LISTENER_REPORT
+ */
+struct mldv2_report {
+ struct icmp6_hdr mld_icmp6_hdr;
+ /* followed by 1..numgrps records */
+} __packed;
+/* overlaid on struct icmp6_hdr. */
+#define mld_numrecs mld_icmp6_hdr.icmp6_data16[1]
+
+struct mldv2_record {
+ uint8_t mr_type; /* record type */
+ uint8_t mr_datalen; /* length of auxiliary data */
+ uint16_t mr_numsrc; /* number of sources */
+ struct in6_addr mr_addr; /* address being reported */
+ /* followed by 1..numsrc source addresses */
+} __packed;
+#define MLD_V2_REPORT_MAXRECS 65535
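+
+/*
+ * An MLDv2 group record's total length works out to (illustrative;
+ * mr_datalen counts 32-bit words of auxiliary data per RFC 3810):
+ *
+ *	sizeof(struct mldv2_record)			20 octets
+ *	+ ntohs(mr_numsrc) * sizeof(struct in6_addr)
+ *	+ mr_datalen * 4
+ */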
+
+/*
+ * MLDv2 report modes.
+ */
+#define MLD_DO_NOTHING 0 /* don't send a record */
+#define MLD_MODE_IS_INCLUDE 1 /* MODE_IN */
+#define MLD_MODE_IS_EXCLUDE 2 /* MODE_EX */
+#define MLD_CHANGE_TO_INCLUDE_MODE 3 /* TO_IN */
+#define MLD_CHANGE_TO_EXCLUDE_MODE 4 /* TO_EX */
+#define MLD_ALLOW_NEW_SOURCES 5 /* ALLOW_NEW */
+#define MLD_BLOCK_OLD_SOURCES 6 /* BLOCK_OLD */
+
+/*
+ * MLDv2 query types.
+ */
+#define MLD_V2_GENERAL_QUERY 1
+#define MLD_V2_GROUP_QUERY 2
+#define MLD_V2_GROUP_SOURCE_QUERY 3
+
+/*
+ * Maximum report interval for MLDv1 host membership reports.
+ */
+#define MLD_V1_MAX_RI 10
+
+/*
+ * MLD_TIMER_SCALE denotes that the MLD code field specifies
+ * time in milliseconds.
+ */
+#define MLD_TIMER_SCALE 1000
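+
+/*
+ * For example (a sketch of the intended use): an MLDv1 query's
+ * mld_maxdelay is expressed in milliseconds, so converting it to
+ * fast-timeout ticks looks like:
+ *
+ *	timer = (ntohs(mld->mld_maxdelay) * PR_FASTHZ) / MLD_TIMER_SCALE;
+ */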
+
+#endif /* _NETINET6_MLD6_HH_ */
diff --git a/rtems/freebsd/netinet6/mld6_var.h b/rtems/freebsd/netinet6/mld6_var.h
new file mode 100644
index 00000000..40d70e51
--- /dev/null
+++ b/rtems/freebsd/netinet6/mld6_var.h
@@ -0,0 +1,164 @@
+/*-
+ * Copyright (c) 2009 Bruce Simpson.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+#ifndef _NETINET6_MLD6_VAR_HH_
+#define _NETINET6_MLD6_VAR_HH_
+
+/*
+ * Multicast Listener Discovery (MLD)
+ * implementation-specific definitions.
+ */
+
+#ifdef _KERNEL
+
+/*
+ * Per-link MLD state.
+ */
+struct mld_ifinfo {
+ LIST_ENTRY(mld_ifinfo) mli_link;
+ struct ifnet *mli_ifp; /* interface this instance belongs to */
+ uint32_t mli_version; /* MLDv1 Host Compatibility Mode */
+ uint32_t mli_v1_timer; /* MLDv1 Querier Present timer (s) */
+ uint32_t mli_v2_timer; /* MLDv2 General Query (interface) timer (s)*/
+ uint32_t mli_flags; /* MLD per-interface flags */
+ uint32_t mli_rv; /* MLDv2 Robustness Variable */
+ uint32_t mli_qi; /* MLDv2 Query Interval (s) */
+ uint32_t mli_qri; /* MLDv2 Query Response Interval (s) */
+ uint32_t mli_uri; /* MLDv2 Unsolicited Report Interval (s) */
+ SLIST_HEAD(,in6_multi) mli_relinmhead; /* released groups */
+ struct ifqueue mli_gq; /* queue of general query responses */
+};
+#define MLIF_SILENT 0x00000001 /* Do not use MLD on this ifp */
+#define MLIF_USEALLOW 0x00000002 /* Use ALLOW/BLOCK for joins/leaves */
+
+#define MLD_RANDOM_DELAY(X) (arc4random() % (X) + 1)
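+
+/*
+ * MLD_RANDOM_DELAY(X) yields a uniform value in [1, X]. For example
+ * (assuming the stock PR_FASTHZ of 5 ticks per second),
+ * MLD_RANDOM_DELAY(MLD_V1_MAX_RI * PR_FASTHZ) picks 1..50 fast-timeout
+ * ticks, i.e. up to the 10 second MLDv1 maximum report interval.
+ */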
+#define MLD_MAX_STATE_CHANGES 24 /* Max pending changes per group */
+
+/*
+ * MLD per-group states.
+ */
+#define MLD_NOT_MEMBER 0 /* Can garbage collect group */
+#define MLD_SILENT_MEMBER 1 /* Do not perform MLD for group */
+#define MLD_REPORTING_MEMBER 2 /* MLDv1 we are reporter */
+#define MLD_IDLE_MEMBER 3 /* MLDv1 we reported last */
+#define MLD_LAZY_MEMBER 4 /* MLDv1 other member reporting */
+#define MLD_SLEEPING_MEMBER 5 /* MLDv1 start query response */
+#define MLD_AWAKENING_MEMBER 6 /* MLDv1 group timer will start */
+#define MLD_G_QUERY_PENDING_MEMBER 7 /* MLDv2 group query pending */
+#define MLD_SG_QUERY_PENDING_MEMBER 8 /* MLDv2 source query pending */
+#define MLD_LEAVING_MEMBER 9 /* MLDv2 dying gasp (pending last */
+ /* retransmission of INCLUDE {}) */
+
+/*
+ * MLD version tag.
+ */
+#define MLD_VERSION_NONE 0 /* Invalid */
+#define MLD_VERSION_1 1
+#define MLD_VERSION_2 2 /* Default */
+
+/*
+ * MLDv2 protocol control variables.
+ */
+#define MLD_RV_INIT 2 /* Robustness Variable */
+#define MLD_RV_MIN 1
+#define MLD_RV_MAX 7
+
+#define MLD_QI_INIT 125 /* Query Interval (s) */
+#define MLD_QI_MIN 1
+#define MLD_QI_MAX 255
+
+#define MLD_QRI_INIT 10 /* Query Response Interval (s) */
+#define MLD_QRI_MIN 1
+#define MLD_QRI_MAX 255
+
+#define MLD_URI_INIT 3 /* Unsolicited Report Interval (s) */
+#define MLD_URI_MIN 0
+#define MLD_URI_MAX 10
+
+#define MLD_MAX_GS_SOURCES 256 /* # of sources in rx GS query */
+#define MLD_MAX_G_GS_PACKETS 8 /* # of packets to answer G/GS */
+#define MLD_MAX_STATE_CHANGE_PACKETS 8 /* # of packets per state change */
+#define MLD_MAX_RESPONSE_PACKETS 16 /* # of packets for general query */
+#define MLD_MAX_RESPONSE_BURST 4 /* # of responses to send at once */
+#define MLD_RESPONSE_BURST_INTERVAL (PR_FASTHZ / 2) /* 500ms */
+
+/*
+ * MLD-specific mbuf flags.
+ */
+#define M_MLDV1 M_PROTO1 /* Packet is MLDv1 */
+#define M_GROUPREC M_PROTO3 /* mbuf chain is a group record */
+
+/*
+ * Leading space for MLDv2 reports inside MTU.
+ *
+ * NOTE: This differs from IGMPv3 significantly. KAME IPv6 requires
+ * that a fully formed mbuf chain *without* the Router Alert option
+ * is passed to ip6_output(), however we must account for it in the
+ * MTU if we need to split an MLDv2 report into several packets.
+ *
+ * We now put the MLDv2 report header in the initial mbuf containing
+ * the IPv6 header.
+ */
+#define MLD_MTUSPACE (sizeof(struct ip6_hdr) + sizeof(struct mld_raopt) + \
+ sizeof(struct icmp6_hdr))
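+
+/*
+ * Worked example (assuming an 8 octet router alert option block):
+ * on a 1500 octet Ethernet MTU, MLD_MTUSPACE is 40 + 8 + 8 = 56
+ * octets, so the first group record of a full-sized report can carry
+ * (1500 - 56 - 20) / 16 = 89 source addresses.
+ */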
+
+/*
+ * Subsystem lock macros.
+ * The MLD lock is only taken with MLD. Currently it is system-wide.
+ * VIMAGE: The lock could be pushed to per-VIMAGE granularity in future.
+ */
+#define MLD_LOCK_INIT() mtx_init(&mld_mtx, "mld_mtx", NULL, MTX_DEF)
+#define MLD_LOCK_DESTROY() mtx_destroy(&mld_mtx)
+#define MLD_LOCK() mtx_lock(&mld_mtx)
+#define MLD_LOCK_ASSERT() mtx_assert(&mld_mtx, MA_OWNED)
+#define MLD_UNLOCK() mtx_unlock(&mld_mtx)
+#define MLD_UNLOCK_ASSERT() mtx_assert(&mld_mtx, MA_NOTOWNED)
+
+/*
+ * Per-link MLD context.
+ */
+#define MLD_IFINFO(ifp) \
+ (((struct in6_ifextra *)(ifp)->if_afdata[AF_INET6])->mld_ifinfo)
+
+int mld_change_state(struct in6_multi *, const int);
+struct mld_ifinfo *
+ mld_domifattach(struct ifnet *);
+void mld_domifdetach(struct ifnet *);
+void mld_fasttimo(void);
+void mld_ifdetach(struct ifnet *);
+int mld_input(struct mbuf *, int, int);
+void mld_slowtimo(void);
+
+#ifdef SYSCTL_DECL
+SYSCTL_DECL(_net_inet6_mld);
+#endif
+
+#endif /* _KERNEL */
+
+#endif /* _NETINET6_MLD6_VAR_HH_ */
diff --git a/rtems/freebsd/netinet6/nd6.c b/rtems/freebsd/netinet6/nd6.c
new file mode 100644
index 00000000..7615f3ad
--- /dev/null
+++ b/rtems/freebsd/netinet6/nd6.c
@@ -0,0 +1,2249 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the project nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $KAME: nd6.c,v 1.144 2001/05/24 07:44:00 itojun Exp $
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/freebsd/local/opt_inet.h>
+#include <rtems/freebsd/local/opt_inet6.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/callout.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/sockio.h>
+#include <rtems/freebsd/sys/time.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/protosw.h>
+#include <rtems/freebsd/sys/errno.h>
+#include <rtems/freebsd/sys/syslog.h>
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/rwlock.h>
+#include <rtems/freebsd/sys/queue.h>
+#include <rtems/freebsd/sys/sysctl.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/if_arc.h>
+#include <rtems/freebsd/net/if_dl.h>
+#include <rtems/freebsd/net/if_types.h>
+#include <rtems/freebsd/net/iso88025.h>
+#include <rtems/freebsd/net/fddi.h>
+#include <rtems/freebsd/net/route.h>
+#include <rtems/freebsd/net/vnet.h>
+
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/net/if_llatbl.h>
+#define L3_ADDR_SIN6(le) ((struct sockaddr_in6 *) L3_ADDR(le))
+#include <rtems/freebsd/netinet/if_ether.h>
+#include <rtems/freebsd/netinet6/in6_var.h>
+#include <rtems/freebsd/netinet/ip6.h>
+#include <rtems/freebsd/netinet6/ip6_var.h>
+#include <rtems/freebsd/netinet6/scope6_var.h>
+#include <rtems/freebsd/netinet6/nd6.h>
+#include <rtems/freebsd/netinet/icmp6.h>
+
+#include <rtems/freebsd/sys/limits.h>
+
+#include <rtems/freebsd/security/mac/mac_framework.h>
+
+#define ND6_SLOWTIMER_INTERVAL (60 * 60) /* 1 hour */
+#define ND6_RECALC_REACHTM_INTERVAL (60 * 120) /* 2 hours */
+
+#define SIN6(s) ((struct sockaddr_in6 *)s)
+
+/* timer values */
+VNET_DEFINE(int, nd6_prune) = 1; /* walk list every second */
+VNET_DEFINE(int, nd6_delay) = 5; /* delay first probe by 5 seconds */
+VNET_DEFINE(int, nd6_umaxtries) = 3; /* maximum unicast query */
+VNET_DEFINE(int, nd6_mmaxtries) = 3; /* maximum multicast query */
+VNET_DEFINE(int, nd6_useloopback) = 1; /* use loopback interface for
+ * local traffic */
+VNET_DEFINE(int, nd6_gctimer) = (60 * 60 * 24); /* 1 day: garbage
+ * collection timer */
+
+/* preventing too many loops in ND option parsing */
+static VNET_DEFINE(int, nd6_maxndopt) = 10; /* max # of ND options allowed */
+
+VNET_DEFINE(int, nd6_maxnudhint) = 0; /* max # of subsequent upper
+ * layer hints */
+static VNET_DEFINE(int, nd6_maxqueuelen) = 1; /* max pkts cached in unresolved
+ * ND entries */
+#define V_nd6_maxndopt VNET(nd6_maxndopt)
+#define V_nd6_maxqueuelen VNET(nd6_maxqueuelen)
+
+#ifdef ND6_DEBUG
+VNET_DEFINE(int, nd6_debug) = 1;
+#else
+VNET_DEFINE(int, nd6_debug) = 0;
+#endif
+
+/* for debugging? */
+#if 0
+static int nd6_inuse, nd6_allocated;
+#endif
+
+VNET_DEFINE(struct nd_drhead, nd_defrouter);
+VNET_DEFINE(struct nd_prhead, nd_prefix);
+
+VNET_DEFINE(int, nd6_recalc_reachtm_interval) = ND6_RECALC_REACHTM_INTERVAL;
+#define V_nd6_recalc_reachtm_interval VNET(nd6_recalc_reachtm_interval)
+
+static struct sockaddr_in6 all1_sa;
+
+static int nd6_is_new_addr_neighbor __P((struct sockaddr_in6 *,
+ struct ifnet *));
+static void nd6_setmtu0(struct ifnet *, struct nd_ifinfo *);
+static void nd6_slowtimo(void *);
+static int regen_tmpaddr(struct in6_ifaddr *);
+static struct llentry *nd6_free(struct llentry *, int);
+static void nd6_llinfo_timer(void *);
+static void clear_llinfo_pqueue(struct llentry *);
+
+static VNET_DEFINE(struct callout, nd6_slowtimo_ch);
+#define V_nd6_slowtimo_ch VNET(nd6_slowtimo_ch)
+
+VNET_DEFINE(struct callout, nd6_timer_ch);
+
+void
+nd6_init(void)
+{
+ int i;
+
+ LIST_INIT(&V_nd_prefix);
+
+ all1_sa.sin6_family = AF_INET6;
+ all1_sa.sin6_len = sizeof(struct sockaddr_in6);
+ for (i = 0; i < sizeof(all1_sa.sin6_addr); i++)
+ all1_sa.sin6_addr.s6_addr[i] = 0xff;
+
+ /* initialization of the default router list */
+ TAILQ_INIT(&V_nd_defrouter);
+
+ /* start timer */
+ callout_init(&V_nd6_slowtimo_ch, 0);
+ callout_reset(&V_nd6_slowtimo_ch, ND6_SLOWTIMER_INTERVAL * hz,
+ nd6_slowtimo, curvnet);
+}
+
+#ifdef VIMAGE
+void
+nd6_destroy()
+{
+
+ callout_drain(&V_nd6_slowtimo_ch);
+ callout_drain(&V_nd6_timer_ch);
+}
+#endif
+
+struct nd_ifinfo *
+nd6_ifattach(struct ifnet *ifp)
+{
+ struct nd_ifinfo *nd;
+
+ nd = (struct nd_ifinfo *)malloc(sizeof(*nd), M_IP6NDP, M_WAITOK);
+ bzero(nd, sizeof(*nd));
+
+ nd->initialized = 1;
+
+ nd->chlim = IPV6_DEFHLIM;
+ nd->basereachable = REACHABLE_TIME;
+ nd->reachable = ND_COMPUTE_RTIME(nd->basereachable);
+ nd->retrans = RETRANS_TIMER;
+ /*
+ * Note that the default value of ip6_accept_rtadv is 0, which means
+ * we won't accept RAs by default even if we set ND6_IFF_ACCEPT_RTADV
+ * here.
+ */
+ nd->flags = (ND6_IFF_PERFORMNUD | ND6_IFF_ACCEPT_RTADV);
+
+ /* XXX: we cannot call nd6_setmtu since ifp is not fully initialized */
+ nd6_setmtu0(ifp, nd);
+
+ return nd;
+}
+
+void
+nd6_ifdetach(struct nd_ifinfo *nd)
+{
+
+ free(nd, M_IP6NDP);
+}
+
+/*
+ * Reset ND level link MTU. This function is called when the physical MTU
+ * changes, which means we might have to adjust the ND level MTU.
+ */
+void
+nd6_setmtu(struct ifnet *ifp)
+{
+
+ nd6_setmtu0(ifp, ND_IFINFO(ifp));
+}
+
+/* XXX todo: do not maintain copy of ifp->if_mtu in ndi->maxmtu */
+void
+nd6_setmtu0(struct ifnet *ifp, struct nd_ifinfo *ndi)
+{
+ u_int32_t omaxmtu;
+
+ omaxmtu = ndi->maxmtu;
+
+ switch (ifp->if_type) {
+ case IFT_ARCNET:
+ ndi->maxmtu = MIN(ARC_PHDS_MAXMTU, ifp->if_mtu); /* RFC2497 */
+ break;
+ case IFT_FDDI:
+ ndi->maxmtu = MIN(FDDIIPMTU, ifp->if_mtu); /* RFC2467 */
+ break;
+ case IFT_ISO88025:
+ ndi->maxmtu = MIN(ISO88025_MAX_MTU, ifp->if_mtu);
+ break;
+ default:
+ ndi->maxmtu = ifp->if_mtu;
+ break;
+ }
+
+ /*
+ * Decreasing the interface MTU below the IPv6 minimum MTU may cause
+ * an undesirable situation. We thus notify the operator of the change
+ * explicitly. The check for omaxmtu is necessary to restrict the
+ * log to the case of changing the MTU, not initializing it.
+ */
+ if (omaxmtu >= IPV6_MMTU && ndi->maxmtu < IPV6_MMTU) {
+ log(LOG_NOTICE, "nd6_setmtu0: "
+ "new link MTU on %s (%lu) is too small for IPv6\n",
+ if_name(ifp), (unsigned long)ndi->maxmtu);
+ }
+
+ if (ndi->maxmtu > V_in6_maxmtu)
+ in6_setmaxmtu(); /* check all interfaces just in case */
+
+}
+
+void
+nd6_option_init(void *opt, int icmp6len, union nd_opts *ndopts)
+{
+
+ bzero(ndopts, sizeof(*ndopts));
+ ndopts->nd_opts_search = (struct nd_opt_hdr *)opt;
+ ndopts->nd_opts_last
+ = (struct nd_opt_hdr *)(((u_char *)opt) + icmp6len);
+
+ if (icmp6len == 0) {
+ ndopts->nd_opts_done = 1;
+ ndopts->nd_opts_search = NULL;
+ }
+}
+
+/*
+ * Take one ND option.
+ */
+struct nd_opt_hdr *
+nd6_option(union nd_opts *ndopts)
+{
+ struct nd_opt_hdr *nd_opt;
+ int olen;
+
+ if (ndopts == NULL)
+ panic("ndopts == NULL in nd6_option");
+ if (ndopts->nd_opts_last == NULL)
+ panic("uninitialized ndopts in nd6_option");
+ if (ndopts->nd_opts_search == NULL)
+ return NULL;
+ if (ndopts->nd_opts_done)
+ return NULL;
+
+ nd_opt = ndopts->nd_opts_search;
+
+ /* make sure nd_opt_len is inside the buffer */
+ if ((caddr_t)&nd_opt->nd_opt_len >= (caddr_t)ndopts->nd_opts_last) {
+ bzero(ndopts, sizeof(*ndopts));
+ return NULL;
+ }
+
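+	/* nd_opt_len is in units of 8 octets (RFC 2461, section 4.6) */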
+ olen = nd_opt->nd_opt_len << 3;
+ if (olen == 0) {
+ /*
+ * Message validation requires that all included
+ * options have a length that is greater than zero.
+ */
+ bzero(ndopts, sizeof(*ndopts));
+ return NULL;
+ }
+
+ ndopts->nd_opts_search = (struct nd_opt_hdr *)((caddr_t)nd_opt + olen);
+ if (ndopts->nd_opts_search > ndopts->nd_opts_last) {
+ /* option overruns the end of buffer, invalid */
+ bzero(ndopts, sizeof(*ndopts));
+ return NULL;
+ } else if (ndopts->nd_opts_search == ndopts->nd_opts_last) {
+ /* reached the end of options chain */
+ ndopts->nd_opts_done = 1;
+ ndopts->nd_opts_search = NULL;
+ }
+ return nd_opt;
+}
+
+/*
+ * Parse multiple ND options.
+ * This function is easier to use for ND routines that do not need
+ * multiple options of the same type.
+ */
+int
+nd6_options(union nd_opts *ndopts)
+{
+ struct nd_opt_hdr *nd_opt;
+ int i = 0;
+
+ if (ndopts == NULL)
+ panic("ndopts == NULL in nd6_options");
+ if (ndopts->nd_opts_last == NULL)
+ panic("uninitialized ndopts in nd6_options");
+ if (ndopts->nd_opts_search == NULL)
+ return 0;
+
+ while (1) {
+ nd_opt = nd6_option(ndopts);
+ if (nd_opt == NULL && ndopts->nd_opts_last == NULL) {
+ /*
+ * Message validation requires that all included
+ * options have a length that is greater than zero.
+ */
+ ICMP6STAT_INC(icp6s_nd_badopt);
+ bzero(ndopts, sizeof(*ndopts));
+ return -1;
+ }
+
+ if (nd_opt == NULL)
+ goto skip1;
+
+ switch (nd_opt->nd_opt_type) {
+ case ND_OPT_SOURCE_LINKADDR:
+ case ND_OPT_TARGET_LINKADDR:
+ case ND_OPT_MTU:
+ case ND_OPT_REDIRECTED_HEADER:
+ if (ndopts->nd_opt_array[nd_opt->nd_opt_type]) {
+ nd6log((LOG_INFO,
+ "duplicated ND6 option found (type=%d)\n",
+ nd_opt->nd_opt_type));
+ /* XXX bark? */
+ } else {
+ ndopts->nd_opt_array[nd_opt->nd_opt_type]
+ = nd_opt;
+ }
+ break;
+ case ND_OPT_PREFIX_INFORMATION:
+ if (ndopts->nd_opt_array[nd_opt->nd_opt_type] == 0) {
+ ndopts->nd_opt_array[nd_opt->nd_opt_type]
+ = nd_opt;
+ }
+ ndopts->nd_opts_pi_end =
+ (struct nd_opt_prefix_info *)nd_opt;
+ break;
+ default:
+ /*
+ * Unknown options must be silently ignored,
+			 * to accommodate future extensions to the protocol.
+ */
+ nd6log((LOG_DEBUG,
+ "nd6_options: unsupported option %d - "
+ "option ignored\n", nd_opt->nd_opt_type));
+ }
+
+skip1:
+ i++;
+ if (i > V_nd6_maxndopt) {
+ ICMP6STAT_INC(icp6s_nd_toomanyopt);
+			nd6log((LOG_INFO, "too many loops in nd options\n"));
+ break;
+ }
+
+ if (ndopts->nd_opts_done)
+ break;
+ }
+
+ return 0;
+}
+
+/*
+ * ND6 timer routine to handle ND6 entries
+ */
+void
+nd6_llinfo_settimer_locked(struct llentry *ln, long tick)
+{
+ int canceled;
+
+ LLE_WLOCK_ASSERT(ln);
+
+ if (tick < 0) {
+ ln->la_expire = 0;
+ ln->ln_ntick = 0;
+ canceled = callout_stop(&ln->ln_timer_ch);
+ } else {
+ ln->la_expire = time_second + tick / hz;
+ LLE_ADDREF(ln);
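+		/*
+		 * callout_reset() takes an int tick count, so intervals
+		 * longer than INT_MAX ticks are armed in INT_MAX chunks;
+		 * the remainder is kept in ln_ntick and re-armed by the
+		 * timer handler.
+		 */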
+ if (tick > INT_MAX) {
+ ln->ln_ntick = tick - INT_MAX;
+ canceled = callout_reset(&ln->ln_timer_ch, INT_MAX,
+ nd6_llinfo_timer, ln);
+ } else {
+ ln->ln_ntick = 0;
+ canceled = callout_reset(&ln->ln_timer_ch, tick,
+ nd6_llinfo_timer, ln);
+ }
+ }
+ if (canceled)
+ LLE_REMREF(ln);
+}
+
+void
+nd6_llinfo_settimer(struct llentry *ln, long tick)
+{
+
+ LLE_WLOCK(ln);
+ nd6_llinfo_settimer_locked(ln, tick);
+ LLE_WUNLOCK(ln);
+}
+
+static void
+nd6_llinfo_timer(void *arg)
+{
+ struct llentry *ln;
+ struct in6_addr *dst;
+ struct ifnet *ifp;
+ struct nd_ifinfo *ndi = NULL;
+
+ KASSERT(arg != NULL, ("%s: arg NULL", __func__));
+ ln = (struct llentry *)arg;
+ LLE_WLOCK_ASSERT(ln);
+ ifp = ln->lle_tbl->llt_ifp;
+
+ CURVNET_SET(ifp->if_vnet);
+
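+	/*
+	 * A residual interval in ln_ntick means the original timeout was
+	 * longer than INT_MAX ticks; keep re-arming the timer instead of
+	 * running the state machine.
+	 */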
+ if (ln->ln_ntick > 0) {
+ if (ln->ln_ntick > INT_MAX) {
+ ln->ln_ntick -= INT_MAX;
+ nd6_llinfo_settimer_locked(ln, INT_MAX);
+		} else {
+			long ntick = ln->ln_ntick;
+
+			/* arm the timer with the remaining interval */
+			ln->ln_ntick = 0;
+			nd6_llinfo_settimer_locked(ln, ntick);
+ }
+ goto done;
+ }
+
+ ndi = ND_IFINFO(ifp);
+ dst = &L3_ADDR_SIN6(ln)->sin6_addr;
+ if (ln->la_flags & LLE_STATIC) {
+ goto done;
+ }
+
+ if (ln->la_flags & LLE_DELETED) {
+ (void)nd6_free(ln, 0);
+ ln = NULL;
+ goto done;
+ }
+
+ switch (ln->ln_state) {
+ case ND6_LLINFO_INCOMPLETE:
+ if (ln->la_asked < V_nd6_mmaxtries) {
+ ln->la_asked++;
+ nd6_llinfo_settimer_locked(ln, (long)ndi->retrans * hz / 1000);
+ LLE_WUNLOCK(ln);
+ nd6_ns_output(ifp, NULL, dst, ln, 0);
+ LLE_WLOCK(ln);
+ } else {
+ struct mbuf *m = ln->la_hold;
+ if (m) {
+ struct mbuf *m0;
+
+ /*
+ * assuming every packet in la_hold has the
+ * same IP header. Send error after unlock.
+ */
+ m0 = m->m_nextpkt;
+ m->m_nextpkt = NULL;
+ ln->la_hold = m0;
+ clear_llinfo_pqueue(ln);
+ }
+ (void)nd6_free(ln, 0);
+ ln = NULL;
+ if (m != NULL)
+ icmp6_error2(m, ICMP6_DST_UNREACH,
+ ICMP6_DST_UNREACH_ADDR, 0, ifp);
+ }
+ break;
+ case ND6_LLINFO_REACHABLE:
+ if (!ND6_LLINFO_PERMANENT(ln)) {
+ ln->ln_state = ND6_LLINFO_STALE;
+ nd6_llinfo_settimer_locked(ln, (long)V_nd6_gctimer * hz);
+ }
+ break;
+
+ case ND6_LLINFO_STALE:
+		/* Garbage Collection (RFC 2461 5.3) */
+ if (!ND6_LLINFO_PERMANENT(ln)) {
+ (void)nd6_free(ln, 1);
+ ln = NULL;
+ }
+ break;
+
+ case ND6_LLINFO_DELAY:
+ if (ndi && (ndi->flags & ND6_IFF_PERFORMNUD) != 0) {
+ /* We need NUD */
+ ln->la_asked = 1;
+ ln->ln_state = ND6_LLINFO_PROBE;
+ nd6_llinfo_settimer_locked(ln, (long)ndi->retrans * hz / 1000);
+ LLE_WUNLOCK(ln);
+ nd6_ns_output(ifp, dst, dst, ln, 0);
+ LLE_WLOCK(ln);
+ } else {
+ ln->ln_state = ND6_LLINFO_STALE; /* XXX */
+ nd6_llinfo_settimer_locked(ln, (long)V_nd6_gctimer * hz);
+ }
+ break;
+ case ND6_LLINFO_PROBE:
+ if (ln->la_asked < V_nd6_umaxtries) {
+ ln->la_asked++;
+ nd6_llinfo_settimer_locked(ln, (long)ndi->retrans * hz / 1000);
+ LLE_WUNLOCK(ln);
+ nd6_ns_output(ifp, dst, dst, ln, 0);
+ LLE_WLOCK(ln);
+ } else {
+ (void)nd6_free(ln, 0);
+ ln = NULL;
+ }
+ break;
+ default:
+ panic("%s: paths in a dark night can be confusing: %d",
+ __func__, ln->ln_state);
+ }
+done:
+ if (ln != NULL)
+ LLE_FREE_LOCKED(ln);
+ CURVNET_RESTORE();
+}
+
+
+/*
+ * ND6 timer routine to expire default route list and prefix list
+ */
+void
+nd6_timer(void *arg)
+{
+ CURVNET_SET((struct vnet *) arg);
+ int s;
+ struct nd_defrouter *dr;
+ struct nd_prefix *pr;
+ struct in6_ifaddr *ia6, *nia6;
+ struct in6_addrlifetime *lt6;
+
+ callout_reset(&V_nd6_timer_ch, V_nd6_prune * hz,
+ nd6_timer, curvnet);
+
+ /* expire default router list */
+ s = splnet();
+ dr = TAILQ_FIRST(&V_nd_defrouter);
+ while (dr) {
+ if (dr->expire && dr->expire < time_second) {
+ struct nd_defrouter *t;
+ t = TAILQ_NEXT(dr, dr_entry);
+ defrtrlist_del(dr);
+ dr = t;
+ } else {
+ dr = TAILQ_NEXT(dr, dr_entry);
+ }
+ }
+
+ /*
+ * expire interface addresses.
+ * in the past the loop was inside prefix expiry processing.
+	 * However, from a stricter spec-conformance standpoint, we should
+ * rather separate address lifetimes and prefix lifetimes.
+ *
+ * XXXRW: in6_ifaddrhead locking.
+ */
+ addrloop:
+ TAILQ_FOREACH_SAFE(ia6, &V_in6_ifaddrhead, ia_link, nia6) {
+ /* check address lifetime */
+ lt6 = &ia6->ia6_lifetime;
+ if (IFA6_IS_INVALID(ia6)) {
+ int regen = 0;
+
+ /*
+ * If the expiring address is temporary, try
+ * regenerating a new one. This would be useful when
+ * we suspended a laptop PC, then turned it on after a
+ * period that could invalidate all temporary
+ * addresses. Although we may have to restart the
+ * loop (see below), it must be after purging the
+ * address. Otherwise, we'd see an infinite loop of
+ * regeneration.
+ */
+ if (V_ip6_use_tempaddr &&
+ (ia6->ia6_flags & IN6_IFF_TEMPORARY) != 0) {
+ if (regen_tmpaddr(ia6) == 0)
+ regen = 1;
+ }
+
+ in6_purgeaddr(&ia6->ia_ifa);
+
+ if (regen)
+ goto addrloop; /* XXX: see below */
+ } else if (IFA6_IS_DEPRECATED(ia6)) {
+ int oldflags = ia6->ia6_flags;
+
+ ia6->ia6_flags |= IN6_IFF_DEPRECATED;
+
+ /*
+ * If a temporary address has just become deprecated,
+ * regenerate a new one if possible.
+ */
+ if (V_ip6_use_tempaddr &&
+ (ia6->ia6_flags & IN6_IFF_TEMPORARY) != 0 &&
+ (oldflags & IN6_IFF_DEPRECATED) == 0) {
+
+ if (regen_tmpaddr(ia6) == 0) {
+ /*
+ * A new temporary address is
+ * generated.
+ * XXX: this means the address chain
+ * has changed while we are still in
+ * the loop. Although the change
+ * would not cause disaster (because
+ * it's not a deletion, but an
+ * addition,) we'd rather restart the
+ * loop just for safety. Or does this
+ * significantly reduce performance??
+ */
+ goto addrloop;
+ }
+ }
+ } else {
+ /*
+ * A new RA might have made a deprecated address
+ * preferred.
+ */
+ ia6->ia6_flags &= ~IN6_IFF_DEPRECATED;
+ }
+ }
+
+ /* expire prefix list */
+ pr = V_nd_prefix.lh_first;
+ while (pr) {
+ /*
+ * check prefix lifetime.
+ * since pltime is just for autoconf, pltime processing for
+ * prefix is not necessary.
+ */
+ if (pr->ndpr_vltime != ND6_INFINITE_LIFETIME &&
+ time_second - pr->ndpr_lastupdate > pr->ndpr_vltime) {
+ struct nd_prefix *t;
+ t = pr->ndpr_next;
+
+ /*
+ * address expiration and prefix expiration are
+ * separate. NEVER perform in6_purgeaddr here.
+ */
+
+ prelist_remove(pr);
+ pr = t;
+ } else
+ pr = pr->ndpr_next;
+ }
+ splx(s);
+ CURVNET_RESTORE();
+}
+
+/*
+ * ia6 - deprecated/invalidated temporary address
+ */
+static int
+regen_tmpaddr(struct in6_ifaddr *ia6)
+{
+ struct ifaddr *ifa;
+ struct ifnet *ifp;
+ struct in6_ifaddr *public_ifa6 = NULL;
+
+ ifp = ia6->ia_ifa.ifa_ifp;
+ IF_ADDR_LOCK(ifp);
+ TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
+ struct in6_ifaddr *it6;
+
+ if (ifa->ifa_addr->sa_family != AF_INET6)
+ continue;
+
+ it6 = (struct in6_ifaddr *)ifa;
+
+ /* ignore no autoconf addresses. */
+ if ((it6->ia6_flags & IN6_IFF_AUTOCONF) == 0)
+ continue;
+
+ /* ignore autoconf addresses with different prefixes. */
+ if (it6->ia6_ndpr == NULL || it6->ia6_ndpr != ia6->ia6_ndpr)
+ continue;
+
+ /*
+ * Now we are looking at an autoconf address with the same
+ * prefix as ours. If the address is temporary and is still
+ * preferred, do not create another one. It would be rare, but
+ * could happen, for example, when we resume a laptop PC after
+ * a long period.
+ */
+ if ((it6->ia6_flags & IN6_IFF_TEMPORARY) != 0 &&
+ !IFA6_IS_DEPRECATED(it6)) {
+ public_ifa6 = NULL;
+ break;
+ }
+
+ /*
+ * This is a public autoconf address that has the same prefix
+ * as ours. If it is preferred, keep it. We can't break the
+ * loop here, because there may be a still-preferred temporary
+ * address with the prefix.
+ */
+		if (!IFA6_IS_DEPRECATED(it6))
+			public_ifa6 = it6;
+	}
+	/*
+	 * Take the reference once the scan is complete, so that a candidate
+	 * superseded later in the loop does not keep an extra reference.
+	 */
+	if (public_ifa6 != NULL)
+		ifa_ref(&public_ifa6->ia_ifa);
+	IF_ADDR_UNLOCK(ifp);
+
+ if (public_ifa6 != NULL) {
+ int e;
+
+ if ((e = in6_tmpifadd(public_ifa6, 0, 0)) != 0) {
+ ifa_free(&public_ifa6->ia_ifa);
+ log(LOG_NOTICE, "regen_tmpaddr: failed to create a new"
+ " tmp addr,errno=%d\n", e);
+ return (-1);
+ }
+ ifa_free(&public_ifa6->ia_ifa);
+ return (0);
+ }
+
+ return (-1);
+}
+
+/*
+ * Nuke neighbor cache/prefix/default router management table, right before
+ * ifp goes away.
+ */
+void
+nd6_purge(struct ifnet *ifp)
+{
+ struct nd_defrouter *dr, *ndr;
+ struct nd_prefix *pr, *npr;
+
+ /*
+ * Nuke default router list entries toward ifp.
+	 * We defer removal of default router list entries that are installed
+	 * in the routing table, in order to keep additional side effects as
+	 * small as possible.
+ */
+ for (dr = TAILQ_FIRST(&V_nd_defrouter); dr; dr = ndr) {
+ ndr = TAILQ_NEXT(dr, dr_entry);
+ if (dr->installed)
+ continue;
+
+ if (dr->ifp == ifp)
+ defrtrlist_del(dr);
+ }
+
+ for (dr = TAILQ_FIRST(&V_nd_defrouter); dr; dr = ndr) {
+ ndr = TAILQ_NEXT(dr, dr_entry);
+ if (!dr->installed)
+ continue;
+
+ if (dr->ifp == ifp)
+ defrtrlist_del(dr);
+ }
+
+ /* Nuke prefix list entries toward ifp */
+ for (pr = V_nd_prefix.lh_first; pr; pr = npr) {
+ npr = pr->ndpr_next;
+ if (pr->ndpr_ifp == ifp) {
+ /*
+ * Because if_detach() does *not* release prefixes
+ * while purging addresses the reference count will
+ * still be above zero. We therefore reset it to
+ * make sure that the prefix really gets purged.
+ */
+ pr->ndpr_refcnt = 0;
+
+ /*
+			 * Previously, pr->ndpr_addr was removed as well,
+ * but I strongly believe we don't have to do it.
+ * nd6_purge() is only called from in6_ifdetach(),
+ * which removes all the associated interface addresses
+ * by itself.
+ * (jinmei@kame.net 20010129)
+ */
+ prelist_remove(pr);
+ }
+ }
+
+ /* cancel default outgoing interface setting */
+ if (V_nd6_defifindex == ifp->if_index)
+ nd6_setdefaultiface(0);
+
+	if (!V_ip6_forwarding && V_ip6_accept_rtadv) { /* XXX: too restrictive? */
+		/* refresh default router list */
+		defrouter_select();
+	}
+
+ /* XXXXX
+ * We do not nuke the neighbor cache entries here any more
+ * because the neighbor cache is kept in if_afdata[AF_INET6].
+ * nd6_purge() is invoked by in6_ifdetach() which is called
+ * from if_detach() where everything gets purged. So let
+ * in6_domifdetach() do the actual L2 table purging work.
+ */
+}
+
+/*
+ * the caller acquires and releases the lock on the lltbls
+ * Returns the llentry locked
+ */
+struct llentry *
+nd6_lookup(struct in6_addr *addr6, int flags, struct ifnet *ifp)
+{
+ struct sockaddr_in6 sin6;
+ struct llentry *ln;
+ int llflags;
+
+ bzero(&sin6, sizeof(sin6));
+ sin6.sin6_len = sizeof(struct sockaddr_in6);
+ sin6.sin6_family = AF_INET6;
+ sin6.sin6_addr = *addr6;
+
+ IF_AFDATA_LOCK_ASSERT(ifp);
+
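+	/* map the ND6_* lookup flags onto the generic lltable LLE_* flags */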
+ llflags = 0;
+ if (flags & ND6_CREATE)
+ llflags |= LLE_CREATE;
+ if (flags & ND6_EXCLUSIVE)
+ llflags |= LLE_EXCLUSIVE;
+
+ ln = lla_lookup(LLTABLE6(ifp), llflags, (struct sockaddr *)&sin6);
+ if ((ln != NULL) && (llflags & LLE_CREATE))
+ ln->ln_state = ND6_LLINFO_NOSTATE;
+
+ return (ln);
+}
+
+/*
+ * Test whether a given IPv6 address is a neighbor or not, ignoring
+ * the actual neighbor cache. The neighbor cache is ignored so as
+ * not to reenter the routing code from within itself.
+ */
+static int
+nd6_is_new_addr_neighbor(struct sockaddr_in6 *addr, struct ifnet *ifp)
+{
+ struct nd_prefix *pr;
+ struct ifaddr *dstaddr;
+
+ /*
+ * A link-local address is always a neighbor.
+ * XXX: a link does not necessarily specify a single interface.
+ */
+ if (IN6_IS_ADDR_LINKLOCAL(&addr->sin6_addr)) {
+ struct sockaddr_in6 sin6_copy;
+ u_int32_t zone;
+
+ /*
+ * We need sin6_copy since sa6_recoverscope() may modify the
+ * content (XXX).
+ */
+ sin6_copy = *addr;
+ if (sa6_recoverscope(&sin6_copy))
+ return (0); /* XXX: should be impossible */
+ if (in6_setscope(&sin6_copy.sin6_addr, ifp, &zone))
+ return (0);
+ if (sin6_copy.sin6_scope_id == zone)
+ return (1);
+ else
+ return (0);
+ }
+
+ /*
+ * If the address matches one of our addresses,
+ * it should be a neighbor.
+ * If the address matches one of our on-link prefixes, it should be a
+ * neighbor.
+ */
+ for (pr = V_nd_prefix.lh_first; pr; pr = pr->ndpr_next) {
+ if (pr->ndpr_ifp != ifp)
+ continue;
+
+ if (!(pr->ndpr_stateflags & NDPRF_ONLINK)) {
+ struct rtentry *rt;
+ rt = rtalloc1((struct sockaddr *)&pr->ndpr_prefix, 0, 0);
+ if (rt == NULL)
+ continue;
+ /*
+ * This is the case where multiple interfaces
+ * have the same prefix, but only one is installed
+ * into the routing table and that prefix entry
+ * is not the one being examined here. In the case
+ * where RADIX_MPATH is enabled, multiple route
+ * entries (of the same rt_key value) will be
+ * installed because the interface addresses all
+ * differ.
+ */
+ if (!IN6_ARE_ADDR_EQUAL(&pr->ndpr_prefix.sin6_addr,
+ &((struct sockaddr_in6 *)rt_key(rt))->sin6_addr)) {
+ RTFREE_LOCKED(rt);
+ continue;
+ }
+ RTFREE_LOCKED(rt);
+ }
+
+ if (IN6_ARE_MASKED_ADDR_EQUAL(&pr->ndpr_prefix.sin6_addr,
+ &addr->sin6_addr, &pr->ndpr_mask))
+ return (1);
+ }
+
+ /*
+ * If the address is assigned on the node of the other side of
+ * a p2p interface, the address should be a neighbor.
+ */
+ dstaddr = ifa_ifwithdstaddr((struct sockaddr *)addr);
+ if (dstaddr != NULL) {
+ if (dstaddr->ifa_ifp == ifp) {
+ ifa_free(dstaddr);
+ return (1);
+ }
+ ifa_free(dstaddr);
+ }
+
+ /*
+ * If the default router list is empty, all addresses are regarded
+ * as on-link, and thus, as a neighbor.
+ * XXX: we restrict the condition to hosts, because routers usually do
+ * not have the "default router list".
+ */
+ if (!V_ip6_forwarding && TAILQ_FIRST(&V_nd_defrouter) == NULL &&
+ V_nd6_defifindex == ifp->if_index) {
+ return (1);
+ }
+
+ return (0);
+}
+
+
+/*
+ * Detect if a given IPv6 address identifies a neighbor on a given link.
+ * XXX: should take care of the destination of a p2p link?
+ */
+int
+nd6_is_addr_neighbor(struct sockaddr_in6 *addr, struct ifnet *ifp)
+{
+ struct llentry *lle;
+ int rc = 0;
+
+ IF_AFDATA_UNLOCK_ASSERT(ifp);
+ if (nd6_is_new_addr_neighbor(addr, ifp))
+ return (1);
+
+ /*
+ * Even if the address matches none of our addresses, it might be
+ * in the neighbor cache.
+ */
+ IF_AFDATA_LOCK(ifp);
+ if ((lle = nd6_lookup(&addr->sin6_addr, 0, ifp)) != NULL) {
+ LLE_RUNLOCK(lle);
+ rc = 1;
+ }
+ IF_AFDATA_UNLOCK(ifp);
+ return (rc);
+}
+
+/*
+ * Free an nd6 llinfo entry.
+ * Since the function would cause significant changes in the kernel, DO NOT
+ * make it global, unless you have a strong reason for the change, and are sure
+ * that the change is safe.
+ */
+static struct llentry *
+nd6_free(struct llentry *ln, int gc)
+{
+ struct llentry *next;
+ struct nd_defrouter *dr;
+ struct ifnet *ifp;
+
+ LLE_WLOCK_ASSERT(ln);
+
+ /*
+ * we used to have pfctlinput(PRC_HOSTDEAD) here.
+ * even though it is not harmful, it was not really necessary.
+ */
+
+ /* cancel timer */
+ nd6_llinfo_settimer_locked(ln, -1);
+
+ ifp = ln->lle_tbl->llt_ifp;
+
+ if (!V_ip6_forwarding) {
+
+ dr = defrouter_lookup(&L3_ADDR_SIN6(ln)->sin6_addr, ifp);
+
+ if (dr != NULL && dr->expire &&
+ ln->ln_state == ND6_LLINFO_STALE && gc) {
+ /*
+ * If the reason for the deletion is just garbage
+ * collection, and the neighbor is an active default
+ * router, do not delete it. Instead, reset the GC
+ * timer using the router's lifetime.
+ * Simply deleting the entry would affect default
+ * router selection, which is not necessarily a good
+ * thing, especially when we're using router preference
+ * values.
+ * XXX: the check for ln_state would be redundant,
+ * but we intentionally keep it just in case.
+ */
+ if (dr->expire > time_second)
+ nd6_llinfo_settimer_locked(ln,
+ (dr->expire - time_second) * hz);
+ else
+ nd6_llinfo_settimer_locked(ln,
+ (long)V_nd6_gctimer * hz);
+
+ next = LIST_NEXT(ln, lle_next);
+ LLE_REMREF(ln);
+ LLE_WUNLOCK(ln);
+ return (next);
+ }
+
+ if (dr) {
+ /*
+			 * Unreachability of a router might affect the default
+ * router selection and on-link detection of advertised
+ * prefixes.
+ */
+
+ /*
+ * Temporarily fake the state to choose a new default
+ * router and to perform on-link determination of
+ * prefixes correctly.
+ * Below the state will be set correctly,
+ * or the entry itself will be deleted.
+ */
+ ln->ln_state = ND6_LLINFO_INCOMPLETE;
+ }
+
+ if (ln->ln_router || dr) {
+
+ /*
+ * We need to unlock to avoid a LOR with rt6_flush() with the
+ * rnh and for the calls to pfxlist_onlink_check() and
+ * defrouter_select() in the block further down for calls
+ * into nd6_lookup(). We still hold a ref.
+ */
+ LLE_WUNLOCK(ln);
+
+ /*
+ * rt6_flush must be called whether or not the neighbor
+ * is in the Default Router List.
+ * See a corresponding comment in nd6_na_input().
+ */
+ rt6_flush(&L3_ADDR_SIN6(ln)->sin6_addr, ifp);
+ }
+
+ if (dr) {
+ /*
+ * Since defrouter_select() does not affect the
+ * on-link determination and MIP6 needs the check
+ * before the default router selection, we perform
+ * the check now.
+ */
+ pfxlist_onlink_check();
+
+ /*
+ * Refresh default router list.
+ */
+ defrouter_select();
+ }
+
+ if (ln->ln_router || dr)
+ LLE_WLOCK(ln);
+ }
+
+ /*
+ * Before deleting the entry, remember the next entry as the
+ * return value. We need this because pfxlist_onlink_check() above
+ * might have freed other entries (particularly the old next entry) as
+ * a side effect (XXX).
+ */
+ next = LIST_NEXT(ln, lle_next);
+
+ /*
+	 * Safe to unlock. We still hold an extra reference and will not
+ * free(9) in llentry_free() if someone else holds one as well.
+ */
+ LLE_WUNLOCK(ln);
+ IF_AFDATA_LOCK(ifp);
+ LLE_WLOCK(ln);
+ LLE_REMREF(ln);
+ llentry_free(ln);
+ IF_AFDATA_UNLOCK(ifp);
+
+ return (next);
+}
+
+/*
+ * Upper-layer reachability hint for Neighbor Unreachability Detection.
+ *
+ * XXX cost-effective methods?
+ */
+void
+nd6_nud_hint(struct rtentry *rt, struct in6_addr *dst6, int force)
+{
+ struct llentry *ln;
+ struct ifnet *ifp;
+
+ if ((dst6 == NULL) || (rt == NULL))
+ return;
+
+ ifp = rt->rt_ifp;
+ IF_AFDATA_LOCK(ifp);
+	ln = nd6_lookup(dst6, ND6_EXCLUSIVE, ifp);
+ IF_AFDATA_UNLOCK(ifp);
+ if (ln == NULL)
+ return;
+
+ if (ln->ln_state < ND6_LLINFO_REACHABLE)
+ goto done;
+
+ /*
+ * if we get upper-layer reachability confirmation many times,
+ * it is possible we have false information.
+ */
+ if (!force) {
+ ln->ln_byhint++;
+ if (ln->ln_byhint > V_nd6_maxnudhint) {
+ goto done;
+ }
+ }
+
+ ln->ln_state = ND6_LLINFO_REACHABLE;
+ if (!ND6_LLINFO_PERMANENT(ln)) {
+ nd6_llinfo_settimer_locked(ln,
+ (long)ND_IFINFO(rt->rt_ifp)->reachable * hz);
+ }
+done:
+ LLE_WUNLOCK(ln);
+}
+
+
+int
+nd6_ioctl(u_long cmd, caddr_t data, struct ifnet *ifp)
+{
+ struct in6_drlist *drl = (struct in6_drlist *)data;
+ struct in6_oprlist *oprl = (struct in6_oprlist *)data;
+ struct in6_ndireq *ndi = (struct in6_ndireq *)data;
+ struct in6_nbrinfo *nbi = (struct in6_nbrinfo *)data;
+ struct in6_ndifreq *ndif = (struct in6_ndifreq *)data;
+ struct nd_defrouter *dr;
+ struct nd_prefix *pr;
+ int i = 0, error = 0;
+ int s;
+
+ switch (cmd) {
+ case SIOCGDRLST_IN6:
+ /*
+ * obsolete API, use sysctl under net.inet6.icmp6
+ */
+ bzero(drl, sizeof(*drl));
+ s = splnet();
+ dr = TAILQ_FIRST(&V_nd_defrouter);
+ while (dr && i < DRLSTSIZ) {
+ drl->defrouter[i].rtaddr = dr->rtaddr;
+ in6_clearscope(&drl->defrouter[i].rtaddr);
+
+ drl->defrouter[i].flags = dr->flags;
+ drl->defrouter[i].rtlifetime = dr->rtlifetime;
+ drl->defrouter[i].expire = dr->expire;
+ drl->defrouter[i].if_index = dr->ifp->if_index;
+ i++;
+ dr = TAILQ_NEXT(dr, dr_entry);
+ }
+ splx(s);
+ break;
+ case SIOCGPRLST_IN6:
+ /*
+ * obsolete API, use sysctl under net.inet6.icmp6
+ *
+ * XXX the structure in6_prlist was changed in backward-
+ * incompatible manner. in6_oprlist is used for SIOCGPRLST_IN6,
+ * in6_prlist is used for nd6_sysctl() - fill_prlist().
+ */
+ /*
+	 * XXX meaning of fields, especially "raflags", is very
+	 * different between the RA prefix list and the RR/static prefix
+	 * list. how about separating the ioctls into two?
+ */
+ bzero(oprl, sizeof(*oprl));
+ s = splnet();
+ pr = V_nd_prefix.lh_first;
+ while (pr && i < PRLSTSIZ) {
+ struct nd_pfxrouter *pfr;
+ int j;
+
+ oprl->prefix[i].prefix = pr->ndpr_prefix.sin6_addr;
+ oprl->prefix[i].raflags = pr->ndpr_raf;
+ oprl->prefix[i].prefixlen = pr->ndpr_plen;
+ oprl->prefix[i].vltime = pr->ndpr_vltime;
+ oprl->prefix[i].pltime = pr->ndpr_pltime;
+ oprl->prefix[i].if_index = pr->ndpr_ifp->if_index;
+ if (pr->ndpr_vltime == ND6_INFINITE_LIFETIME)
+ oprl->prefix[i].expire = 0;
+ else {
+ time_t maxexpire;
+
+ /* XXX: we assume time_t is signed. */
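+				/* maxexpire is the largest positive time_t */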
+ maxexpire = (-1) &
+ ~((time_t)1 <<
+ ((sizeof(maxexpire) * 8) - 1));
+ if (pr->ndpr_vltime <
+ maxexpire - pr->ndpr_lastupdate) {
+ oprl->prefix[i].expire =
+ pr->ndpr_lastupdate +
+ pr->ndpr_vltime;
+ } else
+ oprl->prefix[i].expire = maxexpire;
+ }
+
+ pfr = pr->ndpr_advrtrs.lh_first;
+ j = 0;
+ while (pfr) {
+ if (j < DRLSTSIZ) {
+#define RTRADDR oprl->prefix[i].advrtr[j]
+ RTRADDR = pfr->router->rtaddr;
+ in6_clearscope(&RTRADDR);
+#undef RTRADDR
+ }
+ j++;
+ pfr = pfr->pfr_next;
+ }
+ oprl->prefix[i].advrtrs = j;
+ oprl->prefix[i].origin = PR_ORIG_RA;
+
+ i++;
+ pr = pr->ndpr_next;
+ }
+ splx(s);
+
+ break;
+ case OSIOCGIFINFO_IN6:
+#define ND ndi->ndi
+ /* XXX: old ndp(8) assumes a positive value for linkmtu. */
+ bzero(&ND, sizeof(ND));
+ ND.linkmtu = IN6_LINKMTU(ifp);
+ ND.maxmtu = ND_IFINFO(ifp)->maxmtu;
+ ND.basereachable = ND_IFINFO(ifp)->basereachable;
+ ND.reachable = ND_IFINFO(ifp)->reachable;
+ ND.retrans = ND_IFINFO(ifp)->retrans;
+ ND.flags = ND_IFINFO(ifp)->flags;
+ ND.recalctm = ND_IFINFO(ifp)->recalctm;
+ ND.chlim = ND_IFINFO(ifp)->chlim;
+ break;
+ case SIOCGIFINFO_IN6:
+ ND = *ND_IFINFO(ifp);
+ break;
+ case SIOCSIFINFO_IN6:
+ /*
+ * used to change host variables from userland.
+		 * intended for use on a router to reflect RA configurations.
+ */
+ /* 0 means 'unspecified' */
+ if (ND.linkmtu != 0) {
+ if (ND.linkmtu < IPV6_MMTU ||
+ ND.linkmtu > IN6_LINKMTU(ifp)) {
+ error = EINVAL;
+ break;
+ }
+ ND_IFINFO(ifp)->linkmtu = ND.linkmtu;
+ }
+
+ if (ND.basereachable != 0) {
+ int obasereachable = ND_IFINFO(ifp)->basereachable;
+
+ ND_IFINFO(ifp)->basereachable = ND.basereachable;
+ if (ND.basereachable != obasereachable)
+ ND_IFINFO(ifp)->reachable =
+ ND_COMPUTE_RTIME(ND.basereachable);
+ }
+ if (ND.retrans != 0)
+ ND_IFINFO(ifp)->retrans = ND.retrans;
+ if (ND.chlim != 0)
+ ND_IFINFO(ifp)->chlim = ND.chlim;
+ /* FALLTHROUGH */
+ case SIOCSIFINFO_FLAGS:
+ ND_IFINFO(ifp)->flags = ND.flags;
+ break;
+#undef ND
+ case SIOCSNDFLUSH_IN6: /* XXX: the ioctl name is confusing... */
+ /* sync kernel routing table with the default router list */
+ defrouter_reset();
+ defrouter_select();
+ break;
+ case SIOCSPFXFLUSH_IN6:
+ {
+ /* flush all the prefix advertised by routers */
+ struct nd_prefix *pr, *next;
+
+ s = splnet();
+ for (pr = V_nd_prefix.lh_first; pr; pr = next) {
+ struct in6_ifaddr *ia, *ia_next;
+
+ next = pr->ndpr_next;
+
+ if (IN6_IS_ADDR_LINKLOCAL(&pr->ndpr_prefix.sin6_addr))
+ continue; /* XXX */
+
+ /* do we really have to remove addresses as well? */
+ /* XXXRW: in6_ifaddrhead locking. */
+ TAILQ_FOREACH_SAFE(ia, &V_in6_ifaddrhead, ia_link,
+ ia_next) {
+ if ((ia->ia6_flags & IN6_IFF_AUTOCONF) == 0)
+ continue;
+
+ if (ia->ia6_ndpr == pr)
+ in6_purgeaddr(&ia->ia_ifa);
+ }
+ prelist_remove(pr);
+ }
+ splx(s);
+ break;
+ }
+ case SIOCSRTRFLUSH_IN6:
+ {
+ /* flush all the default routers */
+ struct nd_defrouter *dr, *next;
+
+ s = splnet();
+ defrouter_reset();
+ for (dr = TAILQ_FIRST(&V_nd_defrouter); dr; dr = next) {
+ next = TAILQ_NEXT(dr, dr_entry);
+ defrtrlist_del(dr);
+ }
+ defrouter_select();
+ splx(s);
+ break;
+ }
+ case SIOCGNBRINFO_IN6:
+ {
+ struct llentry *ln;
+ struct in6_addr nb_addr = nbi->addr; /* make local for safety */
+
+ if ((error = in6_setscope(&nb_addr, ifp, NULL)) != 0)
+ return (error);
+
+ IF_AFDATA_LOCK(ifp);
+ ln = nd6_lookup(&nb_addr, 0, ifp);
+ IF_AFDATA_UNLOCK(ifp);
+
+ if (ln == NULL) {
+ error = EINVAL;
+ break;
+ }
+ nbi->state = ln->ln_state;
+ nbi->asked = ln->la_asked;
+ nbi->isrouter = ln->ln_router;
+ nbi->expire = ln->la_expire;
+ LLE_RUNLOCK(ln);
+ break;
+ }
+ case SIOCGDEFIFACE_IN6: /* XXX: should be implemented as a sysctl? */
+ ndif->ifindex = V_nd6_defifindex;
+ break;
+ case SIOCSDEFIFACE_IN6: /* XXX: should be implemented as a sysctl? */
+ return (nd6_setdefaultiface(ndif->ifindex));
+ }
+ return (error);
+}
+
+/*
+ * Create neighbor cache entry and cache link-layer address,
+ * on reception of inbound ND6 packets. (RS/RA/NS/redirect)
+ *
+ * type - ICMP6 type
+ * code - type dependent information
+ *
+ * XXXXX
+ * The caller must not hold the ndp cache table lock; it is acquired
+ * internally, and the returned cache entry is locked.
+ */
+struct llentry *
+nd6_cache_lladdr(struct ifnet *ifp, struct in6_addr *from, char *lladdr,
+ int lladdrlen, int type, int code)
+{
+ struct llentry *ln = NULL;
+ int is_newentry;
+ int do_update;
+ int olladdr;
+ int llchange;
+ int flags;
+ int newstate = 0;
+ uint16_t router = 0;
+ struct sockaddr_in6 sin6;
+ struct mbuf *chain = NULL;
+ int static_route = 0;
+
+ IF_AFDATA_UNLOCK_ASSERT(ifp);
+
+ if (ifp == NULL)
+ panic("ifp == NULL in nd6_cache_lladdr");
+ if (from == NULL)
+ panic("from == NULL in nd6_cache_lladdr");
+
+ /* nothing must be updated for unspecified address */
+ if (IN6_IS_ADDR_UNSPECIFIED(from))
+ return NULL;
+
+ /*
+ * Validation about ifp->if_addrlen and lladdrlen must be done in
+ * the caller.
+ *
+	 * XXX If the link does not have a link-layer address, what should
+	 * we do? (ifp->if_addrlen == 0)
+	 * The spec says nothing in the sections for RA, RS and NA. There is
+	 * a small description of it in the NS section (RFC 2461 7.2.3).
+ */
+ flags = lladdr ? ND6_EXCLUSIVE : 0;
+ IF_AFDATA_LOCK(ifp);
+ ln = nd6_lookup(from, flags, ifp);
+
+ if (ln == NULL) {
+ flags |= ND6_EXCLUSIVE;
+ ln = nd6_lookup(from, flags | ND6_CREATE, ifp);
+ IF_AFDATA_UNLOCK(ifp);
+ is_newentry = 1;
+ } else {
+ IF_AFDATA_UNLOCK(ifp);
+ /* do nothing if static ndp is set */
+ if (ln->la_flags & LLE_STATIC) {
+ static_route = 1;
+ goto done;
+ }
+ is_newentry = 0;
+ }
+ if (ln == NULL)
+ return (NULL);
+
+ olladdr = (ln->la_flags & LLE_VALID) ? 1 : 0;
+ if (olladdr && lladdr) {
+ llchange = bcmp(lladdr, &ln->ll_addr,
+ ifp->if_addrlen);
+ } else
+ llchange = 0;
+
+ /*
+ * newentry olladdr lladdr llchange (*=record)
+ * 0 n n -- (1)
+ * 0 y n -- (2)
+ * 0 n y -- (3) * STALE
+ * 0 y y n (4) *
+ * 0 y y y (5) * STALE
+ * 1 -- n -- (6) NOSTATE(= PASSIVE)
+ * 1 -- y -- (7) * STALE
+ */
+
+ if (lladdr) { /* (3-5) and (7) */
+ /*
+ * Record source link-layer address
+		 * XXX is it dependent on ifp->if_type?
+ */
+ bcopy(lladdr, &ln->ll_addr, ifp->if_addrlen);
+ ln->la_flags |= LLE_VALID;
+ }
+
+ if (!is_newentry) {
+ if ((!olladdr && lladdr != NULL) || /* (3) */
+ (olladdr && lladdr != NULL && llchange)) { /* (5) */
+ do_update = 1;
+ newstate = ND6_LLINFO_STALE;
+ } else /* (1-2,4) */
+ do_update = 0;
+ } else {
+ do_update = 1;
+ if (lladdr == NULL) /* (6) */
+ newstate = ND6_LLINFO_NOSTATE;
+ else /* (7) */
+ newstate = ND6_LLINFO_STALE;
+ }
+
+ if (do_update) {
+ /*
+ * Update the state of the neighbor cache.
+ */
+ ln->ln_state = newstate;
+
+ if (ln->ln_state == ND6_LLINFO_STALE) {
+ /*
+ * XXX: since nd6_output() below will cause
+			 * state transition to DELAY and reset the timer,
+ * we must set the timer now, although it is actually
+ * meaningless.
+ */
+ nd6_llinfo_settimer_locked(ln, (long)V_nd6_gctimer * hz);
+
+ if (ln->la_hold) {
+ struct mbuf *m_hold, *m_hold_next;
+
+ /*
+ * reset the la_hold in advance, to explicitly
+ * prevent a la_hold lookup in nd6_output()
+ * (wouldn't happen, though...)
+ */
+ for (m_hold = ln->la_hold, ln->la_hold = NULL;
+ m_hold; m_hold = m_hold_next) {
+ m_hold_next = m_hold->m_nextpkt;
+ m_hold->m_nextpkt = NULL;
+
+ /*
+ * we assume ifp is not a p2p here, so
+ * just set the 2nd argument as the
+ * 1st one.
+ */
+ nd6_output_lle(ifp, ifp, m_hold, L3_ADDR_SIN6(ln), NULL, ln, &chain);
+ }
+ /*
+ * If we have mbufs in the chain we need to do
+ * deferred transmit. Copy the address from the
+ * llentry before dropping the lock down below.
+ */
+ if (chain != NULL)
+ memcpy(&sin6, L3_ADDR_SIN6(ln), sizeof(sin6));
+ }
+ } else if (ln->ln_state == ND6_LLINFO_INCOMPLETE) {
+ /* probe right away */
+			nd6_llinfo_settimer_locked(ln, 0);
+ }
+ }
+
+ /*
+ * ICMP6 type dependent behavior.
+ *
+ * NS: clear IsRouter if new entry
+ * RS: clear IsRouter
+ * RA: set IsRouter if there's lladdr
+ * redir: clear IsRouter if new entry
+ *
+ * RA case, (1):
+ * The spec says that we must set IsRouter in the following cases:
+	 * 	- If lladdr exists, set IsRouter. This means (1-5).
+	 * 	- If it is an old entry (!newentry), set IsRouter. This means (7).
+	 * So, based on the spec, in cases (1-5) and (7) we must set IsRouter.
+	 * A question arises for case (1): it has no lladdr in the
+	 * neighbor cache, which makes it similar to (6).
+	 * This case is rare, but we figured that we MUST NOT set IsRouter.
+ *
+ * newentry olladdr lladdr llchange NS RS RA redir
+ * D R
+ * 0 n n -- (1) c ? s
+ * 0 y n -- (2) c s s
+ * 0 n y -- (3) c s s
+ * 0 y y n (4) c s s
+ * 0 y y y (5) c s s
+ * 1 -- n -- (6) c c c s
+ * 1 -- y -- (7) c c s c s
+ *
+ * (c=clear s=set)
+ */
+ switch (type & 0xff) {
+ case ND_NEIGHBOR_SOLICIT:
+ /*
+ * New entry must have is_router flag cleared.
+ */
+ if (is_newentry) /* (6-7) */
+ ln->ln_router = 0;
+ break;
+ case ND_REDIRECT:
+ /*
+ * If the icmp is a redirect to a better router, always set the
+ * is_router flag. Otherwise, if the entry is newly created,
+ * clear the flag. [RFC 2461, sec 8.3]
+ */
+ if (code == ND_REDIRECT_ROUTER)
+ ln->ln_router = 1;
+ else if (is_newentry) /* (6-7) */
+ ln->ln_router = 0;
+ break;
+ case ND_ROUTER_SOLICIT:
+ /*
+ * is_router flag must always be cleared.
+ */
+ ln->ln_router = 0;
+ break;
+ case ND_ROUTER_ADVERT:
+ /*
+ * Mark an entry with lladdr as a router.
+ */
+ if ((!is_newentry && (olladdr || lladdr)) || /* (2-5) */
+ (is_newentry && lladdr)) { /* (7) */
+ ln->ln_router = 1;
+ }
+ break;
+ }
+
+ if (ln != NULL) {
+ static_route = (ln->la_flags & LLE_STATIC);
+ router = ln->ln_router;
+
+ if (flags & ND6_EXCLUSIVE)
+ LLE_WUNLOCK(ln);
+ else
+ LLE_RUNLOCK(ln);
+ if (static_route)
+ ln = NULL;
+ }
+ if (chain)
+ nd6_output_flush(ifp, ifp, chain, &sin6, NULL);
+
+ /*
+ * When the link-layer address of a router changes, select the
+ * best router again. In particular, when the neighbor entry is newly
+ * created, it might affect the selection policy.
+ * Question: can we restrict the first condition to the "is_newentry"
+ * case?
+ * XXX: when we hear an RA from a new router with the link-layer
+ * address option, defrouter_select() is called twice, since
+	 * defrtrlist_update called the function as well. However, we believe
+	 * the overhead is tolerable, since it only happens the first time.
+	 * XXX: although defrouter_select() should not have a bad effect
+	 * on hosts that are not autoconfigured, we explicitly avoid such
+	 * cases for safety.
+ */
+ if (do_update && router && !V_ip6_forwarding && V_ip6_accept_rtadv) {
+ /*
+ * guaranteed recursion
+ */
+ defrouter_select();
+ }
+
+ return (ln);
+done:
+ if (ln != NULL) {
+ if (flags & ND6_EXCLUSIVE)
+ LLE_WUNLOCK(ln);
+ else
+ LLE_RUNLOCK(ln);
+ if (static_route)
+ ln = NULL;
+ }
+ return (ln);
+}
+
+static void
+nd6_slowtimo(void *arg)
+{
+ CURVNET_SET((struct vnet *) arg);
+ struct nd_ifinfo *nd6if;
+ struct ifnet *ifp;
+
+ callout_reset(&V_nd6_slowtimo_ch, ND6_SLOWTIMER_INTERVAL * hz,
+ nd6_slowtimo, curvnet);
+ IFNET_RLOCK_NOSLEEP();
+ for (ifp = TAILQ_FIRST(&V_ifnet); ifp;
+ ifp = TAILQ_NEXT(ifp, if_list)) {
+ nd6if = ND_IFINFO(ifp);
+ if (nd6if->basereachable && /* already initialized */
+ (nd6if->recalctm -= ND6_SLOWTIMER_INTERVAL) <= 0) {
+ /*
+ * Since reachable time rarely changes by router
+			 * advertisements, we SHOULD ensure that a new random
+ * value gets recomputed at least once every few hours.
+ * (RFC 2461, 6.3.4)
+ */
+ nd6if->recalctm = V_nd6_recalc_reachtm_interval;
+ nd6if->reachable = ND_COMPUTE_RTIME(nd6if->basereachable);
+ }
+ }
+ IFNET_RUNLOCK_NOSLEEP();
+ CURVNET_RESTORE();
+}
+
+int
+nd6_output(struct ifnet *ifp, struct ifnet *origifp, struct mbuf *m0,
+ struct sockaddr_in6 *dst, struct rtentry *rt0)
+{
+
+ return (nd6_output_lle(ifp, origifp, m0, dst, rt0, NULL, NULL));
+}
+
+
+/*
+ * Note that we do not enforce any global serialization of lle state
+ * or la_asked changes here, as the logic is too complicated to avoid
+ * having to always acquire an exclusive lock.
+ * KMM
+ *
+ */
+#define senderr(e) { error = (e); goto bad;}
+
+int
+nd6_output_lle(struct ifnet *ifp, struct ifnet *origifp, struct mbuf *m0,
+ struct sockaddr_in6 *dst, struct rtentry *rt0, struct llentry *lle,
+ struct mbuf **chain)
+{
+ struct mbuf *m = m0;
+ struct llentry *ln = lle;
+ int error = 0;
+ int flags = 0;
+
+#ifdef INVARIANTS
+ if (lle != NULL) {
+
+ LLE_WLOCK_ASSERT(lle);
+
+ KASSERT(chain != NULL, (" lle locked but no mbuf chain pointer passed"));
+ }
+#endif
+ if (IN6_IS_ADDR_MULTICAST(&dst->sin6_addr))
+ goto sendpkt;
+
+ if (nd6_need_cache(ifp) == 0)
+ goto sendpkt;
+
+ /*
+ * next hop determination. This routine is derived from ether_output.
+ */
+
+ /*
+ * Address resolution or Neighbor Unreachability Detection
+ * for the next hop.
+ * At this point, the destination of the packet must be a unicast
+ * or an anycast address(i.e. not a multicast).
+ */
+
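+	/*
+	 * An exclusive (write) lock is needed whenever the entry may be
+	 * modified: a packet may be queued on it (m != NULL), or the
+	 * caller passed in an lle it already holds write-locked.
+	 */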
+ flags = ((m != NULL) || (lle != NULL)) ? LLE_EXCLUSIVE : 0;
+ if (ln == NULL) {
+ retry:
+ IF_AFDATA_LOCK(ifp);
+ ln = lla_lookup(LLTABLE6(ifp), flags, (struct sockaddr *)dst);
+ IF_AFDATA_UNLOCK(ifp);
+ if ((ln == NULL) && nd6_is_addr_neighbor(dst, ifp)) {
+ /*
+ * Since nd6_is_addr_neighbor() internally calls nd6_lookup(),
+ * the condition below is not very efficient. But we believe
+ * it is tolerable, because this should be a rare case.
+ */
+ flags = ND6_CREATE | (m ? ND6_EXCLUSIVE : 0);
+ IF_AFDATA_LOCK(ifp);
+ ln = nd6_lookup(&dst->sin6_addr, flags, ifp);
+ IF_AFDATA_UNLOCK(ifp);
+ }
+ }
+ if (ln == NULL) {
+ if ((ifp->if_flags & IFF_POINTOPOINT) == 0 &&
+ !(ND_IFINFO(ifp)->flags & ND6_IFF_PERFORMNUD)) {
+ char ip6buf[INET6_ADDRSTRLEN];
+ log(LOG_DEBUG,
+ "nd6_output: can't allocate llinfo for %s "
+ "(ln=%p)\n",
+ ip6_sprintf(ip6buf, &dst->sin6_addr), ln);
+ senderr(EIO); /* XXX: good error? */
+ }
+ goto sendpkt; /* send anyway */
+ }
+
+ /* We don't have to do link-layer address resolution on a p2p link. */
+ if ((ifp->if_flags & IFF_POINTOPOINT) != 0 &&
+ ln->ln_state < ND6_LLINFO_REACHABLE) {
+		if ((flags & LLE_EXCLUSIVE) == 0) {
+			flags |= LLE_EXCLUSIVE;
+			/* drop the read lock before retrying the lookup */
+			LLE_RUNLOCK(ln);
+			goto retry;
+		}
+ ln->ln_state = ND6_LLINFO_STALE;
+ nd6_llinfo_settimer_locked(ln, (long)V_nd6_gctimer * hz);
+ }
+
+ /*
+ * The first time we send a packet to a neighbor whose entry is
+	 * STALE, we have to change the state to DELAY and set a timer to
+	 * expire in DELAY_FIRST_PROBE_TIME seconds so that we perform
+	 * neighbor unreachability detection on expiration.
+ * (RFC 2461 7.3.3)
+ */
+ if (ln->ln_state == ND6_LLINFO_STALE) {
+ if ((flags & LLE_EXCLUSIVE) == 0) {
+ flags |= LLE_EXCLUSIVE;
+ LLE_RUNLOCK(ln);
+ goto retry;
+ }
+ ln->la_asked = 0;
+ ln->ln_state = ND6_LLINFO_DELAY;
+ nd6_llinfo_settimer_locked(ln, (long)V_nd6_delay * hz);
+ }
+
+ /*
+ * If the neighbor cache entry has a state other than INCOMPLETE
+ * (i.e. its link-layer address is already resolved), just
+ * send the packet.
+ */
+ if (ln->ln_state > ND6_LLINFO_INCOMPLETE)
+ goto sendpkt;
+
+ /*
+	 * There is a neighbor cache entry, but no link-layer address
+	 * response yet. Append this latest packet to the end of the
+	 * hold queue, as long as the number of queued packets does not
+	 * exceed nd6_maxqueuelen; otherwise the oldest packet in the
+	 * queue is removed to make room.
+ */
+ if (ln->ln_state == ND6_LLINFO_NOSTATE)
+ ln->ln_state = ND6_LLINFO_INCOMPLETE;
+
+ if ((flags & LLE_EXCLUSIVE) == 0) {
+ flags |= LLE_EXCLUSIVE;
+ LLE_RUNLOCK(ln);
+ goto retry;
+ }
+
+ LLE_WLOCK_ASSERT(ln);
+
+ if (ln->la_hold) {
+ struct mbuf *m_hold;
+ int i;
+
+ i = 0;
+ for (m_hold = ln->la_hold; m_hold; m_hold = m_hold->m_nextpkt) {
+ i++;
+ if (m_hold->m_nextpkt == NULL) {
+ m_hold->m_nextpkt = m;
+ break;
+ }
+ }
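+		/* drop the oldest queued packets until the queue fits */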
+ while (i >= V_nd6_maxqueuelen) {
+ m_hold = ln->la_hold;
+ ln->la_hold = ln->la_hold->m_nextpkt;
+ m_freem(m_hold);
+ i--;
+ }
+ } else {
+ ln->la_hold = m;
+ }
+
+ /*
+ * If there has been no NS for the neighbor after entering the
+ * INCOMPLETE state, send the first solicitation.
+ */
+ if (!ND6_LLINFO_PERMANENT(ln) && ln->la_asked == 0) {
+ ln->la_asked++;
+
+ nd6_llinfo_settimer_locked(ln,
+ (long)ND_IFINFO(ifp)->retrans * hz / 1000);
+ LLE_WUNLOCK(ln);
+ nd6_ns_output(ifp, NULL, &dst->sin6_addr, ln, 0);
+ if (lle != NULL && ln == lle)
+ LLE_WLOCK(lle);
+
+ } else if (lle == NULL || ln != lle) {
+ /*
+ * We did the lookup (no lle arg) so we
+ * need to do the unlock here.
+ */
+ LLE_WUNLOCK(ln);
+ }
+
+ return (0);
+
+ sendpkt:
+ /* discard the packet if IPv6 operation is disabled on the interface */
+ if ((ND_IFINFO(ifp)->flags & ND6_IFF_IFDISABLED)) {
+ error = ENETDOWN; /* better error? */
+ goto bad;
+ }
+ /*
+ * ln is valid and the caller did not pass in
+ * an llentry
+ */
+ if ((ln != NULL) && (lle == NULL)) {
+ if (flags & LLE_EXCLUSIVE)
+ LLE_WUNLOCK(ln);
+ else
+ LLE_RUNLOCK(ln);
+ }
+
+#ifdef MAC
+ mac_netinet6_nd6_send(ifp, m);
+#endif
+ /*
+	 * We were passed a pointer to an lle with the lock held;
+	 * this means that we cannot call if_output, as we would
+	 * recurse on the lle lock. Instead, we build a list of
+	 * mbufs to send and let the caller transmit them after
+	 * the lock is dropped.
+ */
+ if (lle != NULL) {
+ if (*chain == NULL)
+ *chain = m;
+		else {
+			struct mbuf *mb = *chain;
+
+			/*
+			 * Append this mbuf to the end of the deferred chain;
+			 * use a distinct variable so that the packet being
+			 * appended (m) is not shadowed.
+			 */
+			while (mb->m_nextpkt != NULL)
+				mb = mb->m_nextpkt;
+			mb->m_nextpkt = m;
+		}
+ return (error);
+ }
+ if ((ifp->if_flags & IFF_LOOPBACK) != 0) {
+ return ((*ifp->if_output)(origifp, m, (struct sockaddr *)dst,
+ NULL));
+ }
+ error = (*ifp->if_output)(ifp, m, (struct sockaddr *)dst, NULL);
+ return (error);
+
+ bad:
+ /*
+ * ln is valid and the caller did not pass in
+ * an llentry
+ */
+ if ((ln != NULL) && (lle == NULL)) {
+ if (flags & LLE_EXCLUSIVE)
+ LLE_WUNLOCK(ln);
+ else
+ LLE_RUNLOCK(ln);
+ }
+ if (m)
+ m_freem(m);
+ return (error);
+}
+#undef senderr
+
+
+int
+nd6_output_flush(struct ifnet *ifp, struct ifnet *origifp, struct mbuf *chain,
+ struct sockaddr_in6 *dst, struct route *ro)
+{
+ struct mbuf *m, *m_head;
+ struct ifnet *outifp;
+ int error = 0;
+
+ m_head = chain;
+ if ((ifp->if_flags & IFF_LOOPBACK) != 0)
+ outifp = origifp;
+ else
+ outifp = ifp;
+
+ while (m_head) {
+ m = m_head;
+ m_head = m_head->m_nextpkt;
+ error = (*ifp->if_output)(ifp, m, (struct sockaddr *)dst, ro);
+ }
+
+ /*
+ * XXX
+ * note that intermediate errors are blindly ignored - but this is
+ * the same convention as used with nd6_output when called by
+ * nd6_cache_lladdr
+ */
+ return (error);
+}
+
+
+int
+nd6_need_cache(struct ifnet *ifp)
+{
+ /*
+ * XXX: we currently do not make neighbor cache on any interface
+ * other than ARCnet, Ethernet, FDDI and GIF.
+ *
+ * RFC2893 says:
+	 * - unidirectional tunnels need no ND
+ */
+ switch (ifp->if_type) {
+ case IFT_ARCNET:
+ case IFT_ETHER:
+ case IFT_FDDI:
+ case IFT_IEEE1394:
+#ifdef IFT_L2VLAN
+ case IFT_L2VLAN:
+#endif
+#ifdef IFT_IEEE80211
+ case IFT_IEEE80211:
+#endif
+#ifdef IFT_CARP
+ case IFT_CARP:
+#endif
+ case IFT_GIF: /* XXX need more cases? */
+ case IFT_PPP:
+ case IFT_TUNNEL:
+ case IFT_BRIDGE:
+ case IFT_PROPVIRTUAL:
+ return (1);
+ default:
+ return (0);
+ }
+}
+
+/*
+ * the callers of this function need to be re-worked to drop
+ * the lle lock, drop here for now
+ */
+int
+nd6_storelladdr(struct ifnet *ifp, struct mbuf *m,
+ struct sockaddr *dst, u_char *desten, struct llentry **lle)
+{
+ struct llentry *ln;
+
+ *lle = NULL;
+ IF_AFDATA_UNLOCK_ASSERT(ifp);
+ if (m->m_flags & M_MCAST) {
+ int i;
+
+ switch (ifp->if_type) {
+ case IFT_ETHER:
+ case IFT_FDDI:
+#ifdef IFT_L2VLAN
+ case IFT_L2VLAN:
+#endif
+#ifdef IFT_IEEE80211
+ case IFT_IEEE80211:
+#endif
+ case IFT_BRIDGE:
+ case IFT_ISO88025:
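+			/*
+			 * Map the IPv6 multicast group directly to a
+			 * link-layer multicast address (33:33:xx:xx:xx:xx
+			 * for Ethernet, RFC 2464 section 7).
+			 */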
+ ETHER_MAP_IPV6_MULTICAST(&SIN6(dst)->sin6_addr,
+ desten);
+ return (0);
+ case IFT_IEEE1394:
+ /*
+			 * NetBSD can use if_broadcastaddr, but we don't do so
+			 * to reduce the number of ifdefs.
+ */
+ for (i = 0; i < ifp->if_addrlen; i++)
+ desten[i] = ~0;
+ return (0);
+ case IFT_ARCNET:
+ *desten = 0;
+ return (0);
+ default:
+ m_freem(m);
+ return (EAFNOSUPPORT);
+ }
+ }
+
+
+ /*
+ * the entry should have been created in nd6_store_lladdr
+ */
+ IF_AFDATA_LOCK(ifp);
+ ln = lla_lookup(LLTABLE6(ifp), 0, dst);
+ IF_AFDATA_UNLOCK(ifp);
+ if ((ln == NULL) || !(ln->la_flags & LLE_VALID)) {
+ if (ln != NULL)
+ LLE_RUNLOCK(ln);
+ /* this could happen, if we could not allocate memory */
+ m_freem(m);
+ return (1);
+ }
+
+ bcopy(&ln->ll_addr, desten, ifp->if_addrlen);
+ *lle = ln;
+ LLE_RUNLOCK(ln);
+ /*
+ * A *small* use after free race exists here
+ */
+ return (0);
+}
+
+static void
+clear_llinfo_pqueue(struct llentry *ln)
+{
+ struct mbuf *m_hold, *m_hold_next;
+
+ for (m_hold = ln->la_hold; m_hold; m_hold = m_hold_next) {
+ m_hold_next = m_hold->m_nextpkt;
+ m_hold->m_nextpkt = NULL;
+ m_freem(m_hold);
+ }
+
+ ln->la_hold = NULL;
+ return;
+}
+
+static int nd6_sysctl_drlist(SYSCTL_HANDLER_ARGS);
+static int nd6_sysctl_prlist(SYSCTL_HANDLER_ARGS);
+#ifdef SYSCTL_DECL
+SYSCTL_DECL(_net_inet6_icmp6);
+#endif
+SYSCTL_NODE(_net_inet6_icmp6, ICMPV6CTL_ND6_DRLIST, nd6_drlist,
+ CTLFLAG_RD, nd6_sysctl_drlist, "");
+SYSCTL_NODE(_net_inet6_icmp6, ICMPV6CTL_ND6_PRLIST, nd6_prlist,
+ CTLFLAG_RD, nd6_sysctl_prlist, "");
+SYSCTL_VNET_INT(_net_inet6_icmp6, ICMPV6CTL_ND6_MAXQLEN, nd6_maxqueuelen,
+ CTLFLAG_RW, &VNET_NAME(nd6_maxqueuelen), 1, "");
+
+static int
+nd6_sysctl_drlist(SYSCTL_HANDLER_ARGS)
+{
+ int error;
+ char buf[1024] __aligned(4);
+ struct in6_defrouter *d, *de;
+ struct nd_defrouter *dr;
+
+ if (req->newptr)
+ return EPERM;
+ error = 0;
+
+ for (dr = TAILQ_FIRST(&V_nd_defrouter); dr;
+ dr = TAILQ_NEXT(dr, dr_entry)) {
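+		/* d: next record in the scratch buffer; de: end of buffer */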
+ d = (struct in6_defrouter *)buf;
+ de = (struct in6_defrouter *)(buf + sizeof(buf));
+
+ if (d + 1 <= de) {
+ bzero(d, sizeof(*d));
+ d->rtaddr.sin6_family = AF_INET6;
+ d->rtaddr.sin6_len = sizeof(d->rtaddr);
+ d->rtaddr.sin6_addr = dr->rtaddr;
+ error = sa6_recoverscope(&d->rtaddr);
+ if (error != 0)
+ return (error);
+ d->flags = dr->flags;
+ d->rtlifetime = dr->rtlifetime;
+ d->expire = dr->expire;
+ d->if_index = dr->ifp->if_index;
+ } else
+ panic("buffer too short");
+
+ error = SYSCTL_OUT(req, buf, sizeof(*d));
+ if (error)
+ break;
+ }
+
+ return (error);
+}
+
+static int
+nd6_sysctl_prlist(SYSCTL_HANDLER_ARGS)
+{
+ int error;
+ char buf[1024] __aligned(4);
+ struct in6_prefix *p, *pe;
+ struct nd_prefix *pr;
+ char ip6buf[INET6_ADDRSTRLEN];
+
+ if (req->newptr)
+ return EPERM;
+ error = 0;
+
+ for (pr = V_nd_prefix.lh_first; pr; pr = pr->ndpr_next) {
+ u_short advrtrs;
+ size_t advance;
+ struct sockaddr_in6 *sin6, *s6;
+ struct nd_pfxrouter *pfr;
+
+ p = (struct in6_prefix *)buf;
+ pe = (struct in6_prefix *)(buf + sizeof(buf));
+
+ if (p + 1 <= pe) {
+ bzero(p, sizeof(*p));
+ sin6 = (struct sockaddr_in6 *)(p + 1);
+
+ p->prefix = pr->ndpr_prefix;
+ if (sa6_recoverscope(&p->prefix)) {
+ log(LOG_ERR,
+ "scope error in prefix list (%s)\n",
+ ip6_sprintf(ip6buf, &p->prefix.sin6_addr));
+ /* XXX: press on... */
+ }
+ p->raflags = pr->ndpr_raf;
+ p->prefixlen = pr->ndpr_plen;
+ p->vltime = pr->ndpr_vltime;
+ p->pltime = pr->ndpr_pltime;
+ p->if_index = pr->ndpr_ifp->if_index;
+ if (pr->ndpr_vltime == ND6_INFINITE_LIFETIME)
+ p->expire = 0;
+ else {
+ time_t maxexpire;
+
+ /* XXX: we assume time_t is signed. */
+ maxexpire = (-1) &
+ ~((time_t)1 <<
+ ((sizeof(maxexpire) * 8) - 1));
+ if (pr->ndpr_vltime <
+ maxexpire - pr->ndpr_lastupdate) {
+ p->expire = pr->ndpr_lastupdate +
+ pr->ndpr_vltime;
+ } else
+ p->expire = maxexpire;
+ }
+ p->refcnt = pr->ndpr_refcnt;
+ p->flags = pr->ndpr_stateflags;
+ p->origin = PR_ORIG_RA;
+ advrtrs = 0;
+ for (pfr = pr->ndpr_advrtrs.lh_first; pfr;
+ pfr = pfr->pfr_next) {
+ if ((void *)&sin6[advrtrs + 1] > (void *)pe) {
+ advrtrs++;
+ continue;
+ }
+ s6 = &sin6[advrtrs];
+ bzero(s6, sizeof(*s6));
+ s6->sin6_family = AF_INET6;
+ s6->sin6_len = sizeof(*sin6);
+ s6->sin6_addr = pfr->router->rtaddr;
+ if (sa6_recoverscope(s6)) {
+ log(LOG_ERR,
+ "scope error in "
+ "prefix list (%s)\n",
+ ip6_sprintf(ip6buf,
+ &pfr->router->rtaddr));
+ }
+ advrtrs++;
+ }
+ p->advrtrs = advrtrs;
+ } else
+ panic("buffer too short");
+
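+		/*
+		 * Each exported record is a struct in6_prefix followed
+		 * immediately by `advrtrs' sockaddr_in6 entries, one per
+		 * advertising router.
+		 */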
+ advance = sizeof(*p) + sizeof(*sin6) * advrtrs;
+ error = SYSCTL_OUT(req, buf, advance);
+ if (error)
+ break;
+ }
+
+ return (error);
+}
diff --git a/rtems/freebsd/netinet6/nd6.h b/rtems/freebsd/netinet6/nd6.h
new file mode 100644
index 00000000..58677ff5
--- /dev/null
+++ b/rtems/freebsd/netinet6/nd6.h
@@ -0,0 +1,455 @@
+/*-
+ * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the project nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $KAME: nd6.h,v 1.76 2001/12/18 02:10:31 itojun Exp $
+ * $FreeBSD$
+ */
+
+#ifndef _NETINET6_ND6_HH_
+#define _NETINET6_ND6_HH_
+
+/* see net/route.h, or net/if_inarp.h */
+#ifndef RTF_ANNOUNCE
+#define RTF_ANNOUNCE RTF_PROTO2
+#endif
+
+#include <rtems/freebsd/sys/queue.h>
+#include <rtems/freebsd/sys/callout.h>
+
+struct llentry;
+
+#define ND6_LLINFO_NOSTATE -2
+/*
+ * We don't need the WAITDELETE state any more, but we keep the definition
+ * in a comment line instead of removing it. This is necessary to avoid
+ * unintentionally reusing the value for another purpose, which might
+ * affect backward compatibility with old applications.
+ * (20000711 jinmei@kame.net)
+ */
+/* #define ND6_LLINFO_WAITDELETE -1 */
+#define ND6_LLINFO_INCOMPLETE 0
+#define ND6_LLINFO_REACHABLE 1
+#define ND6_LLINFO_STALE 2
+#define ND6_LLINFO_DELAY 3
+#define ND6_LLINFO_PROBE 4
+
+#define ND6_IS_LLINFO_PROBREACH(n) ((n)->ln_state > ND6_LLINFO_INCOMPLETE)
+#define ND6_LLINFO_PERMANENT(n) (((n)->la_expire == 0) && ((n)->ln_state > ND6_LLINFO_INCOMPLETE))
+
+struct nd_ifinfo {
+ u_int32_t linkmtu; /* LinkMTU */
+ u_int32_t maxmtu; /* Upper bound of LinkMTU */
+ u_int32_t basereachable; /* BaseReachableTime */
+ u_int32_t reachable; /* Reachable Time */
+ u_int32_t retrans; /* Retrans Timer */
+ u_int32_t flags; /* Flags */
+	int recalctm;			/* BaseReachable re-calculation timer */
+ u_int8_t chlim; /* CurHopLimit */
+ u_int8_t initialized; /* Flag to see the entry is initialized */
+ /* the following 3 members are for privacy extension for addrconf */
+ u_int8_t randomseed0[8]; /* upper 64 bits of MD5 digest */
+ u_int8_t randomseed1[8]; /* lower 64 bits (usually the EUI64 IFID) */
+ u_int8_t randomid[8]; /* current random ID */
+};
+
+#define ND6_IFF_PERFORMNUD 0x1
+#define ND6_IFF_ACCEPT_RTADV 0x2
+#define ND6_IFF_PREFER_SOURCE 0x4 /* XXX: not related to ND. */
+#define ND6_IFF_IFDISABLED 0x8 /* IPv6 operation is disabled due to
+ * DAD failure. (XXX: not ND-specific)
+ */
+#define ND6_IFF_DONT_SET_IFROUTE 0x10
+
+#define ND6_CREATE LLE_CREATE
+#define ND6_EXCLUSIVE LLE_EXCLUSIVE
+
+#ifdef _KERNEL
+#define ND_IFINFO(ifp) \
+ (((struct in6_ifextra *)(ifp)->if_afdata[AF_INET6])->nd_ifinfo)
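+/*
+ * Effective IPv6 link MTU: prefer an administratively set linkmtu when
+ * it is positive and below the interface MTU, then the ND-level maxmtu
+ * under the same constraint, and plain if_mtu otherwise.
+ */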
+#define IN6_LINKMTU(ifp) \
+ ((ND_IFINFO(ifp)->linkmtu && ND_IFINFO(ifp)->linkmtu < (ifp)->if_mtu) \
+ ? ND_IFINFO(ifp)->linkmtu \
+ : ((ND_IFINFO(ifp)->maxmtu && ND_IFINFO(ifp)->maxmtu < (ifp)->if_mtu) \
+ ? ND_IFINFO(ifp)->maxmtu : (ifp)->if_mtu))
+#endif
+
+struct in6_nbrinfo {
+ char ifname[IFNAMSIZ]; /* if name, e.g. "en0" */
+ struct in6_addr addr; /* IPv6 address of the neighbor */
+ long asked; /* number of queries already sent for this addr */
+ int isrouter; /* if it acts as a router */
+ int state; /* reachability state */
+ int expire; /* lifetime for NDP state transition */
+};
+
+#define DRLSTSIZ 10
+#define PRLSTSIZ 10
+struct in6_drlist {
+ char ifname[IFNAMSIZ];
+ struct {
+ struct in6_addr rtaddr;
+ u_char flags;
+ u_short rtlifetime;
+ u_long expire;
+ u_short if_index;
+ } defrouter[DRLSTSIZ];
+};
+
+struct in6_defrouter {
+ struct sockaddr_in6 rtaddr;
+ u_char flags;
+ u_short rtlifetime;
+ u_long expire;
+ u_short if_index;
+};
+
+#ifdef _KERNEL
+struct in6_oprlist {
+ char ifname[IFNAMSIZ];
+ struct {
+ struct in6_addr prefix;
+ struct prf_ra raflags;
+ u_char prefixlen;
+ u_char origin;
+ u_long vltime;
+ u_long pltime;
+ u_long expire;
+ u_short if_index;
+ u_short advrtrs; /* number of advertisement routers */
+ struct in6_addr advrtr[DRLSTSIZ]; /* XXX: explicit limit */
+ } prefix[PRLSTSIZ];
+};
+#endif
+
+struct in6_prlist {
+ char ifname[IFNAMSIZ];
+ struct {
+ struct in6_addr prefix;
+ struct prf_ra raflags;
+ u_char prefixlen;
+ u_char origin;
+ u_int32_t vltime;
+ u_int32_t pltime;
+ time_t expire;
+ u_short if_index;
+ u_short advrtrs; /* number of advertisement routers */
+ struct in6_addr advrtr[DRLSTSIZ]; /* XXX: explicit limit */
+ } prefix[PRLSTSIZ];
+};
+
+struct in6_prefix {
+ struct sockaddr_in6 prefix;
+ struct prf_ra raflags;
+ u_char prefixlen;
+ u_char origin;
+ u_int32_t vltime;
+ u_int32_t pltime;
+ time_t expire;
+ u_int32_t flags;
+ int refcnt;
+ u_short if_index;
+ u_short advrtrs; /* number of advertisement routers */
+ /* struct sockaddr_in6 advrtr[] */
+};
+
+#ifdef _KERNEL
+struct in6_ondireq {
+ char ifname[IFNAMSIZ];
+ struct {
+ u_int32_t linkmtu; /* LinkMTU */
+ u_int32_t maxmtu; /* Upper bound of LinkMTU */
+ u_int32_t basereachable; /* BaseReachableTime */
+ u_int32_t reachable; /* Reachable Time */
+ u_int32_t retrans; /* Retrans Timer */
+ u_int32_t flags; /* Flags */
+		int recalctm;		/* BaseReachable re-calculation timer */
+ u_int8_t chlim; /* CurHopLimit */
+ u_int8_t receivedra;
+ } ndi;
+};
+#endif
+
+struct in6_ndireq {
+ char ifname[IFNAMSIZ];
+ struct nd_ifinfo ndi;
+};
+
+struct in6_ndifreq {
+ char ifname[IFNAMSIZ];
+ u_long ifindex;
+};
+
+/* Prefix status */
+#define NDPRF_ONLINK 0x1
+#define NDPRF_DETACHED 0x2
+
+/* protocol constants */
+#define MAX_RTR_SOLICITATION_DELAY 1 /* 1sec */
+#define RTR_SOLICITATION_INTERVAL 4 /* 4sec */
+#define MAX_RTR_SOLICITATIONS 3
+
+#define ND6_INFINITE_LIFETIME 0xffffffff
+
+#ifdef _KERNEL
+/* node constants */
+#define MAX_REACHABLE_TIME 3600000 /* msec */
+#define REACHABLE_TIME 30000 /* msec */
+#define RETRANS_TIMER 1000 /* msec */
+#define MIN_RANDOM_FACTOR 512 /* 1024 * 0.5 */
+#define MAX_RANDOM_FACTOR 1536 /* 1024 * 1.5 */
+#define DEF_TEMP_VALID_LIFETIME 604800 /* 1 week */
+#define DEF_TEMP_PREFERRED_LIFETIME 86400 /* 1 day */
+#define TEMPADDR_REGEN_ADVANCE 5 /* sec */
+#define MAX_TEMP_DESYNC_FACTOR 600 /* 10 min */
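+/*
+ * Randomize a base reachable time x (in msec): scale x by a factor in
+ * 1024ths between MIN_RANDOM_FACTOR and about MAX_RANDOM_FACTOR (i.e.
+ * roughly 0.5x to 1.5x; the & acts as a bit mask, so the distribution
+ * is only approximately uniform), then convert the result to seconds.
+ */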
+#define ND_COMPUTE_RTIME(x) \
+ (((MIN_RANDOM_FACTOR * (x >> 10)) + (arc4random() & \
+ ((MAX_RANDOM_FACTOR - MIN_RANDOM_FACTOR) * (x >> 10)))) /1000)
+
+TAILQ_HEAD(nd_drhead, nd_defrouter);
+struct nd_defrouter {
+ TAILQ_ENTRY(nd_defrouter) dr_entry;
+ struct in6_addr rtaddr;
+ u_char flags; /* flags on RA message */
+ u_short rtlifetime;
+ u_long expire;
+ struct ifnet *ifp;
+ int installed; /* is installed into kernel routing table */
+};
+
+struct nd_prefixctl {
+ struct ifnet *ndpr_ifp;
+
+ /* prefix */
+ struct sockaddr_in6 ndpr_prefix;
+ u_char ndpr_plen;
+
+ u_int32_t ndpr_vltime; /* advertised valid lifetime */
+ u_int32_t ndpr_pltime; /* advertised preferred lifetime */
+
+ struct prf_ra ndpr_flags;
+};
+
+
+struct nd_prefix {
+ struct ifnet *ndpr_ifp;
+ LIST_ENTRY(nd_prefix) ndpr_entry;
+ struct sockaddr_in6 ndpr_prefix; /* prefix */
+ struct in6_addr ndpr_mask; /* netmask derived from the prefix */
+
+ u_int32_t ndpr_vltime; /* advertised valid lifetime */
+ u_int32_t ndpr_pltime; /* advertised preferred lifetime */
+
+ time_t ndpr_expire; /* expiration time of the prefix */
+ time_t ndpr_preferred; /* preferred time of the prefix */
+ time_t ndpr_lastupdate; /* reception time of last advertisement */
+
+ struct prf_ra ndpr_flags;
+ u_int32_t ndpr_stateflags; /* actual state flags */
+ /* list of routers that advertise the prefix: */
+ LIST_HEAD(pr_rtrhead, nd_pfxrouter) ndpr_advrtrs;
+ u_char ndpr_plen;
+ int ndpr_refcnt; /* reference counter from addresses */
+};
+
+#define ndpr_next ndpr_entry.le_next
+
+#define ndpr_raf ndpr_flags
+#define ndpr_raf_onlink ndpr_flags.onlink
+#define ndpr_raf_auto ndpr_flags.autonomous
+#define ndpr_raf_router ndpr_flags.router
+
+/*
+ * Message format for use in obtaining information about prefixes
+ * from inet6 sysctl function
+ */
+struct inet6_ndpr_msghdr {
+ u_short inpm_msglen; /* to skip over non-understood messages */
+ u_char inpm_version; /* future binary compatibility */
+ u_char inpm_type; /* message type */
+ struct in6_addr inpm_prefix;
+ u_long prm_vltim;
+ u_long prm_pltime;
+ u_long prm_expire;
+ u_long prm_preferred;
+ struct in6_prflags prm_flags;
+ u_short prm_index; /* index for associated ifp */
+ u_char prm_plen; /* length of prefix in bits */
+};
+
+#define prm_raf_onlink prm_flags.prf_ra.onlink
+#define prm_raf_auto prm_flags.prf_ra.autonomous
+
+#define prm_statef_onlink prm_flags.prf_state.onlink
+
+#define prm_rrf_decrvalid prm_flags.prf_rr.decrvalid
+#define prm_rrf_decrprefd prm_flags.prf_rr.decrprefd
+
+struct nd_pfxrouter {
+ LIST_ENTRY(nd_pfxrouter) pfr_entry;
+#define pfr_next pfr_entry.le_next
+ struct nd_defrouter *router;
+};
+
+LIST_HEAD(nd_prhead, nd_prefix);
+
+/* nd6.c */
+VNET_DECLARE(int, nd6_prune);
+VNET_DECLARE(int, nd6_delay);
+VNET_DECLARE(int, nd6_umaxtries);
+VNET_DECLARE(int, nd6_mmaxtries);
+VNET_DECLARE(int, nd6_useloopback);
+VNET_DECLARE(int, nd6_maxnudhint);
+VNET_DECLARE(int, nd6_gctimer);
+VNET_DECLARE(struct nd_drhead, nd_defrouter);
+VNET_DECLARE(struct nd_prhead, nd_prefix);
+VNET_DECLARE(int, nd6_debug);
+VNET_DECLARE(int, nd6_onlink_ns_rfc4861);
+#define V_nd6_prune VNET(nd6_prune)
+#define V_nd6_delay VNET(nd6_delay)
+#define V_nd6_umaxtries VNET(nd6_umaxtries)
+#define V_nd6_mmaxtries VNET(nd6_mmaxtries)
+#define V_nd6_useloopback VNET(nd6_useloopback)
+#define V_nd6_maxnudhint VNET(nd6_maxnudhint)
+#define V_nd6_gctimer VNET(nd6_gctimer)
+#define V_nd_defrouter VNET(nd_defrouter)
+#define V_nd_prefix VNET(nd_prefix)
+#define V_nd6_debug VNET(nd6_debug)
+#define V_nd6_onlink_ns_rfc4861 VNET(nd6_onlink_ns_rfc4861)
+
+#define nd6log(x) do { if (V_nd6_debug) log x; } while (/*CONSTCOND*/ 0)
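+/*
+ * nd6log() deliberately takes a doubly-parenthesized argument so that
+ * a variadic format list can pass through the single macro parameter,
+ * e.g.: nd6log((LOG_DEBUG, "DAD started on %s\n", if_name(ifp)));
+ */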
+
+VNET_DECLARE(struct callout, nd6_timer_ch);
+#define V_nd6_timer_ch VNET(nd6_timer_ch)
+
+/* nd6_rtr.c */
+VNET_DECLARE(int, nd6_defifindex);
+VNET_DECLARE(int, ip6_desync_factor); /* seconds */
+VNET_DECLARE(u_int32_t, ip6_temp_preferred_lifetime); /* seconds */
+VNET_DECLARE(u_int32_t, ip6_temp_valid_lifetime); /* seconds */
+VNET_DECLARE(int, ip6_temp_regen_advance); /* seconds */
+#define V_nd6_defifindex VNET(nd6_defifindex)
+#define V_ip6_desync_factor VNET(ip6_desync_factor)
+#define V_ip6_temp_preferred_lifetime VNET(ip6_temp_preferred_lifetime)
+#define V_ip6_temp_valid_lifetime VNET(ip6_temp_valid_lifetime)
+#define V_ip6_temp_regen_advance VNET(ip6_temp_regen_advance)
+
+union nd_opts {
+ struct nd_opt_hdr *nd_opt_array[8]; /* max = target address list */
+ struct {
+ struct nd_opt_hdr *zero;
+ struct nd_opt_hdr *src_lladdr;
+ struct nd_opt_hdr *tgt_lladdr;
+ struct nd_opt_prefix_info *pi_beg; /* multiple opts, start */
+ struct nd_opt_rd_hdr *rh;
+ struct nd_opt_mtu *mtu;
+ struct nd_opt_hdr *search; /* multiple opts */
+ struct nd_opt_hdr *last; /* multiple opts */
+ int done;
+ struct nd_opt_prefix_info *pi_end;/* multiple opts, end */
+ } nd_opt_each;
+};
+#define nd_opts_src_lladdr nd_opt_each.src_lladdr
+#define nd_opts_tgt_lladdr nd_opt_each.tgt_lladdr
+#define nd_opts_pi nd_opt_each.pi_beg
+#define nd_opts_pi_end nd_opt_each.pi_end
+#define nd_opts_rh nd_opt_each.rh
+#define nd_opts_mtu nd_opt_each.mtu
+#define nd_opts_search nd_opt_each.search
+#define nd_opts_last nd_opt_each.last
+#define nd_opts_done nd_opt_each.done
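+/*
+ * The option-parsing pattern used by the ND input routines (a sketch;
+ * "icmp6len" is assumed to have been reduced to the length of the
+ * option area already):
+ *
+ *	union nd_opts ndopts;
+ *
+ *	nd6_option_init(nd_ns + 1, icmp6len, &ndopts);
+ *	if (nd6_options(&ndopts) < 0)
+ *		goto bad;	(malformed option; stats already counted)
+ *	if (ndopts.nd_opts_src_lladdr)
+ *		lladdr = (char *)(ndopts.nd_opts_src_lladdr + 1);
+ */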
+
+/* XXX: need nd6_var.h?? */
+/* nd6.c */
+void nd6_init __P((void));
+#ifdef VIMAGE
+void nd6_destroy __P((void));
+#endif
+struct nd_ifinfo *nd6_ifattach __P((struct ifnet *));
+void nd6_ifdetach __P((struct nd_ifinfo *));
+int nd6_is_addr_neighbor __P((struct sockaddr_in6 *, struct ifnet *));
+void nd6_option_init __P((void *, int, union nd_opts *));
+struct nd_opt_hdr *nd6_option __P((union nd_opts *));
+int nd6_options __P((union nd_opts *));
+struct llentry *nd6_lookup __P((struct in6_addr *, int, struct ifnet *));
+void nd6_setmtu __P((struct ifnet *));
+void nd6_llinfo_settimer __P((struct llentry *, long));
+void nd6_llinfo_settimer_locked __P((struct llentry *, long));
+void nd6_timer __P((void *));
+void nd6_purge __P((struct ifnet *));
+void nd6_nud_hint __P((struct rtentry *, struct in6_addr *, int));
+int nd6_resolve __P((struct ifnet *, struct rtentry *, struct mbuf *,
+ struct sockaddr *, u_char *));
+int nd6_ioctl __P((u_long, caddr_t, struct ifnet *));
+struct llentry *nd6_cache_lladdr __P((struct ifnet *, struct in6_addr *,
+ char *, int, int, int));
+int nd6_output __P((struct ifnet *, struct ifnet *, struct mbuf *,
+ struct sockaddr_in6 *, struct rtentry *));
+int nd6_output_lle __P((struct ifnet *, struct ifnet *, struct mbuf *,
+ struct sockaddr_in6 *, struct rtentry *, struct llentry *,
+ struct mbuf **));
+int nd6_output_flush __P((struct ifnet *, struct ifnet *, struct mbuf *,
+ struct sockaddr_in6 *, struct route *));
+int nd6_need_cache __P((struct ifnet *));
+int nd6_storelladdr __P((struct ifnet *, struct mbuf *,
+ struct sockaddr *, u_char *, struct llentry **));
+
+/* nd6_nbr.c */
+void nd6_na_input __P((struct mbuf *, int, int));
+void nd6_na_output __P((struct ifnet *, const struct in6_addr *,
+ const struct in6_addr *, u_long, int, struct sockaddr *));
+void nd6_ns_input __P((struct mbuf *, int, int));
+void nd6_ns_output __P((struct ifnet *, const struct in6_addr *,
+ const struct in6_addr *, struct llentry *, int));
+caddr_t nd6_ifptomac __P((struct ifnet *));
+void nd6_dad_start __P((struct ifaddr *, int));
+void nd6_dad_stop __P((struct ifaddr *));
+void nd6_dad_duplicated __P((struct ifaddr *));
+
+/* nd6_rtr.c */
+void nd6_rs_input __P((struct mbuf *, int, int));
+void nd6_ra_input __P((struct mbuf *, int, int));
+void prelist_del __P((struct nd_prefix *));
+void defrouter_addreq __P((struct nd_defrouter *));
+void defrouter_reset __P((void));
+void defrouter_select __P((void));
+void defrtrlist_del __P((struct nd_defrouter *));
+void prelist_remove __P((struct nd_prefix *));
+int nd6_prelist_add __P((struct nd_prefixctl *, struct nd_defrouter *,
+ struct nd_prefix **));
+int nd6_prefix_onlink __P((struct nd_prefix *));
+int nd6_prefix_offlink __P((struct nd_prefix *));
+void pfxlist_onlink_check __P((void));
+struct nd_defrouter *defrouter_lookup __P((struct in6_addr *, struct ifnet *));
+struct nd_prefix *nd6_prefix_lookup __P((struct nd_prefixctl *));
+void rt6_flush __P((struct in6_addr *, struct ifnet *));
+int nd6_setdefaultiface __P((int));
+int in6_tmpifadd __P((const struct in6_ifaddr *, int, int));
+
+#endif /* _KERNEL */
+
+#endif /* _NETINET6_ND6_HH_ */
diff --git a/rtems/freebsd/netinet6/nd6_nbr.c b/rtems/freebsd/netinet6/nd6_nbr.c
new file mode 100644
index 00000000..b2c5f2c2
--- /dev/null
+++ b/rtems/freebsd/netinet6/nd6_nbr.c
@@ -0,0 +1,1514 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the project nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $KAME: nd6_nbr.c,v 1.86 2002/01/21 02:33:04 jinmei Exp $
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/freebsd/local/opt_inet.h>
+#include <rtems/freebsd/local/opt_inet6.h>
+#include <rtems/freebsd/local/opt_ipsec.h>
+#include <rtems/freebsd/local/opt_mpath.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/rwlock.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/sockio.h>
+#include <rtems/freebsd/sys/time.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/errno.h>
+#include <rtems/freebsd/sys/syslog.h>
+#include <rtems/freebsd/sys/queue.h>
+#include <rtems/freebsd/sys/callout.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/if_types.h>
+#include <rtems/freebsd/net/if_dl.h>
+#include <rtems/freebsd/net/if_var.h>
+#include <rtems/freebsd/net/route.h>
+#ifdef RADIX_MPATH
+#include <rtems/freebsd/net/radix_mpath.h>
+#endif
+
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/in_var.h>
+#include <rtems/freebsd/net/if_llatbl.h>
+#define L3_ADDR_SIN6(le) ((struct sockaddr_in6 *) L3_ADDR(le))
+#include <rtems/freebsd/netinet6/in6_var.h>
+#include <rtems/freebsd/netinet6/in6_ifattach.h>
+#include <rtems/freebsd/netinet/ip6.h>
+#include <rtems/freebsd/netinet6/ip6_var.h>
+#include <rtems/freebsd/netinet6/scope6_var.h>
+#include <rtems/freebsd/netinet6/nd6.h>
+#include <rtems/freebsd/netinet/icmp6.h>
+#include <rtems/freebsd/netinet/ip_carp.h>
+
+#define SDL(s) ((struct sockaddr_dl *)s)
+
+struct dadq;
+static struct dadq *nd6_dad_find(struct ifaddr *);
+static void nd6_dad_starttimer(struct dadq *, int);
+static void nd6_dad_stoptimer(struct dadq *);
+static void nd6_dad_timer(struct dadq *);
+static void nd6_dad_ns_output(struct dadq *, struct ifaddr *);
+static void nd6_dad_ns_input(struct ifaddr *);
+static void nd6_dad_na_input(struct ifaddr *);
+
+VNET_DEFINE(int, dad_ignore_ns) = 0; /* ignore NS in DAD - specwise incorrect */
+VNET_DEFINE(int, dad_maxtry) = 15; /* max # of *tries* to transmit DAD packet */
+#define V_dad_ignore_ns VNET(dad_ignore_ns)
+#define V_dad_maxtry VNET(dad_maxtry)
+
+/*
+ * Input a Neighbor Solicitation Message.
+ *
+ * Based on RFC 2461
+ * Based on RFC 2462 (duplicate address detection)
+ */
+void
+nd6_ns_input(struct mbuf *m, int off, int icmp6len)
+{
+ struct ifnet *ifp = m->m_pkthdr.rcvif;
+ struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *);
+ struct nd_neighbor_solicit *nd_ns;
+ struct in6_addr saddr6 = ip6->ip6_src;
+ struct in6_addr daddr6 = ip6->ip6_dst;
+ struct in6_addr taddr6;
+ struct in6_addr myaddr6;
+ char *lladdr = NULL;
+ struct ifaddr *ifa = NULL;
+ int lladdrlen = 0;
+ int anycast = 0, proxy = 0, tentative = 0;
+ int tlladdr;
+ union nd_opts ndopts;
+ struct sockaddr_dl *proxydl = NULL;
+ char ip6bufs[INET6_ADDRSTRLEN], ip6bufd[INET6_ADDRSTRLEN];
+
+#ifndef PULLDOWN_TEST
+ IP6_EXTHDR_CHECK(m, off, icmp6len,);
+ nd_ns = (struct nd_neighbor_solicit *)((caddr_t)ip6 + off);
+#else
+ IP6_EXTHDR_GET(nd_ns, struct nd_neighbor_solicit *, m, off, icmp6len);
+ if (nd_ns == NULL) {
+ ICMP6STAT_INC(icp6s_tooshort);
+ return;
+ }
+#endif
+ ip6 = mtod(m, struct ip6_hdr *); /* adjust pointer for safety */
+ taddr6 = nd_ns->nd_ns_target;
+ if (in6_setscope(&taddr6, ifp, NULL) != 0)
+ goto bad;
+
+ if (ip6->ip6_hlim != 255) {
+ nd6log((LOG_ERR,
+ "nd6_ns_input: invalid hlim (%d) from %s to %s on %s\n",
+ ip6->ip6_hlim, ip6_sprintf(ip6bufs, &ip6->ip6_src),
+ ip6_sprintf(ip6bufd, &ip6->ip6_dst), if_name(ifp)));
+ goto bad;
+ }
+
+ if (IN6_IS_ADDR_UNSPECIFIED(&saddr6)) {
+ /* dst has to be a solicited node multicast address. */
+ if (daddr6.s6_addr16[0] == IPV6_ADDR_INT16_MLL &&
+ /* don't check ifindex portion */
+ daddr6.s6_addr32[1] == 0 &&
+ daddr6.s6_addr32[2] == IPV6_ADDR_INT32_ONE &&
+ daddr6.s6_addr8[12] == 0xff) {
+ ; /* good */
+ } else {
+ nd6log((LOG_INFO, "nd6_ns_input: bad DAD packet "
+ "(wrong ip6 dst)\n"));
+ goto bad;
+ }
+ } else if (!V_nd6_onlink_ns_rfc4861) {
+ struct sockaddr_in6 src_sa6;
+
+ /*
+ * According to recent IETF discussions, it is not a good idea
+ * to accept an NS from an address which would not be deemed
+ * to be a neighbor otherwise. This point is expected to be
+ * clarified in future revisions of the specification.
+ */
+ bzero(&src_sa6, sizeof(src_sa6));
+ src_sa6.sin6_family = AF_INET6;
+ src_sa6.sin6_len = sizeof(src_sa6);
+ src_sa6.sin6_addr = saddr6;
+ if (nd6_is_addr_neighbor(&src_sa6, ifp) == 0) {
+ nd6log((LOG_INFO, "nd6_ns_input: "
+ "NS packet from non-neighbor\n"));
+ goto bad;
+ }
+ }
+
+ if (IN6_IS_ADDR_MULTICAST(&taddr6)) {
+ nd6log((LOG_INFO, "nd6_ns_input: bad NS target (multicast)\n"));
+ goto bad;
+ }
+
+ icmp6len -= sizeof(*nd_ns);
+ nd6_option_init(nd_ns + 1, icmp6len, &ndopts);
+ if (nd6_options(&ndopts) < 0) {
+ nd6log((LOG_INFO,
+ "nd6_ns_input: invalid ND option, ignored\n"));
+ /* nd6_options have incremented stats */
+ goto freeit;
+ }
+
+ if (ndopts.nd_opts_src_lladdr) {
+ lladdr = (char *)(ndopts.nd_opts_src_lladdr + 1);
+ lladdrlen = ndopts.nd_opts_src_lladdr->nd_opt_len << 3;
+ }
+
+ if (IN6_IS_ADDR_UNSPECIFIED(&ip6->ip6_src) && lladdr) {
+ nd6log((LOG_INFO, "nd6_ns_input: bad DAD packet "
+ "(link-layer address option)\n"));
+ goto bad;
+ }
+
+ /*
+ * Attaching target link-layer address to the NA?
+ * (RFC 2461 7.2.4)
+ *
+ * NS IP dst is unicast/anycast MUST NOT add
+ * NS IP dst is solicited-node multicast MUST add
+ *
+ * In implementation, we add target link-layer address by default.
+ * We do not add one in MUST NOT cases.
+ */
+ if (!IN6_IS_ADDR_MULTICAST(&daddr6))
+ tlladdr = 0;
+ else
+ tlladdr = 1;
+
+ /*
+ * Target address (taddr6) must be either:
+ * (1) Valid unicast/anycast address for my receiving interface,
+ * (2) Unicast address for which I'm offering proxy service, or
+ * (3) "tentative" address on which DAD is being performed.
+ */
+ /* (1) and (3) check. */
+ if (ifp->if_carp)
+ ifa = (*carp_iamatch6_p)(ifp, &taddr6);
+ if (ifa == NULL)
+ ifa = (struct ifaddr *)in6ifa_ifpwithaddr(ifp, &taddr6);
+
+ /* (2) check. */
+ if (ifa == NULL) {
+ struct rtentry *rt;
+ struct sockaddr_in6 tsin6;
+ int need_proxy;
+#ifdef RADIX_MPATH
+ struct route_in6 ro;
+#endif
+
+ bzero(&tsin6, sizeof tsin6);
+ tsin6.sin6_len = sizeof(struct sockaddr_in6);
+ tsin6.sin6_family = AF_INET6;
+ tsin6.sin6_addr = taddr6;
+
+#ifdef RADIX_MPATH
+ bzero(&ro, sizeof(ro));
+ ro.ro_dst = tsin6;
+ rtalloc_mpath((struct route *)&ro, RTF_ANNOUNCE);
+ rt = ro.ro_rt;
+#else
+ rt = rtalloc1((struct sockaddr *)&tsin6, 0, 0);
+#endif
+ need_proxy = (rt && (rt->rt_flags & RTF_ANNOUNCE) != 0 &&
+ rt->rt_gateway->sa_family == AF_LINK);
+ if (rt)
+ RTFREE_LOCKED(rt);
+ if (need_proxy) {
+ /*
+ * proxy NDP for single entry
+ */
+ ifa = (struct ifaddr *)in6ifa_ifpforlinklocal(ifp,
+ IN6_IFF_NOTREADY|IN6_IFF_ANYCAST);
+ if (ifa) {
+ proxy = 1;
+ proxydl = SDL(rt->rt_gateway);
+ }
+ }
+ }
+ if (ifa == NULL) {
+ /*
+ * We've got an NS packet, and we don't have that address
+ * assigned for us. We MUST silently ignore it.
+ * See RFC2461 7.2.3.
+ */
+ goto freeit;
+ }
+ myaddr6 = *IFA_IN6(ifa);
+ anycast = ((struct in6_ifaddr *)ifa)->ia6_flags & IN6_IFF_ANYCAST;
+ tentative = ((struct in6_ifaddr *)ifa)->ia6_flags & IN6_IFF_TENTATIVE;
+ if (((struct in6_ifaddr *)ifa)->ia6_flags & IN6_IFF_DUPLICATED)
+ goto freeit;
+
+ if (lladdr && ((ifp->if_addrlen + 2 + 7) & ~7) != lladdrlen) {
+ nd6log((LOG_INFO, "nd6_ns_input: lladdrlen mismatch for %s "
+ "(if %d, NS packet %d)\n",
+ ip6_sprintf(ip6bufs, &taddr6),
+ ifp->if_addrlen, lladdrlen - 2));
+ goto bad;
+ }
+
+ if (IN6_ARE_ADDR_EQUAL(&myaddr6, &saddr6)) {
+ nd6log((LOG_INFO, "nd6_ns_input: duplicate IP6 address %s\n",
+ ip6_sprintf(ip6bufs, &saddr6)));
+ goto freeit;
+ }
+
+ /*
+ * We have a neighbor solicitation packet, with a target address equal to
+ * one of my tentative addresses.
+ *
+ * src addr how to process?
+ * --- ---
+ * multicast of course, invalid (rejected in ip6_input)
+ * unicast somebody is doing address resolution -> ignore
+ * unspec dup address detection
+ *
+ * The processing is defined in RFC 2462.
+ */
+ if (tentative) {
+ /*
+ * If the source address is the unspecified address, it is for
+ * duplicate address detection.
+ *
+ * If not, the packet is for address resolution;
+ * silently ignore it.
+ */
+ if (IN6_IS_ADDR_UNSPECIFIED(&saddr6))
+ nd6_dad_ns_input(ifa);
+
+ goto freeit;
+ }
+
+ /*
+ * If the source address is the unspecified address, entries must not
+ * be created or updated.
+ * It looks like the sender is performing DAD.  Output an NA toward
+ * the all-nodes multicast address, to tell the sender that I'm using
+ * the address.
+ * S bit ("solicited") must be zero.
+ */
+ if (IN6_IS_ADDR_UNSPECIFIED(&saddr6)) {
+ struct in6_addr in6_all;
+
+ in6_all = in6addr_linklocal_allnodes;
+ if (in6_setscope(&in6_all, ifp, NULL) != 0)
+ goto bad;
+ nd6_na_output(ifp, &in6_all, &taddr6,
+ ((anycast || proxy || !tlladdr) ? 0 : ND_NA_FLAG_OVERRIDE) |
+ (V_ip6_forwarding ? ND_NA_FLAG_ROUTER : 0),
+ tlladdr, (struct sockaddr *)proxydl);
+ goto freeit;
+ }
+
+ nd6_cache_lladdr(ifp, &saddr6, lladdr, lladdrlen,
+ ND_NEIGHBOR_SOLICIT, 0);
+
+ nd6_na_output(ifp, &saddr6, &taddr6,
+ ((anycast || proxy || !tlladdr) ? 0 : ND_NA_FLAG_OVERRIDE) |
+ (V_ip6_forwarding ? ND_NA_FLAG_ROUTER : 0) | ND_NA_FLAG_SOLICITED,
+ tlladdr, (struct sockaddr *)proxydl);
+ freeit:
+ if (ifa != NULL)
+ ifa_free(ifa);
+ m_freem(m);
+ return;
+
+ bad:
+ nd6log((LOG_ERR, "nd6_ns_input: src=%s\n",
+ ip6_sprintf(ip6bufs, &saddr6)));
+ nd6log((LOG_ERR, "nd6_ns_input: dst=%s\n",
+ ip6_sprintf(ip6bufs, &daddr6)));
+ nd6log((LOG_ERR, "nd6_ns_input: tgt=%s\n",
+ ip6_sprintf(ip6bufs, &taddr6)));
+ ICMP6STAT_INC(icp6s_badns);
+ if (ifa != NULL)
+ ifa_free(ifa);
+ m_freem(m);
+}
+
+/*
+ * Output a Neighbor Solicitation Message. Caller specifies:
+ * - ICMP6 header source IP6 address
+ * - ND6 header target IP6 address
+ * - ND6 header source datalink address
+ *
+ * Based on RFC 2461
+ * Based on RFC 2462 (duplicate address detection)
+ *
+ * ln - for source address determination
+ * dad - duplicate address detection
+ */
+void
+nd6_ns_output(struct ifnet *ifp, const struct in6_addr *daddr6,
+ const struct in6_addr *taddr6, struct llentry *ln, int dad)
+{
+ struct mbuf *m;
+ struct ip6_hdr *ip6;
+ struct nd_neighbor_solicit *nd_ns;
+ struct ip6_moptions im6o;
+ int icmp6len;
+ int maxlen;
+ caddr_t mac;
+ struct route_in6 ro;
+
+ if (IN6_IS_ADDR_MULTICAST(taddr6))
+ return;
+
+ /* estimate the size of message */
+ maxlen = sizeof(*ip6) + sizeof(*nd_ns);
+ maxlen += (sizeof(struct nd_opt_hdr) + ifp->if_addrlen + 7) & ~7;
+ if (max_linkhdr + maxlen >= MCLBYTES) {
+#ifdef DIAGNOSTIC
+ printf("nd6_ns_output: max_linkhdr + maxlen >= MCLBYTES "
+ "(%d + %d > %d)\n", max_linkhdr, maxlen, MCLBYTES);
+#endif
+ return;
+ }
+
+ MGETHDR(m, M_DONTWAIT, MT_DATA);
+ if (m && max_linkhdr + maxlen >= MHLEN) {
+ MCLGET(m, M_DONTWAIT);
+ if ((m->m_flags & M_EXT) == 0) {
+ m_free(m);
+ m = NULL;
+ }
+ }
+ if (m == NULL)
+ return;
+ m->m_pkthdr.rcvif = NULL;
+
+ bzero(&ro, sizeof(ro));
+
+ if (daddr6 == NULL || IN6_IS_ADDR_MULTICAST(daddr6)) {
+ m->m_flags |= M_MCAST;
+ im6o.im6o_multicast_ifp = ifp;
+ im6o.im6o_multicast_hlim = 255;
+ im6o.im6o_multicast_loop = 0;
+ }
+
+ icmp6len = sizeof(*nd_ns);
+ m->m_pkthdr.len = m->m_len = sizeof(*ip6) + icmp6len;
+ m->m_data += max_linkhdr; /* or MH_ALIGN() equivalent? */
+
+ /* fill neighbor solicitation packet */
+ ip6 = mtod(m, struct ip6_hdr *);
+ ip6->ip6_flow = 0;
+ ip6->ip6_vfc &= ~IPV6_VERSION_MASK;
+ ip6->ip6_vfc |= IPV6_VERSION;
+ /* ip6->ip6_plen will be set later */
+ ip6->ip6_nxt = IPPROTO_ICMPV6;
+ ip6->ip6_hlim = 255;
+ if (daddr6)
+ ip6->ip6_dst = *daddr6;
+ else {
+ ip6->ip6_dst.s6_addr16[0] = IPV6_ADDR_INT16_MLL;
+ ip6->ip6_dst.s6_addr16[1] = 0;
+ ip6->ip6_dst.s6_addr32[1] = 0;
+ ip6->ip6_dst.s6_addr32[2] = IPV6_ADDR_INT32_ONE;
+ ip6->ip6_dst.s6_addr32[3] = taddr6->s6_addr32[3];
+ ip6->ip6_dst.s6_addr8[12] = 0xff;
+ if (in6_setscope(&ip6->ip6_dst, ifp, NULL) != 0)
+ goto bad;
+ }
+ if (!dad) {
+ struct ifaddr *ifa;
+
+ /*
+ * RFC2461 7.2.2:
+ * "If the source address of the packet prompting the
+ * solicitation is the same as one of the addresses assigned
+ * to the outgoing interface, that address SHOULD be placed
+ * in the IP Source Address of the outgoing solicitation.
+ * Otherwise, any one of the addresses assigned to the
+ * interface should be used."
+ *
+ * We use the source address for the prompting packet
+ * (saddr6), if:
+ * - saddr6 is given from the caller (by giving "ln"), and
+ * - saddr6 belongs to the outgoing interface.
+ * Otherwise, we perform the source address selection as usual.
+ */
+ struct in6_addr *hsrc;
+
+ hsrc = NULL;
+ if (ln != NULL) {
+ LLE_RLOCK(ln);
+ if (ln->la_hold != NULL) {
+ struct ip6_hdr *hip6; /* hold ip6 */
+
+ /*
+ * assuming every packet in la_hold has the same IP
+ * header
+ */
+ hip6 = mtod(ln->la_hold, struct ip6_hdr *);
+ /* XXX pullup? */
+ if (sizeof(*hip6) < ln->la_hold->m_len) {
+ ip6->ip6_src = hip6->ip6_src;
+ hsrc = &hip6->ip6_src;
+ }
+ }
+ LLE_RUNLOCK(ln);
+ }
+ if (hsrc && (ifa = (struct ifaddr *)in6ifa_ifpwithaddr(ifp,
+ hsrc)) != NULL) {
+ /* ip6_src set already. */
+ ifa_free(ifa);
+ } else {
+ int error;
+ struct sockaddr_in6 dst_sa;
+ struct in6_addr src_in;
+
+ bzero(&dst_sa, sizeof(dst_sa));
+ dst_sa.sin6_family = AF_INET6;
+ dst_sa.sin6_len = sizeof(dst_sa);
+ dst_sa.sin6_addr = ip6->ip6_dst;
+
+ error = in6_selectsrc(&dst_sa, NULL,
+ NULL, &ro, NULL, NULL, &src_in);
+ if (error) {
+ char ip6buf[INET6_ADDRSTRLEN];
+ nd6log((LOG_DEBUG,
+ "nd6_ns_output: source can't be "
+ "determined: dst=%s, error=%d\n",
+ ip6_sprintf(ip6buf, &dst_sa.sin6_addr),
+ error));
+ goto bad;
+ }
+ ip6->ip6_src = src_in;
+ }
+ } else {
+ /*
+ * The source address of a DAD packet must always be the IPv6
+ * unspecified address (::).
+ * We actually don't have to 0-clear the address (we did it
+ * above), but we do so here explicitly to make the intention
+ * clearer.
+ */
+ bzero(&ip6->ip6_src, sizeof(ip6->ip6_src));
+ }
+ nd_ns = (struct nd_neighbor_solicit *)(ip6 + 1);
+ nd_ns->nd_ns_type = ND_NEIGHBOR_SOLICIT;
+ nd_ns->nd_ns_code = 0;
+ nd_ns->nd_ns_reserved = 0;
+ nd_ns->nd_ns_target = *taddr6;
+ in6_clearscope(&nd_ns->nd_ns_target); /* XXX */
+
+ /*
+ * Add source link-layer address option.
+ *
+ * spec implementation
+ * --- ---
+ * DAD packet MUST NOT do not add the option
+ * there's no link layer address:
+ * impossible do not add the option
+ * there's link layer address:
+ * Multicast NS MUST add one add the option
+ * Unicast NS SHOULD add one add the option
+ */
+ if (!dad && (mac = nd6_ifptomac(ifp))) {
+ int optlen = sizeof(struct nd_opt_hdr) + ifp->if_addrlen;
+ struct nd_opt_hdr *nd_opt = (struct nd_opt_hdr *)(nd_ns + 1);
+ /* round up to 8-byte alignment */
+ optlen = (optlen + 7) & ~7;
+
+ m->m_pkthdr.len += optlen;
+ m->m_len += optlen;
+ icmp6len += optlen;
+ bzero((caddr_t)nd_opt, optlen);
+ nd_opt->nd_opt_type = ND_OPT_SOURCE_LINKADDR;
+ nd_opt->nd_opt_len = optlen >> 3;
+ bcopy(mac, (caddr_t)(nd_opt + 1), ifp->if_addrlen);
+ }
+
+ ip6->ip6_plen = htons((u_short)icmp6len);
+ nd_ns->nd_ns_cksum = 0;
+ nd_ns->nd_ns_cksum =
+ in6_cksum(m, IPPROTO_ICMPV6, sizeof(*ip6), icmp6len);
+
+ ip6_output(m, NULL, &ro, dad ? IPV6_UNSPECSRC : 0, &im6o, NULL, NULL);
+ icmp6_ifstat_inc(ifp, ifs6_out_msg);
+ icmp6_ifstat_inc(ifp, ifs6_out_neighborsolicit);
+ ICMP6STAT_INC(icp6s_outhist[ND_NEIGHBOR_SOLICIT]);
+
+ if (ro.ro_rt) { /* we don't cache this route. */
+ RTFREE(ro.ro_rt);
+ }
+ return;
+
+ bad:
+ if (ro.ro_rt) {
+ RTFREE(ro.ro_rt);
+ }
+ m_freem(m);
+ return;
+}
+
+/*
+ * Neighbor advertisement input handling.
+ *
+ * Based on RFC 2461
+ * Based on RFC 2462 (duplicate address detection)
+ *
+ * the following items are not implemented yet:
+ * - proxy advertisement delay rule (RFC2461 7.2.8, last paragraph, SHOULD)
+ * - anycast advertisement delay rule (RFC2461 7.2.7, SHOULD)
+ */
+void
+nd6_na_input(struct mbuf *m, int off, int icmp6len)
+{
+ struct ifnet *ifp = m->m_pkthdr.rcvif;
+ struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *);
+ struct nd_neighbor_advert *nd_na;
+ struct in6_addr daddr6 = ip6->ip6_dst;
+ struct in6_addr taddr6;
+ int flags;
+ int is_router;
+ int is_solicited;
+ int is_override;
+ char *lladdr = NULL;
+ int lladdrlen = 0;
+ int checklink = 0;
+ struct ifaddr *ifa;
+ struct llentry *ln = NULL;
+ union nd_opts ndopts;
+ struct mbuf *chain = NULL;
+ struct sockaddr_in6 sin6;
+ char ip6bufs[INET6_ADDRSTRLEN], ip6bufd[INET6_ADDRSTRLEN];
+
+ if (ip6->ip6_hlim != 255) {
+ nd6log((LOG_ERR,
+ "nd6_na_input: invalid hlim (%d) from %s to %s on %s\n",
+ ip6->ip6_hlim, ip6_sprintf(ip6bufs, &ip6->ip6_src),
+ ip6_sprintf(ip6bufd, &ip6->ip6_dst), if_name(ifp)));
+ goto bad;
+ }
+
+#ifndef PULLDOWN_TEST
+ IP6_EXTHDR_CHECK(m, off, icmp6len,);
+ nd_na = (struct nd_neighbor_advert *)((caddr_t)ip6 + off);
+#else
+ IP6_EXTHDR_GET(nd_na, struct nd_neighbor_advert *, m, off, icmp6len);
+ if (nd_na == NULL) {
+ ICMP6STAT_INC(icp6s_tooshort);
+ return;
+ }
+#endif
+
+ flags = nd_na->nd_na_flags_reserved;
+ is_router = ((flags & ND_NA_FLAG_ROUTER) != 0);
+ is_solicited = ((flags & ND_NA_FLAG_SOLICITED) != 0);
+ is_override = ((flags & ND_NA_FLAG_OVERRIDE) != 0);
+
+ taddr6 = nd_na->nd_na_target;
+ if (in6_setscope(&taddr6, ifp, NULL))
+ goto bad; /* XXX: impossible */
+
+ if (IN6_IS_ADDR_MULTICAST(&taddr6)) {
+ nd6log((LOG_ERR,
+ "nd6_na_input: invalid target address %s\n",
+ ip6_sprintf(ip6bufs, &taddr6)));
+ goto bad;
+ }
+ if (IN6_IS_ADDR_MULTICAST(&daddr6))
+ if (is_solicited) {
+ nd6log((LOG_ERR,
+ "nd6_na_input: a solicited adv is multicasted\n"));
+ goto bad;
+ }
+
+ icmp6len -= sizeof(*nd_na);
+ nd6_option_init(nd_na + 1, icmp6len, &ndopts);
+ if (nd6_options(&ndopts) < 0) {
+ nd6log((LOG_INFO,
+ "nd6_na_input: invalid ND option, ignored\n"));
+ /* nd6_options have incremented stats */
+ goto freeit;
+ }
+
+ if (ndopts.nd_opts_tgt_lladdr) {
+ lladdr = (char *)(ndopts.nd_opts_tgt_lladdr + 1);
+ lladdrlen = ndopts.nd_opts_tgt_lladdr->nd_opt_len << 3;
+ }
+
+ ifa = (struct ifaddr *)in6ifa_ifpwithaddr(ifp, &taddr6);
+
+ /*
+ * Target address matches one of my interface addresses.
+ *
+ * If my address is tentative, this means that there's somebody
+ * already using the same address as mine. This indicates DAD failure.
+ * This is defined in RFC 2462.
+ *
+ * Otherwise, process as defined in RFC 2461.
+ */
+ if (ifa
+ && (((struct in6_ifaddr *)ifa)->ia6_flags & IN6_IFF_TENTATIVE)) {
+ /* Process the DAD failure before releasing our reference. */
+ nd6_dad_na_input(ifa);
+ ifa_free(ifa);
+ goto freeit;
+ }
+
+ /* Just for safety, maybe unnecessary. */
+ if (ifa) {
+ ifa_free(ifa);
+ log(LOG_ERR,
+ "nd6_na_input: duplicate IP6 address %s\n",
+ ip6_sprintf(ip6bufs, &taddr6));
+ goto freeit;
+ }
+
+ if (lladdr && ((ifp->if_addrlen + 2 + 7) & ~7) != lladdrlen) {
+ nd6log((LOG_INFO, "nd6_na_input: lladdrlen mismatch for %s "
+ "(if %d, NA packet %d)\n", ip6_sprintf(ip6bufs, &taddr6),
+ ifp->if_addrlen, lladdrlen - 2));
+ goto bad;
+ }
+
+ /*
+ * If no neighbor cache entry is found, NA SHOULD silently be
+ * discarded.
+ */
+ IF_AFDATA_LOCK(ifp);
+ ln = nd6_lookup(&taddr6, LLE_EXCLUSIVE, ifp);
+ IF_AFDATA_UNLOCK(ifp);
+ if (ln == NULL) {
+ goto freeit;
+ }
+
+ if (ln->ln_state == ND6_LLINFO_INCOMPLETE) {
+ /*
+ * If the link layer has an address, and no lladdr option came,
+ * discard the packet.
+ */
+ if (ifp->if_addrlen && lladdr == NULL) {
+ goto freeit;
+ }
+
+ /*
+ * Record link-layer address, and update the state.
+ */
+ bcopy(lladdr, &ln->ll_addr, ifp->if_addrlen);
+ ln->la_flags |= LLE_VALID;
+ if (is_solicited) {
+ ln->ln_state = ND6_LLINFO_REACHABLE;
+ ln->ln_byhint = 0;
+ if (!ND6_LLINFO_PERMANENT(ln)) {
+ nd6_llinfo_settimer_locked(ln,
+ (long)ND_IFINFO(ln->lle_tbl->llt_ifp)->reachable * hz);
+ }
+ } else {
+ ln->ln_state = ND6_LLINFO_STALE;
+ nd6_llinfo_settimer_locked(ln, (long)V_nd6_gctimer * hz);
+ }
+ if ((ln->ln_router = is_router) != 0) {
+ /*
+ * This means a router's state has changed from
+ * non-reachable to probably reachable, and might
+ * affect the status of associated prefixes.
+ */
+ checklink = 1;
+ }
+ } else {
+ int llchange;
+
+ /*
+ * Check if the link-layer address has changed or not.
+ */
+ if (lladdr == NULL)
+ llchange = 0;
+ else {
+ if (ln->la_flags & LLE_VALID) {
+ if (bcmp(lladdr, &ln->ll_addr, ifp->if_addrlen))
+ llchange = 1;
+ else
+ llchange = 0;
+ } else
+ llchange = 1;
+ }
+
+ /*
+ * This is VERY complex. Look at it with care.
+ *
+ * override solicit lladdr llchange action
+ * (L: record lladdr)
+ *
+ * 0 0 n -- (2c)
+ * 0 0 y n (2b) L
+ * 0 0 y y (1) REACHABLE->STALE
+ * 0 1 n -- (2c) *->REACHABLE
+ * 0 1 y n (2b) L *->REACHABLE
+ * 0 1 y y (1) REACHABLE->STALE
+ * 1 0 n -- (2a)
+ * 1 0 y n (2a) L
+ * 1 0 y y (2a) L *->STALE
+ * 1 1 n -- (2a) *->REACHABLE
+ * 1 1 y n (2a) L *->REACHABLE
+ * 1 1 y y (2a) L *->REACHABLE
+ */
+ if (!is_override && (lladdr != NULL && llchange)) { /* (1) */
+ /*
+ * If state is REACHABLE, make it STALE.
+ * no other updates should be done.
+ */
+ if (ln->ln_state == ND6_LLINFO_REACHABLE) {
+ ln->ln_state = ND6_LLINFO_STALE;
+ nd6_llinfo_settimer_locked(ln, (long)V_nd6_gctimer * hz);
+ }
+ goto freeit;
+ } else if (is_override /* (2a) */
+ || (!is_override && (lladdr != NULL && !llchange)) /* (2b) */
+ || lladdr == NULL) { /* (2c) */
+ /*
+ * Update the link-layer address, if any.
+ */
+ if (lladdr != NULL) {
+ bcopy(lladdr, &ln->ll_addr, ifp->if_addrlen);
+ ln->la_flags |= LLE_VALID;
+ }
+
+ /*
+ * If solicited, make the state REACHABLE.
+ * If not solicited and the link-layer address was
+ * changed, make it STALE.
+ */
+ if (is_solicited) {
+ ln->ln_state = ND6_LLINFO_REACHABLE;
+ ln->ln_byhint = 0;
+ if (!ND6_LLINFO_PERMANENT(ln)) {
+ nd6_llinfo_settimer_locked(ln,
+ (long)ND_IFINFO(ifp)->reachable * hz);
+ }
+ } else {
+ if (lladdr != NULL && llchange) {
+ ln->ln_state = ND6_LLINFO_STALE;
+ nd6_llinfo_settimer_locked(ln,
+ (long)V_nd6_gctimer * hz);
+ }
+ }
+ }
+
+ if (ln->ln_router && !is_router) {
+ /*
+ * The peer dropped the router flag.
+ * Remove the sender from the Default Router List and
+ * update the Destination Cache entries.
+ */
+ struct nd_defrouter *dr;
+ struct in6_addr *in6;
+
+ in6 = &L3_ADDR_SIN6(ln)->sin6_addr;
+
+ /*
+ * Lock to protect the default router list.
+ * XXX: this might be unnecessary, since this function
+ * is only called under the network software interrupt
+ * context. However, we keep it just for safety.
+ */
+ dr = defrouter_lookup(in6, ln->lle_tbl->llt_ifp);
+ if (dr)
+ defrtrlist_del(dr);
+ else if (!V_ip6_forwarding) {
+ /*
+ * Even if the neighbor is not in the default
+ * router list, the neighbor may be used
+ * as a next hop for some destinations
+ * (e.g. redirect case). So we must
+ * call rt6_flush explicitly.
+ */
+ rt6_flush(&ip6->ip6_src, ifp);
+ }
+ }
+ ln->ln_router = is_router;
+ }
+ /* XXX - QL
+ * Does this matter?
+ * rt->rt_flags &= ~RTF_REJECT;
+ */
+ ln->la_asked = 0;
+ if (ln->la_hold) {
+ struct mbuf *m_hold, *m_hold_next;
+
+ /*
+ * reset the la_hold in advance, to explicitly
+ * prevent a la_hold lookup in nd6_output()
+ * (wouldn't happen, though...)
+ */
+ for (m_hold = ln->la_hold, ln->la_hold = NULL;
+ m_hold; m_hold = m_hold_next) {
+ m_hold_next = m_hold->m_nextpkt;
+ m_hold->m_nextpkt = NULL;
+ /*
+ * we assume ifp is not a loopback here, so just pass
+ * the same ifp for both interface arguments.
+ */
+ nd6_output_lle(ifp, ifp, m_hold, L3_ADDR_SIN6(ln), NULL, ln, &chain);
+ }
+ }
+ freeit:
+ if (ln != NULL) {
+ if (chain)
+ memcpy(&sin6, L3_ADDR_SIN6(ln), sizeof(sin6));
+ LLE_WUNLOCK(ln);
+
+ if (chain)
+ nd6_output_flush(ifp, ifp, chain, &sin6, NULL);
+ }
+ if (checklink)
+ pfxlist_onlink_check();
+
+ m_freem(m);
+ return;
+
+ bad:
+ if (ln != NULL)
+ LLE_WUNLOCK(ln);
+
+ ICMP6STAT_INC(icp6s_badna);
+ m_freem(m);
+}
+
+/*
+ * Neighbor advertisement output handling.
+ *
+ * Based on RFC 2461
+ *
+ * the following items are not implemented yet:
+ * - proxy advertisement delay rule (RFC2461 7.2.8, last paragraph, SHOULD)
+ * - anycast advertisement delay rule (RFC2461 7.2.7, SHOULD)
+ *
+ * tlladdr - 1 if include target link-layer address
+ * sdl0 - sockaddr_dl (= proxy NA) or NULL
+ */
+void
+nd6_na_output(struct ifnet *ifp, const struct in6_addr *daddr6_0,
+ const struct in6_addr *taddr6, u_long flags, int tlladdr,
+ struct sockaddr *sdl0)
+{
+ struct mbuf *m;
+ struct ip6_hdr *ip6;
+ struct nd_neighbor_advert *nd_na;
+ struct ip6_moptions im6o;
+ struct in6_addr src, daddr6;
+ struct sockaddr_in6 dst_sa;
+ int icmp6len, maxlen, error;
+ caddr_t mac = NULL;
+ struct route_in6 ro;
+
+ bzero(&ro, sizeof(ro));
+
+ daddr6 = *daddr6_0; /* make a local copy for modification */
+
+ /* estimate the size of message */
+ maxlen = sizeof(*ip6) + sizeof(*nd_na);
+ maxlen += (sizeof(struct nd_opt_hdr) + ifp->if_addrlen + 7) & ~7;
+ if (max_linkhdr + maxlen >= MCLBYTES) {
+#ifdef DIAGNOSTIC
+ printf("nd6_na_output: max_linkhdr + maxlen >= MCLBYTES "
+ "(%d + %d > %d)\n", max_linkhdr, maxlen, MCLBYTES);
+#endif
+ return;
+ }
+
+ MGETHDR(m, M_DONTWAIT, MT_DATA);
+ if (m && max_linkhdr + maxlen >= MHLEN) {
+ MCLGET(m, M_DONTWAIT);
+ if ((m->m_flags & M_EXT) == 0) {
+ m_free(m);
+ m = NULL;
+ }
+ }
+ if (m == NULL)
+ return;
+ m->m_pkthdr.rcvif = NULL;
+
+ if (IN6_IS_ADDR_MULTICAST(&daddr6)) {
+ m->m_flags |= M_MCAST;
+ im6o.im6o_multicast_ifp = ifp;
+ im6o.im6o_multicast_hlim = 255;
+ im6o.im6o_multicast_loop = 0;
+ }
+
+ icmp6len = sizeof(*nd_na);
+ m->m_pkthdr.len = m->m_len = sizeof(struct ip6_hdr) + icmp6len;
+ m->m_data += max_linkhdr; /* or MH_ALIGN() equivalent? */
+
+ /* fill neighbor advertisement packet */
+ ip6 = mtod(m, struct ip6_hdr *);
+ ip6->ip6_flow = 0;
+ ip6->ip6_vfc &= ~IPV6_VERSION_MASK;
+ ip6->ip6_vfc |= IPV6_VERSION;
+ ip6->ip6_nxt = IPPROTO_ICMPV6;
+ ip6->ip6_hlim = 255;
+ if (IN6_IS_ADDR_UNSPECIFIED(&daddr6)) {
+ /* reply to DAD */
+ daddr6.s6_addr16[0] = IPV6_ADDR_INT16_MLL;
+ daddr6.s6_addr16[1] = 0;
+ daddr6.s6_addr32[1] = 0;
+ daddr6.s6_addr32[2] = 0;
+ daddr6.s6_addr32[3] = IPV6_ADDR_INT32_ONE;
+ if (in6_setscope(&daddr6, ifp, NULL))
+ goto bad;
+
+ flags &= ~ND_NA_FLAG_SOLICITED;
+ }
+ ip6->ip6_dst = daddr6;
+ bzero(&dst_sa, sizeof(struct sockaddr_in6));
+ dst_sa.sin6_family = AF_INET6;
+ dst_sa.sin6_len = sizeof(struct sockaddr_in6);
+ dst_sa.sin6_addr = daddr6;
+
+ /*
+ * Select a source whose scope is the same as that of the dest.
+ */
+ bcopy(&dst_sa, &ro.ro_dst, sizeof(dst_sa));
+ error = in6_selectsrc(&dst_sa, NULL, NULL, &ro, NULL, NULL, &src);
+ if (error) {
+ char ip6buf[INET6_ADDRSTRLEN];
+ nd6log((LOG_DEBUG, "nd6_na_output: source can't be "
+ "determined: dst=%s, error=%d\n",
+ ip6_sprintf(ip6buf, &dst_sa.sin6_addr), error));
+ goto bad;
+ }
+ ip6->ip6_src = src;
+ nd_na = (struct nd_neighbor_advert *)(ip6 + 1);
+ nd_na->nd_na_type = ND_NEIGHBOR_ADVERT;
+ nd_na->nd_na_code = 0;
+ nd_na->nd_na_target = *taddr6;
+ in6_clearscope(&nd_na->nd_na_target); /* XXX */
+
+ /*
+ * "tlladdr" indicates NS's condition for adding tlladdr or not.
+ * see nd6_ns_input() for details.
+ * Basically, if NS packet is sent to unicast/anycast addr,
+ * target lladdr option SHOULD NOT be included.
+ */
+ if (tlladdr) {
+ /*
+ * sdl0 != NULL indicates proxy NA. If we do proxy, use
+ * lladdr in sdl0. If we are not proxying (sending NA for
+ * my address) use lladdr configured for the interface.
+ */
+ if (sdl0 == NULL) {
+ if (ifp->if_carp)
+ mac = (*carp_macmatch6_p)(ifp, m, taddr6);
+ if (mac == NULL)
+ mac = nd6_ifptomac(ifp);
+ } else if (sdl0->sa_family == AF_LINK) {
+ struct sockaddr_dl *sdl;
+ sdl = (struct sockaddr_dl *)sdl0;
+ if (sdl->sdl_alen == ifp->if_addrlen)
+ mac = LLADDR(sdl);
+ }
+ }
+ if (tlladdr && mac) {
+ int optlen = sizeof(struct nd_opt_hdr) + ifp->if_addrlen;
+ struct nd_opt_hdr *nd_opt = (struct nd_opt_hdr *)(nd_na + 1);
+
+ /* round up to 8-byte alignment */
+ optlen = (optlen + 7) & ~7;
+
+ m->m_pkthdr.len += optlen;
+ m->m_len += optlen;
+ icmp6len += optlen;
+ bzero((caddr_t)nd_opt, optlen);
+ nd_opt->nd_opt_type = ND_OPT_TARGET_LINKADDR;
+ nd_opt->nd_opt_len = optlen >> 3;
+ bcopy(mac, (caddr_t)(nd_opt + 1), ifp->if_addrlen);
+ } else
+ flags &= ~ND_NA_FLAG_OVERRIDE;
+
+ ip6->ip6_plen = htons((u_short)icmp6len);
+ nd_na->nd_na_flags_reserved = flags;
+ nd_na->nd_na_cksum = 0;
+ nd_na->nd_na_cksum =
+ in6_cksum(m, IPPROTO_ICMPV6, sizeof(struct ip6_hdr), icmp6len);
+
+ ip6_output(m, NULL, &ro, 0, &im6o, NULL, NULL);
+ icmp6_ifstat_inc(ifp, ifs6_out_msg);
+ icmp6_ifstat_inc(ifp, ifs6_out_neighboradvert);
+ ICMP6STAT_INC(icp6s_outhist[ND_NEIGHBOR_ADVERT]);
+
+ if (ro.ro_rt) { /* we don't cache this route. */
+ RTFREE(ro.ro_rt);
+ }
+ return;
+
+ bad:
+ if (ro.ro_rt) {
+ RTFREE(ro.ro_rt);
+ }
+ m_freem(m);
+ return;
+}
+
+caddr_t
+nd6_ifptomac(struct ifnet *ifp)
+{
+ switch (ifp->if_type) {
+ case IFT_ARCNET:
+ case IFT_ETHER:
+ case IFT_FDDI:
+ case IFT_IEEE1394:
+#ifdef IFT_L2VLAN
+ case IFT_L2VLAN:
+#endif
+#ifdef IFT_IEEE80211
+ case IFT_IEEE80211:
+#endif
+#ifdef IFT_CARP
+ case IFT_CARP:
+#endif
+ case IFT_BRIDGE:
+ case IFT_ISO88025:
+ return IF_LLADDR(ifp);
+ default:
+ return NULL;
+ }
+}
+
+struct dadq {
+ TAILQ_ENTRY(dadq) dad_list;
+ struct ifaddr *dad_ifa;
+ int dad_count; /* max NS to send */
+ int dad_ns_tcount; /* # of trials to send NS */
+ int dad_ns_ocount; /* NS sent so far */
+ int dad_ns_icount; /* # of NS received for the tentative address */
+ int dad_na_icount; /* # of NA received for the tentative address */
+ struct callout dad_timer_ch;
+ struct vnet *dad_vnet;
+};
+
+static VNET_DEFINE(TAILQ_HEAD(, dadq), dadq);
+VNET_DEFINE(int, dad_init) = 0;
+#define V_dadq VNET(dadq)
+#define V_dad_init VNET(dad_init)
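+/*
+ * DAD life cycle (a sketch of the functions below): nd6_dad_start()
+ * queues a dadq entry and arms dad_timer_ch; each nd6_dad_timer() tick
+ * sends one NS via nd6_dad_ns_output() until dad_ns_ocount reaches
+ * dad_count; an NS or NA received for the tentative address in the
+ * meantime (nd6_dad_ns_input()/nd6_dad_na_input()) leads to
+ * nd6_dad_duplicated(), which marks the address IN6_IFF_DUPLICATED.
+ */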
+
+static struct dadq *
+nd6_dad_find(struct ifaddr *ifa)
+{
+ struct dadq *dp;
+
+ for (dp = V_dadq.tqh_first; dp; dp = dp->dad_list.tqe_next) {
+ if (dp->dad_ifa == ifa)
+ return dp;
+ }
+ return NULL;
+}
+
+static void
+nd6_dad_starttimer(struct dadq *dp, int ticks)
+{
+
+ callout_reset(&dp->dad_timer_ch, ticks,
+ (void (*)(void *))nd6_dad_timer, (void *)dp);
+}
+
+static void
+nd6_dad_stoptimer(struct dadq *dp)
+{
+
+ callout_stop(&dp->dad_timer_ch);
+}
+
+/*
+ * Start Duplicate Address Detection (DAD) for specified interface address.
+ */
+void
+nd6_dad_start(struct ifaddr *ifa, int delay)
+{
+ struct in6_ifaddr *ia = (struct in6_ifaddr *)ifa;
+ struct dadq *dp;
+ char ip6buf[INET6_ADDRSTRLEN];
+
+ if (!V_dad_init) {
+ TAILQ_INIT(&V_dadq);
+ V_dad_init++;
+ }
+
+ /*
+ * If we don't need DAD, don't do it.
+ * There are several cases:
+ * - DAD is disabled (ip6_dad_count == 0)
+ * - the interface address is anycast
+ */
+ if (!(ia->ia6_flags & IN6_IFF_TENTATIVE)) {
+ log(LOG_DEBUG,
+ "nd6_dad_start: called with non-tentative address "
+ "%s(%s)\n",
+ ip6_sprintf(ip6buf, &ia->ia_addr.sin6_addr),
+ ifa->ifa_ifp ? if_name(ifa->ifa_ifp) : "???");
+ return;
+ }
+ if (ia->ia6_flags & IN6_IFF_ANYCAST) {
+ ia->ia6_flags &= ~IN6_IFF_TENTATIVE;
+ return;
+ }
+ if (!V_ip6_dad_count) {
+ ia->ia6_flags &= ~IN6_IFF_TENTATIVE;
+ return;
+ }
+ if (ifa->ifa_ifp == NULL)
+ panic("nd6_dad_start: ifa->ifa_ifp == NULL");
+ if (!(ifa->ifa_ifp->if_flags & IFF_UP)) {
+ return;
+ }
+ if (nd6_dad_find(ifa) != NULL) {
+ /* DAD already in progress */
+ return;
+ }
+
+ dp = malloc(sizeof(*dp), M_IP6NDP, M_NOWAIT);
+ if (dp == NULL) {
+ log(LOG_ERR, "nd6_dad_start: memory allocation failed for "
+ "%s(%s)\n",
+ ip6_sprintf(ip6buf, &ia->ia_addr.sin6_addr),
+ ifa->ifa_ifp ? if_name(ifa->ifa_ifp) : "???");
+ return;
+ }
+ bzero(dp, sizeof(*dp));
+ callout_init(&dp->dad_timer_ch, 0);
+#ifdef VIMAGE
+ dp->dad_vnet = curvnet;
+#endif
+ TAILQ_INSERT_TAIL(&V_dadq, (struct dadq *)dp, dad_list);
+
+ nd6log((LOG_DEBUG, "%s: starting DAD for %s\n", if_name(ifa->ifa_ifp),
+ ip6_sprintf(ip6buf, &ia->ia_addr.sin6_addr)));
+
+ /*
+ * Send NS packet for DAD, ip6_dad_count times.
+ * Note that we must delay the first transmission, if this is the
+ * first packet to be sent from the interface after interface
+ * (re)initialization.
+ */
+ dp->dad_ifa = ifa;
+ ifa_ref(ifa); /* just for safety */
+ dp->dad_count = V_ip6_dad_count;
+ dp->dad_ns_icount = dp->dad_na_icount = 0;
+ dp->dad_ns_ocount = dp->dad_ns_tcount = 0;
+ if (delay == 0) {
+ nd6_dad_ns_output(dp, ifa);
+ nd6_dad_starttimer(dp,
+ (long)ND_IFINFO(ifa->ifa_ifp)->retrans * hz / 1000);
+ } else {
+ nd6_dad_starttimer(dp, delay);
+ }
+}
+
+/*
+ * Terminate DAD unconditionally.  Used for address removals.
+ */
+void
+nd6_dad_stop(struct ifaddr *ifa)
+{
+ struct dadq *dp;
+
+ if (!V_dad_init)
+ return;
+ dp = nd6_dad_find(ifa);
+ if (!dp) {
+ /* DAD wasn't started yet */
+ return;
+ }
+
+ nd6_dad_stoptimer(dp);
+
+ TAILQ_REMOVE(&V_dadq, (struct dadq *)dp, dad_list);
+ free(dp, M_IP6NDP);
+ dp = NULL;
+ ifa_free(ifa);
+}
+
+static void
+nd6_dad_timer(struct dadq *dp)
+{
+ CURVNET_SET(dp->dad_vnet);
+ int s;
+ struct ifaddr *ifa = dp->dad_ifa;
+ struct in6_ifaddr *ia = (struct in6_ifaddr *)ifa;
+ char ip6buf[INET6_ADDRSTRLEN];
+
+ s = splnet(); /* XXX */
+
+ /* Sanity check */
+ if (ia == NULL) {
+ log(LOG_ERR, "nd6_dad_timer: called with null parameter\n");
+ goto done;
+ }
+ if (ia->ia6_flags & IN6_IFF_DUPLICATED) {
+ log(LOG_ERR, "nd6_dad_timer: called with duplicated address "
+ "%s(%s)\n",
+ ip6_sprintf(ip6buf, &ia->ia_addr.sin6_addr),
+ ifa->ifa_ifp ? if_name(ifa->ifa_ifp) : "???");
+ goto done;
+ }
+ if ((ia->ia6_flags & IN6_IFF_TENTATIVE) == 0) {
+ log(LOG_ERR, "nd6_dad_timer: called with non-tentative address "
+ "%s(%s)\n",
+ ip6_sprintf(ip6buf, &ia->ia_addr.sin6_addr),
+ ifa->ifa_ifp ? if_name(ifa->ifa_ifp) : "???");
+ goto done;
+ }
+
+ /* timed out with the IFF_{RUNNING,UP} check */
+ if (dp->dad_ns_tcount > V_dad_maxtry) {
+ nd6log((LOG_INFO, "%s: could not run DAD, driver problem?\n",
+ if_name(ifa->ifa_ifp)));
+
+ TAILQ_REMOVE(&V_dadq, (struct dadq *)dp, dad_list);
+ free(dp, M_IP6NDP);
+ dp = NULL;
+ ifa_free(ifa);
+ goto done;
+ }
+
+ /* Need more checks? */
+ if (dp->dad_ns_ocount < dp->dad_count) {
+ /*
+ * We have more NS to go. Send NS packet for DAD.
+ */
+ nd6_dad_ns_output(dp, ifa);
+ nd6_dad_starttimer(dp,
+ (long)ND_IFINFO(ifa->ifa_ifp)->retrans * hz / 1000);
+ } else {
+ /*
+ * We have transmitted a sufficient number of DAD packets.
+ * See what we've got.
+ */
+ int duplicate;
+
+ duplicate = 0;
+
+ if (dp->dad_na_icount) {
+ /*
+ * the check is in nd6_dad_na_input(),
+ * but just in case
+ */
+ duplicate++;
+ }
+
+ if (dp->dad_ns_icount) {
+ /* We've seen an NS, which means DAD has failed. */
+ duplicate++;
+ }
+
+ if (duplicate) {
+ /* (*dp) will be freed in nd6_dad_duplicated() */
+ dp = NULL;
+ nd6_dad_duplicated(ifa);
+ } else {
+ /*
+ * We are done with DAD. No NA came, no NS came.
+ * No duplicate address found.
+ */
+ ia->ia6_flags &= ~IN6_IFF_TENTATIVE;
+
+ nd6log((LOG_DEBUG,
+ "%s: DAD complete for %s - no duplicates found\n",
+ if_name(ifa->ifa_ifp),
+ ip6_sprintf(ip6buf, &ia->ia_addr.sin6_addr)));
+
+ TAILQ_REMOVE(&V_dadq, (struct dadq *)dp, dad_list);
+ free(dp, M_IP6NDP);
+ dp = NULL;
+ ifa_free(ifa);
+ }
+ }
+
+done:
+ splx(s);
+ CURVNET_RESTORE();
+}
+
+void
+nd6_dad_duplicated(struct ifaddr *ifa)
+{
+ struct in6_ifaddr *ia = (struct in6_ifaddr *)ifa;
+ struct ifnet *ifp;
+ struct dadq *dp;
+ char ip6buf[INET6_ADDRSTRLEN];
+
+ dp = nd6_dad_find(ifa);
+ if (dp == NULL) {
+ log(LOG_ERR, "nd6_dad_duplicated: DAD structure not found\n");
+ return;
+ }
+
+ log(LOG_ERR, "%s: DAD detected duplicate IPv6 address %s: "
+ "NS in/out=%d/%d, NA in=%d\n",
+ if_name(ifa->ifa_ifp), ip6_sprintf(ip6buf, &ia->ia_addr.sin6_addr),
+ dp->dad_ns_icount, dp->dad_ns_ocount, dp->dad_na_icount);
+
+ ia->ia6_flags &= ~IN6_IFF_TENTATIVE;
+ ia->ia6_flags |= IN6_IFF_DUPLICATED;
+
+ /* We are done with DAD, with duplicate address found. (failure) */
+ nd6_dad_stoptimer(dp);
+
+ ifp = ifa->ifa_ifp;
+ log(LOG_ERR, "%s: DAD complete for %s - duplicate found\n",
+ if_name(ifp), ip6_sprintf(ip6buf, &ia->ia_addr.sin6_addr));
+ log(LOG_ERR, "%s: manual intervention required\n",
+ if_name(ifp));
+
+ /*
+ * If the address is a link-local address formed from an interface
+ * identifier based on the hardware address which is supposed to be
+ * uniquely assigned (e.g., EUI-64 for an Ethernet interface), IP
+ * operation on the interface SHOULD be disabled.
+ * [rfc2462bis-03 Section 5.4.5]
+ */
+ if (IN6_IS_ADDR_LINKLOCAL(&ia->ia_addr.sin6_addr)) {
+ struct in6_addr in6;
+
+ /*
+ * To avoid over-reaction, we only apply this logic when we are
+ * very sure that hardware addresses are supposed to be unique.
+ */
+ switch (ifp->if_type) {
+ case IFT_ETHER:
+ case IFT_FDDI:
+ case IFT_ATM:
+ case IFT_IEEE1394:
+#ifdef IFT_IEEE80211
+ case IFT_IEEE80211:
+#endif
+ in6 = ia->ia_addr.sin6_addr;
+ if (in6_get_hw_ifid(ifp, &in6) == 0 &&
+ IN6_ARE_ADDR_EQUAL(&ia->ia_addr.sin6_addr, &in6)) {
+ ND_IFINFO(ifp)->flags |= ND6_IFF_IFDISABLED;
+ log(LOG_ERR, "%s: possible hardware address "
+ "duplication detected, disable IPv6\n",
+ if_name(ifp));
+ }
+ break;
+ }
+ }
+
+ TAILQ_REMOVE(&V_dadq, (struct dadq *)dp, dad_list);
+ free(dp, M_IP6NDP);
+ dp = NULL;
+ ifa_free(ifa);
+}
+
+static void
+nd6_dad_ns_output(struct dadq *dp, struct ifaddr *ifa)
+{
+ struct in6_ifaddr *ia = (struct in6_ifaddr *)ifa;
+ struct ifnet *ifp = ifa->ifa_ifp;
+
+ dp->dad_ns_tcount++;
+ if ((ifp->if_flags & IFF_UP) == 0) {
+ return;
+ }
+ if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
+ return;
+ }
+
+ dp->dad_ns_ocount++;
+ nd6_ns_output(ifp, NULL, &ia->ia_addr.sin6_addr, NULL, 1);
+}
+
+static void
+nd6_dad_ns_input(struct ifaddr *ifa)
+{
+ struct in6_ifaddr *ia;
+ struct ifnet *ifp;
+ const struct in6_addr *taddr6;
+ struct dadq *dp;
+ int duplicate;
+
+ if (ifa == NULL)
+ panic("ifa == NULL in nd6_dad_ns_input");
+
+ ia = (struct in6_ifaddr *)ifa;
+ ifp = ifa->ifa_ifp;
+ taddr6 = &ia->ia_addr.sin6_addr;
+ duplicate = 0;
+ dp = nd6_dad_find(ifa);
+
+ /* Quickhack - completely ignore DAD NS packets */
+ if (V_dad_ignore_ns) {
+ char ip6buf[INET6_ADDRSTRLEN];
+ nd6log((LOG_INFO,
+ "nd6_dad_ns_input: ignoring DAD NS packet for "
+ "address %s(%s)\n", ip6_sprintf(ip6buf, taddr6),
+ if_name(ifa->ifa_ifp)));
+ return;
+ }
+
+ /*
+ * if I'm yet to start DAD, someone else started using this address
+ * first. I have a duplicate and you win.
+ */
+ if (dp == NULL || dp->dad_ns_ocount == 0)
+ duplicate++;
+
+ /* XXX more checks for loopback situation - see nd6_dad_timer too */
+
+ if (duplicate) {
+ dp = NULL; /* will be freed in nd6_dad_duplicated() */
+ nd6_dad_duplicated(ifa);
+ } else {
+ /*
+ * Not sure if I got a duplicate.
+ * Increment the NS count and see what happens.
+ */
+ if (dp)
+ dp->dad_ns_icount++;
+ }
+}
+
+static void
+nd6_dad_na_input(struct ifaddr *ifa)
+{
+ struct dadq *dp;
+
+ if (ifa == NULL)
+ panic("ifa == NULL in nd6_dad_na_input");
+
+ dp = nd6_dad_find(ifa);
+ if (dp)
+ dp->dad_na_icount++;
+
+ /* remove the address. */
+ nd6_dad_duplicated(ifa);
+}
diff --git a/rtems/freebsd/netinet6/nd6_rtr.c b/rtems/freebsd/netinet6/nd6_rtr.c
new file mode 100644
index 00000000..04992096
--- /dev/null
+++ b/rtems/freebsd/netinet6/nd6_rtr.c
@@ -0,0 +1,2162 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the project nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $KAME: nd6_rtr.c,v 1.111 2001/04/27 01:37:15 jinmei Exp $
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/freebsd/local/opt_inet.h>
+#include <rtems/freebsd/local/opt_inet6.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/sockio.h>
+#include <rtems/freebsd/sys/time.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/errno.h>
+#include <rtems/freebsd/sys/rwlock.h>
+#include <rtems/freebsd/sys/syslog.h>
+#include <rtems/freebsd/sys/queue.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/if_types.h>
+#include <rtems/freebsd/net/if_dl.h>
+#include <rtems/freebsd/net/route.h>
+#include <rtems/freebsd/net/radix.h>
+#include <rtems/freebsd/net/vnet.h>
+
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/net/if_llatbl.h>
+#include <rtems/freebsd/netinet6/in6_var.h>
+#include <rtems/freebsd/netinet6/in6_ifattach.h>
+#include <rtems/freebsd/netinet/ip6.h>
+#include <rtems/freebsd/netinet6/ip6_var.h>
+#include <rtems/freebsd/netinet6/nd6.h>
+#include <rtems/freebsd/netinet/icmp6.h>
+#include <rtems/freebsd/netinet6/scope6_var.h>
+
+static int rtpref(struct nd_defrouter *);
+static struct nd_defrouter *defrtrlist_update(struct nd_defrouter *);
+static int prelist_update __P((struct nd_prefixctl *, struct nd_defrouter *,
+ struct mbuf *, int));
+static struct in6_ifaddr *in6_ifadd(struct nd_prefixctl *, int);
+static struct nd_pfxrouter *pfxrtr_lookup __P((struct nd_prefix *,
+ struct nd_defrouter *));
+static void pfxrtr_add(struct nd_prefix *, struct nd_defrouter *);
+static void pfxrtr_del(struct nd_pfxrouter *);
+static struct nd_pfxrouter *find_pfxlist_reachable_router(struct nd_prefix *);
+static void defrouter_delreq(struct nd_defrouter *);
+static void nd6_rtmsg(int, struct rtentry *);
+
+static int in6_init_prefix_ltimes(struct nd_prefix *);
+static void in6_init_address_ltimes __P((struct nd_prefix *,
+ struct in6_addrlifetime *));
+
+static int rt6_deleteroute(struct radix_node *, void *);
+
+VNET_DECLARE(int, nd6_recalc_reachtm_interval);
+#define V_nd6_recalc_reachtm_interval VNET(nd6_recalc_reachtm_interval)
+
+static VNET_DEFINE(struct ifnet *, nd6_defifp);
+VNET_DEFINE(int, nd6_defifindex);
+#define V_nd6_defifp VNET(nd6_defifp)
+
+VNET_DEFINE(int, ip6_use_tempaddr) = 0;
+
+VNET_DEFINE(int, ip6_desync_factor);
+VNET_DEFINE(u_int32_t, ip6_temp_preferred_lifetime) = DEF_TEMP_PREFERRED_LIFETIME;
+VNET_DEFINE(u_int32_t, ip6_temp_valid_lifetime) = DEF_TEMP_VALID_LIFETIME;
+
+VNET_DEFINE(int, ip6_temp_regen_advance) = TEMPADDR_REGEN_ADVANCE;
+
+/* RTPREF_MEDIUM has to be 0! */
+#define RTPREF_HIGH 1
+#define RTPREF_MEDIUM 0
+#define RTPREF_LOW (-1)
+#define RTPREF_RESERVED (-2)
+#define RTPREF_INVALID (-3) /* internal */
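+/*
+ * rtpref() (declared above) is expected to map the RFC 4191
+ * router-preference bits of the RA flags onto these values;
+ * RTPREF_MEDIUM must stay 0 so that a zero-initialized default router
+ * entry defaults to medium preference.
+ */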
+
+/*
+ * Receive Router Solicitation Message - just for routers.
+ * Router solicitation/advertisement is mostly managed by a userland
+ * program (rtadvd), so there is no function like nd6_ra_output() here.
+ *
+ * Based on RFC 2461
+ */
+void
+nd6_rs_input(struct mbuf *m, int off, int icmp6len)
+{
+ struct ifnet *ifp = m->m_pkthdr.rcvif;
+ struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *);
+ struct nd_router_solicit *nd_rs;
+ struct in6_addr saddr6 = ip6->ip6_src;
+ char *lladdr = NULL;
+ int lladdrlen = 0;
+ union nd_opts ndopts;
+ char ip6bufs[INET6_ADDRSTRLEN], ip6bufd[INET6_ADDRSTRLEN];
+
+ /* If I'm not a router, ignore it. */
+ if (V_ip6_accept_rtadv != 0 || V_ip6_forwarding != 1)
+ goto freeit;
+
+ /* Sanity checks */
+ if (ip6->ip6_hlim != 255) {
+ nd6log((LOG_ERR,
+ "nd6_rs_input: invalid hlim (%d) from %s to %s on %s\n",
+ ip6->ip6_hlim, ip6_sprintf(ip6bufs, &ip6->ip6_src),
+ ip6_sprintf(ip6bufd, &ip6->ip6_dst), if_name(ifp)));
+ goto bad;
+ }
+
+ /*
+ * Don't update the neighbor cache, if src = ::.
+ * This indicates that the src has no IP address assigned yet.
+ */
+ if (IN6_IS_ADDR_UNSPECIFIED(&saddr6))
+ goto freeit;
+
+#ifndef PULLDOWN_TEST
+ IP6_EXTHDR_CHECK(m, off, icmp6len,);
+ nd_rs = (struct nd_router_solicit *)((caddr_t)ip6 + off);
+#else
+ IP6_EXTHDR_GET(nd_rs, struct nd_router_solicit *, m, off, icmp6len);
+ if (nd_rs == NULL) {
+ ICMP6STAT_INC(icp6s_tooshort);
+ return;
+ }
+#endif
+
+ icmp6len -= sizeof(*nd_rs);
+ nd6_option_init(nd_rs + 1, icmp6len, &ndopts);
+ if (nd6_options(&ndopts) < 0) {
+ nd6log((LOG_INFO,
+ "nd6_rs_input: invalid ND option, ignored\n"));
+ /* nd6_options have incremented stats */
+ goto freeit;
+ }
+
+ if (ndopts.nd_opts_src_lladdr) {
+ lladdr = (char *)(ndopts.nd_opts_src_lladdr + 1);
+ lladdrlen = ndopts.nd_opts_src_lladdr->nd_opt_len << 3;
+ }
+
+ if (lladdr && ((ifp->if_addrlen + 2 + 7) & ~7) != lladdrlen) {
+ nd6log((LOG_INFO,
+ "nd6_rs_input: lladdrlen mismatch for %s "
+ "(if %d, RS packet %d)\n",
+ ip6_sprintf(ip6bufs, &saddr6),
+ ifp->if_addrlen, lladdrlen - 2));
+ goto bad;
+ }
+
+ nd6_cache_lladdr(ifp, &saddr6, lladdr, lladdrlen, ND_ROUTER_SOLICIT, 0);
+
+ freeit:
+ m_freem(m);
+ return;
+
+ bad:
+ ICMP6STAT_INC(icp6s_badrs);
+ m_freem(m);
+}
+
+/*
+ * Receive Router Advertisement Message.
+ *
+ * Based on RFC 2461
+ * TODO: on-link bit on prefix information
+ * TODO: ND_RA_FLAG_{OTHER,MANAGED} processing
+ */
+void
+nd6_ra_input(struct mbuf *m, int off, int icmp6len)
+{
+ struct ifnet *ifp = m->m_pkthdr.rcvif;
+ struct nd_ifinfo *ndi = ND_IFINFO(ifp);
+ struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *);
+ struct nd_router_advert *nd_ra;
+ struct in6_addr saddr6 = ip6->ip6_src;
+ int mcast = 0;
+ union nd_opts ndopts;
+ struct nd_defrouter *dr;
+ char ip6bufs[INET6_ADDRSTRLEN], ip6bufd[INET6_ADDRSTRLEN];
+
+ /*
+ * We accept RAs only when the system-wide variable allows the
+ * acceptance, and the per-interface variable allows RAs on the
+ * receiving interface.
+ */
+ if (V_ip6_accept_rtadv == 0)
+ goto freeit;
+ if (!(ndi->flags & ND6_IFF_ACCEPT_RTADV))
+ goto freeit;
+
+ if (ip6->ip6_hlim != 255) {
+ nd6log((LOG_ERR,
+ "nd6_ra_input: invalid hlim (%d) from %s to %s on %s\n",
+ ip6->ip6_hlim, ip6_sprintf(ip6bufs, &ip6->ip6_src),
+ ip6_sprintf(ip6bufd, &ip6->ip6_dst), if_name(ifp)));
+ goto bad;
+ }
+
+ if (!IN6_IS_ADDR_LINKLOCAL(&saddr6)) {
+ nd6log((LOG_ERR,
+ "nd6_ra_input: src %s is not link-local\n",
+ ip6_sprintf(ip6bufs, &saddr6)));
+ goto bad;
+ }
+
+#ifndef PULLDOWN_TEST
+ IP6_EXTHDR_CHECK(m, off, icmp6len,);
+ nd_ra = (struct nd_router_advert *)((caddr_t)ip6 + off);
+#else
+ IP6_EXTHDR_GET(nd_ra, struct nd_router_advert *, m, off, icmp6len);
+ if (nd_ra == NULL) {
+ ICMP6STAT_INC(icp6s_tooshort);
+ return;
+ }
+#endif
+
+ icmp6len -= sizeof(*nd_ra);
+ nd6_option_init(nd_ra + 1, icmp6len, &ndopts);
+ if (nd6_options(&ndopts) < 0) {
+ nd6log((LOG_INFO,
+ "nd6_ra_input: invalid ND option, ignored\n"));
+ /* nd6_options() has already incremented the stats */
+ goto freeit;
+ }
+
+ {
+ struct nd_defrouter dr0;
+ u_int32_t advreachable = nd_ra->nd_ra_reachable;
+
+ /* remember if this is a multicasted advertisement */
+ if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst))
+ mcast = 1;
+
+ bzero(&dr0, sizeof(dr0));
+ dr0.rtaddr = saddr6;
+ dr0.flags = nd_ra->nd_ra_flags_reserved;
+ dr0.rtlifetime = ntohs(nd_ra->nd_ra_router_lifetime);
+ dr0.expire = time_second + dr0.rtlifetime;
+ dr0.ifp = ifp;
+ /* unspecified or not? (RFC 2461 6.3.4) */
+ if (advreachable) {
+ advreachable = ntohl(advreachable);
+ if (advreachable <= MAX_REACHABLE_TIME &&
+ ndi->basereachable != advreachable) {
+ ndi->basereachable = advreachable;
+ ndi->reachable = ND_COMPUTE_RTIME(ndi->basereachable);
+ ndi->recalctm = V_nd6_recalc_reachtm_interval; /* reset */
+ }
+ }
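+ /*
+ * Note: ND_COMPUTE_RTIME randomizes the reachable time around
+ * basereachable; RFC 2461 calls for a uniform factor between
+ * MIN_RANDOM_FACTOR (0.5) and MAX_RANDOM_FACTOR (1.5) so that
+ * neighbors do not probe in lock-step.
+ */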
+ if (nd_ra->nd_ra_retransmit)
+ ndi->retrans = ntohl(nd_ra->nd_ra_retransmit);
+ if (nd_ra->nd_ra_curhoplimit)
+ ndi->chlim = nd_ra->nd_ra_curhoplimit;
+ dr = defrtrlist_update(&dr0);
+ }
+
+ /*
+ * prefix
+ */
+ if (ndopts.nd_opts_pi) {
+ struct nd_opt_hdr *pt;
+ struct nd_opt_prefix_info *pi = NULL;
+ struct nd_prefixctl pr;
+
+ for (pt = (struct nd_opt_hdr *)ndopts.nd_opts_pi;
+ pt <= (struct nd_opt_hdr *)ndopts.nd_opts_pi_end;
+ pt = (struct nd_opt_hdr *)((caddr_t)pt +
+ (pt->nd_opt_len << 3))) {
+ if (pt->nd_opt_type != ND_OPT_PREFIX_INFORMATION)
+ continue;
+ pi = (struct nd_opt_prefix_info *)pt;
+
+ if (pi->nd_opt_pi_len != 4) {
+ nd6log((LOG_INFO,
+ "nd6_ra_input: invalid option "
+ "len %d for prefix information option, "
+ "ignored\n", pi->nd_opt_pi_len));
+ continue;
+ }
+
+ if (128 < pi->nd_opt_pi_prefix_len) {
+ nd6log((LOG_INFO,
+ "nd6_ra_input: invalid prefix "
+ "len %d for prefix information option, "
+ "ignored\n", pi->nd_opt_pi_prefix_len));
+ continue;
+ }
+
+ if (IN6_IS_ADDR_MULTICAST(&pi->nd_opt_pi_prefix)
+ || IN6_IS_ADDR_LINKLOCAL(&pi->nd_opt_pi_prefix)) {
+ nd6log((LOG_INFO,
+ "nd6_ra_input: invalid prefix "
+ "%s, ignored\n",
+ ip6_sprintf(ip6bufs,
+ &pi->nd_opt_pi_prefix)));
+ continue;
+ }
+
+ bzero(&pr, sizeof(pr));
+ pr.ndpr_prefix.sin6_family = AF_INET6;
+ pr.ndpr_prefix.sin6_len = sizeof(pr.ndpr_prefix);
+ pr.ndpr_prefix.sin6_addr = pi->nd_opt_pi_prefix;
+ pr.ndpr_ifp = (struct ifnet *)m->m_pkthdr.rcvif;
+
+ pr.ndpr_raf_onlink = (pi->nd_opt_pi_flags_reserved &
+ ND_OPT_PI_FLAG_ONLINK) ? 1 : 0;
+ pr.ndpr_raf_auto = (pi->nd_opt_pi_flags_reserved &
+ ND_OPT_PI_FLAG_AUTO) ? 1 : 0;
+ pr.ndpr_plen = pi->nd_opt_pi_prefix_len;
+ pr.ndpr_vltime = ntohl(pi->nd_opt_pi_valid_time);
+ pr.ndpr_pltime = ntohl(pi->nd_opt_pi_preferred_time);
+ (void)prelist_update(&pr, dr, m, mcast);
+ }
+ }
+
+ /*
+ * MTU
+ */
+ if (ndopts.nd_opts_mtu && ndopts.nd_opts_mtu->nd_opt_mtu_len == 1) {
+ u_long mtu;
+ u_long maxmtu;
+
+ mtu = (u_long)ntohl(ndopts.nd_opts_mtu->nd_opt_mtu_mtu);
+
+ /* lower bound */
+ if (mtu < IPV6_MMTU) {
+ nd6log((LOG_INFO, "nd6_ra_input: bogus mtu option "
+ "mtu=%lu sent from %s, ignoring\n",
+ mtu, ip6_sprintf(ip6bufs, &ip6->ip6_src)));
+ goto skip;
+ }
+
+ /* upper bound */
+ maxmtu = (ndi->maxmtu && ndi->maxmtu < ifp->if_mtu)
+ ? ndi->maxmtu : ifp->if_mtu;
+ if (mtu <= maxmtu) {
+ int change = (ndi->linkmtu != mtu);
+
+ ndi->linkmtu = mtu;
+ if (change) /* in6_maxmtu may change */
+ in6_setmaxmtu();
+ } else {
+ nd6log((LOG_INFO, "nd6_ra_input: bogus mtu "
+ "mtu=%lu sent from %s; "
+ "exceeds maxmtu %lu, ignoring\n",
+ mtu, ip6_sprintf(ip6bufs, &ip6->ip6_src), maxmtu));
+ }
+ }
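+ /*
+ * Worked example of the clamping above: on a 1500-byte Ethernet
+ * link an advertised MTU of 9000 exceeds maxmtu and is ignored,
+ * an advertised MTU of 1400 lowers linkmtu to 1400, and anything
+ * below IPV6_MMTU (1280, the IPv6 minimum link MTU) is rejected
+ * as bogus.
+ */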
+
+ skip:
+
+ /*
+ * Source link layer address
+ */
+ {
+ char *lladdr = NULL;
+ int lladdrlen = 0;
+
+ if (ndopts.nd_opts_src_lladdr) {
+ lladdr = (char *)(ndopts.nd_opts_src_lladdr + 1);
+ lladdrlen = ndopts.nd_opts_src_lladdr->nd_opt_len << 3;
+ }
+
+ if (lladdr && ((ifp->if_addrlen + 2 + 7) & ~7) != lladdrlen) {
+ nd6log((LOG_INFO,
+ "nd6_ra_input: lladdrlen mismatch for %s "
+ "(if %d, RA packet %d)\n", ip6_sprintf(ip6bufs, &saddr6),
+ ifp->if_addrlen, lladdrlen - 2));
+ goto bad;
+ }
+
+ nd6_cache_lladdr(ifp, &saddr6, lladdr,
+ lladdrlen, ND_ROUTER_ADVERT, 0);
+
+ /*
+ * Installing a link-layer address might change the state of the
+ * router's neighbor cache, which might also affect our on-link
+ * detection of advertised prefixes.
+ */
+ pfxlist_onlink_check();
+ }
+
+ freeit:
+ m_freem(m);
+ return;
+
+ bad:
+ ICMP6STAT_INC(icp6s_badra);
+ m_freem(m);
+}
+
+/*
+ * default router list processing subroutines
+ */
+
+/* Report the change to user processes watching the routing socket. */
+static void
+nd6_rtmsg(int cmd, struct rtentry *rt)
+{
+ struct rt_addrinfo info;
+ struct ifnet *ifp;
+ struct ifaddr *ifa;
+
+ bzero((caddr_t)&info, sizeof(info));
+ info.rti_info[RTAX_DST] = rt_key(rt);
+ info.rti_info[RTAX_GATEWAY] = rt->rt_gateway;
+ info.rti_info[RTAX_NETMASK] = rt_mask(rt);
+ ifp = rt->rt_ifp;
+ if (ifp != NULL) {
+ IF_ADDR_LOCK(ifp);
+ ifa = TAILQ_FIRST(&ifp->if_addrhead);
+ info.rti_info[RTAX_IFP] = ifa->ifa_addr;
+ ifa_ref(ifa);
+ IF_ADDR_UNLOCK(ifp);
+ info.rti_info[RTAX_IFA] = rt->rt_ifa->ifa_addr;
+ } else
+ ifa = NULL;
+
+ rt_missmsg(cmd, &info, rt->rt_flags, 0);
+ if (ifa != NULL)
+ ifa_free(ifa);
+}
+
+void
+defrouter_addreq(struct nd_defrouter *new)
+{
+ struct sockaddr_in6 def, mask, gate;
+ struct rtentry *newrt = NULL;
+ int s;
+ int error;
+
+ bzero(&def, sizeof(def));
+ bzero(&mask, sizeof(mask));
+ bzero(&gate, sizeof(gate));
+
+ def.sin6_len = mask.sin6_len = gate.sin6_len =
+ sizeof(struct sockaddr_in6);
+ def.sin6_family = gate.sin6_family = AF_INET6;
+ gate.sin6_addr = new->rtaddr;
+
+ s = splnet();
+ error = rtrequest(RTM_ADD, (struct sockaddr *)&def,
+ (struct sockaddr *)&gate, (struct sockaddr *)&mask,
+ RTF_GATEWAY, &newrt);
+ if (newrt) {
+ nd6_rtmsg(RTM_ADD, newrt); /* tell user process */
+ RTFREE(newrt);
+ }
+ if (error == 0)
+ new->installed = 1;
+ splx(s);
+ return;
+}
+
+struct nd_defrouter *
+defrouter_lookup(struct in6_addr *addr, struct ifnet *ifp)
+{
+ struct nd_defrouter *dr;
+
+ for (dr = TAILQ_FIRST(&V_nd_defrouter); dr;
+ dr = TAILQ_NEXT(dr, dr_entry)) {
+ if (dr->ifp == ifp && IN6_ARE_ADDR_EQUAL(addr, &dr->rtaddr))
+ return (dr);
+ }
+
+ return (NULL); /* search failed */
+}
+
+/*
+ * Remove the default route for a given router.
+ * This is just a subroutine of defrouter_select(), and should
+ * not be called from anywhere else.
+ */
+static void
+defrouter_delreq(struct nd_defrouter *dr)
+{
+ struct sockaddr_in6 def, mask, gate;
+ struct rtentry *oldrt = NULL;
+
+ bzero(&def, sizeof(def));
+ bzero(&mask, sizeof(mask));
+ bzero(&gate, sizeof(gate));
+
+ def.sin6_len = mask.sin6_len = gate.sin6_len =
+ sizeof(struct sockaddr_in6);
+ def.sin6_family = gate.sin6_family = AF_INET6;
+ gate.sin6_addr = dr->rtaddr;
+
+ rtrequest(RTM_DELETE, (struct sockaddr *)&def,
+ (struct sockaddr *)&gate,
+ (struct sockaddr *)&mask, RTF_GATEWAY, &oldrt);
+ if (oldrt) {
+ nd6_rtmsg(RTM_DELETE, oldrt);
+ RTFREE(oldrt);
+ }
+
+ dr->installed = 0;
+}
+
+/*
+ * remove all default routes from the default router list
+ */
+void
+defrouter_reset(void)
+{
+ struct nd_defrouter *dr;
+
+ for (dr = TAILQ_FIRST(&V_nd_defrouter); dr;
+ dr = TAILQ_NEXT(dr, dr_entry))
+ defrouter_delreq(dr);
+
+ /*
+ * XXX should we also nuke any default routers in the kernel, by
+ * going through them with rtalloc1()?
+ */
+}
+
+void
+defrtrlist_del(struct nd_defrouter *dr)
+{
+ struct nd_defrouter *deldr = NULL;
+ struct nd_prefix *pr;
+
+ /*
+ * Flush all the routing table entries that use the router
+ * as a next hop.
+ */
+ if (!V_ip6_forwarding && V_ip6_accept_rtadv) /* XXX: better condition? */
+ rt6_flush(&dr->rtaddr, dr->ifp);
+
+ if (dr->installed) {
+ deldr = dr;
+ defrouter_delreq(dr);
+ }
+ TAILQ_REMOVE(&V_nd_defrouter, dr, dr_entry);
+
+ /*
+ * Also delete all the pointers to the router in each prefix list.
+ */
+ for (pr = V_nd_prefix.lh_first; pr; pr = pr->ndpr_next) {
+ struct nd_pfxrouter *pfxrtr;
+ if ((pfxrtr = pfxrtr_lookup(pr, dr)) != NULL)
+ pfxrtr_del(pfxrtr);
+ }
+ pfxlist_onlink_check();
+
+ /*
+ * If the router is the primary one, choose a new one.
+ * Note that defrouter_select() will remove the current gateway
+ * from the routing table.
+ */
+ if (deldr)
+ defrouter_select();
+
+ free(dr, M_IP6NDP);
+}
+
+/*
+ * Default Router Selection according to Section 6.3.6 of RFC 2461 and
+ * draft-ietf-ipngwg-router-selection:
+ * 1) Routers that are reachable or probably reachable should be preferred.
+ * If we have more than one (probably) reachable router, prefer ones
+ * with the highest router preference.
+ * 2) When no routers on the list are known to be reachable or
+ * probably reachable, routers SHOULD be selected in a round-robin
+ * fashion, regardless of router preference values.
+ * 3) If the Default Router List is empty, assume that all
+ * destinations are on-link.
+ *
+ * We assume nd_defrouter is sorted by router preference value.
+ * Since the code below covers both with and without router preference cases,
+ * we do not need to classify the cases by ifdef.
+ *
+ * At this moment, we do not try to install more than one default router,
+ * even when the multipath routing is available, because we're not sure about
+ * the benefits for stub hosts comparing to the risk of making the code
+ * complicated and the possibility of introducing bugs.
+ */
+void
+defrouter_select(void)
+{
+ int s = splnet();
+ struct nd_defrouter *dr, *selected_dr = NULL, *installed_dr = NULL;
+ struct llentry *ln = NULL;
+
+ /*
+ * This function should be called only when acting as an autoconfigured
+ * host. Although the remaining part of this function is not effective
+ * if the node is not an autoconfigured host, we explicitly exclude
+ * such cases here for safety.
+ */
+ if (V_ip6_forwarding || !V_ip6_accept_rtadv) {
+ nd6log((LOG_WARNING,
+ "defrouter_select: called unexpectedly (forwarding=%d, "
+ "accept_rtadv=%d)\n", V_ip6_forwarding, V_ip6_accept_rtadv));
+ splx(s);
+ return;
+ }
+
+ /*
+ * Let's handle easy case (3) first:
+ * If the default router list is empty, there's nothing to be done.
+ */
+ if (!TAILQ_FIRST(&V_nd_defrouter)) {
+ splx(s);
+ return;
+ }
+
+ /*
+ * Search for a (probably) reachable router from the list.
+ * We just pick the first reachable one (if any), assuming that
+ * the list is ordered by the rule described in defrtrlist_update().
+ */
+ for (dr = TAILQ_FIRST(&V_nd_defrouter); dr;
+ dr = TAILQ_NEXT(dr, dr_entry)) {
+ IF_AFDATA_LOCK(dr->ifp);
+ if (selected_dr == NULL &&
+ (ln = nd6_lookup(&dr->rtaddr, 0, dr->ifp)) &&
+ ND6_IS_LLINFO_PROBREACH(ln)) {
+ selected_dr = dr;
+ }
+ IF_AFDATA_UNLOCK(dr->ifp);
+ if (ln != NULL) {
+ LLE_RUNLOCK(ln);
+ ln = NULL;
+ }
+
+ if (dr->installed && installed_dr == NULL)
+ installed_dr = dr;
+ else if (dr->installed && installed_dr) {
+ /* this should not happen. warn for diagnosis. */
+ log(LOG_ERR, "defrouter_select: more than one router"
+ " is installed\n");
+ }
+ }
+ /*
+ * If none of the default routers was found to be reachable,
+ * round-robin the list regardless of preference.
+ * Otherwise, if we have an installed router, check if the selected
+ * (reachable) router should really be preferred to the installed one.
+ * We only prefer the new router when the old one is not reachable
+ * or when the new one has a strictly higher preference value.
+ */
+ if (selected_dr == NULL) {
+ if (installed_dr == NULL || !TAILQ_NEXT(installed_dr, dr_entry))
+ selected_dr = TAILQ_FIRST(&V_nd_defrouter);
+ else
+ selected_dr = TAILQ_NEXT(installed_dr, dr_entry);
+ } else if (installed_dr) {
+ IF_AFDATA_LOCK(installed_dr->ifp);
+ if ((ln = nd6_lookup(&installed_dr->rtaddr, 0, installed_dr->ifp)) &&
+ ND6_IS_LLINFO_PROBREACH(ln) &&
+ rtpref(selected_dr) <= rtpref(installed_dr)) {
+ selected_dr = installed_dr;
+ }
+ IF_AFDATA_UNLOCK(installed_dr->ifp);
+ if (ln != NULL)
+ LLE_RUNLOCK(ln);
+ }
+
+ /*
+ * If the selected router is different from the installed one,
+ * remove the installed router and install the selected one.
+ * Note that the selected router is never NULL here.
+ */
+ if (installed_dr != selected_dr) {
+ if (installed_dr)
+ defrouter_delreq(installed_dr);
+ defrouter_addreq(selected_dr);
+ }
+
+ splx(s);
+ return;
+}
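+/*
+ * Illustrative example of the rules implemented above: given routers
+ * A (high preference, unreachable), B (medium, reachable) and
+ * C (low, reachable), B is selected, being the first (probably)
+ * reachable entry in the preference-sorted list. If none of them is
+ * reachable, the list is round-robined instead: the next router after
+ * the currently installed one (or the head of the list) is chosen
+ * regardless of preference.
+ */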
+
+/*
+ * for default router selection
+ * regards router-preference field as a 2-bit signed integer
+ */
+static int
+rtpref(struct nd_defrouter *dr)
+{
+ switch (dr->flags & ND_RA_FLAG_RTPREF_MASK) {
+ case ND_RA_FLAG_RTPREF_HIGH:
+ return (RTPREF_HIGH);
+ case ND_RA_FLAG_RTPREF_MEDIUM:
+ case ND_RA_FLAG_RTPREF_RSV:
+ return (RTPREF_MEDIUM);
+ case ND_RA_FLAG_RTPREF_LOW:
+ return (RTPREF_LOW);
+ default:
+ /*
+ * This case should never happen. If it did, it would mean a
+ * serious kernel-internal bug. We thus always bark here.
+ * Or, can we even panic?
+ */
+ log(LOG_ERR, "rtpref: impossible RA flag %x\n", dr->flags);
+ return (RTPREF_INVALID);
+ }
+ /* NOTREACHED */
+}
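+/*
+ * For reference, the 2-bit preference encoding decoded above comes
+ * from draft-ietf-ipngwg-router-selection (later RFC 4191):
+ * 01 = high, 00 = medium, 11 = low; the reserved pattern 10 is
+ * treated as medium.
+ */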
+
+static struct nd_defrouter *
+defrtrlist_update(struct nd_defrouter *new)
+{
+ struct nd_defrouter *dr, *n;
+ int s = splnet();
+
+ if ((dr = defrouter_lookup(&new->rtaddr, new->ifp)) != NULL) {
+ /* entry exists */
+ if (new->rtlifetime == 0) {
+ defrtrlist_del(dr);
+ dr = NULL;
+ } else {
+ int oldpref = rtpref(dr);
+
+ /* override */
+ dr->flags = new->flags; /* xxx flag check */
+ dr->rtlifetime = new->rtlifetime;
+ dr->expire = new->expire;
+
+ /*
+ * If the preference does not change, there's no need
+ * to sort the entries.
+ */
+ if (rtpref(new) == oldpref) {
+ splx(s);
+ return (dr);
+ }
+
+ /*
+ * the preferred router may have changed, so relocate
+ * this router.
+ * XXX: calling TAILQ_REMOVE directly is bad practice.
+ * However, since defrtrlist_del() has many side
+ * effects, we intentionally do so here.
+ * defrouter_select() below will handle routing
+ * changes later.
+ */
+ TAILQ_REMOVE(&V_nd_defrouter, dr, dr_entry);
+ n = dr;
+ goto insert;
+ }
+ splx(s);
+ return (dr);
+ }
+
+ /* entry does not exist */
+ if (new->rtlifetime == 0) {
+ splx(s);
+ return (NULL);
+ }
+
+ n = (struct nd_defrouter *)malloc(sizeof(*n), M_IP6NDP, M_NOWAIT);
+ if (n == NULL) {
+ splx(s);
+ return (NULL);
+ }
+ bzero(n, sizeof(*n));
+ *n = *new;
+
+insert:
+ /*
+ * Insert the new router in the Default Router List;
+ * the Default Router List should be kept in descending order
+ * of router preference. Routers with the same preference are
+ * sorted in arrival-time order.
+ */
+
+ /* insert at the end of the group */
+ for (dr = TAILQ_FIRST(&V_nd_defrouter); dr;
+ dr = TAILQ_NEXT(dr, dr_entry)) {
+ if (rtpref(n) > rtpref(dr))
+ break;
+ }
+ if (dr)
+ TAILQ_INSERT_BEFORE(dr, n, dr_entry);
+ else
+ TAILQ_INSERT_TAIL(&V_nd_defrouter, n, dr_entry);
+
+ defrouter_select();
+
+ splx(s);
+
+ return (n);
+}
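+/*
+ * Insertion example for the loop above: with an existing list
+ * [high, medium, medium], a new medium-preference router is linked
+ * after the last existing medium entry, since the scan only stops at
+ * the first entry with a strictly lower preference; entries of equal
+ * preference therefore stay in arrival order.
+ */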
+
+static struct nd_pfxrouter *
+pfxrtr_lookup(struct nd_prefix *pr, struct nd_defrouter *dr)
+{
+ struct nd_pfxrouter *search;
+
+ for (search = pr->ndpr_advrtrs.lh_first; search; search = search->pfr_next) {
+ if (search->router == dr)
+ break;
+ }
+
+ return (search);
+}
+
+static void
+pfxrtr_add(struct nd_prefix *pr, struct nd_defrouter *dr)
+{
+ struct nd_pfxrouter *new;
+
+ new = (struct nd_pfxrouter *)malloc(sizeof(*new), M_IP6NDP, M_NOWAIT);
+ if (new == NULL)
+ return;
+ bzero(new, sizeof(*new));
+ new->router = dr;
+
+ LIST_INSERT_HEAD(&pr->ndpr_advrtrs, new, pfr_entry);
+
+ pfxlist_onlink_check();
+}
+
+static void
+pfxrtr_del(struct nd_pfxrouter *pfr)
+{
+ LIST_REMOVE(pfr, pfr_entry);
+ free(pfr, M_IP6NDP);
+}
+
+struct nd_prefix *
+nd6_prefix_lookup(struct nd_prefixctl *key)
+{
+ struct nd_prefix *search;
+
+ for (search = V_nd_prefix.lh_first;
+ search; search = search->ndpr_next) {
+ if (key->ndpr_ifp == search->ndpr_ifp &&
+ key->ndpr_plen == search->ndpr_plen &&
+ in6_are_prefix_equal(&key->ndpr_prefix.sin6_addr,
+ &search->ndpr_prefix.sin6_addr, key->ndpr_plen)) {
+ break;
+ }
+ }
+
+ return (search);
+}
+
+int
+nd6_prelist_add(struct nd_prefixctl *pr, struct nd_defrouter *dr,
+ struct nd_prefix **newp)
+{
+ struct nd_prefix *new = NULL;
+ int error = 0;
+ int i, s;
+ char ip6buf[INET6_ADDRSTRLEN];
+
+ new = (struct nd_prefix *)malloc(sizeof(*new), M_IP6NDP, M_NOWAIT);
+ if (new == NULL)
+ return(ENOMEM);
+ bzero(new, sizeof(*new));
+ new->ndpr_ifp = pr->ndpr_ifp;
+ new->ndpr_prefix = pr->ndpr_prefix;
+ new->ndpr_plen = pr->ndpr_plen;
+ new->ndpr_vltime = pr->ndpr_vltime;
+ new->ndpr_pltime = pr->ndpr_pltime;
+ new->ndpr_flags = pr->ndpr_flags;
+ if ((error = in6_init_prefix_ltimes(new)) != 0) {
+ free(new, M_IP6NDP);
+ return(error);
+ }
+ new->ndpr_lastupdate = time_second;
+ if (newp != NULL)
+ *newp = new;
+
+ /* initialization */
+ LIST_INIT(&new->ndpr_advrtrs);
+ in6_prefixlen2mask(&new->ndpr_mask, new->ndpr_plen);
+ /* make prefix in the canonical form */
+ for (i = 0; i < 4; i++)
+ new->ndpr_prefix.sin6_addr.s6_addr32[i] &=
+ new->ndpr_mask.s6_addr32[i];
+
+ s = splnet();
+ /* link ndpr_entry to nd_prefix list */
+ LIST_INSERT_HEAD(&V_nd_prefix, new, ndpr_entry);
+ splx(s);
+
+ /* ND_OPT_PI_FLAG_ONLINK processing */
+ if (new->ndpr_raf_onlink) {
+ int e;
+
+ if ((e = nd6_prefix_onlink(new)) != 0) {
+ nd6log((LOG_ERR, "nd6_prelist_add: failed to make "
+ "the prefix %s/%d on-link on %s (errno=%d)\n",
+ ip6_sprintf(ip6buf, &pr->ndpr_prefix.sin6_addr),
+ pr->ndpr_plen, if_name(pr->ndpr_ifp), e));
+ /* proceed anyway. XXX: is it correct? */
+ }
+ }
+
+ if (dr)
+ pfxrtr_add(new, dr);
+
+ return 0;
+}
+
+void
+prelist_remove(struct nd_prefix *pr)
+{
+ struct nd_pfxrouter *pfr, *next;
+ int e, s;
+ char ip6buf[INET6_ADDRSTRLEN];
+
+ /* make sure to invalidate the prefix until it is really freed. */
+ pr->ndpr_vltime = 0;
+ pr->ndpr_pltime = 0;
+
+ /*
+ * Though these flags are now meaningless, we'd rather keep the value
+ * of pr->ndpr_raf_onlink and pr->ndpr_raf_auto not to confuse users
+ * when executing "ndp -p".
+ */
+
+ if ((pr->ndpr_stateflags & NDPRF_ONLINK) != 0 &&
+ (e = nd6_prefix_offlink(pr)) != 0) {
+ nd6log((LOG_ERR, "prelist_remove: failed to make %s/%d offlink "
+ "on %s, errno=%d\n",
+ ip6_sprintf(ip6buf, &pr->ndpr_prefix.sin6_addr),
+ pr->ndpr_plen, if_name(pr->ndpr_ifp), e));
+ /* what should we do? */
+ }
+
+ if (pr->ndpr_refcnt > 0)
+ return; /* notice here? */
+
+ s = splnet();
+
+ /* unlink ndpr_entry from nd_prefix list */
+ LIST_REMOVE(pr, ndpr_entry);
+
+ /* free the list of routers that advertised the prefix */
+ for (pfr = pr->ndpr_advrtrs.lh_first; pfr; pfr = next) {
+ next = pfr->pfr_next;
+
+ free(pfr, M_IP6NDP);
+ }
+ splx(s);
+
+ free(pr, M_IP6NDP);
+
+ pfxlist_onlink_check();
+}
+
+/*
+ * dr - may be NULL
+ */
+
+static int
+prelist_update(struct nd_prefixctl *new, struct nd_defrouter *dr,
+ struct mbuf *m, int mcast)
+{
+ struct in6_ifaddr *ia6 = NULL, *ia6_match = NULL;
+ struct ifaddr *ifa;
+ struct ifnet *ifp = new->ndpr_ifp;
+ struct nd_prefix *pr;
+ int s = splnet();
+ int error = 0;
+ int newprefix = 0;
+ int auth;
+ struct in6_addrlifetime lt6_tmp;
+ char ip6buf[INET6_ADDRSTRLEN];
+
+ auth = 0;
+ if (m) {
+ /*
+ * Authenticity for NA consists of authentication for
+ * both the IP header and the IP datagram, doesn't it?
+ */
+#if defined(M_AUTHIPHDR) && defined(M_AUTHIPDGM)
+ auth = ((m->m_flags & M_AUTHIPHDR) &&
+ (m->m_flags & M_AUTHIPDGM));
+#endif
+ }
+
+ if ((pr = nd6_prefix_lookup(new)) != NULL) {
+ /*
+ * nd6_prefix_lookup() ensures that pr and new have the same
+ * prefix on the same interface.
+ */
+
+ /*
+ * Update prefix information. Note that the on-link (L) bit
+ * and the autonomous (A) bit should NOT be changed from 1
+ * to 0.
+ */
+ if (new->ndpr_raf_onlink == 1)
+ pr->ndpr_raf_onlink = 1;
+ if (new->ndpr_raf_auto == 1)
+ pr->ndpr_raf_auto = 1;
+ if (new->ndpr_raf_onlink) {
+ pr->ndpr_vltime = new->ndpr_vltime;
+ pr->ndpr_pltime = new->ndpr_pltime;
+ (void)in6_init_prefix_ltimes(pr); /* XXX error case? */
+ pr->ndpr_lastupdate = time_second;
+ }
+
+ if (new->ndpr_raf_onlink &&
+ (pr->ndpr_stateflags & NDPRF_ONLINK) == 0) {
+ int e;
+
+ if ((e = nd6_prefix_onlink(pr)) != 0) {
+ nd6log((LOG_ERR,
+ "prelist_update: failed to make "
+ "the prefix %s/%d on-link on %s "
+ "(errno=%d)\n",
+ ip6_sprintf(ip6buf,
+ &pr->ndpr_prefix.sin6_addr),
+ pr->ndpr_plen, if_name(pr->ndpr_ifp), e));
+ /* proceed anyway. XXX: is it correct? */
+ }
+ }
+
+ if (dr && pfxrtr_lookup(pr, dr) == NULL)
+ pfxrtr_add(pr, dr);
+ } else {
+ struct nd_prefix *newpr = NULL;
+
+ newprefix = 1;
+
+ if (new->ndpr_vltime == 0)
+ goto end;
+ if (new->ndpr_raf_onlink == 0 && new->ndpr_raf_auto == 0)
+ goto end;
+
+ error = nd6_prelist_add(new, dr, &newpr);
+ if (error != 0 || newpr == NULL) {
+ nd6log((LOG_NOTICE, "prelist_update: "
+ "nd6_prelist_add failed for %s/%d on %s "
+ "errno=%d, returnpr=%p\n",
+ ip6_sprintf(ip6buf, &new->ndpr_prefix.sin6_addr),
+ new->ndpr_plen, if_name(new->ndpr_ifp),
+ error, newpr));
+ goto end; /* we should just give up in this case. */
+ }
+
+ /*
+ * XXX: from the ND point of view, we can ignore a prefix
+ * with the on-link bit being zero. However, we need a
+ * prefix structure for references from autoconfigured
+ * addresses. Thus, we explicitly make sure that the prefix
+ * itself expires now.
+ */
+ if (newpr->ndpr_raf_onlink == 0) {
+ newpr->ndpr_vltime = 0;
+ newpr->ndpr_pltime = 0;
+ in6_init_prefix_ltimes(newpr);
+ }
+
+ pr = newpr;
+ }
+
+ /*
+ * Address autoconfiguration based on Section 5.5.3 of RFC 2462.
+ * Note that pr must be non NULL at this point.
+ */
+
+ /* 5.5.3 (a). Ignore the prefix without the A bit set. */
+ if (!new->ndpr_raf_auto)
+ goto end;
+
+ /*
+ * 5.5.3 (b). the link-local prefix should have been ignored in
+ * nd6_ra_input.
+ */
+
+ /* 5.5.3 (c). Consistency check on lifetimes: pltime <= vltime. */
+ if (new->ndpr_pltime > new->ndpr_vltime) {
+ error = EINVAL; /* XXX: won't be used */
+ goto end;
+ }
+
+ /*
+ * 5.5.3 (d). If the prefix advertised is not equal to the prefix of
+ * an address configured by stateless autoconfiguration already in the
+ * list of addresses associated with the interface, and the Valid
+ * Lifetime is not 0, form an address. We first check if we have
+ * a matching prefix.
+ * Note: we apply a clarification in rfc2462bis-02 here. We only
+ * consider autoconfigured addresses while RFC2462 simply said
+ * "address".
+ */
+ IF_ADDR_LOCK(ifp);
+ TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
+ struct in6_ifaddr *ifa6;
+ u_int32_t remaininglifetime;
+
+ if (ifa->ifa_addr->sa_family != AF_INET6)
+ continue;
+
+ ifa6 = (struct in6_ifaddr *)ifa;
+
+ /*
+ * We only consider autoconfigured addresses as per rfc2462bis.
+ */
+ if (!(ifa6->ia6_flags & IN6_IFF_AUTOCONF))
+ continue;
+
+ /*
+ * Spec is not clear here, but I believe we should concentrate
+ * on unicast (i.e. not anycast) addresses.
+ * XXX: other ia6_flags? detached or duplicated?
+ */
+ if ((ifa6->ia6_flags & IN6_IFF_ANYCAST) != 0)
+ continue;
+
+ /*
+ * Ignore the address if it is not associated with a prefix
+ * or is associated with a prefix that is different from this
+ * one. (pr is never NULL here)
+ */
+ if (ifa6->ia6_ndpr != pr)
+ continue;
+
+ if (ia6_match == NULL) /* remember the first one */
+ ia6_match = ifa6;
+
+ /*
+ * An already autoconfigured address matched. Now that we
+ * are sure there is at least one matched address, we can
+ * proceed to 5.5.3. (e): update the lifetimes according to the
+ * "two hours" rule and the privacy extension.
+ * We apply some clarifications in rfc2462bis:
+ * - use remaininglifetime instead of storedlifetime as a
+ * variable name
+ * - remove the dead code in the "two-hour" rule
+ */
+#define TWOHOUR (120*60)
+ lt6_tmp = ifa6->ia6_lifetime;
+
+ if (lt6_tmp.ia6t_vltime == ND6_INFINITE_LIFETIME)
+ remaininglifetime = ND6_INFINITE_LIFETIME;
+ else if (time_second - ifa6->ia6_updatetime >
+ lt6_tmp.ia6t_vltime) {
+ /*
+ * The case of "invalid" address. We should usually
+ * not see this case.
+ */
+ remaininglifetime = 0;
+ } else
+ remaininglifetime = lt6_tmp.ia6t_vltime -
+ (time_second - ifa6->ia6_updatetime);
+
+ /* when not updating, keep the current stored lifetime. */
+ lt6_tmp.ia6t_vltime = remaininglifetime;
+
+ if (TWOHOUR < new->ndpr_vltime ||
+ remaininglifetime < new->ndpr_vltime) {
+ lt6_tmp.ia6t_vltime = new->ndpr_vltime;
+ } else if (remaininglifetime <= TWOHOUR) {
+ if (auth) {
+ lt6_tmp.ia6t_vltime = new->ndpr_vltime;
+ }
+ } else {
+ /*
+ * new->ndpr_vltime <= TWOHOUR &&
+ * TWOHOUR < remaininglifetime
+ */
+ lt6_tmp.ia6t_vltime = TWOHOUR;
+ }
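+ /*
+ * Worked example of the two-hour rule above for an unauthenticated
+ * RA: with 10 hours remaining and an advertised lifetime of 30
+ * minutes, the valid lifetime is clamped to TWOHOUR, limiting how
+ * far a forged RA can shorten an address's life; with only 1 hour
+ * remaining the stored lifetime is kept (the advertised value is
+ * honored only when the RA is authenticated); an advertised
+ * lifetime above two hours, or one extending the remaining
+ * lifetime, is accepted as-is.
+ */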
+
+ /* The 2 hour rule is not imposed for preferred lifetime. */
+ lt6_tmp.ia6t_pltime = new->ndpr_pltime;
+
+ in6_init_address_ltimes(pr, &lt6_tmp);
+
+ /*
+ * We need to treat lifetimes for temporary addresses
+ * differently, according to
+ * draft-ietf-ipv6-privacy-addrs-v2-01.txt 3.3 (1);
+ * we only update the lifetimes when they are in the maximum
+ * intervals.
+ */
+ if ((ifa6->ia6_flags & IN6_IFF_TEMPORARY) != 0) {
+ u_int32_t maxvltime, maxpltime;
+
+ if (V_ip6_temp_valid_lifetime >
+ (u_int32_t)((time_second - ifa6->ia6_createtime) +
+ V_ip6_desync_factor)) {
+ maxvltime = V_ip6_temp_valid_lifetime -
+ (time_second - ifa6->ia6_createtime) -
+ V_ip6_desync_factor;
+ } else
+ maxvltime = 0;
+ if (V_ip6_temp_preferred_lifetime >
+ (u_int32_t)((time_second - ifa6->ia6_createtime) +
+ V_ip6_desync_factor)) {
+ maxpltime = V_ip6_temp_preferred_lifetime -
+ (time_second - ifa6->ia6_createtime) -
+ V_ip6_desync_factor;
+ } else
+ maxpltime = 0;
+
+ if (lt6_tmp.ia6t_vltime == ND6_INFINITE_LIFETIME ||
+ lt6_tmp.ia6t_vltime > maxvltime) {
+ lt6_tmp.ia6t_vltime = maxvltime;
+ }
+ if (lt6_tmp.ia6t_pltime == ND6_INFINITE_LIFETIME ||
+ lt6_tmp.ia6t_pltime > maxpltime) {
+ lt6_tmp.ia6t_pltime = maxpltime;
+ }
+ }
+ ifa6->ia6_lifetime = lt6_tmp;
+ ifa6->ia6_updatetime = time_second;
+ }
+ IF_ADDR_UNLOCK(ifp);
+ if (ia6_match == NULL && new->ndpr_vltime) {
+ int ifidlen;
+
+ /*
+ * 5.5.3 (d) (continued)
+ * No address matched and the valid lifetime is non-zero.
+ * Create a new address.
+ */
+
+ /*
+ * Prefix Length check:
+ * If the sum of the prefix length and interface identifier
+ * length does not equal 128 bits, the Prefix Information
+ * option MUST be ignored. The length of the interface
+ * identifier is defined in a separate link-type specific
+ * document.
+ */
+ ifidlen = in6_if2idlen(ifp);
+ if (ifidlen < 0) {
+ /* this should not happen, so we always log it. */
+ log(LOG_ERR, "prelist_update: IFID undefined (%s)\n",
+ if_name(ifp));
+ goto end;
+ }
+ if (ifidlen + pr->ndpr_plen != 128) {
+ nd6log((LOG_INFO,
+ "prelist_update: invalid prefixlen "
+ "%d for %s, ignored\n",
+ pr->ndpr_plen, if_name(ifp)));
+ goto end;
+ }
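+ /*
+ * For example, in6_if2idlen() returns 64 on Ethernet (EUI-64
+ * interface identifiers), so only /64 prefixes pass the check
+ * above and can form addresses there.
+ */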
+
+ if ((ia6 = in6_ifadd(new, mcast)) != NULL) {
+ /*
+ * note that we should use pr (not new) for reference.
+ */
+ pr->ndpr_refcnt++;
+ ia6->ia6_ndpr = pr;
+
+ /*
+ * RFC 3041 3.3 (2).
+ * When a new public address is created as described
+ * in RFC2462, also create a new temporary address.
+ *
+ * RFC 3041 3.5.
+ * When an interface connects to a new link, a new
+ * randomized interface identifier should be generated
+ * immediately together with a new set of temporary
+ * addresses. Thus, we specify 1 as the 2nd arg of
+ * in6_tmpifadd().
+ */
+ if (V_ip6_use_tempaddr) {
+ int e;
+ if ((e = in6_tmpifadd(ia6, 1, 1)) != 0) {
+ nd6log((LOG_NOTICE, "prelist_update: "
+ "failed to create a temporary "
+ "address, errno=%d\n",
+ e));
+ }
+ }
+ ifa_free(&ia6->ia_ifa);
+
+ /*
+ * A newly added address might affect the status
+ * of other addresses, so we check and update it.
+ * XXX: what if address duplication happens?
+ */
+ pfxlist_onlink_check();
+ } else {
+ /* just set an error. do not bark here. */
+ error = EADDRNOTAVAIL; /* XXX: might be unused. */
+ }
+ }
+
+ end:
+ splx(s);
+ return error;
+}
+
+/*
+ * A helper function used in the on-link detection below;
+ * detects whether a given prefix has a (probably) reachable advertising router.
+ * XXX: lengthy function name...
+ */
+static struct nd_pfxrouter *
+find_pfxlist_reachable_router(struct nd_prefix *pr)
+{
+ struct nd_pfxrouter *pfxrtr;
+ struct llentry *ln;
+ int canreach;
+
+ for (pfxrtr = LIST_FIRST(&pr->ndpr_advrtrs); pfxrtr != NULL;
+ pfxrtr = LIST_NEXT(pfxrtr, pfr_entry)) {
+ IF_AFDATA_LOCK(pfxrtr->router->ifp);
+ ln = nd6_lookup(&pfxrtr->router->rtaddr, 0, pfxrtr->router->ifp);
+ IF_AFDATA_UNLOCK(pfxrtr->router->ifp);
+ if (ln == NULL)
+ continue;
+ canreach = ND6_IS_LLINFO_PROBREACH(ln);
+ LLE_RUNLOCK(ln);
+ if (canreach)
+ break;
+ }
+ return (pfxrtr);
+}
+
+/*
+ * Check if each prefix in the prefix list has at least one available router
+ * that advertised the prefix (a router is "available" if its neighbor cache
+ * entry is reachable or probably reachable).
+ * If the check fails, the prefix may be off-link, because, for example,
+ * we have moved from the network but the lifetime of the prefix has not
+ * expired yet. So we should not use the prefix if there is another prefix
+ * that has an available router.
+ * But if there is no prefix that has an available router, we still regard
+ * all the prefixes as on-link. This is because we can't tell if all the
+ * routers are simply dead or if we really moved from the network and there
+ * is no router around us.
+ */
+void
+pfxlist_onlink_check()
+{
+ struct nd_prefix *pr;
+ struct in6_ifaddr *ifa;
+ struct nd_defrouter *dr;
+ struct nd_pfxrouter *pfxrtr = NULL;
+
+ /*
+ * Check if there is a prefix that has a reachable advertising
+ * router.
+ */
+ for (pr = V_nd_prefix.lh_first; pr; pr = pr->ndpr_next) {
+ if (pr->ndpr_raf_onlink && find_pfxlist_reachable_router(pr))
+ break;
+ }
+
+ /*
+ * If we have no such prefix, check whether we still have a router
+ * that does not advertise any prefixes.
+ */
+ if (pr == NULL) {
+ for (dr = TAILQ_FIRST(&V_nd_defrouter); dr;
+ dr = TAILQ_NEXT(dr, dr_entry)) {
+ struct nd_prefix *pr0;
+
+ for (pr0 = V_nd_prefix.lh_first; pr0;
+ pr0 = pr0->ndpr_next) {
+ if ((pfxrtr = pfxrtr_lookup(pr0, dr)) != NULL)
+ break;
+ }
+ if (pfxrtr != NULL)
+ break;
+ }
+ }
+ if (pr != NULL || (TAILQ_FIRST(&V_nd_defrouter) && pfxrtr == NULL)) {
+ /*
+ * There is at least one prefix that has a reachable router,
+ * or at least a router which probably does not advertise
+ * any prefixes. The latter would be the case when we move
+ * to a new link where we have a router that does not provide
+ * prefixes and we configure an address by hand.
+ * Detach prefixes which have no reachable advertising
+ * router, and attach other prefixes.
+ */
+ for (pr = V_nd_prefix.lh_first; pr; pr = pr->ndpr_next) {
+ /* XXX: a link-local prefix should never be detached */
+ if (IN6_IS_ADDR_LINKLOCAL(&pr->ndpr_prefix.sin6_addr))
+ continue;
+
+ /*
+ * we aren't interested in prefixes without the L bit
+ * set.
+ */
+ if (pr->ndpr_raf_onlink == 0)
+ continue;
+
+ if (pr->ndpr_raf_auto == 0)
+ continue;
+
+ if ((pr->ndpr_stateflags & NDPRF_DETACHED) == 0 &&
+ find_pfxlist_reachable_router(pr) == NULL)
+ pr->ndpr_stateflags |= NDPRF_DETACHED;
+ if ((pr->ndpr_stateflags & NDPRF_DETACHED) != 0 &&
+ find_pfxlist_reachable_router(pr) != 0)
+ pr->ndpr_stateflags &= ~NDPRF_DETACHED;
+ }
+ } else {
+ /* there is no prefix that has a reachable router */
+ for (pr = V_nd_prefix.lh_first; pr; pr = pr->ndpr_next) {
+ if (IN6_IS_ADDR_LINKLOCAL(&pr->ndpr_prefix.sin6_addr))
+ continue;
+
+ if (pr->ndpr_raf_onlink == 0)
+ continue;
+
+ if (pr->ndpr_raf_auto == 0)
+ continue;
+
+ if ((pr->ndpr_stateflags & NDPRF_DETACHED) != 0)
+ pr->ndpr_stateflags &= ~NDPRF_DETACHED;
+ }
+ }
+
+ /*
+ * Remove each interface route associated with a (just) detached
+ * prefix, and reinstall the interface route for a (just) attached
+ * prefix. Note that not every reinstallation attempt necessarily
+ * succeeds when the same prefix is shared among multiple
+ * interfaces. Such cases will be handled in nd6_prefix_onlink,
+ * so we don't have to care about them.
+ */
+ for (pr = V_nd_prefix.lh_first; pr; pr = pr->ndpr_next) {
+ int e;
+ char ip6buf[INET6_ADDRSTRLEN];
+
+ if (IN6_IS_ADDR_LINKLOCAL(&pr->ndpr_prefix.sin6_addr))
+ continue;
+
+ if (pr->ndpr_raf_onlink == 0)
+ continue;
+
+ if (pr->ndpr_raf_auto == 0)
+ continue;
+
+ if ((pr->ndpr_stateflags & NDPRF_DETACHED) != 0 &&
+ (pr->ndpr_stateflags & NDPRF_ONLINK) != 0) {
+ if ((e = nd6_prefix_offlink(pr)) != 0) {
+ nd6log((LOG_ERR,
+ "pfxlist_onlink_check: failed to "
+ "make %s/%d offlink, errno=%d\n",
+ ip6_sprintf(ip6buf,
+ &pr->ndpr_prefix.sin6_addr),
+ pr->ndpr_plen, e));
+ }
+ }
+ if ((pr->ndpr_stateflags & NDPRF_DETACHED) == 0 &&
+ (pr->ndpr_stateflags & NDPRF_ONLINK) == 0 &&
+ pr->ndpr_raf_onlink) {
+ if ((e = nd6_prefix_onlink(pr)) != 0) {
+ nd6log((LOG_ERR,
+ "pfxlist_onlink_check: failed to "
+ "make %s/%d onlink, errno=%d\n",
+ ip6_sprintf(ip6buf,
+ &pr->ndpr_prefix.sin6_addr),
+ pr->ndpr_plen, e));
+ }
+ }
+ }
+
+ /*
+ * Changes on the prefix status might affect address status as well.
+ * Make sure that all addresses derived from an attached prefix are
+ * attached, and that all addresses derived from a detached prefix are
+ * detached. Note, however, that a manually configured address should
+ * always be attached.
+ * The precise detection logic is the same as the one for prefixes.
+ *
+ * XXXRW: in6_ifaddrhead locking.
+ */
+ TAILQ_FOREACH(ifa, &V_in6_ifaddrhead, ia_link) {
+ if (!(ifa->ia6_flags & IN6_IFF_AUTOCONF))
+ continue;
+
+ if (ifa->ia6_ndpr == NULL) {
+ /*
+ * This can happen when we first configure the address
+ * (i.e. the address exists, but the prefix does not).
+ * XXX: complicated relationships...
+ */
+ continue;
+ }
+
+ if (find_pfxlist_reachable_router(ifa->ia6_ndpr))
+ break;
+ }
+ if (ifa) {
+ TAILQ_FOREACH(ifa, &V_in6_ifaddrhead, ia_link) {
+ if ((ifa->ia6_flags & IN6_IFF_AUTOCONF) == 0)
+ continue;
+
+ if (ifa->ia6_ndpr == NULL) /* XXX: see above. */
+ continue;
+
+ if (find_pfxlist_reachable_router(ifa->ia6_ndpr)) {
+ if (ifa->ia6_flags & IN6_IFF_DETACHED) {
+ ifa->ia6_flags &= ~IN6_IFF_DETACHED;
+ ifa->ia6_flags |= IN6_IFF_TENTATIVE;
+ nd6_dad_start((struct ifaddr *)ifa, 0);
+ }
+ } else {
+ ifa->ia6_flags |= IN6_IFF_DETACHED;
+ }
+ }
+ }
+ else {
+ TAILQ_FOREACH(ifa, &V_in6_ifaddrhead, ia_link) {
+ if ((ifa->ia6_flags & IN6_IFF_AUTOCONF) == 0)
+ continue;
+
+ if (ifa->ia6_flags & IN6_IFF_DETACHED) {
+ ifa->ia6_flags &= ~IN6_IFF_DETACHED;
+ ifa->ia6_flags |= IN6_IFF_TENTATIVE;
+ /* Do we need a delay in this case? */
+ nd6_dad_start((struct ifaddr *)ifa, 0);
+ }
+ }
+ }
+}
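+/*
+ * Example of the detach/attach logic above: after moving to a new
+ * link, the old link's advertising routers become unreachable while
+ * the new link's router is reachable, so prefixes learned on the old
+ * link are marked NDPRF_DETACHED and their interface routes removed,
+ * while the new link's prefixes stay attached. If no prefix at all
+ * has a reachable router, everything is left attached, since we
+ * cannot tell dead routers apart from having moved.
+ */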
+
+int
+nd6_prefix_onlink(struct nd_prefix *pr)
+{
+ struct ifaddr *ifa;
+ struct ifnet *ifp = pr->ndpr_ifp;
+ struct sockaddr_in6 mask6;
+ struct nd_prefix *opr;
+ u_long rtflags;
+ int error = 0;
+ struct radix_node_head *rnh;
+ struct rtentry *rt = NULL;
+ char ip6buf[INET6_ADDRSTRLEN];
+ struct sockaddr_dl null_sdl = {sizeof(null_sdl), AF_LINK};
+
+ /* sanity check */
+ if ((pr->ndpr_stateflags & NDPRF_ONLINK) != 0) {
+ nd6log((LOG_ERR,
+ "nd6_prefix_onlink: %s/%d is already on-link\n",
+ ip6_sprintf(ip6buf, &pr->ndpr_prefix.sin6_addr),
+ pr->ndpr_plen));
+ return (EEXIST);
+ }
+
+ /*
+ * Add the interface route associated with the prefix. Before
+ * installing the route, check whether the same prefix exists on another
+ * interface and has already installed the interface route.
+ * Although such a configuration is expected to be rare, we explicitly
+ * allow it.
+ */
+ for (opr = V_nd_prefix.lh_first; opr; opr = opr->ndpr_next) {
+ if (opr == pr)
+ continue;
+
+ if ((opr->ndpr_stateflags & NDPRF_ONLINK) == 0)
+ continue;
+
+ if (opr->ndpr_plen == pr->ndpr_plen &&
+ in6_are_prefix_equal(&pr->ndpr_prefix.sin6_addr,
+ &opr->ndpr_prefix.sin6_addr, pr->ndpr_plen))
+ return (0);
+ }
+
+ /*
+ * We prefer link-local addresses as the associated interface address.
+ */
+ /* search for a link-local addr */
+ ifa = (struct ifaddr *)in6ifa_ifpforlinklocal(ifp,
+ IN6_IFF_NOTREADY | IN6_IFF_ANYCAST);
+ if (ifa == NULL) {
+ /* XXX: freebsd does not have ifa_ifwithaf */
+ IF_ADDR_LOCK(ifp);
+ TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
+ if (ifa->ifa_addr->sa_family == AF_INET6)
+ break;
+ }
+ if (ifa != NULL)
+ ifa_ref(ifa);
+ IF_ADDR_UNLOCK(ifp);
+ /* should we care about ia6_flags? */
+ }
+ if (ifa == NULL) {
+ /*
+ * This can still happen when, for example, we receive an RA
+ * containing a prefix with the L bit set and the A bit clear,
+ * after removing all IPv6 addresses on the receiving
+ * interface. This should, of course, be rare though.
+ */
+ nd6log((LOG_NOTICE,
+ "nd6_prefix_onlink: failed to find any ifaddr"
+ " to add route for a prefix(%s/%d) on %s\n",
+ ip6_sprintf(ip6buf, &pr->ndpr_prefix.sin6_addr),
+ pr->ndpr_plen, if_name(ifp)));
+ return (0);
+ }
+
+ /*
+ * in6_ifinit() sets nd6_rtrequest to ifa_rtrequest for all ifaddrs.
+ * ifa->ifa_rtrequest = nd6_rtrequest;
+ */
+ bzero(&mask6, sizeof(mask6));
+ mask6.sin6_len = sizeof(mask6);
+ mask6.sin6_addr = pr->ndpr_mask;
+ rtflags = (ifa->ifa_flags & ~IFA_RTSELF) | RTF_UP;
+ error = rtrequest(RTM_ADD, (struct sockaddr *)&pr->ndpr_prefix,
+ ifa->ifa_addr, (struct sockaddr *)&mask6, rtflags, &rt);
+ if (error == 0) {
+ if (rt != NULL) /* this should be non NULL, though */ {
+ rnh = rt_tables_get_rnh(rt->rt_fibnum, AF_INET6);
+ /* XXX what if rnh == NULL? */
+ RADIX_NODE_HEAD_LOCK(rnh);
+ RT_LOCK(rt);
+ if (!rt_setgate(rt, rt_key(rt), (struct sockaddr *)&null_sdl)) {
+ ((struct sockaddr_dl *)rt->rt_gateway)->sdl_type =
+ rt->rt_ifp->if_type;
+ ((struct sockaddr_dl *)rt->rt_gateway)->sdl_index =
+ rt->rt_ifp->if_index;
+ }
+ RADIX_NODE_HEAD_UNLOCK(rnh);
+ nd6_rtmsg(RTM_ADD, rt);
+ RT_UNLOCK(rt);
+ }
+ pr->ndpr_stateflags |= NDPRF_ONLINK;
+ } else {
+ char ip6bufg[INET6_ADDRSTRLEN], ip6bufm[INET6_ADDRSTRLEN];
+ nd6log((LOG_ERR, "nd6_prefix_onlink: failed to add route for a"
+ " prefix (%s/%d) on %s, gw=%s, mask=%s, flags=%lx "
+ "errno = %d\n",
+ ip6_sprintf(ip6buf, &pr->ndpr_prefix.sin6_addr),
+ pr->ndpr_plen, if_name(ifp),
+ ip6_sprintf(ip6bufg, &((struct sockaddr_in6 *)ifa->ifa_addr)->sin6_addr),
+ ip6_sprintf(ip6bufm, &mask6.sin6_addr), rtflags, error));
+ }
+
+ if (rt != NULL) {
+ RT_LOCK(rt);
+ RT_REMREF(rt);
+ RT_UNLOCK(rt);
+ }
+ if (ifa != NULL)
+ ifa_free(ifa);
+
+ return (error);
+}
+
+int
+nd6_prefix_offlink(struct nd_prefix *pr)
+{
+ int error = 0;
+ struct ifnet *ifp = pr->ndpr_ifp;
+ struct nd_prefix *opr;
+ struct sockaddr_in6 sa6, mask6;
+ struct rtentry *rt = NULL;
+ char ip6buf[INET6_ADDRSTRLEN];
+
+ /* sanity check */
+ if ((pr->ndpr_stateflags & NDPRF_ONLINK) == 0) {
+ nd6log((LOG_ERR,
+ "nd6_prefix_offlink: %s/%d is already off-link\n",
+ ip6_sprintf(ip6buf, &pr->ndpr_prefix.sin6_addr),
+ pr->ndpr_plen));
+ return (EEXIST);
+ }
+
+ bzero(&sa6, sizeof(sa6));
+ sa6.sin6_family = AF_INET6;
+ sa6.sin6_len = sizeof(sa6);
+ bcopy(&pr->ndpr_prefix.sin6_addr, &sa6.sin6_addr,
+ sizeof(struct in6_addr));
+ bzero(&mask6, sizeof(mask6));
+ mask6.sin6_family = AF_INET6;
+ mask6.sin6_len = sizeof(sa6);
+ bcopy(&pr->ndpr_mask, &mask6.sin6_addr, sizeof(struct in6_addr));
+ error = rtrequest(RTM_DELETE, (struct sockaddr *)&sa6, NULL,
+ (struct sockaddr *)&mask6, 0, &rt);
+ if (error == 0) {
+ pr->ndpr_stateflags &= ~NDPRF_ONLINK;
+
+ /* report the route deletion to the routing socket. */
+ if (rt != NULL)
+ nd6_rtmsg(RTM_DELETE, rt);
+
+ /*
+ * There might be the same prefix on another interface,
+ * a prefix which could not be on-link just because we had
+ * the interface route (see comments in nd6_prefix_onlink).
+ * If there is one, try to make the prefix on-link on that
+ * interface.
+ */
+ for (opr = V_nd_prefix.lh_first; opr; opr = opr->ndpr_next) {
+ if (opr == pr)
+ continue;
+
+ if ((opr->ndpr_stateflags & NDPRF_ONLINK) != 0)
+ continue;
+
+ /*
+ * KAME specific: detached prefixes should not be
+ * on-link.
+ */
+ if ((opr->ndpr_stateflags & NDPRF_DETACHED) != 0)
+ continue;
+
+ if (opr->ndpr_plen == pr->ndpr_plen &&
+ in6_are_prefix_equal(&pr->ndpr_prefix.sin6_addr,
+ &opr->ndpr_prefix.sin6_addr, pr->ndpr_plen)) {
+ int e;
+
+ if ((e = nd6_prefix_onlink(opr)) != 0) {
+ nd6log((LOG_ERR,
+ "nd6_prefix_offlink: failed to "
+ "recover a prefix %s/%d from %s "
+ "to %s (errno = %d)\n",
+ ip6_sprintf(ip6buf,
+ &opr->ndpr_prefix.sin6_addr),
+ opr->ndpr_plen, if_name(ifp),
+ if_name(opr->ndpr_ifp), e));
+ }
+ }
+ }
+ } else {
+ /* XXX: can we still set the NDPRF_ONLINK flag? */
+ nd6log((LOG_ERR,
+ "nd6_prefix_offlink: failed to delete route: "
+ "%s/%d on %s (errno = %d)\n",
+ ip6_sprintf(ip6buf, &sa6.sin6_addr), pr->ndpr_plen,
+ if_name(ifp), error));
+ }
+
+ if (rt != NULL) {
+ RTFREE(rt);
+ }
+
+ return (error);
+}
+
+static struct in6_ifaddr *
+in6_ifadd(struct nd_prefixctl *pr, int mcast)
+{
+ struct ifnet *ifp = pr->ndpr_ifp;
+ struct ifaddr *ifa;
+ struct in6_aliasreq ifra;
+ struct in6_ifaddr *ia, *ib;
+ int error, plen0;
+ struct in6_addr mask;
+ int prefixlen = pr->ndpr_plen;
+ int updateflags;
+ char ip6buf[INET6_ADDRSTRLEN];
+
+ in6_prefixlen2mask(&mask, prefixlen);
+
+ /*
+ * find a link-local address (will be interface ID).
+ * Is it really mandatory? Theoretically, a global or a site-local
+ * address can be configured without a link-local address, if we
+ * have a unique interface identifier...
+ *
+ * It is not mandatory to have a link-local address; we could generate
+ * an interface identifier on the fly. We use the link-local one because:
+ * (1) it is the easiest way to find an interface identifier.
+ * (2) RFC2462 5.4 suggests using the same interface identifier
+ * for multiple addresses on a single interface, and a possible shortcut
+ * of DAD. We omitted DAD for this reason in the past.
+ * (3) a user can prevent autoconfiguration of a global address
+ * by removing the link-local address by hand (this is partly because we
+ * don't have another way to control the use of IPv6 on an interface;
+ * this has been our design choice - cf. NRL's "ifconfig auto").
+ * (4) it is easier to manage when an interface has addresses
+ * with the same interface identifier, than to have multiple addresses
+ * with different interface identifiers.
+ */
+ ifa = (struct ifaddr *)in6ifa_ifpforlinklocal(ifp, 0); /* 0 is OK? */
+ if (ifa)
+ ib = (struct in6_ifaddr *)ifa;
+ else
+ return NULL;
+
+ /* prefixlen + ifidlen must be equal to 128 */
+ plen0 = in6_mask2len(&ib->ia_prefixmask.sin6_addr, NULL);
+ if (prefixlen != plen0) {
+ ifa_free(ifa);
+ nd6log((LOG_INFO, "in6_ifadd: wrong prefixlen for %s "
+ "(prefix=%d ifid=%d)\n",
+ if_name(ifp), prefixlen, 128 - plen0));
+ return NULL;
+ }
+
+ /* make ifaddr */
+
+ bzero(&ifra, sizeof(ifra));
+ /*
+ * in6_update_ifa() does not use ifra_name, but we set it
+ * accurately for safety.
+ */
+ strncpy(ifra.ifra_name, if_name(ifp), sizeof(ifra.ifra_name));
+ ifra.ifra_addr.sin6_family = AF_INET6;
+ ifra.ifra_addr.sin6_len = sizeof(struct sockaddr_in6);
+ /* prefix */
+ ifra.ifra_addr.sin6_addr = pr->ndpr_prefix.sin6_addr;
+ ifra.ifra_addr.sin6_addr.s6_addr32[0] &= mask.s6_addr32[0];
+ ifra.ifra_addr.sin6_addr.s6_addr32[1] &= mask.s6_addr32[1];
+ ifra.ifra_addr.sin6_addr.s6_addr32[2] &= mask.s6_addr32[2];
+ ifra.ifra_addr.sin6_addr.s6_addr32[3] &= mask.s6_addr32[3];
+
+ /* interface ID */
+ ifra.ifra_addr.sin6_addr.s6_addr32[0] |=
+ (ib->ia_addr.sin6_addr.s6_addr32[0] & ~mask.s6_addr32[0]);
+ ifra.ifra_addr.sin6_addr.s6_addr32[1] |=
+ (ib->ia_addr.sin6_addr.s6_addr32[1] & ~mask.s6_addr32[1]);
+ ifra.ifra_addr.sin6_addr.s6_addr32[2] |=
+ (ib->ia_addr.sin6_addr.s6_addr32[2] & ~mask.s6_addr32[2]);
+ ifra.ifra_addr.sin6_addr.s6_addr32[3] |=
+ (ib->ia_addr.sin6_addr.s6_addr32[3] & ~mask.s6_addr32[3]);
+ ifa_free(ifa);
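+ /*
+ * Example of the prefix/IFID merge above: advertised prefix
+ * 2001:db8:1:2::/64 combined with a link-local address whose
+ * interface identifier is 0211:22ff:fe33:4455 yields the
+ * autoconfigured address 2001:db8:1:2:211:22ff:fe33:4455.
+ */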
+
+ /* new prefix mask. */
+ ifra.ifra_prefixmask.sin6_len = sizeof(struct sockaddr_in6);
+ ifra.ifra_prefixmask.sin6_family = AF_INET6;
+ bcopy(&mask, &ifra.ifra_prefixmask.sin6_addr,
+ sizeof(ifra.ifra_prefixmask.sin6_addr));
+
+ /* lifetimes. */
+ ifra.ifra_lifetime.ia6t_vltime = pr->ndpr_vltime;
+ ifra.ifra_lifetime.ia6t_pltime = pr->ndpr_pltime;
+
+ /* XXX: scope zone ID? */
+
+ ifra.ifra_flags |= IN6_IFF_AUTOCONF; /* obey autoconf */
+
+ /*
+ * Make sure that we do not have this address already. This should
+ * usually not happen, but we can still see this case, e.g., if the
+ * exact address to be configured was already configured manually.
+ */
+ ifa = (struct ifaddr *)in6ifa_ifpwithaddr(ifp,
+ &ifra.ifra_addr.sin6_addr);
+ if (ifa != NULL) {
+ ifa_free(ifa);
+ /* this should be rare enough to make an explicit log */
+ log(LOG_INFO, "in6_ifadd: %s is already configured\n",
+ ip6_sprintf(ip6buf, &ifra.ifra_addr.sin6_addr));
+ return (NULL);
+ }
+
+ /*
+ * Allocate ifaddr structure, link into chain, etc.
+ * If we are going to create a new address upon receiving a multicasted
+ * RA, we need to impose a random delay before starting DAD.
+ * [draft-ietf-ipv6-rfc2462bis-02.txt, Section 5.4.2]
+ */
+ updateflags = 0;
+ if (mcast)
+ updateflags |= IN6_IFAUPDATE_DADDELAY;
+ if ((error = in6_update_ifa(ifp, &ifra, NULL, updateflags)) != 0) {
+ nd6log((LOG_ERR,
+ "in6_ifadd: failed to make ifaddr %s on %s (errno=%d)\n",
+ ip6_sprintf(ip6buf, &ifra.ifra_addr.sin6_addr),
+ if_name(ifp), error));
+ return (NULL); /* ifaddr must not have been allocated. */
+ }
+
+ ia = in6ifa_ifpwithaddr(ifp, &ifra.ifra_addr.sin6_addr);
+ /*
+ * XXXRW: Assumption of non-NULLness here might not be true with
+ * fine-grained locking -- should we validate it? Or just return
+ * the earlier ifa rather than looking it up again?
+ */
+ return (ia); /* this is always non-NULL and referenced. */
+}
+
+/*
+ * ia0 - corresponding public address
+ */
+int
+in6_tmpifadd(const struct in6_ifaddr *ia0, int forcegen, int delay)
+{
+ struct ifnet *ifp = ia0->ia_ifa.ifa_ifp;
+ struct in6_ifaddr *newia, *ia;
+ struct in6_aliasreq ifra;
+ int i, error;
+ int trylimit = 3; /* XXX: adhoc value */
+ int updateflags;
+ u_int32_t randid[2];
+ time_t vltime0, pltime0;
+
+ bzero(&ifra, sizeof(ifra));
+ strncpy(ifra.ifra_name, if_name(ifp), sizeof(ifra.ifra_name));
+ ifra.ifra_addr = ia0->ia_addr;
+ /* copy prefix mask */
+ ifra.ifra_prefixmask = ia0->ia_prefixmask;
+ /* clear the old IFID */
+ for (i = 0; i < 4; i++) {
+ ifra.ifra_addr.sin6_addr.s6_addr32[i] &=
+ ifra.ifra_prefixmask.sin6_addr.s6_addr32[i];
+ }
+
+ again:
+ if (in6_get_tmpifid(ifp, (u_int8_t *)randid,
+ (const u_int8_t *)&ia0->ia_addr.sin6_addr.s6_addr[8], forcegen)) {
+ nd6log((LOG_NOTICE, "in6_tmpifadd: failed to find a good "
+ "random IFID\n"));
+ return (EINVAL);
+ }
+ ifra.ifra_addr.sin6_addr.s6_addr32[2] |=
+ (randid[0] & ~(ifra.ifra_prefixmask.sin6_addr.s6_addr32[2]));
+ ifra.ifra_addr.sin6_addr.s6_addr32[3] |=
+ (randid[1] & ~(ifra.ifra_prefixmask.sin6_addr.s6_addr32[3]));
+
+ /*
+ * in6_get_tmpifid() quite likely provided a unique interface ID.
+ * However, we may still see a collision, because
+ * there may be a time lag between generation of the ID and generation
+ * of the address. So, we'll do one more sanity check.
+ */
+ IN6_IFADDR_RLOCK();
+ TAILQ_FOREACH(ia, &V_in6_ifaddrhead, ia_link) {
+ if (IN6_ARE_ADDR_EQUAL(&ia->ia_addr.sin6_addr,
+ &ifra.ifra_addr.sin6_addr)) {
+ if (trylimit-- == 0) {
+ IN6_IFADDR_RUNLOCK();
+ /*
+ * Give up. Something strange must have
+ * happened.
+ */
+ nd6log((LOG_NOTICE, "in6_tmpifadd: failed to "
+ "find a unique random IFID\n"));
+ return (EEXIST);
+ }
+ IN6_IFADDR_RUNLOCK();
+ forcegen = 1;
+ goto again;
+ }
+ }
+ IN6_IFADDR_RUNLOCK();
+
+ /*
+ * The Valid Lifetime is the lower of the Valid Lifetime of the
+ * public address or TEMP_VALID_LIFETIME.
+ * The Preferred Lifetime is the lower of the Preferred Lifetime
+ * of the public address or TEMP_PREFERRED_LIFETIME -
+ * DESYNC_FACTOR.
+ */
+ if (ia0->ia6_lifetime.ia6t_vltime != ND6_INFINITE_LIFETIME) {
+ vltime0 = IFA6_IS_INVALID(ia0) ? 0 :
+ (ia0->ia6_lifetime.ia6t_vltime -
+ (time_second - ia0->ia6_updatetime));
+ if (vltime0 > V_ip6_temp_valid_lifetime)
+ vltime0 = V_ip6_temp_valid_lifetime;
+ } else
+ vltime0 = V_ip6_temp_valid_lifetime;
+ if (ia0->ia6_lifetime.ia6t_pltime != ND6_INFINITE_LIFETIME) {
+ pltime0 = IFA6_IS_DEPRECATED(ia0) ? 0 :
+ (ia0->ia6_lifetime.ia6t_pltime -
+ (time_second - ia0->ia6_updatetime));
+ if (pltime0 > V_ip6_temp_preferred_lifetime - V_ip6_desync_factor){
+ pltime0 = V_ip6_temp_preferred_lifetime -
+ V_ip6_desync_factor;
+ }
+ } else
+ pltime0 = V_ip6_temp_preferred_lifetime - V_ip6_desync_factor;
+ ifra.ifra_lifetime.ia6t_vltime = vltime0;
+ ifra.ifra_lifetime.ia6t_pltime = pltime0;
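+ /*
+ * Worked example of the computation above with the RFC 3041
+ * defaults (TEMP_VALID_LIFETIME = 1 week, TEMP_PREFERRED_LIFETIME
+ * = 1 day): a public address with 30 days of valid lifetime left
+ * yields vltime0 = 1 week, while 12 hours of preferred lifetime
+ * left yields pltime0 = 12 hours, already below one day minus the
+ * desync factor.
+ */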
+
+ /*
+ * A temporary address is created only if this calculated Preferred
+ * Lifetime is greater than REGEN_ADVANCE time units.
+ */
+ if (ifra.ifra_lifetime.ia6t_pltime <= V_ip6_temp_regen_advance)
+ return (0);
+
+ /* XXX: scope zone ID? */
+
+ ifra.ifra_flags |= (IN6_IFF_AUTOCONF|IN6_IFF_TEMPORARY);
+
+ /* allocate ifaddr structure, link into chain, etc. */
+ updateflags = 0;
+ if (delay)
+ updateflags |= IN6_IFAUPDATE_DADDELAY;
+ if ((error = in6_update_ifa(ifp, &ifra, NULL, updateflags)) != 0)
+ return (error);
+
+ newia = in6ifa_ifpwithaddr(ifp, &ifra.ifra_addr.sin6_addr);
+ if (newia == NULL) { /* XXX: can it happen? */
+ nd6log((LOG_ERR,
+ "in6_tmpifadd: ifa update succeeded, but we got "
+ "no ifaddr\n"));
+ return (EINVAL); /* XXX */
+ }
+ newia->ia6_ndpr = ia0->ia6_ndpr;
+ newia->ia6_ndpr->ndpr_refcnt++;
+ ifa_free(&newia->ia_ifa);
+
+ /*
+ * A newly added address might affect the status of other addresses.
+ * XXX: when the temporary address is generated with a new public
+ * address, the onlink check is redundant. However, it would be safe
+ * to do the check explicitly everywhere a new address is generated,
+ * and, in fact, we surely need the check when we create a new
+ * temporary address due to deprecation of an old temporary address.
+ */
+ pfxlist_onlink_check();
+
+ return (0);
+}
+
+static int
+in6_init_prefix_ltimes(struct nd_prefix *ndpr)
+{
+ if (ndpr->ndpr_pltime == ND6_INFINITE_LIFETIME)
+ ndpr->ndpr_preferred = 0;
+ else
+ ndpr->ndpr_preferred = time_second + ndpr->ndpr_pltime;
+ if (ndpr->ndpr_vltime == ND6_INFINITE_LIFETIME)
+ ndpr->ndpr_expire = 0;
+ else
+ ndpr->ndpr_expire = time_second + ndpr->ndpr_vltime;
+
+ return 0;
+}
+
+static void
+in6_init_address_ltimes(struct nd_prefix *new, struct in6_addrlifetime *lt6)
+{
+ /* init ia6t_expire */
+ if (lt6->ia6t_vltime == ND6_INFINITE_LIFETIME)
+ lt6->ia6t_expire = 0;
+ else {
+ lt6->ia6t_expire = time_second;
+ lt6->ia6t_expire += lt6->ia6t_vltime;
+ }
+
+ /* init ia6t_preferred */
+ if (lt6->ia6t_pltime == ND6_INFINITE_LIFETIME)
+ lt6->ia6t_preferred = 0;
+ else {
+ lt6->ia6t_preferred = time_second;
+ lt6->ia6t_preferred += lt6->ia6t_pltime;
+ }
+}
+
+/*
+ * Delete all the routing table entries that use the specified gateway.
+ * XXX: this function searches through all entries of the routing table, so
+ * it shouldn't be called when acting as a router.
+ */
+void
+rt6_flush(struct in6_addr *gateway, struct ifnet *ifp)
+{
+ struct radix_node_head *rnh;
+ int s = splnet();
+
+ /* We care only about link-local addresses */
+ if (!IN6_IS_ADDR_LINKLOCAL(gateway)) {
+ splx(s);
+ return;
+ }
+
+ rnh = rt_tables_get_rnh(0, AF_INET6);
+ if (rnh == NULL)
+ return;
+
+ RADIX_NODE_HEAD_LOCK(rnh);
+ rnh->rnh_walktree(rnh, rt6_deleteroute, (void *)gateway);
+ RADIX_NODE_HEAD_UNLOCK(rnh);
+ splx(s);
+}
+
+static int
+rt6_deleteroute(struct radix_node *rn, void *arg)
+{
+#define SIN6(s) ((struct sockaddr_in6 *)s)
+ struct rtentry *rt = (struct rtentry *)rn;
+ struct in6_addr *gate = (struct in6_addr *)arg;
+
+ if (rt->rt_gateway == NULL || rt->rt_gateway->sa_family != AF_INET6)
+ return (0);
+
+ if (!IN6_ARE_ADDR_EQUAL(gate, &SIN6(rt->rt_gateway)->sin6_addr)) {
+ return (0);
+ }
+
+ /*
+ * Do not delete a static route.
+ * XXX: this seems to be a bit ad-hoc. Should we consider the
+ * 'cloned' bit instead?
+ */
+ if ((rt->rt_flags & RTF_STATIC) != 0)
+ return (0);
+
+ /*
+ * We delete only host routes. In particular, this means we don't
+ * delete the default route.
+ */
+ if ((rt->rt_flags & RTF_HOST) == 0)
+ return (0);
+
+ return (rtrequest(RTM_DELETE, rt_key(rt), rt->rt_gateway,
+ rt_mask(rt), rt->rt_flags, 0));
+#undef SIN6
+}
+
+int
+nd6_setdefaultiface(int ifindex)
+{
+ int error = 0;
+
+ if (ifindex < 0 || V_if_index < ifindex)
+ return (EINVAL);
+ if (ifindex != 0 && !ifnet_byindex(ifindex))
+ return (EINVAL);
+
+ if (V_nd6_defifindex != ifindex) {
+ V_nd6_defifindex = ifindex;
+ if (V_nd6_defifindex > 0)
+ V_nd6_defifp = ifnet_byindex(V_nd6_defifindex);
+ else
+ V_nd6_defifp = NULL;
+
+ /*
+ * Our current implementation assumes one-to-one mapping between
+ * interfaces and links, so it would be natural to use the
+ * default interface as the default link.
+ */
+ scope6_setdefault(V_nd6_defifp);
+ }
+
+ return (error);
+}
diff --git a/rtems/freebsd/netinet6/pim6.h b/rtems/freebsd/netinet6/pim6.h
new file mode 100644
index 00000000..dec84cf2
--- /dev/null
+++ b/rtems/freebsd/netinet6/pim6.h
@@ -0,0 +1,69 @@
+/*-
+ * Copyright (C) 1998 WIDE Project.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the project nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $KAME: pim6.h,v 1.3 2000/03/25 07:23:58 sumikawa Exp $
+ * $FreeBSD$
+ */
+/*
+ * Protocol Independent Multicast (PIM) definitions
+ *
+ * Written by Ahmed Helmy, SGI, July 1996
+ *
+ * MULTICAST
+ */
+
+/*
+ * PIM packet header
+ */
+#define PIM_VERSION 2
+struct pim {
+#if defined(BYTE_ORDER) && (BYTE_ORDER == LITTLE_ENDIAN)
+ u_char pim_type:4, /* the PIM message type, currently they are:
+ * Hello, Register, Register-Stop, Join/Prune,
+ * Bootstrap, Assert, Graft (PIM-DM only),
+ * Graft-Ack (PIM-DM only), C-RP-Adv
+ */
+ pim_ver:4; /* PIM version number; 2 for PIMv2 */
+#else
+ u_char pim_ver:4, /* PIM version */
+ pim_type:4; /* PIM type */
+#endif
+ u_char pim_rsv; /* Reserved */
+ u_short pim_cksum; /* IP style check sum */
+};
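+/*
+ * Example: the version occupies the high-order nibble of the first
+ * octet and the type the low-order nibble, so a PIMv2 Register
+ * message starts with the octet 0x21 (pim_ver = 2, pim_type = 1);
+ * the bitfield order above only changes how that octet is named on
+ * little-endian hosts.
+ */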
+
+#define PIM_MINLEN 8 /* The header min. length is 8 */
+#define PIM6_REG_MINLEN (PIM_MINLEN+40) /* Register message + inner IP6 header */
+
+/*
+ * Message types
+ */
+#define PIM_REGISTER 1 /* PIM Register type is 1 */
+
+/* second bit in reg_head is the null bit */
+#define PIM_NULL_REGISTER 0x40000000
diff --git a/rtems/freebsd/netinet6/pim6_var.h b/rtems/freebsd/netinet6/pim6_var.h
new file mode 100644
index 00000000..8c63b5cc
--- /dev/null
+++ b/rtems/freebsd/netinet6/pim6_var.h
@@ -0,0 +1,68 @@
+/*-
+ * Copyright (C) 1998 WIDE Project.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the project nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $KAME: pim6_var.h,v 1.8 2000/06/06 08:07:43 jinmei Exp $
+ * $FreeBSD$
+ */
+
+/*
+ * Protocol Independent Multicast (PIM),
+ * implementation-specific definitions.
+ *
+ * Written by George Edmond Eddy (Rusty), ISI, February 1998
+ * Modified by Pavlin Ivanov Radoslavov, USC/ISI, May 1998
+ */
+
+#ifndef _NETINET6_PIM6_VAR_HH_
+#define _NETINET6_PIM6_VAR_HH_
+
+struct pim6stat {
+ u_quad_t pim6s_rcv_total; /* total PIM messages received */
+ u_quad_t pim6s_rcv_tooshort; /* received with too few bytes */
+ u_quad_t pim6s_rcv_badsum; /* received with bad checksum */
+ u_quad_t pim6s_rcv_badversion; /* received bad PIM version */
+ u_quad_t pim6s_rcv_registers; /* received registers */
+ u_quad_t pim6s_rcv_badregisters; /* received invalid registers */
+ u_quad_t pim6s_snd_registers; /* sent registers */
+};
+
+#if (defined(KERNEL)) || (defined(_KERNEL))
+int pim6_input __P((struct mbuf **, int*, int));
+#endif /* KERNEL */
+
+/*
+ * Names for PIM sysctl objects
+ */
+#define PIM6CTL_STATS 1 /* statistics (read-only) */
+#define PIM6CTL_MAXID 2
+
+#define PIM6CTL_NAMES { \
+ { 0, 0 }, \
+ { 0, 0 }, \
+}
+#endif /* _NETINET6_PIM6_VAR_HH_ */
diff --git a/rtems/freebsd/netinet6/raw_ip6.c b/rtems/freebsd/netinet6/raw_ip6.c
new file mode 100644
index 00000000..64894156
--- /dev/null
+++ b/rtems/freebsd/netinet6/raw_ip6.c
@@ -0,0 +1,905 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the project nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*-
+ * Copyright (c) 1982, 1986, 1988, 1993
+ * The Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)raw_ip.c 8.2 (Berkeley) 1/4/94
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/freebsd/local/opt_ipsec.h>
+#include <rtems/freebsd/local/opt_inet6.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/errno.h>
+#include <rtems/freebsd/sys/jail.h>
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/priv.h>
+#include <rtems/freebsd/sys/proc.h>
+#include <rtems/freebsd/sys/protosw.h>
+#include <rtems/freebsd/sys/signalvar.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/socketvar.h>
+#include <rtems/freebsd/sys/sx.h>
+#include <rtems/freebsd/sys/syslog.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/if_types.h>
+#include <rtems/freebsd/net/route.h>
+#include <rtems/freebsd/net/vnet.h>
+
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/in_var.h>
+#include <rtems/freebsd/netinet/in_systm.h>
+#include <rtems/freebsd/netinet/in_pcb.h>
+
+#include <rtems/freebsd/netinet/icmp6.h>
+#include <rtems/freebsd/netinet/ip6.h>
+#include <rtems/freebsd/netinet6/ip6protosw.h>
+#include <rtems/freebsd/netinet6/ip6_mroute.h>
+#include <rtems/freebsd/netinet6/in6_pcb.h>
+#include <rtems/freebsd/netinet6/ip6_var.h>
+#include <rtems/freebsd/netinet6/nd6.h>
+#include <rtems/freebsd/netinet6/raw_ip6.h>
+#include <rtems/freebsd/netinet6/scope6_var.h>
+
+#ifdef IPSEC
+#include <rtems/freebsd/netipsec/ipsec.h>
+#include <rtems/freebsd/netipsec/ipsec6.h>
+#endif /* IPSEC */
+
+#include <rtems/freebsd/machine/stdarg.h>
+
+#define satosin6(sa) ((struct sockaddr_in6 *)(sa))
+#define ifatoia6(ifa) ((struct in6_ifaddr *)(ifa))
+
+/*
+ * Raw interface to IP6 protocol.
+ */
+
+VNET_DECLARE(struct inpcbhead, ripcb);
+VNET_DECLARE(struct inpcbinfo, ripcbinfo);
+#define V_ripcb VNET(ripcb)
+#define V_ripcbinfo VNET(ripcbinfo)
+
+extern u_long rip_sendspace;
+extern u_long rip_recvspace;
+
+VNET_DEFINE(struct rip6stat, rip6stat);
+
+/*
+ * Hooks for multicast routing. They all default to NULL, so leave them
+ * uninitialized and rely on BSS being set to 0.
+ */
+
+/*
+ * The socket used to communicate with the multicast routing daemon.
+ */
+VNET_DEFINE(struct socket *, ip6_mrouter);
+
+/*
+ * The various mrouter functions.
+ */
+int (*ip6_mrouter_set)(struct socket *, struct sockopt *);
+int (*ip6_mrouter_get)(struct socket *, struct sockopt *);
+int (*ip6_mrouter_done)(void);
+int (*ip6_mforward)(struct ip6_hdr *, struct ifnet *, struct mbuf *);
+int (*mrt6_ioctl)(u_long, caddr_t);
+
+/*
+ * Set up generic address and protocol structures for the raw_input routine,
+ * then pass them along with the mbuf chain.
+ */
+int
+rip6_input(struct mbuf **mp, int *offp, int proto)
+{
+ struct ifnet *ifp;
+ struct mbuf *m = *mp;
+ register struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *);
+ register struct inpcb *in6p;
+ struct inpcb *last = 0;
+ struct mbuf *opts = NULL;
+ struct sockaddr_in6 fromsa;
+
+ V_rip6stat.rip6s_ipackets++;
+
+ if (faithprefix_p != NULL && (*faithprefix_p)(&ip6->ip6_dst)) {
+ /* XXX Send icmp6 host/port unreach? */
+ m_freem(m);
+ return (IPPROTO_DONE);
+ }
+
+ init_sin6(&fromsa, m); /* general init */
+
+ ifp = m->m_pkthdr.rcvif;
+
+ INP_INFO_RLOCK(&V_ripcbinfo);
+ LIST_FOREACH(in6p, &V_ripcb, inp_list) {
+ /* XXX inp locking */
+ if ((in6p->inp_vflag & INP_IPV6) == 0)
+ continue;
+ if (in6p->inp_ip_p &&
+ in6p->inp_ip_p != proto)
+ continue;
+ if (!IN6_IS_ADDR_UNSPECIFIED(&in6p->in6p_laddr) &&
+ !IN6_ARE_ADDR_EQUAL(&in6p->in6p_laddr, &ip6->ip6_dst))
+ continue;
+ if (!IN6_IS_ADDR_UNSPECIFIED(&in6p->in6p_faddr) &&
+ !IN6_ARE_ADDR_EQUAL(&in6p->in6p_faddr, &ip6->ip6_src))
+ continue;
+ if (jailed_without_vnet(in6p->inp_cred)) {
+ /*
+ * Allow raw socket in jail to receive multicast;
+ * assume process had PRIV_NETINET_RAW at attach,
+ * and fall through into normal filter path if so.
+ */
+ if (!IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) &&
+ prison_check_ip6(in6p->inp_cred,
+ &ip6->ip6_dst) != 0)
+ continue;
+ }
+ INP_RLOCK(in6p);
+ if (in6p->in6p_cksum != -1) {
+ V_rip6stat.rip6s_isum++;
+ if (in6_cksum(m, proto, *offp,
+ m->m_pkthdr.len - *offp)) {
+ INP_RUNLOCK(in6p);
+ V_rip6stat.rip6s_badsum++;
+ continue;
+ }
+ }
+ /*
+ * If this raw socket has multicast state, and we
+ * have received a multicast, check if this socket
+ * should receive it, as multicast filtering is now
+ * the responsibility of the transport layer.
+ */
+ if (in6p->in6p_moptions &&
+ IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst)) {
+ /*
+ * If the incoming datagram is for MLD, allow it
+ * through unconditionally to the raw socket.
+ *
+ * Use the M_RTALERT_MLD flag to check for MLD
+ * traffic without having to inspect the mbuf chain
+ * more deeply, as all MLDv1/v2 host messages MUST
+ * contain the Router Alert option.
+ *
+ * In the case of MLDv1, we may not have explicitly
+ * joined the group, and may have set IFF_ALLMULTI
+ * on the interface. im6o_mc_filter() may discard
+ * control traffic we actually need to see.
+ *
+ * Userland multicast routing daemons should continue
+ * to filter the control traffic appropriately.
+ */
+ int blocked;
+
+ blocked = MCAST_PASS;
+ if ((m->m_flags & M_RTALERT_MLD) == 0) {
+ struct sockaddr_in6 mcaddr;
+
+ bzero(&mcaddr, sizeof(struct sockaddr_in6));
+ mcaddr.sin6_len = sizeof(struct sockaddr_in6);
+ mcaddr.sin6_family = AF_INET6;
+ mcaddr.sin6_addr = ip6->ip6_dst;
+
+ blocked = im6o_mc_filter(in6p->in6p_moptions,
+ ifp,
+ (struct sockaddr *)&mcaddr,
+ (struct sockaddr *)&fromsa);
+ }
+ if (blocked != MCAST_PASS) {
+ IP6STAT_INC(ip6s_notmember);
+ INP_RUNLOCK(in6p);
+ continue;
+ }
+ }
+ if (last != NULL) {
+ struct mbuf *n = m_copy(m, 0, (int)M_COPYALL);
+
+#ifdef IPSEC
+ /*
+ * Check AH/ESP integrity.
+ */
+ if (n && ipsec6_in_reject(n, last)) {
+ m_freem(n);
+ V_ipsec6stat.in_polvio++;
+ /* Do not inject data into pcb. */
+ } else
+#endif /* IPSEC */
+ if (n) {
+ if (last->inp_flags & INP_CONTROLOPTS ||
+ last->inp_socket->so_options & SO_TIMESTAMP)
+ ip6_savecontrol(last, n, &opts);
+ /* strip intermediate headers */
+ m_adj(n, *offp);
+ if (sbappendaddr(&last->inp_socket->so_rcv,
+ (struct sockaddr *)&fromsa,
+ n, opts) == 0) {
+ m_freem(n);
+ if (opts)
+ m_freem(opts);
+ V_rip6stat.rip6s_fullsock++;
+ } else
+ sorwakeup(last->inp_socket);
+ opts = NULL;
+ }
+ INP_RUNLOCK(last);
+ }
+ last = in6p;
+ }
+ INP_INFO_RUNLOCK(&V_ripcbinfo);
+#ifdef IPSEC
+ /*
+ * Check AH/ESP integrity.
+ */
+ if ((last != NULL) && ipsec6_in_reject(m, last)) {
+ m_freem(m);
+ V_ipsec6stat.in_polvio++;
+ V_ip6stat.ip6s_delivered--;
+ /* Do not inject data into pcb. */
+ INP_RUNLOCK(last);
+ } else
+#endif /* IPSEC */
+ if (last != NULL) {
+ if (last->inp_flags & INP_CONTROLOPTS ||
+ last->inp_socket->so_options & SO_TIMESTAMP)
+ ip6_savecontrol(last, m, &opts);
+ /* Strip intermediate headers. */
+ m_adj(m, *offp);
+ if (sbappendaddr(&last->inp_socket->so_rcv,
+ (struct sockaddr *)&fromsa, m, opts) == 0) {
+ m_freem(m);
+ if (opts)
+ m_freem(opts);
+ V_rip6stat.rip6s_fullsock++;
+ } else
+ sorwakeup(last->inp_socket);
+ INP_RUNLOCK(last);
+ } else {
+ V_rip6stat.rip6s_nosock++;
+ if (m->m_flags & M_MCAST)
+ V_rip6stat.rip6s_nosockmcast++;
+ if (proto == IPPROTO_NONE)
+ m_freem(m);
+ else {
+ char *prvnxtp = ip6_get_prevhdr(m, *offp); /* XXX */
+ icmp6_error(m, ICMP6_PARAM_PROB,
+ ICMP6_PARAMPROB_NEXTHEADER,
+ prvnxtp - mtod(m, char *));
+ }
+ V_ip6stat.ip6s_delivered--;
+ }
+ return (IPPROTO_DONE);
+}
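+
+/*
+ * Illustrative sketch, not part of the original source: rip6_input()
+ * above uses the classic BSD raw-socket delivery pattern.  Delivery is
+ * deferred by one pcb so that every earlier match receives an m_copy()
+ * of the packet while the final match consumes the original mbuf; the
+ * common single-listener case therefore needs no copy at all.  All
+ * names below are hypothetical:
+ */
+#if 0
+static void deliver_to_all_matches(struct pcb *pcb_list_head, struct pkt *pkt)
+{
+ struct pcb *p, *last = NULL;
+
+ for (p = pcb_list_head; p != NULL; p = p->next) {
+ if (!matches(p, pkt))
+ continue;
+ if (last != NULL) {
+ struct pkt *n = copy_pkt(pkt); /* cf. m_copy(m, 0, M_COPYALL) */
+ if (n != NULL)
+ deliver(last, n); /* earlier match gets a copy */
+ }
+ last = p;
+ }
+ if (last != NULL)
+ deliver(last, pkt); /* final match consumes the original */
+ else
+ reject(pkt); /* no socket: ICMPv6 error or free */
+}
+#endif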
+
+void
+rip6_ctlinput(int cmd, struct sockaddr *sa, void *d)
+{
+ struct ip6_hdr *ip6;
+ struct mbuf *m;
+ int off = 0;
+ struct ip6ctlparam *ip6cp = NULL;
+ const struct sockaddr_in6 *sa6_src = NULL;
+ void *cmdarg;
+ struct inpcb *(*notify)(struct inpcb *, int) = in6_rtchange;
+
+ if (sa->sa_family != AF_INET6 ||
+ sa->sa_len != sizeof(struct sockaddr_in6))
+ return;
+
+ if ((unsigned)cmd >= PRC_NCMDS)
+ return;
+ if (PRC_IS_REDIRECT(cmd))
+ notify = in6_rtchange, d = NULL;
+ else if (cmd == PRC_HOSTDEAD)
+ d = NULL;
+ else if (inet6ctlerrmap[cmd] == 0)
+ return;
+
+ /*
+ * If the parameter is from icmp6, decode it.
+ */
+ if (d != NULL) {
+ ip6cp = (struct ip6ctlparam *)d;
+ m = ip6cp->ip6c_m;
+ ip6 = ip6cp->ip6c_ip6;
+ off = ip6cp->ip6c_off;
+ cmdarg = ip6cp->ip6c_cmdarg;
+ sa6_src = ip6cp->ip6c_src;
+ } else {
+ m = NULL;
+ ip6 = NULL;
+ cmdarg = NULL;
+ sa6_src = &sa6_any;
+ }
+
+ (void) in6_pcbnotify(&V_ripcbinfo, sa, 0,
+ (const struct sockaddr *)sa6_src, 0, cmd, cmdarg, notify);
+}
+
+/*
+ * Generate the IPv6 header and pass the packet to ip6_output. Tack on
+ * options the user may have set up with a control call.
+ */
+int
+#if __STDC__
+rip6_output(struct mbuf *m, ...)
+#else
+rip6_output(m, va_alist)
+ struct mbuf *m;
+ va_dcl
+#endif
+{
+ struct mbuf *control;
+ struct socket *so;
+ struct sockaddr_in6 *dstsock;
+ struct in6_addr *dst;
+ struct ip6_hdr *ip6;
+ struct inpcb *in6p;
+ u_int plen = m->m_pkthdr.len;
+ int error = 0;
+ struct ip6_pktopts opt, *optp;
+ struct ifnet *oifp = NULL;
+ int type = 0, code = 0; /* for ICMPv6 output statistics only */
+ int scope_ambiguous = 0;
+ int use_defzone = 0;
+ struct in6_addr in6a;
+ va_list ap;
+
+ va_start(ap, m);
+ so = va_arg(ap, struct socket *);
+ dstsock = va_arg(ap, struct sockaddr_in6 *);
+ control = va_arg(ap, struct mbuf *);
+ va_end(ap);
+
+ in6p = sotoinpcb(so);
+ INP_WLOCK(in6p);
+
+ dst = &dstsock->sin6_addr;
+ if (control != NULL) {
+ if ((error = ip6_setpktopts(control, &opt,
+ in6p->in6p_outputopts, so->so_cred,
+ so->so_proto->pr_protocol)) != 0) {
+ goto bad;
+ }
+ optp = &opt;
+ } else
+ optp = in6p->in6p_outputopts;
+
+ /*
+ * Check and convert scope zone ID into internal form.
+ *
+ * XXX: we may still need to determine the zone later.
+ */
+ if (!(so->so_state & SS_ISCONNECTED)) {
+ if (!optp || !optp->ip6po_pktinfo ||
+ !optp->ip6po_pktinfo->ipi6_ifindex)
+ use_defzone = V_ip6_use_defzone;
+ if (dstsock->sin6_scope_id == 0 && !use_defzone)
+ scope_ambiguous = 1;
+ if ((error = sa6_embedscope(dstsock, use_defzone)) != 0)
+ goto bad;
+ }
+
+ /*
+ * For an ICMPv6 packet, we should know its type and code to update
+ * statistics.
+ */
+ if (so->so_proto->pr_protocol == IPPROTO_ICMPV6) {
+ struct icmp6_hdr *icmp6;
+ if (m->m_len < sizeof(struct icmp6_hdr) &&
+ (m = m_pullup(m, sizeof(struct icmp6_hdr))) == NULL) {
+ error = ENOBUFS;
+ goto bad;
+ }
+ icmp6 = mtod(m, struct icmp6_hdr *);
+ type = icmp6->icmp6_type;
+ code = icmp6->icmp6_code;
+ }
+
+ M_PREPEND(m, sizeof(*ip6), M_DONTWAIT);
+ if (m == NULL) {
+ error = ENOBUFS;
+ goto bad;
+ }
+ ip6 = mtod(m, struct ip6_hdr *);
+
+ /*
+ * Source address selection.
+ */
+ error = in6_selectsrc(dstsock, optp, in6p, NULL, so->so_cred,
+ &oifp, &in6a);
+ if (error)
+ goto bad;
+ error = prison_check_ip6(in6p->inp_cred, &in6a);
+ if (error != 0)
+ goto bad;
+ ip6->ip6_src = in6a;
+
+ if (oifp && scope_ambiguous) {
+ /*
+ * The application should provide a proper zone ID, or the use of
+ * default zone IDs should be enabled. Unfortunately, some
+ * applications do not behave as they should, so we need a
+ * workaround. Even if an appropriate ID is not determined
+ * (when it's required), if we can determine the outgoing
+ * interface, determine the zone ID based on that interface.
+ */
+ error = in6_setscope(&dstsock->sin6_addr, oifp, NULL);
+ if (error != 0)
+ goto bad;
+ }
+ ip6->ip6_dst = dstsock->sin6_addr;
+
+ /*
+ * Fill in the rest of the IPv6 header fields.
+ */
+ ip6->ip6_flow = (ip6->ip6_flow & ~IPV6_FLOWINFO_MASK) |
+ (in6p->inp_flow & IPV6_FLOWINFO_MASK);
+ ip6->ip6_vfc = (ip6->ip6_vfc & ~IPV6_VERSION_MASK) |
+ (IPV6_VERSION & IPV6_VERSION_MASK);
+
+ /*
+ * ip6_plen will be filled in by ip6_output, so we don't fill it in here.
+ */
+ ip6->ip6_nxt = in6p->inp_ip_p;
+ ip6->ip6_hlim = in6_selecthlim(in6p, oifp);
+
+ if (so->so_proto->pr_protocol == IPPROTO_ICMPV6 ||
+ in6p->in6p_cksum != -1) {
+ struct mbuf *n;
+ int off;
+ u_int16_t *p;
+
+ /* Compute checksum. */
+ if (so->so_proto->pr_protocol == IPPROTO_ICMPV6)
+ off = offsetof(struct icmp6_hdr, icmp6_cksum);
+ else
+ off = in6p->in6p_cksum;
+ if (plen < off + 1) {
+ error = EINVAL;
+ goto bad;
+ }
+ off += sizeof(struct ip6_hdr);
+
+ n = m;
+ while (n && n->m_len <= off) {
+ off -= n->m_len;
+ n = n->m_next;
+ }
+ if (!n)
+ goto bad;
+ p = (u_int16_t *)(mtod(n, caddr_t) + off);
+ *p = 0;
+ *p = in6_cksum(m, ip6->ip6_nxt, sizeof(*ip6), plen);
+ }
+
+ error = ip6_output(m, optp, NULL, 0, in6p->in6p_moptions, &oifp, in6p);
+ if (so->so_proto->pr_protocol == IPPROTO_ICMPV6) {
+ if (oifp)
+ icmp6_ifoutstat_inc(oifp, type, code);
+ ICMP6STAT_INC(icp6s_outhist[type]);
+ } else
+ V_rip6stat.rip6s_opackets++;
+
+ goto freectl;
+
+ bad:
+ if (m)
+ m_freem(m);
+
+ freectl:
+ if (control != NULL) {
+ ip6_clearpktopts(&opt, -1);
+ m_freem(control);
+ }
+ INP_WUNLOCK(in6p);
+ return (error);
+}
+
+/*
+ * Raw IPv6 socket option processing.
+ */
+int
+rip6_ctloutput(struct socket *so, struct sockopt *sopt)
+{
+ int error;
+
+ if (sopt->sopt_level == IPPROTO_ICMPV6)
+ /*
+ * XXX: is it better to call icmp6_ctloutput() directly
+ * from protosw?
+ */
+ return (icmp6_ctloutput(so, sopt));
+ else if (sopt->sopt_level != IPPROTO_IPV6)
+ return (EINVAL);
+
+ error = 0;
+
+ switch (sopt->sopt_dir) {
+ case SOPT_GET:
+ switch (sopt->sopt_name) {
+ case MRT6_INIT:
+ case MRT6_DONE:
+ case MRT6_ADD_MIF:
+ case MRT6_DEL_MIF:
+ case MRT6_ADD_MFC:
+ case MRT6_DEL_MFC:
+ case MRT6_PIM:
+ error = ip6_mrouter_get ? ip6_mrouter_get(so, sopt) :
+ EOPNOTSUPP;
+ break;
+ case IPV6_CHECKSUM:
+ error = ip6_raw_ctloutput(so, sopt);
+ break;
+ default:
+ error = ip6_ctloutput(so, sopt);
+ break;
+ }
+ break;
+
+ case SOPT_SET:
+ switch (sopt->sopt_name) {
+ case MRT6_INIT:
+ case MRT6_DONE:
+ case MRT6_ADD_MIF:
+ case MRT6_DEL_MIF:
+ case MRT6_ADD_MFC:
+ case MRT6_DEL_MFC:
+ case MRT6_PIM:
+ error = ip6_mrouter_set ? ip6_mrouter_set(so, sopt) :
+ EOPNOTSUPP;
+ break;
+ case IPV6_CHECKSUM:
+ error = ip6_raw_ctloutput(so, sopt);
+ break;
+ default:
+ error = ip6_ctloutput(so, sopt);
+ break;
+ }
+ break;
+ }
+
+ return (error);
+}
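+
+/*
+ * Illustrative user-space sketch, not part of this file: the
+ * IPV6_CHECKSUM case above (handled by ip6_raw_ctloutput()) is the
+ * RFC 3542 interface behind in6p_cksum.  A non-negative value gives the
+ * byte offset of a 16-bit checksum field in the payload, which the
+ * kernel fills on output and verifies on input; -1 disables checksum
+ * processing.  For example, a raw OSPFv3 socket (protocol 89, whose
+ * header carries its checksum at byte offset 12) might be set up as:
+ */
+#if 0
+#include <sys/socket.h>
+#include <netinet/in.h>
+#include <stdio.h>
+
+static int open_ospf6_raw_socket(void)
+{
+ int fd, offset = 12; /* OSPFv3 checksum lives at byte 12 */
+
+ fd = socket(AF_INET6, SOCK_RAW, 89);
+ if (fd < 0)
+ return (-1);
+ if (setsockopt(fd, IPPROTO_IPV6, IPV6_CHECKSUM, &offset,
+ sizeof(offset)) < 0)
+ perror("setsockopt(IPV6_CHECKSUM)");
+ return (fd);
+}
+#endif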
+
+static int
+rip6_attach(struct socket *so, int proto, struct thread *td)
+{
+ struct inpcb *inp;
+ struct icmp6_filter *filter;
+ int error;
+
+ inp = sotoinpcb(so);
+ KASSERT(inp == NULL, ("rip6_attach: inp != NULL"));
+
+ error = priv_check(td, PRIV_NETINET_RAW);
+ if (error)
+ return (error);
+ error = soreserve(so, rip_sendspace, rip_recvspace);
+ if (error)
+ return (error);
+ filter = malloc(sizeof(struct icmp6_filter), M_PCB, M_NOWAIT);
+ if (filter == NULL)
+ return (ENOMEM);
+ INP_INFO_WLOCK(&V_ripcbinfo);
+ error = in_pcballoc(so, &V_ripcbinfo);
+ if (error) {
+ INP_INFO_WUNLOCK(&V_ripcbinfo);
+ free(filter, M_PCB);
+ return (error);
+ }
+ inp = (struct inpcb *)so->so_pcb;
+ INP_INFO_WUNLOCK(&V_ripcbinfo);
+ inp->inp_vflag |= INP_IPV6;
+ inp->inp_ip_p = (long)proto;
+ inp->in6p_hops = -1; /* use kernel default */
+ inp->in6p_cksum = -1;
+ inp->in6p_icmp6filt = filter;
+ ICMP6_FILTER_SETPASSALL(inp->in6p_icmp6filt);
+ INP_WUNLOCK(inp);
+ return (0);
+}
+
+static void
+rip6_detach(struct socket *so)
+{
+ struct inpcb *inp;
+
+ inp = sotoinpcb(so);
+ KASSERT(inp != NULL, ("rip6_detach: inp == NULL"));
+
+ if (so == V_ip6_mrouter && ip6_mrouter_done)
+ ip6_mrouter_done();
+ /* xxx: RSVP */
+ INP_INFO_WLOCK(&V_ripcbinfo);
+ INP_WLOCK(inp);
+ free(inp->in6p_icmp6filt, M_PCB);
+ in_pcbdetach(inp);
+ in_pcbfree(inp);
+ INP_INFO_WUNLOCK(&V_ripcbinfo);
+}
+
+/* XXXRW: This can't ever be called. */
+static void
+rip6_abort(struct socket *so)
+{
+ struct inpcb *inp;
+
+ inp = sotoinpcb(so);
+ KASSERT(inp != NULL, ("rip6_abort: inp == NULL"));
+
+ soisdisconnected(so);
+}
+
+static void
+rip6_close(struct socket *so)
+{
+ struct inpcb *inp;
+
+ inp = sotoinpcb(so);
+ KASSERT(inp != NULL, ("rip6_close: inp == NULL"));
+
+ soisdisconnected(so);
+}
+
+static int
+rip6_disconnect(struct socket *so)
+{
+ struct inpcb *inp;
+
+ inp = sotoinpcb(so);
+ KASSERT(inp != NULL, ("rip6_disconnect: inp == NULL"));
+
+ if ((so->so_state & SS_ISCONNECTED) == 0)
+ return (ENOTCONN);
+ inp->in6p_faddr = in6addr_any;
+ rip6_abort(so);
+ return (0);
+}
+
+static int
+rip6_bind(struct socket *so, struct sockaddr *nam, struct thread *td)
+{
+ struct inpcb *inp;
+ struct sockaddr_in6 *addr = (struct sockaddr_in6 *)nam;
+ struct ifaddr *ifa = NULL;
+ int error = 0;
+
+ inp = sotoinpcb(so);
+ KASSERT(inp != NULL, ("rip6_bind: inp == NULL"));
+
+ if (nam->sa_len != sizeof(*addr))
+ return (EINVAL);
+ if ((error = prison_check_ip6(td->td_ucred, &addr->sin6_addr)) != 0)
+ return (error);
+ if (TAILQ_EMPTY(&V_ifnet) || addr->sin6_family != AF_INET6)
+ return (EADDRNOTAVAIL);
+ if ((error = sa6_embedscope(addr, V_ip6_use_defzone)) != 0)
+ return (error);
+
+ if (!IN6_IS_ADDR_UNSPECIFIED(&addr->sin6_addr) &&
+ (ifa = ifa_ifwithaddr((struct sockaddr *)addr)) == NULL)
+ return (EADDRNOTAVAIL);
+ if (ifa != NULL &&
+ ((struct in6_ifaddr *)ifa)->ia6_flags &
+ (IN6_IFF_ANYCAST|IN6_IFF_NOTREADY|
+ IN6_IFF_DETACHED|IN6_IFF_DEPRECATED)) {
+ ifa_free(ifa);
+ return (EADDRNOTAVAIL);
+ }
+ if (ifa != NULL)
+ ifa_free(ifa);
+ INP_INFO_WLOCK(&V_ripcbinfo);
+ INP_WLOCK(inp);
+ inp->in6p_laddr = addr->sin6_addr;
+ INP_WUNLOCK(inp);
+ INP_INFO_WUNLOCK(&V_ripcbinfo);
+ return (0);
+}
+
+static int
+rip6_connect(struct socket *so, struct sockaddr *nam, struct thread *td)
+{
+ struct inpcb *inp;
+ struct sockaddr_in6 *addr = (struct sockaddr_in6 *)nam;
+ struct in6_addr in6a;
+ struct ifnet *ifp = NULL;
+ int error = 0, scope_ambiguous = 0;
+
+ inp = sotoinpcb(so);
+ KASSERT(inp != NULL, ("rip6_connect: inp == NULL"));
+
+ if (nam->sa_len != sizeof(*addr))
+ return (EINVAL);
+ if (TAILQ_EMPTY(&V_ifnet))
+ return (EADDRNOTAVAIL);
+ if (addr->sin6_family != AF_INET6)
+ return (EAFNOSUPPORT);
+
+ /*
+ * Application should provide a proper zone ID or the use of default
+ * zone IDs should be enabled. Unfortunately, some applications do
+ * not behave as they should, so we need a workaround. Even if an
+ * appropriate ID is not determined, we'll see if we can determine
+ * the outgoing interface. If we can, determine the zone ID based on
+ * the interface below.
+ */
+ if (addr->sin6_scope_id == 0 && !V_ip6_use_defzone)
+ scope_ambiguous = 1;
+ if ((error = sa6_embedscope(addr, V_ip6_use_defzone)) != 0)
+ return (error);
+
+ INP_INFO_WLOCK(&V_ripcbinfo);
+ INP_WLOCK(inp);
+ /* Source address selection. XXX: need pcblookup? */
+ error = in6_selectsrc(addr, inp->in6p_outputopts,
+ inp, NULL, so->so_cred, &ifp, &in6a);
+ if (error) {
+ INP_WUNLOCK(inp);
+ INP_INFO_WUNLOCK(&V_ripcbinfo);
+ return (error);
+ }
+
+ /* XXX: see above */
+ if (ifp && scope_ambiguous &&
+ (error = in6_setscope(&addr->sin6_addr, ifp, NULL)) != 0) {
+ INP_WUNLOCK(inp);
+ INP_INFO_WUNLOCK(&V_ripcbinfo);
+ return (error);
+ }
+ inp->in6p_faddr = addr->sin6_addr;
+ inp->in6p_laddr = in6a;
+ soisconnected(so);
+ INP_WUNLOCK(inp);
+ INP_INFO_WUNLOCK(&V_ripcbinfo);
+ return (0);
+}
+
+static int
+rip6_shutdown(struct socket *so)
+{
+ struct inpcb *inp;
+
+ inp = sotoinpcb(so);
+ KASSERT(inp != NULL, ("rip6_shutdown: inp == NULL"));
+
+ INP_WLOCK(inp);
+ socantsendmore(so);
+ INP_WUNLOCK(inp);
+ return (0);
+}
+
+static int
+rip6_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *nam,
+ struct mbuf *control, struct thread *td)
+{
+ struct inpcb *inp;
+ struct sockaddr_in6 tmp;
+ struct sockaddr_in6 *dst;
+ int ret;
+
+ inp = sotoinpcb(so);
+ KASSERT(inp != NULL, ("rip6_send: inp == NULL"));
+
+ /* Always copy sockaddr to avoid overwrites. */
+ /* Unlocked read. */
+ if (so->so_state & SS_ISCONNECTED) {
+ if (nam) {
+ m_freem(m);
+ return (EISCONN);
+ }
+ /* XXX */
+ bzero(&tmp, sizeof(tmp));
+ tmp.sin6_family = AF_INET6;
+ tmp.sin6_len = sizeof(struct sockaddr_in6);
+ INP_RLOCK(inp);
+ bcopy(&inp->in6p_faddr, &tmp.sin6_addr,
+ sizeof(struct in6_addr));
+ INP_RUNLOCK(inp);
+ dst = &tmp;
+ } else {
+ if (nam == NULL) {
+ m_freem(m);
+ return (ENOTCONN);
+ }
+ if (nam->sa_len != sizeof(struct sockaddr_in6)) {
+ m_freem(m);
+ return (EINVAL);
+ }
+ tmp = *(struct sockaddr_in6 *)nam;
+ dst = &tmp;
+
+ if (dst->sin6_family == AF_UNSPEC) {
+ /*
+ * XXX: we allow this case for backward
+ * compatibility to buggy applications that
+ * rely on old (and wrong) kernel behavior.
+ */
+ log(LOG_INFO, "rip6 SEND: address family is "
+ "unspec. Assume AF_INET6\n");
+ dst->sin6_family = AF_INET6;
+ } else if (dst->sin6_family != AF_INET6) {
+ m_freem(m);
+ return(EAFNOSUPPORT);
+ }
+ }
+ ret = rip6_output(m, so, dst, control);
+ return (ret);
+}
+
+struct pr_usrreqs rip6_usrreqs = {
+ .pru_abort = rip6_abort,
+ .pru_attach = rip6_attach,
+ .pru_bind = rip6_bind,
+ .pru_connect = rip6_connect,
+ .pru_control = in6_control,
+ .pru_detach = rip6_detach,
+ .pru_disconnect = rip6_disconnect,
+ .pru_peeraddr = in6_getpeeraddr,
+ .pru_send = rip6_send,
+ .pru_shutdown = rip6_shutdown,
+ .pru_sockaddr = in6_getsockaddr,
+ .pru_close = rip6_close,
+};
diff --git a/rtems/freebsd/netinet6/raw_ip6.h b/rtems/freebsd/netinet6/raw_ip6.h
new file mode 100644
index 00000000..c4491d01
--- /dev/null
+++ b/rtems/freebsd/netinet6/raw_ip6.h
@@ -0,0 +1,55 @@
+/*-
+ * Copyright (C) 2001 WIDE Project.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the project nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $KAME: raw_ip6.h,v 1.2 2001/05/27 13:28:35 itojun Exp $
+ * $FreeBSD$
+ */
+
+#ifndef _NETINET6_RAW_IP6_HH_
+#define _NETINET6_RAW_IP6_HH_
+
+/*
+ * ICMPv6 statistics are counted separately; see netinet/icmp6.h.
+ */
+struct rip6stat {
+ u_quad_t rip6s_ipackets; /* total input packets */
+ u_quad_t rip6s_isum; /* input checksum computations */
+ u_quad_t rip6s_badsum; /* of above, checksum error */
+ u_quad_t rip6s_nosock; /* no matching socket */
+ u_quad_t rip6s_nosockmcast; /* of above, arrived as multicast */
+ u_quad_t rip6s_fullsock; /* not delivered, input socket full */
+
+ u_quad_t rip6s_opackets; /* total output packets */
+};
+
+#ifdef _KERNEL
+VNET_DECLARE(struct rip6stat, rip6stat);
+#define V_rip6stat VNET(rip6stat)
+#endif
+
+#endif
diff --git a/rtems/freebsd/netinet6/route6.c b/rtems/freebsd/netinet6/route6.c
new file mode 100644
index 00000000..524600f5
--- /dev/null
+++ b/rtems/freebsd/netinet6/route6.c
@@ -0,0 +1,111 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the project nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $KAME: route6.c,v 1.24 2001/03/14 03:07:05 itojun Exp $
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/freebsd/local/opt_inet.h>
+#include <rtems/freebsd/local/opt_inet6.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/queue.h>
+
+#include <rtems/freebsd/net/if.h>
+
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet6/in6_var.h>
+#include <rtems/freebsd/netinet/ip6.h>
+#include <rtems/freebsd/netinet6/ip6_var.h>
+#include <rtems/freebsd/netinet6/scope6_var.h>
+
+#include <rtems/freebsd/netinet/icmp6.h>
+
+/*
+ * The proto argument is unused.
+ */
+
+int
+route6_input(struct mbuf **mp, int *offp, int proto)
+{
+ struct ip6_hdr *ip6;
+ struct mbuf *m = *mp;
+ struct ip6_rthdr *rh;
+ int off = *offp, rhlen;
+ struct ip6aux *ip6a;
+
+ ip6a = ip6_findaux(m);
+ if (ip6a) {
+ /* XXX reject home-address option before rthdr */
+ if (ip6a->ip6a_flags & IP6A_SWAP) {
+ V_ip6stat.ip6s_badoptions++;
+ m_freem(m);
+ return IPPROTO_DONE;
+ }
+ }
+
+#ifndef PULLDOWN_TEST
+ IP6_EXTHDR_CHECK(m, off, sizeof(*rh), IPPROTO_DONE);
+ ip6 = mtod(m, struct ip6_hdr *);
+ rh = (struct ip6_rthdr *)((caddr_t)ip6 + off);
+#else
+ ip6 = mtod(m, struct ip6_hdr *);
+ IP6_EXTHDR_GET(rh, struct ip6_rthdr *, m, off, sizeof(*rh));
+ if (rh == NULL) {
+ V_ip6stat.ip6s_tooshort++;
+ return IPPROTO_DONE;
+ }
+#endif
+
+ /*
+ * While this switch may look gratuitous, leave it in
+ * for the benefit of future RH2 implementations, etc.
+ */
+ switch (rh->ip6r_type) {
+ default:
+ /* Unknown routing header type. */
+ if (rh->ip6r_segleft == 0) {
+ rhlen = (rh->ip6r_len + 1) << 3;
+ break; /* Final dst. Just ignore the header. */
+ }
+ V_ip6stat.ip6s_badoptions++;
+ icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER,
+ (caddr_t)&rh->ip6r_type - (caddr_t)ip6);
+ return (IPPROTO_DONE);
+ }
+
+ *offp += rhlen;
+ return (rh->ip6r_nxt);
+}
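+
+/*
+ * Illustrative note, not from the original file: ip6r_len counts the
+ * routing header in 8-octet units, excluding the first 8 octets, hence
+ * the length used above to advance *offp:
+ *
+ * rhlen = (rh->ip6r_len + 1) << 3; e.g. ip6r_len == 2 -> 24 bytes
+ */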
diff --git a/rtems/freebsd/netinet6/scope6.c b/rtems/freebsd/netinet6/scope6.c
new file mode 100644
index 00000000..a11258d3
--- /dev/null
+++ b/rtems/freebsd/netinet6/scope6.c
@@ -0,0 +1,498 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (C) 2000 WIDE Project.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the project nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $KAME: scope6.c,v 1.10 2000/07/24 13:29:31 itojun Exp $
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/queue.h>
+#include <rtems/freebsd/sys/syslog.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/vnet.h>
+
+#include <rtems/freebsd/netinet/in.h>
+
+#include <rtems/freebsd/netinet/ip6.h>
+#include <rtems/freebsd/netinet6/in6_var.h>
+#include <rtems/freebsd/netinet6/ip6_var.h>
+#include <rtems/freebsd/netinet6/scope6_var.h>
+
+#ifdef ENABLE_DEFAULT_SCOPE
+VNET_DEFINE(int, ip6_use_defzone) = 1;
+#else
+VNET_DEFINE(int, ip6_use_defzone) = 0;
+#endif
+
+/*
+ * The scope6_lock protects the global sid default stored in
+ * sid_default below.
+ */
+static struct mtx scope6_lock;
+#define SCOPE6_LOCK_INIT() mtx_init(&scope6_lock, "scope6_lock", NULL, MTX_DEF)
+#define SCOPE6_LOCK() mtx_lock(&scope6_lock)
+#define SCOPE6_UNLOCK() mtx_unlock(&scope6_lock)
+#define SCOPE6_LOCK_ASSERT() mtx_assert(&scope6_lock, MA_OWNED)
+
+static VNET_DEFINE(struct scope6_id, sid_default);
+#define V_sid_default VNET(sid_default)
+
+#define SID(ifp) \
+ (((struct in6_ifextra *)(ifp)->if_afdata[AF_INET6])->scope6_id)
+
+void
+scope6_init(void)
+{
+
+ bzero(&V_sid_default, sizeof(V_sid_default));
+
+ if (!IS_DEFAULT_VNET(curvnet))
+ return;
+
+ SCOPE6_LOCK_INIT();
+}
+
+struct scope6_id *
+scope6_ifattach(struct ifnet *ifp)
+{
+ struct scope6_id *sid;
+
+ sid = (struct scope6_id *)malloc(sizeof(*sid), M_IFADDR, M_WAITOK);
+ bzero(sid, sizeof(*sid));
+
+ /*
+ * XXX: IPV6_ADDR_SCOPE_xxx macros are not standard.
+ * Should we rather hardcode here?
+ */
+ sid->s6id_list[IPV6_ADDR_SCOPE_INTFACELOCAL] = ifp->if_index;
+ sid->s6id_list[IPV6_ADDR_SCOPE_LINKLOCAL] = ifp->if_index;
+#ifdef MULTI_SCOPE
+ /* by default, we don't care about scope boundary for these scopes. */
+ sid->s6id_list[IPV6_ADDR_SCOPE_SITELOCAL] = 1;
+ sid->s6id_list[IPV6_ADDR_SCOPE_ORGLOCAL] = 1;
+#endif
+
+ return sid;
+}
+
+void
+scope6_ifdetach(struct scope6_id *sid)
+{
+
+ free(sid, M_IFADDR);
+}
+
+int
+scope6_set(struct ifnet *ifp, struct scope6_id *idlist)
+{
+ int i;
+ int error = 0;
+ struct scope6_id *sid = NULL;
+
+ IF_AFDATA_LOCK(ifp);
+ sid = SID(ifp);
+
+ if (!sid) { /* paranoid? */
+ IF_AFDATA_UNLOCK(ifp);
+ return (EINVAL);
+ }
+
+ /*
+ * XXX: We need more consistency checks of the relationship among
+ * scopes (e.g. an organization should be larger than a site).
+ */
+
+ /*
+ * TODO(XXX): after setting, we should reflect the changes to
+ * interface addresses, routing table entries, PCB entries...
+ */
+
+ SCOPE6_LOCK();
+ for (i = 0; i < 16; i++) {
+ if (idlist->s6id_list[i] &&
+ idlist->s6id_list[i] != sid->s6id_list[i]) {
+ /*
+ * An interface zone ID must be the corresponding
+ * interface index by definition.
+ */
+ if (i == IPV6_ADDR_SCOPE_INTFACELOCAL &&
+ idlist->s6id_list[i] != ifp->if_index) {
+ IF_AFDATA_UNLOCK(ifp);
+ SCOPE6_UNLOCK();
+ return (EINVAL);
+ }
+
+ if (i == IPV6_ADDR_SCOPE_LINKLOCAL &&
+ idlist->s6id_list[i] > V_if_index) {
+ /*
+ * XXX: theoretically, there should be no
+ * relationship between link IDs and interface
+ * IDs, but we check the consistency for
+ * safety in later use.
+ */
+ IF_AFDATA_UNLOCK(ifp);
+ SCOPE6_UNLOCK();
+ return (EINVAL);
+ }
+
+ /*
+ * XXX: we may need a lot of work in this case,
+ * but we simply set the new value in this initial
+ * implementation.
+ */
+ sid->s6id_list[i] = idlist->s6id_list[i];
+ }
+ }
+ SCOPE6_UNLOCK();
+ IF_AFDATA_UNLOCK(ifp);
+
+ return (error);
+}
+
+int
+scope6_get(struct ifnet *ifp, struct scope6_id *idlist)
+{
+ struct scope6_id *sid;
+
+ /* We only need to lock the interface's afdata for SID() to work. */
+ IF_AFDATA_LOCK(ifp);
+ sid = SID(ifp);
+
+ if (sid == NULL) { /* paranoid? */
+ IF_AFDATA_UNLOCK(ifp);
+ return (EINVAL);
+ }
+
+ SCOPE6_LOCK();
+ *idlist = *sid;
+ SCOPE6_UNLOCK();
+
+ IF_AFDATA_UNLOCK(ifp);
+ return (0);
+}
+
+
+/*
+ * Get the scope of the address: node-local, link-local, site-local, or global.
+ */
+int
+in6_addrscope(struct in6_addr *addr)
+{
+ int scope;
+
+ if (addr->s6_addr[0] == 0xfe) {
+ scope = addr->s6_addr[1] & 0xc0;
+
+ switch (scope) {
+ case 0x80:
+ return IPV6_ADDR_SCOPE_LINKLOCAL;
+ break;
+ case 0xc0:
+ return IPV6_ADDR_SCOPE_SITELOCAL;
+ break;
+ default:
+ return IPV6_ADDR_SCOPE_GLOBAL; /* just in case */
+ break;
+ }
+ }
+
+
+ if (addr->s6_addr[0] == 0xff) {
+ scope = addr->s6_addr[1] & 0x0f;
+
+ /*
+ * Because of other scopes, such as reserved ones, simply
+ * returning the raw scope value doesn't work.
+ */
+ switch (scope) {
+ case IPV6_ADDR_SCOPE_INTFACELOCAL:
+ return IPV6_ADDR_SCOPE_INTFACELOCAL;
+ break;
+ case IPV6_ADDR_SCOPE_LINKLOCAL:
+ return IPV6_ADDR_SCOPE_LINKLOCAL;
+ break;
+ case IPV6_ADDR_SCOPE_SITELOCAL:
+ return IPV6_ADDR_SCOPE_SITELOCAL;
+ break;
+ default:
+ return IPV6_ADDR_SCOPE_GLOBAL;
+ break;
+ }
+ }
+
+ /*
+ * Regard loopback and unspecified addresses as global, since
+ * they have no ambiguity.
+ */
+ if (bcmp(&in6addr_loopback, addr, sizeof(*addr) - 1) == 0) {
+ if (addr->s6_addr[15] == 1) /* loopback */
+ return IPV6_ADDR_SCOPE_LINKLOCAL;
+ if (addr->s6_addr[15] == 0) /* unspecified */
+ return IPV6_ADDR_SCOPE_GLOBAL; /* XXX: correct? */
+ }
+
+ return IPV6_ADDR_SCOPE_GLOBAL;
+}
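+
+/*
+ * Illustrative sketch, not from the original file: the classification
+ * above keys off the leading octets -- fe80::/10 is link-local,
+ * fec0::/10 site-local, and for ff00::/8 multicast the low nibble of
+ * the second octet is the 4-bit scope field.  A self-contained
+ * user-space equivalent:
+ */
+#if 0
+#include <netinet/in.h>
+#include <arpa/inet.h>
+#include <stdio.h>
+
+static const char *addr_scope(const struct in6_addr *a)
+{
+ if (a->s6_addr[0] == 0xfe && (a->s6_addr[1] & 0xc0) == 0x80)
+ return ("link-local");
+ if (a->s6_addr[0] == 0xfe && (a->s6_addr[1] & 0xc0) == 0xc0)
+ return ("site-local");
+ if (a->s6_addr[0] == 0xff) {
+ switch (a->s6_addr[1] & 0x0f) {
+ case 0x01: return ("interface-local multicast");
+ case 0x02: return ("link-local multicast");
+ case 0x05: return ("site-local multicast");
+ default: return ("other multicast scope");
+ }
+ }
+ return ("global");
+}
+
+int main(void)
+{
+ struct in6_addr a;
+
+ inet_pton(AF_INET6, "ff02::1", &a);
+ printf("%s\n", addr_scope(&a)); /* prints "link-local multicast" */
+ return (0);
+}
+#endif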
+
+/*
+ * ifp - note that this might be NULL
+ */
+
+void
+scope6_setdefault(struct ifnet *ifp)
+{
+
+ /*
+ * Currently, this function just sets the default "interfaces"
+ * and "links" according to the given interface.
+ * We might eventually have to separate the notion of "link" from
+ * "interface" and provide a user interface to set the default.
+ */
+ SCOPE6_LOCK();
+ if (ifp) {
+ V_sid_default.s6id_list[IPV6_ADDR_SCOPE_INTFACELOCAL] =
+ ifp->if_index;
+ V_sid_default.s6id_list[IPV6_ADDR_SCOPE_LINKLOCAL] =
+ ifp->if_index;
+ } else {
+ V_sid_default.s6id_list[IPV6_ADDR_SCOPE_INTFACELOCAL] = 0;
+ V_sid_default.s6id_list[IPV6_ADDR_SCOPE_LINKLOCAL] = 0;
+ }
+ SCOPE6_UNLOCK();
+}
+
+int
+scope6_get_default(struct scope6_id *idlist)
+{
+
+ SCOPE6_LOCK();
+ *idlist = V_sid_default;
+ SCOPE6_UNLOCK();
+
+ return (0);
+}
+
+u_int32_t
+scope6_addr2default(struct in6_addr *addr)
+{
+ u_int32_t id;
+
+ /*
+ * special case: The loopback address should be considered as
+ * link-local, but there's no ambiguity in the syntax.
+ */
+ if (IN6_IS_ADDR_LOOPBACK(addr))
+ return (0);
+
+ /*
+ * XXX: 32-bit read is atomic on all our platforms, is it OK
+ * not to lock here?
+ */
+ SCOPE6_LOCK();
+ id = V_sid_default.s6id_list[in6_addrscope(addr)];
+ SCOPE6_UNLOCK();
+ return (id);
+}
+
+/*
+ * Validate the specified scope zone ID in the sin6_scope_id field. If the ID
+ * is unspecified (=0) but needs to be specified, and the default zone ID can
+ * be used, the default value is applied.
+ * This routine then generates the kernel-internal form: if the address scope
+ * is interface-local or link-local, embed the interface index in the
+ * address.
+ */
+int
+sa6_embedscope(struct sockaddr_in6 *sin6, int defaultok)
+{
+ struct ifnet *ifp;
+ u_int32_t zoneid;
+
+ if ((zoneid = sin6->sin6_scope_id) == 0 && defaultok)
+ zoneid = scope6_addr2default(&sin6->sin6_addr);
+
+ if (zoneid != 0 &&
+ (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr) ||
+ IN6_IS_ADDR_MC_INTFACELOCAL(&sin6->sin6_addr))) {
+ /*
+ * At this moment, we only check interface-local and
+ * link-local scope IDs, and use interface indices as the
+ * zone IDs assuming a one-to-one mapping between interfaces
+ * and links.
+ */
+ if (V_if_index < zoneid)
+ return (ENXIO);
+ ifp = ifnet_byindex(zoneid);
+ if (ifp == NULL) /* XXX: this can happen for some OS */
+ return (ENXIO);
+
+ /* XXX assignment to 16bit from 32bit variable */
+ sin6->sin6_addr.s6_addr16[1] = htons(zoneid & 0xffff);
+
+ sin6->sin6_scope_id = 0;
+ }
+
+ return 0;
+}
+
+/*
+ * Generate a standard sockaddr_in6 from the embedded form.
+ */
+int
+sa6_recoverscope(struct sockaddr_in6 *sin6)
+{
+ char ip6buf[INET6_ADDRSTRLEN];
+ u_int32_t zoneid;
+
+ if (sin6->sin6_scope_id != 0) {
+ log(LOG_NOTICE,
+ "sa6_recoverscope: assumption failure (non 0 ID): %s%%%d\n",
+ ip6_sprintf(ip6buf, &sin6->sin6_addr), sin6->sin6_scope_id);
+ /* XXX: proceed anyway... */
+ }
+ if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr) ||
+ IN6_IS_ADDR_MC_INTFACELOCAL(&sin6->sin6_addr)) {
+ /*
+ * KAME assumption: link id == interface id
+ */
+ zoneid = ntohs(sin6->sin6_addr.s6_addr16[1]);
+ if (zoneid) {
+ /* sanity check */
+ if (zoneid < 0 || V_if_index < zoneid)
+ return (ENXIO);
+ if (!ifnet_byindex(zoneid))
+ return (ENXIO);
+ sin6->sin6_addr.s6_addr16[1] = 0;
+ sin6->sin6_scope_id = zoneid;
+ }
+ }
+
+ return 0;
+}
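+
+/*
+ * Illustrative sketch, not from the original file: the KAME-internal
+ * form used by sa6_embedscope()/sa6_recoverscope() above stores the
+ * zone ID in the second 16-bit word of a link-local address, which is
+ * always zero on the wire.  A round trip for fe80::1 in zone 2 (the
+ * function name is hypothetical):
+ */
+#if 0
+static void kame_embed_recover_demo(struct sockaddr_in6 *sin6)
+{
+ /* embed: fe80::1 with sin6_scope_id == 2 becomes fe80:2::1 inside */
+ sin6->sin6_addr.s6_addr16[1] = htons(sin6->sin6_scope_id & 0xffff);
+ sin6->sin6_scope_id = 0;
+
+ /* recover: pull the zone back out, restoring the wire-format address */
+ sin6->sin6_scope_id = ntohs(sin6->sin6_addr.s6_addr16[1]);
+ sin6->sin6_addr.s6_addr16[1] = 0;
+}
+#endif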
+
+/*
+ * Determine the appropriate scope zone ID for in6 and ifp. If ret_id is
+ * non NULL, it is set to the zone ID. If the zone ID needs to be embedded
+ * in the in6_addr structure, in6 will be modified.
+ *
+ * ret_id - unnecessary?
+ */
+int
+in6_setscope(struct in6_addr *in6, struct ifnet *ifp, u_int32_t *ret_id)
+{
+ int scope;
+ u_int32_t zoneid = 0;
+ struct scope6_id *sid;
+
+ IF_AFDATA_LOCK(ifp);
+
+ sid = SID(ifp);
+
+#ifdef DIAGNOSTIC
+ if (sid == NULL) { /* should not happen */
+ panic("in6_setscope: scope array is NULL");
+ /* NOTREACHED */
+ }
+#endif
+
+ /*
+ * special case: the loopback address can only belong to a loopback
+ * interface.
+ */
+ if (IN6_IS_ADDR_LOOPBACK(in6)) {
+ if (!(ifp->if_flags & IFF_LOOPBACK)) {
+ IF_AFDATA_UNLOCK(ifp);
+ return (EINVAL);
+ } else {
+ if (ret_id != NULL)
+ *ret_id = 0; /* there's no ambiguity */
+ IF_AFDATA_UNLOCK(ifp);
+ return (0);
+ }
+ }
+
+ scope = in6_addrscope(in6);
+
+ SCOPE6_LOCK();
+ switch (scope) {
+ case IPV6_ADDR_SCOPE_INTFACELOCAL: /* should be interface index */
+ zoneid = sid->s6id_list[IPV6_ADDR_SCOPE_INTFACELOCAL];
+ break;
+
+ case IPV6_ADDR_SCOPE_LINKLOCAL:
+ zoneid = sid->s6id_list[IPV6_ADDR_SCOPE_LINKLOCAL];
+ break;
+
+ case IPV6_ADDR_SCOPE_SITELOCAL:
+ zoneid = sid->s6id_list[IPV6_ADDR_SCOPE_SITELOCAL];
+ break;
+
+ case IPV6_ADDR_SCOPE_ORGLOCAL:
+ zoneid = sid->s6id_list[IPV6_ADDR_SCOPE_ORGLOCAL];
+ break;
+
+ default:
+ zoneid = 0; /* XXX: treat as global. */
+ break;
+ }
+ SCOPE6_UNLOCK();
+ IF_AFDATA_UNLOCK(ifp);
+
+ if (ret_id != NULL)
+ *ret_id = zoneid;
+
+ if (IN6_IS_SCOPE_LINKLOCAL(in6) || IN6_IS_ADDR_MC_INTFACELOCAL(in6))
+ in6->s6_addr16[1] = htons(zoneid & 0xffff); /* XXX */
+
+ return (0);
+}
+
+/*
+ * Just clear the embedded scope identifier. Return 0 if the original address
+ * is intact; return non 0 if the address is modified.
+ */
+int
+in6_clearscope(struct in6_addr *in6)
+{
+ int modified = 0;
+
+ if (IN6_IS_SCOPE_LINKLOCAL(in6) || IN6_IS_ADDR_MC_INTFACELOCAL(in6)) {
+ if (in6->s6_addr16[1] != 0)
+ modified = 1;
+ in6->s6_addr16[1] = 0;
+ }
+
+ return (modified);
+}
diff --git a/rtems/freebsd/netinet6/scope6_var.h b/rtems/freebsd/netinet6/scope6_var.h
new file mode 100644
index 00000000..a87aa57c
--- /dev/null
+++ b/rtems/freebsd/netinet6/scope6_var.h
@@ -0,0 +1,60 @@
+/*-
+ * Copyright (C) 2000 WIDE Project.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the project nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $KAME: scope6_var.h,v 1.4 2000/05/18 15:03:27 jinmei Exp $
+ * $FreeBSD$
+ */
+
+#ifndef _NETINET6_SCOPE6_VAR_HH_
+#define _NETINET6_SCOPE6_VAR_HH_
+
+#ifdef _KERNEL
+struct scope6_id {
+ /*
+ * 16 corresponds to the 4-bit multicast scope field,
+ * i.e. from node-local to global, with some reserved/unassigned types.
+ */
+ u_int32_t s6id_list[16];
+};
+
+void scope6_init __P((void));
+struct scope6_id *scope6_ifattach __P((struct ifnet *));
+void scope6_ifdetach __P((struct scope6_id *));
+int scope6_set __P((struct ifnet *, struct scope6_id *));
+int scope6_get __P((struct ifnet *, struct scope6_id *));
+void scope6_setdefault __P((struct ifnet *));
+int scope6_get_default __P((struct scope6_id *));
+u_int32_t scope6_in6_addrscope __P((struct in6_addr *));
+u_int32_t scope6_addr2default __P((struct in6_addr *));
+int sa6_embedscope __P((struct sockaddr_in6 *, int));
+int sa6_recoverscope __P((struct sockaddr_in6 *));
+int in6_setscope __P((struct in6_addr *, struct ifnet *, u_int32_t *));
+int in6_clearscope __P((struct in6_addr *));
+#endif /* _KERNEL */
+
+#endif /* _NETINET6_SCOPE6_VAR_HH_ */
diff --git a/rtems/freebsd/netinet6/sctp6_usrreq.c b/rtems/freebsd/netinet6/sctp6_usrreq.c
new file mode 100644
index 00000000..bfd24cb6
--- /dev/null
+++ b/rtems/freebsd/netinet6/sctp6_usrreq.c
@@ -0,0 +1,1319 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+/* $KAME: sctp6_usrreq.c,v 1.38 2005/08/24 08:08:56 suz Exp $ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/freebsd/netinet/sctp_os.h>
+#include <rtems/freebsd/sys/proc.h>
+#include <rtems/freebsd/netinet/sctp_pcb.h>
+#include <rtems/freebsd/netinet/sctp_header.h>
+#include <rtems/freebsd/netinet/sctp_var.h>
+#if defined(INET6)
+#include <rtems/freebsd/netinet6/sctp6_var.h>
+#endif
+#include <rtems/freebsd/netinet/sctp_sysctl.h>
+#include <rtems/freebsd/netinet/sctp_output.h>
+#include <rtems/freebsd/netinet/sctp_uio.h>
+#include <rtems/freebsd/netinet/sctp_asconf.h>
+#include <rtems/freebsd/netinet/sctputil.h>
+#include <rtems/freebsd/netinet/sctp_indata.h>
+#include <rtems/freebsd/netinet/sctp_timer.h>
+#include <rtems/freebsd/netinet/sctp_auth.h>
+#include <rtems/freebsd/netinet/sctp_input.h>
+#include <rtems/freebsd/netinet/sctp_output.h>
+#include <rtems/freebsd/netinet/sctp_bsd_addr.h>
+#include <rtems/freebsd/netinet/sctp_crc32.h>
+#include <rtems/freebsd/netinet/udp.h>
+
+#ifdef IPSEC
+#include <rtems/freebsd/netipsec/ipsec.h>
+#if defined(INET6)
+#include <rtems/freebsd/netipsec/ipsec6.h>
+#endif /* INET6 */
+#endif /* IPSEC */
+
+extern struct protosw inetsw[];
+
+int
+sctp6_input(struct mbuf **i_pak, int *offp, int proto)
+{
+ struct mbuf *m;
+ struct ip6_hdr *ip6;
+ struct sctphdr *sh;
+ struct sctp_inpcb *in6p = NULL;
+ struct sctp_nets *net;
+ int refcount_up = 0;
+ uint32_t vrf_id = 0;
+
+#ifdef IPSEC
+ struct inpcb *in6p_ip;
+
+#endif
+ struct sctp_chunkhdr *ch;
+ int length, offset, iphlen;
+ uint8_t ecn_bits;
+ struct sctp_tcb *stcb = NULL;
+ int pkt_len = 0;
+
+#if !defined(SCTP_WITH_NO_CSUM)
+ uint32_t check, calc_check;
+
+#endif
+ int off = *offp;
+ uint16_t port = 0;
+
+ /* get the VRF and table id's */
+ if (SCTP_GET_PKT_VRFID(*i_pak, vrf_id)) {
+ SCTP_RELEASE_PKT(*i_pak);
+ return (-1);
+ }
+ m = SCTP_HEADER_TO_CHAIN(*i_pak);
+ pkt_len = SCTP_HEADER_LEN((*i_pak));
+
+#ifdef SCTP_PACKET_LOGGING
+ sctp_packet_log(m, pkt_len);
+#endif
+ ip6 = mtod(m, struct ip6_hdr *);
+ /* Ensure that (sctphdr + sctp_chunkhdr) are contiguous. */
+ IP6_EXTHDR_GET(sh, struct sctphdr *, m, off,
+ (int)(sizeof(*sh) + sizeof(*ch)));
+ if (sh == NULL) {
+ SCTP_STAT_INCR(sctps_hdrops);
+ return IPPROTO_DONE;
+ }
+ ch = (struct sctp_chunkhdr *)((caddr_t)sh + sizeof(struct sctphdr));
+ iphlen = off;
+ offset = iphlen + sizeof(*sh) + sizeof(*ch);
+ SCTPDBG(SCTP_DEBUG_INPUT1,
+ "sctp6_input() length:%d iphlen:%d\n", pkt_len, iphlen);
+
+
+#if defined(NFAITH) && NFAITH > 0
+
+ if (faithprefix_p != NULL && (*faithprefix_p) (&ip6->ip6_dst)) {
+ /* XXX send icmp6 host/port unreach? */
+ goto bad;
+ }
+#endif /* NFAITH defined and > 0 */
+ SCTP_STAT_INCR(sctps_recvpackets);
+ SCTP_STAT_INCR_COUNTER64(sctps_inpackets);
+ SCTPDBG(SCTP_DEBUG_INPUT1, "V6 input gets a packet iphlen:%d pktlen:%d\n",
+ iphlen, pkt_len);
+ if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst)) {
+ /* No multi-cast support in SCTP */
+ goto bad;
+ }
+ /* destination port of 0 is illegal, based on RFC2960. */
+ if (sh->dest_port == 0)
+ goto bad;
+
+ SCTPDBG(SCTP_DEBUG_CRCOFFLOAD,
+ "sctp_input(): Packet of length %d received on %s with csum_flags 0x%x.\n",
+ m->m_pkthdr.len,
+ if_name(m->m_pkthdr.rcvif),
+ m->m_pkthdr.csum_flags);
+#if defined(SCTP_WITH_NO_CSUM)
+ SCTP_STAT_INCR(sctps_recvnocrc);
+#else
+ if (m->m_pkthdr.csum_flags & CSUM_SCTP_VALID) {
+ SCTP_STAT_INCR(sctps_recvhwcrc);
+ goto sctp_skip_csum;
+ }
+ check = sh->checksum; /* save incoming checksum */
+ if ((check == 0) && (SCTP_BASE_SYSCTL(sctp_no_csum_on_loopback)) &&
+ (IN6_ARE_ADDR_EQUAL(&ip6->ip6_src, &ip6->ip6_dst))) {
+ SCTP_STAT_INCR(sctps_recvnocrc);
+ goto sctp_skip_csum;
+ }
+ sh->checksum = 0; /* prepare for calc */
+ calc_check = sctp_calculate_cksum(m, iphlen);
+ SCTP_STAT_INCR(sctps_recvswcrc);
+ if (calc_check != check) {
+ SCTPDBG(SCTP_DEBUG_INPUT1, "Bad CSUM on SCTP packet calc_check:%x check:%x m:%p phlen:%d\n",
+ calc_check, check, m, iphlen);
+ stcb = sctp_findassociation_addr(m, iphlen, offset - sizeof(*ch),
+ sh, ch, &in6p, &net, vrf_id);
+ if ((net) && (port)) {
+ if (net->port == 0) {
+ sctp_pathmtu_adjustment(in6p, stcb, net, net->mtu - sizeof(struct udphdr));
+ }
+ net->port = port;
+ }
+ /* in6p's ref-count increased && stcb locked */
+ if ((in6p) && (stcb)) {
+ sctp_send_packet_dropped(stcb, net, m, iphlen, 1);
+ sctp_chunk_output((struct sctp_inpcb *)in6p, stcb, SCTP_OUTPUT_FROM_INPUT_ERROR, SCTP_SO_NOT_LOCKED);
+ } else if ((in6p != NULL) && (stcb == NULL)) {
+ refcount_up = 1;
+ }
+ SCTP_STAT_INCR(sctps_badsum);
+ SCTP_STAT_INCR_COUNTER32(sctps_checksumerrors);
+ goto bad;
+ }
+ sh->checksum = calc_check;
+
+sctp_skip_csum:
+#endif
+ net = NULL;
+ /*
+ * Locate the pcb and tcb for the datagram; sctp_findassociation_addr()
+ * wants the IP/SCTP/first chunk header...
+ */
+ stcb = sctp_findassociation_addr(m, iphlen, offset - sizeof(*ch),
+ sh, ch, &in6p, &net, vrf_id);
+ if ((net) && (port)) {
+ if (net->port == 0) {
+ sctp_pathmtu_adjustment(in6p, stcb, net, net->mtu - sizeof(struct udphdr));
+ }
+ net->port = port;
+ }
+ /* in6p's ref-count increased */
+ if (in6p == NULL) {
+ struct sctp_init_chunk *init_chk, chunk_buf;
+
+ SCTP_STAT_INCR(sctps_noport);
+ if (ch->chunk_type == SCTP_INITIATION) {
+ /*
+ * we do a trick here to get the INIT tag, dig in
+ * and get the tag from the INIT and put it in the
+ * common header.
+ */
+ init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
+ iphlen + sizeof(*sh), sizeof(*init_chk),
+ (uint8_t *) & chunk_buf);
+ if (init_chk)
+ sh->v_tag = init_chk->init.initiate_tag;
+ else
+ sh->v_tag = 0;
+ }
+ if (ch->chunk_type == SCTP_SHUTDOWN_ACK) {
+ sctp_send_shutdown_complete2(m, iphlen, sh, vrf_id, port);
+ goto bad;
+ }
+ if (ch->chunk_type == SCTP_SHUTDOWN_COMPLETE) {
+ goto bad;
+ }
+ if (ch->chunk_type != SCTP_ABORT_ASSOCIATION)
+ sctp_send_abort(m, iphlen, sh, 0, NULL, vrf_id, port);
+ goto bad;
+ } else if (stcb == NULL) {
+ refcount_up = 1;
+ }
+#ifdef IPSEC
+ /*
+ * Check AH/ESP integrity.
+ */
+ in6p_ip = (struct inpcb *)in6p;
+ if (in6p_ip && (ipsec6_in_reject(m, in6p_ip))) {
+/* XXX */
+ MODULE_GLOBAL(ipsec6stat).in_polvio++;
+ goto bad;
+ }
+#endif /* IPSEC */
+
+ /*
+ * CONTROL chunk processing
+ */
+ offset -= sizeof(*ch);
+ ecn_bits = ((ntohl(ip6->ip6_flow) >> 20) & 0x000000ff);
+
+ /* Length now holds the total packet length payload + iphlen */
+ length = ntohs(ip6->ip6_plen) + iphlen;
+
+ /* sa_ignore NO_NULL_CHK */
+ sctp_common_input_processing(&m, iphlen, offset, length, sh, ch,
+ in6p, stcb, net, ecn_bits, vrf_id, port);
+ /* inp's ref-count reduced && stcb unlocked */
+ /* XXX this stuff below gets moved to appropriate parts later... */
+ if (m)
+ sctp_m_freem(m);
+ if ((in6p) && refcount_up) {
+ /* reduce ref-count */
+ SCTP_INP_WLOCK(in6p);
+ SCTP_INP_DECR_REF(in6p);
+ SCTP_INP_WUNLOCK(in6p);
+ }
+ return IPPROTO_DONE;
+
+bad:
+ if (stcb) {
+ SCTP_TCB_UNLOCK(stcb);
+ }
+ if ((in6p) && refcount_up) {
+ /* reduce ref-count */
+ SCTP_INP_WLOCK(in6p);
+ SCTP_INP_DECR_REF(in6p);
+ SCTP_INP_WUNLOCK(in6p);
+ }
+ if (m)
+ sctp_m_freem(m);
+ return IPPROTO_DONE;
+}
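+
+/*
+ * Illustrative sketch, not from the original file: the software path
+ * above (sctp_calculate_cksum()) computes the RFC 4960 CRC32c over the
+ * packet with the checksum field zeroed, which is why sh->checksum is
+ * cleared before the call and restored after.  A minimal bitwise CRC32c
+ * over a flat buffer, using the reflected polynomial 0x82f63b78, is
+ * shown for reference; the real code also handles mbuf chains and the
+ * final byte ordering.
+ */
+#if 0
+#include <stddef.h>
+#include <stdint.h>
+
+static uint32_t crc32c(const uint8_t *buf, size_t len)
+{
+ uint32_t crc = 0xffffffff; /* RFC 4960 initial value */
+ int k;
+
+ while (len--) {
+ crc ^= *buf++;
+ for (k = 0; k < 8; k++)
+ crc = (crc & 1) ? (crc >> 1) ^ 0x82f63b78 : (crc >> 1);
+ }
+ return (~crc); /* final one's complement */
+}
+#endif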
+
+
+static void
+sctp6_notify_mbuf(struct sctp_inpcb *inp, struct icmp6_hdr *icmp6,
+ struct sctphdr *sh, struct sctp_tcb *stcb, struct sctp_nets *net)
+{
+ uint32_t nxtsz;
+
+ if ((inp == NULL) || (stcb == NULL) || (net == NULL) ||
+ (icmp6 == NULL) || (sh == NULL)) {
+ goto out;
+ }
+ /* First do we even look at it? */
+ if (ntohl(sh->v_tag) != (stcb->asoc.peer_vtag))
+ goto out;
+
+ if (icmp6->icmp6_type != ICMP6_PACKET_TOO_BIG) {
+ /* not a PACKET TOO BIG message */
+ goto out;
+ }
+ /*
+ * ok, we need to look closely. We could even get smarter and look
+ * at anyone that we sent to, in case we get a different ICMP that
+ * tells us there is no way to reach a host, but for this impl all
+ * we care about is MTU discovery.
+ */
+ nxtsz = ntohl(icmp6->icmp6_mtu);
+ /* Stop any PMTU timer */
+ sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, NULL, SCTP_FROM_SCTP6_USRREQ + SCTP_LOC_1);
+
+ /* Adjust destination size limit */
+ if (net->mtu > nxtsz) {
+ net->mtu = nxtsz;
+ if (net->port) {
+ net->mtu -= sizeof(struct udphdr);
+ }
+ }
+ /* now what about the ep? */
+ if (stcb->asoc.smallest_mtu > nxtsz) {
+ struct sctp_tmit_chunk *chk;
+
+ /* Adjust that too */
+ stcb->asoc.smallest_mtu = nxtsz;
+ /* now off to subtract IP_DF flag if needed */
+
+ TAILQ_FOREACH(chk, &stcb->asoc.send_queue, sctp_next) {
+ if ((uint32_t) (chk->send_size + IP_HDR_SIZE) > nxtsz) {
+ chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
+ }
+ }
+ TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
+ if ((uint32_t) (chk->send_size + IP_HDR_SIZE) > nxtsz) {
+ /*
+ * For this one we also mark it for immediate
+ * resend, since we sent too big a chunk.
+ */
+ chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
+ if (chk->sent != SCTP_DATAGRAM_RESEND)
+ stcb->asoc.sent_queue_retran_cnt++;
+ chk->sent = SCTP_DATAGRAM_RESEND;
+ chk->rec.data.doing_fast_retransmit = 0;
+ /* Clear any time so NO RTT is being done */
+ chk->sent_rcv_time.tv_sec = 0;
+ chk->sent_rcv_time.tv_usec = 0;
+ stcb->asoc.total_flight -= chk->send_size;
+ net->flight_size -= chk->send_size;
+ }
+ }
+ }
+ sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, NULL);
+out:
+ if (stcb) {
+ SCTP_TCB_UNLOCK(stcb);
+ }
+}
+
+
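+/*
+ * Handle other ICMPv6 errors for an association: an unreachable
+ * destination is marked down and the ULP is notified, while
+ * protocol/port unreachable is treated as an out-of-the-blue abort
+ * and the association is freed.
+ */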
+void
+sctp6_notify(struct sctp_inpcb *inp,
+ struct icmp6_hdr *icmph,
+ struct sctphdr *sh,
+ struct sockaddr *to,
+ struct sctp_tcb *stcb,
+ struct sctp_nets *net)
+{
+#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+ struct socket *so;
+
+#endif
+ /* protection */
+ int reason;
+
+
+ if ((inp == NULL) || (stcb == NULL) || (net == NULL) ||
+ (sh == NULL) || (to == NULL)) {
+ if (stcb)
+ SCTP_TCB_UNLOCK(stcb);
+ return;
+ }
+ /* First job is to verify the vtag matches what I would send */
+ if (ntohl(sh->v_tag) != (stcb->asoc.peer_vtag)) {
+ SCTP_TCB_UNLOCK(stcb);
+ return;
+ }
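+ /*
+ * Note: the type/code checks below compare ICMPv6 fields against
+ * the ICMPv4 ICMP_UNREACH* constants; later FreeBSD revisions use
+ * the corresponding ICMP6_DST_UNREACH* values here.
+ */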
+ if (icmph->icmp6_type != ICMP_UNREACH) {
+ /* We only care about unreachable */
+ SCTP_TCB_UNLOCK(stcb);
+ return;
+ }
+ if ((icmph->icmp6_code == ICMP_UNREACH_NET) ||
+ (icmph->icmp6_code == ICMP_UNREACH_HOST) ||
+ (icmph->icmp6_code == ICMP_UNREACH_NET_UNKNOWN) ||
+ (icmph->icmp6_code == ICMP_UNREACH_HOST_UNKNOWN) ||
+ (icmph->icmp6_code == ICMP_UNREACH_ISOLATED) ||
+ (icmph->icmp6_code == ICMP_UNREACH_NET_PROHIB) ||
+ (icmph->icmp6_code == ICMP_UNREACH_HOST_PROHIB) ||
+ (icmph->icmp6_code == ICMP_UNREACH_FILTER_PROHIB)) {
+
+ /*
+ * Hmm, reachability problems; we must examine closely. If it is
+ * not reachable, we may have lost a network. Or, if there is
+ * no protocol named SCTP at the other end, we consider
+ * it an OOTB abort.
+ */
+ if (net->dest_state & SCTP_ADDR_REACHABLE) {
+ /* Ok that destination is NOT reachable */
+ SCTP_PRINTF("ICMP (thresh %d/%d) takes interface %p down\n",
+ net->error_count,
+ net->failure_threshold,
+ net);
+
+ net->dest_state &= ~SCTP_ADDR_REACHABLE;
+ net->dest_state |= SCTP_ADDR_NOT_REACHABLE;
+ /*
+ * JRS 5/14/07 - If a destination is unreachable,
+ * the PF bit is turned off. This allows an
+ * unambiguous use of the PF bit for destinations
+ * that are reachable but potentially failed. If the
+ * destination is set to the unreachable state, also
+ * set the destination to the PF state.
+ */
+ /*
+ * Add debug message here if destination is not in
+ * PF state.
+ */
+ /* Stop any running T3 timers here? */
+ if ((stcb->asoc.sctp_cmt_on_off == 1) &&
+ (stcb->asoc.sctp_cmt_pf > 0)) {
+ net->dest_state &= ~SCTP_ADDR_PF;
+ SCTPDBG(SCTP_DEBUG_TIMER4, "Destination %p moved from PF to unreachable.\n",
+ net);
+ }
+ net->error_count = net->failure_threshold + 1;
+ sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN,
+ stcb, SCTP_FAILED_THRESHOLD,
+ (void *)net, SCTP_SO_NOT_LOCKED);
+ }
+ SCTP_TCB_UNLOCK(stcb);
+ } else if ((icmph->icmp6_code == ICMP_UNREACH_PROTOCOL) ||
+ (icmph->icmp6_code == ICMP_UNREACH_PORT)) {
+ /*
+ * Here the peer is either playing tricks on us, including
+ * an address that belongs to someone who does not support
+ * SCTP, OR it was a userland implementation that shut down
+ * and is now dead. In either case treat it like an OOTB
+ * abort with no TCB.
+ */
+ reason = SCTP_PEER_FAULTY;
+ sctp_abort_notification(stcb, reason, SCTP_SO_NOT_LOCKED);
+#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+ so = SCTP_INP_SO(inp);
+ atomic_add_int(&stcb->asoc.refcnt, 1);
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_SOCKET_LOCK(so, 1);
+ SCTP_TCB_LOCK(stcb);
+ atomic_subtract_int(&stcb->asoc.refcnt, 1);
+#endif
+ (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_2);
+#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+ SCTP_SOCKET_UNLOCK(so, 1);
+ /* SCTP_TCB_UNLOCK(stcb); MT: I think this is not needed. */
+#endif
+ /* no need to unlock here, since the TCB is gone */
+ } else {
+ SCTP_TCB_UNLOCK(stcb);
+ }
+}
+
+
+
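+/*
+ * Protocol control input: decode the icmp6 parameter block, look up the
+ * association addressed by the offending packet, and hand PRC_MSGSIZE to
+ * sctp6_notify_mbuf() and everything else to sctp6_notify().
+ */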
+void
+sctp6_ctlinput(int cmd, struct sockaddr *pktdst, void *d)
+{
+ struct sctphdr sh;
+ struct ip6ctlparam *ip6cp = NULL;
+ uint32_t vrf_id;
+
+ vrf_id = SCTP_DEFAULT_VRFID;
+
+ if (pktdst->sa_family != AF_INET6 ||
+ pktdst->sa_len != sizeof(struct sockaddr_in6))
+ return;
+
+ if ((unsigned)cmd >= PRC_NCMDS)
+ return;
+ if (PRC_IS_REDIRECT(cmd)) {
+ d = NULL;
+ } else if (inet6ctlerrmap[cmd] == 0) {
+ return;
+ }
+ /* if the parameter is from icmp6, decode it. */
+ if (d != NULL) {
+ ip6cp = (struct ip6ctlparam *)d;
+ } else {
+ ip6cp = (struct ip6ctlparam *)NULL;
+ }
+
+ if (ip6cp) {
+ /*
+ * XXX: We assume that when IPV6 is non NULL, M and OFF are
+ * valid.
+ */
+ /* check if we can safely examine src and dst ports */
+ struct sctp_inpcb *inp = NULL;
+ struct sctp_tcb *stcb = NULL;
+ struct sctp_nets *net = NULL;
+ struct sockaddr_in6 final;
+
+ if (ip6cp->ip6c_m == NULL)
+ return;
+
+ bzero(&sh, sizeof(sh));
+ bzero(&final, sizeof(final));
+ inp = NULL;
+ net = NULL;
+ m_copydata(ip6cp->ip6c_m, ip6cp->ip6c_off, sizeof(sh),
+ (caddr_t)&sh);
+ ip6cp->ip6c_src->sin6_port = sh.src_port;
+ final.sin6_len = sizeof(final);
+ final.sin6_family = AF_INET6;
+ final.sin6_addr = ((struct sockaddr_in6 *)pktdst)->sin6_addr;
+ final.sin6_port = sh.dest_port;
+ stcb = sctp_findassociation_addr_sa((struct sockaddr *)ip6cp->ip6c_src,
+ (struct sockaddr *)&final,
+ &inp, &net, 1, vrf_id);
+ /* inp's ref-count increased && stcb locked */
+ if (stcb != NULL && inp && (inp->sctp_socket != NULL)) {
+ if (cmd == PRC_MSGSIZE) {
+ sctp6_notify_mbuf(inp,
+ ip6cp->ip6c_icmp6,
+ &sh,
+ stcb,
+ net);
+ /* inp's ref-count reduced && stcb unlocked */
+ } else {
+ sctp6_notify(inp, ip6cp->ip6c_icmp6, &sh,
+ (struct sockaddr *)&final,
+ stcb, net);
+ /* inp's ref-count reduced && stcb unlocked */
+ }
+ } else {
+ if (PRC_IS_REDIRECT(cmd) && inp) {
+ in6_rtchange((struct in6pcb *)inp,
+ inet6ctlerrmap[cmd]);
+ }
+ if (inp) {
+ /* reduce inp's ref-count */
+ SCTP_INP_WLOCK(inp);
+ SCTP_INP_DECR_REF(inp);
+ SCTP_INP_WUNLOCK(inp);
+ }
+ if (stcb)
+ SCTP_TCB_UNLOCK(stcb);
+ }
+ }
+}
+
+/*
+ * this routine can probably be collapsed into the one in sctp_usrreq.c
+ * since they do the same thing and now we look up with a sockaddr
+ */
+static int
+sctp6_getcred(SYSCTL_HANDLER_ARGS)
+{
+ struct xucred xuc;
+ struct sockaddr_in6 addrs[2];
+ struct sctp_inpcb *inp;
+ struct sctp_nets *net;
+ struct sctp_tcb *stcb;
+ int error;
+ uint32_t vrf_id;
+
+ vrf_id = SCTP_DEFAULT_VRFID;
+
+ error = priv_check(req->td, PRIV_NETINET_GETCRED);
+ if (error)
+ return (error);
+
+ if (req->newlen != sizeof(addrs)) {
+ SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, EINVAL);
+ return (EINVAL);
+ }
+ if (req->oldlen != sizeof(struct ucred)) {
+ SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, EINVAL);
+ return (EINVAL);
+ }
+ error = SYSCTL_IN(req, addrs, sizeof(addrs));
+ if (error)
+ return (error);
+
+ stcb = sctp_findassociation_addr_sa(sin6tosa(&addrs[0]),
+ sin6tosa(&addrs[1]),
+ &inp, &net, 1, vrf_id);
+ if (stcb == NULL || inp == NULL || inp->sctp_socket == NULL) {
+ if ((inp != NULL) && (stcb == NULL)) {
+ /* reduce ref-count */
+ SCTP_INP_WLOCK(inp);
+ SCTP_INP_DECR_REF(inp);
+ goto cred_can_cont;
+ }
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, ENOENT);
+ error = ENOENT;
+ goto out;
+ }
+ SCTP_TCB_UNLOCK(stcb);
+ /*
+ * We use the write lock here only because the error leg needs
+ * it. If we used RLOCK, then we would have to
+ * wlock/decr/unlock/rlock, which in theory could create a hole.
+ * Better to take the stronger wlock up front.
+ */
+ SCTP_INP_WLOCK(inp);
+cred_can_cont:
+ error = cr_canseesocket(req->td->td_ucred, inp->sctp_socket);
+ if (error) {
+ SCTP_INP_WUNLOCK(inp);
+ goto out;
+ }
+ cru2x(inp->sctp_socket->so_cred, &xuc);
+ SCTP_INP_WUNLOCK(inp);
+ error = SYSCTL_OUT(req, &xuc, sizeof(struct xucred));
+out:
+ return (error);
+}
+
+SYSCTL_PROC(_net_inet6_sctp6, OID_AUTO, getcred, CTLTYPE_OPAQUE | CTLFLAG_RW,
+ 0, 0,
+ sctp6_getcred, "S,ucred", "Get the ucred of a SCTP6 connection");
+
+
+/* This is the same as the sctp_abort() could be made common */
+static void
+sctp6_abort(struct socket *so)
+{
+ struct sctp_inpcb *inp;
+ uint32_t flags;
+
+ inp = (struct sctp_inpcb *)so->so_pcb;
+ if (inp == NULL) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, EINVAL);
+ return;
+ }
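+ /*
+ * Mark the PCB gone with an atomic compare-and-set on the flags; if
+ * another thread changed them underneath us, re-read and retry until
+ * we either win the race or find the socket already marked gone.
+ */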
+sctp_must_try_again:
+ flags = inp->sctp_flags;
+#ifdef SCTP_LOG_CLOSING
+ sctp_log_closing(inp, NULL, 17);
+#endif
+ if (((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) &&
+ (atomic_cmpset_int(&inp->sctp_flags, flags, (flags | SCTP_PCB_FLAGS_SOCKET_GONE | SCTP_PCB_FLAGS_CLOSE_IP)))) {
+#ifdef SCTP_LOG_CLOSING
+ sctp_log_closing(inp, NULL, 16);
+#endif
+ sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
+ SCTP_CALLED_AFTER_CMPSET_OFCLOSE);
+ SOCK_LOCK(so);
+ SCTP_SB_CLEAR(so->so_snd);
+ /*
+ * The same for the rcv ones; they are only here for the
+ * accounting/select.
+ */
+ SCTP_SB_CLEAR(so->so_rcv);
+ /* Now null out the reference, we are completely detached. */
+ so->so_pcb = NULL;
+ SOCK_UNLOCK(so);
+ } else {
+ flags = inp->sctp_flags;
+ if ((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) {
+ goto sctp_must_try_again;
+ }
+ }
+ return;
+}
+
+static int
+sctp6_attach(struct socket *so, int proto, struct thread *p)
+{
+ struct in6pcb *inp6;
+ int error;
+ struct sctp_inpcb *inp;
+ uint32_t vrf_id = SCTP_DEFAULT_VRFID;
+
+ inp = (struct sctp_inpcb *)so->so_pcb;
+ if (inp != NULL) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, EINVAL);
+ return EINVAL;
+ }
+ if (so->so_snd.sb_hiwat == 0 || so->so_rcv.sb_hiwat == 0) {
+ error = SCTP_SORESERVE(so, SCTP_BASE_SYSCTL(sctp_sendspace), SCTP_BASE_SYSCTL(sctp_recvspace));
+ if (error)
+ return error;
+ }
+ error = sctp_inpcb_alloc(so, vrf_id);
+ if (error)
+ return error;
+ inp = (struct sctp_inpcb *)so->so_pcb;
+ SCTP_INP_WLOCK(inp);
+ inp->sctp_flags |= SCTP_PCB_FLAGS_BOUND_V6; /* I'm v6! */
+ inp6 = (struct in6pcb *)inp;
+
+ inp6->inp_vflag |= INP_IPV6;
+ inp6->in6p_hops = -1; /* use kernel default */
+ inp6->in6p_cksum = -1; /* just to be sure */
+#ifdef INET
+ /*
+ * XXX: ugly!! IPv4 TTL initialization is necessary for an IPv6
+ * socket as well, because the socket may be bound to an IPv6
+ * wildcard address, which may match an IPv4-mapped IPv6 address.
+ */
+ inp6->inp_ip_ttl = MODULE_GLOBAL(ip_defttl);
+#endif
+ /*
+ * Hmm what about the IPSEC stuff that is missing here but in
+ * sctp_attach()?
+ */
+ SCTP_INP_WUNLOCK(inp);
+ return 0;
+}
+
+static int
+sctp6_bind(struct socket *so, struct sockaddr *addr, struct thread *p)
+{
+ struct sctp_inpcb *inp;
+ struct in6pcb *inp6;
+ int error;
+
+ inp = (struct sctp_inpcb *)so->so_pcb;
+ if (inp == NULL) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, EINVAL);
+ return EINVAL;
+ }
+ if (addr) {
+ if ((addr->sa_family == AF_INET6) &&
+ (addr->sa_len != sizeof(struct sockaddr_in6))) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, EINVAL);
+ return EINVAL;
+ }
+ if ((addr->sa_family == AF_INET) &&
+ (addr->sa_len != sizeof(struct sockaddr_in))) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, EINVAL);
+ return EINVAL;
+ }
+ }
+ inp6 = (struct in6pcb *)inp;
+ inp6->inp_vflag &= ~INP_IPV4;
+ inp6->inp_vflag |= INP_IPV6;
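+ /*
+ * Start from a v6-only view; the checks below re-enable INP_IPV4
+ * when the socket is not V6ONLY and the address is unspecified,
+ * plain v4, or v4-mapped.
+ */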
+ if ((addr != NULL) && (SCTP_IPV6_V6ONLY(inp6) == 0)) {
+ if (addr->sa_family == AF_INET) {
+ /* binding v4 addr to v6 socket, so reset flags */
+ inp6->inp_vflag |= INP_IPV4;
+ inp6->inp_vflag &= ~INP_IPV6;
+ } else {
+ struct sockaddr_in6 *sin6_p;
+
+ sin6_p = (struct sockaddr_in6 *)addr;
+
+ if (IN6_IS_ADDR_UNSPECIFIED(&sin6_p->sin6_addr)) {
+ inp6->inp_vflag |= INP_IPV4;
+ } else if (IN6_IS_ADDR_V4MAPPED(&sin6_p->sin6_addr)) {
+ struct sockaddr_in sin;
+
+ in6_sin6_2_sin(&sin, sin6_p);
+ inp6->inp_vflag |= INP_IPV4;
+ inp6->inp_vflag &= ~INP_IPV6;
+ error = sctp_inpcb_bind(so, (struct sockaddr *)&sin, NULL, p);
+ return error;
+ }
+ }
+ } else if (addr != NULL) {
+ /* IPV6_V6ONLY socket */
+ if (addr->sa_family == AF_INET) {
+ /* can't bind v4 addr to v6 only socket! */
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, EINVAL);
+ return EINVAL;
+ } else {
+ struct sockaddr_in6 *sin6_p;
+
+ sin6_p = (struct sockaddr_in6 *)addr;
+
+ if (IN6_IS_ADDR_V4MAPPED(&sin6_p->sin6_addr)) {
+ /* can't bind v4-mapped addrs either! */
+ /* NOTE: we don't support SIIT */
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, EINVAL);
+ return EINVAL;
+ }
+ }
+ }
+ error = sctp_inpcb_bind(so, addr, NULL, p);
+ return error;
+}
+
+
+static void
+sctp6_close(struct socket *so)
+{
+ sctp_close(so);
+}
+
+/* This could be made common with sctp_detach() since they are identical */
+
+static
+int
+sctp6_disconnect(struct socket *so)
+{
+ return (sctp_disconnect(so));
+}
+
+
+int
+sctp_sendm(struct socket *so, int flags, struct mbuf *m, struct sockaddr *addr,
+ struct mbuf *control, struct thread *p);
+
+
+static int
+sctp6_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *addr,
+ struct mbuf *control, struct thread *p)
+{
+ struct sctp_inpcb *inp;
+ struct in6pcb *inp6;
+
+#ifdef INET
+ struct sockaddr_in6 *sin6;
+
+#endif /* INET */
+ /* No SPL needed since sctp_output does this */
+
+ inp = (struct sctp_inpcb *)so->so_pcb;
+ if (inp == NULL) {
+ if (control) {
+ SCTP_RELEASE_PKT(control);
+ control = NULL;
+ }
+ SCTP_RELEASE_PKT(m);
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, EINVAL);
+ return EINVAL;
+ }
+ inp6 = (struct in6pcb *)inp;
+ /*
+ * For the TCP model we may get a NULL addr; if we are a connected
+ * socket, that is ok.
+ */
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) &&
+ (addr == NULL)) {
+ goto connected_type;
+ }
+ if (addr == NULL) {
+ SCTP_RELEASE_PKT(m);
+ if (control) {
+ SCTP_RELEASE_PKT(control);
+ control = NULL;
+ }
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, EDESTADDRREQ);
+ return (EDESTADDRREQ);
+ }
+#ifdef INET
+ sin6 = (struct sockaddr_in6 *)addr;
+ if (SCTP_IPV6_V6ONLY(inp6)) {
+ /*
+ * if IPV6_V6ONLY flag, we discard datagrams destined to a
+ * v4 addr or v4-mapped addr
+ */
+ if (addr->sa_family == AF_INET) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, EINVAL);
+ return EINVAL;
+ }
+ if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, EINVAL);
+ return EINVAL;
+ }
+ }
+ if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
+ if (!MODULE_GLOBAL(ip6_v6only)) {
+ struct sockaddr_in sin;
+
+ /* convert v4-mapped into v4 addr and send */
+ in6_sin6_2_sin(&sin, sin6);
+ return sctp_sendm(so, flags, m, (struct sockaddr *)&sin,
+ control, p);
+ } else {
+ /* mapped addresses aren't enabled */
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, EINVAL);
+ return EINVAL;
+ }
+ }
+#endif /* INET */
+connected_type:
+ /* now what about control */
+ if (control) {
+ if (inp->control) {
+ SCTP_PRINTF("huh? control set?\n");
+ SCTP_RELEASE_PKT(inp->control);
+ inp->control = NULL;
+ }
+ inp->control = control;
+ }
+ /* Place the data */
+ if (inp->pkt) {
+ SCTP_BUF_NEXT(inp->pkt_last) = m;
+ inp->pkt_last = m;
+ } else {
+ inp->pkt_last = inp->pkt = m;
+ }
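+ /*
+ * PRUS_MORETOCOME means the caller has more data for this send, so
+ * keep accumulating mbufs on inp->pkt and defer sctp_output() until
+ * the final piece arrives.
+ */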
+ if (
+ /* FreeBSD and MacOSX use a passed flag */
+ ((flags & PRUS_MORETOCOME) == 0)
+ ) {
+ /*
+ * note that with the current version this code will only be
+ * used by OpenBSD; NetBSD and FreeBSD have methods for
+ * re-defining sosend() to use sctp_sosend(). One can
+ * optionally switch back to this code (by changing back the
+ * definitions), but this is not advisable.
+ */
+ int ret;
+
+ ret = sctp_output(inp, inp->pkt, addr, inp->control, p, flags);
+ inp->pkt = NULL;
+ inp->control = NULL;
+ return (ret);
+ } else {
+ return (0);
+ }
+}
+
+static int
+sctp6_connect(struct socket *so, struct sockaddr *addr, struct thread *p)
+{
+ uint32_t vrf_id;
+ int error = 0;
+ struct sctp_inpcb *inp;
+ struct in6pcb *inp6;
+ struct sctp_tcb *stcb;
+
+#ifdef INET
+ struct sockaddr_in6 *sin6;
+ struct sockaddr_storage ss;
+
+#endif /* INET */
+
+ inp6 = (struct in6pcb *)so->so_pcb;
+ inp = (struct sctp_inpcb *)so->so_pcb;
+ if (inp == NULL) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, ECONNRESET);
+ return (ECONNRESET); /* I made this the same as TCP since
+ * we are not set up? */
+ }
+ if (addr == NULL) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, EINVAL);
+ return (EINVAL);
+ }
+ if ((addr->sa_family == AF_INET6) && (addr->sa_len != sizeof(struct sockaddr_in6))) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, EINVAL);
+ return (EINVAL);
+ }
+ if ((addr->sa_family == AF_INET) && (addr->sa_len != sizeof(struct sockaddr_in))) {
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, EINVAL);
+ return (EINVAL);
+ }
+ vrf_id = inp->def_vrf_id;
+ SCTP_ASOC_CREATE_LOCK(inp);
+ SCTP_INP_RLOCK(inp);
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) ==
+ SCTP_PCB_FLAGS_UNBOUND) {
+ /* Bind an ephemeral port */
+ SCTP_INP_RUNLOCK(inp);
+ error = sctp6_bind(so, NULL, p);
+ if (error) {
+ SCTP_ASOC_CREATE_UNLOCK(inp);
+
+ return (error);
+ }
+ SCTP_INP_RLOCK(inp);
+ }
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
+ (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED)) {
+ /* We are already connected AND the TCP model */
+ SCTP_INP_RUNLOCK(inp);
+ SCTP_ASOC_CREATE_UNLOCK(inp);
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, EADDRINUSE);
+ return (EADDRINUSE);
+ }
+#ifdef INET
+ sin6 = (struct sockaddr_in6 *)addr;
+ if (SCTP_IPV6_V6ONLY(inp6)) {
+ /*
+ * if IPV6_V6ONLY flag, ignore connections destined to a v4
+ * addr or v4-mapped addr
+ */
+ if (addr->sa_family == AF_INET) {
+ SCTP_INP_RUNLOCK(inp);
+ SCTP_ASOC_CREATE_UNLOCK(inp);
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, EINVAL);
+ return EINVAL;
+ }
+ if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
+ SCTP_INP_RUNLOCK(inp);
+ SCTP_ASOC_CREATE_UNLOCK(inp);
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, EINVAL);
+ return EINVAL;
+ }
+ }
+ if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
+ if (!MODULE_GLOBAL(ip6_v6only)) {
+ /* convert v4-mapped into v4 addr */
+ in6_sin6_2_sin((struct sockaddr_in *)&ss, sin6);
+ addr = (struct sockaddr *)&ss;
+ } else {
+ /* mapped addresses aren't enabled */
+ SCTP_INP_RUNLOCK(inp);
+ SCTP_ASOC_CREATE_UNLOCK(inp);
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, EINVAL);
+ return EINVAL;
+ }
+ } else
+#endif /* INET */
+ addr = addr; /* for true v6 address case */
+
+ /* Now do we connect? */
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
+ stcb = LIST_FIRST(&inp->sctp_asoc_list);
+ if (stcb) {
+ SCTP_TCB_UNLOCK(stcb);
+ }
+ SCTP_INP_RUNLOCK(inp);
+ } else {
+ SCTP_INP_RUNLOCK(inp);
+ SCTP_INP_WLOCK(inp);
+ SCTP_INP_INCR_REF(inp);
+ SCTP_INP_WUNLOCK(inp);
+ stcb = sctp_findassociation_ep_addr(&inp, addr, NULL, NULL, NULL);
+ if (stcb == NULL) {
+ SCTP_INP_WLOCK(inp);
+ SCTP_INP_DECR_REF(inp);
+ SCTP_INP_WUNLOCK(inp);
+ }
+ }
+
+ if (stcb != NULL) {
+ /* We already have, or are bringing up, an association */
+ SCTP_ASOC_CREATE_UNLOCK(inp);
+ SCTP_TCB_UNLOCK(stcb);
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, EALREADY);
+ return (EALREADY);
+ }
+ /* We are GOOD to go */
+ stcb = sctp_aloc_assoc(inp, addr, &error, 0, vrf_id, p);
+ SCTP_ASOC_CREATE_UNLOCK(inp);
+ if (stcb == NULL) {
+ /* Gak! no memory */
+ return (error);
+ }
+ if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) {
+ stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
+ /* Set the connected flag so we can queue data */
+ soisconnecting(so);
+ }
+ stcb->asoc.state = SCTP_STATE_COOKIE_WAIT;
+ (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);
+
+ /* initialize authentication parameters for the assoc */
+ sctp_initialize_auth_params(inp, stcb);
+
+ sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED);
+ SCTP_TCB_UNLOCK(stcb);
+ return error;
+}
+
+static int
+sctp6_getaddr(struct socket *so, struct sockaddr **addr)
+{
+ struct sockaddr_in6 *sin6;
+ struct sctp_inpcb *inp;
+ uint32_t vrf_id;
+ struct sctp_ifa *sctp_ifa;
+
+ int error;
+
+ /*
+ * Do the malloc first in case it blocks.
+ */
+ SCTP_MALLOC_SONAME(sin6, struct sockaddr_in6 *, sizeof *sin6);
+ if (sin6 == NULL)
+ return ENOMEM;
+ sin6->sin6_family = AF_INET6;
+ sin6->sin6_len = sizeof(*sin6);
+
+ inp = (struct sctp_inpcb *)so->so_pcb;
+ if (inp == NULL) {
+ SCTP_FREE_SONAME(sin6);
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, ECONNRESET);
+ return ECONNRESET;
+ }
+ SCTP_INP_RLOCK(inp);
+ sin6->sin6_port = inp->sctp_lport;
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
+ /* For the bound all case you get back 0 */
+ if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
+ struct sctp_tcb *stcb;
+ struct sockaddr_in6 *sin_a6;
+ struct sctp_nets *net;
+ int fnd;
+
+ stcb = LIST_FIRST(&inp->sctp_asoc_list);
+ if (stcb == NULL) {
+ goto notConn6;
+ }
+ fnd = 0;
+ sin_a6 = NULL;
+ TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+ sin_a6 = (struct sockaddr_in6 *)&net->ro._l_addr;
+ if (sin_a6 == NULL)
+ /* this will make coverity happy */
+ continue;
+
+ if (sin_a6->sin6_family == AF_INET6) {
+ fnd = 1;
+ break;
+ }
+ }
+ if ((!fnd) || (sin_a6 == NULL)) {
+ /* punt */
+ goto notConn6;
+ }
+ vrf_id = inp->def_vrf_id;
+ sctp_ifa = sctp_source_address_selection(inp, stcb, (sctp_route_t *) & net->ro, net, 0, vrf_id);
+ if (sctp_ifa) {
+ sin6->sin6_addr = sctp_ifa->address.sin6.sin6_addr;
+ }
+ } else {
+ /* For the bound all case you get back 0 */
+ notConn6:
+ memset(&sin6->sin6_addr, 0, sizeof(sin6->sin6_addr));
+ }
+ } else {
+ /* Take the first IPv6 address in the list */
+ struct sctp_laddr *laddr;
+ int fnd = 0;
+
+ LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
+ if (laddr->ifa->address.sa.sa_family == AF_INET6) {
+ struct sockaddr_in6 *sin_a;
+
+ sin_a = (struct sockaddr_in6 *)&laddr->ifa->address.sin6;
+ sin6->sin6_addr = sin_a->sin6_addr;
+ fnd = 1;
+ break;
+ }
+ }
+ if (!fnd) {
+ SCTP_FREE_SONAME(sin6);
+ SCTP_INP_RUNLOCK(inp);
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, ENOENT);
+ return ENOENT;
+ }
+ }
+ SCTP_INP_RUNLOCK(inp);
+ /* Scoping things for v6 */
+ if ((error = sa6_recoverscope(sin6)) != 0) {
+ SCTP_FREE_SONAME(sin6);
+ return (error);
+ }
+ (*addr) = (struct sockaddr *)sin6;
+ return (0);
+}
+
+static int
+sctp6_peeraddr(struct socket *so, struct sockaddr **addr)
+{
+ struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)*addr;
+ int fnd;
+ struct sockaddr_in6 *sin_a6;
+ struct sctp_inpcb *inp;
+ struct sctp_tcb *stcb;
+ struct sctp_nets *net;
+
+ int error;
+
+ /*
+ * Do the malloc first in case it blocks.
+ */
+ inp = (struct sctp_inpcb *)so->so_pcb;
+ if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
+ /* UDP type and listeners will drop out here */
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, ENOTCONN);
+ return (ENOTCONN);
+ }
+ SCTP_MALLOC_SONAME(sin6, struct sockaddr_in6 *, sizeof *sin6);
+ if (sin6 == NULL)
+ return (ENOMEM);
+ sin6->sin6_family = AF_INET6;
+ sin6->sin6_len = sizeof(*sin6);
+
+ /* We must recapture in case we blocked */
+ inp = (struct sctp_inpcb *)so->so_pcb;
+ if (inp == NULL) {
+ SCTP_FREE_SONAME(sin6);
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, ECONNRESET);
+ return ECONNRESET;
+ }
+ SCTP_INP_RLOCK(inp);
+ stcb = LIST_FIRST(&inp->sctp_asoc_list);
+ if (stcb) {
+ SCTP_TCB_LOCK(stcb);
+ }
+ SCTP_INP_RUNLOCK(inp);
+ if (stcb == NULL) {
+ SCTP_FREE_SONAME(sin6);
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, ECONNRESET);
+ return ECONNRESET;
+ }
+ fnd = 0;
+ TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+ sin_a6 = (struct sockaddr_in6 *)&net->ro._l_addr;
+ if (sin_a6->sin6_family == AF_INET6) {
+ fnd = 1;
+ sin6->sin6_port = stcb->rport;
+ sin6->sin6_addr = sin_a6->sin6_addr;
+ break;
+ }
+ }
+ SCTP_TCB_UNLOCK(stcb);
+ if (!fnd) {
+ /* No IPv6 address found */
+ SCTP_FREE_SONAME(sin6);
+ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, ENOENT);
+ return ENOENT;
+ }
+ if ((error = sa6_recoverscope(sin6)) != 0)
+ return (error);
+ *addr = (struct sockaddr *)sin6;
+ return (0);
+}
+
+static int
+sctp6_in6getaddr(struct socket *so, struct sockaddr **nam)
+{
+ struct sockaddr *addr;
+ struct in6pcb *inp6 = sotoin6pcb(so);
+ int error;
+
+ if (inp6 == NULL) {
+ SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, EINVAL);
+ return EINVAL;
+ }
+ /* give v6 addresses precedence */
+ error = sctp6_getaddr(so, nam);
+ if (error) {
+ /* try v4 next if v6 failed */
+ error = sctp_ingetaddr(so, nam);
+ if (error) {
+ return (error);
+ }
+ addr = *nam;
+ /* if I'm V6ONLY, convert it to v4-mapped */
+ if (SCTP_IPV6_V6ONLY(inp6)) {
+ struct sockaddr_in6 sin6;
+
+ in6_sin_2_v4mapsin6((struct sockaddr_in *)addr, &sin6);
+ memcpy(addr, &sin6, sizeof(struct sockaddr_in6));
+
+ }
+ }
+ return (error);
+}
+
+
+static int
+sctp6_getpeeraddr(struct socket *so, struct sockaddr **nam)
+{
+ struct sockaddr *addr = *nam;
+ struct in6pcb *inp6 = sotoin6pcb(so);
+ int error;
+
+ if (inp6 == NULL) {
+ SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, EINVAL);
+ return EINVAL;
+ }
+ /* give v6 addresses precedence */
+ error = sctp6_peeraddr(so, nam);
+ if (error) {
+ /* try v4 next if v6 failed */
+ error = sctp_peeraddr(so, nam);
+ if (error) {
+ return (error);
+ }
+ /* if I'm V6ONLY, convert it to v4-mapped */
+ if (SCTP_IPV6_V6ONLY(inp6)) {
+ struct sockaddr_in6 sin6;
+
+ in6_sin_2_v4mapsin6((struct sockaddr_in *)addr, &sin6);
+ memcpy(addr, &sin6, sizeof(struct sockaddr_in6));
+ }
+ }
+ return error;
+}
+
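+/*
+ * IPv6 user-request switch: v6-specific handlers where address handling
+ * differs, shared SCTP entry points everywhere else.
+ */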
+struct pr_usrreqs sctp6_usrreqs = {
+ .pru_abort = sctp6_abort,
+ .pru_accept = sctp_accept,
+ .pru_attach = sctp6_attach,
+ .pru_bind = sctp6_bind,
+ .pru_connect = sctp6_connect,
+ .pru_control = in6_control,
+ .pru_close = sctp6_close,
+ .pru_detach = sctp6_close,
+ .pru_sopoll = sopoll_generic,
+ .pru_flush = sctp_flush,
+ .pru_disconnect = sctp6_disconnect,
+ .pru_listen = sctp_listen,
+ .pru_peeraddr = sctp6_getpeeraddr,
+ .pru_send = sctp6_send,
+ .pru_shutdown = sctp_shutdown,
+ .pru_sockaddr = sctp6_in6getaddr,
+ .pru_sosend = sctp_sosend,
+ .pru_soreceive = sctp_soreceive
+};
diff --git a/rtems/freebsd/netinet6/sctp6_var.h b/rtems/freebsd/netinet6/sctp6_var.h
new file mode 100644
index 00000000..22fb0d90
--- /dev/null
+++ b/rtems/freebsd/netinet6/sctp6_var.h
@@ -0,0 +1,61 @@
+/*-
+ * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+/* $KAME: sctp6_var.h,v 1.7 2004/08/17 04:06:22 itojun Exp $ */
+
+#ifndef _NETINET6_SCTP6_VAR_HH_
+#define _NETINET6_SCTP6_VAR_HH_
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/* TODO __Userspace__ IPv6 stuff... */
+#if defined(_KERNEL)
+
+SYSCTL_DECL(_net_inet6_sctp6);
+extern struct pr_usrreqs sctp6_usrreqs;
+
+
+int sctp6_input __P((struct mbuf **, int *, int));
+int sctp6_output __P((struct sctp_inpcb *, struct mbuf *, struct sockaddr *,
+ struct mbuf *, struct proc *));
+void sctp6_ctlinput __P((int, struct sockaddr *, void *));
+
+
+extern void sctp6_notify(struct sctp_inpcb *inp,
+ struct icmp6_hdr *icmph,
+ struct sctphdr *sh,
+ struct sockaddr *to,
+ struct sctp_tcb *stcb,
+ struct sctp_nets *net);
+
+
+#endif /* _KERNEL */
+#endif /* _NETINET6_SCTP6_VAR_HH_ */
diff --git a/rtems/freebsd/netinet6/tcp6_var.h b/rtems/freebsd/netinet6/tcp6_var.h
new file mode 100644
index 00000000..e0373b3d
--- /dev/null
+++ b/rtems/freebsd/netinet6/tcp6_var.h
@@ -0,0 +1,83 @@
+/*-
+ * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the project nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*-
+ * Copyright (c) 1982, 1986, 1993, 1994, 1995
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)tcp_var.h 8.4 (Berkeley) 5/24/95
+ * $FreeBSD$
+ */
+
+#ifndef _NETINET_TCP6_VAR_HH_
+#define _NETINET_TCP6_VAR_HH_
+
+#ifdef _KERNEL
+#ifdef SYSCTL_DECL
+SYSCTL_DECL(_net_inet6_tcp6);
+
+VNET_DECLARE(int, tcp_v6mssdflt); /* XXX */
+#define V_tcp_v6mssdflt VNET(tcp_v6mssdflt)
+#endif
+
+struct ip6_hdr;
+void tcp6_ctlinput __P((int, struct sockaddr *, void *));
+void tcp6_init __P((void));
+int tcp6_input __P((struct mbuf **, int *, int));
+struct rtentry *tcp_rtlookup6(struct in_conninfo *);
+
+extern struct pr_usrreqs tcp6_usrreqs;
+
+#endif /* _KERNEL */
+
+#endif /* _NETINET_TCP6_VAR_HH_ */
diff --git a/rtems/freebsd/netinet6/udp6_usrreq.c b/rtems/freebsd/netinet6/udp6_usrreq.c
new file mode 100644
index 00000000..3ef55fd8
--- /dev/null
+++ b/rtems/freebsd/netinet6/udp6_usrreq.c
@@ -0,0 +1,1112 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the project nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $KAME: udp6_usrreq.c,v 1.27 2001/05/21 05:45:10 jinmei Exp $
+ * $KAME: udp6_output.c,v 1.31 2001/05/21 16:39:15 jinmei Exp $
+ */
+
+/*-
+ * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
+ * The Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)udp_usrreq.c 8.6 (Berkeley) 5/23/95
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/freebsd/local/opt_inet.h>
+#include <rtems/freebsd/local/opt_inet6.h>
+#include <rtems/freebsd/local/opt_ipsec.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/jail.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/priv.h>
+#include <rtems/freebsd/sys/proc.h>
+#include <rtems/freebsd/sys/protosw.h>
+#include <rtems/freebsd/sys/signalvar.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/socketvar.h>
+#include <rtems/freebsd/sys/sx.h>
+#include <rtems/freebsd/sys/sysctl.h>
+#include <rtems/freebsd/sys/syslog.h>
+#include <rtems/freebsd/sys/systm.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/if_types.h>
+#include <rtems/freebsd/net/route.h>
+
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/in_pcb.h>
+#include <rtems/freebsd/netinet/in_systm.h>
+#include <rtems/freebsd/netinet/in_var.h>
+#include <rtems/freebsd/netinet/ip.h>
+#include <rtems/freebsd/netinet/ip_icmp.h>
+#include <rtems/freebsd/netinet/ip6.h>
+#include <rtems/freebsd/netinet/icmp_var.h>
+#include <rtems/freebsd/netinet/icmp6.h>
+#include <rtems/freebsd/netinet/ip_var.h>
+#include <rtems/freebsd/netinet/udp.h>
+#include <rtems/freebsd/netinet/udp_var.h>
+
+#include <rtems/freebsd/netinet6/ip6protosw.h>
+#include <rtems/freebsd/netinet6/ip6_var.h>
+#include <rtems/freebsd/netinet6/in6_pcb.h>
+#include <rtems/freebsd/netinet6/udp6_var.h>
+#include <rtems/freebsd/netinet6/scope6_var.h>
+
+#ifdef IPSEC
+#include <rtems/freebsd/netipsec/ipsec.h>
+#include <rtems/freebsd/netipsec/ipsec6.h>
+#endif /* IPSEC */
+
+#include <rtems/freebsd/security/mac/mac_framework.h>
+
+/*
+ * UDP protocol implementation.
+ * Per RFC 768, August, 1980.
+ */
+
+extern struct protosw inetsw[];
+static void udp6_detach(struct socket *so);
+
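+/*
+ * Deliver one datagram to a matched PCB: apply IPsec and MAC policy,
+ * optionally collect control-message options, strip the headers, and
+ * append the payload to the socket receive buffer.
+ */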
+static void
+udp6_append(struct inpcb *inp, struct mbuf *n, int off,
+ struct sockaddr_in6 *fromsa)
+{
+ struct socket *so;
+ struct mbuf *opts;
+
+ INP_LOCK_ASSERT(inp);
+
+#ifdef IPSEC
+ /* Check AH/ESP integrity. */
+ if (ipsec6_in_reject(n, inp)) {
+ m_freem(n);
+ V_ipsec6stat.in_polvio++;
+ return;
+ }
+#endif /* IPSEC */
+#ifdef MAC
+ if (mac_inpcb_check_deliver(inp, n) != 0) {
+ m_freem(n);
+ return;
+ }
+#endif
+ opts = NULL;
+ if (inp->inp_flags & INP_CONTROLOPTS ||
+ inp->inp_socket->so_options & SO_TIMESTAMP)
+ ip6_savecontrol(inp, n, &opts);
+ m_adj(n, off + sizeof(struct udphdr));
+
+ so = inp->inp_socket;
+ SOCKBUF_LOCK(&so->so_rcv);
+ if (sbappendaddr_locked(&so->so_rcv, (struct sockaddr *)fromsa, n,
+ opts) == 0) {
+ SOCKBUF_UNLOCK(&so->so_rcv);
+ m_freem(n);
+ if (opts)
+ m_freem(opts);
+ UDPSTAT_INC(udps_fullsock);
+ } else
+ sorwakeup_locked(so);
+}
+
+int
+udp6_input(struct mbuf **mp, int *offp, int proto)
+{
+ struct mbuf *m = *mp;
+ struct ifnet *ifp;
+ struct ip6_hdr *ip6;
+ struct udphdr *uh;
+ struct inpcb *inp;
+ struct udpcb *up;
+ int off = *offp;
+ int plen, ulen;
+ struct sockaddr_in6 fromsa;
+
+ ifp = m->m_pkthdr.rcvif;
+ ip6 = mtod(m, struct ip6_hdr *);
+
+ if (faithprefix_p != NULL && (*faithprefix_p)(&ip6->ip6_dst)) {
+ /* XXX send icmp6 host/port unreach? */
+ m_freem(m);
+ return (IPPROTO_DONE);
+ }
+
+#ifndef PULLDOWN_TEST
+ IP6_EXTHDR_CHECK(m, off, sizeof(struct udphdr), IPPROTO_DONE);
+ ip6 = mtod(m, struct ip6_hdr *);
+ uh = (struct udphdr *)((caddr_t)ip6 + off);
+#else
+ IP6_EXTHDR_GET(uh, struct udphdr *, m, off, sizeof(*uh));
+ if (!uh)
+ return (IPPROTO_DONE);
+#endif
+
+ UDPSTAT_INC(udps_ipackets);
+
+ /*
+ * A destination port of 0 is illegal, based on RFC 768.
+ */
+ if (uh->uh_dport == 0)
+ goto badunlocked;
+
+ plen = ntohs(ip6->ip6_plen) - off + sizeof(*ip6);
+ ulen = ntohs((u_short)uh->uh_ulen);
+
+ if (plen != ulen) {
+ UDPSTAT_INC(udps_badlen);
+ goto badunlocked;
+ }
+
+ /*
+ * Checksum extended UDP header and data.
+ */
+ if (uh->uh_sum == 0) {
+ UDPSTAT_INC(udps_nosum);
+ goto badunlocked;
+ }
+ if (in6_cksum(m, IPPROTO_UDP, off, ulen) != 0) {
+ UDPSTAT_INC(udps_badsum);
+ goto badunlocked;
+ }
+
+ /*
+ * Construct sockaddr format source address.
+ */
+ init_sin6(&fromsa, m);
+ fromsa.sin6_port = uh->uh_sport;
+
+ INP_INFO_RLOCK(&V_udbinfo);
+ if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst)) {
+ struct inpcb *last;
+ struct ip6_moptions *imo;
+
+ /*
+ * In the event that laddr should be set to the link-local
+ * address (this happens in RIPng), the multicast address
+ * specified in the received packet will not match laddr. To
+ * handle this situation, matching is relaxed if the
+ * receiving interface is the same as one specified in the
+ * socket and if the destination multicast address matches
+ * one of the multicast groups specified in the socket.
+ */
+
+ /*
+ * KAME note: traditionally we dropped udpiphdr from mbuf
+ * here. We need udphdr for IPsec processing so we do that
+ * later.
+ */
+ last = NULL;
+ LIST_FOREACH(inp, &V_udb, inp_list) {
+ if ((inp->inp_vflag & INP_IPV6) == 0)
+ continue;
+ if (inp->inp_lport != uh->uh_dport)
+ continue;
+ if (inp->inp_fport != 0 &&
+ inp->inp_fport != uh->uh_sport)
+ continue;
+ if (!IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr)) {
+ if (!IN6_ARE_ADDR_EQUAL(&inp->in6p_laddr,
+ &ip6->ip6_dst))
+ continue;
+ }
+ if (!IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_faddr)) {
+ if (!IN6_ARE_ADDR_EQUAL(&inp->in6p_faddr,
+ &ip6->ip6_src) ||
+ inp->inp_fport != uh->uh_sport)
+ continue;
+ }
+
+ /*
+ * Handle socket delivery policy for any-source
+ * and source-specific multicast. [RFC3678]
+ */
+ imo = inp->in6p_moptions;
+ if (imo && IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst)) {
+ struct sockaddr_in6 mcaddr;
+ int blocked;
+
+ INP_RLOCK(inp);
+
+ bzero(&mcaddr, sizeof(struct sockaddr_in6));
+ mcaddr.sin6_len = sizeof(struct sockaddr_in6);
+ mcaddr.sin6_family = AF_INET6;
+ mcaddr.sin6_addr = ip6->ip6_dst;
+
+ blocked = im6o_mc_filter(imo, ifp,
+ (struct sockaddr *)&mcaddr,
+ (struct sockaddr *)&fromsa);
+ if (blocked != MCAST_PASS) {
+ if (blocked == MCAST_NOTGMEMBER)
+ IP6STAT_INC(ip6s_notmember);
+ if (blocked == MCAST_NOTSMEMBER ||
+ blocked == MCAST_MUTED)
+ UDPSTAT_INC(udps_filtermcast);
+ INP_RUNLOCK(inp); /* XXX */
+ continue;
+ }
+
+ INP_RUNLOCK(inp);
+ }
+ if (last != NULL) {
+ struct mbuf *n;
+
+ if ((n = m_copy(m, 0, M_COPYALL)) != NULL) {
+ INP_RLOCK(last);
+ up = intoudpcb(last);
+ if (up->u_tun_func == NULL) {
+ udp6_append(last, n, off, &fromsa);
+ } else {
+ /*
+ * Engage the tunneling
+ * protocol. We have to
+ * leave the info_lock up,
+ * since we are hunting
+ * through multiple UDP
+ * inpcbs.
+ */
+ (*up->u_tun_func)(n, off, last);
+ }
+ INP_RUNLOCK(last);
+ }
+ }
+ last = inp;
+ /*
+ * Don't look for additional matches if this one does
+ * not have either the SO_REUSEPORT or SO_REUSEADDR
+ * socket options set. This heuristic avoids
+ * searching through all pcbs in the common case of a
+ * non-shared port. It assumes that an application
+ * will never clear these options after setting them.
+ */
+ if ((last->inp_socket->so_options &
+ (SO_REUSEPORT|SO_REUSEADDR)) == 0)
+ break;
+ }
+
+ if (last == NULL) {
+ /*
+ * No matching pcb found; discard datagram. (No need
+ * to send an ICMP Port Unreachable for a broadcast
+ * or multicast datagram.)
+ */
+ UDPSTAT_INC(udps_noport);
+ UDPSTAT_INC(udps_noportmcast);
+ goto badheadlocked;
+ }
+ INP_RLOCK(last);
+ INP_INFO_RUNLOCK(&V_udbinfo);
+ up = intoudpcb(last);
+ if (up->u_tun_func == NULL) {
+ udp6_append(last, m, off, &fromsa);
+ } else {
+ /*
+ * Engage the tunneling protocol.
+ */
+ (*up->u_tun_func)(m, off, last);
+ }
+ INP_RUNLOCK(last);
+ return (IPPROTO_DONE);
+ }
+ /*
+ * Locate pcb for datagram.
+ */
+ inp = in6_pcblookup_hash(&V_udbinfo, &ip6->ip6_src, uh->uh_sport,
+ &ip6->ip6_dst, uh->uh_dport, 1, m->m_pkthdr.rcvif);
+ if (inp == NULL) {
+ if (udp_log_in_vain) {
+ char ip6bufs[INET6_ADDRSTRLEN];
+ char ip6bufd[INET6_ADDRSTRLEN];
+
+ log(LOG_INFO,
+ "Connection attempt to UDP [%s]:%d from [%s]:%d\n",
+ ip6_sprintf(ip6bufd, &ip6->ip6_dst),
+ ntohs(uh->uh_dport),
+ ip6_sprintf(ip6bufs, &ip6->ip6_src),
+ ntohs(uh->uh_sport));
+ }
+ UDPSTAT_INC(udps_noport);
+ if (m->m_flags & M_MCAST) {
+ printf("UDP6: M_MCAST is set in a unicast packet.\n");
+ UDPSTAT_INC(udps_noportmcast);
+ goto badheadlocked;
+ }
+ INP_INFO_RUNLOCK(&V_udbinfo);
+ if (V_udp_blackhole)
+ goto badunlocked;
+ if (badport_bandlim(BANDLIM_ICMP6_UNREACH) < 0)
+ goto badunlocked;
+ icmp6_error(m, ICMP6_DST_UNREACH, ICMP6_DST_UNREACH_NOPORT, 0);
+ return (IPPROTO_DONE);
+ }
+ INP_RLOCK(inp);
+ INP_INFO_RUNLOCK(&V_udbinfo);
+ up = intoudpcb(inp);
+ if (up->u_tun_func == NULL) {
+ udp6_append(inp, m, off, &fromsa);
+ } else {
+ /*
+ * Engage the tunneling protocol.
+ */
+
+ (*up->u_tun_func)(m, off, inp);
+ }
+ INP_RUNLOCK(inp);
+ return (IPPROTO_DONE);
+
+badheadlocked:
+ INP_INFO_RUNLOCK(&V_udbinfo);
+badunlocked:
+ if (m)
+ m_freem(m);
+ return (IPPROTO_DONE);
+}
+
+void
+udp6_ctlinput(int cmd, struct sockaddr *sa, void *d)
+{
+ struct udphdr uh;
+ struct ip6_hdr *ip6;
+ struct mbuf *m;
+ int off = 0;
+ struct ip6ctlparam *ip6cp = NULL;
+ const struct sockaddr_in6 *sa6_src = NULL;
+ void *cmdarg;
+ struct inpcb *(*notify)(struct inpcb *, int) = udp_notify;
+ struct udp_portonly {
+ u_int16_t uh_sport;
+ u_int16_t uh_dport;
+ } *uhp;
+
+ if (sa->sa_family != AF_INET6 ||
+ sa->sa_len != sizeof(struct sockaddr_in6))
+ return;
+
+ if ((unsigned)cmd >= PRC_NCMDS)
+ return;
+ if (PRC_IS_REDIRECT(cmd))
+ notify = in6_rtchange, d = NULL;
+ else if (cmd == PRC_HOSTDEAD)
+ d = NULL;
+ else if (inet6ctlerrmap[cmd] == 0)
+ return;
+
+ /* if the parameter is from icmp6, decode it. */
+ if (d != NULL) {
+ ip6cp = (struct ip6ctlparam *)d;
+ m = ip6cp->ip6c_m;
+ ip6 = ip6cp->ip6c_ip6;
+ off = ip6cp->ip6c_off;
+ cmdarg = ip6cp->ip6c_cmdarg;
+ sa6_src = ip6cp->ip6c_src;
+ } else {
+ m = NULL;
+ ip6 = NULL;
+ cmdarg = NULL;
+ sa6_src = &sa6_any;
+ }
+
+ if (ip6) {
+ /*
+ * XXX: We assume that when IPV6 is non NULL,
+ * M and OFF are valid.
+ */
+
+ /* Check if we can safely examine src and dst ports. */
+ if (m->m_pkthdr.len < off + sizeof(*uhp))
+ return;
+
+ bzero(&uh, sizeof(uh));
+ m_copydata(m, off, sizeof(*uhp), (caddr_t)&uh);
+
+ (void) in6_pcbnotify(&V_udbinfo, sa, uh.uh_dport,
+ (struct sockaddr *)ip6cp->ip6c_src, uh.uh_sport, cmd,
+ cmdarg, notify);
+ } else
+ (void) in6_pcbnotify(&V_udbinfo, sa, 0,
+ (const struct sockaddr *)sa6_src, 0, cmd, cmdarg, notify);
+}
+
+static int
+udp6_getcred(SYSCTL_HANDLER_ARGS)
+{
+ struct xucred xuc;
+ struct sockaddr_in6 addrs[2];
+ struct inpcb *inp;
+ int error;
+
+ error = priv_check(req->td, PRIV_NETINET_GETCRED);
+ if (error)
+ return (error);
+
+ if (req->newlen != sizeof(addrs))
+ return (EINVAL);
+ if (req->oldlen != sizeof(struct xucred))
+ return (EINVAL);
+ error = SYSCTL_IN(req, addrs, sizeof(addrs));
+ if (error)
+ return (error);
+ if ((error = sa6_embedscope(&addrs[0], V_ip6_use_defzone)) != 0 ||
+ (error = sa6_embedscope(&addrs[1], V_ip6_use_defzone)) != 0) {
+ return (error);
+ }
+ INP_INFO_RLOCK(&V_udbinfo);
+ inp = in6_pcblookup_hash(&V_udbinfo, &addrs[1].sin6_addr,
+ addrs[1].sin6_port, &addrs[0].sin6_addr, addrs[0].sin6_port, 1,
+ NULL);
+ if (inp != NULL) {
+ INP_RLOCK(inp);
+ INP_INFO_RUNLOCK(&V_udbinfo);
+ if (inp->inp_socket == NULL)
+ error = ENOENT;
+ if (error == 0)
+ error = cr_canseesocket(req->td->td_ucred,
+ inp->inp_socket);
+ if (error == 0)
+ cru2x(inp->inp_cred, &xuc);
+ INP_RUNLOCK(inp);
+ } else {
+ INP_INFO_RUNLOCK(&V_udbinfo);
+ error = ENOENT;
+ }
+ if (error == 0)
+ error = SYSCTL_OUT(req, &xuc, sizeof(struct xucred));
+ return (error);
+}
+
+SYSCTL_PROC(_net_inet6_udp6, OID_AUTO, getcred, CTLTYPE_OPAQUE|CTLFLAG_RW, 0,
+ 0, udp6_getcred, "S,xucred", "Get the xucred of a UDP6 connection");
+
+static int
+udp6_output(struct inpcb *inp, struct mbuf *m, struct sockaddr *addr6,
+ struct mbuf *control, struct thread *td)
+{
+ u_int32_t ulen = m->m_pkthdr.len;
+ u_int32_t plen = sizeof(struct udphdr) + ulen;
+ struct ip6_hdr *ip6;
+ struct udphdr *udp6;
+ struct in6_addr *laddr, *faddr, in6a;
+ struct sockaddr_in6 *sin6 = NULL;
+ struct ifnet *oifp = NULL;
+ int scope_ambiguous = 0;
+ u_short fport;
+ int error = 0;
+ struct ip6_pktopts *optp, opt;
+ int af = AF_INET6, hlen = sizeof(struct ip6_hdr);
+ int flags;
+ struct sockaddr_in6 tmp;
+
+ INP_WLOCK_ASSERT(inp);
+
+ if (addr6) {
+ /* addr6 has been validated in udp6_send(). */
+ sin6 = (struct sockaddr_in6 *)addr6;
+
+ /* protect *sin6 from overwrites */
+ tmp = *sin6;
+ sin6 = &tmp;
+
+ /*
+ * The application should provide a proper zone ID, or the use
+ * of default zone IDs should be enabled. Unfortunately, some
+ * applications do not behave as they should, so we need a
+ * workaround. Even if an appropriate ID is not determined,
+ * we'll see if we can determine the outgoing interface. If we
+ * can, determine the zone ID based on the interface below.
+ */
+ if (sin6->sin6_scope_id == 0 && !V_ip6_use_defzone)
+ scope_ambiguous = 1;
+ if ((error = sa6_embedscope(sin6, V_ip6_use_defzone)) != 0)
+ return (error);
+ }
+
+ if (control) {
+ if ((error = ip6_setpktopts(control, &opt,
+ inp->in6p_outputopts, td->td_ucred, IPPROTO_UDP)) != 0)
+ goto release;
+ optp = &opt;
+ } else
+ optp = inp->in6p_outputopts;
+
+ if (sin6) {
+ faddr = &sin6->sin6_addr;
+
+ /*
+ * IPv4 version of udp_output calls in_pcbconnect in this case,
+ * which needs splnet and affects performance.
+ * Since we saw no essential reason for calling in_pcbconnect,
+ * we get rid of such kind of logic, and call in6_selectsrc
+ * and in6_pcbsetport in order to fill in the local address
+ * and the local port.
+ */
+ if (sin6->sin6_port == 0) {
+ error = EADDRNOTAVAIL;
+ goto release;
+ }
+
+ if (!IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_faddr)) {
+ /* how about ::ffff:0.0.0.0 case? */
+ error = EISCONN;
+ goto release;
+ }
+
+ fport = sin6->sin6_port; /* allow 0 port */
+
+ if (IN6_IS_ADDR_V4MAPPED(faddr)) {
+ if ((inp->inp_flags & IN6P_IPV6_V6ONLY)) {
+ /*
+ * I believe we should explicitly discard the
+ * packet when mapped addresses are disabled,
+ * rather than send the packet as an IPv6 one.
+ * If we chose the latter approach, the packet
+ * might be sent out on the wire based on the
+ * default route, a situation which we'd
+ * probably want to avoid.
+ * (20010421 jinmei@kame.net)
+ */
+ error = EINVAL;
+ goto release;
+ }
+ if (!IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr) &&
+ !IN6_IS_ADDR_V4MAPPED(&inp->in6p_laddr)) {
+ /*
+ * when the remote addr is an IPv4-mapped address,
+ * the local addr should not be an IPv6 address,
+ * since you cannot determine how to map an IPv6
+ * source address to IPv4.
+ */
+ error = EINVAL;
+ goto release;
+ }
+
+ af = AF_INET;
+ }
+
+ if (!IN6_IS_ADDR_V4MAPPED(faddr)) {
+ error = in6_selectsrc(sin6, optp, inp, NULL,
+ td->td_ucred, &oifp, &in6a);
+ if (error)
+ goto release;
+ if (oifp && scope_ambiguous &&
+ (error = in6_setscope(&sin6->sin6_addr,
+ oifp, NULL))) {
+ goto release;
+ }
+ laddr = &in6a;
+ } else
+ laddr = &inp->in6p_laddr; /* XXX */
+ if (laddr == NULL) {
+ if (error == 0)
+ error = EADDRNOTAVAIL;
+ goto release;
+ }
+ if (inp->inp_lport == 0 &&
+ (error = in6_pcbsetport(laddr, inp, td->td_ucred)) != 0)
+ goto release;
+ } else {
+ if (IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_faddr)) {
+ error = ENOTCONN;
+ goto release;
+ }
+ if (IN6_IS_ADDR_V4MAPPED(&inp->in6p_faddr)) {
+ if ((inp->inp_flags & IN6P_IPV6_V6ONLY)) {
+ /*
+ * XXX: this case would happen when the
+ * application sets the V6ONLY flag after
+ * connecting to the foreign address.
+ * Such applications should be fixed,
+ * so we bark here.
+ */
+ log(LOG_INFO, "udp6_output: IPV6_V6ONLY "
+ "option was set for a connected socket\n");
+ error = EINVAL;
+ goto release;
+ } else
+ af = AF_INET;
+ }
+ laddr = &inp->in6p_laddr;
+ faddr = &inp->in6p_faddr;
+ fport = inp->inp_fport;
+ }
+
+ if (af == AF_INET)
+ hlen = sizeof(struct ip);
+
+ /*
+ * Calculate the data length and get an mbuf
+ * for UDP and IP6 headers.
+ */
+ M_PREPEND(m, hlen + sizeof(struct udphdr), M_DONTWAIT);
+ if (m == NULL) {
+ error = ENOBUFS;
+ goto release;
+ }
+
+ /*
+ * Stuff checksum and output datagram.
+ */
+ udp6 = (struct udphdr *)(mtod(m, caddr_t) + hlen);
+ udp6->uh_sport = inp->inp_lport; /* lport is always set in the PCB */
+ udp6->uh_dport = fport;
+ if (plen <= 0xffff)
+ udp6->uh_ulen = htons((u_short)plen);
+ else
+ udp6->uh_ulen = 0;
+ udp6->uh_sum = 0;
+
+ switch (af) {
+ case AF_INET6:
+ ip6 = mtod(m, struct ip6_hdr *);
+ ip6->ip6_flow = inp->inp_flow & IPV6_FLOWINFO_MASK;
+ ip6->ip6_vfc &= ~IPV6_VERSION_MASK;
+ ip6->ip6_vfc |= IPV6_VERSION;
+#if 0 /* ip6_plen will be filled in ip6_output. */
+ ip6->ip6_plen = htons((u_short)plen);
+#endif
+ ip6->ip6_nxt = IPPROTO_UDP;
+ ip6->ip6_hlim = in6_selecthlim(inp, NULL);
+ ip6->ip6_src = *laddr;
+ ip6->ip6_dst = *faddr;
+
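+ /*
+ * UDP checksums are mandatory over IPv6; a computed sum of zero
+ * is transmitted as all ones (see RFC 2460 and RFC 768).
+ */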
+ if ((udp6->uh_sum = in6_cksum(m, IPPROTO_UDP,
+ sizeof(struct ip6_hdr), plen)) == 0) {
+ udp6->uh_sum = 0xffff;
+ }
+
+ flags = 0;
+
+ UDPSTAT_INC(udps_opackets);
+ error = ip6_output(m, optp, NULL, flags, inp->in6p_moptions,
+ NULL, inp);
+ break;
+ case AF_INET:
+ error = EAFNOSUPPORT;
+ goto release;
+ }
+ goto releaseopt;
+
+release:
+ m_freem(m);
+
+releaseopt:
+ if (control) {
+ ip6_clearpktopts(&opt, -1);
+ m_freem(control);
+ }
+ return (error);
+}
+
+static void
+udp6_abort(struct socket *so)
+{
+ struct inpcb *inp;
+
+ inp = sotoinpcb(so);
+ KASSERT(inp != NULL, ("udp6_abort: inp == NULL"));
+
+#ifdef INET
+ if (inp->inp_vflag & INP_IPV4) {
+ struct pr_usrreqs *pru;
+
+ pru = inetsw[ip_protox[IPPROTO_UDP]].pr_usrreqs;
+ (*pru->pru_abort)(so);
+ return;
+ }
+#endif
+
+ INP_INFO_WLOCK(&V_udbinfo);
+ INP_WLOCK(inp);
+ if (!IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_faddr)) {
+ in6_pcbdisconnect(inp);
+ inp->in6p_laddr = in6addr_any;
+ soisdisconnected(so);
+ }
+ INP_WUNLOCK(inp);
+ INP_INFO_WUNLOCK(&V_udbinfo);
+}
+
+static int
+udp6_attach(struct socket *so, int proto, struct thread *td)
+{
+ struct inpcb *inp;
+ int error;
+
+ inp = sotoinpcb(so);
+ KASSERT(inp == NULL, ("udp6_attach: inp != NULL"));
+
+ if (so->so_snd.sb_hiwat == 0 || so->so_rcv.sb_hiwat == 0) {
+ error = soreserve(so, udp_sendspace, udp_recvspace);
+ if (error)
+ return (error);
+ }
+ INP_INFO_WLOCK(&V_udbinfo);
+ error = in_pcballoc(so, &V_udbinfo);
+ if (error) {
+ INP_INFO_WUNLOCK(&V_udbinfo);
+ return (error);
+ }
+ inp = (struct inpcb *)so->so_pcb;
+ inp->inp_vflag |= INP_IPV6;
+ if ((inp->inp_flags & IN6P_IPV6_V6ONLY) == 0)
+ inp->inp_vflag |= INP_IPV4;
+ inp->in6p_hops = -1; /* use kernel default */
+ inp->in6p_cksum = -1; /* just to be sure */
+ /*
+ * XXX: ugly!!
+ * IPv4 TTL initialization is necessary for an IPv6 socket as well,
+ * because the socket may be bound to an IPv6 wildcard address,
+ * which may match an IPv4-mapped IPv6 address.
+ */
+ inp->inp_ip_ttl = V_ip_defttl;
+
+ error = udp_newudpcb(inp);
+ if (error) {
+ in_pcbdetach(inp);
+ in_pcbfree(inp);
+ INP_INFO_WUNLOCK(&V_udbinfo);
+ return (error);
+ }
+ INP_WUNLOCK(inp);
+ INP_INFO_WUNLOCK(&V_udbinfo);
+ return (0);
+}
+
+static int
+udp6_bind(struct socket *so, struct sockaddr *nam, struct thread *td)
+{
+ struct inpcb *inp;
+ int error;
+
+ inp = sotoinpcb(so);
+ KASSERT(inp != NULL, ("udp6_bind: inp == NULL"));
+
+ INP_INFO_WLOCK(&V_udbinfo);
+ INP_WLOCK(inp);
+ inp->inp_vflag &= ~INP_IPV4;
+ inp->inp_vflag |= INP_IPV6;
+ if ((inp->inp_flags & IN6P_IPV6_V6ONLY) == 0) {
+ struct sockaddr_in6 *sin6_p;
+
+ sin6_p = (struct sockaddr_in6 *)nam;
+
+ if (IN6_IS_ADDR_UNSPECIFIED(&sin6_p->sin6_addr))
+ inp->inp_vflag |= INP_IPV4;
+ else if (IN6_IS_ADDR_V4MAPPED(&sin6_p->sin6_addr)) {
+ struct sockaddr_in sin;
+
+ in6_sin6_2_sin(&sin, sin6_p);
+ inp->inp_vflag |= INP_IPV4;
+ inp->inp_vflag &= ~INP_IPV6;
+ error = in_pcbbind(inp, (struct sockaddr *)&sin,
+ td->td_ucred);
+ goto out;
+ }
+ }
+
+ error = in6_pcbbind(inp, nam, td->td_ucred);
+out:
+ INP_WUNLOCK(inp);
+ INP_INFO_WUNLOCK(&V_udbinfo);
+ return (error);
+}
+
+static void
+udp6_close(struct socket *so)
+{
+ struct inpcb *inp;
+
+ inp = sotoinpcb(so);
+ KASSERT(inp != NULL, ("udp6_close: inp == NULL"));
+
+#ifdef INET
+ if (inp->inp_vflag & INP_IPV4) {
+ struct pr_usrreqs *pru;
+
+ pru = inetsw[ip_protox[IPPROTO_UDP]].pr_usrreqs;
+ (*pru->pru_disconnect)(so);
+ return;
+ }
+#endif
+ INP_INFO_WLOCK(&V_udbinfo);
+ INP_WLOCK(inp);
+ if (!IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_faddr)) {
+ in6_pcbdisconnect(inp);
+ inp->in6p_laddr = in6addr_any;
+ soisdisconnected(so);
+ }
+ INP_WUNLOCK(inp);
+ INP_INFO_WUNLOCK(&V_udbinfo);
+}
+
+static int
+udp6_connect(struct socket *so, struct sockaddr *nam, struct thread *td)
+{
+ struct inpcb *inp;
+ struct sockaddr_in6 *sin6;
+ int error;
+
+ inp = sotoinpcb(so);
+ sin6 = (struct sockaddr_in6 *)nam;
+ KASSERT(inp != NULL, ("udp6_connect: inp == NULL"));
+
+ INP_INFO_WLOCK(&V_udbinfo);
+ INP_WLOCK(inp);
+ if ((inp->inp_flags & IN6P_IPV6_V6ONLY) == 0 &&
+ IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
+ struct sockaddr_in sin;
+
+ if (inp->inp_faddr.s_addr != INADDR_ANY) {
+ error = EISCONN;
+ goto out;
+ }
+ in6_sin6_2_sin(&sin, sin6);
+ error = prison_remote_ip4(td->td_ucred, &sin.sin_addr);
+ if (error != 0)
+ goto out;
+ error = in_pcbconnect(inp, (struct sockaddr *)&sin,
+ td->td_ucred);
+ if (error == 0) {
+ inp->inp_vflag |= INP_IPV4;
+ inp->inp_vflag &= ~INP_IPV6;
+ soisconnected(so);
+ }
+ goto out;
+ }
+ if (!IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_faddr)) {
+ error = EISCONN;
+ goto out;
+ }
+ error = prison_remote_ip6(td->td_ucred, &sin6->sin6_addr);
+ if (error != 0)
+ goto out;
+ error = in6_pcbconnect(inp, nam, td->td_ucred);
+ if (error == 0) {
+ if ((inp->inp_flags & IN6P_IPV6_V6ONLY) == 0) {
+ /* should be a non-mapped address */
+ inp->inp_vflag &= ~INP_IPV4;
+ inp->inp_vflag |= INP_IPV6;
+ }
+ soisconnected(so);
+ }
+out:
+ INP_WUNLOCK(inp);
+ INP_INFO_WUNLOCK(&V_udbinfo);
+ return (error);
+}
+
+static void
+udp6_detach(struct socket *so)
+{
+ struct inpcb *inp;
+ struct udpcb *up;
+
+ inp = sotoinpcb(so);
+ KASSERT(inp != NULL, ("udp6_detach: inp == NULL"));
+
+ INP_INFO_WLOCK(&V_udbinfo);
+ INP_WLOCK(inp);
+ up = intoudpcb(inp);
+ KASSERT(up != NULL, ("%s: up == NULL", __func__));
+ in_pcbdetach(inp);
+ in_pcbfree(inp);
+ INP_INFO_WUNLOCK(&V_udbinfo);
+ udp_discardcb(up);
+}
+
+static int
+udp6_disconnect(struct socket *so)
+{
+ struct inpcb *inp;
+ int error;
+
+ inp = sotoinpcb(so);
+ KASSERT(inp != NULL, ("udp6_disconnect: inp == NULL"));
+
+ INP_INFO_WLOCK(&V_udbinfo);
+ INP_WLOCK(inp);
+
+#ifdef INET
+ if (inp->inp_vflag & INP_IPV4) {
+ struct pr_usrreqs *pru;
+
+ pru = inetsw[ip_protox[IPPROTO_UDP]].pr_usrreqs;
+ error = (*pru->pru_disconnect)(so);
+ goto out;
+ }
+#endif
+
+ if (IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_faddr)) {
+ error = ENOTCONN;
+ goto out;
+ }
+
+ in6_pcbdisconnect(inp);
+ inp->in6p_laddr = in6addr_any;
+ SOCK_LOCK(so);
+ so->so_state &= ~SS_ISCONNECTED; /* XXX */
+ SOCK_UNLOCK(so);
+ error = 0;
+out:
+ INP_WUNLOCK(inp);
+ INP_INFO_WUNLOCK(&V_udbinfo);
+ return (error);
+}
+
+static int
+udp6_send(struct socket *so, int flags, struct mbuf *m,
+ struct sockaddr *addr, struct mbuf *control, struct thread *td)
+{
+ struct inpcb *inp;
+ int error = 0;
+
+ inp = sotoinpcb(so);
+ KASSERT(inp != NULL, ("udp6_send: inp == NULL"));
+
+ INP_INFO_WLOCK(&V_udbinfo);
+ INP_WLOCK(inp);
+ if (addr) {
+ if (addr->sa_len != sizeof(struct sockaddr_in6)) {
+ error = EINVAL;
+ goto bad;
+ }
+ if (addr->sa_family != AF_INET6) {
+ error = EAFNOSUPPORT;
+ goto bad;
+ }
+ }
+
+#ifdef INET
+ if ((inp->inp_flags & IN6P_IPV6_V6ONLY) == 0) {
+ int hasv4addr;
+ struct sockaddr_in6 *sin6 = NULL;
+
+ if (addr == NULL)
+ hasv4addr = (inp->inp_vflag & INP_IPV4);
+ else {
+ sin6 = (struct sockaddr_in6 *)addr;
+ hasv4addr = IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)
+ ? 1 : 0;
+ }
+ if (hasv4addr) {
+ struct pr_usrreqs *pru;
+
+ if (!IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr) &&
+ !IN6_IS_ADDR_V4MAPPED(&inp->in6p_laddr)) {
+ /*
+ * When the remote address is an IPv4-mapped
+ * address, the local address must not be an
+ * IPv6 address, since there is no way to map
+ * an IPv6 source address to IPv4.
+ */
+ error = EINVAL;
+ goto out;
+ }
+
+ /*
+ * XXXRW: We release UDP-layer locks before calling
+ * udp_send() in order to avoid recursion. However,
+ * this does mean there is a short window where inp's
+ * fields are unstable. Could this lead to a
+ * potential race in which the factors causing us to
+ * select the UDPv4 output routine are invalidated?
+ */
+ INP_WUNLOCK(inp);
+ INP_INFO_WUNLOCK(&V_udbinfo);
+ if (sin6)
+ in6_sin6_2_sin_in_sock(addr);
+ pru = inetsw[ip_protox[IPPROTO_UDP]].pr_usrreqs;
+ /* addr will just be freed in sendit(). */
+ return ((*pru->pru_send)(so, flags, m, addr, control,
+ td));
+ }
+ }
+#endif
+#ifdef MAC
+ mac_inpcb_create_mbuf(inp, m);
+#endif
+ error = udp6_output(inp, m, addr, control, td);
+out:
+ INP_WUNLOCK(inp);
+ INP_INFO_WUNLOCK(&V_udbinfo);
+ return (error);
+
+bad:
+ INP_WUNLOCK(inp);
+ INP_INFO_WUNLOCK(&V_udbinfo);
+ m_freem(m);
+ return (error);
+}
+
+struct pr_usrreqs udp6_usrreqs = {
+ .pru_abort = udp6_abort,
+ .pru_attach = udp6_attach,
+ .pru_bind = udp6_bind,
+ .pru_connect = udp6_connect,
+ .pru_control = in6_control,
+ .pru_detach = udp6_detach,
+ .pru_disconnect = udp6_disconnect,
+ .pru_peeraddr = in6_mapped_peeraddr,
+ .pru_send = udp6_send,
+ .pru_shutdown = udp_shutdown,
+ .pru_sockaddr = in6_mapped_sockaddr,
+ .pru_soreceive = soreceive_dgram,
+ .pru_sosend = sosend_dgram,
+ .pru_sosetlabel = in_pcbsosetlabel,
+ .pru_close = udp6_close
+};
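
Editorial aside, not part of the patch: the usrreqs table above is the dispatch surface the socket layer uses for every AF_INET6 UDP socket. Below is a minimal user-space sketch (the helper name dual_stack_demo is hypothetical; only the standard sockets API is assumed) of the dual-stack behaviour implemented by udp6_attach()/udp6_send(): with IPV6_V6ONLY cleared, a datagram addressed to an IPv4-mapped destination is diverted to the IPv4 UDP output path.

	#include <sys/socket.h>
	#include <netinet/in.h>
	#include <arpa/inet.h>
	#include <string.h>
	#include <unistd.h>

	static int
	dual_stack_demo(void)
	{
		struct sockaddr_in6 dst;
		int s, off = 0;
		const char msg[] = "ping";

		s = socket(AF_INET6, SOCK_DGRAM, 0);	/* ends up in udp6_attach() */
		if (s < 0)
			return (-1);
		/* Clear V6ONLY so IPv4-mapped destinations are permitted. */
		(void)setsockopt(s, IPPROTO_IPV6, IPV6_V6ONLY, &off, sizeof(off));

		memset(&dst, 0, sizeof(dst));
		dst.sin6_family = AF_INET6;
		dst.sin6_len = sizeof(dst);		/* BSD-style sockaddr */
		dst.sin6_port = htons(9);		/* discard service */
		(void)inet_pton(AF_INET6, "::ffff:127.0.0.1", &dst.sin6_addr);

		/*
		 * udp6_send() notices the IPv4-mapped destination, converts
		 * the sockaddr with in6_sin6_2_sin_in_sock() and hands the
		 * mbuf to the IPv4 pru_send entry point.
		 */
		if (sendto(s, msg, sizeof(msg), 0,
		    (struct sockaddr *)&dst, sizeof(dst)) < 0) {
			close(s);
			return (-1);
		}
		close(s);
		return (0);
	}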
diff --git a/rtems/freebsd/netinet6/udp6_var.h b/rtems/freebsd/netinet6/udp6_var.h
new file mode 100644
index 00000000..f52503cd
--- /dev/null
+++ b/rtems/freebsd/netinet6/udp6_var.h
@@ -0,0 +1,75 @@
+/*-
+ * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the project nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*-
+ * Copyright (c) 1982, 1986, 1989, 1993
+ * The Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)udp_var.h 8.1 (Berkeley) 6/10/93
+ * $FreeBSD$
+ */
+
+#ifndef _NETINET6_UDP6_VAR_HH_
+#define _NETINET6_UDP6_VAR_HH_
+
+#ifdef _KERNEL
+SYSCTL_DECL(_net_inet6_udp6);
+
+extern struct pr_usrreqs udp6_usrreqs;
+
+void udp6_ctlinput(int, struct sockaddr *, void *);
+int udp6_input(struct mbuf **, int *, int);
+#endif
+
+#endif /*_NETINET6_UDP6_VAR_HH_*/
diff --git a/rtems/freebsd/netipsec/ah.h b/rtems/freebsd/netipsec/ah.h
new file mode 100644
index 00000000..6a35089b
--- /dev/null
+++ b/rtems/freebsd/netipsec/ah.h
@@ -0,0 +1,56 @@
+/* $FreeBSD$ */
+/* $KAME: ah.h,v 1.13 2000/10/18 21:28:00 itojun Exp $ */
+
+/*-
+ * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the project nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * RFC1826/2402 authentication header.
+ */
+
+#ifndef _NETIPSEC_AH_HH_
+#define _NETIPSEC_AH_HH_
+
+struct ah {
+ u_int8_t ah_nxt; /* Next Header */
+ u_int8_t ah_len; /* Length of data, in 32-bit words */
+ u_int16_t ah_reserve; /* Reserved for future use */
+ u_int32_t ah_spi; /* Security parameter index */
+ /* variable size, 32bit bound*/ /* Authentication data */
+};
+
+struct newah {
+ u_int8_t ah_nxt; /* Next Header */
+ u_int8_t ah_len; /* Length of data + 1, in 32-bit words */
+ u_int16_t ah_reserve; /* Reserved for future use */
+ u_int32_t ah_spi; /* Security parameter index */
+ u_int32_t ah_seq; /* Sequence number field */
+ /* variable size, 32bit bound*/ /* Authentication data */
+};
+#endif /*_NETIPSEC_AH_HH_*/
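
Editorial aside, not part of the patch: ah_len is counted in 32-bit words, but the two header variants count differently. RFC 2402 defines the field as the length of the whole AH minus two words, while the RFC 1826 header counts only the authentication data. An illustrative sketch (hypothetical helper names) of the resulting byte lengths; the same (len + 2) << 2 arithmetic appears later in ipsec4_get_ulp() when skipping an AH:

	/* RFC 2402: ah_len = length of the AH in 32-bit words, minus 2. */
	static __inline size_t
	newah_total_len(const struct newah *ah)
	{
		return ((size_t)(ah->ah_len + 2) << 2);
	}

	/* RFC 1826: ah_len = length of the authentication data in words. */
	static __inline size_t
	ah_total_len(const struct ah *ah)
	{
		return (sizeof(struct ah) + ((size_t)ah->ah_len << 2));
	}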
diff --git a/rtems/freebsd/netipsec/ah_var.h b/rtems/freebsd/netipsec/ah_var.h
new file mode 100644
index 00000000..f82ea401
--- /dev/null
+++ b/rtems/freebsd/netipsec/ah_var.h
@@ -0,0 +1,82 @@
+/* $FreeBSD$ */
+/* $OpenBSD: ip_ah.h,v 1.29 2002/06/09 16:26:10 itojun Exp $ */
+/*-
+ * The authors of this code are John Ioannidis (ji@tla.org),
+ * Angelos D. Keromytis (kermit@csd.uch.gr) and
+ * Niels Provos (provos@physnet.uni-hamburg.de).
+ *
+ * The original version of this code was written by John Ioannidis
+ * for BSD/OS in Athens, Greece, in November 1995.
+ *
+ * Ported to OpenBSD and NetBSD, with additional transforms, in December 1996,
+ * by Angelos D. Keromytis.
+ *
+ * Additional transforms and features in 1997 and 1998 by Angelos D. Keromytis
+ * and Niels Provos.
+ *
+ * Additional features in 1999 by Angelos D. Keromytis.
+ *
+ * Copyright (C) 1995, 1996, 1997, 1998, 1999 John Ioannidis,
+ * Angelos D. Keromytis and Niels Provos.
+ * Copyright (c) 2001 Angelos D. Keromytis.
+ *
+ * Permission to use, copy, and modify this software with or without fee
+ * is hereby granted, provided that this entire notice is included in
+ * all copies of any software which is or includes a copy or
+ * modification of this software.
+ * You may use this code under the GNU public license if you so wish. Please
+ * contribute changes back to the authors under this freer than GPL license
+ * so that we may further the use of strong encryption without limitations to
+ * all.
+ *
+ * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
+ * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
+ * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
+ * PURPOSE.
+ */
+
+#ifndef _NETIPSEC_AH_VAR_HH_
+#define _NETIPSEC_AH_VAR_HH_
+
+/*
+ * These define the algorithm indices into the histogram. They are
+ * presently based on the PF_KEY v2 protocol values, which is bogus;
+ * they should be decoupled from the protocol, at which time we can
+ * pack them and reduce the size of the array to a minimum.
+ */
+#define AH_ALG_MAX 16
+
+struct ahstat {
+ u_int32_t ahs_hdrops; /* Packet shorter than header shows */
+ u_int32_t ahs_nopf; /* Protocol family not supported */
+ u_int32_t ahs_notdb; /* Packet dropped, no SA/TDB found */
+ u_int32_t ahs_badkcr; /* Bad kernel crypto request */
+ u_int32_t ahs_badauth; /* Authentication check failed */
+ u_int32_t ahs_noxform; /* Packet dropped, no transform */
+ u_int32_t ahs_qfull; /* Packet dropped, queue full */
+ u_int32_t ahs_wrap; /* Replay counter wrapped around */
+ u_int32_t ahs_replay; /* Possible packet replay detected */
+ u_int32_t ahs_badauthl; /* Bad authenticator length */
+ u_int32_t ahs_input; /* Input AH packets */
+ u_int32_t ahs_output; /* Output AH packets */
+ u_int32_t ahs_invalid; /* Trying to use an invalid TDB */
+ u_int64_t ahs_ibytes; /* Input bytes */
+ u_int64_t ahs_obytes; /* Output bytes */
+ u_int32_t ahs_toobig; /* Packet got larger than IP_MAXPACKET */
+ u_int32_t ahs_pdrops; /* Packet blocked due to policy */
+ u_int32_t ahs_crypto; /* Crypto processing failure */
+ u_int32_t ahs_tunnel; /* Tunnel sanity check failure */
+ u_int32_t ahs_hist[AH_ALG_MAX]; /* Per-algorithm op count */
+};
+
+#ifdef _KERNEL
+VNET_DECLARE(int, ah_enable);
+VNET_DECLARE(int, ah_cleartos);
+VNET_DECLARE(struct ahstat, ahstat);
+
+#define V_ah_enable VNET(ah_enable)
+#define V_ah_cleartos VNET(ah_cleartos)
+#define V_ahstat VNET(ahstat)
+#endif /* _KERNEL */
+#endif /*_NETIPSEC_AH_VAR_HH_*/
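
Editorial aside, not part of the patch: since the histogram above is indexed directly by the PF_KEY v2 algorithm number, every update has to be range-checked against AH_ALG_MAX. A small illustrative sketch (hypothetical helper name):

	static __inline void
	ahstat_count_alg(struct ahstat *stat, u_int alg)
	{
		/* PF_KEY algorithm numbers index the histogram directly. */
		if (alg < AH_ALG_MAX)
			stat->ahs_hist[alg]++;
	}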
diff --git a/rtems/freebsd/netipsec/esp.h b/rtems/freebsd/netipsec/esp.h
new file mode 100644
index 00000000..282883b2
--- /dev/null
+++ b/rtems/freebsd/netipsec/esp.h
@@ -0,0 +1,69 @@
+/* $FreeBSD$ */
+/* $KAME: esp.h,v 1.16 2000/10/18 21:28:00 itojun Exp $ */
+
+/*-
+ * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the project nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * RFC1827/2406 Encapsulated Security Payload.
+ */
+
+#ifndef _NETIPSEC_ESP_HH_
+#define _NETIPSEC_ESP_HH_
+
+struct esp {
+ u_int32_t esp_spi; /* Security parameter index */
+ /*variable size, 32bit bound*/ /* Initialization Vector */
+ /*variable size*/ /* Payload data */
+ /*variable size*/ /* padding */
+ /*8bit*/ /* pad size */
+ /*8bit*/ /* next header */
+ /*variable size, 32bit bound*/ /* Authentication data (new IPsec) */
+};
+
+struct newesp {
+ u_int32_t esp_spi; /* Security parameter index */
+ u_int32_t esp_seq; /* Sequence number */
+ /*variable size*/ /* (IV and) Payload data */
+ /*variable size*/ /* padding */
+ /*8bit*/ /* pad size */
+ /*8bit*/ /* next header */
+ /*variable size, 32bit bound*/ /* Authentication data */
+};
+
+struct esptail {
+ u_int8_t esp_padlen; /* pad length */
+ u_int8_t esp_nxt; /* Next header */
+ /*variable size, 32bit bound*/ /* Authentication data (new IPsec)*/
+};
+
+#define ESP_ALEN 12 /* 96-bit authenticator */
+#endif /*_NETIPSEC_ESP_HH_*/
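
Editorial aside, not part of the patch: after decryption the esptail sits at the very end of the payload, immediately in front of the authenticator, so its position is computed from the back of the packet. An illustrative sketch (hypothetical helper name), assuming a contiguous buffer of len bytes and an alen-byte authenticator (ESP_ALEN for the classic 96-bit case):

	static __inline const struct esptail *
	esp_locate_tail(const u_int8_t *pkt, size_t len, size_t alen)
	{
		/* SPI + sequence number + trailer + authenticator must fit. */
		if (len < sizeof(struct newesp) + sizeof(struct esptail) + alen)
			return (NULL);
		return ((const struct esptail *)
		    (pkt + len - alen - sizeof(struct esptail)));
	}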
diff --git a/rtems/freebsd/netipsec/esp_var.h b/rtems/freebsd/netipsec/esp_var.h
new file mode 100644
index 00000000..eda4033b
--- /dev/null
+++ b/rtems/freebsd/netipsec/esp_var.h
@@ -0,0 +1,81 @@
+/* $FreeBSD$ */
+/* $OpenBSD: ip_esp.h,v 1.37 2002/06/09 16:26:10 itojun Exp $ */
+/*-
+ * The authors of this code are John Ioannidis (ji@tla.org),
+ * Angelos D. Keromytis (kermit@csd.uch.gr) and
+ * Niels Provos (provos@physnet.uni-hamburg.de).
+ *
+ * The original version of this code was written by John Ioannidis
+ * for BSD/OS in Athens, Greece, in November 1995.
+ *
+ * Ported to OpenBSD and NetBSD, with additional transforms, in December 1996,
+ * by Angelos D. Keromytis.
+ *
+ * Additional transforms and features in 1997 and 1998 by Angelos D. Keromytis
+ * and Niels Provos.
+ *
+ * Additional features in 1999 by Angelos D. Keromytis.
+ *
+ * Copyright (C) 1995, 1996, 1997, 1998, 1999 by John Ioannidis,
+ * Angelos D. Keromytis and Niels Provos.
+ * Copyright (c) 2001 Angelos D. Keromytis.
+ *
+ * Permission to use, copy, and modify this software with or without fee
+ * is hereby granted, provided that this entire notice is included in
+ * all copies of any software which is or includes a copy or
+ * modification of this software.
+ * You may use this code under the GNU public license if you so wish. Please
+ * contribute changes back to the authors under this freer than GPL license
+ * so that we may further the use of strong encryption without limitations to
+ * all.
+ *
+ * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
+ * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
+ * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
+ * PURPOSE.
+ */
+
+#ifndef _NETIPSEC_ESP_VAR_HH_
+#define _NETIPSEC_ESP_VAR_HH_
+
+/*
+ * These define the algorithm indices into the histogram. They are
+ * presently based on the PF_KEY v2 protocol values, which is bogus;
+ * they should be decoupled from the protocol, at which time we can
+ * pack them and reduce the size of the array to a reasonable value.
+ */
+#define ESP_ALG_MAX 256 /* NB: could be < but skipjack is 249 */
+
+struct espstat {
+ u_int32_t esps_hdrops; /* Packet shorter than header shows */
+ u_int32_t esps_nopf; /* Protocol family not supported */
+ u_int32_t esps_notdb; /* Packet dropped, no SA/TDB found */
+ u_int32_t esps_badkcr; /* Bad kernel crypto request */
+ u_int32_t esps_qfull; /* Packet dropped, queue full */
+ u_int32_t esps_noxform; /* Packet dropped, no transform */
+ u_int32_t esps_badilen; /* Invalid payload length */
+ u_int32_t esps_wrap; /* Replay counter wrapped around */
+ u_int32_t esps_badenc; /* Bad encryption detected */
+ u_int32_t esps_badauth; /* Only valid for transforms with auth */
+ u_int32_t esps_replay; /* Possible packet replay detected */
+ u_int32_t esps_input; /* Input ESP packets */
+ u_int32_t esps_output; /* Output ESP packets */
+ u_int32_t esps_invalid; /* Trying to use an invalid TDB */
+ u_int64_t esps_ibytes; /* Input bytes */
+ u_int64_t esps_obytes; /* Output bytes */
+ u_int32_t esps_toobig; /* Packet got larger than IP_MAXPACKET */
+ u_int32_t esps_pdrops; /* Packet blocked due to policy */
+ u_int32_t esps_crypto; /* Crypto processing failure */
+ u_int32_t esps_tunnel; /* Tunnel sanity check failure */
+ u_int32_t esps_hist[ESP_ALG_MAX]; /* Per-algorithm op count */
+};
+
+#ifdef _KERNEL
+VNET_DECLARE(int, esp_enable);
+VNET_DECLARE(struct espstat, espstat);
+
+#define V_esp_enable VNET(esp_enable)
+#define V_espstat VNET(espstat)
+#endif /* _KERNEL */
+#endif /*_NETIPSEC_ESP_VAR_HH_*/
diff --git a/rtems/freebsd/netipsec/ipcomp.h b/rtems/freebsd/netipsec/ipcomp.h
new file mode 100644
index 00000000..a43bf2e8
--- /dev/null
+++ b/rtems/freebsd/netipsec/ipcomp.h
@@ -0,0 +1,55 @@
+/* $FreeBSD$ */
+/* $KAME: ipcomp.h,v 1.8 2000/09/26 07:55:14 itojun Exp $ */
+
+/*-
+ * Copyright (C) 1999 WIDE Project.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the project nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * RFC2393 IP payload compression protocol (IPComp).
+ */
+
+#ifndef _NETIPSEC_IPCOMP_HH_
+#define _NETIPSEC_IPCOMP_HH_
+
+struct ipcomp {
+ u_int8_t comp_nxt; /* Next Header */
+ u_int8_t comp_flags; /* reserved, must be zero */
+ u_int16_t comp_cpi; /* Compression parameter index */
+};
+
+#define IPCOMP_HLENGTH 4 /* Length of IPCOMP header */
+
+/* well-known algorithm number (in CPI), from RFC2407 */
+#define IPCOMP_OUI 1 /* vendor specific */
+#define IPCOMP_DEFLATE 2 /* RFC2394 */
+#define IPCOMP_LZS 3 /* RFC2395 */
+#define IPCOMP_MAX 4
+
+#define IPCOMP_CPI_NEGOTIATE_MIN 256
+#endif /*_NETIPSEC_IPCOMP_HH_*/
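
Editorial aside, not part of the patch: CPI values below IPCOMP_CPI_NEGOTIATE_MIN identify well-known algorithms directly, while values at or above that threshold are meaningful only in the context of a negotiated SA. A receiver might classify an incoming CPI along these lines (hypothetical helper names):

	/* Nonzero iff the CPI directly names a well-known algorithm. */
	static __inline int
	ipcomp_cpi_is_wellknown(u_int16_t cpi)
	{
		return (cpi > 0 && cpi < IPCOMP_MAX);
	}

	/* Nonzero iff the CPI must be resolved through a negotiated SA. */
	static __inline int
	ipcomp_cpi_is_negotiated(u_int16_t cpi)
	{
		return (cpi >= IPCOMP_CPI_NEGOTIATE_MIN);
	}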
diff --git a/rtems/freebsd/netipsec/ipcomp_var.h b/rtems/freebsd/netipsec/ipcomp_var.h
new file mode 100644
index 00000000..bf4598f4
--- /dev/null
+++ b/rtems/freebsd/netipsec/ipcomp_var.h
@@ -0,0 +1,74 @@
+/* $FreeBSD$ */
+/* $KAME: ipcomp.h,v 1.8 2000/09/26 07:55:14 itojun Exp $ */
+
+/*-
+ * Copyright (C) 1999 WIDE Project.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the project nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _NETIPSEC_IPCOMP_VAR_HH_
+#define _NETIPSEC_IPCOMP_VAR_HH_
+
+/*
+ * These define the algorithm indices into the histogram. They are
+ * presently based on the PF_KEY v2 protocol values, which is bogus;
+ * they should be decoupled from the protocol, at which time we can
+ * pack them and reduce the size of the array to a minimum.
+ */
+#define IPCOMP_ALG_MAX 8
+
+#define IPCOMPSTAT_VERSION 1
+struct ipcompstat {
+ u_int32_t ipcomps_hdrops; /* Packet shorter than header shows */
+ u_int32_t ipcomps_nopf; /* Protocol family not supported */
+ u_int32_t ipcomps_notdb; /* Packet dropped, no SA/TDB found */
+ u_int32_t ipcomps_badkcr; /* Bad kernel crypto request */
+ u_int32_t ipcomps_qfull; /* Packet dropped, queue full */
+ u_int32_t ipcomps_noxform; /* Packet dropped, no transform */
+ u_int32_t ipcomps_wrap;
+ u_int32_t ipcomps_input; /* Input IPcomp packets */
+ u_int32_t ipcomps_output; /* Output IPcomp packets */
+ u_int32_t ipcomps_invalid;/* Trying to use an invalid TDB */
+ u_int64_t ipcomps_ibytes; /* Input bytes */
+ u_int64_t ipcomps_obytes; /* Output bytes */
+ u_int32_t ipcomps_toobig; /* Packet got > IP_MAXPACKET */
+ u_int32_t ipcomps_pdrops; /* Packet blocked due to policy */
+ u_int32_t ipcomps_crypto; /* "Crypto" processing failure */
+ u_int32_t ipcomps_hist[IPCOMP_ALG_MAX];/* Per-algorithm op count */
+ u_int32_t version; /* Version of this structure. */
+ u_int32_t ipcomps_threshold; /* Packet below comp. algo. threshold. */
+ u_int32_t ipcomps_uncompr; /* Compression was useless. */
+};
+
+#ifdef _KERNEL
+VNET_DECLARE(int, ipcomp_enable);
+VNET_DECLARE(struct ipcompstat, ipcompstat);
+
+#define V_ipcomp_enable VNET(ipcomp_enable)
+#define V_ipcompstat VNET(ipcompstat)
+#endif /* _KERNEL */
+#endif /*_NETIPSEC_IPCOMP_VAR_HH_*/
diff --git a/rtems/freebsd/netipsec/ipip_var.h b/rtems/freebsd/netipsec/ipip_var.h
new file mode 100644
index 00000000..51d8a554
--- /dev/null
+++ b/rtems/freebsd/netipsec/ipip_var.h
@@ -0,0 +1,68 @@
+/* $FreeBSD$ */
+/* $OpenBSD: ip_ipip.h,v 1.5 2002/06/09 16:26:10 itojun Exp $ */
+/*-
+ * The authors of this code are John Ioannidis (ji@tla.org),
+ * Angelos D. Keromytis (kermit@csd.uch.gr) and
+ * Niels Provos (provos@physnet.uni-hamburg.de).
+ *
+ * The original version of this code was written by John Ioannidis
+ * for BSD/OS in Athens, Greece, in November 1995.
+ *
+ * Ported to OpenBSD and NetBSD, with additional transforms, in December 1996,
+ * by Angelos D. Keromytis.
+ *
+ * Additional transforms and features in 1997 and 1998 by Angelos D. Keromytis
+ * and Niels Provos.
+ *
+ * Additional features in 1999 by Angelos D. Keromytis.
+ *
+ * Copyright (C) 1995, 1996, 1997, 1998, 1999 by John Ioannidis,
+ * Angelos D. Keromytis and Niels Provos.
+ * Copyright (c) 2001, Angelos D. Keromytis.
+ *
+ * Permission to use, copy, and modify this software with or without fee
+ * is hereby granted, provided that this entire notice is included in
+ * all copies of any software which is or includes a copy or
+ * modification of this software.
+ * You may use this code under the GNU public license if you so wish. Please
+ * contribute changes back to the authors under this freer than GPL license
+ * so that we may further the use of strong encryption without limitations to
+ * all.
+ *
+ * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
+ * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
+ * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
+ * PURPOSE.
+ */
+
+#ifndef _NETINET_IPIP_HH_
+#define _NETINET_IPIP_HH_
+
+/*
+ * IP-inside-IP processing.
+ * Not quite all the functionality of RFC-1853, but the main idea is there.
+ */
+
+struct ipipstat
+{
+ u_int32_t ipips_ipackets; /* total input packets */
+ u_int32_t ipips_opackets; /* total output packets */
+ u_int32_t ipips_hdrops; /* packet shorter than header shows */
+ u_int32_t ipips_qfull; /* packet dropped, queue full */
+ u_int64_t ipips_ibytes; /* total input bytes */
+ u_int64_t ipips_obytes; /* total output bytes */
+ u_int32_t ipips_pdrops; /* packet dropped due to policy */
+ u_int32_t ipips_spoof; /* IP spoofing attempts */
+ u_int32_t ipips_family; /* Protocol family mismatch */
+ u_int32_t ipips_unspec; /* Missing tunnel endpoint address */
+};
+
+#ifdef _KERNEL
+VNET_DECLARE(int, ipip_allow);
+VNET_DECLARE(struct ipipstat, ipipstat);
+
+#define V_ipip_allow VNET(ipip_allow)
+#define V_ipipstat VNET(ipipstat)
+#endif /* _KERNEL */
+#endif /* _NETINET_IPIP_HH_ */
diff --git a/rtems/freebsd/netipsec/ipsec.c b/rtems/freebsd/netipsec/ipsec.c
new file mode 100644
index 00000000..0499a32f
--- /dev/null
+++ b/rtems/freebsd/netipsec/ipsec.c
@@ -0,0 +1,1749 @@
+/* $FreeBSD$ */
+/* $KAME: ipsec.c,v 1.103 2001/05/24 07:14:18 sakane Exp $ */
+
+/*-
+ * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the project nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * IPsec controller part.
+ */
+
+#include <rtems/freebsd/local/opt_inet.h>
+#include <rtems/freebsd/local/opt_inet6.h>
+#include <rtems/freebsd/local/opt_ipsec.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/domain.h>
+#include <rtems/freebsd/sys/priv.h>
+#include <rtems/freebsd/sys/protosw.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/socketvar.h>
+#include <rtems/freebsd/sys/errno.h>
+#include <rtems/freebsd/sys/time.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/syslog.h>
+#include <rtems/freebsd/sys/sysctl.h>
+#include <rtems/freebsd/sys/proc.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/route.h>
+#include <rtems/freebsd/net/vnet.h>
+
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/in_systm.h>
+#include <rtems/freebsd/netinet/ip.h>
+#include <rtems/freebsd/netinet/ip_var.h>
+#include <rtems/freebsd/netinet/in_var.h>
+#include <rtems/freebsd/netinet/udp.h>
+#include <rtems/freebsd/netinet/udp_var.h>
+#include <rtems/freebsd/netinet/tcp.h>
+
+#include <rtems/freebsd/netinet/ip6.h>
+#ifdef INET6
+#include <rtems/freebsd/netinet6/ip6_var.h>
+#endif
+#include <rtems/freebsd/netinet/in_pcb.h>
+#ifdef INET6
+#include <rtems/freebsd/netinet/icmp6.h>
+#endif
+
+#include <rtems/freebsd/sys/types.h>
+#include <rtems/freebsd/netipsec/ipsec.h>
+#ifdef INET6
+#include <rtems/freebsd/netipsec/ipsec6.h>
+#endif
+#include <rtems/freebsd/netipsec/ah_var.h>
+#include <rtems/freebsd/netipsec/esp_var.h>
+#include <rtems/freebsd/netipsec/ipcomp.h> /*XXX*/
+#include <rtems/freebsd/netipsec/ipcomp_var.h>
+
+#include <rtems/freebsd/netipsec/key.h>
+#include <rtems/freebsd/netipsec/keydb.h>
+#include <rtems/freebsd/netipsec/key_debug.h>
+
+#include <rtems/freebsd/netipsec/xform.h>
+
+#include <rtems/freebsd/machine/in_cksum.h>
+
+#include <rtems/freebsd/opencrypto/cryptodev.h>
+
+#ifdef IPSEC_DEBUG
+VNET_DEFINE(int, ipsec_debug) = 1;
+#else
+VNET_DEFINE(int, ipsec_debug) = 0;
+#endif
+
+/* NB: name changed so netstat doesn't use it. */
+VNET_DEFINE(struct ipsecstat, ipsec4stat);
+VNET_DEFINE(int, ip4_ah_offsetmask) = 0; /* maybe IP_DF? */
+/* DF bit on encap. 0: clear 1: set 2: copy */
+VNET_DEFINE(int, ip4_ipsec_dfbit) = 0;
+VNET_DEFINE(int, ip4_esp_trans_deflev) = IPSEC_LEVEL_USE;
+VNET_DEFINE(int, ip4_esp_net_deflev) = IPSEC_LEVEL_USE;
+VNET_DEFINE(int, ip4_ah_trans_deflev) = IPSEC_LEVEL_USE;
+VNET_DEFINE(int, ip4_ah_net_deflev) = IPSEC_LEVEL_USE;
+VNET_DEFINE(struct secpolicy, ip4_def_policy);
+/* ECN ignore(-1)/forbidden(0)/allowed(1) */
+VNET_DEFINE(int, ip4_ipsec_ecn) = 0;
+VNET_DEFINE(int, ip4_esp_randpad) = -1;
+
+/*
+ * Crypto support requirements:
+ *
+ * 1 require hardware support
+ * -1 require software support
+ * 0 take anything
+ */
+VNET_DEFINE(int, crypto_support) = CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE;
+
+FEATURE(ipsec, "Internet Protocol Security (IPsec)");
+#ifdef IPSEC_NAT_T
+FEATURE(ipsec_natt, "UDP Encapsulation of IPsec ESP Packets ('NAT-T')");
+#endif
+
+SYSCTL_DECL(_net_inet_ipsec);
+
+/* net.inet.ipsec */
+SYSCTL_VNET_INT(_net_inet_ipsec, IPSECCTL_DEF_POLICY, def_policy,
+ CTLFLAG_RW, &VNET_NAME(ip4_def_policy).policy, 0,
+ "IPsec default policy.");
+SYSCTL_VNET_INT(_net_inet_ipsec, IPSECCTL_DEF_ESP_TRANSLEV, esp_trans_deflev,
+ CTLFLAG_RW, &VNET_NAME(ip4_esp_trans_deflev), 0,
+ "Default ESP transport mode level");
+SYSCTL_VNET_INT(_net_inet_ipsec, IPSECCTL_DEF_ESP_NETLEV, esp_net_deflev,
+ CTLFLAG_RW, &VNET_NAME(ip4_esp_net_deflev), 0,
+ "Default ESP tunnel mode level.");
+SYSCTL_VNET_INT(_net_inet_ipsec, IPSECCTL_DEF_AH_TRANSLEV, ah_trans_deflev,
+ CTLFLAG_RW, &VNET_NAME(ip4_ah_trans_deflev), 0,
+ "AH transfer mode default level.");
+SYSCTL_VNET_INT(_net_inet_ipsec, IPSECCTL_DEF_AH_NETLEV, ah_net_deflev,
+ CTLFLAG_RW, &VNET_NAME(ip4_ah_net_deflev), 0,
+ "AH tunnel mode default level.");
+SYSCTL_VNET_INT(_net_inet_ipsec, IPSECCTL_AH_CLEARTOS, ah_cleartos,
+ CTLFLAG_RW, &VNET_NAME(ah_cleartos), 0,
+ "If set clear type-of-service field when doing AH computation.");
+SYSCTL_VNET_INT(_net_inet_ipsec, IPSECCTL_AH_OFFSETMASK, ah_offsetmask,
+ CTLFLAG_RW, &VNET_NAME(ip4_ah_offsetmask), 0,
+ "If not set clear offset field mask when doing AH computation.");
+SYSCTL_VNET_INT(_net_inet_ipsec, IPSECCTL_DFBIT, dfbit,
+ CTLFLAG_RW, &VNET_NAME(ip4_ipsec_dfbit), 0,
+ "Do not fragment bit on encap.");
+SYSCTL_VNET_INT(_net_inet_ipsec, IPSECCTL_ECN, ecn,
+ CTLFLAG_RW, &VNET_NAME(ip4_ipsec_ecn), 0,
+ "Explicit Congestion Notification handling.");
+SYSCTL_VNET_INT(_net_inet_ipsec, IPSECCTL_DEBUG, debug,
+ CTLFLAG_RW, &VNET_NAME(ipsec_debug), 0,
+ "Enable IPsec debugging output when set.");
+SYSCTL_VNET_INT(_net_inet_ipsec, OID_AUTO, crypto_support,
+ CTLFLAG_RW, &VNET_NAME(crypto_support), 0,
+ "Crypto driver selection.");
+SYSCTL_VNET_STRUCT(_net_inet_ipsec, OID_AUTO, ipsecstats,
+ CTLFLAG_RD, &VNET_NAME(ipsec4stat), ipsecstat,
+ "IPsec IPv4 statistics.");
+
+#ifdef REGRESSION
+/*
+ * When set to 1, IPsec will send packets with the same sequence number.
+ * This makes it possible to verify whether the other side properly
+ * detects replayed packets.
+ */
+VNET_DEFINE(int, ipsec_replay) = 0;
+SYSCTL_VNET_INT(_net_inet_ipsec, OID_AUTO, test_replay,
+ CTLFLAG_RW, &VNET_NAME(ipsec_replay), 0,
+ "Emulate replay attack");
+/*
+ * When set to 1, IPsec will send packets with a corrupted HMAC.
+ * This makes it possible to verify whether the other side properly
+ * detects modified packets.
+ */
+VNET_DEFINE(int, ipsec_integrity) = 0;
+SYSCTL_VNET_INT(_net_inet_ipsec, OID_AUTO, test_integrity,
+ CTLFLAG_RW, &VNET_NAME(ipsec_integrity), 0,
+ "Emulate man-in-the-middle attack");
+#endif
+
+#ifdef INET6
+VNET_DEFINE(struct ipsecstat, ipsec6stat);
+VNET_DEFINE(int, ip6_esp_trans_deflev) = IPSEC_LEVEL_USE;
+VNET_DEFINE(int, ip6_esp_net_deflev) = IPSEC_LEVEL_USE;
+VNET_DEFINE(int, ip6_ah_trans_deflev) = IPSEC_LEVEL_USE;
+VNET_DEFINE(int, ip6_ah_net_deflev) = IPSEC_LEVEL_USE;
+VNET_DEFINE(int, ip6_ipsec_ecn) = 0; /* ECN ignore(-1)/forbidden(0)/allowed(1) */
+
+SYSCTL_DECL(_net_inet6_ipsec6);
+
+/* net.inet6.ipsec6 */
+#ifdef COMPAT_KAME
+SYSCTL_OID(_net_inet6_ipsec6, IPSECCTL_STATS, stats, CTLFLAG_RD,
+ 0, 0, compat_ipsecstats_sysctl, "S", "IPsec IPv6 statistics.");
+#endif /* COMPAT_KAME */
+SYSCTL_VNET_INT(_net_inet6_ipsec6, IPSECCTL_DEF_POLICY, def_policy, CTLFLAG_RW,
+ &VNET_NAME(ip4_def_policy).policy, 0,
+ "IPsec default policy.");
+SYSCTL_VNET_INT(_net_inet6_ipsec6, IPSECCTL_DEF_ESP_TRANSLEV,
+ esp_trans_deflev, CTLFLAG_RW, &VNET_NAME(ip6_esp_trans_deflev), 0,
+ "Default ESP transport mode level.");
+SYSCTL_VNET_INT(_net_inet6_ipsec6, IPSECCTL_DEF_ESP_NETLEV,
+ esp_net_deflev, CTLFLAG_RW, &VNET_NAME(ip6_esp_net_deflev), 0,
+ "Default ESP tunnel mode level.");
+SYSCTL_VNET_INT(_net_inet6_ipsec6, IPSECCTL_DEF_AH_TRANSLEV,
+ ah_trans_deflev, CTLFLAG_RW, &VNET_NAME(ip6_ah_trans_deflev), 0,
+ "AH transfer mode default level.");
+SYSCTL_VNET_INT(_net_inet6_ipsec6, IPSECCTL_DEF_AH_NETLEV,
+ ah_net_deflev, CTLFLAG_RW, &VNET_NAME(ip6_ah_net_deflev), 0,
+ "AH tunnel mode default level.");
+SYSCTL_VNET_INT(_net_inet6_ipsec6, IPSECCTL_ECN,
+ ecn, CTLFLAG_RW, &VNET_NAME(ip6_ipsec_ecn), 0,
+ "Explicit Congestion Notification handling.");
+SYSCTL_VNET_INT(_net_inet6_ipsec6, IPSECCTL_DEBUG, debug, CTLFLAG_RW,
+ &VNET_NAME(ipsec_debug), 0,
+ "Enable IPsec debugging output when set.");
+SYSCTL_VNET_STRUCT(_net_inet6_ipsec6, IPSECCTL_STATS,
+ ipsecstats, CTLFLAG_RD, &VNET_NAME(ipsec6stat), ipsecstat,
+ "IPsec IPv6 statistics.");
+#endif /* INET6 */
+
+static int ipsec_setspidx_inpcb __P((struct mbuf *, struct inpcb *));
+static int ipsec_setspidx __P((struct mbuf *, struct secpolicyindex *, int));
+static void ipsec4_get_ulp __P((struct mbuf *m, struct secpolicyindex *, int));
+static int ipsec4_setspidx_ipaddr __P((struct mbuf *, struct secpolicyindex *));
+#ifdef INET6
+static void ipsec6_get_ulp __P((struct mbuf *m, struct secpolicyindex *, int));
+static int ipsec6_setspidx_ipaddr __P((struct mbuf *, struct secpolicyindex *));
+#endif
+static void ipsec_delpcbpolicy __P((struct inpcbpolicy *));
+static struct secpolicy *ipsec_deepcopy_policy __P((struct secpolicy *src));
+static void vshiftl __P((unsigned char *, int, int));
+
+MALLOC_DEFINE(M_IPSEC_INPCB, "inpcbpolicy", "inpcb-resident ipsec policy");
+
+/*
+ * Return a held reference to the default SP.
+ */
+static struct secpolicy *
+key_allocsp_default(const char* where, int tag)
+{
+ struct secpolicy *sp;
+
+ KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
+ printf("DP key_allocsp_default from %s:%u\n", where, tag));
+
+ sp = &V_ip4_def_policy;
+ if (sp->policy != IPSEC_POLICY_DISCARD &&
+ sp->policy != IPSEC_POLICY_NONE) {
+ ipseclog((LOG_INFO, "fixed system default policy: %d->%d\n",
+ sp->policy, IPSEC_POLICY_NONE));
+ sp->policy = IPSEC_POLICY_NONE;
+ }
+ key_addref(sp);
+
+ KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
+ printf("DP key_allocsp_default returns SP:%p (%u)\n",
+ sp, sp->refcnt));
+ return (sp);
+}
+#define KEY_ALLOCSP_DEFAULT() \
+ key_allocsp_default(__FILE__, __LINE__)
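
Editorial aside, not part of the patch: the macro above stamps every call site into the KEYDEBUG trace by passing __FILE__/__LINE__ through explicit parameters. A generic, self-contained illustration of the same pattern (all names hypothetical):

	#include <stdio.h>

	static void *
	resource_get(const char *where, int tag)
	{
		/* The caller's file and line ride along for debug attribution. */
		printf("resource_get from %s:%d\n", where, tag);
		return (NULL);
	}
	#define	RESOURCE_GET()	resource_get(__FILE__, __LINE__)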
+
+/*
+ * Look up the security policy that matches the given tdb_ident and
+ * direction. Falls back to the system default policy when no SPD
+ * entry matches; always returns a held SP reference.
+ */
+struct secpolicy *
+ipsec_getpolicy(struct tdb_ident *tdbi, u_int dir)
+{
+ struct secpolicy *sp;
+
+ IPSEC_ASSERT(tdbi != NULL, ("null tdbi"));
+ IPSEC_ASSERT(dir == IPSEC_DIR_INBOUND || dir == IPSEC_DIR_OUTBOUND,
+ ("invalid direction %u", dir));
+
+ sp = KEY_ALLOCSP2(tdbi->spi, &tdbi->dst, tdbi->proto, dir);
+ if (sp == NULL) /*XXX????*/
+ sp = KEY_ALLOCSP_DEFAULT();
+ IPSEC_ASSERT(sp != NULL, ("null SP"));
+ return (sp);
+}
+
+/*
+ * For an OUTBOUND packet having a socket. Search the SPD for the packet
+ * and return a pointer to the SP.
+ * OUT: NULL: no appropriate SP found, the following value is set to error.
+ * 0 : bypass
+ * EACCES : discard packet.
+ * ENOENT : ipsec_acquire() in progress, maybe.
+ * others : error occurred.
+ * others: a pointer to SP
+ *
+ * NOTE: IPv6-mapped address concerns are handled here.
+ */
+static struct secpolicy *
+ipsec_getpolicybysock(struct mbuf *m, u_int dir, struct inpcb *inp, int *error)
+{
+ struct inpcbpolicy *pcbsp;
+ struct secpolicy *currsp = NULL; /* Policy on socket. */
+ struct secpolicy *sp;
+
+ IPSEC_ASSERT(m != NULL, ("null mbuf"));
+ IPSEC_ASSERT(inp != NULL, ("null inpcb"));
+ IPSEC_ASSERT(error != NULL, ("null error"));
+ IPSEC_ASSERT(dir == IPSEC_DIR_INBOUND || dir == IPSEC_DIR_OUTBOUND,
+ ("invalid direction %u", dir));
+
+ /* Set spidx in pcb. */
+ *error = ipsec_setspidx_inpcb(m, inp);
+ if (*error)
+ return (NULL);
+
+ pcbsp = inp->inp_sp;
+ IPSEC_ASSERT(pcbsp != NULL, ("null pcbsp"));
+ switch (dir) {
+ case IPSEC_DIR_INBOUND:
+ currsp = pcbsp->sp_in;
+ break;
+ case IPSEC_DIR_OUTBOUND:
+ currsp = pcbsp->sp_out;
+ break;
+ }
+ IPSEC_ASSERT(currsp != NULL, ("null currsp"));
+
+ if (pcbsp->priv) { /* Privileged socket. */
+ switch (currsp->policy) {
+ case IPSEC_POLICY_BYPASS:
+ case IPSEC_POLICY_IPSEC:
+ key_addref(currsp);
+ sp = currsp;
+ break;
+
+ case IPSEC_POLICY_ENTRUST:
+ /* Look for a policy in SPD. */
+ sp = KEY_ALLOCSP(&currsp->spidx, dir);
+ if (sp == NULL) /* No SP found. */
+ sp = KEY_ALLOCSP_DEFAULT();
+ break;
+
+ default:
+ ipseclog((LOG_ERR, "%s: Invalid policy for PCB %d\n",
+ __func__, currsp->policy));
+ *error = EINVAL;
+ return (NULL);
+ }
+ } else { /* Unpriv, SPD has policy. */
+ sp = KEY_ALLOCSP(&currsp->spidx, dir);
+ if (sp == NULL) { /* No SP found. */
+ switch (currsp->policy) {
+ case IPSEC_POLICY_BYPASS:
+ ipseclog((LOG_ERR, "%s: Illegal policy for "
+ "non-privileged socket: %d\n",
+ __func__, currsp->policy));
+ *error = EINVAL;
+ return (NULL);
+
+ case IPSEC_POLICY_ENTRUST:
+ sp = KEY_ALLOCSP_DEFAULT();
+ break;
+
+ case IPSEC_POLICY_IPSEC:
+ key_addref(currsp);
+ sp = currsp;
+ break;
+
+ default:
+ ipseclog((LOG_ERR, "%s: Invalid policy for "
+ "PCB %d\n", __func__, currsp->policy));
+ *error = EINVAL;
+ return (NULL);
+ }
+ }
+ }
+ IPSEC_ASSERT(sp != NULL,
+ ("null SP (priv %u policy %u", pcbsp->priv, currsp->policy));
+ KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
+ printf("DP %s (priv %u policy %u) allocate SP:%p (refcnt %u)\n",
+ __func__, pcbsp->priv, currsp->policy, sp, sp->refcnt));
+ return (sp);
+}
+
+/*
+ * For a FORWARDING packet, or an OUTBOUND packet without a socket.
+ * Search the SPD for the packet and return a pointer to the SP.
+ * OUT: positive: a pointer to the matched security policy leaf entry.
+ * NULL: no appropriate SP found, the following value is set to error.
+ * 0 : bypass
+ * EACCES : discard packet.
+ * ENOENT : ipsec_acquire() in progress, maybe.
+ * others : error occurred.
+ */
+struct secpolicy *
+ipsec_getpolicybyaddr(struct mbuf *m, u_int dir, int flag, int *error)
+{
+ struct secpolicyindex spidx;
+ struct secpolicy *sp;
+
+ IPSEC_ASSERT(m != NULL, ("null mbuf"));
+ IPSEC_ASSERT(error != NULL, ("null error"));
+ IPSEC_ASSERT(dir == IPSEC_DIR_INBOUND || dir == IPSEC_DIR_OUTBOUND,
+ ("invalid direction %u", dir));
+
+ sp = NULL;
+ if (key_havesp(dir)) {
+ /* Make an index to look for a policy. */
+ *error = ipsec_setspidx(m, &spidx,
+ (flag & IP_FORWARDING) ? 0 : 1);
+ if (*error != 0) {
+ DPRINTF(("%s: setpidx failed, dir %u flag %u\n",
+ __func__, dir, flag));
+ return (NULL);
+ }
+ spidx.dir = dir;
+
+ sp = KEY_ALLOCSP(&spidx, dir);
+ }
+ if (sp == NULL) /* No SP found, use system default. */
+ sp = KEY_ALLOCSP_DEFAULT();
+ IPSEC_ASSERT(sp != NULL, ("null SP"));
+ return (sp);
+}
+
+struct secpolicy *
+ipsec4_checkpolicy(struct mbuf *m, u_int dir, u_int flag, int *error,
+ struct inpcb *inp)
+{
+ struct secpolicy *sp;
+
+ *error = 0;
+ if (inp == NULL)
+ sp = ipsec_getpolicybyaddr(m, dir, flag, error);
+ else
+ sp = ipsec_getpolicybysock(m, dir, inp, error);
+ if (sp == NULL) {
+ IPSEC_ASSERT(*error != 0, ("getpolicy failed w/o error"));
+ V_ipsec4stat.ips_out_inval++;
+ return (NULL);
+ }
+ IPSEC_ASSERT(*error == 0, ("sp w/ error set to %u", *error));
+ switch (sp->policy) {
+ case IPSEC_POLICY_ENTRUST:
+ default:
+ printf("%s: invalid policy %u\n", __func__, sp->policy);
+ /* FALLTHROUGH */
+ case IPSEC_POLICY_DISCARD:
+ V_ipsec4stat.ips_out_polvio++;
+ *error = -EINVAL; /* Packet is discarded by caller. */
+ break;
+ case IPSEC_POLICY_BYPASS:
+ case IPSEC_POLICY_NONE:
+ KEY_FREESP(&sp);
+ sp = NULL; /* NB: force NULL result. */
+ break;
+ case IPSEC_POLICY_IPSEC:
+ if (sp->req == NULL) /* Acquire a SA. */
+ *error = key_spdacquire(sp);
+ break;
+ }
+ if (*error != 0) {
+ KEY_FREESP(&sp);
+ sp = NULL;
+ }
+ return (sp);
+}
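
Editorial aside, not part of the patch: the (sp, error) pair returned above encodes three distinct outcomes. A hedged sketch of how an output path such as ip_output() is expected to consume the result (the helper name is hypothetical; the called functions are the ones declared in this file):

	static int
	ipsec4_output_decide(struct mbuf *m, struct inpcb *inp)
	{
		struct secpolicy *sp;
		int error;

		sp = ipsec4_checkpolicy(m, IPSEC_DIR_OUTBOUND, 0, &error, inp);
		if (sp == NULL) {
			if (error != 0) {
				/* DISCARD (-EINVAL) or SA acquisition pending. */
				m_freem(m);
				return (error < 0 ? EACCES : error);
			}
			return (0);	/* BYPASS/NONE: send in the clear. */
		}
		/* IPSEC: sp->req lists the transforms to apply on output. */
		KEY_FREESP(&sp);
		return (0);
	}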
+
+static int
+ipsec_setspidx_inpcb(struct mbuf *m, struct inpcb *inp)
+{
+ int error;
+
+ IPSEC_ASSERT(inp != NULL, ("null inp"));
+ IPSEC_ASSERT(inp->inp_sp != NULL, ("null inp_sp"));
+ IPSEC_ASSERT(inp->inp_sp->sp_out != NULL && inp->inp_sp->sp_in != NULL,
+ ("null sp_in || sp_out"));
+
+ error = ipsec_setspidx(m, &inp->inp_sp->sp_in->spidx, 1);
+ if (error == 0) {
+ inp->inp_sp->sp_in->spidx.dir = IPSEC_DIR_INBOUND;
+ inp->inp_sp->sp_out->spidx = inp->inp_sp->sp_in->spidx;
+ inp->inp_sp->sp_out->spidx.dir = IPSEC_DIR_OUTBOUND;
+ } else {
+ bzero(&inp->inp_sp->sp_in->spidx,
+ sizeof (inp->inp_sp->sp_in->spidx));
+ bzero(&inp->inp_sp->sp_out->spidx,
+ sizeof (inp->inp_sp->sp_out->spidx));
+ }
+ return (error);
+}
+
+/*
+ * Configure security policy index (src/dst/proto/sport/dport)
+ * by looking at the content of mbuf.
+ * The caller is responsible for error recovery (like clearing up spidx).
+ */
+static int
+ipsec_setspidx(struct mbuf *m, struct secpolicyindex *spidx, int needport)
+{
+ struct ip *ip = NULL;
+ struct ip ipbuf;
+ u_int v;
+ struct mbuf *n;
+ int len;
+ int error;
+
+ IPSEC_ASSERT(m != NULL, ("null mbuf"));
+
+ /*
+ * Validate m->m_pkthdr.len. We see an incorrect length if we
+ * mistakenly call this function with an inconsistent mbuf chain
+ * (like 4.4BSD tcp/udp processing). XXX Should we panic here?
+ */
+ len = 0;
+ for (n = m; n; n = n->m_next)
+ len += n->m_len;
+ if (m->m_pkthdr.len != len) {
+ KEYDEBUG(KEYDEBUG_IPSEC_DUMP,
+ printf("%s: pkthdr len(%d) mismatch (%d), ignored.\n",
+ __func__, len, m->m_pkthdr.len));
+ return (EINVAL);
+ }
+
+ if (m->m_pkthdr.len < sizeof(struct ip)) {
+ KEYDEBUG(KEYDEBUG_IPSEC_DUMP,
+ printf("%s: pkthdr len(%d) too small (v4), ignored.\n",
+ __func__, m->m_pkthdr.len));
+ return (EINVAL);
+ }
+
+ if (m->m_len >= sizeof(*ip))
+ ip = mtod(m, struct ip *);
+ else {
+ m_copydata(m, 0, sizeof(ipbuf), (caddr_t)&ipbuf);
+ ip = &ipbuf;
+ }
+#ifdef _IP_VHL
+ v = _IP_VHL_V(ip->ip_vhl);
+#else
+ v = ip->ip_v;
+#endif
+ switch (v) {
+ case 4:
+ error = ipsec4_setspidx_ipaddr(m, spidx);
+ if (error)
+ return (error);
+ ipsec4_get_ulp(m, spidx, needport);
+ return (0);
+#ifdef INET6
+ case 6:
+ if (m->m_pkthdr.len < sizeof(struct ip6_hdr)) {
+ KEYDEBUG(KEYDEBUG_IPSEC_DUMP,
+ printf("%s: pkthdr len(%d) too small (v6), "
+ "ignored\n", __func__, m->m_pkthdr.len));
+ return (EINVAL);
+ }
+ error = ipsec6_setspidx_ipaddr(m, spidx);
+ if (error)
+ return (error);
+ ipsec6_get_ulp(m, spidx, needport);
+ return (0);
+#endif
+ default:
+ KEYDEBUG(KEYDEBUG_IPSEC_DUMP,
+ printf("%s: " "unknown IP version %u, ignored.\n",
+ __func__, v));
+ return (EINVAL);
+ }
+}
+
+static void
+ipsec4_get_ulp(struct mbuf *m, struct secpolicyindex *spidx, int needport)
+{
+ u_int8_t nxt;
+ int off;
+
+ /* Sanity check. */
+ IPSEC_ASSERT(m != NULL, ("null mbuf"));
+ IPSEC_ASSERT(m->m_pkthdr.len >= sizeof(struct ip),("packet too short"));
+
+ /* NB: ip_input() flips it into host endian. XXX Need more checking. */
+ if (m->m_len >= sizeof (struct ip)) {
+ struct ip *ip = mtod(m, struct ip *);
+ if (ip->ip_off & (IP_MF | IP_OFFMASK))
+ goto done;
+#ifdef _IP_VHL
+ off = _IP_VHL_HL(ip->ip_vhl) << 2;
+#else
+ off = ip->ip_hl << 2;
+#endif
+ nxt = ip->ip_p;
+ } else {
+ struct ip ih;
+
+ m_copydata(m, 0, sizeof (struct ip), (caddr_t) &ih);
+ if (ih.ip_off & (IP_MF | IP_OFFMASK))
+ goto done;
+#ifdef _IP_VHL
+ off = _IP_VHL_HL(ih.ip_vhl) << 2;
+#else
+ off = ih.ip_hl << 2;
+#endif
+ nxt = ih.ip_p;
+ }
+
+ while (off < m->m_pkthdr.len) {
+ struct ip6_ext ip6e;
+ struct tcphdr th;
+ struct udphdr uh;
+
+ switch (nxt) {
+ case IPPROTO_TCP:
+ spidx->ul_proto = nxt;
+ if (!needport)
+ goto done_proto;
+ if (off + sizeof(struct tcphdr) > m->m_pkthdr.len)
+ goto done;
+ m_copydata(m, off, sizeof (th), (caddr_t) &th);
+ spidx->src.sin.sin_port = th.th_sport;
+ spidx->dst.sin.sin_port = th.th_dport;
+ return;
+ case IPPROTO_UDP:
+ spidx->ul_proto = nxt;
+ if (!needport)
+ goto done_proto;
+ if (off + sizeof(struct udphdr) > m->m_pkthdr.len)
+ goto done;
+ m_copydata(m, off, sizeof (uh), (caddr_t) &uh);
+ spidx->src.sin.sin_port = uh.uh_sport;
+ spidx->dst.sin.sin_port = uh.uh_dport;
+ return;
+ case IPPROTO_AH:
+ if (off + sizeof(ip6e) > m->m_pkthdr.len)
+ goto done;
+ /* XXX Sigh, this works but is totally bogus. */
+ m_copydata(m, off, sizeof(ip6e), (caddr_t) &ip6e);
+ off += (ip6e.ip6e_len + 2) << 2;
+ nxt = ip6e.ip6e_nxt;
+ break;
+ case IPPROTO_ICMP:
+ default:
+ /* XXX Intermediate headers??? */
+ spidx->ul_proto = nxt;
+ goto done_proto;
+ }
+ }
+done:
+ spidx->ul_proto = IPSEC_ULPROTO_ANY;
+done_proto:
+ spidx->src.sin.sin_port = IPSEC_PORT_ANY;
+ spidx->dst.sin.sin_port = IPSEC_PORT_ANY;
+}
+
+/* Assumes that m is sane. */
+static int
+ipsec4_setspidx_ipaddr(struct mbuf *m, struct secpolicyindex *spidx)
+{
+ static const struct sockaddr_in template = {
+ sizeof (struct sockaddr_in),
+ AF_INET,
+ 0, { 0 }, { 0, 0, 0, 0, 0, 0, 0, 0 }
+ };
+
+ spidx->src.sin = template;
+ spidx->dst.sin = template;
+
+ if (m->m_len < sizeof (struct ip)) {
+ m_copydata(m, offsetof(struct ip, ip_src),
+ sizeof (struct in_addr),
+ (caddr_t) &spidx->src.sin.sin_addr);
+ m_copydata(m, offsetof(struct ip, ip_dst),
+ sizeof (struct in_addr),
+ (caddr_t) &spidx->dst.sin.sin_addr);
+ } else {
+ struct ip *ip = mtod(m, struct ip *);
+ spidx->src.sin.sin_addr = ip->ip_src;
+ spidx->dst.sin.sin_addr = ip->ip_dst;
+ }
+
+ spidx->prefs = sizeof(struct in_addr) << 3;
+ spidx->prefd = sizeof(struct in_addr) << 3;
+
+ return (0);
+}
+
+#ifdef INET6
+static void
+ipsec6_get_ulp(struct mbuf *m, struct secpolicyindex *spidx, int needport)
+{
+ int off, nxt;
+ struct tcphdr th;
+ struct udphdr uh;
+ struct icmp6_hdr ih;
+
+ /* Sanity check. */
+ if (m == NULL)
+ panic("%s: NULL pointer was passed.\n", __func__);
+
+ KEYDEBUG(KEYDEBUG_IPSEC_DUMP,
+ printf("%s:\n", __func__); kdebug_mbuf(m));
+
+ /* Set default. */
+ spidx->ul_proto = IPSEC_ULPROTO_ANY;
+ ((struct sockaddr_in6 *)&spidx->src)->sin6_port = IPSEC_PORT_ANY;
+ ((struct sockaddr_in6 *)&spidx->dst)->sin6_port = IPSEC_PORT_ANY;
+
+ nxt = -1;
+ off = ip6_lasthdr(m, 0, IPPROTO_IPV6, &nxt);
+ if (off < 0 || m->m_pkthdr.len < off)
+ return;
+
+ switch (nxt) {
+ case IPPROTO_TCP:
+ spidx->ul_proto = nxt;
+ if (!needport)
+ break;
+ if (off + sizeof(struct tcphdr) > m->m_pkthdr.len)
+ break;
+ m_copydata(m, off, sizeof(th), (caddr_t)&th);
+ ((struct sockaddr_in6 *)&spidx->src)->sin6_port = th.th_sport;
+ ((struct sockaddr_in6 *)&spidx->dst)->sin6_port = th.th_dport;
+ break;
+ case IPPROTO_UDP:
+ spidx->ul_proto = nxt;
+ if (!needport)
+ break;
+ if (off + sizeof(struct udphdr) > m->m_pkthdr.len)
+ break;
+ m_copydata(m, off, sizeof(uh), (caddr_t)&uh);
+ ((struct sockaddr_in6 *)&spidx->src)->sin6_port = uh.uh_sport;
+ ((struct sockaddr_in6 *)&spidx->dst)->sin6_port = uh.uh_dport;
+ break;
+ case IPPROTO_ICMPV6:
+ spidx->ul_proto = nxt;
+ if (off + sizeof(struct icmp6_hdr) > m->m_pkthdr.len)
+ break;
+ m_copydata(m, off, sizeof(ih), (caddr_t)&ih);
+ ((struct sockaddr_in6 *)&spidx->src)->sin6_port =
+ htons((uint16_t)ih.icmp6_type);
+ ((struct sockaddr_in6 *)&spidx->dst)->sin6_port =
+ htons((uint16_t)ih.icmp6_code);
+ break;
+ default:
+ /* XXX Intermediate headers??? */
+ spidx->ul_proto = nxt;
+ break;
+ }
+}
+
+/* Assumes that m is sane. */
+static int
+ipsec6_setspidx_ipaddr(struct mbuf *m, struct secpolicyindex *spidx)
+{
+ struct ip6_hdr *ip6 = NULL;
+ struct ip6_hdr ip6buf;
+ struct sockaddr_in6 *sin6;
+
+ if (m->m_len >= sizeof(*ip6))
+ ip6 = mtod(m, struct ip6_hdr *);
+ else {
+ m_copydata(m, 0, sizeof(ip6buf), (caddr_t)&ip6buf);
+ ip6 = &ip6buf;
+ }
+
+ sin6 = (struct sockaddr_in6 *)&spidx->src;
+ bzero(sin6, sizeof(*sin6));
+ sin6->sin6_family = AF_INET6;
+ sin6->sin6_len = sizeof(struct sockaddr_in6);
+ bcopy(&ip6->ip6_src, &sin6->sin6_addr, sizeof(ip6->ip6_src));
+ if (IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_src)) {
+ sin6->sin6_addr.s6_addr16[1] = 0;
+ sin6->sin6_scope_id = ntohs(ip6->ip6_src.s6_addr16[1]);
+ }
+ spidx->prefs = sizeof(struct in6_addr) << 3;
+
+ sin6 = (struct sockaddr_in6 *)&spidx->dst;
+ bzero(sin6, sizeof(*sin6));
+ sin6->sin6_family = AF_INET6;
+ sin6->sin6_len = sizeof(struct sockaddr_in6);
+ bcopy(&ip6->ip6_dst, &sin6->sin6_addr, sizeof(ip6->ip6_dst));
+ if (IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_dst)) {
+ sin6->sin6_addr.s6_addr16[1] = 0;
+ sin6->sin6_scope_id = ntohs(ip6->ip6_dst.s6_addr16[1]);
+ }
+ spidx->prefd = sizeof(struct in6_addr) << 3;
+
+ return (0);
+}
+#endif
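
Editorial aside, not part of the patch: the s6_addr16[1] manipulation above is the KAME embedded-scope convention: inside the kernel, link-local addresses carry the interface index in the second 16-bit word of the address, and converting to a sockaddr_in6 moves it into sin6_scope_id. A self-contained sketch of that conversion (hypothetical helper name), generalizing the two inline occurrences above:

	static __inline void
	kame_embedded_to_sockaddr(const struct in6_addr *ia6,
	    struct sockaddr_in6 *sin6)
	{
		bzero(sin6, sizeof(*sin6));
		sin6->sin6_family = AF_INET6;
		sin6->sin6_len = sizeof(*sin6);
		sin6->sin6_addr = *ia6;
		if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) {
			sin6->sin6_scope_id =
			    ntohs(sin6->sin6_addr.s6_addr16[1]);
			sin6->sin6_addr.s6_addr16[1] = 0; /* clear embedding */
		}
	}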
+
+static void
+ipsec_delpcbpolicy(struct inpcbpolicy *p)
+{
+
+ free(p, M_IPSEC_INPCB);
+}
+
+/* Initialize policy in PCB. */
+int
+ipsec_init_policy(struct socket *so, struct inpcbpolicy **pcb_sp)
+{
+ struct inpcbpolicy *new;
+
+ /* Sanity check. */
+ if (so == NULL || pcb_sp == NULL)
+ panic("%s: NULL pointer was passed.\n", __func__);
+
+ new = (struct inpcbpolicy *) malloc(sizeof(struct inpcbpolicy),
+ M_IPSEC_INPCB, M_NOWAIT|M_ZERO);
+ if (new == NULL) {
+ ipseclog((LOG_DEBUG, "%s: No more memory.\n", __func__));
+ return (ENOBUFS);
+ }
+
+ new->priv = IPSEC_IS_PRIVILEGED_SO(so);
+
+ if ((new->sp_in = KEY_NEWSP()) == NULL) {
+ ipsec_delpcbpolicy(new);
+ return (ENOBUFS);
+ }
+ new->sp_in->state = IPSEC_SPSTATE_ALIVE;
+ new->sp_in->policy = IPSEC_POLICY_ENTRUST;
+
+ if ((new->sp_out = KEY_NEWSP()) == NULL) {
+ KEY_FREESP(&new->sp_in);
+ ipsec_delpcbpolicy(new);
+ return (ENOBUFS);
+ }
+ new->sp_out->state = IPSEC_SPSTATE_ALIVE;
+ new->sp_out->policy = IPSEC_POLICY_ENTRUST;
+
+ *pcb_sp = new;
+
+ return (0);
+}
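+/*
+ * Usage sketch (illustrative only; "so" and "inp" stand for a hypothetical
+ * caller's socket and PCB):
+ *
+ *	struct inpcbpolicy *pcbsp;
+ *
+ *	if (ipsec_init_policy(so, &pcbsp) == 0)
+ *		inp->inp_sp = pcbsp;
+ *
+ * Both directions start out as ENTRUST, i.e. they consult the SPD.
+ */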
+
+/* Copy old IPsec policy into new. */
+int
+ipsec_copy_policy(struct inpcbpolicy *old, struct inpcbpolicy *new)
+{
+ struct secpolicy *sp;
+
+ sp = ipsec_deepcopy_policy(old->sp_in);
+ if (sp) {
+ KEY_FREESP(&new->sp_in);
+ new->sp_in = sp;
+ } else
+ return (ENOBUFS);
+
+ sp = ipsec_deepcopy_policy(old->sp_out);
+ if (sp) {
+ KEY_FREESP(&new->sp_out);
+ new->sp_out = sp;
+ } else
+ return (ENOBUFS);
+
+ new->priv = old->priv;
+
+ return (0);
+}
+
+struct ipsecrequest *
+ipsec_newisr(void)
+{
+ struct ipsecrequest *p;
+
+ p = malloc(sizeof(struct ipsecrequest), M_IPSEC_SR, M_NOWAIT|M_ZERO);
+ if (p != NULL)
+ IPSECREQUEST_LOCK_INIT(p);
+ return (p);
+}
+
+void
+ipsec_delisr(struct ipsecrequest *p)
+{
+
+ IPSECREQUEST_LOCK_DESTROY(p);
+ free(p, M_IPSEC_SR);
+}
+
+/* Deep-copy a policy in PCB. */
+static struct secpolicy *
+ipsec_deepcopy_policy(struct secpolicy *src)
+{
+ struct ipsecrequest *newchain = NULL;
+ struct ipsecrequest *p;
+ struct ipsecrequest **q;
+ struct ipsecrequest *r;
+ struct secpolicy *dst;
+
+ if (src == NULL)
+ return (NULL);
+ dst = KEY_NEWSP();
+ if (dst == NULL)
+ return (NULL);
+
+ /*
+ * Deep-copy IPsec request chain. This is required since struct
+ * ipsecrequest is not reference counted.
+ */
+ q = &newchain;
+ for (p = src->req; p; p = p->next) {
+ *q = ipsec_newisr();
+ if (*q == NULL)
+ goto fail;
+ (*q)->saidx.proto = p->saidx.proto;
+ (*q)->saidx.mode = p->saidx.mode;
+ (*q)->level = p->level;
+ (*q)->saidx.reqid = p->saidx.reqid;
+
+ bcopy(&p->saidx.src, &(*q)->saidx.src, sizeof((*q)->saidx.src));
+ bcopy(&p->saidx.dst, &(*q)->saidx.dst, sizeof((*q)->saidx.dst));
+
+ (*q)->sp = dst;
+
+ q = &((*q)->next);
+ }
+
+ dst->req = newchain;
+ dst->state = src->state;
+ dst->policy = src->policy;
+ /* Do not touch the refcnt fields. */
+
+ return (dst);
+
+fail:
+ for (p = newchain; p; p = r) {
+ r = p->next;
+ ipsec_delisr(p);
+ p = NULL;
+ }
+ return (NULL);
+}
+
+/* Set policy and IPsec request if present. */
+static int
+ipsec_set_policy_internal(struct secpolicy **pcb_sp, int optname,
+ caddr_t request, size_t len, struct ucred *cred)
+{
+ struct sadb_x_policy *xpl;
+ struct secpolicy *newsp = NULL;
+ int error;
+
+ /* Sanity check. */
+ if (pcb_sp == NULL || *pcb_sp == NULL || request == NULL)
+ return (EINVAL);
+ if (len < sizeof(*xpl))
+ return (EINVAL);
+ xpl = (struct sadb_x_policy *)request;
+
+ KEYDEBUG(KEYDEBUG_IPSEC_DUMP,
+ printf("%s: passed policy\n", __func__);
+ kdebug_sadb_x_policy((struct sadb_ext *)xpl));
+
+ /* Check policy type. */
+ /* ipsec_set_policy_internal() accepts IPSEC, ENTRUST and BYPASS. */
+ if (xpl->sadb_x_policy_type == IPSEC_POLICY_DISCARD
+ || xpl->sadb_x_policy_type == IPSEC_POLICY_NONE)
+ return (EINVAL);
+
+ /* Check privileged socket. */
+ if (cred != NULL && xpl->sadb_x_policy_type == IPSEC_POLICY_BYPASS) {
+ error = priv_check_cred(cred, PRIV_NETINET_IPSEC, 0);
+ if (error)
+ return (EACCES);
+ }
+
+ /* Allocating new SP entry. */
+ if ((newsp = key_msg2sp(xpl, len, &error)) == NULL)
+ return (error);
+
+ newsp->state = IPSEC_SPSTATE_ALIVE;
+
+ /* Clear old SP and set new SP. */
+ KEY_FREESP(pcb_sp);
+ *pcb_sp = newsp;
+ KEYDEBUG(KEYDEBUG_IPSEC_DUMP,
+ printf("%s: new policy\n", __func__);
+ kdebug_secpolicy(newsp));
+
+ return (0);
+}
+
+int
+ipsec_set_policy(struct inpcb *inp, int optname, caddr_t request,
+ size_t len, struct ucred *cred)
+{
+ struct sadb_x_policy *xpl;
+ struct secpolicy **pcb_sp;
+
+ /* Sanity check. */
+ if (inp == NULL || request == NULL)
+ return (EINVAL);
+ if (len < sizeof(*xpl))
+ return (EINVAL);
+ xpl = (struct sadb_x_policy *)request;
+
+ /* Select direction. */
+ switch (xpl->sadb_x_policy_dir) {
+ case IPSEC_DIR_INBOUND:
+ pcb_sp = &inp->inp_sp->sp_in;
+ break;
+ case IPSEC_DIR_OUTBOUND:
+ pcb_sp = &inp->inp_sp->sp_out;
+ break;
+ default:
+ ipseclog((LOG_ERR, "%s: invalid direction=%u\n", __func__,
+ xpl->sadb_x_policy_dir));
+ return (EINVAL);
+ }
+
+ return (ipsec_set_policy_internal(pcb_sp, optname, request, len, cred));
+}
+
+int
+ipsec_get_policy(struct inpcb *inp, caddr_t request, size_t len,
+ struct mbuf **mp)
+{
+ struct sadb_x_policy *xpl;
+ struct secpolicy *pcb_sp;
+
+ /* Sanity check. */
+ if (inp == NULL || request == NULL || mp == NULL)
+ return (EINVAL);
+ IPSEC_ASSERT(inp->inp_sp != NULL, ("null inp_sp"));
+ if (len < sizeof(*xpl))
+ return (EINVAL);
+ xpl = (struct sadb_x_policy *)request;
+
+ /* Select direction. */
+ switch (xpl->sadb_x_policy_dir) {
+ case IPSEC_DIR_INBOUND:
+ pcb_sp = inp->inp_sp->sp_in;
+ break;
+ case IPSEC_DIR_OUTBOUND:
+ pcb_sp = inp->inp_sp->sp_out;
+ break;
+ default:
+ ipseclog((LOG_ERR, "%s: invalid direction=%u\n", __func__,
+ xpl->sadb_x_policy_dir));
+ return (EINVAL);
+ }
+
+ /* Sanity check. Should be an IPSEC_ASSERT. */
+ if (pcb_sp == NULL)
+ return (EINVAL);
+
+ *mp = key_sp2msg(pcb_sp);
+ if (!*mp) {
+ ipseclog((LOG_DEBUG, "%s: No more memory.\n", __func__));
+ return (ENOBUFS);
+ }
+
+ (*mp)->m_type = MT_DATA;
+ KEYDEBUG(KEYDEBUG_IPSEC_DUMP,
+ printf("%s:\n", __func__); kdebug_mbuf(*mp));
+
+ return (0);
+}
+
+/* Delete policy in PCB. */
+int
+ipsec_delete_pcbpolicy(struct inpcb *inp)
+{
+ IPSEC_ASSERT(inp != NULL, ("null inp"));
+
+ if (inp->inp_sp == NULL)
+ return (0);
+
+ if (inp->inp_sp->sp_in != NULL)
+ KEY_FREESP(&inp->inp_sp->sp_in);
+
+ if (inp->inp_sp->sp_out != NULL)
+ KEY_FREESP(&inp->inp_sp->sp_out);
+
+ ipsec_delpcbpolicy(inp->inp_sp);
+ inp->inp_sp = NULL;
+
+ return (0);
+}
+
+/*
+ * Return current level.
+ * Either IPSEC_LEVEL_USE or IPSEC_LEVEL_REQUIRE is always returned.
+ */
+u_int
+ipsec_get_reqlevel(struct ipsecrequest *isr)
+{
+ u_int level = 0;
+ u_int esp_trans_deflev, esp_net_deflev;
+ u_int ah_trans_deflev, ah_net_deflev;
+
+ IPSEC_ASSERT(isr != NULL && isr->sp != NULL, ("null argument"));
+ IPSEC_ASSERT(isr->sp->spidx.src.sa.sa_family == isr->sp->spidx.dst.sa.sa_family,
+ ("af family mismatch, src %u, dst %u",
+ isr->sp->spidx.src.sa.sa_family,
+ isr->sp->spidx.dst.sa.sa_family));
+
+/* XXX Note that we have ipseclog() expanded here - code sync issue. */
+#define IPSEC_CHECK_DEFAULT(lev) \
+ (((lev) != IPSEC_LEVEL_USE && (lev) != IPSEC_LEVEL_REQUIRE \
+ && (lev) != IPSEC_LEVEL_UNIQUE) \
+ ? (V_ipsec_debug \
+ ? log(LOG_INFO, "fixed system default level " #lev ":%d->%d\n",\
+ (lev), IPSEC_LEVEL_REQUIRE) \
+ : 0), \
+ (lev) = IPSEC_LEVEL_REQUIRE, \
+ (lev) \
+ : (lev))
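+/*
+ * Illustration (not in the original source): if V_ip4_esp_trans_deflev
+ * holds an out-of-range value such as 7, the macro logs the fixup (when
+ * V_ipsec_debug is set), rewrites the variable to IPSEC_LEVEL_REQUIRE and
+ * evaluates to IPSEC_LEVEL_REQUIRE; in-range values pass through unchanged.
+ */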
+
+ /* Set default level. */
+ switch (((struct sockaddr *)&isr->sp->spidx.src)->sa_family) {
+#ifdef INET
+ case AF_INET:
+ esp_trans_deflev = IPSEC_CHECK_DEFAULT(V_ip4_esp_trans_deflev);
+ esp_net_deflev = IPSEC_CHECK_DEFAULT(V_ip4_esp_net_deflev);
+ ah_trans_deflev = IPSEC_CHECK_DEFAULT(V_ip4_ah_trans_deflev);
+ ah_net_deflev = IPSEC_CHECK_DEFAULT(V_ip4_ah_net_deflev);
+ break;
+#endif
+#ifdef INET6
+ case AF_INET6:
+ esp_trans_deflev = IPSEC_CHECK_DEFAULT(V_ip6_esp_trans_deflev);
+ esp_net_deflev = IPSEC_CHECK_DEFAULT(V_ip6_esp_net_deflev);
+ ah_trans_deflev = IPSEC_CHECK_DEFAULT(V_ip6_ah_trans_deflev);
+ ah_net_deflev = IPSEC_CHECK_DEFAULT(V_ip6_ah_net_deflev);
+ break;
+#endif /* INET6 */
+ default:
+ panic("%s: unknown af %u",
+ __func__, isr->sp->spidx.src.sa.sa_family);
+ }
+
+#undef IPSEC_CHECK_DEFAULT
+
+ /* Set level. */
+ switch (isr->level) {
+ case IPSEC_LEVEL_DEFAULT:
+ switch (isr->saidx.proto) {
+ case IPPROTO_ESP:
+ if (isr->saidx.mode == IPSEC_MODE_TUNNEL)
+ level = esp_net_deflev;
+ else
+ level = esp_trans_deflev;
+ break;
+ case IPPROTO_AH:
+ if (isr->saidx.mode == IPSEC_MODE_TUNNEL)
+ level = ah_net_deflev;
+ else
+ level = ah_trans_deflev;
+ break;
+ case IPPROTO_IPCOMP:
+ /*
+ * We don't really care, as the IPComp document says
+ * that we shouldn't compress small packets.
+ */
+ level = IPSEC_LEVEL_USE;
+ break;
+ default:
+ panic("%s: Illegal protocol defined %u\n", __func__,
+ isr->saidx.proto);
+ }
+ break;
+
+ case IPSEC_LEVEL_USE:
+ case IPSEC_LEVEL_REQUIRE:
+ level = isr->level;
+ break;
+ case IPSEC_LEVEL_UNIQUE:
+ level = IPSEC_LEVEL_REQUIRE;
+ break;
+
+ default:
+ panic("%s: Illegal IPsec level %u\n", __func__, isr->level);
+ }
+
+ return (level);
+}
+
+/*
+ * Check security policy requirements against the actual
+ * packet contents. Return one if the packet should be
+ * rejected as "invalid"; otherwise return zero to have the
+ * packet treated as "valid".
+ *
+ * OUT:
+ * 0: valid
+ * 1: invalid
+ */
+int
+ipsec_in_reject(struct secpolicy *sp, struct mbuf *m)
+{
+ struct ipsecrequest *isr;
+ int need_auth;
+
+ KEYDEBUG(KEYDEBUG_IPSEC_DATA,
+ printf("%s: using SP\n", __func__); kdebug_secpolicy(sp));
+
+ /* Check policy. */
+ switch (sp->policy) {
+ case IPSEC_POLICY_DISCARD:
+ return (1);
+ case IPSEC_POLICY_BYPASS:
+ case IPSEC_POLICY_NONE:
+ return (0);
+ }
+
+ IPSEC_ASSERT(sp->policy == IPSEC_POLICY_IPSEC,
+ ("invalid policy %u", sp->policy));
+
+ /* XXX Should compare policy against IPsec header history. */
+
+ need_auth = 0;
+ for (isr = sp->req; isr != NULL; isr = isr->next) {
+ if (ipsec_get_reqlevel(isr) != IPSEC_LEVEL_REQUIRE)
+ continue;
+ switch (isr->saidx.proto) {
+ case IPPROTO_ESP:
+ if ((m->m_flags & M_DECRYPTED) == 0) {
+ KEYDEBUG(KEYDEBUG_IPSEC_DUMP,
+ printf("%s: ESP m_flags:%x\n", __func__,
+ m->m_flags));
+ return (1);
+ }
+
+ if (!need_auth &&
+ isr->sav != NULL &&
+ isr->sav->tdb_authalgxform != NULL &&
+ (m->m_flags & M_AUTHIPDGM) == 0) {
+ KEYDEBUG(KEYDEBUG_IPSEC_DUMP,
+ printf("%s: ESP/AH m_flags:%x\n", __func__,
+ m->m_flags));
+ return (1);
+ }
+ break;
+ case IPPROTO_AH:
+ need_auth = 1;
+ if ((m->m_flags & M_AUTHIPHDR) == 0) {
+ KEYDEBUG(KEYDEBUG_IPSEC_DUMP,
+ printf("%s: AH m_flags:%x\n", __func__,
+ m->m_flags));
+ return (1);
+ }
+ break;
+ case IPPROTO_IPCOMP:
+ /*
+ * We don't really care, as the IPComp document
+ * says that we shouldn't compress small
+ * packets. An IPComp policy should always be
+ * treated as being at "use" level.
+ */
+ break;
+ }
+ }
+ return (0); /* Valid. */
+}
+
+static int
+ipsec46_in_reject(struct mbuf *m, struct inpcb *inp)
+{
+ struct secpolicy *sp;
+ int error;
+ int result;
+
+ IPSEC_ASSERT(m != NULL, ("null mbuf"));
+
+ /*
+ * Get SP for this packet.
+ * When we are called from ip_forward(), we call
+ * ipsec_getpolicybyaddr() with IP_FORWARDING flag.
+ */
+ if (inp == NULL)
+ sp = ipsec_getpolicybyaddr(m, IPSEC_DIR_INBOUND, IP_FORWARDING, &error);
+ else
+ sp = ipsec_getpolicybysock(m, IPSEC_DIR_INBOUND, inp, &error);
+
+ if (sp != NULL) {
+ result = ipsec_in_reject(sp, m);
+ KEY_FREESP(&sp);
+ } else {
+ result = 0; /* XXX Should this panic?
+ * -> No, there may have been an error. */
+ }
+ return (result);
+}
+
+/*
+ * Check AH/ESP integrity.
+ * This function is called from tcp_input(), udp_input(),
+ * and {ah,esp}4_input for tunnel mode.
+ */
+int
+ipsec4_in_reject(struct mbuf *m, struct inpcb *inp)
+{
+ int result;
+
+ result = ipsec46_in_reject(m, inp);
+ if (result)
+ V_ipsec4stat.ips_in_polvio++;
+
+ return (result);
+}
+
+#ifdef INET6
+/*
+ * Check AH/ESP integrity.
+ * This function is called from tcp6_input(), udp6_input(),
+ * and {ah,esp}6_input for tunnel mode.
+ */
+int
+ipsec6_in_reject(struct mbuf *m, struct inpcb *inp)
+{
+ int result;
+
+ result = ipsec46_in_reject(m, inp);
+ if (result)
+ V_ipsec6stat.ips_in_polvio++;
+
+ return (result);
+}
+#endif
+
+/*
+ * Compute the byte size to be occupied by the IPsec header.
+ * For tunnel-mode requests the size of the outer IP header is included.
+ * NOTE: The SP passed in is freed by the caller, ipsec_hdrsiz().
+ */
+static size_t
+ipsec_hdrsiz_internal(struct secpolicy *sp)
+{
+ struct ipsecrequest *isr;
+ size_t size;
+
+ KEYDEBUG(KEYDEBUG_IPSEC_DATA,
+ printf("%s: using SP\n", __func__); kdebug_secpolicy(sp));
+
+ switch (sp->policy) {
+ case IPSEC_POLICY_DISCARD:
+ case IPSEC_POLICY_BYPASS:
+ case IPSEC_POLICY_NONE:
+ return (0);
+ }
+
+ IPSEC_ASSERT(sp->policy == IPSEC_POLICY_IPSEC,
+ ("invalid policy %u", sp->policy));
+
+ size = 0;
+ for (isr = sp->req; isr != NULL; isr = isr->next) {
+ size_t clen = 0;
+
+ switch (isr->saidx.proto) {
+ case IPPROTO_ESP:
+ clen = esp_hdrsiz(isr->sav);
+ break;
+ case IPPROTO_AH:
+ clen = ah_hdrsiz(isr->sav);
+ break;
+ case IPPROTO_IPCOMP:
+ clen = sizeof(struct ipcomp);
+ break;
+ }
+
+ if (isr->saidx.mode == IPSEC_MODE_TUNNEL) {
+ switch (isr->saidx.dst.sa.sa_family) {
+ case AF_INET:
+ clen += sizeof(struct ip);
+ break;
+#ifdef INET6
+ case AF_INET6:
+ clen += sizeof(struct ip6_hdr);
+ break;
+#endif
+ default:
+ ipseclog((LOG_ERR, "%s: unknown AF %d in "
+ "IPsec tunnel SA\n", __func__,
+ ((struct sockaddr *)&isr->saidx.dst)->sa_family));
+ break;
+ }
+ }
+ size += clen;
+ }
+
+ return (size);
+}
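+/*
+ * Worked example (illustrative): a single ESP request in tunnel mode over
+ * IPv4 contributes esp_hdrsiz(sav) + sizeof(struct ip) bytes, while a
+ * transport-mode AH request adds only ah_hdrsiz(sav).
+ */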
+
+/*
+ * This function is called from ipsec_hdrsiz_tcp(), ip_ipsec_mtu(),
+ * the (currently disabled) ip6_ipsec_mtu(), and ip6_forward().
+ */
+size_t
+ipsec_hdrsiz(struct mbuf *m, u_int dir, struct inpcb *inp)
+{
+ struct secpolicy *sp;
+ int error;
+ size_t size;
+
+ IPSEC_ASSERT(m != NULL, ("null mbuf"));
+
+ /* Get SP for this packet.
+ * When we are called from ip_forward(), we call
+ * ipsec_getpolicybyaddr() with IP_FORWARDING flag.
+ */
+ if (inp == NULL)
+ sp = ipsec_getpolicybyaddr(m, dir, IP_FORWARDING, &error);
+ else
+ sp = ipsec_getpolicybysock(m, dir, inp, &error);
+
+ if (sp != NULL) {
+ size = ipsec_hdrsiz_internal(sp);
+ KEYDEBUG(KEYDEBUG_IPSEC_DATA,
+ printf("%s: size:%lu.\n", __func__,
+ (unsigned long)size));
+
+ KEY_FREESP(&sp);
+ } else {
+ size = 0; /* XXX Should this panic?
+ * -> No, we are called w/o knowing if
+ * IPsec processing is needed. */
+ }
+ return (size);
+}
+
+/*
+ * Check the variable replay window.
+ * ipsec_chkreplay() performs replay check before ICV verification.
+ * ipsec_updatereplay() updates replay bitmap. This must be called after
+ * ICV verification (it also performs replay check, which is usually done
+ * beforehand).
+ * Zero is returned if the packet is disallowed, 1 if it is permitted.
+ *
+ * Based on RFC 2401.
+ */
+int
+ipsec_chkreplay(u_int32_t seq, struct secasvar *sav)
+{
+ const struct secreplay *replay;
+ u_int32_t diff;
+ int fr;
+ u_int32_t wsizeb; /* Constant: bits of window size. */
+ int frlast; /* Constant: last frame. */
+
+ IPSEC_ASSERT(sav != NULL, ("Null SA"));
+ IPSEC_ASSERT(sav->replay != NULL, ("Null replay state"));
+
+ replay = sav->replay;
+
+ if (replay->wsize == 0)
+ return (1); /* No need to check replay. */
+
+ /* Constant. */
+ frlast = replay->wsize - 1;
+ wsizeb = replay->wsize << 3;
+
+ /* Sequence number of 0 is invalid. */
+ if (seq == 0)
+ return (0);
+
+ /* First time is always okay. */
+ if (replay->count == 0)
+ return (1);
+
+ if (seq > replay->lastseq) {
+ /* Larger sequences are okay. */
+ return (1);
+ } else {
+ /* seq is equal or less than lastseq. */
+ diff = replay->lastseq - seq;
+
+ /* Over range to check, i.e. too old or wrapped. */
+ if (diff >= wsizeb)
+ return (0);
+
+ fr = frlast - diff / 8;
+
+ /* This packet already seen? */
+ if ((replay->bitmap)[fr] & (1 << (diff % 8)))
+ return (0);
+
+ /* Out of order but good. */
+ return (1);
+ }
+}
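+/*
+ * Worked example (illustrative): with wsize == 4 (a 32-bit window, wsizeb
+ * == 32) and lastseq == 100, seq 101 is accepted as larger; seq 90 gives
+ * diff == 10 and is checked against bit (10 % 8) of bitmap[frlast - 1];
+ * seq 60 gives diff == 40 >= wsizeb and is rejected as too old.
+ */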
+
+/*
+ * Check the replay counter and update the window if the packet is valid.
+ * OUT: 0: OK
+ * 1: NG (packet rejected)
+ */
+int
+ipsec_updatereplay(u_int32_t seq, struct secasvar *sav)
+{
+ struct secreplay *replay;
+ u_int32_t diff;
+ int fr;
+ u_int32_t wsizeb; /* Constant: bits of window size. */
+ int frlast; /* Constant: last frame. */
+
+ IPSEC_ASSERT(sav != NULL, ("Null SA"));
+ IPSEC_ASSERT(sav->replay != NULL, ("Null replay state"));
+
+ replay = sav->replay;
+
+ if (replay->wsize == 0)
+ goto ok; /* No need to check replay. */
+
+ /* Constant. */
+ frlast = replay->wsize - 1;
+ wsizeb = replay->wsize << 3;
+
+ /* Sequence number of 0 is invalid. */
+ if (seq == 0)
+ return (1);
+
+ /* First time. */
+ if (replay->count == 0) {
+ replay->lastseq = seq;
+ bzero(replay->bitmap, replay->wsize);
+ (replay->bitmap)[frlast] = 1;
+ goto ok;
+ }
+
+ if (seq > replay->lastseq) {
+ /* seq is larger than lastseq. */
+ diff = seq - replay->lastseq;
+
+ /* New larger sequence number. */
+ if (diff < wsizeb) {
+ /* In window. */
+ /* Set bit for this packet. */
+ vshiftl(replay->bitmap, diff, replay->wsize);
+ (replay->bitmap)[frlast] |= 1;
+ } else {
+ /* The sequence number is way larger; restart the window. */
+ bzero(replay->bitmap, replay->wsize);
+ (replay->bitmap)[frlast] = 1;
+ }
+ replay->lastseq = seq;
+
+ /* Larger is good. */
+ } else {
+ /* seq is equal or less than lastseq. */
+ diff = replay->lastseq - seq;
+
+ /* Over range to check, i.e. too old or wrapped. */
+ if (diff >= wsizeb)
+ return (1);
+
+ fr = frlast - diff / 8;
+
+ /* This packet already seen? */
+ if ((replay->bitmap)[fr] & (1 << (diff % 8)))
+ return (1);
+
+ /* Mark as seen. */
+ (replay->bitmap)[fr] |= (1 << (diff % 8));
+
+ /* Out of order but good. */
+ }
+
+ok:
+ if (replay->count == ~0) {
+
+ /* Set overflow flag. */
+ replay->overflow++;
+
+ /* Don't increment, no more packets accepted. */
+ if ((sav->flags & SADB_X_EXT_CYCSEQ) == 0)
+ return (1);
+
+ ipseclog((LOG_WARNING, "%s: replay counter made %d cycle(s). %s\n",
+ __func__, replay->overflow, ipsec_logsastr(sav)));
+ }
+
+ replay->count++;
+
+ return (0);
+}
+
+/*
+ * Shift variable length buffer to left.
+ * IN: bitmap: pointer to the buffer
+ * nbit: the number of bits to shift.
+ * wsize: buffer size (bytes).
+ */
+static void
+vshiftl(unsigned char *bitmap, int nbit, int wsize)
+{
+ int s, j, i;
+ unsigned char over;
+
+ for (j = 0; j < nbit; j += 8) {
+ s = (nbit - j < 8) ? (nbit - j): 8;
+ bitmap[0] <<= s;
+ for (i = 1; i < wsize; i++) {
+ over = (bitmap[i] >> (8 - s));
+ bitmap[i] <<= s;
+ bitmap[i-1] |= over;
+ }
+ }
+}
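+/*
+ * Worked example (illustrative): vshiftl(bitmap, 1, 2) applied to the
+ * 2-byte bitmap { 0x01, 0x80 } carries the high bit of bitmap[1] into
+ * bitmap[0], yielding { 0x03, 0x00 }.
+ */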
+
+#ifdef INET
+/* Return a printable string for the IPv4 address. */
+static char *
+inet_ntoa4(struct in_addr ina)
+{
+ static char buf[4][4 * sizeof "123" + 4];
+ unsigned char *ucp = (unsigned char *) &ina;
+ static int i = 3;
+
+ /* XXX-BZ Returns static buffer. */
+ i = (i + 1) % 4;
+ sprintf(buf[i], "%d.%d.%d.%d", ucp[0] & 0xff, ucp[1] & 0xff,
+ ucp[2] & 0xff, ucp[3] & 0xff);
+ return (buf[i]);
+}
+#endif
+
+/* Return a printable string for the address. */
+char *
+ipsec_address(union sockaddr_union* sa)
+{
+#ifdef INET6
+ char ip6buf[INET6_ADDRSTRLEN];
+#endif
+
+ switch (sa->sa.sa_family) {
+#ifdef INET
+ case AF_INET:
+ return (inet_ntoa4(sa->sin.sin_addr));
+#endif /* INET */
+#ifdef INET6
+ case AF_INET6:
+ return (ip6_sprintf(ip6buf, &sa->sin6.sin6_addr));
+#endif /* INET6 */
+ default:
+ return ("(unknown address family)");
+ }
+}
+
+const char *
+ipsec_logsastr(struct secasvar *sav)
+{
+ static char buf[256];
+ char *p;
+ struct secasindex *saidx = &sav->sah->saidx;
+
+ IPSEC_ASSERT(saidx->src.sa.sa_family == saidx->dst.sa.sa_family,
+ ("address family mismatch"));
+
+ p = buf;
+ snprintf(buf, sizeof(buf), "SA(SPI=%u ", (u_int32_t)ntohl(sav->spi));
+ while (p && *p)
+ p++;
+ /* NB: only use ipsec_address on one address at a time. */
+ snprintf(p, sizeof (buf) - (p - buf), "src=%s ",
+ ipsec_address(&saidx->src));
+ while (p && *p)
+ p++;
+ snprintf(p, sizeof (buf) - (p - buf), "dst=%s)",
+ ipsec_address(&saidx->dst));
+
+ return (buf);
+}
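+/*
+ * Example output (illustrative values):
+ *	"SA(SPI=1000 src=192.0.2.1 dst=192.0.2.2)"
+ */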
+
+void
+ipsec_dumpmbuf(struct mbuf *m)
+{
+ int totlen;
+ int i;
+ u_char *p;
+
+ totlen = 0;
+ printf("---\n");
+ while (m) {
+ p = mtod(m, u_char *);
+ for (i = 0; i < m->m_len; i++) {
+ printf("%02x ", p[i]);
+ totlen++;
+ if (totlen % 16 == 0)
+ printf("\n");
+ }
+ m = m->m_next;
+ }
+ if (totlen % 16 != 0)
+ printf("\n");
+ printf("---\n");
+}
+
+static void
+ipsec_init(const void *unused __unused)
+{
+
+ SECPOLICY_LOCK_INIT(&V_ip4_def_policy);
+ V_ip4_def_policy.refcnt = 1; /* NB: disallow free. */
+}
+VNET_SYSINIT(ipsec_init, SI_SUB_PROTO_DOMAININIT, SI_ORDER_ANY, ipsec_init,
+ NULL);
+
+
+/* XXX This stuff doesn't belong here... */
+
+static struct xformsw* xforms = NULL;
+
+/*
+ * Register a transform; typically at system startup.
+ */
+void
+xform_register(struct xformsw* xsp)
+{
+
+ xsp->xf_next = xforms;
+ xforms = xsp;
+}
+
+/*
+ * Initialize transform support in an sav.
+ */
+int
+xform_init(struct secasvar *sav, int xftype)
+{
+ struct xformsw *xsp;
+
+ if (sav->tdb_xform != NULL) /* Previously initialized. */
+ return (0);
+ for (xsp = xforms; xsp; xsp = xsp->xf_next)
+ if (xsp->xf_type == xftype)
+ return ((*xsp->xf_init)(sav, xsp));
+ return (EINVAL);
+}
diff --git a/rtems/freebsd/netipsec/ipsec.h b/rtems/freebsd/netipsec/ipsec.h
new file mode 100644
index 00000000..7512a199
--- /dev/null
+++ b/rtems/freebsd/netipsec/ipsec.h
@@ -0,0 +1,453 @@
+/* $FreeBSD$ */
+/* $KAME: ipsec.h,v 1.53 2001/11/20 08:32:38 itojun Exp $ */
+
+/*-
+ * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the project nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * IPsec controller part.
+ */
+
+#ifndef _NETIPSEC_IPSEC_HH_
+#define _NETIPSEC_IPSEC_HH_
+
+#if defined(_KERNEL) && !defined(_LKM) && !defined(KLD_MODULE)
+#include <rtems/freebsd/local/opt_inet.h>
+#include <rtems/freebsd/local/opt_ipsec.h>
+#endif
+
+#include <rtems/freebsd/net/pfkeyv2.h>
+#include <rtems/freebsd/netipsec/keydb.h>
+
+#ifdef _KERNEL
+
+#define IPSEC_ASSERT(_c,_m) KASSERT(_c, _m)
+
+#define IPSEC_IS_PRIVILEGED_SO(_so) \
+ ((_so)->so_cred != NULL && \
+ priv_check_cred((_so)->so_cred, PRIV_NETINET_IPSEC, 0) \
+ == 0)
+
+/*
+ * Security Policy Index
+ * Ensure that both address families in the "src" and "dst" are same.
+ * When the value of the ul_proto is ICMPv6, the port field in "src"
+ * specifies ICMPv6 type, and the port field in "dst" specifies ICMPv6 code.
+ */
+struct secpolicyindex {
+ u_int8_t dir; /* direction of packet flow, see below */
+ union sockaddr_union src; /* IP src address for SP */
+ union sockaddr_union dst; /* IP dst address for SP */
+ u_int8_t prefs; /* prefix length in bits for src */
+ u_int8_t prefd; /* prefix length in bits for dst */
+ u_int16_t ul_proto; /* upper layer protocol */
+#ifdef notyet
+ uid_t uids;
+ uid_t uidd;
+ gid_t gids;
+ gid_t gidd;
+#endif
+};
+
+/* Security Policy Data Base */
+struct secpolicy {
+ LIST_ENTRY(secpolicy) chain;
+ struct mtx lock;
+
+ u_int refcnt; /* reference count */
+ struct secpolicyindex spidx; /* selector */
+ u_int32_t id; /* Its unique number on the system. */
+ u_int state; /* 0: dead, others: alive */
+#define IPSEC_SPSTATE_DEAD 0
+#define IPSEC_SPSTATE_ALIVE 1
+ u_int16_t policy; /* policy_type per pfkeyv2.h */
+ u_int16_t scangen; /* scan generation # */
+ struct ipsecrequest *req;
+ /* pointer to the ipsec request tree; */
+ /* valid only if policy == IPSEC, else NULL. */
+
+ /*
+ * Lifetime handler.
+ * The policy can be used without limitation if both lifetime and
+ * validtime are zero.
+ * "lifetime" is passed by sadb_lifetime.sadb_lifetime_addtime.
+ * "validtime" is passed by sadb_lifetime.sadb_lifetime_usetime.
+ */
+ time_t created; /* time created the policy */
+ time_t lastused; /* updated whenever the kernel sends a packet */
+ long lifetime; /* duration of the lifetime of this policy */
+ long validtime; /* duration this policy is valid without use */
+};
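+/*
+ * Illustrative example: lifetime == 3600 expires the policy an hour after
+ * "created", validtime == 600 expires it after 10 minutes without use, and
+ * both zero means the policy never expires.
+ */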
+
+#define SECPOLICY_LOCK_INIT(_sp) \
+ mtx_init(&(_sp)->lock, "ipsec policy", NULL, MTX_DEF)
+#define SECPOLICY_LOCK(_sp) mtx_lock(&(_sp)->lock)
+#define SECPOLICY_UNLOCK(_sp) mtx_unlock(&(_sp)->lock)
+#define SECPOLICY_LOCK_DESTROY(_sp) mtx_destroy(&(_sp)->lock)
+#define SECPOLICY_LOCK_ASSERT(_sp) mtx_assert(&(_sp)->lock, MA_OWNED)
+
+/* Request for IPsec */
+struct ipsecrequest {
+ struct ipsecrequest *next;
+ /* pointer to next structure */
+ /* If NULL, it means the end of chain. */
+ struct secasindex saidx;/* hint for searching for the proper SA */
+ /* if __ss_len == 0 then no address specified.*/
+ u_int level; /* IPsec level defined below. */
+
+ struct secasvar *sav; /* place holder of SA for use */
+ struct secpolicy *sp; /* back pointer to SP */
+ struct mtx lock; /* to interlock updates */
+};
+
+/*
+ * Need recursion for when crypto callbacks happen directly,
+ * as in the case of software crypto. Need to look at how
+ * hard it is to remove this...
+ */
+#define IPSECREQUEST_LOCK_INIT(_isr) \
+ mtx_init(&(_isr)->lock, "ipsec request", NULL, MTX_DEF | MTX_RECURSE)
+#define IPSECREQUEST_LOCK(_isr) mtx_lock(&(_isr)->lock)
+#define IPSECREQUEST_UNLOCK(_isr) mtx_unlock(&(_isr)->lock)
+#define IPSECREQUEST_LOCK_DESTROY(_isr) mtx_destroy(&(_isr)->lock)
+#define IPSECREQUEST_LOCK_ASSERT(_isr) mtx_assert(&(_isr)->lock, MA_OWNED)
+
+/* security policy in PCB */
+struct inpcbpolicy {
+ struct secpolicy *sp_in;
+ struct secpolicy *sp_out;
+ int priv; /* privileged socket ? */
+};
+
+/* SP acquiring list table. */
+struct secspacq {
+ LIST_ENTRY(secspacq) chain;
+
+ struct secpolicyindex spidx;
+
+ time_t created; /* for lifetime */
+ int count; /* for lifetime */
+ /* XXX: place holder for an mbuf to be sent? */
+};
+#endif /* _KERNEL */
+
+/* According to IANA assignment, port 0x0000 and proto 0xff are reserved. */
+#define IPSEC_PORT_ANY 0
+#define IPSEC_ULPROTO_ANY 255
+#define IPSEC_PROTO_ANY 255
+
+/* mode of security protocol */
+/* NOTE: DON'T use IPSEC_MODE_ANY in the SPD. It's only used in the SAD. */
+#define IPSEC_MODE_ANY 0 /* i.e. wildcard. */
+#define IPSEC_MODE_TRANSPORT 1
+#define IPSEC_MODE_TUNNEL 2
+#define IPSEC_MODE_TCPMD5 3 /* TCP MD5 mode */
+
+/*
+ * Direction of security policy.
+ * NOTE: INVALID is used only as a flag;
+ * the others are also used as loop counters.
+ */
+#define IPSEC_DIR_ANY 0
+#define IPSEC_DIR_INBOUND 1
+#define IPSEC_DIR_OUTBOUND 2
+#define IPSEC_DIR_MAX 3
+#define IPSEC_DIR_INVALID 4
+
+/* Policy level */
+/*
+ * IPSEC, ENTRUST and BYPASS are allowed for setsockopt() in PCB,
+ * DISCARD, IPSEC and NONE are allowed for setkey() in SPD.
+ * DISCARD and NONE are allowed for system default.
+ */
+#define IPSEC_POLICY_DISCARD 0 /* discarding packet */
+#define IPSEC_POLICY_NONE 1 /* through IPsec engine */
+#define IPSEC_POLICY_IPSEC 2 /* do IPsec */
+#define IPSEC_POLICY_ENTRUST 3 /* consulting SPD if present. */
+#define IPSEC_POLICY_BYPASS 4 /* only for privileged socket. */
+
+/* Security protocol level */
+#define IPSEC_LEVEL_DEFAULT 0 /* reference to system default */
+#define IPSEC_LEVEL_USE 1 /* use SA if present. */
+#define IPSEC_LEVEL_REQUIRE 2 /* require SA. */
+#define IPSEC_LEVEL_UNIQUE 3 /* unique SA. */
+
+#define IPSEC_MANUAL_REQID_MAX 0x3fff
+ /*
+ * If the security policy level == unique, this id
+ * refers to the SA to be used; otherwise it is
+ * zero.
+ * 1 - 0x3fff are reserved for manual keying and
+ * 0 is reserved for the reason above; the rest
+ * are for kernel use.
+ * Note that this id does not identify an SA
+ * by itself.
+ */
+#define IPSEC_REPLAYWSIZE 32
+
+/* statistics for ipsec processing */
+struct ipsecstat {
+ u_quad_t in_success; /* succeeded inbound process */
+ u_quad_t in_polvio;
+ /* security policy violation for inbound process */
+ u_quad_t in_nosa; /* inbound SA is unavailable */
+ u_quad_t in_inval; /* inbound processing failed due to EINVAL */
+ u_quad_t in_nomem; /* inbound processing failed due to ENOBUFS */
+ u_quad_t in_badspi; /* failed getting a SPI */
+ u_quad_t in_ahreplay; /* AH replay check failed */
+ u_quad_t in_espreplay; /* ESP replay check failed */
+ u_quad_t in_ahauthsucc; /* AH authentication success */
+ u_quad_t in_ahauthfail; /* AH authentication failure */
+ u_quad_t in_espauthsucc; /* ESP authentication success */
+ u_quad_t in_espauthfail; /* ESP authentication failure */
+ u_quad_t in_esphist[256];
+ u_quad_t in_ahhist[256];
+ u_quad_t in_comphist[256];
+ u_quad_t out_success; /* succeeded outbound process */
+ u_quad_t out_polvio;
+ /* security policy violation for outbound process */
+ u_quad_t out_nosa; /* outbound SA is unavailable */
+ u_quad_t out_inval; /* outbound process failed due to EINVAL */
+ u_quad_t out_nomem; /* outbound processing failed due to ENOBUFS */
+ u_quad_t out_noroute; /* there is no route */
+ u_quad_t out_esphist[256];
+ u_quad_t out_ahhist[256];
+ u_quad_t out_comphist[256];
+
+ u_quad_t spdcachelookup;
+ u_quad_t spdcachemiss;
+
+ u_int32_t ips_in_polvio; /* input: sec policy violation */
+ u_int32_t ips_out_polvio; /* output: sec policy violation */
+ u_int32_t ips_out_nosa; /* output: SA unavailable */
+ u_int32_t ips_out_nomem; /* output: no memory available */
+ u_int32_t ips_out_noroute; /* output: no route available */
+ u_int32_t ips_out_inval; /* output: generic error */
+ u_int32_t ips_out_bundlesa; /* output: bundled SA processed */
+ u_int32_t ips_mbcoalesced; /* mbufs coalesced during clone */
+ u_int32_t ips_clcoalesced; /* clusters coalesced during clone */
+ u_int32_t ips_clcopied; /* clusters copied during clone */
+ u_int32_t ips_mbinserted; /* mbufs inserted during makespace */
+ /*
+ * Temporary statistics for performance analysis.
+ */
+ /* See where ESP/AH/IPCOMP header land in mbuf on input */
+ u_int32_t ips_input_front;
+ u_int32_t ips_input_middle;
+ u_int32_t ips_input_end;
+};
+
+/*
+ * Definitions for IPsec & Key sysctl operations.
+ */
+/*
+ * Names for IPsec & Key sysctl objects
+ */
+#define IPSECCTL_STATS 1 /* stats */
+#define IPSECCTL_DEF_POLICY 2
+#define IPSECCTL_DEF_ESP_TRANSLEV 3 /* int; ESP transport mode */
+#define IPSECCTL_DEF_ESP_NETLEV 4 /* int; ESP tunnel mode */
+#define IPSECCTL_DEF_AH_TRANSLEV 5 /* int; AH transport mode */
+#define IPSECCTL_DEF_AH_NETLEV 6 /* int; AH tunnel mode */
+#if 0 /* obsolete, do not reuse */
+#define IPSECCTL_INBOUND_CALL_IKE 7
+#endif
+#define IPSECCTL_AH_CLEARTOS 8
+#define IPSECCTL_AH_OFFSETMASK 9
+#define IPSECCTL_DFBIT 10
+#define IPSECCTL_ECN 11
+#define IPSECCTL_DEBUG 12
+#define IPSECCTL_ESP_RANDPAD 13
+#define IPSECCTL_MAXID 14
+
+#define IPSECCTL_NAMES { \
+ { 0, 0 }, \
+ { 0, 0 }, \
+ { "def_policy", CTLTYPE_INT }, \
+ { "esp_trans_deflev", CTLTYPE_INT }, \
+ { "esp_net_deflev", CTLTYPE_INT }, \
+ { "ah_trans_deflev", CTLTYPE_INT }, \
+ { "ah_net_deflev", CTLTYPE_INT }, \
+ { 0, 0 }, \
+ { "ah_cleartos", CTLTYPE_INT }, \
+ { "ah_offsetmask", CTLTYPE_INT }, \
+ { "dfbit", CTLTYPE_INT }, \
+ { "ecn", CTLTYPE_INT }, \
+ { "debug", CTLTYPE_INT }, \
+ { "esp_randpad", CTLTYPE_INT }, \
+}
+
+#define IPSEC6CTL_NAMES { \
+ { 0, 0 }, \
+ { 0, 0 }, \
+ { "def_policy", CTLTYPE_INT }, \
+ { "esp_trans_deflev", CTLTYPE_INT }, \
+ { "esp_net_deflev", CTLTYPE_INT }, \
+ { "ah_trans_deflev", CTLTYPE_INT }, \
+ { "ah_net_deflev", CTLTYPE_INT }, \
+ { 0, 0 }, \
+ { 0, 0 }, \
+ { 0, 0 }, \
+ { 0, 0 }, \
+ { "ecn", CTLTYPE_INT }, \
+ { "debug", CTLTYPE_INT }, \
+ { "esp_randpad", CTLTYPE_INT }, \
+}
+
+#ifdef _KERNEL
+struct ipsec_output_state {
+ struct mbuf *m;
+ struct route *ro;
+ struct sockaddr *dst;
+};
+
+struct ipsec_history {
+ int ih_proto;
+ u_int32_t ih_spi;
+};
+
+VNET_DECLARE(int, ipsec_debug);
+#define V_ipsec_debug VNET(ipsec_debug)
+
+#ifdef REGRESSION
+VNET_DECLARE(int, ipsec_replay);
+VNET_DECLARE(int, ipsec_integrity);
+
+#define V_ipsec_replay VNET(ipsec_replay)
+#define V_ipsec_integrity VNET(ipsec_integrity)
+#endif
+
+VNET_DECLARE(struct ipsecstat, ipsec4stat);
+VNET_DECLARE(struct secpolicy, ip4_def_policy);
+VNET_DECLARE(int, ip4_esp_trans_deflev);
+VNET_DECLARE(int, ip4_esp_net_deflev);
+VNET_DECLARE(int, ip4_ah_trans_deflev);
+VNET_DECLARE(int, ip4_ah_net_deflev);
+VNET_DECLARE(int, ip4_ah_offsetmask);
+VNET_DECLARE(int, ip4_ipsec_dfbit);
+VNET_DECLARE(int, ip4_ipsec_ecn);
+VNET_DECLARE(int, ip4_esp_randpad);
+VNET_DECLARE(int, crypto_support);
+
+#define V_ipsec4stat VNET(ipsec4stat)
+#define V_ip4_def_policy VNET(ip4_def_policy)
+#define V_ip4_esp_trans_deflev VNET(ip4_esp_trans_deflev)
+#define V_ip4_esp_net_deflev VNET(ip4_esp_net_deflev)
+#define V_ip4_ah_trans_deflev VNET(ip4_ah_trans_deflev)
+#define V_ip4_ah_net_deflev VNET(ip4_ah_net_deflev)
+#define V_ip4_ah_offsetmask VNET(ip4_ah_offsetmask)
+#define V_ip4_ipsec_dfbit VNET(ip4_ipsec_dfbit)
+#define V_ip4_ipsec_ecn VNET(ip4_ipsec_ecn)
+#define V_ip4_esp_randpad VNET(ip4_esp_randpad)
+#define V_crypto_support VNET(crypto_support)
+
+#define ipseclog(x) do { if (V_ipsec_debug) log x; } while (0)
+/* for openbsd compatibility */
+#define DPRINTF(x) do { if (V_ipsec_debug) printf x; } while (0)
+
+extern struct ipsecrequest *ipsec_newisr(void);
+extern void ipsec_delisr(struct ipsecrequest *);
+
+struct tdb_ident;
+extern struct secpolicy *ipsec_getpolicy __P((struct tdb_ident*, u_int));
+struct inpcb;
+extern struct secpolicy *ipsec4_checkpolicy __P((struct mbuf *, u_int, u_int,
+ int *, struct inpcb *));
+extern struct secpolicy * ipsec_getpolicybyaddr(struct mbuf *, u_int,
+ int, int *);
+
+struct inpcb;
+extern int ipsec_init_policy __P((struct socket *so, struct inpcbpolicy **));
+extern int ipsec_copy_policy
+ __P((struct inpcbpolicy *, struct inpcbpolicy *));
+extern u_int ipsec_get_reqlevel __P((struct ipsecrequest *));
+extern int ipsec_in_reject __P((struct secpolicy *, struct mbuf *));
+
+extern int ipsec_set_policy __P((struct inpcb *inp, int optname,
+ caddr_t request, size_t len, struct ucred *cred));
+extern int ipsec_get_policy __P((struct inpcb *inpcb, caddr_t request,
+ size_t len, struct mbuf **mp));
+extern int ipsec_delete_pcbpolicy __P((struct inpcb *));
+extern int ipsec4_in_reject __P((struct mbuf *, struct inpcb *));
+
+struct secas;
+struct tcpcb;
+extern int ipsec_chkreplay __P((u_int32_t, struct secasvar *));
+extern int ipsec_updatereplay __P((u_int32_t, struct secasvar *));
+
+extern size_t ipsec_hdrsiz __P((struct mbuf *, u_int, struct inpcb *));
+extern size_t ipsec_hdrsiz_tcp __P((struct tcpcb *));
+
+union sockaddr_union;
+extern char * ipsec_address(union sockaddr_union* sa);
+extern const char *ipsec_logsastr __P((struct secasvar *));
+
+extern void ipsec_dumpmbuf __P((struct mbuf *));
+
+struct m_tag;
+extern void ah4_input(struct mbuf *m, int off);
+extern void ah4_ctlinput(int cmd, struct sockaddr *sa, void *);
+extern void esp4_input(struct mbuf *m, int off);
+extern void esp4_ctlinput(int cmd, struct sockaddr *sa, void *);
+extern void ipcomp4_input(struct mbuf *m, int off);
+extern int ipsec4_common_input(struct mbuf *m, ...);
+extern int ipsec4_common_input_cb(struct mbuf *m, struct secasvar *sav,
+ int skip, int protoff, struct m_tag *mt);
+extern int ipsec4_process_packet __P((struct mbuf *, struct ipsecrequest *,
+ int, int));
+extern int ipsec_process_done __P((struct mbuf *, struct ipsecrequest *));
+
+extern struct mbuf *ipsec_copypkt __P((struct mbuf *));
+
+extern void m_checkalignment(const char* where, struct mbuf *m0,
+ int off, int len);
+extern struct mbuf *m_makespace(struct mbuf *m0, int skip, int hlen, int *off);
+extern caddr_t m_pad(struct mbuf *m, int n);
+extern int m_striphdr(struct mbuf *m, int skip, int hlen);
+
+#ifdef DEV_ENC
+#define ENC_BEFORE 0x0001
+#define ENC_AFTER 0x0002
+#define ENC_IN 0x0100
+#define ENC_OUT 0x0200
+extern int ipsec_filter(struct mbuf **, int, int);
+extern void ipsec_bpf(struct mbuf *, struct secasvar *, int, int);
+#endif
+#endif /* _KERNEL */
+
+#ifndef _KERNEL
+extern caddr_t ipsec_set_policy __P((char *, int));
+extern int ipsec_get_policylen __P((caddr_t));
+extern char *ipsec_dump_policy __P((caddr_t, char *));
+
+extern const char *ipsec_strerror __P((void));
+
+#endif /* !_KERNEL */
+
+#endif /* _NETIPSEC_IPSEC_HH_ */
diff --git a/rtems/freebsd/netipsec/ipsec6.h b/rtems/freebsd/netipsec/ipsec6.h
new file mode 100644
index 00000000..1dd0a6df
--- /dev/null
+++ b/rtems/freebsd/netipsec/ipsec6.h
@@ -0,0 +1,78 @@
+/* $FreeBSD$ */
+/* $KAME: ipsec.h,v 1.44 2001/03/23 08:08:47 itojun Exp $ */
+
+/*-
+ * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the project nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * IPsec controller part.
+ */
+
+#ifndef _NETIPSEC_IPSEC6_HH_
+#define _NETIPSEC_IPSEC6_HH_
+
+#include <rtems/freebsd/net/pfkeyv2.h>
+#include <rtems/freebsd/netipsec/keydb.h>
+
+#ifdef _KERNEL
+VNET_DECLARE(struct ipsecstat, ipsec6stat);
+VNET_DECLARE(int, ip6_esp_trans_deflev);
+VNET_DECLARE(int, ip6_esp_net_deflev);
+VNET_DECLARE(int, ip6_ah_trans_deflev);
+VNET_DECLARE(int, ip6_ah_net_deflev);
+VNET_DECLARE(int, ip6_ipsec_ecn);
+
+#define V_ipsec6stat VNET(ipsec6stat)
+#define V_ip6_esp_trans_deflev VNET(ip6_esp_trans_deflev)
+#define V_ip6_esp_net_deflev VNET(ip6_esp_net_deflev)
+#define V_ip6_ah_trans_deflev VNET(ip6_ah_trans_deflev)
+#define V_ip6_ah_net_deflev VNET(ip6_ah_net_deflev)
+#define V_ip6_ipsec_ecn VNET(ip6_ipsec_ecn)
+
+struct inpcb;
+
+extern int ipsec6_in_reject __P((struct mbuf *, struct inpcb *));
+
+struct ip6_hdr;
+extern const char *ipsec6_logpacketstr __P((struct ip6_hdr *, u_int32_t));
+
+struct m_tag;
+extern int ipsec6_common_input(struct mbuf **mp, int *offp, int proto);
+extern int ipsec6_common_input_cb(struct mbuf *m, struct secasvar *sav,
+ int skip, int protoff, struct m_tag *mt);
+extern void esp6_ctlinput(int, struct sockaddr *, void *);
+
+struct ipsec_output_state;
+extern int ipsec6_output_trans __P((struct ipsec_output_state *, u_char *,
+ struct mbuf *, struct secpolicy *, int, int *));
+extern int ipsec6_output_tunnel __P((struct ipsec_output_state *,
+ struct secpolicy *, int));
+#endif /*_KERNEL*/
+
+#endif /*_NETIPSEC_IPSEC6_HH_*/
diff --git a/rtems/freebsd/netipsec/ipsec_input.c b/rtems/freebsd/netipsec/ipsec_input.c
new file mode 100644
index 00000000..3f86f260
--- /dev/null
+++ b/rtems/freebsd/netipsec/ipsec_input.c
@@ -0,0 +1,891 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/* $FreeBSD$ */
+/* $OpenBSD: ipsec_input.c,v 1.63 2003/02/20 18:35:43 deraadt Exp $ */
+/*-
+ * The authors of this code are John Ioannidis (ji@tla.org),
+ * Angelos D. Keromytis (kermit@csd.uch.gr) and
+ * Niels Provos (provos@physnet.uni-hamburg.de).
+ *
+ * This code was written by John Ioannidis for BSD/OS in Athens, Greece,
+ * in November 1995.
+ *
+ * Ported to OpenBSD and NetBSD, with additional transforms, in December 1996,
+ * by Angelos D. Keromytis.
+ *
+ * Additional transforms and features in 1997 and 1998 by Angelos D. Keromytis
+ * and Niels Provos.
+ *
+ * Additional features in 1999 by Angelos D. Keromytis.
+ *
+ * Copyright (C) 1995, 1996, 1997, 1998, 1999 by John Ioannidis,
+ * Angelos D. Keromytis and Niels Provos.
+ * Copyright (c) 2001, Angelos D. Keromytis.
+ *
+ * Permission to use, copy, and modify this software with or without fee
+ * is hereby granted, provided that this entire notice is included in
+ * all copies of any software which is or includes a copy or
+ * modification of this software.
+ * You may use this code under the GNU public license if you so wish. Please
+ * contribute changes back to the authors under this freer than GPL license
+ * so that we may further the use of strong encryption without limitations to
+ * all.
+ *
+ * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
+ * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
+ * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
+ * PURPOSE.
+ */
+
+/*
+ * IPsec input processing.
+ */
+
+#include <rtems/freebsd/local/opt_inet.h>
+#include <rtems/freebsd/local/opt_inet6.h>
+#include <rtems/freebsd/local/opt_ipsec.h>
+#include <rtems/freebsd/local/opt_enc.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/domain.h>
+#include <rtems/freebsd/sys/protosw.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/errno.h>
+#include <rtems/freebsd/sys/syslog.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/pfil.h>
+#include <rtems/freebsd/net/route.h>
+#include <rtems/freebsd/net/netisr.h>
+#include <rtems/freebsd/net/vnet.h>
+
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/in_systm.h>
+#include <rtems/freebsd/netinet/ip.h>
+#include <rtems/freebsd/netinet/ip_var.h>
+#include <rtems/freebsd/netinet/in_var.h>
+
+#include <rtems/freebsd/netinet/ip6.h>
+#ifdef INET6
+#include <rtems/freebsd/netinet6/ip6_var.h>
+#endif
+#include <rtems/freebsd/netinet/in_pcb.h>
+#ifdef INET6
+#include <rtems/freebsd/netinet/icmp6.h>
+#endif
+
+#include <rtems/freebsd/netipsec/ipsec.h>
+#ifdef INET6
+#include <rtems/freebsd/netipsec/ipsec6.h>
+#endif
+#include <rtems/freebsd/netipsec/ah_var.h>
+#include <rtems/freebsd/netipsec/esp.h>
+#include <rtems/freebsd/netipsec/esp_var.h>
+#include <rtems/freebsd/netipsec/ipcomp_var.h>
+
+#include <rtems/freebsd/netipsec/key.h>
+#include <rtems/freebsd/netipsec/keydb.h>
+
+#include <rtems/freebsd/netipsec/xform.h>
+#include <rtems/freebsd/netinet6/ip6protosw.h>
+
+#include <rtems/freebsd/machine/in_cksum.h>
+#include <rtems/freebsd/machine/stdarg.h>
+
+#ifdef DEV_ENC
+#include <rtems/freebsd/net/if_enc.h>
+#endif
+
+
+#define IPSEC_ISTAT(p,x,y,z) ((p) == IPPROTO_ESP ? (x)++ : \
+ (p) == IPPROTO_AH ? (y)++ : (z)++)
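+/*
+ * For example, IPSEC_ISTAT(IPPROTO_AH, e, a, c) increments the AH counter
+ * "a"; any protocol other than ESP and AH falls through to the IPCOMP
+ * counter "c".
+ */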
+
+#ifdef INET
+static void ipsec4_common_ctlinput(int, struct sockaddr *, void *, int);
+#endif
+
+/*
+ * ipsec_common_input gets called when an IPsec-protected packet
+ * is received by IPv4 or IPv6. Its job is to find the right SA
+ * and call the appropriate transform. The transform callback
+ * takes care of further processing (like ingress filtering).
+ */
+static int
+ipsec_common_input(struct mbuf *m, int skip, int protoff, int af, int sproto)
+{
+ union sockaddr_union dst_address;
+ struct secasvar *sav;
+ u_int32_t spi;
+ int error;
+#ifdef IPSEC_NAT_T
+ struct m_tag *tag;
+#endif
+
+ IPSEC_ISTAT(sproto, V_espstat.esps_input, V_ahstat.ahs_input,
+ V_ipcompstat.ipcomps_input);
+
+ IPSEC_ASSERT(m != NULL, ("null packet"));
+
+ IPSEC_ASSERT(sproto == IPPROTO_ESP || sproto == IPPROTO_AH ||
+ sproto == IPPROTO_IPCOMP,
+ ("unexpected security protocol %u", sproto));
+
+ if ((sproto == IPPROTO_ESP && !V_esp_enable) ||
+ (sproto == IPPROTO_AH && !V_ah_enable) ||
+ (sproto == IPPROTO_IPCOMP && !V_ipcomp_enable)) {
+ m_freem(m);
+ IPSEC_ISTAT(sproto, V_espstat.esps_pdrops, V_ahstat.ahs_pdrops,
+ V_ipcompstat.ipcomps_pdrops);
+ return EOPNOTSUPP;
+ }
+
+ if (m->m_pkthdr.len - skip < 2 * sizeof (u_int32_t)) {
+ m_freem(m);
+ IPSEC_ISTAT(sproto, V_espstat.esps_hdrops, V_ahstat.ahs_hdrops,
+ V_ipcompstat.ipcomps_hdrops);
+ DPRINTF(("%s: packet too small\n", __func__));
+ return EINVAL;
+ }
+
+ /* Retrieve the SPI from the relevant IPsec header */
+ if (sproto == IPPROTO_ESP)
+ m_copydata(m, skip, sizeof(u_int32_t), (caddr_t) &spi);
+ else if (sproto == IPPROTO_AH)
+ m_copydata(m, skip + sizeof(u_int32_t), sizeof(u_int32_t),
+ (caddr_t) &spi);
+ else if (sproto == IPPROTO_IPCOMP) {
+ u_int16_t cpi;
+ m_copydata(m, skip + sizeof(u_int16_t), sizeof(u_int16_t),
+ (caddr_t) &cpi);
+ spi = ntohl(htons(cpi));
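+ /*
+ * IPCOMP carries a 16-bit CPI instead of a 32-bit SPI; the
+ * htons/ntohl pair above widens the network-order CPI into a
+ * 32-bit network-order value so it can be matched against
+ * sav->spi like an ordinary SPI.
+ */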
+ }
+
+ /*
+ * Find the SA and (indirectly) call the appropriate
+ * kernel crypto routine. The resulting mbuf chain is a valid
+ * IP packet ready to go through input processing.
+ */
+ bzero(&dst_address, sizeof (dst_address));
+ dst_address.sa.sa_family = af;
+ switch (af) {
+#ifdef INET
+ case AF_INET:
+ dst_address.sin.sin_len = sizeof(struct sockaddr_in);
+ m_copydata(m, offsetof(struct ip, ip_dst),
+ sizeof(struct in_addr),
+ (caddr_t) &dst_address.sin.sin_addr);
+#ifdef IPSEC_NAT_T
+ /* Find the source port for NAT-T; see udp*_espdecap. */
+ tag = m_tag_find(m, PACKET_TAG_IPSEC_NAT_T_PORTS, NULL);
+ if (tag != NULL)
+ dst_address.sin.sin_port = ((u_int16_t *)(tag + 1))[1];
+#endif /* IPSEC_NAT_T */
+ break;
+#endif /* INET */
+#ifdef INET6
+ case AF_INET6:
+ dst_address.sin6.sin6_len = sizeof(struct sockaddr_in6);
+ m_copydata(m, offsetof(struct ip6_hdr, ip6_dst),
+ sizeof(struct in6_addr),
+ (caddr_t) &dst_address.sin6.sin6_addr);
+ break;
+#endif /* INET6 */
+ default:
+ DPRINTF(("%s: unsupported protocol family %u\n", __func__, af));
+ m_freem(m);
+ IPSEC_ISTAT(sproto, V_espstat.esps_nopf, V_ahstat.ahs_nopf,
+ V_ipcompstat.ipcomps_nopf);
+ return EPFNOSUPPORT;
+ }
+
+ /* NB: only pass dst since key_allocsa follows RFC2401 */
+ sav = KEY_ALLOCSA(&dst_address, sproto, spi);
+ if (sav == NULL) {
+ DPRINTF(("%s: no key association found for SA %s/%08lx/%u\n",
+ __func__, ipsec_address(&dst_address),
+ (u_long) ntohl(spi), sproto));
+ IPSEC_ISTAT(sproto, V_espstat.esps_notdb, V_ahstat.ahs_notdb,
+ V_ipcompstat.ipcomps_notdb);
+ m_freem(m);
+ return ENOENT;
+ }
+
+ if (sav->tdb_xform == NULL) {
+ DPRINTF(("%s: attempted to use uninitialized SA %s/%08lx/%u\n",
+ __func__, ipsec_address(&dst_address),
+ (u_long) ntohl(spi), sproto));
+ IPSEC_ISTAT(sproto, V_espstat.esps_noxform, V_ahstat.ahs_noxform,
+ V_ipcompstat.ipcomps_noxform);
+ KEY_FREESAV(&sav);
+ m_freem(m);
+ return ENXIO;
+ }
+
+ /*
+ * Call appropriate transform and return -- callback takes care of
+ * everything else.
+ */
+ error = (*sav->tdb_xform->xf_input)(m, sav, skip, protoff);
+ KEY_FREESAV(&sav);
+ return error;
+}
+
+#ifdef INET
+/*
+ * Common input handler for IPv4 AH, ESP, and IPCOMP.
+ */
+int
+ipsec4_common_input(struct mbuf *m, ...)
+{
+ va_list ap;
+ int off, nxt;
+
+ va_start(ap, m);
+ off = va_arg(ap, int);
+ nxt = va_arg(ap, int);
+ va_end(ap);
+
+ return ipsec_common_input(m, off, offsetof(struct ip, ip_p),
+ AF_INET, nxt);
+}
+
+void
+ah4_input(struct mbuf *m, int off)
+{
+ ipsec4_common_input(m, off, IPPROTO_AH);
+}
+void
+ah4_ctlinput(int cmd, struct sockaddr *sa, void *v)
+{
+ if (sa->sa_family == AF_INET &&
+ sa->sa_len == sizeof(struct sockaddr_in))
+ ipsec4_common_ctlinput(cmd, sa, v, IPPROTO_AH);
+}
+
+void
+esp4_input(struct mbuf *m, int off)
+{
+ ipsec4_common_input(m, off, IPPROTO_ESP);
+}
+void
+esp4_ctlinput(int cmd, struct sockaddr *sa, void *v)
+{
+ if (sa->sa_family == AF_INET &&
+ sa->sa_len == sizeof(struct sockaddr_in))
+ ipsec4_common_ctlinput(cmd, sa, v, IPPROTO_ESP);
+}
+
+void
+ipcomp4_input(struct mbuf *m, int off)
+{
+ ipsec4_common_input(m, off, IPPROTO_IPCOMP);
+}
+
+/*
+ * IPsec input callback for INET protocols.
+ * This routine is called as the transform callback.
+ * Takes care of filtering and other sanity checks on
+ * the processed packet.
+ */
+int
+ipsec4_common_input_cb(struct mbuf *m, struct secasvar *sav,
+ int skip, int protoff, struct m_tag *mt)
+{
+ int prot, af, sproto;
+ struct ip *ip;
+ struct m_tag *mtag;
+ struct tdb_ident *tdbi;
+ struct secasindex *saidx;
+ int error;
+#ifdef INET6
+#ifdef notyet
+ char ip6buf[INET6_ADDRSTRLEN];
+#endif
+#endif
+
+ IPSEC_ASSERT(m != NULL, ("null mbuf"));
+ IPSEC_ASSERT(sav != NULL, ("null SA"));
+ IPSEC_ASSERT(sav->sah != NULL, ("null SAH"));
+ saidx = &sav->sah->saidx;
+ af = saidx->dst.sa.sa_family;
+ IPSEC_ASSERT(af == AF_INET, ("unexpected af %u", af));
+ sproto = saidx->proto;
+ IPSEC_ASSERT(sproto == IPPROTO_ESP || sproto == IPPROTO_AH ||
+ sproto == IPPROTO_IPCOMP,
+ ("unexpected security protocol %u", sproto));
+
+ /* Sanity check */
+ if (m == NULL) {
+ DPRINTF(("%s: null mbuf", __func__));
+ IPSEC_ISTAT(sproto, V_espstat.esps_badkcr, V_ahstat.ahs_badkcr,
+ V_ipcompstat.ipcomps_badkcr);
+ KEY_FREESAV(&sav);
+ return EINVAL;
+ }
+
+ if (skip != 0) {
+ /* Fix IPv4 header */
+ if (m->m_len < skip && (m = m_pullup(m, skip)) == NULL) {
+ DPRINTF(("%s: processing failed for SA %s/%08lx\n",
+ __func__, ipsec_address(&sav->sah->saidx.dst),
+ (u_long) ntohl(sav->spi)));
+ IPSEC_ISTAT(sproto, V_espstat.esps_hdrops, V_ahstat.ahs_hdrops,
+ V_ipcompstat.ipcomps_hdrops);
+ error = ENOBUFS;
+ goto bad;
+ }
+
+ ip = mtod(m, struct ip *);
+ ip->ip_len = htons(m->m_pkthdr.len);
+ ip->ip_off = htons(ip->ip_off);
+ ip->ip_sum = 0;
+ ip->ip_sum = in_cksum(m, ip->ip_hl << 2);
+ } else {
+ ip = mtod(m, struct ip *);
+ }
+ prot = ip->ip_p;
+
+#ifdef notyet
+ /* IP-in-IP encapsulation */
+ if (prot == IPPROTO_IPIP) {
+ struct ip ipn;
+
+ if (m->m_pkthdr.len - skip < sizeof(struct ip)) {
+ IPSEC_ISTAT(sproto, V_espstat.esps_hdrops,
+ V_ahstat.ahs_hdrops,
+ V_ipcompstat.ipcomps_hdrops);
+ error = EINVAL;
+ goto bad;
+ }
+ /* ipn will now contain the inner IPv4 header */
+ m_copydata(m, ip->ip_hl << 2, sizeof(struct ip),
+ (caddr_t) &ipn);
+
+ /* XXX PROXY address isn't recorded in SAH */
+ /*
+ * Check that the inner source address is the same as
+ * the proxy address, if available.
+ */
+ if ((saidx->proxy.sa.sa_family == AF_INET &&
+ saidx->proxy.sin.sin_addr.s_addr !=
+ INADDR_ANY &&
+ ipn.ip_src.s_addr !=
+ saidx->proxy.sin.sin_addr.s_addr) ||
+ (saidx->proxy.sa.sa_family != AF_INET &&
+ saidx->proxy.sa.sa_family != 0)) {
+
+ DPRINTF(("%s: inner source address %s doesn't "
+ "correspond to expected proxy source %s, "
+ "SA %s/%08lx\n", __func__,
+ inet_ntoa4(ipn.ip_src),
+ ipsp_address(saidx->proxy),
+ ipsp_address(saidx->dst),
+ (u_long) ntohl(sav->spi)));
+
+ IPSEC_ISTAT(sproto, V_espstat.esps_pdrops,
+ V_ahstat.ahs_pdrops,
+ V_ipcompstat.ipcomps_pdrops);
+ error = EACCES;
+ goto bad;
+ }
+ }
+#ifdef INET6
+ /* IPv6-in-IP encapsulation. */
+ if (prot == IPPROTO_IPV6) {
+ struct ip6_hdr ip6n;
+
+ if (m->m_pkthdr.len - skip < sizeof(struct ip6_hdr)) {
+ IPSEC_ISTAT(sproto, V_espstat.esps_hdrops,
+ V_ahstat.ahs_hdrops,
+ V_ipcompstat.ipcomps_hdrops);
+ error = EINVAL;
+ goto bad;
+ }
+ /* ip6n will now contain the inner IPv6 header. */
+ m_copydata(m, ip->ip_hl << 2, sizeof(struct ip6_hdr),
+ (caddr_t) &ip6n);
+
+ /*
+ * Check that the inner source address is the same as
+ * the proxy address, if available.
+ */
+ if ((saidx->proxy.sa.sa_family == AF_INET6 &&
+ !IN6_IS_ADDR_UNSPECIFIED(&saidx->proxy.sin6.sin6_addr) &&
+ !IN6_ARE_ADDR_EQUAL(&ip6n.ip6_src,
+ &saidx->proxy.sin6.sin6_addr)) ||
+ (saidx->proxy.sa.sa_family != AF_INET6 &&
+ saidx->proxy.sa.sa_family != 0)) {
+
+ DPRINTF(("%s: inner source address %s doesn't "
+ "correspond to expected proxy source %s, "
+ "SA %s/%08lx\n", __func__,
+ ip6_sprintf(ip6buf, &ip6n.ip6_src),
+ ipsec_address(&saidx->proxy),
+ ipsec_address(&saidx->dst),
+ (u_long) ntohl(sav->spi)));
+
+ IPSEC_ISTAT(sproto, V_espstat.esps_pdrops,
+ V_ahstat.ahs_pdrops,
+ V_ipcompstat.ipcomps_pdrops);
+ error = EACCES;
+ goto bad;
+ }
+ }
+#endif /* INET6 */
+#endif /*XXX*/
+
+ /*
+ * Record what we've done to the packet (under what SA it was
+ * processed). If we've been passed an mtag, it means the packet
+ * was already processed by an ethernet/crypto combo card and
+ * thus has a tag attached with all the right information, but
+ * with a PACKET_TAG_IPSEC_IN_CRYPTO_DONE as opposed to
+ * PACKET_TAG_IPSEC_IN_DONE type; in that case, just change the type.
+ */
+ if (mt == NULL && sproto != IPPROTO_IPCOMP) {
+ mtag = m_tag_get(PACKET_TAG_IPSEC_IN_DONE,
+ sizeof(struct tdb_ident), M_NOWAIT);
+ if (mtag == NULL) {
+ DPRINTF(("%s: failed to get tag\n", __func__));
+ IPSEC_ISTAT(sproto, V_espstat.esps_hdrops,
+ V_ahstat.ahs_hdrops, V_ipcompstat.ipcomps_hdrops);
+ error = ENOMEM;
+ goto bad;
+ }
+
+ tdbi = (struct tdb_ident *)(mtag + 1);
+ bcopy(&saidx->dst, &tdbi->dst, saidx->dst.sa.sa_len);
+ tdbi->proto = sproto;
+ tdbi->spi = sav->spi;
+ /* Cache those two for enc(4) in xform_ipip. */
+ tdbi->alg_auth = sav->alg_auth;
+ tdbi->alg_enc = sav->alg_enc;
+
+ m_tag_prepend(m, mtag);
+ } else if (mt != NULL) {
+ mt->m_tag_id = PACKET_TAG_IPSEC_IN_DONE;
+ /* XXX do we need to mark m_flags??? */
+ }
+
+ key_sa_recordxfer(sav, m); /* record data transfer */
+
+#ifdef DEV_ENC
+ encif->if_ipackets++;
+ encif->if_ibytes += m->m_pkthdr.len;
+
+ /*
+ * Pass the mbuf to enc0 for bpf and pfil. We will filter the IPIP
+ * packet later after it has been decapsulated.
+ */
+ ipsec_bpf(m, sav, AF_INET, ENC_IN|ENC_BEFORE);
+
+ if (prot != IPPROTO_IPIP)
+ if ((error = ipsec_filter(&m, PFIL_IN, ENC_IN|ENC_BEFORE)) != 0)
+ return (error);
+#endif
+
+ /*
+ * Re-dispatch via software interrupt.
+ */
+ if ((error = netisr_queue_src(NETISR_IP, (uintptr_t)sav->spi, m))) {
+ IPSEC_ISTAT(sproto, V_espstat.esps_qfull, V_ahstat.ahs_qfull,
+ V_ipcompstat.ipcomps_qfull);
+
+ DPRINTF(("%s: queue full; proto %u packet dropped\n",
+ __func__, sproto));
+ return error;
+ }
+ return 0;
+bad:
+ m_freem(m);
+ return error;
+}
+
+void
+ipsec4_common_ctlinput(int cmd, struct sockaddr *sa, void *v, int proto)
+{
+ /* XXX nothing just yet */
+}
+#endif /* INET */
+
+#ifdef INET6
+/* IPv6 AH wrapper. */
+int
+ipsec6_common_input(struct mbuf **mp, int *offp, int proto)
+{
+ int l = 0;
+ int protoff;
+ struct ip6_ext ip6e;
+
+ if (*offp < sizeof(struct ip6_hdr)) {
+ DPRINTF(("%s: bad offset %u\n", __func__, *offp));
+ return IPPROTO_DONE;
+ } else if (*offp == sizeof(struct ip6_hdr)) {
+ protoff = offsetof(struct ip6_hdr, ip6_nxt);
+ } else {
+ /* Chase down the header chain... */
+ protoff = sizeof(struct ip6_hdr);
+
+ do {
+ protoff += l;
+ m_copydata(*mp, protoff, sizeof(ip6e),
+ (caddr_t) &ip6e);
+
+ if (ip6e.ip6e_nxt == IPPROTO_AH)
+ l = (ip6e.ip6e_len + 2) << 2;
+ else
+ l = (ip6e.ip6e_len + 1) << 3;
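+ /*
+ * Note (per RFC 2402/2460): AH expresses its length in
+ * 32-bit words minus two, hence (len + 2) << 2; other
+ * extension headers use 8-octet units excluding the
+ * first, hence (len + 1) << 3. E.g. ip6e_len == 4 means
+ * 24 bytes for AH but 40 bytes otherwise.
+ */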
+ IPSEC_ASSERT(l > 0, ("l went zero or negative"));
+ } while (protoff + l < *offp);
+
+ /* Malformed packet check */
+ if (protoff + l != *offp) {
+ DPRINTF(("%s: bad packet header chain, protoff %u, "
+ "l %u, off %u\n", __func__, protoff, l, *offp));
+ IPSEC_ISTAT(proto, V_espstat.esps_hdrops,
+ V_ahstat.ahs_hdrops,
+ V_ipcompstat.ipcomps_hdrops);
+ m_freem(*mp);
+ *mp = NULL;
+ return IPPROTO_DONE;
+ }
+ protoff += offsetof(struct ip6_ext, ip6e_nxt);
+ }
+ (void) ipsec_common_input(*mp, *offp, protoff, AF_INET6, proto);
+ return IPPROTO_DONE;
+}
+
+/*
+ * IPsec input callback, called by the transform callback. Takes care of
+ * filtering and other sanity checks on the processed packet.
+ */
+int
+ipsec6_common_input_cb(struct mbuf *m, struct secasvar *sav, int skip, int protoff,
+ struct m_tag *mt)
+{
+ int prot, af, sproto;
+ struct ip6_hdr *ip6;
+ struct m_tag *mtag;
+ struct tdb_ident *tdbi;
+ struct secasindex *saidx;
+ int nxt;
+ u_int8_t nxt8;
+ int error, nest;
+#ifdef notyet
+ char ip6buf[INET6_ADDRSTRLEN];
+#endif
+
+ IPSEC_ASSERT(m != NULL, ("null mbuf"));
+ IPSEC_ASSERT(sav != NULL, ("null SA"));
+ IPSEC_ASSERT(sav->sah != NULL, ("null SAH"));
+ saidx = &sav->sah->saidx;
+ af = saidx->dst.sa.sa_family;
+ IPSEC_ASSERT(af == AF_INET6, ("unexpected af %u", af));
+ sproto = saidx->proto;
+ IPSEC_ASSERT(sproto == IPPROTO_ESP || sproto == IPPROTO_AH ||
+ sproto == IPPROTO_IPCOMP,
+ ("unexpected security protocol %u", sproto));
+
+ /* Sanity check */
+ if (m == NULL) {
+ DPRINTF(("%s: null mbuf", __func__));
+ IPSEC_ISTAT(sproto, V_espstat.esps_badkcr, V_ahstat.ahs_badkcr,
+ V_ipcompstat.ipcomps_badkcr);
+ error = EINVAL;
+ goto bad;
+ }
+
+ /* Fix IPv6 header */
+ if (m->m_len < sizeof(struct ip6_hdr) &&
+ (m = m_pullup(m, sizeof(struct ip6_hdr))) == NULL) {
+
+ DPRINTF(("%s: processing failed for SA %s/%08lx\n",
+ __func__, ipsec_address(&sav->sah->saidx.dst),
+ (u_long) ntohl(sav->spi)));
+
+ IPSEC_ISTAT(sproto, V_espstat.esps_hdrops, V_ahstat.ahs_hdrops,
+ V_ipcompstat.ipcomps_hdrops);
+ error = EACCES;
+ goto bad;
+ }
+
+ ip6 = mtod(m, struct ip6_hdr *);
+ ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(struct ip6_hdr));
+
+	/* Save protocol (one byte; don't write through an int pointer) */
+	m_copydata(m, protoff, sizeof(nxt8), (caddr_t) &nxt8);
+	prot = nxt8;
+
+#ifdef notyet
+#ifdef INET
+ /* IP-in-IP encapsulation */
+ if (prot == IPPROTO_IPIP) {
+ struct ip ipn;
+
+ if (m->m_pkthdr.len - skip < sizeof(struct ip)) {
+ IPSEC_ISTAT(sproto, V_espstat.esps_hdrops,
+ V_ahstat.ahs_hdrops,
+ V_ipcompstat.ipcomps_hdrops);
+ error = EINVAL;
+ goto bad;
+ }
+ /* ipn will now contain the inner IPv4 header */
+ m_copydata(m, skip, sizeof(struct ip), (caddr_t) &ipn);
+
+ /*
+ * Check that the inner source address is the same as
+ * the proxy address, if available.
+ */
+ if ((saidx->proxy.sa.sa_family == AF_INET &&
+ saidx->proxy.sin.sin_addr.s_addr != INADDR_ANY &&
+ ipn.ip_src.s_addr != saidx->proxy.sin.sin_addr.s_addr) ||
+ (saidx->proxy.sa.sa_family != AF_INET &&
+ saidx->proxy.sa.sa_family != 0)) {
+
+ DPRINTF(("%s: inner source address %s doesn't "
+ "correspond to expected proxy source %s, "
+ "SA %s/%08lx\n", __func__,
+ inet_ntoa4(ipn.ip_src),
+ ipsec_address(&saidx->proxy),
+ ipsec_address(&saidx->dst),
+ (u_long) ntohl(sav->spi)));
+
+			IPSEC_ISTAT(sproto, V_espstat.esps_pdrops,
+			    V_ahstat.ahs_pdrops, V_ipcompstat.ipcomps_pdrops);
+ error = EACCES;
+ goto bad;
+ }
+ }
+#endif /* INET */
+
+ /* IPv6-in-IP encapsulation */
+ if (prot == IPPROTO_IPV6) {
+ struct ip6_hdr ip6n;
+
+ if (m->m_pkthdr.len - skip < sizeof(struct ip6_hdr)) {
+ IPSEC_ISTAT(sproto, V_espstat.esps_hdrops,
+ V_ahstat.ahs_hdrops,
+ V_ipcompstat.ipcomps_hdrops);
+ error = EINVAL;
+ goto bad;
+ }
+ /* ip6n will now contain the inner IPv6 header. */
+ m_copydata(m, skip, sizeof(struct ip6_hdr),
+ (caddr_t) &ip6n);
+
+ /*
+ * Check that the inner source address is the same as
+ * the proxy address, if available.
+ */
+ if ((saidx->proxy.sa.sa_family == AF_INET6 &&
+ !IN6_IS_ADDR_UNSPECIFIED(&saidx->proxy.sin6.sin6_addr) &&
+ !IN6_ARE_ADDR_EQUAL(&ip6n.ip6_src,
+ &saidx->proxy.sin6.sin6_addr)) ||
+ (saidx->proxy.sa.sa_family != AF_INET6 &&
+ saidx->proxy.sa.sa_family != 0)) {
+
+ DPRINTF(("%s: inner source address %s doesn't "
+ "correspond to expected proxy source %s, "
+ "SA %s/%08lx\n", __func__,
+ ip6_sprintf(ip6buf, &ip6n.ip6_src),
+ ipsec_address(&saidx->proxy),
+ ipsec_address(&saidx->dst),
+ (u_long) ntohl(sav->spi)));
+
+ IPSEC_ISTAT(sproto, V_espstat.esps_pdrops,
+ V_ahstat.ahs_pdrops, V_ipcompstat.ipcomps_pdrops);
+ error = EACCES;
+ goto bad;
+ }
+ }
+#endif /*XXX*/
+
+ /*
+ * Record what we've done to the packet (under what SA it was
+ * processed). If we've been passed an mtag, it means the packet
+ * was already processed by an ethernet/crypto combo card and
+ * thus has a tag attached with all the right information, but
+ * with a PACKET_TAG_IPSEC_IN_CRYPTO_DONE as opposed to
+ * PACKET_TAG_IPSEC_IN_DONE type; in that case, just change the type.
+ */
+ if (mt == NULL && sproto != IPPROTO_IPCOMP) {
+ mtag = m_tag_get(PACKET_TAG_IPSEC_IN_DONE,
+ sizeof(struct tdb_ident), M_NOWAIT);
+ if (mtag == NULL) {
+ DPRINTF(("%s: failed to get tag\n", __func__));
+ IPSEC_ISTAT(sproto, V_espstat.esps_hdrops,
+ V_ahstat.ahs_hdrops, V_ipcompstat.ipcomps_hdrops);
+ error = ENOMEM;
+ goto bad;
+ }
+
+ tdbi = (struct tdb_ident *)(mtag + 1);
+ bcopy(&saidx->dst, &tdbi->dst, sizeof(union sockaddr_union));
+ tdbi->proto = sproto;
+ tdbi->spi = sav->spi;
+ /* Cache those two for enc(4) in xform_ipip. */
+ tdbi->alg_auth = sav->alg_auth;
+ tdbi->alg_enc = sav->alg_enc;
+
+ m_tag_prepend(m, mtag);
+ } else {
+ if (mt != NULL)
+ mt->m_tag_id = PACKET_TAG_IPSEC_IN_DONE;
+ /* XXX do we need to mark m_flags??? */
+ }
+
+ key_sa_recordxfer(sav, m);
+
+#ifdef DEV_ENC
+ encif->if_ipackets++;
+ encif->if_ibytes += m->m_pkthdr.len;
+
+ /*
+ * Pass the mbuf to enc0 for bpf and pfil. We will filter the IPIP
+ * packet later after it has been decapsulated.
+ */
+ ipsec_bpf(m, sav, AF_INET6, ENC_IN|ENC_BEFORE);
+
+ /* XXX-BZ does not make sense. */
+ if (prot != IPPROTO_IPIP)
+ if ((error = ipsec_filter(&m, PFIL_IN, ENC_IN|ENC_BEFORE)) != 0)
+ return (error);
+#endif
+
+ /* Retrieve new protocol */
+ m_copydata(m, protoff, sizeof(u_int8_t), (caddr_t) &nxt8);
+
+ /*
+ * See the end of ip6_input for this logic.
+ * IPPROTO_IPV[46] case will be processed just like other ones
+ */
+ nest = 0;
+ nxt = nxt8;
+ while (nxt != IPPROTO_DONE) {
+ if (V_ip6_hdrnestlimit && (++nest > V_ip6_hdrnestlimit)) {
+ V_ip6stat.ip6s_toomanyhdr++;
+ error = EINVAL;
+ goto bad;
+ }
+
+ /*
+ * Protection against faulty packet - there should be
+ * more sanity checks in header chain processing.
+ */
+ if (m->m_pkthdr.len < skip) {
+ V_ip6stat.ip6s_tooshort++;
+ in6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_truncated);
+ error = EINVAL;
+ goto bad;
+ }
+ /*
+		 * Enforce IPsec policy checking if we are seeing the last
+		 * header.  Note that we do not visit this with protocols
+		 * that have pcb-layer code, like UDP/TCP/raw IP.
+ */
+ if ((inet6sw[ip6_protox[nxt]].pr_flags & PR_LASTHDR) != 0 &&
+ ipsec6_in_reject(m, NULL)) {
+ error = EINVAL;
+ goto bad;
+ }
+ nxt = (*inet6sw[ip6_protox[nxt]].pr_input)(&m, &skip, nxt);
+ }
+ return 0;
+bad:
+ if (m)
+ m_freem(m);
+ return error;
+}
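+
+/*
+ * Editorial sketch, not part of the imported FreeBSD sources: the
+ * dispatch loop above follows the ip6_input() convention, where each
+ * pr_input handler consumes its own header, advances the offset and
+ * returns the next protocol number, or IPPROTO_DONE once the chain
+ * (and the mbuf) has been fully consumed.  A hypothetical handler
+ * shape, for orientation:
+ */
+#if 0	/* illustrative only */
+static int
+example_pr_input(struct mbuf **mp, int *offp, int proto)
+{
+	struct ip6_ext ip6e;
+
+	m_copydata(*mp, *offp, sizeof(ip6e), (caddr_t)&ip6e);
+	*offp += (ip6e.ip6e_len + 1) << 3;	/* step past this header */
+	return (ip6e.ip6e_nxt);		/* caller dispatches the next one */
+}
+#endif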
+
+void
+esp6_ctlinput(int cmd, struct sockaddr *sa, void *d)
+{
+ struct ip6ctlparam *ip6cp = NULL;
+ struct mbuf *m = NULL;
+ struct ip6_hdr *ip6;
+ int off;
+
+ if (sa->sa_family != AF_INET6 ||
+ sa->sa_len != sizeof(struct sockaddr_in6))
+ return;
+ if ((unsigned)cmd >= PRC_NCMDS)
+ return;
+
+ /* if the parameter is from icmp6, decode it. */
+ if (d != NULL) {
+ ip6cp = (struct ip6ctlparam *)d;
+ m = ip6cp->ip6c_m;
+ ip6 = ip6cp->ip6c_ip6;
+ off = ip6cp->ip6c_off;
+ } else {
+ m = NULL;
+ ip6 = NULL;
+ off = 0; /* calm gcc */
+ }
+
+ if (ip6 != NULL) {
+
+ struct ip6ctlparam ip6cp1;
+
+ /*
+ * Notify the error to all possible sockets via pfctlinput2.
+ * Since the upper layer information (such as protocol type,
+ * source and destination ports) is embedded in the encrypted
+ * data and might have been cut, we can't directly call
+ * an upper layer ctlinput function. However, the pcbnotify
+ * function will consider source and destination addresses
+ * as well as the flow info value, and may be able to find
+ * some PCB that should be notified.
+ * Although pfctlinput2 will call esp6_ctlinput(), there is
+ * no possibility of an infinite loop of function calls,
+ * because we don't pass the inner IPv6 header.
+ */
+ bzero(&ip6cp1, sizeof(ip6cp1));
+ ip6cp1.ip6c_src = ip6cp->ip6c_src;
+ pfctlinput2(cmd, sa, (void *)&ip6cp1);
+
+ /*
+ * Then go to special cases that need ESP header information.
+		 * XXX: We assume that when ip6 is non-NULL,
+		 * m and off are valid.
+ */
+
+ if (cmd == PRC_MSGSIZE) {
+ struct secasvar *sav;
+ u_int32_t spi;
+ int valid;
+
+ /* check header length before using m_copydata */
+ if (m->m_pkthdr.len < off + sizeof (struct esp))
+ return;
+ m_copydata(m, off + offsetof(struct esp, esp_spi),
+ sizeof(u_int32_t), (caddr_t) &spi);
+ /*
+ * Check to see if we have a valid SA corresponding to
+ * the address in the ICMP message payload.
+ */
+ sav = KEY_ALLOCSA((union sockaddr_union *)sa,
+ IPPROTO_ESP, spi);
+ valid = (sav != NULL);
+ if (sav)
+ KEY_FREESAV(&sav);
+
+ /* XXX Further validation? */
+
+ /*
+ * Depending on whether the SA is "valid" and
+ * routing table size (mtudisc_{hi,lo}wat), we will:
+			 *   - recalculate the new MTU and create the
+ * corresponding routing entry, or
+ * - ignore the MTU change notification.
+ */
+ icmp6_mtudisc_update(ip6cp, valid);
+ }
+ } else {
+ /* we normally notify any pcb here */
+ }
+}
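+
+/*
+ * Editorial sketch, not part of the imported FreeBSD sources: the
+ * m_pkthdr.len test above is load-bearing, since m_copydata() asserts
+ * (and panics) when asked to copy beyond the end of the chain, so the
+ * bounds check must always precede the copy:
+ */
+#if 0	/* illustrative only */
+	u_int32_t spi;
+
+	if (m->m_pkthdr.len < off + sizeof(struct esp))
+		return;				/* truncated ICMPv6 payload */
+	m_copydata(m, off + offsetof(struct esp, esp_spi),
+	    sizeof(u_int32_t), (caddr_t)&spi);	/* SPI, network byte order */
+#endif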
+#endif /* INET6 */
diff --git a/rtems/freebsd/netipsec/ipsec_mbuf.c b/rtems/freebsd/netipsec/ipsec_mbuf.c
new file mode 100644
index 00000000..ea97d24a
--- /dev/null
+++ b/rtems/freebsd/netipsec/ipsec_mbuf.c
@@ -0,0 +1,329 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 2002, 2003 Sam Leffler, Errno Consulting
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * IPsec-specific mbuf routines.
+ */
+
+#include <rtems/freebsd/local/opt_param.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/socket.h>
+
+#include <rtems/freebsd/net/route.h>
+#include <rtems/freebsd/net/vnet.h>
+
+#include <rtems/freebsd/netinet/in.h>
+
+#include <rtems/freebsd/netipsec/ipsec.h>
+
+/*
+ * Make space for a new header of length hlen at skip bytes
+ * into the packet. When doing this we allocate new mbufs only
+ * when absolutely necessary. The mbuf where the new header
+ * is to go is returned together with an offset into the mbuf.
+ * If NULL is returned then the mbuf chain may have been modified;
+ * the caller is assumed to always free the chain.
+ */
+struct mbuf *
+m_makespace(struct mbuf *m0, int skip, int hlen, int *off)
+{
+ struct mbuf *m;
+ unsigned remain;
+
+ IPSEC_ASSERT(m0 != NULL, ("null mbuf"));
+ IPSEC_ASSERT(hlen < MHLEN, ("hlen too big: %u", hlen));
+
+ for (m = m0; m && skip > m->m_len; m = m->m_next)
+ skip -= m->m_len;
+ if (m == NULL)
+ return (NULL);
+ /*
+ * At this point skip is the offset into the mbuf m
+ * where the new header should be placed. Figure out
+ * if there's space to insert the new header. If so,
+ * and copying the remainder makes sense then do so.
+ * Otherwise insert a new mbuf in the chain, splitting
+ * the contents of m as needed.
+ */
+ remain = m->m_len - skip; /* data to move */
+ if (hlen > M_TRAILINGSPACE(m)) {
+ struct mbuf *n0, *n, **np;
+ int todo, len, done, alloc;
+
+ n0 = NULL;
+ np = &n0;
+ alloc = 0;
+ done = 0;
+ todo = remain;
+ while (todo > 0) {
+ if (todo > MHLEN) {
+ n = m_getcl(M_DONTWAIT, m->m_type, 0);
+ len = MCLBYTES;
+ }
+ else {
+ n = m_get(M_DONTWAIT, m->m_type);
+ len = MHLEN;
+ }
+ if (n == NULL) {
+ m_freem(n0);
+ return NULL;
+ }
+ *np = n;
+ np = &n->m_next;
+ alloc++;
+ len = min(todo, len);
+ memcpy(n->m_data, mtod(m, char *) + skip + done, len);
+ n->m_len = len;
+ done += len;
+ todo -= len;
+ }
+
+ if (hlen <= M_TRAILINGSPACE(m) + remain) {
+ m->m_len = skip + hlen;
+ *off = skip;
+ if (n0 != NULL) {
+ *np = m->m_next;
+ m->m_next = n0;
+ }
+ }
+ else {
+ n = m_get(M_DONTWAIT, m->m_type);
+ if (n == NULL) {
+ m_freem(n0);
+ return NULL;
+ }
+ alloc++;
+
+ if ((n->m_next = n0) == NULL)
+ np = &n->m_next;
+ n0 = n;
+
+ *np = m->m_next;
+ m->m_next = n0;
+
+ n->m_len = hlen;
+ m->m_len = skip;
+
+ m = n; /* header is at front ... */
+ *off = 0; /* ... of new mbuf */
+ }
+ V_ipsec4stat.ips_mbinserted++;
+ } else {
+ /*
+ * Copy the remainder to the back of the mbuf
+ * so there's space to write the new header.
+ */
+ bcopy(mtod(m, caddr_t) + skip,
+ mtod(m, caddr_t) + skip + hlen, remain);
+ m->m_len += hlen;
+ *off = skip;
+ }
+ m0->m_pkthdr.len += hlen; /* adjust packet length */
+ return m;
+}
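+
+/*
+ * Editorial sketch, not part of the imported FreeBSD sources: typical
+ * use of m_makespace() is header insertion on output, e.g. making room
+ * for a header of hlen bytes directly behind the IP header.  The helper
+ * name is hypothetical; m and skip come from the caller.
+ */
+#if 0	/* illustrative only */
+static int
+example_insert_header(struct mbuf *m, int skip, int hlen)
+{
+	struct mbuf *mi;
+	int off;
+
+	mi = m_makespace(m, skip, hlen, &off);
+	if (mi == NULL)
+		return (ENOBUFS);	/* caller still owns and frees m */
+	bzero(mtod(mi, caddr_t) + off, hlen);	/* freshly opened space */
+	return (0);
+}
+#endif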
+
+/*
+ * m_pad(m, n) pads <m> with <n> bytes at the end. The packet header
+ * length is updated, and a pointer to the first byte of the padding
+ * (which is guaranteed to be all in one mbuf) is returned.
+ */
+caddr_t
+m_pad(struct mbuf *m, int n)
+{
+ register struct mbuf *m0, *m1;
+ register int len, pad;
+ caddr_t retval;
+
+ if (n <= 0) { /* No stupid arguments. */
+ DPRINTF(("%s: pad length invalid (%d)\n", __func__, n));
+ m_freem(m);
+ return NULL;
+ }
+
+ len = m->m_pkthdr.len;
+ pad = n;
+ m0 = m;
+
+ while (m0->m_len < len) {
+ len -= m0->m_len;
+ m0 = m0->m_next;
+ }
+
+ if (m0->m_len != len) {
+ DPRINTF(("%s: length mismatch (should be %d instead of %d)\n",
+ __func__, m->m_pkthdr.len,
+ m->m_pkthdr.len + m0->m_len - len));
+
+ m_freem(m);
+ return NULL;
+ }
+
+ /* Check for zero-length trailing mbufs, and find the last one. */
+ for (m1 = m0; m1->m_next; m1 = m1->m_next) {
+ if (m1->m_next->m_len != 0) {
+ DPRINTF(("%s: length mismatch (should be %d instead "
+ "of %d)\n", __func__,
+ m->m_pkthdr.len,
+ m->m_pkthdr.len + m1->m_next->m_len));
+
+ m_freem(m);
+ return NULL;
+ }
+
+ m0 = m1->m_next;
+ }
+
+ if (pad > M_TRAILINGSPACE(m0)) {
+ /* Add an mbuf to the chain. */
+ MGET(m1, M_DONTWAIT, MT_DATA);
+ if (m1 == 0) {
+ m_freem(m0);
+ DPRINTF(("%s: unable to get extra mbuf\n", __func__));
+ return NULL;
+ }
+
+ m0->m_next = m1;
+ m0 = m1;
+ m0->m_len = 0;
+ }
+
+ retval = m0->m_data + m0->m_len;
+ m0->m_len += pad;
+ m->m_pkthdr.len += pad;
+
+ return retval;
+}
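+
+/*
+ * Editorial sketch, not part of the imported FreeBSD sources: the
+ * typical caller is ESP output, which must pad so that payload plus
+ * padding plus the two-byte trailer (pad length, next header) is a
+ * multiple of the cipher block size (RFC 2406).  "rlen" (payload
+ * length), "blks" (block size), "padding" and "tail" are locals of the
+ * hypothetical caller:
+ */
+#if 0	/* illustrative only */
+	padding = ((blks - ((rlen + 2) % blks)) % blks) + 2;
+	tail = m_pad(m, padding);
+	if (tail == NULL)
+		return (ENOBUFS);	/* m_pad() already freed the chain */
+#endif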
+
+/*
+ * Remove hlen bytes of data at offset skip in the packet.  This is used
+ * by the protocols to strip protocol headers and associated data (e.g.
+ * IV, authenticator) on input.
+ */
+int
+m_striphdr(struct mbuf *m, int skip, int hlen)
+{
+ struct mbuf *m1;
+ int roff;
+
+ /* Find beginning of header */
+ m1 = m_getptr(m, skip, &roff);
+ if (m1 == NULL)
+ return (EINVAL);
+
+ /* Remove the header and associated data from the mbuf. */
+ if (roff == 0) {
+ /* The header was at the beginning of the mbuf */
+ V_ipsec4stat.ips_input_front++;
+ m_adj(m1, hlen);
+ if ((m1->m_flags & M_PKTHDR) == 0)
+ m->m_pkthdr.len -= hlen;
+ } else if (roff + hlen >= m1->m_len) {
+ struct mbuf *mo;
+
+ /*
+ * Part or all of the header is at the end of this mbuf,
+ * so first let's remove the remainder of the header from
+ * the beginning of the remainder of the mbuf chain, if any.
+ */
+ V_ipsec4stat.ips_input_end++;
+ if (roff + hlen > m1->m_len) {
+ /* Adjust the next mbuf by the remainder */
+ m_adj(m1->m_next, roff + hlen - m1->m_len);
+
+ /* The second mbuf is guaranteed not to have a pkthdr... */
+ m->m_pkthdr.len -= (roff + hlen - m1->m_len);
+ }
+
+ /* Now, let's unlink the mbuf chain for a second...*/
+ mo = m1->m_next;
+ m1->m_next = NULL;
+
+ /* ...and trim the end of the first part of the chain...sick */
+ m_adj(m1, -(m1->m_len - roff));
+ if ((m1->m_flags & M_PKTHDR) == 0)
+ m->m_pkthdr.len -= (m1->m_len - roff);
+
+ /* Finally, let's relink */
+ m1->m_next = mo;
+ } else {
+ /*
+ * The header lies in the "middle" of the mbuf; copy
+ * the remainder of the mbuf down over the header.
+ */
+ V_ipsec4stat.ips_input_middle++;
+ bcopy(mtod(m1, u_char *) + roff + hlen,
+ mtod(m1, u_char *) + roff,
+ m1->m_len - (roff + hlen));
+ m1->m_len -= hlen;
+ m->m_pkthdr.len -= hlen;
+ }
+ return (0);
+}
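+
+/*
+ * Editorial sketch, not part of the imported FreeBSD sources: ESP input
+ * is a typical caller; after decryption it strips the ESP header and IV
+ * sitting between the end of the IP header ("skip") and the payload.
+ * "sav" and "hlen" are assumptions of this fragment:
+ */
+#if 0	/* illustrative only */
+	hlen = sizeof(struct newesp) + sav->ivlen;	/* ESP header + IV */
+	if (m_striphdr(m, skip, hlen) != 0)
+		goto bad;	/* chain left untouched on failure */
+#endif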
+
+/*
+ * Diagnostic routine to check mbuf alignment as required by the
+ * crypto device drivers (that use DMA).
+ */
+void
+m_checkalignment(const char* where, struct mbuf *m0, int off, int len)
+{
+ int roff;
+ struct mbuf *m = m_getptr(m0, off, &roff);
+ caddr_t addr;
+
+ if (m == NULL)
+ return;
+ printf("%s (off %u len %u): ", where, off, len);
+ addr = mtod(m, caddr_t) + roff;
+ do {
+ int mlen;
+
+ if (((uintptr_t) addr) & 3) {
+ printf("addr misaligned %p,", addr);
+ break;
+ }
+ mlen = m->m_len;
+ if (mlen > len)
+ mlen = len;
+ len -= mlen;
+ if (len && (mlen & 3)) {
+ printf("len mismatch %u,", mlen);
+ break;
+ }
+ m = m->m_next;
+ addr = m ? mtod(m, caddr_t) : NULL;
+ } while (m && len > 0);
+ for (m = m0; m; m = m->m_next)
+ printf(" [%p:%u]", mtod(m, caddr_t), m->m_len);
+ printf("\n");
+}
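+
+/*
+ * Editorial sketch, not part of the imported FreeBSD sources: the
+ * alignment rule checked above, extracted.  DMA-capable crypto drivers
+ * typically require every segment to start on a 32-bit boundary and
+ * every interior (non-final) segment to be a multiple of 32 bits long.
+ * The helper name is hypothetical.
+ */
+#if 0	/* illustrative only */
+static int
+example_is_dma_aligned(const void *p, int len, int is_last)
+{
+	if (((uintptr_t)p & 3) != 0)
+		return (0);		/* misaligned start */
+	if (!is_last && (len & 3) != 0)
+		return (0);		/* ragged interior segment */
+	return (1);
+}
+#endif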
diff --git a/rtems/freebsd/netipsec/ipsec_output.c b/rtems/freebsd/netipsec/ipsec_output.c
new file mode 100644
index 00000000..371b0379
--- /dev/null
+++ b/rtems/freebsd/netipsec/ipsec_output.c
@@ -0,0 +1,892 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 2002, 2003 Sam Leffler, Errno Consulting
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * IPsec output processing.
+ */
+#include <rtems/freebsd/local/opt_inet.h>
+#include <rtems/freebsd/local/opt_inet6.h>
+#include <rtems/freebsd/local/opt_ipsec.h>
+#include <rtems/freebsd/local/opt_enc.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/domain.h>
+#include <rtems/freebsd/sys/protosw.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/errno.h>
+#include <rtems/freebsd/sys/syslog.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/pfil.h>
+#include <rtems/freebsd/net/route.h>
+#include <rtems/freebsd/net/vnet.h>
+
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/in_systm.h>
+#include <rtems/freebsd/netinet/ip.h>
+#include <rtems/freebsd/netinet/ip_var.h>
+#include <rtems/freebsd/netinet/in_var.h>
+#include <rtems/freebsd/netinet/ip_ecn.h>
+#ifdef INET6
+#include <rtems/freebsd/netinet6/ip6_ecn.h>
+#endif
+
+#include <rtems/freebsd/netinet/ip6.h>
+#ifdef INET6
+#include <rtems/freebsd/netinet6/ip6_var.h>
+#endif
+#include <rtems/freebsd/netinet/in_pcb.h>
+#ifdef INET6
+#include <rtems/freebsd/netinet/icmp6.h>
+#endif
+
+#include <rtems/freebsd/netipsec/ipsec.h>
+#ifdef INET6
+#include <rtems/freebsd/netipsec/ipsec6.h>
+#endif
+#include <rtems/freebsd/netipsec/ah_var.h>
+#include <rtems/freebsd/netipsec/esp_var.h>
+#include <rtems/freebsd/netipsec/ipcomp_var.h>
+
+#include <rtems/freebsd/netipsec/xform.h>
+
+#include <rtems/freebsd/netipsec/key.h>
+#include <rtems/freebsd/netipsec/keydb.h>
+#include <rtems/freebsd/netipsec/key_debug.h>
+
+#include <rtems/freebsd/machine/in_cksum.h>
+
+#ifdef IPSEC_NAT_T
+#include <rtems/freebsd/netinet/udp.h>
+#endif
+
+#ifdef DEV_ENC
+#include <rtems/freebsd/net/if_enc.h>
+#endif
+
+
+int
+ipsec_process_done(struct mbuf *m, struct ipsecrequest *isr)
+{
+ struct tdb_ident *tdbi;
+ struct m_tag *mtag;
+ struct secasvar *sav;
+ struct secasindex *saidx;
+ int error;
+
+ IPSEC_ASSERT(m != NULL, ("null mbuf"));
+ IPSEC_ASSERT(isr != NULL, ("null ISR"));
+ sav = isr->sav;
+ IPSEC_ASSERT(sav != NULL, ("null SA"));
+ IPSEC_ASSERT(sav->sah != NULL, ("null SAH"));
+
+ saidx = &sav->sah->saidx;
+ switch (saidx->dst.sa.sa_family) {
+#ifdef INET
+ case AF_INET:
+ /* Fix the header length, for AH processing. */
+ mtod(m, struct ip *)->ip_len = htons(m->m_pkthdr.len);
+ break;
+#endif /* INET */
+#ifdef INET6
+ case AF_INET6:
+ /* Fix the header length, for AH processing. */
+ if (m->m_pkthdr.len < sizeof (struct ip6_hdr)) {
+ error = ENXIO;
+ goto bad;
+ }
+ if (m->m_pkthdr.len - sizeof (struct ip6_hdr) > IPV6_MAXPACKET) {
+ /* No jumbogram support. */
+ error = ENXIO; /*?*/
+ goto bad;
+ }
+ mtod(m, struct ip6_hdr *)->ip6_plen =
+ htons(m->m_pkthdr.len - sizeof(struct ip6_hdr));
+ break;
+#endif /* INET6 */
+ default:
+ DPRINTF(("%s: unknown protocol family %u\n", __func__,
+ saidx->dst.sa.sa_family));
+ error = ENXIO;
+ goto bad;
+ }
+
+ /*
+ * Add a record of what we've done or what needs to be done to the
+ * packet.
+ */
+ mtag = m_tag_get(PACKET_TAG_IPSEC_OUT_DONE,
+ sizeof(struct tdb_ident), M_NOWAIT);
+ if (mtag == NULL) {
+ DPRINTF(("%s: could not get packet tag\n", __func__));
+ error = ENOMEM;
+ goto bad;
+ }
+
+ tdbi = (struct tdb_ident *)(mtag + 1);
+ tdbi->dst = saidx->dst;
+ tdbi->proto = saidx->proto;
+ tdbi->spi = sav->spi;
+ m_tag_prepend(m, mtag);
+
+ /*
+ * If there's another (bundled) SA to apply, do so.
+ * Note that this puts a burden on the kernel stack size.
+ * If this is a problem we'll need to introduce a queue
+ * to set the packet on so we can unwind the stack before
+ * doing further processing.
+ */
+ if (isr->next) {
+ V_ipsec4stat.ips_out_bundlesa++;
+ return ipsec4_process_packet(m, isr->next, 0, 0);
+ }
+ key_sa_recordxfer(sav, m); /* record data transfer */
+
+ /*
+ * We're done with IPsec processing, transmit the packet using the
+ * appropriate network protocol (IP or IPv6). SPD lookup will be
+ * performed again there.
+ */
+ switch (saidx->dst.sa.sa_family) {
+#ifdef INET
+ struct ip *ip;
+ case AF_INET:
+ ip = mtod(m, struct ip *);
+ ip->ip_len = ntohs(ip->ip_len);
+ ip->ip_off = ntohs(ip->ip_off);
+
+#ifdef IPSEC_NAT_T
+ /*
+ * If NAT-T is enabled, now that all IPsec processing is done
+ * insert UDP encapsulation header after IP header.
+ */
+ if (sav->natt_type) {
+#ifdef _IP_VHL
+ const int hlen = IP_VHL_HL(ip->ip_vhl);
+#else
+ const int hlen = (ip->ip_hl << 2);
+#endif
+ int size, off;
+ struct mbuf *mi;
+ struct udphdr *udp;
+
+ size = sizeof(struct udphdr);
+ if (sav->natt_type == UDP_ENCAP_ESPINUDP_NON_IKE) {
+ /*
+ * draft-ietf-ipsec-nat-t-ike-0[01].txt and
+ * draft-ietf-ipsec-udp-encaps-(00/)01.txt,
+ * ignoring possible AH mode
+ * non-IKE marker + non-ESP marker
+ * from draft-ietf-ipsec-udp-encaps-00.txt.
+ */
+ size += sizeof(u_int64_t);
+ }
+ mi = m_makespace(m, hlen, size, &off);
+ if (mi == NULL) {
+ DPRINTF(("%s: m_makespace for udphdr failed\n",
+ __func__));
+ error = ENOBUFS;
+ goto bad;
+ }
+
+ udp = (struct udphdr *)(mtod(mi, caddr_t) + off);
+ if (sav->natt_type == UDP_ENCAP_ESPINUDP_NON_IKE)
+ udp->uh_sport = htons(UDP_ENCAP_ESPINUDP_PORT);
+ else
+ udp->uh_sport =
+ KEY_PORTFROMSADDR(&sav->sah->saidx.src);
+ udp->uh_dport = KEY_PORTFROMSADDR(&sav->sah->saidx.dst);
+ udp->uh_sum = 0;
+ udp->uh_ulen = htons(m->m_pkthdr.len - hlen);
+ ip->ip_len = m->m_pkthdr.len;
+ ip->ip_p = IPPROTO_UDP;
+
+ if (sav->natt_type == UDP_ENCAP_ESPINUDP_NON_IKE)
+ *(u_int64_t *)(udp + 1) = 0;
+ }
+#endif /* IPSEC_NAT_T */
+
+ return ip_output(m, NULL, NULL, IP_RAWOUTPUT, NULL, NULL);
+#endif /* INET */
+#ifdef INET6
+ case AF_INET6:
+ /*
+		 * No massaging needed: IPv6 header fields are always in
+		 * network byte order.
+ */
+ return ip6_output(m, NULL, NULL, 0, NULL, NULL, NULL);
+#endif /* INET6 */
+ }
+ panic("ipsec_process_done");
+bad:
+ m_freem(m);
+ KEY_FREESAV(&sav);
+ return (error);
+}
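+
+/*
+ * Editorial sketch, not part of the imported FreeBSD sources: the NAT-T
+ * encapsulations built above are, per the cited drafts (later RFC 3948):
+ *
+ *   UDP_ENCAP_ESPINUDP:          IP | UDP | ESP ...
+ *   UDP_ENCAP_ESPINUDP_NON_IKE:  IP | UDP | 8 zero bytes | ESP ...
+ *
+ * The zero non-IKE marker lets a receiver sharing the port with IKE
+ * tell the two apart, because an IKE header starts with a non-zero
+ * initiator cookie.  "iphlen" below is a hypothetical header length:
+ */
+#if 0	/* illustrative receive-side check */
+	u_int64_t marker;
+
+	m_copydata(m, iphlen + sizeof(struct udphdr),
+	    sizeof(marker), (caddr_t)&marker);
+	if (marker == 0)
+		;	/* ESP follows the non-IKE marker */
+#endif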
+
+static struct ipsecrequest *
+ipsec_nextisr(
+ struct mbuf *m,
+ struct ipsecrequest *isr,
+ int af,
+ struct secasindex *saidx,
+ int *error
+)
+{
+#define IPSEC_OSTAT(x,y,z) (isr->saidx.proto == IPPROTO_ESP ? (x)++ : \
+ isr->saidx.proto == IPPROTO_AH ? (y)++ : (z)++)
+ struct secasvar *sav;
+
+ IPSECREQUEST_LOCK_ASSERT(isr);
+
+ IPSEC_ASSERT(af == AF_INET || af == AF_INET6,
+ ("invalid address family %u", af));
+again:
+ /*
+ * Craft SA index to search for proper SA. Note that
+	 * we only fill in unspecified SA peers for transport
+ * mode; for tunnel mode they must already be filled in.
+ */
+ *saidx = isr->saidx;
+ if (isr->saidx.mode == IPSEC_MODE_TRANSPORT) {
+		/* Fill in unspecified SA peers only for transport mode */
+ if (af == AF_INET) {
+ struct sockaddr_in *sin;
+ struct ip *ip = mtod(m, struct ip *);
+
+ if (saidx->src.sa.sa_len == 0) {
+ sin = &saidx->src.sin;
+ sin->sin_len = sizeof(*sin);
+ sin->sin_family = AF_INET;
+ sin->sin_port = IPSEC_PORT_ANY;
+ sin->sin_addr = ip->ip_src;
+ }
+ if (saidx->dst.sa.sa_len == 0) {
+ sin = &saidx->dst.sin;
+ sin->sin_len = sizeof(*sin);
+ sin->sin_family = AF_INET;
+ sin->sin_port = IPSEC_PORT_ANY;
+ sin->sin_addr = ip->ip_dst;
+ }
+ } else {
+ struct sockaddr_in6 *sin6;
+ struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *);
+
+ if (saidx->src.sin6.sin6_len == 0) {
+ sin6 = (struct sockaddr_in6 *)&saidx->src;
+ sin6->sin6_len = sizeof(*sin6);
+ sin6->sin6_family = AF_INET6;
+ sin6->sin6_port = IPSEC_PORT_ANY;
+ sin6->sin6_addr = ip6->ip6_src;
+ if (IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_src)) {
+ /* fix scope id for comparing SPD */
+ sin6->sin6_addr.s6_addr16[1] = 0;
+ sin6->sin6_scope_id =
+ ntohs(ip6->ip6_src.s6_addr16[1]);
+ }
+ }
+ if (saidx->dst.sin6.sin6_len == 0) {
+ sin6 = (struct sockaddr_in6 *)&saidx->dst;
+ sin6->sin6_len = sizeof(*sin6);
+ sin6->sin6_family = AF_INET6;
+ sin6->sin6_port = IPSEC_PORT_ANY;
+ sin6->sin6_addr = ip6->ip6_dst;
+ if (IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_dst)) {
+ /* fix scope id for comparing SPD */
+ sin6->sin6_addr.s6_addr16[1] = 0;
+ sin6->sin6_scope_id =
+ ntohs(ip6->ip6_dst.s6_addr16[1]);
+ }
+ }
+ }
+ }
+
+ /*
+ * Lookup SA and validate it.
+ */
+ *error = key_checkrequest(isr, saidx);
+ if (*error != 0) {
+ /*
+ * IPsec processing is required, but no SA found.
+		 * I assume that key_acquire() has already been called
+		 * to get/establish the SA. Here I discard the packet,
+		 * because it is the upper layer's responsibility to
+		 * retransmit it.
+ */
+ V_ipsec4stat.ips_out_nosa++;
+ goto bad;
+ }
+ sav = isr->sav;
+ if (sav == NULL) {
+ IPSEC_ASSERT(ipsec_get_reqlevel(isr) == IPSEC_LEVEL_USE,
+ ("no SA found, but required; level %u",
+ ipsec_get_reqlevel(isr)));
+ IPSECREQUEST_UNLOCK(isr);
+ isr = isr->next;
+ /*
+ * If isr is NULL, we found a 'use' policy w/o SA.
+ * Return w/o error and w/o isr so we can drop out
+ * and continue w/o IPsec processing.
+ */
+ if (isr == NULL)
+ return isr;
+ IPSECREQUEST_LOCK(isr);
+ goto again;
+ }
+
+ /*
+ * Check system global policy controls.
+ */
+ if ((isr->saidx.proto == IPPROTO_ESP && !V_esp_enable) ||
+ (isr->saidx.proto == IPPROTO_AH && !V_ah_enable) ||
+ (isr->saidx.proto == IPPROTO_IPCOMP && !V_ipcomp_enable)) {
+ DPRINTF(("%s: IPsec outbound packet dropped due"
+ " to policy (check your sysctls)\n", __func__));
+ IPSEC_OSTAT(V_espstat.esps_pdrops, V_ahstat.ahs_pdrops,
+ V_ipcompstat.ipcomps_pdrops);
+ *error = EHOSTUNREACH;
+ goto bad;
+ }
+
+ /*
+ * Sanity check the SA contents for the caller
+ * before they invoke the xform output method.
+ */
+ if (sav->tdb_xform == NULL) {
+ DPRINTF(("%s: no transform for SA\n", __func__));
+ IPSEC_OSTAT(V_espstat.esps_noxform, V_ahstat.ahs_noxform,
+ V_ipcompstat.ipcomps_noxform);
+ *error = EHOSTUNREACH;
+ goto bad;
+ }
+ return isr;
+bad:
+ IPSEC_ASSERT(*error != 0, ("error return w/ no error code"));
+ IPSECREQUEST_UNLOCK(isr);
+ return NULL;
+#undef IPSEC_OSTAT
+}
+
+#ifdef INET
+/*
+ * IPsec output logic for IPv4.
+ */
+int
+ipsec4_process_packet(
+ struct mbuf *m,
+ struct ipsecrequest *isr,
+ int flags,
+ int tunalready)
+{
+ struct secasindex saidx;
+ struct secasvar *sav;
+ struct ip *ip;
+ int error, i, off;
+
+ IPSEC_ASSERT(m != NULL, ("null mbuf"));
+ IPSEC_ASSERT(isr != NULL, ("null isr"));
+
+	IPSECREQUEST_LOCK(isr);		/* ensure SA contents don't change */
+
+ isr = ipsec_nextisr(m, isr, AF_INET, &saidx, &error);
+ if (isr == NULL) {
+ if (error != 0)
+ goto bad;
+ return EJUSTRETURN;
+ }
+
+ sav = isr->sav;
+
+#ifdef DEV_ENC
+ encif->if_opackets++;
+ encif->if_obytes += m->m_pkthdr.len;
+
+ /* pass the mbuf to enc0 for bpf processing */
+ ipsec_bpf(m, sav, AF_INET, ENC_OUT|ENC_BEFORE);
+ /* pass the mbuf to enc0 for packet filtering */
+ if ((error = ipsec_filter(&m, PFIL_OUT, ENC_OUT|ENC_BEFORE)) != 0)
+ goto bad;
+#endif
+
+ if (!tunalready) {
+ union sockaddr_union *dst = &sav->sah->saidx.dst;
+ int setdf;
+
+ /*
+ * Collect IP_DF state from the outer header.
+ */
+ if (dst->sa.sa_family == AF_INET) {
+ if (m->m_len < sizeof (struct ip) &&
+ (m = m_pullup(m, sizeof (struct ip))) == NULL) {
+ error = ENOBUFS;
+ goto bad;
+ }
+ ip = mtod(m, struct ip *);
+ /* Honor system-wide control of how to handle IP_DF */
+ switch (V_ip4_ipsec_dfbit) {
+ case 0: /* clear in outer header */
+ case 1: /* set in outer header */
+ setdf = V_ip4_ipsec_dfbit;
+ break;
+ default: /* propagate to outer header */
+				setdf = (ip->ip_off & IP_DF) != 0;
+ break;
+ }
+ } else {
+ ip = NULL; /* keep compiler happy */
+ setdf = 0;
+ }
+ /* Do the appropriate encapsulation, if necessary */
+ if (isr->saidx.mode == IPSEC_MODE_TUNNEL || /* Tunnel requ'd */
+ dst->sa.sa_family != AF_INET || /* PF mismatch */
+#if 0
+ (sav->flags & SADB_X_SAFLAGS_TUNNEL) || /* Tunnel requ'd */
+ sav->tdb_xform->xf_type == XF_IP4 || /* ditto */
+#endif
+ (dst->sa.sa_family == AF_INET && /* Proxy */
+ dst->sin.sin_addr.s_addr != INADDR_ANY &&
+ dst->sin.sin_addr.s_addr != ip->ip_dst.s_addr)) {
+ struct mbuf *mp;
+
+ /* Fix IPv4 header checksum and length */
+ if (m->m_len < sizeof (struct ip) &&
+ (m = m_pullup(m, sizeof (struct ip))) == NULL) {
+ error = ENOBUFS;
+ goto bad;
+ }
+ ip = mtod(m, struct ip *);
+ ip->ip_len = htons(m->m_pkthdr.len);
+ ip->ip_sum = 0;
+#ifdef _IP_VHL
+ if (ip->ip_vhl == IP_VHL_BORING)
+ ip->ip_sum = in_cksum_hdr(ip);
+ else
+ ip->ip_sum = in_cksum(m,
+ _IP_VHL_HL(ip->ip_vhl) << 2);
+#else
+ ip->ip_sum = in_cksum(m, ip->ip_hl << 2);
+#endif
+
+ /* Encapsulate the packet */
+ error = ipip_output(m, isr, &mp, 0, 0);
+ if (mp == NULL && !error) {
+ /* Should never happen. */
+ DPRINTF(("%s: ipip_output returns no mbuf and "
+ "no error!", __func__));
+ error = EFAULT;
+ }
+ if (error) {
+ if (mp) {
+ /* XXX: Should never happen! */
+ m_freem(mp);
+ }
+ m = NULL; /* ipip_output() already freed it */
+ goto bad;
+ }
+ m = mp, mp = NULL;
+ /*
+ * ipip_output clears IP_DF in the new header. If
+ * we need to propagate IP_DF from the outer header,
+ * then we have to do it here.
+ *
+ * XXX shouldn't assume what ipip_output does.
+ */
+ if (dst->sa.sa_family == AF_INET && setdf) {
+ if (m->m_len < sizeof (struct ip) &&
+ (m = m_pullup(m, sizeof (struct ip))) == NULL) {
+ error = ENOBUFS;
+ goto bad;
+ }
+ ip = mtod(m, struct ip *);
+ ip->ip_off = ntohs(ip->ip_off);
+ ip->ip_off |= IP_DF;
+ ip->ip_off = htons(ip->ip_off);
+ }
+ }
+ }
+
+#ifdef DEV_ENC
+ /* pass the mbuf to enc0 for bpf processing */
+ ipsec_bpf(m, sav, AF_INET, ENC_OUT|ENC_AFTER);
+ /* pass the mbuf to enc0 for packet filtering */
+ if ((error = ipsec_filter(&m, PFIL_OUT, ENC_OUT|ENC_AFTER)) != 0)
+ goto bad;
+#endif
+
+ /*
+ * Dispatch to the appropriate IPsec transform logic. The
+ * packet will be returned for transmission after crypto
+ * processing, etc. are completed. For encapsulation we
+ * bypass this call because of the explicit call done above
+ * (necessary to deal with IP_DF handling for IPv4).
+ *
+	 * NB: m & sav are ``passed to caller'' who is responsible
+	 * for reclaiming their resources.
+ */
+ if (sav->tdb_xform->xf_type != XF_IP4) {
+ ip = mtod(m, struct ip *);
+ i = ip->ip_hl << 2;
+ off = offsetof(struct ip, ip_p);
+ error = (*sav->tdb_xform->xf_output)(m, isr, NULL, i, off);
+ } else {
+ error = ipsec_process_done(m, isr);
+ }
+ IPSECREQUEST_UNLOCK(isr);
+ return error;
+bad:
+ if (isr)
+ IPSECREQUEST_UNLOCK(isr);
+ if (m)
+ m_freem(m);
+ return error;
+}
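+
+/*
+ * Editorial sketch, not part of the imported FreeBSD sources: after
+ * ipip_output() the outer header is in network byte order while IP_DF
+ * (0x4000) is a host-order constant, hence the byte-swap round trip
+ * used above when propagating DF into the new outer header:
+ */
+#if 0	/* illustrative only */
+	u_int16_t off;
+
+	off = ntohs(ip->ip_off);	/* to host order */
+	off |= IP_DF;			/* set Don't Fragment */
+	ip->ip_off = htons(off);	/* back to network order */
+#endif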
+#endif
+
+#ifdef INET6
+/*
+ * Chop IP6 header from the payload.
+ */
+static struct mbuf *
+ipsec6_splithdr(struct mbuf *m)
+{
+ struct mbuf *mh;
+ struct ip6_hdr *ip6;
+ int hlen;
+
+ IPSEC_ASSERT(m->m_len >= sizeof (struct ip6_hdr),
+ ("first mbuf too short, len %u", m->m_len));
+ ip6 = mtod(m, struct ip6_hdr *);
+ hlen = sizeof(struct ip6_hdr);
+ if (m->m_len > hlen) {
+ MGETHDR(mh, M_DONTWAIT, MT_DATA);
+ if (!mh) {
+ m_freem(m);
+ return NULL;
+ }
+ M_MOVE_PKTHDR(mh, m);
+ MH_ALIGN(mh, hlen);
+ m->m_len -= hlen;
+ m->m_data += hlen;
+ mh->m_next = m;
+ m = mh;
+ m->m_len = hlen;
+ bcopy((caddr_t)ip6, mtod(m, caddr_t), hlen);
+ } else if (m->m_len < hlen) {
+ m = m_pullup(m, hlen);
+ if (!m)
+ return NULL;
+ }
+ return m;
+}
+
+/*
+ * IPsec output logic for IPv6, transport mode.
+ */
+int
+ipsec6_output_trans(
+ struct ipsec_output_state *state,
+ u_char *nexthdrp,
+ struct mbuf *mprev,
+ struct secpolicy *sp,
+ int flags,
+ int *tun)
+{
+ struct ipsecrequest *isr;
+ struct secasindex saidx;
+ int error = 0;
+ struct mbuf *m;
+
+ IPSEC_ASSERT(state != NULL, ("null state"));
+ IPSEC_ASSERT(state->m != NULL, ("null m"));
+ IPSEC_ASSERT(nexthdrp != NULL, ("null nexthdrp"));
+ IPSEC_ASSERT(mprev != NULL, ("null mprev"));
+ IPSEC_ASSERT(sp != NULL, ("null sp"));
+ IPSEC_ASSERT(tun != NULL, ("null tun"));
+
+ KEYDEBUG(KEYDEBUG_IPSEC_DATA,
+ printf("%s: applied SP\n", __func__);
+ kdebug_secpolicy(sp));
+
+ isr = sp->req;
+ if (isr->saidx.mode == IPSEC_MODE_TUNNEL) {
+ /* the rest will be handled by ipsec6_output_tunnel() */
+ *tun = 1; /* need tunnel-mode processing */
+ return 0;
+ }
+
+ *tun = 0;
+ m = state->m;
+
+	IPSECREQUEST_LOCK(isr);	/* ensure SA contents don't change */
+ isr = ipsec_nextisr(m, isr, AF_INET6, &saidx, &error);
+ if (isr == NULL) {
+ if (error != 0) {
+#ifdef notdef
+ /* XXX should notification be done for all errors ? */
+ /*
+ * Notify the fact that the packet is discarded
+ * to ourselves. I believe this is better than
+ * just silently discarding. (jinmei@kame.net)
+ * XXX: should we restrict the error to TCP packets?
+ * XXX: should we directly notify sockets via
+ * pfctlinputs?
+ */
+ icmp6_error(m, ICMP6_DST_UNREACH,
+ ICMP6_DST_UNREACH_ADMIN, 0);
+ m = NULL; /* NB: icmp6_error frees mbuf */
+#endif
+ goto bad;
+ }
+ return EJUSTRETURN;
+ }
+
+ error = (*isr->sav->tdb_xform->xf_output)(m, isr, NULL,
+ sizeof (struct ip6_hdr),
+ offsetof(struct ip6_hdr,
+ ip6_nxt));
+ IPSECREQUEST_UNLOCK(isr);
+ return error;
+bad:
+ if (isr)
+ IPSECREQUEST_UNLOCK(isr);
+ if (m)
+ m_freem(m);
+ state->m = NULL;
+ return error;
+}
+
+static int
+ipsec6_encapsulate(struct mbuf *m, struct secasvar *sav)
+{
+ struct ip6_hdr *oip6;
+ struct ip6_hdr *ip6;
+ size_t plen;
+
+ /* can't tunnel between different AFs */
+ if (sav->sah->saidx.src.sa.sa_family != AF_INET6 ||
+ sav->sah->saidx.dst.sa.sa_family != AF_INET6) {
+ m_freem(m);
+ return EINVAL;
+ }
+ IPSEC_ASSERT(m->m_len == sizeof (struct ip6_hdr),
+ ("mbuf wrong size; len %u", m->m_len));
+
+	/*
+	 * Grow the mbuf to accommodate the new IPv6 header.
+ */
+ plen = m->m_pkthdr.len;
+ if (M_LEADINGSPACE(m->m_next) < sizeof(struct ip6_hdr)) {
+ struct mbuf *n;
+ MGET(n, M_DONTWAIT, MT_DATA);
+ if (!n) {
+ m_freem(m);
+ return ENOBUFS;
+ }
+ n->m_len = sizeof(struct ip6_hdr);
+ n->m_next = m->m_next;
+ m->m_next = n;
+ m->m_pkthdr.len += sizeof(struct ip6_hdr);
+ oip6 = mtod(n, struct ip6_hdr *);
+ } else {
+ m->m_next->m_len += sizeof(struct ip6_hdr);
+ m->m_next->m_data -= sizeof(struct ip6_hdr);
+ m->m_pkthdr.len += sizeof(struct ip6_hdr);
+ oip6 = mtod(m->m_next, struct ip6_hdr *);
+ }
+ ip6 = mtod(m, struct ip6_hdr *);
+ bcopy((caddr_t)ip6, (caddr_t)oip6, sizeof(struct ip6_hdr));
+
+ /* Fake link-local scope-class addresses */
+ if (IN6_IS_SCOPE_LINKLOCAL(&oip6->ip6_src))
+ oip6->ip6_src.s6_addr16[1] = 0;
+ if (IN6_IS_SCOPE_LINKLOCAL(&oip6->ip6_dst))
+ oip6->ip6_dst.s6_addr16[1] = 0;
+
+ /* construct new IPv6 header. see RFC 2401 5.1.2.2 */
+ /* ECN consideration. */
+ ip6_ecn_ingress(V_ip6_ipsec_ecn, &ip6->ip6_flow, &oip6->ip6_flow);
+ if (plen < IPV6_MAXPACKET - sizeof(struct ip6_hdr))
+ ip6->ip6_plen = htons(plen);
+ else {
+ /* ip6->ip6_plen will be updated in ip6_output() */
+ }
+ ip6->ip6_nxt = IPPROTO_IPV6;
+ ip6->ip6_src = sav->sah->saidx.src.sin6.sin6_addr;
+ ip6->ip6_dst = sav->sah->saidx.dst.sin6.sin6_addr;
+ ip6->ip6_hlim = IPV6_DEFHLIM;
+
+ /* XXX Should ip6_src be updated later ? */
+
+ return 0;
+}
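+
+/*
+ * Editorial sketch, not part of the imported FreeBSD sources: the
+ * "fake link-local" fixup above follows the KAME convention of keeping
+ * the zone (interface) id embedded in the second 16-bit word of a
+ * link-local address while inside the stack; the wire form must have
+ * that word zeroed.  "addr" and "zone" are assumptions of this
+ * fragment:
+ */
+#if 0	/* illustrative only */
+	if (IN6_IS_SCOPE_LINKLOCAL(&addr)) {
+		zone = ntohs(addr.s6_addr16[1]);	/* recover zone id */
+		addr.s6_addr16[1] = 0;			/* wire form */
+	}
+#endif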
+
+/*
+ * IPsec output logic for IPv6, tunnel mode.
+ */
+int
+ipsec6_output_tunnel(struct ipsec_output_state *state, struct secpolicy *sp, int flags)
+{
+ struct ip6_hdr *ip6;
+ struct ipsecrequest *isr;
+ struct secasindex saidx;
+ int error;
+ struct sockaddr_in6 *dst6;
+ struct mbuf *m;
+
+ IPSEC_ASSERT(state != NULL, ("null state"));
+ IPSEC_ASSERT(state->m != NULL, ("null m"));
+ IPSEC_ASSERT(sp != NULL, ("null sp"));
+
+ KEYDEBUG(KEYDEBUG_IPSEC_DATA,
+ printf("%s: applied SP\n", __func__);
+ kdebug_secpolicy(sp));
+
+ m = state->m;
+ /*
+ * transport mode ipsec (before the 1st tunnel mode) is already
+ * processed by ipsec6_output_trans().
+ */
+ for (isr = sp->req; isr; isr = isr->next) {
+ if (isr->saidx.mode == IPSEC_MODE_TUNNEL)
+ break;
+ }
+
+	IPSECREQUEST_LOCK(isr);	/* ensure SA contents don't change */
+ isr = ipsec_nextisr(m, isr, AF_INET6, &saidx, &error);
+ if (isr == NULL) {
+ if (error != 0)
+ goto bad;
+ return EJUSTRETURN;
+ }
+
+#ifdef DEV_ENC
+ encif->if_opackets++;
+ encif->if_obytes += m->m_pkthdr.len;
+
+ /* pass the mbuf to enc0 for bpf processing */
+ ipsec_bpf(m, isr->sav, AF_INET6, ENC_OUT|ENC_BEFORE);
+ /* pass the mbuf to enc0 for packet filtering */
+ if ((error = ipsec_filter(&m, PFIL_OUT, ENC_OUT|ENC_BEFORE)) != 0)
+ goto bad;
+#endif
+
+ /*
+	 * The SA status may change while we are referring to it,
+	 * so call splsoftnet().
+ */
+ if (isr->saidx.mode == IPSEC_MODE_TUNNEL) {
+ /*
+ * build IPsec tunnel.
+ */
+		/* XXX should be processed with the other family */
+ if (isr->sav->sah->saidx.src.sa.sa_family != AF_INET6) {
+ ipseclog((LOG_ERR, "%s: family mismatched between "
+ "inner and outer, spi=%u\n", __func__,
+ ntohl(isr->sav->spi)));
+ V_ipsec6stat.ips_out_inval++;
+ error = EAFNOSUPPORT;
+ goto bad;
+ }
+
+ m = ipsec6_splithdr(m);
+ if (!m) {
+ V_ipsec6stat.ips_out_nomem++;
+ error = ENOMEM;
+ goto bad;
+ }
+ error = ipsec6_encapsulate(m, isr->sav);
+ if (error) {
+ m = NULL;
+ goto bad;
+ }
+ ip6 = mtod(m, struct ip6_hdr *);
+
+ state->ro =
+ (struct route *)&isr->sav->sah->route_cache.sin6_route;
+ state->dst = (struct sockaddr *)&state->ro->ro_dst;
+ dst6 = (struct sockaddr_in6 *)state->dst;
+ if (state->ro->ro_rt
+ && ((state->ro->ro_rt->rt_flags & RTF_UP) == 0
+ || !IN6_ARE_ADDR_EQUAL(&dst6->sin6_addr, &ip6->ip6_dst))) {
+ RTFREE(state->ro->ro_rt);
+ state->ro->ro_rt = NULL;
+ }
+ if (state->ro->ro_rt == NULL) {
+ bzero(dst6, sizeof(*dst6));
+ dst6->sin6_family = AF_INET6;
+ dst6->sin6_len = sizeof(*dst6);
+ dst6->sin6_addr = ip6->ip6_dst;
+ rtalloc(state->ro);
+ }
+ if (state->ro->ro_rt == NULL) {
+ V_ip6stat.ip6s_noroute++;
+ V_ipsec6stat.ips_out_noroute++;
+ error = EHOSTUNREACH;
+ goto bad;
+ }
+
+ /* adjust state->dst if tunnel endpoint is offlink */
+ if (state->ro->ro_rt->rt_flags & RTF_GATEWAY)
+ state->dst = (struct sockaddr *)state->ro->ro_rt->rt_gateway;
+ }
+
+ m = ipsec6_splithdr(m);
+ if (!m) {
+ V_ipsec6stat.ips_out_nomem++;
+ error = ENOMEM;
+ goto bad;
+ }
+ ip6 = mtod(m, struct ip6_hdr *);
+
+#ifdef DEV_ENC
+ /* pass the mbuf to enc0 for bpf processing */
+ ipsec_bpf(m, isr->sav, AF_INET6, ENC_OUT|ENC_AFTER);
+ /* pass the mbuf to enc0 for packet filtering */
+ if ((error = ipsec_filter(&m, PFIL_OUT, ENC_OUT|ENC_AFTER)) != 0)
+ goto bad;
+#endif
+
+ error = (*isr->sav->tdb_xform->xf_output)(m, isr, NULL,
+ sizeof (struct ip6_hdr),
+ offsetof(struct ip6_hdr, ip6_nxt));
+ IPSECREQUEST_UNLOCK(isr);
+ return error;
+bad:
+ if (isr)
+ IPSECREQUEST_UNLOCK(isr);
+ if (m)
+ m_freem(m);
+ state->m = NULL;
+ return error;
+}
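+
+/*
+ * Editorial sketch, not part of the imported FreeBSD sources: the
+ * tunnel code above uses the classic cached-route pattern, reusing the
+ * route hung off the SA header only while it is still up and pointing
+ * at the current tunnel endpoint.  "dst_changed" is a hypothetical
+ * stand-in for the IN6_ARE_ADDR_EQUAL() test above:
+ */
+#if 0	/* illustrative only */
+	if (ro->ro_rt != NULL &&
+	    (!(ro->ro_rt->rt_flags & RTF_UP) || dst_changed)) {
+		RTFREE(ro->ro_rt);	/* stale: drop the cached route */
+		ro->ro_rt = NULL;
+	}
+	if (ro->ro_rt == NULL)
+		rtalloc(ro);		/* look up ro->ro_dst afresh */
+#endif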
+#endif /*INET6*/
diff --git a/rtems/freebsd/netipsec/key.c b/rtems/freebsd/netipsec/key.c
new file mode 100644
index 00000000..0a18222a
--- /dev/null
+++ b/rtems/freebsd/netipsec/key.c
@@ -0,0 +1,8086 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/* $FreeBSD$ */
+/* $KAME: key.c,v 1.191 2001/06/27 10:46:49 sakane Exp $ */
+
+/*-
+ * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the project nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * This code refers to RFC 2367.
+ */
+
+#include <rtems/freebsd/local/opt_inet.h>
+#include <rtems/freebsd/local/opt_inet6.h>
+#include <rtems/freebsd/local/opt_ipsec.h>
+
+#include <rtems/freebsd/sys/types.h>
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/mutex.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/domain.h>
+#include <rtems/freebsd/sys/protosw.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/socketvar.h>
+#include <rtems/freebsd/sys/sysctl.h>
+#include <rtems/freebsd/sys/errno.h>
+#include <rtems/freebsd/sys/proc.h>
+#include <rtems/freebsd/sys/queue.h>
+#include <rtems/freebsd/sys/refcount.h>
+#include <rtems/freebsd/sys/syslog.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/route.h>
+#include <rtems/freebsd/net/raw_cb.h>
+#include <rtems/freebsd/net/vnet.h>
+
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/in_systm.h>
+#include <rtems/freebsd/netinet/ip.h>
+#include <rtems/freebsd/netinet/in_var.h>
+
+#ifdef INET6
+#include <rtems/freebsd/netinet/ip6.h>
+#include <rtems/freebsd/netinet6/in6_var.h>
+#include <rtems/freebsd/netinet6/ip6_var.h>
+#endif /* INET6 */
+
+#ifdef INET
+#include <rtems/freebsd/netinet/in_pcb.h>
+#endif
+#ifdef INET6
+#include <rtems/freebsd/netinet6/in6_pcb.h>
+#endif /* INET6 */
+
+#include <rtems/freebsd/net/pfkeyv2.h>
+#include <rtems/freebsd/netipsec/keydb.h>
+#include <rtems/freebsd/netipsec/key.h>
+#include <rtems/freebsd/netipsec/keysock.h>
+#include <rtems/freebsd/netipsec/key_debug.h>
+
+#include <rtems/freebsd/netipsec/ipsec.h>
+#ifdef INET6
+#include <rtems/freebsd/netipsec/ipsec6.h>
+#endif
+
+#include <rtems/freebsd/netipsec/xform.h>
+
+#include <rtems/freebsd/machine/stdarg.h>
+
+/* randomness */
+#include <rtems/freebsd/sys/random.h>
+
+#define FULLMASK 0xff
+#define _BITS(bytes) ((bytes) << 3)
+
+/*
+ * Note on SA reference counting:
+ * - SAs that are not in DEAD state will have (total external references + 1)
+ *   in the reference count field.  They cannot be freed and are
+ *   referenced from the SA header.
+ * - SAs that are in DEAD state will have (total external references)
+ *   in the reference count field.  They are ready to be freed.  The
+ *   reference from the SA header is removed in key_delsav() when the
+ *   reference count field hits 0 (= no external references other than
+ *   from the SA header).
+ */
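+/*
+ * Editorial sketch, not part of the imported FreeBSD sources: under the
+ * rule above, dropping an external reference looks roughly like the
+ * hypothetical helper below; the SA header's own implicit reference is
+ * what keeps a non-DEAD SA's count from ever reaching zero.
+ */
+#if 0	/* illustrative only */
+static void
+example_freesav(struct secasvar *sav)
+{
+	if (--sav->refcnt == 0)
+		key_delsav(sav);	/* DEAD and unreferenced: free it */
+}
+#endif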
+
+VNET_DEFINE(u_int32_t, key_debug_level) = 0;
+static VNET_DEFINE(u_int, key_spi_trycnt) = 1000;
+static VNET_DEFINE(u_int32_t, key_spi_minval) = 0x100;
+static VNET_DEFINE(u_int32_t, key_spi_maxval) = 0x0fffffff; /* XXX */
+static VNET_DEFINE(u_int32_t, policy_id) = 0;
+/* interval to initialize randseed, 1 min */
+static VNET_DEFINE(u_int, key_int_random) = 60;
+/* interval to expire acquiring, 30 s */
+static VNET_DEFINE(u_int, key_larval_lifetime) = 30;
+/* counter for blocking SADB_ACQUIRE.*/
+static VNET_DEFINE(int, key_blockacq_count) = 10;
+/* lifetime for blocking SADB_ACQUIRE.*/
+static VNET_DEFINE(int, key_blockacq_lifetime) = 20;
+/* preferred old sa rather than new sa.*/
+static VNET_DEFINE(int, key_preferred_oldsa) = 1;
+#define V_key_spi_trycnt VNET(key_spi_trycnt)
+#define V_key_spi_minval VNET(key_spi_minval)
+#define V_key_spi_maxval VNET(key_spi_maxval)
+#define V_policy_id VNET(policy_id)
+#define V_key_int_random VNET(key_int_random)
+#define V_key_larval_lifetime VNET(key_larval_lifetime)
+#define V_key_blockacq_count VNET(key_blockacq_count)
+#define V_key_blockacq_lifetime VNET(key_blockacq_lifetime)
+#define V_key_preferred_oldsa VNET(key_preferred_oldsa)
+
+static VNET_DEFINE(u_int32_t, acq_seq) = 0;
+#define V_acq_seq VNET(acq_seq)
+
+ /* SPD */
+static VNET_DEFINE(LIST_HEAD(_sptree, secpolicy), sptree[IPSEC_DIR_MAX]);
+#define V_sptree VNET(sptree)
+static struct mtx sptree_lock;
+#define SPTREE_LOCK_INIT() \
+ mtx_init(&sptree_lock, "sptree", \
+ "fast ipsec security policy database", MTX_DEF)
+#define SPTREE_LOCK_DESTROY() mtx_destroy(&sptree_lock)
+#define SPTREE_LOCK() mtx_lock(&sptree_lock)
+#define SPTREE_UNLOCK() mtx_unlock(&sptree_lock)
+#define SPTREE_LOCK_ASSERT() mtx_assert(&sptree_lock, MA_OWNED)
+
+static VNET_DEFINE(LIST_HEAD(_sahtree, secashead), sahtree); /* SAD */
+#define V_sahtree VNET(sahtree)
+static struct mtx sahtree_lock;
+#define SAHTREE_LOCK_INIT() \
+ mtx_init(&sahtree_lock, "sahtree", \
+ "fast ipsec security association database", MTX_DEF)
+#define SAHTREE_LOCK_DESTROY() mtx_destroy(&sahtree_lock)
+#define SAHTREE_LOCK() mtx_lock(&sahtree_lock)
+#define SAHTREE_UNLOCK() mtx_unlock(&sahtree_lock)
+#define SAHTREE_LOCK_ASSERT() mtx_assert(&sahtree_lock, MA_OWNED)
+
+ /* registered list */
+static VNET_DEFINE(LIST_HEAD(_regtree, secreg), regtree[SADB_SATYPE_MAX + 1]);
+#define V_regtree VNET(regtree)
+static struct mtx regtree_lock;
+#define REGTREE_LOCK_INIT() \
+ mtx_init(&regtree_lock, "regtree", "fast ipsec regtree", MTX_DEF)
+#define REGTREE_LOCK_DESTROY() mtx_destroy(&regtree_lock)
+#define REGTREE_LOCK() mtx_lock(&regtree_lock)
+#define REGTREE_UNLOCK() mtx_unlock(&regtree_lock)
+#define REGTREE_LOCK_ASSERT() mtx_assert(&regtree_lock, MA_OWNED)
+
+static VNET_DEFINE(LIST_HEAD(_acqtree, secacq), acqtree); /* acquiring list */
+#define V_acqtree VNET(acqtree)
+static struct mtx acq_lock;
+#define ACQ_LOCK_INIT() \
+ mtx_init(&acq_lock, "acqtree", "fast ipsec acquire list", MTX_DEF)
+#define ACQ_LOCK_DESTROY() mtx_destroy(&acq_lock)
+#define ACQ_LOCK() mtx_lock(&acq_lock)
+#define ACQ_UNLOCK() mtx_unlock(&acq_lock)
+#define ACQ_LOCK_ASSERT() mtx_assert(&acq_lock, MA_OWNED)
+
+ /* SP acquiring list */
+static VNET_DEFINE(LIST_HEAD(_spacqtree, secspacq), spacqtree);
+#define V_spacqtree VNET(spacqtree)
+static struct mtx spacq_lock;
+#define SPACQ_LOCK_INIT() \
+ mtx_init(&spacq_lock, "spacqtree", \
+ "fast ipsec security policy acquire list", MTX_DEF)
+#define SPACQ_LOCK_DESTROY() mtx_destroy(&spacq_lock)
+#define SPACQ_LOCK() mtx_lock(&spacq_lock)
+#define SPACQ_UNLOCK() mtx_unlock(&spacq_lock)
+#define SPACQ_LOCK_ASSERT() mtx_assert(&spacq_lock, MA_OWNED)
+
+/* search order for SAs */
+static const u_int saorder_state_valid_prefer_old[] = {
+ SADB_SASTATE_DYING, SADB_SASTATE_MATURE,
+};
+static const u_int saorder_state_valid_prefer_new[] = {
+ SADB_SASTATE_MATURE, SADB_SASTATE_DYING,
+};
+static const u_int saorder_state_alive[] = {
+ /* except DEAD */
+ SADB_SASTATE_MATURE, SADB_SASTATE_DYING, SADB_SASTATE_LARVAL
+};
+static const u_int saorder_state_any[] = {
+ SADB_SASTATE_MATURE, SADB_SASTATE_DYING,
+ SADB_SASTATE_LARVAL, SADB_SASTATE_DEAD
+};
+
+static const int minsize[] = {
+ sizeof(struct sadb_msg), /* SADB_EXT_RESERVED */
+ sizeof(struct sadb_sa), /* SADB_EXT_SA */
+ sizeof(struct sadb_lifetime), /* SADB_EXT_LIFETIME_CURRENT */
+ sizeof(struct sadb_lifetime), /* SADB_EXT_LIFETIME_HARD */
+ sizeof(struct sadb_lifetime), /* SADB_EXT_LIFETIME_SOFT */
+ sizeof(struct sadb_address), /* SADB_EXT_ADDRESS_SRC */
+ sizeof(struct sadb_address), /* SADB_EXT_ADDRESS_DST */
+ sizeof(struct sadb_address), /* SADB_EXT_ADDRESS_PROXY */
+ sizeof(struct sadb_key), /* SADB_EXT_KEY_AUTH */
+ sizeof(struct sadb_key), /* SADB_EXT_KEY_ENCRYPT */
+ sizeof(struct sadb_ident), /* SADB_EXT_IDENTITY_SRC */
+ sizeof(struct sadb_ident), /* SADB_EXT_IDENTITY_DST */
+ sizeof(struct sadb_sens), /* SADB_EXT_SENSITIVITY */
+ sizeof(struct sadb_prop), /* SADB_EXT_PROPOSAL */
+ sizeof(struct sadb_supported), /* SADB_EXT_SUPPORTED_AUTH */
+ sizeof(struct sadb_supported), /* SADB_EXT_SUPPORTED_ENCRYPT */
+ sizeof(struct sadb_spirange), /* SADB_EXT_SPIRANGE */
+ 0, /* SADB_X_EXT_KMPRIVATE */
+ sizeof(struct sadb_x_policy), /* SADB_X_EXT_POLICY */
+ sizeof(struct sadb_x_sa2), /* SADB_X_SA2 */
+ sizeof(struct sadb_x_nat_t_type),/* SADB_X_EXT_NAT_T_TYPE */
+ sizeof(struct sadb_x_nat_t_port),/* SADB_X_EXT_NAT_T_SPORT */
+ sizeof(struct sadb_x_nat_t_port),/* SADB_X_EXT_NAT_T_DPORT */
+ sizeof(struct sadb_address), /* SADB_X_EXT_NAT_T_OAI */
+ sizeof(struct sadb_address), /* SADB_X_EXT_NAT_T_OAR */
+ sizeof(struct sadb_x_nat_t_frag),/* SADB_X_EXT_NAT_T_FRAG */
+};
+static const int maxsize[] = {
+ sizeof(struct sadb_msg), /* SADB_EXT_RESERVED */
+ sizeof(struct sadb_sa), /* SADB_EXT_SA */
+ sizeof(struct sadb_lifetime), /* SADB_EXT_LIFETIME_CURRENT */
+ sizeof(struct sadb_lifetime), /* SADB_EXT_LIFETIME_HARD */
+ sizeof(struct sadb_lifetime), /* SADB_EXT_LIFETIME_SOFT */
+ 0, /* SADB_EXT_ADDRESS_SRC */
+ 0, /* SADB_EXT_ADDRESS_DST */
+ 0, /* SADB_EXT_ADDRESS_PROXY */
+ 0, /* SADB_EXT_KEY_AUTH */
+ 0, /* SADB_EXT_KEY_ENCRYPT */
+ 0, /* SADB_EXT_IDENTITY_SRC */
+ 0, /* SADB_EXT_IDENTITY_DST */
+ 0, /* SADB_EXT_SENSITIVITY */
+ 0, /* SADB_EXT_PROPOSAL */
+ 0, /* SADB_EXT_SUPPORTED_AUTH */
+ 0, /* SADB_EXT_SUPPORTED_ENCRYPT */
+ sizeof(struct sadb_spirange), /* SADB_EXT_SPIRANGE */
+ 0, /* SADB_X_EXT_KMPRIVATE */
+ 0, /* SADB_X_EXT_POLICY */
+ sizeof(struct sadb_x_sa2), /* SADB_X_SA2 */
+ sizeof(struct sadb_x_nat_t_type),/* SADB_X_EXT_NAT_T_TYPE */
+ sizeof(struct sadb_x_nat_t_port),/* SADB_X_EXT_NAT_T_SPORT */
+ sizeof(struct sadb_x_nat_t_port),/* SADB_X_EXT_NAT_T_DPORT */
+ 0, /* SADB_X_EXT_NAT_T_OAI */
+ 0, /* SADB_X_EXT_NAT_T_OAR */
+ sizeof(struct sadb_x_nat_t_frag),/* SADB_X_EXT_NAT_T_FRAG */
+};
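+
+/*
+ * Editorial sketch, not part of the imported FreeBSD sources: these
+ * tables bound the length of each PF_KEY extension during message
+ * parsing; a zero maxsize[] entry means variable length.  Roughly,
+ * for an extension of type "exttype" and length "extlen":
+ */
+#if 0	/* illustrative only */
+	if (extlen < minsize[exttype] ||
+	    (maxsize[exttype] != 0 && extlen > maxsize[exttype]))
+		return (EINVAL);	/* malformed extension */
+#endif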
+
+static VNET_DEFINE(int, ipsec_esp_keymin) = 256;
+static VNET_DEFINE(int, ipsec_esp_auth) = 0;
+static VNET_DEFINE(int, ipsec_ah_keymin) = 128;
+
+#define V_ipsec_esp_keymin VNET(ipsec_esp_keymin)
+#define V_ipsec_esp_auth VNET(ipsec_esp_auth)
+#define V_ipsec_ah_keymin VNET(ipsec_ah_keymin)
+
+#ifdef SYSCTL_DECL
+SYSCTL_DECL(_net_key);
+#endif
+
+SYSCTL_VNET_INT(_net_key, KEYCTL_DEBUG_LEVEL, debug,
+ CTLFLAG_RW, &VNET_NAME(key_debug_level), 0, "");
+
+/* max count of trial for the decision of spi value */
+SYSCTL_VNET_INT(_net_key, KEYCTL_SPI_TRY, spi_trycnt,
+ CTLFLAG_RW, &VNET_NAME(key_spi_trycnt), 0, "");
+
+/* minimum spi value to allocate automatically. */
+SYSCTL_VNET_INT(_net_key, KEYCTL_SPI_MIN_VALUE,
+ spi_minval, CTLFLAG_RW, &VNET_NAME(key_spi_minval), 0, "");
+
+/* maximum spi value to allocate automatically. */
+SYSCTL_VNET_INT(_net_key, KEYCTL_SPI_MAX_VALUE,
+ spi_maxval, CTLFLAG_RW, &VNET_NAME(key_spi_maxval), 0, "");
+
+/* interval to initialize randseed */
+SYSCTL_VNET_INT(_net_key, KEYCTL_RANDOM_INT,
+ int_random, CTLFLAG_RW, &VNET_NAME(key_int_random), 0, "");
+
+/* lifetime for larval SA */
+SYSCTL_VNET_INT(_net_key, KEYCTL_LARVAL_LIFETIME,
+ larval_lifetime, CTLFLAG_RW, &VNET_NAME(key_larval_lifetime), 0, "");
+
+/* counter for blocking to send SADB_ACQUIRE to IKEd */
+SYSCTL_VNET_INT(_net_key, KEYCTL_BLOCKACQ_COUNT,
+ blockacq_count, CTLFLAG_RW, &VNET_NAME(key_blockacq_count), 0, "");
+
+/* lifetime for blocking to send SADB_ACQUIRE to IKEd */
+SYSCTL_VNET_INT(_net_key, KEYCTL_BLOCKACQ_LIFETIME,
+ blockacq_lifetime, CTLFLAG_RW, &VNET_NAME(key_blockacq_lifetime), 0, "");
+
+/* ESP auth */
+SYSCTL_VNET_INT(_net_key, KEYCTL_ESP_AUTH, esp_auth,
+ CTLFLAG_RW, &VNET_NAME(ipsec_esp_auth), 0, "");
+
+/* minimum ESP key length */
+SYSCTL_VNET_INT(_net_key, KEYCTL_ESP_KEYMIN,
+ esp_keymin, CTLFLAG_RW, &VNET_NAME(ipsec_esp_keymin), 0, "");
+
+/* minimum AH key length */
+SYSCTL_VNET_INT(_net_key, KEYCTL_AH_KEYMIN, ah_keymin,
+ CTLFLAG_RW, &VNET_NAME(ipsec_ah_keymin), 0, "");
+
+/* preferred old SA rather than new SA */
+SYSCTL_VNET_INT(_net_key, KEYCTL_PREFERED_OLDSA,
+ preferred_oldsa, CTLFLAG_RW, &VNET_NAME(key_preferred_oldsa), 0, "");
+
+#define __LIST_CHAINED(elm) \
+ (!((elm)->chain.le_next == NULL && (elm)->chain.le_prev == NULL))
+#define LIST_INSERT_TAIL(head, elm, type, field) \
+do {\
+ struct type *curelm = LIST_FIRST(head); \
+ if (curelm == NULL) {\
+ LIST_INSERT_HEAD(head, elm, field); \
+ } else { \
+ while (LIST_NEXT(curelm, field)) \
+ curelm = LIST_NEXT(curelm, field);\
+ LIST_INSERT_AFTER(curelm, elm, field);\
+ }\
+} while (0)
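+
+/*
+ * Editorial sketch, not part of the imported FreeBSD sources:
+ * <sys/queue.h> LISTs only insert at the head in O(1); this helper
+ * walks to the last element so callers can keep entries in arrival
+ * order.  A hypothetical use for the acquiring list, assuming a
+ * "chain" link field and a freshly allocated "newacq":
+ */
+#if 0	/* illustrative only */
+	LIST_INSERT_TAIL(&V_acqtree, newacq, secacq, chain);
+#endif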
+
+#define KEY_CHKSASTATE(head, sav, name) \
+do { \
+ if ((head) != (sav)) { \
+ ipseclog((LOG_DEBUG, "%s: state mismatched (TREE=%d SA=%d)\n", \
+ (name), (head), (sav))); \
+ continue; \
+ } \
+} while (0)
+
+#define KEY_CHKSPDIR(head, sp, name) \
+do { \
+ if ((head) != (sp)) { \
+ ipseclog((LOG_DEBUG, "%s: direction mismatched (TREE=%d SP=%d), " \
+ "anyway continue.\n", \
+ (name), (head), (sp))); \
+ } \
+} while (0)
+
+MALLOC_DEFINE(M_IPSEC_SA, "secasvar", "ipsec security association");
+MALLOC_DEFINE(M_IPSEC_SAH, "sahead", "ipsec sa head");
+MALLOC_DEFINE(M_IPSEC_SP, "ipsecpolicy", "ipsec security policy");
+MALLOC_DEFINE(M_IPSEC_SR, "ipsecrequest", "ipsec security request");
+MALLOC_DEFINE(M_IPSEC_MISC, "ipsec-misc", "ipsec miscellaneous");
+MALLOC_DEFINE(M_IPSEC_SAQ, "ipsec-saq", "ipsec sa acquire");
+MALLOC_DEFINE(M_IPSEC_SAR, "ipsec-reg", "ipsec sa register");
+
+/*
+ * Set parameters into a secpolicyindex buffer.
+ * The caller must allocate the secpolicyindex buffer passed to this macro.
+ */
+#define KEY_SETSECSPIDX(_dir, s, d, ps, pd, ulp, idx) \
+do { \
+ bzero((idx), sizeof(struct secpolicyindex)); \
+ (idx)->dir = (_dir); \
+ (idx)->prefs = (ps); \
+ (idx)->prefd = (pd); \
+ (idx)->ul_proto = (ulp); \
+ bcopy((s), &(idx)->src, ((const struct sockaddr *)(s))->sa_len); \
+ bcopy((d), &(idx)->dst, ((const struct sockaddr *)(d))->sa_len); \
+} while (0)
+
+/*
+ * Set parameters into a secasindex buffer.
+ * The caller must allocate the secasindex buffer before invoking this macro.
+ */
+#define KEY_SETSECASIDX(p, m, r, s, d, idx) \
+do { \
+ bzero((idx), sizeof(struct secasindex)); \
+ (idx)->proto = (p); \
+ (idx)->mode = (m); \
+ (idx)->reqid = (r); \
+ bcopy((s), &(idx)->src, ((const struct sockaddr *)(s))->sa_len); \
+ bcopy((d), &(idx)->dst, ((const struct sockaddr *)(d))->sa_len); \
+} while (0)
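+
+/*
+ * Illustrative sketch: both macros above expect caller-allocated
+ * storage and sockaddrs carrying a valid sa_len.  Building a
+ * secasindex for an ESP transport-mode SA (`src' and `dst' are
+ * assumed to be struct sockaddr_in pointers):
+ */
+#if 0 /* example only, not compiled */
+ struct secasindex saidx;
+
+ KEY_SETSECASIDX(IPPROTO_ESP, IPSEC_MODE_TRANSPORT, 0 /* reqid */,
+ src, dst, &saidx);
+#endif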
+
+/* key statistics */
+struct _keystat {
+ u_long getspi_count; /* average number of attempts to get a new SPI */
+} keystat;
+
+struct sadb_msghdr {
+ struct sadb_msg *msg;
+ struct sadb_ext *ext[SADB_EXT_MAX + 1];
+ int extoff[SADB_EXT_MAX + 1];
+ int extlen[SADB_EXT_MAX + 1];
+};
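+
+/*
+ * The sadb_msghdr above indexes a parsed PF_KEY message: key_align()
+ * (declared below) fills ext[]/extoff[]/extlen[] so that handlers can
+ * fetch any extension by type in O(1).  Sketch of the access pattern
+ * used throughout this file:
+ */
+#if 0 /* example only, not compiled */
+ if (mhp->ext[SADB_X_EXT_POLICY] != NULL &&
+ mhp->extlen[SADB_X_EXT_POLICY] >= sizeof(struct sadb_x_policy)) {
+ struct sadb_x_policy *xpl;
+
+ xpl = (struct sadb_x_policy *)mhp->ext[SADB_X_EXT_POLICY];
+ /* ... use xpl ... */
+ }
+#endif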
+
+static struct secasvar *key_allocsa_policy __P((const struct secasindex *));
+static void key_freesp_so __P((struct secpolicy **));
+static struct secasvar *key_do_allocsa_policy __P((struct secashead *, u_int));
+static void key_delsp __P((struct secpolicy *));
+static struct secpolicy *key_getsp __P((struct secpolicyindex *));
+static void _key_delsp(struct secpolicy *sp);
+static struct secpolicy *key_getspbyid __P((u_int32_t));
+static u_int32_t key_newreqid __P((void));
+static struct mbuf *key_gather_mbuf __P((struct mbuf *,
+ const struct sadb_msghdr *, int, int, ...));
+static int key_spdadd __P((struct socket *, struct mbuf *,
+ const struct sadb_msghdr *));
+static u_int32_t key_getnewspid __P((void));
+static int key_spddelete __P((struct socket *, struct mbuf *,
+ const struct sadb_msghdr *));
+static int key_spddelete2 __P((struct socket *, struct mbuf *,
+ const struct sadb_msghdr *));
+static int key_spdget __P((struct socket *, struct mbuf *,
+ const struct sadb_msghdr *));
+static int key_spdflush __P((struct socket *, struct mbuf *,
+ const struct sadb_msghdr *));
+static int key_spddump __P((struct socket *, struct mbuf *,
+ const struct sadb_msghdr *));
+static struct mbuf *key_setdumpsp __P((struct secpolicy *,
+ u_int8_t, u_int32_t, u_int32_t));
+static u_int key_getspreqmsglen __P((struct secpolicy *));
+static int key_spdexpire __P((struct secpolicy *));
+static struct secashead *key_newsah __P((struct secasindex *));
+static void key_delsah __P((struct secashead *));
+static struct secasvar *key_newsav __P((struct mbuf *,
+ const struct sadb_msghdr *, struct secashead *, int *,
+ const char*, int));
+#define KEY_NEWSAV(m, sadb, sah, e) \
+ key_newsav(m, sadb, sah, e, __FILE__, __LINE__)
+static void key_delsav __P((struct secasvar *));
+static struct secashead *key_getsah __P((struct secasindex *));
+static struct secasvar *key_checkspidup __P((struct secasindex *, u_int32_t));
+static struct secasvar *key_getsavbyspi __P((struct secashead *, u_int32_t));
+static int key_setsaval __P((struct secasvar *, struct mbuf *,
+ const struct sadb_msghdr *));
+static int key_mature __P((struct secasvar *));
+static struct mbuf *key_setdumpsa __P((struct secasvar *, u_int8_t,
+ u_int8_t, u_int32_t, u_int32_t));
+static struct mbuf *key_setsadbmsg __P((u_int8_t, u_int16_t, u_int8_t,
+ u_int32_t, pid_t, u_int16_t));
+static struct mbuf *key_setsadbsa __P((struct secasvar *));
+static struct mbuf *key_setsadbaddr __P((u_int16_t,
+ const struct sockaddr *, u_int8_t, u_int16_t));
+#ifdef IPSEC_NAT_T
+static struct mbuf *key_setsadbxport(u_int16_t, u_int16_t);
+static struct mbuf *key_setsadbxtype(u_int16_t);
+#endif
+static void key_porttosaddr(struct sockaddr *, u_int16_t);
+#define KEY_PORTTOSADDR(saddr, port) \
+ key_porttosaddr((struct sockaddr *)(saddr), (port))
+static struct mbuf *key_setsadbxsa2 __P((u_int8_t, u_int32_t, u_int32_t));
+static struct mbuf *key_setsadbxpolicy __P((u_int16_t, u_int8_t,
+ u_int32_t));
+static struct seckey *key_dup_keymsg(const struct sadb_key *, u_int,
+ struct malloc_type *);
+static struct seclifetime *key_dup_lifemsg(const struct sadb_lifetime *src,
+ struct malloc_type *type);
+#ifdef INET6
+static int key_ismyaddr6 __P((struct sockaddr_in6 *));
+#endif
+
+/* flags for key_cmpsaidx() */
+#define CMP_HEAD 1 /* protocol, addresses. */
+#define CMP_MODE_REQID 2 /* additionally HEAD, reqid, mode. */
+#define CMP_REQID 3 /* additionally HEAD, reqid. */
+#define CMP_EXACTLY 4 /* all elements. */
+static int key_cmpsaidx
+ __P((const struct secasindex *, const struct secasindex *, int));
+
+static int key_cmpspidx_exactly
+ __P((struct secpolicyindex *, struct secpolicyindex *));
+static int key_cmpspidx_withmask
+ __P((struct secpolicyindex *, struct secpolicyindex *));
+static int key_sockaddrcmp __P((const struct sockaddr *, const struct sockaddr *, int));
+static int key_bbcmp __P((const void *, const void *, u_int));
+static u_int16_t key_satype2proto __P((u_int8_t));
+static u_int8_t key_proto2satype __P((u_int16_t));
+
+static int key_getspi __P((struct socket *, struct mbuf *,
+ const struct sadb_msghdr *));
+static u_int32_t key_do_getnewspi __P((struct sadb_spirange *,
+ struct secasindex *));
+static int key_update __P((struct socket *, struct mbuf *,
+ const struct sadb_msghdr *));
+#ifdef IPSEC_DOSEQCHECK
+static struct secasvar *key_getsavbyseq __P((struct secashead *, u_int32_t));
+#endif
+static int key_add __P((struct socket *, struct mbuf *,
+ const struct sadb_msghdr *));
+static int key_setident __P((struct secashead *, struct mbuf *,
+ const struct sadb_msghdr *));
+static struct mbuf *key_getmsgbuf_x1 __P((struct mbuf *,
+ const struct sadb_msghdr *));
+static int key_delete __P((struct socket *, struct mbuf *,
+ const struct sadb_msghdr *));
+static int key_get __P((struct socket *, struct mbuf *,
+ const struct sadb_msghdr *));
+
+static void key_getcomb_setlifetime __P((struct sadb_comb *));
+static struct mbuf *key_getcomb_esp __P((void));
+static struct mbuf *key_getcomb_ah __P((void));
+static struct mbuf *key_getcomb_ipcomp __P((void));
+static struct mbuf *key_getprop __P((const struct secasindex *));
+
+static int key_acquire __P((const struct secasindex *, struct secpolicy *));
+static struct secacq *key_newacq __P((const struct secasindex *));
+static struct secacq *key_getacq __P((const struct secasindex *));
+static struct secacq *key_getacqbyseq __P((u_int32_t));
+static struct secspacq *key_newspacq __P((struct secpolicyindex *));
+static struct secspacq *key_getspacq __P((struct secpolicyindex *));
+static int key_acquire2 __P((struct socket *, struct mbuf *,
+ const struct sadb_msghdr *));
+static int key_register __P((struct socket *, struct mbuf *,
+ const struct sadb_msghdr *));
+static int key_expire __P((struct secasvar *));
+static int key_flush __P((struct socket *, struct mbuf *,
+ const struct sadb_msghdr *));
+static int key_dump __P((struct socket *, struct mbuf *,
+ const struct sadb_msghdr *));
+static int key_promisc __P((struct socket *, struct mbuf *,
+ const struct sadb_msghdr *));
+static int key_senderror __P((struct socket *, struct mbuf *, int));
+static int key_validate_ext __P((const struct sadb_ext *, int));
+static int key_align __P((struct mbuf *, struct sadb_msghdr *));
+static struct mbuf *key_setlifetime(struct seclifetime *src,
+ u_int16_t exttype);
+static struct mbuf *key_setkey(struct seckey *src, u_int16_t exttype);
+
+#if 0
+static const char *key_getfqdn __P((void));
+static const char *key_getuserfqdn __P((void));
+#endif
+static void key_sa_chgstate __P((struct secasvar *, u_int8_t));
+static struct mbuf *key_alloc_mbuf __P((int));
+
+static __inline void
+sa_initref(struct secasvar *sav)
+{
+
+ refcount_init(&sav->refcnt, 1);
+}
+static __inline void
+sa_addref(struct secasvar *sav)
+{
+
+ refcount_acquire(&sav->refcnt);
+ IPSEC_ASSERT(sav->refcnt != 0, ("SA refcnt overflow"));
+}
+static __inline int
+sa_delref(struct secasvar *sav)
+{
+
+ IPSEC_ASSERT(sav->refcnt > 0, ("SA refcnt underflow"));
+ return (refcount_release(&sav->refcnt));
+}
+
+#define SP_ADDREF(p) do { \
+ (p)->refcnt++; \
+ IPSEC_ASSERT((p)->refcnt != 0, ("SP refcnt overflow")); \
+} while (0)
+#define SP_DELREF(p) do { \
+ IPSEC_ASSERT((p)->refcnt > 0, ("SP refcnt underflow")); \
+ (p)->refcnt--; \
+} while (0)
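+
+/*
+ * Reference-count sketch: SAs use the refcount(9)-style helpers above,
+ * while SPs use SP_ADDREF/SP_DELREF, which must run under the SPTREE
+ * lock (see key_addref() below).  Typical SA usage:
+ */
+#if 0 /* example only, not compiled */
+ sa_addref(sav); /* take a reference */
+ /* ... use sav ... */
+ KEY_FREESAV(&sav); /* drop it; the last holder frees the SA */
+#endif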
+
+
+/*
+ * Update the refcnt while holding the SPTREE lock.
+ */
+void
+key_addref(struct secpolicy *sp)
+{
+ SPTREE_LOCK();
+ SP_ADDREF(sp);
+ SPTREE_UNLOCK();
+}
+
+/*
+ * Return 0 when there are known to be no SPs for the specified
+ * direction. Otherwise return 1. This is used by IPsec code
+ * to optimize performance.
+ */
+int
+key_havesp(u_int dir)
+{
+
+ return (dir == IPSEC_DIR_INBOUND || dir == IPSEC_DIR_OUTBOUND ?
+ LIST_FIRST(&V_sptree[dir]) != NULL : 1);
+}
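+
+/*
+ * Fast-path sketch: callers are expected to test key_havesp() before
+ * doing a full SPD lookup, so hosts with an empty SPD pay almost
+ * nothing per packet (`spidx' is assumed to be filled in by the
+ * caller):
+ */
+#if 0 /* example only, not compiled */
+ if (!key_havesp(IPSEC_DIR_OUTBOUND))
+ return 0; /* no policies, nothing can match */
+ sp = key_allocsp(&spidx, IPSEC_DIR_OUTBOUND, __FILE__, __LINE__);
+#endif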
+
+/* %%% IPsec policy management */
+/*
+ * Allocate an SP for an OUTBOUND or INBOUND packet.
+ * The caller must call key_freesp() later.
+ * OUT: NULL: not found
+ * others: found; return the pointer.
+ */
+struct secpolicy *
+key_allocsp(struct secpolicyindex *spidx, u_int dir, const char* where, int tag)
+{
+ struct secpolicy *sp;
+
+ IPSEC_ASSERT(spidx != NULL, ("null spidx"));
+ IPSEC_ASSERT(dir == IPSEC_DIR_INBOUND || dir == IPSEC_DIR_OUTBOUND,
+ ("invalid direction %u", dir));
+
+ KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
+ printf("DP %s from %s:%u\n", __func__, where, tag));
+
+ /* get a SP entry */
+ KEYDEBUG(KEYDEBUG_IPSEC_DATA,
+ printf("*** objects\n");
+ kdebug_secpolicyindex(spidx));
+
+ SPTREE_LOCK();
+ LIST_FOREACH(sp, &V_sptree[dir], chain) {
+ KEYDEBUG(KEYDEBUG_IPSEC_DATA,
+ printf("*** in SPD\n");
+ kdebug_secpolicyindex(&sp->spidx));
+
+ if (sp->state == IPSEC_SPSTATE_DEAD)
+ continue;
+ if (key_cmpspidx_withmask(&sp->spidx, spidx))
+ goto found;
+ }
+ sp = NULL;
+found:
+ if (sp) {
+ /* sanity check */
+ KEY_CHKSPDIR(sp->spidx.dir, dir, __func__);
+
+ /* found a SPD entry */
+ sp->lastused = time_second;
+ SP_ADDREF(sp);
+ }
+ SPTREE_UNLOCK();
+
+ KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
+ printf("DP %s return SP:%p (ID=%u) refcnt %u\n", __func__,
+ sp, sp ? sp->id : 0, sp ? sp->refcnt : 0));
+ return sp;
+}
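+
+/*
+ * Usage sketch for the lookup above: each successful key_allocsp()
+ * takes a reference that must be dropped with KEY_FREESP() (a wrapper
+ * around _key_freesp(), defined below) once the policy is no longer
+ * needed:
+ */
+#if 0 /* example only, not compiled */
+ sp = key_allocsp(&spidx, IPSEC_DIR_INBOUND, __FILE__, __LINE__);
+ if (sp != NULL) {
+ /* ... consult sp->policy and sp->req ... */
+ KEY_FREESP(&sp); /* drops the reference taken above */
+ }
+#endif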
+
+/*
+ * Allocate an SP for an OUTBOUND or INBOUND packet, additionally
+ * matching the SPI of the first request's SA.
+ * The caller must call key_freesp() later.
+ * OUT: NULL: not found
+ * others: found; return the pointer.
+ */
+struct secpolicy *
+key_allocsp2(u_int32_t spi,
+ union sockaddr_union *dst,
+ u_int8_t proto,
+ u_int dir,
+ const char* where, int tag)
+{
+ struct secpolicy *sp;
+
+ IPSEC_ASSERT(dst != NULL, ("null dst"));
+ IPSEC_ASSERT(dir == IPSEC_DIR_INBOUND || dir == IPSEC_DIR_OUTBOUND,
+ ("invalid direction %u", dir));
+
+ KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
+ printf("DP %s from %s:%u\n", __func__, where, tag));
+
+ /* get a SP entry */
+ KEYDEBUG(KEYDEBUG_IPSEC_DATA,
+ printf("*** objects\n");
+ printf("spi %u proto %u dir %u\n", spi, proto, dir);
+ kdebug_sockaddr(&dst->sa));
+
+ SPTREE_LOCK();
+ LIST_FOREACH(sp, &V_sptree[dir], chain) {
+ KEYDEBUG(KEYDEBUG_IPSEC_DATA,
+ printf("*** in SPD\n");
+ kdebug_secpolicyindex(&sp->spidx));
+
+ if (sp->state == IPSEC_SPSTATE_DEAD)
+ continue;
+ /* compare simple values, then dst address */
+ if (sp->spidx.ul_proto != proto)
+ continue;
+ /* NB: spi's must exist and match */
+ if (!sp->req || !sp->req->sav || sp->req->sav->spi != spi)
+ continue;
+ if (key_sockaddrcmp(&sp->spidx.dst.sa, &dst->sa, 1) == 0)
+ goto found;
+ }
+ sp = NULL;
+found:
+ if (sp) {
+ /* sanity check */
+ KEY_CHKSPDIR(sp->spidx.dir, dir, __func__);
+
+ /* found a SPD entry */
+ sp->lastused = time_second;
+ SP_ADDREF(sp);
+ }
+ SPTREE_UNLOCK();
+
+ KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
+ printf("DP %s return SP:%p (ID=%u) refcnt %u\n", __func__,
+ sp, sp ? sp->id : 0, sp ? sp->refcnt : 0));
+ return sp;
+}
+
+#if 0
+/*
+ * return a policy that matches this particular inbound packet.
+ * XXX slow
+ */
+struct secpolicy *
+key_gettunnel(const struct sockaddr *osrc,
+ const struct sockaddr *odst,
+ const struct sockaddr *isrc,
+ const struct sockaddr *idst,
+ const char* where, int tag)
+{
+ struct secpolicy *sp;
+ const int dir = IPSEC_DIR_INBOUND;
+ struct ipsecrequest *r1, *r2, *p;
+ struct secpolicyindex spidx;
+
+ KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
+ printf("DP %s from %s:%u\n", __func__, where, tag));
+
+ if (isrc->sa_family != idst->sa_family) {
+ ipseclog((LOG_ERR, "%s: protocol family mismatched %d != %d\n.",
+ __func__, isrc->sa_family, idst->sa_family));
+ sp = NULL;
+ goto done;
+ }
+
+ SPTREE_LOCK();
+ LIST_FOREACH(sp, &V_sptree[dir], chain) {
+ if (sp->state == IPSEC_SPSTATE_DEAD)
+ continue;
+
+ r1 = r2 = NULL;
+ for (p = sp->req; p; p = p->next) {
+ if (p->saidx.mode != IPSEC_MODE_TUNNEL)
+ continue;
+
+ r1 = r2;
+ r2 = p;
+
+ if (!r1) {
+ /* here we look at address matches only */
+ spidx = sp->spidx;
+ if (isrc->sa_len > sizeof(spidx.src) ||
+ idst->sa_len > sizeof(spidx.dst))
+ continue;
+ bcopy(isrc, &spidx.src, isrc->sa_len);
+ bcopy(idst, &spidx.dst, idst->sa_len);
+ if (!key_cmpspidx_withmask(&sp->spidx, &spidx))
+ continue;
+ } else {
+ if (key_sockaddrcmp(&r1->saidx.src.sa, isrc, 0) ||
+ key_sockaddrcmp(&r1->saidx.dst.sa, idst, 0))
+ continue;
+ }
+
+ if (key_sockaddrcmp(&r2->saidx.src.sa, osrc, 0) ||
+ key_sockaddrcmp(&r2->saidx.dst.sa, odst, 0))
+ continue;
+
+ goto found;
+ }
+ }
+ sp = NULL;
+found:
+ if (sp) {
+ sp->lastused = time_second;
+ SP_ADDREF(sp);
+ }
+ SPTREE_UNLOCK();
+done:
+ KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
+ printf("DP %s return SP:%p (ID=%u) refcnt %u\n", __func__,
+ sp, sp ? sp->id : 0, sp ? sp->refcnt : 0));
+ return sp;
+}
+#endif
+
+/*
+ * Allocate an SA entry for an *OUTBOUND* packet.
+ * Check each request entry in the SP, and acquire an SA if needed.
+ * OUT: 0: there are valid requests.
+ * ENOENT: the policy may be valid, but an SA with level REQUIRE is
+ * still being acquired.
+ */
+int
+key_checkrequest(struct ipsecrequest *isr, const struct secasindex *saidx)
+{
+ u_int level;
+ int error;
+
+ IPSEC_ASSERT(isr != NULL, ("null isr"));
+ IPSEC_ASSERT(saidx != NULL, ("null saidx"));
+ IPSEC_ASSERT(saidx->mode == IPSEC_MODE_TRANSPORT ||
+ saidx->mode == IPSEC_MODE_TUNNEL,
+ ("unexpected policy %u", saidx->mode));
+
+ /*
+ * XXX guard against protocol callbacks from the crypto
+ * thread as they reference ipsecrequest.sav which we
+ * temporarily null out below. Need to rethink how we
+ * handle bundled SA's in the callback thread.
+ */
+ IPSECREQUEST_LOCK_ASSERT(isr);
+
+ /* get current level */
+ level = ipsec_get_reqlevel(isr);
+#if 0
+ /*
+ * We do allocate new SA only if the state of SA in the holder is
+ * SADB_SASTATE_DEAD. The SA for outbound must be the oldest.
+ */
+ if (isr->sav != NULL) {
+ if (isr->sav->sah == NULL)
+ panic("%s: sah is null.\n", __func__);
+ if (isr->sav == (struct secasvar *)LIST_FIRST(
+ &isr->sav->sah->savtree[SADB_SASTATE_DEAD])) {
+ KEY_FREESAV(&isr->sav);
+ isr->sav = NULL;
+ }
+ }
+#else
+ /*
+ * we free any SA stashed in the IPsec request because a different
+ * SA may be involved each time this request is checked, either
+ * because new SAs are being configured, or this request is
+ * associated with an unconnected datagram socket, or this request
+ * is associated with a system default policy.
+ *
+ * The operation may negatively impact performance. We may want
+ * to check the cached SA carefully, rather than picking a new SA
+ * every time.
+ */
+ if (isr->sav != NULL) {
+ KEY_FREESAV(&isr->sav);
+ isr->sav = NULL;
+ }
+#endif
+
+ /*
+ * Allocate a new SA if none was found.
+ * key_allocsa_policy should allocate the oldest SA available.
+ * See key_do_allocsa_policy(), and draft-jenkins-ipsec-rekeying-03.txt.
+ */
+ if (isr->sav == NULL)
+ isr->sav = key_allocsa_policy(saidx);
+
+ /* When there is SA. */
+ if (isr->sav != NULL) {
+ if (isr->sav->state != SADB_SASTATE_MATURE &&
+ isr->sav->state != SADB_SASTATE_DYING)
+ return EINVAL;
+ return 0;
+ }
+
+ /* there is no SA */
+ error = key_acquire(saidx, isr->sp);
+ if (error != 0) {
+ /* XXX What should I do ? */
+ ipseclog((LOG_DEBUG, "%s: error %d returned from key_acquire\n",
+ __func__, error));
+ return error;
+ }
+
+ if (level != IPSEC_LEVEL_REQUIRE) {
+ /* XXX sigh, the interface to this routine is botched */
+ IPSEC_ASSERT(isr->sav == NULL, ("unexpected SA"));
+ return 0;
+ } else {
+ return ENOENT;
+ }
+}
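+
+/*
+ * Caller sketch (hypothetical, following the locking assertion above):
+ * the output path holds the request lock across the call and treats
+ * ENOENT as "no SA yet; IKE has been asked", not as a hard failure:
+ */
+#if 0 /* example only, not compiled */
+ IPSECREQUEST_LOCK(isr);
+ error = key_checkrequest(isr, &saidx);
+ IPSECREQUEST_UNLOCK(isr);
+ /* ENOENT: SADB_ACQUIRE sent, SA not installed yet */
+#endif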
+
+/*
+ * Allocate an SA for a policy entry from the SAD.
+ * NOTE: searches the SAD for entries in a live state.
+ * OUT: NULL: not found.
+ * others: found and return the pointer.
+ */
+static struct secasvar *
+key_allocsa_policy(const struct secasindex *saidx)
+{
+#define N(a) _ARRAYLEN(a)
+ struct secashead *sah;
+ struct secasvar *sav;
+ u_int stateidx, arraysize;
+ const u_int *state_valid;
+
+ SAHTREE_LOCK();
+ LIST_FOREACH(sah, &V_sahtree, chain) {
+ if (sah->state == SADB_SASTATE_DEAD)
+ continue;
+ if (key_cmpsaidx(&sah->saidx, saidx, CMP_MODE_REQID)) {
+ if (V_key_preferred_oldsa) {
+ state_valid = saorder_state_valid_prefer_old;
+ arraysize = N(saorder_state_valid_prefer_old);
+ } else {
+ state_valid = saorder_state_valid_prefer_new;
+ arraysize = N(saorder_state_valid_prefer_new);
+ }
+ SAHTREE_UNLOCK();
+ goto found;
+ }
+ }
+ SAHTREE_UNLOCK();
+
+ return NULL;
+
+ found:
+ /* search valid state */
+ for (stateidx = 0; stateidx < arraysize; stateidx++) {
+ sav = key_do_allocsa_policy(sah, state_valid[stateidx]);
+ if (sav != NULL)
+ return sav;
+ }
+
+ return NULL;
+#undef N
+}
+
+/*
+ * Search the SAD by direction, protocol, mode and state.
+ * called by key_allocsa_policy().
+ * OUT:
+ * NULL : not found
+ * others : found, pointer to a SA.
+ */
+static struct secasvar *
+key_do_allocsa_policy(struct secashead *sah, u_int state)
+{
+ struct secasvar *sav, *nextsav, *candidate, *d;
+
+ /* initialize */
+ candidate = NULL;
+
+ SAHTREE_LOCK();
+ for (sav = LIST_FIRST(&sah->savtree[state]);
+ sav != NULL;
+ sav = nextsav) {
+
+ nextsav = LIST_NEXT(sav, chain);
+
+ /* sanity check */
+ KEY_CHKSASTATE(sav->state, state, __func__);
+
+ /* initialize */
+ if (candidate == NULL) {
+ candidate = sav;
+ continue;
+ }
+
+ /* Which SA is better? */
+
+ IPSEC_ASSERT(candidate->lft_c != NULL,
+ ("null candidate lifetime"));
+ IPSEC_ASSERT(sav->lft_c != NULL, ("null sav lifetime"));
+
+ /* Which comparison method is best? */
+ if (V_key_preferred_oldsa) {
+ if (candidate->lft_c->addtime >
+ sav->lft_c->addtime) {
+ candidate = sav;
+ }
+ continue;
+ /*NOTREACHED*/
+ }
+
+ /* prefer the new SA over the old one */
+ if (candidate->lft_c->addtime <
+ sav->lft_c->addtime) {
+ d = candidate;
+ candidate = sav;
+ } else
+ d = sav;
+
+ /*
+ * prepare to delete the SA when there is a more
+ * suitable candidate and the lifetime of the SA is not
+ * permanent.
+ */
+ if (d->lft_h->addtime != 0) {
+ struct mbuf *m, *result;
+ u_int8_t satype;
+
+ key_sa_chgstate(d, SADB_SASTATE_DEAD);
+
+ IPSEC_ASSERT(d->refcnt > 0, ("bogus ref count"));
+
+ satype = key_proto2satype(d->sah->saidx.proto);
+ if (satype == 0)
+ goto msgfail;
+
+ m = key_setsadbmsg(SADB_DELETE, 0,
+ satype, 0, 0, d->refcnt - 1);
+ if (!m)
+ goto msgfail;
+ result = m;
+
+ /* set sadb_address for the saidx source. */
+ m = key_setsadbaddr(SADB_EXT_ADDRESS_SRC,
+ &d->sah->saidx.src.sa,
+ d->sah->saidx.src.sa.sa_len << 3,
+ IPSEC_ULPROTO_ANY);
+ if (!m)
+ goto msgfail;
+ m_cat(result, m);
+
+ /* set sadb_address for the saidx destination. */
+ m = key_setsadbaddr(SADB_EXT_ADDRESS_DST,
+ &d->sah->saidx.dst.sa,
+ d->sah->saidx.dst.sa.sa_len << 3,
+ IPSEC_ULPROTO_ANY);
+ if (!m)
+ goto msgfail;
+ m_cat(result, m);
+
+ /* create SA extension */
+ m = key_setsadbsa(d);
+ if (!m)
+ goto msgfail;
+ m_cat(result, m);
+
+ if (result->m_len < sizeof(struct sadb_msg)) {
+ result = m_pullup(result,
+ sizeof(struct sadb_msg));
+ if (result == NULL)
+ goto msgfail;
+ }
+
+ result->m_pkthdr.len = 0;
+ for (m = result; m; m = m->m_next)
+ result->m_pkthdr.len += m->m_len;
+ mtod(result, struct sadb_msg *)->sadb_msg_len =
+ PFKEY_UNIT64(result->m_pkthdr.len);
+
+ if (key_sendup_mbuf(NULL, result,
+ KEY_SENDUP_REGISTERED))
+ goto msgfail;
+ msgfail:
+ KEY_FREESAV(&d);
+ }
+ }
+ if (candidate) {
+ sa_addref(candidate);
+ KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
+ printf("DP %s cause refcnt++:%d SA:%p\n",
+ __func__, candidate->refcnt, candidate));
+ }
+ SAHTREE_UNLOCK();
+
+ return candidate;
+}
+
+/*
+ * Allocate a usable SA entry for an *INBOUND* packet.
+ * The caller must call key_freesav() later.
+ * OUT: positive: pointer to a usable sav (i.e. MATURE or DYING state).
+ * NULL: not found, or an error occurred.
+ *
+ * In the comparison, no source address is used--for RFC2401 conformance.
+ * To quote, from section 4.1:
+ * A security association is uniquely identified by a triple consisting
+ * of a Security Parameter Index (SPI), an IP Destination Address, and a
+ * security protocol (AH or ESP) identifier.
+ * Note, however, that we do need to keep the source address in the
+ * IPsec SA: both the IKE and PF_KEY specifications assume that we
+ * keep it there, which puts us in a tricky situation.
+ */
+struct secasvar *
+key_allocsa(
+ union sockaddr_union *dst,
+ u_int proto,
+ u_int32_t spi,
+ const char* where, int tag)
+{
+ struct secashead *sah;
+ struct secasvar *sav;
+ u_int stateidx, arraysize, state;
+ const u_int *saorder_state_valid;
+ int chkport;
+
+ IPSEC_ASSERT(dst != NULL, ("null dst address"));
+
+ KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
+ printf("DP %s from %s:%u\n", __func__, where, tag));
+
+#ifdef IPSEC_NAT_T
+ chkport = (dst->sa.sa_family == AF_INET &&
+ dst->sa.sa_len == sizeof(struct sockaddr_in) &&
+ dst->sin.sin_port != 0);
+#else
+ chkport = 0;
+#endif
+
+ /*
+ * search the SAD.
+ * XXX: the inner IP header should be checked somewhere, also when
+ * an IPsec tunnel packet is received; but ESP tunnel mode is
+ * encrypted, so we cannot inspect the inner IP header.
+ */
+ SAHTREE_LOCK();
+ if (V_key_preferred_oldsa) {
+ saorder_state_valid = saorder_state_valid_prefer_old;
+ arraysize = _ARRAYLEN(saorder_state_valid_prefer_old);
+ } else {
+ saorder_state_valid = saorder_state_valid_prefer_new;
+ arraysize = _ARRAYLEN(saorder_state_valid_prefer_new);
+ }
+ LIST_FOREACH(sah, &V_sahtree, chain) {
+ /* search valid state */
+ for (stateidx = 0; stateidx < arraysize; stateidx++) {
+ state = saorder_state_valid[stateidx];
+ LIST_FOREACH(sav, &sah->savtree[state], chain) {
+ /* sanity check */
+ KEY_CHKSASTATE(sav->state, state, __func__);
+ /* do not return entries w/ unusable state */
+ if (sav->state != SADB_SASTATE_MATURE &&
+ sav->state != SADB_SASTATE_DYING)
+ continue;
+ if (proto != sav->sah->saidx.proto)
+ continue;
+ if (spi != sav->spi)
+ continue;
+#if 0 /* don't check src */
+ /* check src address */
+ if (key_sockaddrcmp(&src->sa, &sav->sah->saidx.src.sa, chkport) != 0)
+ continue;
+#endif
+ /* check dst address */
+ if (key_sockaddrcmp(&dst->sa, &sav->sah->saidx.dst.sa, chkport) != 0)
+ continue;
+ sa_addref(sav);
+ goto done;
+ }
+ }
+ }
+ sav = NULL;
+done:
+ SAHTREE_UNLOCK();
+
+ KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
+ printf("DP %s return SA:%p; refcnt %u\n", __func__,
+ sav, sav ? sav->refcnt : 0));
+ return sav;
+}
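+
+/*
+ * Usage sketch: inbound processing looks an SA up by the RFC 2401
+ * triple only, and must drop the reference with KEY_FREESAV() when
+ * done (`dst' is assumed to be a union sockaddr_union holding the
+ * packet's destination; `spi' is in network byte order, as stored):
+ */
+#if 0 /* example only, not compiled */
+ sav = key_allocsa(&dst, IPPROTO_ESP, spi, __FILE__, __LINE__);
+ if (sav == NULL)
+ return ENOENT; /* no usable (MATURE/DYING) SA */
+ /* ... authenticate/decrypt the packet with sav ... */
+ KEY_FREESAV(&sav);
+#endif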
+
+/*
+ * Must be called after calling key_allocsp().
+ * Used both for packets without a socket and by key_freeso().
+ */
+void
+_key_freesp(struct secpolicy **spp, const char* where, int tag)
+{
+ struct secpolicy *sp = *spp;
+
+ IPSEC_ASSERT(sp != NULL, ("null sp"));
+
+ SPTREE_LOCK();
+ SP_DELREF(sp);
+
+ KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
+ printf("DP %s SP:%p (ID=%u) from %s:%u; refcnt now %u\n",
+ __func__, sp, sp->id, where, tag, sp->refcnt));
+
+ if (sp->refcnt == 0) {
+ *spp = NULL;
+ key_delsp(sp);
+ }
+ SPTREE_UNLOCK();
+}
+
+/*
+ * Must be called after calling key_allocsp().
+ * For the packet with socket.
+ */
+void
+key_freeso(struct socket *so)
+{
+ IPSEC_ASSERT(so != NULL, ("null so"));
+
+ switch (so->so_proto->pr_domain->dom_family) {
+#if defined(INET) || defined(INET6)
+#ifdef INET
+ case PF_INET:
+#endif
+#ifdef INET6
+ case PF_INET6:
+#endif
+ {
+ struct inpcb *pcb = sotoinpcb(so);
+
+ /* Does it have a PCB ? */
+ if (pcb == NULL)
+ return;
+ key_freesp_so(&pcb->inp_sp->sp_in);
+ key_freesp_so(&pcb->inp_sp->sp_out);
+ }
+ break;
+#endif /* INET || INET6 */
+ default:
+ ipseclog((LOG_DEBUG, "%s: unknown address family=%d.\n",
+ __func__, so->so_proto->pr_domain->dom_family));
+ return;
+ }
+}
+
+static void
+key_freesp_so(struct secpolicy **sp)
+{
+ IPSEC_ASSERT(sp != NULL && *sp != NULL, ("null sp"));
+
+ if ((*sp)->policy == IPSEC_POLICY_ENTRUST ||
+ (*sp)->policy == IPSEC_POLICY_BYPASS)
+ return;
+
+ IPSEC_ASSERT((*sp)->policy == IPSEC_POLICY_IPSEC,
+ ("invalid policy %u", (*sp)->policy));
+ KEY_FREESP(sp);
+}
+
+/*
+ * Must be called after calling key_allocsa().
+ * This function is called by key_freesp() to free some SA allocated
+ * for a policy.
+ */
+void
+key_freesav(struct secasvar **psav, const char* where, int tag)
+{
+ struct secasvar *sav = *psav;
+
+ IPSEC_ASSERT(sav != NULL, ("null sav"));
+
+ if (sa_delref(sav)) {
+ KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
+ printf("DP %s SA:%p (SPI %u) from %s:%u; refcnt now %u\n",
+ __func__, sav, ntohl(sav->spi), where, tag, sav->refcnt));
+ *psav = NULL;
+ key_delsav(sav);
+ } else {
+ KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
+ printf("DP %s SA:%p (SPI %u) from %s:%u; refcnt now %u\n",
+ __func__, sav, ntohl(sav->spi), where, tag, sav->refcnt));
+ }
+}
+
+/* %%% SPD management */
+/*
+ * free security policy entry.
+ */
+static void
+key_delsp(struct secpolicy *sp)
+{
+ struct ipsecrequest *isr, *nextisr;
+
+ IPSEC_ASSERT(sp != NULL, ("null sp"));
+ SPTREE_LOCK_ASSERT();
+
+ sp->state = IPSEC_SPSTATE_DEAD;
+
+ IPSEC_ASSERT(sp->refcnt == 0,
+ ("SP with references deleted (refcnt %u)", sp->refcnt));
+
+ /* remove from SP index */
+ if (__LIST_CHAINED(sp))
+ LIST_REMOVE(sp, chain);
+
+ for (isr = sp->req; isr != NULL; isr = nextisr) {
+ if (isr->sav != NULL) {
+ KEY_FREESAV(&isr->sav);
+ isr->sav = NULL;
+ }
+
+ nextisr = isr->next;
+ ipsec_delisr(isr);
+ }
+ _key_delsp(sp);
+}
+
+/*
+ * search SPD
+ * OUT: NULL : not found
+ * others : found, pointer to a SP.
+ */
+static struct secpolicy *
+key_getsp(struct secpolicyindex *spidx)
+{
+ struct secpolicy *sp;
+
+ IPSEC_ASSERT(spidx != NULL, ("null spidx"));
+
+ SPTREE_LOCK();
+ LIST_FOREACH(sp, &V_sptree[spidx->dir], chain) {
+ if (sp->state == IPSEC_SPSTATE_DEAD)
+ continue;
+ if (key_cmpspidx_exactly(spidx, &sp->spidx)) {
+ SP_ADDREF(sp);
+ break;
+ }
+ }
+ SPTREE_UNLOCK();
+
+ return sp;
+}
+
+/*
+ * get SP by index.
+ * OUT: NULL : not found
+ * others : found, pointer to a SP.
+ */
+static struct secpolicy *
+key_getspbyid(u_int32_t id)
+{
+ struct secpolicy *sp;
+
+ SPTREE_LOCK();
+ LIST_FOREACH(sp, &V_sptree[IPSEC_DIR_INBOUND], chain) {
+ if (sp->state == IPSEC_SPSTATE_DEAD)
+ continue;
+ if (sp->id == id) {
+ SP_ADDREF(sp);
+ goto done;
+ }
+ }
+
+ LIST_FOREACH(sp, &V_sptree[IPSEC_DIR_OUTBOUND], chain) {
+ if (sp->state == IPSEC_SPSTATE_DEAD)
+ continue;
+ if (sp->id == id) {
+ SP_ADDREF(sp);
+ goto done;
+ }
+ }
+done:
+ SPTREE_UNLOCK();
+
+ return sp;
+}
+
+struct secpolicy *
+key_newsp(const char* where, int tag)
+{
+ struct secpolicy *newsp = NULL;
+
+ newsp = (struct secpolicy *)
+ malloc(sizeof(struct secpolicy), M_IPSEC_SP, M_NOWAIT|M_ZERO);
+ if (newsp) {
+ SECPOLICY_LOCK_INIT(newsp);
+ newsp->refcnt = 1;
+ newsp->req = NULL;
+ }
+
+ KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
+ printf("DP %s from %s:%u return SP:%p\n", __func__,
+ where, tag, newsp));
+ return newsp;
+}
+
+static void
+_key_delsp(struct secpolicy *sp)
+{
+ SECPOLICY_LOCK_DESTROY(sp);
+ free(sp, M_IPSEC_SP);
+}
+
+/*
+ * Create a secpolicy structure from a sadb_x_policy structure.
+ * NOTE: `state' and `secpolicyindex' in the secpolicy structure are
+ * not set here, so they must be set properly later.
+ */
+struct secpolicy *
+key_msg2sp(xpl0, len, error)
+ struct sadb_x_policy *xpl0;
+ size_t len;
+ int *error;
+{
+ struct secpolicy *newsp;
+
+ IPSEC_ASSERT(xpl0 != NULL, ("null xpl0"));
+ IPSEC_ASSERT(len >= sizeof(*xpl0), ("policy too short: %zu", len));
+
+ if (len != PFKEY_EXTLEN(xpl0)) {
+ ipseclog((LOG_DEBUG, "%s: Invalid msg length.\n", __func__));
+ *error = EINVAL;
+ return NULL;
+ }
+
+ if ((newsp = KEY_NEWSP()) == NULL) {
+ *error = ENOBUFS;
+ return NULL;
+ }
+
+ newsp->spidx.dir = xpl0->sadb_x_policy_dir;
+ newsp->policy = xpl0->sadb_x_policy_type;
+
+ /* check policy */
+ switch (xpl0->sadb_x_policy_type) {
+ case IPSEC_POLICY_DISCARD:
+ case IPSEC_POLICY_NONE:
+ case IPSEC_POLICY_ENTRUST:
+ case IPSEC_POLICY_BYPASS:
+ newsp->req = NULL;
+ break;
+
+ case IPSEC_POLICY_IPSEC:
+ {
+ int tlen;
+ struct sadb_x_ipsecrequest *xisr;
+ struct ipsecrequest **p_isr = &newsp->req;
+
+ /* validity check */
+ if (PFKEY_EXTLEN(xpl0) < sizeof(*xpl0)) {
+ ipseclog((LOG_DEBUG, "%s: Invalid msg length.\n",
+ __func__));
+ KEY_FREESP(&newsp);
+ *error = EINVAL;
+ return NULL;
+ }
+
+ tlen = PFKEY_EXTLEN(xpl0) - sizeof(*xpl0);
+ xisr = (struct sadb_x_ipsecrequest *)(xpl0 + 1);
+
+ while (tlen > 0) {
+ /* length check */
+ if (xisr->sadb_x_ipsecrequest_len < sizeof(*xisr)) {
+ ipseclog((LOG_DEBUG, "%s: invalid ipsecrequest "
+ "length.\n", __func__));
+ KEY_FREESP(&newsp);
+ *error = EINVAL;
+ return NULL;
+ }
+
+ /* allocate request buffer */
+ /* NB: data structure is zero'd */
+ *p_isr = ipsec_newisr();
+ if ((*p_isr) == NULL) {
+ ipseclog((LOG_DEBUG,
+ "%s: No more memory.\n", __func__));
+ KEY_FREESP(&newsp);
+ *error = ENOBUFS;
+ return NULL;
+ }
+
+ /* set values */
+ switch (xisr->sadb_x_ipsecrequest_proto) {
+ case IPPROTO_ESP:
+ case IPPROTO_AH:
+ case IPPROTO_IPCOMP:
+ break;
+ default:
+ ipseclog((LOG_DEBUG,
+ "%s: invalid proto type=%u\n", __func__,
+ xisr->sadb_x_ipsecrequest_proto));
+ KEY_FREESP(&newsp);
+ *error = EPROTONOSUPPORT;
+ return NULL;
+ }
+ (*p_isr)->saidx.proto = xisr->sadb_x_ipsecrequest_proto;
+
+ switch (xisr->sadb_x_ipsecrequest_mode) {
+ case IPSEC_MODE_TRANSPORT:
+ case IPSEC_MODE_TUNNEL:
+ break;
+ case IPSEC_MODE_ANY:
+ default:
+ ipseclog((LOG_DEBUG,
+ "%s: invalid mode=%u\n", __func__,
+ xisr->sadb_x_ipsecrequest_mode));
+ KEY_FREESP(&newsp);
+ *error = EINVAL;
+ return NULL;
+ }
+ (*p_isr)->saidx.mode = xisr->sadb_x_ipsecrequest_mode;
+
+ switch (xisr->sadb_x_ipsecrequest_level) {
+ case IPSEC_LEVEL_DEFAULT:
+ case IPSEC_LEVEL_USE:
+ case IPSEC_LEVEL_REQUIRE:
+ break;
+ case IPSEC_LEVEL_UNIQUE:
+ /* validity check */
+ /*
+ * If the reqid is out of range, the kernel
+ * updates it rather than refusing the request.
+ */
+ if (xisr->sadb_x_ipsecrequest_reqid
+ > IPSEC_MANUAL_REQID_MAX) {
+ ipseclog((LOG_DEBUG,
+ "%s: reqid=%d range "
+ "violation, updated by kernel.\n",
+ __func__,
+ xisr->sadb_x_ipsecrequest_reqid));
+ xisr->sadb_x_ipsecrequest_reqid = 0;
+ }
+
+ /* allocate a new reqid if the given reqid is zero. */
+ if (xisr->sadb_x_ipsecrequest_reqid == 0) {
+ u_int32_t reqid;
+ if ((reqid = key_newreqid()) == 0) {
+ KEY_FREESP(&newsp);
+ *error = ENOBUFS;
+ return NULL;
+ }
+ (*p_isr)->saidx.reqid = reqid;
+ xisr->sadb_x_ipsecrequest_reqid = reqid;
+ } else {
+ /* set it for manual keying. */
+ (*p_isr)->saidx.reqid =
+ xisr->sadb_x_ipsecrequest_reqid;
+ }
+ break;
+
+ default:
+ ipseclog((LOG_DEBUG, "%s: invalid level=%u\n",
+ __func__,
+ xisr->sadb_x_ipsecrequest_level));
+ KEY_FREESP(&newsp);
+ *error = EINVAL;
+ return NULL;
+ }
+ (*p_isr)->level = xisr->sadb_x_ipsecrequest_level;
+
+ /* set IP addresses if present */
+ if (xisr->sadb_x_ipsecrequest_len > sizeof(*xisr)) {
+ struct sockaddr *paddr;
+
+ paddr = (struct sockaddr *)(xisr + 1);
+
+ /* validity check */
+ if (paddr->sa_len
+ > sizeof((*p_isr)->saidx.src)) {
+ ipseclog((LOG_DEBUG, "%s: invalid "
+ "request address length.\n",
+ __func__));
+ KEY_FREESP(&newsp);
+ *error = EINVAL;
+ return NULL;
+ }
+ bcopy(paddr, &(*p_isr)->saidx.src,
+ paddr->sa_len);
+
+ paddr = (struct sockaddr *)((caddr_t)paddr
+ + paddr->sa_len);
+
+ /* validity check */
+ if (paddr->sa_len
+ > sizeof((*p_isr)->saidx.dst)) {
+ ipseclog((LOG_DEBUG, "%s: invalid "
+ "request address length.\n",
+ __func__));
+ KEY_FREESP(&newsp);
+ *error = EINVAL;
+ return NULL;
+ }
+ bcopy(paddr, &(*p_isr)->saidx.dst,
+ paddr->sa_len);
+ }
+
+ (*p_isr)->sp = newsp;
+
+ /* set up for the next iteration. */
+ p_isr = &(*p_isr)->next;
+ tlen -= xisr->sadb_x_ipsecrequest_len;
+
+ /* validity check */
+ if (tlen < 0) {
+ ipseclog((LOG_DEBUG, "%s: becoming tlen < 0.\n",
+ __func__));
+ KEY_FREESP(&newsp);
+ *error = EINVAL;
+ return NULL;
+ }
+
+ xisr = (struct sadb_x_ipsecrequest *)((caddr_t)xisr
+ + xisr->sadb_x_ipsecrequest_len);
+ }
+ }
+ break;
+ default:
+ ipseclog((LOG_DEBUG, "%s: invalid policy type.\n", __func__));
+ KEY_FREESP(&newsp);
+ *error = EINVAL;
+ return NULL;
+ }
+
+ *error = 0;
+ return newsp;
+}
+
+static u_int32_t
+key_newreqid()
+{
+ static u_int32_t auto_reqid = IPSEC_MANUAL_REQID_MAX + 1;
+
+ auto_reqid = (auto_reqid == ~0
+ ? IPSEC_MANUAL_REQID_MAX + 1 : auto_reqid + 1);
+
+ /* XXX should check for uniqueness */
+
+ return auto_reqid;
+}
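+
+/*
+ * Behaviour sketch for the wraparound above: reqids are handed out
+ * sequentially starting at IPSEC_MANUAL_REQID_MAX + 2 and restart at
+ * IPSEC_MANUAL_REQID_MAX + 1 once the 32-bit counter saturates; as
+ * the XXX notes, uniqueness is not re-checked after a wrap.
+ */
+#if 0 /* example only, not compiled */
+ u_int32_t r1 = key_newreqid(); /* IPSEC_MANUAL_REQID_MAX + 2 */
+ u_int32_t r2 = key_newreqid(); /* r1 + 1, and so on until it wraps */
+#endif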
+
+/*
+ * Copy a secpolicy struct into the indicated sadb_x_policy structure.
+ */
+struct mbuf *
+key_sp2msg(sp)
+ struct secpolicy *sp;
+{
+ struct sadb_x_policy *xpl;
+ int tlen;
+ caddr_t p;
+ struct mbuf *m;
+
+ IPSEC_ASSERT(sp != NULL, ("null policy"));
+
+ tlen = key_getspreqmsglen(sp);
+
+ m = key_alloc_mbuf(tlen);
+ if (!m || m->m_next) { /*XXX*/
+ if (m)
+ m_freem(m);
+ return NULL;
+ }
+
+ m->m_len = tlen;
+ m->m_next = NULL;
+ xpl = mtod(m, struct sadb_x_policy *);
+ bzero(xpl, tlen);
+
+ xpl->sadb_x_policy_len = PFKEY_UNIT64(tlen);
+ xpl->sadb_x_policy_exttype = SADB_X_EXT_POLICY;
+ xpl->sadb_x_policy_type = sp->policy;
+ xpl->sadb_x_policy_dir = sp->spidx.dir;
+ xpl->sadb_x_policy_id = sp->id;
+ p = (caddr_t)xpl + sizeof(*xpl);
+
+ /* is this the policy for IPsec? */
+ if (sp->policy == IPSEC_POLICY_IPSEC) {
+ struct sadb_x_ipsecrequest *xisr;
+ struct ipsecrequest *isr;
+
+ for (isr = sp->req; isr != NULL; isr = isr->next) {
+
+ xisr = (struct sadb_x_ipsecrequest *)p;
+
+ xisr->sadb_x_ipsecrequest_proto = isr->saidx.proto;
+ xisr->sadb_x_ipsecrequest_mode = isr->saidx.mode;
+ xisr->sadb_x_ipsecrequest_level = isr->level;
+ xisr->sadb_x_ipsecrequest_reqid = isr->saidx.reqid;
+
+ p += sizeof(*xisr);
+ bcopy(&isr->saidx.src, p, isr->saidx.src.sa.sa_len);
+ p += isr->saidx.src.sa.sa_len;
+ bcopy(&isr->saidx.dst, p, isr->saidx.dst.sa.sa_len);
+ p += isr->saidx.dst.sa.sa_len;
+
+ xisr->sadb_x_ipsecrequest_len =
+ PFKEY_ALIGN8(sizeof(*xisr)
+ + isr->saidx.src.sa.sa_len
+ + isr->saidx.dst.sa.sa_len);
+ }
+ }
+
+ return m;
+}
+
+/* m will not be freed nor modified */
+static struct mbuf *
+#ifdef __STDC__
+key_gather_mbuf(struct mbuf *m, const struct sadb_msghdr *mhp,
+ int ndeep, int nitem, ...)
+#else
+key_gather_mbuf(m, mhp, ndeep, nitem, va_alist)
+ struct mbuf *m;
+ const struct sadb_msghdr *mhp;
+ int ndeep;
+ int nitem;
+ va_dcl
+#endif
+{
+ va_list ap;
+ int idx;
+ int i;
+ struct mbuf *result = NULL, *n;
+ int len;
+
+ IPSEC_ASSERT(m != NULL, ("null mbuf"));
+ IPSEC_ASSERT(mhp != NULL, ("null msghdr"));
+
+ va_start(ap, nitem);
+ for (i = 0; i < nitem; i++) {
+ idx = va_arg(ap, int);
+ if (idx < 0 || idx > SADB_EXT_MAX)
+ goto fail;
+ /* don't attempt to pull empty extension */
+ if (idx == SADB_EXT_RESERVED && mhp->msg == NULL)
+ continue;
+ if (idx != SADB_EXT_RESERVED &&
+ (mhp->ext[idx] == NULL || mhp->extlen[idx] == 0))
+ continue;
+
+ if (idx == SADB_EXT_RESERVED) {
+ len = PFKEY_ALIGN8(sizeof(struct sadb_msg));
+
+ IPSEC_ASSERT(len <= MHLEN, ("header too big %u", len));
+
+ MGETHDR(n, M_DONTWAIT, MT_DATA);
+ if (!n)
+ goto fail;
+ n->m_len = len;
+ n->m_next = NULL;
+ m_copydata(m, 0, sizeof(struct sadb_msg),
+ mtod(n, caddr_t));
+ } else if (i < ndeep) {
+ len = mhp->extlen[idx];
+ n = key_alloc_mbuf(len);
+ if (!n || n->m_next) { /*XXX*/
+ if (n)
+ m_freem(n);
+ goto fail;
+ }
+ m_copydata(m, mhp->extoff[idx], mhp->extlen[idx],
+ mtod(n, caddr_t));
+ } else {
+ n = m_copym(m, mhp->extoff[idx], mhp->extlen[idx],
+ M_DONTWAIT);
+ }
+ if (n == NULL)
+ goto fail;
+
+ if (result)
+ m_cat(result, n);
+ else
+ result = n;
+ }
+ va_end(ap);
+
+ if ((result->m_flags & M_PKTHDR) != 0) {
+ result->m_pkthdr.len = 0;
+ for (n = result; n; n = n->m_next)
+ result->m_pkthdr.len += n->m_len;
+ }
+
+ return result;
+
+fail:
+ m_freem(result);
+ return NULL;
+}
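+
+/*
+ * Semantics sketch for key_gather_mbuf(): of the `nitem' extension
+ * indices passed, the first `ndeep' are deep-copied into writable
+ * mbufs while the rest are shared read-only via m_copym().  The reply
+ * path in key_spdadd() below deep-copies two items (the header and
+ * the policy extension) precisely so it can patch the policy id in
+ * place:
+ */
+#if 0 /* example only, not compiled */
+ n = key_gather_mbuf(m, mhp, 2 /* ndeep */, 4 /* nitem */,
+ SADB_EXT_RESERVED, SADB_X_EXT_POLICY,
+ SADB_EXT_ADDRESS_SRC, SADB_EXT_ADDRESS_DST);
+#endif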
+
+/*
+ * SADB_X_SPDADD, SADB_X_SPDSETIDX or SADB_X_SPDUPDATE processing:
+ * add an entry to the SP database on receiving
+ * <base, address(SD), (lifetime(H),) policy>
+ * from the user(?),
+ * then send
+ * <base, address(SD), (lifetime(H),) policy>
+ * back to the socket that sent the message.
+ *
+ * SPDADD sets a unique policy entry.
+ * SPDSETIDX is like SPDADD but without the policy request part.
+ * SPDUPDATE replaces a unique policy entry.
+ *
+ * m will always be freed.
+ */
+static int
+key_spdadd(so, m, mhp)
+ struct socket *so;
+ struct mbuf *m;
+ const struct sadb_msghdr *mhp;
+{
+ struct sadb_address *src0, *dst0;
+ struct sadb_x_policy *xpl0, *xpl;
+ struct sadb_lifetime *lft = NULL;
+ struct secpolicyindex spidx;
+ struct secpolicy *newsp;
+ int error;
+
+ IPSEC_ASSERT(so != NULL, ("null socket"));
+ IPSEC_ASSERT(m != NULL, ("null mbuf"));
+ IPSEC_ASSERT(mhp != NULL, ("null msghdr"));
+ IPSEC_ASSERT(mhp->msg != NULL, ("null msg"));
+
+ if (mhp->ext[SADB_EXT_ADDRESS_SRC] == NULL ||
+ mhp->ext[SADB_EXT_ADDRESS_DST] == NULL ||
+ mhp->ext[SADB_X_EXT_POLICY] == NULL) {
+ ipseclog((LOG_DEBUG, "key_spdadd: invalid message is passed.\n"));
+ return key_senderror(so, m, EINVAL);
+ }
+ if (mhp->extlen[SADB_EXT_ADDRESS_SRC] < sizeof(struct sadb_address) ||
+ mhp->extlen[SADB_EXT_ADDRESS_DST] < sizeof(struct sadb_address) ||
+ mhp->extlen[SADB_X_EXT_POLICY] < sizeof(struct sadb_x_policy)) {
+ ipseclog((LOG_DEBUG, "%s: invalid message is passed.\n",
+ __func__));
+ return key_senderror(so, m, EINVAL);
+ }
+ if (mhp->ext[SADB_EXT_LIFETIME_HARD] != NULL) {
+ if (mhp->extlen[SADB_EXT_LIFETIME_HARD]
+ < sizeof(struct sadb_lifetime)) {
+ ipseclog((LOG_DEBUG, "%s: invalid message is passed.\n",
+ __func__));
+ return key_senderror(so, m, EINVAL);
+ }
+ lft = (struct sadb_lifetime *)mhp->ext[SADB_EXT_LIFETIME_HARD];
+ }
+
+ src0 = (struct sadb_address *)mhp->ext[SADB_EXT_ADDRESS_SRC];
+ dst0 = (struct sadb_address *)mhp->ext[SADB_EXT_ADDRESS_DST];
+ xpl0 = (struct sadb_x_policy *)mhp->ext[SADB_X_EXT_POLICY];
+
+ /*
+ * Note: do not parse SADB_X_EXT_NAT_T_* here:
+ * we are processing traffic endpoints.
+ */
+
+ /* make secindex */
+ /* XXX boundary check against sa_len */
+ KEY_SETSECSPIDX(xpl0->sadb_x_policy_dir,
+ src0 + 1,
+ dst0 + 1,
+ src0->sadb_address_prefixlen,
+ dst0->sadb_address_prefixlen,
+ src0->sadb_address_proto,
+ &spidx);
+
+ /* check the direction. */
+ switch (xpl0->sadb_x_policy_dir) {
+ case IPSEC_DIR_INBOUND:
+ case IPSEC_DIR_OUTBOUND:
+ break;
+ default:
+ ipseclog((LOG_DEBUG, "%s: Invalid SP direction.\n", __func__));
+ mhp->msg->sadb_msg_errno = EINVAL;
+ return 0;
+ }
+
+ /* check policy */
+ /* key_spdadd() accepts DISCARD, NONE and IPSEC. */
+ if (xpl0->sadb_x_policy_type == IPSEC_POLICY_ENTRUST
+ || xpl0->sadb_x_policy_type == IPSEC_POLICY_BYPASS) {
+ ipseclog((LOG_DEBUG, "%s: Invalid policy type.\n", __func__));
+ return key_senderror(so, m, EINVAL);
+ }
+
+ /* policy requests are mandatory when action is ipsec. */
+ if (mhp->msg->sadb_msg_type != SADB_X_SPDSETIDX
+ && xpl0->sadb_x_policy_type == IPSEC_POLICY_IPSEC
+ && mhp->extlen[SADB_X_EXT_POLICY] <= sizeof(*xpl0)) {
+ ipseclog((LOG_DEBUG, "%s: some policy requests part required\n",
+ __func__));
+ return key_senderror(so, m, EINVAL);
+ }
+
+ /*
+ * check whether an SP already exists.
+ * SPDUPDATE doesn't depend on whether one does, but if the type
+ * is either SPDADD or SPDSETIDX and an SP is found, that is an
+ * error.
+ */
+ newsp = key_getsp(&spidx);
+ if (mhp->msg->sadb_msg_type == SADB_X_SPDUPDATE) {
+ if (newsp) {
+ SPTREE_LOCK();
+ newsp->state = IPSEC_SPSTATE_DEAD;
+ SPTREE_UNLOCK();
+ KEY_FREESP(&newsp);
+ }
+ } else {
+ if (newsp != NULL) {
+ KEY_FREESP(&newsp);
+ ipseclog((LOG_DEBUG, "%s: a SP entry exists already.\n",
+ __func__));
+ return key_senderror(so, m, EEXIST);
+ }
+ }
+
+ /* allocate a new SP entry */
+ if ((newsp = key_msg2sp(xpl0, PFKEY_EXTLEN(xpl0), &error)) == NULL) {
+ return key_senderror(so, m, error);
+ }
+
+ if ((newsp->id = key_getnewspid()) == 0) {
+ _key_delsp(newsp);
+ return key_senderror(so, m, ENOBUFS);
+ }
+
+ /* XXX boundary check against sa_len */
+ KEY_SETSECSPIDX(xpl0->sadb_x_policy_dir,
+ src0 + 1,
+ dst0 + 1,
+ src0->sadb_address_prefixlen,
+ dst0->sadb_address_prefixlen,
+ src0->sadb_address_proto,
+ &newsp->spidx);
+
+ /* sanity check on addr pair */
+ if (((struct sockaddr *)(src0 + 1))->sa_family !=
+ ((struct sockaddr *)(dst0+ 1))->sa_family) {
+ _key_delsp(newsp);
+ return key_senderror(so, m, EINVAL);
+ }
+ if (((struct sockaddr *)(src0 + 1))->sa_len !=
+ ((struct sockaddr *)(dst0+ 1))->sa_len) {
+ _key_delsp(newsp);
+ return key_senderror(so, m, EINVAL);
+ }
+#if 1
+ if (newsp->req && newsp->req->saidx.src.sa.sa_family) {
+ struct sockaddr *sa;
+ sa = (struct sockaddr *)(src0 + 1);
+ if (sa->sa_family != newsp->req->saidx.src.sa.sa_family) {
+ _key_delsp(newsp);
+ return key_senderror(so, m, EINVAL);
+ }
+ }
+ if (newsp->req && newsp->req->saidx.dst.sa.sa_family) {
+ struct sockaddr *sa;
+ sa = (struct sockaddr *)(dst0 + 1);
+ if (sa->sa_family != newsp->req->saidx.dst.sa.sa_family) {
+ _key_delsp(newsp);
+ return key_senderror(so, m, EINVAL);
+ }
+ }
+#endif
+
+ newsp->created = time_second;
+ newsp->lastused = newsp->created;
+ newsp->lifetime = lft ? lft->sadb_lifetime_addtime : 0;
+ newsp->validtime = lft ? lft->sadb_lifetime_usetime : 0;
+
+ newsp->refcnt = 1; /* do not reclaim until I say I do */
+ newsp->state = IPSEC_SPSTATE_ALIVE;
+ LIST_INSERT_TAIL(&V_sptree[newsp->spidx.dir], newsp, secpolicy, chain);
+
+ /* delete the entry in spacqtree */
+ if (mhp->msg->sadb_msg_type == SADB_X_SPDUPDATE) {
+ struct secspacq *spacq = key_getspacq(&spidx);
+ if (spacq != NULL) {
+ /* reset the counter so the timehandler will delete it. */
+ spacq->created = time_second;
+ spacq->count = 0;
+ SPACQ_UNLOCK();
+ }
+ }
+
+ {
+ struct mbuf *n, *mpolicy;
+ struct sadb_msg *newmsg;
+ int off;
+
+ /*
+ * Note: do not send SADB_X_EXT_NAT_T_* here:
+ * we are sending traffic endpoints.
+ */
+
+ /* create new sadb_msg to reply. */
+ if (lft) {
+ n = key_gather_mbuf(m, mhp, 2, 5, SADB_EXT_RESERVED,
+ SADB_X_EXT_POLICY, SADB_EXT_LIFETIME_HARD,
+ SADB_EXT_ADDRESS_SRC, SADB_EXT_ADDRESS_DST);
+ } else {
+ n = key_gather_mbuf(m, mhp, 2, 4, SADB_EXT_RESERVED,
+ SADB_X_EXT_POLICY,
+ SADB_EXT_ADDRESS_SRC, SADB_EXT_ADDRESS_DST);
+ }
+ if (!n)
+ return key_senderror(so, m, ENOBUFS);
+
+ if (n->m_len < sizeof(*newmsg)) {
+ n = m_pullup(n, sizeof(*newmsg));
+ if (!n)
+ return key_senderror(so, m, ENOBUFS);
+ }
+ newmsg = mtod(n, struct sadb_msg *);
+ newmsg->sadb_msg_errno = 0;
+ newmsg->sadb_msg_len = PFKEY_UNIT64(n->m_pkthdr.len);
+
+ off = 0;
+ mpolicy = m_pulldown(n, PFKEY_ALIGN8(sizeof(struct sadb_msg)),
+ sizeof(*xpl), &off);
+ if (mpolicy == NULL) {
+ /* n is already freed */
+ return key_senderror(so, m, ENOBUFS);
+ }
+ xpl = (struct sadb_x_policy *)(mtod(mpolicy, caddr_t) + off);
+ if (xpl->sadb_x_policy_exttype != SADB_X_EXT_POLICY) {
+ m_freem(n);
+ return key_senderror(so, m, EINVAL);
+ }
+ xpl->sadb_x_policy_id = newsp->id;
+
+ m_freem(m);
+ return key_sendup_mbuf(so, n, KEY_SENDUP_ALL);
+ }
+}
+
+/*
+ * get new policy id.
+ * OUT:
+ * 0: failure.
+ * others: success.
+ */
+static u_int32_t
+key_getnewspid()
+{
+ u_int32_t newid = 0;
+ int count = V_key_spi_trycnt; /* XXX */
+ struct secpolicy *sp;
+
+ /* retry until an unused policy id is found */
+ while (count--) {
+ newid = (V_policy_id = (V_policy_id == ~0 ? 1 : V_policy_id + 1));
+
+ if ((sp = key_getspbyid(newid)) == NULL)
+ break;
+
+ KEY_FREESP(&sp);
+ }
+
+ if (count == 0 || newid == 0) {
+ ipseclog((LOG_DEBUG, "%s: to allocate policy id is failed.\n",
+ __func__));
+ return 0;
+ }
+
+ return newid;
+}
+
+/*
+ * SADB_SPDDELETE processing
+ * receive
+ * <base, address(SD), policy(*)>
+ * from the user(?), and set SADB_SASTATE_DEAD,
+ * and send,
+ * <base, address(SD), policy(*)>
+ * to the ikmpd.
+ * policy(*) includes the direction of the policy.
+ *
+ * m will always be freed.
+ */
+static int
+key_spddelete(so, m, mhp)
+ struct socket *so;
+ struct mbuf *m;
+ const struct sadb_msghdr *mhp;
+{
+ struct sadb_address *src0, *dst0;
+ struct sadb_x_policy *xpl0;
+ struct secpolicyindex spidx;
+ struct secpolicy *sp;
+
+ IPSEC_ASSERT(so != NULL, ("null so"));
+ IPSEC_ASSERT(m != NULL, ("null mbuf"));
+ IPSEC_ASSERT(mhp != NULL, ("null msghdr"));
+ IPSEC_ASSERT(mhp->msg != NULL, ("null msg"));
+
+ if (mhp->ext[SADB_EXT_ADDRESS_SRC] == NULL ||
+ mhp->ext[SADB_EXT_ADDRESS_DST] == NULL ||
+ mhp->ext[SADB_X_EXT_POLICY] == NULL) {
+ ipseclog((LOG_DEBUG, "%s: invalid message is passed.\n",
+ __func__));
+ return key_senderror(so, m, EINVAL);
+ }
+ if (mhp->extlen[SADB_EXT_ADDRESS_SRC] < sizeof(struct sadb_address) ||
+ mhp->extlen[SADB_EXT_ADDRESS_DST] < sizeof(struct sadb_address) ||
+ mhp->extlen[SADB_X_EXT_POLICY] < sizeof(struct sadb_x_policy)) {
+ ipseclog((LOG_DEBUG, "%s: invalid message is passed.\n",
+ __func__));
+ return key_senderror(so, m, EINVAL);
+ }
+
+ src0 = (struct sadb_address *)mhp->ext[SADB_EXT_ADDRESS_SRC];
+ dst0 = (struct sadb_address *)mhp->ext[SADB_EXT_ADDRESS_DST];
+ xpl0 = (struct sadb_x_policy *)mhp->ext[SADB_X_EXT_POLICY];
+
+ /*
+ * Note: do not parse SADB_X_EXT_NAT_T_* here:
+ * we are processing traffic endpoints.
+ */
+
+ /* make secindex */
+ /* XXX boundary check against sa_len */
+ KEY_SETSECSPIDX(xpl0->sadb_x_policy_dir,
+ src0 + 1,
+ dst0 + 1,
+ src0->sadb_address_prefixlen,
+ dst0->sadb_address_prefixlen,
+ src0->sadb_address_proto,
+ &spidx);
+
+ /* check the direction. */
+ switch (xpl0->sadb_x_policy_dir) {
+ case IPSEC_DIR_INBOUND:
+ case IPSEC_DIR_OUTBOUND:
+ break;
+ default:
+ ipseclog((LOG_DEBUG, "%s: Invalid SP direction.\n", __func__));
+ return key_senderror(so, m, EINVAL);
+ }
+
+ /* Is there an SP in the SPD? */
+ if ((sp = key_getsp(&spidx)) == NULL) {
+ ipseclog((LOG_DEBUG, "%s: no SP found.\n", __func__));
+ return key_senderror(so, m, EINVAL);
+ }
+
+ /* save policy id to buffer to be returned. */
+ xpl0->sadb_x_policy_id = sp->id;
+
+ SPTREE_LOCK();
+ sp->state = IPSEC_SPSTATE_DEAD;
+ SPTREE_UNLOCK();
+ KEY_FREESP(&sp);
+
+ {
+ struct mbuf *n;
+ struct sadb_msg *newmsg;
+
+ /*
+ * Note: do not send SADB_X_EXT_NAT_T_* here:
+ * we are sending traffic endpoints.
+ */
+
+ /* create new sadb_msg to reply. */
+ n = key_gather_mbuf(m, mhp, 1, 4, SADB_EXT_RESERVED,
+ SADB_X_EXT_POLICY, SADB_EXT_ADDRESS_SRC, SADB_EXT_ADDRESS_DST);
+ if (!n)
+ return key_senderror(so, m, ENOBUFS);
+
+ newmsg = mtod(n, struct sadb_msg *);
+ newmsg->sadb_msg_errno = 0;
+ newmsg->sadb_msg_len = PFKEY_UNIT64(n->m_pkthdr.len);
+
+ m_freem(m);
+ return key_sendup_mbuf(so, n, KEY_SENDUP_ALL);
+ }
+}
+
+/*
+ * SADB_SPDDELETE2 processing
+ * receive
+ * <base, policy(*)>
+ * from the user(?), and set SADB_SASTATE_DEAD,
+ * and send,
+ * <base, policy(*)>
+ * to the ikmpd.
+ * policy(*) includes the direction of the policy.
+ *
+ * m will always be freed.
+ */
+static int
+key_spddelete2(so, m, mhp)
+ struct socket *so;
+ struct mbuf *m;
+ const struct sadb_msghdr *mhp;
+{
+ u_int32_t id;
+ struct secpolicy *sp;
+
+ IPSEC_ASSERT(so != NULL, ("null socket"));
+ IPSEC_ASSERT(m != NULL, ("null mbuf"));
+ IPSEC_ASSERT(mhp != NULL, ("null msghdr"));
+ IPSEC_ASSERT(mhp->msg != NULL, ("null msg"));
+
+ if (mhp->ext[SADB_X_EXT_POLICY] == NULL ||
+ mhp->extlen[SADB_X_EXT_POLICY] < sizeof(struct sadb_x_policy)) {
+ ipseclog((LOG_DEBUG, "%s: invalid message is passed.\n", __func__));
+ return key_senderror(so, m, EINVAL);
+ }
+
+ id = ((struct sadb_x_policy *)mhp->ext[SADB_X_EXT_POLICY])->sadb_x_policy_id;
+
+ /* Is there an SP in the SPD? */
+ if ((sp = key_getspbyid(id)) == NULL) {
+ ipseclog((LOG_DEBUG, "%s: no SP found id:%u.\n", __func__, id));
+ return key_senderror(so, m, EINVAL);
+ }
+
+ SPTREE_LOCK();
+ sp->state = IPSEC_SPSTATE_DEAD;
+ SPTREE_UNLOCK();
+ KEY_FREESP(&sp);
+
+ {
+ struct mbuf *n, *nn;
+ struct sadb_msg *newmsg;
+ int off, len;
+
+ /* create new sadb_msg to reply. */
+ len = PFKEY_ALIGN8(sizeof(struct sadb_msg));
+
+ MGETHDR(n, M_DONTWAIT, MT_DATA);
+ if (n && len > MHLEN) {
+ MCLGET(n, M_DONTWAIT);
+ if ((n->m_flags & M_EXT) == 0) {
+ m_freem(n);
+ n = NULL;
+ }
+ }
+ if (!n)
+ return key_senderror(so, m, ENOBUFS);
+
+ n->m_len = len;
+ n->m_next = NULL;
+ off = 0;
+
+ m_copydata(m, 0, sizeof(struct sadb_msg), mtod(n, caddr_t) + off);
+ off += PFKEY_ALIGN8(sizeof(struct sadb_msg));
+
+ IPSEC_ASSERT(off == len, ("length inconsistency (off %u len %u)",
+ off, len));
+
+ n->m_next = m_copym(m, mhp->extoff[SADB_X_EXT_POLICY],
+ mhp->extlen[SADB_X_EXT_POLICY], M_DONTWAIT);
+ if (!n->m_next) {
+ m_freem(n);
+ return key_senderror(so, m, ENOBUFS);
+ }
+
+ n->m_pkthdr.len = 0;
+ for (nn = n; nn; nn = nn->m_next)
+ n->m_pkthdr.len += nn->m_len;
+
+ newmsg = mtod(n, struct sadb_msg *);
+ newmsg->sadb_msg_errno = 0;
+ newmsg->sadb_msg_len = PFKEY_UNIT64(n->m_pkthdr.len);
+
+ m_freem(m);
+ return key_sendup_mbuf(so, n, KEY_SENDUP_ALL);
+ }
+}
+
+/*
+ * SADB_X_GET processing
+ * receive
+ * <base, policy(*)>
+ * from the user(?),
+ * and send,
+ * <base, address(SD), policy>
+ * to the ikmpd.
+ * policy(*) includes the direction of the policy.
+ *
+ * m will always be freed.
+ */
+static int
+key_spdget(so, m, mhp)
+ struct socket *so;
+ struct mbuf *m;
+ const struct sadb_msghdr *mhp;
+{
+ u_int32_t id;
+ struct secpolicy *sp;
+ struct mbuf *n;
+
+ IPSEC_ASSERT(so != NULL, ("null socket"));
+ IPSEC_ASSERT(m != NULL, ("null mbuf"));
+ IPSEC_ASSERT(mhp != NULL, ("null msghdr"));
+ IPSEC_ASSERT(mhp->msg != NULL, ("null msg"));
+
+ if (mhp->ext[SADB_X_EXT_POLICY] == NULL ||
+ mhp->extlen[SADB_X_EXT_POLICY] < sizeof(struct sadb_x_policy)) {
+ ipseclog((LOG_DEBUG, "%s: invalid message is passed.\n",
+ __func__));
+ return key_senderror(so, m, EINVAL);
+ }
+
+ id = ((struct sadb_x_policy *)mhp->ext[SADB_X_EXT_POLICY])->sadb_x_policy_id;
+
+ /* Is there an SP in the SPD? */
+ if ((sp = key_getspbyid(id)) == NULL) {
+ ipseclog((LOG_DEBUG, "%s: no SP found id:%u.\n", __func__, id));
+ return key_senderror(so, m, ENOENT);
+ }
+
+ n = key_setdumpsp(sp, SADB_X_SPDGET, 0, mhp->msg->sadb_msg_pid);
+ if (n != NULL) {
+ m_freem(m);
+ return key_sendup_mbuf(so, n, KEY_SENDUP_ONE);
+ } else
+ return key_senderror(so, m, ENOBUFS);
+}
+
+/*
+ * SADB_X_SPDACQUIRE processing.
+ * Acquire policy and SA(s) for an *OUTBOUND* packet.
+ * send
+ * <base, policy(*)>
+ * to KMD, and expect to receive
+ * <base> with SADB_X_SPDACQUIRE if an error occurred,
+ * or
+ * <base, policy>
+ * with SADB_X_SPDUPDATE from KMD by PF_KEY.
+ * policy(*) is without policy requests.
+ *
+ * 0 : succeed
+ * others: error number
+ */
+int
+key_spdacquire(sp)
+ struct secpolicy *sp;
+{
+ struct mbuf *result = NULL, *m;
+ struct secspacq *newspacq;
+
+ IPSEC_ASSERT(sp != NULL, ("null secpolicy"));
+ IPSEC_ASSERT(sp->req == NULL, ("policy exists"));
+ IPSEC_ASSERT(sp->policy == IPSEC_POLICY_IPSEC,
+ ("policy not IPSEC %u", sp->policy));
+
+ /* Get an entry to check whether a message has already been sent. */
+ newspacq = key_getspacq(&sp->spidx);
+ if (newspacq != NULL) {
+ if (V_key_blockacq_count < newspacq->count) {
+ /* reset counter and do send message. */
+ newspacq->count = 0;
+ } else {
+ /* increment counter and do nothing. */
+ newspacq->count++;
+ return 0;
+ }
+ SPACQ_UNLOCK();
+ } else {
+ /* make a new entry to block further SADB_ACQUIRE sends. */
+ newspacq = key_newspacq(&sp->spidx);
+ if (newspacq == NULL)
+ return ENOBUFS;
+ }
+
+ /* create new sadb_msg to reply. */
+ m = key_setsadbmsg(SADB_X_SPDACQUIRE, 0, 0, 0, 0, 0);
+ if (!m)
+ return ENOBUFS;
+
+ result = m;
+
+ result->m_pkthdr.len = 0;
+ for (m = result; m; m = m->m_next)
+ result->m_pkthdr.len += m->m_len;
+
+ mtod(result, struct sadb_msg *)->sadb_msg_len =
+ PFKEY_UNIT64(result->m_pkthdr.len);
+
+ return key_sendup_mbuf(NULL, result, KEY_SENDUP_REGISTERED);
+}
+
+/*
+ * SADB_SPDFLUSH processing
+ * receive
+ * <base>
+ * from the user, and free all entries in secpctree.
+ * and send,
+ * <base>
+ * to the user.
+ * NOTE: all this does is mark entries SADB_SASTATE_DEAD.
+ *
+ * m will always be freed.
+ */
+static int
+key_spdflush(so, m, mhp)
+ struct socket *so;
+ struct mbuf *m;
+ const struct sadb_msghdr *mhp;
+{
+ struct sadb_msg *newmsg;
+ struct secpolicy *sp;
+ u_int dir;
+
+ IPSEC_ASSERT(so != NULL, ("null socket"));
+ IPSEC_ASSERT(m != NULL, ("null mbuf"));
+ IPSEC_ASSERT(mhp != NULL, ("null msghdr"));
+ IPSEC_ASSERT(mhp->msg != NULL, ("null msg"));
+
+ if (m->m_len != PFKEY_ALIGN8(sizeof(struct sadb_msg)))
+ return key_senderror(so, m, EINVAL);
+
+ for (dir = 0; dir < IPSEC_DIR_MAX; dir++) {
+ SPTREE_LOCK();
+ LIST_FOREACH(sp, &V_sptree[dir], chain)
+ sp->state = IPSEC_SPSTATE_DEAD;
+ SPTREE_UNLOCK();
+ }
+
+ if (sizeof(struct sadb_msg) > m->m_len + M_TRAILINGSPACE(m)) {
+ ipseclog((LOG_DEBUG, "%s: No more memory.\n", __func__));
+ return key_senderror(so, m, ENOBUFS);
+ }
+
+ if (m->m_next)
+ m_freem(m->m_next);
+ m->m_next = NULL;
+ m->m_pkthdr.len = m->m_len = PFKEY_ALIGN8(sizeof(struct sadb_msg));
+ newmsg = mtod(m, struct sadb_msg *);
+ newmsg->sadb_msg_errno = 0;
+ newmsg->sadb_msg_len = PFKEY_UNIT64(m->m_pkthdr.len);
+
+ return key_sendup_mbuf(so, m, KEY_SENDUP_ALL);
+}
+
+/*
+ * SADB_SPDDUMP processing
+ * receive
+ * <base>
+ * from the user, and dump all SP leaves
+ * and send,
+ * <base> .....
+ * to the ikmpd.
+ *
+ * m will always be freed.
+ */
+static int
+key_spddump(so, m, mhp)
+ struct socket *so;
+ struct mbuf *m;
+ const struct sadb_msghdr *mhp;
+{
+ struct secpolicy *sp;
+ int cnt;
+ u_int dir;
+ struct mbuf *n;
+
+ IPSEC_ASSERT(so != NULL, ("null socket"));
+ IPSEC_ASSERT(m != NULL, ("null mbuf"));
+ IPSEC_ASSERT(mhp != NULL, ("null msghdr"));
+ IPSEC_ASSERT(mhp->msg != NULL, ("null msg"));
+
+ /* search SPD entry and get buffer size. */
+ cnt = 0;
+ SPTREE_LOCK();
+ for (dir = 0; dir < IPSEC_DIR_MAX; dir++) {
+ LIST_FOREACH(sp, &V_sptree[dir], chain) {
+ cnt++;
+ }
+ }
+
+ if (cnt == 0) {
+ SPTREE_UNLOCK();
+ return key_senderror(so, m, ENOENT);
+ }
+
+ for (dir = 0; dir < IPSEC_DIR_MAX; dir++) {
+ LIST_FOREACH(sp, &V_sptree[dir], chain) {
+ --cnt;
+ n = key_setdumpsp(sp, SADB_X_SPDDUMP, cnt,
+ mhp->msg->sadb_msg_pid);
+
+ if (n)
+ key_sendup_mbuf(so, n, KEY_SENDUP_ONE);
+ }
+ }
+
+ SPTREE_UNLOCK();
+ m_freem(m);
+ return 0;
+}
+
+static struct mbuf *
+key_setdumpsp(struct secpolicy *sp, u_int8_t type, u_int32_t seq, u_int32_t pid)
+{
+ struct mbuf *result = NULL, *m;
+ struct seclifetime lt;
+
+ m = key_setsadbmsg(type, 0, SADB_SATYPE_UNSPEC, seq, pid, sp->refcnt);
+ if (!m)
+ goto fail;
+ result = m;
+
+ /*
+ * Note: do not send SADB_X_EXT_NAT_T_* here:
+ * we are sending traffic endpoints.
+ */
+ m = key_setsadbaddr(SADB_EXT_ADDRESS_SRC,
+ &sp->spidx.src.sa, sp->spidx.prefs,
+ sp->spidx.ul_proto);
+ if (!m)
+ goto fail;
+ m_cat(result, m);
+
+ m = key_setsadbaddr(SADB_EXT_ADDRESS_DST,
+ &sp->spidx.dst.sa, sp->spidx.prefd,
+ sp->spidx.ul_proto);
+ if (!m)
+ goto fail;
+ m_cat(result, m);
+
+ m = key_sp2msg(sp);
+ if (!m)
+ goto fail;
+ m_cat(result, m);
+
+ if (sp->lifetime) {
+ lt.addtime = sp->created;
+ lt.usetime = sp->lastused;
+ m = key_setlifetime(&lt, SADB_EXT_LIFETIME_CURRENT);
+ if (!m)
+ goto fail;
+ m_cat(result, m);
+
+ lt.addtime = sp->lifetime;
+ lt.usetime = sp->validtime;
+ m = key_setlifetime(&lt, SADB_EXT_LIFETIME_HARD);
+ if (!m)
+ goto fail;
+ m_cat(result, m);
+ }
+
+ if ((result->m_flags & M_PKTHDR) == 0)
+ goto fail;
+
+ if (result->m_len < sizeof(struct sadb_msg)) {
+ result = m_pullup(result, sizeof(struct sadb_msg));
+ if (result == NULL)
+ goto fail;
+ }
+
+ result->m_pkthdr.len = 0;
+ for (m = result; m; m = m->m_next)
+ result->m_pkthdr.len += m->m_len;
+
+ mtod(result, struct sadb_msg *)->sadb_msg_len =
+ PFKEY_UNIT64(result->m_pkthdr.len);
+
+ return result;
+
+fail:
+ m_freem(result);
+ return NULL;
+}
+
+/*
+ * get PFKEY message length for security policy and request.
+ */
+static u_int
+key_getspreqmsglen(sp)
+ struct secpolicy *sp;
+{
+ u_int tlen;
+
+ tlen = sizeof(struct sadb_x_policy);
+
+ /* is this the policy for IPsec? */
+ if (sp->policy != IPSEC_POLICY_IPSEC)
+ return tlen;
+
+ /* get length of ipsec requests */
+ {
+ struct ipsecrequest *isr;
+ int len;
+
+ for (isr = sp->req; isr != NULL; isr = isr->next) {
+ len = sizeof(struct sadb_x_ipsecrequest)
+ + isr->saidx.src.sa.sa_len
+ + isr->saidx.dst.sa.sa_len;
+
+ tlen += PFKEY_ALIGN8(len);
+ }
+ }
+
+ return tlen;
+}
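+
+/*
+ * Worked example for the computation above (assuming a 16-byte
+ * struct sadb_x_ipsecrequest and a 16-byte struct sockaddr_in):
+ * a policy with one IPv4 ESP request yields
+ * tlen = 16 (sadb_x_policy) + PFKEY_ALIGN8(16 + 16 + 16) = 16 + 48
+ * = 64 bytes, which PFKEY_UNIT64() reports as 8 units on the wire.
+ */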
+
+/*
+ * SADB_SPDEXPIRE processing
+ * send
+ * <base, address(SD), lifetime(CH), policy>
+ * to KMD by PF_KEY.
+ *
+ * OUT: 0 : succeed
+ * others : error number
+ */
+static int
+key_spdexpire(sp)
+ struct secpolicy *sp;
+{
+ struct mbuf *result = NULL, *m;
+ int len;
+ int error = -1;
+ struct sadb_lifetime *lt;
+
+ /* XXX: Why do we lock ? */
+
+ IPSEC_ASSERT(sp != NULL, ("null secpolicy"));
+
+ /* set msg header */
+ m = key_setsadbmsg(SADB_X_SPDEXPIRE, 0, 0, 0, 0, 0);
+ if (!m) {
+ error = ENOBUFS;
+ goto fail;
+ }
+ result = m;
+
+ /* create lifetime extension (current and hard) */
+ len = PFKEY_ALIGN8(sizeof(*lt)) * 2;
+ m = key_alloc_mbuf(len);
+ if (!m || m->m_next) { /*XXX*/
+ if (m)
+ m_freem(m);
+ error = ENOBUFS;
+ goto fail;
+ }
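+ /*
+ * The buffer holds two 8-byte-aligned sadb_lifetime records back to
+ * back: CURRENT at offset 0 and HARD at offset len / 2.
+ */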
+ bzero(mtod(m, caddr_t), len);
+ lt = mtod(m, struct sadb_lifetime *);
+ lt->sadb_lifetime_len = PFKEY_UNIT64(sizeof(struct sadb_lifetime));
+ lt->sadb_lifetime_exttype = SADB_EXT_LIFETIME_CURRENT;
+ lt->sadb_lifetime_allocations = 0;
+ lt->sadb_lifetime_bytes = 0;
+ lt->sadb_lifetime_addtime = sp->created;
+ lt->sadb_lifetime_usetime = sp->lastused;
+ lt = (struct sadb_lifetime *)(mtod(m, caddr_t) + len / 2);
+ lt->sadb_lifetime_len = PFKEY_UNIT64(sizeof(struct sadb_lifetime));
+ lt->sadb_lifetime_exttype = SADB_EXT_LIFETIME_HARD;
+ lt->sadb_lifetime_allocations = 0;
+ lt->sadb_lifetime_bytes = 0;
+ lt->sadb_lifetime_addtime = sp->lifetime;
+ lt->sadb_lifetime_usetime = sp->validtime;
+ m_cat(result, m);
+
+ /*
+ * Note: do not send SADB_X_EXT_NAT_T_* here:
+ * we are sending traffic endpoints.
+ */
+
+ /* set sadb_address for source */
+ m = key_setsadbaddr(SADB_EXT_ADDRESS_SRC,
+ &sp->spidx.src.sa,
+ sp->spidx.prefs, sp->spidx.ul_proto);
+ if (!m) {
+ error = ENOBUFS;
+ goto fail;
+ }
+ m_cat(result, m);
+
+ /* set sadb_address for destination */
+ m = key_setsadbaddr(SADB_EXT_ADDRESS_DST,
+ &sp->spidx.dst.sa,
+ sp->spidx.prefd, sp->spidx.ul_proto);
+ if (!m) {
+ error = ENOBUFS;
+ goto fail;
+ }
+ m_cat(result, m);
+
+ /* set secpolicy */
+ m = key_sp2msg(sp);
+ if (!m) {
+ error = ENOBUFS;
+ goto fail;
+ }
+ m_cat(result, m);
+
+ if ((result->m_flags & M_PKTHDR) == 0) {
+ error = EINVAL;
+ goto fail;
+ }
+
+ if (result->m_len < sizeof(struct sadb_msg)) {
+ result = m_pullup(result, sizeof(struct sadb_msg));
+ if (result == NULL) {
+ error = ENOBUFS;
+ goto fail;
+ }
+ }
+
+ result->m_pkthdr.len = 0;
+ for (m = result; m; m = m->m_next)
+ result->m_pkthdr.len += m->m_len;
+
+ mtod(result, struct sadb_msg *)->sadb_msg_len =
+ PFKEY_UNIT64(result->m_pkthdr.len);
+
+ return key_sendup_mbuf(NULL, result, KEY_SENDUP_REGISTERED);
+
+ fail:
+ if (result)
+ m_freem(result);
+ return error;
+}
+
+/* %%% SAD management */
+/*
+ * Allocate memory for a new SA head and initialize it from saidx.
+ * OUT: NULL : failure due to the lack of memory.
+ * others : pointer to new SA head.
+ */
+static struct secashead *
+key_newsah(saidx)
+ struct secasindex *saidx;
+{
+ struct secashead *newsah;
+
+ IPSEC_ASSERT(saidx != NULL, ("null saidx"));
+
+ newsah = malloc(sizeof(struct secashead), M_IPSEC_SAH, M_NOWAIT|M_ZERO);
+ if (newsah != NULL) {
+ int i;
+ for (i = 0; i < sizeof(newsah->savtree)/sizeof(newsah->savtree[0]); i++)
+ LIST_INIT(&newsah->savtree[i]);
+ newsah->saidx = *saidx;
+
+ /* add to saidxtree */
+ newsah->state = SADB_SASTATE_MATURE;
+
+ SAHTREE_LOCK();
+ LIST_INSERT_HEAD(&V_sahtree, newsah, chain);
+ SAHTREE_UNLOCK();
+ }
+ return(newsah);
+}
+
+/*
+ * Delete the SA index and all SAs registered to it.
+ */
+static void
+key_delsah(sah)
+ struct secashead *sah;
+{
+ struct secasvar *sav, *nextsav;
+ u_int stateidx;
+ int zombie = 0;
+
+ IPSEC_ASSERT(sah != NULL, ("NULL sah"));
+ SAHTREE_LOCK_ASSERT();
+
+ /* search all SAs registered under this SA index. */
+ for (stateidx = 0;
+ stateidx < _ARRAYLEN(saorder_state_any);
+ stateidx++) {
+ u_int state = saorder_state_any[stateidx];
+ LIST_FOREACH_SAFE(sav, &sah->savtree[state], chain, nextsav) {
+ if (sav->refcnt == 0) {
+ /* sanity check */
+ KEY_CHKSASTATE(state, sav->state, __func__);
+ /*
+ * do NOT call KEY_FREESAV here:
+ * it will only delete the sav if refcnt == 1,
+ * whereas we already know that refcnt == 0
+ */
+ key_delsav(sav);
+ } else {
+ /* give up on deleting this SA */
+ zombie++;
+ }
+ }
+ }
+ if (!zombie) { /* delete only if no SAs remain referenced */
+ /* remove from tree of SA index */
+ if (__LIST_CHAINED(sah))
+ LIST_REMOVE(sah, chain);
+ if (sah->route_cache.sa_route.ro_rt) {
+ RTFREE(sah->route_cache.sa_route.ro_rt);
+ sah->route_cache.sa_route.ro_rt = (struct rtentry *)NULL;
+ }
+ free(sah, M_IPSEC_SAH);
+ }
+}
+
+/*
+ * Allocate a new SA in the LARVAL state; called by key_add() and
+ * key_getspi(). Copies the values of mhp into the new buffer.
+ * When the SADB message type is GETSPI:
+ * the sequence number is taken from acq_seq++,
+ * the SPI is set to zero,
+ * and key_setsaval() is not called.
+ * OUT: NULL : fail
+ * others : pointer to new secasvar.
+ *
+ * does not modify mbuf. does not free mbuf on error.
+ */
+static struct secasvar *
+key_newsav(m, mhp, sah, errp, where, tag)
+ struct mbuf *m;
+ const struct sadb_msghdr *mhp;
+ struct secashead *sah;
+ int *errp;
+ const char* where;
+ int tag;
+{
+ struct secasvar *newsav;
+ const struct sadb_sa *xsa;
+
+ IPSEC_ASSERT(m != NULL, ("null mbuf"));
+ IPSEC_ASSERT(mhp != NULL, ("null msghdr"));
+ IPSEC_ASSERT(mhp->msg != NULL, ("null msg"));
+ IPSEC_ASSERT(sah != NULL, ("null secashead"));
+
+ newsav = malloc(sizeof(struct secasvar), M_IPSEC_SA, M_NOWAIT|M_ZERO);
+ if (newsav == NULL) {
+ ipseclog((LOG_DEBUG, "%s: No more memory.\n", __func__));
+ *errp = ENOBUFS;
+ goto done;
+ }
+
+ switch (mhp->msg->sadb_msg_type) {
+ case SADB_GETSPI:
+ newsav->spi = 0;
+
+#ifdef IPSEC_DOSEQCHECK
+ /* sync sequence number */
+ if (mhp->msg->sadb_msg_seq == 0)
+ newsav->seq =
+ (V_acq_seq = (V_acq_seq == ~0 ? 1 : ++V_acq_seq));
+ else
+#endif
+ newsav->seq = mhp->msg->sadb_msg_seq;
+ break;
+
+ case SADB_ADD:
+ /* sanity check */
+ if (mhp->ext[SADB_EXT_SA] == NULL) {
+ free(newsav, M_IPSEC_SA);
+ newsav = NULL;
+ ipseclog((LOG_DEBUG, "%s: invalid message is passed.\n",
+ __func__));
+ *errp = EINVAL;
+ goto done;
+ }
+ xsa = (const struct sadb_sa *)mhp->ext[SADB_EXT_SA];
+ newsav->spi = xsa->sadb_sa_spi;
+ newsav->seq = mhp->msg->sadb_msg_seq;
+ break;
+ default:
+ free(newsav, M_IPSEC_SA);
+ newsav = NULL;
+ *errp = EINVAL;
+ goto done;
+ }
+
+
+ /* copy sav values */
+ if (mhp->msg->sadb_msg_type != SADB_GETSPI) {
+ *errp = key_setsaval(newsav, m, mhp);
+ if (*errp) {
+ free(newsav, M_IPSEC_SA);
+ newsav = NULL;
+ goto done;
+ }
+ }
+
+ SECASVAR_LOCK_INIT(newsav);
+
+ /* reset created */
+ newsav->created = time_second;
+ newsav->pid = mhp->msg->sadb_msg_pid;
+
+ /* add to satree */
+ newsav->sah = sah;
+ sa_initref(newsav);
+ newsav->state = SADB_SASTATE_LARVAL;
+
+ /* XXX locking??? */
+ LIST_INSERT_TAIL(&sah->savtree[SADB_SASTATE_LARVAL], newsav,
+ secasvar, chain);
+done:
+ KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
+ printf("DP %s from %s:%u return SP:%p\n", __func__,
+ where, tag, newsav));
+
+ return newsav;
+}
+
+/*
+ * Release the resources held by an SA entry (keys, schedules, lifetimes).
+ */
+static void
+key_cleansav(struct secasvar *sav)
+{
+ /*
+ * Cleanup xform state. Note that zeroize'ing causes the
+ * keys to be cleared; otherwise we must do it ourself.
+ */
+ if (sav->tdb_xform != NULL) {
+ sav->tdb_xform->xf_zeroize(sav);
+ sav->tdb_xform = NULL;
+ } else {
+ KASSERT(sav->iv == NULL, ("iv but no xform"));
+ if (sav->key_auth != NULL)
+ bzero(sav->key_auth->key_data, _KEYLEN(sav->key_auth));
+ if (sav->key_enc != NULL)
+ bzero(sav->key_enc->key_data, _KEYLEN(sav->key_enc));
+ }
+ if (sav->key_auth != NULL) {
+ if (sav->key_auth->key_data != NULL)
+ free(sav->key_auth->key_data, M_IPSEC_MISC);
+ free(sav->key_auth, M_IPSEC_MISC);
+ sav->key_auth = NULL;
+ }
+ if (sav->key_enc != NULL) {
+ if (sav->key_enc->key_data != NULL)
+ free(sav->key_enc->key_data, M_IPSEC_MISC);
+ free(sav->key_enc, M_IPSEC_MISC);
+ sav->key_enc = NULL;
+ }
+ if (sav->sched) {
+ bzero(sav->sched, sav->schedlen);
+ free(sav->sched, M_IPSEC_MISC);
+ sav->sched = NULL;
+ }
+ if (sav->replay != NULL) {
+ free(sav->replay, M_IPSEC_MISC);
+ sav->replay = NULL;
+ }
+ if (sav->lft_c != NULL) {
+ free(sav->lft_c, M_IPSEC_MISC);
+ sav->lft_c = NULL;
+ }
+ if (sav->lft_h != NULL) {
+ free(sav->lft_h, M_IPSEC_MISC);
+ sav->lft_h = NULL;
+ }
+ if (sav->lft_s != NULL) {
+ free(sav->lft_s, M_IPSEC_MISC);
+ sav->lft_s = NULL;
+ }
+}
+
+/*
+ * free() SA variable entry.
+ */
+static void
+key_delsav(sav)
+ struct secasvar *sav;
+{
+ IPSEC_ASSERT(sav != NULL, ("null sav"));
+ IPSEC_ASSERT(sav->refcnt == 0, ("reference count %u > 0", sav->refcnt));
+
+ /* remove from SA header */
+ if (__LIST_CHAINED(sav))
+ LIST_REMOVE(sav, chain);
+ key_cleansav(sav);
+ SECASVAR_LOCK_DESTROY(sav);
+ free(sav, M_IPSEC_SA);
+}
+
+/*
+ * search SAD.
+ * OUT:
+ * NULL : not found
+ * others : found, pointer to the SA head.
+ */
+static struct secashead *
+key_getsah(saidx)
+ struct secasindex *saidx;
+{
+ struct secashead *sah;
+
+ SAHTREE_LOCK();
+ LIST_FOREACH(sah, &V_sahtree, chain) {
+ if (sah->state == SADB_SASTATE_DEAD)
+ continue;
+ if (key_cmpsaidx(&sah->saidx, saidx, CMP_REQID))
+ break;
+ }
+ SAHTREE_UNLOCK();
+
+ return sah;
+}
+
+/*
+ * Check whether the SPI is already in use.
+ * NOTE: this function is slow because it searches the entire SAD.
+ * OUT:
+ * NULL : not found
+ * others : found, pointer to a SA.
+ */
+static struct secasvar *
+key_checkspidup(saidx, spi)
+ struct secasindex *saidx;
+ u_int32_t spi;
+{
+ struct secashead *sah;
+ struct secasvar *sav;
+
+ /* check address family */
+ if (saidx->src.sa.sa_family != saidx->dst.sa.sa_family) {
+ ipseclog((LOG_DEBUG, "%s: address family mismatched.\n",
+ __func__));
+ return NULL;
+ }
+
+ sav = NULL;
+ /* check all SAD */
+ SAHTREE_LOCK();
+ LIST_FOREACH(sah, &V_sahtree, chain) {
+ if (!key_ismyaddr((struct sockaddr *)&sah->saidx.dst))
+ continue;
+ sav = key_getsavbyspi(sah, spi);
+ if (sav != NULL)
+ break;
+ }
+ SAHTREE_UNLOCK();
+
+ return sav;
+}
+
+/*
+ * Search the SAD for a live SA with the given SPI.
+ * OUT:
+ * NULL : not found
+ * others : found, pointer to a SA.
+ */
+static struct secasvar *
+key_getsavbyspi(sah, spi)
+ struct secashead *sah;
+ u_int32_t spi;
+{
+ struct secasvar *sav;
+ u_int stateidx, state;
+
+ sav = NULL;
+ SAHTREE_LOCK_ASSERT();
+ /* search all status */
+ for (stateidx = 0;
+ stateidx < _ARRAYLEN(saorder_state_alive);
+ stateidx++) {
+
+ state = saorder_state_alive[stateidx];
+ LIST_FOREACH(sav, &sah->savtree[state], chain) {
+
+ /* sanity check */
+ if (sav->state != state) {
+ ipseclog((LOG_DEBUG, "%s: "
+ "invalid sav->state (queue: %d SA: %d)\n",
+ __func__, state, sav->state));
+ continue;
+ }
+
+ if (sav->spi == spi)
+ return sav;
+ }
+ }
+
+ return NULL;
+}
+
+/*
+ * copy SA values from the PF_KEY message, except *SPI, SEQ, PID, STATE and TYPE*.
+ * You must update those yourself if needed.
+ * OUT: 0: success.
+ * !0: failure.
+ *
+ * does not modify mbuf. does not free mbuf on error.
+ */
+static int
+key_setsaval(sav, m, mhp)
+ struct secasvar *sav;
+ struct mbuf *m;
+ const struct sadb_msghdr *mhp;
+{
+ int error = 0;
+
+ IPSEC_ASSERT(m != NULL, ("null mbuf"));
+ IPSEC_ASSERT(mhp != NULL, ("null msghdr"));
+ IPSEC_ASSERT(mhp->msg != NULL, ("null msg"));
+
+ /* initialization */
+ sav->replay = NULL;
+ sav->key_auth = NULL;
+ sav->key_enc = NULL;
+ sav->sched = NULL;
+ sav->schedlen = 0;
+ sav->iv = NULL;
+ sav->lft_c = NULL;
+ sav->lft_h = NULL;
+ sav->lft_s = NULL;
+ sav->tdb_xform = NULL; /* transform */
+ sav->tdb_encalgxform = NULL; /* encoding algorithm */
+ sav->tdb_authalgxform = NULL; /* authentication algorithm */
+ sav->tdb_compalgxform = NULL; /* compression algorithm */
+ /* Initialize even if NAT-T not compiled in: */
+ sav->natt_type = 0;
+ sav->natt_esp_frag_len = 0;
+
+ /* SA */
+ if (mhp->ext[SADB_EXT_SA] != NULL) {
+ const struct sadb_sa *sa0;
+
+ sa0 = (const struct sadb_sa *)mhp->ext[SADB_EXT_SA];
+ if (mhp->extlen[SADB_EXT_SA] < sizeof(*sa0)) {
+ error = EINVAL;
+ goto fail;
+ }
+
+ sav->alg_auth = sa0->sadb_sa_auth;
+ sav->alg_enc = sa0->sadb_sa_encrypt;
+ sav->flags = sa0->sadb_sa_flags;
+
+ /* replay window */
+ if ((sa0->sadb_sa_flags & SADB_X_EXT_OLD) == 0) {
+ sav->replay = (struct secreplay *)
+ malloc(sizeof(struct secreplay)+sa0->sadb_sa_replay, M_IPSEC_MISC, M_NOWAIT|M_ZERO);
+ if (sav->replay == NULL) {
+ ipseclog((LOG_DEBUG, "%s: No more memory.\n",
+ __func__));
+ error = ENOBUFS;
+ goto fail;
+ }
+ if (sa0->sadb_sa_replay != 0)
+ sav->replay->bitmap = (caddr_t)(sav->replay+1);
+ sav->replay->wsize = sa0->sadb_sa_replay;
+ }
+ }
+
+ /* Authentication keys */
+ if (mhp->ext[SADB_EXT_KEY_AUTH] != NULL) {
+ const struct sadb_key *key0;
+ int len;
+
+ key0 = (const struct sadb_key *)mhp->ext[SADB_EXT_KEY_AUTH];
+ len = mhp->extlen[SADB_EXT_KEY_AUTH];
+
+ error = 0;
+ if (len < sizeof(*key0)) {
+ error = EINVAL;
+ goto fail;
+ }
+ switch (mhp->msg->sadb_msg_satype) {
+ case SADB_SATYPE_AH:
+ case SADB_SATYPE_ESP:
+ case SADB_X_SATYPE_TCPSIGNATURE:
+ if (len == PFKEY_ALIGN8(sizeof(struct sadb_key)) &&
+ sav->alg_auth != SADB_X_AALG_NULL)
+ error = EINVAL;
+ break;
+ case SADB_X_SATYPE_IPCOMP:
+ default:
+ error = EINVAL;
+ break;
+ }
+ if (error) {
+ ipseclog((LOG_DEBUG, "%s: invalid key_auth values.\n",
+ __func__));
+ goto fail;
+ }
+
+ sav->key_auth = (struct seckey *)key_dup_keymsg(key0, len,
+ M_IPSEC_MISC);
+ if (sav->key_auth == NULL ) {
+ ipseclog((LOG_DEBUG, "%s: No more memory.\n",
+ __func__));
+ error = ENOBUFS;
+ goto fail;
+ }
+ }
+
+ /* Encryption key */
+ if (mhp->ext[SADB_EXT_KEY_ENCRYPT] != NULL) {
+ const struct sadb_key *key0;
+ int len;
+
+ key0 = (const struct sadb_key *)mhp->ext[SADB_EXT_KEY_ENCRYPT];
+ len = mhp->extlen[SADB_EXT_KEY_ENCRYPT];
+
+ error = 0;
+ if (len < sizeof(*key0)) {
+ error = EINVAL;
+ goto fail;
+ }
+ switch (mhp->msg->sadb_msg_satype) {
+ case SADB_SATYPE_ESP:
+ if (len == PFKEY_ALIGN8(sizeof(struct sadb_key)) &&
+ sav->alg_enc != SADB_EALG_NULL) {
+ error = EINVAL;
+ break;
+ }
+ sav->key_enc = (struct seckey *)key_dup_keymsg(key0,
+ len,
+ M_IPSEC_MISC);
+ if (sav->key_enc == NULL) {
+ ipseclog((LOG_DEBUG, "%s: No more memory.\n",
+ __func__));
+ error = ENOBUFS;
+ goto fail;
+ }
+ break;
+ case SADB_X_SATYPE_IPCOMP:
+ if (len != PFKEY_ALIGN8(sizeof(struct sadb_key)))
+ error = EINVAL;
+ sav->key_enc = NULL; /*just in case*/
+ break;
+ case SADB_SATYPE_AH:
+ case SADB_X_SATYPE_TCPSIGNATURE:
+ default:
+ error = EINVAL;
+ break;
+ }
+ if (error) {
+ ipseclog((LOG_DEBUG, "%s: invalid key_enc value.\n",
+ __func__));
+ goto fail;
+ }
+ }
+
+ /* set iv */
+ sav->ivlen = 0;
+
+ switch (mhp->msg->sadb_msg_satype) {
+ case SADB_SATYPE_AH:
+ error = xform_init(sav, XF_AH);
+ break;
+ case SADB_SATYPE_ESP:
+ error = xform_init(sav, XF_ESP);
+ break;
+ case SADB_X_SATYPE_IPCOMP:
+ error = xform_init(sav, XF_IPCOMP);
+ break;
+ case SADB_X_SATYPE_TCPSIGNATURE:
+ error = xform_init(sav, XF_TCPSIGNATURE);
+ break;
+ }
+ if (error) {
+ ipseclog((LOG_DEBUG, "%s: unable to initialize SA type %u.\n",
+ __func__, mhp->msg->sadb_msg_satype));
+ goto fail;
+ }
+
+ /* reset created */
+ sav->created = time_second;
+
+ /* make lifetime for CURRENT */
+ sav->lft_c = malloc(sizeof(struct seclifetime), M_IPSEC_MISC, M_NOWAIT);
+ if (sav->lft_c == NULL) {
+ ipseclog((LOG_DEBUG, "%s: No more memory.\n", __func__));
+ error = ENOBUFS;
+ goto fail;
+ }
+
+ sav->lft_c->allocations = 0;
+ sav->lft_c->bytes = 0;
+ sav->lft_c->addtime = time_second;
+ sav->lft_c->usetime = 0;
+
+ /* lifetimes for HARD and SOFT */
+ {
+ const struct sadb_lifetime *lft0;
+
+ lft0 = (struct sadb_lifetime *)mhp->ext[SADB_EXT_LIFETIME_HARD];
+ if (lft0 != NULL) {
+ if (mhp->extlen[SADB_EXT_LIFETIME_HARD] < sizeof(*lft0)) {
+ error = EINVAL;
+ goto fail;
+ }
+ sav->lft_h = key_dup_lifemsg(lft0, M_IPSEC_MISC);
+ if (sav->lft_h == NULL) {
+ ipseclog((LOG_DEBUG, "%s: No more memory.\n",__func__));
+ error = ENOBUFS;
+ goto fail;
+ }
+ /* to be initialized? */
+ }
+
+ lft0 = (struct sadb_lifetime *)mhp->ext[SADB_EXT_LIFETIME_SOFT];
+ if (lft0 != NULL) {
+ if (mhp->extlen[SADB_EXT_LIFETIME_SOFT] < sizeof(*lft0)) {
+ error = EINVAL;
+ goto fail;
+ }
+ sav->lft_s = key_dup_lifemsg(lft0, M_IPSEC_MISC);
+ if (sav->lft_s == NULL) {
+ ipseclog((LOG_DEBUG, "%s: No more memory.\n",__func__));
+ error = ENOBUFS;
+ goto fail;
+ }
+ /* to be initialized? */
+ }
+ }
+
+ return 0;
+
+ fail:
+ /* initialization */
+ key_cleansav(sav);
+
+ return error;
+}
+
+/*
+ * Validate a secasvar entry and set its state to SADB_SASTATE_MATURE.
+ * OUT: 0: valid
+ * other: errno
+ */
+static int
+key_mature(struct secasvar *sav)
+{
+ int error;
+
+ /* check SPI value */
+ switch (sav->sah->saidx.proto) {
+ case IPPROTO_ESP:
+ case IPPROTO_AH:
+ /*
+ * RFC 4302, 2.4. Security Parameters Index (SPI), SPI values
+ * 1-255 reserved by IANA for future use,
+ * 0 for implementation specific, local use.
+ */
+ if (ntohl(sav->spi) <= 255) {
+ ipseclog((LOG_DEBUG, "%s: illegal range of SPI %u.\n",
+ __func__, (u_int32_t)ntohl(sav->spi)));
+ return EINVAL;
+ }
+ break;
+ }
+
+ /* check satype */
+ switch (sav->sah->saidx.proto) {
+ case IPPROTO_ESP:
+ /* check flags */
+ if ((sav->flags & (SADB_X_EXT_OLD|SADB_X_EXT_DERIV)) ==
+ (SADB_X_EXT_OLD|SADB_X_EXT_DERIV)) {
+ ipseclog((LOG_DEBUG, "%s: invalid flag (derived) "
+ "given to old-esp.\n", __func__));
+ return EINVAL;
+ }
+ error = xform_init(sav, XF_ESP);
+ break;
+ case IPPROTO_AH:
+ /* check flags */
+ if (sav->flags & SADB_X_EXT_DERIV) {
+ ipseclog((LOG_DEBUG, "%s: invalid flag (derived) "
+ "given to AH SA.\n", __func__));
+ return EINVAL;
+ }
+ if (sav->alg_enc != SADB_EALG_NONE) {
+ ipseclog((LOG_DEBUG, "%s: protocol and algorithm "
+ "mismated.\n", __func__));
+ return(EINVAL);
+ }
+ error = xform_init(sav, XF_AH);
+ break;
+ case IPPROTO_IPCOMP:
+ if (sav->alg_auth != SADB_AALG_NONE) {
+ ipseclog((LOG_DEBUG, "%s: protocol and algorithm "
+ "mismated.\n", __func__));
+ return(EINVAL);
+ }
+ if ((sav->flags & SADB_X_EXT_RAWCPI) == 0
+ && ntohl(sav->spi) >= 0x10000) {
+ ipseclog((LOG_DEBUG, "%s: invalid cpi for IPComp.\n",
+ __func__));
+ return(EINVAL);
+ }
+ error = xform_init(sav, XF_IPCOMP);
+ break;
+ case IPPROTO_TCP:
+ if (sav->alg_enc != SADB_EALG_NONE) {
+ ipseclog((LOG_DEBUG, "%s: protocol and algorithm "
+ "mismated.\n", __func__));
+ return(EINVAL);
+ }
+ error = xform_init(sav, XF_TCPSIGNATURE);
+ break;
+ default:
+ ipseclog((LOG_DEBUG, "%s: Invalid satype.\n", __func__));
+ error = EPROTONOSUPPORT;
+ break;
+ }
+ if (error == 0) {
+ SAHTREE_LOCK();
+ key_sa_chgstate(sav, SADB_SASTATE_MATURE);
+ SAHTREE_UNLOCK();
+ }
+ return (error);
+}
+
+/*
+ * subroutine for SADB_GET and SADB_DUMP.
+ */
+static struct mbuf *
+key_setdumpsa(struct secasvar *sav, u_int8_t type, u_int8_t satype,
+ u_int32_t seq, u_int32_t pid)
+{
+ struct mbuf *result = NULL, *tres = NULL, *m;
+ int i;
+ int dumporder[] = {
+ SADB_EXT_SA, SADB_X_EXT_SA2,
+ SADB_EXT_LIFETIME_HARD, SADB_EXT_LIFETIME_SOFT,
+ SADB_EXT_LIFETIME_CURRENT, SADB_EXT_ADDRESS_SRC,
+ SADB_EXT_ADDRESS_DST, SADB_EXT_ADDRESS_PROXY, SADB_EXT_KEY_AUTH,
+ SADB_EXT_KEY_ENCRYPT, SADB_EXT_IDENTITY_SRC,
+ SADB_EXT_IDENTITY_DST, SADB_EXT_SENSITIVITY,
+#ifdef IPSEC_NAT_T
+ SADB_X_EXT_NAT_T_TYPE,
+ SADB_X_EXT_NAT_T_SPORT, SADB_X_EXT_NAT_T_DPORT,
+ SADB_X_EXT_NAT_T_OAI, SADB_X_EXT_NAT_T_OAR,
+ SADB_X_EXT_NAT_T_FRAG,
+#endif
+ };
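+ /*
+ * The loop below walks dumporder backwards and prepends each
+ * extension via m_cat(m, tres), so after the final m_cat() the
+ * extensions appear in dumporder order.
+ */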
+
+ m = key_setsadbmsg(type, 0, satype, seq, pid, sav->refcnt);
+ if (m == NULL)
+ goto fail;
+ result = m;
+
+ for (i = sizeof(dumporder)/sizeof(dumporder[0]) - 1; i >= 0; i--) {
+ m = NULL;
+ switch (dumporder[i]) {
+ case SADB_EXT_SA:
+ m = key_setsadbsa(sav);
+ if (!m)
+ goto fail;
+ break;
+
+ case SADB_X_EXT_SA2:
+ m = key_setsadbxsa2(sav->sah->saidx.mode,
+ sav->replay ? sav->replay->count : 0,
+ sav->sah->saidx.reqid);
+ if (!m)
+ goto fail;
+ break;
+
+ case SADB_EXT_ADDRESS_SRC:
+ m = key_setsadbaddr(SADB_EXT_ADDRESS_SRC,
+ &sav->sah->saidx.src.sa,
+ FULLMASK, IPSEC_ULPROTO_ANY);
+ if (!m)
+ goto fail;
+ break;
+
+ case SADB_EXT_ADDRESS_DST:
+ m = key_setsadbaddr(SADB_EXT_ADDRESS_DST,
+ &sav->sah->saidx.dst.sa,
+ FULLMASK, IPSEC_ULPROTO_ANY);
+ if (!m)
+ goto fail;
+ break;
+
+ case SADB_EXT_KEY_AUTH:
+ if (!sav->key_auth)
+ continue;
+ m = key_setkey(sav->key_auth, SADB_EXT_KEY_AUTH);
+ if (!m)
+ goto fail;
+ break;
+
+ case SADB_EXT_KEY_ENCRYPT:
+ if (!sav->key_enc)
+ continue;
+ m = key_setkey(sav->key_enc, SADB_EXT_KEY_ENCRYPT);
+ if (!m)
+ goto fail;
+ break;
+
+ case SADB_EXT_LIFETIME_CURRENT:
+ if (!sav->lft_c)
+ continue;
+ m = key_setlifetime(sav->lft_c,
+ SADB_EXT_LIFETIME_CURRENT);
+ if (!m)
+ goto fail;
+ break;
+
+ case SADB_EXT_LIFETIME_HARD:
+ if (!sav->lft_h)
+ continue;
+ m = key_setlifetime(sav->lft_h,
+ SADB_EXT_LIFETIME_HARD);
+ if (!m)
+ goto fail;
+ break;
+
+ case SADB_EXT_LIFETIME_SOFT:
+ if (!sav->lft_s)
+ continue;
+ m = key_setlifetime(sav->lft_s,
+ SADB_EXT_LIFETIME_SOFT);
+
+ if (!m)
+ goto fail;
+ break;
+
+#ifdef IPSEC_NAT_T
+ case SADB_X_EXT_NAT_T_TYPE:
+ m = key_setsadbxtype(sav->natt_type);
+ if (!m)
+ goto fail;
+ break;
+
+ case SADB_X_EXT_NAT_T_DPORT:
+ m = key_setsadbxport(
+ KEY_PORTFROMSADDR(&sav->sah->saidx.dst),
+ SADB_X_EXT_NAT_T_DPORT);
+ if (!m)
+ goto fail;
+ break;
+
+ case SADB_X_EXT_NAT_T_SPORT:
+ m = key_setsadbxport(
+ KEY_PORTFROMSADDR(&sav->sah->saidx.src),
+ SADB_X_EXT_NAT_T_SPORT);
+ if (!m)
+ goto fail;
+ break;
+
+ case SADB_X_EXT_NAT_T_OAI:
+ case SADB_X_EXT_NAT_T_OAR:
+ case SADB_X_EXT_NAT_T_FRAG:
+ /* We do not (yet) support those. */
+ continue;
+#endif
+
+ case SADB_EXT_ADDRESS_PROXY:
+ case SADB_EXT_IDENTITY_SRC:
+ case SADB_EXT_IDENTITY_DST:
+ /* XXX: should these be brought in from the SPD? */
+ case SADB_EXT_SENSITIVITY:
+ default:
+ continue;
+ }
+
+ if (!m)
+ goto fail;
+ if (tres)
+ m_cat(m, tres);
+ tres = m;
+
+ }
+
+ m_cat(result, tres);
+ if (result->m_len < sizeof(struct sadb_msg)) {
+ result = m_pullup(result, sizeof(struct sadb_msg));
+ if (result == NULL)
+ goto fail;
+ }
+
+ result->m_pkthdr.len = 0;
+ for (m = result; m; m = m->m_next)
+ result->m_pkthdr.len += m->m_len;
+
+ mtod(result, struct sadb_msg *)->sadb_msg_len =
+ PFKEY_UNIT64(result->m_pkthdr.len);
+
+ return result;
+
+fail:
+ m_freem(result);
+ m_freem(tres);
+ return NULL;
+}
+
+/*
+ * set data into sadb_msg.
+ */
+static struct mbuf *
+key_setsadbmsg(u_int8_t type, u_int16_t tlen, u_int8_t satype, u_int32_t seq,
+ pid_t pid, u_int16_t reserved)
+{
+ struct mbuf *m;
+ struct sadb_msg *p;
+ int len;
+
+ len = PFKEY_ALIGN8(sizeof(struct sadb_msg));
+ if (len > MCLBYTES)
+ return NULL;
+ MGETHDR(m, M_DONTWAIT, MT_DATA);
+ if (m && len > MHLEN) {
+ MCLGET(m, M_DONTWAIT);
+ if ((m->m_flags & M_EXT) == 0) {
+ m_freem(m);
+ m = NULL;
+ }
+ }
+ if (!m)
+ return NULL;
+ m->m_pkthdr.len = m->m_len = len;
+ m->m_next = NULL;
+
+ p = mtod(m, struct sadb_msg *);
+
+ bzero(p, len);
+ p->sadb_msg_version = PF_KEY_V2;
+ p->sadb_msg_type = type;
+ p->sadb_msg_errno = 0;
+ p->sadb_msg_satype = satype;
+ p->sadb_msg_len = PFKEY_UNIT64(tlen);
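+ /* Callers such as key_setdumpsa() pass sav->refcnt via reserved. */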
+ p->sadb_msg_reserved = reserved;
+ p->sadb_msg_seq = seq;
+ p->sadb_msg_pid = (u_int32_t)pid;
+
+ return m;
+}
+
+/*
+ * copy secasvar data into sadb_sa.
+ */
+static struct mbuf *
+key_setsadbsa(sav)
+ struct secasvar *sav;
+{
+ struct mbuf *m;
+ struct sadb_sa *p;
+ int len;
+
+ len = PFKEY_ALIGN8(sizeof(struct sadb_sa));
+ m = key_alloc_mbuf(len);
+ if (!m || m->m_next) { /*XXX*/
+ if (m)
+ m_freem(m);
+ return NULL;
+ }
+
+ p = mtod(m, struct sadb_sa *);
+
+ bzero(p, len);
+ p->sadb_sa_len = PFKEY_UNIT64(len);
+ p->sadb_sa_exttype = SADB_EXT_SA;
+ p->sadb_sa_spi = sav->spi;
+ p->sadb_sa_replay = (sav->replay != NULL ? sav->replay->wsize : 0);
+ p->sadb_sa_state = sav->state;
+ p->sadb_sa_auth = sav->alg_auth;
+ p->sadb_sa_encrypt = sav->alg_enc;
+ p->sadb_sa_flags = sav->flags;
+
+ return m;
+}
+
+/*
+ * set data into sadb_address.
+ */
+static struct mbuf *
+key_setsadbaddr(u_int16_t exttype, const struct sockaddr *saddr, u_int8_t prefixlen, u_int16_t ul_proto)
+{
+ struct mbuf *m;
+ struct sadb_address *p;
+ size_t len;
+
+ len = PFKEY_ALIGN8(sizeof(struct sadb_address)) +
+ PFKEY_ALIGN8(saddr->sa_len);
+ m = key_alloc_mbuf(len);
+ if (!m || m->m_next) { /*XXX*/
+ if (m)
+ m_freem(m);
+ return NULL;
+ }
+
+ p = mtod(m, struct sadb_address *);
+
+ bzero(p, len);
+ p->sadb_address_len = PFKEY_UNIT64(len);
+ p->sadb_address_exttype = exttype;
+ p->sadb_address_proto = ul_proto;
+ if (prefixlen == FULLMASK) {
+ switch (saddr->sa_family) {
+ case AF_INET:
+ prefixlen = sizeof(struct in_addr) << 3;
+ break;
+ case AF_INET6:
+ prefixlen = sizeof(struct in6_addr) << 3;
+ break;
+ default:
+ ; /*XXX*/
+ }
+ }
+ p->sadb_address_prefixlen = prefixlen;
+ p->sadb_address_reserved = 0;
+
+ bcopy(saddr,
+ mtod(m, caddr_t) + PFKEY_ALIGN8(sizeof(struct sadb_address)),
+ saddr->sa_len);
+
+ return m;
+}
+
+/*
+ * set data into sadb_x_sa2.
+ */
+static struct mbuf *
+key_setsadbxsa2(u_int8_t mode, u_int32_t seq, u_int32_t reqid)
+{
+ struct mbuf *m;
+ struct sadb_x_sa2 *p;
+ size_t len;
+
+ len = PFKEY_ALIGN8(sizeof(struct sadb_x_sa2));
+ m = key_alloc_mbuf(len);
+ if (!m || m->m_next) { /*XXX*/
+ if (m)
+ m_freem(m);
+ return NULL;
+ }
+
+ p = mtod(m, struct sadb_x_sa2 *);
+
+ bzero(p, len);
+ p->sadb_x_sa2_len = PFKEY_UNIT64(len);
+ p->sadb_x_sa2_exttype = SADB_X_EXT_SA2;
+ p->sadb_x_sa2_mode = mode;
+ p->sadb_x_sa2_reserved1 = 0;
+ p->sadb_x_sa2_reserved2 = 0;
+ p->sadb_x_sa2_sequence = seq;
+ p->sadb_x_sa2_reqid = reqid;
+
+ return m;
+}
+
+#ifdef IPSEC_NAT_T
+/*
+ * Set a type in sadb_x_nat_t_type.
+ */
+static struct mbuf *
+key_setsadbxtype(u_int16_t type)
+{
+ struct mbuf *m;
+ size_t len;
+ struct sadb_x_nat_t_type *p;
+
+ len = PFKEY_ALIGN8(sizeof(struct sadb_x_nat_t_type));
+
+ m = key_alloc_mbuf(len);
+ if (!m || m->m_next) { /*XXX*/
+ if (m)
+ m_freem(m);
+ return (NULL);
+ }
+
+ p = mtod(m, struct sadb_x_nat_t_type *);
+
+ bzero(p, len);
+ p->sadb_x_nat_t_type_len = PFKEY_UNIT64(len);
+ p->sadb_x_nat_t_type_exttype = SADB_X_EXT_NAT_T_TYPE;
+ p->sadb_x_nat_t_type_type = type;
+
+ return (m);
+}
+/*
+ * Set a port in sadb_x_nat_t_port.
+ * In contrast to default RFC 2367 behaviour, port is in network byte order.
+ */
+static struct mbuf *
+key_setsadbxport(u_int16_t port, u_int16_t type)
+{
+ struct mbuf *m;
+ size_t len;
+ struct sadb_x_nat_t_port *p;
+
+ len = PFKEY_ALIGN8(sizeof(struct sadb_x_nat_t_port));
+
+ m = key_alloc_mbuf(len);
+ if (!m || m->m_next) { /*XXX*/
+ if (m)
+ m_freem(m);
+ return (NULL);
+ }
+
+ p = mtod(m, struct sadb_x_nat_t_port *);
+
+ bzero(p, len);
+ p->sadb_x_nat_t_port_len = PFKEY_UNIT64(len);
+ p->sadb_x_nat_t_port_exttype = type;
+ p->sadb_x_nat_t_port_port = port;
+
+ return (m);
+}
+
+/*
+ * Get port from sockaddr. Port is in network byte order.
+ */
+u_int16_t
+key_portfromsaddr(struct sockaddr *sa)
+{
+
+ switch (sa->sa_family) {
+#ifdef INET
+ case AF_INET:
+ return ((struct sockaddr_in *)sa)->sin_port;
+#endif
+#ifdef INET6
+ case AF_INET6:
+ return ((struct sockaddr_in6 *)sa)->sin6_port;
+#endif
+ }
+ KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
+ printf("DP %s unexpected address family %d\n",
+ __func__, sa->sa_family));
+ return (0);
+}
+#endif /* IPSEC_NAT_T */
+
+/*
+ * Set port in struct sockaddr. Port is in network byte order.
+ */
+static void
+key_porttosaddr(struct sockaddr *sa, u_int16_t port)
+{
+
+ switch (sa->sa_family) {
+#ifdef INET
+ case AF_INET:
+ ((struct sockaddr_in *)sa)->sin_port = port;
+ break;
+#endif
+#ifdef INET6
+ case AF_INET6:
+ ((struct sockaddr_in6 *)sa)->sin6_port = port;
+ break;
+#endif
+ default:
+ ipseclog((LOG_DEBUG, "%s: unexpected address family %d.\n",
+ __func__, sa->sa_family));
+ break;
+ }
+}
+
+/*
+ * set data into sadb_x_policy
+ */
+static struct mbuf *
+key_setsadbxpolicy(u_int16_t type, u_int8_t dir, u_int32_t id)
+{
+ struct mbuf *m;
+ struct sadb_x_policy *p;
+ size_t len;
+
+ len = PFKEY_ALIGN8(sizeof(struct sadb_x_policy));
+ m = key_alloc_mbuf(len);
+ if (!m || m->m_next) { /*XXX*/
+ if (m)
+ m_freem(m);
+ return NULL;
+ }
+
+ p = mtod(m, struct sadb_x_policy *);
+
+ bzero(p, len);
+ p->sadb_x_policy_len = PFKEY_UNIT64(len);
+ p->sadb_x_policy_exttype = SADB_X_EXT_POLICY;
+ p->sadb_x_policy_type = type;
+ p->sadb_x_policy_dir = dir;
+ p->sadb_x_policy_id = id;
+
+ return m;
+}
+
+/* %%% utilities */
+/* Take a key message (sadb_key) from the socket and turn it into one
+ * of the kernel's key structures (seckey).
+ *
+ * IN: pointer to the source message, its length, and the malloc type
+ * OUT: NULL no more memory
+ */
+struct seckey *
+key_dup_keymsg(const struct sadb_key *src, u_int len,
+ struct malloc_type *type)
+{
+ struct seckey *dst;
+ dst = (struct seckey *)malloc(sizeof(struct seckey), type, M_NOWAIT);
+ if (dst != NULL) {
+ dst->bits = src->sadb_key_bits;
+ dst->key_data = (char *)malloc(len, type, M_NOWAIT);
+ if (dst->key_data != NULL) {
+ bcopy((const char *)src + sizeof(struct sadb_key),
+ dst->key_data, len);
+ } else {
+ ipseclog((LOG_DEBUG, "%s: No more memory.\n",
+ __func__));
+ free(dst, type);
+ dst = NULL;
+ }
+ } else {
+ ipseclog((LOG_DEBUG, "%s: No more memory.\n",
+ __func__));
+
+ }
+ return dst;
+}
+
+/* Take a lifetime message (sadb_lifetime) passed in on a socket and
+ * turn it into one of the kernel's lifetime structures (seclifetime).
+ *
+ * IN: pointer to the source message and the malloc type
+ * OUT: NULL, no more memory
+ */
+
+static struct seclifetime *
+key_dup_lifemsg(const struct sadb_lifetime *src,
+ struct malloc_type *type)
+{
+ struct seclifetime *dst = NULL;
+
+ dst = (struct seclifetime *)malloc(sizeof(struct seclifetime),
+ type, M_NOWAIT);
+ if (dst == NULL) {
+ /* XXX counter */
+ ipseclog((LOG_DEBUG, "%s: No more memory.\n", __func__));
+ } else {
+ dst->allocations = src->sadb_lifetime_allocations;
+ dst->bytes = src->sadb_lifetime_bytes;
+ dst->addtime = src->sadb_lifetime_addtime;
+ dst->usetime = src->sadb_lifetime_usetime;
+ }
+ return dst;
+}
+
+/* check whether the address is one of our own
+ * OUT: 1: true, i.e. my address.
+ * 0: false
+ */
+int
+key_ismyaddr(sa)
+ struct sockaddr *sa;
+{
+#ifdef INET
+ struct sockaddr_in *sin;
+ struct in_ifaddr *ia;
+#endif
+
+ IPSEC_ASSERT(sa != NULL, ("null sockaddr"));
+
+ switch (sa->sa_family) {
+#ifdef INET
+ case AF_INET:
+ sin = (struct sockaddr_in *)sa;
+ IN_IFADDR_RLOCK();
+ for (ia = V_in_ifaddrhead.tqh_first; ia;
+ ia = ia->ia_link.tqe_next)
+ {
+ if (sin->sin_family == ia->ia_addr.sin_family &&
+ sin->sin_len == ia->ia_addr.sin_len &&
+ sin->sin_addr.s_addr == ia->ia_addr.sin_addr.s_addr)
+ {
+ IN_IFADDR_RUNLOCK();
+ return 1;
+ }
+ }
+ IN_IFADDR_RUNLOCK();
+ break;
+#endif
+#ifdef INET6
+ case AF_INET6:
+ return key_ismyaddr6((struct sockaddr_in6 *)sa);
+#endif
+ }
+
+ return 0;
+}
+
+#ifdef INET6
+/*
+ * check whether the IPv6 address is one of our own.
+ * 1: ours
+ * 0: other
+ * NOTE: derived from ip6_input() in KAME; may need further modification.
+ */
+#include <rtems/freebsd/netinet6/in6_var.h>
+
+static int
+key_ismyaddr6(sin6)
+ struct sockaddr_in6 *sin6;
+{
+ struct in6_ifaddr *ia;
+#if 0
+ struct in6_multi *in6m;
+#endif
+
+ IN6_IFADDR_RLOCK();
+ TAILQ_FOREACH(ia, &V_in6_ifaddrhead, ia_link) {
+ if (key_sockaddrcmp((struct sockaddr *)&sin6,
+ (struct sockaddr *)&ia->ia_addr, 0) == 0) {
+ IN6_IFADDR_RUNLOCK();
+ return 1;
+ }
+
+#if 0
+ /*
+ * XXX Multicast
+ * XXX why do we care about multicast here while we don't care
+ * about IPv4 multicast??
+ * XXX scope
+ */
+ in6m = NULL;
+ IN6_LOOKUP_MULTI(sin6->sin6_addr, ia->ia_ifp, in6m);
+ if (in6m) {
+ IN6_IFADDR_RUNLOCK();
+ return 1;
+ }
+#endif
+ }
+ IN6_IFADDR_RUNLOCK();
+
+ /* loopback, just for safety */
+ if (IN6_IS_ADDR_LOOPBACK(&sin6->sin6_addr))
+ return 1;
+
+ return 0;
+}
+#endif /*INET6*/
+
+/*
+ * Compare two secasindex structures.
+ * The flag selects how the comparison is done:
+ * CMP_EXACTLY : mode, reqid and raw addresses must all match.
+ * CMP_MODE_REQID : also check mode (unless IPSEC_MODE_ANY) and reqid.
+ * CMP_REQID : honour a non-zero reqid in saidx1.
+ * CMP_HEAD : compare only protocol and addresses.
+ * Ports are not compared, except for NAT-T tunnel-mode SAs.
+ * IN:
+ * saidx0: source, it can be in SAD.
+ * saidx1: object.
+ * OUT:
+ * 1 : equal
+ * 0 : not equal
+ */
+static int
+key_cmpsaidx(
+ const struct secasindex *saidx0,
+ const struct secasindex *saidx1,
+ int flag)
+{
+ int chkport = 0;
+
+ /* sanity */
+ if (saidx0 == NULL && saidx1 == NULL)
+ return 1;
+
+ if (saidx0 == NULL || saidx1 == NULL)
+ return 0;
+
+ if (saidx0->proto != saidx1->proto)
+ return 0;
+
+ if (flag == CMP_EXACTLY) {
+ if (saidx0->mode != saidx1->mode)
+ return 0;
+ if (saidx0->reqid != saidx1->reqid)
+ return 0;
+ if (bcmp(&saidx0->src, &saidx1->src, saidx0->src.sa.sa_len) != 0 ||
+ bcmp(&saidx0->dst, &saidx1->dst, saidx0->dst.sa.sa_len) != 0)
+ return 0;
+ } else {
+
+ /* CMP_MODE_REQID, CMP_REQID, CMP_HEAD */
+ if (flag == CMP_MODE_REQID
+ ||flag == CMP_REQID) {
+ /*
+ * If reqid of SPD is non-zero, unique SA is required.
+ * The result must be of same reqid in this case.
+ */
+ if (saidx1->reqid != 0 && saidx0->reqid != saidx1->reqid)
+ return 0;
+ }
+
+ if (flag == CMP_MODE_REQID) {
+ if (saidx0->mode != IPSEC_MODE_ANY
+ && saidx0->mode != saidx1->mode)
+ return 0;
+ }
+
+#ifdef IPSEC_NAT_T
+ /*
+ * If NAT-T is enabled, check ports for tunnel mode.
+ * Do not check ports if they are set to zero in the SPD.
+ * Also do not do it for transport mode, as there is no
+ * port information available in the SP.
+ */
+ if (saidx1->mode == IPSEC_MODE_TUNNEL &&
+ saidx1->src.sa.sa_family == AF_INET &&
+ saidx1->dst.sa.sa_family == AF_INET &&
+ ((const struct sockaddr_in *)(&saidx1->src))->sin_port &&
+ ((const struct sockaddr_in *)(&saidx1->dst))->sin_port)
+ chkport = 1;
+#endif /* IPSEC_NAT_T */
+
+ if (key_sockaddrcmp(&saidx0->src.sa, &saidx1->src.sa, chkport) != 0) {
+ return 0;
+ }
+ if (key_sockaddrcmp(&saidx0->dst.sa, &saidx1->dst.sa, chkport) != 0) {
+ return 0;
+ }
+ }
+
+ return 1;
+}
+
+/*
+ * compare two secpolicyindex structures exactly.
+ * IN:
+ * spidx0: source, it is often in SPD.
+ * spidx1: object, it is often from PFKEY message.
+ * OUT:
+ * 1 : equal
+ * 0 : not equal
+ */
+static int
+key_cmpspidx_exactly(
+ struct secpolicyindex *spidx0,
+ struct secpolicyindex *spidx1)
+{
+ /* sanity */
+ if (spidx0 == NULL && spidx1 == NULL)
+ return 1;
+
+ if (spidx0 == NULL || spidx1 == NULL)
+ return 0;
+
+ if (spidx0->prefs != spidx1->prefs
+ || spidx0->prefd != spidx1->prefd
+ || spidx0->ul_proto != spidx1->ul_proto)
+ return 0;
+
+ return key_sockaddrcmp(&spidx0->src.sa, &spidx1->src.sa, 1) == 0 &&
+ key_sockaddrcmp(&spidx0->dst.sa, &spidx1->dst.sa, 1) == 0;
+}
+
+/*
+ * compare two secpolicyindex structures with mask.
+ * IN:
+ * spidx0: source, it is often in SPD.
+ * spidx1: object, it is often from IP header.
+ * OUT:
+ * 1 : equal
+ * 0 : not equal
+ */
+static int
+key_cmpspidx_withmask(
+ struct secpolicyindex *spidx0,
+ struct secpolicyindex *spidx1)
+{
+ /* sanity */
+ if (spidx0 == NULL && spidx1 == NULL)
+ return 1;
+
+ if (spidx0 == NULL || spidx1 == NULL)
+ return 0;
+
+ if (spidx0->src.sa.sa_family != spidx1->src.sa.sa_family ||
+ spidx0->dst.sa.sa_family != spidx1->dst.sa.sa_family ||
+ spidx0->src.sa.sa_len != spidx1->src.sa.sa_len ||
+ spidx0->dst.sa.sa_len != spidx1->dst.sa.sa_len)
+ return 0;
+
+ /* if spidx.ul_proto == IPSEC_ULPROTO_ANY, ignore. */
+ if (spidx0->ul_proto != (u_int16_t)IPSEC_ULPROTO_ANY
+ && spidx0->ul_proto != spidx1->ul_proto)
+ return 0;
+
+ switch (spidx0->src.sa.sa_family) {
+ case AF_INET:
+ if (spidx0->src.sin.sin_port != IPSEC_PORT_ANY
+ && spidx0->src.sin.sin_port != spidx1->src.sin.sin_port)
+ return 0;
+ if (!key_bbcmp(&spidx0->src.sin.sin_addr,
+ &spidx1->src.sin.sin_addr, spidx0->prefs))
+ return 0;
+ break;
+ case AF_INET6:
+ if (spidx0->src.sin6.sin6_port != IPSEC_PORT_ANY
+ && spidx0->src.sin6.sin6_port != spidx1->src.sin6.sin6_port)
+ return 0;
+ /*
+ * scope_id check. if sin6_scope_id is 0, we regard it
+ * as a wildcard scope, which matches any scope zone ID.
+ */
+ if (spidx0->src.sin6.sin6_scope_id &&
+ spidx1->src.sin6.sin6_scope_id &&
+ spidx0->src.sin6.sin6_scope_id != spidx1->src.sin6.sin6_scope_id)
+ return 0;
+ if (!key_bbcmp(&spidx0->src.sin6.sin6_addr,
+ &spidx1->src.sin6.sin6_addr, spidx0->prefs))
+ return 0;
+ break;
+ default:
+ /* XXX */
+ if (bcmp(&spidx0->src, &spidx1->src, spidx0->src.sa.sa_len) != 0)
+ return 0;
+ break;
+ }
+
+ switch (spidx0->dst.sa.sa_family) {
+ case AF_INET:
+ if (spidx0->dst.sin.sin_port != IPSEC_PORT_ANY
+ && spidx0->dst.sin.sin_port != spidx1->dst.sin.sin_port)
+ return 0;
+ if (!key_bbcmp(&spidx0->dst.sin.sin_addr,
+ &spidx1->dst.sin.sin_addr, spidx0->prefd))
+ return 0;
+ break;
+ case AF_INET6:
+ if (spidx0->dst.sin6.sin6_port != IPSEC_PORT_ANY
+ && spidx0->dst.sin6.sin6_port != spidx1->dst.sin6.sin6_port)
+ return 0;
+ /*
+ * scope_id check. if sin6_scope_id is 0, we regard it
+ * as a wildcard scope, which matches any scope zone ID.
+ */
+ if (spidx0->dst.sin6.sin6_scope_id &&
+ spidx1->dst.sin6.sin6_scope_id &&
+ spidx0->dst.sin6.sin6_scope_id != spidx1->dst.sin6.sin6_scope_id)
+ return 0;
+ if (!key_bbcmp(&spidx0->dst.sin6.sin6_addr,
+ &spidx1->dst.sin6.sin6_addr, spidx0->prefd))
+ return 0;
+ break;
+ default:
+ /* XXX */
+ if (bcmp(&spidx0->dst, &spidx1->dst, spidx0->dst.sa.sa_len) != 0)
+ return 0;
+ break;
+ }
+
+ /* XXX Do we check other field ? e.g. flowinfo */
+
+ return 1;
+}
+
+/* returns 0 on match */
+static int
+key_sockaddrcmp(
+ const struct sockaddr *sa1,
+ const struct sockaddr *sa2,
+ int port)
+{
+#ifdef satosin
+#undef satosin
+#endif
+#define satosin(s) ((const struct sockaddr_in *)s)
+#ifdef satosin6
+#undef satosin6
+#endif
+#define satosin6(s) ((const struct sockaddr_in6 *)s)
+ if (sa1->sa_family != sa2->sa_family || sa1->sa_len != sa2->sa_len)
+ return 1;
+
+ switch (sa1->sa_family) {
+ case AF_INET:
+ if (sa1->sa_len != sizeof(struct sockaddr_in))
+ return 1;
+ if (satosin(sa1)->sin_addr.s_addr !=
+ satosin(sa2)->sin_addr.s_addr) {
+ return 1;
+ }
+ if (port && satosin(sa1)->sin_port != satosin(sa2)->sin_port)
+ return 1;
+ break;
+ case AF_INET6:
+ if (sa1->sa_len != sizeof(struct sockaddr_in6))
+ return 1; /*EINVAL*/
+ if (satosin6(sa1)->sin6_scope_id !=
+ satosin6(sa2)->sin6_scope_id) {
+ return 1;
+ }
+ if (!IN6_ARE_ADDR_EQUAL(&satosin6(sa1)->sin6_addr,
+ &satosin6(sa2)->sin6_addr)) {
+ return 1;
+ }
+ if (port &&
+ satosin6(sa1)->sin6_port != satosin6(sa2)->sin6_port) {
+ return 1;
+ }
+ break;
+ default:
+ if (bcmp(sa1, sa2, sa1->sa_len) != 0)
+ return 1;
+ break;
+ }
+
+ return 0;
+#undef satosin
+#undef satosin6
+}
+
+/*
+ * compare two buffers with mask.
+ * IN:
+ * addr1: source
+ * addr2: object
+ * bits: Number of bits to compare
+ * OUT:
+ * 1 : equal
+ * 0 : not equal
+ */
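+/*
+ * Example: key_bbcmp(&a, &b, 26) compares the first three octets and
+ * the top two bits of the fourth octet; with bits == 0 any two
+ * buffers match.
+ */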
+static int
+key_bbcmp(const void *a1, const void *a2, u_int bits)
+{
+ const unsigned char *p1 = a1;
+ const unsigned char *p2 = a2;
+
+ /* XXX: This could be considerably faster if we compared a word
+ * at a time, but that is complicated on little-endian machines */
+
+ /* Handle null pointers */
+ if (p1 == NULL || p2 == NULL)
+ return (p1 == p2);
+
+ while (bits >= 8) {
+ if (*p1++ != *p2++)
+ return 0;
+ bits -= 8;
+ }
+
+ if (bits > 0) {
+ u_int8_t mask = ~((1<<(8-bits))-1);
+ if ((*p1 & mask) != (*p2 & mask))
+ return 0;
+ }
+ return 1; /* Match! */
+}
+
+static void
+key_flush_spd(time_t now)
+{
+ static u_int16_t sptree_scangen = 0;
+ u_int16_t gen = sptree_scangen++;
+ struct secpolicy *sp;
+ u_int dir;
+
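+ /*
+ * sptree_scangen tags each entry visited in this pass so that the
+ * restart loop below skips entries it has already handled.
+ */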
+ /* SPD */
+ for (dir = 0; dir < IPSEC_DIR_MAX; dir++) {
+restart:
+ SPTREE_LOCK();
+ LIST_FOREACH(sp, &V_sptree[dir], chain) {
+ if (sp->scangen == gen) /* previously handled */
+ continue;
+ sp->scangen = gen;
+ if (sp->state == IPSEC_SPSTATE_DEAD &&
+ sp->refcnt == 1) {
+ /*
+ * Ensure that we only decrease refcnt once,
+ * when we're the last consumer.
+ * Directly call SP_DELREF/key_delsp instead
+ * of KEY_FREESP to avoid unlocking/relocking
+ * SPTREE_LOCK before key_delsp: may refcnt
+ * be increased again during that time ?
+ * NB: also clean entries created by
+ * key_spdflush
+ */
+ SP_DELREF(sp);
+ key_delsp(sp);
+ SPTREE_UNLOCK();
+ goto restart;
+ }
+ if (sp->lifetime == 0 && sp->validtime == 0)
+ continue;
+ if ((sp->lifetime && now - sp->created > sp->lifetime)
+ || (sp->validtime && now - sp->lastused > sp->validtime)) {
+ sp->state = IPSEC_SPSTATE_DEAD;
+ SPTREE_UNLOCK();
+ key_spdexpire(sp);
+ goto restart;
+ }
+ }
+ SPTREE_UNLOCK();
+ }
+}
+
+static void
+key_flush_sad(time_t now)
+{
+ struct secashead *sah, *nextsah;
+ struct secasvar *sav, *nextsav;
+
+ /* SAD */
+ SAHTREE_LOCK();
+ LIST_FOREACH_SAFE(sah, &V_sahtree, chain, nextsah) {
+ /* if the sah is dead, delete it and move on to the next one. */
+ if (sah->state == SADB_SASTATE_DEAD) {
+ key_delsah(sah);
+ continue;
+ }
+
+ /* if LARVAL entry doesn't become MATURE, delete it. */
+ LIST_FOREACH_SAFE(sav, &sah->savtree[SADB_SASTATE_LARVAL], chain, nextsav) {
+ /* Need to also check refcnt for a larval SA ??? */
+ if (now - sav->created > V_key_larval_lifetime)
+ KEY_FREESAV(&sav);
+ }
+
+ /*
+ * check whether MATURE entries should start sending
+ * expire messages.
+ */
+ LIST_FOREACH_SAFE(sav, &sah->savtree[SADB_SASTATE_MATURE], chain, nextsav) {
+ /* we don't need to check. */
+ if (sav->lft_s == NULL)
+ continue;
+
+ /* sanity check */
+ if (sav->lft_c == NULL) {
+ ipseclog((LOG_DEBUG,"%s: there is no CURRENT "
+ "time, why?\n", __func__));
+ continue;
+ }
+
+ /* check SOFT lifetime */
+ if (sav->lft_s->addtime != 0 &&
+ now - sav->created > sav->lft_s->addtime) {
+ key_sa_chgstate(sav, SADB_SASTATE_DYING);
+ /*
+ * Actually, only send an expire message if the
+ * SA has been used, as was done before. But
+ * should we always send such a message and let
+ * the IKE daemon decide whether to renegotiate?
+ * XXX the expire message will actually NOT be
+ * sent if the SA is first used only after the
+ * soft lifetime has been reached; see below
+ * (DYING state).
+ */
+ if (sav->lft_c->usetime != 0)
+ key_expire(sav);
+ }
+ /* check SOFT lifetime by bytes */
+ /*
+ * XXX There is no way to delete this SA when a
+ * new SA is installed. Beware of configurations
+ * with a very large time-based lifetime.
+ */
+ else if (sav->lft_s->bytes != 0 &&
+ sav->lft_s->bytes < sav->lft_c->bytes) {
+
+ key_sa_chgstate(sav, SADB_SASTATE_DYING);
+ /*
+ * XXX If we decide to keep sending
+ * expire messages in the DYING
+ * state, remove the code below.
+ */
+ key_expire(sav);
+ }
+ }
+
+ /* check DYING entry to change status to DEAD. */
+ LIST_FOREACH_SAFE(sav, &sah->savtree[SADB_SASTATE_DYING], chain, nextsav) {
+ /* we don't need to check. */
+ if (sav->lft_h == NULL)
+ continue;
+
+ /* sanity check */
+ if (sav->lft_c == NULL) {
+ ipseclog((LOG_DEBUG, "%s: there is no CURRENT "
+ "time, why?\n", __func__));
+ continue;
+ }
+
+ if (sav->lft_h->addtime != 0 &&
+ now - sav->created > sav->lft_h->addtime) {
+ key_sa_chgstate(sav, SADB_SASTATE_DEAD);
+ KEY_FREESAV(&sav);
+ }
+#if 0 /* XXX Should we keep sending expire messages until the HARD lifetime? */
+ else if (sav->lft_s != NULL
+ && sav->lft_s->addtime != 0
+ && now - sav->created > sav->lft_s->addtime) {
+ /*
+ * XXX: should verify that a valid
+ * SA has been installed.
+ */
+
+ /*
+ * If there is no SA, send an
+ * expire message.
+ */
+ key_expire(sav);
+ }
+#endif
+ /* check HARD lifetime by bytes */
+ else if (sav->lft_h->bytes != 0 &&
+ sav->lft_h->bytes < sav->lft_c->bytes) {
+ key_sa_chgstate(sav, SADB_SASTATE_DEAD);
+ KEY_FREESAV(&sav);
+ }
+ }
+
+ /* delete entry in DEAD */
+ LIST_FOREACH_SAFE(sav, &sah->savtree[SADB_SASTATE_DEAD], chain, nextsav) {
+ /* sanity check */
+ if (sav->state != SADB_SASTATE_DEAD) {
+ ipseclog((LOG_DEBUG, "%s: invalid sav->state "
+ "(queue: %d SA: %d): kill it anyway\n",
+ __func__,
+ SADB_SASTATE_DEAD, sav->state));
+ }
+ /*
+ * do not call key_freesav() here.
+ * sav should already be freed, and sav->refcnt
+ * shows other references to sav
+ * (such as from SPD).
+ */
+ }
+ }
+ SAHTREE_UNLOCK();
+}
+
+static void
+key_flush_acq(time_t now)
+{
+ struct secacq *acq, *nextacq;
+
+ /* ACQ tree */
+ ACQ_LOCK();
+ for (acq = LIST_FIRST(&V_acqtree); acq != NULL; acq = nextacq) {
+ nextacq = LIST_NEXT(acq, chain);
+ if (now - acq->created > V_key_blockacq_lifetime
+ && __LIST_CHAINED(acq)) {
+ LIST_REMOVE(acq, chain);
+ free(acq, M_IPSEC_SAQ);
+ }
+ }
+ ACQ_UNLOCK();
+}
+
+static void
+key_flush_spacq(time_t now)
+{
+ struct secspacq *acq, *nextacq;
+
+ /* SP ACQ tree */
+ SPACQ_LOCK();
+ for (acq = LIST_FIRST(&V_spacqtree); acq != NULL; acq = nextacq) {
+ nextacq = LIST_NEXT(acq, chain);
+ if (now - acq->created > V_key_blockacq_lifetime
+ && __LIST_CHAINED(acq)) {
+ LIST_REMOVE(acq, chain);
+ free(acq, M_IPSEC_SAQ);
+ }
+ }
+ SPACQ_UNLOCK();
+}
+
+/*
+ * timer handler.
+ * Scan the SPD and SAD, check the status of each entry,
+ * and remove or expire entries as needed.
+ * XXX: year 2038 problem may remain.
+ */
+void
+key_timehandler(void)
+{
+ VNET_ITERATOR_DECL(vnet_iter);
+ time_t now = time_second;
+
+ VNET_LIST_RLOCK_NOSLEEP();
+ VNET_FOREACH(vnet_iter) {
+ CURVNET_SET(vnet_iter);
+ key_flush_spd(now);
+ key_flush_sad(now);
+ key_flush_acq(now);
+ key_flush_spacq(now);
+ CURVNET_RESTORE();
+ }
+ VNET_LIST_RUNLOCK_NOSLEEP();
+
+#ifndef IPSEC_DEBUG2
+ /* reschedule ourselves for the next tick */
+ (void)timeout((void *)key_timehandler, (void *)0, hz);
+#endif /* IPSEC_DEBUG2 */
+}
+
+u_long
+key_random()
+{
+ u_long value;
+
+ key_randomfill(&value, sizeof(value));
+ return value;
+}
+
+void
+key_randomfill(p, l)
+ void *p;
+ size_t l;
+{
+ size_t n;
+ u_long v;
+ static int warn = 1;
+
+ n = (size_t)read_random(p, (u_int)l);
+ /* last resort */
+ while (n < l) {
+ v = random();
+ bcopy(&v, (u_int8_t *)p + n,
+ l - n < sizeof(v) ? l - n : sizeof(v));
+ n += sizeof(v);
+
+ if (warn) {
+ printf("WARNING: pseudo-random number generator "
+ "used for IPsec processing\n");
+ warn = 0;
+ }
+ }
+}
+
+/*
+ * map SADB_SATYPE_* to IPPROTO_*.
+ * if satype == SADB_SATYPE_UNSPEC it is mapped to IPSEC_PROTO_ANY.
+ * OUT:
+ * 0: invalid satype.
+ */
+static u_int16_t
+key_satype2proto(u_int8_t satype)
+{
+ switch (satype) {
+ case SADB_SATYPE_UNSPEC:
+ return IPSEC_PROTO_ANY;
+ case SADB_SATYPE_AH:
+ return IPPROTO_AH;
+ case SADB_SATYPE_ESP:
+ return IPPROTO_ESP;
+ case SADB_X_SATYPE_IPCOMP:
+ return IPPROTO_IPCOMP;
+ case SADB_X_SATYPE_TCPSIGNATURE:
+ return IPPROTO_TCP;
+ default:
+ return 0;
+ }
+ /* NOTREACHED */
+}
+
+/*
+ * map IPPROTO_* to SADB_SATYPE_*
+ * OUT:
+ * 0: invalid protocol type.
+ */
+static u_int8_t
+key_proto2satype(u_int16_t proto)
+{
+ switch (proto) {
+ case IPPROTO_AH:
+ return SADB_SATYPE_AH;
+ case IPPROTO_ESP:
+ return SADB_SATYPE_ESP;
+ case IPPROTO_IPCOMP:
+ return SADB_X_SATYPE_IPCOMP;
+ case IPPROTO_TCP:
+ return SADB_X_SATYPE_TCPSIGNATURE;
+ default:
+ return 0;
+ }
+ /* NOTREACHED */
+}
+
+/* %%% PF_KEY */
+/*
+ * SADB_GETSPI processing: receive
+ * <base, (SA2), src address, dst address, (SPI range)>
+ * from the IKMPd, assign a unique SPI value, hang the entry on the
+ * INBOUND tree in the LARVAL state, and send
+ * <base, SA(*), address(SD)>
+ * back to the IKMPd.
+ *
+ * IN: mhp: pointer to the parsed message headers.
+ * OUT: 0 on success, an error number otherwise.
+ */
+static int
+key_getspi(so, m, mhp)
+ struct socket *so;
+ struct mbuf *m;
+ const struct sadb_msghdr *mhp;
+{
+ struct sadb_address *src0, *dst0;
+ struct secasindex saidx;
+ struct secashead *newsah;
+ struct secasvar *newsav;
+ u_int8_t proto;
+ u_int32_t spi;
+ u_int8_t mode;
+ u_int32_t reqid;
+ int error;
+
+ IPSEC_ASSERT(so != NULL, ("null socket"));
+ IPSEC_ASSERT(m != NULL, ("null mbuf"));
+ IPSEC_ASSERT(mhp != NULL, ("null msghdr"));
+ IPSEC_ASSERT(mhp->msg != NULL, ("null msg"));
+
+ if (mhp->ext[SADB_EXT_ADDRESS_SRC] == NULL ||
+ mhp->ext[SADB_EXT_ADDRESS_DST] == NULL) {
+ ipseclog((LOG_DEBUG, "%s: invalid message is passed.\n",
+ __func__));
+ return key_senderror(so, m, EINVAL);
+ }
+ if (mhp->extlen[SADB_EXT_ADDRESS_SRC] < sizeof(struct sadb_address) ||
+ mhp->extlen[SADB_EXT_ADDRESS_DST] < sizeof(struct sadb_address)) {
+ ipseclog((LOG_DEBUG, "%s: invalid message is passed.\n",
+ __func__));
+ return key_senderror(so, m, EINVAL);
+ }
+ if (mhp->ext[SADB_X_EXT_SA2] != NULL) {
+ mode = ((struct sadb_x_sa2 *)mhp->ext[SADB_X_EXT_SA2])->sadb_x_sa2_mode;
+ reqid = ((struct sadb_x_sa2 *)mhp->ext[SADB_X_EXT_SA2])->sadb_x_sa2_reqid;
+ } else {
+ mode = IPSEC_MODE_ANY;
+ reqid = 0;
+ }
+
+ src0 = (struct sadb_address *)(mhp->ext[SADB_EXT_ADDRESS_SRC]);
+ dst0 = (struct sadb_address *)(mhp->ext[SADB_EXT_ADDRESS_DST]);
+
+ /* map satype to proto */
+ if ((proto = key_satype2proto(mhp->msg->sadb_msg_satype)) == 0) {
+ ipseclog((LOG_DEBUG, "%s: invalid satype is passed.\n",
+ __func__));
+ return key_senderror(so, m, EINVAL);
+ }
+
+ /*
+ * Make sure the port numbers are zero.
+ * In case of NAT-T we will update them later if needed.
+ */
+ switch (((struct sockaddr *)(src0 + 1))->sa_family) {
+ case AF_INET:
+ if (((struct sockaddr *)(src0 + 1))->sa_len !=
+ sizeof(struct sockaddr_in))
+ return key_senderror(so, m, EINVAL);
+ ((struct sockaddr_in *)(src0 + 1))->sin_port = 0;
+ break;
+ case AF_INET6:
+ if (((struct sockaddr *)(src0 + 1))->sa_len !=
+ sizeof(struct sockaddr_in6))
+ return key_senderror(so, m, EINVAL);
+ ((struct sockaddr_in6 *)(src0 + 1))->sin6_port = 0;
+ break;
+ default:
+ ; /*???*/
+ }
+ switch (((struct sockaddr *)(dst0 + 1))->sa_family) {
+ case AF_INET:
+ if (((struct sockaddr *)(dst0 + 1))->sa_len !=
+ sizeof(struct sockaddr_in))
+ return key_senderror(so, m, EINVAL);
+ ((struct sockaddr_in *)(dst0 + 1))->sin_port = 0;
+ break;
+ case AF_INET6:
+ if (((struct sockaddr *)(dst0 + 1))->sa_len !=
+ sizeof(struct sockaddr_in6))
+ return key_senderror(so, m, EINVAL);
+ ((struct sockaddr_in6 *)(dst0 + 1))->sin6_port = 0;
+ break;
+ default:
+ ; /*???*/
+ }
+
+ /* XXX boundary check against sa_len */
+ KEY_SETSECASIDX(proto, mode, reqid, src0 + 1, dst0 + 1, &saidx);
+
+#ifdef IPSEC_NAT_T
+ /*
+ * Handle NAT-T info if present.
+ * We made sure the port numbers are zero above, so we do
+ * not have to worry in case we do not update them.
+ */
+ if (mhp->ext[SADB_X_EXT_NAT_T_OAI] != NULL)
+ ipseclog((LOG_DEBUG, "%s: NAT-T OAi present\n", __func__));
+ if (mhp->ext[SADB_X_EXT_NAT_T_OAR] != NULL)
+ ipseclog((LOG_DEBUG, "%s: NAT-T OAr present\n", __func__));
+
+ if (mhp->ext[SADB_X_EXT_NAT_T_TYPE] != NULL &&
+ mhp->ext[SADB_X_EXT_NAT_T_SPORT] != NULL &&
+ mhp->ext[SADB_X_EXT_NAT_T_DPORT] != NULL) {
+ struct sadb_x_nat_t_type *type;
+ struct sadb_x_nat_t_port *sport, *dport;
+
+ if (mhp->extlen[SADB_X_EXT_NAT_T_TYPE] < sizeof(*type) ||
+ mhp->extlen[SADB_X_EXT_NAT_T_SPORT] < sizeof(*sport) ||
+ mhp->extlen[SADB_X_EXT_NAT_T_DPORT] < sizeof(*dport)) {
+ ipseclog((LOG_DEBUG, "%s: invalid nat-t message "
+ "passed.\n", __func__));
+ return key_senderror(so, m, EINVAL);
+ }
+
+ sport = (struct sadb_x_nat_t_port *)
+ mhp->ext[SADB_X_EXT_NAT_T_SPORT];
+ dport = (struct sadb_x_nat_t_port *)
+ mhp->ext[SADB_X_EXT_NAT_T_DPORT];
+
+ if (sport)
+ KEY_PORTTOSADDR(&saidx.src, sport->sadb_x_nat_t_port_port);
+ if (dport)
+ KEY_PORTTOSADDR(&saidx.dst, dport->sadb_x_nat_t_port_port);
+ }
+#endif
+
+ /* SPI allocation */
+ spi = key_do_getnewspi((struct sadb_spirange *)mhp->ext[SADB_EXT_SPIRANGE],
+ &saidx);
+ if (spi == 0)
+ return key_senderror(so, m, EINVAL);
+
+ /* get a SA index */
+ if ((newsah = key_getsah(&saidx)) == NULL) {
+ /* create a new SA index */
+ if ((newsah = key_newsah(&saidx)) == NULL) {
+ ipseclog((LOG_DEBUG, "%s: No more memory.\n",__func__));
+ return key_senderror(so, m, ENOBUFS);
+ }
+ }
+
+ /* get a new SA */
+ /* XXX rewrite */
+ newsav = KEY_NEWSAV(m, mhp, newsah, &error);
+ if (newsav == NULL) {
+ /* XXX we do not free the new SA index allocated above. */
+ return key_senderror(so, m, error);
+ }
+
+ /* set spi */
+ newsav->spi = htonl(spi);
+
+ /* delete the entry in acqtree */
+ if (mhp->msg->sadb_msg_seq != 0) {
+ struct secacq *acq;
+ if ((acq = key_getacqbyseq(mhp->msg->sadb_msg_seq)) != NULL) {
+ /* reset the counter so the timehandler will delete it. */
+ acq->created = time_second;
+ acq->count = 0;
+ }
+ }
+
+ {
+ struct mbuf *n, *nn;
+ struct sadb_sa *m_sa;
+ struct sadb_msg *newmsg;
+ int off, len;
+
+ /* create new sadb_msg to reply. */
+ len = PFKEY_ALIGN8(sizeof(struct sadb_msg)) +
+ PFKEY_ALIGN8(sizeof(struct sadb_sa));
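+ /*
+ * Reply layout: the sadb_msg base header copied from the request,
+ * an sadb_sa extension carrying the new SPI, then the ADDRESS_SRC
+ * and ADDRESS_DST extensions gathered from the request.
+ */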
+
+ MGETHDR(n, M_DONTWAIT, MT_DATA);
+ if (n && len > MHLEN) {
+ MCLGET(n, M_DONTWAIT);
+ if ((n->m_flags & M_EXT) == 0) {
+ m_freem(n);
+ n = NULL;
+ }
+ }
+ if (!n)
+ return key_senderror(so, m, ENOBUFS);
+
+ n->m_len = len;
+ n->m_next = NULL;
+ off = 0;
+
+ m_copydata(m, 0, sizeof(struct sadb_msg), mtod(n, caddr_t) + off);
+ off += PFKEY_ALIGN8(sizeof(struct sadb_msg));
+
+ m_sa = (struct sadb_sa *)(mtod(n, caddr_t) + off);
+ m_sa->sadb_sa_len = PFKEY_UNIT64(sizeof(struct sadb_sa));
+ m_sa->sadb_sa_exttype = SADB_EXT_SA;
+ m_sa->sadb_sa_spi = htonl(spi);
+ off += PFKEY_ALIGN8(sizeof(struct sadb_sa));
+
+ IPSEC_ASSERT(off == len,
+ ("length inconsistency (off %u len %u)", off, len));
+
+ n->m_next = key_gather_mbuf(m, mhp, 0, 2, SADB_EXT_ADDRESS_SRC,
+ SADB_EXT_ADDRESS_DST);
+ if (!n->m_next) {
+ m_freem(n);
+ return key_senderror(so, m, ENOBUFS);
+ }
+
+ if (n->m_len < sizeof(struct sadb_msg)) {
+ n = m_pullup(n, sizeof(struct sadb_msg));
+ if (n == NULL)
+ return key_sendup_mbuf(so, m, KEY_SENDUP_ONE);
+ }
+
+ n->m_pkthdr.len = 0;
+ for (nn = n; nn; nn = nn->m_next)
+ n->m_pkthdr.len += nn->m_len;
+
+ newmsg = mtod(n, struct sadb_msg *);
+ newmsg->sadb_msg_seq = newsav->seq;
+ newmsg->sadb_msg_errno = 0;
+ newmsg->sadb_msg_len = PFKEY_UNIT64(n->m_pkthdr.len);
+
+ m_freem(m);
+ return key_sendup_mbuf(so, n, KEY_SENDUP_ONE);
+ }
+}
+
+/*
+ * allocate a new SPI
+ * called by key_getspi().
+ * OUT:
+ * 0: failure.
+ * others: success.
+ */
+static u_int32_t
+key_do_getnewspi(spirange, saidx)
+ struct sadb_spirange *spirange;
+ struct secasindex *saidx;
+{
+ u_int32_t newspi;
+ u_int32_t min, max;
+ int count = V_key_spi_trycnt;
+
+ /* set spi range to allocate */
+ if (spirange != NULL) {
+ min = spirange->sadb_spirange_min;
+ max = spirange->sadb_spirange_max;
+ } else {
+ min = V_key_spi_minval;
+ max = V_key_spi_maxval;
+ }
+ /* IPCOMP CPIs are only 16 bits wide */
+ if (saidx->proto == IPPROTO_IPCOMP) {
+ u_int32_t t;
+ if (min >= 0x10000)
+ min = 0xffff;
+ if (max >= 0x10000)
+ max = 0xffff;
+ if (min > max) {
+ t = min; min = max; max = t;
+ }
+ }
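+ /*
+ * For example, a requested range of [0x100, 0x20000] is clamped to
+ * [0x100, 0xffff] here, since IPCOMP CPIs are only 16 bits wide.
+ */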
+
+ if (min == max) {
+ if (key_checkspidup(saidx, min) != NULL) {
+ ipseclog((LOG_DEBUG, "%s: SPI %u exists already.\n",
+ __func__, min));
+ return 0;
+ }
+
+ count--; /* this counts as one attempt. */
+ newspi = min;
+
+ } else {
+
+ /* init SPI */
+ newspi = 0;
+
+ /* allocate an SPI from the requested range */
+ while (count--) {
+ /* generate a pseudo-random SPI value within the range. */
+ newspi = min + (key_random() % (max - min + 1));
+
+ if (key_checkspidup(saidx, newspi) == NULL)
+ break;
+ }
+
+ if (count == 0 || newspi == 0) {
+ ipseclog((LOG_DEBUG, "%s: to allocate spi is failed.\n",
+ __func__));
+ return 0;
+ }
+ }
+
+ /* statistics */
+ keystat.getspi_count =
+ (keystat.getspi_count + V_key_spi_trycnt - count) / 2;
+
+ return newspi;
+}
+
+/*
+ * SADB_UPDATE processing
+ * receive
+ * <base, SA, (SA2), (lifetime(HSC),) address(SD), (address(P),)
+ * key(AE), (identity(SD),) (sensitivity)>
+ * from the ikmpd, update a secasvar entry whose status is
+ * SADB_SASTATE_LARVAL, and send
+ * <base, SA, (SA2), (lifetime(HSC),) address(SD), (address(P),)
+ * (identity(SD),) (sensitivity)>
+ * to the ikmpd.
+ *
+ * m will always be freed.
+ */
+static int
+key_update(so, m, mhp)
+ struct socket *so;
+ struct mbuf *m;
+ const struct sadb_msghdr *mhp;
+{
+ struct sadb_sa *sa0;
+ struct sadb_address *src0, *dst0;
+#ifdef IPSEC_NAT_T
+ struct sadb_x_nat_t_type *type;
+ struct sadb_x_nat_t_port *sport, *dport;
+ struct sadb_address *iaddr, *raddr;
+ struct sadb_x_nat_t_frag *frag;
+#endif
+ struct secasindex saidx;
+ struct secashead *sah;
+ struct secasvar *sav;
+ u_int16_t proto;
+ u_int8_t mode;
+ u_int32_t reqid;
+ int error;
+
+ IPSEC_ASSERT(so != NULL, ("null socket"));
+ IPSEC_ASSERT(m != NULL, ("null mbuf"));
+ IPSEC_ASSERT(mhp != NULL, ("null msghdr"));
+ IPSEC_ASSERT(mhp->msg != NULL, ("null msg"));
+
+ /* map satype to proto */
+ if ((proto = key_satype2proto(mhp->msg->sadb_msg_satype)) == 0) {
+ ipseclog((LOG_DEBUG, "%s: invalid satype is passed.\n",
+ __func__));
+ return key_senderror(so, m, EINVAL);
+ }
+
+ if (mhp->ext[SADB_EXT_SA] == NULL ||
+ mhp->ext[SADB_EXT_ADDRESS_SRC] == NULL ||
+ mhp->ext[SADB_EXT_ADDRESS_DST] == NULL ||
+ (mhp->msg->sadb_msg_satype == SADB_SATYPE_ESP &&
+ mhp->ext[SADB_EXT_KEY_ENCRYPT] == NULL) ||
+ (mhp->msg->sadb_msg_satype == SADB_SATYPE_AH &&
+ mhp->ext[SADB_EXT_KEY_AUTH] == NULL) ||
+ (mhp->ext[SADB_EXT_LIFETIME_HARD] != NULL &&
+ mhp->ext[SADB_EXT_LIFETIME_SOFT] == NULL) ||
+ (mhp->ext[SADB_EXT_LIFETIME_HARD] == NULL &&
+ mhp->ext[SADB_EXT_LIFETIME_SOFT] != NULL)) {
+ ipseclog((LOG_DEBUG, "%s: invalid message is passed.\n",
+ __func__));
+ return key_senderror(so, m, EINVAL);
+ }
+ if (mhp->extlen[SADB_EXT_SA] < sizeof(struct sadb_sa) ||
+ mhp->extlen[SADB_EXT_ADDRESS_SRC] < sizeof(struct sadb_address) ||
+ mhp->extlen[SADB_EXT_ADDRESS_DST] < sizeof(struct sadb_address)) {
+ ipseclog((LOG_DEBUG, "%s: invalid message is passed.\n",
+ __func__));
+ return key_senderror(so, m, EINVAL);
+ }
+ if (mhp->ext[SADB_X_EXT_SA2] != NULL) {
+ mode = ((struct sadb_x_sa2 *)mhp->ext[SADB_X_EXT_SA2])->sadb_x_sa2_mode;
+ reqid = ((struct sadb_x_sa2 *)mhp->ext[SADB_X_EXT_SA2])->sadb_x_sa2_reqid;
+ } else {
+ mode = IPSEC_MODE_ANY;
+ reqid = 0;
+ }
+ /* XXX boundary checking for other extensions */
+
+ sa0 = (struct sadb_sa *)mhp->ext[SADB_EXT_SA];
+ src0 = (struct sadb_address *)(mhp->ext[SADB_EXT_ADDRESS_SRC]);
+ dst0 = (struct sadb_address *)(mhp->ext[SADB_EXT_ADDRESS_DST]);
+
+ /* XXX boundary check against sa_len */
+ KEY_SETSECASIDX(proto, mode, reqid, src0 + 1, dst0 + 1, &saidx);
+
+ /*
+ * Make sure the port numbers are zero.
+ * In case of NAT-T we will update them later if needed.
+ */
+ KEY_PORTTOSADDR(&saidx.src, 0);
+ KEY_PORTTOSADDR(&saidx.dst, 0);
+
+#ifdef IPSEC_NAT_T
+ /*
+ * Handle NAT-T info if present.
+ */
+ if (mhp->ext[SADB_X_EXT_NAT_T_TYPE] != NULL &&
+ mhp->ext[SADB_X_EXT_NAT_T_SPORT] != NULL &&
+ mhp->ext[SADB_X_EXT_NAT_T_DPORT] != NULL) {
+
+ if (mhp->extlen[SADB_X_EXT_NAT_T_TYPE] < sizeof(*type) ||
+ mhp->extlen[SADB_X_EXT_NAT_T_SPORT] < sizeof(*sport) ||
+ mhp->extlen[SADB_X_EXT_NAT_T_DPORT] < sizeof(*dport)) {
+ ipseclog((LOG_DEBUG, "%s: invalid message.\n",
+ __func__));
+ return key_senderror(so, m, EINVAL);
+ }
+
+ type = (struct sadb_x_nat_t_type *)
+ mhp->ext[SADB_X_EXT_NAT_T_TYPE];
+ sport = (struct sadb_x_nat_t_port *)
+ mhp->ext[SADB_X_EXT_NAT_T_SPORT];
+ dport = (struct sadb_x_nat_t_port *)
+ mhp->ext[SADB_X_EXT_NAT_T_DPORT];
+ } else {
+ type = 0;
+ sport = dport = 0;
+ }
+ if (mhp->ext[SADB_X_EXT_NAT_T_OAI] != NULL &&
+ mhp->ext[SADB_X_EXT_NAT_T_OAR] != NULL) {
+ if (mhp->extlen[SADB_X_EXT_NAT_T_OAI] < sizeof(*iaddr) ||
+ mhp->extlen[SADB_X_EXT_NAT_T_OAR] < sizeof(*raddr)) {
+ ipseclog((LOG_DEBUG, "%s: invalid message\n",
+ __func__));
+ return key_senderror(so, m, EINVAL);
+ }
+ iaddr = (struct sadb_address *)mhp->ext[SADB_X_EXT_NAT_T_OAI];
+ raddr = (struct sadb_address *)mhp->ext[SADB_X_EXT_NAT_T_OAR];
+ ipseclog((LOG_DEBUG, "%s: NAT-T OAi/r present\n", __func__));
+ } else {
+ iaddr = raddr = NULL;
+ }
+ if (mhp->ext[SADB_X_EXT_NAT_T_FRAG] != NULL) {
+ if (mhp->extlen[SADB_X_EXT_NAT_T_FRAG] < sizeof(*frag)) {
+ ipseclog((LOG_DEBUG, "%s: invalid message\n",
+ __func__));
+ return key_senderror(so, m, EINVAL);
+ }
+ frag = (struct sadb_x_nat_t_frag *)
+ mhp->ext[SADB_X_EXT_NAT_T_FRAG];
+ } else {
+ frag = 0;
+ }
+#endif
+
+ /* get a SA header */
+ if ((sah = key_getsah(&saidx)) == NULL) {
+ ipseclog((LOG_DEBUG, "%s: no SA index found.\n", __func__));
+ return key_senderror(so, m, ENOENT);
+ }
+
+ /* set spidx if there */
+ /* XXX rewrite */
+ error = key_setident(sah, m, mhp);
+ if (error)
+ return key_senderror(so, m, error);
+
+ /* find a SA with sequence number. */
+#ifdef IPSEC_DOSEQCHECK
+ if (mhp->msg->sadb_msg_seq != 0
+ && (sav = key_getsavbyseq(sah, mhp->msg->sadb_msg_seq)) == NULL) {
+ ipseclog((LOG_DEBUG, "%s: no larval SA with sequence %u "
+ "exists.\n", __func__, mhp->msg->sadb_msg_seq));
+ return key_senderror(so, m, ENOENT);
+ }
+#else
+ SAHTREE_LOCK();
+ sav = key_getsavbyspi(sah, sa0->sadb_sa_spi);
+ SAHTREE_UNLOCK();
+ if (sav == NULL) {
+ ipseclog((LOG_DEBUG, "%s: no such a SA found (spi:%u)\n",
+ __func__, (u_int32_t)ntohl(sa0->sadb_sa_spi)));
+ return key_senderror(so, m, EINVAL);
+ }
+#endif
+
+ /* validity check */
+ if (sav->sah->saidx.proto != proto) {
+ ipseclog((LOG_DEBUG, "%s: protocol mismatched "
+ "(DB=%u param=%u)\n", __func__,
+ sav->sah->saidx.proto, proto));
+ return key_senderror(so, m, EINVAL);
+ }
+#ifdef IPSEC_DOSEQCHECK
+ if (sav->spi != sa0->sadb_sa_spi) {
+ ipseclog((LOG_DEBUG, "%s: SPI mismatched (DB:%u param:%u)\n",
+ __func__,
+ (u_int32_t)ntohl(sav->spi),
+ (u_int32_t)ntohl(sa0->sadb_sa_spi)));
+ return key_senderror(so, m, EINVAL);
+ }
+#endif
+ if (sav->pid != mhp->msg->sadb_msg_pid) {
+ ipseclog((LOG_DEBUG, "%s: pid mismatched (DB:%u param:%u)\n",
+ __func__, sav->pid, mhp->msg->sadb_msg_pid));
+ return key_senderror(so, m, EINVAL);
+ }
+
+ /* copy sav values */
+ error = key_setsaval(sav, m, mhp);
+ if (error) {
+ KEY_FREESAV(&sav);
+ return key_senderror(so, m, error);
+ }
+
+#ifdef IPSEC_NAT_T
+ /*
+ * Handle more NAT-T info if present,
+ * now that we have a sav to fill.
+ */
+ if (type)
+ sav->natt_type = type->sadb_x_nat_t_type_type;
+
+ if (sport)
+ KEY_PORTTOSADDR(&sav->sah->saidx.src,
+ sport->sadb_x_nat_t_port_port);
+ if (dport)
+ KEY_PORTTOSADDR(&sav->sah->saidx.dst,
+ dport->sadb_x_nat_t_port_port);
+
+#if 0
+ /*
+ * In case SADB_X_EXT_NAT_T_FRAG was not given, leave it at 0.
+ * We should actually check for a minimum MTU here, if we
+ * want to support it in ip_output.
+ */
+ if (frag)
+ sav->natt_esp_frag_len = frag->sadb_x_nat_t_frag_fraglen;
+#endif
+#endif
+
+ /* check that the SA values are mature. */
+ if ((mhp->msg->sadb_msg_errno = key_mature(sav)) != 0) {
+ KEY_FREESAV(&sav);
+ return key_senderror(so, m, 0);
+ }
+
+ {
+ struct mbuf *n;
+
+ /* set msg buf from mhp */
+ n = key_getmsgbuf_x1(m, mhp);
+ if (n == NULL) {
+ ipseclog((LOG_DEBUG, "%s: No more memory.\n", __func__));
+ return key_senderror(so, m, ENOBUFS);
+ }
+
+ m_freem(m);
+ return key_sendup_mbuf(so, n, KEY_SENDUP_ALL);
+ }
+}
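+
+/*
+ * For reference, the larval-SA lifecycle driven by the handlers in this
+ * file (a summary of existing behavior, not a new mechanism):
+ *
+ * SADB_GETSPI -> key_getspi() creates a LARVAL secasvar with an SPI;
+ * SADB_UPDATE -> key_update() looks the entry up by SPI (or by sequence
+ * number under IPSEC_DOSEQCHECK), fills in the keys via key_setsaval()
+ * and promotes the SA via key_mature().
+ */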
+
+/*
+ * search the SAD, by sequence number, for an SA whose state is SADB_SASTATE_LARVAL.
+ * only called by key_update().
+ * OUT:
+ * NULL : not found
+ * others : found, pointer to a SA.
+ */
+#ifdef IPSEC_DOSEQCHECK
+static struct secasvar *
+key_getsavbyseq(sah, seq)
+ struct secashead *sah;
+ u_int32_t seq;
+{
+ struct secasvar *sav;
+ u_int state;
+
+ state = SADB_SASTATE_LARVAL;
+
+ /* search the SAD by sequence number */
+ LIST_FOREACH(sav, &sah->savtree[state], chain) {
+
+ KEY_CHKSASTATE(state, sav->state, __func__);
+
+ if (sav->seq == seq) {
+ sa_addref(sav);
+ KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
+ printf("DP %s cause refcnt++:%d SA:%p\n",
+ __func__, sav->refcnt, sav));
+ return sav;
+ }
+ }
+
+ return NULL;
+}
+#endif
+
+/*
+ * SADB_ADD processing
+ * add an entry to SA database, when received
+ * <base, SA, (SA2), (lifetime(HSC),) address(SD), (address(P),)
+ * key(AE), (identity(SD),) (sensitivity)>
+ * from the ikmpd,
+ * and send
+ * <base, SA, (SA2), (lifetime(HSC),) address(SD), (address(P),)
+ * (identity(SD),) (sensitivity)>
+ * to the ikmpd.
+ *
+ * IGNORE identity and sensitivity messages.
+ *
+ * m will always be freed.
+ */
+static int
+key_add(so, m, mhp)
+ struct socket *so;
+ struct mbuf *m;
+ const struct sadb_msghdr *mhp;
+{
+ struct sadb_sa *sa0;
+ struct sadb_address *src0, *dst0;
+#ifdef IPSEC_NAT_T
+ struct sadb_x_nat_t_type *type;
+ struct sadb_address *iaddr, *raddr;
+ struct sadb_x_nat_t_frag *frag;
+#endif
+ struct secasindex saidx;
+ struct secashead *newsah;
+ struct secasvar *newsav;
+ u_int16_t proto;
+ u_int8_t mode;
+ u_int32_t reqid;
+ int error;
+
+ IPSEC_ASSERT(so != NULL, ("null socket"));
+ IPSEC_ASSERT(m != NULL, ("null mbuf"));
+ IPSEC_ASSERT(mhp != NULL, ("null msghdr"));
+ IPSEC_ASSERT(mhp->msg != NULL, ("null msg"));
+
+ /* map satype to proto */
+ if ((proto = key_satype2proto(mhp->msg->sadb_msg_satype)) == 0) {
+ ipseclog((LOG_DEBUG, "%s: invalid satype is passed.\n",
+ __func__));
+ return key_senderror(so, m, EINVAL);
+ }
+
+ if (mhp->ext[SADB_EXT_SA] == NULL ||
+ mhp->ext[SADB_EXT_ADDRESS_SRC] == NULL ||
+ mhp->ext[SADB_EXT_ADDRESS_DST] == NULL ||
+ (mhp->msg->sadb_msg_satype == SADB_SATYPE_ESP &&
+ mhp->ext[SADB_EXT_KEY_ENCRYPT] == NULL) ||
+ (mhp->msg->sadb_msg_satype == SADB_SATYPE_AH &&
+ mhp->ext[SADB_EXT_KEY_AUTH] == NULL) ||
+ (mhp->ext[SADB_EXT_LIFETIME_HARD] != NULL &&
+ mhp->ext[SADB_EXT_LIFETIME_SOFT] == NULL) ||
+ (mhp->ext[SADB_EXT_LIFETIME_HARD] == NULL &&
+ mhp->ext[SADB_EXT_LIFETIME_SOFT] != NULL)) {
+ ipseclog((LOG_DEBUG, "%s: invalid message is passed.\n",
+ __func__));
+ return key_senderror(so, m, EINVAL);
+ }
+ if (mhp->extlen[SADB_EXT_SA] < sizeof(struct sadb_sa) ||
+ mhp->extlen[SADB_EXT_ADDRESS_SRC] < sizeof(struct sadb_address) ||
+ mhp->extlen[SADB_EXT_ADDRESS_DST] < sizeof(struct sadb_address)) {
+ /* XXX need more */
+ ipseclog((LOG_DEBUG, "%s: invalid message is passed.\n",
+ __func__));
+ return key_senderror(so, m, EINVAL);
+ }
+ if (mhp->ext[SADB_X_EXT_SA2] != NULL) {
+ mode = ((struct sadb_x_sa2 *)mhp->ext[SADB_X_EXT_SA2])->sadb_x_sa2_mode;
+ reqid = ((struct sadb_x_sa2 *)mhp->ext[SADB_X_EXT_SA2])->sadb_x_sa2_reqid;
+ } else {
+ mode = IPSEC_MODE_ANY;
+ reqid = 0;
+ }
+
+ sa0 = (struct sadb_sa *)mhp->ext[SADB_EXT_SA];
+ src0 = (struct sadb_address *)mhp->ext[SADB_EXT_ADDRESS_SRC];
+ dst0 = (struct sadb_address *)mhp->ext[SADB_EXT_ADDRESS_DST];
+
+ /* XXX boundary check against sa_len */
+ KEY_SETSECASIDX(proto, mode, reqid, src0 + 1, dst0 + 1, &saidx);
+
+ /*
+ * Make sure the port numbers are zero.
+ * In case of NAT-T we will update them later if needed.
+ */
+ KEY_PORTTOSADDR(&saidx.src, 0);
+ KEY_PORTTOSADDR(&saidx.dst, 0);
+
+#ifdef IPSEC_NAT_T
+ /*
+ * Handle NAT-T info if present.
+ */
+ if (mhp->ext[SADB_X_EXT_NAT_T_TYPE] != NULL &&
+ mhp->ext[SADB_X_EXT_NAT_T_SPORT] != NULL &&
+ mhp->ext[SADB_X_EXT_NAT_T_DPORT] != NULL) {
+ struct sadb_x_nat_t_port *sport, *dport;
+
+ if (mhp->extlen[SADB_X_EXT_NAT_T_TYPE] < sizeof(*type) ||
+ mhp->extlen[SADB_X_EXT_NAT_T_SPORT] < sizeof(*sport) ||
+ mhp->extlen[SADB_X_EXT_NAT_T_DPORT] < sizeof(*dport)) {
+ ipseclog((LOG_DEBUG, "%s: invalid message.\n",
+ __func__));
+ return key_senderror(so, m, EINVAL);
+ }
+
+ type = (struct sadb_x_nat_t_type *)
+ mhp->ext[SADB_X_EXT_NAT_T_TYPE];
+ sport = (struct sadb_x_nat_t_port *)
+ mhp->ext[SADB_X_EXT_NAT_T_SPORT];
+ dport = (struct sadb_x_nat_t_port *)
+ mhp->ext[SADB_X_EXT_NAT_T_DPORT];
+
+ if (sport)
+ KEY_PORTTOSADDR(&saidx.src,
+ sport->sadb_x_nat_t_port_port);
+ if (dport)
+ KEY_PORTTOSADDR(&saidx.dst,
+ dport->sadb_x_nat_t_port_port);
+ } else {
+ type = 0;
+ }
+ if (mhp->ext[SADB_X_EXT_NAT_T_OAI] != NULL &&
+ mhp->ext[SADB_X_EXT_NAT_T_OAR] != NULL) {
+ if (mhp->extlen[SADB_X_EXT_NAT_T_OAI] < sizeof(*iaddr) ||
+ mhp->extlen[SADB_X_EXT_NAT_T_OAR] < sizeof(*raddr)) {
+ ipseclog((LOG_DEBUG, "%s: invalid message\n",
+ __func__));
+ return key_senderror(so, m, EINVAL);
+ }
+ iaddr = (struct sadb_address *)mhp->ext[SADB_X_EXT_NAT_T_OAI];
+ raddr = (struct sadb_address *)mhp->ext[SADB_X_EXT_NAT_T_OAR];
+ ipseclog((LOG_DEBUG, "%s: NAT-T OAi/r present\n", __func__));
+ } else {
+ iaddr = raddr = NULL;
+ }
+ if (mhp->ext[SADB_X_EXT_NAT_T_FRAG] != NULL) {
+ if (mhp->extlen[SADB_X_EXT_NAT_T_FRAG] < sizeof(*frag)) {
+ ipseclog((LOG_DEBUG, "%s: invalid message\n",
+ __func__));
+ return key_senderror(so, m, EINVAL);
+ }
+ frag = (struct sadb_x_nat_t_frag *)
+ mhp->ext[SADB_X_EXT_NAT_T_FRAG];
+ } else {
+ frag = 0;
+ }
+#endif
+
+ /* get a SA header */
+ if ((newsah = key_getsah(&saidx)) == NULL) {
+ /* create a new SA header */
+ if ((newsah = key_newsah(&saidx)) == NULL) {
+ ipseclog((LOG_DEBUG, "%s: No more memory.\n",__func__));
+ return key_senderror(so, m, ENOBUFS);
+ }
+ }
+
+ /* set spidx if there */
+ /* XXX rewrite */
+ error = key_setident(newsah, m, mhp);
+ if (error) {
+ return key_senderror(so, m, error);
+ }
+
+ /* create new SA entry. */
+ /* We can create a new SA only if the SPI is different. */
+ SAHTREE_LOCK();
+ newsav = key_getsavbyspi(newsah, sa0->sadb_sa_spi);
+ SAHTREE_UNLOCK();
+ if (newsav != NULL) {
+ ipseclog((LOG_DEBUG, "%s: SA already exists.\n", __func__));
+ return key_senderror(so, m, EEXIST);
+ }
+ newsav = KEY_NEWSAV(m, mhp, newsah, &error);
+ if (newsav == NULL) {
+ return key_senderror(so, m, error);
+ }
+
+#ifdef IPSEC_NAT_T
+ /*
+ * Handle more NAT-T info if present,
+ * now that we have a sav to fill.
+ */
+ if (type)
+ newsav->natt_type = type->sadb_x_nat_t_type_type;
+
+#if 0
+ /*
+ * In case SADB_X_EXT_NAT_T_FRAG was not given, leave it at 0.
+ * We should actually check for a minimum MTU here, if we
+ * want to support it in ip_output.
+ */
+ if (frag)
+ newsav->natt_esp_frag_len = frag->sadb_x_nat_t_frag_fraglen;
+#endif
+#endif
+
+ /* check SA values to be mature. */
+ if ((error = key_mature(newsav)) != 0) {
+ KEY_FREESAV(&newsav);
+ return key_senderror(so, m, error);
+ }
+
+ /*
+ * don't call key_freesav() here, as we would like to keep the SA
+ * in the database on success.
+ */
+
+ {
+ struct mbuf *n;
+
+ /* set msg buf from mhp */
+ n = key_getmsgbuf_x1(m, mhp);
+ if (n == NULL) {
+ ipseclog((LOG_DEBUG, "%s: No more memory.\n", __func__));
+ return key_senderror(so, m, ENOBUFS);
+ }
+
+ m_freem(m);
+ return key_sendup_mbuf(so, n, KEY_SENDUP_ALL);
+ }
+}
+
+/* m is retained */
+static int
+key_setident(sah, m, mhp)
+ struct secashead *sah;
+ struct mbuf *m;
+ const struct sadb_msghdr *mhp;
+{
+ const struct sadb_ident *idsrc, *iddst;
+ int idsrclen, iddstlen;
+
+ IPSEC_ASSERT(sah != NULL, ("null secashead"));
+ IPSEC_ASSERT(m != NULL, ("null mbuf"));
+ IPSEC_ASSERT(mhp != NULL, ("null msghdr"));
+ IPSEC_ASSERT(mhp->msg != NULL, ("null msg"));
+
+ /* don't make buffer if not there */
+ if (mhp->ext[SADB_EXT_IDENTITY_SRC] == NULL &&
+ mhp->ext[SADB_EXT_IDENTITY_DST] == NULL) {
+ sah->idents = NULL;
+ sah->identd = NULL;
+ return 0;
+ }
+
+ if (mhp->ext[SADB_EXT_IDENTITY_SRC] == NULL ||
+ mhp->ext[SADB_EXT_IDENTITY_DST] == NULL) {
+ ipseclog((LOG_DEBUG, "%s: invalid identity.\n", __func__));
+ return EINVAL;
+ }
+
+ idsrc = (const struct sadb_ident *)mhp->ext[SADB_EXT_IDENTITY_SRC];
+ iddst = (const struct sadb_ident *)mhp->ext[SADB_EXT_IDENTITY_DST];
+ idsrclen = mhp->extlen[SADB_EXT_IDENTITY_SRC];
+ iddstlen = mhp->extlen[SADB_EXT_IDENTITY_DST];
+
+ /* validity check */
+ if (idsrc->sadb_ident_type != iddst->sadb_ident_type) {
+ ipseclog((LOG_DEBUG, "%s: ident type mismatch.\n", __func__));
+ return EINVAL;
+ }
+
+ switch (idsrc->sadb_ident_type) {
+ case SADB_IDENTTYPE_PREFIX:
+ case SADB_IDENTTYPE_FQDN:
+ case SADB_IDENTTYPE_USERFQDN:
+ default:
+ /* XXX do nothing */
+ sah->idents = NULL;
+ sah->identd = NULL;
+ return 0;
+ }
+
+ /* make structure */
+ sah->idents = malloc(sizeof(struct secident), M_IPSEC_MISC, M_NOWAIT);
+ if (sah->idents == NULL) {
+ ipseclog((LOG_DEBUG, "%s: No more memory.\n", __func__));
+ return ENOBUFS;
+ }
+ sah->identd = malloc(sizeof(struct secident), M_IPSEC_MISC, M_NOWAIT);
+ if (sah->identd == NULL) {
+ free(sah->idents, M_IPSEC_MISC);
+ sah->idents = NULL;
+ ipseclog((LOG_DEBUG, "%s: No more memory.\n", __func__));
+ return ENOBUFS;
+ }
+ sah->idents->type = idsrc->sadb_ident_type;
+ sah->idents->id = idsrc->sadb_ident_id;
+
+ sah->identd->type = iddst->sadb_ident_type;
+ sah->identd->id = iddst->sadb_ident_id;
+
+ return 0;
+}
+
+/*
+ * m will not be freed on return.
+ * it is caller's responsibility to free the result.
+ */
+static struct mbuf *
+key_getmsgbuf_x1(m, mhp)
+ struct mbuf *m;
+ const struct sadb_msghdr *mhp;
+{
+ struct mbuf *n;
+
+ IPSEC_ASSERT(m != NULL, ("null mbuf"));
+ IPSEC_ASSERT(mhp != NULL, ("null msghdr"));
+ IPSEC_ASSERT(mhp->msg != NULL, ("null msg"));
+
+ /* create new sadb_msg to reply. */
+ n = key_gather_mbuf(m, mhp, 1, 9, SADB_EXT_RESERVED,
+ SADB_EXT_SA, SADB_X_EXT_SA2,
+ SADB_EXT_ADDRESS_SRC, SADB_EXT_ADDRESS_DST,
+ SADB_EXT_LIFETIME_HARD, SADB_EXT_LIFETIME_SOFT,
+ SADB_EXT_IDENTITY_SRC, SADB_EXT_IDENTITY_DST);
+ if (!n)
+ return NULL;
+
+ if (n->m_len < sizeof(struct sadb_msg)) {
+ n = m_pullup(n, sizeof(struct sadb_msg));
+ if (n == NULL)
+ return NULL;
+ }
+ mtod(n, struct sadb_msg *)->sadb_msg_errno = 0;
+ mtod(n, struct sadb_msg *)->sadb_msg_len =
+ PFKEY_UNIT64(n->m_pkthdr.len);
+
+ return n;
+}
+
+static int key_delete_all __P((struct socket *, struct mbuf *,
+ const struct sadb_msghdr *, u_int16_t));
+
+/*
+ * SADB_DELETE processing
+ * receive
+ * <base, SA(*), address(SD)>
+ * from the ikmpd, and set SADB_SASTATE_DEAD,
+ * and send,
+ * <base, SA(*), address(SD)>
+ * to the ikmpd.
+ *
+ * m will always be freed.
+ */
+static int
+key_delete(so, m, mhp)
+ struct socket *so;
+ struct mbuf *m;
+ const struct sadb_msghdr *mhp;
+{
+ struct sadb_sa *sa0;
+ struct sadb_address *src0, *dst0;
+ struct secasindex saidx;
+ struct secashead *sah;
+ struct secasvar *sav = NULL;
+ u_int16_t proto;
+
+ IPSEC_ASSERT(so != NULL, ("null socket"));
+ IPSEC_ASSERT(m != NULL, ("null mbuf"));
+ IPSEC_ASSERT(mhp != NULL, ("null msghdr"));
+ IPSEC_ASSERT(mhp->msg != NULL, ("null msg"));
+
+ /* map satype to proto */
+ if ((proto = key_satype2proto(mhp->msg->sadb_msg_satype)) == 0) {
+ ipseclog((LOG_DEBUG, "%s: invalid satype is passed.\n",
+ __func__));
+ return key_senderror(so, m, EINVAL);
+ }
+
+ if (mhp->ext[SADB_EXT_ADDRESS_SRC] == NULL ||
+ mhp->ext[SADB_EXT_ADDRESS_DST] == NULL) {
+ ipseclog((LOG_DEBUG, "%s: invalid message is passed.\n",
+ __func__));
+ return key_senderror(so, m, EINVAL);
+ }
+
+ if (mhp->extlen[SADB_EXT_ADDRESS_SRC] < sizeof(struct sadb_address) ||
+ mhp->extlen[SADB_EXT_ADDRESS_DST] < sizeof(struct sadb_address)) {
+ ipseclog((LOG_DEBUG, "%s: invalid message is passed.\n",
+ __func__));
+ return key_senderror(so, m, EINVAL);
+ }
+
+ if (mhp->ext[SADB_EXT_SA] == NULL) {
+ /*
+ * Caller wants us to delete all non-LARVAL SAs
+ * that match the src/dst. This is used during
+ * IKE INITIAL-CONTACT.
+ */
+ ipseclog((LOG_DEBUG, "%s: doing delete all.\n", __func__));
+ return key_delete_all(so, m, mhp, proto);
+ } else if (mhp->extlen[SADB_EXT_SA] < sizeof(struct sadb_sa)) {
+ ipseclog((LOG_DEBUG, "%s: invalid message is passed.\n",
+ __func__));
+ return key_senderror(so, m, EINVAL);
+ }
+
+ sa0 = (struct sadb_sa *)mhp->ext[SADB_EXT_SA];
+ src0 = (struct sadb_address *)(mhp->ext[SADB_EXT_ADDRESS_SRC]);
+ dst0 = (struct sadb_address *)(mhp->ext[SADB_EXT_ADDRESS_DST]);
+
+ /* XXX boundary check against sa_len */
+ KEY_SETSECASIDX(proto, IPSEC_MODE_ANY, 0, src0 + 1, dst0 + 1, &saidx);
+
+ /*
+ * Make sure the port numbers are zero.
+ * In case of NAT-T we will update them later if needed.
+ */
+ KEY_PORTTOSADDR(&saidx.src, 0);
+ KEY_PORTTOSADDR(&saidx.dst, 0);
+
+#ifdef IPSEC_NAT_T
+ /*
+ * Handle NAT-T info if present.
+ */
+ if (mhp->ext[SADB_X_EXT_NAT_T_SPORT] != NULL &&
+ mhp->ext[SADB_X_EXT_NAT_T_DPORT] != NULL) {
+ struct sadb_x_nat_t_port *sport, *dport;
+
+ if (mhp->extlen[SADB_X_EXT_NAT_T_SPORT] < sizeof(*sport) ||
+ mhp->extlen[SADB_X_EXT_NAT_T_DPORT] < sizeof(*dport)) {
+ ipseclog((LOG_DEBUG, "%s: invalid message.\n",
+ __func__));
+ return key_senderror(so, m, EINVAL);
+ }
+
+ sport = (struct sadb_x_nat_t_port *)
+ mhp->ext[SADB_X_EXT_NAT_T_SPORT];
+ dport = (struct sadb_x_nat_t_port *)
+ mhp->ext[SADB_X_EXT_NAT_T_DPORT];
+
+ if (sport)
+ KEY_PORTTOSADDR(&saidx.src,
+ sport->sadb_x_nat_t_port_port);
+ if (dport)
+ KEY_PORTTOSADDR(&saidx.dst,
+ dport->sadb_x_nat_t_port_port);
+ }
+#endif
+
+ /* get a SA header */
+ SAHTREE_LOCK();
+ LIST_FOREACH(sah, &V_sahtree, chain) {
+ if (sah->state == SADB_SASTATE_DEAD)
+ continue;
+ if (key_cmpsaidx(&sah->saidx, &saidx, CMP_HEAD) == 0)
+ continue;
+
+ /* get a SA with SPI. */
+ sav = key_getsavbyspi(sah, sa0->sadb_sa_spi);
+ if (sav)
+ break;
+ }
+ if (sah == NULL) {
+ SAHTREE_UNLOCK();
+ ipseclog((LOG_DEBUG, "%s: no SA found.\n", __func__));
+ return key_senderror(so, m, ENOENT);
+ }
+
+ key_sa_chgstate(sav, SADB_SASTATE_DEAD);
+ SAHTREE_UNLOCK();
+ KEY_FREESAV(&sav);
+
+ {
+ struct mbuf *n;
+ struct sadb_msg *newmsg;
+
+ /* create new sadb_msg to reply. */
+ /* XXX-BZ NAT-T extensions? */
+ n = key_gather_mbuf(m, mhp, 1, 4, SADB_EXT_RESERVED,
+ SADB_EXT_SA, SADB_EXT_ADDRESS_SRC, SADB_EXT_ADDRESS_DST);
+ if (!n)
+ return key_senderror(so, m, ENOBUFS);
+
+ if (n->m_len < sizeof(struct sadb_msg)) {
+ n = m_pullup(n, sizeof(struct sadb_msg));
+ if (n == NULL)
+ return key_senderror(so, m, ENOBUFS);
+ }
+ newmsg = mtod(n, struct sadb_msg *);
+ newmsg->sadb_msg_errno = 0;
+ newmsg->sadb_msg_len = PFKEY_UNIT64(n->m_pkthdr.len);
+
+ m_freem(m);
+ return key_sendup_mbuf(so, n, KEY_SENDUP_ALL);
+ }
+}
+
+/*
+ * delete all SAs for src/dst. Called from key_delete().
+ */
+static int
+key_delete_all(struct socket *so, struct mbuf *m, const struct sadb_msghdr *mhp,
+ u_int16_t proto)
+{
+ struct sadb_address *src0, *dst0;
+ struct secasindex saidx;
+ struct secashead *sah;
+ struct secasvar *sav, *nextsav;
+ u_int stateidx, state;
+
+ src0 = (struct sadb_address *)(mhp->ext[SADB_EXT_ADDRESS_SRC]);
+ dst0 = (struct sadb_address *)(mhp->ext[SADB_EXT_ADDRESS_DST]);
+
+ /* XXX boundary check against sa_len */
+ KEY_SETSECASIDX(proto, IPSEC_MODE_ANY, 0, src0 + 1, dst0 + 1, &saidx);
+
+ /*
+ * Make sure the port numbers are zero.
+ * In case of NAT-T we will update them later if needed.
+ */
+ KEY_PORTTOSADDR(&saidx.src, 0);
+ KEY_PORTTOSADDR(&saidx.dst, 0);
+
+#ifdef IPSEC_NAT_T
+ /*
+ * Handle NAT-T info if present.
+ */
+
+ if (mhp->ext[SADB_X_EXT_NAT_T_SPORT] != NULL &&
+ mhp->ext[SADB_X_EXT_NAT_T_DPORT] != NULL) {
+ struct sadb_x_nat_t_port *sport, *dport;
+
+ if (mhp->extlen[SADB_X_EXT_NAT_T_SPORT] < sizeof(*sport) ||
+ mhp->extlen[SADB_X_EXT_NAT_T_DPORT] < sizeof(*dport)) {
+ ipseclog((LOG_DEBUG, "%s: invalid message.\n",
+ __func__));
+ return key_senderror(so, m, EINVAL);
+ }
+
+ sport = (struct sadb_x_nat_t_port *)
+ mhp->ext[SADB_X_EXT_NAT_T_SPORT];
+ dport = (struct sadb_x_nat_t_port *)
+ mhp->ext[SADB_X_EXT_NAT_T_DPORT];
+
+ if (sport)
+ KEY_PORTTOSADDR(&saidx.src,
+ sport->sadb_x_nat_t_port_port);
+ if (dport)
+ KEY_PORTTOSADDR(&saidx.dst,
+ dport->sadb_x_nat_t_port_port);
+ }
+#endif
+
+ SAHTREE_LOCK();
+ LIST_FOREACH(sah, &V_sahtree, chain) {
+ if (sah->state == SADB_SASTATE_DEAD)
+ continue;
+ if (key_cmpsaidx(&sah->saidx, &saidx, CMP_HEAD) == 0)
+ continue;
+
+ /* Delete all non-LARVAL SAs. */
+ for (stateidx = 0;
+ stateidx < _ARRAYLEN(saorder_state_alive);
+ stateidx++) {
+ state = saorder_state_alive[stateidx];
+ if (state == SADB_SASTATE_LARVAL)
+ continue;
+ for (sav = LIST_FIRST(&sah->savtree[state]);
+ sav != NULL; sav = nextsav) {
+ nextsav = LIST_NEXT(sav, chain);
+ /* sanity check */
+ if (sav->state != state) {
+ ipseclog((LOG_DEBUG, "%s: invalid "
+ "sav->state (queue %d SA %d)\n",
+ __func__, state, sav->state));
+ continue;
+ }
+
+ key_sa_chgstate(sav, SADB_SASTATE_DEAD);
+ KEY_FREESAV(&sav);
+ }
+ }
+ }
+ SAHTREE_UNLOCK();
+ {
+ struct mbuf *n;
+ struct sadb_msg *newmsg;
+
+ /* create new sadb_msg to reply. */
+ /* XXX-BZ NAT-T extensions? */
+ n = key_gather_mbuf(m, mhp, 1, 3, SADB_EXT_RESERVED,
+ SADB_EXT_ADDRESS_SRC, SADB_EXT_ADDRESS_DST);
+ if (!n)
+ return key_senderror(so, m, ENOBUFS);
+
+ if (n->m_len < sizeof(struct sadb_msg)) {
+ n = m_pullup(n, sizeof(struct sadb_msg));
+ if (n == NULL)
+ return key_senderror(so, m, ENOBUFS);
+ }
+ newmsg = mtod(n, struct sadb_msg *);
+ newmsg->sadb_msg_errno = 0;
+ newmsg->sadb_msg_len = PFKEY_UNIT64(n->m_pkthdr.len);
+
+ m_freem(m);
+ return key_sendup_mbuf(so, n, KEY_SENDUP_ALL);
+ }
+}
+
+/*
+ * SADB_GET processing
+ * receive
+ * <base, SA(*), address(SD)>
+ * from the ikmpd, and get a SP and a SA to respond,
+ * and send,
+ * <base, SA, (lifetime(HSC),) address(SD), (address(P),) key(AE),
+ * (identity(SD),) (sensitivity)>
+ * to the ikmpd.
+ *
+ * m will always be freed.
+ */
+static int
+key_get(so, m, mhp)
+ struct socket *so;
+ struct mbuf *m;
+ const struct sadb_msghdr *mhp;
+{
+ struct sadb_sa *sa0;
+ struct sadb_address *src0, *dst0;
+ struct secasindex saidx;
+ struct secashead *sah;
+ struct secasvar *sav = NULL;
+ u_int16_t proto;
+
+ IPSEC_ASSERT(so != NULL, ("null socket"));
+ IPSEC_ASSERT(m != NULL, ("null mbuf"));
+ IPSEC_ASSERT(mhp != NULL, ("null msghdr"));
+ IPSEC_ASSERT(mhp->msg != NULL, ("null msg"));
+
+ /* map satype to proto */
+ if ((proto = key_satype2proto(mhp->msg->sadb_msg_satype)) == 0) {
+ ipseclog((LOG_DEBUG, "%s: invalid satype is passed.\n",
+ __func__));
+ return key_senderror(so, m, EINVAL);
+ }
+
+ if (mhp->ext[SADB_EXT_SA] == NULL ||
+ mhp->ext[SADB_EXT_ADDRESS_SRC] == NULL ||
+ mhp->ext[SADB_EXT_ADDRESS_DST] == NULL) {
+ ipseclog((LOG_DEBUG, "%s: invalid message is passed.\n",
+ __func__));
+ return key_senderror(so, m, EINVAL);
+ }
+ if (mhp->extlen[SADB_EXT_SA] < sizeof(struct sadb_sa) ||
+ mhp->extlen[SADB_EXT_ADDRESS_SRC] < sizeof(struct sadb_address) ||
+ mhp->extlen[SADB_EXT_ADDRESS_DST] < sizeof(struct sadb_address)) {
+ ipseclog((LOG_DEBUG, "%s: invalid message is passed.\n",
+ __func__));
+ return key_senderror(so, m, EINVAL);
+ }
+
+ sa0 = (struct sadb_sa *)mhp->ext[SADB_EXT_SA];
+ src0 = (struct sadb_address *)mhp->ext[SADB_EXT_ADDRESS_SRC];
+ dst0 = (struct sadb_address *)mhp->ext[SADB_EXT_ADDRESS_DST];
+
+ /* XXX boundary check against sa_len */
+ KEY_SETSECASIDX(proto, IPSEC_MODE_ANY, 0, src0 + 1, dst0 + 1, &saidx);
+
+ /*
+ * Make sure the port numbers are zero.
+ * In case of NAT-T we will update them later if needed.
+ */
+ KEY_PORTTOSADDR(&saidx.src, 0);
+ KEY_PORTTOSADDR(&saidx.dst, 0);
+
+#ifdef IPSEC_NAT_T
+ /*
+ * Handle NAT-T info if present.
+ */
+
+ if (mhp->ext[SADB_X_EXT_NAT_T_SPORT] != NULL &&
+ mhp->ext[SADB_X_EXT_NAT_T_DPORT] != NULL) {
+ struct sadb_x_nat_t_port *sport, *dport;
+
+ if (mhp->extlen[SADB_X_EXT_NAT_T_SPORT] < sizeof(*sport) ||
+ mhp->extlen[SADB_X_EXT_NAT_T_DPORT] < sizeof(*dport)) {
+ ipseclog((LOG_DEBUG, "%s: invalid message.\n",
+ __func__));
+ return key_senderror(so, m, EINVAL);
+ }
+
+ sport = (struct sadb_x_nat_t_port *)
+ mhp->ext[SADB_X_EXT_NAT_T_SPORT];
+ dport = (struct sadb_x_nat_t_port *)
+ mhp->ext[SADB_X_EXT_NAT_T_DPORT];
+
+ if (sport)
+ KEY_PORTTOSADDR(&saidx.src,
+ sport->sadb_x_nat_t_port_port);
+ if (dport)
+ KEY_PORTTOSADDR(&saidx.dst,
+ dport->sadb_x_nat_t_port_port);
+ }
+#endif
+
+ /* get a SA header */
+ SAHTREE_LOCK();
+ LIST_FOREACH(sah, &V_sahtree, chain) {
+ if (sah->state == SADB_SASTATE_DEAD)
+ continue;
+ if (key_cmpsaidx(&sah->saidx, &saidx, CMP_HEAD) == 0)
+ continue;
+
+ /* get a SA with SPI. */
+ sav = key_getsavbyspi(sah, sa0->sadb_sa_spi);
+ if (sav)
+ break;
+ }
+ SAHTREE_UNLOCK();
+ if (sah == NULL) {
+ ipseclog((LOG_DEBUG, "%s: no SA found.\n", __func__));
+ return key_senderror(so, m, ENOENT);
+ }
+
+ {
+ struct mbuf *n;
+ u_int8_t satype;
+
+ /* map proto to satype */
+ if ((satype = key_proto2satype(sah->saidx.proto)) == 0) {
+ ipseclog((LOG_DEBUG, "%s: there was invalid proto in SAD.\n",
+ __func__));
+ return key_senderror(so, m, EINVAL);
+ }
+
+ /* create new sadb_msg to reply. */
+ n = key_setdumpsa(sav, SADB_GET, satype, mhp->msg->sadb_msg_seq,
+ mhp->msg->sadb_msg_pid);
+ if (!n)
+ return key_senderror(so, m, ENOBUFS);
+
+ m_freem(m);
+ return key_sendup_mbuf(so, n, KEY_SENDUP_ONE);
+ }
+}
+
+/* XXX make it sysctl-configurable? */
+static void
+key_getcomb_setlifetime(comb)
+ struct sadb_comb *comb;
+{
+
+ comb->sadb_comb_soft_allocations = 1;
+ comb->sadb_comb_hard_allocations = 1;
+ comb->sadb_comb_soft_bytes = 0;
+ comb->sadb_comb_hard_bytes = 0;
+ comb->sadb_comb_hard_addtime = 86400; /* 1 day */
+ comb->sadb_comb_soft_addtime = comb->sadb_comb_soft_addtime * 80 / 100;
+ comb->sadb_comb_soft_usetime = 28800; /* 8 hours */
+ comb->sadb_comb_hard_usetime = comb->sadb_comb_hard_usetime * 80 / 100;
+}
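+
+/*
+ * Note on the 80/100 scaling above: sadb_comb_soft_addtime and
+ * sadb_comb_hard_usetime are scaled from their own values, which the
+ * callers have just bzero'ed, so both end up 0. If the intent was to
+ * derive each soft limit from the corresponding hard limit (an
+ * assumption, not what the committed code does), it would read:
+ */
+#if 0
+ comb->sadb_comb_soft_addtime = comb->sadb_comb_hard_addtime * 80 / 100;
+#endif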
+
+/*
+ * XXX reorder combinations by preference
+ * XXX no idea if the user wants ESP authentication or not
+ */
+static struct mbuf *
+key_getcomb_esp()
+{
+ struct sadb_comb *comb;
+ struct enc_xform *algo;
+ struct mbuf *result = NULL, *m, *n;
+ int encmin;
+ int i, off, o;
+ int totlen;
+ const int l = PFKEY_ALIGN8(sizeof(struct sadb_comb));
+
+ m = NULL;
+ for (i = 1; i <= SADB_EALG_MAX; i++) {
+ algo = esp_algorithm_lookup(i);
+ if (algo == NULL)
+ continue;
+
+ /* discard algorithms with key size smaller than system min */
+ if (_BITS(algo->maxkey) < V_ipsec_esp_keymin)
+ continue;
+ if (_BITS(algo->minkey) < V_ipsec_esp_keymin)
+ encmin = V_ipsec_esp_keymin;
+ else
+ encmin = _BITS(algo->minkey);
+
+ if (V_ipsec_esp_auth)
+ m = key_getcomb_ah();
+ else {
+ IPSEC_ASSERT(l <= MLEN,
+ ("l=%u > MLEN=%lu", l, (u_long) MLEN));
+ MGET(m, M_DONTWAIT, MT_DATA);
+ if (m) {
+ M_ALIGN(m, l);
+ m->m_len = l;
+ m->m_next = NULL;
+ bzero(mtod(m, caddr_t), m->m_len);
+ }
+ }
+ if (!m)
+ goto fail;
+
+ totlen = 0;
+ for (n = m; n; n = n->m_next)
+ totlen += n->m_len;
+ IPSEC_ASSERT((totlen % l) == 0, ("totlen=%u, l=%u", totlen, l));
+
+ for (off = 0; off < totlen; off += l) {
+ n = m_pulldown(m, off, l, &o);
+ if (!n) {
+ /* m is already freed */
+ goto fail;
+ }
+ comb = (struct sadb_comb *)(mtod(n, caddr_t) + o);
+ bzero(comb, sizeof(*comb));
+ key_getcomb_setlifetime(comb);
+ comb->sadb_comb_encrypt = i;
+ comb->sadb_comb_encrypt_minbits = encmin;
+ comb->sadb_comb_encrypt_maxbits = _BITS(algo->maxkey);
+ }
+
+ if (!result)
+ result = m;
+ else
+ m_cat(result, m);
+ }
+
+ return result;
+
+ fail:
+ if (result)
+ m_freem(result);
+ return NULL;
+}
+
+static void
+key_getsizes_ah(
+ const struct auth_hash *ah,
+ int alg,
+ u_int16_t* min,
+ u_int16_t* max)
+{
+
+ *min = *max = ah->keysize;
+ if (ah->keysize == 0) {
+ /*
+ * Transform takes arbitrary key size but algorithm
+ * key size is restricted. Enforce this here.
+ */
+ switch (alg) {
+ case SADB_X_AALG_MD5: *min = *max = 16; break;
+ case SADB_X_AALG_SHA: *min = *max = 20; break;
+ case SADB_X_AALG_NULL: *min = 1; *max = 256; break;
+ default:
+ DPRINTF(("%s: unknown AH algorithm %u\n",
+ __func__, alg));
+ break;
+ }
+ }
+}
+
+/*
+ * XXX reorder combinations by preference
+ */
+static struct mbuf *
+key_getcomb_ah()
+{
+ struct sadb_comb *comb;
+ struct auth_hash *algo;
+ struct mbuf *m;
+ u_int16_t minkeysize, maxkeysize;
+ int i;
+ const int l = PFKEY_ALIGN8(sizeof(struct sadb_comb));
+
+ m = NULL;
+ for (i = 1; i <= SADB_AALG_MAX; i++) {
+#if 1
+ /* we prefer HMAC algorithms, not old algorithms */
+ if (i != SADB_AALG_SHA1HMAC && i != SADB_AALG_MD5HMAC)
+ continue;
+#endif
+ algo = ah_algorithm_lookup(i);
+ if (!algo)
+ continue;
+ key_getsizes_ah(algo, i, &minkeysize, &maxkeysize);
+ /* discard algorithms with key size smaller than system min */
+ if (_BITS(minkeysize) < V_ipsec_ah_keymin)
+ continue;
+
+ if (!m) {
+ IPSEC_ASSERT(l <= MLEN,
+ ("l=%u > MLEN=%lu", l, (u_long) MLEN));
+ MGET(m, M_DONTWAIT, MT_DATA);
+ if (m) {
+ M_ALIGN(m, l);
+ m->m_len = l;
+ m->m_next = NULL;
+ }
+ } else
+ M_PREPEND(m, l, M_DONTWAIT);
+ if (!m)
+ return NULL;
+
+ comb = mtod(m, struct sadb_comb *);
+ bzero(comb, sizeof(*comb));
+ key_getcomb_setlifetime(comb);
+ comb->sadb_comb_auth = i;
+ comb->sadb_comb_auth_minbits = _BITS(minkeysize);
+ comb->sadb_comb_auth_maxbits = _BITS(maxkeysize);
+ }
+
+ return m;
+}
+
+/*
+ * Not really an official behavior; discussed on pf_key@inner.net in Sep 2000.
+ * XXX reorder combinations by preference
+ */
+static struct mbuf *
+key_getcomb_ipcomp()
+{
+ struct sadb_comb *comb;
+ struct comp_algo *algo;
+ struct mbuf *m;
+ int i;
+ const int l = PFKEY_ALIGN8(sizeof(struct sadb_comb));
+
+ m = NULL;
+ for (i = 1; i <= SADB_X_CALG_MAX; i++) {
+ algo = ipcomp_algorithm_lookup(i);
+ if (!algo)
+ continue;
+
+ if (!m) {
+ IPSEC_ASSERT(l <= MLEN,
+ ("l=%u > MLEN=%lu", l, (u_long) MLEN));
+ MGET(m, M_DONTWAIT, MT_DATA);
+ if (m) {
+ M_ALIGN(m, l);
+ m->m_len = l;
+ m->m_next = NULL;
+ }
+ } else
+ M_PREPEND(m, l, M_DONTWAIT);
+ if (!m)
+ return NULL;
+
+ comb = mtod(m, struct sadb_comb *);
+ bzero(comb, sizeof(*comb));
+ key_getcomb_setlifetime(comb);
+ comb->sadb_comb_encrypt = i;
+ /* what should we set into sadb_comb_*_{min,max}bits? */
+ }
+
+ return m;
+}
+
+/*
+ * XXX no way to pass mode (transport/tunnel) to userland
+ * XXX replay checking?
+ * XXX sysctl interface to ipsec_{ah,esp}_keymin
+ */
+static struct mbuf *
+key_getprop(saidx)
+ const struct secasindex *saidx;
+{
+ struct sadb_prop *prop;
+ struct mbuf *m, *n;
+ const int l = PFKEY_ALIGN8(sizeof(struct sadb_prop));
+ int totlen;
+
+ switch (saidx->proto) {
+ case IPPROTO_ESP:
+ m = key_getcomb_esp();
+ break;
+ case IPPROTO_AH:
+ m = key_getcomb_ah();
+ break;
+ case IPPROTO_IPCOMP:
+ m = key_getcomb_ipcomp();
+ break;
+ default:
+ return NULL;
+ }
+
+ if (!m)
+ return NULL;
+ M_PREPEND(m, l, M_DONTWAIT);
+ if (!m)
+ return NULL;
+
+ totlen = 0;
+ for (n = m; n; n = n->m_next)
+ totlen += n->m_len;
+
+ prop = mtod(m, struct sadb_prop *);
+ bzero(prop, sizeof(*prop));
+ prop->sadb_prop_len = PFKEY_UNIT64(totlen);
+ prop->sadb_prop_exttype = SADB_EXT_PROPOSAL;
+ prop->sadb_prop_replay = 32; /* XXX */
+
+ return m;
+}
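+
+/*
+ * Shape of the extension built above (one logical buffer, possibly
+ * spread across an mbuf chain):
+ *
+ * | sadb_prop | sadb_comb #1 | sadb_comb #2 | ...
+ *
+ * sadb_prop_len covers the whole extension in 64-bit units; each
+ * sadb_comb slot is PFKEY_ALIGN8()-sized, as asserted in
+ * key_getcomb_esp().
+ */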
+
+/*
+ * SADB_ACQUIRE processing called by key_checkrequest() and key_acquire2().
+ * send
+ * <base, SA, address(SD), (address(P)), x_policy,
+ * (identity(SD),) (sensitivity,) proposal>
+ * to KMD, and expect to receive
+ * <base> with SADB_ACQUIRE if an error occurred,
+ * or
+ * <base, src address, dst address, (SPI range)> with SADB_GETSPI
+ * from KMD by PF_KEY.
+ *
+ * XXX x_policy is outside of RFC2367 (KAME extension).
+ * XXX sensitivity is not supported.
+ * XXX for ipcomp, RFC2367 does not define how to fill in proposal.
+ * see comment for key_getcomb_ipcomp().
+ *
+ * OUT:
+ * 0 : succeed
+ * others: error number
+ */
+static int
+key_acquire(const struct secasindex *saidx, struct secpolicy *sp)
+{
+ struct mbuf *result = NULL, *m;
+ struct secacq *newacq;
+ u_int8_t satype;
+ int error = -1;
+ u_int32_t seq;
+
+ IPSEC_ASSERT(saidx != NULL, ("null saidx"));
+ satype = key_proto2satype(saidx->proto);
+ IPSEC_ASSERT(satype != 0, ("null satype, protocol %u", saidx->proto));
+
+ /*
+ * We never do anything to track an SA being acquired. An alternative
+ * would be for the kernel to refrain from sending further SADB_ACQUIRE
+ * messages until it hears back from IKEd; in that case the requests
+ * would be managed on an ACQUIRING list.
+ */
+ /* Get an entry to decide whether or not to send an ACQUIRE message. */
+ if ((newacq = key_getacq(saidx)) != NULL) {
+ if (V_key_blockacq_count < newacq->count) {
+ /* reset counter and do send message. */
+ newacq->count = 0;
+ } else {
+ /* increment counter and do nothing. */
+ newacq->count++;
+ return 0;
+ }
+ } else {
+ /* make new entry for blocking to send SADB_ACQUIRE. */
+ if ((newacq = key_newacq(saidx)) == NULL)
+ return ENOBUFS;
+ }
+
+ seq = newacq->seq;
+ m = key_setsadbmsg(SADB_ACQUIRE, 0, satype, seq, 0, 0);
+ if (!m) {
+ error = ENOBUFS;
+ goto fail;
+ }
+ result = m;
+
+ /*
+ * No SADB_X_EXT_NAT_T_* here: we do not know
+ * anything related to NAT-T at this time.
+ */
+
+ /* set sadb_address for saidx's. */
+ m = key_setsadbaddr(SADB_EXT_ADDRESS_SRC,
+ &saidx->src.sa, FULLMASK, IPSEC_ULPROTO_ANY);
+ if (!m) {
+ error = ENOBUFS;
+ goto fail;
+ }
+ m_cat(result, m);
+
+ m = key_setsadbaddr(SADB_EXT_ADDRESS_DST,
+ &saidx->dst.sa, FULLMASK, IPSEC_ULPROTO_ANY);
+ if (!m) {
+ error = ENOBUFS;
+ goto fail;
+ }
+ m_cat(result, m);
+
+ /* XXX proxy address (optional) */
+
+ /* set sadb_x_policy */
+ if (sp) {
+ m = key_setsadbxpolicy(sp->policy, sp->spidx.dir, sp->id);
+ if (!m) {
+ error = ENOBUFS;
+ goto fail;
+ }
+ m_cat(result, m);
+ }
+
+ /* XXX identity (optional) */
+#if 0
+ if (idexttype && fqdn) {
+ /* create identity extension (FQDN) */
+ struct sadb_ident *id;
+ int fqdnlen;
+
+ fqdnlen = strlen(fqdn) + 1; /* +1 for terminating-NUL */
+ id = (struct sadb_ident *)p;
+ bzero(id, sizeof(*id) + PFKEY_ALIGN8(fqdnlen));
+ id->sadb_ident_len = PFKEY_UNIT64(sizeof(*id) + PFKEY_ALIGN8(fqdnlen));
+ id->sadb_ident_exttype = idexttype;
+ id->sadb_ident_type = SADB_IDENTTYPE_FQDN;
+ bcopy(fqdn, id + 1, fqdnlen);
+ p += sizeof(struct sadb_ident) + PFKEY_ALIGN8(fqdnlen);
+ }
+
+ if (idexttype) {
+ /* create identity extension (USERFQDN) */
+ struct sadb_ident *id;
+ int userfqdnlen;
+
+ if (userfqdn) {
+ /* +1 for terminating-NUL */
+ userfqdnlen = strlen(userfqdn) + 1;
+ } else
+ userfqdnlen = 0;
+ id = (struct sadb_ident *)p;
+ bzero(id, sizeof(*id) + PFKEY_ALIGN8(userfqdnlen));
+ id->sadb_ident_len = PFKEY_UNIT64(sizeof(*id) + PFKEY_ALIGN8(userfqdnlen));
+ id->sadb_ident_exttype = idexttype;
+ id->sadb_ident_type = SADB_IDENTTYPE_USERFQDN;
+ /* XXX is it correct? */
+ if (curproc && curproc->p_cred)
+ id->sadb_ident_id = curproc->p_cred->p_ruid;
+ if (userfqdn && userfqdnlen)
+ bcopy(userfqdn, id + 1, userfqdnlen);
+ p += sizeof(struct sadb_ident) + PFKEY_ALIGN8(userfqdnlen);
+ }
+#endif
+
+ /* XXX sensitivity (optional) */
+
+ /* create proposal/combination extension */
+ m = key_getprop(saidx);
+#if 0
+ /*
+ * spec conformant: always attach proposal/combination extension,
+ * the problem is that we have no way to attach it for ipcomp,
+ * due to the way sadb_comb is declared in RFC2367.
+ */
+ if (!m) {
+ error = ENOBUFS;
+ goto fail;
+ }
+ m_cat(result, m);
+#else
+ /*
+ * outside of spec; make proposal/combination extension optional.
+ */
+ if (m)
+ m_cat(result, m);
+#endif
+
+ if ((result->m_flags & M_PKTHDR) == 0) {
+ error = EINVAL;
+ goto fail;
+ }
+
+ if (result->m_len < sizeof(struct sadb_msg)) {
+ result = m_pullup(result, sizeof(struct sadb_msg));
+ if (result == NULL) {
+ error = ENOBUFS;
+ goto fail;
+ }
+ }
+
+ result->m_pkthdr.len = 0;
+ for (m = result; m; m = m->m_next)
+ result->m_pkthdr.len += m->m_len;
+
+ mtod(result, struct sadb_msg *)->sadb_msg_len =
+ PFKEY_UNIT64(result->m_pkthdr.len);
+
+ return key_sendup_mbuf(NULL, result, KEY_SENDUP_REGISTERED);
+
+ fail:
+ if (result)
+ m_freem(result);
+ return error;
+}
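+
+/*
+ * The assembly pattern used above recurs throughout this file:
+ * 1. key_setsadbmsg() builds the base sadb_msg in a pkthdr mbuf;
+ * 2. each extension is appended with m_cat();
+ * 3. the chain is walked once to recompute m_pkthdr.len;
+ * 4. sadb_msg_len is set to PFKEY_UNIT64(total) before sendup.
+ * key_expire() below follows the same four steps.
+ */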
+
+static struct secacq *
+key_newacq(const struct secasindex *saidx)
+{
+ struct secacq *newacq;
+
+ /* get new entry */
+ newacq = malloc(sizeof(struct secacq), M_IPSEC_SAQ, M_NOWAIT|M_ZERO);
+ if (newacq == NULL) {
+ ipseclog((LOG_DEBUG, "%s: No more memory.\n", __func__));
+ return NULL;
+ }
+
+ /* copy secindex */
+ bcopy(saidx, &newacq->saidx, sizeof(newacq->saidx));
+ newacq->seq = (V_acq_seq == ~0 ? 1 : ++V_acq_seq);
+ newacq->created = time_second;
+ newacq->count = 0;
+
+ /* add to acqtree */
+ ACQ_LOCK();
+ LIST_INSERT_HEAD(&V_acqtree, newacq, chain);
+ ACQ_UNLOCK();
+
+ return newacq;
+}
+
+static struct secacq *
+key_getacq(const struct secasindex *saidx)
+{
+ struct secacq *acq;
+
+ ACQ_LOCK();
+ LIST_FOREACH(acq, &V_acqtree, chain) {
+ if (key_cmpsaidx(saidx, &acq->saidx, CMP_EXACTLY))
+ break;
+ }
+ ACQ_UNLOCK();
+
+ return acq;
+}
+
+static struct secacq *
+key_getacqbyseq(seq)
+ u_int32_t seq;
+{
+ struct secacq *acq;
+
+ ACQ_LOCK();
+ LIST_FOREACH(acq, &V_acqtree, chain) {
+ if (acq->seq == seq)
+ break;
+ }
+ ACQ_UNLOCK();
+
+ return acq;
+}
+
+static struct secspacq *
+key_newspacq(spidx)
+ struct secpolicyindex *spidx;
+{
+ struct secspacq *acq;
+
+ /* get new entry */
+ acq = malloc(sizeof(struct secspacq), M_IPSEC_SAQ, M_NOWAIT|M_ZERO);
+ if (acq == NULL) {
+ ipseclog((LOG_DEBUG, "%s: No more memory.\n", __func__));
+ return NULL;
+ }
+
+ /* copy secindex */
+ bcopy(spidx, &acq->spidx, sizeof(acq->spidx));
+ acq->created = time_second;
+ acq->count = 0;
+
+ /* add to spacqtree */
+ SPACQ_LOCK();
+ LIST_INSERT_HEAD(&V_spacqtree, acq, chain);
+ SPACQ_UNLOCK();
+
+ return acq;
+}
+
+static struct secspacq *
+key_getspacq(spidx)
+ struct secpolicyindex *spidx;
+{
+ struct secspacq *acq;
+
+ SPACQ_LOCK();
+ LIST_FOREACH(acq, &V_spacqtree, chain) {
+ if (key_cmpspidx_exactly(spidx, &acq->spidx)) {
+ /* NB: return holding spacq_lock */
+ return acq;
+ }
+ }
+ SPACQ_UNLOCK();
+
+ return NULL;
+}
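+
+/*
+ * Locking contract worth noting: unlike key_getacq(), key_getspacq()
+ * returns with SPACQ_LOCK still held when a match is found; the caller
+ * must SPACQ_UNLOCK() once it is done with the entry.
+ */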
+
+/*
+ * SADB_ACQUIRE processing.
+ * In the first case, receives
+ * <base>
+ * from the ikmpd and resets the matching acquire (secacq) entry.
+ *
+ * In the second case, receives
+ * <base, address(SD), (address(P),) (identity(SD),) (sensitivity,) proposal>
+ * from a userland process and returns
+ * <base, address(SD), (address(P),) (identity(SD),) (sensitivity,) proposal>
+ * to the socket.
+ *
+ * m will always be freed.
+ */
+static int
+key_acquire2(so, m, mhp)
+ struct socket *so;
+ struct mbuf *m;
+ const struct sadb_msghdr *mhp;
+{
+ const struct sadb_address *src0, *dst0;
+ struct secasindex saidx;
+ struct secashead *sah;
+ u_int16_t proto;
+ int error;
+
+ IPSEC_ASSERT(so != NULL, ("null socket"));
+ IPSEC_ASSERT(m != NULL, ("null mbuf"));
+ IPSEC_ASSERT(mhp != NULL, ("null msghdr"));
+ IPSEC_ASSERT(mhp->msg != NULL, ("null msg"));
+
+ /*
+ * Error message from KMd.
+ * We assume that if an error occurred in IKEd, the length of the PFKEY
+ * message is exactly the size of the sadb_msg structure.
+ * We do not raise an error even if one occurs in this function.
+ */
+ if (mhp->msg->sadb_msg_len == PFKEY_UNIT64(sizeof(struct sadb_msg))) {
+ struct secacq *acq;
+
+ /* check sequence number */
+ if (mhp->msg->sadb_msg_seq == 0) {
+ ipseclog((LOG_DEBUG, "%s: must specify sequence "
+ "number.\n", __func__));
+ m_freem(m);
+ return 0;
+ }
+
+ if ((acq = key_getacqbyseq(mhp->msg->sadb_msg_seq)) == NULL) {
+ /*
+ * the specified larval SA is already gone, or we got
+ * a bogus sequence number. we can silently ignore it.
+ */
+ m_freem(m);
+ return 0;
+ }
+
+ /* reset the acq counter so the timeout handler will delete the entry. */
+ acq->created = time_second;
+ acq->count = 0;
+ m_freem(m);
+ return 0;
+ }
+
+ /*
+ * This message is from user land.
+ */
+
+ /* map satype to proto */
+ if ((proto = key_satype2proto(mhp->msg->sadb_msg_satype)) == 0) {
+ ipseclog((LOG_DEBUG, "%s: invalid satype is passed.\n",
+ __func__));
+ return key_senderror(so, m, EINVAL);
+ }
+
+ if (mhp->ext[SADB_EXT_ADDRESS_SRC] == NULL ||
+ mhp->ext[SADB_EXT_ADDRESS_DST] == NULL ||
+ mhp->ext[SADB_EXT_PROPOSAL] == NULL) {
+ /* error */
+ ipseclog((LOG_DEBUG, "%s: invalid message is passed.\n",
+ __func__));
+ return key_senderror(so, m, EINVAL);
+ }
+ if (mhp->extlen[SADB_EXT_ADDRESS_SRC] < sizeof(struct sadb_address) ||
+ mhp->extlen[SADB_EXT_ADDRESS_DST] < sizeof(struct sadb_address) ||
+ mhp->extlen[SADB_EXT_PROPOSAL] < sizeof(struct sadb_prop)) {
+ /* error */
+ ipseclog((LOG_DEBUG, "%s: invalid message is passed.\n",
+ __func__));
+ return key_senderror(so, m, EINVAL);
+ }
+
+ src0 = (struct sadb_address *)mhp->ext[SADB_EXT_ADDRESS_SRC];
+ dst0 = (struct sadb_address *)mhp->ext[SADB_EXT_ADDRESS_DST];
+
+ /* XXX boundary check against sa_len */
+ KEY_SETSECASIDX(proto, IPSEC_MODE_ANY, 0, src0 + 1, dst0 + 1, &saidx);
+
+ /*
+ * Make sure the port numbers are zero.
+ * In case of NAT-T we will update them later if needed.
+ */
+ KEY_PORTTOSADDR(&saidx.src, 0);
+ KEY_PORTTOSADDR(&saidx.dst, 0);
+
+#ifdef IPSEC_NAT_T
+ /*
+ * Handle NAT-T info if present.
+ */
+
+ if (mhp->ext[SADB_X_EXT_NAT_T_SPORT] != NULL &&
+ mhp->ext[SADB_X_EXT_NAT_T_DPORT] != NULL) {
+ struct sadb_x_nat_t_port *sport, *dport;
+
+ if (mhp->extlen[SADB_X_EXT_NAT_T_SPORT] < sizeof(*sport) ||
+ mhp->extlen[SADB_X_EXT_NAT_T_DPORT] < sizeof(*dport)) {
+ ipseclog((LOG_DEBUG, "%s: invalid message.\n",
+ __func__));
+ return key_senderror(so, m, EINVAL);
+ }
+
+ sport = (struct sadb_x_nat_t_port *)
+ mhp->ext[SADB_X_EXT_NAT_T_SPORT];
+ dport = (struct sadb_x_nat_t_port *)
+ mhp->ext[SADB_X_EXT_NAT_T_DPORT];
+
+ if (sport)
+ KEY_PORTTOSADDR(&saidx.src,
+ sport->sadb_x_nat_t_port_port);
+ if (dport)
+ KEY_PORTTOSADDR(&saidx.dst,
+ dport->sadb_x_nat_t_port_port);
+ }
+#endif
+
+ /* get a SA index */
+ SAHTREE_LOCK();
+ LIST_FOREACH(sah, &V_sahtree, chain) {
+ if (sah->state == SADB_SASTATE_DEAD)
+ continue;
+ if (key_cmpsaidx(&sah->saidx, &saidx, CMP_MODE_REQID))
+ break;
+ }
+ SAHTREE_UNLOCK();
+ if (sah != NULL) {
+ ipseclog((LOG_DEBUG, "%s: a SA exists already.\n", __func__));
+ return key_senderror(so, m, EEXIST);
+ }
+
+ error = key_acquire(&saidx, NULL);
+ if (error != 0) {
+ ipseclog((LOG_DEBUG, "%s: error %d returned from key_acquire\n",
+ __func__, error));
+ return key_senderror(so, m, error);
+ }
+
+ return key_sendup_mbuf(so, m, KEY_SENDUP_REGISTERED);
+}
+
+/*
+ * SADB_REGISTER processing.
+ * If SATYPE_UNSPEC has been passed as satype, only return sadb_supported.
+ * receive
+ * <base>
+ * from the ikmpd, and register a socket to send PF_KEY messages,
+ * and send
+ * <base, supported>
+ * to KMD by PF_KEY.
+ * If the socket is detached, it must be freed from the regnode.
+ *
+ * m will always be freed.
+ */
+static int
+key_register(so, m, mhp)
+ struct socket *so;
+ struct mbuf *m;
+ const struct sadb_msghdr *mhp;
+{
+ struct secreg *reg, *newreg = 0;
+
+ IPSEC_ASSERT(so != NULL, ("null socket"));
+ IPSEC_ASSERT(m != NULL, ("null mbuf"));
+ IPSEC_ASSERT(mhp != NULL, ("null msghdr"));
+ IPSEC_ASSERT(mhp->msg != NULL, ("null msg"));
+
+ /* check for invalid register message */
+ if (mhp->msg->sadb_msg_satype >= sizeof(V_regtree)/sizeof(V_regtree[0]))
+ return key_senderror(so, m, EINVAL);
+
+ /* When SATYPE_UNSPEC is specified, only return sadb_supported. */
+ if (mhp->msg->sadb_msg_satype == SADB_SATYPE_UNSPEC)
+ goto setmsg;
+
+ /* check whether existing or not */
+ REGTREE_LOCK();
+ LIST_FOREACH(reg, &V_regtree[mhp->msg->sadb_msg_satype], chain) {
+ if (reg->so == so) {
+ REGTREE_UNLOCK();
+ ipseclog((LOG_DEBUG, "%s: socket exists already.\n",
+ __func__));
+ return key_senderror(so, m, EEXIST);
+ }
+ }
+
+ /* create regnode */
+ newreg = malloc(sizeof(struct secreg), M_IPSEC_SAR, M_NOWAIT|M_ZERO);
+ if (newreg == NULL) {
+ REGTREE_UNLOCK();
+ ipseclog((LOG_DEBUG, "%s: No more memory.\n", __func__));
+ return key_senderror(so, m, ENOBUFS);
+ }
+
+ newreg->so = so;
+ ((struct keycb *)sotorawcb(so))->kp_registered++;
+
+ /* add regnode to regtree. */
+ LIST_INSERT_HEAD(&V_regtree[mhp->msg->sadb_msg_satype], newreg, chain);
+ REGTREE_UNLOCK();
+
+ setmsg:
+ {
+ struct mbuf *n;
+ struct sadb_msg *newmsg;
+ struct sadb_supported *sup;
+ u_int len, alen, elen;
+ int off;
+ int i;
+ struct sadb_alg *alg;
+
+ /* create new sadb_msg to reply. */
+ alen = 0;
+ for (i = 1; i <= SADB_AALG_MAX; i++) {
+ if (ah_algorithm_lookup(i))
+ alen += sizeof(struct sadb_alg);
+ }
+ if (alen)
+ alen += sizeof(struct sadb_supported);
+ elen = 0;
+ for (i = 1; i <= SADB_EALG_MAX; i++) {
+ if (esp_algorithm_lookup(i))
+ elen += sizeof(struct sadb_alg);
+ }
+ if (elen)
+ elen += sizeof(struct sadb_supported);
+
+ len = sizeof(struct sadb_msg) + alen + elen;
+
+ if (len > MCLBYTES)
+ return key_senderror(so, m, ENOBUFS);
+
+ MGETHDR(n, M_DONTWAIT, MT_DATA);
+ if (len > MHLEN) {
+ MCLGET(n, M_DONTWAIT);
+ if ((n->m_flags & M_EXT) == 0) {
+ m_freem(n);
+ n = NULL;
+ }
+ }
+ if (!n)
+ return key_senderror(so, m, ENOBUFS);
+
+ n->m_pkthdr.len = n->m_len = len;
+ n->m_next = NULL;
+ off = 0;
+
+ m_copydata(m, 0, sizeof(struct sadb_msg), mtod(n, caddr_t) + off);
+ newmsg = mtod(n, struct sadb_msg *);
+ newmsg->sadb_msg_errno = 0;
+ newmsg->sadb_msg_len = PFKEY_UNIT64(len);
+ off += PFKEY_ALIGN8(sizeof(struct sadb_msg));
+
+ /* for authentication algorithm */
+ if (alen) {
+ sup = (struct sadb_supported *)(mtod(n, caddr_t) + off);
+ sup->sadb_supported_len = PFKEY_UNIT64(alen);
+ sup->sadb_supported_exttype = SADB_EXT_SUPPORTED_AUTH;
+ off += PFKEY_ALIGN8(sizeof(*sup));
+
+ for (i = 1; i <= SADB_AALG_MAX; i++) {
+ struct auth_hash *aalgo;
+ u_int16_t minkeysize, maxkeysize;
+
+ aalgo = ah_algorithm_lookup(i);
+ if (!aalgo)
+ continue;
+ alg = (struct sadb_alg *)(mtod(n, caddr_t) + off);
+ alg->sadb_alg_id = i;
+ alg->sadb_alg_ivlen = 0;
+ key_getsizes_ah(aalgo, i, &minkeysize, &maxkeysize);
+ alg->sadb_alg_minbits = _BITS(minkeysize);
+ alg->sadb_alg_maxbits = _BITS(maxkeysize);
+ off += PFKEY_ALIGN8(sizeof(*alg));
+ }
+ }
+
+ /* for encryption algorithm */
+ if (elen) {
+ sup = (struct sadb_supported *)(mtod(n, caddr_t) + off);
+ sup->sadb_supported_len = PFKEY_UNIT64(elen);
+ sup->sadb_supported_exttype = SADB_EXT_SUPPORTED_ENCRYPT;
+ off += PFKEY_ALIGN8(sizeof(*sup));
+
+ for (i = 1; i <= SADB_EALG_MAX; i++) {
+ struct enc_xform *ealgo;
+
+ ealgo = esp_algorithm_lookup(i);
+ if (!ealgo)
+ continue;
+ alg = (struct sadb_alg *)(mtod(n, caddr_t) + off);
+ alg->sadb_alg_id = i;
+ alg->sadb_alg_ivlen = ealgo->blocksize;
+ alg->sadb_alg_minbits = _BITS(ealgo->minkey);
+ alg->sadb_alg_maxbits = _BITS(ealgo->maxkey);
+ off += PFKEY_ALIGN8(sizeof(struct sadb_alg));
+ }
+ }
+
+ IPSEC_ASSERT(off == len,
+ ("length assumption failed (off %u len %u)", off, len));
+
+ m_freem(m);
+ return key_sendup_mbuf(so, n, KEY_SENDUP_REGISTERED);
+ }
+}
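+
+/*
+ * Layout of the reply assembled above (offsets tracked in 'off'; the
+ * final IPSEC_ASSERT checks that off == len):
+ *
+ * | sadb_msg | sadb_supported(AUTH) | sadb_alg ... |
+ * | sadb_supported(ENCRYPT) | sadb_alg ... |
+ *
+ * Either supported block is omitted entirely when no algorithm of that
+ * class is registered (alen or elen == 0).
+ */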
+
+/*
+ * free secreg entry registered.
+ * XXX: we would like to free the registration for a socket that has completed SADB_REGISTER.
+ */
+void
+key_freereg(struct socket *so)
+{
+ struct secreg *reg;
+ int i;
+
+ IPSEC_ASSERT(so != NULL, ("NULL so"));
+
+ /*
+ * Check whether the registration exists. Check every SA type,
+ * because one socket may be registered for multiple SA types.
+ */
+ REGTREE_LOCK();
+ for (i = 0; i <= SADB_SATYPE_MAX; i++) {
+ LIST_FOREACH(reg, &V_regtree[i], chain) {
+ if (reg->so == so && __LIST_CHAINED(reg)) {
+ LIST_REMOVE(reg, chain);
+ free(reg, M_IPSEC_SAR);
+ break;
+ }
+ }
+ }
+ REGTREE_UNLOCK();
+}
+
+/*
+ * SADB_EXPIRE processing
+ * send
+ * <base, SA, SA2, lifetime(C and one of HS), address(SD)>
+ * to KMD by PF_KEY.
+ * NOTE: We send only soft lifetime extension.
+ *
+ * OUT: 0 : succeed
+ * others : error number
+ */
+static int
+key_expire(struct secasvar *sav)
+{
+ int s;
+ int satype;
+ struct mbuf *result = NULL, *m;
+ int len;
+ int error = -1;
+ struct sadb_lifetime *lt;
+
+ /* XXX: Why do we lock ? */
+ s = splnet(); /*called from softclock()*/
+
+ IPSEC_ASSERT (sav != NULL, ("null sav"));
+ IPSEC_ASSERT (sav->sah != NULL, ("null sa header"));
+
+ /* set msg header */
+ satype = key_proto2satype(sav->sah->saidx.proto);
+ IPSEC_ASSERT(satype != 0, ("invalid proto, satype %u", satype));
+ m = key_setsadbmsg(SADB_EXPIRE, 0, satype, sav->seq, 0, sav->refcnt);
+ if (!m) {
+ error = ENOBUFS;
+ goto fail;
+ }
+ result = m;
+
+ /* create SA extension */
+ m = key_setsadbsa(sav);
+ if (!m) {
+ error = ENOBUFS;
+ goto fail;
+ }
+ m_cat(result, m);
+
+ /* create SA2 extension */
+ m = key_setsadbxsa2(sav->sah->saidx.mode,
+ sav->replay ? sav->replay->count : 0,
+ sav->sah->saidx.reqid);
+ if (!m) {
+ error = ENOBUFS;
+ goto fail;
+ }
+ m_cat(result, m);
+
+ /* create lifetime extension (current and soft) */
+ len = PFKEY_ALIGN8(sizeof(*lt)) * 2;
+ m = key_alloc_mbuf(len);
+ if (!m || m->m_next) { /*XXX*/
+ if (m)
+ m_freem(m);
+ error = ENOBUFS;
+ goto fail;
+ }
+ bzero(mtod(m, caddr_t), len);
+ lt = mtod(m, struct sadb_lifetime *);
+ lt->sadb_lifetime_len = PFKEY_UNIT64(sizeof(struct sadb_lifetime));
+ lt->sadb_lifetime_exttype = SADB_EXT_LIFETIME_CURRENT;
+ lt->sadb_lifetime_allocations = sav->lft_c->allocations;
+ lt->sadb_lifetime_bytes = sav->lft_c->bytes;
+ lt->sadb_lifetime_addtime = sav->lft_c->addtime;
+ lt->sadb_lifetime_usetime = sav->lft_c->usetime;
+ lt = (struct sadb_lifetime *)(mtod(m, caddr_t) + len / 2);
+ lt->sadb_lifetime_len = PFKEY_UNIT64(sizeof(struct sadb_lifetime));
+ lt->sadb_lifetime_exttype = SADB_EXT_LIFETIME_SOFT;
+ lt->sadb_lifetime_allocations = sav->lft_s->allocations;
+ lt->sadb_lifetime_bytes = sav->lft_s->bytes;
+ lt->sadb_lifetime_addtime = sav->lft_s->addtime;
+ lt->sadb_lifetime_usetime = sav->lft_s->usetime;
+ m_cat(result, m);
+
+ /* set sadb_address for source */
+ m = key_setsadbaddr(SADB_EXT_ADDRESS_SRC,
+ &sav->sah->saidx.src.sa,
+ FULLMASK, IPSEC_ULPROTO_ANY);
+ if (!m) {
+ error = ENOBUFS;
+ goto fail;
+ }
+ m_cat(result, m);
+
+ /* set sadb_address for destination */
+ m = key_setsadbaddr(SADB_EXT_ADDRESS_DST,
+ &sav->sah->saidx.dst.sa,
+ FULLMASK, IPSEC_ULPROTO_ANY);
+ if (!m) {
+ error = ENOBUFS;
+ goto fail;
+ }
+ m_cat(result, m);
+
+ /*
+ * XXX-BZ Handle NAT-T extensions here.
+ */
+
+ if ((result->m_flags & M_PKTHDR) == 0) {
+ error = EINVAL;
+ goto fail;
+ }
+
+ if (result->m_len < sizeof(struct sadb_msg)) {
+ result = m_pullup(result, sizeof(struct sadb_msg));
+ if (result == NULL) {
+ error = ENOBUFS;
+ goto fail;
+ }
+ }
+
+ result->m_pkthdr.len = 0;
+ for (m = result; m; m = m->m_next)
+ result->m_pkthdr.len += m->m_len;
+
+ mtod(result, struct sadb_msg *)->sadb_msg_len =
+ PFKEY_UNIT64(result->m_pkthdr.len);
+
+ splx(s);
+ return key_sendup_mbuf(NULL, result, KEY_SENDUP_REGISTERED);
+
+ fail:
+ if (result)
+ m_freem(result);
+ splx(s);
+ return error;
+}
+
+/*
+ * SADB_FLUSH processing
+ * receive
+ * <base>
+ * from the ikmpd, and free all entries in secastree.
+ * and send,
+ * <base>
+ * to the ikmpd.
+ * NOTE: all this does is mark entries SADB_SASTATE_DEAD.
+ *
+ * m will always be freed.
+ */
+static int
+key_flush(so, m, mhp)
+ struct socket *so;
+ struct mbuf *m;
+ const struct sadb_msghdr *mhp;
+{
+ struct sadb_msg *newmsg;
+ struct secashead *sah, *nextsah;
+ struct secasvar *sav, *nextsav;
+ u_int16_t proto;
+ u_int8_t state;
+ u_int stateidx;
+
+ IPSEC_ASSERT(so != NULL, ("null socket"));
+ IPSEC_ASSERT(mhp != NULL, ("null msghdr"));
+ IPSEC_ASSERT(mhp->msg != NULL, ("null msg"));
+
+ /* map satype to proto */
+ if ((proto = key_satype2proto(mhp->msg->sadb_msg_satype)) == 0) {
+ ipseclog((LOG_DEBUG, "%s: invalid satype is passed.\n",
+ __func__));
+ return key_senderror(so, m, EINVAL);
+ }
+
+ /* SADB_SATYPE_UNSPEC means flushing SAs of every type. */
+ SAHTREE_LOCK();
+ for (sah = LIST_FIRST(&V_sahtree);
+ sah != NULL;
+ sah = nextsah) {
+ nextsah = LIST_NEXT(sah, chain);
+
+ if (mhp->msg->sadb_msg_satype != SADB_SATYPE_UNSPEC
+ && proto != sah->saidx.proto)
+ continue;
+
+ for (stateidx = 0;
+ stateidx < _ARRAYLEN(saorder_state_alive);
+ stateidx++) {
+ state = saorder_state_any[stateidx];
+ for (sav = LIST_FIRST(&sah->savtree[state]);
+ sav != NULL;
+ sav = nextsav) {
+
+ nextsav = LIST_NEXT(sav, chain);
+
+ key_sa_chgstate(sav, SADB_SASTATE_DEAD);
+ KEY_FREESAV(&sav);
+ }
+ }
+
+ sah->state = SADB_SASTATE_DEAD;
+ }
+ SAHTREE_UNLOCK();
+
+ if (m->m_len < sizeof(struct sadb_msg) ||
+ sizeof(struct sadb_msg) > m->m_len + M_TRAILINGSPACE(m)) {
+ ipseclog((LOG_DEBUG, "%s: No more memory.\n", __func__));
+ return key_senderror(so, m, ENOBUFS);
+ }
+
+ if (m->m_next)
+ m_freem(m->m_next);
+ m->m_next = NULL;
+ m->m_pkthdr.len = m->m_len = sizeof(struct sadb_msg);
+ newmsg = mtod(m, struct sadb_msg *);
+ newmsg->sadb_msg_errno = 0;
+ newmsg->sadb_msg_len = PFKEY_UNIT64(m->m_pkthdr.len);
+
+ return key_sendup_mbuf(so, m, KEY_SENDUP_ALL);
+}
+
+/*
+ * SADB_DUMP processing
+ * dump all SAD entries, including those in DEAD state.
+ * receive
+ * <base>
+ * from the ikmpd, and dump all secasvar leaves
+ * and send,
+ * <base> .....
+ * to the ikmpd.
+ *
+ * m will always be freed.
+ */
+static int
+key_dump(so, m, mhp)
+ struct socket *so;
+ struct mbuf *m;
+ const struct sadb_msghdr *mhp;
+{
+ struct secashead *sah;
+ struct secasvar *sav;
+ u_int16_t proto;
+ u_int stateidx;
+ u_int8_t satype;
+ u_int8_t state;
+ int cnt;
+ struct sadb_msg *newmsg;
+ struct mbuf *n;
+
+ IPSEC_ASSERT(so != NULL, ("null socket"));
+ IPSEC_ASSERT(m != NULL, ("null mbuf"));
+ IPSEC_ASSERT(mhp != NULL, ("null msghdr"));
+ IPSEC_ASSERT(mhp->msg != NULL, ("null msg"));
+
+ /* map satype to proto */
+ if ((proto = key_satype2proto(mhp->msg->sadb_msg_satype)) == 0) {
+ ipseclog((LOG_DEBUG, "%s: invalid satype is passed.\n",
+ __func__));
+ return key_senderror(so, m, EINVAL);
+ }
+
+ /* count sav entries to be sent to the userland. */
+ cnt = 0;
+ SAHTREE_LOCK();
+ LIST_FOREACH(sah, &V_sahtree, chain) {
+ if (mhp->msg->sadb_msg_satype != SADB_SATYPE_UNSPEC
+ && proto != sah->saidx.proto)
+ continue;
+
+ for (stateidx = 0;
+ stateidx < _ARRAYLEN(saorder_state_any);
+ stateidx++) {
+ state = saorder_state_any[stateidx];
+ LIST_FOREACH(sav, &sah->savtree[state], chain) {
+ cnt++;
+ }
+ }
+ }
+
+ if (cnt == 0) {
+ SAHTREE_UNLOCK();
+ return key_senderror(so, m, ENOENT);
+ }
+
+ /* send this to the userland, one at a time. */
+ newmsg = NULL;
+ LIST_FOREACH(sah, &V_sahtree, chain) {
+ if (mhp->msg->sadb_msg_satype != SADB_SATYPE_UNSPEC
+ && proto != sah->saidx.proto)
+ continue;
+
+ /* map proto to satype */
+ if ((satype = key_proto2satype(sah->saidx.proto)) == 0) {
+ SAHTREE_UNLOCK();
+ ipseclog((LOG_DEBUG, "%s: there was invalid proto in "
+ "SAD.\n", __func__));
+ return key_senderror(so, m, EINVAL);
+ }
+
+ for (stateidx = 0;
+ stateidx < _ARRAYLEN(saorder_state_any);
+ stateidx++) {
+ state = saorder_state_any[stateidx];
+ LIST_FOREACH(sav, &sah->savtree[state], chain) {
+ n = key_setdumpsa(sav, SADB_DUMP, satype,
+ --cnt, mhp->msg->sadb_msg_pid);
+ if (!n) {
+ SAHTREE_UNLOCK();
+ return key_senderror(so, m, ENOBUFS);
+ }
+ key_sendup_mbuf(so, n, KEY_SENDUP_ONE);
+ }
+ }
+ }
+ SAHTREE_UNLOCK();
+
+ m_freem(m);
+ return 0;
+}
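+
+/*
+ * Hedged user-space sketch (editorial, not part of this file):
+ * requesting the dump served by key_dump() above takes only a PF_KEY
+ * socket and a bare sadb_msg; each SA then arrives as a separate reply
+ * message, with sadb_msg_seq counting down to 0 (note the --cnt above).
+ */
+#if 0
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <net/pfkeyv2.h>
+#include <string.h>
+#include <unistd.h>
+
+static int
+request_sadb_dump(void)
+{
+ struct sadb_msg msg;
+ int s;
+
+ s = socket(PF_KEY, SOCK_RAW, PF_KEY_V2);
+ if (s < 0)
+ return -1;
+ memset(&msg, 0, sizeof(msg));
+ msg.sadb_msg_version = PF_KEY_V2;
+ msg.sadb_msg_type = SADB_DUMP;
+ msg.sadb_msg_satype = SADB_SATYPE_UNSPEC; /* all SA types */
+ msg.sadb_msg_len = sizeof(msg) / 8; /* length in 64-bit units */
+ msg.sadb_msg_pid = getpid();
+ if (write(s, &msg, sizeof(msg)) != sizeof(msg)) {
+ close(s);
+ return -1;
+ }
+ /* read() replies here until sadb_msg_seq reaches 0 */
+ close(s);
+ return 0;
+}
+#endif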
+
+/*
+ * SADB_X_PROMISC processing
+ *
+ * m will always be freed.
+ */
+static int
+key_promisc(so, m, mhp)
+ struct socket *so;
+ struct mbuf *m;
+ const struct sadb_msghdr *mhp;
+{
+ int olen;
+
+ IPSEC_ASSERT(so != NULL, ("null socket"));
+ IPSEC_ASSERT(m != NULL, ("null mbuf"));
+ IPSEC_ASSERT(mhp != NULL, ("null msghdr"));
+ IPSEC_ASSERT(mhp->msg != NULL, ("null msg"));
+
+ olen = PFKEY_UNUNIT64(mhp->msg->sadb_msg_len);
+
+ if (olen < sizeof(struct sadb_msg)) {
+#if 1
+ return key_senderror(so, m, EINVAL);
+#else
+ m_freem(m);
+ return 0;
+#endif
+ } else if (olen == sizeof(struct sadb_msg)) {
+ /* enable/disable promisc mode */
+ struct keycb *kp;
+
+ if ((kp = (struct keycb *)sotorawcb(so)) == NULL)
+ return key_senderror(so, m, EINVAL);
+ mhp->msg->sadb_msg_errno = 0;
+ switch (mhp->msg->sadb_msg_satype) {
+ case 0:
+ case 1:
+ kp->kp_promisc = mhp->msg->sadb_msg_satype;
+ break;
+ default:
+ return key_senderror(so, m, EINVAL);
+ }
+
+ /* send the original message back to everyone */
+ mhp->msg->sadb_msg_errno = 0;
+ return key_sendup_mbuf(so, m, KEY_SENDUP_ALL);
+ } else {
+ /* send packet as is */
+
+ m_adj(m, PFKEY_ALIGN8(sizeof(struct sadb_msg)));
+
+ /* TODO: if sadb_msg_seq is specified, send to specific pid */
+ return key_sendup_mbuf(so, m, KEY_SENDUP_ALL);
+ }
+}
+
+static int (*key_typesw[]) __P((struct socket *, struct mbuf *,
+ const struct sadb_msghdr *)) = {
+ NULL, /* SADB_RESERVED */
+ key_getspi, /* SADB_GETSPI */
+ key_update, /* SADB_UPDATE */
+ key_add, /* SADB_ADD */
+ key_delete, /* SADB_DELETE */
+ key_get, /* SADB_GET */
+ key_acquire2, /* SADB_ACQUIRE */
+ key_register, /* SADB_REGISTER */
+ NULL, /* SADB_EXPIRE */
+ key_flush, /* SADB_FLUSH */
+ key_dump, /* SADB_DUMP */
+ key_promisc, /* SADB_X_PROMISC */
+ NULL, /* SADB_X_PCHANGE */
+ key_spdadd, /* SADB_X_SPDUPDATE */
+ key_spdadd, /* SADB_X_SPDADD */
+ key_spddelete, /* SADB_X_SPDDELETE */
+ key_spdget, /* SADB_X_SPDGET */
+ NULL, /* SADB_X_SPDACQUIRE */
+ key_spddump, /* SADB_X_SPDDUMP */
+ key_spdflush, /* SADB_X_SPDFLUSH */
+ key_spdadd, /* SADB_X_SPDSETIDX */
+ NULL, /* SADB_X_SPDEXPIRE */
+ key_spddelete2, /* SADB_X_SPDDELETE2 */
+};
+
+/*
+ * Parse an sadb_msg buffer and process the PFKEYv2 request,
+ * creating response data if needed.  The message is handled
+ * directly as an mbuf chain.
+ * IN:
+ *	m  : received message, pulled up into a contiguous buffer.
+ *	     The same buffer is rewritten into the response.
+ *	so : pointer to the socket.
+ * OUT:
+ *	0 on success, otherwise an error code; the response (or an
+ *	error reply) is sent back to the user process.
+int
+key_parse(m, so)
+ struct mbuf *m;
+ struct socket *so;
+{
+ struct sadb_msg *msg;
+ struct sadb_msghdr mh;
+ u_int orglen;
+ int error;
+ int target;
+
+ IPSEC_ASSERT(so != NULL, ("null socket"));
+ IPSEC_ASSERT(m != NULL, ("null mbuf"));
+
+#if 0 /*kdebug_sadb assumes msg in linear buffer*/
+ KEYDEBUG(KEYDEBUG_KEY_DUMP,
+ ipseclog((LOG_DEBUG, "%s: passed sadb_msg\n", __func__));
+ kdebug_sadb(msg));
+#endif
+
+ if (m->m_len < sizeof(struct sadb_msg)) {
+ m = m_pullup(m, sizeof(struct sadb_msg));
+ if (!m)
+ return ENOBUFS;
+ }
+ msg = mtod(m, struct sadb_msg *);
+ orglen = PFKEY_UNUNIT64(msg->sadb_msg_len);
+ target = KEY_SENDUP_ONE;
+
+	if ((m->m_flags & M_PKTHDR) == 0 ||
+	    m->m_pkthdr.len != orglen) {
+ ipseclog((LOG_DEBUG, "%s: invalid message length.\n",__func__));
+ V_pfkeystat.out_invlen++;
+ error = EINVAL;
+ goto senderror;
+ }
+
+ if (msg->sadb_msg_version != PF_KEY_V2) {
+ ipseclog((LOG_DEBUG, "%s: PF_KEY version %u is mismatched.\n",
+ __func__, msg->sadb_msg_version));
+ V_pfkeystat.out_invver++;
+ error = EINVAL;
+ goto senderror;
+ }
+
+ if (msg->sadb_msg_type > SADB_MAX) {
+ ipseclog((LOG_DEBUG, "%s: invalid type %u is passed.\n",
+ __func__, msg->sadb_msg_type));
+ V_pfkeystat.out_invmsgtype++;
+ error = EINVAL;
+ goto senderror;
+ }
+
+ /* for old-fashioned code - should be nuked */
+ if (m->m_pkthdr.len > MCLBYTES) {
+ m_freem(m);
+ return ENOBUFS;
+ }
+ if (m->m_next) {
+ struct mbuf *n;
+
+ MGETHDR(n, M_DONTWAIT, MT_DATA);
+ if (n && m->m_pkthdr.len > MHLEN) {
+ MCLGET(n, M_DONTWAIT);
+ if ((n->m_flags & M_EXT) == 0) {
+ m_free(n);
+ n = NULL;
+ }
+ }
+ if (!n) {
+ m_freem(m);
+ return ENOBUFS;
+ }
+ m_copydata(m, 0, m->m_pkthdr.len, mtod(n, caddr_t));
+ n->m_pkthdr.len = n->m_len = m->m_pkthdr.len;
+ n->m_next = NULL;
+ m_freem(m);
+ m = n;
+ }
+
+ /* align the mbuf chain so that extensions are in contiguous region. */
+ error = key_align(m, &mh);
+ if (error)
+ return error;
+
+ msg = mh.msg;
+
+ /* check SA type */
+ switch (msg->sadb_msg_satype) {
+ case SADB_SATYPE_UNSPEC:
+ switch (msg->sadb_msg_type) {
+ case SADB_GETSPI:
+ case SADB_UPDATE:
+ case SADB_ADD:
+ case SADB_DELETE:
+ case SADB_GET:
+ case SADB_ACQUIRE:
+ case SADB_EXPIRE:
+ ipseclog((LOG_DEBUG, "%s: must specify satype "
+ "when msg type=%u.\n", __func__,
+ msg->sadb_msg_type));
+ V_pfkeystat.out_invsatype++;
+ error = EINVAL;
+ goto senderror;
+ }
+ break;
+ case SADB_SATYPE_AH:
+ case SADB_SATYPE_ESP:
+ case SADB_X_SATYPE_IPCOMP:
+ case SADB_X_SATYPE_TCPSIGNATURE:
+ switch (msg->sadb_msg_type) {
+ case SADB_X_SPDADD:
+ case SADB_X_SPDDELETE:
+ case SADB_X_SPDGET:
+ case SADB_X_SPDDUMP:
+ case SADB_X_SPDFLUSH:
+ case SADB_X_SPDSETIDX:
+ case SADB_X_SPDUPDATE:
+ case SADB_X_SPDDELETE2:
+ ipseclog((LOG_DEBUG, "%s: illegal satype=%u\n",
+ __func__, msg->sadb_msg_type));
+ V_pfkeystat.out_invsatype++;
+ error = EINVAL;
+ goto senderror;
+ }
+ break;
+ case SADB_SATYPE_RSVP:
+ case SADB_SATYPE_OSPFV2:
+ case SADB_SATYPE_RIPV2:
+ case SADB_SATYPE_MIP:
+ ipseclog((LOG_DEBUG, "%s: type %u isn't supported.\n",
+ __func__, msg->sadb_msg_satype));
+ V_pfkeystat.out_invsatype++;
+ error = EOPNOTSUPP;
+ goto senderror;
+ case 1: /* XXX: What does it do? */
+ if (msg->sadb_msg_type == SADB_X_PROMISC)
+ break;
+ /*FALLTHROUGH*/
+ default:
+ ipseclog((LOG_DEBUG, "%s: invalid type %u is passed.\n",
+ __func__, msg->sadb_msg_satype));
+ V_pfkeystat.out_invsatype++;
+ error = EINVAL;
+ goto senderror;
+ }
+
+ /* check field of upper layer protocol and address family */
+ if (mh.ext[SADB_EXT_ADDRESS_SRC] != NULL
+ && mh.ext[SADB_EXT_ADDRESS_DST] != NULL) {
+ struct sadb_address *src0, *dst0;
+ u_int plen;
+
+ src0 = (struct sadb_address *)(mh.ext[SADB_EXT_ADDRESS_SRC]);
+ dst0 = (struct sadb_address *)(mh.ext[SADB_EXT_ADDRESS_DST]);
+
+ /* check upper layer protocol */
+ if (src0->sadb_address_proto != dst0->sadb_address_proto) {
+ ipseclog((LOG_DEBUG, "%s: upper layer protocol "
+ "mismatched.\n", __func__));
+ V_pfkeystat.out_invaddr++;
+ error = EINVAL;
+ goto senderror;
+ }
+
+ /* check family */
+ if (PFKEY_ADDR_SADDR(src0)->sa_family !=
+ PFKEY_ADDR_SADDR(dst0)->sa_family) {
+ ipseclog((LOG_DEBUG, "%s: address family mismatched.\n",
+ __func__));
+ V_pfkeystat.out_invaddr++;
+ error = EINVAL;
+ goto senderror;
+ }
+ if (PFKEY_ADDR_SADDR(src0)->sa_len !=
+ PFKEY_ADDR_SADDR(dst0)->sa_len) {
+ ipseclog((LOG_DEBUG, "%s: address struct size "
+ "mismatched.\n", __func__));
+ V_pfkeystat.out_invaddr++;
+ error = EINVAL;
+ goto senderror;
+ }
+
+ switch (PFKEY_ADDR_SADDR(src0)->sa_family) {
+ case AF_INET:
+ if (PFKEY_ADDR_SADDR(src0)->sa_len !=
+ sizeof(struct sockaddr_in)) {
+ V_pfkeystat.out_invaddr++;
+ error = EINVAL;
+ goto senderror;
+ }
+ break;
+ case AF_INET6:
+ if (PFKEY_ADDR_SADDR(src0)->sa_len !=
+ sizeof(struct sockaddr_in6)) {
+ V_pfkeystat.out_invaddr++;
+ error = EINVAL;
+ goto senderror;
+ }
+ break;
+ default:
+ ipseclog((LOG_DEBUG, "%s: unsupported address family\n",
+ __func__));
+ V_pfkeystat.out_invaddr++;
+ error = EAFNOSUPPORT;
+ goto senderror;
+ }
+
+ switch (PFKEY_ADDR_SADDR(src0)->sa_family) {
+ case AF_INET:
+ plen = sizeof(struct in_addr) << 3;
+ break;
+ case AF_INET6:
+ plen = sizeof(struct in6_addr) << 3;
+ break;
+ default:
+ plen = 0; /*fool gcc*/
+ break;
+ }
+
+ /* check max prefix length */
+ if (src0->sadb_address_prefixlen > plen ||
+ dst0->sadb_address_prefixlen > plen) {
+ ipseclog((LOG_DEBUG, "%s: illegal prefixlen.\n",
+ __func__));
+ V_pfkeystat.out_invaddr++;
+ error = EINVAL;
+ goto senderror;
+ }
+
+ /*
+ * prefixlen == 0 is valid because there can be a case when
+ * all addresses are matched.
+ */
+ }
+
+ if (msg->sadb_msg_type >= sizeof(key_typesw)/sizeof(key_typesw[0]) ||
+ key_typesw[msg->sadb_msg_type] == NULL) {
+ V_pfkeystat.out_invmsgtype++;
+ error = EINVAL;
+ goto senderror;
+ }
+
+ return (*key_typesw[msg->sadb_msg_type])(so, m, &mh);
+
+senderror:
+ msg->sadb_msg_errno = error;
+ return key_sendup_mbuf(so, m, target);
+}
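
The length checks above rely on PF_KEY's convention that sadb_msg_len and sadb_ext_len count 8-byte units. A small worked check, assuming the PFKEY_UNIT64/PFKEY_UNUNIT64 helpers from <net/pfkeyv2.h> (bytes >> 3 and units << 3 respectively):

	#include <sys/types.h>
	#include <assert.h>
	#include <net/pfkeyv2.h>

	int
	main(void)
	{
		/* struct sadb_msg is 16 bytes, i.e. 2 eight-byte units */
		assert(PFKEY_UNIT64(sizeof(struct sadb_msg)) == 2);
		assert(PFKEY_UNUNIT64(2) == sizeof(struct sadb_msg));
		return 0;
	}
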
+
+static int
+key_senderror(so, m, code)
+ struct socket *so;
+ struct mbuf *m;
+ int code;
+{
+ struct sadb_msg *msg;
+
+ IPSEC_ASSERT(m->m_len >= sizeof(struct sadb_msg),
+ ("mbuf too small, len %u", m->m_len));
+
+ msg = mtod(m, struct sadb_msg *);
+ msg->sadb_msg_errno = code;
+ return key_sendup_mbuf(so, m, KEY_SENDUP_ONE);
+}
+
+/*
+ * set the pointer to each header into message buffer.
+ * m will be freed on error.
+ * XXX larger-than-MCLBYTES extension?
+ */
+static int
+key_align(m, mhp)
+ struct mbuf *m;
+ struct sadb_msghdr *mhp;
+{
+ struct mbuf *n;
+ struct sadb_ext *ext;
+ size_t off, end;
+ int extlen;
+ int toff;
+
+ IPSEC_ASSERT(m != NULL, ("null mbuf"));
+ IPSEC_ASSERT(mhp != NULL, ("null msghdr"));
+ IPSEC_ASSERT(m->m_len >= sizeof(struct sadb_msg),
+ ("mbuf too small, len %u", m->m_len));
+
+ /* initialize */
+ bzero(mhp, sizeof(*mhp));
+
+ mhp->msg = mtod(m, struct sadb_msg *);
+ mhp->ext[0] = (struct sadb_ext *)mhp->msg; /*XXX backward compat */
+
+ end = PFKEY_UNUNIT64(mhp->msg->sadb_msg_len);
+ extlen = end; /*just in case extlen is not updated*/
+ for (off = sizeof(struct sadb_msg); off < end; off += extlen) {
+ n = m_pulldown(m, off, sizeof(struct sadb_ext), &toff);
+ if (!n) {
+ /* m is already freed */
+ return ENOBUFS;
+ }
+ ext = (struct sadb_ext *)(mtod(n, caddr_t) + toff);
+
+ /* set pointer */
+ switch (ext->sadb_ext_type) {
+ case SADB_EXT_SA:
+ case SADB_EXT_ADDRESS_SRC:
+ case SADB_EXT_ADDRESS_DST:
+ case SADB_EXT_ADDRESS_PROXY:
+ case SADB_EXT_LIFETIME_CURRENT:
+ case SADB_EXT_LIFETIME_HARD:
+ case SADB_EXT_LIFETIME_SOFT:
+ case SADB_EXT_KEY_AUTH:
+ case SADB_EXT_KEY_ENCRYPT:
+ case SADB_EXT_IDENTITY_SRC:
+ case SADB_EXT_IDENTITY_DST:
+ case SADB_EXT_SENSITIVITY:
+ case SADB_EXT_PROPOSAL:
+ case SADB_EXT_SUPPORTED_AUTH:
+ case SADB_EXT_SUPPORTED_ENCRYPT:
+ case SADB_EXT_SPIRANGE:
+ case SADB_X_EXT_POLICY:
+ case SADB_X_EXT_SA2:
+#ifdef IPSEC_NAT_T
+ case SADB_X_EXT_NAT_T_TYPE:
+ case SADB_X_EXT_NAT_T_SPORT:
+ case SADB_X_EXT_NAT_T_DPORT:
+ case SADB_X_EXT_NAT_T_OAI:
+ case SADB_X_EXT_NAT_T_OAR:
+ case SADB_X_EXT_NAT_T_FRAG:
+#endif
+ /* duplicate check */
+ /*
+			 * XXX Can there be duplicate payloads of
+			 * KEY_AUTH or KEY_ENCRYPT?
+ */
+ if (mhp->ext[ext->sadb_ext_type] != NULL) {
+ ipseclog((LOG_DEBUG, "%s: duplicate ext_type "
+ "%u\n", __func__, ext->sadb_ext_type));
+ m_freem(m);
+ V_pfkeystat.out_dupext++;
+ return EINVAL;
+ }
+ break;
+ default:
+ ipseclog((LOG_DEBUG, "%s: invalid ext_type %u\n",
+ __func__, ext->sadb_ext_type));
+ m_freem(m);
+ V_pfkeystat.out_invexttype++;
+ return EINVAL;
+ }
+
+ extlen = PFKEY_UNUNIT64(ext->sadb_ext_len);
+
+ if (key_validate_ext(ext, extlen)) {
+ m_freem(m);
+ V_pfkeystat.out_invlen++;
+ return EINVAL;
+ }
+
+ n = m_pulldown(m, off, extlen, &toff);
+ if (!n) {
+ /* m is already freed */
+ return ENOBUFS;
+ }
+ ext = (struct sadb_ext *)(mtod(n, caddr_t) + toff);
+
+ mhp->ext[ext->sadb_ext_type] = ext;
+ mhp->extoff[ext->sadb_ext_type] = off;
+ mhp->extlen[ext->sadb_ext_type] = extlen;
+ }
+
+ if (off != end) {
+ m_freem(m);
+ V_pfkeystat.out_invlen++;
+ return EINVAL;
+ }
+
+ return 0;
+}
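
Once key_align() returns, a handler addresses extensions by type through the sadb_msghdr it filled in. A hedged sketch of the consumer side (example_handler is illustrative only; the real handlers in key_typesw[] follow this shape):

	static int
	example_handler(struct socket *so, struct mbuf *m,
	    const struct sadb_msghdr *mhp)
	{
		struct sadb_sa *sa0;

		/* optional extensions leave their slot NULL */
		if (mhp->ext[SADB_EXT_SA] == NULL ||
		    mhp->extlen[SADB_EXT_SA] < sizeof(struct sadb_sa))
			return key_senderror(so, m, EINVAL);

		sa0 = (struct sadb_sa *)mhp->ext[SADB_EXT_SA];
		/* ... e.g. look up ntohl(sa0->sadb_sa_spi) ... */
		m_freem(m);		/* handlers always consume m */
		return 0;
	}
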
+
+static int
+key_validate_ext(ext, len)
+ const struct sadb_ext *ext;
+ int len;
+{
+ const struct sockaddr *sa;
+ enum { NONE, ADDR } checktype = NONE;
+ int baselen = 0;
+ const int sal = offsetof(struct sockaddr, sa_len) + sizeof(sa->sa_len);
+
+ if (len != PFKEY_UNUNIT64(ext->sadb_ext_len))
+ return EINVAL;
+
+ /* if it does not match minimum/maximum length, bail */
+ if (ext->sadb_ext_type >= sizeof(minsize) / sizeof(minsize[0]) ||
+ ext->sadb_ext_type >= sizeof(maxsize) / sizeof(maxsize[0]))
+ return EINVAL;
+ if (!minsize[ext->sadb_ext_type] || len < minsize[ext->sadb_ext_type])
+ return EINVAL;
+ if (maxsize[ext->sadb_ext_type] && len > maxsize[ext->sadb_ext_type])
+ return EINVAL;
+
+ /* more checks based on sadb_ext_type XXX need more */
+ switch (ext->sadb_ext_type) {
+ case SADB_EXT_ADDRESS_SRC:
+ case SADB_EXT_ADDRESS_DST:
+ case SADB_EXT_ADDRESS_PROXY:
+ baselen = PFKEY_ALIGN8(sizeof(struct sadb_address));
+ checktype = ADDR;
+ break;
+ case SADB_EXT_IDENTITY_SRC:
+ case SADB_EXT_IDENTITY_DST:
+ if (((const struct sadb_ident *)ext)->sadb_ident_type ==
+ SADB_X_IDENTTYPE_ADDR) {
+ baselen = PFKEY_ALIGN8(sizeof(struct sadb_ident));
+ checktype = ADDR;
+ } else
+ checktype = NONE;
+ break;
+ default:
+ checktype = NONE;
+ break;
+ }
+
+ switch (checktype) {
+ case NONE:
+ break;
+ case ADDR:
+ sa = (const struct sockaddr *)(((const u_int8_t*)ext)+baselen);
+ if (len < baselen + sal)
+ return EINVAL;
+ if (baselen + PFKEY_ALIGN8(sa->sa_len) != len)
+ return EINVAL;
+ break;
+ }
+
+ return 0;
+}
+
+void
+key_init(void)
+{
+ int i;
+
+ for (i = 0; i < IPSEC_DIR_MAX; i++)
+ LIST_INIT(&V_sptree[i]);
+
+ LIST_INIT(&V_sahtree);
+
+ for (i = 0; i <= SADB_SATYPE_MAX; i++)
+ LIST_INIT(&V_regtree[i]);
+
+ LIST_INIT(&V_acqtree);
+ LIST_INIT(&V_spacqtree);
+
+ /* system default */
+ V_ip4_def_policy.policy = IPSEC_POLICY_NONE;
+ V_ip4_def_policy.refcnt++; /*never reclaim this*/
+
+ if (!IS_DEFAULT_VNET(curvnet))
+ return;
+
+ SPTREE_LOCK_INIT();
+ REGTREE_LOCK_INIT();
+ SAHTREE_LOCK_INIT();
+ ACQ_LOCK_INIT();
+ SPACQ_LOCK_INIT();
+
+#ifndef IPSEC_DEBUG2
+ timeout((void *)key_timehandler, (void *)0, hz);
+#endif /*IPSEC_DEBUG2*/
+
+ /* initialize key statistics */
+ keystat.getspi_count = 1;
+
+ printf("IPsec: Initialized Security Association Processing.\n");
+}
+
+#ifdef VIMAGE
+void
+key_destroy(void)
+{
+ struct secpolicy *sp, *nextsp;
+ struct secacq *acq, *nextacq;
+ struct secspacq *spacq, *nextspacq;
+ struct secashead *sah, *nextsah;
+ struct secreg *reg;
+ int i;
+
+ SPTREE_LOCK();
+ for (i = 0; i < IPSEC_DIR_MAX; i++) {
+ for (sp = LIST_FIRST(&V_sptree[i]);
+ sp != NULL; sp = nextsp) {
+ nextsp = LIST_NEXT(sp, chain);
+ if (__LIST_CHAINED(sp)) {
+ LIST_REMOVE(sp, chain);
+ free(sp, M_IPSEC_SP);
+ }
+ }
+ }
+ SPTREE_UNLOCK();
+
+ SAHTREE_LOCK();
+ for (sah = LIST_FIRST(&V_sahtree); sah != NULL; sah = nextsah) {
+ nextsah = LIST_NEXT(sah, chain);
+ if (__LIST_CHAINED(sah)) {
+ LIST_REMOVE(sah, chain);
+ free(sah, M_IPSEC_SAH);
+ }
+ }
+ SAHTREE_UNLOCK();
+
+ REGTREE_LOCK();
+ for (i = 0; i <= SADB_SATYPE_MAX; i++) {
+ LIST_FOREACH(reg, &V_regtree[i], chain) {
+ if (__LIST_CHAINED(reg)) {
+ LIST_REMOVE(reg, chain);
+ free(reg, M_IPSEC_SAR);
+ break;
+ }
+ }
+ }
+ REGTREE_UNLOCK();
+
+ ACQ_LOCK();
+ for (acq = LIST_FIRST(&V_acqtree); acq != NULL; acq = nextacq) {
+ nextacq = LIST_NEXT(acq, chain);
+ if (__LIST_CHAINED(acq)) {
+ LIST_REMOVE(acq, chain);
+ free(acq, M_IPSEC_SAQ);
+ }
+ }
+ ACQ_UNLOCK();
+
+ SPACQ_LOCK();
+ for (spacq = LIST_FIRST(&V_spacqtree); spacq != NULL;
+ spacq = nextspacq) {
+ nextspacq = LIST_NEXT(spacq, chain);
+ if (__LIST_CHAINED(spacq)) {
+ LIST_REMOVE(spacq, chain);
+ free(spacq, M_IPSEC_SAQ);
+ }
+ }
+ SPACQ_UNLOCK();
+}
+#endif
+
+/*
+ * XXX: This function is probably called after INBOUND IPsec processing.
+ *
+ * Special check for tunnel-mode packets.
+ * We must check consistency between the inner and outer IP headers.
+ *
+ * XXX more checks to be provided
+ */
+int
+key_checktunnelsanity(sav, family, src, dst)
+ struct secasvar *sav;
+ u_int family;
+ caddr_t src;
+ caddr_t dst;
+{
+ IPSEC_ASSERT(sav->sah != NULL, ("null SA header"));
+
+ /* XXX: check inner IP header */
+
+ return 1;
+}
+
+/* record data transfer on SA, and update timestamps */
+void
+key_sa_recordxfer(sav, m)
+ struct secasvar *sav;
+ struct mbuf *m;
+{
+ IPSEC_ASSERT(sav != NULL, ("Null secasvar"));
+ IPSEC_ASSERT(m != NULL, ("Null mbuf"));
+ if (!sav->lft_c)
+ return;
+
+ /*
+	 * XXX Currently, the recorded byte counts differ between
+	 * inbound and outbound processing.
+ */
+ sav->lft_c->bytes += m->m_pkthdr.len;
+ /* to check bytes lifetime is done in key_timehandler(). */
+
+ /*
+ * We use the number of packets as the unit of
+ * allocations. We increment the variable
+ * whenever {esp,ah}_{in,out}put is called.
+ */
+ sav->lft_c->allocations++;
+ /* XXX check for expires? */
+
+ /*
+ * NOTE: We record CURRENT usetime by using wall clock,
+ * in seconds. HARD and SOFT lifetime are measured by the time
+ * difference (again in seconds) from usetime.
+ *
+ * usetime
+ * v expire expire
+ * -----+-----+--------+---> t
+ * <--------------> HARD
+ * <-----> SOFT
+ */
+ sav->lft_c->usetime = time_second;
+ /* XXX check for expires? */
+
+ return;
+}
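
The counters recorded here are later compared against the SOFT and HARD limits by key_timehandler() (defined earlier in this file). A hedged sketch of the byte-count part of that comparison; the real code also checks addtime/usetime and notifies the IKE daemon with an SADB_EXPIRE message via key_expire():

	/* sketch only: runs under SAHTREE_LOCK in the timer handler */
	if (sav->lft_c != NULL && sav->lft_s != NULL &&
	    sav->lft_s->bytes != 0 &&
	    sav->lft_c->bytes >= sav->lft_s->bytes) {
		/* soft byte limit crossed: ask the IKE daemon to rekey */
		key_expire(sav);
	}
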
+
+/* dumb version */
+void
+key_sa_routechange(dst)
+ struct sockaddr *dst;
+{
+ struct secashead *sah;
+ struct route *ro;
+
+ SAHTREE_LOCK();
+ LIST_FOREACH(sah, &V_sahtree, chain) {
+ ro = &sah->route_cache.sa_route;
+ if (ro->ro_rt && dst->sa_len == ro->ro_dst.sa_len
+ && bcmp(dst, &ro->ro_dst, dst->sa_len) == 0) {
+ RTFREE(ro->ro_rt);
+ ro->ro_rt = (struct rtentry *)NULL;
+ }
+ }
+ SAHTREE_UNLOCK();
+}
+
+static void
+key_sa_chgstate(struct secasvar *sav, u_int8_t state)
+{
+ IPSEC_ASSERT(sav != NULL, ("NULL sav"));
+ SAHTREE_LOCK_ASSERT();
+
+ if (sav->state != state) {
+ if (__LIST_CHAINED(sav))
+ LIST_REMOVE(sav, chain);
+ sav->state = state;
+ LIST_INSERT_HEAD(&sav->sah->savtree[state], sav, chain);
+ }
+}
+
+void
+key_sa_stir_iv(sav)
+ struct secasvar *sav;
+{
+
+ IPSEC_ASSERT(sav->iv != NULL, ("null IV"));
+ key_randomfill(sav->iv, sav->ivlen);
+}
+
+/* XXX too much? */
+static struct mbuf *
+key_alloc_mbuf(l)
+ int l;
+{
+ struct mbuf *m = NULL, *n;
+ int len, t;
+
+ len = l;
+ while (len > 0) {
+ MGET(n, M_DONTWAIT, MT_DATA);
+ if (n && len > MLEN)
+ MCLGET(n, M_DONTWAIT);
+ if (!n) {
+ m_freem(m);
+ return NULL;
+ }
+
+ n->m_next = NULL;
+ n->m_len = 0;
+ n->m_len = M_TRAILINGSPACE(n);
+ /* use the bottom of mbuf, hoping we can prepend afterwards */
+ if (n->m_len > len) {
+ t = (n->m_len - len) & ~(sizeof(long) - 1);
+ n->m_data += t;
+ n->m_len = len;
+ }
+
+ len -= n->m_len;
+
+ if (m)
+ m_cat(m, n);
+ else
+ m = n;
+ }
+
+ return m;
+}
+
+/*
+ * Take one of the kernel's security keys and convert it into a PF_KEY
+ * structure within an mbuf, suitable for sending up to a waiting
+ * application in user land.
+ *
+ * IN:
+ * src: A pointer to a kernel security key.
+ * exttype: Which type of key this is. Refer to the PF_KEY data structures.
+ * OUT:
+ * a valid mbuf or NULL indicating an error
+ *
+ */
+
+static struct mbuf *
+key_setkey(struct seckey *src, u_int16_t exttype)
+{
+ struct mbuf *m;
+ struct sadb_key *p;
+ int len;
+
+ if (src == NULL)
+ return NULL;
+
+ len = PFKEY_ALIGN8(sizeof(struct sadb_key) + _KEYLEN(src));
+ m = key_alloc_mbuf(len);
+ if (m == NULL)
+ return NULL;
+ p = mtod(m, struct sadb_key *);
+ bzero(p, len);
+ p->sadb_key_len = PFKEY_UNIT64(len);
+ p->sadb_key_exttype = exttype;
+ p->sadb_key_bits = src->bits;
+ bcopy(src->key_data, _KEYBUF(p), _KEYLEN(src));
+
+ return m;
+}
+
+/*
+ * Take one of the kernel's lifetime data structures and convert it
+ * into a PF_KEY structure within an mbuf, suitable for sending up to
+ * a waiting application in user land.
+ *
+ * IN:
+ * src: A pointer to a kernel lifetime structure.
+ * exttype: Which type of lifetime this is. Refer to the PF_KEY
+ * data structures for more information.
+ * OUT:
+ * a valid mbuf or NULL indicating an error
+ *
+ */
+
+static struct mbuf *
+key_setlifetime(struct seclifetime *src, u_int16_t exttype)
+{
+ struct mbuf *m = NULL;
+ struct sadb_lifetime *p;
+ int len = PFKEY_ALIGN8(sizeof(struct sadb_lifetime));
+
+ if (src == NULL)
+ return NULL;
+
+ m = key_alloc_mbuf(len);
+ if (m == NULL)
+ return m;
+ p = mtod(m, struct sadb_lifetime *);
+
+ bzero(p, len);
+ p->sadb_lifetime_len = PFKEY_UNIT64(len);
+ p->sadb_lifetime_exttype = exttype;
+ p->sadb_lifetime_allocations = src->allocations;
+ p->sadb_lifetime_bytes = src->bytes;
+ p->sadb_lifetime_addtime = src->addtime;
+ p->sadb_lifetime_usetime = src->usetime;
+
+ return m;
+
+}
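
A hedged sketch of how the two builders above combine into a reply; key_setdumpsa(), used by key_dump() earlier in this file, chains extensions in this way (error paths abbreviated):

	static struct mbuf *
	example_reply(struct secasvar *sav)
	{
		struct mbuf *m, *n;

		m = key_setlifetime(sav->lft_c, SADB_EXT_LIFETIME_CURRENT);
		if (m == NULL)
			return NULL;
		n = key_setkey(sav->key_auth, SADB_EXT_KEY_AUTH);
		if (n != NULL)
			m_cat(m, n);	/* extensions are byte-concatenated */
		return m;
	}
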
diff --git a/rtems/freebsd/netipsec/key.h b/rtems/freebsd/netipsec/key.h
new file mode 100644
index 00000000..4d2b3083
--- /dev/null
+++ b/rtems/freebsd/netipsec/key.h
@@ -0,0 +1,127 @@
+/* $FreeBSD$ */
+/* $KAME: key.h,v 1.21 2001/07/27 03:51:30 itojun Exp $ */
+
+/*-
+ * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the project nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _NETIPSEC_KEY_HH_
+#define _NETIPSEC_KEY_HH_
+
+#ifdef _KERNEL
+
+struct secpolicy;
+struct secpolicyindex;
+struct ipsecrequest;
+struct secasvar;
+struct sockaddr;
+struct socket;
+struct sadb_msg;
+struct sadb_x_policy;
+struct secasindex;
+union sockaddr_union;
+
+extern void key_addref(struct secpolicy *sp);
+extern int key_havesp(u_int dir);
+extern struct secpolicy *key_allocsp(struct secpolicyindex *, u_int,
+ const char*, int);
+extern struct secpolicy *key_allocsp2(u_int32_t spi, union sockaddr_union *dst,
+ u_int8_t proto, u_int dir, const char*, int);
+extern struct secpolicy *key_newsp(const char*, int);
+#if 0
+extern struct secpolicy *key_gettunnel(const struct sockaddr *,
+ const struct sockaddr *, const struct sockaddr *,
+ const struct sockaddr *, const char*, int);
+#endif
+/* NB: prepend with _ for KAME IPv6 compatibility */
+extern void _key_freesp(struct secpolicy **, const char*, int);
+
+#define KEY_ALLOCSP(spidx, dir) \
+ key_allocsp(spidx, dir, __FILE__, __LINE__)
+#define KEY_ALLOCSP2(spi, dst, proto, dir) \
+ key_allocsp2(spi, dst, proto, dir, __FILE__, __LINE__)
+#define KEY_NEWSP() \
+ key_newsp(__FILE__, __LINE__)
+#if 0
+#define KEY_GETTUNNEL(osrc, odst, isrc, idst) \
+ key_gettunnel(osrc, odst, isrc, idst, __FILE__, __LINE__)
+#endif
+#define KEY_FREESP(spp) \
+ _key_freesp(spp, __FILE__, __LINE__)
+
+extern struct secasvar *key_allocsa(union sockaddr_union *, u_int, u_int32_t,
+ const char*, int);
+extern void key_freesav(struct secasvar **, const char*, int);
+
+#define KEY_ALLOCSA(dst, proto, spi) \
+ key_allocsa(dst, proto, spi, __FILE__, __LINE__)
+#define KEY_FREESAV(psav) \
+ key_freesav(psav, __FILE__, __LINE__)
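
Illustrative use of the SA lookup pair (a sketch; dst is a union sockaddr_union and the macros silently append __FILE__/__LINE__ for the reference-count audit trail):

	static int
	example_lookup(union sockaddr_union *dst, u_int32_t spi)
	{
		struct secasvar *sav;

		sav = KEY_ALLOCSA(dst, IPPROTO_ESP, spi); /* takes a ref */
		if (sav == NULL)
			return ENOENT;		/* no matching SA */
		/* ... use the SA while the reference is held ... */
		KEY_FREESAV(&sav);	/* drops the ref and NULLs sav */
		return 0;
	}
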
+
+extern void key_freeso __P((struct socket *));
+extern int key_checktunnelsanity __P((struct secasvar *, u_int,
+ caddr_t, caddr_t));
+extern int key_checkrequest
+ __P((struct ipsecrequest *isr, const struct secasindex *));
+
+extern struct secpolicy *key_msg2sp __P((struct sadb_x_policy *,
+ size_t, int *));
+extern struct mbuf *key_sp2msg __P((struct secpolicy *));
+extern int key_ismyaddr __P((struct sockaddr *));
+extern int key_spdacquire __P((struct secpolicy *));
+extern void key_timehandler __P((void));
+extern u_long key_random __P((void));
+extern void key_randomfill __P((void *, size_t));
+extern void key_freereg __P((struct socket *));
+extern int key_parse __P((struct mbuf *, struct socket *));
+extern void key_init __P((void));
+#ifdef VIMAGE
+extern void key_destroy(void);
+#endif
+extern void key_sa_recordxfer __P((struct secasvar *, struct mbuf *));
+extern void key_sa_routechange __P((struct sockaddr *));
+extern void key_sa_stir_iv __P((struct secasvar *));
+#ifdef IPSEC_NAT_T
+u_int16_t key_portfromsaddr(struct sockaddr *);
+#define KEY_PORTFROMSADDR(saddr) \
+ key_portfromsaddr((struct sockaddr *)(saddr))
+#endif
+
+#ifdef MALLOC_DECLARE
+MALLOC_DECLARE(M_IPSEC_SA);
+MALLOC_DECLARE(M_IPSEC_SAH);
+MALLOC_DECLARE(M_IPSEC_SP);
+MALLOC_DECLARE(M_IPSEC_SR);
+MALLOC_DECLARE(M_IPSEC_MISC);
+MALLOC_DECLARE(M_IPSEC_SAQ);
+MALLOC_DECLARE(M_IPSEC_SAR);
+MALLOC_DECLARE(M_IPSEC_INPCB);
+#endif /* MALLOC_DECLARE */
+
+#endif /* defined(_KERNEL) */
+#endif /* _NETIPSEC_KEY_HH_ */
diff --git a/rtems/freebsd/netipsec/key_debug.c b/rtems/freebsd/netipsec/key_debug.c
new file mode 100644
index 00000000..95501abd
--- /dev/null
+++ b/rtems/freebsd/netipsec/key_debug.c
@@ -0,0 +1,771 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/* $FreeBSD$ */
+/* $KAME: key_debug.c,v 1.26 2001/06/27 10:46:50 sakane Exp $ */
+
+/*-
+ * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the project nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifdef _KERNEL
+#include <rtems/freebsd/local/opt_inet.h>
+#include <rtems/freebsd/local/opt_inet6.h>
+#include <rtems/freebsd/local/opt_ipsec.h>
+#endif
+
+#include <rtems/freebsd/sys/types.h>
+#include <rtems/freebsd/sys/param.h>
+#ifdef _KERNEL
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/queue.h>
+#endif
+#include <rtems/freebsd/sys/socket.h>
+
+#include <rtems/freebsd/net/route.h>
+#include <rtems/freebsd/net/vnet.h>
+
+#include <rtems/freebsd/netipsec/key_var.h>
+#include <rtems/freebsd/netipsec/key_debug.h>
+
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netipsec/ipsec.h>
+#ifdef _KERNEL
+#include <rtems/freebsd/netipsec/keydb.h>
+#endif
+
+#ifndef _KERNEL
+#include <rtems/freebsd/ctype.h>
+#include <rtems/freebsd/stdio.h>
+#include <rtems/freebsd/stdlib.h>
+#endif /* !_KERNEL */
+
+static void kdebug_sadb_prop __P((struct sadb_ext *));
+static void kdebug_sadb_identity __P((struct sadb_ext *));
+static void kdebug_sadb_supported __P((struct sadb_ext *));
+static void kdebug_sadb_lifetime __P((struct sadb_ext *));
+static void kdebug_sadb_sa __P((struct sadb_ext *));
+static void kdebug_sadb_address __P((struct sadb_ext *));
+static void kdebug_sadb_key __P((struct sadb_ext *));
+static void kdebug_sadb_x_sa2 __P((struct sadb_ext *));
+
+#ifdef _KERNEL
+static void kdebug_secreplay __P((struct secreplay *));
+#endif
+
+#ifndef _KERNEL
+#define panic(fmt, ...) { printf(fmt, ## __VA_ARGS__); exit(-1); }
+#endif
+
+/* NOTE: host byte order */
+
+/* %%%: about struct sadb_msg */
+void
+kdebug_sadb(base)
+ struct sadb_msg *base;
+{
+ struct sadb_ext *ext;
+ int tlen, extlen;
+
+ /* sanity check */
+ if (base == NULL)
+ panic("%s: NULL pointer was passed.\n", __func__);
+
+ printf("sadb_msg{ version=%u type=%u errno=%u satype=%u\n",
+ base->sadb_msg_version, base->sadb_msg_type,
+ base->sadb_msg_errno, base->sadb_msg_satype);
+ printf(" len=%u reserved=%u seq=%u pid=%u\n",
+ base->sadb_msg_len, base->sadb_msg_reserved,
+ base->sadb_msg_seq, base->sadb_msg_pid);
+
+ tlen = PFKEY_UNUNIT64(base->sadb_msg_len) - sizeof(struct sadb_msg);
+ ext = (struct sadb_ext *)((caddr_t)base + sizeof(struct sadb_msg));
+
+ while (tlen > 0) {
+ printf("sadb_ext{ len=%u type=%u }\n",
+ ext->sadb_ext_len, ext->sadb_ext_type);
+
+ if (ext->sadb_ext_len == 0) {
+ printf("%s: invalid ext_len=0 was passed.\n", __func__);
+ return;
+ }
+ if (ext->sadb_ext_len > tlen) {
+ printf("%s: ext_len too big (%u > %u).\n",
+ __func__, ext->sadb_ext_len, tlen);
+ return;
+ }
+
+ switch (ext->sadb_ext_type) {
+ case SADB_EXT_SA:
+ kdebug_sadb_sa(ext);
+ break;
+ case SADB_EXT_LIFETIME_CURRENT:
+ case SADB_EXT_LIFETIME_HARD:
+ case SADB_EXT_LIFETIME_SOFT:
+ kdebug_sadb_lifetime(ext);
+ break;
+ case SADB_EXT_ADDRESS_SRC:
+ case SADB_EXT_ADDRESS_DST:
+ case SADB_EXT_ADDRESS_PROXY:
+ kdebug_sadb_address(ext);
+ break;
+ case SADB_EXT_KEY_AUTH:
+ case SADB_EXT_KEY_ENCRYPT:
+ kdebug_sadb_key(ext);
+ break;
+ case SADB_EXT_IDENTITY_SRC:
+ case SADB_EXT_IDENTITY_DST:
+ kdebug_sadb_identity(ext);
+ break;
+ case SADB_EXT_SENSITIVITY:
+ break;
+ case SADB_EXT_PROPOSAL:
+ kdebug_sadb_prop(ext);
+ break;
+ case SADB_EXT_SUPPORTED_AUTH:
+ case SADB_EXT_SUPPORTED_ENCRYPT:
+ kdebug_sadb_supported(ext);
+ break;
+ case SADB_EXT_SPIRANGE:
+ case SADB_X_EXT_KMPRIVATE:
+ break;
+ case SADB_X_EXT_POLICY:
+ kdebug_sadb_x_policy(ext);
+ break;
+ case SADB_X_EXT_SA2:
+ kdebug_sadb_x_sa2(ext);
+ break;
+ default:
+ printf("%s: invalid ext_type %u\n", __func__,
+ ext->sadb_ext_type);
+ return;
+ }
+
+ extlen = PFKEY_UNUNIT64(ext->sadb_ext_len);
+ tlen -= extlen;
+ ext = (struct sadb_ext *)((caddr_t)ext + extlen);
+ }
+
+ return;
+}
+
+static void
+kdebug_sadb_prop(ext)
+ struct sadb_ext *ext;
+{
+ struct sadb_prop *prop = (struct sadb_prop *)ext;
+ struct sadb_comb *comb;
+ int len;
+
+ /* sanity check */
+ if (ext == NULL)
+ panic("%s: NULL pointer was passed.\n", __func__);
+
+ len = (PFKEY_UNUNIT64(prop->sadb_prop_len) - sizeof(*prop))
+ / sizeof(*comb);
+ comb = (struct sadb_comb *)(prop + 1);
+ printf("sadb_prop{ replay=%u\n", prop->sadb_prop_replay);
+
+ while (len--) {
+ printf("sadb_comb{ auth=%u encrypt=%u "
+ "flags=0x%04x reserved=0x%08x\n",
+ comb->sadb_comb_auth, comb->sadb_comb_encrypt,
+ comb->sadb_comb_flags, comb->sadb_comb_reserved);
+
+ printf(" auth_minbits=%u auth_maxbits=%u "
+ "encrypt_minbits=%u encrypt_maxbits=%u\n",
+ comb->sadb_comb_auth_minbits,
+ comb->sadb_comb_auth_maxbits,
+ comb->sadb_comb_encrypt_minbits,
+ comb->sadb_comb_encrypt_maxbits);
+
+ printf(" soft_alloc=%u hard_alloc=%u "
+ "soft_bytes=%lu hard_bytes=%lu\n",
+ comb->sadb_comb_soft_allocations,
+ comb->sadb_comb_hard_allocations,
+ (unsigned long)comb->sadb_comb_soft_bytes,
+ (unsigned long)comb->sadb_comb_hard_bytes);
+
+		printf(" soft_addtime=%lu hard_addtime=%lu "
+			"soft_usetime=%lu hard_usetime=%lu }\n",
+ (unsigned long)comb->sadb_comb_soft_addtime,
+ (unsigned long)comb->sadb_comb_hard_addtime,
+ (unsigned long)comb->sadb_comb_soft_usetime,
+ (unsigned long)comb->sadb_comb_hard_usetime);
+ comb++;
+ }
+ printf("}\n");
+
+ return;
+}
+
+static void
+kdebug_sadb_identity(ext)
+ struct sadb_ext *ext;
+{
+ struct sadb_ident *id = (struct sadb_ident *)ext;
+ int len;
+
+ /* sanity check */
+ if (ext == NULL)
+ panic("%s: NULL pointer was passed.\n", __func__);
+
+ len = PFKEY_UNUNIT64(id->sadb_ident_len) - sizeof(*id);
+ printf("sadb_ident_%s{",
+ id->sadb_ident_exttype == SADB_EXT_IDENTITY_SRC ? "src" : "dst");
+ switch (id->sadb_ident_type) {
+ default:
+ printf(" type=%d id=%lu",
+ id->sadb_ident_type, (u_long)id->sadb_ident_id);
+ if (len) {
+#ifdef _KERNEL
+ ipsec_hexdump((caddr_t)(id + 1), len); /*XXX cast ?*/
+#else
+ char *p, *ep;
+ printf("\n str=\"");
+ p = (char *)(id + 1);
+ ep = p + len;
+			for (/*nothing*/; p < ep && *p; p++) {
+ if (isprint(*p))
+ printf("%c", *p & 0xff);
+ else
+ printf("\\%03o", *p & 0xff);
+ }
+#endif
+ printf("\"");
+ }
+ break;
+ }
+
+ printf(" }\n");
+
+ return;
+}
+
+static void
+kdebug_sadb_supported(ext)
+ struct sadb_ext *ext;
+{
+ struct sadb_supported *sup = (struct sadb_supported *)ext;
+ struct sadb_alg *alg;
+ int len;
+
+ /* sanity check */
+ if (ext == NULL)
+ panic("%s: NULL pointer was passed.\n", __func__);
+
+ len = (PFKEY_UNUNIT64(sup->sadb_supported_len) - sizeof(*sup))
+ / sizeof(*alg);
+ alg = (struct sadb_alg *)(sup + 1);
+ printf("sadb_sup{\n");
+ while (len--) {
+ printf(" { id=%d ivlen=%d min=%d max=%d }\n",
+ alg->sadb_alg_id, alg->sadb_alg_ivlen,
+ alg->sadb_alg_minbits, alg->sadb_alg_maxbits);
+ alg++;
+ }
+ printf("}\n");
+
+ return;
+}
+
+static void
+kdebug_sadb_lifetime(ext)
+ struct sadb_ext *ext;
+{
+ struct sadb_lifetime *lft = (struct sadb_lifetime *)ext;
+
+ /* sanity check */
+ if (ext == NULL)
+ panic("%s: NULL pointer was passed.\n", __func__);
+
+ printf("sadb_lifetime{ alloc=%u, bytes=%u\n",
+ lft->sadb_lifetime_allocations,
+ (u_int32_t)lft->sadb_lifetime_bytes);
+ printf(" addtime=%u, usetime=%u }\n",
+ (u_int32_t)lft->sadb_lifetime_addtime,
+ (u_int32_t)lft->sadb_lifetime_usetime);
+
+ return;
+}
+
+static void
+kdebug_sadb_sa(ext)
+ struct sadb_ext *ext;
+{
+ struct sadb_sa *sa = (struct sadb_sa *)ext;
+
+ /* sanity check */
+ if (ext == NULL)
+ panic("%s: NULL pointer was passed.\n", __func__);
+
+ printf("sadb_sa{ spi=%u replay=%u state=%u\n",
+ (u_int32_t)ntohl(sa->sadb_sa_spi), sa->sadb_sa_replay,
+ sa->sadb_sa_state);
+ printf(" auth=%u encrypt=%u flags=0x%08x }\n",
+ sa->sadb_sa_auth, sa->sadb_sa_encrypt, sa->sadb_sa_flags);
+
+ return;
+}
+
+static void
+kdebug_sadb_address(ext)
+ struct sadb_ext *ext;
+{
+ struct sadb_address *addr = (struct sadb_address *)ext;
+
+ /* sanity check */
+ if (ext == NULL)
+ panic("%s: NULL pointer was passed.\n", __func__);
+
+ printf("sadb_address{ proto=%u prefixlen=%u reserved=0x%02x%02x }\n",
+ addr->sadb_address_proto, addr->sadb_address_prefixlen,
+ ((u_char *)&addr->sadb_address_reserved)[0],
+ ((u_char *)&addr->sadb_address_reserved)[1]);
+
+ kdebug_sockaddr((struct sockaddr *)((caddr_t)ext + sizeof(*addr)));
+
+ return;
+}
+
+static void
+kdebug_sadb_key(ext)
+ struct sadb_ext *ext;
+{
+ struct sadb_key *key = (struct sadb_key *)ext;
+
+ /* sanity check */
+ if (ext == NULL)
+ panic("%s: NULL pointer was passed.\n", __func__);
+
+ printf("sadb_key{ bits=%u reserved=%u\n",
+ key->sadb_key_bits, key->sadb_key_reserved);
+ printf(" key=");
+
+ /* sanity check 2 */
+ if ((key->sadb_key_bits >> 3) >
+ (PFKEY_UNUNIT64(key->sadb_key_len) - sizeof(struct sadb_key))) {
+ printf("%s: key length mismatch, bit:%d len:%ld.\n",
+ __func__,
+ key->sadb_key_bits >> 3,
+ (long)PFKEY_UNUNIT64(key->sadb_key_len) - sizeof(struct sadb_key));
+ }
+
+ ipsec_hexdump((caddr_t)key + sizeof(struct sadb_key),
+ key->sadb_key_bits >> 3);
+ printf(" }\n");
+ return;
+}
+
+static void
+kdebug_sadb_x_sa2(ext)
+ struct sadb_ext *ext;
+{
+ struct sadb_x_sa2 *sa2 = (struct sadb_x_sa2 *)ext;
+
+ /* sanity check */
+ if (ext == NULL)
+ panic("%s: NULL pointer was passed.\n", __func__);
+
+ printf("sadb_x_sa2{ mode=%u reqid=%u\n",
+ sa2->sadb_x_sa2_mode, sa2->sadb_x_sa2_reqid);
+ printf(" reserved1=%u reserved2=%u sequence=%u }\n",
+ sa2->sadb_x_sa2_reserved1, sa2->sadb_x_sa2_reserved2,
+ sa2->sadb_x_sa2_sequence);
+
+ return;
+}
+
+void
+kdebug_sadb_x_policy(ext)
+ struct sadb_ext *ext;
+{
+ struct sadb_x_policy *xpl = (struct sadb_x_policy *)ext;
+ struct sockaddr *addr;
+
+ /* sanity check */
+ if (ext == NULL)
+ panic("%s: NULL pointer was passed.\n", __func__);
+
+ printf("sadb_x_policy{ type=%u dir=%u id=%x }\n",
+ xpl->sadb_x_policy_type, xpl->sadb_x_policy_dir,
+ xpl->sadb_x_policy_id);
+
+ if (xpl->sadb_x_policy_type == IPSEC_POLICY_IPSEC) {
+ int tlen;
+ struct sadb_x_ipsecrequest *xisr;
+
+ tlen = PFKEY_UNUNIT64(xpl->sadb_x_policy_len) - sizeof(*xpl);
+ xisr = (struct sadb_x_ipsecrequest *)(xpl + 1);
+
+ while (tlen > 0) {
+ printf(" { len=%u proto=%u mode=%u level=%u reqid=%u\n",
+ xisr->sadb_x_ipsecrequest_len,
+ xisr->sadb_x_ipsecrequest_proto,
+ xisr->sadb_x_ipsecrequest_mode,
+ xisr->sadb_x_ipsecrequest_level,
+ xisr->sadb_x_ipsecrequest_reqid);
+
+ if (xisr->sadb_x_ipsecrequest_len > sizeof(*xisr)) {
+ addr = (struct sockaddr *)(xisr + 1);
+ kdebug_sockaddr(addr);
+ addr = (struct sockaddr *)((caddr_t)addr
+ + addr->sa_len);
+ kdebug_sockaddr(addr);
+ }
+
+ printf(" }\n");
+
+ /* prevent infinite loop */
+ if (xisr->sadb_x_ipsecrequest_len <= 0) {
+ printf("%s: wrong policy struct.\n", __func__);
+ return;
+ }
+ /* prevent overflow */
+ if (xisr->sadb_x_ipsecrequest_len > tlen) {
+ printf("%s: invalid ipsec policy length "
+ "(%u > %u)\n", __func__,
+ xisr->sadb_x_ipsecrequest_len, tlen);
+ return;
+ }
+
+ tlen -= xisr->sadb_x_ipsecrequest_len;
+
+ xisr = (struct sadb_x_ipsecrequest *)((caddr_t)xisr
+ + xisr->sadb_x_ipsecrequest_len);
+ }
+
+ if (tlen != 0)
+ panic("%s: wrong policy struct.\n", __func__);
+ }
+
+ return;
+}
+
+#ifdef _KERNEL
+/* %%%: about SPD and SAD */
+void
+kdebug_secpolicy(sp)
+ struct secpolicy *sp;
+{
+ /* sanity check */
+ if (sp == NULL)
+ panic("%s: NULL pointer was passed.\n", __func__);
+
+ printf("secpolicy{ refcnt=%u state=%u policy=%u\n",
+ sp->refcnt, sp->state, sp->policy);
+
+ kdebug_secpolicyindex(&sp->spidx);
+
+ switch (sp->policy) {
+ case IPSEC_POLICY_DISCARD:
+ printf(" type=discard }\n");
+ break;
+ case IPSEC_POLICY_NONE:
+ printf(" type=none }\n");
+ break;
+ case IPSEC_POLICY_IPSEC:
+ {
+ struct ipsecrequest *isr;
+ for (isr = sp->req; isr != NULL; isr = isr->next) {
+
+ printf(" level=%u\n", isr->level);
+ kdebug_secasindex(&isr->saidx);
+
+ if (isr->sav != NULL)
+ kdebug_secasv(isr->sav);
+ }
+ printf(" }\n");
+ }
+ break;
+ case IPSEC_POLICY_BYPASS:
+ printf(" type=bypass }\n");
+ break;
+ case IPSEC_POLICY_ENTRUST:
+ printf(" type=entrust }\n");
+ break;
+ default:
+ printf("%s: Invalid policy found. %d\n", __func__, sp->policy);
+ break;
+ }
+
+ return;
+}
+
+void
+kdebug_secpolicyindex(spidx)
+ struct secpolicyindex *spidx;
+{
+ /* sanity check */
+ if (spidx == NULL)
+ panic("%s: NULL pointer was passed.\n", __func__);
+
+ printf("secpolicyindex{ dir=%u prefs=%u prefd=%u ul_proto=%u\n",
+ spidx->dir, spidx->prefs, spidx->prefd, spidx->ul_proto);
+
+ ipsec_hexdump((caddr_t)&spidx->src,
+ ((struct sockaddr *)&spidx->src)->sa_len);
+ printf("\n");
+ ipsec_hexdump((caddr_t)&spidx->dst,
+ ((struct sockaddr *)&spidx->dst)->sa_len);
+ printf("}\n");
+
+ return;
+}
+
+void
+kdebug_secasindex(saidx)
+ struct secasindex *saidx;
+{
+ /* sanity check */
+ if (saidx == NULL)
+ panic("%s: NULL pointer was passed.\n", __func__);
+
+ printf("secasindex{ mode=%u proto=%u\n",
+ saidx->mode, saidx->proto);
+
+ ipsec_hexdump((caddr_t)&saidx->src,
+ ((struct sockaddr *)&saidx->src)->sa_len);
+ printf("\n");
+ ipsec_hexdump((caddr_t)&saidx->dst,
+ ((struct sockaddr *)&saidx->dst)->sa_len);
+ printf("\n");
+
+ return;
+}
+
+static void
+kdebug_sec_lifetime(struct seclifetime *lft)
+{
+ /* sanity check */
+ if (lft == NULL)
+ panic("%s: NULL pointer was passed.\n", __func__);
+
+ printf("sec_lifetime{ alloc=%u, bytes=%u\n",
+ lft->allocations, (u_int32_t)lft->bytes);
+ printf(" addtime=%u, usetime=%u }\n",
+ (u_int32_t)lft->addtime, (u_int32_t)lft->usetime);
+
+ return;
+}
+
+void
+kdebug_secasv(sav)
+ struct secasvar *sav;
+{
+ /* sanity check */
+ if (sav == NULL)
+ panic("%s: NULL pointer was passed.\n", __func__);
+
+ printf("secas{");
+ kdebug_secasindex(&sav->sah->saidx);
+
+ printf(" refcnt=%u state=%u auth=%u enc=%u\n",
+ sav->refcnt, sav->state, sav->alg_auth, sav->alg_enc);
+ printf(" spi=%u flags=%u\n",
+ (u_int32_t)ntohl(sav->spi), sav->flags);
+
+ if (sav->key_auth != NULL)
+ kdebug_sadb_key((struct sadb_ext *)sav->key_auth);
+ if (sav->key_enc != NULL)
+ kdebug_sadb_key((struct sadb_ext *)sav->key_enc);
+ if (sav->iv != NULL) {
+ printf(" iv=");
+ ipsec_hexdump(sav->iv, sav->ivlen ? sav->ivlen : 8);
+ printf("\n");
+ }
+
+ if (sav->replay != NULL)
+ kdebug_secreplay(sav->replay);
+ if (sav->lft_c != NULL)
+ kdebug_sec_lifetime(sav->lft_c);
+ if (sav->lft_h != NULL)
+ kdebug_sec_lifetime(sav->lft_h);
+ if (sav->lft_s != NULL)
+ kdebug_sec_lifetime(sav->lft_s);
+
+#ifdef notyet
+ /* XXX: misc[123] ? */
+#endif
+
+ return;
+}
+
+static void
+kdebug_secreplay(rpl)
+ struct secreplay *rpl;
+{
+ int len, l;
+
+ /* sanity check */
+ if (rpl == NULL)
+ panic("%s: NULL pointer was passed.\n", __func__);
+
+ printf(" secreplay{ count=%u wsize=%u seq=%u lastseq=%u",
+ rpl->count, rpl->wsize, rpl->seq, rpl->lastseq);
+
+ if (rpl->bitmap == NULL) {
+ printf(" }\n");
+ return;
+ }
+
+ printf("\n bitmap { ");
+
+ for (len = 0; len < rpl->wsize; len++) {
+ for (l = 7; l >= 0; l--)
+ printf("%u", (((rpl->bitmap)[len] >> l) & 1) ? 1 : 0);
+ }
+ printf(" }\n");
+
+ return;
+}
+
+void
+kdebug_mbufhdr(m)
+ struct mbuf *m;
+{
+ /* sanity check */
+ if (m == NULL)
+ return;
+
+ printf("mbuf(%p){ m_next:%p m_nextpkt:%p m_data:%p "
+ "m_len:%d m_type:0x%02x m_flags:0x%02x }\n",
+ m, m->m_next, m->m_nextpkt, m->m_data,
+ m->m_len, m->m_type, m->m_flags);
+
+ if (m->m_flags & M_PKTHDR) {
+ printf(" m_pkthdr{ len:%d rcvif:%p }\n",
+ m->m_pkthdr.len, m->m_pkthdr.rcvif);
+ }
+
+ if (m->m_flags & M_EXT) {
+ printf(" m_ext{ ext_buf:%p ext_free:%p "
+ "ext_size:%u ref_cnt:%p }\n",
+ m->m_ext.ext_buf, m->m_ext.ext_free,
+ m->m_ext.ext_size, m->m_ext.ref_cnt);
+ }
+
+ return;
+}
+
+void
+kdebug_mbuf(m0)
+ struct mbuf *m0;
+{
+ struct mbuf *m = m0;
+ int i, j;
+
+ for (j = 0; m; m = m->m_next) {
+ kdebug_mbufhdr(m);
+ printf(" m_data:\n");
+ for (i = 0; i < m->m_len; i++) {
+ if (i && i % 32 == 0)
+ printf("\n");
+ if (i % 4 == 0)
+ printf(" ");
+ printf("%02x", mtod(m, u_char *)[i]);
+ j++;
+ }
+ printf("\n");
+ }
+
+ return;
+}
+#endif /* _KERNEL */
+
+void
+kdebug_sockaddr(addr)
+ struct sockaddr *addr;
+{
+ struct sockaddr_in *sin4;
+#ifdef INET6
+ struct sockaddr_in6 *sin6;
+#endif
+
+ /* sanity check */
+ if (addr == NULL)
+ panic("%s: NULL pointer was passed.\n", __func__);
+
+	/* NOTE: port numbers are displayed in host byte order. */
+ printf("sockaddr{ len=%u family=%u", addr->sa_len, addr->sa_family);
+
+ switch (addr->sa_family) {
+ case AF_INET:
+ sin4 = (struct sockaddr_in *)addr;
+ printf(" port=%u\n", ntohs(sin4->sin_port));
+ ipsec_hexdump((caddr_t)&sin4->sin_addr, sizeof(sin4->sin_addr));
+ break;
+#ifdef INET6
+ case AF_INET6:
+ sin6 = (struct sockaddr_in6 *)addr;
+ printf(" port=%u\n", ntohs(sin6->sin6_port));
+ printf(" flowinfo=0x%08x, scope_id=0x%08x\n",
+ sin6->sin6_flowinfo, sin6->sin6_scope_id);
+ ipsec_hexdump((caddr_t)&sin6->sin6_addr,
+ sizeof(sin6->sin6_addr));
+ break;
+#endif
+ }
+
+ printf(" }\n");
+
+ return;
+}
+
+void
+ipsec_bindump(buf, len)
+ caddr_t buf;
+ int len;
+{
+ int i;
+
+ for (i = 0; i < len; i++)
+ printf("%c", (unsigned char)buf[i]);
+
+ return;
+}
+
+
+void
+ipsec_hexdump(buf, len)
+ caddr_t buf;
+ int len;
+{
+ int i;
+
+ for (i = 0; i < len; i++) {
+ if (i != 0 && i % 32 == 0) printf("\n");
+ if (i % 4 == 0) printf(" ");
+ printf("%02x", (unsigned char)buf[i]);
+ }
+#if 0
+ if (i % 32 != 0) printf("\n");
+#endif
+
+ return;
+}
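
From the loop above, each byte prints as two hex digits, in 4-byte groups, 32 bytes per row; a 20-byte buffer holding 0x00..0x13 would render as:

	 00010203 04050607 08090a0b 0c0d0e0f 10111213
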
diff --git a/rtems/freebsd/netipsec/key_debug.h b/rtems/freebsd/netipsec/key_debug.h
new file mode 100644
index 00000000..26dfdbcf
--- /dev/null
+++ b/rtems/freebsd/netipsec/key_debug.h
@@ -0,0 +1,89 @@
+/* $FreeBSD$ */
+/* $KAME: key_debug.h,v 1.10 2001/08/05 08:37:52 itojun Exp $ */
+
+/*-
+ * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the project nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _NETIPSEC_KEY_DEBUG_HH_
+#define _NETIPSEC_KEY_DEBUG_HH_
+
+#ifdef _KERNEL
+/* debug flags */
+#define KEYDEBUG_STAMP 0x00000001 /* path */
+#define KEYDEBUG_DATA 0x00000002 /* data */
+#define KEYDEBUG_DUMP 0x00000004 /* dump */
+
+#define KEYDEBUG_KEY 0x00000010 /* key processing */
+#define KEYDEBUG_ALG 0x00000020 /* ciph & auth algorithm */
+#define KEYDEBUG_IPSEC 0x00000040 /* ipsec processing */
+
+#define KEYDEBUG_KEY_STAMP (KEYDEBUG_KEY | KEYDEBUG_STAMP)
+#define KEYDEBUG_KEY_DATA (KEYDEBUG_KEY | KEYDEBUG_DATA)
+#define KEYDEBUG_KEY_DUMP (KEYDEBUG_KEY | KEYDEBUG_DUMP)
+#define KEYDEBUG_ALG_STAMP (KEYDEBUG_ALG | KEYDEBUG_STAMP)
+#define KEYDEBUG_ALG_DATA (KEYDEBUG_ALG | KEYDEBUG_DATA)
+#define KEYDEBUG_ALG_DUMP (KEYDEBUG_ALG | KEYDEBUG_DUMP)
+#define KEYDEBUG_IPSEC_STAMP (KEYDEBUG_IPSEC | KEYDEBUG_STAMP)
+#define KEYDEBUG_IPSEC_DATA (KEYDEBUG_IPSEC | KEYDEBUG_DATA)
+#define KEYDEBUG_IPSEC_DUMP (KEYDEBUG_IPSEC | KEYDEBUG_DUMP)
+
+#define KEYDEBUG(lev,arg) \
+ do { if ((V_key_debug_level & (lev)) == (lev)) { arg; } } while (0)
+
+VNET_DECLARE(u_int32_t, key_debug_level);
+#define V_key_debug_level VNET(key_debug_level)
+#endif /*_KERNEL*/
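
Typical use of the macro above, mirroring the disabled block in key_parse(); the statements run only when the matching bits are set in the key debug level (semicolon-separated statements are a single macro argument, so this expands as intended):

	KEYDEBUG(KEYDEBUG_KEY_DUMP,
		printf("%s: dumping message\n", __func__);
		kdebug_sadb(msg));
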
+
+struct sadb_msg;
+struct sadb_ext;
+extern void kdebug_sadb __P((struct sadb_msg *));
+extern void kdebug_sadb_x_policy __P((struct sadb_ext *));
+
+#ifdef _KERNEL
+struct secpolicy;
+struct secpolicyindex;
+struct secasindex;
+struct secasvar;
+struct secreplay;
+struct mbuf;
+extern void kdebug_secpolicy __P((struct secpolicy *));
+extern void kdebug_secpolicyindex __P((struct secpolicyindex *));
+extern void kdebug_secasindex __P((struct secasindex *));
+extern void kdebug_secasv __P((struct secasvar *));
+extern void kdebug_mbufhdr __P((struct mbuf *));
+extern void kdebug_mbuf __P((struct mbuf *));
+#endif /*_KERNEL*/
+
+struct sockaddr;
+extern void kdebug_sockaddr __P((struct sockaddr *));
+
+extern void ipsec_hexdump __P((caddr_t, int));
+extern void ipsec_bindump __P((caddr_t, int));
+
+#endif /* _NETIPSEC_KEY_DEBUG_HH_ */
diff --git a/rtems/freebsd/netipsec/key_var.h b/rtems/freebsd/netipsec/key_var.h
new file mode 100644
index 00000000..127b6abf
--- /dev/null
+++ b/rtems/freebsd/netipsec/key_var.h
@@ -0,0 +1,74 @@
+/* $FreeBSD$ */
+/* $KAME: key_var.h,v 1.11 2001/09/12 23:05:07 sakane Exp $ */
+
+/*-
+ * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the project nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _NETIPSEC_KEY_VAR_HH_
+#define _NETIPSEC_KEY_VAR_HH_
+
+/* sysctl */
+#define KEYCTL_DEBUG_LEVEL 1
+#define KEYCTL_SPI_TRY 2
+#define KEYCTL_SPI_MIN_VALUE 3
+#define KEYCTL_SPI_MAX_VALUE 4
+#define KEYCTL_RANDOM_INT 5
+#define KEYCTL_LARVAL_LIFETIME 6
+#define KEYCTL_BLOCKACQ_COUNT 7
+#define KEYCTL_BLOCKACQ_LIFETIME 8
+#define KEYCTL_ESP_KEYMIN 9
+#define KEYCTL_ESP_AUTH 10
+#define KEYCTL_AH_KEYMIN 11
+#define KEYCTL_PREFERED_OLDSA 12
+#define KEYCTL_MAXID 13
+
+#define KEYCTL_NAMES { \
+ { 0, 0 }, \
+ { "debug", CTLTYPE_INT }, \
+ { "spi_try", CTLTYPE_INT }, \
+ { "spi_min_value", CTLTYPE_INT }, \
+ { "spi_max_value", CTLTYPE_INT }, \
+ { "random_int", CTLTYPE_INT }, \
+ { "larval_lifetime", CTLTYPE_INT }, \
+ { "blockacq_count", CTLTYPE_INT }, \
+ { "blockacq_lifetime", CTLTYPE_INT }, \
+ { "esp_keymin", CTLTYPE_INT }, \
+ { "esp_auth", CTLTYPE_INT }, \
+ { "ah_keymin", CTLTYPE_INT }, \
+ { "prefered_oldsa", CTLTYPE_INT }, \
+}
+
+#ifdef _KERNEL
+#define _ARRAYLEN(p) (sizeof(p)/sizeof(p[0]))
+#define _KEYLEN(key) ((u_int)((key)->bits >> 3))
+#define _KEYBITS(key) ((u_int)((key)->bits))
+#define _KEYBUF(key) ((caddr_t)((caddr_t)(key) + sizeof(struct sadb_key)))
+#endif /*_KERNEL*/
+
+#endif /* _NETIPSEC_KEY_VAR_HH_ */
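
A worked example of the key accessors defined above (a sketch; src is assumed to be an initialized struct seckey holding a 128-bit key and wire the struct sadb_key being built, as in key_setkey() in key.c):

	struct seckey *src;	/* assumed initialized, bits == 128 */
	struct sadb_key *wire;	/* assumed allocated message extension */
	u_int len;

	len = _KEYLEN(src);			/* bits >> 3: 16 bytes */
	bcopy(src->key_data, _KEYBUF(wire), len); /* bytes follow header */
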
diff --git a/rtems/freebsd/netipsec/keydb.h b/rtems/freebsd/netipsec/keydb.h
new file mode 100644
index 00000000..d077cc74
--- /dev/null
+++ b/rtems/freebsd/netipsec/keydb.h
@@ -0,0 +1,227 @@
+/* $FreeBSD$ */
+/* $KAME: keydb.h,v 1.14 2000/08/02 17:58:26 sakane Exp $ */
+
+/*-
+ * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the project nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _NETIPSEC_KEYDB_HH_
+#define _NETIPSEC_KEYDB_HH_
+
+#ifdef _KERNEL
+
+#include <rtems/freebsd/netipsec/key_var.h>
+
+#ifndef _SOCKADDR_UNION_DEFINED
+#define _SOCKADDR_UNION_DEFINED
+/*
+ * The union of all possible address formats we handle.
+ */
+union sockaddr_union {
+ struct sockaddr sa;
+ struct sockaddr_in sin;
+ struct sockaddr_in6 sin6;
+};
+#endif /* _SOCKADDR_UNION_DEFINED */
+
+/* Security Association Index */
+/* NOTE: src and dst must be in the same address family. */
+struct secasindex {
+	union sockaddr_union src;	/* source address for SA */
+ union sockaddr_union dst; /* destination address for SA */
+ u_int16_t proto; /* IPPROTO_ESP or IPPROTO_AH */
+ u_int8_t mode; /* mode of protocol, see ipsec.h */
+	u_int32_t reqid;		/* request id of the SA's owner */
+					/* see IPSEC_MANUAL_REQID_MAX. */
+};
+
+/*
+ * In order to split out the keydb implementation from that of the
+ * PF_KEY sockets we need to define a few structures that while they
+ * may seem common are likely to diverge over time.
+ */
+
+/* sadb_identity */
+struct secident {
+ u_int16_t type;
+ u_int64_t id;
+};
+
+/* sadb_key */
+struct seckey {
+ u_int16_t bits;
+ char *key_data;
+};
+
+struct seclifetime {
+ u_int32_t allocations;
+ u_int64_t bytes;
+ u_int64_t addtime;
+ u_int64_t usetime;
+};
+
+union sa_route_union {
+ struct route sa_route;
+ struct route sin_route; /* Duplicate for consistency. */
+ struct route_in6 sin6_route;
+};
+
+/* Security Association Data Base */
+struct secashead {
+ LIST_ENTRY(secashead) chain;
+
+ struct secasindex saidx;
+
+ struct secident *idents; /* source identity */
+ struct secident *identd; /* destination identity */
+ /* XXX I don't know how to use them. */
+
+ u_int8_t state; /* MATURE or DEAD. */
+ LIST_HEAD(_satree, secasvar) savtree[SADB_SASTATE_MAX+1];
+ /* SA chain */
+					/* The head of the list is the newest SA */
+
+ union sa_route_union route_cache;
+};
+
+struct xformsw;
+struct enc_xform;
+struct auth_hash;
+struct comp_algo;
+
+/* Security Association */
+struct secasvar {
+ LIST_ENTRY(secasvar) chain;
+ struct mtx lock; /* update/access lock */
+
+ u_int refcnt; /* reference count */
+ u_int8_t state; /* Status of this Association */
+
+ u_int8_t alg_auth; /* Authentication Algorithm Identifier*/
+ u_int8_t alg_enc; /* Cipher Algorithm Identifier */
+ u_int8_t alg_comp; /* Compression Algorithm Identifier */
+ u_int32_t spi; /* SPI Value, network byte order */
+ u_int32_t flags; /* holder for SADB_KEY_FLAGS */
+
+ struct seckey *key_auth; /* Key for Authentication */
+ struct seckey *key_enc; /* Key for Encryption */
+	caddr_t iv;			/* Initialization Vector */
+ u_int ivlen; /* length of IV */
+ void *sched; /* intermediate encryption key */
+ size_t schedlen;
+
+ struct secreplay *replay; /* replay prevention */
+ time_t created; /* for lifetime */
+
+ struct seclifetime *lft_c; /* CURRENT lifetime, it's constant. */
+ struct seclifetime *lft_h; /* HARD lifetime */
+ struct seclifetime *lft_s; /* SOFT lifetime */
+
+ u_int32_t seq; /* sequence number */
+ pid_t pid; /* message's pid */
+
+ struct secashead *sah; /* back pointer to the secashead */
+
+ /*
+ * NB: Fields with a tdb_ prefix are part of the "glue" used
+ * to interface to the OpenBSD crypto support. This was done
+ * to distinguish this code from the mainline KAME code.
+ */
+ struct xformsw *tdb_xform; /* transform */
+ struct enc_xform *tdb_encalgxform; /* encoding algorithm */
+ struct auth_hash *tdb_authalgxform; /* authentication algorithm */
+ struct comp_algo *tdb_compalgxform; /* compression algorithm */
+ u_int64_t tdb_cryptoid; /* crypto session id */
+
+ /*
+ * NAT-Traversal.
+ */
+ u_int16_t natt_type; /* IKE/ESP-marker in output. */
+ u_int16_t natt_esp_frag_len; /* MTU for payload fragmentation. */
+};
+
+#define SECASVAR_LOCK_INIT(_sav) \
+ mtx_init(&(_sav)->lock, "ipsec association", NULL, MTX_DEF)
+#define SECASVAR_LOCK(_sav) mtx_lock(&(_sav)->lock)
+#define SECASVAR_UNLOCK(_sav) mtx_unlock(&(_sav)->lock)
+#define SECASVAR_LOCK_DESTROY(_sav) mtx_destroy(&(_sav)->lock)
+#define SECASVAR_LOCK_ASSERT(_sav) mtx_assert(&(_sav)->lock, MA_OWNED)
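
Sketch of the locking protocol these macros define: per-SA mutable state, such as the replay counter below, is updated under the per-sav mutex (usage modeled on the AH/ESP output paths):

	SECASVAR_LOCK(sav);
	sav->replay->count++;		/* mutable per-SA state */
	SECASVAR_UNLOCK(sav);
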
+
+/* replay prevention */
+struct secreplay {
+ u_int32_t count;
+	u_int wsize;		/* window size in bytes, e.g. 4 */
+ u_int32_t seq; /* used by sender */
+ u_int32_t lastseq; /* used by receiver */
+ caddr_t bitmap; /* used by receiver */
+ int overflow; /* overflow flag */
+};
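
The fields above support a classic anti-replay sliding window: wsize is in bytes, so the window spans wsize * 8 sequence numbers. A simplified, hedged sketch of a receiver-side check (the kernel's actual logic, with a different bitmap layout, lives in ipsec_chkreplay() in ipsec.c):

	static int
	replay_ok(const struct secreplay *rpl, u_int32_t seq)
	{
		u_int32_t wbits = rpl->wsize << 3;	/* window in packets */
		u_int32_t diff;

		if (seq == 0)
			return 0;		/* seq 0 is never valid */
		if (seq > rpl->lastseq)
			return 1;		/* new: window will slide */
		diff = rpl->lastseq - seq;
		if (diff >= wbits)
			return 0;		/* left of the window */
		/* inside the window: valid only if not seen before */
		return (rpl->bitmap[diff >> 3] & (1 << (diff & 7))) == 0;
	}
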
+
+/* socket table used to send PF_KEY messages. */
+struct secreg {
+ LIST_ENTRY(secreg) chain;
+
+ struct socket *so;
+};
+
+/* acquiring list table. */
+struct secacq {
+ LIST_ENTRY(secacq) chain;
+
+ struct secasindex saidx;
+
+ u_int32_t seq; /* sequence number */
+ time_t created; /* for lifetime */
+ int count; /* for lifetime */
+};
+
+/* Sensitivity Level Specification */
+/* nothing */
+
+#define SADB_KILL_INTERVAL 600 /* six seconds */
+
+/* secpolicy */
+extern struct secpolicy *keydb_newsecpolicy __P((void));
+extern void keydb_delsecpolicy __P((struct secpolicy *));
+/* secashead */
+extern struct secashead *keydb_newsecashead __P((void));
+extern void keydb_delsecashead __P((struct secashead *));
+/* secasvar */
+extern struct secasvar *keydb_newsecasvar __P((void));
+extern void keydb_refsecasvar __P((struct secasvar *));
+extern void keydb_freesecasvar __P((struct secasvar *));
+/* secreplay */
+extern struct secreplay *keydb_newsecreplay __P((size_t));
+extern void keydb_delsecreplay __P((struct secreplay *));
+/* secreg */
+extern struct secreg *keydb_newsecreg __P((void));
+extern void keydb_delsecreg __P((struct secreg *));
+
+#endif /* _KERNEL */
+
+#endif /* _NETIPSEC_KEYDB_HH_ */
diff --git a/rtems/freebsd/netipsec/keysock.c b/rtems/freebsd/netipsec/keysock.c
new file mode 100644
index 00000000..609759a6
--- /dev/null
+++ b/rtems/freebsd/netipsec/keysock.c
@@ -0,0 +1,584 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/* $FreeBSD$ */
+/* $KAME: keysock.c,v 1.25 2001/08/13 20:07:41 itojun Exp $ */
+
+/*-
+ * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the project nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <rtems/freebsd/local/opt_ipsec.h>
+
+/* This code is derived from sys/net/rtsock.c in FreeBSD 2.2.5. */
+
+#include <rtems/freebsd/sys/types.h>
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/domain.h>
+#include <rtems/freebsd/sys/errno.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/mutex.h>
+#include <rtems/freebsd/sys/priv.h>
+#include <rtems/freebsd/sys/protosw.h>
+#include <rtems/freebsd/sys/signalvar.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/socketvar.h>
+#include <rtems/freebsd/sys/sysctl.h>
+#include <rtems/freebsd/sys/systm.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/raw_cb.h>
+#include <rtems/freebsd/net/route.h>
+#include <rtems/freebsd/net/vnet.h>
+
+#include <rtems/freebsd/netinet/in.h>
+
+#include <rtems/freebsd/net/pfkeyv2.h>
+#include <rtems/freebsd/netipsec/key.h>
+#include <rtems/freebsd/netipsec/keysock.h>
+#include <rtems/freebsd/netipsec/key_debug.h>
+#include <rtems/freebsd/netipsec/ipsec.h>
+
+#include <rtems/freebsd/machine/stdarg.h>
+
+struct key_cb {
+ int key_count;
+ int any_count;
+};
+static VNET_DEFINE(struct key_cb, key_cb);
+#define V_key_cb VNET(key_cb)
+
+static struct sockaddr key_src = { 2, PF_KEY, };
+
+static int key_sendup0 __P((struct rawcb *, struct mbuf *, int));
+
+VNET_DEFINE(struct pfkeystat, pfkeystat);
+
+/*
+ * key_output()
+ */
+int
+key_output(struct mbuf *m, struct socket *so)
+{
+ struct sadb_msg *msg;
+ int len, error = 0;
+
+ if (m == NULL)
+ panic("%s: NULL pointer was passed.\n", __func__);
+
+ V_pfkeystat.out_total++;
+ V_pfkeystat.out_bytes += m->m_pkthdr.len;
+
+ len = m->m_pkthdr.len;
+ if (len < sizeof(struct sadb_msg)) {
+ V_pfkeystat.out_tooshort++;
+ error = EINVAL;
+ goto end;
+ }
+
+ if (m->m_len < sizeof(struct sadb_msg)) {
+ if ((m = m_pullup(m, sizeof(struct sadb_msg))) == NULL) {
+ V_pfkeystat.out_nomem++;
+ error = ENOBUFS;
+ goto end;
+ }
+ }
+
+ M_ASSERTPKTHDR(m);
+
+ KEYDEBUG(KEYDEBUG_KEY_DUMP, kdebug_mbuf(m));
+
+ msg = mtod(m, struct sadb_msg *);
+ V_pfkeystat.out_msgtype[msg->sadb_msg_type]++;
+ if (len != PFKEY_UNUNIT64(msg->sadb_msg_len)) {
+ V_pfkeystat.out_invlen++;
+ error = EINVAL;
+ goto end;
+ }
+
+ error = key_parse(m, so);
+ m = NULL;
+end:
+ if (m)
+ m_freem(m);
+ return error;
+}
+
+/*
+ * Send a message up to the PF_KEY socket.
+ */
+static int
+key_sendup0(rp, m, promisc)
+ struct rawcb *rp;
+ struct mbuf *m;
+ int promisc;
+{
+ int error;
+
+ if (promisc) {
+ struct sadb_msg *pmsg;
+
+ M_PREPEND(m, sizeof(struct sadb_msg), M_DONTWAIT);
+ if (m && m->m_len < sizeof(struct sadb_msg))
+ m = m_pullup(m, sizeof(struct sadb_msg));
+ if (!m) {
+ V_pfkeystat.in_nomem++;
+ m_freem(m);
+ return ENOBUFS;
+ }
+ m->m_pkthdr.len += sizeof(*pmsg);
+
+ pmsg = mtod(m, struct sadb_msg *);
+ bzero(pmsg, sizeof(*pmsg));
+ pmsg->sadb_msg_version = PF_KEY_V2;
+ pmsg->sadb_msg_type = SADB_X_PROMISC;
+ pmsg->sadb_msg_len = PFKEY_UNIT64(m->m_pkthdr.len);
+ /* pid and seq? */
+
+ V_pfkeystat.in_msgtype[pmsg->sadb_msg_type]++;
+ }
+
+ if (!sbappendaddr(&rp->rcb_socket->so_rcv, (struct sockaddr *)&key_src,
+ m, NULL)) {
+ V_pfkeystat.in_nomem++;
+ m_freem(m);
+ error = ENOBUFS;
+ } else
+ error = 0;
+ sorwakeup(rp->rcb_socket);
+ return error;
+}
+
+/* XXX this interface should be obsoleted. */
+int
+key_sendup(so, msg, len, target)
+ struct socket *so;
+ struct sadb_msg *msg;
+ u_int len;
+ int target; /*target of the resulting message*/
+{
+ struct mbuf *m, *n, *mprev;
+ int tlen;
+
+ /* sanity check */
+ if (so == NULL || msg == NULL)
+ panic("%s: NULL pointer was passed.\n", __func__);
+
+ KEYDEBUG(KEYDEBUG_KEY_DUMP,
+ printf("%s: \n", __func__);
+ kdebug_sadb(msg));
+
+ /*
+ * Increment the statistics here so the attempt is counted even
+ * if we fail with ENOBUFS later in this function.
+ */
+ V_pfkeystat.in_total++;
+ V_pfkeystat.in_bytes += len;
+ V_pfkeystat.in_msgtype[msg->sadb_msg_type]++;
+
+ /*
+ * Build the chain from plain mbufs wherever possible (not clusters)
+ * to conserve socket buffer space.  We may generate many SADB_ACQUIRE
+ * messages for listening key sockets; if we simply allocated clusters,
+ * sbappendaddr() would fail with ENOBUFS, since sbspace() accounts for
+ * the mbuf storage as well as the actual data bytes.
+ *
+ * TODO: SADB_ACQUIRE filters should be implemented.
+ */
+ tlen = len;
+ m = mprev = NULL;
+ while (tlen > 0) {
+ if (tlen == len) {
+ MGETHDR(n, M_DONTWAIT, MT_DATA);
+ if (n == NULL) {
+ V_pfkeystat.in_nomem++;
+ return ENOBUFS;
+ }
+ n->m_len = MHLEN;
+ } else {
+ MGET(n, M_DONTWAIT, MT_DATA);
+ if (n == NULL) {
+ V_pfkeystat.in_nomem++;
+ return ENOBUFS;
+ }
+ n->m_len = MLEN;
+ }
+ if (tlen >= MCLBYTES) { /*XXX better threshold? */
+ MCLGET(n, M_DONTWAIT);
+ if ((n->m_flags & M_EXT) == 0) {
+ m_free(n);
+ m_freem(m);
+ V_pfkeystat.in_nomem++;
+ return ENOBUFS;
+ }
+ n->m_len = MCLBYTES;
+ }
+
+ if (tlen < n->m_len)
+ n->m_len = tlen;
+ n->m_next = NULL;
+ if (m == NULL)
+ m = mprev = n;
+ else {
+ mprev->m_next = n;
+ mprev = n;
+ }
+ tlen -= n->m_len;
+ n = NULL;
+ }
+ m->m_pkthdr.len = len;
+ m->m_pkthdr.rcvif = NULL;
+ m_copyback(m, 0, len, (caddr_t)msg);
+
+ /* Avoid double counting; key_sendup_mbuf() increments these again. */
+ V_pfkeystat.in_total--;
+ V_pfkeystat.in_bytes -= len;
+ V_pfkeystat.in_msgtype[msg->sadb_msg_type]--;
+
+ return key_sendup_mbuf(so, m, target);
+}
+
+/* so can be NULL if target != KEY_SENDUP_ONE */
+int
+key_sendup_mbuf(so, m, target)
+ struct socket *so;
+ struct mbuf *m;
+ int target;
+{
+ struct mbuf *n;
+ struct keycb *kp;
+ int sendup;
+ struct rawcb *rp;
+ int error = 0;
+
+ if (m == NULL)
+ panic("key_sendup_mbuf: NULL pointer was passed.\n");
+ if (so == NULL && target == KEY_SENDUP_ONE)
+ panic("%s: NULL pointer was passed.\n", __func__);
+
+ V_pfkeystat.in_total++;
+ V_pfkeystat.in_bytes += m->m_pkthdr.len;
+ if (m->m_len < sizeof(struct sadb_msg)) {
+ m = m_pullup(m, sizeof(struct sadb_msg));
+ if (m == NULL) {
+ V_pfkeystat.in_nomem++;
+ return ENOBUFS;
+ }
+ }
+ if (m->m_len >= sizeof(struct sadb_msg)) {
+ struct sadb_msg *msg;
+ msg = mtod(m, struct sadb_msg *);
+ V_pfkeystat.in_msgtype[msg->sadb_msg_type]++;
+ }
+ mtx_lock(&rawcb_mtx);
+ LIST_FOREACH(rp, &V_rawcb_list, list) {
+ if (rp->rcb_proto.sp_family != PF_KEY)
+ continue;
+ if (rp->rcb_proto.sp_protocol
+ && rp->rcb_proto.sp_protocol != PF_KEY_V2) {
+ continue;
+ }
+
+ kp = (struct keycb *)rp;
+
+ /*
+ * A socket in promiscuous mode receives two PF_KEY messages for a
+ * broadcast reply: the SADB_X_PROMISC copy sent here and the normal
+ * delivery below.  (Based on a pf_key@inner.net message, 14 Oct 1998.)
+ */
+ if (kp->kp_promisc) {
+ if ((n = m_copy(m, 0, (int)M_COPYALL)) != NULL) {
+ (void)key_sendup0(rp, n, 1);
+ n = NULL;
+ }
+ }
+
+ /* The specific target socket is handled after the loop. */
+ if (so && sotorawcb(so) == rp)
+ continue;
+
+ sendup = 0;
+ switch (target) {
+ case KEY_SENDUP_ONE:
+ /* Never true here; the target socket was skipped above. */
+ if (so && sotorawcb(so) == rp)
+ sendup++;
+ break;
+ case KEY_SENDUP_ALL:
+ sendup++;
+ break;
+ case KEY_SENDUP_REGISTERED:
+ if (kp->kp_registered)
+ sendup++;
+ break;
+ }
+ V_pfkeystat.in_msgtarget[target]++;
+
+ if (!sendup)
+ continue;
+
+ if ((n = m_copy(m, 0, (int)M_COPYALL)) == NULL) {
+ m_freem(m);
+ V_pfkeystat.in_nomem++;
+ mtx_unlock(&rawcb_mtx);
+ return ENOBUFS;
+ }
+
+ if ((error = key_sendup0(rp, n, 0)) != 0) {
+ m_freem(m);
+ mtx_unlock(&rawcb_mtx);
+ return error;
+ }
+
+ n = NULL;
+ }
+
+ if (so) {
+ error = key_sendup0(sotorawcb(so), m, 0);
+ m = NULL;
+ } else {
+ error = 0;
+ m_freem(m);
+ }
+ mtx_unlock(&rawcb_mtx);
+ return error;
+}
+
+/*
+ * key_abort()
+ * derived from net/rtsock.c:rts_abort()
+ */
+static void
+key_abort(struct socket *so)
+{
+ raw_usrreqs.pru_abort(so);
+}
+
+/*
+ * key_attach()
+ * derived from net/rtsock.c:rts_attach()
+ */
+static int
+key_attach(struct socket *so, int proto, struct thread *td)
+{
+ struct keycb *kp;
+ int error;
+
+ KASSERT(so->so_pcb == NULL, ("key_attach: so_pcb != NULL"));
+
+ if (td != NULL) {
+ error = priv_check(td, PRIV_NET_RAW);
+ if (error)
+ return error;
+ }
+
+ /* XXX */
+ kp = malloc(sizeof *kp, M_PCB, M_WAITOK | M_ZERO);
+ if (kp == NULL)
+ return ENOBUFS;
+
+ so->so_pcb = (caddr_t)kp;
+ error = raw_attach(so, proto);
+ kp = (struct keycb *)sotorawcb(so);
+ if (error) {
+ free(kp, M_PCB);
+ so->so_pcb = (caddr_t) 0;
+ return error;
+ }
+
+ kp->kp_promisc = kp->kp_registered = 0;
+
+ if (kp->kp_raw.rcb_proto.sp_protocol == PF_KEY) /* XXX: AF_KEY */
+ V_key_cb.key_count++;
+ V_key_cb.any_count++;
+ soisconnected(so);
+ so->so_options |= SO_USELOOPBACK;
+
+ return 0;
+}
+
+/*
+ * key_bind()
+ * derived from net/rtsock.c:rts_bind()
+ */
+static int
+key_bind(struct socket *so, struct sockaddr *nam, struct thread *td)
+{
+ return EINVAL;
+}
+
+/*
+ * key_close()
+ * derived from net/rtsock.c:rts_close().
+ */
+static void
+key_close(struct socket *so)
+{
+
+ raw_usrreqs.pru_close(so);
+}
+
+/*
+ * key_connect()
+ * derived from net/rtsock.c:rts_connect()
+ */
+static int
+key_connect(struct socket *so, struct sockaddr *nam, struct thread *td)
+{
+ return EINVAL;
+}
+
+/*
+ * key_detach()
+ * derived from net/rtsock.c:rts_detach()
+ */
+static void
+key_detach(struct socket *so)
+{
+ struct keycb *kp = (struct keycb *)sotorawcb(so);
+
+ KASSERT(kp != NULL, ("key_detach: kp == NULL"));
+ if (kp->kp_raw.rcb_proto.sp_protocol
+ == PF_KEY) /* XXX: AF_KEY */
+ V_key_cb.key_count--;
+ V_key_cb.any_count--;
+
+ key_freereg(so);
+ raw_usrreqs.pru_detach(so);
+}
+
+/*
+ * key_disconnect()
+ * derived from net/rtsock.c:rts_disconnect()
+ */
+static int
+key_disconnect(struct socket *so)
+{
+ return(raw_usrreqs.pru_disconnect(so));
+}
+
+/*
+ * key_peeraddr()
+ * derived from net/rtsock.c:rts_peeraddr()
+ */
+static int
+key_peeraddr(struct socket *so, struct sockaddr **nam)
+{
+ return(raw_usrreqs.pru_peeraddr(so, nam));
+}
+
+/*
+ * key_send()
+ * derived from net/rtsock.c:rts_send()
+ */
+static int
+key_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *nam,
+ struct mbuf *control, struct thread *td)
+{
+ return(raw_usrreqs.pru_send(so, flags, m, nam, control, td));
+}
+
+/*
+ * key_shutdown()
+ * derived from net/rtsock.c:rts_shutdown()
+ */
+static int
+key_shutdown(struct socket *so)
+{
+ return(raw_usrreqs.pru_shutdown(so));
+}
+
+/*
+ * key_sockaddr()
+ * derived from net/rtsock.c:rts_sockaddr()
+ */
+static int
+key_sockaddr(struct socket *so, struct sockaddr **nam)
+{
+ return(raw_usrreqs.pru_sockaddr(so, nam));
+}
+
+struct pr_usrreqs key_usrreqs = {
+ .pru_abort = key_abort,
+ .pru_attach = key_attach,
+ .pru_bind = key_bind,
+ .pru_connect = key_connect,
+ .pru_detach = key_detach,
+ .pru_disconnect = key_disconnect,
+ .pru_peeraddr = key_peeraddr,
+ .pru_send = key_send,
+ .pru_shutdown = key_shutdown,
+ .pru_sockaddr = key_sockaddr,
+ .pru_close = key_close,
+};
+
+/* sysctl */
+SYSCTL_NODE(_net, PF_KEY, key, CTLFLAG_RW, 0, "Key Family");
+
+/*
+ * Definitions of protocols supported in the KEY domain.
+ */
+
+extern struct domain keydomain;
+
+struct protosw keysw[] = {
+{
+ .pr_type = SOCK_RAW,
+ .pr_domain = &keydomain,
+ .pr_protocol = PF_KEY_V2,
+ .pr_flags = PR_ATOMIC|PR_ADDR,
+ .pr_output = key_output,
+ .pr_ctlinput = raw_ctlinput,
+ .pr_init = raw_init,
+ .pr_usrreqs = &key_usrreqs
+}
+};
+
+static void
+key_init0(void)
+{
+
+ bzero((caddr_t)&V_key_cb, sizeof(V_key_cb));
+ key_init();
+}
+
+struct domain keydomain = {
+ .dom_family = PF_KEY,
+ .dom_name = "key",
+ .dom_init = key_init0,
+#ifdef VIMAGE
+ .dom_destroy = key_destroy,
+#endif
+ .dom_protosw = keysw,
+ .dom_protoswNPROTOSW = &keysw[sizeof(keysw)/sizeof(keysw[0])]
+};
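+
+/*
+ * Note (illustrative): dom_protoswNPROTOSW points one past the last
+ * entry of keysw[]; sizeof(keysw)/sizeof(keysw[0]) is the usual
+ * element-count idiom, letting domain code iterate
+ *
+ *    for (pr = dom->dom_protosw; pr < dom->dom_protoswNPROTOSW; pr++)
+ */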
+
+VNET_DOMAIN_SET(key);
diff --git a/rtems/freebsd/netipsec/keysock.h b/rtems/freebsd/netipsec/keysock.h
new file mode 100644
index 00000000..2d147b7a
--- /dev/null
+++ b/rtems/freebsd/netipsec/keysock.h
@@ -0,0 +1,83 @@
+/* $FreeBSD$ */
+/* $KAME: keysock.h,v 1.8 2000/03/27 05:11:06 sumikawa Exp $ */
+
+/*-
+ * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the project nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _NETIPSEC_KEYSOCK_HH_
+#define _NETIPSEC_KEYSOCK_HH_
+
+/* statistics for pfkey socket */
+struct pfkeystat {
+ /* kernel -> userland */
+ u_quad_t out_total; /* # of total calls */
+ u_quad_t out_bytes; /* total bytecount */
+ u_quad_t out_msgtype[256]; /* message type histogram */
+ u_quad_t out_invlen; /* invalid length field */
+ u_quad_t out_invver; /* invalid version field */
+ u_quad_t out_invmsgtype; /* invalid message type field */
+ u_quad_t out_tooshort; /* msg too short */
+ u_quad_t out_nomem; /* memory allocation failure */
+ u_quad_t out_dupext; /* duplicate extension */
+ u_quad_t out_invexttype; /* invalid extension type */
+ u_quad_t out_invsatype; /* invalid sa type */
+ u_quad_t out_invaddr; /* invalid address extension */
+ /* userland -> kernel */
+ u_quad_t in_total; /* # of total calls */
+ u_quad_t in_bytes; /* total bytecount */
+ u_quad_t in_msgtype[256]; /* message type histogram */
+ u_quad_t in_msgtarget[3]; /* one/all/registered */
+ u_quad_t in_nomem; /* memory allocation failure */
+ /* others */
+ u_quad_t sockerr; /* # of socket related errors */
+};
+
+#define KEY_SENDUP_ONE 0
+#define KEY_SENDUP_ALL 1
+#define KEY_SENDUP_REGISTERED 2
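+
+/*
+ * Illustrative sketch (not part of the original header): the target
+ * argument of key_sendup_mbuf() selects the audience for a message.
+ * Broadcasting, e.g. an SADB_ACQUIRE, to every socket that issued
+ * SADB_REGISTER looks like:
+ *
+ *    error = key_sendup_mbuf(NULL, m, KEY_SENDUP_REGISTERED);
+ *
+ * while a direct reply uses KEY_SENDUP_ONE with a non-NULL socket;
+ * the socket argument may only be NULL for the other two targets.
+ */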
+
+#ifdef _KERNEL
+struct keycb {
+ struct rawcb kp_raw; /* rawcb */
+ int kp_promisc; /* promiscuous mode */
+ int kp_registered; /* registered socket */
+};
+
+VNET_DECLARE(struct pfkeystat, pfkeystat);
+#define V_pfkeystat VNET(pfkeystat)
+
+extern int key_output(struct mbuf *m, struct socket *so);
+extern int key_usrreq __P((struct socket *,
+ int, struct mbuf *, struct mbuf *, struct mbuf *));
+
+extern int key_sendup __P((struct socket *, struct sadb_msg *, u_int, int));
+extern int key_sendup_mbuf __P((struct socket *, struct mbuf *, int));
+#endif /* _KERNEL */
+
+#endif /*_NETIPSEC_KEYSOCK_HH_*/
diff --git a/rtems/freebsd/netipsec/xform.h b/rtems/freebsd/netipsec/xform.h
new file mode 100644
index 00000000..dc82edef
--- /dev/null
+++ b/rtems/freebsd/netipsec/xform.h
@@ -0,0 +1,129 @@
+/* $FreeBSD$ */
+/* $OpenBSD: ip_ipsp.h,v 1.119 2002/03/14 01:27:11 millert Exp $ */
+/*-
+ * The authors of this code are John Ioannidis (ji@tla.org),
+ * Angelos D. Keromytis (kermit@csd.uch.gr),
+ * Niels Provos (provos@physnet.uni-hamburg.de) and
+ * Niklas Hallqvist (niklas@appli.se).
+ *
+ * The original version of this code was written by John Ioannidis
+ * for BSD/OS in Athens, Greece, in November 1995.
+ *
+ * Ported to OpenBSD and NetBSD, with additional transforms, in December 1996,
+ * by Angelos D. Keromytis.
+ *
+ * Additional transforms and features in 1997 and 1998 by Angelos D. Keromytis
+ * and Niels Provos.
+ *
+ * Additional features in 1999 by Angelos D. Keromytis and Niklas Hallqvist.
+ *
+ * Copyright (c) 1995, 1996, 1997, 1998, 1999 by John Ioannidis,
+ * Angelos D. Keromytis and Niels Provos.
+ * Copyright (c) 1999 Niklas Hallqvist.
+ * Copyright (c) 2001, Angelos D. Keromytis.
+ *
+ * Permission to use, copy, and modify this software with or without fee
+ * is hereby granted, provided that this entire notice is included in
+ * all copies of any software which is or includes a copy or
+ * modification of this software.
+ * You may use this code under the GNU public license if you so wish. Please
+ * contribute changes back to the authors under this freer than GPL license
+ * so that we may further the use of strong encryption without limitations to
+ * all.
+ *
+ * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
+ * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
+ * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
+ * PURPOSE.
+ */
+
+#ifndef _NETIPSEC_XFORM_HH_
+#define _NETIPSEC_XFORM_HH_
+
+#include <rtems/freebsd/sys/types.h>
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/opencrypto/xform.h>
+
+#define AH_HMAC_HASHLEN 12 /* 96 bits of authenticator */
+#define AH_HMAC_INITIAL_RPL 1 /* replay counter initial value */
+
+/*
+ * Packet tag assigned on completion of IPsec processing; used
+ * to speed up processing when/if the packet comes back for more
+ * processing.
+ */
+struct tdb_ident {
+ u_int32_t spi;
+ union sockaddr_union dst;
+ u_int8_t proto;
+ /* Cache these two for enc(4) in xform_ipip. */
+ u_int8_t alg_auth;
+ u_int8_t alg_enc;
+};
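+
+/*
+ * Illustrative sketch (not part of the original header): consumers scan
+ * a packet for PACKET_TAG_IPSEC_IN_CRYPTO_DONE tags and compare the
+ * tdb_ident stored behind the tag against their SA, roughly:
+ *
+ *    struct m_tag *mtag;
+ *    struct tdb_ident *tdbi;
+ *
+ *    mtag = m_tag_find(m, PACKET_TAG_IPSEC_IN_CRYPTO_DONE, NULL);
+ *    if (mtag != NULL) {
+ *        tdbi = (struct tdb_ident *)(mtag + 1);
+ *        if (tdbi->spi == sav->spi)    (plus proto/dst checks)
+ *            crypto was already done by an IPsec-aware NIC;
+ *    }
+ *
+ * ah_input() in xform_ah.c later in this patch uses this pattern.
+ */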
+
+/*
+ * Opaque data structure hung off a crypto operation descriptor.
+ */
+struct tdb_crypto {
+ struct ipsecrequest *tc_isr; /* ipsec request state */
+ u_int32_t tc_spi; /* associated SPI */
+ union sockaddr_union tc_dst; /* dst addr of packet */
+ u_int8_t tc_proto; /* current protocol, e.g. AH */
+ u_int8_t tc_nxt; /* next protocol, e.g. IPV4 */
+ int tc_protoff; /* current protocol offset */
+ int tc_skip; /* data offset */
+ caddr_t tc_ptr; /* associated crypto data */
+};
+
+struct secasvar;
+struct ipsecrequest;
+
+struct xformsw {
+ u_short xf_type; /* xform ID */
+#define XF_IP4 1 /* IP inside IP */
+#define XF_AH 2 /* AH */
+#define XF_ESP 3 /* ESP */
+#define XF_TCPSIGNATURE 5 /* TCP MD5 Signature option, RFC 2385 */
+#define XF_IPCOMP 6 /* IPCOMP */
+ u_short xf_flags;
+#define XFT_AUTH 0x0001
+#define XFT_CONF 0x0100
+#define XFT_COMP 0x1000
+ char *xf_name; /* human-readable name */
+ int (*xf_init)(struct secasvar*, struct xformsw*); /* setup */
+ int (*xf_zeroize)(struct secasvar*); /* cleanup */
+ int (*xf_input)(struct mbuf*, struct secasvar*, /* input */
+ int, int);
+ int (*xf_output)(struct mbuf*, /* output */
+ struct ipsecrequest *, struct mbuf **, int, int);
+ struct xformsw *xf_next; /* list of registered xforms */
+};
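+
+/*
+ * Illustrative sketch (mirrors ah_xformsw in xform_ah.c later in this
+ * patch): a transform describes itself with a static xformsw and
+ * registers it at boot:
+ *
+ *    static struct xformsw ah_xformsw = {
+ *        XF_AH, XFT_AUTH, "IPsec AH",
+ *        ah_init, ah_zeroize, ah_input, ah_output,
+ *    };
+ *
+ *    xform_register(&ah_xformsw);
+ *
+ * xform_init() then walks the registered list to match an SA's xftype.
+ */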
+
+#ifdef _KERNEL
+extern void xform_register(struct xformsw*);
+extern int xform_init(struct secasvar *sav, int xftype);
+
+struct cryptoini;
+
+/* XF_IP4 */
+extern int ip4_input6(struct mbuf **m, int *offp, int proto);
+extern void ip4_input(struct mbuf *m, int);
+extern int ipip_output(struct mbuf *, struct ipsecrequest *,
+ struct mbuf **, int, int);
+
+/* XF_AH */
+extern int ah_init0(struct secasvar *, struct xformsw *, struct cryptoini *);
+extern int ah_zeroize(struct secasvar *sav);
+extern struct auth_hash *ah_algorithm_lookup(int alg);
+extern size_t ah_hdrsiz(struct secasvar *);
+
+/* XF_ESP */
+extern struct enc_xform *esp_algorithm_lookup(int alg);
+extern size_t esp_hdrsiz(struct secasvar *sav);
+
+/* XF_COMP */
+extern struct comp_algo *ipcomp_algorithm_lookup(int alg);
+
+#endif /* _KERNEL */
+#endif /* _NETIPSEC_XFORM_HH_ */
diff --git a/rtems/freebsd/netipsec/xform_ah.c b/rtems/freebsd/netipsec/xform_ah.c
new file mode 100644
index 00000000..2bdbdaf9
--- /dev/null
+++ b/rtems/freebsd/netipsec/xform_ah.c
@@ -0,0 +1,1219 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/* $FreeBSD$ */
+/* $OpenBSD: ip_ah.c,v 1.63 2001/06/26 06:18:58 angelos Exp $ */
+/*-
+ * The authors of this code are John Ioannidis (ji@tla.org),
+ * Angelos D. Keromytis (kermit@csd.uch.gr) and
+ * Niels Provos (provos@physnet.uni-hamburg.de).
+ *
+ * The original version of this code was written by John Ioannidis
+ * for BSD/OS in Athens, Greece, in November 1995.
+ *
+ * Ported to OpenBSD and NetBSD, with additional transforms, in December 1996,
+ * by Angelos D. Keromytis.
+ *
+ * Additional transforms and features in 1997 and 1998 by Angelos D. Keromytis
+ * and Niels Provos.
+ *
+ * Additional features in 1999 by Angelos D. Keromytis and Niklas Hallqvist.
+ *
+ * Copyright (c) 1995, 1996, 1997, 1998, 1999 by John Ioannidis,
+ * Angelos D. Keromytis and Niels Provos.
+ * Copyright (c) 1999 Niklas Hallqvist.
+ * Copyright (c) 2001 Angelos D. Keromytis.
+ *
+ * Permission to use, copy, and modify this software with or without fee
+ * is hereby granted, provided that this entire notice is included in
+ * all copies of any software which is or includes a copy or
+ * modification of this software.
+ * You may use this code under the GNU public license if you so wish. Please
+ * contribute changes back to the authors under this freer than GPL license
+ * so that we may further the use of strong encryption without limitations to
+ * all.
+ *
+ * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
+ * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
+ * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
+ * PURPOSE.
+ */
+#include <rtems/freebsd/local/opt_inet.h>
+#include <rtems/freebsd/local/opt_inet6.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/syslog.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/sysctl.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/vnet.h>
+
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/in_systm.h>
+#include <rtems/freebsd/netinet/ip.h>
+#include <rtems/freebsd/netinet/ip_ecn.h>
+#include <rtems/freebsd/netinet/ip6.h>
+
+#include <rtems/freebsd/net/route.h>
+#include <rtems/freebsd/netipsec/ipsec.h>
+#include <rtems/freebsd/netipsec/ah.h>
+#include <rtems/freebsd/netipsec/ah_var.h>
+#include <rtems/freebsd/netipsec/xform.h>
+
+#ifdef INET6
+#include <rtems/freebsd/netinet6/ip6_var.h>
+#include <rtems/freebsd/netipsec/ipsec6.h>
+#include <rtems/freebsd/netinet6/ip6_ecn.h>
+#endif
+
+#include <rtems/freebsd/netipsec/key.h>
+#include <rtems/freebsd/netipsec/key_debug.h>
+
+#include <rtems/freebsd/opencrypto/cryptodev.h>
+
+/*
+ * Return header size in bytes. The old protocol did not support
+ * the replay counter; the new protocol always includes the counter.
+ */
+#define HDRSIZE(sav) \
+ (((sav)->flags & SADB_X_EXT_OLD) ? \
+ sizeof (struct ah) : sizeof (struct ah) + sizeof (u_int32_t))
+/*
+ * Return authenticator size in bytes. The old protocol is known
+ * to use a fixed 16-byte authenticator.  The new protocol uses a
+ * 12-byte (96-bit) truncated authenticator.
+ */
+#define AUTHSIZE(sav) \
+ ((sav->flags & SADB_X_EXT_OLD) ? 16 : AH_HMAC_HASHLEN)
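+
+/*
+ * Worked example (illustrative): for a new-protocol SA with an HMAC
+ * truncated to AH_HMAC_HASHLEN (12 bytes), and sizeof(struct ah) being
+ * 8 bytes (next header, length, reserved, SPI):
+ *
+ *    HDRSIZE(sav)  = 8 + 4 = 12    (header plus replay counter)
+ *    AUTHSIZE(sav) = 12            (AH_HMAC_HASHLEN)
+ *
+ * so ah_hdrsiz() below reports 12 + 12 = 24 bytes of AH overhead.
+ */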
+
+VNET_DEFINE(int, ah_enable) = 1; /* control flow of packets with AH */
+VNET_DEFINE(int, ah_cleartos) = 1; /* clear ip_tos when doing AH calc */
+VNET_DEFINE(struct ahstat, ahstat);
+
+SYSCTL_DECL(_net_inet_ah);
+SYSCTL_VNET_INT(_net_inet_ah, OID_AUTO,
+ ah_enable, CTLFLAG_RW, &VNET_NAME(ah_enable), 0, "");
+SYSCTL_VNET_INT(_net_inet_ah, OID_AUTO,
+ ah_cleartos, CTLFLAG_RW, &VNET_NAME(ah_cleartos), 0, "");
+SYSCTL_VNET_STRUCT(_net_inet_ah, IPSECCTL_STATS,
+ stats, CTLFLAG_RD, &VNET_NAME(ahstat), ahstat, "");
+
+static unsigned char ipseczeroes[256]; /* larger than an ip6 extension hdr */
+
+static int ah_input_cb(struct cryptop*);
+static int ah_output_cb(struct cryptop*);
+
+/*
+ * NB: this is public for use by the PF_KEY support.
+ */
+struct auth_hash *
+ah_algorithm_lookup(int alg)
+{
+ if (alg > SADB_AALG_MAX)
+ return NULL;
+ switch (alg) {
+ case SADB_X_AALG_NULL:
+ return &auth_hash_null;
+ case SADB_AALG_MD5HMAC:
+ return &auth_hash_hmac_md5;
+ case SADB_AALG_SHA1HMAC:
+ return &auth_hash_hmac_sha1;
+ case SADB_X_AALG_RIPEMD160HMAC:
+ return &auth_hash_hmac_ripemd_160;
+ case SADB_X_AALG_MD5:
+ return &auth_hash_key_md5;
+ case SADB_X_AALG_SHA:
+ return &auth_hash_key_sha1;
+ case SADB_X_AALG_SHA2_256:
+ return &auth_hash_hmac_sha2_256;
+ case SADB_X_AALG_SHA2_384:
+ return &auth_hash_hmac_sha2_384;
+ case SADB_X_AALG_SHA2_512:
+ return &auth_hash_hmac_sha2_512;
+ }
+ return NULL;
+}
+
+size_t
+ah_hdrsiz(struct secasvar *sav)
+{
+ size_t size;
+
+ if (sav != NULL) {
+ int authsize;
+ IPSEC_ASSERT(sav->tdb_authalgxform != NULL, ("null xform"));
+ /*XXX not right for null algorithm--does it matter??*/
+ authsize = AUTHSIZE(sav);
+ size = roundup(authsize, sizeof (u_int32_t)) + HDRSIZE(sav);
+ } else {
+ /* default guess */
+ size = sizeof (struct ah) + sizeof (u_int32_t) + 16;
+ }
+ return size;
+}
+
+/*
+ * NB: public for use by esp_init.
+ */
+int
+ah_init0(struct secasvar *sav, struct xformsw *xsp, struct cryptoini *cria)
+{
+ struct auth_hash *thash;
+ int keylen;
+
+ thash = ah_algorithm_lookup(sav->alg_auth);
+ if (thash == NULL) {
+ DPRINTF(("%s: unsupported authentication algorithm %u\n",
+ __func__, sav->alg_auth));
+ return EINVAL;
+ }
+ /*
+ * Verify the replay state block allocation is consistent with
+ * the protocol type. We check here so we can make assumptions
+ * later during protocol processing.
+ */
+ /* NB: replay state is set up elsewhere (sigh) */
+ if (((sav->flags&SADB_X_EXT_OLD) == 0) ^ (sav->replay != NULL)) {
+ DPRINTF(("%s: replay state block inconsistency, "
+ "%s algorithm %s replay state\n", __func__,
+ (sav->flags & SADB_X_EXT_OLD) ? "old" : "new",
+ sav->replay == NULL ? "without" : "with"));
+ return EINVAL;
+ }
+ if (sav->key_auth == NULL) {
+ DPRINTF(("%s: no authentication key for %s algorithm\n",
+ __func__, thash->name));
+ return EINVAL;
+ }
+ keylen = _KEYLEN(sav->key_auth);
+ if (keylen != thash->keysize && thash->keysize != 0) {
+ DPRINTF(("%s: invalid keylength %d, algorithm %s requires "
+ "keysize %d\n", __func__,
+ keylen, thash->name, thash->keysize));
+ return EINVAL;
+ }
+
+ sav->tdb_xform = xsp;
+ sav->tdb_authalgxform = thash;
+
+ /* Initialize crypto session. */
+ bzero(cria, sizeof (*cria));
+ cria->cri_alg = sav->tdb_authalgxform->type;
+ cria->cri_klen = _KEYBITS(sav->key_auth);
+ cria->cri_key = sav->key_auth->key_data;
+ cria->cri_mlen = AUTHSIZE(sav);
+
+ return 0;
+}
+
+/*
+ * ah_init() is called when an SPI is being set up.
+ */
+static int
+ah_init(struct secasvar *sav, struct xformsw *xsp)
+{
+ struct cryptoini cria;
+ int error;
+
+ error = ah_init0(sav, xsp, &cria);
+ return error ? error :
+ crypto_newsession(&sav->tdb_cryptoid, &cria, V_crypto_support);
+}
+
+/*
+ * Paranoia.
+ *
+ * NB: public for use by esp_zeroize (XXX).
+ */
+int
+ah_zeroize(struct secasvar *sav)
+{
+ int err;
+
+ if (sav->key_auth)
+ bzero(sav->key_auth->key_data, _KEYLEN(sav->key_auth));
+
+ err = crypto_freesession(sav->tdb_cryptoid);
+ sav->tdb_cryptoid = 0;
+ sav->tdb_authalgxform = NULL;
+ sav->tdb_xform = NULL;
+ return err;
+}
+
+/*
+ * Massage IPv4/IPv6 headers for AH processing.
+ */
+static int
+ah_massage_headers(struct mbuf **m0, int proto, int skip, int alg, int out)
+{
+ struct mbuf *m = *m0;
+ unsigned char *ptr;
+ int off, count;
+
+#ifdef INET
+ struct ip *ip;
+#endif /* INET */
+
+#ifdef INET6
+ struct ip6_ext *ip6e;
+ struct ip6_hdr ip6;
+ int alloc, len, ad;
+#endif /* INET6 */
+
+ switch (proto) {
+#ifdef INET
+ case AF_INET:
+ /*
+ * This is the least painful way of dealing with IPv4 header
+ * and option processing -- just make sure they're in
+ * contiguous memory.
+ */
+ *m0 = m = m_pullup(m, skip);
+ if (m == NULL) {
+ DPRINTF(("%s: m_pullup failed\n", __func__));
+ return ENOBUFS;
+ }
+
+ /* Fix the IP header */
+ ip = mtod(m, struct ip *);
+ if (V_ah_cleartos)
+ ip->ip_tos = 0;
+ ip->ip_ttl = 0;
+ ip->ip_sum = 0;
+
+ /*
+ * On input, fix ip_len which has been byte-swapped
+ * at ip_input().
+ */
+ if (!out) {
+ ip->ip_len = htons(ip->ip_len + skip);
+
+ if (alg == CRYPTO_MD5_KPDK || alg == CRYPTO_SHA1_KPDK)
+ ip->ip_off = htons(ip->ip_off & IP_DF);
+ else
+ ip->ip_off = 0;
+ } else {
+ if (alg == CRYPTO_MD5_KPDK || alg == CRYPTO_SHA1_KPDK)
+ ip->ip_off = htons(ntohs(ip->ip_off) & IP_DF);
+ else
+ ip->ip_off = 0;
+ }
+
+ ptr = mtod(m, unsigned char *) + sizeof(struct ip);
+
+ /* IPv4 option processing */
+ for (off = sizeof(struct ip); off < skip;) {
+ if (ptr[off] == IPOPT_EOL || ptr[off] == IPOPT_NOP ||
+ off + 1 < skip)
+ ;
+ else {
+ DPRINTF(("%s: illegal IPv4 option length for "
+ "option %d\n", __func__, ptr[off]));
+
+ m_freem(m);
+ return EINVAL;
+ }
+
+ switch (ptr[off]) {
+ case IPOPT_EOL:
+ off = skip; /* End the loop. */
+ break;
+
+ case IPOPT_NOP:
+ off++;
+ break;
+
+ case IPOPT_SECURITY: /* 0x82 */
+ case 0x85: /* Extended security. */
+ case 0x86: /* Commercial security. */
+ case 0x94: /* Router alert */
+ case 0x95: /* RFC1770 */
+ /* Sanity check for option length. */
+ if (ptr[off + 1] < 2) {
+ DPRINTF(("%s: illegal IPv4 option "
+ "length for option %d\n",
+ __func__, ptr[off]));
+
+ m_freem(m);
+ return EINVAL;
+ }
+
+ off += ptr[off + 1];
+ break;
+
+ case IPOPT_LSRR:
+ case IPOPT_SSRR:
+ /* Sanity check for option length. */
+ if (ptr[off + 1] < 2) {
+ DPRINTF(("%s: illegal IPv4 option "
+ "length for option %d\n",
+ __func__, ptr[off]));
+
+ m_freem(m);
+ return EINVAL;
+ }
+
+ /*
+ * On output, if we have either of the
+ * source routing options, we should
+ * swap the destination address of the
+ * IP header with the last address
+ * specified in the option, as that is
+ * what the destination's IP header
+ * will look like.
+ */
+ if (out)
+ bcopy(ptr + off + ptr[off + 1] -
+ sizeof(struct in_addr),
+ &(ip->ip_dst), sizeof(struct in_addr));
+
+ /* Fall through */
+ default:
+ /* Sanity check for option length. */
+ if (ptr[off + 1] < 2) {
+ DPRINTF(("%s: illegal IPv4 option "
+ "length for option %d\n",
+ __func__, ptr[off]));
+ m_freem(m);
+ return EINVAL;
+ }
+
+ /* Zeroize all other options. */
+ count = ptr[off + 1];
+ bcopy(ipseczeroes, ptr, count);
+ off += count;
+ break;
+ }
+
+ /* Sanity check. */
+ if (off > skip) {
+ DPRINTF(("%s: malformed IPv4 options header\n",
+ __func__));
+
+ m_freem(m);
+ return EINVAL;
+ }
+ }
+
+ break;
+#endif /* INET */
+
+#ifdef INET6
+ case AF_INET6: /* Ugly... */
+ /* Copy and "cook" the IPv6 header. */
+ m_copydata(m, 0, sizeof(ip6), (caddr_t) &ip6);
+
+ /* We don't do IPv6 Jumbograms. */
+ if (ip6.ip6_plen == 0) {
+ DPRINTF(("%s: unsupported IPv6 jumbogram\n", __func__));
+ m_freem(m);
+ return EMSGSIZE;
+ }
+
+ ip6.ip6_flow = 0;
+ ip6.ip6_hlim = 0;
+ ip6.ip6_vfc &= ~IPV6_VERSION_MASK;
+ ip6.ip6_vfc |= IPV6_VERSION;
+
+ /* Scoped address handling. */
+ if (IN6_IS_SCOPE_LINKLOCAL(&ip6.ip6_src))
+ ip6.ip6_src.s6_addr16[1] = 0;
+ if (IN6_IS_SCOPE_LINKLOCAL(&ip6.ip6_dst))
+ ip6.ip6_dst.s6_addr16[1] = 0;
+
+ /* Done with IPv6 header. */
+ m_copyback(m, 0, sizeof(struct ip6_hdr), (caddr_t) &ip6);
+
+ /* Let's deal with the remaining headers (if any). */
+ if (skip - sizeof(struct ip6_hdr) > 0) {
+ if (m->m_len <= skip) {
+ ptr = (unsigned char *) malloc(
+ skip - sizeof(struct ip6_hdr),
+ M_XDATA, M_NOWAIT);
+ if (ptr == NULL) {
+ DPRINTF(("%s: failed to allocate memory"
+ "for IPv6 headers\n",__func__));
+ m_freem(m);
+ return ENOBUFS;
+ }
+
+ /*
+ * Copy all the protocol headers after
+ * the IPv6 header.
+ */
+ m_copydata(m, sizeof(struct ip6_hdr),
+ skip - sizeof(struct ip6_hdr), ptr);
+ alloc = 1;
+ } else {
+ /* No need to allocate memory. */
+ ptr = mtod(m, unsigned char *) +
+ sizeof(struct ip6_hdr);
+ alloc = 0;
+ }
+ } else
+ break;
+
+ off = ip6.ip6_nxt & 0xff; /* Next header type. */
+
+ for (len = 0; len < skip - sizeof(struct ip6_hdr);)
+ switch (off) {
+ case IPPROTO_HOPOPTS:
+ case IPPROTO_DSTOPTS:
+ ip6e = (struct ip6_ext *) (ptr + len);
+
+ /*
+ * Process the mutable/immutable
+ * options -- borrows heavily from the
+ * KAME code.
+ */
+ for (count = len + sizeof(struct ip6_ext);
+ count < len + ((ip6e->ip6e_len + 1) << 3);) {
+ if (ptr[count] == IP6OPT_PAD1) {
+ count++;
+ continue; /* Skip padding. */
+ }
+
+ /* Sanity check. */
+ if (count > len +
+ ((ip6e->ip6e_len + 1) << 3)) {
+ m_freem(m);
+
+ /* Free, if we allocated. */
+ if (alloc)
+ free(ptr, M_XDATA);
+ return EINVAL;
+ }
+
+ ad = ptr[count + 1] + 2; /* option data plus type/length bytes */
+
+ /* If mutable option, zeroize. */
+ if (ptr[count] & IP6OPT_MUTABLE)
+ bcopy(ipseczeroes, ptr + count,
+ ptr[count + 1]);
+
+ count += ad;
+
+ /* Sanity check. */
+ if (count >
+ skip - sizeof(struct ip6_hdr)) {
+ m_freem(m);
+
+ /* Free, if we allocated. */
+ if (alloc)
+ free(ptr, M_XDATA);
+ return EINVAL;
+ }
+ }
+
+ /* Advance. */
+ len += ((ip6e->ip6e_len + 1) << 3);
+ off = ip6e->ip6e_nxt;
+ break;
+
+ case IPPROTO_ROUTING:
+ /*
+ * Always include routing headers in
+ * computation.
+ */
+ ip6e = (struct ip6_ext *) (ptr + len);
+ len += ((ip6e->ip6e_len + 1) << 3);
+ off = ip6e->ip6e_nxt;
+ break;
+
+ default:
+ DPRINTF(("%s: unexpected IPv6 header type %d",
+ __func__, off));
+ if (alloc)
+ free(ptr, M_XDATA);
+ m_freem(m);
+ return EINVAL;
+ }
+
+ /* Copyback and free, if we allocated. */
+ if (alloc) {
+ m_copyback(m, sizeof(struct ip6_hdr),
+ skip - sizeof(struct ip6_hdr), ptr);
+ free(ptr, M_XDATA);
+ }
+
+ break;
+#endif /* INET6 */
+ }
+
+ return 0;
+}
+
+/*
+ * ah_input() gets called to verify that an input packet
+ * passes authentication.
+ */
+static int
+ah_input(struct mbuf *m, struct secasvar *sav, int skip, int protoff)
+{
+ struct auth_hash *ahx;
+ struct tdb_ident *tdbi;
+ struct tdb_crypto *tc;
+ struct m_tag *mtag;
+ struct newah *ah;
+ int hl, rplen, authsize;
+
+ struct cryptodesc *crda;
+ struct cryptop *crp;
+
+ IPSEC_ASSERT(sav != NULL, ("null SA"));
+ IPSEC_ASSERT(sav->key_auth != NULL, ("null authentication key"));
+ IPSEC_ASSERT(sav->tdb_authalgxform != NULL,
+ ("null authentication xform"));
+
+ /* Figure out header size. */
+ rplen = HDRSIZE(sav);
+
+ /* XXX don't pullup, just copy header */
+ IP6_EXTHDR_GET(ah, struct newah *, m, skip, rplen);
+ if (ah == NULL) {
+ DPRINTF(("ah_input: cannot pullup header\n"));
+ V_ahstat.ahs_hdrops++; /*XXX*/
+ m_freem(m);
+ return ENOBUFS;
+ }
+
+ /* Check replay window, if applicable. */
+ if (sav->replay && !ipsec_chkreplay(ntohl(ah->ah_seq), sav)) {
+ V_ahstat.ahs_replay++;
+ DPRINTF(("%s: packet replay failure: %s\n", __func__,
+ ipsec_logsastr(sav)));
+ m_freem(m);
+ return ENOBUFS;
+ }
+
+ /* Verify AH header length. */
+ hl = ah->ah_len * sizeof (u_int32_t);
+ ahx = sav->tdb_authalgxform;
+ authsize = AUTHSIZE(sav);
+ if (hl != authsize + rplen - sizeof (struct ah)) {
+ DPRINTF(("%s: bad authenticator length %u (expecting %lu)"
+ " for packet in SA %s/%08lx\n", __func__,
+ hl, (u_long) (authsize + rplen - sizeof (struct ah)),
+ ipsec_address(&sav->sah->saidx.dst),
+ (u_long) ntohl(sav->spi)));
+ V_ahstat.ahs_badauthl++;
+ m_freem(m);
+ return EACCES;
+ }
+ V_ahstat.ahs_ibytes += m->m_pkthdr.len - skip - hl;
+
+ /* Get crypto descriptors. */
+ crp = crypto_getreq(1);
+ if (crp == NULL) {
+ DPRINTF(("%s: failed to acquire crypto descriptor\n",__func__));
+ V_ahstat.ahs_crypto++;
+ m_freem(m);
+ return ENOBUFS;
+ }
+
+ crda = crp->crp_desc;
+ IPSEC_ASSERT(crda != NULL, ("null crypto descriptor"));
+
+ crda->crd_skip = 0;
+ crda->crd_len = m->m_pkthdr.len;
+ crda->crd_inject = skip + rplen;
+
+ /* Authentication operation. */
+ crda->crd_alg = ahx->type;
+ crda->crd_klen = _KEYBITS(sav->key_auth);
+ crda->crd_key = sav->key_auth->key_data;
+
+ /* Find out if we've already done crypto. */
+ for (mtag = m_tag_find(m, PACKET_TAG_IPSEC_IN_CRYPTO_DONE, NULL);
+ mtag != NULL;
+ mtag = m_tag_find(m, PACKET_TAG_IPSEC_IN_CRYPTO_DONE, mtag)) {
+ tdbi = (struct tdb_ident *) (mtag + 1);
+ if (tdbi->proto == sav->sah->saidx.proto &&
+ tdbi->spi == sav->spi &&
+ !bcmp(&tdbi->dst, &sav->sah->saidx.dst,
+ sizeof (union sockaddr_union)))
+ break;
+ }
+
+ /* Allocate IPsec-specific opaque crypto info. */
+ if (mtag == NULL) {
+ tc = (struct tdb_crypto *) malloc(sizeof (struct tdb_crypto) +
+ skip + rplen + authsize, M_XDATA, M_NOWAIT|M_ZERO);
+ } else {
+ /* Hash verification has already been done successfully. */
+ tc = (struct tdb_crypto *) malloc(sizeof (struct tdb_crypto),
+ M_XDATA, M_NOWAIT|M_ZERO);
+ }
+ if (tc == NULL) {
+ DPRINTF(("%s: failed to allocate tdb_crypto\n", __func__));
+ V_ahstat.ahs_crypto++;
+ crypto_freereq(crp);
+ m_freem(m);
+ return ENOBUFS;
+ }
+
+ /* Only save information if crypto processing is needed. */
+ if (mtag == NULL) {
+ int error;
+
+ /*
+ * Save the authenticator, the skipped portion of the packet,
+ * and the AH header.
+ */
+ m_copydata(m, 0, skip + rplen + authsize, (caddr_t)(tc+1));
+
+ /* Zeroize the authenticator on the packet. */
+ m_copyback(m, skip + rplen, authsize, ipseczeroes);
+
+ /* "Massage" the packet headers for crypto processing. */
+ error = ah_massage_headers(&m, sav->sah->saidx.dst.sa.sa_family,
+ skip, ahx->type, 0);
+ if (error != 0) {
+ /* NB: mbuf is free'd by ah_massage_headers */
+ V_ahstat.ahs_hdrops++;
+ free(tc, M_XDATA);
+ crypto_freereq(crp);
+ return error;
+ }
+ }
+
+ /* Crypto operation descriptor. */
+ crp->crp_ilen = m->m_pkthdr.len; /* Total input length. */
+ crp->crp_flags = CRYPTO_F_IMBUF | CRYPTO_F_CBIFSYNC;
+ crp->crp_buf = (caddr_t) m;
+ crp->crp_callback = ah_input_cb;
+ crp->crp_sid = sav->tdb_cryptoid;
+ crp->crp_opaque = (caddr_t) tc;
+
+ /* These are passed as-is to the callback. */
+ tc->tc_spi = sav->spi;
+ tc->tc_dst = sav->sah->saidx.dst;
+ tc->tc_proto = sav->sah->saidx.proto;
+ tc->tc_nxt = ah->ah_nxt;
+ tc->tc_protoff = protoff;
+ tc->tc_skip = skip;
+ tc->tc_ptr = (caddr_t) mtag; /* Save the mtag we've identified. */
+
+ if (mtag == NULL)
+ return crypto_dispatch(crp);
+ else
+ return ah_input_cb(crp);
+}
+
+#ifdef INET6
+#define IPSEC_COMMON_INPUT_CB(m, sav, skip, protoff, mtag) do { \
+ if (saidx->dst.sa.sa_family == AF_INET6) { \
+ error = ipsec6_common_input_cb(m, sav, skip, protoff, mtag); \
+ } else { \
+ error = ipsec4_common_input_cb(m, sav, skip, protoff, mtag); \
+ } \
+} while (0)
+#else
+#define IPSEC_COMMON_INPUT_CB(m, sav, skip, protoff, mtag) \
+ (error = ipsec4_common_input_cb(m, sav, skip, protoff, mtag))
+#endif
+
+/*
+ * AH input callback from the crypto driver.
+ */
+static int
+ah_input_cb(struct cryptop *crp)
+{
+ int rplen, error, skip, protoff;
+ unsigned char calc[AH_ALEN_MAX];
+ struct mbuf *m;
+ struct cryptodesc *crd;
+ struct auth_hash *ahx;
+ struct tdb_crypto *tc;
+ struct m_tag *mtag;
+ struct secasvar *sav;
+ struct secasindex *saidx;
+ u_int8_t nxt;
+ caddr_t ptr;
+ int authsize;
+
+ crd = crp->crp_desc;
+
+ tc = (struct tdb_crypto *) crp->crp_opaque;
+ IPSEC_ASSERT(tc != NULL, ("null opaque crypto data area!"));
+ skip = tc->tc_skip;
+ nxt = tc->tc_nxt;
+ protoff = tc->tc_protoff;
+ mtag = (struct m_tag *) tc->tc_ptr;
+ m = (struct mbuf *) crp->crp_buf;
+
+ sav = KEY_ALLOCSA(&tc->tc_dst, tc->tc_proto, tc->tc_spi);
+ if (sav == NULL) {
+ V_ahstat.ahs_notdb++;
+ DPRINTF(("%s: SA expired while in crypto\n", __func__));
+ error = ENOBUFS; /*XXX*/
+ goto bad;
+ }
+
+ saidx = &sav->sah->saidx;
+ IPSEC_ASSERT(saidx->dst.sa.sa_family == AF_INET ||
+ saidx->dst.sa.sa_family == AF_INET6,
+ ("unexpected protocol family %u", saidx->dst.sa.sa_family));
+
+ ahx = (struct auth_hash *) sav->tdb_authalgxform;
+
+ /* Check for crypto errors. */
+ if (crp->crp_etype) {
+ if (sav->tdb_cryptoid != 0)
+ sav->tdb_cryptoid = crp->crp_sid;
+
+ if (crp->crp_etype == EAGAIN) {
+ error = crypto_dispatch(crp);
+ return error;
+ }
+
+ V_ahstat.ahs_noxform++;
+ DPRINTF(("%s: crypto error %d\n", __func__, crp->crp_etype));
+ error = crp->crp_etype;
+ goto bad;
+ } else {
+ V_ahstat.ahs_hist[sav->alg_auth]++;
+ crypto_freereq(crp); /* No longer needed. */
+ crp = NULL;
+ }
+
+ /* Shouldn't happen... */
+ if (m == NULL) {
+ V_ahstat.ahs_crypto++;
+ DPRINTF(("%s: bogus returned buffer from crypto\n", __func__));
+ error = EINVAL;
+ goto bad;
+ }
+
+ /* Figure out header size. */
+ rplen = HDRSIZE(sav);
+ authsize = AUTHSIZE(sav);
+
+ /* Copy authenticator off the packet. */
+ m_copydata(m, skip + rplen, authsize, calc);
+
+ /*
+ * If we have an mtag, we don't need to verify the authenticator --
+ * it has been verified by an IPsec-aware NIC.
+ */
+ if (mtag == NULL) {
+ ptr = (caddr_t) (tc + 1);
+
+ /* Verify authenticator. */
+ if (bcmp(ptr + skip + rplen, calc, authsize)) {
+ DPRINTF(("%s: authentication hash mismatch for packet "
+ "in SA %s/%08lx\n", __func__,
+ ipsec_address(&saidx->dst),
+ (u_long) ntohl(sav->spi)));
+ V_ahstat.ahs_badauth++;
+ error = EACCES;
+ goto bad;
+ }
+
+ /* Fix the Next Protocol field. */
+ ((u_int8_t *) ptr)[protoff] = nxt;
+
+ /* Copyback the saved (uncooked) network headers. */
+ m_copyback(m, 0, skip, ptr);
+ } else {
+ /* Fix the Next Protocol field. */
+ m_copyback(m, protoff, sizeof(u_int8_t), &nxt);
+ }
+
+ free(tc, M_XDATA); /* No longer needed. */
+ tc = NULL;
+
+ /*
+ * Header is now authenticated.
+ */
+ m->m_flags |= M_AUTHIPHDR|M_AUTHIPDGM;
+
+ /*
+ * Update replay sequence number, if appropriate.
+ */
+ if (sav->replay) {
+ u_int32_t seq;
+
+ m_copydata(m, skip + offsetof(struct newah, ah_seq),
+ sizeof (seq), (caddr_t) &seq);
+ if (ipsec_updatereplay(ntohl(seq), sav)) {
+ V_ahstat.ahs_replay++;
+ error = ENOBUFS; /*XXX as above*/
+ goto bad;
+ }
+ }
+
+ /*
+ * Remove the AH header and authenticator from the mbuf.
+ */
+ error = m_striphdr(m, skip, rplen + authsize);
+ if (error) {
+ DPRINTF(("%s: mangled mbuf chain for SA %s/%08lx\n", __func__,
+ ipsec_address(&saidx->dst), (u_long) ntohl(sav->spi)));
+
+ V_ahstat.ahs_hdrops++;
+ goto bad;
+ }
+
+ IPSEC_COMMON_INPUT_CB(m, sav, skip, protoff, mtag);
+
+ KEY_FREESAV(&sav);
+ return error;
+bad:
+ if (sav)
+ KEY_FREESAV(&sav);
+ if (m != NULL)
+ m_freem(m);
+ if (tc != NULL)
+ free(tc, M_XDATA);
+ if (crp != NULL)
+ crypto_freereq(crp);
+ return error;
+}
+
+/*
+ * AH output routine, called by ipsec[46]_process_packet().
+ */
+static int
+ah_output(
+ struct mbuf *m,
+ struct ipsecrequest *isr,
+ struct mbuf **mp,
+ int skip,
+ int protoff)
+{
+ struct secasvar *sav;
+ struct auth_hash *ahx;
+ struct cryptodesc *crda;
+ struct tdb_crypto *tc;
+ struct mbuf *mi;
+ struct cryptop *crp;
+ u_int16_t iplen;
+ int error, rplen, authsize, maxpacketsize, roff;
+ u_int8_t prot;
+ struct newah *ah;
+
+ sav = isr->sav;
+ IPSEC_ASSERT(sav != NULL, ("null SA"));
+ ahx = sav->tdb_authalgxform;
+ IPSEC_ASSERT(ahx != NULL, ("null authentication xform"));
+
+ V_ahstat.ahs_output++;
+
+ /* Figure out header size. */
+ rplen = HDRSIZE(sav);
+
+ /* Check for maximum packet size violations. */
+ switch (sav->sah->saidx.dst.sa.sa_family) {
+#ifdef INET
+ case AF_INET:
+ maxpacketsize = IP_MAXPACKET;
+ break;
+#endif /* INET */
+#ifdef INET6
+ case AF_INET6:
+ maxpacketsize = IPV6_MAXPACKET;
+ break;
+#endif /* INET6 */
+ default:
+ DPRINTF(("%s: unknown/unsupported protocol family %u, "
+ "SA %s/%08lx\n", __func__,
+ sav->sah->saidx.dst.sa.sa_family,
+ ipsec_address(&sav->sah->saidx.dst),
+ (u_long) ntohl(sav->spi)));
+ V_ahstat.ahs_nopf++;
+ error = EPFNOSUPPORT;
+ goto bad;
+ }
+ authsize = AUTHSIZE(sav);
+ if (rplen + authsize + m->m_pkthdr.len > maxpacketsize) {
+ DPRINTF(("%s: packet in SA %s/%08lx got too big "
+ "(len %u, max len %u)\n", __func__,
+ ipsec_address(&sav->sah->saidx.dst),
+ (u_long) ntohl(sav->spi),
+ rplen + authsize + m->m_pkthdr.len, maxpacketsize));
+ V_ahstat.ahs_toobig++;
+ error = EMSGSIZE;
+ goto bad;
+ }
+
+ /* Update the counters. */
+ V_ahstat.ahs_obytes += m->m_pkthdr.len - skip;
+
+ m = m_unshare(m, M_NOWAIT);
+ if (m == NULL) {
+ DPRINTF(("%s: cannot clone mbuf chain, SA %s/%08lx\n", __func__,
+ ipsec_address(&sav->sah->saidx.dst),
+ (u_long) ntohl(sav->spi)));
+ V_ahstat.ahs_hdrops++;
+ error = ENOBUFS;
+ goto bad;
+ }
+
+ /* Inject AH header. */
+ mi = m_makespace(m, skip, rplen + authsize, &roff);
+ if (mi == NULL) {
+ DPRINTF(("%s: failed to inject %u byte AH header for SA "
+ "%s/%08lx\n", __func__,
+ rplen + authsize,
+ ipsec_address(&sav->sah->saidx.dst),
+ (u_long) ntohl(sav->spi)));
+ V_ahstat.ahs_hdrops++; /*XXX differs from openbsd */
+ error = ENOBUFS;
+ goto bad;
+ }
+
+ /*
+ * The AH header is guaranteed by m_makespace() to be in
+ * contiguous memory, at roff bytes offset into the returned mbuf.
+ */
+ ah = (struct newah *)(mtod(mi, caddr_t) + roff);
+
+ /* Initialize the AH header. */
+ m_copydata(m, protoff, sizeof(u_int8_t), (caddr_t) &ah->ah_nxt);
+ ah->ah_len = (rplen + authsize - sizeof(struct ah)) / sizeof(u_int32_t);
+ ah->ah_reserve = 0;
+ ah->ah_spi = sav->spi;
+
+ /* Zeroize authenticator. */
+ m_copyback(m, skip + rplen, authsize, ipseczeroes);
+
+ /* Insert packet replay counter, as requested. */
+ if (sav->replay) {
+ if (sav->replay->count == ~0 &&
+ (sav->flags & SADB_X_EXT_CYCSEQ) == 0) {
+ DPRINTF(("%s: replay counter wrapped for SA %s/%08lx\n",
+ __func__,
+ ipsec_address(&sav->sah->saidx.dst),
+ (u_long) ntohl(sav->spi)));
+ V_ahstat.ahs_wrap++;
+ error = EINVAL;
+ goto bad;
+ }
+#ifdef REGRESSION
+ /* Emulate replay attack when ipsec_replay is TRUE. */
+ if (!V_ipsec_replay)
+#endif
+ sav->replay->count++;
+ ah->ah_seq = htonl(sav->replay->count);
+ }
+
+ /* Get crypto descriptors. */
+ crp = crypto_getreq(1);
+ if (crp == NULL) {
+ DPRINTF(("%s: failed to acquire crypto descriptors\n",
+ __func__));
+ V_ahstat.ahs_crypto++;
+ error = ENOBUFS;
+ goto bad;
+ }
+
+ crda = crp->crp_desc;
+
+ crda->crd_skip = 0;
+ crda->crd_inject = skip + rplen;
+ crda->crd_len = m->m_pkthdr.len;
+
+ /* Authentication operation. */
+ crda->crd_alg = ahx->type;
+ crda->crd_key = sav->key_auth->key_data;
+ crda->crd_klen = _KEYBITS(sav->key_auth);
+
+ /* Allocate IPsec-specific opaque crypto info. */
+ tc = (struct tdb_crypto *) malloc(
+ sizeof(struct tdb_crypto) + skip, M_XDATA, M_NOWAIT|M_ZERO);
+ if (tc == NULL) {
+ crypto_freereq(crp);
+ DPRINTF(("%s: failed to allocate tdb_crypto\n", __func__));
+ V_ahstat.ahs_crypto++;
+ error = ENOBUFS;
+ goto bad;
+ }
+
+ /* Save the skipped portion of the packet. */
+ m_copydata(m, 0, skip, (caddr_t) (tc + 1));
+
+ /*
+ * Fix IP header length on the header used for
+ * authentication. We don't need to fix the original
+ * header length as it will be fixed by our caller.
+ */
+ switch (sav->sah->saidx.dst.sa.sa_family) {
+#ifdef INET
+ case AF_INET:
+ bcopy(((caddr_t)(tc + 1)) +
+ offsetof(struct ip, ip_len),
+ (caddr_t) &iplen, sizeof(u_int16_t));
+ iplen = htons(ntohs(iplen) + rplen + authsize);
+ m_copyback(m, offsetof(struct ip, ip_len),
+ sizeof(u_int16_t), (caddr_t) &iplen);
+ break;
+#endif /* INET */
+
+#ifdef INET6
+ case AF_INET6:
+ bcopy(((caddr_t)(tc + 1)) +
+ offsetof(struct ip6_hdr, ip6_plen),
+ (caddr_t) &iplen, sizeof(u_int16_t));
+ iplen = htons(ntohs(iplen) + rplen + authsize);
+ m_copyback(m, offsetof(struct ip6_hdr, ip6_plen),
+ sizeof(u_int16_t), (caddr_t) &iplen);
+ break;
+#endif /* INET6 */
+ }
+
+ /* Fix the Next Header field in saved header. */
+ ((u_int8_t *) (tc + 1))[protoff] = IPPROTO_AH;
+
+ /* Update the Next Protocol field in the IP header. */
+ prot = IPPROTO_AH;
+ m_copyback(m, protoff, sizeof(u_int8_t), (caddr_t) &prot);
+
+ /* "Massage" the packet headers for crypto processing. */
+ error = ah_massage_headers(&m, sav->sah->saidx.dst.sa.sa_family,
+ skip, ahx->type, 1);
+ if (error != 0) {
+ m = NULL; /* mbuf was free'd by ah_massage_headers. */
+ free(tc, M_XDATA);
+ crypto_freereq(crp);
+ goto bad;
+ }
+
+ /* Crypto operation descriptor. */
+ crp->crp_ilen = m->m_pkthdr.len; /* Total input length. */
+ crp->crp_flags = CRYPTO_F_IMBUF | CRYPTO_F_CBIFSYNC;
+ crp->crp_buf = (caddr_t) m;
+ crp->crp_callback = ah_output_cb;
+ crp->crp_sid = sav->tdb_cryptoid;
+ crp->crp_opaque = (caddr_t) tc;
+
+ /* These are passed as-is to the callback. */
+ tc->tc_isr = isr;
+ tc->tc_spi = sav->spi;
+ tc->tc_dst = sav->sah->saidx.dst;
+ tc->tc_proto = sav->sah->saidx.proto;
+ tc->tc_skip = skip;
+ tc->tc_protoff = protoff;
+
+ return crypto_dispatch(crp);
+bad:
+ if (m)
+ m_freem(m);
+ return (error);
+}
+
+/*
+ * AH output callback from the crypto driver.
+ */
+static int
+ah_output_cb(struct cryptop *crp)
+{
+ int skip, protoff, error;
+ struct tdb_crypto *tc;
+ struct ipsecrequest *isr;
+ struct secasvar *sav;
+ struct mbuf *m;
+ caddr_t ptr;
+ int err;
+
+ tc = (struct tdb_crypto *) crp->crp_opaque;
+ IPSEC_ASSERT(tc != NULL, ("null opaque data area!"));
+ skip = tc->tc_skip;
+ protoff = tc->tc_protoff;
+ ptr = (caddr_t) (tc + 1);
+ m = (struct mbuf *) crp->crp_buf;
+
+ isr = tc->tc_isr;
+ IPSECREQUEST_LOCK(isr);
+ sav = KEY_ALLOCSA(&tc->tc_dst, tc->tc_proto, tc->tc_spi);
+ if (sav == NULL) {
+ V_ahstat.ahs_notdb++;
+ DPRINTF(("%s: SA expired while in crypto\n", __func__));
+ error = ENOBUFS; /*XXX*/
+ goto bad;
+ }
+ IPSEC_ASSERT(isr->sav == sav, ("SA changed\n"));
+
+ /* Check for crypto errors. */
+ if (crp->crp_etype) {
+ if (sav->tdb_cryptoid != 0)
+ sav->tdb_cryptoid = crp->crp_sid;
+
+ if (crp->crp_etype == EAGAIN) {
+ KEY_FREESAV(&sav);
+ IPSECREQUEST_UNLOCK(isr);
+ error = crypto_dispatch(crp);
+ return error;
+ }
+
+ V_ahstat.ahs_noxform++;
+ DPRINTF(("%s: crypto error %d\n", __func__, crp->crp_etype));
+ error = crp->crp_etype;
+ goto bad;
+ }
+
+ /* Shouldn't happen... */
+ if (m == NULL) {
+ V_ahstat.ahs_crypto++;
+ DPRINTF(("%s: bogus returned buffer from crypto\n", __func__));
+ error = EINVAL;
+ goto bad;
+ }
+ V_ahstat.ahs_hist[sav->alg_auth]++;
+
+ /*
+ * Copy original headers (with the new protocol number) back
+ * in place.
+ */
+ m_copyback(m, 0, skip, ptr);
+
+ /* No longer needed. */
+ free(tc, M_XDATA);
+ crypto_freereq(crp);
+
+#ifdef REGRESSION
+ /* Emulate man-in-the-middle attack when ipsec_integrity is TRUE. */
+ if (V_ipsec_integrity) {
+ int alen;
+
+ /*
+ * Corrupt HMAC if we want to test integrity verification of
+ * the other side.
+ */
+ alen = AUTHSIZE(sav);
+ m_copyback(m, m->m_pkthdr.len - alen, alen, ipseczeroes);
+ }
+#endif
+
+ /* NB: m is reclaimed by ipsec_process_done. */
+ err = ipsec_process_done(m, isr);
+ KEY_FREESAV(&sav);
+ IPSECREQUEST_UNLOCK(isr);
+ return err;
+bad:
+ if (sav)
+ KEY_FREESAV(&sav);
+ IPSECREQUEST_UNLOCK(isr);
+ if (m)
+ m_freem(m);
+ free(tc, M_XDATA);
+ crypto_freereq(crp);
+ return error;
+}
+
+static struct xformsw ah_xformsw = {
+ XF_AH, XFT_AUTH, "IPsec AH",
+ ah_init, ah_zeroize, ah_input, ah_output,
+};
+
+static void
+ah_attach(void)
+{
+
+ xform_register(&ah_xformsw);
+}
+
+SYSINIT(ah_xform_init, SI_SUB_PROTO_DOMAIN, SI_ORDER_MIDDLE, ah_attach, NULL);
diff --git a/rtems/freebsd/netipsec/xform_esp.c b/rtems/freebsd/netipsec/xform_esp.c
new file mode 100644
index 00000000..e7a56ae0
--- /dev/null
+++ b/rtems/freebsd/netipsec/xform_esp.c
@@ -0,0 +1,1005 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/* $FreeBSD$ */
+/* $OpenBSD: ip_esp.c,v 1.69 2001/06/26 06:18:59 angelos Exp $ */
+/*-
+ * The authors of this code are John Ioannidis (ji@tla.org),
+ * Angelos D. Keromytis (kermit@csd.uch.gr) and
+ * Niels Provos (provos@physnet.uni-hamburg.de).
+ *
+ * The original version of this code was written by John Ioannidis
+ * for BSD/OS in Athens, Greece, in November 1995.
+ *
+ * Ported to OpenBSD and NetBSD, with additional transforms, in December 1996,
+ * by Angelos D. Keromytis.
+ *
+ * Additional transforms and features in 1997 and 1998 by Angelos D. Keromytis
+ * and Niels Provos.
+ *
+ * Additional features in 1999 by Angelos D. Keromytis.
+ *
+ * Copyright (C) 1995, 1996, 1997, 1998, 1999 by John Ioannidis,
+ * Angelos D. Keromytis and Niels Provos.
+ * Copyright (c) 2001 Angelos D. Keromytis.
+ *
+ * Permission to use, copy, and modify this software with or without fee
+ * is hereby granted, provided that this entire notice is included in
+ * all copies of any software which is or includes a copy or
+ * modification of this software.
+ * You may use this code under the GNU public license if you so wish. Please
+ * contribute changes back to the authors under this freer than GPL license
+ * so that we may further the use of strong encryption without limitations to
+ * all.
+ *
+ * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
+ * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
+ * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
+ * PURPOSE.
+ */
+#include <rtems/freebsd/local/opt_inet.h>
+#include <rtems/freebsd/local/opt_inet6.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/syslog.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/random.h>
+#include <rtems/freebsd/sys/sysctl.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/vnet.h>
+
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/in_systm.h>
+#include <rtems/freebsd/netinet/ip.h>
+#include <rtems/freebsd/netinet/ip_ecn.h>
+#include <rtems/freebsd/netinet/ip6.h>
+
+#include <rtems/freebsd/net/route.h>
+#include <rtems/freebsd/netipsec/ipsec.h>
+#include <rtems/freebsd/netipsec/ah.h>
+#include <rtems/freebsd/netipsec/ah_var.h>
+#include <rtems/freebsd/netipsec/esp.h>
+#include <rtems/freebsd/netipsec/esp_var.h>
+#include <rtems/freebsd/netipsec/xform.h>
+
+#ifdef INET6
+#include <rtems/freebsd/netinet6/ip6_var.h>
+#include <rtems/freebsd/netipsec/ipsec6.h>
+#include <rtems/freebsd/netinet6/ip6_ecn.h>
+#endif
+
+#include <rtems/freebsd/netipsec/key.h>
+#include <rtems/freebsd/netipsec/key_debug.h>
+
+#include <rtems/freebsd/opencrypto/cryptodev.h>
+#include <rtems/freebsd/opencrypto/xform.h>
+
+VNET_DEFINE(int, esp_enable) = 1;
+VNET_DEFINE(struct espstat, espstat);
+
+SYSCTL_DECL(_net_inet_esp);
+SYSCTL_VNET_INT(_net_inet_esp, OID_AUTO,
+ esp_enable, CTLFLAG_RW, &VNET_NAME(esp_enable), 0, "");
+SYSCTL_VNET_STRUCT(_net_inet_esp, IPSECCTL_STATS,
+ stats, CTLFLAG_RD, &VNET_NAME(espstat), espstat, "");
+
+static VNET_DEFINE(int, esp_max_ivlen); /* max iv length over all algorithms */
+#define V_esp_max_ivlen VNET(esp_max_ivlen)
+
+static int esp_input_cb(struct cryptop *crp);
+static int esp_output_cb(struct cryptop *crp);
+
+/*
+ * NB: this is public for use by the PF_KEY support.
+ * NB: if you add support here, be sure to add code to esp_attach() below!
+ */
+struct enc_xform *
+esp_algorithm_lookup(int alg)
+{
+ if (alg >= ESP_ALG_MAX)
+ return NULL;
+ switch (alg) {
+ case SADB_EALG_DESCBC:
+ return &enc_xform_des;
+ case SADB_EALG_3DESCBC:
+ return &enc_xform_3des;
+ case SADB_X_EALG_AES:
+ return &enc_xform_rijndael128;
+ case SADB_X_EALG_BLOWFISHCBC:
+ return &enc_xform_blf;
+ case SADB_X_EALG_CAST128CBC:
+ return &enc_xform_cast5;
+ case SADB_X_EALG_SKIPJACK:
+ return &enc_xform_skipjack;
+ case SADB_EALG_NULL:
+ return &enc_xform_null;
+ case SADB_X_EALG_CAMELLIACBC:
+ return &enc_xform_camellia;
+ }
+ return NULL;
+}
+
+size_t
+esp_hdrsiz(struct secasvar *sav)
+{
+ size_t size;
+
+ if (sav != NULL) {
+ /*XXX not right for null algorithm--does it matter??*/
+ IPSEC_ASSERT(sav->tdb_encalgxform != NULL,
+ ("SA with null xform"));
+ if (sav->flags & SADB_X_EXT_OLD)
+ size = sizeof (struct esp);
+ else
+ size = sizeof (struct newesp);
+ size += sav->tdb_encalgxform->blocksize + 9;
+ /*XXX need alg check???*/
+ if (sav->tdb_authalgxform != NULL && sav->replay)
+ size += ah_hdrsiz(sav);
+ } else {
+ /*
+ * base header size
+ * + max iv length for CBC mode
+ * + max pad length
+ * + sizeof (pad length field)
+ * + sizeof (next header field)
+ * + max icv supported.
+ */
+ size = sizeof (struct newesp) + V_esp_max_ivlen + 9 + 16;
+ }
+ return size;
+}
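+
+/*
+ * Editor's example (illustrative, not part of the original sources):
+ * for a 3DES-CBC SA with HMAC authentication, the per-SA estimate
+ * above works out to
+ *
+ *	8 octets   struct newesp (SPI + sequence number)
+ *	+ 8 octets IV (the 3DES block size)
+ *	+ 9 octets worst case: up to 7 pad bytes, the pad length
+ *	           byte and the next header byte
+ *	+ ah_hdrsiz(sav) for the ICV when authentication is set,
+ *
+ * i.e. the "blocksize + 9" term covers the IV plus the ESP trailer.
+ */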
+
+/*
+ * esp_init() is called when an SPI is being set up.
+ */
+static int
+esp_init(struct secasvar *sav, struct xformsw *xsp)
+{
+ struct enc_xform *txform;
+ struct cryptoini cria, crie;
+ int keylen;
+ int error;
+
+ txform = esp_algorithm_lookup(sav->alg_enc);
+ if (txform == NULL) {
+ DPRINTF(("%s: unsupported encryption algorithm %d\n",
+ __func__, sav->alg_enc));
+ return EINVAL;
+ }
+ if (sav->key_enc == NULL) {
+ DPRINTF(("%s: no encoding key for %s algorithm\n",
+ __func__, txform->name));
+ return EINVAL;
+ }
+ if ((sav->flags&(SADB_X_EXT_OLD|SADB_X_EXT_IV4B)) == SADB_X_EXT_IV4B) {
+ DPRINTF(("%s: 4-byte IV not supported with protocol\n",
+ __func__));
+ return EINVAL;
+ }
+ keylen = _KEYLEN(sav->key_enc);
+ if (txform->minkey > keylen || keylen > txform->maxkey) {
+ DPRINTF(("%s: invalid key length %u, must be in the range "
+ "[%u..%u] for algorithm %s\n", __func__,
+ keylen, txform->minkey, txform->maxkey,
+ txform->name));
+ return EINVAL;
+ }
+
+ /*
+ * NB: The null xform needs a non-zero blocksize to keep the
+ * crypto code happy but if we use it to set ivlen then
+ * the ESP header will be processed incorrectly. The
+ * compromise is to force it to zero here.
+ */
+ sav->ivlen = (txform == &enc_xform_null ? 0 : txform->blocksize);
+ sav->iv = (caddr_t) malloc(sav->ivlen, M_XDATA, M_WAITOK);
+ if (sav->iv == NULL) {
+ DPRINTF(("%s: no memory for IV\n", __func__));
+ return EINVAL;
+ }
+ key_randomfill(sav->iv, sav->ivlen); /*XXX*/
+
+ /*
+ * Setup AH-related state.
+ */
+ if (sav->alg_auth != 0) {
+ error = ah_init0(sav, xsp, &cria);
+ if (error)
+ return error;
+ }
+
+ /* NB: override anything set in ah_init0 */
+ sav->tdb_xform = xsp;
+ sav->tdb_encalgxform = txform;
+
+ /* Initialize crypto session. */
+ bzero(&crie, sizeof (crie));
+ crie.cri_alg = sav->tdb_encalgxform->type;
+ crie.cri_klen = _KEYBITS(sav->key_enc);
+ crie.cri_key = sav->key_enc->key_data;
+ /* XXX Rounds ? */
+
+ if (sav->tdb_authalgxform && sav->tdb_encalgxform) {
+ /* init both auth & enc */
+ crie.cri_next = &cria;
+ error = crypto_newsession(&sav->tdb_cryptoid,
+ &crie, V_crypto_support);
+ } else if (sav->tdb_encalgxform) {
+ error = crypto_newsession(&sav->tdb_cryptoid,
+ &crie, V_crypto_support);
+ } else if (sav->tdb_authalgxform) {
+ error = crypto_newsession(&sav->tdb_cryptoid,
+ &cria, V_crypto_support);
+ } else {
+ /* XXX cannot happen? */
+ DPRINTF(("%s: no encoding OR authentication xform!\n",
+ __func__));
+ error = EINVAL;
+ }
+ return error;
+}
+
+/*
+ * Paranoia.
+ */
+static int
+esp_zeroize(struct secasvar *sav)
+{
+ /* NB: ah_zeroize() frees the crypto session state */
+ int error = ah_zeroize(sav);
+
+ if (sav->key_enc)
+ bzero(sav->key_enc->key_data, _KEYLEN(sav->key_enc));
+ if (sav->iv) {
+ free(sav->iv, M_XDATA);
+ sav->iv = NULL;
+ }
+ sav->tdb_encalgxform = NULL;
+ sav->tdb_xform = NULL;
+ return error;
+}
+
+/*
+ * ESP input processing, called (eventually) through the protocol switch.
+ */
+static int
+esp_input(struct mbuf *m, struct secasvar *sav, int skip, int protoff)
+{
+ struct auth_hash *esph;
+ struct enc_xform *espx;
+ struct tdb_ident *tdbi;
+ struct tdb_crypto *tc;
+ int plen, alen, hlen;
+ struct m_tag *mtag;
+ struct newesp *esp;
+
+ struct cryptodesc *crde;
+ struct cryptop *crp;
+
+ IPSEC_ASSERT(sav != NULL, ("null SA"));
+ IPSEC_ASSERT(sav->tdb_encalgxform != NULL, ("null encoding xform"));
+ IPSEC_ASSERT((skip&3) == 0 && (m->m_pkthdr.len&3) == 0,
+ ("misaligned packet, skip %u pkt len %u",
+ skip, m->m_pkthdr.len));
+
+ /* XXX don't pullup, just copy header */
+ IP6_EXTHDR_GET(esp, struct newesp *, m, skip, sizeof (struct newesp));
+
+ esph = sav->tdb_authalgxform;
+ espx = sav->tdb_encalgxform;
+
+ /* Determine the ESP header length */
+ if (sav->flags & SADB_X_EXT_OLD)
+ hlen = sizeof (struct esp) + sav->ivlen;
+ else
+ hlen = sizeof (struct newesp) + sav->ivlen;
+ /* Authenticator hash size */
+ alen = esph ? AH_HMAC_HASHLEN : 0;
+
+ /*
+ * Verify payload length is multiple of encryption algorithm
+ * block size.
+ *
+ * NB: This works for the null algorithm because the blocksize
+ * is 4 and all packets must be 4-byte aligned regardless
+ * of the algorithm.
+ */
+ plen = m->m_pkthdr.len - (skip + hlen + alen);
+ if ((plen & (espx->blocksize - 1)) || (plen <= 0)) {
+ DPRINTF(("%s: payload of %d octets not a multiple of %d octets,"
+ " SA %s/%08lx\n", __func__,
+ plen, espx->blocksize,
+ ipsec_address(&sav->sah->saidx.dst),
+ (u_long) ntohl(sav->spi)));
+ V_espstat.esps_badilen++;
+ m_freem(m);
+ return EINVAL;
+ }
+
+ /*
+ * Check sequence number.
+ */
+ if (esph && sav->replay && !ipsec_chkreplay(ntohl(esp->esp_seq), sav)) {
+ DPRINTF(("%s: packet replay check for %s\n", __func__,
+ ipsec_logsastr(sav))); /*XXX*/
+ V_espstat.esps_replay++;
+ m_freem(m);
+ return ENOBUFS; /*XXX*/
+ }
+
+ /* Update the counters */
+ V_espstat.esps_ibytes += m->m_pkthdr.len - (skip + hlen + alen);
+
+ /* Find out if we've already done crypto */
+ for (mtag = m_tag_find(m, PACKET_TAG_IPSEC_IN_CRYPTO_DONE, NULL);
+ mtag != NULL;
+ mtag = m_tag_find(m, PACKET_TAG_IPSEC_IN_CRYPTO_DONE, mtag)) {
+ tdbi = (struct tdb_ident *) (mtag + 1);
+ if (tdbi->proto == sav->sah->saidx.proto &&
+ tdbi->spi == sav->spi &&
+ !bcmp(&tdbi->dst, &sav->sah->saidx.dst,
+ sizeof(union sockaddr_union)))
+ break;
+ }
+
+ /* Get crypto descriptors */
+ crp = crypto_getreq(esph && espx ? 2 : 1);
+ if (crp == NULL) {
+ DPRINTF(("%s: failed to acquire crypto descriptors\n",
+ __func__));
+ V_espstat.esps_crypto++;
+ m_freem(m);
+ return ENOBUFS;
+ }
+
+ /* Get IPsec-specific opaque pointer */
+ if (esph == NULL || mtag != NULL)
+ tc = (struct tdb_crypto *) malloc(sizeof(struct tdb_crypto),
+ M_XDATA, M_NOWAIT|M_ZERO);
+ else
+ tc = (struct tdb_crypto *) malloc(sizeof(struct tdb_crypto) + alen,
+ M_XDATA, M_NOWAIT|M_ZERO);
+ if (tc == NULL) {
+ crypto_freereq(crp);
+ DPRINTF(("%s: failed to allocate tdb_crypto\n", __func__));
+ V_espstat.esps_crypto++;
+ m_freem(m);
+ return ENOBUFS;
+ }
+
+ tc->tc_ptr = (caddr_t) mtag;
+
+ if (esph) {
+ struct cryptodesc *crda = crp->crp_desc;
+
+ IPSEC_ASSERT(crda != NULL, ("null ah crypto descriptor"));
+
+ /* Authentication descriptor */
+ crda->crd_skip = skip;
+ crda->crd_len = m->m_pkthdr.len - (skip + alen);
+ crda->crd_inject = m->m_pkthdr.len - alen;
+
+ crda->crd_alg = esph->type;
+ crda->crd_key = sav->key_auth->key_data;
+ crda->crd_klen = _KEYBITS(sav->key_auth);
+
+ /* Copy the authenticator */
+ if (mtag == NULL)
+ m_copydata(m, m->m_pkthdr.len - alen, alen,
+ (caddr_t) (tc + 1));
+
+ /* Chain authentication request */
+ crde = crda->crd_next;
+ } else {
+ crde = crp->crp_desc;
+ }
+
+ /* Crypto operation descriptor */
+ crp->crp_ilen = m->m_pkthdr.len; /* Total input length */
+ crp->crp_flags = CRYPTO_F_IMBUF | CRYPTO_F_CBIFSYNC;
+ crp->crp_buf = (caddr_t) m;
+ crp->crp_callback = esp_input_cb;
+ crp->crp_sid = sav->tdb_cryptoid;
+ crp->crp_opaque = (caddr_t) tc;
+
+ /* These are passed as-is to the callback */
+ tc->tc_spi = sav->spi;
+ tc->tc_dst = sav->sah->saidx.dst;
+ tc->tc_proto = sav->sah->saidx.proto;
+ tc->tc_protoff = protoff;
+ tc->tc_skip = skip;
+
+ /* Decryption descriptor */
+ if (espx) {
+ IPSEC_ASSERT(crde != NULL, ("null esp crypto descriptor"));
+ crde->crd_skip = skip + hlen;
+ crde->crd_len = m->m_pkthdr.len - (skip + hlen + alen);
+ crde->crd_inject = skip + hlen - sav->ivlen;
+
+ crde->crd_alg = espx->type;
+ crde->crd_key = sav->key_enc->key_data;
+ crde->crd_klen = _KEYBITS(sav->key_enc);
+ /* XXX Rounds ? */
+ }
+
+ if (mtag == NULL)
+ return crypto_dispatch(crp);
+ else
+ return esp_input_cb(crp);
+}
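+
+/*
+ * Editor's sketch (illustrative, not part of the original sources) of
+ * how esp_input() describes the packet to the crypto layer for the
+ * common new-format case:
+ *
+ *	|<---- skip ---->|<---- hlen ---->|<--- plen --->|<- alen ->|
+ *	[ IP + ext hdrs  ][ ESP hdr + IV  ][ ciphertext  ][   ICV   ]
+ *
+ * crda (authentication) covers everything after skip except the
+ * trailing ICV and injects at pktlen - alen; crde (decryption) covers
+ * only the ciphertext, with crd_inject pointing at the IV.
+ */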
+
+#ifdef INET6
+#define IPSEC_COMMON_INPUT_CB(m, sav, skip, protoff, mtag) do { \
+ if (saidx->dst.sa.sa_family == AF_INET6) { \
+ error = ipsec6_common_input_cb(m, sav, skip, protoff, mtag); \
+ } else { \
+ error = ipsec4_common_input_cb(m, sav, skip, protoff, mtag); \
+ } \
+} while (0)
+#else
+#define IPSEC_COMMON_INPUT_CB(m, sav, skip, protoff, mtag) \
+ (error = ipsec4_common_input_cb(m, sav, skip, protoff, mtag))
+#endif
+
+/*
+ * ESP input callback from the crypto driver.
+ */
+static int
+esp_input_cb(struct cryptop *crp)
+{
+ u_int8_t lastthree[3], aalg[AH_HMAC_HASHLEN];
+ int hlen, skip, protoff, error;
+ struct mbuf *m;
+ struct cryptodesc *crd;
+ struct auth_hash *esph;
+ struct enc_xform *espx;
+ struct tdb_crypto *tc;
+ struct m_tag *mtag;
+ struct secasvar *sav;
+ struct secasindex *saidx;
+ caddr_t ptr;
+
+ crd = crp->crp_desc;
+ IPSEC_ASSERT(crd != NULL, ("null crypto descriptor!"));
+
+ tc = (struct tdb_crypto *) crp->crp_opaque;
+ IPSEC_ASSERT(tc != NULL, ("null opaque crypto data area!"));
+ skip = tc->tc_skip;
+ protoff = tc->tc_protoff;
+ mtag = (struct m_tag *) tc->tc_ptr;
+ m = (struct mbuf *) crp->crp_buf;
+
+ sav = KEY_ALLOCSA(&tc->tc_dst, tc->tc_proto, tc->tc_spi);
+ if (sav == NULL) {
+ V_espstat.esps_notdb++;
+ DPRINTF(("%s: SA gone during crypto (SA %s/%08lx proto %u)\n",
+ __func__, ipsec_address(&tc->tc_dst),
+ (u_long) ntohl(tc->tc_spi), tc->tc_proto));
+ error = ENOBUFS; /*XXX*/
+ goto bad;
+ }
+
+ saidx = &sav->sah->saidx;
+ IPSEC_ASSERT(saidx->dst.sa.sa_family == AF_INET ||
+ saidx->dst.sa.sa_family == AF_INET6,
+ ("unexpected protocol family %u", saidx->dst.sa.sa_family));
+
+ esph = sav->tdb_authalgxform;
+ espx = sav->tdb_encalgxform;
+
+ /* Check for crypto errors */
+ if (crp->crp_etype) {
+ /* Reset the session ID */
+ if (sav->tdb_cryptoid != 0)
+ sav->tdb_cryptoid = crp->crp_sid;
+
+ if (crp->crp_etype == EAGAIN) {
+ KEY_FREESAV(&sav);
+ error = crypto_dispatch(crp);
+ return error;
+ }
+
+ V_espstat.esps_noxform++;
+ DPRINTF(("%s: crypto error %d\n", __func__, crp->crp_etype));
+ error = crp->crp_etype;
+ goto bad;
+ }
+
+ /* Shouldn't happen... */
+ if (m == NULL) {
+ V_espstat.esps_crypto++;
+ DPRINTF(("%s: bogus returned buffer from crypto\n", __func__));
+ error = EINVAL;
+ goto bad;
+ }
+ V_espstat.esps_hist[sav->alg_enc]++;
+
+ /* If authentication was performed, check now. */
+ if (esph != NULL) {
+ /*
+ * If we have a tag, it means an IPsec-aware NIC did
+ * the verification for us. Otherwise we need to
+ * check the authentication calculation.
+ */
+ V_ahstat.ahs_hist[sav->alg_auth]++;
+ if (mtag == NULL) {
+ /* Copy the authenticator from the packet */
+ m_copydata(m, m->m_pkthdr.len - AH_HMAC_HASHLEN,
+ AH_HMAC_HASHLEN, aalg);
+
+ ptr = (caddr_t) (tc + 1);
+
+ /* Verify authenticator */
+ if (bcmp(ptr, aalg, AH_HMAC_HASHLEN) != 0) {
+ DPRINTF(("%s: "
+ "authentication hash mismatch for packet in SA %s/%08lx\n",
+ __func__,
+ ipsec_address(&saidx->dst),
+ (u_long) ntohl(sav->spi)));
+ V_espstat.esps_badauth++;
+ error = EACCES;
+ goto bad;
+ }
+ }
+
+ /* Remove trailing authenticator */
+ m_adj(m, -AH_HMAC_HASHLEN);
+ }
+
+ /* Release the crypto descriptors */
+ free(tc, M_XDATA), tc = NULL;
+ crypto_freereq(crp), crp = NULL;
+
+ /*
+ * Packet is now decrypted.
+ */
+ m->m_flags |= M_DECRYPTED;
+
+ /*
+ * Update replay sequence number, if appropriate.
+ */
+ if (sav->replay) {
+ u_int32_t seq;
+
+ m_copydata(m, skip + offsetof(struct newesp, esp_seq),
+ sizeof (seq), (caddr_t) &seq);
+ if (ipsec_updatereplay(ntohl(seq), sav)) {
+ DPRINTF(("%s: packet replay check for %s\n", __func__,
+ ipsec_logsastr(sav)));
+ V_espstat.esps_replay++;
+ error = ENOBUFS;
+ goto bad;
+ }
+ }
+
+ /* Determine the ESP header length */
+ if (sav->flags & SADB_X_EXT_OLD)
+ hlen = sizeof (struct esp) + sav->ivlen;
+ else
+ hlen = sizeof (struct newesp) + sav->ivlen;
+
+ /* Remove the ESP header and IV from the mbuf. */
+ error = m_striphdr(m, skip, hlen);
+ if (error) {
+ V_espstat.esps_hdrops++;
+ DPRINTF(("%s: bad mbuf chain, SA %s/%08lx\n", __func__,
+ ipsec_address(&sav->sah->saidx.dst),
+ (u_long) ntohl(sav->spi)));
+ goto bad;
+ }
+
+ /* Save the last three bytes of decrypted data */
+ m_copydata(m, m->m_pkthdr.len - 3, 3, lastthree);
+
+ /* Verify pad length */
+ if (lastthree[1] + 2 > m->m_pkthdr.len - skip) {
+ V_espstat.esps_badilen++;
+ DPRINTF(("%s: invalid padding length %d for %u byte packet "
+ "in SA %s/%08lx\n", __func__,
+ lastthree[1], m->m_pkthdr.len - skip,
+ ipsec_address(&sav->sah->saidx.dst),
+ (u_long) ntohl(sav->spi)));
+ error = EINVAL;
+ goto bad;
+ }
+
+ /* Verify correct decryption by checking the last padding bytes */
+ if ((sav->flags & SADB_X_EXT_PMASK) != SADB_X_EXT_PRAND) {
+ if (lastthree[1] != lastthree[0] && lastthree[1] != 0) {
+ V_espstat.esps_badenc++;
+ DPRINTF(("%s: decryption failed for packet in "
+ "SA %s/%08lx\n", __func__,
+ ipsec_address(&sav->sah->saidx.dst),
+ (u_long) ntohl(sav->spi)));
+ error = EINVAL;
+ goto bad;
+ }
+ }
+
+ /* Trim the mbuf chain to remove trailing authenticator and padding */
+ m_adj(m, -(lastthree[1] + 2));
+
+ /* Restore the Next Protocol field */
+ m_copyback(m, protoff, sizeof (u_int8_t), lastthree + 2);
+
+ IPSEC_COMMON_INPUT_CB(m, sav, skip, protoff, mtag);
+
+ KEY_FREESAV(&sav);
+ return error;
+bad:
+ if (sav)
+ KEY_FREESAV(&sav);
+ if (m != NULL)
+ m_freem(m);
+ if (tc != NULL)
+ free(tc, M_XDATA);
+ if (crp != NULL)
+ crypto_freereq(crp);
+ return error;
+}
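+
+/*
+ * Editor's note (illustrative, not part of the original sources): the
+ * "lastthree" parsing above relies on the RFC 2406 ESP trailer, which
+ * at that point sits at the very end of the chain (the ICV and ESP
+ * header have already been removed):
+ *
+ *	[ payload ][ padding ... ][ pad length ][ next header ]
+ *
+ * lastthree[1] is the pad length and lastthree[2] the next header;
+ * lastthree[0] is the final pad byte, which for self-describing
+ * padding must equal the pad length, hence the decryption check.
+ */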
+
+/*
+ * ESP output routine, called by ipsec[46]_process_packet().
+ */
+static int
+esp_output(
+ struct mbuf *m,
+ struct ipsecrequest *isr,
+ struct mbuf **mp,
+ int skip,
+ int protoff
+)
+{
+ struct enc_xform *espx;
+ struct auth_hash *esph;
+ int hlen, rlen, plen, padding, blks, alen, i, roff;
+ struct mbuf *mo = (struct mbuf *) NULL;
+ struct tdb_crypto *tc;
+ struct secasvar *sav;
+ struct secasindex *saidx;
+ unsigned char *pad;
+ u_int8_t prot;
+ int error, maxpacketsize;
+
+ struct cryptodesc *crde = NULL, *crda = NULL;
+ struct cryptop *crp;
+
+ sav = isr->sav;
+ IPSEC_ASSERT(sav != NULL, ("null SA"));
+ esph = sav->tdb_authalgxform;
+ espx = sav->tdb_encalgxform;
+ IPSEC_ASSERT(espx != NULL, ("null encoding xform"));
+
+ if (sav->flags & SADB_X_EXT_OLD)
+ hlen = sizeof (struct esp) + sav->ivlen;
+ else
+ hlen = sizeof (struct newesp) + sav->ivlen;
+
+ rlen = m->m_pkthdr.len - skip; /* Raw payload length. */
+ /*
+ * NB: The null encoding transform has a blocksize of 4
+ * so that headers are properly aligned.
+ */
+ blks = espx->blocksize; /* IV blocksize */
+
+ /* XXX clamp padding length a la KAME??? */
+ padding = ((blks - ((rlen + 2) % blks)) % blks) + 2;
+ plen = rlen + padding; /* Padded payload length. */
+
+ if (esph)
+ alen = AH_HMAC_HASHLEN;
+ else
+ alen = 0;
+
+ V_espstat.esps_output++;
+
+ saidx = &sav->sah->saidx;
+ /* Check for maximum packet size violations. */
+ switch (saidx->dst.sa.sa_family) {
+#ifdef INET
+ case AF_INET:
+ maxpacketsize = IP_MAXPACKET;
+ break;
+#endif /* INET */
+#ifdef INET6
+ case AF_INET6:
+ maxpacketsize = IPV6_MAXPACKET;
+ break;
+#endif /* INET6 */
+ default:
+ DPRINTF(("%s: unknown/unsupported protocol "
+ "family %d, SA %s/%08lx\n", __func__,
+ saidx->dst.sa.sa_family, ipsec_address(&saidx->dst),
+ (u_long) ntohl(sav->spi)));
+ V_espstat.esps_nopf++;
+ error = EPFNOSUPPORT;
+ goto bad;
+ }
+ if (skip + hlen + rlen + padding + alen > maxpacketsize) {
+ DPRINTF(("%s: packet in SA %s/%08lx got too big "
+ "(len %u, max len %u)\n", __func__,
+ ipsec_address(&saidx->dst), (u_long) ntohl(sav->spi),
+ skip + hlen + rlen + padding + alen, maxpacketsize));
+ V_espstat.esps_toobig++;
+ error = EMSGSIZE;
+ goto bad;
+ }
+
+ /* Update the counters. */
+ V_espstat.esps_obytes += m->m_pkthdr.len - skip;
+
+ m = m_unshare(m, M_NOWAIT);
+ if (m == NULL) {
+ DPRINTF(("%s: cannot clone mbuf chain, SA %s/%08lx\n", __func__,
+ ipsec_address(&saidx->dst), (u_long) ntohl(sav->spi)));
+ V_espstat.esps_hdrops++;
+ error = ENOBUFS;
+ goto bad;
+ }
+
+ /* Inject ESP header. */
+ mo = m_makespace(m, skip, hlen, &roff);
+ if (mo == NULL) {
+ DPRINTF(("%s: %u byte ESP hdr inject failed for SA %s/%08lx\n",
+ __func__, hlen, ipsec_address(&saidx->dst),
+ (u_long) ntohl(sav->spi)));
+ V_espstat.esps_hdrops++; /* XXX diffs from openbsd */
+ error = ENOBUFS;
+ goto bad;
+ }
+
+ /* Initialize ESP header. */
+ bcopy((caddr_t) &sav->spi, mtod(mo, caddr_t) + roff, sizeof(u_int32_t));
+ if (sav->replay) {
+ u_int32_t replay;
+
+#ifdef REGRESSION
+ /* Emulate replay attack when ipsec_replay is TRUE. */
+ if (!V_ipsec_replay)
+#endif
+ sav->replay->count++;
+ replay = htonl(sav->replay->count);
+ bcopy((caddr_t) &replay,
+ mtod(mo, caddr_t) + roff + sizeof(u_int32_t),
+ sizeof(u_int32_t));
+ }
+
+ /*
+ * Add padding -- better to do it ourselves than use the crypto engine,
+ * although if/when we support compression, we'd have to do that.
+ */
+ pad = (u_char *) m_pad(m, padding + alen);
+ if (pad == NULL) {
+ DPRINTF(("%s: m_pad failed for SA %s/%08lx\n", __func__,
+ ipsec_address(&saidx->dst), (u_long) ntohl(sav->spi)));
+ m = NULL; /* NB: free'd by m_pad */
+ error = ENOBUFS;
+ goto bad;
+ }
+
+ /*
+ * Add padding: random, zero, or self-describing.
+ * XXX catch unexpected setting
+ */
+ switch (sav->flags & SADB_X_EXT_PMASK) {
+ case SADB_X_EXT_PRAND:
+ (void) read_random(pad, padding - 2);
+ break;
+ case SADB_X_EXT_PZERO:
+ bzero(pad, padding - 2);
+ break;
+ case SADB_X_EXT_PSEQ:
+ for (i = 0; i < padding - 2; i++)
+ pad[i] = i+1;
+ break;
+ }
+
+ /* Fix padding length and Next Protocol in padding itself. */
+ pad[padding - 2] = padding - 2;
+ m_copydata(m, protoff, sizeof(u_int8_t), pad + padding - 1);
+
+ /* Fix Next Protocol in IPv4/IPv6 header. */
+ prot = IPPROTO_ESP;
+ m_copyback(m, protoff, sizeof(u_int8_t), (u_char *) &prot);
+
+ /* Get crypto descriptors. */
+ crp = crypto_getreq(esph && espx ? 2 : 1);
+ if (crp == NULL) {
+ DPRINTF(("%s: failed to acquire crypto descriptors\n",
+ __func__));
+ V_espstat.esps_crypto++;
+ error = ENOBUFS;
+ goto bad;
+ }
+
+ if (espx) {
+ crde = crp->crp_desc;
+ crda = crde->crd_next;
+
+ /* Encryption descriptor. */
+ crde->crd_skip = skip + hlen;
+ crde->crd_len = m->m_pkthdr.len - (skip + hlen + alen);
+ crde->crd_flags = CRD_F_ENCRYPT;
+ crde->crd_inject = skip + hlen - sav->ivlen;
+
+ /* Encryption operation. */
+ crde->crd_alg = espx->type;
+ crde->crd_key = sav->key_enc->key_data;
+ crde->crd_klen = _KEYBITS(sav->key_enc);
+ /* XXX Rounds ? */
+ } else
+ crda = crp->crp_desc;
+
+ /* IPsec-specific opaque crypto info. */
+ tc = (struct tdb_crypto *) malloc(sizeof(struct tdb_crypto),
+ M_XDATA, M_NOWAIT|M_ZERO);
+ if (tc == NULL) {
+ crypto_freereq(crp);
+ DPRINTF(("%s: failed to allocate tdb_crypto\n", __func__));
+ V_espstat.esps_crypto++;
+ error = ENOBUFS;
+ goto bad;
+ }
+
+ /* Callback parameters */
+ tc->tc_isr = isr;
+ tc->tc_spi = sav->spi;
+ tc->tc_dst = saidx->dst;
+ tc->tc_proto = saidx->proto;
+
+ /* Crypto operation descriptor. */
+ crp->crp_ilen = m->m_pkthdr.len; /* Total input length. */
+ crp->crp_flags = CRYPTO_F_IMBUF | CRYPTO_F_CBIFSYNC;
+ crp->crp_buf = (caddr_t) m;
+ crp->crp_callback = esp_output_cb;
+ crp->crp_opaque = (caddr_t) tc;
+ crp->crp_sid = sav->tdb_cryptoid;
+
+ if (esph) {
+ /* Authentication descriptor. */
+ crda->crd_skip = skip;
+ crda->crd_len = m->m_pkthdr.len - (skip + alen);
+ crda->crd_inject = m->m_pkthdr.len - alen;
+
+ /* Authentication operation. */
+ crda->crd_alg = esph->type;
+ crda->crd_key = sav->key_auth->key_data;
+ crda->crd_klen = _KEYBITS(sav->key_auth);
+ }
+
+ return crypto_dispatch(crp);
+bad:
+ if (m)
+ m_freem(m);
+ return (error);
+}
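+
+/*
+ * Editor's sketch (hypothetical helper, not part of the original
+ * sources): the inline padding computation in esp_output() above
+ * rounds the payload plus the two trailer bytes up to the cipher
+ * block size.  For example, an 11-byte payload with an 8-byte block
+ * cipher yields ((8 - ((11 + 2) % 8)) % 8) + 2 = 5 pad octets.
+ */
+static __inline int
+esp_pad_length(int rlen, int blks)
+{
+
+ /* Pad so payload + pad length byte + next header byte fill blocks. */
+ return ((blks - ((rlen + 2) % blks)) % blks) + 2;
+}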
+
+/*
+ * ESP output callback from the crypto driver.
+ */
+static int
+esp_output_cb(struct cryptop *crp)
+{
+ struct tdb_crypto *tc;
+ struct ipsecrequest *isr;
+ struct secasvar *sav;
+ struct mbuf *m;
+ int err, error;
+
+ tc = (struct tdb_crypto *) crp->crp_opaque;
+ IPSEC_ASSERT(tc != NULL, ("null opaque data area!"));
+ m = (struct mbuf *) crp->crp_buf;
+
+ isr = tc->tc_isr;
+ IPSECREQUEST_LOCK(isr);
+ sav = KEY_ALLOCSA(&tc->tc_dst, tc->tc_proto, tc->tc_spi);
+ if (sav == NULL) {
+ V_espstat.esps_notdb++;
+ DPRINTF(("%s: SA gone during crypto (SA %s/%08lx proto %u)\n",
+ __func__, ipsec_address(&tc->tc_dst),
+ (u_long) ntohl(tc->tc_spi), tc->tc_proto));
+ error = ENOBUFS; /*XXX*/
+ goto bad;
+ }
+ IPSEC_ASSERT(isr->sav == sav,
+ ("SA changed was %p now %p\n", isr->sav, sav));
+
+ /* Check for crypto errors. */
+ if (crp->crp_etype) {
+ /* Reset session ID. */
+ if (sav->tdb_cryptoid != 0)
+ sav->tdb_cryptoid = crp->crp_sid;
+
+ if (crp->crp_etype == EAGAIN) {
+ KEY_FREESAV(&sav);
+ IPSECREQUEST_UNLOCK(isr);
+ error = crypto_dispatch(crp);
+ return error;
+ }
+
+ V_espstat.esps_noxform++;
+ DPRINTF(("%s: crypto error %d\n", __func__, crp->crp_etype));
+ error = crp->crp_etype;
+ goto bad;
+ }
+
+ /* Shouldn't happen... */
+ if (m == NULL) {
+ V_espstat.esps_crypto++;
+ DPRINTF(("%s: bogus returned buffer from crypto\n", __func__));
+ error = EINVAL;
+ goto bad;
+ }
+ V_espstat.esps_hist[sav->alg_enc]++;
+ if (sav->tdb_authalgxform != NULL)
+ V_ahstat.ahs_hist[sav->alg_auth]++;
+
+ /* Release crypto descriptors. */
+ free(tc, M_XDATA);
+ crypto_freereq(crp);
+
+#ifdef REGRESSION
+ /* Emulate man-in-the-middle attack when ipsec_integrity is TRUE. */
+ if (V_ipsec_integrity) {
+ static unsigned char ipseczeroes[AH_HMAC_HASHLEN];
+ struct auth_hash *esph;
+
+ /*
+ * Corrupt HMAC if we want to test integrity verification of
+ * the other side.
+ */
+ esph = sav->tdb_authalgxform;
+ if (esph != NULL) {
+ m_copyback(m, m->m_pkthdr.len - AH_HMAC_HASHLEN,
+ AH_HMAC_HASHLEN, ipseczeroes);
+ }
+ }
+#endif
+
+ /* NB: m is reclaimed by ipsec_process_done. */
+ err = ipsec_process_done(m, isr);
+ KEY_FREESAV(&sav);
+ IPSECREQUEST_UNLOCK(isr);
+ return err;
+bad:
+ if (sav)
+ KEY_FREESAV(&sav);
+ IPSECREQUEST_UNLOCK(isr);
+ if (m)
+ m_freem(m);
+ free(tc, M_XDATA);
+ crypto_freereq(crp);
+ return error;
+}
+
+static struct xformsw esp_xformsw = {
+ XF_ESP, XFT_CONF|XFT_AUTH, "IPsec ESP",
+ esp_init, esp_zeroize, esp_input,
+ esp_output
+};
+
+static void
+esp_attach(void)
+{
+#define MAXIV(xform) \
+ if (xform.blocksize > V_esp_max_ivlen) \
+ V_esp_max_ivlen = xform.blocksize
+
+ MAXIV(enc_xform_des); /* SADB_EALG_DESCBC */
+ MAXIV(enc_xform_3des); /* SADB_EALG_3DESCBC */
+ MAXIV(enc_xform_rijndael128); /* SADB_X_EALG_AES */
+ MAXIV(enc_xform_blf); /* SADB_X_EALG_BLOWFISHCBC */
+ MAXIV(enc_xform_cast5); /* SADB_X_EALG_CAST128CBC */
+ MAXIV(enc_xform_skipjack); /* SADB_X_EALG_SKIPJACK */
+ MAXIV(enc_xform_null); /* SADB_EALG_NULL */
+ MAXIV(enc_xform_camellia); /* SADB_X_EALG_CAMELLIACBC */
+
+ xform_register(&esp_xformsw);
+#undef MAXIV
+}
+SYSINIT(esp_xform_init, SI_SUB_PROTO_DOMAIN, SI_ORDER_MIDDLE, esp_attach, NULL);
diff --git a/rtems/freebsd/netipsec/xform_ipcomp.c b/rtems/freebsd/netipsec/xform_ipcomp.c
new file mode 100644
index 00000000..ab194bfa
--- /dev/null
+++ b/rtems/freebsd/netipsec/xform_ipcomp.c
@@ -0,0 +1,625 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/* $FreeBSD$ */
+/* $OpenBSD: ip_ipcomp.c,v 1.1 2001/07/05 12:08:52 jjbg Exp $ */
+
+/*-
+ * Copyright (c) 2001 Jean-Jacques Bernard-Gundol (jj@wabbitt.org)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* IP payload compression protocol (IPComp), see RFC 2393 */
+#include <rtems/freebsd/local/opt_inet.h>
+#include <rtems/freebsd/local/opt_inet6.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/mutex.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/protosw.h>
+#include <rtems/freebsd/sys/sysctl.h>
+
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/in_systm.h>
+#include <rtems/freebsd/netinet/ip.h>
+#include <rtems/freebsd/netinet/ip_var.h>
+
+#include <rtems/freebsd/net/route.h>
+#include <rtems/freebsd/net/vnet.h>
+
+#include <rtems/freebsd/netipsec/ipsec.h>
+#include <rtems/freebsd/netipsec/xform.h>
+
+#ifdef INET6
+#include <rtems/freebsd/netinet/ip6.h>
+#include <rtems/freebsd/netipsec/ipsec6.h>
+#endif
+
+#include <rtems/freebsd/netipsec/ipcomp.h>
+#include <rtems/freebsd/netipsec/ipcomp_var.h>
+
+#include <rtems/freebsd/netipsec/key.h>
+#include <rtems/freebsd/netipsec/key_debug.h>
+
+#include <rtems/freebsd/opencrypto/cryptodev.h>
+#include <rtems/freebsd/opencrypto/deflate.h>
+#include <rtems/freebsd/opencrypto/xform.h>
+
+VNET_DEFINE(int, ipcomp_enable) = 1;
+VNET_DEFINE(struct ipcompstat, ipcompstat);
+
+SYSCTL_DECL(_net_inet_ipcomp);
+SYSCTL_VNET_INT(_net_inet_ipcomp, OID_AUTO,
+ ipcomp_enable, CTLFLAG_RW, &VNET_NAME(ipcomp_enable), 0, "");
+SYSCTL_VNET_STRUCT(_net_inet_ipcomp, IPSECCTL_STATS,
+ stats, CTLFLAG_RD, &VNET_NAME(ipcompstat), ipcompstat, "");
+
+static int ipcomp_input_cb(struct cryptop *crp);
+static int ipcomp_output_cb(struct cryptop *crp);
+
+struct comp_algo *
+ipcomp_algorithm_lookup(int alg)
+{
+ if (alg >= IPCOMP_ALG_MAX)
+ return NULL;
+ switch (alg) {
+ case SADB_X_CALG_DEFLATE:
+ return &comp_algo_deflate;
+ }
+ return NULL;
+}
+
+/*
+ * ipcomp_init() is called when a CPI is being set up.
+ */
+static int
+ipcomp_init(struct secasvar *sav, struct xformsw *xsp)
+{
+ struct comp_algo *tcomp;
+ struct cryptoini cric;
+
+ /* NB: algorithm really comes in alg_enc and not alg_comp! */
+ tcomp = ipcomp_algorithm_lookup(sav->alg_enc);
+ if (tcomp == NULL) {
+ DPRINTF(("%s: unsupported compression algorithm %d\n", __func__,
+ sav->alg_comp));
+ return EINVAL;
+ }
+ sav->alg_comp = sav->alg_enc; /* set for doing histogram */
+ sav->tdb_xform = xsp;
+ sav->tdb_compalgxform = tcomp;
+
+ /* Initialize crypto session */
+ bzero(&cric, sizeof (cric));
+ cric.cri_alg = sav->tdb_compalgxform->type;
+
+ return crypto_newsession(&sav->tdb_cryptoid, &cric, V_crypto_support);
+}
+
+/*
+ * ipcomp_zeroize() is called when an IPCA is deleted.
+ */
+static int
+ipcomp_zeroize(struct secasvar *sav)
+{
+ int err;
+
+ err = crypto_freesession(sav->tdb_cryptoid);
+ sav->tdb_cryptoid = 0;
+ return err;
+}
+
+/*
+ * ipcomp_input() gets called to uncompress an input packet
+ */
+static int
+ipcomp_input(struct mbuf *m, struct secasvar *sav, int skip, int protoff)
+{
+ struct tdb_crypto *tc;
+ struct cryptodesc *crdc;
+ struct cryptop *crp;
+ int hlen = IPCOMP_HLENGTH;
+
+ /* Get crypto descriptors */
+ crp = crypto_getreq(1);
+ if (crp == NULL) {
+ m_freem(m);
+ DPRINTF(("%s: no crypto descriptors\n", __func__));
+ V_ipcompstat.ipcomps_crypto++;
+ return ENOBUFS;
+ }
+ /* Get IPsec-specific opaque pointer */
+ tc = (struct tdb_crypto *) malloc(sizeof (*tc), M_XDATA, M_NOWAIT|M_ZERO);
+ if (tc == NULL) {
+ m_freem(m);
+ crypto_freereq(crp);
+ DPRINTF(("%s: cannot allocate tdb_crypto\n", __func__));
+ V_ipcompstat.ipcomps_crypto++;
+ return ENOBUFS;
+ }
+ crdc = crp->crp_desc;
+
+ crdc->crd_skip = skip + hlen;
+ crdc->crd_len = m->m_pkthdr.len - (skip + hlen);
+ crdc->crd_inject = skip;
+
+ tc->tc_ptr = 0;
+
+ /* Decompression operation */
+ crdc->crd_alg = sav->tdb_compalgxform->type;
+
+ /* Crypto operation descriptor */
+ crp->crp_ilen = m->m_pkthdr.len - (skip + hlen);
+ crp->crp_flags = CRYPTO_F_IMBUF | CRYPTO_F_CBIFSYNC;
+ crp->crp_buf = (caddr_t) m;
+ crp->crp_callback = ipcomp_input_cb;
+ crp->crp_sid = sav->tdb_cryptoid;
+ crp->crp_opaque = (caddr_t) tc;
+
+ /* These are passed as-is to the callback */
+ tc->tc_spi = sav->spi;
+ tc->tc_dst = sav->sah->saidx.dst;
+ tc->tc_proto = sav->sah->saidx.proto;
+ tc->tc_protoff = protoff;
+ tc->tc_skip = skip;
+
+ return crypto_dispatch(crp);
+}
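+
+/*
+ * Editor's note (illustrative, not part of the original sources):
+ * IPCOMP_HLENGTH covers the fixed four-byte IPComp header of RFC 3173,
+ * essentially
+ *
+ *	u_int8_t  comp_nxt;	next header
+ *	u_int8_t  comp_flags;	reserved, must be zero
+ *	u_int16_t comp_cpi;	compression parameter index
+ *
+ * which is why decompression starts at skip + hlen here and the header
+ * is stripped with m_striphdr() in the input callback.
+ */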
+
+#ifdef INET6
+#define IPSEC_COMMON_INPUT_CB(m, sav, skip, protoff, mtag) do { \
+ if (saidx->dst.sa.sa_family == AF_INET6) { \
+ error = ipsec6_common_input_cb(m, sav, skip, protoff, mtag); \
+ } else { \
+ error = ipsec4_common_input_cb(m, sav, skip, protoff, mtag); \
+ } \
+} while (0)
+#else
+#define IPSEC_COMMON_INPUT_CB(m, sav, skip, protoff, mtag) \
+ (error = ipsec4_common_input_cb(m, sav, skip, protoff, mtag))
+#endif
+
+/*
+ * IPComp input callback from the crypto driver.
+ */
+static int
+ipcomp_input_cb(struct cryptop *crp)
+{
+ struct cryptodesc *crd;
+ struct tdb_crypto *tc;
+ int skip, protoff;
+ struct m_tag *mtag;
+ struct mbuf *m;
+ struct secasvar *sav;
+ struct secasindex *saidx;
+ int hlen = IPCOMP_HLENGTH, error, clen;
+ u_int8_t nproto;
+ caddr_t addr;
+
+ crd = crp->crp_desc;
+
+ tc = (struct tdb_crypto *) crp->crp_opaque;
+ IPSEC_ASSERT(tc != NULL, ("null opaque crypto data area!"));
+ skip = tc->tc_skip;
+ protoff = tc->tc_protoff;
+ mtag = (struct m_tag *) tc->tc_ptr;
+ m = (struct mbuf *) crp->crp_buf;
+
+ sav = KEY_ALLOCSA(&tc->tc_dst, tc->tc_proto, tc->tc_spi);
+ if (sav == NULL) {
+ V_ipcompstat.ipcomps_notdb++;
+ DPRINTF(("%s: SA expired while in crypto\n", __func__));
+ error = ENOBUFS; /*XXX*/
+ goto bad;
+ }
+
+ saidx = &sav->sah->saidx;
+ IPSEC_ASSERT(saidx->dst.sa.sa_family == AF_INET ||
+ saidx->dst.sa.sa_family == AF_INET6,
+ ("unexpected protocol family %u", saidx->dst.sa.sa_family));
+
+ /* Check for crypto errors */
+ if (crp->crp_etype) {
+ /* Reset the session ID */
+ if (sav->tdb_cryptoid != 0)
+ sav->tdb_cryptoid = crp->crp_sid;
+
+ if (crp->crp_etype == EAGAIN) {
+ KEY_FREESAV(&sav);
+ return crypto_dispatch(crp);
+ }
+ V_ipcompstat.ipcomps_noxform++;
+ DPRINTF(("%s: crypto error %d\n", __func__, crp->crp_etype));
+ error = crp->crp_etype;
+ goto bad;
+ }
+ /* Shouldn't happen... */
+ if (m == NULL) {
+ V_ipcompstat.ipcomps_crypto++;
+ DPRINTF(("%s: null mbuf returned from crypto\n", __func__));
+ error = EINVAL;
+ goto bad;
+ }
+ V_ipcompstat.ipcomps_hist[sav->alg_comp]++;
+
+ clen = crp->crp_olen; /* Length of data after processing */
+
+ /* Release the crypto descriptors */
+ free(tc, M_XDATA), tc = NULL;
+ crypto_freereq(crp), crp = NULL;
+
+ /* In case it's not done already, adjust the size of the mbuf chain */
+ m->m_pkthdr.len = clen + hlen + skip;
+
+ if (m->m_len < skip + hlen && (m = m_pullup(m, skip + hlen)) == 0) {
+ V_ipcompstat.ipcomps_hdrops++; /*XXX*/
+ DPRINTF(("%s: m_pullup failed\n", __func__));
+ error = EINVAL; /*XXX*/
+ goto bad;
+ }
+
+ /* Keep the next protocol field */
+ addr = (caddr_t) mtod(m, struct ip *) + skip;
+ nproto = ((struct ipcomp *) addr)->comp_nxt;
+
+ /* Remove the IPCOMP header */
+ error = m_striphdr(m, skip, hlen);
+ if (error) {
+ V_ipcompstat.ipcomps_hdrops++;
+ DPRINTF(("%s: bad mbuf chain, IPCA %s/%08lx\n", __func__,
+ ipsec_address(&sav->sah->saidx.dst),
+ (u_long) ntohl(sav->spi)));
+ goto bad;
+ }
+
+ /* Restore the Next Protocol field */
+ m_copyback(m, protoff, sizeof (u_int8_t), (u_int8_t *) &nproto);
+
+ IPSEC_COMMON_INPUT_CB(m, sav, skip, protoff, NULL);
+
+ KEY_FREESAV(&sav);
+ return error;
+bad:
+ if (sav)
+ KEY_FREESAV(&sav);
+ if (m)
+ m_freem(m);
+ if (tc != NULL)
+ free(tc, M_XDATA);
+ if (crp)
+ crypto_freereq(crp);
+ return error;
+}
+
+/*
+ * IPComp output routine, called by ipsec[46]_process_packet()
+ */
+static int
+ipcomp_output(
+ struct mbuf *m,
+ struct ipsecrequest *isr,
+ struct mbuf **mp,
+ int skip,
+ int protoff
+)
+{
+ struct secasvar *sav;
+ struct comp_algo *ipcompx;
+ int error, ralen, maxpacketsize;
+ struct cryptodesc *crdc;
+ struct cryptop *crp;
+ struct tdb_crypto *tc;
+
+ sav = isr->sav;
+ IPSEC_ASSERT(sav != NULL, ("null SA"));
+ ipcompx = sav->tdb_compalgxform;
+ IPSEC_ASSERT(ipcompx != NULL, ("null compression xform"));
+
+ /*
+ * Do not touch the packet if the payload to compress is
+ * smaller than the compression algorithm's minimal
+ * threshold; we just send the data out uncompressed.
+ * See RFC 3173, 2.2. Non-Expansion Policy.
+ */
+ if (m->m_pkthdr.len <= ipcompx->minlen) {
+ V_ipcompstat.ipcomps_threshold++;
+ return ipsec_process_done(m, isr);
+ }
+
+ ralen = m->m_pkthdr.len - skip; /* Raw payload length before comp. */
+ V_ipcompstat.ipcomps_output++;
+
+ /* Check for maximum packet size violations. */
+ switch (sav->sah->saidx.dst.sa.sa_family) {
+#ifdef INET
+ case AF_INET:
+ maxpacketsize = IP_MAXPACKET;
+ break;
+#endif /* INET */
+#ifdef INET6
+ case AF_INET6:
+ maxpacketsize = IPV6_MAXPACKET;
+ break;
+#endif /* INET6 */
+ default:
+ V_ipcompstat.ipcomps_nopf++;
+ DPRINTF(("%s: unknown/unsupported protocol family %d, "
+ "IPCA %s/%08lx\n", __func__,
+ sav->sah->saidx.dst.sa.sa_family,
+ ipsec_address(&sav->sah->saidx.dst),
+ (u_long) ntohl(sav->spi)));
+ error = EPFNOSUPPORT;
+ goto bad;
+ }
+ if (ralen + skip + IPCOMP_HLENGTH > maxpacketsize) {
+ V_ipcompstat.ipcomps_toobig++;
+ DPRINTF(("%s: packet in IPCA %s/%08lx got too big "
+ "(len %u, max len %u)\n", __func__,
+ ipsec_address(&sav->sah->saidx.dst),
+ (u_long) ntohl(sav->spi),
+ ralen + skip + IPCOMP_HLENGTH, maxpacketsize));
+ error = EMSGSIZE;
+ goto bad;
+ }
+
+ /* Update the counters */
+ V_ipcompstat.ipcomps_obytes += m->m_pkthdr.len - skip;
+
+ m = m_unshare(m, M_NOWAIT);
+ if (m == NULL) {
+ V_ipcompstat.ipcomps_hdrops++;
+ DPRINTF(("%s: cannot clone mbuf chain, IPCA %s/%08lx\n",
+ __func__, ipsec_address(&sav->sah->saidx.dst),
+ (u_long) ntohl(sav->spi)));
+ error = ENOBUFS;
+ goto bad;
+ }
+
+ /* OK, now we can hand the packet off to crypto processing. */
+
+ /* Get crypto descriptors */
+ crp = crypto_getreq(1);
+ if (crp == NULL) {
+ V_ipcompstat.ipcomps_crypto++;
+ DPRINTF(("%s: failed to acquire crypto descriptor\n",__func__));
+ error = ENOBUFS;
+ goto bad;
+ }
+ crdc = crp->crp_desc;
+
+ /* Compression descriptor */
+ crdc->crd_skip = skip;
+ crdc->crd_len = ralen;
+ crdc->crd_flags = CRD_F_COMP;
+ crdc->crd_inject = skip;
+
+ /* Compression operation */
+ crdc->crd_alg = ipcompx->type;
+
+ /* IPsec-specific opaque crypto info */
+ tc = (struct tdb_crypto *) malloc(sizeof(struct tdb_crypto),
+ M_XDATA, M_NOWAIT|M_ZERO);
+ if (tc == NULL) {
+ V_ipcompstat.ipcomps_crypto++;
+ DPRINTF(("%s: failed to allocate tdb_crypto\n", __func__));
+ crypto_freereq(crp);
+ error = ENOBUFS;
+ goto bad;
+ }
+
+ tc->tc_isr = isr;
+ tc->tc_spi = sav->spi;
+ tc->tc_dst = sav->sah->saidx.dst;
+ tc->tc_proto = sav->sah->saidx.proto;
+ tc->tc_protoff = protoff;
+ tc->tc_skip = skip;
+
+ /* Crypto operation descriptor */
+ crp->crp_ilen = m->m_pkthdr.len; /* Total input length */
+ crp->crp_flags = CRYPTO_F_IMBUF | CRYPTO_F_CBIFSYNC;
+ crp->crp_buf = (caddr_t) m;
+ crp->crp_callback = ipcomp_output_cb;
+ crp->crp_opaque = (caddr_t) tc;
+ crp->crp_sid = sav->tdb_cryptoid;
+
+ return crypto_dispatch(crp);
+bad:
+ if (m)
+ m_freem(m);
+ return (error);
+}
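+
+/*
+ * Editor's note (illustrative, not part of the original sources): the
+ * threshold check at the top of ipcomp_output() implements the
+ * RFC 3173 non-expansion policy at the cheapest point: datagrams no
+ * larger than the algorithm's minlen are handed straight to
+ * ipsec_process_done() and leave uncompressed, with no IPComp header.
+ */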
+
+/*
+ * IPComp output callback from the crypto driver.
+ */
+static int
+ipcomp_output_cb(struct cryptop *crp)
+{
+ struct tdb_crypto *tc;
+ struct ipsecrequest *isr;
+ struct secasvar *sav;
+ struct mbuf *m;
+ int error, skip;
+
+ tc = (struct tdb_crypto *) crp->crp_opaque;
+ IPSEC_ASSERT(tc != NULL, ("null opaque data area!"));
+ m = (struct mbuf *) crp->crp_buf;
+ skip = tc->tc_skip;
+
+ isr = tc->tc_isr;
+ IPSECREQUEST_LOCK(isr);
+ sav = KEY_ALLOCSA(&tc->tc_dst, tc->tc_proto, tc->tc_spi);
+ if (sav == NULL) {
+ V_ipcompstat.ipcomps_notdb++;
+ DPRINTF(("%s: SA expired while in crypto\n", __func__));
+ error = ENOBUFS; /*XXX*/
+ goto bad;
+ }
+ IPSEC_ASSERT(isr->sav == sav, ("SA changed\n"));
+
+ /* Check for crypto errors */
+ if (crp->crp_etype) {
+ /* Reset the session ID */
+ if (sav->tdb_cryptoid != 0)
+ sav->tdb_cryptoid = crp->crp_sid;
+
+ if (crp->crp_etype == EAGAIN) {
+ KEY_FREESAV(&sav);
+ IPSECREQUEST_UNLOCK(isr);
+ return crypto_dispatch(crp);
+ }
+ V_ipcompstat.ipcomps_noxform++;
+ DPRINTF(("%s: crypto error %d\n", __func__, crp->crp_etype));
+ error = crp->crp_etype;
+ goto bad;
+ }
+ /* Shouldn't happen... */
+ if (m == NULL) {
+ V_ipcompstat.ipcomps_crypto++;
+ DPRINTF(("%s: bogus return buffer from crypto\n", __func__));
+ error = EINVAL;
+ goto bad;
+ }
+ V_ipcompstat.ipcomps_hist[sav->alg_comp]++;
+
+ if (crp->crp_ilen - skip > crp->crp_olen) {
+ struct mbuf *mo;
+ struct ipcomp *ipcomp;
+ int roff;
+ uint8_t prot;
+
+ /* Compression helped, inject IPCOMP header. */
+ mo = m_makespace(m, skip, IPCOMP_HLENGTH, &roff);
+ if (mo == NULL) {
+ V_ipcompstat.ipcomps_wrap++;
+ DPRINTF(("%s: IPCOMP header inject failed for IPCA %s/%08lx\n",
+ __func__, ipsec_address(&sav->sah->saidx.dst),
+ (u_long) ntohl(sav->spi)));
+ error = ENOBUFS;
+ goto bad;
+ }
+ ipcomp = (struct ipcomp *)(mtod(mo, caddr_t) + roff);
+
+ /* Initialize the IPCOMP header */
+ /* XXX alignment always correct? */
+ switch (sav->sah->saidx.dst.sa.sa_family) {
+#ifdef INET
+ case AF_INET:
+ ipcomp->comp_nxt = mtod(m, struct ip *)->ip_p;
+ break;
+#endif /* INET */
+#ifdef INET6
+ case AF_INET6:
+ ipcomp->comp_nxt = mtod(m, struct ip6_hdr *)->ip6_nxt;
+ break;
+#endif
+ }
+ ipcomp->comp_flags = 0;
+ ipcomp->comp_cpi = htons((u_int16_t) ntohl(sav->spi));
+
+ /* Fix Next Protocol in IPv4/IPv6 header */
+ prot = IPPROTO_IPCOMP;
+ m_copyback(m, tc->tc_protoff, sizeof(u_int8_t),
+ (u_char *)&prot);
+
+ /* Adjust the length in the IP header */
+ switch (sav->sah->saidx.dst.sa.sa_family) {
+#ifdef INET
+ case AF_INET:
+ mtod(m, struct ip *)->ip_len = htons(m->m_pkthdr.len);
+ break;
+#endif /* INET */
+#ifdef INET6
+ case AF_INET6:
+ mtod(m, struct ip6_hdr *)->ip6_plen =
+ htons(m->m_pkthdr.len - sizeof(struct ip6_hdr));
+ break;
+#endif /* INET6 */
+ default:
+ V_ipcompstat.ipcomps_nopf++;
+ DPRINTF(("%s: unknown/unsupported protocol "
+ "family %d, IPCA %s/%08lx\n", __func__,
+ sav->sah->saidx.dst.sa.sa_family,
+ ipsec_address(&sav->sah->saidx.dst),
+ (u_long) ntohl(sav->spi)));
+ error = EPFNOSUPPORT;
+ goto bad;
+ }
+ } else {
+ /* Compression was useless; we have wasted the cycles. */
+ V_ipcompstat.ipcomps_uncompr++;
+ DPRINTF(("%s: compressions was useless %d - %d <= %d\n",
+ __func__, crp->crp_ilen, skip, crp->crp_olen));
+ /* XXX remember state to not compress the next couple
+ * of packets, RFC 3173, 2.2. Non-Expansion Policy */
+ }
+
+ /* Release the crypto descriptor */
+ free(tc, M_XDATA);
+ crypto_freereq(crp);
+
+ /* NB: m is reclaimed by ipsec_process_done. */
+ error = ipsec_process_done(m, isr);
+ KEY_FREESAV(&sav);
+ IPSECREQUEST_UNLOCK(isr);
+ return error;
+bad:
+ if (sav)
+ KEY_FREESAV(&sav);
+ IPSECREQUEST_UNLOCK(isr);
+ if (m)
+ m_freem(m);
+ free(tc, M_XDATA);
+ crypto_freereq(crp);
+ return error;
+}
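+
+/*
+ * Editor's note (illustrative, not part of the original sources): the
+ * CPI emitted above is simply the low 16 bits of the SA's SPI,
+ *
+ *	ipcomp->comp_cpi = htons((u_int16_t) ntohl(sav->spi));
+ *
+ * RFC 3173 reserves the low CPI values (0-63) for well-known
+ * algorithms, so negotiated IPComp SAs carry larger values.
+ */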
+
+static struct xformsw ipcomp_xformsw = {
+ XF_IPCOMP, XFT_COMP, "IPcomp",
+ ipcomp_init, ipcomp_zeroize, ipcomp_input,
+ ipcomp_output
+};
+
+static void
+ipcomp_attach(void)
+{
+
+ xform_register(&ipcomp_xformsw);
+}
+
+SYSINIT(ipcomp_xform_init, SI_SUB_PROTO_DOMAIN, SI_ORDER_MIDDLE, ipcomp_attach, NULL);
+
+static void
+vnet_ipcomp_attach(const void *unused __unused)
+{
+
+ V_ipcompstat.version = IPCOMPSTAT_VERSION;
+}
+
+VNET_SYSINIT(vnet_ipcomp_xform_init, SI_SUB_PROTO_DOMAIN, SI_ORDER_MIDDLE,
+ vnet_ipcomp_attach, NULL);
diff --git a/rtems/freebsd/netipsec/xform_ipip.c b/rtems/freebsd/netipsec/xform_ipip.c
new file mode 100644
index 00000000..5b4d288d
--- /dev/null
+++ b/rtems/freebsd/netipsec/xform_ipip.c
@@ -0,0 +1,708 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/* $FreeBSD$ */
+/* $OpenBSD: ip_ipip.c,v 1.25 2002/06/10 18:04:55 itojun Exp $ */
+/*-
+ * The authors of this code are John Ioannidis (ji@tla.org),
+ * Angelos D. Keromytis (kermit@csd.uch.gr) and
+ * Niels Provos (provos@physnet.uni-hamburg.de).
+ *
+ * The original version of this code was written by John Ioannidis
+ * for BSD/OS in Athens, Greece, in November 1995.
+ *
+ * Ported to OpenBSD and NetBSD, with additional transforms, in December 1996,
+ * by Angelos D. Keromytis.
+ *
+ * Additional transforms and features in 1997 and 1998 by Angelos D. Keromytis
+ * and Niels Provos.
+ *
+ * Additional features in 1999 by Angelos D. Keromytis.
+ *
+ * Copyright (C) 1995, 1996, 1997, 1998, 1999 by John Ioannidis,
+ * Angelos D. Keromytis and Niels Provos.
+ * Copyright (c) 2001, Angelos D. Keromytis.
+ *
+ * Permission to use, copy, and modify this software with or without fee
+ * is hereby granted, provided that this entire notice is included in
+ * all copies of any software which is or includes a copy or
+ * modification of this software.
+ * You may use this code under the GNU public license if you so wish. Please
+ * contribute changes back to the authors under this freer than GPL license
+ * so that we may further the use of strong encryption without limitations to
+ * all.
+ *
+ * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
+ * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
+ * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
+ * PURPOSE.
+ */
+
+/*
+ * IP-inside-IP processing
+ */
+#include <rtems/freebsd/local/opt_inet.h>
+#include <rtems/freebsd/local/opt_inet6.h>
+#include <rtems/freebsd/local/opt_enc.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/protosw.h>
+#include <rtems/freebsd/sys/sysctl.h>
+
+#include <rtems/freebsd/net/if.h>
+#include <rtems/freebsd/net/pfil.h>
+#include <rtems/freebsd/net/route.h>
+#include <rtems/freebsd/net/netisr.h>
+#include <rtems/freebsd/net/vnet.h>
+
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/in_systm.h>
+#include <rtems/freebsd/netinet/in_var.h>
+#include <rtems/freebsd/netinet/ip.h>
+#include <rtems/freebsd/netinet/ip_ecn.h>
+#include <rtems/freebsd/netinet/ip_var.h>
+#include <rtems/freebsd/netinet/ip_encap.h>
+#ifdef MROUTING
+#include <rtems/freebsd/netinet/ip_mroute.h>
+#endif
+
+#include <rtems/freebsd/netipsec/ipsec.h>
+#include <rtems/freebsd/netipsec/xform.h>
+
+#include <rtems/freebsd/netipsec/ipip_var.h>
+
+#ifdef INET6
+#include <rtems/freebsd/netinet/ip6.h>
+#include <rtems/freebsd/netipsec/ipsec6.h>
+#include <rtems/freebsd/netinet6/ip6_ecn.h>
+#include <rtems/freebsd/netinet6/in6_var.h>
+#include <rtems/freebsd/netinet6/ip6protosw.h>
+#endif
+
+#include <rtems/freebsd/netipsec/key.h>
+#include <rtems/freebsd/netipsec/key_debug.h>
+
+#include <rtems/freebsd/machine/stdarg.h>
+
+/*
+ * We can control the acceptance of IP4 packets by altering the sysctl
+ * net.inet.ipip.allow value. Zero means drop them; any other value accepts them.
+ */
+VNET_DEFINE(int, ipip_allow) = 0;
+VNET_DEFINE(struct ipipstat, ipipstat);
+
+SYSCTL_DECL(_net_inet_ipip);
+SYSCTL_VNET_INT(_net_inet_ipip, OID_AUTO,
+ ipip_allow, CTLFLAG_RW, &VNET_NAME(ipip_allow), 0, "");
+SYSCTL_VNET_STRUCT(_net_inet_ipip, IPSECCTL_STATS,
+ stats, CTLFLAG_RD, &VNET_NAME(ipipstat), ipipstat, "");
+
+/* XXX IPCOMP */
+#define M_IPSEC (M_AUTHIPHDR|M_AUTHIPDGM|M_DECRYPTED)
+
+static void _ipip_input(struct mbuf *m, int iphlen, struct ifnet *gifp);
+
+#ifdef INET6
+/*
+ * Really only a wrapper for _ipip_input(), for use with IPv6.
+ */
+int
+ip4_input6(struct mbuf **m, int *offp, int proto)
+{
+#if 0
+ /* If we do not accept IP-in-IP explicitly, drop. */
+ if (!V_ipip_allow && ((*m)->m_flags & M_IPSEC) == 0) {
+ DPRINTF(("%s: dropped due to policy\n", __func__));
+ V_ipipstat.ipips_pdrops++;
+ m_freem(*m);
+ return IPPROTO_DONE;
+ }
+#endif
+ _ipip_input(*m, *offp, NULL);
+ return IPPROTO_DONE;
+}
+#endif /* INET6 */
+
+#ifdef INET
+/*
+ * Really only a wrapper for _ipip_input(), for use with IPv4.
+ */
+void
+ip4_input(struct mbuf *m, int off)
+{
+#if 0
+ /* If we do not accept IP-in-IP explicitly, drop. */
+ if (!V_ipip_allow && (m->m_flags & M_IPSEC) == 0) {
+ DPRINTF(("%s: dropped due to policy\n", __func__));
+ V_ipipstat.ipips_pdrops++;
+ m_freem(m);
+ return;
+ }
+#endif
+ _ipip_input(m, off, NULL);
+}
+#endif /* INET */
+
+/*
+ * _ipip_input() gets called when we receive an IP{46}-encapsulated packet,
+ * either because we got it at a real interface, or because AH or ESP
+ * were being used in tunnel mode (in which case the rcvif element will
+ * contain the address of the encX interface associated with the tunnel).
+ */
+
+static void
+_ipip_input(struct mbuf *m, int iphlen, struct ifnet *gifp)
+{
+#ifdef INET
+ register struct sockaddr_in *sin;
+#endif
+ register struct ifnet *ifp;
+ register struct ifaddr *ifa;
+ struct ip *ipo;
+#ifdef INET6
+ register struct sockaddr_in6 *sin6;
+ struct ip6_hdr *ip6 = NULL;
+ u_int8_t itos;
+#endif
+ u_int8_t nxt;
+ int isr;
+ u_int8_t otos;
+ u_int8_t v;
+ int hlen;
+
+ V_ipipstat.ipips_ipackets++;
+
+ m_copydata(m, 0, 1, &v);
+
+ switch (v >> 4) {
+#ifdef INET
+ case 4:
+ hlen = sizeof(struct ip);
+ break;
+#endif /* INET */
+#ifdef INET6
+ case 6:
+ hlen = sizeof(struct ip6_hdr);
+ break;
+#endif
+ default:
+ V_ipipstat.ipips_family++;
+ m_freem(m);
+ return /* EAFNOSUPPORT */;
+ }
+
+ /* Bring the IP header into the first mbuf, if it is not there already. */
+ if (m->m_len < hlen) {
+ if ((m = m_pullup(m, hlen)) == NULL) {
+ DPRINTF(("%s: m_pullup (1) failed\n", __func__));
+ V_ipipstat.ipips_hdrops++;
+ return;
+ }
+ }
+
+ ipo = mtod(m, struct ip *);
+
+#ifdef MROUTING
+ if (ipo->ip_v == IPVERSION && ipo->ip_p == IPPROTO_IPV4) {
+ if (IN_MULTICAST(((struct ip *)((char *) ipo + iphlen))->ip_dst.s_addr)) {
+ ipip_mroute_input (m, iphlen);
+ return;
+ }
+ }
+#endif /* MROUTING */
+
+ /* Keep outer ecn field. */
+ switch (v >> 4) {
+#ifdef INET
+ case 4:
+ otos = ipo->ip_tos;
+ break;
+#endif /* INET */
+#ifdef INET6
+ case 6:
+ otos = (ntohl(mtod(m, struct ip6_hdr *)->ip6_flow) >> 20) & 0xff;
+ break;
+#endif
+ default:
+ panic("ipip_input: unknown ip version %u (outer)", v>>4);
+ }
+
+ /* Remove outer IP header */
+ m_adj(m, iphlen);
+
+ /* Sanity check */
+ if (m->m_pkthdr.len < sizeof(struct ip)) {
+ V_ipipstat.ipips_hdrops++;
+ m_freem(m);
+ return;
+ }
+
+ m_copydata(m, 0, 1, &v);
+
+ switch (v >> 4) {
+#ifdef INET
+ case 4:
+ hlen = sizeof(struct ip);
+ break;
+#endif /* INET */
+
+#ifdef INET6
+ case 6:
+ hlen = sizeof(struct ip6_hdr);
+ break;
+#endif
+ default:
+ V_ipipstat.ipips_family++;
+ m_freem(m);
+ return; /* EAFNOSUPPORT */
+ }
+
+ /*
+ * Bring the inner IP header into the first mbuf, if it is not there already.
+ */
+ if (m->m_len < hlen) {
+ if ((m = m_pullup(m, hlen)) == NULL) {
+ DPRINTF(("%s: m_pullup (2) failed\n", __func__));
+ V_ipipstat.ipips_hdrops++;
+ return;
+ }
+ }
+
+ /*
+ * RFC 1853 specifies that the inner TTL should not be touched on
+ * decapsulation. There's no reason this comment should be here, but
+ * this is as good a position as any.
+ */
+
+ /* Some sanity checks in the inner IP header */
+ switch (v >> 4) {
+#ifdef INET
+ case 4:
+ ipo = mtod(m, struct ip *);
+ nxt = ipo->ip_p;
+ ip_ecn_egress(V_ip4_ipsec_ecn, &otos, &ipo->ip_tos);
+ break;
+#endif /* INET */
+#ifdef INET6
+ case 6:
+ ip6 = (struct ip6_hdr *) ipo;
+ nxt = ip6->ip6_nxt;
+ itos = (ntohl(ip6->ip6_flow) >> 20) & 0xff;
+ ip_ecn_egress(V_ip6_ipsec_ecn, &otos, &itos);
+ ip6->ip6_flow &= ~htonl(0xff << 20);
+ ip6->ip6_flow |= htonl((u_int32_t) itos << 20);
+ break;
+#endif
+ default:
+ panic("ipip_input: unknown ip version %u (inner)", v>>4);
+ }
+
+ /* Check for local address spoofing. */
+ if ((m->m_pkthdr.rcvif == NULL ||
+ !(m->m_pkthdr.rcvif->if_flags & IFF_LOOPBACK)) &&
+ V_ipip_allow != 2) {
+ IFNET_RLOCK_NOSLEEP();
+ TAILQ_FOREACH(ifp, &V_ifnet, if_link) {
+ TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
+#ifdef INET
+ if (ipo) {
+ if (ifa->ifa_addr->sa_family !=
+ AF_INET)
+ continue;
+
+ sin = (struct sockaddr_in *) ifa->ifa_addr;
+
+ if (sin->sin_addr.s_addr ==
+ ipo->ip_src.s_addr) {
+ V_ipipstat.ipips_spoof++;
+ m_freem(m);
+ IFNET_RUNLOCK_NOSLEEP();
+ return;
+ }
+ }
+#endif /* INET */
+
+#ifdef INET6
+ if (ip6) {
+ if (ifa->ifa_addr->sa_family !=
+ AF_INET6)
+ continue;
+
+ sin6 = (struct sockaddr_in6 *) ifa->ifa_addr;
+
+ if (IN6_ARE_ADDR_EQUAL(&sin6->sin6_addr, &ip6->ip6_src)) {
+ V_ipipstat.ipips_spoof++;
+ m_freem(m);
+ IFNET_RUNLOCK_NOSLEEP();
+ return;
+ }
+
+ }
+#endif /* INET6 */
+ }
+ }
+ IFNET_RUNLOCK_NOSLEEP();
+ }
+
+ /* Statistics */
+ V_ipipstat.ipips_ibytes += m->m_pkthdr.len - iphlen;
+
+#ifdef DEV_ENC
+ switch (v >> 4) {
+#ifdef INET
+ case 4:
+ ipsec_bpf(m, NULL, AF_INET, ENC_IN|ENC_AFTER);
+ break;
+#endif
+#ifdef INET6
+ case 6:
+ ipsec_bpf(m, NULL, AF_INET6, ENC_IN|ENC_AFTER);
+ break;
+#endif
+ default:
+ panic("%s: bogus ip version %u", __func__, v>>4);
+ }
+ /* pass the mbuf to enc0 for packet filtering */
+ if (ipsec_filter(&m, PFIL_IN, ENC_IN|ENC_AFTER) != 0)
+ return;
+#endif
+
+ /*
+ * Interface pointer stays the same; if no IPsec processing has
+ * been done (or will be done), this will point to a normal
+ * interface. Otherwise, it'll point to an enc interface, which
+ * will allow a packet filter to distinguish between secure and
+ * untrusted packets.
+ */
+
+ switch (v >> 4) {
+#ifdef INET
+ case 4:
+ isr = NETISR_IP;
+ break;
+#endif
+#ifdef INET6
+ case 6:
+ isr = NETISR_IPV6;
+ break;
+#endif
+ default:
+ panic("%s: bogus ip version %u", __func__, v>>4);
+ }
+
+ if (netisr_queue(isr, m)) { /* (0) on success. */
+ V_ipipstat.ipips_qfull++;
+ DPRINTF(("%s: packet dropped because of full queue\n",
+ __func__));
+ }
+}
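+
+/*
+ * Editor's sketch (illustrative, not part of the original sources) of
+ * the decapsulation performed above:
+ *
+ *	before m_adj(m, iphlen):  [ outer IP ][ inner IP ][ payload ]
+ *	after:                    [ inner IP ][ payload ]
+ *
+ * The outer TOS/traffic class is saved first so ip_ecn_egress() can
+ * fold any congestion marking into the inner header before the packet
+ * is requeued via netisr_queue().
+ */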
+
+int
+ipip_output(
+ struct mbuf *m,
+ struct ipsecrequest *isr,
+ struct mbuf **mp,
+ int skip,
+ int protoff
+)
+{
+ struct secasvar *sav;
+ u_int8_t tp, otos;
+ struct secasindex *saidx;
+ int error;
+#ifdef INET
+ u_int8_t itos;
+ struct ip *ipo;
+#endif /* INET */
+#ifdef INET6
+ struct ip6_hdr *ip6, *ip6o;
+#endif /* INET6 */
+
+ sav = isr->sav;
+ IPSEC_ASSERT(sav != NULL, ("null SA"));
+ IPSEC_ASSERT(sav->sah != NULL, ("null SAH"));
+
+ /* XXX Deal with empty TDB source/destination addresses. */
+
+ m_copydata(m, 0, 1, &tp);
+ tp = (tp >> 4) & 0xff; /* Get the IP version number. */
+
+ saidx = &sav->sah->saidx;
+ switch (saidx->dst.sa.sa_family) {
+#ifdef INET
+ case AF_INET:
+ if (saidx->src.sa.sa_family != AF_INET ||
+ saidx->src.sin.sin_addr.s_addr == INADDR_ANY ||
+ saidx->dst.sin.sin_addr.s_addr == INADDR_ANY) {
+ DPRINTF(("%s: unspecified tunnel endpoint "
+ "address in SA %s/%08lx\n", __func__,
+ ipsec_address(&saidx->dst),
+ (u_long) ntohl(sav->spi)));
+ V_ipipstat.ipips_unspec++;
+ error = EINVAL;
+ goto bad;
+ }
+
+ M_PREPEND(m, sizeof(struct ip), M_DONTWAIT);
+ if (m == 0) {
+ DPRINTF(("%s: M_PREPEND failed\n", __func__));
+ V_ipipstat.ipips_hdrops++;
+ error = ENOBUFS;
+ goto bad;
+ }
+
+ ipo = mtod(m, struct ip *);
+
+ ipo->ip_v = IPVERSION;
+ ipo->ip_hl = 5;
+ ipo->ip_len = htons(m->m_pkthdr.len);
+ ipo->ip_ttl = V_ip_defttl;
+ ipo->ip_sum = 0;
+ ipo->ip_src = saidx->src.sin.sin_addr;
+ ipo->ip_dst = saidx->dst.sin.sin_addr;
+
+ ipo->ip_id = ip_newid();
+
+ /* If the inner protocol is IP... */
+ if (tp == IPVERSION) {
+ /* Save ECN notification */
+ m_copydata(m, sizeof(struct ip) +
+ offsetof(struct ip, ip_tos),
+ sizeof(u_int8_t), (caddr_t) &itos);
+
+ ipo->ip_p = IPPROTO_IPIP;
+
+ /*
+ * We should be keeping tunnel soft-state and
+ * sending back ICMPs if needed.
+ */
+ m_copydata(m, sizeof(struct ip) +
+ offsetof(struct ip, ip_off),
+ sizeof(u_int16_t), (caddr_t) &ipo->ip_off);
+ ipo->ip_off = ntohs(ipo->ip_off);
+ ipo->ip_off &= ~(IP_DF | IP_MF | IP_OFFMASK);
+ ipo->ip_off = htons(ipo->ip_off);
+ }
+#ifdef INET6
+ else if (tp == (IPV6_VERSION >> 4)) {
+ u_int32_t itos32;
+
+ /* Save ECN notification. */
+ m_copydata(m, sizeof(struct ip) +
+ offsetof(struct ip6_hdr, ip6_flow),
+ sizeof(u_int32_t), (caddr_t) &itos32);
+ itos = ntohl(itos32) >> 20;
+ ipo->ip_p = IPPROTO_IPV6;
+ ipo->ip_off = 0;
+ }
+#endif /* INET6 */
+ else {
+ goto nofamily;
+ }
+
+ otos = 0;
+ ip_ecn_ingress(ECN_ALLOWED, &otos, &itos);
+ ipo->ip_tos = otos;
+ break;
+#endif /* INET */
+
+#ifdef INET6
+ case AF_INET6:
+ if (IN6_IS_ADDR_UNSPECIFIED(&saidx->dst.sin6.sin6_addr) ||
+ saidx->src.sa.sa_family != AF_INET6 ||
+ IN6_IS_ADDR_UNSPECIFIED(&saidx->src.sin6.sin6_addr)) {
+ DPRINTF(("%s: unspecified tunnel endpoint "
+ "address in SA %s/%08lx\n", __func__,
+ ipsec_address(&saidx->dst),
+ (u_long) ntohl(sav->spi)));
+ V_ipipstat.ipips_unspec++;
+ error = EINVAL;
+ goto bad;
+ }
+
+ /* scoped address handling */
+ ip6 = mtod(m, struct ip6_hdr *);
+ if (IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_src))
+ ip6->ip6_src.s6_addr16[1] = 0;
+ if (IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_dst))
+ ip6->ip6_dst.s6_addr16[1] = 0;
+
+ M_PREPEND(m, sizeof(struct ip6_hdr), M_DONTWAIT);
+ if (m == 0) {
+ DPRINTF(("%s: M_PREPEND failed\n", __func__));
+ V_ipipstat.ipips_hdrops++;
+ error = ENOBUFS;
+ goto bad;
+ }
+
+ /* Initialize IPv6 header */
+ ip6o = mtod(m, struct ip6_hdr *);
+ ip6o->ip6_flow = 0;
+ ip6o->ip6_vfc &= ~IPV6_VERSION_MASK;
+ ip6o->ip6_vfc |= IPV6_VERSION;
+ ip6o->ip6_plen = htons(m->m_pkthdr.len);
+ ip6o->ip6_hlim = V_ip_defttl;
+ ip6o->ip6_dst = saidx->dst.sin6.sin6_addr;
+ ip6o->ip6_src = saidx->src.sin6.sin6_addr;
+
+#ifdef INET
+ if (tp == IPVERSION) {
+ /* Save ECN notification */
+ m_copydata(m, sizeof(struct ip6_hdr) +
+ offsetof(struct ip, ip_tos), sizeof(u_int8_t),
+ (caddr_t) &itos);
+
+ /* This is really IPVERSION. */
+ ip6o->ip6_nxt = IPPROTO_IPIP;
+ } else
+#endif /* INET */
+ if (tp == (IPV6_VERSION >> 4)) {
+ u_int32_t itos32;
+
+ /* Save ECN notification. */
+ m_copydata(m, sizeof(struct ip6_hdr) +
+ offsetof(struct ip6_hdr, ip6_flow),
+ sizeof(u_int32_t), (caddr_t) &itos32);
+ itos = ntohl(itos32) >> 20;
+
+ ip6o->ip6_nxt = IPPROTO_IPV6;
+ } else {
+ goto nofamily;
+ }
+
+ otos = 0;
+ ip_ecn_ingress(ECN_ALLOWED, &otos, &itos);
+ ip6o->ip6_flow |= htonl((u_int32_t) otos << 20);
+ break;
+#endif /* INET6 */
+
+ default:
+nofamily:
+ DPRINTF(("%s: unsupported protocol family %u\n", __func__,
+ saidx->dst.sa.sa_family));
+ V_ipipstat.ipips_family++;
+ error = EAFNOSUPPORT; /* XXX diffs from openbsd */
+ goto bad;
+ }
+
+ V_ipipstat.ipips_opackets++;
+ *mp = m;
+
+#ifdef INET
+ if (saidx->dst.sa.sa_family == AF_INET) {
+#if 0
+ if (sav->tdb_xform->xf_type == XF_IP4)
+ tdb->tdb_cur_bytes +=
+ m->m_pkthdr.len - sizeof(struct ip);
+#endif
+ V_ipipstat.ipips_obytes += m->m_pkthdr.len - sizeof(struct ip);
+ }
+#endif /* INET */
+
+#ifdef INET6
+ if (saidx->dst.sa.sa_family == AF_INET6) {
+#if 0
+ if (sav->tdb_xform->xf_type == XF_IP4)
+ tdb->tdb_cur_bytes +=
+ m->m_pkthdr.len - sizeof(struct ip6_hdr);
+#endif
+ V_ipipstat.ipips_obytes +=
+ m->m_pkthdr.len - sizeof(struct ip6_hdr);
+ }
+#endif /* INET6 */
+
+ return 0;
+bad:
+ if (m)
+ m_freem(m);
+ *mp = NULL;
+ return (error);
+}
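+
+/*
+ * In both address families above, ip_ecn_ingress() maps the inner
+ * ECN/TOS bits into the outer header; ECN_ALLOWED selects the
+ * RFC 3168 full-functionality treatment, so ECN information is
+ * propagated across the tunnel.
+ */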
+
+#ifdef IPSEC
+static int
+ipe4_init(struct secasvar *sav, struct xformsw *xsp)
+{
+ sav->tdb_xform = xsp;
+ return 0;
+}
+
+static int
+ipe4_zeroize(struct secasvar *sav)
+{
+ sav->tdb_xform = NULL;
+ return 0;
+}
+
+static int
+ipe4_input(struct mbuf *m, struct secasvar *sav, int skip, int protoff)
+{
+ /* This is a rather serious mistake, so no conditional printing. */
+ printf("%s: should never be called\n", __func__);
+ if (m)
+ m_freem(m);
+ return EOPNOTSUPP;
+}
+
+static struct xformsw ipe4_xformsw = {
+ XF_IP4, 0, "IPv4 Simple Encapsulation",
+ ipe4_init, ipe4_zeroize, ipe4_input, ipip_output,
+};
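+
+/*
+ * Reading aid: the positional initializers above correspond to
+ * xf_type, xf_flags, xf_name and then the init/zeroize/input/output
+ * methods of struct xformsw (see netipsec/xform.h for the definition).
+ */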
+
+extern struct domain inetdomain;
+static struct protosw ipe4_protosw = {
+ .pr_type = SOCK_RAW,
+ .pr_domain = &inetdomain,
+ .pr_protocol = IPPROTO_IPV4,
+ .pr_flags = PR_ATOMIC|PR_ADDR|PR_LASTHDR,
+ .pr_input = ip4_input,
+ .pr_ctloutput = rip_ctloutput,
+ .pr_usrreqs = &rip_usrreqs
+};
+#ifdef INET6
+static struct ip6protosw ipe6_protosw = {
+ .pr_type = SOCK_RAW,
+ .pr_domain = &inetdomain,
+ .pr_protocol = IPPROTO_IPV6,
+ .pr_flags = PR_ATOMIC|PR_ADDR|PR_LASTHDR,
+ .pr_input = ip4_input6,
+ .pr_ctloutput = rip_ctloutput,
+ .pr_usrreqs = &rip_usrreqs
+};
+#endif
+
+/*
+ * Check the encapsulated packet to see if we want it
+ */
+static int
+ipe4_encapcheck(const struct mbuf *m, int off, int proto, void *arg)
+{
+ /*
+ * Only take packets coming from IPSEC tunnels; the rest
+ * must be handled by the gif tunnel code. Note that we
+ * also return a minimum priority when we want the packet
+ * so any explicit gif tunnels take precedence.
+ */
+ return ((m->m_flags & M_IPSEC) != 0 ? 1 : 0);
+}
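+
+/*
+ * For illustration: a checker that preferred exact matches could
+ * compute a priority instead of the fixed minimum used above, e.g.
+ *
+ *	return (exact_src_and_dst_match ? 64 : 1);
+ *
+ * where larger return values win when several encap handlers claim
+ * the same packet; exact_src_and_dst_match is a placeholder here,
+ * not an existing symbol.
+ */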
+
+static void
+ipe4_attach(void)
+{
+
+ xform_register(&ipe4_xformsw);
+ /* attach to encapsulation framework */
+ /* XXX save return cookie for detach on module remove */
+ (void) encap_attach_func(AF_INET, -1,
+ ipe4_encapcheck, &ipe4_protosw, NULL);
+#ifdef INET6
+ (void) encap_attach_func(AF_INET6, -1,
+ ipe4_encapcheck, (struct protosw *)&ipe6_protosw, NULL);
+#endif
+}
+SYSINIT(ipe4_xform_init, SI_SUB_PROTO_DOMAIN, SI_ORDER_MIDDLE, ipe4_attach, NULL);
+#endif /* IPSEC */
diff --git a/rtems/freebsd/netipsec/xform_tcp.c b/rtems/freebsd/netipsec/xform_tcp.c
new file mode 100644
index 00000000..d2f20c53
--- /dev/null
+++ b/rtems/freebsd/netipsec/xform_tcp.c
@@ -0,0 +1,173 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/* $FreeBSD$ */
+
+/*-
+ * Copyright (c) 2003 Bruce M. Simpson <bms@spc.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* TCP MD5 Signature Option (RFC2385) */
+#include <rtems/freebsd/local/opt_inet.h>
+#include <rtems/freebsd/local/opt_inet6.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/protosw.h>
+#include <rtems/freebsd/sys/sysctl.h>
+
+#include <rtems/freebsd/netinet/in.h>
+#include <rtems/freebsd/netinet/in_systm.h>
+#include <rtems/freebsd/netinet/ip.h>
+#include <rtems/freebsd/netinet/ip_var.h>
+#include <rtems/freebsd/netinet/tcp.h>
+#include <rtems/freebsd/netinet/tcp_var.h>
+
+#include <rtems/freebsd/net/route.h>
+#include <rtems/freebsd/net/vnet.h>
+
+#include <rtems/freebsd/netipsec/ipsec.h>
+#include <rtems/freebsd/netipsec/xform.h>
+
+#ifdef INET6
+#include <rtems/freebsd/netinet/ip6.h>
+#include <rtems/freebsd/netipsec/ipsec6.h>
+#endif
+
+#include <rtems/freebsd/netipsec/key.h>
+#include <rtems/freebsd/netipsec/key_debug.h>
+
+/*
+ * Initialize a TCP-MD5 SA. Called when the SA is being set up.
+ *
+ * We don't need to set up the tdb prefixed fields, as we don't use the
+ * opencrypto code; we just perform a key length check.
+ *
+ * XXX: Currently we only allow a single 'magic' SPI to be used.
+ *
+ * This allows per-host granularity without affecting the userland
+ * interface, which is a simple socket option toggle switch,
+ * TCP_SIGNATURE_ENABLE.
+ *
+ * Allowing per-service granularity requires a means of mapping
+ * port to SPI. The mandated way of doing this is to use SPD
+ * entries to specify packet flows which get the TCP-MD5
+ * treatment; however, the code to do this is currently unstable
+ * and unsuitable for production use.
+ *
+ * Therefore we use this compromise in the meantime.
+ */
+static int
+tcpsignature_init(struct secasvar *sav, struct xformsw *xsp)
+{
+ int keylen;
+
+ if (sav->spi != htonl(TCP_SIG_SPI)) {
+ DPRINTF(("%s: SPI must be TCP_SIG_SPI (0x1000)\n",
+ __func__));
+ return (EINVAL);
+ }
+ if (sav->alg_auth != SADB_X_AALG_TCP_MD5) {
+ DPRINTF(("%s: unsupported authentication algorithm %u\n",
+ __func__, sav->alg_auth));
+ return (EINVAL);
+ }
+ if (sav->key_auth == NULL) {
+ DPRINTF(("%s: no authentication key present\n", __func__));
+ return (EINVAL);
+ }
+ keylen = _KEYLEN(sav->key_auth);
+ if ((keylen < TCP_KEYLEN_MIN) || (keylen > TCP_KEYLEN_MAX)) {
+ DPRINTF(("%s: invalid key length %u\n", __func__, keylen));
+ return (EINVAL);
+ }
+
+ return (0);
+}
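+
+/*
+ * For illustration, using setkey(8) syntax and documentation-range
+ * addresses: an SA passing the checks above would be installed along
+ * the lines of
+ *
+ *	add 192.0.2.1 192.0.2.2 tcp 0x1000 -A tcp-md5 "sharedsecret" ;
+ *
+ * i.e. protocol tcp, the magic SPI 0x1000 (TCP_SIG_SPI), algorithm
+ * tcp-md5 and a key of acceptable length.
+ */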
+
+/*
+ * Paranoia.
+ *
+ * Called when the SA is deleted.
+ */
+static int
+tcpsignature_zeroize(struct secasvar *sav)
+{
+
+ if (sav->key_auth)
+ bzero(sav->key_auth->key_data, _KEYLEN(sav->key_auth));
+
+ sav->tdb_cryptoid = 0;
+ sav->tdb_authalgxform = NULL;
+ sav->tdb_xform = NULL;
+
+ return (0);
+}
+
+/*
+ * Verify that an input packet passes authentication.
+ * Called from the ipsec layer.
+ * We do this from within tcp itself, so this routine is just a stub.
+ */
+static int
+tcpsignature_input(struct mbuf *m, struct secasvar *sav, int skip,
+ int protoff)
+{
+
+ return (0);
+}
+
+/*
+ * Prepend the authentication header.
+ * Called from the ipsec layer.
+ * We do this from within tcp itself, so this routine is just a stub.
+ */
+static int
+tcpsignature_output(struct mbuf *m, struct ipsecrequest *isr,
+ struct mbuf **mp, int skip, int protoff)
+{
+
+ return (EINVAL);
+}
+
+static struct xformsw tcpsignature_xformsw = {
+ XF_TCPSIGNATURE, XFT_AUTH, "TCPMD5",
+ tcpsignature_init, tcpsignature_zeroize,
+ tcpsignature_input, tcpsignature_output
+};
+
+static void
+tcpsignature_attach(void)
+{
+
+ xform_register(&tcpsignature_xformsw);
+}
+
+SYSINIT(tcpsignature_xform_init, SI_SUB_DRIVERS, SI_ORDER_FIRST,
+ tcpsignature_attach, NULL);
diff --git a/rtems/freebsd/opencrypto/cast.c b/rtems/freebsd/opencrypto/cast.c
new file mode 100644
index 00000000..0c648668
--- /dev/null
+++ b/rtems/freebsd/opencrypto/cast.c
@@ -0,0 +1,246 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/* $OpenBSD: cast.c,v 1.2 2000/06/06 06:49:47 deraadt Exp $ */
+/*-
+ * CAST-128 in C
+ * Written by Steve Reid <sreid@sea-to-sky.net>
+ * 100% Public Domain - no warranty
+ * Released 1997.10.11
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/freebsd/sys/types.h>
+#include <rtems/freebsd/opencrypto/cast.h>
+#include <rtems/freebsd/opencrypto/castsb.h>
+
+/* Macros to access 8-bit bytes out of a 32-bit word */
+#define U_INT8_Ta(x) ( (u_int8_t) (x>>24) )
+#define U_INT8_Tb(x) ( (u_int8_t) ((x>>16)&255) )
+#define U_INT8_Tc(x) ( (u_int8_t) ((x>>8)&255) )
+#define U_INT8_Td(x) ( (u_int8_t) ((x)&255) )
+
+/* Circular left shift */
+#define ROL(x, n) ( ((x)<<(n)) | ((x)>>(32-(n))) )
+
+/* CAST-128 uses three different round functions */
+#define F1(l, r, i) \
+ t = ROL(key->xkey[i] + r, key->xkey[i+16]); \
+ l ^= ((cast_sbox1[U_INT8_Ta(t)] ^ cast_sbox2[U_INT8_Tb(t)]) - \
+ cast_sbox3[U_INT8_Tc(t)]) + cast_sbox4[U_INT8_Td(t)];
+#define F2(l, r, i) \
+ t = ROL(key->xkey[i] ^ r, key->xkey[i+16]); \
+ l ^= ((cast_sbox1[U_INT8_Ta(t)] - cast_sbox2[U_INT8_Tb(t)]) + \
+ cast_sbox3[U_INT8_Tc(t)]) ^ cast_sbox4[U_INT8_Td(t)];
+#define F3(l, r, i) \
+ t = ROL(key->xkey[i] - r, key->xkey[i+16]); \
+ l ^= ((cast_sbox1[U_INT8_Ta(t)] + cast_sbox2[U_INT8_Tb(t)]) ^ \
+ cast_sbox3[U_INT8_Tc(t)]) - cast_sbox4[U_INT8_Td(t)];
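+
+/* Note: F1/F2/F3 expand in place and use a u_int32_t t declared by the caller. */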
+
+
+/***** Encryption Function *****/
+
+void cast_encrypt(cast_key* key, u_int8_t* inblock, u_int8_t* outblock)
+{
+	u_int32_t t, l, r;
+
+ /* Get inblock into l,r */
+ l = ((u_int32_t)inblock[0] << 24) | ((u_int32_t)inblock[1] << 16) |
+ ((u_int32_t)inblock[2] << 8) | (u_int32_t)inblock[3];
+ r = ((u_int32_t)inblock[4] << 24) | ((u_int32_t)inblock[5] << 16) |
+ ((u_int32_t)inblock[6] << 8) | (u_int32_t)inblock[7];
+ /* Do the work */
+ F1(l, r, 0);
+ F2(r, l, 1);
+ F3(l, r, 2);
+ F1(r, l, 3);
+ F2(l, r, 4);
+ F3(r, l, 5);
+ F1(l, r, 6);
+ F2(r, l, 7);
+ F3(l, r, 8);
+ F1(r, l, 9);
+ F2(l, r, 10);
+ F3(r, l, 11);
+ /* Only do full 16 rounds if key length > 80 bits */
+ if (key->rounds > 12) {
+ F1(l, r, 12);
+ F2(r, l, 13);
+ F3(l, r, 14);
+ F1(r, l, 15);
+ }
+ /* Put l,r into outblock */
+ outblock[0] = U_INT8_Ta(r);
+ outblock[1] = U_INT8_Tb(r);
+ outblock[2] = U_INT8_Tc(r);
+ outblock[3] = U_INT8_Td(r);
+ outblock[4] = U_INT8_Ta(l);
+ outblock[5] = U_INT8_Tb(l);
+ outblock[6] = U_INT8_Tc(l);
+ outblock[7] = U_INT8_Td(l);
+ /* Wipe clean */
+ t = l = r = 0;
+}
+
+
+/***** Decryption Function *****/
+
+void cast_decrypt(cast_key* key, u_int8_t* inblock, u_int8_t* outblock)
+{
+	u_int32_t t, l, r;
+
+ /* Get inblock into l,r */
+ r = ((u_int32_t)inblock[0] << 24) | ((u_int32_t)inblock[1] << 16) |
+ ((u_int32_t)inblock[2] << 8) | (u_int32_t)inblock[3];
+ l = ((u_int32_t)inblock[4] << 24) | ((u_int32_t)inblock[5] << 16) |
+ ((u_int32_t)inblock[6] << 8) | (u_int32_t)inblock[7];
+ /* Do the work */
+ /* Only do full 16 rounds if key length > 80 bits */
+ if (key->rounds > 12) {
+ F1(r, l, 15);
+ F3(l, r, 14);
+ F2(r, l, 13);
+ F1(l, r, 12);
+ }
+ F3(r, l, 11);
+ F2(l, r, 10);
+ F1(r, l, 9);
+ F3(l, r, 8);
+ F2(r, l, 7);
+ F1(l, r, 6);
+ F3(r, l, 5);
+ F2(l, r, 4);
+ F1(r, l, 3);
+ F3(l, r, 2);
+ F2(r, l, 1);
+ F1(l, r, 0);
+ /* Put l,r into outblock */
+ outblock[0] = U_INT8_Ta(l);
+ outblock[1] = U_INT8_Tb(l);
+ outblock[2] = U_INT8_Tc(l);
+ outblock[3] = U_INT8_Td(l);
+ outblock[4] = U_INT8_Ta(r);
+ outblock[5] = U_INT8_Tb(r);
+ outblock[6] = U_INT8_Tc(r);
+ outblock[7] = U_INT8_Td(r);
+ /* Wipe clean */
+ t = l = r = 0;
+}
+
+
+/***** Key Schedule *****/
+
+void cast_setkey(cast_key* key, u_int8_t* rawkey, int keybytes)
+{
+	u_int32_t t[4] = {0, 0, 0, 0}, z[4] = {0, 0, 0, 0}, x[4];
+	int i;
+
+ /* Set number of rounds to 12 or 16, depending on key length */
+ key->rounds = (keybytes <= 10 ? 12 : 16);
+
+ /* Copy key to workspace x */
+ for (i = 0; i < 4; i++) {
+ x[i] = 0;
+ if ((i*4+0) < keybytes) x[i] = (u_int32_t)rawkey[i*4+0] << 24;
+ if ((i*4+1) < keybytes) x[i] |= (u_int32_t)rawkey[i*4+1] << 16;
+ if ((i*4+2) < keybytes) x[i] |= (u_int32_t)rawkey[i*4+2] << 8;
+ if ((i*4+3) < keybytes) x[i] |= (u_int32_t)rawkey[i*4+3];
+ }
+ /* Generate 32 subkeys, four at a time */
+ for (i = 0; i < 32; i+=4) {
+ switch (i & 4) {
+ case 0:
+ t[0] = z[0] = x[0] ^ cast_sbox5[U_INT8_Tb(x[3])] ^
+ cast_sbox6[U_INT8_Td(x[3])] ^ cast_sbox7[U_INT8_Ta(x[3])] ^
+ cast_sbox8[U_INT8_Tc(x[3])] ^ cast_sbox7[U_INT8_Ta(x[2])];
+ t[1] = z[1] = x[2] ^ cast_sbox5[U_INT8_Ta(z[0])] ^
+ cast_sbox6[U_INT8_Tc(z[0])] ^ cast_sbox7[U_INT8_Tb(z[0])] ^
+ cast_sbox8[U_INT8_Td(z[0])] ^ cast_sbox8[U_INT8_Tc(x[2])];
+ t[2] = z[2] = x[3] ^ cast_sbox5[U_INT8_Td(z[1])] ^
+ cast_sbox6[U_INT8_Tc(z[1])] ^ cast_sbox7[U_INT8_Tb(z[1])] ^
+ cast_sbox8[U_INT8_Ta(z[1])] ^ cast_sbox5[U_INT8_Tb(x[2])];
+ t[3] = z[3] = x[1] ^ cast_sbox5[U_INT8_Tc(z[2])] ^
+ cast_sbox6[U_INT8_Tb(z[2])] ^ cast_sbox7[U_INT8_Td(z[2])] ^
+ cast_sbox8[U_INT8_Ta(z[2])] ^ cast_sbox6[U_INT8_Td(x[2])];
+ break;
+ case 4:
+ t[0] = x[0] = z[2] ^ cast_sbox5[U_INT8_Tb(z[1])] ^
+ cast_sbox6[U_INT8_Td(z[1])] ^ cast_sbox7[U_INT8_Ta(z[1])] ^
+ cast_sbox8[U_INT8_Tc(z[1])] ^ cast_sbox7[U_INT8_Ta(z[0])];
+ t[1] = x[1] = z[0] ^ cast_sbox5[U_INT8_Ta(x[0])] ^
+ cast_sbox6[U_INT8_Tc(x[0])] ^ cast_sbox7[U_INT8_Tb(x[0])] ^
+ cast_sbox8[U_INT8_Td(x[0])] ^ cast_sbox8[U_INT8_Tc(z[0])];
+ t[2] = x[2] = z[1] ^ cast_sbox5[U_INT8_Td(x[1])] ^
+ cast_sbox6[U_INT8_Tc(x[1])] ^ cast_sbox7[U_INT8_Tb(x[1])] ^
+ cast_sbox8[U_INT8_Ta(x[1])] ^ cast_sbox5[U_INT8_Tb(z[0])];
+ t[3] = x[3] = z[3] ^ cast_sbox5[U_INT8_Tc(x[2])] ^
+ cast_sbox6[U_INT8_Tb(x[2])] ^ cast_sbox7[U_INT8_Td(x[2])] ^
+ cast_sbox8[U_INT8_Ta(x[2])] ^ cast_sbox6[U_INT8_Td(z[0])];
+ break;
+ }
+ switch (i & 12) {
+ case 0:
+ case 12:
+ key->xkey[i+0] = cast_sbox5[U_INT8_Ta(t[2])] ^ cast_sbox6[U_INT8_Tb(t[2])] ^
+ cast_sbox7[U_INT8_Td(t[1])] ^ cast_sbox8[U_INT8_Tc(t[1])];
+ key->xkey[i+1] = cast_sbox5[U_INT8_Tc(t[2])] ^ cast_sbox6[U_INT8_Td(t[2])] ^
+ cast_sbox7[U_INT8_Tb(t[1])] ^ cast_sbox8[U_INT8_Ta(t[1])];
+ key->xkey[i+2] = cast_sbox5[U_INT8_Ta(t[3])] ^ cast_sbox6[U_INT8_Tb(t[3])] ^
+ cast_sbox7[U_INT8_Td(t[0])] ^ cast_sbox8[U_INT8_Tc(t[0])];
+ key->xkey[i+3] = cast_sbox5[U_INT8_Tc(t[3])] ^ cast_sbox6[U_INT8_Td(t[3])] ^
+ cast_sbox7[U_INT8_Tb(t[0])] ^ cast_sbox8[U_INT8_Ta(t[0])];
+ break;
+ case 4:
+ case 8:
+ key->xkey[i+0] = cast_sbox5[U_INT8_Td(t[0])] ^ cast_sbox6[U_INT8_Tc(t[0])] ^
+ cast_sbox7[U_INT8_Ta(t[3])] ^ cast_sbox8[U_INT8_Tb(t[3])];
+ key->xkey[i+1] = cast_sbox5[U_INT8_Tb(t[0])] ^ cast_sbox6[U_INT8_Ta(t[0])] ^
+ cast_sbox7[U_INT8_Tc(t[3])] ^ cast_sbox8[U_INT8_Td(t[3])];
+ key->xkey[i+2] = cast_sbox5[U_INT8_Td(t[1])] ^ cast_sbox6[U_INT8_Tc(t[1])] ^
+ cast_sbox7[U_INT8_Ta(t[2])] ^ cast_sbox8[U_INT8_Tb(t[2])];
+ key->xkey[i+3] = cast_sbox5[U_INT8_Tb(t[1])] ^ cast_sbox6[U_INT8_Ta(t[1])] ^
+ cast_sbox7[U_INT8_Tc(t[2])] ^ cast_sbox8[U_INT8_Td(t[2])];
+ break;
+ }
+ switch (i & 12) {
+ case 0:
+ key->xkey[i+0] ^= cast_sbox5[U_INT8_Tc(z[0])];
+ key->xkey[i+1] ^= cast_sbox6[U_INT8_Tc(z[1])];
+ key->xkey[i+2] ^= cast_sbox7[U_INT8_Tb(z[2])];
+ key->xkey[i+3] ^= cast_sbox8[U_INT8_Ta(z[3])];
+ break;
+ case 4:
+ key->xkey[i+0] ^= cast_sbox5[U_INT8_Ta(x[2])];
+ key->xkey[i+1] ^= cast_sbox6[U_INT8_Tb(x[3])];
+ key->xkey[i+2] ^= cast_sbox7[U_INT8_Td(x[0])];
+ key->xkey[i+3] ^= cast_sbox8[U_INT8_Td(x[1])];
+ break;
+ case 8:
+ key->xkey[i+0] ^= cast_sbox5[U_INT8_Tb(z[2])];
+ key->xkey[i+1] ^= cast_sbox6[U_INT8_Ta(z[3])];
+ key->xkey[i+2] ^= cast_sbox7[U_INT8_Tc(z[0])];
+ key->xkey[i+3] ^= cast_sbox8[U_INT8_Tc(z[1])];
+ break;
+ case 12:
+ key->xkey[i+0] ^= cast_sbox5[U_INT8_Td(x[0])];
+ key->xkey[i+1] ^= cast_sbox6[U_INT8_Td(x[1])];
+ key->xkey[i+2] ^= cast_sbox7[U_INT8_Ta(x[2])];
+ key->xkey[i+3] ^= cast_sbox8[U_INT8_Tb(x[3])];
+ break;
+ }
+ if (i >= 16) {
+ key->xkey[i+0] &= 31;
+ key->xkey[i+1] &= 31;
+ key->xkey[i+2] &= 31;
+ key->xkey[i+3] &= 31;
+ }
+ }
+ /* Wipe clean */
+ for (i = 0; i < 4; i++) {
+ t[i] = x[i] = z[i] = 0;
+ }
+}
+
+/* Made in Canada */
+
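+#if 0
+/*
+ * Usage sketch (illustrative only, not compiled in): round-trip one
+ * 8-byte block through CAST-128.  The key and plaintext bytes are
+ * arbitrary placeholder values, not claimed test vectors.
+ */
+static void
+cast_example(void)
+{
+	cast_key ck;
+	u_int8_t key[16] = {
+		0x01, 0x23, 0x45, 0x67, 0x12, 0x34, 0x56, 0x78,
+		0x23, 0x45, 0x67, 0x89, 0x34, 0x56, 0x78, 0x9A
+	};
+	u_int8_t pt[8] = { 0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF };
+	u_int8_t ct[8], out[8];
+
+	cast_setkey(&ck, key, sizeof(key));	/* 16 bytes > 10 => 16 rounds */
+	cast_encrypt(&ck, pt, ct);
+	cast_decrypt(&ck, ct, out);
+	/* out now matches pt */
+}
+#endif
+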
diff --git a/rtems/freebsd/opencrypto/cast.h b/rtems/freebsd/opencrypto/cast.h
new file mode 100644
index 00000000..25d6c763
--- /dev/null
+++ b/rtems/freebsd/opencrypto/cast.h
@@ -0,0 +1,23 @@
+/* $FreeBSD$ */
+/* $OpenBSD: cast.h,v 1.2 2002/03/14 01:26:51 millert Exp $ */
+
+/*-
+ * CAST-128 in C
+ * Written by Steve Reid <sreid@sea-to-sky.net>
+ * 100% Public Domain - no warranty
+ * Released 1997.10.11
+ */
+
+#ifndef _CAST_HH_
+#define _CAST_HH_
+
+typedef struct {
+ u_int32_t xkey[32]; /* Key, after expansion */
+ int rounds; /* Number of rounds to use, 12 or 16 */
+} cast_key;
+
+void cast_setkey(cast_key * key, u_int8_t * rawkey, int keybytes);
+void cast_encrypt(cast_key * key, u_int8_t * inblock, u_int8_t * outblock);
+void cast_decrypt(cast_key * key, u_int8_t * inblock, u_int8_t * outblock);
+
+#endif /* ifndef _CAST_HH_ */
diff --git a/rtems/freebsd/opencrypto/castsb.h b/rtems/freebsd/opencrypto/castsb.h
new file mode 100644
index 00000000..ed13058c
--- /dev/null
+++ b/rtems/freebsd/opencrypto/castsb.h
@@ -0,0 +1,545 @@
+/* $FreeBSD$ */
+/* $OpenBSD: castsb.h,v 1.1 2000/02/28 23:13:04 deraadt Exp $ */
+/*-
+ * CAST-128 in C
+ * Written by Steve Reid <sreid@sea-to-sky.net>
+ * 100% Public Domain - no warranty
+ * Released 1997.10.11
+ */
+
+static const u_int32_t cast_sbox1[256] = {
+ 0x30FB40D4, 0x9FA0FF0B, 0x6BECCD2F, 0x3F258C7A,
+ 0x1E213F2F, 0x9C004DD3, 0x6003E540, 0xCF9FC949,
+ 0xBFD4AF27, 0x88BBBDB5, 0xE2034090, 0x98D09675,
+ 0x6E63A0E0, 0x15C361D2, 0xC2E7661D, 0x22D4FF8E,
+ 0x28683B6F, 0xC07FD059, 0xFF2379C8, 0x775F50E2,
+ 0x43C340D3, 0xDF2F8656, 0x887CA41A, 0xA2D2BD2D,
+ 0xA1C9E0D6, 0x346C4819, 0x61B76D87, 0x22540F2F,
+ 0x2ABE32E1, 0xAA54166B, 0x22568E3A, 0xA2D341D0,
+ 0x66DB40C8, 0xA784392F, 0x004DFF2F, 0x2DB9D2DE,
+ 0x97943FAC, 0x4A97C1D8, 0x527644B7, 0xB5F437A7,
+ 0xB82CBAEF, 0xD751D159, 0x6FF7F0ED, 0x5A097A1F,
+ 0x827B68D0, 0x90ECF52E, 0x22B0C054, 0xBC8E5935,
+ 0x4B6D2F7F, 0x50BB64A2, 0xD2664910, 0xBEE5812D,
+ 0xB7332290, 0xE93B159F, 0xB48EE411, 0x4BFF345D,
+ 0xFD45C240, 0xAD31973F, 0xC4F6D02E, 0x55FC8165,
+ 0xD5B1CAAD, 0xA1AC2DAE, 0xA2D4B76D, 0xC19B0C50,
+ 0x882240F2, 0x0C6E4F38, 0xA4E4BFD7, 0x4F5BA272,
+ 0x564C1D2F, 0xC59C5319, 0xB949E354, 0xB04669FE,
+ 0xB1B6AB8A, 0xC71358DD, 0x6385C545, 0x110F935D,
+ 0x57538AD5, 0x6A390493, 0xE63D37E0, 0x2A54F6B3,
+ 0x3A787D5F, 0x6276A0B5, 0x19A6FCDF, 0x7A42206A,
+ 0x29F9D4D5, 0xF61B1891, 0xBB72275E, 0xAA508167,
+ 0x38901091, 0xC6B505EB, 0x84C7CB8C, 0x2AD75A0F,
+ 0x874A1427, 0xA2D1936B, 0x2AD286AF, 0xAA56D291,
+ 0xD7894360, 0x425C750D, 0x93B39E26, 0x187184C9,
+ 0x6C00B32D, 0x73E2BB14, 0xA0BEBC3C, 0x54623779,
+ 0x64459EAB, 0x3F328B82, 0x7718CF82, 0x59A2CEA6,
+ 0x04EE002E, 0x89FE78E6, 0x3FAB0950, 0x325FF6C2,
+ 0x81383F05, 0x6963C5C8, 0x76CB5AD6, 0xD49974C9,
+ 0xCA180DCF, 0x380782D5, 0xC7FA5CF6, 0x8AC31511,
+ 0x35E79E13, 0x47DA91D0, 0xF40F9086, 0xA7E2419E,
+ 0x31366241, 0x051EF495, 0xAA573B04, 0x4A805D8D,
+ 0x548300D0, 0x00322A3C, 0xBF64CDDF, 0xBA57A68E,
+ 0x75C6372B, 0x50AFD341, 0xA7C13275, 0x915A0BF5,
+ 0x6B54BFAB, 0x2B0B1426, 0xAB4CC9D7, 0x449CCD82,
+ 0xF7FBF265, 0xAB85C5F3, 0x1B55DB94, 0xAAD4E324,
+ 0xCFA4BD3F, 0x2DEAA3E2, 0x9E204D02, 0xC8BD25AC,
+ 0xEADF55B3, 0xD5BD9E98, 0xE31231B2, 0x2AD5AD6C,
+ 0x954329DE, 0xADBE4528, 0xD8710F69, 0xAA51C90F,
+ 0xAA786BF6, 0x22513F1E, 0xAA51A79B, 0x2AD344CC,
+ 0x7B5A41F0, 0xD37CFBAD, 0x1B069505, 0x41ECE491,
+ 0xB4C332E6, 0x032268D4, 0xC9600ACC, 0xCE387E6D,
+ 0xBF6BB16C, 0x6A70FB78, 0x0D03D9C9, 0xD4DF39DE,
+ 0xE01063DA, 0x4736F464, 0x5AD328D8, 0xB347CC96,
+ 0x75BB0FC3, 0x98511BFB, 0x4FFBCC35, 0xB58BCF6A,
+ 0xE11F0ABC, 0xBFC5FE4A, 0xA70AEC10, 0xAC39570A,
+ 0x3F04442F, 0x6188B153, 0xE0397A2E, 0x5727CB79,
+ 0x9CEB418F, 0x1CACD68D, 0x2AD37C96, 0x0175CB9D,
+ 0xC69DFF09, 0xC75B65F0, 0xD9DB40D8, 0xEC0E7779,
+ 0x4744EAD4, 0xB11C3274, 0xDD24CB9E, 0x7E1C54BD,
+ 0xF01144F9, 0xD2240EB1, 0x9675B3FD, 0xA3AC3755,
+ 0xD47C27AF, 0x51C85F4D, 0x56907596, 0xA5BB15E6,
+ 0x580304F0, 0xCA042CF1, 0x011A37EA, 0x8DBFAADB,
+ 0x35BA3E4A, 0x3526FFA0, 0xC37B4D09, 0xBC306ED9,
+ 0x98A52666, 0x5648F725, 0xFF5E569D, 0x0CED63D0,
+ 0x7C63B2CF, 0x700B45E1, 0xD5EA50F1, 0x85A92872,
+ 0xAF1FBDA7, 0xD4234870, 0xA7870BF3, 0x2D3B4D79,
+ 0x42E04198, 0x0CD0EDE7, 0x26470DB8, 0xF881814C,
+ 0x474D6AD7, 0x7C0C5E5C, 0xD1231959, 0x381B7298,
+ 0xF5D2F4DB, 0xAB838653, 0x6E2F1E23, 0x83719C9E,
+ 0xBD91E046, 0x9A56456E, 0xDC39200C, 0x20C8C571,
+ 0x962BDA1C, 0xE1E696FF, 0xB141AB08, 0x7CCA89B9,
+ 0x1A69E783, 0x02CC4843, 0xA2F7C579, 0x429EF47D,
+ 0x427B169C, 0x5AC9F049, 0xDD8F0F00, 0x5C8165BF
+};
+
+static const u_int32_t cast_sbox2[256] = {
+ 0x1F201094, 0xEF0BA75B, 0x69E3CF7E, 0x393F4380,
+ 0xFE61CF7A, 0xEEC5207A, 0x55889C94, 0x72FC0651,
+ 0xADA7EF79, 0x4E1D7235, 0xD55A63CE, 0xDE0436BA,
+ 0x99C430EF, 0x5F0C0794, 0x18DCDB7D, 0xA1D6EFF3,
+ 0xA0B52F7B, 0x59E83605, 0xEE15B094, 0xE9FFD909,
+ 0xDC440086, 0xEF944459, 0xBA83CCB3, 0xE0C3CDFB,
+ 0xD1DA4181, 0x3B092AB1, 0xF997F1C1, 0xA5E6CF7B,
+ 0x01420DDB, 0xE4E7EF5B, 0x25A1FF41, 0xE180F806,
+ 0x1FC41080, 0x179BEE7A, 0xD37AC6A9, 0xFE5830A4,
+ 0x98DE8B7F, 0x77E83F4E, 0x79929269, 0x24FA9F7B,
+ 0xE113C85B, 0xACC40083, 0xD7503525, 0xF7EA615F,
+ 0x62143154, 0x0D554B63, 0x5D681121, 0xC866C359,
+ 0x3D63CF73, 0xCEE234C0, 0xD4D87E87, 0x5C672B21,
+ 0x071F6181, 0x39F7627F, 0x361E3084, 0xE4EB573B,
+ 0x602F64A4, 0xD63ACD9C, 0x1BBC4635, 0x9E81032D,
+ 0x2701F50C, 0x99847AB4, 0xA0E3DF79, 0xBA6CF38C,
+ 0x10843094, 0x2537A95E, 0xF46F6FFE, 0xA1FF3B1F,
+ 0x208CFB6A, 0x8F458C74, 0xD9E0A227, 0x4EC73A34,
+ 0xFC884F69, 0x3E4DE8DF, 0xEF0E0088, 0x3559648D,
+ 0x8A45388C, 0x1D804366, 0x721D9BFD, 0xA58684BB,
+ 0xE8256333, 0x844E8212, 0x128D8098, 0xFED33FB4,
+ 0xCE280AE1, 0x27E19BA5, 0xD5A6C252, 0xE49754BD,
+ 0xC5D655DD, 0xEB667064, 0x77840B4D, 0xA1B6A801,
+ 0x84DB26A9, 0xE0B56714, 0x21F043B7, 0xE5D05860,
+ 0x54F03084, 0x066FF472, 0xA31AA153, 0xDADC4755,
+ 0xB5625DBF, 0x68561BE6, 0x83CA6B94, 0x2D6ED23B,
+ 0xECCF01DB, 0xA6D3D0BA, 0xB6803D5C, 0xAF77A709,
+ 0x33B4A34C, 0x397BC8D6, 0x5EE22B95, 0x5F0E5304,
+ 0x81ED6F61, 0x20E74364, 0xB45E1378, 0xDE18639B,
+ 0x881CA122, 0xB96726D1, 0x8049A7E8, 0x22B7DA7B,
+ 0x5E552D25, 0x5272D237, 0x79D2951C, 0xC60D894C,
+ 0x488CB402, 0x1BA4FE5B, 0xA4B09F6B, 0x1CA815CF,
+ 0xA20C3005, 0x8871DF63, 0xB9DE2FCB, 0x0CC6C9E9,
+ 0x0BEEFF53, 0xE3214517, 0xB4542835, 0x9F63293C,
+ 0xEE41E729, 0x6E1D2D7C, 0x50045286, 0x1E6685F3,
+ 0xF33401C6, 0x30A22C95, 0x31A70850, 0x60930F13,
+ 0x73F98417, 0xA1269859, 0xEC645C44, 0x52C877A9,
+ 0xCDFF33A6, 0xA02B1741, 0x7CBAD9A2, 0x2180036F,
+ 0x50D99C08, 0xCB3F4861, 0xC26BD765, 0x64A3F6AB,
+ 0x80342676, 0x25A75E7B, 0xE4E6D1FC, 0x20C710E6,
+ 0xCDF0B680, 0x17844D3B, 0x31EEF84D, 0x7E0824E4,
+ 0x2CCB49EB, 0x846A3BAE, 0x8FF77888, 0xEE5D60F6,
+ 0x7AF75673, 0x2FDD5CDB, 0xA11631C1, 0x30F66F43,
+ 0xB3FAEC54, 0x157FD7FA, 0xEF8579CC, 0xD152DE58,
+ 0xDB2FFD5E, 0x8F32CE19, 0x306AF97A, 0x02F03EF8,
+ 0x99319AD5, 0xC242FA0F, 0xA7E3EBB0, 0xC68E4906,
+ 0xB8DA230C, 0x80823028, 0xDCDEF3C8, 0xD35FB171,
+ 0x088A1BC8, 0xBEC0C560, 0x61A3C9E8, 0xBCA8F54D,
+ 0xC72FEFFA, 0x22822E99, 0x82C570B4, 0xD8D94E89,
+ 0x8B1C34BC, 0x301E16E6, 0x273BE979, 0xB0FFEAA6,
+ 0x61D9B8C6, 0x00B24869, 0xB7FFCE3F, 0x08DC283B,
+ 0x43DAF65A, 0xF7E19798, 0x7619B72F, 0x8F1C9BA4,
+ 0xDC8637A0, 0x16A7D3B1, 0x9FC393B7, 0xA7136EEB,
+ 0xC6BCC63E, 0x1A513742, 0xEF6828BC, 0x520365D6,
+ 0x2D6A77AB, 0x3527ED4B, 0x821FD216, 0x095C6E2E,
+ 0xDB92F2FB, 0x5EEA29CB, 0x145892F5, 0x91584F7F,
+ 0x5483697B, 0x2667A8CC, 0x85196048, 0x8C4BACEA,
+ 0x833860D4, 0x0D23E0F9, 0x6C387E8A, 0x0AE6D249,
+ 0xB284600C, 0xD835731D, 0xDCB1C647, 0xAC4C56EA,
+ 0x3EBD81B3, 0x230EABB0, 0x6438BC87, 0xF0B5B1FA,
+ 0x8F5EA2B3, 0xFC184642, 0x0A036B7A, 0x4FB089BD,
+ 0x649DA589, 0xA345415E, 0x5C038323, 0x3E5D3BB9,
+ 0x43D79572, 0x7E6DD07C, 0x06DFDF1E, 0x6C6CC4EF,
+ 0x7160A539, 0x73BFBE70, 0x83877605, 0x4523ECF1
+};
+
+static const u_int32_t cast_sbox3[256] = {
+ 0x8DEFC240, 0x25FA5D9F, 0xEB903DBF, 0xE810C907,
+ 0x47607FFF, 0x369FE44B, 0x8C1FC644, 0xAECECA90,
+ 0xBEB1F9BF, 0xEEFBCAEA, 0xE8CF1950, 0x51DF07AE,
+ 0x920E8806, 0xF0AD0548, 0xE13C8D83, 0x927010D5,
+ 0x11107D9F, 0x07647DB9, 0xB2E3E4D4, 0x3D4F285E,
+ 0xB9AFA820, 0xFADE82E0, 0xA067268B, 0x8272792E,
+ 0x553FB2C0, 0x489AE22B, 0xD4EF9794, 0x125E3FBC,
+ 0x21FFFCEE, 0x825B1BFD, 0x9255C5ED, 0x1257A240,
+ 0x4E1A8302, 0xBAE07FFF, 0x528246E7, 0x8E57140E,
+ 0x3373F7BF, 0x8C9F8188, 0xA6FC4EE8, 0xC982B5A5,
+ 0xA8C01DB7, 0x579FC264, 0x67094F31, 0xF2BD3F5F,
+ 0x40FFF7C1, 0x1FB78DFC, 0x8E6BD2C1, 0x437BE59B,
+ 0x99B03DBF, 0xB5DBC64B, 0x638DC0E6, 0x55819D99,
+ 0xA197C81C, 0x4A012D6E, 0xC5884A28, 0xCCC36F71,
+ 0xB843C213, 0x6C0743F1, 0x8309893C, 0x0FEDDD5F,
+ 0x2F7FE850, 0xD7C07F7E, 0x02507FBF, 0x5AFB9A04,
+ 0xA747D2D0, 0x1651192E, 0xAF70BF3E, 0x58C31380,
+ 0x5F98302E, 0x727CC3C4, 0x0A0FB402, 0x0F7FEF82,
+ 0x8C96FDAD, 0x5D2C2AAE, 0x8EE99A49, 0x50DA88B8,
+ 0x8427F4A0, 0x1EAC5790, 0x796FB449, 0x8252DC15,
+ 0xEFBD7D9B, 0xA672597D, 0xADA840D8, 0x45F54504,
+ 0xFA5D7403, 0xE83EC305, 0x4F91751A, 0x925669C2,
+ 0x23EFE941, 0xA903F12E, 0x60270DF2, 0x0276E4B6,
+ 0x94FD6574, 0x927985B2, 0x8276DBCB, 0x02778176,
+ 0xF8AF918D, 0x4E48F79E, 0x8F616DDF, 0xE29D840E,
+ 0x842F7D83, 0x340CE5C8, 0x96BBB682, 0x93B4B148,
+ 0xEF303CAB, 0x984FAF28, 0x779FAF9B, 0x92DC560D,
+ 0x224D1E20, 0x8437AA88, 0x7D29DC96, 0x2756D3DC,
+ 0x8B907CEE, 0xB51FD240, 0xE7C07CE3, 0xE566B4A1,
+ 0xC3E9615E, 0x3CF8209D, 0x6094D1E3, 0xCD9CA341,
+ 0x5C76460E, 0x00EA983B, 0xD4D67881, 0xFD47572C,
+ 0xF76CEDD9, 0xBDA8229C, 0x127DADAA, 0x438A074E,
+ 0x1F97C090, 0x081BDB8A, 0x93A07EBE, 0xB938CA15,
+ 0x97B03CFF, 0x3DC2C0F8, 0x8D1AB2EC, 0x64380E51,
+ 0x68CC7BFB, 0xD90F2788, 0x12490181, 0x5DE5FFD4,
+ 0xDD7EF86A, 0x76A2E214, 0xB9A40368, 0x925D958F,
+ 0x4B39FFFA, 0xBA39AEE9, 0xA4FFD30B, 0xFAF7933B,
+ 0x6D498623, 0x193CBCFA, 0x27627545, 0x825CF47A,
+ 0x61BD8BA0, 0xD11E42D1, 0xCEAD04F4, 0x127EA392,
+ 0x10428DB7, 0x8272A972, 0x9270C4A8, 0x127DE50B,
+ 0x285BA1C8, 0x3C62F44F, 0x35C0EAA5, 0xE805D231,
+ 0x428929FB, 0xB4FCDF82, 0x4FB66A53, 0x0E7DC15B,
+ 0x1F081FAB, 0x108618AE, 0xFCFD086D, 0xF9FF2889,
+ 0x694BCC11, 0x236A5CAE, 0x12DECA4D, 0x2C3F8CC5,
+ 0xD2D02DFE, 0xF8EF5896, 0xE4CF52DA, 0x95155B67,
+ 0x494A488C, 0xB9B6A80C, 0x5C8F82BC, 0x89D36B45,
+ 0x3A609437, 0xEC00C9A9, 0x44715253, 0x0A874B49,
+ 0xD773BC40, 0x7C34671C, 0x02717EF6, 0x4FEB5536,
+ 0xA2D02FFF, 0xD2BF60C4, 0xD43F03C0, 0x50B4EF6D,
+ 0x07478CD1, 0x006E1888, 0xA2E53F55, 0xB9E6D4BC,
+ 0xA2048016, 0x97573833, 0xD7207D67, 0xDE0F8F3D,
+ 0x72F87B33, 0xABCC4F33, 0x7688C55D, 0x7B00A6B0,
+ 0x947B0001, 0x570075D2, 0xF9BB88F8, 0x8942019E,
+ 0x4264A5FF, 0x856302E0, 0x72DBD92B, 0xEE971B69,
+ 0x6EA22FDE, 0x5F08AE2B, 0xAF7A616D, 0xE5C98767,
+ 0xCF1FEBD2, 0x61EFC8C2, 0xF1AC2571, 0xCC8239C2,
+ 0x67214CB8, 0xB1E583D1, 0xB7DC3E62, 0x7F10BDCE,
+ 0xF90A5C38, 0x0FF0443D, 0x606E6DC6, 0x60543A49,
+ 0x5727C148, 0x2BE98A1D, 0x8AB41738, 0x20E1BE24,
+ 0xAF96DA0F, 0x68458425, 0x99833BE5, 0x600D457D,
+ 0x282F9350, 0x8334B362, 0xD91D1120, 0x2B6D8DA0,
+ 0x642B1E31, 0x9C305A00, 0x52BCE688, 0x1B03588A,
+ 0xF7BAEFD5, 0x4142ED9C, 0xA4315C11, 0x83323EC5,
+ 0xDFEF4636, 0xA133C501, 0xE9D3531C, 0xEE353783
+};
+
+static const u_int32_t cast_sbox4[256] = {
+ 0x9DB30420, 0x1FB6E9DE, 0xA7BE7BEF, 0xD273A298,
+ 0x4A4F7BDB, 0x64AD8C57, 0x85510443, 0xFA020ED1,
+ 0x7E287AFF, 0xE60FB663, 0x095F35A1, 0x79EBF120,
+ 0xFD059D43, 0x6497B7B1, 0xF3641F63, 0x241E4ADF,
+ 0x28147F5F, 0x4FA2B8CD, 0xC9430040, 0x0CC32220,
+ 0xFDD30B30, 0xC0A5374F, 0x1D2D00D9, 0x24147B15,
+ 0xEE4D111A, 0x0FCA5167, 0x71FF904C, 0x2D195FFE,
+ 0x1A05645F, 0x0C13FEFE, 0x081B08CA, 0x05170121,
+ 0x80530100, 0xE83E5EFE, 0xAC9AF4F8, 0x7FE72701,
+ 0xD2B8EE5F, 0x06DF4261, 0xBB9E9B8A, 0x7293EA25,
+ 0xCE84FFDF, 0xF5718801, 0x3DD64B04, 0xA26F263B,
+ 0x7ED48400, 0x547EEBE6, 0x446D4CA0, 0x6CF3D6F5,
+ 0x2649ABDF, 0xAEA0C7F5, 0x36338CC1, 0x503F7E93,
+ 0xD3772061, 0x11B638E1, 0x72500E03, 0xF80EB2BB,
+ 0xABE0502E, 0xEC8D77DE, 0x57971E81, 0xE14F6746,
+ 0xC9335400, 0x6920318F, 0x081DBB99, 0xFFC304A5,
+ 0x4D351805, 0x7F3D5CE3, 0xA6C866C6, 0x5D5BCCA9,
+ 0xDAEC6FEA, 0x9F926F91, 0x9F46222F, 0x3991467D,
+ 0xA5BF6D8E, 0x1143C44F, 0x43958302, 0xD0214EEB,
+ 0x022083B8, 0x3FB6180C, 0x18F8931E, 0x281658E6,
+ 0x26486E3E, 0x8BD78A70, 0x7477E4C1, 0xB506E07C,
+ 0xF32D0A25, 0x79098B02, 0xE4EABB81, 0x28123B23,
+ 0x69DEAD38, 0x1574CA16, 0xDF871B62, 0x211C40B7,
+ 0xA51A9EF9, 0x0014377B, 0x041E8AC8, 0x09114003,
+ 0xBD59E4D2, 0xE3D156D5, 0x4FE876D5, 0x2F91A340,
+ 0x557BE8DE, 0x00EAE4A7, 0x0CE5C2EC, 0x4DB4BBA6,
+ 0xE756BDFF, 0xDD3369AC, 0xEC17B035, 0x06572327,
+ 0x99AFC8B0, 0x56C8C391, 0x6B65811C, 0x5E146119,
+ 0x6E85CB75, 0xBE07C002, 0xC2325577, 0x893FF4EC,
+ 0x5BBFC92D, 0xD0EC3B25, 0xB7801AB7, 0x8D6D3B24,
+ 0x20C763EF, 0xC366A5FC, 0x9C382880, 0x0ACE3205,
+ 0xAAC9548A, 0xECA1D7C7, 0x041AFA32, 0x1D16625A,
+ 0x6701902C, 0x9B757A54, 0x31D477F7, 0x9126B031,
+ 0x36CC6FDB, 0xC70B8B46, 0xD9E66A48, 0x56E55A79,
+ 0x026A4CEB, 0x52437EFF, 0x2F8F76B4, 0x0DF980A5,
+ 0x8674CDE3, 0xEDDA04EB, 0x17A9BE04, 0x2C18F4DF,
+ 0xB7747F9D, 0xAB2AF7B4, 0xEFC34D20, 0x2E096B7C,
+ 0x1741A254, 0xE5B6A035, 0x213D42F6, 0x2C1C7C26,
+ 0x61C2F50F, 0x6552DAF9, 0xD2C231F8, 0x25130F69,
+ 0xD8167FA2, 0x0418F2C8, 0x001A96A6, 0x0D1526AB,
+ 0x63315C21, 0x5E0A72EC, 0x49BAFEFD, 0x187908D9,
+ 0x8D0DBD86, 0x311170A7, 0x3E9B640C, 0xCC3E10D7,
+ 0xD5CAD3B6, 0x0CAEC388, 0xF73001E1, 0x6C728AFF,
+ 0x71EAE2A1, 0x1F9AF36E, 0xCFCBD12F, 0xC1DE8417,
+ 0xAC07BE6B, 0xCB44A1D8, 0x8B9B0F56, 0x013988C3,
+ 0xB1C52FCA, 0xB4BE31CD, 0xD8782806, 0x12A3A4E2,
+ 0x6F7DE532, 0x58FD7EB6, 0xD01EE900, 0x24ADFFC2,
+ 0xF4990FC5, 0x9711AAC5, 0x001D7B95, 0x82E5E7D2,
+ 0x109873F6, 0x00613096, 0xC32D9521, 0xADA121FF,
+ 0x29908415, 0x7FBB977F, 0xAF9EB3DB, 0x29C9ED2A,
+ 0x5CE2A465, 0xA730F32C, 0xD0AA3FE8, 0x8A5CC091,
+ 0xD49E2CE7, 0x0CE454A9, 0xD60ACD86, 0x015F1919,
+ 0x77079103, 0xDEA03AF6, 0x78A8565E, 0xDEE356DF,
+ 0x21F05CBE, 0x8B75E387, 0xB3C50651, 0xB8A5C3EF,
+ 0xD8EEB6D2, 0xE523BE77, 0xC2154529, 0x2F69EFDF,
+ 0xAFE67AFB, 0xF470C4B2, 0xF3E0EB5B, 0xD6CC9876,
+ 0x39E4460C, 0x1FDA8538, 0x1987832F, 0xCA007367,
+ 0xA99144F8, 0x296B299E, 0x492FC295, 0x9266BEAB,
+ 0xB5676E69, 0x9BD3DDDA, 0xDF7E052F, 0xDB25701C,
+ 0x1B5E51EE, 0xF65324E6, 0x6AFCE36C, 0x0316CC04,
+ 0x8644213E, 0xB7DC59D0, 0x7965291F, 0xCCD6FD43,
+ 0x41823979, 0x932BCDF6, 0xB657C34D, 0x4EDFD282,
+ 0x7AE5290C, 0x3CB9536B, 0x851E20FE, 0x9833557E,
+ 0x13ECF0B0, 0xD3FFB372, 0x3F85C5C1, 0x0AEF7ED2
+};
+
+static const u_int32_t cast_sbox5[256] = {
+ 0x7EC90C04, 0x2C6E74B9, 0x9B0E66DF, 0xA6337911,
+ 0xB86A7FFF, 0x1DD358F5, 0x44DD9D44, 0x1731167F,
+ 0x08FBF1FA, 0xE7F511CC, 0xD2051B00, 0x735ABA00,
+ 0x2AB722D8, 0x386381CB, 0xACF6243A, 0x69BEFD7A,
+ 0xE6A2E77F, 0xF0C720CD, 0xC4494816, 0xCCF5C180,
+ 0x38851640, 0x15B0A848, 0xE68B18CB, 0x4CAADEFF,
+ 0x5F480A01, 0x0412B2AA, 0x259814FC, 0x41D0EFE2,
+ 0x4E40B48D, 0x248EB6FB, 0x8DBA1CFE, 0x41A99B02,
+ 0x1A550A04, 0xBA8F65CB, 0x7251F4E7, 0x95A51725,
+ 0xC106ECD7, 0x97A5980A, 0xC539B9AA, 0x4D79FE6A,
+ 0xF2F3F763, 0x68AF8040, 0xED0C9E56, 0x11B4958B,
+ 0xE1EB5A88, 0x8709E6B0, 0xD7E07156, 0x4E29FEA7,
+ 0x6366E52D, 0x02D1C000, 0xC4AC8E05, 0x9377F571,
+ 0x0C05372A, 0x578535F2, 0x2261BE02, 0xD642A0C9,
+ 0xDF13A280, 0x74B55BD2, 0x682199C0, 0xD421E5EC,
+ 0x53FB3CE8, 0xC8ADEDB3, 0x28A87FC9, 0x3D959981,
+ 0x5C1FF900, 0xFE38D399, 0x0C4EFF0B, 0x062407EA,
+ 0xAA2F4FB1, 0x4FB96976, 0x90C79505, 0xB0A8A774,
+ 0xEF55A1FF, 0xE59CA2C2, 0xA6B62D27, 0xE66A4263,
+ 0xDF65001F, 0x0EC50966, 0xDFDD55BC, 0x29DE0655,
+ 0x911E739A, 0x17AF8975, 0x32C7911C, 0x89F89468,
+ 0x0D01E980, 0x524755F4, 0x03B63CC9, 0x0CC844B2,
+ 0xBCF3F0AA, 0x87AC36E9, 0xE53A7426, 0x01B3D82B,
+ 0x1A9E7449, 0x64EE2D7E, 0xCDDBB1DA, 0x01C94910,
+ 0xB868BF80, 0x0D26F3FD, 0x9342EDE7, 0x04A5C284,
+ 0x636737B6, 0x50F5B616, 0xF24766E3, 0x8ECA36C1,
+ 0x136E05DB, 0xFEF18391, 0xFB887A37, 0xD6E7F7D4,
+ 0xC7FB7DC9, 0x3063FCDF, 0xB6F589DE, 0xEC2941DA,
+ 0x26E46695, 0xB7566419, 0xF654EFC5, 0xD08D58B7,
+ 0x48925401, 0xC1BACB7F, 0xE5FF550F, 0xB6083049,
+ 0x5BB5D0E8, 0x87D72E5A, 0xAB6A6EE1, 0x223A66CE,
+ 0xC62BF3CD, 0x9E0885F9, 0x68CB3E47, 0x086C010F,
+ 0xA21DE820, 0xD18B69DE, 0xF3F65777, 0xFA02C3F6,
+ 0x407EDAC3, 0xCBB3D550, 0x1793084D, 0xB0D70EBA,
+ 0x0AB378D5, 0xD951FB0C, 0xDED7DA56, 0x4124BBE4,
+ 0x94CA0B56, 0x0F5755D1, 0xE0E1E56E, 0x6184B5BE,
+ 0x580A249F, 0x94F74BC0, 0xE327888E, 0x9F7B5561,
+ 0xC3DC0280, 0x05687715, 0x646C6BD7, 0x44904DB3,
+ 0x66B4F0A3, 0xC0F1648A, 0x697ED5AF, 0x49E92FF6,
+ 0x309E374F, 0x2CB6356A, 0x85808573, 0x4991F840,
+ 0x76F0AE02, 0x083BE84D, 0x28421C9A, 0x44489406,
+ 0x736E4CB8, 0xC1092910, 0x8BC95FC6, 0x7D869CF4,
+ 0x134F616F, 0x2E77118D, 0xB31B2BE1, 0xAA90B472,
+ 0x3CA5D717, 0x7D161BBA, 0x9CAD9010, 0xAF462BA2,
+ 0x9FE459D2, 0x45D34559, 0xD9F2DA13, 0xDBC65487,
+ 0xF3E4F94E, 0x176D486F, 0x097C13EA, 0x631DA5C7,
+ 0x445F7382, 0x175683F4, 0xCDC66A97, 0x70BE0288,
+ 0xB3CDCF72, 0x6E5DD2F3, 0x20936079, 0x459B80A5,
+ 0xBE60E2DB, 0xA9C23101, 0xEBA5315C, 0x224E42F2,
+ 0x1C5C1572, 0xF6721B2C, 0x1AD2FFF3, 0x8C25404E,
+ 0x324ED72F, 0x4067B7FD, 0x0523138E, 0x5CA3BC78,
+ 0xDC0FD66E, 0x75922283, 0x784D6B17, 0x58EBB16E,
+ 0x44094F85, 0x3F481D87, 0xFCFEAE7B, 0x77B5FF76,
+ 0x8C2302BF, 0xAAF47556, 0x5F46B02A, 0x2B092801,
+ 0x3D38F5F7, 0x0CA81F36, 0x52AF4A8A, 0x66D5E7C0,
+ 0xDF3B0874, 0x95055110, 0x1B5AD7A8, 0xF61ED5AD,
+ 0x6CF6E479, 0x20758184, 0xD0CEFA65, 0x88F7BE58,
+ 0x4A046826, 0x0FF6F8F3, 0xA09C7F70, 0x5346ABA0,
+ 0x5CE96C28, 0xE176EDA3, 0x6BAC307F, 0x376829D2,
+ 0x85360FA9, 0x17E3FE2A, 0x24B79767, 0xF5A96B20,
+ 0xD6CD2595, 0x68FF1EBF, 0x7555442C, 0xF19F06BE,
+ 0xF9E0659A, 0xEEB9491D, 0x34010718, 0xBB30CAB8,
+ 0xE822FE15, 0x88570983, 0x750E6249, 0xDA627E55,
+ 0x5E76FFA8, 0xB1534546, 0x6D47DE08, 0xEFE9E7D4
+};
+
+static const u_int32_t cast_sbox6[256] = {
+ 0xF6FA8F9D, 0x2CAC6CE1, 0x4CA34867, 0xE2337F7C,
+ 0x95DB08E7, 0x016843B4, 0xECED5CBC, 0x325553AC,
+ 0xBF9F0960, 0xDFA1E2ED, 0x83F0579D, 0x63ED86B9,
+ 0x1AB6A6B8, 0xDE5EBE39, 0xF38FF732, 0x8989B138,
+ 0x33F14961, 0xC01937BD, 0xF506C6DA, 0xE4625E7E,
+ 0xA308EA99, 0x4E23E33C, 0x79CBD7CC, 0x48A14367,
+ 0xA3149619, 0xFEC94BD5, 0xA114174A, 0xEAA01866,
+ 0xA084DB2D, 0x09A8486F, 0xA888614A, 0x2900AF98,
+ 0x01665991, 0xE1992863, 0xC8F30C60, 0x2E78EF3C,
+ 0xD0D51932, 0xCF0FEC14, 0xF7CA07D2, 0xD0A82072,
+ 0xFD41197E, 0x9305A6B0, 0xE86BE3DA, 0x74BED3CD,
+ 0x372DA53C, 0x4C7F4448, 0xDAB5D440, 0x6DBA0EC3,
+ 0x083919A7, 0x9FBAEED9, 0x49DBCFB0, 0x4E670C53,
+ 0x5C3D9C01, 0x64BDB941, 0x2C0E636A, 0xBA7DD9CD,
+ 0xEA6F7388, 0xE70BC762, 0x35F29ADB, 0x5C4CDD8D,
+ 0xF0D48D8C, 0xB88153E2, 0x08A19866, 0x1AE2EAC8,
+ 0x284CAF89, 0xAA928223, 0x9334BE53, 0x3B3A21BF,
+ 0x16434BE3, 0x9AEA3906, 0xEFE8C36E, 0xF890CDD9,
+ 0x80226DAE, 0xC340A4A3, 0xDF7E9C09, 0xA694A807,
+ 0x5B7C5ECC, 0x221DB3A6, 0x9A69A02F, 0x68818A54,
+ 0xCEB2296F, 0x53C0843A, 0xFE893655, 0x25BFE68A,
+ 0xB4628ABC, 0xCF222EBF, 0x25AC6F48, 0xA9A99387,
+ 0x53BDDB65, 0xE76FFBE7, 0xE967FD78, 0x0BA93563,
+ 0x8E342BC1, 0xE8A11BE9, 0x4980740D, 0xC8087DFC,
+ 0x8DE4BF99, 0xA11101A0, 0x7FD37975, 0xDA5A26C0,
+ 0xE81F994F, 0x9528CD89, 0xFD339FED, 0xB87834BF,
+ 0x5F04456D, 0x22258698, 0xC9C4C83B, 0x2DC156BE,
+ 0x4F628DAA, 0x57F55EC5, 0xE2220ABE, 0xD2916EBF,
+ 0x4EC75B95, 0x24F2C3C0, 0x42D15D99, 0xCD0D7FA0,
+ 0x7B6E27FF, 0xA8DC8AF0, 0x7345C106, 0xF41E232F,
+ 0x35162386, 0xE6EA8926, 0x3333B094, 0x157EC6F2,
+ 0x372B74AF, 0x692573E4, 0xE9A9D848, 0xF3160289,
+ 0x3A62EF1D, 0xA787E238, 0xF3A5F676, 0x74364853,
+ 0x20951063, 0x4576698D, 0xB6FAD407, 0x592AF950,
+ 0x36F73523, 0x4CFB6E87, 0x7DA4CEC0, 0x6C152DAA,
+ 0xCB0396A8, 0xC50DFE5D, 0xFCD707AB, 0x0921C42F,
+ 0x89DFF0BB, 0x5FE2BE78, 0x448F4F33, 0x754613C9,
+ 0x2B05D08D, 0x48B9D585, 0xDC049441, 0xC8098F9B,
+ 0x7DEDE786, 0xC39A3373, 0x42410005, 0x6A091751,
+ 0x0EF3C8A6, 0x890072D6, 0x28207682, 0xA9A9F7BE,
+ 0xBF32679D, 0xD45B5B75, 0xB353FD00, 0xCBB0E358,
+ 0x830F220A, 0x1F8FB214, 0xD372CF08, 0xCC3C4A13,
+ 0x8CF63166, 0x061C87BE, 0x88C98F88, 0x6062E397,
+ 0x47CF8E7A, 0xB6C85283, 0x3CC2ACFB, 0x3FC06976,
+ 0x4E8F0252, 0x64D8314D, 0xDA3870E3, 0x1E665459,
+ 0xC10908F0, 0x513021A5, 0x6C5B68B7, 0x822F8AA0,
+ 0x3007CD3E, 0x74719EEF, 0xDC872681, 0x073340D4,
+ 0x7E432FD9, 0x0C5EC241, 0x8809286C, 0xF592D891,
+ 0x08A930F6, 0x957EF305, 0xB7FBFFBD, 0xC266E96F,
+ 0x6FE4AC98, 0xB173ECC0, 0xBC60B42A, 0x953498DA,
+ 0xFBA1AE12, 0x2D4BD736, 0x0F25FAAB, 0xA4F3FCEB,
+ 0xE2969123, 0x257F0C3D, 0x9348AF49, 0x361400BC,
+ 0xE8816F4A, 0x3814F200, 0xA3F94043, 0x9C7A54C2,
+ 0xBC704F57, 0xDA41E7F9, 0xC25AD33A, 0x54F4A084,
+ 0xB17F5505, 0x59357CBE, 0xEDBD15C8, 0x7F97C5AB,
+ 0xBA5AC7B5, 0xB6F6DEAF, 0x3A479C3A, 0x5302DA25,
+ 0x653D7E6A, 0x54268D49, 0x51A477EA, 0x5017D55B,
+ 0xD7D25D88, 0x44136C76, 0x0404A8C8, 0xB8E5A121,
+ 0xB81A928A, 0x60ED5869, 0x97C55B96, 0xEAEC991B,
+ 0x29935913, 0x01FDB7F1, 0x088E8DFA, 0x9AB6F6F5,
+ 0x3B4CBF9F, 0x4A5DE3AB, 0xE6051D35, 0xA0E1D855,
+ 0xD36B4CF1, 0xF544EDEB, 0xB0E93524, 0xBEBB8FBD,
+ 0xA2D762CF, 0x49C92F54, 0x38B5F331, 0x7128A454,
+ 0x48392905, 0xA65B1DB8, 0x851C97BD, 0xD675CF2F
+};
+
+static const u_int32_t cast_sbox7[256] = {
+ 0x85E04019, 0x332BF567, 0x662DBFFF, 0xCFC65693,
+ 0x2A8D7F6F, 0xAB9BC912, 0xDE6008A1, 0x2028DA1F,
+ 0x0227BCE7, 0x4D642916, 0x18FAC300, 0x50F18B82,
+ 0x2CB2CB11, 0xB232E75C, 0x4B3695F2, 0xB28707DE,
+ 0xA05FBCF6, 0xCD4181E9, 0xE150210C, 0xE24EF1BD,
+ 0xB168C381, 0xFDE4E789, 0x5C79B0D8, 0x1E8BFD43,
+ 0x4D495001, 0x38BE4341, 0x913CEE1D, 0x92A79C3F,
+ 0x089766BE, 0xBAEEADF4, 0x1286BECF, 0xB6EACB19,
+ 0x2660C200, 0x7565BDE4, 0x64241F7A, 0x8248DCA9,
+ 0xC3B3AD66, 0x28136086, 0x0BD8DFA8, 0x356D1CF2,
+ 0x107789BE, 0xB3B2E9CE, 0x0502AA8F, 0x0BC0351E,
+ 0x166BF52A, 0xEB12FF82, 0xE3486911, 0xD34D7516,
+ 0x4E7B3AFF, 0x5F43671B, 0x9CF6E037, 0x4981AC83,
+ 0x334266CE, 0x8C9341B7, 0xD0D854C0, 0xCB3A6C88,
+ 0x47BC2829, 0x4725BA37, 0xA66AD22B, 0x7AD61F1E,
+ 0x0C5CBAFA, 0x4437F107, 0xB6E79962, 0x42D2D816,
+ 0x0A961288, 0xE1A5C06E, 0x13749E67, 0x72FC081A,
+ 0xB1D139F7, 0xF9583745, 0xCF19DF58, 0xBEC3F756,
+ 0xC06EBA30, 0x07211B24, 0x45C28829, 0xC95E317F,
+ 0xBC8EC511, 0x38BC46E9, 0xC6E6FA14, 0xBAE8584A,
+ 0xAD4EBC46, 0x468F508B, 0x7829435F, 0xF124183B,
+ 0x821DBA9F, 0xAFF60FF4, 0xEA2C4E6D, 0x16E39264,
+ 0x92544A8B, 0x009B4FC3, 0xABA68CED, 0x9AC96F78,
+ 0x06A5B79A, 0xB2856E6E, 0x1AEC3CA9, 0xBE838688,
+ 0x0E0804E9, 0x55F1BE56, 0xE7E5363B, 0xB3A1F25D,
+ 0xF7DEBB85, 0x61FE033C, 0x16746233, 0x3C034C28,
+ 0xDA6D0C74, 0x79AAC56C, 0x3CE4E1AD, 0x51F0C802,
+ 0x98F8F35A, 0x1626A49F, 0xEED82B29, 0x1D382FE3,
+ 0x0C4FB99A, 0xBB325778, 0x3EC6D97B, 0x6E77A6A9,
+ 0xCB658B5C, 0xD45230C7, 0x2BD1408B, 0x60C03EB7,
+ 0xB9068D78, 0xA33754F4, 0xF430C87D, 0xC8A71302,
+ 0xB96D8C32, 0xEBD4E7BE, 0xBE8B9D2D, 0x7979FB06,
+ 0xE7225308, 0x8B75CF77, 0x11EF8DA4, 0xE083C858,
+ 0x8D6B786F, 0x5A6317A6, 0xFA5CF7A0, 0x5DDA0033,
+ 0xF28EBFB0, 0xF5B9C310, 0xA0EAC280, 0x08B9767A,
+ 0xA3D9D2B0, 0x79D34217, 0x021A718D, 0x9AC6336A,
+ 0x2711FD60, 0x438050E3, 0x069908A8, 0x3D7FEDC4,
+ 0x826D2BEF, 0x4EEB8476, 0x488DCF25, 0x36C9D566,
+ 0x28E74E41, 0xC2610ACA, 0x3D49A9CF, 0xBAE3B9DF,
+ 0xB65F8DE6, 0x92AEAF64, 0x3AC7D5E6, 0x9EA80509,
+ 0xF22B017D, 0xA4173F70, 0xDD1E16C3, 0x15E0D7F9,
+ 0x50B1B887, 0x2B9F4FD5, 0x625ABA82, 0x6A017962,
+ 0x2EC01B9C, 0x15488AA9, 0xD716E740, 0x40055A2C,
+ 0x93D29A22, 0xE32DBF9A, 0x058745B9, 0x3453DC1E,
+ 0xD699296E, 0x496CFF6F, 0x1C9F4986, 0xDFE2ED07,
+ 0xB87242D1, 0x19DE7EAE, 0x053E561A, 0x15AD6F8C,
+ 0x66626C1C, 0x7154C24C, 0xEA082B2A, 0x93EB2939,
+ 0x17DCB0F0, 0x58D4F2AE, 0x9EA294FB, 0x52CF564C,
+ 0x9883FE66, 0x2EC40581, 0x763953C3, 0x01D6692E,
+ 0xD3A0C108, 0xA1E7160E, 0xE4F2DFA6, 0x693ED285,
+ 0x74904698, 0x4C2B0EDD, 0x4F757656, 0x5D393378,
+ 0xA132234F, 0x3D321C5D, 0xC3F5E194, 0x4B269301,
+ 0xC79F022F, 0x3C997E7E, 0x5E4F9504, 0x3FFAFBBD,
+ 0x76F7AD0E, 0x296693F4, 0x3D1FCE6F, 0xC61E45BE,
+ 0xD3B5AB34, 0xF72BF9B7, 0x1B0434C0, 0x4E72B567,
+ 0x5592A33D, 0xB5229301, 0xCFD2A87F, 0x60AEB767,
+ 0x1814386B, 0x30BCC33D, 0x38A0C07D, 0xFD1606F2,
+ 0xC363519B, 0x589DD390, 0x5479F8E6, 0x1CB8D647,
+ 0x97FD61A9, 0xEA7759F4, 0x2D57539D, 0x569A58CF,
+ 0xE84E63AD, 0x462E1B78, 0x6580F87E, 0xF3817914,
+ 0x91DA55F4, 0x40A230F3, 0xD1988F35, 0xB6E318D2,
+ 0x3FFA50BC, 0x3D40F021, 0xC3C0BDAE, 0x4958C24C,
+ 0x518F36B2, 0x84B1D370, 0x0FEDCE83, 0x878DDADA,
+ 0xF2A279C7, 0x94E01BE8, 0x90716F4B, 0x954B8AA3
+};
+
+static const u_int32_t cast_sbox8[256] = {
+ 0xE216300D, 0xBBDDFFFC, 0xA7EBDABD, 0x35648095,
+ 0x7789F8B7, 0xE6C1121B, 0x0E241600, 0x052CE8B5,
+ 0x11A9CFB0, 0xE5952F11, 0xECE7990A, 0x9386D174,
+ 0x2A42931C, 0x76E38111, 0xB12DEF3A, 0x37DDDDFC,
+ 0xDE9ADEB1, 0x0A0CC32C, 0xBE197029, 0x84A00940,
+ 0xBB243A0F, 0xB4D137CF, 0xB44E79F0, 0x049EEDFD,
+ 0x0B15A15D, 0x480D3168, 0x8BBBDE5A, 0x669DED42,
+ 0xC7ECE831, 0x3F8F95E7, 0x72DF191B, 0x7580330D,
+ 0x94074251, 0x5C7DCDFA, 0xABBE6D63, 0xAA402164,
+ 0xB301D40A, 0x02E7D1CA, 0x53571DAE, 0x7A3182A2,
+ 0x12A8DDEC, 0xFDAA335D, 0x176F43E8, 0x71FB46D4,
+ 0x38129022, 0xCE949AD4, 0xB84769AD, 0x965BD862,
+ 0x82F3D055, 0x66FB9767, 0x15B80B4E, 0x1D5B47A0,
+ 0x4CFDE06F, 0xC28EC4B8, 0x57E8726E, 0x647A78FC,
+ 0x99865D44, 0x608BD593, 0x6C200E03, 0x39DC5FF6,
+ 0x5D0B00A3, 0xAE63AFF2, 0x7E8BD632, 0x70108C0C,
+ 0xBBD35049, 0x2998DF04, 0x980CF42A, 0x9B6DF491,
+ 0x9E7EDD53, 0x06918548, 0x58CB7E07, 0x3B74EF2E,
+ 0x522FFFB1, 0xD24708CC, 0x1C7E27CD, 0xA4EB215B,
+ 0x3CF1D2E2, 0x19B47A38, 0x424F7618, 0x35856039,
+ 0x9D17DEE7, 0x27EB35E6, 0xC9AFF67B, 0x36BAF5B8,
+ 0x09C467CD, 0xC18910B1, 0xE11DBF7B, 0x06CD1AF8,
+ 0x7170C608, 0x2D5E3354, 0xD4DE495A, 0x64C6D006,
+ 0xBCC0C62C, 0x3DD00DB3, 0x708F8F34, 0x77D51B42,
+ 0x264F620F, 0x24B8D2BF, 0x15C1B79E, 0x46A52564,
+ 0xF8D7E54E, 0x3E378160, 0x7895CDA5, 0x859C15A5,
+ 0xE6459788, 0xC37BC75F, 0xDB07BA0C, 0x0676A3AB,
+ 0x7F229B1E, 0x31842E7B, 0x24259FD7, 0xF8BEF472,
+ 0x835FFCB8, 0x6DF4C1F2, 0x96F5B195, 0xFD0AF0FC,
+ 0xB0FE134C, 0xE2506D3D, 0x4F9B12EA, 0xF215F225,
+ 0xA223736F, 0x9FB4C428, 0x25D04979, 0x34C713F8,
+ 0xC4618187, 0xEA7A6E98, 0x7CD16EFC, 0x1436876C,
+ 0xF1544107, 0xBEDEEE14, 0x56E9AF27, 0xA04AA441,
+ 0x3CF7C899, 0x92ECBAE6, 0xDD67016D, 0x151682EB,
+ 0xA842EEDF, 0xFDBA60B4, 0xF1907B75, 0x20E3030F,
+ 0x24D8C29E, 0xE139673B, 0xEFA63FB8, 0x71873054,
+ 0xB6F2CF3B, 0x9F326442, 0xCB15A4CC, 0xB01A4504,
+ 0xF1E47D8D, 0x844A1BE5, 0xBAE7DFDC, 0x42CBDA70,
+ 0xCD7DAE0A, 0x57E85B7A, 0xD53F5AF6, 0x20CF4D8C,
+ 0xCEA4D428, 0x79D130A4, 0x3486EBFB, 0x33D3CDDC,
+ 0x77853B53, 0x37EFFCB5, 0xC5068778, 0xE580B3E6,
+ 0x4E68B8F4, 0xC5C8B37E, 0x0D809EA2, 0x398FEB7C,
+ 0x132A4F94, 0x43B7950E, 0x2FEE7D1C, 0x223613BD,
+ 0xDD06CAA2, 0x37DF932B, 0xC4248289, 0xACF3EBC3,
+ 0x5715F6B7, 0xEF3478DD, 0xF267616F, 0xC148CBE4,
+ 0x9052815E, 0x5E410FAB, 0xB48A2465, 0x2EDA7FA4,
+ 0xE87B40E4, 0xE98EA084, 0x5889E9E1, 0xEFD390FC,
+ 0xDD07D35B, 0xDB485694, 0x38D7E5B2, 0x57720101,
+ 0x730EDEBC, 0x5B643113, 0x94917E4F, 0x503C2FBA,
+ 0x646F1282, 0x7523D24A, 0xE0779695, 0xF9C17A8F,
+ 0x7A5B2121, 0xD187B896, 0x29263A4D, 0xBA510CDF,
+ 0x81F47C9F, 0xAD1163ED, 0xEA7B5965, 0x1A00726E,
+ 0x11403092, 0x00DA6D77, 0x4A0CDD61, 0xAD1F4603,
+ 0x605BDFB0, 0x9EEDC364, 0x22EBE6A8, 0xCEE7D28A,
+ 0xA0E736A0, 0x5564A6B9, 0x10853209, 0xC7EB8F37,
+ 0x2DE705CA, 0x8951570F, 0xDF09822B, 0xBD691A6C,
+ 0xAA12E4F2, 0x87451C0F, 0xE0F6A27A, 0x3ADA4819,
+ 0x4CF1764F, 0x0D771C2B, 0x67CDB156, 0x350D8384,
+ 0x5938FA0F, 0x42399EF3, 0x36997B07, 0x0E84093D,
+ 0x4AA93E61, 0x8360D87B, 0x1FA98B0C, 0x1149382C,
+ 0xE97625A5, 0x0614D1B7, 0x0E25244B, 0x0C768347,
+ 0x589E8D82, 0x0D2059D1, 0xA466BB1E, 0xF8DA0A82,
+ 0x04F19130, 0xBA6E4EC0, 0x99265164, 0x1EE7230D,
+ 0x50B2AD80, 0xEAEE6801, 0x8DB2A283, 0xEA8BF59E
+};
+
diff --git a/rtems/freebsd/opencrypto/criov.c b/rtems/freebsd/opencrypto/criov.c
new file mode 100644
index 00000000..5c6fa7a2
--- /dev/null
+++ b/rtems/freebsd/opencrypto/criov.c
@@ -0,0 +1,200 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/* $OpenBSD: criov.c,v 1.9 2002/01/29 15:48:29 jason Exp $ */
+
+/*-
+ * Copyright (c) 1999 Theo de Raadt
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/proc.h>
+#include <rtems/freebsd/sys/errno.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/uio.h>
+
+#include <rtems/freebsd/opencrypto/cryptodev.h>
+
+/*
+ * This macro only avoids code duplication: the three functions below
+ * all need to skip a given number of bytes at the start of an iovec
+ * list in the same way.
+ */
+#define CUIO_SKIP() do { \
+ KASSERT(off >= 0, ("%s: off %d < 0", __func__, off)); \
+ KASSERT(len >= 0, ("%s: len %d < 0", __func__, len)); \
+ while (off > 0) { \
+ KASSERT(iol >= 0, ("%s: empty in skip", __func__)); \
+ if (off < iov->iov_len) \
+ break; \
+ off -= iov->iov_len; \
+ iol--; \
+ iov++; \
+ } \
+} while (0)
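+
+/*
+ * After CUIO_SKIP(), "iov" points at the iovec containing the first
+ * byte of interest, "off" is the remaining offset within that iovec
+ * and "iol" counts the iovecs left, including the current one.
+ */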
+
+void
+cuio_copydata(struct uio* uio, int off, int len, caddr_t cp)
+{
+ struct iovec *iov = uio->uio_iov;
+ int iol = uio->uio_iovcnt;
+ unsigned count;
+
+ CUIO_SKIP();
+ while (len > 0) {
+ KASSERT(iol >= 0, ("%s: empty", __func__));
+ count = min(iov->iov_len - off, len);
+ bcopy(((caddr_t)iov->iov_base) + off, cp, count);
+ len -= count;
+ cp += count;
+ off = 0;
+ iol--;
+ iov++;
+ }
+}
+
+void
+cuio_copyback(struct uio* uio, int off, int len, caddr_t cp)
+{
+ struct iovec *iov = uio->uio_iov;
+ int iol = uio->uio_iovcnt;
+ unsigned count;
+
+ CUIO_SKIP();
+ while (len > 0) {
+ KASSERT(iol >= 0, ("%s: empty", __func__));
+ count = min(iov->iov_len - off, len);
+ bcopy(cp, ((caddr_t)iov->iov_base) + off, count);
+ len -= count;
+ cp += count;
+ off = 0;
+ iol--;
+ iov++;
+ }
+}
+
+/*
+ * Return a pointer to the iovec containing byte position "loc" in the
+ * list, storing the offset within that iovec in *off.
+ */
+struct iovec *
+cuio_getptr(struct uio *uio, int loc, int *off)
+{
+ struct iovec *iov = uio->uio_iov;
+ int iol = uio->uio_iovcnt;
+
+	while (loc >= 0) {
+		/* Normal end of search */
+		if (loc < iov->iov_len) {
+			*off = loc;
+			return (iov);
+		}
+
+		loc -= iov->iov_len;
+		if (iol == 1) {
+			/*
+			 * Last iovec: the only location still valid is
+			 * the one just past its end of valid data.
+			 * (Testing for iol == 0 here would walk "iov"
+			 * past the end of the array first.)
+			 */
+			if (loc == 0) {
+				/* Point at the end of valid data */
+				*off = iov->iov_len;
+				return (iov);
+			}
+			return (NULL);
+		}
+		iov++;
+		iol--;
+	}
+
+ return (NULL);
+}
+
+/*
+ * Apply function f to the data in an iovec list starting "off" bytes from
+ * the beginning, continuing for "len" bytes.
+ */
+int
+cuio_apply(struct uio *uio, int off, int len, int (*f)(void *, void *, u_int),
+ void *arg)
+{
+ struct iovec *iov = uio->uio_iov;
+ int iol = uio->uio_iovcnt;
+ unsigned count;
+ int rval;
+
+ CUIO_SKIP();
+ while (len > 0) {
+ KASSERT(iol >= 0, ("%s: empty", __func__));
+ count = min(iov->iov_len - off, len);
+ rval = (*f)(arg, ((caddr_t)iov->iov_base) + off, count);
+ if (rval)
+ return (rval);
+ len -= count;
+ off = 0;
+ iol--;
+ iov++;
+ }
+ return (0);
+}
+
+void
+crypto_copyback(int flags, caddr_t buf, int off, int size, caddr_t in)
+{
+
+ if ((flags & CRYPTO_F_IMBUF) != 0)
+ m_copyback((struct mbuf *)buf, off, size, in);
+ else if ((flags & CRYPTO_F_IOV) != 0)
+ cuio_copyback((struct uio *)buf, off, size, in);
+ else
+ bcopy(in, buf + off, size);
+}
+
+void
+crypto_copydata(int flags, caddr_t buf, int off, int size, caddr_t out)
+{
+
+ if ((flags & CRYPTO_F_IMBUF) != 0)
+ m_copydata((struct mbuf *)buf, off, size, out);
+ else if ((flags & CRYPTO_F_IOV) != 0)
+ cuio_copydata((struct uio *)buf, off, size, out);
+ else
+ bcopy(buf + off, out, size);
+}
+
+int
+crypto_apply(int flags, caddr_t buf, int off, int len,
+ int (*f)(void *, void *, u_int), void *arg)
+{
+ int error;
+
+ if ((flags & CRYPTO_F_IMBUF) != 0)
+ error = m_apply((struct mbuf *)buf, off, len, f, arg);
+ else if ((flags & CRYPTO_F_IOV) != 0)
+ error = cuio_apply((struct uio *)buf, off, len, f, arg);
+ else
+ error = (*f)(arg, buf + off, len);
+ return (error);
+}
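+
+#if 0
+/*
+ * Usage sketch (illustrative only, not compiled in): CRYPTO_F_IMBUF
+ * and CRYPTO_F_IOV select the buffer interpretation; with neither
+ * flag set, "buf" is treated as a plain contiguous buffer.
+ */
+static void
+criov_example(void)
+{
+	char buf[64];
+	char tmp[16];
+
+	bzero(buf, sizeof(buf));
+	/* Read 16 bytes starting at offset 8, then write them back. */
+	crypto_copydata(0, buf, 8, sizeof(tmp), tmp);
+	crypto_copyback(0, buf, 8, sizeof(tmp), tmp);
+}
+#endif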
diff --git a/rtems/freebsd/opencrypto/crypto.c b/rtems/freebsd/opencrypto/crypto.c
new file mode 100644
index 00000000..67852b6b
--- /dev/null
+++ b/rtems/freebsd/opencrypto/crypto.c
@@ -0,0 +1,1578 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*-
+ * Copyright (c) 2002-2006 Sam Leffler. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * Cryptographic Subsystem.
+ *
+ * This code is derived from the Openbsd Cryptographic Framework (OCF)
+ * that has the copyright shown below. Very little of the original
+ * code remains.
+ */
+
+/*-
+ * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
+ *
+ * This code was written by Angelos D. Keromytis in Athens, Greece, in
+ * February 2000. Network Security Technologies Inc. (NSTI) kindly
+ * supported the development of this code.
+ *
+ * Copyright (c) 2000, 2001 Angelos D. Keromytis
+ *
+ * Permission to use, copy, and modify this software with or without fee
+ * is hereby granted, provided that this entire notice is included in
+ * all source code copies of any software which is or includes a copy or
+ * modification of this software.
+ *
+ * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
+ * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
+ * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
+ * PURPOSE.
+ */
+
+#define CRYPTO_TIMING /* enable timing support */
+
+#include <rtems/freebsd/local/opt_ddb.h>
+#include <rtems/freebsd/local/opt_kdtrace.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/eventhandler.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/kthread.h>
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/module.h>
+#include <rtems/freebsd/sys/mutex.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/proc.h>
+#include <rtems/freebsd/sys/sdt.h>
+#include <rtems/freebsd/sys/sysctl.h>
+
+#include <rtems/freebsd/ddb/ddb.h>
+
+#include <rtems/freebsd/vm/uma.h>
+#include <rtems/freebsd/opencrypto/cryptodev.h>
+#include <rtems/freebsd/opencrypto/xform.h> /* XXX for M_XDATA */
+
+#include <rtems/freebsd/sys/kobj.h>
+#include <rtems/freebsd/sys/bus.h>
+#include <rtems/freebsd/local/cryptodev_if.h>
+
+#if defined(__i386__) || defined(__amd64__)
+#include <rtems/freebsd/machine/pcb.h>
+#endif
+
+SDT_PROVIDER_DEFINE(opencrypto);
+
+/*
+ * Crypto drivers register themselves by allocating a slot in the
+ * crypto_drivers table with crypto_get_driverid() and then registering
+ * each algorithm they support with crypto_register() and crypto_kregister().
+ */
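+/*
+ * For illustration, e.g. from a driver attach routine (a hypothetical
+ * sketch, where "dev" stands for the driver's device_t):
+ *
+ *	int32_t id = crypto_get_driverid(dev, CRYPTOCAP_F_HARDWARE);
+ *	if (id >= 0)
+ *		crypto_register(id, CRYPTO_DES_CBC, 0, 0);
+ */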
+static struct mtx crypto_drivers_mtx; /* lock on driver table */
+#define CRYPTO_DRIVER_LOCK() mtx_lock(&crypto_drivers_mtx)
+#define CRYPTO_DRIVER_UNLOCK() mtx_unlock(&crypto_drivers_mtx)
+#define CRYPTO_DRIVER_ASSERT() mtx_assert(&crypto_drivers_mtx, MA_OWNED)
+
+/*
+ * Crypto device/driver capabilities structure.
+ *
+ * Synchronization:
+ * (d) - protected by CRYPTO_DRIVER_LOCK()
+ * (q) - protected by CRYPTO_Q_LOCK()
+ * Not tagged fields are read-only.
+ */
+struct cryptocap {
+ device_t cc_dev; /* (d) device/driver */
+ u_int32_t cc_sessions; /* (d) # of sessions */
+	u_int32_t cc_koperations;	/* (d) # of asym operations */
+ /*
+ * Largest possible operator length (in bits) for each type of
+ * encryption algorithm. XXX not used
+ */
+ u_int16_t cc_max_op_len[CRYPTO_ALGORITHM_MAX + 1];
+ u_int8_t cc_alg[CRYPTO_ALGORITHM_MAX + 1];
+ u_int8_t cc_kalg[CRK_ALGORITHM_MAX + 1];
+
+ int cc_flags; /* (d) flags */
+#define CRYPTOCAP_F_CLEANUP 0x80000000 /* needs resource cleanup */
+ int cc_qblocked; /* (q) symmetric q blocked */
+ int cc_kqblocked; /* (q) asymmetric q blocked */
+};
+static struct cryptocap *crypto_drivers = NULL;
+static int crypto_drivers_num = 0;
+
+/*
+ * There are two queues for crypto requests; one for symmetric (e.g.
+ * cipher) operations and one for asymmetric (e.g. MOD) operations.
+ * A single mutex is used to lock access to both queues. We could
+ * have one per-queue but having one simplifies handling of block/unblock
+ * operations.
+ */
+static int crp_sleep = 0;
+static TAILQ_HEAD(,cryptop) crp_q; /* request queues */
+static TAILQ_HEAD(,cryptkop) crp_kq;
+static struct mtx crypto_q_mtx;
+#define CRYPTO_Q_LOCK() mtx_lock(&crypto_q_mtx)
+#define CRYPTO_Q_UNLOCK() mtx_unlock(&crypto_q_mtx)
+
+/*
+ * There are two queues for processing completed crypto requests; one
+ * for the symmetric and one for the asymmetric ops. We only need one
+ * but have two to avoid type futzing (cryptop vs. cryptkop). A single
+ * mutex is used to lock access to both queues. Note that this lock
+ * must be separate from the lock on request queues to ensure driver
+ * callbacks don't generate lock order reversals.
+ */
+static TAILQ_HEAD(,cryptop) crp_ret_q; /* callback queues */
+static TAILQ_HEAD(,cryptkop) crp_ret_kq;
+static struct mtx crypto_ret_q_mtx;
+#define CRYPTO_RETQ_LOCK() mtx_lock(&crypto_ret_q_mtx)
+#define CRYPTO_RETQ_UNLOCK() mtx_unlock(&crypto_ret_q_mtx)
+#define CRYPTO_RETQ_EMPTY() (TAILQ_EMPTY(&crp_ret_q) && TAILQ_EMPTY(&crp_ret_kq))
+
+static uma_zone_t cryptop_zone;
+static uma_zone_t cryptodesc_zone;
+
+int crypto_userasymcrypto = 1; /* userland may do asym crypto reqs */
+SYSCTL_INT(_kern, OID_AUTO, userasymcrypto, CTLFLAG_RW,
+ &crypto_userasymcrypto, 0,
+ "Enable/disable user-mode access to asymmetric crypto support");
+int crypto_devallowsoft = 0; /* only use hardware crypto for asym */
+SYSCTL_INT(_kern, OID_AUTO, cryptodevallowsoft, CTLFLAG_RW,
+ &crypto_devallowsoft, 0,
+ "Enable/disable use of software asym crypto support");
+
+MALLOC_DEFINE(M_CRYPTO_DATA, "crypto", "crypto session records");
+
+static void crypto_proc(void);
+static struct proc *cryptoproc;
+static void crypto_ret_proc(void);
+static struct proc *cryptoretproc;
+static void crypto_destroy(void);
+static int crypto_invoke(struct cryptocap *cap, struct cryptop *crp, int hint);
+static int crypto_kinvoke(struct cryptkop *krp, int flags);
+
+static struct cryptostats cryptostats;
+SYSCTL_STRUCT(_kern, OID_AUTO, crypto_stats, CTLFLAG_RW, &cryptostats,
+ cryptostats, "Crypto system statistics");
+
+#ifdef CRYPTO_TIMING
+static int crypto_timing = 0;
+SYSCTL_INT(_debug, OID_AUTO, crypto_timing, CTLFLAG_RW,
+ &crypto_timing, 0, "Enable/disable crypto timing support");
+#endif
+
+static int
+crypto_init(void)
+{
+ int error;
+
+ mtx_init(&crypto_drivers_mtx, "crypto", "crypto driver table",
+ MTX_DEF|MTX_QUIET);
+
+ TAILQ_INIT(&crp_q);
+ TAILQ_INIT(&crp_kq);
+ mtx_init(&crypto_q_mtx, "crypto", "crypto op queues", MTX_DEF);
+
+ TAILQ_INIT(&crp_ret_q);
+ TAILQ_INIT(&crp_ret_kq);
+ mtx_init(&crypto_ret_q_mtx, "crypto", "crypto return queues", MTX_DEF);
+
+ cryptop_zone = uma_zcreate("cryptop", sizeof (struct cryptop),
+ 0, 0, 0, 0,
+ UMA_ALIGN_PTR, UMA_ZONE_ZINIT);
+ cryptodesc_zone = uma_zcreate("cryptodesc", sizeof (struct cryptodesc),
+ 0, 0, 0, 0,
+ UMA_ALIGN_PTR, UMA_ZONE_ZINIT);
+ if (cryptodesc_zone == NULL || cryptop_zone == NULL) {
+ printf("crypto_init: cannot setup crypto zones\n");
+ error = ENOMEM;
+ goto bad;
+ }
+
+ crypto_drivers_num = CRYPTO_DRIVERS_INITIAL;
+ crypto_drivers = malloc(crypto_drivers_num *
+ sizeof(struct cryptocap), M_CRYPTO_DATA, M_NOWAIT | M_ZERO);
+ if (crypto_drivers == NULL) {
+ printf("crypto_init: cannot setup crypto drivers\n");
+ error = ENOMEM;
+ goto bad;
+ }
+
+ error = kproc_create((void (*)(void *)) crypto_proc, NULL,
+ &cryptoproc, 0, 0, "crypto");
+ if (error) {
+ printf("crypto_init: cannot start crypto thread; error %d",
+ error);
+ goto bad;
+ }
+
+ error = kproc_create((void (*)(void *)) crypto_ret_proc, NULL,
+ &cryptoretproc, 0, 0, "crypto returns");
+ if (error) {
+ printf("crypto_init: cannot start cryptoret thread; error %d",
+ error);
+ goto bad;
+ }
+ return 0;
+bad:
+ crypto_destroy();
+ return error;
+}
+
+/*
+ * Signal a crypto thread to terminate. We use the driver
+ * table lock to synchronize the sleep/wakeups so that we
+ * are sure the threads have terminated before we release
+ * the data structures they use. See crypto_finis below
+ * for the other half of this song-and-dance.
+ */
+static void
+crypto_terminate(struct proc **pp, void *q)
+{
+ struct proc *p;
+
+ mtx_assert(&crypto_drivers_mtx, MA_OWNED);
+ p = *pp;
+ *pp = NULL;
+ if (p) {
+ wakeup_one(q);
+ PROC_LOCK(p); /* NB: insure we don't miss wakeup */
+ CRYPTO_DRIVER_UNLOCK(); /* let crypto_finis progress */
+ msleep(p, &p->p_mtx, PWAIT, "crypto_destroy", 0);
+ PROC_UNLOCK(p);
+ CRYPTO_DRIVER_LOCK();
+ }
+}
+
+static void
+crypto_destroy(void)
+{
+ /*
+ * Terminate any crypto threads.
+ */
+ CRYPTO_DRIVER_LOCK();
+ crypto_terminate(&cryptoproc, &crp_q);
+ crypto_terminate(&cryptoretproc, &crp_ret_q);
+ CRYPTO_DRIVER_UNLOCK();
+
+ /* XXX flush queues??? */
+
+ /*
+ * Reclaim dynamically allocated resources.
+ */
+ if (crypto_drivers != NULL)
+ free(crypto_drivers, M_CRYPTO_DATA);
+
+ if (cryptodesc_zone != NULL)
+ uma_zdestroy(cryptodesc_zone);
+ if (cryptop_zone != NULL)
+ uma_zdestroy(cryptop_zone);
+ mtx_destroy(&crypto_q_mtx);
+ mtx_destroy(&crypto_ret_q_mtx);
+ mtx_destroy(&crypto_drivers_mtx);
+}
+
+static struct cryptocap *
+crypto_checkdriver(u_int32_t hid)
+{
+ if (crypto_drivers == NULL)
+ return NULL;
+ return (hid >= crypto_drivers_num ? NULL : &crypto_drivers[hid]);
+}
+
+/*
+ * Check a driver's list of supported algorithms against a session
+ * request; return non-zero if every requested algorithm is supported.
+ */
+static int
+driver_suitable(const struct cryptocap *cap, const struct cryptoini *cri)
+{
+ const struct cryptoini *cr;
+
+ /* See if all the algorithms are supported. */
+ for (cr = cri; cr; cr = cr->cri_next)
+ if (cap->cc_alg[cr->cri_alg] == 0)
+ return 0;
+ return 1;
+}
+
+/*
+ * Select a driver for a new session that supports the specified
+ * algorithms and, optionally, is constrained according to the flags.
+ * The algorithm we use here is pretty stupid; of the drivers
+ * that support all the algorithms we need, we choose the one
+ * with the fewest active sessions.  We prefer hardware-backed
+ * drivers to software ones.
+ *
+ * XXX We need more smarts here (in real life too, but that's
+ * XXX another story altogether).
+ */
+static struct cryptocap *
+crypto_select_driver(const struct cryptoini *cri, int flags)
+{
+ struct cryptocap *cap, *best;
+ int match, hid;
+
+ CRYPTO_DRIVER_ASSERT();
+
+ /*
+ * Look first for hardware crypto devices if permitted.
+ */
+ if (flags & CRYPTOCAP_F_HARDWARE)
+ match = CRYPTOCAP_F_HARDWARE;
+ else
+ match = CRYPTOCAP_F_SOFTWARE;
+ best = NULL;
+again:
+ for (hid = 0; hid < crypto_drivers_num; hid++) {
+ cap = &crypto_drivers[hid];
+ /*
+ * If it's not initialized, is in the process of
+ * going away, or is not appropriate (hardware
+ * or software based on match), then skip.
+ */
+ if (cap->cc_dev == NULL ||
+ (cap->cc_flags & CRYPTOCAP_F_CLEANUP) ||
+ (cap->cc_flags & match) == 0)
+ continue;
+
+ /* verify all the algorithms are supported. */
+ if (driver_suitable(cap, cri)) {
+ if (best == NULL ||
+ cap->cc_sessions < best->cc_sessions)
+ best = cap;
+ }
+ }
+ if (best != NULL)
+ return best;
+ if (match == CRYPTOCAP_F_HARDWARE && (flags & CRYPTOCAP_F_SOFTWARE)) {
+ /* sort of an Algol 68-style for loop */
+ match = CRYPTOCAP_F_SOFTWARE;
+ goto again;
+ }
+ return best;
+}
+
+/*
+ * Create a new session. The crid argument specifies a crypto
+ * driver to use or constraints on a driver to select (hardware
+ * only, software only, either). Whatever driver is selected
+ * must be capable of the requested crypto algorithms.
+ */
+int
+crypto_newsession(u_int64_t *sid, struct cryptoini *cri, int crid)
+{
+ struct cryptocap *cap;
+ u_int32_t hid, lid;
+ int err;
+
+ CRYPTO_DRIVER_LOCK();
+ if ((crid & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) {
+ /*
+ * Use specified driver; verify it is capable.
+ */
+ cap = crypto_checkdriver(crid);
+ if (cap != NULL && !driver_suitable(cap, cri))
+ cap = NULL;
+ } else {
+ /*
+ * No requested driver; select based on crid flags.
+ */
+ cap = crypto_select_driver(cri, crid);
+ /*
+ * if NULL then can't do everything in one session.
+ * XXX Fix this. We need to inject a "virtual" session
+ * XXX layer right about here.
+ */
+ }
+ if (cap != NULL) {
+ /* Call the driver initialization routine. */
+ hid = cap - crypto_drivers;
+ lid = hid; /* Pass the driver ID. */
+ err = CRYPTODEV_NEWSESSION(cap->cc_dev, &lid, cri);
+ if (err == 0) {
+ (*sid) = (cap->cc_flags & 0xff000000)
+ | (hid & 0x00ffffff);
+ (*sid) <<= 32;
+ (*sid) |= (lid & 0xffffffff);
+ cap->cc_sessions++;
+ }
+ } else
+ err = EINVAL;
+ CRYPTO_DRIVER_UNLOCK();
+ return err;
+}
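+
+/*
+ * NB: an illustrative sketch (not authoritative) of the session id
+ * constructed above; CRYPTO_SESID2HID and CRYPTO_SESID2CAPS decode
+ * the upper word:
+ *
+ *	63        56 55                 32 31                    0
+ *	+-----------+---------------------+----------------------+
+ *	| caps byte | hid (driver index)  | lid (driver-local id)|
+ *	+-----------+---------------------+----------------------+
+ *
+ * where the caps byte is the top byte of the driver's cc_flags.
+ */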
+
+static void
+crypto_remove(struct cryptocap *cap)
+{
+
+ mtx_assert(&crypto_drivers_mtx, MA_OWNED);
+ if (cap->cc_sessions == 0 && cap->cc_koperations == 0)
+ bzero(cap, sizeof(*cap));
+}
+
+/*
+ * Delete an existing session (or a reserved session on an unregistered
+ * driver).
+ */
+int
+crypto_freesession(u_int64_t sid)
+{
+ struct cryptocap *cap;
+ u_int32_t hid;
+ int err;
+
+ CRYPTO_DRIVER_LOCK();
+
+ if (crypto_drivers == NULL) {
+ err = EINVAL;
+ goto done;
+ }
+
+ /* Determine the driver (hardware) ID. */
+ hid = CRYPTO_SESID2HID(sid);
+
+ if (hid >= crypto_drivers_num) {
+ err = ENOENT;
+ goto done;
+ }
+ cap = &crypto_drivers[hid];
+
+ if (cap->cc_sessions)
+ cap->cc_sessions--;
+
+ /* Call the driver cleanup routine, if available. */
+ err = CRYPTODEV_FREESESSION(cap->cc_dev, sid);
+
+ if (cap->cc_flags & CRYPTOCAP_F_CLEANUP)
+ crypto_remove(cap);
+
+done:
+ CRYPTO_DRIVER_UNLOCK();
+ return err;
+}
+
+/*
+ * Return an unused driver id. Used by drivers prior to registering
+ * support for the algorithms they handle.
+ */
+int32_t
+crypto_get_driverid(device_t dev, int flags)
+{
+ struct cryptocap *newdrv;
+ int i;
+
+ if ((flags & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) {
+ printf("%s: no flags specified when registering driver\n",
+ device_get_nameunit(dev));
+ return -1;
+ }
+
+ CRYPTO_DRIVER_LOCK();
+
+ for (i = 0; i < crypto_drivers_num; i++) {
+ if (crypto_drivers[i].cc_dev == NULL &&
+ (crypto_drivers[i].cc_flags & CRYPTOCAP_F_CLEANUP) == 0) {
+ break;
+ }
+ }
+
+ /* Out of entries, allocate some more. */
+ if (i == crypto_drivers_num) {
+ /* Be careful about wrap-around. */
+ if (2 * crypto_drivers_num <= crypto_drivers_num) {
+ CRYPTO_DRIVER_UNLOCK();
+ printf("crypto: driver count wraparound!\n");
+ return -1;
+ }
+
+ newdrv = malloc(2 * crypto_drivers_num *
+ sizeof(struct cryptocap), M_CRYPTO_DATA, M_NOWAIT|M_ZERO);
+ if (newdrv == NULL) {
+ CRYPTO_DRIVER_UNLOCK();
+ printf("crypto: no space to expand driver table!\n");
+ return -1;
+ }
+
+ bcopy(crypto_drivers, newdrv,
+ crypto_drivers_num * sizeof(struct cryptocap));
+
+ crypto_drivers_num *= 2;
+
+ free(crypto_drivers, M_CRYPTO_DATA);
+ crypto_drivers = newdrv;
+ }
+
+ /* NB: state is zero'd on free */
+ crypto_drivers[i].cc_sessions = 1; /* Mark */
+ crypto_drivers[i].cc_dev = dev;
+ crypto_drivers[i].cc_flags = flags;
+ if (bootverbose)
+ printf("crypto: assign %s driver id %u, flags %u\n",
+ device_get_nameunit(dev), i, flags);
+
+ CRYPTO_DRIVER_UNLOCK();
+
+ return i;
+}
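+
+/*
+ * NB: illustrative only -- a typical driver attach sequence using
+ * crypto_get_driverid and crypto_register below (the device and
+ * algorithm choices here are hypothetical):
+ *
+ *	int32_t cid = crypto_get_driverid(dev, CRYPTOCAP_F_HARDWARE);
+ *	if (cid < 0)
+ *		return (ENXIO);
+ *	crypto_register(cid, CRYPTO_AES_CBC, 0, 0);
+ *	crypto_register(cid, CRYPTO_SHA1_HMAC, 0, 0);
+ */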
+
+/*
+ * Look up a driver by name.  We match against the full device
+ * name and unit, and against just the name.  The latter gives
+ * us simple wildcarding by device name.  On success return the
+ * driver/hardware identifier; otherwise return -1.
+ */
+int
+crypto_find_driver(const char *match)
+{
+ int i, len = strlen(match);
+
+ CRYPTO_DRIVER_LOCK();
+ for (i = 0; i < crypto_drivers_num; i++) {
+ device_t dev = crypto_drivers[i].cc_dev;
+ if (dev == NULL ||
+ (crypto_drivers[i].cc_flags & CRYPTOCAP_F_CLEANUP))
+ continue;
+ if (strncmp(match, device_get_nameunit(dev), len) == 0 ||
+ strncmp(match, device_get_name(dev), len) == 0)
+ break;
+ }
+ CRYPTO_DRIVER_UNLOCK();
+ return i < crypto_drivers_num ? i : -1;
+}
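+
+/*
+ * NB: illustrative -- assuming a (hypothetical) attached device
+ * "hifn0", both calls below resolve to the same driver id; the
+ * first matches name+unit exactly, the second wildcards by name:
+ *
+ *	crypto_find_driver("hifn0");
+ *	crypto_find_driver("hifn");
+ */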
+
+/*
+ * Return the device_t for the specified driver or NULL
+ * if the driver identifier is invalid.
+ */
+device_t
+crypto_find_device_byhid(int hid)
+{
+ struct cryptocap *cap = crypto_checkdriver(hid);
+ return cap != NULL ? cap->cc_dev : NULL;
+}
+
+/*
+ * Return the device/driver capabilities.
+ */
+int
+crypto_getcaps(int hid)
+{
+ struct cryptocap *cap = crypto_checkdriver(hid);
+ return cap != NULL ? cap->cc_flags : 0;
+}
+
+/*
+ * Register support for a key-related algorithm. This routine
+ * is called once for each algorithm supported by a driver.
+ */
+int
+crypto_kregister(u_int32_t driverid, int kalg, u_int32_t flags)
+{
+ struct cryptocap *cap;
+ int err;
+
+ CRYPTO_DRIVER_LOCK();
+
+ cap = crypto_checkdriver(driverid);
+ if (cap != NULL &&
+ (CRK_ALGORITM_MIN <= kalg && kalg <= CRK_ALGORITHM_MAX)) {
+ /*
+ * XXX Do some performance testing to determine placing.
+ * XXX We probably need an auxiliary data structure that
+ * XXX describes relative performances.
+ */
+
+ cap->cc_kalg[kalg] = flags | CRYPTO_ALG_FLAG_SUPPORTED;
+ if (bootverbose)
+ printf("crypto: %s registers key alg %u flags %u\n"
+ , device_get_nameunit(cap->cc_dev)
+ , kalg
+ , flags
+ );
+ err = 0;
+ } else
+ err = EINVAL;
+
+ CRYPTO_DRIVER_UNLOCK();
+ return err;
+}
+
+/*
+ * Register support for a non-key-related algorithm. This routine
+ * is called once for each such algorithm supported by a driver.
+ */
+int
+crypto_register(u_int32_t driverid, int alg, u_int16_t maxoplen,
+ u_int32_t flags)
+{
+ struct cryptocap *cap;
+ int err;
+
+ CRYPTO_DRIVER_LOCK();
+
+ cap = crypto_checkdriver(driverid);
+ /* NB: algorithms are in the range [1..max] */
+ if (cap != NULL &&
+ (CRYPTO_ALGORITHM_MIN <= alg && alg <= CRYPTO_ALGORITHM_MAX)) {
+ /*
+ * XXX Do some performance testing to determine placing.
+ * XXX We probably need an auxiliary data structure that
+ * XXX describes relative performances.
+ */
+
+ cap->cc_alg[alg] = flags | CRYPTO_ALG_FLAG_SUPPORTED;
+ cap->cc_max_op_len[alg] = maxoplen;
+ if (bootverbose)
+ printf("crypto: %s registers alg %u flags %u maxoplen %u\n"
+ , device_get_nameunit(cap->cc_dev)
+ , alg
+ , flags
+ , maxoplen
+ );
+ cap->cc_sessions = 0; /* Unmark */
+ err = 0;
+ } else
+ err = EINVAL;
+
+ CRYPTO_DRIVER_UNLOCK();
+ return err;
+}
+
+static void
+driver_finis(struct cryptocap *cap)
+{
+ u_int32_t ses, kops;
+
+ CRYPTO_DRIVER_ASSERT();
+
+ ses = cap->cc_sessions;
+ kops = cap->cc_koperations;
+ bzero(cap, sizeof(*cap));
+ if (ses != 0 || kops != 0) {
+ /*
+ * If there are pending sessions,
+ * just mark as invalid.
+ */
+ cap->cc_flags |= CRYPTOCAP_F_CLEANUP;
+ cap->cc_sessions = ses;
+ cap->cc_koperations = kops;
+ }
+}
+
+/*
+ * Unregister a crypto driver. If there are pending sessions using it,
+ * leave enough information around so that subsequent calls using those
+ * sessions will correctly detect the driver has been unregistered and
+ * reroute requests.
+ */
+int
+crypto_unregister(u_int32_t driverid, int alg)
+{
+ struct cryptocap *cap;
+ int i, err;
+
+ CRYPTO_DRIVER_LOCK();
+ cap = crypto_checkdriver(driverid);
+ if (cap != NULL &&
+ (CRYPTO_ALGORITHM_MIN <= alg && alg <= CRYPTO_ALGORITHM_MAX) &&
+ cap->cc_alg[alg] != 0) {
+ cap->cc_alg[alg] = 0;
+ cap->cc_max_op_len[alg] = 0;
+
+ /* Was this the last algorithm ? */
+ for (i = 1; i <= CRYPTO_ALGORITHM_MAX; i++)
+ if (cap->cc_alg[i] != 0)
+ break;
+
+ if (i == CRYPTO_ALGORITHM_MAX + 1)
+ driver_finis(cap);
+ err = 0;
+ } else
+ err = EINVAL;
+ CRYPTO_DRIVER_UNLOCK();
+
+ return err;
+}
+
+/*
+ * Unregister all algorithms associated with a crypto driver.
+ * If there are pending sessions using it, leave enough information
+ * around so that subsequent calls using those sessions will
+ * correctly detect the driver has been unregistered and reroute
+ * requests.
+ */
+int
+crypto_unregister_all(u_int32_t driverid)
+{
+ struct cryptocap *cap;
+ int err;
+
+ CRYPTO_DRIVER_LOCK();
+ cap = crypto_checkdriver(driverid);
+ if (cap != NULL) {
+ driver_finis(cap);
+ err = 0;
+ } else
+ err = EINVAL;
+ CRYPTO_DRIVER_UNLOCK();
+
+ return err;
+}
+
+/*
+ * Clear blockage on a driver. The what parameter indicates whether
+ * the driver is now ready for cryptop's and/or cryptokop's.
+ */
+int
+crypto_unblock(u_int32_t driverid, int what)
+{
+ struct cryptocap *cap;
+ int err;
+
+ CRYPTO_Q_LOCK();
+ cap = crypto_checkdriver(driverid);
+ if (cap != NULL) {
+ if (what & CRYPTO_SYMQ)
+ cap->cc_qblocked = 0;
+ if (what & CRYPTO_ASYMQ)
+ cap->cc_kqblocked = 0;
+ if (crp_sleep)
+ wakeup_one(&crp_q);
+ err = 0;
+ } else
+ err = EINVAL;
+ CRYPTO_Q_UNLOCK();
+
+ return err;
+}
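+
+/*
+ * NB: illustrative -- a driver that returned ERESTART typically calls
+ * crypto_unblock from its completion interrupt once resources free up;
+ * sc->sc_cid is a hypothetical softc field holding the driver id:
+ *
+ *	crypto_unblock(sc->sc_cid, CRYPTO_SYMQ | CRYPTO_ASYMQ);
+ */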
+
+/*
+ * Add a crypto request to a queue, to be processed by the kernel thread.
+ */
+int
+crypto_dispatch(struct cryptop *crp)
+{
+ struct cryptocap *cap;
+ u_int32_t hid;
+ int result;
+
+ cryptostats.cs_ops++;
+
+#ifdef CRYPTO_TIMING
+ if (crypto_timing)
+ binuptime(&crp->crp_tstamp);
+#endif
+
+ hid = CRYPTO_SESID2HID(crp->crp_sid);
+
+ if ((crp->crp_flags & CRYPTO_F_BATCH) == 0) {
+ /*
+ * Caller marked the request to be processed
+ * immediately; dispatch it directly to the
+ * driver unless the driver is currently blocked.
+ */
+ cap = crypto_checkdriver(hid);
+ /* A driver cannot disappear when there is an active session. */
+ KASSERT(cap != NULL, ("%s: Driver disappeared.", __func__));
+ if (!cap->cc_qblocked) {
+ result = crypto_invoke(cap, crp, 0);
+ if (result != ERESTART)
+ return (result);
+ /*
+ * The driver ran out of resources, put the request on
+ * the queue.
+ */
+ }
+ }
+ CRYPTO_Q_LOCK();
+ TAILQ_INSERT_TAIL(&crp_q, crp, crp_next);
+ if (crp_sleep)
+ wakeup_one(&crp_q);
+ CRYPTO_Q_UNLOCK();
+ return 0;
+}
+
+/*
+ * Add an asymmetric crypto request to a queue,
+ * to be processed by the kernel thread.
+ */
+int
+crypto_kdispatch(struct cryptkop *krp)
+{
+ int error;
+
+ cryptostats.cs_kops++;
+
+ error = crypto_kinvoke(krp, krp->krp_crid);
+ if (error == ERESTART) {
+ CRYPTO_Q_LOCK();
+ TAILQ_INSERT_TAIL(&crp_kq, krp, krp_next);
+ if (crp_sleep)
+ wakeup_one(&crp_q);
+ CRYPTO_Q_UNLOCK();
+ error = 0;
+ }
+ return error;
+}
+
+/*
+ * Verify a driver is suitable for the specified operation.
+ */
+static __inline int
+kdriver_suitable(const struct cryptocap *cap, const struct cryptkop *krp)
+{
+ return (cap->cc_kalg[krp->krp_op] & CRYPTO_ALG_FLAG_SUPPORTED) != 0;
+}
+
+/*
+ * Select a driver for an asym operation. The driver must
+ * support the necessary algorithm. The caller can constrain
+ * which device is selected with the flags parameter. The
+ * algorithm we use here is pretty stupid; of the drivers that
+ * support the algorithm we need, we choose the one with the
+ * fewest active operations.  We prefer hardware-backed
+ * drivers to software ones when either may be used.
+ */
+static struct cryptocap *
+crypto_select_kdriver(const struct cryptkop *krp, int flags)
+{
+ struct cryptocap *cap, *best, *blocked;
+ int match, hid;
+
+ CRYPTO_DRIVER_ASSERT();
+
+ /*
+ * Look first for hardware crypto devices if permitted.
+ */
+ if (flags & CRYPTOCAP_F_HARDWARE)
+ match = CRYPTOCAP_F_HARDWARE;
+ else
+ match = CRYPTOCAP_F_SOFTWARE;
+ best = NULL;
+ blocked = NULL;
+again:
+ for (hid = 0; hid < crypto_drivers_num; hid++) {
+ cap = &crypto_drivers[hid];
+ /*
+ * If it's not initialized, is in the process of
+ * going away, or is not appropriate (hardware
+ * or software based on match), then skip.
+ */
+ if (cap->cc_dev == NULL ||
+ (cap->cc_flags & CRYPTOCAP_F_CLEANUP) ||
+ (cap->cc_flags & match) == 0)
+ continue;
+
+ /* verify the operation is supported. */
+ if (kdriver_suitable(cap, krp)) {
+ if (best == NULL ||
+ cap->cc_koperations < best->cc_koperations)
+ best = cap;
+ }
+ }
+ if (best != NULL)
+ return best;
+ if (match == CRYPTOCAP_F_HARDWARE && (flags & CRYPTOCAP_F_SOFTWARE)) {
+ /* sort of an Algol 68-style for loop */
+ match = CRYPTOCAP_F_SOFTWARE;
+ goto again;
+ }
+ return best;
+}
+
+/*
+ * Dispatch an asymmetric crypto request.
+ */
+static int
+crypto_kinvoke(struct cryptkop *krp, int crid)
+{
+ struct cryptocap *cap = NULL;
+ int error;
+
+ KASSERT(krp != NULL, ("%s: krp == NULL", __func__));
+ KASSERT(krp->krp_callback != NULL,
+ ("%s: krp->crp_callback == NULL", __func__));
+
+ CRYPTO_DRIVER_LOCK();
+ if ((crid & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) {
+ cap = crypto_checkdriver(crid);
+ if (cap != NULL) {
+ /*
+ * Driver present, it must support the necessary
+ * algorithm and, if s/w drivers are excluded,
+ * it must be registered as hardware-backed.
+ */
+ if (!kdriver_suitable(cap, krp) ||
+ (!crypto_devallowsoft &&
+ (cap->cc_flags & CRYPTOCAP_F_HARDWARE) == 0))
+ cap = NULL;
+ }
+ } else {
+ /*
+ * No requested driver; select based on crid flags.
+ */
+ if (!crypto_devallowsoft) /* NB: disallow s/w drivers */
+ crid &= ~CRYPTOCAP_F_SOFTWARE;
+ cap = crypto_select_kdriver(krp, crid);
+ }
+ if (cap != NULL && !cap->cc_kqblocked) {
+ krp->krp_hid = cap - crypto_drivers;
+ cap->cc_koperations++;
+ CRYPTO_DRIVER_UNLOCK();
+ error = CRYPTODEV_KPROCESS(cap->cc_dev, krp, 0);
+ CRYPTO_DRIVER_LOCK();
+ if (error == ERESTART) {
+ cap->cc_koperations--;
+ CRYPTO_DRIVER_UNLOCK();
+ return (error);
+ }
+ } else {
+ /*
+ * NB: cap is !NULL if device is blocked; in
+ * that case return ERESTART so the operation
+ * is resubmitted if possible.
+ */
+ error = (cap == NULL) ? ENODEV : ERESTART;
+ }
+ CRYPTO_DRIVER_UNLOCK();
+
+ if (error) {
+ krp->krp_status = error;
+ crypto_kdone(krp);
+ }
+ return 0;
+}
+
+#ifdef CRYPTO_TIMING
+static void
+crypto_tstat(struct cryptotstat *ts, struct bintime *bt)
+{
+ struct bintime now, delta;
+ struct timespec t;
+ uint64_t u;
+
+ binuptime(&now);
+ u = now.frac;
+ delta.frac = now.frac - bt->frac;
+ delta.sec = now.sec - bt->sec;
+ if (u < delta.frac)
+ delta.sec--;
+ bintime2timespec(&delta, &t);
+ timespecadd(&ts->acc, &t);
+ if (timespeccmp(&t, &ts->min, <))
+ ts->min = t;
+ if (timespeccmp(&t, &ts->max, >))
+ ts->max = t;
+ ts->count++;
+
+ *bt = now;
+}
+#endif
+
+/*
+ * Dispatch a crypto request to the appropriate crypto devices.
+ */
+static int
+crypto_invoke(struct cryptocap *cap, struct cryptop *crp, int hint)
+{
+
+ KASSERT(crp != NULL, ("%s: crp == NULL", __func__));
+ KASSERT(crp->crp_callback != NULL,
+ ("%s: crp->crp_callback == NULL", __func__));
+ KASSERT(crp->crp_desc != NULL, ("%s: crp->crp_desc == NULL", __func__));
+
+#ifdef CRYPTO_TIMING
+ if (crypto_timing)
+ crypto_tstat(&cryptostats.cs_invoke, &crp->crp_tstamp);
+#endif
+ if (cap->cc_flags & CRYPTOCAP_F_CLEANUP) {
+ struct cryptodesc *crd;
+ u_int64_t nid;
+
+ /*
+ * Driver has unregistered; migrate the session and return
+ * an error to the caller so they'll resubmit the op.
+ *
+ * XXX: What if there are more already queued requests for this
+ * session?
+ */
+ crypto_freesession(crp->crp_sid);
+
+ for (crd = crp->crp_desc; crd->crd_next; crd = crd->crd_next)
+ crd->CRD_INI.cri_next = &(crd->crd_next->CRD_INI);
+
+ /* XXX propagate flags from initial session? */
+ if (crypto_newsession(&nid, &(crp->crp_desc->CRD_INI),
+ CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE) == 0)
+ crp->crp_sid = nid;
+
+ crp->crp_etype = EAGAIN;
+ crypto_done(crp);
+ return 0;
+ } else {
+ /*
+ * Invoke the driver to process the request.
+ */
+ return CRYPTODEV_PROCESS(cap->cc_dev, crp, hint);
+ }
+}
+
+/*
+ * Release a set of crypto descriptors.
+ */
+void
+crypto_freereq(struct cryptop *crp)
+{
+ struct cryptodesc *crd;
+
+ if (crp == NULL)
+ return;
+
+#ifdef DIAGNOSTIC
+ {
+ struct cryptop *crp2;
+
+ CRYPTO_Q_LOCK();
+ TAILQ_FOREACH(crp2, &crp_q, crp_next) {
+ KASSERT(crp2 != crp,
+ ("Freeing cryptop from the crypto queue (%p).",
+ crp));
+ }
+ CRYPTO_Q_UNLOCK();
+ CRYPTO_RETQ_LOCK();
+ TAILQ_FOREACH(crp2, &crp_ret_q, crp_next) {
+ KASSERT(crp2 != crp,
+ ("Freeing cryptop from the return queue (%p).",
+ crp));
+ }
+ CRYPTO_RETQ_UNLOCK();
+ }
+#endif
+
+ while ((crd = crp->crp_desc) != NULL) {
+ crp->crp_desc = crd->crd_next;
+ uma_zfree(cryptodesc_zone, crd);
+ }
+ uma_zfree(cryptop_zone, crp);
+}
+
+/*
+ * Acquire a set of crypto descriptors.
+ */
+struct cryptop *
+crypto_getreq(int num)
+{
+ struct cryptodesc *crd;
+ struct cryptop *crp;
+
+ crp = uma_zalloc(cryptop_zone, M_NOWAIT|M_ZERO);
+ if (crp != NULL) {
+ while (num--) {
+ crd = uma_zalloc(cryptodesc_zone, M_NOWAIT|M_ZERO);
+ if (crd == NULL) {
+ crypto_freereq(crp);
+ return NULL;
+ }
+
+ crd->crd_next = crp->crp_desc;
+ crp->crp_desc = crd;
+ }
+ }
+ return crp;
+}
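+
+/*
+ * NB: an abbreviated, illustrative request construction using the
+ * routines above; "sid" and "cb" are assumed to exist in the caller,
+ * and a real request must also set the buffer, lengths and flags:
+ *
+ *	struct cryptop *crp = crypto_getreq(1);
+ *	if (crp == NULL)
+ *		return (ENOMEM);
+ *	crp->crp_sid = sid;
+ *	crp->crp_desc->crd_alg = CRYPTO_AES_CBC;
+ *	crp->crp_callback = cb;
+ *	return (crypto_dispatch(crp));
+ */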
+
+/*
+ * Invoke the callback on behalf of the driver.
+ */
+void
+crypto_done(struct cryptop *crp)
+{
+ KASSERT((crp->crp_flags & CRYPTO_F_DONE) == 0,
+ ("crypto_done: op already done, flags 0x%x", crp->crp_flags));
+ crp->crp_flags |= CRYPTO_F_DONE;
+ if (crp->crp_etype != 0)
+ cryptostats.cs_errs++;
+#ifdef CRYPTO_TIMING
+ if (crypto_timing)
+ crypto_tstat(&cryptostats.cs_done, &crp->crp_tstamp);
+#endif
+ /*
+ * CBIMM means unconditionally do the callback immediately;
+ * CBIFSYNC means do the callback immediately only if the
+ * operation was done synchronously. Both are used to avoid
+ * doing extraneous context switches; the latter is mostly
+ * used with the software crypto driver.
+ */
+ if ((crp->crp_flags & CRYPTO_F_CBIMM) ||
+ ((crp->crp_flags & CRYPTO_F_CBIFSYNC) &&
+ (CRYPTO_SESID2CAPS(crp->crp_sid) & CRYPTOCAP_F_SYNC))) {
+ /*
+ * Do the callback directly. This is ok when the
+ * callback routine does very little (e.g. the
+ * /dev/crypto callback method just does a wakeup).
+ */
+#ifdef CRYPTO_TIMING
+ if (crypto_timing) {
+ /*
+ * NB: We must copy the timestamp before
+ * doing the callback as the cryptop is
+ * likely to be reclaimed.
+ */
+ struct bintime t = crp->crp_tstamp;
+ crypto_tstat(&cryptostats.cs_cb, &t);
+ crp->crp_callback(crp);
+ crypto_tstat(&cryptostats.cs_finis, &t);
+ } else
+#endif
+ crp->crp_callback(crp);
+ } else {
+ /*
+ * Normal case; queue the callback for the thread.
+ */
+ CRYPTO_RETQ_LOCK();
+ if (CRYPTO_RETQ_EMPTY())
+ wakeup_one(&crp_ret_q); /* shared wait channel */
+ TAILQ_INSERT_TAIL(&crp_ret_q, crp, crp_next);
+ CRYPTO_RETQ_UNLOCK();
+ }
+}
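+
+/*
+ * NB: illustrative -- a caller that sleeps on the request itself and
+ * merely wakes up in its callback (as the /dev/crypto code does) sets
+ * CRYPTO_F_CBIMM so crypto_done runs the callback directly rather
+ * than bouncing through the return thread:
+ *
+ *	crp->crp_flags = CRYPTO_F_IOV | CRYPTO_F_CBIMM;
+ */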
+
+/*
+ * Invoke the callback on behalf of the driver.
+ */
+void
+crypto_kdone(struct cryptkop *krp)
+{
+ struct cryptocap *cap;
+
+ if (krp->krp_status != 0)
+ cryptostats.cs_kerrs++;
+ CRYPTO_DRIVER_LOCK();
+ /* XXX: What if driver is loaded in the meantime? */
+ if (krp->krp_hid < crypto_drivers_num) {
+ cap = &crypto_drivers[krp->krp_hid];
+ cap->cc_koperations--;
+ KASSERT(cap->cc_koperations >= 0, ("cc_koperations < 0"));
+ if (cap->cc_flags & CRYPTOCAP_F_CLEANUP)
+ crypto_remove(cap);
+ }
+ CRYPTO_DRIVER_UNLOCK();
+ CRYPTO_RETQ_LOCK();
+ if (CRYPTO_RETQ_EMPTY())
+ wakeup_one(&crp_ret_q); /* shared wait channel */
+ TAILQ_INSERT_TAIL(&crp_ret_kq, krp, krp_next);
+ CRYPTO_RETQ_UNLOCK();
+}
+
+int
+crypto_getfeat(int *featp)
+{
+ int hid, kalg, feat = 0;
+
+ CRYPTO_DRIVER_LOCK();
+ for (hid = 0; hid < crypto_drivers_num; hid++) {
+ const struct cryptocap *cap = &crypto_drivers[hid];
+
+ if ((cap->cc_flags & CRYPTOCAP_F_SOFTWARE) &&
+ !crypto_devallowsoft) {
+ continue;
+ }
+ for (kalg = 0; kalg <= CRK_ALGORITHM_MAX; kalg++)
+ if (cap->cc_kalg[kalg] & CRYPTO_ALG_FLAG_SUPPORTED)
+ feat |= 1 << kalg;
+ }
+ CRYPTO_DRIVER_UNLOCK();
+ *featp = feat;
+ return (0);
+}
+
+/*
+ * Terminate a thread at module unload. The process that
+ * initiated this is waiting for us to signal that we're gone;
+ * wake it up and exit.  We use the driver table lock to ensure
+ * we don't do the wakeup before they're waiting.  There is no
+ * race here: the waiter sleeps on the thread's proc lock, and
+ * exit1() does an extra wakeup on it, so the waiter is notified
+ * at the right time.
+ */
+static void
+crypto_finis(void *chan)
+{
+ CRYPTO_DRIVER_LOCK();
+ wakeup_one(chan);
+ CRYPTO_DRIVER_UNLOCK();
+ kproc_exit(0);
+}
+
+/*
+ * Crypto thread, dispatches crypto requests.
+ */
+static void
+crypto_proc(void)
+{
+ struct cryptop *crp, *submit;
+ struct cryptkop *krp;
+ struct cryptocap *cap;
+ u_int32_t hid;
+ int result, hint;
+
+#if defined(__i386__) || defined(__amd64__)
+ fpu_kern_thread(FPU_KERN_NORMAL);
+#endif
+
+ CRYPTO_Q_LOCK();
+ for (;;) {
+ /*
+ * Find the first element in the queue that can be
+ * processed and look-ahead to see if multiple ops
+ * are ready for the same driver.
+ */
+ submit = NULL;
+ hint = 0;
+ TAILQ_FOREACH(crp, &crp_q, crp_next) {
+ hid = CRYPTO_SESID2HID(crp->crp_sid);
+ cap = crypto_checkdriver(hid);
+ /*
+ * A driver cannot disappear when there is an
+ * active session.
+ */
+ KASSERT(cap != NULL, ("%s:%u Driver disappeared.",
+ __func__, __LINE__));
+ if (cap == NULL || cap->cc_dev == NULL) {
+ /* Op needs to be migrated, process it. */
+ if (submit == NULL)
+ submit = crp;
+ break;
+ }
+ if (!cap->cc_qblocked) {
+ if (submit != NULL) {
+ /*
+ * We stop on finding another op,
+ * regardless whether its for the same
+ * driver or not. We could keep
+ * searching the queue but it might be
+ * better to just use a per-driver
+ * queue instead.
+ */
+ if (CRYPTO_SESID2HID(submit->crp_sid) == hid)
+ hint = CRYPTO_HINT_MORE;
+ break;
+ } else {
+ submit = crp;
+ if ((submit->crp_flags & CRYPTO_F_BATCH) == 0)
+ break;
+ /* keep scanning while more are q'd */
+ }
+ }
+ }
+ if (submit != NULL) {
+ TAILQ_REMOVE(&crp_q, submit, crp_next);
+ hid = CRYPTO_SESID2HID(submit->crp_sid);
+ cap = crypto_checkdriver(hid);
+ KASSERT(cap != NULL, ("%s:%u Driver disappeared.",
+ __func__, __LINE__));
+ result = crypto_invoke(cap, submit, hint);
+ if (result == ERESTART) {
+ /*
+ * The driver ran out of resources, mark the
+ * driver ``blocked'' for cryptop's and put
+ * the request back in the queue.  It would be
+ * best to put the request back where we got
+ * it but that's hard so for now we put it
+ * at the front. This should be ok; putting
+ * it at the end does not work.
+ */
+ /* XXX validate sid again? */
+ crypto_drivers[CRYPTO_SESID2HID(submit->crp_sid)].cc_qblocked = 1;
+ TAILQ_INSERT_HEAD(&crp_q, submit, crp_next);
+ cryptostats.cs_blocks++;
+ }
+ }
+
+ /* As above, but for key ops */
+ TAILQ_FOREACH(krp, &crp_kq, krp_next) {
+ cap = crypto_checkdriver(krp->krp_hid);
+ if (cap == NULL || cap->cc_dev == NULL) {
+ /*
+ * Operation needs to be migrated, invalidate
+ * the assigned device so it will reselect a
+ * new one below. Propagate the original
+ * crid selection flags if supplied.
+ */
+ krp->krp_hid = krp->krp_crid &
+ (CRYPTOCAP_F_SOFTWARE|CRYPTOCAP_F_HARDWARE);
+ if (krp->krp_hid == 0)
+ krp->krp_hid =
+ CRYPTOCAP_F_SOFTWARE|CRYPTOCAP_F_HARDWARE;
+ break;
+ }
+ if (!cap->cc_kqblocked)
+ break;
+ }
+ if (krp != NULL) {
+ TAILQ_REMOVE(&crp_kq, krp, krp_next);
+ result = crypto_kinvoke(krp, krp->krp_hid);
+ if (result == ERESTART) {
+ /*
+ * The driver ran out of resources, mark the
+ * driver ``blocked'' for cryptkop's and put
+ * the request back in the queue.  It would be
+ * best to put the request back where we got
+ * it but that's hard so for now we put it
+ * at the front. This should be ok; putting
+ * it at the end does not work.
+ */
+ /* XXX validate sid again? */
+ crypto_drivers[krp->krp_hid].cc_kqblocked = 1;
+ TAILQ_INSERT_HEAD(&crp_kq, krp, krp_next);
+ cryptostats.cs_kblocks++;
+ }
+ }
+
+ if (submit == NULL && krp == NULL) {
+ /*
+ * Nothing more to be processed. Sleep until we're
+ * woken because there are more ops to process.
+ * This happens either by submission or by a driver
+ * becoming unblocked and notifying us through
+ * crypto_unblock. Note that when we wakeup we
+ * start processing each queue again from the
+ * front. It's not clear that it's important to
+ * preserve this ordering since ops may finish
+ * out of order if dispatched to different devices
+ * and some become blocked while others do not.
+ */
+ crp_sleep = 1;
+ msleep(&crp_q, &crypto_q_mtx, PWAIT, "crypto_wait", 0);
+ crp_sleep = 0;
+ if (cryptoproc == NULL)
+ break;
+ cryptostats.cs_intrs++;
+ }
+ }
+ CRYPTO_Q_UNLOCK();
+
+ crypto_finis(&crp_q);
+}
+
+/*
+ * Crypto returns thread, does callbacks for processed crypto requests.
+ * Callbacks are done here, rather than in the crypto drivers, because
+ * callbacks typically are expensive and would slow interrupt handling.
+ */
+static void
+crypto_ret_proc(void)
+{
+ struct cryptop *crpt;
+ struct cryptkop *krpt;
+
+ CRYPTO_RETQ_LOCK();
+ for (;;) {
+ /* Harvest return q's for completed ops */
+ crpt = TAILQ_FIRST(&crp_ret_q);
+ if (crpt != NULL)
+ TAILQ_REMOVE(&crp_ret_q, crpt, crp_next);
+
+ krpt = TAILQ_FIRST(&crp_ret_kq);
+ if (krpt != NULL)
+ TAILQ_REMOVE(&crp_ret_kq, krpt, krp_next);
+
+ if (crpt != NULL || krpt != NULL) {
+ CRYPTO_RETQ_UNLOCK();
+ /*
+ * Run callbacks unlocked.
+ */
+ if (crpt != NULL) {
+#ifdef CRYPTO_TIMING
+ if (crypto_timing) {
+ /*
+ * NB: We must copy the timestamp before
+ * doing the callback as the cryptop is
+ * likely to be reclaimed.
+ */
+ struct bintime t = crpt->crp_tstamp;
+ crypto_tstat(&cryptostats.cs_cb, &t);
+ crpt->crp_callback(crpt);
+ crypto_tstat(&cryptostats.cs_finis, &t);
+ } else
+#endif
+ crpt->crp_callback(crpt);
+ }
+ if (krpt != NULL)
+ krpt->krp_callback(krpt);
+ CRYPTO_RETQ_LOCK();
+ } else {
+ /*
+ * Nothing more to be processed. Sleep until we're
+ * woken because there are more returns to process.
+ */
+ msleep(&crp_ret_q, &crypto_ret_q_mtx, PWAIT,
+ "crypto_ret_wait", 0);
+ if (cryptoretproc == NULL)
+ break;
+ cryptostats.cs_rets++;
+ }
+ }
+ CRYPTO_RETQ_UNLOCK();
+
+ crypto_finis(&crp_ret_q);
+}
+
+#ifdef DDB
+static void
+db_show_drivers(void)
+{
+ int hid;
+
+ db_printf("%12s %4s %4s %8s %2s %2s\n"
+ , "Device"
+ , "Ses"
+ , "Kops"
+ , "Flags"
+ , "QB"
+ , "KB"
+ );
+ for (hid = 0; hid < crypto_drivers_num; hid++) {
+ const struct cryptocap *cap = &crypto_drivers[hid];
+ if (cap->cc_dev == NULL)
+ continue;
+ db_printf("%-12s %4u %4u %08x %2u %2u\n"
+ , device_get_nameunit(cap->cc_dev)
+ , cap->cc_sessions
+ , cap->cc_koperations
+ , cap->cc_flags
+ , cap->cc_qblocked
+ , cap->cc_kqblocked
+ );
+ }
+}
+
+DB_SHOW_COMMAND(crypto, db_show_crypto)
+{
+ struct cryptop *crp;
+
+ db_show_drivers();
+ db_printf("\n");
+
+ db_printf("%4s %8s %4s %4s %4s %4s %8s %8s\n",
+ "HID", "Caps", "Ilen", "Olen", "Etype", "Flags",
+ "Desc", "Callback");
+ TAILQ_FOREACH(crp, &crp_q, crp_next) {
+ db_printf("%4u %08x %4u %4u %4u %04x %8p %8p\n"
+ , (int) CRYPTO_SESID2HID(crp->crp_sid)
+ , (int) CRYPTO_SESID2CAPS(crp->crp_sid)
+ , crp->crp_ilen, crp->crp_olen
+ , crp->crp_etype
+ , crp->crp_flags
+ , crp->crp_desc
+ , crp->crp_callback
+ );
+ }
+ if (!TAILQ_EMPTY(&crp_ret_q)) {
+ db_printf("\n%4s %4s %4s %8s\n",
+ "HID", "Etype", "Flags", "Callback");
+ TAILQ_FOREACH(crp, &crp_ret_q, crp_next) {
+ db_printf("%4u %4u %04x %8p\n"
+ , (int) CRYPTO_SESID2HID(crp->crp_sid)
+ , crp->crp_etype
+ , crp->crp_flags
+ , crp->crp_callback
+ );
+ }
+ }
+}
+
+DB_SHOW_COMMAND(kcrypto, db_show_kcrypto)
+{
+ struct cryptkop *krp;
+
+ db_show_drivers();
+ db_printf("\n");
+
+ db_printf("%4s %5s %4s %4s %8s %4s %8s\n",
+ "Op", "Status", "#IP", "#OP", "CRID", "HID", "Callback");
+ TAILQ_FOREACH(krp, &crp_kq, krp_next) {
+ db_printf("%4u %5u %4u %4u %08x %4u %8p\n"
+ , krp->krp_op
+ , krp->krp_status
+ , krp->krp_iparams, krp->krp_oparams
+ , krp->krp_crid, krp->krp_hid
+ , krp->krp_callback
+ );
+ }
+ if (!TAILQ_EMPTY(&crp_ret_kq)) {
+ db_printf("%4s %5s %8s %4s %8s\n",
+ "Op", "Status", "CRID", "HID", "Callback");
+ TAILQ_FOREACH(krp, &crp_ret_kq, krp_next) {
+ db_printf("%4u %5u %08x %4u %8p\n"
+ , krp->krp_op
+ , krp->krp_status
+ , krp->krp_crid, krp->krp_hid
+ , krp->krp_callback
+ );
+ }
+ }
+}
+#endif
+
+int crypto_modevent(module_t mod, int type, void *unused);
+
+/*
+ * Initialization code, both for static and dynamic loading.
+ * Note this is not invoked with the usual DECLARE_MODULE
+ * mechanism but instead is listed as a dependency by the
+ * cryptosoft driver. This guarantees proper ordering of
+ * calls on module load/unload.
+ */
+int
+crypto_modevent(module_t mod, int type, void *unused)
+{
+ int error = EINVAL;
+
+ switch (type) {
+ case MOD_LOAD:
+ error = crypto_init();
+ if (error == 0 && bootverbose)
+ printf("crypto: <crypto core>\n");
+ break;
+ case MOD_UNLOAD:
+ /* XXX disallow if active sessions */
+ error = 0;
+ crypto_destroy();
+ return 0;
+ }
+ return error;
+}
+MODULE_VERSION(crypto, 1);
+MODULE_DEPEND(crypto, zlib, 1, 1, 1);
diff --git a/rtems/freebsd/opencrypto/cryptodev.c b/rtems/freebsd/opencrypto/cryptodev.c
new file mode 100644
index 00000000..842688d8
--- /dev/null
+++ b/rtems/freebsd/opencrypto/cryptodev.c
@@ -0,0 +1,1178 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/* $OpenBSD: cryptodev.c,v 1.52 2002/06/19 07:22:46 deraadt Exp $ */
+
+/*-
+ * Copyright (c) 2001 Theo de Raadt
+ * Copyright (c) 2002-2006 Sam Leffler, Errno Consulting
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Effort sponsored in part by the Defense Advanced Research Projects
+ * Agency (DARPA) and Air Force Research Laboratory, Air Force
+ * Materiel Command, USAF, under agreement number F30602-01-2-0537.
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/freebsd/local/opt_compat.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/mutex.h>
+#include <rtems/freebsd/sys/sysctl.h>
+#include <rtems/freebsd/sys/file.h>
+#include <rtems/freebsd/sys/filedesc.h>
+#include <rtems/freebsd/sys/errno.h>
+#include <rtems/freebsd/sys/uio.h>
+#include <rtems/freebsd/sys/random.h>
+#include <rtems/freebsd/sys/conf.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/module.h>
+#include <rtems/freebsd/sys/fcntl.h>
+#include <rtems/freebsd/sys/bus.h>
+
+#include <rtems/freebsd/opencrypto/cryptodev.h>
+#include <rtems/freebsd/opencrypto/xform.h>
+
+#ifdef COMPAT_FREEBSD32
+#include <rtems/freebsd/sys/mount.h>
+#include <rtems/freebsd/compat/freebsd32/freebsd32.h>
+
+struct session_op32 {
+ u_int32_t cipher;
+ u_int32_t mac;
+ u_int32_t keylen;
+ u_int32_t key;
+ int mackeylen;
+ u_int32_t mackey;
+ u_int32_t ses;
+};
+
+struct session2_op32 {
+ u_int32_t cipher;
+ u_int32_t mac;
+ u_int32_t keylen;
+ u_int32_t key;
+ int mackeylen;
+ u_int32_t mackey;
+ u_int32_t ses;
+ int crid;
+ int pad[4];
+};
+
+struct crypt_op32 {
+ u_int32_t ses;
+ u_int16_t op;
+ u_int16_t flags;
+ u_int len;
+ u_int32_t src, dst;
+ u_int32_t mac;
+ u_int32_t iv;
+};
+
+struct crparam32 {
+ u_int32_t crp_p;
+ u_int crp_nbits;
+};
+
+struct crypt_kop32 {
+ u_int crk_op;
+ u_int crk_status;
+ u_short crk_iparams;
+ u_short crk_oparams;
+ u_int crk_crid;
+ struct crparam32 crk_param[CRK_MAXPARAM];
+};
+
+struct cryptotstat32 {
+ struct timespec32 acc;
+ struct timespec32 min;
+ struct timespec32 max;
+ u_int32_t count;
+};
+
+struct cryptostats32 {
+ u_int32_t cs_ops;
+ u_int32_t cs_errs;
+ u_int32_t cs_kops;
+ u_int32_t cs_kerrs;
+ u_int32_t cs_intrs;
+ u_int32_t cs_rets;
+ u_int32_t cs_blocks;
+ u_int32_t cs_kblocks;
+ struct cryptotstat32 cs_invoke;
+ struct cryptotstat32 cs_done;
+ struct cryptotstat32 cs_cb;
+ struct cryptotstat32 cs_finis;
+};
+
+#define CIOCGSESSION32 _IOWR('c', 101, struct session_op32)
+#define CIOCCRYPT32 _IOWR('c', 103, struct crypt_op32)
+#define CIOCKEY32 _IOWR('c', 104, struct crypt_kop32)
+#define CIOCGSESSION232 _IOWR('c', 106, struct session2_op32)
+#define CIOCKEY232 _IOWR('c', 107, struct crypt_kop32)
+
+static void
+session_op_from_32(const struct session_op32 *from, struct session_op *to)
+{
+
+ CP(*from, *to, cipher);
+ CP(*from, *to, mac);
+ CP(*from, *to, keylen);
+ PTRIN_CP(*from, *to, key);
+ CP(*from, *to, mackeylen);
+ PTRIN_CP(*from, *to, mackey);
+ CP(*from, *to, ses);
+}
+
+static void
+session2_op_from_32(const struct session2_op32 *from, struct session2_op *to)
+{
+
+ session_op_from_32((const struct session_op32 *)from,
+ (struct session_op *)to);
+ CP(*from, *to, crid);
+}
+
+static void
+session_op_to_32(const struct session_op *from, struct session_op32 *to)
+{
+
+ CP(*from, *to, cipher);
+ CP(*from, *to, mac);
+ CP(*from, *to, keylen);
+ PTROUT_CP(*from, *to, key);
+ CP(*from, *to, mackeylen);
+ PTROUT_CP(*from, *to, mackey);
+ CP(*from, *to, ses);
+}
+
+static void
+session2_op_to_32(const struct session2_op *from, struct session2_op32 *to)
+{
+
+ session_op_to_32((const struct session_op *)from,
+ (struct session_op32 *)to);
+ CP(*from, *to, crid);
+}
+
+static void
+crypt_op_from_32(const struct crypt_op32 *from, struct crypt_op *to)
+{
+
+ CP(*from, *to, ses);
+ CP(*from, *to, op);
+ CP(*from, *to, flags);
+ CP(*from, *to, len);
+ PTRIN_CP(*from, *to, src);
+ PTRIN_CP(*from, *to, dst);
+ PTRIN_CP(*from, *to, mac);
+ PTRIN_CP(*from, *to, iv);
+}
+
+static void
+crypt_op_to_32(const struct crypt_op *from, struct crypt_op32 *to)
+{
+
+ CP(*from, *to, ses);
+ CP(*from, *to, op);
+ CP(*from, *to, flags);
+ CP(*from, *to, len);
+ PTROUT_CP(*from, *to, src);
+ PTROUT_CP(*from, *to, dst);
+ PTROUT_CP(*from, *to, mac);
+ PTROUT_CP(*from, *to, iv);
+}
+
+static void
+crparam_from_32(const struct crparam32 *from, struct crparam *to)
+{
+
+ PTRIN_CP(*from, *to, crp_p);
+ CP(*from, *to, crp_nbits);
+}
+
+static void
+crparam_to_32(const struct crparam *from, struct crparam32 *to)
+{
+
+ PTROUT_CP(*from, *to, crp_p);
+ CP(*from, *to, crp_nbits);
+}
+
+static void
+crypt_kop_from_32(const struct crypt_kop32 *from, struct crypt_kop *to)
+{
+ int i;
+
+ CP(*from, *to, crk_op);
+ CP(*from, *to, crk_status);
+ CP(*from, *to, crk_iparams);
+ CP(*from, *to, crk_oparams);
+ CP(*from, *to, crk_crid);
+ for (i = 0; i < CRK_MAXPARAM; i++)
+ crparam_from_32(&from->crk_param[i], &to->crk_param[i]);
+}
+
+static void
+crypt_kop_to_32(const struct crypt_kop *from, struct crypt_kop32 *to)
+{
+ int i;
+
+ CP(*from, *to, crk_op);
+ CP(*from, *to, crk_status);
+ CP(*from, *to, crk_iparams);
+ CP(*from, *to, crk_oparams);
+ CP(*from, *to, crk_crid);
+ for (i = 0; i < CRK_MAXPARAM; i++)
+ crparam_to_32(&from->crk_param[i], &to->crk_param[i]);
+}
+#endif
+
+struct csession {
+ TAILQ_ENTRY(csession) next;
+ u_int64_t sid;
+ u_int32_t ses;
+ struct mtx lock; /* for op submission */
+
+ u_int32_t cipher;
+ struct enc_xform *txform;
+ u_int32_t mac;
+ struct auth_hash *thash;
+
+ caddr_t key;
+ int keylen;
+ u_char tmp_iv[EALG_MAX_BLOCK_LEN];
+
+ caddr_t mackey;
+ int mackeylen;
+
+ struct iovec iovec;
+ struct uio uio;
+ int error;
+};
+
+struct fcrypt {
+ TAILQ_HEAD(csessionlist, csession) csessions;
+ int sesn;
+};
+
+static int cryptof_rw(struct file *fp, struct uio *uio,
+ struct ucred *cred, int flags, struct thread *);
+static int cryptof_truncate(struct file *, off_t, struct ucred *,
+ struct thread *);
+static int cryptof_ioctl(struct file *, u_long, void *,
+ struct ucred *, struct thread *);
+static int cryptof_poll(struct file *, int, struct ucred *, struct thread *);
+static int cryptof_kqfilter(struct file *, struct knote *);
+static int cryptof_stat(struct file *, struct stat *,
+ struct ucred *, struct thread *);
+static int cryptof_close(struct file *, struct thread *);
+
+static struct fileops cryptofops = {
+ .fo_read = cryptof_rw,
+ .fo_write = cryptof_rw,
+ .fo_truncate = cryptof_truncate,
+ .fo_ioctl = cryptof_ioctl,
+ .fo_poll = cryptof_poll,
+ .fo_kqfilter = cryptof_kqfilter,
+ .fo_stat = cryptof_stat,
+ .fo_close = cryptof_close
+};
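+
+/*
+ * NB: an illustrative userland flow (buffers and error handling
+ * elided); CRIOGET on /dev/crypto yields a descriptor backed by the
+ * fileops above, on which the session and crypt ioctls operate:
+ *
+ *	int cfd, fd = open("/dev/crypto", O_RDWR);
+ *	ioctl(fd, CRIOGET, &cfd);
+ *	struct session_op sop = { 0 };
+ *	sop.cipher = CRYPTO_AES_CBC;
+ *	sop.keylen = 16;
+ *	sop.key = key_buf;
+ *	ioctl(cfd, CIOCGSESSION, &sop);
+ *	struct crypt_op cop = { 0 };
+ *	cop.ses = sop.ses;
+ *	cop.op = COP_ENCRYPT;
+ *	ioctl(cfd, CIOCCRYPT, &cop);
+ */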
+
+static struct csession *csefind(struct fcrypt *, u_int);
+static int csedelete(struct fcrypt *, struct csession *);
+static struct csession *cseadd(struct fcrypt *, struct csession *);
+static struct csession *csecreate(struct fcrypt *, u_int64_t, caddr_t,
+ u_int64_t, caddr_t, u_int64_t, u_int32_t, u_int32_t, struct enc_xform *,
+ struct auth_hash *);
+static int csefree(struct csession *);
+
+static int cryptodev_op(struct csession *, struct crypt_op *,
+ struct ucred *, struct thread *td);
+static int cryptodev_key(struct crypt_kop *);
+static int cryptodev_find(struct crypt_find_op *);
+
+static int
+cryptof_rw(
+ struct file *fp,
+ struct uio *uio,
+ struct ucred *active_cred,
+ int flags,
+ struct thread *td)
+{
+
+ return (EIO);
+}
+
+static int
+cryptof_truncate(
+ struct file *fp,
+ off_t length,
+ struct ucred *active_cred,
+ struct thread *td)
+{
+
+ return (EINVAL);
+}
+
+/*
+ * Check a crypto identifier to see if it requested
+ * a software device/driver. This can be done either
+ * by device name/class or through search constraints.
+ */
+static int
+checkforsoftware(int crid)
+{
+ if (crid & CRYPTOCAP_F_SOFTWARE)
+ return EINVAL; /* XXX */
+ if ((crid & CRYPTOCAP_F_HARDWARE) == 0 &&
+ (crypto_getcaps(crid) & CRYPTOCAP_F_HARDWARE) == 0)
+ return EINVAL; /* XXX */
+ return 0;
+}
+
+/* ARGSUSED */
+static int
+cryptof_ioctl(
+ struct file *fp,
+ u_long cmd,
+ void *data,
+ struct ucred *active_cred,
+ struct thread *td)
+{
+#define SES2(p) ((struct session2_op *)p)
+ struct cryptoini cria, crie;
+ struct fcrypt *fcr = fp->f_data;
+ struct csession *cse;
+ struct session_op *sop;
+ struct crypt_op *cop;
+ struct enc_xform *txform = NULL;
+ struct auth_hash *thash = NULL;
+ struct crypt_kop *kop;
+ u_int64_t sid;
+ u_int32_t ses;
+ int error = 0, crid;
+#ifdef COMPAT_FREEBSD32
+ struct session2_op sopc;
+ struct crypt_op copc;
+ struct crypt_kop kopc;
+#endif
+
+ switch (cmd) {
+ case CIOCGSESSION:
+ case CIOCGSESSION2:
+#ifdef COMPAT_FREEBSD32
+ case CIOCGSESSION32:
+ case CIOCGSESSION232:
+ if (cmd == CIOCGSESSION32) {
+ session_op_from_32(data, (struct session_op *)&sopc);
+ sop = (struct session_op *)&sopc;
+ } else if (cmd == CIOCGSESSION232) {
+ session2_op_from_32(data, &sopc);
+ sop = (struct session_op *)&sopc;
+ } else
+#endif
+ sop = (struct session_op *)data;
+ switch (sop->cipher) {
+ case 0:
+ break;
+ case CRYPTO_DES_CBC:
+ txform = &enc_xform_des;
+ break;
+ case CRYPTO_3DES_CBC:
+ txform = &enc_xform_3des;
+ break;
+ case CRYPTO_BLF_CBC:
+ txform = &enc_xform_blf;
+ break;
+ case CRYPTO_CAST_CBC:
+ txform = &enc_xform_cast5;
+ break;
+ case CRYPTO_SKIPJACK_CBC:
+ txform = &enc_xform_skipjack;
+ break;
+ case CRYPTO_AES_CBC:
+ txform = &enc_xform_rijndael128;
+ break;
+ case CRYPTO_AES_XTS:
+ txform = &enc_xform_aes_xts;
+ break;
+ case CRYPTO_NULL_CBC:
+ txform = &enc_xform_null;
+ break;
+ case CRYPTO_ARC4:
+ txform = &enc_xform_arc4;
+ break;
+ case CRYPTO_CAMELLIA_CBC:
+ txform = &enc_xform_camellia;
+ break;
+ default:
+ return (EINVAL);
+ }
+
+ switch (sop->mac) {
+ case 0:
+ break;
+ case CRYPTO_MD5_HMAC:
+ thash = &auth_hash_hmac_md5;
+ break;
+ case CRYPTO_SHA1_HMAC:
+ thash = &auth_hash_hmac_sha1;
+ break;
+ case CRYPTO_SHA2_256_HMAC:
+ thash = &auth_hash_hmac_sha2_256;
+ break;
+ case CRYPTO_SHA2_384_HMAC:
+ thash = &auth_hash_hmac_sha2_384;
+ break;
+ case CRYPTO_SHA2_512_HMAC:
+ thash = &auth_hash_hmac_sha2_512;
+ break;
+ case CRYPTO_RIPEMD160_HMAC:
+ thash = &auth_hash_hmac_ripemd_160;
+ break;
+#ifdef notdef
+ case CRYPTO_MD5:
+ thash = &auth_hash_md5;
+ break;
+ case CRYPTO_SHA1:
+ thash = &auth_hash_sha1;
+ break;
+#endif
+ case CRYPTO_NULL_HMAC:
+ thash = &auth_hash_null;
+ break;
+ default:
+ return (EINVAL);
+ }
+
+ bzero(&crie, sizeof(crie));
+ bzero(&cria, sizeof(cria));
+
+ if (txform) {
+ crie.cri_alg = txform->type;
+ crie.cri_klen = sop->keylen * 8;
+ if (sop->keylen > txform->maxkey ||
+ sop->keylen < txform->minkey) {
+ error = EINVAL;
+ goto bail;
+ }
+
+ crie.cri_key = malloc(crie.cri_klen / 8,
+ M_XDATA, M_WAITOK);
+ if ((error = copyin(sop->key, crie.cri_key,
+ crie.cri_klen / 8)))
+ goto bail;
+ if (thash)
+ crie.cri_next = &cria;
+ }
+
+ if (thash) {
+ cria.cri_alg = thash->type;
+ cria.cri_klen = sop->mackeylen * 8;
+ if (sop->mackeylen != thash->keysize) {
+ error = EINVAL;
+ goto bail;
+ }
+
+ if (cria.cri_klen) {
+ cria.cri_key = malloc(cria.cri_klen / 8,
+ M_XDATA, M_WAITOK);
+ if ((error = copyin(sop->mackey, cria.cri_key,
+ cria.cri_klen / 8)))
+ goto bail;
+ }
+ }
+
+ /* NB: CIOCGSESSION2 has the crid */
+ if (cmd == CIOCGSESSION2
+#ifdef COMPAT_FREEBSD32
+ || cmd == CIOCGSESSION232
+#endif
+ ) {
+ crid = SES2(sop)->crid;
+ error = checkforsoftware(crid);
+ if (error)
+ goto bail;
+ } else
+ crid = CRYPTOCAP_F_HARDWARE;
+ error = crypto_newsession(&sid, (txform ? &crie : &cria), crid);
+ if (error)
+ goto bail;
+
+ cse = csecreate(fcr, sid, crie.cri_key, crie.cri_klen,
+ cria.cri_key, cria.cri_klen, sop->cipher, sop->mac, txform,
+ thash);
+
+ if (cse == NULL) {
+ crypto_freesession(sid);
+ error = EINVAL;
+ goto bail;
+ }
+ sop->ses = cse->ses;
+ if (cmd == CIOCGSESSION2
+#ifdef COMPAT_FREEBSD32
+ || cmd == CIOCGSESSION232
+#endif
+ ) {
+ /* return hardware/driver id */
+ SES2(sop)->crid = CRYPTO_SESID2HID(cse->sid);
+ }
+bail:
+ if (error) {
+ if (crie.cri_key)
+ free(crie.cri_key, M_XDATA);
+ if (cria.cri_key)
+ free(cria.cri_key, M_XDATA);
+ }
+#ifdef COMPAT_FREEBSD32
+ else {
+ if (cmd == CIOCGSESSION32)
+ session_op_to_32(sop, data);
+ else if (cmd == CIOCGSESSION232)
+ session2_op_to_32((struct session2_op *)sop,
+ data);
+ }
+#endif
+ break;
+ case CIOCFSESSION:
+ ses = *(u_int32_t *)data;
+ cse = csefind(fcr, ses);
+ if (cse == NULL)
+ return (EINVAL);
+ csedelete(fcr, cse);
+ error = csefree(cse);
+ break;
+ case CIOCCRYPT:
+#ifdef COMPAT_FREEBSD32
+ case CIOCCRYPT32:
+ if (cmd == CIOCCRYPT32) {
+ cop = &copc;
+ crypt_op_from_32(data, cop);
+ } else
+#endif
+ cop = (struct crypt_op *)data;
+ cse = csefind(fcr, cop->ses);
+ if (cse == NULL)
+ return (EINVAL);
+ error = cryptodev_op(cse, cop, active_cred, td);
+#ifdef COMPAT_FREEBSD32
+ if (error == 0 && cmd == CIOCCRYPT32)
+ crypt_op_to_32(cop, data);
+#endif
+ break;
+ case CIOCKEY:
+ case CIOCKEY2:
+#ifdef COMPAT_FREEBSD32
+ case CIOCKEY32:
+ case CIOCKEY232:
+#endif
+ if (!crypto_userasymcrypto)
+ return (EPERM); /* XXX compat? */
+#ifdef COMPAT_FREEBSD32
+ if (cmd == CIOCKEY32 || cmd == CIOCKEY232) {
+ kop = &kopc;
+ crypt_kop_from_32(data, kop);
+ } else
+#endif
+ kop = (struct crypt_kop *)data;
+ if (cmd == CIOCKEY
+#ifdef COMPAT_FREEBSD32
+ || cmd == CIOCKEY32
+#endif
+ ) {
+ /* NB: crypto core enforces s/w driver use */
+ kop->crk_crid =
+ CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE;
+ }
+ mtx_lock(&Giant);
+ error = cryptodev_key(kop);
+ mtx_unlock(&Giant);
+#ifdef COMPAT_FREEBSD32
+ if (cmd == CIOCKEY32 || cmd == CIOCKEY232)
+ crypt_kop_to_32(kop, data);
+#endif
+ break;
+ case CIOCASYMFEAT:
+ if (!crypto_userasymcrypto) {
+ /*
+ * NB: if user asym crypto operations are
+ * not permitted return "no algorithms"
+ * so well-behaved applications will just
+ * fallback to doing them in software.
+ */
+ *(int *)data = 0;
+ } else
+ error = crypto_getfeat((int *)data);
+ break;
+ case CIOCFINDDEV:
+ error = cryptodev_find((struct crypt_find_op *)data);
+ break;
+ default:
+ error = EINVAL;
+ break;
+ }
+ return (error);
+#undef SES2
+}
+
+static int cryptodev_cb(void *);
+
+
+static int
+cryptodev_op(
+ struct csession *cse,
+ struct crypt_op *cop,
+ struct ucred *active_cred,
+ struct thread *td)
+{
+ struct cryptop *crp = NULL;
+ struct cryptodesc *crde = NULL, *crda = NULL;
+ int error;
+
+ if (cop->len > 256*1024-4)
+ return (E2BIG);
+
+ if (cse->txform) {
+ if (cop->len == 0 || (cop->len % cse->txform->blocksize) != 0)
+ return (EINVAL);
+ }
+
+ cse->uio.uio_iov = &cse->iovec;
+ cse->uio.uio_iovcnt = 1;
+ cse->uio.uio_offset = 0;
+ cse->uio.uio_resid = cop->len;
+ cse->uio.uio_segflg = UIO_SYSSPACE;
+ cse->uio.uio_rw = UIO_WRITE;
+ cse->uio.uio_td = td;
+ cse->uio.uio_iov[0].iov_len = cop->len;
+ if (cse->thash) {
+ cse->uio.uio_iov[0].iov_len += cse->thash->hashsize;
+ cse->uio.uio_resid += cse->thash->hashsize;
+ }
+ cse->uio.uio_iov[0].iov_base = malloc(cse->uio.uio_iov[0].iov_len,
+ M_XDATA, M_WAITOK);
+
+ crp = crypto_getreq((cse->txform != NULL) + (cse->thash != NULL));
+ if (crp == NULL) {
+ error = ENOMEM;
+ goto bail;
+ }
+
+ if (cse->thash) {
+ crda = crp->crp_desc;
+ if (cse->txform)
+ crde = crda->crd_next;
+ } else {
+ if (cse->txform)
+ crde = crp->crp_desc;
+ else {
+ error = EINVAL;
+ goto bail;
+ }
+ }
+
+ if ((error = copyin(cop->src, cse->uio.uio_iov[0].iov_base, cop->len)))
+ goto bail;
+
+ if (crda) {
+ crda->crd_skip = 0;
+ crda->crd_len = cop->len;
+ crda->crd_inject = cop->len;
+
+ crda->crd_alg = cse->mac;
+ crda->crd_key = cse->mackey;
+ crda->crd_klen = cse->mackeylen * 8;
+ }
+
+ if (crde) {
+ if (cop->op == COP_ENCRYPT)
+ crde->crd_flags |= CRD_F_ENCRYPT;
+ else
+ crde->crd_flags &= ~CRD_F_ENCRYPT;
+ crde->crd_len = cop->len;
+ crde->crd_inject = 0;
+
+ crde->crd_alg = cse->cipher;
+ crde->crd_key = cse->key;
+ crde->crd_klen = cse->keylen * 8;
+ }
+
+ crp->crp_ilen = cop->len;
+ crp->crp_flags = CRYPTO_F_IOV | CRYPTO_F_CBIMM
+ | (cop->flags & COP_F_BATCH);
+ crp->crp_buf = (caddr_t)&cse->uio;
+ crp->crp_callback = (int (*) (struct cryptop *)) cryptodev_cb;
+ crp->crp_sid = cse->sid;
+ crp->crp_opaque = (void *)cse;
+
+ if (cop->iv) {
+ if (crde == NULL) {
+ error = EINVAL;
+ goto bail;
+ }
+ if (cse->cipher == CRYPTO_ARC4) { /* XXX use flag? */
+ error = EINVAL;
+ goto bail;
+ }
+ if ((error = copyin(cop->iv, cse->tmp_iv, cse->txform->blocksize)))
+ goto bail;
+ bcopy(cse->tmp_iv, crde->crd_iv, cse->txform->blocksize);
+ crde->crd_flags |= CRD_F_IV_EXPLICIT | CRD_F_IV_PRESENT;
+ crde->crd_skip = 0;
+ } else if (cse->cipher == CRYPTO_ARC4) { /* XXX use flag? */
+ crde->crd_skip = 0;
+ } else if (crde) {
+ crde->crd_flags |= CRD_F_IV_PRESENT;
+ crde->crd_skip = cse->txform->blocksize;
+ crde->crd_len -= cse->txform->blocksize;
+ }
+
+ if (cop->mac && crda == NULL) {
+ error = EINVAL;
+ goto bail;
+ }
+
+ /*
+ * Let the dispatch run unlocked, then interlock against the
+ * callback before checking if the operation completed and going
+ * to sleep.  This ensures drivers don't inherit our lock, which
+ * would result in a lock order reversal between the forced entry
+ * in crypto_dispatch and the crypto_done callback into us.
+ */
+ error = crypto_dispatch(crp);
+ mtx_lock(&cse->lock);
+ if (error == 0 && (crp->crp_flags & CRYPTO_F_DONE) == 0)
+ error = msleep(crp, &cse->lock, PWAIT, "crydev", 0);
+ mtx_unlock(&cse->lock);
+
+ if (error != 0)
+ goto bail;
+
+ if (crp->crp_etype != 0) {
+ error = crp->crp_etype;
+ goto bail;
+ }
+
+ if (cse->error) {
+ error = cse->error;
+ goto bail;
+ }
+
+ if (cop->dst &&
+ (error = copyout(cse->uio.uio_iov[0].iov_base, cop->dst, cop->len)))
+ goto bail;
+
+ if (cop->mac &&
+ (error = copyout((caddr_t)cse->uio.uio_iov[0].iov_base + cop->len,
+ cop->mac, cse->thash->hashsize)))
+ goto bail;
+
+bail:
+ if (crp)
+ crypto_freereq(crp);
+ if (cse->uio.uio_iov[0].iov_base)
+ free(cse->uio.uio_iov[0].iov_base, M_XDATA);
+
+ return (error);
+}
+
+static int
+cryptodev_cb(void *op)
+{
+ struct cryptop *crp = (struct cryptop *) op;
+ struct csession *cse = (struct csession *)crp->crp_opaque;
+ int error;
+
+ error = crp->crp_etype;
+ if (error == EAGAIN)
+ error = crypto_dispatch(crp);
+ mtx_lock(&cse->lock);
+ if (error != 0 || (crp->crp_flags & CRYPTO_F_DONE)) {
+ cse->error = error;
+ wakeup_one(crp);
+ }
+ mtx_unlock(&cse->lock);
+ return (0);
+}
+
+static int
+cryptodevkey_cb(void *op)
+{
+ struct cryptkop *krp = (struct cryptkop *) op;
+
+ wakeup_one(krp);
+ return (0);
+}
+
+static int
+cryptodev_key(struct crypt_kop *kop)
+{
+ struct cryptkop *krp = NULL;
+ int error = EINVAL;
+ int in, out, size, i;
+
+ if (kop->crk_iparams + kop->crk_oparams > CRK_MAXPARAM) {
+ return (EFBIG);
+ }
+
+ in = kop->crk_iparams;
+ out = kop->crk_oparams;
+ switch (kop->crk_op) {
+ case CRK_MOD_EXP:
+ if (in == 3 && out == 1)
+ break;
+ return (EINVAL);
+ case CRK_MOD_EXP_CRT:
+ if (in == 6 && out == 1)
+ break;
+ return (EINVAL);
+ case CRK_DSA_SIGN:
+ if (in == 5 && out == 2)
+ break;
+ return (EINVAL);
+ case CRK_DSA_VERIFY:
+ if (in == 7 && out == 0)
+ break;
+ return (EINVAL);
+ case CRK_DH_COMPUTE_KEY:
+ if (in == 3 && out == 1)
+ break;
+ return (EINVAL);
+ default:
+ return (EINVAL);
+ }
+
+ krp = (struct cryptkop *)malloc(sizeof *krp, M_XDATA, M_WAITOK|M_ZERO);
+ if (!krp)
+ return (ENOMEM);
+ krp->krp_op = kop->crk_op;
+ krp->krp_iparams = kop->crk_iparams;
+ krp->krp_oparams = kop->crk_oparams;
+ krp->krp_crid = kop->crk_crid;
+ krp->krp_status = 0;
+ krp->krp_callback = (int (*) (struct cryptkop *)) cryptodevkey_cb;
+
+ for (i = 0; i < CRK_MAXPARAM; i++) {
+ if (kop->crk_param[i].crp_nbits > 65536)
+ /* Limit is the same as in OpenBSD */
+ goto fail;
+ krp->krp_param[i].crp_nbits = kop->crk_param[i].crp_nbits;
+ }
+ for (i = 0; i < krp->krp_iparams + krp->krp_oparams; i++) {
+ size = (krp->krp_param[i].crp_nbits + 7) / 8;
+ if (size == 0)
+ continue;
+ krp->krp_param[i].crp_p = malloc(size, M_XDATA, M_WAITOK);
+ if (i >= krp->krp_iparams)
+ continue;
+ error = copyin(kop->crk_param[i].crp_p, krp->krp_param[i].crp_p, size);
+ if (error)
+ goto fail;
+ }
+
+ error = crypto_kdispatch(krp);
+ if (error)
+ goto fail;
+ error = tsleep(krp, PSOCK, "crydev", 0);
+ if (error) {
+ /* XXX can this happen? if so, how do we recover? */
+ goto fail;
+ }
+
+ kop->crk_crid = krp->krp_crid; /* device that did the work */
+ if (krp->krp_status != 0) {
+ error = krp->krp_status;
+ goto fail;
+ }
+
+ for (i = krp->krp_iparams; i < krp->krp_iparams + krp->krp_oparams; i++) {
+ size = (krp->krp_param[i].crp_nbits + 7) / 8;
+ if (size == 0)
+ continue;
+ error = copyout(krp->krp_param[i].crp_p, kop->crk_param[i].crp_p, size);
+ if (error)
+ goto fail;
+ }
+
+fail:
+ if (krp) {
+ kop->crk_status = krp->krp_status;
+ for (i = 0; i < CRK_MAXPARAM; i++) {
+ if (krp->krp_param[i].crp_p)
+ free(krp->krp_param[i].crp_p, M_XDATA);
+ }
+ free(krp, M_XDATA);
+ }
+ return (error);
+}
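+
+/*
+ * NB: illustrative -- a CIOCKEY modular exponentiation request per the
+ * parameter counts validated above (3 in, 1 out for CRK_MOD_EXP); the
+ * base/exponent/modulus ordering follows the OpenBSD convention and is
+ * an assumption here, as are the crparam buffers:
+ *
+ *	struct crypt_kop kop;
+ *	memset(&kop, 0, sizeof(kop));
+ *	kop.crk_op = CRK_MOD_EXP;
+ *	kop.crk_iparams = 3;
+ *	kop.crk_oparams = 1;
+ *	kop.crk_param[0] = base;
+ *	kop.crk_param[1] = exponent;
+ *	kop.crk_param[2] = modulus;
+ *	kop.crk_param[3] = result;
+ *	ioctl(cfd, CIOCKEY, &kop);
+ */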
+
+static int
+cryptodev_find(struct crypt_find_op *find)
+{
+ device_t dev;
+
+ if (find->crid != -1) {
+ dev = crypto_find_device_byhid(find->crid);
+ if (dev == NULL)
+ return (ENOENT);
+ strlcpy(find->name, device_get_nameunit(dev),
+ sizeof(find->name));
+ } else {
+ find->crid = crypto_find_driver(find->name);
+ if (find->crid == -1)
+ return (ENOENT);
+ }
+ return (0);
+}
+
+/* ARGSUSED */
+static int
+cryptof_poll(
+ struct file *fp,
+ int events,
+ struct ucred *active_cred,
+ struct thread *td)
+{
+
+ return (0);
+}
+
+/* ARGSUSED */
+static int
+cryptof_kqfilter(struct file *fp, struct knote *kn)
+{
+
+ return (0);
+}
+
+/* ARGSUSED */
+static int
+cryptof_stat(
+ struct file *fp,
+ struct stat *sb,
+ struct ucred *active_cred,
+ struct thread *td)
+{
+
+ return (EOPNOTSUPP);
+}
+
+/* ARGSUSED */
+static int
+cryptof_close(struct file *fp, struct thread *td)
+{
+ struct fcrypt *fcr = fp->f_data;
+ struct csession *cse;
+
+ while ((cse = TAILQ_FIRST(&fcr->csessions))) {
+ TAILQ_REMOVE(&fcr->csessions, cse, next);
+ (void)csefree(cse);
+ }
+ free(fcr, M_XDATA);
+ fp->f_data = NULL;
+ return 0;
+}
+
+static struct csession *
+csefind(struct fcrypt *fcr, u_int ses)
+{
+ struct csession *cse;
+
+ TAILQ_FOREACH(cse, &fcr->csessions, next)
+ if (cse->ses == ses)
+ return (cse);
+ return (NULL);
+}
+
+static int
+csedelete(struct fcrypt *fcr, struct csession *cse_del)
+{
+ struct csession *cse;
+
+ TAILQ_FOREACH(cse, &fcr->csessions, next) {
+ if (cse == cse_del) {
+ TAILQ_REMOVE(&fcr->csessions, cse, next);
+ return (1);
+ }
+ }
+ return (0);
+}
+
+static struct csession *
+cseadd(struct fcrypt *fcr, struct csession *cse)
+{
+ TAILQ_INSERT_TAIL(&fcr->csessions, cse, next);
+ cse->ses = fcr->sesn++;
+ return (cse);
+}
+
+struct csession *
+csecreate(struct fcrypt *fcr, u_int64_t sid, caddr_t key, u_int64_t keylen,
+ caddr_t mackey, u_int64_t mackeylen, u_int32_t cipher, u_int32_t mac,
+ struct enc_xform *txform, struct auth_hash *thash)
+{
+ struct csession *cse;
+
+#ifdef INVARIANTS
+ /* NB: required when mtx_init is built with INVARIANTS */
+ cse = malloc(sizeof(struct csession), M_XDATA, M_NOWAIT | M_ZERO);
+#else
+ cse = malloc(sizeof(struct csession), M_XDATA, M_NOWAIT);
+#endif
+ if (cse == NULL)
+ return NULL;
+ mtx_init(&cse->lock, "cryptodev", "crypto session lock", MTX_DEF);
+ cse->key = key;
+ cse->keylen = keylen/8;
+ cse->mackey = mackey;
+ cse->mackeylen = mackeylen/8;
+ cse->sid = sid;
+ cse->cipher = cipher;
+ cse->mac = mac;
+ cse->txform = txform;
+ cse->thash = thash;
+ cseadd(fcr, cse);
+ return (cse);
+}
+
+static int
+csefree(struct csession *cse)
+{
+ int error;
+
+ error = crypto_freesession(cse->sid);
+ mtx_destroy(&cse->lock);
+ if (cse->key)
+ free(cse->key, M_XDATA);
+ if (cse->mackey)
+ free(cse->mackey, M_XDATA);
+ free(cse, M_XDATA);
+ return (error);
+}
+
+static int
+cryptoopen(struct cdev *dev, int oflags, int devtype, struct thread *td)
+{
+ return (0);
+}
+
+static int
+cryptoread(struct cdev *dev, struct uio *uio, int ioflag)
+{
+ return (EIO);
+}
+
+static int
+cryptowrite(struct cdev *dev, struct uio *uio, int ioflag)
+{
+ return (EIO);
+}
+
+static int
+cryptoioctl(struct cdev *dev, u_long cmd, caddr_t data, int flag, struct thread *td)
+{
+ struct file *f;
+ struct fcrypt *fcr;
+ int fd, error;
+
+ switch (cmd) {
+ case CRIOGET:
+ fcr = malloc(sizeof(struct fcrypt), M_XDATA, M_WAITOK);
+ TAILQ_INIT(&fcr->csessions);
+ fcr->sesn = 0;
+
+ error = falloc(td, &f, &fd);
+
+ if (error) {
+ free(fcr, M_XDATA);
+ return (error);
+ }
+ /* falloc automatically provides an extra reference to 'f'. */
+ finit(f, FREAD | FWRITE, DTYPE_CRYPTO, fcr, &cryptofops);
+ *(u_int32_t *)data = fd;
+ fdrop(f, td);
+ break;
+ case CRIOFINDDEV:
+ error = cryptodev_find((struct crypt_find_op *)data);
+ break;
+ case CRIOASYMFEAT:
+ error = crypto_getfeat((int *)data);
+ break;
+ default:
+ error = EINVAL;
+ break;
+ }
+ return (error);
+}
+
+static struct cdevsw crypto_cdevsw = {
+ .d_version = D_VERSION,
+ .d_flags = D_NEEDGIANT,
+ .d_open = cryptoopen,
+ .d_read = cryptoread,
+ .d_write = cryptowrite,
+ .d_ioctl = cryptoioctl,
+ .d_name = "crypto",
+};
+static struct cdev *crypto_dev;
+
+/*
+ * Initialization code, both for static and dynamic loading.
+ */
+static int
+cryptodev_modevent(module_t mod, int type, void *unused)
+{
+ switch (type) {
+ case MOD_LOAD:
+ if (bootverbose)
+ printf("crypto: <crypto device>\n");
+ crypto_dev = make_dev(&crypto_cdevsw, 0,
+ UID_ROOT, GID_WHEEL, 0666,
+ "crypto");
+ return 0;
+ case MOD_UNLOAD:
+ /*XXX disallow if active sessions */
+ destroy_dev(crypto_dev);
+ return 0;
+ }
+ return EINVAL;
+}
+
+static moduledata_t cryptodev_mod = {
+ "cryptodev",
+ cryptodev_modevent,
+ 0
+};
+MODULE_VERSION(cryptodev, 1);
+DECLARE_MODULE(cryptodev, cryptodev_mod, SI_SUB_PSEUDO, SI_ORDER_ANY);
+MODULE_DEPEND(cryptodev, crypto, 1, 1, 1);
+MODULE_DEPEND(cryptodev, zlib, 1, 1, 1);
diff --git a/rtems/freebsd/opencrypto/cryptodev.h b/rtems/freebsd/opencrypto/cryptodev.h
new file mode 100644
index 00000000..ec2f05e5
--- /dev/null
+++ b/rtems/freebsd/opencrypto/cryptodev.h
@@ -0,0 +1,432 @@
+/* $FreeBSD$ */
+/* $OpenBSD: cryptodev.h,v 1.31 2002/06/11 11:14:29 beck Exp $ */
+
+/*-
+ * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
+ * Copyright (c) 2002-2006 Sam Leffler, Errno Consulting
+ *
+ * This code was written by Angelos D. Keromytis in Athens, Greece, in
+ * February 2000. Network Security Technologies Inc. (NSTI) kindly
+ * supported the development of this code.
+ *
+ * Copyright (c) 2000 Angelos D. Keromytis
+ *
+ * Permission to use, copy, and modify this software with or without fee
+ * is hereby granted, provided that this entire notice is included in
+ * all source code copies of any software which is or includes a copy or
+ * modification of this software.
+ *
+ * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
+ * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
+ * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
+ * PURPOSE.
+ *
+ * Copyright (c) 2001 Theo de Raadt
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Effort sponsored in part by the Defense Advanced Research Projects
+ * Agency (DARPA) and Air Force Research Laboratory, Air Force
+ * Materiel Command, USAF, under agreement number F30602-01-2-0537.
+ *
+ */
+
+#ifndef _CRYPTO_CRYPTO_HH_
+#define _CRYPTO_CRYPTO_HH_
+
+#include <rtems/freebsd/sys/ioccom.h>
+
+/* Some initial values */
+#define CRYPTO_DRIVERS_INITIAL 4
+#define CRYPTO_SW_SESSIONS 32
+
+/* Hash values */
+#define NULL_HASH_LEN 16
+#define MD5_HASH_LEN 16
+#define SHA1_HASH_LEN 20
+#define RIPEMD160_HASH_LEN 20
+#define SHA2_256_HASH_LEN 32
+#define SHA2_384_HASH_LEN 48
+#define SHA2_512_HASH_LEN 64
+#define MD5_KPDK_HASH_LEN 16
+#define SHA1_KPDK_HASH_LEN 20
+/* Maximum hash algorithm result length */
+#define HASH_MAX_LEN SHA2_512_HASH_LEN /* Keep this updated */
+
+/* HMAC values */
+#define NULL_HMAC_BLOCK_LEN 64
+#define MD5_HMAC_BLOCK_LEN 64
+#define SHA1_HMAC_BLOCK_LEN 64
+#define RIPEMD160_HMAC_BLOCK_LEN 64
+#define SHA2_256_HMAC_BLOCK_LEN 64
+#define SHA2_384_HMAC_BLOCK_LEN 128
+#define SHA2_512_HMAC_BLOCK_LEN 128
+/* Maximum HMAC block length */
+#define HMAC_MAX_BLOCK_LEN SHA2_512_HMAC_BLOCK_LEN /* Keep this updated */
+#define HMAC_IPAD_VAL 0x36
+#define HMAC_OPAD_VAL 0x5C
+
+/* Encryption algorithm block sizes */
+#define NULL_BLOCK_LEN 4
+#define DES_BLOCK_LEN 8
+#define DES3_BLOCK_LEN 8
+#define BLOWFISH_BLOCK_LEN 8
+#define SKIPJACK_BLOCK_LEN 8
+#define CAST128_BLOCK_LEN 8
+#define RIJNDAEL128_BLOCK_LEN 16
+#define AES_BLOCK_LEN RIJNDAEL128_BLOCK_LEN
+#define CAMELLIA_BLOCK_LEN 16
+#define EALG_MAX_BLOCK_LEN AES_BLOCK_LEN /* Keep this updated */
+
+#define CRYPTO_ALGORITHM_MIN 1
+#define CRYPTO_DES_CBC 1
+#define CRYPTO_3DES_CBC 2
+#define CRYPTO_BLF_CBC 3
+#define CRYPTO_CAST_CBC 4
+#define CRYPTO_SKIPJACK_CBC 5
+#define CRYPTO_MD5_HMAC 6
+#define CRYPTO_SHA1_HMAC 7
+#define CRYPTO_RIPEMD160_HMAC 8
+#define CRYPTO_MD5_KPDK 9
+#define CRYPTO_SHA1_KPDK 10
+#define CRYPTO_RIJNDAEL128_CBC 11 /* 128 bit blocksize */
+#define CRYPTO_AES_CBC 11 /* 128 bit blocksize -- the same as above */
+#define CRYPTO_ARC4 12
+#define CRYPTO_MD5 13
+#define CRYPTO_SHA1 14
+#define CRYPTO_NULL_HMAC 15
+#define CRYPTO_NULL_CBC 16
+#define CRYPTO_DEFLATE_COMP 17 /* Deflate compression algorithm */
+#define CRYPTO_SHA2_256_HMAC 18
+#define CRYPTO_SHA2_384_HMAC 19
+#define CRYPTO_SHA2_512_HMAC 20
+#define CRYPTO_CAMELLIA_CBC 21
+#define CRYPTO_AES_XTS 22
+#define CRYPTO_ALGORITHM_MAX 22 /* Keep updated - see below */
+
+/* Algorithm flags */
+#define CRYPTO_ALG_FLAG_SUPPORTED 0x01 /* Algorithm is supported */
+#define CRYPTO_ALG_FLAG_RNG_ENABLE 0x02 /* Has HW RNG for DH/DSA */
+#define CRYPTO_ALG_FLAG_DSA_SHA 0x04 /* Can do SHA on msg */
+
+/*
+ * Crypto driver/device flags. They can be set in the crid
+ * parameter when creating a session or submitting a key
+ * op to affect the device/driver assigned. If neither
+ * of these are specified then the crid is assumed to hold
+ * the driver id of an existing (and suitable) device that
+ * must be used to satisfy the request.
+ */
+#define CRYPTO_FLAG_HARDWARE 0x01000000 /* hardware accelerated */
+#define CRYPTO_FLAG_SOFTWARE 0x02000000 /* software implementation */
+
+/* NB: deprecated */
+struct session_op {
+	u_int32_t	cipher;		/* e.g. CRYPTO_DES_CBC */
+	u_int32_t	mac;		/* e.g. CRYPTO_MD5_HMAC */
+
+ u_int32_t keylen; /* cipher key */
+ caddr_t key;
+ int mackeylen; /* mac key */
+ caddr_t mackey;
+
+ u_int32_t ses; /* returns: session # */
+};
+
+struct session2_op {
+	u_int32_t	cipher;		/* e.g. CRYPTO_DES_CBC */
+	u_int32_t	mac;		/* e.g. CRYPTO_MD5_HMAC */
+
+ u_int32_t keylen; /* cipher key */
+ caddr_t key;
+ int mackeylen; /* mac key */
+ caddr_t mackey;
+
+ u_int32_t ses; /* returns: session # */
+ int crid; /* driver id + flags (rw) */
+ int pad[4]; /* for future expansion */
+};
+
+struct crypt_op {
+ u_int32_t ses;
+ u_int16_t op; /* i.e. COP_ENCRYPT */
+#define COP_ENCRYPT 1
+#define COP_DECRYPT 2
+ u_int16_t flags;
+#define COP_F_BATCH 0x0008 /* Batch op if possible */
+ u_int len;
+ caddr_t src, dst; /* become iov[] inside kernel */
+ caddr_t mac; /* must be big enough for chosen MAC */
+ caddr_t iv;
+};
+
+/*
+ * Parameters for looking up a crypto driver/device by
+ * device name or by id. The latter are returned for
+ * created sessions (crid) and completed key operations.
+ */
+struct crypt_find_op {
+ int crid; /* driver id + flags */
+ char name[32]; /* device/driver name */
+};
+
+/* bignum parameter, in packed bytes, ... */
+struct crparam {
+ caddr_t crp_p;
+ u_int crp_nbits;
+};
+
+#define CRK_MAXPARAM 8
+
+struct crypt_kop {
+	u_int	crk_op;		/* e.g. CRK_MOD_EXP */
+ u_int crk_status; /* return status */
+ u_short crk_iparams; /* # of input parameters */
+ u_short crk_oparams; /* # of output parameters */
+ u_int crk_crid; /* NB: only used by CIOCKEY2 (rw) */
+ struct crparam crk_param[CRK_MAXPARAM];
+};
+#define CRK_ALGORITM_MIN 0
+#define CRK_MOD_EXP 0
+#define CRK_MOD_EXP_CRT 1
+#define CRK_DSA_SIGN 2
+#define CRK_DSA_VERIFY 3
+#define CRK_DH_COMPUTE_KEY 4
+#define CRK_ALGORITHM_MAX 4 /* Keep updated - see below */
+
+#define CRF_MOD_EXP (1 << CRK_MOD_EXP)
+#define CRF_MOD_EXP_CRT (1 << CRK_MOD_EXP_CRT)
+#define CRF_DSA_SIGN (1 << CRK_DSA_SIGN)
+#define CRF_DSA_VERIFY (1 << CRK_DSA_VERIFY)
+#define CRF_DH_COMPUTE_KEY (1 << CRK_DH_COMPUTE_KEY)
+
+/*
+ * Issued against an open /dev/crypto descriptor to get a cloned descriptor.
+ * Please use F_SETFD against the cloned descriptor.
+ */
+#define CRIOGET _IOWR('c', 100, u_int32_t)
+#define CRIOASYMFEAT CIOCASYMFEAT
+#define CRIOFINDDEV CIOCFINDDEV
+
+/* the following are done against the cloned descriptor */
+#define CIOCGSESSION _IOWR('c', 101, struct session_op)
+#define CIOCFSESSION _IOW('c', 102, u_int32_t)
+#define CIOCCRYPT _IOWR('c', 103, struct crypt_op)
+#define CIOCKEY _IOWR('c', 104, struct crypt_kop)
+#define CIOCASYMFEAT _IOR('c', 105, u_int32_t)
+#define CIOCGSESSION2 _IOWR('c', 106, struct session2_op)
+#define CIOCKEY2 _IOWR('c', 107, struct crypt_kop)
+#define CIOCFINDDEV _IOWR('c', 108, struct crypt_find_op)
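+
+/*
+ * Illustrative userland usage (a sketch only; declarations and error
+ * handling omitted, not part of the original header):
+ *
+ *	int cfd = open("/dev/crypto", O_RDWR);
+ *	u_int32_t fd;
+ *	ioctl(cfd, CRIOGET, &fd);	(clone a private descriptor)
+ *	fcntl(fd, F_SETFD, FD_CLOEXEC);	(as recommended above)
+ *
+ *	struct session_op sop;
+ *	memset(&sop, 0, sizeof(sop));
+ *	sop.cipher = CRYPTO_AES_CBC;
+ *	sop.keylen = 16;		(key length in bytes)
+ *	sop.key = key;
+ *	ioctl(fd, CIOCGSESSION, &sop);	(sop.ses now holds the session #)
+ *
+ *	struct crypt_op cop;
+ *	memset(&cop, 0, sizeof(cop));
+ *	cop.ses = sop.ses;
+ *	cop.op = COP_ENCRYPT;
+ *	cop.len = len; cop.src = in; cop.dst = out; cop.iv = iv;
+ *	ioctl(fd, CIOCCRYPT, &cop);
+ *	ioctl(fd, CIOCFSESSION, &sop.ses);
+ */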
+
+struct cryptotstat {
+ struct timespec acc; /* total accumulated time */
+ struct timespec min; /* min time */
+ struct timespec max; /* max time */
+ u_int32_t count; /* number of observations */
+};
+
+struct cryptostats {
+ u_int32_t cs_ops; /* symmetric crypto ops submitted */
+ u_int32_t cs_errs; /* symmetric crypto ops that failed */
+	u_int32_t	cs_kops;	/* asymmetric/key ops submitted */
+	u_int32_t	cs_kerrs;	/* asymmetric/key ops that failed */
+ u_int32_t cs_intrs; /* crypto swi thread activations */
+ u_int32_t cs_rets; /* crypto return thread activations */
+ u_int32_t cs_blocks; /* symmetric op driver block */
+	u_int32_t	cs_kblocks;	/* asymmetric/key op driver block */
+ /*
+ * When CRYPTO_TIMING is defined at compile time and the
+ * sysctl debug.crypto is set to 1, the crypto system will
+ * accumulate statistics about how long it takes to process
+ * crypto requests at various points during processing.
+ */
+	struct cryptotstat cs_invoke;	/* crypto_dispatch -> crypto_invoke */
+ struct cryptotstat cs_done; /* crypto_invoke -> crypto_done */
+ struct cryptotstat cs_cb; /* crypto_done -> callback */
+ struct cryptotstat cs_finis; /* callback -> callback return */
+};
+
+#ifdef _KERNEL
+/* Standard initialization structure beginning */
+struct cryptoini {
+ int cri_alg; /* Algorithm to use */
+ int cri_klen; /* Key length, in bits */
+ int cri_mlen; /* Number of bytes we want from the
+ entire hash. 0 means all. */
+ caddr_t cri_key; /* key to use */
+ u_int8_t cri_iv[EALG_MAX_BLOCK_LEN]; /* IV to use */
+ struct cryptoini *cri_next;
+};
+
+/* Describe boundaries of a single crypto operation */
+struct cryptodesc {
+ int crd_skip; /* How many bytes to ignore from start */
+ int crd_len; /* How many bytes to process */
+ int crd_inject; /* Where to inject results, if applicable */
+ int crd_flags;
+
+#define CRD_F_ENCRYPT 0x01 /* Set when doing encryption */
+#define CRD_F_IV_PRESENT 0x02 /* When encrypting, IV is already in
+ place, so don't copy. */
+#define CRD_F_IV_EXPLICIT 0x04 /* IV explicitly provided */
+#define CRD_F_DSA_SHA_NEEDED 0x08 /* Compute SHA-1 of buffer for DSA */
+#define CRD_F_KEY_EXPLICIT 0x10 /* Key explicitly provided */
+#define CRD_F_COMP 0x0f /* Set when doing compression */
+
+ struct cryptoini CRD_INI; /* Initialization/context data */
+#define crd_iv CRD_INI.cri_iv
+#define crd_key CRD_INI.cri_key
+#define crd_alg CRD_INI.cri_alg
+#define crd_klen CRD_INI.cri_klen
+
+ struct cryptodesc *crd_next;
+};
+
+/* Structure describing complete operation */
+struct cryptop {
+ TAILQ_ENTRY(cryptop) crp_next;
+
+ u_int64_t crp_sid; /* Session ID */
+ int crp_ilen; /* Input data total length */
+ int crp_olen; /* Result total length */
+
+ int crp_etype; /*
+ * Error type (zero means no error).
+ * All error codes except EAGAIN
+ * indicate possible data corruption (as in,
+ * the data have been touched). On all
+ * errors, the crp_sid may have changed
+ * (reset to a new one), so the caller
+ * should always check and use the new
+ * value on future requests.
+ */
+ int crp_flags;
+
+#define CRYPTO_F_IMBUF 0x0001 /* Input/output are mbuf chains */
+#define CRYPTO_F_IOV 0x0002 /* Input/output are uio */
+#define CRYPTO_F_REL 0x0004 /* Must return data in same place */
+#define CRYPTO_F_BATCH 0x0008 /* Batch op if possible */
+#define CRYPTO_F_CBIMM 0x0010 /* Do callback immediately */
+#define CRYPTO_F_DONE 0x0020 /* Operation completed */
+#define CRYPTO_F_CBIFSYNC 0x0040 /* Do CBIMM if op is synchronous */
+
+ caddr_t crp_buf; /* Data to be processed */
+ caddr_t crp_opaque; /* Opaque pointer, passed along */
+ struct cryptodesc *crp_desc; /* Linked list of processing descriptors */
+
+ int (*crp_callback)(struct cryptop *); /* Callback function */
+
+ struct bintime crp_tstamp; /* performance time stamp */
+};
+
+#define CRYPTO_BUF_CONTIG 0x0
+#define CRYPTO_BUF_IOV 0x1
+#define CRYPTO_BUF_MBUF 0x2
+
+#define CRYPTO_OP_DECRYPT 0x0
+#define CRYPTO_OP_ENCRYPT 0x1
+
+/*
+ * Hints passed to process methods.
+ */
+#define CRYPTO_HINT_MORE 0x1 /* more ops coming shortly */
+
+struct cryptkop {
+ TAILQ_ENTRY(cryptkop) krp_next;
+
+	u_int		   krp_op;	/* e.g. CRK_MOD_EXP */
+ u_int krp_status; /* return status */
+ u_short krp_iparams; /* # of input parameters */
+ u_short krp_oparams; /* # of output parameters */
+ u_int krp_crid; /* desired device, etc. */
+ u_int32_t krp_hid;
+ struct crparam krp_param[CRK_MAXPARAM]; /* kvm */
+ int (*krp_callback)(struct cryptkop *);
+};
+
+/*
+ * Session ids are 64 bits. The lower 32 bits contain a "local id" which
+ * is a driver-private session identifier. The upper 32 bits contain a
+ * "hardware id" used by the core crypto code to identify the driver and
+ * a copy of the driver's capabilities that can be used by client code to
+ * optimize operation.
+ */
+#define CRYPTO_SESID2HID(_sid) (((_sid) >> 32) & 0x00ffffff)
+#define CRYPTO_SESID2CAPS(_sid) (((_sid) >> 32) & 0xff000000)
+#define CRYPTO_SESID2LID(_sid) (((u_int32_t) (_sid)) & 0xffffffff)
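+
+/*
+ * Example (illustrative values): for sid = 0x04abcdef00000003ULL,
+ * CRYPTO_SESID2HID(sid) == 0x00abcdef (driver id),
+ * CRYPTO_SESID2CAPS(sid) == 0x04000000 (CRYPTOCAP_F_SYNC) and
+ * CRYPTO_SESID2LID(sid) == 3 (driver-private session id).
+ */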
+
+MALLOC_DECLARE(M_CRYPTO_DATA);
+
+extern int crypto_newsession(u_int64_t *sid, struct cryptoini *cri, int hard);
+extern int crypto_freesession(u_int64_t sid);
+#define CRYPTOCAP_F_HARDWARE CRYPTO_FLAG_HARDWARE
+#define CRYPTOCAP_F_SOFTWARE CRYPTO_FLAG_SOFTWARE
+#define CRYPTOCAP_F_SYNC 0x04000000 /* operates synchronously */
+extern int32_t crypto_get_driverid(device_t dev, int flags);
+extern int crypto_find_driver(const char *);
+extern device_t crypto_find_device_byhid(int hid);
+extern int crypto_getcaps(int hid);
+extern int crypto_register(u_int32_t driverid, int alg, u_int16_t maxoplen,
+ u_int32_t flags);
+extern int crypto_kregister(u_int32_t, int, u_int32_t);
+extern int crypto_unregister(u_int32_t driverid, int alg);
+extern int crypto_unregister_all(u_int32_t driverid);
+extern int crypto_dispatch(struct cryptop *crp);
+extern int crypto_kdispatch(struct cryptkop *);
+#define CRYPTO_SYMQ 0x1
+#define CRYPTO_ASYMQ 0x2
+extern int crypto_unblock(u_int32_t, int);
+extern void crypto_done(struct cryptop *crp);
+extern void crypto_kdone(struct cryptkop *);
+extern int crypto_getfeat(int *);
+
+extern void crypto_freereq(struct cryptop *crp);
+extern struct cryptop *crypto_getreq(int num);
+
+extern int crypto_usercrypto; /* userland may do crypto requests */
+extern int crypto_userasymcrypto; /* userland may do asym crypto reqs */
+extern	int crypto_devallowsoft;	/* software crypto devices allowed */
+
+/*
+ * Crypto-related utility routines used mainly by drivers.
+ *
+ * XXX these don't really belong here; but for now they're
+ * kept apart from the rest of the system.
+ */
+struct uio;
+extern void cuio_copydata(struct uio* uio, int off, int len, caddr_t cp);
+extern void cuio_copyback(struct uio* uio, int off, int len, caddr_t cp);
+extern struct iovec *cuio_getptr(struct uio *uio, int loc, int *off);
+extern int cuio_apply(struct uio *uio, int off, int len,
+ int (*f)(void *, void *, u_int), void *arg);
+
+extern void crypto_copyback(int flags, caddr_t buf, int off, int size,
+ caddr_t in);
+extern void crypto_copydata(int flags, caddr_t buf, int off, int size,
+ caddr_t out);
+extern int crypto_apply(int flags, caddr_t buf, int off, int len,
+ int (*f)(void *, void *, u_int), void *arg);
+#endif /* _KERNEL */
+#endif /* _CRYPTO_CRYPTO_HH_ */
diff --git a/rtems/freebsd/opencrypto/cryptosoft.c b/rtems/freebsd/opencrypto/cryptosoft.c
new file mode 100644
index 00000000..0237d784
--- /dev/null
+++ b/rtems/freebsd/opencrypto/cryptosoft.c
@@ -0,0 +1,1156 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/* $OpenBSD: cryptosoft.c,v 1.35 2002/04/26 08:43:50 deraadt Exp $ */
+
+/*-
+ * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
+ * Copyright (c) 2002-2006 Sam Leffler, Errno Consulting
+ *
+ * This code was written by Angelos D. Keromytis in Athens, Greece, in
+ * February 2000. Network Security Technologies Inc. (NSTI) kindly
+ * supported the development of this code.
+ *
+ * Copyright (c) 2000, 2001 Angelos D. Keromytis
+ *
+ * Permission to use, copy, and modify this software with or without fee
+ * is hereby granted, provided that this entire notice is included in
+ * all source code copies of any software which is or includes a copy or
+ * modification of this software.
+ *
+ * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
+ * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
+ * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
+ * PURPOSE.
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/module.h>
+#include <rtems/freebsd/sys/sysctl.h>
+#include <rtems/freebsd/sys/errno.h>
+#include <rtems/freebsd/sys/random.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/uio.h>
+
+#include <rtems/freebsd/crypto/blowfish/blowfish.h>
+#include <rtems/freebsd/crypto/sha1.h>
+#include <rtems/freebsd/opencrypto/rmd160.h>
+#include <rtems/freebsd/opencrypto/cast.h>
+#include <rtems/freebsd/opencrypto/skipjack.h>
+#include <rtems/freebsd/sys/md5.h>
+
+#include <rtems/freebsd/opencrypto/cryptodev.h>
+#include <rtems/freebsd/opencrypto/cryptosoft.h>
+#include <rtems/freebsd/opencrypto/xform.h>
+
+#include <rtems/freebsd/sys/kobj.h>
+#include <rtems/freebsd/sys/bus.h>
+#include <rtems/freebsd/local/cryptodev_if.h>
+
+static int32_t swcr_id;
+static struct swcr_data **swcr_sessions = NULL;
+static u_int32_t swcr_sesnum;
+
+u_int8_t hmac_ipad_buffer[HMAC_MAX_BLOCK_LEN];
+u_int8_t hmac_opad_buffer[HMAC_MAX_BLOCK_LEN];
+
+static int swcr_encdec(struct cryptodesc *, struct swcr_data *, caddr_t, int);
+static int swcr_authcompute(struct cryptodesc *, struct swcr_data *, caddr_t, int);
+static int swcr_compdec(struct cryptodesc *, struct swcr_data *, caddr_t, int);
+static int swcr_freesession(device_t dev, u_int64_t tid);
+
+/*
+ * Apply a symmetric encryption/decryption algorithm.
+ */
+static int
+swcr_encdec(struct cryptodesc *crd, struct swcr_data *sw, caddr_t buf,
+ int flags)
+{
+ unsigned char iv[EALG_MAX_BLOCK_LEN], blk[EALG_MAX_BLOCK_LEN], *idat;
+ unsigned char *ivp, piv[EALG_MAX_BLOCK_LEN];
+ struct enc_xform *exf;
+ int i, k, j, blks;
+
+ exf = sw->sw_exf;
+ blks = exf->blocksize;
+
+ /* Check for non-padded data */
+ if (crd->crd_len % blks)
+ return EINVAL;
+
+ /* Initialize the IV */
+ if (crd->crd_flags & CRD_F_ENCRYPT) {
+ /* IV explicitly provided ? */
+ if (crd->crd_flags & CRD_F_IV_EXPLICIT)
+ bcopy(crd->crd_iv, iv, blks);
+ else
+ arc4rand(iv, blks, 0);
+
+ /* Do we need to write the IV */
+ if (!(crd->crd_flags & CRD_F_IV_PRESENT))
+ crypto_copyback(flags, buf, crd->crd_inject, blks, iv);
+
+ } else { /* Decryption */
+ /* IV explicitly provided ? */
+ if (crd->crd_flags & CRD_F_IV_EXPLICIT)
+ bcopy(crd->crd_iv, iv, blks);
+ else {
+ /* Get IV off buf */
+ crypto_copydata(flags, buf, crd->crd_inject, blks, iv);
+ }
+ }
+
+ if (crd->crd_flags & CRD_F_KEY_EXPLICIT) {
+ int error;
+
+ if (sw->sw_kschedule)
+ exf->zerokey(&(sw->sw_kschedule));
+ error = exf->setkey(&sw->sw_kschedule,
+ crd->crd_key, crd->crd_klen / 8);
+ if (error)
+ return (error);
+ }
+
+ ivp = iv;
+
+ /*
+ * xforms that provide a reinit method perform all IV
+ * handling themselves.
+ */
+ if (exf->reinit)
+ exf->reinit(sw->sw_kschedule, iv);
+
+ if (flags & CRYPTO_F_IMBUF) {
+ struct mbuf *m = (struct mbuf *) buf;
+
+ /* Find beginning of data */
+ m = m_getptr(m, crd->crd_skip, &k);
+ if (m == NULL)
+ return EINVAL;
+
+ i = crd->crd_len;
+
+ while (i > 0) {
+ /*
+ * If there's insufficient data at the end of
+ * an mbuf, we have to do some copying.
+ */
+ if (m->m_len < k + blks && m->m_len != k) {
+ m_copydata(m, k, blks, blk);
+
+ /* Actual encryption/decryption */
+ if (exf->reinit) {
+ if (crd->crd_flags & CRD_F_ENCRYPT) {
+ exf->encrypt(sw->sw_kschedule,
+ blk);
+ } else {
+ exf->decrypt(sw->sw_kschedule,
+ blk);
+ }
+ } else if (crd->crd_flags & CRD_F_ENCRYPT) {
+ /* XOR with previous block */
+ for (j = 0; j < blks; j++)
+ blk[j] ^= ivp[j];
+
+ exf->encrypt(sw->sw_kschedule, blk);
+
+ /*
+ * Keep encrypted block for XOR'ing
+ * with next block
+ */
+ bcopy(blk, iv, blks);
+ ivp = iv;
+ } else { /* decrypt */
+ /*
+ * Keep encrypted block for XOR'ing
+ * with next block
+ */
+ if (ivp == iv)
+ bcopy(blk, piv, blks);
+ else
+ bcopy(blk, iv, blks);
+
+ exf->decrypt(sw->sw_kschedule, blk);
+
+ /* XOR with previous block */
+ for (j = 0; j < blks; j++)
+ blk[j] ^= ivp[j];
+
+ if (ivp == iv)
+ bcopy(piv, iv, blks);
+ else
+ ivp = iv;
+ }
+
+ /* Copy back decrypted block */
+ m_copyback(m, k, blks, blk);
+
+ /* Advance pointer */
+ m = m_getptr(m, k + blks, &k);
+ if (m == NULL)
+ return EINVAL;
+
+ i -= blks;
+
+ /* Could be done... */
+ if (i == 0)
+ break;
+ }
+
+ /* Skip possibly empty mbufs */
+ if (k == m->m_len) {
+ for (m = m->m_next; m && m->m_len == 0;
+ m = m->m_next)
+ ;
+ k = 0;
+ }
+
+ /* Sanity check */
+ if (m == NULL)
+ return EINVAL;
+
+ /*
+ * Warning: idat may point to garbage here, but
+ * we only use it in the while() loop, only if
+ * there are indeed enough data.
+ */
+ idat = mtod(m, unsigned char *) + k;
+
+ while (m->m_len >= k + blks && i > 0) {
+ if (exf->reinit) {
+ if (crd->crd_flags & CRD_F_ENCRYPT) {
+ exf->encrypt(sw->sw_kschedule,
+ idat);
+ } else {
+ exf->decrypt(sw->sw_kschedule,
+ idat);
+ }
+ } else if (crd->crd_flags & CRD_F_ENCRYPT) {
+ /* XOR with previous block/IV */
+ for (j = 0; j < blks; j++)
+ idat[j] ^= ivp[j];
+
+ exf->encrypt(sw->sw_kschedule, idat);
+ ivp = idat;
+ } else { /* decrypt */
+ /*
+ * Keep encrypted block to be used
+ * in next block's processing.
+ */
+ if (ivp == iv)
+ bcopy(idat, piv, blks);
+ else
+ bcopy(idat, iv, blks);
+
+ exf->decrypt(sw->sw_kschedule, idat);
+
+ /* XOR with previous block/IV */
+ for (j = 0; j < blks; j++)
+ idat[j] ^= ivp[j];
+
+ if (ivp == iv)
+ bcopy(piv, iv, blks);
+ else
+ ivp = iv;
+ }
+
+ idat += blks;
+ k += blks;
+ i -= blks;
+ }
+ }
+
+ return 0; /* Done with mbuf encryption/decryption */
+ } else if (flags & CRYPTO_F_IOV) {
+ struct uio *uio = (struct uio *) buf;
+ struct iovec *iov;
+
+ /* Find beginning of data */
+ iov = cuio_getptr(uio, crd->crd_skip, &k);
+ if (iov == NULL)
+ return EINVAL;
+
+ i = crd->crd_len;
+
+ while (i > 0) {
+ /*
+ * If there's insufficient data at the end of
+ * an iovec, we have to do some copying.
+ */
+ if (iov->iov_len < k + blks && iov->iov_len != k) {
+ cuio_copydata(uio, k, blks, blk);
+
+ /* Actual encryption/decryption */
+ if (exf->reinit) {
+ if (crd->crd_flags & CRD_F_ENCRYPT) {
+ exf->encrypt(sw->sw_kschedule,
+ blk);
+ } else {
+ exf->decrypt(sw->sw_kschedule,
+ blk);
+ }
+ } else if (crd->crd_flags & CRD_F_ENCRYPT) {
+ /* XOR with previous block */
+ for (j = 0; j < blks; j++)
+ blk[j] ^= ivp[j];
+
+ exf->encrypt(sw->sw_kschedule, blk);
+
+ /*
+ * Keep encrypted block for XOR'ing
+ * with next block
+ */
+ bcopy(blk, iv, blks);
+ ivp = iv;
+ } else { /* decrypt */
+ /*
+ * Keep encrypted block for XOR'ing
+ * with next block
+ */
+ if (ivp == iv)
+ bcopy(blk, piv, blks);
+ else
+ bcopy(blk, iv, blks);
+
+ exf->decrypt(sw->sw_kschedule, blk);
+
+ /* XOR with previous block */
+ for (j = 0; j < blks; j++)
+ blk[j] ^= ivp[j];
+
+ if (ivp == iv)
+ bcopy(piv, iv, blks);
+ else
+ ivp = iv;
+ }
+
+ /* Copy back decrypted block */
+ cuio_copyback(uio, k, blks, blk);
+
+ /* Advance pointer */
+ iov = cuio_getptr(uio, k + blks, &k);
+ if (iov == NULL)
+ return EINVAL;
+
+ i -= blks;
+
+ /* Could be done... */
+ if (i == 0)
+ break;
+ }
+
+ /*
+ * Warning: idat may point to garbage here, but
+ * we only use it in the while() loop, only if
+ * there are indeed enough data.
+ */
+ idat = (char *)iov->iov_base + k;
+
+ while (iov->iov_len >= k + blks && i > 0) {
+ if (exf->reinit) {
+ if (crd->crd_flags & CRD_F_ENCRYPT) {
+ exf->encrypt(sw->sw_kschedule,
+ idat);
+ } else {
+ exf->decrypt(sw->sw_kschedule,
+ idat);
+ }
+ } else if (crd->crd_flags & CRD_F_ENCRYPT) {
+ /* XOR with previous block/IV */
+ for (j = 0; j < blks; j++)
+ idat[j] ^= ivp[j];
+
+ exf->encrypt(sw->sw_kschedule, idat);
+ ivp = idat;
+ } else { /* decrypt */
+ /*
+ * Keep encrypted block to be used
+ * in next block's processing.
+ */
+ if (ivp == iv)
+ bcopy(idat, piv, blks);
+ else
+ bcopy(idat, iv, blks);
+
+ exf->decrypt(sw->sw_kschedule, idat);
+
+ /* XOR with previous block/IV */
+ for (j = 0; j < blks; j++)
+ idat[j] ^= ivp[j];
+
+ if (ivp == iv)
+ bcopy(piv, iv, blks);
+ else
+ ivp = iv;
+ }
+
+ idat += blks;
+ k += blks;
+ i -= blks;
+ }
+ if (k == iov->iov_len) {
+ iov++;
+ k = 0;
+ }
+ }
+
+ return 0; /* Done with iovec encryption/decryption */
+ } else { /* contiguous buffer */
+ if (exf->reinit) {
+ for (i = crd->crd_skip;
+ i < crd->crd_skip + crd->crd_len; i += blks) {
+ if (crd->crd_flags & CRD_F_ENCRYPT)
+ exf->encrypt(sw->sw_kschedule, buf + i);
+ else
+ exf->decrypt(sw->sw_kschedule, buf + i);
+ }
+ } else if (crd->crd_flags & CRD_F_ENCRYPT) {
+ for (i = crd->crd_skip;
+ i < crd->crd_skip + crd->crd_len; i += blks) {
+ /* XOR with the IV/previous block, as appropriate. */
+ if (i == crd->crd_skip)
+ for (k = 0; k < blks; k++)
+ buf[i + k] ^= ivp[k];
+ else
+ for (k = 0; k < blks; k++)
+ buf[i + k] ^= buf[i + k - blks];
+ exf->encrypt(sw->sw_kschedule, buf + i);
+ }
+ } else { /* Decrypt */
+ /*
+ * Start at the end, so we don't need to keep the encrypted
+ * block as the IV for the next block.
+ */
+ for (i = crd->crd_skip + crd->crd_len - blks;
+ i >= crd->crd_skip; i -= blks) {
+ exf->decrypt(sw->sw_kschedule, buf + i);
+
+ /* XOR with the IV/previous block, as appropriate */
+ if (i == crd->crd_skip)
+ for (k = 0; k < blks; k++)
+ buf[i + k] ^= ivp[k];
+ else
+ for (k = 0; k < blks; k++)
+ buf[i + k] ^= buf[i + k - blks];
+ }
+ }
+
+ return 0; /* Done with contiguous buffer encryption/decryption */
+ }
+
+ /* Unreachable */
+ return EINVAL;
+}
+
+static void
+swcr_authprepare(struct auth_hash *axf, struct swcr_data *sw, u_char *key,
+ int klen)
+{
+ int k;
+
+ klen /= 8;
+
+ switch (axf->type) {
+ case CRYPTO_MD5_HMAC:
+ case CRYPTO_SHA1_HMAC:
+ case CRYPTO_SHA2_256_HMAC:
+ case CRYPTO_SHA2_384_HMAC:
+ case CRYPTO_SHA2_512_HMAC:
+ case CRYPTO_NULL_HMAC:
+ case CRYPTO_RIPEMD160_HMAC:
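+		/*
+		 * Precompute the two keyed states of the standard HMAC
+		 * construction (RFC 2104):
+		 *   HMAC(K, m) = H((K ^ opad) || H((K ^ ipad) || m))
+		 * sw_ictx caches the inner state and sw_octx the outer
+		 * one, so per-request work only hashes the message and
+		 * the inner digest.
+		 */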
+ for (k = 0; k < klen; k++)
+ key[k] ^= HMAC_IPAD_VAL;
+
+ axf->Init(sw->sw_ictx);
+ axf->Update(sw->sw_ictx, key, klen);
+ axf->Update(sw->sw_ictx, hmac_ipad_buffer, axf->blocksize - klen);
+
+ for (k = 0; k < klen; k++)
+ key[k] ^= (HMAC_IPAD_VAL ^ HMAC_OPAD_VAL);
+
+ axf->Init(sw->sw_octx);
+ axf->Update(sw->sw_octx, key, klen);
+ axf->Update(sw->sw_octx, hmac_opad_buffer, axf->blocksize - klen);
+
+ for (k = 0; k < klen; k++)
+ key[k] ^= HMAC_OPAD_VAL;
+ break;
+ case CRYPTO_MD5_KPDK:
+ case CRYPTO_SHA1_KPDK:
+ {
+ /*
+ * We need a buffer that can hold an md5 and a sha1 result
+ * just to throw it away.
+ * What we do here is the initial part of:
+ * ALGO( key, keyfill, .. )
+ * adding the key to sw_ictx and abusing Final() to get the
+ * "keyfill" padding.
+		 * In addition we abuse sw_octx to save the key, so that
+		 * we can append it to the data at the end in
+		 * swcr_authcompute().
+ */
+ u_char buf[SHA1_RESULTLEN];
+
+ sw->sw_klen = klen;
+ bcopy(key, sw->sw_octx, klen);
+ axf->Init(sw->sw_ictx);
+ axf->Update(sw->sw_ictx, key, klen);
+ axf->Final(buf, sw->sw_ictx);
+ break;
+ }
+ default:
+ printf("%s: CRD_F_KEY_EXPLICIT flag given, but algorithm %d "
+ "doesn't use keys.\n", __func__, axf->type);
+ }
+}
+
+/*
+ * Compute keyed-hash authenticator.
+ */
+static int
+swcr_authcompute(struct cryptodesc *crd, struct swcr_data *sw, caddr_t buf,
+ int flags)
+{
+ unsigned char aalg[HASH_MAX_LEN];
+ struct auth_hash *axf;
+ union authctx ctx;
+ int err;
+
+ if (sw->sw_ictx == 0)
+ return EINVAL;
+
+ axf = sw->sw_axf;
+
+ if (crd->crd_flags & CRD_F_KEY_EXPLICIT)
+ swcr_authprepare(axf, sw, crd->crd_key, crd->crd_klen);
+
+ bcopy(sw->sw_ictx, &ctx, axf->ctxsize);
+
+ err = crypto_apply(flags, buf, crd->crd_skip, crd->crd_len,
+ (int (*)(void *, void *, unsigned int))axf->Update, (caddr_t)&ctx);
+ if (err)
+ return err;
+
+ switch (sw->sw_alg) {
+ case CRYPTO_MD5_HMAC:
+ case CRYPTO_SHA1_HMAC:
+ case CRYPTO_SHA2_256_HMAC:
+ case CRYPTO_SHA2_384_HMAC:
+ case CRYPTO_SHA2_512_HMAC:
+ case CRYPTO_RIPEMD160_HMAC:
+ if (sw->sw_octx == NULL)
+ return EINVAL;
+
+ axf->Final(aalg, &ctx);
+ bcopy(sw->sw_octx, &ctx, axf->ctxsize);
+ axf->Update(&ctx, aalg, axf->hashsize);
+ axf->Final(aalg, &ctx);
+ break;
+
+ case CRYPTO_MD5_KPDK:
+ case CRYPTO_SHA1_KPDK:
+ /* If we have no key saved, return error. */
+ if (sw->sw_octx == NULL)
+ return EINVAL;
+
+ /*
+ * Add the trailing copy of the key (see comment in
+ * swcr_authprepare()) after the data:
+ * ALGO( .., key, algofill )
+ * and let Final() do the proper, natural "algofill"
+ * padding.
+ */
+ axf->Update(&ctx, sw->sw_octx, sw->sw_klen);
+ axf->Final(aalg, &ctx);
+ break;
+
+ case CRYPTO_NULL_HMAC:
+ axf->Final(aalg, &ctx);
+ break;
+ }
+
+ /* Inject the authentication data */
+ crypto_copyback(flags, buf, crd->crd_inject,
+ sw->sw_mlen == 0 ? axf->hashsize : sw->sw_mlen, aalg);
+ return 0;
+}
+
+/*
+ * Apply a compression/decompression algorithm
+ */
+static int
+swcr_compdec(struct cryptodesc *crd, struct swcr_data *sw,
+ caddr_t buf, int flags)
+{
+ u_int8_t *data, *out;
+ struct comp_algo *cxf;
+ int adj;
+ u_int32_t result;
+
+ cxf = sw->sw_cxf;
+
+	/*
+	 * We must process the whole buffer of data in one pass, so if
+	 * the data is not contiguous (e.g. spread over an mbuf chain),
+	 * copy it into a contiguous buffer first.
+	 */
+
+ data = malloc(crd->crd_len, M_CRYPTO_DATA, M_NOWAIT);
+ if (data == NULL)
+ return (EINVAL);
+ crypto_copydata(flags, buf, crd->crd_skip, crd->crd_len, data);
+
+ if (crd->crd_flags & CRD_F_COMP)
+ result = cxf->compress(data, crd->crd_len, &out);
+ else
+ result = cxf->decompress(data, crd->crd_len, &out);
+
+ free(data, M_CRYPTO_DATA);
+ if (result == 0)
+ return EINVAL;
+
+	/*
+	 * Copy back the (de)compressed data.  m_copyback extends the
+	 * mbuf as necessary.
+	 */
+ sw->sw_size = result;
+ /* Check the compressed size when doing compression */
+ if (crd->crd_flags & CRD_F_COMP) {
+ if (result >= crd->crd_len) {
+ /* Compression was useless, we lost time */
+ free(out, M_CRYPTO_DATA);
+ return 0;
+ }
+ }
+
+ crypto_copyback(flags, buf, crd->crd_skip, result, out);
+ if (result < crd->crd_len) {
+ adj = result - crd->crd_len;
+ if (flags & CRYPTO_F_IMBUF) {
+ m_adj((struct mbuf *)buf, adj);
+ } else if (flags & CRYPTO_F_IOV) {
+ struct uio *uio = (struct uio *)buf;
+ int ind;
+
+ adj = crd->crd_len - result;
+ ind = uio->uio_iovcnt - 1;
+
+ while (adj > 0 && ind >= 0) {
+ if (adj < uio->uio_iov[ind].iov_len) {
+ uio->uio_iov[ind].iov_len -= adj;
+ break;
+ }
+
+ adj -= uio->uio_iov[ind].iov_len;
+ uio->uio_iov[ind].iov_len = 0;
+ ind--;
+ uio->uio_iovcnt--;
+ }
+ }
+ }
+ free(out, M_CRYPTO_DATA);
+ return 0;
+}
+
+/*
+ * Generate a new software session.
+ */
+static int
+swcr_newsession(device_t dev, u_int32_t *sid, struct cryptoini *cri)
+{
+ struct swcr_data **swd;
+ struct auth_hash *axf;
+ struct enc_xform *txf;
+ struct comp_algo *cxf;
+ u_int32_t i;
+ int error;
+
+ if (sid == NULL || cri == NULL)
+ return EINVAL;
+
+ if (swcr_sessions) {
+ for (i = 1; i < swcr_sesnum; i++)
+ if (swcr_sessions[i] == NULL)
+ break;
+ } else
+ i = 1; /* NB: to silence compiler warning */
+
+ if (swcr_sessions == NULL || i == swcr_sesnum) {
+ if (swcr_sessions == NULL) {
+ i = 1; /* We leave swcr_sessions[0] empty */
+ swcr_sesnum = CRYPTO_SW_SESSIONS;
+ } else
+ swcr_sesnum *= 2;
+
+ swd = malloc(swcr_sesnum * sizeof(struct swcr_data *),
+ M_CRYPTO_DATA, M_NOWAIT|M_ZERO);
+ if (swd == NULL) {
+ /* Reset session number */
+ if (swcr_sesnum == CRYPTO_SW_SESSIONS)
+ swcr_sesnum = 0;
+ else
+ swcr_sesnum /= 2;
+ return ENOBUFS;
+ }
+
+ /* Copy existing sessions */
+ if (swcr_sessions != NULL) {
+ bcopy(swcr_sessions, swd,
+ (swcr_sesnum / 2) * sizeof(struct swcr_data *));
+ free(swcr_sessions, M_CRYPTO_DATA);
+ }
+
+ swcr_sessions = swd;
+ }
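+	/*
+	 * Example (illustrative): the table starts with
+	 * CRYPTO_SW_SESSIONS (32) slots and slot 0 is left unused;
+	 * once slots 1..31 are all taken, swcr_sesnum doubles to 64
+	 * and the old pointers are copied into the new table above.
+	 */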
+
+ swd = &swcr_sessions[i];
+ *sid = i;
+
+ while (cri) {
+ *swd = malloc(sizeof(struct swcr_data),
+ M_CRYPTO_DATA, M_NOWAIT|M_ZERO);
+ if (*swd == NULL) {
+ swcr_freesession(dev, i);
+ return ENOBUFS;
+ }
+
+ switch (cri->cri_alg) {
+ case CRYPTO_DES_CBC:
+ txf = &enc_xform_des;
+ goto enccommon;
+ case CRYPTO_3DES_CBC:
+ txf = &enc_xform_3des;
+ goto enccommon;
+ case CRYPTO_BLF_CBC:
+ txf = &enc_xform_blf;
+ goto enccommon;
+ case CRYPTO_CAST_CBC:
+ txf = &enc_xform_cast5;
+ goto enccommon;
+ case CRYPTO_SKIPJACK_CBC:
+ txf = &enc_xform_skipjack;
+ goto enccommon;
+ case CRYPTO_RIJNDAEL128_CBC:
+ txf = &enc_xform_rijndael128;
+ goto enccommon;
+ case CRYPTO_AES_XTS:
+ txf = &enc_xform_aes_xts;
+ goto enccommon;
+ case CRYPTO_CAMELLIA_CBC:
+ txf = &enc_xform_camellia;
+ goto enccommon;
+ case CRYPTO_NULL_CBC:
+ txf = &enc_xform_null;
+ goto enccommon;
+ enccommon:
+ if (cri->cri_key != NULL) {
+ error = txf->setkey(&((*swd)->sw_kschedule),
+ cri->cri_key, cri->cri_klen / 8);
+ if (error) {
+ swcr_freesession(dev, i);
+ return error;
+ }
+ }
+ (*swd)->sw_exf = txf;
+ break;
+
+ case CRYPTO_MD5_HMAC:
+ axf = &auth_hash_hmac_md5;
+ goto authcommon;
+ case CRYPTO_SHA1_HMAC:
+ axf = &auth_hash_hmac_sha1;
+ goto authcommon;
+ case CRYPTO_SHA2_256_HMAC:
+ axf = &auth_hash_hmac_sha2_256;
+ goto authcommon;
+ case CRYPTO_SHA2_384_HMAC:
+ axf = &auth_hash_hmac_sha2_384;
+ goto authcommon;
+ case CRYPTO_SHA2_512_HMAC:
+ axf = &auth_hash_hmac_sha2_512;
+ goto authcommon;
+ case CRYPTO_NULL_HMAC:
+ axf = &auth_hash_null;
+ goto authcommon;
+ case CRYPTO_RIPEMD160_HMAC:
+ axf = &auth_hash_hmac_ripemd_160;
+ authcommon:
+ (*swd)->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA,
+ M_NOWAIT);
+ if ((*swd)->sw_ictx == NULL) {
+ swcr_freesession(dev, i);
+ return ENOBUFS;
+ }
+
+ (*swd)->sw_octx = malloc(axf->ctxsize, M_CRYPTO_DATA,
+ M_NOWAIT);
+ if ((*swd)->sw_octx == NULL) {
+ swcr_freesession(dev, i);
+ return ENOBUFS;
+ }
+
+ if (cri->cri_key != NULL) {
+ swcr_authprepare(axf, *swd, cri->cri_key,
+ cri->cri_klen);
+ }
+
+ (*swd)->sw_mlen = cri->cri_mlen;
+ (*swd)->sw_axf = axf;
+ break;
+
+ case CRYPTO_MD5_KPDK:
+ axf = &auth_hash_key_md5;
+ goto auth2common;
+
+ case CRYPTO_SHA1_KPDK:
+ axf = &auth_hash_key_sha1;
+ auth2common:
+ (*swd)->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA,
+ M_NOWAIT);
+ if ((*swd)->sw_ictx == NULL) {
+ swcr_freesession(dev, i);
+ return ENOBUFS;
+ }
+
+ (*swd)->sw_octx = malloc(cri->cri_klen / 8,
+ M_CRYPTO_DATA, M_NOWAIT);
+ if ((*swd)->sw_octx == NULL) {
+ swcr_freesession(dev, i);
+ return ENOBUFS;
+ }
+
+ /* Store the key so we can "append" it to the payload */
+ if (cri->cri_key != NULL) {
+ swcr_authprepare(axf, *swd, cri->cri_key,
+ cri->cri_klen);
+ }
+
+ (*swd)->sw_mlen = cri->cri_mlen;
+ (*swd)->sw_axf = axf;
+ break;
+#ifdef notdef
+ case CRYPTO_MD5:
+ axf = &auth_hash_md5;
+ goto auth3common;
+
+ case CRYPTO_SHA1:
+ axf = &auth_hash_sha1;
+ auth3common:
+ (*swd)->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA,
+ M_NOWAIT);
+ if ((*swd)->sw_ictx == NULL) {
+ swcr_freesession(dev, i);
+ return ENOBUFS;
+ }
+
+ axf->Init((*swd)->sw_ictx);
+ (*swd)->sw_mlen = cri->cri_mlen;
+ (*swd)->sw_axf = axf;
+ break;
+#endif
+ case CRYPTO_DEFLATE_COMP:
+ cxf = &comp_algo_deflate;
+ (*swd)->sw_cxf = cxf;
+ break;
+ default:
+ swcr_freesession(dev, i);
+ return EINVAL;
+ }
+
+ (*swd)->sw_alg = cri->cri_alg;
+ cri = cri->cri_next;
+ swd = &((*swd)->sw_next);
+ }
+ return 0;
+}
+
+/*
+ * Free a session.
+ */
+static int
+swcr_freesession(device_t dev, u_int64_t tid)
+{
+ struct swcr_data *swd;
+ struct enc_xform *txf;
+ struct auth_hash *axf;
+ struct comp_algo *cxf;
+ u_int32_t sid = CRYPTO_SESID2LID(tid);
+
+	if (sid >= swcr_sesnum || swcr_sessions == NULL ||
+ swcr_sessions[sid] == NULL)
+ return EINVAL;
+
+ /* Silently accept and return */
+ if (sid == 0)
+ return 0;
+
+ while ((swd = swcr_sessions[sid]) != NULL) {
+ swcr_sessions[sid] = swd->sw_next;
+
+ switch (swd->sw_alg) {
+ case CRYPTO_DES_CBC:
+ case CRYPTO_3DES_CBC:
+ case CRYPTO_BLF_CBC:
+ case CRYPTO_CAST_CBC:
+ case CRYPTO_SKIPJACK_CBC:
+ case CRYPTO_RIJNDAEL128_CBC:
+ case CRYPTO_AES_XTS:
+ case CRYPTO_CAMELLIA_CBC:
+ case CRYPTO_NULL_CBC:
+ txf = swd->sw_exf;
+
+ if (swd->sw_kschedule)
+ txf->zerokey(&(swd->sw_kschedule));
+ break;
+
+ case CRYPTO_MD5_HMAC:
+ case CRYPTO_SHA1_HMAC:
+ case CRYPTO_SHA2_256_HMAC:
+ case CRYPTO_SHA2_384_HMAC:
+ case CRYPTO_SHA2_512_HMAC:
+ case CRYPTO_RIPEMD160_HMAC:
+ case CRYPTO_NULL_HMAC:
+ axf = swd->sw_axf;
+
+ if (swd->sw_ictx) {
+ bzero(swd->sw_ictx, axf->ctxsize);
+ free(swd->sw_ictx, M_CRYPTO_DATA);
+ }
+ if (swd->sw_octx) {
+ bzero(swd->sw_octx, axf->ctxsize);
+ free(swd->sw_octx, M_CRYPTO_DATA);
+ }
+ break;
+
+ case CRYPTO_MD5_KPDK:
+ case CRYPTO_SHA1_KPDK:
+ axf = swd->sw_axf;
+
+ if (swd->sw_ictx) {
+ bzero(swd->sw_ictx, axf->ctxsize);
+ free(swd->sw_ictx, M_CRYPTO_DATA);
+ }
+ if (swd->sw_octx) {
+ bzero(swd->sw_octx, swd->sw_klen);
+ free(swd->sw_octx, M_CRYPTO_DATA);
+ }
+ break;
+
+ case CRYPTO_MD5:
+ case CRYPTO_SHA1:
+ axf = swd->sw_axf;
+
+ if (swd->sw_ictx)
+ free(swd->sw_ictx, M_CRYPTO_DATA);
+ break;
+
+ case CRYPTO_DEFLATE_COMP:
+ cxf = swd->sw_cxf;
+ break;
+ }
+
+ free(swd, M_CRYPTO_DATA);
+ }
+ return 0;
+}
+
+/*
+ * Process a software request.
+ */
+static int
+swcr_process(device_t dev, struct cryptop *crp, int hint)
+{
+ struct cryptodesc *crd;
+ struct swcr_data *sw;
+ u_int32_t lid;
+
+ /* Sanity check */
+ if (crp == NULL)
+ return EINVAL;
+
+ if (crp->crp_desc == NULL || crp->crp_buf == NULL) {
+ crp->crp_etype = EINVAL;
+ goto done;
+ }
+
+ lid = crp->crp_sid & 0xffffffff;
+ if (lid >= swcr_sesnum || lid == 0 || swcr_sessions[lid] == NULL) {
+ crp->crp_etype = ENOENT;
+ goto done;
+ }
+
+ /* Go through crypto descriptors, processing as we go */
+ for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
+ /*
+ * Find the crypto context.
+ *
+ * XXX Note that the logic here prevents us from having
+ * XXX the same algorithm multiple times in a session
+ * XXX (or rather, we can but it won't give us the right
+ * XXX results). To do that, we'd need some way of differentiating
+ * XXX between the various instances of an algorithm (so we can
+ * XXX locate the correct crypto context).
+ */
+ for (sw = swcr_sessions[lid];
+ sw && sw->sw_alg != crd->crd_alg;
+ sw = sw->sw_next)
+ ;
+
+ /* No such context ? */
+ if (sw == NULL) {
+ crp->crp_etype = EINVAL;
+ goto done;
+ }
+ switch (sw->sw_alg) {
+ case CRYPTO_DES_CBC:
+ case CRYPTO_3DES_CBC:
+ case CRYPTO_BLF_CBC:
+ case CRYPTO_CAST_CBC:
+ case CRYPTO_SKIPJACK_CBC:
+ case CRYPTO_RIJNDAEL128_CBC:
+ case CRYPTO_AES_XTS:
+ case CRYPTO_CAMELLIA_CBC:
+ if ((crp->crp_etype = swcr_encdec(crd, sw,
+ crp->crp_buf, crp->crp_flags)) != 0)
+ goto done;
+ break;
+ case CRYPTO_NULL_CBC:
+ crp->crp_etype = 0;
+ break;
+ case CRYPTO_MD5_HMAC:
+ case CRYPTO_SHA1_HMAC:
+ case CRYPTO_SHA2_256_HMAC:
+ case CRYPTO_SHA2_384_HMAC:
+ case CRYPTO_SHA2_512_HMAC:
+ case CRYPTO_RIPEMD160_HMAC:
+ case CRYPTO_NULL_HMAC:
+ case CRYPTO_MD5_KPDK:
+ case CRYPTO_SHA1_KPDK:
+ case CRYPTO_MD5:
+ case CRYPTO_SHA1:
+ if ((crp->crp_etype = swcr_authcompute(crd, sw,
+ crp->crp_buf, crp->crp_flags)) != 0)
+ goto done;
+ break;
+
+ case CRYPTO_DEFLATE_COMP:
+ if ((crp->crp_etype = swcr_compdec(crd, sw,
+ crp->crp_buf, crp->crp_flags)) != 0)
+ goto done;
+ else
+ crp->crp_olen = (int)sw->sw_size;
+ break;
+
+ default:
+ /* Unknown/unsupported algorithm */
+ crp->crp_etype = EINVAL;
+ goto done;
+ }
+ }
+
+done:
+ crypto_done(crp);
+ return 0;
+}
+
+static void
+swcr_identify(driver_t *drv, device_t parent)
+{
+ /* NB: order 10 is so we get attached after h/w devices */
+ if (device_find_child(parent, "cryptosoft", -1) == NULL &&
+ BUS_ADD_CHILD(parent, 10, "cryptosoft", 0) == 0)
+ panic("cryptosoft: could not attach");
+}
+
+static int
+swcr_probe(device_t dev)
+{
+ device_set_desc(dev, "software crypto");
+ return (BUS_PROBE_NOWILDCARD);
+}
+
+static int
+swcr_attach(device_t dev)
+{
+ memset(hmac_ipad_buffer, HMAC_IPAD_VAL, HMAC_MAX_BLOCK_LEN);
+ memset(hmac_opad_buffer, HMAC_OPAD_VAL, HMAC_MAX_BLOCK_LEN);
+
+ swcr_id = crypto_get_driverid(dev,
+ CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_SYNC);
+ if (swcr_id < 0) {
+		device_printf(dev, "cannot initialize!\n");
+ return ENOMEM;
+ }
+#define REGISTER(alg) \
+ crypto_register(swcr_id, alg, 0,0)
+ REGISTER(CRYPTO_DES_CBC);
+ REGISTER(CRYPTO_3DES_CBC);
+ REGISTER(CRYPTO_BLF_CBC);
+ REGISTER(CRYPTO_CAST_CBC);
+ REGISTER(CRYPTO_SKIPJACK_CBC);
+ REGISTER(CRYPTO_NULL_CBC);
+ REGISTER(CRYPTO_MD5_HMAC);
+ REGISTER(CRYPTO_SHA1_HMAC);
+ REGISTER(CRYPTO_SHA2_256_HMAC);
+ REGISTER(CRYPTO_SHA2_384_HMAC);
+ REGISTER(CRYPTO_SHA2_512_HMAC);
+ REGISTER(CRYPTO_RIPEMD160_HMAC);
+ REGISTER(CRYPTO_NULL_HMAC);
+ REGISTER(CRYPTO_MD5_KPDK);
+ REGISTER(CRYPTO_SHA1_KPDK);
+ REGISTER(CRYPTO_MD5);
+ REGISTER(CRYPTO_SHA1);
+ REGISTER(CRYPTO_RIJNDAEL128_CBC);
+ REGISTER(CRYPTO_AES_XTS);
+ REGISTER(CRYPTO_CAMELLIA_CBC);
+ REGISTER(CRYPTO_DEFLATE_COMP);
+#undef REGISTER
+
+ return 0;
+}
+
+static int
+swcr_detach(device_t dev)
+{
+ crypto_unregister_all(swcr_id);
+ if (swcr_sessions != NULL)
+ free(swcr_sessions, M_CRYPTO_DATA);
+ return 0;
+}
+
+static device_method_t swcr_methods[] = {
+ DEVMETHOD(device_identify, swcr_identify),
+ DEVMETHOD(device_probe, swcr_probe),
+ DEVMETHOD(device_attach, swcr_attach),
+ DEVMETHOD(device_detach, swcr_detach),
+
+ DEVMETHOD(cryptodev_newsession, swcr_newsession),
+ DEVMETHOD(cryptodev_freesession,swcr_freesession),
+ DEVMETHOD(cryptodev_process, swcr_process),
+
+ {0, 0},
+};
+
+static driver_t swcr_driver = {
+ "cryptosoft",
+ swcr_methods,
+ 0, /* NB: no softc */
+};
+static devclass_t swcr_devclass;
+
+/*
+ * NB: We explicitly reference the crypto module so we
+ * get the necessary ordering when built as a loadable
+ * module. This is required because we bundle the crypto
+ * module code together with the cryptosoft driver (otherwise
+ * normal module dependencies would handle things).
+ */
+extern int crypto_modevent(struct module *, int, void *);
+/* XXX where to attach */
+DRIVER_MODULE(cryptosoft, nexus, swcr_driver, swcr_devclass, crypto_modevent,0);
+MODULE_VERSION(cryptosoft, 1);
+MODULE_DEPEND(cryptosoft, crypto, 1, 1, 1);
diff --git a/rtems/freebsd/opencrypto/cryptosoft.h b/rtems/freebsd/opencrypto/cryptosoft.h
new file mode 100644
index 00000000..363fdbba
--- /dev/null
+++ b/rtems/freebsd/opencrypto/cryptosoft.h
@@ -0,0 +1,67 @@
+/* $FreeBSD$ */
+/* $OpenBSD: cryptosoft.h,v 1.10 2002/04/22 23:10:09 deraadt Exp $ */
+
+/*-
+ * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
+ *
+ * This code was written by Angelos D. Keromytis in Athens, Greece, in
+ * February 2000. Network Security Technologies Inc. (NSTI) kindly
+ * supported the development of this code.
+ *
+ * Copyright (c) 2000 Angelos D. Keromytis
+ *
+ * Permission to use, copy, and modify this software with or without fee
+ * is hereby granted, provided that this entire notice is included in
+ * all source code copies of any software which is or includes a copy or
+ * modification of this software.
+ *
+ * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
+ * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
+ * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
+ * PURPOSE.
+ */
+
+#ifndef _CRYPTO_CRYPTOSOFT_HH_
+#define _CRYPTO_CRYPTOSOFT_HH_
+
+/* Software session entry */
+struct swcr_data {
+ int sw_alg; /* Algorithm */
+ union {
+ struct {
+ u_int8_t *SW_ictx;
+ u_int8_t *SW_octx;
+ u_int16_t SW_klen;
+ u_int16_t SW_mlen;
+ struct auth_hash *SW_axf;
+ } SWCR_AUTH;
+ struct {
+ u_int8_t *SW_kschedule;
+ struct enc_xform *SW_exf;
+ } SWCR_ENC;
+ struct {
+ u_int32_t SW_size;
+ struct comp_algo *SW_cxf;
+ } SWCR_COMP;
+ } SWCR_UN;
+
+#define sw_ictx SWCR_UN.SWCR_AUTH.SW_ictx
+#define sw_octx SWCR_UN.SWCR_AUTH.SW_octx
+#define sw_klen SWCR_UN.SWCR_AUTH.SW_klen
+#define sw_mlen SWCR_UN.SWCR_AUTH.SW_mlen
+#define sw_axf SWCR_UN.SWCR_AUTH.SW_axf
+#define sw_kschedule SWCR_UN.SWCR_ENC.SW_kschedule
+#define sw_exf SWCR_UN.SWCR_ENC.SW_exf
+#define sw_size SWCR_UN.SWCR_COMP.SW_size
+#define sw_cxf SWCR_UN.SWCR_COMP.SW_cxf
+
+ struct swcr_data *sw_next;
+};
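+
+/*
+ * NB: the sw_* accessor macros above let the per-algorithm code use a
+ * single swcr_data allocation for auth, cipher or compression state;
+ * e.g. swcr_encdec() only touches sw_kschedule/sw_exf (the SWCR_ENC
+ * arm of the union).
+ */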
+
+#ifdef _KERNEL
+extern u_int8_t hmac_ipad_buffer[];
+extern u_int8_t hmac_opad_buffer[];
+#endif /* _KERNEL */
+
+#endif /* _CRYPTO_CRYPTOSOFT_HH_ */
diff --git a/rtems/freebsd/opencrypto/deflate.c b/rtems/freebsd/opencrypto/deflate.c
new file mode 100644
index 00000000..296c3bf8
--- /dev/null
+++ b/rtems/freebsd/opencrypto/deflate.c
@@ -0,0 +1,267 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/* $OpenBSD: deflate.c,v 1.3 2001/08/20 02:45:22 hugh Exp $ */
+
+/*-
+ * Copyright (c) 2001 Jean-Jacques Bernard-Gundol (jj@wabbitt.org)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * This file contains a wrapper around the deflate algorithm
+ * compression functions, using the zlib library (see net/zlib.{c,h}).
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/freebsd/local/opt_kdtrace.h>
+
+#include <rtems/freebsd/sys/types.h>
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/sdt.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/net/zlib.h>
+
+#include <rtems/freebsd/opencrypto/cryptodev.h>
+#include <rtems/freebsd/opencrypto/deflate.h>
+
+SDT_PROVIDER_DECLARE(opencrypto);
+SDT_PROBE_DEFINE2(opencrypto, deflate, deflate_global, entry,
+ "int", "u_int32_t");
+SDT_PROBE_DEFINE5(opencrypto, deflate, deflate_global, bad,
+ "int", "int", "int", "int", "int");
+SDT_PROBE_DEFINE5(opencrypto, deflate, deflate_global, iter,
+ "int", "int", "int", "int", "int");
+SDT_PROBE_DEFINE2(opencrypto, deflate, deflate_global, return,
+ "int", "u_int32_t");
+
+int window_inflate = -1 * MAX_WBITS;
+int window_deflate = -12;
+
+/*
+ * This function takes a block of data and (de)compresses it using the
+ * deflate algorithm.
+ */
+
+u_int32_t
+deflate_global(u_int8_t *data, u_int32_t size, int decomp, u_int8_t **out)
+{
+ /* decomp indicates whether we compress (0) or decompress (1) */
+
+ z_stream zbuf;
+ u_int8_t *output;
+ u_int32_t count, result;
+ int error, i;
+ struct deflate_buf *bufh, *bufp;
+
+ SDT_PROBE2(opencrypto, deflate, deflate_global, entry, decomp, size);
+
+ bufh = bufp = NULL;
+ if (!decomp) {
+ i = 1;
+ } else {
+ /*
+		 * For decompression, start with an output buffer 4x the
+		 * size of the input.  If that is not sufficient, it is
+		 * grown on demand while the decompression runs.
+ */
+ i = 4;
+ }
+ /*
+ * Make sure we do have enough output space. Repeated calls to
+ * deflate need at least 6 bytes of output buffer space to avoid
+ * repeated markers. We will always provide at least 16 bytes.
+ */
+ while ((size * i) < 16)
+ i++;
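+	/*
+	 * Example (illustrative): compressing size = 2 gives i = 8 and
+	 * a 16 byte initial buffer; decompressing size = 100 gives
+	 * i = 4 and a 400 byte initial buffer, grown below on demand.
+	 */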
+
+ bufh = bufp = malloc(sizeof(*bufp) + (size_t)(size * i),
+ M_CRYPTO_DATA, M_NOWAIT);
+ if (bufp == NULL) {
+ SDT_PROBE3(opencrypto, deflate, deflate_global, bad,
+ decomp, 0, __LINE__);
+ goto bad2;
+ }
+ bufp->next = NULL;
+ bufp->size = size * i;
+
+ bzero(&zbuf, sizeof(z_stream));
+ zbuf.zalloc = z_alloc;
+ zbuf.zfree = z_free;
+ zbuf.opaque = Z_NULL;
+ zbuf.next_in = data; /* Data that is going to be processed. */
+ zbuf.avail_in = size; /* Total length of data to be processed. */
+ zbuf.next_out = bufp->data;
+ zbuf.avail_out = bufp->size;
+
+ error = decomp ? inflateInit2(&zbuf, window_inflate) :
+ deflateInit2(&zbuf, Z_DEFAULT_COMPRESSION, Z_METHOD,
+ window_deflate, Z_MEMLEVEL, Z_DEFAULT_STRATEGY);
+ if (error != Z_OK) {
+ SDT_PROBE3(opencrypto, deflate, deflate_global, bad,
+ decomp, error, __LINE__);
+ goto bad;
+ }
+
+ for (;;) {
+ error = decomp ? inflate(&zbuf, Z_SYNC_FLUSH) :
+ deflate(&zbuf, Z_FINISH);
+ if (error != Z_OK && error != Z_STREAM_END) {
+ /*
+ * Unfortunately we are limited to 5 arguments,
+ * thus use two probes.
+ */
+ SDT_PROBE5(opencrypto, deflate, deflate_global, bad,
+ decomp, error, __LINE__,
+ zbuf.avail_in, zbuf.avail_out);
+ SDT_PROBE5(opencrypto, deflate, deflate_global, bad,
+ decomp, error, __LINE__,
+ zbuf.state->dummy, zbuf.total_out);
+ goto bad;
+ }
+ SDT_PROBE5(opencrypto, deflate, deflate_global, iter,
+ decomp, error, __LINE__,
+ zbuf.avail_in, zbuf.avail_out);
+ SDT_PROBE5(opencrypto, deflate, deflate_global, iter,
+ decomp, error, __LINE__,
+ zbuf.state->dummy, zbuf.total_out);
+ if (decomp && zbuf.avail_in == 0 && error == Z_STREAM_END) {
+ /* Done. */
+ break;
+ } else if (!decomp && error == Z_STREAM_END) {
+ /* Done. */
+ break;
+ } else if (zbuf.avail_out == 0) {
+ struct deflate_buf *p;
+
+ /* We need more output space for another iteration. */
+ p = malloc(sizeof(*p) + (size_t)(size * i),
+ M_CRYPTO_DATA, M_NOWAIT);
+ if (p == NULL) {
+ SDT_PROBE3(opencrypto, deflate, deflate_global,
+ bad, decomp, 0, __LINE__);
+ goto bad;
+ }
+ p->next = NULL;
+ p->size = size * i;
+ bufp->next = p;
+ bufp = p;
+ zbuf.next_out = bufp->data;
+ zbuf.avail_out = bufp->size;
+ } else {
+			/* Unexpected result. */
+ /*
+ * Unfortunately we are limited to 5 arguments,
+ * thus, again, use two probes.
+ */
+ SDT_PROBE5(opencrypto, deflate, deflate_global, bad,
+ decomp, error, __LINE__,
+ zbuf.avail_in, zbuf.avail_out);
+ SDT_PROBE5(opencrypto, deflate, deflate_global, bad,
+ decomp, error, __LINE__,
+ zbuf.state->dummy, zbuf.total_out);
+ goto bad;
+ }
+ }
+
+ result = count = zbuf.total_out;
+
+ *out = malloc(result, M_CRYPTO_DATA, M_NOWAIT);
+ if (*out == NULL) {
+ SDT_PROBE3(opencrypto, deflate, deflate_global, bad,
+ decomp, 0, __LINE__);
+ goto bad;
+ }
+ if (decomp)
+ inflateEnd(&zbuf);
+ else
+ deflateEnd(&zbuf);
+ output = *out;
+ for (bufp = bufh; bufp != NULL; ) {
+ if (count > bufp->size) {
+ struct deflate_buf *p;
+
+ bcopy(bufp->data, *out, bufp->size);
+ *out += bufp->size;
+ count -= bufp->size;
+ p = bufp;
+ bufp = bufp->next;
+ free(p, M_CRYPTO_DATA);
+		} else {
+			struct deflate_buf *p;
+
+			/*
+			 * This should be the last buffer holding data, but
+			 * free any trailing (empty) buffers as well so they
+			 * are not leaked.
+			 */
+			bcopy(bufp->data, *out, count);
+			*out += count;
+			count = 0;
+			p = bufp;
+			bufp = bufp->next;
+			free(p, M_CRYPTO_DATA);
+		}
+ }
+ *out = output;
+ SDT_PROBE2(opencrypto, deflate, deflate_global, return, decomp, result);
+ return result;
+
+bad:
+ if (decomp)
+ inflateEnd(&zbuf);
+ else
+ deflateEnd(&zbuf);
+ for (bufp = bufh; bufp != NULL; ) {
+ struct deflate_buf *p;
+
+ p = bufp;
+ bufp = bufp->next;
+ free(p, M_CRYPTO_DATA);
+ }
+bad2:
+ *out = NULL;
+ return 0;
+}
+
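+/*
+ * Memory callbacks handed to zlib via zbuf.zalloc/zfree; zlib requests
+ * 'type' items of 'size' bytes each.
+ */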
+void *
+z_alloc(void *nil, u_int type, u_int size)
+{
+	void *ptr;
+
+	ptr = malloc(type * size, M_CRYPTO_DATA, M_NOWAIT);
+	return ptr;
+}
+
+void
+z_free(void *nil, void *ptr)
+{
+ free(ptr, M_CRYPTO_DATA);
+}
diff --git a/rtems/freebsd/opencrypto/deflate.h b/rtems/freebsd/opencrypto/deflate.h
new file mode 100644
index 00000000..178ea267
--- /dev/null
+++ b/rtems/freebsd/opencrypto/deflate.h
@@ -0,0 +1,60 @@
+/* $FreeBSD$ */
+/* $OpenBSD: deflate.h,v 1.3 2002/03/14 01:26:51 millert Exp $ */
+
+/*-
+ * Copyright (c) 2001 Jean-Jacques Bernard-Gundol (jj@wabbitt.org)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Definition for the wrapper around the deflate compression
+ * algorithm used in /sys/crypto
+ */
+
+#ifndef _CRYPTO_DEFLATE_HH_
+#define _CRYPTO_DEFLATE_HH_
+
+#include <rtems/freebsd/net/zlib.h>
+
+#define Z_METHOD 8
+#define Z_MEMLEVEL 8
+#define MINCOMP 2 /* won't be used, but must be defined */
+#define ZBUF 10
+
+u_int32_t deflate_global(u_int8_t *, u_int32_t, int, u_int8_t **);
+void *z_alloc(void *, u_int, u_int);
+void z_free(void *, void *);
+
+/*
+ * We are going to use a combined allocation to hold the metadata
+ * from the struct immediately followed by the real application data.
+ */
+struct deflate_buf {
+ struct deflate_buf *next;
+ uint32_t size;
+ uint8_t data[];
+};
+
+#endif /* _CRYPTO_DEFLATE_HH_ */
diff --git a/rtems/freebsd/opencrypto/rmd160.c b/rtems/freebsd/opencrypto/rmd160.c
new file mode 100644
index 00000000..e6f8ba4a
--- /dev/null
+++ b/rtems/freebsd/opencrypto/rmd160.c
@@ -0,0 +1,369 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/* $OpenBSD: rmd160.c,v 1.3 2001/09/26 21:40:13 markus Exp $ */
+/*-
+ * Copyright (c) 2001 Markus Friedl. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Preneel, Bosselaers, Dobbertin, "The Cryptographic Hash Function RIPEMD-160",
+ * RSA Laboratories, CryptoBytes, Volume 3, Number 2, Autumn 1997,
+ * ftp://ftp.rsasecurity.com/pub/cryptobytes/crypto3n2.pdf
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/endian.h>
+#include <rtems/freebsd/opencrypto/rmd160.h>
+
+#define PUT_64BIT_LE(cp, value) do { \
+ (cp)[7] = (value) >> 56; \
+ (cp)[6] = (value) >> 48; \
+ (cp)[5] = (value) >> 40; \
+ (cp)[4] = (value) >> 32; \
+ (cp)[3] = (value) >> 24; \
+ (cp)[2] = (value) >> 16; \
+ (cp)[1] = (value) >> 8; \
+ (cp)[0] = (value); } while (0)
+
+#define PUT_32BIT_LE(cp, value) do { \
+ (cp)[3] = (value) >> 24; \
+ (cp)[2] = (value) >> 16; \
+ (cp)[1] = (value) >> 8; \
+ (cp)[0] = (value); } while (0)
+
+#define H0 0x67452301U
+#define H1 0xEFCDAB89U
+#define H2 0x98BADCFEU
+#define H3 0x10325476U
+#define H4 0xC3D2E1F0U
+
+#define K0 0x00000000U
+#define K1 0x5A827999U
+#define K2 0x6ED9EBA1U
+#define K3 0x8F1BBCDCU
+#define K4 0xA953FD4EU
+
+#define KK0 0x50A28BE6U
+#define KK1 0x5C4DD124U
+#define KK2 0x6D703EF3U
+#define KK3 0x7A6D76E9U
+#define KK4 0x00000000U
+
+/* rotate x left n bits. */
+#define ROL(n, x) (((x) << (n)) | ((x) >> (32-(n))))
+
+#define F0(x, y, z) ((x) ^ (y) ^ (z))
+#define F1(x, y, z) (((x) & (y)) | ((~x) & (z)))
+#define F2(x, y, z) (((x) | (~y)) ^ (z))
+#define F3(x, y, z) (((x) & (z)) | ((y) & (~z)))
+#define F4(x, y, z) ((x) ^ ((y) | (~z)))
+
+#define R(a, b, c, d, e, Fj, Kj, sj, rj) \
+ do { \
+ a = ROL(sj, a + Fj(b,c,d) + X(rj) + Kj) + e; \
+ c = ROL(10, c); \
+ } while(0)
+
+#define X(i) x[i]
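+/*
+ * X(i) selects message word i. Each R() performs one RIPEMD-160 step:
+ * a = rol(a + F(b, c, d) + X(r) + K, s) + e, followed by c = rol(c, 10);
+ * note that ROL takes the rotate count as its first argument.
+ */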
+
+static u_char PADDING[64] = {
+ 0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+void
+RMD160Init(RMD160_CTX *ctx)
+{
+ ctx->count = 0;
+ ctx->state[0] = H0;
+ ctx->state[1] = H1;
+ ctx->state[2] = H2;
+ ctx->state[3] = H3;
+ ctx->state[4] = H4;
+}
+
+void
+RMD160Update(RMD160_CTX *ctx, const u_char *input, u_int32_t len)
+{
+ u_int32_t have, off, need;
+
+ have = (ctx->count/8) % 64;
+ need = 64 - have;
+ ctx->count += 8 * len;
+ off = 0;
+
+ if (len >= need) {
+ if (have) {
+ memcpy(ctx->buffer + have, input, need);
+ RMD160Transform(ctx->state, ctx->buffer);
+ off = need;
+ have = 0;
+ }
+ /* now the buffer is empty */
+ while (off + 64 <= len) {
+ RMD160Transform(ctx->state, input+off);
+ off += 64;
+ }
+ }
+ if (off < len)
+ memcpy(ctx->buffer + have, input+off, len-off);
+}
+
+void
+RMD160Final(u_char digest[20], RMD160_CTX *ctx)
+{
+ int i;
+ u_char size[8];
+ u_int32_t padlen;
+
+ PUT_64BIT_LE(size, ctx->count);
+
+ /*
+ * pad to 64 byte blocks, at least one byte from PADDING plus 8 bytes
+ * for the size
+ */
+ padlen = 64 - ((ctx->count/8) % 64);
+ if (padlen < 1 + 8)
+ padlen += 64;
+ RMD160Update(ctx, PADDING, padlen - 8); /* padlen - 8 <= 64 */
+ RMD160Update(ctx, size, 8);
+
+ if (digest != NULL)
+ for (i = 0; i < 5; i++)
+ PUT_32BIT_LE(digest + i*4, ctx->state[i]);
+
+ memset(ctx, 0, sizeof (*ctx));
+}
+
+void
+RMD160Transform(u_int32_t state[5], const u_char block[64])
+{
+ u_int32_t a, b, c, d, e, aa, bb, cc, dd, ee, t, x[16];
+
+#if BYTE_ORDER == LITTLE_ENDIAN
+ memcpy(x, block, 64);
+#else
+ int i;
+
+ for (i = 0; i < 16; i++)
+ x[i] = bswap32(*(const u_int32_t*)(block+i*4));
+#endif
+
+ a = state[0];
+ b = state[1];
+ c = state[2];
+ d = state[3];
+ e = state[4];
+
+ /* Round 1 */
+ R(a, b, c, d, e, F0, K0, 11, 0);
+ R(e, a, b, c, d, F0, K0, 14, 1);
+ R(d, e, a, b, c, F0, K0, 15, 2);
+ R(c, d, e, a, b, F0, K0, 12, 3);
+ R(b, c, d, e, a, F0, K0, 5, 4);
+ R(a, b, c, d, e, F0, K0, 8, 5);
+ R(e, a, b, c, d, F0, K0, 7, 6);
+ R(d, e, a, b, c, F0, K0, 9, 7);
+ R(c, d, e, a, b, F0, K0, 11, 8);
+ R(b, c, d, e, a, F0, K0, 13, 9);
+ R(a, b, c, d, e, F0, K0, 14, 10);
+ R(e, a, b, c, d, F0, K0, 15, 11);
+ R(d, e, a, b, c, F0, K0, 6, 12);
+ R(c, d, e, a, b, F0, K0, 7, 13);
+ R(b, c, d, e, a, F0, K0, 9, 14);
+ R(a, b, c, d, e, F0, K0, 8, 15); /* #15 */
+ /* Round 2 */
+ R(e, a, b, c, d, F1, K1, 7, 7);
+ R(d, e, a, b, c, F1, K1, 6, 4);
+ R(c, d, e, a, b, F1, K1, 8, 13);
+ R(b, c, d, e, a, F1, K1, 13, 1);
+ R(a, b, c, d, e, F1, K1, 11, 10);
+ R(e, a, b, c, d, F1, K1, 9, 6);
+ R(d, e, a, b, c, F1, K1, 7, 15);
+ R(c, d, e, a, b, F1, K1, 15, 3);
+ R(b, c, d, e, a, F1, K1, 7, 12);
+ R(a, b, c, d, e, F1, K1, 12, 0);
+ R(e, a, b, c, d, F1, K1, 15, 9);
+ R(d, e, a, b, c, F1, K1, 9, 5);
+ R(c, d, e, a, b, F1, K1, 11, 2);
+ R(b, c, d, e, a, F1, K1, 7, 14);
+ R(a, b, c, d, e, F1, K1, 13, 11);
+ R(e, a, b, c, d, F1, K1, 12, 8); /* #31 */
+ /* Round 3 */
+ R(d, e, a, b, c, F2, K2, 11, 3);
+ R(c, d, e, a, b, F2, K2, 13, 10);
+ R(b, c, d, e, a, F2, K2, 6, 14);
+ R(a, b, c, d, e, F2, K2, 7, 4);
+ R(e, a, b, c, d, F2, K2, 14, 9);
+ R(d, e, a, b, c, F2, K2, 9, 15);
+ R(c, d, e, a, b, F2, K2, 13, 8);
+ R(b, c, d, e, a, F2, K2, 15, 1);
+ R(a, b, c, d, e, F2, K2, 14, 2);
+ R(e, a, b, c, d, F2, K2, 8, 7);
+ R(d, e, a, b, c, F2, K2, 13, 0);
+ R(c, d, e, a, b, F2, K2, 6, 6);
+ R(b, c, d, e, a, F2, K2, 5, 13);
+ R(a, b, c, d, e, F2, K2, 12, 11);
+ R(e, a, b, c, d, F2, K2, 7, 5);
+ R(d, e, a, b, c, F2, K2, 5, 12); /* #47 */
+ /* Round 4 */
+ R(c, d, e, a, b, F3, K3, 11, 1);
+ R(b, c, d, e, a, F3, K3, 12, 9);
+ R(a, b, c, d, e, F3, K3, 14, 11);
+ R(e, a, b, c, d, F3, K3, 15, 10);
+ R(d, e, a, b, c, F3, K3, 14, 0);
+ R(c, d, e, a, b, F3, K3, 15, 8);
+ R(b, c, d, e, a, F3, K3, 9, 12);
+ R(a, b, c, d, e, F3, K3, 8, 4);
+ R(e, a, b, c, d, F3, K3, 9, 13);
+ R(d, e, a, b, c, F3, K3, 14, 3);
+ R(c, d, e, a, b, F3, K3, 5, 7);
+ R(b, c, d, e, a, F3, K3, 6, 15);
+ R(a, b, c, d, e, F3, K3, 8, 14);
+ R(e, a, b, c, d, F3, K3, 6, 5);
+ R(d, e, a, b, c, F3, K3, 5, 6);
+ R(c, d, e, a, b, F3, K3, 12, 2); /* #63 */
+ /* Round 5 */
+ R(b, c, d, e, a, F4, K4, 9, 4);
+ R(a, b, c, d, e, F4, K4, 15, 0);
+ R(e, a, b, c, d, F4, K4, 5, 5);
+ R(d, e, a, b, c, F4, K4, 11, 9);
+ R(c, d, e, a, b, F4, K4, 6, 7);
+ R(b, c, d, e, a, F4, K4, 8, 12);
+ R(a, b, c, d, e, F4, K4, 13, 2);
+ R(e, a, b, c, d, F4, K4, 12, 10);
+ R(d, e, a, b, c, F4, K4, 5, 14);
+ R(c, d, e, a, b, F4, K4, 12, 1);
+ R(b, c, d, e, a, F4, K4, 13, 3);
+ R(a, b, c, d, e, F4, K4, 14, 8);
+ R(e, a, b, c, d, F4, K4, 11, 11);
+ R(d, e, a, b, c, F4, K4, 8, 6);
+ R(c, d, e, a, b, F4, K4, 5, 15);
+ R(b, c, d, e, a, F4, K4, 6, 13); /* #79 */
+
+ aa = a ; bb = b; cc = c; dd = d; ee = e;
+
+ a = state[0];
+ b = state[1];
+ c = state[2];
+ d = state[3];
+ e = state[4];
+
+ /* Parallel round 1 */
+ R(a, b, c, d, e, F4, KK0, 8, 5);
+ R(e, a, b, c, d, F4, KK0, 9, 14);
+ R(d, e, a, b, c, F4, KK0, 9, 7);
+ R(c, d, e, a, b, F4, KK0, 11, 0);
+ R(b, c, d, e, a, F4, KK0, 13, 9);
+ R(a, b, c, d, e, F4, KK0, 15, 2);
+ R(e, a, b, c, d, F4, KK0, 15, 11);
+ R(d, e, a, b, c, F4, KK0, 5, 4);
+ R(c, d, e, a, b, F4, KK0, 7, 13);
+ R(b, c, d, e, a, F4, KK0, 7, 6);
+ R(a, b, c, d, e, F4, KK0, 8, 15);
+ R(e, a, b, c, d, F4, KK0, 11, 8);
+ R(d, e, a, b, c, F4, KK0, 14, 1);
+ R(c, d, e, a, b, F4, KK0, 14, 10);
+ R(b, c, d, e, a, F4, KK0, 12, 3);
+ R(a, b, c, d, e, F4, KK0, 6, 12); /* #15 */
+ /* Parallel round 2 */
+ R(e, a, b, c, d, F3, KK1, 9, 6);
+ R(d, e, a, b, c, F3, KK1, 13, 11);
+ R(c, d, e, a, b, F3, KK1, 15, 3);
+ R(b, c, d, e, a, F3, KK1, 7, 7);
+ R(a, b, c, d, e, F3, KK1, 12, 0);
+ R(e, a, b, c, d, F3, KK1, 8, 13);
+ R(d, e, a, b, c, F3, KK1, 9, 5);
+ R(c, d, e, a, b, F3, KK1, 11, 10);
+ R(b, c, d, e, a, F3, KK1, 7, 14);
+ R(a, b, c, d, e, F3, KK1, 7, 15);
+ R(e, a, b, c, d, F3, KK1, 12, 8);
+ R(d, e, a, b, c, F3, KK1, 7, 12);
+ R(c, d, e, a, b, F3, KK1, 6, 4);
+ R(b, c, d, e, a, F3, KK1, 15, 9);
+ R(a, b, c, d, e, F3, KK1, 13, 1);
+ R(e, a, b, c, d, F3, KK1, 11, 2); /* #31 */
+ /* Parallel round 3 */
+ R(d, e, a, b, c, F2, KK2, 9, 15);
+ R(c, d, e, a, b, F2, KK2, 7, 5);
+ R(b, c, d, e, a, F2, KK2, 15, 1);
+ R(a, b, c, d, e, F2, KK2, 11, 3);
+ R(e, a, b, c, d, F2, KK2, 8, 7);
+ R(d, e, a, b, c, F2, KK2, 6, 14);
+ R(c, d, e, a, b, F2, KK2, 6, 6);
+ R(b, c, d, e, a, F2, KK2, 14, 9);
+ R(a, b, c, d, e, F2, KK2, 12, 11);
+ R(e, a, b, c, d, F2, KK2, 13, 8);
+ R(d, e, a, b, c, F2, KK2, 5, 12);
+ R(c, d, e, a, b, F2, KK2, 14, 2);
+ R(b, c, d, e, a, F2, KK2, 13, 10);
+ R(a, b, c, d, e, F2, KK2, 13, 0);
+ R(e, a, b, c, d, F2, KK2, 7, 4);
+ R(d, e, a, b, c, F2, KK2, 5, 13); /* #47 */
+ /* Parallel round 4 */
+ R(c, d, e, a, b, F1, KK3, 15, 8);
+ R(b, c, d, e, a, F1, KK3, 5, 6);
+ R(a, b, c, d, e, F1, KK3, 8, 4);
+ R(e, a, b, c, d, F1, KK3, 11, 1);
+ R(d, e, a, b, c, F1, KK3, 14, 3);
+ R(c, d, e, a, b, F1, KK3, 14, 11);
+ R(b, c, d, e, a, F1, KK3, 6, 15);
+ R(a, b, c, d, e, F1, KK3, 14, 0);
+ R(e, a, b, c, d, F1, KK3, 6, 5);
+ R(d, e, a, b, c, F1, KK3, 9, 12);
+ R(c, d, e, a, b, F1, KK3, 12, 2);
+ R(b, c, d, e, a, F1, KK3, 9, 13);
+ R(a, b, c, d, e, F1, KK3, 12, 9);
+ R(e, a, b, c, d, F1, KK3, 5, 7);
+ R(d, e, a, b, c, F1, KK3, 15, 10);
+ R(c, d, e, a, b, F1, KK3, 8, 14); /* #63 */
+ /* Parallel round 5 */
+ R(b, c, d, e, a, F0, KK4, 8, 12);
+ R(a, b, c, d, e, F0, KK4, 5, 15);
+ R(e, a, b, c, d, F0, KK4, 12, 10);
+ R(d, e, a, b, c, F0, KK4, 9, 4);
+ R(c, d, e, a, b, F0, KK4, 12, 1);
+ R(b, c, d, e, a, F0, KK4, 5, 5);
+ R(a, b, c, d, e, F0, KK4, 14, 8);
+ R(e, a, b, c, d, F0, KK4, 6, 7);
+ R(d, e, a, b, c, F0, KK4, 8, 6);
+ R(c, d, e, a, b, F0, KK4, 13, 2);
+ R(b, c, d, e, a, F0, KK4, 6, 13);
+ R(a, b, c, d, e, F0, KK4, 5, 14);
+ R(e, a, b, c, d, F0, KK4, 15, 0);
+ R(d, e, a, b, c, F0, KK4, 13, 3);
+ R(c, d, e, a, b, F0, KK4, 11, 9);
+ R(b, c, d, e, a, F0, KK4, 11, 11); /* #79 */
+
+ t = state[1] + cc + d;
+ state[1] = state[2] + dd + e;
+ state[2] = state[3] + ee + a;
+ state[3] = state[4] + aa + b;
+ state[4] = state[0] + bb + c;
+ state[0] = t;
+}
diff --git a/rtems/freebsd/opencrypto/rmd160.h b/rtems/freebsd/opencrypto/rmd160.h
new file mode 100644
index 00000000..60dce642
--- /dev/null
+++ b/rtems/freebsd/opencrypto/rmd160.h
@@ -0,0 +1,41 @@
+/* $FreeBSD$ */
+/* $OpenBSD: rmd160.h,v 1.3 2002/03/14 01:26:51 millert Exp $ */
+/*-
+ * Copyright (c) 2001 Markus Friedl. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _RMD160_H
+#define _RMD160_H
+
+/* RMD160 context. */
+typedef struct RMD160Context {
+ u_int32_t state[5]; /* state */
+ u_int64_t count; /* number of bits, modulo 2^64 */
+ u_char buffer[64]; /* input buffer */
+} RMD160_CTX;
+
+void RMD160Init(RMD160_CTX *);
+void RMD160Transform(u_int32_t [5], const u_char [64]);
+void RMD160Update(RMD160_CTX *, const u_char *, u_int32_t);
+void RMD160Final(u_char [20], RMD160_CTX *);
+
+#endif /* _RMD160_H */
diff --git a/rtems/freebsd/opencrypto/skipjack.c b/rtems/freebsd/opencrypto/skipjack.c
new file mode 100644
index 00000000..ed8533b4
--- /dev/null
+++ b/rtems/freebsd/opencrypto/skipjack.c
@@ -0,0 +1,262 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/* $OpenBSD: skipjack.c,v 1.3 2001/05/05 00:31:34 angelos Exp $ */
+/*-
+ * Further optimized test implementation of SKIPJACK algorithm
+ * Mark Tillotson <markt@chaos.org.uk>, 25 June 98
+ * Optimizations suit RISC (lots of registers) machines best.
+ *
+ * based on unoptimized implementation of
+ * Panu Rissanen <bande@lut.fi> 960624
+ *
+ * SKIPJACK and KEA Algorithm Specifications
+ * Version 2.0
+ * 29 May 1998
+*/
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/freebsd/sys/param.h>
+
+#include <rtems/freebsd/opencrypto/skipjack.h>
+
+static const u_int8_t ftable[0x100] =
+{
+ 0xa3, 0xd7, 0x09, 0x83, 0xf8, 0x48, 0xf6, 0xf4,
+ 0xb3, 0x21, 0x15, 0x78, 0x99, 0xb1, 0xaf, 0xf9,
+ 0xe7, 0x2d, 0x4d, 0x8a, 0xce, 0x4c, 0xca, 0x2e,
+ 0x52, 0x95, 0xd9, 0x1e, 0x4e, 0x38, 0x44, 0x28,
+ 0x0a, 0xdf, 0x02, 0xa0, 0x17, 0xf1, 0x60, 0x68,
+ 0x12, 0xb7, 0x7a, 0xc3, 0xe9, 0xfa, 0x3d, 0x53,
+ 0x96, 0x84, 0x6b, 0xba, 0xf2, 0x63, 0x9a, 0x19,
+ 0x7c, 0xae, 0xe5, 0xf5, 0xf7, 0x16, 0x6a, 0xa2,
+ 0x39, 0xb6, 0x7b, 0x0f, 0xc1, 0x93, 0x81, 0x1b,
+ 0xee, 0xb4, 0x1a, 0xea, 0xd0, 0x91, 0x2f, 0xb8,
+ 0x55, 0xb9, 0xda, 0x85, 0x3f, 0x41, 0xbf, 0xe0,
+ 0x5a, 0x58, 0x80, 0x5f, 0x66, 0x0b, 0xd8, 0x90,
+ 0x35, 0xd5, 0xc0, 0xa7, 0x33, 0x06, 0x65, 0x69,
+ 0x45, 0x00, 0x94, 0x56, 0x6d, 0x98, 0x9b, 0x76,
+ 0x97, 0xfc, 0xb2, 0xc2, 0xb0, 0xfe, 0xdb, 0x20,
+ 0xe1, 0xeb, 0xd6, 0xe4, 0xdd, 0x47, 0x4a, 0x1d,
+ 0x42, 0xed, 0x9e, 0x6e, 0x49, 0x3c, 0xcd, 0x43,
+ 0x27, 0xd2, 0x07, 0xd4, 0xde, 0xc7, 0x67, 0x18,
+ 0x89, 0xcb, 0x30, 0x1f, 0x8d, 0xc6, 0x8f, 0xaa,
+ 0xc8, 0x74, 0xdc, 0xc9, 0x5d, 0x5c, 0x31, 0xa4,
+ 0x70, 0x88, 0x61, 0x2c, 0x9f, 0x0d, 0x2b, 0x87,
+ 0x50, 0x82, 0x54, 0x64, 0x26, 0x7d, 0x03, 0x40,
+ 0x34, 0x4b, 0x1c, 0x73, 0xd1, 0xc4, 0xfd, 0x3b,
+ 0xcc, 0xfb, 0x7f, 0xab, 0xe6, 0x3e, 0x5b, 0xa5,
+ 0xad, 0x04, 0x23, 0x9c, 0x14, 0x51, 0x22, 0xf0,
+ 0x29, 0x79, 0x71, 0x7e, 0xff, 0x8c, 0x0e, 0xe2,
+ 0x0c, 0xef, 0xbc, 0x72, 0x75, 0x6f, 0x37, 0xa1,
+ 0xec, 0xd3, 0x8e, 0x62, 0x8b, 0x86, 0x10, 0xe8,
+ 0x08, 0x77, 0x11, 0xbe, 0x92, 0x4f, 0x24, 0xc5,
+ 0x32, 0x36, 0x9d, 0xcf, 0xf3, 0xa6, 0xbb, 0xac,
+ 0x5e, 0x6c, 0xa9, 0x13, 0x57, 0x25, 0xb5, 0xe3,
+ 0xbd, 0xa8, 0x3a, 0x01, 0x05, 0x59, 0x2a, 0x46
+};
+
+/*
+ * For each key byte generate a table to represent the function
+ * ftable[in ^ keybyte].
+ *
+ * These tables are used to save an XOR in each stage of the G-function;
+ * the tables are hopefully pointed to by register-allocated variables
+ * k0, k1..k9.
+ */
+
+void
+subkey_table_gen (u_int8_t *key, u_int8_t **key_tables)
+{
+ int i, k;
+
+ for (k = 0; k < 10; k++) {
+ u_int8_t key_byte = key [k];
+ u_int8_t * table = key_tables[k];
+ for (i = 0; i < 0x100; i++)
+ table [i] = ftable [i ^ key_byte];
+ }
+}
+
+
+#define g(k0, k1, k2, k3, ih, il, oh, ol) \
+{ \
+ oh = k##k0 [il] ^ ih; \
+ ol = k##k1 [oh] ^ il; \
+ oh = k##k2 [ol] ^ oh; \
+ ol = k##k3 [oh] ^ ol; \
+}
+
+#define g0(ih, il, oh, ol) g(0, 1, 2, 3, ih, il, oh, ol)
+#define g4(ih, il, oh, ol) g(4, 5, 6, 7, ih, il, oh, ol)
+#define g8(ih, il, oh, ol) g(8, 9, 0, 1, ih, il, oh, ol)
+#define g2(ih, il, oh, ol) g(2, 3, 4, 5, ih, il, oh, ol)
+#define g6(ih, il, oh, ol) g(6, 7, 8, 9, ih, il, oh, ol)
+
+
+#define g_inv(k0, k1, k2, k3, ih, il, oh, ol) \
+{ \
+ ol = k##k3 [ih] ^ il; \
+ oh = k##k2 [ol] ^ ih; \
+ ol = k##k1 [oh] ^ ol; \
+ oh = k##k0 [ol] ^ oh; \
+}
+
+
+#define g0_inv(ih, il, oh, ol) g_inv(0, 1, 2, 3, ih, il, oh, ol)
+#define g4_inv(ih, il, oh, ol) g_inv(4, 5, 6, 7, ih, il, oh, ol)
+#define g8_inv(ih, il, oh, ol) g_inv(8, 9, 0, 1, ih, il, oh, ol)
+#define g2_inv(ih, il, oh, ol) g_inv(2, 3, 4, 5, ih, il, oh, ol)
+#define g6_inv(ih, il, oh, ol) g_inv(6, 7, 8, 9, ih, il, oh, ol)
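+
+/*
+ * gN applies the keyed G permutation starting at key byte N: four
+ * Feistel-style table lookups through kN..k(N+3) (indices mod 10).
+ * The gN_inv variants run the lookups in reverse order to invert it.
+ */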
+
+/* optimized version of the Skipjack algorithm
+ *
+ * the appropriate g-function is inlined for each round
+ *
+ * the data movement is minimized by rotating the names of the
+ * variables w1..w4, not their contents (saves 3 moves per round)
+ *
+ * the loops are completely unrolled (needed to staticize the choice of g)
+ *
+ * compiles to about 470 instructions on a Sparc (gcc -O),
+ * which is about 58 instructions per byte, 14 per round.
+ * gcc seems to leave in some unnecessary AND-with-0xFF operations,
+ * but only in the latter part of the functions. Perhaps it runs out
+ * of resources to properly optimize such a long inlined function?
+ * In theory we should get about 11 instructions per round, not 14.
+ */
+
+void
+skipjack_forwards(u_int8_t *plain, u_int8_t *cipher, u_int8_t **key_tables)
+{
+ u_int8_t wh1 = plain[0]; u_int8_t wl1 = plain[1];
+ u_int8_t wh2 = plain[2]; u_int8_t wl2 = plain[3];
+ u_int8_t wh3 = plain[4]; u_int8_t wl3 = plain[5];
+ u_int8_t wh4 = plain[6]; u_int8_t wl4 = plain[7];
+
+ u_int8_t * k0 = key_tables [0];
+ u_int8_t * k1 = key_tables [1];
+ u_int8_t * k2 = key_tables [2];
+ u_int8_t * k3 = key_tables [3];
+ u_int8_t * k4 = key_tables [4];
+ u_int8_t * k5 = key_tables [5];
+ u_int8_t * k6 = key_tables [6];
+ u_int8_t * k7 = key_tables [7];
+ u_int8_t * k8 = key_tables [8];
+ u_int8_t * k9 = key_tables [9];
+
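+	/*
+	 * Per the SKIPJACK specification, rounds 1-8 and 17-24 follow
+	 * Rule A (apply G, then XOR into the next word together with the
+	 * round counter); rounds 9-16 and 25-32 follow Rule B (XOR first,
+	 * then apply G).
+	 */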
+ /* first 8 rounds */
+ g0 (wh1,wl1, wh1,wl1); wl4 ^= wl1 ^ 1; wh4 ^= wh1;
+ g4 (wh4,wl4, wh4,wl4); wl3 ^= wl4 ^ 2; wh3 ^= wh4;
+ g8 (wh3,wl3, wh3,wl3); wl2 ^= wl3 ^ 3; wh2 ^= wh3;
+ g2 (wh2,wl2, wh2,wl2); wl1 ^= wl2 ^ 4; wh1 ^= wh2;
+ g6 (wh1,wl1, wh1,wl1); wl4 ^= wl1 ^ 5; wh4 ^= wh1;
+ g0 (wh4,wl4, wh4,wl4); wl3 ^= wl4 ^ 6; wh3 ^= wh4;
+ g4 (wh3,wl3, wh3,wl3); wl2 ^= wl3 ^ 7; wh2 ^= wh3;
+ g8 (wh2,wl2, wh2,wl2); wl1 ^= wl2 ^ 8; wh1 ^= wh2;
+
+ /* second 8 rounds */
+ wh2 ^= wh1; wl2 ^= wl1 ^ 9 ; g2 (wh1,wl1, wh1,wl1);
+ wh1 ^= wh4; wl1 ^= wl4 ^ 10; g6 (wh4,wl4, wh4,wl4);
+ wh4 ^= wh3; wl4 ^= wl3 ^ 11; g0 (wh3,wl3, wh3,wl3);
+ wh3 ^= wh2; wl3 ^= wl2 ^ 12; g4 (wh2,wl2, wh2,wl2);
+ wh2 ^= wh1; wl2 ^= wl1 ^ 13; g8 (wh1,wl1, wh1,wl1);
+ wh1 ^= wh4; wl1 ^= wl4 ^ 14; g2 (wh4,wl4, wh4,wl4);
+ wh4 ^= wh3; wl4 ^= wl3 ^ 15; g6 (wh3,wl3, wh3,wl3);
+ wh3 ^= wh2; wl3 ^= wl2 ^ 16; g0 (wh2,wl2, wh2,wl2);
+
+ /* third 8 rounds */
+ g4 (wh1,wl1, wh1,wl1); wl4 ^= wl1 ^ 17; wh4 ^= wh1;
+ g8 (wh4,wl4, wh4,wl4); wl3 ^= wl4 ^ 18; wh3 ^= wh4;
+ g2 (wh3,wl3, wh3,wl3); wl2 ^= wl3 ^ 19; wh2 ^= wh3;
+ g6 (wh2,wl2, wh2,wl2); wl1 ^= wl2 ^ 20; wh1 ^= wh2;
+ g0 (wh1,wl1, wh1,wl1); wl4 ^= wl1 ^ 21; wh4 ^= wh1;
+ g4 (wh4,wl4, wh4,wl4); wl3 ^= wl4 ^ 22; wh3 ^= wh4;
+ g8 (wh3,wl3, wh3,wl3); wl2 ^= wl3 ^ 23; wh2 ^= wh3;
+ g2 (wh2,wl2, wh2,wl2); wl1 ^= wl2 ^ 24; wh1 ^= wh2;
+
+ /* last 8 rounds */
+ wh2 ^= wh1; wl2 ^= wl1 ^ 25; g6 (wh1,wl1, wh1,wl1);
+ wh1 ^= wh4; wl1 ^= wl4 ^ 26; g0 (wh4,wl4, wh4,wl4);
+ wh4 ^= wh3; wl4 ^= wl3 ^ 27; g4 (wh3,wl3, wh3,wl3);
+ wh3 ^= wh2; wl3 ^= wl2 ^ 28; g8 (wh2,wl2, wh2,wl2);
+ wh2 ^= wh1; wl2 ^= wl1 ^ 29; g2 (wh1,wl1, wh1,wl1);
+ wh1 ^= wh4; wl1 ^= wl4 ^ 30; g6 (wh4,wl4, wh4,wl4);
+ wh4 ^= wh3; wl4 ^= wl3 ^ 31; g0 (wh3,wl3, wh3,wl3);
+ wh3 ^= wh2; wl3 ^= wl2 ^ 32; g4 (wh2,wl2, wh2,wl2);
+
+ /* pack into byte vector */
+ cipher [0] = wh1; cipher [1] = wl1;
+ cipher [2] = wh2; cipher [3] = wl2;
+ cipher [4] = wh3; cipher [5] = wl3;
+ cipher [6] = wh4; cipher [7] = wl4;
+}
+
+
+void
+skipjack_backwards (u_int8_t *cipher, u_int8_t *plain, u_int8_t **key_tables)
+{
+ /* setup 4 16-bit portions */
+ u_int8_t wh1 = cipher[0]; u_int8_t wl1 = cipher[1];
+ u_int8_t wh2 = cipher[2]; u_int8_t wl2 = cipher[3];
+ u_int8_t wh3 = cipher[4]; u_int8_t wl3 = cipher[5];
+ u_int8_t wh4 = cipher[6]; u_int8_t wl4 = cipher[7];
+
+ u_int8_t * k0 = key_tables [0];
+ u_int8_t * k1 = key_tables [1];
+ u_int8_t * k2 = key_tables [2];
+ u_int8_t * k3 = key_tables [3];
+ u_int8_t * k4 = key_tables [4];
+ u_int8_t * k5 = key_tables [5];
+ u_int8_t * k6 = key_tables [6];
+ u_int8_t * k7 = key_tables [7];
+ u_int8_t * k8 = key_tables [8];
+ u_int8_t * k9 = key_tables [9];
+
+ /* first 8 rounds */
+ g4_inv (wh2,wl2, wh2,wl2); wl3 ^= wl2 ^ 32; wh3 ^= wh2;
+ g0_inv (wh3,wl3, wh3,wl3); wl4 ^= wl3 ^ 31; wh4 ^= wh3;
+ g6_inv (wh4,wl4, wh4,wl4); wl1 ^= wl4 ^ 30; wh1 ^= wh4;
+ g2_inv (wh1,wl1, wh1,wl1); wl2 ^= wl1 ^ 29; wh2 ^= wh1;
+ g8_inv (wh2,wl2, wh2,wl2); wl3 ^= wl2 ^ 28; wh3 ^= wh2;
+ g4_inv (wh3,wl3, wh3,wl3); wl4 ^= wl3 ^ 27; wh4 ^= wh3;
+ g0_inv (wh4,wl4, wh4,wl4); wl1 ^= wl4 ^ 26; wh1 ^= wh4;
+ g6_inv (wh1,wl1, wh1,wl1); wl2 ^= wl1 ^ 25; wh2 ^= wh1;
+
+ /* second 8 rounds */
+ wh1 ^= wh2; wl1 ^= wl2 ^ 24; g2_inv (wh2,wl2, wh2,wl2);
+ wh2 ^= wh3; wl2 ^= wl3 ^ 23; g8_inv (wh3,wl3, wh3,wl3);
+ wh3 ^= wh4; wl3 ^= wl4 ^ 22; g4_inv (wh4,wl4, wh4,wl4);
+ wh4 ^= wh1; wl4 ^= wl1 ^ 21; g0_inv (wh1,wl1, wh1,wl1);
+ wh1 ^= wh2; wl1 ^= wl2 ^ 20; g6_inv (wh2,wl2, wh2,wl2);
+ wh2 ^= wh3; wl2 ^= wl3 ^ 19; g2_inv (wh3,wl3, wh3,wl3);
+ wh3 ^= wh4; wl3 ^= wl4 ^ 18; g8_inv (wh4,wl4, wh4,wl4);
+ wh4 ^= wh1; wl4 ^= wl1 ^ 17; g4_inv (wh1,wl1, wh1,wl1);
+
+ /* third 8 rounds */
+ g0_inv (wh2,wl2, wh2,wl2); wl3 ^= wl2 ^ 16; wh3 ^= wh2;
+ g6_inv (wh3,wl3, wh3,wl3); wl4 ^= wl3 ^ 15; wh4 ^= wh3;
+ g2_inv (wh4,wl4, wh4,wl4); wl1 ^= wl4 ^ 14; wh1 ^= wh4;
+ g8_inv (wh1,wl1, wh1,wl1); wl2 ^= wl1 ^ 13; wh2 ^= wh1;
+ g4_inv (wh2,wl2, wh2,wl2); wl3 ^= wl2 ^ 12; wh3 ^= wh2;
+ g0_inv (wh3,wl3, wh3,wl3); wl4 ^= wl3 ^ 11; wh4 ^= wh3;
+ g6_inv (wh4,wl4, wh4,wl4); wl1 ^= wl4 ^ 10; wh1 ^= wh4;
+ g2_inv (wh1,wl1, wh1,wl1); wl2 ^= wl1 ^ 9; wh2 ^= wh1;
+
+ /* last 8 rounds */
+ wh1 ^= wh2; wl1 ^= wl2 ^ 8; g8_inv (wh2,wl2, wh2,wl2);
+ wh2 ^= wh3; wl2 ^= wl3 ^ 7; g4_inv (wh3,wl3, wh3,wl3);
+ wh3 ^= wh4; wl3 ^= wl4 ^ 6; g0_inv (wh4,wl4, wh4,wl4);
+ wh4 ^= wh1; wl4 ^= wl1 ^ 5; g6_inv (wh1,wl1, wh1,wl1);
+ wh1 ^= wh2; wl1 ^= wl2 ^ 4; g2_inv (wh2,wl2, wh2,wl2);
+ wh2 ^= wh3; wl2 ^= wl3 ^ 3; g8_inv (wh3,wl3, wh3,wl3);
+ wh3 ^= wh4; wl3 ^= wl4 ^ 2; g4_inv (wh4,wl4, wh4,wl4);
+ wh4 ^= wh1; wl4 ^= wl1 ^ 1; g0_inv (wh1,wl1, wh1,wl1);
+
+ /* pack into byte vector */
+ plain [0] = wh1; plain [1] = wl1;
+ plain [2] = wh2; plain [3] = wl2;
+ plain [4] = wh3; plain [5] = wl3;
+ plain [6] = wh4; plain [7] = wl4;
+}
diff --git a/rtems/freebsd/opencrypto/skipjack.h b/rtems/freebsd/opencrypto/skipjack.h
new file mode 100644
index 00000000..3e88418c
--- /dev/null
+++ b/rtems/freebsd/opencrypto/skipjack.h
@@ -0,0 +1,19 @@
+/* $FreeBSD$ */
+/* $OpenBSD: skipjack.h,v 1.3 2002/03/14 01:26:51 millert Exp $ */
+
+/*-
+ * Further optimized test implementation of SKIPJACK algorithm
+ * Mark Tillotson <markt@chaos.org.uk>, 25 June 98
+ * Optimizations suit RISC (lots of registers) machines best.
+ *
+ * based on unoptimized implementation of
+ * Panu Rissanen <bande@lut.fi> 960624
+ *
+ * SKIPJACK and KEA Algorithm Specifications
+ * Version 2.0
+ * 29 May 1998
+*/
+
+extern void skipjack_forwards(u_int8_t *plain, u_int8_t *cipher, u_int8_t **key);
+extern void skipjack_backwards(u_int8_t *cipher, u_int8_t *plain, u_int8_t **key);
+extern void subkey_table_gen(u_int8_t *key, u_int8_t **key_tables);
diff --git a/rtems/freebsd/opencrypto/xform.c b/rtems/freebsd/opencrypto/xform.c
new file mode 100644
index 00000000..9f9400de
--- /dev/null
+++ b/rtems/freebsd/opencrypto/xform.c
@@ -0,0 +1,815 @@
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/* $OpenBSD: xform.c,v 1.16 2001/08/28 12:20:43 ben Exp $ */
+/*-
+ * The authors of this code are John Ioannidis (ji@tla.org),
+ * Angelos D. Keromytis (kermit@csd.uch.gr) and
+ * Niels Provos (provos@physnet.uni-hamburg.de).
+ *
+ * This code was written by John Ioannidis for BSD/OS in Athens, Greece,
+ * in November 1995.
+ *
+ * Ported to OpenBSD and NetBSD, with additional transforms, in December 1996,
+ * by Angelos D. Keromytis.
+ *
+ * Additional transforms and features in 1997 and 1998 by Angelos D. Keromytis
+ * and Niels Provos.
+ *
+ * Additional features in 1999 by Angelos D. Keromytis.
+ *
+ * Copyright (C) 1995, 1996, 1997, 1998, 1999 by John Ioannidis,
+ * Angelos D. Keromytis and Niels Provos.
+ *
+ * Copyright (C) 2001, Angelos D. Keromytis.
+ *
+ * Permission to use, copy, and modify this software with or without fee
+ * is hereby granted, provided that this entire notice is included in
+ * all copies of any software which is or includes a copy or
+ * modification of this software.
+ * You may use this code under the GNU public license if you so wish. Please
+ * contribute changes back to the authors under this freer than GPL license
+ * so that we may further the use of strong encryption without limitations to
+ * all.
+ *
+ * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
+ * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
+ * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
+ * PURPOSE.
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/sysctl.h>
+#include <rtems/freebsd/sys/errno.h>
+#include <rtems/freebsd/sys/time.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/machine/cpu.h>
+
+#include <rtems/freebsd/crypto/blowfish/blowfish.h>
+#include <rtems/freebsd/crypto/des/des.h>
+#include <rtems/freebsd/crypto/rijndael/rijndael.h>
+#include <rtems/freebsd/crypto/camellia/camellia.h>
+#include <rtems/freebsd/crypto/sha1.h>
+
+#include <rtems/freebsd/opencrypto/cast.h>
+#include <rtems/freebsd/opencrypto/deflate.h>
+#include <rtems/freebsd/opencrypto/rmd160.h>
+#include <rtems/freebsd/opencrypto/skipjack.h>
+
+#include <rtems/freebsd/sys/md5.h>
+
+#include <rtems/freebsd/opencrypto/cryptodev.h>
+#include <rtems/freebsd/opencrypto/xform.h>
+
+static int null_setkey(u_int8_t **, u_int8_t *, int);
+static int des1_setkey(u_int8_t **, u_int8_t *, int);
+static int des3_setkey(u_int8_t **, u_int8_t *, int);
+static int blf_setkey(u_int8_t **, u_int8_t *, int);
+static int cast5_setkey(u_int8_t **, u_int8_t *, int);
+static int skipjack_setkey(u_int8_t **, u_int8_t *, int);
+static int rijndael128_setkey(u_int8_t **, u_int8_t *, int);
+static int aes_xts_setkey(u_int8_t **, u_int8_t *, int);
+static int cml_setkey(u_int8_t **, u_int8_t *, int);
+
+static void null_encrypt(caddr_t, u_int8_t *);
+static void des1_encrypt(caddr_t, u_int8_t *);
+static void des3_encrypt(caddr_t, u_int8_t *);
+static void blf_encrypt(caddr_t, u_int8_t *);
+static void cast5_encrypt(caddr_t, u_int8_t *);
+static void skipjack_encrypt(caddr_t, u_int8_t *);
+static void rijndael128_encrypt(caddr_t, u_int8_t *);
+static void aes_xts_encrypt(caddr_t, u_int8_t *);
+static void cml_encrypt(caddr_t, u_int8_t *);
+
+static void null_decrypt(caddr_t, u_int8_t *);
+static void des1_decrypt(caddr_t, u_int8_t *);
+static void des3_decrypt(caddr_t, u_int8_t *);
+static void blf_decrypt(caddr_t, u_int8_t *);
+static void cast5_decrypt(caddr_t, u_int8_t *);
+static void skipjack_decrypt(caddr_t, u_int8_t *);
+static void rijndael128_decrypt(caddr_t, u_int8_t *);
+static void aes_xts_decrypt(caddr_t, u_int8_t *);
+static void cml_decrypt(caddr_t, u_int8_t *);
+
+static void null_zerokey(u_int8_t **);
+static void des1_zerokey(u_int8_t **);
+static void des3_zerokey(u_int8_t **);
+static void blf_zerokey(u_int8_t **);
+static void cast5_zerokey(u_int8_t **);
+static void skipjack_zerokey(u_int8_t **);
+static void rijndael128_zerokey(u_int8_t **);
+static void aes_xts_zerokey(u_int8_t **);
+static void cml_zerokey(u_int8_t **);
+
+static void aes_xts_reinit(caddr_t, u_int8_t *);
+
+static void null_init(void *);
+static int null_update(void *, u_int8_t *, u_int16_t);
+static void null_final(u_int8_t *, void *);
+static int MD5Update_int(void *, u_int8_t *, u_int16_t);
+static void SHA1Init_int(void *);
+static int SHA1Update_int(void *, u_int8_t *, u_int16_t);
+static void SHA1Final_int(u_int8_t *, void *);
+static int RMD160Update_int(void *, u_int8_t *, u_int16_t);
+static int SHA256Update_int(void *, u_int8_t *, u_int16_t);
+static int SHA384Update_int(void *, u_int8_t *, u_int16_t);
+static int SHA512Update_int(void *, u_int8_t *, u_int16_t);
+
+static u_int32_t deflate_compress(u_int8_t *, u_int32_t, u_int8_t **);
+static u_int32_t deflate_decompress(u_int8_t *, u_int32_t, u_int8_t **);
+
+MALLOC_DEFINE(M_XDATA, "xform", "xform data buffers");
+
+/* Encryption instances */
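+/*
+ * Initializer order follows struct enc_xform: type, name, blocksize,
+ * minkey, maxkey, encrypt, decrypt, setkey, zerokey, reinit.
+ */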
+struct enc_xform enc_xform_null = {
+ CRYPTO_NULL_CBC, "NULL",
+ /* NB: blocksize of 4 is to generate a properly aligned ESP header */
+ NULL_BLOCK_LEN, 0, 256, /* 2048 bits, max key */
+ null_encrypt,
+ null_decrypt,
+ null_setkey,
+ null_zerokey,
+ NULL
+};
+
+struct enc_xform enc_xform_des = {
+ CRYPTO_DES_CBC, "DES",
+ DES_BLOCK_LEN, 8, 8,
+ des1_encrypt,
+ des1_decrypt,
+ des1_setkey,
+ des1_zerokey,
+ NULL
+};
+
+struct enc_xform enc_xform_3des = {
+ CRYPTO_3DES_CBC, "3DES",
+ DES3_BLOCK_LEN, 24, 24,
+ des3_encrypt,
+ des3_decrypt,
+ des3_setkey,
+ des3_zerokey,
+ NULL
+};
+
+struct enc_xform enc_xform_blf = {
+ CRYPTO_BLF_CBC, "Blowfish",
+ BLOWFISH_BLOCK_LEN, 5, 56 /* 448 bits, max key */,
+ blf_encrypt,
+ blf_decrypt,
+ blf_setkey,
+ blf_zerokey,
+ NULL
+};
+
+struct enc_xform enc_xform_cast5 = {
+ CRYPTO_CAST_CBC, "CAST-128",
+ CAST128_BLOCK_LEN, 5, 16,
+ cast5_encrypt,
+ cast5_decrypt,
+ cast5_setkey,
+ cast5_zerokey,
+ NULL
+};
+
+struct enc_xform enc_xform_skipjack = {
+ CRYPTO_SKIPJACK_CBC, "Skipjack",
+ SKIPJACK_BLOCK_LEN, 10, 10,
+ skipjack_encrypt,
+ skipjack_decrypt,
+ skipjack_setkey,
+ skipjack_zerokey,
+ NULL
+};
+
+struct enc_xform enc_xform_rijndael128 = {
+ CRYPTO_RIJNDAEL128_CBC, "Rijndael-128/AES",
+ RIJNDAEL128_BLOCK_LEN, 8, 32,
+ rijndael128_encrypt,
+ rijndael128_decrypt,
+ rijndael128_setkey,
+ rijndael128_zerokey,
+ NULL
+};
+
+struct enc_xform enc_xform_aes_xts = {
+ CRYPTO_AES_XTS, "AES-XTS",
+ RIJNDAEL128_BLOCK_LEN, 32, 64,
+ aes_xts_encrypt,
+ aes_xts_decrypt,
+ aes_xts_setkey,
+ aes_xts_zerokey,
+ aes_xts_reinit
+};
+
+struct enc_xform enc_xform_arc4 = {
+ CRYPTO_ARC4, "ARC4",
+ 1, 1, 32,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL
+};
+
+struct enc_xform enc_xform_camellia = {
+ CRYPTO_CAMELLIA_CBC, "Camellia",
+ CAMELLIA_BLOCK_LEN, 8, 32,
+ cml_encrypt,
+ cml_decrypt,
+ cml_setkey,
+ cml_zerokey,
+ NULL
+};
+
+/* Authentication instances */
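+/*
+ * Initializer order follows struct auth_hash: type, name, keysize,
+ * hashsize, blocksize, ctxsize, Init, Update, Final.
+ */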
+struct auth_hash auth_hash_null = {
+ CRYPTO_NULL_HMAC, "NULL-HMAC",
+ 0, NULL_HASH_LEN, NULL_HMAC_BLOCK_LEN, sizeof(int), /* NB: context isn't used */
+ null_init, null_update, null_final
+};
+
+struct auth_hash auth_hash_hmac_md5 = {
+ CRYPTO_MD5_HMAC, "HMAC-MD5",
+ 16, MD5_HASH_LEN, MD5_HMAC_BLOCK_LEN, sizeof(MD5_CTX),
+ (void (*) (void *)) MD5Init, MD5Update_int,
+ (void (*) (u_int8_t *, void *)) MD5Final
+};
+
+struct auth_hash auth_hash_hmac_sha1 = {
+ CRYPTO_SHA1_HMAC, "HMAC-SHA1",
+ 20, SHA1_HASH_LEN, SHA1_HMAC_BLOCK_LEN, sizeof(SHA1_CTX),
+ SHA1Init_int, SHA1Update_int, SHA1Final_int
+};
+
+struct auth_hash auth_hash_hmac_ripemd_160 = {
+ CRYPTO_RIPEMD160_HMAC, "HMAC-RIPEMD-160",
+ 20, RIPEMD160_HASH_LEN, RIPEMD160_HMAC_BLOCK_LEN, sizeof(RMD160_CTX),
+ (void (*)(void *)) RMD160Init, RMD160Update_int,
+ (void (*)(u_int8_t *, void *)) RMD160Final
+};
+
+struct auth_hash auth_hash_key_md5 = {
+ CRYPTO_MD5_KPDK, "Keyed MD5",
+ 0, MD5_KPDK_HASH_LEN, 0, sizeof(MD5_CTX),
+ (void (*)(void *)) MD5Init, MD5Update_int,
+ (void (*)(u_int8_t *, void *)) MD5Final
+};
+
+struct auth_hash auth_hash_key_sha1 = {
+ CRYPTO_SHA1_KPDK, "Keyed SHA1",
+ 0, SHA1_KPDK_HASH_LEN, 0, sizeof(SHA1_CTX),
+ SHA1Init_int, SHA1Update_int, SHA1Final_int
+};
+
+struct auth_hash auth_hash_hmac_sha2_256 = {
+ CRYPTO_SHA2_256_HMAC, "HMAC-SHA2-256",
+ 32, SHA2_256_HASH_LEN, SHA2_256_HMAC_BLOCK_LEN, sizeof(SHA256_CTX),
+ (void (*)(void *)) SHA256_Init, SHA256Update_int,
+ (void (*)(u_int8_t *, void *)) SHA256_Final
+};
+
+struct auth_hash auth_hash_hmac_sha2_384 = {
+ CRYPTO_SHA2_384_HMAC, "HMAC-SHA2-384",
+ 48, SHA2_384_HASH_LEN, SHA2_384_HMAC_BLOCK_LEN, sizeof(SHA384_CTX),
+ (void (*)(void *)) SHA384_Init, SHA384Update_int,
+ (void (*)(u_int8_t *, void *)) SHA384_Final
+};
+
+struct auth_hash auth_hash_hmac_sha2_512 = {
+ CRYPTO_SHA2_512_HMAC, "HMAC-SHA2-512",
+ 64, SHA2_512_HASH_LEN, SHA2_512_HMAC_BLOCK_LEN, sizeof(SHA512_CTX),
+ (void (*)(void *)) SHA512_Init, SHA512Update_int,
+ (void (*)(u_int8_t *, void *)) SHA512_Final
+};
+
+/* Compression instance */
+struct comp_algo comp_algo_deflate = {
+ CRYPTO_DEFLATE_COMP, "Deflate",
+ 90, deflate_compress,
+ deflate_decompress
+};
+
+/*
+ * Encryption wrapper routines.
+ */
+static void
+null_encrypt(caddr_t key, u_int8_t *blk)
+{
+}
+static void
+null_decrypt(caddr_t key, u_int8_t *blk)
+{
+}
+static int
+null_setkey(u_int8_t **sched, u_int8_t *key, int len)
+{
+ *sched = NULL;
+ return 0;
+}
+static void
+null_zerokey(u_int8_t **sched)
+{
+ *sched = NULL;
+}
+
+static void
+des1_encrypt(caddr_t key, u_int8_t *blk)
+{
+ des_cblock *cb = (des_cblock *) blk;
+ des_key_schedule *p = (des_key_schedule *) key;
+
+ des_ecb_encrypt(cb, cb, p[0], DES_ENCRYPT);
+}
+
+static void
+des1_decrypt(caddr_t key, u_int8_t *blk)
+{
+ des_cblock *cb = (des_cblock *) blk;
+ des_key_schedule *p = (des_key_schedule *) key;
+
+ des_ecb_encrypt(cb, cb, p[0], DES_DECRYPT);
+}
+
+static int
+des1_setkey(u_int8_t **sched, u_int8_t *key, int len)
+{
+ des_key_schedule *p;
+ int err;
+
+ p = malloc(sizeof (des_key_schedule),
+ M_CRYPTO_DATA, M_NOWAIT|M_ZERO);
+ if (p != NULL) {
+ des_set_key((des_cblock *) key, p[0]);
+ err = 0;
+ } else
+ err = ENOMEM;
+ *sched = (u_int8_t *) p;
+ return err;
+}
+
+static void
+des1_zerokey(u_int8_t **sched)
+{
+ bzero(*sched, sizeof (des_key_schedule));
+ free(*sched, M_CRYPTO_DATA);
+ *sched = NULL;
+}
+
+static void
+des3_encrypt(caddr_t key, u_int8_t *blk)
+{
+ des_cblock *cb = (des_cblock *) blk;
+ des_key_schedule *p = (des_key_schedule *) key;
+
+ des_ecb3_encrypt(cb, cb, p[0], p[1], p[2], DES_ENCRYPT);
+}
+
+static void
+des3_decrypt(caddr_t key, u_int8_t *blk)
+{
+ des_cblock *cb = (des_cblock *) blk;
+ des_key_schedule *p = (des_key_schedule *) key;
+
+ des_ecb3_encrypt(cb, cb, p[0], p[1], p[2], DES_DECRYPT);
+}
+
+static int
+des3_setkey(u_int8_t **sched, u_int8_t *key, int len)
+{
+ des_key_schedule *p;
+ int err;
+
+ p = malloc(3*sizeof (des_key_schedule),
+ M_CRYPTO_DATA, M_NOWAIT|M_ZERO);
+ if (p != NULL) {
+ des_set_key((des_cblock *)(key + 0), p[0]);
+ des_set_key((des_cblock *)(key + 8), p[1]);
+ des_set_key((des_cblock *)(key + 16), p[2]);
+ err = 0;
+ } else
+ err = ENOMEM;
+ *sched = (u_int8_t *) p;
+ return err;
+}
+
+static void
+des3_zerokey(u_int8_t **sched)
+{
+ bzero(*sched, 3*sizeof (des_key_schedule));
+ free(*sched, M_CRYPTO_DATA);
+ *sched = NULL;
+}
+
+static void
+blf_encrypt(caddr_t key, u_int8_t *blk)
+{
+ BF_LONG t[2];
+
+ memcpy(t, blk, sizeof (t));
+ t[0] = ntohl(t[0]);
+ t[1] = ntohl(t[1]);
+ /* NB: BF_encrypt expects the block in host order! */
+ BF_encrypt(t, (BF_KEY *) key);
+ t[0] = htonl(t[0]);
+ t[1] = htonl(t[1]);
+ memcpy(blk, t, sizeof (t));
+}
+
+static void
+blf_decrypt(caddr_t key, u_int8_t *blk)
+{
+ BF_LONG t[2];
+
+ memcpy(t, blk, sizeof (t));
+ t[0] = ntohl(t[0]);
+ t[1] = ntohl(t[1]);
+ /* NB: BF_decrypt expects the block in host order! */
+ BF_decrypt(t, (BF_KEY *) key);
+ t[0] = htonl(t[0]);
+ t[1] = htonl(t[1]);
+ memcpy(blk, t, sizeof (t));
+}
+
+static int
+blf_setkey(u_int8_t **sched, u_int8_t *key, int len)
+{
+ int err;
+
+ *sched = malloc(sizeof(BF_KEY),
+ M_CRYPTO_DATA, M_NOWAIT|M_ZERO);
+ if (*sched != NULL) {
+ BF_set_key((BF_KEY *) *sched, len, key);
+ err = 0;
+ } else
+ err = ENOMEM;
+ return err;
+}
+
+static void
+blf_zerokey(u_int8_t **sched)
+{
+ bzero(*sched, sizeof(BF_KEY));
+ free(*sched, M_CRYPTO_DATA);
+ *sched = NULL;
+}
+
+static void
+cast5_encrypt(caddr_t key, u_int8_t *blk)
+{
+ cast_encrypt((cast_key *) key, blk, blk);
+}
+
+static void
+cast5_decrypt(caddr_t key, u_int8_t *blk)
+{
+ cast_decrypt((cast_key *) key, blk, blk);
+}
+
+static int
+cast5_setkey(u_int8_t **sched, u_int8_t *key, int len)
+{
+ int err;
+
+ *sched = malloc(sizeof(cast_key), M_CRYPTO_DATA, M_NOWAIT|M_ZERO);
+ if (*sched != NULL) {
+ cast_setkey((cast_key *)*sched, key, len);
+ err = 0;
+ } else
+ err = ENOMEM;
+ return err;
+}
+
+static void
+cast5_zerokey(u_int8_t **sched)
+{
+ bzero(*sched, sizeof(cast_key));
+ free(*sched, M_CRYPTO_DATA);
+ *sched = NULL;
+}
+
+static void
+skipjack_encrypt(caddr_t key, u_int8_t *blk)
+{
+ skipjack_forwards(blk, blk, (u_int8_t **) key);
+}
+
+static void
+skipjack_decrypt(caddr_t key, u_int8_t *blk)
+{
+ skipjack_backwards(blk, blk, (u_int8_t **) key);
+}
+
+static int
+skipjack_setkey(u_int8_t **sched, u_int8_t *key, int len)
+{
+ int err;
+
+	/*
+	 * NB: allocate all the memory that's needed at once: ten table
+	 * pointers followed by the ten 256-byte subkey tables.
+	 */
+ *sched = malloc(10 * (sizeof(u_int8_t *) + 0x100),
+ M_CRYPTO_DATA, M_NOWAIT|M_ZERO);
+ if (*sched != NULL) {
+ u_int8_t** key_tables = (u_int8_t**) *sched;
+ u_int8_t* table = (u_int8_t*) &key_tables[10];
+ int k;
+
+ for (k = 0; k < 10; k++) {
+ key_tables[k] = table;
+ table += 0x100;
+ }
+ subkey_table_gen(key, (u_int8_t **) *sched);
+ err = 0;
+ } else
+ err = ENOMEM;
+ return err;
+}
+
+static void
+skipjack_zerokey(u_int8_t **sched)
+{
+ bzero(*sched, 10 * (sizeof(u_int8_t *) + 0x100));
+ free(*sched, M_CRYPTO_DATA);
+ *sched = NULL;
+}
+
+static void
+rijndael128_encrypt(caddr_t key, u_int8_t *blk)
+{
+ rijndael_encrypt((rijndael_ctx *) key, (u_char *) blk, (u_char *) blk);
+}
+
+static void
+rijndael128_decrypt(caddr_t key, u_int8_t *blk)
+{
+ rijndael_decrypt(((rijndael_ctx *) key), (u_char *) blk,
+ (u_char *) blk);
+}
+
+static int
+rijndael128_setkey(u_int8_t **sched, u_int8_t *key, int len)
+{
+ int err;
+
+ if (len != 16 && len != 24 && len != 32)
+ return (EINVAL);
+ *sched = malloc(sizeof(rijndael_ctx), M_CRYPTO_DATA,
+ M_NOWAIT|M_ZERO);
+ if (*sched != NULL) {
+ rijndael_set_key((rijndael_ctx *) *sched, (u_char *) key,
+ len * 8);
+ err = 0;
+ } else
+ err = ENOMEM;
+ return err;
+}
+
+static void
+rijndael128_zerokey(u_int8_t **sched)
+{
+ bzero(*sched, sizeof(rijndael_ctx));
+ free(*sched, M_CRYPTO_DATA);
+ *sched = NULL;
+}
+
+#define AES_XTS_BLOCKSIZE 16
+#define AES_XTS_IVSIZE 8
+#define AES_XTS_ALPHA 0x87 /* GF(2^128) generator polynomial */
+
+struct aes_xts_ctx {
+ rijndael_ctx key1;
+ rijndael_ctx key2;
+ u_int8_t tweak[AES_XTS_BLOCKSIZE];
+};
+
+static void
+aes_xts_reinit(caddr_t key, u_int8_t *iv)
+{
+ struct aes_xts_ctx *ctx = (struct aes_xts_ctx *)key;
+ u_int64_t blocknum;
+ u_int i;
+
+ /*
+ * Prepare tweak as E_k2(IV). IV is specified as LE representation
+ * of a 64-bit block number which we allow to be passed in directly.
+ */
+ bcopy(iv, &blocknum, AES_XTS_IVSIZE);
+ for (i = 0; i < AES_XTS_IVSIZE; i++) {
+ ctx->tweak[i] = blocknum & 0xff;
+ blocknum >>= 8;
+ }
+ /* Last 64 bits of IV are always zero */
+ bzero(ctx->tweak + AES_XTS_IVSIZE, AES_XTS_IVSIZE);
+
+ rijndael_encrypt(&ctx->key2, ctx->tweak, ctx->tweak);
+}
+
+static void
+aes_xts_crypt(struct aes_xts_ctx *ctx, u_int8_t *data, u_int do_encrypt)
+{
+ u_int8_t block[AES_XTS_BLOCKSIZE];
+ u_int i, carry_in, carry_out;
+
+ for (i = 0; i < AES_XTS_BLOCKSIZE; i++)
+ block[i] = data[i] ^ ctx->tweak[i];
+
+ if (do_encrypt)
+ rijndael_encrypt(&ctx->key1, block, data);
+ else
+ rijndael_decrypt(&ctx->key1, block, data);
+
+ for (i = 0; i < AES_XTS_BLOCKSIZE; i++)
+ data[i] ^= ctx->tweak[i];
+
+ /* Exponentiate tweak */
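+	/*
+	 * This multiplies the tweak by x in GF(2^128): shift the 128-bit
+	 * value left by one bit and, if a bit carries out of the top byte,
+	 * fold in the reduction constant AES_XTS_ALPHA (0x87).
+	 */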
+ carry_in = 0;
+ for (i = 0; i < AES_XTS_BLOCKSIZE; i++) {
+ carry_out = ctx->tweak[i] & 0x80;
+ ctx->tweak[i] = (ctx->tweak[i] << 1) | (carry_in ? 1 : 0);
+ carry_in = carry_out;
+ }
+ if (carry_in)
+ ctx->tweak[0] ^= AES_XTS_ALPHA;
+ bzero(block, sizeof(block));
+}
+
+static void
+aes_xts_encrypt(caddr_t key, u_int8_t *data)
+{
+ aes_xts_crypt((struct aes_xts_ctx *)key, data, 1);
+}
+
+static void
+aes_xts_decrypt(caddr_t key, u_int8_t *data)
+{
+ aes_xts_crypt((struct aes_xts_ctx *)key, data, 0);
+}
+
+static int
+aes_xts_setkey(u_int8_t **sched, u_int8_t *key, int len)
+{
+ struct aes_xts_ctx *ctx;
+
+ if (len != 32 && len != 64)
+ return EINVAL;
+
+ *sched = malloc(sizeof(struct aes_xts_ctx), M_CRYPTO_DATA,
+ M_NOWAIT | M_ZERO);
+ if (*sched == NULL)
+ return ENOMEM;
+ ctx = (struct aes_xts_ctx *)*sched;
+
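+	/*
+	 * The supplied key holds two AES keys of len / 2 bytes each: the
+	 * first half encrypts the data (key1), the second half encrypts
+	 * the tweak (key2); len * 4 is the size of each half in bits.
+	 */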
+ rijndael_set_key(&ctx->key1, key, len * 4);
+ rijndael_set_key(&ctx->key2, key + (len / 2), len * 4);
+
+ return 0;
+}
+
+static void
+aes_xts_zerokey(u_int8_t **sched)
+{
+ bzero(*sched, sizeof(struct aes_xts_ctx));
+ free(*sched, M_CRYPTO_DATA);
+ *sched = NULL;
+}
+
+static void
+cml_encrypt(caddr_t key, u_int8_t *blk)
+{
+ camellia_encrypt((camellia_ctx *) key, (u_char *) blk, (u_char *) blk);
+}
+
+static void
+cml_decrypt(caddr_t key, u_int8_t *blk)
+{
+ camellia_decrypt(((camellia_ctx *) key), (u_char *) blk,
+ (u_char *) blk);
+}
+
+static int
+cml_setkey(u_int8_t **sched, u_int8_t *key, int len)
+{
+ int err;
+
+ if (len != 16 && len != 24 && len != 32)
+ return (EINVAL);
+ *sched = malloc(sizeof(camellia_ctx), M_CRYPTO_DATA,
+ M_NOWAIT|M_ZERO);
+ if (*sched != NULL) {
+ camellia_set_key((camellia_ctx *) *sched, (u_char *) key,
+ len * 8);
+ err = 0;
+ } else
+ err = ENOMEM;
+ return err;
+}
+
+static void
+cml_zerokey(u_int8_t **sched)
+{
+ bzero(*sched, sizeof(camellia_ctx));
+ free(*sched, M_CRYPTO_DATA);
+ *sched = NULL;
+}
+
+/*
+ * And now for auth.
+ */
+
+static void
+null_init(void *ctx)
+{
+}
+
+static int
+null_update(void *ctx, u_int8_t *buf, u_int16_t len)
+{
+ return 0;
+}
+
+static void
+null_final(u_int8_t *buf, void *ctx)
+{
+ if (buf != (u_int8_t *) 0)
+ bzero(buf, 12);
+}
+
+static int
+RMD160Update_int(void *ctx, u_int8_t *buf, u_int16_t len)
+{
+ RMD160Update(ctx, buf, len);
+ return 0;
+}
+
+static int
+MD5Update_int(void *ctx, u_int8_t *buf, u_int16_t len)
+{
+ MD5Update(ctx, buf, len);
+ return 0;
+}
+
+static void
+SHA1Init_int(void *ctx)
+{
+ SHA1Init(ctx);
+}
+
+static int
+SHA1Update_int(void *ctx, u_int8_t *buf, u_int16_t len)
+{
+ SHA1Update(ctx, buf, len);
+ return 0;
+}
+
+static void
+SHA1Final_int(u_int8_t *blk, void *ctx)
+{
+ SHA1Final(blk, ctx);
+}
+
+static int
+SHA256Update_int(void *ctx, u_int8_t *buf, u_int16_t len)
+{
+ SHA256_Update(ctx, buf, len);
+ return 0;
+}
+
+static int
+SHA384Update_int(void *ctx, u_int8_t *buf, u_int16_t len)
+{
+ SHA384_Update(ctx, buf, len);
+ return 0;
+}
+
+static int
+SHA512Update_int(void *ctx, u_int8_t *buf, u_int16_t len)
+{
+ SHA512_Update(ctx, buf, len);
+ return 0;
+}
+
+/*
+ * And compression
+ */
+
+static u_int32_t
+deflate_compress(u_int8_t *data, u_int32_t size, u_int8_t **out)
+{
+ return deflate_global(data, size, 0, out);
+}
+
+static u_int32_t
+deflate_decompress(u_int8_t *data, u_int32_t size, u_int8_t **out)
+{
+ return deflate_global(data, size, 1, out);
+}
diff --git a/rtems/freebsd/opencrypto/xform.h b/rtems/freebsd/opencrypto/xform.h
new file mode 100644
index 00000000..0dcbf253
--- /dev/null
+++ b/rtems/freebsd/opencrypto/xform.h
@@ -0,0 +1,104 @@
+/* $FreeBSD$ */
+/* $OpenBSD: xform.h,v 1.8 2001/08/28 12:20:43 ben Exp $ */
+
+/*-
+ * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
+ *
+ * This code was written by Angelos D. Keromytis in Athens, Greece, in
+ * February 2000. Network Security Technologies Inc. (NSTI) kindly
+ * supported the development of this code.
+ *
+ * Copyright (c) 2000 Angelos D. Keromytis
+ *
+ * Permission to use, copy, and modify this software without fee
+ * is hereby granted, provided that this entire notice is included in
+ * all source code copies of any software which is or includes a copy or
+ * modification of this software.
+ *
+ * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
+ * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
+ * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
+ * PURPOSE.
+ */
+
+#ifndef _CRYPTO_XFORM_HH_
+#define _CRYPTO_XFORM_HH_
+
+#include <rtems/freebsd/sys/md5.h>
+#include <rtems/freebsd/crypto/sha1.h>
+#include <rtems/freebsd/crypto/sha2/sha2.h>
+#include <rtems/freebsd/opencrypto/rmd160.h>
+
+/* Declarations */
+struct auth_hash {
+ int type;
+ char *name;
+ u_int16_t keysize;
+ u_int16_t hashsize;
+ u_int16_t blocksize;
+ u_int16_t ctxsize;
+ void (*Init) (void *);
+ int (*Update) (void *, u_int8_t *, u_int16_t);
+ void (*Final) (u_int8_t *, void *);
+};
+
+#define AH_ALEN_MAX 20 /* max authenticator hash length */
+
+struct enc_xform {
+ int type;
+ char *name;
+ u_int16_t blocksize;
+ u_int16_t minkey, maxkey;
+ void (*encrypt) (caddr_t, u_int8_t *);
+ void (*decrypt) (caddr_t, u_int8_t *);
+ int (*setkey) (u_int8_t **, u_int8_t *, int len);
+ void (*zerokey) (u_int8_t **);
+ void (*reinit) (caddr_t, u_int8_t *);
+};
+
+struct comp_algo {
+ int type;
+ char *name;
+ size_t minlen;
+ u_int32_t (*compress) (u_int8_t *, u_int32_t, u_int8_t **);
+ u_int32_t (*decompress) (u_int8_t *, u_int32_t, u_int8_t **);
+};
+
+union authctx {
+ MD5_CTX md5ctx;
+ SHA1_CTX sha1ctx;
+ RMD160_CTX rmd160ctx;
+ SHA256_CTX sha256ctx;
+ SHA384_CTX sha384ctx;
+ SHA512_CTX sha512ctx;
+};
+
+extern struct enc_xform enc_xform_null;
+extern struct enc_xform enc_xform_des;
+extern struct enc_xform enc_xform_3des;
+extern struct enc_xform enc_xform_blf;
+extern struct enc_xform enc_xform_cast5;
+extern struct enc_xform enc_xform_skipjack;
+extern struct enc_xform enc_xform_rijndael128;
+extern struct enc_xform enc_xform_aes_xts;
+extern struct enc_xform enc_xform_arc4;
+extern struct enc_xform enc_xform_camellia;
+
+extern struct auth_hash auth_hash_null;
+extern struct auth_hash auth_hash_key_md5;
+extern struct auth_hash auth_hash_key_sha1;
+extern struct auth_hash auth_hash_hmac_md5;
+extern struct auth_hash auth_hash_hmac_sha1;
+extern struct auth_hash auth_hash_hmac_ripemd_160;
+extern struct auth_hash auth_hash_hmac_sha2_256;
+extern struct auth_hash auth_hash_hmac_sha2_384;
+extern struct auth_hash auth_hash_hmac_sha2_512;
+
+extern struct comp_algo comp_algo_deflate;
+
+#ifdef _KERNEL
+#include <rtems/freebsd/sys/malloc.h>
+MALLOC_DECLARE(M_XDATA);
+#endif
+#endif /* _CRYPTO_XFORM_HH_ */
diff --git a/rtems/freebsd/rtems/rtems-bsd-assert.c b/rtems/freebsd/rtems/rtems-bsd-assert.c
new file mode 100644
index 00000000..6e04db8c
--- /dev/null
+++ b/rtems/freebsd/rtems/rtems-bsd-assert.c
@@ -0,0 +1,39 @@
+/**
+ * @file
+ *
+ * @ingroup rtems_bsd_rtems
+ *
+ * @brief TODO.
+ */
+
+/*
+ * Copyright (c) 2009, 2010 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Obere Lagerstr. 30
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.com/license/LICENSE.
+ */
+
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+#include <rtems/freebsd/sys/types.h>
+#include <rtems/freebsd/sys/systm.h>
+
+void
+rtems_bsd_assert_func(const char *file, int line, const char *func, const char *expr)
+{
+ panic(
+ "assertion \"%s\" failed: file \"%s\", line %d%s%s\n",
+ expr,
+ file,
+ line,
+ (func != NULL) ? ", function: " : "",
+ (func != NULL) ? func : ""
+ );
+}
diff --git a/rtems/freebsd/rtems/rtems-bsd-autoconf.c b/rtems/freebsd/rtems/rtems-bsd-autoconf.c
new file mode 100644
index 00000000..cdf9fc61
--- /dev/null
+++ b/rtems/freebsd/rtems/rtems-bsd-autoconf.c
@@ -0,0 +1,51 @@
+/**
+ * @file
+ *
+ * @ingroup rtems_bsd_rtems
+ *
+ * @brief TODO.
+ */
+
+/*
+ * Copyright (c) 2009, 2010 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Obere Lagerstr. 30
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.com/license/LICENSE.
+ */
+
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/types.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/bus.h>
+#include <rtems/freebsd/sys/kernel.h>
+
+static void
+configure_first(void *dummy)
+{
+ device_add_child(root_bus, "nexus", 0);
+}
+
+static void
+configure(void *dummy)
+{
+ root_bus_configure();
+}
+
+static void
+configure_final(void *dummy)
+{
+ /* Do nothing */
+}
+
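+/*
+ * Register the three configuration passes with SYSINIT in execution
+ * order (first, third, any).
+ */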
+SYSINIT(configure1, SI_SUB_CONFIGURE, SI_ORDER_FIRST, configure_first, NULL);
+SYSINIT(configure2, SI_SUB_CONFIGURE, SI_ORDER_THIRD, configure, NULL);
+SYSINIT(configure3, SI_SUB_CONFIGURE, SI_ORDER_ANY, configure_final, NULL);
diff --git a/rtems/freebsd/rtems/rtems-bsd-bus-dma.c b/rtems/freebsd/rtems/rtems-bsd-bus-dma.c
new file mode 100644
index 00000000..1ed8564e
--- /dev/null
+++ b/rtems/freebsd/rtems/rtems-bsd-bus-dma.c
@@ -0,0 +1,455 @@
+/**
+ * @file
+ *
+ * @ingroup rtems_bsd_rtems
+ *
+ * @brief TODO.
+ *
+ * File origin from FreeBSD 'sys/powerpc/powerpc/busdma_machdep.c'.
+ */
+
+/*-
+ * Copyright (c) 2009, 2010 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Obere Lagerstr. 30
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * Copyright (c) 2004 Olivier Houchard
+ * Copyright (c) 2002 Peter Grehan
+ * Copyright (c) 1997, 1998 Justin T. Gibbs.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification, immediately at the beginning of the file.
+ * 2. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+#include <rtems/freebsd/machine/rtems-bsd-cache.h>
+#include <rtems/malloc.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/types.h>
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/mutex.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/machine/atomic.h>
+#include <rtems/freebsd/machine/bus.h>
+
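+/* Data cache line size and alignment mask, when the CPU advertises one. */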
+#ifdef CPU_DATA_CACHE_ALIGNMENT
+ #define CLSZ ((uintptr_t) CPU_DATA_CACHE_ALIGNMENT)
+ #define CLMASK (CLSZ - (uintptr_t) 1)
+#endif
+
+struct bus_dma_tag {
+ bus_dma_tag_t parent;
+ bus_size_t alignment;
+ bus_size_t boundary;
+ bus_addr_t lowaddr;
+ bus_addr_t highaddr;
+ bus_dma_filter_t *filter;
+ void *filterarg;
+ bus_size_t maxsize;
+ int nsegments;
+ bus_size_t maxsegsz;
+ int flags;
+ int ref_count;
+ int map_count;
+ bus_dma_lock_t *lockfunc;
+ void *lockfuncarg;
+};
+
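+/*
+ * A map records only the buffer from the most recent load operation;
+ * presumably this is what the cache maintenance in the sync operations
+ * works on.
+ */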
+struct bus_dmamap {
+ void *buffer_begin;
+ bus_size_t buffer_size;
+};
+
+/*
+ * Convenience function for manipulating driver locks from busdma (during
+ * busdma_swi, for example). Drivers that don't provide their own locks
+ * should specify &Giant to dmat->lockfuncarg. Drivers that use their own
+ * non-mutex locking scheme don't have to use this at all.
+ */
+void
+busdma_lock_mutex(void *arg, bus_dma_lock_op_t op)
+{
+ struct mtx *dmtx;
+
+ dmtx = (struct mtx *)arg;
+ switch (op) {
+ case BUS_DMA_LOCK:
+ mtx_lock(dmtx);
+ break;
+ case BUS_DMA_UNLOCK:
+ mtx_unlock(dmtx);
+ break;
+ default:
+ panic("Unknown operation 0x%x for busdma_lock_mutex!", op);
+ }
+}
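+
+/*
+ * Usage sketch (hypothetical driver code, not part of this file): a driver
+ * that wants busdma to serialize its callbacks with Giant passes
+ * busdma_lock_mutex and &Giant when creating its tag.  The maxsize and dmat
+ * names below are placeholders.
+ *
+ *	error = bus_dma_tag_create(NULL, 1, 0, BUS_SPACE_MAXADDR,
+ *	    BUS_SPACE_MAXADDR, NULL, NULL, maxsize, 1, maxsize, 0,
+ *	    busdma_lock_mutex, &Giant, &dmat);
+ */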
+
+/*
+ * dflt_lock should never get called. It gets put into the dma tag when
+ * lockfunc == NULL, which is only valid if the maps that are associated
+ * with the tag are never meant to be deferred.
+ * XXX Should have a way to identify which driver is responsible here.
+ */
+static void
+dflt_lock(void *arg, bus_dma_lock_op_t op)
+{
+ panic("driver error: busdma dflt_lock called");
+}
+
+/*
+ * Allocate a device specific dma_tag.
+ */
+int
+bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
+ bus_size_t boundary, bus_addr_t lowaddr, bus_addr_t highaddr,
+ bus_dma_filter_t *filter, void *filterarg, bus_size_t maxsize,
+ int nsegments, bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
+ void *lockfuncarg, bus_dma_tag_t *dmat)
+{
+ bus_dma_tag_t newtag;
+ int error = 0;
+
+ /* Return a NULL tag on failure */
+ *dmat = NULL;
+
+ newtag = malloc(sizeof(*newtag), M_DEVBUF, M_NOWAIT | M_ZERO);
+ if (newtag == NULL)
+ return (ENOMEM);
+
+ newtag->parent = parent;
+ newtag->alignment = alignment;
+ newtag->boundary = boundary;
+ newtag->lowaddr = lowaddr;
+ newtag->highaddr = highaddr;
+ newtag->filter = filter;
+ newtag->filterarg = filterarg;
+ newtag->maxsize = maxsize;
+ newtag->nsegments = nsegments;
+ newtag->maxsegsz = maxsegsz;
+ newtag->flags = flags;
+ newtag->ref_count = 1; /* Count ourself */
+ newtag->map_count = 0;
+ if (lockfunc != NULL) {
+ newtag->lockfunc = lockfunc;
+ newtag->lockfuncarg = lockfuncarg;
+ } else {
+ newtag->lockfunc = dflt_lock;
+ newtag->lockfuncarg = NULL;
+ }
+
+ /*
+ * Take into account any restrictions imposed by our parent tag
+ */
+ if (parent != NULL) {
+ newtag->lowaddr = min(parent->lowaddr, newtag->lowaddr);
+ newtag->highaddr = max(parent->highaddr, newtag->highaddr);
+ if (newtag->boundary == 0)
+ newtag->boundary = parent->boundary;
+ else if (parent->boundary != 0)
+ newtag->boundary = MIN(parent->boundary,
+ newtag->boundary);
+ if (newtag->filter == NULL) {
+ /*
+ * Short circuit looking at our parent directly
+ * since we have encapsulated all of its information
+ */
+ newtag->filter = parent->filter;
+ newtag->filterarg = parent->filterarg;
+ newtag->parent = parent->parent;
+ }
+ if (newtag->parent != NULL)
+ atomic_add_int(&parent->ref_count, 1);
+ }
+
+ *dmat = newtag;
+ return (error);
+}
+
+int
+bus_dma_tag_destroy(bus_dma_tag_t dmat)
+{
+ if (dmat != NULL) {
+
+ if (dmat->map_count != 0)
+ return (EBUSY);
+
+ while (dmat != NULL) {
+ bus_dma_tag_t parent;
+
+ parent = dmat->parent;
+ atomic_subtract_int(&dmat->ref_count, 1);
+ if (dmat->ref_count == 0) {
+ free(dmat, M_DEVBUF);
+ /*
+ * Last reference count, so
+ * release our reference
+ * count on our parent.
+ */
+ dmat = parent;
+ } else
+ dmat = NULL;
+ }
+ }
+ return (0);
+}
+
+/*
+ * Allocate a handle for mapping from kva/uva/physical
+ * address space into bus device space.
+ */
+int
+bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
+{
+ *mapp = malloc(sizeof(**mapp), M_DEVBUF, M_NOWAIT | M_ZERO);
+ if (*mapp == NULL) {
+ return ENOMEM;
+ }
+
+ dmat->map_count++;
+
+ return (0);
+}
+
+/*
+ * Destroy a handle for mapping from kva/uva/physical
+ * address space into bus device space.
+ */
+int
+bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
+{
+ free(map, M_DEVBUF);
+
+ dmat->map_count--;
+
+ return (0);
+}
+
+/*
+ * Allocate a piece of memory that can be efficiently mapped into
+ * bus device space based on the constraints listed in the dma tag.
+ * A dmamap for use with dmamap_load is also allocated.
+ */
+int
+bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
+ bus_dmamap_t *mapp)
+{
+ *mapp = malloc(sizeof(**mapp), M_DEVBUF, M_NOWAIT | M_ZERO);
+ if (*mapp == NULL) {
+ return ENOMEM;
+ }
+
+ *vaddr = rtems_heap_allocate_aligned_with_boundary(dmat->maxsize, dmat->alignment, dmat->boundary);
+ if (*vaddr == NULL) {
+ free(*mapp, M_DEVBUF);
+
+ return ENOMEM;
+ }
+
+ (*mapp)->buffer_begin = *vaddr;
+ (*mapp)->buffer_size = dmat->maxsize;
+
+ if ((flags & BUS_DMA_ZERO) != 0) {
+ memset(*vaddr, 0, dmat->maxsize);
+ }
+
+ return (0);
+}
+
+/*
+ * Free a piece of memory and its allocated dmamap that were allocated
+ * via bus_dmamem_alloc.  The memory always comes from the RTEMS heap.
+ */
+void
+bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
+{
+ free(vaddr, M_RTEMS_HEAP);
+ free(map, M_DEVBUF);
+}
+
+/*
+ * Utility function to load a linear buffer. lastaddrp holds state
+ * between invocations (for multiple-buffer loads). segp contains
+ * the starting segment on entrance, and the ending segment on exit.
+ * first indicates if this is the first invocation of this function.
+ */
+static int
+bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dma_segment_t segs[],
+ void *buf, bus_size_t buflen, struct thread *td, int flags,
+ vm_offset_t *lastaddrp, int *segp, int first)
+{
+ bus_size_t sgsize;
+ bus_addr_t curaddr, lastaddr, baddr, bmask;
+ vm_offset_t vaddr = (vm_offset_t)buf;
+ int seg;
+
+ lastaddr = *lastaddrp;
+ bmask = ~(dmat->boundary - 1);
+
+ for (seg = *segp; buflen > 0 ; ) {
+ /*
+		 * Get the physical address for this segment.  This port
+		 * assumes a flat address space where virtual and physical
+		 * addresses coincide.
+ */
+ curaddr = vaddr;
+
+ /*
+ * Compute the segment size, and adjust counts.
+ */
+ sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK);
+ if (sgsize > dmat->maxsegsz)
+ sgsize = dmat->maxsegsz;
+ if (buflen < sgsize)
+ sgsize = buflen;
+
+ /*
+ * Make sure we don't cross any boundaries.
+ */
+ if (dmat->boundary > 0) {
+ baddr = (curaddr + dmat->boundary) & bmask;
+ if (sgsize > (baddr - curaddr))
+ sgsize = (baddr - curaddr);
+ }
+
+ /*
+ * Insert chunk into a segment, coalescing with
+ * the previous segment if possible.
+ */
+ if (first) {
+ segs[seg].ds_addr = curaddr;
+ segs[seg].ds_len = sgsize;
+ first = 0;
+ } else {
+ if (curaddr == lastaddr &&
+ (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
+ (dmat->boundary == 0 ||
+ (segs[seg].ds_addr & bmask) == (curaddr & bmask)))
+ segs[seg].ds_len += sgsize;
+ else {
+ if (++seg >= dmat->nsegments)
+ break;
+ segs[seg].ds_addr = curaddr;
+ segs[seg].ds_len = sgsize;
+ }
+ }
+
+ lastaddr = curaddr + sgsize;
+ vaddr += sgsize;
+ buflen -= sgsize;
+ }
+
+ *segp = seg;
+ *lastaddrp = lastaddr;
+
+ /*
+ * Did we fit?
+ */
+ return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
+}
+
+/*
+ * Map the buffer buf into bus space using the dmamap map.
+ */
+int
+bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
+ bus_size_t buflen, bus_dmamap_callback_t *callback,
+ void *callback_arg, int flags)
+{
+ bus_dma_segment_t dm_segments[dmat->nsegments];
+ vm_offset_t lastaddr;
+ int error, nsegs;
+
+ map->buffer_begin = buf;
+ map->buffer_size = buflen;
+
+ lastaddr = (vm_offset_t)0;
+ nsegs = 0;
+ error = bus_dmamap_load_buffer(dmat, dm_segments, buf, buflen,
+ NULL, flags, &lastaddr, &nsegs, 1);
+
+ if (error == 0)
+ (*callback)(callback_arg, dm_segments, nsegs + 1, 0);
+ else
+ (*callback)(callback_arg, NULL, 0, error);
+
+ return (0);
+}
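+
+/*
+ * Typical call sequence (hypothetical driver code, shown only as a sketch;
+ * vaddr, size, map, dma_callback and sc are placeholders):
+ *
+ *	bus_dmamem_alloc(dmat, &vaddr, BUS_DMA_ZERO, &map);
+ *	bus_dmamap_load(dmat, map, vaddr, size, dma_callback, sc, 0);
+ *	bus_dmamap_sync(dmat, map, BUS_DMASYNC_PREWRITE);
+ *	... start the transfer ...
+ *	bus_dmamap_sync(dmat, map, BUS_DMASYNC_POSTWRITE);
+ */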
+
+/*
+ * Release the mapping held by map.  A no-op in this implementation.
+ */
+void
+_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
+{
+
+ return;
+}
+
+void
+_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
+{
+#ifdef CPU_DATA_CACHE_ALIGNMENT
+ uintptr_t size = map->buffer_size;
+ uintptr_t begin = (uintptr_t) map->buffer_begin;
+ uintptr_t end = begin + size;
+
+ if ((op & BUS_DMASYNC_PREWRITE) != 0 && (op & BUS_DMASYNC_PREREAD) == 0) {
+ rtems_cache_flush_multiple_data_lines((void *) begin, size);
+ }
+ if ((op & BUS_DMASYNC_PREREAD) != 0) {
+ if ((op & BUS_DMASYNC_PREWRITE) != 0 || ((begin | size) & CLMASK) != 0) {
+ rtems_cache_flush_multiple_data_lines((void *) begin, size);
+ }
+ rtems_cache_invalidate_multiple_data_lines((void *) begin, size);
+ }
+ if ((op & BUS_DMASYNC_POSTREAD) != 0) {
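+		/*
+		 * The invalidate below operates on whole cache lines.  If the
+		 * buffer does not begin and end on cache line boundaries, the
+		 * bytes of the partially covered first and last lines that
+		 * lie outside the buffer would be lost.  Save them beforehand
+		 * and restore them afterwards.
+		 */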
+ char first_buf [CLSZ];
+ char last_buf [CLSZ];
+ bool first_is_aligned = (begin & CLMASK) == 0;
+ bool last_is_aligned = (end & CLMASK) == 0;
+ void *first_begin = (void *) (begin & ~CLMASK);
+ size_t first_size = begin & CLMASK;
+ void *last_begin = (void *) end;
+ size_t last_size = CLSZ - (end & CLMASK);
+
+ if (!first_is_aligned) {
+ memcpy(first_buf, first_begin, first_size);
+ }
+ if (!last_is_aligned) {
+ memcpy(last_buf, last_begin, last_size);
+ }
+
+ rtems_cache_invalidate_multiple_data_lines((void *) begin, size);
+
+ if (!first_is_aligned) {
+ memcpy(first_begin, first_buf, first_size);
+ }
+ if (!last_is_aligned) {
+ memcpy(last_begin, last_buf, last_size);
+ }
+ }
+#endif /* CPU_DATA_CACHE_ALIGNMENT */
+}
diff --git a/rtems/freebsd/rtems/rtems-bsd-callout.c b/rtems/freebsd/rtems/rtems-bsd-callout.c
new file mode 100644
index 00000000..d427b636
--- /dev/null
+++ b/rtems/freebsd/rtems/rtems-bsd-callout.c
@@ -0,0 +1,122 @@
+/**
+ * @file
+ *
+ * @ingroup rtems_bsd_rtems
+ *
+ * @brief Callout services based on the RTEMS timer server.
+ */
+
+/*
+ * Copyright (c) 2009, 2010 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Obere Lagerstr. 30
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.com/license/LICENSE.
+ */
+
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/types.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/callout.h>
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/mutex.h>
+
+RTEMS_CHAIN_DEFINE_EMPTY(rtems_bsd_callout_chain);
+
+static void
+rtems_bsd_callout_dispatch(rtems_id id, void *arg)
+{
+ rtems_status_code sc = RTEMS_SUCCESSFUL;
+ struct callout *c = arg;
+
+ if (c->c_lock != NULL) {
+ sc = rtems_semaphore_obtain(c->c_lock->lo_id, RTEMS_WAIT, RTEMS_NO_TIMEOUT);
+ BSD_ASSERT_SC(sc);
+ }
+
+ if (c->c_func != NULL) {
+ (*c->c_func)(c->c_arg);
+ }
+
+ if (c->c_lock != NULL && (c->c_flags & CALLOUT_RETURNUNLOCKED) == 0) {
+ sc = rtems_semaphore_release(c->c_lock->lo_id);
+ BSD_ASSERT_SC(sc);
+ }
+}
+
+void
+callout_init(struct callout *c, int mpsafe)
+{
+ _callout_init_lock(c, mpsafe ? NULL : &Giant.lock_object, mpsafe ? CALLOUT_RETURNUNLOCKED : 0);
+}
+
+void
+_callout_init_lock(struct callout *c, struct lock_object *lock, int flags)
+{
+ rtems_status_code sc = RTEMS_SUCCESSFUL;
+ rtems_id id = RTEMS_ID_NONE;
+
+ sc = rtems_timer_create(rtems_build_name('_', 'T', 'M', 'R'), &id);
+ BSD_ASSERT_SC(sc);
+
+ c->c_id = id;
+ c->c_lock = lock;
+ c->c_flags = flags;
+ c->c_func = NULL;
+ c->c_arg = NULL;
+
+ rtems_chain_append(&rtems_bsd_callout_chain, &c->c_node);
+}
+
+int
+callout_reset(struct callout *c, int to_ticks, void (*ftn)(void *), void *arg)
+{
+ /* FIXME: Integer conversions */
+
+ rtems_status_code sc = RTEMS_SUCCESSFUL;
+
+ if (to_ticks <= 0) {
+ to_ticks = 1;
+ }
+
+ c->c_func = ftn;
+ c->c_arg = arg;
+
+ sc = rtems_timer_server_fire_after(c->c_id, (rtems_interval) to_ticks, rtems_bsd_callout_dispatch, c);
+ BSD_ASSERT_SC(sc);
+
+ return 0;
+}
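+
+/*
+ * Usage sketch (hypothetical driver code; sc and foo_timeout are
+ * placeholders): arm a callout to run foo_timeout(sc) after one second and
+ * cancel it again on shutdown.
+ *
+ *	callout_init(&sc->timer, 1);
+ *	callout_reset(&sc->timer, hz, foo_timeout, sc);
+ *	...
+ *	callout_stop(&sc->timer);
+ */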
+
+int
+callout_schedule(struct callout *c, int to_ticks)
+{
+ return callout_reset(c, to_ticks, c->c_func, c->c_arg);
+}
+
+int
+_callout_stop_safe(struct callout *c, int safe)
+{
+ rtems_status_code sc = RTEMS_SUCCESSFUL;
+
+ if (!safe) {
+ sc = rtems_timer_cancel(c->c_id);
+ BSD_ASSERT_SC(sc);
+ } else {
+ sc = rtems_timer_delete(c->c_id);
+ BSD_ASSERT_SC(sc);
+
+ c->c_id = RTEMS_ID_NONE;
+ rtems_chain_extract(&c->c_node);
+ }
+
+ return 0;
+}
diff --git a/rtems/freebsd/rtems/rtems-bsd-cam.c b/rtems/freebsd/rtems/rtems-bsd-cam.c
new file mode 100644
index 00000000..36d4d67c
--- /dev/null
+++ b/rtems/freebsd/rtems/rtems-bsd-cam.c
@@ -0,0 +1,495 @@
+/**
+ * @file
+ *
+ * @ingroup rtems_bsd_rtems
+ *
+ * @brief CAM services which map SCSI disks to the RTEMS media server.
+ */
+
+/*
+ * Copyright (c) 2009, 2010 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Obere Lagerstr. 30
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.com/license/LICENSE.
+ */
+
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/mutex.h>
+#include <rtems/freebsd/sys/condvar.h>
+
+#include <rtems/freebsd/cam/cam.h>
+#include <rtems/freebsd/cam/cam_ccb.h>
+#include <rtems/freebsd/cam/cam_sim.h>
+#include <rtems/freebsd/cam/cam_xpt.h>
+#include <rtems/freebsd/cam/cam_xpt_sim.h>
+#include <rtems/freebsd/cam/cam_debug.h>
+
+#include <rtems/freebsd/cam/scsi/scsi_all.h>
+
+#include <rtems/media.h>
+#include <rtems/libio.h>
+#include <rtems/diskdevs.h>
+
+#define BSD_CAM_DEVQ_DUMMY ((struct cam_devq *) 0xdeadbeef)
+
+#define BSD_SCSI_TAG 0
+
+#define BSD_SCSI_RETRIES 4
+
+#define BSD_SCSI_TIMEOUT (60 * 1000)
+
+#define BSD_SCSI_MIN_COMMAND_SIZE 10
+
+MALLOC_DEFINE(M_CAMSIM, "CAM SIM", "CAM SIM buffers");
+
+static void
+rtems_bsd_sim_set_state(struct cam_sim *sim, enum bsd_sim_state state)
+{
+ sim->state = state;
+}
+
+static void
+rtems_bsd_sim_set_state_and_notify(struct cam_sim *sim, enum bsd_sim_state state)
+{
+ sim->state = state;
+ cv_broadcast(&sim->state_changed);
+}
+
+static void
+rtems_bsd_sim_wait_for_state(struct cam_sim *sim, enum bsd_sim_state state)
+{
+ while (sim->state != state) {
+ cv_wait(&sim->state_changed, sim->mtx);
+ }
+}
+
+static void
+rtems_bsd_sim_wait_for_state_and_cancel_ccb(struct cam_sim *sim, enum bsd_sim_state state)
+{
+ while (sim->state != state) {
+ if (sim->state != BSD_SIM_BUSY) {
+ cv_wait(&sim->state_changed, sim->mtx);
+ } else {
+ sim->ccb.ccb_h.status = CAM_SEL_TIMEOUT;
+ (*sim->ccb.ccb_h.cbfcnp)(NULL, &sim->ccb);
+ }
+ }
+}
+
+static void
+rtems_bsd_ccb_callback(struct cam_periph *periph, union ccb *ccb)
+{
+ struct cam_sim *sim = ccb->ccb_h.sim;
+
+ BSD_ASSERT(periph == NULL && sim->state == BSD_SIM_INIT_BUSY);
+
+ rtems_bsd_sim_set_state_and_notify(sim, BSD_SIM_INIT_READY);
+}
+
+static rtems_status_code
+rtems_bsd_ccb_action(union ccb *ccb)
+{
+ rtems_status_code sc = RTEMS_SUCCESSFUL;
+ struct cam_sim *sim = ccb->ccb_h.sim;
+
+ mtx_lock(sim->mtx);
+
+ BSD_ASSERT(sim->state == BSD_SIM_INIT);
+ rtems_bsd_sim_set_state(sim, BSD_SIM_INIT_BUSY);
+ (*sim->sim_action)(sim, ccb);
+ rtems_bsd_sim_wait_for_state(sim, BSD_SIM_INIT_READY);
+ if (ccb->ccb_h.status != CAM_REQ_CMP) {
+ sc = RTEMS_IO_ERROR;
+ }
+ rtems_bsd_sim_set_state(sim, BSD_SIM_INIT);
+
+ mtx_unlock(sim->mtx);
+
+ return sc;
+}
+
+static rtems_status_code
+rtems_bsd_scsi_inquiry(union ccb *ccb, struct scsi_inquiry_data *inq_data)
+{
+ memset(inq_data, 0, sizeof(*inq_data));
+
+ scsi_inquiry(
+ &ccb->csio,
+ BSD_SCSI_RETRIES,
+ rtems_bsd_ccb_callback,
+ BSD_SCSI_TAG,
+ (u_int8_t *) inq_data,
+ sizeof(*inq_data) - 1,
+ FALSE,
+ 0,
+ SSD_MIN_SIZE,
+ BSD_SCSI_TIMEOUT
+ );
+
+ return rtems_bsd_ccb_action(ccb);
+}
+
+static rtems_status_code
+rtems_bsd_scsi_test_unit_ready(union ccb *ccb)
+{
+ scsi_test_unit_ready(
+ &ccb->csio,
+ BSD_SCSI_RETRIES,
+ rtems_bsd_ccb_callback,
+ BSD_SCSI_TAG,
+ SSD_FULL_SIZE,
+ BSD_SCSI_TIMEOUT
+ );
+
+ return rtems_bsd_ccb_action(ccb);
+}
+
+static rtems_status_code
+rtems_bsd_scsi_read_capacity(union ccb *ccb, uint32_t *block_count, uint32_t *block_size)
+{
+ rtems_status_code sc = RTEMS_SUCCESSFUL;
+ struct scsi_read_capacity_data rdcap;
+
+ memset(&rdcap, 0, sizeof(rdcap));
+
+ scsi_read_capacity(
+ &ccb->csio,
+ BSD_SCSI_RETRIES,
+ rtems_bsd_ccb_callback,
+ BSD_SCSI_TAG,
+ &rdcap,
+ SSD_FULL_SIZE,
+ BSD_SCSI_TIMEOUT
+ );
+
+ sc = rtems_bsd_ccb_action(ccb);
+ if (sc != RTEMS_SUCCESSFUL) {
+ return RTEMS_IO_ERROR;
+ }
+
+ *block_size = scsi_4btoul(rdcap.length);
+ *block_count = scsi_4btoul(rdcap.addr) + 1;
+
+ return RTEMS_SUCCESSFUL;
+}
+
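+/*
+ * Completion callback which drives the scatter/gather list: each invocation
+ * issues the SCSI READ or WRITE for the next buffer in the list until sg_end
+ * is reached or an error occurs, then reports the overall status to the
+ * block device layer and returns the SIM to the IDLE state.
+ */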
+static void
+rtems_bsd_csio_callback(struct cam_periph *periph, union ccb *ccb)
+{
+ rtems_status_code sc = RTEMS_SUCCESSFUL;
+ bool done = false;
+ struct cam_sim *sim = ccb->ccb_h.sim;
+
+ BSD_ASSERT(periph == NULL && sim->state == BSD_SIM_BUSY);
+
+ if (ccb->ccb_h.status == CAM_REQ_CMP) {
+ rtems_blkdev_sg_buffer *sg = ccb->csio.sg_current;
+
+ if (sg != ccb->csio.sg_end) {
+ scsi_read_write(
+ &ccb->csio,
+ BSD_SCSI_RETRIES,
+ rtems_bsd_csio_callback,
+ BSD_SCSI_TAG,
+ ccb->csio.readop,
+ 0,
+ BSD_SCSI_MIN_COMMAND_SIZE,
+ sg->block,
+ sg->length / 512, /* FIXME */
+ sg->buffer,
+ sg->length,
+ SSD_FULL_SIZE,
+ BSD_SCSI_TIMEOUT
+ );
+ ccb->csio.sg_current = sg + 1;
+ (*sim->sim_action)(sim, ccb);
+ } else {
+ done = true;
+ }
+ } else if (ccb->ccb_h.status == CAM_SEL_TIMEOUT) {
+ sc = RTEMS_UNSATISFIED;
+ done = true;
+ } else {
+ sc = RTEMS_IO_ERROR;
+ done = true;
+ }
+
+ if (done) {
+ ccb->csio.req->req_done(ccb->csio.req->done_arg, sc);
+ rtems_bsd_sim_set_state_and_notify(sim, BSD_SIM_IDLE);
+ }
+}
+
+static int
+rtems_bsd_sim_disk_read_write(struct cam_sim *sim, rtems_blkdev_request *req)
+{
+ mtx_lock(sim->mtx);
+
+ rtems_bsd_sim_wait_for_state(sim, BSD_SIM_IDLE);
+ rtems_bsd_sim_set_state(sim, BSD_SIM_BUSY);
+
+ switch (req->req) {
+ case RTEMS_BLKDEV_REQ_READ:
+ sim->ccb.csio.readop = TRUE;
+ break;
+ case RTEMS_BLKDEV_REQ_WRITE:
+ sim->ccb.csio.readop = FALSE;
+ break;
+ default:
+ mtx_unlock(sim->mtx);
+ return -1;
+ }
+
+ sim->ccb.csio.sg_current = req->bufs;
+ sim->ccb.csio.sg_end = req->bufs + req->bufnum;
+ sim->ccb.csio.req = req;
+
+ sim->ccb.ccb_h.status = CAM_REQ_CMP;
+
+ rtems_bsd_csio_callback(NULL, &sim->ccb);
+
+ mtx_unlock(sim->mtx);
+
+ return 0;
+}
+
+static int
+rtems_bsd_sim_disk_ioctl(rtems_disk_device *dd, uint32_t req, void *arg)
+{
+ struct cam_sim *sim = rtems_disk_get_driver_data(dd);
+
+ if (req == RTEMS_BLKIO_REQUEST) {
+ rtems_blkdev_request *r = arg;
+
+ return rtems_bsd_sim_disk_read_write(sim, r);
+ } else if (req == RTEMS_BLKIO_DELETED) {
+ mtx_lock(sim->mtx);
+
+ free(sim->disk, M_RTEMS_HEAP);
+ sim->disk = NULL;
+ rtems_bsd_sim_set_state_and_notify(sim, BSD_SIM_DELETED);
+
+ mtx_unlock(sim->mtx);
+
+ return 0;
+ } else {
+ return -1;
+ }
+}
+
+static void
+rtems_bsd_sim_disk_initialized(struct cam_sim *sim, char *disk)
+{
+ mtx_lock(sim->mtx);
+
+ sim->disk = disk;
+ rtems_bsd_sim_set_state_and_notify(sim, BSD_SIM_IDLE);
+
+ mtx_unlock(sim->mtx);
+}
+
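+/*
+ * Media server worker: once the medium is ready, register a block device
+ * driver, probe the SCSI unit (INQUIRY, TEST UNIT READY, READ CAPACITY) and
+ * create the RTEMS disk for it.
+ */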
+static rtems_status_code
+rtems_bsd_sim_attach_worker(rtems_media_state state, const char *src, char **dest, void *arg)
+{
+ rtems_status_code sc = RTEMS_SUCCESSFUL;
+ rtems_device_major_number major = UINT32_MAX;
+ struct cam_sim *sim = arg;
+ char *disk = NULL;
+
+ if (state == RTEMS_MEDIA_STATE_READY) {
+ dev_t dev = 0;
+ unsigned retries = 0;
+
+ struct scsi_inquiry_data inq_data;
+ uint32_t block_count = 0;
+ uint32_t block_size = 0;
+
+ sc = rtems_io_register_driver(0, &rtems_blkdev_generic_ops, &major);
+ if (sc != RTEMS_SUCCESSFUL) {
+ BSD_PRINTF("OOPS: register driver failed\n");
+ goto error;
+ }
+
+ disk = rtems_media_create_path("/dev", src, major);
+ if (disk == NULL) {
+ BSD_PRINTF("OOPS: create path failed\n");
+ goto unregister_and_error;
+ }
+
+ sc = rtems_bsd_scsi_inquiry(&sim->ccb, &inq_data);
+ if (sc != RTEMS_SUCCESSFUL) {
+ BSD_PRINTF("OOPS: inquiry failed\n");
+ goto unregister_and_error;
+ }
+ scsi_print_inquiry(&inq_data);
+
+ for (retries = 0; retries <= 3; ++retries) {
+ sc = rtems_bsd_scsi_test_unit_ready(&sim->ccb);
+ if (sc == RTEMS_SUCCESSFUL) {
+ break;
+ }
+ }
+ if (sc != RTEMS_SUCCESSFUL) {
+ BSD_PRINTF("OOPS: test unit ready failed\n");
+ goto unregister_and_error;
+ }
+
+ sc = rtems_bsd_scsi_read_capacity(&sim->ccb, &block_count, &block_size);
+ if (sc != RTEMS_SUCCESSFUL) {
+ BSD_PRINTF("OOPS: read capacity failed\n");
+ goto unregister_and_error;
+ }
+
+ BSD_PRINTF("read capacity: block count %u, block size %u\n", block_count, block_size);
+
+ dev = rtems_filesystem_make_dev_t(major, 0);
+
+ sc = rtems_disk_create_phys(dev, block_size, block_count, rtems_bsd_sim_disk_ioctl, sim, disk);
+ if (sc != RTEMS_SUCCESSFUL) {
+ goto unregister_and_error;
+ }
+
+ /* FIXME */
+#if 0
+ rtems_disk_device *dd = rtems_disk_obtain(dev);
+ dd->block_size *= 64;
+ rtems_disk_release(dd);
+#endif
+
+ rtems_bsd_sim_disk_initialized(sim, disk);
+
+ *dest = strdup(disk, M_RTEMS_HEAP);
+ }
+
+ return RTEMS_SUCCESSFUL;
+
+unregister_and_error:
+
+ rtems_io_unregister_driver(major);
+
+error:
+
+ free(disk, M_RTEMS_HEAP);
+
+ rtems_bsd_sim_disk_initialized(sim, NULL);
+
+ return RTEMS_IO_ERROR;
+}
+
+struct cam_sim *
+cam_sim_alloc(
+ sim_action_func sim_action,
+ sim_poll_func sim_poll,
+ const char *sim_name,
+ void *softc,
+ u_int32_t unit,
+ struct mtx *mtx,
+ int max_dev_transactions,
+ int max_tagged_dev_transactions,
+ struct cam_devq *queue
+)
+{
+ rtems_status_code sc = RTEMS_SUCCESSFUL;
+ struct cam_sim *sim = NULL;
+
+ if (mtx == NULL) {
+ return NULL;
+ }
+
+ sim = malloc(sizeof(*sim), M_CAMSIM, M_NOWAIT | M_ZERO);
+ if (sim == NULL) {
+ return NULL;
+ }
+
+ sim->sim_action = sim_action;
+ sim->sim_poll = sim_poll;
+ sim->sim_name = sim_name;
+ sim->softc = softc;
+ sim->mtx = mtx;
+ sim->unit_number = unit;
+ sim->ccb.ccb_h.sim = sim;
+
+ cv_init(&sim->state_changed, "SIM state changed");
+
+ sc = rtems_media_server_disk_attach(sim_name, rtems_bsd_sim_attach_worker, sim);
+ BSD_ASSERT_SC(sc);
+
+ return sim;
+}
+
+void
+cam_sim_free(struct cam_sim *sim, int free_devq)
+{
+ rtems_status_code sc = RTEMS_SUCCESSFUL;
+
+ /*
+	 * umass_detach() cancels all transfers via usbd_transfer_unsetup().
+	 * This also prevents the start of new transfers since the transfer
+	 * descriptors will be removed.  Started transfers that are not in the
+	 * transferring state will be canceled and their callbacks will not be
+	 * called.  Thus we have to cancel the pending CCB ourselves here if
+	 * we are in the BUSY state.
+ */
+ rtems_bsd_sim_wait_for_state_and_cancel_ccb(sim, BSD_SIM_IDLE);
+
+ if (sim->disk != NULL) {
+ sc = rtems_media_server_disk_detach(sim->disk);
+ BSD_ASSERT_SC(sc);
+
+ rtems_bsd_sim_wait_for_state(sim, BSD_SIM_DELETED);
+ }
+
+ cv_destroy(&sim->state_changed);
+ free(sim, M_CAMSIM);
+}
+
+struct cam_devq *
+cam_simq_alloc(u_int32_t max_sim_transactions)
+{
+ return BSD_CAM_DEVQ_DUMMY;
+}
+
+void
+cam_simq_free(struct cam_devq *devq)
+{
+ BSD_ASSERT(devq == BSD_CAM_DEVQ_DUMMY);
+}
+
+void
+xpt_done(union ccb *done_ccb)
+{
+ (*done_ccb->ccb_h.cbfcnp)(NULL, done_ccb);
+}
+
+int32_t
+xpt_bus_register(struct cam_sim *sim, device_t parent, u_int32_t bus)
+{
+ /*
+ * We ignore this bus stuff completely. This is easier than removing
+ * the calls from "umass.c".
+ */
+
+ return CAM_SUCCESS;
+}
+
+int32_t
+xpt_bus_deregister(path_id_t pathid)
+{
+ /*
+ * We ignore this bus stuff completely. This is easier than removing
+ * the calls from "umass.c".
+ */
+
+ return CAM_REQ_CMP;
+}
diff --git a/rtems/freebsd/rtems/rtems-bsd-condvar.c b/rtems/freebsd/rtems/rtems-bsd-condvar.c
new file mode 100644
index 00000000..80b9db73
--- /dev/null
+++ b/rtems/freebsd/rtems/rtems-bsd-condvar.c
@@ -0,0 +1,167 @@
+/**
+ * @file
+ *
+ * @ingroup rtems_bsd_rtems
+ *
+ * @brief Condition variable services.
+ */
+
+/*
+ * Copyright (c) 2009, 2010 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Obere Lagerstr. 30
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.com/license/LICENSE.
+ */
+
+/* Necessary to obtain some internal functions */
+#define __RTEMS_VIOLATE_KERNEL_VISIBILITY__
+
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+#include <rtems/posix/cond.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/types.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/mutex.h>
+#include <rtems/freebsd/sys/condvar.h>
+
+RTEMS_CHAIN_DEFINE_EMPTY(rtems_bsd_condvar_chain);
+
+void
+cv_init(struct cv *cv, const char *desc)
+{
+ int rv = pthread_cond_init(&cv->cv_id, NULL);
+
+ BSD_ASSERT_RV(rv);
+
+ cv->cv_description = desc;
+
+ rtems_chain_append(&rtems_bsd_condvar_chain, &cv->cv_node);
+}
+
+void
+cv_destroy(struct cv *cv)
+{
+ int rv = pthread_cond_destroy(&cv->cv_id);
+
+ BSD_ASSERT_RV(rv);
+
+ rtems_chain_extract(&cv->cv_node);
+}
+
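+/*
+ * Common wait path: release the lock (an RTEMS semaphore), enqueue the
+ * executing thread on the POSIX condition variable wait queue with an
+ * optional timeout, block, and re-acquire the lock afterwards if requested.
+ * A timeout is reported as EWOULDBLOCK to match the FreeBSD cv contract.
+ */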
+static int
+_cv_wait_support(struct cv *cv, struct lock_object *lock, int timo, bool relock)
+{
+ rtems_status_code sc = RTEMS_SUCCESSFUL;
+ int eno = 0;
+ Objects_Locations location = OBJECTS_ERROR;
+ POSIX_Condition_variables_Control *pcv = _POSIX_Condition_variables_Get(&cv->cv_id, &location);
+
+ if (location == OBJECTS_LOCAL) {
+ if (pcv->Mutex != POSIX_CONDITION_VARIABLES_NO_MUTEX && pcv->Mutex != lock->lo_id) {
+ _Thread_Enable_dispatch();
+
+ BSD_ASSERT(false);
+
+ return EINVAL;
+ }
+
+ sc = rtems_semaphore_release(lock->lo_id);
+ if (sc != RTEMS_SUCCESSFUL) {
+ _Thread_Enable_dispatch();
+
+ BSD_ASSERT(false);
+
+ return EINVAL;
+ }
+
+ pcv->Mutex = lock->lo_id;
+
+ _Thread_queue_Enter_critical_section(&pcv->Wait_queue);
+ _Thread_Executing->Wait.return_code = 0;
+ _Thread_Executing->Wait.queue = &pcv->Wait_queue;
+ _Thread_Executing->Wait.id = cv->cv_id;
+
+ /* FIXME: Integer conversion */
+ _Thread_queue_Enqueue(&pcv->Wait_queue, (Watchdog_Interval) timo);
+
+ DROP_GIANT();
+
+ _Thread_Enable_dispatch();
+
+ PICKUP_GIANT();
+
+ eno = (int) _Thread_Executing->Wait.return_code;
+ if (eno != 0) {
+ if (eno == ETIMEDOUT) {
+ eno = EWOULDBLOCK;
+ } else {
+ BSD_ASSERT(false);
+
+ eno = EINVAL;
+ }
+ }
+
+ if (relock) {
+ sc = rtems_semaphore_obtain(lock->lo_id, RTEMS_WAIT, RTEMS_NO_TIMEOUT);
+ if (sc != RTEMS_SUCCESSFUL) {
+ BSD_ASSERT(false);
+
+ eno = EINVAL;
+ }
+ }
+
+ return eno;
+ }
+
+ BSD_PANIC("unexpected object location");
+}
+
+void
+_cv_wait(struct cv *cv, struct lock_object *lock)
+{
+ _cv_wait_support(cv, lock, 0, true);
+}
+
+void
+_cv_wait_unlock(struct cv *cv, struct lock_object *lock)
+{
+ _cv_wait_support(cv, lock, 0, false);
+}
+
+int
+_cv_timedwait(struct cv *cv, struct lock_object *lock, int timo)
+{
+ if (timo <= 0) {
+ timo = 1;
+ }
+
+ return _cv_wait_support(cv, lock, timo, true);
+}
+
+void
+cv_signal(struct cv *cv)
+{
+ int rv = pthread_cond_signal(&cv->cv_id);
+
+ BSD_ASSERT_RV(rv);
+}
+
+void
+cv_broadcastpri(struct cv *cv, int pri)
+{
+ int rv = 0;
+
+ BSD_ASSERT(pri == 0);
+
+ rv = pthread_cond_broadcast(&cv->cv_id);
+ BSD_ASSERT_RV(rv);
+}
diff --git a/rtems/freebsd/rtems/rtems-bsd-delay.c b/rtems/freebsd/rtems/rtems-bsd-delay.c
new file mode 100644
index 00000000..b047ab6e
--- /dev/null
+++ b/rtems/freebsd/rtems/rtems-bsd-delay.c
@@ -0,0 +1,45 @@
+/**
+ * @file
+ *
+ * @ingroup rtems_bsd_rtems
+ *
+ * @brief DELAY() service.
+ */
+
+/*
+ * Copyright (c) 2009, 2010 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Obere Lagerstr. 30
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.com/license/LICENSE.
+ */
+
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/types.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/kernel.h>
+
+void
+DELAY(int usec)
+{
+ rtems_status_code sc = RTEMS_SUCCESSFUL;
+
+ /* FIXME: Integer conversion */
+ rtems_interval ticks =
+ ((rtems_interval) usec * (rtems_interval) hz) / 1000000;
+
+ if (ticks == 0) {
+ ticks = 1;
+ }
+
+ sc = rtems_task_wake_after(ticks);
+ BSD_ASSERT_SC(sc);
+}
diff --git a/rtems/freebsd/rtems/rtems-bsd-generic.c b/rtems/freebsd/rtems/rtems-bsd-generic.c
new file mode 100644
index 00000000..3a46da43
--- /dev/null
+++ b/rtems/freebsd/rtems/rtems-bsd-generic.c
@@ -0,0 +1,209 @@
+/**
+ * @file
+ *
+ * @ingroup rtems_bsd_rtems
+ *
+ * @brief Generic services, currently select() for sockets.
+ */
+
+/*
+ * Copyright (c) 2009, 2010 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Obere Lagerstr. 30
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.com/license/LICENSE.
+ */
+
+#include <sys/types.h> /* Needed for fd_mask */
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+#include <rtems/freebsd/sys/types.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/selinfo.h>
+#include <rtems/freebsd/sys/fcntl.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/socketvar.h>
+#include <rtems/freebsd/sys/protosw.h>
+#include <rtems/freebsd/sys/select.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/mutex.h>
+#include <rtems/freebsd/sys/malloc.h>
+
+MALLOC_DEFINE(M_IOV, "iov", "large iov's");
+
+void selrecord(struct thread *selector, struct selinfo *sip)
+{
+ BSD_PANIC("not implemented");
+}
+
+void selwakeup(struct selinfo *sip)
+{
+ BSD_PANIC("not implemented");
+}
+
+void selwakeuppri(struct selinfo *sip, int pri)
+{
+ BSD_PANIC("not implemented");
+}
+
+void seltdfini(struct thread *td)
+{
+ BSD_PANIC("not implemented");
+}
+
+/*
+ *********************************************************************
+ * RTEMS implementation of select() system call *
+ *********************************************************************
+ */
+
+/*
+ * This implementation is quite restricted:
+ *  - It works on sockets only; other devices are not supported.
+ *  - Only one task at a time may have a given socket in a read-select
+ *    or in a read/recv*.
+ *  - Only one task at a time may have a given socket in a write-select
+ *    or in a write/send*.
+ *
+ * NOTE - select() is a very expensive system call. It should be avoided
+ * if at all possible. In many cases, rewriting the application
+ * to use multiple tasks (one per socket) is a better solution.
+ */
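+
+/*
+ * Usage sketch (hypothetical application code; sd is a placeholder socket
+ * descriptor): wait up to two seconds for a socket to become readable.
+ *
+ *	fd_set rd;
+ *	struct timeval tv = { 2, 0 };
+ *
+ *	FD_ZERO(&rd);
+ *	FD_SET(sd, &rd);
+ *	if (select(sd + 1, &rd, NULL, NULL, &tv) > 0 && FD_ISSET(sd, &rd))
+ *		... sd is readable ...
+ */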
+
+struct socket *rtems_bsdnet_fdToSocket(int fd);
+
+static int
+socket_select (struct socket *so, int which, rtems_id tid)
+{
+ switch (which) {
+
+ case FREAD:
+ if (soreadable(so))
+ return (1);
+ SOCK_LOCK(so);
+ so->so_rcv.sb_flags |= SB_WAIT;
+ so->so_rcv.sb_sel.si_pid = tid;
+ SOCK_UNLOCK(so);
+ break;
+
+ case FWRITE:
+ if (sowriteable(so))
+ return (1);
+ SOCK_LOCK(so);
+ so->so_snd.sb_flags |= SB_WAIT;
+ so->so_snd.sb_sel.si_pid = tid;
+ SOCK_UNLOCK(so);
+ break;
+
+ case 0:
+ if (so->so_oobmark || (so->so_state & SBS_RCVATMARK))
+ return (1);
+ SOCK_LOCK(so);
+ so->so_rcv.sb_sel.si_pid = tid;
+ SOCK_UNLOCK(so);
+ break;
+ }
+ return (0);
+}
+
+static int
+selscan (rtems_id tid, fd_mask **ibits, fd_mask **obits, int nfd, int *retval)
+{
+ struct socket *so;
+ int msk, i, fd;
+ fd_mask bits, bit;
+ int n = 0;
+ static int flag[3] = { FREAD, FWRITE, 0 };
+
+ for (msk = 0; msk < 3; msk++) {
+ if (ibits[msk] == NULL)
+ continue;
+ for (i = 0; i < nfd; i += NFDBITS) {
+ bits = ibits[msk][i/NFDBITS];
+ for (fd = i, bit = 1 ; bits && (fd < nfd) ; fd++, bit <<= 1) {
+ if ((bits & bit) == 0)
+ continue;
+ bits &= ~bit;
+ so = rtems_bsdnet_fdToSocket (fd);
+ if (so == NULL)
+ return (EBADF);
+ if (socket_select (so, flag[msk], tid)) {
+ obits[msk][fd/NFDBITS] |=
+ (1 << (fd % NFDBITS));
+ n++;
+ }
+ }
+ }
+ }
+ *retval = n;
+ return (0);
+}
+
+int
+select (int nfds, fd_set *readfds, fd_set *writefds, fd_set *exceptfds, struct timeval *tv)
+{
+ fd_mask *ibits[3], *obits[3];
+ fd_set ob[3];
+ int error, timo;
+ int retval = 0;
+ rtems_id tid;
+ rtems_interval then = 0, now;
+ rtems_event_set events;
+
+ if (nfds < 0)
+ return (EINVAL);
+ if (tv) {
+ timo = tv->tv_sec * hz + tv->tv_usec / tick;
+ if (timo == 0)
+ timo = 1;
+ then = rtems_clock_get_ticks_since_boot();
+ }
+ else {
+ timo = 0;
+ }
+
+#define getbits(name,i) if (name) { \
+ ibits[i] = &name->fds_bits[0]; \
+ obits[i] = &ob[i].fds_bits[0]; \
+ FD_ZERO(&ob[i]); \
+ } \
+ else ibits[i] = NULL
+ getbits (readfds, 0);
+ getbits (writefds, 1);
+ getbits (exceptfds, 2);
+#undef getbits
+
+	rtems_task_ident (RTEMS_SELF, 0, &tid);
+	/*
+	 * FIXME: Event based blocking is not wired up, so the loop below
+	 * polls until the timeout expires or a descriptor becomes ready.
+	 */
+	//rtems_event_receive (SBWAIT_EVENT, RTEMS_EVENT_ANY | RTEMS_NO_WAIT, RTEMS_NO_TIMEOUT, &events);
+ for (;;) {
+ error = selscan(tid, ibits, obits, nfds, &retval);
+ if (error || retval)
+ break;
+ if (timo) {
+ now = rtems_clock_get_ticks_since_boot();
+ timo -= now - then;
+ if (timo <= 0)
+ break;
+ then = now;
+ }
+ //rtems_event_receive (SBWAIT_EVENT, RTEMS_EVENT_ANY | RTEMS_WAIT, timo, &events);
+ }
+
+#define putbits(name,i) if (name) *name = ob[i]
+ putbits (readfds, 0);
+ putbits (writefds, 1);
+ putbits (exceptfds, 2);
+#undef putbits
+ if (error) {
+ errno = error;
+ retval = -1;
+ }
+ return (retval);
+}
diff --git a/rtems/freebsd/rtems/rtems-bsd-init-with-irq.c b/rtems/freebsd/rtems/rtems-bsd-init-with-irq.c
new file mode 100644
index 00000000..c8b3ddc7
--- /dev/null
+++ b/rtems/freebsd/rtems/rtems-bsd-init-with-irq.c
@@ -0,0 +1,46 @@
+/**
+ * @file
+ *
+ * @ingroup rtems_bsd_rtems
+ *
+ * @brief BSD initialization including the interrupt server.
+ */
+
+/*
+ * Copyright (c) 2009, 2010 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Obere Lagerstr. 30
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.com/license/LICENSE.
+ */
+
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+#include <rtems/irq-extension.h>
+
+#include <rtems/freebsd/bsd.h>
+
+rtems_status_code
+rtems_bsd_initialize_with_interrupt_server(void)
+{
+ rtems_status_code sc = RTEMS_SUCCESSFUL;
+
+ sc = rtems_interrupt_server_initialize(
+ BSD_TASK_PRIORITY_INTERRUPT,
+ BSD_MINIMUM_TASK_STACK_SIZE,
+ RTEMS_DEFAULT_MODES,
+ RTEMS_DEFAULT_ATTRIBUTES,
+ NULL
+ );
+ if (sc != RTEMS_SUCCESSFUL) {
+ return RTEMS_UNSATISFIED;
+ }
+
+ return rtems_bsd_initialize();
+}
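+
+/*
+ * Usage sketch (hypothetical application code): initialize the BSD layer
+ * from the RTEMS initialization task before any BSD service is used.
+ *
+ *	rtems_status_code sc = rtems_bsd_initialize_with_interrupt_server();
+ *	if (sc != RTEMS_SUCCESSFUL) {
+ *		... initialization failed ...
+ *	}
+ */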
diff --git a/rtems/freebsd/rtems/rtems-bsd-init.c b/rtems/freebsd/rtems/rtems-bsd-init.c
new file mode 100644
index 00000000..347d6710
--- /dev/null
+++ b/rtems/freebsd/rtems/rtems-bsd-init.c
@@ -0,0 +1,65 @@
+/**
+ * @file
+ *
+ * @ingroup rtems_bsd_rtems
+ *
+ * @brief BSD initialization.
+ */
+
+/*
+ * Copyright (c) 2009, 2010 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Obere Lagerstr. 30
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.com/license/LICENSE.
+ */
+
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/types.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/mutex.h>
+#include <rtems/freebsd/sys/proc.h>
+
+#include <rtems/freebsd/bsd.h>
+
+/* In FreeBSD this is a local function */
+void mi_startup(void);
+
+int hz;
+int tick;
+int maxusers; /* base tunable */
+
+rtems_status_code
+rtems_bsd_initialize(void)
+{
+ rtems_status_code sc = RTEMS_SUCCESSFUL;
+
+ hz = (int) rtems_clock_get_ticks_per_second();
+ tick = 1000000 / hz;
+ maxusers = 1;
+
+ sc = rtems_timer_initiate_server(
+ BSD_TASK_PRIORITY_TIMER,
+ BSD_MINIMUM_TASK_STACK_SIZE,
+ RTEMS_DEFAULT_ATTRIBUTES
+ );
+ if (sc != RTEMS_SUCCESSFUL) {
+ return RTEMS_UNSATISFIED;
+ }
+
+ mutex_init();
+
+ mi_startup();
+
+ return RTEMS_SUCCESSFUL;
+}
diff --git a/rtems/freebsd/rtems/rtems-bsd-jail.c b/rtems/freebsd/rtems/rtems-bsd-jail.c
new file mode 100644
index 00000000..d04efe5b
--- /dev/null
+++ b/rtems/freebsd/rtems/rtems-bsd-jail.c
@@ -0,0 +1,92 @@
+/**
+ * @file
+ *
+ * @ingroup rtems_bsd_rtems
+ *
+ * @brief Jail services (prison0 only).
+ */
+
+/*
+ * Copyright (c) 2009, 2010 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Obere Lagerstr. 30
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.com/license/LICENSE.
+ */
+
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+/*#include <rtems/freebsd/sys/types.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/jail.h>
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/mutex.h>*/
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/types.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/errno.h>
+#include <rtems/freebsd/sys/sysproto.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/osd.h>
+#include <rtems/freebsd/sys/priv.h>
+#include <rtems/freebsd/sys/proc.h>
+#include <rtems/freebsd/sys/taskqueue.h>
+#include <rtems/freebsd/sys/fcntl.h>
+#include <rtems/freebsd/sys/jail.h>
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/mutex.h>
+#include <rtems/freebsd/sys/sx.h>
+#include <rtems/freebsd/sys/sysent.h>
+#include <rtems/freebsd/sys/namei.h>
+#include <rtems/freebsd/sys/mount.h>
+#include <rtems/freebsd/sys/queue.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/syscallsubr.h>
+#include <rtems/freebsd/sys/sysctl.h>
+
+#define DEFAULT_HOSTUUID "00000000-0000-0000-0000-000000000000"
+
+/* Keep struct prison prison0 and some code in kern_jail_set() readable. */
+#ifdef INET
+#ifdef INET6
+#define _PR_IP_SADDRSEL PR_IP4_SADDRSEL|PR_IP6_SADDRSEL
+#else
+#define _PR_IP_SADDRSEL PR_IP4_SADDRSEL
+#endif
+#else /* !INET */
+#ifdef INET6
+#define _PR_IP_SADDRSEL PR_IP6_SADDRSEL
+#else
+#define _PR_IP_SADDRSEL 0
+#endif
+#endif
+
+/* prison0 describes what is "real" about the system. */
+struct prison prison0 = {
+ .pr_id = 0,
+ .pr_name = "0",
+ .pr_ref = 1,
+ .pr_uref = 1,
+ .pr_path = "/",
+ .pr_securelevel = -1,
+ .pr_childmax = JAIL_MAX,
+ .pr_hostuuid = DEFAULT_HOSTUUID,
+ .pr_children = LIST_HEAD_INITIALIZER(prison0.pr_children),
+#ifdef VIMAGE
+ .pr_flags = PR_HOST|PR_VNET|_PR_IP_SADDRSEL,
+#else
+ .pr_flags = PR_HOST|_PR_IP_SADDRSEL,
+#endif
+ .pr_allow = PR_ALLOW_ALL,
+};
+MTX_SYSINIT(prison0, &prison0.pr_mtx, "jail mutex", MTX_DEF);
+
diff --git a/rtems/freebsd/rtems/rtems-bsd-lock.c b/rtems/freebsd/rtems/rtems-bsd-lock.c
new file mode 100644
index 00000000..e351debc
--- /dev/null
+++ b/rtems/freebsd/rtems/rtems-bsd-lock.c
@@ -0,0 +1,45 @@
+/**
+ * @file
+ *
+ * @ingroup rtems_bsd_rtems
+ *
+ * @brief Lock class table.
+ */
+
+/*
+ * Copyright (c) 2009, 2010 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Obere Lagerstr. 30
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.com/license/LICENSE.
+ */
+
+/* Necessary to obtain some internal functions */
+#define __RTEMS_VIOLATE_KERNEL_VISIBILITY__
+
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/types.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/ktr.h>
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/mutex.h>
+#include <rtems/freebsd/sys/sx.h>
+#include <rtems/freebsd/sys/rwlock.h>
+#include <rtems/freebsd/sys/proc.h>
+
+struct lock_class *lock_classes[LOCK_CLASS_MAX + 1] = {
+ &lock_class_mtx_spin,
+ &lock_class_mtx_sleep,
+ &lock_class_sx,
+ &lock_class_rm,
+ &lock_class_rw,
+};
+
diff --git a/rtems/freebsd/rtems/rtems-bsd-malloc.c b/rtems/freebsd/rtems/rtems-bsd-malloc.c
new file mode 100644
index 00000000..b534b729
--- /dev/null
+++ b/rtems/freebsd/rtems/rtems-bsd-malloc.c
@@ -0,0 +1,77 @@
+/**
+ * @file
+ *
+ * @ingroup rtems_bsd_rtems
+ *
+ * @brief Kernel malloc services based on the standard heap.
+ */
+
+/*
+ * Copyright (c) 2009, 2010 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Obere Lagerstr. 30
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.com/license/LICENSE.
+ */
+
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/types.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/kernel.h>
+
+MALLOC_DEFINE(M_DEVBUF, "devbuf", "device driver memory");
+
+MALLOC_DEFINE(M_TEMP, "temp", "misc temporary data buffers");
+
+void
+malloc_init(void *data)
+{
+	struct malloc_type *mtp = data;
+
+	(void) mtp;	/* No malloc type statistics are maintained */
+}
+
+void
+malloc_uninit(void *data)
+{
+ struct malloc_type *mtp = data;
+
+ BSD_PRINTF( "desc = %s\n", mtp->ks_shortdesc);
+}
+
+#undef malloc
+
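+/*
+ * Map the FreeBSD kernel allocator onto the standard C heap.  M_WAITOK and
+ * M_NOWAIT behave identically here and no malloc type statistics are
+ * maintained; only M_ZERO is honored.
+ */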
+void *
+_bsd_malloc(unsigned long size, struct malloc_type *mtp, int flags)
+{
+ void *p = malloc(size);
+
+ if ((flags & M_ZERO) != 0 && p != NULL) {
+ memset(p, 0, size);
+ }
+
+ return p;
+}
+
+#undef free
+
+void
+_bsd_free(void *addr, struct malloc_type *mtp)
+{
+ free(addr);
+}
+
+#undef strdup
+
+char *
+_bsd_strdup(const char *__restrict s, struct malloc_type *type)
+{
+ return strdup(s);
+}
diff --git a/rtems/freebsd/rtems/rtems-bsd-mutex.c b/rtems/freebsd/rtems/rtems-bsd-mutex.c
new file mode 100644
index 00000000..837232d2
--- /dev/null
+++ b/rtems/freebsd/rtems/rtems-bsd-mutex.c
@@ -0,0 +1,314 @@
+/**
+ * @file
+ *
+ * @ingroup rtems_bsd_rtems
+ *
+ * @brief Mutex services based on RTEMS binary semaphores.
+ */
+
+/*
+ * Copyright (c) 2009, 2010 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Obere Lagerstr. 30
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.com/license/LICENSE.
+ */
+
+/* Necessary to obtain some internal functions */
+#define __RTEMS_VIOLATE_KERNEL_VISIBILITY__
+
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/types.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/mutex.h>
+#include <rtems/freebsd/sys/proc.h>
+
+static void assert_mtx(struct lock_object *lock, int what);
+static void lock_mtx(struct lock_object *lock, int how);
+static void lock_spin(struct lock_object *lock, int how);
+#ifdef KDTRACE_HOOKS
+static int owner_mtx(struct lock_object *lock, struct thread **owner);
+#endif
+static int unlock_mtx(struct lock_object *lock);
+static int unlock_spin(struct lock_object *lock);
+
+RTEMS_CHAIN_DEFINE_EMPTY(rtems_bsd_mtx_chain);
+
+/*
+ * Lock classes for sleep and spin mutexes.
+ */
+struct lock_class lock_class_mtx_sleep = {
+ .lc_name = "sleep mutex",
+ .lc_flags = LC_SLEEPLOCK | LC_RECURSABLE,
+ .lc_assert = assert_mtx,
+#ifdef DDB
+ .lc_ddb_show = db_show_mtx,
+#endif
+ .lc_lock = lock_mtx,
+ .lc_unlock = unlock_mtx,
+#ifdef KDTRACE_HOOKS
+ .lc_owner = owner_mtx,
+#endif
+};
+
+struct lock_class lock_class_mtx_spin = {
+ .lc_name = "spin mutex",
+ .lc_flags = LC_SPINLOCK | LC_RECURSABLE,
+ .lc_assert = assert_mtx,
+#ifdef DDB
+ .lc_ddb_show = db_show_mtx,
+#endif
+ .lc_lock = lock_spin,
+ .lc_unlock = unlock_spin,
+#ifdef KDTRACE_HOOKS
+ .lc_owner = owner_mtx,
+#endif
+};
+
+struct mtx Giant;
+
+void
+assert_mtx(struct lock_object *lock, int what)
+{
+ mtx_assert((struct mtx *)lock, what);
+}
+
+void
+lock_mtx(struct lock_object *lock, int how)
+{
+
+ mtx_lock((struct mtx *)lock);
+}
+
+void
+lock_spin(struct lock_object *lock, int how)
+{
+
+ panic("spin locks can only use msleep_spin");
+}
+
+int
+unlock_mtx(struct lock_object *lock)
+{
+ struct mtx *m;
+
+ m = (struct mtx *)lock;
+ mtx_assert(m, MA_OWNED | MA_NOTRECURSED);
+ mtx_unlock(m);
+ return (0);
+}
+
+int
+unlock_spin(struct lock_object *lock)
+{
+
+ panic("spin locks can only use msleep_spin");
+}
+
+#ifdef KDTRACE_HOOKS
+int
+owner_mtx(struct lock_object *lock, struct thread **owner)
+{
+ struct mtx *m = (struct mtx *)lock;
+
+ *owner = mtx_owner(m);
+ return (mtx_unowned(m) == 0);
+}
+#endif
+
+void
+mtx_init(struct mtx *m, const char *name, const char *type, int opts)
+{
+ struct lock_class *class;
+ int i;
+ rtems_status_code sc = RTEMS_SUCCESSFUL;
+ rtems_id id = RTEMS_ID_NONE;
+ /* rtems_attribute attr = RTEMS_LOCAL | RTEMS_PRIORITY | RTEMS_BINARY_SEMAPHORE | RTEMS_PRIORITY_CEILING; */
+ rtems_attribute attr = RTEMS_LOCAL | RTEMS_PRIORITY | RTEMS_BINARY_SEMAPHORE;
+
+ if ((opts & MTX_RECURSE) != 0 )
+ {
+ /*FIXME*/
+ }
+
+ /* Determine lock class and lock flags. */
+ if (opts & MTX_SPIN)
+ class = &lock_class_mtx_spin;
+ else
+ class = &lock_class_mtx_sleep;
+
+ /* Check for double-init and zero object. */
+	KASSERT(!lock_initalized(&m->lock_object), ("lock \"%s\" %p already initialized", name, &m->lock_object));
+
+ /* Look up lock class to find its index. */
+ for (i = 0; i < LOCK_CLASS_MAX; i++)
+ {
+ if (lock_classes[i] == class)
+ {
+ m->lock_object.lo_flags = i << LO_CLASSSHIFT;
+ break;
+ }
+ }
+ KASSERT(i < LOCK_CLASS_MAX, ("unknown lock class %p", class));
+
+ sc = rtems_semaphore_create(
+ rtems_build_name('_', 'M', 'T', 'X'),
+ 1,
+ attr,
+ BSD_TASK_PRIORITY_RESOURCE_OWNER,
+ &id
+ );
+ BSD_ASSERT_SC(sc);
+
+ m->lock_object.lo_name = name;
+ m->lock_object.lo_flags |= LO_INITIALIZED;
+ m->lock_object.lo_id = id;
+
+ rtems_chain_append(&rtems_bsd_mtx_chain, &m->lock_object.lo_node);
+}
+
+void
+_mtx_lock_flags(struct mtx *m, int opts, const char *file, int line)
+{
+ rtems_status_code sc = RTEMS_SUCCESSFUL;
+
+ sc = rtems_semaphore_obtain(m->lock_object.lo_id, RTEMS_WAIT, RTEMS_NO_TIMEOUT);
+ BSD_ASSERT_SC(sc);
+}
+
+int
+_mtx_trylock(struct mtx *m, int opts, const char *file, int line)
+{
+ rtems_status_code sc = RTEMS_SUCCESSFUL;
+
+ sc = rtems_semaphore_obtain(m->lock_object.lo_id, RTEMS_NO_WAIT, 0);
+ if (sc == RTEMS_SUCCESSFUL) {
+ return 1;
+ } else if (sc == RTEMS_UNSATISFIED) {
+ return 0;
+ } else {
+ BSD_ASSERT_SC(sc);
+
+ return 0;
+ }
+}
+
+void
+_mtx_unlock_flags(struct mtx *m, int opts, const char *file, int line)
+{
+ rtems_status_code sc = RTEMS_SUCCESSFUL;
+
+ sc = rtems_semaphore_release(m->lock_object.lo_id);
+ BSD_ASSERT_SC(sc);
+}
+
+/*
+ * The backing function for the INVARIANTS-enabled mtx_assert()
+ */
+#ifdef INVARIANT_SUPPORT
+void
+_mtx_assert(struct mtx *m, int what, const char *file, int line)
+{
+
+ if (panicstr != NULL || dumping)
+ return;
+ switch (what) {
+ case MA_OWNED:
+ case MA_OWNED | MA_RECURSED:
+ case MA_OWNED | MA_NOTRECURSED:
+ if (!mtx_owned(m))
+ panic("mutex %s not owned at %s:%d",
+ m->lock_object.lo_name, file, line);
+ if (mtx_recursed(m)) {
+ if ((what & MA_NOTRECURSED) != 0)
+ panic("mutex %s recursed at %s:%d",
+ m->lock_object.lo_name, file, line);
+ } else if ((what & MA_RECURSED) != 0) {
+ panic("mutex %s unrecursed at %s:%d",
+ m->lock_object.lo_name, file, line);
+ }
+ break;
+ case MA_NOTOWNED:
+ if (mtx_owned(m))
+ panic("mutex %s owned at %s:%d",
+ m->lock_object.lo_name, file, line);
+ break;
+ default:
+ panic("unknown mtx_assert at %s:%d", file, line);
+ }
+}
+#endif
+
+int
+mtx_owned(struct mtx *m)
+{
+ Objects_Locations location;
+ Semaphore_Control *sema = _Semaphore_Get(m->lock_object.lo_id, &location);
+
+ if (location == OBJECTS_LOCAL && !_Attributes_Is_counting_semaphore(sema->attribute_set)) {
+ int owned = sema->Core_control.mutex.holder_id == rtems_task_self();
+
+ _Thread_Enable_dispatch();
+
+ return owned;
+ } else {
+ _Thread_Enable_dispatch();
+
+ BSD_PANIC("unexpected semaphore location or attributes");
+ }
+}
+
+int
+mtx_recursed(struct mtx *m)
+{
+ Objects_Locations location;
+ Semaphore_Control *sema = _Semaphore_Get(m->lock_object.lo_id, &location);
+
+ if (location == OBJECTS_LOCAL && !_Attributes_Is_counting_semaphore(sema->attribute_set)) {
+ int recursed = sema->Core_control.mutex.nest_count != 0;
+
+ _Thread_Enable_dispatch();
+
+ return recursed;
+ } else {
+ _Thread_Enable_dispatch();
+
+ BSD_PANIC("unexpected semaphore location or attributes");
+ }
+}
+
+void
+mtx_sysinit(void *arg)
+{
+ struct mtx_args *margs = arg;
+
+ mtx_init(margs->ma_mtx, margs->ma_desc, NULL, margs->ma_opts);
+}
+
+void
+mtx_destroy(struct mtx *m)
+{
+ rtems_status_code sc = RTEMS_SUCCESSFUL;
+
+ sc = rtems_semaphore_delete(m->lock_object.lo_id);
+ BSD_ASSERT_SC(sc);
+
+ rtems_chain_extract(&m->lock_object.lo_node);
+
+ m->lock_object.lo_id = 0;
+ m->lock_object.lo_flags &= ~LO_INITIALIZED;
+}
+
+void
+mutex_init(void)
+{
+ mtx_init(&Giant, "Giant", NULL, MTX_DEF | MTX_RECURSE);
+ mtx_init(&proc0.p_mtx, "process lock", NULL, MTX_DEF | MTX_DUPOK);
+}
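+
+/*
+ * Usage sketch (hypothetical driver code; sc is a placeholder): protect a
+ * driver state structure with a sleep mutex.
+ *
+ *	mtx_init(&sc->mtx, "foo driver", NULL, MTX_DEF);
+ *	mtx_lock(&sc->mtx);
+ *	... manipulate sc ...
+ *	mtx_unlock(&sc->mtx);
+ *	mtx_destroy(&sc->mtx);
+ */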
diff --git a/rtems/freebsd/rtems/rtems-bsd-nexus.c b/rtems/freebsd/rtems/rtems-bsd-nexus.c
new file mode 100644
index 00000000..480307fc
--- /dev/null
+++ b/rtems/freebsd/rtems/rtems-bsd-nexus.c
@@ -0,0 +1,71 @@
+/**
+ * @file
+ *
+ * @ingroup rtems_bsd_rtems
+ *
+ * @brief Nexus bus driver.
+ */
+
+/*
+ * Copyright (c) 2009, 2010 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Obere Lagerstr. 30
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.com/license/LICENSE.
+ */
+
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+#include <rtems/freebsd/machine/rtems-bsd-sysinit.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/types.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/bus.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/module.h>
+
+static int
+nexus_probe(device_t dev)
+{
+ size_t unit = 0;
+
+ /* FIXME */
+ for (unit = 0; _bsd_nexus_devices [unit] != NULL; ++unit) {
+ device_add_child(dev, _bsd_nexus_devices [unit], unit);
+ }
+
+ device_set_desc(dev, "RTEMS Nexus device");
+
+ return (0);
+}
+
+static device_method_t nexus_methods [] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, nexus_probe),
+ DEVMETHOD(device_attach, bus_generic_attach),
+ DEVMETHOD(device_detach, bus_generic_detach),
+ DEVMETHOD(device_shutdown, bus_generic_shutdown),
+ DEVMETHOD(device_suspend, bus_generic_suspend),
+ DEVMETHOD(device_resume, bus_generic_resume),
+
+ /* Bus interface */
+ DEVMETHOD(bus_print_child, bus_generic_print_child),
+
+ { 0, 0 }
+};
+
+static driver_t nexus_driver = {
+ .name = "nexus",
+ .methods = nexus_methods,
+ .size = 0
+};
+
+static devclass_t nexus_devclass;
+
+DRIVER_MODULE(nexus, root, nexus_driver, nexus_devclass, 0, 0);
diff --git a/rtems/freebsd/rtems/rtems-bsd-panic.c b/rtems/freebsd/rtems/rtems-bsd-panic.c
new file mode 100644
index 00000000..2425abed
--- /dev/null
+++ b/rtems/freebsd/rtems/rtems-bsd-panic.c
@@ -0,0 +1,70 @@
+/**
+ * @file
+ *
+ * @ingroup rtems_bsd_rtems
+ *
+ * @brief Panic service.
+ */
+
+/*
+ * Copyright (c) 2009, 2010 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Obere Lagerstr. 30
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.com/license/LICENSE.
+ */
+
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/types.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/mutex.h>
+#include <rtems/freebsd/sys/proc.h>
+
+static void
+suspend_all_threads(void)
+{
+ rtems_chain_control *chain = &rtems_bsd_thread_chain;
+ rtems_chain_node *node = rtems_chain_first(chain);
+ rtems_id self = rtems_task_self();
+
+ while (!rtems_chain_is_tail(chain, node)) {
+ struct thread *td = (struct thread *) node;
+
+ if (td->td_id != self && td->td_id != RTEMS_SELF) {
+ rtems_task_suspend(td->td_id);
+ }
+
+ node = rtems_chain_next(node);
+ }
+
+ rtems_task_suspend(RTEMS_SELF);
+}
+
+void
+panic(const char *fmt, ...)
+{
+ va_list ap;
+
+ printf("*** BSD PANIC *** ");
+
+ va_start(ap, fmt);
+ vprintf(fmt, ap);
+ va_end(ap);
+
+ printf("\n");
+
+ suspend_all_threads();
+
+ /* FIXME */
+ rtems_fatal_error_occurred(0xdeadbeef);
+}
diff --git a/rtems/freebsd/rtems/rtems-bsd-prot.c b/rtems/freebsd/rtems/rtems-bsd-prot.c
new file mode 100644
index 00000000..3199c4ba
--- /dev/null
+++ b/rtems/freebsd/rtems/rtems-bsd-prot.c
@@ -0,0 +1,142 @@
+/**
+ * @file
+ *
+ * @ingroup rtems_bsd_rtems
+ *
+ * @brief Credential services.
+ */
+
+/*
+ * Copyright (c) 2009, 2010 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Obere Lagerstr. 30
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.com/license/LICENSE.
+ */
+
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+#include <rtems/freebsd/sys/types.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/ucred.h>
+
+static MALLOC_DEFINE(M_CRED, "cred", "credentials");
+
+/*
+ * Allocate a zeroed cred structure.
+ */
+struct ucred *
+crget(void)
+{
+ register struct ucred *cr;
+
+ cr = malloc(sizeof(*cr), M_CRED, M_WAITOK | M_ZERO);
+ refcount_init(&cr->cr_ref, 1);
+#ifdef AUDIT
+ audit_cred_init(cr);
+#endif
+#ifdef MAC
+ mac_cred_init(cr);
+#endif
+ crextend(cr, XU_NGROUPS);
+ return (cr);
+}
+
+/*
+ * Claim another reference to a ucred structure.
+ */
+struct ucred *
+crhold(struct ucred *cr)
+{
+
+ refcount_acquire(&cr->cr_ref);
+ return (cr);
+}
+
+/*
+ * Free a cred structure. Throws away space when ref count gets to 0.
+ */
+void
+crfree(struct ucred *cr)
+{
+
+ KASSERT(cr->cr_ref > 0, ("bad ucred refcount: %d", cr->cr_ref));
+ KASSERT(cr->cr_ref != 0xdeadc0de, ("dangling reference to ucred"));
+ if (refcount_release(&cr->cr_ref)) {
+ /*
+ * Some callers of crget(), such as nfs_statfs(),
+ * allocate a temporary credential, but don't
+ * allocate a uidinfo structure.
+ */
+ if (cr->cr_uidinfo != NULL)
+ uifree(cr->cr_uidinfo);
+ if (cr->cr_ruidinfo != NULL)
+ uifree(cr->cr_ruidinfo);
+ /*
+ * Free a prison, if any.
+ */
+ if (cr->cr_prison != NULL)
+ prison_free(cr->cr_prison);
+#ifdef AUDIT
+ audit_cred_destroy(cr);
+#endif
+#ifdef MAC
+ mac_cred_destroy(cr);
+#endif
+ free(cr->cr_groups, M_CRED);
+ free(cr, M_CRED);
+ }
+}
+
+/*
+ * Check to see if this ucred is shared.
+ */
+int
+crshared(struct ucred *cr)
+{
+
+ return (cr->cr_ref > 1);
+}
+
+/*
+ * Copy a ucred's contents from a template. Does not block.
+ */
+void
+crcopy(struct ucred *dest, struct ucred *src)
+{
+
+ KASSERT(crshared(dest) == 0, ("crcopy of shared ucred"));
+ bcopy(&src->cr_startcopy, &dest->cr_startcopy,
+ (unsigned)((caddr_t)&src->cr_endcopy -
+ (caddr_t)&src->cr_startcopy));
+ crsetgroups(dest, src->cr_ngroups, src->cr_groups);
+ uihold(dest->cr_uidinfo);
+ uihold(dest->cr_ruidinfo);
+ prison_hold(dest->cr_prison);
+#ifdef AUDIT
+ audit_cred_copy(src, dest);
+#endif
+#ifdef MAC
+ mac_cred_copy(src, dest);
+#endif
+}
+
+/*
+ * Dup cred struct to a new held one.
+ */
+struct ucred *
+crdup(struct ucred *cr)
+{
+ struct ucred *newcr;
+
+ newcr = crget();
+ crcopy(newcr, cr);
+ return (newcr);
+}
diff --git a/rtems/freebsd/rtems/rtems-bsd-resource.c b/rtems/freebsd/rtems/rtems-bsd-resource.c
new file mode 100644
index 00000000..3b85d4b7
--- /dev/null
+++ b/rtems/freebsd/rtems/rtems-bsd-resource.c
@@ -0,0 +1,173 @@
+/**
+ * @file
+ *
+ * @ingroup rtems_bsd_rtems
+ *
+ * @brief User identity resource accounting (uidinfo) services.
+ */
+
+/*
+ * Copyright (c) 2009, 2010 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Obere Lagerstr. 30
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.com/license/LICENSE.
+ */
+
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+#include <rtems/freebsd/sys/types.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/mutex.h>
+#include <rtems/freebsd/sys/proc.h>
+#include <rtems/freebsd/sys/resourcevar.h>
+#include <rtems/freebsd/sys/rwlock.h>
+
+static MALLOC_DEFINE(M_UIDINFO, "uidinfo", "uidinfo structures");
+
+#define UIHASH(uid) (&uihashtbl[(uid) & uihash])
+static struct rwlock uihashtbl_lock;
+static LIST_HEAD(uihashhead, uidinfo) *uihashtbl;
+static u_long uihash; /* size of hash table - 1 */
+
+/*
+ * Find the uidinfo structure for a uid. This structure is used to
+ * track the total resource consumption (process count, socket buffer
+ * size, etc.) for the uid and impose limits.
+ */
+void
+uihashinit()
+{
+
+ uihashtbl = hashinit(maxproc / 16, M_UIDINFO, &uihash);
+ rw_init(&uihashtbl_lock, "uidinfo hash");
+}
+
+/*
+ * Look up a uidinfo struct for the parameter uid.
+ * uihashtbl_lock must be locked.
+ */
+static struct uidinfo *
+uilookup(uid)
+ uid_t uid;
+{
+ struct uihashhead *uipp;
+ struct uidinfo *uip;
+
+ rw_assert(&uihashtbl_lock, RA_LOCKED);
+ uipp = UIHASH(uid);
+ LIST_FOREACH(uip, uipp, ui_hash)
+ if (uip->ui_uid == uid)
+ break;
+
+ return (uip);
+}
+
+/*
+ * Find or allocate a struct uidinfo for a particular uid.
+ * Increase refcount on uidinfo struct returned.
+ * uifree() should be called on a struct uidinfo when released.
+ */
+struct uidinfo *
+uifind(uid)
+ uid_t uid;
+{
+ struct uidinfo *old_uip, *uip;
+
+ rw_rlock(&uihashtbl_lock);
+ uip = uilookup(uid);
+ if (uip == NULL) {
+ rw_runlock(&uihashtbl_lock);
+ uip = malloc(sizeof(*uip), M_UIDINFO, M_WAITOK | M_ZERO);
+ rw_wlock(&uihashtbl_lock);
+ /*
+ * There's a chance someone created our uidinfo while we
+ * were in malloc and not holding the lock, so we have to
+ * make sure we don't insert a duplicate uidinfo.
+ */
+ if ((old_uip = uilookup(uid)) != NULL) {
+ /* Someone else beat us to it. */
+ free(uip, M_UIDINFO);
+ uip = old_uip;
+ } else {
+ refcount_init(&uip->ui_ref, 0);
+ uip->ui_uid = uid;
+ mtx_init(&uip->ui_vmsize_mtx, "ui_vmsize", NULL,
+ MTX_DEF);
+ LIST_INSERT_HEAD(UIHASH(uid), uip, ui_hash);
+ }
+ }
+ uihold(uip);
+ rw_unlock(&uihashtbl_lock);
+ return (uip);
+}
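+/*
+ * Usage sketch (illustrative): per-uid accounting code pairs one
+ * uifind() with one uifree().
+ *
+ *	struct uidinfo *uip = uifind(uid);	reference taken
+ *	... charge resources against uip ...
+ *	uifree(uip);				reference dropped
+ */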
+
+/*
+ * Place another refcount on a uidinfo struct.
+ */
+void
+uihold(uip)
+ struct uidinfo *uip;
+{
+
+ refcount_acquire(&uip->ui_ref);
+}
+
+/*-
+ * Since uidinfo structs have a long lifetime, we use an
+ * opportunistic refcounting scheme to avoid locking the lookup hash
+ * for each release.
+ *
+ * If the refcount hits 0, we need to free the structure,
+ * which means we need to lock the hash.
+ * Optimal case:
+ * After locking the struct and lowering the refcount, if we find
+ * that we don't need to free, simply unlock and return.
+ * Suboptimal case:
+ * If refcount lowering results in need to free, bump the count
+ * back up, lose the lock and acquire the locks in the proper
+ * order to try again.
+ */
+void
+uifree(uip)
+ struct uidinfo *uip;
+{
+ int old;
+
+ /* Prepare for optimal case. */
+ old = uip->ui_ref;
+ if (old > 1 && atomic_cmpset_int(&uip->ui_ref, old, old - 1))
+ return;
+
+ /* Prepare for suboptimal case. */
+ rw_wlock(&uihashtbl_lock);
+ if (refcount_release(&uip->ui_ref)) {
+ LIST_REMOVE(uip, ui_hash);
+ rw_wunlock(&uihashtbl_lock);
+ if (uip->ui_sbsize != 0)
+ printf("freeing uidinfo: uid = %d, sbsize = %ld\n",
+ uip->ui_uid, uip->ui_sbsize);
+ if (uip->ui_proccnt != 0)
+ printf("freeing uidinfo: uid = %d, proccnt = %ld\n",
+ uip->ui_uid, uip->ui_proccnt);
+ if (uip->ui_vmsize != 0)
+ printf("freeing uidinfo: uid = %d, swapuse = %lld\n",
+ uip->ui_uid, (unsigned long long)uip->ui_vmsize);
+ mtx_destroy(&uip->ui_vmsize_mtx);
+ free(uip, M_UIDINFO);
+ return;
+ }
+ /*
+ * Someone added a reference between atomic_cmpset_int() and
+ * rw_wlock(&uihashtbl_lock).
+ */
+ rw_wunlock(&uihashtbl_lock);
+}
diff --git a/rtems/freebsd/rtems/rtems-bsd-rwlock.c b/rtems/freebsd/rtems/rtems-bsd-rwlock.c
new file mode 100644
index 00000000..de9decd2
--- /dev/null
+++ b/rtems/freebsd/rtems/rtems-bsd-rwlock.c
@@ -0,0 +1,340 @@
+/**
+ * @file
+ *
+ * @ingroup rtems_bsd_rtems
+ *
+ * @brief TODO.
+ */
+
+/*
+ * Copyright (c) 2011 OPTI Medical. All rights reserved.
+ *
+ * OPTI Medical
+ * 235 Hembree Park Drive
+ * Roswell, GA 30076
+ * USA
+ * <kevin.kirspel@optimedical.com>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.com/license/LICENSE.
+ */
+
+/* Necessary to obtain some internal functions */
+#define __RTEMS_VIOLATE_KERNEL_VISIBILITY__
+
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+#include <sys/types.h>
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/types.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/rwlock.h>
+#include <pthread.h>
+
+#ifndef INVARIANTS
+#define _rw_assert(rw, what, file, line)
+#endif
+
+static void assert_rw(struct lock_object *lock, int what);
+static void lock_rw(struct lock_object *lock, int how);
+#ifdef KDTRACE_HOOKS
+static int owner_rw(struct lock_object *lock, struct thread **owner);
+#endif
+static int unlock_rw(struct lock_object *lock);
+
+typedef uint32_t pthread_rwlock_t;
+
+struct lock_class lock_class_rw = {
+ .lc_name = "rw",
+ .lc_flags = LC_SLEEPLOCK | LC_RECURSABLE | LC_UPGRADABLE,
+ .lc_assert = assert_rw,
+#ifdef DDB
+ .lc_ddb_show = db_show_rwlock,
+#endif
+ .lc_lock = lock_rw,
+ .lc_unlock = unlock_rw,
+#ifdef KDTRACE_HOOKS
+ .lc_owner = owner_rw,
+#endif
+};
+
+RTEMS_CHAIN_DEFINE_EMPTY(rtems_bsd_rwlock_chain);
+
+void
+assert_rw(struct lock_object *lock, int what)
+{
+ rw_assert((struct rwlock *)lock, what);
+}
+
+void
+lock_rw(struct lock_object *lock, int how)
+{
+ struct rwlock *rw;
+
+ rw = (struct rwlock *)lock;
+ if (how)
+ rw_wlock(rw);
+ else
+ rw_rlock(rw);
+}
+
+int
+unlock_rw(struct lock_object *lock)
+{
+ struct rwlock *rw;
+
+ rw = (struct rwlock *)lock;
+ rw_assert(rw, RA_LOCKED | LA_NOTRECURSED);
+ if (rw->rw_lock & RW_LOCK_READ) {
+ rw_runlock(rw);
+ return (0);
+ } else {
+ rw_wunlock(rw);
+ return (1);
+ }
+}
+
+#ifdef KDTRACE_HOOKS
+int
+owner_rw(struct lock_object *lock, struct thread **owner)
+{
+ struct rwlock *rw = (struct rwlock *)lock;
+ uintptr_t x = rw->rw_lock;
+
+ *owner = rw_wowner(rw);
+ return ((x & RW_LOCK_READ) != 0 ? (RW_READERS(x) != 0) :
+ (*owner != NULL));
+}
+#endif
+
+void
+rw_init_flags(struct rwlock *rw, const char *name, int opts)
+{
+ struct lock_class *class;
+ int i;
+ pthread_rwlock_t lock;
+ int iret;
+
+ if ((opts & RW_RECURSE) != 0) {
+ /* FIXME */
+ }
+
+ class = &lock_class_rw;
+
+ /* Check for double-init and zero object. */
+ KASSERT(!lock_initalized(&rw->lock_object), ("lock \"%s\" %p already initialized", name, &rw->lock_object));
+
+ /* Look up lock class to find its index. */
+ for (i = 0; i < LOCK_CLASS_MAX; i++)
+ {
+ if (lock_classes[i] == class)
+ {
+ rw->lock_object.lo_flags = i << LO_CLASSSHIFT;
+ break;
+ }
+ }
+ KASSERT(i < LOCK_CLASS_MAX, ("unknown lock class %p", class));
+
+ iret = pthread_rwlock_init( &lock, NULL );
+ BSD_ASSERT( iret == 0 );
+
+ rw->lock_object.lo_name = name;
+ rw->lock_object.lo_flags |= LO_INITIALIZED;
+ rw->lock_object.lo_id = lock;
+
+ rtems_chain_append(&rtems_bsd_rwlock_chain, &rw->lock_object.lo_node);
+}
+
+void
+rw_destroy(struct rwlock *rw)
+{
+ int iret;
+
+ iret = pthread_rwlock_destroy( &rw->lock_object.lo_id );
+ BSD_ASSERT( iret == 0 );
+ rtems_chain_extract( &rw->lock_object.lo_node );
+ rw->lock_object.lo_id = 0;
+ rw->lock_object.lo_flags &= ~LO_INITIALIZED;
+}
+
+void
+rw_sysinit(void *arg)
+{
+ struct rw_args *args = arg;
+
+ rw_init(args->ra_rw, args->ra_desc);
+}
+
+void
+rw_sysinit_flags(void *arg)
+{
+ struct rw_args_flags *args = arg;
+
+ rw_init_flags(args->ra_rw, args->ra_desc, args->ra_flags);
+}
+
+int
+rw_wowned(struct rwlock *rw)
+{
+ Objects_Locations location;
+ Semaphore_Control *sema = _Semaphore_Get(rw->lock_object.lo_id, &location);
+
+ if (location == OBJECTS_LOCAL && !_Attributes_Is_counting_semaphore(sema->attribute_set)) {
+ int owned = sema->Core_control.mutex.holder_id == rtems_task_self();
+
+ _Thread_Enable_dispatch();
+
+ return owned;
+ } else {
+ _Thread_Enable_dispatch();
+
+ BSD_PANIC("unexpected semaphore location or attributes");
+ }
+}
+
+void
+_rw_wlock(struct rwlock *rw, const char *file, int line)
+{
+ int iret;
+
+ iret = pthread_rwlock_wrlock( &rw->lock_object.lo_id );
+ BSD_ASSERT( iret == 0 );
+}
+
+int
+_rw_try_wlock(struct rwlock *rw, const char *file, int line)
+{
+ int iret;
+
+ iret = pthread_rwlock_trywrlock( &rw->lock_object.lo_id );
+ if (iret == 0) {
+ return 1;
+ } else {
+ return 0;
+ }
+}
+
+void
+_rw_wunlock(struct rwlock *rw, const char *file, int line)
+{
+ int iret;
+
+ iret = pthread_rwlock_unlock( &rw->lock_object.lo_id );
+ BSD_ASSERT( iret == 0 );
+}
+
+void
+_rw_rlock(struct rwlock *rw, const char *file, int line)
+{
+ int iret;
+
+ iret = pthread_rwlock_rdlock( &rw->lock_object.lo_id );
+ BSD_ASSERT( iret == 0 );
+}
+
+int
+_rw_try_rlock(struct rwlock *rw, const char *file, int line)
+{
+ int iret;
+
+ iret = pthread_rwlock_tryrdlock( &rw->lock_object.lo_id );
+ if (iret == 0) {
+ return 1;
+ } else {
+ return 0;
+ }
+}
+
+void
+_rw_runlock(struct rwlock *rw, const char *file, int line)
+{
+ int iret;
+
+ iret = pthread_rwlock_unlock( &rw->lock_object.lo_id );
+ BSD_ASSERT( iret == 0 );
+}
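+/*
+ * Mapping sketch (illustrative): the rw_rlock()/rw_wlock() macros
+ * expand to the _rw_*() functions above, so each struct rwlock is
+ * backed by one POSIX rwlock.
+ *
+ *	struct rwlock lck;
+ *
+ *	rw_init(&lck, "example");
+ *	rw_rlock(&lck);		pthread_rwlock_rdlock
+ *	rw_runlock(&lck);	pthread_rwlock_unlock
+ *	rw_wlock(&lck);		pthread_rwlock_wrlock
+ *	rw_wunlock(&lck);	pthread_rwlock_unlock
+ *	rw_destroy(&lck);
+ */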
+
+#ifdef INVARIANT_SUPPORT
+#ifndef INVARIANTS
+#undef _rw_assert
+#endif
+
+/*
+ * In the non-WITNESS case, rw_assert() can only detect that at least
+ * *some* thread owns an rlock, but it cannot guarantee that *this*
+ * thread owns an rlock.
+ */
+void
+_rw_assert(struct rwlock *rw, int what, const char *file, int line)
+{
+
+ if (panicstr != NULL)
+ return;
+ switch (what) {
+ case RA_LOCKED:
+ case RA_LOCKED | RA_RECURSED:
+ case RA_LOCKED | RA_NOTRECURSED:
+ case RA_RLOCKED:
+#ifdef WITNESS
+ witness_assert(&rw->lock_object, what, file, line);
+#else
+ /*
+ * If some other thread has a write lock or we have one
+ * and are asserting a read lock, fail. Also, if no one
+ * has a lock at all, fail.
+ */
+ if (rw->rw_lock == RW_UNLOCKED ||
+ (!(rw->rw_lock & RW_LOCK_READ) && (what == RA_RLOCKED ||
+ rw_wowner(rw) != curthread)))
+ panic("Lock %s not %slocked @ %s:%d\n",
+ rw->lock_object.lo_name, (what == RA_RLOCKED) ?
+ "read " : "", file, line);
+
+ if (!(rw->rw_lock & RW_LOCK_READ)) {
+ if (rw_recursed(rw)) {
+ if (what & RA_NOTRECURSED)
+ panic("Lock %s recursed @ %s:%d\n",
+ rw->lock_object.lo_name, file,
+ line);
+ } else if (what & RA_RECURSED)
+ panic("Lock %s not recursed @ %s:%d\n",
+ rw->lock_object.lo_name, file, line);
+ }
+#endif
+ break;
+ case RA_WLOCKED:
+ case RA_WLOCKED | RA_RECURSED:
+ case RA_WLOCKED | RA_NOTRECURSED:
+ if (rw_wowner(rw) != curthread)
+ panic("Lock %s not exclusively locked @ %s:%d\n",
+ rw->lock_object.lo_name, file, line);
+ if (rw_recursed(rw)) {
+ if (what & RA_NOTRECURSED)
+ panic("Lock %s recursed @ %s:%d\n",
+ rw->lock_object.lo_name, file, line);
+ } else if (what & RA_RECURSED)
+ panic("Lock %s not recursed @ %s:%d\n",
+ rw->lock_object.lo_name, file, line);
+ break;
+ case RA_UNLOCKED:
+#ifdef WITNESS
+ witness_assert(&rw->lock_object, what, file, line);
+#else
+ /*
+ * If we hold a write lock fail. We can't reliably check
+ * to see if we hold a read lock or not.
+ */
+ if (rw_wowner(rw) == curthread)
+ panic("Lock %s exclusively locked @ %s:%d\n",
+ rw->lock_object.lo_name, file, line);
+#endif
+ break;
+ default:
+ panic("Unknown rw lock assertion: %d @ %s:%d", what, file,
+ line);
+ }
+}
+#endif /* INVARIANT_SUPPORT */
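+/*
+ * Assertion sketch (illustrative): callers document their locking
+ * contract with rw_assert(), for example
+ *
+ *	rw_assert(&lck, RA_WLOCKED);	panics unless held exclusively
+ *	rw_assert(&lck, RA_UNLOCKED);	panics if we hold it exclusively
+ */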
diff --git a/rtems/freebsd/rtems/rtems-bsd-shell.c b/rtems/freebsd/rtems/rtems-bsd-shell.c
new file mode 100644
index 00000000..ec704c81
--- /dev/null
+++ b/rtems/freebsd/rtems/rtems-bsd-shell.c
@@ -0,0 +1,181 @@
+/**
+ * @file
+ *
+ * @ingroup rtems_bsd_rtems
+ *
+ * @brief TODO.
+ */
+
+/*
+ * Copyright (c) 2009, 2010 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Obere Lagerstr. 30
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.com/license/LICENSE.
+ */
+
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/types.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/mutex.h>
+#include <rtems/freebsd/sys/callout.h>
+#include <rtems/freebsd/sys/condvar.h>
+#include <rtems/freebsd/sys/proc.h>
+
+#include <rtems/freebsd/bsd.h>
+#include <rtems/shell.h>
+
+static void
+rtems_bsd_dump_callout(void)
+{
+ rtems_chain_control *chain = &rtems_bsd_callout_chain;
+ rtems_chain_node *node = rtems_chain_first(chain);
+
+ printf("callout dump:\n");
+
+ while (!rtems_chain_is_tail(chain, node)) {
+ struct callout *c = (struct callout *) node;
+
+ printf("\t%08x\n", c->c_id);
+
+ node = rtems_chain_next(node);
+ }
+}
+
+static void
+rtems_bsd_dump_mtx(void)
+{
+ rtems_chain_control *chain = &rtems_bsd_mtx_chain;
+ rtems_chain_node *node = rtems_chain_first(chain);
+
+ printf("mtx dump:\n");
+
+ while (!rtems_chain_is_tail(chain, node)) {
+ struct lock_object *lo = (struct lock_object *) node;
+
+ printf("\t%s: 0x%08x\n", lo->lo_name, lo->lo_id);
+
+ node = rtems_chain_next(node);
+ }
+}
+
+static void
+rtems_bsd_dump_sx(void)
+{
+ rtems_chain_control *chain = &rtems_bsd_sx_chain;
+ rtems_chain_node *node = rtems_chain_first(chain);
+
+ printf("sx dump:\n");
+
+ while (!rtems_chain_is_tail(chain, node)) {
+ struct lock_object *lo = (struct lock_object *) node;
+
+ printf("\t%s: 0x%08x\n", lo->lo_name, lo->lo_id);
+
+ node = rtems_chain_next(node);
+ }
+}
+
+static void
+rtems_bsd_dump_condvar(void)
+{
+ rtems_chain_control *chain = &rtems_bsd_condvar_chain;
+ rtems_chain_node *node = rtems_chain_first(chain);
+
+ printf("condvar dump:\n");
+
+ while (!rtems_chain_is_tail(chain, node)) {
+ struct cv *cv = (struct cv *) node;
+
+ printf("\t%s: 0x%08x\n", cv->cv_description, cv->cv_id);
+
+ node = rtems_chain_next(node);
+ }
+}
+
+static void
+rtems_bsd_dump_thread(void)
+{
+ rtems_chain_control *chain = &rtems_bsd_thread_chain;
+ rtems_chain_node *node = rtems_chain_first(chain);
+
+ printf("thread dump:\n");
+
+ while (!rtems_chain_is_tail(chain, node)) {
+ struct thread *td = (struct thread *) node;
+
+ printf("\t%s: 0x%08x\n", td->td_name, td->td_id);
+
+ node = rtems_chain_next(node);
+ }
+}
+
+static const char rtems_bsd_usage [] =
+ "bsd {all|mtx|sx|condvar|thread|callout}";
+
+#define CMP(s) (all || strcasecmp(argv [1], s) == 0)
+
+static int
+rtems_bsd_info(int argc, char **argv)
+{
+ bool usage = true;
+
+ if (argc == 2) {
+ bool all = false;
+
+ if (CMP("all")) {
+ all = true;
+ }
+
+ if (CMP("mtx")) {
+ rtems_bsd_dump_mtx();
+ usage = false;
+ }
+ if (CMP("sx")) {
+ rtems_bsd_dump_sx();
+ usage = false;
+ }
+ if (CMP("condvar")) {
+ rtems_bsd_dump_condvar();
+ usage = false;
+ }
+ if (CMP("thread")) {
+ rtems_bsd_dump_thread();
+ usage = false;
+ }
+ if (CMP("callout")) {
+ rtems_bsd_dump_callout();
+ usage = false;
+ }
+ }
+
+ if (usage) {
+ puts(rtems_bsd_usage);
+ }
+
+ return 0;
+}
+
+static rtems_shell_cmd_t rtems_bsd_info_command = {
+ .name = "bsd",
+ .usage = rtems_bsd_usage,
+ .topic = "bsp",
+ .command = rtems_bsd_info,
+ .alias = NULL,
+ .next = NULL
+};
+
+void
+rtems_bsd_shell_initialize(void)
+{
+ rtems_shell_add_cmd_struct(&rtems_bsd_info_command);
+}
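+/*
+ * Example session (illustrative; the object names and IDs are made
+ * up): after rtems_bsd_shell_initialize() has run, the command can be
+ * used from the RTEMS shell as
+ *
+ *	SHLL [/] # bsd mtx
+ *	mtx dump:
+ *		Giant: 0x1a010001
+ *	SHLL [/] # bsd all
+ *	(dumps the mtx, sx, condvar, thread and callout objects)
+ */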
diff --git a/rtems/freebsd/rtems/rtems-bsd-signal.c b/rtems/freebsd/rtems/rtems-bsd-signal.c
new file mode 100644
index 00000000..02294f96
--- /dev/null
+++ b/rtems/freebsd/rtems/rtems-bsd-signal.c
@@ -0,0 +1,33 @@
+/**
+ * @file
+ *
+ * @ingroup rtems_bsd_rtems
+ *
+ * @brief TODO.
+ */
+
+/*
+ * Copyright (c) 2009, 2010 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Obere Lagerstr. 30
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.com/license/LICENSE.
+ */
+
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+#include <rtems/freebsd/sys/types.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/signalvar.h>
+
+void
+psignal(struct proc *p, int sig)
+{
+ BSD_PANIC("not implemented");
+}
diff --git a/rtems/freebsd/rtems/rtems-bsd-support.c b/rtems/freebsd/rtems/rtems-bsd-support.c
new file mode 100644
index 00000000..461078e9
--- /dev/null
+++ b/rtems/freebsd/rtems/rtems-bsd-support.c
@@ -0,0 +1,75 @@
+/**
+ * @file
+ *
+ * @ingroup rtems_bsd_rtems
+ *
+ * @brief TODO.
+ */
+
+/*
+ * Copyright (c) 2009, 2010 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Obere Lagerstr. 30
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.com/license/LICENSE.
+ */
+
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+#include <rtems/score/states.h>
+#include <rtems/score/thread.h>
+#include <rtems/score/threadq.h>
+
+#include <rtems/freebsd/sys/types.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/uio.h>
+
+int
+copyout(const void *kaddr, void *udaddr, size_t len)
+{
+ bcopy(kaddr, udaddr, len);
+ return (0);
+}
+
+int
+copyin(const void *udaddr, void *kaddr, size_t len)
+{
+ bcopy(udaddr, kaddr, len);
+ return (0);
+}
+
+int
+copyiniov(struct iovec *iovp, u_int iovcnt, struct iovec **iov, int error)
+{
+ u_int iovlen;
+
+ *iov = NULL;
+ if (iovcnt > UIO_MAXIOV)
+ return (error);
+ iovlen = iovcnt * sizeof (struct iovec);
+ *iov = malloc(iovlen, M_IOV, M_WAITOK);
+ error = copyin(iovp, *iov, iovlen);
+ if (error) {
+ free(*iov, M_IOV);
+ *iov = NULL;
+ }
+ return (error);
+}
+
+void
+critical_enter(void)
+{
+ _Thread_Disable_dispatch();
+}
+
+void
+critical_exit(void)
+{
+ _Thread_Enable_dispatch();
+}
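+/*
+ * Usage sketch (illustrative): on RTEMS a FreeBSD critical section
+ * maps to disabling thread dispatching, so the protected region must
+ * be short and must not block.
+ *
+ *	critical_enter();
+ *	... touch dispatch-sensitive state ...
+ *	critical_exit();
+ */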
diff --git a/rtems/freebsd/rtems/rtems-bsd-sx.c b/rtems/freebsd/rtems/rtems-bsd-sx.c
new file mode 100644
index 00000000..93232be4
--- /dev/null
+++ b/rtems/freebsd/rtems/rtems-bsd-sx.c
@@ -0,0 +1,335 @@
+/**
+ * @file
+ *
+ * @ingroup rtems_bsd_rtems
+ *
+ * @brief TODO.
+ */
+
+/*
+ * Copyright (c) 2009, 2010 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Obere Lagerstr. 30
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.com/license/LICENSE.
+ */
+
+/* Necessary to obtain some internal functions */
+#define __RTEMS_VIOLATE_KERNEL_VISIBILITY__
+
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/types.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/sx.h>
+
+#ifndef INVARIANTS
+#define _sx_assert(sx, what, file, line)
+#endif
+
+static void assert_sx(struct lock_object *lock, int what);
+static void lock_sx(struct lock_object *lock, int how);
+#ifdef KDTRACE_HOOKS
+static int owner_sx(struct lock_object *lock, struct thread **owner);
+#endif
+static int unlock_sx(struct lock_object *lock);
+
+struct lock_class lock_class_sx = {
+ .lc_name = "sx",
+ .lc_flags = LC_SLEEPLOCK | LC_SLEEPABLE | LC_RECURSABLE | LC_UPGRADABLE,
+ .lc_assert = assert_sx,
+#ifdef DDB
+ .lc_ddb_show = db_show_sx,
+#endif
+ .lc_lock = lock_sx,
+ .lc_unlock = unlock_sx,
+#ifdef KDTRACE_HOOKS
+ .lc_owner = owner_sx,
+#endif
+};
+
+RTEMS_CHAIN_DEFINE_EMPTY(rtems_bsd_sx_chain);
+
+void
+assert_sx(struct lock_object *lock, int what)
+{
+ sx_assert((struct sx *)lock, what);
+}
+
+void
+lock_sx(struct lock_object *lock, int how)
+{
+ struct sx *sx;
+
+ sx = (struct sx *)lock;
+ if (how)
+ sx_xlock(sx);
+ else
+ sx_slock(sx);
+}
+
+int
+unlock_sx(struct lock_object *lock)
+{
+ struct sx *sx;
+
+ sx = (struct sx *)lock;
+ sx_assert(sx, SA_LOCKED | SA_NOTRECURSED);
+ if (sx_xlocked(sx)) {
+ sx_xunlock(sx);
+ return (1);
+ } else {
+ sx_sunlock(sx);
+ return (0);
+ }
+}
+
+#ifdef KDTRACE_HOOKS
+int
+owner_sx(struct lock_object *lock, struct thread **owner)
+{
+ struct sx *sx = (struct sx *)lock;
+ uintptr_t x = sx->sx_lock;
+
+ *owner = (struct thread *)SX_OWNER(x);
+ return ((x & SX_LOCK_SHARED) != 0 ? (SX_SHARERS(x) != 0) :
+ (*owner != NULL));
+}
+#endif
+
+void
+sx_sysinit(void *arg)
+{
+ struct sx_args *sargs = arg;
+
+ sx_init(sargs->sa_sx, sargs->sa_desc);
+}
+
+void
+sx_init_flags(struct sx *sx, const char *description, int opts)
+{
+ struct lock_class *class;
+ int i;
+ rtems_status_code sc = RTEMS_SUCCESSFUL;
+ rtems_id id = RTEMS_ID_NONE;
+ rtems_attribute attr = RTEMS_LOCAL | RTEMS_PRIORITY | RTEMS_BINARY_SEMAPHORE;
+
+ if ((opts & SX_RECURSE) != 0) {
+ /* FIXME */
+ }
+
+ class = &lock_class_sx;
+
+ /* Check for double-init and zero object. */
+ KASSERT(!lock_initalized(&sx->lock_object), ("lock \"%s\" %p already initialized", description, &sx->lock_object));
+
+ /* Look up lock class to find its index. */
+ for (i = 0; i < LOCK_CLASS_MAX; i++)
+ {
+ if (lock_classes[i] == class)
+ {
+ sx->lock_object.lo_flags = i << LO_CLASSSHIFT;
+ break;
+ }
+ }
+ KASSERT(i < LOCK_CLASS_MAX, ("unknown lock class %p", class));
+
+ sc = rtems_semaphore_create(
+ rtems_build_name( '_', 'S', 'X', ' '),
+ 1,
+ attr,
+ 0,
+ &id
+ );
+ BSD_ASSERT_SC(sc);
+
+ sx->lock_object.lo_name = description;
+ sx->lock_object.lo_flags |= LO_INITIALIZED;
+ sx->lock_object.lo_id = id;
+
+ rtems_chain_append(&rtems_bsd_sx_chain, &sx->lock_object.lo_node);
+}
+
+void
+sx_destroy(struct sx *sx)
+{
+ rtems_status_code sc = RTEMS_SUCCESSFUL;
+
+ sc = rtems_semaphore_delete( sx->lock_object.lo_id);
+ BSD_ASSERT_SC(sc);
+
+ rtems_chain_extract(&sx->lock_object.lo_node);
+
+ sx->lock_object.lo_id = 0;
+ sx->lock_object.lo_flags &= ~LO_INITIALIZED;
+}
+
+int
+_sx_xlock(struct sx *sx, int opts, const char *file, int line)
+{
+ rtems_status_code sc = RTEMS_SUCCESSFUL;
+
+ BSD_ASSERT((opts & SX_INTERRUPTIBLE) == 0);
+
+ sc = rtems_semaphore_obtain( sx->lock_object.lo_id, RTEMS_WAIT, RTEMS_NO_TIMEOUT);
+ BSD_ASSERT_SC(sc);
+
+ return 0;
+}
+
+int
+_sx_try_xlock(struct sx *sx, const char *file, int line)
+{
+ rtems_status_code sc = RTEMS_SUCCESSFUL;
+
+ sc = rtems_semaphore_obtain( sx->lock_object.lo_id, RTEMS_NO_WAIT, 0);
+ if (sc == RTEMS_SUCCESSFUL) {
+ return 1;
+ } else if (sc == RTEMS_UNSATISFIED) {
+ return 0;
+ } else {
+ BSD_ASSERT_SC(sc);
+
+ return 0;
+ }
+}
+
+void
+_sx_xunlock(struct sx *sx, const char *file, int line)
+{
+ rtems_status_code sc = RTEMS_SUCCESSFUL;
+
+ sc = rtems_semaphore_release( sx->lock_object.lo_id);
+ BSD_ASSERT_SC(sc);
+}
+
+int
+_sx_try_upgrade(struct sx *sx, const char *file, int line)
+{
+ return 1;
+}
+
+void
+_sx_downgrade(struct sx *sx, const char *file, int line)
+{
+ /* Do nothing */
+}
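+/*
+ * Usage sketch (illustrative): in this port every sx lock is a binary
+ * RTEMS semaphore, so shared and exclusive acquisition take the same
+ * path and upgrade/downgrade are trivial.
+ *
+ *	struct sx lck;
+ *
+ *	sx_init(&lck, "example");
+ *	sx_xlock(&lck);		rtems_semaphore_obtain
+ *	sx_xunlock(&lck);	rtems_semaphore_release
+ *	sx_destroy(&lck);
+ */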
+
+#ifdef INVARIANT_SUPPORT
+#ifndef INVARIANTS
+#undef _sx_assert
+#endif
+
+/*
+ * In the non-WITNESS case, sx_assert() can only detect that at least
+ * *some* thread owns an slock, but it cannot guarantee that *this*
+ * thread owns an slock.
+ */
+void
+_sx_assert(struct sx *sx, int what, const char *file, int line)
+{
+#ifndef WITNESS
+ int slocked = 0;
+#endif
+
+ if (panicstr != NULL)
+ return;
+ switch (what) {
+ case SA_SLOCKED:
+ case SA_SLOCKED | SA_NOTRECURSED:
+ case SA_SLOCKED | SA_RECURSED:
+#ifndef WITNESS
+ slocked = 1;
+ /* FALLTHROUGH */
+#endif
+ case SA_LOCKED:
+ case SA_LOCKED | SA_NOTRECURSED:
+ case SA_LOCKED | SA_RECURSED:
+#ifdef WITNESS
+ witness_assert(&sx->lock_object, what, file, line);
+#else
+ /*
+ * If some other thread has an exclusive lock or we
+ * have one and are asserting a shared lock, fail.
+ * Also, if no one has a lock at all, fail.
+ */
+ if (sx->sx_lock == SX_LOCK_UNLOCKED ||
+ (!(sx->sx_lock & SX_LOCK_SHARED) && (slocked ||
+ sx_xholder(sx) != curthread)))
+ panic("Lock %s not %slocked @ %s:%d\n",
+ sx->lock_object.lo_name, slocked ? "share " : "",
+ file, line);
+
+ if (!(sx->sx_lock & SX_LOCK_SHARED)) {
+ if (sx_recursed(sx)) {
+ if (what & SA_NOTRECURSED)
+ panic("Lock %s recursed @ %s:%d\n",
+ sx->lock_object.lo_name, file,
+ line);
+ } else if (what & SA_RECURSED)
+ panic("Lock %s not recursed @ %s:%d\n",
+ sx->lock_object.lo_name, file, line);
+ }
+#endif
+ break;
+ case SA_XLOCKED:
+ case SA_XLOCKED | SA_NOTRECURSED:
+ case SA_XLOCKED | SA_RECURSED:
+ if (sx_xholder(sx) != curthread)
+ panic("Lock %s not exclusively locked @ %s:%d\n",
+ sx->lock_object.lo_name, file, line);
+ if (sx_recursed(sx)) {
+ if (what & SA_NOTRECURSED)
+ panic("Lock %s recursed @ %s:%d\n",
+ sx->lock_object.lo_name, file, line);
+ } else if (what & SA_RECURSED)
+ panic("Lock %s not recursed @ %s:%d\n",
+ sx->lock_object.lo_name, file, line);
+ break;
+ case SA_UNLOCKED:
+#ifdef WITNESS
+ witness_assert(&sx->lock_object, what, file, line);
+#else
+ /*
+ * If we hold an exclusve lock fail. We can't
+ * reliably check to see if we hold a shared lock or
+ * not.
+ */
+ if (sx_xholder(sx) == curthread)
+ panic("Lock %s exclusively locked @ %s:%d\n",
+ sx->lock_object.lo_name, file, line);
+#endif
+ break;
+ default:
+ panic("Unknown sx lock assertion: %d @ %s:%d", what, file,
+ line);
+ }
+}
+#endif /* INVARIANT_SUPPORT */
+
+int
+sx_xlocked(struct sx *sx)
+{
+ Objects_Locations location;
+ Semaphore_Control *sema = _Semaphore_Get(sx->lock_object.lo_id, &location);
+
+ if (location == OBJECTS_LOCAL && !_Attributes_Is_counting_semaphore(sema->attribute_set)) {
+ int xlocked = sema->Core_control.mutex.holder_id == rtems_task_self();
+
+ _Thread_Enable_dispatch();
+
+ return xlocked;
+ } else {
+ _Thread_Enable_dispatch();
+
+ BSD_PANIC("unexpected semaphore location or attributes");
+ }
+}
diff --git a/rtems/freebsd/rtems/rtems-bsd-synch.c b/rtems/freebsd/rtems/rtems-bsd-synch.c
new file mode 100644
index 00000000..2102c1a7
--- /dev/null
+++ b/rtems/freebsd/rtems/rtems-bsd-synch.c
@@ -0,0 +1,274 @@
+/**
+ * @file
+ *
+ * @ingroup rtems_bsd_rtems
+ *
+ * @brief TODO.
+ */
+
+/*
+ * Copyright (c) 2009, 2010 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Obere Lagerstr. 30
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.com/license/LICENSE.
+ */
+
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+#include <rtems/score/states.h>
+#include <rtems/score/thread.h>
+#include <rtems/score/threadq.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/types.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/ktr.h>
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/mutex.h>
+#include <rtems/freebsd/sys/proc.h>
+#include <rtems/freebsd/machine/pcpu.h>
+
+#define STATES_WAITING_FOR_SLEEP 0x40000
+
+static int pause_wchan;
+
+typedef struct
+{
+ Chain_Node node;
+ void *ident;
+ Thread_queue_Control queue;
+} sleep_queue_control_t;
+
+sleep_queue_control_t sleep_queue[BSD_MAXIMUM_SLEEP_QUEUES]; /* this memory allocation could use _Workspace_Allocate once inside the RTEMS tree */
+Chain_Control sleep_queue_inactive_nodes; /* chain of inactive nodes */
+Chain_Control sleep_queue_active_nodes; /* chain of active nodes */
+
+void
+sleepinit(void)
+{
+ int ii;
+
+ /* initialize the sleep queue */
+ for( ii = 0; ii < BSD_MAXIMUM_SLEEP_QUEUES; ii++ )
+ {
+ sleep_queue[ii].ident = NULL;
+ /*
+ * Initialize the queue we use to block for signals
+ */
+ _Thread_queue_Initialize(
+ &sleep_queue[ii].queue,
+ THREAD_QUEUE_DISCIPLINE_FIFO,
+ STATES_WAITING_FOR_SLEEP | STATES_INTERRUPTIBLE_BY_SIGNAL,
+ EAGAIN
+ );
+ }
+ /* initialize the active chain */
+ _Chain_Initialize_empty( &sleep_queue_active_nodes );
+ /* initialize the inactive chain */
+ _Chain_Initialize( &sleep_queue_inactive_nodes, sleep_queue, BSD_MAXIMUM_SLEEP_QUEUES, sizeof( sleep_queue_control_t ));
+}
+
+sleep_queue_control_t*
+sleep_queue_lookup(void *ident)
+{
+ int ii;
+
+ /* search the sleep queues for a matching ident */
+ for( ii = 0; ii < BSD_MAXIMUM_SLEEP_QUEUES; ii++ )
+ {
+ if( sleep_queue[ii].ident == ident )
+ {
+ return &sleep_queue[ii];
+ }
+ }
+ return NULL;
+}
+
+sleep_queue_control_t*
+sleep_queue_get(void *ident)
+{
+ sleep_queue_control_t *sq;
+
+ sq = sleep_queue_lookup( ident );
+ if (sq == NULL)
+ {
+ KASSERT(!_Chain_Is_empty( &sleep_queue_inactive_nodes ), ("sleep_queue_get"));
+ //get a control from the inactive chain
+ sq = ( sleep_queue_control_t * )_Chain_Get( &sleep_queue_inactive_nodes );
+ sq->ident = ident;
+ _Chain_Append( &sleep_queue_active_nodes, &sq->node );
+ }
+ return sq;
+}
+
+/*
+ * Block the current thread until it is awakened from its sleep queue
+ * or it times out while waiting.
+ */
+int
+sleep_queue_timedwait(void *wchan, int pri, int timeout, int catch)
+{
+ sleep_queue_control_t *sq;
+ Thread_Control *executing;
+ ISR_Level level;
+
+ _Thread_Disable_dispatch();
+
+ sq = sleep_queue_get( wchan );
+
+ executing = _Thread_Executing;
+ if( timeout )
+ {
+ executing->Wait.return_code = EWOULDBLOCK;
+ }
+ else
+ {
+ executing->Wait.return_code = 0;
+ }
+ _ISR_Disable( level );
+ _Thread_queue_Enter_critical_section( &sq->queue );
+ if( catch )
+ {
+ sq->queue.state |= STATES_INTERRUPTIBLE_BY_SIGNAL;
+ }
+ else
+ {
+ sq->queue.state &= ~STATES_INTERRUPTIBLE_BY_SIGNAL;
+ }
+ executing->Wait.queue = &sq->queue;
+ _ISR_Enable( level );
+
+ _Thread_queue_Enqueue( &sq->queue, timeout );
+ _Thread_Enable_dispatch();
+ return _Thread_Executing->Wait.return_code;
+}
+
+/*
+ * General sleep call. Suspends the current thread until a wakeup is
+ * performed on the specified identifier. The thread will then be made
+ * runnable with the specified priority. Sleeps at most timo/hz seconds
+ * (0 means no timeout). If pri includes PCATCH flag, signals are checked
+ * before and after sleeping, else signals are not checked. Returns 0 if
+ * awakened, EWOULDBLOCK if the timeout expires. If PCATCH is set and a
+ * signal needs to be delivered, ERESTART is returned if the current system
+ * call should be restarted if possible, and EINTR is returned if the system
+ * call should be interrupted by the signal (return EINTR).
+ *
+ * The lock argument is unlocked before the caller is suspended, and
+ * re-locked before _sleep() returns. If priority includes the PDROP
+ * flag the lock is not re-locked before returning.
+ */
+int
+_sleep(void *ident, struct lock_object *lock, int priority, const char *wmesg, int timo)
+{
+ struct thread *td;
+ struct proc *p;
+ struct lock_class *class;
+ int catch, flags, lock_state, pri, rval;
+
+ td = curthread;
+ p = td->td_proc;
+#ifdef KTRACE
+ if (KTRPOINT(td, KTR_CSW))
+ ktrcsw(1, 0);
+#endif
+ KASSERT(timo != 0 || mtx_owned(&Giant) || lock != NULL,
+ ("sleeping without a lock"));
+ KASSERT(p != NULL, ("msleep1"));
+ KASSERT(ident != NULL && TD_IS_RUNNING(td), ("msleep"));
+ if (priority & PDROP)
+ KASSERT(lock != NULL && lock != &Giant.lock_object,
+ ("PDROP requires a non-Giant lock"));
+ if (lock != NULL)
+ class = LOCK_CLASS(lock);
+ else
+ class = NULL;
+
+ if (cold) {
+ /*
+ * During autoconfiguration, just return;
+ * don't run any other threads or panic below,
+ * in case this is the idle thread and already asleep.
+ * XXX: this used to do "s = splhigh(); splx(safepri);
+ * splx(s);" to give interrupts a chance, but there is
+ * no way to give interrupts a chance now.
+ */
+ if (lock != NULL && priority & PDROP)
+ class->lc_unlock(lock);
+ return (0);
+ }
+ catch = priority & PCATCH;
+ pri = priority & PRIMASK;
+
+ CTR5(KTR_PROC, "sleep: thread %ld (pid %ld, %s) on %s (%p)",
+ td->td_tid, p->p_pid, td->td_name, wmesg, ident);
+
+ if (lock == &Giant.lock_object)
+ mtx_assert(&Giant, MA_OWNED);
+ DROP_GIANT();
+ if (lock != NULL && lock != &Giant.lock_object &&
+ !(class->lc_flags & LC_SLEEPABLE)) {
+ lock_state = class->lc_unlock(lock);
+ } else
+ /* GCC needs to follow the Yellow Brick Road */
+ lock_state = -1;
+
+ if (lock != NULL && class->lc_flags & LC_SLEEPABLE) {
+ lock_state = class->lc_unlock(lock);
+ }
+
+ rval = sleep_queue_timedwait(ident, pri, timo, catch);
+
+#ifdef KTRACE
+ if (KTRPOINT(td, KTR_CSW))
+ ktrcsw(0, 0);
+#endif
+ PICKUP_GIANT();
+ if (lock != NULL && lock != &Giant.lock_object && !(priority & PDROP)) {
+ class->lc_lock(lock, lock_state);
+ }
+ return (rval);
+}
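+/*
+ * Usage sketch (illustrative): msleep() and tsleep() funnel into
+ * _sleep(); a typical lock-protected wait looks like
+ *
+ *	mtx_lock(&m);
+ *	while (!condition)
+ *		msleep(&condition, &m, 0, "cond", hz);
+ *	mtx_unlock(&m);
+ */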
+
+/*
+ * pause() is like tsleep() except that the intention is to not be
+ * explicitly woken up by another thread. Instead, the current thread
+ * simply wishes to sleep until the timeout expires. It is
+ * implemented using a dummy wait channel.
+ */
+int
+pause(const char *wmesg, int timo)
+{
+
+ KASSERT(timo != 0, ("pause: timeout required"));
+ return (tsleep(&pause_wchan, 0, wmesg, timo));
+}
+
+/*
+ * Make all threads sleeping on the specified identifier runnable.
+ */
+void
+wakeup(void *ident)
+{
+ sleep_queue_control_t *sq;
+ Thread_Control *the_thread;
+
+ sq = sleep_queue_lookup( ident );
+ if (sq == NULL)
+ {
+ return;
+ }
+
+ while ( (the_thread = _Thread_queue_Dequeue(&sq->queue)) != NULL )
+ {
+ /* each dequeued thread has been made runnable */
+ }
+}
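+/*
+ * Pairing sketch (illustrative): a consumer sleeps on a wait channel
+ * and a producer wakes every thread queued on it.
+ *
+ *	consumer:			producer:
+ *	while (queue_is_empty())	enqueue(item);
+ *		tsleep(&queue, 0,	wakeup(&queue);
+ *		    "qwait", hz);
+ */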
+
diff --git a/rtems/freebsd/rtems/rtems-bsd-syscalls.c b/rtems/freebsd/rtems/rtems-bsd-syscalls.c
new file mode 100644
index 00000000..0a15bc4b
--- /dev/null
+++ b/rtems/freebsd/rtems/rtems-bsd-syscalls.c
@@ -0,0 +1,1487 @@
+/**
+ * @file
+ *
+ * @ingroup rtems_bsd_rtems
+ *
+ * @brief TODO.
+ */
+
+/*
+ * Copyright (c) 2009, 2010 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Obere Lagerstr. 30
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.com/license/LICENSE.
+ */
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+#include <rtems/freebsd/sys/types.h>
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/mutex.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/proc.h>
+#include <rtems/freebsd/sys/fcntl.h>
+#include <rtems/freebsd/sys/protosw.h>
+#include <rtems/freebsd/sys/mbuf.h>
+#include <rtems/freebsd/sys/socket.h>
+#include <rtems/freebsd/sys/socketvar.h>
+#include <rtems/freebsd/sys/uio.h>
+#include <rtems/freebsd/machine/pcpu.h>
+#include <rtems/freebsd/net/vnet.h>
+
+#include <rtems/libio_.h>
+#include <rtems/seterr.h>
+
+static const rtems_filesystem_file_handlers_r socket_handlers;
+extern int killinfo( pid_t pid, int sig, const union sigval *value );
+
+/*
+ * Convert an RTEMS file descriptor to a BSD socket pointer.
+ */
+
+struct socket *rtems_bsdnet_fdToSocket(
+ int fd
+)
+{
+ rtems_libio_t *iop;
+
+ /* same as rtems_libio_check_fd(_fd) but different return */
+ if ((uint32_t)fd >= rtems_libio_number_iops) {
+ errno = EBADF;
+ return NULL;
+ }
+ iop = &rtems_libio_iops[fd];
+
+ /* same as rtems_libio_check_is_open(iop) but different return */
+ if ((iop->flags & LIBIO_FLAGS_OPEN) == 0) {
+ errno = EBADF;
+ return NULL;
+ }
+
+ if (iop->data1 == NULL) {
+ errno = EBADF;
+ return NULL;
+ }
+
+ return iop->data1;
+}
+
+/*
+ * Create an RTEMS file descriptor for a socket
+ */
+
+int rtems_bsdnet_makeFdForSocket(
+ void *so,
+ const rtems_filesystem_file_handlers_r *h
+)
+{
+ rtems_libio_t *iop;
+ int fd;
+
+ iop = rtems_libio_allocate();
+ if (iop == 0)
+ rtems_set_errno_and_return_minus_one( ENFILE );
+
+ fd = iop - rtems_libio_iops;
+ iop->flags |= LIBIO_FLAGS_WRITE | LIBIO_FLAGS_READ;
+ iop->data0 = fd;
+ iop->data1 = so;
+ iop->pathinfo.handlers = h;
+ iop->pathinfo.ops = &rtems_filesystem_operations_default;
+ return fd;
+}
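+/*
+ * Round-trip sketch (illustrative): the socket pointer is stored in
+ * iop->data1, so the two helpers above invert each other:
+ *
+ *	fd = rtems_bsdnet_makeFdForSocket(so, &socket_handlers);
+ *	so == rtems_bsdnet_fdToSocket(fd);
+ */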
+
+/*
+ * The following code is based on FreeBSD uipc_syscalls.c
+ */
+
+int
+sockargs(mp, buf, buflen, type)
+ struct mbuf **mp;
+ caddr_t buf;
+ int buflen, type;
+{
+ struct sockaddr *sa;
+ struct mbuf *m;
+ int error;
+
+ if ((u_int)buflen > MLEN) {
+#ifdef COMPAT_OLDSOCK
+ if (type == MT_SONAME && (u_int)buflen <= 112)
+ buflen = MLEN; /* unix domain compat. hack */
+ else
+#endif
+ if ((u_int)buflen > MCLBYTES)
+ return (EINVAL);
+ }
+ m = m_get(M_WAIT, type);
+ if ((u_int)buflen > MLEN)
+ MCLGET(m, M_WAIT);
+ m->m_len = buflen;
+ error = copyin(buf, mtod(m, caddr_t), (u_int)buflen);
+ if (error)
+ (void) m_free(m);
+ else {
+ *mp = m;
+ if (type == MT_SONAME) {
+ sa = mtod(m, struct sockaddr *);
+
+#if defined(COMPAT_OLDSOCK) && BYTE_ORDER != BIG_ENDIAN
+ if (sa->sa_family == 0 && sa->sa_len < AF_MAX)
+ sa->sa_family = sa->sa_len;
+#endif
+ sa->sa_len = buflen;
+ }
+ }
+ return (error);
+}
+
+int
+getsockaddr(namp, uaddr, len)
+ struct sockaddr **namp;
+ caddr_t uaddr;
+ size_t len;
+{
+ struct sockaddr *sa;
+ int error;
+
+ if (len > SOCK_MAXADDRLEN)
+ return (ENAMETOOLONG);
+ if (len < offsetof(struct sockaddr, sa_data[0]))
+ return (EINVAL);
+ sa = malloc(len, M_SONAME, M_WAITOK);
+ error = copyin(uaddr, sa, len);
+ if (error) {
+ free(sa, M_SONAME);
+ } else {
+#if defined(COMPAT_OLDSOCK) && BYTE_ORDER != BIG_ENDIAN
+ if (sa->sa_family == 0 && sa->sa_len < AF_MAX)
+ sa->sa_family = sa->sa_len;
+#endif
+ sa->sa_len = len;
+ *namp = sa;
+ }
+ return (error);
+}
+
+/*
+ *********************************************************************
+ * BSD-style entry points *
+ *********************************************************************
+ */
+int
+socket (int domain, int type, int protocol)
+{
+ struct thread *td;
+ struct socket *so;
+ int fd, error;
+
+ td = curthread;
+#ifdef MAC
+ error = mac_socket_check_create(td->td_ucred, domain, type, protocol);
+ if (error == 0 )
+ {
+#endif
+ /* An extra reference on `fp' has been held for us by falloc(). */
+ error = socreate(domain, &so, type, protocol, td->td_ucred, td);
+ if (error == 0) {
+ fd = rtems_bsdnet_makeFdForSocket (so, &socket_handlers);
+ if (fd < 0)
+ {
+ soclose (so);
+ error = EBADF;
+ }
+ }
+#ifdef MAC
+ }
+#endif
+ if( error == 0 )
+ {
+ return fd;
+ }
+ errno = error;
+ return -1;
+}
+
+int
+kern_bind(td, fd, sa)
+ struct thread *td;
+ int fd;
+ struct sockaddr *sa;
+{
+ struct socket *so;
+ int error;
+
+ if ((so = rtems_bsdnet_fdToSocket (fd)) == NULL) {
+ error = EBADF;
+ return (error);
+ }
+#ifdef KTRACE
+ if (KTRPOINT(td, KTR_STRUCT))
+ ktrsockaddr(sa);
+#endif
+#ifdef MAC
+ error = mac_socket_check_bind(td->td_ucred, so, sa);
+ if (error == 0)
+#endif
+ error = sobind(so, sa, td);
+ return (error);
+}
+
+int
+bind (int s, struct sockaddr *name, int namelen)
+{
+ struct thread *td;
+ struct sockaddr *sa;
+ int error;
+
+ error = getsockaddr(&sa, name, namelen);
+ if( error == 0 )
+ {
+ td = curthread;
+ error = kern_bind(td, s, sa);
+ free(sa, M_SONAME);
+ }
+ if( error == 0 )
+ {
+ return error;
+ }
+ errno = error;
+ return -1;
+}
+
+int
+kern_connect(td, fd, sa)
+ struct thread *td;
+ int fd;
+ struct sockaddr *sa;
+{
+ struct socket *so;
+ int error;
+ int interrupted = 0;
+
+ if ((so = rtems_bsdnet_fdToSocket (fd)) == NULL) {
+ error = EBADF;
+ return (error);
+ }
+
+ if (so->so_state & SS_ISCONNECTING) {
+ error = EALREADY;
+ goto done1;
+ }
+#ifdef KTRACE
+ if (KTRPOINT(td, KTR_STRUCT))
+ ktrsockaddr(sa);
+#endif
+#ifdef MAC
+ error = mac_socket_check_connect(td->td_ucred, so, sa);
+ if (error)
+ goto bad;
+#endif
+ error = soconnect(so, sa, td);
+ if (error)
+ goto bad;
+ if ((so->so_state & SS_NBIO) && (so->so_state & SS_ISCONNECTING)) {
+ error = EINPROGRESS;
+ goto done1;
+ }
+ SOCK_LOCK(so);
+ while ((so->so_state & SS_ISCONNECTING) && so->so_error == 0) {
+ error = msleep(&so->so_timeo, SOCK_MTX(so), PSOCK | PCATCH,
+ "connec", 0);
+ if (error) {
+ if (error == EINTR || error == ERESTART)
+ interrupted = 1;
+ break;
+ }
+ }
+ if (error == 0) {
+ error = so->so_error;
+ so->so_error = 0;
+ }
+ SOCK_UNLOCK(so);
+bad:
+ if (!interrupted)
+ so->so_state &= ~SS_ISCONNECTING;
+ if (error == ERESTART)
+ error = EINTR;
+done1:
+ return (error);
+}
+
+int
+connect (int s, struct sockaddr *name, int namelen)
+{
+ int error;
+ struct sockaddr *sa;
+ struct thread *td;
+
+ error = getsockaddr(&sa, name, namelen);
+ if (error == 0)
+ {
+ td = curthread;
+ error = kern_connect(td, s, sa);
+ free(sa, M_SONAME);
+ }
+ if( error == 0 )
+ {
+ return error;
+ }
+ errno = error;
+ return -1;
+}
+
+int
+listen (int s, int backlog)
+{
+ struct thread *td;
+ struct socket *so;
+ int error = 0;
+
+ if ((so = rtems_bsdnet_fdToSocket (s)) == NULL) {
+ error = EBADF;
+ }
+ if( error == 0 )
+ {
+ td = curthread;
+#ifdef MAC
+ error = mac_socket_check_listen(td->td_ucred, so);
+ if (error == 0) {
+#endif
+ CURVNET_SET(so->so_vnet);
+ error = solisten(so, backlog, td);
+ CURVNET_RESTORE();
+#ifdef MAC
+ }
+#endif
+ }
+ if( error == 0 )
+ {
+ return error;
+ }
+ errno = error;
+ return -1;
+}
+
+int
+kern_accept(struct thread *td, int s, struct sockaddr **name, socklen_t *namelen)
+{
+ struct sockaddr *sa = NULL;
+ int error;
+ struct socket *head, *so;
+ int fd;
+ u_int fflag;
+ pid_t pgid;
+ int tmp;
+
+ if (name) {
+ *name = NULL;
+ if (*namelen < 0)
+ return (EINVAL);
+ }
+
+ if ((head = rtems_bsdnet_fdToSocket (s)) == NULL) {
+ error = EBADF;
+ return (error);
+ }
+ if ((head->so_options & SO_ACCEPTCONN) == 0) {
+ error = EINVAL;
+ goto done;
+ }
+#ifdef MAC
+ error = mac_socket_check_accept(td->td_ucred, head);
+ if (error != 0)
+ goto done;
+#endif
+ ACCEPT_LOCK();
+ if ((head->so_state & SS_NBIO) && TAILQ_EMPTY(&head->so_comp)) {
+ ACCEPT_UNLOCK();
+ error = EWOULDBLOCK;
+ goto noconnection;
+ }
+ while (TAILQ_EMPTY(&head->so_comp) && head->so_error == 0) {
+ if (head->so_rcv.sb_state & SBS_CANTRCVMORE) {
+ head->so_error = ECONNABORTED;
+ break;
+ }
+ error = msleep(&head->so_timeo, &accept_mtx, PSOCK | PCATCH,
+ "accept", 0);
+ if (error) {
+ ACCEPT_UNLOCK();
+ goto noconnection;
+ }
+ }
+ if (head->so_error) {
+ error = head->so_error;
+ head->so_error = 0;
+ ACCEPT_UNLOCK();
+ goto noconnection;
+ }
+ so = TAILQ_FIRST(&head->so_comp);
+ KASSERT(!(so->so_qstate & SQ_INCOMP), ("accept1: so SQ_INCOMP"));
+ KASSERT(so->so_qstate & SQ_COMP, ("accept1: so not SQ_COMP"));
+
+ /*
+ * Before changing the flags on the socket, we have to bump the
+ * reference count. Otherwise, if the protocol calls sofree(),
+ * the socket will be released due to a zero refcount.
+ */
+ SOCK_LOCK(so); /* soref() and so_state update */
+ soref(so); /* file descriptor reference */
+
+ TAILQ_REMOVE(&head->so_comp, so, so_list);
+ head->so_qlen--;
+
+ fd = rtems_bsdnet_makeFdForSocket (so, &socket_handlers);
+ if (fd < 0) {
+ TAILQ_INSERT_HEAD(&head->so_comp, so, so_list);
+ head->so_qlen++;
+ wakeup(head);
+ SOCK_UNLOCK(so);
+ ACCEPT_UNLOCK();
+ error = EBADF;
+ return (error);
+ }
+
+ so->so_state |= (head->so_state & SS_NBIO);
+ so->so_qstate &= ~SQ_COMP;
+ so->so_head = NULL;
+
+ SOCK_UNLOCK(so);
+ ACCEPT_UNLOCK();
+
+ td->td_retval[0] = fd;
+
+ sa = 0;
+ CURVNET_SET(so->so_vnet);
+ error = soaccept(so, &sa);
+ CURVNET_RESTORE();
+ if (error) {
+ /*
+ * return a namelen of zero for older code which might
+ * ignore the return value from accept.
+ */
+ if (name)
+ *namelen = 0;
+ goto noconnection;
+ }
+ if (sa == NULL) {
+ if (name)
+ *namelen = 0;
+ goto done;
+ }
+ if (name) {
+ /* check sa_len before it is destroyed */
+ if (*namelen > sa->sa_len)
+ *namelen = sa->sa_len;
+#ifdef KTRACE
+ if (KTRPOINT(td, KTR_STRUCT))
+ ktrsockaddr(sa);
+#endif
+ *name = sa;
+ sa = NULL;
+ }
+noconnection:
+ if (sa)
+ free(sa, M_SONAME);
+
+done:
+ return (error);
+}
+
+static int
+accept1(td, s, _name, _namelen, compat)
+ struct thread *td;
+ int s;
+ struct sockaddr *_name;
+ int *_namelen;
+ int compat;
+{
+ struct sockaddr *name;
+ socklen_t namelen;
+ int error;
+
+ if (_name == NULL)
+ return (kern_accept(td, s, NULL, NULL));
+
+ error = copyin(_namelen, &namelen, sizeof (namelen));
+ if (error)
+ return (error);
+
+ error = kern_accept(td, s, &name, &namelen);
+
+ /*
+ * return a namelen of zero for older code which might
+ * ignore the return value from accept.
+ */
+ if (error) {
+ (void) copyout(&namelen,
+ _namelen, sizeof(*_namelen));
+ return (error);
+ }
+
+ if (error == 0 && name != NULL) {
+#ifdef COMPAT_OLDSOCK
+ if (compat)
+ ((struct osockaddr *)name)->sa_family =
+ name->sa_family;
+#endif
+ error = copyout(name, _name, namelen);
+ }
+ if (error == 0)
+ error = copyout(&namelen, _namelen,
+ sizeof(namelen));
+ free(name, M_SONAME);
+ return (error);
+}
+
+int
+accept (int s, struct sockaddr *name, int *namelen)
+{
+ struct thread *td;
+ int error;
+
+ td = curthread;
+ error = accept1(td, s, name, namelen, 0);
+ if( error == 0 )
+ {
+ return td->td_retval[0];
+ }
+ errno = error;
+ return -1;
+}
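+/*
+ * Server setup sketch (illustrative, standard BSD socket sequence
+ * using the entry points above):
+ *
+ *	int s = socket(AF_INET, SOCK_STREAM, 0);
+ *	bind(s, (struct sockaddr *)&addr, sizeof(addr));
+ *	listen(s, 5);
+ *	int c = accept(s, NULL, NULL);
+ */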
+
+/*
+ * Shutdown routine
+ */
+
+int
+shutdown (int s, int how)
+{
+ struct socket *so;
+ int error = 0;
+
+ if ((so = rtems_bsdnet_fdToSocket (s)) == NULL) {
+ error = EBADF;
+ }
+ if( error == 0 )
+ {
+ error = soshutdown(so, how);
+ }
+ if( error == 0 )
+ {
+ return error;
+ }
+ errno = error;
+ return -1;
+}
+
+int
+kern_sendit(td, s, mp, flags, control, segflg)
+ struct thread *td;
+ int s;
+ struct msghdr *mp;
+ int flags;
+ struct mbuf *control;
+ enum uio_seg segflg;
+{
+ struct uio auio;
+ struct iovec *iov;
+ struct socket *so;
+ int i;
+ int len, error;
+#ifdef KTRACE
+ struct uio *ktruio = NULL;
+#endif
+
+ if ((so = rtems_bsdnet_fdToSocket (s)) == NULL) {
+ error = EBADF;
+ return (error);
+ }
+
+#ifdef MAC
+ if (mp->msg_name != NULL) {
+ error = mac_socket_check_connect(td->td_ucred, so,
+ mp->msg_name);
+ if (error)
+ goto bad;
+ }
+ error = mac_socket_check_send(td->td_ucred, so);
+ if (error)
+ goto bad;
+#endif
+
+ auio.uio_iov = mp->msg_iov;
+ auio.uio_iovcnt = mp->msg_iovlen;
+ auio.uio_segflg = segflg;
+ auio.uio_rw = UIO_WRITE;
+ auio.uio_td = td;
+ auio.uio_offset = 0; /* XXX */
+ auio.uio_resid = 0;
+ iov = mp->msg_iov;
+ for (i = 0; i < mp->msg_iovlen; i++, iov++) {
+ if ((auio.uio_resid += iov->iov_len) < 0) {
+ error = EINVAL;
+ goto bad;
+ }
+ }
+#ifdef KTRACE
+ if (KTRPOINT(td, KTR_GENIO))
+ ktruio = cloneuio(&auio);
+#endif
+ len = auio.uio_resid;
+ error = sosend(so, mp->msg_name, &auio, 0, control, flags, td);
+ if (error) {
+ if (auio.uio_resid != len && (error == ERESTART ||
+ error == EINTR || error == EWOULDBLOCK))
+ error = 0;
+ /* Generation of SIGPIPE can be controlled per socket */
+ if (error == EPIPE && !(so->so_options & SO_NOSIGPIPE) &&
+ !(flags & MSG_NOSIGNAL)) {
+ PROC_LOCK(td->td_proc);
+ killinfo(td->td_proc->p_pid, SIGPIPE, NULL);
+ PROC_UNLOCK(td->td_proc);
+ }
+ }
+ if (error == 0)
+ td->td_retval[0] = len - auio.uio_resid;
+#ifdef KTRACE
+ if (ktruio != NULL) {
+ ktruio->uio_resid = td->td_retval[0];
+ ktrgenio(s, UIO_WRITE, ktruio, error);
+ }
+#endif
+bad:
+ return (error);
+}
+
+static int
+sendit(td, s, mp, flags)
+ struct thread *td;
+ int s;
+ struct msghdr *mp;
+ int flags;
+{
+ struct mbuf *control;
+ struct sockaddr *to;
+ int error;
+
+ if (mp->msg_name != NULL) {
+ error = getsockaddr(&to, mp->msg_name, mp->msg_namelen);
+ if (error) {
+ to = NULL;
+ goto bad;
+ }
+ mp->msg_name = to;
+ } else {
+ to = NULL;
+ }
+
+ if (mp->msg_control) {
+ if (mp->msg_controllen < sizeof(struct cmsghdr)
+#ifdef COMPAT_OLDSOCK
+ && mp->msg_flags != MSG_COMPAT
+#endif
+ ) {
+ error = EINVAL;
+ goto bad;
+ }
+ error = sockargs(&control, mp->msg_control,
+ mp->msg_controllen, MT_CONTROL);
+ if (error)
+ goto bad;
+#ifdef COMPAT_OLDSOCK
+ if (mp->msg_flags == MSG_COMPAT) {
+ struct cmsghdr *cm;
+
+ M_PREPEND(control, sizeof(*cm), M_WAIT);
+ cm = mtod(control, struct cmsghdr *);
+ cm->cmsg_len = control->m_len;
+ cm->cmsg_level = SOL_SOCKET;
+ cm->cmsg_type = SCM_RIGHTS;
+ }
+#endif
+ } else {
+ control = NULL;
+ }
+
+ error = kern_sendit(td, s, mp, flags, control, UIO_USERSPACE);
+
+bad:
+ if (to)
+ free(to, M_SONAME);
+ return (error);
+}
+
+/*
+ * All `transmit' operations end up calling this routine.
+ */
+ssize_t
+sendmsg (int s, const struct msghdr *mp, int flags)
+{
+ struct thread *td;
+ struct msghdr msg;
+ struct iovec *iov;
+ int error;
+
+ td = curthread;
+ error = copyin(mp, &msg, sizeof (msg));
+ if (error)
+ return (error);
+ error = copyiniov(msg.msg_iov, msg.msg_iovlen, &iov, EMSGSIZE);
+ if (error)
+ return (error);
+ msg.msg_iov = iov;
+#ifdef COMPAT_OLDSOCK
+ msg.msg_flags = 0;
+#endif
+ error = sendit(td, s, &msg, flags);
+ free(iov, M_IOV);
+ if( error == 0 )
+ {
+ return td->td_retval[0];
+ }
+ errno = error;
+ return -1;
+}
+
+/*
+ * Send a message to a host
+ */
+ssize_t
+sendto (int s, const void *buf, size_t len, int flags, const struct sockaddr *to, int tolen)
+{
+ struct thread *td;
+ struct msghdr msg;
+ struct iovec aiov;
+ int error;
+
+ td = curthread;
+ msg.msg_name = to;
+ msg.msg_namelen = tolen;
+ msg.msg_iov = &aiov;
+ msg.msg_iovlen = 1;
+ msg.msg_control = 0;
+#ifdef COMPAT_OLDSOCK
+ msg.msg_flags = 0;
+#endif
+ aiov.iov_base = buf;
+ aiov.iov_len = len;
+ error = sendit(td, s, &msg, flags);
+ if( error == 0 )
+ {
+ return td->td_retval[0];
+ }
+ errno = error;
+ return -1;
+}
+
+ssize_t
+send( int s, const void *msg, size_t len, int flags )
+{
+ return (sendto(s, msg, len, flags, NULL, 0));
+}
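+/*
+ * Layering sketch (from the code above): send() -> sendto() ->
+ * sendit() -> kern_sendit() -> sosend(). For a connected socket the
+ * two calls are equivalent:
+ *
+ *	send(s, buf, len, 0);
+ *	sendto(s, buf, len, 0, NULL, 0);
+ */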
+
+int
+kern_recvit(td, s, mp, fromseg, controlp)
+ struct thread *td;
+ int s;
+ struct msghdr *mp;
+ enum uio_seg fromseg;
+ struct mbuf **controlp;
+{
+ struct uio auio;
+ struct iovec *iov;
+ int i;
+ socklen_t len;
+ int error;
+ struct mbuf *m, *control = 0;
+ caddr_t ctlbuf;
+ struct socket *so;
+ struct sockaddr *fromsa = 0;
+#ifdef KTRACE
+ struct uio *ktruio = NULL;
+#endif
+
+ if(controlp != NULL)
+ *controlp = 0;
+
+ if ((so = rtems_bsdnet_fdToSocket (s)) == NULL) {
+ error = EBADF;
+ return (error);
+ }
+
+#ifdef MAC
+ error = mac_socket_check_receive(td->td_ucred, so);
+ if (error) {
+ return (error);
+ }
+#endif
+
+ auio.uio_iov = mp->msg_iov;
+ auio.uio_iovcnt = mp->msg_iovlen;
+ auio.uio_segflg = UIO_USERSPACE;
+ auio.uio_rw = UIO_READ;
+ auio.uio_td = td;
+ auio.uio_offset = 0; /* XXX */
+ auio.uio_resid = 0;
+ iov = mp->msg_iov;
+ for (i = 0; i < mp->msg_iovlen; i++, iov++) {
+ if ((auio.uio_resid += iov->iov_len) < 0) {
+ return (EINVAL);
+ }
+ }
+#ifdef KTRACE
+ if (KTRPOINT(td, KTR_GENIO))
+ ktruio = cloneuio(&auio);
+#endif
+ len = auio.uio_resid;
+ CURVNET_SET(so->so_vnet);
+ error = soreceive(so, &fromsa, &auio, (struct mbuf **)0,
+ (mp->msg_control || controlp) ? &control : (struct mbuf **)0,
+ &mp->msg_flags);
+ CURVNET_RESTORE();
+ if (error) {
+ if (auio.uio_resid != (int)len && (error == ERESTART ||
+ error == EINTR || error == EWOULDBLOCK))
+ error = 0;
+ }
+#ifdef KTRACE
+ if (ktruio != NULL) {
+ ktruio->uio_resid = (int)len - auio.uio_resid;
+ ktrgenio(s, UIO_READ, ktruio, error);
+ }
+#endif
+ if (error)
+ goto out;
+ td->td_retval[0] = (int)len - auio.uio_resid;
+ if (mp->msg_name) {
+ len = mp->msg_namelen;
+ if (len <= 0 || fromsa == 0)
+ len = 0;
+ else {
+ /* save sa_len before it is destroyed by MSG_COMPAT */
+ len = MIN(len, fromsa->sa_len);
+#ifdef COMPAT_OLDSOCK
+ if (mp->msg_flags & MSG_COMPAT)
+ ((struct osockaddr *)fromsa)->sa_family =
+ fromsa->sa_family;
+#endif
+ if (fromseg == UIO_USERSPACE) {
+ error = copyout(fromsa, mp->msg_name,
+ (unsigned)len);
+ if (error)
+ goto out;
+ } else
+ bcopy(fromsa, mp->msg_name, len);
+ }
+ mp->msg_namelen = len;
+ }
+ if (mp->msg_control && controlp == NULL) {
+#ifdef COMPAT_OLDSOCK
+ /*
+ * We assume that old recvmsg calls won't receive access
+ * rights and other control info, esp. as control info
+ * is always optional and those options didn't exist in 4.3.
+ * If we receive rights, trim the cmsghdr; anything else
+ * is tossed.
+ */
+ if (control && mp->msg_flags & MSG_COMPAT) {
+ if (mtod(control, struct cmsghdr *)->cmsg_level !=
+ SOL_SOCKET ||
+ mtod(control, struct cmsghdr *)->cmsg_type !=
+ SCM_RIGHTS) {
+ mp->msg_controllen = 0;
+ goto out;
+ }
+ control->m_len -= sizeof (struct cmsghdr);
+ control->m_data += sizeof (struct cmsghdr);
+ }
+#endif
+ len = mp->msg_controllen;
+ m = control;
+ mp->msg_controllen = 0;
+ ctlbuf = mp->msg_control;
+
+ while (m && len > 0) {
+ unsigned int tocopy;
+
+ if (len >= m->m_len)
+ tocopy = m->m_len;
+ else {
+ mp->msg_flags |= MSG_CTRUNC;
+ tocopy = len;
+ }
+
+ if ((error = copyout(mtod(m, caddr_t),
+ ctlbuf, tocopy)) != 0)
+ goto out;
+
+ ctlbuf += tocopy;
+ len -= tocopy;
+ m = m->m_next;
+ }
+ mp->msg_controllen = ctlbuf - (caddr_t)mp->msg_control;
+ }
+out:
+#ifdef KTRACE
+ if (fromsa && KTRPOINT(td, KTR_STRUCT))
+ ktrsockaddr(fromsa);
+#endif
+ if (fromsa)
+ free(fromsa, M_SONAME);
+
+ if (error == 0 && controlp != NULL)
+ *controlp = control;
+ else if (control)
+ m_freem(control);
+
+ return (error);
+}
+
+static int
+recvit(td, s, mp, namelenp)
+ struct thread *td;
+ int s;
+ struct msghdr *mp;
+ void *namelenp;
+{
+ int error;
+
+ error = kern_recvit(td, s, mp, UIO_USERSPACE, NULL);
+ if (error)
+ return (error);
+ if (namelenp) {
+ error = copyout(&mp->msg_namelen, namelenp, sizeof (socklen_t));
+#ifdef COMPAT_OLDSOCK
+ if (mp->msg_flags & MSG_COMPAT)
+ error = 0; /* old recvfrom didn't check */
+#endif
+ }
+ return (error);
+}
+
+/*
+ * All `receive' operations end up calling this routine.
+ */
+ssize_t
+recvmsg (int s, struct msghdr *mp, int flags)
+{
+ struct thread *td;
+ struct msghdr msg;
+ struct iovec *uiov, *iov;
+ int error;
+
+ td = curthread;
+ error = copyin(mp, &msg, sizeof (msg));
+ if (error == 0 )
+ {
+ error = copyiniov(msg.msg_iov, msg.msg_iovlen, &iov, EMSGSIZE);
+ if (error == 0)
+ {
+ msg.msg_flags = flags;
+ #ifdef COMPAT_OLDSOCK
+ msg.msg_flags &= ~MSG_COMPAT;
+ #endif
+ uiov = msg.msg_iov;
+ msg.msg_iov = iov;
+ error = recvit(td, s, &msg, NULL);
+ if (error == 0) {
+ msg.msg_iov = uiov;
+ error = copyout(&msg, mp, sizeof(msg));
+ }
+ free(iov, M_IOV);
+ }
+ }
+ if( error == 0 )
+ {
+ return td->td_retval[0];
+ }
+ errno = error;
+ return -1;
+}
+
+/*
+ * Receive a message from a host
+ */
+ssize_t
+recvfrom (int s, void *buf, size_t len, int flags, struct sockaddr *from, socklen_t *fromlenaddr)
+{
+ struct thread *td;
+ struct msghdr msg;
+ struct iovec aiov;
+ int error;
+
+ td = curthread;
+ if (fromlenaddr) {
+ error = copyin(fromlenaddr,
+ &msg.msg_namelen, sizeof (msg.msg_namelen));
+ if (error)
+ goto done2;
+ } else {
+ msg.msg_namelen = 0;
+ }
+ msg.msg_name = from;
+ msg.msg_iov = &aiov;
+ msg.msg_iovlen = 1;
+ aiov.iov_base = buf;
+ aiov.iov_len = len;
+ msg.msg_control = 0;
+ msg.msg_flags = flags;
+ error = recvit(td, s, &msg, fromlenaddr);
+done2:
+ if( error == 0 )
+ {
+ return td->td_retval[0];
+ }
+ errno = error;
+ return -1;
+}
+
+ssize_t
+recv( int s, void *buf, size_t len, int flags )
+{
+ return (recvfrom(s, buf, len, flags, NULL, 0));
+}
+
+int
+kern_setsockopt(td, s, level, name, val, valseg, valsize)
+ struct thread *td;
+ int s;
+ int level;
+ int name;
+ void *val;
+ enum uio_seg valseg;
+ socklen_t valsize;
+{
+ int error;
+ struct socket *so;
+ struct sockopt sopt;
+
+ if (val == NULL && valsize != 0)
+ return (EFAULT);
+ if ((int)valsize < 0)
+ return (EINVAL);
+
+ sopt.sopt_dir = SOPT_SET;
+ sopt.sopt_level = level;
+ sopt.sopt_name = name;
+ sopt.sopt_val = val;
+ sopt.sopt_valsize = valsize;
+ switch (valseg) {
+ case UIO_USERSPACE:
+ sopt.sopt_td = td;
+ break;
+ case UIO_SYSSPACE:
+ sopt.sopt_td = NULL;
+ break;
+ default:
+ panic("kern_setsockopt called with bad valseg");
+ }
+
+ if ((so = rtems_bsdnet_fdToSocket (s)) == NULL) {
+ error = EBADF;
+ return error;
+ }
+ CURVNET_SET(so->so_vnet);
+ error = sosetopt(so, &sopt);
+ CURVNET_RESTORE();
+ return(error);
+}
+
+int
+setsockopt (int s, int level, int name, const void *val, socklen_t valsize)
+{
+ struct thread *td;
+ int error;
+
+ td = curthread;
+ error = kern_setsockopt(td, s, level, name, val, UIO_USERSPACE, valsize);
+ if( error == 0 )
+ {
+ return error;
+ }
+ errno = error;
+ return -1;
+}
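+/*
+ * Usage sketch (illustrative, standard socket option handling):
+ *
+ *	int on = 1;
+ *	setsockopt(s, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on));
+ */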
+
+int
+kern_getsockopt(td, s, level, name, val, valseg, valsize)
+ struct thread *td;
+ int s;
+ int level;
+ int name;
+ void *val;
+ enum uio_seg valseg;
+ socklen_t *valsize;
+{
+ int error;
+ struct socket *so;
+ struct sockopt sopt;
+
+ if (val == NULL)
+ *valsize = 0;
+ if ((int)*valsize < 0)
+ return (EINVAL);
+
+ sopt.sopt_dir = SOPT_GET;
+ sopt.sopt_level = level;
+ sopt.sopt_name = name;
+ sopt.sopt_val = val;
+ sopt.sopt_valsize = (size_t)*valsize; /* checked non-negative above */
+ switch (valseg) {
+ case UIO_USERSPACE:
+ sopt.sopt_td = td;
+ break;
+ case UIO_SYSSPACE:
+ sopt.sopt_td = NULL;
+ break;
+ default:
+ panic("kern_getsockopt called with bad valseg");
+ }
+
+ if ((so = rtems_bsdnet_fdToSocket (s)) == NULL) {
+ error = EBADF;
+ return error;
+ }
+ CURVNET_SET(so->so_vnet);
+ error = sogetopt(so, &sopt);
+ CURVNET_RESTORE();
+ *valsize = sopt.sopt_valsize;
+ return (error);
+}
+
+int
+getsockopt (int s, int level, int name, void *val, socklen_t *avalsize)
+{
+ struct thread *td;
+ socklen_t valsize;
+ int error = 0;
+
+ td = curthread;
+ if (val) {
+ error = copyin(avalsize, &valsize, sizeof (valsize));
+ }
+
+ if( error == 0 )
+ {
+ error = kern_getsockopt(td, s, level, name, val, UIO_USERSPACE, &valsize);
+
+ if (error == 0)
+ error = copyout(&valsize, avalsize, sizeof (valsize));
+ }
+ if( error == 0 )
+ {
+ return error;
+ }
+ errno = error;
+ return -1;
+}
+
+int
+kern_getpeername(struct thread *td, int fd, struct sockaddr **sa,
+ socklen_t *alen)
+{
+ struct socket *so;
+ socklen_t len;
+ int error;
+
+ if (*alen < 0)
+ return (EINVAL);
+
+ if ((so = rtems_bsdnet_fdToSocket (fd)) == NULL) {
+ error = EBADF;
+ return error;
+ }
+ if ((so->so_state & (SS_ISCONNECTED|SS_ISCONFIRMING)) == 0) {
+ error = ENOTCONN;
+ goto done;
+ }
+ *sa = NULL;
+ CURVNET_SET(so->so_vnet);
+ error = (*so->so_proto->pr_usrreqs->pru_peeraddr)(so, sa);
+ CURVNET_RESTORE();
+ if (error)
+ goto bad;
+ if (*sa == NULL)
+ len = 0;
+ else
+ len = MIN(*alen, (*sa)->sa_len);
+ *alen = len;
+#ifdef KTRACE
+ if (KTRPOINT(td, KTR_STRUCT))
+ ktrsockaddr(*sa);
+#endif
+bad:
+ if (error && *sa) {
+ free(*sa, M_SONAME);
+ *sa = NULL;
+ }
+done:
+ return (error);
+}
+
+static int
+getpeername1(struct thread *td, int fdes, struct sockaddr *asa,
+    socklen_t *alen, int compat)
+{
+ struct sockaddr *sa;
+ socklen_t len;
+ int error;
+
+ error = copyin(alen, &len, sizeof (len));
+ if (error)
+ return (error);
+
+ error = kern_getpeername(td, fdes, &sa, &len);
+ if (error)
+ return (error);
+
+ if (len != 0) {
+#ifdef COMPAT_OLDSOCK
+ if (compat)
+ ((struct osockaddr *)sa)->sa_family = sa->sa_family;
+#endif
+ error = copyout(sa, asa, (u_int)len);
+ }
+ free(sa, M_SONAME);
+ if (error == 0)
+ error = copyout(&len, alen, sizeof(len));
+ return (error);
+}
+
+int
+getpeername (int s, struct sockaddr *name, socklen_t *namelen)
+{
+ struct thread *td;
+ int error;
+
+ td = curthread;
+ error = getpeername1(td, s, name, namelen, 0);
+	if (error == 0)
+		return (0);
+	errno = error;
+	return (-1);
+}
+
+int
+kern_getsockname(struct thread *td, int fd, struct sockaddr **sa,
+ socklen_t *alen)
+{
+ struct socket *so;
+ socklen_t len;
+ int error;
+
+	if ((int)*alen < 0)
+ return (EINVAL);
+
+ if ((so = rtems_bsdnet_fdToSocket (fd)) == NULL) {
+ error = EBADF;
+ return error;
+ }
+ *sa = NULL;
+ CURVNET_SET(so->so_vnet);
+ error = (*so->so_proto->pr_usrreqs->pru_sockaddr)(so, sa);
+ CURVNET_RESTORE();
+ if (error)
+ goto bad;
+ if (*sa == NULL)
+ len = 0;
+ else
+ len = MIN(*alen, (*sa)->sa_len);
+ *alen = len;
+#ifdef KTRACE
+ if (KTRPOINT(td, KTR_STRUCT))
+ ktrsockaddr(*sa);
+#endif
+bad:
+ if (error && *sa) {
+ free(*sa, M_SONAME);
+ *sa = NULL;
+ }
+ return (error);
+}
+
+static int
+getsockname1(struct thread *td, int fdes, struct sockaddr *asa,
+    socklen_t *alen, int compat)
+{
+ struct sockaddr *sa;
+ socklen_t len;
+ int error;
+
+ error = copyin(alen, &len, sizeof(len));
+ if (error)
+ return (error);
+
+ error = kern_getsockname(td, fdes, &sa, &len);
+ if (error)
+ return (error);
+
+ if (len != 0) {
+#ifdef COMPAT_OLDSOCK
+ if (compat)
+ ((struct osockaddr *)sa)->sa_family = sa->sa_family;
+#endif
+ error = copyout(sa, asa, (u_int)len);
+ }
+ free(sa, M_SONAME);
+ if (error == 0)
+ error = copyout(&len, alen, sizeof(len));
+ return (error);
+}
+
+int
+getsockname (int s, struct sockaddr *name, socklen_t *namelen)
+{
+ struct thread *td;
+ int error;
+
+ td = curthread;
+ error = getsockname1(td, s, name, namelen, 0);
+	if (error == 0)
+		return (0);
+	errno = error;
+	return (-1);
+}
+
+/*
+ ************************************************************************
+ * RTEMS I/O HANDLER ROUTINES *
+ ************************************************************************
+ */
+static int
+rtems_bsdnet_close (rtems_libio_t *iop)
+{
+ struct socket *so;
+ int error;
+
+ if ((so = iop->data1) == NULL) {
+ errno = EBADF;
+ return -1;
+ }
+ error = soclose (so);
+ if (error) {
+ errno = error;
+ return -1;
+ }
+ return 0;
+}
+
+static ssize_t
+rtems_bsdnet_read (rtems_libio_t *iop, void *buffer, size_t count)
+{
+ return recv (iop->data0, buffer, count, 0);
+}
+
+static ssize_t
+rtems_bsdnet_write (rtems_libio_t *iop, const void *buffer, size_t count)
+{
+ return send (iop->data0, buffer, count, 0);
+}
+
+static int
+so_ioctl (rtems_libio_t *iop, struct socket *so, uint32_t command, void *buffer)
+{
+ switch (command) {
+ case FIONBIO:
+ SOCK_LOCK(so);
+ if (*(int *)buffer) {
+ iop->flags |= O_NONBLOCK;
+ so->so_state |= SS_NBIO;
+ }
+ else {
+ iop->flags &= ~O_NONBLOCK;
+ so->so_state &= ~SS_NBIO;
+ }
+ SOCK_UNLOCK(so);
+ return 0;
+
+ case FIONREAD:
+ *(int *)buffer = so->so_rcv.sb_cc;
+ return 0;
+ }
+
+ if (IOCGROUP(command) == 'i')
+ return ifioctl (so, command, buffer, NULL);
+ if (IOCGROUP(command) == 'r')
+ return rtioctl (command, buffer, NULL);
+ return (*so->so_proto->pr_usrreqs->pru_control)(so, command, buffer, 0, curthread);
+}
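+
+/*
+ * Illustrative sketch (not part of the handler itself): an application can
+ * toggle non-blocking mode with
+ *
+ *	int on = 1;
+ *	ioctl(sock, FIONBIO, &on);
+ *
+ * which lands in the FIONBIO case above and updates both the libio
+ * O_NONBLOCK flag and the socket's SS_NBIO state.
+ */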
+
+static int
+rtems_bsdnet_ioctl (rtems_libio_t *iop, uint32_t command, void *buffer)
+{
+ struct socket *so;
+ int error;
+
+ if ((so = iop->data1) == NULL) {
+ errno = EBADF;
+ return -1;
+ }
+ error = so_ioctl (iop, so, command, buffer);
+ if (error) {
+ errno = error;
+ return -1;
+ }
+ return 0;
+}
+
+static int
+rtems_bsdnet_fcntl (int cmd, rtems_libio_t *iop)
+{
+ struct socket *so;
+
+ if (cmd == F_SETFL) {
+ if ((so = iop->data1) == NULL) {
+ return EBADF;
+ }
+ SOCK_LOCK(so);
+ if (iop->flags & LIBIO_FLAGS_NO_DELAY)
+ so->so_state |= SS_NBIO;
+ else
+ so->so_state &= ~SS_NBIO;
+ SOCK_UNLOCK(so);
+ }
+ return 0;
+}
+
+static int
+rtems_bsdnet_fstat (rtems_filesystem_location_info_t *loc, struct stat *sp)
+{
+ sp->st_mode = S_IFSOCK;
+ return 0;
+}
+
+static const rtems_filesystem_file_handlers_r socket_handlers = {
+ rtems_filesystem_default_open, /* open */
+ rtems_bsdnet_close, /* close */
+ rtems_bsdnet_read, /* read */
+ rtems_bsdnet_write, /* write */
+ rtems_bsdnet_ioctl, /* ioctl */
+ rtems_filesystem_default_lseek, /* lseek */
+ rtems_bsdnet_fstat, /* fstat */
+ rtems_filesystem_default_fchmod, /* fchmod */
+ rtems_filesystem_default_ftruncate, /* ftruncate */
+ rtems_filesystem_default_fpathconf, /* fpathconf */
+ rtems_filesystem_default_fsync, /* fsync */
+ rtems_filesystem_default_fdatasync, /* fdatasync */
+ rtems_bsdnet_fcntl, /* fcntl */
+ rtems_filesystem_default_rmnod /* rmnod */
+};
diff --git a/rtems/freebsd/rtems/rtems-bsd-sysctl.c b/rtems/freebsd/rtems/rtems-bsd-sysctl.c
new file mode 100644
index 00000000..dcf963f9
--- /dev/null
+++ b/rtems/freebsd/rtems/rtems-bsd-sysctl.c
@@ -0,0 +1,64 @@
+/**
+ * @file
+ *
+ * @ingroup rtems_bsd_rtems
+ *
+ * @brief TODO.
+ */
+
+/*
+ * Copyright (c) 2010 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Obere Lagerstr. 30
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.com/license/LICENSE.
+ */
+
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+#include <rtems/freebsd/sys/types.h>
+#include <rtems/freebsd/sys/sysctl.h>
+
+#include <errno.h>
+#include <string.h>
+
+int sysctl(
+ int *name,
+ u_int namelen,
+ void *oldp,
+ size_t *oldlenp,
+ void *newp,
+ size_t newlen
+)
+{
+ int eno = EINVAL;
+
+ if (namelen <= CTL_MAXNAME) {
+		int namedup[CTL_MAXNAME];
+
+ memcpy(namedup, name, namelen * sizeof(*name));
+
+ eno = kernel_sysctl(
+ NULL,
+ namedup,
+ namelen,
+ oldp,
+ oldlenp,
+ newp,
+ newlen,
+ oldlenp,
+ 0
+ );
+ }
+
+ if (eno == 0) {
+ return 0;
+ } else {
+ errno = eno;
+
+ return -1;
+ }
+}
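+
+/*
+ * Illustrative usage sketch (assuming the stock FreeBSD CTL_KERN and
+ * KERN_HOSTNAME constants from <sys/sysctl.h> are available here):
+ *
+ *	int mib[2] = { CTL_KERN, KERN_HOSTNAME };
+ *	char buf[64];
+ *	size_t len = sizeof(buf);
+ *
+ *	if (sysctl(mib, 2, buf, &len, NULL, 0) == 0)
+ *		printf("hostname: %s\n", buf);
+ */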
diff --git a/rtems/freebsd/rtems/rtems-bsd-sysctlbyname.c b/rtems/freebsd/rtems/rtems-bsd-sysctlbyname.c
new file mode 100644
index 00000000..b2953cc2
--- /dev/null
+++ b/rtems/freebsd/rtems/rtems-bsd-sysctlbyname.c
@@ -0,0 +1,43 @@
+/**
+ * @file
+ *
+ * @ingroup rtems_bsd_rtems
+ *
+ * @brief TODO.
+ *
+ * File origin from FreeBSD 'lib/libc/gen/sysctlbyname.c'.
+ */
+
+/*
+ * ----------------------------------------------------------------------------
+ * "THE BEER-WARE LICENSE" (Revision 42):
+ * <phk@FreeBSD.org> wrote this file. As long as you retain this notice you
+ * can do whatever you want with this stuff. If we meet some day, and you think
+ * this stuff is worth it, you can buy me a beer in return. Poul-Henning Kamp
+ * ----------------------------------------------------------------------------
+ *
+ */
+
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/freebsd/sys/types.h>
+#include <rtems/freebsd/sys/sysctl.h>
+
+int
+sysctlbyname(const char *name, void *oldp, size_t *oldlenp,
+ void *newp, size_t newlen)
+{
+ int real_oid[CTL_MAXNAME+2];
+ int error;
+ size_t oidlen;
+
+ oidlen = sizeof(real_oid) / sizeof(int);
+ error = sysctlnametomib(name, real_oid, &oidlen);
+ if (error < 0)
+ return (error);
+ error = sysctl(real_oid, oidlen, oldp, oldlenp, newp, newlen);
+ return (error);
+}
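+
+/*
+ * Illustrative usage sketch: the same kind of lookup by name instead of by
+ * numeric OID ("kern.hostname" is a standard FreeBSD node, assumed to be
+ * registered in this port as well):
+ *
+ *	char buf[64];
+ *	size_t len = sizeof(buf);
+ *
+ *	if (sysctlbyname("kern.hostname", buf, &len, NULL, 0) == 0)
+ *		printf("hostname: %s\n", buf);
+ */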
diff --git a/rtems/freebsd/rtems/rtems-bsd-sysctlnametomib.c b/rtems/freebsd/rtems/rtems-bsd-sysctlnametomib.c
new file mode 100644
index 00000000..0ce5f088
--- /dev/null
+++ b/rtems/freebsd/rtems/rtems-bsd-sysctlnametomib.c
@@ -0,0 +1,67 @@
+/**
+ * @file
+ *
+ * @ingroup rtems_bsd_rtems
+ *
+ * @brief TODO.
+ *
+ * File origin from FreeBSD 'lib/libc/gen/sysctlnametomib.c'.
+ */
+
+/*
+ * Copyright 2001 The FreeBSD Project. All Rights Reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE FREEBSD PROJECT ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE FREEBSD PROJECT BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+#include <rtems/freebsd/sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/freebsd/sys/types.h>
+#include <rtems/freebsd/sys/sysctl.h>
+#include <string.h>
+
+/*
+ * This function uses a presently undocumented interface to the kernel
+ * to walk the sysctl tree and translate a name into its numeric OID.
+ * This interface is under work and consideration, and should probably
+ * be killed with a big axe by the first person who can find the time.
+ * (Be aware, though, that the proper interface isn't as obvious as it
+ * may seem; there are various conflicting requirements.)
+ */
+int
+sysctlnametomib(const char *name, int *mibp, size_t *sizep)
+{
+ int oid[2];
+ int error;
+
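+	/* OID {0, 3} is the kernel's undocumented "name2oid" translation node. */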
+ oid[0] = 0;
+ oid[1] = 3;
+
+ *sizep *= sizeof(int);
+ error = sysctl(oid, 2, mibp, sizep, name, strlen(name));
+ *sizep /= sizeof(int);
+ return (error);
+}
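+
+/*
+ * Illustrative usage sketch: translate the name once, then query repeatedly
+ * without the string-lookup overhead ("kern.maxfiles" is a standard FreeBSD
+ * integer node, assumed to be registered in this port as well):
+ *
+ *	int mib[CTL_MAXNAME];
+ *	size_t miblen = CTL_MAXNAME;
+ *	int value;
+ *	size_t len = sizeof(value);
+ *
+ *	if (sysctlnametomib("kern.maxfiles", mib, &miblen) == 0 &&
+ *	    sysctl(mib, (u_int)miblen, &value, &len, NULL, 0) == 0)
+ *		printf("kern.maxfiles = %d\n", value);
+ */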
diff --git a/rtems/freebsd/rtems/rtems-bsd-thread.c b/rtems/freebsd/rtems/rtems-bsd-thread.c
new file mode 100644
index 00000000..92bf79c0
--- /dev/null
+++ b/rtems/freebsd/rtems/rtems-bsd-thread.c
@@ -0,0 +1,208 @@
+/**
+ * @file
+ *
+ * @ingroup rtems_bsd_rtems
+ *
+ * @brief TODO.
+ */
+
+/*
+ * Copyright (c) 2009, 2010 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Obere Lagerstr. 30
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.com/license/LICENSE.
+ */
+
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/types.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/proc.h>
+#include <rtems/freebsd/sys/kthread.h>
+#include <rtems/freebsd/sys/malloc.h>
+
+RTEMS_CHAIN_DEFINE_EMPTY(rtems_bsd_thread_chain);
+
+static int
+rtems_bsd_thread_start(struct thread **td_ptr, void (*func)(void *), void *arg, int flags, int pages, const char *fmt, va_list ap)
+{
+	struct proc *p = &proc0;
+ struct thread *td = malloc(sizeof(struct thread), M_TEMP, M_WAITOK | M_ZERO);
+
+ if (td != NULL) {
+ rtems_status_code sc = RTEMS_SUCCESSFUL;
+ rtems_id id = RTEMS_ID_NONE;
+ unsigned index = 0;
+		char name[5] = "_???";
+
+ BSD_ASSERT(pages >= 0);
+
+ sc = rtems_task_create(
+ rtems_build_name('_', 'T', 'S', 'K'),
+ BSD_TASK_PRIORITY_NORMAL,
+ BSD_MINIMUM_TASK_STACK_SIZE + (size_t) pages * PAGE_SIZE,
+			RTEMS_DEFAULT_MODES,
+ RTEMS_DEFAULT_ATTRIBUTES,
+ &id
+ );
+ if (sc != RTEMS_SUCCESSFUL) {
+ free(td, M_TEMP);
+
+ return ENOMEM;
+ }
+
+		sc = rtems_task_set_note(id, RTEMS_NOTEPAD_0, (uint32_t) td);
+		if (sc != RTEMS_SUCCESSFUL) {
+			rtems_task_delete(id);
+			free(td, M_TEMP);
+
+			return ENOMEM;
+		}
+
+ index = rtems_object_id_get_index(id);
+ snprintf(name + 1, sizeof(name) - 1, "%03u", index);
+ sc = rtems_object_set_name(id, name);
+ if (sc != RTEMS_SUCCESSFUL) {
+ rtems_task_delete(id);
+ free(td, M_TEMP);
+
+ return ENOMEM;
+ }
+
+ sc = rtems_task_start(id, (rtems_task_entry) func, (rtems_task_argument) arg);
+ if (sc != RTEMS_SUCCESSFUL) {
+ rtems_task_delete(id);
+ free(td, M_TEMP);
+
+ return ENOMEM;
+ }
+
+ td->td_id = id;
+ vsnprintf(td->td_name, sizeof(td->td_name), fmt, ap);
+ bzero(&td->td_ru, sizeof(td->td_ru));
+ td->td_ucred = crhold(p->p_ucred);
+ td->td_proc = p;
+
+ rtems_chain_append(&rtems_bsd_thread_chain, &td->td_node);
+
+ if (td_ptr != NULL) {
+ *td_ptr = td;
+ }
+
+ return 0;
+ }
+
+ return ENOMEM;
+}
+
+static void rtems_bsd_thread_delete(void) __dead2;
+
+static void
+rtems_bsd_thread_delete(void)
+{
+ rtems_chain_control *chain = &rtems_bsd_thread_chain;
+ rtems_chain_node *node = rtems_chain_first(chain);
+ rtems_id id = rtems_task_self();
+ struct thread *td = NULL;
+
+ while (!rtems_chain_is_tail(chain, node)) {
+ struct thread *cur = (struct thread *) node;
+
+ if (cur->td_id == id) {
+ td = cur;
+ break;
+ }
+
+ node = rtems_chain_next(node);
+ }
+
+ if (td != NULL) {
+ rtems_chain_extract(&td->td_node);
+
+ free(td, M_TEMP);
+ } else {
+ BSD_PANIC("cannot find task entry");
+ }
+
+ rtems_task_delete(RTEMS_SELF);
+
+ while (true) {
+ /* Do nothing */
+ }
+}
+
+void
+kproc_start(const void *udata)
+{
+ const struct kproc_desc *pd = udata;
+ int eno = kproc_create((void (*)(void *))pd->func, NULL, pd->global_procpp, 0, 0, "%s", pd->arg0);
+
+ BSD_ASSERT(eno == 0);
+}
+
+int
+kproc_create(void (*func)(void *), void *arg, struct proc **newpp, int flags, int pages, const char *fmt, ...)
+{
+ int eno = 0;
+ va_list ap;
+
+ va_start(ap, fmt);
+ eno = rtems_bsd_thread_start(newpp, func, arg, flags, pages, fmt, ap);
+ va_end(ap);
+
+ return eno;
+}
+
+void
+kproc_exit(int ecode)
+{
+ rtems_bsd_thread_delete();
+}
+
+void
+kthread_start(const void *udata)
+{
+ const struct kthread_desc *td = udata;
+ int eno = kthread_add((void (*)(void *)) td->func, NULL, NULL, td->global_threadpp, 0, 0, "%s", td->arg0);
+
+ BSD_ASSERT(eno == 0);
+}
+
+int
+kthread_add(void (*func)(void *), void *arg, struct proc *p, struct thread **newtdp, int flags, int pages, const char *fmt, ...)
+{
+ int eno = 0;
+ va_list ap;
+
+ va_start(ap, fmt);
+ eno = rtems_bsd_thread_start(newtdp, func, arg, flags, pages, fmt, ap);
+ va_end(ap);
+
+ return eno;
+}
+
+void
+kthread_exit(void)
+{
+ rtems_bsd_thread_delete();
+}
+
+int
+kproc_kthread_add(void (*func)(void *), void *arg, struct proc **procptr, struct thread **tdptr, int flags, int pages, const char * procname, const char *fmt, ...)
+{
+ int eno = 0;
+ va_list ap;
+
+ va_start(ap, fmt);
+ eno = rtems_bsd_thread_start(tdptr, func, arg, flags, pages, fmt, ap);
+ va_end(ap);
+
+ return eno;
+}
diff --git a/rtems/freebsd/rtems/rtems-bsd-uma.c b/rtems/freebsd/rtems/rtems-bsd-uma.c
new file mode 100644
index 00000000..c289bf00
--- /dev/null
+++ b/rtems/freebsd/rtems/rtems-bsd-uma.c
@@ -0,0 +1,2796 @@
+/**
+ * @file
+ *
+ * @ingroup rtems_bsd_rtems
+ *
+ * @brief TODO.
+ */
+
+/*
+ * Copyright (c) 2009, 2010 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Obere Lagerstr. 30
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.com/license/LICENSE.
+ */
+
+#include <rtems/freebsd/machine/rtems-bsd-config.h>
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/types.h>
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/sys/malloc.h>
+#include <rtems/freebsd/sys/kernel.h>
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/mutex.h>
+#include <rtems/freebsd/sys/ktr.h>
+#include <rtems/freebsd/vm/uma.h>
+#include <rtems/freebsd/vm/uma_int.h>
+#include <rtems/freebsd/vm/uma_dbg.h>
+
+/*
+ * This is the zone and keg from which all zones are spawned. The idea is that
+ * even the zone & keg heads are allocated from the allocator, so we use the
+ * bss section to bootstrap us.
+ */
+static struct uma_keg masterkeg;
+static struct uma_zone masterzone_k;
+static struct uma_zone masterzone_z;
+static uma_zone_t kegs = &masterzone_k;
+static uma_zone_t zones = &masterzone_z;
+
+/* This is the zone from which all of uma_slab_t's are allocated. */
+static uma_zone_t slabzone;
+static uma_zone_t slabrefzone; /* With refcounters (for UMA_ZONE_REFCNT) */
+
+static u_int mp_maxid = 0; /* Simulate one CPU. This should really come from RTEMS SMP; at this time, RTEMS SMP is not functional. */
+#define CPU_ABSENT(x_cpu) 0 /* force all cpus to be present. This should really come from RTEMS SMP. */
+#define CPU_FOREACH(i) \
+ for ((i) = 0; (i) <= mp_maxid; (i)++) \
+ if (!CPU_ABSENT((i)))
+
+/*
+ * The initial hash tables come out of this zone so they can be allocated
+ * prior to malloc coming up.
+ */
+static uma_zone_t hashzone;
+
+/* The boot-time adjusted value for cache line alignment. */
+static int uma_align_cache = 64 - 1;
+
+static MALLOC_DEFINE(M_UMAHASH, "UMAHash", "UMA Hash Buckets");
+
+/*
+ * Are we allowed to allocate buckets?
+ */
+static int bucketdisable = 1;
+
+/* Linked list of all kegs in the system */
+static LIST_HEAD(,uma_keg) uma_kegs = LIST_HEAD_INITIALIZER(uma_kegs);
+
+/* This mutex protects the keg list */
+static struct mtx uma_mtx;
+
+/* Linked list of boot time pages */
+static LIST_HEAD(,uma_slab) uma_boot_pages =
+ LIST_HEAD_INITIALIZER(uma_boot_pages);
+
+/* This mutex protects the boot time pages list */
+static struct mtx uma_boot_pages_mtx;
+
+/* Is the VM done starting up? */
+static int booted = 0;
+
+/* Maximum number of allowed items-per-slab if the slab header is OFFPAGE */
+static u_int uma_max_ipers;
+static u_int uma_max_ipers_ref;
+
+/*
+ * This is the handle used to schedule events that need to happen
+ * outside of the allocation fast path.
+ */
+static struct callout uma_callout;
+#define UMA_TIMEOUT 20 /* Seconds for callout interval. */
+
+/*
+ * This structure is passed as the zone ctor arg so that I don't have to create
+ * a special allocation function just for zones.
+ */
+struct uma_zctor_args {
+ char *name;
+ size_t size;
+ uma_ctor ctor;
+ uma_dtor dtor;
+ uma_init uminit;
+ uma_fini fini;
+ uma_keg_t keg;
+ int align;
+ u_int32_t flags;
+};
+
+struct uma_kctor_args {
+ uma_zone_t zone;
+ size_t size;
+ uma_init uminit;
+ uma_fini fini;
+ int align;
+ u_int32_t flags;
+};
+
+struct uma_bucket_zone {
+ uma_zone_t ubz_zone;
+ char *ubz_name;
+ int ubz_entries;
+};
+
+#define BUCKET_MAX 128
+
+struct uma_bucket_zone bucket_zones[] = {
+ { NULL, "16 Bucket", 16 },
+ { NULL, "32 Bucket", 32 },
+ { NULL, "64 Bucket", 64 },
+ { NULL, "128 Bucket", 128 },
+ { NULL, NULL, 0}
+};
+
+#define BUCKET_SHIFT 4
+#define BUCKET_ZONES ((BUCKET_MAX >> BUCKET_SHIFT) + 1)
+
+/*
+ * bucket_size[] maps requested bucket sizes to zones that allocate a bucket
+ * of approximately the right size.
+ */
+static uint8_t bucket_size[BUCKET_ZONES];
+
+/*
+ * Flags and enumerations to be passed to internal functions.
+ */
+enum zfreeskip { SKIP_NONE, SKIP_DTOR, SKIP_FINI };
+
+#define ZFREE_STATFAIL 0x00000001 /* Update zone failure statistic. */
+#define ZFREE_STATFREE 0x00000002 /* Update zone free statistic. */
+
+/* Prototypes. */
+
+static void *page_alloc(uma_zone_t, int, u_int8_t *, int);
+static void *startup_alloc(uma_zone_t, int, u_int8_t *, int);
+static void page_free(void *, int, u_int8_t);
+static uma_slab_t keg_alloc_slab(uma_keg_t, uma_zone_t, int);
+static void cache_drain(uma_zone_t);
+static void bucket_drain(uma_zone_t, uma_bucket_t);
+static void bucket_cache_drain(uma_zone_t zone);
+static int keg_ctor(void *, int, void *, int);
+static void keg_dtor(void *, int, void *);
+static int zone_ctor(void *, int, void *, int);
+static void zone_dtor(void *, int, void *);
+static int zero_init(void *, int, int);
+static void keg_small_init(uma_keg_t keg);
+static void keg_large_init(uma_keg_t keg);
+static void zone_foreach(void (*zfunc)(uma_zone_t));
+static void zone_timeout(uma_zone_t zone);
+static int hash_alloc(struct uma_hash *);
+static int hash_expand(struct uma_hash *, struct uma_hash *);
+static void hash_free(struct uma_hash *hash);
+static void *zone_alloc_item(uma_zone_t, void *, int);
+static void zone_free_item(uma_zone_t, void *, void *, enum zfreeskip,
+ int);
+static void bucket_init(void);
+static uma_bucket_t bucket_alloc(int, int);
+static void bucket_free(uma_bucket_t);
+static void bucket_zone_drain(void);
+static int zone_alloc_bucket(uma_zone_t zone, int flags);
+static uma_slab_t zone_fetch_slab(uma_zone_t zone, uma_keg_t last, int flags);
+static uma_slab_t zone_fetch_slab_multi(uma_zone_t zone, uma_keg_t last, int flags);
+static void *slab_alloc_item(uma_zone_t zone, uma_slab_t slab);
+static uma_keg_t uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit,
+ uma_fini fini, int align, u_int32_t flags);
+static inline void zone_relock(uma_zone_t zone, uma_keg_t keg);
+static inline void keg_relock(uma_keg_t keg, uma_zone_t zone);
+
+void uma_print_zone(uma_zone_t);
+void uma_print_stats(void);
+
+/*
+ * Initialize bucket_zones, the array of zones of buckets of various sizes.
+ *
+ * For each zone, calculate the memory required for each bucket, consisting
+ * of the header and an array of pointers. Initialize bucket_size[] so that
+ * each range of requested bucket sizes maps to the appropriate zone.
+ */
+static void
+bucket_init(void)
+{
+ struct uma_bucket_zone *ubz;
+ int i;
+ int j;
+
+ for (i = 0, j = 0; bucket_zones[j].ubz_entries != 0; j++) {
+ int size;
+
+ ubz = &bucket_zones[j];
+ size = roundup(sizeof(struct uma_bucket), sizeof(void *));
+ size += sizeof(void *) * ubz->ubz_entries;
+ ubz->ubz_zone = uma_zcreate(ubz->ubz_name, size,
+ NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
+ UMA_ZFLAG_INTERNAL | UMA_ZFLAG_BUCKET);
+ for (; i <= ubz->ubz_entries; i += (1 << BUCKET_SHIFT))
+ bucket_size[i >> BUCKET_SHIFT] = j;
+ }
+}
+
+/*
+ * Given a desired number of entries for a bucket, return the zone from which
+ * to allocate the bucket.
+ */
+static struct uma_bucket_zone *
+bucket_zone_lookup(int entries)
+{
+ int idx;
+
+ idx = howmany(entries, 1 << BUCKET_SHIFT);
+ return (&bucket_zones[bucket_size[idx]]);
+}
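+
+/*
+ * Worked example (with BUCKET_SHIFT == 4, i.e. 16-entry steps, as defined
+ * above): a request for 20 entries gives idx = howmany(20, 16) = 2;
+ * bucket_init() filled bucket_size[2] with the index of the "32 Bucket"
+ * zone, so the request is rounded up to the next larger bucket zone.
+ */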
+
+static uma_bucket_t
+bucket_alloc(int entries, int bflags)
+{
+ struct uma_bucket_zone *ubz;
+ uma_bucket_t bucket;
+
+ /*
+ * This is to stop us from allocating per cpu buckets while we're
+ * running out of vm.boot_pages. Otherwise, we would exhaust the
+ * boot pages. This also prevents us from allocating buckets in
+ * low memory situations.
+ */
+ if (bucketdisable)
+ return (NULL);
+
+ ubz = bucket_zone_lookup(entries);
+ bucket = zone_alloc_item(ubz->ubz_zone, NULL, bflags);
+ if (bucket) {
+#ifdef INVARIANTS
+ bzero(bucket->ub_bucket, sizeof(void *) * ubz->ubz_entries);
+#endif
+ bucket->ub_cnt = 0;
+ bucket->ub_entries = ubz->ubz_entries;
+ }
+
+ return (bucket);
+}
+
+static void
+bucket_free(uma_bucket_t bucket)
+{
+ struct uma_bucket_zone *ubz;
+
+ ubz = bucket_zone_lookup(bucket->ub_entries);
+ zone_free_item(ubz->ubz_zone, bucket, NULL, SKIP_NONE,
+ ZFREE_STATFREE);
+}
+
+static void
+bucket_zone_drain(void)
+{
+ struct uma_bucket_zone *ubz;
+
+ for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++)
+ zone_drain(ubz->ubz_zone);
+}
+
+static inline uma_keg_t
+zone_first_keg(uma_zone_t zone)
+{
+
+ return (LIST_FIRST(&zone->uz_kegs)->kl_keg);
+}
+
+static void
+zone_foreach_keg(uma_zone_t zone, void (*kegfn)(uma_keg_t))
+{
+ uma_klink_t klink;
+
+ LIST_FOREACH(klink, &zone->uz_kegs, kl_link)
+ kegfn(klink->kl_keg);
+}
+
+/*
+ * Routine to perform timeout driven calculations. This expands the
+ * hashes and does per cpu statistics aggregation.
+ *
+ * Returns nothing.
+ */
+static void
+keg_timeout(uma_keg_t keg)
+{
+
+ KEG_LOCK(keg);
+ /*
+ * Expand the keg hash table.
+ *
+ * This is done if the number of slabs is larger than the hash size.
+ * What I'm trying to do here is completely reduce collisions. This
+ * may be a little aggressive. Should I allow for two collisions max?
+ */
+ if (keg->uk_flags & UMA_ZONE_HASH &&
+ keg->uk_pages / keg->uk_ppera >= keg->uk_hash.uh_hashsize) {
+ struct uma_hash newhash;
+ struct uma_hash oldhash;
+ int ret;
+
+ /*
+ * This is so involved because allocating and freeing
+ * while the keg lock is held will lead to deadlock.
+ * I have to do everything in stages and check for
+ * races.
+ */
+ newhash = keg->uk_hash;
+ KEG_UNLOCK(keg);
+ ret = hash_alloc(&newhash);
+ KEG_LOCK(keg);
+ if (ret) {
+ if (hash_expand(&keg->uk_hash, &newhash)) {
+ oldhash = keg->uk_hash;
+ keg->uk_hash = newhash;
+ } else
+ oldhash = newhash;
+
+ KEG_UNLOCK(keg);
+ hash_free(&oldhash);
+ KEG_LOCK(keg);
+ }
+ }
+ KEG_UNLOCK(keg);
+}
+
+static void
+zone_timeout(uma_zone_t zone)
+{
+
+ zone_foreach_keg(zone, &keg_timeout);
+}
+
+/*
+ * Allocate and zero fill the next sized hash table from the appropriate
+ * backing store.
+ *
+ * Arguments:
+ * hash A new hash structure with the old hash size in uh_hashsize
+ *
+ * Returns:
+ *	1 on success and 0 on failure.
+ */
+static int
+hash_alloc(struct uma_hash *hash)
+{
+ int oldsize;
+ int alloc;
+
+ oldsize = hash->uh_hashsize;
+
+ /* We're just going to go to a power of two greater */
+ if (oldsize) {
+ hash->uh_hashsize = oldsize * 2;
+ alloc = sizeof(hash->uh_slab_hash[0]) * hash->uh_hashsize;
+ hash->uh_slab_hash = (struct slabhead *)malloc(alloc,
+ M_UMAHASH, M_NOWAIT);
+ } else {
+ alloc = sizeof(hash->uh_slab_hash[0]) * UMA_HASH_SIZE_INIT;
+ hash->uh_slab_hash = zone_alloc_item(hashzone, NULL,
+ M_WAITOK);
+ hash->uh_hashsize = UMA_HASH_SIZE_INIT;
+ }
+ if (hash->uh_slab_hash) {
+ bzero(hash->uh_slab_hash, alloc);
+ hash->uh_hashmask = hash->uh_hashsize - 1;
+ return (1);
+ }
+
+ return (0);
+}
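+
+/*
+ * Growth sketch: the first allocation for a keg comes from the fixed-size
+ * hashzone (UMA_HASH_SIZE_INIT buckets); every later call doubles
+ * uh_hashsize and takes the table from malloc(M_UMAHASH) instead, so the
+ * table grows 32 -> 64 -> 128 ... buckets (assuming UMA_HASH_SIZE_INIT is
+ * 32, as in stock FreeBSD).
+ */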
+
+/*
+ * Expands the hash table for HASH zones. This is done from zone_timeout
+ * to reduce collisions. This must not be done in the regular allocation
+ * path, otherwise, we can recurse on the vm while allocating pages.
+ *
+ * Arguments:
+ * oldhash The hash you want to expand
+ * newhash The hash structure for the new table
+ *
+ * Returns:
+ *	1 on success, 0 on failure.
+ */
+static int
+hash_expand(struct uma_hash *oldhash, struct uma_hash *newhash)
+{
+ uma_slab_t slab;
+ int hval;
+ int i;
+
+ if (!newhash->uh_slab_hash)
+ return (0);
+
+ if (oldhash->uh_hashsize >= newhash->uh_hashsize)
+ return (0);
+
+ /*
+ * I need to investigate hash algorithms for resizing without a
+ * full rehash.
+ */
+
+ for (i = 0; i < oldhash->uh_hashsize; i++)
+ while (!SLIST_EMPTY(&oldhash->uh_slab_hash[i])) {
+ slab = SLIST_FIRST(&oldhash->uh_slab_hash[i]);
+ SLIST_REMOVE_HEAD(&oldhash->uh_slab_hash[i], us_hlink);
+ hval = UMA_HASH(newhash, slab->us_data);
+ SLIST_INSERT_HEAD(&newhash->uh_slab_hash[hval],
+ slab, us_hlink);
+ }
+
+ return (1);
+}
+
+/*
+ * Free the hash bucket to the appropriate backing store.
+ *
+ * Arguments:
+ *	hash	The hash structure whose bucket storage is freed
+ *
+ * Returns:
+ * Nothing
+ */
+static void
+hash_free(struct uma_hash *hash)
+{
+ if (hash->uh_slab_hash == NULL)
+ return;
+ if (hash->uh_hashsize == UMA_HASH_SIZE_INIT)
+ zone_free_item(hashzone,
+ hash->uh_slab_hash, NULL, SKIP_NONE, ZFREE_STATFREE);
+ else
+ free(hash->uh_slab_hash, M_UMAHASH);
+}
+
+/*
+ * Frees all outstanding items in a bucket
+ *
+ * Arguments:
+ * zone The zone to free to, must be unlocked.
+ * bucket The free/alloc bucket with items, cpu queue must be locked.
+ *
+ * Returns:
+ * Nothing
+ */
+
+static void
+bucket_drain(uma_zone_t zone, uma_bucket_t bucket)
+{
+ void *item;
+
+ if (bucket == NULL)
+ return;
+
+ while (bucket->ub_cnt > 0) {
+ bucket->ub_cnt--;
+ item = bucket->ub_bucket[bucket->ub_cnt];
+#ifdef INVARIANTS
+ bucket->ub_bucket[bucket->ub_cnt] = NULL;
+ KASSERT(item != NULL,
+ ("bucket_drain: botched ptr, item is NULL"));
+#endif
+ zone_free_item(zone, item, NULL, SKIP_DTOR, 0);
+ }
+}
+
+/*
+ * Drains the per cpu caches for a zone.
+ *
+ * NOTE: This may only be called while the zone is being torn down, and not
+ * during normal operation. This is necessary in order that we do not have
+ * to migrate CPUs to drain the per-CPU caches.
+ *
+ * Arguments:
+ * zone The zone to drain, must be unlocked.
+ *
+ * Returns:
+ * Nothing
+ */
+static void
+cache_drain(uma_zone_t zone)
+{
+ uma_cache_t cache;
+ int cpu;
+
+ /*
+ * XXX: It is safe to not lock the per-CPU caches, because we're
+ * tearing down the zone anyway. I.e., there will be no further use
+ * of the caches at this point.
+ *
+	 * XXX: It would be good to be able to assert that the zone is being
+ * torn down to prevent improper use of cache_drain().
+ *
+ * XXX: We lock the zone before passing into bucket_cache_drain() as
+ * it is used elsewhere. Should the tear-down path be made special
+ * there in some form?
+ */
+ for (cpu = 0; cpu <= mp_maxid; cpu++) {
+ if (CPU_ABSENT(cpu))
+ continue;
+ cache = &zone->uz_cpu[cpu];
+ bucket_drain(zone, cache->uc_allocbucket);
+ bucket_drain(zone, cache->uc_freebucket);
+ if (cache->uc_allocbucket != NULL)
+ bucket_free(cache->uc_allocbucket);
+ if (cache->uc_freebucket != NULL)
+ bucket_free(cache->uc_freebucket);
+ cache->uc_allocbucket = cache->uc_freebucket = NULL;
+ }
+ ZONE_LOCK(zone);
+ bucket_cache_drain(zone);
+ ZONE_UNLOCK(zone);
+}
+
+/*
+ * Drain the cached buckets from a zone. Expects a locked zone on entry.
+ */
+static void
+bucket_cache_drain(uma_zone_t zone)
+{
+ uma_bucket_t bucket;
+
+ /*
+ * Drain the bucket queues and free the buckets, we just keep two per
+ * cpu (alloc/free).
+ */
+ while ((bucket = LIST_FIRST(&zone->uz_full_bucket)) != NULL) {
+ LIST_REMOVE(bucket, ub_link);
+ ZONE_UNLOCK(zone);
+ bucket_drain(zone, bucket);
+ bucket_free(bucket);
+ ZONE_LOCK(zone);
+ }
+
+	/* Now we do the free queue. */
+ while ((bucket = LIST_FIRST(&zone->uz_free_bucket)) != NULL) {
+ LIST_REMOVE(bucket, ub_link);
+ bucket_free(bucket);
+ }
+}
+
+/*
+ * Frees pages from a keg back to the system. This is done on demand from
+ * the pageout daemon.
+ *
+ * Returns nothing.
+ */
+static void
+keg_drain(uma_keg_t keg)
+{
+ struct slabhead freeslabs = { 0 };
+ uma_slab_t slab;
+ uma_slab_t n;
+ u_int8_t flags;
+ u_int8_t *mem;
+ int i;
+
+ /*
+ * We don't want to take pages from statically allocated kegs at this
+ * time
+ */
+ if (keg->uk_flags & UMA_ZONE_NOFREE || keg->uk_freef == NULL)
+ return;
+
+#ifdef UMA_DEBUG
+ printf("%s free items: %u\n", keg->uk_name, keg->uk_free);
+#endif
+ KEG_LOCK(keg);
+ if (keg->uk_free == 0)
+ goto finished;
+
+ slab = LIST_FIRST(&keg->uk_free_slab);
+ while (slab) {
+ n = LIST_NEXT(slab, us_link);
+
+		/* We have nowhere to free these to */
+ if (slab->us_flags & UMA_SLAB_BOOT) {
+ slab = n;
+ continue;
+ }
+
+ LIST_REMOVE(slab, us_link);
+ keg->uk_pages -= keg->uk_ppera;
+ keg->uk_free -= keg->uk_ipers;
+
+ if (keg->uk_flags & UMA_ZONE_HASH)
+ UMA_HASH_REMOVE(&keg->uk_hash, slab, slab->us_data);
+
+ SLIST_INSERT_HEAD(&freeslabs, slab, us_hlink);
+
+ slab = n;
+ }
+finished:
+ KEG_UNLOCK(keg);
+
+ while ((slab = SLIST_FIRST(&freeslabs)) != NULL) {
+ SLIST_REMOVE(&freeslabs, slab, uma_slab, us_hlink);
+ if (keg->uk_fini)
+ for (i = 0; i < keg->uk_ipers; i++)
+ keg->uk_fini(
+ slab->us_data + (keg->uk_rsize * i),
+ keg->uk_size);
+ flags = slab->us_flags;
+ mem = slab->us_data;
+
+ if (keg->uk_flags & UMA_ZONE_OFFPAGE)
+ zone_free_item(keg->uk_slabzone, slab, NULL,
+ SKIP_NONE, ZFREE_STATFREE);
+#ifdef UMA_DEBUG
+ printf("%s: Returning %d bytes.\n",
+ keg->uk_name, UMA_SLAB_SIZE * keg->uk_ppera);
+#endif
+ keg->uk_freef(mem, UMA_SLAB_SIZE * keg->uk_ppera, flags);
+ }
+}
+
+static void
+zone_drain_wait(uma_zone_t zone, int waitok)
+{
+
+ /*
+ * Set draining to interlock with zone_dtor() so we can release our
+ * locks as we go. Only dtor() should do a WAITOK call since it
+ * is the only call that knows the structure will still be available
+ * when it wakes up.
+ */
+ ZONE_LOCK(zone);
+ while (zone->uz_flags & UMA_ZFLAG_DRAINING) {
+ if (waitok == M_NOWAIT)
+ goto out;
+ mtx_unlock(&uma_mtx);
+ msleep(zone, zone->uz_lock, PVM, "zonedrain", 1);
+ mtx_lock(&uma_mtx);
+ }
+ zone->uz_flags |= UMA_ZFLAG_DRAINING;
+ bucket_cache_drain(zone);
+ ZONE_UNLOCK(zone);
+ /*
+ * The DRAINING flag protects us from being freed while
+ * we're running. Normally the uma_mtx would protect us but we
+ * must be able to release and acquire the right lock for each keg.
+ */
+ zone_foreach_keg(zone, &keg_drain);
+ ZONE_LOCK(zone);
+ zone->uz_flags &= ~UMA_ZFLAG_DRAINING;
+ wakeup(zone);
+out:
+ ZONE_UNLOCK(zone);
+}
+
+void
+zone_drain(uma_zone_t zone)
+{
+
+ zone_drain_wait(zone, M_NOWAIT);
+}
+
+/*
+ * Allocate a new slab for a keg. This does not insert the slab onto a list.
+ *
+ * Arguments:
+ * wait Shall we wait?
+ *
+ * Returns:
+ * The slab that was allocated or NULL if there is no memory and the
+ * caller specified M_NOWAIT.
+ */
+static uma_slab_t
+keg_alloc_slab(uma_keg_t keg, uma_zone_t zone, int wait)
+{
+ uma_slabrefcnt_t slabref;
+ uma_alloc allocf;
+ uma_slab_t slab;
+ u_int8_t *mem;
+ u_int8_t flags;
+ int i;
+
+ mtx_assert(&keg->uk_lock, MA_OWNED);
+ slab = NULL;
+
+#ifdef UMA_DEBUG
+ printf("slab_zalloc: Allocating a new slab for %s\n", keg->uk_name);
+#endif
+ allocf = keg->uk_allocf;
+ KEG_UNLOCK(keg);
+
+ if (keg->uk_flags & UMA_ZONE_OFFPAGE) {
+ slab = zone_alloc_item(keg->uk_slabzone, NULL, wait);
+ if (slab == NULL) {
+ KEG_LOCK(keg);
+ return NULL;
+ }
+ }
+
+ /*
+ * This reproduces the old vm_zone behavior of zero filling pages the
+ * first time they are added to a zone.
+ *
+ * Malloced items are zeroed in uma_zalloc.
+ */
+
+ if ((keg->uk_flags & UMA_ZONE_MALLOC) == 0)
+ wait |= M_ZERO;
+ else
+ wait &= ~M_ZERO;
+
+ /* zone is passed for legacy reasons. */
+ mem = allocf(zone, keg->uk_ppera * UMA_SLAB_SIZE, &flags, wait);
+ if (mem == NULL) {
+ if (keg->uk_flags & UMA_ZONE_OFFPAGE)
+ zone_free_item(keg->uk_slabzone, slab, NULL,
+ SKIP_NONE, ZFREE_STATFREE);
+ KEG_LOCK(keg);
+ return (NULL);
+ }
+
+ /* Point the slab into the allocated memory */
+ if (!(keg->uk_flags & UMA_ZONE_OFFPAGE))
+		slab = (uma_slab_t)(mem + keg->uk_pgoff);
+
+ slab->us_keg = keg;
+ slab->us_data = mem;
+ slab->us_freecount = keg->uk_ipers;
+ slab->us_firstfree = 0;
+ slab->us_flags = flags;
+
+ if (keg->uk_flags & UMA_ZONE_REFCNT) {
+ slabref = (uma_slabrefcnt_t)slab;
+ for (i = 0; i < keg->uk_ipers; i++) {
+ slabref->us_freelist[i].us_refcnt = 0;
+ slabref->us_freelist[i].us_item = i+1;
+ }
+ } else {
+ for (i = 0; i < keg->uk_ipers; i++)
+ slab->us_freelist[i].us_item = i+1;
+ }
+
+ if (keg->uk_init != NULL) {
+ for (i = 0; i < keg->uk_ipers; i++)
+ if (keg->uk_init(slab->us_data + (keg->uk_rsize * i),
+ keg->uk_size, wait) != 0)
+ break;
+ if (i != keg->uk_ipers) {
+ if (keg->uk_fini != NULL) {
+ for (i--; i > -1; i--)
+ keg->uk_fini(slab->us_data +
+ (keg->uk_rsize * i),
+ keg->uk_size);
+ }
+ if (keg->uk_flags & UMA_ZONE_OFFPAGE)
+ zone_free_item(keg->uk_slabzone, slab,
+ NULL, SKIP_NONE, ZFREE_STATFREE);
+ keg->uk_freef(mem, UMA_SLAB_SIZE * keg->uk_ppera,
+ flags);
+ KEG_LOCK(keg);
+ return (NULL);
+ }
+ }
+ KEG_LOCK(keg);
+
+ if (keg->uk_flags & UMA_ZONE_HASH)
+ UMA_HASH_INSERT(&keg->uk_hash, slab, mem);
+
+ keg->uk_pages += keg->uk_ppera;
+ keg->uk_free += keg->uk_ipers;
+
+ return (slab);
+}
+
+/*
+ * This function is intended to be used early on in place of page_alloc() so
+ * that we may use the boot time page cache to satisfy allocations before
+ * the VM is ready.
+ */
+static void *
+startup_alloc(uma_zone_t zone, int bytes, u_int8_t *pflag, int wait)
+{
+ uma_keg_t keg;
+ uma_slab_t tmps;
+ int pages, check_pages;
+
+ keg = zone_first_keg(zone);
+ pages = howmany(bytes, PAGE_SIZE);
+ check_pages = pages - 1;
+ KASSERT(pages > 0, ("startup_alloc can't reserve 0 pages\n"));
+
+ /*
+ * Check our small startup cache to see if it has pages remaining.
+ */
+ mtx_lock(&uma_boot_pages_mtx);
+
+ /* First check if we have enough room. */
+ tmps = LIST_FIRST(&uma_boot_pages);
+ while (tmps != NULL && check_pages-- > 0)
+ tmps = LIST_NEXT(tmps, us_link);
+ if (tmps != NULL) {
+ /*
+ * It's ok to lose tmps references. The last one will
+ * have tmps->us_data pointing to the start address of
+ * "pages" contiguous pages of memory.
+ */
+ while (pages-- > 0) {
+ tmps = LIST_FIRST(&uma_boot_pages);
+ LIST_REMOVE(tmps, us_link);
+ }
+ mtx_unlock(&uma_boot_pages_mtx);
+ *pflag = tmps->us_flags;
+ return (tmps->us_data);
+ }
+ mtx_unlock(&uma_boot_pages_mtx);
+ if (booted == 0)
+ panic("UMA: Increase vm.boot_pages");
+ /*
+ * Now that we've booted reset these users to their real allocator.
+ */
+#ifdef UMA_MD_SMALL_ALLOC
+ keg->uk_allocf = (keg->uk_ppera > 1) ? page_alloc : uma_small_alloc;
+#else
+ keg->uk_allocf = page_alloc;
+#endif
+ return keg->uk_allocf(zone, bytes, pflag, wait);
+}
+
+/*
+ * Allocates a number of pages from the system
+ *
+ * Arguments:
+ * bytes The number of bytes requested
+ * wait Shall we wait?
+ *
+ * Returns:
+ * A pointer to the alloced memory or possibly
+ * NULL if M_NOWAIT is set.
+ */
+static void *
+page_alloc(uma_zone_t zone, int bytes, u_int8_t *pflag, int wait)
+{
+ void *p; /* Returned page */
+
+ *pflag = UMA_SLAB_KMEM;
+ p = (void *) malloc(bytes, M_TEMP, wait);
+
+ return (p);
+}
+
+/*
+ * Frees a number of pages to the system
+ *
+ * Arguments:
+ * mem A pointer to the memory to be freed
+ * size The size of the memory being freed
+ * flags The original p->us_flags field
+ *
+ * Returns:
+ * Nothing
+ */
+static void
+page_free(void *mem, int size, u_int8_t flags)
+{
+	free(mem, M_TEMP);
+}
+
+/*
+ * Zero fill initializer
+ *
+ * Arguments/Returns follow uma_init specifications
+ */
+static int
+zero_init(void *mem, int size, int flags)
+{
+ bzero(mem, size);
+ return (0);
+}
+
+/*
+ * Finish creating a small uma keg. This calculates ipers, and the keg size.
+ *
+ * Arguments
+ * keg The zone we should initialize
+ *
+ * Returns
+ * Nothing
+ */
+static void
+keg_small_init(uma_keg_t keg)
+{
+ u_int rsize;
+ u_int memused;
+ u_int wastedspace;
+ u_int shsize;
+
+ KASSERT(keg != NULL, ("Keg is null in keg_small_init"));
+ rsize = keg->uk_size;
+
+ if (rsize < UMA_SMALLEST_UNIT)
+ rsize = UMA_SMALLEST_UNIT;
+ if (rsize & keg->uk_align)
+ rsize = (rsize & ~keg->uk_align) + (keg->uk_align + 1);
+
+ keg->uk_rsize = rsize;
+ keg->uk_ppera = 1;
+
+ if (keg->uk_flags & UMA_ZONE_REFCNT) {
+ rsize += UMA_FRITMREF_SZ; /* linkage & refcnt */
+ shsize = sizeof(struct uma_slab_refcnt);
+ } else {
+ rsize += UMA_FRITM_SZ; /* Account for linkage */
+ shsize = sizeof(struct uma_slab);
+ }
+
+ keg->uk_ipers = (UMA_SLAB_SIZE - shsize) / rsize;
+ KASSERT(keg->uk_ipers != 0, ("keg_small_init: ipers is 0"));
+ memused = keg->uk_ipers * rsize + shsize;
+ wastedspace = UMA_SLAB_SIZE - memused;
+
+ /*
+ * We can't do OFFPAGE if we're internal or if we've been
+ * asked to not go to the VM for buckets. If we do this we
+ * may end up going to the VM (kmem_map) for slabs which we
+ * do not want to do if we're UMA_ZFLAG_CACHEONLY as a
+ * result of UMA_ZONE_VM, which clearly forbids it.
+ */
+ if ((keg->uk_flags & UMA_ZFLAG_INTERNAL) ||
+ (keg->uk_flags & UMA_ZFLAG_CACHEONLY))
+ return;
+
+ if ((wastedspace >= UMA_MAX_WASTE) &&
+ (keg->uk_ipers < (UMA_SLAB_SIZE / keg->uk_rsize))) {
+ keg->uk_ipers = UMA_SLAB_SIZE / keg->uk_rsize;
+ KASSERT(keg->uk_ipers <= 255,
+ ("keg_small_init: keg->uk_ipers too high!"));
+#ifdef UMA_DEBUG
+ printf("UMA decided we need offpage slab headers for "
+ "keg: %s, calculated wastedspace = %d, "
+ "maximum wasted space allowed = %d, "
+ "calculated ipers = %d, "
+ "new wasted space = %d\n", keg->uk_name, wastedspace,
+ UMA_MAX_WASTE, keg->uk_ipers,
+ UMA_SLAB_SIZE - keg->uk_ipers * keg->uk_rsize);
+#endif
+ keg->uk_flags |= UMA_ZONE_OFFPAGE;
+ if ((keg->uk_flags & UMA_ZONE_VTOSLAB) == 0)
+ keg->uk_flags |= UMA_ZONE_HASH;
+ }
+}
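+
+/*
+ * Worked example (assuming 4096-byte slabs and a pointer-alignment mask of
+ * 7): a 100-byte item is rounded up to rsize = (100 & ~7) + 8 = 104, then
+ * grows by UMA_FRITM_SZ for the embedded free-list linkage; ipers is simply
+ * how many such items fit beside the struct uma_slab header, and whatever
+ * is left over is the "wastedspace" compared against UMA_MAX_WASTE above.
+ */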
+
+/*
+ * Finish creating a large (> UMA_SLAB_SIZE) uma kegs. Just give in and do
+ * OFFPAGE for now. When I can allow for more dynamic slab sizes this will be
+ * more complicated.
+ *
+ * Arguments
+ * keg The keg we should initialize
+ *
+ * Returns
+ * Nothing
+ */
+static void
+keg_large_init(uma_keg_t keg)
+{
+ int pages;
+
+ KASSERT(keg != NULL, ("Keg is null in keg_large_init"));
+ KASSERT((keg->uk_flags & UMA_ZFLAG_CACHEONLY) == 0,
+ ("keg_large_init: Cannot large-init a UMA_ZFLAG_CACHEONLY keg"));
+
+ pages = keg->uk_size / UMA_SLAB_SIZE;
+
+ /* Account for remainder */
+ if ((pages * UMA_SLAB_SIZE) < keg->uk_size)
+ pages++;
+
+ keg->uk_ppera = pages;
+ keg->uk_ipers = 1;
+ keg->uk_rsize = keg->uk_size;
+
+ /* We can't do OFFPAGE if we're internal, bail out here. */
+ if (keg->uk_flags & UMA_ZFLAG_INTERNAL)
+ return;
+
+ keg->uk_flags |= UMA_ZONE_OFFPAGE;
+ if ((keg->uk_flags & UMA_ZONE_VTOSLAB) == 0)
+ keg->uk_flags |= UMA_ZONE_HASH;
+}
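+
+/*
+ * Worked example (assuming UMA_SLAB_SIZE == 4096): a 10000-byte item needs
+ * pages = 2 with a 1808-byte remainder, so ppera becomes 3 pages per slab
+ * holding exactly one item (ipers = 1, rsize = uk_size).
+ */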
+
+static void
+keg_cachespread_init(uma_keg_t keg)
+{
+ int alignsize;
+ int trailer;
+ int pages;
+ int rsize;
+
+ alignsize = keg->uk_align + 1;
+ rsize = keg->uk_size;
+ /*
+ * We want one item to start on every align boundary in a page. To
+ * do this we will span pages. We will also extend the item by the
+ * size of align if it is an even multiple of align. Otherwise, it
+ * would fall on the same boundary every time.
+ */
+ if (rsize & keg->uk_align)
+ rsize = (rsize & ~keg->uk_align) + alignsize;
+ if ((rsize & alignsize) == 0)
+ rsize += alignsize;
+ trailer = rsize - keg->uk_size;
+ pages = (rsize * (PAGE_SIZE / alignsize)) / PAGE_SIZE;
+ pages = MIN(pages, (128 * 1024) / PAGE_SIZE);
+ keg->uk_rsize = rsize;
+ keg->uk_ppera = pages;
+ keg->uk_ipers = ((pages * PAGE_SIZE) + trailer) / rsize;
+	/* keg->uk_flags |= UMA_ZONE_OFFPAGE | UMA_ZONE_VTOSLAB; */
+	KASSERT(keg->uk_ipers <= uma_max_ipers,
+	    ("keg_cachespread_init: keg->uk_ipers too high (%d), increase max_ipers",
+	    keg->uk_ipers));
+}
+
+/*
+ * Keg header ctor. This initializes all fields, locks, etc. And inserts
+ * the keg onto the global keg list.
+ *
+ * Arguments/Returns follow uma_ctor specifications
+ * udata Actually uma_kctor_args
+ */
+static int
+keg_ctor(void *mem, int size, void *udata, int flags)
+{
+ struct uma_kctor_args *arg = udata;
+ uma_keg_t keg = mem;
+ uma_zone_t zone;
+
+ bzero(keg, size);
+ keg->uk_size = arg->size;
+ keg->uk_init = arg->uminit;
+ keg->uk_fini = arg->fini;
+ keg->uk_align = arg->align;
+ keg->uk_free = 0;
+ keg->uk_pages = 0;
+ keg->uk_flags = arg->flags;
+ keg->uk_allocf = page_alloc;
+ keg->uk_freef = page_free;
+ keg->uk_recurse = 0;
+ keg->uk_slabzone = NULL;
+
+ /*
+ * The master zone is passed to us at keg-creation time.
+ */
+ zone = arg->zone;
+ keg->uk_name = zone->uz_name;
+
+ if (arg->flags & UMA_ZONE_VM)
+ keg->uk_flags |= UMA_ZFLAG_CACHEONLY;
+
+ if (arg->flags & UMA_ZONE_ZINIT)
+ keg->uk_init = zero_init;
+
+ /*if (arg->flags & UMA_ZONE_REFCNT || arg->flags & UMA_ZONE_MALLOC)
+ keg->uk_flags |= UMA_ZONE_VTOSLAB;*/
+
+ /*
+ * The +UMA_FRITM_SZ added to uk_size is to account for the
+ * linkage that is added to the size in keg_small_init(). If
+ * we don't account for this here then we may end up in
+ * keg_small_init() with a calculated 'ipers' of 0.
+ */
+ if (keg->uk_flags & UMA_ZONE_REFCNT) {
+ if (keg->uk_flags & UMA_ZONE_CACHESPREAD)
+ keg_cachespread_init(keg);
+ else if ((keg->uk_size+UMA_FRITMREF_SZ) >
+ (UMA_SLAB_SIZE - sizeof(struct uma_slab_refcnt)))
+ keg_large_init(keg);
+ else
+ keg_small_init(keg);
+ } else {
+ if (keg->uk_flags & UMA_ZONE_CACHESPREAD)
+ keg_cachespread_init(keg);
+ else if ((keg->uk_size+UMA_FRITM_SZ) >
+ (UMA_SLAB_SIZE - sizeof(struct uma_slab)))
+ keg_large_init(keg);
+ else
+ keg_small_init(keg);
+ }
+
+ if (keg->uk_flags & UMA_ZONE_OFFPAGE) {
+ if (keg->uk_flags & UMA_ZONE_REFCNT)
+ keg->uk_slabzone = slabrefzone;
+ else
+ keg->uk_slabzone = slabzone;
+ }
+
+ /*
+ * If we haven't booted yet we need allocations to go through the
+ * startup cache until the vm is ready.
+ */
+ if (keg->uk_ppera == 1) {
+#ifdef UMA_MD_SMALL_ALLOC
+ keg->uk_allocf = uma_small_alloc;
+ keg->uk_freef = uma_small_free;
+#endif
+ if (booted == 0)
+ keg->uk_allocf = startup_alloc;
+ } else if (booted == 0 && (keg->uk_flags & UMA_ZFLAG_INTERNAL))
+ keg->uk_allocf = startup_alloc;
+
+ /*
+ * Initialize keg's lock (shared among zones).
+ */
+ if (arg->flags & UMA_ZONE_MTXCLASS)
+ KEG_LOCK_INIT(keg, 1);
+ else
+ KEG_LOCK_INIT(keg, 0);
+
+ /*
+ * If we're putting the slab header in the actual page we need to
+ * figure out where in each page it goes. This calculates a right
+ * justified offset into the memory on an ALIGN_PTR boundary.
+ */
+ if (!(keg->uk_flags & UMA_ZONE_OFFPAGE)) {
+ u_int totsize;
+
+ /* Size of the slab struct and free list */
+ if (keg->uk_flags & UMA_ZONE_REFCNT)
+ totsize = sizeof(struct uma_slab_refcnt) +
+ keg->uk_ipers * UMA_FRITMREF_SZ;
+ else
+ totsize = sizeof(struct uma_slab) +
+ keg->uk_ipers * UMA_FRITM_SZ;
+
+ if (totsize & UMA_ALIGN_PTR)
+ totsize = (totsize & ~UMA_ALIGN_PTR) +
+ (UMA_ALIGN_PTR + 1);
+ keg->uk_pgoff = (UMA_SLAB_SIZE * keg->uk_ppera) - totsize;
+
+ if (keg->uk_flags & UMA_ZONE_REFCNT)
+ totsize = keg->uk_pgoff + sizeof(struct uma_slab_refcnt)
+ + keg->uk_ipers * UMA_FRITMREF_SZ;
+ else
+ totsize = keg->uk_pgoff + sizeof(struct uma_slab)
+ + keg->uk_ipers * UMA_FRITM_SZ;
+
+ /*
+ * The only way the following is possible is if with our
+ * UMA_ALIGN_PTR adjustments we are now bigger than
+ * UMA_SLAB_SIZE. I haven't checked whether this is
+ * mathematically possible for all cases, so we make
+ * sure here anyway.
+ */
+ if (totsize > UMA_SLAB_SIZE * keg->uk_ppera) {
+ printf("zone %s ipers %d rsize %d size %d\n",
+ zone->uz_name, keg->uk_ipers, keg->uk_rsize,
+ keg->uk_size);
+ panic("UMA slab won't fit.");
+ }
+ }
+
+ if (keg->uk_flags & UMA_ZONE_HASH)
+ hash_alloc(&keg->uk_hash);
+
+#ifdef UMA_DEBUG
+ printf("UMA: %s(%p) size %d(%d) flags %d ipers %d ppera %d out %d free %d\n",
+ zone->uz_name, zone, keg->uk_size, keg->uk_rsize, keg->uk_flags,
+ keg->uk_ipers, keg->uk_ppera,
+ (keg->uk_ipers * keg->uk_pages) - keg->uk_free, keg->uk_free);
+#endif
+
+ LIST_INSERT_HEAD(&keg->uk_zones, zone, uz_link);
+
+ mtx_lock(&uma_mtx);
+ LIST_INSERT_HEAD(&uma_kegs, keg, uk_link);
+ mtx_unlock(&uma_mtx);
+ return (0);
+}
+
+/*
+ * Zone header ctor. This initializes all fields, locks, etc.
+ *
+ * Arguments/Returns follow uma_ctor specifications
+ * udata Actually uma_zctor_args
+ */
+static int
+zone_ctor(void *mem, int size, void *udata, int flags)
+{
+ struct uma_zctor_args *arg = udata;
+ uma_zone_t zone = mem;
+ uma_zone_t z;
+ uma_keg_t keg;
+
+ bzero(zone, size);
+ zone->uz_name = arg->name;
+ zone->uz_ctor = arg->ctor;
+ zone->uz_dtor = arg->dtor;
+ zone->uz_slab = zone_fetch_slab;
+ zone->uz_init = NULL;
+ zone->uz_fini = NULL;
+ zone->uz_allocs = 0;
+ zone->uz_frees = 0;
+ zone->uz_fails = 0;
+ zone->uz_fills = zone->uz_count = 0;
+ zone->uz_flags = 0;
+ keg = arg->keg;
+
+ if (arg->flags & UMA_ZONE_SECONDARY) {
+ KASSERT(arg->keg != NULL, ("Secondary zone on zero'd keg"));
+ zone->uz_init = arg->uminit;
+ zone->uz_fini = arg->fini;
+ zone->uz_lock = &keg->uk_lock;
+ zone->uz_flags |= UMA_ZONE_SECONDARY;
+ mtx_lock(&uma_mtx);
+ ZONE_LOCK(zone);
+ LIST_FOREACH(z, &keg->uk_zones, uz_link) {
+ if (LIST_NEXT(z, uz_link) == NULL) {
+ LIST_INSERT_AFTER(z, zone, uz_link);
+ break;
+ }
+ }
+ ZONE_UNLOCK(zone);
+ mtx_unlock(&uma_mtx);
+ } else if (keg == NULL) {
+ if ((keg = uma_kcreate(zone, arg->size, arg->uminit, arg->fini,
+ arg->align, arg->flags)) == NULL)
+ return (ENOMEM);
+ } else {
+ struct uma_kctor_args karg;
+ int error;
+
+ /* We should only be here from uma_startup() */
+ karg.size = arg->size;
+ karg.uminit = arg->uminit;
+ karg.fini = arg->fini;
+ karg.align = arg->align;
+ karg.flags = arg->flags;
+ karg.zone = zone;
+ error = keg_ctor(arg->keg, sizeof(struct uma_keg), &karg,
+ flags);
+ if (error)
+ return (error);
+ }
+ /*
+ * Link in the first keg.
+ */
+ zone->uz_klink.kl_keg = keg;
+ LIST_INSERT_HEAD(&zone->uz_kegs, &zone->uz_klink, kl_link);
+ zone->uz_lock = &keg->uk_lock;
+ zone->uz_size = keg->uk_size;
+ zone->uz_flags |= (keg->uk_flags &
+ (UMA_ZONE_INHERIT | UMA_ZFLAG_INHERIT));
+
+ /*
+ * Some internal zones don't have room allocated for the per cpu
+ * caches. If we're internal, bail out here.
+ */
+ if (keg->uk_flags & UMA_ZFLAG_INTERNAL) {
+ KASSERT((zone->uz_flags & UMA_ZONE_SECONDARY) == 0,
+ ("Secondary zone requested UMA_ZFLAG_INTERNAL"));
+ return (0);
+ }
+
+ if (keg->uk_flags & UMA_ZONE_MAXBUCKET)
+ zone->uz_count = BUCKET_MAX;
+ else if (keg->uk_ipers <= BUCKET_MAX)
+ zone->uz_count = keg->uk_ipers;
+ else
+ zone->uz_count = BUCKET_MAX;
+ return (0);
+}
+
+/*
+ * Keg header dtor. This frees all data, destroys locks, frees the hash
+ * table and removes the keg from the global list.
+ *
+ * Arguments/Returns follow uma_dtor specifications
+ * udata unused
+ */
+static void
+keg_dtor(void *arg, int size, void *udata)
+{
+ uma_keg_t keg;
+
+ keg = (uma_keg_t)arg;
+ KEG_LOCK(keg);
+ if (keg->uk_free != 0) {
+ printf("Freed UMA keg was not empty (%d items). "
+ " Lost %d pages of memory.\n",
+ keg->uk_free, keg->uk_pages);
+ }
+ KEG_UNLOCK(keg);
+
+ hash_free(&keg->uk_hash);
+
+ KEG_LOCK_FINI(keg);
+}
+
+/*
+ * Zone header dtor.
+ *
+ * Arguments/Returns follow uma_dtor specifications
+ * udata unused
+ */
+static void
+zone_dtor(void *arg, int size, void *udata)
+{
+ uma_klink_t klink;
+ uma_zone_t zone;
+ uma_keg_t keg;
+
+ zone = (uma_zone_t)arg;
+ keg = zone_first_keg(zone);
+
+ if (!(zone->uz_flags & UMA_ZFLAG_INTERNAL))
+ cache_drain(zone);
+
+ mtx_lock(&uma_mtx);
+ LIST_REMOVE(zone, uz_link);
+ mtx_unlock(&uma_mtx);
+ /*
+ * XXX there are some races here where
+ * the zone can be drained but zone lock
+ * released and then refilled before we
+	 * remove it... we don't care for now
+ */
+ zone_drain_wait(zone, M_WAITOK);
+ /*
+ * Unlink all of our kegs.
+ */
+ while ((klink = LIST_FIRST(&zone->uz_kegs)) != NULL) {
+ klink->kl_keg = NULL;
+ LIST_REMOVE(klink, kl_link);
+ if (klink == &zone->uz_klink)
+ continue;
+ free(klink, M_TEMP);
+ }
+ /*
+ * We only destroy kegs from non secondary zones.
+ */
+ if ((zone->uz_flags & UMA_ZONE_SECONDARY) == 0) {
+ mtx_lock(&uma_mtx);
+ LIST_REMOVE(keg, uk_link);
+ mtx_unlock(&uma_mtx);
+ zone_free_item(kegs, keg, NULL, SKIP_NONE,
+ ZFREE_STATFREE);
+ }
+}
+
+/*
+ * Traverses every zone in the system and calls a callback
+ *
+ * Arguments:
+ * zfunc A pointer to a function which accepts a zone
+ * as an argument.
+ *
+ * Returns:
+ * Nothing
+ */
+static void
+zone_foreach(void (*zfunc)(uma_zone_t))
+{
+ uma_keg_t keg;
+ uma_zone_t zone;
+
+ mtx_lock(&uma_mtx);
+ LIST_FOREACH(keg, &uma_kegs, uk_link) {
+ LIST_FOREACH(zone, &keg->uk_zones, uz_link)
+ zfunc(zone);
+ }
+ mtx_unlock(&uma_mtx);
+}
+
+/* Public functions */
+/* See uma.h */
+void
+uma_startup(void *bootmem, int boot_pages)
+{
+ struct uma_zctor_args args;
+ uma_slab_t slab;
+ u_int slabsize;
+ u_int objsize, totsize, wsize;
+ int i;
+
+#ifdef UMA_DEBUG
+ printf("Creating uma keg headers zone and keg.\n");
+#endif
+ mtx_init(&uma_mtx, "UMA lock", NULL, MTX_DEF);
+
+ /*
+ * Figure out the maximum number of items-per-slab we'll have if
+ * we're using the OFFPAGE slab header to track free items, given
+ * all possible object sizes and the maximum desired wastage
+ * (UMA_MAX_WASTE).
+ *
+ * We iterate until we find an object size for
+ * which the calculated wastage in keg_small_init() will be
+ * enough to warrant OFFPAGE. Since wastedspace versus objsize
+ * is an overall increasing see-saw function, we find the smallest
+ * objsize such that the wastage is always acceptable for objects
+ * with that objsize or smaller. Since a smaller objsize always
+ * generates a larger possible uma_max_ipers, we use this computed
+ * objsize to calculate the largest ipers possible. Since the
+ * ipers calculated for OFFPAGE slab headers is always larger than
+ * the ipers initially calculated in keg_small_init(), we use
+ * the former's equation (UMA_SLAB_SIZE / keg->uk_rsize) to
+ * obtain the maximum ipers possible for offpage slab headers.
+ *
+	 * It should be noted that ipers versus objsize is an inversely
+ * proportional function which drops off rather quickly so as
+ * long as our UMA_MAX_WASTE is such that the objsize we calculate
+ * falls into the portion of the inverse relation AFTER the steep
+ * falloff, then uma_max_ipers shouldn't be too high (~10 on i386).
+ *
+ * Note that we have 8-bits (1 byte) to use as a freelist index
+ * inside the actual slab header itself and this is enough to
+	 * accommodate us. In the worst case, a UMA_SMALLEST_UNIT sized
+ * object with offpage slab header would have ipers =
+ * UMA_SLAB_SIZE / UMA_SMALLEST_UNIT (currently = 256), which is
+ * 1 greater than what our byte-integer freelist index can
+	 * accommodate, but we know that this situation never occurs as
+ * for UMA_SMALLEST_UNIT-sized objects, we will never calculate
+ * that we need to go to offpage slab headers. Or, if we do,
+ * then we trap that condition below and panic in the INVARIANTS case.
+ */
+ wsize = UMA_SLAB_SIZE - sizeof(struct uma_slab) - UMA_MAX_WASTE;
+ totsize = wsize;
+ objsize = UMA_SMALLEST_UNIT;
+ while (totsize >= wsize) {
+ totsize = (UMA_SLAB_SIZE - sizeof(struct uma_slab)) /
+ (objsize + UMA_FRITM_SZ);
+ totsize *= (UMA_FRITM_SZ + objsize);
+ objsize++;
+ }
+ if (objsize > UMA_SMALLEST_UNIT)
+ objsize--;
+ uma_max_ipers = MAX(UMA_SLAB_SIZE / objsize, 64);
+
+ wsize = UMA_SLAB_SIZE - sizeof(struct uma_slab_refcnt) - UMA_MAX_WASTE;
+ totsize = wsize;
+ objsize = UMA_SMALLEST_UNIT;
+ while (totsize >= wsize) {
+ totsize = (UMA_SLAB_SIZE - sizeof(struct uma_slab_refcnt)) /
+ (objsize + UMA_FRITMREF_SZ);
+ totsize *= (UMA_FRITMREF_SZ + objsize);
+ objsize++;
+ }
+ if (objsize > UMA_SMALLEST_UNIT)
+ objsize--;
+ uma_max_ipers_ref = MAX(UMA_SLAB_SIZE / objsize, 64);
+
+ KASSERT((uma_max_ipers_ref <= 255) && (uma_max_ipers <= 255),
+ ("uma_startup: calculated uma_max_ipers values too large!"));
+
+#ifdef UMA_DEBUG
+ printf("Calculated uma_max_ipers (for OFFPAGE) is %d\n", uma_max_ipers);
+ printf("Calculated uma_max_ipers_slab (for OFFPAGE) is %d\n",
+ uma_max_ipers_ref);
+#endif
+
+ /* "manually" create the initial zone */
+ args.name = "UMA Kegs";
+ args.size = sizeof(struct uma_keg);
+ args.ctor = keg_ctor;
+ args.dtor = keg_dtor;
+ args.uminit = zero_init;
+ args.fini = NULL;
+ args.keg = &masterkeg;
+ args.align = 32 - 1;
+ args.flags = UMA_ZFLAG_INTERNAL;
+	/* The initial zone has no per-CPU queues so it's smaller */
+ zone_ctor(kegs, sizeof(struct uma_zone), &args, M_WAITOK);
+
+#ifdef UMA_DEBUG
+ printf("Filling boot free list.\n");
+#endif
+ for (i = 0; i < boot_pages; i++) {
+ slab = (uma_slab_t)((u_int8_t *)bootmem + (i * UMA_SLAB_SIZE));
+ slab->us_data = (u_int8_t *)slab;
+ slab->us_flags = UMA_SLAB_BOOT;
+ LIST_INSERT_HEAD(&uma_boot_pages, slab, us_link);
+ }
+ mtx_init(&uma_boot_pages_mtx, "UMA boot pages", NULL, MTX_DEF);
+
+#ifdef UMA_DEBUG
+ printf("Creating uma zone headers zone and keg.\n");
+#endif
+ args.name = "UMA Zones";
+ args.size = sizeof(struct uma_zone) +
+ (sizeof(struct uma_cache) * (mp_maxid + 1));
+ args.ctor = zone_ctor;
+ args.dtor = zone_dtor;
+ args.uminit = zero_init;
+ args.fini = NULL;
+ args.keg = NULL;
+ args.align = 32 - 1;
+ args.flags = UMA_ZFLAG_INTERNAL;
+	/* The initial zone has no per-CPU queues so it's smaller */
+ zone_ctor(zones, sizeof(struct uma_zone), &args, M_WAITOK);
+
+#ifdef UMA_DEBUG
+ printf("Initializing pcpu cache locks.\n");
+#endif
+#ifdef UMA_DEBUG
+ printf("Creating slab and hash zones.\n");
+#endif
+
+ /*
+ * This is the max number of free list items we'll have with
+ * offpage slabs.
+ */
+ slabsize = uma_max_ipers * UMA_FRITM_SZ;
+ slabsize += sizeof(struct uma_slab);
+
+ /* Now make a zone for slab headers */
+ slabzone = uma_zcreate("UMA Slabs",
+ slabsize,
+ NULL, NULL, NULL, NULL,
+ UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);
+
+ /*
+ * We also create a zone for the bigger slabs with reference
+ * counts in them, to accommodate UMA_ZONE_REFCNT zones.
+ */
+ slabsize = uma_max_ipers_ref * UMA_FRITMREF_SZ;
+ slabsize += sizeof(struct uma_slab_refcnt);
+ slabrefzone = uma_zcreate("UMA RCntSlabs",
+ slabsize,
+ NULL, NULL, NULL, NULL,
+ UMA_ALIGN_PTR,
+ UMA_ZFLAG_INTERNAL);
+
+ hashzone = uma_zcreate("UMA Hash",
+ sizeof(struct slabhead *) * UMA_HASH_SIZE_INIT,
+ NULL, NULL, NULL, NULL,
+ UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);
+
+ bucket_init();
+
+#if defined(UMA_MD_SMALL_ALLOC) && !defined(UMA_MD_SMALL_ALLOC_NEEDS_VM)
+ booted = 1;
+#endif
+
+#ifdef UMA_DEBUG
+ printf("UMA startup complete.\n");
+#endif
+}
+
+static uma_keg_t
+uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit, uma_fini fini,
+ int align, u_int32_t flags)
+{
+ struct uma_kctor_args args;
+
+ args.size = size;
+ args.uminit = uminit;
+ args.fini = fini;
+ args.align = (align == UMA_ALIGN_CACHE) ? uma_align_cache : align;
+ args.flags = flags;
+ args.zone = zone;
+ return (zone_alloc_item(kegs, &args, M_WAITOK));
+}
+
+/* See uma.h */
+void
+uma_set_align(int align)
+{
+
+ if (align != UMA_ALIGN_CACHE)
+ uma_align_cache = align;
+}
+
+/* See uma.h */
+uma_zone_t
+uma_zcreate(char *name, size_t size, uma_ctor ctor, uma_dtor dtor,
+ uma_init uminit, uma_fini fini, int align, u_int32_t flags)
+
+{
+ struct uma_zctor_args args;
+
+ /* This stuff is essential for the zone ctor */
+ args.name = name;
+ args.size = size;
+ args.ctor = ctor;
+ args.dtor = dtor;
+ args.uminit = uminit;
+ args.fini = fini;
+ args.align = align;
+ args.flags = flags;
+ args.keg = NULL;
+
+ return (zone_alloc_item(zones, &args, M_WAITOK));
+}
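+
+/*
+ * Illustrative usage sketch (hypothetical, not part of the imported file):
+ * a consumer creating a zone and allocating from it.  "struct foo" and the
+ * helpers are invented for the example; uma_zalloc() and uma_zfree() are
+ * the uma.h wrappers around uma_zalloc_arg() and uma_zfree_arg().
+ */
+#if 0
+struct foo {
+	int	f_refs;
+	char	f_name[16];
+};
+
+static uma_zone_t foo_zone;
+
+static void
+foo_zone_init(void)
+{
+	/* No ctor/dtor/init/fini, pointer alignment, no special flags. */
+	foo_zone = uma_zcreate("foo", sizeof(struct foo),
+	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
+}
+
+static struct foo *
+foo_alloc(void)
+{
+	/* M_ZERO asks uma_zalloc_arg() to bzero() the returned item. */
+	return (uma_zalloc(foo_zone, M_WAITOK | M_ZERO));
+}
+
+static void
+foo_free(struct foo *fp)
+{
+	uma_zfree(foo_zone, fp);
+}
+#endif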
+
+/* See uma.h */
+uma_zone_t
+uma_zsecond_create(char *name, uma_ctor ctor, uma_dtor dtor,
+ uma_init zinit, uma_fini zfini, uma_zone_t master)
+{
+ struct uma_zctor_args args;
+ uma_keg_t keg;
+
+ keg = zone_first_keg(master);
+ args.name = name;
+ args.size = keg->uk_size;
+ args.ctor = ctor;
+ args.dtor = dtor;
+ args.uminit = zinit;
+ args.fini = zfini;
+ args.align = keg->uk_align;
+ args.flags = keg->uk_flags | UMA_ZONE_SECONDARY;
+ args.keg = keg;
+
+ /* XXX Attaches only one keg of potentially many. */
+ return (zone_alloc_item(zones, &args, M_WAITOK));
+}
+
+static void
+zone_lock_pair(uma_zone_t a, uma_zone_t b)
+{
+ if (a < b) {
+ ZONE_LOCK(a);
+ mtx_lock_flags(b->uz_lock, MTX_DUPOK);
+ } else {
+ ZONE_LOCK(b);
+ mtx_lock_flags(a->uz_lock, MTX_DUPOK);
+ }
+}
+
+static void
+zone_unlock_pair(uma_zone_t a, uma_zone_t b)
+{
+
+ ZONE_UNLOCK(a);
+ ZONE_UNLOCK(b);
+}
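+
+/*
+ * Illustrative note (hypothetical sketch, not part of the imported file):
+ * zone_lock_pair() above uses the classic address-ordering discipline,
+ * always taking the lower-addressed lock first, so two threads locking the
+ * same pair of zones in opposite argument order cannot deadlock.  MTX_DUPOK
+ * is needed on the second acquisition because both mutexes are of the same
+ * witness type.  The same idiom for two arbitrary mutexes:
+ */
+#if 0
+static void
+lock_two(struct mtx *x, struct mtx *y)
+{
+	if ((uintptr_t)x < (uintptr_t)y) {
+		mtx_lock(x);
+		mtx_lock_flags(y, MTX_DUPOK);
+	} else {
+		mtx_lock(y);
+		mtx_lock_flags(x, MTX_DUPOK);
+	}
+}
+#endif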
+
+/* See uma.h */
+void
+uma_zdestroy(uma_zone_t zone)
+{
+
+ zone_free_item(zones, zone, NULL, SKIP_NONE, ZFREE_STATFREE);
+}
+
+/* See uma.h */
+void *
+uma_zalloc_arg(uma_zone_t zone, void *udata, int flags)
+{
+ void *item;
+ uma_cache_t cache;
+ uma_bucket_t bucket;
+ int cpu;
+
+ /* This is the fast path allocation */
+#ifdef UMA_DEBUG_ALLOC_1
+ printf("Allocating one item from %s(%p)\n", zone->uz_name, zone);
+#endif
+ CTR3(KTR_UMA, "uma_zalloc_arg thread %x zone %s flags %d", curthread,
+ zone->uz_name, flags);
+
+ if (flags & M_WAITOK) {
+ WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
+ "uma_zalloc_arg: zone \"%s\"", zone->uz_name);
+ }
+
+ /*
+ * If possible, allocate from the per-CPU cache. There are two
+ * requirements for safe access to the per-CPU cache: (1) the thread
+ * accessing the cache must not be preempted or yield during access,
+ * and (2) the thread must not migrate CPUs without switching which
+ * cache it accesses. We rely on a critical section to prevent
+ * preemption and migration. We release the critical section in
+ * order to acquire the zone mutex if we are unable to allocate from
+ * the current cache; when we re-acquire the critical section, we
+ * must detect and handle migration if it has occurred.
+ */
+zalloc_restart:
+ critical_enter();
+ cpu = curcpu;
+ cache = &zone->uz_cpu[cpu];
+
+zalloc_start:
+ bucket = cache->uc_allocbucket;
+
+ if (bucket) {
+ if (bucket->ub_cnt > 0) {
+ bucket->ub_cnt--;
+ item = bucket->ub_bucket[bucket->ub_cnt];
+#ifdef INVARIANTS
+ bucket->ub_bucket[bucket->ub_cnt] = NULL;
+#endif
+ KASSERT(item != NULL,
+ ("uma_zalloc: Bucket pointer mangled."));
+ cache->uc_allocs++;
+ critical_exit();
+#ifdef INVARIANTS
+ ZONE_LOCK(zone);
+ uma_dbg_alloc(zone, NULL, item);
+ ZONE_UNLOCK(zone);
+#endif
+ if (zone->uz_ctor != NULL) {
+ if (zone->uz_ctor(item, zone->uz_size,
+ udata, flags) != 0) {
+ zone_free_item(zone, item, udata,
+ SKIP_DTOR, ZFREE_STATFAIL |
+ ZFREE_STATFREE);
+ return (NULL);
+ }
+ }
+ if (flags & M_ZERO)
+ bzero(item, zone->uz_size);
+ return (item);
+ } else if (cache->uc_freebucket) {
+ /*
+ * We have run out of items in our allocbucket.
+ * See if we can switch with our free bucket.
+ */
+ if (cache->uc_freebucket->ub_cnt > 0) {
+#ifdef UMA_DEBUG_ALLOC
+ printf("uma_zalloc: Swapping empty with"
+ " alloc.\n");
+#endif
+ bucket = cache->uc_freebucket;
+ cache->uc_freebucket = cache->uc_allocbucket;
+ cache->uc_allocbucket = bucket;
+
+ goto zalloc_start;
+ }
+ }
+ }
+ /*
+	 * The attempt to retrieve the item from the per-CPU cache has failed, so
+ * we must go back to the zone. This requires the zone lock, so we
+ * must drop the critical section, then re-acquire it when we go back
+ * to the cache. Since the critical section is released, we may be
+ * preempted or migrate. As such, make sure not to maintain any
+ * thread-local state specific to the cache from prior to releasing
+ * the critical section.
+ */
+ critical_exit();
+ ZONE_LOCK(zone);
+ critical_enter();
+ cpu = curcpu;
+ cache = &zone->uz_cpu[cpu];
+ bucket = cache->uc_allocbucket;
+ if (bucket != NULL) {
+ if (bucket->ub_cnt > 0) {
+ ZONE_UNLOCK(zone);
+ goto zalloc_start;
+ }
+ bucket = cache->uc_freebucket;
+ if (bucket != NULL && bucket->ub_cnt > 0) {
+ ZONE_UNLOCK(zone);
+ goto zalloc_start;
+ }
+ }
+
+ /* Since we have locked the zone we may as well send back our stats */
+ zone->uz_allocs += cache->uc_allocs;
+ cache->uc_allocs = 0;
+ zone->uz_frees += cache->uc_frees;
+ cache->uc_frees = 0;
+
+ /* Our old one is now a free bucket */
+ if (cache->uc_allocbucket) {
+ KASSERT(cache->uc_allocbucket->ub_cnt == 0,
+ ("uma_zalloc_arg: Freeing a non free bucket."));
+ LIST_INSERT_HEAD(&zone->uz_free_bucket,
+ cache->uc_allocbucket, ub_link);
+ cache->uc_allocbucket = NULL;
+ }
+
+ /* Check the free list for a new alloc bucket */
+ if ((bucket = LIST_FIRST(&zone->uz_full_bucket)) != NULL) {
+ KASSERT(bucket->ub_cnt != 0,
+ ("uma_zalloc_arg: Returning an empty bucket."));
+
+ LIST_REMOVE(bucket, ub_link);
+ cache->uc_allocbucket = bucket;
+ ZONE_UNLOCK(zone);
+ goto zalloc_start;
+ }
+ /* We are no longer associated with this CPU. */
+ critical_exit();
+
+	/* Bump up our uz_count so we get here less often. */
+ if (zone->uz_count < BUCKET_MAX)
+ zone->uz_count++;
+
+ /*
+	 * Now let's just fill a bucket and put it on the free list. If that
+	 * works, we'll restart the allocation from the beginning.
+ */
+ if (zone_alloc_bucket(zone, flags)) {
+ ZONE_UNLOCK(zone);
+ goto zalloc_restart;
+ }
+ ZONE_UNLOCK(zone);
+ /*
+ * We may not be able to get a bucket so return an actual item.
+ */
+#ifdef UMA_DEBUG
+ printf("uma_zalloc_arg: Bucketzone returned NULL\n");
+#endif
+
+ item = zone_alloc_item(zone, udata, flags);
+ return (item);
+}
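+
+/*
+ * Illustrative sketch (hypothetical, not part of the imported file): the
+ * per-CPU access pattern used by uma_zalloc_arg() above, reduced to its
+ * essentials for an invented per-CPU counter.  The critical section pins
+ * the thread to its current CPU, so the curcpu index stays valid for the
+ * whole access.
+ */
+#if 0
+static u_long example_pcpu[MAXCPU];
+
+static void
+example_pcpu_inc(void)
+{
+	critical_enter();		/* no preemption, no migration */
+	example_pcpu[curcpu]++;		/* curcpu is stable in here */
+	critical_exit();
+}
+#endif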
+
+static uma_slab_t
+keg_fetch_slab(uma_keg_t keg, uma_zone_t zone, int flags)
+{
+ uma_slab_t slab;
+
+ mtx_assert(&keg->uk_lock, MA_OWNED);
+ slab = NULL;
+
+ for (;;) {
+ /*
+ * Find a slab with some space. Prefer slabs that are partially
+ * used over those that are totally full. This helps to reduce
+ * fragmentation.
+ */
+ if (keg->uk_free != 0) {
+ if (!LIST_EMPTY(&keg->uk_part_slab)) {
+ slab = LIST_FIRST(&keg->uk_part_slab);
+ } else {
+ slab = LIST_FIRST(&keg->uk_free_slab);
+ LIST_REMOVE(slab, us_link);
+ LIST_INSERT_HEAD(&keg->uk_part_slab, slab,
+ us_link);
+ }
+ MPASS(slab->us_keg == keg);
+ return (slab);
+ }
+
+ /*
+ * M_NOVM means don't ask at all!
+ */
+ if (flags & M_NOVM)
+ break;
+
+ if (keg->uk_maxpages && keg->uk_pages >= keg->uk_maxpages) {
+ keg->uk_flags |= UMA_ZFLAG_FULL;
+ /*
+ * If this is not a multi-zone, set the FULL bit.
+ * Otherwise slab_multi() takes care of it.
+ */
+ if ((zone->uz_flags & UMA_ZFLAG_MULTI) == 0)
+ zone->uz_flags |= UMA_ZFLAG_FULL;
+ if (flags & M_NOWAIT)
+ break;
+ msleep(keg, &keg->uk_lock, PVM, "keglimit", 0);
+ continue;
+ }
+ keg->uk_recurse++;
+ slab = keg_alloc_slab(keg, zone, flags);
+ keg->uk_recurse--;
+ /*
+ * If we got a slab here it's safe to mark it partially used
+ * and return. We assume that the caller is going to remove
+ * at least one item.
+ */
+ if (slab) {
+ MPASS(slab->us_keg == keg);
+ LIST_INSERT_HEAD(&keg->uk_part_slab, slab, us_link);
+ return (slab);
+ }
+ /*
+ * We might not have been able to get a slab but another cpu
+ * could have while we were unlocked. Check again before we
+ * fail.
+ */
+ flags |= M_NOVM;
+ }
+ return (slab);
+}
+
+static inline void
+zone_relock(uma_zone_t zone, uma_keg_t keg)
+{
+ if (zone->uz_lock != &keg->uk_lock) {
+ KEG_UNLOCK(keg);
+ ZONE_LOCK(zone);
+ }
+}
+
+static inline void
+keg_relock(uma_keg_t keg, uma_zone_t zone)
+{
+ if (zone->uz_lock != &keg->uk_lock) {
+ ZONE_UNLOCK(zone);
+ KEG_LOCK(keg);
+ }
+}
+
+static uma_slab_t
+zone_fetch_slab(uma_zone_t zone, uma_keg_t keg, int flags)
+{
+ uma_slab_t slab;
+
+ if (keg == NULL)
+ keg = zone_first_keg(zone);
+ /*
+ * This is to prevent us from recursively trying to allocate
+ * buckets. The problem is that if an allocation forces us to
+ * grab a new bucket we will call page_alloc, which will go off
+ * and cause the vm to allocate vm_map_entries. If we need new
+ * buckets there too we will recurse in kmem_alloc and bad
+ * things happen. So instead we return a NULL bucket, and make
+ * the code that allocates buckets smart enough to deal with it
+ */
+ if (keg->uk_flags & UMA_ZFLAG_BUCKET && keg->uk_recurse != 0)
+ return (NULL);
+
+ for (;;) {
+ slab = keg_fetch_slab(keg, zone, flags);
+ if (slab)
+ return (slab);
+ if (flags & (M_NOWAIT | M_NOVM))
+ break;
+ }
+ return (NULL);
+}
+
+/*
+ * zone_fetch_slab_multi: Fetches a slab from one available keg. Returns
+ * with the keg locked. Caller must call zone_relock() afterwards if the
+ * zone lock is required. If NULL is returned, the zone lock is held instead.
+ *
+ * The last pointer is used to seed the search. It is not required.
+ */
+static uma_slab_t
+zone_fetch_slab_multi(uma_zone_t zone, uma_keg_t last, int rflags)
+{
+ uma_klink_t klink;
+ uma_slab_t slab;
+ uma_keg_t keg;
+ int flags;
+ int empty;
+ int full;
+
+ /*
+	 * Don't wait on the first pass. This also skips the limit tests;
+	 * we only want to block if no keg can provide a slab without
+	 * blocking.
+ */
+ flags = (rflags & ~M_WAITOK) | M_NOWAIT;
+ /*
+ * Use the last slab allocated as a hint for where to start
+ * the search.
+ */
+ if (last) {
+ slab = keg_fetch_slab(last, zone, flags);
+ if (slab)
+ return (slab);
+ zone_relock(zone, last);
+ last = NULL;
+ }
+ /*
+	 * Loop until we have a slab in case of transient failures
+	 * while M_WAITOK is specified. It is not clear this is 100%
+	 * required, but it preserves long-standing behavior.
+ */
+ for (;;) {
+ empty = 0;
+ full = 0;
+ /*
+ * Search the available kegs for slabs. Be careful to hold the
+ * correct lock while calling into the keg layer.
+ */
+ LIST_FOREACH(klink, &zone->uz_kegs, kl_link) {
+ keg = klink->kl_keg;
+ keg_relock(keg, zone);
+ if ((keg->uk_flags & UMA_ZFLAG_FULL) == 0) {
+ slab = keg_fetch_slab(keg, zone, flags);
+ if (slab)
+ return (slab);
+ }
+ if (keg->uk_flags & UMA_ZFLAG_FULL)
+ full++;
+ else
+ empty++;
+ zone_relock(zone, keg);
+ }
+ if (rflags & (M_NOWAIT | M_NOVM))
+ break;
+ flags = rflags;
+ /*
+ * All kegs are full. XXX We can't atomically check all kegs
+ * and sleep so just sleep for a short period and retry.
+ */
+ if (full && !empty) {
+ zone->uz_flags |= UMA_ZFLAG_FULL;
+ msleep(zone, zone->uz_lock, PVM, "zonelimit", hz/100);
+ zone->uz_flags &= ~UMA_ZFLAG_FULL;
+ continue;
+ }
+ }
+ return (NULL);
+}
+
+static void *
+slab_alloc_item(uma_zone_t zone, uma_slab_t slab)
+{
+ uma_keg_t keg;
+ uma_slabrefcnt_t slabref;
+ void *item;
+ u_int8_t freei;
+
+ keg = slab->us_keg;
+ mtx_assert(&keg->uk_lock, MA_OWNED);
+
+ freei = slab->us_firstfree;
+ if (keg->uk_flags & UMA_ZONE_REFCNT) {
+ slabref = (uma_slabrefcnt_t)slab;
+ slab->us_firstfree = slabref->us_freelist[freei].us_item;
+ } else {
+ slab->us_firstfree = slab->us_freelist[freei].us_item;
+ }
+ item = slab->us_data + (keg->uk_rsize * freei);
+
+ slab->us_freecount--;
+ keg->uk_free--;
+#ifdef INVARIANTS
+ uma_dbg_alloc(zone, slab, item);
+#endif
+ /* Move this slab to the full list */
+ if (slab->us_freecount == 0) {
+ LIST_REMOVE(slab, us_link);
+ LIST_INSERT_HEAD(&keg->uk_full_slab, slab, us_link);
+ }
+
+ return (item);
+}
+
+static int
+zone_alloc_bucket(uma_zone_t zone, int flags)
+{
+ uma_bucket_t bucket;
+ uma_slab_t slab;
+ uma_keg_t keg;
+ int16_t saved;
+ int max, origflags = flags;
+
+ /*
+ * Try this zone's free list first so we don't allocate extra buckets.
+ */
+ if ((bucket = LIST_FIRST(&zone->uz_free_bucket)) != NULL) {
+ KASSERT(bucket->ub_cnt == 0,
+ ("zone_alloc_bucket: Bucket on free list is not empty."));
+ LIST_REMOVE(bucket, ub_link);
+ } else {
+ int bflags;
+
+ bflags = (flags & ~M_ZERO);
+ if (zone->uz_flags & UMA_ZFLAG_CACHEONLY)
+ bflags |= M_NOVM;
+
+ ZONE_UNLOCK(zone);
+ bucket = bucket_alloc(zone->uz_count, bflags);
+ ZONE_LOCK(zone);
+ }
+
+ if (bucket == NULL) {
+ return (0);
+ }
+
+#ifdef SMP
+ /*
+ * This code is here to limit the number of simultaneous bucket fills
+	 * for any given zone to the number of per-CPU caches in this zone.
+	 * This is done so that we don't allocate more memory than we really need.
+ */
+ if (zone->uz_fills >= mp_ncpus)
+ goto done;
+
+#endif
+ zone->uz_fills++;
+
+ max = MIN(bucket->ub_entries, zone->uz_count);
+ /* Try to keep the buckets totally full */
+ saved = bucket->ub_cnt;
+ slab = NULL;
+ keg = NULL;
+ while (bucket->ub_cnt < max &&
+ (slab = zone->uz_slab(zone, keg, flags)) != NULL) {
+ keg = slab->us_keg;
+ while (slab->us_freecount && bucket->ub_cnt < max) {
+ bucket->ub_bucket[bucket->ub_cnt++] =
+ slab_alloc_item(zone, slab);
+ }
+
+ /* Don't block on the next fill */
+ flags |= M_NOWAIT;
+ }
+ if (slab)
+ zone_relock(zone, keg);
+
+ /*
+ * We unlock here because we need to call the zone's init.
+ * It should be safe to unlock because the slab dealt with
+ * above is already on the appropriate list within the keg
+ * and the bucket we filled is not yet on any list, so we
+ * own it.
+ */
+ if (zone->uz_init != NULL) {
+ int i;
+
+ ZONE_UNLOCK(zone);
+ for (i = saved; i < bucket->ub_cnt; i++)
+ if (zone->uz_init(bucket->ub_bucket[i], zone->uz_size,
+ origflags) != 0)
+ break;
+ /*
+ * If we couldn't initialize the whole bucket, put the
+ * rest back onto the freelist.
+ */
+ if (i != bucket->ub_cnt) {
+ int j;
+
+ for (j = i; j < bucket->ub_cnt; j++) {
+ zone_free_item(zone, bucket->ub_bucket[j],
+ NULL, SKIP_FINI, 0);
+#ifdef INVARIANTS
+ bucket->ub_bucket[j] = NULL;
+#endif
+ }
+ bucket->ub_cnt = i;
+ }
+ ZONE_LOCK(zone);
+ }
+
+ zone->uz_fills--;
+ if (bucket->ub_cnt != 0) {
+ LIST_INSERT_HEAD(&zone->uz_full_bucket,
+ bucket, ub_link);
+ return (1);
+ }
+#ifdef SMP
+done:
+#endif
+ bucket_free(bucket);
+
+ return (0);
+}
+
+/*
+ * Allocates an item for an internal zone
+ *
+ * Arguments
+ * zone The zone to alloc for.
+ * udata The data to be passed to the constructor.
+ * flags M_WAITOK, M_NOWAIT, M_ZERO.
+ *
+ * Returns
+ * NULL if there is no memory and M_NOWAIT is set
+ * An item if successful
+ */
+
+static void *
+zone_alloc_item(uma_zone_t zone, void *udata, int flags)
+{
+ uma_slab_t slab;
+ void *item;
+
+ item = NULL;
+
+#ifdef UMA_DEBUG_ALLOC
+ printf("INTERNAL: Allocating one item from %s(%p)\n", zone->uz_name, zone);
+#endif
+ ZONE_LOCK(zone);
+
+ slab = zone->uz_slab(zone, NULL, flags);
+ if (slab == NULL) {
+ zone->uz_fails++;
+ ZONE_UNLOCK(zone);
+ return (NULL);
+ }
+
+ item = slab_alloc_item(zone, slab);
+
+ zone_relock(zone, slab->us_keg);
+ zone->uz_allocs++;
+ ZONE_UNLOCK(zone);
+
+ /*
+ * We have to call both the zone's init (not the keg's init)
+ * and the zone's ctor. This is because the item is going from
+ * a keg slab directly to the user, and the user is expecting it
+ * to be both zone-init'd as well as zone-ctor'd.
+ */
+ if (zone->uz_init != NULL) {
+ if (zone->uz_init(item, zone->uz_size, flags) != 0) {
+ zone_free_item(zone, item, udata, SKIP_FINI,
+ ZFREE_STATFAIL | ZFREE_STATFREE);
+ return (NULL);
+ }
+ }
+ if (zone->uz_ctor != NULL) {
+ if (zone->uz_ctor(item, zone->uz_size, udata, flags) != 0) {
+ zone_free_item(zone, item, udata, SKIP_DTOR,
+ ZFREE_STATFAIL | ZFREE_STATFREE);
+ return (NULL);
+ }
+ }
+ if (flags & M_ZERO)
+ bzero(item, zone->uz_size);
+
+ return (item);
+}
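+
+/*
+ * Illustrative note: uz_init runs when an item crosses from the keg into
+ * the zone (items recycled through per-CPU buckets skip it), while uz_ctor
+ * runs on every allocation handed to the consumer.  A hypothetical zone
+ * might therefore set up a mutex in its init routine and merely reset
+ * counters in its ctor, so the expensive work is not repeated for
+ * cache-hot items.
+ */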
+
+/* See uma.h */
+void
+uma_zfree_arg(uma_zone_t zone, void *item, void *udata)
+{
+ uma_cache_t cache;
+ uma_bucket_t bucket;
+ int bflags;
+ int cpu;
+
+#ifdef UMA_DEBUG_ALLOC_1
+ printf("Freeing item %p to %s(%p)\n", item, zone->uz_name, zone);
+#endif
+ CTR2(KTR_UMA, "uma_zfree_arg thread %x zone %s", curthread,
+ zone->uz_name);
+
+ /* uma_zfree(..., NULL) does nothing, to match free(9). */
+ if (item == NULL)
+ return;
+
+ if (zone->uz_dtor)
+ zone->uz_dtor(item, zone->uz_size, udata);
+
+#ifdef INVARIANTS
+ ZONE_LOCK(zone);
+ if (zone->uz_flags & UMA_ZONE_MALLOC)
+ uma_dbg_free(zone, udata, item);
+ else
+ uma_dbg_free(zone, NULL, item);
+ ZONE_UNLOCK(zone);
+#endif
+ /*
+ * The race here is acceptable. If we miss it we'll just have to wait
+ * a little longer for the limits to be reset.
+ */
+ if (zone->uz_flags & UMA_ZFLAG_FULL)
+ goto zfree_internal;
+
+ /*
+ * If possible, free to the per-CPU cache. There are two
+ * requirements for safe access to the per-CPU cache: (1) the thread
+ * accessing the cache must not be preempted or yield during access,
+ * and (2) the thread must not migrate CPUs without switching which
+ * cache it accesses. We rely on a critical section to prevent
+ * preemption and migration. We release the critical section in
+ * order to acquire the zone mutex if we are unable to free to the
+ * current cache; when we re-acquire the critical section, we must
+ * detect and handle migration if it has occurred.
+ */
+zfree_restart:
+ critical_enter();
+ cpu = curcpu;
+ cache = &zone->uz_cpu[cpu];
+
+zfree_start:
+ bucket = cache->uc_freebucket;
+
+ if (bucket) {
+ /*
+ * Do we have room in our bucket? It is OK for this uz count
+ * check to be slightly out of sync.
+ */
+
+ if (bucket->ub_cnt < bucket->ub_entries) {
+ KASSERT(bucket->ub_bucket[bucket->ub_cnt] == NULL,
+ ("uma_zfree: Freeing to non free bucket index."));
+ bucket->ub_bucket[bucket->ub_cnt] = item;
+ bucket->ub_cnt++;
+ cache->uc_frees++;
+ critical_exit();
+ return;
+ } else if (cache->uc_allocbucket) {
+#ifdef UMA_DEBUG_ALLOC
+ printf("uma_zfree: Swapping buckets.\n");
+#endif
+ /*
+ * We have run out of space in our freebucket.
+ * See if we can switch with our alloc bucket.
+ */
+ if (cache->uc_allocbucket->ub_cnt <
+ cache->uc_freebucket->ub_cnt) {
+ bucket = cache->uc_freebucket;
+ cache->uc_freebucket = cache->uc_allocbucket;
+ cache->uc_allocbucket = bucket;
+ goto zfree_start;
+ }
+ }
+ }
+ /*
+ * We can get here for two reasons:
+ *
+ * 1) The buckets are NULL
+ * 2) The alloc and free buckets are both somewhat full.
+ *
+	 * We must go back to the zone, which requires acquiring the zone lock,
+ * which in turn means we must release and re-acquire the critical
+ * section. Since the critical section is released, we may be
+ * preempted or migrate. As such, make sure not to maintain any
+ * thread-local state specific to the cache from prior to releasing
+ * the critical section.
+ */
+ critical_exit();
+ ZONE_LOCK(zone);
+ critical_enter();
+ cpu = curcpu;
+ cache = &zone->uz_cpu[cpu];
+ if (cache->uc_freebucket != NULL) {
+ if (cache->uc_freebucket->ub_cnt <
+ cache->uc_freebucket->ub_entries) {
+ ZONE_UNLOCK(zone);
+ goto zfree_start;
+ }
+ if (cache->uc_allocbucket != NULL &&
+ (cache->uc_allocbucket->ub_cnt <
+ cache->uc_freebucket->ub_cnt)) {
+ ZONE_UNLOCK(zone);
+ goto zfree_start;
+ }
+ }
+
+ /* Since we have locked the zone we may as well send back our stats */
+ zone->uz_allocs += cache->uc_allocs;
+ cache->uc_allocs = 0;
+ zone->uz_frees += cache->uc_frees;
+ cache->uc_frees = 0;
+
+ bucket = cache->uc_freebucket;
+ cache->uc_freebucket = NULL;
+
+ /* Can we throw this on the zone full list? */
+ if (bucket != NULL) {
+#ifdef UMA_DEBUG_ALLOC
+ printf("uma_zfree: Putting old bucket on the free list.\n");
+#endif
+ /* ub_cnt is pointing to the last free item */
+ KASSERT(bucket->ub_cnt != 0,
+ ("uma_zfree: Attempting to insert an empty bucket onto the full list.\n"));
+ LIST_INSERT_HEAD(&zone->uz_full_bucket,
+ bucket, ub_link);
+ }
+ if ((bucket = LIST_FIRST(&zone->uz_free_bucket)) != NULL) {
+ LIST_REMOVE(bucket, ub_link);
+ ZONE_UNLOCK(zone);
+ cache->uc_freebucket = bucket;
+ goto zfree_start;
+ }
+ /* We are no longer associated with this CPU. */
+ critical_exit();
+
+ /* And the zone.. */
+ ZONE_UNLOCK(zone);
+
+#ifdef UMA_DEBUG_ALLOC
+ printf("uma_zfree: Allocating new free bucket.\n");
+#endif
+ bflags = M_NOWAIT;
+
+ if (zone->uz_flags & UMA_ZFLAG_CACHEONLY)
+ bflags |= M_NOVM;
+ bucket = bucket_alloc(zone->uz_count, bflags);
+ if (bucket) {
+ ZONE_LOCK(zone);
+ LIST_INSERT_HEAD(&zone->uz_free_bucket,
+ bucket, ub_link);
+ ZONE_UNLOCK(zone);
+ goto zfree_restart;
+ }
+
+ /*
+ * If nothing else caught this, we'll just do an internal free.
+ */
+zfree_internal:
+ zone_free_item(zone, item, udata, SKIP_DTOR, ZFREE_STATFREE);
+
+ return;
+}
+
+/*
+ * Frees an item to an INTERNAL zone or allocates a free bucket
+ *
+ * Arguments:
+ * zone The zone to free to
+ * item The item we're freeing
+ * udata User supplied data for the dtor
+ * skip Skip dtors and finis
+ */
+static void
+zone_free_item(uma_zone_t zone, void *item, void *udata,
+ enum zfreeskip skip, int flags)
+{
+ uma_slab_t slab;
+ uma_slabrefcnt_t slabref;
+ uma_keg_t keg;
+ u_int8_t *mem;
+ u_int8_t freei;
+ int clearfull;
+
+ if (skip < SKIP_DTOR && zone->uz_dtor)
+ zone->uz_dtor(item, zone->uz_size, udata);
+
+ if (skip < SKIP_FINI && zone->uz_fini)
+ zone->uz_fini(item, zone->uz_size);
+
+ ZONE_LOCK(zone);
+
+ if (flags & ZFREE_STATFAIL)
+ zone->uz_fails++;
+ if (flags & ZFREE_STATFREE)
+ zone->uz_frees++;
+
+ if (!(zone->uz_flags & UMA_ZONE_VTOSLAB)) {
+ mem = (u_int8_t *)((unsigned long)item & (~UMA_SLAB_MASK));
+ keg = zone_first_keg(zone); /* Must only be one. */
+ if (zone->uz_flags & UMA_ZONE_HASH) {
+ slab = hash_sfind(&keg->uk_hash, mem);
+ } else {
+ mem += keg->uk_pgoff;
+ slab = (uma_slab_t)mem;
+ }
+ } else {
+ panic("uma virtual memory not supported!" );
+ }
+ MPASS(keg == slab->us_keg);
+
+ /* Do we need to remove from any lists? */
+ if (slab->us_freecount+1 == keg->uk_ipers) {
+ LIST_REMOVE(slab, us_link);
+ LIST_INSERT_HEAD(&keg->uk_free_slab, slab, us_link);
+ } else if (slab->us_freecount == 0) {
+ LIST_REMOVE(slab, us_link);
+ LIST_INSERT_HEAD(&keg->uk_part_slab, slab, us_link);
+ }
+
+ /* Slab management stuff */
+ freei = ((unsigned long)item - (unsigned long)slab->us_data)
+ / keg->uk_rsize;
+
+#ifdef INVARIANTS
+ if (!skip)
+ uma_dbg_free(zone, slab, item);
+#endif
+
+ if (keg->uk_flags & UMA_ZONE_REFCNT) {
+ slabref = (uma_slabrefcnt_t)slab;
+ slabref->us_freelist[freei].us_item = slab->us_firstfree;
+ } else {
+ slab->us_freelist[freei].us_item = slab->us_firstfree;
+ }
+ slab->us_firstfree = freei;
+ slab->us_freecount++;
+
+ /* Zone statistics */
+ keg->uk_free++;
+
+ clearfull = 0;
+ if (keg->uk_flags & UMA_ZFLAG_FULL) {
+ if (keg->uk_pages < keg->uk_maxpages) {
+ keg->uk_flags &= ~UMA_ZFLAG_FULL;
+ clearfull = 1;
+ }
+
+ /*
+		 * We can handle one more allocation. Since we're clearing
+		 * ZFLAG_FULL, wake up all procs blocked on pages. This should
+		 * be uncommon, so keeping this simple for now (rather than
+		 * adding count of blocked threads etc).
+ */
+ wakeup(keg);
+ }
+ if (clearfull) {
+ zone_relock(zone, keg);
+ zone->uz_flags &= ~UMA_ZFLAG_FULL;
+ wakeup(zone);
+ ZONE_UNLOCK(zone);
+ } else
+ KEG_UNLOCK(keg);
+}
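+
+/*
+ * Illustrative sketch (hypothetical, not part of the imported file): the
+ * us_firstfree/us_item updates above maintain a singly linked freelist
+ * whose links are one-byte item indices rather than pointers, which is
+ * why ipers must fit in 8 bits.  The idiom in isolation:
+ */
+#if 0
+#define	NITEMS	8
+static u_int8_t next_free[NITEMS];	/* per-item link: index of next free */
+static u_int8_t firstfree;		/* head of the freelist */
+
+static void
+freelist_init(void)
+{
+	u_int8_t i;
+
+	for (i = 0; i < NITEMS - 1; i++)
+		next_free[i] = i + 1;
+	firstfree = 0;
+}
+
+static u_int8_t
+freelist_pop(void)
+{
+	u_int8_t i = firstfree;
+
+	firstfree = next_free[i];	/* unlink the head */
+	return (i);
+}
+
+static void
+freelist_push(u_int8_t i)
+{
+	next_free[i] = firstfree;	/* old head becomes our successor */
+	firstfree = i;
+}
+#endif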
+
+/* See uma.h */
+void
+uma_zone_set_max(uma_zone_t zone, int nitems)
+{
+ uma_keg_t keg;
+
+ ZONE_LOCK(zone);
+ keg = zone_first_keg(zone);
+ keg->uk_maxpages = (nitems / keg->uk_ipers) * keg->uk_ppera;
+ if (keg->uk_maxpages * keg->uk_ipers < nitems)
+ keg->uk_maxpages += keg->uk_ppera;
+
+ ZONE_UNLOCK(zone);
+}
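+
+/*
+ * Illustrative arithmetic (invented numbers): with uk_ipers = 10 and
+ * uk_ppera = 1, uma_zone_set_max(zone, 25) computes (25 / 10) * 1 = 2
+ * pages, sees 2 * 10 = 20 < 25, and adds one more page for a limit of
+ * 3 pages (30 items).  The cap is rounded up to whole slabs, so
+ * uma_zone_get_max() may report slightly more than the requested nitems.
+ */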
+
+/* See uma.h */
+int
+uma_zone_get_max(uma_zone_t zone)
+{
+ int nitems;
+ uma_keg_t keg;
+
+ ZONE_LOCK(zone);
+ keg = zone_first_keg(zone);
+ nitems = keg->uk_maxpages * keg->uk_ipers;
+ ZONE_UNLOCK(zone);
+
+ return (nitems);
+}
+
+/* See uma.h */
+int
+uma_zone_get_cur(uma_zone_t zone)
+{
+ int64_t nitems;
+ u_int i;
+
+ ZONE_LOCK(zone);
+ nitems = zone->uz_allocs - zone->uz_frees;
+ CPU_FOREACH(i) {
+ /*
+ * See the comment in sysctl_vm_zone_stats() regarding the
+ * safety of accessing the per-cpu caches. With the zone lock
+ * held, it is safe, but can potentially result in stale data.
+ */
+ nitems += zone->uz_cpu[i].uc_allocs -
+ zone->uz_cpu[i].uc_frees;
+ }
+ ZONE_UNLOCK(zone);
+
+ return (nitems < 0 ? 0 : nitems);
+}
+
+/* See uma.h */
+void
+uma_zone_set_init(uma_zone_t zone, uma_init uminit)
+{
+ uma_keg_t keg;
+
+ ZONE_LOCK(zone);
+ keg = zone_first_keg(zone);
+ KASSERT(keg->uk_pages == 0,
+ ("uma_zone_set_init on non-empty keg"));
+ keg->uk_init = uminit;
+ ZONE_UNLOCK(zone);
+}
+
+/* See uma.h */
+void
+uma_zone_set_fini(uma_zone_t zone, uma_fini fini)
+{
+ uma_keg_t keg;
+
+ ZONE_LOCK(zone);
+ keg = zone_first_keg(zone);
+ KASSERT(keg->uk_pages == 0,
+ ("uma_zone_set_fini on non-empty keg"));
+ keg->uk_fini = fini;
+ ZONE_UNLOCK(zone);
+}
+
+/* See uma.h */
+void
+uma_zone_set_zinit(uma_zone_t zone, uma_init zinit)
+{
+ ZONE_LOCK(zone);
+ KASSERT(zone_first_keg(zone)->uk_pages == 0,
+ ("uma_zone_set_zinit on non-empty keg"));
+ zone->uz_init = zinit;
+ ZONE_UNLOCK(zone);
+}
+
+/* See uma.h */
+void
+uma_zone_set_zfini(uma_zone_t zone, uma_fini zfini)
+{
+ ZONE_LOCK(zone);
+ KASSERT(zone_first_keg(zone)->uk_pages == 0,
+ ("uma_zone_set_zfini on non-empty keg"));
+ zone->uz_fini = zfini;
+ ZONE_UNLOCK(zone);
+}
+
+/* See uma.h */
+/* XXX uk_freef is not actually used with the zone locked */
+void
+uma_zone_set_freef(uma_zone_t zone, uma_free freef)
+{
+
+ ZONE_LOCK(zone);
+ zone_first_keg(zone)->uk_freef = freef;
+ ZONE_UNLOCK(zone);
+}
+
+/* See uma.h */
+/* XXX uk_allocf is not actually used with the zone locked */
+void
+uma_zone_set_allocf(uma_zone_t zone, uma_alloc allocf)
+{
+ uma_keg_t keg;
+
+ ZONE_LOCK(zone);
+ keg = zone_first_keg(zone);
+ keg->uk_flags |= UMA_ZFLAG_PRIVALLOC;
+ keg->uk_allocf = allocf;
+ ZONE_UNLOCK(zone);
+}
+
+/* See uma.h */
+void
+uma_prealloc(uma_zone_t zone, int items)
+{
+ int slabs;
+ uma_slab_t slab;
+ uma_keg_t keg;
+
+ keg = zone_first_keg(zone);
+ ZONE_LOCK(zone);
+ slabs = items / keg->uk_ipers;
+ if (slabs * keg->uk_ipers < items)
+ slabs++;
+ while (slabs > 0) {
+ slab = keg_alloc_slab(keg, zone, M_WAITOK);
+ if (slab == NULL)
+ break;
+ MPASS(slab->us_keg == keg);
+ LIST_INSERT_HEAD(&keg->uk_free_slab, slab, us_link);
+ slabs--;
+ }
+ ZONE_UNLOCK(zone);
+}
+
+/* See uma.h */
+void
+uma_reclaim(void)
+{
+#ifdef UMA_DEBUG
+ printf("UMA: vm asked us to release pages!\n");
+#endif
+ zone_foreach(zone_drain);
+ /*
+	 * Some slabs may have been freed while this zone was visited early
+	 * in the loop, so visit the slab zones again to free pages that have
+	 * become empty now that the other zones are drained. We have to do
+	 * the same for buckets.
+ */
+ zone_drain(slabzone);
+ zone_drain(slabrefzone);
+ bucket_zone_drain();
+}
+
+/* See uma.h */
+int
+uma_zone_exhausted(uma_zone_t zone)
+{
+ int full;
+
+ ZONE_LOCK(zone);
+ full = (zone->uz_flags & UMA_ZFLAG_FULL);
+ ZONE_UNLOCK(zone);
+ return (full);
+}
+
+int
+uma_zone_exhausted_nolock(uma_zone_t zone)
+{
+ return (zone->uz_flags & UMA_ZFLAG_FULL);
+}
+
+void *
+uma_large_malloc(int size, int wait)
+{
+ void *mem;
+ uma_slab_t slab;
+ u_int8_t flags;
+
+ slab = zone_alloc_item(slabzone, NULL, wait);
+ if (slab == NULL)
+ return (NULL);
+ mem = page_alloc(NULL, size, &flags, wait);
+ if (mem) {
+ slab->us_data = mem;
+ slab->us_flags = flags | UMA_SLAB_MALLOC;
+ slab->us_size = size;
+ } else {
+ zone_free_item(slabzone, slab, NULL, SKIP_NONE,
+ ZFREE_STATFAIL | ZFREE_STATFREE);
+ }
+
+ return (mem);
+}
+
+void
+uma_large_free(uma_slab_t slab)
+{
+ page_free(slab->us_data, slab->us_size, slab->us_flags);
+ zone_free_item(slabzone, slab, NULL, SKIP_NONE, ZFREE_STATFREE);
+}
+
+void
+uma_print_stats(void)
+{
+ zone_foreach(uma_print_zone);
+}
+
+static void
+slab_print(uma_slab_t slab)
+{
+ printf("slab: keg %p, data %p, freecount %d, firstfree %d\n",
+ slab->us_keg, slab->us_data, slab->us_freecount,
+ slab->us_firstfree);
+}
+
+static void
+cache_print(uma_cache_t cache)
+{
+ printf("alloc: %p(%d), free: %p(%d)\n",
+ cache->uc_allocbucket,
+ cache->uc_allocbucket?cache->uc_allocbucket->ub_cnt:0,
+ cache->uc_freebucket,
+ cache->uc_freebucket?cache->uc_freebucket->ub_cnt:0);
+}
+
+static void
+uma_print_keg(uma_keg_t keg)
+{
+ uma_slab_t slab;
+
+ printf("keg: %s(%p) size %d(%d) flags %d ipers %d ppera %d "
+ "out %d free %d limit %d\n",
+ keg->uk_name, keg, keg->uk_size, keg->uk_rsize, keg->uk_flags,
+ keg->uk_ipers, keg->uk_ppera,
+ (keg->uk_ipers * keg->uk_pages) - keg->uk_free, keg->uk_free,
+ (keg->uk_maxpages / keg->uk_ppera) * keg->uk_ipers);
+ printf("Part slabs:\n");
+ LIST_FOREACH(slab, &keg->uk_part_slab, us_link)
+ slab_print(slab);
+ printf("Free slabs:\n");
+ LIST_FOREACH(slab, &keg->uk_free_slab, us_link)
+ slab_print(slab);
+ printf("Full slabs:\n");
+ LIST_FOREACH(slab, &keg->uk_full_slab, us_link)
+ slab_print(slab);
+}
+
+void
+uma_print_zone(uma_zone_t zone)
+{
+ uma_cache_t cache;
+ uma_klink_t kl;
+ int i;
+
+ printf("zone: %s(%p) size %d flags %d\n",
+ zone->uz_name, zone, zone->uz_size, zone->uz_flags);
+ LIST_FOREACH(kl, &zone->uz_kegs, kl_link)
+ uma_print_keg(kl->kl_keg);
+ for (i = 0; i <= mp_maxid; i++) {
+ if (CPU_ABSENT(i))
+ continue;
+ cache = &zone->uz_cpu[i];
+ printf("CPU %d Cache:\n", i);
+ cache_print(cache);
+ }
+}
+
diff --git a/rtems/freebsd/security/audit/audit.h b/rtems/freebsd/security/audit/audit.h
new file mode 100644
index 00000000..936ffd88
--- /dev/null
+++ b/rtems/freebsd/security/audit/audit.h
@@ -0,0 +1 @@
+/* EMPTY */
diff --git a/rtems/freebsd/security/mac/mac_framework.h b/rtems/freebsd/security/mac/mac_framework.h
new file mode 100644
index 00000000..1b47b8d7
--- /dev/null
+++ b/rtems/freebsd/security/mac/mac_framework.h
@@ -0,0 +1,441 @@
+/*-
+ * Copyright (c) 1999-2002, 2007-2009 Robert N. M. Watson
+ * Copyright (c) 2001-2005 Networks Associates Technology, Inc.
+ * Copyright (c) 2005-2006 SPARTA, Inc.
+ * All rights reserved.
+ *
+ * This software was developed by Robert Watson for the TrustedBSD Project.
+ *
+ * This software was developed for the FreeBSD Project in part by Network
+ * Associates Laboratories, the Security Research Division of Network
+ * Associates, Inc. under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"),
+ * as part of the DARPA CHATS research program.
+ *
+ * This software was enhanced by SPARTA ISSO under SPAWAR contract
+ * N66001-04-C-6019 ("SEFOS").
+ *
+ * This software was developed at the University of Cambridge Computer
+ * Laboratory with support from a grant from Google, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * Kernel interface for Mandatory Access Control -- how kernel services
+ * interact with the TrustedBSD MAC Framework.
+ */
+
+#ifndef _SECURITY_MAC_MAC_FRAMEWORK_HH_
+#define _SECURITY_MAC_MAC_FRAMEWORK_HH_
+
+#ifndef _KERNEL
+#error "no user-serviceable parts inside"
+#endif
+
+struct auditinfo;
+struct auditinfo_addr;
+struct bpf_d;
+struct cdev;
+struct componentname;
+struct devfs_dirent;
+struct ifnet;
+struct ifreq;
+struct image_params;
+struct inpcb;
+struct ip6q;
+struct ipq;
+struct ksem;
+struct label;
+struct m_tag;
+struct mac;
+struct mbuf;
+struct mount;
+struct msg;
+struct msqid_kernel;
+struct proc;
+struct semid_kernel;
+struct shmfd;
+struct shmid_kernel;
+struct sockaddr;
+struct socket;
+struct sysctl_oid;
+struct sysctl_req;
+struct pipepair;
+struct thread;
+struct timespec;
+struct ucred;
+struct vattr;
+struct vnode;
+struct vop_setlabel_args;
+
+#include <rtems/freebsd/sys/acl.h> /* XXX acl_type_t */
+#include <rtems/freebsd/sys/types.h> /* accmode_t */
+
+/*
+ * Entry points to the TrustedBSD MAC Framework from the remainder of the
+ * kernel: entry points are named based on a principal object type and an
+ * action relating to it. They are sorted alphabetically first by object
+ * type and then action. In some situations, the principal object type is
+ * obvious and in other cases less so, as multiple objects may be involved
+ * in the operation.
+ */
+int mac_bpfdesc_check_receive(struct bpf_d *d, struct ifnet *ifp);
+void mac_bpfdesc_create(struct ucred *cred, struct bpf_d *d);
+void mac_bpfdesc_create_mbuf(struct bpf_d *d, struct mbuf *m);
+void mac_bpfdesc_destroy(struct bpf_d *);
+void mac_bpfdesc_init(struct bpf_d *);
+
+void mac_cred_associate_nfsd(struct ucred *cred);
+int mac_cred_check_setaudit(struct ucred *cred, struct auditinfo *ai);
+int mac_cred_check_setaudit_addr(struct ucred *cred,
+ struct auditinfo_addr *aia);
+int mac_cred_check_setauid(struct ucred *cred, uid_t auid);
+int mac_cred_check_setegid(struct ucred *cred, gid_t egid);
+int mac_cred_check_seteuid(struct ucred *cred, uid_t euid);
+int mac_cred_check_setgid(struct ucred *cred, gid_t gid);
+int mac_cred_check_setgroups(struct ucred *cred, int ngroups,
+ gid_t *gidset);
+int mac_cred_check_setregid(struct ucred *cred, gid_t rgid, gid_t egid);
+int mac_cred_check_setresgid(struct ucred *cred, gid_t rgid, gid_t egid,
+ gid_t sgid);
+int mac_cred_check_setresuid(struct ucred *cred, uid_t ruid, uid_t euid,
+ uid_t suid);
+int mac_cred_check_setreuid(struct ucred *cred, uid_t ruid, uid_t euid);
+int mac_cred_check_setuid(struct ucred *cred, uid_t uid);
+int mac_cred_check_visible(struct ucred *cr1, struct ucred *cr2);
+void mac_cred_copy(struct ucred *cr1, struct ucred *cr2);
+void mac_cred_create_init(struct ucred *cred);
+void mac_cred_create_swapper(struct ucred *cred);
+void mac_cred_destroy(struct ucred *);
+void mac_cred_init(struct ucred *);
+
+void mac_devfs_create_device(struct ucred *cred, struct mount *mp,
+ struct cdev *dev, struct devfs_dirent *de);
+void mac_devfs_create_directory(struct mount *mp, char *dirname,
+ int dirnamelen, struct devfs_dirent *de);
+void mac_devfs_create_symlink(struct ucred *cred, struct mount *mp,
+ struct devfs_dirent *dd, struct devfs_dirent *de);
+void mac_devfs_destroy(struct devfs_dirent *);
+void mac_devfs_init(struct devfs_dirent *);
+void mac_devfs_update(struct mount *mp, struct devfs_dirent *de,
+ struct vnode *vp);
+void mac_devfs_vnode_associate(struct mount *mp, struct devfs_dirent *de,
+ struct vnode *vp);
+
+int mac_ifnet_check_transmit(struct ifnet *ifp, struct mbuf *m);
+void mac_ifnet_create(struct ifnet *ifp);
+void mac_ifnet_create_mbuf(struct ifnet *ifp, struct mbuf *m);
+void mac_ifnet_destroy(struct ifnet *);
+void mac_ifnet_init(struct ifnet *);
+int mac_ifnet_ioctl_get(struct ucred *cred, struct ifreq *ifr,
+ struct ifnet *ifp);
+int mac_ifnet_ioctl_set(struct ucred *cred, struct ifreq *ifr,
+ struct ifnet *ifp);
+
+int mac_inpcb_check_deliver(struct inpcb *inp, struct mbuf *m);
+int mac_inpcb_check_visible(struct ucred *cred, struct inpcb *inp);
+void mac_inpcb_create(struct socket *so, struct inpcb *inp);
+void mac_inpcb_create_mbuf(struct inpcb *inp, struct mbuf *m);
+void mac_inpcb_destroy(struct inpcb *);
+int mac_inpcb_init(struct inpcb *, int);
+void mac_inpcb_sosetlabel(struct socket *so, struct inpcb *inp);
+
+void mac_ip6q_create(struct mbuf *m, struct ip6q *q6);
+void mac_ip6q_destroy(struct ip6q *q6);
+int mac_ip6q_init(struct ip6q *q6, int);
+int mac_ip6q_match(struct mbuf *m, struct ip6q *q6);
+void mac_ip6q_reassemble(struct ip6q *q6, struct mbuf *m);
+void mac_ip6q_update(struct mbuf *m, struct ip6q *q6);
+
+void mac_ipq_create(struct mbuf *m, struct ipq *q);
+void mac_ipq_destroy(struct ipq *q);
+int mac_ipq_init(struct ipq *q, int);
+int mac_ipq_match(struct mbuf *m, struct ipq *q);
+void mac_ipq_reassemble(struct ipq *q, struct mbuf *m);
+void mac_ipq_update(struct mbuf *m, struct ipq *q);
+
+int mac_kenv_check_dump(struct ucred *cred);
+int mac_kenv_check_get(struct ucred *cred, char *name);
+int mac_kenv_check_set(struct ucred *cred, char *name, char *value);
+int mac_kenv_check_unset(struct ucred *cred, char *name);
+
+int mac_kld_check_load(struct ucred *cred, struct vnode *vp);
+int mac_kld_check_stat(struct ucred *cred);
+
+void mac_mbuf_copy(struct mbuf *, struct mbuf *);
+int mac_mbuf_init(struct mbuf *, int);
+
+void mac_mbuf_tag_copy(struct m_tag *, struct m_tag *);
+void mac_mbuf_tag_destroy(struct m_tag *);
+int mac_mbuf_tag_init(struct m_tag *, int);
+
+int mac_mount_check_stat(struct ucred *cred, struct mount *mp);
+void mac_mount_create(struct ucred *cred, struct mount *mp);
+void mac_mount_destroy(struct mount *);
+void mac_mount_init(struct mount *);
+
+void mac_netatalk_aarp_send(struct ifnet *ifp, struct mbuf *m);
+
+void mac_netinet_arp_send(struct ifnet *ifp, struct mbuf *m);
+void mac_netinet_firewall_reply(struct mbuf *mrecv, struct mbuf *msend);
+void mac_netinet_firewall_send(struct mbuf *m);
+void mac_netinet_fragment(struct mbuf *m, struct mbuf *frag);
+void mac_netinet_icmp_reply(struct mbuf *mrecv, struct mbuf *msend);
+void mac_netinet_icmp_replyinplace(struct mbuf *m);
+void mac_netinet_igmp_send(struct ifnet *ifp, struct mbuf *m);
+void mac_netinet_tcp_reply(struct mbuf *m);
+
+void mac_netinet6_nd6_send(struct ifnet *ifp, struct mbuf *m);
+
+int mac_pipe_check_ioctl(struct ucred *cred, struct pipepair *pp,
+ unsigned long cmd, void *data);
+int mac_pipe_check_poll(struct ucred *cred, struct pipepair *pp);
+int mac_pipe_check_read(struct ucred *cred, struct pipepair *pp);
+int mac_pipe_check_stat(struct ucred *cred, struct pipepair *pp);
+int mac_pipe_check_write(struct ucred *cred, struct pipepair *pp);
+void mac_pipe_create(struct ucred *cred, struct pipepair *pp);
+void mac_pipe_destroy(struct pipepair *);
+void mac_pipe_init(struct pipepair *);
+int mac_pipe_label_set(struct ucred *cred, struct pipepair *pp,
+ struct label *label);
+
+int mac_posixsem_check_getvalue(struct ucred *active_cred,
+ struct ucred *file_cred, struct ksem *ks);
+int mac_posixsem_check_open(struct ucred *cred, struct ksem *ks);
+int mac_posixsem_check_post(struct ucred *active_cred,
+ struct ucred *file_cred, struct ksem *ks);
+int mac_posixsem_check_stat(struct ucred *active_cred,
+ struct ucred *file_cred, struct ksem *ks);
+int mac_posixsem_check_unlink(struct ucred *cred, struct ksem *ks);
+int mac_posixsem_check_wait(struct ucred *active_cred,
+ struct ucred *file_cred, struct ksem *ks);
+void mac_posixsem_create(struct ucred *cred, struct ksem *ks);
+void mac_posixsem_destroy(struct ksem *);
+void mac_posixsem_init(struct ksem *);
+
+int mac_posixshm_check_mmap(struct ucred *cred, struct shmfd *shmfd,
+ int prot, int flags);
+int mac_posixshm_check_open(struct ucred *cred, struct shmfd *shmfd);
+int mac_posixshm_check_stat(struct ucred *active_cred,
+ struct ucred *file_cred, struct shmfd *shmfd);
+int mac_posixshm_check_truncate(struct ucred *active_cred,
+ struct ucred *file_cred, struct shmfd *shmfd);
+int mac_posixshm_check_unlink(struct ucred *cred, struct shmfd *shmfd);
+void mac_posixshm_create(struct ucred *cred, struct shmfd *shmfd);
+void mac_posixshm_destroy(struct shmfd *);
+void mac_posixshm_init(struct shmfd *);
+
+int mac_priv_check(struct ucred *cred, int priv);
+int mac_priv_grant(struct ucred *cred, int priv);
+
+int mac_proc_check_debug(struct ucred *cred, struct proc *p);
+int mac_proc_check_sched(struct ucred *cred, struct proc *p);
+int mac_proc_check_signal(struct ucred *cred, struct proc *p,
+ int signum);
+int mac_proc_check_wait(struct ucred *cred, struct proc *p);
+void mac_proc_destroy(struct proc *);
+void mac_proc_init(struct proc *);
+void mac_proc_vm_revoke(struct thread *td);
+int mac_execve_enter(struct image_params *imgp, struct mac *mac_p);
+void mac_execve_exit(struct image_params *imgp);
+void mac_execve_interpreter_enter(struct vnode *interpvp,
+ struct label **interplabel);
+void mac_execve_interpreter_exit(struct label *interpvplabel);
+
+int mac_socket_check_accept(struct ucred *cred, struct socket *so);
+int mac_socket_check_bind(struct ucred *cred, struct socket *so,
+ struct sockaddr *sa);
+int mac_socket_check_connect(struct ucred *cred, struct socket *so,
+ struct sockaddr *sa);
+int mac_socket_check_create(struct ucred *cred, int domain, int type,
+ int proto);
+int mac_socket_check_deliver(struct socket *so, struct mbuf *m);
+int mac_socket_check_listen(struct ucred *cred, struct socket *so);
+int mac_socket_check_poll(struct ucred *cred, struct socket *so);
+int mac_socket_check_receive(struct ucred *cred, struct socket *so);
+int mac_socket_check_send(struct ucred *cred, struct socket *so);
+int mac_socket_check_stat(struct ucred *cred, struct socket *so);
+int mac_socket_check_visible(struct ucred *cred, struct socket *so);
+void mac_socket_create_mbuf(struct socket *so, struct mbuf *m);
+void mac_socket_create(struct ucred *cred, struct socket *so);
+void mac_socket_destroy(struct socket *);
+int mac_socket_init(struct socket *, int);
+void mac_socket_newconn(struct socket *oldso, struct socket *newso);
+int mac_getsockopt_label(struct ucred *cred, struct socket *so,
+ struct mac *extmac);
+int mac_getsockopt_peerlabel(struct ucred *cred, struct socket *so,
+ struct mac *extmac);
+int mac_setsockopt_label(struct ucred *cred, struct socket *so,
+ struct mac *extmac);
+
+void mac_socketpeer_set_from_mbuf(struct mbuf *m, struct socket *so);
+void mac_socketpeer_set_from_socket(struct socket *oldso,
+ struct socket *newso);
+
+void mac_syncache_create(struct label *l, struct inpcb *inp);
+void mac_syncache_create_mbuf(struct label *l, struct mbuf *m);
+void mac_syncache_destroy(struct label **l);
+int mac_syncache_init(struct label **l);
+
+int mac_system_check_acct(struct ucred *cred, struct vnode *vp);
+int mac_system_check_audit(struct ucred *cred, void *record, int length);
+int mac_system_check_auditctl(struct ucred *cred, struct vnode *vp);
+int mac_system_check_auditon(struct ucred *cred, int cmd);
+int mac_system_check_reboot(struct ucred *cred, int howto);
+int mac_system_check_swapon(struct ucred *cred, struct vnode *vp);
+int mac_system_check_swapoff(struct ucred *cred, struct vnode *vp);
+int mac_system_check_sysctl(struct ucred *cred, struct sysctl_oid *oidp,
+ void *arg1, int arg2, struct sysctl_req *req);
+
+void mac_sysvmsg_cleanup(struct msg *msgptr);
+void mac_sysvmsg_create(struct ucred *cred, struct msqid_kernel *msqkptr,
+ struct msg *msgptr);
+void mac_sysvmsg_destroy(struct msg *);
+void mac_sysvmsg_init(struct msg *);
+
+int mac_sysvmsq_check_msgmsq(struct ucred *cred, struct msg *msgptr,
+ struct msqid_kernel *msqkptr);
+int mac_sysvmsq_check_msgrcv(struct ucred *cred, struct msg *msgptr);
+int mac_sysvmsq_check_msgrmid(struct ucred *cred, struct msg *msgptr);
+int mac_sysvmsq_check_msqctl(struct ucred *cred,
+ struct msqid_kernel *msqkptr, int cmd);
+int mac_sysvmsq_check_msqget(struct ucred *cred,
+ struct msqid_kernel *msqkptr);
+int mac_sysvmsq_check_msqrcv(struct ucred *cred,
+ struct msqid_kernel *msqkptr);
+int mac_sysvmsq_check_msqsnd(struct ucred *cred,
+ struct msqid_kernel *msqkptr);
+void mac_sysvmsq_cleanup(struct msqid_kernel *msqkptr);
+void mac_sysvmsq_create(struct ucred *cred, struct msqid_kernel *msqkptr);
+void mac_sysvmsq_destroy(struct msqid_kernel *);
+void mac_sysvmsq_init(struct msqid_kernel *);
+
+int mac_sysvsem_check_semctl(struct ucred *cred,
+ struct semid_kernel *semakptr, int cmd);
+int mac_sysvsem_check_semget(struct ucred *cred,
+ struct semid_kernel *semakptr);
+int mac_sysvsem_check_semop(struct ucred *cred,
+ struct semid_kernel *semakptr, size_t accesstype);
+void mac_sysvsem_cleanup(struct semid_kernel *semakptr);
+void mac_sysvsem_create(struct ucred *cred,
+ struct semid_kernel *semakptr);
+void mac_sysvsem_destroy(struct semid_kernel *);
+void mac_sysvsem_init(struct semid_kernel *);
+
+int mac_sysvshm_check_shmat(struct ucred *cred,
+ struct shmid_kernel *shmsegptr, int shmflg);
+int mac_sysvshm_check_shmctl(struct ucred *cred,
+ struct shmid_kernel *shmsegptr, int cmd);
+int mac_sysvshm_check_shmdt(struct ucred *cred,
+ struct shmid_kernel *shmsegptr);
+int mac_sysvshm_check_shmget(struct ucred *cred,
+ struct shmid_kernel *shmsegptr, int shmflg);
+void mac_sysvshm_cleanup(struct shmid_kernel *shmsegptr);
+void mac_sysvshm_create(struct ucred *cred,
+ struct shmid_kernel *shmsegptr);
+void mac_sysvshm_destroy(struct shmid_kernel *);
+void mac_sysvshm_init(struct shmid_kernel *);
+
+void mac_thread_userret(struct thread *td);
+
+int mac_vnode_associate_extattr(struct mount *mp, struct vnode *vp);
+void mac_vnode_associate_singlelabel(struct mount *mp, struct vnode *vp);
+int mac_vnode_check_access(struct ucred *cred, struct vnode *vp,
+ accmode_t accmode);
+int mac_vnode_check_chdir(struct ucred *cred, struct vnode *dvp);
+int mac_vnode_check_chroot(struct ucred *cred, struct vnode *dvp);
+int mac_vnode_check_create(struct ucred *cred, struct vnode *dvp,
+ struct componentname *cnp, struct vattr *vap);
+int mac_vnode_check_deleteacl(struct ucred *cred, struct vnode *vp,
+ acl_type_t type);
+int mac_vnode_check_deleteextattr(struct ucred *cred, struct vnode *vp,
+ int attrnamespace, const char *name);
+int mac_vnode_check_exec(struct ucred *cred, struct vnode *vp,
+ struct image_params *imgp);
+int mac_vnode_check_getacl(struct ucred *cred, struct vnode *vp,
+ acl_type_t type);
+int mac_vnode_check_getextattr(struct ucred *cred, struct vnode *vp,
+ int attrnamespace, const char *name);
+int mac_vnode_check_link(struct ucred *cred, struct vnode *dvp,
+ struct vnode *vp, struct componentname *cnp);
+int mac_vnode_check_listextattr(struct ucred *cred, struct vnode *vp,
+ int attrnamespace);
+int mac_vnode_check_lookup(struct ucred *cred, struct vnode *dvp,
+ struct componentname *cnp);
+int mac_vnode_check_mmap(struct ucred *cred, struct vnode *vp, int prot,
+ int flags);
+int mac_vnode_check_mprotect(struct ucred *cred, struct vnode *vp,
+ int prot);
+int mac_vnode_check_open(struct ucred *cred, struct vnode *vp,
+ accmode_t accmode);
+int mac_vnode_check_poll(struct ucred *active_cred,
+ struct ucred *file_cred, struct vnode *vp);
+int mac_vnode_check_read(struct ucred *active_cred,
+ struct ucred *file_cred, struct vnode *vp);
+int mac_vnode_check_readdir(struct ucred *cred, struct vnode *vp);
+int mac_vnode_check_readlink(struct ucred *cred, struct vnode *vp);
+int mac_vnode_check_rename_from(struct ucred *cred, struct vnode *dvp,
+ struct vnode *vp, struct componentname *cnp);
+int mac_vnode_check_rename_to(struct ucred *cred, struct vnode *dvp,
+ struct vnode *vp, int samedir, struct componentname *cnp);
+int mac_vnode_check_revoke(struct ucred *cred, struct vnode *vp);
+int mac_vnode_check_setacl(struct ucred *cred, struct vnode *vp,
+ acl_type_t type, struct acl *acl);
+int mac_vnode_check_setextattr(struct ucred *cred, struct vnode *vp,
+ int attrnamespace, const char *name);
+int mac_vnode_check_setflags(struct ucred *cred, struct vnode *vp,
+ u_long flags);
+int mac_vnode_check_setmode(struct ucred *cred, struct vnode *vp,
+ mode_t mode);
+int mac_vnode_check_setowner(struct ucred *cred, struct vnode *vp,
+ uid_t uid, gid_t gid);
+int mac_vnode_check_setutimes(struct ucred *cred, struct vnode *vp,
+ struct timespec atime, struct timespec mtime);
+int mac_vnode_check_stat(struct ucred *active_cred,
+ struct ucred *file_cred, struct vnode *vp);
+int mac_vnode_check_unlink(struct ucred *cred, struct vnode *dvp,
+ struct vnode *vp, struct componentname *cnp);
+int mac_vnode_check_write(struct ucred *active_cred,
+ struct ucred *file_cred, struct vnode *vp);
+void mac_vnode_copy_label(struct label *, struct label *);
+void mac_vnode_init(struct vnode *);
+int mac_vnode_create_extattr(struct ucred *cred, struct mount *mp,
+ struct vnode *dvp, struct vnode *vp, struct componentname *cnp);
+void mac_vnode_destroy(struct vnode *);
+void mac_vnode_execve_transition(struct ucred *oldcred,
+ struct ucred *newcred, struct vnode *vp,
+ struct label *interpvplabel, struct image_params *imgp);
+int mac_vnode_execve_will_transition(struct ucred *cred,
+ struct vnode *vp, struct label *interpvplabel,
+ struct image_params *imgp);
+void mac_vnode_relabel(struct ucred *cred, struct vnode *vp,
+ struct label *newlabel);
+
+/*
+ * Calls to help various file systems implement labeling functionality using
+ * their existing EA implementation.
+ */
+int vop_stdsetlabel_ea(struct vop_setlabel_args *ap);
+
+#endif /* !_SECURITY_MAC_MAC_FRAMEWORK_HH_ */
diff --git a/rtems/freebsd/sys/_bus_dma.h b/rtems/freebsd/sys/_bus_dma.h
new file mode 100644
index 00000000..8d60c957
--- /dev/null
+++ b/rtems/freebsd/sys/_bus_dma.h
@@ -0,0 +1,63 @@
+/*-
+ * Copyright 2006 John-Mark Gurney.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ *
+ */
+
+#ifndef _SYS__BUS_DMA_HH_
+#define _SYS__BUS_DMA_HH_
+
+typedef int bus_dmasync_op_t;
+
+/*
+ * bus_dma_tag_t
+ *
+ * A machine-dependent opaque type describing the characteristics
+ * of how to perform DMA mappings. This structure encapsulates
+ * information concerning address and alignment restrictions, number
+ * of S/G segments, amount of data per S/G segment, etc.
+ */
+typedef struct bus_dma_tag *bus_dma_tag_t;
+
+/*
+ * bus_dmamap_t
+ *
+ * DMA mapping instance information.
+ */
+typedef struct bus_dmamap *bus_dmamap_t;
+
+/*
+ * A function that performs driver-specific synchronization on behalf of
+ * busdma.
+ */
+typedef enum {
+ BUS_DMA_LOCK = 0x01,
+ BUS_DMA_UNLOCK = 0x02,
+} bus_dma_lock_op_t;
+
+typedef void bus_dma_lock_t(void *, bus_dma_lock_op_t);
+
+#endif /* !_SYS__BUS_DMA_HH_ */
diff --git a/rtems/freebsd/sys/_iovec.h b/rtems/freebsd/sys/_iovec.h
new file mode 100644
index 00000000..62868661
--- /dev/null
+++ b/rtems/freebsd/sys/_iovec.h
@@ -0,0 +1,48 @@
+/*-
+ * Copyright (c) 1982, 1986, 1993, 1994
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)uio.h 8.5 (Berkeley) 2/22/94
+ * $FreeBSD$
+ */
+
+#ifndef _SYS__IOVEC_HH_
+#define _SYS__IOVEC_HH_
+
+#include <rtems/freebsd/sys/_types.h>
+
+#ifndef _SIZE_T_DECLARED
+typedef __size_t size_t;
+#define _SIZE_T_DECLARED
+#endif
+
+struct iovec {
+ void *iov_base; /* Base address. */
+ size_t iov_len; /* Length. */
+};
+
+#endif /* !_SYS__IOVEC_HH_ */
diff --git a/rtems/freebsd/sys/_lock.h b/rtems/freebsd/sys/_lock.h
new file mode 100644
index 00000000..eb13f2aa
--- /dev/null
+++ b/rtems/freebsd/sys/_lock.h
@@ -0,0 +1,47 @@
+/*-
+ * Copyright (c) 1997 Berkeley Software Design, Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Berkeley Software Design Inc's name may not be used to endorse or
+ * promote products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _SYS__LOCK_HH_
+#define _SYS__LOCK_HH_
+
+struct lock_object {
+#ifdef __rtems__
+ rtems_chain_node lo_node;
+ rtems_id lo_id;
+#endif /* __rtems__ */
+ const char *lo_name; /* Individual lock name. */
+ u_int lo_flags;
+ u_int lo_data; /* General class specific data. */
+#ifndef __rtems__
+ struct witness *lo_witness; /* Data for witness. */
+#endif /* __rtems__ */
+};
+
+#endif /* !_SYS__LOCK_HH_ */
diff --git a/rtems/freebsd/sys/_lockmgr.h b/rtems/freebsd/sys/_lockmgr.h
new file mode 100644
index 00000000..985b31cf
--- /dev/null
+++ b/rtems/freebsd/sys/_lockmgr.h
@@ -0,0 +1,48 @@
+/*-
+ * Copyright (c) 2008 Attilio Rao <attilio@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice(s), this list of conditions and the following disclaimer as
+ * the first lines of this file unmodified other than the possible
+ * addition of one or more copyright notices.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice(s), this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _SYS__LOCKMGR_HH_
+#define _SYS__LOCKMGR_HH_
+
+#ifdef DEBUG_LOCKS
+#include <rtems/freebsd/sys/_stack.h>
+#endif
+
+struct lock {
+ struct lock_object lock_object;
+ volatile uintptr_t lk_lock;
+ int lk_timo;
+ int lk_pri;
+#ifdef DEBUG_LOCKS
+ struct stack lk_stack;
+#endif
+};
+
+#endif
diff --git a/rtems/freebsd/sys/_mutex.h b/rtems/freebsd/sys/_mutex.h
new file mode 100644
index 00000000..eac45780
--- /dev/null
+++ b/rtems/freebsd/sys/_mutex.h
@@ -0,0 +1,44 @@
+/*-
+ * Copyright (c) 1997 Berkeley Software Design, Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Berkeley Software Design Inc's name may not be used to endorse or
+ * promote products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _SYS__MUTEX_HH_
+#define _SYS__MUTEX_HH_
+
+/*
+ * Sleep/spin mutex.
+ */
+struct mtx {
+ struct lock_object lock_object; /* Common lock properties. */
+#ifndef __rtems__
+ volatile uintptr_t mtx_lock; /* Owner and flags. */
+#endif /* __rtems__ */
+};
+
+#endif /* !_SYS__MUTEX_HH_ */
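
On FreeBSD proper the owner and flag bits live in the mtx_lock word; under __rtems__ that word is absent and the state is carried via the lo_id of the embedded lock_object. A minimal kernel-side sketch, assuming the usual FreeBSD mutex(9) API (mtx_init/mtx_lock/mtx_unlock from sys/mutex.h) and this tree's include prefix:

#include <rtems/freebsd/sys/param.h>
#include <rtems/freebsd/sys/lock.h>
#include <rtems/freebsd/sys/mutex.h>

static struct mtx example_mtx;
static int example_counter;

static void
example_setup(void)
{
	/* MTX_DEF selects a default (sleepable) mutex. */
	mtx_init(&example_mtx, "example", NULL, MTX_DEF);
}

static void
example_bump(void)
{
	mtx_lock(&example_mtx);
	example_counter++;		/* protected by example_mtx */
	mtx_unlock(&example_mtx);
}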
diff --git a/rtems/freebsd/sys/_null.h b/rtems/freebsd/sys/_null.h
new file mode 100644
index 00000000..ed6804cc
--- /dev/null
+++ b/rtems/freebsd/sys/_null.h
@@ -0,0 +1,45 @@
+/*-
+ * Copyright (c) 2003 Marcel Moolenaar
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef NULL
+
+#if !defined(__cplusplus)
+#define NULL ((void *)0)
+#else
+#if defined(__GNUG__) && defined(__GNUC__) && __GNUC__ >= 4
+#define NULL __null
+#else
+#if defined(__LP64__)
+#define NULL (0L)
+#else
+#define NULL 0
+#endif /* __LP64__ */
+#endif /* __GNUG__ */
+#endif /* !__cplusplus */
+
+#endif
diff --git a/rtems/freebsd/sys/_pthreadtypes.h b/rtems/freebsd/sys/_pthreadtypes.h
new file mode 100644
index 00000000..936ffd88
--- /dev/null
+++ b/rtems/freebsd/sys/_pthreadtypes.h
@@ -0,0 +1 @@
+/* EMPTY */
diff --git a/rtems/freebsd/sys/_rmlock.h b/rtems/freebsd/sys/_rmlock.h
new file mode 100644
index 00000000..592d73bb
--- /dev/null
+++ b/rtems/freebsd/sys/_rmlock.h
@@ -0,0 +1,62 @@
+/*-
+ * Copyright (c) 2007 Stephan Uphoff <ups@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the author nor the names of any co-contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _SYS__RMLOCK_HH_
+#define _SYS__RMLOCK_HH_
+
+/*
+ * XXXUPS: remove as soon as we have per-cpu variable
+ * linker sets and can define rm_queue in _rm_lock.h.
+ */
+#include <rtems/freebsd/sys/pcpu.h>
+/*
+ * Mostly reader/occasional writer lock.
+ */
+
+LIST_HEAD(rmpriolist,rm_priotracker);
+
+struct rmlock {
+ struct lock_object lock_object;
+ volatile int rm_noreadtoken;
+ LIST_HEAD(,rm_priotracker) rm_activeReaders;
+ struct mtx rm_lock;
+
+};
+
+struct rm_priotracker {
+ struct rm_queue rmp_cpuQueue; /* Must be first */
+ struct rmlock *rmp_rmlock;
+ struct thread *rmp_thread;
+ int rmp_flags;
+ LIST_ENTRY(rm_priotracker) rmp_qentry;
+};
+
+#endif /* !_SYS__RMLOCK_HH_ */
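
Each reader supplies its own rm_priotracker, typically on the stack; that per-reader record is what keeps the read path cheap while still letting a writer find and wait out active readers. A sketch, assuming the FreeBSD rmlock(9) API (rm_init/rm_rlock/rm_runlock, declared in sys/rmlock.h):

static struct rmlock cfg_lock;		/* rm_init(&cfg_lock, "cfg") at startup */
static int cfg_value;			/* hypothetical shared datum */

static int
cfg_read(void)
{
	struct rm_priotracker tracker;	/* per-reader state, stack-allocated */
	int v;

	rm_rlock(&cfg_lock, &tracker);
	v = cfg_value;
	rm_runlock(&cfg_lock, &tracker);
	return (v);
}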
diff --git a/rtems/freebsd/sys/_rwlock.h b/rtems/freebsd/sys/_rwlock.h
new file mode 100644
index 00000000..536ba1dd
--- /dev/null
+++ b/rtems/freebsd/sys/_rwlock.h
@@ -0,0 +1,43 @@
+/*-
+ * Copyright (c) 2006 John Baldwin <jhb@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the author nor the names of any co-contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _SYS__RWLOCK_HH_
+#define _SYS__RWLOCK_HH_
+
+/*
+ * Reader/writer lock.
+ */
+struct rwlock {
+ struct lock_object lock_object;
+ volatile uintptr_t rw_lock;
+};
+
+#endif /* !_SYS__RWLOCK_HH_ */
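
Both reader and writer state are packed into the single rw_lock word. A sketch, assuming the FreeBSD rwlock(9) API (rw_init/rw_rlock/rw_wlock and friends from sys/rwlock.h):

static struct rwlock tbl_lock;		/* rw_init(&tbl_lock, "tbl") at startup */
static int tbl_entries;			/* hypothetical shared datum */

static int
tbl_count(void)
{
	int n;

	rw_rlock(&tbl_lock);		/* many readers may hold this at once */
	n = tbl_entries;
	rw_runlock(&tbl_lock);
	return (n);
}

static void
tbl_add(void)
{
	rw_wlock(&tbl_lock);		/* writers get exclusive access */
	tbl_entries++;
	rw_wunlock(&tbl_lock);
}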
diff --git a/rtems/freebsd/sys/_semaphore.h b/rtems/freebsd/sys/_semaphore.h
new file mode 100644
index 00000000..c04f3704
--- /dev/null
+++ b/rtems/freebsd/sys/_semaphore.h
@@ -0,0 +1,73 @@
+/*-
+ * Copyright (c) 2002 Alfred Perlstein <alfred@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+#ifndef __SEMAPHORE_HH_
+#define __SEMAPHORE_HH_
+
+typedef intptr_t semid_t;
+struct timespec;
+
+#ifndef _KERNEL
+
+#include <rtems/freebsd/sys/cdefs.h>
+
+/*
+ * Semaphore definitions.
+ */
+struct sem {
+#define SEM_MAGIC ((u_int32_t) 0x09fa4012)
+ u_int32_t magic;
+ pthread_mutex_t lock;
+ pthread_cond_t gtzero;
+ u_int32_t count;
+ u_int32_t nwaiters;
+#define SEM_USER (NULL)
+ semid_t semid; /* semaphore id if kernel (shared) semaphore */
+ int syssem; /* 1 if kernel (shared) semaphore */
+ LIST_ENTRY(sem) entry;
+ struct sem **backpointer;
+};
+
+__BEGIN_DECLS
+
+int ksem_close(semid_t id);
+int ksem_post(semid_t id);
+int ksem_wait(semid_t id);
+int ksem_trywait(semid_t id);
+int ksem_timedwait(semid_t id, const struct timespec *abstime);
+int ksem_init(semid_t *idp, unsigned int value);
+int ksem_open(semid_t *idp, const char *name, int oflag, mode_t mode,
+ unsigned int value);
+int ksem_unlink(const char *name);
+int ksem_getvalue(semid_t id, int *val);
+int ksem_destroy(semid_t id);
+
+__END_DECLS
+
+#endif /* !_KERNEL */
+
+#endif /* __SEMAPHORE_HH_ */
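
The ksem_*() calls declared above follow POSIX sem_*() semantics but operate on kernel semaphore ids. A user-level sketch using only those prototypes (the direct include of the underscore header is an assumption of this tree's layout):

#include <rtems/freebsd/sys/_semaphore.h>

static semid_t work_sem;

static int
work_setup(void)
{
	return (ksem_init(&work_sem, 0));	/* initial count 0 */
}

static void
work_produce(void)
{
	ksem_post(work_sem);		/* wake one waiter */
}

static void
work_consume(void)
{
	ksem_wait(work_sem);		/* block until posted */
}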
diff --git a/rtems/freebsd/sys/_sigset.h b/rtems/freebsd/sys/_sigset.h
new file mode 100644
index 00000000..52028110
--- /dev/null
+++ b/rtems/freebsd/sys/_sigset.h
@@ -0,0 +1,59 @@
+/*-
+ * Copyright (c) 1982, 1986, 1989, 1991, 1993
+ * The Regents of the University of California. All rights reserved.
+ * (c) UNIX System Laboratories, Inc.
+ * All or some portions of this file are derived from material licensed
+ * to the University of California by American Telephone and Telegraph
+ * Co. or Unix System Laboratories, Inc. and are reproduced herein with
+ * the permission of UNIX System Laboratories, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)signal.h 8.4 (Berkeley) 5/4/95
+ * $FreeBSD$
+ */
+
+#ifndef _SYS__SIGSET_HH_
+#define _SYS__SIGSET_HH_
+
+/*
+ * sigset_t macros.
+ */
+#define _SIG_WORDS 4
+#define _SIG_MAXSIG 128
+#define _SIG_IDX(sig) ((sig) - 1)
+#define _SIG_WORD(sig) (_SIG_IDX(sig) >> 5)
+#define _SIG_BIT(sig) (1 << (_SIG_IDX(sig) & 31))
+#define _SIG_VALID(sig) ((sig) <= _SIG_MAXSIG && (sig) > 0)
+
+typedef struct __sigset {
+ __uint32_t __bits[_SIG_WORDS];
+} __sigset_t;
+
+#if defined(_KERNEL) && defined(COMPAT_43)
+typedef unsigned int osigset_t;
+#endif
+
+#endif /* !_SYS__SIGSET_HH_ */
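
The macros pack signals 1 through _SIG_MAXSIG into the four 32-bit words of __bits: _SIG_WORD() selects the word and _SIG_BIT() the bit within it. Adding a signal to a set is therefore a single OR, as in this sketch built only from the definitions above:

static int
example_sigaddset(__sigset_t *set, int sig)
{
	if (!_SIG_VALID(sig))
		return (-1);		/* out of range */
	set->__bits[_SIG_WORD(sig)] |= _SIG_BIT(sig);
	return (0);
}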
diff --git a/rtems/freebsd/sys/_sx.h b/rtems/freebsd/sys/_sx.h
new file mode 100644
index 00000000..ff8f821e
--- /dev/null
+++ b/rtems/freebsd/sys/_sx.h
@@ -0,0 +1,44 @@
+/*-
+ * Copyright (c) 2007 Attilio Rao <attilio@freebsd.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice(s), this list of conditions and the following disclaimer as
+ * the first lines of this file unmodified other than the possible
+ * addition of one or more copyright notices.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice(s), this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _SYS__SX_HH_
+#define _SYS__SX_HH_
+
+/*
+ * Shared/exclusive lock main structure definition.
+ */
+struct sx {
+ struct lock_object lock_object;
+#ifndef __rtems__
+ volatile uintptr_t sx_lock;
+#endif /* __rtems__ */
+};
+
+#endif /* !_SYS__SX_HH_ */
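
An sx lock is the sleepable counterpart of the rwlock: both shared (slock) and exclusive (xlock) holders may block. A sketch, assuming the FreeBSD sx(9) API (sx_init/sx_xlock/sx_xunlock from sys/sx.h):

static struct sx reg_lock;	/* sx_init(&reg_lock, "registry") at startup */
static int reg_entries;		/* hypothetical shared datum */

static void
reg_add(void)
{
	sx_xlock(&reg_lock);	/* exclusive; the holder may sleep */
	reg_entries++;
	sx_xunlock(&reg_lock);
}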
diff --git a/rtems/freebsd/sys/_task.h b/rtems/freebsd/sys/_task.h
new file mode 100644
index 00000000..51b032ed
--- /dev/null
+++ b/rtems/freebsd/sys/_task.h
@@ -0,0 +1,50 @@
+/*-
+ * Copyright (c) 2000 Doug Rabson
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _SYS__TASK_HH_
+#define _SYS__TASK_HH_
+
+#include <rtems/freebsd/sys/queue.h>
+
+/*
+ * Each task includes a function which is called from
+ * taskqueue_run(). The first argument is taken from the 'ta_context'
+ * field of struct task and the second argument is a count of how many
+ * times the task was enqueued before the call to taskqueue_run().
+ */
+typedef void task_fn_t(void *context, int pending);
+
+struct task {
+ STAILQ_ENTRY(task) ta_link; /* link for queue */
+ u_short ta_pending; /* count times queued */
+ u_short ta_priority; /* Priority */
+ task_fn_t *ta_func; /* task handler */
+ void *ta_context; /* argument for handler */
+};
+
+#endif /* !_SYS__TASK_HH_ */
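
Because ta_pending counts the enqueues that were coalesced while the task sat on the queue, a handler can batch work instead of running once per request. A sketch, assuming the FreeBSD taskqueue(9) interface (TASK_INIT, taskqueue_enqueue and the global taskqueue_thread queue from sys/taskqueue.h):

static int flush_count;

static void
flush_handler(void *ctx, int pending)
{
	/* 'pending' = number of enqueues folded into this single run. */
	flush_count += pending;
}

static struct task flush_task;

static void
flush_later(void)
{
	TASK_INIT(&flush_task, 0, flush_handler, NULL);	/* priority 0 */
	taskqueue_enqueue(taskqueue_thread, &flush_task);
}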
diff --git a/rtems/freebsd/sys/_timeval.h b/rtems/freebsd/sys/_timeval.h
new file mode 100644
index 00000000..936ffd88
--- /dev/null
+++ b/rtems/freebsd/sys/_timeval.h
@@ -0,0 +1 @@
+/* EMPTY */
diff --git a/rtems/freebsd/sys/_types.h b/rtems/freebsd/sys/_types.h
new file mode 100644
index 00000000..56f11486
--- /dev/null
+++ b/rtems/freebsd/sys/_types.h
@@ -0,0 +1,105 @@
+/*-
+ * Copyright (c) 2002 Mike Barcroft <mike@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _SYS__TYPES_HH_
+#define _SYS__TYPES_HH_
+
+#include <rtems/freebsd/sys/cdefs.h>
+#include <rtems/freebsd/machine/_types.h>
+
+/*
+ * Standard type definitions.
+ */
+typedef __uint32_t __blksize_t; /* file block size */
+typedef __int64_t __blkcnt_t; /* file block count */
+typedef __int32_t __clockid_t; /* clock_gettime()... */
+typedef __uint32_t __fflags_t; /* file flags */
+typedef __uint64_t __fsblkcnt_t;
+typedef __uint64_t __fsfilcnt_t;
+typedef __uint32_t __gid_t;
+typedef __int64_t __id_t; /* can hold a gid_t, pid_t, or uid_t */
+typedef __uint32_t __ino_t; /* inode number */
+typedef long __key_t; /* IPC key (for Sys V IPC) */
+typedef __int32_t __lwpid_t; /* Thread ID (a.k.a. LWP) */
+typedef __uint16_t __mode_t; /* permissions */
+typedef int __accmode_t; /* access permissions */
+typedef int __nl_item;
+typedef __uint16_t __nlink_t; /* link count */
+typedef __int64_t __off_t; /* file offset */
+typedef __int32_t __pid_t; /* process [group] */
+typedef __int64_t __rlim_t; /* resource limit - intentionally */
+ /* signed, because of legacy code */
+ /* that uses -1 for RLIM_INFINITY */
+typedef __uint8_t __sa_family_t;
+typedef __uint32_t __socklen_t;
+typedef long __suseconds_t; /* microseconds (signed) */
+typedef struct __timer *__timer_t; /* timer_gettime()... */
+typedef struct __mq *__mqd_t; /* mq_open()... */
+typedef __uint32_t __uid_t;
+typedef unsigned int __useconds_t; /* microseconds (unsigned) */
+typedef int __cpuwhich_t; /* which parameter for cpuset. */
+typedef int __cpulevel_t; /* level parameter for cpuset. */
+typedef int __cpusetid_t; /* cpuset identifier. */
+
+/*
+ * Unusual type definitions.
+ */
+/*
+ * rune_t is declared to be an ``int'' instead of the more natural
+ * ``unsigned long'' or ``long''. Two things are happening here. It is not
+ * unsigned so that EOF (-1) can be naturally assigned to it and used. Also,
+ * it looks like 10646 will be a 31 bit standard. This means that if your
+ * ints cannot hold 32 bits, you will be in trouble. The reason an int was
+ * chosen over a long is that the is*() and to*() routines take ints (says
+ * ANSI C), but they use __ct_rune_t instead of int.
+ *
+ * NOTE: rune_t is not covered by ANSI nor other standards, and should not
+ * be instantiated outside of lib/libc/locale. Use wchar_t. wchar_t and
+ * rune_t must be the same type. Also, wint_t must be no narrower than
+ * wchar_t, and should be able to hold all members of the largest
+ * character set plus one extra value (WEOF), and must be at least 16 bits.
+ */
+typedef int __ct_rune_t; /* arg type for ctype funcs */
+typedef __ct_rune_t __rune_t; /* rune_t (see above) */
+typedef __ct_rune_t __wchar_t; /* wchar_t (see above) */
+typedef __ct_rune_t __wint_t; /* wint_t (see above) */
+
+typedef __uint32_t __dev_t; /* device number */
+
+typedef __uint32_t __fixpt_t; /* fixed point number */
+
+/*
+ * mbstate_t is an opaque object to keep conversion state during multibyte
+ * stream conversions.
+ */
+typedef union {
+ char __mbstate8[128];
+ __int64_t _mbstateL; /* for alignment */
+} __mbstate_t;
+
+#endif /* !_SYS__TYPES_HH_ */
diff --git a/rtems/freebsd/sys/acl.h b/rtems/freebsd/sys/acl.h
new file mode 100644
index 00000000..79224aff
--- /dev/null
+++ b/rtems/freebsd/sys/acl.h
@@ -0,0 +1,407 @@
+/*-
+ * Copyright (c) 1999-2001 Robert N. M. Watson
+ * Copyright (c) 2008 Edward Tomasz Napierała <trasz@FreeBSD.org>
+ * All rights reserved.
+ *
+ * This software was developed by Robert Watson for the TrustedBSD Project.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+/*
+ * Developed by the TrustedBSD Project.
+ * Support for POSIX.1e and NFSv4 access control lists.
+ */
+
+#ifndef _SYS_ACL_HH_
+#define _SYS_ACL_HH_
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/queue.h>
+#include <rtems/freebsd/vm/uma.h>
+
+/*
+ * POSIX.1e and NFSv4 ACL types and related constants.
+ */
+
+typedef uint32_t acl_tag_t;
+typedef uint32_t acl_perm_t;
+typedef uint16_t acl_entry_type_t;
+typedef uint16_t acl_flag_t;
+typedef int acl_type_t;
+typedef int *acl_permset_t;
+typedef uint16_t *acl_flagset_t;
+
+/*
+ * With 254 entries, "struct acl_t_struct" is exactly one 4kB page in size.
+ * Note that with NFSv4 ACLs, the maximum number of ACL entries one
+ * may set on a file or directory is about half of ACL_MAX_ENTRIES.
+ *
+ * If you increase this, you might also need to increase
+ * _ACL_T_ALIGNMENT_BITS in lib/libc/posix1e/acl_support.h.
+ *
+ * The maximum number of POSIX.1e ACL entries is controlled
+ * by OLDACL_MAX_ENTRIES. Changing that one will break binary
+ * compatibility with pre-8.0 userland and change the on-disk ACL layout.
+ */
+#define ACL_MAX_ENTRIES 254
+
+#if defined(_KERNEL) || defined(_ACL_PRIVATE)
+
+#define POSIX1E_ACL_ACCESS_EXTATTR_NAMESPACE EXTATTR_NAMESPACE_SYSTEM
+#define POSIX1E_ACL_ACCESS_EXTATTR_NAME "posix1e.acl_access"
+#define POSIX1E_ACL_DEFAULT_EXTATTR_NAMESPACE EXTATTR_NAMESPACE_SYSTEM
+#define POSIX1E_ACL_DEFAULT_EXTATTR_NAME "posix1e.acl_default"
+#define NFS4_ACL_EXTATTR_NAMESPACE EXTATTR_NAMESPACE_SYSTEM
+#define NFS4_ACL_EXTATTR_NAME "nfs4.acl"
+#define OLDACL_MAX_ENTRIES 32
+
+/*
+ * "struct oldacl" is used in compatibility ACL syscalls and for on-disk
+ * storage of POSIX.1e ACLs.
+ */
+typedef int oldacl_tag_t;
+typedef mode_t oldacl_perm_t;
+
+struct oldacl_entry {
+ oldacl_tag_t ae_tag;
+ uid_t ae_id;
+ oldacl_perm_t ae_perm;
+};
+typedef struct oldacl_entry *oldacl_entry_t;
+
+struct oldacl {
+ int acl_cnt;
+ struct oldacl_entry acl_entry[OLDACL_MAX_ENTRIES];
+};
+
+/*
+ * Current "struct acl".
+ */
+struct acl_entry {
+ acl_tag_t ae_tag;
+ uid_t ae_id;
+ acl_perm_t ae_perm;
+ /* NFSv4 entry type, "allow" or "deny". Unused in POSIX.1e ACLs. */
+ acl_entry_type_t ae_entry_type;
+ /* NFSv4 ACL inheritance. Unused in POSIX.1e ACLs. */
+ acl_flag_t ae_flags;
+};
+typedef struct acl_entry *acl_entry_t;
+
+/*
+ * Internal ACL structure, used in libc, kernel APIs and for on-disk
+ * storage of NFSv4 ACLs. POSIX.1e ACLs use "struct oldacl" for on-disk
+ * storage.
+ */
+struct acl {
+ unsigned int acl_maxcnt;
+ unsigned int acl_cnt;
+ /* Will be required e.g. to implement NFSv4.1 ACL inheritance. */
+ int acl_spare[4];
+ struct acl_entry acl_entry[ACL_MAX_ENTRIES];
+};
+
+/*
+ * ACL structure internal to libc.
+ */
+struct acl_t_struct {
+ struct acl ats_acl;
+ int ats_cur_entry;
+ /*
+ * ats_brand is for libc internal bookkeeping only.
+ * Applications should use acl_get_brand_np(3).
+ * Kernel code should use the "type" argument passed
+ * to VOP_SETACL, VOP_GETACL or VOP_ACLCHECK calls;
+ * ACL_TYPE_ACCESS or ACL_TYPE_DEFAULT mean POSIX.1e
+ * ACL, ACL_TYPE_NFS4 means NFSv4 ACL.
+ */
+ int ats_brand;
+};
+typedef struct acl_t_struct *acl_t;
+
+#else /* _KERNEL || _ACL_PRIVATE */
+
+typedef void *acl_entry_t;
+typedef void *acl_t;
+
+#endif /* !_KERNEL && !_ACL_PRIVATE */
+
+/*
+ * Possible valid values for ats_brand field.
+ */
+#define ACL_BRAND_UNKNOWN 0
+#define ACL_BRAND_POSIX 1
+#define ACL_BRAND_NFS4 2
+
+/*
+ * Possible valid values for ae_tag field. For explanation, see acl(9).
+ */
+#define ACL_UNDEFINED_TAG 0x00000000
+#define ACL_USER_OBJ 0x00000001
+#define ACL_USER 0x00000002
+#define ACL_GROUP_OBJ 0x00000004
+#define ACL_GROUP 0x00000008
+#define ACL_MASK 0x00000010
+#define ACL_OTHER 0x00000020
+#define ACL_OTHER_OBJ ACL_OTHER
+#define ACL_EVERYONE 0x00000040
+
+/*
+ * Possible valid values for ae_entry_type field, valid only for NFSv4 ACLs.
+ */
+#define ACL_ENTRY_TYPE_ALLOW 0x0100
+#define ACL_ENTRY_TYPE_DENY 0x0200
+#define ACL_ENTRY_TYPE_AUDIT 0x0400
+#define ACL_ENTRY_TYPE_ALARM 0x0800
+
+/*
+ * Possible valid values for acl_type_t arguments. First two
+ * are provided only for backwards binary compatibility.
+ */
+#define ACL_TYPE_ACCESS_OLD 0x00000000
+#define ACL_TYPE_DEFAULT_OLD 0x00000001
+#define ACL_TYPE_ACCESS 0x00000002
+#define ACL_TYPE_DEFAULT 0x00000003
+#define ACL_TYPE_NFS4 0x00000004
+
+/*
+ * Possible bits in ae_perm field for POSIX.1e ACLs. Note
+ * that ACL_EXECUTE may be used in both NFSv4 and POSIX.1e ACLs.
+ */
+#define ACL_EXECUTE 0x0001
+#define ACL_WRITE 0x0002
+#define ACL_READ 0x0004
+#define ACL_PERM_NONE 0x0000
+#define ACL_PERM_BITS (ACL_EXECUTE | ACL_WRITE | ACL_READ)
+#define ACL_POSIX1E_BITS (ACL_EXECUTE | ACL_WRITE | ACL_READ)
+
+/*
+ * Possible bits in ae_perm field for NFSv4 ACLs.
+ */
+#define ACL_READ_DATA 0x00000008
+#define ACL_LIST_DIRECTORY 0x00000008
+#define ACL_WRITE_DATA 0x00000010
+#define ACL_ADD_FILE 0x00000010
+#define ACL_APPEND_DATA 0x00000020
+#define ACL_ADD_SUBDIRECTORY 0x00000020
+#define ACL_READ_NAMED_ATTRS 0x00000040
+#define ACL_WRITE_NAMED_ATTRS 0x00000080
+/* ACL_EXECUTE is defined above. */
+#define ACL_DELETE_CHILD 0x00000100
+#define ACL_READ_ATTRIBUTES 0x00000200
+#define ACL_WRITE_ATTRIBUTES 0x00000400
+#define ACL_DELETE 0x00000800
+#define ACL_READ_ACL 0x00001000
+#define ACL_WRITE_ACL 0x00002000
+#define ACL_WRITE_OWNER 0x00004000
+#define ACL_SYNCHRONIZE 0x00008000
+
+#define ACL_NFS4_PERM_BITS (ACL_READ_DATA | ACL_WRITE_DATA | \
+ ACL_APPEND_DATA | ACL_READ_NAMED_ATTRS | ACL_WRITE_NAMED_ATTRS | \
+ ACL_EXECUTE | ACL_DELETE_CHILD | ACL_READ_ATTRIBUTES | \
+ ACL_WRITE_ATTRIBUTES | ACL_DELETE | ACL_READ_ACL | ACL_WRITE_ACL | \
+ ACL_WRITE_OWNER | ACL_SYNCHRONIZE)
+
+/*
+ * Possible entry_id values for acl_get_entry(3).
+ */
+#define ACL_FIRST_ENTRY 0
+#define ACL_NEXT_ENTRY 1
+
+/*
+ * Possible values in ae_flags field; valid only for NFSv4 ACLs.
+ */
+#define ACL_ENTRY_FILE_INHERIT 0x0001
+#define ACL_ENTRY_DIRECTORY_INHERIT 0x0002
+#define ACL_ENTRY_NO_PROPAGATE_INHERIT 0x0004
+#define ACL_ENTRY_INHERIT_ONLY 0x0008
+#define ACL_ENTRY_SUCCESSFUL_ACCESS 0x0010
+#define ACL_ENTRY_FAILED_ACCESS 0x0020
+
+#define ACL_FLAGS_BITS (ACL_ENTRY_FILE_INHERIT | \
+ ACL_ENTRY_DIRECTORY_INHERIT | ACL_ENTRY_NO_PROPAGATE_INHERIT | \
+ ACL_ENTRY_INHERIT_ONLY | ACL_ENTRY_SUCCESSFUL_ACCESS | \
+ ACL_ENTRY_FAILED_ACCESS)
+
+/*
+ * Undefined value in ae_id field. ae_id should be set to this value
+ * iff ae_tag is ACL_USER_OBJ, ACL_GROUP_OBJ, ACL_OTHER or ACL_EVERYONE.
+ */
+#define ACL_UNDEFINED_ID ((uid_t)-1)
+
+/*
+ * Possible values for _flags parameter in acl_to_text_np(3).
+ */
+#define ACL_TEXT_VERBOSE 0x01
+#define ACL_TEXT_NUMERIC_IDS 0x02
+#define ACL_TEXT_APPEND_ID 0x04
+
+/*
+ * POSIX.1e ACLs are capable of expressing the read, write, and execute bits
+ * of the POSIX mode field. We provide two masks: one that defines the bits
+ * the ACL will replace in the mode, and the other that defines the bits that
+ * must be preserved when an ACL updates a mode.
+ */
+#define ACL_OVERRIDE_MASK (S_IRWXU | S_IRWXG | S_IRWXO)
+#define ACL_PRESERVE_MASK (~ACL_OVERRIDE_MASK)
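
Merging an ACL-derived mode therefore keeps the non-rwx bits (setuid, setgid, sticky) of the existing mode intact, a sketch using only the two masks:

static mode_t
acl_apply_mode(mode_t old_mode, mode_t acl_mode)
{
	/* Replace only the rwx bits; preserve setuid/setgid/sticky. */
	return ((old_mode & ACL_PRESERVE_MASK) | (acl_mode & ACL_OVERRIDE_MASK));
}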
+
+#ifdef _KERNEL
+
+/*
+ * Filesystem-independent code to move back and forth between POSIX mode and
+ * POSIX.1e ACL representations.
+ */
+acl_perm_t acl_posix1e_mode_to_perm(acl_tag_t tag, mode_t mode);
+struct acl_entry acl_posix1e_mode_to_entry(acl_tag_t tag, uid_t uid,
+ gid_t gid, mode_t mode);
+mode_t acl_posix1e_perms_to_mode(
+ struct acl_entry *acl_user_obj_entry,
+ struct acl_entry *acl_group_obj_entry,
+ struct acl_entry *acl_other_entry);
+mode_t acl_posix1e_acl_to_mode(struct acl *acl);
+mode_t acl_posix1e_newfilemode(mode_t cmode,
+ struct acl *dacl);
+struct acl *acl_alloc(int flags);
+void acl_free(struct acl *aclp);
+
+void acl_nfs4_trivial_from_mode(struct acl *aclp,
+ mode_t mode);
+void acl_nfs4_sync_acl_from_mode(struct acl *aclp,
+ mode_t mode, int file_owner_id);
+void acl_nfs4_sync_mode_from_acl(mode_t *mode,
+ const struct acl *aclp);
+int acl_nfs4_is_trivial(const struct acl *aclp,
+ int file_owner_id);
+void acl_nfs4_compute_inherited_acl(
+ const struct acl *parent_aclp,
+ struct acl *child_aclp, mode_t mode,
+ int file_owner_id, int is_directory);
+int acl_copy_oldacl_into_acl(const struct oldacl *source,
+ struct acl *dest);
+int acl_copy_acl_into_oldacl(const struct acl *source,
+ struct oldacl *dest);
+
+/*
+ * To allocate 'struct acl', use acl_alloc()/acl_free() instead of this.
+ */
+MALLOC_DECLARE(M_ACL);
+/*
+ * Filesystem-independent syntax check for a POSIX.1e ACL.
+ */
+int acl_posix1e_check(struct acl *acl);
+int acl_nfs4_check(const struct acl *aclp, int is_directory);
+
+#else /* !_KERNEL */
+
+#if defined(_ACL_PRIVATE)
+
+/*
+ * Syscall interface -- use the library calls instead as the syscalls have
+ * strict ACL entry ordering requirements.
+ */
+__BEGIN_DECLS
+int __acl_aclcheck_fd(int _filedes, acl_type_t _type, struct acl *_aclp);
+int __acl_aclcheck_file(const char *_path, acl_type_t _type,
+ struct acl *_aclp);
+int __acl_aclcheck_link(const char *_path, acl_type_t _type,
+ struct acl *_aclp);
+int __acl_delete_fd(int _filedes, acl_type_t _type);
+int __acl_delete_file(const char *_path_p, acl_type_t _type);
+int __acl_delete_link(const char *_path_p, acl_type_t _type);
+int __acl_get_fd(int _filedes, acl_type_t _type, struct acl *_aclp);
+int __acl_get_file(const char *_path, acl_type_t _type, struct acl *_aclp);
+int __acl_get_link(const char *_path, acl_type_t _type, struct acl *_aclp);
+int __acl_set_fd(int _filedes, acl_type_t _type, struct acl *_aclp);
+int __acl_set_file(const char *_path, acl_type_t _type, struct acl *_aclp);
+int __acl_set_link(const char *_path, acl_type_t _type, struct acl *_aclp);
+__END_DECLS
+
+#endif /* _ACL_PRIVATE */
+
+/*
+ * The supported POSIX.1e ACL manipulation and assignment/retrieval _np API
+ * calls are local extensions that reflect an environment capable of opening
+ * file descriptors of directories and of allowing additional ACL types for
+ * different filesystems (e.g., AFS).
+ */
+__BEGIN_DECLS
+int acl_add_flag_np(acl_flagset_t _flagset_d, acl_flag_t _flag);
+int acl_add_perm(acl_permset_t _permset_d, acl_perm_t _perm);
+int acl_calc_mask(acl_t *_acl_p);
+int acl_clear_flags_np(acl_flagset_t _flagset_d);
+int acl_clear_perms(acl_permset_t _permset_d);
+int acl_copy_entry(acl_entry_t _dest_d, acl_entry_t _src_d);
+ssize_t acl_copy_ext(void *_buf_p, acl_t _acl, ssize_t _size);
+acl_t acl_copy_int(const void *_buf_p);
+int acl_create_entry(acl_t *_acl_p, acl_entry_t *_entry_p);
+int acl_create_entry_np(acl_t *_acl_p, acl_entry_t *_entry_p, int _index);
+int acl_delete_entry(acl_t _acl, acl_entry_t _entry_d);
+int acl_delete_entry_np(acl_t _acl, int _index);
+int acl_delete_fd_np(int _filedes, acl_type_t _type);
+int acl_delete_file_np(const char *_path_p, acl_type_t _type);
+int acl_delete_link_np(const char *_path_p, acl_type_t _type);
+int acl_delete_def_file(const char *_path_p);
+int acl_delete_def_link_np(const char *_path_p);
+int acl_delete_flag_np(acl_flagset_t _flagset_d, acl_flag_t _flag);
+int acl_delete_perm(acl_permset_t _permset_d, acl_perm_t _perm);
+acl_t acl_dup(acl_t _acl);
+int acl_free(void *_obj_p);
+acl_t acl_from_text(const char *_buf_p);
+int acl_get_brand_np(acl_t _acl, int *_brand_p);
+int acl_get_entry(acl_t _acl, int _entry_id, acl_entry_t *_entry_p);
+acl_t acl_get_fd(int _fd);
+acl_t acl_get_fd_np(int fd, acl_type_t _type);
+acl_t acl_get_file(const char *_path_p, acl_type_t _type);
+int acl_get_entry_type_np(acl_entry_t _entry_d, acl_entry_type_t *_entry_type_p);
+acl_t acl_get_link_np(const char *_path_p, acl_type_t _type);
+void *acl_get_qualifier(acl_entry_t _entry_d);
+int acl_get_flag_np(acl_flagset_t _flagset_d, acl_flag_t _flag);
+int acl_get_perm_np(acl_permset_t _permset_d, acl_perm_t _perm);
+int acl_get_flagset_np(acl_entry_t _entry_d, acl_flagset_t *_flagset_p);
+int acl_get_permset(acl_entry_t _entry_d, acl_permset_t *_permset_p);
+int acl_get_tag_type(acl_entry_t _entry_d, acl_tag_t *_tag_type_p);
+acl_t acl_init(int _count);
+int acl_set_fd(int _fd, acl_t _acl);
+int acl_set_fd_np(int _fd, acl_t _acl, acl_type_t _type);
+int acl_set_file(const char *_path_p, acl_type_t _type, acl_t _acl);
+int acl_set_entry_type_np(acl_entry_t _entry_d, acl_entry_type_t _entry_type);
+int acl_set_link_np(const char *_path_p, acl_type_t _type, acl_t _acl);
+int acl_set_flagset_np(acl_entry_t _entry_d, acl_flagset_t _flagset_d);
+int acl_set_permset(acl_entry_t _entry_d, acl_permset_t _permset_d);
+int acl_set_qualifier(acl_entry_t _entry_d, const void *_tag_qualifier_p);
+int acl_set_tag_type(acl_entry_t _entry_d, acl_tag_t _tag_type);
+ssize_t acl_size(acl_t _acl);
+char *acl_to_text(acl_t _acl, ssize_t *_len_p);
+char *acl_to_text_np(acl_t _acl, ssize_t *_len_p, int _flags);
+int acl_valid(acl_t _acl);
+int acl_valid_fd_np(int _fd, acl_type_t _type, acl_t _acl);
+int acl_valid_file_np(const char *_path_p, acl_type_t _type, acl_t _acl);
+int acl_valid_link_np(const char *_path_p, acl_type_t _type, acl_t _acl);
+int acl_is_trivial_np(const acl_t _acl, int *_trivialp);
+acl_t acl_strip_np(const acl_t _acl, int recalculate_mask);
+__END_DECLS
+
+#endif /* !_KERNEL */
+
+#endif /* !_SYS_ACL_HH_ */
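
A userland sketch of the editing calls declared above, building a minimal three-entry POSIX.1e access ACL (owner rw-, group r--, other ---); error handling is elided and the installed include path is an assumption:

#include <sys/acl.h>

static acl_t
make_basic_acl(void)
{
	acl_t acl = acl_init(3);	/* room for three entries */
	acl_entry_t entry;
	acl_permset_t perms;

	acl_create_entry(&acl, &entry);
	acl_set_tag_type(entry, ACL_USER_OBJ);
	acl_get_permset(entry, &perms);
	acl_clear_perms(perms);
	acl_add_perm(perms, ACL_READ);
	acl_add_perm(perms, ACL_WRITE);

	acl_create_entry(&acl, &entry);
	acl_set_tag_type(entry, ACL_GROUP_OBJ);
	acl_get_permset(entry, &perms);
	acl_clear_perms(perms);
	acl_add_perm(perms, ACL_READ);

	acl_create_entry(&acl, &entry);
	acl_set_tag_type(entry, ACL_OTHER);
	acl_get_permset(entry, &perms);
	acl_clear_perms(perms);

	return (acl);	/* caller releases it with acl_free(acl) */
}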
diff --git a/rtems/freebsd/sys/ata.h b/rtems/freebsd/sys/ata.h
new file mode 100644
index 00000000..ff64fe88
--- /dev/null
+++ b/rtems/freebsd/sys/ata.h
@@ -0,0 +1,560 @@
+/*-
+ * Copyright (c) 2000 - 2008 Søren Schmidt <sos@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer,
+ * without modification, immediately at the beginning of the file.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _SYS_ATA_HH_
+#define _SYS_ATA_HH_
+
+#include <rtems/freebsd/sys/ioccom.h>
+
+/* ATA/ATAPI device parameters */
+struct ata_params {
+/*000*/ u_int16_t config; /* configuration info */
+#define ATA_PROTO_MASK 0x8003
+#define ATA_PROTO_ATAPI 0x8000
+#define ATA_PROTO_ATAPI_12 0x8000
+#define ATA_PROTO_ATAPI_16 0x8001
+#define ATA_PROTO_CFA 0x848a
+#define ATA_ATAPI_TYPE_MASK 0x1f00
+#define ATA_ATAPI_TYPE_DIRECT 0x0000 /* disk/floppy */
+#define ATA_ATAPI_TYPE_TAPE 0x0100 /* streaming tape */
+#define ATA_ATAPI_TYPE_CDROM 0x0500 /* CD-ROM device */
+#define ATA_ATAPI_TYPE_OPTICAL 0x0700 /* optical disk */
+#define ATA_DRQ_MASK 0x0060
+#define ATA_DRQ_SLOW 0x0000 /* cpu 3 ms delay */
+#define ATA_DRQ_INTR 0x0020 /* interrupt 10 ms delay */
+#define ATA_DRQ_FAST 0x0040 /* accel 50 us delay */
+#define ATA_RESP_INCOMPLETE 0x0004
+
+/*001*/ u_int16_t cylinders; /* # of cylinders */
+/*002*/ u_int16_t specconf; /* specific configuration */
+/*003*/ u_int16_t heads; /* # heads */
+ u_int16_t obsolete4;
+ u_int16_t obsolete5;
+/*006*/ u_int16_t sectors; /* # sectors/track */
+/*007*/ u_int16_t vendor7[3];
+/*010*/ u_int8_t serial[20]; /* serial number */
+/*020*/ u_int16_t retired20;
+ u_int16_t retired21;
+ u_int16_t obsolete22;
+/*023*/ u_int8_t revision[8]; /* firmware revision */
+/*027*/ u_int8_t model[40]; /* model name */
+/*047*/ u_int16_t sectors_intr; /* sectors per interrupt */
+/*048*/ u_int16_t usedmovsd; /* double word read/write? */
+/*049*/ u_int16_t capabilities1;
+#define ATA_SUPPORT_DMA 0x0100
+#define ATA_SUPPORT_LBA 0x0200
+#define ATA_SUPPORT_IORDY 0x0400
+#define ATA_SUPPORT_IORDYDIS 0x0800
+#define ATA_SUPPORT_OVERLAP 0x4000
+
+/*050*/ u_int16_t capabilities2;
+/*051*/ u_int16_t retired_piomode; /* PIO modes 0-2 */
+#define ATA_RETIRED_PIO_MASK 0x0300
+
+/*052*/ u_int16_t retired_dmamode; /* DMA modes */
+#define ATA_RETIRED_DMA_MASK 0x0003
+
+/*053*/ u_int16_t atavalid; /* fields valid */
+#define ATA_FLAG_54_58 0x0001 /* words 54-58 valid */
+#define ATA_FLAG_64_70 0x0002 /* words 64-70 valid */
+#define ATA_FLAG_88 0x0004 /* word 88 valid */
+
+/*054*/ u_int16_t current_cylinders;
+/*055*/ u_int16_t current_heads;
+/*056*/ u_int16_t current_sectors;
+/*057*/ u_int16_t current_size_1;
+/*058*/ u_int16_t current_size_2;
+/*059*/ u_int16_t multi;
+#define ATA_MULTI_VALID 0x0100
+
+/*060*/ u_int16_t lba_size_1;
+ u_int16_t lba_size_2;
+ u_int16_t obsolete62;
+/*063*/ u_int16_t mwdmamodes; /* multiword DMA modes */
+/*064*/ u_int16_t apiomodes; /* advanced PIO modes */
+
+/*065*/ u_int16_t mwdmamin; /* min. M/W DMA time/word ns */
+/*066*/ u_int16_t mwdmarec; /* rec. M/W DMA time ns */
+/*067*/ u_int16_t pioblind; /* min. PIO cycle w/o flow */
+/*068*/ u_int16_t pioiordy; /* min. PIO cycle IORDY flow */
+/*069*/ u_int16_t support3;
+#define ATA_SUPPORT_RZAT 0x0020
+#define ATA_SUPPORT_DRAT 0x4000
+ u_int16_t reserved70;
+/*071*/ u_int16_t rlsovlap; /* rel time (us) for overlap */
+/*072*/ u_int16_t rlsservice; /* rel time (us) for service */
+ u_int16_t reserved73;
+ u_int16_t reserved74;
+/*075*/ u_int16_t queue;
+#define ATA_QUEUE_LEN(x) ((x) & 0x001f)
+
+/*76*/ u_int16_t satacapabilities;
+#define ATA_SATA_GEN1 0x0002
+#define ATA_SATA_GEN2 0x0004
+#define ATA_SATA_GEN3 0x0008
+#define ATA_SUPPORT_NCQ 0x0100
+#define ATA_SUPPORT_IFPWRMNGTRCV 0x0200
+#define ATA_SUPPORT_PHYEVENTCNT 0x0400
+#define ATA_SUPPORT_NCQ_UNLOAD 0x0800
+#define ATA_SUPPORT_NCQ_PRIO 0x1000
+#define ATA_SUPPORT_HAPST 0x2000
+#define ATA_SUPPORT_DAPST 0x4000
+#define ATA_SUPPORT_READLOGDMAEXT 0x8000
+
+/*77*/ u_int16_t satacapabilities2;
+#define ATA_SATA_CURR_GEN_MASK 0x0006
+#define ATA_SUPPORT_NCQ_STREAM 0x0010
+#define ATA_SUPPORT_NCQ_QMANAGEMENT 0x0020
+/*78*/ u_int16_t satasupport;
+#define ATA_SUPPORT_NONZERO 0x0002
+#define ATA_SUPPORT_AUTOACTIVATE 0x0004
+#define ATA_SUPPORT_IFPWRMNGT 0x0008
+#define ATA_SUPPORT_INORDERDATA 0x0010
+#define ATA_SUPPORT_SOFTSETPRESERVE 0x0040
+/*79*/ u_int16_t sataenabled;
+#define ATA_ENABLED_DAPST 0x0080
+
+/*080*/ u_int16_t version_major;
+/*081*/ u_int16_t version_minor;
+
+ struct {
+/*082/085*/ u_int16_t command1;
+#define ATA_SUPPORT_SMART 0x0001
+#define ATA_SUPPORT_SECURITY 0x0002
+#define ATA_SUPPORT_REMOVABLE 0x0004
+#define ATA_SUPPORT_POWERMGT 0x0008
+#define ATA_SUPPORT_PACKET 0x0010
+#define ATA_SUPPORT_WRITECACHE 0x0020
+#define ATA_SUPPORT_LOOKAHEAD 0x0040
+#define ATA_SUPPORT_RELEASEIRQ 0x0080
+#define ATA_SUPPORT_SERVICEIRQ 0x0100
+#define ATA_SUPPORT_RESET 0x0200
+#define ATA_SUPPORT_PROTECTED 0x0400
+#define ATA_SUPPORT_WRITEBUFFER 0x1000
+#define ATA_SUPPORT_READBUFFER 0x2000
+#define ATA_SUPPORT_NOP 0x4000
+
+/*083/086*/ u_int16_t command2;
+#define ATA_SUPPORT_MICROCODE 0x0001
+#define ATA_SUPPORT_QUEUED 0x0002
+#define ATA_SUPPORT_CFA 0x0004
+#define ATA_SUPPORT_APM 0x0008
+#define ATA_SUPPORT_NOTIFY 0x0010
+#define ATA_SUPPORT_STANDBY 0x0020
+#define ATA_SUPPORT_SPINUP 0x0040
+#define ATA_SUPPORT_MAXSECURITY 0x0100
+#define ATA_SUPPORT_AUTOACOUSTIC 0x0200
+#define ATA_SUPPORT_ADDRESS48 0x0400
+#define ATA_SUPPORT_OVERLAY 0x0800
+#define ATA_SUPPORT_FLUSHCACHE 0x1000
+#define ATA_SUPPORT_FLUSHCACHE48 0x2000
+
+/*084/087*/ u_int16_t extension;
+#define ATA_SUPPORT_SMARTLOG 0x0001
+#define ATA_SUPPORT_SMARTTEST 0x0002
+#define ATA_SUPPORT_MEDIASN 0x0004
+#define ATA_SUPPORT_MEDIAPASS 0x0008
+#define ATA_SUPPORT_STREAMING 0x0010
+#define ATA_SUPPORT_GENLOG 0x0020
+#define ATA_SUPPORT_WRITEDMAFUAEXT 0x0040
+#define ATA_SUPPORT_WRITEDMAQFUAEXT 0x0080
+#define ATA_SUPPORT_64BITWWN 0x0100
+#define ATA_SUPPORT_UNLOAD 0x2000
+ } __packed support, enabled;
+
+/*088*/ u_int16_t udmamodes; /* UltraDMA modes */
+/*089*/ u_int16_t erase_time;
+/*090*/ u_int16_t enhanced_erase_time;
+/*091*/ u_int16_t apm_value;
+/*092*/ u_int16_t master_passwd_revision;
+/*093*/ u_int16_t hwres;
+#define ATA_CABLE_ID 0x2000
+
+/*094*/ u_int16_t acoustic;
+#define ATA_ACOUSTIC_CURRENT(x) ((x) & 0x00ff)
+#define ATA_ACOUSTIC_VENDOR(x) (((x) & 0xff00) >> 8)
+
+/*095*/ u_int16_t stream_min_req_size;
+/*096*/ u_int16_t stream_transfer_time;
+/*097*/ u_int16_t stream_access_latency;
+/*098*/ u_int32_t stream_granularity;
+/*100*/ u_int16_t lba_size48_1;
+ u_int16_t lba_size48_2;
+ u_int16_t lba_size48_3;
+ u_int16_t lba_size48_4;
+ u_int16_t reserved104;
+/*105*/ u_int16_t max_dsm_blocks;
+/*106*/ u_int16_t pss;
+#define ATA_PSS_LSPPS 0x000F
+#define ATA_PSS_LSSABOVE512 0x1000
+#define ATA_PSS_MULTLS 0x2000
+/*107*/ u_int16_t isd;
+/*108*/ u_int16_t wwn[4];
+ u_int16_t reserved112[5];
+/*117*/ u_int16_t lss_1;
+/*118*/ u_int16_t lss_2;
+/*119*/ u_int16_t support2;
+#define ATA_SUPPORT_WRITEREADVERIFY 0x0002
+#define ATA_SUPPORT_WRITEUNCORREXT 0x0004
+#define ATA_SUPPORT_RWLOGDMAEXT 0x0008
+#define ATA_SUPPORT_MICROCODE3 0x0010
+#define ATA_SUPPORT_FREEFALL 0x0020
+/*120*/ u_int16_t enabled2;
+ u_int16_t reserved121[6];
+/*127*/ u_int16_t removable_status;
+/*128*/ u_int16_t security_status;
+ u_int16_t reserved129[31];
+/*160*/ u_int16_t cfa_powermode1;
+ u_int16_t reserved161;
+/*162*/ u_int16_t cfa_kms_support;
+/*163*/ u_int16_t cfa_trueide_modes;
+/*164*/ u_int16_t cfa_memory_modes;
+ u_int16_t reserved165[4];
+/*169*/ u_int16_t support_dsm;
+#define ATA_SUPPORT_DSM_TRIM 0x0001
+ u_int16_t reserved170[6];
+/*176*/ u_int8_t media_serial[60];
+/*206*/ u_int16_t sct;
+ u_int16_t reserved206[2];
+/*209*/ u_int16_t lsalign;
+/*210*/ u_int16_t wrv_sectors_m3_1;
+ u_int16_t wrv_sectors_m3_2;
+/*212*/ u_int16_t wrv_sectors_m2_1;
+ u_int16_t wrv_sectors_m2_2;
+/*214*/ u_int16_t nv_cache_caps;
+/*215*/ u_int16_t nv_cache_size_1;
+ u_int16_t nv_cache_size_2;
+/*217*/ u_int16_t media_rotation_rate;
+ u_int16_t reserved218;
+/*219*/ u_int16_t nv_cache_opt;
+/*220*/ u_int16_t wrv_mode;
+ u_int16_t reserved221;
+/*222*/ u_int16_t transport_major;
+/*223*/ u_int16_t transport_minor;
+ u_int16_t reserved224[31];
+/*255*/ u_int16_t integrity;
+} __packed;
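
Multi-word values in the identify page are split across consecutive 16-bit words; the 48-bit capacity, for instance, spans lba_size48_1 through lba_size48_4 (words 100-103). Reassembling it is plain shifting, as in this sketch:

static u_int64_t
ata_lba48_sectors(const struct ata_params *p)
{
	/* Total addressable sectors in 48-bit LBA mode, words 100-103. */
	return ((u_int64_t)p->lba_size48_1 |
	    ((u_int64_t)p->lba_size48_2 << 16) |
	    ((u_int64_t)p->lba_size48_3 << 32) |
	    ((u_int64_t)p->lba_size48_4 << 48));
}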
+
+
+/* ATA transfer modes */
+#define ATA_MODE_MASK 0x0f
+#define ATA_DMA_MASK 0xf0
+#define ATA_PIO 0x00
+#define ATA_PIO0 0x08
+#define ATA_PIO1 0x09
+#define ATA_PIO2 0x0a
+#define ATA_PIO3 0x0b
+#define ATA_PIO4 0x0c
+#define ATA_PIO_MAX 0x0f
+#define ATA_DMA 0x10
+#define ATA_WDMA0 0x20
+#define ATA_WDMA1 0x21
+#define ATA_WDMA2 0x22
+#define ATA_UDMA0 0x40
+#define ATA_UDMA1 0x41
+#define ATA_UDMA2 0x42
+#define ATA_UDMA3 0x43
+#define ATA_UDMA4 0x44
+#define ATA_UDMA5 0x45
+#define ATA_UDMA6 0x46
+#define ATA_SA150 0x47
+#define ATA_SA300 0x48
+#define ATA_DMA_MAX 0x4f
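
The mode byte encodes family and level together: the high nibble (ATA_DMA_MASK) selects the family, zero for PIO, and the low nibble (ATA_MODE_MASK) the level within it (note that the PIO levels start at 0x08). A small decoding sketch built only from the masks above:

static int
ata_mode_is_dma(int mode)
{
	/* A non-zero family nibble means some DMA flavor (WDMA/UDMA/SATA). */
	return ((mode & ATA_DMA_MASK) != ATA_PIO);
}

static int
ata_mode_level(int mode)
{
	return (mode & ATA_MODE_MASK);	/* level within the family */
}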
+
+
+/* ATA commands */
+#define ATA_NOP 0x00 /* NOP */
+#define ATA_NF_FLUSHQUEUE 0x00 /* flush queued cmd's */
+#define ATA_NF_AUTOPOLL 0x01 /* start autopoll function */
+#define ATA_DATA_SET_MANAGEMENT 0x06
+#define ATA_DSM_TRIM 0x01
+#define ATA_DEVICE_RESET 0x08 /* reset device */
+#define ATA_READ 0x20 /* read */
+#define ATA_READ48 0x24 /* read 48bit LBA */
+#define ATA_READ_DMA48 0x25 /* read DMA 48bit LBA */
+#define ATA_READ_DMA_QUEUED48 0x26 /* read DMA QUEUED 48bit LBA */
+#define ATA_READ_NATIVE_MAX_ADDRESS48 0x27 /* read native max addr 48bit */
+#define ATA_READ_MUL48 0x29 /* read multi 48bit LBA */
+#define ATA_READ_STREAM_DMA48 0x2a /* read DMA stream 48bit LBA */
+#define ATA_READ_STREAM48 0x2b /* read stream 48bit LBA */
+#define ATA_WRITE 0x30 /* write */
+#define ATA_WRITE48 0x34 /* write 48bit LBA */
+#define ATA_WRITE_DMA48 0x35 /* write DMA 48bit LBA */
+#define ATA_WRITE_DMA_QUEUED48 0x36 /* write DMA QUEUED 48bit LBA*/
+#define ATA_SET_MAX_ADDRESS48 0x37 /* set max address 48bit */
+#define ATA_WRITE_MUL48 0x39 /* write multi 48bit LBA */
+#define ATA_WRITE_STREAM_DMA48 0x3a
+#define ATA_WRITE_STREAM48 0x3b
+#define ATA_WRITE_DMA_FUA48 0x3d
+#define ATA_WRITE_DMA_QUEUED_FUA48 0x3e
+#define ATA_WRITE_LOG_EXT 0x3f
+#define ATA_READ_VERIFY 0x40
+#define ATA_READ_VERIFY48 0x42
+#define ATA_READ_FPDMA_QUEUED 0x60 /* read DMA NCQ */
+#define ATA_WRITE_FPDMA_QUEUED 0x61 /* write DMA NCQ */
+#define ATA_SEEK 0x70 /* seek */
+#define ATA_PACKET_CMD 0xa0 /* packet command */
+#define ATA_ATAPI_IDENTIFY 0xa1 /* get ATAPI params */
+#define ATA_SERVICE 0xa2 /* service command */
+#define ATA_SMART_CMD 0xb0 /* SMART command */
+#define ATA_CFA_ERASE 0xc0 /* CFA erase */
+#define ATA_READ_MUL 0xc4 /* read multi */
+#define ATA_WRITE_MUL 0xc5 /* write multi */
+#define ATA_SET_MULTI 0xc6 /* set multi size */
+#define ATA_READ_DMA_QUEUED 0xc7 /* read DMA QUEUED */
+#define ATA_READ_DMA 0xc8 /* read DMA */
+#define ATA_WRITE_DMA 0xca /* write DMA */
+#define ATA_WRITE_DMA_QUEUED 0xcc /* write DMA QUEUED */
+#define ATA_WRITE_MUL_FUA48 0xce
+#define ATA_STANDBY_IMMEDIATE 0xe0 /* standby immediate */
+#define ATA_IDLE_IMMEDIATE 0xe1 /* idle immediate */
+#define ATA_STANDBY_CMD 0xe2 /* standby */
+#define ATA_IDLE_CMD 0xe3 /* idle */
+#define ATA_READ_BUFFER 0xe4 /* read buffer */
+#define ATA_READ_PM 0xe4 /* read portmultiplier */
+#define ATA_SLEEP 0xe6 /* sleep */
+#define ATA_FLUSHCACHE 0xe7 /* flush cache to disk */
+#define ATA_WRITE_PM 0xe8 /* write portmultiplier */
+#define ATA_FLUSHCACHE48 0xea /* flush cache to disk */
+#define ATA_ATA_IDENTIFY 0xec /* get ATA params */
+#define ATA_SETFEATURES 0xef /* features command */
+#define ATA_SF_SETXFER 0x03 /* set transfer mode */
+#define ATA_SF_ENAB_WCACHE 0x02 /* enable write cache */
+#define ATA_SF_DIS_WCACHE 0x82 /* disable write cache */
+#define ATA_SF_ENAB_PUIS 0x06 /* enable PUIS */
+#define ATA_SF_DIS_PUIS 0x86 /* disable PUIS */
+#define ATA_SF_PUIS_SPINUP 0x07 /* PUIS spin-up */
+#define ATA_SF_ENAB_RCACHE 0xaa /* enable readahead cache */
+#define ATA_SF_DIS_RCACHE 0x55 /* disable readahead cache */
+#define ATA_SF_ENAB_RELIRQ 0x5d /* enable release interrupt */
+#define ATA_SF_DIS_RELIRQ 0xdd /* disable release interrupt */
+#define ATA_SF_ENAB_SRVIRQ 0x5e /* enable service interrupt */
+#define ATA_SF_DIS_SRVIRQ 0xde /* disable service interrupt */
+#define ATA_SECURITY_FREEE_LOCK 0xf5 /* freeze security config */
+#define ATA_READ_NATIVE_MAX_ADDRESS 0xf8 /* read native max address */
+#define ATA_SET_MAX_ADDRESS 0xf9 /* set max address */
+
+
+/* ATAPI commands */
+#define ATAPI_TEST_UNIT_READY 0x00 /* check if device is ready */
+#define ATAPI_REZERO 0x01 /* rewind */
+#define ATAPI_REQUEST_SENSE 0x03 /* get sense data */
+#define ATAPI_FORMAT 0x04 /* format unit */
+#define ATAPI_READ 0x08 /* read data */
+#define ATAPI_WRITE 0x0a /* write data */
+#define ATAPI_WEOF 0x10 /* write filemark */
+#define ATAPI_WF_WRITE 0x01
+#define ATAPI_SPACE 0x11 /* space command */
+#define ATAPI_SP_FM 0x01
+#define ATAPI_SP_EOD 0x03
+#define ATAPI_INQUIRY 0x12 /* get inquiry data */
+#define ATAPI_MODE_SELECT 0x15 /* mode select */
+#define ATAPI_ERASE 0x19 /* erase */
+#define ATAPI_MODE_SENSE 0x1a /* mode sense */
+#define ATAPI_START_STOP 0x1b /* start/stop unit */
+#define ATAPI_SS_LOAD 0x01
+#define ATAPI_SS_RETENSION 0x02
+#define ATAPI_SS_EJECT 0x04
+#define ATAPI_PREVENT_ALLOW 0x1e /* media removal */
+#define ATAPI_READ_FORMAT_CAPACITIES 0x23 /* get format capacities */
+#define ATAPI_READ_CAPACITY 0x25 /* get volume capacity */
+#define ATAPI_READ_BIG 0x28 /* read data */
+#define ATAPI_WRITE_BIG 0x2a /* write data */
+#define ATAPI_LOCATE 0x2b /* locate to position */
+#define ATAPI_READ_POSITION 0x34 /* read position */
+#define ATAPI_SYNCHRONIZE_CACHE 0x35 /* flush buf, close channel */
+#define ATAPI_WRITE_BUFFER 0x3b /* write device buffer */
+#define ATAPI_READ_BUFFER 0x3c /* read device buffer */
+#define ATAPI_READ_SUBCHANNEL 0x42 /* get subchannel info */
+#define ATAPI_READ_TOC 0x43 /* get table of contents */
+#define ATAPI_PLAY_10 0x45 /* play by lba */
+#define ATAPI_PLAY_MSF 0x47 /* play by MSF address */
+#define ATAPI_PLAY_TRACK 0x48 /* play by track number */
+#define ATAPI_PAUSE 0x4b /* pause audio operation */
+#define ATAPI_READ_DISK_INFO 0x51 /* get disk info structure */
+#define ATAPI_READ_TRACK_INFO 0x52 /* get track info structure */
+#define ATAPI_RESERVE_TRACK 0x53 /* reserve track */
+#define ATAPI_SEND_OPC_INFO 0x54 /* send OPC structure */
+#define ATAPI_MODE_SELECT_BIG 0x55 /* set device parameters */
+#define ATAPI_REPAIR_TRACK 0x58 /* repair track */
+#define ATAPI_READ_MASTER_CUE 0x59 /* read master CUE info */
+#define ATAPI_MODE_SENSE_BIG 0x5a /* get device parameters */
+#define ATAPI_CLOSE_TRACK 0x5b /* close track/session */
+#define ATAPI_READ_BUFFER_CAPACITY 0x5c /* get buffer capacity */
+#define ATAPI_SEND_CUE_SHEET 0x5d /* send CUE sheet */
+#define ATAPI_SERVICE_ACTION_IN 0x96 /* get service data */
+#define ATAPI_BLANK 0xa1 /* blank the media */
+#define ATAPI_SEND_KEY 0xa3 /* send DVD key structure */
+#define ATAPI_REPORT_KEY 0xa4 /* get DVD key structure */
+#define ATAPI_PLAY_12 0xa5 /* play by lba */
+#define ATAPI_LOAD_UNLOAD 0xa6 /* changer control command */
+#define ATAPI_READ_STRUCTURE 0xad /* get DVD structure */
+#define ATAPI_PLAY_CD 0xb4 /* universal play command */
+#define ATAPI_SET_SPEED 0xbb /* set drive speed */
+#define ATAPI_MECH_STATUS 0xbd /* get changer status */
+#define ATAPI_READ_CD 0xbe /* read data */
+#define ATAPI_POLL_DSC 0xff /* poll DSC status bit */
+
+
+struct ata_ioc_devices {
+ int channel;
+ char name[2][32];
+ struct ata_params params[2];
+};
+
+/* per channel ATA ioctl calls */
+#define IOCATAGMAXCHANNEL _IOR('a', 1, int)
+#define IOCATAREINIT _IOW('a', 2, int)
+#define IOCATAATTACH _IOW('a', 3, int)
+#define IOCATADETACH _IOW('a', 4, int)
+#define IOCATADEVICES _IOWR('a', 5, struct ata_ioc_devices)
+
+/* ATAPI request sense structure */
+struct atapi_sense {
+ u_int8_t error; /* current or deferred errors */
+#define ATA_SENSE_VALID 0x80
+
+ u_int8_t segment; /* segment number */
+ u_int8_t key; /* sense key */
+#define ATA_SENSE_KEY_MASK 0x0f /* sense key mask */
+#define ATA_SENSE_NO_SENSE 0x00 /* no specific sense key info */
+#define ATA_SENSE_RECOVERED_ERROR 0x01 /* command OK, data recovered */
+#define ATA_SENSE_NOT_READY 0x02 /* no access to drive */
+#define ATA_SENSE_MEDIUM_ERROR 0x03 /* non-recovered data error */
+#define ATA_SENSE_HARDWARE_ERROR 0x04 /* non-recoverable HW failure */
+#define ATA_SENSE_ILLEGAL_REQUEST 0x05 /* invalid command param(s) */
+#define ATA_SENSE_UNIT_ATTENTION 0x06 /* media changed */
+#define ATA_SENSE_DATA_PROTECT 0x07 /* write protect */
+#define ATA_SENSE_BLANK_CHECK 0x08 /* blank check */
+#define ATA_SENSE_VENDOR_SPECIFIC 0x09 /* vendor specific skey */
+#define ATA_SENSE_COPY_ABORTED 0x0a /* copy aborted */
+#define ATA_SENSE_ABORTED_COMMAND 0x0b /* command aborted, try again */
+#define ATA_SENSE_EQUAL 0x0c /* equal */
+#define ATA_SENSE_VOLUME_OVERFLOW 0x0d /* volume overflow */
+#define ATA_SENSE_MISCOMPARE 0x0e /* data don't match the medium */
+#define ATA_SENSE_RESERVED 0x0f
+#define ATA_SENSE_ILI 0x20
+#define ATA_SENSE_EOM 0x40
+#define ATA_SENSE_FILEMARK 0x80
+
+ u_int32_t cmd_info; /* cmd information */
+ u_int8_t sense_length; /* additional sense len (n-7) */
+ u_int32_t cmd_specific_info; /* additional cmd spec info */
+ u_int8_t asc; /* additional sense code */
+ u_int8_t ascq; /* additional sense code qual */
+ u_int8_t replaceable_unit_code; /* replaceable unit code */
+ u_int8_t specific; /* sense key specific */
+#define ATA_SENSE_SPEC_VALID 0x80
+#define ATA_SENSE_SPEC_MASK 0x7f
+
+ u_int8_t specific1; /* sense key specific */
+ u_int8_t specific2; /* sense key specific */
+} __packed;
+
+struct ata_ioc_request {
+ union {
+ struct {
+ u_int8_t command;
+ u_int8_t feature;
+ u_int64_t lba;
+ u_int16_t count;
+ } ata;
+ struct {
+ char ccb[16];
+ struct atapi_sense sense;
+ } atapi;
+ } u;
+ caddr_t data;
+ int count;
+ int flags;
+#define ATA_CMD_CONTROL 0x01
+#define ATA_CMD_READ 0x02
+#define ATA_CMD_WRITE 0x04
+#define ATA_CMD_ATAPI 0x08
+
+ int timeout;
+ int error;
+};
+
+/* per device ATA ioctl calls */
+#define IOCATAREQUEST _IOWR('a', 100, struct ata_ioc_request)
+#define IOCATAGPARM _IOR('a', 101, struct ata_params)
+#define IOCATAGMODE _IOR('a', 102, int)
+#define IOCATASMODE _IOW('a', 103, int)
+
+#define IOCATAGSPINDOWN _IOR('a', 104, int)
+#define IOCATASSPINDOWN _IOW('a', 105, int)
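+
+/*
+ * Usage sketch (illustrative only, not part of this header): a userland
+ * tool could issue a cache flush through IOCATAREQUEST roughly as below.
+ * The device path is a placeholder, and ATA_FLUSHCACHE is assumed to be
+ * the flush opcode defined earlier in this header.
+ *
+ *	struct ata_ioc_request req;
+ *	int fd = open("/dev/ad0", O_RDONLY);
+ *
+ *	memset(&req, 0, sizeof(req));
+ *	req.u.ata.command = ATA_FLUSHCACHE;
+ *	req.flags = ATA_CMD_CONTROL;
+ *	req.timeout = 30;
+ *	if (fd < 0 || ioctl(fd, IOCATAREQUEST, &req) < 0 || req.error != 0)
+ *		warn("flush failed");
+ */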
+
+
+struct ata_ioc_raid_config {
+ int lun;
+ int type;
+#define AR_JBOD 0x0001
+#define AR_SPAN 0x0002
+#define AR_RAID0 0x0004
+#define AR_RAID1 0x0008
+#define AR_RAID01 0x0010
+#define AR_RAID3 0x0020
+#define AR_RAID4 0x0040
+#define AR_RAID5 0x0080
+
+ int interleave;
+ int status;
+#define AR_READY 1
+#define AR_DEGRADED 2
+#define AR_REBUILDING 4
+
+ int progress;
+ int total_disks;
+ int disks[16];
+};
+
+struct ata_ioc_raid_status {
+ int lun;
+ int type;
+ int interleave;
+ int status;
+ int progress;
+ int total_disks;
+ struct {
+ int state;
+#define AR_DISK_ONLINE 0x01
+#define AR_DISK_PRESENT 0x02
+#define AR_DISK_SPARE 0x04
+ int lun;
+ } disks[16];
+};
+
+/* ATA RAID ioctl calls */
+#define IOCATARAIDCREATE _IOWR('a', 200, struct ata_ioc_raid_config)
+#define IOCATARAIDDELETE _IOW('a', 201, int)
+#define IOCATARAIDSTATUS _IOWR('a', 202, struct ata_ioc_raid_status)
+#define IOCATARAIDADDSPARE _IOW('a', 203, struct ata_ioc_raid_config)
+#define IOCATARAIDREBUILD _IOW('a', 204, int)
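+
+/*
+ * Usage sketch (illustrative only): querying array 0 with
+ * IOCATARAIDSTATUS; the control device path is a placeholder.
+ *
+ *	struct ata_ioc_raid_status st;
+ *	int fd = open("/dev/ata", O_RDONLY);
+ *
+ *	memset(&st, 0, sizeof(st));
+ *	st.lun = 0;
+ *	if (fd >= 0 && ioctl(fd, IOCATARAIDSTATUS, &st) == 0 &&
+ *	    (st.status & AR_DEGRADED))
+ *		warnx("ar0 degraded, rebuild at %d%%", st.progress);
+ */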
+
+#endif /* _SYS_ATA_HH_ */
diff --git a/rtems/freebsd/sys/bio.h b/rtems/freebsd/sys/bio.h
new file mode 100644
index 00000000..936ffd88
--- /dev/null
+++ b/rtems/freebsd/sys/bio.h
@@ -0,0 +1 @@
+/* EMPTY */
diff --git a/rtems/freebsd/sys/bitstring.h b/rtems/freebsd/sys/bitstring.h
new file mode 100644
index 00000000..a2b70ba8
--- /dev/null
+++ b/rtems/freebsd/sys/bitstring.h
@@ -0,0 +1,146 @@
+/*-
+ * Copyright (c) 1989, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Paul Vixie.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _SYS_BITSTRING_HH_
+#define _SYS_BITSTRING_HH_
+
+typedef unsigned char bitstr_t;
+
+/* internal macros */
+ /* byte of the bitstring bit is in */
+#define _bit_byte(bit) \
+ ((bit) >> 3)
+
+ /* mask for the bit within its byte */
+#define _bit_mask(bit) \
+ (1 << ((bit)&0x7))
+
+/* external macros */
+ /* bytes in a bitstring of nbits bits */
+#define bitstr_size(nbits) \
+ (((nbits) + 7) >> 3)
+
+ /* allocate a bitstring */
+#define bit_alloc(nbits) \
+ (bitstr_t *)calloc((size_t)bitstr_size(nbits), sizeof(bitstr_t))
+
+ /* allocate a bitstring on the stack */
+#define bit_decl(name, nbits) \
+ ((name)[bitstr_size(nbits)])
+
+ /* is bit N of bitstring name set? */
+#define bit_test(name, bit) \
+ ((name)[_bit_byte(bit)] & _bit_mask(bit))
+
+ /* set bit N of bitstring name */
+#define bit_set(name, bit) \
+ ((name)[_bit_byte(bit)] |= _bit_mask(bit))
+
+ /* clear bit N of bitstring name */
+#define bit_clear(name, bit) \
+ ((name)[_bit_byte(bit)] &= ~_bit_mask(bit))
+
+ /* clear bits start ... stop in bitstring */
+#define bit_nclear(name, start, stop) do { \
+ register bitstr_t *_name = (name); \
+ register int _start = (start), _stop = (stop); \
+ register int _startbyte = _bit_byte(_start); \
+ register int _stopbyte = _bit_byte(_stop); \
+ if (_startbyte == _stopbyte) { \
+ _name[_startbyte] &= ((0xff >> (8 - (_start&0x7))) | \
+ (0xff << ((_stop&0x7) + 1))); \
+ } else { \
+ _name[_startbyte] &= 0xff >> (8 - (_start&0x7)); \
+ while (++_startbyte < _stopbyte) \
+ _name[_startbyte] = 0; \
+ _name[_stopbyte] &= 0xff << ((_stop&0x7) + 1); \
+ } \
+} while (0)
+
+ /* set bits start ... stop in bitstring */
+#define bit_nset(name, start, stop) do { \
+ register bitstr_t *_name = (name); \
+ register int _start = (start), _stop = (stop); \
+ register int _startbyte = _bit_byte(_start); \
+ register int _stopbyte = _bit_byte(_stop); \
+ if (_startbyte == _stopbyte) { \
+ _name[_startbyte] |= ((0xff << (_start&0x7)) & \
+ (0xff >> (7 - (_stop&0x7)))); \
+ } else { \
+ _name[_startbyte] |= 0xff << ((_start)&0x7); \
+ while (++_startbyte < _stopbyte) \
+ _name[_startbyte] = 0xff; \
+ _name[_stopbyte] |= 0xff >> (7 - (_stop&0x7)); \
+ } \
+} while (0)
+
+ /* find first bit clear in name */
+#define bit_ffc(name, nbits, value) do { \
+ register bitstr_t *_name = (name); \
+ register int _byte, _nbits = (nbits); \
+ register int _stopbyte = _bit_byte(_nbits - 1), _value = -1; \
+ if (_nbits > 0) \
+ for (_byte = 0; _byte <= _stopbyte; ++_byte) \
+ if (_name[_byte] != 0xff) { \
+ bitstr_t _lb; \
+ _value = _byte << 3; \
+ for (_lb = _name[_byte]; (_lb&0x1); \
+ ++_value, _lb >>= 1); \
+ break; \
+ } \
+ if (_value >= nbits) \
+ _value = -1; \
+ *(value) = _value; \
+} while (0)
+
+ /* find first bit set in name */
+#define bit_ffs(name, nbits, value) do { \
+ register bitstr_t *_name = (name); \
+ register int _byte, _nbits = (nbits); \
+ register int _stopbyte = _bit_byte(_nbits - 1), _value = -1; \
+ if (_nbits > 0) \
+ for (_byte = 0; _byte <= _stopbyte; ++_byte) \
+ if (_name[_byte]) { \
+ bitstr_t _lb; \
+ _value = _byte << 3; \
+ for (_lb = _name[_byte]; !(_lb&0x1); \
+ ++_value, _lb >>= 1); \
+ break; \
+ } \
+ if (_value >= nbits) \
+ _value = -1; \
+ *(value) = _value; \
+} while (0)
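+
+/*
+ * Usage sketch (illustrative only): track 1024 unit numbers in a
+ * bitstring and hand out the lowest free one.
+ *
+ *	bitstr_t *units = bit_alloc(1024);
+ *	int unit;
+ *
+ *	bit_ffc(units, 1024, &unit);	(sets unit to -1 if none free)
+ *	if (unit != -1)
+ *		bit_set(units, unit);
+ */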
+
+#endif /* !_SYS_BITSTRING_HH_ */
diff --git a/rtems/freebsd/sys/buf_ring.h b/rtems/freebsd/sys/buf_ring.h
new file mode 100644
index 00000000..48b02f50
--- /dev/null
+++ b/rtems/freebsd/sys/buf_ring.h
@@ -0,0 +1,277 @@
+/*-
+ * Copyright (c) 2007-2009 Kip Macy <kmacy@freebsd.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ *
+ */
+
+#ifndef _SYS_BUF_RING_HH_
+#define _SYS_BUF_RING_HH_
+
+#include <rtems/freebsd/machine/cpu.h>
+
+#if defined(INVARIANTS) && !defined(DEBUG_BUFRING)
+#define DEBUG_BUFRING 1
+#endif
+
+#ifdef DEBUG_BUFRING
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/mutex.h>
+#endif
+
+struct buf_ring {
+ volatile uint32_t br_prod_head;
+ volatile uint32_t br_prod_tail;
+ int br_prod_size;
+ int br_prod_mask;
+ uint64_t br_drops;
+ uint64_t br_prod_bufs;
+ uint64_t br_prod_bytes;
+ /*
+ * Pad out to next L2 cache line
+ */
+ uint64_t _pad0[11];
+
+ volatile uint32_t br_cons_head;
+ volatile uint32_t br_cons_tail;
+ int br_cons_size;
+ int br_cons_mask;
+
+ /*
+ * Pad out to next L2 cache line
+ */
+ uint64_t _pad1[14];
+#ifdef DEBUG_BUFRING
+ struct mtx *br_lock;
+#endif
+ void *br_ring[0];
+};
+
+/*
+ * multi-producer safe lock-free ring buffer enqueue
+ *
+ */
+static __inline int
+buf_ring_enqueue_bytes(struct buf_ring *br, void *buf, int nbytes)
+{
+ uint32_t prod_head, prod_next;
+ uint32_t cons_tail;
+ int success;
+#ifdef DEBUG_BUFRING
+ int i;
+ for (i = br->br_cons_head; i != br->br_prod_head;
+ i = ((i + 1) & br->br_cons_mask))
+ if (br->br_ring[i] == buf)
+ panic("buf=%p already enqueued at %d prod=%d cons=%d",
+ buf, i, br->br_prod_tail, br->br_cons_tail);
+#endif
+ critical_enter();
+ do {
+ prod_head = br->br_prod_head;
+ cons_tail = br->br_cons_tail;
+
+ prod_next = (prod_head + 1) & br->br_prod_mask;
+
+ if (prod_next == cons_tail) {
+ critical_exit();
+ return (ENOBUFS);
+ }
+
+ success = atomic_cmpset_int(&br->br_prod_head, prod_head,
+ prod_next);
+ } while (success == 0);
+#ifdef DEBUG_BUFRING
+ if (br->br_ring[prod_head] != NULL)
+ panic("dangling value in enqueue");
+#endif
+ br->br_ring[prod_head] = buf;
+ wmb();
+
+ /*
+ * If there are other enqueues in progress
+ * that preceded us, we need to wait for them
+ * to complete
+ */
+ while (br->br_prod_tail != prod_head)
+ cpu_spinwait();
+ br->br_prod_bufs++;
+ br->br_prod_bytes += nbytes;
+ br->br_prod_tail = prod_next;
+ critical_exit();
+ return (0);
+}
+
+static __inline int
+buf_ring_enqueue(struct buf_ring *br, void *buf)
+{
+
+ return (buf_ring_enqueue_bytes(br, buf, 0));
+}
+
+/*
+ * multi-consumer safe dequeue
+ *
+ */
+static __inline void *
+buf_ring_dequeue_mc(struct buf_ring *br)
+{
+ uint32_t cons_head, cons_next;
+ uint32_t prod_tail;
+ void *buf;
+ int success;
+
+ critical_enter();
+ do {
+ cons_head = br->br_cons_head;
+ prod_tail = br->br_prod_tail;
+
+ cons_next = (cons_head + 1) & br->br_cons_mask;
+
+ if (cons_head == prod_tail) {
+ critical_exit();
+ return (NULL);
+ }
+
+ success = atomic_cmpset_int(&br->br_cons_head, cons_head,
+ cons_next);
+ } while (success == 0);
+
+ buf = br->br_ring[cons_head];
+#ifdef DEBUG_BUFRING
+ br->br_ring[cons_head] = NULL;
+#endif
+ rmb();
+
+ /*
+ * If there are other dequeues in progress
+ * that preceded us, we need to wait for them
+ * to complete
+ */
+ while (br->br_cons_tail != cons_head)
+ cpu_spinwait();
+
+ br->br_cons_tail = cons_next;
+ critical_exit();
+
+ return (buf);
+}
+
+/*
+ * single-consumer dequeue
+ * use where dequeue is protected by a lock
+ * e.g. a network driver's tx queue lock
+ */
+static __inline void *
+buf_ring_dequeue_sc(struct buf_ring *br)
+{
+ uint32_t cons_head, cons_next, cons_next_next;
+ uint32_t prod_tail;
+ void *buf;
+
+ cons_head = br->br_cons_head;
+ prod_tail = br->br_prod_tail;
+
+ cons_next = (cons_head + 1) & br->br_cons_mask;
+ cons_next_next = (cons_head + 2) & br->br_cons_mask;
+
+ if (cons_head == prod_tail)
+ return (NULL);
+
+#ifdef PREFETCH_DEFINED
+ if (cons_next != prod_tail) {
+ prefetch(br->br_ring[cons_next]);
+ if (cons_next_next != prod_tail)
+ prefetch(br->br_ring[cons_next_next]);
+ }
+#endif
+ br->br_cons_head = cons_next;
+ buf = br->br_ring[cons_head];
+
+#ifdef DEBUG_BUFRING
+ br->br_ring[cons_head] = NULL;
+ if (!mtx_owned(br->br_lock))
+ panic("lock not held on single consumer dequeue");
+ if (br->br_cons_tail != cons_head)
+ panic("inconsistent list cons_tail=%d cons_head=%d",
+ br->br_cons_tail, cons_head);
+#endif
+ br->br_cons_tail = cons_next;
+ return (buf);
+}
+
+/*
+ * return a pointer to the first entry in the ring
+ * without modifying it, or NULL if the ring is empty.
+ * Race-prone if not protected by a lock.
+ */
+static __inline void *
+buf_ring_peek(struct buf_ring *br)
+{
+
+#ifdef DEBUG_BUFRING
+ if ((br->br_lock != NULL) && !mtx_owned(br->br_lock))
+ panic("lock not held on single consumer dequeue");
+#endif
+ /*
+ * It should be safe to omit a memory barrier here: we control
+ * cons, and tail is at worst a lagging indicator, so the worst
+ * case is that we return NULL immediately after a buffer has
+ * been enqueued.
+ */
+ if (br->br_cons_head == br->br_prod_tail)
+ return (NULL);
+
+ return (br->br_ring[br->br_cons_head]);
+}
+
+static __inline int
+buf_ring_full(struct buf_ring *br)
+{
+
+ return (((br->br_prod_head + 1) & br->br_prod_mask) == br->br_cons_tail);
+}
+
+static __inline int
+buf_ring_empty(struct buf_ring *br)
+{
+
+ return (br->br_cons_head == br->br_prod_tail);
+}
+
+static __inline int
+buf_ring_count(struct buf_ring *br)
+{
+
+ return ((br->br_prod_size + br->br_prod_tail - br->br_cons_tail)
+ & br->br_prod_mask);
+}
+
+struct buf_ring *buf_ring_alloc(int count, struct malloc_type *type, int flags,
+ struct mtx *);
+void buf_ring_free(struct buf_ring *br, struct malloc_type *type);
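+
+/*
+ * Usage sketch (illustrative only): the typical transmit-queue pattern,
+ * with any number of producers and a single lock-protected consumer.
+ * The softc, mutex and xmit routine are placeholders; M_DEVBUF is the
+ * usual kernel malloc type.  The count must be a power of two, since
+ * indexing is mask-based.
+ *
+ *	struct buf_ring *br;
+ *	struct mbuf *m;
+ *
+ *	br = buf_ring_alloc(4096, M_DEVBUF, M_WAITOK, &sc->tx_mtx);
+ *	...
+ *	(void)buf_ring_enqueue(br, m);		(producers, any context)
+ *	...
+ *	mtx_lock(&sc->tx_mtx);
+ *	while ((m = buf_ring_dequeue_sc(br)) != NULL)
+ *		xmit(sc, m);			(consumer, under the lock)
+ *	mtx_unlock(&sc->tx_mtx);
+ */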
+
+
+
+#endif
diff --git a/rtems/freebsd/sys/bufobj.h b/rtems/freebsd/sys/bufobj.h
new file mode 100644
index 00000000..8cc459e9
--- /dev/null
+++ b/rtems/freebsd/sys/bufobj.h
@@ -0,0 +1,131 @@
+/*-
+ * Copyright (c) 2004 Poul-Henning Kamp
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * Architectural notes:
+ *
+ * bufobj is a new object which is what buffers hang from in the buffer
+ * cache.
+ *
+ * This used to be vnodes, but we need non-vnode code to be able
+ * to use the buffer cache as well, specifically geom classes like gbde,
+ * raid3 and raid5.
+ *
+ * All vnodes will contain a bufobj initially, but down the road we may
+ * want to only allocate bufobjs when they are needed. There could be a
+ * large number of vnodes in the system which wouldn't need a bufobj during
+ * their lifetime.
+ *
+ * The exact relationship to the vmobject is not determined at this point,
+ * it may in fact be that we find them to be two sides of the same object
+ * once things starts to crystalize.
+ */
+
+#ifndef _SYS_BUFOBJ_HH_
+#define _SYS_BUFOBJ_HH_
+
+#if defined(_KERNEL) || defined(_KVM_VNODE)
+
+#include <rtems/freebsd/sys/queue.h>
+#include <rtems/freebsd/sys/_lock.h>
+#include <rtems/freebsd/sys/_mutex.h>
+
+struct bufobj;
+struct buf_ops;
+
+extern struct buf_ops buf_ops_bio;
+
+TAILQ_HEAD(buflists, buf);
+
+/* A Buffer splay list */
+struct bufv {
+ struct buflists bv_hd; /* Sorted blocklist */
+ struct buf *bv_root; /* Buf splay tree */
+ int bv_cnt; /* Number of buffers */
+};
+
+typedef void b_strategy_t(struct bufobj *, struct buf *);
+typedef int b_write_t(struct buf *);
+typedef int b_sync_t(struct bufobj *, int waitfor);
+typedef void b_bdflush_t(struct bufobj *, struct buf *);
+
+struct buf_ops {
+ char *bop_name;
+ b_write_t *bop_write;
+ b_strategy_t *bop_strategy;
+ b_sync_t *bop_sync;
+ b_bdflush_t *bop_bdflush;
+};
+
+#define BO_STRATEGY(bo, bp) ((bo)->bo_ops->bop_strategy((bo), (bp)))
+#define BO_SYNC(bo, w) ((bo)->bo_ops->bop_sync((bo), (w)))
+#define BO_WRITE(bo, bp) ((bo)->bo_ops->bop_write((bp)))
+#define BO_BDFLUSH(bo, bp) ((bo)->bo_ops->bop_bdflush((bo), (bp)))
+
+struct bufobj {
+ struct mtx bo_mtx; /* Mutex which protects "i" things */
+ struct bufv bo_clean; /* i Clean buffers */
+ struct bufv bo_dirty; /* i Dirty buffers */
+ long bo_numoutput; /* i Writes in progress */
+ u_int bo_flag; /* i Flags */
+ struct buf_ops *bo_ops; /* - Buffer operations */
+ int bo_bsize; /* - Block size for i/o */
+ struct vm_object *bo_object; /* v Place to store VM object */
+ LIST_ENTRY(bufobj) bo_synclist; /* S dirty vnode list */
+ void *bo_private; /* private pointer */
+ struct vnode *__bo_vnode; /*
+ * XXX: This vnode pointer is here
+ * XXX: only to keep the syncer working
+ * XXX: for now.
+ */
+};
+
+/*
+ * XXX BO_ONWORKLST could be replaced with a check for NULL list elements
+ * in v_synclist.
+ */
+#define BO_ONWORKLST (1 << 0) /* On syncer work-list */
+#define BO_WWAIT (1 << 1) /* Wait for output to complete */
+#define BO_NEEDSGIANT (1 << 2) /* Require giant for child buffers. */
+
+#define BO_MTX(bo) (&(bo)->bo_mtx)
+#define BO_LOCK(bo) mtx_lock(BO_MTX((bo)))
+#define BO_UNLOCK(bo) mtx_unlock(BO_MTX((bo)))
+#define ASSERT_BO_LOCKED(bo) mtx_assert(BO_MTX((bo)), MA_OWNED)
+#define ASSERT_BO_UNLOCKED(bo) mtx_assert(BO_MTX((bo)), MA_NOTOWNED)
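+
+/*
+ * Usage sketch (illustrative only): fields marked "i" above must be
+ * read under the bufobj mutex, e.g. when walking the dirty list.
+ * b_bobufs is assumed to be the buf list linkage from <sys/buf.h>.
+ *
+ *	struct buf *bp;
+ *
+ *	BO_LOCK(bo);
+ *	TAILQ_FOREACH(bp, &bo->bo_dirty.bv_hd, b_bobufs)
+ *		(inspect bp; "i" fields are stable here)
+ *	BO_UNLOCK(bo);
+ */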
+
+void bufobj_wdrop(struct bufobj *bo);
+void bufobj_wref(struct bufobj *bo);
+void bufobj_wrefl(struct bufobj *bo);
+int bufobj_invalbuf(struct bufobj *bo, int flags, int slpflag, int slptimeo);
+int bufobj_wwait(struct bufobj *bo, int slpflag, int timeo);
+int bufsync(struct bufobj *bo, int waitfor);
+void bufbdflush(struct bufobj *bo, struct buf *bp);
+
+#endif /* defined(_KERNEL) || defined(_KVM_VNODE) */
+#endif /* _SYS_BUFOBJ_HH_ */
diff --git a/rtems/freebsd/sys/bus.h b/rtems/freebsd/sys/bus.h
new file mode 100644
index 00000000..4ca144da
--- /dev/null
+++ b/rtems/freebsd/sys/bus.h
@@ -0,0 +1,748 @@
+/*-
+ * Copyright (c) 1997,1998,2003 Doug Rabson
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _SYS_BUS_HH_
+#define _SYS_BUS_HH_
+
+#include <rtems/freebsd/machine/_limits.h>
+#include <rtems/freebsd/sys/_bus_dma.h>
+
+/**
+ * @defgroup NEWBUS newbus - a generic framework for managing devices
+ * @{
+ */
+
+/**
+ * @brief Interface information structure.
+ */
+struct u_businfo {
+ int ub_version; /**< @brief interface version */
+#define BUS_USER_VERSION 1
+ int ub_generation; /**< @brief generation count */
+};
+
+/**
+ * @brief State of the device.
+ */
+typedef enum device_state {
+ DS_NOTPRESENT = 10, /**< @brief not probed or probe failed */
+ DS_ALIVE = 20, /**< @brief probe succeeded */
+ DS_ATTACHED = 30, /**< @brief attach method called */
+ DS_BUSY = 40 /**< @brief device is open */
+} device_state_t;
+
+/**
+ * @brief Device information exported to userspace.
+ */
+struct u_device {
+ uintptr_t dv_handle;
+ uintptr_t dv_parent;
+
+ char dv_name[32]; /**< @brief Name of device in tree. */
+ char dv_desc[32]; /**< @brief Driver description */
+ char dv_drivername[32]; /**< @brief Driver name */
+ char dv_pnpinfo[128]; /**< @brief Plug and play info */
+ char dv_location[128]; /**< @brief Where is the device? */
+ uint32_t dv_devflags; /**< @brief API Flags for device */
+ uint16_t dv_flags; /**< @brief flags for dev state */
+ device_state_t dv_state; /**< @brief State of attachment */
+ /* XXX more driver info? */
+};
+
+#ifdef _KERNEL
+
+#include <rtems/freebsd/sys/queue.h>
+#include <rtems/freebsd/sys/kobj.h>
+
+/**
+ * devctl hooks. Typically one should use the devctl_notify
+ * hook to send the message. However, devctl_queue_data is also
+ * included in case devctl_notify isn't sufficiently general.
+ */
+boolean_t devctl_process_running(void);
+void devctl_notify_f(const char *__system, const char *__subsystem,
+ const char *__type, const char *__data, int __flags);
+void devctl_notify(const char *__system, const char *__subsystem,
+ const char *__type, const char *__data);
+void devctl_queue_data_f(char *__data, int __flags);
+void devctl_queue_data(char *__data);
+
+/**
+ * @brief A device driver (included mainly for compatibility with
+ * FreeBSD 4.x).
+ */
+typedef struct kobj_class driver_t;
+
+/**
+ * @brief A device class
+ *
+ * The devclass object has two main functions in the system. The first
+ * is to manage the allocation of unit numbers for device instances
+ * and the second is to hold the list of device drivers for a
+ * particular bus type. Each devclass has a name and there cannot be
+ * two devclasses with the same name. This ensures that unique unit
+ * numbers are allocated to device instances.
+ *
+ * Drivers that support several different bus attachments (e.g. isa,
+ * pci, pccard) should all use the same devclass to ensure that unit
+ * numbers do not conflict.
+ *
+ * Each devclass may also have a parent devclass. This is used when
+ * searching for device drivers to allow a form of inheritance. When
+ * matching drivers with devices, first the driver list of the parent
+ * device's devclass is searched. If no driver is found in that list,
+ * the search continues in the parent devclass (if any).
+ */
+typedef struct devclass *devclass_t;
+
+/**
+ * @brief A device method (included mainly for compatibility with
+ * FreeBSD 4.x).
+ */
+#define device_method_t kobj_method_t
+
+/**
+ * @brief Driver interrupt filter return values
+ *
+ * If a driver provides an interrupt filter routine it must return an
+ * integer consisting of oring together zero or more of the following
+ * flags:
+ *
+ * FILTER_STRAY - this device did not trigger the interrupt
+ * FILTER_HANDLED - the interrupt has been fully handled and can be EOId
+ * FILTER_SCHEDULE_THREAD - the threaded interrupt handler should be
+ * scheduled to execute
+ *
+ * If the driver does not provide a filter, then the interrupt code will
+ * act is if the filter had returned FILTER_SCHEDULE_THREAD. Note that it
+ * is illegal to specify any other flag with FILTER_STRAY and that it is
+ * illegal to not specify either of FILTER_HANDLED or FILTER_SCHEDULE_THREAD
+ * if FILTER_STRAY is not specified.
+ */
+#define FILTER_STRAY 0x01
+#define FILTER_HANDLED 0x02
+#define FILTER_SCHEDULE_THREAD 0x04
+
+/**
+ * @brief Driver interrupt service routines
+ *
+ * The filter routine is run in primary interrupt context and may not
+ * block or use regular mutexes. It may only use spin mutexes for
+ * synchronization. The filter may either completely handle the
+ * interrupt or it may perform some of the work and defer more
+ * expensive work to the regular interrupt handler. If a filter
+ * routine is not registered by the driver, then the regular interrupt
+ * handler is always used to handle interrupts from this device.
+ *
+ * The regular interrupt handler executes in its own thread context
+ * and may use regular mutexes. However, it is prohibited from
+ * sleeping on a sleep queue.
+ */
+typedef int driver_filter_t(void*);
+typedef void driver_intr_t(void*);
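+
+/*
+ * Sketch of a filter routine (illustrative only; the softc and register
+ * helpers are placeholders).  The filter runs in primary interrupt
+ * context, quiets the hardware and defers the real work:
+ *
+ *	static int
+ *	foo_filter(void *arg)
+ *	{
+ *		struct foo_softc *sc = arg;
+ *
+ *		if (!FOO_IRQ_PENDING(sc))
+ *			return (FILTER_STRAY);
+ *		FOO_IRQ_DISABLE(sc);
+ *		return (FILTER_SCHEDULE_THREAD);
+ *	}
+ *
+ * The matching driver_intr_t then runs in its own thread, may take
+ * regular mutexes, and re-enables the interrupt when done.
+ */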
+
+/**
+ * @brief Interrupt type bits.
+ *
+ * These flags are used both by newbus interrupt
+ * registration (nexus.c) and also in struct intrec, which defines
+ * interrupt properties.
+ *
+ * XXX We should probably revisit this and remove the vestiges of the
+ * spls implicit in names like INTR_TYPE_TTY. In the meantime, don't
+ * confuse things by renaming them (Grog, 18 July 2000).
+ *
+ * We define this in terms of bits because some devices may belong
+ * to multiple classes (and therefore need to be included in
+ * multiple interrupt masks, which is what this really serves to
+ * indicate). Buses which do interrupt remapping will want to
+ * change their type to reflect what sort of devices are underneath.
+ */
+enum intr_type {
+ INTR_TYPE_TTY = 1,
+ INTR_TYPE_BIO = 2,
+ INTR_TYPE_NET = 4,
+ INTR_TYPE_CAM = 8,
+ INTR_TYPE_MISC = 16,
+ INTR_TYPE_CLK = 32,
+ INTR_TYPE_AV = 64,
+ INTR_FAST = 128,
+ INTR_EXCL = 256, /* exclusive interrupt */
+ INTR_MPSAFE = 512, /* this interrupt is SMP safe */
+ INTR_ENTROPY = 1024 /* this interrupt provides entropy */
+};
+
+enum intr_trigger {
+ INTR_TRIGGER_CONFORM = 0,
+ INTR_TRIGGER_EDGE = 1,
+ INTR_TRIGGER_LEVEL = 2
+};
+
+enum intr_polarity {
+ INTR_POLARITY_CONFORM = 0,
+ INTR_POLARITY_HIGH = 1,
+ INTR_POLARITY_LOW = 2
+};
+
+typedef int (*devop_t)(void);
+
+/**
+ * @brief This structure is deprecated.
+ *
+ * Use the kobj(9) macro DEFINE_CLASS to
+ * declare classes which implement device drivers.
+ */
+struct driver {
+ KOBJ_CLASS_FIELDS;
+};
+
+/*
+ * Definitions for drivers which need to keep simple lists of resources
+ * for their child devices.
+ */
+struct resource;
+
+/**
+ * @brief An entry for a single resource in a resource list.
+ */
+struct resource_list_entry {
+ STAILQ_ENTRY(resource_list_entry) link;
+ int type; /**< @brief type argument to alloc_resource */
+ int rid; /**< @brief resource identifier */
+ struct resource *res; /**< @brief the real resource when allocated */
+ u_long start; /**< @brief start of resource range */
+ u_long end; /**< @brief end of resource range */
+ u_long count; /**< @brief count within range */
+};
+STAILQ_HEAD(resource_list, resource_list_entry);
+
+void resource_list_init(struct resource_list *rl);
+void resource_list_free(struct resource_list *rl);
+struct resource_list_entry *
+ resource_list_add(struct resource_list *rl,
+ int type, int rid,
+ u_long start, u_long end, u_long count);
+int resource_list_add_next(struct resource_list *rl,
+ int type,
+ u_long start, u_long end, u_long count);
+struct resource_list_entry*
+ resource_list_find(struct resource_list *rl,
+ int type, int rid);
+void resource_list_delete(struct resource_list *rl,
+ int type, int rid);
+struct resource *
+ resource_list_alloc(struct resource_list *rl,
+ device_t bus, device_t child,
+ int type, int *rid,
+ u_long start, u_long end,
+ u_long count, u_int flags);
+int resource_list_release(struct resource_list *rl,
+ device_t bus, device_t child,
+ int type, int rid, struct resource *res);
+void resource_list_purge(struct resource_list *rl);
+int resource_list_print_type(struct resource_list *rl,
+ const char *name, int type,
+ const char *format);
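+
+/*
+ * Usage sketch (illustrative only): a bus driver seeding a child's
+ * resource list; SYS_RES_MEMORY and SYS_RES_IRQ come from
+ * <machine/resource.h> and the addresses are placeholders.
+ *
+ *	resource_list_init(rl);
+ *	resource_list_add(rl, SYS_RES_MEMORY, 0, 0xf0000000, 0xf0000fff,
+ *	    0x1000);
+ *	resource_list_add(rl, SYS_RES_IRQ, 0, 5, 5, 1);
+ *
+ * A later resource_list_alloc() for the same (type, rid) hands the
+ * range out to the child.
+ */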
+
+/*
+ * The root bus, to which all top-level busses are attached.
+ */
+extern device_t root_bus;
+extern devclass_t root_devclass;
+void root_bus_configure(void);
+
+/*
+ * Useful functions for implementing busses.
+ */
+
+int bus_generic_activate_resource(device_t dev, device_t child, int type,
+ int rid, struct resource *r);
+device_t
+ bus_generic_add_child(device_t dev, u_int order, const char *name,
+ int unit);
+struct resource *
+ bus_generic_alloc_resource(device_t bus, device_t child, int type,
+ int *rid, u_long start, u_long end,
+ u_long count, u_int flags);
+int bus_generic_attach(device_t dev);
+int bus_generic_bind_intr(device_t dev, device_t child,
+ struct resource *irq, int cpu);
+int bus_generic_child_present(device_t dev, device_t child);
+int bus_generic_config_intr(device_t, int, enum intr_trigger,
+ enum intr_polarity);
+int bus_generic_describe_intr(device_t dev, device_t child,
+ struct resource *irq, void *cookie,
+ const char *descr);
+int bus_generic_deactivate_resource(device_t dev, device_t child, int type,
+ int rid, struct resource *r);
+int bus_generic_detach(device_t dev);
+void bus_generic_driver_added(device_t dev, driver_t *driver);
+bus_dma_tag_t
+ bus_generic_get_dma_tag(device_t dev, device_t child);
+struct resource_list *
+ bus_generic_get_resource_list (device_t, device_t);
+void bus_generic_new_pass(device_t dev);
+int bus_print_child_header(device_t dev, device_t child);
+int bus_print_child_footer(device_t dev, device_t child);
+int bus_generic_print_child(device_t dev, device_t child);
+int bus_generic_probe(device_t dev);
+int bus_generic_read_ivar(device_t dev, device_t child, int which,
+ uintptr_t *result);
+int bus_generic_release_resource(device_t bus, device_t child,
+ int type, int rid, struct resource *r);
+int bus_generic_resume(device_t dev);
+int bus_generic_setup_intr(device_t dev, device_t child,
+ struct resource *irq, int flags,
+ driver_filter_t *filter, driver_intr_t *intr,
+ void *arg, void **cookiep);
+
+struct resource *
+ bus_generic_rl_alloc_resource (device_t, device_t, int, int *,
+ u_long, u_long, u_long, u_int);
+void bus_generic_rl_delete_resource (device_t, device_t, int, int);
+int bus_generic_rl_get_resource (device_t, device_t, int, int, u_long *,
+ u_long *);
+int bus_generic_rl_set_resource (device_t, device_t, int, int, u_long,
+ u_long);
+int bus_generic_rl_release_resource (device_t, device_t, int, int,
+ struct resource *);
+
+int bus_generic_shutdown(device_t dev);
+int bus_generic_suspend(device_t dev);
+int bus_generic_teardown_intr(device_t dev, device_t child,
+ struct resource *irq, void *cookie);
+int bus_generic_write_ivar(device_t dev, device_t child, int which,
+ uintptr_t value);
+
+/*
+ * Wrapper functions for the BUS_*_RESOURCE methods to make client code
+ * a little simpler.
+ */
+
+struct resource_spec {
+ int type;
+ int rid;
+ int flags;
+};
+
+int bus_alloc_resources(device_t dev, struct resource_spec *rs, struct resource **res);
+void bus_release_resources(device_t dev, const struct resource_spec *rs, struct resource **res);
+
+struct resource *bus_alloc_resource(device_t dev, int type, int *rid,
+ u_long start, u_long end, u_long count,
+ u_int flags);
+int bus_activate_resource(device_t dev, int type, int rid,
+ struct resource *r);
+int bus_deactivate_resource(device_t dev, int type, int rid,
+ struct resource *r);
+bus_dma_tag_t bus_get_dma_tag(device_t dev);
+int bus_release_resource(device_t dev, int type, int rid,
+ struct resource *r);
+int bus_free_resource(device_t dev, int type, struct resource *r);
+int bus_setup_intr(device_t dev, struct resource *r, int flags,
+ driver_filter_t filter, driver_intr_t handler,
+ void *arg, void **cookiep);
+int bus_teardown_intr(device_t dev, struct resource *r, void *cookie);
+int bus_bind_intr(device_t dev, struct resource *r, int cpu);
+int bus_describe_intr(device_t dev, struct resource *irq, void *cookie,
+ const char *fmt, ...);
+int bus_set_resource(device_t dev, int type, int rid,
+ u_long start, u_long count);
+int bus_get_resource(device_t dev, int type, int rid,
+ u_long *startp, u_long *countp);
+u_long bus_get_resource_start(device_t dev, int type, int rid);
+u_long bus_get_resource_count(device_t dev, int type, int rid);
+void bus_delete_resource(device_t dev, int type, int rid);
+int bus_child_present(device_t child);
+int bus_child_pnpinfo_str(device_t child, char *buf, size_t buflen);
+int bus_child_location_str(device_t child, char *buf, size_t buflen);
+void bus_enumerate_hinted_children(device_t bus);
+
+static __inline struct resource *
+bus_alloc_resource_any(device_t dev, int type, int *rid, u_int flags)
+{
+ return (bus_alloc_resource(dev, type, rid, 0ul, ~0ul, 1, flags));
+}
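+
+/*
+ * Usage sketch (illustrative only): the common attach-time sequence of
+ * allocating an IRQ and wiring up a handler.  foo_intr and the softc
+ * are placeholders; RF_ACTIVE/RF_SHAREABLE come from <sys/rman.h> and
+ * SYS_RES_IRQ from <machine/resource.h>.
+ *
+ *	int rid = 0;
+ *
+ *	sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
+ *	    RF_ACTIVE | RF_SHAREABLE);
+ *	if (sc->irq_res == NULL ||
+ *	    bus_setup_intr(dev, sc->irq_res, INTR_TYPE_NET | INTR_MPSAFE,
+ *	    NULL, foo_intr, sc, &sc->irq_cookie) != 0)
+ *		return (ENXIO);
+ */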
+
+/*
+ * Access functions for device.
+ */
+device_t device_add_child(device_t dev, const char *name, int unit);
+device_t device_add_child_ordered(device_t dev, u_int order,
+ const char *name, int unit);
+void device_busy(device_t dev);
+int device_delete_child(device_t dev, device_t child);
+int device_attach(device_t dev);
+int device_detach(device_t dev);
+void device_disable(device_t dev);
+void device_enable(device_t dev);
+device_t device_find_child(device_t dev, const char *classname,
+ int unit);
+const char *device_get_desc(device_t dev);
+devclass_t device_get_devclass(device_t dev);
+driver_t *device_get_driver(device_t dev);
+u_int32_t device_get_flags(device_t dev);
+device_t device_get_parent(device_t dev);
+int device_get_children(device_t dev, device_t **listp, int *countp);
+void *device_get_ivars(device_t dev);
+void device_set_ivars(device_t dev, void *ivars);
+const char *device_get_name(device_t dev);
+const char *device_get_nameunit(device_t dev);
+void *device_get_softc(device_t dev);
+device_state_t device_get_state(device_t dev);
+int device_get_unit(device_t dev);
+struct sysctl_ctx_list *device_get_sysctl_ctx(device_t dev);
+struct sysctl_oid *device_get_sysctl_tree(device_t dev);
+int device_is_alive(device_t dev); /* did probe succeed? */
+int device_is_attached(device_t dev); /* did attach succeed? */
+int device_is_enabled(device_t dev);
+int device_is_quiet(device_t dev);
+int device_print_prettyname(device_t dev);
+int device_printf(device_t dev, const char *, ...) __printflike(2, 3);
+int device_probe(device_t dev);
+int device_probe_and_attach(device_t dev);
+int device_probe_child(device_t bus, device_t dev);
+int device_quiesce(device_t dev);
+void device_quiet(device_t dev);
+void device_set_desc(device_t dev, const char* desc);
+void device_set_desc_copy(device_t dev, const char* desc);
+int device_set_devclass(device_t dev, const char *classname);
+int device_set_driver(device_t dev, driver_t *driver);
+void device_set_flags(device_t dev, u_int32_t flags);
+void device_set_softc(device_t dev, void *softc);
+int device_set_unit(device_t dev, int unit); /* XXX DONT USE XXX */
+int device_shutdown(device_t dev);
+void device_unbusy(device_t dev);
+void device_verbose(device_t dev);
+
+/*
+ * Access functions for devclass.
+ */
+devclass_t devclass_create(const char *classname);
+devclass_t devclass_find(const char *classname);
+const char *devclass_get_name(devclass_t dc);
+device_t devclass_get_device(devclass_t dc, int unit);
+void *devclass_get_softc(devclass_t dc, int unit);
+int devclass_get_devices(devclass_t dc, device_t **listp, int *countp);
+int devclass_get_drivers(devclass_t dc, driver_t ***listp, int *countp);
+int devclass_get_count(devclass_t dc);
+int devclass_get_maxunit(devclass_t dc);
+int devclass_find_free_unit(devclass_t dc, int unit);
+void devclass_set_parent(devclass_t dc, devclass_t pdc);
+devclass_t devclass_get_parent(devclass_t dc);
+struct sysctl_ctx_list *devclass_get_sysctl_ctx(devclass_t dc);
+struct sysctl_oid *devclass_get_sysctl_tree(devclass_t dc);
+
+/*
+ * Access functions for device resources.
+ */
+
+int resource_int_value(const char *name, int unit, const char *resname,
+ int *result);
+int resource_long_value(const char *name, int unit, const char *resname,
+ long *result);
+int resource_string_value(const char *name, int unit, const char *resname,
+ const char **result);
+int resource_disabled(const char *name, int unit);
+int resource_find_match(int *anchor, const char **name, int *unit,
+ const char *resname, const char *value);
+int resource_find_dev(int *anchor, const char *name, int *unit,
+ const char *resname, const char *value);
+int resource_set_int(const char *name, int unit, const char *resname,
+ int value);
+int resource_set_long(const char *name, int unit, const char *resname,
+ long value);
+int resource_set_string(const char *name, int unit, const char *resname,
+ const char *value);
+/*
+ * Functions for maintaining and checking consistency of
+ * bus information exported to userspace.
+ */
+int bus_data_generation_check(int generation);
+void bus_data_generation_update(void);
+
+/**
+ * Some convenience defines for probe routines to return. These are just
+ * suggested values, and there's nothing magical about them.
+ * BUS_PROBE_SPECIFIC is for devices that cannot be reprobed, and for which
+ * no other possible driver may exist (typically legacy drivers that don't
+ * follow all the rules, or special needs drivers). BUS_PROBE_VENDOR is the
+ * suggested value that vendor supplied drivers use. This is for source or
+ * binary drivers that are not yet integrated into the FreeBSD tree. Its use
+ * in the base OS is prohibited. BUS_PROBE_DEFAULT is the normal return value
+ * for drivers to use. It is intended that nearly all of the drivers in the
+ * tree should return this value. BUS_PROBE_LOW_PRIORITY is for drivers that
+ * have special requirements, such as when two drivers support overlapping
+ * series of hardware devices. In this case the one that supports the older
+ * part of the line would return this value, while the one that supports the
+ * newer ones would return BUS_PROBE_DEFAULT. BUS_PROBE_GENERIC is for
+ * drivers that wish to have a generic form and a specialized form, as is
+ * done with the pci bus and the acpi pci bus. BUS_PROBE_HOOVER is for those
+ * busses that implement a generic device place-holder for devices on the
+ * bus that have no more specific driver for them (aka ugen).
+ * BUS_PROBE_NOWILDCARD or lower means that the device isn't really bidding
+ * for a device node, but accepts only devices that its parent has told it
+ * to use this driver for.
+ */
+#define BUS_PROBE_SPECIFIC 0 /* Only I can use this device */
+#define BUS_PROBE_VENDOR (-10) /* Vendor supplied driver */
+#define BUS_PROBE_DEFAULT (-20) /* Base OS default driver */
+#define BUS_PROBE_LOW_PRIORITY (-40) /* Older, less desirable drivers */
+#define BUS_PROBE_GENERIC (-100) /* generic driver for dev */
+#define BUS_PROBE_HOOVER (-500) /* Generic dev for all devs on bus */
+#define BUS_PROBE_NOWILDCARD (-2000000000) /* No wildcard device matches */
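+
+/*
+ * Usage sketch (illustrative only): a typical PCI probe method using
+ * these values; the vendor/device IDs are placeholders.
+ *
+ *	static int
+ *	foo_probe(device_t dev)
+ *	{
+ *		if (pci_get_vendor(dev) == FOO_VENDOR_ID &&
+ *		    pci_get_device(dev) == FOO_DEVICE_ID) {
+ *			device_set_desc(dev, "Foo controller");
+ *			return (BUS_PROBE_DEFAULT);
+ *		}
+ *		return (ENXIO);
+ *	}
+ */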
+
+/**
+ * During boot, the device tree is scanned multiple times. Each scan,
+ * or pass, drivers may be attached to devices. Each driver
+ * attachment is assigned a pass number. Drivers may only probe and
+ * attach to devices if their pass number is less than or equal to the
+ * current system-wide pass number. The default pass is the last pass
+ * and is used by most drivers. Drivers needed by the scheduler are
+ * probed in earlier passes.
+ */
+#define BUS_PASS_ROOT 0 /* Used to attach root0. */
+#define BUS_PASS_BUS 10 /* Busses and bridges. */
+#define BUS_PASS_CPU 20 /* CPU devices. */
+#define BUS_PASS_RESOURCE 30 /* Resource discovery. */
+#define BUS_PASS_INTERRUPT 40 /* Interrupt controllers. */
+#define BUS_PASS_TIMER 50 /* Timers and clocks. */
+#define BUS_PASS_SCHEDULER 60 /* Start scheduler. */
+#define BUS_PASS_DEFAULT __INT_MAX /* Everything else. */
+
+extern int bus_current_pass;
+
+void bus_set_pass(int pass);
+
+/**
+ * Shorthand for constructing method tables.
+ */
+#define DEVMETHOD KOBJMETHOD
+
+/*
+ * Some common device interfaces.
+ */
+#include <rtems/freebsd/local/device_if.h>
+#include <rtems/freebsd/local/bus_if.h>
+
+struct module;
+
+int driver_module_handler(struct module *, int, void *);
+
+/**
+ * Module support for automatically adding drivers to busses.
+ */
+struct driver_module_data {
+ int (*dmd_chainevh)(struct module *, int, void *);
+ void *dmd_chainarg;
+ const char *dmd_busname;
+ kobj_class_t dmd_driver;
+ devclass_t *dmd_devclass;
+ int dmd_pass;
+};
+
+#define EARLY_DRIVER_MODULE(name, busname, driver, devclass, evh, arg, pass) \
+ \
+static struct driver_module_data name##_##busname##_driver_mod = { \
+ evh, arg, \
+ #busname, \
+ (kobj_class_t) &driver, \
+ &devclass, \
+ pass \
+}; \
+ \
+static moduledata_t name##_##busname##_mod = { \
+ #busname "/" #name, \
+ driver_module_handler, \
+ &name##_##busname##_driver_mod \
+}; \
+DECLARE_MODULE(name##_##busname, name##_##busname##_mod, \
+ SI_SUB_DRIVERS, SI_ORDER_MIDDLE)
+
+#define DRIVER_MODULE(name, busname, driver, devclass, evh, arg) \
+ EARLY_DRIVER_MODULE(name, busname, driver, devclass, evh, arg, \
+ BUS_PASS_DEFAULT)
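+
+/*
+ * Usage sketch (illustrative only): registering a hypothetical "foo"
+ * driver on the pci bus.  foo_probe/foo_attach/foo_softc are
+ * placeholders.
+ *
+ *	static device_method_t foo_methods[] = {
+ *		DEVMETHOD(device_probe,  foo_probe),
+ *		DEVMETHOD(device_attach, foo_attach),
+ *		{ 0, 0 }
+ *	};
+ *	static driver_t foo_driver = {
+ *		"foo", foo_methods, sizeof(struct foo_softc)
+ *	};
+ *	static devclass_t foo_devclass;
+ *	DRIVER_MODULE(foo, pci, foo_driver, foo_devclass, 0, 0);
+ */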
+
+/**
+ * Generic ivar accessor generation macros for bus drivers
+ */
+#define __BUS_ACCESSOR(varp, var, ivarp, ivar, type) \
+ \
+static __inline type varp ## _get_ ## var(device_t dev) \
+{ \
+ uintptr_t v; \
+ BUS_READ_IVAR(device_get_parent(dev), dev, \
+ ivarp ## _IVAR_ ## ivar, &v); \
+ return ((type) v); \
+} \
+ \
+static __inline void varp ## _set_ ## var(device_t dev, type t) \
+{ \
+ uintptr_t v = (uintptr_t) t; \
+ BUS_WRITE_IVAR(device_get_parent(dev), dev, \
+ ivarp ## _IVAR_ ## ivar, v); \
+}
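+
+/*
+ * Usage sketch (illustrative only): a hypothetical "foo" bus exposing a
+ * "port" instance variable,
+ *
+ *	__BUS_ACCESSOR(foo, port, FOO, PORT, int)
+ *
+ * generates foo_get_port(dev) and foo_set_port(dev, v), which forward
+ * to the parent bus's BUS_READ_IVAR/BUS_WRITE_IVAR with FOO_IVAR_PORT.
+ */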
+
+/**
+ * Shorthand macros, taking resource argument
+ * Generated with sys/tools/bus_macro.sh
+ */
+
+#define bus_barrier(r, o, l, f) \
+ bus_space_barrier((r)->r_bustag, (r)->r_bushandle, (o), (l), (f))
+#define bus_read_1(r, o) \
+ bus_space_read_1((r)->r_bustag, (r)->r_bushandle, (o))
+#define bus_read_multi_1(r, o, d, c) \
+ bus_space_read_multi_1((r)->r_bustag, (r)->r_bushandle, (o), (d), (c))
+#define bus_read_region_1(r, o, d, c) \
+ bus_space_read_region_1((r)->r_bustag, (r)->r_bushandle, (o), (d), (c))
+#define bus_set_multi_1(r, o, v, c) \
+ bus_space_set_multi_1((r)->r_bustag, (r)->r_bushandle, (o), (v), (c))
+#define bus_set_region_1(r, o, v, c) \
+ bus_space_set_region_1((r)->r_bustag, (r)->r_bushandle, (o), (v), (c))
+#define bus_write_1(r, o, v) \
+ bus_space_write_1((r)->r_bustag, (r)->r_bushandle, (o), (v))
+#define bus_write_multi_1(r, o, d, c) \
+ bus_space_write_multi_1((r)->r_bustag, (r)->r_bushandle, (o), (d), (c))
+#define bus_write_region_1(r, o, d, c) \
+ bus_space_write_region_1((r)->r_bustag, (r)->r_bushandle, (o), (d), (c))
+#define bus_read_stream_1(r, o) \
+ bus_space_read_stream_1((r)->r_bustag, (r)->r_bushandle, (o))
+#define bus_read_multi_stream_1(r, o, d, c) \
+ bus_space_read_multi_stream_1((r)->r_bustag, (r)->r_bushandle, (o), (d), (c))
+#define bus_read_region_stream_1(r, o, d, c) \
+ bus_space_read_region_stream_1((r)->r_bustag, (r)->r_bushandle, (o), (d), (c))
+#define bus_set_multi_stream_1(r, o, v, c) \
+ bus_space_set_multi_stream_1((r)->r_bustag, (r)->r_bushandle, (o), (v), (c))
+#define bus_set_region_stream_1(r, o, v, c) \
+ bus_space_set_region_stream_1((r)->r_bustag, (r)->r_bushandle, (o), (v), (c))
+#define bus_write_stream_1(r, o, v) \
+ bus_space_write_stream_1((r)->r_bustag, (r)->r_bushandle, (o), (v))
+#define bus_write_multi_stream_1(r, o, d, c) \
+ bus_space_write_multi_stream_1((r)->r_bustag, (r)->r_bushandle, (o), (d), (c))
+#define bus_write_region_stream_1(r, o, d, c) \
+ bus_space_write_region_stream_1((r)->r_bustag, (r)->r_bushandle, (o), (d), (c))
+#define bus_read_2(r, o) \
+ bus_space_read_2((r)->r_bustag, (r)->r_bushandle, (o))
+#define bus_read_multi_2(r, o, d, c) \
+ bus_space_read_multi_2((r)->r_bustag, (r)->r_bushandle, (o), (d), (c))
+#define bus_read_region_2(r, o, d, c) \
+ bus_space_read_region_2((r)->r_bustag, (r)->r_bushandle, (o), (d), (c))
+#define bus_set_multi_2(r, o, v, c) \
+ bus_space_set_multi_2((r)->r_bustag, (r)->r_bushandle, (o), (v), (c))
+#define bus_set_region_2(r, o, v, c) \
+ bus_space_set_region_2((r)->r_bustag, (r)->r_bushandle, (o), (v), (c))
+#define bus_write_2(r, o, v) \
+ bus_space_write_2((r)->r_bustag, (r)->r_bushandle, (o), (v))
+#define bus_write_multi_2(r, o, d, c) \
+ bus_space_write_multi_2((r)->r_bustag, (r)->r_bushandle, (o), (d), (c))
+#define bus_write_region_2(r, o, d, c) \
+ bus_space_write_region_2((r)->r_bustag, (r)->r_bushandle, (o), (d), (c))
+#define bus_read_stream_2(r, o) \
+ bus_space_read_stream_2((r)->r_bustag, (r)->r_bushandle, (o))
+#define bus_read_multi_stream_2(r, o, d, c) \
+ bus_space_read_multi_stream_2((r)->r_bustag, (r)->r_bushandle, (o), (d), (c))
+#define bus_read_region_stream_2(r, o, d, c) \
+ bus_space_read_region_stream_2((r)->r_bustag, (r)->r_bushandle, (o), (d), (c))
+#define bus_set_multi_stream_2(r, o, v, c) \
+ bus_space_set_multi_stream_2((r)->r_bustag, (r)->r_bushandle, (o), (v), (c))
+#define bus_set_region_stream_2(r, o, v, c) \
+ bus_space_set_region_stream_2((r)->r_bustag, (r)->r_bushandle, (o), (v), (c))
+#define bus_write_stream_2(r, o, v) \
+ bus_space_write_stream_2((r)->r_bustag, (r)->r_bushandle, (o), (v))
+#define bus_write_multi_stream_2(r, o, d, c) \
+ bus_space_write_multi_stream_2((r)->r_bustag, (r)->r_bushandle, (o), (d), (c))
+#define bus_write_region_stream_2(r, o, d, c) \
+ bus_space_write_region_stream_2((r)->r_bustag, (r)->r_bushandle, (o), (d), (c))
+#define bus_read_4(r, o) \
+ bus_space_read_4((r)->r_bustag, (r)->r_bushandle, (o))
+#define bus_read_multi_4(r, o, d, c) \
+ bus_space_read_multi_4((r)->r_bustag, (r)->r_bushandle, (o), (d), (c))
+#define bus_read_region_4(r, o, d, c) \
+ bus_space_read_region_4((r)->r_bustag, (r)->r_bushandle, (o), (d), (c))
+#define bus_set_multi_4(r, o, v, c) \
+ bus_space_set_multi_4((r)->r_bustag, (r)->r_bushandle, (o), (v), (c))
+#define bus_set_region_4(r, o, v, c) \
+ bus_space_set_region_4((r)->r_bustag, (r)->r_bushandle, (o), (v), (c))
+#define bus_write_4(r, o, v) \
+ bus_space_write_4((r)->r_bustag, (r)->r_bushandle, (o), (v))
+#define bus_write_multi_4(r, o, d, c) \
+ bus_space_write_multi_4((r)->r_bustag, (r)->r_bushandle, (o), (d), (c))
+#define bus_write_region_4(r, o, d, c) \
+ bus_space_write_region_4((r)->r_bustag, (r)->r_bushandle, (o), (d), (c))
+#define bus_read_stream_4(r, o) \
+ bus_space_read_stream_4((r)->r_bustag, (r)->r_bushandle, (o))
+#define bus_read_multi_stream_4(r, o, d, c) \
+ bus_space_read_multi_stream_4((r)->r_bustag, (r)->r_bushandle, (o), (d), (c))
+#define bus_read_region_stream_4(r, o, d, c) \
+ bus_space_read_region_stream_4((r)->r_bustag, (r)->r_bushandle, (o), (d), (c))
+#define bus_set_multi_stream_4(r, o, v, c) \
+ bus_space_set_multi_stream_4((r)->r_bustag, (r)->r_bushandle, (o), (v), (c))
+#define bus_set_region_stream_4(r, o, v, c) \
+ bus_space_set_region_stream_4((r)->r_bustag, (r)->r_bushandle, (o), (v), (c))
+#define bus_write_stream_4(r, o, v) \
+ bus_space_write_stream_4((r)->r_bustag, (r)->r_bushandle, (o), (v))
+#define bus_write_multi_stream_4(r, o, d, c) \
+ bus_space_write_multi_stream_4((r)->r_bustag, (r)->r_bushandle, (o), (d), (c))
+#define bus_write_region_stream_4(r, o, d, c) \
+ bus_space_write_region_stream_4((r)->r_bustag, (r)->r_bushandle, (o), (d), (c))
+#define bus_read_8(r, o) \
+ bus_space_read_8((r)->r_bustag, (r)->r_bushandle, (o))
+#define bus_read_multi_8(r, o, d, c) \
+ bus_space_read_multi_8((r)->r_bustag, (r)->r_bushandle, (o), (d), (c))
+#define bus_read_region_8(r, o, d, c) \
+ bus_space_read_region_8((r)->r_bustag, (r)->r_bushandle, (o), (d), (c))
+#define bus_set_multi_8(r, o, v, c) \
+ bus_space_set_multi_8((r)->r_bustag, (r)->r_bushandle, (o), (v), (c))
+#define bus_set_region_8(r, o, v, c) \
+ bus_space_set_region_8((r)->r_bustag, (r)->r_bushandle, (o), (v), (c))
+#define bus_write_8(r, o, v) \
+ bus_space_write_8((r)->r_bustag, (r)->r_bushandle, (o), (v))
+#define bus_write_multi_8(r, o, d, c) \
+ bus_space_write_multi_8((r)->r_bustag, (r)->r_bushandle, (o), (d), (c))
+#define bus_write_region_8(r, o, d, c) \
+ bus_space_write_region_8((r)->r_bustag, (r)->r_bushandle, (o), (d), (c))
+#define bus_read_stream_8(r, o) \
+ bus_space_read_stream_8((r)->r_bustag, (r)->r_bushandle, (o))
+#define bus_read_multi_stream_8(r, o, d, c) \
+ bus_space_read_multi_stream_8((r)->r_bustag, (r)->r_bushandle, (o), (d), (c))
+#define bus_read_region_stream_8(r, o, d, c) \
+ bus_space_read_region_stream_8((r)->r_bustag, (r)->r_bushandle, (o), (d), (c))
+#define bus_set_multi_stream_8(r, o, v, c) \
+ bus_space_set_multi_stream_8((r)->r_bustag, (r)->r_bushandle, (o), (v), (c))
+#define bus_set_region_stream_8(r, o, v, c) \
+ bus_space_set_region_stream_8((r)->r_bustag, (r)->r_bushandle, (o), (v), (c))
+#define bus_write_stream_8(r, o, v) \
+ bus_space_write_stream_8((r)->r_bustag, (r)->r_bushandle, (o), (v))
+#define bus_write_multi_stream_8(r, o, d, c) \
+ bus_space_write_multi_stream_8((r)->r_bustag, (r)->r_bushandle, (o), (d), (c))
+#define bus_write_region_stream_8(r, o, d, c) \
+ bus_space_write_region_stream_8((r)->r_bustag, (r)->r_bushandle, (o), (d), (c))
+#endif /* _KERNEL */
+
+#endif /* !_SYS_BUS_HH_ */
diff --git a/rtems/freebsd/sys/bus_dma.h b/rtems/freebsd/sys/bus_dma.h
new file mode 100644
index 00000000..5d843646
--- /dev/null
+++ b/rtems/freebsd/sys/bus_dma.h
@@ -0,0 +1,277 @@
+/* $NetBSD: bus.h,v 1.12 1997/10/01 08:25:15 fvdl Exp $ */
+
+/*-
+ * Copyright (c) 1996, 1997 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
+ * NASA Ames Research Center.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the NetBSD
+ * Foundation, Inc. and its contributors.
+ * 4. Neither the name of The NetBSD Foundation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*-
+ * Copyright (c) 1996 Charles M. Hannum. All rights reserved.
+ * Copyright (c) 1996 Christopher G. Demetriou. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Christopher G. Demetriou
+ * for the NetBSD Project.
+ * 4. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+/* $FreeBSD$ */
+
+#ifndef _BUS_DMA_HH_
+#define _BUS_DMA_HH_
+
+#include <rtems/freebsd/sys/_bus_dma.h>
+
+/*
+ * Machine independent interface for mapping physical addresses to peripheral
+ * bus 'physical' addresses, and assisting with DMA operations.
+ *
+ * XXX This file is always included from <machine/bus_dma.h> and should not
+ * (yet) be included directly.
+ */
+
+/*
+ * Flags used in various bus DMA methods.
+ */
+#define BUS_DMA_WAITOK 0x00 /* safe to sleep (pseudo-flag) */
+#define BUS_DMA_NOWAIT 0x01 /* not safe to sleep */
+#define BUS_DMA_ALLOCNOW 0x02 /* perform resource allocation now */
+#define BUS_DMA_COHERENT 0x04 /* hint: map memory in a coherent way */
+#define BUS_DMA_ZERO 0x08 /* allocate zero'ed memory */
+#define BUS_DMA_BUS1 0x10 /* placeholders for bus functions... */
+#define BUS_DMA_BUS2 0x20
+#define BUS_DMA_BUS3 0x40
+#define BUS_DMA_BUS4 0x80
+
+/*
+ * The following two flags are non-standard or specific to only certain
+ * architectures.
+ */
+#define BUS_DMA_NOWRITE 0x100
+#define BUS_DMA_NOCACHE 0x200
+
+/*
+ * The following flag is a DMA tag hint that the page offset of the
+ * loaded kernel virtual address must be preserved in the first
+ * physical segment address, when the KVA is loaded into DMA.
+ */
+#define BUS_DMA_KEEP_PG_OFFSET 0x400
+
+/* Forwards needed by prototypes below. */
+struct mbuf;
+struct uio;
+
+/*
+ * Operations performed by bus_dmamap_sync().
+ */
+#define BUS_DMASYNC_PREREAD 1
+#define BUS_DMASYNC_POSTREAD 2
+#define BUS_DMASYNC_PREWRITE 4
+#define BUS_DMASYNC_POSTWRITE 8
+
+/*
+ * bus_dma_segment_t
+ *
+ * Describes a single contiguous DMA transaction. Values
+ * are suitable for programming into DMA registers.
+ */
+typedef struct bus_dma_segment {
+ bus_addr_t ds_addr; /* DMA address */
+ bus_size_t ds_len; /* length of transfer */
+} bus_dma_segment_t;
+
+/*
+ * A function that returns 1 if the address cannot be accessed by
+ * a device and 0 if it can be.
+ */
+typedef int bus_dma_filter_t(void *, bus_addr_t);
+
+/*
+ * Generic helper function for manipulating mutexes.
+ */
+void busdma_lock_mutex(void *arg, bus_dma_lock_op_t op);
+
+/*
+ * Allocate a device specific dma_tag encapsulating the constraints of
+ * the parent tag in addition to other restrictions specified:
+ *
+ * alignment: Alignment for segments.
+ * boundary: Boundary that segments cannot cross.
+ * lowaddr: Low restricted address that cannot appear in a mapping.
+ * highaddr: High restricted address that cannot appear in a mapping.
+ * filtfunc: An optional function to further test if an address
+ * within the range of lowaddr and highaddr cannot appear
+ * in a mapping.
+ * filtfuncarg: An argument that will be passed to filtfunc in addition
+ * to the address to test.
+ * maxsize: Maximum mapping size supported by this tag.
+ * nsegments: Number of discontinuities allowed in maps.
+ * maxsegsz: Maximum size of a segment in the map.
+ * flags: Bus DMA flags.
+ * lockfunc: An optional function to handle driver-defined lock
+ * operations.
+ * lockfuncarg: An argument that will be passed to lockfunc in addition
+ * to the lock operation.
+ * dmat: A pointer to set to a valid dma tag should the return
+ * value of this function indicate success.
+ */
+/* XXX Should probably allow specification of alignment */
+int bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
+ bus_size_t boundary, bus_addr_t lowaddr,
+ bus_addr_t highaddr, bus_dma_filter_t *filtfunc,
+ void *filtfuncarg, bus_size_t maxsize, int nsegments,
+ bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
+ void *lockfuncarg, bus_dma_tag_t *dmat);
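+
+/*
+ * Example (hypothetical sketch, not a required usage): a driver whose
+ * DMA engine handles only 32-bit addresses and a single 4 KiB segment
+ * might create its tag roughly as follows; the parent tag and variable
+ * names are illustrative only.
+ *
+ *	bus_dma_tag_t tag;
+ *	int error;
+ *
+ *	error = bus_dma_tag_create(
+ *	    bus_get_dma_tag(dev),	// parent tag from the bus
+ *	    4,				// alignment
+ *	    0,				// boundary (none)
+ *	    BUS_SPACE_MAXADDR_32BIT,	// lowaddr
+ *	    BUS_SPACE_MAXADDR,		// highaddr
+ *	    NULL, NULL,			// no filter function
+ *	    4096, 1, 4096,		// maxsize, nsegments, maxsegsz
+ *	    0,				// flags
+ *	    NULL, NULL,			// default locking
+ *	    &tag);
+ */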
+
+int bus_dma_tag_destroy(bus_dma_tag_t dmat);
+
+/*
+ * A function that processes a successfully loaded dma map or an error
+ * from a delayed load map.
+ */
+typedef void bus_dmamap_callback_t(void *, bus_dma_segment_t *, int, int);
+
+/*
+ * Like bus_dmamap_callback but includes map size in bytes. This is
+ * defined as a separate interface to maintain compatibility for users
+ * of bus_dmamap_callback_t--at some point these interfaces should be merged.
+ */
+typedef void bus_dmamap_callback2_t(void *, bus_dma_segment_t *, int, bus_size_t, int);
+
+/*
+ * XXX sparc64 uses the same interface, but a much different implementation.
+ * <machine/bus_dma.h> for the sparc64 arch contains the equivalent
+ * declarations.
+ */
+#if !defined(__sparc64__)
+
+/*
+ * Allocate a handle for mapping from kva/uva/physical
+ * address space into bus device space.
+ */
+int bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp);
+
+/*
+ * Destroy a handle for mapping from kva/uva/physical
+ * address space into bus device space.
+ */
+int bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map);
+
+/*
+ * Allocate a piece of memory that can be efficiently mapped into
+ * bus device space based on the constraints listed in the dma tag.
+ * A dmamap for use with bus_dmamap_load is also allocated.
+ */
+int bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
+ bus_dmamap_t *mapp);
+
+/*
+ * Free a piece of memory and its associated dmamap that were allocated
+ * via bus_dmamem_alloc.
+ */
+void bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map);
+
+/*
+ * Map the buffer buf into bus space using the dmamap map.
+ */
+int bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
+ bus_size_t buflen, bus_dmamap_callback_t *callback,
+ void *callback_arg, int flags);
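+
+/*
+ * Example (illustrative sketch only): a minimal load callback that
+ * records the first segment's bus address in a caller-supplied
+ * variable; error handling is elided and all names are hypothetical.
+ *
+ *	static void
+ *	dma_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
+ *	{
+ *		if (error == 0)
+ *			*(bus_addr_t *)arg = segs[0].ds_addr;
+ *	}
+ *
+ *	bus_addr_t busaddr;
+ *	error = bus_dmamap_load(tag, map, buf, buflen, dma_cb,
+ *	    &busaddr, BUS_DMA_NOWAIT);
+ */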
+
+/*
+ * Like bus_dmamap_load but for mbufs. Note the use of the
+ * bus_dmamap_callback2_t interface.
+ */
+int bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map,
+ struct mbuf *mbuf,
+ bus_dmamap_callback2_t *callback, void *callback_arg,
+ int flags);
+
+int bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map,
+ struct mbuf *mbuf, bus_dma_segment_t *segs,
+ int *nsegs, int flags);
+
+/*
+ * Like bus_dmamap_load but for uios. Note the use of the
+ * bus_dmamap_callback2_t interface.
+ */
+int bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map,
+ struct uio *ui,
+ bus_dmamap_callback2_t *callback, void *callback_arg,
+ int flags);
+
+/*
+ * Perform a synchronization operation on the given map.
+ */
+void _bus_dmamap_sync(bus_dma_tag_t, bus_dmamap_t, bus_dmasync_op_t);
+#define bus_dmamap_sync(dmat, dmamap, op) \
+ do { \
+ if ((dmamap) != NULL) \
+ _bus_dmamap_sync(dmat, dmamap, op); \
+ } while (0)
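+
+/*
+ * Example (illustrative): a device-to-memory transfer is typically
+ * bracketed like this, assuming the map was loaded as above.
+ *
+ *	bus_dmamap_sync(tag, map, BUS_DMASYNC_PREREAD);
+ *	... start the device transfer and wait for completion ...
+ *	bus_dmamap_sync(tag, map, BUS_DMASYNC_POSTREAD);
+ */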
+
+/*
+ * Release the mapping held by map.
+ */
+void _bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map);
+#define bus_dmamap_unload(dmat, dmamap) \
+ do { \
+ if ((dmamap) != NULL) \
+ _bus_dmamap_unload(dmat, dmamap); \
+ } while (0)
+
+#endif /* __sparc64__ */
+
+#endif /* _BUS_DMA_HH_ */
diff --git a/rtems/freebsd/sys/callout.h b/rtems/freebsd/sys/callout.h
new file mode 100644
index 00000000..aedd74f4
--- /dev/null
+++ b/rtems/freebsd/sys/callout.h
@@ -0,0 +1,114 @@
+/*-
+ * Copyright (c) 1990, 1993
+ * The Regents of the University of California. All rights reserved.
+ * (c) UNIX System Laboratories, Inc.
+ * All or some portions of this file are derived from material licensed
+ * to the University of California by American Telephone and Telegraph
+ * Co. or Unix System Laboratories, Inc. and are reproduced herein with
+ * the permission of UNIX System Laboratories, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)callout.h 8.2 (Berkeley) 1/21/94
+ * $FreeBSD$
+ */
+
+#ifndef _SYS_CALLOUT_HH_
+#define _SYS_CALLOUT_HH_
+
+#include <rtems/freebsd/sys/queue.h>
+
+struct lock_object;
+
+SLIST_HEAD(callout_list, callout);
+TAILQ_HEAD(callout_tailq, callout);
+
+struct callout {
+#ifndef __rtems__
+ union {
+ SLIST_ENTRY(callout) sle;
+ TAILQ_ENTRY(callout) tqe;
+ } c_links;
+ int c_time; /* ticks to the event */
+#else /* __rtems__ */
+ rtems_chain_node c_node;
+ rtems_id c_id;
+#endif /* __rtems__ */
+ void *c_arg; /* function argument */
+ void (*c_func)(void *); /* function to call */
+ struct lock_object *c_lock; /* lock to handle */
+ int c_flags; /* state of this entry */
+#ifndef __rtems__
+ volatile int c_cpu; /* CPU we're scheduled on */
+#endif /* __rtems__ */
+};
+
+#define CALLOUT_LOCAL_ALLOC 0x0001 /* was allocated from callfree */
+#define CALLOUT_ACTIVE 0x0002 /* callout is currently active */
+#define CALLOUT_PENDING 0x0004 /* callout is waiting for timeout */
+#define CALLOUT_MPSAFE 0x0008 /* callout handler is mp safe */
+#define CALLOUT_RETURNUNLOCKED 0x0010 /* handler returns with mtx unlocked */
+#define CALLOUT_SHAREDLOCK 0x0020 /* callout lock held in shared mode */
+
+struct callout_handle {
+ struct callout *callout;
+};
+
+#ifdef _KERNEL
+extern int ncallout;
+
+#define callout_active(c) ((c)->c_flags & CALLOUT_ACTIVE)
+#define callout_deactivate(c) ((c)->c_flags &= ~CALLOUT_ACTIVE)
+#define callout_drain(c) _callout_stop_safe(c, 1)
+void callout_init(struct callout *, int);
+void _callout_init_lock(struct callout *, struct lock_object *, int);
+#define callout_init_mtx(c, mtx, flags) \
+ _callout_init_lock((c), ((mtx) != NULL) ? &(mtx)->lock_object : \
+ NULL, (flags))
+#define callout_init_rw(c, rw, flags) \
+ _callout_init_lock((c), ((rw) != NULL) ? &(rw)->lock_object : \
+ NULL, (flags))
+#define callout_pending(c) ((c)->c_flags & CALLOUT_PENDING)
+int callout_reset_on(struct callout *, int, void (*)(void *), void *, int);
+#ifndef __rtems__
+#define callout_reset(c, on_tick, fn, arg) \
+ callout_reset_on((c), (on_tick), (fn), (arg), (c)->c_cpu)
+#else /* __rtems__ */
+int callout_reset(struct callout *, int, void (*)(void *), void *);
+#endif /* __rtems__ */
+#define callout_reset_curcpu(c, on_tick, fn, arg) \
+ callout_reset_on((c), (on_tick), (fn), (arg), PCPU_GET(cpuid))
+int callout_schedule(struct callout *, int);
+int callout_schedule_on(struct callout *, int, int);
+#define callout_schedule_curcpu(c, on_tick) \
+ callout_schedule_on((c), (on_tick), PCPU_GET(cpuid))
+#define callout_stop(c) _callout_stop_safe(c, 0)
+int _callout_stop_safe(struct callout *, int);
+void callout_tick(void);
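+
+/*
+ * Example (hypothetical driver sketch): a self-rearming one-second
+ * watchdog; wd_callout, wd_tick and sc are illustrative names.
+ *
+ *	static struct callout wd_callout;
+ *
+ *	static void
+ *	wd_tick(void *arg)
+ *	{
+ *		... poll the hardware ...
+ *		callout_reset(&wd_callout, hz, wd_tick, arg);
+ *	}
+ *
+ *	callout_init(&wd_callout, CALLOUT_MPSAFE);
+ *	callout_reset(&wd_callout, hz, wd_tick, sc);
+ *	...
+ *	callout_drain(&wd_callout);	// on detach; waits for the handler
+ */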
+
+
+#endif
+
+#endif /* _SYS_CALLOUT_HH_ */
diff --git a/rtems/freebsd/sys/cdefs.h b/rtems/freebsd/sys/cdefs.h
new file mode 100644
index 00000000..07abef5a
--- /dev/null
+++ b/rtems/freebsd/sys/cdefs.h
@@ -0,0 +1,582 @@
+/*-
+ * Copyright (c) 1991, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Berkeley Software Design, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)cdefs.h 8.8 (Berkeley) 1/9/95
+ * $FreeBSD$
+ */
+
+#ifndef _SYS_CDEFS_HH_
+#define _SYS_CDEFS_HH_
+
+#if defined(__cplusplus)
+#define __BEGIN_DECLS extern "C" {
+#define __END_DECLS }
+#else
+#define __BEGIN_DECLS
+#define __END_DECLS
+#endif
+
+/*
+ * This code has been put in place to help reduce the addition of
+ * compiler-specific defines in FreeBSD code. It aids in
+ * having a compiler-agnostic source tree.
+ */
+
+#if defined(__GNUC__) || defined(__INTEL_COMPILER)
+
+#if __GNUC__ >= 3 || defined(__INTEL_COMPILER)
+#define __GNUCLIKE_ASM 3
+#define __GNUCLIKE_MATH_BUILTIN_CONSTANTS
+#else
+#define __GNUCLIKE_ASM 2
+#endif
+#define __GNUCLIKE___TYPEOF 1
+#define __GNUCLIKE___OFFSETOF 1
+#define __GNUCLIKE___SECTION 1
+
+#define __GNUCLIKE_ATTRIBUTE_MODE_DI 1
+
+#ifndef __INTEL_COMPILER
+# define __GNUCLIKE_CTOR_SECTION_HANDLING 1
+#endif
+
+#define __GNUCLIKE_BUILTIN_CONSTANT_P 1
+# if defined(__INTEL_COMPILER) && defined(__cplusplus) \
+ && __INTEL_COMPILER < 800
+# undef __GNUCLIKE_BUILTIN_CONSTANT_P
+# endif
+
+#if (__GNUC_MINOR__ > 95 || __GNUC__ >= 3) && !defined(__INTEL_COMPILER)
+# define __GNUCLIKE_BUILTIN_VARARGS 1
+# define __GNUCLIKE_BUILTIN_STDARG 1
+# define __GNUCLIKE_BUILTIN_VAALIST 1
+#endif
+
+#if defined(__GNUC__)
+# define __GNUC_VA_LIST_COMPATIBILITY 1
+#endif
+
+#ifndef __INTEL_COMPILER
+# define __GNUCLIKE_BUILTIN_NEXT_ARG 1
+# define __GNUCLIKE_MATH_BUILTIN_RELOPS
+#endif
+
+#define __GNUCLIKE_BUILTIN_MEMCPY 1
+
+/* XXX: if __GNUC__ >= 2: not tested everywhere originally, where replaced */
+#define __CC_SUPPORTS_INLINE 1
+#define __CC_SUPPORTS___INLINE 1
+#define __CC_SUPPORTS___INLINE__ 1
+
+#define __CC_SUPPORTS___FUNC__ 1
+#define __CC_SUPPORTS_WARNING 1
+
+#define __CC_SUPPORTS_VARADIC_XXX 1 /* see varargs.h */
+
+#define __CC_SUPPORTS_DYNAMIC_ARRAY_INIT 1
+
+#endif /* __GNUC__ || __INTEL_COMPILER */
+
+/*
+ * Macro to test if we're using a specific version of gcc or later.
+ */
+#if defined(__GNUC__) && !defined(__INTEL_COMPILER)
+#define __GNUC_PREREQ__(ma, mi) \
+ (__GNUC__ > (ma) || __GNUC__ == (ma) && __GNUC_MINOR__ >= (mi))
+#else
+#define __GNUC_PREREQ__(ma, mi) 0
+#endif
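+
+/*
+ * Example (illustrative; MY_NONNULL is a made-up name): guard the use
+ * of an attribute introduced in GCC 3.3.
+ *
+ *	#if __GNUC_PREREQ__(3, 3)
+ *	#define MY_NONNULL __attribute__((__nonnull__(1)))
+ *	#else
+ *	#define MY_NONNULL
+ *	#endif
+ */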
+
+/*
+ * The __CONCAT macro is used to concatenate parts of symbol names, e.g.
+ * with "#define OLD(foo) __CONCAT(old,foo)", OLD(foo) produces oldfoo.
+ * The __CONCAT macro is a bit tricky to use if it must work in non-ANSI
+ * mode -- there must be no spaces between its arguments, and for nested
+ * __CONCAT's, all the __CONCAT's must be at the left. __CONCAT can also
+ * concatenate double-quoted strings produced by the __STRING macro, but
+ * this only works with ANSI C.
+ *
+ * __XSTRING is like __STRING, but it expands any macros in its argument
+ * first. It is only available with ANSI C.
+ */
+#if defined(__STDC__) || defined(__cplusplus)
+#define __P(protos) protos /* full-blown ANSI C */
+#define __CONCAT1(x,y) x ## y
+#define __CONCAT(x,y) __CONCAT1(x,y)
+#define __STRING(x) #x /* stringify without expanding x */
+#define __XSTRING(x) __STRING(x) /* expand x, then stringify */
+
+#define __const const /* define reserved names to standard */
+#define __signed signed
+#define __volatile volatile
+#if defined(__cplusplus)
+#define __inline inline /* convert to C++ keyword */
+#else
+#if !(defined(__CC_SUPPORTS___INLINE))
+#define __inline /* delete GCC keyword */
+#endif /* ! __CC_SUPPORTS___INLINE */
+#endif /* !__cplusplus */
+
+#else /* !(__STDC__ || __cplusplus) */
+#define __P(protos) () /* traditional C preprocessor */
+#define __CONCAT(x,y) x/**/y
+#define __STRING(x) "x"
+
+#if !defined(__CC_SUPPORTS___INLINE)
+#define __const /* delete pseudo-ANSI C keywords */
+#define __inline
+#define __signed
+#define __volatile
+/*
+ * In non-ANSI C environments, new programs will want ANSI-only C keywords
+ * deleted from the program and old programs will want them left alone.
+ * When using a compiler other than gcc, programs using the ANSI C keywords
+ * const, inline etc. as normal identifiers should define -DNO_ANSI_KEYWORDS.
+ * When using "gcc -traditional", we assume that this is the intent; if
+ * __GNUC__ is defined but __STDC__ is not, we leave the new keywords alone.
+ */
+#ifndef NO_ANSI_KEYWORDS
+#define const /* delete ANSI C keywords */
+#define inline
+#define signed
+#define volatile
+#endif /* !NO_ANSI_KEYWORDS */
+#endif /* !__CC_SUPPORTS___INLINE */
+#endif /* !(__STDC__ || __cplusplus) */
+
+/*
+ * Compiler-dependent macros to help declare dead (non-returning) and
+ * pure (no side effects) functions, and unused variables. They are
+ * null except for versions of gcc that are known to support the features
+ * properly (old versions of gcc-2 supported the dead and pure features
+ * in a different (wrong) way). If we do not provide an implementation
+ * for a given compiler, let the compile fail if it is told to use
+ * a feature that we cannot live without.
+ */
+#ifdef lint
+#define __dead2
+#define __pure2
+#define __unused
+#define __packed
+#define __aligned(x)
+#define __section(x)
+#else
+#if !__GNUC_PREREQ__(2, 5) && !defined(__INTEL_COMPILER)
+#define __dead2
+#define __pure2
+#define __unused
+#endif
+#if __GNUC__ == 2 && __GNUC_MINOR__ >= 5 && __GNUC_MINOR__ < 7 && !defined(__INTEL_COMPILER)
+#define __dead2 __attribute__((__noreturn__))
+#define __pure2 __attribute__((__const__))
+#define __unused
+/* XXX Find out what to do for __packed, __aligned and __section */
+#endif
+#if __GNUC_PREREQ__(2, 7)
+#define __dead2 __attribute__((__noreturn__))
+#define __pure2 __attribute__((__const__))
+#define __unused __attribute__((__unused__))
+#define __used __attribute__((__used__))
+#define __packed __attribute__((__packed__))
+#define __aligned(x) __attribute__((__aligned__(x)))
+#define __section(x) __attribute__((__section__(x)))
+#endif
+#if defined(__INTEL_COMPILER)
+#define __dead2 __attribute__((__noreturn__))
+#define __pure2 __attribute__((__const__))
+#define __unused __attribute__((__unused__))
+#define __used __attribute__((__used__))
+#define __packed __attribute__((__packed__))
+#define __aligned(x) __attribute__((__aligned__(x)))
+#define __section(x) __attribute__((__section__(x)))
+#endif
+#endif
+
+#if __GNUC_PREREQ__(2, 96)
+#define __malloc_like __attribute__((__malloc__))
+#define __pure __attribute__((__pure__))
+#else
+#define __malloc_like
+#define __pure
+#endif
+
+#if __GNUC_PREREQ__(3, 1) || (defined(__INTEL_COMPILER) && __INTEL_COMPILER >= 800)
+#define __always_inline __attribute__((__always_inline__))
+#else
+#define __always_inline
+#endif
+
+#if __GNUC_PREREQ__(4, 2) /* actually 4.1.3 */
+#define __gnu89_inline __attribute__((__gnu_inline__)) __inline
+#else
+#define __gnu89_inline
+#endif
+
+#if __GNUC_PREREQ__(3, 1)
+#define __noinline __attribute__ ((__noinline__))
+#else
+#define __noinline
+#endif
+
+#if __GNUC_PREREQ__(3, 3)
+#define __nonnull(x) __attribute__((__nonnull__(x)))
+#else
+#define __nonnull(x)
+#endif
+
+/* XXX: should use `#if __STDC_VERSION__ < 199901'. */
+#if !__GNUC_PREREQ__(2, 7) && !defined(__INTEL_COMPILER)
+#define __func__ NULL
+#endif
+
+#if (defined(__INTEL_COMPILER) || (defined(__GNUC__) && __GNUC__ >= 2)) && !defined(__STRICT_ANSI__) || __STDC_VERSION__ >= 199901
+#define __LONG_LONG_SUPPORTED
+#endif
+
+/*
+ * GCC 2.95 provides `__restrict' as an extension to C90 to support the
+ * C99-specific `restrict' type qualifier. We happen to use `__restrict' as
+ * a way to define the `restrict' type qualifier without disturbing older
+ * software that is unaware of C99 keywords.
+ */
+#if !(__GNUC__ == 2 && __GNUC_MINOR__ == 95)
+#if !defined(__STDC_VERSION__) || __STDC_VERSION__ < 199901 || defined(lint)
+#define __restrict
+#else
+#define __restrict restrict
+#endif
+#endif
+
+/*
+ * GNU C version 2.96 adds explicit branch prediction so that
+ * the CPU back-end can hint the processor and also so that
+ * code blocks can be reordered such that the predicted path
+ * sees a more linear flow, thus improving cache behavior, etc.
+ *
+ * The following two macros provide us with a way to utilize this
+ * compiler feature. Use __predict_true() if you expect the expression
+ * to evaluate to true, and __predict_false() if you expect the
+ * expression to evaluate to false.
+ *
+ * A few notes about usage:
+ *
+ * * Generally, use __predict_false() on error condition checks (unless
+ * you have some _strong_ reason to do otherwise, in which case
+ * document it), and/or __predict_true() on `no-error' condition
+ * checks, assuming you want to optimize for the no-error case.
+ *
+ * * Other than that, if you don't know the likelihood of a test
+ * succeeding from empirical or other `hard' evidence, don't
+ * make predictions.
+ *
+ * * These are meant to be used in places that are run `a lot'.
+ * It is wasteful to make predictions in code that is run
+ * seldom (e.g. at subsystem initialization time) as the
+ * basic block reordering that this affects can often generate
+ * larger code.
+ */
+#if __GNUC_PREREQ__(2, 96)
+#define __predict_true(exp) __builtin_expect((exp), 1)
+#define __predict_false(exp) __builtin_expect((exp), 0)
+#else
+#define __predict_true(exp) (exp)
+#define __predict_false(exp) (exp)
+#endif
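+
+/*
+ * Example (illustrative): optimizing for the common no-error path.
+ *
+ *	if (__predict_false(error != 0))
+ *		return (error);
+ */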
+
+#if __GNUC_PREREQ__(4, 2)
+#define __hidden __attribute__((__visibility__("hidden")))
+#define __exported __attribute__((__visibility__("default")))
+#else
+#define __hidden
+#define __exported
+#endif
+
+/*
+ * We define this here since <stddef.h>, <sys/queue.h>, and <sys/types.h>
+ * require it.
+ */
+#if __GNUC_PREREQ__(4, 1)
+#define __offsetof(type, field) __builtin_offsetof(type, field)
+#else
+#ifndef __cplusplus
+#define __offsetof(type, field) ((size_t)(&((type *)0)->field))
+#else
+#define __offsetof(type, field) \
+ (__offsetof__ (reinterpret_cast <size_t> \
+ (&reinterpret_cast <const volatile char &> \
+ (static_cast<type *> (0)->field))))
+#endif
+#endif
+#define __rangeof(type, start, end) \
+ (__offsetof(type, end) - __offsetof(type, start))
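+
+/*
+ * Example (illustrative): given
+ *
+ *	struct s { int a; int b; int c; };
+ *
+ * __offsetof(struct s, b) is the byte offset of member b, and
+ * __rangeof(struct s, a, c) is the number of bytes from the start of
+ * member a up to (but not including) member c, which is handy for
+ * zeroing a run of adjacent members in one call.
+ */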
+
+/*
+ * Compiler-dependent macros to declare that functions take printf-like
+ * or scanf-like arguments. They are null except for versions of gcc
+ * that are known to support the features properly (old versions of gcc-2
+ * didn't permit keeping the keywords out of the application namespace).
+ */
+#if !__GNUC_PREREQ__(2, 7) && !defined(__INTEL_COMPILER)
+#define __printflike(fmtarg, firstvararg)
+#define __scanflike(fmtarg, firstvararg)
+#define __format_arg(fmtarg)
+#else
+#define __printflike(fmtarg, firstvararg) \
+ __attribute__((__format__ (__printf__, fmtarg, firstvararg)))
+#define __scanflike(fmtarg, firstvararg) \
+ __attribute__((__format__ (__scanf__, fmtarg, firstvararg)))
+#define __format_arg(fmtarg) __attribute__((__format_arg__ (fmtarg)))
+#endif
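+
+/*
+ * Example (illustrative; my_log is a made-up function): have the
+ * compiler check the variadic arguments against the format string,
+ * which here is parameter 2 with the first variadic argument at 3.
+ *
+ *	void my_log(int level, const char *fmt, ...) __printflike(2, 3);
+ */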
+
+/* Compiler-dependent macros that rely on FreeBSD-specific extensions. */
+#if __FreeBSD_cc_version >= 300001 && defined(__GNUC__) && !defined(__INTEL_COMPILER)
+#define __printf0like(fmtarg, firstvararg) \
+ __attribute__((__format__ (__printf0__, fmtarg, firstvararg)))
+#else
+#define __printf0like(fmtarg, firstvararg)
+#endif
+
+#if defined(__GNUC__) || defined(__INTEL_COMPILER)
+#ifndef __INTEL_COMPILER
+#define __strong_reference(sym,aliassym) \
+ extern __typeof (sym) aliassym __attribute__ ((__alias__ (#sym)))
+#endif
+#ifdef __STDC__
+#define __weak_reference(sym,alias) \
+ __asm__(".weak " #alias); \
+ __asm__(".equ " #alias ", " #sym)
+#define __warn_references(sym,msg) \
+ __asm__(".section .gnu.warning." #sym); \
+ __asm__(".asciz \"" msg "\""); \
+ __asm__(".previous")
+#define __sym_compat(sym,impl,verid) \
+ __asm__(".symver " #impl ", " #sym "@" #verid)
+#define __sym_default(sym,impl,verid) \
+ __asm__(".symver " #impl ", " #sym "@@" #verid)
+#else
+#define __weak_reference(sym,alias) \
+ __asm__(".weak alias"); \
+ __asm__(".equ alias, sym")
+#define __warn_references(sym,msg) \
+ __asm__(".section .gnu.warning.sym"); \
+ __asm__(".asciz \"msg\""); \
+ __asm__(".previous")
+#define __sym_compat(sym,impl,verid) \
+ __asm__(".symver impl, sym@verid")
+#define __sym_default(impl,sym,verid) \
+ __asm__(".symver impl, sym@@verid")
+#endif /* __STDC__ */
+#endif /* __GNUC__ || __INTEL_COMPILER */
+
+#if defined(__GNUC__) || defined(__INTEL_COMPILER)
+#define __IDSTRING(name,string) __asm__(".ident\t\"" string "\"")
+#else
+/*
+ * The following definition might not work well if used in header files,
+ * but it should be better than nothing. If you want a "do nothing"
+ * version, then it should generate some harmless declaration, such as:
+ * #define __IDSTRING(name,string) struct __hack
+ */
+#define __IDSTRING(name,string) static const char name[] __unused = string
+#endif
+
+/*
+ * Embed the rcs id of a source file in the resulting library. Note that in
+ * more recent ELF binutils, we use .ident allowing the ID to be stripped.
+ * Usage:
+ * __FBSDID("$FreeBSD$");
+ */
+#ifndef __FBSDID
+#if !defined(lint) && !defined(STRIP_FBSDID)
+#define __FBSDID(s) __IDSTRING(__CONCAT(__rcsid_,__LINE__),s)
+#else
+#define __FBSDID(s) struct __hack
+#endif
+#endif
+
+#ifndef __RCSID
+#ifndef NO__RCSID
+#define __RCSID(s) __IDSTRING(__CONCAT(__rcsid_,__LINE__),s)
+#else
+#define __RCSID(s) struct __hack
+#endif
+#endif
+
+#ifndef __RCSID_SOURCE
+#ifndef NO__RCSID_SOURCE
+#define __RCSID_SOURCE(s) __IDSTRING(__CONCAT(__rcsid_source_,__LINE__),s)
+#else
+#define __RCSID_SOURCE(s) struct __hack
+#endif
+#endif
+
+#ifndef __SCCSID
+#ifndef NO__SCCSID
+#define __SCCSID(s) __IDSTRING(__CONCAT(__sccsid_,__LINE__),s)
+#else
+#define __SCCSID(s) struct __hack
+#endif
+#endif
+
+#ifndef __COPYRIGHT
+#ifndef NO__COPYRIGHT
+#define __COPYRIGHT(s) __IDSTRING(__CONCAT(__copyright_,__LINE__),s)
+#else
+#define __COPYRIGHT(s) struct __hack
+#endif
+#endif
+
+#ifndef __DECONST
+#define __DECONST(type, var) ((type)(uintptr_t)(const void *)(var))
+#endif
+
+#ifndef __DEVOLATILE
+#define __DEVOLATILE(type, var) ((type)(uintptr_t)(volatile void *)(var))
+#endif
+
+#ifndef __DEQUALIFY
+#define __DEQUALIFY(type, var) ((type)(uintptr_t)(const volatile void *)(var))
+#endif
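+
+/*
+ * Example (illustrative): stripping a const qualifier where an API
+ * takes a non-const pointer but is known not to modify the data.
+ *
+ *	const char *name = "fixed";
+ *	char *p = __DECONST(char *, name);
+ */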
+
+/*-
+ * The following definitions are an extension of the behavior originally
+ * implemented in <sys/_posix.h>, but with a different level of granularity.
+ * POSIX.1 requires that the macros we test be defined before any standard
+ * header file is included.
+ *
+ * Here's a quick run-down of the versions:
+ * defined(_POSIX_SOURCE) 1003.1-1988
+ * _POSIX_C_SOURCE == 1 1003.1-1990
+ * _POSIX_C_SOURCE == 2 1003.2-1992 C Language Binding Option
+ * _POSIX_C_SOURCE == 199309 1003.1b-1993
+ * _POSIX_C_SOURCE == 199506 1003.1c-1995, 1003.1i-1995,
+ * and the omnibus ISO/IEC 9945-1: 1996
+ * _POSIX_C_SOURCE == 200112 1003.1-2001
+ * _POSIX_C_SOURCE == 200809 1003.1-2008
+ *
+ * In addition, the X/Open Portability Guide, which is now the Single UNIX
+ * Specification, defines a feature-test macro which indicates the version of
+ * that specification, and which subsumes _POSIX_C_SOURCE.
+ *
+ * Our macros begin with two underscores to avoid namespace screwage.
+ */
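+
+/*
+ * Example (illustrative): building with
+ *
+ *	cc -D_POSIX_C_SOURCE=200112L ...
+ *
+ * causes the tests below to define __POSIX_VISIBLE as 200112 and
+ * __ISO_C_VISIBLE as 1999, so headers expose only the 1003.1-2001
+ * and C99 interfaces.
+ */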
+
+/* Deal with IEEE Std. 1003.1-1990, in which _POSIX_C_SOURCE == 1. */
+#if defined(_POSIX_C_SOURCE) && _POSIX_C_SOURCE == 1
+#undef _POSIX_C_SOURCE /* Probably illegal, but beyond caring now. */
+#define _POSIX_C_SOURCE 199009
+#endif
+
+/* Deal with IEEE Std. 1003.2-1992, in which _POSIX_C_SOURCE == 2. */
+#if defined(_POSIX_C_SOURCE) && _POSIX_C_SOURCE == 2
+#undef _POSIX_C_SOURCE
+#define _POSIX_C_SOURCE 199209
+#endif
+
+/* Deal with various X/Open Portability Guides and Single UNIX Spec. */
+#ifdef _XOPEN_SOURCE
+#if _XOPEN_SOURCE - 0 >= 700
+#define __XSI_VISIBLE 700
+#undef _POSIX_C_SOURCE
+#define _POSIX_C_SOURCE 200809
+#elif _XOPEN_SOURCE - 0 >= 600
+#define __XSI_VISIBLE 600
+#undef _POSIX_C_SOURCE
+#define _POSIX_C_SOURCE 200112
+#elif _XOPEN_SOURCE - 0 >= 500
+#define __XSI_VISIBLE 500
+#undef _POSIX_C_SOURCE
+#define _POSIX_C_SOURCE 199506
+#endif
+#endif
+
+/*
+ * Deal with all versions of POSIX. The ordering relative to the tests above is
+ * important.
+ */
+#if defined(_POSIX_SOURCE) && !defined(_POSIX_C_SOURCE)
+#define _POSIX_C_SOURCE 198808
+#endif
+#ifdef _POSIX_C_SOURCE
+#if _POSIX_C_SOURCE >= 200809
+#define __POSIX_VISIBLE 200809
+#define __ISO_C_VISIBLE 1999
+#elif _POSIX_C_SOURCE >= 200112
+#define __POSIX_VISIBLE 200112
+#define __ISO_C_VISIBLE 1999
+#elif _POSIX_C_SOURCE >= 199506
+#define __POSIX_VISIBLE 199506
+#define __ISO_C_VISIBLE 1990
+#elif _POSIX_C_SOURCE >= 199309
+#define __POSIX_VISIBLE 199309
+#define __ISO_C_VISIBLE 1990
+#elif _POSIX_C_SOURCE >= 199209
+#define __POSIX_VISIBLE 199209
+#define __ISO_C_VISIBLE 1990
+#elif _POSIX_C_SOURCE >= 199009
+#define __POSIX_VISIBLE 199009
+#define __ISO_C_VISIBLE 1990
+#else
+#define __POSIX_VISIBLE 198808
+#define __ISO_C_VISIBLE 0
+#endif /* _POSIX_C_SOURCE */
+#else
+/*-
+ * Deal with _ANSI_SOURCE:
+ * If it is defined, and no other compilation environment is explicitly
+ * requested, then define our internal feature-test macros to zero. This
+ * makes no difference to the preprocessor (undefined symbols in preprocessing
+ * expressions are defined to have value zero), but makes it more convenient for
+ * a test program to print out the values.
+ *
+ * If a program mistakenly defines _ANSI_SOURCE and some other macro such as
+ * _POSIX_C_SOURCE, we will assume that it wants the broader compilation
+ * environment (and in fact we will never get here).
+ */
+#if defined(_ANSI_SOURCE) /* Hide almost everything. */
+#define __POSIX_VISIBLE 0
+#define __XSI_VISIBLE 0
+#define __BSD_VISIBLE 0
+#define __ISO_C_VISIBLE 1990
+#elif defined(_C99_SOURCE) /* Localism to specify strict C99 env. */
+#define __POSIX_VISIBLE 0
+#define __XSI_VISIBLE 0
+#define __BSD_VISIBLE 0
+#define __ISO_C_VISIBLE 1999
+#else /* Default environment: show everything. */
+#define __POSIX_VISIBLE 200809
+#define __XSI_VISIBLE 700
+#define __BSD_VISIBLE 1
+#define __ISO_C_VISIBLE 1999
+#endif
+#endif
+
+#endif /* !_SYS_CDEFS_HH_ */
diff --git a/rtems/freebsd/sys/condvar.h b/rtems/freebsd/sys/condvar.h
new file mode 100644
index 00000000..e46070e3
--- /dev/null
+++ b/rtems/freebsd/sys/condvar.h
@@ -0,0 +1,87 @@
+/*-
+ * Copyright (c) 2000 Jake Burkholder <jake@freebsd.org>.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _SYS_CONDVAR_HH_
+#define _SYS_CONDVAR_HH_
+
+#ifndef LOCORE
+#include <rtems/freebsd/sys/queue.h>
+
+struct lock_object;
+struct thread;
+
+TAILQ_HEAD(cv_waitq, thread);
+
+/*
+ * Condition variable. The waiters count is protected by the mutex that
+ * protects the condition; that is, the mutex that is passed to cv_wait*()
+ * and is held across calls to cv_signal() and cv_broadcast(). It is an
+ * optimization to avoid looking up the sleep queue if there are no waiters.
+ */
+struct cv {
+#ifdef __rtems__
+ rtems_chain_node cv_node;
+ pthread_cond_t cv_id;
+#endif /* __rtems__ */
+ const char *cv_description;
+#ifndef __rtems__
+ int cv_waiters;
+#endif /* __rtems__ */
+};
+
+#ifdef _KERNEL
+void cv_init(struct cv *cvp, const char *desc);
+void cv_destroy(struct cv *cvp);
+
+void _cv_wait(struct cv *cvp, struct lock_object *lock);
+void _cv_wait_unlock(struct cv *cvp, struct lock_object *lock);
+int _cv_wait_sig(struct cv *cvp, struct lock_object *lock);
+int _cv_timedwait(struct cv *cvp, struct lock_object *lock, int timo);
+int _cv_timedwait_sig(struct cv *cvp, struct lock_object *lock, int timo);
+
+void cv_signal(struct cv *cvp);
+void cv_broadcastpri(struct cv *cvp, int pri);
+
+#define cv_wait(cvp, lock) \
+ _cv_wait((cvp), &(lock)->lock_object)
+#define cv_wait_unlock(cvp, lock) \
+ _cv_wait_unlock((cvp), &(lock)->lock_object)
+#define cv_wait_sig(cvp, lock) \
+ _cv_wait_sig((cvp), &(lock)->lock_object)
+#define cv_timedwait(cvp, lock, timo) \
+ _cv_timedwait((cvp), &(lock)->lock_object, (timo))
+#define cv_timedwait_sig(cvp, lock, timo) \
+ _cv_timedwait_sig((cvp), &(lock)->lock_object, (timo))
+
+#define cv_broadcast(cvp) cv_broadcastpri(cvp, 0)
+
+#define cv_wmesg(cvp) ((cvp)->cv_description)
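+
+/*
+ * Example (illustrative sketch; sc, mtx and queue_empty are
+ * hypothetical): the canonical consumer loop, re-checking the
+ * condition after each wakeup.
+ *
+ *	mtx_lock(&sc->mtx);
+ *	while (queue_empty(sc))
+ *		cv_wait(&sc->cv, &sc->mtx);
+ *	... consume an item ...
+ *	mtx_unlock(&sc->mtx);
+ *
+ * A producer calls cv_signal(&sc->cv) (or cv_broadcast()) while
+ * holding the same mutex.
+ */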
+
+#endif /* _KERNEL */
+#endif /* !LOCORE */
+#endif /* _SYS_CONDVAR_HH_ */
diff --git a/rtems/freebsd/sys/conf.h b/rtems/freebsd/sys/conf.h
new file mode 100644
index 00000000..accc71be
--- /dev/null
+++ b/rtems/freebsd/sys/conf.h
@@ -0,0 +1,345 @@
+/*-
+ * Copyright (c) 1990, 1993
+ * The Regents of the University of California. All rights reserved.
+ * Copyright (c) 2000
+ * Poul-Henning Kamp. All rights reserved.
+ * (c) UNIX System Laboratories, Inc.
+ * All or some portions of this file are derived from material licensed
+ * to the University of California by American Telephone and Telegraph
+ * Co. or Unix System Laboratories, Inc. and are reproduced herein with
+ * the permission of UNIX System Laboratories, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)conf.h 8.5 (Berkeley) 1/9/95
+ * $FreeBSD$
+ */
+
+#ifndef _SYS_CONF_HH_
+#define _SYS_CONF_HH_
+
+#ifdef _KERNEL
+#include <rtems/freebsd/sys/eventhandler.h>
+#else
+#include <rtems/freebsd/sys/queue.h>
+#endif
+
+struct snapdata;
+struct devfs_dirent;
+struct cdevsw;
+struct file;
+
+struct cdev {
+ void *__si_reserved;
+ u_int si_flags;
+#define SI_ETERNAL 0x0001 /* never destroyed */
+#define SI_ALIAS 0x0002 /* carrier of alias name */
+#define SI_NAMED 0x0004 /* make_dev{_alias} has been called */
+#define SI_CHEAPCLONE 0x0008 /* can be removed_dev'ed when vnode reclaims */
+#define SI_CHILD 0x0010 /* child of another struct cdev */
+#define SI_DEVOPEN 0x0020 /* opened by device */
+#define SI_CONSOPEN 0x0040 /* opened by console */
+#define SI_DUMPDEV 0x0080 /* is kernel dumpdev */
+#define SI_CANDELETE 0x0100 /* can do BIO_DELETE */
+#define SI_CLONELIST 0x0200 /* on a clone list */
+ struct timespec si_atime;
+ struct timespec si_ctime;
+ struct timespec si_mtime;
+ uid_t si_uid;
+ gid_t si_gid;
+ mode_t si_mode;
+ struct ucred *si_cred; /* cached clone-time credential */
+ int si_drv0;
+ int si_refcount;
+ LIST_ENTRY(cdev) si_list;
+ LIST_ENTRY(cdev) si_clone;
+ LIST_HEAD(, cdev) si_children;
+ LIST_ENTRY(cdev) si_siblings;
+ struct cdev *si_parent;
+ char *si_name;
+ void *si_drv1, *si_drv2;
+ struct cdevsw *si_devsw;
+ int si_iosize_max; /* maximum I/O size (for physio &al) */
+ u_long si_usecount;
+ u_long si_threadcount;
+ union {
+ struct snapdata *__sid_snapdata;
+ } __si_u;
+ char __si_namebuf[SPECNAMELEN + 1];
+};
+
+#define si_snapdata __si_u.__sid_snapdata
+
+#ifdef _KERNEL
+
+/*
+ * Definitions of device driver entry switches
+ */
+
+struct bio;
+struct buf;
+struct thread;
+struct uio;
+struct knote;
+struct clonedevs;
+struct vm_object;
+struct vnode;
+
+/*
+ * Note: d_thread_t is provided as a transition aid for those drivers
+ * that treat struct proc/struct thread as an opaque data type and
+ * exist in substantially the same form in both 4.x and 5.x. Writers
+ * of drivers that dip into the d_thread_t structure should use
+ * struct thread or struct proc as appropriate for the version of the
+ * OS they are using. It is provided in lieu of each device driver
+ * inventing its own way of doing this. While it does violate style(9)
+ * in a number of ways, this violation is deemed to be less
+ * important than the benefits that a uniform API between releases
+ * gives.
+ *
+ * Users of struct thread/struct proc that aren't device drivers should
+ * not use d_thread_t.
+ */
+
+typedef struct thread d_thread_t;
+
+typedef int d_open_t(struct cdev *dev, int oflags, int devtype, struct thread *td);
+typedef int d_fdopen_t(struct cdev *dev, int oflags, struct thread *td, struct file *fp);
+typedef int d_close_t(struct cdev *dev, int fflag, int devtype, struct thread *td);
+typedef void d_strategy_t(struct bio *bp);
+typedef int d_ioctl_t(struct cdev *dev, u_long cmd, caddr_t data,
+ int fflag, struct thread *td);
+
+typedef int d_read_t(struct cdev *dev, struct uio *uio, int ioflag);
+typedef int d_write_t(struct cdev *dev, struct uio *uio, int ioflag);
+typedef int d_poll_t(struct cdev *dev, int events, struct thread *td);
+typedef int d_kqfilter_t(struct cdev *dev, struct knote *kn);
+typedef int d_mmap_t(struct cdev *dev, vm_offset_t offset, vm_paddr_t *paddr,
+ int nprot);
+typedef int d_mmap2_t(struct cdev *dev, vm_offset_t offset, vm_paddr_t *paddr,
+ int nprot, vm_memattr_t *memattr);
+typedef int d_mmap_single_t(struct cdev *cdev, vm_ooffset_t *offset,
+ vm_size_t size, struct vm_object **object, int nprot);
+typedef void d_purge_t(struct cdev *dev);
+
+typedef int dumper_t(
+ void *_priv, /* Private to the driver. */
+ void *_virtual, /* Virtual (mapped) address. */
+ vm_offset_t _physical, /* Physical address of virtual. */
+ off_t _offset, /* Byte-offset to write at. */
+ size_t _length); /* Number of bytes to dump. */
+
+#endif /* _KERNEL */
+
+/*
+ * Types for d_flags.
+ */
+#define D_TAPE 0x0001
+#define D_DISK 0x0002
+#define D_TTY 0x0004
+#define D_MEM 0x0008
+
+#ifdef _KERNEL
+
+#define D_TYPEMASK 0xffff
+
+/*
+ * Flags for d_flags which the drivers can set.
+ */
+#define D_TRACKCLOSE 0x00080000 /* track all closes */
+#define D_MMAP_ANON 0x00100000 /* special treatment in vm_mmap.c */
+#define D_PSEUDO 0x00200000 /* make_dev() can return NULL */
+#define D_NEEDGIANT 0x00400000 /* driver wants Giant */
+#define D_NEEDMINOR 0x00800000 /* driver uses clone_create() */
+#define D_MMAP2 0x01000000 /* driver uses d_mmap2() */
+
+/*
+ * Version numbers.
+ */
+#define D_VERSION_00 0x20011966
+#define D_VERSION_01 0x17032005 /* Add d_uid,gid,mode & kind */
+#define D_VERSION_02 0x28042009 /* Add d_mmap_single */
+#define D_VERSION D_VERSION_02
+
+/*
+ * Flags used for internal housekeeping
+ */
+#define D_INIT 0x80000000 /* cdevsw initialized */
+
+/*
+ * Character device switch table
+ */
+struct cdevsw {
+ int d_version;
+ u_int d_flags;
+ const char *d_name;
+ d_open_t *d_open;
+ d_fdopen_t *d_fdopen;
+ d_close_t *d_close;
+ d_read_t *d_read;
+ d_write_t *d_write;
+ d_ioctl_t *d_ioctl;
+ d_poll_t *d_poll;
+ union {
+ d_mmap_t *old;
+ d_mmap2_t *new;
+ } __d_mmap;
+ d_strategy_t *d_strategy;
+ dumper_t *d_dump;
+ d_kqfilter_t *d_kqfilter;
+ d_purge_t *d_purge;
+ d_mmap_single_t *d_mmap_single;
+ uid_t d_uid;
+ gid_t d_gid;
+ mode_t d_mode;
+ const char *d_kind;
+
+ /* These fields should not be messed with by drivers */
+ LIST_ENTRY(cdevsw) d_list;
+ LIST_HEAD(, cdev) d_devs;
+ int d_spare3;
+ union {
+ struct cdevsw *gianttrick;
+ SLIST_ENTRY(cdevsw) postfree_list;
+ } __d_giant;
+};
+#define d_mmap __d_mmap.old
+#define d_mmap2 __d_mmap.new
+#define d_gianttrick __d_giant.gianttrick
+#define d_postfree_list __d_giant.postfree_list
+
+struct module;
+
+struct devsw_module_data {
+ int (*chainevh)(struct module *, int, void *); /* next handler */
+ void *chainarg; /* arg for next event handler */
+ /* Do not initialize fields hereafter */
+};
+
+#define DEV_MODULE(name, evh, arg) \
+static moduledata_t name##_mod = { \
+ #name, \
+ evh, \
+ arg \
+}; \
+DECLARE_MODULE(name, name##_mod, SI_SUB_DRIVERS, SI_ORDER_MIDDLE)
+
+
+void clone_setup(struct clonedevs **cdp);
+void clone_cleanup(struct clonedevs **);
+#define CLONE_UNITMASK 0xfffff
+#define CLONE_FLAG0 (CLONE_UNITMASK + 1)
+int clone_create(struct clonedevs **, struct cdevsw *, int *unit, struct cdev **dev, int extra);
+
+int count_dev(struct cdev *_dev);
+void destroy_dev(struct cdev *_dev);
+int destroy_dev_sched(struct cdev *dev);
+int destroy_dev_sched_cb(struct cdev *dev, void (*cb)(void *), void *arg);
+void destroy_dev_drain(struct cdevsw *csw);
+void drain_dev_clone_events(void);
+struct cdevsw *dev_refthread(struct cdev *_dev, int *_ref);
+struct cdevsw *devvn_refthread(struct vnode *vp, struct cdev **devp, int *_ref);
+void dev_relthread(struct cdev *_dev, int _ref);
+void dev_depends(struct cdev *_pdev, struct cdev *_cdev);
+void dev_ref(struct cdev *dev);
+void dev_refl(struct cdev *dev);
+void dev_rel(struct cdev *dev);
+void dev_strategy(struct cdev *dev, struct buf *bp);
+struct cdev *make_dev(struct cdevsw *_devsw, int _unit, uid_t _uid, gid_t _gid,
+ int _perms, const char *_fmt, ...) __printflike(6, 7);
+struct cdev *make_dev_cred(struct cdevsw *_devsw, int _unit,
+ struct ucred *_cr, uid_t _uid, gid_t _gid, int _perms,
+ const char *_fmt, ...) __printflike(7, 8);
+#define MAKEDEV_REF 0x01
+#define MAKEDEV_WHTOUT 0x02
+#define MAKEDEV_NOWAIT 0x04
+#define MAKEDEV_WAITOK 0x08
+#define MAKEDEV_ETERNAL 0x10
+struct cdev *make_dev_credf(int _flags,
+ struct cdevsw *_devsw, int _unit,
+ struct ucred *_cr, uid_t _uid, gid_t _gid, int _mode,
+ const char *_fmt, ...) __printflike(8, 9);
+int make_dev_p(int _flags, struct cdev **_cdev, struct cdevsw *_devsw,
+ struct ucred *_cr, uid_t _uid, gid_t _gid, int _mode,
+ const char *_fmt, ...) __printflike(8, 9);
+struct cdev *make_dev_alias(struct cdev *_pdev, const char *_fmt, ...)
+ __printflike(2, 3);
+void dev_lock(void);
+void dev_unlock(void);
+void setconf(void);
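+
+/*
+ * Example (illustrative sketch): registering a simple character
+ * device; echo_cdevsw and its methods are hypothetical names.
+ *
+ *	static struct cdevsw echo_cdevsw = {
+ *		.d_version = D_VERSION,
+ *		.d_open = echo_open,
+ *		.d_read = echo_read,
+ *		.d_write = echo_write,
+ *		.d_name = "echo",
+ *	};
+ *
+ *	dev = make_dev(&echo_cdevsw, 0, UID_ROOT, GID_WHEEL,
+ *	    0600, "echo");
+ *	...
+ *	destroy_dev(dev);
+ */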
+
+#ifdef KLD_MODULE
+#define MAKEDEV_ETERNAL_KLD 0
+#else
+#define MAKEDEV_ETERNAL_KLD MAKEDEV_ETERNAL
+#endif
+
+#define dev2unit(d) ((d)->si_drv0)
+
+typedef void (*cdevpriv_dtr_t)(void *data);
+int devfs_get_cdevpriv(void **datap);
+int devfs_set_cdevpriv(void *priv, cdevpriv_dtr_t dtr);
+void devfs_clear_cdevpriv(void);
+void devfs_fpdrop(struct file *fp); /* XXX This is not public KPI */
+
+#define UID_ROOT 0
+#define UID_BIN 3
+#define UID_UUCP 66
+#define UID_NOBODY 65534
+
+#define GID_WHEEL 0
+#define GID_KMEM 2
+#define GID_TTY 4
+#define GID_OPERATOR 5
+#define GID_BIN 7
+#define GID_GAMES 13
+#define GID_DIALER 68
+#define GID_NOBODY 65534
+
+typedef void (*dev_clone_fn)(void *arg, struct ucred *cred, char *name,
+ int namelen, struct cdev **result);
+
+int dev_stdclone(char *_name, char **_namep, const char *_stem, int *_unit);
+EVENTHANDLER_DECLARE(dev_clone, dev_clone_fn);
+
+/* Stuff relating to kernel-dump */
+
+struct dumperinfo {
+ dumper_t *dumper; /* Dumping function. */
+ void *priv; /* Private parts. */
+ u_int blocksize; /* Size of block in bytes. */
+ u_int maxiosize; /* Max size allowed for an individual I/O */
+ off_t mediaoffset; /* Initial offset in bytes. */
+ off_t mediasize; /* Space available in bytes. */
+};
+
+int set_dumper(struct dumperinfo *);
+int dump_write(struct dumperinfo *, void *, vm_offset_t, off_t, size_t);
+void dumpsys(struct dumperinfo *);
+extern int dumping; /* system is dumping */
+
+#endif /* _KERNEL */
+
+#endif /* !_SYS_CONF_HH_ */
diff --git a/rtems/freebsd/sys/copyright.h b/rtems/freebsd/sys/copyright.h
new file mode 100644
index 00000000..936ffd88
--- /dev/null
+++ b/rtems/freebsd/sys/copyright.h
@@ -0,0 +1 @@
+/* EMPTY */
diff --git a/rtems/freebsd/sys/cpu.h b/rtems/freebsd/sys/cpu.h
new file mode 100644
index 00000000..22dc659a
--- /dev/null
+++ b/rtems/freebsd/sys/cpu.h
@@ -0,0 +1,173 @@
+/*-
+ * Copyright (c) 2005-2007 Nate Lawson (SDG)
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _SYS_CPU_HH_
+#define _SYS_CPU_HH_
+
+#include <rtems/freebsd/sys/eventhandler.h>
+
+/*
+ * CPU device support.
+ */
+
+#define CPU_IVAR_PCPU 1
+#define CPU_IVAR_NOMINAL_MHZ 2
+
+static __inline struct pcpu *cpu_get_pcpu(device_t dev)
+{
+ uintptr_t v = 0;
+ BUS_READ_IVAR(device_get_parent(dev), dev, CPU_IVAR_PCPU, &v);
+ return ((struct pcpu *)v);
+}
+
+static __inline int32_t cpu_get_nominal_mhz(device_t dev)
+{
+ uintptr_t v = 0;
+ if (BUS_READ_IVAR(device_get_parent(dev), dev,
+ CPU_IVAR_NOMINAL_MHZ, &v) != 0)
+ return (-1);
+ return ((int32_t)v);
+}
+
+/*
+ * CPU frequency control interface.
+ */
+
+/* Each driver's CPU frequency setting is exported in this format. */
+struct cf_setting {
+ int freq; /* CPU clock in MHz or 100ths of a percent. */
+ int volts; /* Voltage in mV. */
+ int power; /* Power consumed in mW. */
+ int lat; /* Transition latency in us. */
+ device_t dev; /* Driver providing this setting. */
+ int spec[4];/* Driver-specific storage for non-standard info. */
+};
+
+/* Maximum number of settings a given driver can have. */
+#define MAX_SETTINGS 24
+
+/* A combination of settings is a level. */
+struct cf_level {
+ struct cf_setting total_set;
+ struct cf_setting abs_set;
+ struct cf_setting rel_set[MAX_SETTINGS];
+ int rel_count;
+ TAILQ_ENTRY(cf_level) link;
+};
+
+TAILQ_HEAD(cf_level_lst, cf_level);
+
+/* Drivers should set all unknown values to this. */
+#define CPUFREQ_VAL_UNKNOWN (-1)
+
+/*
+ * Every driver offers a type of CPU control. Absolute levels are mutually
+ * exclusive while relative levels modify the current absolute level. There
+ * may be multiple absolute and relative drivers available on a given
+ * system.
+ *
+ * For example, consider a system with two absolute drivers that provide
+ * frequency settings of 100, 200 and 300, 400 and a relative driver that
+ * provides settings of 50%, 100%. The cpufreq core would export frequency
+ * levels of 50, 100, 150, 200, 300, 400.
+ *
+ * The "info only" flag signifies that settings returned by
+ * CPUFREQ_DRV_SETTINGS cannot be passed to the CPUFREQ_DRV_SET method and
+ * are only informational. This is for some drivers that can return
+ * information about settings but rely on another machine-dependent driver
+ * for actually performing the frequency transition (e.g., ACPI performance
+ * states of type "functional fixed hardware.")
+ */
+#define CPUFREQ_TYPE_MASK 0xffff
+#define CPUFREQ_TYPE_RELATIVE (1<<0)
+#define CPUFREQ_TYPE_ABSOLUTE (1<<1)
+#define CPUFREQ_FLAG_INFO_ONLY (1<<16)
+
+/*
+ * When setting a level, the caller indicates the priority of this request.
+ * Priorities determine, among other things, whether a level can be
+ * overridden by other callers. For example, if the user sets a level but
+ * the system thermal driver needs to override it for emergency cooling,
+ * the driver would use a higher priority. Once the event has passed, the
+ * driver would call cpufreq to resume any previous level.
+ */
+#define CPUFREQ_PRIO_HIGHEST 1000000
+#define CPUFREQ_PRIO_KERN 1000
+#define CPUFREQ_PRIO_USER 100
+#define CPUFREQ_PRIO_LOWEST 0
+
+/*
+ * Register and unregister a driver with the cpufreq core. Once a driver
+ * is registered, it must support calls to its CPUFREQ_GET, CPUFREQ_GET_LEVEL,
+ * and CPUFREQ_SET methods. It must also unregister before returning from
+ * its DEVICE_DETACH method.
+ */
+int cpufreq_register(device_t dev);
+int cpufreq_unregister(device_t dev);
+
+/*
+ * Notify the cpufreq core that the number of or values for settings have
+ * changed.
+ */
+int cpufreq_settings_changed(device_t dev);
+
+/*
+ * Eventhandlers that are called before and after a change in frequency.
+ * The new level and the result of the change (0 is success) is passed in.
+ * If the driver wishes to revoke the change from cpufreq_pre_change, it
+ * stores a non-zero error code in the result parameter and the change will
+ * not be made. If the post-change eventhandler gets a non-zero result,
+ * no change was made and the previous level remains in effect. If a change
+ * is revoked, the post-change eventhandler is still called with the error
+ * value supplied by the revoking driver. This gives listeners who cached
+ * some data in preparation for a level change a chance to clean up.
+ */
+typedef void (*cpufreq_pre_notify_fn)(void *, const struct cf_level *, int *);
+typedef void (*cpufreq_post_notify_fn)(void *, const struct cf_level *, int);
+EVENTHANDLER_DECLARE(cpufreq_pre_change, cpufreq_pre_notify_fn);
+EVENTHANDLER_DECLARE(cpufreq_post_change, cpufreq_post_notify_fn);
+
+/*
+ * Eventhandler called when the available list of levels changed.
+ * The unit number of the device (i.e. "cpufreq0") whose levels changed
+ * is provided so the listener can retrieve the new list of levels.
+ */
+typedef void (*cpufreq_levels_notify_fn)(void *, int);
+EVENTHANDLER_DECLARE(cpufreq_levels_changed, cpufreq_levels_notify_fn);
+
+/* Allow values to be +/- a bit since sometimes we have to estimate. */
+#define CPUFREQ_CMP(x, y) (abs((x) - (y)) < 25)
+
+/*
+ * Machine-dependent functions.
+ */
+
+/* Estimate the current clock rate for the given CPU id. */
+int cpu_est_clockrate(int cpu_id, uint64_t *rate);
+
+#endif /* !_SYS_CPU_HH_ */
diff --git a/rtems/freebsd/sys/cpuset.h b/rtems/freebsd/sys/cpuset.h
new file mode 100644
index 00000000..936ffd88
--- /dev/null
+++ b/rtems/freebsd/sys/cpuset.h
@@ -0,0 +1 @@
+/* EMPTY */
diff --git a/rtems/freebsd/sys/ctype.h b/rtems/freebsd/sys/ctype.h
new file mode 100644
index 00000000..b7b4ad5a
--- /dev/null
+++ b/rtems/freebsd/sys/ctype.h
@@ -0,0 +1,57 @@
+/*-
+ * Copyright (c) 1982, 1988, 1991, 1993
+ * The Regents of the University of California. All rights reserved.
+ * (c) UNIX System Laboratories, Inc.
+ * All or some portions of this file are derived from material licensed
+ * to the University of California by American Telephone and Telegraph
+ * Co. or Unix System Laboratories, Inc. and are reproduced herein with
+ * the permission of UNIX System Laboratories, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _SYS_CTYPE_HH_
+#define _SYS_CTYPE_HH_
+
+#ifdef _KERNEL
+
+#define isspace(c) ((c) == ' ' || ((c) >= '\t' && (c) <= '\r'))
+#define isascii(c) (((c) & ~0x7f) == 0)
+#define isupper(c) ((c) >= 'A' && (c) <= 'Z')
+#define islower(c) ((c) >= 'a' && (c) <= 'z')
+#define isalpha(c) (isupper(c) || islower(c))
+#define isdigit(c) ((c) >= '0' && (c) <= '9')
+#define isxdigit(c) (isdigit(c) \
+ || ((c) >= 'A' && (c) <= 'F') \
+ || ((c) >= 'a' && (c) <= 'f'))
+#define isprint(c) ((c) >= ' ' && (c) <= '~')
+
+#define toupper(c) ((c) - 0x20 * (((c) >= 'a') && ((c) <= 'z')))
+#define tolower(c) ((c) + 0x20 * (((c) >= 'A') && ((c) <= 'Z')))
+
+#endif
+#endif /* !_SYS_CTYPE_HH_ */
diff --git a/rtems/freebsd/sys/domain.h b/rtems/freebsd/sys/domain.h
new file mode 100644
index 00000000..0b44e9fd
--- /dev/null
+++ b/rtems/freebsd/sys/domain.h
@@ -0,0 +1,106 @@
+/*-
+ * Copyright (c) 1982, 1986, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)domain.h 8.1 (Berkeley) 6/2/93
+ * $FreeBSD$
+ */
+
+#ifndef _SYS_DOMAIN_HH_
+#define _SYS_DOMAIN_HH_
+
+/*
+ * Structure per communications domain.
+ */
+
+/*
+ * Forward structure declarations for function prototypes [sic].
+ */
+struct mbuf;
+struct ifnet;
+
+struct domain {
+ int dom_family; /* AF_xxx */
+ char *dom_name;
+ void (*dom_init) /* initialize domain data structures */
+ (void);
+ void (*dom_destroy) /* cleanup structures / state */
+ (void);
+ int (*dom_externalize) /* externalize access rights */
+ (struct mbuf *, struct mbuf **);
+ void (*dom_dispose) /* dispose of internalized rights */
+ (struct mbuf *);
+ struct protosw *dom_protosw, *dom_protoswNPROTOSW;
+ struct domain *dom_next;
+ int (*dom_rtattach) /* initialize routing table */
+ (void **, int);
+ int (*dom_rtdetach) /* clean up routing table */
+ (void **, int);
+ int dom_rtoffset; /* an arg to rtattach, in bits */
+ /* XXX MRT.
+ * dom_rtoffset may be 0 if the domain supplies its own rtattach(),
+ * in which case a 0 indicates that it is being called from
+ * vfs_export.c (HACK).  Only for AF_INET{,6} at this time.
+ * Temporary ABI compatibility hack; fix after RELENG_7.
+ */
+ int dom_maxrtkey; /* for routing layer */
+ void *(*dom_ifattach)(struct ifnet *);
+ void (*dom_ifdetach)(struct ifnet *, void *);
+ /* af-dependent data on ifnet */
+};
+
+#ifdef _KERNEL
+extern int domain_init_status;
+extern struct domain *domains;
+void domain_add(void *);
+void domain_init(void *);
+#ifdef VIMAGE
+void vnet_domain_init(void *);
+void vnet_domain_uninit(void *);
+#endif
+
+#define DOMAIN_SET(name) \
+ SYSINIT(domain_add_ ## name, SI_SUB_PROTO_DOMAIN, \
+ SI_ORDER_FIRST, domain_add, & name ## domain); \
+ SYSINIT(domain_init_ ## name, SI_SUB_PROTO_DOMAIN, \
+ SI_ORDER_SECOND, domain_init, & name ## domain);
+#ifdef VIMAGE
+#define VNET_DOMAIN_SET(name) \
+ SYSINIT(domain_add_ ## name, SI_SUB_PROTO_DOMAIN, \
+ SI_ORDER_FIRST, domain_add, & name ## domain); \
+ VNET_SYSINIT(vnet_domain_init_ ## name, SI_SUB_PROTO_DOMAIN, \
+ SI_ORDER_SECOND, vnet_domain_init, & name ## domain); \
+ VNET_SYSUNINIT(vnet_domain_uninit_ ## name, \
+ SI_SUB_PROTO_DOMAIN, SI_ORDER_SECOND, vnet_domain_uninit, \
+ & name ## domain)
+#else /* !VIMAGE */
+#define VNET_DOMAIN_SET(name) DOMAIN_SET(name)
+#endif /* VIMAGE */
+
+#endif /* _KERNEL */
+
+#endif /* !_SYS_DOMAIN_HH_ */
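For illustration only, a hypothetical protocol family would plug into this interface roughly as follows; the family number, name, and protosw table are placeholders, and struct protosw's full definition lives in protosw.h, not here:

    static struct protosw examplesw[1];     /* placeholder protocol table */

    static struct domain exampledomain = {
            .dom_family =           123,            /* placeholder AF_* value */
            .dom_name =             "example",
            .dom_protosw =          examplesw,
            .dom_protoswNPROTOSW =  &examplesw[1],  /* one past the last entry */
    };

    /* DOMAIN_SET(example) token-pastes "domain" onto the name, so it
     * registers &exampledomain via SYSINIT at SI_SUB_PROTO_DOMAIN. */
    DOMAIN_SET(example);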
diff --git a/rtems/freebsd/sys/endian.h b/rtems/freebsd/sys/endian.h
new file mode 100644
index 00000000..fb6bc69f
--- /dev/null
+++ b/rtems/freebsd/sys/endian.h
@@ -0,0 +1,200 @@
+/*-
+ * Copyright (c) 2002 Thomas Moestl <tmm@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _SYS_ENDIAN_HH_
+#define _SYS_ENDIAN_HH_
+
+#include <rtems/freebsd/sys/cdefs.h>
+#include <rtems/freebsd/sys/_types.h>
+#include <rtems/freebsd/machine/endian.h>
+
+#ifndef _UINT16_T_DECLARED
+typedef __uint16_t uint16_t;
+#define _UINT16_T_DECLARED
+#endif
+
+#ifndef _UINT32_T_DECLARED
+typedef __uint32_t uint32_t;
+#define _UINT32_T_DECLARED
+#endif
+
+#ifndef _UINT64_T_DECLARED
+typedef __uint64_t uint64_t;
+#define _UINT64_T_DECLARED
+#endif
+
+/*
+ * General byte order swapping functions.
+ */
+#define bswap16(x) __bswap16(x)
+#define bswap32(x) __bswap32(x)
+#define bswap64(x) __bswap64(x)
+
+/*
+ * Host to big endian, host to little endian, big endian to host, and little
+ * endian to host byte order functions as detailed in byteorder(9).
+ */
+#if _BYTE_ORDER == _LITTLE_ENDIAN
+#define htobe16(x) bswap16((x))
+#define htobe32(x) bswap32((x))
+#define htobe64(x) bswap64((x))
+#define htole16(x) ((uint16_t)(x))
+#define htole32(x) ((uint32_t)(x))
+#define htole64(x) ((uint64_t)(x))
+
+#define be16toh(x) bswap16((x))
+#define be32toh(x) bswap32((x))
+#define be64toh(x) bswap64((x))
+#define le16toh(x) ((uint16_t)(x))
+#define le32toh(x) ((uint32_t)(x))
+#define le64toh(x) ((uint64_t)(x))
+#else /* _BYTE_ORDER != _LITTLE_ENDIAN */
+#define htobe16(x) ((uint16_t)(x))
+#define htobe32(x) ((uint32_t)(x))
+#define htobe64(x) ((uint64_t)(x))
+#define htole16(x) bswap16((x))
+#define htole32(x) bswap32((x))
+#define htole64(x) bswap64((x))
+
+#define be16toh(x) ((uint16_t)(x))
+#define be32toh(x) ((uint32_t)(x))
+#define be64toh(x) ((uint64_t)(x))
+#define le16toh(x) bswap16((x))
+#define le32toh(x) bswap32((x))
+#define le64toh(x) bswap64((x))
+#endif /* _BYTE_ORDER == _LITTLE_ENDIAN */
+
+/* Alignment-agnostic encode/decode bytestream to/from little/big endian. */
+
+static __inline uint16_t
+be16dec(const void *pp)
+{
+ unsigned char const *p = (unsigned char const *)pp;
+
+ return ((p[0] << 8) | p[1]);
+}
+
+static __inline uint32_t
+be32dec(const void *pp)
+{
+ unsigned char const *p = (unsigned char const *)pp;
+
+ return ((p[0] << 24) | (p[1] << 16) | (p[2] << 8) | p[3]);
+}
+
+static __inline uint64_t
+be64dec(const void *pp)
+{
+ unsigned char const *p = (unsigned char const *)pp;
+
+ return (((uint64_t)be32dec(p) << 32) | be32dec(p + 4));
+}
+
+static __inline uint16_t
+le16dec(const void *pp)
+{
+ unsigned char const *p = (unsigned char const *)pp;
+
+ return ((p[1] << 8) | p[0]);
+}
+
+static __inline uint32_t
+le32dec(const void *pp)
+{
+ unsigned char const *p = (unsigned char const *)pp;
+
+ return ((p[3] << 24) | (p[2] << 16) | (p[1] << 8) | p[0]);
+}
+
+static __inline uint64_t
+le64dec(const void *pp)
+{
+ unsigned char const *p = (unsigned char const *)pp;
+
+ return (((uint64_t)le32dec(p + 4) << 32) | le32dec(p));
+}
+
+static __inline void
+be16enc(void *pp, uint16_t u)
+{
+ unsigned char *p = (unsigned char *)pp;
+
+ p[0] = (u >> 8) & 0xff;
+ p[1] = u & 0xff;
+}
+
+static __inline void
+be32enc(void *pp, uint32_t u)
+{
+ unsigned char *p = (unsigned char *)pp;
+
+ p[0] = (u >> 24) & 0xff;
+ p[1] = (u >> 16) & 0xff;
+ p[2] = (u >> 8) & 0xff;
+ p[3] = u & 0xff;
+}
+
+static __inline void
+be64enc(void *pp, uint64_t u)
+{
+ unsigned char *p = (unsigned char *)pp;
+
+ be32enc(p, u >> 32);
+ be32enc(p + 4, u & 0xffffffff);
+}
+
+static __inline void
+le16enc(void *pp, uint16_t u)
+{
+ unsigned char *p = (unsigned char *)pp;
+
+ p[0] = u & 0xff;
+ p[1] = (u >> 8) & 0xff;
+}
+
+static __inline void
+le32enc(void *pp, uint32_t u)
+{
+ unsigned char *p = (unsigned char *)pp;
+
+ p[0] = u & 0xff;
+ p[1] = (u >> 8) & 0xff;
+ p[2] = (u >> 16) & 0xff;
+ p[3] = (u >> 24) & 0xff;
+}
+
+static __inline void
+le64enc(void *pp, uint64_t u)
+{
+ unsigned char *p = (unsigned char *)pp;
+
+ le32enc(p, u & 0xffffffff);
+ le32enc(p + 4, u >> 32);
+}
+
+#endif /* _SYS_ENDIAN_HH_ */
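A short sketch of the encode/decode helpers above; because they access the buffer one byte at a time, the buffer needs no particular alignment:

    #include <rtems/freebsd/sys/endian.h>

    static void
    endian_example(void)
    {
            unsigned char buf[4];
            uint32_t v = 0x12345678;

            be32enc(buf, v);        /* buf = 12 34 56 78 */
            v = be32dec(buf);       /* v == 0x12345678 again */

            le32enc(buf, v);        /* buf = 78 56 34 12 */
            v = le32dec(buf);       /* v == 0x12345678 again */
    }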
diff --git a/rtems/freebsd/sys/errno.h b/rtems/freebsd/sys/errno.h
new file mode 100644
index 00000000..591bcc23
--- /dev/null
+++ b/rtems/freebsd/sys/errno.h
@@ -0,0 +1,192 @@
+/*-
+ * Copyright (c) 1982, 1986, 1989, 1993
+ * The Regents of the University of California. All rights reserved.
+ * (c) UNIX System Laboratories, Inc.
+ * All or some portions of this file are derived from material licensed
+ * to the University of California by American Telephone and Telegraph
+ * Co. or Unix System Laboratories, Inc. and are reproduced herein with
+ * the permission of UNIX System Laboratories, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)errno.h 8.5 (Berkeley) 1/21/94
+ * $FreeBSD$
+ */
+
+#ifndef _SYS_ERRNO_HH_
+#define _SYS_ERRNO_HH_
+
+#ifndef _KERNEL
+#include <rtems/freebsd/sys/cdefs.h>
+__BEGIN_DECLS
+int * __error(void);
+__END_DECLS
+#define errno (* __error())
+#endif /* !_KERNEL */
+
+#define EPERM 1 /* Operation not permitted */
+#define ENOENT 2 /* No such file or directory */
+#define ESRCH 3 /* No such process */
+#define EINTR 4 /* Interrupted system call */
+#define EIO 5 /* Input/output error */
+#define ENXIO 6 /* Device not configured */
+#define E2BIG 7 /* Argument list too long */
+#define ENOEXEC 8 /* Exec format error */
+#define EBADF 9 /* Bad file descriptor */
+#define ECHILD 10 /* No child processes */
+#define EDEADLK 11 /* Resource deadlock avoided */
+ /* 11 was EAGAIN */
+#define ENOMEM 12 /* Cannot allocate memory */
+#define EACCES 13 /* Permission denied */
+#define EFAULT 14 /* Bad address */
+#ifndef _POSIX_SOURCE
+#define ENOTBLK 15 /* Block device required */
+#endif
+#define EBUSY 16 /* Device busy */
+#define EEXIST 17 /* File exists */
+#define EXDEV 18 /* Cross-device link */
+#define ENODEV 19 /* Operation not supported by device */
+#define ENOTDIR 20 /* Not a directory */
+#define EISDIR 21 /* Is a directory */
+#define EINVAL 22 /* Invalid argument */
+#define ENFILE 23 /* Too many open files in system */
+#define EMFILE 24 /* Too many open files */
+#define ENOTTY 25 /* Inappropriate ioctl for device */
+#ifndef _POSIX_SOURCE
+#define ETXTBSY 26 /* Text file busy */
+#endif
+#define EFBIG 27 /* File too large */
+#define ENOSPC 28 /* No space left on device */
+#define ESPIPE 29 /* Illegal seek */
+#define EROFS 30 /* Read-only filesystem */
+#define EMLINK 31 /* Too many links */
+#define EPIPE 32 /* Broken pipe */
+
+/* math software */
+#define EDOM 33 /* Numerical argument out of domain */
+#define ERANGE 34 /* Result too large */
+
+/* non-blocking and interrupt i/o */
+#define EAGAIN 35 /* Resource temporarily unavailable */
+#ifndef _POSIX_SOURCE
+#define EWOULDBLOCK EAGAIN /* Operation would block */
+#define EINPROGRESS 36 /* Operation now in progress */
+#define EALREADY 37 /* Operation already in progress */
+
+/* ipc/network software -- argument errors */
+#define ENOTSOCK 38 /* Socket operation on non-socket */
+#define EDESTADDRREQ 39 /* Destination address required */
+#define EMSGSIZE 40 /* Message too long */
+#define EPROTOTYPE 41 /* Protocol wrong type for socket */
+#define ENOPROTOOPT 42 /* Protocol not available */
+#define EPROTONOSUPPORT 43 /* Protocol not supported */
+#define ESOCKTNOSUPPORT 44 /* Socket type not supported */
+#define EOPNOTSUPP 45 /* Operation not supported */
+#define ENOTSUP EOPNOTSUPP /* Operation not supported */
+#define EPFNOSUPPORT 46 /* Protocol family not supported */
+#define EAFNOSUPPORT 47 /* Address family not supported by protocol family */
+#define EADDRINUSE 48 /* Address already in use */
+#define EADDRNOTAVAIL 49 /* Can't assign requested address */
+
+/* ipc/network software -- operational errors */
+#define ENETDOWN 50 /* Network is down */
+#define ENETUNREACH 51 /* Network is unreachable */
+#define ENETRESET 52 /* Network dropped connection on reset */
+#define ECONNABORTED 53 /* Software caused connection abort */
+#define ECONNRESET 54 /* Connection reset by peer */
+#define ENOBUFS 55 /* No buffer space available */
+#define EISCONN 56 /* Socket is already connected */
+#define ENOTCONN 57 /* Socket is not connected */
+#define ESHUTDOWN 58 /* Can't send after socket shutdown */
+#define ETOOMANYREFS 59 /* Too many references: can't splice */
+#define ETIMEDOUT 60 /* Operation timed out */
+#define ECONNREFUSED 61 /* Connection refused */
+
+#define ELOOP 62 /* Too many levels of symbolic links */
+#endif /* _POSIX_SOURCE */
+#define ENAMETOOLONG 63 /* File name too long */
+
+/* should be rearranged */
+#ifndef _POSIX_SOURCE
+#define EHOSTDOWN 64 /* Host is down */
+#define EHOSTUNREACH 65 /* No route to host */
+#endif /* _POSIX_SOURCE */
+#define ENOTEMPTY 66 /* Directory not empty */
+
+/* quotas & mush */
+#ifndef _POSIX_SOURCE
+#define EPROCLIM 67 /* Too many processes */
+#define EUSERS 68 /* Too many users */
+#define EDQUOT 69 /* Disc quota exceeded */
+
+/* Network File System */
+#define ESTALE 70 /* Stale NFS file handle */
+#define EREMOTE 71 /* Too many levels of remote in path */
+#define EBADRPC 72 /* RPC struct is bad */
+#define ERPCMISMATCH 73 /* RPC version wrong */
+#define EPROGUNAVAIL 74 /* RPC prog. not avail */
+#define EPROGMISMATCH 75 /* Program version wrong */
+#define EPROCUNAVAIL 76 /* Bad procedure for program */
+#endif /* _POSIX_SOURCE */
+
+#define ENOLCK 77 /* No locks available */
+#define ENOSYS 78 /* Function not implemented */
+
+#ifndef _POSIX_SOURCE
+#define EFTYPE 79 /* Inappropriate file type or format */
+#define EAUTH 80 /* Authentication error */
+#define ENEEDAUTH 81 /* Need authenticator */
+#define EIDRM 82 /* Identifier removed */
+#define ENOMSG 83 /* No message of desired type */
+#define EOVERFLOW 84 /* Value too large to be stored in data type */
+#define ECANCELED 85 /* Operation canceled */
+#define EILSEQ 86 /* Illegal byte sequence */
+#define ENOATTR 87 /* Attribute not found */
+
+#define EDOOFUS 88 /* Programming error */
+#endif /* _POSIX_SOURCE */
+
+#define EBADMSG 89 /* Bad message */
+#define EMULTIHOP 90 /* Multihop attempted */
+#define ENOLINK 91 /* Link has been severed */
+#define EPROTO 92 /* Protocol error */
+
+#ifndef _POSIX_SOURCE
+#define ENOTCAPABLE 93 /* Capabilities insufficient */
+#endif /* _POSIX_SOURCE */
+
+#ifndef _POSIX_SOURCE
+#define ELAST 93 /* Must be equal largest errno */
+#endif /* _POSIX_SOURCE */
+
+#ifdef _KERNEL
+/* pseudo-errors returned inside kernel to modify return to process */
+#define ERESTART (-1) /* restart syscall */
+#define EJUSTRETURN (-2) /* don't modify regs, just return */
+#define ENOIOCTL (-3) /* ioctl not handled by this layer */
+#define EDIRIOCTL (-4) /* do direct ioctl in GEOM */
+#endif /* _KERNEL */
+
+#endif /* !_SYS_ERRNO_HH_ */
diff --git a/rtems/freebsd/sys/event.h b/rtems/freebsd/sys/event.h
new file mode 100644
index 00000000..6572b9b1
--- /dev/null
+++ b/rtems/freebsd/sys/event.h
@@ -0,0 +1,279 @@
+/*-
+ * Copyright (c) 1999,2000,2001 Jonathan Lemon <jlemon@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _SYS_EVENT_HH_
+#define _SYS_EVENT_HH_
+
+#include <rtems/freebsd/sys/queue.h>
+
+#define EVFILT_READ (-1)
+#define EVFILT_WRITE (-2)
+#define EVFILT_AIO (-3) /* attached to aio requests */
+#define EVFILT_VNODE (-4) /* attached to vnodes */
+#define EVFILT_PROC (-5) /* attached to struct proc */
+#define EVFILT_SIGNAL (-6) /* attached to struct proc */
+#define EVFILT_TIMER (-7) /* timers */
+/* EVFILT_NETDEV (-8) no longer supported */
+#define EVFILT_FS (-9) /* filesystem events */
+#define EVFILT_LIO (-10) /* attached to lio requests */
+#define EVFILT_USER (-11) /* User events */
+#define EVFILT_SYSCOUNT 11
+
+#define EV_SET(kevp_, a, b, c, d, e, f) do { \
+ struct kevent *kevp = (kevp_); \
+ (kevp)->ident = (a); \
+ (kevp)->filter = (b); \
+ (kevp)->flags = (c); \
+ (kevp)->fflags = (d); \
+ (kevp)->data = (e); \
+ (kevp)->udata = (f); \
+} while(0)
+
+struct kevent {
+ uintptr_t ident; /* identifier for this event */
+ short filter; /* filter for event */
+ u_short flags;
+ u_int fflags;
+ intptr_t data;
+ void *udata; /* opaque user data identifier */
+};
+
+/* actions */
+#define EV_ADD 0x0001 /* add event to kq (implies enable) */
+#define EV_DELETE 0x0002 /* delete event from kq */
+#define EV_ENABLE 0x0004 /* enable event */
+#define EV_DISABLE 0x0008 /* disable event (not reported) */
+
+/* flags */
+#define EV_ONESHOT 0x0010 /* only report one occurrence */
+#define EV_CLEAR 0x0020 /* clear event state after reporting */
+#define EV_RECEIPT 0x0040 /* force EV_ERROR on success, data=0 */
+#define EV_DISPATCH 0x0080 /* disable event after reporting */
+
+#define EV_SYSFLAGS 0xF000 /* reserved by system */
+#define EV_FLAG1 0x2000 /* filter-specific flag */
+
+/* returned values */
+#define EV_EOF 0x8000 /* EOF detected */
+#define EV_ERROR 0x4000 /* error, data contains errno */
+
+/*
+ * data/hint flags/masks for EVFILT_USER, shared with userspace
+ *
+ * On input, the top two bits of fflags specify how the lower twenty-four
+ * bits should be applied to the stored value of fflags.
+ *
+ * On output, the top two bits will always be set to NOTE_FFNOP and the
+ * remaining twenty-four bits will contain the stored fflags value.
+ */
+#define NOTE_FFNOP 0x00000000 /* ignore input fflags */
+#define NOTE_FFAND 0x40000000 /* AND fflags */
+#define NOTE_FFOR 0x80000000 /* OR fflags */
+#define NOTE_FFCOPY 0xc0000000 /* copy fflags */
+#define NOTE_FFCTRLMASK 0xc0000000 /* masks for operations */
+#define NOTE_FFLAGSMASK 0x00ffffff
+
+#define NOTE_TRIGGER 0x01000000 /* Cause the event to be
+ triggered for output. */
+
+/*
+ * data/hint flags for EVFILT_{READ|WRITE}, shared with userspace
+ */
+#define NOTE_LOWAT 0x0001 /* low water mark */
+
+/*
+ * data/hint flags for EVFILT_VNODE, shared with userspace
+ */
+#define NOTE_DELETE 0x0001 /* vnode was removed */
+#define NOTE_WRITE 0x0002 /* data contents changed */
+#define NOTE_EXTEND 0x0004 /* size increased */
+#define NOTE_ATTRIB 0x0008 /* attributes changed */
+#define NOTE_LINK 0x0010 /* link count changed */
+#define NOTE_RENAME 0x0020 /* vnode was renamed */
+#define NOTE_REVOKE 0x0040 /* vnode access was revoked */
+
+/*
+ * data/hint flags for EVFILT_PROC, shared with userspace
+ */
+#define NOTE_EXIT 0x80000000 /* process exited */
+#define NOTE_FORK 0x40000000 /* process forked */
+#define NOTE_EXEC 0x20000000 /* process exec'd */
+#define NOTE_PCTRLMASK 0xf0000000 /* mask for hint bits */
+#define NOTE_PDATAMASK 0x000fffff /* mask for pid */
+
+/* additional flags for EVFILT_PROC */
+#define NOTE_TRACK 0x00000001 /* follow across forks */
+#define NOTE_TRACKERR 0x00000002 /* could not track child */
+#define NOTE_CHILD 0x00000004 /* am a child process */
+
+struct knote;
+SLIST_HEAD(klist, knote);
+struct kqueue;
+SLIST_HEAD(kqlist, kqueue);
+struct knlist {
+ struct klist kl_list;
+ void (*kl_lock)(void *); /* lock function */
+ void (*kl_unlock)(void *);
+ void (*kl_assert_locked)(void *);
+ void (*kl_assert_unlocked)(void *);
+ void *kl_lockarg; /* argument passed to the lock functions */
+};
+
+
+#ifdef _KERNEL
+
+#ifdef MALLOC_DECLARE
+MALLOC_DECLARE(M_KQUEUE);
+#endif
+
+/*
+ * Flags for knote call
+ */
+#define KNF_LISTLOCKED 0x0001 /* knlist is locked */
+#define KNF_NOKQLOCK 0x0002 /* do not keep KQ_LOCK */
+
+#define KNOTE(list, hint, flags) knote(list, hint, flags)
+#define KNOTE_LOCKED(list, hint) knote(list, hint, KNF_LISTLOCKED)
+#define KNOTE_UNLOCKED(list, hint) knote(list, hint, 0)
+
+#define KNLIST_EMPTY(list) SLIST_EMPTY(&(list)->kl_list)
+
+/*
+ * Flag indicating hint is a signal. Used by EVFILT_SIGNAL, and also
+ * shared by EVFILT_PROC (all knotes attached to p->p_klist)
+ */
+#define NOTE_SIGNAL 0x08000000
+
+/*
+ * Hint values for the optional f_touch event filter. If f_touch is not set
+ * to NULL and f_isfd is zero the f_touch filter will be called with the type
+ * argument set to EVENT_REGISTER during a kevent() system call. It is also
+ * called under the same conditions with the type argument set to EVENT_PROCESS
+ * when the event has been triggered.
+ */
+#define EVENT_REGISTER 1
+#define EVENT_PROCESS 2
+
+struct filterops {
+ int f_isfd; /* true if ident == filedescriptor */
+ int (*f_attach)(struct knote *kn);
+ void (*f_detach)(struct knote *kn);
+ int (*f_event)(struct knote *kn, long hint);
+ void (*f_touch)(struct knote *kn, struct kevent *kev, u_long type);
+};
+
+/*
+ * Setting the KN_INFLUX flag allows the holder to unlock the kq this knote
+ * is on and to modify kn_status as if the KQ lock were held.
+ *
+ * kn_sfflags, kn_sdata, and kn_kevent are protected by the knlist lock.
+ */
+struct knote {
+ SLIST_ENTRY(knote) kn_link; /* for kq */
+ SLIST_ENTRY(knote) kn_selnext; /* for struct selinfo */
+ struct knlist *kn_knlist; /* populated by f_attach */
+ TAILQ_ENTRY(knote) kn_tqe;
+ struct kqueue *kn_kq; /* which queue we are on */
+ struct kevent kn_kevent;
+ int kn_status; /* protected by kq lock */
+#define KN_ACTIVE 0x01 /* event has been triggered */
+#define KN_QUEUED 0x02 /* event is on queue */
+#define KN_DISABLED 0x04 /* event is disabled */
+#define KN_DETACHED 0x08 /* knote is detached */
+#define KN_INFLUX 0x10 /* knote is in flux */
+#define KN_MARKER 0x20 /* ignore this knote */
+#define KN_KQUEUE 0x40 /* this knote belongs to a kq */
+#define KN_HASKQLOCK 0x80 /* for _inevent */
+ int kn_sfflags; /* saved filter flags */
+ intptr_t kn_sdata; /* saved data field */
+ union {
+ struct file *p_fp; /* file data pointer */
+ struct proc *p_proc; /* proc pointer */
+ struct aiocblist *p_aio; /* AIO job pointer */
+ struct aioliojob *p_lio; /* LIO job pointer */
+ } kn_ptr;
+ struct filterops *kn_fop;
+ void *kn_hook;
+ int kn_hookid;
+
+#define kn_id kn_kevent.ident
+#define kn_filter kn_kevent.filter
+#define kn_flags kn_kevent.flags
+#define kn_fflags kn_kevent.fflags
+#define kn_data kn_kevent.data
+#define kn_fp kn_ptr.p_fp
+};
+struct kevent_copyops {
+ void *arg;
+ int (*k_copyout)(void *arg, struct kevent *kevp, int count);
+ int (*k_copyin)(void *arg, struct kevent *kevp, int count);
+};
+
+struct thread;
+struct proc;
+struct knlist;
+struct mtx;
+
+extern void knote(struct knlist *list, long hint, int lockflags);
+extern void knote_fork(struct knlist *list, int pid);
+extern void knlist_add(struct knlist *knl, struct knote *kn, int islocked);
+extern void knlist_remove(struct knlist *knl, struct knote *kn, int islocked);
+extern void knlist_remove_inevent(struct knlist *knl, struct knote *kn);
+extern int knlist_empty(struct knlist *knl);
+extern void knlist_init(struct knlist *knl, void *lock,
+ void (*kl_lock)(void *), void (*kl_unlock)(void *),
+ void (*kl_assert_locked)(void *), void (*kl_assert_unlocked)(void *));
+extern void knlist_init_mtx(struct knlist *knl, struct mtx *lock);
+extern void knlist_destroy(struct knlist *knl);
+extern void knlist_cleardel(struct knlist *knl, struct thread *td,
+ int islocked, int killkn);
+#define knlist_clear(knl, islocked) \
+ knlist_cleardel((knl), NULL, (islocked), 0)
+#define knlist_delete(knl, td, islocked) \
+ knlist_cleardel((knl), (td), (islocked), 1)
+extern void knote_fdclose(struct thread *p, int fd);
+extern int kqfd_register(int fd, struct kevent *kev, struct thread *p,
+ int waitok);
+extern int kqueue_add_filteropts(int filt, struct filterops *filtops);
+extern int kqueue_del_filteropts(int filt);
+
+#else /* !_KERNEL */
+
+#include <rtems/freebsd/sys/cdefs.h>
+struct timespec;
+
+__BEGIN_DECLS
+int kqueue(void);
+int kevent(int kq, const struct kevent *changelist, int nchanges,
+ struct kevent *eventlist, int nevents,
+ const struct timespec *timeout);
+__END_DECLS
+
+#endif /* !_KERNEL */
+
+#endif /* !_SYS_EVENT_HH_ */
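For reference, a hedged userland sketch of the kqueue()/kevent() interface declared above; wait_readable() is a hypothetical helper and error handling is abbreviated:

    #include <sys/event.h>          /* kqueue(), kevent(), EV_SET() */
    #include <stddef.h>             /* NULL */
    #include <unistd.h>             /* close() */

    /* Block until fd is readable, using a one-shot EVFILT_READ filter. */
    static int
    wait_readable(int fd)
    {
            struct kevent change, event;
            int kq, n;

            if ((kq = kqueue()) == -1)
                    return (-1);
            EV_SET(&change, fd, EVFILT_READ, EV_ADD | EV_ONESHOT, 0, 0, NULL);
            n = kevent(kq, &change, 1, &event, 1, NULL); /* NULL = no timeout */
            close(kq);
            return (n == 1 && (event.flags & EV_ERROR) == 0 ? 0 : -1);
    }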
diff --git a/rtems/freebsd/sys/eventhandler.h b/rtems/freebsd/sys/eventhandler.h
new file mode 100644
index 00000000..d591fe9b
--- /dev/null
+++ b/rtems/freebsd/sys/eventhandler.h
@@ -0,0 +1,242 @@
+/*-
+ * Copyright (c) 1999 Michael Smith <msmith@freebsd.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef SYS_EVENTHANDLER_H
+#define SYS_EVENTHANDLER_H
+
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/ktr.h>
+#include <rtems/freebsd/sys/mutex.h>
+#include <rtems/freebsd/sys/queue.h>
+
+struct eventhandler_entry {
+ TAILQ_ENTRY(eventhandler_entry) ee_link;
+ int ee_priority;
+#define EHE_DEAD_PRIORITY (-1)
+ void *ee_arg;
+};
+
+#ifdef VIMAGE
+struct eventhandler_entry_vimage {
+ void (* func)(void); /* Original function registered. */
+ void *ee_arg; /* Original argument registered. */
+ void *sparep[2];
+};
+#endif
+
+struct eventhandler_list {
+ char *el_name;
+ int el_flags;
+#define EHL_INITTED (1<<0)
+ u_int el_runcount;
+ struct mtx el_lock;
+ TAILQ_ENTRY(eventhandler_list) el_link;
+ TAILQ_HEAD(,eventhandler_entry) el_entries;
+};
+
+typedef struct eventhandler_entry *eventhandler_tag;
+
+#define EHL_LOCK(p) mtx_lock(&(p)->el_lock)
+#define EHL_UNLOCK(p) mtx_unlock(&(p)->el_lock)
+#define EHL_LOCK_ASSERT(p, x) mtx_assert(&(p)->el_lock, x)
+
+/*
+ * Macro to invoke the handlers for a given event.
+ */
+#define _EVENTHANDLER_INVOKE(name, list, ...) do { \
+ struct eventhandler_entry *_ep; \
+ struct eventhandler_entry_ ## name *_t; \
+ \
+ KASSERT((list)->el_flags & EHL_INITTED, \
+ ("eventhandler_invoke: running non-inited list")); \
+ EHL_LOCK_ASSERT((list), MA_OWNED); \
+ (list)->el_runcount++; \
+ KASSERT((list)->el_runcount > 0, \
+ ("eventhandler_invoke: runcount overflow")); \
+ CTR0(KTR_EVH, "eventhandler_invoke(\"" __STRING(name) "\")"); \
+ TAILQ_FOREACH(_ep, &((list)->el_entries), ee_link) { \
+ if (_ep->ee_priority != EHE_DEAD_PRIORITY) { \
+ EHL_UNLOCK((list)); \
+ _t = (struct eventhandler_entry_ ## name *)_ep; \
+ CTR1(KTR_EVH, "eventhandler_invoke: executing %p", \
+ (void *)_t->eh_func); \
+ _t->eh_func(_ep->ee_arg , ## __VA_ARGS__); \
+ EHL_LOCK((list)); \
+ } \
+ } \
+ KASSERT((list)->el_runcount > 0, \
+ ("eventhandler_invoke: runcount underflow")); \
+ (list)->el_runcount--; \
+ if ((list)->el_runcount == 0) \
+ eventhandler_prune_list(list); \
+ EHL_UNLOCK((list)); \
+} while (0)
+
+/*
+ * Slow handlers are entirely dynamic; lists are created
+ * when entries are added to them, and thus have no concept of "owner".
+ *
+ * Slow handlers need to be declared, but do not need to be defined. The
+ * declaration must be in scope wherever the handler is to be invoked.
+ */
+#define EVENTHANDLER_DECLARE(name, type) \
+struct eventhandler_entry_ ## name \
+{ \
+ struct eventhandler_entry ee; \
+ type eh_func; \
+}; \
+struct __hack
+
+#define EVENTHANDLER_DEFINE(name, func, arg, priority) \
+ static eventhandler_tag name ## _tag; \
+ static void name ## _evh_init(void *ctx) \
+ { \
+ name ## _tag = EVENTHANDLER_REGISTER(name, func, ctx, \
+ priority); \
+ } \
+ SYSINIT(name ## _evh_init, SI_SUB_CONFIGURE, SI_ORDER_ANY, \
+ name ## _evh_init, arg); \
+ struct __hack
+
+#define EVENTHANDLER_INVOKE(name, ...) \
+do { \
+ struct eventhandler_list *_el; \
+ \
+ if ((_el = eventhandler_find_list(#name)) != NULL) \
+ _EVENTHANDLER_INVOKE(name, _el , ## __VA_ARGS__); \
+} while (0)
+
+#define EVENTHANDLER_REGISTER(name, func, arg, priority) \
+ eventhandler_register(NULL, #name, func, arg, priority)
+
+#define EVENTHANDLER_DEREGISTER(name, tag) \
+do { \
+ struct eventhandler_list *_el; \
+ \
+ if ((_el = eventhandler_find_list(#name)) != NULL) \
+ eventhandler_deregister(_el, tag); \
+} while(0)
+
+
+eventhandler_tag eventhandler_register(struct eventhandler_list *list,
+ const char *name, void *func, void *arg, int priority);
+void eventhandler_deregister(struct eventhandler_list *list,
+ eventhandler_tag tag);
+struct eventhandler_list *eventhandler_find_list(const char *name);
+void eventhandler_prune_list(struct eventhandler_list *list);
+
+#ifdef VIMAGE
+typedef void (*vimage_iterator_func_t)(void *, ...);
+
+eventhandler_tag vimage_eventhandler_register(struct eventhandler_list *list,
+ const char *name, void *func, void *arg, int priority,
+ vimage_iterator_func_t);
+#endif
+
+/*
+ * Standard system event queues.
+ */
+
+/* Generic priority levels */
+#define EVENTHANDLER_PRI_FIRST 0
+#define EVENTHANDLER_PRI_ANY 10000
+#define EVENTHANDLER_PRI_LAST 20000
+
+/* Shutdown events */
+typedef void (*shutdown_fn)(void *, int);
+
+#define SHUTDOWN_PRI_FIRST EVENTHANDLER_PRI_FIRST
+#define SHUTDOWN_PRI_DEFAULT EVENTHANDLER_PRI_ANY
+#define SHUTDOWN_PRI_LAST EVENTHANDLER_PRI_LAST
+
+EVENTHANDLER_DECLARE(shutdown_pre_sync, shutdown_fn); /* before fs sync */
+EVENTHANDLER_DECLARE(shutdown_post_sync, shutdown_fn); /* after fs sync */
+EVENTHANDLER_DECLARE(shutdown_final, shutdown_fn);
+
+/* Low memory event */
+typedef void (*vm_lowmem_handler_t)(void *, int);
+#define LOWMEM_PRI_DEFAULT EVENTHANDLER_PRI_FIRST
+EVENTHANDLER_DECLARE(vm_lowmem, vm_lowmem_handler_t);
+
+/* Root mounted event */
+typedef void (*mountroot_handler_t)(void *);
+EVENTHANDLER_DECLARE(mountroot, mountroot_handler_t);
+
+/* VLAN state change events */
+struct ifnet;
+typedef void (*vlan_config_fn)(void *, struct ifnet *, uint16_t);
+typedef void (*vlan_unconfig_fn)(void *, struct ifnet *, uint16_t);
+EVENTHANDLER_DECLARE(vlan_config, vlan_config_fn);
+EVENTHANDLER_DECLARE(vlan_unconfig, vlan_unconfig_fn);
+
+/* BPF attach/detach events */
+struct ifnet;
+typedef void (*bpf_track_fn)(void *, struct ifnet *, int /* dlt */,
+ int /* 1 => attach, 0 => detach */);
+EVENTHANDLER_DECLARE(bpf_track, bpf_track_fn);
+
+/*
+ * Process events
+ * process_fork and exit handlers are called without Giant.
+ * exec handlers are called with Giant, but that is by accident.
+ */
+struct proc;
+struct image_params;
+
+typedef void (*exitlist_fn)(void *, struct proc *);
+typedef void (*forklist_fn)(void *, struct proc *, struct proc *, int);
+typedef void (*execlist_fn)(void *, struct proc *, struct image_params *);
+typedef void (*proc_ctor_fn)(void *, struct proc *);
+typedef void (*proc_dtor_fn)(void *, struct proc *);
+typedef void (*proc_init_fn)(void *, struct proc *);
+typedef void (*proc_fini_fn)(void *, struct proc *);
+EVENTHANDLER_DECLARE(process_ctor, proc_ctor_fn);
+EVENTHANDLER_DECLARE(process_dtor, proc_dtor_fn);
+EVENTHANDLER_DECLARE(process_init, proc_init_fn);
+EVENTHANDLER_DECLARE(process_fini, proc_fini_fn);
+EVENTHANDLER_DECLARE(process_exit, exitlist_fn);
+EVENTHANDLER_DECLARE(process_fork, forklist_fn);
+EVENTHANDLER_DECLARE(process_exec, execlist_fn);
+
+struct thread;
+typedef void (*thread_ctor_fn)(void *, struct thread *);
+typedef void (*thread_dtor_fn)(void *, struct thread *);
+typedef void (*thread_fini_fn)(void *, struct thread *);
+typedef void (*thread_init_fn)(void *, struct thread *);
+EVENTHANDLER_DECLARE(thread_ctor, thread_ctor_fn);
+EVENTHANDLER_DECLARE(thread_dtor, thread_dtor_fn);
+EVENTHANDLER_DECLARE(thread_init, thread_init_fn);
+EVENTHANDLER_DECLARE(thread_fini, thread_fini_fn);
+
+typedef void (*uma_zone_chfn)(void *);
+EVENTHANDLER_DECLARE(nmbclusters_change, uma_zone_chfn);
+EVENTHANDLER_DECLARE(maxsockets_change, uma_zone_chfn);
+
+typedef void(*schedtail_fn)(void *, struct proc *);
+EVENTHANDLER_DECLARE(schedtail, schedtail_fn);
+#endif /* SYS_EVENTHANDLER_H */
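A sketch of the declare/register/invoke cycle; the event name, handler, and unit argument are hypothetical:

    /* Declare the event; the declaration must be visible at every
     * invocation site. */
    typedef void (*widget_added_fn)(void *, int);
    EVENTHANDLER_DECLARE(widget_added, widget_added_fn);

    static void
    widget_added_handler(void *arg, int unit)
    {
            printf("widget%d added\n", unit);   /* arg: REGISTER's arg */
    }

    /* In an initialization path (typically run from a SYSINIT): */
    static void
    widget_example_init(void *dummy)
    {
            EVENTHANDLER_REGISTER(widget_added, widget_added_handler,
                NULL, EVENTHANDLER_PRI_ANY);
    }

    /* At the event site, run every registered handler: */
    static void
    widget_attach(int unit)
    {
            EVENTHANDLER_INVOKE(widget_added, unit);
    }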
diff --git a/rtems/freebsd/sys/exec.h b/rtems/freebsd/sys/exec.h
new file mode 100644
index 00000000..936ffd88
--- /dev/null
+++ b/rtems/freebsd/sys/exec.h
@@ -0,0 +1 @@
+/* EMPTY */
diff --git a/rtems/freebsd/sys/fail.h b/rtems/freebsd/sys/fail.h
new file mode 100644
index 00000000..936ffd88
--- /dev/null
+++ b/rtems/freebsd/sys/fail.h
@@ -0,0 +1 @@
+/* EMPTY */
diff --git a/rtems/freebsd/sys/fcntl.h b/rtems/freebsd/sys/fcntl.h
new file mode 100644
index 00000000..45e87ba4
--- /dev/null
+++ b/rtems/freebsd/sys/fcntl.h
@@ -0,0 +1,296 @@
+/*-
+ * Copyright (c) 1983, 1990, 1993
+ * The Regents of the University of California. All rights reserved.
+ * (c) UNIX System Laboratories, Inc.
+ * All or some portions of this file are derived from material licensed
+ * to the University of California by American Telephone and Telegraph
+ * Co. or Unix System Laboratories, Inc. and are reproduced herein with
+ * the permission of UNIX System Laboratories, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)fcntl.h 8.3 (Berkeley) 1/21/94
+ * $FreeBSD$
+ */
+
+#ifndef _SYS_FCNTL_HH_
+#define _SYS_FCNTL_HH_
+
+/*
+ * This file includes the definitions for open and fcntl
+ * described by POSIX for <fcntl.h>; it also includes
+ * related kernel definitions.
+ */
+
+#include <rtems/freebsd/sys/cdefs.h>
+#include <rtems/freebsd/sys/_types.h>
+
+#ifndef _MODE_T_DECLARED
+typedef __mode_t mode_t;
+#define _MODE_T_DECLARED
+#endif
+
+#ifndef _OFF_T_DECLARED
+typedef __off_t off_t;
+#define _OFF_T_DECLARED
+#endif
+
+#ifndef _PID_T_DECLARED
+typedef __pid_t pid_t;
+#define _PID_T_DECLARED
+#endif
+
+/*
+ * File status flags: these are used by open(2), fcntl(2).
+ * They are also used (indirectly) in the kernel file structure f_flags,
+ * which is a superset of the open/fcntl flags. Open flags and f_flags
+ * are inter-convertible using OFLAGS(fflags) and FFLAGS(oflags).
+ * Open/fcntl flags begin with O_; kernel-internal flags begin with F.
+ */
+/* open-only flags */
+#define O_RDONLY 0x0000 /* open for reading only */
+#define O_WRONLY 0x0001 /* open for writing only */
+#define O_RDWR 0x0002 /* open for reading and writing */
+#define O_ACCMODE 0x0003 /* mask for above modes */
+
+/*
+ * Kernel encoding of open mode; separate read and write bits that are
+ * independently testable: 1 greater than the above.
+ *
+ * XXX
+ * FREAD and FWRITE are excluded from the #ifdef _KERNEL so that TIOCFLUSH,
+ * which was documented to use FREAD/FWRITE, continues to work.
+ */
+#if __BSD_VISIBLE
+#define FREAD 0x0001
+#define FWRITE 0x0002
+#endif
+#define O_NONBLOCK 0x0004 /* no delay */
+#define O_APPEND 0x0008 /* set append mode */
+#if __BSD_VISIBLE
+#define O_SHLOCK 0x0010 /* open with shared file lock */
+#define O_EXLOCK 0x0020 /* open with exclusive file lock */
+#define O_ASYNC 0x0040 /* signal pgrp when data ready */
+#define O_FSYNC 0x0080 /* synchronous writes */
+#endif
+#define O_SYNC 0x0080 /* POSIX synonym for O_FSYNC */
+#if __BSD_VISIBLE
+#define O_NOFOLLOW 0x0100 /* don't follow symlinks */
+#endif
+#define O_CREAT 0x0200 /* create if nonexistent */
+#define O_TRUNC 0x0400 /* truncate to zero length */
+#define O_EXCL 0x0800 /* error if already exists */
+#ifdef _KERNEL
+#define FHASLOCK 0x4000 /* descriptor holds advisory lock */
+#endif
+
+/* Defined by POSIX 1003.1; BSD default, but must be distinct from O_RDONLY. */
+#define O_NOCTTY 0x8000 /* don't assign controlling terminal */
+
+#if __BSD_VISIBLE
+/* Attempt to bypass buffer cache */
+#define O_DIRECT 0x00010000
+#endif
+
+/* Defined by POSIX Extended API Set Part 2 */
+#if __BSD_VISIBLE
+#define O_DIRECTORY 0x00020000 /* Fail if not directory */
+#define O_EXEC 0x00040000 /* Open for execute only */
+#endif
+#ifdef _KERNEL
+#define FEXEC O_EXEC
+#endif
+
+/* Defined by POSIX 1003.1-2008; BSD default, but reserve for future use. */
+#if __POSIX_VISIBLE >= 200809
+#define O_TTY_INIT 0x00080000 /* Restore default termios attributes */
+#endif
+
+/*
+ * XXX missing O_DSYNC, O_RSYNC.
+ */
+
+#ifdef _KERNEL
+/* convert from open() flags to/from fflags; convert O_RD/WR to FREAD/FWRITE */
+#define FFLAGS(oflags) ((oflags) + 1)
+#define OFLAGS(fflags) ((fflags) - 1)
+
+/* bits to save after open */
+#define FMASK (FREAD|FWRITE|FAPPEND|FASYNC|FFSYNC|FNONBLOCK|O_DIRECT|FEXEC)
+/* bits settable by fcntl(F_SETFL, ...) */
+#define FCNTLFLAGS (FAPPEND|FASYNC|FFSYNC|FNONBLOCK|FRDAHEAD|O_DIRECT)
+
+#if defined(COMPAT_FREEBSD7) || defined(COMPAT_FREEBSD6) || \
+ defined(COMPAT_FREEBSD5) || defined(COMPAT_FREEBSD4)
+/*
+ * Set by shm_open(3) in older libc's to get automatic MAP_ASYNC
+ * behavior for POSIX shared memory objects (which are otherwise
+ * implemented as plain files).
+ */
+#define FPOSIXSHM O_NOFOLLOW
+#undef FCNTLFLAGS
+#define FCNTLFLAGS (FAPPEND|FASYNC|FFSYNC|FNONBLOCK|FPOSIXSHM|FRDAHEAD| \
+ O_DIRECT)
+#endif
+#endif
+
+/*
+ * The O_* flags used to have only F* names, which were used in the kernel
+ * and by fcntl. We retain the F* names for the kernel f_flag field
+ * and for backward compatibility for fcntl. These flags are deprecated.
+ */
+#if __BSD_VISIBLE
+#define FAPPEND O_APPEND /* kernel/compat */
+#define FASYNC O_ASYNC /* kernel/compat */
+#define FFSYNC O_FSYNC /* kernel */
+#define FNONBLOCK O_NONBLOCK /* kernel */
+#define FNDELAY O_NONBLOCK /* compat */
+#define O_NDELAY O_NONBLOCK /* compat */
+#endif
+
+/*
+ * We are out of bits in f_flag (which is a short). However,
+ * the flag bits not set in FMASK are only meaningful in the
+ * initial open syscall. Those bits can thus be given a
+ * different meaning for fcntl(2).
+ */
+#if __BSD_VISIBLE
+/* Read ahead */
+#define FRDAHEAD O_CREAT
+#endif
+
+/* Defined by POSIX Extended API Set Part 2 */
+#if __BSD_VISIBLE
+/*
+ * Magic value that specifies the use of the current working directory
+ * to determine the target of relative file paths in the openat() and
+ * similar syscalls.
+ */
+#define AT_FDCWD -100
+
+/*
+ * Miscellaneous flags for the *at() syscalls.
+ */
+#define AT_EACCESS 0x100 /* Check access using effective user and group ID */
+#define AT_SYMLINK_NOFOLLOW 0x200 /* Do not follow symbolic links */
+#define AT_SYMLINK_FOLLOW 0x400 /* Follow symbolic link */
+#define AT_REMOVEDIR 0x800 /* Remove directory instead of file */
+#endif
+
+/*
+ * Constants used for fcntl(2)
+ */
+
+/* command values */
+#define F_DUPFD 0 /* duplicate file descriptor */
+#define F_GETFD 1 /* get file descriptor flags */
+#define F_SETFD 2 /* set file descriptor flags */
+#define F_GETFL 3 /* get file status flags */
+#define F_SETFL 4 /* set file status flags */
+#if __BSD_VISIBLE || __XSI_VISIBLE || __POSIX_VISIBLE >= 200112
+#define F_GETOWN 5 /* get SIGIO/SIGURG proc/pgrp */
+#define F_SETOWN 6 /* set SIGIO/SIGURG proc/pgrp */
+#endif
+#define F_OGETLK 7 /* get record locking information */
+#define F_OSETLK 8 /* set record locking information */
+#define F_OSETLKW 9 /* F_SETLK; wait if blocked */
+#define F_DUP2FD 10 /* duplicate file descriptor to arg */
+#define F_GETLK 11 /* get record locking information */
+#define F_SETLK 12 /* set record locking information */
+#define F_SETLKW 13 /* F_SETLK; wait if blocked */
+#define F_SETLK_REMOTE 14 /* debugging support for remote locks */
+#define F_READAHEAD 15 /* read ahead */
+#define F_RDAHEAD 16 /* Darwin compatible read ahead */
+
+/* file descriptor flags (F_GETFD, F_SETFD) */
+#define FD_CLOEXEC 1 /* close-on-exec flag */
+
+/* record locking flags (F_GETLK, F_SETLK, F_SETLKW) */
+#define F_RDLCK 1 /* shared or read lock */
+#define F_UNLCK 2 /* unlock */
+#define F_WRLCK 3 /* exclusive or write lock */
+#define F_UNLCKSYS 4 /* purge locks for a given system ID */
+#define F_CANCEL 5 /* cancel an async lock request */
+#ifdef _KERNEL
+#define F_WAIT 0x010 /* Wait until lock is granted */
+#define F_FLOCK 0x020 /* Use flock(2) semantics for lock */
+#define F_POSIX 0x040 /* Use POSIX semantics for lock */
+#define F_REMOTE 0x080 /* Lock owner is remote NFS client */
+#define F_NOINTR 0x100 /* Ignore signals when waiting */
+#endif
+
+/*
+ * Advisory file segment locking data type -
+ * information passed to system by user
+ */
+struct flock {
+ off_t l_start; /* starting offset */
+ off_t l_len; /* len = 0 means until end of file */
+ pid_t l_pid; /* lock owner */
+ short l_type; /* lock type: read/write, etc. */
+ short l_whence; /* type of l_start */
+ int l_sysid; /* remote system id or zero for local */
+};
+
+/*
+ * Old advisory file segment locking data type,
+ * before adding l_sysid.
+ */
+struct oflock {
+ off_t l_start; /* starting offset */
+ off_t l_len; /* len = 0 means until end of file */
+ pid_t l_pid; /* lock owner */
+ short l_type; /* lock type: read/write, etc. */
+ short l_whence; /* type of l_start */
+};
+
+
+#if __BSD_VISIBLE
+/* lock operations for flock(2) */
+#define LOCK_SH 0x01 /* shared file lock */
+#define LOCK_EX 0x02 /* exclusive file lock */
+#define LOCK_NB 0x04 /* don't block when locking */
+#define LOCK_UN 0x08 /* unlock file */
+#endif
+
+/*
+ * XXX missing posix_fadvise() and posix_fallocate(), and POSIX_FADV_* macros.
+ */
+
+#ifndef _KERNEL
+__BEGIN_DECLS
+int open(const char *, int, ...);
+int creat(const char *, mode_t);
+int fcntl(int, int, ...);
+#if __BSD_VISIBLE || __POSIX_VISIBLE >= 200809
+int openat(int, const char *, int, ...);
+#endif
+#if __BSD_VISIBLE
+int flock(int, int);
+#endif
+__END_DECLS
+#endif
+
+#endif /* !_SYS_FCNTL_HH_ */
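A hedged userland sketch combining the open(2), flock(2), and fcntl(2) interfaces declared above; open_locked() is a hypothetical helper and error handling is abbreviated:

    #include <fcntl.h>      /* open(), fcntl(), flock() per this header */
    #include <unistd.h>     /* close() */

    /* Create/open a file, take an exclusive non-blocking lock, and
     * mark the descriptor close-on-exec. */
    static int
    open_locked(const char *path)
    {
            int fd;

            fd = open(path, O_RDWR | O_CREAT, 0644);
            if (fd == -1)
                    return (-1);
            if (flock(fd, LOCK_EX | LOCK_NB) == -1 ||
                fcntl(fd, F_SETFD, FD_CLOEXEC) == -1) {
                    close(fd);
                    return (-1);
            }
            return (fd);
    }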
diff --git a/rtems/freebsd/sys/file.h b/rtems/freebsd/sys/file.h
new file mode 100644
index 00000000..05583932
--- /dev/null
+++ b/rtems/freebsd/sys/file.h
@@ -0,0 +1,307 @@
+/*-
+ * Copyright (c) 1982, 1986, 1989, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)file.h 8.3 (Berkeley) 1/9/95
+ * $FreeBSD$
+ */
+
+#ifndef _SYS_FILE_HH_
+#define _SYS_FILE_HH_
+
+#ifndef _KERNEL
+#include <rtems/freebsd/sys/types.h> /* XXX */
+#include <rtems/freebsd/sys/fcntl.h>
+#include <rtems/freebsd/sys/unistd.h>
+#else
+#include <rtems/freebsd/sys/queue.h>
+#include <rtems/freebsd/sys/refcount.h>
+#include <rtems/freebsd/sys/_lock.h>
+#include <rtems/freebsd/sys/_mutex.h>
+
+struct stat;
+struct thread;
+struct uio;
+struct knote;
+struct vnode;
+struct socket;
+
+
+#endif /* _KERNEL */
+
+#define DTYPE_VNODE 1 /* file */
+#define DTYPE_SOCKET 2 /* communications endpoint */
+#define DTYPE_PIPE 3 /* pipe */
+#define DTYPE_FIFO 4 /* fifo (named pipe) */
+#define DTYPE_KQUEUE 5 /* event queue */
+#define DTYPE_CRYPTO 6 /* crypto */
+#define DTYPE_MQUEUE 7 /* posix message queue */
+#define DTYPE_SHM 8 /* swap-backed shared memory */
+#define DTYPE_SEM 9 /* posix semaphore */
+#define DTYPE_PTS 10 /* pseudo teletype master device */
+
+#ifdef _KERNEL
+
+struct file;
+struct ucred;
+
+typedef int fo_rdwr_t(struct file *fp, struct uio *uio,
+ struct ucred *active_cred, int flags,
+ struct thread *td);
+#define FOF_OFFSET 1 /* Use the offset in uio argument */
+typedef int fo_truncate_t(struct file *fp, off_t length,
+ struct ucred *active_cred, struct thread *td);
+typedef int fo_ioctl_t(struct file *fp, u_long com, void *data,
+ struct ucred *active_cred, struct thread *td);
+typedef int fo_poll_t(struct file *fp, int events,
+ struct ucred *active_cred, struct thread *td);
+typedef int fo_kqfilter_t(struct file *fp, struct knote *kn);
+typedef int fo_stat_t(struct file *fp, struct stat *sb,
+ struct ucred *active_cred, struct thread *td);
+typedef int fo_close_t(struct file *fp, struct thread *td);
+typedef int fo_flags_t;
+
+struct fileops {
+ fo_rdwr_t *fo_read;
+ fo_rdwr_t *fo_write;
+ fo_truncate_t *fo_truncate;
+ fo_ioctl_t *fo_ioctl;
+ fo_poll_t *fo_poll;
+ fo_kqfilter_t *fo_kqfilter;
+ fo_stat_t *fo_stat;
+ fo_close_t *fo_close;
+ fo_flags_t fo_flags; /* DFLAG_* below */
+};
+
+#define DFLAG_PASSABLE 0x01 /* may be passed via unix sockets. */
+#define DFLAG_SEEKABLE 0x02 /* seekable / nonsequential */
+#endif /* _KERNEL */
+
+#if defined(_KERNEL) || defined(_WANT_FILE)
+/*
+ * Kernel descriptor table.
+ * One entry for each open kernel vnode and socket.
+ *
+ * Below is the list of locks that protects members in struct file.
+ *
+ * (f) protected with mtx_lock(mtx_pool_find(fp))
+ * (d) cdevpriv_mtx
+ * none not locked
+ */
+
+struct file {
+ void *f_data; /* file descriptor specific data */
+ struct fileops *f_ops; /* File operations */
+ struct ucred *f_cred; /* associated credentials. */
+ struct vnode *f_vnode; /* NULL or applicable vnode */
+ short f_type; /* descriptor type */
+ short f_vnread_flags; /* (f) Sleep lock for f_offset */
+ volatile u_int f_flag; /* see fcntl.h */
+ volatile u_int f_count; /* reference count */
+ /*
+ * DTYPE_VNODE specific fields.
+ */
+ int f_seqcount; /* Count of sequential accesses. */
+ off_t f_nextoff; /* next expected read/write offset. */
+ struct cdev_privdata *f_cdevpriv; /* (d) Private data for the cdev. */
+ /*
+ * DFLAG_SEEKABLE specific fields
+ */
+ off_t f_offset;
+ /*
+ * Mandatory Access control information.
+ */
+ void *f_label; /* Place-holder for MAC label. */
+};
+
+#define FOFFSET_LOCKED 0x1
+#define FOFFSET_LOCK_WAITING 0x2
+
+#endif /* _KERNEL || _WANT_FILE */
+
+/*
+ * Userland version of struct file, for sysctl
+ */
+struct xfile {
+ size_t xf_size; /* size of struct xfile */
+ pid_t xf_pid; /* owning process */
+ uid_t xf_uid; /* effective uid of owning process */
+ int xf_fd; /* descriptor number */
+ void *xf_file; /* address of struct file */
+ short xf_type; /* descriptor type */
+ int xf_count; /* reference count */
+ int xf_msgcount; /* references from message queue */
+ off_t xf_offset; /* file offset */
+ void *xf_data; /* file descriptor specific data */
+ void *xf_vnode; /* vnode pointer */
+ u_int xf_flag; /* flags (see fcntl.h) */
+};
+
+#ifdef _KERNEL
+
+#ifdef MALLOC_DECLARE
+MALLOC_DECLARE(M_FILE);
+#endif
+
+extern struct fileops vnops;
+extern struct fileops badfileops;
+extern struct fileops socketops;
+extern int maxfiles; /* kernel limit on number of open files */
+extern int maxfilesperproc; /* per process limit on number of open files */
+extern volatile int openfiles; /* actual number of open files */
+
+int fget(struct thread *td, int fd, struct file **fpp);
+int fget_read(struct thread *td, int fd, struct file **fpp);
+int fget_write(struct thread *td, int fd, struct file **fpp);
+int _fdrop(struct file *fp, struct thread *td);
+
+/*
+ * The socket operations are used a couple of places.
+ * XXX: This is wrong, they should go through the operations vector for
+ * XXX: sockets instead of going directly for the individual functions. /phk
+ */
+fo_rdwr_t soo_read;
+fo_rdwr_t soo_write;
+fo_truncate_t soo_truncate;
+fo_ioctl_t soo_ioctl;
+fo_poll_t soo_poll;
+fo_kqfilter_t soo_kqfilter;
+fo_stat_t soo_stat;
+fo_close_t soo_close;
+
+void finit(struct file *, u_int, short, void *, struct fileops *);
+int fgetvp(struct thread *td, int fd, struct vnode **vpp);
+int fgetvp_read(struct thread *td, int fd, struct vnode **vpp);
+int fgetvp_write(struct thread *td, int fd, struct vnode **vpp);
+
+int fgetsock(struct thread *td, int fd, struct socket **spp, u_int *fflagp);
+void fputsock(struct socket *sp);
+
+#define fhold(fp) \
+ (refcount_acquire(&(fp)->f_count))
+#define fdrop(fp, td) \
+ (refcount_release(&(fp)->f_count) ? _fdrop((fp), (td)) : 0)
+
+static __inline fo_rdwr_t fo_read;
+static __inline fo_rdwr_t fo_write;
+static __inline fo_truncate_t fo_truncate;
+static __inline fo_ioctl_t fo_ioctl;
+static __inline fo_poll_t fo_poll;
+static __inline fo_kqfilter_t fo_kqfilter;
+static __inline fo_stat_t fo_stat;
+static __inline fo_close_t fo_close;
+
+static __inline int
+fo_read(fp, uio, active_cred, flags, td)
+ struct file *fp;
+ struct uio *uio;
+ struct ucred *active_cred;
+ int flags;
+ struct thread *td;
+{
+
+ return ((*fp->f_ops->fo_read)(fp, uio, active_cred, flags, td));
+}
+
+static __inline int
+fo_write(fp, uio, active_cred, flags, td)
+ struct file *fp;
+ struct uio *uio;
+ struct ucred *active_cred;
+ int flags;
+ struct thread *td;
+{
+
+ return ((*fp->f_ops->fo_write)(fp, uio, active_cred, flags, td));
+}
+
+static __inline int
+fo_truncate(fp, length, active_cred, td)
+ struct file *fp;
+ off_t length;
+ struct ucred *active_cred;
+ struct thread *td;
+{
+
+ return ((*fp->f_ops->fo_truncate)(fp, length, active_cred, td));
+}
+
+static __inline int
+fo_ioctl(fp, com, data, active_cred, td)
+ struct file *fp;
+ u_long com;
+ void *data;
+ struct ucred *active_cred;
+ struct thread *td;
+{
+
+ return ((*fp->f_ops->fo_ioctl)(fp, com, data, active_cred, td));
+}
+
+static __inline int
+fo_poll(fp, events, active_cred, td)
+ struct file *fp;
+ int events;
+ struct ucred *active_cred;
+ struct thread *td;
+{
+
+ return ((*fp->f_ops->fo_poll)(fp, events, active_cred, td));
+}
+
+static __inline int
+fo_stat(fp, sb, active_cred, td)
+ struct file *fp;
+ struct stat *sb;
+ struct ucred *active_cred;
+ struct thread *td;
+{
+
+ return ((*fp->f_ops->fo_stat)(fp, sb, active_cred, td));
+}
+
+static __inline int
+fo_close(fp, td)
+ struct file *fp;
+ struct thread *td;
+{
+
+ return ((*fp->f_ops->fo_close)(fp, td));
+}
+
+static __inline int
+fo_kqfilter(fp, kn)
+ struct file *fp;
+ struct knote *kn;
+{
+
+ return ((*fp->f_ops->fo_kqfilter)(fp, kn));
+}
+
+#endif /* _KERNEL */
+
+#endif /* !_SYS_FILE_HH_ */
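A sketch of how the kernel consumes this interface: fget_read() resolves a descriptor to a referenced struct file, fo_read() dispatches through the fileops vector, and fdrop() releases the reference. The td_ucred field is assumed from struct thread, which this header does not define:

    static int
    read_sketch(struct thread *td, int fd, struct uio *auio)
    {
            struct file *fp;
            int error;

            error = fget_read(td, fd, &fp);         /* takes a reference */
            if (error != 0)
                    return (error);
            error = fo_read(fp, auio, td->td_ucred, 0, td);
            fdrop(fp, td);                          /* drops the reference */
            return (error);
    }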
diff --git a/rtems/freebsd/sys/filedesc.h b/rtems/freebsd/sys/filedesc.h
new file mode 100644
index 00000000..ee26a1bf
--- /dev/null
+++ b/rtems/freebsd/sys/filedesc.h
@@ -0,0 +1,145 @@
+/*-
+ * Copyright (c) 1990, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)filedesc.h 8.1 (Berkeley) 6/2/93
+ * $FreeBSD$
+ */
+
+#ifndef _SYS_FILEDESC_HH_
+#define _SYS_FILEDESC_HH_
+
+#include <rtems/freebsd/sys/queue.h>
+#include <rtems/freebsd/sys/event.h>
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/priority.h>
+#include <rtems/freebsd/sys/sx.h>
+
+#include <rtems/freebsd/machine/_limits.h>
+
+/*
+ * This structure is used for the management of descriptors. It may be
+ * shared by multiple processes.
+ */
+#define NDSLOTTYPE u_long
+
+struct filedesc {
+ struct file **fd_ofiles; /* file structures for open files */
+ char *fd_ofileflags; /* per-process open file flags */
+ struct vnode *fd_cdir; /* current directory */
+ struct vnode *fd_rdir; /* root directory */
+ struct vnode *fd_jdir; /* jail root directory */
+ int fd_nfiles; /* number of open files allocated */
+ NDSLOTTYPE *fd_map; /* bitmap of free fds */
+ int fd_lastfile; /* high-water mark of fd_ofiles */
+ int fd_freefile; /* approx. next free file */
+ u_short fd_cmask; /* mask for file creation */
+ u_short fd_refcnt; /* thread reference count */
+ u_short fd_holdcnt; /* hold count on structure + mutex */
+ struct sx fd_sx; /* protects members of this struct */
+ struct kqlist fd_kqlist; /* list of kqueues on this filedesc */
+ int fd_holdleaderscount; /* block fdfree() for shared close() */
+ int fd_holdleaderswakeup; /* fdfree() needs wakeup */
+};
+
+/*
+ * Structure to keep track of (process leader, struct filedesc) tuples.
+ * Each process has a pointer to such a structure when detailed tracking
+ * is needed, e.g., when rfork(RFPROC | RFMEM) causes a file descriptor
+ * table to be shared by processes having different "p_leader" pointers
+ * and thus distinct POSIX style locks.
+ *
+ * fdl_refcount and fdl_holdcount are protected by struct filedesc mtx.
+ */
+struct filedesc_to_leader {
+ int fdl_refcount; /* references from struct proc */
+ int fdl_holdcount; /* temporary hold during closef */
+ int fdl_wakeup; /* fdfree() waits on closef() */
+ struct proc *fdl_leader; /* owner of POSIX locks */
+ /* Circular list: */
+ struct filedesc_to_leader *fdl_prev;
+ struct filedesc_to_leader *fdl_next;
+};
+
+/*
+ * Per-process open flags.
+ */
+#define UF_EXCLOSE 0x01 /* auto-close on exec */
+
+#ifdef _KERNEL
+
+/* Lock a file descriptor table. */
+#define FILEDESC_LOCK_INIT(fdp) sx_init(&(fdp)->fd_sx, "filedesc structure")
+#define FILEDESC_LOCK_DESTROY(fdp) sx_destroy(&(fdp)->fd_sx)
+#define FILEDESC_LOCK(fdp) (&(fdp)->fd_sx)
+#define FILEDESC_XLOCK(fdp) sx_xlock(&(fdp)->fd_sx)
+#define FILEDESC_XUNLOCK(fdp) sx_xunlock(&(fdp)->fd_sx)
+#define FILEDESC_SLOCK(fdp) sx_slock(&(fdp)->fd_sx)
+#define FILEDESC_SUNLOCK(fdp) sx_sunlock(&(fdp)->fd_sx)
+
+#define FILEDESC_LOCK_ASSERT(fdp) sx_assert(&(fdp)->fd_sx, SX_LOCKED | \
+ SX_NOTRECURSED)
+#define FILEDESC_XLOCK_ASSERT(fdp) sx_assert(&(fdp)->fd_sx, SX_XLOCKED | \
+ SX_NOTRECURSED)
+
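+/*
+ * Illustrative sketch (see fget_locked() below): a lookup under the
+ * shared lock yields a file pointer that is only valid while the lock
+ * is held, because no reference is taken.
+ *
+ *	FILEDESC_SLOCK(fdp);
+ *	fp = fget_locked(fdp, fd);
+ *	if (fp != NULL)
+ *		... use fp, or fhold() it before dropping the lock ...
+ *	FILEDESC_SUNLOCK(fdp);
+ */
+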
+struct thread;
+
+int closef(struct file *fp, struct thread *td);
+int dupfdopen(struct thread *td, struct filedesc *fdp, int indx, int dfd,
+ int mode, int error);
+int falloc(struct thread *td, struct file **resultfp, int *resultfd);
+int fdalloc(struct thread *td, int minfd, int *result);
+int fdavail(struct thread *td, int n);
+int fdcheckstd(struct thread *td);
+void fdclose(struct filedesc *fdp, struct file *fp, int idx, struct thread *td);
+void fdcloseexec(struct thread *td);
+struct filedesc *fdcopy(struct filedesc *fdp);
+void fdunshare(struct proc *p, struct thread *td);
+void fdfree(struct thread *td);
+struct filedesc *fdinit(struct filedesc *fdp);
+struct filedesc *fdshare(struct filedesc *fdp);
+struct filedesc_to_leader *
+ filedesc_to_leader_alloc(struct filedesc_to_leader *old,
+ struct filedesc *fdp, struct proc *leader);
+int getvnode(struct filedesc *fdp, int fd, struct file **fpp);
+void mountcheckdirs(struct vnode *olddp, struct vnode *newdp);
+void setugidsafety(struct thread *td);
+
+/* Return a referenced file from an unlocked descriptor. */
+struct file *fget_unlocked(struct filedesc *fdp, int fd);
+
+/* Requires a FILEDESC_{S,X}LOCK held and returns without a ref. */
+static __inline struct file *
+fget_locked(struct filedesc *fdp, int fd)
+{
+
+ return (fd < 0 || fd >= fdp->fd_nfiles ? NULL : fdp->fd_ofiles[fd]);
+}
+
+#endif /* _KERNEL */
+
+#endif /* !_SYS_FILEDESC_HH_ */
diff --git a/rtems/freebsd/sys/filio.h b/rtems/freebsd/sys/filio.h
new file mode 100644
index 00000000..3937c1cd
--- /dev/null
+++ b/rtems/freebsd/sys/filio.h
@@ -0,0 +1,64 @@
+/*-
+ * Copyright (c) 1982, 1986, 1990, 1993, 1994
+ * The Regents of the University of California. All rights reserved.
+ * (c) UNIX System Laboratories, Inc.
+ * All or some portions of this file are derived from material licensed
+ * to the University of California by American Telephone and Telegraph
+ * Co. or Unix System Laboratories, Inc. and are reproduced herein with
+ * the permission of UNIX System Laboratories, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)filio.h 8.1 (Berkeley) 3/28/94
+ * $FreeBSD$
+ */
+
+#ifndef _SYS_FILIO_HH_
+#define _SYS_FILIO_HH_
+
+#include <rtems/freebsd/sys/ioccom.h>
+
+/* Generic file-descriptor ioctl's. */
+#define FIOCLEX _IO('f', 1) /* set close on exec on fd */
+#define FIONCLEX _IO('f', 2) /* remove close on exec */
+#define FIONREAD _IOR('f', 127, int) /* get # bytes to read */
+#define FIONBIO _IOW('f', 126, int) /* set/clear non-blocking i/o */
+#define FIOASYNC _IOW('f', 125, int) /* set/clear async i/o */
+#define FIOSETOWN _IOW('f', 124, int) /* set owner */
+#define FIOGETOWN _IOR('f', 123, int) /* get owner */
+#define FIODTYPE _IOR('f', 122, int) /* get d_flags type part */
+#define FIOGETLBA _IOR('f', 121, int) /* get start blk # */
+struct fiodgname_arg {
+ int len;
+ void *buf;
+};
+#define FIODGNAME _IOW('f', 120, struct fiodgname_arg) /* get dev. name */
+#define FIONWRITE _IOR('f', 119, int) /* get # bytes (yet) to write */
+#define FIONSPACE _IOR('f', 118, int) /* get space in send queue */
+/* Handle lseek SEEK_DATA and SEEK_HOLE for holey file knowledge. */
+#define FIOSEEKDATA _IOWR('f', 97, off_t) /* SEEK_DATA */
+#define FIOSEEKHOLE _IOWR('f', 98, off_t) /* SEEK_HOLE */
+
+#endif /* !_SYS_FILIO_HH_ */
diff --git a/rtems/freebsd/sys/fnv_hash.h b/rtems/freebsd/sys/fnv_hash.h
new file mode 100644
index 00000000..2dbed339
--- /dev/null
+++ b/rtems/freebsd/sys/fnv_hash.h
@@ -0,0 +1,68 @@
+/*-
+ * Fowler / Noll / Vo Hash (FNV Hash)
+ * http://www.isthe.com/chongo/tech/comp/fnv/
+ *
+ * This is an implementation of the algorithms posted above.
+ * This file is placed in the public domain by Peter Wemm.
+ *
+ * $FreeBSD$
+ */
+
+typedef u_int32_t Fnv32_t;
+typedef u_int64_t Fnv64_t;
+
+#define FNV1_32_INIT ((Fnv32_t) 33554467UL)
+#define FNV1_64_INIT ((Fnv64_t) 0xcbf29ce484222325ULL)
+
+#define FNV_32_PRIME ((Fnv32_t) 0x01000193UL)
+#define FNV_64_PRIME ((Fnv64_t) 0x100000001b3ULL)
+
+static __inline Fnv32_t
+fnv_32_buf(const void *buf, size_t len, Fnv32_t hval)
+{
+ const u_int8_t *s = (const u_int8_t *)buf;
+
+ while (len-- != 0) {
+ hval *= FNV_32_PRIME;
+ hval ^= *s++;
+ }
+ return hval;
+}
+
+static __inline Fnv32_t
+fnv_32_str(const char *str, Fnv32_t hval)
+{
+ const u_int8_t *s = (const u_int8_t *)str;
+ Fnv32_t c;
+
+ while ((c = *s++) != 0) {
+ hval *= FNV_32_PRIME;
+ hval ^= c;
+ }
+ return hval;
+}
+
+static __inline Fnv64_t
+fnv_64_buf(const void *buf, size_t len, Fnv64_t hval)
+{
+ const u_int8_t *s = (const u_int8_t *)buf;
+
+ while (len-- != 0) {
+ hval *= FNV_64_PRIME;
+ hval ^= *s++;
+ }
+ return hval;
+}
+
+static __inline Fnv64_t
+fnv_64_str(const char *str, Fnv64_t hval)
+{
+ const u_int8_t *s = (const u_int8_t *)str;
+ u_register_t c; /* 32 bit on i386, 64 bit on alpha,ia64 */
+
+ while ((c = *s++) != 0) {
+ hval *= FNV_64_PRIME;
+ hval ^= c;
+ }
+ return hval;
+}
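+
+/*
+ * Illustrative use (not part of the original header): hash a string
+ * with the 32-bit functions, starting from FNV1_32_INIT.
+ *
+ *	Fnv32_t h = fnv_32_str("example", FNV1_32_INIT);
+ */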
diff --git a/rtems/freebsd/sys/hash.h b/rtems/freebsd/sys/hash.h
new file mode 100644
index 00000000..fb0f47fb
--- /dev/null
+++ b/rtems/freebsd/sys/hash.h
@@ -0,0 +1,121 @@
+/*-
+ * Copyright (c) 2001 Tobias Weingartner
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $OpenBSD: hash.h,v 1.4 2004/05/25 18:37:23 jmc Exp $
+ * $FreeBSD$
+ */
+
+#ifndef _SYS_HASH_HH_
+#define _SYS_HASH_HH_
+#include <rtems/freebsd/sys/types.h>
+
+/* Convenience */
+#ifndef HASHINIT
+#define HASHINIT 5381
+#define HASHSTEP(x,c) (((x << 5) + x) + (c))
+#endif
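+
+/*
+ * HASHSTEP() is one round of the classic Bernstein string hash: the
+ * ((x << 5) + x) term multiplies the running hash by 33 before the
+ * next byte is mixed in.  Illustrative use (hypothetical name):
+ *
+ *	uint32_t h = hash32_str(name, HASHINIT);
+ */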
+
+/*
+ * Return a 32-bit hash of the given buffer. The init
+ * value should be 0, or the previous hash value to extend
+ * the previous hash.
+ */
+static __inline uint32_t
+hash32_buf(const void *buf, size_t len, uint32_t hash)
+{
+ const unsigned char *p = buf;
+
+ while (len--)
+ hash = HASHSTEP(hash, *p++);
+
+ return hash;
+}
+
+/*
+ * Return a 32-bit hash of the given string.
+ */
+static __inline uint32_t
+hash32_str(const void *buf, uint32_t hash)
+{
+ const unsigned char *p = buf;
+
+ while (*p)
+ hash = HASHSTEP(hash, *p++);
+
+ return hash;
+}
+
+/*
+ * Return a 32-bit hash of the given string, limited by N.
+ */
+static __inline uint32_t
+hash32_strn(const void *buf, size_t len, uint32_t hash)
+{
+ const unsigned char *p = buf;
+
+ while (*p && len--)
+ hash = HASHSTEP(hash, *p++);
+
+ return hash;
+}
+
+/*
+ * Return a 32-bit hash of the given string terminated by C
+ * (as well as 0). This is mainly here as a helper for the
+ * namei() hashing of path name parts.
+ */
+static __inline uint32_t
+hash32_stre(const void *buf, int end, const char **ep, uint32_t hash)
+{
+ const unsigned char *p = buf;
+
+ while (*p && (*p != end))
+ hash = HASHSTEP(hash, *p++);
+
+ if (ep)
+ *ep = p;
+
+ return hash;
+}
+
+/*
+ * Return a 32-bit hash of the given string, limited by N,
+ * and terminated by C (as well as 0). This is mainly here
+ * as a helper for the namei() hashing of path name parts.
+ */
+static __inline uint32_t
+hash32_strne(const void *buf, size_t len, int end, const char **ep,
+ uint32_t hash)
+{
+ const unsigned char *p = buf;
+
+ while (*p && (*p != end) && len--)
+ hash = HASHSTEP(hash, *p++);
+
+ if (ep)
+ *ep = p;
+
+ return hash;
+}
+#endif /* !_SYS_HASH_HH_ */
diff --git a/rtems/freebsd/sys/interrupt.h b/rtems/freebsd/sys/interrupt.h
new file mode 100644
index 00000000..87efcc51
--- /dev/null
+++ b/rtems/freebsd/sys/interrupt.h
@@ -0,0 +1,186 @@
+/*-
+ * Copyright (c) 1997, Stefan Esser <se@freebsd.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _SYS_INTERRUPT_HH_
+#define _SYS_INTERRUPT_HH_
+
+#include <rtems/freebsd/sys/_lock.h>
+#include <rtems/freebsd/sys/_mutex.h>
+
+struct intr_event;
+struct intr_thread;
+struct trapframe;
+
+/*
+ * Describe a hardware interrupt handler.
+ *
+ * Multiple interrupt handlers for a specific event can be chained
+ * together.
+ */
+struct intr_handler {
+ driver_filter_t *ih_filter; /* Filter handler function. */
+ driver_intr_t *ih_handler; /* Threaded handler function. */
+ void *ih_argument; /* Argument to pass to handlers. */
+ int ih_flags;
+ char ih_name[MAXCOMLEN + 1]; /* Name of handler. */
+ struct intr_event *ih_event; /* Event we are connected to. */
+ int ih_need; /* Needs service. */
+ TAILQ_ENTRY(intr_handler) ih_next; /* Next handler for this event. */
+ u_char ih_pri; /* Priority of this handler. */
+ struct intr_thread *ih_thread; /* Ithread for filtered handler. */
+};
+
+/* Interrupt handle flags kept in ih_flags */
+#define IH_EXCLUSIVE 0x00000002 /* Exclusive interrupt. */
+#define IH_ENTROPY 0x00000004 /* Device is a good entropy source. */
+#define IH_DEAD 0x00000008 /* Handler should be removed. */
+#define IH_MPSAFE 0x80000000 /* Handler does not need Giant. */
+
+/*
+ * Describe an interrupt event. An event holds a list of handlers.
+ * The 'pre_ithread', 'post_ithread', 'post_filter', and 'assign_cpu'
+ * hooks are used to invoke MD code for certain operations.
+ *
+ * The 'pre_ithread' hook is called when an interrupt thread for
+ * handlers without filters is scheduled. It is responsible for
+ * ensuring that 1) the system won't be swamped with an interrupt
+ * storm from the associated source while the ithread runs and 2) the
+ * current CPU is able to receive interrupts from other interrupt
+ * sources. The first is usually accomplished by disabling
+ * level-triggered interrupts until the ithread completes. The second
+ * is accomplished on some platforms by acknowledging the interrupt
+ * via an EOI.
+ *
+ * The 'post_ithread' hook is invoked when an ithread finishes. It is
+ * responsible for ensuring that the associated interrupt source will
+ * trigger an interrupt when it is asserted in the future. Usually
+ * this is implemented by enabling a level-triggered interrupt that
+ * was previously disabled via the 'pre_ithread' hook.
+ *
+ * The 'post_filter' hook is invoked when a filter handles an
+ * interrupt. It is responsible for ensuring that the current CPU is
+ * able to receive interrupts again. On some platforms this is done
+ * by acknowledging the interrupts via an EOI.
+ *
+ * The 'assign_cpu' hook is used to bind an interrupt source to a
+ * specific CPU. If the interrupt cannot be bound, this function may
+ * return an error.
+ *
+ * Note that device drivers may also use interrupt events to manage
+ * multiplexing an interrupt handler into handlers for child
+ * devices. In that case, the above hooks are not used. The device
+ * can create an event for its interrupt resource and register child
+ * event handlers with that event. It can then use
+ * intr_event_execute_handlers() to execute non-filter handlers.
+ * Currently filter handlers are not supported by this, but that can
+ * be added by splitting out the filter loop from intr_event_handle()
+ * if desired.
+ */
+struct intr_event {
+ TAILQ_ENTRY(intr_event) ie_list;
+ TAILQ_HEAD(, intr_handler) ie_handlers; /* Interrupt handlers. */
+ char ie_name[MAXCOMLEN + 1]; /* Individual event name. */
+ char ie_fullname[MAXCOMLEN + 1];
+ struct mtx ie_lock;
+ void *ie_source; /* Cookie used by MD code. */
+ struct intr_thread *ie_thread; /* Thread we are connected to. */
+ void (*ie_pre_ithread)(void *);
+ void (*ie_post_ithread)(void *);
+ void (*ie_post_filter)(void *);
+ int (*ie_assign_cpu)(void *, u_char);
+ int ie_flags;
+ int ie_count; /* Loop counter. */
+ int ie_warncnt; /* Rate-check interrupt storm warns. */
+ struct timeval ie_warntm;
+ int ie_irq; /* Physical irq number if !SOFT. */
+ u_char ie_cpu; /* CPU this event is bound to. */
+};
+
+/* Interrupt event flags kept in ie_flags. */
+#define IE_SOFT 0x000001 /* Software interrupt. */
+#define IE_ENTROPY 0x000002 /* Interrupt is an entropy source. */
+#define IE_ADDING_THREAD 0x000004 /* Currently building an ithread. */
+
+/* Flags to pass to sched_swi. */
+#define SWI_DELAY 0x2
+
+/*
+ * Software interrupt numbers in priority order. The priority determines
+ * the priority of the corresponding interrupt thread.
+ */
+#define SWI_TTY 0
+#define SWI_NET 1
+#define SWI_CAMBIO 2
+#define SWI_VM 3
+#define SWI_CLOCK 4
+#define SWI_TQ_FAST 5
+#define SWI_TQ 6
+#define SWI_TQ_GIANT 6
+
+struct proc;
+
+extern struct intr_event *tty_intr_event;
+extern struct intr_event *clk_intr_event;
+extern void *softclock_ih;
+extern void *vm_ih;
+
+/* Counts and names for statistics (defined in MD code). */
+extern u_long eintrcnt[]; /* end of intrcnt[] */
+extern char eintrnames[]; /* end of intrnames[] */
+extern u_long intrcnt[]; /* counts for each device and stray */
+extern char intrnames[]; /* string table containing device names */
+
+#ifdef DDB
+void db_dump_intr_event(struct intr_event *ie, int handlers);
+#endif
+u_char intr_priority(enum intr_type flags);
+int intr_event_add_handler(struct intr_event *ie, const char *name,
+ driver_filter_t filter, driver_intr_t handler, void *arg,
+ u_char pri, enum intr_type flags, void **cookiep);
+int intr_event_bind(struct intr_event *ie, u_char cpu);
+int intr_event_create(struct intr_event **event, void *source,
+ int flags, int irq, void (*pre_ithread)(void *),
+ void (*post_ithread)(void *), void (*post_filter)(void *),
+ int (*assign_cpu)(void *, u_char), const char *fmt, ...)
+ __printflike(9, 10);
+int intr_event_describe_handler(struct intr_event *ie, void *cookie,
+ const char *descr);
+int intr_event_destroy(struct intr_event *ie);
+void intr_event_execute_handlers(struct proc *p, struct intr_event *ie);
+int intr_event_handle(struct intr_event *ie, struct trapframe *frame);
+int intr_event_remove_handler(void *cookie);
+int intr_getaffinity(int irq, void *mask);
+void *intr_handler_source(void *cookie);
+int intr_setaffinity(int irq, void *mask);
+int swi_add(struct intr_event **eventp, const char *name,
+ driver_intr_t handler, void *arg, int pri, enum intr_type flags,
+ void **cookiep);
+void swi_sched(void *cookie, int flags);
+int swi_remove(void *cookie);
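+
+/*
+ * Illustrative sketch (hypothetical handler): create a software
+ * interrupt thread with swi_add() and schedule it later from anywhere
+ * in the kernel with swi_sched().
+ *
+ *	static void *foo_ih;
+ *
+ *	swi_add(NULL, "foo", foo_swi_handler, NULL, SWI_TQ,
+ *	    INTR_MPSAFE, &foo_ih);
+ *	...
+ *	swi_sched(foo_ih, 0);
+ */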
+
+#endif
diff --git a/rtems/freebsd/sys/ioccom.h b/rtems/freebsd/sys/ioccom.h
new file mode 100644
index 00000000..67ad9795
--- /dev/null
+++ b/rtems/freebsd/sys/ioccom.h
@@ -0,0 +1,80 @@
+/*-
+ * Copyright (c) 1982, 1986, 1990, 1993, 1994
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)ioccom.h 8.2 (Berkeley) 3/28/94
+ * $FreeBSD$
+ */
+
+#ifndef _SYS_IOCCOM_HH_
+#define _SYS_IOCCOM_HH_
+
+/*
+ * Ioctl's have the command encoded in the lower word, and the size of
+ * any in or out parameters in the upper word. The high 3 bits of the
+ * upper word are used to encode the in/out status of the parameter.
+ */
+#define IOCPARM_SHIFT 13 /* number of bits for ioctl size */
+#define IOCPARM_MASK ((1 << IOCPARM_SHIFT) - 1) /* parameter length mask */
+#define IOCPARM_LEN(x) (((x) >> 16) & IOCPARM_MASK)
+#define IOCBASECMD(x) ((x) & ~(IOCPARM_MASK << 16))
+#define IOCGROUP(x) (((x) >> 8) & 0xff)
+
+#define IOCPARM_MAX (1 << IOCPARM_SHIFT) /* max size of ioctl */
+#define IOC_VOID 0x20000000 /* no parameters */
+#define IOC_OUT 0x40000000 /* copy out parameters */
+#define IOC_IN 0x80000000 /* copy in parameters */
+#define IOC_INOUT (IOC_IN|IOC_OUT)
+#define IOC_DIRMASK (IOC_VOID|IOC_OUT|IOC_IN)
+
+#define _IOC(inout,group,num,len) ((unsigned long) \
+ ((inout) | (((len) & IOCPARM_MASK) << 16) | ((group) << 8) | (num)))
+#define _IO(g,n) _IOC(IOC_VOID, (g), (n), 0)
+#define _IOWINT(g,n) _IOC(IOC_VOID, (g), (n), sizeof(int))
+#define _IOR(g,n,t) _IOC(IOC_OUT, (g), (n), sizeof(t))
+#define _IOW(g,n,t) _IOC(IOC_IN, (g), (n), sizeof(t))
+/* this should be _IORW, but stdio got there first */
+#define _IOWR(g,n,t) _IOC(IOC_INOUT, (g), (n), sizeof(t))
+
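+/*
+ * Worked example (illustrative): FIONREAD in filio.h above is defined
+ * as _IOR('f', 127, int).  With 4-byte ints this expands to
+ *
+ *	IOC_OUT | ((4 & IOCPARM_MASK) << 16) | ('f' << 8) | 127
+ *	  = 0x40000000 | 0x00040000 | 0x6600 | 0x7f = 0x4004667f
+ *
+ * so IOCPARM_LEN() recovers the 4-byte parameter size, IOCGROUP()
+ * recovers the group 'f', and the low byte is the command number 127.
+ */
+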
+#ifdef _KERNEL
+
+#if defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD5) || \
+ defined(COMPAT_FREEBSD4) || defined(COMPAT_43)
+#define IOCPARM_IVAL(x) ((int)(intptr_t)(void *)*(caddr_t *)(void *)(x))
+#endif
+
+#else
+
+#include <rtems/freebsd/sys/cdefs.h>
+
+__BEGIN_DECLS
+int ioctl(int, unsigned long, ...);
+__END_DECLS
+
+#endif
+
+#endif /* !_SYS_IOCCOM_HH_ */
diff --git a/rtems/freebsd/sys/jail.h b/rtems/freebsd/sys/jail.h
new file mode 100644
index 00000000..af690132
--- /dev/null
+++ b/rtems/freebsd/sys/jail.h
@@ -0,0 +1,385 @@
+/*-
+ * Copyright (c) 1999 Poul-Henning Kamp.
+ * Copyright (c) 2009 James Gritton.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _SYS_JAIL_HH_
+#define _SYS_JAIL_HH_
+
+#ifdef _KERNEL
+struct jail_v0 {
+ u_int32_t version;
+ char *path;
+ char *hostname;
+ u_int32_t ip_number;
+};
+#endif
+
+struct jail {
+ uint32_t version;
+ char *path;
+ char *hostname;
+ char *jailname;
+ uint32_t ip4s;
+ uint32_t ip6s;
+ struct in_addr *ip4;
+ struct in6_addr *ip6;
+};
+#define JAIL_API_VERSION 2
+
+/*
+ * For all xprison structs, always keep pr_version an int and the
+ * first field so userspace can easily distinguish them.
+ */
+#ifndef _KERNEL
+struct xprison_v1 {
+ int pr_version;
+ int pr_id;
+ char pr_path[MAXPATHLEN];
+ char pr_host[MAXHOSTNAMELEN];
+ u_int32_t pr_ip;
+};
+#endif
+
+struct xprison {
+ int pr_version;
+ int pr_id;
+ int pr_state;
+ cpusetid_t pr_cpusetid;
+ char pr_path[MAXPATHLEN];
+ char pr_host[MAXHOSTNAMELEN];
+ char pr_name[MAXHOSTNAMELEN];
+ uint32_t pr_ip4s;
+ uint32_t pr_ip6s;
+#if 0
+ /*
+	 * sizeof(xprison) will be malloc'ed plus the size needed for all
+	 * IPv4 and IPv6 addresses.  Offsets are based on the numbers of
+	 * addresses.
+ */
+ struct in_addr pr_ip4[];
+ struct in6_addr pr_ip6[];
+#endif
+};
+#define XPRISON_VERSION 3
+
+#define PRISON_STATE_INVALID 0
+#define PRISON_STATE_ALIVE 1
+#define PRISON_STATE_DYING 2
+
+/*
+ * Flags for jail_set and jail_get.
+ */
+#define JAIL_CREATE 0x01 /* Create jail if it doesn't exist */
+#define JAIL_UPDATE 0x02 /* Update parameters of existing jail */
+#define JAIL_ATTACH 0x04 /* Attach to jail upon creation */
+#define JAIL_DYING 0x08 /* Allow getting a dying jail */
+#define JAIL_SET_MASK 0x0f
+#define JAIL_GET_MASK 0x08
+
+#define JAIL_SYS_DISABLE 0
+#define JAIL_SYS_NEW 1
+#define JAIL_SYS_INHERIT 2
+
+#ifndef _KERNEL
+
+struct iovec;
+
+int jail(struct jail *);
+int jail_set(struct iovec *, unsigned int, int);
+int jail_get(struct iovec *, unsigned int, int);
+int jail_attach(int);
+int jail_remove(int);
+
+#else /* _KERNEL */
+
+#include <rtems/freebsd/sys/queue.h>
+#include <rtems/freebsd/sys/sysctl.h>
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/mutex.h>
+#include <rtems/freebsd/sys/_task.h>
+
+#define JAIL_MAX 999999
+
+#ifdef MALLOC_DECLARE
+MALLOC_DECLARE(M_PRISON);
+#endif
+#endif /* _KERNEL */
+
+#if defined(_KERNEL) || defined(_WANT_PRISON)
+
+#include <rtems/freebsd/sys/osd.h>
+
+#define HOSTUUIDLEN 64
+
+/*
+ * This structure describes a prison.  It is pointed to by all struct
+ * ucred's of the inmates.  pr_ref keeps track of them and is used to
+ * delete the structure when the last inmate is dead.
+ *
+ * Lock key:
+ * (a) allprison_lock
+ * (p) locked by pr_mtx
+ * (c) set only during creation before the structure is shared, no mutex
+ * required to read
+ * (d) set only during destruction of jail, no mutex needed
+ */
+struct prison {
+ TAILQ_ENTRY(prison) pr_list; /* (a) all prisons */
+ int pr_id; /* (c) prison id */
+ int pr_ref; /* (p) refcount */
+ int pr_uref; /* (p) user (alive) refcount */
+ unsigned pr_flags; /* (p) PR_* flags */
+ LIST_HEAD(, prison) pr_children; /* (a) list of child jails */
+ LIST_ENTRY(prison) pr_sibling; /* (a) next in parent's list */
+ struct prison *pr_parent; /* (c) containing jail */
+ struct mtx pr_mtx;
+ struct task pr_task; /* (d) destroy task */
+ struct osd pr_osd; /* (p) additional data */
+ struct cpuset *pr_cpuset; /* (p) cpuset */
+ struct vnet *pr_vnet; /* (c) network stack */
+ struct vnode *pr_root; /* (c) vnode to rdir */
+ int pr_ip4s; /* (p) number of v4 IPs */
+ int pr_ip6s; /* (p) number of v6 IPs */
+ struct in_addr *pr_ip4; /* (p) v4 IPs of jail */
+ struct in6_addr *pr_ip6; /* (p) v6 IPs of jail */
+ void *pr_sparep[4];
+ int pr_childcount; /* (a) number of child jails */
+ int pr_childmax; /* (p) maximum child jails */
+ unsigned pr_allow; /* (p) PR_ALLOW_* flags */
+ int pr_securelevel; /* (p) securelevel */
+ int pr_enforce_statfs; /* (p) statfs permission */
+ int pr_spare[5];
+ unsigned long pr_hostid; /* (p) jail hostid */
+ char pr_name[MAXHOSTNAMELEN]; /* (p) admin jail name */
+ char pr_path[MAXPATHLEN]; /* (c) chroot path */
+ char pr_hostname[MAXHOSTNAMELEN]; /* (p) jail hostname */
+ char pr_domainname[MAXHOSTNAMELEN]; /* (p) jail domainname */
+ char pr_hostuuid[HOSTUUIDLEN]; /* (p) jail hostuuid */
+};
+#endif /* _KERNEL || _WANT_PRISON */
+
+#ifdef _KERNEL
+/* Flag bits set via options */
+#define PR_PERSIST 0x00000001 /* Can exist without processes */
+#define PR_HOST 0x00000002 /* Virtualize hostname et al */
+#define PR_IP4_USER 0x00000004 /* Restrict IPv4 addresses */
+#define PR_IP6_USER 0x00000008 /* Restrict IPv6 addresses */
+#define PR_VNET 0x00000010 /* Virtual network stack */
+#define PR_IP4_DISABLE 0x00000020 /* Disable IPv4 */
+#define PR_IP6_DISABLE 0x00000040 /* Disable IPv6 */
+#define PR_IP4_SADDRSEL 0x00000080 /* Do IPv4 src addr sel. or use the */
+ /* primary jail address. */
+#define PR_IP6_SADDRSEL 0x00000100 /* Do IPv6 src addr sel. or use the */
+ /* primary jail address. */
+
+/* Internal flag bits */
+#define PR_REMOVE 0x01000000 /* In process of being removed */
+#define PR_IP4 0x02000000 /* IPv4 restricted or disabled */
+ /* by this jail or an ancestor */
+#define PR_IP6 0x04000000 /* IPv6 restricted or disabled */
+ /* by this jail or an ancestor */
+
+/* Flags for pr_allow */
+#define PR_ALLOW_SET_HOSTNAME 0x0001
+#define PR_ALLOW_SYSVIPC 0x0002
+#define PR_ALLOW_RAW_SOCKETS 0x0004
+#define PR_ALLOW_CHFLAGS 0x0008
+#define PR_ALLOW_MOUNT 0x0010
+#define PR_ALLOW_QUOTAS 0x0020
+#define PR_ALLOW_SOCKET_AF 0x0040
+#define PR_ALLOW_ALL 0x007f
+
+/*
+ * OSD methods
+ */
+#define PR_METHOD_CREATE 0
+#define PR_METHOD_GET 1
+#define PR_METHOD_SET 2
+#define PR_METHOD_CHECK 3
+#define PR_METHOD_ATTACH 4
+#define PR_MAXMETHOD 5
+
+/*
+ * Lock/unlock a prison.
+ * XXX These exist not so much for general convenience, but to be useable in
+ * the FOREACH_PRISON_DESCENDANT_LOCKED macro which can't handle them in
+ * non-function form as currently defined.
+ */
+static __inline void
+prison_lock(struct prison *pr)
+{
+
+ mtx_lock(&pr->pr_mtx);
+}
+
+static __inline void
+prison_unlock(struct prison *pr)
+{
+
+ mtx_unlock(&pr->pr_mtx);
+}
+
+/* Traverse a prison's immediate children. */
+#define FOREACH_PRISON_CHILD(ppr, cpr) \
+ LIST_FOREACH(cpr, &(ppr)->pr_children, pr_sibling)
+
+/*
+ * Preorder traversal of all of a prison's descendants.
+ * This ugly loop allows the macro to be followed by a single block
+ * as expected in a looping primitive.
+ */
+#define FOREACH_PRISON_DESCENDANT(ppr, cpr, descend) \
+ for ((cpr) = (ppr), (descend) = 1; \
+ ((cpr) = (((descend) && !LIST_EMPTY(&(cpr)->pr_children)) \
+ ? LIST_FIRST(&(cpr)->pr_children) \
+ : ((cpr) == (ppr) \
+ ? NULL \
+ : (((descend) = LIST_NEXT(cpr, pr_sibling) != NULL) \
+ ? LIST_NEXT(cpr, pr_sibling) \
+ : (cpr)->pr_parent))));) \
+ if (!(descend)) \
+ ; \
+ else
+
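+/*
+ * Illustrative use (not part of the original header), with
+ * allprison_lock held:
+ *
+ *	struct prison *cpr;
+ *	int descend;
+ *
+ *	FOREACH_PRISON_DESCENDANT(pr, cpr, descend) {
+ *		... visit each descendant cpr of pr ...
+ *	}
+ */
+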
+/*
+ * As above, but lock descendants on the way down and unlock on the way up.
+ */
+#define FOREACH_PRISON_DESCENDANT_LOCKED(ppr, cpr, descend) \
+ for ((cpr) = (ppr), (descend) = 1; \
+ ((cpr) = (((descend) && !LIST_EMPTY(&(cpr)->pr_children)) \
+ ? LIST_FIRST(&(cpr)->pr_children) \
+ : ((cpr) == (ppr) \
+ ? NULL \
+ : ((prison_unlock(cpr), \
+ (descend) = LIST_NEXT(cpr, pr_sibling) != NULL) \
+ ? LIST_NEXT(cpr, pr_sibling) \
+ : (cpr)->pr_parent))));) \
+ if ((descend) ? (prison_lock(cpr), 0) : 1) \
+ ; \
+ else
+
+/*
+ * As above, but also keep track of the level descended to.
+ */
+#define FOREACH_PRISON_DESCENDANT_LOCKED_LEVEL(ppr, cpr, descend, level)\
+ for ((cpr) = (ppr), (descend) = 1, (level) = 0; \
+ ((cpr) = (((descend) && !LIST_EMPTY(&(cpr)->pr_children)) \
+ ? (level++, LIST_FIRST(&(cpr)->pr_children)) \
+ : ((cpr) == (ppr) \
+ ? NULL \
+ : ((prison_unlock(cpr), \
+ (descend) = LIST_NEXT(cpr, pr_sibling) != NULL) \
+ ? LIST_NEXT(cpr, pr_sibling) \
+ : (level--, (cpr)->pr_parent)))));) \
+ if ((descend) ? (prison_lock(cpr), 0) : 1) \
+ ; \
+ else
+
+/*
+ * Attributes of the physical system, and the root of the jail tree.
+ */
+extern struct prison prison0;
+
+TAILQ_HEAD(prisonlist, prison);
+extern struct prisonlist allprison;
+extern struct sx allprison_lock;
+
+/*
+ * Sysctls to describe jail parameters.
+ */
+SYSCTL_DECL(_security_jail_param);
+
+#define SYSCTL_JAIL_PARAM(module, param, type, fmt, descr) \
+ SYSCTL_PROC(_security_jail_param ## module, OID_AUTO, param, \
+ (type) | CTLFLAG_MPSAFE, NULL, 0, sysctl_jail_param, fmt, descr)
+#define SYSCTL_JAIL_PARAM_STRING(module, param, access, len, descr) \
+ SYSCTL_PROC(_security_jail_param ## module, OID_AUTO, param, \
+ CTLTYPE_STRING | CTLFLAG_MPSAFE | (access), NULL, len, \
+ sysctl_jail_param, "A", descr)
+#define SYSCTL_JAIL_PARAM_STRUCT(module, param, access, len, fmt, descr)\
+ SYSCTL_PROC(_security_jail_param ## module, OID_AUTO, param, \
+ CTLTYPE_STRUCT | CTLFLAG_MPSAFE | (access), NULL, len, \
+ sysctl_jail_param, fmt, descr)
+#define SYSCTL_JAIL_PARAM_NODE(module, descr) \
+ SYSCTL_NODE(_security_jail_param, OID_AUTO, module, 0, 0, descr)
+#define SYSCTL_JAIL_PARAM_SYS_NODE(module, access, descr) \
+ SYSCTL_JAIL_PARAM_NODE(module, descr); \
+ SYSCTL_JAIL_PARAM(_##module, , CTLTYPE_INT | (access), "E,jailsys", \
+ descr)
+
+/*
+ * Kernel support functions for jail().
+ */
+struct ucred;
+struct mount;
+struct sockaddr;
+struct statfs;
+int jailed(struct ucred *cred);
+int jailed_without_vnet(struct ucred *);
+void getcredhostname(struct ucred *, char *, size_t);
+void getcreddomainname(struct ucred *, char *, size_t);
+void getcredhostuuid(struct ucred *, char *, size_t);
+void getcredhostid(struct ucred *, unsigned long *);
+int prison_allow(struct ucred *, unsigned);
+int prison_check(struct ucred *cred1, struct ucred *cred2);
+int prison_owns_vnet(struct ucred *);
+int prison_canseemount(struct ucred *cred, struct mount *mp);
+void prison_enforce_statfs(struct ucred *cred, struct mount *mp,
+ struct statfs *sp);
+struct prison *prison_find(int prid);
+struct prison *prison_find_child(struct prison *, int);
+struct prison *prison_find_name(struct prison *, const char *);
+int prison_flag(struct ucred *, unsigned);
+void prison_free(struct prison *pr);
+void prison_free_locked(struct prison *pr);
+void prison_hold(struct prison *pr);
+void prison_hold_locked(struct prison *pr);
+void prison_proc_hold(struct prison *);
+void prison_proc_free(struct prison *);
+int prison_ischild(struct prison *, struct prison *);
+int prison_equal_ip4(struct prison *, struct prison *);
+int prison_get_ip4(struct ucred *cred, struct in_addr *ia);
+int prison_local_ip4(struct ucred *cred, struct in_addr *ia);
+int prison_remote_ip4(struct ucred *cred, struct in_addr *ia);
+int prison_check_ip4(struct ucred *cred, struct in_addr *ia);
+int prison_saddrsel_ip4(struct ucred *, struct in_addr *);
+#ifdef INET6
+int prison_equal_ip6(struct prison *, struct prison *);
+int prison_get_ip6(struct ucred *, struct in6_addr *);
+int prison_local_ip6(struct ucred *, struct in6_addr *, int);
+int prison_remote_ip6(struct ucred *, struct in6_addr *);
+int prison_check_ip6(struct ucred *, struct in6_addr *);
+int prison_saddrsel_ip6(struct ucred *, struct in6_addr *);
+#endif
+int prison_check_af(struct ucred *cred, int af);
+int prison_if(struct ucred *cred, struct sockaddr *sa);
+char *prison_name(struct prison *, struct prison *);
+int prison_priv_check(struct ucred *cred, int priv);
+int sysctl_jail_param(struct sysctl_oid *, void *, int, struct sysctl_req *);
+
+#endif /* _KERNEL */
+#endif /* !_SYS_JAIL_HH_ */
diff --git a/rtems/freebsd/sys/kernel.h b/rtems/freebsd/sys/kernel.h
new file mode 100644
index 00000000..70ce1f2c
--- /dev/null
+++ b/rtems/freebsd/sys/kernel.h
@@ -0,0 +1,424 @@
+/*-
+ * Copyright (c) 1995 Terrence R. Lambert
+ * All rights reserved.
+ *
+ * Copyright (c) 1990, 1993
+ * The Regents of the University of California. All rights reserved.
+ * (c) UNIX System Laboratories, Inc.
+ * All or some portions of this file are derived from material licensed
+ * to the University of California by American Telephone and Telegraph
+ * Co. or Unix System Laboratories, Inc. and are reproduced herein with
+ * the permission of UNIX System Laboratories, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)kernel.h 8.3 (Berkeley) 1/21/94
+ * $FreeBSD$
+ */
+
+#ifndef _SYS_KERNEL_HH_
+#define _SYS_KERNEL_HH_
+
+#include <rtems/freebsd/sys/linker_set.h>
+
+#ifdef _KERNEL
+
+/* for intrhook below */
+#include <rtems/freebsd/sys/queue.h>
+
+/* Global variables for the kernel. */
+
+#ifndef __rtems__
+/* 1.1 */
+extern char kernelname[MAXPATHLEN];
+#endif /* __rtems__ */
+
+extern int tick; /* usec per tick (1000000 / hz) */
+extern int hz; /* system clock's frequency */
+extern int psratio; /* ratio: prof / stat */
+extern int stathz; /* statistics clock's frequency */
+extern int profhz; /* profiling clock's frequency */
+extern int profprocs; /* number of processes being profiled */
+#ifndef __rtems__
+extern int ticks;
+#endif /* __rtems__ */
+
+#endif /* _KERNEL */
+
+/*
+ * Enumerated types for known system startup interfaces.
+ *
+ * Startup occurs in ascending numeric order; the list entries are
+ * sorted prior to attempting startup to guarantee order. Items
+ * of the same level are arbitrated for order based on the 'order'
+ * element.
+ *
+ * These numbers are arbitrary and are chosen ONLY for ordering; the
+ * enumeration values are explicit rather than implicit to provide
+ * for binary compatibility with inserted elements.
+ *
+ * The SI_SUB_RUN_SCHEDULER value must have the highest lexical value.
+ *
+ * The SI_SUB_SWAP value represents a position used by
+ * 4.4BSD-Lite but not by FreeBSD; it is maintained in dependent
+ * order to support porting.
+ *
+ * The SI_SUB_PROTO_BEGIN and SI_SUB_PROTO_END bracket a range of
+ * initializations to take place at splimp(). This is a historical
+ * wart that should be removed -- probably running everything at
+ * splimp() until the first init that doesn't want it is the correct
+ * fix. They are currently present to ensure historical behavior.
+ */
+enum sysinit_sub_id {
+ SI_SUB_DUMMY = 0x0000000, /* not executed; for linker*/
+ SI_SUB_DONE = 0x0000001, /* processed*/
+ SI_SUB_TUNABLES = 0x0700000, /* establish tunable values */
+ SI_SUB_COPYRIGHT = 0x0800001, /* first use of console*/
+ SI_SUB_SETTINGS = 0x0880000, /* check and recheck settings */
+ SI_SUB_MTX_POOL_STATIC = 0x0900000, /* static mutex pool */
+ SI_SUB_LOCKMGR = 0x0980000, /* lockmgr locks */
+ SI_SUB_VM = 0x1000000, /* virtual memory system init*/
+ SI_SUB_KMEM = 0x1800000, /* kernel memory*/
+ SI_SUB_KVM_RSRC = 0x1A00000, /* kvm operational limits*/
+ SI_SUB_WITNESS = 0x1A80000, /* witness initialization */
+ SI_SUB_MTX_POOL_DYNAMIC = 0x1AC0000, /* dynamic mutex pool */
+ SI_SUB_LOCK = 0x1B00000, /* various locks */
+ SI_SUB_EVENTHANDLER = 0x1C00000, /* eventhandler init */
+ SI_SUB_VNET_PRELINK = 0x1E00000, /* vnet init before modules */
+ SI_SUB_KLD = 0x2000000, /* KLD and module setup */
+ SI_SUB_CPU = 0x2100000, /* CPU resource(s)*/
+ SI_SUB_RANDOM = 0x2120000, /* random number generator */
+ SI_SUB_KDTRACE = 0x2140000, /* Kernel dtrace hooks */
+ SI_SUB_MAC = 0x2180000, /* TrustedBSD MAC subsystem */
+ SI_SUB_MAC_POLICY = 0x21C0000, /* TrustedBSD MAC policies */
+ SI_SUB_MAC_LATE = 0x21D0000, /* TrustedBSD MAC subsystem */
+ SI_SUB_VNET = 0x21E0000, /* vnet 0 */
+ SI_SUB_INTRINSIC = 0x2200000, /* proc 0*/
+ SI_SUB_VM_CONF = 0x2300000, /* config VM, set limits*/
+ SI_SUB_DDB_SERVICES = 0x2380000, /* capture, scripting, etc. */
+ SI_SUB_RUN_QUEUE = 0x2400000, /* set up run queue*/
+ SI_SUB_KTRACE = 0x2480000, /* ktrace */
+ SI_SUB_OPENSOLARIS = 0x2490000, /* OpenSolaris compatibility */
+ SI_SUB_CYCLIC = 0x24A0000, /* Cyclic timers */
+ SI_SUB_AUDIT = 0x24C0000, /* audit */
+ SI_SUB_CREATE_INIT = 0x2500000, /* create init process*/
+ SI_SUB_SCHED_IDLE = 0x2600000, /* required idle procs */
+ SI_SUB_MBUF = 0x2700000, /* mbuf subsystem */
+ SI_SUB_INTR = 0x2800000, /* interrupt threads */
+ SI_SUB_SOFTINTR = 0x2800001, /* start soft interrupt thread */
+ SI_SUB_ACL = 0x2900000, /* start for filesystem ACLs */
+ SI_SUB_DEVFS = 0x2F00000, /* devfs ready for devices */
+ SI_SUB_INIT_IF = 0x3000000, /* prep for net interfaces */
+ SI_SUB_NETGRAPH = 0x3010000, /* Let Netgraph initialize */
+ SI_SUB_DTRACE = 0x3020000, /* DTrace subsystem */
+ SI_SUB_DTRACE_PROVIDER = 0x3048000, /* DTrace providers */
+ SI_SUB_DTRACE_ANON = 0x308C000, /* DTrace anon enabling */
+ SI_SUB_DRIVERS = 0x3100000, /* Let Drivers initialize */
+ SI_SUB_CONFIGURE = 0x3800000, /* Configure devices */
+ SI_SUB_VFS = 0x4000000, /* virtual filesystem*/
+ SI_SUB_CLOCKS = 0x4800000, /* real time and stat clocks*/
+ SI_SUB_CLIST = 0x5800000, /* clists*/
+ SI_SUB_SYSV_SHM = 0x6400000, /* System V shared memory*/
+ SI_SUB_SYSV_SEM = 0x6800000, /* System V semaphores*/
+ SI_SUB_SYSV_MSG = 0x6C00000, /* System V message queues*/
+ SI_SUB_P1003_1B = 0x6E00000, /* P1003.1B realtime */
+ SI_SUB_PSEUDO = 0x7000000, /* pseudo devices*/
+ SI_SUB_EXEC = 0x7400000, /* execve() handlers */
+ SI_SUB_PROTO_BEGIN = 0x8000000, /* XXX: set splimp (kludge)*/
+ SI_SUB_PROTO_IF = 0x8400000, /* interfaces*/
+ SI_SUB_PROTO_DOMAININIT = 0x8600000, /* domain registration system */
+ SI_SUB_PROTO_DOMAIN = 0x8800000, /* domains (address families?)*/
+ SI_SUB_PROTO_IFATTACHDOMAIN = 0x8800001, /* domain dependent data init*/
+ SI_SUB_PROTO_END = 0x8ffffff, /* XXX: set splx (kludge)*/
+ SI_SUB_KPROF = 0x9000000, /* kernel profiling*/
+ SI_SUB_KICK_SCHEDULER = 0xa000000, /* start the timeout events*/
+ SI_SUB_INT_CONFIG_HOOKS = 0xa800000, /* Interrupts enabled config */
+ SI_SUB_ROOT_CONF = 0xb000000, /* Find root devices */
+ SI_SUB_DUMP_CONF = 0xb200000, /* Find dump devices */
+ SI_SUB_RAID = 0xb380000, /* Configure GEOM classes */
+ SI_SUB_SWAP = 0xc000000, /* swap */
+ SI_SUB_INTRINSIC_POST = 0xd000000, /* proc 0 cleanup*/
+ SI_SUB_SYSCALLS = 0xd800000, /* register system calls */
+ SI_SUB_VNET_DONE = 0xdc00000, /* vnet registration complete */
+ SI_SUB_KTHREAD_INIT = 0xe000000, /* init process*/
+ SI_SUB_KTHREAD_PAGE = 0xe400000, /* pageout daemon*/
+ SI_SUB_KTHREAD_VM = 0xe800000, /* vm daemon*/
+ SI_SUB_KTHREAD_BUF = 0xea00000, /* buffer daemon*/
+ SI_SUB_KTHREAD_UPDATE = 0xec00000, /* update daemon*/
+ SI_SUB_KTHREAD_IDLE = 0xee00000, /* idle procs*/
+ SI_SUB_SMP = 0xf000000, /* start the APs*/
+ SI_SUB_RUN_SCHEDULER = 0xfffffff /* scheduler*/
+};
+
+
+/*
+ * Some enumerated orders; "ANY" sorts last.
+ */
+enum sysinit_elem_order {
+ SI_ORDER_FIRST = 0x0000000, /* first*/
+ SI_ORDER_SECOND = 0x0000001, /* second*/
+ SI_ORDER_THIRD = 0x0000002, /* third*/
+ SI_ORDER_FOURTH = 0x0000003, /* fourth*/
+ SI_ORDER_MIDDLE = 0x1000000, /* somewhere in the middle */
+ SI_ORDER_ANY = 0xfffffff /* last*/
+};
+
+
+/*
+ * A system initialization call instance
+ *
+ * At the moment there is one instance of sysinit.  We probably do not
+ * want two, which is why this code is if'd out, but we definitely want
+ * to distinguish SYSINITs which take non-constant data pointers from
+ * SYSINITs which take constant data pointers.
+ *
+ * The C_* macros take functions expecting const void * arguments
+ * while the non-C_* macros take functions expecting just void * arguments.
+ *
+ * With -Wcast-qual on, the compiler issues warnings:
+ * - if we pass non-const data or functions taking non-const data
+ * to a C_* macro.
+ *
+ * - if we pass const data to the normal macros
+ *
+ * However, no warning is issued if we pass a function taking const data
+ * through a normal non-const macro. This is ok because the function is
+ * saying it won't modify the data so we don't care whether the data is
+ * modifiable or not.
+ */
+
+typedef void (*sysinit_nfunc_t)(void *);
+typedef void (*sysinit_cfunc_t)(const void *);
+
+struct sysinit {
+ enum sysinit_sub_id subsystem; /* subsystem identifier*/
+ enum sysinit_elem_order order; /* init order within subsystem*/
+ sysinit_cfunc_t func; /* function */
+ const void *udata; /* multiplexer/argument */
+};
+
+/*
+ * Default: no special processing
+ *
+ * The C_ version of SYSINIT is for data pointers to const
+ * data ( and functions taking data pointers to const data ).
+ * At the moment it is no different from SYSINIT and thus
+ * still results in warnings.
+ *
+ * The casts are necessary to have the compiler produce the
+ * correct warnings when -Wcast-qual is used.
+ *
+ */
+#ifndef __rtems__
+#define C_SYSINIT(uniquifier, subsystem, order, func, ident) \
+ static struct sysinit uniquifier ## _sys_init = { \
+ subsystem, \
+ order, \
+ func, \
+ (ident) \
+ }; \
+ DATA_SET(sysinit_set,uniquifier ## _sys_init)
+#else /* __rtems__ */
+#define SYSINIT_ENTRY_NAME(uniquifier) \
+ _bsd_ ## uniquifier ## _sys_init
+#define SYSINIT_REFERENCE_NAME(uniquifier) \
+ _bsd_ ## uniquifier ## _sys_init_ref
+#define C_SYSINIT(uniquifier, subsystem, order, func, ident) \
+ struct sysinit SYSINIT_ENTRY_NAME(uniquifier) = { \
+ subsystem, \
+ order, \
+ func, \
+ (ident) \
+ }; \
+ DATA_SET(sysinit_set,SYSINIT_ENTRY_NAME(uniquifier))
+#define SYSINIT_REFERENCE(uniquifier) \
+ extern struct sysinit SYSINIT_ENTRY_NAME(uniquifier); \
+ static struct sysinit const * const \
+ SYSINIT_REFERENCE_NAME(uniquifier) __used \
+ = &SYSINIT_ENTRY_NAME(uniquifier)
+#define SYSINIT_MODULE_REFERENCE(mod) \
+ SYSINIT_REFERENCE(mod ## module)
+#define SYSINIT_DRIVER_REFERENCE(driver, bus) \
+ SYSINIT_MODULE_REFERENCE(driver ## _ ## bus)
+#endif /* __rtems__ */
+
+#define SYSINIT(uniquifier, subsystem, order, func, ident) \
+ C_SYSINIT(uniquifier, subsystem, order, \
+ (sysinit_cfunc_t)(sysinit_nfunc_t)func, (void *)(ident))
+
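+/*
+ * Illustrative sketch (hypothetical names): register foo_init() to run
+ * during the pseudo-device stage of startup.  The sysinit set is
+ * sorted by (subsystem, order) before the entries are executed.
+ *
+ *	static void
+ *	foo_init(void *unused)
+ *	{
+ *		...
+ *	}
+ *	SYSINIT(foo, SI_SUB_PSEUDO, SI_ORDER_ANY, foo_init, NULL);
+ */
+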
+/*
+ * Called on module unload: no special processing
+ */
+#ifndef __rtems__
+#define C_SYSUNINIT(uniquifier, subsystem, order, func, ident) \
+ static struct sysinit uniquifier ## _sys_uninit = { \
+ subsystem, \
+ order, \
+ func, \
+ (ident) \
+ }; \
+ DATA_SET(sysuninit_set,uniquifier ## _sys_uninit)
+#else /* __rtems__ */
+#define C_SYSUNINIT(uniquifier, subsystem, order, func, ident)
+#endif /* __rtems__ */
+
+#define SYSUNINIT(uniquifier, subsystem, order, func, ident) \
+ C_SYSUNINIT(uniquifier, subsystem, order, \
+ (sysinit_cfunc_t)(sysinit_nfunc_t)func, (void *)(ident))
+
+void sysinit_add(struct sysinit **set, struct sysinit **set_end);
+
+#ifndef __rtems__
+/*
+ * Infrastructure for tunable 'constants'. Value may be specified at compile
+ * time or kernel load time. Rules relating tunables together can be placed
+ * in a SYSINIT function at SI_SUB_TUNABLES with SI_ORDER_LAST.
+ *
+ * WARNING: developers should never use the reserved suffixes specified in
+ * loader.conf(5) for any tunables or conflicts will result.
+ */
+
+/*
+ * int
+ * please avoid using for new tunables!
+ */
+extern void tunable_int_init(void *);
+struct tunable_int {
+ const char *path;
+ int *var;
+};
+#define TUNABLE_INT(path, var) \
+ static struct tunable_int __CONCAT(__tunable_int_, __LINE__) = { \
+ (path), \
+ (var), \
+ }; \
+ SYSINIT(__CONCAT(__Tunable_init_, __LINE__), \
+ SI_SUB_TUNABLES, SI_ORDER_MIDDLE, tunable_int_init, \
+ &__CONCAT(__tunable_int_, __LINE__))
+
+#define TUNABLE_INT_FETCH(path, var) getenv_int((path), (var))
+
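+/*
+ * Illustrative sketch (hypothetical names): tie the kernel variable
+ * foo_limit to the loader tunable "kern.foo.limit" so that a value set
+ * in loader.conf(5) overrides the compiled-in default at boot.
+ *
+ *	static int foo_limit = 16;
+ *	TUNABLE_INT("kern.foo.limit", &foo_limit);
+ */
+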
+/*
+ * long
+ */
+extern void tunable_long_init(void *);
+struct tunable_long {
+ const char *path;
+ long *var;
+};
+#define TUNABLE_LONG(path, var) \
+ static struct tunable_long __CONCAT(__tunable_long_, __LINE__) = { \
+ (path), \
+ (var), \
+ }; \
+ SYSINIT(__CONCAT(__Tunable_init_, __LINE__), \
+ SI_SUB_TUNABLES, SI_ORDER_MIDDLE, tunable_long_init,\
+ &__CONCAT(__tunable_long_, __LINE__))
+
+#define TUNABLE_LONG_FETCH(path, var) getenv_long((path), (var))
+
+/*
+ * unsigned long
+ */
+extern void tunable_ulong_init(void *);
+struct tunable_ulong {
+ const char *path;
+ unsigned long *var;
+};
+#define TUNABLE_ULONG(path, var) \
+ static struct tunable_ulong __CONCAT(__tunable_ulong_, __LINE__) = { \
+ (path), \
+ (var), \
+ }; \
+ SYSINIT(__CONCAT(__Tunable_init_, __LINE__), \
+ SI_SUB_TUNABLES, SI_ORDER_MIDDLE, tunable_ulong_init, \
+ &__CONCAT(__tunable_ulong_, __LINE__))
+
+#define TUNABLE_ULONG_FETCH(path, var) getenv_ulong((path), (var))
+
+/*
+ * quad
+ */
+extern void tunable_quad_init(void *);
+struct tunable_quad {
+ const char *path;
+ quad_t *var;
+};
+#define TUNABLE_QUAD(path, var) \
+ static struct tunable_quad __CONCAT(__tunable_quad_, __LINE__) = { \
+ (path), \
+ (var), \
+ }; \
+ SYSINIT(__CONCAT(__Tunable_init_, __LINE__), \
+ SI_SUB_TUNABLES, SI_ORDER_MIDDLE, tunable_quad_init, \
+ &__CONCAT(__tunable_quad_, __LINE__))
+
+#define TUNABLE_QUAD_FETCH(path, var) getenv_quad((path), (var))
+
+extern void tunable_str_init(void *);
+struct tunable_str {
+ const char *path;
+ char *var;
+ int size;
+};
+#define TUNABLE_STR(path, var, size) \
+ static struct tunable_str __CONCAT(__tunable_str_, __LINE__) = { \
+ (path), \
+ (var), \
+ (size), \
+ }; \
+ SYSINIT(__CONCAT(__Tunable_init_, __LINE__), \
+ SI_SUB_TUNABLES, SI_ORDER_MIDDLE, tunable_str_init, \
+ &__CONCAT(__tunable_str_, __LINE__))
+
+#define TUNABLE_STR_FETCH(path, var, size) \
+ getenv_string((path), (var), (size))
+#else /* __rtems__ */
+#define TUNABLE_INT(path, var)
+#define TUNABLE_INT_FETCH(path, var)
+#define TUNABLE_LONG(path, var)
+#define TUNABLE_LONG_FETCH(path, var)
+#define TUNABLE_ULONG(path, var)
+#define TUNABLE_ULONG_FETCH(path, var)
+#define TUNABLE_QUAD(path, var)
+#define TUNABLE_QUAD_FETCH(path, var)
+#define TUNABLE_STR(path, var, size)
+#define TUNABLE_STR_FETCH(path, var, size)
+#endif /* __rtems__ */
+
+struct intr_config_hook {
+ TAILQ_ENTRY(intr_config_hook) ich_links;
+ void (*ich_func)(void *arg);
+ void *ich_arg;
+};
+
+int config_intrhook_establish(struct intr_config_hook *hook);
+void config_intrhook_disestablish(struct intr_config_hook *hook);
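+
+/*
+ * Illustrative sketch (hypothetical names): defer part of a driver's
+ * initialisation until interrupts are enabled.  The hook function is
+ * expected to call config_intrhook_disestablish() when it is done.
+ *
+ *	static struct intr_config_hook foo_hook = {
+ *		.ich_func = foo_late_init,
+ *		.ich_arg = &foo_softc0,
+ *	};
+ *
+ *	config_intrhook_establish(&foo_hook);
+ */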
+
+#endif /* !_SYS_KERNEL_HH_ */
diff --git a/rtems/freebsd/sys/kobj.h b/rtems/freebsd/sys/kobj.h
new file mode 100644
index 00000000..385159aa
--- /dev/null
+++ b/rtems/freebsd/sys/kobj.h
@@ -0,0 +1,257 @@
+/*-
+ * Copyright (c) 2000,2003 Doug Rabson
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _SYS_KOBJ_HH_
+#define _SYS_KOBJ_HH_
+
+/*
+ * Forward declarations
+ */
+typedef struct kobj *kobj_t;
+typedef struct kobj_class *kobj_class_t;
+typedef struct kobj_method kobj_method_t;
+typedef int (*kobjop_t)(void);
+typedef struct kobj_ops *kobj_ops_t;
+typedef struct kobjop_desc *kobjop_desc_t;
+struct malloc_type;
+
+struct kobj_method {
+ kobjop_desc_t desc;
+ kobjop_t func;
+};
+
+/*
+ * A class is simply a method table and a sizeof value. When the first
+ * instance of the class is created, the method table will be compiled
+ * into a form more suited to efficient method dispatch. This compiled
+ * method table is always the first field of the object.
+ */
+#define KOBJ_CLASS_FIELDS \
+ const char *name; /* class name */ \
+ kobj_method_t *methods; /* method table */ \
+ size_t size; /* object size */ \
+ kobj_class_t *baseclasses; /* base classes */ \
+ u_int refs; /* reference count */ \
+ kobj_ops_t ops /* compiled method table */
+
+struct kobj_class {
+ KOBJ_CLASS_FIELDS;
+};
+
+/*
+ * Implementation of kobj.
+ */
+#define KOBJ_FIELDS \
+ kobj_ops_t ops
+
+struct kobj {
+ KOBJ_FIELDS;
+};
+
+/*
+ * The ops table is used as a cache of results from kobj_lookup_method().
+ */
+
+#define KOBJ_CACHE_SIZE 256
+
+struct kobj_ops {
+ kobj_method_t *cache[KOBJ_CACHE_SIZE];
+ kobj_class_t cls;
+};
+
+struct kobjop_desc {
+ unsigned int id; /* unique ID */
+ kobj_method_t *deflt; /* default implementation */
+};
+
+/*
+ * Shorthand for constructing method tables.
+ * The ternary operator is (ab)used to provoke a warning when FUNC
+ * has a signature that is not compatible with kobj method signature.
+ */
+#define KOBJMETHOD(NAME, FUNC) \
+ { &NAME##_desc, (kobjop_t) (1 ? FUNC : (NAME##_t *)NULL) }
+
+/*
+ * Terminator entry for method tables built with KOBJMETHOD().
+ */
+#define KOBJMETHOD_END { NULL, NULL }
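+
+/*
+ * Illustrative sketch (hypothetical class; assumes foo_doit_desc and
+ * foo_doit_t have been declared by the usual interface glue): a method
+ * table is an array of kobj_method_t terminated by KOBJMETHOD_END:
+ *
+ *	static kobj_method_t foo_methods[] = {
+ *		KOBJMETHOD(foo_doit, foo_doit_impl),
+ *		KOBJMETHOD_END
+ *	};
+ */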
+
+/*
+ * Declare a class (which should be defined in another file).
+ */
+#define DECLARE_CLASS(name) extern struct kobj_class name
+
+/*
+ * Define a class with no base classes (API backward-compatible with
+ * FreeBSD 5.1 and earlier).
+ */
+#define DEFINE_CLASS(name, methods, size) \
+DEFINE_CLASS_0(name, name ## _class, methods, size)
+
+/*
+ * Define a class with no base classes. Use like this:
+ *
+ * DEFINE_CLASS_0(foo, foo_class, foo_methods, sizeof(foo_softc));
+ */
+#define DEFINE_CLASS_0(name, classvar, methods, size) \
+ \
+struct kobj_class classvar = { \
+ #name, methods, size, NULL \
+}
+
+/*
+ * Define a class inheriting a single base class. Use like this:
+ *
+ * DEFINE_CLASS_1(foo, foo_class, foo_methods, sizeof(foo_softc),
+ * bar);
+ */
+#define DEFINE_CLASS_1(name, classvar, methods, size, \
+ base1) \
+ \
+static kobj_class_t name ## _baseclasses[] = \
+ { &base1, NULL }; \
+struct kobj_class classvar = { \
+ #name, methods, size, name ## _baseclasses \
+}
+
+/*
+ * Define a class inheriting two base classes. Use like this:
+ *
+ * DEFINE_CLASS_2(foo, foo_methods, sizeof(foo_softc),
+ *		  bar, baz);
+ */
+#define DEFINE_CLASS_2(name, methods, size, \
+ base1, base2) \
+ \
+static kobj_class_t name ## _baseclasses[] = \
+ { &base1, \
+ &base2, NULL }; \
+struct kobj_class name ## _class = { \
+ #name, methods, size, name ## _baseclasses \
+}
+
+/*
+ * Define a class inheriting three base classes. Use like this:
+ *
+ * DEFINE_CLASS_3(foo, foo_methods, sizeof(foo_softc),
+ *		  bar, baz, foobar);
+ */
+#define DEFINE_CLASS_3(name, methods, size, \
+ base1, base2, base3) \
+ \
+static kobj_class_t name ## _baseclasses[] = \
+ { &base1, \
+ &base2, \
+ &base3, NULL }; \
+struct kobj_class name ## _class = { \
+ #name, methods, size, name ## _baseclasses \
+}
+
+
+/*
+ * Compile the method table in a class.
+ */
+void kobj_class_compile(kobj_class_t cls);
+
+/*
+ * Compile the method table, with the caller providing the space for
+ * the ops table (for use before malloc is initialised).
+ */
+void kobj_class_compile_static(kobj_class_t cls, kobj_ops_t ops);
+
+/*
+ * Free the compiled method table in a class.
+ */
+void kobj_class_free(kobj_class_t cls);
+
+/*
+ * Allocate memory for and initialise a new object.
+ */
+kobj_t kobj_create(kobj_class_t cls,
+ struct malloc_type *mtype,
+ int mflags);
+
+/*
+ * Initialise a pre-allocated object.
+ */
+void kobj_init(kobj_t obj, kobj_class_t cls);
+
+/*
+ * Delete an object. If mtype is non-zero, free the memory.
+ */
+void kobj_delete(kobj_t obj, struct malloc_type *mtype);
+
+/*
+ * Maintain stats on hits/misses in lookup caches.
+ */
+#ifdef KOBJ_STATS
+extern u_int kobj_lookup_hits;
+extern u_int kobj_lookup_misses;
+#endif
+
+/*
+ * Look up the method in the cache; if it isn't there, look it up the
+ * slow way.
+ */
+#ifdef KOBJ_STATS
+#define KOBJOPLOOKUP(OPS,OP) do { \
+ kobjop_desc_t _desc = &OP##_##desc; \
+ kobj_method_t **_cep = \
+ &OPS->cache[_desc->id & (KOBJ_CACHE_SIZE-1)]; \
+ kobj_method_t *_ce = *_cep; \
+ kobj_lookup_hits++; /* assume hit */ \
+ if (_ce->desc != _desc) \
+ _ce = kobj_lookup_method(OPS->cls, \
+ _cep, _desc); \
+ _m = _ce->func; \
+} while(0)
+#else
+#define KOBJOPLOOKUP(OPS,OP) do { \
+ kobjop_desc_t _desc = &OP##_##desc; \
+ kobj_method_t **_cep = \
+ &OPS->cache[_desc->id & (KOBJ_CACHE_SIZE-1)]; \
+ kobj_method_t *_ce = *_cep; \
+ if (_ce->desc != _desc) \
+ _ce = kobj_lookup_method(OPS->cls, \
+ _cep, _desc); \
+ _m = _ce->func; \
+} while(0)
+#endif
+
+kobj_method_t* kobj_lookup_method(kobj_class_t cls,
+ kobj_method_t **cep,
+ kobjop_desc_t desc);
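+
+/*
+ * Illustrative sketch of a dispatch stub (hypothetical names; such stubs
+ * are normally generated from a .m interface description):
+ *
+ *	static __inline int
+ *	FOO_DOIT(kobj_t obj)
+ *	{
+ *		kobjop_t _m;
+ *
+ *		KOBJOPLOOKUP(obj->ops, foo_doit);
+ *		return (((foo_doit_t *) _m)(obj));
+ *	}
+ */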
+
+
+/*
+ * Default method implementation. Returns ENXIO.
+ */
+int kobj_error_method(void);
+
+#endif /* !_SYS_KOBJ_HH_ */
diff --git a/rtems/freebsd/sys/kthread.h b/rtems/freebsd/sys/kthread.h
new file mode 100644
index 00000000..2814bcdc
--- /dev/null
+++ b/rtems/freebsd/sys/kthread.h
@@ -0,0 +1,79 @@
+/*-
+ * Copyright (c) 1999 Peter Wemm <peter@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _SYS_KTHREAD_HH_
+#define _SYS_KTHREAD_HH_
+
+#include <rtems/freebsd/sys/cdefs.h>
+
+/*-
+ * A kernel process descriptor; used to start "internal" daemons.
+ *
+ * Note: global_procpp may be NULL for no global save area.
+ */
+struct kproc_desc {
+ char *arg0; /* arg 0 (for 'ps' listing) */
+ void (*func)(void); /* "main" for kernel process */
+ struct proc **global_procpp; /* ptr to proc ptr save area */
+};
+
+/* A kernel thread descriptor; used to start "internal" daemons. */
+struct kthread_desc {
+ char *arg0; /* arg 0 (for 'ps' listing) */
+ void (*func)(void); /* "main" for kernel thread */
+ struct thread **global_threadpp; /* ptr to thread ptr save area */
+};
+
+int kproc_create(void (*)(void *), void *, struct proc **,
+ int flags, int pages, const char *, ...) __printflike(6, 7);
+void kproc_exit(int) __dead2;
+int kproc_resume(struct proc *);
+void kproc_shutdown(void *, int);
+void kproc_start(const void *);
+int kproc_suspend(struct proc *, int);
+void kproc_suspend_check(struct proc *);
+
+/* Create a thread in the given process; create the process if needed. */
+int kproc_kthread_add(void (*)(void *), void *,
+ struct proc **,
+ struct thread **,
+ int flags, int pages,
+ const char *procname, const char *, ...) __printflike(8, 9);
+
+int kthread_add(void (*)(void *), void *,
+ struct proc *, struct thread **,
+ int flags, int pages, const char *, ...) __printflike(7, 8);
+void kthread_exit(void) __dead2;
+int kthread_resume(struct thread *);
+void kthread_shutdown(void *, int);
+void kthread_start(const void *);
+int kthread_suspend(struct thread *, int);
+void kthread_suspend_check(struct thread *);
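+
+/*
+ * Illustrative sketch (hypothetical names, not part of the original
+ * header): start a kernel thread running my_loop(); a NULL proc pointer
+ * attaches the thread to the kernel process:
+ *
+ *	static struct thread *my_td;
+ *	int error;
+ *
+ *	error = kthread_add(my_loop, NULL, NULL, &my_td, 0, 0, "myloop");
+ */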
+
+
+#endif /* !_SYS_KTHREAD_HH_ */
diff --git a/rtems/freebsd/sys/ktr.h b/rtems/freebsd/sys/ktr.h
new file mode 100644
index 00000000..3942b92e
--- /dev/null
+++ b/rtems/freebsd/sys/ktr.h
@@ -0,0 +1,282 @@
+/*-
+ * Copyright (c) 1996 Berkeley Software Design, Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Berkeley Software Design Inc's name may not be used to endorse or
+ * promote products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from BSDI $Id: ktr.h,v 1.10.2.7 2000/03/16 21:44:42 cp Exp $
+ * $FreeBSD$
+ */
+
+/*
+ * Wraparound kernel trace buffer support.
+ */
+
+#ifndef _SYS_KTR_HH_
+#define _SYS_KTR_HH_
+
+/*
+ * Trace classes
+ *
+ * Two of the trace classes (KTR_DEV and KTR_SUBSYS) are special in that
+ * they are really placeholders so that individual drivers and subsystems
+ * can map their internal tracing to the general class when they wish to
+ * have tracing enabled and map it to 0 when they don't.
+ */
+#define KTR_GEN 0x00000001 /* General (TR) */
+#define KTR_NET 0x00000002 /* Network */
+#define KTR_DEV 0x00000004 /* Device driver */
+#define KTR_LOCK 0x00000008 /* MP locking */
+#define KTR_SMP 0x00000010 /* MP general */
+#define KTR_SUBSYS 0x00000020 /* Subsystem. */
+#define KTR_PMAP 0x00000040 /* Pmap tracing */
+#define KTR_MALLOC 0x00000080 /* Malloc tracing */
+#define KTR_TRAP 0x00000100 /* Trap processing */
+#define KTR_INTR 0x00000200 /* Interrupt tracing */
+#define KTR_SIG 0x00000400 /* Signal processing */
+#define KTR_SPARE2 0x00000800 /* XXX Used by cxgb */
+#define KTR_PROC 0x00001000 /* Process scheduling */
+#define KTR_SYSC 0x00002000 /* System call */
+#define KTR_INIT 0x00004000 /* System initialization */
+#define KTR_SPARE3 0x00008000 /* XXX Used by cxgb */
+#define KTR_SPARE4 0x00010000 /* XXX Used by cxgb */
+#define KTR_EVH 0x00020000 /* Eventhandler */
+#define KTR_VFS 0x00040000 /* VFS events */
+#define KTR_VOP 0x00080000 /* Auto-generated vop events */
+#define KTR_VM 0x00100000 /* The virtual memory system */
+#define KTR_INET 0x00200000 /* IPv4 stack */
+#define KTR_RUNQ 0x00400000 /* Run queue */
+#define KTR_CONTENTION 0x00800000 /* Lock contention */
+#define KTR_UMA 0x01000000 /* UMA slab allocator */
+#define KTR_CALLOUT 0x02000000 /* Callouts and timeouts */
+#define KTR_GEOM 0x04000000 /* GEOM I/O events */
+#define KTR_BUSDMA 0x08000000 /* busdma(9) events */
+#define KTR_INET6 0x10000000 /* IPv6 stack */
+#define KTR_SCHED 0x20000000 /* Machine parsed sched info. */
+#define KTR_BUF 0x40000000 /* Buffer cache */
+#define KTR_ALL 0x7fffffff
+
+/*
+ * Trace classes which can be assigned to particular use at compile time.
+ * These must remain in the high bits, as some assembly code counts on it.
+ */
+#define KTR_CT1 0x01000000
+#define KTR_CT2 0x02000000
+#define KTR_CT3 0x04000000
+#define KTR_CT4 0x08000000
+#define KTR_CT5 0x10000000
+#define KTR_CT6 0x20000000
+#define KTR_CT7 0x40000000
+#define KTR_CT8 0x80000000
+
+/* Trace classes to compile in */
+#ifdef KTR
+#ifndef KTR_COMPILE
+#define KTR_COMPILE (KTR_ALL)
+#endif
+#else /* !KTR */
+#undef KTR_COMPILE
+#define KTR_COMPILE 0
+#endif /* KTR */
+
+/*
+ * Version number for ktr_entry struct. Increment this when you break binary
+ * compatibility.
+ */
+#define KTR_VERSION 2
+
+#define KTR_PARMS 6
+
+#ifndef LOCORE
+
+struct ktr_entry {
+ u_int64_t ktr_timestamp;
+ int ktr_cpu;
+ int ktr_line;
+ const char *ktr_file;
+ const char *ktr_desc;
+ struct thread *ktr_thread;
+ u_long ktr_parms[KTR_PARMS];
+};
+
+extern int ktr_cpumask;
+extern int ktr_mask;
+extern int ktr_entries;
+extern int ktr_verbose;
+
+extern volatile int ktr_idx;
+extern struct ktr_entry ktr_buf[];
+
+#ifdef KTR
+
+void ktr_tracepoint(u_int mask, const char *file, int line,
+ const char *format, u_long arg1, u_long arg2, u_long arg3,
+ u_long arg4, u_long arg5, u_long arg6);
+
+#define CTR6(m, format, p1, p2, p3, p4, p5, p6) do { \
+ if (KTR_COMPILE & (m)) \
+ ktr_tracepoint((m), __FILE__, __LINE__, format, \
+ (u_long)(p1), (u_long)(p2), (u_long)(p3), \
+ (u_long)(p4), (u_long)(p5), (u_long)(p6)); \
+ } while(0)
+#define CTR0(m, format) CTR6(m, format, 0, 0, 0, 0, 0, 0)
+#define CTR1(m, format, p1) CTR6(m, format, p1, 0, 0, 0, 0, 0)
+#define CTR2(m, format, p1, p2) CTR6(m, format, p1, p2, 0, 0, 0, 0)
+#define CTR3(m, format, p1, p2, p3) CTR6(m, format, p1, p2, p3, 0, 0, 0)
+#define CTR4(m, format, p1, p2, p3, p4) CTR6(m, format, p1, p2, p3, p4, 0, 0)
+#define CTR5(m, format, p1, p2, p3, p4, p5) CTR6(m, format, p1, p2, p3, p4, p5, 0)
+#else /* KTR */
+#define CTR0(m, d) (void)0
+#define CTR1(m, d, p1) (void)0
+#define CTR2(m, d, p1, p2) (void)0
+#define CTR3(m, d, p1, p2, p3) (void)0
+#define CTR4(m, d, p1, p2, p3, p4) (void)0
+#define CTR5(m, d, p1, p2, p3, p4, p5) (void)0
+#define CTR6(m, d, p1, p2, p3, p4, p5, p6) (void)0
+#endif /* KTR */
+
+#define TR0(d) CTR0(KTR_GEN, d)
+#define TR1(d, p1) CTR1(KTR_GEN, d, p1)
+#define TR2(d, p1, p2) CTR2(KTR_GEN, d, p1, p2)
+#define TR3(d, p1, p2, p3) CTR3(KTR_GEN, d, p1, p2, p3)
+#define TR4(d, p1, p2, p3, p4) CTR4(KTR_GEN, d, p1, p2, p3, p4)
+#define TR5(d, p1, p2, p3, p4, p5) CTR5(KTR_GEN, d, p1, p2, p3, p4, p5)
+#define TR6(d, p1, p2, p3, p4, p5, p6) CTR6(KTR_GEN, d, p1, p2, p3, p4, p5, p6)
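+
+/*
+ * Illustrative usage (hypothetical variables): record an event in the
+ * network trace class; the call compiles away unless KTR_NET is in
+ * KTR_COMPILE, and is recorded only when KTR_NET is set in ktr_mask:
+ *
+ *	CTR2(KTR_NET, "ifp %p lost %d packets", ifp, lost);
+ */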
+
+/*
+ * The event macros implement KTR graphic plotting facilities provided
+ * by src/tools/sched/schedgraph.py. Three generic types of events are
+ * supported: states, counters, and points.
+ *
+ * m is the ktr class for ktr_mask.
+ * ident is the string identifier that owns the event (e.g. "thread 10001").
+ * etype is the type of event to plot (state, counter, point).
+ * edat is the event-specific data (state name, counter value, point name).
+ * Up to four attributes may be supplied as name/value pairs of arguments.
+ *
+ * etype and attribute names must be string constants. This minimizes the
+ * number of ktr slots required by constructing the final format strings
+ * at compile time. Both must also include a colon and format specifier
+ * (e.g. "prio:%d", prio). It is recommended that string arguments be
+ * contained within escaped quotes if they may contain ',' or ':' characters.
+ *
+ * The special attribute (KTR_ATTR_LINKED, ident) creates a reference to another
+ * id on the graph for easy traversal of related graph elements.
+ */
+
+#define KTR_ATTR_LINKED "linkedto:\"%s\""
+#define KTR_EFMT(egroup, ident, etype) \
+ "KTRGRAPH group:\"" egroup "\", id:\"%s\", " etype ", attributes: "
+
+#define KTR_EVENT0(m, egroup, ident, etype, edat) \
+ CTR2(m, KTR_EFMT(egroup, ident, etype) "none", ident, edat)
+#define KTR_EVENT1(m, egroup, ident, etype, edat, a0, v0) \
+ CTR3(m, KTR_EFMT(egroup, ident, etype) a0, ident, edat, (v0))
+#define KTR_EVENT2(m, egroup, ident, etype, edat, a0, v0, a1, v1) \
+ CTR4(m, KTR_EFMT(egroup, ident, etype) a0 ", " a1, \
+ ident, edat, (v0), (v1))
+#define KTR_EVENT3(m, egroup, ident, etype, edat, a0, v0, a1, v1, a2, v2)\
+ CTR5(m,KTR_EFMT(egroup, ident, etype) a0 ", " a1 ", " a2, \
+ ident, edat, (v0), (v1), (v2))
+#define KTR_EVENT4(m, egroup, ident, etype, edat, \
+ a0, v0, a1, v1, a2, v2, a3, v3) \
+ CTR6(m,KTR_EFMT(egroup, ident, etype) a0 ", " a1 ", " a2 ", " a3,\
+ ident, edat, (v0), (v1), (v2), (v3))
+
+/*
+ * State functions graph state changes on an ident.
+ */
+#define KTR_STATE0(m, egroup, ident, state) \
+ KTR_EVENT0(m, egroup, ident, "state:\"%s\"", state)
+#define KTR_STATE1(m, egroup, ident, state, a0, v0) \
+ KTR_EVENT1(m, egroup, ident, "state:\"%s\"", state, a0, (v0))
+#define KTR_STATE2(m, egroup, ident, state, a0, v0, a1, v1) \
+ KTR_EVENT2(m, egroup, ident, "state:\"%s\"", state, a0, (v0), a1, (v1))
+#define KTR_STATE3(m, egroup, ident, state, a0, v0, a1, v1, a2, v2) \
+ KTR_EVENT3(m, egroup, ident, "state:\"%s\"", \
+ state, a0, (v0), a1, (v1), a2, (v2))
+#define KTR_STATE4(m, egroup, ident, state, a0, v0, a1, v1, a2, v2, a3, v3)\
+ KTR_EVENT4(m, egroup, ident, "state:\"%s\"", \
+ state, a0, (v0), a1, (v1), a2, (v2), a3, (v3))
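+
+/*
+ * Illustrative sketch (hypothetical arguments): graph a thread entering
+ * the "sleeping" state with its priority attached as an attribute:
+ *
+ *	KTR_STATE1(KTR_SCHED, "thread", tdname, "sleeping",
+ *	    "prio:%d", td->td_priority);
+ */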
+
+/*
+ * Counter functions graph counter values. The counter id
+ * must not be intermixed with a state id.
+ */
+#define KTR_COUNTER0(m, egroup, ident, counter) \
+ KTR_EVENT0(m, egroup, ident, "counter:%d", counter)
+#define	KTR_COUNTER1(m, egroup, ident, counter, a0, v0)			\
+	KTR_EVENT1(m, egroup, ident, "counter:%d", counter, a0, (v0))
+#define KTR_COUNTER2(m, egroup, ident, counter, a0, v0, a1, v1) \
+ KTR_EVENT2(m, egroup, ident, "counter:%d", counter, a0, (v0), a1, (v1))
+#define KTR_COUNTER3(m, egroup, ident, counter, a0, v0, a1, v1, a2, v2) \
+ KTR_EVENT3(m, egroup, ident, "counter:%d", \
+ counter, a0, (v0), a1, (v1), a2, (v2))
+#define KTR_COUNTER4(m, egroup, ident, counter, a0, v0, a1, v1, a2, v2, a3, v3)\
+ KTR_EVENT4(m, egroup, ident, "counter:%d", \
+ counter, a0, (v0), a1, (v1), a2, (v2), a3, (v3))
+
+/*
+ * Point functions plot points of interest on counter or state graphs.
+ */
+#define KTR_POINT0(m, egroup, ident, point) \
+ KTR_EVENT0(m, egroup, ident, "point:\"%s\"", point)
+#define KTR_POINT1(m, egroup, ident, point, a0, v0) \
+ KTR_EVENT1(m, egroup, ident, "point:\"%s\"", point, a0, (v0))
+#define KTR_POINT2(m, egroup, ident, point, a0, v0, a1, v1) \
+ KTR_EVENT2(m, egroup, ident, "point:\"%s\"", point, a0, (v0), a1, (v1))
+#define KTR_POINT3(m, egroup, ident, point, a0, v0, a1, v1, a2, v2) \
+ KTR_EVENT3(m, egroup, ident, "point:\"%s\"", point, \
+ a0, (v0), a1, (v1), a2, (v2))
+#define KTR_POINT4(m, egroup, ident, point, a0, v0, a1, v1, a2, v2, a3, v3)\
+ KTR_EVENT4(m, egroup, ident, "point:\"%s\"", \
+ point, a0, (v0), a1, (v1), a2, (v2), a3, (v3))
+
+/*
+ * Trace initialization events, similar to CTR with KTR_INIT, but
+ * completely ifdef'ed out if KTR_INIT isn't in KTR_COMPILE (to
+ * save string space; the compiler doesn't optimize out the strings
+ * for the conditional ones above).
+ */
+#if (KTR_COMPILE & KTR_INIT) != 0
+#define ITR0(d) CTR0(KTR_INIT, d)
+#define ITR1(d, p1) CTR1(KTR_INIT, d, p1)
+#define ITR2(d, p1, p2) CTR2(KTR_INIT, d, p1, p2)
+#define ITR3(d, p1, p2, p3) CTR3(KTR_INIT, d, p1, p2, p3)
+#define ITR4(d, p1, p2, p3, p4) CTR4(KTR_INIT, d, p1, p2, p3, p4)
+#define ITR5(d, p1, p2, p3, p4, p5) CTR5(KTR_INIT, d, p1, p2, p3, p4, p5)
+#define ITR6(d, p1, p2, p3, p4, p5, p6) CTR6(KTR_INIT, d, p1, p2, p3, p4, p5, p6)
+#else
+#define ITR0(d)
+#define ITR1(d, p1)
+#define ITR2(d, p1, p2)
+#define ITR3(d, p1, p2, p3)
+#define ITR4(d, p1, p2, p3, p4)
+#define ITR5(d, p1, p2, p3, p4, p5)
+#define ITR6(d, p1, p2, p3, p4, p5, p6)
+#endif
+
+#endif /* !LOCORE */
+
+#endif /* !_SYS_KTR_HH_ */
diff --git a/rtems/freebsd/sys/libkern.h b/rtems/freebsd/sys/libkern.h
new file mode 100644
index 00000000..ff65eb85
--- /dev/null
+++ b/rtems/freebsd/sys/libkern.h
@@ -0,0 +1,195 @@
+/*-
+ * Copyright (c) 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)libkern.h 8.1 (Berkeley) 6/10/93
+ * $FreeBSD$
+ */
+
+#ifndef _SYS_LIBKERN_HH_
+#define _SYS_LIBKERN_HH_
+
+#include <rtems/freebsd/sys/cdefs.h>
+#include <rtems/freebsd/sys/types.h>
+#ifdef _KERNEL
+#include <rtems/freebsd/sys/systm.h>
+#endif
+
+#ifndef __rtems__
+#ifndef LIBKERN_INLINE
+#define LIBKERN_INLINE static __inline
+#define LIBKERN_BODY
+#endif
+#else /* __rtems__ */
+#define LIBKERN_INLINE
+#endif /* __rtems__ */
+
+/* BCD conversions. */
+extern u_char const bcd2bin_data[];
+extern u_char const bin2bcd_data[];
+extern char const hex2ascii_data[];
+
+#define bcd2bin(bcd) (bcd2bin_data[bcd])
+#define bin2bcd(bin) (bin2bcd_data[bin])
+#define hex2ascii(hex) (hex2ascii_data[hex])
+
+static __inline int imax(int a, int b) { return (a > b ? a : b); }
+static __inline int imin(int a, int b) { return (a < b ? a : b); }
+static __inline long lmax(long a, long b) { return (a > b ? a : b); }
+static __inline long lmin(long a, long b) { return (a < b ? a : b); }
+static __inline u_int max(u_int a, u_int b) { return (a > b ? a : b); }
+static __inline u_int min(u_int a, u_int b) { return (a < b ? a : b); }
+static __inline quad_t qmax(quad_t a, quad_t b) { return (a > b ? a : b); }
+static __inline quad_t qmin(quad_t a, quad_t b) { return (a < b ? a : b); }
+static __inline u_long ulmax(u_long a, u_long b) { return (a > b ? a : b); }
+static __inline u_long ulmin(u_long a, u_long b) { return (a < b ? a : b); }
+static __inline off_t omax(off_t a, off_t b) { return (a > b ? a : b); }
+static __inline off_t omin(off_t a, off_t b) { return (a < b ? a : b); }
+
+static __inline int abs(int a) { return (a < 0 ? -a : a); }
+static __inline long labs(long a) { return (a < 0 ? -a : a); }
+static __inline quad_t qabs(quad_t a) { return (a < 0 ? -a : a); }
+
+/* Prototypes for non-quad routines. */
+struct malloc_type;
+uint32_t arc4random(void);
+void arc4rand(void *ptr, u_int len, int reseed);
+int bcmp(const void *, const void *, size_t);
+void *bsearch(const void *, const void *, size_t,
+ size_t, int (*)(const void *, const void *));
+#ifndef HAVE_INLINE_FFS
+int ffs(int);
+#endif
+#ifndef HAVE_INLINE_FFSL
+int ffsl(long);
+#endif
+#ifndef HAVE_INLINE_FLS
+int fls(int);
+#endif
+#ifndef HAVE_INLINE_FLSL
+int flsl(long);
+#endif
+int fnmatch(const char *, const char *, int);
+void gets(char *, size_t, int);
+int locc(int, char *, u_int);
+int memcmp(const void *b1, const void *b2, size_t len);
+void qsort(void *base, size_t nmemb, size_t size,
+ int (*compar)(const void *, const void *));
+void qsort_r(void *base, size_t nmemb, size_t size, void *thunk,
+ int (*compar)(void *, const void *, const void *));
+u_long random(void);
+char *index(const char *, int);
+char *rindex(const char *, int);
+int scanc(u_int, const u_char *, const u_char *, int);
+int skpc(int, int, char *);
+void srandom(u_long);
+int strcasecmp(const char *, const char *);
+char *strcat(char * __restrict, const char * __restrict);
+int strcmp(const char *, const char *);
+char *strcpy(char * __restrict, const char * __restrict);
+size_t strcspn(const char * __restrict, const char * __restrict) __pure;
+char *strdup(const char *__restrict, struct malloc_type *);
+size_t strlcat(char *, const char *, size_t);
+size_t strlcpy(char *, const char *, size_t);
+size_t strlen(const char *);
+int strncasecmp(const char *, const char *, size_t);
+int strncmp(const char *, const char *, size_t);
+char *strncpy(char * __restrict, const char * __restrict, size_t);
+char *strsep(char **, const char *delim);
+size_t strspn(const char *, const char *);
+char *strstr(const char *, const char *);
+int strvalid(const char *, size_t);
+
+extern uint32_t crc32_tab[];
+
+static __inline uint32_t
+crc32_raw(const void *buf, size_t size, uint32_t crc)
+{
+ const uint8_t *p = (const uint8_t *)buf;
+
+ while (size--)
+ crc = crc32_tab[(crc ^ *p++) & 0xFF] ^ (crc >> 8);
+ return (crc);
+}
+
+static __inline uint32_t
+crc32(const void *buf, size_t size)
+{
+ uint32_t crc;
+
+ crc = crc32_raw(buf, size, ~0U);
+ return (crc ^ ~0U);
+}
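+
+/*
+ * Illustrative usage (hypothetical buffer): crc32() seeds crc32_raw()
+ * with ~0U and inverts the result, the conventional CRC-32 framing:
+ *
+ *	uint32_t sum = crc32(pkt, pktlen);
+ */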
+
+uint32_t
+calculate_crc32c(uint32_t crc32c, const unsigned char *buffer,
+ unsigned int length);
+
+
+LIBKERN_INLINE void *memset(void *, int, size_t);
+#ifdef LIBKERN_BODY
+LIBKERN_INLINE void *
+memset(void *b, int c, size_t len)
+{
+ char *bb;
+
+ if (c == 0)
+ bzero(b, len);
+ else
+ for (bb = (char *)b; len--; )
+ *bb++ = c;
+ return (b);
+}
+#endif
+
+#ifndef __rtems__
+static __inline char *
+strchr(const char *p, int ch)
+{
+ return (index(p, ch));
+}
+
+static __inline char *
+strrchr(const char *p, int ch)
+{
+ return (rindex(p, ch));
+}
+#endif /* __rtems__ */
+
+/* fnmatch() return values. */
+#define FNM_NOMATCH 1 /* Match failed. */
+
+/* fnmatch() flags. */
+#define FNM_NOESCAPE 0x01 /* Disable backslash escaping. */
+#define FNM_PATHNAME 0x02 /* Slash must be matched by slash. */
+#define FNM_PERIOD 0x04 /* Period must be matched by period. */
+#define FNM_LEADING_DIR 0x08 /* Ignore /<tail> after Imatch. */
+#define FNM_CASEFOLD 0x10 /* Case insensitive search. */
+#define FNM_IGNORECASE FNM_CASEFOLD
+#define FNM_FILE_NAME FNM_PATHNAME
+
+#endif /* !_SYS_LIBKERN_HH_ */
diff --git a/rtems/freebsd/sys/limits.h b/rtems/freebsd/sys/limits.h
new file mode 100644
index 00000000..936ffd88
--- /dev/null
+++ b/rtems/freebsd/sys/limits.h
@@ -0,0 +1 @@
+/* EMPTY */
diff --git a/rtems/freebsd/sys/linker.h b/rtems/freebsd/sys/linker.h
new file mode 100644
index 00000000..f68550b1
--- /dev/null
+++ b/rtems/freebsd/sys/linker.h
@@ -0,0 +1,349 @@
+/*-
+ * Copyright (c) 1997-2000 Doug Rabson
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _SYS_LINKER_HH_
+#define _SYS_LINKER_HH_
+
+#ifdef _KERNEL
+
+#include <rtems/freebsd/machine/elf.h>
+#include <rtems/freebsd/sys/kobj.h>
+
+#ifdef MALLOC_DECLARE
+MALLOC_DECLARE(M_LINKER);
+#endif
+
+struct mod_depend;
+
+/*
+ * Object representing a file which has been loaded by the linker.
+ */
+typedef struct linker_file* linker_file_t;
+typedef TAILQ_HEAD(, linker_file) linker_file_list_t;
+
+typedef caddr_t linker_sym_t; /* opaque symbol */
+typedef c_caddr_t c_linker_sym_t; /* const opaque symbol */
+typedef int (*linker_function_name_callback_t)(const char *, void *);
+
+/*
+ * The expanded-out form of linker_sym_t.
+ */
+typedef struct linker_symval {
+ const char* name;
+ caddr_t value;
+ size_t size;
+} linker_symval_t;
+
+typedef int (*linker_function_nameval_callback_t)(linker_file_t, int, linker_symval_t *, void *);
+
+struct common_symbol {
+ STAILQ_ENTRY(common_symbol) link;
+ char* name;
+ caddr_t address;
+};
+
+struct linker_file {
+ KOBJ_FIELDS;
+ int refs; /* reference count */
+ int userrefs; /* kldload(2) count */
+ int flags;
+#define LINKER_FILE_LINKED 0x1 /* file has been fully linked */
+ TAILQ_ENTRY(linker_file) link; /* list of all loaded files */
+ char* filename; /* file which was loaded */
+ char* pathname; /* file name with full path */
+ int id; /* unique id */
+ caddr_t address; /* load address */
+ size_t size; /* size of file */
+ int ndeps; /* number of dependencies */
+ linker_file_t* deps; /* list of dependencies */
+ STAILQ_HEAD(, common_symbol) common; /* list of common symbols */
+ TAILQ_HEAD(, module) modules; /* modules in this file */
+ TAILQ_ENTRY(linker_file) loaded; /* preload dependency support */
+ int loadcnt; /* load counter value */
+
+ /*
+ * Function Boundary Tracing (FBT) or Statically Defined Tracing (SDT)
+ * fields.
+ */
+ int nenabled; /* number of enabled probes. */
+ int fbt_nentries; /* number of fbt entries created. */
+ void *sdt_probes;
+ int sdt_nentries;
+ size_t sdt_nprobes;
+ size_t sdt_size;
+};
+
+/*
+ * Object implementing a class of file (a.out, elf, etc.)
+ */
+typedef struct linker_class *linker_class_t;
+typedef TAILQ_HEAD(, linker_class) linker_class_list_t;
+
+struct linker_class {
+ KOBJ_CLASS_FIELDS;
+ TAILQ_ENTRY(linker_class) link; /* list of all file classes */
+};
+
+/*
+ * Function type used when iterating over the list of linker files.
+ */
+typedef int linker_predicate_t(linker_file_t, void *);
+
+/*
+ * The "file" for the kernel.
+ */
+extern linker_file_t linker_kernel_file;
+
+/*
+ * Obtain a reference to a module, loading it if required.
+ */
+int linker_reference_module(const char* _modname, struct mod_depend *_verinfo,
+ linker_file_t* _result);
+
+/*
+ * Release a reference to a module, unloading it if there are no more
+ * references. Note that one should either provide a module name and
+ * optional version info or a linker file, but not both.
+ */
+int linker_release_module(const char *_modname, struct mod_depend *_verinfo,
+ linker_file_t _file);
+
+/*
+ * Iterate over all of the currently loaded linker files calling the
+ * predicate function while the function returns 0. Returns the value
+ * returned by the last predicate function.
+ */
+int linker_file_foreach(linker_predicate_t *_predicate, void *_context);
+
+/*
+ * Look up a symbol in a file. If deps is TRUE, look in dependencies
+ * if not found in file.
+ */
+caddr_t linker_file_lookup_symbol(linker_file_t _file, const char* _name,
+ int _deps);
+
+/*
+ * Look up a linker set in a file. Return pointers to the first entry,
+ * last + 1, and count of entries. Use: for (p = start; p < stop; p++) {}
+ * void *start is really: "struct yoursetmember ***start;"
+ */
+int linker_file_lookup_set(linker_file_t _file, const char *_name,
+ void *_start, void *_stop, int *_count);
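+
+/*
+ * Illustrative sketch (hypothetical set name and element type), following
+ * the usage pattern above:
+ *
+ *	struct foo **start, **stop, **p;
+ *	int count;
+ *
+ *	if (linker_file_lookup_set(file, "foo_set", &start, &stop,
+ *	    &count) == 0)
+ *		for (p = start; p < stop; p++)
+ *			handle_foo(*p);
+ */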
+
+/*
+ * List all functions in a file.
+ */
+int linker_file_function_listall(linker_file_t,
+ linker_function_nameval_callback_t, void *);
+
+/*
+ * Functions solely for use by the linker class handlers.
+ */
+int linker_add_class(linker_class_t _cls);
+int linker_file_unload(linker_file_t _file, int flags);
+int linker_load_dependencies(linker_file_t _lf);
+linker_file_t linker_make_file(const char* _filename, linker_class_t _cls);
+
+/*
+ * DDB Helpers, tuned specifically for ddb/db_kld.c
+ */
+int linker_ddb_lookup(const char *_symstr, c_linker_sym_t *_sym);
+int linker_ddb_search_symbol(caddr_t _value, c_linker_sym_t *_sym,
+ long *_diffp);
+int linker_ddb_symbol_values(c_linker_sym_t _sym, linker_symval_t *_symval);
+int linker_ddb_search_symbol_name(caddr_t value, char *buf, u_int buflen,
+ long *offset);
+
+/*
+ * stack(9) helper for situations where kernel locking is required.
+ */
+int linker_search_symbol_name(caddr_t value, char *buf, u_int buflen,
+ long *offset);
+
+
+/* HWPMC helper */
+void *linker_hwpmc_list_objects(void);
+
+#endif /* _KERNEL */
+
+/*
+ * Module information subtypes
+ */
+#define MODINFO_END 0x0000 /* End of list */
+#define MODINFO_NAME 0x0001 /* Name of module (string) */
+#define MODINFO_TYPE 0x0002 /* Type of module (string) */
+#define MODINFO_ADDR 0x0003 /* Loaded address */
+#define MODINFO_SIZE 0x0004 /* Size of module */
+#define MODINFO_EMPTY 0x0005 /* Has been deleted */
+#define MODINFO_ARGS 0x0006 /* Parameters string */
+#define MODINFO_METADATA	0x8000		/* Module-specific */
+
+#define MODINFOMD_AOUTEXEC 0x0001 /* a.out exec header */
+#define MODINFOMD_ELFHDR 0x0002 /* ELF header */
+#define MODINFOMD_SSYM 0x0003 /* start of symbols */
+#define MODINFOMD_ESYM 0x0004 /* end of symbols */
+#define MODINFOMD_DYNAMIC 0x0005 /* _DYNAMIC pointer */
+/* These values are MD on these two platforms */
+#if !defined(__sparc64__) && !defined(__powerpc__)
+#define MODINFOMD_ENVP 0x0006 /* envp[] */
+#define MODINFOMD_HOWTO 0x0007 /* boothowto */
+#define MODINFOMD_KERNEND 0x0008 /* kernend */
+#endif
+#define MODINFOMD_SHDR 0x0009 /* section header table */
+#define MODINFOMD_NOCOPY 0x8000 /* don't copy this metadata to the kernel */
+
+#define MODINFOMD_DEPLIST (0x4001 | MODINFOMD_NOCOPY) /* depends on */
+
+#ifdef _KERNEL
+#define MD_FETCH(mdp, info, type) ({ \
+ type *__p; \
+ __p = (type *)preload_search_info((mdp), MODINFO_METADATA | (info)); \
+ __p ? *__p : 0; \
+})
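+
+/*
+ * Illustrative usage (assumes "kmdp" points at the kernel's preload
+ * metadata): fetch the recorded end-of-kernel address, or 0 if absent:
+ *
+ *	vm_offset_t kernend = MD_FETCH(kmdp, MODINFOMD_KERNEND, vm_offset_t);
+ */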
+#endif
+
+#define LINKER_HINTS_VERSION 1 /* linker.hints file version */
+
+#ifdef _KERNEL
+
+/*
+ * Module lookup
+ */
+extern caddr_t preload_metadata;
+extern caddr_t preload_search_by_name(const char *_name);
+extern caddr_t preload_search_by_type(const char *_type);
+extern caddr_t preload_search_next_name(caddr_t _base);
+extern caddr_t preload_search_info(caddr_t _mod, int _inf);
+extern void preload_delete_name(const char *_name);
+extern void preload_bootstrap_relocate(vm_offset_t _offset);
+
+#ifdef KLD_DEBUG
+
+extern int kld_debug;
+#define KLD_DEBUG_FILE 1 /* file load/unload */
+#define KLD_DEBUG_SYM 2 /* symbol lookup */
+
+#define KLD_DPF(cat, args) \
+ do { \
+ if (kld_debug & KLD_DEBUG_##cat) printf args; \
+ } while (0)
+
+#else
+
+#define KLD_DPF(cat, args)
+
+#endif
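+
+/*
+ * Illustrative usage (hypothetical message): note that "args" is a fully
+ * parenthesized printf argument list:
+ *
+ *	KLD_DPF(FILE, ("%s: unloading file id %d\n", __func__, fileid));
+ */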
+
+#ifndef __rtems__
+typedef Elf_Addr elf_lookup_fn(linker_file_t, Elf_Size, int);
+
+/* Support functions */
+int elf_reloc(linker_file_t _lf, Elf_Addr base, const void *_rel, int _type, elf_lookup_fn _lu);
+int elf_reloc_local(linker_file_t _lf, Elf_Addr base, const void *_rel, int _type, elf_lookup_fn _lu);
+Elf_Addr elf_relocaddr(linker_file_t _lf, Elf_Addr addr);
+const Elf_Sym *elf_get_sym(linker_file_t _lf, Elf_Size _symidx);
+const char *elf_get_symname(linker_file_t _lf, Elf_Size _symidx);
+
+typedef struct linker_ctf {
+ const uint8_t *ctftab; /* Decompressed CTF data. */
+ int ctfcnt; /* Number of CTF data bytes. */
+ const Elf_Sym *symtab; /* Ptr to the symbol table. */
+ int nsym; /* Number of symbols. */
+ const char *strtab; /* Ptr to the string table. */
+ int strcnt; /* Number of string bytes. */
+ uint32_t **ctfoffp; /* Ptr to array of obj/fnc offsets. */
+ uint32_t **typoffp; /* Ptr to array of type offsets. */
+ long *typlenp; /* Ptr to number of type data entries. */
+} linker_ctf_t;
+
+int linker_ctf_get(linker_file_t, linker_ctf_t *);
+
+int elf_cpu_load_file(linker_file_t);
+int elf_cpu_unload_file(linker_file_t);
+
+/* values for type */
+#define ELF_RELOC_REL 1
+#define ELF_RELOC_RELA 2
+#endif /* __rtems__ */
+
+/*
+ * This is version 1 of the KLD file status structure. It is identified
+ * by its _size_ in the version field.
+ */
+struct kld_file_stat_1 {
+ int version; /* set to sizeof(struct kld_file_stat_1) */
+ char name[MAXPATHLEN];
+ int refs;
+ int id;
+ caddr_t address; /* load address */
+ size_t size; /* size in bytes */
+};
+#endif /* _KERNEL */
+
+struct kld_file_stat {
+ int version; /* set to sizeof(struct kld_file_stat) */
+ char name[MAXPATHLEN];
+ int refs;
+ int id;
+ caddr_t address; /* load address */
+ size_t size; /* size in bytes */
+ char pathname[MAXPATHLEN];
+};
+
+struct kld_sym_lookup {
+ int version; /* set to sizeof(struct kld_sym_lookup) */
+ char *symname; /* Symbol name we are looking up */
+ u_long symvalue;
+ size_t symsize;
+};
+#define KLDSYM_LOOKUP 1
+
+/*
+ * Flags for kldunloadf() and linker_file_unload()
+ */
+#define LINKER_UNLOAD_NORMAL 0
+#define LINKER_UNLOAD_FORCE 1
+
+#ifndef _KERNEL
+
+#include <rtems/freebsd/sys/cdefs.h>
+
+__BEGIN_DECLS
+int kldload(const char* _file);
+int kldunload(int _fileid);
+int kldunloadf(int _fileid, int flags);
+int kldfind(const char* _file);
+int kldnext(int _fileid);
+int kldstat(int _fileid, struct kld_file_stat* _stat);
+int kldfirstmod(int _fileid);
+int kldsym(int _fileid, int _cmd, void *_data);
+__END_DECLS
+
+#endif
+
+#endif /* !_SYS_LINKER_HH_ */
diff --git a/rtems/freebsd/sys/linker_set.h b/rtems/freebsd/sys/linker_set.h
new file mode 100644
index 00000000..94b307ef
--- /dev/null
+++ b/rtems/freebsd/sys/linker_set.h
@@ -0,0 +1,114 @@
+/*-
+ * Copyright (c) 1999 John D. Polstra
+ * Copyright (c) 1999,2001 Peter Wemm <peter@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _SYS_LINKER_SET_HH_
+#define _SYS_LINKER_SET_HH_
+
+#ifndef _SYS_CDEFS_HH_
+#error this file needs sys/cdefs.h as a prerequisite
+#endif
+
+/*
+ * The following macros are used to declare global sets of objects, which
+ * are collected by the linker into a `linker_set' as defined below.
+ * For ELF, this is done by constructing a separate section for each set.
+ */
+
+/*
+ * Private macros, not to be used outside this header file.
+ */
+#ifdef __GNUCLIKE___SECTION
+#ifndef __rtems__
+#define __MAKE_SET(set, sym) \
+ static void const * const __set_##set##_sym_##sym \
+ __section("set_" #set) __used = &sym
+#else /* __rtems__ */
+#define __MAKE_SET(set, sym) \
+ static void const * const __set_##set##_sym_##sym \
+ __section("_bsd_set_" #set) __used = &sym
+#endif /* __rtems__ */
+#else /* !__GNUCLIKE___SECTION */
+#ifndef lint
+#error this file needs to be ported to your compiler
+#endif /* lint */
+#define __MAKE_SET(set, sym) extern void const * const (__set_##set##_sym_##sym)
+#endif /* __GNUCLIKE___SECTION */
+
+/*
+ * Public macros.
+ */
+#define TEXT_SET(set, sym) __MAKE_SET(set, sym)
+#define DATA_SET(set, sym) __MAKE_SET(set, sym)
+#define BSS_SET(set, sym) __MAKE_SET(set, sym)
+#define ABS_SET(set, sym) __MAKE_SET(set, sym)
+#define SET_ENTRY(set, sym) __MAKE_SET(set, sym)
+
+/*
+ * Initialize before referring to a given linker set.
+ */
+#ifndef __rtems__
+#define SET_DECLARE(set, ptype) \
+ extern ptype *__CONCAT(__start_set_,set); \
+ extern ptype *__CONCAT(__stop_set_,set)
+
+#define SET_BEGIN(set) \
+ (&__CONCAT(__start_set_,set))
+#define SET_LIMIT(set) \
+ (&__CONCAT(__stop_set_,set))
+#else /* __rtems__ */
+#define SET_DECLARE(set, ptype) \
+ extern ptype *__CONCAT(_bsd__start_set_,set) []; \
+ extern ptype *__CONCAT(_bsd__stop_set_,set) []
+
+#define SET_BEGIN(set) \
+ (__CONCAT(_bsd__start_set_,set))
+#define SET_LIMIT(set) \
+ (__CONCAT(_bsd__stop_set_,set))
+#endif /* __rtems__ */
+
+/*
+ * Iterate over all the elements of a set.
+ *
+ * Sets always contain addresses of things, and "pvar" points to words
+ * containing those addresses. Thus it must be declared as "type **pvar",
+ * and the address of each set item is obtained inside the loop by "*pvar".
+ */
+#define SET_FOREACH(pvar, set) \
+ for (pvar = SET_BEGIN(set); pvar < SET_LIMIT(set); pvar++)
+
+#define SET_ITEM(set, i) \
+ ((SET_BEGIN(set))[i])
+
+/*
+ * Provide a count of the items in a set.
+ */
+#define SET_COUNT(set) \
+ (SET_LIMIT(set) - SET_BEGIN(set))
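+
+/*
+ * Illustrative sketch (hypothetical set and type): declare a set, add a
+ * member to it, and iterate over the collected addresses:
+ *
+ *	SET_DECLARE(widget_set, struct widget);
+ *	DATA_SET(widget_set, my_widget);
+ *
+ *	struct widget **wpp;
+ *	SET_FOREACH(wpp, widget_set)
+ *		widget_init(*wpp);
+ */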
+
+#endif /* _SYS_LINKER_SET_HH_ */
diff --git a/rtems/freebsd/sys/lock.h b/rtems/freebsd/sys/lock.h
new file mode 100644
index 00000000..c0cb63e6
--- /dev/null
+++ b/rtems/freebsd/sys/lock.h
@@ -0,0 +1,319 @@
+/*-
+ * Copyright (c) 1997 Berkeley Software Design, Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Berkeley Software Design Inc's name may not be used to endorse or
+ * promote products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from BSDI $Id: mutex.h,v 2.7.2.35 2000/04/27 03:10:26 cp Exp $
+ * $FreeBSD$
+ */
+
+#ifndef _SYS_LOCK_HH_
+#define _SYS_LOCK_HH_
+
+#include <rtems/freebsd/sys/queue.h>
+#include <rtems/freebsd/sys/_lock.h>
+
+struct lock_list_entry;
+struct thread;
+
+/*
+ * Lock classes. Each lock has a class which describes characteristics
+ * common to all types of locks of a given class.
+ *
+ * Spin locks in general must always protect against preemption, as it is
+ * an error to perform any type of context switch while holding a spin lock.
+ * Also, for an individual lock to be recursable, its class must allow
+ * recursion and the lock itself must explicitly allow recursion.
+ *
+ * The 'lc_ddb_show' function pointer is used to dump class-specific
+ * data for the 'show lock' DDB command. The 'lc_lock' and
+ * 'lc_unlock' function pointers are used in sleep(9) and cv_wait(9)
+ * to lock and unlock locks while blocking on a sleep queue. The
+ * return value of 'lc_unlock' will be passed to 'lc_lock' on resume
+ * to allow communication of state between the two routines.
+ */
+
+struct lock_class {
+ const char *lc_name;
+ u_int lc_flags;
+ void (*lc_assert)(struct lock_object *lock, int what);
+ void (*lc_ddb_show)(struct lock_object *lock);
+ void (*lc_lock)(struct lock_object *lock, int how);
+ int (*lc_owner)(struct lock_object *lock, struct thread **owner);
+ int (*lc_unlock)(struct lock_object *lock);
+};
+
+#define LC_SLEEPLOCK 0x00000001 /* Sleep lock. */
+#define LC_SPINLOCK 0x00000002 /* Spin lock. */
+#define LC_SLEEPABLE 0x00000004 /* Sleeping allowed with this lock. */
+#define LC_RECURSABLE 0x00000008 /* Locks of this type may recurse. */
+#define LC_UPGRADABLE 0x00000010 /* Upgrades and downgrades permitted. */
+
+#define LO_CLASSFLAGS 0x0000ffff /* Class specific flags. */
+#define LO_INITIALIZED 0x00010000 /* Lock has been initialized. */
+#define LO_WITNESS 0x00020000 /* Should witness monitor this lock. */
+#define LO_QUIET 0x00040000 /* Don't log locking operations. */
+#define LO_RECURSABLE 0x00080000 /* Lock may recurse. */
+#define LO_SLEEPABLE 0x00100000 /* Lock may be held while sleeping. */
+#define LO_UPGRADABLE 0x00200000 /* Lock may be upgraded/downgraded. */
+#define LO_DUPOK 0x00400000 /* Don't check for duplicate acquires */
+#define LO_CLASSMASK 0x0f000000 /* Class index bitmask. */
+#define LO_NOPROFILE 0x10000000 /* Don't profile this lock */
+
+/*
+ * Lock classes are statically assigned an index into the global lock_classes
+ * array. Debugging code looks up the lock class for a given lock object
+ * by indexing the array.
+ */
+#define LO_CLASSSHIFT 24
+#define LO_CLASSINDEX(lock) ((((lock)->lo_flags) & LO_CLASSMASK) >> LO_CLASSSHIFT)
+#define LOCK_CLASS(lock) (lock_classes[LO_CLASSINDEX((lock))])
+#define LOCK_CLASS_MAX (LO_CLASSMASK >> LO_CLASSSHIFT)
+
+/*
+ * Option flags passed to lock operations that witness also needs to know
+ * about or that are generic across all locks.
+ */
+#define LOP_NEWORDER 0x00000001 /* Define a new lock order. */
+#define LOP_QUIET 0x00000002 /* Don't log locking operations. */
+#define LOP_TRYLOCK 0x00000004 /* Don't check lock order. */
+#define LOP_EXCLUSIVE 0x00000008 /* Exclusive lock. */
+#define LOP_DUPOK 0x00000010 /* Don't check for duplicate acquires */
+
+/* Flags passed to witness_assert. */
+#define LA_MASKASSERT 0x000000ff /* Mask for witness defined asserts. */
+#define LA_UNLOCKED 0x00000000 /* Lock is unlocked. */
+#define LA_LOCKED 0x00000001 /* Lock is at least share locked. */
+#define LA_SLOCKED 0x00000002 /* Lock is exactly share locked. */
+#define LA_XLOCKED 0x00000004 /* Lock is exclusively locked. */
+#define LA_RECURSED 0x00000008 /* Lock is recursed. */
+#define LA_NOTRECURSED 0x00000010 /* Lock is not recursed. */
+
+#ifdef _KERNEL
+/*
+ * If any of WITNESS, INVARIANTS, or KTR_LOCK KTR tracing has been enabled,
+ * then turn on LOCK_DEBUG. When this option is on, extra debugging
+ * facilities such as tracking the file and line number of lock operations
+ * are enabled. Also, mutex locking operations are not inlined to avoid
+ * bloat from all the extra debugging code. We also have to turn on all the
+ * calling conventions for this debugging code in modules so that modules can
+ * work with both debug and non-debug kernels.
+ */
+#if defined(KLD_MODULE) || defined(WITNESS) || defined(INVARIANTS) || defined(INVARIANT_SUPPORT) || defined(KTR) || defined(LOCK_PROFILING)
+#define LOCK_DEBUG 1
+#else
+#define LOCK_DEBUG 0
+#endif
+
+/*
+ * In the LOCK_DEBUG case, use the filename and line numbers for debugging
+ * operations. Otherwise, use default values to avoid the unneeded bloat.
+ */
+#if LOCK_DEBUG > 0
+#define LOCK_FILE __FILE__
+#define LOCK_LINE __LINE__
+#else
+#define LOCK_FILE NULL
+#define LOCK_LINE 0
+#endif
+
+/*
+ * Macros for KTR_LOCK tracing.
+ *
+ * opname - name of this operation (LOCK/UNLOCK/SLOCK, etc.)
+ * lo - struct lock_object * for this lock
+ * flags - flags passed to the lock operation
+ * recurse - this lock's recursion level (or 0 if the class is not recursable)
+ * result - result of a try lock operation
+ * file - file name
+ * line - line number
+ */
+#define LOCK_LOG_TEST(lo, flags) \
+ (((flags) & LOP_QUIET) == 0 && ((lo)->lo_flags & LO_QUIET) == 0)
+
+#define LOCK_LOG_LOCK(opname, lo, flags, recurse, file, line) do { \
+ if (LOCK_LOG_TEST((lo), (flags))) \
+ CTR6(KTR_LOCK, opname " (%s) %s %p r = %d at %s:%d", \
+ LOCK_CLASS(lo)->lc_name, (lo)->lo_name, \
+ (lo), (u_int)(recurse), (file), (line)); \
+} while (0)
+
+#define LOCK_LOG_TRY(opname, lo, flags, result, file, line) do { \
+ if (LOCK_LOG_TEST((lo), (flags))) \
+ CTR6(KTR_LOCK, "TRY_" opname " (%s) %s %p result=%d at %s:%d",\
+ LOCK_CLASS(lo)->lc_name, (lo)->lo_name, \
+ (lo), (u_int)(result), (file), (line)); \
+} while (0)
+
+#define LOCK_LOG_INIT(lo, flags) do { \
+ if (LOCK_LOG_TEST((lo), (flags))) \
+ CTR4(KTR_LOCK, "%s: %p (%s) %s", __func__, (lo), \
+ LOCK_CLASS(lo)->lc_name, (lo)->lo_name); \
+} while (0)
+
+#define LOCK_LOG_DESTROY(lo, flags) LOCK_LOG_INIT(lo, flags)
+
+#define lock_initalized(lo) ((lo)->lo_flags & LO_INITIALIZED)
+
+/*
+ * Helpful macros for quickly coming up with assertions with informative
+ * panic messages.
+ */
+#define MPASS(ex) MPASS4(ex, #ex, __FILE__, __LINE__)
+#define MPASS2(ex, what) MPASS4(ex, what, __FILE__, __LINE__)
+#define MPASS3(ex, file, line) MPASS4(ex, #ex, file, line)
+#define MPASS4(ex, what, file, line) \
+ KASSERT((ex), ("Assertion %s failed at %s:%d", what, file, line))
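+
+/*
+ * Illustrative usage (hypothetical invariant): MPASS() panics with file
+ * and line information when the expression is false:
+ *
+ *	MPASS(curthread->td_critnest == 0);
+ */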
+
+extern struct lock_class lock_class_mtx_sleep;
+extern struct lock_class lock_class_mtx_spin;
+extern struct lock_class lock_class_sx;
+extern struct lock_class lock_class_rw;
+extern struct lock_class lock_class_rm;
+extern struct lock_class lock_class_lockmgr;
+
+extern struct lock_class *lock_classes[];
+
+void lock_init(struct lock_object *, struct lock_class *,
+ const char *, const char *, int);
+void lock_destroy(struct lock_object *);
+void spinlock_enter(void);
+void spinlock_exit(void);
+void witness_init(struct lock_object *, const char *);
+void witness_destroy(struct lock_object *);
+int witness_defineorder(struct lock_object *, struct lock_object *);
+void witness_checkorder(struct lock_object *, int, const char *, int,
+ struct lock_object *);
+void witness_lock(struct lock_object *, int, const char *, int);
+void witness_upgrade(struct lock_object *, int, const char *, int);
+void witness_downgrade(struct lock_object *, int, const char *, int);
+void witness_unlock(struct lock_object *, int, const char *, int);
+void witness_save(struct lock_object *, const char **, int *);
+void witness_restore(struct lock_object *, const char *, int);
+int witness_list_locks(struct lock_list_entry **,
+ int (*)(const char *, ...));
+int witness_warn(int, struct lock_object *, const char *, ...);
+void witness_assert(struct lock_object *, int, const char *, int);
+void witness_display_spinlock(struct lock_object *, struct thread *,
+ int (*)(const char *, ...));
+int witness_line(struct lock_object *);
+void witness_norelease(struct lock_object *);
+void witness_releaseok(struct lock_object *);
+const char *witness_file(struct lock_object *);
+void witness_thread_exit(struct thread *);
+
+#ifdef WITNESS
+
+/* Flags for witness_warn(). */
+#define WARN_GIANTOK 0x01 /* Giant is exempt from this check. */
+#define WARN_PANIC 0x02 /* Panic if check fails. */
+#define WARN_SLEEPOK 0x04 /* Sleepable locks are exempt from check. */
+
+#define WITNESS_INIT(lock, type) \
+ witness_init((lock), (type))
+
+#define WITNESS_DESTROY(lock) \
+ witness_destroy(lock)
+
+#define WITNESS_CHECKORDER(lock, flags, file, line, interlock) \
+ witness_checkorder((lock), (flags), (file), (line), (interlock))
+
+#define WITNESS_DEFINEORDER(lock1, lock2) \
+ witness_defineorder((struct lock_object *)(lock1), \
+ (struct lock_object *)(lock2))
+
+#define WITNESS_LOCK(lock, flags, file, line) \
+ witness_lock((lock), (flags), (file), (line))
+
+#define WITNESS_UPGRADE(lock, flags, file, line) \
+ witness_upgrade((lock), (flags), (file), (line))
+
+#define WITNESS_DOWNGRADE(lock, flags, file, line) \
+ witness_downgrade((lock), (flags), (file), (line))
+
+#define WITNESS_UNLOCK(lock, flags, file, line) \
+ witness_unlock((lock), (flags), (file), (line))
+
+#define WITNESS_CHECK(flags, lock, fmt, ...) \
+ witness_warn((flags), (lock), (fmt), ## __VA_ARGS__)
+
+#define WITNESS_WARN(flags, lock, fmt, ...) \
+ witness_warn((flags), (lock), (fmt), ## __VA_ARGS__)
+
+#define WITNESS_SAVE_DECL(n) \
+ const char * __CONCAT(n, __wf); \
+ int __CONCAT(n, __wl)
+
+#define WITNESS_SAVE(lock, n) \
+ witness_save((lock), &__CONCAT(n, __wf), &__CONCAT(n, __wl))
+
+#define WITNESS_RESTORE(lock, n) \
+ witness_restore((lock), __CONCAT(n, __wf), __CONCAT(n, __wl))
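+
+/*
+ * Illustrative sketch (hypothetical mutex): preserve witness state across
+ * a temporary release of a lock:
+ *
+ *	WITNESS_SAVE_DECL(foo);
+ *
+ *	WITNESS_SAVE(&foo_lock.lock_object, foo);
+ *	mtx_unlock(&foo_lock);
+ *	...
+ *	mtx_lock(&foo_lock);
+ *	WITNESS_RESTORE(&foo_lock.lock_object, foo);
+ */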
+
+#define WITNESS_NORELEASE(lock) \
+ witness_norelease(&(lock)->lock_object)
+
+#define WITNESS_RELEASEOK(lock) \
+ witness_releaseok(&(lock)->lock_object)
+
+#define WITNESS_FILE(lock) \
+ witness_file(lock)
+
+#define WITNESS_LINE(lock) \
+ witness_line(lock)
+
+#else /* WITNESS */
+#define WITNESS_INIT(lock, type) (void)0
+#define WITNESS_DESTROY(lock) (void)0
+#define WITNESS_DEFINEORDER(lock1, lock2) 0
+#define WITNESS_CHECKORDER(lock, flags, file, line, interlock) (void)0
+#define WITNESS_LOCK(lock, flags, file, line) (void)0
+#define WITNESS_UPGRADE(lock, flags, file, line) (void)0
+#define WITNESS_DOWNGRADE(lock, flags, file, line) (void)0
+#define WITNESS_UNLOCK(lock, flags, file, line) (void)0
+#define WITNESS_CHECK(flags, lock, fmt, ...) 0
+#define WITNESS_WARN(flags, lock, fmt, ...) (void)0
+#define WITNESS_SAVE_DECL(n) (void)0
+#define WITNESS_SAVE(lock, n) (void)0
+#define WITNESS_RESTORE(lock, n) (void)0
+#define WITNESS_NORELEASE(lock) (void)0
+#define WITNESS_RELEASEOK(lock) (void)0
+#define WITNESS_FILE(lock) ("?")
+#define WITNESS_LINE(lock) (0)
+#endif /* WITNESS */
+
+/*
+ * Helper macros to allow developers to add explicit lock order checks
+ * wherever they please without having to actually grab a lock to do so.
+ */
+#define witness_check(l) \
+ WITNESS_CHECKORDER(&(l)->lock_object, LOP_EXCLUSIVE, LOCK_FILE, \
+ LOCK_LINE, NULL)
+
+#define witness_check_shared(l) \
+ WITNESS_CHECKORDER(&(l)->lock_object, 0, LOCK_FILE, LOCK_LINE, NULL)
+
+#endif /* _KERNEL */
+#endif /* _SYS_LOCK_HH_ */
diff --git a/rtems/freebsd/sys/lock_profile.h b/rtems/freebsd/sys/lock_profile.h
new file mode 100644
index 00000000..4b438c34
--- /dev/null
+++ b/rtems/freebsd/sys/lock_profile.h
@@ -0,0 +1,75 @@
+/*-
+ * Copyright (c) 2006 Kip Macy kmacy@FreeBSD.org
+ * Copyright (c) 2006 Kris Kennaway kris@FreeBSD.org
+ * Copyright (c) 2006 Dag-Erling Smorgrav des@des.no
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+
+#ifndef _SYS_LOCK_PROFILE_HH_
+#define _SYS_LOCK_PROFILE_HH_
+
+struct lock_profile_object;
+LIST_HEAD(lpohead, lock_profile_object);
+
+#ifdef _KERNEL
+#ifdef LOCK_PROFILING
+#include <rtems/freebsd/machine/cpufunc.h>
+#include <rtems/freebsd/sys/lock.h>
+
+#ifndef USE_CPU_NANOSECONDS
+u_int64_t nanoseconds(void);
+#endif
+
+extern volatile int lock_prof_enable;
+
+void lock_profile_obtain_lock_success(struct lock_object *lo, int contested,
+ uint64_t waittime, const char *file, int line);
+void lock_profile_release_lock(struct lock_object *lo);
+void lock_profile_thread_exit(struct thread *td);
+
+
+static inline void
+lock_profile_obtain_lock_failed(struct lock_object *lo, int *contested,
+ uint64_t *waittime)
+{
+ if (!lock_prof_enable || (lo->lo_flags & LO_NOPROFILE) || *contested)
+ return;
+ *waittime = nanoseconds();
+ *contested = 1;
+}
+
+#else /* !LOCK_PROFILING */
+
+#define lock_profile_release_lock(lo) (void)0
+#define lock_profile_obtain_lock_failed(lo, contested, waittime) (void)0
+#define lock_profile_obtain_lock_success(lo, contested, waittime, file, line) (void)0
+#define lock_profile_thread_exit(td) (void)0
+
+#endif /* !LOCK_PROFILING */
+
+#endif /* _KERNEL */
+
+#endif /* _SYS_LOCK_PROFILE_HH_ */
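A hedged sketch of how a lock implementation would drive the hooks above;
foo_lock and foo_try_lock are hypothetical stand-ins for a real primitive
such as a mutex, whose acquire path is the actual consumer of this API:

    static void
    foo_lock(struct foo_lock *fl, const char *file, int line)
    {
            int contested = 0;
            uint64_t waittime = 0;

            while (!foo_try_lock(fl))
                    /* Record the start of the wait on first contention only. */
                    lock_profile_obtain_lock_failed(&fl->lock_object,
                        &contested, &waittime);
            /* Charge any contention time to this acquisition site. */
            lock_profile_obtain_lock_success(&fl->lock_object, contested,
                waittime, file, line);
    }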
diff --git a/rtems/freebsd/sys/lockmgr.h b/rtems/freebsd/sys/lockmgr.h
new file mode 100644
index 00000000..a58e963f
--- /dev/null
+++ b/rtems/freebsd/sys/lockmgr.h
@@ -0,0 +1,195 @@
+/*-
+ * Copyright (c) 2008 Attilio Rao <attilio@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice(s), this list of conditions and the following disclaimer as
+ * the first lines of this file unmodified other than the possible
+ * addition of one or more copyright notices.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice(s), this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _SYS_LOCKMGR_HH_
+#define _SYS_LOCKMGR_HH_
+
+#include <rtems/freebsd/sys/_lock.h>
+#include <rtems/freebsd/sys/_lockmgr.h>
+#include <rtems/freebsd/sys/_mutex.h>
+#include <rtems/freebsd/sys/_rwlock.h>
+
+#define LK_SHARE 0x01
+#define LK_SHARED_WAITERS 0x02
+#define LK_EXCLUSIVE_WAITERS 0x04
+#define LK_EXCLUSIVE_SPINNERS 0x08
+#define LK_ALL_WAITERS \
+ (LK_SHARED_WAITERS | LK_EXCLUSIVE_WAITERS)
+#define LK_FLAGMASK \
+ (LK_SHARE | LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS)
+
+#define LK_HOLDER(x) ((x) & ~LK_FLAGMASK)
+#define LK_SHARERS_SHIFT 4
+#define LK_SHARERS(x) (LK_HOLDER(x) >> LK_SHARERS_SHIFT)
+#define LK_SHARERS_LOCK(x) ((x) << LK_SHARERS_SHIFT | LK_SHARE)
+#define LK_ONE_SHARER (1 << LK_SHARERS_SHIFT)
+#define LK_UNLOCKED LK_SHARERS_LOCK(0)
+#define LK_KERNPROC ((uintptr_t)(-1) & ~LK_FLAGMASK)
+
+#ifdef _KERNEL
+
+#if !defined(LOCK_FILE) || !defined(LOCK_LINE)
+#error "LOCK_FILE and LOCK_LINE not defined, include <sys/lock.h> before"
+#endif
+
+struct thread;
+#define lk_recurse lock_object.lo_data
+
+/*
+ * Function prototypes. Routines that start with an underscore are not part
+ * of the public interface and may be wrapped by a macro.
+ */
+int __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
+ const char *wmesg, int prio, int timo, const char *file, int line);
+#if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
+void _lockmgr_assert(struct lock *lk, int what, const char *file, int line);
+#endif
+void _lockmgr_disown(struct lock *lk, const char *file, int line);
+
+void lockdestroy(struct lock *lk);
+void lockinit(struct lock *lk, int prio, const char *wmesg, int timo,
+ int flags);
+#ifdef DDB
+int lockmgr_chain(struct thread *td, struct thread **ownerp);
+#endif
+void lockmgr_printinfo(struct lock *lk);
+int lockstatus(struct lock *lk);
+
+/*
+ * Since the ilk may be a NULL pointer, these functions need a strict
+ * prototype in order to safely access the lock_object member.
+ */
+static __inline int
+_lockmgr_args(struct lock *lk, u_int flags, struct mtx *ilk, const char *wmesg,
+ int prio, int timo, const char *file, int line)
+{
+
+ return (__lockmgr_args(lk, flags, (ilk != NULL) ? &ilk->lock_object :
+ NULL, wmesg, prio, timo, file, line));
+}
+
+static __inline int
+_lockmgr_args_rw(struct lock *lk, u_int flags, struct rwlock *ilk,
+ const char *wmesg, int prio, int timo, const char *file, int line)
+{
+
+ return (__lockmgr_args(lk, flags, (ilk != NULL) ? &ilk->lock_object :
+ NULL, wmesg, prio, timo, file, line));
+}
+
+/*
+ * Define aliases in order to complete lockmgr KPI.
+ */
+#define lockmgr(lk, flags, ilk) \
+ _lockmgr_args((lk), (flags), (ilk), LK_WMESG_DEFAULT, \
+ LK_PRIO_DEFAULT, LK_TIMO_DEFAULT, LOCK_FILE, LOCK_LINE)
+#define lockmgr_args(lk, flags, ilk, wmesg, prio, timo) \
+ _lockmgr_args((lk), (flags), (ilk), (wmesg), (prio), (timo), \
+ LOCK_FILE, LOCK_LINE)
+#define lockmgr_args_rw(lk, flags, ilk, wmesg, prio, timo) \
+ _lockmgr_args_rw((lk), (flags), (ilk), (wmesg), (prio), (timo), \
+ LOCK_FILE, LOCK_LINE)
+#define lockmgr_disown(lk) \
+ _lockmgr_disown((lk), LOCK_FILE, LOCK_LINE)
+#define lockmgr_recursed(lk) \
+ ((lk)->lk_recurse != 0)
+#define lockmgr_rw(lk, flags, ilk) \
+ _lockmgr_args_rw((lk), (flags), (ilk), LK_WMESG_DEFAULT, \
+ LK_PRIO_DEFAULT, LK_TIMO_DEFAULT, LOCK_FILE, LOCK_LINE)
+#define lockmgr_waiters(lk) \
+ ((lk)->lk_lock & LK_ALL_WAITERS)
+#ifdef INVARIANTS
+#define lockmgr_assert(lk, what) \
+ _lockmgr_assert((lk), (what), LOCK_FILE, LOCK_LINE)
+#else
+#define lockmgr_assert(lk, what)
+#endif
+
+/*
+ * Flags for lockinit().
+ */
+#define LK_INIT_MASK 0x0000FF
+#define LK_CANRECURSE 0x000001
+#define LK_NODUP 0x000002
+#define LK_NOPROFILE 0x000004
+#define LK_NOSHARE 0x000008
+#define LK_NOWITNESS 0x000010
+#define LK_QUIET 0x000020
+#define LK_ADAPTIVE 0x000040
+
+/* LK_EXSLPFAIL follows in the lockinit() flag space, even though lockinit() does not use it */
+#define LK_EXSLPFAIL 0x000080
+
+/*
+ * Additional attributes to be used in lockmgr().
+ */
+#define LK_EATTR_MASK 0x00FF00
+#define LK_INTERLOCK 0x000100
+#define LK_NOWAIT 0x000200
+#define LK_RETRY 0x000400
+#define LK_SLEEPFAIL 0x000800
+#define LK_TIMELOCK 0x001000
+
+/*
+ * Operations for lockmgr().
+ */
+#define LK_TYPE_MASK 0xFF0000
+#define LK_DOWNGRADE 0x010000
+#define LK_DRAIN 0x020000
+#define LK_EXCLOTHER 0x040000
+#define LK_EXCLUSIVE 0x080000
+#define LK_RELEASE 0x100000
+#define LK_SHARED 0x200000
+#define LK_UPGRADE 0x400000
+
+#define LK_TOTAL_MASK (LK_INIT_MASK | LK_EATTR_MASK | LK_TYPE_MASK)
+
+/*
+ * Default values for lockmgr_args().
+ */
+#define LK_WMESG_DEFAULT (NULL)
+#define LK_PRIO_DEFAULT (0)
+#define LK_TIMO_DEFAULT (0)
+
+/*
+ * Assertion flags.
+ */
+#if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
+#define KA_LOCKED LA_LOCKED
+#define KA_SLOCKED LA_SLOCKED
+#define KA_XLOCKED LA_XLOCKED
+#define KA_UNLOCKED LA_UNLOCKED
+#define KA_RECURSED LA_RECURSED
+#define KA_NOTRECURSED LA_NOTRECURSED
+#endif
+
+#endif /* _KERNEL */
+
+#endif /* !_SYS_LOCKMGR_HH_ */
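An illustrative use of the lockmgr KPI declared above; foo_softc and its
helpers are hypothetical, a NULL interlock is passed, and the lockmgr()
alias supplies the LK_*_DEFAULT wait message, priority and timeout:

    struct foo_softc {
            struct lock     sc_lock;
            int             sc_count;
    };

    static void
    foo_attach(struct foo_softc *sc)
    {
            lockinit(&sc->sc_lock, 0, "foolk", 0, LK_CANRECURSE);
    }

    static void
    foo_bump(struct foo_softc *sc)
    {
            /* LK_EXCLUSIVE blocks until sole ownership is granted. */
            lockmgr(&sc->sc_lock, LK_EXCLUSIVE, NULL);
            sc->sc_count++;
            lockmgr(&sc->sc_lock, LK_RELEASE, NULL);
    }

    static void
    foo_detach(struct foo_softc *sc)
    {
            lockdestroy(&sc->sc_lock);
    }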
diff --git a/rtems/freebsd/sys/lockstat.h b/rtems/freebsd/sys/lockstat.h
new file mode 100644
index 00000000..ed9cffa9
--- /dev/null
+++ b/rtems/freebsd/sys/lockstat.h
@@ -0,0 +1,220 @@
+/*-
+ * Copyright (c) 2008-2009 Stacey Son <sson@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * DTrace lockstat provider definitions
+ *
+ */
+
+#ifndef _SYS_LOCKSTAT_H
+#define _SYS_LOCKSTAT_H
+
+#ifdef _KERNEL
+
+/*
+ * Spin Locks
+ */
+#define LS_MTX_SPIN_LOCK_ACQUIRE 0
+#define LS_MTX_SPIN_UNLOCK_RELEASE 1
+#define LS_MTX_SPIN_LOCK_SPIN 2
+
+/*
+ * Adaptive Locks
+ */
+#define LS_MTX_LOCK_ACQUIRE 3
+#define LS_MTX_UNLOCK_RELEASE 4
+#define LS_MTX_LOCK_SPIN 5
+#define LS_MTX_LOCK_BLOCK 6
+#define LS_MTX_TRYLOCK_ACQUIRE 7
+
+/*
+ * Reader/Writer Locks
+ */
+#define LS_RW_RLOCK_ACQUIRE 8
+#define LS_RW_RUNLOCK_RELEASE 9
+#define LS_RW_WLOCK_ACQUIRE 10
+#define LS_RW_WUNLOCK_RELEASE 11
+#define LS_RW_RLOCK_SPIN 12
+#define LS_RW_RLOCK_BLOCK 13
+#define LS_RW_WLOCK_SPIN 14
+#define LS_RW_WLOCK_BLOCK 15
+#define LS_RW_TRYUPGRADE_UPGRADE 16
+#define LS_RW_DOWNGRADE_DOWNGRADE 17
+
+/*
+ * Shared/Exclusive Locks
+ */
+#define LS_SX_SLOCK_ACQUIRE 18
+#define LS_SX_SUNLOCK_RELEASE 19
+#define LS_SX_XLOCK_ACQUIRE 20
+#define LS_SX_XUNLOCK_RELEASE 21
+#define LS_SX_SLOCK_SPIN 22
+#define LS_SX_SLOCK_BLOCK 23
+#define LS_SX_XLOCK_SPIN 24
+#define LS_SX_XLOCK_BLOCK 25
+#define LS_SX_TRYUPGRADE_UPGRADE 26
+#define LS_SX_DOWNGRADE_DOWNGRADE 27
+
+/*
+ * Thread Locks
+ */
+#define LS_THREAD_LOCK_SPIN 28
+
+/*
+ * Lockmanager Locks
+ * According to locking(9), lockmgr locks are "largely deprecated",
+ * so no support for them has been added to the lockstat provider.
+ */
+
+#define LS_NPROBES 29
+
+#define LS_MTX_LOCK "mtx_lock"
+#define LS_MTX_UNLOCK "mtx_unlock"
+#define LS_MTX_SPIN_LOCK "mtx_lock_spin"
+#define LS_MTX_SPIN_UNLOCK "mtx_unlock_spin"
+#define LS_MTX_TRYLOCK "mtx_trylock"
+#define LS_RW_RLOCK "rw_rlock"
+#define LS_RW_WLOCK "rw_wlock"
+#define LS_RW_RUNLOCK "rw_runlock"
+#define LS_RW_WUNLOCK "rw_wunlock"
+#define LS_RW_TRYUPGRADE "rw_try_upgrade"
+#define LS_RW_DOWNGRADE "rw_downgrade"
+#define LS_SX_SLOCK "sx_slock"
+#define LS_SX_XLOCK "sx_xlock"
+#define LS_SX_SUNLOCK "sx_sunlock"
+#define LS_SX_XUNLOCK "sx_xunlock"
+#define LS_SX_TRYUPGRADE "sx_try_upgrade"
+#define LS_SX_DOWNGRADE "sx_downgrade"
+#define LS_THREAD_LOCK "thread_lock"
+
+#define LS_ACQUIRE "acquire"
+#define LS_RELEASE "release"
+#define LS_SPIN "spin"
+#define LS_BLOCK "block"
+#define LS_UPGRADE "upgrade"
+#define LS_DOWNGRADE "downgrade"
+
+#define LS_TYPE_ADAPTIVE "adaptive"
+#define LS_TYPE_SPIN "spin"
+#define LS_TYPE_THREAD "thread"
+#define LS_TYPE_RW "rw"
+#define LS_TYPE_SX "sx"
+
+#define LSA_ACQUIRE (LS_TYPE_ADAPTIVE "-" LS_ACQUIRE)
+#define LSA_RELEASE (LS_TYPE_ADAPTIVE "-" LS_RELEASE)
+#define LSA_SPIN (LS_TYPE_ADAPTIVE "-" LS_SPIN)
+#define LSA_BLOCK (LS_TYPE_ADAPTIVE "-" LS_BLOCK)
+#define LSS_ACQUIRE (LS_TYPE_SPIN "-" LS_ACQUIRE)
+#define LSS_RELEASE (LS_TYPE_SPIN "-" LS_RELEASE)
+#define LSS_SPIN (LS_TYPE_SPIN "-" LS_SPIN)
+#define LSR_ACQUIRE (LS_TYPE_RW "-" LS_ACQUIRE)
+#define LSR_RELEASE (LS_TYPE_RW "-" LS_RELEASE)
+#define LSR_BLOCK (LS_TYPE_RW "-" LS_BLOCK)
+#define LSR_SPIN (LS_TYPE_RW "-" LS_SPIN)
+#define LSR_UPGRADE (LS_TYPE_RW "-" LS_UPGRADE)
+#define LSR_DOWNGRADE (LS_TYPE_RW "-" LS_DOWNGRADE)
+#define LSX_ACQUIRE (LS_TYPE_SX "-" LS_ACQUIRE)
+#define LSX_RELEASE (LS_TYPE_SX "-" LS_RELEASE)
+#define LSX_BLOCK (LS_TYPE_SX "-" LS_BLOCK)
+#define LSX_SPIN (LS_TYPE_SX "-" LS_SPIN)
+#define LSX_UPGRADE (LS_TYPE_SX "-" LS_UPGRADE)
+#define LSX_DOWNGRADE (LS_TYPE_SX "-" LS_DOWNGRADE)
+#define LST_SPIN (LS_TYPE_THREAD "-" LS_SPIN)
+
+/*
+ * The following must match the type definition of dtrace_probe. It is
+ * defined this way to avoid having to rely on CDDL code.
+ */
+extern uint32_t lockstat_probemap[LS_NPROBES];
+typedef void (*lockstat_probe_func_t)(uint32_t, uintptr_t arg0, uintptr_t arg1,
+ uintptr_t arg2, uintptr_t arg3, uintptr_t arg4);
+extern lockstat_probe_func_t lockstat_probe_func;
+extern uint64_t lockstat_nsecs(void);
+
+#ifdef KDTRACE_HOOKS
+/*
+ * Macros to record lockstat probes.
+ */
+#define LOCKSTAT_RECORD4(probe, lp, arg1, arg2, arg3, arg4) do { \
+ uint32_t id; \
+ \
+ if ((id = lockstat_probemap[(probe)])) \
+ (*lockstat_probe_func)(id, (uintptr_t)(lp), (arg1), (arg2), \
+ (arg3), (arg4)); \
+} while (0)
+
+#define LOCKSTAT_RECORD(probe, lp, arg1) \
+ LOCKSTAT_RECORD4(probe, lp, arg1, 0, 0, 0)
+
+#define LOCKSTAT_RECORD0(probe, lp) \
+ LOCKSTAT_RECORD4(probe, lp, 0, 0, 0, 0)
+
+#define LOCKSTAT_RECORD1(probe, lp, arg1) \
+ LOCKSTAT_RECORD4(probe, lp, arg1, 0, 0, 0)
+
+#define LOCKSTAT_RECORD2(probe, lp, arg1, arg2) \
+ LOCKSTAT_RECORD4(probe, lp, arg1, arg2, 0, 0)
+
+#define LOCKSTAT_RECORD3(probe, lp, arg1, arg2, arg3) \
+ LOCKSTAT_RECORD4(probe, lp, arg1, arg2, arg3, 0)
+
+#define LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(probe, lp, c, wt, f, l) do { \
+ uint32_t id; \
+ \
+ lock_profile_obtain_lock_success(&(lp)->lock_object, c, wt, f, l); \
+ if ((id = lockstat_probemap[(probe)])) \
+ (*lockstat_probe_func)(id, (uintptr_t)(lp), 0, 0, 0, 0); \
+} while (0)
+
+#define LOCKSTAT_PROFILE_RELEASE_LOCK(probe, lp) do { \
+ uint32_t id; \
+ \
+ lock_profile_release_lock(&(lp)->lock_object); \
+ if ((id = lockstat_probemap[(probe)])) \
+ (*lockstat_probe_func)(id, (uintptr_t)(lp), 0, 0, 0, 0); \
+} while (0)
+
+#else /* !KDTRACE_HOOKS */
+
+#define LOCKSTAT_RECORD(probe, lp, arg1)
+#define LOCKSTAT_RECORD0(probe, lp)
+#define LOCKSTAT_RECORD1(probe, lp, arg1)
+#define LOCKSTAT_RECORD2(probe, lp, arg1, arg2)
+#define LOCKSTAT_RECORD3(probe, lp, arg1, arg2, arg3)
+#define LOCKSTAT_RECORD4(probe, lp, arg1, arg2, arg3, arg4)
+
+#define LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(probe, lp, c, wt, f, l) \
+ lock_profile_obtain_lock_success(&(lp)->lock_object, c, wt, f, l)
+
+#define LOCKSTAT_PROFILE_RELEASE_LOCK(probe, lp) \
+ lock_profile_release_lock(&(lp)->lock_object)
+
+#endif /* !KDTRACE_HOOKS */
+
+#endif /* _KERNEL */
+
+#endif /* _SYS_LOCKSTAT_H */
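A sketch of how a lock implementation might fire these probes; the mutex
bookkeeping (contested, waittime) is assumed to come from the surrounding
acquire path and lock_profile(9):

    static void
    foo_mtx_acquired(struct mtx *m, int contested, uint64_t waittime,
        const char *file, int line)
    {
            /* Fires the adaptive-acquire probe and updates profiling stats. */
            LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_MTX_LOCK_ACQUIRE, m,
                contested, waittime, file, line);
    }

    static void
    foo_mtx_released(struct mtx *m)
    {
            LOCKSTAT_PROFILE_RELEASE_LOCK(LS_MTX_UNLOCK_RELEASE, m);
    }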
diff --git a/rtems/freebsd/sys/mac.h b/rtems/freebsd/sys/mac.h
new file mode 100644
index 00000000..f261c7ff
--- /dev/null
+++ b/rtems/freebsd/sys/mac.h
@@ -0,0 +1,111 @@
+/*-
+ * Copyright (c) 1999-2002 Robert N. M. Watson
+ * Copyright (c) 2001-2005 Networks Associates Technology, Inc.
+ * Copyright (c) 2005-2006 SPARTA, Inc.
+ * All rights reserved.
+ *
+ * This software was developed by Robert Watson for the TrustedBSD Project.
+ *
+ * This software was developed for the FreeBSD Project in part by Network
+ * Associates Laboratories, the Security Research Division of Network
+ * Associates, Inc. under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"),
+ * as part of the DARPA CHATS research program.
+ *
+ * This software was enhanced by SPARTA ISSO under SPAWAR contract
+ * N66001-04-C-6019 ("SEFOS").
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+/*
+ * Userland interface for Mandatory Access Control. Loosely based on the
+ * POSIX.1e API. More information may be found at:
+ *
+ * http://www.TrustedBSD.org/
+ */
+
+#ifndef _SYS_MAC_HH_
+#define _SYS_MAC_HH_
+
+#ifndef _POSIX_MAC
+#define _POSIX_MAC
+#endif
+
+/*
+ * MAC framework-related constants and limits.
+ */
+#define MAC_MAX_POLICY_NAME 32
+#define MAC_MAX_LABEL_ELEMENT_NAME 32
+#define MAC_MAX_LABEL_ELEMENT_DATA 4096
+#define MAC_MAX_LABEL_BUF_LEN 8192
+
+/*
+ * struct mac is the data structure used to carry MAC labels in system calls
+ * and ioctls between userspace and the kernel.
+ */
+struct mac {
+ size_t m_buflen;
+ char *m_string;
+};
+
+typedef struct mac *mac_t;
+
+#ifndef _KERNEL
+
+/*
+ * Location of the userland MAC framework configuration file. mac.conf
+ * sets defaults for MAC-aware applications.
+ */
+#define MAC_CONFFILE "/etc/mac.conf"
+
+/*
+ * Extended non-POSIX.1e interfaces that offer additional services available
+ * from the userland and kernel MAC frameworks.
+ */
+__BEGIN_DECLS
+int mac_execve(char *fname, char **argv, char **envv, mac_t _label);
+int mac_free(mac_t _label);
+int mac_from_text(mac_t *_label, const char *_text);
+int mac_get_fd(int _fd, mac_t _label);
+int mac_get_file(const char *_path, mac_t _label);
+int mac_get_link(const char *_path, mac_t _label);
+int mac_get_peer(int _fd, mac_t _label);
+int mac_get_pid(pid_t _pid, mac_t _label);
+int mac_get_proc(mac_t _label);
+int mac_is_present(const char *_policyname);
+int mac_prepare(mac_t *_label, const char *_elements);
+int mac_prepare_file_label(mac_t *_label);
+int mac_prepare_ifnet_label(mac_t *_label);
+int mac_prepare_process_label(mac_t *_label);
+int mac_prepare_type(mac_t *_label, const char *_type);
+int mac_set_fd(int _fildes, const mac_t _label);
+int mac_set_file(const char *_path, mac_t _label);
+int mac_set_link(const char *_path, mac_t _label);
+int mac_set_proc(const mac_t _label);
+int mac_syscall(const char *_policyname, int _call, void *_arg);
+int mac_to_text(mac_t mac, char **_text);
+__END_DECLS
+
+#endif /* !_KERNEL */
+
+#endif /* !_SYS_MAC_HH_ */
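An illustrative userland snippet for the extended interfaces above: fetch
and print the current process label. Error handling is trimmed, and the
plain <sys/mac.h> include path reflects normal FreeBSD userland usage:

    #include <sys/mac.h>
    #include <stdio.h>
    #include <stdlib.h>

    static void
    print_proc_label(void)
    {
            mac_t label;
            char *text;

            if (mac_prepare_process_label(&label) != 0)
                    return;
            if (mac_get_proc(label) == 0 && mac_to_text(label, &text) == 0) {
                    printf("process label: %s\n", text);
                    free(text);     /* mac_to_text() allocates the string */
            }
            mac_free(label);
    }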
diff --git a/rtems/freebsd/sys/malloc.h b/rtems/freebsd/sys/malloc.h
new file mode 100644
index 00000000..7d229310
--- /dev/null
+++ b/rtems/freebsd/sys/malloc.h
@@ -0,0 +1,189 @@
+/*-
+ * Copyright (c) 1987, 1993
+ * The Regents of the University of California.
+ * Copyright (c) 2005, 2009 Robert N. M. Watson
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)malloc.h 8.5 (Berkeley) 5/3/95
+ * $FreeBSD$
+ */
+
+#ifndef _SYS_MALLOC_HH_
+#define _SYS_MALLOC_HH_
+
+#include <rtems/freebsd/sys/param.h>
+#include <rtems/freebsd/sys/queue.h>
+#include <rtems/freebsd/sys/_lock.h>
+#include <rtems/freebsd/sys/_mutex.h>
+
+#define MINALLOCSIZE UMA_SMALLEST_UNIT
+
+/*
+ * flags to malloc.
+ */
+#define M_NOWAIT 0x0001 /* do not block */
+#define M_WAITOK 0x0002 /* ok to block */
+#define M_ZERO 0x0100 /* bzero the allocation */
+#define M_NOVM 0x0200 /* don't ask VM for pages */
+#define M_USE_RESERVE 0x0400 /* can alloc out of reserve memory */
+
+#define M_MAGIC 877983977 /* time when first defined :-) */
+
+/*
+ * Two malloc type structures are present: malloc_type, which is used by a
+ * type owner to declare the type, and malloc_type_internal, which holds
+ * malloc-owned statistics and other ABI-sensitive fields, such as the set of
+ * malloc statistics indexed by the compile-time MAXCPU constant.
+ * Applications should avoid introducing dependence on the allocator private
+ * data layout and size.
+ *
+ * The malloc_type ks_next field is protected by malloc_mtx. Other fields in
+ * malloc_type are static after initialization and thus need no
+ * synchronization.
+ *
+ * Statistics in malloc_type_stats are written only while holding a critical
+ * section and running on the CPU associated with the index into the stat
+ * array, but they are read lock-free, resulting in possible (minor) races
+ * that the monitoring app should take into account.
+ */
+struct malloc_type_stats {
+ uint64_t mts_memalloced; /* Bytes allocated on CPU. */
+ uint64_t mts_memfreed; /* Bytes freed on CPU. */
+ uint64_t mts_numallocs; /* Number of allocations on CPU. */
+ uint64_t mts_numfrees; /* Number of frees on CPU. */
+ uint64_t mts_size; /* Bitmask of sizes allocated on CPU. */
+ uint64_t _mts_reserved1; /* Reserved field. */
+ uint64_t _mts_reserved2; /* Reserved field. */
+ uint64_t _mts_reserved3; /* Reserved field. */
+};
+
+/*
+ * Index definitions for the mti_probes[] array.
+ */
+#define DTMALLOC_PROBE_MALLOC 0
+#define DTMALLOC_PROBE_FREE 1
+#define DTMALLOC_PROBE_MAX 2
+
+struct malloc_type_internal {
+ uint32_t mti_probes[DTMALLOC_PROBE_MAX];
+ /* DTrace probe ID array. */
+ struct malloc_type_stats mti_stats[MAXCPU];
+};
+
+/*
+ * Public data structure describing a malloc type. Private data is hung off
+ * of ks_handle to avoid encoding internal malloc(9) data structures in
+ * modules, which will statically allocate struct malloc_type.
+ */
+struct malloc_type {
+ struct malloc_type *ks_next; /* Next in global chain. */
+ u_long ks_magic; /* Detect programmer error. */
+ const char *ks_shortdesc; /* Printable type name. */
+ void *ks_handle; /* Priv. data, was lo_class. */
+};
+
+/*
+ * Statistics structure headers for user space. The kern.malloc sysctl
+ * exposes a structure stream consisting of a stream header, then a series of
+ * malloc type headers and statistics structures (quantity maxcpus). For
+ * convenience, the kernel will provide the current value of maxcpus at the
+ * head of the stream.
+ */
+#define MALLOC_TYPE_STREAM_VERSION 0x00000001
+struct malloc_type_stream_header {
+ uint32_t mtsh_version; /* Stream format version. */
+ uint32_t mtsh_maxcpus; /* Value of MAXCPU for stream. */
+ uint32_t mtsh_count; /* Number of records. */
+ uint32_t _mtsh_pad; /* Pad/reserved field. */
+};
+
+#define MALLOC_MAX_NAME 32
+struct malloc_type_header {
+ char mth_name[MALLOC_MAX_NAME];
+};
+
+#ifdef _KERNEL
+#define MALLOC_DEFINE(type, shortdesc, longdesc) \
+ struct malloc_type type[1] = { \
+ { NULL, M_MAGIC, shortdesc, NULL } \
+ }; \
+ SYSINIT(type##_init, SI_SUB_KMEM, SI_ORDER_SECOND, malloc_init, \
+ type); \
+ SYSUNINIT(type##_uninit, SI_SUB_KMEM, SI_ORDER_ANY, \
+ malloc_uninit, type)
+
+#define MALLOC_DECLARE(type) \
+ extern struct malloc_type type[1]
+
+MALLOC_DECLARE(M_CACHE);
+MALLOC_DECLARE(M_DEVBUF);
+MALLOC_DECLARE(M_TEMP);
+
+MALLOC_DECLARE(M_IP6OPT); /* for INET6 */
+MALLOC_DECLARE(M_IP6NDP); /* for INET6 */
+
+/*
+ * Deprecated macro versions of not-quite-malloc() and free().
+ */
+#define MALLOC(space, cast, size, type, flags) \
+ ((space) = (cast)malloc((u_long)(size), (type), (flags)))
+#define FREE(addr, type) free((addr), (type))
+
+/*
+ * XXX this should be declared in <sys/uio.h>, but that tends to fail
+ * because <sys/uio.h> is included in a header before the source file
+ * has a chance to include <sys/malloc.h> to get MALLOC_DECLARE() defined.
+ */
+MALLOC_DECLARE(M_IOV);
+
+extern struct mtx malloc_mtx;
+
+/*
+ * Function type used when iterating over the list of malloc types.
+ */
+typedef void malloc_type_list_func_t(struct malloc_type *, void *);
+
+void contigfree(void *addr, unsigned long size, struct malloc_type *type);
+void *contigmalloc(unsigned long size, struct malloc_type *type, int flags,
+ vm_paddr_t low, vm_paddr_t high, unsigned long alignment,
+ unsigned long boundary) __malloc_like;
+void free(void *addr, struct malloc_type *type);
+void *malloc(unsigned long size, struct malloc_type *type, int flags) __malloc_like;
+void malloc_init(void *);
+int malloc_last_fail(void);
+void malloc_type_allocated(struct malloc_type *type, unsigned long size);
+void malloc_type_freed(struct malloc_type *type, unsigned long size);
+void malloc_type_list(malloc_type_list_func_t *, void *);
+void malloc_uninit(void *);
+void *realloc(void *addr, unsigned long size, struct malloc_type *type,
+ int flags);
+void *reallocf(void *addr, unsigned long size, struct malloc_type *type,
+ int flags);
+
+struct malloc_type *malloc_desc2type(const char *desc);
+#endif /* _KERNEL */
+
+#endif /* !_SYS_MALLOC_HH_ */
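A minimal sketch of declaring and using a private malloc type; M_FOODEV and
the foo_buf_* helpers are hypothetical:

    MALLOC_DEFINE(M_FOODEV, "foodev", "foo device buffers");

    static void *
    foo_buf_alloc(unsigned long len)
    {
            /* M_WAITOK never returns NULL; M_ZERO zeroes the allocation. */
            return (malloc(len, M_FOODEV, M_WAITOK | M_ZERO));
    }

    static void
    foo_buf_free(void *p)
    {
            free(p, M_FOODEV);
    }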
diff --git a/rtems/freebsd/sys/mbuf.h b/rtems/freebsd/sys/mbuf.h
new file mode 100644
index 00000000..37d5fcfd
--- /dev/null
+++ b/rtems/freebsd/sys/mbuf.h
@@ -0,0 +1,1032 @@
+/*-
+ * Copyright (c) 1982, 1986, 1988, 1993
+ * The Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)mbuf.h 8.5 (Berkeley) 2/19/95
+ * $FreeBSD$
+ */
+
+#ifndef _SYS_MBUF_HH_
+#define _SYS_MBUF_HH_
+
+/* XXX: These includes suck. Sorry! */
+#include <rtems/freebsd/sys/queue.h>
+#ifdef _KERNEL
+#include <rtems/freebsd/sys/systm.h>
+#include <rtems/freebsd/vm/uma.h>
+#ifdef WITNESS
+#include <rtems/freebsd/sys/lock.h>
+#endif
+#endif
+
+/*
+ * Mbufs are of a single size, MSIZE (sys/param.h), which includes overhead.
+ * An mbuf may add a single "mbuf cluster" of size MCLBYTES (also in
+ * sys/param.h), which has no additional overhead and is used instead of the
+ * internal data area; this is done when at least MINCLSIZE of data must be
+ * stored. Additionally, it is possible to allocate a separate buffer
+ * externally and attach it to the mbuf in a way similar to that of mbuf
+ * clusters.
+ */
+#define MLEN (MSIZE - sizeof(struct m_hdr)) /* normal data len */
+#define MHLEN (MLEN - sizeof(struct pkthdr)) /* data len w/pkthdr */
+#define MINCLSIZE (MHLEN + 1) /* smallest amount to put in cluster */
+#define M_MAXCOMPRESS (MHLEN / 2) /* max amount to copy for compression */
+
+#ifdef _KERNEL
+/*-
+ * Macro for type conversion: convert mbuf pointer to data pointer of correct
+ * type:
+ *
+ * mtod(m, t) -- Convert mbuf pointer to data pointer of correct type.
+ */
+#define mtod(m, t) ((t)((m)->m_data))
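For example, a protocol input routine would typically view the (assumed
contiguous) data through a header structure; struct ip here stands in for
any such header:

    struct ip *ip = mtod(m, struct ip *);   /* data pointer as IP header */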
+
+/*
+ * Argument structure passed to UMA routines during mbuf and packet
+ * allocations.
+ */
+struct mb_args {
+ int flags; /* Flags for mbuf being allocated */
+ short type; /* Type of mbuf being allocated */
+};
+#endif /* _KERNEL */
+
+#if defined(__LP64__)
+#define M_HDR_PAD 6
+#else
+#define M_HDR_PAD 2
+#endif
+
+/*
+ * Header present at the beginning of every mbuf.
+ */
+struct m_hdr {
+ struct mbuf *mh_next; /* next buffer in chain */
+ struct mbuf *mh_nextpkt; /* next chain in queue/record */
+ caddr_t mh_data; /* location of data */
+ int mh_len; /* amount of data in this mbuf */
+ int mh_flags; /* flags; see below */
+ short mh_type; /* type of data in this mbuf */
+ uint8_t pad[M_HDR_PAD];/* word align */
+};
+
+/*
+ * Packet tag structure (see below for details).
+ */
+struct m_tag {
+ SLIST_ENTRY(m_tag) m_tag_link; /* List of packet tags */
+ u_int16_t m_tag_id; /* Tag ID */
+ u_int16_t m_tag_len; /* Length of data */
+ u_int32_t m_tag_cookie; /* ABI/Module ID */
+ void (*m_tag_free)(struct m_tag *);
+};
+
+/*
+ * Record/packet header in first mbuf of chain; valid only if M_PKTHDR is set.
+ */
+struct pkthdr {
+ struct ifnet *rcvif; /* rcv interface */
+ /* variables for ip and tcp reassembly */
+ void *header; /* pointer to packet header */
+ int len; /* total packet length */
+ uint32_t flowid; /* packet's 4-tuple system
+ * flow identifier
+ */
+ /* variables for hardware checksum */
+ int csum_flags; /* flags regarding checksum */
+ int csum_data; /* data field used by csum routines */
+ u_int16_t tso_segsz; /* TSO segment size */
+ union {
+ u_int16_t vt_vtag; /* Ethernet 802.1p+q vlan tag */
+ u_int16_t vt_nrecs; /* # of IGMPv3 records in this chain */
+ } PH_vt;
+ SLIST_HEAD(packet_tags, m_tag) tags; /* list of packet tags */
+};
+#define ether_vtag PH_vt.vt_vtag
+
+/*
+ * Description of external storage mapped into mbuf; valid only if M_EXT is
+ * set.
+ */
+struct m_ext {
+ caddr_t ext_buf; /* start of buffer */
+ void (*ext_free) /* free routine if not the usual */
+ (void *, void *);
+ void *ext_arg1; /* optional argument pointer */
+ void *ext_arg2; /* optional argument pointer */
+ u_int ext_size; /* size of buffer, for ext_free */
+ volatile u_int *ref_cnt; /* pointer to ref count info */
+ int ext_type; /* type of external storage */
+};
+
+/*
+ * The core of the mbuf object along with some shortcut defines for practical
+ * purposes.
+ */
+struct mbuf {
+ struct m_hdr m_hdr;
+ union {
+ struct {
+ struct pkthdr MH_pkthdr; /* M_PKTHDR set */
+ union {
+ struct m_ext MH_ext; /* M_EXT set */
+ char MH_databuf[MHLEN];
+ } MH_dat;
+ } MH;
+ char M_databuf[MLEN]; /* !M_PKTHDR, !M_EXT */
+ } M_dat;
+};
+#define m_next m_hdr.mh_next
+#define m_len m_hdr.mh_len
+#define m_data m_hdr.mh_data
+#define m_type m_hdr.mh_type
+#define m_flags m_hdr.mh_flags
+#define m_nextpkt m_hdr.mh_nextpkt
+#define m_act m_nextpkt
+#define m_pkthdr M_dat.MH.MH_pkthdr
+#define m_ext M_dat.MH.MH_dat.MH_ext
+#define m_pktdat M_dat.MH.MH_dat.MH_databuf
+#define m_dat M_dat.M_databuf
+
+/*
+ * mbuf flags.
+ */
+#define M_EXT 0x00000001 /* has associated external storage */
+#define M_PKTHDR 0x00000002 /* start of record */
+#define M_EOR 0x00000004 /* end of record */
+#define M_RDONLY 0x00000008 /* associated data is marked read-only */
+#define M_PROTO1 0x00000010 /* protocol-specific */
+#define M_PROTO2 0x00000020 /* protocol-specific */
+#define M_PROTO3 0x00000040 /* protocol-specific */
+#define M_PROTO4 0x00000080 /* protocol-specific */
+#define M_PROTO5 0x00000100 /* protocol-specific */
+#define M_BCAST 0x00000200 /* send/received as link-level broadcast */
+#define M_MCAST 0x00000400 /* send/received as link-level multicast */
+#define M_FRAG 0x00000800 /* packet is a fragment of a larger packet */
+#define M_FIRSTFRAG 0x00001000 /* packet is first fragment */
+#define M_LASTFRAG 0x00002000 /* packet is last fragment */
+#define M_SKIP_FIREWALL 0x00004000 /* skip firewall processing */
+#define M_FREELIST 0x00008000 /* mbuf is on the free list */
+#define M_VLANTAG 0x00010000 /* ether_vtag is valid */
+#define M_PROMISC 0x00020000 /* packet was not for us */
+#define M_NOFREE 0x00040000 /* do not free mbuf, embedded in cluster */
+#define M_PROTO6 0x00080000 /* protocol-specific */
+#define M_PROTO7 0x00100000 /* protocol-specific */
+#define M_PROTO8 0x00200000 /* protocol-specific */
+#define M_FLOWID 0x00400000 /* flowid is valid */
+/*
+ * For RELENG_{6,7} steal these flags for limited multiple routing table
+ * support. In RELENG_8 and beyond, use just one flag and a tag.
+ */
+#define M_FIB 0xF0000000 /* steal some bits to store fib number. */
+
+#define M_NOTIFICATION M_PROTO5 /* SCTP notification */
+
+/*
+ * Flags to purge when crossing layers.
+ */
+#define M_PROTOFLAGS \
+ (M_PROTO1|M_PROTO2|M_PROTO3|M_PROTO4|M_PROTO5|M_PROTO6|M_PROTO7|M_PROTO8)
+
+/*
+ * Flags preserved when copying m_pkthdr.
+ */
+#define M_COPYFLAGS \
+ (M_PKTHDR|M_EOR|M_RDONLY|M_PROTOFLAGS|M_SKIP_FIREWALL|M_BCAST|M_MCAST|\
+ M_FRAG|M_FIRSTFRAG|M_LASTFRAG|M_VLANTAG|M_PROMISC|M_FIB)
+
+/*
+ * External buffer types: identify ext_buf type.
+ */
+#define EXT_CLUSTER 1 /* mbuf cluster */
+#define EXT_SFBUF 2 /* sendfile(2)'s sf_bufs */
+#define EXT_JUMBOP 3 /* jumbo cluster 4096 bytes */
+#define EXT_JUMBO9 4 /* jumbo cluster 9216 bytes */
+#define EXT_JUMBO16 5 /* jumbo cluster 16184 bytes */
+#define EXT_PACKET 6 /* mbuf+cluster from packet zone */
+#define EXT_MBUF 7 /* external mbuf reference (M_IOVEC) */
+#define EXT_NET_DRV 100 /* custom ext_buf provided by net driver(s) */
+#define EXT_MOD_TYPE 200 /* custom module's ext_buf type */
+#define EXT_DISPOSABLE 300 /* can throw this buffer away w/page flipping */
+#define EXT_EXTREF 400 /* has externally maintained ref_cnt ptr */
+
+/*
+ * Flags indicating hw checksum support and sw checksum requirements. This
+ * field can be directly tested against if_data.ifi_hwassist.
+ */
+#define CSUM_IP 0x0001 /* will csum IP */
+#define CSUM_TCP 0x0002 /* will csum TCP */
+#define CSUM_UDP 0x0004 /* will csum UDP */
+#define CSUM_IP_FRAGS 0x0008 /* will csum IP fragments */
+#define CSUM_FRAGMENT 0x0010 /* will do IP fragmentation */
+#define CSUM_TSO 0x0020 /* will do TSO */
+#define CSUM_SCTP 0x0040 /* will csum SCTP */
+
+#define CSUM_IP_CHECKED 0x0100 /* did csum IP */
+#define CSUM_IP_VALID 0x0200 /* ... the csum is valid */
+#define CSUM_DATA_VALID 0x0400 /* csum_data field is valid */
+#define CSUM_PSEUDO_HDR 0x0800 /* csum_data has pseudo hdr */
+#define CSUM_SCTP_VALID 0x1000 /* SCTP checksum is valid */
+
+#define CSUM_DELAY_DATA (CSUM_TCP | CSUM_UDP)
+#define CSUM_DELAY_IP (CSUM_IP) /* XXX add ipv6 here too? */
+
+/*
+ * mbuf types.
+ */
+#define MT_NOTMBUF 0 /* USED INTERNALLY ONLY! Object is not mbuf */
+#define MT_DATA 1 /* dynamic (data) allocation */
+#define MT_HEADER MT_DATA /* packet header, use M_PKTHDR instead */
+#define MT_SONAME 8 /* socket name */
+#define MT_CONTROL 14 /* extra-data protocol message */
+#define MT_OOBDATA 15 /* expedited data */
+#define MT_NTYPES 16 /* number of mbuf types for mbtypes[] */
+
+#define MT_NOINIT 255 /* Not a type but a flag to allocate
+ a non-initialized mbuf */
+
+#define MB_NOTAGS 0x1UL /* no tags attached to mbuf */
+
+/*
+ * General mbuf allocator statistics structure.
+ *
+ * Many of these statistics are no longer used; we instead track many
+ * allocator statistics through UMA's built in statistics mechanism.
+ */
+struct mbstat {
+ u_long m_mbufs; /* XXX */
+ u_long m_mclusts; /* XXX */
+
+ u_long m_drain; /* times drained protocols for space */
+ u_long m_mcfail; /* XXX: times m_copym failed */
+ u_long m_mpfail; /* XXX: times m_pullup failed */
+ u_long m_msize; /* length of an mbuf */
+ u_long m_mclbytes; /* length of an mbuf cluster */
+ u_long m_minclsize; /* min length of data to allocate a cluster */
+ u_long m_mlen; /* length of data in an mbuf */
+ u_long m_mhlen; /* length of data in a header mbuf */
+
+ /* Number of mbtypes (gives # elems in mbtypes[] array) */
+ short m_numtypes;
+
+ /* XXX: Sendfile stats should eventually move to their own struct */
+ u_long sf_iocnt; /* times sendfile had to do disk I/O */
+ u_long sf_allocfail; /* times sfbuf allocation failed */
+ u_long sf_allocwait; /* times sfbuf allocation had to wait */
+};
+
+/*
+ * Flags specifying how an allocation should be made.
+ *
+ * The flag to use is as follows:
+ * - M_DONTWAIT or M_NOWAIT from an interrupt handler to not block allocation.
+ * - M_WAIT or M_WAITOK from wherever it is safe to block.
+ *
+ * M_DONTWAIT/M_NOWAIT means that we will not block the thread explicitly and
+ * if we cannot allocate immediately we may return NULL, whereas
+ * M_WAIT/M_WAITOK means that if we cannot allocate resources we
+ * will block until they are available, and thus never return NULL.
+ *
+ * XXX Eventually just phase this out to use M_WAITOK/M_NOWAIT.
+ */
+#define MBTOM(how) (how)
+#define M_DONTWAIT M_NOWAIT
+#define M_TRYWAIT M_WAITOK
+#define M_WAIT M_WAITOK
+
+/*
+ * String names of mbuf-related UMA(9) and malloc(9) types. Exposed to
+ * !_KERNEL so that monitoring tools can look up the zones with
+ * libmemstat(3).
+ */
+#define MBUF_MEM_NAME "mbuf"
+#define MBUF_CLUSTER_MEM_NAME "mbuf_cluster"
+#define MBUF_PACKET_MEM_NAME "mbuf_packet"
+#define MBUF_JUMBOP_MEM_NAME "mbuf_jumbo_page"
+#define MBUF_JUMBO9_MEM_NAME "mbuf_jumbo_9k"
+#define MBUF_JUMBO16_MEM_NAME "mbuf_jumbo_16k"
+#define MBUF_TAG_MEM_NAME "mbuf_tag"
+#define MBUF_EXTREFCNT_MEM_NAME "mbuf_ext_refcnt"
+
+#ifdef _KERNEL
+
+#ifdef WITNESS
+#define MBUF_CHECKSLEEP(how) do { \
+ if (how == M_WAITOK) \
+ WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, \
+ "Sleeping in \"%s\"", __func__); \
+} while (0)
+#else
+#define MBUF_CHECKSLEEP(how)
+#endif
+
+/*
+ * Network buffer allocation API
+ *
+ * The rest of it is defined in kern/kern_mbuf.c
+ */
+
+extern uma_zone_t zone_mbuf;
+extern uma_zone_t zone_clust;
+extern uma_zone_t zone_pack;
+extern uma_zone_t zone_jumbop;
+extern uma_zone_t zone_jumbo9;
+extern uma_zone_t zone_jumbo16;
+extern uma_zone_t zone_ext_refcnt;
+
+static __inline struct mbuf *m_getcl(int how, short type, int flags);
+static __inline struct mbuf *m_get(int how, short type);
+static __inline struct mbuf *m_gethdr(int how, short type);
+static __inline struct mbuf *m_getjcl(int how, short type, int flags,
+ int size);
+static __inline struct mbuf *m_getclr(int how, short type); /* XXX */
+static __inline int m_init(struct mbuf *m, uma_zone_t zone,
+ int size, int how, short type, int flags);
+static __inline struct mbuf *m_free(struct mbuf *m);
+static __inline void m_clget(struct mbuf *m, int how);
+static __inline void *m_cljget(struct mbuf *m, int how, int size);
+static __inline void m_chtype(struct mbuf *m, short new_type);
+void mb_free_ext(struct mbuf *);
+static __inline struct mbuf *m_last(struct mbuf *m);
+int m_pkthdr_init(struct mbuf *m, int how);
+
+static __inline int
+m_gettype(int size)
+{
+ int type;
+
+ switch (size) {
+ case MSIZE:
+ type = EXT_MBUF;
+ break;
+ case MCLBYTES:
+ type = EXT_CLUSTER;
+ break;
+#if MJUMPAGESIZE != MCLBYTES
+ case MJUMPAGESIZE:
+ type = EXT_JUMBOP;
+ break;
+#endif
+ case MJUM9BYTES:
+ type = EXT_JUMBO9;
+ break;
+ case MJUM16BYTES:
+ type = EXT_JUMBO16;
+ break;
+ default:
+ panic("%s: m_getjcl: invalid cluster size", __func__);
+ }
+
+ return (type);
+}
+
+static __inline uma_zone_t
+m_getzone(int size)
+{
+ uma_zone_t zone;
+
+ switch (size) {
+ case MSIZE:
+ zone = zone_mbuf;
+ break;
+ case MCLBYTES:
+ zone = zone_clust;
+ break;
+#if MJUMPAGESIZE != MCLBYTES
+ case MJUMPAGESIZE:
+ zone = zone_jumbop;
+ break;
+#endif
+ case MJUM9BYTES:
+ zone = zone_jumbo9;
+ break;
+ case MJUM16BYTES:
+ zone = zone_jumbo16;
+ break;
+ default:
+ panic("%s: m_getjcl: invalid cluster type", __func__);
+ }
+
+ return (zone);
+}
+
+/*
+ * Initialize an mbuf with linear storage.
+ *
+ * Inline because the caller-side text overhead is roughly the same whether
+ * we initialize here or call a function with this many parameters, and the
+ * M_PKTHDR test should be removed by constant propagation for !MGETHDR.
+ */
+static __inline int
+m_init(struct mbuf *m, uma_zone_t zone, int size, int how, short type,
+ int flags)
+{
+ int error;
+
+ m->m_next = NULL;
+ m->m_nextpkt = NULL;
+ m->m_data = m->m_dat;
+ m->m_len = 0;
+ m->m_flags = flags;
+ m->m_type = type;
+ if (flags & M_PKTHDR) {
+ if ((error = m_pkthdr_init(m, how)) != 0)
+ return (error);
+ }
+
+ return (0);
+}
+
+static __inline struct mbuf *
+m_get(int how, short type)
+{
+ struct mb_args args;
+
+ args.flags = 0;
+ args.type = type;
+ return ((struct mbuf *)(uma_zalloc_arg(zone_mbuf, &args, how)));
+}
+
+/*
+ * XXX This should be deprecated, very little use.
+ */
+static __inline struct mbuf *
+m_getclr(int how, short type)
+{
+ struct mbuf *m;
+ struct mb_args args;
+
+ args.flags = 0;
+ args.type = type;
+ m = uma_zalloc_arg(zone_mbuf, &args, how);
+ if (m != NULL)
+ bzero(m->m_data, MLEN);
+ return (m);
+}
+
+static __inline struct mbuf *
+m_gethdr(int how, short type)
+{
+ struct mb_args args;
+
+ args.flags = M_PKTHDR;
+ args.type = type;
+ return ((struct mbuf *)(uma_zalloc_arg(zone_mbuf, &args, how)));
+}
+
+static __inline struct mbuf *
+m_getcl(int how, short type, int flags)
+{
+ struct mb_args args;
+
+ args.flags = flags;
+ args.type = type;
+ return ((struct mbuf *)(uma_zalloc_arg(zone_pack, &args, how)));
+}
+
+/*
+ * m_getjcl() returns an mbuf with a cluster of the specified size attached.
+ * For size it takes MCLBYTES, MJUMPAGESIZE, MJUM9BYTES, MJUM16BYTES.
+ *
+ * XXX: This is rather large; it should perhaps be a real function.
+ */
+static __inline struct mbuf *
+m_getjcl(int how, short type, int flags, int size)
+{
+ struct mb_args args;
+ struct mbuf *m, *n;
+ uma_zone_t zone;
+
+ if (size == MCLBYTES)
+ return m_getcl(how, type, flags);
+
+ args.flags = flags;
+ args.type = type;
+
+ m = uma_zalloc_arg(zone_mbuf, &args, how);
+ if (m == NULL)
+ return (NULL);
+
+ zone = m_getzone(size);
+ n = uma_zalloc_arg(zone, m, how);
+ if (n == NULL) {
+ uma_zfree(zone_mbuf, m);
+ return (NULL);
+ }
+ return (m);
+}
+
+static __inline void
+m_free_fast(struct mbuf *m)
+{
+#ifdef INVARIANTS
+ if (m->m_flags & M_PKTHDR)
+ KASSERT(SLIST_EMPTY(&m->m_pkthdr.tags), ("doing fast free of mbuf with tags"));
+#endif
+ uma_zfree_arg(zone_mbuf, m, (void *)MB_NOTAGS);
+}
+
+static __inline struct mbuf *
+m_free(struct mbuf *m)
+{
+ struct mbuf *n = m->m_next;
+
+ if (m->m_flags & M_EXT)
+ mb_free_ext(m);
+ else if ((m->m_flags & M_NOFREE) == 0)
+ uma_zfree(zone_mbuf, m);
+ return (n);
+}
+
+static __inline void
+m_clget(struct mbuf *m, int how)
+{
+
+ if (m->m_flags & M_EXT)
+ printf("%s: %p mbuf already has cluster\n", __func__, m);
+ m->m_ext.ext_buf = (char *)NULL;
+ uma_zalloc_arg(zone_clust, m, how);
+ /*
+ * On a cluster allocation failure, drain the packet zone and retry;
+ * the drain may free up a few clusters.
+ */
+ if ((how & M_NOWAIT) && (m->m_ext.ext_buf == NULL)) {
+ zone_drain(zone_pack);
+ uma_zalloc_arg(zone_clust, m, how);
+ }
+}
+
+/*
+ * m_cljget() is different from m_clget() as it can allocate clusters without
+ * attaching them to an mbuf. In that case the return value is the pointer
+ * to the cluster of the requested size. If an mbuf was specified, it gets
+ * the cluster attached to it and the return value can be safely ignored.
+ * For size it takes MCLBYTES, MJUMPAGESIZE, MJUM9BYTES, MJUM16BYTES.
+ */
+static __inline void *
+m_cljget(struct mbuf *m, int how, int size)
+{
+ uma_zone_t zone;
+
+ if (m && m->m_flags & M_EXT)
+ printf("%s: %p mbuf already has cluster\n", __func__, m);
+ if (m != NULL)
+ m->m_ext.ext_buf = NULL;
+
+ zone = m_getzone(size);
+ return (uma_zalloc_arg(zone, m, how));
+}
+
+static __inline void
+m_cljset(struct mbuf *m, void *cl, int type)
+{
+ uma_zone_t zone;
+ int size;
+
+ switch (type) {
+ case EXT_CLUSTER:
+ size = MCLBYTES;
+ zone = zone_clust;
+ break;
+#if MJUMPAGESIZE != MCLBYTES
+ case EXT_JUMBOP:
+ size = MJUMPAGESIZE;
+ zone = zone_jumbop;
+ break;
+#endif
+ case EXT_JUMBO9:
+ size = MJUM9BYTES;
+ zone = zone_jumbo9;
+ break;
+ case EXT_JUMBO16:
+ size = MJUM16BYTES;
+ zone = zone_jumbo16;
+ break;
+ default:
+ panic("unknown cluster type");
+ break;
+ }
+
+ m->m_data = m->m_ext.ext_buf = cl;
+ m->m_ext.ext_free = m->m_ext.ext_arg1 = m->m_ext.ext_arg2 = NULL;
+ m->m_ext.ext_size = size;
+ m->m_ext.ext_type = type;
+ m->m_ext.ref_cnt = uma_find_refcnt(zone, cl);
+ m->m_flags |= M_EXT;
+
+}
+
+static __inline void
+m_chtype(struct mbuf *m, short new_type)
+{
+
+ m->m_type = new_type;
+}
+
+static __inline struct mbuf *
+m_last(struct mbuf *m)
+{
+
+ while (m->m_next)
+ m = m->m_next;
+ return (m);
+}
+
+/*
+ * mbuf, cluster, and external object allocation macros (for compatibility
+ * purposes).
+ */
+#define M_MOVE_PKTHDR(to, from) m_move_pkthdr((to), (from))
+#define MGET(m, how, type) ((m) = m_get((how), (type)))
+#define MGETHDR(m, how, type) ((m) = m_gethdr((how), (type)))
+#define MCLGET(m, how) m_clget((m), (how))
+#define MEXTADD(m, buf, size, free, arg1, arg2, flags, type) \
+ m_extadd((m), (caddr_t)(buf), (size), (free),(arg1),(arg2),(flags), (type))
+#define m_getm(m, len, how, type) \
+ m_getm2((m), (len), (how), (type), M_PKTHDR)
+
+/*
+ * Evaluates to TRUE if it is safe to write to the mbuf m's data region (this
+ * can be either the local data payload or an external buffer area, depending
+ * on whether M_EXT is set).
+ */
+#define M_WRITABLE(m) (!((m)->m_flags & M_RDONLY) && \
+ (!(((m)->m_flags & M_EXT)) || \
+ (*((m)->m_ext.ref_cnt) == 1)))
+
+/* Check if the supplied mbuf has a packet header, or else panic. */
+#define M_ASSERTPKTHDR(m) \
+ KASSERT((m) != NULL && (m)->m_flags & M_PKTHDR, \
+ ("%s: no mbuf packet header!", __func__))
+
+/*
+ * Ensure that the supplied mbuf is a valid, non-free mbuf.
+ *
+ * XXX: Broken at the moment. Need some UMA magic to make it work again.
+ */
+#define M_ASSERTVALID(m) \
+ KASSERT((((struct mbuf *)m)->m_flags & 0) == 0, \
+ ("%s: attempted use of a free mbuf!", __func__))
+
+/*
+ * Set the m_data pointer of a newly-allocated mbuf (m_get/MGET) to place an
+ * object of the specified size at the end of the mbuf, longword aligned.
+ */
+#define M_ALIGN(m, len) do { \
+ KASSERT(!((m)->m_flags & (M_PKTHDR|M_EXT)), \
+ ("%s: M_ALIGN not normal mbuf", __func__)); \
+ KASSERT((m)->m_data == (m)->m_dat, \
+ ("%s: M_ALIGN not a virgin mbuf", __func__)); \
+ (m)->m_data += (MLEN - (len)) & ~(sizeof(long) - 1); \
+} while (0)
+
+/*
+ * As above, for mbufs allocated with m_gethdr/MGETHDR or initialized by
+ * M_DUP/MOVE_PKTHDR.
+ */
+#define MH_ALIGN(m, len) do { \
+ KASSERT((m)->m_flags & M_PKTHDR && !((m)->m_flags & M_EXT), \
+ ("%s: MH_ALIGN not PKTHDR mbuf", __func__)); \
+ KASSERT((m)->m_data == (m)->m_pktdat, \
+ ("%s: MH_ALIGN not a virgin mbuf", __func__)); \
+ (m)->m_data += (MHLEN - (len)) & ~(sizeof(long) - 1); \
+} while (0)
+
+/*
+ * Compute the amount of space available before the current start of data in
+ * an mbuf.
+ *
+ * The M_WRITABLE() is a temporary, conservative safety measure: the burden
+ * of checking writability of the mbuf data area rests solely with the caller.
+ */
+#define M_LEADINGSPACE(m) \
+ ((m)->m_flags & M_EXT ? \
+ (M_WRITABLE(m) ? (m)->m_data - (m)->m_ext.ext_buf : 0): \
+ (m)->m_flags & M_PKTHDR ? (m)->m_data - (m)->m_pktdat : \
+ (m)->m_data - (m)->m_dat)
+
+/*
+ * Compute the amount of space available after the end of data in an mbuf.
+ *
+ * The M_WRITABLE() is a temporary, conservative safety measure: the burden
+ * of checking writability of the mbuf data area rests solely with the caller.
+ */
+#define M_TRAILINGSPACE(m) \
+ ((m)->m_flags & M_EXT ? \
+ (M_WRITABLE(m) ? (m)->m_ext.ext_buf + (m)->m_ext.ext_size \
+ - ((m)->m_data + (m)->m_len) : 0) : \
+ &(m)->m_dat[MLEN] - ((m)->m_data + (m)->m_len))
+
+/*
+ * Arrange to prepend space of size plen to mbuf m. If a new mbuf must be
+ * allocated, how specifies whether to wait. If the allocation fails, the
+ * original mbuf chain is freed and m is set to NULL.
+ */
+#define M_PREPEND(m, plen, how) do { \
+ struct mbuf **_mmp = &(m); \
+ struct mbuf *_mm = *_mmp; \
+ int _mplen = (plen); \
+ int __mhow = (how); \
+ \
+ MBUF_CHECKSLEEP(how); \
+ if (M_LEADINGSPACE(_mm) >= _mplen) { \
+ _mm->m_data -= _mplen; \
+ _mm->m_len += _mplen; \
+ } else \
+ _mm = m_prepend(_mm, _mplen, __mhow); \
+ if (_mm != NULL && _mm->m_flags & M_PKTHDR) \
+ _mm->m_pkthdr.len += _mplen; \
+ *_mmp = _mm; \
+} while (0)
+
+/*
+ * Change mbuf to new type. This is a relatively expensive operation and
+ * should be avoided.
+ */
+#define MCHTYPE(m, t) m_chtype((m), (t))
+
+/* Length to m_copy to copy all. */
+#define M_COPYALL 1000000000
+
+/* Compatibility with 4.3. */
+#define m_copy(m, o, l) m_copym((m), (o), (l), M_DONTWAIT)
+
+extern int max_datalen; /* MHLEN - max_hdr */
+extern int max_hdr; /* Largest link + protocol header */
+extern int max_linkhdr; /* Largest link-level header */
+extern int max_protohdr; /* Largest protocol header */
+extern struct mbstat mbstat; /* General mbuf stats/infos */
+extern int nmbclusters; /* Maximum number of clusters */
+
+struct uio;
+
+void m_adj(struct mbuf *, int);
+void m_align(struct mbuf *, int);
+int m_apply(struct mbuf *, int, int,
+ int (*)(void *, void *, u_int), void *);
+int m_append(struct mbuf *, int, c_caddr_t);
+void m_cat(struct mbuf *, struct mbuf *);
+void m_extadd(struct mbuf *, caddr_t, u_int,
+ void (*)(void *, void *), void *, void *, int, int);
+struct mbuf *m_collapse(struct mbuf *, int, int);
+void m_copyback(struct mbuf *, int, int, c_caddr_t);
+void m_copydata(const struct mbuf *, int, int, caddr_t);
+struct mbuf *m_copym(struct mbuf *, int, int, int);
+struct mbuf *m_copymdata(struct mbuf *, struct mbuf *,
+ int, int, int, int);
+struct mbuf *m_copypacket(struct mbuf *, int);
+void m_copy_pkthdr(struct mbuf *, struct mbuf *);
+struct mbuf *m_copyup(struct mbuf *n, int len, int dstoff);
+struct mbuf *m_defrag(struct mbuf *, int);
+void m_demote(struct mbuf *, int);
+struct mbuf *m_devget(char *, int, int, struct ifnet *,
+ void (*)(char *, caddr_t, u_int));
+struct mbuf *m_dup(struct mbuf *, int);
+int m_dup_pkthdr(struct mbuf *, struct mbuf *, int);
+u_int m_fixhdr(struct mbuf *);
+struct mbuf *m_fragment(struct mbuf *, int, int);
+void m_freem(struct mbuf *);
+struct mbuf *m_getm2(struct mbuf *, int, int, short, int);
+struct mbuf *m_getptr(struct mbuf *, int, int *);
+u_int m_length(struct mbuf *, struct mbuf **);
+int m_mbuftouio(struct uio *, struct mbuf *, int);
+void m_move_pkthdr(struct mbuf *, struct mbuf *);
+struct mbuf *m_prepend(struct mbuf *, int, int);
+void m_print(const struct mbuf *, int);
+struct mbuf *m_pulldown(struct mbuf *, int, int, int *);
+struct mbuf *m_pullup(struct mbuf *, int);
+int m_sanity(struct mbuf *, int);
+struct mbuf *m_split(struct mbuf *, int, int);
+struct mbuf *m_uiotombuf(struct uio *, int, int, int, int);
+struct mbuf *m_unshare(struct mbuf *, int how);
+
+/*-
+ * Network packets may have annotations attached by affixing a list of
+ * "packet tags" to the pkthdr structure. Packet tags are dynamically
+ * allocated semi-opaque data structures that have a fixed header
+ * (struct m_tag) that specifies the size of the memory block and a
+ * <cookie,type> pair that identifies it. The cookie is a 32-bit unique
+ * unsigned value used to identify a module or ABI. By convention this value
+ * is chosen as the date+time that the module is created, expressed as the
+ * number of seconds since the epoch (e.g., using date -u +'%s'). The type
+ * value is an ABI/module-specific value that identifies a particular
+ * annotation and is private to the module. For compatibility with systems
+ * like OpenBSD that define packet tags w/o an ABI/module cookie, the value
+ * PACKET_ABI_COMPAT is used to implement m_tag_get and m_tag_find
+ * compatibility shim functions and several tag types are defined below.
+ * Users that do not require compatibility should use a private cookie value
+ * so that packet tag-related definitions can be maintained privately.
+ *
+ * Note that the packet tag returned by m_tag_alloc has the default memory
+ * alignment implemented by malloc. To reference private data one can use a
+ * construct like:
+ *
+ * struct m_tag *mtag = m_tag_alloc(...);
+ * struct foo *p = (struct foo *)(mtag+1);
+ *
+ * if the alignment of struct m_tag is sufficient for referencing members of
+ * struct foo. Otherwise it is necessary to embed struct m_tag within the
+ * private data structure to ensure proper alignment; e.g.,
+ *
+ * struct foo {
+ * struct m_tag tag;
+ * ...
+ * };
+ * struct foo *p = (struct foo *) m_tag_alloc(...);
+ * struct m_tag *mtag = &p->tag;
+ */
+
+/*
+ * Persistent tags stay with an mbuf until the mbuf is reclaimed. Otherwise
+ * tags are expected to ``vanish'' when they pass through a network
+ * interface. For most interfaces this happens normally as the tags are
+ * reclaimed when the mbuf is free'd. However in some special cases
+ * reclaiming must be done manually. An example is packets that pass through
+ * the loopback interface. Also, one must be careful to do this when
+ * ``turning around'' packets (e.g., icmp_reflect).
+ *
+ * To mark a tag persistent, bit-or this flag into the tag id when defining it.
+ * The tag will then be treated as described above.
+ */
+#define MTAG_PERSISTENT 0x800
+
+#define PACKET_TAG_NONE 0 /* Nadda */
+
+/* Packet tags for use with PACKET_ABI_COMPAT. */
+#define PACKET_TAG_IPSEC_IN_DONE 1 /* IPsec applied, in */
+#define PACKET_TAG_IPSEC_OUT_DONE 2 /* IPsec applied, out */
+#define PACKET_TAG_IPSEC_IN_CRYPTO_DONE 3 /* NIC IPsec crypto done */
+#define PACKET_TAG_IPSEC_OUT_CRYPTO_NEEDED 4 /* NIC IPsec crypto req'ed */
+#define PACKET_TAG_IPSEC_IN_COULD_DO_CRYPTO 5 /* NIC notifies IPsec */
+#define PACKET_TAG_IPSEC_PENDING_TDB 6 /* Reminder to do IPsec */
+#define PACKET_TAG_BRIDGE 7 /* Bridge processing done */
+#define PACKET_TAG_GIF 8 /* GIF processing done */
+#define PACKET_TAG_GRE 9 /* GRE processing done */
+#define PACKET_TAG_IN_PACKET_CHECKSUM 10 /* NIC checksumming done */
+#define PACKET_TAG_ENCAP 11 /* Encap. processing */
+#define PACKET_TAG_IPSEC_SOCKET 12 /* IPSEC socket ref */
+#define PACKET_TAG_IPSEC_HISTORY 13 /* IPSEC history */
+#define PACKET_TAG_IPV6_INPUT 14 /* IPV6 input processing */
+#define PACKET_TAG_DUMMYNET 15 /* dummynet info */
+#define PACKET_TAG_DIVERT 17 /* divert info */
+#define PACKET_TAG_IPFORWARD 18 /* ipforward info */
+#define PACKET_TAG_MACLABEL (19 | MTAG_PERSISTENT) /* MAC label */
+#define PACKET_TAG_PF 21 /* PF + ALTQ information */
+#define PACKET_TAG_RTSOCKFAM 25 /* rtsock sa family */
+#define PACKET_TAG_IPOPTIONS 27 /* Saved IP options */
+#define PACKET_TAG_CARP 28 /* CARP info */
+#define PACKET_TAG_IPSEC_NAT_T_PORTS 29 /* two uint16_t */
+
+/* Specific cookies and tags. */
+
+/* Packet tag routines. */
+struct m_tag *m_tag_alloc(u_int32_t, int, int, int);
+void m_tag_delete(struct mbuf *, struct m_tag *);
+void m_tag_delete_chain(struct mbuf *, struct m_tag *);
+void m_tag_free_default(struct m_tag *);
+struct m_tag *m_tag_locate(struct mbuf *, u_int32_t, int, struct m_tag *);
+struct m_tag *m_tag_copy(struct m_tag *, int);
+int m_tag_copy_chain(struct mbuf *, struct mbuf *, int);
+void m_tag_delete_nonpersistent(struct mbuf *);
+
+/*
+ * Initialize the list of tags associated with an mbuf.
+ */
+static __inline void
+m_tag_init(struct mbuf *m)
+{
+
+ SLIST_INIT(&m->m_pkthdr.tags);
+}
+
+/*
+ * Set up the contents of a tag. Note that this does not fill in the free
+ * method; the caller is expected to do that.
+ *
+ * XXX probably should be called m_tag_init, but that was already taken.
+ */
+static __inline void
+m_tag_setup(struct m_tag *t, u_int32_t cookie, int type, int len)
+{
+
+ t->m_tag_id = type;
+ t->m_tag_len = len;
+ t->m_tag_cookie = cookie;
+}
+
+/*
+ * Reclaim resources associated with a tag.
+ */
+static __inline void
+m_tag_free(struct m_tag *t)
+{
+
+ (*t->m_tag_free)(t);
+}
+
+/*
+ * Return the first tag associated with an mbuf.
+ */
+static __inline struct m_tag *
+m_tag_first(struct mbuf *m)
+{
+
+ return (SLIST_FIRST(&m->m_pkthdr.tags));
+}
+
+/*
+ * Return the next tag in the list of tags associated with an mbuf.
+ */
+static __inline struct m_tag *
+m_tag_next(struct mbuf *m, struct m_tag *t)
+{
+
+ return (SLIST_NEXT(t, m_tag_link));
+}
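+
+/*
+ * Together these support a simple iteration idiom (a sketch; the loop body
+ * is elided):
+ *
+ *	struct m_tag *t;
+ *
+ *	for (t = m_tag_first(m); t != NULL; t = m_tag_next(m, t))
+ *		...;
+ */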
+
+/*
+ * Prepend a tag to the list of tags associated with an mbuf.
+ */
+static __inline void
+m_tag_prepend(struct mbuf *m, struct m_tag *t)
+{
+
+ SLIST_INSERT_HEAD(&m->m_pkthdr.tags, t, m_tag_link);
+}
+
+/*
+ * Unlink a tag from the list of tags associated with an mbuf.
+ */
+static __inline void
+m_tag_unlink(struct mbuf *m, struct m_tag *t)
+{
+
+ SLIST_REMOVE(&m->m_pkthdr.tags, t, m_tag, m_tag_link);
+}
+
+/* These are for OpenBSD compatibility. */
+#define MTAG_ABI_COMPAT 0 /* compatibility ABI */
+
+static __inline struct m_tag *
+m_tag_get(int type, int length, int wait)
+{
+ return (m_tag_alloc(MTAG_ABI_COMPAT, type, length, wait));
+}
+
+static __inline struct m_tag *
+m_tag_find(struct mbuf *m, int type, struct m_tag *start)
+{
+ return (SLIST_EMPTY(&m->m_pkthdr.tags) ? (struct m_tag *)NULL :
+ m_tag_locate(m, MTAG_ABI_COMPAT, type, start));
+}
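+
+/*
+ * Usage sketch (illustrative only): code written against the OpenBSD API
+ * allocates and looks up tags by type alone, with the shims supplying the
+ * MTAG_ABI_COMPAT cookie:
+ *
+ *	struct m_tag *mtag = m_tag_get(PACKET_TAG_GRE, sizeof(u_int32_t),
+ *	    M_NOWAIT);
+ *	...
+ *	mtag = m_tag_find(m, PACKET_TAG_GRE, NULL);
+ */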
+
+/* XXX Temporary FIB methods; these will probably use packet tags eventually. */
+#define M_FIBSHIFT 28
+#define M_FIBMASK 0x0F
+
+/* Get the FIB from an mbuf; returns the default FIB (0) if none was set. */
+#define M_GETFIB(_m) \
+ ((((_m)->m_flags & M_FIB) >> M_FIBSHIFT) & M_FIBMASK)
+
+#define M_SETFIB(_m, _fib) do { \
+	(_m)->m_flags &= ~M_FIB; \
+	(_m)->m_flags |= (((_fib) << M_FIBSHIFT) & M_FIB); \
+} while (0)
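+
+/*
+ * For example (the FIB number is arbitrary): tag an mbuf with forwarding
+ * table 1 and read it back; M_GETFIB() yields 0 if no FIB was ever set:
+ *
+ *	M_SETFIB(m, 1);
+ *	fib = M_GETFIB(m);
+ */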
+
+#endif /* _KERNEL */
+
+#ifdef MBUF_PROFILING
+ void m_profile(struct mbuf *m);
+ #define M_PROFILE(m) m_profile(m)
+#else
+ #define M_PROFILE(m)
+#endif
+
+
+#endif /* !_SYS_MBUF_HH_ */
diff --git a/rtems/freebsd/sys/md5.h b/rtems/freebsd/sys/md5.h
new file mode 100644
index 00000000..c250a7d4
--- /dev/null
+++ b/rtems/freebsd/sys/md5.h
@@ -0,0 +1,53 @@
+/* MD5.H - header file for MD5C.C
+ * $FreeBSD$
+ */
+
+/*-
+ Copyright (C) 1991-2, RSA Data Security, Inc. Created 1991. All
+rights reserved.
+
+License to copy and use this software is granted provided that it
+is identified as the "RSA Data Security, Inc. MD5 Message-Digest
+Algorithm" in all material mentioning or referencing this software
+or this function.
+
+License is also granted to make and use derivative works provided
+that such works are identified as "derived from the RSA Data
+Security, Inc. MD5 Message-Digest Algorithm" in all material
+mentioning or referencing the derived work.
+
+RSA Data Security, Inc. makes no representations concerning either
+the merchantability of this software or the suitability of this
+software for any particular purpose. It is provided "as is"
+without express or implied warranty of any kind.
+
+These notices must be retained in any copies of any part of this
+documentation and/or software.
+ */
+
+#ifndef _SYS_MD5_HH_
+#define _SYS_MD5_HH_
+
+#define MD5_BLOCK_LENGTH 64
+#define MD5_DIGEST_LENGTH 16
+#define MD5_DIGEST_STRING_LENGTH (MD5_DIGEST_LENGTH * 2 + 1)
+
+/* MD5 context. */
+typedef struct MD5Context {
+ u_int32_t state[4]; /* state (ABCD) */
+ u_int32_t count[2]; /* number of bits, modulo 2^64 (lsb first) */
+ unsigned char buffer[64]; /* input buffer */
+} MD5_CTX;
+
+#include <rtems/freebsd/sys/cdefs.h>
+
+__BEGIN_DECLS
+void MD5Init (MD5_CTX *);
+void MD5Update (MD5_CTX *, const void *, unsigned int);
+void MD5Final (unsigned char [16], MD5_CTX *);
+char * MD5End(MD5_CTX *, char *);
+char * MD5File(const char *, char *);
+char * MD5FileChunk(const char *, char *, off_t, off_t);
+char * MD5Data(const void *, unsigned int, char *);
+__END_DECLS
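+
+/*
+ * Example of the three-call interface (a minimal sketch; buf and buflen
+ * are illustrative):
+ *
+ *	MD5_CTX ctx;
+ *	unsigned char digest[MD5_DIGEST_LENGTH];
+ *
+ *	MD5Init(&ctx);
+ *	MD5Update(&ctx, buf, buflen);
+ *	MD5Final(digest, &ctx);
+ *
+ * MD5End() instead finalizes into a caller-supplied buffer of
+ * MD5_DIGEST_STRING_LENGTH bytes as a NUL-terminated hex string.
+ */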
+#endif /* _SYS_MD5_HH_ */
diff --git a/rtems/freebsd/sys/module.h b/rtems/freebsd/sys/module.h
new file mode 100644
index 00000000..23650f71
--- /dev/null
+++ b/rtems/freebsd/sys/module.h
@@ -0,0 +1,218 @@
+/*-
+ * Copyright (c) 1997 Doug Rabson
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _SYS_MODULE_HH_
+#define _SYS_MODULE_HH_
+
+/*
+ * Module metadata types
+ */
+#define MDT_DEPEND 1 /* argument is a module name */
+#define MDT_MODULE 2 /* module declaration */
+#define MDT_VERSION 3 /* module version(s) */
+
+#define MDT_STRUCT_VERSION 1 /* version of metadata structure */
+#define MDT_SETNAME "modmetadata_set"
+
+typedef enum modeventtype {
+ MOD_LOAD,
+ MOD_UNLOAD,
+ MOD_SHUTDOWN,
+ MOD_QUIESCE
+} modeventtype_t;
+
+typedef struct module *module_t;
+typedef int (*modeventhand_t)(module_t, int /* modeventtype_t */, void *);
+
+/*
+ * Struct for registering modules statically via SYSINIT.
+ */
+typedef struct moduledata {
+ const char *name; /* module name */
+ modeventhand_t evhand; /* event handler */
+ void *priv; /* extra data */
+} moduledata_t;
+
+/*
+ * A module can use this to report module-specific data to the user via
+ * kldstat(2).
+ */
+typedef union modspecific {
+ int intval;
+ u_int uintval;
+ long longval;
+ u_long ulongval;
+} modspecific_t;
+
+/*
+ * Module dependency declaration
+ */
+struct mod_depend {
+ int md_ver_minimum;
+ int md_ver_preferred;
+ int md_ver_maximum;
+};
+
+/*
+ * Module version declaration
+ */
+struct mod_version {
+ int mv_version;
+};
+
+struct mod_metadata {
+ int md_version; /* structure version MDTV_* */
+ int md_type; /* type of entry MDT_* */
+ void *md_data; /* specific data */
+ const char *md_cval; /* common string label */
+};
+
+#ifdef _KERNEL
+
+#include <rtems/freebsd/sys/linker_set.h>
+
+#define MODULE_METADATA(uniquifier, type, data, cval) \
+ static struct mod_metadata _mod_metadata##uniquifier = { \
+ MDT_STRUCT_VERSION, \
+ type, \
+ data, \
+ cval \
+ }; \
+ DATA_SET(modmetadata_set, _mod_metadata##uniquifier)
+
+#define MODULE_DEPEND(module, mdepend, vmin, vpref, vmax) \
+ static struct mod_depend _##module##_depend_on_##mdepend = { \
+ vmin, \
+ vpref, \
+ vmax \
+ }; \
+ MODULE_METADATA(_md_##module##_on_##mdepend, MDT_DEPEND, \
+ &_##module##_depend_on_##mdepend, #mdepend)
+
+/*
+ * Every kernel has a 'kernel' module with the version set to
+ * __FreeBSD_version. We embed a MODULE_DEPEND() inside every module
+ * that depends on the 'kernel' module. It uses the current value of
+ * __FreeBSD_version as the minimum and preferred versions. For the
+ * maximum version it rounds the version up to the end of its branch
+ * (i.e. M99999 for M.x). This allows a module built on M.x to work
+ * on M.y systems where y >= x, but fail on M.z systems where z < x.
+ */
+#define MODULE_KERNEL_MAXVER (roundup(__FreeBSD_version, 100000) - 1)
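+
+/*
+ * For instance, with a hypothetical __FreeBSD_version of 802000 (an 8.x
+ * kernel), roundup(802000, 100000) - 1 == 899999, so the accepted range
+ * is [802000, 899999]: any later 8.x kernel, but nothing older and no 9.x.
+ */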
+
+#define DECLARE_MODULE_WITH_MAXVER(name, data, sub, order, maxver) \
+ MODULE_DEPEND(name, kernel, __FreeBSD_version, \
+ __FreeBSD_version, maxver); \
+ MODULE_METADATA(_md_##name, MDT_MODULE, &data, #name); \
+ SYSINIT(name##module, sub, order, module_register_init, &data); \
+ struct __hack
+
+#define DECLARE_MODULE(name, data, sub, order) \
+ DECLARE_MODULE_WITH_MAXVER(name, data, sub, order, MODULE_KERNEL_MAXVER)
+
+/*
+ * The module declared with DECLARE_MODULE_TIED can only be loaded
+ * into the kernel with exactly the same __FreeBSD_version.
+ *
+ * Use it for modules that use kernel interfaces that are not stable
+ * even on STABLE/X branches.
+ */
+#define DECLARE_MODULE_TIED(name, data, sub, order) \
+ DECLARE_MODULE_WITH_MAXVER(name, data, sub, order, __FreeBSD_version)
+
+#define MODULE_VERSION(module, version) \
+ static struct mod_version _##module##_version = { \
+ version \
+ }; \
+ MODULE_METADATA(_##module##_version, MDT_VERSION, \
+ &_##module##_version, #module)
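+
+/*
+ * Putting these together (an illustrative sketch; the "foo" module and
+ * its handler are hypothetical):
+ *
+ *	static int
+ *	foo_modevent(module_t mod, int type, void *data)
+ *	{
+ *		switch (type) {
+ *		case MOD_LOAD:
+ *			return (0);
+ *		case MOD_UNLOAD:
+ *			return (0);
+ *		default:
+ *			return (EOPNOTSUPP);
+ *		}
+ *	}
+ *
+ *	static moduledata_t foo_mod = { "foo", foo_modevent, NULL };
+ *	DECLARE_MODULE(foo, foo_mod, SI_SUB_DRIVERS, SI_ORDER_MIDDLE);
+ *	MODULE_VERSION(foo, 1);
+ */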
+
+extern struct sx modules_sx;
+
+#define MOD_XLOCK sx_xlock(&modules_sx)
+#define MOD_SLOCK sx_slock(&modules_sx)
+#define MOD_XUNLOCK sx_xunlock(&modules_sx)
+#define MOD_SUNLOCK sx_sunlock(&modules_sx)
+#define MOD_LOCK_ASSERT sx_assert(&modules_sx, SX_LOCKED)
+#define MOD_XLOCK_ASSERT sx_assert(&modules_sx, SX_XLOCKED)
+
+struct linker_file;
+
+void module_register_init(const void *);
+int module_register(const struct moduledata *, struct linker_file *);
+module_t module_lookupbyname(const char *);
+module_t module_lookupbyid(int);
+int module_quiesce(module_t);
+void module_reference(module_t);
+void module_release(module_t);
+int module_unload(module_t);
+int module_getid(module_t);
+module_t module_getfnext(module_t);
+const char * module_getname(module_t);
+void module_setspecific(module_t, modspecific_t *);
+struct linker_file *module_file(module_t);
+
+#ifdef MOD_DEBUG
+extern int mod_debug;
+#define MOD_DEBUG_REFS 1
+
+#define MOD_DPF(cat, args) do { \
+ if (mod_debug & MOD_DEBUG_##cat) \
+ printf(args); \
+} while (0)
+
+#else /* !MOD_DEBUG */
+
+#define MOD_DPF(cat, args)
+#endif
+#endif /* _KERNEL */
+
+#define MAXMODNAME 32
+
+struct module_stat {
+ int version; /* set to sizeof(struct module_stat) */
+ char name[MAXMODNAME];
+ int refs;
+ int id;
+ modspecific_t data;
+};
+
+#ifndef _KERNEL
+
+#include <rtems/freebsd/sys/cdefs.h>
+
+__BEGIN_DECLS
+int modnext(int _modid);
+int modfnext(int _modid);
+int modstat(int _modid, struct module_stat *_stat);
+int modfind(const char *_name);
+__END_DECLS
+
+#endif
+
+#endif /* !_SYS_MODULE_HH_ */
diff --git a/rtems/freebsd/sys/mount.h b/rtems/freebsd/sys/mount.h
new file mode 100644
index 00000000..a3fc6717
--- /dev/null
+++ b/rtems/freebsd/sys/mount.h
@@ -0,0 +1,798 @@
+/*-
+ * Copyright (c) 1989, 1991, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)mount.h 8.21 (Berkeley) 5/20/95
+ * $FreeBSD$
+ */
+
+#ifndef _SYS_MOUNT_HH_
+#define _SYS_MOUNT_HH_
+
+#include <rtems/freebsd/sys/ucred.h>
+#include <rtems/freebsd/sys/queue.h>
+#ifdef _KERNEL
+#include <rtems/freebsd/sys/lock.h>
+#include <rtems/freebsd/sys/lockmgr.h>
+#include <rtems/freebsd/sys/_mutex.h>
+#endif
+
+/*
+ * NOTE: When changing the statfs structure, the mount structure, or the
+ * MNT_*/MNTK_* flags, also update the DDB "show mount" command in vfs_subr.c.
+ */
+
+typedef struct fsid { int32_t val[2]; } fsid_t; /* filesystem id type */
+
+/*
+ * File identifier.
+ * These are unique per filesystem on a single machine.
+ */
+#define MAXFIDSZ 16
+
+struct fid {
+ u_short fid_len; /* length of data in bytes */
+ u_short fid_data0; /* force longword alignment */
+ char fid_data[MAXFIDSZ]; /* data (variable length) */
+};
+
+/*
+ * filesystem statistics
+ */
+#define MFSNAMELEN 16 /* length of type name including null */
+#define MNAMELEN 88 /* size of on/from name bufs */
+#define STATFS_VERSION 0x20030518 /* current version number */
+struct statfs {
+ uint32_t f_version; /* structure version number */
+ uint32_t f_type; /* type of filesystem */
+ uint64_t f_flags; /* copy of mount exported flags */
+ uint64_t f_bsize; /* filesystem fragment size */
+ uint64_t f_iosize; /* optimal transfer block size */
+ uint64_t f_blocks; /* total data blocks in filesystem */
+ uint64_t f_bfree; /* free blocks in filesystem */
+ int64_t f_bavail; /* free blocks avail to non-superuser */
+ uint64_t f_files; /* total file nodes in filesystem */
+ int64_t f_ffree; /* free nodes avail to non-superuser */
+ uint64_t f_syncwrites; /* count of sync writes since mount */
+ uint64_t f_asyncwrites; /* count of async writes since mount */
+ uint64_t f_syncreads; /* count of sync reads since mount */
+ uint64_t f_asyncreads; /* count of async reads since mount */
+ uint64_t f_spare[10]; /* unused spare */
+ uint32_t f_namemax; /* maximum filename length */
+ uid_t f_owner; /* user that mounted the filesystem */
+ fsid_t f_fsid; /* filesystem id */
+ char f_charspare[80]; /* spare string space */
+ char f_fstypename[MFSNAMELEN]; /* filesystem type name */
+ char f_mntfromname[MNAMELEN]; /* mounted filesystem */
+ char f_mntonname[MNAMELEN]; /* directory on which mounted */
+};
+
+#ifdef _KERNEL
+#define OMFSNAMELEN 16 /* length of fs type name, including null */
+#define OMNAMELEN (88 - 2 * sizeof(long)) /* size of on/from name bufs */
+
+/* XXX getfsstat.2 is out of date with write and read counter changes here. */
+/* XXX statfs.2 is out of date with read counter changes here. */
+struct ostatfs {
+ long f_spare2; /* placeholder */
+ long f_bsize; /* fundamental filesystem block size */
+ long f_iosize; /* optimal transfer block size */
+ long f_blocks; /* total data blocks in filesystem */
+ long f_bfree; /* free blocks in fs */
+ long f_bavail; /* free blocks avail to non-superuser */
+ long f_files; /* total file nodes in filesystem */
+ long f_ffree; /* free file nodes in fs */
+ fsid_t f_fsid; /* filesystem id */
+ uid_t f_owner; /* user that mounted the filesystem */
+ int f_type; /* type of filesystem */
+ int f_flags; /* copy of mount exported flags */
+ long f_syncwrites; /* count of sync writes since mount */
+ long f_asyncwrites; /* count of async writes since mount */
+ char f_fstypename[OMFSNAMELEN]; /* fs type name */
+ char f_mntonname[OMNAMELEN]; /* directory on which mounted */
+ long f_syncreads; /* count of sync reads since mount */
+ long f_asyncreads; /* count of async reads since mount */
+ short f_spares1; /* unused spare */
+ char f_mntfromname[OMNAMELEN];/* mounted filesystem */
+ short f_spares2; /* unused spare */
+ /*
+ * XXX on machines where longs are aligned to 8-byte boundaries, there
+ * is an unnamed int32_t here. This spare was after the apparent end
+ * of the struct until we bit off the read counters from f_mntonname.
+ */
+ long f_spare[2]; /* unused spare */
+};
+
+TAILQ_HEAD(vnodelst, vnode);
+
+/* Mount options list */
+TAILQ_HEAD(vfsoptlist, vfsopt);
+struct vfsopt {
+ TAILQ_ENTRY(vfsopt) link;
+ char *name;
+ void *value;
+ int len;
+ int pos;
+ int seen;
+};
+
+/*
+ * Structure per mounted filesystem. Each mounted filesystem has an
+ * array of operations and an instance record. The filesystems are
+ * put on a doubly linked list.
+ *
+ * Lock reference:
+ * m - mountlist_mtx
+ * i - interlock
+ *
+ * Unmarked fields are considered stable as long as a ref is held.
+ */
+struct mount {
+ struct mtx mnt_mtx; /* mount structure interlock */
+ int mnt_gen; /* struct mount generation */
+#define mnt_startzero mnt_list
+ TAILQ_ENTRY(mount) mnt_list; /* (m) mount list */
+ struct vfsops *mnt_op; /* operations on fs */
+ struct vfsconf *mnt_vfc; /* configuration info */
+ struct vnode *mnt_vnodecovered; /* vnode we mounted on */
+ struct vnode *mnt_syncer; /* syncer vnode */
+ int mnt_ref; /* (i) Reference count */
+ struct vnodelst mnt_nvnodelist; /* (i) list of vnodes */
+ int mnt_nvnodelistsize; /* (i) # of vnodes */
+ int mnt_writeopcount; /* (i) write syscalls pending */
+ int mnt_kern_flag; /* (i) kernel only flags */
+ u_int mnt_flag; /* (i) flags shared with user */
+ u_int mnt_xflag; /* (i) more flags shared with user */
+ u_int mnt_noasync; /* (i) # noasync overrides */
+ struct vfsoptlist *mnt_opt; /* current mount options */
+ struct vfsoptlist *mnt_optnew; /* new options passed to fs */
+ int mnt_maxsymlinklen; /* max size of short symlink */
+ struct statfs mnt_stat; /* cache of filesystem stats */
+ struct ucred *mnt_cred; /* credentials of mounter */
+ void * mnt_data; /* private data */
+	time_t		mnt_time;		/* last time written */
+ int mnt_iosize_max; /* max size for clusters, etc */
+ struct netexport *mnt_export; /* export list */
+ struct label *mnt_label; /* MAC label for the fs */
+ u_int mnt_hashseed; /* Random seed for vfs_hash */
+ int mnt_lockref; /* (i) Lock reference count */
+ int mnt_secondary_writes; /* (i) # of secondary writes */
+ int mnt_secondary_accwrites;/* (i) secondary wr. starts */
+ struct thread *mnt_susp_owner; /* (i) thread owning suspension */
+#define mnt_endzero mnt_gjprovider
+ char *mnt_gjprovider; /* gjournal provider name */
+ struct lock mnt_explock; /* vfs_export walkers lock */
+};
+
+struct vnode *__mnt_vnode_next(struct vnode **mvp, struct mount *mp);
+struct vnode *__mnt_vnode_first(struct vnode **mvp, struct mount *mp);
+void __mnt_vnode_markerfree(struct vnode **mvp, struct mount *mp);
+
+#define MNT_VNODE_FOREACH(vp, mp, mvp) \
+ for (vp = __mnt_vnode_first(&(mvp), (mp)); \
+ (vp) != NULL; vp = __mnt_vnode_next(&(mvp), (mp)))
+
+#define MNT_VNODE_FOREACH_ABORT_ILOCKED(mp, mvp) \
+ __mnt_vnode_markerfree(&(mvp), (mp))
+
+#define MNT_VNODE_FOREACH_ABORT(mp, mvp) \
+ do { \
+ MNT_ILOCK(mp); \
+ MNT_VNODE_FOREACH_ABORT_ILOCKED(mp, mvp); \
+ MNT_IUNLOCK(mp); \
+ } while (0)
+
+#define MNT_ILOCK(mp) mtx_lock(&(mp)->mnt_mtx)
+#define MNT_ITRYLOCK(mp) mtx_trylock(&(mp)->mnt_mtx)
+#define MNT_IUNLOCK(mp) mtx_unlock(&(mp)->mnt_mtx)
+#define MNT_MTX(mp) (&(mp)->mnt_mtx)
+#define MNT_REF(mp) (mp)->mnt_ref++
+#define MNT_REL(mp) do { \
+ KASSERT((mp)->mnt_ref > 0, ("negative mnt_ref")); \
+ (mp)->mnt_ref--; \
+ if ((mp)->mnt_ref == 0) \
+ wakeup((mp)); \
+} while (0)
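+
+/*
+ * A typical traversal (a sketch; the per-vnode work is elided) holds the
+ * interlock around the loop and lets the marker vnode mvp track the
+ * iterator's position:
+ *
+ *	struct vnode *vp, *mvp;
+ *
+ *	MNT_ILOCK(mp);
+ *	MNT_VNODE_FOREACH(vp, mp, mvp) {
+ *		VI_LOCK(vp);
+ *		...
+ *		VI_UNLOCK(vp);
+ *	}
+ *	MNT_IUNLOCK(mp);
+ */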
+
+#endif /* _KERNEL */
+
+/*
+ * User specifiable flags, stored in mnt_flag.
+ */
+#define MNT_RDONLY 0x00000001 /* read only filesystem */
+#define MNT_SYNCHRONOUS 0x00000002 /* filesystem written synchronously */
+#define MNT_NOEXEC 0x00000004 /* can't exec from filesystem */
+#define MNT_NOSUID 0x00000008 /* don't honor setuid bits on fs */
+#define MNT_UNION 0x00000020 /* union with underlying filesystem */
+#define MNT_ASYNC 0x00000040 /* filesystem written asynchronously */
+#define MNT_SUIDDIR 0x00100000 /* special handling of SUID on dirs */
+#define MNT_SOFTDEP 0x00200000 /* soft updates being done */
+#define MNT_NOSYMFOLLOW 0x00400000 /* do not follow symlinks */
+#define MNT_GJOURNAL 0x02000000 /* GEOM journal support enabled */
+#define MNT_MULTILABEL 0x04000000 /* MAC support for individual objects */
+#define MNT_ACLS 0x08000000 /* ACL support enabled */
+#define MNT_NOATIME 0x10000000 /* disable update of file access time */
+#define MNT_NOCLUSTERR 0x40000000 /* disable cluster read */
+#define MNT_NOCLUSTERW 0x80000000 /* disable cluster write */
+#define MNT_NFS4ACLS 0x00000010 /* NFSv4 ACL support enabled */
+
+/*
+ * NFS export related mount flags.
+ */
+#define MNT_EXRDONLY 0x00000080 /* exported read only */
+#define MNT_EXPORTED 0x00000100 /* filesystem is exported */
+#define MNT_DEFEXPORTED 0x00000200 /* exported to the world */
+#define MNT_EXPORTANON 0x00000400 /* use anon uid mapping for everyone */
+#define MNT_EXKERB 0x00000800 /* exported with Kerberos uid mapping */
+#define MNT_EXPUBLIC 0x20000000 /* public export (WebNFS) */
+
+/*
+ * Flags set by internal operations,
+ * but visible to the user.
+ * XXX some of these are not quite right. (I've never seen the root flag set.)
+ */
+#define MNT_LOCAL 0x00001000 /* filesystem is stored locally */
+#define MNT_QUOTA 0x00002000 /* quotas are enabled on filesystem */
+#define MNT_ROOTFS 0x00004000 /* identifies the root filesystem */
+#define MNT_USER 0x00008000 /* mounted by a user */
+#define MNT_IGNORE 0x00800000 /* do not show entry in df */
+
+/*
+ * Mask of flags that are visible to statfs().
+ * XXX I think that this could now become (~(MNT_CMDFLAGS))
+ * but the 'mount' program may need changing to handle this.
+ */
+#define MNT_VISFLAGMASK (MNT_RDONLY | MNT_SYNCHRONOUS | MNT_NOEXEC | \
+ MNT_NOSUID | MNT_UNION | \
+ MNT_ASYNC | MNT_EXRDONLY | MNT_EXPORTED | \
+ MNT_DEFEXPORTED | MNT_EXPORTANON| MNT_EXKERB | \
+ MNT_LOCAL | MNT_USER | MNT_QUOTA | \
+ MNT_ROOTFS | MNT_NOATIME | MNT_NOCLUSTERR| \
+ MNT_NOCLUSTERW | MNT_SUIDDIR | MNT_SOFTDEP | \
+ MNT_IGNORE | MNT_EXPUBLIC | MNT_NOSYMFOLLOW | \
+ MNT_GJOURNAL | MNT_MULTILABEL | MNT_ACLS | MNT_NFS4ACLS)
+
+/* Mask of flags that can be updated. */
+#define MNT_UPDATEMASK (MNT_NOSUID | MNT_NOEXEC | \
+ MNT_SYNCHRONOUS | MNT_UNION | MNT_ASYNC | \
+ MNT_NOATIME | \
+ MNT_NOSYMFOLLOW | MNT_IGNORE | \
+ MNT_NOCLUSTERR | MNT_NOCLUSTERW | MNT_SUIDDIR | \
+ MNT_ACLS | MNT_USER | MNT_NFS4ACLS)
+
+/*
+ * External filesystem command modifier flags.
+ * Unmount can use the MNT_FORCE flag.
+ * XXX: These are not STATES and really should be somewhere else.
+ * XXX: MNT_BYFSID collides with MNT_ACLS, but because MNT_ACLS is only used for
+ * mount(2) and MNT_BYFSID is only used for unmount(2) it's harmless.
+ */
+#define MNT_UPDATE 0x00010000 /* not a real mount, just an update */
+#define MNT_DELEXPORT 0x00020000 /* delete export host lists */
+#define MNT_RELOAD 0x00040000 /* reload filesystem data */
+#define MNT_FORCE 0x00080000 /* force unmount or readonly change */
+#define MNT_SNAPSHOT 0x01000000 /* snapshot the filesystem */
+#define MNT_BYFSID 0x08000000 /* specify filesystem by ID. */
+#define MNT_CMDFLAGS (MNT_UPDATE | MNT_DELEXPORT | MNT_RELOAD | \
+ MNT_FORCE | MNT_SNAPSHOT | MNT_BYFSID)
+/*
+ * Internal filesystem control flags stored in mnt_kern_flag.
+ *
+ * MNTK_UNMOUNT locks the mount entry so that name lookup cannot proceed
+ * past the mount point. This keeps the subtree stable during mounts
+ * and unmounts.
+ *
+ * MNTK_UNMOUNTF permits filesystems to detect a forced unmount while
+ * dounmount() is still waiting to lock the mountpoint. This allows
+ * the filesystem to cancel operations that might otherwise deadlock
+ * with the unmount attempt (used by NFS).
+ *
+ * MNTK_NOINSMNTQ is a strict subset of MNTK_UNMOUNT. They are separated
+ * to allow a failed unmount attempt to restore the syncer vnode for
+ * the mount.
+ */
+#define MNTK_UNMOUNTF 0x00000001 /* forced unmount in progress */
+#define MNTK_ASYNC 0x00000002 /* filtered async flag */
+#define MNTK_SOFTDEP 0x00000004 /* async disabled by softdep */
+#define MNTK_NOINSMNTQ 0x00000008 /* insmntque is not allowed */
+#define MNTK_DRAINING 0x00000010 /* lock draining is happening */
+#define MNTK_REFEXPIRE 0x00000020 /* refcount expiring is happening */
+#define MNTK_EXTENDED_SHARED 0x00000040 /* Allow shared locking for more ops */
+#define MNTK_SHARED_WRITES 0x00000080 /* Allow shared locking for writes */
+#define MNTK_UNMOUNT 0x01000000 /* unmount in progress */
+#define MNTK_MWAIT 0x02000000 /* waiting for unmount to finish */
+#define MNTK_SUSPEND 0x08000000 /* request write suspension */
+#define MNTK_SUSPEND2 0x04000000 /* block secondary writes */
+#define MNTK_SUSPENDED 0x10000000 /* write operations are suspended */
+#define MNTK_MPSAFE 0x20000000 /* Filesystem is MPSAFE. */
+#define MNTK_LOOKUP_SHARED 0x40000000 /* FS supports shared lock lookups */
+#define MNTK_NOKNOTE 0x80000000 /* Don't send KNOTEs from VOP hooks */
+
+#define MNT_SHARED_WRITES(mp) (((mp) != NULL) && \
+ ((mp)->mnt_kern_flag & MNTK_SHARED_WRITES))
+
+/*
+ * Sysctl CTL_VFS definitions.
+ *
+ * Second level identifier specifies which filesystem. Second level
+ * identifier VFS_VFSCONF returns information about all filesystems.
+ * Second level identifier VFS_GENERIC is non-terminal.
+ */
+#define VFS_VFSCONF 0 /* get configured filesystems */
+#define VFS_GENERIC 0 /* generic filesystem information */
+/*
+ * Third level identifiers for VFS_GENERIC are given below; third
+ * level identifiers for specific filesystems are given in their
+ * mount specific header files.
+ */
+#define VFS_MAXTYPENUM 1 /* int: highest defined filesystem type */
+#define VFS_CONF 2 /* struct: vfsconf for filesystem given
+ as next argument */
+
+/*
+ * Flags for various system call interfaces.
+ *
+ * waitfor flags to vfs_sync() and getfsstat()
+ */
+#define MNT_WAIT 1 /* synchronously wait for I/O to complete */
+#define MNT_NOWAIT 2 /* start all I/O, but do not wait for it */
+#define MNT_LAZY 3 /* push data not written by filesystem syncer */
+#define MNT_SUSPEND 4 /* Suspend file system after sync */
+
+/*
+ * Generic file handle
+ */
+struct fhandle {
+ fsid_t fh_fsid; /* Filesystem id of mount point */
+ struct fid fh_fid; /* Filesys specific id */
+};
+typedef struct fhandle fhandle_t;
+
+/*
+ * Old export arguments without security flavor list
+ */
+struct oexport_args {
+ int ex_flags; /* export related flags */
+ uid_t ex_root; /* mapping for root uid */
+ struct xucred ex_anon; /* mapping for anonymous user */
+ struct sockaddr *ex_addr; /* net address to which exported */
+ u_char ex_addrlen; /* and the net address length */
+ struct sockaddr *ex_mask; /* mask of valid bits in saddr */
+ u_char ex_masklen; /* and the smask length */
+ char *ex_indexfile; /* index file for WebNFS URLs */
+};
+
+/*
+ * Export arguments for local filesystem mount calls.
+ */
+#define MAXSECFLAVORS 5
+struct export_args {
+ int ex_flags; /* export related flags */
+ uid_t ex_root; /* mapping for root uid */
+ struct xucred ex_anon; /* mapping for anonymous user */
+ struct sockaddr *ex_addr; /* net address to which exported */
+ u_char ex_addrlen; /* and the net address length */
+ struct sockaddr *ex_mask; /* mask of valid bits in saddr */
+ u_char ex_masklen; /* and the smask length */
+ char *ex_indexfile; /* index file for WebNFS URLs */
+ int ex_numsecflavors; /* security flavor count */
+ int ex_secflavors[MAXSECFLAVORS]; /* list of security flavors */
+};
+
+/*
+ * Structure holding information for a publicly exported filesystem
+ * (WebNFS). Currently the specs allow for just one such filesystem.
+ */
+struct nfs_public {
+ int np_valid; /* Do we hold valid information */
+ fhandle_t np_handle; /* Filehandle for pub fs (internal) */
+ struct mount *np_mount; /* Mountpoint of exported fs */
+ char *np_index; /* Index file */
+};
+
+/*
+ * Filesystem configuration information. One of these exists for each
+ * type of filesystem supported by the kernel. These are searched at
+ * mount time to identify the requested filesystem.
+ *
+ * XXX: Never change the first two arguments!
+ */
+struct vfsconf {
+ u_int vfc_version; /* ABI version number */
+ char vfc_name[MFSNAMELEN]; /* filesystem type name */
+ struct vfsops *vfc_vfsops; /* filesystem operations vector */
+ int vfc_typenum; /* historic filesystem type number */
+ int vfc_refcount; /* number mounted of this type */
+ int vfc_flags; /* permanent flags */
+ struct vfsoptdecl *vfc_opts; /* mount options */
+ TAILQ_ENTRY(vfsconf) vfc_list; /* list of vfscons */
+};
+
+/* Userland version of the struct vfsconf. */
+struct xvfsconf {
+ struct vfsops *vfc_vfsops; /* filesystem operations vector */
+ char vfc_name[MFSNAMELEN]; /* filesystem type name */
+ int vfc_typenum; /* historic filesystem type number */
+ int vfc_refcount; /* number mounted of this type */
+ int vfc_flags; /* permanent flags */
+ struct vfsconf *vfc_next; /* next in list */
+};
+
+#ifndef BURN_BRIDGES
+struct ovfsconf {
+ void *vfc_vfsops;
+ char vfc_name[32];
+ int vfc_index;
+ int vfc_refcount;
+ int vfc_flags;
+};
+#endif
+
+/*
+ * NB: these flags refer to IMPLEMENTATION properties, not properties of
+ * any actual mounts; i.e., it does not make sense to change the flags.
+ */
+#define VFCF_STATIC 0x00010000 /* statically compiled into kernel */
+#define VFCF_NETWORK 0x00020000 /* may get data over the network */
+#define VFCF_READONLY 0x00040000 /* writes are not implemented */
+#define VFCF_SYNTHETIC 0x00080000 /* data does not represent real files */
+#define VFCF_LOOPBACK 0x00100000 /* aliases some other mounted FS */
+#define VFCF_UNICODE 0x00200000 /* stores file names as Unicode */
+#define VFCF_JAIL 0x00400000 /* can be mounted from within a jail */
+#define VFCF_DELEGADMIN 0x00800000 /* supports delegated administration */
+
+typedef uint32_t fsctlop_t;
+
+struct vfsidctl {
+ int vc_vers; /* should be VFSIDCTL_VERS1 (below) */
+ fsid_t vc_fsid; /* fsid to operate on */
+ char vc_fstypename[MFSNAMELEN];
+ /* type of fs 'nfs' or '*' */
+ fsctlop_t vc_op; /* operation VFS_CTL_* (below) */
+ void *vc_ptr; /* pointer to data structure */
+ size_t vc_len; /* sizeof said structure */
+ u_int32_t vc_spare[12]; /* spare (must be zero) */
+};
+
+/* vfsidctl API version. */
+#define VFS_CTL_VERS1 0x01
+
+/*
+ * New style VFS sysctls, do not reuse/conflict with the namespace for
+ * private sysctls.
+ * All "global" sysctl ops have the 17th bit (0x00010000) set:
+ * 0x...1....
+ * Private sysctl ops should have that bit unset.
+ */
+#define VFS_CTL_QUERY 0x00010001 /* anything wrong? (vfsquery) */
+#define VFS_CTL_TIMEO 0x00010002 /* set timeout for vfs notification */
+#define VFS_CTL_NOLOCKS 0x00010003 /* disable file locking */
+
+struct vfsquery {
+ u_int32_t vq_flags;
+ u_int32_t vq_spare[31];
+};
+
+/* vfsquery flags */
+#define VQ_NOTRESP 0x0001 /* server down */
+#define VQ_NEEDAUTH 0x0002 /* server bad auth */
+#define VQ_LOWDISK 0x0004 /* we're low on space */
+#define VQ_MOUNT 0x0008 /* new filesystem arrived */
+#define VQ_UNMOUNT 0x0010 /* filesystem has left */
+#define VQ_DEAD 0x0020 /* filesystem is dead, needs force unmount */
+#define VQ_ASSIST 0x0040 /* filesystem needs assistance from external
+ program */
+#define VQ_NOTRESPLOCK 0x0080 /* server lockd down */
+#define VQ_FLAG0100 0x0100 /* placeholder */
+#define VQ_FLAG0200 0x0200 /* placeholder */
+#define VQ_FLAG0400 0x0400 /* placeholder */
+#define VQ_FLAG0800 0x0800 /* placeholder */
+#define VQ_FLAG1000 0x1000 /* placeholder */
+#define VQ_FLAG2000 0x2000 /* placeholder */
+#define VQ_FLAG4000 0x4000 /* placeholder */
+#define VQ_FLAG8000 0x8000 /* placeholder */
+
+#ifdef _KERNEL
+/* Point a sysctl request at a vfsidctl's data. */
+#define VCTLTOREQ(vc, req) \
+ do { \
+ (req)->newptr = (vc)->vc_ptr; \
+ (req)->newlen = (vc)->vc_len; \
+ (req)->newidx = 0; \
+ } while (0)
+#endif
+
+struct iovec;
+struct uio;
+
+#ifdef _KERNEL
+
+/*
+ * vfs_busy specific flags and mask.
+ */
+#define MBF_NOWAIT 0x01
+#define MBF_MNTLSTLOCK 0x02
+#define MBF_MASK (MBF_NOWAIT | MBF_MNTLSTLOCK)
+
+#ifdef MALLOC_DECLARE
+MALLOC_DECLARE(M_MOUNT);
+#endif
+extern int maxvfsconf; /* highest defined filesystem type */
+extern int nfs_mount_type; /* vfc_typenum for nfs, or -1 */
+
+TAILQ_HEAD(vfsconfhead, vfsconf);
+extern struct vfsconfhead vfsconf;
+
+/*
+ * Operations supported on a mounted filesystem.
+ */
+struct mount_args;
+struct nameidata;
+struct sysctl_req;
+struct mntarg;
+
+typedef int vfs_cmount_t(struct mntarg *ma, void *data, int flags);
+typedef int vfs_unmount_t(struct mount *mp, int mntflags);
+typedef int vfs_root_t(struct mount *mp, int flags, struct vnode **vpp);
+typedef int vfs_quotactl_t(struct mount *mp, int cmds, uid_t uid, void *arg);
+typedef int vfs_statfs_t(struct mount *mp, struct statfs *sbp);
+typedef int vfs_sync_t(struct mount *mp, int waitfor);
+typedef int vfs_vget_t(struct mount *mp, ino_t ino, int flags,
+ struct vnode **vpp);
+typedef int vfs_fhtovp_t(struct mount *mp, struct fid *fhp, struct vnode **vpp);
+typedef int vfs_checkexp_t(struct mount *mp, struct sockaddr *nam,
+ int *extflagsp, struct ucred **credanonp,
+ int *numsecflavors, int **secflavors);
+typedef int vfs_init_t(struct vfsconf *);
+typedef int vfs_uninit_t(struct vfsconf *);
+typedef int vfs_extattrctl_t(struct mount *mp, int cmd,
+ struct vnode *filename_vp, int attrnamespace,
+ const char *attrname);
+typedef int vfs_mount_t(struct mount *mp);
+typedef int vfs_sysctl_t(struct mount *mp, fsctlop_t op,
+ struct sysctl_req *req);
+typedef void vfs_susp_clean_t(struct mount *mp);
+
+struct vfsops {
+ vfs_mount_t *vfs_mount;
+ vfs_cmount_t *vfs_cmount;
+ vfs_unmount_t *vfs_unmount;
+ vfs_root_t *vfs_root;
+ vfs_quotactl_t *vfs_quotactl;
+ vfs_statfs_t *vfs_statfs;
+ vfs_sync_t *vfs_sync;
+ vfs_vget_t *vfs_vget;
+ vfs_fhtovp_t *vfs_fhtovp;
+ vfs_checkexp_t *vfs_checkexp;
+ vfs_init_t *vfs_init;
+ vfs_uninit_t *vfs_uninit;
+ vfs_extattrctl_t *vfs_extattrctl;
+ vfs_sysctl_t *vfs_sysctl;
+ vfs_susp_clean_t *vfs_susp_clean;
+};
+
+vfs_statfs_t __vfs_statfs;
+
+#define VFS_MOUNT(MP) (*(MP)->mnt_op->vfs_mount)(MP)
+#define VFS_UNMOUNT(MP, FORCE) (*(MP)->mnt_op->vfs_unmount)(MP, FORCE)
+#define VFS_ROOT(MP, FLAGS, VPP) \
+ (*(MP)->mnt_op->vfs_root)(MP, FLAGS, VPP)
+#define VFS_QUOTACTL(MP, C, U, A) \
+ (*(MP)->mnt_op->vfs_quotactl)(MP, C, U, A)
+#define VFS_STATFS(MP, SBP) __vfs_statfs((MP), (SBP))
+#define VFS_SYNC(MP, WAIT) (*(MP)->mnt_op->vfs_sync)(MP, WAIT)
+#define VFS_VGET(MP, INO, FLAGS, VPP) \
+ (*(MP)->mnt_op->vfs_vget)(MP, INO, FLAGS, VPP)
+#define VFS_FHTOVP(MP, FIDP, VPP) \
+ (*(MP)->mnt_op->vfs_fhtovp)(MP, FIDP, VPP)
+#define VFS_CHECKEXP(MP, NAM, EXFLG, CRED, NUMSEC, SEC) \
+ (*(MP)->mnt_op->vfs_checkexp)(MP, NAM, EXFLG, CRED, NUMSEC, SEC)
+#define VFS_EXTATTRCTL(MP, C, FN, NS, N) \
+ (*(MP)->mnt_op->vfs_extattrctl)(MP, C, FN, NS, N)
+#define VFS_SYSCTL(MP, OP, REQ) \
+ (*(MP)->mnt_op->vfs_sysctl)(MP, OP, REQ)
+#define VFS_SUSP_CLEAN(MP) \
+ ({if (*(MP)->mnt_op->vfs_susp_clean != NULL) \
+ (*(MP)->mnt_op->vfs_susp_clean)(MP); })
+
+#define VFS_NEEDSGIANT_(MP) \
+ ((MP) != NULL && ((MP)->mnt_kern_flag & MNTK_MPSAFE) == 0)
+
+#define VFS_NEEDSGIANT(MP) __extension__ \
+({ \
+ struct mount *_mp; \
+ _mp = (MP); \
+ VFS_NEEDSGIANT_(_mp); \
+})
+
+#define VFS_LOCK_GIANT(MP) __extension__ \
+({ \
+ int _locked; \
+ struct mount *_mp; \
+ _mp = (MP); \
+ if (VFS_NEEDSGIANT_(_mp)) { \
+ mtx_lock(&Giant); \
+ _locked = 1; \
+ } else \
+ _locked = 0; \
+ _locked; \
+})
+#define VFS_UNLOCK_GIANT(locked) if ((locked)) mtx_unlock(&Giant);
+#define VFS_ASSERT_GIANT(MP) do \
+{ \
+ struct mount *_mp; \
+ _mp = (MP); \
+ if (VFS_NEEDSGIANT_(_mp)) \
+ mtx_assert(&Giant, MA_OWNED); \
+} while (0)
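+
+/*
+ * The usual pattern around filesystem calls (a sketch; error is assumed
+ * declared by the caller):
+ *
+ *	int vfslocked;
+ *
+ *	vfslocked = VFS_LOCK_GIANT(mp);
+ *	error = VFS_SYNC(mp, MNT_WAIT);
+ *	VFS_UNLOCK_GIANT(vfslocked);
+ */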
+
+#define VFS_KNOTE_LOCKED(vp, hint) do \
+{ \
+ if (((vp)->v_vflag & VV_NOKNOTE) == 0) \
+ VN_KNOTE((vp), (hint), KNF_LISTLOCKED); \
+} while (0)
+
+#define VFS_KNOTE_UNLOCKED(vp, hint) do \
+{ \
+ if (((vp)->v_vflag & VV_NOKNOTE) == 0) \
+ VN_KNOTE((vp), (hint), 0); \
+} while (0)
+
+#include <rtems/freebsd/sys/module.h>
+
+/*
+ * Version numbers.
+ */
+#define VFS_VERSION_00 0x19660120
+#define VFS_VERSION VFS_VERSION_00
+
+#define VFS_SET(vfsops, fsname, flags) \
+ static struct vfsconf fsname ## _vfsconf = { \
+ .vfc_version = VFS_VERSION, \
+ .vfc_name = #fsname, \
+ .vfc_vfsops = &vfsops, \
+ .vfc_typenum = -1, \
+ .vfc_flags = flags, \
+ }; \
+ static moduledata_t fsname ## _mod = { \
+ #fsname, \
+ vfs_modevent, \
+ & fsname ## _vfsconf \
+ }; \
+ DECLARE_MODULE(fsname, fsname ## _mod, SI_SUB_VFS, SI_ORDER_MIDDLE)
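+
+/*
+ * Illustrative registration (the "foofs" names are hypothetical): a
+ * filesystem fills in a vfsops vector, typically borrowing the vfs_std*()
+ * defaults declared below for operations it does not implement, and then
+ * registers itself:
+ *
+ *	static struct vfsops foofs_vfsops = {
+ *		.vfs_mount =	foofs_mount,
+ *		.vfs_unmount =	foofs_unmount,
+ *		.vfs_root =	vfs_stdroot,
+ *		.vfs_statfs =	foofs_statfs,
+ *	};
+ *	VFS_SET(foofs_vfsops, foofs, 0);
+ */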
+
+extern char *mountrootfsname;
+
+/*
+ * exported vnode operations
+ */
+
+int dounmount(struct mount *, int, struct thread *);
+
+int kernel_mount(struct mntarg *ma, int flags);
+int kernel_vmount(int flags, ...);
+struct mntarg *mount_arg(struct mntarg *ma, const char *name, const void *val, int len);
+struct mntarg *mount_argb(struct mntarg *ma, int flag, const char *name);
+struct mntarg *mount_argf(struct mntarg *ma, const char *name, const char *fmt, ...);
+struct mntarg *mount_argsu(struct mntarg *ma, const char *name, const void *val, int len);
+void statfs_scale_blocks(struct statfs *sf, long max_size);
+struct vfsconf *vfs_byname(const char *);
+struct vfsconf *vfs_byname_kld(const char *, struct thread *td, int *);
+void vfs_mount_destroy(struct mount *);
+void vfs_event_signal(fsid_t *, u_int32_t, intptr_t);
+void vfs_freeopts(struct vfsoptlist *opts);
+void vfs_deleteopt(struct vfsoptlist *opts, const char *name);
+int vfs_buildopts(struct uio *auio, struct vfsoptlist **options);
+int vfs_flagopt(struct vfsoptlist *opts, const char *name, u_int *w, u_int val);
+int vfs_getopt(struct vfsoptlist *, const char *, void **, int *);
+int vfs_getopt_pos(struct vfsoptlist *opts, const char *name);
+char *vfs_getopts(struct vfsoptlist *, const char *, int *error);
+int vfs_copyopt(struct vfsoptlist *, const char *, void *, int);
+int vfs_filteropt(struct vfsoptlist *, const char **legal);
+void vfs_opterror(struct vfsoptlist *opts, const char *fmt, ...);
+int vfs_scanopt(struct vfsoptlist *opts, const char *name, const char *fmt, ...);
+int vfs_setopt(struct vfsoptlist *opts, const char *name, void *value,
+ int len);
+int vfs_setopt_part(struct vfsoptlist *opts, const char *name, void *value,
+ int len);
+int vfs_setopts(struct vfsoptlist *opts, const char *name,
+ const char *value);
+int vfs_setpublicfs /* set publicly exported fs */
+ (struct mount *, struct netexport *, struct export_args *);
+void vfs_msync(struct mount *, int);
+int vfs_busy(struct mount *, int);
+int vfs_export /* process mount export info */
+ (struct mount *, struct export_args *);
+int vfs_allocate_syncvnode(struct mount *);
+int vfs_donmount(struct thread *td, int fsflags, struct uio *fsoptions);
+void vfs_getnewfsid(struct mount *);
+struct cdev *vfs_getrootfsid(struct mount *);
+struct mount *vfs_getvfs(fsid_t *); /* return vfs given fsid */
+struct mount *vfs_busyfs(fsid_t *);
+int vfs_modevent(module_t, int, void *);
+void vfs_mount_error(struct mount *, const char *, ...);
+void vfs_mountroot(void); /* mount our root filesystem */
+void vfs_mountedfrom(struct mount *, const char *from);
+void vfs_oexport_conv(const struct oexport_args *oexp,
+ struct export_args *exp);
+void vfs_ref(struct mount *);
+void vfs_rel(struct mount *);
+struct mount *vfs_mount_alloc(struct vnode *, struct vfsconf *, const char *,
+ struct ucred *);
+int vfs_suser(struct mount *, struct thread *);
+void vfs_unbusy(struct mount *);
+void vfs_unmountall(void);
+extern TAILQ_HEAD(mntlist, mount) mountlist; /* mounted filesystem list */
+extern struct mtx mountlist_mtx;
+extern struct nfs_public nfs_pub;
+
+/*
+ * Declarations for these vfs default operations are located in
+ * kern/vfs_default.c; they should be used instead of making "dummy"
+ * functions or casting entries in the VFS op table to "eopnotsupp()".
+ */
+vfs_root_t vfs_stdroot;
+vfs_quotactl_t vfs_stdquotactl;
+vfs_statfs_t vfs_stdstatfs;
+vfs_sync_t vfs_stdsync;
+vfs_sync_t vfs_stdnosync;
+vfs_vget_t vfs_stdvget;
+vfs_fhtovp_t vfs_stdfhtovp;
+vfs_checkexp_t vfs_stdcheckexp;
+vfs_init_t vfs_stdinit;
+vfs_uninit_t vfs_stduninit;
+vfs_extattrctl_t vfs_stdextattrctl;
+vfs_sysctl_t vfs_stdsysctl;
+
+#else /* !_KERNEL */
+
+#include <rtems/freebsd/sys/cdefs.h>
+
+struct stat;
+
+__BEGIN_DECLS
+int fhopen(const struct fhandle *, int);
+int fhstat(const struct fhandle *, struct stat *);
+int fhstatfs(const struct fhandle *, struct statfs *);
+int fstatfs(int, struct statfs *);
+int getfh(const char *, fhandle_t *);
+int getfsstat(struct statfs *, long, int);
+int getmntinfo(struct statfs **, int);
+int lgetfh(const char *, fhandle_t *);
+int mount(const char *, const char *, int, void *);
+int nmount(struct iovec *, unsigned int, int);
+int statfs(const char *, struct statfs *);
+int unmount(const char *, int);
+
+/* C library stuff */
+int getvfsbyname(const char *, struct xvfsconf *);
+__END_DECLS
+
+#endif /* _KERNEL */
+
+#endif /* !_SYS_MOUNT_HH_ */
diff --git a/rtems/freebsd/sys/mutex.h b/rtems/freebsd/sys/mutex.h
new file mode 100644
index 00000000..2961ae4e
--- /dev/null
+++ b/rtems/freebsd/sys/mutex.h
@@ -0,0 +1,463 @@
+/*-
+ * Copyright (c) 1997 Berkeley Software Design, Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Berkeley Software Design Inc's name may not be used to endorse or
+ * promote products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from BSDI $Id: mutex.h,v 2.7.2.35 2000/04/27 03:10:26 cp Exp $
+ * $FreeBSD$
+ */
+
+#ifndef _SYS_MUTEX_HH_
+#define _SYS_MUTEX_HH_
+
+#ifndef LOCORE
+#include <rtems/freebsd/sys/queue.h>
+#include <rtems/freebsd/sys/_lock.h>
+#include <rtems/freebsd/sys/_mutex.h>
+
+#ifdef _KERNEL
+#include <rtems/freebsd/sys/pcpu.h>
+#include <rtems/freebsd/sys/lock_profile.h>
+#include <rtems/freebsd/sys/lockstat.h>
+#include <rtems/freebsd/machine/atomic.h>
+#include <rtems/freebsd/machine/cpufunc.h>
+#endif /* _KERNEL */
+#endif /* !LOCORE */
+
+#include <rtems/freebsd/machine/mutex.h>
+
+#ifdef _KERNEL
+
+#ifdef __rtems__
+#define MUTEX_NOINLINE 1
+#endif /* __rtems__ */
+/*
+ * Mutex types and options passed to mtx_init(). MTX_QUIET and MTX_DUPOK
+ * can also be passed in.
+ */
+#define MTX_DEF 0x00000000 /* DEFAULT (sleep) lock */
+#define MTX_SPIN 0x00000001 /* Spin lock (disables interrupts) */
+#define MTX_RECURSE 0x00000004 /* Option: lock allowed to recurse */
+#define MTX_NOWITNESS 0x00000008 /* Don't do any witness checking. */
+#define MTX_NOPROFILE 0x00000020 /* Don't profile this lock */
+
+/*
+ * Option flags passed to certain lock/unlock routines, through the use
+ * of corresponding mtx_{lock,unlock}_flags() interface macros.
+ */
+#define MTX_QUIET LOP_QUIET /* Don't log a mutex event */
+#define MTX_DUPOK LOP_DUPOK /* Don't log a duplicate acquire */
+
+/*
+ * State bits kept in mutex->mtx_lock, for the DEFAULT lock type. None of this,
+ * with the exception of MTX_UNOWNED, applies to spin locks.
+ */
+#define MTX_RECURSED 0x00000001 /* lock recursed (for MTX_DEF only) */
+#define MTX_CONTESTED 0x00000002 /* lock contested (for MTX_DEF only) */
+#define MTX_UNOWNED 0x00000004 /* Cookie for free mutex */
+#define MTX_FLAGMASK (MTX_RECURSED | MTX_CONTESTED | MTX_UNOWNED)
+
+/*
+ * Value stored in mutex->mtx_lock to denote a destroyed mutex.
+ */
+#define MTX_DESTROYED (MTX_CONTESTED | MTX_UNOWNED)
+
+#endif /* _KERNEL */
+
+#ifndef LOCORE
+
+/*
+ * XXX: Friendly reminder to fix things in MP code that is presently being
+ * XXX: worked on.
+ */
+#define mp_fixme(string)
+
+#ifdef _KERNEL
+
+/*
+ * Prototypes
+ *
+ * NOTE: Functions prepended with `_' (underscore) are exported to other parts
+ * of the kernel via macros, thus allowing us to use the cpp LOCK_FILE
+ * and LOCK_LINE. These functions should not be called directly by any
+ * code using the API. Their macros cover their functionality.
+ *
+ * [See below for descriptions]
+ *
+ */
+void mtx_init(struct mtx *m, const char *name, const char *type, int opts);
+void mtx_destroy(struct mtx *m);
+void mtx_sysinit(void *arg);
+void mutex_init(void);
+void _mtx_lock_sleep(struct mtx *m, uintptr_t tid, int opts,
+ const char *file, int line);
+void _mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line);
+#ifdef SMP
+void _mtx_lock_spin(struct mtx *m, uintptr_t tid, int opts,
+ const char *file, int line);
+#endif
+void _mtx_unlock_spin(struct mtx *m, int opts, const char *file, int line);
+int _mtx_trylock(struct mtx *m, int opts, const char *file, int line);
+void _mtx_lock_flags(struct mtx *m, int opts, const char *file, int line);
+void _mtx_unlock_flags(struct mtx *m, int opts, const char *file, int line);
+void _mtx_lock_spin_flags(struct mtx *m, int opts, const char *file,
+ int line);
+void _mtx_unlock_spin_flags(struct mtx *m, int opts, const char *file,
+ int line);
+#if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
+void _mtx_assert(struct mtx *m, int what, const char *file, int line);
+#endif
+void _thread_lock_flags(struct thread *, int, const char *, int);
+
+#define thread_lock(tdp) \
+ _thread_lock_flags((tdp), 0, __FILE__, __LINE__)
+#define thread_lock_flags(tdp, opt) \
+ _thread_lock_flags((tdp), (opt), __FILE__, __LINE__)
+#define thread_unlock(tdp) \
+ mtx_unlock_spin((tdp)->td_lock)
+
+#define mtx_recurse lock_object.lo_data
+
+/*
+ * We define our machine-independent (unoptimized) mutex micro-operations
+ * here, if they are not already defined in the machine-dependent mutex.h
+ */
+
+/* Try to obtain mtx_lock once. */
+#ifndef _obtain_lock
+#define _obtain_lock(mp, tid) \
+ atomic_cmpset_acq_ptr(&(mp)->mtx_lock, MTX_UNOWNED, (tid))
+#endif
+
+/* Try to release mtx_lock if it is unrecursed and uncontested. */
+#ifndef _release_lock
+#define _release_lock(mp, tid) \
+ atomic_cmpset_rel_ptr(&(mp)->mtx_lock, (tid), MTX_UNOWNED)
+#endif
+
+/* Release mtx_lock quickly, assuming we own it. */
+#ifndef _release_lock_quick
+#define _release_lock_quick(mp) \
+ atomic_store_rel_ptr(&(mp)->mtx_lock, MTX_UNOWNED)
+#endif
+
+/*
+ * Obtain a sleep lock inline, or call the "hard" function if we can't get it
+ * easily.
+ */
+#ifndef _get_sleep_lock
+#define _get_sleep_lock(mp, tid, opts, file, line) do { \
+ uintptr_t _tid = (uintptr_t)(tid); \
+ if (!_obtain_lock((mp), _tid)) \
+ _mtx_lock_sleep((mp), _tid, (opts), (file), (line)); \
+ else \
+ LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_MTX_LOCK_ACQUIRE, \
+ mp, 0, 0, (file), (line)); \
+} while (0)
+#endif
+
+/*
+ * Obtain a spin lock inline, or call the "hard" function if we can't get it
+ * easily. For spinlocks, we handle recursion inline (it turns out that function
+ * calls can be significantly expensive on some architectures).
+ * Since spin locks are not _too_ common, inlining this code is not too big
+ * a deal.
+ */
+#ifndef _get_spin_lock
+#ifdef SMP
+#define _get_spin_lock(mp, tid, opts, file, line) do { \
+ uintptr_t _tid = (uintptr_t)(tid); \
+ spinlock_enter(); \
+ if (!_obtain_lock((mp), _tid)) { \
+ if ((mp)->mtx_lock == _tid) \
+ (mp)->mtx_recurse++; \
+ else { \
+ _mtx_lock_spin((mp), _tid, (opts), (file), (line)); \
+ } \
+ } else \
+ LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_MTX_SPIN_LOCK_ACQUIRE, \
+ mp, 0, 0, (file), (line)); \
+} while (0)
+#else /* SMP */
+#define _get_spin_lock(mp, tid, opts, file, line) do { \
+ uintptr_t _tid = (uintptr_t)(tid); \
+ \
+ spinlock_enter(); \
+ if ((mp)->mtx_lock == _tid) \
+ (mp)->mtx_recurse++; \
+ else { \
+ KASSERT((mp)->mtx_lock == MTX_UNOWNED, ("corrupt spinlock")); \
+ (mp)->mtx_lock = _tid; \
+ } \
+} while (0)
+#endif /* SMP */
+#endif
+
+/*
+ * Release a sleep lock inline, or call the "hard" function if we can't do it
+ * easily.
+ */
+#ifndef _rel_sleep_lock
+#define _rel_sleep_lock(mp, tid, opts, file, line) do { \
+ uintptr_t _tid = (uintptr_t)(tid); \
+ \
+ if (!_release_lock((mp), _tid)) \
+ _mtx_unlock_sleep((mp), (opts), (file), (line)); \
+} while (0)
+#endif
+
+/*
+ * For spinlocks, we can handle everything inline, as it's pretty simple and
+ * a function call would be too expensive (at least on some architectures).
+ * Since spin locks are not _too_ common, inlining this code is not too big
+ * a deal.
+ *
+ * Since we always perform a spinlock_enter() when attempting to acquire a
+ * spin lock, we need to always perform a matching spinlock_exit() when
+ * releasing a spin lock. This includes the recursion cases.
+ */
+#ifndef _rel_spin_lock
+#ifdef SMP
+#define _rel_spin_lock(mp) do { \
+ if (mtx_recursed((mp))) \
+ (mp)->mtx_recurse--; \
+ else { \
+ LOCKSTAT_PROFILE_RELEASE_LOCK(LS_MTX_SPIN_UNLOCK_RELEASE, \
+ mp); \
+ _release_lock_quick((mp)); \
+ } \
+ spinlock_exit(); \
+} while (0)
+#else /* SMP */
+#define _rel_spin_lock(mp) do { \
+ if (mtx_recursed((mp))) \
+ (mp)->mtx_recurse--; \
+ else { \
+ LOCKSTAT_PROFILE_RELEASE_LOCK(LS_MTX_SPIN_UNLOCK_RELEASE, \
+ mp); \
+ (mp)->mtx_lock = MTX_UNOWNED; \
+ } \
+ spinlock_exit(); \
+} while (0)
+#endif /* SMP */
+#endif
+
+/*
+ * Exported lock manipulation interface.
+ *
+ * mtx_lock(m) locks MTX_DEF mutex `m'
+ *
+ * mtx_lock_spin(m) locks MTX_SPIN mutex `m'
+ *
+ * mtx_unlock(m) unlocks MTX_DEF mutex `m'
+ *
+ * mtx_unlock_spin(m) unlocks MTX_SPIN mutex `m'
+ *
+ * mtx_lock_spin_flags(m, opts) and mtx_lock_flags(m, opts) locks mutex `m'
+ * and passes option flags `opts' to the "hard" function, if required.
+ * With these routines, it is possible to pass flags such as MTX_QUIET
+ * to the appropriate lock manipulation routines.
+ *
+ * mtx_trylock(m) attempts to acquire MTX_DEF mutex `m' but doesn't sleep if
+ * it cannot. Rather, it returns 0 on failure and non-zero on success.
+ * It does NOT handle recursion as we assume that if a caller is properly
+ * using this part of the interface, he will know that the lock in question
+ * is _not_ recursed.
+ *
+ * mtx_trylock_flags(m, opts) is used the same way as mtx_trylock() but accepts
+ * relevant option flags `opts.'
+ *
+ * mtx_initialized(m) returns non-zero if the lock `m' has been initialized.
+ *
+ * mtx_owned(m) returns non-zero if the current thread owns the lock `m'
+ *
+ * mtx_recursed(m) returns non-zero if the lock `m' is presently recursed.
+ */
+#define mtx_lock(m) mtx_lock_flags((m), 0)
+#define mtx_lock_spin(m) mtx_lock_spin_flags((m), 0)
+#define mtx_trylock(m) mtx_trylock_flags((m), 0)
+#define mtx_unlock(m) mtx_unlock_flags((m), 0)
+#define mtx_unlock_spin(m) mtx_unlock_spin_flags((m), 0)
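+
+/*
+ * Canonical lifecycle (a sketch; foo_mtx and the protected data are
+ * illustrative):
+ *
+ *	static struct mtx foo_mtx;
+ *
+ *	mtx_init(&foo_mtx, "foo", NULL, MTX_DEF);
+ *	...
+ *	mtx_lock(&foo_mtx);
+ *	... access data protected by foo_mtx ...
+ *	mtx_unlock(&foo_mtx);
+ *	...
+ *	mtx_destroy(&foo_mtx);
+ */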
+
+struct mtx_pool;
+
+struct mtx_pool *mtx_pool_create(const char *mtx_name, int pool_size, int opts);
+void mtx_pool_destroy(struct mtx_pool **poolp);
+struct mtx *mtx_pool_find(struct mtx_pool *pool, void *ptr);
+struct mtx *mtx_pool_alloc(struct mtx_pool *pool);
+#define mtx_pool_lock(pool, ptr) \
+ mtx_lock(mtx_pool_find((pool), (ptr)))
+#define mtx_pool_lock_spin(pool, ptr) \
+ mtx_lock_spin(mtx_pool_find((pool), (ptr)))
+#define mtx_pool_unlock(pool, ptr) \
+ mtx_unlock(mtx_pool_find((pool), (ptr)))
+#define mtx_pool_unlock_spin(pool, ptr) \
+ mtx_unlock_spin(mtx_pool_find((pool), (ptr)))
+
+/*
+ * mtxpool_lockbuilder is a pool of sleep locks that is not witness
+ * checked and should only be used for building higher level locks.
+ *
+ * mtxpool_sleep is a general purpose pool of sleep mutexes.
+ */
+extern struct mtx_pool *mtxpool_lockbuilder;
+extern struct mtx_pool *mtxpool_sleep;
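+
+/*
+ * Pool mutexes map an arbitrary address to a lock, e.g. (a sketch; obj is
+ * any pointer the caller wants serialized):
+ *
+ *	mtx_pool_lock(mtxpool_sleep, obj);
+ *	...
+ *	mtx_pool_unlock(mtxpool_sleep, obj);
+ */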
+
+#ifndef LOCK_DEBUG
+#error LOCK_DEBUG not defined, include <sys/lock.h> before <sys/mutex.h>
+#endif
+#if LOCK_DEBUG > 0 || defined(MUTEX_NOINLINE)
+#define mtx_lock_flags(m, opts) \
+ _mtx_lock_flags((m), (opts), LOCK_FILE, LOCK_LINE)
+#define mtx_unlock_flags(m, opts) \
+ _mtx_unlock_flags((m), (opts), LOCK_FILE, LOCK_LINE)
+#define mtx_lock_spin_flags(m, opts) \
+ _mtx_lock_spin_flags((m), (opts), LOCK_FILE, LOCK_LINE)
+#define mtx_unlock_spin_flags(m, opts) \
+ _mtx_unlock_spin_flags((m), (opts), LOCK_FILE, LOCK_LINE)
+#else /* LOCK_DEBUG == 0 && !MUTEX_NOINLINE */
+#define mtx_lock_flags(m, opts) \
+ _get_sleep_lock((m), curthread, (opts), LOCK_FILE, LOCK_LINE)
+#define mtx_unlock_flags(m, opts) \
+ _rel_sleep_lock((m), curthread, (opts), LOCK_FILE, LOCK_LINE)
+#define mtx_lock_spin_flags(m, opts) \
+ _get_spin_lock((m), curthread, (opts), LOCK_FILE, LOCK_LINE)
+#define mtx_unlock_spin_flags(m, opts) \
+ _rel_spin_lock((m))
+#endif /* LOCK_DEBUG > 0 || MUTEX_NOINLINE */
+
+#define mtx_trylock_flags(m, opts) \
+ _mtx_trylock((m), (opts), LOCK_FILE, LOCK_LINE)
+
+#define mtx_sleep(chan, mtx, pri, wmesg, timo) \
+ _sleep((chan), &(mtx)->lock_object, (pri), (wmesg), (timo))
+
+#define mtx_initialized(m) lock_initalized(&(m)->lock_object)
+
+#ifndef __rtems__
+#define mtx_owned(m) (((m)->mtx_lock & ~MTX_FLAGMASK) == (uintptr_t)curthread)
+
+#define mtx_recursed(m) ((m)->mtx_recurse != 0)
+#else /* __rtems__ */
+int mtx_owned(struct mtx *m);
+int mtx_recursed(struct mtx *m);
+#endif /* __rtems__ */
+
+#define mtx_name(m) ((m)->lock_object.lo_name)
+
+/*
+ * Global locks.
+ */
+extern struct mtx Giant;
+extern struct mtx blocked_lock;
+
+/*
+ * Giant lock manipulation and clean exit macros.
+ * Used to replace a plain return with one that releases Giant before
+ * returning.
+ *
+ * Note that DROP_GIANT*() needs to be paired with PICKUP_GIANT().
+ * The #ifndef is to allow lint-like tools to redefine DROP_GIANT.
+ */
+#ifndef DROP_GIANT
+#define DROP_GIANT() \
+do { \
+ int _giantcnt = 0; \
+ WITNESS_SAVE_DECL(Giant); \
+ \
+ if (mtx_owned(&Giant)) { \
+ WITNESS_SAVE(&Giant.lock_object, Giant); \
+ for (_giantcnt = 0; mtx_owned(&Giant); _giantcnt++) \
+ mtx_unlock(&Giant); \
+ }
+
+#define PICKUP_GIANT() \
+ PARTIAL_PICKUP_GIANT(); \
+} while (0)
+
+#define PARTIAL_PICKUP_GIANT() \
+ mtx_assert(&Giant, MA_NOTOWNED); \
+ if (_giantcnt > 0) { \
+ while (_giantcnt--) \
+ mtx_lock(&Giant); \
+ WITNESS_RESTORE(&Giant.lock_object, Giant); \
+ }
+#endif
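+
+/*
+ * Because DROP_GIANT() opens a block that PICKUP_GIANT() closes, the two
+ * must be paired lexically, e.g. (a sketch; foo() stands for code that
+ * must not hold Giant):
+ *
+ *	DROP_GIANT();
+ *	error = foo();
+ *	PICKUP_GIANT();
+ */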
+
+#define UGAR(rval) do { \
+ int _val = (rval); \
+ mtx_unlock(&Giant); \
+ return (_val); \
+} while (0)
+
+struct mtx_args {
+ struct mtx *ma_mtx;
+ const char *ma_desc;
+ int ma_opts;
+};
+
+#define MTX_SYSINIT(name, mtx, desc, opts) \
+ static struct mtx_args name##_args = { \
+ (mtx), \
+ (desc), \
+ (opts) \
+ }; \
+ SYSINIT(name##_mtx_sysinit, SI_SUB_LOCK, SI_ORDER_MIDDLE, \
+ mtx_sysinit, &name##_args); \
+ SYSUNINIT(name##_mtx_sysuninit, SI_SUB_LOCK, SI_ORDER_MIDDLE, \
+ mtx_destroy, (mtx))
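MTX_SYSINIT() arranges for mtx_sysinit() and mtx_destroy() to run at SI_SUB_LOCK time, so a file-scope mutex needs no explicit mtx_init() before first use. A minimal sketch (names hypothetical):

static struct mtx example_mtx;
MTX_SYSINIT(example_mtx, &example_mtx, "example mutex", MTX_DEF);

static int example_counter;

static void
example_bump(void)
{
	mtx_lock(&example_mtx);
	example_counter++;
	mtx_unlock(&example_mtx);
}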
+
+/*
+ * The INVARIANTS-enabled mtx_assert() functionality.
+ *
+ * The constants need to be defined for INVARIANT_SUPPORT because
+ * _mtx_assert() itself uses them, and INVARIANT_SUPPORT implies that
+ * _mtx_assert() must build.
+ */
+#if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
+#define MA_OWNED LA_XLOCKED
+#define MA_NOTOWNED LA_UNLOCKED
+#define MA_RECURSED LA_RECURSED
+#define MA_NOTRECURSED LA_NOTRECURSED
+#endif
+
+#ifdef INVARIANTS
+#define mtx_assert(m, what) \
+ _mtx_assert((m), (what), __FILE__, __LINE__)
+
+#define GIANT_REQUIRED mtx_assert(&Giant, MA_OWNED)
+
+#else /* INVARIANTS */
+#define mtx_assert(m, what) (void)0
+#define GIANT_REQUIRED
+#endif /* INVARIANTS */
+
+/*
+ * Common lock type names.
+ */
+#define MTX_NETWORK_LOCK "network driver"
+
+#endif /* _KERNEL */
+#endif /* !LOCORE */
+#endif /* _SYS_MUTEX_HH_ */
diff --git a/rtems/freebsd/sys/namei.h b/rtems/freebsd/sys/namei.h
new file mode 100644
index 00000000..936ffd88
--- /dev/null
+++ b/rtems/freebsd/sys/namei.h
@@ -0,0 +1 @@
+/* EMPTY */
diff --git a/rtems/freebsd/sys/osd.h b/rtems/freebsd/sys/osd.h
new file mode 100644
index 00000000..20affbbf
--- /dev/null
+++ b/rtems/freebsd/sys/osd.h
@@ -0,0 +1,101 @@
+/*-
+ * Copyright (c) 2007 Pawel Jakub Dawidek <pjd@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _SYS_OSD_HH_
+#define _SYS_OSD_HH_
+
+#include <rtems/freebsd/sys/queue.h>
+
+/*
+ * Lock key:
+ * (c) container lock (e.g. jail's pr_mtx) and/or osd_object_lock
+ * (l) osd_list_lock
+ */
+struct osd {
+ u_int osd_nslots; /* (c) */
+ void **osd_slots; /* (c) */
+ LIST_ENTRY(osd) osd_next; /* (l) */
+};
+
+#ifdef _KERNEL
+
+#define OSD_THREAD 0
+#define OSD_JAIL 1
+
+#define OSD_FIRST OSD_THREAD
+#define OSD_LAST OSD_JAIL
+
+typedef void (*osd_destructor_t)(void *value);
+typedef int (*osd_method_t)(void *obj, void *data);
+
+int osd_register(u_int type, osd_destructor_t destructor,
+ osd_method_t *methods);
+void osd_deregister(u_int type, u_int slot);
+
+int osd_set(u_int type, struct osd *osd, u_int slot, void *value);
+void *osd_get(u_int type, struct osd *osd, u_int slot);
+void osd_del(u_int type, struct osd *osd, u_int slot);
+int osd_call(u_int type, u_int method, void *obj, void *data);
+
+void osd_exit(u_int type, struct osd *osd);
+
+#define osd_thread_register(destructor) \
+ osd_register(OSD_THREAD, (destructor), NULL)
+#define osd_thread_deregister(slot) \
+ osd_deregister(OSD_THREAD, (slot))
+#define osd_thread_set(td, slot, value) \
+ osd_set(OSD_THREAD, &(td)->td_osd, (slot), (value))
+#define osd_thread_get(td, slot) \
+ osd_get(OSD_THREAD, &(td)->td_osd, (slot))
+#define osd_thread_del(td, slot) do { \
+ KASSERT((td) == curthread, ("Not curthread.")); \
+ osd_del(OSD_THREAD, &(td)->td_osd, (slot)); \
+} while (0)
+#define osd_thread_call(td, method, data) \
+ osd_call(OSD_THREAD, (method), (td), (data))
+#define osd_thread_exit(td) \
+ osd_exit(OSD_THREAD, &(td)->td_osd)
+
+#define osd_jail_register(destructor, methods) \
+ osd_register(OSD_JAIL, (destructor), (methods))
+#define osd_jail_deregister(slot) \
+ osd_deregister(OSD_JAIL, (slot))
+#define osd_jail_set(pr, slot, value) \
+ osd_set(OSD_JAIL, &(pr)->pr_osd, (slot), (value))
+#define osd_jail_get(pr, slot) \
+ osd_get(OSD_JAIL, &(pr)->pr_osd, (slot))
+#define osd_jail_del(pr, slot) \
+ osd_del(OSD_JAIL, &(pr)->pr_osd, (slot))
+#define osd_jail_call(pr, method, data) \
+ osd_call(OSD_JAIL, (method), (pr), (data))
+#define osd_jail_exit(pr) \
+ osd_exit(OSD_JAIL, &(pr)->pr_osd)
+
+#endif /* _KERNEL */
+
+#endif /* !_SYS_OSD_HH_ */
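The OSD interface lets a subsystem hang private data off objects it does not own. A hedged sketch of the per-thread pattern, with a hypothetical slot variable and M_TEMP allocations: reserve a slot once, attach data per thread, and let the destructor reclaim it at osd_exit() time.

static u_int example_slot;

static void
example_dtor(void *value)
{
	free(value, M_TEMP);		/* runs when the thread exits */
}

static void
example_sysinit(void)
{
	example_slot = osd_thread_register(example_dtor);
}

static int
example_attach(struct thread *td, void *data)
{
	return (osd_thread_set(td, example_slot, data));
}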
diff --git a/rtems/freebsd/sys/param.h b/rtems/freebsd/sys/param.h
new file mode 100644
index 00000000..085b51ce
--- /dev/null
+++ b/rtems/freebsd/sys/param.h
@@ -0,0 +1,320 @@
+/*-
+ * Copyright (c) 1982, 1986, 1989, 1993
+ * The Regents of the University of California. All rights reserved.
+ * (c) UNIX System Laboratories, Inc.
+ * All or some portions of this file are derived from material licensed
+ * to the University of California by American Telephone and Telegraph
+ * Co. or Unix System Laboratories, Inc. and are reproduced herein with
+ * the permission of UNIX System Laboratories, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)param.h 8.3 (Berkeley) 4/4/95
+ * $FreeBSD$
+ */
+
+#ifndef _SYS_PARAM_HH_
+#define _SYS_PARAM_HH_
+
+#include <rtems/freebsd/sys/_null.h>
+
+#define BSD 199506 /* System version (year & month). */
+#define BSD4_3 1
+#define BSD4_4 1
+
+/*
+ * __FreeBSD_version numbers are documented in the Porter's Handbook.
+ * If you bump the version for any reason, you should update the documentation
+ * there.
+ * Currently this lives here:
+ *
+ * doc/en_US.ISO8859-1/books/porters-handbook/book.sgml
+ *
+ * scheme is: <major><two digit minor>Rxx
+ * 'R' is in the range 0 to 4 if this is a release branch or
+ * x.0-CURRENT before RELENG_*_0 is created, otherwise 'R' is
+ * in the range 5 to 9.
+ */
+#undef __FreeBSD_version
+#define __FreeBSD_version 802000 /* Master, propagated to newvers */
+
+#ifdef _KERNEL
+#define P_OSREL_SIGSEGV 700004
+#define P_OSREL_MAP_ANON 800104
+#endif
+
+#ifndef LOCORE
+#include <rtems/freebsd/sys/types.h>
+#endif
+
+/*
+ * Machine-independent constants (some used in following include files).
+ * Redefined constants are from POSIX 1003.1 limits file.
+ *
+ * MAXCOMLEN should be >= sizeof(ac_comm) (see <acct.h>)
+ * MAXLOGNAME should be == UT_NAMESIZE+1 (see <utmp.h>)
+ */
+#include <rtems/freebsd/sys/syslimits.h>
+
+#define MAXCOMLEN 19 /* max command name remembered */
+#define MAXINTERP 32 /* max interpreter file name length */
+#define MAXLOGNAME 17 /* max login name length (incl. NUL) */
+#define MAXUPRC CHILD_MAX /* max simultaneous processes */
+#define NCARGS ARG_MAX /* max bytes for an exec function */
+#define NGROUPS (NGROUPS_MAX+1) /* max number of groups */
+#define NOFILE OPEN_MAX /* max open files per process */
+#define NOGROUP 65535 /* marker for empty group set member */
+#define MAXHOSTNAMELEN 256 /* max hostname size */
+#define SPECNAMELEN 63 /* max length of devicename */
+
+/* More types and definitions used throughout the kernel. */
+#ifdef _KERNEL
+#include <rtems/freebsd/sys/cdefs.h>
+#include <rtems/freebsd/sys/errno.h>
+#ifndef LOCORE
+#include <rtems/freebsd/sys/time.h>
+#include <rtems/freebsd/sys/priority.h>
+#endif
+
+#ifndef FALSE
+#define FALSE 0
+#endif
+#ifndef TRUE
+#define TRUE 1
+#endif
+#endif
+
+#ifndef _KERNEL
+/* Signals. */
+#include <rtems/freebsd/sys/signal.h>
+#endif
+
+/* Machine type dependent parameters. */
+#include <rtems/freebsd/machine/param.h>
+#ifndef _KERNEL
+#include <rtems/freebsd/sys/limits.h>
+#endif
+
+#ifndef _NO_NAMESPACE_POLLUTION
+
+#ifndef DEV_BSHIFT
+#define DEV_BSHIFT 9 /* log2(DEV_BSIZE) */
+#endif
+#define DEV_BSIZE (1<<DEV_BSHIFT)
+
+#ifndef BLKDEV_IOSIZE
+#define BLKDEV_IOSIZE PAGE_SIZE /* default block device I/O size */
+#endif
+#ifndef DFLTPHYS
+#define DFLTPHYS (64 * 1024) /* default max raw I/O transfer size */
+#endif
+#ifndef MAXPHYS
+#define MAXPHYS (128 * 1024) /* max raw I/O transfer size */
+#endif
+#ifndef MAXDUMPPGS
+#define MAXDUMPPGS (DFLTPHYS/PAGE_SIZE)
+#endif
+
+/*
+ * Constants related to network buffer management.
+ * MCLBYTES must be no larger than PAGE_SIZE.
+ */
+#ifndef MSIZE
+#define MSIZE 256 /* size of an mbuf */
+#endif /* MSIZE */
+
+#ifndef MCLSHIFT
+#define MCLSHIFT 11 /* convert bytes to mbuf clusters */
+#endif /* MCLSHIFT */
+
+#define MCLBYTES (1 << MCLSHIFT) /* size of an mbuf cluster */
+
+#define MJUMPAGESIZE PAGE_SIZE /* jumbo cluster 4k */
+#define MJUM9BYTES (9 * 1024) /* jumbo cluster 9k */
+#define MJUM16BYTES (16 * 1024) /* jumbo cluster 16k */
+
+/*
+ * Some macros for units conversion
+ */
+
+/* clicks to bytes */
+#ifndef ctob
+#define ctob(x) ((x)<<PAGE_SHIFT)
+#endif
+
+/* bytes to clicks */
+#ifndef btoc
+#define btoc(x) (((vm_offset_t)(x)+PAGE_MASK)>>PAGE_SHIFT)
+#endif
+
+/*
+ * btodb() is messy and perhaps slow because `bytes' may be an off_t. We
+ * want to shift an unsigned type to avoid sign extension and we don't
+ * want to widen `bytes' unnecessarily. Assume that the result fits in
+ * a daddr_t.
+ */
+#ifndef btodb
+#define btodb(bytes) /* calculates (bytes / DEV_BSIZE) */ \
+ (sizeof (bytes) > sizeof(long) \
+ ? (daddr_t)((unsigned long long)(bytes) >> DEV_BSHIFT) \
+ : (daddr_t)((unsigned long)(bytes) >> DEV_BSHIFT))
+#endif
+
+#ifndef dbtob
+#define dbtob(db) /* calculates (db * DEV_BSIZE) */ \
+ ((off_t)(db) << DEV_BSHIFT)
+#endif
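A worked example of the two conversions, assuming the default DEV_BSHIFT of 9 (512-byte device blocks):

off_t offset = 8192;			/* byte offset */
daddr_t blkno = btodb(offset);		/* 8192 >> 9 == block 16 */
off_t bytes = dbtob(blkno);		/* 16 << 9 == 8192 bytes */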
+
+#endif /* _NO_NAMESPACE_POLLUTION */
+
+#define PRIMASK 0x0ff
+#define PCATCH 0x100 /* OR'd with pri for tsleep to check signals */
+#define PDROP 0x200 /* OR'd with pri to stop re-entry of interlock mutex */
+#define PBDRY 0x400 /* for PCATCH stop is done on the user boundary */
+
+#define NZERO 0 /* default "nice" */
+
+#define NBBY 8 /* number of bits in a byte */
+#define NBPW sizeof(int) /* number of bytes per word (integer) */
+
+#define CMASK 022 /* default file mask: S_IWGRP|S_IWOTH */
+
+#define NODEV (dev_t)(-1) /* non-existent device */
+
+/*
+ * File system parameters and macros.
+ *
+ * MAXBSIZE - Filesystems are made out of blocks of at most MAXBSIZE bytes
+ * per block. MAXBSIZE may be made larger without affecting
+ * any existing filesystems as long as it does not exceed MAXPHYS,
+ * and may be made smaller at the risk of not being able to use
+ * filesystems which require a block size exceeding MAXBSIZE.
+ *
+ * BKVASIZE - Nominal buffer space per buffer, in bytes. BKVASIZE is the
+ * minimum KVM memory reservation the kernel is willing to make.
+ * Filesystems can of course request smaller chunks. Actual
+ * backing memory uses a chunk size of a page (PAGE_SIZE).
+ *
+ * If you make BKVASIZE too small you risk seriously fragmenting
+ * the buffer KVM map which may slow things down a bit. If you
+ * make it too big the kernel will not be able to optimally use
+ * the KVM memory reserved for the buffer cache and will wind
+ * up with too-few buffers.
+ *
+ * The default is 16384, roughly 2x the block size used by a
+ * normal UFS filesystem.
+ */
+#define MAXBSIZE 65536 /* must be power of 2 */
+#define BKVASIZE 16384 /* must be power of 2 */
+#define BKVAMASK (BKVASIZE-1)
+
+/*
+ * MAXPATHLEN defines the longest permissible path length after expanding
+ * symbolic links. It is used to allocate a temporary buffer from the buffer
+ * pool in which to do the name expansion, hence should be a power of two,
+ * and must be less than or equal to MAXBSIZE. MAXSYMLINKS defines the
+ * maximum number of symbolic links that may be expanded in a path name.
+ * It should be set high enough to allow all legitimate uses, but halt
+ * infinite loops reasonably quickly.
+ */
+#define MAXPATHLEN PATH_MAX
+#define MAXSYMLINKS 32
+
+/* Bit map related macros. */
+#define setbit(a,i) (((unsigned char *)(a))[(i)/NBBY] |= 1<<((i)%NBBY))
+#define clrbit(a,i) (((unsigned char *)(a))[(i)/NBBY] &= ~(1<<((i)%NBBY)))
+#define isset(a,i) \
+ (((const unsigned char *)(a))[(i)/NBBY] & (1<<((i)%NBBY)))
+#define isclr(a,i) \
+ ((((const unsigned char *)(a))[(i)/NBBY] & (1<<((i)%NBBY))) == 0)
+
+/* Macros for counting and rounding. */
+#ifndef howmany
+#define howmany(x, y) (((x)+((y)-1))/(y))
+#endif
+#define rounddown(x, y) (((x)/(y))*(y))
+#define roundup(x, y) ((((x)+((y)-1))/(y))*(y)) /* to any y */
+#define roundup2(x, y) (((x)+((y)-1))&(~((y)-1))) /* if y is a power of two */
+#define powerof2(x) ((((x)-1)&(x))==0)
+
+/* Macros for min/max. */
+#define MIN(a,b) (((a)<(b))?(a):(b))
+#define MAX(a,b) (((a)>(b))?(a):(b))
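A sketch tying the bitmap and rounding macros together (all names hypothetical): howmany() sizes a bit array in bytes, setbit()/isset() manipulate it, and roundup2() rounds up to a power-of-two boundary.

#define EXAMPLE_NITEMS	100
static unsigned char example_map[howmany(EXAMPLE_NITEMS, NBBY)];

static int
example_alloc_slot(int i)
{
	if (isset(example_map, i))
		return (-1);		/* slot already in use */
	setbit(example_map, i);
	return (i);
}

/* roundup2(EXAMPLE_NITEMS, 64) == 128, valid since powerof2(64) holds */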
+
+#ifdef _KERNEL
+/*
+ * Basic byte order function prototypes for non-inline functions.
+ */
+#ifndef LOCORE
+#ifndef _BYTEORDER_PROTOTYPED
+#define _BYTEORDER_PROTOTYPED
+__BEGIN_DECLS
+__uint32_t htonl(__uint32_t);
+__uint16_t htons(__uint16_t);
+__uint32_t ntohl(__uint32_t);
+__uint16_t ntohs(__uint16_t);
+__END_DECLS
+#endif
+#endif
+
+#ifndef lint
+#ifndef _BYTEORDER_FUNC_DEFINED
+#define _BYTEORDER_FUNC_DEFINED
+#define htonl(x) __htonl(x)
+#define htons(x) __htons(x)
+#define ntohl(x) __ntohl(x)
+#define ntohs(x) __ntohs(x)
+#endif /* !_BYTEORDER_FUNC_DEFINED */
+#endif /* lint */
+#endif /* _KERNEL */
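A userland-style sketch of the conversions prototyped above, filling a sockaddr_in (which expects network byte order) from host-order values; the <netinet/in.h> definitions are assumed.

struct sockaddr_in sin;

bzero(&sin, sizeof(sin));
sin.sin_family = AF_INET;
sin.sin_port = htons(8080);			/* 16-bit host to network */
sin.sin_addr.s_addr = htonl(INADDR_LOOPBACK);	/* 32-bit host to network */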
+
+/*
+ * Scale factor for scaled integers used to count %cpu time and load avgs.
+ *
+ * The number of CPU `tick's that map to a unique `%age' can be expressed
+ * by the formula (1 / (2 ^ (FSHIFT - 11))). The maximum load average that
+ * can be calculated (assuming 32 bits) can be closely approximated using
+ * the formula (2 ^ (2 * (16 - FSHIFT))) for (FSHIFT < 15).
+ *
+ * For the scheduler to maintain a 1:1 mapping of CPU `tick' to `%age',
+ * FSHIFT must be at least 11; this gives us a maximum load avg of ~1024.
+ */
+#define FSHIFT 11 /* bits to right of fixed binary point */
+#define FSCALE (1<<FSHIFT)
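Concretely, with FSHIFT == 11 the scale factor FSCALE is 2048, so a raw fixed-point load average of 3072 decodes to 3072 / 2048 = 1.50:

u_int raw = 3072;				/* scaled load average */
u_int whole = raw / FSCALE;			/* 1 */
u_int frac100 = (raw % FSCALE) * 100 / FSCALE;	/* 50, i.e. "1.50" */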
+
+#define dbtoc(db) /* calculates devblks to pages */ \
+ (((db) + (ctodb(1) - 1)) >> (PAGE_SHIFT - DEV_BSHIFT))
+
+#define ctodb(db) /* calculates pages to devblks */ \
+ ((db) << (PAGE_SHIFT - DEV_BSHIFT))
+
+/*
+ * Given the pointer x to the member m of the struct s, return
+ * a pointer to the containing structure.
+ */
+#define member2struct(s, m, x) \
+ ((struct s *)(void *)((char *)(x) - offsetof(struct s, m)))
+
+#endif /* _SYS_PARAM_HH_ */
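member2struct() is the classic containerof idiom: given a pointer to a member, recover the enclosing structure. A sketch with a hypothetical struct example:

struct example {
	int	e_key;
	int	e_value;
};

static int
key_to_value(int *keyp)
{
	struct example *e = member2struct(example, e_key, keyp);

	return (e->e_value);
}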
diff --git a/rtems/freebsd/sys/pcpu.h b/rtems/freebsd/sys/pcpu.h
new file mode 100644
index 00000000..c4523226
--- /dev/null
+++ b/rtems/freebsd/sys/pcpu.h
@@ -0,0 +1,228 @@
+/*-
+ * Copyright (c) 2001 Wind River Systems, Inc.
+ * All rights reserved.
+ * Written by: John Baldwin <jhb@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the author nor the names of any co-contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _SYS_PCPU_HH_
+#define _SYS_PCPU_HH_
+
+#ifdef LOCORE
+#error "no assembler-serviceable parts inside"
+#endif
+
+#include <rtems/freebsd/sys/queue.h>
+#ifndef __rtems__
+#include <rtems/freebsd/sys/vmmeter.h>
+#endif
+#include <rtems/freebsd/sys/resource.h>
+#include <rtems/freebsd/machine/pcpu.h>
+
+/*
+ * Define a set for pcpu data.
+ */
+extern uintptr_t *__start_set_pcpu;
+extern uintptr_t *__stop_set_pcpu;
+
+/*
+ * Array of dynamic pcpu base offsets. Indexed by id.
+ */
+extern uintptr_t dpcpu_off[];
+
+/*
+ * Convenience defines.
+ */
+#define DPCPU_START ((uintptr_t)&__start_set_pcpu)
+#define DPCPU_STOP ((uintptr_t)&__stop_set_pcpu)
+#define DPCPU_BYTES (DPCPU_STOP - DPCPU_START)
+#define DPCPU_MODMIN 2048
+#define DPCPU_SIZE roundup2(DPCPU_BYTES, PAGE_SIZE)
+#define DPCPU_MODSIZE (DPCPU_SIZE - (DPCPU_BYTES - DPCPU_MODMIN))
+
+/*
+ * Declaration and definition.
+ */
+#define DPCPU_NAME(n) pcpu_entry_##n
+#define DPCPU_DECLARE(t, n) extern t DPCPU_NAME(n)
+#define DPCPU_DEFINE(t, n) t DPCPU_NAME(n) __section("set_pcpu") __used
+
+/*
+ * Accessors with a given base.
+ */
+#define _DPCPU_PTR(b, n) \
+ (__typeof(DPCPU_NAME(n))*)((b) + (uintptr_t)&DPCPU_NAME(n))
+#define _DPCPU_GET(b, n) (*_DPCPU_PTR(b, n))
+#define _DPCPU_SET(b, n, v) (*_DPCPU_PTR(b, n) = v)
+
+/*
+ * Accessors for the current cpu.
+ */
+#define DPCPU_PTR(n) _DPCPU_PTR(PCPU_GET(dynamic), n)
+#define DPCPU_GET(n) (*DPCPU_PTR(n))
+#define DPCPU_SET(n, v) (*DPCPU_PTR(n) = v)
+
+/*
+ * Accessors for remote cpus.
+ */
+#define DPCPU_ID_PTR(i, n) _DPCPU_PTR(dpcpu_off[(i)], n)
+#define DPCPU_ID_GET(i, n) (*DPCPU_ID_PTR(i, n))
+#define DPCPU_ID_SET(i, n, v) (*DPCPU_ID_PTR(i, n) = v)
+
+/*
+ * Utility macros.
+ */
+#define DPCPU_SUM(n) __extension__ \
+({ \
+ u_int _i; \
+ __typeof(*DPCPU_PTR(n)) sum; \
+ \
+ sum = 0; \
+ CPU_FOREACH(_i) { \
+ sum += *DPCPU_ID_PTR(_i, n); \
+ } \
+ sum; \
+})
+
+#define DPCPU_VARSUM(n, var) __extension__ \
+({ \
+ u_int _i; \
+ __typeof((DPCPU_PTR(n))->var) sum; \
+ \
+ sum = 0; \
+ CPU_FOREACH(_i) { \
+ sum += (DPCPU_ID_PTR(_i, n))->var; \
+ } \
+ sum; \
+})
+
+#define DPCPU_ZERO(n) do { \
+ u_int _i; \
+ \
+ CPU_FOREACH(_i) { \
+ bzero(DPCPU_ID_PTR(_i, n), sizeof(*DPCPU_PTR(n))); \
+ } \
+} while (0)
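A sketch of a dynamic per-CPU counter built on the accessors above: each CPU bumps only its own slot (assumed to run pinned or inside a critical section), and DPCPU_SUM() folds every slot into one total.

DPCPU_DEFINE(u_long, example_count);

static void
example_hit(void)
{
	(*DPCPU_PTR(example_count))++;		/* this CPU's instance */
}

static u_long
example_total(void)
{
	return (DPCPU_SUM(example_count));	/* walks CPU_FOREACH */
}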
+
+/*
+ * XXXUPS remove as soon as we have per cpu variable
+ * linker sets and can define rm_queue in _rm_lock.h
+ */
+struct rm_queue {
+ struct rm_queue* volatile rmq_next;
+ struct rm_queue* volatile rmq_prev;
+};
+
+#define PCPU_NAME_LEN (sizeof("CPU ") + sizeof(__XSTRING(MAXCPU))) /* "CPU " + id + NUL */
+
+/*
+ * This structure maps out the global data that needs to be kept on a
+ * per-cpu basis. The members are accessed via the PCPU_GET/SET/PTR
+ * macros defined in <machine/pcpu.h>. Machine dependent fields are
+ * defined in the PCPU_MD_FIELDS macro defined in <machine/pcpu.h>.
+ */
+struct pcpu {
+ struct thread *pc_curthread; /* Current thread */
+ struct thread *pc_idlethread; /* Idle thread */
+ struct thread *pc_fpcurthread; /* Fp state owner */
+ struct thread *pc_deadthread; /* Zombie thread or NULL */
+ struct pcb *pc_curpcb; /* Current pcb */
+ uint64_t pc_switchtime; /* cpu_ticks() at last csw */
+ int pc_switchticks; /* `ticks' at last csw */
+ u_int pc_cpuid; /* This cpu number */
+ cpumask_t pc_cpumask; /* This cpu mask */
+ cpumask_t pc_other_cpus; /* Mask of all other cpus */
+ SLIST_ENTRY(pcpu) pc_allcpu;
+ struct lock_list_entry *pc_spinlocks;
+#ifdef KTR
+ char pc_name[PCPU_NAME_LEN]; /* String name for KTR */
+#endif
+#ifndef __rtems__
+ struct vmmeter pc_cnt; /* VM stats counters */
+#endif
+ long pc_cp_time[CPUSTATES]; /* statclock ticks */
+ struct device *pc_device;
+ void *pc_netisr; /* netisr SWI cookie */
+
+ /*
+ * Stuff for read mostly lock
+ *
+ * XXXUPS remove as soon as we have per cpu variable
+ * linker sets.
+ */
+ struct rm_queue pc_rm_queue;
+
+ uintptr_t pc_dynamic; /* Dynamic per-cpu data area */
+
+ /*
+ * Keep MD fields last, so that CPU-specific variations on a
+ * single architecture don't result in offset variations of
+ * the machine-independent fields of the pcpu. Even though
+ * the pcpu structure is private to the kernel, some ports
+ * (e.g., lsof, part of gtop) define _KERNEL and include this
+ * header. While strictly speaking this is wrong, there's no
+ * reason not to keep the offsets of the MI fields constant
+ * if only to make kernel debugging easier.
+ */
+ PCPU_MD_FIELDS;
+} __aligned(128);
+
+#ifdef _KERNEL
+
+SLIST_HEAD(cpuhead, pcpu);
+
+extern struct cpuhead cpuhead;
+extern struct pcpu *cpuid_to_pcpu[MAXCPU];
+
+#define curcpu PCPU_GET(cpuid)
+#define curproc (curthread->td_proc)
+#ifndef curthread
+#define curthread PCPU_GET(curthread)
+#endif
+#define curvidata PCPU_GET(vidata)
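The convenience macros above expand to PCPU_GET() reads of the running CPU's struct pcpu; a small illustrative sketch:

static void
example_whereami(void)
{
	struct thread *td = curthread;		/* pc_curthread of this CPU */

	printf("thread %d on cpu %u\n", (int)td->td_tid, curcpu);
}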
+
+/*
+ * Machine dependent callouts. cpu_pcpu_init() is responsible for
+ * initializing machine dependent fields of struct pcpu, and
+ * db_show_mdpcpu() is responsible for handling machine dependent
+ * fields for the DDB 'show pcpu' command.
+ */
+void cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size);
+void db_show_mdpcpu(struct pcpu *pcpu);
+
+void *dpcpu_alloc(int size);
+void dpcpu_copy(void *s, int size);
+void dpcpu_free(void *s, int size);
+void dpcpu_init(void *dpcpu, int cpuid);
+void pcpu_destroy(struct pcpu *pcpu);
+struct pcpu *pcpu_find(u_int cpuid);
+void pcpu_init(struct pcpu *pcpu, int cpuid, size_t size);
+
+#endif /* _KERNEL */
+
+#endif /* !_SYS_PCPU_HH_ */
diff --git a/rtems/freebsd/sys/poll.h b/rtems/freebsd/sys/poll.h
new file mode 100644
index 00000000..280832eb
--- /dev/null
+++ b/rtems/freebsd/sys/poll.h
@@ -0,0 +1,104 @@
+/*-
+ * Copyright (c) 1997 Peter Wemm <peter@freebsd.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _SYS_POLL_HH_
+#define _SYS_POLL_HH_
+
+#include <rtems/freebsd/sys/cdefs.h>
+
+/*
+ * This file is intended to be compatible with the traditional poll.h.
+ */
+
+typedef unsigned int nfds_t;
+
+/*
+ * This structure is passed as an array to poll(2).
+ */
+struct pollfd {
+ int fd; /* which file descriptor to poll */
+ short events; /* events we are interested in */
+ short revents; /* events found on return */
+};
+
+/*
+ * Requestable events. If poll(2) finds any of these set, they are
+ * copied to revents on return.
+ * XXX Note that FreeBSD doesn't make much distinction between POLLPRI
+ * and POLLRDBAND since none of the file types have distinct priority
+ * bands - and only some have an urgent "mode".
+ * XXX Note POLLIN isn't really supported in true SYSV terms. Under SYSV
+ * POLLIN includes all of normal, band and urgent data. Most poll handlers
+ * on FreeBSD only treat it as "normal" data.
+ */
+#define POLLIN 0x0001 /* any readable data available */
+#define POLLPRI 0x0002 /* OOB/Urgent readable data */
+#define POLLOUT 0x0004 /* file descriptor is writeable */
+#define POLLRDNORM 0x0040 /* non-OOB/URG data available */
+#define POLLWRNORM POLLOUT /* no write type differentiation */
+#define POLLRDBAND 0x0080 /* OOB/Urgent readable data */
+#define POLLWRBAND 0x0100 /* OOB/Urgent data can be written */
+
+#if __BSD_VISIBLE
+/* General FreeBSD extension (currently only supported for sockets): */
+#define POLLINIGNEOF 0x2000 /* like POLLIN, except ignore EOF */
+#endif
+
+/*
+ * These events are set if they occur regardless of whether they were
+ * requested.
+ */
+#define POLLERR 0x0008 /* some poll error occurred */
+#define POLLHUP 0x0010 /* file descriptor was "hung up" */
+#define POLLNVAL 0x0020 /* requested events "invalid" */
+
+#if __BSD_VISIBLE
+
+#define POLLSTANDARD (POLLIN|POLLPRI|POLLOUT|POLLRDNORM|POLLRDBAND|\
+ POLLWRBAND|POLLERR|POLLHUP|POLLNVAL)
+
+/*
+ * Request that poll() wait forever.
+ * XXX in SYSV, this is defined in stropts.h, which is not included
+ * by poll.h.
+ */
+#define INFTIM (-1)
+
+#endif
+
+#ifndef _KERNEL
+
+__BEGIN_DECLS
+int poll(struct pollfd _pfd[], nfds_t _nfds, int _timeout);
+__END_DECLS
+
+#endif /* !_KERNEL */
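A userland sketch: wait up to one second for readable data on standard input, distinguishing hangup, data, and timeout (passing INFTIM instead of 1000 would block forever).

struct pollfd pfd;
int n;

pfd.fd = 0;			/* standard input */
pfd.events = POLLIN;
n = poll(&pfd, 1, 1000);	/* timeout in milliseconds */
if (n > 0 && (pfd.revents & POLLHUP))
	;			/* peer hung up */
else if (n > 0 && (pfd.revents & POLLIN))
	;			/* data ready to read */
else if (n == 0)
	;			/* timed out */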
+
+#endif /* !_SYS_POLL_HH_ */
diff --git a/rtems/freebsd/sys/priority.h b/rtems/freebsd/sys/priority.h
new file mode 100644
index 00000000..dbaa5eaa
--- /dev/null
+++ b/rtems/freebsd/sys/priority.h
@@ -0,0 +1,130 @@
+/*-
+ * Copyright (c) 1994, Henrik Vestergaard Draboel
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Henrik Vestergaard Draboel.
+ * 4. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _SYS_PRIORITY_HH_
+#define _SYS_PRIORITY_HH_
+
+/*
+ * Process priority specifications.
+ */
+
+/*
+ * Priority classes.
+ */
+
+#define PRI_ITHD 1 /* Interrupt thread. */
+#define PRI_REALTIME 2 /* Real time process. */
+#define PRI_TIMESHARE 3 /* Time sharing process. */
+#define PRI_IDLE 4 /* Idle process. */
+
+/*
+ * PRI_FIFO is POSIX.1B SCHED_FIFO.
+ */
+
+#define PRI_FIFO_BIT 8
+#define PRI_FIFO (PRI_FIFO_BIT | PRI_REALTIME)
+
+#define PRI_BASE(P) ((P) & ~PRI_FIFO_BIT)
+#define PRI_IS_REALTIME(P) (PRI_BASE(P) == PRI_REALTIME)
+#define PRI_NEED_RR(P) ((P) != PRI_FIFO)
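For example, a SCHED_FIFO priority carries the FIFO bit on top of the realtime class, which the helpers strip and test:

u_char pc = PRI_FIFO;		/* PRI_FIFO_BIT | PRI_REALTIME */
int rt = PRI_IS_REALTIME(pc);	/* 1: base class is PRI_REALTIME */
int rr = PRI_NEED_RR(pc);	/* 0: FIFO threads are not round-robin */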
+
+/*
+ * Priorities range from 0 to 255, but differences of less than 4 (RQ_PPQ,
+ * the run-queue spacing with 64 run queues) are insignificant.  Ranges are
+ * as follows:
+ *
+ * Interrupt threads: 0 - 63
+ * Top half kernel threads: 64 - 127
+ * Realtime user threads: 128 - 159
+ * Time sharing user threads: 160 - 223
+ * Idle user threads: 224 - 255
+ *
+ * XXX If/When the specific interrupt thread and top half thread ranges
+ * disappear, a larger range can be used for user processes.
+ */
+
+#define PRI_MIN (0) /* Highest priority. */
+#define PRI_MAX (255) /* Lowest priority. */
+
+#define PRI_MIN_ITHD (PRI_MIN)
+#define PRI_MAX_ITHD (PRI_MIN_KERN - 1)
+
+#define PI_REALTIME (PRI_MIN_ITHD + 0)
+#define PI_AV (PRI_MIN_ITHD + 4)
+#define PI_TTYHIGH (PRI_MIN_ITHD + 8)
+#define PI_TAPE (PRI_MIN_ITHD + 12)
+#define PI_NET (PRI_MIN_ITHD + 16)
+#define PI_DISK (PRI_MIN_ITHD + 20)
+#define PI_TTYLOW (PRI_MIN_ITHD + 24)
+#define PI_DISKLOW (PRI_MIN_ITHD + 28)
+#define PI_DULL (PRI_MIN_ITHD + 32)
+#define PI_SOFT (PRI_MIN_ITHD + 36)
+
+#define PRI_MIN_KERN (64)
+#define PRI_MAX_KERN (PRI_MIN_REALTIME - 1)
+
+#define PSWP (PRI_MIN_KERN + 0)
+#define PVM (PRI_MIN_KERN + 4)
+#define PINOD (PRI_MIN_KERN + 8)
+#define PRIBIO (PRI_MIN_KERN + 12)
+#define PVFS (PRI_MIN_KERN + 16)
+#define PZERO (PRI_MIN_KERN + 20)
+#define PSOCK (PRI_MIN_KERN + 24)
+#define PWAIT (PRI_MIN_KERN + 28)
+#define PCONFIG (PRI_MIN_KERN + 32)
+#define PLOCK (PRI_MIN_KERN + 36)
+#define PPAUSE (PRI_MIN_KERN + 40)
+
+#define PRI_MIN_REALTIME (128)
+#define PRI_MAX_REALTIME (PRI_MIN_TIMESHARE - 1)
+
+#define PRI_MIN_TIMESHARE (160)
+#define PRI_MAX_TIMESHARE (PRI_MIN_IDLE - 1)
+
+#define PUSER (PRI_MIN_TIMESHARE)
+
+#define PRI_MIN_IDLE (224)
+#define PRI_MAX_IDLE (PRI_MAX)
+
+struct priority {
+ u_char pri_class; /* Scheduling class. */
+ u_char pri_level; /* Normal priority level. */
+ u_char pri_native; /* Priority before propagation. */
+ u_char pri_user; /* User priority based on p_cpu and p_nice. */
+};
+
+#endif /* !_SYS_PRIORITY_HH_ */
diff --git a/rtems/freebsd/sys/priv.h b/rtems/freebsd/sys/priv.h
new file mode 100644
index 00000000..6b104e3e
--- /dev/null
+++ b/rtems/freebsd/sys/priv.h
@@ -0,0 +1,517 @@
+/*-
+ * Copyright (c) 2006 nCircle Network Security, Inc.
+ * All rights reserved.
+ *
+ * This software was developed by Robert N. M. Watson for the TrustedBSD
+ * Project under contract to nCircle Network Security, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR, NCIRCLE NETWORK SECURITY,
+ * INC., OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+ * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * Privilege checking interface for BSD kernel.
+ */
+#ifndef _SYS_PRIV_HH_
+#define _SYS_PRIV_HH_
+
+/*
+ * Privilege list, sorted loosely by kernel subsystem.
+ *
+ * Think carefully before adding or reusing one of these privileges -- are
+ * there existing instances referring to the same privilege? Third party
+ * vendors may request the assignment of privileges to be used in loadable
+ * modules. Particular numeric privilege assignments are part of the
+ * loadable kernel module ABI, and should not be changed across minor
+ * releases.
+ *
+ * When adding a new privilege, remember to determine if it's appropriate for
+ * use in jail, and update the privilege switch in kern_jail.c as necessary.
+ */
+
+/*
+ * Track beginning of privilege list.
+ */
+#define _PRIV_LOWEST 1
+
+/*
+ * The remaining privileges typically correspond to one or a small
+ * number of specific privilege checks, and have (relatively) precise
+ * meanings. They are loosely sorted into a set of base system
+ * privileges, such as the ability to reboot, and then loosely by
+ * subsystem, indicated by a subsystem name.
+ */
+#define _PRIV_ROOT 1 /* Removed. */
+#define PRIV_ACCT 2 /* Manage process accounting. */
+#define PRIV_MAXFILES 3 /* Exceed system open files limit. */
+#define PRIV_MAXPROC 4 /* Exceed system processes limit. */
+#define PRIV_KTRACE 5 /* Set/clear KTRFAC_ROOT on ktrace. */
+#define PRIV_SETDUMPER 6 /* Configure dump device. */
+#define PRIV_REBOOT 8 /* Can reboot system. */
+#define PRIV_SWAPON 9 /* Can swapon(). */
+#define PRIV_SWAPOFF 10 /* Can swapoff(). */
+#define PRIV_MSGBUF 11 /* Can read kernel message buffer. */
+#define PRIV_IO 12 /* Can perform low-level I/O. */
+#define PRIV_KEYBOARD 13 /* Reprogram keyboard. */
+#define PRIV_DRIVER 14 /* Low-level driver privilege. */
+#define PRIV_ADJTIME 15 /* Set time adjustment. */
+#define PRIV_NTP_ADJTIME 16 /* Set NTP time adjustment. */
+#define PRIV_CLOCK_SETTIME 17 /* Can call clock_settime. */
+#define PRIV_SETTIMEOFDAY 18 /* Can call settimeofday. */
+#define _PRIV_SETHOSTID 19 /* Removed. */
+#define _PRIV_SETDOMAINNAME 20 /* Removed. */
+
+/*
+ * Audit subsystem privileges.
+ */
+#define PRIV_AUDIT_CONTROL 40 /* Can configure audit. */
+#define PRIV_AUDIT_FAILSTOP 41 /* Can run during audit fail stop. */
+#define PRIV_AUDIT_GETAUDIT 42 /* Can get proc audit properties. */
+#define PRIV_AUDIT_SETAUDIT 43 /* Can set proc audit properties. */
+#define PRIV_AUDIT_SUBMIT 44 /* Can submit an audit record. */
+
+/*
+ * Credential management privileges.
+ */
+#define PRIV_CRED_SETUID 50 /* setuid. */
+#define PRIV_CRED_SETEUID 51 /* seteuid to !ruid and !svuid. */
+#define PRIV_CRED_SETGID 52 /* setgid. */
+#define PRIV_CRED_SETEGID 53 /* setgid to !rgid and !svgid. */
+#define PRIV_CRED_SETGROUPS 54 /* Set process additional groups. */
+#define PRIV_CRED_SETREUID 55 /* setreuid. */
+#define PRIV_CRED_SETREGID 56 /* setregid. */
+#define PRIV_CRED_SETRESUID 57 /* setresuid. */
+#define PRIV_CRED_SETRESGID 58 /* setresgid. */
+#define PRIV_SEEOTHERGIDS 59 /* Exempt bsd.seeothergids. */
+#define PRIV_SEEOTHERUIDS 60 /* Exempt bsd.seeotheruids. */
+
+/*
+ * Debugging privileges.
+ */
+#define PRIV_DEBUG_DIFFCRED 80 /* Exempt debugging other users. */
+#define PRIV_DEBUG_SUGID 81 /* Exempt debugging setuid proc. */
+#define PRIV_DEBUG_UNPRIV 82 /* Exempt unprivileged debug limit. */
+
+/*
+ * Dtrace privileges.
+ */
+#define PRIV_DTRACE_KERNEL 90 /* Allow use of DTrace on the kernel. */
+#define PRIV_DTRACE_PROC 91 /* Allow attaching DTrace to process. */
+#define PRIV_DTRACE_USER 92 /* Process may submit DTrace events. */
+
+/*
+ * Firmware privileges.
+ */
+#define PRIV_FIRMWARE_LOAD 100 /* Can load firmware. */
+
+/*
+ * Jail privileges.
+ */
+#define PRIV_JAIL_ATTACH 110 /* Attach to a jail. */
+#define PRIV_JAIL_SET 111 /* Set jail parameters. */
+#define PRIV_JAIL_REMOVE 112 /* Remove a jail. */
+
+/*
+ * Kernel environment privileges.
+ */
+#define PRIV_KENV_SET 120 /* Set kernel env. variables. */
+#define PRIV_KENV_UNSET 121 /* Unset kernel env. variables. */
+
+/*
+ * Loadable kernel module privileges.
+ */
+#define PRIV_KLD_LOAD 130 /* Load a kernel module. */
+#define PRIV_KLD_UNLOAD 131 /* Unload a kernel module. */
+
+/*
+ * Privileges associated with the MAC Framework and specific MAC policy
+ * modules.
+ */
+#define PRIV_MAC_PARTITION 140 /* Privilege in mac_partition policy. */
+#define PRIV_MAC_PRIVS 141 /* Privilege in the mac_privs policy. */
+
+/*
+ * Process-related privileges.
+ */
+#define PRIV_PROC_LIMIT 160 /* Exceed user process limit. */
+#define PRIV_PROC_SETLOGIN 161 /* Can call setlogin. */
+#define PRIV_PROC_SETRLIMIT 162 /* Can raise resources limits. */
+
+/*
+ * System V IPC privileges.
+ */
+#define PRIV_IPC_READ 170 /* Can override IPC read perm. */
+#define PRIV_IPC_WRITE 171 /* Can override IPC write perm. */
+#define PRIV_IPC_ADMIN 172 /* Can override IPC owner-only perm. */
+#define PRIV_IPC_MSGSIZE 173 /* Exempt IPC message queue limit. */
+
+/*
+ * POSIX message queue privileges.
+ */
+#define PRIV_MQ_ADMIN 180 /* Can override msgq owner-only perm. */
+
+/*
+ * Performance monitoring counter privileges.
+ */
+#define PRIV_PMC_MANAGE 190 /* Can administer PMC. */
+#define PRIV_PMC_SYSTEM 191 /* Can allocate a system-wide PMC. */
+
+/*
+ * Scheduling privileges.
+ */
+#define PRIV_SCHED_DIFFCRED 200 /* Exempt scheduling other users. */
+#define PRIV_SCHED_SETPRIORITY 201 /* Can set lower nice value for proc. */
+#define PRIV_SCHED_RTPRIO 202 /* Can set real time scheduling. */
+#define PRIV_SCHED_SETPOLICY 203 /* Can set scheduler policy. */
+#define PRIV_SCHED_SET 204 /* Can set thread scheduler. */
+#define PRIV_SCHED_SETPARAM 205 /* Can set thread scheduler params. */
+#define PRIV_SCHED_CPUSET 206 /* Can manipulate cpusets. */
+#define PRIV_SCHED_CPUSET_INTR 207 /* Can adjust IRQ to CPU binding. */
+
+/*
+ * POSIX semaphore privileges.
+ */
+#define PRIV_SEM_WRITE 220 /* Can override sem write perm. */
+
+/*
+ * Signal privileges.
+ */
+#define PRIV_SIGNAL_DIFFCRED 230 /* Exempt signalling other users. */
+#define PRIV_SIGNAL_SUGID 231 /* Non-conserv signal setuid proc. */
+
+/*
+ * Sysctl privileges.
+ */
+#define PRIV_SYSCTL_DEBUG 240 /* Can invoke sysctl.debug. */
+#define PRIV_SYSCTL_WRITE 241 /* Can write sysctls. */
+#define PRIV_SYSCTL_WRITEJAIL 242 /* Can write sysctls, jail permitted. */
+
+/*
+ * TTY privileges.
+ */
+#define PRIV_TTY_CONSOLE 250 /* Set console to tty. */
+#define PRIV_TTY_DRAINWAIT 251 /* Set tty drain wait time. */
+#define PRIV_TTY_DTRWAIT 252 /* Set DTR wait on tty. */
+#define PRIV_TTY_EXCLUSIVE 253 /* Override tty exclusive flag. */
+#define _PRIV_TTY_PRISON 254 /* Removed. */
+#define PRIV_TTY_STI 255 /* Simulate input on another tty. */
+#define PRIV_TTY_SETA 256 /* Set tty termios structure. */
+
+/*
+ * UFS-specific privileges.
+ */
+#define PRIV_UFS_EXTATTRCTL 270 /* Can configure EAs on UFS1. */
+#define PRIV_UFS_QUOTAOFF 271 /* quotaoff(). */
+#define PRIV_UFS_QUOTAON 272 /* quotaon(). */
+#define PRIV_UFS_SETUSE 273 /* setuse(). */
+
+/*
+ * ZFS-specific privileges.
+ */
+#define PRIV_ZFS_POOL_CONFIG 280 /* Can configure ZFS pools. */
+#define PRIV_ZFS_INJECT 281 /* Can inject faults in the ZFS fault
+ injection framework. */
+#define PRIV_ZFS_JAIL 282 /* Can attach/detach ZFS file systems
+ to/from jails. */
+
+/*
+ * NFS-specific privileges.
+ */
+#define PRIV_NFS_DAEMON 290 /* Can become the NFS daemon. */
+#define PRIV_NFS_LOCKD 291 /* Can become NFS lock daemon. */
+
+/*
+ * VFS privileges.
+ */
+#define PRIV_VFS_READ 310 /* Override vnode DAC read perm. */
+#define PRIV_VFS_WRITE 311 /* Override vnode DAC write perm. */
+#define PRIV_VFS_ADMIN 312 /* Override vnode DAC admin perm. */
+#define PRIV_VFS_EXEC 313 /* Override vnode DAC exec perm. */
+#define PRIV_VFS_LOOKUP 314 /* Override vnode DAC lookup perm. */
+#define PRIV_VFS_BLOCKRESERVE 315 /* Can use free block reserve. */
+#define PRIV_VFS_CHFLAGS_DEV 316 /* Can chflags() a device node. */
+#define PRIV_VFS_CHOWN 317 /* Can set user & group to non-member. */
+#define PRIV_VFS_CHROOT 318 /* chroot(). */
+#define PRIV_VFS_RETAINSUGID 319 /* Can retain sugid bits on change. */
+#define PRIV_VFS_EXCEEDQUOTA 320 /* Exempt from quota restrictions. */
+#define PRIV_VFS_EXTATTR_SYSTEM 321 /* Operate on system EA namespace. */
+#define PRIV_VFS_FCHROOT 322 /* fchroot(). */
+#define PRIV_VFS_FHOPEN 323 /* Can fhopen(). */
+#define PRIV_VFS_FHSTAT 324 /* Can fhstat(). */
+#define PRIV_VFS_FHSTATFS 325 /* Can fhstatfs(). */
+#define PRIV_VFS_GENERATION 326 /* stat() returns generation number. */
+#define PRIV_VFS_GETFH 327 /* Can retrieve file handles. */
+#define PRIV_VFS_GETQUOTA 328 /* getquota(). */
+#define PRIV_VFS_LINK 329 /* bsd.hardlink_check_uid */
+#define PRIV_VFS_MKNOD_BAD 330 /* Can mknod() to mark bad inodes. */
+#define PRIV_VFS_MKNOD_DEV 331 /* Can mknod() to create dev nodes. */
+#define PRIV_VFS_MKNOD_WHT 332 /* Can mknod() to create whiteout. */
+#define PRIV_VFS_MOUNT 333 /* Can mount(). */
+#define PRIV_VFS_MOUNT_OWNER 334 /* Can manage other users' file systems. */
+#define PRIV_VFS_MOUNT_EXPORTED 335 /* Can set MNT_EXPORTED on mount. */
+#define PRIV_VFS_MOUNT_PERM 336 /* Override dev node perms at mount. */
+#define PRIV_VFS_MOUNT_SUIDDIR 337 /* Can set MNT_SUIDDIR on mount. */
+#define PRIV_VFS_MOUNT_NONUSER 338 /* Can perform a non-user mount. */
+#define PRIV_VFS_SETGID 339 /* Can setgid if not in group. */
+#define PRIV_VFS_SETQUOTA 340 /* setquota(). */
+#define PRIV_VFS_STICKYFILE 341 /* Can set sticky bit on file. */
+#define PRIV_VFS_SYSFLAGS 342 /* Can modify system flags. */
+#define PRIV_VFS_UNMOUNT 343 /* Can unmount(). */
+#define PRIV_VFS_STAT 344 /* Override vnode MAC stat perm. */
+
+/*
+ * Virtual memory privileges.
+ */
+#define PRIV_VM_MADV_PROTECT 360 /* Can set MADV_PROTECT. */
+#define PRIV_VM_MLOCK 361 /* Can mlock(), mlockall(). */
+#define PRIV_VM_MUNLOCK 362 /* Can munlock(), munlockall(). */
+#define PRIV_VM_SWAP_NOQUOTA 363 /*
+ * Can override the global
+ * swap reservation limits.
+ */
+#define PRIV_VM_SWAP_NORLIMIT 364 /*
+ * Can override the per-uid
+ * swap reservation limits.
+ */
+
+/*
+ * Device file system privileges.
+ */
+#define PRIV_DEVFS_RULE 370 /* Can manage devfs rules. */
+#define PRIV_DEVFS_SYMLINK 371 /* Can create symlinks in devfs. */
+
+/*
+ * Random number generator privileges.
+ */
+#define PRIV_RANDOM_RESEED 380 /* Closing /dev/random reseeds. */
+
+/*
+ * Network stack privileges.
+ */
+#define PRIV_NET_BRIDGE 390 /* Administer bridge. */
+#define PRIV_NET_GRE 391 /* Administer GRE. */
+#define _PRIV_NET_PPP 392 /* Removed. */
+#define _PRIV_NET_SLIP 393 /* Removed. */
+#define PRIV_NET_BPF 394 /* Monitor BPF. */
+#define PRIV_NET_RAW 395 /* Open raw socket. */
+#define PRIV_NET_ROUTE 396 /* Administer routing. */
+#define PRIV_NET_TAP 397 /* Can open tap device. */
+#define PRIV_NET_SETIFMTU 398 /* Set interface MTU. */
+#define PRIV_NET_SETIFFLAGS 399 /* Set interface flags. */
+#define PRIV_NET_SETIFCAP 400 /* Set interface capabilities. */
+#define PRIV_NET_SETIFNAME 401 /* Set interface name. */
+#define PRIV_NET_SETIFMETRIC 402 /* Set interface metrics. */
+#define PRIV_NET_SETIFPHYS 403 /* Set interface physical layer prop. */
+#define PRIV_NET_SETIFMAC 404 /* Set interface MAC label. */
+#define PRIV_NET_ADDMULTI 405 /* Add multicast addr. to ifnet. */
+#define PRIV_NET_DELMULTI 406 /* Delete multicast addr. from ifnet. */
+#define PRIV_NET_HWIOCTL 407 /* Issue hardware ioctl on ifnet. */
+#define PRIV_NET_SETLLADDR 408 /* Set interface link-level address. */
+#define PRIV_NET_ADDIFGROUP 409 /* Add new interface group. */
+#define PRIV_NET_DELIFGROUP 410 /* Delete interface group. */
+#define PRIV_NET_IFCREATE 411 /* Create cloned interface. */
+#define PRIV_NET_IFDESTROY 412 /* Destroy cloned interface. */
+#define PRIV_NET_ADDIFADDR 413 /* Add protocol addr to interface. */
+#define PRIV_NET_DELIFADDR 414 /* Delete protocol addr on interface. */
+#define PRIV_NET_LAGG 415 /* Administer lagg interface. */
+#define PRIV_NET_GIF 416 /* Administer gif interface. */
+#define PRIV_NET_SETIFVNET 417 /* Move interface to vnet. */
+#define PRIV_NET_SETIFDESCR 418 /* Set interface description. */
+
+/*
+ * 802.11-related privileges.
+ */
+#define PRIV_NET80211_GETKEY 440 /* Query 802.11 keys. */
+#define PRIV_NET80211_MANAGE 441 /* Administer 802.11. */
+
+/*
+ * AppleTalk privileges.
+ */
+#define PRIV_NETATALK_RESERVEDPORT 450 /* Bind low port number. */
+
+/*
+ * ATM privileges.
+ */
+#define PRIV_NETATM_CFG 460
+#define PRIV_NETATM_ADD 461
+#define PRIV_NETATM_DEL 462
+#define PRIV_NETATM_SET 463
+
+/*
+ * Bluetooth privileges.
+ */
+#define PRIV_NETBLUETOOTH_RAW 470 /* Open raw bluetooth socket. */
+
+/*
+ * Netgraph and netgraph module privileges.
+ */
+#define PRIV_NETGRAPH_CONTROL 480 /* Open netgraph control socket. */
+#define PRIV_NETGRAPH_TTY 481 /* Configure tty for netgraph. */
+
+/*
+ * IPv4 and IPv6 privileges.
+ */
+#define PRIV_NETINET_RESERVEDPORT 490 /* Bind low port number. */
+#define PRIV_NETINET_IPFW 491 /* Administer IPFW firewall. */
+#define PRIV_NETINET_DIVERT 492 /* Open IP divert socket. */
+#define PRIV_NETINET_PF 493 /* Administer pf firewall. */
+#define PRIV_NETINET_DUMMYNET 494 /* Administer DUMMYNET. */
+#define PRIV_NETINET_CARP 495 /* Administer CARP. */
+#define PRIV_NETINET_MROUTE 496 /* Administer multicast routing. */
+#define PRIV_NETINET_RAW 497 /* Open netinet raw socket. */
+#define PRIV_NETINET_GETCRED 498 /* Query netinet pcb credentials. */
+#define PRIV_NETINET_ADDRCTRL6 499 /* Administer IPv6 address scopes. */
+#define PRIV_NETINET_ND6 500 /* Administer IPv6 neighbor disc. */
+#define PRIV_NETINET_SCOPE6 501 /* Administer IPv6 address scopes. */
+#define PRIV_NETINET_ALIFETIME6 502 /* Administer IPv6 address lifetimes. */
+#define PRIV_NETINET_IPSEC 503 /* Administer IPSEC. */
+#define PRIV_NETINET_REUSEPORT 504 /* Allow [rapid] port/address reuse. */
+#define PRIV_NETINET_SETHDROPTS 505 /* Set certain IPv4/6 header options. */
+#define PRIV_NETINET_BINDANY 506 /* Allow bind to any address. */
+
+/*
+ * IPX/SPX privileges.
+ */
+#define PRIV_NETIPX_RESERVEDPORT 520 /* Bind low port number. */
+#define PRIV_NETIPX_RAW 521 /* Open netipx raw socket. */
+
+/*
+ * NCP privileges.
+ */
+#define PRIV_NETNCP 530 /* Use another user's connection. */
+
+/*
+ * SMB privileges.
+ */
+#define PRIV_NETSMB 540 /* Use another user's connection. */
+
+/*
+ * VM86 privileges.
+ */
+#define PRIV_VM86_INTCALL 550 /* Allow invoking vm86 int handlers. */
+
+/*
+ * Set of reserved privilege values, which will be allocated to code as
+ * needed, in order to avoid renumbering later privileges due to insertion.
+ */
+#define _PRIV_RESERVED0 560
+#define _PRIV_RESERVED1 561
+#define _PRIV_RESERVED2 562
+#define _PRIV_RESERVED3 563
+#define _PRIV_RESERVED4 564
+#define _PRIV_RESERVED5 565
+#define _PRIV_RESERVED6 566
+#define _PRIV_RESERVED7 567
+#define _PRIV_RESERVED8 568
+#define _PRIV_RESERVED9 569
+#define _PRIV_RESERVED10 570
+#define _PRIV_RESERVED11 571
+#define _PRIV_RESERVED12 572
+#define _PRIV_RESERVED13 573
+#define _PRIV_RESERVED14 574
+#define _PRIV_RESERVED15 575
+
+/*
+ * Define a set of valid privilege numbers that can be used by loadable
+ * modules that don't yet have privilege reservations. Ideally, these should
+ * not be used, since their meaning is opaque to any policies that are aware
+ * of specific privileges, such as jail, and as such may be arbitrarily
+ * denied.
+ */
+#define PRIV_MODULE0 600
+#define PRIV_MODULE1 601
+#define PRIV_MODULE2 602
+#define PRIV_MODULE3 603
+#define PRIV_MODULE4 604
+#define PRIV_MODULE5 605
+#define PRIV_MODULE6 606
+#define PRIV_MODULE7 607
+#define PRIV_MODULE8 608
+#define PRIV_MODULE9 609
+#define PRIV_MODULE10 610
+#define PRIV_MODULE11 611
+#define PRIV_MODULE12 612
+#define PRIV_MODULE13 613
+#define PRIV_MODULE14 614
+#define PRIV_MODULE15 615
+
+/*
+ * DDB(4) privileges.
+ */
+#define PRIV_DDB_CAPTURE 620 /* Allow reading of DDB capture log. */
+
+/*
+ * Arla/nnpfs privileges.
+ */
+#define PRIV_NNPFS_DEBUG 630 /* Perform ARLA_VIOC_NNPFSDEBUG. */
+
+/*
+ * cpuctl(4) privileges.
+ */
+#define PRIV_CPUCTL_WRMSR 640 /* Write model-specific register. */
+#define PRIV_CPUCTL_UPDATE 641 /* Update cpu microcode. */
+
+/*
+ * Capi4BSD privileges.
+ */
+#define PRIV_C4B_RESET_CTLR 650 /* Load firmware, reset controller. */
+#define PRIV_C4B_TRACE 651 /* Unrestricted CAPI message tracing. */
+
+/*
+ * OpenAFS privileges.
+ */
+#define PRIV_AFS_ADMIN 660 /* Can change AFS client settings. */
+#define PRIV_AFS_DAEMON 661 /* Can become the AFS daemon. */
+
+/*
+ * Track end of privilege list.
+ */
+#define _PRIV_HIGHEST 662
+
+/*
+ * Validate that a named privilege is known by the privilege system. Invalid
+ * privileges presented to the privilege system by a priv_check interface
+ * will result in a panic. This is only approximate due to sparse allocation
+ * of the privilege space.
+ */
+#define PRIV_VALID(x) ((x) > _PRIV_LOWEST && (x) < _PRIV_HIGHEST)
+
+#ifdef _KERNEL
+/*
+ * Privilege check interfaces, modeled after the historic suser() interfaces,
+ * but with the addition of a specific privilege name.  No flags are
+ * currently defined for the API.  Historically, flags selected whether the
+ * check used the real uid instead of the effective uid, and whether or not
+ * the check should be allowed in jail.
+ */
+#ifndef __rtems__
+struct thread;
+struct ucred;
+int priv_check(struct thread *td, int priv);
+int priv_check_cred(struct ucred *cred, int priv, int flags);
+#else /* __rtems__ */
+#define priv_check(td, priv) 1
+#define priv_check_cred(cred, priv, flags) 1
+#endif /* __rtems__ */
+#endif
+
+#endif /* !_SYS_PRIV_HH_ */
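A sketch of the calling convention: a privileged path requests one specific privilege and propagates the error; under __rtems__ the stubs above make every check fail, so such paths are effectively disabled. The struct ifnet use is illustrative.

static int
example_set_mtu(struct thread *td, struct ifnet *ifp, int mtu)
{
	int error;

	error = priv_check(td, PRIV_NET_SETIFMTU);
	if (error != 0)
		return (error);		/* typically EPERM */
	ifp->if_mtu = mtu;
	return (0);
}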
diff --git a/rtems/freebsd/sys/proc.h b/rtems/freebsd/sys/proc.h
new file mode 100644
index 00000000..cedcb715
--- /dev/null
+++ b/rtems/freebsd/sys/proc.h
@@ -0,0 +1,915 @@
+/*-
+ * Copyright (c) 1986, 1989, 1991, 1993
+ * The Regents of the University of California. All rights reserved.
+ * (c) UNIX System Laboratories, Inc.
+ * All or some portions of this file are derived from material licensed
+ * to the University of California by American Telephone and Telegraph
+ * Co. or Unix System Laboratories, Inc. and are reproduced herein with
+ * the permission of UNIX System Laboratories, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)proc.h 8.15 (Berkeley) 5/19/95
+ * $FreeBSD$
+ */
+
+#ifndef _SYS_PROC_HH_
+#define _SYS_PROC_HH_
+
+#include <rtems/freebsd/sys/callout.h> /* For struct callout. */
+#include <rtems/freebsd/sys/event.h> /* For struct klist. */
+#include <rtems/freebsd/sys/condvar.h>
+#ifndef _KERNEL
+#include <rtems/freebsd/sys/filedesc.h>
+#endif
+#include <rtems/freebsd/sys/queue.h>
+#include <rtems/freebsd/sys/_lock.h>
+#include <rtems/freebsd/sys/lock_profile.h>
+#include <rtems/freebsd/sys/_mutex.h>
+#include <rtems/freebsd/sys/osd.h>
+#include <rtems/freebsd/sys/priority.h>
+#include <rtems/freebsd/sys/rtprio.h> /* XXX. */
+#include <rtems/freebsd/sys/runq.h>
+#include <rtems/freebsd/sys/resource.h>
+#include <rtems/freebsd/sys/sigio.h>
+#include <rtems/freebsd/sys/signal.h>
+#include <rtems/freebsd/sys/signalvar.h>
+#ifndef _KERNEL
+#include <rtems/freebsd/sys/time.h> /* For structs itimerval, timeval. */
+#else
+#include <rtems/freebsd/sys/pcpu.h>
+#endif
+#include <rtems/freebsd/sys/ucontext.h>
+#include <rtems/freebsd/sys/ucred.h>
+#include <rtems/freebsd/machine/proc.h> /* Machine-dependent proc substruct. */
+
+/*
+ * One structure allocated per session.
+ *
+ * List of locks
+ * (m) locked by s_mtx mtx
+ * (e) locked by proctree_lock sx
+ * (c) const until freeing
+ */
+struct session {
+ u_int s_count; /* Ref cnt; pgrps in session - atomic. */
+ struct proc *s_leader; /* (m + e) Session leader. */
+ struct vnode *s_ttyvp; /* (m) Vnode of controlling tty. */
+ struct tty *s_ttyp; /* (e) Controlling tty. */
+ pid_t s_sid; /* (c) Session ID. */
+ /* (m) Setlogin() name: */
+ char s_login[roundup(MAXLOGNAME, sizeof(long))];
+ struct mtx s_mtx; /* Mutex to protect members. */
+};
+
+/*
+ * One structure allocated per process group.
+ *
+ * List of locks
+ * (m) locked by pg_mtx mtx
+ * (e) locked by proctree_lock sx
+ * (c) const until freeing
+ */
+struct pgrp {
+ LIST_ENTRY(pgrp) pg_hash; /* (e) Hash chain. */
+ LIST_HEAD(, proc) pg_members; /* (m + e) Pointer to pgrp members. */
+ struct session *pg_session; /* (c) Pointer to session. */
+ struct sigiolst pg_sigiolst; /* (m) List of sigio sources. */
+ pid_t pg_id; /* (c) Process group id. */
+ int pg_jobc; /* (m) Job control process count. */
+ struct mtx pg_mtx; /* Mutex to protect members */
+};
+
+/*
+ * pargs, used to hold a copy of the command line, if it had a sane length.
+ */
+struct pargs {
+ u_int ar_ref; /* Reference count. */
+ u_int ar_length; /* Length. */
+ u_char ar_args[1]; /* Arguments. */
+};
+
+/*-
+ * Description of a process.
+ *
+ * This structure contains the information needed to manage a thread of
+ * control, known in UN*X as a process; it has references to substructures
+ * containing descriptions of things that the process uses, but may share
+ * with related processes. The process structure and the substructures
+ * are always addressable except for those marked "(CPU)" below,
+ * which might be addressable only on a processor on which the process
+ * is running.
+ *
+ * Below is a key of locks used to protect each member of struct proc. The
+ * lock is indicated by a reference to a specific character in parens in the
+ * associated comment.
+ * * - not yet protected
+ * a - only touched by curproc or parent during fork/wait
+ * b - created at fork, never changes
+ *        (exception: aiods switch vmspaces, but they are also
+ *        marked 'P_SYSTEM' so they will hopefully be left alone)
+ * c - locked by proc mtx
+ * d - locked by allproc_lock lock
+ * e - locked by proctree_lock lock
+ * f - session mtx
+ * g - process group mtx
+ * h - callout_lock mtx
+ * i - by curproc or the master session mtx
+ * j - locked by proc slock
+ * k - only accessed by curthread
+ * k*- only accessed by curthread and from an interrupt
+ * l - the attaching proc or attaching proc parent
+ * m - Giant
+ * n - not locked, lazy
+ * o - ktrace lock
+ * q - td_contested lock
+ * r - p_peers lock
+ * t - thread lock
+ * x - created at fork, only changes during single threading in exec
+ * y - created at first aio, doesn't change until exit or exec at which
+ * point we are single-threaded and only curthread changes it
+ * z - zombie threads lock
+ *
+ * If the locking key specifies two identifiers (for example, p_pptr) then
+ * either lock is sufficient for read access, but both locks must be held
+ * for write access.
+ */
+struct kaudit_record;
+struct td_sched;
+struct nlminfo;
+struct kaioinfo;
+struct p_sched;
+struct proc;
+struct sleepqueue;
+struct thread;
+struct trapframe;
+struct turnstile;
+struct mqueue_notifier;
+struct kdtrace_proc;
+struct kdtrace_thread;
+struct cpuset;
+
+/*
+ * XXX: Does this belong in resource.h or resourcevar.h instead?
+ * Resource usage extension. The times in rusage structs in the kernel are
+ * never up to date. The actual times are kept as runtimes and tick counts
+ * (with control info in the "previous" times), and are converted when
+ * userland asks for rusage info. Backwards compatibility prevents putting
+ * this directly in the user-visible rusage struct.
+ *
+ * Locking for p_rux: (cj) means (j) for p_rux and (c) for p_crux.
+ * Locking for td_rux: (t) for all fields.
+ */
+struct rusage_ext {
+ u_int64_t rux_runtime; /* (cj) Real time. */
+ u_int64_t rux_uticks; /* (cj) Statclock hits in user mode. */
+ u_int64_t rux_sticks; /* (cj) Statclock hits in sys mode. */
+ u_int64_t rux_iticks; /* (cj) Statclock hits in intr mode. */
+ u_int64_t rux_uu; /* (c) Previous user time in usec. */
+ u_int64_t rux_su; /* (c) Previous sys time in usec. */
+ u_int64_t rux_tu; /* (c) Previous total time in usec. */
+};
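+
+/*
+ * Example (illustrative sketch): the conversion mentioned above splits
+ * rux_runtime into user, system and interrupt time in proportion to the
+ * statclock tick counts, in the style of calcru().  Ignoring the
+ * monotonicity fixups the real code performs:
+ *
+ *	uint64_t tot, usec, uu, su;
+ *
+ *	tot = rux->rux_uticks + rux->rux_sticks + rux->rux_iticks;
+ *	if (tot == 0)
+ *		tot = 1;
+ *	usec = cputick2usec(rux->rux_runtime);
+ *	uu = (usec * rux->rux_uticks) / tot;	(user time)
+ *	su = (usec * rux->rux_sticks) / tot;	(system time)
+ *	(the remainder is attributed to interrupt time)
+ */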
+
+/*
+ * Kernel runnable context (thread).
+ * This is what is put to sleep and reactivated.
+ * Thread context. Processes may have multiple threads.
+ */
+struct thread {
+#ifndef __rtems__
+ struct mtx *volatile td_lock; /* replaces sched lock */
+ struct proc *td_proc; /* (*) Associated process. */
+ TAILQ_ENTRY(thread) td_plist; /* (*) All threads in this proc. */
+ TAILQ_ENTRY(thread) td_runq; /* (t) Run queue. */
+ TAILQ_ENTRY(thread) td_slpq; /* (t) Sleep queue. */
+ TAILQ_ENTRY(thread) td_lockq; /* (t) Lock queue. */
+ struct cpuset *td_cpuset; /* (t) CPU affinity mask. */
+ struct seltd *td_sel; /* Select queue/channel. */
+ struct sleepqueue *td_sleepqueue; /* (k) Associated sleep queue. */
+ struct turnstile *td_turnstile; /* (k) Associated turnstile. */
+ struct umtx_q *td_umtxq; /* (c?) Link for when we're blocked. */
+ lwpid_t td_tid; /* (b) Thread ID. */
+ sigqueue_t td_sigqueue; /* (c) Sigs arrived, not delivered. */
+#define td_siglist td_sigqueue.sq_signals
+
+/* Cleared during fork1() */
+#define td_startzero td_flags
+ int td_flags; /* (t) TDF_* flags. */
+ int td_inhibitors; /* (t) Why it cannot run. */
+ int td_pflags; /* (k) Private thread (TDP_*) flags. */
+ int td_dupfd; /* (k) Ret value from fdopen. XXX */
+ int td_sqqueue; /* (t) Sleepqueue queue blocked on. */
+ void *td_wchan; /* (t) Sleep address. */
+ const char *td_wmesg; /* (t) Reason for sleep. */
+ u_char td_lastcpu; /* (t) Last cpu we were on. */
+ u_char td_oncpu; /* (t) Which cpu we are on. */
+ volatile u_char td_owepreempt; /* (k*) Preempt on last critical_exit */
+ u_char td_tsqueue; /* (t) Turnstile queue blocked on. */
+ short td_locks; /* (k) Count of non-spin locks. */
+ short td_rw_rlocks; /* (k) Count of rwlock read locks. */
+ short td_lk_slocks; /* (k) Count of lockmgr shared locks. */
+ struct turnstile *td_blocked; /* (t) Lock thread is blocked on. */
+ const char *td_lockname; /* (t) Name of lock blocked on. */
+ LIST_HEAD(, turnstile) td_contested; /* (q) Contested locks. */
+ struct lock_list_entry *td_sleeplocks; /* (k) Held sleep locks. */
+ int td_intr_nesting_level; /* (k) Interrupt recursion. */
+ int td_pinned; /* (k) Temporary cpu pin count. */
+ struct ucred *td_ucred; /* (k) Reference to credentials. */
+ u_int td_estcpu; /* (t) estimated cpu utilization */
+ int td_slptick; /* (t) Time at sleep. */
+ int td_blktick; /* (t) Time spent blocked. */
+ struct rusage td_ru; /* (t) rusage information. */
+ uint64_t td_incruntime; /* (t) Cpu ticks to transfer to proc. */
+ uint64_t td_runtime; /* (t) How many cpu ticks we've run. */
+ u_int td_pticks; /* (t) Statclock hits for profiling */
+ u_int td_sticks; /* (t) Statclock hits in system mode. */
+ u_int td_iticks; /* (t) Statclock hits in intr mode. */
+ u_int td_uticks; /* (t) Statclock hits in user mode. */
+ int td_intrval; /* (t) Return value for sleepq. */
+ sigset_t td_oldsigmask; /* (k) Saved mask from pre sigpause. */
+ sigset_t td_sigmask; /* (c) Current signal mask. */
+ volatile u_int td_generation; /* (k) For detection of preemption */
+ stack_t td_sigstk; /* (k) Stack ptr and on-stack flag. */
+ int td_xsig; /* (c) Signal for ptrace */
+ u_long td_profil_addr; /* (k) Temporary addr until AST. */
+ u_int td_profil_ticks; /* (k) Temporary ticks until AST. */
+ char td_name[MAXCOMLEN + 1]; /* (*) Thread name. */
+ struct file *td_fpop; /* (k) file referencing cdev under op */
+ int td_dbgflags; /* (c) Userland debugger flags */
+ struct ksiginfo td_dbgksi; /* (c) ksi reflected to debugger. */
+ int td_ng_outbound; /* (k) Thread entered ng from above. */
+ struct osd td_osd; /* (k) Object specific data. */
+#define td_endzero td_base_pri
+
+/* Copied during fork1() or thread_sched_upcall(). */
+#define td_startcopy td_endzero
+ u_char td_rqindex; /* (t) Run queue index. */
+ u_char td_base_pri; /* (t) Thread base kernel priority. */
+ u_char td_priority; /* (t) Thread active priority. */
+ u_char td_pri_class; /* (t) Scheduling class. */
+ u_char td_user_pri; /* (t) User pri from estcpu and nice. */
+ u_char td_base_user_pri; /* (t) Base user pri */
+#define td_endcopy td_pcb
+
+/*
+ * Fields that must be manually set in fork1() or thread_sched_upcall()
+ * or already have been set in the allocator, constructor, etc.
+ */
+ struct pcb *td_pcb; /* (k) Kernel VA of pcb and kstack. */
+ enum {
+ TDS_INACTIVE = 0x0,
+ TDS_INHIBITED,
+ TDS_CAN_RUN,
+ TDS_RUNQ,
+ TDS_RUNNING
+ } td_state; /* (t) thread state */
+ register_t td_retval[2]; /* (k) Syscall aux returns. */
+ struct callout td_slpcallout; /* (h) Callout for sleep. */
+ struct trapframe *td_frame; /* (k) */
+ struct vm_object *td_kstack_obj;/* (a) Kstack object. */
+ vm_offset_t td_kstack; /* (a) Kernel VA of kstack. */
+ int td_kstack_pages; /* (a) Size of the kstack. */
+ void *td_unused1;
+ vm_offset_t td_unused2;
+ int td_unused3;
+ volatile u_int td_critnest; /* (k*) Critical section nest level. */
+ struct mdthread td_md; /* (k) Any machine-dependent fields. */
+ struct td_sched *td_sched; /* (*) Scheduler-specific data. */
+ struct kaudit_record *td_ar; /* (k) Active audit record, if any. */
+ int td_syscalls; /* per-thread syscall count (used by NFS :)) */
+ struct lpohead td_lprof[2]; /* (a) lock profiling objects. */
+ struct kdtrace_thread *td_dtrace; /* (*) DTrace-specific data. */
+ int td_errno; /* Error returned by last syscall. */
+ struct vnet *td_vnet; /* (k) Effective vnet. */
+ const char *td_vnet_lpush; /* (k) Debugging vnet push / pop. */
+ struct rusage_ext td_rux; /* (t) Internal rusage information. */
+ struct vm_map_entry *td_map_def_user; /* (k) Deferred entries. */
+#else /* __rtems__ */
+ rtems_chain_node td_node;
+ rtems_id td_id;
+ char td_name [16];
+ struct proc *td_proc; /* (*) Associated process. */
+ struct ucred *td_ucred; /* (k) Reference to credentials. */
+ struct rusage td_ru; /* (t) rusage information. */
+ register_t td_retval[2]; /* (k) Syscall aux returns. */
+ void *td_wchan; /* (t) Sleep address. */
+#endif /* __rtems__ */
+};
+
+struct mtx *thread_lock_block(struct thread *);
+void thread_lock_unblock(struct thread *, struct mtx *);
+void thread_lock_set(struct thread *, struct mtx *);
+#define THREAD_LOCK_ASSERT(td, type) \
+do { \
+ struct mtx *__m = (td)->td_lock; \
+ if (__m != &blocked_lock) \
+ mtx_assert(__m, (type)); \
+} while (0)
+
+#ifdef INVARIANTS
+#define THREAD_LOCKPTR_ASSERT(td, lock) \
+do { \
+ struct mtx *__m = (td)->td_lock; \
+ KASSERT((__m == &blocked_lock || __m == (lock)), \
+ ("Thread %p lock %p does not match %p", td, __m, (lock))); \
+} while (0)
+#else
+#define THREAD_LOCKPTR_ASSERT(td, lock)
+#endif
+
+#define CRITICAL_ASSERT(td) \
+ KASSERT((td)->td_critnest >= 1, ("Not in critical section"));
+
+/*
+ * Flags kept in td_flags:
+ * To change these you MUST have the scheduler lock.
+ */
+#define TDF_BORROWING 0x00000001 /* Thread is borrowing pri from another. */
+#define TDF_INPANIC 0x00000002 /* Caused a panic, let it drive crashdump. */
+#define TDF_INMEM 0x00000004 /* Thread's stack is in memory. */
+#define TDF_SINTR 0x00000008 /* Sleep is interruptible. */
+#define TDF_TIMEOUT 0x00000010 /* Timing out during sleep. */
+#define TDF_IDLETD 0x00000020 /* This is a per-CPU idle thread. */
+#define TDF_CANSWAP 0x00000040 /* Thread can be swapped. */
+#define TDF_SLEEPABORT 0x00000080 /* sleepq_abort was called. */
+#define TDF_KTH_SUSP 0x00000100 /* kthread is suspended */
+#define TDF_UBORROWING 0x00000200 /* Thread is borrowing user pri. */
+#define TDF_BOUNDARY 0x00000400 /* Thread suspended at user boundary */
+#define TDF_ASTPENDING 0x00000800 /* Thread has some asynchronous events. */
+#define TDF_TIMOFAIL 0x00001000 /* Timeout from sleep after we were awake. */
+#define TDF_SBDRY 0x00002000 /* Stop only on usermode boundary. */
+#define TDF_UPIBLOCKED 0x00004000 /* Thread blocked on user PI mutex. */
+#define TDF_NEEDSUSPCHK 0x00008000 /* Thread may need to suspend. */
+#define TDF_NEEDRESCHED 0x00010000 /* Thread needs to yield. */
+#define TDF_NEEDSIGCHK 0x00020000 /* Thread may need signal delivery. */
+#define TDF_UNUSED18 0x00040000 /* --available-- */
+#define TDF_UNUSED19 0x00080000 /* --available-- (was: sleeping on a umtx). */
+#define TDF_THRWAKEUP 0x00100000 /* Libthr thread must not suspend itself. */
+#define TDF_UNUSED21 0x00200000 /* --available-- */
+#define TDF_SWAPINREQ 0x00400000 /* Swapin request due to wakeup. */
+#define TDF_UNUSED23 0x00800000 /* --available-- */
+#define TDF_SCHED0 0x01000000 /* Reserved for scheduler private use */
+#define TDF_SCHED1 0x02000000 /* Reserved for scheduler private use */
+#define TDF_SCHED2 0x04000000 /* Reserved for scheduler private use */
+#define TDF_SCHED3 0x08000000 /* Reserved for scheduler private use */
+#define TDF_ALRMPEND 0x10000000 /* Pending SIGVTALRM needs to be posted. */
+#define TDF_PROFPEND 0x20000000 /* Pending SIGPROF needs to be posted. */
+#define TDF_MACPEND 0x40000000 /* AST-based MAC event pending. */
+
+/* Userland debug flags */
+#define TDB_SUSPEND 0x00000001 /* Thread is suspended by debugger */
+#define TDB_XSIG 0x00000002 /* Thread is exchanging signal under trace */
+#define TDB_USERWR 0x00000004 /* Debugger modified memory or registers */
+#define TDB_SCE 0x00000008 /* Thread performs syscall enter */
+#define TDB_SCX 0x00000010 /* Thread performs syscall exit */
+#define TDB_EXEC 0x00000020 /* TDB_SCX from exec(2) family */
+
+/*
+ * "Private" flags kept in td_pflags:
+ * These are only written by curthread and thus need no locking.
+ */
+#define TDP_OLDMASK 0x00000001 /* Need to restore mask after suspend. */
+#define TDP_INKTR 0x00000002 /* Thread is currently in KTR code. */
+#define TDP_INKTRACE 0x00000004 /* Thread is currently in KTRACE code. */
+#define TDP_BUFNEED 0x00000008 /* Do not recurse into the buf flush */
+#define TDP_COWINPROGRESS 0x00000010 /* Snapshot copy-on-write in progress. */
+#define TDP_ALTSTACK 0x00000020 /* Have alternate signal stack. */
+#define TDP_DEADLKTREAT 0x00000040 /* Lock acquisition - deadlock treatment. */
+#define TDP_UNUSED80 0x00000080 /* available. */
+#define TDP_NOSLEEPING 0x00000100 /* Thread is not allowed to sleep on a sq. */
+#define TDP_OWEUPC 0x00000200 /* Call addupc() at next AST. */
+#define TDP_ITHREAD 0x00000400 /* Thread is an interrupt thread. */
+#define TDP_UNUSED800 0x00000800 /* available. */
+#define TDP_SCHED1 0x00001000 /* Reserved for scheduler private use */
+#define TDP_SCHED2 0x00002000 /* Reserved for scheduler private use */
+#define TDP_SCHED3 0x00004000 /* Reserved for scheduler private use */
+#define TDP_SCHED4 0x00008000 /* Reserved for scheduler private use */
+#define TDP_GEOM 0x00010000 /* Settle GEOM before finishing syscall */
+#define TDP_SOFTDEP 0x00020000 /* Stuck processing softdep worklist */
+#define TDP_NORUNNINGBUF 0x00040000 /* Ignore runningbufspace check */
+#define TDP_WAKEUP 0x00080000 /* Don't sleep in umtx cond_wait */
+#define TDP_INBDFLUSH 0x00100000 /* Already in BO_BDFLUSH, do not recurse */
+#define TDP_KTHREAD 0x00200000 /* This is an official kernel thread */
+#define TDP_CALLCHAIN 0x00400000 /* Capture thread's callchain */
+#define TDP_IGNSUSP 0x00800000 /* Permission to ignore the MNTK_SUSPEND* */
+#define TDP_AUDITREC 0x01000000 /* Audit record pending on thread */
+
+/*
+ * Reasons that the current thread cannot be run yet.
+ * More than one may apply.
+ */
+#define TDI_SUSPENDED 0x0001 /* On suspension queue. */
+#define TDI_SLEEPING 0x0002 /* Actually asleep! (tricky). */
+#define TDI_SWAPPED 0x0004 /* Stack not in mem. Bad juju if run. */
+#define TDI_LOCK 0x0008 /* Stopped on a lock. */
+#define TDI_IWAIT 0x0010 /* Awaiting interrupt. */
+
+#define TD_IS_SLEEPING(td) ((td)->td_inhibitors & TDI_SLEEPING)
+#define TD_ON_SLEEPQ(td) ((td)->td_wchan != NULL)
+#define TD_IS_SUSPENDED(td) ((td)->td_inhibitors & TDI_SUSPENDED)
+#define TD_IS_SWAPPED(td) ((td)->td_inhibitors & TDI_SWAPPED)
+#define TD_ON_LOCK(td) ((td)->td_inhibitors & TDI_LOCK)
+#define TD_AWAITING_INTR(td) ((td)->td_inhibitors & TDI_IWAIT)
+#define TD_IS_RUNNING(td) ((td)->td_state == TDS_RUNNING)
+#define TD_ON_RUNQ(td) ((td)->td_state == TDS_RUNQ)
+#define TD_CAN_RUN(td) ((td)->td_state == TDS_CAN_RUN)
+#define TD_IS_INHIBITED(td) ((td)->td_state == TDS_INHIBITED)
+#define TD_ON_UPILOCK(td) ((td)->td_flags & TDF_UPIBLOCKED)
+#define TD_IS_IDLETHREAD(td) ((td)->td_flags & TDF_IDLETD)
+
+
+#define TD_SET_INHIB(td, inhib) do { \
+ (td)->td_state = TDS_INHIBITED; \
+ (td)->td_inhibitors |= (inhib); \
+} while (0)
+
+#define TD_CLR_INHIB(td, inhib) do { \
+ if (((td)->td_inhibitors & (inhib)) && \
+ (((td)->td_inhibitors &= ~(inhib)) == 0)) \
+ (td)->td_state = TDS_CAN_RUN; \
+} while (0)
+
+#define TD_SET_SLEEPING(td) TD_SET_INHIB((td), TDI_SLEEPING)
+#define TD_SET_SWAPPED(td) TD_SET_INHIB((td), TDI_SWAPPED)
+#define TD_SET_LOCK(td) TD_SET_INHIB((td), TDI_LOCK)
+#define TD_SET_SUSPENDED(td) TD_SET_INHIB((td), TDI_SUSPENDED)
+#define TD_SET_IWAIT(td) TD_SET_INHIB((td), TDI_IWAIT)
+#define TD_SET_EXITING(td) TD_SET_INHIB((td), TDI_EXITING)
+
+#define TD_CLR_SLEEPING(td) TD_CLR_INHIB((td), TDI_SLEEPING)
+#define TD_CLR_SWAPPED(td) TD_CLR_INHIB((td), TDI_SWAPPED)
+#define TD_CLR_LOCK(td) TD_CLR_INHIB((td), TDI_LOCK)
+#define TD_CLR_SUSPENDED(td) TD_CLR_INHIB((td), TDI_SUSPENDED)
+#define TD_CLR_IWAIT(td) TD_CLR_INHIB((td), TDI_IWAIT)
+
+#define TD_SET_RUNNING(td) (td)->td_state = TDS_RUNNING
+#define TD_SET_RUNQ(td) (td)->td_state = TDS_RUNQ
+#define TD_SET_CAN_RUN(td) (td)->td_state = TDS_CAN_RUN
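+
+/*
+ * Example (illustrative sketch): a voluntary sleep and the matching
+ * wakeup move a thread through these states roughly as follows (thread
+ * lock held):
+ *
+ *	TD_SET_SLEEPING(td);		(TDS_INHIBITED + TDI_SLEEPING)
+ *	mi_switch(SW_VOL | SWT_SLEEPQ, NULL);
+ *	...
+ *	TD_CLR_SLEEPING(td);		(last inhibitor -> TDS_CAN_RUN)
+ *	if (TD_CAN_RUN(td))
+ *		setrunnable(td);	(back onto a run queue)
+ */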
+
+/*
+ * Process structure.
+ */
+struct proc {
+#ifndef __rtems__
+ LIST_ENTRY(proc) p_list; /* (d) List of all processes. */
+ TAILQ_HEAD(, thread) p_threads; /* (c) all threads. */
+ struct mtx p_slock; /* process spin lock */
+ struct ucred *p_ucred; /* (c) Process owner's identity. */
+ struct filedesc *p_fd; /* (b) Open files. */
+ struct filedesc_to_leader *p_fdtol; /* (b) Tracking node */
+ struct pstats *p_stats; /* (b) Accounting/statistics (CPU). */
+ struct plimit *p_limit; /* (c) Process limits. */
+ struct callout p_limco; /* (c) Limit callout handle */
+ struct sigacts *p_sigacts; /* (x) Signal actions, state (CPU). */
+
+ /*
+ * The following don't make too much sense.
+ * See the td_ or ke_ versions of the same flags.
+ */
+ int p_flag; /* (c) P_* flags. */
+ enum {
+ PRS_NEW = 0, /* In creation */
+ PRS_NORMAL, /* threads can be run. */
+ PRS_ZOMBIE
+ } p_state; /* (j/c) S* process status. */
+ pid_t p_pid; /* (b) Process identifier. */
+ LIST_ENTRY(proc) p_hash; /* (d) Hash chain. */
+ LIST_ENTRY(proc) p_pglist; /* (g + e) List of processes in pgrp. */
+ struct proc *p_pptr; /* (c + e) Pointer to parent process. */
+ LIST_ENTRY(proc) p_sibling; /* (e) List of sibling processes. */
+ LIST_HEAD(, proc) p_children; /* (e) Pointer to list of children. */
+ struct mtx p_mtx; /* (n) Lock for this struct. */
+ struct ksiginfo *p_ksi; /* Locked by parent proc lock */
+ sigqueue_t p_sigqueue; /* (c) Sigs not delivered to a td. */
+#define p_siglist p_sigqueue.sq_signals
+
+/* The following fields are all zeroed upon creation in fork. */
+#define p_startzero p_oppid
+ pid_t p_oppid; /* (c + e) Save ppid in ptrace. XXX */
+ struct vmspace *p_vmspace; /* (b) Address space. */
+ u_int p_swtick; /* (c) Tick when swapped in or out. */
+ struct itimerval p_realtimer; /* (c) Alarm timer. */
+ struct rusage p_ru; /* (a) Exit information. */
+ struct rusage_ext p_rux; /* (cj) Internal resource usage. */
+ struct rusage_ext p_crux; /* (c) Internal child resource usage. */
+ int p_profthreads; /* (c) Num threads in addupc_task. */
+ volatile int p_exitthreads; /* (j) Number of threads exiting */
+ int p_traceflag; /* (o) Kernel trace points. */
+ struct vnode *p_tracevp; /* (c + o) Trace to vnode. */
+ struct ucred *p_tracecred; /* (o) Credentials to trace with. */
+ struct vnode *p_textvp; /* (b) Vnode of executable. */
+ u_int p_lock; /* (c) Proclock (prevent swap) count. */
+ struct sigiolst p_sigiolst; /* (c) List of sigio sources. */
+ int p_sigparent; /* (c) Signal to parent on exit. */
+ int p_sig; /* (n) For core dump/debugger XXX. */
+ u_long p_code; /* (n) For core dump/debugger XXX. */
+ u_int p_stops; /* (c) Stop event bitmask. */
+ u_int p_stype; /* (c) Stop event type. */
+ char p_step; /* (c) Process is stopped. */
+ u_char p_pfsflags; /* (c) Procfs flags. */
+ struct nlminfo *p_nlminfo; /* (?) Only used by/for lockd. */
+ struct kaioinfo *p_aioinfo; /* (y) ASYNC I/O info. */
+ struct thread *p_singlethread;/* (c + j) If single threading this is it */
+ int p_suspcount; /* (j) Num threads in suspended mode. */
+ struct thread *p_xthread; /* (c) Trap thread */
+ int p_boundary_count;/* (c) Num threads at user boundary */
+ int p_pendingcnt; /* how many signals are pending */
+ struct itimers *p_itimers; /* (c) POSIX interval timers. */
+/* End area that is zeroed on creation. */
+#define p_endzero p_magic
+
+/* The following fields are all copied upon creation in fork. */
+#define p_startcopy p_endzero
+ u_int p_magic; /* (b) Magic number. */
+ int p_osrel; /* (x) osreldate for the
+ binary (from ELF note, if any) */
+ char p_comm[MAXCOMLEN + 1]; /* (b) Process name. */
+ struct pgrp *p_pgrp; /* (c + e) Pointer to process group. */
+ struct sysentvec *p_sysent; /* (b) Syscall dispatch info. */
+ struct pargs *p_args; /* (c) Process arguments. */
+ rlim_t p_cpulimit; /* (c) Current CPU limit in seconds. */
+ signed char p_nice; /* (c) Process "nice" value. */
+ int p_fibnum; /* in this routing domain XXX MRT */
+/* End area that is copied on creation. */
+#define p_endcopy p_xstat
+
+ u_short p_xstat; /* (c) Exit status; also stop sig. */
+ struct knlist p_klist; /* (c) Knotes attached to this proc. */
+ int p_numthreads; /* (c) Number of threads. */
+ struct mdproc p_md; /* Any machine-dependent fields. */
+ struct callout p_itcallout; /* (h + c) Interval timer callout. */
+ u_short p_acflag; /* (c) Accounting flags. */
+ struct proc *p_peers; /* (r) */
+ struct proc *p_leader; /* (b) */
+ void *p_emuldata; /* (c) Emulator state data. */
+ struct label *p_label; /* (*) Proc (not subject) MAC label. */
+ struct p_sched *p_sched; /* (*) Scheduler-specific data. */
+ STAILQ_HEAD(, ktr_request) p_ktr; /* (o) KTR event queue. */
+ LIST_HEAD(, mqueue_notifier) p_mqnotifier; /* (c) mqueue notifiers.*/
+ struct kdtrace_proc *p_dtrace; /* (*) DTrace-specific data. */
+ struct cv p_pwait; /* (*) wait cv for exit/exec */
+#else /* __rtems__ */
+ struct ucred *p_ucred; /* (c) Process owner's identity. */
+ struct mtx p_mtx; /* (n) Lock for this struct. */
+ rtems_id p_pid;
+ int p_fibnum; /* in this routing domain XXX MRT */
+#endif /* __rtems__ */
+};
+
+#define p_session p_pgrp->pg_session
+#define p_pgid p_pgrp->pg_id
+
+#define NOCPU 0xff /* For when we aren't on a CPU. */
+
+#define PROC_SLOCK(p) mtx_lock_spin(&(p)->p_slock)
+#define PROC_SUNLOCK(p) mtx_unlock_spin(&(p)->p_slock)
+#define PROC_SLOCK_ASSERT(p, type) mtx_assert(&(p)->p_slock, (type))
+
+/* These flags are kept in p_flag. */
+#define P_ADVLOCK 0x00001 /* Process may hold a POSIX advisory lock. */
+#define P_CONTROLT 0x00002 /* Has a controlling terminal. */
+#define P_KTHREAD 0x00004 /* Kernel thread (*). */
+#define P_NOLOAD 0x00008 /* Ignore during load avg calculations. */
+#define P_PPWAIT 0x00010 /* Parent is waiting for child to exec/exit. */
+#define P_PROFIL 0x00020 /* Has started profiling. */
+#define P_STOPPROF 0x00040 /* Has thread requesting to stop profiling. */
+#define P_HADTHREADS 0x00080 /* Has had threads (no cleanup shortcuts) */
+#define P_SUGID 0x00100 /* Had set id privileges since last exec. */
+#define P_SYSTEM 0x00200 /* System proc: no sigs, stats or swapping. */
+#define P_SINGLE_EXIT 0x00400 /* Threads suspending should exit, not wait. */
+#define P_TRACED 0x00800 /* Debugged process being traced. */
+#define P_WAITED 0x01000 /* Someone is waiting for us. */
+#define P_WEXIT 0x02000 /* Working on exiting. */
+#define P_EXEC 0x04000 /* Process called exec. */
+#define P_WKILLED 0x08000 /* Killed, go to kernel/user boundary ASAP. */
+#define P_CONTINUED 0x10000 /* Proc has continued from a stopped state. */
+#define P_STOPPED_SIG 0x20000 /* Stopped due to SIGSTOP/SIGTSTP. */
+#define P_STOPPED_TRACE 0x40000 /* Stopped because of tracing. */
+#define P_STOPPED_SINGLE 0x80000 /* Only 1 thread can continue (not to user). */
+#define P_PROTECTED 0x100000 /* Do not kill on memory overcommit. */
+#define P_SIGEVENT 0x200000 /* Process pending signals changed. */
+#define P_SINGLE_BOUNDARY 0x400000 /* Threads should suspend at user boundary. */
+#define P_HWPMC 0x800000 /* Process is using HWPMCs */
+
+#define P_JAILED 0x1000000 /* Process is in jail. */
+#define P_INEXEC 0x4000000 /* Process is in execve(). */
+#define P_STATCHILD 0x8000000 /* Child process stopped or exited. */
+#define P_INMEM 0x10000000 /* Loaded into memory. */
+#define P_SWAPPINGOUT 0x20000000 /* Process is being swapped out. */
+#define P_SWAPPINGIN 0x40000000 /* Process is being swapped in. */
+
+#define P_STOPPED (P_STOPPED_SIG|P_STOPPED_SINGLE|P_STOPPED_TRACE)
+#define P_SHOULDSTOP(p) ((p)->p_flag & P_STOPPED)
+#define P_KILLED(p) ((p)->p_flag & P_WKILLED)
+
+/*
+ * These were process status values (p_stat), now they are only used in
+ * legacy conversion code.
+ */
+#define SIDL 1 /* Process being created by fork. */
+#define SRUN 2 /* Currently runnable. */
+#define SSLEEP 3 /* Sleeping on an address. */
+#define SSTOP 4 /* Process debugging or suspension. */
+#define SZOMB 5 /* Awaiting collection by parent. */
+#define SWAIT 6 /* Waiting for interrupt. */
+#define SLOCK 7 /* Blocked on a lock. */
+
+#define P_MAGIC 0xbeefface
+
+#ifdef _KERNEL
+
+/* Types and flags for mi_switch(). */
+#define SW_TYPE_MASK 0xff /* First 8 bits are switch type */
+#define SWT_NONE 0 /* Unspecified switch. */
+#define SWT_PREEMPT 1 /* Switching due to preemption. */
+#define SWT_OWEPREEMPT 2 /* Switching due to owepreempt. */
+#define SWT_TURNSTILE 3 /* Turnstile contention. */
+#define SWT_SLEEPQ 4 /* Sleepq wait. */
+#define SWT_SLEEPQTIMO 5 /* Sleepq timeout wait. */
+#define SWT_RELINQUISH 6 /* yield call. */
+#define SWT_NEEDRESCHED 7 /* NEEDRESCHED was set. */
+#define SWT_IDLE 8 /* Switching from the idle thread. */
+#define SWT_IWAIT 9 /* Waiting for interrupts. */
+#define SWT_SUSPEND 10 /* Thread suspended. */
+#define SWT_REMOTEPREEMPT 11 /* Remote processor preempted. */
+#define SWT_REMOTEWAKEIDLE 12 /* Remote processor preempted idle. */
+#define SWT_COUNT 13 /* Number of switch types. */
+/* Flags */
+#define SW_VOL 0x0100 /* Voluntary switch. */
+#define SW_INVOL 0x0200 /* Involuntary switch. */
+#define SW_PREEMPT 0x0400 /* The invol switch is a preemption */
+
+/* How values for thread_single(). */
+#define SINGLE_NO_EXIT 0
+#define SINGLE_EXIT 1
+#define SINGLE_BOUNDARY 2
+
+#ifdef MALLOC_DECLARE
+MALLOC_DECLARE(M_PARGS);
+MALLOC_DECLARE(M_PGRP);
+MALLOC_DECLARE(M_SESSION);
+MALLOC_DECLARE(M_SUBPROC);
+MALLOC_DECLARE(M_ZOMBIE);
+#endif
+
+#define FOREACH_PROC_IN_SYSTEM(p) \
+ LIST_FOREACH((p), &allproc, p_list)
+#define FOREACH_THREAD_IN_PROC(p, td) \
+ TAILQ_FOREACH((td), &(p)->p_threads, td_plist)
+
+#define FIRST_THREAD_IN_PROC(p) TAILQ_FIRST(&(p)->p_threads)
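+
+/*
+ * Example (illustrative sketch): walking every thread in the system with
+ * the iterators above.  The allproc list is protected by the allproc_lock
+ * sx lock (key 'd'), the thread list of each process by its proc mutex
+ * (key 'c'):
+ *
+ *	struct proc *p;
+ *	struct thread *td;
+ *
+ *	sx_slock(&allproc_lock);
+ *	FOREACH_PROC_IN_SYSTEM(p) {
+ *		PROC_LOCK(p);
+ *		FOREACH_THREAD_IN_PROC(p, td)
+ *			printf("%d/%d %s\n", (int)p->p_pid,
+ *			    (int)td->td_tid, td->td_name);
+ *		PROC_UNLOCK(p);
+ *	}
+ *	sx_sunlock(&allproc_lock);
+ */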
+
+/*
+ * We use process IDs <= PID_MAX; PID_MAX + 1 must also fit in a pid_t,
+ * as it is used to represent "no process group".
+ */
+#define PID_MAX 99999
+#define NO_PID 100000
+
+#define SESS_LEADER(p) ((p)->p_session->s_leader == (p))
+
+
+#define STOPEVENT(p, e, v) do { \
+ if ((p)->p_stops & (e)) { \
+ PROC_LOCK(p); \
+ stopevent((p), (e), (v)); \
+ PROC_UNLOCK(p); \
+ } \
+} while (0)
+#define _STOPEVENT(p, e, v) do { \
+ PROC_LOCK_ASSERT(p, MA_OWNED); \
+ WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, &p->p_mtx.lock_object, \
+ "checking stopevent %d", (e)); \
+ if ((p)->p_stops & (e)) \
+ stopevent((p), (e), (v)); \
+} while (0)
+
+/* Lock and unlock a process. */
+#define PROC_LOCK(p) mtx_lock(&(p)->p_mtx)
+#define PROC_TRYLOCK(p) mtx_trylock(&(p)->p_mtx)
+#define PROC_UNLOCK(p) mtx_unlock(&(p)->p_mtx)
+#define PROC_LOCKED(p) mtx_owned(&(p)->p_mtx)
+#define PROC_LOCK_ASSERT(p, type) mtx_assert(&(p)->p_mtx, (type))
+
+/* Lock and unlock a process group. */
+#define PGRP_LOCK(pg) mtx_lock(&(pg)->pg_mtx)
+#define PGRP_UNLOCK(pg) mtx_unlock(&(pg)->pg_mtx)
+#define PGRP_LOCKED(pg) mtx_owned(&(pg)->pg_mtx)
+#define PGRP_LOCK_ASSERT(pg, type) mtx_assert(&(pg)->pg_mtx, (type))
+
+#define PGRP_LOCK_PGSIGNAL(pg) do { \
+ if ((pg) != NULL) \
+ PGRP_LOCK(pg); \
+} while (0)
+#define PGRP_UNLOCK_PGSIGNAL(pg) do { \
+ if ((pg) != NULL) \
+ PGRP_UNLOCK(pg); \
+} while (0)
+
+/* Lock and unlock a session. */
+#define SESS_LOCK(s) mtx_lock(&(s)->s_mtx)
+#define SESS_UNLOCK(s) mtx_unlock(&(s)->s_mtx)
+#define SESS_LOCKED(s) mtx_owned(&(s)->s_mtx)
+#define SESS_LOCK_ASSERT(s, type) mtx_assert(&(s)->s_mtx, (type))
+
+/* Hold process U-area in memory, normally for ptrace/procfs work. */
+#define PHOLD(p) do { \
+ PROC_LOCK(p); \
+ _PHOLD(p); \
+ PROC_UNLOCK(p); \
+} while (0)
+#define _PHOLD(p) do { \
+ PROC_LOCK_ASSERT((p), MA_OWNED); \
+ KASSERT(!((p)->p_flag & P_WEXIT) || (p) == curproc, \
+ ("PHOLD of exiting process")); \
+ (p)->p_lock++; \
+ if (((p)->p_flag & P_INMEM) == 0) \
+ faultin((p)); \
+} while (0)
+#define PROC_ASSERT_HELD(p) do { \
+ KASSERT((p)->p_lock > 0, ("process not held")); \
+} while (0)
+
+#define PRELE(p) do { \
+ PROC_LOCK((p)); \
+ _PRELE((p)); \
+ PROC_UNLOCK((p)); \
+} while (0)
+#define _PRELE(p) do { \
+ PROC_LOCK_ASSERT((p), MA_OWNED); \
+ (--(p)->p_lock); \
+ if (((p)->p_flag & P_WEXIT) && (p)->p_lock == 0) \
+ wakeup(&(p)->p_lock); \
+} while (0)
+#define PROC_ASSERT_NOT_HELD(p) do { \
+ KASSERT((p)->p_lock == 0, ("process held")); \
+} while (0)
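+
+/*
+ * Example (illustrative sketch): a ptrace-style consumer holds the
+ * target in memory across an operation that may sleep, then drops the
+ * hold (proc_rwmem() stands in for any such operation):
+ *
+ *	PHOLD(p);			(p_lock++, fault in if swapped out)
+ *	error = proc_rwmem(p, &uio);	(may sleep safely now)
+ *	PRELE(p);			(drop hold, wake exiting waiters)
+ */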
+
+/* Check whether a thread is safe to be swapped out. */
+#define thread_safetoswapout(td) ((td)->td_flags & TDF_CANSWAP)
+
+/* Control whether or not it is safe for curthread to sleep. */
+#define THREAD_NO_SLEEPING() do { \
+ KASSERT(!(curthread->td_pflags & TDP_NOSLEEPING), \
+ ("nested no sleeping")); \
+ curthread->td_pflags |= TDP_NOSLEEPING; \
+} while (0)
+
+#define THREAD_SLEEPING_OK() do { \
+ KASSERT((curthread->td_pflags & TDP_NOSLEEPING), \
+ ("nested sleeping ok")); \
+ curthread->td_pflags &= ~TDP_NOSLEEPING; \
+} while (0)
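+
+/*
+ * Example (illustrative sketch): the two macros above bracket regions in
+ * which sleeping would be a bug, e.g. around a callback that runs with a
+ * non-sleepable lock held (the callback name is hypothetical):
+ *
+ *	THREAD_NO_SLEEPING();
+ *	error = nonsleepable_callback(arg);
+ *	THREAD_SLEEPING_OK();
+ */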
+
+#define PIDHASH(pid) (&pidhashtbl[(pid) & pidhash])
+extern LIST_HEAD(pidhashhead, proc) *pidhashtbl;
+extern u_long pidhash;
+
+#define PGRPHASH(pgid) (&pgrphashtbl[(pgid) & pgrphash])
+extern LIST_HEAD(pgrphashhead, pgrp) *pgrphashtbl;
+extern u_long pgrphash;
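+
+/*
+ * Example (illustrative sketch): the essence of pfind() is a walk of one
+ * PIDHASH() chain under allproc_lock; the match is returned locked:
+ *
+ *	struct proc *p;
+ *
+ *	sx_slock(&allproc_lock);
+ *	LIST_FOREACH(p, PIDHASH(pid), p_hash)
+ *		if (p->p_pid == pid) {
+ *			PROC_LOCK(p);
+ *			break;
+ *		}
+ *	sx_sunlock(&allproc_lock);
+ *	return (p);			(locked proc, or NULL)
+ */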
+
+extern struct sx allproc_lock;
+extern struct sx proctree_lock;
+extern struct mtx ppeers_lock;
+extern struct proc proc0; /* Process slot for swapper. */
+extern struct thread thread0; /* Primary thread in proc0. */
+extern struct vmspace vmspace0; /* VM space for proc0. */
+extern int hogticks; /* Limit on kernel cpu hogs. */
+extern int lastpid;
+extern int nprocs, maxproc; /* Current and max number of procs. */
+extern int maxprocperuid; /* Max procs per uid. */
+extern u_long ps_arg_cache_limit;
+
+LIST_HEAD(proclist, proc);
+TAILQ_HEAD(procqueue, proc);
+TAILQ_HEAD(threadqueue, thread);
+extern struct proclist allproc; /* List of all processes. */
+extern struct proclist zombproc; /* List of zombie processes. */
+extern struct proc *initproc, *pageproc; /* Process slots for init, pager. */
+
+extern struct uma_zone *proc_zone;
+
+struct proc *pfind(pid_t); /* Find process by id. */
+struct pgrp *pgfind(pid_t); /* Find process group by id. */
+struct proc *zpfind(pid_t); /* Find zombie process by id. */
+
+void ast(struct trapframe *framep);
+struct thread *choosethread(void);
+int cr_cansignal(struct ucred *cred, struct proc *proc, int signum);
+int enterpgrp(struct proc *p, pid_t pgid, struct pgrp *pgrp,
+ struct session *sess);
+int enterthispgrp(struct proc *p, struct pgrp *pgrp);
+void faultin(struct proc *p);
+void fixjobc(struct proc *p, struct pgrp *pgrp, int entering);
+int fork1(struct thread *, int, int, struct proc **);
+void fork_exit(void (*)(void *, struct trapframe *), void *,
+ struct trapframe *);
+void fork_return(struct thread *, struct trapframe *);
+int inferior(struct proc *p);
+void kick_proc0(void);
+int leavepgrp(struct proc *p);
+int maybe_preempt(struct thread *td);
+void mi_switch(int flags, struct thread *newtd);
+int p_candebug(struct thread *td, struct proc *p);
+int p_cansee(struct thread *td, struct proc *p);
+int p_cansched(struct thread *td, struct proc *p);
+int p_cansignal(struct thread *td, struct proc *p, int signum);
+int p_canwait(struct thread *td, struct proc *p);
+struct pargs *pargs_alloc(int len);
+void pargs_drop(struct pargs *pa);
+void pargs_hold(struct pargs *pa);
+void procinit(void);
+void proc_linkup0(struct proc *p, struct thread *td);
+void proc_linkup(struct proc *p, struct thread *td);
+void proc_reparent(struct proc *child, struct proc *newparent);
+struct pstats *pstats_alloc(void);
+void pstats_fork(struct pstats *src, struct pstats *dst);
+void pstats_free(struct pstats *ps);
+int securelevel_ge(struct ucred *cr, int level);
+int securelevel_gt(struct ucred *cr, int level);
+void sess_hold(struct session *);
+void sess_release(struct session *);
+int setrunnable(struct thread *);
+void setsugid(struct proc *p);
+int sigonstack(size_t sp);
+void sleepinit(void);
+void stopevent(struct proc *, u_int, u_int);
+void threadinit(void);
+void cpu_idle(int);
+int cpu_idle_wakeup(int);
+extern void (*cpu_idle_hook)(void); /* Hook to machdep CPU idler. */
+void cpu_switch(struct thread *, struct thread *, struct mtx *);
+void cpu_throw(struct thread *, struct thread *) __dead2;
+void unsleep(struct thread *);
+void userret(struct thread *, struct trapframe *);
+struct syscall_args;
+int syscallenter(struct thread *, struct syscall_args *);
+void syscallret(struct thread *, int, struct syscall_args *);
+
+void cpu_exit(struct thread *);
+void exit1(struct thread *, int) __dead2;
+struct syscall_args;
+int cpu_fetch_syscall_args(struct thread *td, struct syscall_args *sa);
+void cpu_fork(struct thread *, struct proc *, struct thread *, int);
+void cpu_set_fork_handler(struct thread *, void (*)(void *), void *);
+void cpu_set_syscall_retval(struct thread *, int);
+void cpu_set_upcall(struct thread *td, struct thread *td0);
+void cpu_set_upcall_kse(struct thread *, void (*)(void *), void *,
+ stack_t *);
+int cpu_set_user_tls(struct thread *, void *tls_base);
+void cpu_thread_alloc(struct thread *);
+void cpu_thread_clean(struct thread *);
+void cpu_thread_exit(struct thread *);
+void cpu_thread_free(struct thread *);
+void cpu_thread_swapin(struct thread *);
+void cpu_thread_swapout(struct thread *);
+struct thread *thread_alloc(int pages);
+int thread_alloc_stack(struct thread *, int pages);
+void thread_exit(void) __dead2;
+void thread_free(struct thread *td);
+void thread_link(struct thread *td, struct proc *p);
+void thread_reap(void);
+int thread_single(int how);
+void thread_single_end(void);
+void thread_stash(struct thread *td);
+void thread_stopped(struct proc *p);
+void childproc_stopped(struct proc *child, int reason);
+void childproc_continued(struct proc *child);
+void childproc_exited(struct proc *child);
+int thread_suspend_check(int how);
+void thread_suspend_switch(struct thread *);
+void thread_suspend_one(struct thread *td);
+void thread_unlink(struct thread *td);
+void thread_unsuspend(struct proc *p);
+int thread_unsuspend_one(struct thread *td);
+void thread_unthread(struct thread *td);
+void thread_wait(struct proc *p);
+struct thread *thread_find(struct proc *p, lwpid_t tid);
+void thr_exit1(void);
+
+#endif /* _KERNEL */
+
+#endif /* !_SYS_PROC_HH_ */
diff --git a/rtems/freebsd/sys/protosw.h b/rtems/freebsd/sys/protosw.h
new file mode 100644
index 00000000..5b8e9f03
--- /dev/null
+++ b/rtems/freebsd/sys/protosw.h
@@ -0,0 +1,339 @@
+/*-
+ * Copyright (c) 1982, 1986, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)protosw.h 8.1 (Berkeley) 6/2/93
+ * $FreeBSD$
+ */
+
+#ifndef _SYS_PROTOSW_HH_
+#define _SYS_PROTOSW_HH_
+
+/* Forward declare these structures referenced from prototypes below. */
+struct mbuf;
+struct thread;
+struct sockaddr;
+struct socket;
+struct sockopt;
+
+/*#ifdef _KERNEL*/
+/*
+ * Protocol switch table.
+ *
+ * Each protocol has a handle initializing one of these structures,
+ * which is used for protocol-protocol and system-protocol communication.
+ *
+ * A protocol is called through the pr_init entry before any other.
+ * Thereafter it is called every 200ms through the pr_fasttimo entry and
+ * every 500ms through the pr_slowtimo entry for timer-based actions.
+ * The system will call the pr_drain entry if it is low on space and
+ * this should throw away any non-critical data.
+ *
+ * Protocols pass data between themselves as chains of mbufs using
+ * the pr_input and pr_output hooks. Pr_input passes data up (towards
+ * the users) and pr_output passes it down (towards the interfaces); control
+ * information passes up and down on pr_ctlinput and pr_ctloutput.
+ * The protocol is responsible for the space occupied by any of the
+ * arguments to these entries and must dispose of it.
+ *
+ * In retrospect, it would be a lot nicer to use an interface
+ * similar to the vnode VOP interface.
+ */
+/* USE THESE FOR YOUR PROTOTYPES ! */
+typedef void pr_input_t (struct mbuf *, int);
+typedef int pr_input6_t (struct mbuf **, int*, int); /* XXX FIX THIS */
+typedef int pr_output_t (struct mbuf *, struct socket *);
+typedef void pr_ctlinput_t (int, struct sockaddr *, void *);
+typedef int pr_ctloutput_t (struct socket *, struct sockopt *);
+typedef void pr_init_t (void);
+typedef void pr_destroy_t (void);
+typedef void pr_fasttimo_t (void);
+typedef void pr_slowtimo_t (void);
+typedef void pr_drain_t (void);
+
+struct protosw {
+ short pr_type; /* socket type used for */
+ struct domain *pr_domain; /* domain protocol a member of */
+ short pr_protocol; /* protocol number */
+ short pr_flags; /* see below */
+/* protocol-protocol hooks */
+ pr_input_t *pr_input; /* input to protocol (from below) */
+ pr_output_t *pr_output; /* output to protocol (from above) */
+ pr_ctlinput_t *pr_ctlinput; /* control input (from below) */
+ pr_ctloutput_t *pr_ctloutput; /* control output (from above) */
+/* utility hooks */
+ pr_init_t *pr_init;
+ pr_destroy_t *pr_destroy;
+ pr_fasttimo_t *pr_fasttimo; /* fast timeout (200ms) */
+ pr_slowtimo_t *pr_slowtimo; /* slow timeout (500ms) */
+ pr_drain_t *pr_drain; /* flush any excess space possible */
+
+ struct pr_usrreqs *pr_usrreqs; /* user-protocol hook */
+};
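+
+/*
+ * Example (illustrative sketch): a protocol family registers an array of
+ * these switch entries.  A hypothetical datagram protocol might provide
+ * (foodomain, IPPROTO_FOO and the foo_* handlers are made-up names):
+ *
+ *	struct protosw foosw = {
+ *		.pr_type =	SOCK_DGRAM,
+ *		.pr_domain =	&foodomain,
+ *		.pr_protocol =	IPPROTO_FOO,
+ *		.pr_flags =	PR_ATOMIC | PR_ADDR,
+ *		.pr_input =	foo_input,
+ *		.pr_ctloutput =	foo_ctloutput,
+ *		.pr_init =	foo_init,
+ *		.pr_usrreqs =	&foo_usrreqs,
+ *	};
+ */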
+/*#endif*/
+
+#define PR_SLOWHZ 2 /* 2 slow timeouts per second */
+#define PR_FASTHZ 5 /* 5 fast timeouts per second */
+
+/*
+ * This number should be defined again within each protocol family to avoid
+ * confusion.
+ */
+#define PROTO_SPACER 32767 /* spacer for loadable protocols */
+
+/*
+ * Values for pr_flags.
+ * PR_ADDR requires PR_ATOMIC;
+ * PR_ADDR and PR_CONNREQUIRED are mutually exclusive.
+ * PR_IMPLOPCL means that the protocol allows sendto without prior connect,
+ * and the protocol understands the MSG_EOF flag. The first property
+ * is only relevant if PR_CONNREQUIRED is set (otherwise sendto is allowed
+ * anyhow).
+ */
+#define PR_ATOMIC 0x01 /* exchange atomic messages only */
+#define PR_ADDR 0x02 /* addresses given with messages */
+#define PR_CONNREQUIRED 0x04 /* connection required by protocol */
+#define PR_WANTRCVD 0x08 /* want PRU_RCVD calls */
+#define PR_RIGHTS 0x10 /* passes capabilities */
+#define PR_IMPLOPCL 0x20 /* implied open/close */
+#define PR_LASTHDR 0x40 /* enforce ipsec policy; last header */
+
+/*
+ * In earlier BSD network stacks, a single pr_usrreq() function pointer was
+ * invoked with an operation number indicating what operation was desired.
+ * We now provide individual function pointers which protocols can implement,
+ * which offers a number of benefits (such as type checking for arguments).
+ * These older constants are still present in order to support TCP debugging.
+ */
+#define PRU_ATTACH 0 /* attach protocol to up */
+#define PRU_DETACH 1 /* detach protocol from up */
+#define PRU_BIND 2 /* bind socket to address */
+#define PRU_LISTEN 3 /* listen for connection */
+#define PRU_CONNECT 4 /* establish connection to peer */
+#define PRU_ACCEPT 5 /* accept connection from peer */
+#define PRU_DISCONNECT 6 /* disconnect from peer */
+#define PRU_SHUTDOWN 7 /* won't send any more data */
+#define PRU_RCVD 8 /* have taken data; more room now */
+#define PRU_SEND 9 /* send this data */
+#define PRU_ABORT 10 /* abort (fast DISCONNECT, DETACH) */
+#define PRU_CONTROL 11 /* control operations on protocol */
+#define PRU_SENSE 12 /* return status into m */
+#define PRU_RCVOOB 13 /* retrieve out of band data */
+#define PRU_SENDOOB 14 /* send out of band data */
+#define PRU_SOCKADDR 15 /* fetch socket's address */
+#define PRU_PEERADDR 16 /* fetch peer's address */
+#define PRU_CONNECT2 17 /* connect two sockets */
+/* begin for protocols internal use */
+#define PRU_FASTTIMO 18 /* 200ms timeout */
+#define PRU_SLOWTIMO 19 /* 500ms timeout */
+#define PRU_PROTORCV 20 /* receive from below */
+#define PRU_PROTOSEND 21 /* send to below */
+/* end for protocol's internal use */
+#define PRU_SEND_EOF 22 /* send and close */
+#define PRU_SOSETLABEL 23 /* MAC label change */
+#define PRU_CLOSE 24 /* socket close */
+#define PRU_FLUSH 25 /* flush the socket */
+#define PRU_NREQ 25
+
+#ifdef PRUREQUESTS
+const char *prurequests[] = {
+ "ATTACH", "DETACH", "BIND", "LISTEN",
+ "CONNECT", "ACCEPT", "DISCONNECT", "SHUTDOWN",
+ "RCVD", "SEND", "ABORT", "CONTROL",
+ "SENSE", "RCVOOB", "SENDOOB", "SOCKADDR",
+ "PEERADDR", "CONNECT2", "FASTTIMO", "SLOWTIMO",
+ "PROTORCV", "PROTOSEND", "SEND_EOF", "SOSETLABEL",
+ "CLOSE", "FLUSH",
+};
+#endif
+
+#ifdef _KERNEL /* users shouldn't see this decl */
+
+struct ifnet;
+struct stat;
+struct ucred;
+struct uio;
+
+/*
+ * If the ordering here looks odd, that's because it's alphabetical. These
+ * should eventually be merged back into struct protosw.
+ *
+ * Some fields are initialized to defaults if they are NULL;
+ * see uipc_domain.c:net_init_domain().
+ */
+struct pr_usrreqs {
+ void (*pru_abort)(struct socket *so);
+ int (*pru_accept)(struct socket *so, struct sockaddr **nam);
+ int (*pru_attach)(struct socket *so, int proto, struct thread *td);
+ int (*pru_bind)(struct socket *so, struct sockaddr *nam,
+ struct thread *td);
+ int (*pru_connect)(struct socket *so, struct sockaddr *nam,
+ struct thread *td);
+ int (*pru_connect2)(struct socket *so1, struct socket *so2);
+ int (*pru_control)(struct socket *so, u_long cmd, caddr_t data,
+ struct ifnet *ifp, struct thread *td);
+ void (*pru_detach)(struct socket *so);
+ int (*pru_disconnect)(struct socket *so);
+ int (*pru_listen)(struct socket *so, int backlog,
+ struct thread *td);
+ int (*pru_peeraddr)(struct socket *so, struct sockaddr **nam);
+ int (*pru_rcvd)(struct socket *so, int flags);
+ int (*pru_rcvoob)(struct socket *so, struct mbuf *m, int flags);
+ int (*pru_send)(struct socket *so, int flags, struct mbuf *m,
+ struct sockaddr *addr, struct mbuf *control,
+ struct thread *td);
+#define PRUS_OOB 0x1
+#define PRUS_EOF 0x2
+#define PRUS_MORETOCOME 0x4
+ int (*pru_sense)(struct socket *so, struct stat *sb);
+ int (*pru_shutdown)(struct socket *so);
+ int (*pru_flush)(struct socket *so, int direction);
+ int (*pru_sockaddr)(struct socket *so, struct sockaddr **nam);
+ int (*pru_sosend)(struct socket *so, struct sockaddr *addr,
+ struct uio *uio, struct mbuf *top, struct mbuf *control,
+ int flags, struct thread *td);
+ int (*pru_soreceive)(struct socket *so, struct sockaddr **paddr,
+ struct uio *uio, struct mbuf **mp0, struct mbuf **controlp,
+ int *flagsp);
+ int (*pru_sopoll)(struct socket *so, int events,
+ struct ucred *cred, struct thread *td);
+ void (*pru_sosetlabel)(struct socket *so);
+ void (*pru_close)(struct socket *so);
+};
+
+/*
+ * All nonvoid pru_*() functions below return EOPNOTSUPP.
+ */
+int pru_accept_notsupp(struct socket *so, struct sockaddr **nam);
+int pru_attach_notsupp(struct socket *so, int proto, struct thread *td);
+int pru_bind_notsupp(struct socket *so, struct sockaddr *nam,
+ struct thread *td);
+int pru_connect_notsupp(struct socket *so, struct sockaddr *nam,
+ struct thread *td);
+int pru_connect2_notsupp(struct socket *so1, struct socket *so2);
+int pru_control_notsupp(struct socket *so, u_long cmd, caddr_t data,
+ struct ifnet *ifp, struct thread *td);
+int pru_disconnect_notsupp(struct socket *so);
+int pru_listen_notsupp(struct socket *so, int backlog, struct thread *td);
+int pru_peeraddr_notsupp(struct socket *so, struct sockaddr **nam);
+int pru_rcvd_notsupp(struct socket *so, int flags);
+int pru_rcvoob_notsupp(struct socket *so, struct mbuf *m, int flags);
+int pru_send_notsupp(struct socket *so, int flags, struct mbuf *m,
+ struct sockaddr *addr, struct mbuf *control, struct thread *td);
+int pru_sense_null(struct socket *so, struct stat *sb);
+int pru_shutdown_notsupp(struct socket *so);
+int pru_sockaddr_notsupp(struct socket *so, struct sockaddr **nam);
+int pru_sosend_notsupp(struct socket *so, struct sockaddr *addr,
+ struct uio *uio, struct mbuf *top, struct mbuf *control, int flags,
+ struct thread *td);
+int pru_soreceive_notsupp(struct socket *so, struct sockaddr **paddr,
+ struct uio *uio, struct mbuf **mp0, struct mbuf **controlp,
+ int *flagsp);
+int pru_sopoll_notsupp(struct socket *so, int events, struct ucred *cred,
+ struct thread *td);
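+
+/*
+ * Example (illustrative sketch): a protocol implementing only a subset
+ * of operations points the remaining hooks at the stubs above (the
+ * foo_* handlers are made-up names):
+ *
+ *	struct pr_usrreqs foo_usrreqs = {
+ *		.pru_attach =	foo_attach,
+ *		.pru_detach =	foo_detach,
+ *		.pru_send =	foo_send,
+ *		.pru_connect2 =	pru_connect2_notsupp,
+ *		.pru_rcvoob =	pru_rcvoob_notsupp,
+ *	};
+ */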
+
+#endif /* _KERNEL */
+
+/*
+ * The arguments to the ctlinput routine are
+ * (*protosw[].pr_ctlinput)(cmd, sa, arg);
+ * where cmd is one of the commands below, sa is a pointer to a sockaddr,
+ * and arg is a `void *' argument used within a protocol family.
+ */
+#define PRC_IFDOWN 0 /* interface transition */
+#define PRC_ROUTEDEAD 1 /* select new route if possible ??? */
+#define PRC_IFUP 2 /* interface has come back up */
+#define PRC_QUENCH2 3 /* DEC congestion bit says slow down */
+#define PRC_QUENCH 4 /* some one said to slow down */
+#define PRC_MSGSIZE 5 /* message size forced drop */
+#define PRC_HOSTDEAD 6 /* host appears to be down */
+#define PRC_HOSTUNREACH 7 /* deprecated (use PRC_UNREACH_HOST) */
+#define PRC_UNREACH_NET 8 /* no route to network */
+#define PRC_UNREACH_HOST 9 /* no route to host */
+#define PRC_UNREACH_PROTOCOL 10 /* dst says bad protocol */
+#define PRC_UNREACH_PORT 11 /* bad port # */
+/* was PRC_UNREACH_NEEDFRAG 12 (use PRC_MSGSIZE) */
+#define PRC_UNREACH_SRCFAIL 13 /* source route failed */
+#define PRC_REDIRECT_NET 14 /* net routing redirect */
+#define PRC_REDIRECT_HOST 15 /* host routing redirect */
+#define PRC_REDIRECT_TOSNET 16 /* redirect for type of service & net */
+#define PRC_REDIRECT_TOSHOST 17 /* redirect for tos & host */
+#define PRC_TIMXCEED_INTRANS 18 /* packet lifetime expired in transit */
+#define PRC_TIMXCEED_REASS 19 /* lifetime expired on reass q */
+#define PRC_PARAMPROB 20 /* header incorrect */
+#define PRC_UNREACH_ADMIN_PROHIB 21 /* packet administratively prohibited */
+
+#define PRC_NCMDS 22
+
+#define PRC_IS_REDIRECT(cmd) \
+ ((cmd) >= PRC_REDIRECT_NET && (cmd) <= PRC_REDIRECT_TOSHOST)
+
+#ifdef PRCREQUESTS
+char *prcrequests[] = {
+ "IFDOWN", "ROUTEDEAD", "IFUP", "DEC-BIT-QUENCH2",
+ "QUENCH", "MSGSIZE", "HOSTDEAD", "#7",
+ "NET-UNREACH", "HOST-UNREACH", "PROTO-UNREACH", "PORT-UNREACH",
+ "#12", "SRCFAIL-UNREACH", "NET-REDIRECT", "HOST-REDIRECT",
+ "TOSNET-REDIRECT", "TOSHOST-REDIRECT", "TX-INTRANS", "TX-REASS",
+ "PARAMPROB", "ADMIN-UNREACH"
+};
+#endif
+
+/*
+ * The arguments to ctloutput are:
+ * (*protosw[].pr_ctloutput)(req, so, level, optname, optval, p);
+ * req is one of the actions listed below, so is a (struct socket *),
+ * level is an indication of which protocol layer the option is intended
+ * for, optname is a protocol-dependent socket option request, and
+ * optval is a pointer to a mbuf-chain pointer for value-return results.
+ * The protocol is responsible for disposal of the mbuf chain *optval
+ * if supplied; the caller is responsible for any space held by *optval
+ * when returned.  A non-zero return from ctloutput gives a
+ * UNIX error number which should be passed to higher-level software.
+ */
+#define PRCO_GETOPT 0
+#define PRCO_SETOPT 1
+
+#define PRCO_NCMDS 2
+
+#ifdef PRCOREQUESTS
+char *prcorequests[] = {
+ "GETOPT", "SETOPT",
+};
+#endif
+
+#ifdef _KERNEL
+void pfctlinput(int, struct sockaddr *);
+void pfctlinput2(int, struct sockaddr *, void *);
+struct protosw *pffindproto(int family, int protocol, int type);
+struct protosw *pffindtype(int family, int type);
+int pf_proto_register(int family, struct protosw *npr);
+int pf_proto_unregister(int family, int protocol, int type);
+#endif
+
+#endif
diff --git a/rtems/freebsd/sys/queue.h b/rtems/freebsd/sys/queue.h
new file mode 100644
index 00000000..0675e989
--- /dev/null
+++ b/rtems/freebsd/sys/queue.h
@@ -0,0 +1,636 @@
+/*-
+ * Copyright (c) 1991, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)queue.h 8.5 (Berkeley) 8/20/94
+ * $FreeBSD$
+ */
+
+#ifndef _SYS_QUEUE_HH_
+#define _SYS_QUEUE_HH_
+
+#include <rtems/freebsd/sys/cdefs.h>
+
+/*
+ * This file defines four types of data structures: singly-linked lists,
+ * singly-linked tail queues, lists and tail queues.
+ *
+ * A singly-linked list is headed by a single forward pointer. The elements
+ * are singly linked for minimum space and pointer manipulation overhead at
+ * the expense of O(n) removal for arbitrary elements. New elements can be
+ * added to the list after an existing element or at the head of the list.
+ * Elements being removed from the head of the list should use the explicit
+ * macro for this purpose for optimum efficiency. A singly-linked list may
+ * only be traversed in the forward direction. Singly-linked lists are ideal
+ * for applications with large datasets and few or no removals or for
+ * implementing a LIFO queue.
+ *
+ * A singly-linked tail queue is headed by a pair of pointers, one to the
+ * head of the list and the other to the tail of the list. The elements are
+ * singly linked for minimum space and pointer manipulation overhead at the
+ * expense of O(n) removal for arbitrary elements. New elements can be added
+ * to the list after an existing element, at the head of the list, or at the
+ * end of the list. Elements being removed from the head of the tail queue
+ * should use the explicit macro for this purpose for optimum efficiency.
+ * A singly-linked tail queue may only be traversed in the forward direction.
+ * Singly-linked tail queues are ideal for applications with large datasets
+ * and few or no removals or for implementing a FIFO queue.
+ *
+ * A list is headed by a single forward pointer (or an array of forward
+ * pointers for a hash table header). The elements are doubly linked
+ * so that an arbitrary element can be removed without a need to
+ * traverse the list. New elements can be added to the list before
+ * or after an existing element or at the head of the list. A list
+ * may only be traversed in the forward direction.
+ *
+ * A tail queue is headed by a pair of pointers, one to the head of the
+ * list and the other to the tail of the list. The elements are doubly
+ * linked so that an arbitrary element can be removed without a need to
+ * traverse the list. New elements can be added to the list before or
+ * after an existing element, at the head of the list, or at the end of
+ * the list. A tail queue may be traversed in either direction.
+ *
+ * For details on the use of these macros, see the queue(3) manual page.
+ *
+ *
+ * SLIST LIST STAILQ TAILQ
+ * _HEAD + + + +
+ * _HEAD_INITIALIZER + + + +
+ * _ENTRY + + + +
+ * _INIT + + + +
+ * _EMPTY + + + +
+ * _FIRST + + + +
+ * _NEXT + + + +
+ * _PREV - - - +
+ * _LAST - - + +
+ * _FOREACH + + + +
+ * _FOREACH_SAFE + + + +
+ * _FOREACH_REVERSE - - - +
+ * _FOREACH_REVERSE_SAFE - - - +
+ * _INSERT_HEAD + + + +
+ * _INSERT_BEFORE - + - +
+ * _INSERT_AFTER + + + +
+ * _INSERT_TAIL - - + +
+ * _CONCAT - - + +
+ * _REMOVE_AFTER + - + -
+ * _REMOVE_HEAD + - + -
+ * _REMOVE + + + +
+ *
+ */
+#ifdef QUEUE_MACRO_DEBUG
+/* Store the last 2 places the queue element or head was altered */
+struct qm_trace {
+ char * lastfile;
+ int lastline;
+ char * prevfile;
+ int prevline;
+};
+
+#define TRACEBUF struct qm_trace trace;
+#define TRASHIT(x) do {(x) = (void *)-1;} while (0)
+#define QMD_SAVELINK(name, link) void **name = (void *)&(link)
+
+#define QMD_TRACE_HEAD(head) do { \
+ (head)->trace.prevline = (head)->trace.lastline; \
+ (head)->trace.prevfile = (head)->trace.lastfile; \
+ (head)->trace.lastline = __LINE__; \
+ (head)->trace.lastfile = __FILE__; \
+} while (0)
+
+#define QMD_TRACE_ELEM(elem) do { \
+ (elem)->trace.prevline = (elem)->trace.lastline; \
+ (elem)->trace.prevfile = (elem)->trace.lastfile; \
+ (elem)->trace.lastline = __LINE__; \
+ (elem)->trace.lastfile = __FILE__; \
+} while (0)
+
+#else
+#define QMD_TRACE_ELEM(elem)
+#define QMD_TRACE_HEAD(head)
+#define QMD_SAVELINK(name, link)
+#define TRACEBUF
+#define TRASHIT(x)
+#endif /* QUEUE_MACRO_DEBUG */
+
+/*
+ * Singly-linked List declarations.
+ */
+#define SLIST_HEAD(name, type) \
+struct name { \
+ struct type *slh_first; /* first element */ \
+}
+
+#define SLIST_HEAD_INITIALIZER(head) \
+ { NULL }
+
+#define SLIST_ENTRY(type) \
+struct { \
+ struct type *sle_next; /* next element */ \
+}
+
+/*
+ * Singly-linked List functions.
+ */
+#define SLIST_EMPTY(head) ((head)->slh_first == NULL)
+
+#define SLIST_FIRST(head) ((head)->slh_first)
+
+#define SLIST_FOREACH(var, head, field) \
+ for ((var) = SLIST_FIRST((head)); \
+ (var); \
+ (var) = SLIST_NEXT((var), field))
+
+#define SLIST_FOREACH_SAFE(var, head, field, tvar) \
+ for ((var) = SLIST_FIRST((head)); \
+ (var) && ((tvar) = SLIST_NEXT((var), field), 1); \
+ (var) = (tvar))
+
+#define SLIST_FOREACH_PREVPTR(var, varp, head, field) \
+ for ((varp) = &SLIST_FIRST((head)); \
+ ((var) = *(varp)) != NULL; \
+ (varp) = &SLIST_NEXT((var), field))
+
+#define SLIST_INIT(head) do { \
+ SLIST_FIRST((head)) = NULL; \
+} while (0)
+
+#define SLIST_INSERT_AFTER(slistelm, elm, field) do { \
+ SLIST_NEXT((elm), field) = SLIST_NEXT((slistelm), field); \
+ SLIST_NEXT((slistelm), field) = (elm); \
+} while (0)
+
+#define SLIST_INSERT_HEAD(head, elm, field) do { \
+ SLIST_NEXT((elm), field) = SLIST_FIRST((head)); \
+ SLIST_FIRST((head)) = (elm); \
+} while (0)
+
+#define SLIST_NEXT(elm, field) ((elm)->field.sle_next)
+
+#define SLIST_REMOVE(head, elm, type, field) do { \
+ QMD_SAVELINK(oldnext, (elm)->field.sle_next); \
+ if (SLIST_FIRST((head)) == (elm)) { \
+ SLIST_REMOVE_HEAD((head), field); \
+ } \
+ else { \
+ struct type *curelm = SLIST_FIRST((head)); \
+ while (SLIST_NEXT(curelm, field) != (elm)) \
+ curelm = SLIST_NEXT(curelm, field); \
+ SLIST_REMOVE_AFTER(curelm, field); \
+ } \
+ TRASHIT(*oldnext); \
+} while (0)
+
+#define SLIST_REMOVE_AFTER(elm, field) do { \
+ SLIST_NEXT(elm, field) = \
+ SLIST_NEXT(SLIST_NEXT(elm, field), field); \
+} while (0)
+
+#define SLIST_REMOVE_HEAD(head, field) do { \
+ SLIST_FIRST((head)) = SLIST_NEXT(SLIST_FIRST((head)), field); \
+} while (0)
+
+#define SLIST_SWAP(head1, head2, type) do { \
+ struct type *swap_first = SLIST_FIRST(head1); \
+ SLIST_FIRST(head1) = SLIST_FIRST(head2); \
+ SLIST_FIRST(head2) = swap_first; \
+} while (0)
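A minimal usage sketch with illustrative names (not part of the patch): the _SAFE iterator caches the next pointer before the body runs, which is what makes removing the current element legal mid-walk.

    #include <stdlib.h>
    #include <sys/queue.h>

    struct entry {
        int key;
        SLIST_ENTRY(entry) entries;
    };
    SLIST_HEAD(slisthead, entry);

    void
    drop_key(struct slisthead *head, int key)
    {
        struct entry *np, *tmp;

        SLIST_FOREACH_SAFE(np, head, entries, tmp) {
            if (np->key == key) {
                /* O(n): SLIST_REMOVE rescans from the head. */
                SLIST_REMOVE(head, np, entry, entries);
                free(np);
            }
        }
    }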
+
+/*
+ * Singly-linked Tail queue declarations.
+ */
+#define STAILQ_HEAD(name, type) \
+struct name { \
+ struct type *stqh_first;/* first element */ \
+ struct type **stqh_last;/* addr of last next element */ \
+}
+
+#define STAILQ_HEAD_INITIALIZER(head) \
+ { NULL, &(head).stqh_first }
+
+#define STAILQ_ENTRY(type) \
+struct { \
+ struct type *stqe_next; /* next element */ \
+}
+
+/*
+ * Singly-linked Tail queue functions.
+ */
+#define STAILQ_CONCAT(head1, head2) do { \
+ if (!STAILQ_EMPTY((head2))) { \
+ *(head1)->stqh_last = (head2)->stqh_first; \
+ (head1)->stqh_last = (head2)->stqh_last; \
+ STAILQ_INIT((head2)); \
+ } \
+} while (0)
+
+#define STAILQ_EMPTY(head) ((head)->stqh_first == NULL)
+
+#define STAILQ_FIRST(head) ((head)->stqh_first)
+
+#define STAILQ_FOREACH(var, head, field) \
+ for ((var) = STAILQ_FIRST((head)); \
+ (var); \
+ (var) = STAILQ_NEXT((var), field))
+
+
+#define STAILQ_FOREACH_SAFE(var, head, field, tvar) \
+ for ((var) = STAILQ_FIRST((head)); \
+ (var) && ((tvar) = STAILQ_NEXT((var), field), 1); \
+ (var) = (tvar))
+
+#define STAILQ_INIT(head) do { \
+ STAILQ_FIRST((head)) = NULL; \
+ (head)->stqh_last = &STAILQ_FIRST((head)); \
+} while (0)
+
+#define STAILQ_INSERT_AFTER(head, tqelm, elm, field) do { \
+ if ((STAILQ_NEXT((elm), field) = STAILQ_NEXT((tqelm), field)) == NULL)\
+ (head)->stqh_last = &STAILQ_NEXT((elm), field); \
+ STAILQ_NEXT((tqelm), field) = (elm); \
+} while (0)
+
+#define STAILQ_INSERT_HEAD(head, elm, field) do { \
+ if ((STAILQ_NEXT((elm), field) = STAILQ_FIRST((head))) == NULL) \
+ (head)->stqh_last = &STAILQ_NEXT((elm), field); \
+ STAILQ_FIRST((head)) = (elm); \
+} while (0)
+
+#define STAILQ_INSERT_TAIL(head, elm, field) do { \
+ STAILQ_NEXT((elm), field) = NULL; \
+ *(head)->stqh_last = (elm); \
+ (head)->stqh_last = &STAILQ_NEXT((elm), field); \
+} while (0)
+
+#define STAILQ_LAST(head, type, field) \
+ (STAILQ_EMPTY((head)) ? \
+ NULL : \
+ ((struct type *)(void *) \
+ ((char *)((head)->stqh_last) - __offsetof(struct type, field))))
+
+#define STAILQ_NEXT(elm, field) ((elm)->field.stqe_next)
+
+#define STAILQ_REMOVE(head, elm, type, field) do { \
+ QMD_SAVELINK(oldnext, (elm)->field.stqe_next); \
+ if (STAILQ_FIRST((head)) == (elm)) { \
+ STAILQ_REMOVE_HEAD((head), field); \
+ } \
+ else { \
+ struct type *curelm = STAILQ_FIRST((head)); \
+ while (STAILQ_NEXT(curelm, field) != (elm)) \
+ curelm = STAILQ_NEXT(curelm, field); \
+ STAILQ_REMOVE_AFTER(head, curelm, field); \
+ } \
+ TRASHIT(*oldnext); \
+} while (0)
+
+#define STAILQ_REMOVE_HEAD(head, field) do { \
+ if ((STAILQ_FIRST((head)) = \
+ STAILQ_NEXT(STAILQ_FIRST((head)), field)) == NULL) \
+ (head)->stqh_last = &STAILQ_FIRST((head)); \
+} while (0)
+
+#define STAILQ_REMOVE_AFTER(head, elm, field) do { \
+ if ((STAILQ_NEXT(elm, field) = \
+ STAILQ_NEXT(STAILQ_NEXT(elm, field), field)) == NULL) \
+ (head)->stqh_last = &STAILQ_NEXT((elm), field); \
+} while (0)
+
+#define STAILQ_SWAP(head1, head2, type) do { \
+ struct type *swap_first = STAILQ_FIRST(head1); \
+ struct type **swap_last = (head1)->stqh_last; \
+ STAILQ_FIRST(head1) = STAILQ_FIRST(head2); \
+ (head1)->stqh_last = (head2)->stqh_last; \
+ STAILQ_FIRST(head2) = swap_first; \
+ (head2)->stqh_last = swap_last; \
+ if (STAILQ_EMPTY(head1)) \
+ (head1)->stqh_last = &STAILQ_FIRST(head1); \
+ if (STAILQ_EMPTY(head2)) \
+ (head2)->stqh_last = &STAILQ_FIRST(head2); \
+} while (0)
+
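Because the head also tracks the address of the last next pointer (stqh_last), STAILQ adds O(1) tail insertion to a singly-linked layout, making it the natural FIFO work queue. A hedged sketch with invented names:

    #include <sys/queue.h>

    struct job {
        void (*fn)(void *);
        void *arg;
        STAILQ_ENTRY(job) link;
    };
    STAILQ_HEAD(jobq, job);

    void
    run_all(struct jobq *q)
    {
        struct job *j;

        while (!STAILQ_EMPTY(q)) {
            j = STAILQ_FIRST(q);
            STAILQ_REMOVE_HEAD(q, link);    /* O(1) dequeue */
            j->fn(j->arg);
        }
    }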
+
+/*
+ * List declarations.
+ */
+#define LIST_HEAD(name, type) \
+struct name { \
+ struct type *lh_first; /* first element */ \
+}
+
+#define LIST_HEAD_INITIALIZER(head) \
+ { NULL }
+
+#define LIST_ENTRY(type) \
+struct { \
+ struct type *le_next; /* next element */ \
+ struct type **le_prev; /* address of previous next element */ \
+}
+
+/*
+ * List functions.
+ */
+
+#if (defined(_KERNEL) && defined(INVARIANTS))
+#define QMD_LIST_CHECK_HEAD(head, field) do { \
+ if (LIST_FIRST((head)) != NULL && \
+ LIST_FIRST((head))->field.le_prev != \
+ &LIST_FIRST((head))) \
+ panic("Bad list head %p first->prev != head", (head)); \
+} while (0)
+
+#define QMD_LIST_CHECK_NEXT(elm, field) do { \
+ if (LIST_NEXT((elm), field) != NULL && \
+ LIST_NEXT((elm), field)->field.le_prev != \
+ &((elm)->field.le_next)) \
+ panic("Bad link elm %p next->prev != elm", (elm)); \
+} while (0)
+
+#define QMD_LIST_CHECK_PREV(elm, field) do { \
+ if (*(elm)->field.le_prev != (elm)) \
+ panic("Bad link elm %p prev->next != elm", (elm)); \
+} while (0)
+#else
+#define QMD_LIST_CHECK_HEAD(head, field)
+#define QMD_LIST_CHECK_NEXT(elm, field)
+#define QMD_LIST_CHECK_PREV(elm, field)
+#endif /* (_KERNEL && INVARIANTS) */
+
+#define LIST_EMPTY(head) ((head)->lh_first == NULL)
+
+#define LIST_FIRST(head) ((head)->lh_first)
+
+#define LIST_FOREACH(var, head, field) \
+ for ((var) = LIST_FIRST((head)); \
+ (var); \
+ (var) = LIST_NEXT((var), field))
+
+#define LIST_FOREACH_SAFE(var, head, field, tvar) \
+ for ((var) = LIST_FIRST((head)); \
+ (var) && ((tvar) = LIST_NEXT((var), field), 1); \
+ (var) = (tvar))
+
+#define LIST_INIT(head) do { \
+ LIST_FIRST((head)) = NULL; \
+} while (0)
+
+#define LIST_INSERT_AFTER(listelm, elm, field) do { \
+ QMD_LIST_CHECK_NEXT(listelm, field); \
+ if ((LIST_NEXT((elm), field) = LIST_NEXT((listelm), field)) != NULL)\
+ LIST_NEXT((listelm), field)->field.le_prev = \
+ &LIST_NEXT((elm), field); \
+ LIST_NEXT((listelm), field) = (elm); \
+ (elm)->field.le_prev = &LIST_NEXT((listelm), field); \
+} while (0)
+
+#define LIST_INSERT_BEFORE(listelm, elm, field) do { \
+ QMD_LIST_CHECK_PREV(listelm, field); \
+ (elm)->field.le_prev = (listelm)->field.le_prev; \
+ LIST_NEXT((elm), field) = (listelm); \
+ *(listelm)->field.le_prev = (elm); \
+ (listelm)->field.le_prev = &LIST_NEXT((elm), field); \
+} while (0)
+
+#define LIST_INSERT_HEAD(head, elm, field) do { \
+ QMD_LIST_CHECK_HEAD((head), field); \
+ if ((LIST_NEXT((elm), field) = LIST_FIRST((head))) != NULL) \
+ LIST_FIRST((head))->field.le_prev = &LIST_NEXT((elm), field);\
+ LIST_FIRST((head)) = (elm); \
+ (elm)->field.le_prev = &LIST_FIRST((head)); \
+} while (0)
+
+#define LIST_NEXT(elm, field) ((elm)->field.le_next)
+
+#define LIST_REMOVE(elm, field) do { \
+ QMD_SAVELINK(oldnext, (elm)->field.le_next); \
+ QMD_SAVELINK(oldprev, (elm)->field.le_prev); \
+ QMD_LIST_CHECK_NEXT(elm, field); \
+ QMD_LIST_CHECK_PREV(elm, field); \
+ if (LIST_NEXT((elm), field) != NULL) \
+ LIST_NEXT((elm), field)->field.le_prev = \
+ (elm)->field.le_prev; \
+ *(elm)->field.le_prev = LIST_NEXT((elm), field); \
+ TRASHIT(*oldnext); \
+ TRASHIT(*oldprev); \
+} while (0)
+
+#define LIST_SWAP(head1, head2, type, field) do { \
+ struct type *swap_tmp = LIST_FIRST((head1)); \
+ LIST_FIRST((head1)) = LIST_FIRST((head2)); \
+ LIST_FIRST((head2)) = swap_tmp; \
+ if ((swap_tmp = LIST_FIRST((head1))) != NULL) \
+ swap_tmp->field.le_prev = &LIST_FIRST((head1)); \
+ if ((swap_tmp = LIST_FIRST((head2))) != NULL) \
+ swap_tmp->field.le_prev = &LIST_FIRST((head2)); \
+} while (0)
+
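The le_prev field stores the address of the previous element's next pointer (or of lh_first itself), so removal needs neither the list head nor a scan. A sketch, names invented:

    #include <sys/queue.h>

    struct node {
        LIST_ENTRY(node) link;
    };
    LIST_HEAD(nodelist, node);

    void
    detach(struct node *n)
    {
        LIST_REMOVE(n, link);   /* O(1); no head pointer required */
    }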
+/*
+ * Tail queue declarations.
+ */
+#define TAILQ_HEAD(name, type) \
+struct name { \
+ struct type *tqh_first; /* first element */ \
+ struct type **tqh_last; /* addr of last next element */ \
+ TRACEBUF \
+}
+
+#define TAILQ_HEAD_INITIALIZER(head) \
+ { NULL, &(head).tqh_first }
+
+#define TAILQ_ENTRY(type) \
+struct { \
+ struct type *tqe_next; /* next element */ \
+ struct type **tqe_prev; /* address of previous next element */ \
+ TRACEBUF \
+}
+
+/*
+ * Tail queue functions.
+ */
+#if (defined(_KERNEL) && defined(INVARIANTS))
+#define QMD_TAILQ_CHECK_HEAD(head, field) do { \
+ if (!TAILQ_EMPTY(head) && \
+ TAILQ_FIRST((head))->field.tqe_prev != \
+ &TAILQ_FIRST((head))) \
+ panic("Bad tailq head %p first->prev != head", (head)); \
+} while (0)
+
+#define QMD_TAILQ_CHECK_TAIL(head, field) do { \
+ if (*(head)->tqh_last != NULL) \
+ panic("Bad tailq NEXT(%p->tqh_last) != NULL", (head)); \
+} while (0)
+
+#define QMD_TAILQ_CHECK_NEXT(elm, field) do { \
+ if (TAILQ_NEXT((elm), field) != NULL && \
+ TAILQ_NEXT((elm), field)->field.tqe_prev != \
+ &((elm)->field.tqe_next)) \
+ panic("Bad link elm %p next->prev != elm", (elm)); \
+} while (0)
+
+#define QMD_TAILQ_CHECK_PREV(elm, field) do { \
+ if (*(elm)->field.tqe_prev != (elm)) \
+ panic("Bad link elm %p prev->next != elm", (elm)); \
+} while (0)
+#else
+#define QMD_TAILQ_CHECK_HEAD(head, field)
+#define QMD_TAILQ_CHECK_TAIL(head, field)
+#define QMD_TAILQ_CHECK_NEXT(elm, field)
+#define QMD_TAILQ_CHECK_PREV(elm, field)
+#endif /* (_KERNEL && INVARIANTS) */
+
+#define TAILQ_CONCAT(head1, head2, field) do { \
+ if (!TAILQ_EMPTY(head2)) { \
+ *(head1)->tqh_last = (head2)->tqh_first; \
+ (head2)->tqh_first->field.tqe_prev = (head1)->tqh_last; \
+ (head1)->tqh_last = (head2)->tqh_last; \
+ TAILQ_INIT((head2)); \
+ QMD_TRACE_HEAD(head1); \
+ QMD_TRACE_HEAD(head2); \
+ } \
+} while (0)
+
+#define TAILQ_EMPTY(head) ((head)->tqh_first == NULL)
+
+#define TAILQ_FIRST(head) ((head)->tqh_first)
+
+#define TAILQ_FOREACH(var, head, field) \
+ for ((var) = TAILQ_FIRST((head)); \
+ (var); \
+ (var) = TAILQ_NEXT((var), field))
+
+#define TAILQ_FOREACH_SAFE(var, head, field, tvar) \
+ for ((var) = TAILQ_FIRST((head)); \
+ (var) && ((tvar) = TAILQ_NEXT((var), field), 1); \
+ (var) = (tvar))
+
+#define TAILQ_FOREACH_REVERSE(var, head, headname, field) \
+ for ((var) = TAILQ_LAST((head), headname); \
+ (var); \
+ (var) = TAILQ_PREV((var), headname, field))
+
+#define TAILQ_FOREACH_REVERSE_SAFE(var, head, headname, field, tvar) \
+ for ((var) = TAILQ_LAST((head), headname); \
+ (var) && ((tvar) = TAILQ_PREV((var), headname, field), 1); \
+ (var) = (tvar))
+
+#define TAILQ_INIT(head) do { \
+ TAILQ_FIRST((head)) = NULL; \
+ (head)->tqh_last = &TAILQ_FIRST((head)); \
+ QMD_TRACE_HEAD(head); \
+} while (0)
+
+#define TAILQ_INSERT_AFTER(head, listelm, elm, field) do { \
+ QMD_TAILQ_CHECK_NEXT(listelm, field); \
+ if ((TAILQ_NEXT((elm), field) = TAILQ_NEXT((listelm), field)) != NULL)\
+ TAILQ_NEXT((elm), field)->field.tqe_prev = \
+ &TAILQ_NEXT((elm), field); \
+ else { \
+ (head)->tqh_last = &TAILQ_NEXT((elm), field); \
+ QMD_TRACE_HEAD(head); \
+ } \
+ TAILQ_NEXT((listelm), field) = (elm); \
+ (elm)->field.tqe_prev = &TAILQ_NEXT((listelm), field); \
+ QMD_TRACE_ELEM(&(elm)->field); \
+ QMD_TRACE_ELEM(&listelm->field); \
+} while (0)
+
+#define TAILQ_INSERT_BEFORE(listelm, elm, field) do { \
+ QMD_TAILQ_CHECK_PREV(listelm, field); \
+ (elm)->field.tqe_prev = (listelm)->field.tqe_prev; \
+ TAILQ_NEXT((elm), field) = (listelm); \
+ *(listelm)->field.tqe_prev = (elm); \
+ (listelm)->field.tqe_prev = &TAILQ_NEXT((elm), field); \
+ QMD_TRACE_ELEM(&(elm)->field); \
+ QMD_TRACE_ELEM(&listelm->field); \
+} while (0)
+
+#define TAILQ_INSERT_HEAD(head, elm, field) do { \
+ QMD_TAILQ_CHECK_HEAD(head, field); \
+ if ((TAILQ_NEXT((elm), field) = TAILQ_FIRST((head))) != NULL) \
+ TAILQ_FIRST((head))->field.tqe_prev = \
+ &TAILQ_NEXT((elm), field); \
+ else \
+ (head)->tqh_last = &TAILQ_NEXT((elm), field); \
+ TAILQ_FIRST((head)) = (elm); \
+ (elm)->field.tqe_prev = &TAILQ_FIRST((head)); \
+ QMD_TRACE_HEAD(head); \
+ QMD_TRACE_ELEM(&(elm)->field); \
+} while (0)
+
+#define TAILQ_INSERT_TAIL(head, elm, field) do { \
+ QMD_TAILQ_CHECK_TAIL(head, field); \
+ TAILQ_NEXT((elm), field) = NULL; \
+ (elm)->field.tqe_prev = (head)->tqh_last; \
+ *(head)->tqh_last = (elm); \
+ (head)->tqh_last = &TAILQ_NEXT((elm), field); \
+ QMD_TRACE_HEAD(head); \
+ QMD_TRACE_ELEM(&(elm)->field); \
+} while (0)
+
+#define TAILQ_LAST(head, headname) \
+ (*(((struct headname *)((head)->tqh_last))->tqh_last))
+
+#define TAILQ_NEXT(elm, field) ((elm)->field.tqe_next)
+
+#define TAILQ_PREV(elm, headname, field) \
+ (*(((struct headname *)((elm)->field.tqe_prev))->tqh_last))
+
+#define TAILQ_REMOVE(head, elm, field) do { \
+ QMD_SAVELINK(oldnext, (elm)->field.tqe_next); \
+ QMD_SAVELINK(oldprev, (elm)->field.tqe_prev); \
+ QMD_TAILQ_CHECK_NEXT(elm, field); \
+ QMD_TAILQ_CHECK_PREV(elm, field); \
+ if ((TAILQ_NEXT((elm), field)) != NULL) \
+ TAILQ_NEXT((elm), field)->field.tqe_prev = \
+ (elm)->field.tqe_prev; \
+ else { \
+ (head)->tqh_last = (elm)->field.tqe_prev; \
+ QMD_TRACE_HEAD(head); \
+ } \
+ *(elm)->field.tqe_prev = TAILQ_NEXT((elm), field); \
+ TRASHIT(*oldnext); \
+ TRASHIT(*oldprev); \
+ QMD_TRACE_ELEM(&(elm)->field); \
+} while (0)
+
+#define TAILQ_SWAP(head1, head2, type, field) do { \
+ struct type *swap_first = (head1)->tqh_first; \
+ struct type **swap_last = (head1)->tqh_last; \
+ (head1)->tqh_first = (head2)->tqh_first; \
+ (head1)->tqh_last = (head2)->tqh_last; \
+ (head2)->tqh_first = swap_first; \
+ (head2)->tqh_last = swap_last; \
+ if ((swap_first = (head1)->tqh_first) != NULL) \
+ swap_first->field.tqe_prev = &(head1)->tqh_first; \
+ else \
+ (head1)->tqh_last = &(head1)->tqh_first; \
+ if ((swap_first = (head2)->tqh_first) != NULL) \
+ swap_first->field.tqe_prev = &(head2)->tqh_first; \
+ else \
+ (head2)->tqh_last = &(head2)->tqh_first; \
+} while (0)
+
+#endif /* !_SYS_QUEUE_HH_ */
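One TAILQ subtlety worth a sketch (illustrative names, not part of the patch): the reverse operations need the head structure's tag, because TAILQ_LAST and TAILQ_PREV work by reinterpreting tqh_last/tqe_prev as a pointer to the head type.

    #include <sys/queue.h>

    struct msg {
        TAILQ_ENTRY(msg) link;
    };
    TAILQ_HEAD(msgq, msg);              /* the tag "msgq" is needed below */

    void
    walk_backwards(struct msgq *q, void (*visit)(struct msg *))
    {
        struct msg *m;

        TAILQ_FOREACH_REVERSE(m, q, msgq, link)
            visit(m);
    }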
diff --git a/rtems/freebsd/sys/random.h b/rtems/freebsd/sys/random.h
new file mode 100644
index 00000000..e5dea939
--- /dev/null
+++ b/rtems/freebsd/sys/random.h
@@ -0,0 +1,66 @@
+/*-
+ * Copyright (c) 2000 Mark R. V. Murray
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer
+ * in this position and unchanged.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _SYS_RANDOM_HH_
+#define _SYS_RANDOM_HH_
+
+#ifdef _KERNEL
+
+int read_random(void *, int);
+
+/*
+ * Note: if you add or remove members of esource, remember to also update the
+ * KASSERT regarding what valid members are in random_harvest_internal().
+ */
+enum esource {
+ RANDOM_START = 0,
+ RANDOM_WRITE = 0,
+ RANDOM_KEYBOARD,
+ RANDOM_MOUSE,
+ RANDOM_NET,
+ RANDOM_INTERRUPT,
+ RANDOM_PURE,
+ ENTROPYSOURCE
+};
+void random_harvest(void *, u_int, u_int, u_int, enum esource);
+
+/*
+ * Allow the sysadmin to select the broad category of
+ * entropy types to harvest.
+ */
+struct harvest_select {
+ int ethernet;
+ int point_to_point;
+ int interrupt;
+ int swi;
+};
+
+extern struct harvest_select harvest;
+
+#endif /* _KERNEL */
+
+#endif /* _SYS_RANDOM_HH_ */
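A hypothetical caller, not part of the patch (the function and its argument values are invented for illustration): a network driver donating the volatile parts of a received frame, gated on the sysadmin's harvest selection. Following the prototype above, the arguments are read as buffer, byte count, estimated entropy bits, fractional bits, and source class.

    static void
    foo_harvest_frame(void *frame, u_int len)
    {
        if (harvest.ethernet)
            random_harvest(frame, len, 2 /* est. bits */, 0, RANDOM_NET);
    }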
diff --git a/rtems/freebsd/sys/reboot.h b/rtems/freebsd/sys/reboot.h
new file mode 100644
index 00000000..c2ffd30e
--- /dev/null
+++ b/rtems/freebsd/sys/reboot.h
@@ -0,0 +1,66 @@
+/*-
+ * Copyright (c) 1982, 1986, 1988, 1993, 1994
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)reboot.h 8.3 (Berkeley) 12/13/94
+ * $FreeBSD$
+ */
+
+#ifndef _SYS_REBOOT_HH_
+#define _SYS_REBOOT_HH_
+
+/*
+ * Arguments to the reboot system call. These are passed to
+ * the boot program and on to init.
+ */
+#define RB_AUTOBOOT 0 /* flags for system auto-booting itself */
+
+#define RB_ASKNAME 0x001 /* ask for file name to reboot from */
+#define RB_SINGLE 0x002 /* reboot to single user only */
+#define RB_NOSYNC 0x004 /* don't sync before reboot */
+#define RB_HALT 0x008 /* don't reboot, just halt */
+#define RB_INITNAME 0x010 /* name given for /etc/init (unused) */
+#define RB_DFLTROOT 0x020 /* use compiled-in rootdev */
+#define RB_KDB 0x040 /* give control to kernel debugger */
+#define RB_RDONLY 0x080 /* mount root fs read-only */
+#define RB_DUMP 0x100 /* dump kernel memory before reboot */
+#define RB_MINIROOT 0x200 /* mini-root present in memory at boot time */
+#define RB_VERBOSE 0x800 /* print all potentially useful info */
+#define RB_SERIAL 0x1000 /* use serial port as console */
+#define RB_CDROM 0x2000 /* use cdrom as root */
+#define RB_POWEROFF 0x4000 /* turn the power off if possible */
+#define RB_GDB 0x8000 /* use GDB remote debugger instead of DDB */
+#define RB_MUTE 0x10000 /* start up with the console muted */
+#define RB_SELFTEST 0x20000 /* don't complete the boot; do selftest */
+#define RB_RESERVED1 0x40000 /* reserved for internal use of boot blocks */
+#define RB_RESERVED2 0x80000 /* reserved for internal use of boot blocks */
+#define RB_PAUSE 0x100000 /* pause after each output line during probe */
+#define RB_MULTIPLE 0x20000000 /* use multiple consoles */
+
+#define RB_BOOTINFO 0x80000000 /* have `struct bootinfo *' arg */
+
+#endif
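The flags combine with bitwise OR. A userland sketch (privilege required; the function name is invented): requesting an immediate halt without syncing dirty buffers first.

    #include <sys/reboot.h>
    #include <unistd.h>

    int
    halt_unsynced(void)
    {
        return (reboot(RB_HALT | RB_NOSYNC));
    }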
diff --git a/rtems/freebsd/sys/refcount.h b/rtems/freebsd/sys/refcount.h
new file mode 100644
index 00000000..57e97b41
--- /dev/null
+++ b/rtems/freebsd/sys/refcount.h
@@ -0,0 +1,68 @@
+/*-
+ * Copyright (c) 2005 John Baldwin <jhb@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the author nor the names of any co-contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef __SYS_REFCOUNT_HH__
+#define __SYS_REFCOUNT_HH__
+
+#include <rtems/freebsd/machine/atomic.h>
+
+#ifdef _KERNEL
+#include <rtems/freebsd/sys/systm.h>
+#else
+#define KASSERT(exp, msg) /* */
+#endif
+
+static __inline void
+refcount_init(volatile u_int *count, u_int value)
+{
+
+ *count = value;
+}
+
+static __inline void
+refcount_acquire(volatile u_int *count)
+{
+
+ atomic_add_acq_int(count, 1);
+}
+
+static __inline int
+refcount_release(volatile u_int *count)
+{
+ u_int old;
+
+ /* XXX: Should this have a rel membar? */
+ old = atomic_fetchadd_int(count, -1);
+ KASSERT(old > 0, ("negative refcount %p", count));
+ return (old == 1);
+}
+
+#endif /* ! __SYS_REFCOUNT_HH__ */
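A usage sketch with an invented object type (the include path mirrors the style above and M_TEMP is the stock malloc(9) tag): the creator holds the first reference, and whichever thread sees refcount_release() return non-zero frees the object.

    #include <rtems/freebsd/sys/malloc.h>   /* assumed path for malloc(9) */

    struct widget {
        volatile u_int w_refs;
        /* ... payload ... */
    };

    static struct widget *
    widget_alloc(void)
    {
        struct widget *w;

        w = malloc(sizeof(*w), M_TEMP, M_WAITOK | M_ZERO);
        refcount_init(&w->w_refs, 1);   /* creator owns one reference */
        return (w);
    }

    static void
    widget_drop(struct widget *w)
    {
        if (refcount_release(&w->w_refs))
            free(w, M_TEMP);            /* last reference is gone */
    }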
diff --git a/rtems/freebsd/sys/resource.h b/rtems/freebsd/sys/resource.h
new file mode 100644
index 00000000..25a7aaad
--- /dev/null
+++ b/rtems/freebsd/sys/resource.h
@@ -0,0 +1,176 @@
+/*-
+ * Copyright (c) 1982, 1986, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)resource.h 8.4 (Berkeley) 1/9/95
+ * $FreeBSD$
+ */
+
+#ifndef _SYS_RESOURCE_HH_
+#define _SYS_RESOURCE_HH_
+
+#include <rtems/freebsd/sys/cdefs.h>
+#include <rtems/freebsd/sys/_timeval.h>
+#include <rtems/freebsd/sys/_types.h>
+
+/*
+ * Process priority specifications to get/setpriority.
+ */
+#define PRIO_MIN -20
+#define PRIO_MAX 20
+
+#define PRIO_PROCESS 0
+#define PRIO_PGRP 1
+#define PRIO_USER 2
+
+/*
+ * Resource utilization information.
+ *
+ * All fields are only modified by curthread, so
+ * no locks are required to read them.
+ */
+
+#define RUSAGE_SELF 0
+#define RUSAGE_CHILDREN -1
+#define RUSAGE_THREAD 1
+
+struct rusage {
+ struct timeval ru_utime; /* user time used */
+ struct timeval ru_stime; /* system time used */
+ long ru_maxrss; /* max resident set size */
+#define ru_first ru_ixrss
+ long ru_ixrss; /* integral shared memory size */
+ long ru_idrss; /* integral unshared data " */
+ long ru_isrss; /* integral unshared stack " */
+ long ru_minflt; /* page reclaims */
+ long ru_majflt; /* page faults */
+ long ru_nswap; /* swaps */
+ long ru_inblock; /* block input operations */
+ long ru_oublock; /* block output operations */
+ long ru_msgsnd; /* messages sent */
+ long ru_msgrcv; /* messages received */
+ long ru_nsignals; /* signals received */
+ long ru_nvcsw; /* voluntary context switches */
+ long ru_nivcsw; /* involuntary " */
+#define ru_last ru_nivcsw
+};
+
+/*
+ * Resource limits
+ */
+#define RLIMIT_CPU 0 /* maximum cpu time in seconds */
+#define RLIMIT_FSIZE 1 /* maximum file size */
+#define RLIMIT_DATA 2 /* data size */
+#define RLIMIT_STACK 3 /* stack size */
+#define RLIMIT_CORE 4 /* core file size */
+#define RLIMIT_RSS 5 /* resident set size */
+#define RLIMIT_MEMLOCK 6 /* locked-in-memory address space */
+#define RLIMIT_NPROC 7 /* number of processes */
+#define RLIMIT_NOFILE 8 /* number of open files */
+#define RLIMIT_SBSIZE 9 /* maximum size of all socket buffers */
+#define RLIMIT_VMEM 10 /* virtual process size (inclusive of mmap) */
+#define RLIMIT_AS RLIMIT_VMEM /* standard name for RLIMIT_VMEM */
+#define RLIMIT_NPTS 11 /* pseudo-terminals */
+#define RLIMIT_SWAP 12 /* swap used */
+
+#define RLIM_NLIMITS 13 /* number of resource limits */
+
+#define RLIM_INFINITY ((rlim_t)(((uint64_t)1 << 63) - 1))
+/* XXX Missing: RLIM_SAVED_MAX, RLIM_SAVED_CUR */
+
+
+/*
+ * Resource limit string identifiers
+ */
+
+#ifdef _RLIMIT_IDENT
+static char *rlimit_ident[RLIM_NLIMITS] = {
+ "cpu",
+ "fsize",
+ "data",
+ "stack",
+ "core",
+ "rss",
+ "memlock",
+ "nproc",
+ "nofile",
+ "sbsize",
+ "vmem",
+ "npts",
+ "swap",
+};
+#endif
+
+#ifndef _RLIM_T_DECLARED
+typedef __rlim_t rlim_t;
+#define _RLIM_T_DECLARED
+#endif
+
+struct rlimit {
+ rlim_t rlim_cur; /* current (soft) limit */
+ rlim_t rlim_max; /* maximum value for rlim_cur */
+};
+
+#if __BSD_VISIBLE
+
+struct orlimit {
+ __int32_t rlim_cur; /* current (soft) limit */
+ __int32_t rlim_max; /* maximum value for rlim_cur */
+};
+
+struct loadavg {
+ __fixpt_t ldavg[3];
+ long fscale;
+};
+
+#define CP_USER 0
+#define CP_NICE 1
+#define CP_SYS 2
+#define CP_INTR 3
+#define CP_IDLE 4
+#define CPUSTATES 5
+
+#endif /* __BSD_VISIBLE */
+
+#ifdef _KERNEL
+
+extern struct loadavg averunnable;
+void read_cpu_time(long *cp_time); /* Writes array of CPUSTATES */
+
+#else
+
+__BEGIN_DECLS
+/* XXX 2nd arg to [gs]etpriority() should be an id_t */
+int getpriority(int, int);
+int getrlimit(int, struct rlimit *);
+int getrusage(int, struct rusage *);
+int setpriority(int, int, int);
+int setrlimit(int, const struct rlimit *);
+__END_DECLS
+
+#endif /* _KERNEL */
+#endif /* !_SYS_RESOURCE_HH_ */
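A userland sketch of the soft/hard limit relationship (illustrative function name): the soft limit may be raised at most to the hard limit.

    #include <sys/resource.h>

    int
    raise_nofile(void)
    {
        struct rlimit rl;

        if (getrlimit(RLIMIT_NOFILE, &rl) != 0)
            return (-1);
        rl.rlim_cur = rl.rlim_max;      /* soft limit up to the hard limit */
        return (setrlimit(RLIMIT_NOFILE, &rl));
    }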
diff --git a/rtems/freebsd/sys/resourcevar.h b/rtems/freebsd/sys/resourcevar.h
new file mode 100644
index 00000000..3a90306c
--- /dev/null
+++ b/rtems/freebsd/sys/resourcevar.h
@@ -0,0 +1,143 @@
+/*-
+ * Copyright (c) 1991, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)resourcevar.h 8.4 (Berkeley) 1/9/95
+ * $FreeBSD$
+ */
+
+#ifndef _SYS_RESOURCEVAR_HH_
+#define _SYS_RESOURCEVAR_HH_
+
+#include <rtems/freebsd/sys/resource.h>
+#include <rtems/freebsd/sys/queue.h>
+#ifdef _KERNEL
+#include <rtems/freebsd/sys/_lock.h>
+#include <rtems/freebsd/sys/_mutex.h>
+#endif
+
+/*
+ * Kernel per-process accounting / statistics
+ * (not necessarily resident except when running).
+ *
+ * Locking key:
+ * b - created at fork, never changes
+ * c - locked by proc mtx
+ * j - locked by proc slock
+ * k - only accessed by curthread
+ */
+struct pstats {
+#define pstat_startzero p_cru
+ struct rusage p_cru; /* Stats for reaped children. */
+ struct itimerval p_timer[3]; /* (j) Virtual-time timers. */
+#define pstat_endzero pstat_startcopy
+
+#define pstat_startcopy p_prof
+ struct uprof { /* Profile arguments. */
+ caddr_t pr_base; /* (c + j) Buffer base. */
+ u_long pr_size; /* (c + j) Buffer size. */
+ u_long pr_off; /* (c + j) PC offset. */
+ u_long pr_scale; /* (c + j) PC scaling. */
+ } p_prof;
+#define pstat_endcopy p_start
+ struct timeval p_start; /* (b) Starting time. */
+};
+
+#ifdef _KERNEL
+
+/*
+ * Kernel shareable process resource limits. Because this structure
+ * is moderately large but changes infrequently, it is normally
+ * shared copy-on-write after forks.
+ */
+struct plimit {
+ struct rlimit pl_rlimit[RLIM_NLIMITS];
+ int pl_refcnt; /* number of references */
+};
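A simplified sketch of the copy-on-write discipline described above (the function name is invented and locking is omitted; only the lim_alloc/lim_copy/lim_free routines declared below are assumed): a process about to modify a shared plimit takes a private copy first.

    struct plimit *
    lim_private_copy(struct plimit *shared)
    {
        struct plimit *copy;

        if (shared->pl_refcnt == 1)
            return (shared);        /* sole owner: modify in place */
        copy = lim_alloc();
        lim_copy(copy, shared);
        lim_free(shared);           /* drop one reference to the old copy */
        return (copy);
    }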
+
+/*-
+ * Per uid resource consumption
+ *
+ * Locking guide:
+ * (a) Constant from inception
+ * (b) Lockless, updated using atomics
+ * (c) Locked by global uihashtbl_mtx
+ * (d) Locked by the ui_vmsize_mtx
+ */
+struct uidinfo {
+ LIST_ENTRY(uidinfo) ui_hash; /* (c) hash chain of uidinfos */
+ struct mtx ui_vmsize_mtx;
+ vm_ooffset_t ui_vmsize; /* (d) swap reservation by uid */
+ long ui_sbsize; /* (b) socket buffer space consumed */
+ long ui_proccnt; /* (b) number of processes */
+ long ui_ptscnt; /* (b) number of pseudo-terminals */
+ uid_t ui_uid; /* (a) uid */
+ u_int ui_ref; /* (b) reference count */
+};
+
+#define UIDINFO_VMSIZE_LOCK(ui) mtx_lock(&((ui)->ui_vmsize_mtx))
+#define UIDINFO_VMSIZE_UNLOCK(ui) mtx_unlock(&((ui)->ui_vmsize_mtx))
+
+struct proc;
+struct rusage_ext;
+struct thread;
+
+void addupc_intr(struct thread *td, uintfptr_t pc, u_int ticks);
+void addupc_task(struct thread *td, uintfptr_t pc, u_int ticks);
+void calccru(struct proc *p, struct timeval *up, struct timeval *sp);
+void calcru(struct proc *p, struct timeval *up, struct timeval *sp);
+int chgproccnt(struct uidinfo *uip, int diff, rlim_t maxval);
+int chgsbsize(struct uidinfo *uip, u_int *hiwat, u_int to,
+ rlim_t maxval);
+int chgptscnt(struct uidinfo *uip, int diff, rlim_t maxval);
+int fuswintr(void *base);
+struct plimit
+ *lim_alloc(void);
+void lim_copy(struct plimit *dst, struct plimit *src);
+rlim_t lim_cur(struct proc *p, int which);
+void lim_fork(struct proc *p1, struct proc *p2);
+void lim_free(struct plimit *limp);
+struct plimit
+ *lim_hold(struct plimit *limp);
+rlim_t lim_max(struct proc *p, int which);
+void lim_rlimit(struct proc *p, int which, struct rlimit *rlp);
+void ruadd(struct rusage *ru, struct rusage_ext *rux, struct rusage *ru2,
+ struct rusage_ext *rux2);
+void rucollect(struct rusage *ru, struct rusage *ru2);
+void rufetch(struct proc *p, struct rusage *ru);
+void rufetchcalc(struct proc *p, struct rusage *ru, struct timeval *up,
+ struct timeval *sp);
+void ruxagg(struct proc *p, struct thread *td);
+int suswintr(void *base, int word);
+struct uidinfo
+ *uifind(uid_t uid);
+void uifree(struct uidinfo *uip);
+void uihashinit(void);
+void uihold(struct uidinfo *uip);
+
+#endif /* _KERNEL */
+#endif /* !_SYS_RESOURCEVAR_HH_ */
diff --git a/rtems/freebsd/sys/rman.h b/rtems/freebsd/sys/rman.h
new file mode 100644
index 00000000..ca5d4d08
--- /dev/null
+++ b/rtems/freebsd/sys/rman.h
@@ -0,0 +1,155 @@
+/*-
+ * Copyright 1998 Massachusetts Institute of Technology
+ *
+ * Permission to use, copy, modify, and distribute this software and
+ * its documentation for any purpose and without fee is hereby
+ * granted, provided that both the above copyright notice and this
+ * permission notice appear in all copies, that both the above
+ * copyright notice and this permission notice appear in all
+ * supporting documentation, and that the name of M.I.T. not be used
+ * in advertising or publicity pertaining to distribution of the
+ * software without specific, written prior permission. M.I.T. makes
+ * no representations about the suitability of this software for any
+ * purpose. It is provided "as is" without express or implied
+ * warranty.
+ *
+ * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''. M.I.T. DISCLAIMS
+ * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT
+ * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _SYS_RMAN_HH_
+#define _SYS_RMAN_HH_ 1
+
+#ifndef _KERNEL
+#include <rtems/freebsd/sys/queue.h>
+#else
+#include <rtems/freebsd/machine/_bus.h>
+#include <rtems/freebsd/machine/resource.h>
+#endif
+
+#define RF_ALLOCATED 0x0001 /* resource has been reserved */
+#define RF_ACTIVE 0x0002 /* resource allocation has been activated */
+#define RF_SHAREABLE 0x0004 /* resource permits contemporaneous sharing */
+#define RF_TIMESHARE 0x0008 /* resource permits time-division sharing */
+#define RF_WANTED 0x0010 /* somebody is waiting for this resource */
+#define RF_FIRSTSHARE 0x0020 /* first in sharing list */
+#define RF_PREFETCHABLE 0x0040 /* resource is prefetchable */
+#define RF_OPTIONAL 0x0080 /* for bus_alloc_resources() */
+
+#define RF_ALIGNMENT_SHIFT 10 /* alignment size field starts at bit 10 */
+#define RF_ALIGNMENT_MASK (0x003F << RF_ALIGNMENT_SHIFT)
+ /* resource address alignment size bit mask */
+#define RF_ALIGNMENT_LOG2(x) ((x) << RF_ALIGNMENT_SHIFT)
+#define RF_ALIGNMENT(x) (((x) & RF_ALIGNMENT_MASK) >> RF_ALIGNMENT_SHIFT)
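A worked example of the encoding: RF_ALIGNMENT_LOG2(4) yields 4 << 10 == 0x1000, requesting 2^4 = 16-byte alignment, and RF_ALIGNMENT() recovers the exponent.

    u_int flags = RF_ACTIVE | RF_ALIGNMENT_LOG2(4);
    /* RF_ALIGNMENT(flags) == 4, i.e. a 16-byte alignment request */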
+
+enum rman_type { RMAN_UNINIT = 0, RMAN_GAUGE, RMAN_ARRAY };
+
+/*
+ * String length exported to userspace for resource names, etc.
+ */
+#define RM_TEXTLEN 32
+
+/*
+ * Userspace-exported structures.
+ */
+struct u_resource {
+ uintptr_t r_handle; /* resource uniquifier */
+ uintptr_t r_parent; /* parent rman */
+ uintptr_t r_device; /* device owning this resource */
+ char r_devname[RM_TEXTLEN]; /* device name XXX obsolete */
+
+ u_long r_start; /* offset in resource space */
+ u_long r_size; /* size in resource space */
+ u_int r_flags; /* RF_* flags */
+};
+
+struct u_rman {
+ uintptr_t rm_handle; /* rman uniquifier */
+ char rm_descr[RM_TEXTLEN]; /* rman description */
+
+ u_long rm_start; /* base of managed region */
+ u_long rm_size; /* size of managed region */
+ enum rman_type rm_type; /* region type */
+};
+
+#ifdef _KERNEL
+
+/*
+ * The public (kernel) view of struct resource
+ *
+ * NB: Changing the offset/size/type of existing fields in struct resource
+ * NB: breaks the device driver ABI and is strongly FORBIDDEN.
+ * NB: Appending new fields is probably just misguided.
+ */
+
+struct resource {
+ struct resource_i *__r_i;
+ bus_space_tag_t r_bustag; /* bus_space tag */
+ bus_space_handle_t r_bushandle; /* bus_space handle */
+};
+
+struct resource_i;
+
+TAILQ_HEAD(resource_head, resource_i);
+
+struct rman {
+ struct resource_head rm_list;
+ struct mtx *rm_mtx; /* mutex used to protect rm_list */
+ TAILQ_ENTRY(rman) rm_link; /* link in list of all rmans */
+ u_long rm_start; /* index of globally first entry */
+ u_long rm_end; /* index of globally last entry */
+ enum rman_type rm_type; /* what type of resource this is */
+ const char *rm_descr; /* text description of this resource */
+};
+TAILQ_HEAD(rman_head, rman);
+
+int rman_activate_resource(struct resource *r);
+int rman_await_resource(struct resource *r, int pri, int timo);
+bus_space_handle_t rman_get_bushandle(struct resource *);
+bus_space_tag_t rman_get_bustag(struct resource *);
+u_long rman_get_end(struct resource *);
+struct device *rman_get_device(struct resource *);
+u_int rman_get_flags(struct resource *);
+int rman_get_rid(struct resource *);
+u_long rman_get_size(struct resource *);
+u_long rman_get_start(struct resource *);
+void *rman_get_virtual(struct resource *);
+int rman_deactivate_resource(struct resource *r);
+int rman_fini(struct rman *rm);
+int rman_init(struct rman *rm);
+int rman_init_from_resource(struct rman *rm, struct resource *r);
+uint32_t rman_make_alignment_flags(uint32_t size);
+int rman_manage_region(struct rman *rm, u_long start, u_long end);
+int rman_is_region_manager(struct resource *r, struct rman *rm);
+int rman_release_resource(struct resource *r);
+struct resource *rman_reserve_resource(struct rman *rm, u_long start,
+ u_long end, u_long count,
+ u_int flags, struct device *dev);
+struct resource *rman_reserve_resource_bound(struct rman *rm, u_long start,
+ u_long end, u_long count, u_long bound,
+ u_int flags, struct device *dev);
+void rman_set_bushandle(struct resource *_r, bus_space_handle_t _h);
+void rman_set_bustag(struct resource *_r, bus_space_tag_t _t);
+void rman_set_device(struct resource *_r, struct device *_dev);
+void rman_set_end(struct resource *_r, u_long _end);
+void rman_set_rid(struct resource *_r, int _rid);
+void rman_set_start(struct resource *_r, u_long _start);
+void rman_set_virtual(struct resource *_r, void *_v);
+
+extern struct rman_head rman_head;
+
+#endif /* _KERNEL */
+
+#endif /* !_SYS_RMAN_HH_ */
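A driver-side sketch of the typical lifecycle (names invented; error handling and the device argument are abbreviated, and ENXIO is assumed from the usual kernel headers): describe a managed region, then carve allocations out of it.

    static struct rman foo_rman;

    static int
    foo_attach(void)
    {
        struct resource *res;

        foo_rman.rm_type = RMAN_ARRAY;
        foo_rman.rm_descr = "foo I/O ports";
        if (rman_init(&foo_rman) != 0)
            return (ENXIO);
        if (rman_manage_region(&foo_rman, 0x100, 0x1ff) != 0)
            return (ENXIO);
        /* reserve any 8 contiguous ports within the region */
        res = rman_reserve_resource(&foo_rman, 0x100, 0x1ff, 8,
            RF_ACTIVE, NULL);
        return (res != NULL ? 0 : ENXIO);
    }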
diff --git a/rtems/freebsd/sys/rmlock.h b/rtems/freebsd/sys/rmlock.h
new file mode 100644
index 00000000..efb244b1
--- /dev/null
+++ b/rtems/freebsd/sys/rmlock.h
@@ -0,0 +1,121 @@
+/*-
+ * Copyright (c) 2007 Stephan Uphoff <ups@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the author nor the names of any co-contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _SYS_RMLOCK_HH_
+#define _SYS_RMLOCK_HH_
+
+#include <rtems/freebsd/sys/mutex.h>
+#include <rtems/freebsd/sys/_lock.h>
+#include <rtems/freebsd/sys/_rmlock.h>
+
+#ifdef _KERNEL
+
+/*
+ * Flags passed to rm_init(9).
+ */
+#define RM_NOWITNESS 0x00000001
+#define RM_RECURSE 0x00000002
+
+void rm_init(struct rmlock *rm, const char *name);
+void rm_init_flags(struct rmlock *rm, const char *name, int opts);
+void rm_destroy(struct rmlock *rm);
+int rm_wowned(struct rmlock *rm);
+void rm_sysinit(void *arg);
+void rm_sysinit_flags(void *arg);
+
+void _rm_wlock_debug(struct rmlock *rm, const char *file, int line);
+void _rm_wunlock_debug(struct rmlock *rm, const char *file, int line);
+void _rm_rlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
+ const char *file, int line);
+void _rm_runlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
+ const char *file, int line);
+
+void _rm_wlock(struct rmlock *rm);
+void _rm_wunlock(struct rmlock *rm);
+void _rm_rlock(struct rmlock *rm, struct rm_priotracker *tracker);
+void _rm_runlock(struct rmlock *rm, struct rm_priotracker *tracker);
+
+/*
+ * Public interface for lock operations.
+ */
+#ifndef LOCK_DEBUG
+#error LOCK_DEBUG not defined, include <sys/lock.h> before <sys/rmlock.h>
+#endif
+
+#if LOCK_DEBUG > 0
+#define rm_wlock(rm) _rm_wlock_debug((rm), LOCK_FILE, LOCK_LINE)
+#define rm_wunlock(rm) _rm_wunlock_debug((rm), LOCK_FILE, LOCK_LINE)
+#define rm_rlock(rm,tracker) \
+ _rm_rlock_debug((rm),(tracker), LOCK_FILE, LOCK_LINE )
+#define rm_runlock(rm,tracker) \
+ _rm_runlock_debug((rm), (tracker), LOCK_FILE, LOCK_LINE )
+#else
+#define rm_wlock(rm) _rm_wlock((rm))
+#define rm_wunlock(rm) _rm_wunlock((rm))
+#define rm_rlock(rm,tracker) _rm_rlock((rm),(tracker))
+#define rm_runlock(rm,tracker) _rm_runlock((rm), (tracker))
+#endif
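A usage sketch (names invented): each reader supplies a stack-resident struct rm_priotracker, which is what lets the read path avoid writes to shared lock state in the common case.

    static struct rmlock cfg_lock;

    static void
    cfg_read(void)
    {
        struct rm_priotracker tracker;

        rm_rlock(&cfg_lock, &tracker);
        /* ... read the rarely-changing data ... */
        rm_runlock(&cfg_lock, &tracker);
    }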
+
+struct rm_args {
+ struct rmlock *ra_rm;
+ const char *ra_desc;
+};
+
+struct rm_args_flags {
+ struct rmlock *ra_rm;
+ const char *ra_desc;
+ int ra_opts;
+};
+
+#define RM_SYSINIT(name, rm, desc) \
+ static struct rm_args name##_args = { \
+ (rm), \
+ (desc), \
+ }; \
+ SYSINIT(name##_rm_sysinit, SI_SUB_LOCK, SI_ORDER_MIDDLE, \
+ rm_sysinit, &name##_args); \
+ SYSUNINIT(name##_rm_sysuninit, SI_SUB_LOCK, SI_ORDER_MIDDLE, \
+ rm_destroy, (rm))
+
+
+#define RM_SYSINIT_FLAGS(name, rm, desc, opts) \
+ static struct rm_args name##_args = { \
+ (rm), \
+ (desc), \
+ (opts), \
+ }; \
+ SYSINIT(name##_rm_sysinit, SI_SUB_LOCK, SI_ORDER_MIDDLE, \
+ rm_sysinit_flags, &name##_args); \
+ SYSUNINIT(name##_rm_sysuninit, SI_SUB_LOCK, SI_ORDER_MIDDLE, \
+ rm_destroy, (rm))
+
+#endif /* _KERNEL */
+#endif /* !_SYS_RMLOCK_HH_ */
diff --git a/rtems/freebsd/sys/rtprio.h b/rtems/freebsd/sys/rtprio.h
new file mode 100644
index 00000000..a4998a14
--- /dev/null
+++ b/rtems/freebsd/sys/rtprio.h
@@ -0,0 +1,92 @@
+/*-
+ * Copyright (c) 1994, Henrik Vestergaard Draboel
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Henrik Vestergaard Draboel.
+ * 4. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _SYS_RTPRIO_HH_
+#define _SYS_RTPRIO_HH_
+
+#include <rtems/freebsd/sys/priority.h>
+
+/*
+ * Process realtime-priority specifications to rtprio.
+ */
+
+/* priority types. Start at 1 to catch uninitialized fields. */
+
+#define RTP_PRIO_REALTIME PRI_REALTIME /* real time process */
+#define RTP_PRIO_NORMAL PRI_TIMESHARE /* time sharing process */
+#define RTP_PRIO_IDLE PRI_IDLE /* idle process */
+
+/* RTP_PRIO_FIFO is POSIX.1B SCHED_FIFO. */
+
+#define RTP_PRIO_FIFO_BIT PRI_FIFO_BIT
+#define RTP_PRIO_FIFO PRI_FIFO
+#define RTP_PRIO_BASE(P) PRI_BASE(P)
+#define RTP_PRIO_IS_REALTIME(P) PRI_IS_REALTIME(P)
+#define RTP_PRIO_NEED_RR(P) PRI_NEED_RR(P)
+
+/* priority range */
+#define RTP_PRIO_MIN 0 /* Highest priority */
+#define RTP_PRIO_MAX 31 /* Lowest priority */
+
+/*
+ * rtprio() syscall functions
+ */
+#define RTP_LOOKUP 0
+#define RTP_SET 1
+
+#ifndef LOCORE
+/*
+ * Scheduling class information.
+ */
+struct rtprio {
+ u_short type; /* scheduling class */
+ u_short prio;
+};
+
+#ifdef _KERNEL
+struct thread;
+int rtp_to_pri(struct rtprio *, struct thread *);
+void pri_to_rtp(struct thread *, struct rtprio *);
+#endif
+#endif
+
+#ifndef _KERNEL
+#include <rtems/freebsd/sys/cdefs.h>
+
+__BEGIN_DECLS
+int rtprio(int, pid_t, struct rtprio *);
+int rtprio_thread(int, lwpid_t, struct rtprio *);
+__END_DECLS
+#endif /* !_KERNEL */
+#endif /* !_SYS_RTPRIO_HH_ */
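A userland sketch (function name invented): dropping the calling process to the idle class via the rtprio() interface declared above, where a pid of 0 names the caller.

    #include <sys/rtprio.h>

    int
    go_idle(void)
    {
        struct rtprio rtp;

        rtp.type = RTP_PRIO_IDLE;
        rtp.prio = RTP_PRIO_MAX;            /* 31 == lowest priority */
        return (rtprio(RTP_SET, 0, &rtp));  /* pid 0 == calling process */
    }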
diff --git a/rtems/freebsd/sys/runq.h b/rtems/freebsd/sys/runq.h
new file mode 100644
index 00000000..78091501
--- /dev/null
+++ b/rtems/freebsd/sys/runq.h
@@ -0,0 +1,75 @@
+/*-
+ * Copyright (c) 2001 Jake Burkholder <jake@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _RUNQ_HH_
+#define _RUNQ_HH_
+
+#include <rtems/freebsd/machine/runq.h>
+
+struct thread;
+
+/*
+ * Run queue parameters.
+ */
+
+#define RQ_NQS (64) /* Number of run queues. */
+#define RQ_PPQ (4) /* Priorities per queue. */
+
+/*
+ * Head of run queues.
+ */
+TAILQ_HEAD(rqhead, thread);
+
+/*
+ * Bit array which maintains the status of a run queue. When a queue is
+ * non-empty the bit corresponding to the queue number will be set.
+ */
+struct rqbits {
+ rqb_word_t rqb_bits[RQB_LEN];
+};
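An illustrative helper, not in the patch, showing the mapping this comment implies: a 0..255 priority selects one of the RQ_NQS queues by dividing by RQ_PPQ, and the queue's status bit lives at that index. RQB_BPW (bits per rqb_word_t) is assumed to come from <machine/runq.h>; this mirrors what runq_add()/runq_check() do internally.

    static __inline void
    rqb_mark_nonempty(struct rqbits *rqb, u_char pri)
    {
        int idx = pri / RQ_PPQ;     /* 256 priorities / 4 per queue = 64 */

        rqb->rqb_bits[idx / RQB_BPW] |= (rqb_word_t)1 << (idx % RQB_BPW);
    }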
+
+/*
+ * Run queue structure. Contains an array of run queues on which processes
+ * are placed, and a structure to maintain the status of each queue.
+ */
+struct runq {
+ struct rqbits rq_status;
+ struct rqhead rq_queues[RQ_NQS];
+};
+
+void runq_add(struct runq *, struct thread *, int);
+void runq_add_pri(struct runq *, struct thread *, u_char, int);
+int runq_check(struct runq *);
+struct thread *runq_choose(struct runq *);
+struct thread *runq_choose_from(struct runq *, u_char);
+struct thread *runq_choose_fuzz(struct runq *, int);
+void runq_init(struct runq *);
+void runq_remove(struct runq *, struct thread *);
+void runq_remove_idx(struct runq *, struct thread *, u_char *);
+
+#endif
diff --git a/rtems/freebsd/sys/rwlock.h b/rtems/freebsd/sys/rwlock.h
new file mode 100644
index 00000000..cee67827
--- /dev/null
+++ b/rtems/freebsd/sys/rwlock.h
@@ -0,0 +1,251 @@
+/*-
+ * Copyright (c) 2006 John Baldwin <jhb@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the author nor the names of any co-contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _SYS_RWLOCK_HH_
+#define _SYS_RWLOCK_HH_
+
+#include <rtems/freebsd/sys/_lock.h>
+#include <rtems/freebsd/sys/_rwlock.h>
+#include <rtems/freebsd/sys/lock_profile.h>
+#include <rtems/freebsd/sys/lockstat.h>
+
+#ifdef _KERNEL
+#include <rtems/freebsd/sys/pcpu.h>
+#include <rtems/freebsd/machine/atomic.h>
+#endif
+
+#ifdef __rtems__
+#define RWLOCK_NOINLINE 1
+#endif /* __rtems__ */
+
+/*
+ * The rw_lock field consists of several bit-fields. The low bit indicates
+ * if the lock is locked with a read (shared) or write (exclusive) lock.
+ * A value of 0 indicates a write lock, and a value of 1 indicates a read
+ * lock. Bit 1 is a boolean indicating if there are any threads waiting
+ * for a read lock. Bit 2 is a boolean indicating if there are any threads
+ * waiting for a write lock. The rest of the variable's definition is
+ * dependent on the value of the first bit. For a write lock, it is a
+ * pointer to the thread holding the lock, similar to the mtx_lock field of
+ * mutexes. For read locks, it is a count of read locks that are held.
+ *
+ * When the lock is not locked by any thread, it is encoded as a read lock
+ * with zero waiters.
+ */
+
+#define RW_LOCK_READ 0x01
+#define RW_LOCK_READ_WAITERS 0x02
+#define RW_LOCK_WRITE_WAITERS 0x04
+#define RW_LOCK_WRITE_SPINNER 0x08
+#define RW_LOCK_FLAGMASK \
+ (RW_LOCK_READ | RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS | \
+ RW_LOCK_WRITE_SPINNER)
+#define RW_LOCK_WAITERS (RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS)
+
+#define RW_OWNER(x) ((x) & ~RW_LOCK_FLAGMASK)
+#define RW_READERS_SHIFT 4
+#define RW_READERS(x) (RW_OWNER((x)) >> RW_READERS_SHIFT)
+#define RW_READERS_LOCK(x) ((x) << RW_READERS_SHIFT | RW_LOCK_READ)
+#define RW_ONE_READER (1 << RW_READERS_SHIFT)
+
+#define RW_UNLOCKED RW_READERS_LOCK(0)
+#define RW_DESTROYED (RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS)
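A worked example of this encoding, derived from the macros above:

    /*
     * RW_UNLOCKED        == RW_READERS_LOCK(0) == 0x01  (read, 0 readers)
     * RW_READERS_LOCK(3) == (3 << RW_READERS_SHIFT) | RW_LOCK_READ == 0x31
     * RW_READERS(0x31)   == 3
     * A write-locked word is the owning thread pointer (low bit clear);
     * RW_OWNER() masks off the low flag bits to recover it.
     */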
+
+#ifdef _KERNEL
+
+#define rw_recurse lock_object.lo_data
+
+/* Very simple operations on rw_lock. */
+
+/* Try to obtain a write lock once. */
+#define _rw_write_lock(rw, tid) \
+ atomic_cmpset_acq_ptr(&(rw)->rw_lock, RW_UNLOCKED, (tid))
+
+/* Release a write lock quickly if there are no waiters. */
+#define _rw_write_unlock(rw, tid) \
+ atomic_cmpset_rel_ptr(&(rw)->rw_lock, (tid), RW_UNLOCKED)
+
+/*
+ * Full lock operations that are suitable to be inlined in non-debug
+ * kernels. If the lock cannot be acquired or released trivially then
+ * the work is deferred to another function.
+ */
+
+#ifndef __rtems__
+/* Acquire a write lock. */
+#define __rw_wlock(rw, tid, file, line) do { \
+ uintptr_t _tid = (uintptr_t)(tid); \
+ \
+ if (!_rw_write_lock((rw), _tid)) \
+ _rw_wlock_hard((rw), _tid, (file), (line)); \
+ else \
+ LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_RW_WLOCK_ACQUIRE, \
+ rw, 0, 0, (file), (line)); \
+} while (0)
+
+/* Release a write lock. */
+#define __rw_wunlock(rw, tid, file, line) do { \
+ uintptr_t _tid = (uintptr_t)(tid); \
+ \
+ if ((rw)->rw_recurse) \
+ (rw)->rw_recurse--; \
+ else if (!_rw_write_unlock((rw), _tid)) \
+ _rw_wunlock_hard((rw), _tid, (file), (line)); \
+} while (0)
+#endif /* __rtems__ */
+
+/*
+ * Function prototypes. Routines that start with _ are not part of the
+ * external API and should not be called directly. Wrapper macros should
+ * be used instead.
+ */
+
+#define rw_init(rw, name) rw_init_flags((rw), (name), 0)
+void rw_init_flags(struct rwlock *rw, const char *name, int opts);
+void rw_destroy(struct rwlock *rw);
+void rw_sysinit(void *arg);
+void rw_sysinit_flags(void *arg);
+int rw_wowned(struct rwlock *rw);
+void _rw_wlock(struct rwlock *rw, const char *file, int line);
+int _rw_try_wlock(struct rwlock *rw, const char *file, int line);
+void _rw_wunlock(struct rwlock *rw, const char *file, int line);
+void _rw_rlock(struct rwlock *rw, const char *file, int line);
+int _rw_try_rlock(struct rwlock *rw, const char *file, int line);
+void _rw_runlock(struct rwlock *rw, const char *file, int line);
+void _rw_wlock_hard(struct rwlock *rw, uintptr_t tid, const char *file,
+ int line);
+void _rw_wunlock_hard(struct rwlock *rw, uintptr_t tid, const char *file,
+ int line);
+int _rw_try_upgrade(struct rwlock *rw, const char *file, int line);
+void _rw_downgrade(struct rwlock *rw, const char *file, int line);
+#if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
+void _rw_assert(struct rwlock *rw, int what, const char *file, int line);
+#endif
+
+/*
+ * Public interface for lock operations.
+ */
+
+#ifndef LOCK_DEBUG
+#error LOCK_DEBUG not defined, include <sys/lock.h> before <sys/rwlock.h>
+#endif
+#if LOCK_DEBUG > 0 || defined(RWLOCK_NOINLINE)
+#define rw_wlock(rw) _rw_wlock((rw), LOCK_FILE, LOCK_LINE)
+#define rw_wunlock(rw) _rw_wunlock((rw), LOCK_FILE, LOCK_LINE)
+#else
+#define rw_wlock(rw) \
+ __rw_wlock((rw), curthread, LOCK_FILE, LOCK_LINE)
+#define rw_wunlock(rw) \
+ __rw_wunlock((rw), curthread, LOCK_FILE, LOCK_LINE)
+#endif
+#define rw_rlock(rw) _rw_rlock((rw), LOCK_FILE, LOCK_LINE)
+#define rw_runlock(rw) _rw_runlock((rw), LOCK_FILE, LOCK_LINE)
+#define rw_try_rlock(rw) _rw_try_rlock((rw), LOCK_FILE, LOCK_LINE)
+#define rw_try_upgrade(rw) _rw_try_upgrade((rw), LOCK_FILE, LOCK_LINE)
+#define rw_try_wlock(rw) _rw_try_wlock((rw), LOCK_FILE, LOCK_LINE)
+#define rw_downgrade(rw) _rw_downgrade((rw), LOCK_FILE, LOCK_LINE)
+#define rw_unlock(rw) do { \
+ if (rw_wowned(rw)) \
+ rw_wunlock(rw); \
+ else \
+ rw_runlock(rw); \
+} while (0)
+#define rw_sleep(chan, rw, pri, wmesg, timo) \
+ _sleep((chan), &(rw)->lock_object, (pri), (wmesg), (timo))
+
+#define rw_initialized(rw) lock_initalized(&(rw)->lock_object)
+
+struct rw_args {
+ struct rwlock *ra_rw;
+ const char *ra_desc;
+};
+
+struct rw_args_flags {
+ struct rwlock *ra_rw;
+ const char *ra_desc;
+ int ra_flags;
+};
+
+#define RW_SYSINIT(name, rw, desc) \
+ static struct rw_args name##_args = { \
+ (rw), \
+ (desc), \
+ }; \
+ SYSINIT(name##_rw_sysinit, SI_SUB_LOCK, SI_ORDER_MIDDLE, \
+ rw_sysinit, &name##_args); \
+ SYSUNINIT(name##_rw_sysuninit, SI_SUB_LOCK, SI_ORDER_MIDDLE, \
+ rw_destroy, (rw))
+
+
+#define RW_SYSINIT_FLAGS(name, rw, desc, flags) \
+ static struct rw_args_flags name##_args = { \
+ (rw), \
+ (desc), \
+ (flags), \
+ }; \
+ SYSINIT(name##_rw_sysinit, SI_SUB_LOCK, SI_ORDER_MIDDLE, \
+ rw_sysinit_flags, &name##_args); \
+ SYSUNINIT(name##_rw_sysuninit, SI_SUB_LOCK, SI_ORDER_MIDDLE, \
+ rw_destroy, (rw))
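+
+/*
+ * Illustrative sketch (not part of the original header): a lock that is
+ * initialized automatically at boot and destroyed at shutdown.  The
+ * names are hypothetical.
+ *
+ *	static struct rwlock bar_lock;
+ *	RW_SYSINIT(bar, &bar_lock, "bar lock");
+ */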
+
+/*
+ * Options passed to rw_init_flags().
+ */
+#define RW_DUPOK 0x01
+#define RW_NOPROFILE 0x02
+#define RW_NOWITNESS 0x04
+#define RW_QUIET 0x08
+#define RW_RECURSE 0x10
+
+/*
+ * The INVARIANTS-enabled rw_assert() functionality.
+ *
+ * The constants need to be defined for INVARIANT_SUPPORT infrastructure
+ * support as _rw_assert() itself uses them and the latter implies that
+ * _rw_assert() must build.
+ */
+#if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
+#define RA_LOCKED LA_LOCKED
+#define RA_RLOCKED LA_SLOCKED
+#define RA_WLOCKED LA_XLOCKED
+#define RA_UNLOCKED LA_UNLOCKED
+#define RA_RECURSED LA_RECURSED
+#define RA_NOTRECURSED LA_NOTRECURSED
+#endif
+
+#ifdef INVARIANTS
+#define rw_assert(rw, what) _rw_assert((rw), (what), LOCK_FILE, LOCK_LINE)
+#else
+#define rw_assert(rw, what)
+#endif
+
+#endif /* _KERNEL */
+#endif /* !_SYS_RWLOCK_HH_ */
diff --git a/rtems/freebsd/sys/sbuf.h b/rtems/freebsd/sys/sbuf.h
new file mode 100644
index 00000000..26c5f734
--- /dev/null
+++ b/rtems/freebsd/sys/sbuf.h
@@ -0,0 +1,88 @@
+/*-
+ * Copyright (c) 2000-2008 Poul-Henning Kamp
+ * Copyright (c) 2000-2008 Dag-Erling Coïdan Smørgrav
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer
+ * in this position and unchanged.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _SYS_SBUF_HH_
+#define _SYS_SBUF_HH_
+
+#include <rtems/freebsd/sys/_types.h>
+
+/*
+ * Structure definition
+ */
+struct sbuf {
+ char *s_buf; /* storage buffer */
+ void *s_unused; /* binary compatibility. */
+ int s_size; /* size of storage buffer */
+ int s_len; /* current length of string */
+#define SBUF_FIXEDLEN 0x00000000 /* fixed length buffer (default) */
+#define SBUF_AUTOEXTEND 0x00000001 /* automatically extend buffer */
+#define SBUF_USRFLAGMSK 0x0000ffff /* mask of flags the user may specify */
+#define SBUF_DYNAMIC 0x00010000 /* s_buf must be freed */
+#define SBUF_FINISHED 0x00020000 /* set by sbuf_finish() */
+#define SBUF_OVERFLOWED 0x00040000 /* sbuf overflowed */
+#define SBUF_DYNSTRUCT 0x00080000 /* sbuf must be freed */
+ int s_flags; /* flags */
+};
+
+__BEGIN_DECLS
+/*
+ * API functions
+ */
+struct sbuf *sbuf_new(struct sbuf *, char *, int, int);
+#define sbuf_new_auto() \
+ sbuf_new(NULL, NULL, 0, SBUF_AUTOEXTEND)
+void sbuf_clear(struct sbuf *);
+int sbuf_setpos(struct sbuf *, int);
+int sbuf_bcat(struct sbuf *, const void *, size_t);
+int sbuf_bcpy(struct sbuf *, const void *, size_t);
+int sbuf_cat(struct sbuf *, const char *);
+int sbuf_cpy(struct sbuf *, const char *);
+int sbuf_printf(struct sbuf *, const char *, ...)
+ __printflike(2, 3);
+int sbuf_vprintf(struct sbuf *, const char *, __va_list)
+ __printflike(2, 0);
+int sbuf_putc(struct sbuf *, int);
+int sbuf_trim(struct sbuf *);
+int sbuf_overflowed(struct sbuf *);
+void sbuf_finish(struct sbuf *);
+char *sbuf_data(struct sbuf *);
+int sbuf_len(struct sbuf *);
+int sbuf_done(struct sbuf *);
+void sbuf_delete(struct sbuf *);
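+
+/*
+ * Illustrative sketch (not part of the original header): composing a
+ * string in an automatically extending buffer.  The consumer of the
+ * finished string is hypothetical; error handling is abbreviated.
+ *
+ *	struct sbuf *sb;
+ *
+ *	sb = sbuf_new_auto();
+ *	sbuf_printf(sb, "%d widgets", nwidgets);
+ *	sbuf_finish(sb);
+ *	consume(sbuf_data(sb), sbuf_len(sb));
+ *	sbuf_delete(sb);
+ */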
+
+#ifdef _KERNEL
+struct uio;
+struct sbuf *sbuf_uionew(struct sbuf *, struct uio *, int *);
+int sbuf_bcopyin(struct sbuf *, const void *, size_t);
+int sbuf_copyin(struct sbuf *, const void *, size_t);
+#endif
+__END_DECLS
+
+#endif
diff --git a/rtems/freebsd/sys/sched.h b/rtems/freebsd/sys/sched.h
new file mode 100644
index 00000000..936ffd88
--- /dev/null
+++ b/rtems/freebsd/sys/sched.h
@@ -0,0 +1 @@
+/* EMPTY */
diff --git a/rtems/freebsd/sys/sdt.h b/rtems/freebsd/sys/sdt.h
new file mode 100644
index 00000000..eeae665c
--- /dev/null
+++ b/rtems/freebsd/sys/sdt.h
@@ -0,0 +1,232 @@
+/*-
+ * Copyright 2006-2008 John Birrell <jb@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ *
+ * Statically Defined Tracing (SDT) definitions.
+ *
+ */
+
+#ifndef _SYS_SDT_H
+#define _SYS_SDT_H
+
+/* Stub these for the time being. */
+#define DTRACE_PROBE(name)
+#define DTRACE_PROBE1(name, type1, arg1)
+#define DTRACE_PROBE2(name, type1, arg1, type2, arg2)
+#define DTRACE_PROBE3(name, type1, arg1, type2, arg2, type3, arg3)
+#define DTRACE_PROBE4(name, type1, arg1, type2, arg2, type3, arg3, type4, arg4)
+
+#ifndef _KERNEL
+
+/* The promise of things to come. Worlds to explore. People to meet. Things to do. */
+
+#else
+
+#ifndef KDTRACE_HOOKS
+
+#define SDT_PROVIDER_DEFINE(prov)
+#define SDT_PROVIDER_DECLARE(prov)
+#define SDT_PROBE_DEFINE(prov, mod, func, name)
+#define SDT_PROBE_DECLARE(prov, mod, func, name)
+#define SDT_PROBE(prov, mod, func, name, arg0, arg1, arg2, arg3, arg4)
+#define SDT_PROBE_ARGTYPE(prov, mod, func, name, num, type)
+
+#define SDT_PROBE_DEFINE1(prov, mod, func, name, arg0)
+#define SDT_PROBE_DEFINE2(prov, mod, func, name, arg0, arg1)
+#define SDT_PROBE_DEFINE3(prov, mod, func, name, arg0, arg1, arg2)
+#define SDT_PROBE_DEFINE4(prov, mod, func, name, arg0, arg1, arg2, arg3)
+#define SDT_PROBE_DEFINE5(prov, mod, func, name, arg0, arg1, arg2, arg3, arg4)
+
+#define SDT_PROBE1(prov, mod, func, name, arg0)
+#define SDT_PROBE2(prov, mod, func, name, arg0, arg1)
+#define SDT_PROBE3(prov, mod, func, name, arg0, arg1, arg2)
+#define SDT_PROBE4(prov, mod, func, name, arg0, arg1, arg2, arg3)
+#define SDT_PROBE5(prov, mod, func, name, arg0, arg1, arg2, arg3, arg4)
+
+#else
+
+/*
+ * This type definition must match that of dtrace_probe. It is defined this
+ * way to avoid having to rely on CDDL code.
+ */
+typedef void (*sdt_probe_func_t)(u_int32_t, uintptr_t arg0, uintptr_t arg1,
+ uintptr_t arg2, uintptr_t arg3, uintptr_t arg4);
+
+/*
+ * The hook for the probe function. See kern_sdt.c which defaults this to
+ * its own stub. The 'sdt' provider will set it to dtrace_probe when it
+ * loads.
+ */
+extern sdt_probe_func_t sdt_probe_func;
+
+typedef enum {
+ SDT_UNINIT = 1,
+ SDT_INIT,
+} sdt_state_t;
+
+struct sdt_probe;
+struct sdt_provider;
+
+struct sdt_argtype {
+ int ndx; /* Argument index. */
+ const char *type; /* Argument type string. */
+ TAILQ_ENTRY(sdt_argtype)
+ argtype_entry; /* Argument type list entry. */
+ struct sdt_probe
+ *probe; /* Ptr to the probe structure. */
+};
+
+struct sdt_probe {
+ int version; /* Set to sizeof(struct sdt_probe). */
+ sdt_state_t state;
+ struct sdt_provider
+ *prov; /* Ptr to the provider structure. */
+ TAILQ_ENTRY(sdt_probe)
+ probe_entry; /* SDT probe list entry. */
+ TAILQ_HEAD(argtype_list_head, sdt_argtype) argtype_list;
+ const char *mod;
+ const char *func;
+ const char *name;
+ id_t id; /* DTrace probe ID. */
+ int n_args; /* Number of arguments. */
+};
+
+struct sdt_provider {
+ const char *name; /* Provider name. */
+ TAILQ_ENTRY(sdt_provider)
+ prov_entry; /* SDT provider list entry. */
+ TAILQ_HEAD(probe_list_head, sdt_probe) probe_list;
+ uintptr_t id; /* DTrace provider ID. */
+};
+
+#define SDT_PROVIDER_DEFINE(prov) \
+ struct sdt_provider sdt_provider_##prov[1] = { \
+ { #prov, { NULL, NULL }, { NULL, NULL } } \
+ }; \
+ SYSINIT(sdt_provider_##prov##_init, SI_SUB_KDTRACE, \
+ SI_ORDER_SECOND, sdt_provider_register, \
+ sdt_provider_##prov ); \
+ SYSUNINIT(sdt_provider_##prov##_uninit, SI_SUB_KDTRACE, \
+ SI_ORDER_SECOND, sdt_provider_deregister, \
+ sdt_provider_##prov )
+
+#define SDT_PROVIDER_DECLARE(prov) \
+ extern struct sdt_provider sdt_provider_##prov[1]
+
+#define SDT_PROBE_DEFINE(prov, mod, func, name) \
+ struct sdt_probe sdt_##prov##_##mod##_##func##_##name[1] = { \
+ { sizeof(struct sdt_probe), 0, sdt_provider_##prov, \
+ { NULL, NULL }, { NULL, NULL }, #mod, #func, #name, 0, 0 } \
+ }; \
+ SYSINIT(sdt_##prov##_##mod##_##func##_##name##_init, SI_SUB_KDTRACE, \
+ SI_ORDER_SECOND + 1, sdt_probe_register, \
+ sdt_##prov##_##mod##_##func##_##name ); \
+ SYSUNINIT(sdt_##prov##_##mod##_##func##_##name##_uninit, \
+ SI_SUB_KDTRACE, SI_ORDER_SECOND + 1, sdt_probe_deregister, \
+ sdt_##prov##_##mod##_##func##_##name )
+
+#define SDT_PROBE_DECLARE(prov, mod, func, name) \
+ extern struct sdt_probe sdt_##prov##_##mod##_##func##_##name[1]
+
+#define SDT_PROBE(prov, mod, func, name, arg0, arg1, arg2, arg3, arg4) \
+ if (sdt_##prov##_##mod##_##func##_##name->id) \
+ (*sdt_probe_func)(sdt_##prov##_##mod##_##func##_##name->id, \
+ (uintptr_t) arg0, (uintptr_t) arg1, (uintptr_t) arg2, \
+ (uintptr_t) arg3, (uintptr_t) arg4)
+
+#define SDT_PROBE_ARGTYPE(prov, mod, func, name, num, type) \
+ struct sdt_argtype sdt_##prov##_##mod##_##func##_##name##num[1] \
+ = { { num, type, { NULL, NULL }, \
+ sdt_##prov##_##mod##_##func##_##name } \
+ }; \
+ SYSINIT(sdt_##prov##_##mod##_##func##_##name##num##_init, \
+ SI_SUB_KDTRACE, SI_ORDER_SECOND + 2, sdt_argtype_register, \
+ sdt_##prov##_##mod##_##func##_##name##num ); \
+ SYSUNINIT(sdt_##prov##_##mod##_##func##_##name##num##_uninit, \
+ SI_SUB_KDTRACE, SI_ORDER_SECOND + 2, sdt_argtype_deregister, \
+ sdt_##prov##_##mod##_##func##_##name##num )
+
+#define SDT_PROBE_DEFINE1(prov, mod, func, name, arg0) \
+ SDT_PROBE_DEFINE(prov, mod, func, name); \
+ SDT_PROBE_ARGTYPE(prov, mod, func, name, 0, arg0)
+
+#define SDT_PROBE_DEFINE2(prov, mod, func, name, arg0, arg1) \
+ SDT_PROBE_DEFINE(prov, mod, func, name); \
+ SDT_PROBE_ARGTYPE(prov, mod, func, name, 0, arg0); \
+ SDT_PROBE_ARGTYPE(prov, mod, func, name, 1, arg1)
+
+#define SDT_PROBE_DEFINE3(prov, mod, func, name, arg0, arg1, arg2) \
+ SDT_PROBE_DEFINE(prov, mod, func, name); \
+ SDT_PROBE_ARGTYPE(prov, mod, func, name, 0, arg0); \
+ SDT_PROBE_ARGTYPE(prov, mod, func, name, 1, arg1); \
+ SDT_PROBE_ARGTYPE(prov, mod, func, name, 2, arg2)
+
+#define SDT_PROBE_DEFINE4(prov, mod, func, name, arg0, arg1, arg2, arg3) \
+ SDT_PROBE_DEFINE(prov, mod, func, name); \
+ SDT_PROBE_ARGTYPE(prov, mod, func, name, 0, arg0); \
+ SDT_PROBE_ARGTYPE(prov, mod, func, name, 1, arg1); \
+ SDT_PROBE_ARGTYPE(prov, mod, func, name, 2, arg2); \
+ SDT_PROBE_ARGTYPE(prov, mod, func, name, 3, arg3)
+
+#define SDT_PROBE_DEFINE5(prov, mod, func, name, arg0, arg1, arg2, arg3, arg4) \
+ SDT_PROBE_DEFINE(prov, mod, func, name); \
+ SDT_PROBE_ARGTYPE(prov, mod, func, name, 0, arg0); \
+ SDT_PROBE_ARGTYPE(prov, mod, func, name, 1, arg1); \
+ SDT_PROBE_ARGTYPE(prov, mod, func, name, 2, arg2); \
+ SDT_PROBE_ARGTYPE(prov, mod, func, name, 3, arg3); \
+ SDT_PROBE_ARGTYPE(prov, mod, func, name, 4, arg4)
+
+#define SDT_PROBE1(prov, mod, func, name, arg0) \
+ SDT_PROBE(prov, mod, func, name, arg0, 0, 0, 0, 0)
+#define SDT_PROBE2(prov, mod, func, name, arg0, arg1) \
+ SDT_PROBE(prov, mod, func, name, arg0, arg1, 0, 0, 0)
+#define SDT_PROBE3(prov, mod, func, name, arg0, arg1, arg2) \
+ SDT_PROBE(prov, mod, func, name, arg0, arg1, arg2, 0, 0)
+#define SDT_PROBE4(prov, mod, func, name, arg0, arg1, arg2, arg3) \
+ SDT_PROBE(prov, mod, func, name, arg0, arg1, arg2, arg3, 0)
+#define SDT_PROBE5(prov, mod, func, name, arg0, arg1, arg2, arg3, arg4) \
+ SDT_PROBE(prov, mod, func, name, arg0, arg1, arg2, arg3, arg4)
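+
+/*
+ * Illustrative sketch (not part of the original header): defining a
+ * provider and a one-argument probe, then firing it.  The provider,
+ * module, function and probe names are hypothetical.
+ *
+ *	SDT_PROVIDER_DEFINE(example);
+ *	SDT_PROBE_DEFINE1(example, kern, foo, entry, "int");
+ *
+ *	void
+ *	foo(int arg)
+ *	{
+ *
+ *		SDT_PROBE1(example, kern, foo, entry, arg);
+ *	}
+ */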
+
+typedef int (*sdt_argtype_listall_func_t)(struct sdt_argtype *, void *);
+typedef int (*sdt_probe_listall_func_t)(struct sdt_probe *, void *);
+typedef int (*sdt_provider_listall_func_t)(struct sdt_provider *, void *);
+
+void sdt_argtype_deregister(void *);
+void sdt_argtype_register(void *);
+void sdt_probe_deregister(void *);
+void sdt_probe_register(void *);
+void sdt_provider_deregister(void *);
+void sdt_provider_register(void *);
+void sdt_probe_stub(u_int32_t, uintptr_t arg0, uintptr_t arg1, uintptr_t arg2,
+ uintptr_t arg3, uintptr_t arg4);
+int sdt_argtype_listall(struct sdt_probe *, sdt_argtype_listall_func_t, void *);
+int sdt_probe_listall(struct sdt_provider *, sdt_probe_listall_func_t, void *);
+int sdt_provider_listall(sdt_provider_listall_func_t, void *);
+
+#endif /* KDTRACE_HOOKS */
+
+#endif /* _KERNEL */
+
+#endif /* _SYS_SDT_H */
diff --git a/rtems/freebsd/sys/select.h b/rtems/freebsd/sys/select.h
new file mode 100644
index 00000000..936ffd88
--- /dev/null
+++ b/rtems/freebsd/sys/select.h
@@ -0,0 +1 @@
+/* EMPTY */
diff --git a/rtems/freebsd/sys/selinfo.h b/rtems/freebsd/sys/selinfo.h
new file mode 100644
index 00000000..dc13ef00
--- /dev/null
+++ b/rtems/freebsd/sys/selinfo.h
@@ -0,0 +1,64 @@
+/*-
+ * Copyright (c) 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)select.h 8.2 (Berkeley) 1/4/94
+ * $FreeBSD$
+ */
+
+#ifndef _SYS_SELINFO_HH_
+#define _SYS_SELINFO_HH_
+
+#include <rtems/freebsd/sys/event.h> /* for struct klist */
+
+struct selfd;
+TAILQ_HEAD(selfdlist, selfd);
+
+/*
+ * Used to maintain information about processes that wish to be
+ * notified when I/O becomes possible.
+ */
+struct selinfo {
+ struct selfdlist si_tdlist; /* List of sleeping threads. */
+ struct knlist si_note; /* kernel note list */
+ struct mtx *si_mtx; /* Lock for tdlist. */
+#ifdef __rtems__
+ pid_t si_pid; /* process to be notified */
+ short si_flags; /* see below */
+#endif
+};
+
+#define SEL_WAITING(si) (!TAILQ_EMPTY(&(si)->si_tdlist))
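+
+/*
+ * Illustrative sketch (not part of the original header): the usual
+ * pattern in a driver's poll routine and its data path.  The softc, its
+ * sc_rsel member and data_available() are hypothetical; POLLIN and
+ * POLLRDNORM come from <sys/poll.h>.
+ *
+ *	In the poll method (td is the selecting thread):
+ *
+ *		if (data_available(sc))
+ *			revents |= events & (POLLIN | POLLRDNORM);
+ *		else
+ *			selrecord(td, &sc->sc_rsel);
+ *
+ *	Later, when data arrives:
+ *
+ *		selwakeup(&sc->sc_rsel);
+ */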
+
+#ifdef _KERNEL
+void selrecord(struct thread *selector, struct selinfo *sip);
+void selwakeup(struct selinfo *sip);
+void selwakeuppri(struct selinfo *sip, int pri);
+void seltdfini(struct thread *td);
+#endif
+
+#endif /* !_SYS_SELINFO_HH_ */
diff --git a/rtems/freebsd/sys/sf_buf.h b/rtems/freebsd/sys/sf_buf.h
new file mode 100644
index 00000000..2ad33388
--- /dev/null
+++ b/rtems/freebsd/sys/sf_buf.h
@@ -0,0 +1,56 @@
+/*-
+ * Copyright (c) 2003-2004 Alan L. Cox <alc@cs.rice.edu>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _SYS_SF_BUF_HH_
+#define _SYS_SF_BUF_HH_
+
+#include <rtems/freebsd/machine/sf_buf.h>
+
+/*
+ * Options to sf_buf_alloc() are specified through its flags argument. This
+ * argument's value should be the result of a bitwise or'ing of one or more
+ * of the following values.
+ */
+#define SFB_CATCH 1 /* Check signals if the allocation
+ sleeps. */
+#define SFB_CPUPRIVATE 2 /* Create a CPU private mapping. */
+#define SFB_DEFAULT 0
+#define SFB_NOWAIT 4 /* Return NULL if all bufs are used. */
+
+struct vm_page;
+
+extern int nsfbufs; /* Number of sendfile(2) bufs alloced */
+extern int nsfbufspeak; /* Peak of nsfbufsused */
+extern int nsfbufsused; /* Number of sendfile(2) bufs in use */
+
+struct sf_buf *
+ sf_buf_alloc(struct vm_page *m, int flags);
+void sf_buf_free(struct sf_buf *sf);
+void sf_buf_mext(void *addr, void *args);
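+
+/*
+ * Illustrative sketch (not part of the original header): creating a
+ * temporary kernel mapping for a wired page m.  sf_buf_kva() is the
+ * machine-dependent accessor for the mapping's address; error handling
+ * is abbreviated.
+ *
+ *	struct sf_buf *sf;
+ *
+ *	sf = sf_buf_alloc(m, SFB_NOWAIT);
+ *	if (sf == NULL)
+ *		return (ENOMEM);
+ *	bcopy(src, (void *)sf_buf_kva(sf), len);
+ *	sf_buf_free(sf);
+ */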
+
+#endif /* !_SYS_SF_BUF_HH_ */
diff --git a/rtems/freebsd/sys/sigio.h b/rtems/freebsd/sys/sigio.h
new file mode 100644
index 00000000..dc29d948
--- /dev/null
+++ b/rtems/freebsd/sys/sigio.h
@@ -0,0 +1,67 @@
+/*-
+ * Copyright (c) 1990, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)filedesc.h 8.1 (Berkeley) 6/2/93
+ * $FreeBSD$
+ */
+
+#ifndef _SYS_SIGIO_HH_
+#define _SYS_SIGIO_HH_
+
+/*
+ * This structure holds the information needed to send a SIGIO or
+ * a SIGURG signal to a process or process group when new data arrives
+ * on a device or socket. The structure is placed on an SLIST belonging
+ * to the proc or pgrp so that the entire list may be revoked when the
+ * process exits or the process group disappears.
+ *
+ * (c) const
+ * (pg) locked by either the process or process group lock
+ */
+struct sigio {
+ union {
+ struct proc *siu_proc; /* (c) process to receive SIGIO/SIGURG */
+ struct pgrp *siu_pgrp; /* (c) process group to receive ... */
+ } sio_u;
+ SLIST_ENTRY(sigio) sio_pgsigio; /* (pg) sigio's for process or group */
+ struct sigio **sio_myref; /* (c) location of the pointer that holds
+ * the reference to this structure */
+ struct ucred *sio_ucred; /* (c) current credentials */
+ pid_t sio_pgid; /* (c) pgid for signals */
+};
+#define sio_proc sio_u.siu_proc
+#define sio_pgrp sio_u.siu_pgrp
+
+SLIST_HEAD(sigiolst, sigio);
+
+pid_t fgetown(struct sigio **sigiop);
+int fsetown(pid_t pgid, struct sigio **sigiop);
+void funsetown(struct sigio **sigiop);
+void funsetownlst(struct sigiolst *sigiolst);
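+
+/*
+ * Illustrative sketch (not part of the original header): the common
+ * FIOSETOWN/FIOGETOWN ioctl pattern for an object that carries a
+ * struct sigio pointer.  The softc member sc_sigio is hypothetical.
+ *
+ *	case FIOSETOWN:
+ *		return (fsetown(*(int *)data, &sc->sc_sigio));
+ *	case FIOGETOWN:
+ *		*(int *)data = fgetown(&sc->sc_sigio);
+ *		return (0);
+ */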
+
+#endif /* _SYS_SIGIO_HH_ */
diff --git a/rtems/freebsd/sys/signal.h b/rtems/freebsd/sys/signal.h
new file mode 100644
index 00000000..8a09ca36
--- /dev/null
+++ b/rtems/freebsd/sys/signal.h
@@ -0,0 +1,439 @@
+/*-
+ * Copyright (c) 1982, 1986, 1989, 1991, 1993
+ * The Regents of the University of California. All rights reserved.
+ * (c) UNIX System Laboratories, Inc.
+ * All or some portions of this file are derived from material licensed
+ * to the University of California by American Telephone and Telegraph
+ * Co. or Unix System Laboratories, Inc. and are reproduced herein with
+ * the permission of UNIX System Laboratories, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)signal.h 8.4 (Berkeley) 5/4/95
+ * $FreeBSD$
+ */
+
+#ifndef _SYS_SIGNAL_HH_
+#define _SYS_SIGNAL_HH_
+
+#include <rtems/freebsd/sys/cdefs.h>
+#include <rtems/freebsd/sys/_types.h>
+#include <rtems/freebsd/sys/_sigset.h>
+
+#include <rtems/freebsd/machine/_limits.h> /* __MINSIGSTKSZ */
+#include <rtems/freebsd/machine/signal.h> /* sig_atomic_t; trap codes; sigcontext */
+
+/*
+ * System defined signals.
+ */
+#if __POSIX_VISIBLE || __XSI_VISIBLE
+#define SIGHUP 1 /* hangup */
+#endif
+#define SIGINT 2 /* interrupt */
+#if __POSIX_VISIBLE || __XSI_VISIBLE
+#define SIGQUIT 3 /* quit */
+#endif
+#define SIGILL 4 /* illegal instr. (not reset when caught) */
+#if __XSI_VISIBLE
+#define SIGTRAP 5 /* trace trap (not reset when caught) */
+#endif
+#define SIGABRT 6 /* abort() */
+#if __BSD_VISIBLE
+#define SIGIOT SIGABRT /* compatibility */
+#define SIGEMT 7 /* EMT instruction */
+#endif
+#define SIGFPE 8 /* floating point exception */
+#if __POSIX_VISIBLE || __XSI_VISIBLE
+#define SIGKILL 9 /* kill (cannot be caught or ignored) */
+#endif
+#if __POSIX_VISIBLE >= 200112 || __XSI_VISIBLE
+#define SIGBUS 10 /* bus error */
+#endif
+#define SIGSEGV 11 /* segmentation violation */
+#if __POSIX_VISIBLE >= 200112 || __XSI_VISIBLE
+#define SIGSYS 12 /* non-existent system call invoked */
+#endif
+#if __POSIX_VISIBLE || __XSI_VISIBLE
+#define SIGPIPE 13 /* write on a pipe with no one to read it */
+#define SIGALRM 14 /* alarm clock */
+#endif
+#define SIGTERM 15 /* software termination signal from kill */
+#if __POSIX_VISIBLE >= 200112 || __XSI_VISIBLE
+#define SIGURG 16 /* urgent condition on IO channel */
+#endif
+#if __POSIX_VISIBLE || __XSI_VISIBLE
+#define SIGSTOP 17 /* sendable stop signal not from tty */
+#define SIGTSTP 18 /* stop signal from tty */
+#define SIGCONT 19 /* continue a stopped process */
+#define SIGCHLD 20 /* to parent on child stop or exit */
+#define SIGTTIN 21 /* to readers pgrp upon background tty read */
+#define SIGTTOU 22 /* like TTIN if (tp->t_local&LTOSTOP) */
+#endif
+#if __BSD_VISIBLE
+#define SIGIO 23 /* input/output possible signal */
+#endif
+#if __XSI_VISIBLE
+#define SIGXCPU 24 /* exceeded CPU time limit */
+#define SIGXFSZ 25 /* exceeded file size limit */
+#define SIGVTALRM 26 /* virtual time alarm */
+#define SIGPROF 27 /* profiling time alarm */
+#endif
+#if __BSD_VISIBLE
+#define SIGWINCH 28 /* window size changes */
+#define SIGINFO 29 /* information request */
+#endif
+#if __POSIX_VISIBLE || __XSI_VISIBLE
+#define SIGUSR1 30 /* user defined signal 1 */
+#define SIGUSR2 31 /* user defined signal 2 */
+#endif
+#if __BSD_VISIBLE
+#define SIGTHR 32 /* reserved by thread library. */
+#define SIGLWP SIGTHR
+#endif
+
+#define SIGRTMIN 65
+#define SIGRTMAX 126
+
+#define SIG_DFL ((__sighandler_t *)0)
+#define SIG_IGN ((__sighandler_t *)1)
+#define SIG_ERR ((__sighandler_t *)-1)
+/* #define SIG_CATCH ((__sighandler_t *)2) See signalvar.h */
+#define SIG_HOLD ((__sighandler_t *)3)
+
+/*-
+ * Type of a signal handling function.
+ *
+ * Language spec sez signal handlers take exactly one arg, even though we
+ * actually supply three. Ugh!
+ *
+ * We don't try to hide the difference by leaving out the args because
+ * that would cause warnings about conformant programs. Nonconformant
+ * programs can avoid the warnings by casting to (__sighandler_t *) or
+ * sig_t before calling signal() or assigning to sa_handler or sv_handler.
+ *
+ * The kernel should reverse the cast before calling the function. It
+ * has no way to do this, but on most machines 1-arg and 3-arg functions
+ * have the same calling protocol so there is no problem in practice.
+ * A bit in sa_flags could be used to specify the number of args.
+ */
+typedef void __sighandler_t(int);
+
+#if __POSIX_VISIBLE || __XSI_VISIBLE
+#ifndef _SIGSET_T_DECLARED
+#define _SIGSET_T_DECLARED
+typedef __sigset_t sigset_t;
+#endif
+#endif
+
+#if __POSIX_VISIBLE >= 199309 || __XSI_VISIBLE >= 500
+union sigval {
+ /* Members as suggested by Annex C of POSIX 1003.1b. */
+ int sival_int;
+ void *sival_ptr;
+ /* 6.0 compatibility */
+ int sigval_int;
+ void *sigval_ptr;
+};
+#endif
+
+#if __POSIX_VISIBLE >= 199309
+struct sigevent {
+ int sigev_notify; /* Notification type */
+ int sigev_signo; /* Signal number */
+ union sigval sigev_value; /* Signal value */
+ union {
+ __lwpid_t _threadid;
+ struct {
+ void (*_function)(union sigval);
+ void *_attribute; /* pthread_attr_t * */
+ } _sigev_thread;
+ long __spare__[8];
+ } _sigev_un;
+};
+
+#if __BSD_VISIBLE
+#define sigev_notify_kqueue sigev_signo
+#define sigev_notify_thread_id _sigev_un._threadid
+#endif
+#define sigev_notify_function _sigev_un._sigev_thread._function
+#define sigev_notify_attributes _sigev_un._sigev_thread._attribute
+
+#define SIGEV_NONE 0 /* No async notification. */
+#define SIGEV_SIGNAL 1 /* Generate a queued signal. */
+#define SIGEV_THREAD 2 /* Call back from another pthread. */
+#if __BSD_VISIBLE
+#define SIGEV_KEVENT 3 /* Generate a kevent. */
+#define SIGEV_THREAD_ID 4 /* Send signal to a kernel thread. */
+#endif
+#endif /* __POSIX_VISIBLE >= 199309 */
+
+#if __POSIX_VISIBLE >= 199309 || __XSI_VISIBLE
+typedef struct __siginfo {
+ int si_signo; /* signal number */
+ int si_errno; /* errno association */
+ /*
+ * Cause of signal, one of the SI_ macros or signal-specific
+ * values, i.e. one of the FPE_... values for SIGFPE. This
+ * value is equivalent to the second argument to an old-style
+ * FreeBSD signal handler.
+ */
+ int si_code; /* signal code */
+ __pid_t si_pid; /* sending process */
+ __uid_t si_uid; /* sender's ruid */
+ int si_status; /* exit value */
+ void *si_addr; /* faulting instruction */
+ union sigval si_value; /* signal value */
+ union {
+ struct {
+ int _trapno;/* machine specific trap code */
+ } _fault;
+ struct {
+ int _timerid;
+ int _overrun;
+ } _timer;
+ struct {
+ int _mqd;
+ } _mesgq;
+ struct {
+ long _band; /* band event for SIGPOLL */
+ } _poll; /* was this ever used ? */
+ struct {
+ long __spare1__;
+ int __spare2__[7];
+ } __spare__;
+ } _reason;
+} siginfo_t;
+
+#define si_trapno _reason._fault._trapno
+#define si_timerid _reason._timer._timerid
+#define si_overrun _reason._timer._overrun
+#define si_mqd _reason._mesgq._mqd
+#define si_band _reason._poll._band
+
+/** si_code **/
+/* codes for SIGILL */
+#define ILL_ILLOPC 1 /* Illegal opcode. */
+#define ILL_ILLOPN 2 /* Illegal operand. */
+#define ILL_ILLADR 3 /* Illegal addressing mode. */
+#define ILL_ILLTRP 4 /* Illegal trap. */
+#define ILL_PRVOPC 5 /* Privileged opcode. */
+#define ILL_PRVREG 6 /* Privileged register. */
+#define ILL_COPROC 7 /* Coprocessor error. */
+#define ILL_BADSTK 8 /* Internal stack error. */
+
+/* codes for SIGBUS */
+#define BUS_ADRALN 1 /* Invalid address alignment. */
+#define BUS_ADRERR 2 /* Nonexistent physical address. */
+#define BUS_OBJERR 3 /* Object-specific hardware error. */
+
+/* codes for SIGSEGV */
+#define SEGV_MAPERR 1 /* Address not mapped to object. */
+#define SEGV_ACCERR 2 /* Invalid permissions for mapped */
+ /* object. */
+
+/* codes for SIGFPE */
+#define FPE_INTOVF 1 /* Integer overflow. */
+#define FPE_INTDIV 2 /* Integer divide by zero. */
+#define FPE_FLTDIV 3 /* Floating point divide by zero. */
+#define FPE_FLTOVF 4 /* Floating point overflow. */
+#define FPE_FLTUND 5 /* Floating point underflow. */
+#define FPE_FLTRES 6 /* Floating point inexact result. */
+#define FPE_FLTINV 7 /* Invalid floating point operation. */
+#define FPE_FLTSUB 8 /* Subscript out of range. */
+
+/* codes for SIGTRAP */
+#define TRAP_BRKPT 1 /* Process breakpoint. */
+#define TRAP_TRACE 2 /* Process trace trap. */
+
+/* codes for SIGCHLD */
+#define CLD_EXITED 1 /* Child has exited */
+#define CLD_KILLED 2 /* Child has terminated abnormally but */
+ /* did not create a core file */
+#define CLD_DUMPED 3 /* Child has terminated abnormally and */
+ /* created a core file */
+#define CLD_TRAPPED 4 /* Traced child has trapped */
+#define CLD_STOPPED 5 /* Child has stopped */
+#define CLD_CONTINUED 6 /* Stopped child has continued */
+
+/* codes for SIGPOLL */
+#define POLL_IN 1 /* Data input available */
+#define POLL_OUT 2 /* Output buffers available */
+#define POLL_MSG 3 /* Input message available */
+#define POLL_ERR 4 /* I/O Error */
+#define POLL_PRI 5 /* High priority input available */
+#define POLL_HUP 6 /* Device disconnected */
+
+#endif
+
+#if __POSIX_VISIBLE || __XSI_VISIBLE
+struct __siginfo;
+
+/*
+ * Signal vector "template" used in sigaction call.
+ */
+struct sigaction {
+ union {
+ void (*__sa_handler)(int);
+ void (*__sa_sigaction)(int, struct __siginfo *, void *);
+ } __sigaction_u; /* signal handler */
+ int sa_flags; /* see signal options below */
+ sigset_t sa_mask; /* signal mask to apply */
+};
+
+#define sa_handler __sigaction_u.__sa_handler
+#endif
+
+#if __XSI_VISIBLE
+/* If SA_SIGINFO is set, sa_sigaction must be used instead of sa_handler. */
+#define sa_sigaction __sigaction_u.__sa_sigaction
+#endif
+
+#if __POSIX_VISIBLE || __XSI_VISIBLE
+#define SA_NOCLDSTOP 0x0008 /* do not generate SIGCHLD on child stop */
+#endif /* __POSIX_VISIBLE || __XSI_VISIBLE */
+
+#if __XSI_VISIBLE
+#define SA_ONSTACK 0x0001 /* take signal on signal stack */
+#define SA_RESTART 0x0002 /* restart system call on signal return */
+#define SA_RESETHAND 0x0004 /* reset to SIG_DFL when taking signal */
+#define SA_NODEFER 0x0010 /* don't mask the signal we're delivering */
+#define SA_NOCLDWAIT 0x0020 /* don't keep zombies around */
+#define SA_SIGINFO 0x0040 /* signal handler with SA_SIGINFO args */
+#endif
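+
+/*
+ * Illustrative sketch (not part of the original header): installing a
+ * handler that receives the extended siginfo arguments.  The handler
+ * body and the use of SIGUSR1 are hypothetical.
+ *
+ *	static void
+ *	handler(int sig, siginfo_t *si, void *ctx)
+ *	{
+ *		...
+ *	}
+ *
+ *	struct sigaction sa;
+ *
+ *	sa.sa_sigaction = handler;
+ *	sa.sa_flags = SA_SIGINFO | SA_RESTART;
+ *	sigemptyset(&sa.sa_mask);
+ *	sigaction(SIGUSR1, &sa, NULL);
+ */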
+
+#if __BSD_VISIBLE
+#define NSIG 32 /* number of old signals (counting 0) */
+#endif
+
+#if __POSIX_VISIBLE || __XSI_VISIBLE
+#define SI_NOINFO 0 /* No signal info besides si_signo. */
+#define SI_USER 0x10001 /* Signal sent by kill(). */
+#define SI_QUEUE 0x10002 /* Signal sent by the sigqueue(). */
+#define SI_TIMER 0x10003 /* Signal generated by expiration of */
+ /* a timer set by timer_settime(). */
+#define SI_ASYNCIO 0x10004 /* Signal generated by completion of */
+ /* an asynchronous I/O request.*/
+#define SI_MESGQ 0x10005 /* Signal generated by arrival of a */
+ /* message on an empty message queue. */
+#define SI_KERNEL 0x10006
+#endif
+#if __BSD_VISIBLE
+#define SI_UNDEFINED 0
+#endif
+
+#if __BSD_VISIBLE
+typedef __sighandler_t *sig_t; /* type of pointer to a signal function */
+typedef void __siginfohandler_t(int, struct __siginfo *, void *);
+#endif
+
+#if __XSI_VISIBLE
+/*
+ * Structure used in sigaltstack call.
+ */
+#if __BSD_VISIBLE
+typedef struct sigaltstack {
+#else
+typedef struct {
+#endif
+ char *ss_sp; /* signal stack base */
+ __size_t ss_size; /* signal stack length */
+ int ss_flags; /* SS_DISABLE and/or SS_ONSTACK */
+} stack_t;
+
+#define SS_ONSTACK 0x0001 /* take signal on alternate stack */
+#define SS_DISABLE 0x0004 /* disable taking signals on alternate stack */
+#define MINSIGSTKSZ __MINSIGSTKSZ /* minimum stack size */
+#define SIGSTKSZ (MINSIGSTKSZ + 32768) /* recommended stack size */
+#endif
+
+#if __BSD_VISIBLE
+/*
+ * 4.3 compatibility:
+ * Signal vector "template" used in sigvec call.
+ */
+struct sigvec {
+ __sighandler_t *sv_handler; /* signal handler */
+ int sv_mask; /* signal mask to apply */
+ int sv_flags; /* see signal options below */
+};
+
+#define SV_ONSTACK SA_ONSTACK
+#define SV_INTERRUPT SA_RESTART /* same bit, opposite sense */
+#define SV_RESETHAND SA_RESETHAND
+#define SV_NODEFER SA_NODEFER
+#define SV_NOCLDSTOP SA_NOCLDSTOP
+#define SV_SIGINFO SA_SIGINFO
+#define sv_onstack sv_flags /* isn't compatibility wonderful! */
+#endif
+
+/* Keep this in one place only */
+#if defined(_KERNEL) && defined(COMPAT_43) && \
+ !defined(__i386__)
+struct osigcontext {
+ int _not_used;
+};
+#endif
+
+#if __XSI_VISIBLE
+/*
+ * Structure used in sigstack call.
+ */
+struct sigstack {
+ /* XXX ss_sp's type should be `void *'. */
+ char *ss_sp; /* signal stack pointer */
+ int ss_onstack; /* current status */
+};
+#endif
+
+#if __BSD_VISIBLE || __POSIX_VISIBLE > 0 && __POSIX_VISIBLE <= 200112
+/*
+ * Macro for converting signal number to a mask suitable for
+ * sigblock().
+ */
+#define sigmask(m) (1 << ((m)-1))
+#endif
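+
+/*
+ * For example, with SIGINT defined as 2 above, sigmask(SIGINT) is
+ * (1 << 1) == 0x00000002, the mask bit sigblock()-era interfaces expect.
+ */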
+
+#if __BSD_VISIBLE
+#define BADSIG SIG_ERR
+#endif
+
+#if __POSIX_VISIBLE || __XSI_VISIBLE
+/*
+ * Flags for sigprocmask:
+ */
+#define SIG_BLOCK 1 /* block specified signal set */
+#define SIG_UNBLOCK 2 /* unblock specified signal set */
+#define SIG_SETMASK 3 /* set specified signal set */
+#endif
+
+/*
+ * For historical reasons; programs expect signal's return value to be
+ * defined by <sys/signal.h>.
+ */
+__BEGIN_DECLS
+__sighandler_t *signal(int, __sighandler_t *);
+__END_DECLS
+
+#endif /* !_SYS_SIGNAL_HH_ */
diff --git a/rtems/freebsd/sys/signalvar.h b/rtems/freebsd/sys/signalvar.h
new file mode 100644
index 00000000..4ae58ab6
--- /dev/null
+++ b/rtems/freebsd/sys/signalvar.h
@@ -0,0 +1,372 @@
+/*-
+ * Copyright (c) 1991, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)signalvar.h 8.6 (Berkeley) 2/19/95
+ * $FreeBSD$
+ */
+
+#ifndef _SYS_SIGNALVAR_HH_
+#define _SYS_SIGNALVAR_HH_
+
+#include <rtems/freebsd/sys/queue.h>
+#include <rtems/freebsd/sys/_lock.h>
+#include <rtems/freebsd/sys/_mutex.h>
+#include <rtems/freebsd/sys/signal.h>
+
+/*
+ * Kernel signal definitions and data structures,
+ * not exported to user programs.
+ */
+
+/*
+ * Logical process signal actions and state, needed only within the process.
+ * The mapping between sigacts and proc structures is 1:1 except for rfork()
+ * processes masquerading as threads, which use one structure for the whole
+ * group. All members are locked by the included mutex. The reference count
+ * and mutex must be last for the bcopy in sigacts_copy() to work.
+ */
+struct sigacts {
+ sig_t ps_sigact[_SIG_MAXSIG]; /* Disposition of signals. */
+ sigset_t ps_catchmask[_SIG_MAXSIG]; /* Signals to be blocked. */
+ sigset_t ps_sigonstack; /* Signals to take on sigstack. */
+ sigset_t ps_sigintr; /* Signals that interrupt syscalls. */
+ sigset_t ps_sigreset; /* Signals that reset when caught. */
+ sigset_t ps_signodefer; /* Signals not masked while handled. */
+ sigset_t ps_siginfo; /* Signals that want SA_SIGINFO args. */
+ sigset_t ps_sigignore; /* Signals being ignored. */
+ sigset_t ps_sigcatch; /* Signals being caught by user. */
+ sigset_t ps_freebsd4; /* signals using freebsd4 ucontext. */
+ sigset_t ps_osigset; /* Signals using <= 3.x osigset_t. */
+ sigset_t ps_usertramp; /* SunOS compat; libc sigtramp. XXX */
+ int ps_flag;
+ int ps_refcnt;
+ struct mtx ps_mtx;
+};
+
+#define PS_NOCLDWAIT 0x0001 /* No zombies if child dies */
+#define PS_NOCLDSTOP 0x0002 /* No SIGCHLD when children stop. */
+#define PS_CLDSIGIGN 0x0004 /* The SIGCHLD handler is SIG_IGN. */
+
+#if defined(_KERNEL) && defined(COMPAT_43)
+/*
+ * Compatibility.
+ */
+typedef struct {
+ struct osigcontext si_sc;
+ int si_signo;
+ int si_code;
+ union sigval si_value;
+} osiginfo_t;
+
+struct osigaction {
+ union {
+ void (*__sa_handler)(int);
+ void (*__sa_sigaction)(int, osiginfo_t *, void *);
+ } __sigaction_u; /* signal handler */
+ osigset_t sa_mask; /* signal mask to apply */
+ int sa_flags; /* see signal options below */
+};
+
+typedef void __osiginfohandler_t(int, osiginfo_t *, void *);
+#endif /* _KERNEL && COMPAT_43 */
+
+/* additional signal action values, used only temporarily/internally */
+#define SIG_CATCH ((__sighandler_t *)2)
+/* #define SIG_HOLD ((__sighandler_t *)3) See signal.h */
+
+/*
+ * get signal action for process and signal; currently only for current process
+ */
+#define SIGACTION(p, sig) (p->p_sigacts->ps_sigact[_SIG_IDX(sig)])
+
+/*
+ * sigset_t manipulation macros
+ */
+#define SIGADDSET(set, signo) \
+ ((set).__bits[_SIG_WORD(signo)] |= _SIG_BIT(signo))
+
+#define SIGDELSET(set, signo) \
+ ((set).__bits[_SIG_WORD(signo)] &= ~_SIG_BIT(signo))
+
+#define SIGEMPTYSET(set) \
+ do { \
+ int __i; \
+ for (__i = 0; __i < _SIG_WORDS; __i++) \
+ (set).__bits[__i] = 0; \
+ } while (0)
+
+#define SIGFILLSET(set) \
+ do { \
+ int __i; \
+ for (__i = 0; __i < _SIG_WORDS; __i++) \
+ (set).__bits[__i] = ~0U; \
+ } while (0)
+
+#define SIGISMEMBER(set, signo) \
+ ((set).__bits[_SIG_WORD(signo)] & _SIG_BIT(signo))
+
+#define SIGISEMPTY(set) (__sigisempty(&(set)))
+#define SIGNOTEMPTY(set) (!__sigisempty(&(set)))
+
+#define SIGSETEQ(set1, set2) (__sigseteq(&(set1), &(set2)))
+#define SIGSETNEQ(set1, set2) (!__sigseteq(&(set1), &(set2)))
+
+#define SIGSETOR(set1, set2) \
+ do { \
+ int __i; \
+ for (__i = 0; __i < _SIG_WORDS; __i++) \
+ (set1).__bits[__i] |= (set2).__bits[__i]; \
+ } while (0)
+
+#define SIGSETAND(set1, set2) \
+ do { \
+ int __i; \
+ for (__i = 0; __i < _SIG_WORDS; __i++) \
+ (set1).__bits[__i] &= (set2).__bits[__i]; \
+ } while (0)
+
+#define SIGSETNAND(set1, set2) \
+ do { \
+ int __i; \
+ for (__i = 0; __i < _SIG_WORDS; __i++) \
+ (set1).__bits[__i] &= ~(set2).__bits[__i]; \
+ } while (0)
+
+#define SIGSETLO(set1, set2) ((set1).__bits[0] = (set2).__bits[0])
+#define SIGSETOLD(set, oset) ((set).__bits[0] = (oset))
+
+#define SIG_CANTMASK(set) \
+ SIGDELSET(set, SIGKILL), SIGDELSET(set, SIGSTOP)
+
+#define SIG_STOPSIGMASK(set) \
+ SIGDELSET(set, SIGSTOP), SIGDELSET(set, SIGTSTP), \
+ SIGDELSET(set, SIGTTIN), SIGDELSET(set, SIGTTOU)
+
+#define SIG_CONTSIGMASK(set) \
+ SIGDELSET(set, SIGCONT)
+
+#define sigcantmask (sigmask(SIGKILL) | sigmask(SIGSTOP))
+
+#define SIG2OSIG(sig, osig) (osig = (sig).__bits[0])
+#define OSIG2SIG(osig, sig) SIGEMPTYSET(sig); (sig).__bits[0] = osig
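+
+/*
+ * Illustrative sketch (not part of the original header): building and
+ * testing a set with the macros above.
+ *
+ *	sigset_t set;
+ *
+ *	SIGEMPTYSET(set);
+ *	SIGADDSET(set, SIGTERM);
+ *	if (SIGISMEMBER(set, SIGTERM))
+ *		...
+ */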
+
+static __inline int
+__sigisempty(sigset_t *set)
+{
+ int i;
+
+ for (i = 0; i < _SIG_WORDS; i++) {
+ if (set->__bits[i])
+ return (0);
+ }
+ return (1);
+}
+
+static __inline int
+__sigseteq(sigset_t *set1, sigset_t *set2)
+{
+ int i;
+
+ for (i = 0; i < _SIG_WORDS; i++) {
+ if (set1->__bits[i] != set2->__bits[i])
+ return (0);
+ }
+ return (1);
+}
+
+struct osigevent {
+ int sigev_notify; /* Notification type */
+ union {
+ int __sigev_signo; /* Signal number */
+ int __sigev_notify_kqueue;
+ } __sigev_u;
+ union sigval sigev_value; /* Signal value */
+};
+
+typedef struct ksiginfo {
+ TAILQ_ENTRY(ksiginfo) ksi_link;
+ siginfo_t ksi_info;
+ int ksi_flags;
+ struct sigqueue *ksi_sigq;
+} ksiginfo_t;
+
+#define ksi_signo ksi_info.si_signo
+#define ksi_errno ksi_info.si_errno
+#define ksi_code ksi_info.si_code
+#define ksi_pid ksi_info.si_pid
+#define ksi_uid ksi_info.si_uid
+#define ksi_status ksi_info.si_status
+#define ksi_addr ksi_info.si_addr
+#define ksi_value ksi_info.si_value
+#define ksi_band ksi_info.si_band
+#define ksi_trapno ksi_info.si_trapno
+#define ksi_overrun ksi_info.si_overrun
+#define ksi_timerid ksi_info.si_timerid
+#define ksi_mqd ksi_info.si_mqd
+
+/* bits for ksi_flags */
+#define KSI_TRAP 0x01 /* Generated by trap. */
+#define KSI_EXT 0x02 /* Externally managed ksi. */
+#define KSI_INS 0x04 /* Directly insert ksi, not the copy */
+#define KSI_SIGQ 0x08 /* Generated by sigqueue, might return EAGAIN. */
+#define KSI_HEAD 0x10 /* Insert into head, not tail. */
+#define KSI_COPYMASK (KSI_TRAP|KSI_SIGQ)
+
+#define KSI_ONQ(ksi) ((ksi)->ksi_sigq != NULL)
+
+typedef struct sigqueue {
+ sigset_t sq_signals; /* All pending signals. */
+ sigset_t sq_kill; /* Legacy depth 1 queue. */
+ TAILQ_HEAD(, ksiginfo) sq_list;/* Queued signal info. */
+ struct proc *sq_proc;
+ int sq_flags;
+} sigqueue_t;
+
+/* Flags for sq_flags */
+#define SQ_INIT 0x01
+
+#ifdef _KERNEL
+
+/* Return nonzero if thread td has an unmasked pending signal. */
+#define SIGPENDING(td) \
+ ((!SIGISEMPTY((td)->td_siglist) && \
+ !sigsetmasked(&(td)->td_siglist, &(td)->td_sigmask)) || \
+ (!SIGISEMPTY((td)->td_proc->p_siglist) && \
+ !sigsetmasked(&(td)->td_proc->p_siglist, &(td)->td_sigmask)))
+/*
+ * Return the value of the pseudo-expression ((*set & ~*mask) == 0). This
+ * is an optimized version of SIGISEMPTY() on a temporary variable
+ * containing SIGSETNAND(*set, *mask).
+ */
+static __inline int
+sigsetmasked(sigset_t *set, sigset_t *mask)
+{
+ int i;
+
+ for (i = 0; i < _SIG_WORDS; i++) {
+ if (set->__bits[i] & ~mask->__bits[i])
+ return (0);
+ }
+ return (1);
+}
+
+#define ksiginfo_init(ksi) \
+do { \
+ bzero(ksi, sizeof(ksiginfo_t)); \
+} while(0)
+
+#define ksiginfo_init_trap(ksi) \
+do { \
+ ksiginfo_t *kp = ksi; \
+ bzero(kp, sizeof(ksiginfo_t)); \
+ kp->ksi_flags |= KSI_TRAP; \
+} while(0)
+
+static __inline void
+ksiginfo_copy(ksiginfo_t *src, ksiginfo_t *dst)
+{
+ (dst)->ksi_info = src->ksi_info;
+ (dst)->ksi_flags = (src->ksi_flags & KSI_COPYMASK);
+}
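+
+/*
+ * Illustrative sketch (not part of the original header): how an MD trap
+ * handler typically fills a ksiginfo before posting it.  The fault
+ * address variable is hypothetical.
+ *
+ *	ksiginfo_t ksi;
+ *
+ *	ksiginfo_init_trap(&ksi);
+ *	ksi.ksi_signo = SIGSEGV;
+ *	ksi.ksi_code = SEGV_MAPERR;
+ *	ksi.ksi_addr = (void *)fault_addr;
+ *	trapsignal(td, &ksi);
+ */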
+
+struct pgrp;
+struct thread;
+struct proc;
+struct sigio;
+struct mtx;
+
+extern int sugid_coredump; /* Sysctl variable kern.sugid_coredump */
+extern struct mtx sigio_lock;
+extern int kern_logsigexit; /* Sysctl variable kern.logsigexit */
+
+/*
+ * Lock the pointers for a sigio object in the underlying objects of
+ * a file descriptor.
+ */
+#define SIGIO_LOCK() mtx_lock(&sigio_lock)
+#define SIGIO_TRYLOCK() mtx_trylock(&sigio_lock)
+#define SIGIO_UNLOCK() mtx_unlock(&sigio_lock)
+#define SIGIO_LOCKED() mtx_owned(&sigio_lock)
+#define SIGIO_ASSERT(type) mtx_assert(&sigio_lock, type)
+
+/* stop_allowed parameter for cursig */
+#define SIG_STOP_ALLOWED 100
+#define SIG_STOP_NOT_ALLOWED 101
+
+/* flags for kern_sigprocmask */
+#define SIGPROCMASK_OLD 0x0001
+#define SIGPROCMASK_PROC_LOCKED 0x0002
+#define SIGPROCMASK_PS_LOCKED 0x0004
+
+/*
+ * Machine-independent functions:
+ */
+int cursig(struct thread *td, int stop_allowed);
+void execsigs(struct proc *p);
+void gsignal(int pgid, int sig, ksiginfo_t *ksi);
+void killproc(struct proc *p, char *why);
+void pksignal(struct proc *p, int sig, ksiginfo_t *ksi);
+void pgsigio(struct sigio **, int signum, int checkctty);
+void pgsignal(struct pgrp *pgrp, int sig, int checkctty, ksiginfo_t *ksi);
+int postsig(int sig);
+void psignal(struct proc *p, int sig);
+int psignal_event(struct proc *p, struct sigevent *, ksiginfo_t *);
+struct sigacts *sigacts_alloc(void);
+void sigacts_copy(struct sigacts *dest, struct sigacts *src);
+void sigacts_free(struct sigacts *ps);
+struct sigacts *sigacts_hold(struct sigacts *ps);
+int sigacts_shared(struct sigacts *ps);
+void sigexit(struct thread *td, int signum) __dead2;
+int sig_ffs(sigset_t *set);
+void siginit(struct proc *p);
+void signotify(struct thread *td);
+void tdksignal(struct thread *td, int sig, ksiginfo_t *ksi);
+void tdsigcleanup(struct thread *td);
+int tdsignal(struct proc *p, struct thread *td, int sig,
+ ksiginfo_t *ksi);
+void trapsignal(struct thread *td, ksiginfo_t *);
+int ptracestop(struct thread *td, int sig);
+ksiginfo_t * ksiginfo_alloc(int);
+void ksiginfo_free(ksiginfo_t *);
+void sigqueue_init(struct sigqueue *queue, struct proc *p);
+void sigqueue_flush(struct sigqueue *queue);
+void sigqueue_delete_proc(struct proc *p, int sig);
+void sigqueue_delete(struct sigqueue *queue, int sig);
+void sigqueue_take(ksiginfo_t *ksi);
+int kern_sigtimedwait(struct thread *, sigset_t,
+ ksiginfo_t *, struct timespec *);
+int kern_sigprocmask(struct thread *td, int how,
+ sigset_t *set, sigset_t *oset, int flags);
+/*
+ * Machine-dependent functions:
+ */
+void sendsig(sig_t, ksiginfo_t *, sigset_t *retmask);
+
+#endif /* _KERNEL */
+
+#endif /* !_SYS_SIGNALVAR_HH_ */
diff --git a/rtems/freebsd/sys/smp.h b/rtems/freebsd/sys/smp.h
new file mode 100644
index 00000000..c73bec25
--- /dev/null
+++ b/rtems/freebsd/sys/smp.h
@@ -0,0 +1,183 @@
+/*-
+ * ----------------------------------------------------------------------------
+ * "THE BEER-WARE LICENSE" (Revision 42):
+ * <phk@FreeBSD.org> wrote this file. As long as you retain this notice you
+ * can do whatever you want with this stuff. If we meet some day, and you think
+ * this stuff is worth it, you can buy me a beer in return. Poul-Henning Kamp
+ * ----------------------------------------------------------------------------
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _SYS_SMP_HH_
+#define _SYS_SMP_HH_
+
+#ifdef _KERNEL
+
+#ifndef LOCORE
+
+#ifdef SMP
+
+/*
+ * Topology of a NUMA or HTT system.
+ *
+ * The top level topology is an array of pointers to groups. Each group
+ * contains a bitmask of cpus in its group or subgroups. It may also
+ * contain a pointer to an array of child groups.
+ *
+ * The bitmasks at non-leaf groups may be used by consumers who support
+ * a smaller depth than the hardware provides.
+ *
+ * The topology may be omitted by systems where all CPUs are equal.
+ */
+
+struct cpu_group {
+ struct cpu_group *cg_parent; /* Our parent group. */
+ struct cpu_group *cg_child; /* Optional children groups. */
+ cpumask_t cg_mask; /* Mask of cpus in this group. */
+ int8_t cg_count; /* Count of cpus in this group. */
+ int8_t cg_children; /* Number of children groups. */
+ int8_t cg_level; /* Shared cache level. */
+ int8_t cg_flags; /* Traversal modifiers. */
+};
+
+/*
+ * Defines common resources for CPUs in the group. The highest level
+ * resource should be used when multiple are shared.
+ */
+#define CG_SHARE_NONE 0
+#define CG_SHARE_L1 1
+#define CG_SHARE_L2 2
+#define CG_SHARE_L3 3
+
+/*
+ * Behavior modifiers for load balancing and affinity.
+ */
+#define CG_FLAG_HTT 0x01 /* Schedule the alternate core last. */
+#define CG_FLAG_SMT 0x02 /* New age htt, less crippled. */
+#define CG_FLAG_THREAD (CG_FLAG_HTT | CG_FLAG_SMT) /* Any threading. */
+
+/*
+ * Convenience routines for building topologies.
+ */
+struct cpu_group *smp_topo(void);
+struct cpu_group *smp_topo_none(void);
+struct cpu_group *smp_topo_1level(int l1share, int l1count, int l1flags);
+struct cpu_group *smp_topo_2level(int l2share, int l2count, int l1share,
+ int l1count, int l1flags);
+struct cpu_group *smp_topo_find(struct cpu_group *top, int cpu);
+
+extern void (*cpustop_restartfunc)(void);
+extern int smp_active;
+extern int smp_cpus;
+extern volatile cpumask_t started_cpus;
+extern volatile cpumask_t stopped_cpus;
+extern cpumask_t idle_cpus_mask;
+extern cpumask_t hlt_cpus_mask;
+extern cpumask_t logical_cpus_mask;
+#endif /* SMP */
+
+extern u_int mp_maxid;
+extern int mp_maxcpus;
+extern int mp_ncpus;
+extern volatile int smp_started;
+
+extern cpumask_t all_cpus;
+
+/*
+ * Macro allowing us to determine whether a CPU is absent at any given
+ * time, thus permitting us to configure sparse maps of cpuid-dependent
+ * (per-CPU) structures.
+ */
+#define CPU_ABSENT(x_cpu) ((all_cpus & (1 << (x_cpu))) == 0)
+
+/*
+ * Macros to iterate over non-absent CPUs. CPU_FOREACH() takes an
+ * integer iterator and iterates over the available set of CPUs.
+ * CPU_FIRST() returns the id of the first non-absent CPU. CPU_NEXT()
+ * returns the id of the next non-absent CPU. It will wrap back to
+ * CPU_FIRST() once the end of the list is reached. The iterators are
+ * currently implemented via inline functions.
+ */
+#define CPU_FOREACH(i) \
+ for ((i) = 0; (i) <= mp_maxid; (i)++) \
+ if (!CPU_ABSENT((i)))
+
+static __inline int
+cpu_first(void)
+{
+ int i;
+
+ for (i = 0;; i++)
+ if (!CPU_ABSENT(i))
+ return (i);
+}
+
+static __inline int
+cpu_next(int i)
+{
+
+ for (;;) {
+ i++;
+ if (i > mp_maxid)
+ i = 0;
+ if (!CPU_ABSENT(i))
+ return (i);
+ }
+}
+
+#define CPU_FIRST() cpu_first()
+#define CPU_NEXT(i) cpu_next((i))
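+
+/*
+ * Illustrative sketch (not part of the original header): summing a
+ * hypothetical per-CPU counter over all present CPUs.
+ *
+ *	int cpu, total = 0;
+ *
+ *	CPU_FOREACH(cpu)
+ *		total += per_cpu_count[cpu];
+ */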
+
+#ifdef SMP
+/*
+ * Machine dependent functions used to initialize MP support.
+ *
+ * The cpu_mp_probe() should check to see if MP support is present and return
+ * zero if it is not or non-zero if it is. If MP support is present, then
+ * cpu_mp_start() will be called so that MP can be enabled. This function
+ * should do things such as start up secondary processors. It should also
+ * set up mp_ncpus, all_cpus, and smp_cpus. It should also ensure that
+ * smp_active and smp_started are initialized at the appropriate time.
+ * Once cpu_mp_start() returns, machine independent MP startup code will be
+ * executed and a simple message will be output to the console. Finally,
+ * cpu_mp_announce() will be called so that machine dependent messages about
+ * the MP support may be output to the console if desired.
+ *
+ * The cpu_mp_setmaxid() function is called very early during the boot process
+ * so that the MD code may set mp_maxid to provide an upper bound on CPU IDs
+ * that other subsystems may use. If a platform is not able to determine
+ * the exact maximum ID that early, then it may set mp_maxid to MAXCPU - 1.
+ */
+struct thread;
+
+struct cpu_group *cpu_topo(void);
+void cpu_mp_announce(void);
+int cpu_mp_probe(void);
+void cpu_mp_setmaxid(void);
+void cpu_mp_start(void);
+
+void forward_signal(struct thread *);
+int restart_cpus(cpumask_t);
+int stop_cpus(cpumask_t);
+int stop_cpus_hard(cpumask_t);
+#if defined(__amd64__)
+int suspend_cpus(cpumask_t);
+#endif
+void smp_rendezvous_action(void);
+extern struct mtx smp_ipi_mtx;
+
+#endif /* SMP */
+void smp_no_rendevous_barrier(void *);
+void smp_rendezvous(void (*)(void *),
+ void (*)(void *),
+ void (*)(void *),
+ void *arg);
+void smp_rendezvous_cpus(cpumask_t,
+ void (*)(void *),
+ void (*)(void *),
+ void (*)(void *),
+ void *arg);
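+
+/*
+ * Illustrative sketch (not part of the original header): running an
+ * action on every CPU.  The action function is hypothetical; the NULL
+ * setup and teardown arguments request the default behavior.
+ *
+ *	static void
+ *	poke_cpu(void *arg)
+ *	{
+ *		...
+ *	}
+ *
+ *	smp_rendezvous(NULL, poke_cpu, NULL, NULL);
+ */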
+#endif /* !LOCORE */
+#endif /* _KERNEL */
+#endif /* _SYS_SMP_HH_ */
diff --git a/rtems/freebsd/sys/sockbuf.h b/rtems/freebsd/sys/sockbuf.h
new file mode 100644
index 00000000..1f54f7dc
--- /dev/null
+++ b/rtems/freebsd/sys/sockbuf.h
@@ -0,0 +1,223 @@
+/*-
+ * Copyright (c) 1982, 1986, 1990, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)socketvar.h 8.3 (Berkeley) 2/19/95
+ *
+ * $FreeBSD$
+ */
+#ifndef _SYS_SOCKBUF_HH_
+#define _SYS_SOCKBUF_HH_
+#include <rtems/freebsd/sys/selinfo.h> /* for struct selinfo */
+#include <rtems/freebsd/sys/_lock.h>
+#include <rtems/freebsd/sys/_mutex.h>
+#include <rtems/freebsd/sys/_sx.h>
+
+#define SB_MAX (256*1024) /* default for max chars in sockbuf */
+
+/*
+ * Constants for sb_flags field of struct sockbuf.
+ */
+#define SB_WAIT 0x04 /* someone is waiting for data/space */
+#define SB_SEL 0x08 /* someone is selecting */
+#define SB_ASYNC 0x10 /* ASYNC I/O, need signals */
+#define SB_UPCALL 0x20 /* someone wants an upcall */
+#define SB_NOINTR 0x40 /* operations not interruptible */
+#define SB_AIO 0x80 /* AIO operations queued */
+#define SB_KNOTE 0x100 /* kernel note attached */
+#define SB_NOCOALESCE 0x200 /* don't coalesce new data into existing mbufs */
+#define SB_IN_TOE 0x400 /* socket buffer is in the middle of an operation */
+#define SB_AUTOSIZE 0x800 /* automatically size socket buffer */
+
+#define SBS_CANTSENDMORE 0x0010 /* can't send more data to peer */
+#define SBS_CANTRCVMORE 0x0020 /* can't receive more data from peer */
+#define SBS_RCVATMARK 0x0040 /* at mark on input */
+
+struct mbuf;
+struct sockaddr;
+struct socket;
+struct thread;
+
+struct xsockbuf {
+ u_int sb_cc;
+ u_int sb_hiwat;
+ u_int sb_mbcnt;
+ u_int sb_mcnt;
+ u_int sb_ccnt;
+ u_int sb_mbmax;
+ int sb_lowat;
+ int sb_timeo;
+ short sb_flags;
+};
+
+/*
+ * Variables for socket buffering.
+ */
+struct sockbuf {
+ struct selinfo sb_sel; /* process selecting read/write */
+ struct mtx sb_mtx; /* sockbuf lock */
+ struct sx sb_sx; /* prevent I/O interlacing */
+ short sb_state; /* (c/d) socket state on sockbuf */
+#define sb_startzero sb_mb
+ struct mbuf *sb_mb; /* (c/d) the mbuf chain */
+ struct mbuf *sb_mbtail; /* (c/d) the last mbuf in the chain */
+ struct mbuf *sb_lastrecord; /* (c/d) first mbuf of last
+ * record in socket buffer */
+ struct mbuf *sb_sndptr; /* (c/d) pointer into mbuf chain */
+ u_int sb_sndptroff; /* (c/d) byte offset of ptr into chain */
+ u_int sb_cc; /* (c/d) actual chars in buffer */
+ u_int sb_hiwat; /* (c/d) max actual char count */
+ u_int sb_mbcnt; /* (c/d) chars of mbufs used */
+ u_int sb_mcnt; /* (c/d) number of mbufs in buffer */
+ u_int sb_ccnt; /* (c/d) number of clusters in buffer */
+ u_int sb_mbmax; /* (c/d) max chars of mbufs to use */
+ u_int sb_ctl; /* (c/d) non-data chars in buffer */
+ int sb_lowat; /* (c/d) low water mark */
+ int sb_timeo; /* (c/d) timeout for read/write */
+ short sb_flags; /* (c/d) flags, see below */
+ int (*sb_upcall)(struct socket *, void *, int); /* (c/d) */
+ void *sb_upcallarg; /* (c/d) */
+};
+
+#ifdef _KERNEL
+
+/*
+ * Per-socket buffer mutex used to protect most fields in the socket
+ * buffer.
+ */
+#define SOCKBUF_MTX(_sb) (&(_sb)->sb_mtx)
+#define SOCKBUF_LOCK_INIT(_sb, _name) \
+ mtx_init(SOCKBUF_MTX(_sb), _name, NULL, MTX_DEF)
+#define SOCKBUF_LOCK_DESTROY(_sb) mtx_destroy(SOCKBUF_MTX(_sb))
+#define SOCKBUF_LOCK(_sb) mtx_lock(SOCKBUF_MTX(_sb))
+#define SOCKBUF_OWNED(_sb) mtx_owned(SOCKBUF_MTX(_sb))
+#define SOCKBUF_UNLOCK(_sb) mtx_unlock(SOCKBUF_MTX(_sb))
+#define SOCKBUF_LOCK_ASSERT(_sb) mtx_assert(SOCKBUF_MTX(_sb), MA_OWNED)
+#define SOCKBUF_UNLOCK_ASSERT(_sb) mtx_assert(SOCKBUF_MTX(_sb), MA_NOTOWNED)
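+
+/*
+ * Usage sketch: the usual discipline around direct sockbuf field
+ * access; fields marked (c/d) below require the sockbuf lock:
+ *
+ *	SOCKBUF_LOCK(&so->so_rcv);
+ *	avail = so->so_rcv.sb_cc;
+ *	SOCKBUF_UNLOCK(&so->so_rcv);
+ */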
+
+void sbappend(struct sockbuf *sb, struct mbuf *m);
+void sbappend_locked(struct sockbuf *sb, struct mbuf *m);
+void sbappendstream(struct sockbuf *sb, struct mbuf *m);
+void sbappendstream_locked(struct sockbuf *sb, struct mbuf *m);
+int sbappendaddr(struct sockbuf *sb, const struct sockaddr *asa,
+ struct mbuf *m0, struct mbuf *control);
+int sbappendaddr_locked(struct sockbuf *sb, const struct sockaddr *asa,
+ struct mbuf *m0, struct mbuf *control);
+int sbappendcontrol(struct sockbuf *sb, struct mbuf *m0,
+ struct mbuf *control);
+int sbappendcontrol_locked(struct sockbuf *sb, struct mbuf *m0,
+ struct mbuf *control);
+void sbappendrecord(struct sockbuf *sb, struct mbuf *m0);
+void sbappendrecord_locked(struct sockbuf *sb, struct mbuf *m0);
+void sbcheck(struct sockbuf *sb);
+void sbcompress(struct sockbuf *sb, struct mbuf *m, struct mbuf *n);
+struct mbuf *
+ sbcreatecontrol(caddr_t p, int size, int type, int level);
+void sbdestroy(struct sockbuf *sb, struct socket *so);
+void sbdrop(struct sockbuf *sb, int len);
+void sbdrop_locked(struct sockbuf *sb, int len);
+void sbdroprecord(struct sockbuf *sb);
+void sbdroprecord_locked(struct sockbuf *sb);
+void sbflush(struct sockbuf *sb);
+void sbflush_locked(struct sockbuf *sb);
+void sbrelease(struct sockbuf *sb, struct socket *so);
+void sbrelease_internal(struct sockbuf *sb, struct socket *so);
+void sbrelease_locked(struct sockbuf *sb, struct socket *so);
+int sbreserve(struct sockbuf *sb, u_long cc, struct socket *so,
+ struct thread *td);
+int sbreserve_locked(struct sockbuf *sb, u_long cc, struct socket *so,
+ struct thread *td);
+struct mbuf *
+ sbsndptr(struct sockbuf *sb, u_int off, u_int len, u_int *moff);
+void sbtoxsockbuf(struct sockbuf *sb, struct xsockbuf *xsb);
+int sbwait(struct sockbuf *sb);
+int sblock(struct sockbuf *sb, int flags);
+void sbunlock(struct sockbuf *sb);
+
+/*
+ * How much space is there in a socket buffer (so->so_snd or so->so_rcv)?
+ * This is problematic if the fields are unsigned, as the space might
+ * still be negative (cc > hiwat or mbcnt > mbmax). This should detect
+ * overflow and return 0; it should also use "lmin", but that does not
+ * exist yet.
+ */
+#define sbspace(sb) \
+ ((long) imin((int)((sb)->sb_hiwat - (sb)->sb_cc), \
+ (int)((sb)->sb_mbmax - (sb)->sb_mbcnt)))
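+
+/*
+ * Usage sketch: a typical capacity check before queuing "len" bytes,
+ * with the send buffer lock held:
+ *
+ *	if (sbspace(&so->so_snd) < (long)len)
+ *		error = ENOBUFS;	// no room; caller waits or fails
+ */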
+
+/* adjust counters in sb reflecting allocation of m */
+#define sballoc(sb, m) { \
+ (sb)->sb_cc += (m)->m_len; \
+ if ((m)->m_type != MT_DATA && (m)->m_type != MT_OOBDATA) \
+ (sb)->sb_ctl += (m)->m_len; \
+ (sb)->sb_mbcnt += MSIZE; \
+ (sb)->sb_mcnt += 1; \
+ if ((m)->m_flags & M_EXT) { \
+ (sb)->sb_mbcnt += (m)->m_ext.ext_size; \
+ (sb)->sb_ccnt += 1; \
+ } \
+}
+
+/* adjust counters in sb reflecting freeing of m */
+#define sbfree(sb, m) { \
+ (sb)->sb_cc -= (m)->m_len; \
+ if ((m)->m_type != MT_DATA && (m)->m_type != MT_OOBDATA) \
+ (sb)->sb_ctl -= (m)->m_len; \
+ (sb)->sb_mbcnt -= MSIZE; \
+ (sb)->sb_mcnt -= 1; \
+ if ((m)->m_flags & M_EXT) { \
+ (sb)->sb_mbcnt -= (m)->m_ext.ext_size; \
+ (sb)->sb_ccnt -= 1; \
+ } \
+ if ((sb)->sb_sndptr == (m)) { \
+ (sb)->sb_sndptr = NULL; \
+ (sb)->sb_sndptroff = 0; \
+ } \
+ if ((sb)->sb_sndptroff != 0) \
+ (sb)->sb_sndptroff -= (m)->m_len; \
+}
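+
+/*
+ * Usage sketch: sballoc() and sbfree() must stay strictly paired for
+ * every mbuf that enters and later leaves the buffer:
+ *
+ *	SOCKBUF_LOCK(sb);
+ *	sballoc(sb, m);		// m joins the buffer
+ *	sbfree(sb, m);		// m leaves it again
+ *	SOCKBUF_UNLOCK(sb);
+ */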
+
+#define SB_EMPTY_FIXUP(sb) do { \
+ if ((sb)->sb_mb == NULL) { \
+ (sb)->sb_mbtail = NULL; \
+ (sb)->sb_lastrecord = NULL; \
+ } \
+} while (/*CONSTCOND*/0)
+
+#ifdef SOCKBUF_DEBUG
+void sblastrecordchk(struct sockbuf *, const char *, int);
+#define SBLASTRECORDCHK(sb) sblastrecordchk((sb), __FILE__, __LINE__)
+
+void sblastmbufchk(struct sockbuf *, const char *, int);
+#define SBLASTMBUFCHK(sb) sblastmbufchk((sb), __FILE__, __LINE__)
+#else
+#define SBLASTRECORDCHK(sb) /* nothing */
+#define SBLASTMBUFCHK(sb) /* nothing */
+#endif /* SOCKBUF_DEBUG */
+
+#endif /* _KERNEL */
+
+#endif /* _SYS_SOCKBUF_HH_ */
diff --git a/rtems/freebsd/sys/socket.h b/rtems/freebsd/sys/socket.h
new file mode 100644
index 00000000..e7069368
--- /dev/null
+++ b/rtems/freebsd/sys/socket.h
@@ -0,0 +1,691 @@
+/*-
+ * Copyright (c) 1982, 1985, 1986, 1988, 1993, 1994
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)socket.h 8.4 (Berkeley) 2/21/94
+ * $FreeBSD$
+ */
+
+#ifndef _SYS_SOCKET_HH_
+#define _SYS_SOCKET_HH_
+
+#include <rtems/freebsd/sys/cdefs.h>
+#include <rtems/freebsd/sys/_types.h>
+#include <rtems/freebsd/sys/_iovec.h>
+#define _NO_NAMESPACE_POLLUTION
+#include <rtems/freebsd/machine/param.h>
+#undef _NO_NAMESPACE_POLLUTION
+
+/*
+ * Definitions related to sockets: types, address families, options.
+ */
+
+/*
+ * Data types.
+ */
+#if __BSD_VISIBLE
+#ifndef _GID_T_DECLARED
+typedef __gid_t gid_t;
+#define _GID_T_DECLARED
+#endif
+
+#ifndef _OFF_T_DECLARED
+typedef __off_t off_t;
+#define _OFF_T_DECLARED
+#endif
+
+#ifndef _PID_T_DECLARED
+typedef __pid_t pid_t;
+#define _PID_T_DECLARED
+#endif
+#endif
+
+#ifndef _SA_FAMILY_T_DECLARED
+typedef __sa_family_t sa_family_t;
+#define _SA_FAMILY_T_DECLARED
+#endif
+
+#ifndef _SOCKLEN_T_DECLARED
+typedef __socklen_t socklen_t;
+#define _SOCKLEN_T_DECLARED
+#endif
+
+#ifndef _SSIZE_T_DECLARED
+typedef __ssize_t ssize_t;
+#define _SSIZE_T_DECLARED
+#endif
+
+#if __BSD_VISIBLE
+#ifndef _UID_T_DECLARED
+typedef __uid_t uid_t;
+#define _UID_T_DECLARED
+#endif
+#endif
+
+/*
+ * Types
+ */
+#define SOCK_STREAM 1 /* stream socket */
+#define SOCK_DGRAM 2 /* datagram socket */
+#define SOCK_RAW 3 /* raw-protocol interface */
+#if __BSD_VISIBLE
+#define SOCK_RDM 4 /* reliably-delivered message */
+#endif
+#define SOCK_SEQPACKET 5 /* sequenced packet stream */
+
+/*
+ * Option flags per-socket.
+ */
+#define SO_DEBUG 0x0001 /* turn on debugging info recording */
+#define SO_ACCEPTCONN 0x0002 /* socket has had listen() */
+#define SO_REUSEADDR 0x0004 /* allow local address reuse */
+#define SO_KEEPALIVE 0x0008 /* keep connections alive */
+#define SO_DONTROUTE 0x0010 /* just use interface addresses */
+#define SO_BROADCAST 0x0020 /* permit sending of broadcast msgs */
+#if __BSD_VISIBLE
+#define SO_USELOOPBACK 0x0040 /* bypass hardware when possible */
+#endif
+#define SO_LINGER 0x0080 /* linger on close if data present */
+#define SO_OOBINLINE 0x0100 /* leave received OOB data in line */
+#if __BSD_VISIBLE
+#define SO_REUSEPORT 0x0200 /* allow local address & port reuse */
+#define SO_TIMESTAMP 0x0400 /* timestamp received dgram traffic */
+#define SO_NOSIGPIPE 0x0800 /* no SIGPIPE from EPIPE */
+#define SO_ACCEPTFILTER 0x1000 /* there is an accept filter */
+#define SO_BINTIME 0x2000 /* timestamp received dgram traffic */
+#endif
+#define SO_NO_OFFLOAD 0x4000 /* socket cannot be offloaded */
+#define SO_NO_DDP 0x8000 /* disable direct data placement */
+
+/*
+ * Additional options, not kept in so_options.
+ */
+#define SO_SNDBUF 0x1001 /* send buffer size */
+#define SO_RCVBUF 0x1002 /* receive buffer size */
+#define SO_SNDLOWAT 0x1003 /* send low-water mark */
+#define SO_RCVLOWAT 0x1004 /* receive low-water mark */
+#define SO_SNDTIMEO 0x1005 /* send timeout */
+#define SO_RCVTIMEO 0x1006 /* receive timeout */
+#define SO_ERROR 0x1007 /* get error status and clear */
+#define SO_TYPE 0x1008 /* get socket type */
+#if __BSD_VISIBLE
+#define SO_LABEL 0x1009 /* socket's MAC label */
+#define SO_PEERLABEL 0x1010 /* socket's peer's MAC label */
+#define SO_LISTENQLIMIT 0x1011 /* socket's backlog limit */
+#define SO_LISTENQLEN 0x1012 /* socket's complete queue length */
+#define SO_LISTENINCQLEN 0x1013 /* socket's incomplete queue length */
+#define SO_SETFIB 0x1014 /* use this FIB to route */
+#endif
+
+/*
+ * Structure used for manipulating linger option.
+ */
+struct linger {
+ int l_onoff; /* option on/off */
+ int l_linger; /* linger time */
+};
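+
+/*
+ * Usage sketch: requesting a five second linger-on-close, assuming
+ * "s" is a connected stream socket descriptor:
+ *
+ *	struct linger l = { 1, 5 };	// l_onoff = 1, l_linger = 5
+ *
+ *	(void)setsockopt(s, SOL_SOCKET, SO_LINGER, &l, sizeof(l));
+ */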
+
+#if __BSD_VISIBLE
+struct accept_filter_arg {
+ char af_name[16];
+ char af_arg[256-16];
+};
+#endif
+
+/*
+ * Level number for (get/set)sockopt() to apply to socket itself.
+ */
+#define SOL_SOCKET 0xffff /* options for socket level */
+
+/*
+ * Address families.
+ */
+#define AF_UNSPEC 0 /* unspecified */
+#if __BSD_VISIBLE
+#define AF_LOCAL AF_UNIX /* local to host (pipes, portals) */
+#endif
+#define AF_UNIX 1 /* standardized name for AF_LOCAL */
+#define AF_INET 2 /* internetwork: UDP, TCP, etc. */
+#if __BSD_VISIBLE
+#define AF_IMPLINK 3 /* arpanet imp addresses */
+#define AF_PUP 4 /* pup protocols: e.g. BSP */
+#define AF_CHAOS 5 /* mit CHAOS protocols */
+#define AF_NETBIOS 6 /* SMB protocols */
+#define AF_ISO 7 /* ISO protocols */
+#define AF_OSI AF_ISO
+#define AF_ECMA 8 /* European computer manufacturers */
+#define AF_DATAKIT 9 /* datakit protocols */
+#define AF_CCITT 10 /* CCITT protocols, X.25 etc */
+#define AF_SNA 11 /* IBM SNA */
+#define AF_DECnet 12 /* DECnet */
+#define AF_DLI 13 /* DEC Direct data link interface */
+#define AF_LAT 14 /* LAT */
+#define AF_HYLINK 15 /* NSC Hyperchannel */
+#define AF_APPLETALK 16 /* Apple Talk */
+#define AF_ROUTE 17 /* Internal Routing Protocol */
+#define AF_LINK 18 /* Link layer interface */
+#define pseudo_AF_XTP 19 /* eXpress Transfer Protocol (no AF) */
+#define AF_COIP 20 /* connection-oriented IP, aka ST II */
+#define AF_CNT 21 /* Computer Network Technology */
+#define pseudo_AF_RTIP 22 /* Help Identify RTIP packets */
+#define AF_IPX 23 /* Novell Internet Protocol */
+#define AF_SIP 24 /* Simple Internet Protocol */
+#define pseudo_AF_PIP 25 /* Help Identify PIP packets */
+#define AF_ISDN 26 /* Integrated Services Digital Network*/
+#define AF_E164 AF_ISDN /* CCITT E.164 recommendation */
+#define pseudo_AF_KEY 27 /* Internal key-management function */
+#endif
+#define AF_INET6 28 /* IPv6 */
+#if __BSD_VISIBLE
+#define AF_NATM 29 /* native ATM access */
+#define AF_ATM 30 /* ATM */
+#define pseudo_AF_HDRCMPLT 31 /* Used by BPF to not rewrite headers
+ * in interface output routine
+ */
+#define AF_NETGRAPH 32 /* Netgraph sockets */
+#define AF_SLOW 33 /* 802.3ad slow protocol */
+#define AF_SCLUSTER 34 /* Sitara cluster protocol */
+#define AF_ARP 35
+#define AF_BLUETOOTH 36 /* Bluetooth sockets */
+#define AF_IEEE80211 37 /* IEEE 802.11 protocol */
+#define AF_MAX 38
+/*
+ * When allocating a new AF_ constant, please only allocate
+ * even numbered constants for FreeBSD until 134 as odd numbered AF_
+ * constants 39-133 are now reserved for vendors.
+ */
+#define AF_VENDOR00 39
+#define AF_VENDOR01 41
+#define AF_VENDOR02 43
+#define AF_VENDOR03 45
+#define AF_VENDOR04 47
+#define AF_VENDOR05 49
+#define AF_VENDOR06 51
+#define AF_VENDOR07 53
+#define AF_VENDOR08 55
+#define AF_VENDOR09 57
+#define AF_VENDOR10 59
+#define AF_VENDOR11 61
+#define AF_VENDOR12 63
+#define AF_VENDOR13 65
+#define AF_VENDOR14 67
+#define AF_VENDOR15 69
+#define AF_VENDOR16 71
+#define AF_VENDOR17 73
+#define AF_VENDOR18 75
+#define AF_VENDOR19 77
+#define AF_VENDOR20 79
+#define AF_VENDOR21 81
+#define AF_VENDOR22 83
+#define AF_VENDOR23 85
+#define AF_VENDOR24 87
+#define AF_VENDOR25 89
+#define AF_VENDOR26 91
+#define AF_VENDOR27 93
+#define AF_VENDOR28 95
+#define AF_VENDOR29 97
+#define AF_VENDOR30 99
+#define AF_VENDOR31 101
+#define AF_VENDOR32 103
+#define AF_VENDOR33 105
+#define AF_VENDOR34 107
+#define AF_VENDOR35 109
+#define AF_VENDOR36 111
+#define AF_VENDOR37 113
+#define AF_VENDOR38 115
+#define AF_VENDOR39 117
+#define AF_VENDOR40 119
+#define AF_VENDOR41 121
+#define AF_VENDOR42 123
+#define AF_VENDOR43 125
+#define AF_VENDOR44 127
+#define AF_VENDOR45 129
+#define AF_VENDOR46 131
+#define AF_VENDOR47 133
+#endif
+
+/*
+ * Structure used by kernel to store most
+ * addresses.
+ */
+struct sockaddr {
+ unsigned char sa_len; /* total length */
+ sa_family_t sa_family; /* address family */
+ char sa_data[14]; /* actually longer; address value */
+};
+#if __BSD_VISIBLE
+#define SOCK_MAXADDRLEN 255 /* longest possible addresses */
+
+/*
+ * Structure used by kernel to pass protocol
+ * information in raw sockets.
+ */
+struct sockproto {
+ unsigned short sp_family; /* address family */
+ unsigned short sp_protocol; /* protocol */
+};
+#endif
+
+#ifndef _STRUCT_SOCKADDR_STORAGE_DECLARED
+/*
+ * RFC 2553: protocol-independent placeholder for socket addresses
+ */
+#define _SS_MAXSIZE 128U
+#define _SS_ALIGNSIZE (sizeof(__int64_t))
+#define _SS_PAD1SIZE (_SS_ALIGNSIZE - sizeof(unsigned char) - \
+ sizeof(sa_family_t))
+#define _SS_PAD2SIZE (_SS_MAXSIZE - sizeof(unsigned char) - \
+ sizeof(sa_family_t) - _SS_PAD1SIZE - _SS_ALIGNSIZE)
+
+struct sockaddr_storage {
+ unsigned char ss_len; /* address length */
+ sa_family_t ss_family; /* address family */
+ char __ss_pad1[_SS_PAD1SIZE];
+ __int64_t __ss_align; /* force desired struct alignment */
+ char __ss_pad2[_SS_PAD2SIZE];
+};
+#define _STRUCT_SOCKADDR_STORAGE_DECLARED
+#endif
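+
+/*
+ * Usage sketch: sockaddr_storage is large and aligned enough to hold
+ * any address the kernel can return, e.g. from getpeername(2):
+ *
+ *	struct sockaddr_storage ss;
+ *	socklen_t len = sizeof(ss);
+ *
+ *	if (getpeername(s, (struct sockaddr *)&ss, &len) == 0 &&
+ *	    ss.ss_family == AF_INET6)
+ *		handle_ipv6(&ss);	// handle_ipv6() is hypothetical
+ */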
+
+#if __BSD_VISIBLE
+/*
+ * Protocol families, same as address families for now.
+ */
+#define PF_UNSPEC AF_UNSPEC
+#define PF_LOCAL AF_LOCAL
+#define PF_UNIX PF_LOCAL /* backward compatibility */
+#define PF_INET AF_INET
+#define PF_IMPLINK AF_IMPLINK
+#define PF_PUP AF_PUP
+#define PF_CHAOS AF_CHAOS
+#define PF_NETBIOS AF_NETBIOS
+#define PF_ISO AF_ISO
+#define PF_OSI AF_ISO
+#define PF_ECMA AF_ECMA
+#define PF_DATAKIT AF_DATAKIT
+#define PF_CCITT AF_CCITT
+#define PF_SNA AF_SNA
+#define PF_DECnet AF_DECnet
+#define PF_DLI AF_DLI
+#define PF_LAT AF_LAT
+#define PF_HYLINK AF_HYLINK
+#define PF_APPLETALK AF_APPLETALK
+#define PF_ROUTE AF_ROUTE
+#define PF_LINK AF_LINK
+#define PF_XTP pseudo_AF_XTP /* really just proto family, no AF */
+#define PF_COIP AF_COIP
+#define PF_CNT AF_CNT
+#define PF_SIP AF_SIP
+#define PF_IPX AF_IPX
+#define PF_RTIP pseudo_AF_RTIP /* same format as AF_INET */
+#define PF_PIP pseudo_AF_PIP
+#define PF_ISDN AF_ISDN
+#define PF_KEY pseudo_AF_KEY
+#define PF_INET6 AF_INET6
+#define PF_NATM AF_NATM
+#define PF_ATM AF_ATM
+#define PF_NETGRAPH AF_NETGRAPH
+#define PF_SLOW AF_SLOW
+#define PF_SCLUSTER AF_SCLUSTER
+#define PF_ARP AF_ARP
+#define PF_BLUETOOTH AF_BLUETOOTH
+
+#define PF_MAX AF_MAX
+
+/*
+ * Definitions for network related sysctl, CTL_NET.
+ *
+ * Second level is protocol family.
+ * Third level is protocol number.
+ *
+ * Further levels are defined by the individual families below.
+ */
+#define NET_MAXID AF_MAX
+
+#define CTL_NET_NAMES { \
+ { 0, 0 }, \
+ { "unix", CTLTYPE_NODE }, \
+ { "inet", CTLTYPE_NODE }, \
+ { "implink", CTLTYPE_NODE }, \
+ { "pup", CTLTYPE_NODE }, \
+ { "chaos", CTLTYPE_NODE }, \
+ { "xerox_ns", CTLTYPE_NODE }, \
+ { "iso", CTLTYPE_NODE }, \
+ { "emca", CTLTYPE_NODE }, \
+ { "datakit", CTLTYPE_NODE }, \
+ { "ccitt", CTLTYPE_NODE }, \
+ { "ibm_sna", CTLTYPE_NODE }, \
+ { "decnet", CTLTYPE_NODE }, \
+ { "dec_dli", CTLTYPE_NODE }, \
+ { "lat", CTLTYPE_NODE }, \
+ { "hylink", CTLTYPE_NODE }, \
+ { "appletalk", CTLTYPE_NODE }, \
+ { "route", CTLTYPE_NODE }, \
+ { "link_layer", CTLTYPE_NODE }, \
+ { "xtp", CTLTYPE_NODE }, \
+ { "coip", CTLTYPE_NODE }, \
+ { "cnt", CTLTYPE_NODE }, \
+ { "rtip", CTLTYPE_NODE }, \
+ { "ipx", CTLTYPE_NODE }, \
+ { "sip", CTLTYPE_NODE }, \
+ { "pip", CTLTYPE_NODE }, \
+ { "isdn", CTLTYPE_NODE }, \
+ { "key", CTLTYPE_NODE }, \
+ { "inet6", CTLTYPE_NODE }, \
+ { "natm", CTLTYPE_NODE }, \
+ { "atm", CTLTYPE_NODE }, \
+ { "hdrcomplete", CTLTYPE_NODE }, \
+ { "netgraph", CTLTYPE_NODE }, \
+ { "snp", CTLTYPE_NODE }, \
+ { "scp", CTLTYPE_NODE }, \
+}
+
+/*
+ * PF_ROUTE - Routing table
+ *
+ * Three additional levels are defined:
+ * Fourth: address family, 0 is wildcard
+ * Fifth: type of info, defined below
+ * Sixth: flag(s) to mask with for NET_RT_FLAGS
+ */
+#define NET_RT_DUMP 1 /* dump; may limit to a.f. */
+#define NET_RT_FLAGS 2 /* by flags, e.g. RESOLVING */
+#define NET_RT_IFLIST 3 /* survey interface list */
+#define NET_RT_IFMALIST 4 /* return multicast address list */
+#define NET_RT_MAXID 5
+
+#define CTL_NET_RT_NAMES { \
+ { 0, 0 }, \
+ { "dump", CTLTYPE_STRUCT }, \
+ { "flags", CTLTYPE_STRUCT }, \
+ { "iflist", CTLTYPE_STRUCT }, \
+ { "ifmalist", CTLTYPE_STRUCT }, \
+}
+#endif /* __BSD_VISIBLE */
+
+/*
+ * Maximum queue length specifiable by listen.
+ */
+#define SOMAXCONN 128
+
+/*
+ * Message header for recvmsg and sendmsg calls.
+ * Used as value-result for recvmsg, value only for sendmsg.
+ */
+struct msghdr {
+ void *msg_name; /* optional address */
+ socklen_t msg_namelen; /* size of address */
+ struct iovec *msg_iov; /* scatter/gather array */
+ int msg_iovlen; /* # elements in msg_iov */
+ void *msg_control; /* ancillary data, see below */
+ socklen_t msg_controllen; /* ancillary data buffer len */
+ int msg_flags; /* flags on received message */
+};
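+
+/*
+ * Usage sketch: receiving into a single iovec with recvmsg(2);
+ * "buf" and "s" are assumed to exist in the caller:
+ *
+ *	struct iovec iov = { buf, sizeof(buf) };
+ *	struct msghdr msg = { NULL, 0, &iov, 1, NULL, 0, 0 };
+ *	ssize_t n = recvmsg(s, &msg, 0);
+ */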
+
+#define MSG_OOB 0x1 /* process out-of-band data */
+#define MSG_PEEK 0x2 /* peek at incoming message */
+#define MSG_DONTROUTE 0x4 /* send without using routing tables */
+#define MSG_EOR 0x8 /* data completes record */
+#define MSG_TRUNC 0x10 /* data discarded before delivery */
+#define MSG_CTRUNC 0x20 /* control data lost before delivery */
+#define MSG_WAITALL 0x40 /* wait for full request or error */
+#define MSG_NOTIFICATION 0x2000 /* SCTP notification */
+#if __BSD_VISIBLE
+#define MSG_DONTWAIT 0x80 /* this message should be nonblocking */
+#define MSG_EOF 0x100 /* data completes connection */
+#define MSG_NBIO 0x4000 /* FIONBIO mode, used by fifofs */
+#define MSG_COMPAT 0x8000 /* used in sendit() */
+#endif
+#ifdef _KERNEL
+#define MSG_SOCALLBCK 0x10000 /* for use by socket callbacks - soreceive (TCP) */
+#endif
+#if __BSD_VISIBLE
+#define MSG_NOSIGNAL 0x20000 /* do not generate SIGPIPE on EOF */
+#endif
+
+/*
+ * Header for ancillary data objects in msg_control buffer.
+ * Used for additional information with/about a datagram
+ * not expressible by flags. The format is a sequence
+ * of message elements headed by cmsghdr structures.
+ */
+struct cmsghdr {
+ socklen_t cmsg_len; /* data byte count, including hdr */
+ int cmsg_level; /* originating protocol */
+ int cmsg_type; /* protocol-specific type */
+/* followed by u_char cmsg_data[]; */
+};
+
+#if __BSD_VISIBLE
+/*
+ * While we may have more groups than this, the cmsgcred struct must
+ * be able to fit in an mbuf and we have historically supported a
+ * maximum of 16 groups.
+ */
+#define CMGROUP_MAX 16
+
+/*
+ * Credentials structure, used to verify the identity of a peer
+ * process that has sent us a message. This is allocated by the
+ * peer process but filled in by the kernel. This prevents the
+ * peer from lying about its identity. (Note that cmcred_groups[0]
+ * is the effective GID.)
+ */
+struct cmsgcred {
+ pid_t cmcred_pid; /* PID of sending process */
+ uid_t cmcred_uid; /* real UID of sending process */
+ uid_t cmcred_euid; /* effective UID of sending process */
+ gid_t cmcred_gid; /* real GID of sending process */
+ short cmcred_ngroups; /* number of groups */
+ gid_t cmcred_groups[CMGROUP_MAX]; /* groups */
+};
+
+/*
+ * Socket credentials.
+ */
+struct sockcred {
+ uid_t sc_uid; /* real user id */
+ uid_t sc_euid; /* effective user id */
+ gid_t sc_gid; /* real group id */
+ gid_t sc_egid; /* effective group id */
+ int sc_ngroups; /* number of supplemental groups */
+ gid_t sc_groups[1]; /* variable length */
+};
+
+/*
+ * Compute size of a sockcred structure with groups.
+ */
+#define SOCKCREDSIZE(ngrps) \
+ (sizeof(struct sockcred) + (sizeof(gid_t) * ((ngrps) - 1)))
+
+#endif /* __BSD_VISIBLE */
+
+/* given pointer to struct cmsghdr, return pointer to data */
+#define CMSG_DATA(cmsg) ((unsigned char *)(cmsg) + \
+ _ALIGN(sizeof(struct cmsghdr)))
+
+/* given pointer to struct cmsghdr, return pointer to next cmsghdr */
+#define CMSG_NXTHDR(mhdr, cmsg) \
+ ((char *)(cmsg) == NULL ? CMSG_FIRSTHDR(mhdr) : \
+ ((char *)(cmsg) + _ALIGN(((struct cmsghdr *)(cmsg))->cmsg_len) + \
+ _ALIGN(sizeof(struct cmsghdr)) > \
+ (char *)(mhdr)->msg_control + (mhdr)->msg_controllen) ? \
+ (struct cmsghdr *)0 : \
+ (struct cmsghdr *)((char *)(cmsg) + \
+ _ALIGN(((struct cmsghdr *)(cmsg))->cmsg_len)))
+
+/*
+ * RFC 2292 requires checking msg_controllen, in case the kernel returns
+ * an empty list for some reason.
+ */
+#define CMSG_FIRSTHDR(mhdr) \
+ ((mhdr)->msg_controllen >= sizeof(struct cmsghdr) ? \
+ (struct cmsghdr *)(mhdr)->msg_control : \
+ (struct cmsghdr *)NULL)
+
+#if __BSD_VISIBLE
+/* RFC 2292 additions */
+#define CMSG_SPACE(l) (_ALIGN(sizeof(struct cmsghdr)) + _ALIGN(l))
+#define CMSG_LEN(l) (_ALIGN(sizeof(struct cmsghdr)) + (l))
+#endif
+
+#ifdef _KERNEL
+#define CMSG_ALIGN(n) _ALIGN(n)
+#endif
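+
+/*
+ * Usage sketch: walking ancillary data with the macros above, where
+ * "msg" is a struct msghdr filled in by recvmsg(2):
+ *
+ *	struct cmsghdr *cm;
+ *
+ *	for (cm = CMSG_FIRSTHDR(&msg); cm != NULL;
+ *	    cm = CMSG_NXTHDR(&msg, cm)) {
+ *		if (cm->cmsg_level == SOL_SOCKET &&
+ *		    cm->cmsg_type == SCM_RIGHTS)
+ *			break;	// descriptors start at CMSG_DATA(cm)
+ *	}
+ */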
+
+/* "Socket"-level control message types: */
+#define SCM_RIGHTS 0x01 /* access rights (array of int) */
+#if __BSD_VISIBLE
+#define SCM_TIMESTAMP 0x02 /* timestamp (struct timeval) */
+#define SCM_CREDS 0x03 /* process creds (struct cmsgcred) */
+#define SCM_BINTIME 0x04 /* timestamp (struct bintime) */
+#endif
+
+#if __BSD_VISIBLE
+/*
+ * 4.3-compat sockaddr (move to compat file later).
+ */
+struct osockaddr {
+ unsigned short sa_family; /* address family */
+ char sa_data[14]; /* up to 14 bytes of direct address */
+};
+
+/*
+ * 4.3-compat message header (move to compat file later).
+ */
+struct omsghdr {
+ char *msg_name; /* optional address */
+ int msg_namelen; /* size of address */
+ struct iovec *msg_iov; /* scatter/gather array */
+ int msg_iovlen; /* # elements in msg_iov */
+ char *msg_accrights; /* access rights sent/received */
+ int msg_accrightslen;
+};
+#endif
+
+/*
+ * howto arguments for shutdown(2), specified by POSIX.1g.
+ */
+#define SHUT_RD 0 /* shut down the reading side */
+#define SHUT_WR 1 /* shut down the writing side */
+#define SHUT_RDWR 2 /* shut down both sides */
+
+/* we cheat and use the SHUT_XX defines for these */
+#define PRU_FLUSH_RD SHUT_RD
+#define PRU_FLUSH_WR SHUT_WR
+#define PRU_FLUSH_RDWR SHUT_RDWR
+
+
+#if __BSD_VISIBLE
+/*
+ * sendfile(2) header/trailer struct
+ */
+struct sf_hdtr {
+ struct iovec *headers; /* pointer to an array of header struct iovec's */
+ int hdr_cnt; /* number of header iovec's */
+ struct iovec *trailers; /* pointer to an array of trailer struct iovec's */
+ int trl_cnt; /* number of trailer iovec's */
+};
+
+/*
+ * Sendfile-specific flag(s)
+ */
+#define SF_NODISKIO 0x00000001
+#define SF_MNOWAIT 0x00000002
+#define SF_SYNC 0x00000004
+#endif
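+
+/*
+ * Usage sketch: sending a whole file with one leading header iovec;
+ * "fd" is a regular file, "s" a stream socket, and "hdr_buf"/"hdr_len"
+ * describe the header (all assumed):
+ *
+ *	struct iovec iov = { hdr_buf, hdr_len };
+ *	struct sf_hdtr hdtr = { &iov, 1, NULL, 0 };
+ *	off_t sbytes;
+ *
+ *	if (sendfile(fd, s, 0, 0, &hdtr, &sbytes, 0) == -1)
+ *		sbytes = 0;	// error; see errno
+ */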
+
+#ifndef _KERNEL
+
+#include <rtems/freebsd/sys/cdefs.h>
+
+__BEGIN_DECLS
+int accept(int, struct sockaddr * __restrict, socklen_t * __restrict);
+int bind(int, const struct sockaddr *, socklen_t);
+int connect(int, const struct sockaddr *, socklen_t);
+int getpeername(int, struct sockaddr * __restrict, socklen_t * __restrict);
+int getsockname(int, struct sockaddr * __restrict, socklen_t * __restrict);
+int getsockopt(int, int, int, void * __restrict, socklen_t * __restrict);
+int listen(int, int);
+ssize_t recv(int, void *, size_t, int);
+ssize_t recvfrom(int, void *, size_t, int, struct sockaddr * __restrict, socklen_t * __restrict);
+ssize_t recvmsg(int, struct msghdr *, int);
+ssize_t send(int, const void *, size_t, int);
+ssize_t sendto(int, const void *,
+ size_t, int, const struct sockaddr *, socklen_t);
+ssize_t sendmsg(int, const struct msghdr *, int);
+#if __BSD_VISIBLE
+int sendfile(int, int, off_t, size_t, struct sf_hdtr *, off_t *, int);
+int setfib(int);
+#endif
+int setsockopt(int, int, int, const void *, socklen_t);
+int shutdown(int, int);
+int sockatmark(int);
+int socket(int, int, int);
+int socketpair(int, int, int, int *);
+__END_DECLS
+
+#endif /* !_KERNEL */
+
+#ifdef _KERNEL
+struct socket;
+
+struct tcpcb *so_sototcpcb(struct socket *so);
+struct inpcb *so_sotoinpcb(struct socket *so);
+struct sockbuf *so_sockbuf_snd(struct socket *);
+struct sockbuf *so_sockbuf_rcv(struct socket *);
+
+int so_state_get(const struct socket *);
+void so_state_set(struct socket *, int);
+
+int so_options_get(const struct socket *);
+void so_options_set(struct socket *, int);
+
+int so_error_get(const struct socket *);
+void so_error_set(struct socket *, int);
+
+int so_linger_get(const struct socket *);
+void so_linger_set(struct socket *, int);
+
+struct protosw *so_protosw_get(const struct socket *);
+void so_protosw_set(struct socket *, struct protosw *);
+
+void so_sorwakeup_locked(struct socket *so);
+void so_sowwakeup_locked(struct socket *so);
+
+void so_sorwakeup(struct socket *so);
+void so_sowwakeup(struct socket *so);
+
+void so_lock(struct socket *so);
+void so_unlock(struct socket *so);
+
+void so_listeners_apply_all(struct socket *so, void (*func)(struct socket *, void *), void *arg);
+
+#endif
+
+
+#endif /* !_SYS_SOCKET_HH_ */
diff --git a/rtems/freebsd/sys/socketvar.h b/rtems/freebsd/sys/socketvar.h
new file mode 100644
index 00000000..17ff0901
--- /dev/null
+++ b/rtems/freebsd/sys/socketvar.h
@@ -0,0 +1,393 @@
+/*-
+ * Copyright (c) 1982, 1986, 1990, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)socketvar.h 8.3 (Berkeley) 2/19/95
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _SYS_SOCKETVAR_HH_
+#define _SYS_SOCKETVAR_HH_
+
+#include <rtems/freebsd/sys/queue.h> /* for TAILQ macros */
+#include <rtems/freebsd/sys/selinfo.h> /* for struct selinfo */
+#include <rtems/freebsd/sys/_lock.h>
+#include <rtems/freebsd/sys/_mutex.h>
+#include <rtems/freebsd/sys/_sx.h>
+#include <rtems/freebsd/sys/sockbuf.h>
+#include <rtems/freebsd/sys/sockstate.h>
+#ifdef _KERNEL
+#include <rtems/freebsd/sys/sockopt.h>
+#endif
+
+struct vnet;
+
+/*
+ * Kernel structure per socket.
+ * Contains send and receive buffer queues,
+ * handle on protocol and pointer to protocol
+ * private data and error information.
+ */
+typedef u_quad_t so_gen_t;
+
+struct socket;
+
+/*-
+ * Locking key to struct socket:
+ * (a) constant after allocation, no locking required.
+ * (b) locked by SOCK_LOCK(so).
+ * (c) locked by SOCKBUF_LOCK(&so->so_rcv).
+ * (d) locked by SOCKBUF_LOCK(&so->so_snd).
+ * (e) locked by ACCEPT_LOCK().
+ * (f) not locked since integer reads/writes are atomic.
+ * (g) used only as a sleep/wakeup address, no value.
+ * (h) locked by global mutex so_global_mtx.
+ */
+struct socket {
+ int so_count; /* (b) reference count */
+ short so_type; /* (a) generic type, see socket.h */
+ short so_options; /* from socket call, see socket.h */
+ short so_linger; /* time to linger while closing */
+ short so_state; /* (b) internal state flags SS_* */
+ int so_qstate; /* (e) internal state flags SQ_* */
+ void *so_pcb; /* protocol control block */
+ struct vnet *so_vnet; /* network stack instance */
+ struct protosw *so_proto; /* (a) protocol handle */
+/*
+ * Variables for connection queuing.
+ * Socket where accepts occur is so_head in all subsidiary sockets.
+ * If so_head is 0, socket is not related to an accept.
+ * For head socket so_incomp queues partially completed connections,
+ * while so_comp is a queue of connections ready to be accepted.
+ * If a connection is aborted and it has so_head set, then
+ * it has to be pulled out of either so_incomp or so_comp.
+ * We allow connections to queue up based on current queue lengths
+ * and limit on number of queued connections for this socket.
+ */
+ struct socket *so_head; /* (e) back pointer to listen socket */
+ TAILQ_HEAD(, socket) so_incomp; /* (e) queue of partial unaccepted connections */
+ TAILQ_HEAD(, socket) so_comp; /* (e) queue of complete unaccepted connections */
+ TAILQ_ENTRY(socket) so_list; /* (e) list of unaccepted connections */
+ u_short so_qlen; /* (e) number of unaccepted connections */
+ u_short so_incqlen; /* (e) number of unaccepted incomplete
+ connections */
+ u_short so_qlimit; /* (e) max number queued connections */
+ short so_timeo; /* (g) connection timeout */
+ u_short so_error; /* (f) error affecting connection */
+ struct sigio *so_sigio; /* [sg] information for async I/O or
+ out of band data (SIGURG) */
+ u_long so_oobmark; /* (c) chars to oob mark */
+ TAILQ_HEAD(, aiocblist) so_aiojobq; /* AIO ops waiting on socket */
+
+ struct sockbuf so_rcv, so_snd;
+
+ struct ucred *so_cred; /* (a) user credentials */
+ struct label *so_label; /* (b) MAC label for socket */
+ struct label *so_peerlabel; /* (b) cached MAC label for peer */
+ /* NB: generation count must not be first. */
+ so_gen_t so_gencnt; /* (h) generation count */
+ void *so_emuldata; /* (b) private data for emulators */
+ struct so_accf {
+ struct accept_filter *so_accept_filter;
+ void *so_accept_filter_arg; /* saved filter args */
+ char *so_accept_filter_str; /* saved user args */
+ } *so_accf;
+ int so_fibnum; /* routing domain for this socket */
+};
+
+/*
+ * Global accept mutex to serialize access to accept queues and
+ * fields associated with multiple sockets. This allows us to
+ * avoid defining a lock order between listen and accept sockets
+ * until such time as it proves to be a good idea.
+ */
+extern struct mtx accept_mtx;
+#define ACCEPT_LOCK_ASSERT() mtx_assert(&accept_mtx, MA_OWNED)
+#define ACCEPT_UNLOCK_ASSERT() mtx_assert(&accept_mtx, MA_NOTOWNED)
+#define ACCEPT_LOCK() mtx_lock(&accept_mtx)
+#define ACCEPT_UNLOCK() mtx_unlock(&accept_mtx)
+
+/*
+ * Per-socket mutex: we reuse the receive socket buffer mutex for space
+ * efficiency. This decision should probably be revisited as we optimize
+ * locking for the socket code.
+ */
+#define SOCK_MTX(_so) SOCKBUF_MTX(&(_so)->so_rcv)
+#define SOCK_LOCK(_so) SOCKBUF_LOCK(&(_so)->so_rcv)
+#define SOCK_OWNED(_so) SOCKBUF_OWNED(&(_so)->so_rcv)
+#define SOCK_UNLOCK(_so) SOCKBUF_UNLOCK(&(_so)->so_rcv)
+#define SOCK_LOCK_ASSERT(_so) SOCKBUF_LOCK_ASSERT(&(_so)->so_rcv)
+
+/*
+ * Socket state bits stored in so_qstate.
+ */
+#define SQ_INCOMP 0x0800 /* unaccepted, incomplete connection */
+#define SQ_COMP 0x1000 /* unaccepted, complete connection */
+
+/*
+ * Externalized form of struct socket used by the sysctl(3) interface.
+ */
+struct xsocket {
+ size_t xso_len; /* length of this structure */
+ struct socket *xso_so; /* makes a convenient handle sometimes */
+ short so_type;
+ short so_options;
+ short so_linger;
+ short so_state;
+ caddr_t so_pcb; /* another convenient handle */
+ int xso_protocol;
+ int xso_family;
+ u_short so_qlen;
+ u_short so_incqlen;
+ u_short so_qlimit;
+ short so_timeo;
+ u_short so_error;
+ pid_t so_pgid;
+ u_long so_oobmark;
+ struct xsockbuf so_rcv, so_snd;
+ uid_t so_uid; /* XXX */
+};
+
+#ifdef _KERNEL
+
+/*
+ * Macros for sockets and socket buffering.
+ */
+
+/*
+ * Flags to sblock().
+ */
+#define SBL_WAIT 0x00000001 /* Wait if not immediately available. */
+#define SBL_NOINTR 0x00000002 /* Force non-interruptible sleep. */
+#define SBL_VALID (SBL_WAIT | SBL_NOINTR)
+
+/*
+ * Do we need to notify the other side when I/O is possible?
+ */
+#define sb_notify(sb) (((sb)->sb_flags & (SB_WAIT | SB_SEL | SB_ASYNC | \
+ SB_UPCALL | SB_AIO | SB_KNOTE)) != 0)
+
+/* do we have to send all at once on a socket? */
+#define sosendallatonce(so) \
+ ((so)->so_proto->pr_flags & PR_ATOMIC)
+
+/* can we read something from so? */
+#define soreadabledata(so) \
+ ((so)->so_rcv.sb_cc >= (so)->so_rcv.sb_lowat || \
+ !TAILQ_EMPTY(&(so)->so_comp) || (so)->so_error)
+#define soreadable(so) \
+ (soreadabledata(so) || ((so)->so_rcv.sb_state & SBS_CANTRCVMORE))
+
+/* can we write something to so? */
+#define sowriteable(so) \
+ ((sbspace(&(so)->so_snd) >= (so)->so_snd.sb_lowat && \
+ (((so)->so_state&SS_ISCONNECTED) || \
+ ((so)->so_proto->pr_flags&PR_CONNREQUIRED)==0)) || \
+ ((so)->so_snd.sb_state & SBS_CANTSENDMORE) || \
+ (so)->so_error)
+
+/*
+ * soref()/sorele() ref-count the socket structure. Note that you must
+ * still explicitly close the socket, but the last ref count will free
+ * the structure.
+ */
+#define soref(so) do { \
+ SOCK_LOCK_ASSERT(so); \
+ ++(so)->so_count; \
+} while (0)
+
+#define sorele(so) do { \
+ ACCEPT_LOCK_ASSERT(); \
+ SOCK_LOCK_ASSERT(so); \
+ if ((so)->so_count <= 0) \
+ panic("sorele"); \
+ if (--(so)->so_count == 0) \
+ sofree(so); \
+ else { \
+ SOCK_UNLOCK(so); \
+ ACCEPT_UNLOCK(); \
+ } \
+} while (0)
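+
+/*
+ * Usage sketch of the reference pattern; sorele() consumes both the
+ * accept and socket locks and may free "so":
+ *
+ *	SOCK_LOCK(so);
+ *	soref(so);
+ *	SOCK_UNLOCK(so);
+ *	(work with so)
+ *	ACCEPT_LOCK();
+ *	SOCK_LOCK(so);
+ *	sorele(so);	// so must not be touched afterwards
+ */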
+
+#define sotryfree(so) do { \
+ ACCEPT_LOCK_ASSERT(); \
+ SOCK_LOCK_ASSERT(so); \
+ if ((so)->so_count == 0) \
+ sofree(so); \
+ else { \
+ SOCK_UNLOCK(so); \
+ ACCEPT_UNLOCK(); \
+ } \
+} while(0)
+
+/*
+ * In sorwakeup() and sowwakeup(), acquire the socket buffer lock to
+ * avoid a non-atomic test-and-wakeup. However, sowakeup is
+ * responsible for releasing the lock if it is called. We unlock only
+ * if we don't call into sowakeup. If any code is introduced that
+ * directly invokes the underlying sowakeup() primitives, it must
+ * maintain the same semantics.
+ */
+#define sorwakeup_locked(so) do { \
+ SOCKBUF_LOCK_ASSERT(&(so)->so_rcv); \
+ if (sb_notify(&(so)->so_rcv)) \
+ sowakeup((so), &(so)->so_rcv); \
+ else \
+ SOCKBUF_UNLOCK(&(so)->so_rcv); \
+} while (0)
+
+#define sorwakeup(so) do { \
+ SOCKBUF_LOCK(&(so)->so_rcv); \
+ sorwakeup_locked(so); \
+} while (0)
+
+#define sowwakeup_locked(so) do { \
+ SOCKBUF_LOCK_ASSERT(&(so)->so_snd); \
+ if (sb_notify(&(so)->so_snd)) \
+ sowakeup((so), &(so)->so_snd); \
+ else \
+ SOCKBUF_UNLOCK(&(so)->so_snd); \
+} while (0)
+
+#define sowwakeup(so) do { \
+ SOCKBUF_LOCK(&(so)->so_snd); \
+ sowwakeup_locked(so); \
+} while (0)
+
+struct accept_filter {
+ char accf_name[16];
+ int (*accf_callback)
+ (struct socket *so, void *arg, int waitflag);
+ void * (*accf_create)
+ (struct socket *so, char *arg);
+ void (*accf_destroy)
+ (struct socket *so);
+ SLIST_ENTRY(accept_filter) accf_next;
+};
+
+#ifdef MALLOC_DECLARE
+MALLOC_DECLARE(M_ACCF);
+MALLOC_DECLARE(M_PCB);
+MALLOC_DECLARE(M_SONAME);
+#endif
+
+extern int maxsockets;
+extern u_long sb_max;
+extern struct uma_zone *socket_zone;
+extern so_gen_t so_gencnt;
+
+struct mbuf;
+struct sockaddr;
+struct ucred;
+struct uio;
+
+/* 'which' values for socket upcalls. */
+#define SO_RCV 1
+#define SO_SND 2
+
+/* Return values for socket upcalls. */
+#define SU_OK 0
+#define SU_ISCONNECTED 1
+
+/*
+ * From uipc_socket and friends
+ */
+int sockargs(struct mbuf **mp, caddr_t buf, int buflen, int type);
+int getsockaddr(struct sockaddr **namp, caddr_t uaddr, size_t len);
+void soabort(struct socket *so);
+int soaccept(struct socket *so, struct sockaddr **nam);
+int socheckuid(struct socket *so, uid_t uid);
+int sobind(struct socket *so, struct sockaddr *nam, struct thread *td);
+int soclose(struct socket *so);
+int soconnect(struct socket *so, struct sockaddr *nam, struct thread *td);
+int soconnect2(struct socket *so1, struct socket *so2);
+int socow_setup(struct mbuf *m0, struct uio *uio);
+int socreate(int dom, struct socket **aso, int type, int proto,
+ struct ucred *cred, struct thread *td);
+int sodisconnect(struct socket *so);
+struct sockaddr *sodupsockaddr(const struct sockaddr *sa, int mflags);
+void sofree(struct socket *so);
+void sohasoutofband(struct socket *so);
+int solisten(struct socket *so, int backlog, struct thread *td);
+void solisten_proto(struct socket *so, int backlog);
+int solisten_proto_check(struct socket *so);
+struct socket *
+ sonewconn(struct socket *head, int connstatus);
+
+
+int sopoll(struct socket *so, int events, struct ucred *active_cred,
+ struct thread *td);
+int sopoll_generic(struct socket *so, int events,
+ struct ucred *active_cred, struct thread *td);
+int soreceive(struct socket *so, struct sockaddr **paddr, struct uio *uio,
+ struct mbuf **mp0, struct mbuf **controlp, int *flagsp);
+int soreceive_stream(struct socket *so, struct sockaddr **paddr,
+ struct uio *uio, struct mbuf **mp0, struct mbuf **controlp,
+ int *flagsp);
+int soreceive_dgram(struct socket *so, struct sockaddr **paddr,
+ struct uio *uio, struct mbuf **mp0, struct mbuf **controlp,
+ int *flagsp);
+int soreceive_generic(struct socket *so, struct sockaddr **paddr,
+ struct uio *uio, struct mbuf **mp0, struct mbuf **controlp,
+ int *flagsp);
+int soreserve(struct socket *so, u_long sndcc, u_long rcvcc);
+void sorflush(struct socket *so);
+int sosend(struct socket *so, struct sockaddr *addr, struct uio *uio,
+ struct mbuf *top, struct mbuf *control, int flags,
+ struct thread *td);
+int sosend_dgram(struct socket *so, struct sockaddr *addr,
+ struct uio *uio, struct mbuf *top, struct mbuf *control,
+ int flags, struct thread *td);
+int sosend_generic(struct socket *so, struct sockaddr *addr,
+ struct uio *uio, struct mbuf *top, struct mbuf *control,
+ int flags, struct thread *td);
+int soshutdown(struct socket *so, int how);
+void sotoxsocket(struct socket *so, struct xsocket *xso);
+void soupcall_clear(struct socket *so, int which);
+void soupcall_set(struct socket *so, int which,
+ int (*func)(struct socket *, void *, int), void *arg);
+void sowakeup(struct socket *so, struct sockbuf *sb);
+int selsocket(struct socket *so, int events, struct timeval *tv,
+ struct thread *td);
+
+/*
+ * Accept filter functions (duh).
+ */
+int accept_filt_add(struct accept_filter *filt);
+int accept_filt_del(char *name);
+struct accept_filter *accept_filt_get(char *name);
+#ifdef ACCEPT_FILTER_MOD
+#ifdef SYSCTL_DECL
+SYSCTL_DECL(_net_inet_accf);
+#endif
+int accept_filt_generic_mod_event(module_t mod, int event, void *data);
+#endif
+
+#endif /* _KERNEL */
+
+#endif /* !_SYS_SOCKETVAR_HH_ */
diff --git a/rtems/freebsd/sys/sockio.h b/rtems/freebsd/sys/sockio.h
new file mode 100644
index 00000000..17bf6736
--- /dev/null
+++ b/rtems/freebsd/sys/sockio.h
@@ -0,0 +1,128 @@
+/*-
+ * Copyright (c) 1982, 1986, 1990, 1993, 1994
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)sockio.h 8.1 (Berkeley) 3/28/94
+ * $FreeBSD$
+ */
+
+#ifndef _SYS_SOCKIO_HH_
+#define _SYS_SOCKIO_HH_
+
+#include <rtems/freebsd/sys/ioccom.h>
+
+/* Socket ioctl's. */
+#define SIOCSHIWAT _IOW('s', 0, int) /* set high watermark */
+#define SIOCGHIWAT _IOR('s', 1, int) /* get high watermark */
+#define SIOCSLOWAT _IOW('s', 2, int) /* set low watermark */
+#define SIOCGLOWAT _IOR('s', 3, int) /* get low watermark */
+#define SIOCATMARK _IOR('s', 7, int) /* at oob mark? */
+#define SIOCSPGRP _IOW('s', 8, int) /* set process group */
+#define SIOCGPGRP _IOR('s', 9, int) /* get process group */
+
+#define SIOCADDRT _IOW('r', 10, struct ortentry) /* add route */
+#define SIOCDELRT _IOW('r', 11, struct ortentry) /* delete route */
+#define SIOCGETVIFCNT _IOWR('r', 15, struct sioc_vif_req)/* get vif pkt cnt */
+#define SIOCGETSGCNT _IOWR('r', 16, struct sioc_sg_req) /* get s,g pkt cnt */
+
+#define SIOCSIFADDR _IOW('i', 12, struct ifreq) /* set ifnet address */
+#define OSIOCGIFADDR _IOWR('i', 13, struct ifreq) /* get ifnet address */
+#define SIOCGIFADDR _IOWR('i', 33, struct ifreq) /* get ifnet address */
+#define SIOCSIFDSTADDR _IOW('i', 14, struct ifreq) /* set p-p address */
+#define OSIOCGIFDSTADDR _IOWR('i', 15, struct ifreq) /* get p-p address */
+#define SIOCGIFDSTADDR _IOWR('i', 34, struct ifreq) /* get p-p address */
+#define SIOCSIFFLAGS _IOW('i', 16, struct ifreq) /* set ifnet flags */
+#define SIOCGIFFLAGS _IOWR('i', 17, struct ifreq) /* get ifnet flags */
+#define OSIOCGIFBRDADDR _IOWR('i', 18, struct ifreq) /* get broadcast addr */
+#define SIOCGIFBRDADDR _IOWR('i', 35, struct ifreq) /* get broadcast addr */
+#define SIOCSIFBRDADDR _IOW('i', 19, struct ifreq) /* set broadcast addr */
+#define OSIOCGIFCONF _IOWR('i', 20, struct ifconf) /* get ifnet list */
+#define SIOCGIFCONF _IOWR('i', 36, struct ifconf) /* get ifnet list */
+#define OSIOCGIFNETMASK _IOWR('i', 21, struct ifreq) /* get net addr mask */
+#define SIOCGIFNETMASK _IOWR('i', 37, struct ifreq) /* get net addr mask */
+#define SIOCSIFNETMASK _IOW('i', 22, struct ifreq) /* set net addr mask */
+#define SIOCGIFMETRIC _IOWR('i', 23, struct ifreq) /* get IF metric */
+#define SIOCSIFMETRIC _IOW('i', 24, struct ifreq) /* set IF metric */
+#define SIOCDIFADDR _IOW('i', 25, struct ifreq) /* delete IF addr */
+#define SIOCAIFADDR _IOW('i', 26, struct ifaliasreq)/* add/chg IF alias */
+
+#define SIOCALIFADDR _IOW('i', 27, struct if_laddrreq) /* add IF addr */
+#define SIOCGLIFADDR _IOWR('i', 28, struct if_laddrreq) /* get IF addr */
+#define SIOCDLIFADDR _IOW('i', 29, struct if_laddrreq) /* delete IF addr */
+#define SIOCSIFCAP _IOW('i', 30, struct ifreq) /* set IF features */
+#define SIOCGIFCAP _IOWR('i', 31, struct ifreq) /* get IF features */
+#define SIOCGIFINDEX _IOWR('i', 32, struct ifreq) /* get IF index */
+#define SIOCGIFMAC _IOWR('i', 38, struct ifreq) /* get IF MAC label */
+#define SIOCSIFMAC _IOW('i', 39, struct ifreq) /* set IF MAC label */
+#define SIOCSIFNAME _IOW('i', 40, struct ifreq) /* set IF name */
+#define SIOCSIFDESCR _IOW('i', 41, struct ifreq) /* set ifnet descr */
+#define SIOCGIFDESCR _IOWR('i', 42, struct ifreq) /* get ifnet descr */
+
+#define SIOCADDMULTI _IOW('i', 49, struct ifreq) /* add m'cast addr */
+#define SIOCDELMULTI _IOW('i', 50, struct ifreq) /* del m'cast addr */
+#define SIOCGIFMTU _IOWR('i', 51, struct ifreq) /* get IF mtu */
+#define SIOCSIFMTU _IOW('i', 52, struct ifreq) /* set IF mtu */
+#define SIOCGIFPHYS _IOWR('i', 53, struct ifreq) /* get IF wire */
+#define SIOCSIFPHYS _IOW('i', 54, struct ifreq) /* set IF wire */
+#define SIOCSIFMEDIA _IOWR('i', 55, struct ifreq) /* set net media */
+#define SIOCGIFMEDIA _IOWR('i', 56, struct ifmediareq) /* get net media */
+
+#define SIOCSIFGENERIC _IOW('i', 57, struct ifreq) /* generic IF set op */
+#define SIOCGIFGENERIC _IOWR('i', 58, struct ifreq) /* generic IF get op */
+
+#define SIOCGIFSTATUS _IOWR('i', 59, struct ifstat) /* get IF status */
+#define SIOCSIFLLADDR _IOW('i', 60, struct ifreq) /* set linklevel addr */
+
+#define SIOCSIFPHYADDR _IOW('i', 70, struct ifaliasreq) /* set gif address */
+#define SIOCGIFPSRCADDR _IOWR('i', 71, struct ifreq) /* get gif psrc addr */
+#define SIOCGIFPDSTADDR _IOWR('i', 72, struct ifreq) /* get gif pdst addr */
+#define SIOCDIFPHYADDR _IOW('i', 73, struct ifreq) /* delete gif addrs */
+#define SIOCSLIFPHYADDR _IOW('i', 74, struct if_laddrreq) /* set gif addrs */
+#define SIOCGLIFPHYADDR _IOWR('i', 75, struct if_laddrreq) /* get gif addrs */
+
+#define SIOCGPRIVATE_0 _IOWR('i', 80, struct ifreq) /* device private 0 */
+#define SIOCGPRIVATE_1 _IOWR('i', 81, struct ifreq) /* device private 1 */
+
+#define SIOCSIFVNET _IOWR('i', 90, struct ifreq) /* move IF jail/vnet */
+#define SIOCSIFRVNET _IOWR('i', 91, struct ifreq) /* reclaim vnet IF */
+
+#define SIOCSDRVSPEC _IOW('i', 123, struct ifdrv) /* set driver-specific
+ parameters */
+#define SIOCGDRVSPEC _IOWR('i', 123, struct ifdrv) /* get driver-specific
+ parameters */
+
+#define SIOCIFCREATE _IOWR('i', 122, struct ifreq) /* create clone if */
+#define SIOCIFCREATE2 _IOWR('i', 124, struct ifreq) /* create clone if */
+#define SIOCIFDESTROY _IOW('i', 121, struct ifreq) /* destroy clone if */
+#define SIOCIFGCLONERS _IOWR('i', 120, struct if_clonereq) /* get cloners */
+
+#define SIOCAIFGROUP _IOW('i', 135, struct ifgroupreq) /* add an ifgroup */
+#define SIOCGIFGROUP _IOWR('i', 136, struct ifgroupreq) /* get ifgroups */
+#define SIOCDIFGROUP _IOW('i', 137, struct ifgroupreq) /* delete ifgroup */
+#define SIOCGIFGMEMB _IOWR('i', 138, struct ifgroupreq) /* get members */
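+
+/*
+ * Usage sketch: interface ioctls operate on an ordinary socket
+ * descriptor; struct ifreq comes from <net/if.h>, and "em0" is just an
+ * example interface name:
+ *
+ *	struct ifreq ifr;
+ *
+ *	memset(&ifr, 0, sizeof(ifr));
+ *	strlcpy(ifr.ifr_name, "em0", sizeof(ifr.ifr_name));
+ *	if (ioctl(s, SIOCGIFFLAGS, &ifr) == 0)
+ *		flags = ifr.ifr_flags;
+ */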
+
+#endif /* !_SYS_SOCKIO_HH_ */
diff --git a/rtems/freebsd/sys/sockopt.h b/rtems/freebsd/sys/sockopt.h
new file mode 100644
index 00000000..09dc5599
--- /dev/null
+++ b/rtems/freebsd/sys/sockopt.h
@@ -0,0 +1,72 @@
+/*-
+ * Copyright (c) 1982, 1986, 1990, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)socketvar.h 8.3 (Berkeley) 2/19/95
+ *
+ * $FreeBSD$
+ */
+#ifndef _SYS_SOCKOPT_HH_
+#define _SYS_SOCKOPT_HH_
+
+#ifndef _KERNEL
+#error "no user-servicable parts inside"
+#endif
+
+
+struct thread;
+struct socket;
+
+/*
+ * Argument structure for sosetopt et seq. This is in the KERNEL
+ * section because it will never be visible to user code.
+ */
+enum sopt_dir { SOPT_GET, SOPT_SET };
+
+struct sockopt {
+ enum sopt_dir sopt_dir; /* is this a get or a set? */
+ int sopt_level; /* second arg of [gs]etsockopt */
+ int sopt_name; /* third arg of [gs]etsockopt */
+ void *sopt_val; /* fourth arg of [gs]etsockopt */
+ size_t sopt_valsize; /* (almost) fifth arg of [gs]etsockopt */
+ struct thread *sopt_td; /* calling thread or null if kernel */
+};
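+
+/*
+ * Usage sketch: kernel code can set an option without a user buffer
+ * through the so_setsockopt() helper declared below:
+ *
+ *	int one = 1;
+ *	int error;
+ *
+ *	error = so_setsockopt(so, SOL_SOCKET, SO_REUSEADDR,
+ *	    &one, sizeof(one));
+ */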
+
+int sosetopt(struct socket *so, struct sockopt *sopt);
+int sogetopt(struct socket *so, struct sockopt *sopt);
+int sooptcopyin(struct sockopt *sopt, void *buf, size_t len, size_t minlen);
+int sooptcopyout(struct sockopt *sopt, const void *buf, size_t len);
+/* XXX; prepare mbuf for (__FreeBSD__ < 3) routines. */
+int soopt_getm(struct sockopt *sopt, struct mbuf **mp);
+int soopt_mcopyin(struct sockopt *sopt, struct mbuf *m);
+int soopt_mcopyout(struct sockopt *sopt, struct mbuf *m);
+int do_getopt_accept_filter(struct socket *so, struct sockopt *sopt);
+int do_setopt_accept_filter(struct socket *so, struct sockopt *sopt);
+int so_setsockopt(struct socket *so, int level, int optname,
+ void *optval, size_t optlen);
+
+#endif /* _SYS_SOCKOPT_HH_ */
diff --git a/rtems/freebsd/sys/sockstate.h b/rtems/freebsd/sys/sockstate.h
new file mode 100644
index 00000000..a6c4149d
--- /dev/null
+++ b/rtems/freebsd/sys/sockstate.h
@@ -0,0 +1,83 @@
+/*-
+ * Copyright (c) 1982, 1986, 1990, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)socketvar.h 8.3 (Berkeley) 2/19/95
+ *
+ * $FreeBSD$
+ */
+#ifndef _SYS_SOCKSTATE_HH_
+#define _SYS_SOCKSTATE_HH_
+
+/*
+ * Socket state bits.
+ *
+ * Historically, these bits were all kept in the so_state field. For
+ * locking reasons, they are now in multiple fields, as they are
+ * locked differently. so_state maintains basic socket state protected
+ * by the socket lock. so_qstate holds information about the socket
+ * accept queues. Each socket buffer also has a state field holding
+ * information relevant to that buffer (can't send, can't receive).
+ * Many fields will be read without locks to improve performance and
+ * avoid lock order issues; however, this approach must be used with
+ * caution.
+ */
+#define SS_NOFDREF 0x0001 /* no file table ref any more */
+#define SS_ISCONNECTED 0x0002 /* socket connected to a peer */
+#define SS_ISCONNECTING 0x0004 /* in process of connecting to peer */
+#define SS_ISDISCONNECTING 0x0008 /* in process of disconnecting */
+#define SS_NBIO 0x0100 /* non-blocking ops */
+#define SS_ASYNC 0x0200 /* async i/o notify */
+#define SS_ISCONFIRMING 0x0400 /* deciding to accept connection req */
+#define SS_ISDISCONNECTED 0x2000 /* socket disconnected from peer */
+
+/*
+ * Protocols can mark a socket as SS_PROTOREF to indicate that, following
+ * pru_detach, they still want the socket to persist, and will free it
+ * themselves when they are done. Protocols should only ever call sofree()
+ * following setting this flag in pru_detach(), and never otherwise, as
+ * sofree() bypasses socket reference counting.
+ */
+#define SS_PROTOREF 0x4000 /* strong protocol reference */
+
+/*
+ * Socket state bits now stored in the socket buffer state field.
+ */
+#define SBS_CANTSENDMORE 0x0010 /* can't send more data to peer */
+#define SBS_CANTRCVMORE 0x0020 /* can't receive more data from peer */
+#define SBS_RCVATMARK 0x0040 /* at mark on input */
+
+struct socket;
+
+void soisconnected(struct socket *so);
+void soisconnecting(struct socket *so);
+void soisdisconnected(struct socket *so);
+void soisdisconnecting(struct socket *so);
+void socantrcvmore(struct socket *so);
+void socantrcvmore_locked(struct socket *so);
+void socantsendmore(struct socket *so);
+void socantsendmore_locked(struct socket *so);
+#endif /* _SYS_SOCKSTATE_HH_ */
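A short illustration of how these bits are consulted. The field names so_state and so_snd.sb_state belong to struct socket in socketvar.h, not to this header, so treat them as assumptions of the sketch:

static int
example_can_send(struct socket *so)
{
	/* Connection-level state lives in so_state (SS_* bits). */
	if ((so->so_state & SS_ISCONNECTED) == 0)
		return (0);
	/* Per-buffer state lives in the send buffer (SBS_* bits). */
	if ((so->so_snd.sb_state & SBS_CANTSENDMORE) != 0)
		return (0);
	return (1);
}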
diff --git a/rtems/freebsd/sys/stat.h b/rtems/freebsd/sys/stat.h
new file mode 100644
index 00000000..936ffd88
--- /dev/null
+++ b/rtems/freebsd/sys/stat.h
@@ -0,0 +1 @@
+/* EMPTY */
diff --git a/rtems/freebsd/sys/stddef.h b/rtems/freebsd/sys/stddef.h
new file mode 100644
index 00000000..88c9cf47
--- /dev/null
+++ b/rtems/freebsd/sys/stddef.h
@@ -0,0 +1,42 @@
+/*-
+ * Copyright (c) 2002 Maxime Henrion <mux@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _SYS_STDDEF_HH_
+#define _SYS_STDDEF_HH_
+
+#include <rtems/freebsd/sys/cdefs.h>
+#include <rtems/freebsd/sys/_null.h>
+#include <rtems/freebsd/machine/_types.h>
+
+#ifndef __rtems__
+typedef __ptrdiff_t ptrdiff_t;
+
+#define offsetof(type, field) __offsetof(type, field)
+#endif /* __rtems__ */
+
+#endif /* !_SYS_STDDEF_HH_ */
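For illustration, offsetof() as defined above expands to the compiler-assisted __offsetof() and yields the byte offset of a member within its structure (on RTEMS the guard above leaves this to the toolchain's own definition). The struct here is hypothetical:

struct example_pkt {
	uint16_t	ep_len;
	uint16_t	ep_type;
	char		ep_data[32];
};

/* Two uint16_t members precede ep_data, so this is 4 with no padding. */
static const size_t ep_data_offset = offsetof(struct example_pkt, ep_data);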
diff --git a/rtems/freebsd/sys/stdint.h b/rtems/freebsd/sys/stdint.h
new file mode 100644
index 00000000..3227f4f2
--- /dev/null
+++ b/rtems/freebsd/sys/stdint.h
@@ -0,0 +1,106 @@
+/*-
+ * Copyright (c) 2001 Mike Barcroft <mike@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _SYS_STDINT_HH_
+#define _SYS_STDINT_HH_
+
+#include <rtems/freebsd/sys/cdefs.h>
+#include <rtems/freebsd/sys/_types.h>
+
+#include <rtems/freebsd/machine/_stdint.h>
+
+#ifndef _INT8_T_DECLARED
+typedef __int8_t int8_t;
+#define _INT8_T_DECLARED
+#endif
+
+#ifndef _INT16_T_DECLARED
+typedef __int16_t int16_t;
+#define _INT16_T_DECLARED
+#endif
+
+#ifndef _INT32_T_DECLARED
+typedef __int32_t int32_t;
+#define _INT32_T_DECLARED
+#endif
+
+#ifndef _INT64_T_DECLARED
+typedef __int64_t int64_t;
+#define _INT64_T_DECLARED
+#endif
+
+#ifndef _UINT8_T_DECLARED
+typedef __uint8_t uint8_t;
+#define _UINT8_T_DECLARED
+#endif
+
+#ifndef _UINT16_T_DECLARED
+typedef __uint16_t uint16_t;
+#define _UINT16_T_DECLARED
+#endif
+
+#ifndef _UINT32_T_DECLARED
+typedef __uint32_t uint32_t;
+#define _UINT32_T_DECLARED
+#endif
+
+#ifndef _UINT64_T_DECLARED
+typedef __uint64_t uint64_t;
+#define _UINT64_T_DECLARED
+#endif
+
+typedef __int_least8_t int_least8_t;
+typedef __int_least16_t int_least16_t;
+typedef __int_least32_t int_least32_t;
+typedef __int_least64_t int_least64_t;
+
+typedef __uint_least8_t uint_least8_t;
+typedef __uint_least16_t uint_least16_t;
+typedef __uint_least32_t uint_least32_t;
+typedef __uint_least64_t uint_least64_t;
+
+typedef __int_fast8_t int_fast8_t;
+typedef __int_fast16_t int_fast16_t;
+typedef __int_fast32_t int_fast32_t;
+typedef __int_fast64_t int_fast64_t;
+
+typedef __uint_fast8_t uint_fast8_t;
+typedef __uint_fast16_t uint_fast16_t;
+typedef __uint_fast32_t uint_fast32_t;
+typedef __uint_fast64_t uint_fast64_t;
+
+typedef __intmax_t intmax_t;
+typedef __uintmax_t uintmax_t;
+
+#ifndef _INTPTR_T_DECLARED
+typedef __intptr_t intptr_t;
+typedef __uintptr_t uintptr_t;
+#define _INTPTR_T_DECLARED
+#endif
+
+#endif /* !_SYS_STDINT_HH_ */
diff --git a/rtems/freebsd/sys/sx.h b/rtems/freebsd/sys/sx.h
new file mode 100644
index 00000000..1243d30e
--- /dev/null
+++ b/rtems/freebsd/sys/sx.h
@@ -0,0 +1,307 @@
+/*-
+ * Copyright (c) 2007 Attilio Rao <attilio@freebsd.org>
+ * Copyright (c) 2001 Jason Evans <jasone@freebsd.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice(s), this list of conditions and the following disclaimer as
+ * the first lines of this file unmodified other than the possible
+ * addition of one or more copyright notices.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice(s), this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _SYS_SX_HH_
+#define _SYS_SX_HH_
+
+#include <rtems/freebsd/sys/_lock.h>
+#include <rtems/freebsd/sys/_sx.h>
+
+#ifdef _KERNEL
+#include <rtems/freebsd/sys/pcpu.h>
+#include <rtems/freebsd/sys/lock_profile.h>
+#include <rtems/freebsd/sys/lockstat.h>
+#include <rtems/freebsd/machine/atomic.h>
+#endif
+
+#ifdef __rtems__
+#define SX_NOINLINE 1
+#define _sx_slock _bsd__sx_xlock
+#define _sx_try_slock _bsd__sx_try_xlock
+#define _sx_sunlock _bsd__sx_xunlock
+#endif /* __rtems__ */
+
+/*
+ * In general, the sx locks and rwlocks use very similar algorithms.
+ * The main difference in the implementations is how threads are
+ * blocked when a lock is unavailable. For this, sx locks use sleep
+ * queues which do not support priority propagation, and rwlocks use
+ * turnstiles which do.
+ *
+ * The sx_lock field consists of several fields. The low bit
+ * indicates if the lock is locked with a shared or exclusive lock. A
+ * value of 0 indicates an exclusive lock, and a value of 1 indicates
+ * a shared lock. Bit 1 is a boolean indicating if there are any
+ * threads waiting for a shared lock. Bit 2 is a boolean indicating
+ * if there are any threads waiting for an exclusive lock. Bit 3 is a
+ * boolean indicating if an exclusive lock is recursively held. The
+ * rest of the variable's definition is dependent on the value of the
+ * first bit. For an exclusive lock, it is a pointer to the thread
+ * holding the lock, similar to the mtx_lock field of mutexes. For
+ * shared locks, it is a count of read locks that are held.
+ *
+ * When the lock is not locked by any thread, it is encoded as a
+ * shared lock with zero waiters.
+ */
+
+#define SX_LOCK_SHARED 0x01
+#define SX_LOCK_SHARED_WAITERS 0x02
+#define SX_LOCK_EXCLUSIVE_WAITERS 0x04
+#define SX_LOCK_RECURSED 0x08
+#define SX_LOCK_FLAGMASK \
+ (SX_LOCK_SHARED | SX_LOCK_SHARED_WAITERS | \
+ SX_LOCK_EXCLUSIVE_WAITERS | SX_LOCK_RECURSED)
+
+#define SX_OWNER(x) ((x) & ~SX_LOCK_FLAGMASK)
+#define SX_SHARERS_SHIFT 4
+#define SX_SHARERS(x) (SX_OWNER(x) >> SX_SHARERS_SHIFT)
+#define SX_SHARERS_LOCK(x) \
+ ((x) << SX_SHARERS_SHIFT | SX_LOCK_SHARED)
+#define SX_ONE_SHARER (1 << SX_SHARERS_SHIFT)
+
+#define SX_LOCK_UNLOCKED SX_SHARERS_LOCK(0)
+#define SX_LOCK_DESTROYED \
+ (SX_LOCK_SHARED_WAITERS | SX_LOCK_EXCLUSIVE_WAITERS)
+
+#ifdef _KERNEL
+
+/*
+ * Function prototypes. Routines that start with an underscore are not part
+ * of the public interface and are wrapped with a macro.
+ */
+void sx_sysinit(void *arg);
+#define sx_init(sx, desc) sx_init_flags((sx), (desc), 0)
+void sx_init_flags(struct sx *sx, const char *description, int opts);
+void sx_destroy(struct sx *sx);
+int _sx_slock(struct sx *sx, int opts, const char *file, int line);
+int _sx_xlock(struct sx *sx, int opts, const char *file, int line);
+int _sx_try_slock(struct sx *sx, const char *file, int line);
+int _sx_try_xlock(struct sx *sx, const char *file, int line);
+void _sx_sunlock(struct sx *sx, const char *file, int line);
+void _sx_xunlock(struct sx *sx, const char *file, int line);
+int _sx_try_upgrade(struct sx *sx, const char *file, int line);
+void _sx_downgrade(struct sx *sx, const char *file, int line);
+int _sx_xlock_hard(struct sx *sx, uintptr_t tid, int opts,
+ const char *file, int line);
+int _sx_slock_hard(struct sx *sx, int opts, const char *file, int line);
+void _sx_xunlock_hard(struct sx *sx, uintptr_t tid, const char *file, int
+ line);
+void _sx_sunlock_hard(struct sx *sx, const char *file, int line);
+#if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
+void _sx_assert(struct sx *sx, int what, const char *file, int line);
+#endif
+#ifdef DDB
+int sx_chain(struct thread *td, struct thread **ownerp);
+#endif
+
+struct sx_args {
+ struct sx *sa_sx;
+ const char *sa_desc;
+};
+
+#define SX_SYSINIT(name, sxa, desc) \
+ static struct sx_args name##_args = { \
+ (sxa), \
+ (desc) \
+ }; \
+ SYSINIT(name##_sx_sysinit, SI_SUB_LOCK, SI_ORDER_MIDDLE, \
+ sx_sysinit, &name##_args); \
+ SYSUNINIT(name##_sx_sysuninit, SI_SUB_LOCK, SI_ORDER_MIDDLE, \
+ sx_destroy, (sxa))
+
+#ifndef __rtems__
+/*
+ * Full lock operations that are suitable to be inlined in non-debug kernels.
+ * If the lock can't be acquired or released trivially then the work is
+ * deferred to 'tougher' functions.
+ */
+
+/* Acquire an exclusive lock. */
+static __inline int
+__sx_xlock(struct sx *sx, struct thread *td, int opts, const char *file,
+ int line)
+{
+ uintptr_t tid = (uintptr_t)td;
+ int error = 0;
+
+ if (!atomic_cmpset_acq_ptr(&sx->sx_lock, SX_LOCK_UNLOCKED, tid))
+ error = _sx_xlock_hard(sx, tid, opts, file, line);
+ else
+ LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_SX_XLOCK_ACQUIRE,
+ sx, 0, 0, file, line);
+
+ return (error);
+}
+
+/* Release an exclusive lock. */
+static __inline void
+__sx_xunlock(struct sx *sx, struct thread *td, const char *file, int line)
+{
+ uintptr_t tid = (uintptr_t)td;
+
+ if (!atomic_cmpset_rel_ptr(&sx->sx_lock, tid, SX_LOCK_UNLOCKED))
+ _sx_xunlock_hard(sx, tid, file, line);
+}
+
+/* Acquire a shared lock. */
+static __inline int
+__sx_slock(struct sx *sx, int opts, const char *file, int line)
+{
+ uintptr_t x = sx->sx_lock;
+ int error = 0;
+
+ if (!(x & SX_LOCK_SHARED) ||
+ !atomic_cmpset_acq_ptr(&sx->sx_lock, x, x + SX_ONE_SHARER))
+ error = _sx_slock_hard(sx, opts, file, line);
+ else
+ LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_SX_SLOCK_ACQUIRE, sx, 0,
+ 0, file, line);
+
+ return (error);
+}
+
+/*
+ * Release a shared lock. We can just drop a single shared lock so
+ * long as we aren't trying to drop the last shared lock when other
+ * threads are waiting for an exclusive lock. This takes advantage of
+ * the fact that an unlocked lock is encoded as a shared lock with a
+ * count of 0.
+ */
+static __inline void
+__sx_sunlock(struct sx *sx, const char *file, int line)
+{
+ uintptr_t x = sx->sx_lock;
+
+ if (x == (SX_SHARERS_LOCK(1) | SX_LOCK_EXCLUSIVE_WAITERS) ||
+ !atomic_cmpset_rel_ptr(&sx->sx_lock, x, x - SX_ONE_SHARER))
+ _sx_sunlock_hard(sx, file, line);
+}
+#endif /* __rtems__ */
+
+/*
+ * Public interface for lock operations.
+ */
+#ifndef LOCK_DEBUG
+#error "LOCK_DEBUG not defined, include <sys/lock.h> before <sys/sx.h>"
+#endif
+#if (LOCK_DEBUG > 0) || defined(SX_NOINLINE)
+#define sx_xlock(sx) (void)_sx_xlock((sx), 0, LOCK_FILE, LOCK_LINE)
+#define sx_xlock_sig(sx) \
+ _sx_xlock((sx), SX_INTERRUPTIBLE, LOCK_FILE, LOCK_LINE)
+#define sx_xunlock(sx) _sx_xunlock((sx), LOCK_FILE, LOCK_LINE)
+#define sx_slock(sx) (void)_sx_slock((sx), 0, LOCK_FILE, LOCK_LINE)
+#define sx_slock_sig(sx) \
+ _sx_slock((sx), SX_INTERRUPTIBLE, LOCK_FILE, LOCK_LINE)
+#define sx_sunlock(sx) _sx_sunlock((sx), LOCK_FILE, LOCK_LINE)
+#else
+#define sx_xlock(sx) \
+ (void)__sx_xlock((sx), curthread, 0, LOCK_FILE, LOCK_LINE)
+#define sx_xlock_sig(sx) \
+ __sx_xlock((sx), curthread, SX_INTERRUPTIBLE, LOCK_FILE, LOCK_LINE)
+#define sx_xunlock(sx) \
+ __sx_xunlock((sx), curthread, LOCK_FILE, LOCK_LINE)
+#define sx_slock(sx) (void)__sx_slock((sx), 0, LOCK_FILE, LOCK_LINE)
+#define sx_slock_sig(sx) \
+ __sx_slock((sx), SX_INTERRUPTIBLE, LOCK_FILE, LOCK_LINE)
+#define sx_sunlock(sx) __sx_sunlock((sx), LOCK_FILE, LOCK_LINE)
+#endif /* LOCK_DEBUG > 0 || SX_NOINLINE */
+#define sx_try_slock(sx) _sx_try_slock((sx), LOCK_FILE, LOCK_LINE)
+#define sx_try_xlock(sx) _sx_try_xlock((sx), LOCK_FILE, LOCK_LINE)
+#define sx_try_upgrade(sx) _sx_try_upgrade((sx), LOCK_FILE, LOCK_LINE)
+#define sx_downgrade(sx) _sx_downgrade((sx), LOCK_FILE, LOCK_LINE)
+
+/*
+ * Return a pointer to the owning thread if the lock is exclusively
+ * locked.
+ */
+#ifndef __rtems__
+#define sx_xholder(sx) \
+ ((sx)->sx_lock & SX_LOCK_SHARED ? NULL : \
+ (struct thread *)SX_OWNER((sx)->sx_lock))
+
+#define sx_xlocked(sx) \
+ (((sx)->sx_lock & ~(SX_LOCK_FLAGMASK & ~SX_LOCK_SHARED)) == \
+ (uintptr_t)curthread)
+#else /* __rtems__ */
+int sx_xlocked(struct sx *sx);
+#endif /* __rtems__ */
+
+#define sx_unlock(sx) do { \
+ if (sx_xlocked(sx)) \
+ sx_xunlock(sx); \
+ else \
+ sx_sunlock(sx); \
+} while (0)
+
+#define sx_sleep(chan, sx, pri, wmesg, timo) \
+ _sleep((chan), &(sx)->lock_object, (pri), (wmesg), (timo))
+
+/*
+ * Options passed to sx_init_flags().
+ */
+#define SX_DUPOK 0x01
+#define SX_NOPROFILE 0x02
+#define SX_NOWITNESS 0x04
+#define SX_QUIET 0x08
+#define SX_NOADAPTIVE 0x10
+#define SX_RECURSE 0x20
+
+/*
+ * Options passed to sx_*lock_hard().
+ */
+#define SX_INTERRUPTIBLE 0x40
+
+#if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
+#define SA_LOCKED LA_LOCKED
+#define SA_SLOCKED LA_SLOCKED
+#define SA_XLOCKED LA_XLOCKED
+#define SA_UNLOCKED LA_UNLOCKED
+#define SA_RECURSED LA_RECURSED
+#define SA_NOTRECURSED LA_NOTRECURSED
+
+/* Backwards compatibility. */
+#define SX_LOCKED LA_LOCKED
+#define SX_SLOCKED LA_SLOCKED
+#define SX_XLOCKED LA_XLOCKED
+#define SX_UNLOCKED LA_UNLOCKED
+#define SX_RECURSED LA_RECURSED
+#define SX_NOTRECURSED LA_NOTRECURSED
+#endif
+
+#ifdef INVARIANTS
+#define sx_assert(sx, what) _sx_assert((sx), (what), LOCK_FILE, LOCK_LINE)
+#else
+#define sx_assert(sx, what) (void)0
+#endif
+
+#endif /* _KERNEL */
+
+#endif /* !_SYS_SX_HH_ */
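Two hedged illustrations of the interface above. The first works through the lock-word encoding documented in the header comment (the owner pointer value is hypothetical); the second is a minimal lock/unlock sequence using only names from this header:

/*
 * SX_SHARERS_LOCK(3)  == (3 << SX_SHARERS_SHIFT) | SX_LOCK_SHARED == 0x31
 * SX_SHARERS(0x31)    == 3			(three read holders)
 * SX_LOCK_UNLOCKED    == SX_SHARERS_LOCK(0)	(shared, zero sharers)
 * 0xc0de0000 | SX_LOCK_RECURSED		(recursed exclusive lock;
 *						 SX_OWNER() masks the flag bits
 *						 and recovers the hypothetical
 *						 thread pointer)
 */
static struct sx example_sx;
SX_SYSINIT(example_sx, &example_sx, "example sx lock");

static void
example_update(void)
{
	sx_slock(&example_sx);		/* shared: concurrent readers allowed */
	/* ... read shared state ... */
	sx_sunlock(&example_sx);

	sx_xlock(&example_sx);		/* exclusive: single writer */
	/* ... modify shared state ... */
	sx_xunlock(&example_sx);
}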
diff --git a/rtems/freebsd/sys/syscallsubr.h b/rtems/freebsd/sys/syscallsubr.h
new file mode 100644
index 00000000..936ffd88
--- /dev/null
+++ b/rtems/freebsd/sys/syscallsubr.h
@@ -0,0 +1 @@
+/* EMPTY */
diff --git a/rtems/freebsd/sys/sysctl.h b/rtems/freebsd/sys/sysctl.h
new file mode 100644
index 00000000..62d2ea0a
--- /dev/null
+++ b/rtems/freebsd/sys/sysctl.h
@@ -0,0 +1,762 @@
+/*-
+ * Copyright (c) 1989, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Mike Karels at Berkeley Software Design, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)sysctl.h 8.1 (Berkeley) 6/2/93
+ * $FreeBSD$
+ */
+
+#ifndef _SYS_SYSCTL_HH_
+#define _SYS_SYSCTL_HH_
+
+#include <rtems/freebsd/sys/queue.h>
+
+struct thread;
+/*
+ * Definitions for sysctl call. The sysctl call uses a hierarchical name
+ * for objects that can be examined or modified. The name is expressed as
+ * a sequence of integers. Like a file path name, the meaning of each
+ * component depends on its place in the hierarchy. The top-level and kern
+ * identifiers are defined here, and other identifiers are defined in the
+ * respective subsystem header files.
+ */
+
+#define CTL_MAXNAME 24 /* largest number of components supported */
+
+/*
+ * Each subsystem defined by sysctl defines a list of variables
+ * for that subsystem. Each name is either a node with further
+ * levels defined below it, or it is a leaf of some particular
+ * type given below. Each sysctl level defines a set of name/type
+ * pairs to be used by sysctl(8) in manipulating the subsystem.
+ */
+struct ctlname {
+ char *ctl_name; /* subsystem name */
+ int ctl_type; /* type of name */
+};
+
+#define CTLTYPE 0xf /* Mask for the type */
+#define CTLTYPE_NODE 1 /* name is a node */
+#define CTLTYPE_INT 2 /* name describes an integer */
+#define CTLTYPE_STRING 3 /* name describes a string */
+#define CTLTYPE_QUAD 4 /* name describes a 64-bit number */
+#define CTLTYPE_OPAQUE 5 /* name describes a structure */
+#define CTLTYPE_STRUCT CTLTYPE_OPAQUE /* name describes a structure */
+#define CTLTYPE_UINT 6 /* name describes an unsigned integer */
+#define CTLTYPE_LONG 7 /* name describes a long */
+#define CTLTYPE_ULONG 8 /* name describes an unsigned long */
+
+#define CTLFLAG_RD 0x80000000 /* Allow reads of variable */
+#define CTLFLAG_WR 0x40000000 /* Allow writes to the variable */
+#define CTLFLAG_RW (CTLFLAG_RD|CTLFLAG_WR)
+#define CTLFLAG_NOLOCK 0x20000000 /* XXX Don't Lock */
+#define CTLFLAG_ANYBODY 0x10000000 /* All users can set this var */
+#define CTLFLAG_SECURE 0x08000000 /* Permit set only if securelevel<=0 */
+#define CTLFLAG_PRISON 0x04000000 /* Prisoned roots can fiddle */
+#define CTLFLAG_DYN 0x02000000 /* Dynamic oid - can be freed */
+#define CTLFLAG_SKIP 0x01000000 /* Skip this sysctl when listing */
+#define CTLMASK_SECURE 0x00F00000 /* Secure level */
+#define CTLFLAG_TUN 0x00080000 /* Tunable variable */
+#define CTLFLAG_MPSAFE 0x00040000 /* Handler is MP safe */
+#define CTLFLAG_VNET 0x00020000 /* Prisons with vnet can fiddle */
+#define CTLFLAG_RDTUN (CTLFLAG_RD|CTLFLAG_TUN)
+
+/*
+ * Secure level. Note that CTLFLAG_SECURE == CTLFLAG_SECURE1.
+ *
+ * Secure when the securelevel is raised to at least N.
+ */
+#define CTLSHIFT_SECURE 20
+#define CTLFLAG_SECURE1 (CTLFLAG_SECURE | (0 << CTLSHIFT_SECURE))
+#define CTLFLAG_SECURE2 (CTLFLAG_SECURE | (1 << CTLSHIFT_SECURE))
+#define CTLFLAG_SECURE3 (CTLFLAG_SECURE | (2 << CTLSHIFT_SECURE))
+
+/*
+ * USE THIS instead of a hardwired number from the categories below
+ * to get dynamically assigned sysctl entries using the linker-set
+ * technology. This is the way nearly all new sysctl variables should
+ * be implemented.
+ * e.g. SYSCTL_INT(_parent, OID_AUTO, name, CTLFLAG_RW, &variable, 0, "");
+ */
+#define OID_AUTO (-1)
+
+/*
+ * The starting number for dynamically-assigned entries. WARNING!
+ * ALL static sysctl entries should have numbers LESS than this!
+ */
+#define CTL_AUTO_START 0x100
+
+#ifdef _KERNEL
+#define SYSCTL_HANDLER_ARGS struct sysctl_oid *oidp, void *arg1, int arg2, \
+ struct sysctl_req *req
+
+/* definitions for sysctl_req 'lock' member */
+#define REQ_UNLOCKED 0 /* not locked and not wired */
+#define REQ_LOCKED 1 /* locked and not wired */
+#define REQ_WIRED 2 /* locked and wired */
+
+/* definitions for sysctl_req 'flags' member */
+#if defined(__amd64__) || defined(__ia64__)
+#define SCTL_MASK32 1 /* 32 bit emulation */
+#endif
+
+/*
+ * This describes the access space for a sysctl request. This is needed
+ * so that we can use the interface from the kernel or from user-space.
+ */
+struct sysctl_req {
+ struct thread *td; /* used for access checking */
+ int lock; /* locking/wiring state */
+ void *oldptr;
+ size_t oldlen;
+ size_t oldidx;
+ int (*oldfunc)(struct sysctl_req *, const void *, size_t);
+#ifndef __rtems__
+ void *newptr;
+#else /* __rtems__ */
+ const void *newptr;
+#endif /* __rtems__ */
+ size_t newlen;
+ size_t newidx;
+ int (*newfunc)(struct sysctl_req *, void *, size_t);
+ size_t validlen;
+ int flags;
+};
+
+SLIST_HEAD(sysctl_oid_list, sysctl_oid);
+
+/*
+ * This describes one "oid" in the MIB tree. Potentially more nodes can
+ * be hidden behind it, expanded by the handler.
+ */
+struct sysctl_oid {
+ struct sysctl_oid_list *oid_parent;
+ SLIST_ENTRY(sysctl_oid) oid_link;
+ int oid_number;
+ u_int oid_kind;
+ void *oid_arg1;
+ int oid_arg2;
+ const char *oid_name;
+ int (*oid_handler)(SYSCTL_HANDLER_ARGS);
+ const char *oid_fmt;
+ int oid_refcnt;
+ const char *oid_descr;
+};
+
+#define SYSCTL_IN(r, p, l) (r->newfunc)(r, p, l)
+#define SYSCTL_OUT(r, p, l) (r->oldfunc)(r, p, l)
+
+int sysctl_handle_int(SYSCTL_HANDLER_ARGS);
+int sysctl_msec_to_ticks(SYSCTL_HANDLER_ARGS);
+int sysctl_handle_long(SYSCTL_HANDLER_ARGS);
+int sysctl_handle_quad(SYSCTL_HANDLER_ARGS);
+int sysctl_handle_intptr(SYSCTL_HANDLER_ARGS);
+int sysctl_handle_string(SYSCTL_HANDLER_ARGS);
+int sysctl_handle_opaque(SYSCTL_HANDLER_ARGS);
+
+int sysctl_dpcpu_int(SYSCTL_HANDLER_ARGS);
+int sysctl_dpcpu_long(SYSCTL_HANDLER_ARGS);
+int sysctl_dpcpu_quad(SYSCTL_HANDLER_ARGS);
+
+/*
+ * These functions are used to add/remove an oid from the mib.
+ */
+void sysctl_register_oid(struct sysctl_oid *oidp);
+void sysctl_unregister_oid(struct sysctl_oid *oidp);
+
+/* Declare a static oid to allow child oids to be added to it. */
+#ifndef __rtems__
+#define SYSCTL_DECL(name) \
+ extern struct sysctl_oid_list sysctl_##name##_children
+#else /* __rtems__ */
+#define SYSCTL_DECL(name) \
+ extern struct sysctl_oid_list _bsd_sysctl_##name##_children
+#endif /* __rtems__ */
+
+/* Hide these in macros */
+#define SYSCTL_CHILDREN(oid_ptr) (struct sysctl_oid_list *) \
+ (oid_ptr)->oid_arg1
+#define SYSCTL_CHILDREN_SET(oid_ptr, val) \
+ (oid_ptr)->oid_arg1 = (val);
+#ifndef __rtems__
+#define SYSCTL_STATIC_CHILDREN(oid_name) \
+ (&sysctl_##oid_name##_children)
+#else /* __rtems__ */
+#define SYSCTL_STATIC_CHILDREN(oid_name) \
+ (&_bsd_sysctl_##oid_name##_children)
+#endif /* __rtems__ */
+
+/* === Structs and macros related to context handling === */
+
+/* All dynamically created sysctls can be tracked in a context list. */
+struct sysctl_ctx_entry {
+ struct sysctl_oid *entry;
+ TAILQ_ENTRY(sysctl_ctx_entry) link;
+};
+
+TAILQ_HEAD(sysctl_ctx_list, sysctl_ctx_entry);
+
+#ifndef __rtems__
+#define SYSCTL_NODE_CHILDREN(parent, name) \
+ sysctl_##parent##_##name##_children
+#else /* __rtems__ */
+#define SYSCTL_NODE_CHILDREN(parent, name) \
+ _bsd_sysctl_##parent##_##name##_children
+#endif /* __rtems__ */
+
+#ifndef NO_SYSCTL_DESCR
+#define __DESCR(d) d
+#else
+#define __DESCR(d) ""
+#endif
+
+/* This constructs a "raw" MIB oid. */
+#ifndef __rtems__
+#define SYSCTL_OID(parent, nbr, name, kind, a1, a2, handler, fmt, descr) \
+ static struct sysctl_oid sysctl__##parent##_##name = { \
+ &sysctl_##parent##_children, { NULL }, nbr, kind, \
+ a1, a2, #name, handler, fmt, 0, __DESCR(descr) }; \
+ DATA_SET(sysctl_set, sysctl__##parent##_##name)
+#else /* __rtems__ */
+#define SYSCTL_OID(parent, nbr, name, kind, a1, a2, handler, fmt, descr) \
+ static struct sysctl_oid sysctl__##parent##_##name = { \
+ &_bsd_sysctl_##parent##_children, { NULL }, nbr, kind, \
+ a1, a2, #name, handler, fmt, 0, __DESCR(descr) }; \
+ DATA_SET(sysctl_set, sysctl__##parent##_##name)
+#endif /* __rtems__ */
+
+#define SYSCTL_ADD_OID(ctx, parent, nbr, name, kind, a1, a2, handler, fmt, descr) \
+ sysctl_add_oid(ctx, parent, nbr, name, kind, a1, a2, handler, fmt, __DESCR(descr))
+
+/* This constructs a node from which other oids can hang. */
+#define SYSCTL_NODE(parent, nbr, name, access, handler, descr) \
+ struct sysctl_oid_list SYSCTL_NODE_CHILDREN(parent, name); \
+ SYSCTL_OID(parent, nbr, name, CTLTYPE_NODE|(access), \
+ (void*)&SYSCTL_NODE_CHILDREN(parent, name), 0, handler, "N", descr)
+
+#define SYSCTL_ADD_NODE(ctx, parent, nbr, name, access, handler, descr) \
+ sysctl_add_oid(ctx, parent, nbr, name, CTLTYPE_NODE|(access), \
+ NULL, 0, handler, "N", __DESCR(descr))
+
+/* Oid for a string. len can be 0 to indicate '\0' termination. */
+#define SYSCTL_STRING(parent, nbr, name, access, arg, len, descr) \
+ SYSCTL_OID(parent, nbr, name, CTLTYPE_STRING|(access), \
+ arg, len, sysctl_handle_string, "A", descr)
+
+#define SYSCTL_ADD_STRING(ctx, parent, nbr, name, access, arg, len, descr) \
+ sysctl_add_oid(ctx, parent, nbr, name, CTLTYPE_STRING|(access), \
+ arg, len, sysctl_handle_string, "A", __DESCR(descr))
+
+/* Oid for an int. If ptr is NULL, val is returned. */
+#define SYSCTL_INT(parent, nbr, name, access, ptr, val, descr) \
+ SYSCTL_OID(parent, nbr, name, CTLTYPE_INT|CTLFLAG_MPSAFE|(access), \
+ ptr, val, sysctl_handle_int, "I", descr)
+
+#define SYSCTL_ADD_INT(ctx, parent, nbr, name, access, ptr, val, descr) \
+ sysctl_add_oid(ctx, parent, nbr, name, CTLTYPE_INT|CTLFLAG_MPSAFE|(access), \
+ ptr, val, sysctl_handle_int, "I", __DESCR(descr))
+
+/* Oid for an unsigned int. If ptr is NULL, val is returned. */
+#define SYSCTL_UINT(parent, nbr, name, access, ptr, val, descr) \
+ SYSCTL_OID(parent, nbr, name, CTLTYPE_UINT|CTLFLAG_MPSAFE|(access), \
+ ptr, val, sysctl_handle_int, "IU", descr)
+
+#define SYSCTL_ADD_UINT(ctx, parent, nbr, name, access, ptr, val, descr) \
+ sysctl_add_oid(ctx, parent, nbr, name, CTLTYPE_UINT|CTLFLAG_MPSAFE|(access), \
+ ptr, val, sysctl_handle_int, "IU", __DESCR(descr))
+
+#define SYSCTL_XINT(parent, nbr, name, access, ptr, val, descr) \
+ SYSCTL_OID(parent, nbr, name, CTLTYPE_UINT|CTLFLAG_MPSAFE|(access), \
+ ptr, val, sysctl_handle_int, "IX", descr)
+
+#define SYSCTL_ADD_XINT(ctx, parent, nbr, name, access, ptr, val, descr) \
+ sysctl_add_oid(ctx, parent, nbr, name, CTLTYPE_UINT|CTLFLAG_MPSAFE|(access), \
+ ptr, val, sysctl_handle_int, "IX", __DESCR(descr))
+
+/* Oid for a long. The pointer must be non NULL. */
+#define SYSCTL_LONG(parent, nbr, name, access, ptr, val, descr) \
+ SYSCTL_OID(parent, nbr, name, CTLTYPE_LONG|CTLFLAG_MPSAFE|(access), \
+ ptr, val, sysctl_handle_long, "L", descr)
+
+#define SYSCTL_ADD_LONG(ctx, parent, nbr, name, access, ptr, descr) \
+ sysctl_add_oid(ctx, parent, nbr, name, CTLTYPE_LONG|CTLFLAG_MPSAFE|(access), \
+ ptr, 0, sysctl_handle_long, "L", __DESCR(descr))
+
+/* Oid for an unsigned long. The pointer must be non NULL. */
+#define SYSCTL_ULONG(parent, nbr, name, access, ptr, val, descr) \
+ SYSCTL_OID(parent, nbr, name, CTLTYPE_ULONG|CTLFLAG_MPSAFE|(access), \
+ ptr, val, sysctl_handle_long, "LU", __DESCR(descr))
+
+#define SYSCTL_ADD_ULONG(ctx, parent, nbr, name, access, ptr, descr) \
+ sysctl_add_oid(ctx, parent, nbr, name, CTLTYPE_ULONG|CTLFLAG_MPSAFE|(access), \
+ ptr, 0, sysctl_handle_long, "LU", __DESCR(descr))
+
+#define SYSCTL_XLONG(parent, nbr, name, access, ptr, val, descr) \
+ SYSCTL_OID(parent, nbr, name, CTLTYPE_ULONG|CTLFLAG_MPSAFE|(access), \
+ ptr, val, sysctl_handle_long, "LX", __DESCR(descr))
+
+#define SYSCTL_ADD_XLONG(ctx, parent, nbr, name, access, ptr, descr) \
+ sysctl_add_oid(ctx, parent, nbr, name, CTLTYPE_ULONG|CTLFLAG_MPSAFE|(access), \
+ ptr, 0, sysctl_handle_long, "LX", __DESCR(descr))
+
+/* Oid for a quad. The pointer must be non NULL. */
+#define SYSCTL_QUAD(parent, nbr, name, access, ptr, val, descr) \
+ SYSCTL_OID(parent, nbr, name, CTLTYPE_QUAD|CTLFLAG_MPSAFE|(access), \
+ ptr, val, sysctl_handle_quad, "Q", __DESCR(descr))
+
+#define SYSCTL_ADD_QUAD(ctx, parent, nbr, name, access, ptr, descr) \
+ sysctl_add_oid(ctx, parent, nbr, name, CTLTYPE_QUAD|CTLFLAG_MPSAFE|(access), \
+ ptr, 0, sysctl_handle_quad, "Q", __DESCR(descr))
+
+/* Oid for an opaque object. Specified by a pointer and a length. */
+#define SYSCTL_OPAQUE(parent, nbr, name, access, ptr, len, fmt, descr) \
+ SYSCTL_OID(parent, nbr, name, CTLTYPE_OPAQUE|(access), \
+ ptr, len, sysctl_handle_opaque, fmt, descr)
+
+#define SYSCTL_ADD_OPAQUE(ctx, parent, nbr, name, access, ptr, len, fmt, descr)\
+ sysctl_add_oid(ctx, parent, nbr, name, CTLTYPE_OPAQUE|(access), \
+ ptr, len, sysctl_handle_opaque, fmt, __DESCR(descr))
+
+/* Oid for a struct. Specified by a pointer and a type. */
+#define SYSCTL_STRUCT(parent, nbr, name, access, ptr, type, descr) \
+ SYSCTL_OID(parent, nbr, name, CTLTYPE_OPAQUE|(access), \
+ ptr, sizeof(struct type), sysctl_handle_opaque, \
+ "S," #type, descr)
+
+#define SYSCTL_ADD_STRUCT(ctx, parent, nbr, name, access, ptr, type, descr) \
+ sysctl_add_oid(ctx, parent, nbr, name, CTLTYPE_OPAQUE|(access), \
+ ptr, sizeof(struct type), sysctl_handle_opaque, "S," #type, __DESCR(descr))
+
+/* Oid for a procedure. Specified by a pointer and an arg. */
+#define SYSCTL_PROC(parent, nbr, name, access, ptr, arg, handler, fmt, descr) \
+ SYSCTL_OID(parent, nbr, name, (access), \
+ ptr, arg, handler, fmt, descr)
+
+#define SYSCTL_ADD_PROC(ctx, parent, nbr, name, access, ptr, arg, handler, fmt, descr) \
+ sysctl_add_oid(ctx, parent, nbr, name, (access), \
+ ptr, arg, handler, fmt, __DESCR(descr))
+
+/*
+ * A macro to generate a read-only sysctl to indicate the presence of optional
+ * kernel features.
+ */
+#define FEATURE(name, desc) \
+ SYSCTL_INT(_kern_features, OID_AUTO, name, CTLFLAG_RD, 0, 1, desc)
+
+#endif /* _KERNEL */
+
+/*
+ * Top-level identifiers
+ */
+#define CTL_UNSPEC 0 /* unused */
+#define CTL_KERN 1 /* "high kernel": proc, limits */
+#define CTL_VM 2 /* virtual memory */
+#define CTL_VFS 3 /* filesystem, mount type is next */
+#define CTL_NET 4 /* network, see socket.h */
+#define CTL_DEBUG 5 /* debugging parameters */
+#define CTL_HW 6 /* generic cpu/io */
+#define CTL_MACHDEP 7 /* machine dependent */
+#define CTL_USER 8 /* user-level */
+#define CTL_P1003_1B 9 /* POSIX 1003.1B */
+#define CTL_MAXID 10 /* number of valid top-level ids */
+
+#define CTL_NAMES { \
+ { 0, 0 }, \
+ { "kern", CTLTYPE_NODE }, \
+ { "vm", CTLTYPE_NODE }, \
+ { "vfs", CTLTYPE_NODE }, \
+ { "net", CTLTYPE_NODE }, \
+ { "debug", CTLTYPE_NODE }, \
+ { "hw", CTLTYPE_NODE }, \
+ { "machdep", CTLTYPE_NODE }, \
+ { "user", CTLTYPE_NODE }, \
+ { "p1003_1b", CTLTYPE_NODE }, \
+}
+
+/*
+ * CTL_KERN identifiers
+ */
+#define KERN_OSTYPE 1 /* string: system version */
+#define KERN_OSRELEASE 2 /* string: system release */
+#define KERN_OSREV 3 /* int: system revision */
+#define KERN_VERSION 4 /* string: compile time info */
+#define KERN_MAXVNODES 5 /* int: max vnodes */
+#define KERN_MAXPROC 6 /* int: max processes */
+#define KERN_MAXFILES 7 /* int: max open files */
+#define KERN_ARGMAX 8 /* int: max arguments to exec */
+#define KERN_SECURELVL 9 /* int: system security level */
+#define KERN_HOSTNAME 10 /* string: hostname */
+#define KERN_HOSTID 11 /* int: host identifier */
+#define KERN_CLOCKRATE 12 /* struct: struct clockrate */
+#define KERN_VNODE 13 /* struct: vnode structures */
+#define KERN_PROC 14 /* struct: process entries */
+#define KERN_FILE 15 /* struct: file entries */
+#define KERN_PROF 16 /* node: kernel profiling info */
+#define KERN_POSIX1 17 /* int: POSIX.1 version */
+#define KERN_NGROUPS 18 /* int: # of supplemental group ids */
+#define KERN_JOB_CONTROL 19 /* int: is job control available */
+#define KERN_SAVED_IDS 20 /* int: saved set-user/group-ID */
+#define KERN_BOOTTIME 21 /* struct: time kernel was booted */
+#define KERN_NISDOMAINNAME 22 /* string: YP domain name */
+#define KERN_UPDATEINTERVAL 23 /* int: update process sleep time */
+#define KERN_OSRELDATE 24 /* int: kernel release date */
+#define KERN_NTP_PLL 25 /* node: NTP PLL control */
+#define KERN_BOOTFILE 26 /* string: name of booted kernel */
+#define KERN_MAXFILESPERPROC 27 /* int: max open files per proc */
+#define KERN_MAXPROCPERUID 28 /* int: max processes per uid */
+#define KERN_DUMPDEV 29 /* struct cdev *: device to dump on */
+#define KERN_IPC 30 /* node: anything related to IPC */
+#define KERN_DUMMY 31 /* unused */
+#define KERN_PS_STRINGS 32 /* int: address of PS_STRINGS */
+#define KERN_USRSTACK 33 /* int: address of USRSTACK */
+#define KERN_LOGSIGEXIT 34 /* int: do we log sigexit procs? */
+#define KERN_IOV_MAX 35 /* int: value of UIO_MAXIOV */
+#define KERN_HOSTUUID 36 /* string: host UUID identifier */
+#define KERN_ARND 37 /* int: from arc4rand() */
+#define KERN_MAXID 38 /* number of valid kern ids */
+
+#define CTL_KERN_NAMES { \
+ { 0, 0 }, \
+ { "ostype", CTLTYPE_STRING }, \
+ { "osrelease", CTLTYPE_STRING }, \
+ { "osrevision", CTLTYPE_INT }, \
+ { "version", CTLTYPE_STRING }, \
+ { "maxvnodes", CTLTYPE_INT }, \
+ { "maxproc", CTLTYPE_INT }, \
+ { "maxfiles", CTLTYPE_INT }, \
+ { "argmax", CTLTYPE_INT }, \
+ { "securelevel", CTLTYPE_INT }, \
+ { "hostname", CTLTYPE_STRING }, \
+ { "hostid", CTLTYPE_UINT }, \
+ { "clockrate", CTLTYPE_STRUCT }, \
+ { "vnode", CTLTYPE_STRUCT }, \
+ { "proc", CTLTYPE_STRUCT }, \
+ { "file", CTLTYPE_STRUCT }, \
+ { "profiling", CTLTYPE_NODE }, \
+ { "posix1version", CTLTYPE_INT }, \
+ { "ngroups", CTLTYPE_INT }, \
+ { "job_control", CTLTYPE_INT }, \
+ { "saved_ids", CTLTYPE_INT }, \
+ { "boottime", CTLTYPE_STRUCT }, \
+ { "nisdomainname", CTLTYPE_STRING }, \
+ { "update", CTLTYPE_INT }, \
+ { "osreldate", CTLTYPE_INT }, \
+ { "ntp_pll", CTLTYPE_NODE }, \
+ { "bootfile", CTLTYPE_STRING }, \
+ { "maxfilesperproc", CTLTYPE_INT }, \
+ { "maxprocperuid", CTLTYPE_INT }, \
+ { "ipc", CTLTYPE_NODE }, \
+ { "dummy", CTLTYPE_INT }, \
+ { "ps_strings", CTLTYPE_INT }, \
+ { "usrstack", CTLTYPE_INT }, \
+ { "logsigexit", CTLTYPE_INT }, \
+ { "iov_max", CTLTYPE_INT }, \
+ { "hostuuid", CTLTYPE_STRING }, \
+ { "arc4rand", CTLTYPE_OPAQUE }, \
+}
+
+/*
+ * CTL_VFS identifiers
+ */
+#define CTL_VFS_NAMES { \
+ { "vfsconf", CTLTYPE_STRUCT }, \
+}
+
+/*
+ * KERN_PROC subtypes
+ */
+#define KERN_PROC_ALL 0 /* everything */
+#define KERN_PROC_PID 1 /* by process id */
+#define KERN_PROC_PGRP 2 /* by process group id */
+#define KERN_PROC_SESSION 3 /* by session of pid */
+#define KERN_PROC_TTY 4 /* by controlling tty */
+#define KERN_PROC_UID 5 /* by effective uid */
+#define KERN_PROC_RUID 6 /* by real uid */
+#define KERN_PROC_ARGS 7 /* get/set arguments/proctitle */
+#define KERN_PROC_PROC 8 /* only return procs */
+#define KERN_PROC_SV_NAME 9 /* get syscall vector name */
+#define KERN_PROC_RGID 10 /* by real group id */
+#define KERN_PROC_GID 11 /* by effective group id */
+#define KERN_PROC_PATHNAME 12 /* path to executable */
+#define KERN_PROC_OVMMAP 13 /* Old VM map entries for process */
+#define KERN_PROC_OFILEDESC 14 /* Old file descriptors for process */
+#define KERN_PROC_KSTACK 15 /* Kernel stacks for process */
+#define KERN_PROC_INC_THREAD 0x10 /*
+ * modifier for pid, pgrp, tty,
+ * uid, ruid, gid, rgid and proc
+ * This effectively uses 16-31
+ */
+#define KERN_PROC_VMMAP 32 /* VM map entries for process */
+#define KERN_PROC_FILEDESC 33 /* File descriptors for process */
+#define KERN_PROC_GROUPS 34 /* process groups */
+
+/*
+ * KERN_IPC identifiers
+ */
+#define KIPC_MAXSOCKBUF 1 /* int: max size of a socket buffer */
+#define KIPC_SOCKBUF_WASTE 2 /* int: wastage factor in sockbuf */
+#define KIPC_SOMAXCONN 3 /* int: max length of connection q */
+#define KIPC_MAX_LINKHDR 4 /* int: max length of link header */
+#define KIPC_MAX_PROTOHDR 5 /* int: max length of network header */
+#define KIPC_MAX_HDR 6 /* int: max total length of headers */
+#define KIPC_MAX_DATALEN 7 /* int: max length of data? */
+
+/*
+ * CTL_HW identifiers
+ */
+#define HW_MACHINE 1 /* string: machine class */
+#define HW_MODEL 2 /* string: specific machine model */
+#define HW_NCPU 3 /* int: number of cpus */
+#define HW_BYTEORDER 4 /* int: machine byte order */
+#define HW_PHYSMEM 5 /* int: total memory */
+#define HW_USERMEM 6 /* int: non-kernel memory */
+#define HW_PAGESIZE 7 /* int: software page size */
+#define HW_DISKNAMES 8 /* strings: disk drive names */
+#define HW_DISKSTATS 9 /* struct: diskstats[] */
+#define HW_FLOATINGPT 10 /* int: has HW floating point? */
+#define HW_MACHINE_ARCH 11 /* string: machine architecture */
+#define HW_REALMEM 12 /* int: 'real' memory */
+#define HW_MAXID 13 /* number of valid hw ids */
+
+#define CTL_HW_NAMES { \
+ { 0, 0 }, \
+ { "machine", CTLTYPE_STRING }, \
+ { "model", CTLTYPE_STRING }, \
+ { "ncpu", CTLTYPE_INT }, \
+ { "byteorder", CTLTYPE_INT }, \
+ { "physmem", CTLTYPE_ULONG }, \
+ { "usermem", CTLTYPE_ULONG }, \
+ { "pagesize", CTLTYPE_INT }, \
+ { "disknames", CTLTYPE_STRUCT }, \
+ { "diskstats", CTLTYPE_STRUCT }, \
+ { "floatingpoint", CTLTYPE_INT }, \
+ { "machine_arch", CTLTYPE_STRING }, \
+ { "realmem", CTLTYPE_ULONG }, \
+}
+
+/*
+ * CTL_USER definitions
+ */
+#define USER_CS_PATH 1 /* string: _CS_PATH */
+#define USER_BC_BASE_MAX 2 /* int: BC_BASE_MAX */
+#define USER_BC_DIM_MAX 3 /* int: BC_DIM_MAX */
+#define USER_BC_SCALE_MAX 4 /* int: BC_SCALE_MAX */
+#define USER_BC_STRING_MAX 5 /* int: BC_STRING_MAX */
+#define USER_COLL_WEIGHTS_MAX 6 /* int: COLL_WEIGHTS_MAX */
+#define USER_EXPR_NEST_MAX 7 /* int: EXPR_NEST_MAX */
+#define USER_LINE_MAX 8 /* int: LINE_MAX */
+#define USER_RE_DUP_MAX 9 /* int: RE_DUP_MAX */
+#define USER_POSIX2_VERSION 10 /* int: POSIX2_VERSION */
+#define USER_POSIX2_C_BIND 11 /* int: POSIX2_C_BIND */
+#define USER_POSIX2_C_DEV 12 /* int: POSIX2_C_DEV */
+#define USER_POSIX2_CHAR_TERM 13 /* int: POSIX2_CHAR_TERM */
+#define USER_POSIX2_FORT_DEV 14 /* int: POSIX2_FORT_DEV */
+#define USER_POSIX2_FORT_RUN 15 /* int: POSIX2_FORT_RUN */
+#define USER_POSIX2_LOCALEDEF 16 /* int: POSIX2_LOCALEDEF */
+#define USER_POSIX2_SW_DEV 17 /* int: POSIX2_SW_DEV */
+#define USER_POSIX2_UPE 18 /* int: POSIX2_UPE */
+#define USER_STREAM_MAX 19 /* int: POSIX2_STREAM_MAX */
+#define USER_TZNAME_MAX 20 /* int: POSIX2_TZNAME_MAX */
+#define USER_MAXID 21 /* number of valid user ids */
+
+#define CTL_USER_NAMES { \
+ { 0, 0 }, \
+ { "cs_path", CTLTYPE_STRING }, \
+ { "bc_base_max", CTLTYPE_INT }, \
+ { "bc_dim_max", CTLTYPE_INT }, \
+ { "bc_scale_max", CTLTYPE_INT }, \
+ { "bc_string_max", CTLTYPE_INT }, \
+ { "coll_weights_max", CTLTYPE_INT }, \
+ { "expr_nest_max", CTLTYPE_INT }, \
+ { "line_max", CTLTYPE_INT }, \
+ { "re_dup_max", CTLTYPE_INT }, \
+ { "posix2_version", CTLTYPE_INT }, \
+ { "posix2_c_bind", CTLTYPE_INT }, \
+ { "posix2_c_dev", CTLTYPE_INT }, \
+ { "posix2_char_term", CTLTYPE_INT }, \
+ { "posix2_fort_dev", CTLTYPE_INT }, \
+ { "posix2_fort_run", CTLTYPE_INT }, \
+ { "posix2_localedef", CTLTYPE_INT }, \
+ { "posix2_sw_dev", CTLTYPE_INT }, \
+ { "posix2_upe", CTLTYPE_INT }, \
+ { "stream_max", CTLTYPE_INT }, \
+ { "tzname_max", CTLTYPE_INT }, \
+}
+
+#define CTL_P1003_1B_ASYNCHRONOUS_IO 1 /* boolean */
+#define CTL_P1003_1B_MAPPED_FILES 2 /* boolean */
+#define CTL_P1003_1B_MEMLOCK 3 /* boolean */
+#define CTL_P1003_1B_MEMLOCK_RANGE 4 /* boolean */
+#define CTL_P1003_1B_MEMORY_PROTECTION 5 /* boolean */
+#define CTL_P1003_1B_MESSAGE_PASSING 6 /* boolean */
+#define CTL_P1003_1B_PRIORITIZED_IO 7 /* boolean */
+#define CTL_P1003_1B_PRIORITY_SCHEDULING 8 /* boolean */
+#define CTL_P1003_1B_REALTIME_SIGNALS 9 /* boolean */
+#define CTL_P1003_1B_SEMAPHORES 10 /* boolean */
+#define CTL_P1003_1B_FSYNC 11 /* boolean */
+#define CTL_P1003_1B_SHARED_MEMORY_OBJECTS 12 /* boolean */
+#define CTL_P1003_1B_SYNCHRONIZED_IO 13 /* boolean */
+#define CTL_P1003_1B_TIMERS 14 /* boolean */
+#define CTL_P1003_1B_AIO_LISTIO_MAX 15 /* int */
+#define CTL_P1003_1B_AIO_MAX 16 /* int */
+#define CTL_P1003_1B_AIO_PRIO_DELTA_MAX 17 /* int */
+#define CTL_P1003_1B_DELAYTIMER_MAX 18 /* int */
+#define CTL_P1003_1B_MQ_OPEN_MAX 19 /* int */
+#define CTL_P1003_1B_PAGESIZE 20 /* int */
+#define CTL_P1003_1B_RTSIG_MAX 21 /* int */
+#define CTL_P1003_1B_SEM_NSEMS_MAX 22 /* int */
+#define CTL_P1003_1B_SEM_VALUE_MAX 23 /* int */
+#define CTL_P1003_1B_SIGQUEUE_MAX 24 /* int */
+#define CTL_P1003_1B_TIMER_MAX 25 /* int */
+
+#define CTL_P1003_1B_MAXID 26
+
+#define CTL_P1003_1B_NAMES { \
+ { 0, 0 }, \
+ { "asynchronous_io", CTLTYPE_INT }, \
+ { "mapped_files", CTLTYPE_INT }, \
+ { "memlock", CTLTYPE_INT }, \
+ { "memlock_range", CTLTYPE_INT }, \
+ { "memory_protection", CTLTYPE_INT }, \
+ { "message_passing", CTLTYPE_INT }, \
+ { "prioritized_io", CTLTYPE_INT }, \
+ { "priority_scheduling", CTLTYPE_INT }, \
+ { "realtime_signals", CTLTYPE_INT }, \
+ { "semaphores", CTLTYPE_INT }, \
+ { "fsync", CTLTYPE_INT }, \
+ { "shared_memory_objects", CTLTYPE_INT }, \
+ { "synchronized_io", CTLTYPE_INT }, \
+ { "timers", CTLTYPE_INT }, \
+ { "aio_listio_max", CTLTYPE_INT }, \
+ { "aio_max", CTLTYPE_INT }, \
+ { "aio_prio_delta_max", CTLTYPE_INT }, \
+ { "delaytimer_max", CTLTYPE_INT }, \
+ { "mq_open_max", CTLTYPE_INT }, \
+ { "pagesize", CTLTYPE_INT }, \
+ { "rtsig_max", CTLTYPE_INT }, \
+ { "nsems_max", CTLTYPE_INT }, \
+ { "sem_value_max", CTLTYPE_INT }, \
+ { "sigqueue_max", CTLTYPE_INT }, \
+ { "timer_max", CTLTYPE_INT }, \
+}
+
+#ifdef _KERNEL
+
+/*
+ * Declare some common oids.
+ */
+#ifdef __rtems__
+#define sysctl__children _bsd_sysctl__children
+#endif /* __rtems__ */
+extern struct sysctl_oid_list sysctl__children;
+SYSCTL_DECL(_kern);
+SYSCTL_DECL(_kern_features);
+SYSCTL_DECL(_kern_ipc);
+SYSCTL_DECL(_kern_proc);
+SYSCTL_DECL(_kern_sched);
+SYSCTL_DECL(_kern_sched_stats);
+SYSCTL_DECL(_sysctl);
+SYSCTL_DECL(_vm);
+SYSCTL_DECL(_vm_stats);
+SYSCTL_DECL(_vm_stats_misc);
+SYSCTL_DECL(_vfs);
+SYSCTL_DECL(_net);
+SYSCTL_DECL(_debug);
+SYSCTL_DECL(_debug_sizeof);
+SYSCTL_DECL(_dev);
+SYSCTL_DECL(_hw);
+SYSCTL_DECL(_hw_bus);
+SYSCTL_DECL(_hw_bus_devices);
+SYSCTL_DECL(_hw_bus_info);
+SYSCTL_DECL(_machdep);
+SYSCTL_DECL(_user);
+SYSCTL_DECL(_compat);
+SYSCTL_DECL(_regression);
+SYSCTL_DECL(_security);
+SYSCTL_DECL(_security_bsd);
+
+extern char machine[];
+extern char osrelease[];
+extern char ostype[];
+extern char kern_ident[];
+
+/* Dynamic oid handling */
+struct sysctl_oid *sysctl_add_oid(struct sysctl_ctx_list *clist,
+ struct sysctl_oid_list *parent, int nbr, const char *name,
+ int kind, void *arg1, int arg2,
+ int (*handler) (SYSCTL_HANDLER_ARGS),
+ const char *fmt, const char *descr);
+void sysctl_rename_oid(struct sysctl_oid *oidp, const char *name);
+int sysctl_move_oid(struct sysctl_oid *oidp,
+ struct sysctl_oid_list *parent);
+int sysctl_remove_oid(struct sysctl_oid *oidp, int del, int recurse);
+int sysctl_ctx_init(struct sysctl_ctx_list *clist);
+int sysctl_ctx_free(struct sysctl_ctx_list *clist);
+struct sysctl_ctx_entry *sysctl_ctx_entry_add(struct sysctl_ctx_list *clist,
+ struct sysctl_oid *oidp);
+struct sysctl_ctx_entry *sysctl_ctx_entry_find(struct sysctl_ctx_list *clist,
+ struct sysctl_oid *oidp);
+int sysctl_ctx_entry_del(struct sysctl_ctx_list *clist,
+ struct sysctl_oid *oidp);
+
+int kernel_sysctl(struct thread *td, int *name, u_int namelen, void *old,
+#ifndef __rtems__
+ size_t *oldlenp, void *new, size_t newlen,
+#else /* __rtems__ */
+ size_t *oldlenp, const void *newp, size_t newlen,
+#endif /* __rtems__ */
+ size_t *retval, int flags);
+#ifndef __rtems__
+int kernel_sysctlbyname(struct thread *td, char *name,
+ void *old, size_t *oldlenp, void *new, size_t newlen,
+ size_t *retval, int flags);
+int userland_sysctl(struct thread *td, int *name, u_int namelen, void *old,
+ size_t *oldlenp, int inkernel, void *new, size_t newlen,
+ size_t *retval, int flags);
+#endif /* __rtems__ */
+int sysctl_find_oid(int *name, u_int namelen, struct sysctl_oid **noid,
+ int *nindx, struct sysctl_req *req);
+void sysctl_lock(void);
+void sysctl_unlock(void);
+int sysctl_wire_old_buffer(struct sysctl_req *req, size_t len);
+
+#ifndef __rtems__
+#else /* !_KERNEL */
+#endif /* __rtems__ */
+#include <rtems/freebsd/sys/cdefs.h>
+
+__BEGIN_DECLS
+int sysctl(int *, u_int, void *, size_t *, void *, size_t);
+int sysctlbyname(const char *, void *, size_t *, void *, size_t);
+int sysctlnametomib(const char *, int *, size_t *);
+__END_DECLS
+#endif /* _KERNEL */
+
+#endif /* !_SYS_SYSCTL_HH_ */
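A hedged sketch of the OID_AUTO pattern recommended above: one statically registered knob and one created at runtime through a context list, so sysctl_ctx_free() can remove it again. The variable names and description strings are hypothetical; the macros and functions come from this header:

/* Static: registered via the linker set, number assigned dynamically. */
static int example_verbose = 0;
SYSCTL_INT(_debug, OID_AUTO, example_verbose, CTLFLAG_RW,
    &example_verbose, 0, "Hypothetical verbosity knob");

/* Dynamic: tracked in a context list for later teardown. */
static struct sysctl_ctx_list example_ctx;

static void
example_attach(void)
{
	sysctl_ctx_init(&example_ctx);
	SYSCTL_ADD_INT(&example_ctx, SYSCTL_STATIC_CHILDREN(_debug),
	    OID_AUTO, "example_dynamic", CTLFLAG_RW, &example_verbose, 0,
	    "Runtime-created twin of the knob");
}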
diff --git a/rtems/freebsd/sys/sysent.h b/rtems/freebsd/sys/sysent.h
new file mode 100644
index 00000000..936ffd88
--- /dev/null
+++ b/rtems/freebsd/sys/sysent.h
@@ -0,0 +1 @@
+/* EMPTY */
diff --git a/rtems/freebsd/sys/syslimits.h b/rtems/freebsd/sys/syslimits.h
new file mode 100644
index 00000000..936ffd88
--- /dev/null
+++ b/rtems/freebsd/sys/syslimits.h
@@ -0,0 +1 @@
+/* EMPTY */
diff --git a/rtems/freebsd/sys/syslog.h b/rtems/freebsd/sys/syslog.h
new file mode 100644
index 00000000..7284e899
--- /dev/null
+++ b/rtems/freebsd/sys/syslog.h
@@ -0,0 +1,203 @@
+/*-
+ * Copyright (c) 1982, 1986, 1988, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)syslog.h 8.1 (Berkeley) 6/2/93
+ * $FreeBSD$
+ */
+
+#ifndef _SYS_SYSLOG_HH_
+#define _SYS_SYSLOG_HH_
+
+#define _PATH_LOG "/var/run/log"
+#define _PATH_LOG_PRIV "/var/run/logpriv"
+#define _PATH_OLDLOG "/dev/log" /* backward compatibility */
+
+/*
+ * priorities/facilities are encoded into a single 32-bit quantity, where the
+ * bottom 3 bits are the priority (0-7) and the remaining 29 bits are the facility
+ * (0-big number). Both the priorities and the facilities map roughly
+ * one-to-one to strings in the syslogd(8) source code. This mapping is
+ * included in this file.
+ *
+ * priorities (these are ordered)
+ */
+#define LOG_EMERG 0 /* system is unusable */
+#define LOG_ALERT 1 /* action must be taken immediately */
+#define LOG_CRIT 2 /* critical conditions */
+#define LOG_ERR 3 /* error conditions */
+#define LOG_WARNING 4 /* warning conditions */
+#define LOG_NOTICE 5 /* normal but significant condition */
+#define LOG_INFO 6 /* informational */
+#define LOG_DEBUG 7 /* debug-level messages */
+
+#define LOG_PRIMASK 0x07 /* mask to extract priority part (internal) */
+ /* extract priority */
+#define LOG_PRI(p) ((p) & LOG_PRIMASK)
+#define LOG_MAKEPRI(fac, pri) ((fac) | (pri))
+
+#ifdef SYSLOG_NAMES
+#define INTERNAL_NOPRI 0x10 /* the "no priority" priority */
+ /* mark "facility" */
+#define INTERNAL_MARK LOG_MAKEPRI((LOG_NFACILITIES<<3), 0)
+typedef struct _code {
+ const char *c_name;
+ int c_val;
+} CODE;
+
+CODE prioritynames[] = {
+ { "alert", LOG_ALERT, },
+ { "crit", LOG_CRIT, },
+ { "debug", LOG_DEBUG, },
+ { "emerg", LOG_EMERG, },
+ { "err", LOG_ERR, },
+ { "error", LOG_ERR, }, /* DEPRECATED */
+ { "info", LOG_INFO, },
+ { "none", INTERNAL_NOPRI, }, /* INTERNAL */
+ { "notice", LOG_NOTICE, },
+ { "panic", LOG_EMERG, }, /* DEPRECATED */
+ { "warn", LOG_WARNING, }, /* DEPRECATED */
+ { "warning", LOG_WARNING, },
+ { NULL, -1, }
+};
+#endif
+
+/* facility codes */
+#define LOG_KERN (0<<3) /* kernel messages */
+#define LOG_USER (1<<3) /* random user-level messages */
+#define LOG_MAIL (2<<3) /* mail system */
+#define LOG_DAEMON (3<<3) /* system daemons */
+#define LOG_AUTH (4<<3) /* authorization messages */
+#define LOG_SYSLOG (5<<3) /* messages generated internally by syslogd */
+#define LOG_LPR (6<<3) /* line printer subsystem */
+#define LOG_NEWS (7<<3) /* network news subsystem */
+#define LOG_UUCP (8<<3) /* UUCP subsystem */
+#define LOG_CRON (9<<3) /* clock daemon */
+#define LOG_AUTHPRIV (10<<3) /* authorization messages (private) */
+ /* Facility #10 clashes in DEC UNIX, where */
+ /* it's defined as LOG_MEGASAFE for AdvFS */
+ /* event logging. */
+#define LOG_FTP (11<<3) /* ftp daemon */
+#define LOG_NTP (12<<3) /* NTP subsystem */
+#define LOG_SECURITY (13<<3) /* security subsystems (firewalling, etc.) */
+#define LOG_CONSOLE (14<<3) /* /dev/console output */
+
+ /* other codes through 15 reserved for system use */
+#define LOG_LOCAL0 (16<<3) /* reserved for local use */
+#define LOG_LOCAL1 (17<<3) /* reserved for local use */
+#define LOG_LOCAL2 (18<<3) /* reserved for local use */
+#define LOG_LOCAL3 (19<<3) /* reserved for local use */
+#define LOG_LOCAL4 (20<<3) /* reserved for local use */
+#define LOG_LOCAL5 (21<<3) /* reserved for local use */
+#define LOG_LOCAL6 (22<<3) /* reserved for local use */
+#define LOG_LOCAL7 (23<<3) /* reserved for local use */
+
+#define LOG_NFACILITIES 24 /* current number of facilities */
+#define LOG_FACMASK 0x03f8 /* mask to extract facility part */
+ /* facility of pri */
+#define LOG_FAC(p) (((p) & LOG_FACMASK) >> 3)
+
+#ifdef SYSLOG_NAMES
+CODE facilitynames[] = {
+ { "auth", LOG_AUTH, },
+ { "authpriv", LOG_AUTHPRIV, },
+ { "console", LOG_CONSOLE, },
+ { "cron", LOG_CRON, },
+ { "daemon", LOG_DAEMON, },
+ { "ftp", LOG_FTP, },
+ { "kern", LOG_KERN, },
+ { "lpr", LOG_LPR, },
+ { "mail", LOG_MAIL, },
+ { "mark", INTERNAL_MARK, }, /* INTERNAL */
+ { "news", LOG_NEWS, },
+ { "ntp", LOG_NTP, },
+ { "security", LOG_SECURITY, },
+ { "syslog", LOG_SYSLOG, },
+ { "user", LOG_USER, },
+ { "uucp", LOG_UUCP, },
+ { "local0", LOG_LOCAL0, },
+ { "local1", LOG_LOCAL1, },
+ { "local2", LOG_LOCAL2, },
+ { "local3", LOG_LOCAL3, },
+ { "local4", LOG_LOCAL4, },
+ { "local5", LOG_LOCAL5, },
+ { "local6", LOG_LOCAL6, },
+ { "local7", LOG_LOCAL7, },
+ { NULL, -1, }
+};
+#endif
+
+#ifdef _KERNEL
+#define LOG_PRINTF -1 /* pseudo-priority to indicate use of printf */
+#endif
+
+/*
+ * arguments to setlogmask.
+ */
+#define LOG_MASK(pri) (1 << (pri)) /* mask for one priority */
+#define LOG_UPTO(pri) ((1 << ((pri)+1)) - 1) /* all priorities through pri */
+
+/*
+ * Option flags for openlog.
+ *
+ * LOG_ODELAY no longer does anything.
+ * LOG_NDELAY is the inverse of what it used to be.
+ */
+#define LOG_PID 0x01 /* log the pid with each message */
+#define LOG_CONS 0x02 /* log on the console if errors in sending */
+#define LOG_ODELAY 0x04 /* delay open until first syslog() (default) */
+#define LOG_NDELAY 0x08 /* don't delay open */
+#define LOG_NOWAIT 0x10 /* don't wait for console forks: DEPRECATED */
+#define LOG_PERROR 0x20 /* log to stderr as well */
+
+#ifdef _KERNEL
+
+#else /* not _KERNEL */
+
+/*
+ * Don't use va_list in the vsyslog() prototype. Va_list is typedef'd in two
+ * places (<machine/varargs.h> and <machine/stdarg.h>), so if we include one
+ * of them here we may collide with the utility's includes. It's unreasonable
+ * for utilities to have to include one of them to include syslog.h, so we get
+ * __va_list from <sys/_types.h> and use it.
+ */
+#include <rtems/freebsd/sys/cdefs.h>
+#include <rtems/freebsd/sys/_types.h>
+
+__BEGIN_DECLS
+void closelog(void);
+void openlog(const char *, int, int);
+int setlogmask(int);
+void syslog(int, const char *, ...) __printflike(2, 3);
+#if __BSD_VISIBLE
+void vsyslog(int, const char *, __va_list) __printflike(2, 0);
+#endif
+__END_DECLS
+
+#endif /* !_KERNEL */
+
+#endif
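A worked example of the priority/facility encoding and the setlogmask() arguments described above (userland side; the program name and message text are illustrative only):

int pri = LOG_MAKEPRI(LOG_DAEMON, LOG_WARNING);	/* (3 << 3) | 4 == 0x1c */
/* LOG_PRI(pri) == LOG_WARNING (4); LOG_FAC(pri) == 3 == LOG_DAEMON >> 3 */

openlog("exampled", LOG_PID | LOG_NDELAY, LOG_DAEMON);
setlogmask(LOG_UPTO(LOG_WARNING));	/* 0x1f: pass LOG_EMERG..LOG_WARNING */
syslog(LOG_INFO, "suppressed by the mask");
syslog(LOG_ERR, "delivered: error %d", 42);
closelog();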
diff --git a/rtems/freebsd/sys/sysproto.h b/rtems/freebsd/sys/sysproto.h
new file mode 100644
index 00000000..936ffd88
--- /dev/null
+++ b/rtems/freebsd/sys/sysproto.h
@@ -0,0 +1 @@
+/* EMPTY */
diff --git a/rtems/freebsd/sys/systm.h b/rtems/freebsd/sys/systm.h
new file mode 100644
index 00000000..d4ba7bc5
--- /dev/null
+++ b/rtems/freebsd/sys/systm.h
@@ -0,0 +1,418 @@
+/*-
+ * Copyright (c) 1982, 1988, 1991, 1993
+ * The Regents of the University of California. All rights reserved.
+ * (c) UNIX System Laboratories, Inc.
+ * All or some portions of this file are derived from material licensed
+ * to the University of California by American Telephone and Telegraph
+ * Co. or Unix System Laboratories, Inc. and are reproduced herein with
+ * the permission of UNIX System Laboratories, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)systm.h 8.7 (Berkeley) 3/29/95
+ * $FreeBSD$
+ */
+
+#ifndef _SYS_SYSTM_HH_
+#define _SYS_SYSTM_HH_
+
+#include <rtems/freebsd/machine/atomic.h>
+#include <rtems/freebsd/machine/cpufunc.h>
+#include <rtems/freebsd/sys/callout.h>
+#include <rtems/freebsd/sys/cdefs.h>
+#include <rtems/freebsd/sys/queue.h>
+#include <rtems/freebsd/sys/stdint.h> /* for people using printf mainly */
+
+extern int cold; /* nonzero if we are doing a cold boot */
+extern int rebooting; /* boot() has been called. */
+extern const char *panicstr; /* panic message */
+extern char version[]; /* system version */
+extern char copyright[]; /* system copyright */
+extern int kstack_pages; /* number of kernel stack pages */
+
+extern int nswap; /* size of swap space */
+
+extern u_long pagesizes[]; /* supported page sizes */
+extern long physmem; /* physical memory */
+extern long realmem; /* 'real' memory */
+
+extern char *rootdevnames[2]; /* names of possible root devices */
+
+extern int boothowto; /* reboot flags, from console subsystem */
+extern int bootverbose; /* nonzero to print verbose messages */
+
+extern int maxusers; /* system tune hint */
+extern int ngroups_max; /* max # of supplemental groups */
+extern int vm_guest; /* Running as virtual machine guest? */
+
+/*
+ * Detected virtual machine guest types. The intention is to expand
+ * and/or add to the VM_GUEST_VM type if specific VM functionality is
+ * ever implemented (e.g. vendor-specific paravirtualization features).
+ */
+enum VM_GUEST { VM_GUEST_NO = 0, VM_GUEST_VM, VM_GUEST_XEN };
+
+#ifdef INVARIANTS /* The option is always available */
+#define KASSERT(exp,msg) do { \
+ if (__predict_false(!(exp))) \
+ panic msg; \
+} while (0)
+#define VNASSERT(exp, vp, msg) do { \
+ if (__predict_false(!(exp))) { \
+ vn_printf(vp, "VNASSERT failed\n"); \
+ panic msg; \
+ } \
+} while (0)
+#else
+#define KASSERT(exp,msg) do { \
+} while (0)
+
+#define VNASSERT(exp, vp, msg) do { \
+} while (0)
+#endif
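+
+/*
+ * Usage sketch (an editorial example, not part of the original header):
+ * the message argument is a parenthesized printf-style list, since it is
+ * passed straight to panic():
+ *
+ *	KASSERT(refcnt > 0, ("%s: bad refcount %d", __func__, refcnt));
+ *
+ * With INVARIANTS disabled the macro expands to nothing, so the condition
+ * must not carry side effects the surrounding code depends on.
+ */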
+
+#ifndef CTASSERT /* Allow lint to override */
+#define CTASSERT(x) _CTASSERT(x, __LINE__)
+#define _CTASSERT(x, y) __CTASSERT(x, y)
+#define __CTASSERT(x, y) typedef char __assert ## y[(x) ? 1 : -1]
+#endif
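+
+/*
+ * Usage sketch (an editorial example, not part of the original header):
+ * CTASSERT() fails at compile time by declaring a char array of size -1
+ * when the condition is false, e.g.
+ *
+ *	CTASSERT(sizeof(struct some_hdr) == 64);	-- some_hdr is hypothetical
+ */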
+
+/*
+ * Assert that a pointer can be loaded from memory atomically.
+ *
+ * This assertion enforces stronger alignment than necessary. For example,
+ * on some architectures, atomicity for unaligned loads will depend on
+ * whether or not the load spans multiple cache lines.
+ */
+#define ASSERT_ATOMIC_LOAD_PTR(var, msg) \
+ KASSERT(sizeof(var) == sizeof(void *) && \
+ ((uintptr_t)&(var) & (sizeof(void *) - 1)) == 0, msg)
+
+/*
+ * XXX the hints declarations are even more misplaced than most declarations
+ * in this file, since they are needed in one file (per arch) and only used
+ * in two files.
+ * XXX most of these variables should be const.
+ */
+extern int osreldate;
+extern int envmode;
+extern int hintmode; /* 0 = off, 1 = config, 2 = fallback */
+extern int dynamic_kenv;
+extern struct mtx kenv_lock;
+extern char *kern_envp;
+extern char static_env[];
+extern char static_hints[]; /* by config for now */
+
+extern char **kenvp;
+
+/*
+ * General function declarations.
+ */
+
+struct inpcb;
+struct lock_object;
+struct malloc_type;
+struct mtx;
+struct proc;
+struct socket;
+struct thread;
+struct tty;
+struct ucred;
+struct uio;
+struct _jmp_buf;
+
+int setjmp(struct _jmp_buf *);
+void longjmp(struct _jmp_buf *, int) __dead2;
+int dumpstatus(vm_offset_t addr, off_t count);
+int nullop(void);
+int eopnotsupp(void);
+int ureadc(int, struct uio *);
+void hashdestroy(void *, struct malloc_type *, u_long);
+void *hashinit(int count, struct malloc_type *type, u_long *hashmask);
+void *hashinit_flags(int count, struct malloc_type *type,
+ u_long *hashmask, int flags);
+#define HASH_NOWAIT 0x00000001
+#define HASH_WAITOK 0x00000002
+
+void *phashinit(int count, struct malloc_type *type, u_long *nentries);
+void g_waitidle(void);
+
+#ifdef RESTARTABLE_PANICS
+void panic(const char *, ...) __printflike(1, 2);
+#else
+void panic(const char *, ...) __dead2 __printflike(1, 2);
+#endif
+
+void cpu_boot(int);
+void cpu_flush_dcache(void *, size_t);
+void cpu_rootconf(void);
+void critical_enter(void);
+void critical_exit(void);
+void init_param1(void);
+void init_param2(long physpages);
+void init_param3(long kmempages);
+void tablefull(const char *);
+int kvprintf(char const *, void (*)(int, void*), void *, int,
+ __va_list) __printflike(1, 0);
+void log(int, const char *, ...) __printflike(2, 3);
+void log_console(struct uio *);
+int printf(const char *, ...) __printflike(1, 2);
+int snprintf(char *, size_t, const char *, ...) __printflike(3, 4);
+int sprintf(char *buf, const char *, ...) __printflike(2, 3);
+int uprintf(const char *, ...) __printflike(1, 2);
+int vprintf(const char *, __va_list) __printflike(1, 0);
+int vsnprintf(char *, size_t, const char *, __va_list) __printflike(3, 0);
+int vsnrprintf(char *, size_t, int, const char *, __va_list) __printflike(4, 0);
+int vsprintf(char *buf, const char *, __va_list) __printflike(2, 0);
+int ttyprintf(struct tty *, const char *, ...) __printflike(2, 3);
+int sscanf(const char *, char const *, ...) __nonnull(1) __nonnull(2);
+int vsscanf(const char *, char const *, __va_list) __nonnull(1) __nonnull(2);
+long strtol(const char *, char **, int) __nonnull(1);
+u_long strtoul(const char *, char **, int) __nonnull(1);
+quad_t strtoq(const char *, char **, int) __nonnull(1);
+u_quad_t strtouq(const char *, char **, int) __nonnull(1);
+void tprintf(struct proc *p, int pri, const char *, ...) __printflike(3, 4);
+void hexdump(const void *ptr, int length, const char *hdr, int flags);
+#define HD_COLUMN_MASK 0xff
+#define HD_DELIM_MASK 0xff00
+#define HD_OMIT_COUNT (1 << 16)
+#define HD_OMIT_HEX (1 << 17)
+#define HD_OMIT_CHARS (1 << 18)
+
+#define ovbcopy(f, t, l) bcopy((f), (t), (l))
+void bcopy(const void *from, void *to, size_t len) __nonnull(1) __nonnull(2);
+void bzero(void *buf, size_t len) __nonnull(1);
+
+void *memcpy(void *to, const void *from, size_t len) __nonnull(1) __nonnull(2);
+void *memmove(void *dest, const void *src, size_t n) __nonnull(1) __nonnull(2);
+
+int copystr(const void * __restrict kfaddr, void * __restrict kdaddr,
+ size_t len, size_t * __restrict lencopied)
+ __nonnull(1) __nonnull(2);
+int copyinstr(const void * __restrict udaddr, void * __restrict kaddr,
+ size_t len, size_t * __restrict lencopied)
+ __nonnull(1) __nonnull(2);
+int copyin(const void * __restrict udaddr, void * __restrict kaddr,
+ size_t len) __nonnull(1) __nonnull(2);
+int copyout(const void * __restrict kaddr, void * __restrict udaddr,
+ size_t len) __nonnull(1) __nonnull(2);
+
+int fubyte(const void *base);
+long fuword(const void *base);
+int fuword16(void *base);
+int32_t fuword32(const void *base);
+int64_t fuword64(const void *base);
+int subyte(void *base, int byte);
+int suword(void *base, long word);
+int suword16(void *base, int word);
+int suword32(void *base, int32_t word);
+int suword64(void *base, int64_t word);
+uint32_t casuword32(volatile uint32_t *base, uint32_t oldval, uint32_t newval);
+u_long casuword(volatile u_long *p, u_long oldval, u_long newval);
+
+void realitexpire(void *);
+
+int sysbeep(int hertz, int period);
+
+void hardclock(int usermode, uintfptr_t pc);
+void hardclock_cpu(int usermode);
+void softclock(void *);
+void statclock(int usermode);
+void profclock(int usermode, uintfptr_t pc);
+
+void startprofclock(struct proc *);
+void stopprofclock(struct proc *);
+void cpu_startprofclock(void);
+void cpu_stopprofclock(void);
+
+int cr_cansee(struct ucred *u1, struct ucred *u2);
+int cr_canseesocket(struct ucred *cred, struct socket *so);
+int cr_canseeinpcb(struct ucred *cred, struct inpcb *inp);
+
+char *getenv(const char *name);
+void freeenv(char *env);
+int getenv_int(const char *name, int *data);
+int getenv_uint(const char *name, unsigned int *data);
+int getenv_long(const char *name, long *data);
+int getenv_ulong(const char *name, unsigned long *data);
+int getenv_string(const char *name, char *data, int size);
+int getenv_quad(const char *name, quad_t *data);
+int setenv(const char *name, const char *value);
+int unsetenv(const char *name);
+int testenv(const char *name);
+
+typedef uint64_t (cpu_tick_f)(void);
+void set_cputicker(cpu_tick_f *func, uint64_t freq, unsigned var);
+extern cpu_tick_f *cpu_ticks;
+uint64_t cpu_tickrate(void);
+uint64_t cputick2usec(uint64_t tick);
+
+#ifdef APM_FIXUP_CALLTODO
+struct timeval;
+void adjust_timeout_calltodo(struct timeval *time_change);
+#endif /* APM_FIXUP_CALLTODO */
+
+#include <rtems/freebsd/sys/libkern.h>
+
+/* Initialize the world */
+void consinit(void);
+void cpu_initclocks(void);
+void usrinfoinit(void);
+
+/* Finalize the world */
+void shutdown_nice(int);
+
+/* Timeouts */
+typedef void timeout_t(void *); /* timeout function type */
+#define CALLOUT_HANDLE_INITIALIZER(handle) \
+ { NULL }
+
+void callout_handle_init(struct callout_handle *);
+struct callout_handle timeout(timeout_t *, void *, int);
+void untimeout(timeout_t *, void *, struct callout_handle);
+caddr_t kern_timeout_callwheel_alloc(caddr_t v);
+void kern_timeout_callwheel_init(void);
+
+/* Stubs for obsolete functions that used to be for interrupt management */
+static __inline void spl0(void) { return; }
+static __inline intrmask_t splbio(void) { return 0; }
+static __inline intrmask_t splcam(void) { return 0; }
+static __inline intrmask_t splclock(void) { return 0; }
+static __inline intrmask_t splhigh(void) { return 0; }
+static __inline intrmask_t splimp(void) { return 0; }
+static __inline intrmask_t splnet(void) { return 0; }
+static __inline intrmask_t splsoftcam(void) { return 0; }
+static __inline intrmask_t splsoftclock(void) { return 0; }
+static __inline intrmask_t splsofttty(void) { return 0; }
+static __inline intrmask_t splsoftvm(void) { return 0; }
+static __inline intrmask_t splsofttq(void) { return 0; }
+static __inline intrmask_t splstatclock(void) { return 0; }
+static __inline intrmask_t spltty(void) { return 0; }
+static __inline intrmask_t splvm(void) { return 0; }
+static __inline void splx(intrmask_t ipl __unused) { return; }
+
+/*
+ * Common `proc' functions are declared here so that proc.h can be included
+ * less often.
+ */
+int _sleep(void *chan, struct lock_object *lock, int pri, const char *wmesg,
+ int timo) __nonnull(1);
+#define msleep(chan, mtx, pri, wmesg, timo) \
+ _sleep((chan), &(mtx)->lock_object, (pri), (wmesg), (timo))
+int msleep_spin(void *chan, struct mtx *mtx, const char *wmesg, int timo)
+ __nonnull(1);
+int pause(const char *wmesg, int timo);
+#define tsleep(chan, pri, wmesg, timo) \
+ _sleep((chan), NULL, (pri), (wmesg), (timo))
+void wakeup(void *chan) __nonnull(1);
+void wakeup_one(void *chan) __nonnull(1);
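+
+/*
+ * Usage sketch (an editorial example, not part of the original header;
+ * the flag name is hypothetical): the classic pattern sleeps on an
+ * arbitrary channel address and is woken via the same address:
+ *
+ *	while (!sc_ready)
+ *		tsleep(&sc_ready, PWAIT, "scready", 0);
+ *	...
+ *	sc_ready = 1;
+ *	wakeup(&sc_ready);
+ *
+ * A timo of 0 means "sleep until woken"; nonzero values are in ticks.
+ */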
+
+/*
+ * Common `struct cdev *' stuff are declared here to avoid #include poisoning
+ */
+
+struct cdev;
+dev_t dev2udev(struct cdev *x);
+const char *devtoname(struct cdev *cdev);
+
+int poll_no_poll(int events);
+
+/* XXX: Should be void nanodelay(u_int nsec); */
+void DELAY(int usec);
+
+/* Root mount holdback API */
+struct root_hold_token;
+
+struct root_hold_token *root_mount_hold(const char *identifier);
+void root_mount_rel(struct root_hold_token *h);
+void root_mount_wait(void);
+int root_mounted(void);
+
+
+/*
+ * Unit number allocation API. (kern/subr_unit.c)
+ */
+struct unrhdr;
+struct unrhdr *new_unrhdr(int low, int high, struct mtx *mutex);
+void delete_unrhdr(struct unrhdr *uh);
+void clean_unrhdr(struct unrhdr *uh);
+void clean_unrhdrl(struct unrhdr *uh);
+int alloc_unr(struct unrhdr *uh);
+int alloc_unrl(struct unrhdr *uh);
+void free_unr(struct unrhdr *uh, u_int item);
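+
+/*
+ * Usage sketch (an editorial example, not part of the original header):
+ * allocating unit numbers for a hypothetical device class from [0, 255]:
+ *
+ *	struct unrhdr *uh = new_unrhdr(0, 255, NULL);	-- NULL: internal lock
+ *	int unit = alloc_unr(uh);			-- -1 when exhausted
+ *	...
+ *	free_unr(uh, unit);
+ *	delete_unrhdr(uh);
+ */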
+
+/*
+ * This is about as magic as it gets. fortune(1) has got similar code
+ * for reversing bits in a word. Who thinks up this stuff??
+ *
+ * Yes, it does appear to be consistently faster than:
+ * while (i = ffs(m)) {
+ * m >>= i;
+ * bits++;
+ * }
+ * and
+ * while (lsb = (m & -m)) { // This is magic too
+ * m &= ~lsb; // or: m ^= lsb
+ * bits++;
+ * }
+ * Both of these latter forms do some very strange things on gcc-3.1 with
+ * -mcpu=pentiumpro and/or -march=pentiumpro and/or -O or -O2.
+ * There is probably an SSE or MMX popcnt instruction.
+ *
+ * I wonder if this should be in libkern?
+ *
+ * XXX Stop the presses! Another one:
+ * static __inline u_int32_t
+ * popcnt1(u_int32_t v)
+ * {
+ * v -= ((v >> 1) & 0x55555555);
+ * v = (v & 0x33333333) + ((v >> 2) & 0x33333333);
+ * v = (v + (v >> 4)) & 0x0F0F0F0F;
+ * return (v * 0x01010101) >> 24;
+ * }
+ * The downside is that it has a multiply. With a pentium3 with
+ * -mcpu=pentiumpro and -march=pentiumpro then gcc-3.1 will use
+ * an imull, and in that case it is faster. In most other cases
+ * it appears slightly slower.
+ *
+ * Another variant (also from fortune):
+ * #define BITCOUNT(x) (((BX_(x)+(BX_(x)>>4)) & 0x0F0F0F0F) % 255)
+ * #define BX_(x) ((x) - (((x)>>1)&0x77777777) \
+ * - (((x)>>2)&0x33333333) \
+ * - (((x)>>3)&0x11111111))
+ */
+static __inline uint32_t
+bitcount32(uint32_t x)
+{
+
+ x = (x & 0x55555555) + ((x & 0xaaaaaaaa) >> 1);
+ x = (x & 0x33333333) + ((x & 0xcccccccc) >> 2);
+ x = (x + (x >> 4)) & 0x0f0f0f0f;
+ x = (x + (x >> 8));
+ x = (x + (x >> 16)) & 0x000000ff;
+ return (x);
+}
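+
+/*
+ * Worked example (editorial, not part of the original header): each pass
+ * above sums bit counts over fields twice as wide as the previous one
+ * (pairs, nibbles, bytes, halfwords, word), so for instance
+ *
+ *	bitcount32(0x80000001) == 2
+ *	bitcount32(0xffffffff) == 32
+ */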
+
+#endif /* !_SYS_SYSTM_HH_ */
diff --git a/rtems/freebsd/sys/taskqueue.h b/rtems/freebsd/sys/taskqueue.h
new file mode 100644
index 00000000..1fe094d5
--- /dev/null
+++ b/rtems/freebsd/sys/taskqueue.h
@@ -0,0 +1,161 @@
+/*-
+ * Copyright (c) 2000 Doug Rabson
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _SYS_TASKQUEUE_HH_
+#define _SYS_TASKQUEUE_HH_
+
+#ifndef _KERNEL
+#error "no user-serviceable parts inside"
+#endif
+
+#include <rtems/freebsd/sys/queue.h>
+#include <rtems/freebsd/sys/_task.h>
+
+struct taskqueue;
+struct thread;
+
+/*
+ * A notification callback function which is called from
+ * taskqueue_enqueue(). The context argument is given in the call to
+ * taskqueue_create(). This function would normally be used to allow the
+ * queue to arrange to run itself later (e.g., by scheduling a software
+ * interrupt or waking a kernel thread).
+ */
+typedef void (*taskqueue_enqueue_fn)(void *context);
+
+struct taskqueue *taskqueue_create(const char *name, int mflags,
+ taskqueue_enqueue_fn enqueue,
+ void *context);
+int taskqueue_start_threads(struct taskqueue **tqp, int count, int pri,
+ const char *name, ...) __printflike(4, 5);
+int taskqueue_enqueue(struct taskqueue *queue, struct task *task);
+void taskqueue_drain(struct taskqueue *queue, struct task *task);
+void taskqueue_free(struct taskqueue *queue);
+void taskqueue_run(struct taskqueue *queue);
+void taskqueue_block(struct taskqueue *queue);
+void taskqueue_unblock(struct taskqueue *queue);
+int taskqueue_member(struct taskqueue *queue, struct thread *td);
+
+/*
+ * Functions for dedicated thread taskqueues
+ */
+void taskqueue_thread_loop(void *arg);
+void taskqueue_thread_enqueue(void *context);
+
+/*
+ * Initialise a task structure.
+ */
+#define TASK_INIT(task, priority, func, context) do { \
+ (task)->ta_pending = 0; \
+ (task)->ta_priority = (priority); \
+ (task)->ta_func = (func); \
+ (task)->ta_context = (context); \
+} while (0)
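+
+/*
+ * Usage sketch (an editorial example, not part of the original header;
+ * my_handler, my_task and my_softc are hypothetical): a task is
+ * initialised once and may be enqueued repeatedly; enqueueing a task
+ * that is already pending only increments ta_pending:
+ *
+ *	static void my_handler(void *context, int pending);
+ *	struct task my_task;
+ *
+ *	TASK_INIT(&my_task, 0, my_handler, &my_softc);
+ *	taskqueue_enqueue(taskqueue_thread, &my_task);
+ */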
+
+/*
+ * Declare a reference to a taskqueue.
+ */
+#define TASKQUEUE_DECLARE(name) \
+extern struct taskqueue *taskqueue_##name
+
+/*
+ * Define and initialise a global taskqueue that uses sleep mutexes.
+ */
+#define TASKQUEUE_DEFINE(name, enqueue, context, init) \
+ \
+struct taskqueue *taskqueue_##name; \
+ \
+static void \
+taskqueue_define_##name(void *arg) \
+{ \
+ taskqueue_##name = \
+ taskqueue_create(#name, M_NOWAIT, (enqueue), (context)); \
+ init; \
+} \
+ \
+SYSINIT(taskqueue_##name, SI_SUB_CONFIGURE, SI_ORDER_SECOND, \
+ taskqueue_define_##name, NULL); \
+ \
+struct __hack
+#define TASKQUEUE_DEFINE_THREAD(name) \
+TASKQUEUE_DEFINE(name, taskqueue_thread_enqueue, &taskqueue_##name, \
+ taskqueue_start_threads(&taskqueue_##name, 1, PWAIT, \
+ "%s taskq", #name))
+
+/*
+ * Define and initialise a global taskqueue that uses spin mutexes.
+ */
+#define TASKQUEUE_FAST_DEFINE(name, enqueue, context, init) \
+ \
+struct taskqueue *taskqueue_##name; \
+ \
+static void \
+taskqueue_define_##name(void *arg) \
+{ \
+ taskqueue_##name = \
+ taskqueue_create_fast(#name, M_NOWAIT, (enqueue), \
+ (context)); \
+ init; \
+} \
+ \
+SYSINIT(taskqueue_##name, SI_SUB_CONFIGURE, SI_ORDER_SECOND, \
+ taskqueue_define_##name, NULL); \
+ \
+struct __hack
+#define TASKQUEUE_FAST_DEFINE_THREAD(name) \
+TASKQUEUE_FAST_DEFINE(name, taskqueue_thread_enqueue, \
+	&taskqueue_##name, taskqueue_start_threads(&taskqueue_##name, \
+ 1, PWAIT, "%s taskq", #name))
+
+/*
+ * These queues are serviced by software interrupt handlers. To enqueue
+ * a task, call taskqueue_enqueue(taskqueue_swi, &task) or
+ * taskqueue_enqueue(taskqueue_swi_giant, &task).
+ */
+TASKQUEUE_DECLARE(swi_giant);
+TASKQUEUE_DECLARE(swi);
+
+/*
+ * This queue is serviced by a kernel thread. To enqueue a task, call
+ * taskqueue_enqueue(taskqueue_thread, &task).
+ */
+TASKQUEUE_DECLARE(thread);
+
+/*
+ * Queue for swi handlers dispatched from fast interrupt handlers.
+ * These are necessarily different from the above because the queue
+ * must be locked with spinlocks since sleep mutexes cannot be used
+ * from a fast interrupt handler context.
+ */
+TASKQUEUE_DECLARE(fast);
+int taskqueue_enqueue_fast(struct taskqueue *queue, struct task *task);
+struct taskqueue *taskqueue_create_fast(const char *name, int mflags,
+ taskqueue_enqueue_fn enqueue,
+ void *context);
+
+#endif /* !_SYS_TASKQUEUE_HH_ */
diff --git a/rtems/freebsd/sys/time.h b/rtems/freebsd/sys/time.h
new file mode 100644
index 00000000..ea52c8f3
--- /dev/null
+++ b/rtems/freebsd/sys/time.h
@@ -0,0 +1,351 @@
+/*-
+ * Copyright (c) 1982, 1986, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)time.h 8.5 (Berkeley) 5/4/95
+ * $FreeBSD$
+ */
+
+#ifndef _SYS_TIME_HH_
+#define _SYS_TIME_HH_
+
+#include <rtems/freebsd/sys/_timeval.h>
+#include <rtems/freebsd/sys/types.h>
+#include <rtems/freebsd/sys/timespec.h>
+
+#ifndef __rtems__
+struct timezone {
+ int tz_minuteswest; /* minutes west of Greenwich */
+ int tz_dsttime; /* type of dst correction */
+};
+#endif
+
+#define DST_NONE 0 /* not on dst */
+#define DST_USA 1 /* USA style dst */
+#define DST_AUST 2 /* Australian style dst */
+#define DST_WET 3 /* Western European dst */
+#define DST_MET 4 /* Middle European dst */
+#define DST_EET 5 /* Eastern European dst */
+#define DST_CAN 6 /* Canada */
+
+#if __BSD_VISIBLE
+struct bintime {
+ time_t sec;
+ uint64_t frac;
+};
+
+static __inline void
+bintime_addx(struct bintime *bt, uint64_t x)
+{
+ uint64_t u;
+
+ u = bt->frac;
+ bt->frac += x;
+ if (u > bt->frac)
+ bt->sec++;
+}
+
+static __inline void
+bintime_add(struct bintime *bt, const struct bintime *bt2)
+{
+ uint64_t u;
+
+ u = bt->frac;
+ bt->frac += bt2->frac;
+ if (u > bt->frac)
+ bt->sec++;
+ bt->sec += bt2->sec;
+}
+
+static __inline void
+bintime_sub(struct bintime *bt, const struct bintime *bt2)
+{
+ uint64_t u;
+
+ u = bt->frac;
+ bt->frac -= bt2->frac;
+ if (u < bt->frac)
+ bt->sec--;
+ bt->sec -= bt2->sec;
+}
+
+/*-
+ * Background information:
+ *
+ * When converting between timestamps on parallel timescales of differing
+ * resolutions it is historical and scientific practice to round down rather
+ * than doing 4/5 rounding.
+ *
+ * The date changes at midnight, not at noon.
+ *
+ * Even at 15:59:59.999999999 it's not four o'clock.
+ *
+ * time_second ticks after N.999999999 not after N.4999999999
+ */
+
+static __inline void
+bintime2timespec(const struct bintime *bt, struct timespec *ts)
+{
+
+ ts->tv_sec = bt->sec;
+ ts->tv_nsec = ((uint64_t)1000000000 * (uint32_t)(bt->frac >> 32)) >> 32;
+}
+
+static __inline void
+timespec2bintime(const struct timespec *ts, struct bintime *bt)
+{
+
+ bt->sec = ts->tv_sec;
+ /* 18446744073 = int(2^64 / 1000000000) */
+ bt->frac = ts->tv_nsec * (uint64_t)18446744073LL;
+}
+
+static __inline void
+bintime2timeval(const struct bintime *bt, struct timeval *tv)
+{
+
+ tv->tv_sec = bt->sec;
+ tv->tv_usec = ((uint64_t)1000000 * (uint32_t)(bt->frac >> 32)) >> 32;
+}
+
+static __inline void
+timeval2bintime(const struct timeval *tv, struct bintime *bt)
+{
+
+ bt->sec = tv->tv_sec;
+ /* 18446744073709 = int(2^64 / 1000000) */
+ bt->frac = tv->tv_usec * (uint64_t)18446744073709LL;
+}
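+
+/*
+ * Worked example (editorial, not part of the original header): the
+ * bintime fraction is in units of 2^-64 seconds, so tv_usec == 500000
+ * converts to frac == 500000 * 18446744073709, just under 2^63 (0.5 s).
+ * Converting back yields tv_usec == 499999 rather than 500000: the
+ * constants truncate 2^64 / 10^6 and, per the note above, conversions
+ * round down rather than doing 4/5 rounding.
+ */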
+#endif /* __BSD_VISIBLE */
+
+#ifdef _KERNEL
+
+/* Operations on timespecs */
+#define timespecclear(tvp) ((tvp)->tv_sec = (tvp)->tv_nsec = 0)
+#define timespecisset(tvp) ((tvp)->tv_sec || (tvp)->tv_nsec)
+#define timespeccmp(tvp, uvp, cmp) \
+ (((tvp)->tv_sec == (uvp)->tv_sec) ? \
+ ((tvp)->tv_nsec cmp (uvp)->tv_nsec) : \
+ ((tvp)->tv_sec cmp (uvp)->tv_sec))
+#define timespecadd(vvp, uvp) \
+ do { \
+ (vvp)->tv_sec += (uvp)->tv_sec; \
+ (vvp)->tv_nsec += (uvp)->tv_nsec; \
+ if ((vvp)->tv_nsec >= 1000000000) { \
+ (vvp)->tv_sec++; \
+ (vvp)->tv_nsec -= 1000000000; \
+ } \
+ } while (0)
+#define timespecsub(vvp, uvp) \
+ do { \
+ (vvp)->tv_sec -= (uvp)->tv_sec; \
+ (vvp)->tv_nsec -= (uvp)->tv_nsec; \
+ if ((vvp)->tv_nsec < 0) { \
+ (vvp)->tv_sec--; \
+ (vvp)->tv_nsec += 1000000000; \
+ } \
+ } while (0)
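+
+/*
+ * Usage sketch (an editorial example, not part of the original header):
+ * both macros modify their first argument in place and keep tv_nsec
+ * normalised to [0, 1000000000):
+ *
+ *	struct timespec end, start;	-- hypothetical timestamps
+ *	...
+ *	timespecsub(&end, &start);	-- end now holds the elapsed time
+ */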
+
+/* Operations on timevals. */
+
+#define timevalclear(tvp) ((tvp)->tv_sec = (tvp)->tv_usec = 0)
+#define timevalisset(tvp) ((tvp)->tv_sec || (tvp)->tv_usec)
+#define timevalcmp(tvp, uvp, cmp) \
+ (((tvp)->tv_sec == (uvp)->tv_sec) ? \
+ ((tvp)->tv_usec cmp (uvp)->tv_usec) : \
+ ((tvp)->tv_sec cmp (uvp)->tv_sec))
+
+/* timevaladd and timevalsub are not inlined */
+
+#endif /* _KERNEL */
+
+#ifndef _KERNEL /* NetBSD/OpenBSD compatible interfaces */
+
+#define timerclear(tvp) ((tvp)->tv_sec = (tvp)->tv_usec = 0)
+#define timerisset(tvp) ((tvp)->tv_sec || (tvp)->tv_usec)
+#ifndef __rtems__
+#define timercmp(tvp, uvp, cmp) \
+ (((tvp)->tv_sec == (uvp)->tv_sec) ? \
+ ((tvp)->tv_usec cmp (uvp)->tv_usec) : \
+ ((tvp)->tv_sec cmp (uvp)->tv_sec))
+#define timeradd(tvp, uvp, vvp) \
+ do { \
+ (vvp)->tv_sec = (tvp)->tv_sec + (uvp)->tv_sec; \
+ (vvp)->tv_usec = (tvp)->tv_usec + (uvp)->tv_usec; \
+ if ((vvp)->tv_usec >= 1000000) { \
+ (vvp)->tv_sec++; \
+ (vvp)->tv_usec -= 1000000; \
+ } \
+ } while (0)
+#define timersub(tvp, uvp, vvp) \
+ do { \
+ (vvp)->tv_sec = (tvp)->tv_sec - (uvp)->tv_sec; \
+ (vvp)->tv_usec = (tvp)->tv_usec - (uvp)->tv_usec; \
+ if ((vvp)->tv_usec < 0) { \
+ (vvp)->tv_sec--; \
+ (vvp)->tv_usec += 1000000; \
+ } \
+ } while (0)
+#endif
+#endif
+
+/*
+ * Names of the interval timers, and structure
+ * defining a timer setting.
+ */
+#define ITIMER_REAL 0
+#define ITIMER_VIRTUAL 1
+#define ITIMER_PROF 2
+
+#ifndef __rtems__
+struct itimerval {
+ struct timeval it_interval; /* timer interval */
+ struct timeval it_value; /* current value */
+};
+#endif
+
+/*
+ * Getkerninfo clock information structure
+ */
+struct clockinfo {
+ int hz; /* clock frequency */
+ int tick; /* micro-seconds per hz tick */
+ int spare;
+ int stathz; /* statistics clock frequency */
+ int profhz; /* profiling clock frequency */
+};
+
+/* These macros are also in time.h. */
+#ifndef CLOCK_REALTIME
+#define CLOCK_REALTIME 0
+#define CLOCK_VIRTUAL 1
+#define CLOCK_PROF 2
+#define CLOCK_MONOTONIC 4
+#define CLOCK_UPTIME 5 /* FreeBSD-specific. */
+#define CLOCK_UPTIME_PRECISE 7 /* FreeBSD-specific. */
+#define CLOCK_UPTIME_FAST 8 /* FreeBSD-specific. */
+#define CLOCK_REALTIME_PRECISE 9 /* FreeBSD-specific. */
+#define CLOCK_REALTIME_FAST 10 /* FreeBSD-specific. */
+#define CLOCK_MONOTONIC_PRECISE 11 /* FreeBSD-specific. */
+#define CLOCK_MONOTONIC_FAST 12 /* FreeBSD-specific. */
+#define CLOCK_SECOND 13 /* FreeBSD-specific. */
+#define CLOCK_THREAD_CPUTIME_ID 14
+#endif
+
+#ifndef TIMER_ABSTIME
+#define TIMER_RELTIME 0x0 /* relative timer */
+#define TIMER_ABSTIME 0x1 /* absolute timer */
+#endif
+
+#ifdef _KERNEL
+
+/*
+ * Kernel to clock driver interface.
+ */
+void inittodr(time_t base);
+void resettodr(void);
+
+extern time_t time_second;
+extern time_t time_uptime;
+extern struct timeval boottime;
+
+/*
+ * Functions for looking at our clock: [get]{bin,nano,micro}[up]time()
+ *
+ * Functions without the "get" prefix return the best timestamp
+ * we can produce in the given format.
+ *
+ * "bin" == struct bintime == seconds + 64 bit fraction of seconds.
+ * "nano" == struct timespec == seconds + nanoseconds.
+ * "micro" == struct timeval == seconds + microseconds.
+ *
+ * Functions containing "up" return time relative to boot and
+ * should be used for calculating time intervals.
+ *
+ * Functions without "up" return GMT time.
+ *
+ * Functions with the "get" prefix return a less precise result,
+ * much faster than the functions without the "get" prefix, and should
+ * be used where a precision of 10 msec is acceptable or where
+ * performance is a priority. (NB: "precision", _not_ "resolution"!)
+ *
+ */
+
+void binuptime(struct bintime *bt);
+void nanouptime(struct timespec *tsp);
+void microuptime(struct timeval *tvp);
+
+void bintime(struct bintime *bt);
+void nanotime(struct timespec *tsp);
+void microtime(struct timeval *tvp);
+
+void getbinuptime(struct bintime *bt);
+void getnanouptime(struct timespec *tsp);
+void getmicrouptime(struct timeval *tvp);
+
+void getbintime(struct bintime *bt);
+void getnanotime(struct timespec *tsp);
+void getmicrotime(struct timeval *tvp);
+
+/* Other functions */
+int itimerdecr(struct itimerval *itp, int usec);
+int itimerfix(struct timeval *tv);
+int ppsratecheck(struct timeval *, int *, int);
+int ratecheck(struct timeval *, const struct timeval *);
+void timevaladd(struct timeval *t1, const struct timeval *t2);
+void timevalsub(struct timeval *t1, const struct timeval *t2);
+int tvtohz(struct timeval *tv);
+#else /* !_KERNEL */
+#include <rtems/freebsd/time.h>
+
+#include <rtems/freebsd/sys/cdefs.h>
+#include <rtems/freebsd/sys/select.h>
+
+__BEGIN_DECLS
+int setitimer(int, const struct itimerval *, struct itimerval *);
+int utimes(const char *, const struct timeval *);
+
+#if __BSD_VISIBLE
+int adjtime(const struct timeval *, struct timeval *);
+int futimes(int, const struct timeval *);
+int futimesat(int, const char *, const struct timeval [2]);
+int lutimes(const char *, const struct timeval *);
+int settimeofday(const struct timeval *, const struct timezone *);
+#endif
+
+#ifndef __rtems__
+#if __XSI_VISIBLE
+int getitimer(int, struct itimerval *);
+int gettimeofday(struct timeval *, struct timezone *);
+#endif
+#endif
+
+__END_DECLS
+
+#endif /* !_KERNEL */
+
+#endif /* !_SYS_TIME_HH_ */
diff --git a/rtems/freebsd/sys/timespec.h b/rtems/freebsd/sys/timespec.h
new file mode 100644
index 00000000..936ffd88
--- /dev/null
+++ b/rtems/freebsd/sys/timespec.h
@@ -0,0 +1 @@
+/* EMPTY */
diff --git a/rtems/freebsd/sys/tree.h b/rtems/freebsd/sys/tree.h
new file mode 100644
index 00000000..040c9aae
--- /dev/null
+++ b/rtems/freebsd/sys/tree.h
@@ -0,0 +1,765 @@
+/* $NetBSD: tree.h,v 1.8 2004/03/28 19:38:30 provos Exp $ */
+/* $OpenBSD: tree.h,v 1.7 2002/10/17 21:51:54 art Exp $ */
+/* $FreeBSD$ */
+
+/*-
+ * Copyright 2002 Niels Provos <provos@citi.umich.edu>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _SYS_TREE_HH_
+#define _SYS_TREE_HH_
+
+#include <rtems/freebsd/sys/cdefs.h>
+
+/*
+ * This file defines data structures for different types of trees:
+ * splay trees and red-black trees.
+ *
+ * A splay tree is a self-organizing data structure. Every operation
+ * on the tree causes a splay to happen. The splay moves the requested
+ * node to the root of the tree and partly rebalances it.
+ *
+ * This has the benefit that request locality causes faster lookups as
+ * the requested nodes move to the top of the tree. On the other hand,
+ * every lookup causes memory writes.
+ *
+ * The Balance Theorem bounds the total access time for m operations
+ * and n inserts on an initially empty tree as O((m + n)lg n). The
+ * amortized cost for a sequence of m accesses to a splay tree is O(lg n).
+ *
+ * A red-black tree is a binary search tree with the node color as an
+ * extra attribute. It fulfills a set of conditions:
+ * - every search path from the root to a leaf consists of the
+ * same number of black nodes,
+ * - each red node (except for the root) has a black parent,
+ * - each leaf node is black.
+ *
+ * Every operation on a red-black tree is bounded as O(lg n).
+ * The maximum height of a red-black tree is 2lg (n+1).
+ */
+
+#define SPLAY_HEAD(name, type) \
+struct name { \
+ struct type *sph_root; /* root of the tree */ \
+}
+
+#define SPLAY_INITIALIZER(root) \
+ { NULL }
+
+#define SPLAY_INIT(root) do { \
+ (root)->sph_root = NULL; \
+} while (/*CONSTCOND*/ 0)
+
+#define SPLAY_ENTRY(type) \
+struct { \
+ struct type *spe_left; /* left element */ \
+ struct type *spe_right; /* right element */ \
+}
+
+#define SPLAY_LEFT(elm, field) (elm)->field.spe_left
+#define SPLAY_RIGHT(elm, field) (elm)->field.spe_right
+#define SPLAY_ROOT(head) (head)->sph_root
+#define SPLAY_EMPTY(head) (SPLAY_ROOT(head) == NULL)
+
+/* SPLAY_ROTATE_{LEFT,RIGHT} expect that tmp holds SPLAY_{RIGHT,LEFT} */
+#define SPLAY_ROTATE_RIGHT(head, tmp, field) do { \
+ SPLAY_LEFT((head)->sph_root, field) = SPLAY_RIGHT(tmp, field); \
+ SPLAY_RIGHT(tmp, field) = (head)->sph_root; \
+ (head)->sph_root = tmp; \
+} while (/*CONSTCOND*/ 0)
+
+#define SPLAY_ROTATE_LEFT(head, tmp, field) do { \
+ SPLAY_RIGHT((head)->sph_root, field) = SPLAY_LEFT(tmp, field); \
+ SPLAY_LEFT(tmp, field) = (head)->sph_root; \
+ (head)->sph_root = tmp; \
+} while (/*CONSTCOND*/ 0)
+
+#define SPLAY_LINKLEFT(head, tmp, field) do { \
+ SPLAY_LEFT(tmp, field) = (head)->sph_root; \
+ tmp = (head)->sph_root; \
+ (head)->sph_root = SPLAY_LEFT((head)->sph_root, field); \
+} while (/*CONSTCOND*/ 0)
+
+#define SPLAY_LINKRIGHT(head, tmp, field) do { \
+ SPLAY_RIGHT(tmp, field) = (head)->sph_root; \
+ tmp = (head)->sph_root; \
+ (head)->sph_root = SPLAY_RIGHT((head)->sph_root, field); \
+} while (/*CONSTCOND*/ 0)
+
+#define SPLAY_ASSEMBLE(head, node, left, right, field) do { \
+ SPLAY_RIGHT(left, field) = SPLAY_LEFT((head)->sph_root, field); \
+ SPLAY_LEFT(right, field) = SPLAY_RIGHT((head)->sph_root, field);\
+ SPLAY_LEFT((head)->sph_root, field) = SPLAY_RIGHT(node, field); \
+ SPLAY_RIGHT((head)->sph_root, field) = SPLAY_LEFT(node, field); \
+} while (/*CONSTCOND*/ 0)
+
+/* Generates prototypes and inline functions */
+
+#define SPLAY_PROTOTYPE(name, type, field, cmp) \
+void name##_SPLAY(struct name *, struct type *); \
+void name##_SPLAY_MINMAX(struct name *, int); \
+struct type *name##_SPLAY_INSERT(struct name *, struct type *); \
+struct type *name##_SPLAY_REMOVE(struct name *, struct type *); \
+ \
+/* Finds the node with the same key as elm */ \
+static __inline struct type * \
+name##_SPLAY_FIND(struct name *head, struct type *elm) \
+{ \
+ if (SPLAY_EMPTY(head)) \
+ return(NULL); \
+ name##_SPLAY(head, elm); \
+ if ((cmp)(elm, (head)->sph_root) == 0) \
+ return (head->sph_root); \
+ return (NULL); \
+} \
+ \
+static __inline struct type * \
+name##_SPLAY_NEXT(struct name *head, struct type *elm) \
+{ \
+ name##_SPLAY(head, elm); \
+ if (SPLAY_RIGHT(elm, field) != NULL) { \
+ elm = SPLAY_RIGHT(elm, field); \
+ while (SPLAY_LEFT(elm, field) != NULL) { \
+ elm = SPLAY_LEFT(elm, field); \
+ } \
+ } else \
+ elm = NULL; \
+ return (elm); \
+} \
+ \
+static __inline struct type * \
+name##_SPLAY_MIN_MAX(struct name *head, int val) \
+{ \
+ name##_SPLAY_MINMAX(head, val); \
+ return (SPLAY_ROOT(head)); \
+}
+
+/* Main splay operation.
+ * Moves the node closest to the key of elm to the root of the tree.
+ */
+#define SPLAY_GENERATE(name, type, field, cmp) \
+struct type * \
+name##_SPLAY_INSERT(struct name *head, struct type *elm) \
+{ \
+ if (SPLAY_EMPTY(head)) { \
+ SPLAY_LEFT(elm, field) = SPLAY_RIGHT(elm, field) = NULL; \
+ } else { \
+ int __comp; \
+ name##_SPLAY(head, elm); \
+ __comp = (cmp)(elm, (head)->sph_root); \
+ if(__comp < 0) { \
+ SPLAY_LEFT(elm, field) = SPLAY_LEFT((head)->sph_root, field);\
+ SPLAY_RIGHT(elm, field) = (head)->sph_root; \
+ SPLAY_LEFT((head)->sph_root, field) = NULL; \
+ } else if (__comp > 0) { \
+ SPLAY_RIGHT(elm, field) = SPLAY_RIGHT((head)->sph_root, field);\
+ SPLAY_LEFT(elm, field) = (head)->sph_root; \
+ SPLAY_RIGHT((head)->sph_root, field) = NULL; \
+ } else \
+ return ((head)->sph_root); \
+ } \
+ (head)->sph_root = (elm); \
+ return (NULL); \
+} \
+ \
+struct type * \
+name##_SPLAY_REMOVE(struct name *head, struct type *elm) \
+{ \
+ struct type *__tmp; \
+ if (SPLAY_EMPTY(head)) \
+ return (NULL); \
+ name##_SPLAY(head, elm); \
+ if ((cmp)(elm, (head)->sph_root) == 0) { \
+ if (SPLAY_LEFT((head)->sph_root, field) == NULL) { \
+ (head)->sph_root = SPLAY_RIGHT((head)->sph_root, field);\
+ } else { \
+ __tmp = SPLAY_RIGHT((head)->sph_root, field); \
+ (head)->sph_root = SPLAY_LEFT((head)->sph_root, field);\
+ name##_SPLAY(head, elm); \
+ SPLAY_RIGHT((head)->sph_root, field) = __tmp; \
+ } \
+ return (elm); \
+ } \
+ return (NULL); \
+} \
+ \
+void \
+name##_SPLAY(struct name *head, struct type *elm) \
+{ \
+ struct type __node, *__left, *__right, *__tmp; \
+ int __comp; \
+\
+ SPLAY_LEFT(&__node, field) = SPLAY_RIGHT(&__node, field) = NULL;\
+ __left = __right = &__node; \
+\
+ while ((__comp = (cmp)(elm, (head)->sph_root)) != 0) { \
+ if (__comp < 0) { \
+ __tmp = SPLAY_LEFT((head)->sph_root, field); \
+ if (__tmp == NULL) \
+ break; \
+ if ((cmp)(elm, __tmp) < 0){ \
+ SPLAY_ROTATE_RIGHT(head, __tmp, field); \
+ if (SPLAY_LEFT((head)->sph_root, field) == NULL)\
+ break; \
+ } \
+ SPLAY_LINKLEFT(head, __right, field); \
+ } else if (__comp > 0) { \
+ __tmp = SPLAY_RIGHT((head)->sph_root, field); \
+ if (__tmp == NULL) \
+ break; \
+ if ((cmp)(elm, __tmp) > 0){ \
+ SPLAY_ROTATE_LEFT(head, __tmp, field); \
+ if (SPLAY_RIGHT((head)->sph_root, field) == NULL)\
+ break; \
+ } \
+ SPLAY_LINKRIGHT(head, __left, field); \
+ } \
+ } \
+ SPLAY_ASSEMBLE(head, &__node, __left, __right, field); \
+} \
+ \
+/* Splay with either the minimum or the maximum element \
+ * Used to find minimum or maximum element in tree. \
+ */ \
+void name##_SPLAY_MINMAX(struct name *head, int __comp) \
+{ \
+ struct type __node, *__left, *__right, *__tmp; \
+\
+ SPLAY_LEFT(&__node, field) = SPLAY_RIGHT(&__node, field) = NULL;\
+ __left = __right = &__node; \
+\
+ while (1) { \
+ if (__comp < 0) { \
+ __tmp = SPLAY_LEFT((head)->sph_root, field); \
+ if (__tmp == NULL) \
+ break; \
+ if (__comp < 0){ \
+ SPLAY_ROTATE_RIGHT(head, __tmp, field); \
+ if (SPLAY_LEFT((head)->sph_root, field) == NULL)\
+ break; \
+ } \
+ SPLAY_LINKLEFT(head, __right, field); \
+ } else if (__comp > 0) { \
+ __tmp = SPLAY_RIGHT((head)->sph_root, field); \
+ if (__tmp == NULL) \
+ break; \
+ if (__comp > 0) { \
+ SPLAY_ROTATE_LEFT(head, __tmp, field); \
+ if (SPLAY_RIGHT((head)->sph_root, field) == NULL)\
+ break; \
+ } \
+ SPLAY_LINKRIGHT(head, __left, field); \
+ } \
+ } \
+ SPLAY_ASSEMBLE(head, &__node, __left, __right, field); \
+}
+
+#define SPLAY_NEGINF -1
+#define SPLAY_INF 1
+
+#define SPLAY_INSERT(name, x, y) name##_SPLAY_INSERT(x, y)
+#define SPLAY_REMOVE(name, x, y) name##_SPLAY_REMOVE(x, y)
+#define SPLAY_FIND(name, x, y) name##_SPLAY_FIND(x, y)
+#define SPLAY_NEXT(name, x, y) name##_SPLAY_NEXT(x, y)
+#define SPLAY_MIN(name, x) (SPLAY_EMPTY(x) ? NULL \
+ : name##_SPLAY_MIN_MAX(x, SPLAY_NEGINF))
+#define SPLAY_MAX(name, x) (SPLAY_EMPTY(x) ? NULL \
+ : name##_SPLAY_MIN_MAX(x, SPLAY_INF))
+
+#define SPLAY_FOREACH(x, name, head) \
+ for ((x) = SPLAY_MIN(name, head); \
+ (x) != NULL; \
+ (x) = SPLAY_NEXT(name, head, x))
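+
+/*
+ * Usage sketch (an editorial example, not part of the original header;
+ * the node type and comparison function are hypothetical):
+ *
+ *	struct node {
+ *		SPLAY_ENTRY(node) entry;
+ *		int key;
+ *	};
+ *	static int
+ *	nodecmp(struct node *a, struct node *b)
+ *	{
+ *		return (a->key < b->key ? -1 : a->key > b->key);
+ *	}
+ *	SPLAY_HEAD(nodetree, node) head = SPLAY_INITIALIZER(&head);
+ *	SPLAY_PROTOTYPE(nodetree, node, entry, nodecmp);
+ *	SPLAY_GENERATE(nodetree, node, entry, nodecmp);
+ *
+ * after which SPLAY_INSERT(nodetree, &head, elm), SPLAY_FIND() and
+ * SPLAY_FOREACH() operate on the tree.
+ */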
+
+/* Macros that define a red-black tree */
+#define RB_HEAD(name, type) \
+struct name { \
+ struct type *rbh_root; /* root of the tree */ \
+}
+
+#define RB_INITIALIZER(root) \
+ { NULL }
+
+#define RB_INIT(root) do { \
+ (root)->rbh_root = NULL; \
+} while (/*CONSTCOND*/ 0)
+
+#define RB_BLACK 0
+#define RB_RED 1
+#define RB_ENTRY(type) \
+struct { \
+ struct type *rbe_left; /* left element */ \
+ struct type *rbe_right; /* right element */ \
+ struct type *rbe_parent; /* parent element */ \
+ int rbe_color; /* node color */ \
+}
+
+#define RB_LEFT(elm, field) (elm)->field.rbe_left
+#define RB_RIGHT(elm, field) (elm)->field.rbe_right
+#define RB_PARENT(elm, field) (elm)->field.rbe_parent
+#define RB_COLOR(elm, field) (elm)->field.rbe_color
+#define RB_ROOT(head) (head)->rbh_root
+#define RB_EMPTY(head) (RB_ROOT(head) == NULL)
+
+#define RB_SET(elm, parent, field) do { \
+ RB_PARENT(elm, field) = parent; \
+ RB_LEFT(elm, field) = RB_RIGHT(elm, field) = NULL; \
+ RB_COLOR(elm, field) = RB_RED; \
+} while (/*CONSTCOND*/ 0)
+
+#define RB_SET_BLACKRED(black, red, field) do { \
+ RB_COLOR(black, field) = RB_BLACK; \
+ RB_COLOR(red, field) = RB_RED; \
+} while (/*CONSTCOND*/ 0)
+
+#ifndef RB_AUGMENT
+#define RB_AUGMENT(x) do {} while (0)
+#endif
+
+#define RB_ROTATE_LEFT(head, elm, tmp, field) do { \
+ (tmp) = RB_RIGHT(elm, field); \
+ if ((RB_RIGHT(elm, field) = RB_LEFT(tmp, field)) != NULL) { \
+ RB_PARENT(RB_LEFT(tmp, field), field) = (elm); \
+ } \
+ RB_AUGMENT(elm); \
+ if ((RB_PARENT(tmp, field) = RB_PARENT(elm, field)) != NULL) { \
+ if ((elm) == RB_LEFT(RB_PARENT(elm, field), field)) \
+ RB_LEFT(RB_PARENT(elm, field), field) = (tmp); \
+ else \
+ RB_RIGHT(RB_PARENT(elm, field), field) = (tmp); \
+ } else \
+ (head)->rbh_root = (tmp); \
+ RB_LEFT(tmp, field) = (elm); \
+ RB_PARENT(elm, field) = (tmp); \
+ RB_AUGMENT(tmp); \
+ if ((RB_PARENT(tmp, field))) \
+ RB_AUGMENT(RB_PARENT(tmp, field)); \
+} while (/*CONSTCOND*/ 0)
+
+#define RB_ROTATE_RIGHT(head, elm, tmp, field) do { \
+ (tmp) = RB_LEFT(elm, field); \
+ if ((RB_LEFT(elm, field) = RB_RIGHT(tmp, field)) != NULL) { \
+ RB_PARENT(RB_RIGHT(tmp, field), field) = (elm); \
+ } \
+ RB_AUGMENT(elm); \
+ if ((RB_PARENT(tmp, field) = RB_PARENT(elm, field)) != NULL) { \
+ if ((elm) == RB_LEFT(RB_PARENT(elm, field), field)) \
+ RB_LEFT(RB_PARENT(elm, field), field) = (tmp); \
+ else \
+ RB_RIGHT(RB_PARENT(elm, field), field) = (tmp); \
+ } else \
+ (head)->rbh_root = (tmp); \
+ RB_RIGHT(tmp, field) = (elm); \
+ RB_PARENT(elm, field) = (tmp); \
+ RB_AUGMENT(tmp); \
+ if ((RB_PARENT(tmp, field))) \
+ RB_AUGMENT(RB_PARENT(tmp, field)); \
+} while (/*CONSTCOND*/ 0)
+
+/* Generates prototypes and inline functions */
+#define RB_PROTOTYPE(name, type, field, cmp) \
+ RB_PROTOTYPE_INTERNAL(name, type, field, cmp,)
+#define RB_PROTOTYPE_STATIC(name, type, field, cmp) \
+ RB_PROTOTYPE_INTERNAL(name, type, field, cmp, __unused static)
+#define RB_PROTOTYPE_INTERNAL(name, type, field, cmp, attr) \
+attr void name##_RB_INSERT_COLOR(struct name *, struct type *); \
+attr void name##_RB_REMOVE_COLOR(struct name *, struct type *, struct type *);\
+attr struct type *name##_RB_REMOVE(struct name *, struct type *); \
+attr struct type *name##_RB_INSERT(struct name *, struct type *); \
+attr struct type *name##_RB_FIND(struct name *, struct type *); \
+attr struct type *name##_RB_NFIND(struct name *, struct type *); \
+attr struct type *name##_RB_NEXT(struct type *); \
+attr struct type *name##_RB_PREV(struct type *); \
+attr struct type *name##_RB_MINMAX(struct name *, int); \
+ \
+
+/* Main rb generation macro.
+ * Emits the insert/remove/find/next/prev/minmax functions for the tree.
+ */
+#define RB_GENERATE(name, type, field, cmp) \
+ RB_GENERATE_INTERNAL(name, type, field, cmp,)
+#define RB_GENERATE_STATIC(name, type, field, cmp) \
+ RB_GENERATE_INTERNAL(name, type, field, cmp, __unused static)
+#define RB_GENERATE_INTERNAL(name, type, field, cmp, attr) \
+attr void \
+name##_RB_INSERT_COLOR(struct name *head, struct type *elm) \
+{ \
+ struct type *parent, *gparent, *tmp; \
+ while ((parent = RB_PARENT(elm, field)) != NULL && \
+ RB_COLOR(parent, field) == RB_RED) { \
+ gparent = RB_PARENT(parent, field); \
+ if (parent == RB_LEFT(gparent, field)) { \
+ tmp = RB_RIGHT(gparent, field); \
+ if (tmp && RB_COLOR(tmp, field) == RB_RED) { \
+ RB_COLOR(tmp, field) = RB_BLACK; \
+ RB_SET_BLACKRED(parent, gparent, field);\
+ elm = gparent; \
+ continue; \
+ } \
+ if (RB_RIGHT(parent, field) == elm) { \
+ RB_ROTATE_LEFT(head, parent, tmp, field);\
+ tmp = parent; \
+ parent = elm; \
+ elm = tmp; \
+ } \
+ RB_SET_BLACKRED(parent, gparent, field); \
+ RB_ROTATE_RIGHT(head, gparent, tmp, field); \
+ } else { \
+ tmp = RB_LEFT(gparent, field); \
+ if (tmp && RB_COLOR(tmp, field) == RB_RED) { \
+ RB_COLOR(tmp, field) = RB_BLACK; \
+ RB_SET_BLACKRED(parent, gparent, field);\
+ elm = gparent; \
+ continue; \
+ } \
+ if (RB_LEFT(parent, field) == elm) { \
+ RB_ROTATE_RIGHT(head, parent, tmp, field);\
+ tmp = parent; \
+ parent = elm; \
+ elm = tmp; \
+ } \
+ RB_SET_BLACKRED(parent, gparent, field); \
+ RB_ROTATE_LEFT(head, gparent, tmp, field); \
+ } \
+ } \
+ RB_COLOR(head->rbh_root, field) = RB_BLACK; \
+} \
+ \
+attr void \
+name##_RB_REMOVE_COLOR(struct name *head, struct type *parent, struct type *elm) \
+{ \
+ struct type *tmp; \
+ while ((elm == NULL || RB_COLOR(elm, field) == RB_BLACK) && \
+ elm != RB_ROOT(head)) { \
+ if (RB_LEFT(parent, field) == elm) { \
+ tmp = RB_RIGHT(parent, field); \
+ if (RB_COLOR(tmp, field) == RB_RED) { \
+ RB_SET_BLACKRED(tmp, parent, field); \
+ RB_ROTATE_LEFT(head, parent, tmp, field);\
+ tmp = RB_RIGHT(parent, field); \
+ } \
+ if ((RB_LEFT(tmp, field) == NULL || \
+ RB_COLOR(RB_LEFT(tmp, field), field) == RB_BLACK) &&\
+ (RB_RIGHT(tmp, field) == NULL || \
+ RB_COLOR(RB_RIGHT(tmp, field), field) == RB_BLACK)) {\
+ RB_COLOR(tmp, field) = RB_RED; \
+ elm = parent; \
+ parent = RB_PARENT(elm, field); \
+ } else { \
+ if (RB_RIGHT(tmp, field) == NULL || \
+ RB_COLOR(RB_RIGHT(tmp, field), field) == RB_BLACK) {\
+ struct type *oleft; \
+ if ((oleft = RB_LEFT(tmp, field)) \
+ != NULL) \
+ RB_COLOR(oleft, field) = RB_BLACK;\
+ RB_COLOR(tmp, field) = RB_RED; \
+ RB_ROTATE_RIGHT(head, tmp, oleft, field);\
+ tmp = RB_RIGHT(parent, field); \
+ } \
+ RB_COLOR(tmp, field) = RB_COLOR(parent, field);\
+ RB_COLOR(parent, field) = RB_BLACK; \
+ if (RB_RIGHT(tmp, field)) \
+ RB_COLOR(RB_RIGHT(tmp, field), field) = RB_BLACK;\
+ RB_ROTATE_LEFT(head, parent, tmp, field);\
+ elm = RB_ROOT(head); \
+ break; \
+ } \
+ } else { \
+ tmp = RB_LEFT(parent, field); \
+ if (RB_COLOR(tmp, field) == RB_RED) { \
+ RB_SET_BLACKRED(tmp, parent, field); \
+ RB_ROTATE_RIGHT(head, parent, tmp, field);\
+ tmp = RB_LEFT(parent, field); \
+ } \
+ if ((RB_LEFT(tmp, field) == NULL || \
+ RB_COLOR(RB_LEFT(tmp, field), field) == RB_BLACK) &&\
+ (RB_RIGHT(tmp, field) == NULL || \
+ RB_COLOR(RB_RIGHT(tmp, field), field) == RB_BLACK)) {\
+ RB_COLOR(tmp, field) = RB_RED; \
+ elm = parent; \
+ parent = RB_PARENT(elm, field); \
+ } else { \
+ if (RB_LEFT(tmp, field) == NULL || \
+ RB_COLOR(RB_LEFT(tmp, field), field) == RB_BLACK) {\
+ struct type *oright; \
+ if ((oright = RB_RIGHT(tmp, field)) \
+ != NULL) \
+ RB_COLOR(oright, field) = RB_BLACK;\
+ RB_COLOR(tmp, field) = RB_RED; \
+ RB_ROTATE_LEFT(head, tmp, oright, field);\
+ tmp = RB_LEFT(parent, field); \
+ } \
+ RB_COLOR(tmp, field) = RB_COLOR(parent, field);\
+ RB_COLOR(parent, field) = RB_BLACK; \
+ if (RB_LEFT(tmp, field)) \
+ RB_COLOR(RB_LEFT(tmp, field), field) = RB_BLACK;\
+ RB_ROTATE_RIGHT(head, parent, tmp, field);\
+ elm = RB_ROOT(head); \
+ break; \
+ } \
+ } \
+ } \
+ if (elm) \
+ RB_COLOR(elm, field) = RB_BLACK; \
+} \
+ \
+attr struct type * \
+name##_RB_REMOVE(struct name *head, struct type *elm) \
+{ \
+ struct type *child, *parent, *old = elm; \
+ int color; \
+ if (RB_LEFT(elm, field) == NULL) \
+ child = RB_RIGHT(elm, field); \
+ else if (RB_RIGHT(elm, field) == NULL) \
+ child = RB_LEFT(elm, field); \
+ else { \
+ struct type *left; \
+ elm = RB_RIGHT(elm, field); \
+ while ((left = RB_LEFT(elm, field)) != NULL) \
+ elm = left; \
+ child = RB_RIGHT(elm, field); \
+ parent = RB_PARENT(elm, field); \
+ color = RB_COLOR(elm, field); \
+ if (child) \
+ RB_PARENT(child, field) = parent; \
+ if (parent) { \
+ if (RB_LEFT(parent, field) == elm) \
+ RB_LEFT(parent, field) = child; \
+ else \
+ RB_RIGHT(parent, field) = child; \
+ RB_AUGMENT(parent); \
+ } else \
+ RB_ROOT(head) = child; \
+ if (RB_PARENT(elm, field) == old) \
+ parent = elm; \
+ (elm)->field = (old)->field; \
+ if (RB_PARENT(old, field)) { \
+ if (RB_LEFT(RB_PARENT(old, field), field) == old)\
+ RB_LEFT(RB_PARENT(old, field), field) = elm;\
+ else \
+ RB_RIGHT(RB_PARENT(old, field), field) = elm;\
+ RB_AUGMENT(RB_PARENT(old, field)); \
+ } else \
+ RB_ROOT(head) = elm; \
+ RB_PARENT(RB_LEFT(old, field), field) = elm; \
+ if (RB_RIGHT(old, field)) \
+ RB_PARENT(RB_RIGHT(old, field), field) = elm; \
+ if (parent) { \
+ left = parent; \
+ do { \
+ RB_AUGMENT(left); \
+ } while ((left = RB_PARENT(left, field)) != NULL); \
+ } \
+ goto color; \
+ } \
+ parent = RB_PARENT(elm, field); \
+ color = RB_COLOR(elm, field); \
+ if (child) \
+ RB_PARENT(child, field) = parent; \
+ if (parent) { \
+ if (RB_LEFT(parent, field) == elm) \
+ RB_LEFT(parent, field) = child; \
+ else \
+ RB_RIGHT(parent, field) = child; \
+ RB_AUGMENT(parent); \
+ } else \
+ RB_ROOT(head) = child; \
+color: \
+ if (color == RB_BLACK) \
+ name##_RB_REMOVE_COLOR(head, parent, child); \
+ return (old); \
+} \
+ \
+/* Inserts a node into the RB tree */ \
+attr struct type * \
+name##_RB_INSERT(struct name *head, struct type *elm) \
+{ \
+ struct type *tmp; \
+ struct type *parent = NULL; \
+ int comp = 0; \
+ tmp = RB_ROOT(head); \
+ while (tmp) { \
+ parent = tmp; \
+ comp = (cmp)(elm, parent); \
+ if (comp < 0) \
+ tmp = RB_LEFT(tmp, field); \
+ else if (comp > 0) \
+ tmp = RB_RIGHT(tmp, field); \
+ else \
+ return (tmp); \
+ } \
+ RB_SET(elm, parent, field); \
+ if (parent != NULL) { \
+ if (comp < 0) \
+ RB_LEFT(parent, field) = elm; \
+ else \
+ RB_RIGHT(parent, field) = elm; \
+ RB_AUGMENT(parent); \
+ } else \
+ RB_ROOT(head) = elm; \
+ name##_RB_INSERT_COLOR(head, elm); \
+ return (NULL); \
+} \
+ \
+/* Finds the node with the same key as elm */ \
+attr struct type * \
+name##_RB_FIND(struct name *head, struct type *elm) \
+{ \
+ struct type *tmp = RB_ROOT(head); \
+ int comp; \
+ while (tmp) { \
+ comp = cmp(elm, tmp); \
+ if (comp < 0) \
+ tmp = RB_LEFT(tmp, field); \
+ else if (comp > 0) \
+ tmp = RB_RIGHT(tmp, field); \
+ else \
+ return (tmp); \
+ } \
+ return (NULL); \
+} \
+ \
+/* Finds the first node greater than or equal to the search key */ \
+attr struct type * \
+name##_RB_NFIND(struct name *head, struct type *elm) \
+{ \
+ struct type *tmp = RB_ROOT(head); \
+ struct type *res = NULL; \
+ int comp; \
+ while (tmp) { \
+ comp = cmp(elm, tmp); \
+ if (comp < 0) { \
+ res = tmp; \
+ tmp = RB_LEFT(tmp, field); \
+ } \
+ else if (comp > 0) \
+ tmp = RB_RIGHT(tmp, field); \
+ else \
+ return (tmp); \
+ } \
+ return (res); \
+} \
+ \
+/* ARGSUSED */ \
+attr struct type * \
+name##_RB_NEXT(struct type *elm) \
+{ \
+ if (RB_RIGHT(elm, field)) { \
+ elm = RB_RIGHT(elm, field); \
+ while (RB_LEFT(elm, field)) \
+ elm = RB_LEFT(elm, field); \
+ } else { \
+ if (RB_PARENT(elm, field) && \
+ (elm == RB_LEFT(RB_PARENT(elm, field), field))) \
+ elm = RB_PARENT(elm, field); \
+ else { \
+ while (RB_PARENT(elm, field) && \
+ (elm == RB_RIGHT(RB_PARENT(elm, field), field)))\
+ elm = RB_PARENT(elm, field); \
+ elm = RB_PARENT(elm, field); \
+ } \
+ } \
+ return (elm); \
+} \
+ \
+/* ARGSUSED */ \
+attr struct type * \
+name##_RB_PREV(struct type *elm) \
+{ \
+ if (RB_LEFT(elm, field)) { \
+ elm = RB_LEFT(elm, field); \
+ while (RB_RIGHT(elm, field)) \
+ elm = RB_RIGHT(elm, field); \
+ } else { \
+ if (RB_PARENT(elm, field) && \
+ (elm == RB_RIGHT(RB_PARENT(elm, field), field))) \
+ elm = RB_PARENT(elm, field); \
+ else { \
+ while (RB_PARENT(elm, field) && \
+ (elm == RB_LEFT(RB_PARENT(elm, field), field)))\
+ elm = RB_PARENT(elm, field); \
+ elm = RB_PARENT(elm, field); \
+ } \
+ } \
+ return (elm); \
+} \
+ \
+attr struct type * \
+name##_RB_MINMAX(struct name *head, int val) \
+{ \
+ struct type *tmp = RB_ROOT(head); \
+ struct type *parent = NULL; \
+ while (tmp) { \
+ parent = tmp; \
+ if (val < 0) \
+ tmp = RB_LEFT(tmp, field); \
+ else \
+ tmp = RB_RIGHT(tmp, field); \
+ } \
+ return (parent); \
+}
+
+#define RB_NEGINF -1
+#define RB_INF 1
+
+#define RB_INSERT(name, x, y) name##_RB_INSERT(x, y)
+#define RB_REMOVE(name, x, y) name##_RB_REMOVE(x, y)
+#define RB_FIND(name, x, y) name##_RB_FIND(x, y)
+#define RB_NFIND(name, x, y) name##_RB_NFIND(x, y)
+#define RB_NEXT(name, x, y) name##_RB_NEXT(y)
+#define RB_PREV(name, x, y) name##_RB_PREV(y)
+#define RB_MIN(name, x) name##_RB_MINMAX(x, RB_NEGINF)
+#define RB_MAX(name, x) name##_RB_MINMAX(x, RB_INF)
+
+#define RB_FOREACH(x, name, head) \
+ for ((x) = RB_MIN(name, head); \
+ (x) != NULL; \
+ (x) = name##_RB_NEXT(x))
+
+#define RB_FOREACH_FROM(x, name, y) \
+ for ((x) = (y); \
+ ((x) != NULL) && ((y) = name##_RB_NEXT(x), (x) != NULL); \
+ (x) = (y))
+
+#define RB_FOREACH_SAFE(x, name, head, y) \
+ for ((x) = RB_MIN(name, head); \
+ ((x) != NULL) && ((y) = name##_RB_NEXT(x), (x) != NULL); \
+ (x) = (y))
+
+#define RB_FOREACH_REVERSE(x, name, head) \
+ for ((x) = RB_MAX(name, head); \
+ (x) != NULL; \
+ (x) = name##_RB_PREV(x))
+
+#define RB_FOREACH_REVERSE_FROM(x, name, y) \
+ for ((x) = (y); \
+ ((x) != NULL) && ((y) = name##_RB_PREV(x), (x) != NULL); \
+ (x) = (y))
+
+#define RB_FOREACH_REVERSE_SAFE(x, name, head, y) \
+ for ((x) = RB_MAX(name, head); \
+ ((x) != NULL) && ((y) = name##_RB_PREV(x), (x) != NULL); \
+ (x) = (y))
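+
+/*
+ * Usage sketch (an editorial example, not part of the original header;
+ * the node type and comparison function are hypothetical, mirroring the
+ * splay example earlier in this file):
+ *
+ *	struct node {
+ *		RB_ENTRY(node) entry;
+ *		int key;
+ *	};
+ *	RB_HEAD(nodetree, node) head = RB_INITIALIZER(&head);
+ *	RB_PROTOTYPE(nodetree, node, entry, nodecmp);
+ *	RB_GENERATE(nodetree, node, entry, nodecmp);
+ *
+ * after which RB_INSERT(nodetree, &head, elm), RB_FIND(), RB_FOREACH()
+ * and the other RB_* wrappers operate on the tree.
+ */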
+
+#endif /* _SYS_TREE_HH_ */
diff --git a/rtems/freebsd/sys/ttycom.h b/rtems/freebsd/sys/ttycom.h
new file mode 100644
index 00000000..1f5c8e60
--- /dev/null
+++ b/rtems/freebsd/sys/ttycom.h
@@ -0,0 +1,146 @@
+/*-
+ * Copyright (c) 1982, 1986, 1990, 1993, 1994
+ * The Regents of the University of California. All rights reserved.
+ * (c) UNIX System Laboratories, Inc.
+ * All or some portions of this file are derived from material licensed
+ * to the University of California by American Telephone and Telegraph
+ * Co. or Unix System Laboratories, Inc. and are reproduced herein with
+ * the permission of UNIX System Laboratories, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)ttycom.h 8.1 (Berkeley) 3/28/94
+ * $FreeBSD$
+ */
+
+#ifndef _SYS_TTYCOM_HH_
+#define _SYS_TTYCOM_HH_
+
+#include <rtems/freebsd/sys/ioccom.h>
+
+/*
+ * Tty ioctl's except for those supported only for backwards compatibility
+ * with the old tty driver.
+ */
+
+/*
+ * Window/terminal size structure. This information is stored by the kernel
+ * in order to provide a consistent interface, but is not used by the kernel.
+ */
+struct winsize {
+ unsigned short ws_row; /* rows, in characters */
+ unsigned short ws_col; /* columns, in characters */
+ unsigned short ws_xpixel; /* horizontal size, pixels */
+ unsigned short ws_ypixel; /* vertical size, pixels */
+};
+
+ /* 0-2 compat */
+ /* 3-4 obsolete */
+ /* 5-7 obsolete or unused */
+ /* 8-10 compat */
+ /* 11-12 obsolete or unused */
+#define TIOCEXCL _IO('t', 13) /* set exclusive use of tty */
+#define TIOCNXCL _IO('t', 14) /* reset exclusive use of tty */
+#define TIOCGPTN _IOR('t', 15, int) /* Get pts number. */
+#define TIOCFLUSH _IOW('t', 16, int) /* flush buffers */
+ /* 17-18 compat */
+#define TIOCGETA _IOR('t', 19, struct termios) /* get termios struct */
+#define TIOCSETA _IOW('t', 20, struct termios) /* set termios struct */
+#define TIOCSETAW _IOW('t', 21, struct termios) /* drain output, set */
+#define TIOCSETAF _IOW('t', 22, struct termios) /* drn out, fls in, set */
+ /* 23-25 obsolete or unused */
+#define TIOCGETD _IOR('t', 26, int) /* get line discipline */
+#define TIOCSETD _IOW('t', 27, int) /* set line discipline */
+#define TIOCPTMASTER _IO('t', 28) /* pts master validation */
+ /* 29-69 free */
+ /* 80-84 slip */
+#define TIOCGDRAINWAIT _IOR('t', 86, int) /* get ttywait timeout */
+#define TIOCSDRAINWAIT _IOW('t', 87, int) /* set ttywait timeout */
+ /* 88 slip, ppp; conflicts */
+#define TIOCTIMESTAMP _IOR('t', 89, struct timeval) /* enable/get timestamp
+ * of last input event */
+ /* 70-90 ppp; many conflicts */
+#define TIOCMGDTRWAIT _IOR('t', 90, int) /* modem: get wait on close */
+#define TIOCMSDTRWAIT _IOW('t', 91, int) /* modem: set wait on close */
+ /* 90-92 tap; some conflicts */
+#define TIOCDRAIN _IO('t', 94) /* wait till output drained */
+#define TIOCSIG _IOWINT('t', 95) /* pty: generate signal */
+#define TIOCEXT _IOW('t', 96, int) /* pty: external processing */
+ /* 90-97 tun; some conflicts */
+#define TIOCSCTTY _IO('t', 97) /* become controlling tty */
+#define TIOCCONS _IOW('t', 98, int) /* become virtual console */
+#define TIOCGSID _IOR('t', 99, int) /* get session id */
+ /* 100 see consio.h */
+#define TIOCSTAT _IO('t', 101) /* simulate ^T status message */
+#define TIOCUCNTL _IOW('t', 102, int) /* pty: set/clr usr cntl mode */
+#define UIOCCMD(n) _IO('u', n) /* usr cntl op "n" */
+#define TIOCSWINSZ _IOW('t', 103, struct winsize) /* set window size */
+#define TIOCGWINSZ _IOR('t', 104, struct winsize) /* get window size */
+#define TIOCMGET _IOR('t', 106, int) /* get all modem bits */
+#define TIOCM_LE 0001 /* line enable */
+#define TIOCM_DTR 0002 /* data terminal ready */
+#define TIOCM_RTS 0004 /* request to send */
+#define TIOCM_ST 0010 /* secondary transmit */
+#define TIOCM_SR 0020 /* secondary receive */
+#define TIOCM_CTS 0040 /* clear to send */
+#define TIOCM_DCD 0100 /* data carrier detect */
+#define TIOCM_RI 0200 /* ring indicate */
+#define TIOCM_DSR 0400 /* data set ready */
+#define TIOCM_CD TIOCM_DCD
+#define TIOCM_CAR TIOCM_DCD
+#define TIOCM_RNG TIOCM_RI
+#define TIOCMBIC _IOW('t', 107, int) /* bic modem bits */
+#define TIOCMBIS _IOW('t', 108, int) /* bis modem bits */
+#define TIOCMSET _IOW('t', 109, int) /* set all modem bits */
+#define TIOCSTART _IO('t', 110) /* start output, like ^Q */
+#define TIOCSTOP _IO('t', 111) /* stop output, like ^S */
+#define TIOCPKT _IOW('t', 112, int) /* pty: set/clear packet mode */
+#define TIOCPKT_DATA 0x00 /* data packet */
+#define TIOCPKT_FLUSHREAD 0x01 /* flush packet */
+#define TIOCPKT_FLUSHWRITE 0x02 /* flush packet */
+#define TIOCPKT_STOP 0x04 /* stop output */
+#define TIOCPKT_START 0x08 /* start output */
+#define TIOCPKT_NOSTOP 0x10 /* no more ^S, ^Q */
+#define TIOCPKT_DOSTOP 0x20 /* now do ^S ^Q */
+#define TIOCPKT_IOCTL 0x40 /* state change of pty driver */
+#define TIOCNOTTY _IO('t', 113) /* void tty association */
+#define TIOCSTI _IOW('t', 114, char) /* simulate terminal input */
+#define TIOCOUTQ _IOR('t', 115, int) /* output queue size */
+ /* 116-117 compat */
+#define TIOCSPGRP _IOW('t', 118, int) /* set pgrp of tty */
+#define TIOCGPGRP _IOR('t', 119, int) /* get pgrp of tty */
+#define TIOCCDTR _IO('t', 120) /* clear data terminal ready */
+#define TIOCSDTR _IO('t', 121) /* set data terminal ready */
+#define TIOCCBRK _IO('t', 122) /* clear break bit */
+#define TIOCSBRK _IO('t', 123) /* set break bit */
+ /* 124-127 compat */
+
+#define TTYDISC 0 /* termios tty line discipline */
+#define SLIPDISC 4 /* serial IP discipline */
+#define PPPDISC 5 /* PPP discipline */
+#define NETGRAPHDISC 6 /* Netgraph tty node discipline */
+#define H4DISC 7 /* Netgraph Bluetooth H4 discipline */
+
+#endif /* !_SYS_TTYCOM_HH_ */
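As an illustration of the winsize/TIOCGWINSZ pairing above, a userspace program can query the dimensions the kernel has stored for a terminal like this (assuming the standard input is a tty):

#include <stdio.h>
#include <sys/ioctl.h>		/* TIOCGWINSZ and struct winsize */
#include <unistd.h>

int
main(void)
{
	struct winsize ws;

	/* Fetch the window size the kernel has stored for this tty. */
	if (ioctl(STDIN_FILENO, TIOCGWINSZ, &ws) == -1) {
		perror("TIOCGWINSZ");
		return (1);
	}
	printf("%hu rows x %hu columns\n", ws.ws_row, ws.ws_col);
	return (0);
}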
diff --git a/rtems/freebsd/sys/types.h b/rtems/freebsd/sys/types.h
new file mode 100644
index 00000000..b99f6b8d
--- /dev/null
+++ b/rtems/freebsd/sys/types.h
@@ -0,0 +1,360 @@
+/*-
+ * Copyright (c) 1982, 1986, 1991, 1993, 1994
+ * The Regents of the University of California. All rights reserved.
+ * (c) UNIX System Laboratories, Inc.
+ * All or some portions of this file are derived from material licensed
+ * to the University of California by American Telephone and Telegraph
+ * Co. or Unix System Laboratories, Inc. and are reproduced herein with
+ * the permission of UNIX System Laboratories, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)types.h 8.6 (Berkeley) 2/19/95
+ * $FreeBSD$
+ */
+
+#ifndef _SYS_TYPES_HH_
+#define _SYS_TYPES_HH_
+
+#include <rtems/freebsd/sys/cdefs.h>
+
+/* Machine type dependent parameters. */
+#include <rtems/freebsd/machine/endian.h>
+#include <rtems/freebsd/sys/_types.h>
+
+#include <rtems/freebsd/sys/_pthreadtypes.h>
+
+#if __BSD_VISIBLE
+typedef unsigned char u_char;
+typedef unsigned short u_short;
+typedef unsigned int u_int;
+typedef unsigned long u_long;
+#ifndef _KERNEL
+typedef unsigned short ushort; /* Sys V compatibility */
+typedef unsigned int uint; /* Sys V compatibility */
+#endif
+#endif
+
+/*
+ * XXX POSIX sized integrals that should appear only in <sys/stdint.h>.
+ */
+#ifndef _INT8_T_DECLARED
+typedef __int8_t int8_t;
+#define _INT8_T_DECLARED
+#endif
+
+#ifndef _INT16_T_DECLARED
+typedef __int16_t int16_t;
+#define _INT16_T_DECLARED
+#endif
+
+#ifndef _INT32_T_DECLARED
+typedef __int32_t int32_t;
+#define _INT32_T_DECLARED
+#endif
+
+#ifndef _INT64_T_DECLARED
+typedef __int64_t int64_t;
+#define _INT64_T_DECLARED
+#endif
+
+#ifndef _UINT8_T_DECLARED
+typedef __uint8_t uint8_t;
+#define _UINT8_T_DECLARED
+#endif
+
+#ifndef _UINT16_T_DECLARED
+typedef __uint16_t uint16_t;
+#define _UINT16_T_DECLARED
+#endif
+
+#ifndef _UINT32_T_DECLARED
+typedef __uint32_t uint32_t;
+#define _UINT32_T_DECLARED
+#endif
+
+#ifndef _UINT64_T_DECLARED
+typedef __uint64_t uint64_t;
+#define _UINT64_T_DECLARED
+#endif
+
+#ifndef _INTPTR_T_DECLARED
+typedef __intptr_t intptr_t;
+typedef __uintptr_t uintptr_t;
+#define _INTPTR_T_DECLARED
+#endif
+
+#ifndef __rtems__
+typedef __uint8_t u_int8_t; /* unsigned integrals (deprecated) */
+typedef __uint16_t u_int16_t;
+typedef __uint32_t u_int32_t;
+typedef __uint64_t u_int64_t;
+
+typedef __uint64_t u_quad_t; /* quads (deprecated) */
+typedef __int64_t quad_t;
+typedef quad_t * qaddr_t;
+
+typedef char * caddr_t; /* core address */
+#endif /* __rtems__ */
+typedef __const char * c_caddr_t; /* core address, pointer to const */
+typedef __volatile char *v_caddr_t; /* core address, pointer to volatile */
+
+#ifndef _BLKSIZE_T_DECLARED
+typedef __blksize_t blksize_t;
+#define _BLKSIZE_T_DECLARED
+#endif
+
+typedef __cpuwhich_t cpuwhich_t;
+typedef __cpulevel_t cpulevel_t;
+typedef __cpusetid_t cpusetid_t;
+
+#ifndef _BLKCNT_T_DECLARED
+typedef __blkcnt_t blkcnt_t;
+#define _BLKCNT_T_DECLARED
+#endif
+
+#ifndef _CLOCK_T_DECLARED
+typedef __clock_t clock_t;
+#define _CLOCK_T_DECLARED
+#endif
+
+#ifndef _CLOCKID_T_DECLARED
+typedef __clockid_t clockid_t;
+#define _CLOCKID_T_DECLARED
+#endif
+
+typedef __cpumask_t cpumask_t;
+typedef __critical_t critical_t; /* Critical section value */
+#ifndef __rtems__
+typedef __int64_t daddr_t; /* disk address */
+#endif /* __rtems__ */
+
+#ifndef _DEV_T_DECLARED
+typedef __dev_t dev_t; /* device number or struct cdev */
+#define _DEV_T_DECLARED
+#endif
+
+#ifndef _FFLAGS_T_DECLARED
+typedef __fflags_t fflags_t; /* file flags */
+#define _FFLAGS_T_DECLARED
+#endif
+
+typedef __fixpt_t fixpt_t; /* fixed point number */
+
+#ifndef _FSBLKCNT_T_DECLARED /* for statvfs() */
+typedef __fsblkcnt_t fsblkcnt_t;
+typedef __fsfilcnt_t fsfilcnt_t;
+#define _FSBLKCNT_T_DECLARED
+#endif
+
+#ifndef _GID_T_DECLARED
+typedef __gid_t gid_t; /* group id */
+#define _GID_T_DECLARED
+#endif
+
+#ifndef _IN_ADDR_T_DECLARED
+typedef __uint32_t in_addr_t; /* base type for internet address */
+#define _IN_ADDR_T_DECLARED
+#endif
+
+#ifndef _IN_PORT_T_DECLARED
+typedef __uint16_t in_port_t;
+#define _IN_PORT_T_DECLARED
+#endif
+
+#ifndef _ID_T_DECLARED
+typedef __id_t id_t; /* can hold a uid_t or pid_t */
+#define _ID_T_DECLARED
+#endif
+
+#ifndef _INO_T_DECLARED
+typedef __ino_t ino_t; /* inode number */
+#define _INO_T_DECLARED
+#endif
+
+#ifndef _KEY_T_DECLARED
+typedef __key_t key_t; /* IPC key (for Sys V IPC) */
+#define _KEY_T_DECLARED
+#endif
+
+#ifndef _LWPID_T_DECLARED
+typedef __lwpid_t lwpid_t; /* Thread ID (a.k.a. LWP) */
+#define _LWPID_T_DECLARED
+#endif
+
+#ifndef _MODE_T_DECLARED
+typedef __mode_t mode_t; /* permissions */
+#define _MODE_T_DECLARED
+#endif
+
+#ifndef _ACCMODE_T_DECLARED
+typedef __accmode_t accmode_t; /* access permissions */
+#define _ACCMODE_T_DECLARED
+#endif
+
+#ifndef _NLINK_T_DECLARED
+typedef __nlink_t nlink_t; /* link count */
+#define _NLINK_T_DECLARED
+#endif
+
+#ifndef _OFF_T_DECLARED
+typedef __off_t off_t; /* file offset */
+#define _OFF_T_DECLARED
+#endif
+
+#ifndef _PID_T_DECLARED
+typedef __pid_t pid_t; /* process id */
+#define _PID_T_DECLARED
+#endif
+
+typedef __register_t register_t;
+
+#ifndef _RLIM_T_DECLARED
+typedef __rlim_t rlim_t; /* resource limit */
+#define _RLIM_T_DECLARED
+#endif
+
+typedef __segsz_t segsz_t; /* segment size (in pages) */
+
+#ifndef _SIZE_T_DECLARED
+typedef __size_t size_t;
+#define _SIZE_T_DECLARED
+#endif
+
+#ifndef _SSIZE_T_DECLARED
+typedef __ssize_t ssize_t;
+#define _SSIZE_T_DECLARED
+#endif
+
+#ifndef _SUSECONDS_T_DECLARED
+typedef __suseconds_t suseconds_t; /* microseconds (signed) */
+#define _SUSECONDS_T_DECLARED
+#endif
+
+#ifndef _TIME_T_DECLARED
+typedef __time_t time_t;
+#define _TIME_T_DECLARED
+#endif
+
+#ifndef _TIMER_T_DECLARED
+typedef __timer_t timer_t;
+#define _TIMER_T_DECLARED
+#endif
+
+#ifndef _MQD_T_DECLARED
+typedef __mqd_t mqd_t;
+#define _MQD_T_DECLARED
+#endif
+
+typedef __u_register_t u_register_t;
+
+#ifndef _UID_T_DECLARED
+typedef __uid_t uid_t; /* user id */
+#define _UID_T_DECLARED
+#endif
+
+#ifndef _USECONDS_T_DECLARED
+typedef __useconds_t useconds_t; /* microseconds (unsigned) */
+#define _USECONDS_T_DECLARED
+#endif
+
+typedef __vm_offset_t vm_offset_t;
+typedef __vm_ooffset_t vm_ooffset_t;
+typedef __vm_paddr_t vm_paddr_t;
+typedef __vm_pindex_t vm_pindex_t;
+typedef __vm_size_t vm_size_t;
+
+#ifdef _KERNEL
+typedef int boolean_t;
+typedef struct device *device_t;
+typedef __intfptr_t intfptr_t;
+
+/*-
+ * XXX this is fixed width for historical reasons. It should have had type
+ * __int_fast32_t. Fixed-width types should not be used unless binary
+ * compatibility is essential. Least-width types should be used even less
+ * since they provide smaller benefits.
+ * XXX should be MD.
+ * XXX this is bogus in -current, but still used for spl*().
+ */
+typedef __uint32_t intrmask_t; /* Interrupt mask (spl, xxx_imask...) */
+
+typedef __uintfptr_t uintfptr_t;
+typedef __uint64_t uoff_t;
+typedef char vm_memattr_t; /* memory attribute codes */
+typedef struct vm_page *vm_page_t;
+
+#ifndef __rtems__
+#define offsetof(type, field) __offsetof(type, field)
+#endif /* __rtems__ */
+
+#endif /* _KERNEL */
+
+/*
+ * The following are all things that really shouldn't exist in this header,
+ * since its purpose is to provide typedefs, not miscellaneous doodads.
+ */
+#if __BSD_VISIBLE
+
+#include <rtems/freebsd/sys/select.h>
+
+/*
+ * minor() gives a cookie instead of an index since we don't want to
+ * change the meanings of bits 0-15 or waste time and space shifting
+ * bits 16-31 for devices that don't use them.
+ */
+#define major(x) ((int)(((u_int)(x) >> 8)&0xff)) /* major number */
+#define minor(x) ((int)((x)&0xffff00ff)) /* minor number */
+#define makedev(x,y) ((dev_t)(((x) << 8) | (y))) /* create dev_t */
+
+/*
+ * These declarations belong elsewhere, but are repeated here and in
+ * <stdio.h> to give broken programs a better chance of working with
+ * 64-bit off_t's.
+ */
+#ifndef _KERNEL
+__BEGIN_DECLS
+#ifndef _FTRUNCATE_DECLARED
+#define _FTRUNCATE_DECLARED
+int ftruncate(int, off_t);
+#endif
+#ifndef _LSEEK_DECLARED
+#define _LSEEK_DECLARED
+off_t lseek(int, off_t, int);
+#endif
+#ifndef _MMAP_DECLARED
+#define _MMAP_DECLARED
+void * mmap(void *, size_t, int, int, int, off_t);
+#endif
+#ifndef _TRUNCATE_DECLARED
+#define _TRUNCATE_DECLARED
+int truncate(const char *, off_t);
+#endif
+__END_DECLS
+#endif /* !_KERNEL */
+
+#endif /* __BSD_VISIBLE */
+
+#endif /* !_SYS_TYPES_HH_ */
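The major()/minor()/makedev() cookie encoding above can be sanity-checked with a short round trip; the device numbers here are arbitrary:

#include <stdio.h>
#include <sys/types.h>

int
main(void)
{
	/* The 8-bit major number lives in bits 8-15; the rest is minor. */
	dev_t d = makedev(13, 42);

	printf("major=%d minor=%d\n", major(d), minor(d));	/* 13, 42 */
	return (0);
}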
diff --git a/rtems/freebsd/sys/ucontext.h b/rtems/freebsd/sys/ucontext.h
new file mode 100644
index 00000000..2c9bed19
--- /dev/null
+++ b/rtems/freebsd/sys/ucontext.h
@@ -0,0 +1,99 @@
+/*-
+ * Copyright (c) 1999 Marcel Moolenaar
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer
+ * in this position and unchanged.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _SYS_UCONTEXT_HH_
+#define _SYS_UCONTEXT_HH_
+
+#include <rtems/freebsd/sys/signal.h>
+#include <rtems/freebsd/machine/ucontext.h>
+
+typedef struct __ucontext {
+ /*
+	 * Keep the first two fields in this order, and keep them
+	 * at the start of the structure.  This way we can have a
+	 * union with struct sigcontext and ucontext_t, which lets
+	 * us support both at the same time.
+	 * Note: the union itself is not defined, though.
+ */
+ sigset_t uc_sigmask;
+ mcontext_t uc_mcontext;
+
+ struct __ucontext *uc_link;
+ stack_t uc_stack;
+ int uc_flags;
+#define UCF_SWAPPED 0x00000001 /* Used by swapcontext(3). */
+ int __spare__[4];
+} ucontext_t;
+
+#if defined(_KERNEL) && defined(COMPAT_FREEBSD4)
+#if defined(__i386__)
+struct ucontext4 {
+ sigset_t uc_sigmask;
+ struct mcontext4 uc_mcontext;
+ struct ucontext4 *uc_link;
+ stack_t uc_stack;
+ int __spare__[8];
+};
+#else /* __i386__ */
+#define ucontext4 ucontext
+#endif /* __i386__ */
+#endif /* _KERNEL */
+
+#ifndef _KERNEL
+
+__BEGIN_DECLS
+
+int getcontext(ucontext_t *);
+int setcontext(const ucontext_t *);
+void makecontext(ucontext_t *, void (*)(void), int, ...);
+int signalcontext(ucontext_t *, int, __sighandler_t *);
+int swapcontext(ucontext_t *, const ucontext_t *);
+
+__END_DECLS
+
+#else /* _KERNEL */
+
+struct thread;
+
+/*
+ * Flags for get_mcontext(). The low order 4 bits (i.e a mask of 0x0f) are
+ * reserved for use by machine independent code. All other bits are for use
+ * by machine dependent code.
+ */
+#define GET_MC_CLEAR_RET 1
+
+/* Machine-dependent functions: */
+int get_mcontext(struct thread *, mcontext_t *, int);
+int set_mcontext(struct thread *, const mcontext_t *);
+
+#endif /* !_KERNEL */
+
+#endif /* !_SYS_UCONTEXT_HH_ */
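A minimal userspace sketch of the context functions declared above, using uc_link to return to the caller; the stack size and function names are illustrative:

#include <stdio.h>
#include <ucontext.h>

static ucontext_t main_ctx, coro_ctx;
static char coro_stack[64 * 1024];

static void
coro(void)
{
	printf("in coroutine\n");
	/* Returning resumes uc_link, i.e. main_ctx. */
}

int
main(void)
{
	getcontext(&coro_ctx);
	coro_ctx.uc_stack.ss_sp = coro_stack;
	coro_ctx.uc_stack.ss_size = sizeof(coro_stack);
	coro_ctx.uc_link = &main_ctx;
	makecontext(&coro_ctx, coro, 0);

	swapcontext(&main_ctx, &coro_ctx);	/* run coro, then come back */
	printf("back in main\n");
	return (0);
}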
diff --git a/rtems/freebsd/sys/ucred.h b/rtems/freebsd/sys/ucred.h
new file mode 100644
index 00000000..0a3df629
--- /dev/null
+++ b/rtems/freebsd/sys/ucred.h
@@ -0,0 +1,110 @@
+/*-
+ * Copyright (c) 1989, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)ucred.h 8.4 (Berkeley) 1/9/95
+ * $FreeBSD$
+ */
+
+#ifndef _SYS_UCRED_HH_
+#define _SYS_UCRED_HH_
+
+#include <rtems/freebsd/bsm/audit.h>
+
+/*
+ * Credentials.
+ *
+ * Please do not inspect cr_uid directly to determine superuserness. The
+ * priv(9) interface should be used to check for privilege.
+ */
+#if defined(_KERNEL) || defined(_WANT_UCRED)
+struct ucred {
+ u_int cr_ref; /* reference count */
+#define cr_startcopy cr_uid
+ uid_t cr_uid; /* effective user id */
+ uid_t cr_ruid; /* real user id */
+ uid_t cr_svuid; /* saved user id */
+ int cr_ngroups; /* number of groups */
+ gid_t cr_rgid; /* real group id */
+ gid_t cr_svgid; /* saved group id */
+ struct uidinfo *cr_uidinfo; /* per euid resource consumption */
+ struct uidinfo *cr_ruidinfo; /* per ruid resource consumption */
+ struct prison *cr_prison; /* jail(2) */
+ void *cr_pspare; /* general use */
+ u_int cr_flags; /* credential flags */
+ void *cr_pspare2[2]; /* general use 2 */
+#define cr_endcopy cr_label
+ struct label *cr_label; /* MAC label */
+ struct auditinfo_addr cr_audit; /* Audit properties. */
+ gid_t *cr_groups; /* groups */
+ int cr_agroups; /* Available groups */
+};
+#define NOCRED ((struct ucred *)0) /* no credential available */
+#define FSCRED ((struct ucred *)-1) /* filesystem credential */
+#endif /* _KERNEL || _WANT_UCRED */
+
+#define XU_NGROUPS 16
+
+/*
+ * This is the external representation of struct ucred.
+ */
+struct xucred {
+ u_int cr_version; /* structure layout version */
+ uid_t cr_uid; /* effective user id */
+ short cr_ngroups; /* number of groups */
+ gid_t cr_groups[XU_NGROUPS]; /* groups */
+ void *_cr_unused1; /* compatibility with old ucred */
+};
+#define XUCRED_VERSION 0
+
+/* This can be used for both ucred and xucred structures. */
+#define cr_gid cr_groups[0]
+
+#ifdef _KERNEL
+struct proc;
+struct thread;
+
+void change_egid(struct ucred *newcred, gid_t egid);
+void change_euid(struct ucred *newcred, struct uidinfo *euip);
+void change_rgid(struct ucred *newcred, gid_t rgid);
+void change_ruid(struct ucred *newcred, struct uidinfo *ruip);
+void change_svgid(struct ucred *newcred, gid_t svgid);
+void change_svuid(struct ucred *newcred, uid_t svuid);
+void crcopy(struct ucred *dest, struct ucred *src);
+struct ucred *crcopysafe(struct proc *p, struct ucred *cr);
+struct ucred *crdup(struct ucred *cr);
+void cred_update_thread(struct thread *td);
+void crfree(struct ucred *cr);
+struct ucred *crget(void);
+struct ucred *crhold(struct ucred *cr);
+int crshared(struct ucred *cr);
+void cru2x(struct ucred *cr, struct xucred *xcr);
+void crsetgroups(struct ucred *cr, int n, gid_t *groups);
+int groupmember(gid_t gid, struct ucred *cred);
+#endif /* _KERNEL */
+
+#endif /* !_SYS_UCRED_HH_ */
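The reference-counting and membership helpers declared above are typically used together; a kernel-context sketch only, not compilable in userspace, where struct thread's td_ucred member and the gid value 0 are assumptions for illustration:

/*
 * Kernel-context sketch: take a reference on the caller's credential,
 * test group membership through the declared API, then drop it.
 */
static int
caller_in_group_zero(struct thread *td)
{
	struct ucred *cred;
	int result;

	cred = crhold(td->td_ucred);	/* bump cr_ref */
	result = groupmember(0, cred);	/* non-zero if gid 0 is in cr_groups */
	crfree(cred);			/* drop the reference again */
	return (result);
}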
diff --git a/rtems/freebsd/sys/uio.h b/rtems/freebsd/sys/uio.h
new file mode 100644
index 00000000..9876b88a
--- /dev/null
+++ b/rtems/freebsd/sys/uio.h
@@ -0,0 +1,117 @@
+/*-
+ * Copyright (c) 1982, 1986, 1993, 1994
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)uio.h 8.5 (Berkeley) 2/22/94
+ * $FreeBSD$
+ */
+
+#ifndef _SYS_UIO_HH_
+#define _SYS_UIO_HH_
+
+#include <rtems/freebsd/sys/cdefs.h>
+#include <rtems/freebsd/sys/_types.h>
+#include <rtems/freebsd/sys/_iovec.h>
+
+#ifndef _SSIZE_T_DECLARED
+typedef __ssize_t ssize_t;
+#define _SSIZE_T_DECLARED
+#endif
+
+#ifndef _OFF_T_DECLARED
+typedef __off_t off_t;
+#define _OFF_T_DECLARED
+#endif
+
+#if __BSD_VISIBLE
+enum uio_rw { UIO_READ, UIO_WRITE };
+
+/* Segment flag values. */
+enum uio_seg {
+ UIO_USERSPACE, /* from user data space */
+ UIO_SYSSPACE, /* from system space */
+ UIO_NOCOPY /* don't copy, already in object */
+};
+#endif
+
+#ifdef _KERNEL
+
+struct uio {
+ struct iovec *uio_iov; /* scatter/gather list */
+ int uio_iovcnt; /* length of scatter/gather list */
+ off_t uio_offset; /* offset in target object */
+ ssize_t uio_resid; /* remaining bytes to process */
+ enum uio_seg uio_segflg; /* address space */
+ enum uio_rw uio_rw; /* operation */
+ struct thread *uio_td; /* owner */
+};
+
+/*
+ * Limits
+ *
+ * N.B.: UIO_MAXIOV must be no less than IOV_MAX from <sys/syslimits.h>
+ * which in turn must be no less than _XOPEN_IOV_MAX from <limits.h>. If
+ * we ever make this tunable (probably pointless), then IOV_MAX should be
+ * removed from <sys/syslimits.h> and applications would be expected to use
+ * sysconf(3) to find out the correct value, or else assume the worst
+ * (_XOPEN_IOV_MAX). Perhaps UIO_MAXIOV should be simply defined as
+ * IOV_MAX.
+ */
+#define UIO_MAXIOV 1024 /* max 1K of iov's */
+
+struct vm_object;
+struct vm_page;
+
+struct uio *cloneuio(struct uio *uiop);
+int copyinfrom(const void * __restrict src, void * __restrict dst,
+ size_t len, int seg);
+int copyiniov(struct iovec *iovp, u_int iovcnt, struct iovec **iov,
+ int error);
+int copyinstrfrom(const void * __restrict src, void * __restrict dst,
+ size_t len, size_t * __restrict copied, int seg);
+int copyinuio(struct iovec *iovp, u_int iovcnt, struct uio **uiop);
+void uio_yield(void);
+int uiomove(void *cp, int n, struct uio *uio);
+int uiomove_frombuf(void *buf, int buflen, struct uio *uio);
+int uiomove_fromphys(struct vm_page *ma[], vm_offset_t offset, int n,
+ struct uio *uio);
+int uiomoveco(void *cp, int n, struct uio *uio, int disposable);
+
+#else /* !_KERNEL */
+
+__BEGIN_DECLS
+ssize_t readv(int, const struct iovec *, int);
+ssize_t writev(int, const struct iovec *, int);
+#if __BSD_VISIBLE
+ssize_t preadv(int, const struct iovec *, int, off_t);
+ssize_t pwritev(int, const struct iovec *, int, off_t);
+#endif
+__END_DECLS
+
+#endif /* _KERNEL */
+
+#endif /* !_SYS_UIO_HH_ */
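A small userspace illustration of the scatter/gather interface declared above; writev() gathers both segments in a single system call:

#include <stdio.h>
#include <sys/uio.h>
#include <unistd.h>

int
main(void)
{
	static char part1[] = "hello, ";
	static char part2[] = "world\n";
	struct iovec iov[2];

	iov[0].iov_base = part1;
	iov[0].iov_len = sizeof(part1) - 1;
	iov[1].iov_base = part2;
	iov[1].iov_len = sizeof(part2) - 1;

	/* Both segments are written to stdout in one system call. */
	if (writev(STDOUT_FILENO, iov, 2) == -1)
		perror("writev");
	return (0);
}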
diff --git a/rtems/freebsd/sys/unistd.h b/rtems/freebsd/sys/unistd.h
new file mode 100644
index 00000000..ecebaf6a
--- /dev/null
+++ b/rtems/freebsd/sys/unistd.h
@@ -0,0 +1,188 @@
+/*-
+ * Copyright (c) 1989, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)unistd.h 8.2 (Berkeley) 1/7/94
+ * $FreeBSD$
+ */
+
+#ifndef _SYS_UNISTD_HH_
+#define _SYS_UNISTD_HH_
+
+#include <rtems/freebsd/sys/cdefs.h>
+
+/*
+ * POSIX options and option groups we unconditionally do or don't
+ * implement. Those options which are implemented (or not) entirely
+ * in user mode are defined in <unistd.h>. Please keep this list in
+ * alphabetical order.
+ *
+ * Anything which is defined as zero below **must** have an
+ * implementation for the corresponding sysconf() which is able to
+ * determine conclusively whether or not the feature is supported.
+ * Anything which is defined as other than -1 below **must** have
+ * complete headers, types, and function declarations as specified by
+ * the POSIX standard; however, if the relevant sysconf() function
+ * returns -1, the functions may be stubbed out.
+ */
+#define _POSIX_ADVISORY_INFO -1
+#define _POSIX_ASYNCHRONOUS_IO 0
+#define _POSIX_CHOWN_RESTRICTED 1
+#define _POSIX_CLOCK_SELECTION -1
+#define _POSIX_CPUTIME -1
+#define _POSIX_FSYNC 200112L
+#define _POSIX_IPV6 0
+#define _POSIX_JOB_CONTROL 1
+#define _POSIX_MAPPED_FILES 200112L
+#define _POSIX_MEMLOCK -1
+#define _POSIX_MEMLOCK_RANGE 200112L
+#define _POSIX_MEMORY_PROTECTION 200112L
+#define _POSIX_MESSAGE_PASSING 200112L
+#define _POSIX_MONOTONIC_CLOCK 200112L
+#define _POSIX_NO_TRUNC 1
+#define _POSIX_PRIORITIZED_IO -1
+#define _POSIX_PRIORITY_SCHEDULING 200112L
+#define _POSIX_RAW_SOCKETS 200112L
+#define _POSIX_REALTIME_SIGNALS 200112L
+#define _POSIX_SEMAPHORES -1
+#define _POSIX_SHARED_MEMORY_OBJECTS 200112L
+#define _POSIX_SPORADIC_SERVER -1
+#define _POSIX_SYNCHRONIZED_IO -1
+#define _POSIX_TIMEOUTS 200112L
+#define _POSIX_TIMERS 200112L
+#define _POSIX_TYPED_MEMORY_OBJECTS -1
+#define _POSIX_VDISABLE 0xff
+
+#if __XSI_VISIBLE
+#define _XOPEN_SHM 1
+#define _XOPEN_STREAMS -1
+#endif
+
+/*
+ * Although we have saved user/group IDs, we do not use them in setuid
+ * as described in POSIX 1003.1, because the feature does not work for
+ * root. We use the saved IDs in seteuid/setegid, which are not currently
+ * part of the POSIX 1003.1 specification. XXX revisit for 1003.1-2001
+ * as this is now mandatory.
+ */
+#ifdef _NOT_AVAILABLE
+#define _POSIX_SAVED_IDS 1 /* saved set-user-ID and set-group-ID */
+#endif
+
+/* Define the POSIX.1 version we target for compliance. */
+#define _POSIX_VERSION 200112L
+
+/* access function */
+#define F_OK 0 /* test for existence of file */
+#define X_OK 0x01 /* test for execute or search permission */
+#define W_OK 0x02 /* test for write permission */
+#define R_OK 0x04 /* test for read permission */
+
+/* whence values for lseek(2) */
+#ifndef SEEK_SET
+#define SEEK_SET 0 /* set file offset to offset */
+#define SEEK_CUR 1 /* set file offset to current plus offset */
+#define SEEK_END 2 /* set file offset to EOF plus offset */
+#endif
+#if __BSD_VISIBLE
+#define SEEK_DATA 3 /* set file offset to next data past offset */
+#define SEEK_HOLE 4 /* set file offset to next hole past offset */
+#endif
+
+#ifndef _POSIX_SOURCE
+/* whence values for lseek(2); renamed by POSIX 1003.1 */
+#define L_SET SEEK_SET
+#define L_INCR SEEK_CUR
+#define L_XTND SEEK_END
+#endif
+
+/* configurable pathname variables */
+#define _PC_LINK_MAX 1
+#define _PC_MAX_CANON 2
+#define _PC_MAX_INPUT 3
+#define _PC_NAME_MAX 4
+#define _PC_PATH_MAX 5
+#define _PC_PIPE_BUF 6
+#define _PC_CHOWN_RESTRICTED 7
+#define _PC_NO_TRUNC 8
+#define _PC_VDISABLE 9
+
+#if __POSIX_VISIBLE >= 199309
+#define _PC_ASYNC_IO 53
+#define _PC_PRIO_IO 54
+#define _PC_SYNC_IO 55
+#endif
+
+#if __POSIX_VISIBLE >= 200112
+#define _PC_ALLOC_SIZE_MIN 10
+#define _PC_FILESIZEBITS 12
+#define _PC_REC_INCR_XFER_SIZE 14
+#define _PC_REC_MAX_XFER_SIZE 15
+#define _PC_REC_MIN_XFER_SIZE 16
+#define _PC_REC_XFER_ALIGN 17
+#define _PC_SYMLINK_MAX 18
+#endif
+
+#if __BSD_VISIBLE
+#define _PC_ACL_EXTENDED 59
+#define _PC_ACL_PATH_MAX 60
+#define _PC_CAP_PRESENT 61
+#define _PC_INF_PRESENT 62
+#define _PC_MAC_PRESENT 63
+#define _PC_ACL_NFS4 64
+#endif
+
+/* From OpenSolaris, used by SEEK_DATA/SEEK_HOLE. */
+#define _PC_MIN_HOLE_SIZE 21
+
+#if __BSD_VISIBLE
+/*
+ * rfork() options.
+ *
+ * XXX currently, some operations without RFPROC set are not supported.
+ */
+#define RFNAMEG (1<<0) /* UNIMPL new plan9 `name space' */
+#define RFENVG (1<<1) /* UNIMPL copy plan9 `env space' */
+#define RFFDG (1<<2) /* copy fd table */
+#define RFNOTEG (1<<3) /* UNIMPL create new plan9 `note group' */
+#define RFPROC (1<<4) /* change child (else changes curproc) */
+#define RFMEM (1<<5) /* share `address space' */
+#define RFNOWAIT (1<<6) /* give child to init */
+#define RFCNAMEG (1<<10) /* UNIMPL zero plan9 `name space' */
+#define RFCENVG (1<<11) /* UNIMPL zero plan9 `env space' */
+#define RFCFDG (1<<12) /* close all fds, zero fd table */
+#define RFTHREAD (1<<13) /* enable kernel thread support */
+#define RFSIGSHARE (1<<14) /* share signal handlers */
+#define RFLINUXTHPN (1<<16) /* do linux clone exit parent notification */
+#define RFSTOPPED (1<<17) /* leave child in a stopped state */
+#define RFHIGHPID (1<<18) /* use a pid higher than 10 (idleproc) */
+#define RFPPWAIT (1<<31) /* parent sleeps until child exits (vfork) */
+#define RFKERNELONLY (RFSTOPPED | RFHIGHPID | RFPPWAIT)
+
+#endif /* __BSD_VISIBLE */
+
+#endif /* !_SYS_UNISTD_HH_ */
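The access() permission bits defined above are meant to be OR-ed together and are consumed by the userland access() from <unistd.h>; a short example, with an arbitrary path:

#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	/* F_OK is zero, so existence is implied by any other test. */
	if (access("/etc/passwd", R_OK | W_OK) == 0)
		printf("readable and writable\n");
	else
		perror("access");
	return (0);
}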
diff --git a/rtems/freebsd/time.h b/rtems/freebsd/time.h
new file mode 100644
index 00000000..936ffd88
--- /dev/null
+++ b/rtems/freebsd/time.h
@@ -0,0 +1 @@
+/* EMPTY */
diff --git a/rtems/freebsd/vm/uma.h b/rtems/freebsd/vm/uma.h
new file mode 100644
index 00000000..61bcaf36
--- /dev/null
+++ b/rtems/freebsd/vm/uma.h
@@ -0,0 +1,636 @@
+/*-
+ * Copyright (c) 2002, 2003, 2004, 2005 Jeffrey Roberson <jeff@FreeBSD.org>
+ * Copyright (c) 2004, 2005 Bosko Milekic <bmilekic@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ *
+ */
+
+/*
+ * uma.h - External definitions for the Universal Memory Allocator
+ *
+ */
+
+#ifndef VM_UMA_H
+#define VM_UMA_H
+
+#include <rtems/freebsd/sys/param.h> /* For NULL */
+#include <rtems/freebsd/sys/malloc.h> /* For M_* */
+
+/* User visible parameters */
+#define UMA_SMALLEST_UNIT (PAGE_SIZE / 256) /* Smallest item allocated */
+
+/* Types and type defs */
+
+struct uma_zone;
+/* Opaque type used as a handle to the zone */
+typedef struct uma_zone * uma_zone_t;
+
+void zone_drain(uma_zone_t);
+
+/*
+ * Item constructor
+ *
+ * Arguments:
+ * item A pointer to the memory which has been allocated.
+ * arg The arg field passed to uma_zalloc_arg
+ * size The size of the allocated item
+ * flags See zalloc flags
+ *
+ * Returns:
+ * 0 on success
+ * errno on failure
+ *
+ * Discussion:
+ * The constructor is called just before the memory is returned
+ * to the user. It may block if necessary.
+ */
+typedef int (*uma_ctor)(void *mem, int size, void *arg, int flags);
+
+/*
+ * Item destructor
+ *
+ * Arguments:
+ * item A pointer to the memory which has been allocated.
+ * size The size of the item being destructed.
+ * arg Argument passed through uma_zfree_arg
+ *
+ * Returns:
+ * Nothing
+ *
+ * Discussion:
+ * The destructor may perform operations that differ from those performed
+ * by the initializer, but it must leave the object in the same state.
+ * This IS type stable storage. This is called after EVERY zfree call.
+ */
+typedef void (*uma_dtor)(void *mem, int size, void *arg);
+
+/*
+ * Item initializer
+ *
+ * Arguments:
+ * item A pointer to the memory which has been allocated.
+ * size The size of the item being initialized.
+ * flags See zalloc flags
+ *
+ * Returns:
+ * 0 on success
+ * errno on failure
+ *
+ * Discussion:
+ * The initializer is called when the memory is cached in the uma zone.
+ * The initializer and the destructor should leave the object in the same
+ * state.
+ */
+typedef int (*uma_init)(void *mem, int size, int flags);
+
+/*
+ * Item discard function
+ *
+ * Arguments:
+ * item A pointer to memory which has been 'freed' but has not left the
+ * zone's cache.
+ * size The size of the item being discarded.
+ *
+ * Returns:
+ * Nothing
+ *
+ * Discussion:
+ * This routine is called when memory leaves a zone and is returned to the
+ * system for other uses. It is the counter-part to the init function.
+ */
+typedef void (*uma_fini)(void *mem, int size);
+
+/*
+ * What's the difference between initializing and constructing?
+ *
+ * The item is initialized when it is cached, and this is the state that the
+ * object should be in when returned to the allocator. The purpose of this is
+ * to remove some code which would otherwise be called on each allocation by
+ * utilizing a known, stable state. This differs from the constructor which
+ * will be called on EVERY allocation.
+ *
+ * For example, in the initializer you may want to initialize embedded locks,
+ * NULL list pointers, set up initial states, magic numbers, etc. This way if
+ * the object is held in the allocator and re-used it won't be necessary to
+ * re-initialize it.
+ *
+ * The constructor may be used to lock a data structure, link it on to lists,
+ * bump reference counts or total counts of outstanding structures, etc.
+ *
+ */
+
+
+/* Function proto types */
+
+/*
+ * Create a new uma zone
+ *
+ * Arguments:
+ * name The text name of the zone for debugging and stats. This memory
+ * should not be freed until the zone has been deallocated.
+ * size The size of the object that is being created.
+ * ctor The constructor that is called when the object is allocated.
+ * dtor The destructor that is called when the object is freed.
+ * init An initializer that sets up the initial state of the memory.
+ * fini A discard function that undoes initialization done by init.
+ * ctor/dtor/init/fini may all be null, see notes above.
+ * align A bitmask that corresponds to the requested alignment
+ * eg 4 would be 0x3
+ * flags A set of parameters that control the behavior of the zone.
+ *
+ * Returns:
+ * A pointer to a structure which is intended to be opaque to users of
+ * the interface. The value may be null if the wait flag is not set.
+ */
+uma_zone_t uma_zcreate(char *name, size_t size, uma_ctor ctor, uma_dtor dtor,
+ uma_init uminit, uma_fini fini, int align,
+ u_int32_t flags);
+
+/*
+ * Create a secondary uma zone
+ *
+ * Arguments:
+ * name The text name of the zone for debugging and stats. This memory
+ * should not be freed until the zone has been deallocated.
+ * ctor The constructor that is called when the object is allocated.
+ * dtor The destructor that is called when the object is freed.
+ * zinit An initializer that sets up the initial state of the memory
+ * as the object passes from the Keg's slab to the Zone's cache.
+ * zfini A discard function that undoes initialization done by init
+ * as the object passes from the Zone's cache to the Keg's slab.
+ *
+ * ctor/dtor/zinit/zfini may all be null, see notes above.
+ * Note that the zinit and zfini specified here are NOT
+ * exactly the same as the init/fini specified to uma_zcreate()
+ * when creating a master zone. These zinit/zfini are called
+ * on the TRANSITION from keg to zone (and vice-versa). Once
+ * these are set, the primary zone may alter its init/fini
+ * (which are called when the object passes from VM to keg)
+ * using uma_zone_set_init/fini() as well as its own
+ * zinit/zfini (unset by default for master zone) with
+ * uma_zone_set_zinit/zfini() (note subtle 'z' prefix).
+ *
+ * master A reference to this zone's Master Zone (Primary Zone),
+ * which contains the backing Keg for the Secondary Zone
+ * being added.
+ *
+ * Returns:
+ * A pointer to a structure which is intended to be opaque to users of
+ * the interface. The value may be null if the wait flag is not set.
+ */
+uma_zone_t uma_zsecond_create(char *name, uma_ctor ctor, uma_dtor dtor,
+ uma_init zinit, uma_fini zfini, uma_zone_t master);
+
+/*
+ * Add a second master to a secondary zone. This provides multiple data
+ * backends for objects with the same size. Both masters must have
+ * compatible allocation flags. Presently, UMA_ZONE_MALLOC type zones are
+ * the only type supported.
+ *
+ * Returns:
+ * Error on failure, 0 on success.
+ */
+int uma_zsecond_add(uma_zone_t zone, uma_zone_t master);
+
+/*
+ * Definitions for uma_zcreate flags
+ *
+ * These flags share space with UMA_ZFLAGs in uma_int.h. Be careful not to
+ * overlap when adding new features. 0xf0000000 is in use by uma_int.h.
+ */
+#define UMA_ZONE_PAGEABLE 0x0001 /* Return items not fully backed by
+ physical memory XXX Not yet */
+#define UMA_ZONE_ZINIT 0x0002 /* Initialize with zeros */
+#define UMA_ZONE_STATIC 0x0004 /* Statically sized zone */
+#define UMA_ZONE_OFFPAGE 0x0008 /* Force the slab structure allocation
+ off of the real memory */
+#define UMA_ZONE_MALLOC 0x0010 /* For use by malloc(9) only! */
+#define UMA_ZONE_NOFREE 0x0020 /* Do not free slabs of this type! */
+#define UMA_ZONE_MTXCLASS 0x0040 /* Create a new lock class */
+#define UMA_ZONE_VM 0x0080 /*
+ * Used for internal vm datastructures
+ * only.
+ */
+#define UMA_ZONE_HASH 0x0100 /*
+ * Use a hash table instead of caching
+ * information in the vm_page.
+ */
+#define UMA_ZONE_SECONDARY 0x0200 /* Zone is a Secondary Zone */
+#define UMA_ZONE_REFCNT 0x0400 /* Allocate refcnts in slabs */
+#define UMA_ZONE_MAXBUCKET 0x0800 /* Use largest buckets */
+#define UMA_ZONE_CACHESPREAD 0x1000 /*
+ * Spread memory start locations across
+ * all possible cache lines. May
+ * require many virtually contiguous
+ * backend pages and can fail early.
+ */
+#define UMA_ZONE_VTOSLAB 0x2000 /* Zone uses vtoslab for lookup. */
+
+/*
+ * These flags are shared between the keg and zone. In zones wishing to add
+ * new kegs these flags must be compatible. Some are determined based on
+ * physical parameters of the request and may not be provided by the consumer.
+ */
+#define UMA_ZONE_INHERIT \
+ (UMA_ZONE_OFFPAGE | UMA_ZONE_MALLOC | UMA_ZONE_HASH | \
+ UMA_ZONE_REFCNT | UMA_ZONE_VTOSLAB)
+
+/* Definitions for align */
+#define UMA_ALIGN_PTR (sizeof(void *) - 1) /* Alignment fit for ptr */
+#define UMA_ALIGN_LONG (sizeof(long) - 1) /* "" long */
+#define UMA_ALIGN_INT (sizeof(int) - 1) /* "" int */
+#define UMA_ALIGN_SHORT (sizeof(short) - 1) /* "" short */
+#define UMA_ALIGN_CHAR (sizeof(char) - 1) /* "" char */
+#define UMA_ALIGN_CACHE (0 - 1) /* Cache line size align */
+
+/*
+ * Destroys an empty uma zone. If the zone is not empty uma complains loudly.
+ *
+ * Arguments:
+ * zone The zone we want to destroy.
+ *
+ */
+void uma_zdestroy(uma_zone_t zone);
+
+/*
+ * Allocates an item out of a zone
+ *
+ * Arguments:
+ * zone The zone we are allocating from
+ * arg This data is passed to the ctor function
+ * flags See sys/malloc.h for available flags.
+ *
+ * Returns:
+ * A non-null pointer to an initialized element from the zone is
+ * guaranteed if the wait flag is M_WAITOK. Otherwise a null pointer
+ * may be returned if the zone is empty or the ctor failed.
+ */
+
+void *uma_zalloc_arg(uma_zone_t zone, void *arg, int flags);
+
+/*
+ * Allocates an item out of a zone without supplying an argument
+ *
+ * This is just a wrapper for uma_zalloc_arg for convenience.
+ *
+ */
+static __inline void *uma_zalloc(uma_zone_t zone, int flags);
+
+static __inline void *
+uma_zalloc(uma_zone_t zone, int flags)
+{
+ return uma_zalloc_arg(zone, NULL, flags);
+}
+
+/*
+ * Frees an item back into the specified zone.
+ *
+ * Arguments:
+ * zone The zone the item was originally allocated out of.
+ * item The memory to be freed.
+ * arg Argument passed to the destructor
+ *
+ * Returns:
+ * Nothing.
+ */
+
+void uma_zfree_arg(uma_zone_t zone, void *item, void *arg);
+
+/*
+ * Frees an item back to a zone without supplying an argument
+ *
+ * This is just a wrapper for uma_zfree_arg for convenience.
+ *
+ */
+static __inline void uma_zfree(uma_zone_t zone, void *item);
+
+static __inline void
+uma_zfree(uma_zone_t zone, void *item)
+{
+ uma_zfree_arg(zone, item, NULL);
+}
+
+/*
+ * XXX The rest of the prototypes in this header are h0h0 magic for the VM.
+ * If you think you need to use it for a normal zone you're probably incorrect.
+ */
+
+/*
+ * Backend page supplier routines
+ *
+ * Arguments:
+ * zone The zone that is requesting pages.
+ * size The number of bytes being requested.
+ * pflag Flags for these memory pages, see below.
+ * wait Indicates our willingness to block.
+ *
+ * Returns:
+ * A pointer to the allocated memory or NULL on failure.
+ */
+
+typedef void *(*uma_alloc)(uma_zone_t zone, int size, u_int8_t *pflag, int wait);
+
+/*
+ * Backend page free routines
+ *
+ * Arguments:
+ * item A pointer to the previously allocated pages.
+ * size The original size of the allocation.
+ * pflag The flags for the slab. See UMA_SLAB_* below.
+ *
+ * Returns:
+ * None
+ */
+typedef void (*uma_free)(void *item, int size, u_int8_t pflag);
+
+
+
+/*
+ * Sets up the uma allocator. (Called by vm_mem_init)
+ *
+ * Arguments:
+ * bootmem A pointer to memory used to bootstrap the system.
+ *
+ * Returns:
+ * Nothing
+ *
+ * Discussion:
+ * This memory is used for zones which allocate things before the
+ * backend page supplier can give us pages. It should be
+ * UMA_SLAB_SIZE * boot_pages bytes. (see uma_int.h)
+ *
+ */
+
+void uma_startup(void *bootmem, int boot_pages);
+
+/*
+ * Finishes starting up the allocator. This should
+ * be called when kva is ready for normal allocs.
+ *
+ * Arguments:
+ * None
+ *
+ * Returns:
+ * Nothing
+ *
+ * Discussion:
+ * uma_startup2 is called by kmeminit() to enable the use of uma for malloc.
+ */
+
+void uma_startup2(void);
+
+/*
+ * Reclaims unused memory for all zones
+ *
+ * Arguments:
+ * None
+ * Returns:
+ * None
+ *
+ * This should only be called by the page out daemon.
+ */
+
+void uma_reclaim(void);
+
+/*
+ * Sets the alignment mask to be used for all zones requesting cache
+ * alignment. Should be called by MD boot code prior to starting VM/UMA.
+ *
+ * Arguments:
+ * align The alignment mask
+ *
+ * Returns:
+ * Nothing
+ */
+void uma_set_align(int align);
+
+/*
+ * Switches the backing object of a zone
+ *
+ * Arguments:
+ * zone The zone to update.
+ * obj The VM object to use for future allocations.
+ * size The size of the object to allocate.
+ *
+ * Returns:
+ * 0 if kva space cannot be allocated
+ * 1 if successful
+ *
+ * Discussion:
+ * A NULL object can be used and uma will allocate one for you. Setting
+ * the size will limit the amount of memory allocated to this zone.
+ *
+ */
+struct vm_object;
+int uma_zone_set_obj(uma_zone_t zone, struct vm_object *obj, int size);
+
+/*
+ * Sets a high limit on the number of items allowed in a zone
+ *
+ * Arguments:
+ * zone The zone to limit
+ *
+ * Returns:
+ * Nothing
+ */
+void uma_zone_set_max(uma_zone_t zone, int nitems);
+
+/*
+ * Obtains the effective limit on the number of items in a zone
+ *
+ * Arguments:
+ * zone The zone to obtain the effective limit from
+ *
+ * Return:
+ * 0 No limit
+ * int The effective limit of the zone
+ */
+int uma_zone_get_max(uma_zone_t zone);
+
+/*
+ * Obtains the approximate current number of items allocated from a zone
+ *
+ * Arguments:
+ * zone The zone to obtain the current allocation count from
+ *
+ * Return:
+ * int The approximate current number of items allocated from the zone
+ */
+int uma_zone_get_cur(uma_zone_t zone);
+
+/*
+ * The following two routines (uma_zone_set_init/fini)
+ * are used to set the backend init/fini pair which acts on an
+ * object as it becomes allocated and is placed in a slab within
+ * the specified zone's backing keg. These should probably not
+ * be changed once allocations have already begun, but only be set
+ * immediately upon zone creation.
+ */
+void uma_zone_set_init(uma_zone_t zone, uma_init uminit);
+void uma_zone_set_fini(uma_zone_t zone, uma_fini fini);
+
+/*
+ * The following two routines (uma_zone_set_zinit/zfini) are
+ * used to set the zinit/zfini pair which acts on an object as
+ * it passes from the backing Keg's slab cache to the
+ * specified Zone's bucket cache. These should probably not
+ * be changed once allocations have already begun, but only be set
+ * immediately upon zone creation.
+ */
+void uma_zone_set_zinit(uma_zone_t zone, uma_init zinit);
+void uma_zone_set_zfini(uma_zone_t zone, uma_fini zfini);
+
+/*
+ * Replaces the standard page_alloc or obj_alloc functions for this zone
+ *
+ * Arguments:
+ * zone The zone whose backend allocator is being changed.
+ * allocf A pointer to the allocation function
+ *
+ * Returns:
+ * Nothing
+ *
+ * Discussion:
+ * This could be used to implement pageable allocation, or perhaps
+ * even DMA allocators if used in conjunction with the OFFPAGE
+ * zone flag.
+ */
+
+void uma_zone_set_allocf(uma_zone_t zone, uma_alloc allocf);
+
+/*
+ * Used for freeing memory provided by the allocf above
+ *
+ * Arguments:
+ * zone The zone that intends to use this free routine.
+ * freef The page freeing routine.
+ *
+ * Returns:
+ * Nothing
+ */
+
+void uma_zone_set_freef(uma_zone_t zone, uma_free freef);
+
+/*
+ * These flags are settable in the allocf and visible in the freef.
+ */
+#define UMA_SLAB_BOOT 0x01 /* Slab alloced from boot pages */
+#define UMA_SLAB_KMEM 0x02 /* Slab alloced from kmem_map */
+#define UMA_SLAB_KERNEL 0x04 /* Slab alloced from kernel_map */
+#define UMA_SLAB_PRIV 0x08 /* Slab alloced from priv allocator */
+#define UMA_SLAB_OFFP 0x10 /* Slab is managed separately */
+#define UMA_SLAB_MALLOC 0x20 /* Slab is a large malloc slab */
+/* 0x40 and 0x80 are available */
+
+/*
+ * Used to pre-fill a zone with some number of items
+ *
+ * Arguments:
+ * zone The zone to fill
+ * itemcnt The number of items to reserve
+ *
+ * Returns:
+ * Nothing
+ *
+ * NOTE: This is blocking and should only be done at startup
+ */
+void uma_prealloc(uma_zone_t zone, int itemcnt);
+
+/*
+ * Used to lookup the reference counter allocated for an item
+ * from a UMA_ZONE_REFCNT zone. For UMA_ZONE_REFCNT zones,
+ * reference counters are allocated for items and stored in
+ * the underlying slab header.
+ *
+ * Arguments:
+ * zone The UMA_ZONE_REFCNT zone to which the item belongs.
+ * item The address of the item for which we want a refcnt.
+ *
+ * Returns:
+ * A pointer to a u_int32_t reference counter.
+ */
+u_int32_t *uma_find_refcnt(uma_zone_t zone, void *item);
+
+/*
+ * Used to determine if a fixed-size zone is exhausted.
+ *
+ * Arguments:
+ * zone The zone to check
+ *
+ * Returns:
+ * Non-zero if zone is exhausted.
+ */
+int uma_zone_exhausted(uma_zone_t zone);
+int uma_zone_exhausted_nolock(uma_zone_t zone);
+
+/*
+ * Exported statistics structures to be used by user space monitoring tools.
+ * The statistics stream consists of a uma_stream_header, followed by a series
+ * of alternating uma_type_header and uma_percpu_stat structures.
+ */
+#define UMA_STREAM_VERSION 0x00000001
+struct uma_stream_header {
+ u_int32_t ush_version; /* Stream format version. */
+ u_int32_t ush_maxcpus; /* Value of MAXCPU for stream. */
+ u_int32_t ush_count; /* Number of records. */
+ u_int32_t _ush_pad; /* Pad/reserved field. */
+};
+
+#define UTH_MAX_NAME 32
+#define UTH_ZONE_SECONDARY 0x00000001
+struct uma_type_header {
+ /*
+ * Static per-zone data, some extracted from the supporting keg.
+ */
+ char uth_name[UTH_MAX_NAME];
+ u_int32_t uth_align; /* Keg: alignment. */
+ u_int32_t uth_size; /* Keg: requested size of item. */
+ u_int32_t uth_rsize; /* Keg: real size of item. */
+ u_int32_t uth_maxpages; /* Keg: maximum number of pages. */
+ u_int32_t uth_limit; /* Keg: max items to allocate. */
+
+ /*
+ * Current dynamic zone/keg-derived statistics.
+ */
+ u_int32_t uth_pages; /* Keg: pages allocated. */
+ u_int32_t uth_keg_free; /* Keg: items free. */
+ u_int32_t uth_zone_free; /* Zone: items free. */
+ u_int32_t uth_bucketsize; /* Zone: desired bucket size. */
+ u_int32_t uth_zone_flags; /* Zone: flags. */
+ u_int64_t uth_allocs; /* Zone: number of allocations. */
+ u_int64_t uth_frees; /* Zone: number of frees. */
+ u_int64_t uth_fails; /* Zone: number of alloc failures. */
+ u_int64_t _uth_reserved1[3]; /* Reserved. */
+};
+
+struct uma_percpu_stat {
+ u_int64_t ups_allocs; /* Cache: number of allocations. */
+ u_int64_t ups_frees; /* Cache: number of frees. */
+ u_int64_t ups_cache_free; /* Cache: free items in cache. */
+ u_int64_t _ups_reserved[5]; /* Reserved. */
+};
+
+#endif
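Tying the callback typedefs and zone routines above together, a kernel-context sketch of a typical zone life cycle; struct foo, the callback names and the limit are illustrative, and the code is only meaningful inside the kernel:

#include <rtems/freebsd/vm/uma.h>

struct foo {
	int f_refs;
};

static uma_zone_t foo_zone;

static int
foo_init(void *mem, int size, int flags)
{
	/* Runs once, when the item first enters the zone's cache. */
	((struct foo *)mem)->f_refs = 0;
	return (0);
}

static void
foo_setup(void)
{
	foo_zone = uma_zcreate("foo", sizeof(struct foo),
	    NULL, NULL,		/* no per-allocation ctor/dtor needed */
	    foo_init, NULL,	/* init on cache entry, no fini */
	    UMA_ALIGN_PTR, 0);
	uma_zone_set_max(foo_zone, 1024);	/* cap the zone at 1024 items */
}

static void
foo_use(void)
{
	struct foo *fp;

	fp = uma_zalloc(foo_zone, M_WAITOK);	/* non-NULL with M_WAITOK */
	fp->f_refs = 1;
	uma_zfree(foo_zone, fp);
}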
diff --git a/rtems/freebsd/vm/uma_dbg.h b/rtems/freebsd/vm/uma_dbg.h
new file mode 100644
index 00000000..341cecbf
--- /dev/null
+++ b/rtems/freebsd/vm/uma_dbg.h
@@ -0,0 +1,55 @@
+/*-
+ * Copyright (c) 2002, 2003, 2004, 2005 Jeffrey Roberson <jeff@FreeBSD.org>
+ * Copyright (c) 2004, 2005 Bosko Milekic <bmilekic@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ *
+ */
+
+/*
+ * This file includes definitions, structures, prototypes, and inlines used
+ * when debugging users of the UMA interface.
+ */
+
+#ifndef VM_UMA_DBG_H
+#define VM_UMA_DBG_H
+
+int trash_ctor(void *mem, int size, void *arg, int flags);
+void trash_dtor(void *mem, int size, void *arg);
+int trash_init(void *mem, int size, int flags);
+void trash_fini(void *mem, int size);
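+
+/*
+ * A hedged usage sketch: the trash routines are meant to be plugged into a
+ * zone so that freed items are filled with a junk pattern and checked again
+ * on reallocation, catching writes to freed memory.  The zone name and item
+ * size below are illustrative assumptions.
+ *
+ *	zone = uma_zcreate("example", 256, trash_ctor, trash_dtor,
+ *	    trash_init, trash_fini, UMA_ALIGN_PTR, 0);
+ */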
+
+/* For use only by malloc */
+int mtrash_ctor(void *mem, int size, void *arg, int flags);
+void mtrash_dtor(void *mem, int size, void *arg);
+int mtrash_init(void *mem, int size, int flags);
+void mtrash_fini(void *mem, int size);
+
+void uma_dbg_free(uma_zone_t zone, uma_slab_t slab, void *item);
+void uma_dbg_alloc(uma_zone_t zone, uma_slab_t slab, void *item);
+
+#endif /* VM_UMA_DBG_H */
diff --git a/rtems/freebsd/vm/uma_int.h b/rtems/freebsd/vm/uma_int.h
new file mode 100644
index 00000000..16115201
--- /dev/null
+++ b/rtems/freebsd/vm/uma_int.h
@@ -0,0 +1,440 @@
+/*-
+ * Copyright (c) 2002-2005, 2009 Jeffrey Roberson <jeff@FreeBSD.org>
+ * Copyright (c) 2004, 2005 Bosko Milekic <bmilekic@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ *
+ */
+
+/*
+ * This file includes definitions, structures, prototypes, and inlines that
+ * should not be used outside of the actual implementation of UMA.
+ */
+
+/*
+ * Here's a quick description of the relationship between the objects:
+ *
+ * Kegs contain lists of slabs which are stored in either the full bin, empty
+ * bin, or partially allocated bin, to reduce fragmentation. They also contain
+ * the user supplied value for size, which is adjusted for alignment purposes;
+ * rsize is the result of that adjustment. The Keg also stores information for
+ * managing a hash of page addresses that maps pages to uma_slab_t structures
+ * for pages that don't have embedded uma_slab_t's.
+ *
+ * The uma_slab_t may be embedded in a UMA_SLAB_SIZE chunk of memory or it may
+ * be allocated off the page from a special slab zone. The free list within a
+ * slab is managed with a linked list of indexes, which are 8-bit values. If
+ * UMA_SLAB_SIZE is defined to be too large I will have to switch to 16-bit
+ * values. Currently on alpha you can get 250 or so 32-byte items and on x86
+ * you can get 250 or so 16-byte items. For item sizes that would yield more
+ * than 10% memory waste we potentially allocate a separate uma_slab_t if this
+ * will improve the number of items per slab that will fit.
+ *
+ * One potential space optimization is storing the 8-bit linkage in the space
+ * wasted between items due to alignment problems. This may yield a much better
+ * memory footprint for certain sizes of objects. Another alternative is to
+ * increase UMA_SLAB_SIZE, or allow for dynamic slab sizes. I prefer
+ * dynamic slab sizes because we could stick with 8-bit indexes and only use
+ * large slab sizes for zones with a lot of waste per slab. This may create
+ * inefficiencies in the vm subsystem due to fragmentation in the address space.
+ *
+ * The only really gross cases, with regards to memory waste, are for those
+ * items that are just over half the page size. You can get nearly 50% waste,
+ * so you fall back to the memory footprint of the power of two allocator. I
+ * have looked at memory allocation sizes on many of the machines available to
+ * me, and there does not seem to be an abundance of allocations at this range
+ * so at this time it may not make sense to optimize for it. This can, of
+ * course, be solved with dynamic slab sizes.
+ *
+ * Kegs may serve multiple Zones but by far most of the time they only serve
+ * one. When a Zone is created, a Keg is allocated and setup for it. While
+ * the backing Keg stores slabs, the Zone caches Buckets of items allocated
+ * from the slabs. Each Zone is equipped with an init/fini and ctor/dtor
+ * pair, as well as with its own set of small per-CPU caches, layered above
+ * the Zone's general Bucket cache.
+ *
+ * The PCPU caches are protected by critical sections, and may be accessed
+ * safely only from their associated CPU, while the Zones backed by the same
+ * Keg all share a common Keg lock (to coalesce contention on the backing
+ * slabs). The backing Keg typically only serves one Zone but in the case of
+ * multiple Zones, one of the Zones is considered the Master Zone and all
+ * Zone-related stats from the Keg are accounted to the Master Zone. For an
+ * example of a Multi-Zone setup, refer to the Mbuf allocation code.
+ */
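+
+/*
+ * A simplified sketch of the allocation fast path implied above (the real
+ * logic lives in uma_core.c; field names reference the structures defined
+ * later in this file, and the fallback is only summarized):
+ *
+ *	critical_enter();
+ *	cache = &zone->uz_cpu[curcpu];
+ *	bucket = cache->uc_allocbucket;
+ *	if (bucket != NULL && bucket->ub_cnt > 0) {
+ *		item = bucket->ub_bucket[--bucket->ub_cnt];
+ *		cache->uc_allocs++;
+ *		critical_exit();
+ *		return (item);
+ *	}
+ *	critical_exit();
+ *	... otherwise take the Zone lock, try the Zone's full Buckets, and
+ *	finally allocate from the Keg's slabs under the Keg lock.
+ */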
+
+/*
+ * This is the representation of a normal (non-OFFPAGE) slab:
+ *
+ * i == item
+ * s == slab pointer
+ *
+ * <---------------- Page (UMA_SLAB_SIZE) ------------------>
+ * ___________________________________________________________
+ * | _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ ___________ |
+ * ||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i| |slab header||
+ * ||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_| |___________||
+ * |___________________________________________________________|
+ *
+ *
+ * This is an OFFPAGE slab. These can be larger than UMA_SLAB_SIZE.
+ *
+ * ___________________________________________________________
+ * | _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ |
+ * ||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i| |
+ * ||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_| |
+ * |___________________________________________________________|
+ * ___________ ^
+ * |slab header| |
+ * |___________|---*
+ *
+ */
+
+#ifndef VM_UMA_INT_H
+#define VM_UMA_INT_H
+
+#define UMA_SLAB_SIZE PAGE_SIZE /* How big are our slabs? */
+#define UMA_SLAB_MASK (PAGE_SIZE - 1) /* Mask to get back to the page */
+#define UMA_SLAB_SHIFT PAGE_SHIFT /* Number of bits in PAGE_MASK */
+
+#define UMA_BOOT_PAGES 48 /* Pages allocated for startup */
+
+/* Max waste before going to off page slab management */
+#define UMA_MAX_WASTE (UMA_SLAB_SIZE / 10)
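+
+/*
+ * For example, with 4 kB pages UMA_MAX_WASTE works out to 409 bytes;
+ * a zone wasting more than that per slab may be given a separate
+ * (off page) slab header if that improves items-per-slab.
+ */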
+
+/*
+ * This is the initial size of the hash table for uma_slabs that are managed
+ * off page. I doubt there will be many cases where it is exceeded. This hash
+ * does expand by powers of two. Currently it doesn't get smaller.
+ */
+#define UMA_HASH_SIZE_INIT 32
+
+/*
+ * I should investigate other hashing algorithms. This should yield a low
+ * number of collisions if the pages are relatively contiguous.
+ *
+ * This is the same algorithm that most processor caches use.
+ *
+ * I'm shifting and masking instead of % because it should be faster.
+ */
+
+#define UMA_HASH(h, s) ((((unsigned long)s) >> UMA_SLAB_SHIFT) & \
+ (h)->uh_hashmask)
+
+#define UMA_HASH_INSERT(h, s, mem) \
+ SLIST_INSERT_HEAD(&(h)->uh_slab_hash[UMA_HASH((h), \
+ (mem))], (s), us_hlink)
+#define UMA_HASH_REMOVE(h, s, mem) \
+ SLIST_REMOVE(&(h)->uh_slab_hash[UMA_HASH((h), \
+ (mem))], (s), uma_slab, us_hlink)
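+
+/*
+ * Worked example (illustrative): with 4 kB pages (UMA_SLAB_SHIFT == 12)
+ * and a 32 bucket table (uh_hashmask == 31), a slab whose data page is
+ * 0x0c345000 hashes to (0x0c345000 >> 12) & 31 == 5, and the slab on the
+ * next page hashes to bucket 6, so runs of contiguous pages spread evenly
+ * across the table.
+ */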
+
+/* Hash table for freed address -> slab translation */
+
+SLIST_HEAD(slabhead, uma_slab);
+
+struct uma_hash {
+ struct slabhead *uh_slab_hash; /* Hash table for slabs */
+ int uh_hashsize; /* Current size of the hash table */
+ int uh_hashmask; /* Mask used during hashing */
+};
+
+/*
+ * Structures for per cpu queues.
+ */
+
+struct uma_bucket {
+ LIST_ENTRY(uma_bucket) ub_link; /* Link into the zone */
+ int16_t ub_cnt; /* Count of free items. */
+ int16_t ub_entries; /* Max items. */
+ void *ub_bucket[]; /* actual allocation storage */
+};
+
+typedef struct uma_bucket * uma_bucket_t;
+
+struct uma_cache {
+ uma_bucket_t uc_freebucket; /* Bucket we're freeing to */
+ uma_bucket_t uc_allocbucket; /* Bucket to allocate from */
+ u_int64_t uc_allocs; /* Count of allocations */
+ u_int64_t uc_frees; /* Count of frees */
+};
+
+typedef struct uma_cache * uma_cache_t;
+
+/*
+ * Keg management structure
+ *
+ * TODO: Optimize for cache line size
+ *
+ */
+struct uma_keg {
+ LIST_ENTRY(uma_keg) uk_link; /* List of all kegs */
+
+ struct mtx uk_lock; /* Lock for the keg */
+ struct uma_hash uk_hash;
+
+ char *uk_name; /* Name of creating zone. */
+ LIST_HEAD(,uma_zone) uk_zones; /* Keg's zones */
+ LIST_HEAD(,uma_slab) uk_part_slab; /* partially allocated slabs */
+ LIST_HEAD(,uma_slab) uk_free_slab; /* empty slab list */
+ LIST_HEAD(,uma_slab) uk_full_slab; /* full slabs */
+
+ u_int32_t uk_recurse; /* Allocation recursion count */
+ u_int32_t uk_align; /* Alignment mask */
+ u_int32_t uk_pages; /* Total page count */
+ u_int32_t uk_free; /* Count of items free in slabs */
+ u_int32_t uk_size; /* Requested size of each item */
+ u_int32_t uk_rsize; /* Real size of each item */
+ u_int32_t uk_maxpages; /* Maximum number of pages to alloc */
+
+ uma_init uk_init; /* Keg's init routine */
+ uma_fini uk_fini; /* Keg's fini routine */
+ uma_alloc uk_allocf; /* Allocation function */
+ uma_free uk_freef; /* Free routine */
+
+ struct vm_object *uk_obj; /* Zone specific object */
+ vm_offset_t uk_kva; /* Base kva for zones with objs */
+ uma_zone_t uk_slabzone; /* Slab zone backing us, if OFFPAGE */
+
+ u_int16_t uk_pgoff; /* Offset to uma_slab struct */
+ u_int16_t uk_ppera; /* pages per allocation from backend */
+ u_int16_t uk_ipers; /* Items per slab */
+ u_int32_t uk_flags; /* Internal flags */
+};
+typedef struct uma_keg * uma_keg_t;
+
+/* Page management structure */
+
+/* Sorry for the union, but space efficiency is important */
+struct uma_slab_head {
+ uma_keg_t us_keg; /* Keg we live in */
+ union {
+ LIST_ENTRY(uma_slab) _us_link; /* slabs in zone */
+ unsigned long _us_size; /* Size of allocation */
+ } us_type;
+ SLIST_ENTRY(uma_slab) us_hlink; /* Link for hash table */
+ u_int8_t *us_data; /* First item */
+ u_int8_t us_flags; /* Page flags see uma.h */
+ u_int8_t us_freecount; /* How many are free? */
+ u_int8_t us_firstfree; /* First free item index */
+};
+
+/* The standard slab structure */
+struct uma_slab {
+ struct uma_slab_head us_head; /* slab header data */
+ struct {
+ u_int8_t us_item;
+ } us_freelist[1]; /* actual number bigger */
+};
+
+/*
+ * The slab structure for UMA_ZONE_REFCNT zones, whose items have
+ * reference counters maintained within the slab.
+ */
+struct uma_slab_refcnt {
+ struct uma_slab_head us_head; /* slab header data */
+ struct {
+ u_int8_t us_item;
+ u_int32_t us_refcnt;
+ } us_freelist[1]; /* actual number bigger */
+};
+
+#define us_keg us_head.us_keg
+#define us_link us_head.us_type._us_link
+#define us_size us_head.us_type._us_size
+#define us_hlink us_head.us_hlink
+#define us_data us_head.us_data
+#define us_flags us_head.us_flags
+#define us_freecount us_head.us_freecount
+#define us_firstfree us_head.us_firstfree
+
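+/*
+ * A sketch of an item allocation from a slab using the fields above
+ * (modelled on the internal slab allocator in uma_core.c; illustrative
+ * only, "keg" stands for the slab's backing keg):
+ *
+ *	freei = slab->us_firstfree;
+ *	slab->us_firstfree = slab->us_freelist[freei].us_item;
+ *	item = slab->us_data + (keg->uk_rsize * freei);
+ *	slab->us_freecount--;
+ */
+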
+typedef struct uma_slab * uma_slab_t;
+typedef struct uma_slab_refcnt * uma_slabrefcnt_t;
+typedef uma_slab_t (*uma_slaballoc)(uma_zone_t, uma_keg_t, int);
+
+/*
+ * These give us the size of one free item reference within our corresponding
+ * uma_slab structures, so that our calculations during zone setup are correct
+ * regardless of what the compiler decides to do with padding the structure
+ * arrays within uma_slab.
+ */
+#define UMA_FRITM_SZ (sizeof(struct uma_slab) - sizeof(struct uma_slab_head))
+#define UMA_FRITMREF_SZ (sizeof(struct uma_slab_refcnt) - \
+ sizeof(struct uma_slab_head))
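+
+/*
+ * A sketch of the items-per-slab calculation these sizes feed (patterned
+ * after the keg setup code in uma_core.c; illustrative, not verbatim):
+ *
+ *	rsize = roundup(keg->uk_size, keg->uk_align + 1);
+ *	ipers = (UMA_SLAB_SIZE - sizeof(struct uma_slab)) /
+ *	    (rsize + UMA_FRITM_SZ);
+ */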
+
+struct uma_klink {
+ LIST_ENTRY(uma_klink) kl_link;
+ uma_keg_t kl_keg;
+};
+typedef struct uma_klink *uma_klink_t;
+
+/*
+ * Zone management structure
+ *
+ * TODO: Optimize for cache line size
+ *
+ */
+struct uma_zone {
+ char *uz_name; /* Text name of the zone */
+ struct mtx *uz_lock; /* Lock for the zone (keg's lock) */
+
+ LIST_ENTRY(uma_zone) uz_link; /* List of all zones in keg */
+ LIST_HEAD(,uma_bucket) uz_full_bucket; /* full buckets */
+ LIST_HEAD(,uma_bucket) uz_free_bucket; /* Buckets for frees */
+
+ LIST_HEAD(,uma_klink) uz_kegs; /* List of kegs. */
+ struct uma_klink uz_klink; /* klink for first keg. */
+
+ uma_slaballoc uz_slab; /* Allocate a slab from the backend. */
+ uma_ctor uz_ctor; /* Constructor for each allocation */
+ uma_dtor uz_dtor; /* Destructor */
+ uma_init uz_init; /* Initializer for each item */
+ uma_fini uz_fini; /* Discards memory */
+
+ u_int64_t uz_allocs; /* Total number of allocations */
+ u_int64_t uz_frees; /* Total number of frees */
+ u_int64_t uz_fails; /* Total number of alloc failures */
+ u_int32_t uz_flags; /* Flags inherited from kegs */
+ u_int32_t uz_size; /* Size inherited from kegs */
+ uint16_t uz_fills; /* Outstanding bucket fills */
+ uint16_t uz_count; /* Highest value ub_cnt can have */
+
+ /*
+ * This HAS to be the last item because we adjust the zone size
+ * based on NCPU and then allocate the space for the zones.
+ */
+ struct uma_cache uz_cpu[1]; /* Per cpu caches */
+};
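+
+/*
+ * Since uz_cpu[] already holds one cache, the total allocation for a zone
+ * is roughly (illustrative; the exact computation is in uma_core.c):
+ *
+ *	sizeof(struct uma_zone) + (ncpus - 1) * sizeof(struct uma_cache)
+ */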
+
+/*
+ * These flags must not overlap with the UMA_ZONE flags specified in uma.h.
+ */
+#define UMA_ZFLAG_BUCKET 0x02000000 /* Bucket zone. */
+#define UMA_ZFLAG_MULTI 0x04000000 /* Multiple kegs in the zone. */
+#define UMA_ZFLAG_DRAINING 0x08000000 /* Running zone_drain. */
+#define UMA_ZFLAG_PRIVALLOC 0x10000000 /* Use uz_allocf. */
+#define UMA_ZFLAG_INTERNAL 0x20000000 /* No offpage no PCPU. */
+#define UMA_ZFLAG_FULL 0x40000000 /* Reached uz_maxpages */
+#define UMA_ZFLAG_CACHEONLY 0x80000000 /* Don't ask VM for buckets. */
+
+#define UMA_ZFLAG_INHERIT (UMA_ZFLAG_INTERNAL | UMA_ZFLAG_CACHEONLY | \
+ UMA_ZFLAG_BUCKET)
+
+#ifdef _KERNEL
+/* Internal prototypes */
+static __inline uma_slab_t hash_sfind(struct uma_hash *hash, u_int8_t *data);
+void *uma_large_malloc(int size, int wait);
+void uma_large_free(uma_slab_t slab);
+
+/* Lock Macros */
+
+#define KEG_LOCK_INIT(k, lc) \
+ do { \
+ if ((lc)) \
+ mtx_init(&(k)->uk_lock, (k)->uk_name, \
+ (k)->uk_name, MTX_DEF | MTX_DUPOK); \
+ else \
+ mtx_init(&(k)->uk_lock, (k)->uk_name, \
+ "UMA zone", MTX_DEF | MTX_DUPOK); \
+ } while (0)
+
+#define KEG_LOCK_FINI(k) mtx_destroy(&(k)->uk_lock)
+#define KEG_LOCK(k) mtx_lock(&(k)->uk_lock)
+#define KEG_UNLOCK(k) mtx_unlock(&(k)->uk_lock)
+#define ZONE_LOCK(z) mtx_lock((z)->uz_lock)
+#define ZONE_UNLOCK(z) mtx_unlock((z)->uz_lock)
+
+/*
+ * Find a slab within a hash table. This is used for OFFPAGE zones to lookup
+ * the slab structure.
+ *
+ * Arguments:
+ * hash The hash table to search.
+ * data The base page of the item.
+ *
+ * Returns:
+ * A pointer to a slab if successful, else NULL.
+ */
+static __inline uma_slab_t
+hash_sfind(struct uma_hash *hash, u_int8_t *data)
+{
+ uma_slab_t slab;
+ int hval;
+
+ hval = UMA_HASH(hash, data);
+
+ SLIST_FOREACH(slab, &hash->uh_slab_hash[hval], us_hlink) {
+ if ((u_int8_t *)slab->us_data == data)
+ return (slab);
+ }
+ return (NULL);
+}
+
+#ifndef __rtems__
+static __inline uma_slab_t
+vtoslab(vm_offset_t va)
+{
+ vm_page_t p;
+ uma_slab_t slab;
+
+ p = PHYS_TO_VM_PAGE(pmap_kextract(va));
+ slab = (uma_slab_t)p->object;
+
+ if (p->flags & PG_SLAB)
+ return (slab);
+ else
+ return (NULL);
+}
+
+static __inline void
+vsetslab(vm_offset_t va, uma_slab_t slab)
+{
+ vm_page_t p;
+
+ p = PHYS_TO_VM_PAGE(pmap_kextract(va));
+ p->object = (vm_object_t)slab;
+ p->flags |= PG_SLAB;
+}
+
+static __inline void
+vsetobj(vm_offset_t va, vm_object_t obj)
+{
+ vm_page_t p;
+
+ p = PHYS_TO_VM_PAGE(pmap_kextract(va));
+ p->object = obj;
+ p->flags &= ~PG_SLAB;
+}
+#endif
+
+/*
+ * The following two functions may be defined by architecture-specific code
+ * if they can provide more efficient allocation functions. This is useful
+ * for using direct-mapped addresses.
+ */
+void *uma_small_alloc(uma_zone_t zone, int bytes, u_int8_t *pflag, int wait);
+void uma_small_free(void *mem, int size, u_int8_t flags);
+#endif /* _KERNEL */
+
+#endif /* VM_UMA_INT_H */